diff --git a/.release b/.release deleted file mode 100644 index 2c0733315e41..000000000000 --- a/.release +++ /dev/null @@ -1 +0,0 @@ -3.11 diff --git a/.travis.yml b/.travis.yml index 5ce91b991684..c07a4b527218 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,7 +6,7 @@ services: language: go go: - - 1.10.2 + - 1.11.2 before_install: - sudo apt-get install libseccomp-dev -qq diff --git a/Makefile b/Makefile index be3b73e5953a..410695dc4c71 100644 --- a/Makefile +++ b/Makefile @@ -114,7 +114,7 @@ build-rpms: .PHONY: build-rpms # Build images from the official RPMs -# +# # Args: # # Example: @@ -122,15 +122,3 @@ build-rpms: build-images: build-rpms hack/build-images.sh .PHONY: build-images - -.PHONY: lint -lint: ## Go lint your code - hack/go-lint.sh -min_confidence 0.9 ./cluster-autoscaler/cloudprovider/openshiftmachineapi/... - -.PHONY: fmt -fmt: ## Go fmt your code - hack/go-fmt.sh ./cluster-autoscaler/cloudprovider/openshiftmachineapi - -.PHONY: vet -vet: ## Go fmt your code - hack/go-vet.sh ./cluster-autoscaler/cloudprovider/openshiftmachineapi diff --git a/addon-resizer/Godeps/Godeps.json b/addon-resizer/Godeps/Godeps.json index 124cc55487a3..9824a0b58ed5 100644 --- a/addon-resizer/Godeps/Godeps.json +++ b/addon-resizer/Godeps/Godeps.json @@ -1,5 +1,5 @@ { - "ImportPath": "k8s.io/contrib/addon-resizer", + "ImportPath": "k8s.io/autoscaler/addon-resizer", "GoVersion": "go1.6", "GodepVersion": "v74", "Packages": [ diff --git a/addon-resizer/README.md b/addon-resizer/README.md index 80ce8ffd2a2f..9ec13a8340ce 100644 --- a/addon-resizer/README.md +++ b/addon-resizer/README.md @@ -116,7 +116,7 @@ name of your addon, for example: ADDON_NAME=heapster ``` -Currently Addon Resizer is used to scale addons: `heapster`, `metrics-server`. +Currently Addon Resizer is used to scale addons: `heapster`, `metrics-server`, `kube-state-metrics`. ### Overview diff --git a/builder/Dockerfile b/builder/Dockerfile index 42e02c721fe3..131d6506ae71 100644 --- a/builder/Dockerfile +++ b/builder/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM golang:1.10.2 +FROM golang:1.11.2 LABEL maintainer="Marcin Wielgus " ENV GOPATH /gopath/ diff --git a/cluster-autoscaler/Dockerfile b/cluster-autoscaler/Dockerfile index 219f403f628a..c13dcceffa3e 100644 --- a/cluster-autoscaler/Dockerfile +++ b/cluster-autoscaler/Dockerfile @@ -12,12 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -ARG BASEIMAGE=k8s.gcr.io/debian-base-amd64:0.3.2 +ARG BASEIMAGE=k8s.gcr.io/debian-base-amd64:v1.0.0 FROM $BASEIMAGE LABEL maintainer="Marcin Wielgus " ENV DEBIAN_FRONTEND noninteractive -RUN clean-install ca-certificates +RUN clean-install ca-certificates tzdata ADD cluster-autoscaler cluster-autoscaler ADD run.sh run.sh diff --git a/cluster-autoscaler/FAQ.md b/cluster-autoscaler/FAQ.md index 0858d10d21b7..f98ce1bed511 100644 --- a/cluster-autoscaler/FAQ.md +++ b/cluster-autoscaler/FAQ.md @@ -41,6 +41,7 @@ this document: * [How fast is HPA when combined with CA?](#how-fast-is-hpa-when-combined-with-ca) * [Where can I find the designs of the upcoming features?](#where-can-i-find-the-designs-of-the-upcoming-features) * [What are Expanders?](#what-are-expanders) + * [What are the parameters to CA?](#what-are-the-parameters-to-ca) * [Troubleshooting](#troubleshooting) * [I have a couple of nodes with low utilization, but they are not scaled down. 
Why?](#i-have-a-couple-of-nodes-with-low-utilization-but-they-are-not-scaled-down-why)
 * [How to set PDBs to enable CA to move kube-system pods?](#how-to-set-pdbs-to-enable-ca-to-move-kube-system-pods)
@@ -309,14 +310,14 @@ to scale up the cluster.
 The size of overprovisioned resources can be controlled by changing the size of pause pods and the number of replicas. This way you can configure static size of overprovisioning resources (i.e. 2
-additional cores). If we want to configure dynamic size (i.e. 20% of recources in the cluster)
+additional cores). If we want to configure dynamic size (i.e. 20% of resources in the cluster)
 then we need to use [Horizontal Cluster Proportional Autoscaler](https://github.com/kubernetes-incubator/cluster-proportional-autoscaler) which will change number of pause pods depending on the size of the cluster. It will increase the number of replicas when cluster grows and decrease the number of replicas if cluster shrinks.

 Configuration of dynamic overprovisioning:
-1. Enable priority preemption in your cluster. It can be done by exporting following env
+1. (For 1.10 and below) Enable priority preemption in your cluster. It can be done by exporting the following env
 variables before executing kube-up (more details [here](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/)):
 ```sh
 export KUBE_RUNTIME_CONFIG=scheduling.k8s.io/v1alpha1=true
@@ -326,7 +327,9 @@ export ENABLE_POD_PRIORITY=true
 ```

 2. Define priority class for overprovisioning pods. Priority -1 will be reserved for overprovisioning pods as it is the lowest priority that triggers scaling clusters. Other pods need to use priority 0 or higher in order to be able to preempt overprovisioning pods. You can use
-following definitions:
+following definitions.
+
+**For 1.10 and below:**

 ```yaml
 apiVersion: scheduling.k8s.io/v1alpha1
@@ -338,6 +341,18 @@ globalDefault: false
 description: "Priority class used by overprovisioning."
 ```

+**For 1.11:**
+
+```yaml
+apiVersion: scheduling.k8s.io/v1beta1
+kind: PriorityClass
+metadata:
+  name: overprovisioning
+value: -1
+globalDefault: false
+description: "Priority class used by overprovisioning."
+```
+
 3. Change pod priority cutoff in CA to -10 so pause pods are taken into account during scale down and scale up. Set flag ```expendable-pods-priority-cutoff``` to -10. If you already use priority preemption then pods with priorities between -10 and -1 won't be best effort anymore.

@@ -442,14 +457,12 @@ Autoscaler expects requested nodes to appear within 15 minutes (configured by
 `--max-node-provision-time` flag.) After this time, if they are still unregistered, it stops considering them in simulations and may attempt to scale up a different group if the pods are still pending. It will also attempt to remove
-any nodes left unregistered after 15 minutes (configured by
-`--unregistered-node-removal-time` flag.) For this reason, we strongly
-recommend to set those flags to the same value.
+any nodes left unregistered after this time.

 ### How does scale-down work?

 Every 10 seconds (configurable by `--scan-interval` flag), if no scale-up is
-needed, Cluster Autoscaler checks which nodes are unneeded. A node is considered for removal when:
+needed, Cluster Autoscaler checks which nodes are unneeded. A node is considered for removal when **all** of the below conditions hold:

 * The sum of cpu and memory requests of all pods running on this node is smaller than 50% of the node's allocatable.
(Before 1.1.0, node capacity was used
@@ -515,7 +528,13 @@ then this node group may be excluded from future scale-ups.

 ### How fast is Cluster Autoscaler?

-By default, scale-up is considered up to 10 seconds after pod is marked as unschedulable, and scale-down 10 minutes after a node becomes unneeded. There are multiple flags which can be used to configure them. Assuming default settings, [SLOs described here apply](#what-are-the-service-level-objectives-for-cluster-autoscaler).
+By default, scale-up is considered up to 10 seconds after a pod is marked as unschedulable, and scale-down 10 minutes after a node becomes unneeded.
+There are multiple flags which can be used to configure these thresholds. For example, in some environments, you may wish to give the k8s scheduler
+a bit more time to schedule a pod than the CA's scan-interval. One way to do this is by setting `--new-pod-scale-up-delay`, which causes the CA to
+ignore unschedulable pods until they are a certain "age", regardless of the scan-interval. If k8s has not scheduled them by the end of that delay,
+then they may be considered by the CA for a possible scale-up.
+
+Assuming default settings, [SLOs described here apply](#what-are-the-service-level-objectives-for-cluster-autoscaler).

 ### How fast is HPA when combined with CA?

@@ -585,6 +604,58 @@ would match the cluster size. This expander is described in more details

 ************

+### What are the parameters to CA?
+
+The following startup parameters are supported for cluster autoscaler:
+
+| Parameter | Description | Default |
+| --- | --- | --- |
+| `cluster-name` | Autoscaled cluster name, if available | ""
+| `address` | The address to expose prometheus metrics | :8085
+| `kubernetes` | Kubernetes master location. Leave blank for default | ""
+| `kubeconfig` | Path to kubeconfig file with authorization and master location information | ""
+| `cloud-config` | The path to the cloud provider configuration file. Empty string for no configuration file | ""
+| `namespace` | Namespace in which cluster-autoscaler runs | "kube-system"
+| `scale-down-enabled` | Should CA scale down the cluster | true
+| `scale-down-delay-after-add` | How long after scale up that scale down evaluation resumes | 10 minutes
+| `scale-down-delay-after-delete` | How long after node deletion that scale down evaluation resumes, defaults to scan-interval | scan-interval
+| `scale-down-delay-after-failure` | How long after scale down failure that scale down evaluation resumes | 3 minutes
+| `scale-down-unneeded-time` | How long a node should be unneeded before it is eligible for scale down | 10 minutes
+| `scale-down-unready-time` | How long an unready node should be unneeded before it is eligible for scale down | 20 minutes
+| `scale-down-utilization-threshold` | Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down | 0.5
+| `scale-down-non-empty-candidates-count` | Maximum number of non empty nodes considered in one iteration as candidates for scale down with drain<br>Lower value means better CA responsiveness but possible slower scale down latency<br>Higher value can affect CA performance with big clusters (hundreds of nodes)<br>Set to non positive value to turn this heuristic off - CA will not limit the number of nodes it considers. | 30
+| `scale-down-candidates-pool-ratio` | A ratio of nodes that are considered as additional non empty candidates for<br>scale down when some candidates from previous iteration are no longer valid<br>Lower value means better CA responsiveness but possible slower scale down latency<br>Higher value can affect CA performance with big clusters (hundreds of nodes)<br>Set to 1.0 to turn this heuristic off - CA will take all nodes as additional candidates. | 0.1
+| `scale-down-candidates-pool-min-count` | Minimum number of nodes that are considered as additional non empty candidates<br>for scale down when some candidates from previous iteration are no longer valid.<br>When calculating the pool size for additional candidates we take<br>`max(#nodes * scale-down-candidates-pool-ratio, scale-down-candidates-pool-min-count)` | 50
+| `scan-interval` | How often cluster is reevaluated for scale up or down | 10 seconds
+| `max-nodes-total` | Maximum number of nodes in all node groups. Cluster autoscaler will not grow the cluster beyond this number. | 0
+| `cores-total` | Minimum and maximum number of cores in cluster, in the format <min>:<max>. Cluster autoscaler will not scale the cluster beyond these numbers. | 320000
+| `memory-total` | Minimum and maximum number of gigabytes of memory in cluster, in the format <min>:<max>. Cluster autoscaler will not scale the cluster beyond these numbers. | 6400000
+| `gpu-total` | Minimum and maximum number of different GPUs in cluster, in the format <gpu_type>:<min>:<max>. Cluster autoscaler will not scale the cluster beyond these numbers. Can be passed multiple times. CURRENTLY THIS FLAG ONLY WORKS ON GKE. | ""
+| `cloud-provider` | Cloud provider type. | gce
+| `max-empty-bulk-delete` | Maximum number of empty nodes that can be deleted at the same time. | 10
+| `max-graceful-termination-sec` | Maximum number of seconds CA waits for pod termination when trying to scale down a node. | 600
+| `max-total-unready-percentage` | Maximum percentage of unready nodes in the cluster. After this is exceeded, CA halts operations | 45
+| `ok-total-unready-count` | Number of allowed unready nodes, irrespective of max-total-unready-percentage | 3
+| `max-node-provision-time` | Maximum time CA waits for node to be provisioned | 15 minutes
+| `nodes` | Sets min, max size and other configuration data for a node group in a format accepted by cloud provider. Can be used multiple times. Format: <min>:<max>:<other...> | ""
+| `node-group-auto-discovery` | One or more definition(s) of node group auto-discovery.<br>A definition is expressed `<name of discoverer>:[<key>[=<value>]]`<br>The `aws` and `gce` cloud providers are currently supported. AWS matches by ASG tags, e.g. `asg:tag=tagKey,anotherTagKey`<br>GCE matches by IG name prefix, and requires you to specify min and max nodes per IG, e.g. `mig:namePrefix=pfx,min=0,max=10`<br>Can be used multiple times | ""
+| `estimator` | Type of resource estimator to be used in scale up | binpacking
+| `expander` | Type of node group expander to be used in scale up. | random
+| `write-status-configmap` | Should CA write status information to a configmap | true
+| `max-inactivity` | Maximum time from last recorded autoscaler activity before automatic restart | 10 minutes
+| `max-failing-time` | Maximum time from last recorded successful autoscaler run before automatic restart | 15 minutes
+| `balance-similar-node-groups` | Detect similar node groups and balance the number of nodes between them | false
+| `node-autoprovisioning-enabled` | Should CA autoprovision node groups when needed | false
+| `max-autoprovisioned-node-group-count` | The maximum number of autoprovisioned groups in the cluster | 15
+| `unremovable-node-recheck-timeout` | The timeout before we check again a node that couldn't be removed before | 5 minutes
+| `expendable-pods-priority-cutoff` | Pods with priority below cutoff will be expendable. They can be killed without any consideration during scale down and they don't cause scale up. Pods with null priority (PodPriority disabled) are non expendable | 0
+| `regional` | Cluster is regional | false
+| `leader-elect` | Start a leader election client and gain leadership before executing the main loop.<br>Enable this when running replicated components for high availability | true
+| `leader-elect-lease-duration` | The duration that non-leader candidates will wait after observing a leadership<br>renewal until attempting to acquire leadership of a led but unrenewed leader slot.<br>This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate.<br>This is only applicable if leader election is enabled | 15 seconds
+| `leader-elect-renew-deadline` | The interval between attempts by the acting master to renew a leadership slot before it stops leading.<br>This must be less than or equal to the lease duration.<br>This is only applicable if leader election is enabled | 10 seconds
+| `leader-elect-retry-period` | The duration the clients should wait between attempting acquisition and renewal of a leadership.<br>This is only applicable if leader election is enabled | 2 seconds
+| `leader-elect-resource-lock` | The type of resource object that is used for locking during leader election.<br>Supported options are `endpoints` (default) and `configmaps` | "endpoints"
+
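To make the table concrete, here is a sketch of how a few of the flags above might be combined when launching the autoscaler. It is illustrative only: the binary path, the AWS ASG name `my-asg`, and every value shown are assumptions, not recommended settings.

```sh
# Illustrative cluster-autoscaler invocation (values are examples, not defaults):
# autoscale one AWS node group between 1 and 10 nodes, re-evaluate every 10s,
# and scale down nodes that stay below 50% utilization for 10 unneeded minutes.
./cluster-autoscaler \
    --cloud-provider=aws \
    --nodes=1:10:my-asg \
    --scan-interval=10s \
    --scale-down-utilization-threshold=0.5 \
    --scale-down-unneeded-time=10m \
    --balance-similar-node-groups=true \
    --expander=random
```

Each flag here maps directly to a row in the table; anything left unset keeps the default listed above.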
 # Troubleshooting:

 ### I have a couple of nodes with low utilization, but they are not scaled down. Why?
@@ -655,10 +726,12 @@ Events:
   Warning FailedScheduling .. default-scheduler No nodes are available that match all of the following predicates:: Insufficient cpu (4), NoVolumeZoneConflict (2)
 ```
-This limitation will go away with
+This limitation was solved with
 [volume topological scheduling](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/storage/volume-topology-scheduling.md)
-support in Kubernetes. Currently, we advice to set CA upper limits in a way to
-allow for some slack capacity.
+introduced as beta in Kubernetes 1.11 and planned for GA in 1.13.
+To allow CA to take advantage of topological scheduling, use separate node groups per zone.
+This way CA knows exactly which node group will create nodes in the required zone rather than relying on the cloud provider choosing a zone for a new node in a multi-zone node group.
+When using separate node groups per zone, the `--balance-similar-node-groups` flag will keep nodes balanced across zones for workloads that don't require topological scheduling.

 ### CA doesn’t work, but it used to work yesterday. Why?

diff --git a/cluster-autoscaler/Godeps/Godeps.json b/cluster-autoscaler/Godeps/Godeps.json index 16c02d9966be..648bb73852d4 100644 --- a/cluster-autoscaler/Godeps/Godeps.json +++ b/cluster-autoscaler/Godeps/Godeps.json @@ -1,6 +1,6 @@ { "ImportPath": "k8s.io/autoscaler/cluster-autoscaler", - "GoVersion": "go1.10", + "GoVersion": "go1.11", "GodepVersion": "v80", "Packages": [ "./..." ] @@ -22,44 +22,44 @@ "Rev": "3b1ae45394a234c385be014e9a488f2bb6eef821" }, { - "ImportPath": "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute", - "Comment": "v19.0.0-1-g80862a3e3", - "Rev": "80862a3e3882bfab661e590430558c2ba1a5f76d" + "ImportPath": "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute", + "Comment": "v21.3.0-1-g274d2b51f", + "Rev": "274d2b51f1342584e885445e85556bc109f528fb" }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2017-10-01/containerregistry", - "Comment": "v19.0.0-1-g80862a3e3", - "Rev": "80862a3e3882bfab661e590430558c2ba1a5f76d" + "Comment": "v21.3.0-1-g274d2b51f", + "Rev": "274d2b51f1342584e885445e85556bc109f528fb" }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2018-03-31/containerservice", - "Comment": "v19.0.0-1-g80862a3e3", - "Rev": "80862a3e3882bfab661e590430558c2ba1a5f76d" + "Comment": "v21.3.0-1-g274d2b51f", + "Rev": "274d2b51f1342584e885445e85556bc109f528fb" }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network", - "Comment": "v19.0.0-1-g80862a3e3", - "Rev": "80862a3e3882bfab661e590430558c2ba1a5f76d" + "Comment": "v21.3.0-1-g274d2b51f", + "Rev": "274d2b51f1342584e885445e85556bc109f528fb" }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources", - "Comment": "v19.0.0-1-g80862a3e3", - "Rev": "80862a3e3882bfab661e590430558c2ba1a5f76d" + "Comment": "v21.3.0-1-g274d2b51f", + "Rev": "274d2b51f1342584e885445e85556bc109f528fb" }, { - "ImportPath": "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage", - "Comment": "v19.0.0-1-g80862a3e3", - "Rev": "80862a3e3882bfab661e590430558c2ba1a5f76d" + "ImportPath": "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage", + "Comment":
"v21.3.0-1-g274d2b51f", + "Rev": "274d2b51f1342584e885445e85556bc109f528fb" }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/storage", - "Comment": "v19.0.0-1-g80862a3e3", - "Rev": "80862a3e3882bfab661e590430558c2ba1a5f76d" + "Comment": "v21.3.0-1-g274d2b51f", + "Rev": "274d2b51f1342584e885445e85556bc109f528fb" }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/version", - "Comment": "v19.0.0-1-g80862a3e3", - "Rev": "80862a3e3882bfab661e590430558c2ba1a5f76d" + "Comment": "v21.3.0-1-g274d2b51f", + "Rev": "274d2b51f1342584e885445e85556bc109f528fb" }, { "ImportPath": "github.com/Azure/go-ansiterm", @@ -71,33 +71,43 @@ }, { "ImportPath": "github.com/Azure/go-autorest/autorest", - "Comment": "v10.14.0-1-g87362f1", - "Rev": "87362f106f7c6486b755ffc2cacb2903fa59f2d4" + "Comment": "v11.1.0-1-g8619900", + "Rev": "8619900403353b6fd7c55ee353c4e9725e20f904" }, { "ImportPath": "github.com/Azure/go-autorest/autorest/adal", - "Comment": "v10.14.0-1-g87362f1", - "Rev": "87362f106f7c6486b755ffc2cacb2903fa59f2d4" + "Comment": "v11.1.0-1-g8619900", + "Rev": "8619900403353b6fd7c55ee353c4e9725e20f904" }, { "ImportPath": "github.com/Azure/go-autorest/autorest/azure", - "Comment": "v10.14.0-1-g87362f1", - "Rev": "87362f106f7c6486b755ffc2cacb2903fa59f2d4" + "Comment": "v11.1.0-1-g8619900", + "Rev": "8619900403353b6fd7c55ee353c4e9725e20f904" }, { "ImportPath": "github.com/Azure/go-autorest/autorest/date", - "Comment": "v10.14.0-1-g87362f1", - "Rev": "87362f106f7c6486b755ffc2cacb2903fa59f2d4" + "Comment": "v11.1.0-1-g8619900", + "Rev": "8619900403353b6fd7c55ee353c4e9725e20f904" }, { "ImportPath": "github.com/Azure/go-autorest/autorest/to", - "Comment": "v10.14.0-1-g87362f1", - "Rev": "87362f106f7c6486b755ffc2cacb2903fa59f2d4" + "Comment": "v11.1.0-1-g8619900", + "Rev": "8619900403353b6fd7c55ee353c4e9725e20f904" }, { "ImportPath": "github.com/Azure/go-autorest/autorest/validation", - "Comment": "v10.14.0-1-g87362f1", - "Rev": "87362f106f7c6486b755ffc2cacb2903fa59f2d4" + "Comment": "v11.1.0-1-g8619900", + "Rev": "8619900403353b6fd7c55ee353c4e9725e20f904" + }, + { + "ImportPath": "github.com/Azure/go-autorest/logger", + "Comment": "v11.1.0-1-g8619900", + "Rev": "8619900403353b6fd7c55ee353c4e9725e20f904" + }, + { + "ImportPath": "github.com/Azure/go-autorest/version", + "Comment": "v11.1.0-1-g8619900", + "Rev": "8619900403353b6fd7c55ee353c4e9725e20f904" }, { "ImportPath": "github.com/JeffAshton/win_pdh", @@ -130,184 +140,209 @@ "ImportPath": "github.com/PuerkitoBio/urlesc", "Rev": "5bd2802263f21d8788851d5305584c82a5c75d7e" }, + { + "ImportPath": "github.com/Rican7/retry", + "Comment": "v0.1.0-9-g272ad12", + "Rev": "272ad122d6e5ce1be757544007cf8bcd1c9c9ab0" + }, + { + "ImportPath": "github.com/Rican7/retry/backoff", + "Comment": "v0.1.0-9-g272ad12", + "Rev": "272ad122d6e5ce1be757544007cf8bcd1c9c9ab0" + }, + { + "ImportPath": "github.com/Rican7/retry/jitter", + "Comment": "v0.1.0-9-g272ad12", + "Rev": "272ad122d6e5ce1be757544007cf8bcd1c9c9ab0" + }, + { + "ImportPath": "github.com/Rican7/retry/strategy", + "Comment": "v0.1.0-9-g272ad12", + "Rev": "272ad122d6e5ce1be757544007cf8bcd1c9c9ab0" + }, { "ImportPath": "github.com/armon/circbuf", "Rev": "bbbad097214e2918d8543d5201d12bfd7bca254d" }, + { + "ImportPath": "github.com/asaskevich/govalidator", + "Comment": "v9-26-gf9ffefc", + "Rev": "f9ffefc3facfbe0caee3fea233cbb6e8208f4541" + }, { "ImportPath": "github.com/aws/aws-sdk-go/aws", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": 
"fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/awserr", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/awsutil", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/client", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/client/metadata", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/corehandlers", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/stscreds", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/csm", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/defaults", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/ec2metadata", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/endpoints", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/request", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/session", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": 
"github.com/aws/aws-sdk-go/aws/signer/v4", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/aws/aws-sdk-go/internal/sdkio", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/aws/aws-sdk-go/internal/sdkrand", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/aws/aws-sdk-go/internal/shareddefaults", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/aws/aws-sdk-go/private/protocol", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/ec2query", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/jsonrpc", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/rest", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/aws/aws-sdk-go/service/autoscaling", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/aws/aws-sdk-go/service/ec2", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/aws/aws-sdk-go/service/ecr", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": 
"github.com/aws/aws-sdk-go/service/elb", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/aws/aws-sdk-go/service/elbv2", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/aws/aws-sdk-go/service/kms", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/aws/aws-sdk-go/service/sts", - "Comment": "v1.14.12-2-g2a1555fa", - "Rev": "2a1555faedc4907a34b57173c9125f626e38071e" + "Comment": "v1.14.12", + "Rev": "fde4ded7becdeae4d26bf1212916aabba79349b4" }, { "ImportPath": "github.com/beorn7/perks/quantile", @@ -331,9 +366,9 @@ "Rev": "20e2ce2cf8852dc78bd42b76698dcd8dcd77b7b1" }, { - "ImportPath": "github.com/container-storage-interface/spec/lib/go/csi/v0", - "Comment": "v0.3.0", - "Rev": "2178fdeea87f1150a17a63252eee28d4d8141f72" + "ImportPath": "github.com/container-storage-interface/spec/lib/go/csi", + "Comment": "v1.0.0", + "Rev": "ed0bb0e1557548aa028307f48728767cfe8f6345" }, { "ImportPath": "github.com/containerd/console", @@ -341,137 +376,108 @@ }, { "ImportPath": "github.com/containerd/containerd/api/services/containers/v1", - "Comment": "v1.0.2-1-gede969be", - "Rev": "ede969bef9fc97dda519df18e684fd7d947789b1" + "Comment": "v1.0.2", + "Rev": "cfd04396dc68220d1cecbe686a6cc3aa5ce3667c" }, { "ImportPath": "github.com/containerd/containerd/api/services/tasks/v1", - "Comment": "v1.0.2-1-gede969be", - "Rev": "ede969bef9fc97dda519df18e684fd7d947789b1" + "Comment": "v1.0.2", + "Rev": "cfd04396dc68220d1cecbe686a6cc3aa5ce3667c" }, { "ImportPath": "github.com/containerd/containerd/api/services/version/v1", - "Comment": "v1.0.2-1-gede969be", - "Rev": "ede969bef9fc97dda519df18e684fd7d947789b1" + "Comment": "v1.0.2", + "Rev": "cfd04396dc68220d1cecbe686a6cc3aa5ce3667c" }, { "ImportPath": "github.com/containerd/containerd/api/types", - "Comment": "v1.0.2-1-gede969be", - "Rev": "ede969bef9fc97dda519df18e684fd7d947789b1" + "Comment": "v1.0.2", + "Rev": "cfd04396dc68220d1cecbe686a6cc3aa5ce3667c" }, { "ImportPath": "github.com/containerd/containerd/api/types/task", - "Comment": "v1.0.2-1-gede969be", - "Rev": "ede969bef9fc97dda519df18e684fd7d947789b1" + "Comment": "v1.0.2", + "Rev": "cfd04396dc68220d1cecbe686a6cc3aa5ce3667c" }, { "ImportPath": "github.com/containerd/containerd/containers", - "Comment": "v1.0.2-1-gede969be", - "Rev": "ede969bef9fc97dda519df18e684fd7d947789b1" + "Comment": "v1.0.2", + "Rev": "cfd04396dc68220d1cecbe686a6cc3aa5ce3667c" }, { "ImportPath": "github.com/containerd/containerd/dialer", - "Comment": "v1.0.2-1-gede969be", - "Rev": "ede969bef9fc97dda519df18e684fd7d947789b1" + "Comment": "v1.0.2", + "Rev": "cfd04396dc68220d1cecbe686a6cc3aa5ce3667c" }, { "ImportPath": "github.com/containerd/containerd/errdefs", - "Comment": "v1.0.2-1-gede969be", - "Rev": "ede969bef9fc97dda519df18e684fd7d947789b1" + "Comment": "v1.0.2", + "Rev": "cfd04396dc68220d1cecbe686a6cc3aa5ce3667c" }, { "ImportPath": "github.com/containerd/containerd/namespaces", - "Comment": "v1.0.2-1-gede969be", - "Rev": "ede969bef9fc97dda519df18e684fd7d947789b1" + "Comment": "v1.0.2", + "Rev": "cfd04396dc68220d1cecbe686a6cc3aa5ce3667c" }, { "ImportPath": "github.com/containernetworking/cni/libcni", - 
"Comment": "v0.6.0-1-gc013194", - "Rev": "c013194b7c6ecbd757dba10622011b618e025243" + "Comment": "v0.6.0", + "Rev": "a7885cb6f8ab03fba07852ded351e4f5e7a112bf" }, { "ImportPath": "github.com/containernetworking/cni/pkg/invoke", - "Comment": "v0.6.0-1-gc013194", - "Rev": "c013194b7c6ecbd757dba10622011b618e025243" + "Comment": "v0.6.0", + "Rev": "a7885cb6f8ab03fba07852ded351e4f5e7a112bf" }, { "ImportPath": "github.com/containernetworking/cni/pkg/types", - "Comment": "v0.6.0-1-gc013194", - "Rev": "c013194b7c6ecbd757dba10622011b618e025243" + "Comment": "v0.6.0", + "Rev": "a7885cb6f8ab03fba07852ded351e4f5e7a112bf" }, { "ImportPath": "github.com/containernetworking/cni/pkg/types/020", - "Comment": "v0.6.0-1-gc013194", - "Rev": "c013194b7c6ecbd757dba10622011b618e025243" + "Comment": "v0.6.0", + "Rev": "a7885cb6f8ab03fba07852ded351e4f5e7a112bf" }, { "ImportPath": "github.com/containernetworking/cni/pkg/types/current", - "Comment": "v0.6.0-1-gc013194", - "Rev": "c013194b7c6ecbd757dba10622011b618e025243" + "Comment": "v0.6.0", + "Rev": "a7885cb6f8ab03fba07852ded351e4f5e7a112bf" }, { "ImportPath": "github.com/containernetworking/cni/pkg/version", - "Comment": "v0.6.0-1-gc013194", - "Rev": "c013194b7c6ecbd757dba10622011b618e025243" - }, - { - "ImportPath": "github.com/coreos/etcd/client", - "Comment": "v3.2.24-1-gab64899ec", - "Rev": "ab64899ecaa3ce70942c565b173d27ffb6ff159b" - }, - { - "ImportPath": "github.com/coreos/etcd/pkg/pathutil", - "Comment": "v3.2.24-1-gab64899ec", - "Rev": "ab64899ecaa3ce70942c565b173d27ffb6ff159b" - }, - { - "ImportPath": "github.com/coreos/etcd/pkg/srv", - "Comment": "v3.2.24-1-gab64899ec", - "Rev": "ab64899ecaa3ce70942c565b173d27ffb6ff159b" - }, - { - "ImportPath": "github.com/coreos/etcd/pkg/types", - "Comment": "v3.2.24-1-gab64899ec", - "Rev": "ab64899ecaa3ce70942c565b173d27ffb6ff159b" - }, - { - "ImportPath": "github.com/coreos/etcd/version", - "Comment": "v3.2.24-1-gab64899ec", - "Rev": "ab64899ecaa3ce70942c565b173d27ffb6ff159b" - }, - { - "ImportPath": "github.com/coreos/go-semver/semver", - "Rev": "568e959cd89871e61434c1143528d9162da89ef2" + "Comment": "v0.6.0", + "Rev": "a7885cb6f8ab03fba07852ded351e4f5e7a112bf" }, { "ImportPath": "github.com/coreos/go-systemd/daemon", - "Comment": "v14", - "Rev": "48702e0da86bd25e76cfef347e2adeb434a0d0a6" + "Comment": "v17", + "Rev": "39ca1b05acc7ad1220e09f133283b8859a8b71ab" }, { "ImportPath": "github.com/coreos/go-systemd/dbus", - "Comment": "v14", - "Rev": "48702e0da86bd25e76cfef347e2adeb434a0d0a6" + "Comment": "v17", + "Rev": "39ca1b05acc7ad1220e09f133283b8859a8b71ab" }, { "ImportPath": "github.com/coreos/go-systemd/util", - "Comment": "v14", - "Rev": "48702e0da86bd25e76cfef347e2adeb434a0d0a6" + "Comment": "v17", + "Rev": "39ca1b05acc7ad1220e09f133283b8859a8b71ab" }, { "ImportPath": "github.com/coreos/pkg/dlopen", - "Comment": "v2-8-gfa29b1d", - "Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8" + "Comment": "v4", + "Rev": "97fdf19511ea361ae1c100dd393cc47f8dcfa1e1" }, { "ImportPath": "github.com/coreos/rkt/api/v1alpha", - "Comment": "v1.25.0-1-gcf595d9d", - "Rev": "cf595d9d015f3c0ca15a85b94dc19c5ca557e149" + "Comment": "v1.25.0", + "Rev": "ec37f3cb649bfb72408906e7cbf330e4aeda1075" }, { "ImportPath": "github.com/cyphar/filepath-securejoin", - "Comment": "v0.2.1-2-g47da00b", - "Rev": "47da00bc449028174d656b85a8f5736326fdf0a3" + "Comment": "v0.2.1-1-gae69057", + "Rev": "ae69057f2299fb9e5ba2df738607e6a505b74ab6" }, { "ImportPath": "github.com/d2g/dhcp4", @@ -493,143 +499,143 @@ }, { "ImportPath": 
"github.com/docker/distribution/digestset", - "Comment": "v2.6.0-rc.1-210-ge0850c9b", - "Rev": "e0850c9bcf3e450bdcd23c05efd16b42d4499531" + "Comment": "v2.6.0-rc.1-209-gedc3ab29", + "Rev": "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c" }, { "ImportPath": "github.com/docker/distribution/reference", - "Comment": "v2.6.0-rc.1-210-ge0850c9b", - "Rev": "e0850c9bcf3e450bdcd23c05efd16b42d4499531" + "Comment": "v2.6.0-rc.1-209-gedc3ab29", + "Rev": "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c" }, { "ImportPath": "github.com/docker/docker/api", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9512-ga09cf25b07", - "Rev": "a09cf25b07e8462be55217778c7d2cfe694d7ad7" + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/api/types", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9512-ga09cf25b07", - "Rev": "a09cf25b07e8462be55217778c7d2cfe694d7ad7" + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/api/types/blkiodev", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9512-ga09cf25b07", - "Rev": "a09cf25b07e8462be55217778c7d2cfe694d7ad7" + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/api/types/container", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9512-ga09cf25b07", - "Rev": "a09cf25b07e8462be55217778c7d2cfe694d7ad7" + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/api/types/events", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9512-ga09cf25b07", - "Rev": "a09cf25b07e8462be55217778c7d2cfe694d7ad7" + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/api/types/filters", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9512-ga09cf25b07", - "Rev": "a09cf25b07e8462be55217778c7d2cfe694d7ad7" + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/api/types/image", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9512-ga09cf25b07", - "Rev": "a09cf25b07e8462be55217778c7d2cfe694d7ad7" + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/api/types/mount", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9512-ga09cf25b07", - "Rev": "a09cf25b07e8462be55217778c7d2cfe694d7ad7" + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/api/types/network", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9512-ga09cf25b07", - "Rev": "a09cf25b07e8462be55217778c7d2cfe694d7ad7" + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/api/types/registry", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9512-ga09cf25b07", - "Rev": "a09cf25b07e8462be55217778c7d2cfe694d7ad7" + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/api/types/strslice", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9512-ga09cf25b07", - "Rev": "a09cf25b07e8462be55217778c7d2cfe694d7ad7" + "Comment": 
"docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/api/types/swarm", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9512-ga09cf25b07", - "Rev": "a09cf25b07e8462be55217778c7d2cfe694d7ad7" + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/api/types/swarm/runtime", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9512-ga09cf25b07", - "Rev": "a09cf25b07e8462be55217778c7d2cfe694d7ad7" + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/api/types/time", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9512-ga09cf25b07", - "Rev": "a09cf25b07e8462be55217778c7d2cfe694d7ad7" + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/api/types/versions", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9512-ga09cf25b07", - "Rev": "a09cf25b07e8462be55217778c7d2cfe694d7ad7" + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/api/types/volume", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9512-ga09cf25b07", - "Rev": "a09cf25b07e8462be55217778c7d2cfe694d7ad7" + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/client", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9512-ga09cf25b07", - "Rev": "a09cf25b07e8462be55217778c7d2cfe694d7ad7" + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9512-ga09cf25b07", - "Rev": "a09cf25b07e8462be55217778c7d2cfe694d7ad7" + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/pkg/jsonmessage", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9512-ga09cf25b07", - "Rev": "a09cf25b07e8462be55217778c7d2cfe694d7ad7" + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/pkg/mount", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9512-ga09cf25b07", - "Rev": "a09cf25b07e8462be55217778c7d2cfe694d7ad7" + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/pkg/parsers", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9512-ga09cf25b07", - "Rev": "a09cf25b07e8462be55217778c7d2cfe694d7ad7" + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/pkg/parsers/operatingsystem", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9512-ga09cf25b07", - "Rev": "a09cf25b07e8462be55217778c7d2cfe694d7ad7" + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/pkg/stdcopy", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9512-ga09cf25b07", - "Rev": "a09cf25b07e8462be55217778c7d2cfe694d7ad7" + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Rev": 
"a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/pkg/sysinfo", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9512-ga09cf25b07", - "Rev": "a09cf25b07e8462be55217778c7d2cfe694d7ad7" + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/pkg/term", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9512-ga09cf25b07", - "Rev": "a09cf25b07e8462be55217778c7d2cfe694d7ad7" + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/pkg/term/windows", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9512-ga09cf25b07", - "Rev": "a09cf25b07e8462be55217778c7d2cfe694d7ad7" + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/go-connections/nat", @@ -653,8 +659,8 @@ }, { "ImportPath": "github.com/docker/libnetwork/ipvs", - "Comment": "v0.8.0-dev.2-911-g7bd74a48", - "Rev": "7bd74a48e771f28cb47e20f1e6021c089e0b4284" + "Comment": "v0.8.0-dev.2-1265-ga9cd636e", + "Rev": "a9cd636e37898226332c439363e2ed0ea185ae92" }, { "ImportPath": "github.com/docker/spdystream", @@ -690,8 +696,8 @@ }, { "ImportPath": "github.com/evanphx/json-patch", - "Comment": "v3.0.0-23-g94e38aa", - "Rev": "94e38aa1586e8a6c8a75770bddf5ff84c48a106b" + "Comment": "v4.0.0-3-g36442db", + "Rev": "36442dbdb585210f8d5a1b45e67aa323c197d5c4" }, { "ImportPath": "github.com/fatih/camelcase", @@ -704,7 +710,8 @@ }, { "ImportPath": "github.com/ghodss/yaml", - "Rev": "73d445a93680fa1a78ae23a5839bad48f32ba1ee" + "Comment": "v1.0.0-4-gc7ce166", + "Rev": "c7ce16629ff4cd059ed96ed06419dd3856fd3577" }, { "ImportPath": "github.com/go-ini/ini", @@ -713,19 +720,33 @@ }, { "ImportPath": "github.com/go-openapi/jsonpointer", - "Rev": "46af16f9f7b149af66e5d1bd010e3574dc06de98" + "Comment": "v0.18.0", + "Rev": "ef5f0afec364d3b9396b7b77b43dbe26bf1f8004" }, { "ImportPath": "github.com/go-openapi/jsonreference", - "Rev": "13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272" + "Comment": "v0.17.2", + "Rev": "8483a886a90412cd6858df4ea3483dce9c8e35a3" }, { "ImportPath": "github.com/go-openapi/spec", - "Rev": "1de3e0542de65ad8d75452a595886fdd0befb363" + "Comment": "v0.17.2", + "Rev": "5bae59e25b21498baea7f9d46e9c147ec106a42e" }, { "ImportPath": "github.com/go-openapi/swag", - "Rev": "f3f9494671f93fcff853e3c6e9e948b3eb71e590" + "Comment": "v0.17.2", + "Rev": "5899d5c5e619fda5fa86e14795a835f473ca284c" + }, + { + "ImportPath": "github.com/go-ozzo/ozzo-validation", + "Comment": "v3.5.0", + "Rev": "106681dbb37bfa3e7683c4c8129cb7f5925ea3e9" + }, + { + "ImportPath": "github.com/go-ozzo/ozzo-validation/is", + "Comment": "v3.5.0", + "Rev": "106681dbb37bfa3e7683c4c8129cb7f5925ea3e9" }, { "ImportPath": "github.com/godbus/dbus", @@ -734,32 +755,28 @@ }, { "ImportPath": "github.com/gogo/protobuf/gogoproto", - "Comment": "v0.4-3-gc0656edd", - "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + "Comment": "v0.5", + "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, { "ImportPath": "github.com/gogo/protobuf/proto", - "Comment": "v0.4-3-gc0656edd", - "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + "Comment": "v0.5", + "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, { "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/descriptor", - "Comment": "v0.4-3-gc0656edd", - "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + "Comment": "v0.5", + "Rev": 
"342cbe0a04158f6dcb03ca0079991a51a4248c02" }, { "ImportPath": "github.com/gogo/protobuf/sortkeys", - "Comment": "v0.4-3-gc0656edd", - "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + "Comment": "v0.5", + "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, { "ImportPath": "github.com/gogo/protobuf/types", - "Comment": "v0.4-3-gc0656edd", - "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" - }, - { - "ImportPath": "github.com/golang/glog", - "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" + "Comment": "v0.5", + "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, { "ImportPath": "github.com/golang/groupcache/lru", @@ -774,6 +791,11 @@ "Comment": "v1.1.0", "Rev": "b4deda0973fb4c70b50d226b1af49f3da59f5265" }, + { + "ImportPath": "github.com/golang/protobuf/protoc-gen-go/descriptor", + "Comment": "v1.1.0", + "Rev": "b4deda0973fb4c70b50d226b1af49f3da59f5265" + }, { "ImportPath": "github.com/golang/protobuf/ptypes", "Comment": "v1.1.0", @@ -805,178 +827,183 @@ }, { "ImportPath": "github.com/google/cadvisor/accelerators", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/cache/memory", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/collector", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/container", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/container/common", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/container/containerd", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/container/crio", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/container/docker", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/container/libcontainer", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" + }, + { + "ImportPath": "github.com/google/cadvisor/container/mesos", + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/container/raw", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/container/rkt", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": 
"e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/container/systemd", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/devicemapper", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/events", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/fs", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/info/v1", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/info/v2", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/machine", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/manager", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/manager/watcher", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/manager/watcher/raw", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/manager/watcher/rkt", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/metrics", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/storage", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/summary", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/utils", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/utils/cloudinfo", - 
"Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/utils/cpuload", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/utils/cpuload/netlink", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/utils/docker", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/utils/oomparser", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/utils/sysfs", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/utils/sysinfo", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/version", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/cadvisor/zfs", - "Comment": "v0.30.0-13-ge2818eda", - "Rev": "e2818eda1e39a377915812760c4d8a98022464d2" + "Comment": "v0.32.0", + "Rev": "8949c822ea91fa6b4996614a5ad6ade840be24ee" }, { "ImportPath": "github.com/google/gofuzz", @@ -1121,7 +1148,8 @@ }, { "ImportPath": "github.com/gorilla/websocket", - "Rev": "6eb6ad425a89d9da7a5549bc6da8f79ba5c17844" + "Comment": "v1.2.0-9-g4201258", + "Rev": "4201258b820c74ac8e6922fc9e6b52f71fe46f8d" }, { "ImportPath": "github.com/gregjones/httpcache", @@ -1141,18 +1169,18 @@ }, { "ImportPath": "github.com/heketi/heketi/client/api/go-client", - "Comment": "v4.0.0-95-gaaf40619", - "Rev": "aaf40619d85fda757e7a1c1ea1b5118cea65594b" + "Comment": "v8.0.0-49-g558b292", + "Rev": "558b29266ce0a873991ecfb3edc41a668a998514" }, { "ImportPath": "github.com/heketi/heketi/pkg/glusterfs/api", - "Comment": "v4.0.0-95-gaaf40619", - "Rev": "aaf40619d85fda757e7a1c1ea1b5118cea65594b" + "Comment": "v8.0.0-49-g558b292", + "Rev": "558b29266ce0a873991ecfb3edc41a668a998514" }, { "ImportPath": "github.com/heketi/heketi/pkg/utils", - "Comment": "v4.0.0-95-gaaf40619", - "Rev": "aaf40619d85fda757e7a1c1ea1b5118cea65594b" + "Comment": "v8.0.0-49-g558b292", + "Rev": "558b29266ce0a873991ecfb3edc41a668a998514" }, { "ImportPath": "github.com/imdario/mergo", @@ -1171,49 +1199,49 @@ }, { "ImportPath": "github.com/json-iterator/go", - "Comment": "1.1.3-22-gf2b4162", - "Rev": "f2b4162afba35581b6d4a50d3b8f34e33c144682" + "Comment": "1.1.4", + "Rev": "ab8a2e0c74be9d3be70b3184d9acc634935ded82" }, { "ImportPath": "github.com/kardianos/osext", "Rev": "8fef92e41e22a70e700a96b29f066cda30ea24ef" }, + { + "ImportPath": "github.com/karrick/godirwalk", + "Comment": "v1.7.5", + "Rev": "2de2192f9e35ce981c152a873ed943b93b79ced4" + }, { 
"ImportPath": "github.com/kr/fs", "Rev": "2788f0dbd16903de03cb8186e5c7d97b69ad387b" }, { "ImportPath": "github.com/libopenstorage/openstorage/api", - "Rev": "8e1ec19f5d60f5752cd0243b093fb6187798a369" + "Rev": "093a0c3888753c2056e7373183693d670c6bba01" }, { "ImportPath": "github.com/libopenstorage/openstorage/api/client", - "Rev": "8e1ec19f5d60f5752cd0243b093fb6187798a369" + "Rev": "093a0c3888753c2056e7373183693d670c6bba01" }, { "ImportPath": "github.com/libopenstorage/openstorage/api/client/volume", - "Rev": "8e1ec19f5d60f5752cd0243b093fb6187798a369" + "Rev": "093a0c3888753c2056e7373183693d670c6bba01" }, { "ImportPath": "github.com/libopenstorage/openstorage/api/spec", - "Rev": "8e1ec19f5d60f5752cd0243b093fb6187798a369" + "Rev": "093a0c3888753c2056e7373183693d670c6bba01" }, { "ImportPath": "github.com/libopenstorage/openstorage/pkg/parser", - "Rev": "8e1ec19f5d60f5752cd0243b093fb6187798a369" + "Rev": "093a0c3888753c2056e7373183693d670c6bba01" }, { "ImportPath": "github.com/libopenstorage/openstorage/pkg/units", - "Rev": "8e1ec19f5d60f5752cd0243b093fb6187798a369" + "Rev": "093a0c3888753c2056e7373183693d670c6bba01" }, { "ImportPath": "github.com/libopenstorage/openstorage/volume", - "Rev": "8e1ec19f5d60f5752cd0243b093fb6187798a369" - }, - { - "ImportPath": "github.com/lpabon/godbc", - "Comment": "v1.0-1-g9577782", - "Rev": "9577782540c1398b710ddae1b86268ba03a19b0c" + "Rev": "093a0c3888753c2056e7373183693d670c6bba01" }, { "ImportPath": "github.com/mailru/easyjson/buffer", @@ -1242,6 +1270,76 @@ "Comment": "v1.0.1", "Rev": "c12348ce28de40eed0136aa2b644d0ee0650e56c" }, + { + "ImportPath": "github.com/mesos/mesos-go/api/v1/lib", + "Comment": "mesos-1.6.x-12-gff8175b", + "Rev": "ff8175bfda54b1eb1f1954c8102a9dc612aa2312" + }, + { + "ImportPath": "github.com/mesos/mesos-go/api/v1/lib/agent", + "Comment": "mesos-1.6.x-12-gff8175b", + "Rev": "ff8175bfda54b1eb1f1954c8102a9dc612aa2312" + }, + { + "ImportPath": "github.com/mesos/mesos-go/api/v1/lib/agent/calls", + "Comment": "mesos-1.6.x-12-gff8175b", + "Rev": "ff8175bfda54b1eb1f1954c8102a9dc612aa2312" + }, + { + "ImportPath": "github.com/mesos/mesos-go/api/v1/lib/client", + "Comment": "mesos-1.6.x-12-gff8175b", + "Rev": "ff8175bfda54b1eb1f1954c8102a9dc612aa2312" + }, + { + "ImportPath": "github.com/mesos/mesos-go/api/v1/lib/debug", + "Comment": "mesos-1.6.x-12-gff8175b", + "Rev": "ff8175bfda54b1eb1f1954c8102a9dc612aa2312" + }, + { + "ImportPath": "github.com/mesos/mesos-go/api/v1/lib/encoding", + "Comment": "mesos-1.6.x-12-gff8175b", + "Rev": "ff8175bfda54b1eb1f1954c8102a9dc612aa2312" + }, + { + "ImportPath": "github.com/mesos/mesos-go/api/v1/lib/encoding/codecs", + "Comment": "mesos-1.6.x-12-gff8175b", + "Rev": "ff8175bfda54b1eb1f1954c8102a9dc612aa2312" + }, + { + "ImportPath": "github.com/mesos/mesos-go/api/v1/lib/encoding/framing", + "Comment": "mesos-1.6.x-12-gff8175b", + "Rev": "ff8175bfda54b1eb1f1954c8102a9dc612aa2312" + }, + { + "ImportPath": "github.com/mesos/mesos-go/api/v1/lib/encoding/json", + "Comment": "mesos-1.6.x-12-gff8175b", + "Rev": "ff8175bfda54b1eb1f1954c8102a9dc612aa2312" + }, + { + "ImportPath": "github.com/mesos/mesos-go/api/v1/lib/encoding/proto", + "Comment": "mesos-1.6.x-12-gff8175b", + "Rev": "ff8175bfda54b1eb1f1954c8102a9dc612aa2312" + }, + { + "ImportPath": "github.com/mesos/mesos-go/api/v1/lib/httpcli", + "Comment": "mesos-1.6.x-12-gff8175b", + "Rev": "ff8175bfda54b1eb1f1954c8102a9dc612aa2312" + }, + { + "ImportPath": "github.com/mesos/mesos-go/api/v1/lib/httpcli/apierrors", + "Comment": "mesos-1.6.x-12-gff8175b", + "Rev": 
"ff8175bfda54b1eb1f1954c8102a9dc612aa2312" + }, + { + "ImportPath": "github.com/mesos/mesos-go/api/v1/lib/recordio", + "Comment": "mesos-1.6.x-12-gff8175b", + "Rev": "ff8175bfda54b1eb1f1954c8102a9dc612aa2312" + }, + { + "ImportPath": "github.com/mesos/mesos-go/api/v1/lib/roles", + "Comment": "mesos-1.6.x-12-gff8175b", + "Rev": "ff8175bfda54b1eb1f1954c8102a9dc612aa2312" + }, { "ImportPath": "github.com/miekg/dns", "Rev": "5d001d020961ae1c184f9f8152fdc73810481677" @@ -1266,8 +1364,8 @@ }, { "ImportPath": "github.com/modern-go/reflect2", - "Comment": "1.0.0-9-g05fbef0", - "Rev": "05fbef0ca5da472bbf96c9322b84a53edc03c9fd" + "Comment": "v1.0.1", + "Rev": "94122c33edd36123c84d5368cfb2b69df93a0ec8" }, { "ImportPath": "github.com/mohae/deepcopy", @@ -1297,83 +1395,83 @@ }, { "ImportPath": "github.com/opencontainers/runc/libcontainer", - "Comment": "v1.0.0-rc5-47-g7619009a", - "Rev": "7619009aad53f9201995c53b447f48a890f39a3b" + "Comment": "v1.0.0-rc5-46-g871ba2e5", + "Rev": "871ba2e58e24314d1fab4517a80410191ba5ad01" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/apparmor", - "Comment": "v1.0.0-rc5-47-g7619009a", - "Rev": "7619009aad53f9201995c53b447f48a890f39a3b" + "Comment": "v1.0.0-rc5-46-g871ba2e5", + "Rev": "871ba2e58e24314d1fab4517a80410191ba5ad01" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups", - "Comment": "v1.0.0-rc5-47-g7619009a", - "Rev": "7619009aad53f9201995c53b447f48a890f39a3b" + "Comment": "v1.0.0-rc5-46-g871ba2e5", + "Rev": "871ba2e58e24314d1fab4517a80410191ba5ad01" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/fs", - "Comment": "v1.0.0-rc5-47-g7619009a", - "Rev": "7619009aad53f9201995c53b447f48a890f39a3b" + "Comment": "v1.0.0-rc5-46-g871ba2e5", + "Rev": "871ba2e58e24314d1fab4517a80410191ba5ad01" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/systemd", - "Comment": "v1.0.0-rc5-47-g7619009a", - "Rev": "7619009aad53f9201995c53b447f48a890f39a3b" + "Comment": "v1.0.0-rc5-46-g871ba2e5", + "Rev": "871ba2e58e24314d1fab4517a80410191ba5ad01" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/configs", - "Comment": "v1.0.0-rc5-47-g7619009a", - "Rev": "7619009aad53f9201995c53b447f48a890f39a3b" + "Comment": "v1.0.0-rc5-46-g871ba2e5", + "Rev": "871ba2e58e24314d1fab4517a80410191ba5ad01" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/configs/validate", - "Comment": "v1.0.0-rc5-47-g7619009a", - "Rev": "7619009aad53f9201995c53b447f48a890f39a3b" + "Comment": "v1.0.0-rc5-46-g871ba2e5", + "Rev": "871ba2e58e24314d1fab4517a80410191ba5ad01" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/criurpc", - "Comment": "v1.0.0-rc5-47-g7619009a", - "Rev": "7619009aad53f9201995c53b447f48a890f39a3b" + "Comment": "v1.0.0-rc5-46-g871ba2e5", + "Rev": "871ba2e58e24314d1fab4517a80410191ba5ad01" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/intelrdt", - "Comment": "v1.0.0-rc5-47-g7619009a", - "Rev": "7619009aad53f9201995c53b447f48a890f39a3b" + "Comment": "v1.0.0-rc5-46-g871ba2e5", + "Rev": "871ba2e58e24314d1fab4517a80410191ba5ad01" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/keys", - "Comment": "v1.0.0-rc5-47-g7619009a", - "Rev": "7619009aad53f9201995c53b447f48a890f39a3b" + "Comment": "v1.0.0-rc5-46-g871ba2e5", + "Rev": "871ba2e58e24314d1fab4517a80410191ba5ad01" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/mount", - "Comment": "v1.0.0-rc5-47-g7619009a", - "Rev": "7619009aad53f9201995c53b447f48a890f39a3b" + "Comment": 
"v1.0.0-rc5-46-g871ba2e5", + "Rev": "871ba2e58e24314d1fab4517a80410191ba5ad01" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/seccomp", - "Comment": "v1.0.0-rc5-47-g7619009a", - "Rev": "7619009aad53f9201995c53b447f48a890f39a3b" + "Comment": "v1.0.0-rc5-46-g871ba2e5", + "Rev": "871ba2e58e24314d1fab4517a80410191ba5ad01" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/stacktrace", - "Comment": "v1.0.0-rc5-47-g7619009a", - "Rev": "7619009aad53f9201995c53b447f48a890f39a3b" + "Comment": "v1.0.0-rc5-46-g871ba2e5", + "Rev": "871ba2e58e24314d1fab4517a80410191ba5ad01" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/system", - "Comment": "v1.0.0-rc5-47-g7619009a", - "Rev": "7619009aad53f9201995c53b447f48a890f39a3b" + "Comment": "v1.0.0-rc5-46-g871ba2e5", + "Rev": "871ba2e58e24314d1fab4517a80410191ba5ad01" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/user", - "Comment": "v1.0.0-rc5-47-g7619009a", - "Rev": "7619009aad53f9201995c53b447f48a890f39a3b" + "Comment": "v1.0.0-rc5-46-g871ba2e5", + "Rev": "871ba2e58e24314d1fab4517a80410191ba5ad01" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/utils", - "Comment": "v1.0.0-rc5-47-g7619009a", - "Rev": "7619009aad53f9201995c53b447f48a890f39a3b" + "Comment": "v1.0.0-rc5-46-g871ba2e5", + "Rev": "871ba2e58e24314d1fab4517a80410191ba5ad01" }, { "ImportPath": "github.com/opencontainers/runtime-spec/specs-go", @@ -1390,101 +1488,6 @@ "Comment": "v1.0.0-rc1-5-g4a2974b", "Rev": "4a2974bf1ee960774ffd517717f1f45325af0206" }, - { - "ImportPath": "github.com/openshift/cluster-api/pkg/apis/cluster/common", - "Comment": "0.0.0-alpha.4-56-g628950e96", - "Rev": "628950e96f5946bf89c683102ec916bbbe5569c9" - }, - { - "ImportPath": "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1", - "Comment": "0.0.0-alpha.4-56-g628950e96", - "Rev": "628950e96f5946bf89c683102ec916bbbe5569c9" - }, - { - "ImportPath": "github.com/openshift/cluster-api/pkg/apis/machine/common", - "Comment": "0.0.0-alpha.4-56-g628950e96", - "Rev": "628950e96f5946bf89c683102ec916bbbe5569c9" - }, - { - "ImportPath": "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1", - "Comment": "0.0.0-alpha.4-56-g628950e96", - "Rev": "628950e96f5946bf89c683102ec916bbbe5569c9" - }, - { - "ImportPath": "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset", - "Comment": "0.0.0-alpha.4-56-g628950e96", - "Rev": "628950e96f5946bf89c683102ec916bbbe5569c9" - }, - { - "ImportPath": "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/fake", - "Comment": "0.0.0-alpha.4-56-g628950e96", - "Rev": "628950e96f5946bf89c683102ec916bbbe5569c9" - }, - { - "ImportPath": "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/scheme", - "Comment": "0.0.0-alpha.4-56-g628950e96", - "Rev": "628950e96f5946bf89c683102ec916bbbe5569c9" - }, - { - "ImportPath": "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1", - "Comment": "0.0.0-alpha.4-56-g628950e96", - "Rev": "628950e96f5946bf89c683102ec916bbbe5569c9" - }, - { - "ImportPath": "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake", - "Comment": "0.0.0-alpha.4-56-g628950e96", - "Rev": "628950e96f5946bf89c683102ec916bbbe5569c9" - }, - { - "ImportPath": "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1", - "Comment": "0.0.0-alpha.4-56-g628950e96", - "Rev": "628950e96f5946bf89c683102ec916bbbe5569c9" - }, - { - 
"ImportPath": "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/fake", - "Comment": "0.0.0-alpha.4-56-g628950e96", - "Rev": "628950e96f5946bf89c683102ec916bbbe5569c9" - }, - { - "ImportPath": "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions", - "Comment": "0.0.0-alpha.4-56-g628950e96", - "Rev": "628950e96f5946bf89c683102ec916bbbe5569c9" - }, - { - "ImportPath": "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster", - "Comment": "0.0.0-alpha.4-56-g628950e96", - "Rev": "628950e96f5946bf89c683102ec916bbbe5569c9" - }, - { - "ImportPath": "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1", - "Comment": "0.0.0-alpha.4-56-g628950e96", - "Rev": "628950e96f5946bf89c683102ec916bbbe5569c9" - }, - { - "ImportPath": "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces", - "Comment": "0.0.0-alpha.4-56-g628950e96", - "Rev": "628950e96f5946bf89c683102ec916bbbe5569c9" - }, - { - "ImportPath": "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine", - "Comment": "0.0.0-alpha.4-56-g628950e96", - "Rev": "628950e96f5946bf89c683102ec916bbbe5569c9" - }, - { - "ImportPath": "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine/v1beta1", - "Comment": "0.0.0-alpha.4-56-g628950e96", - "Rev": "628950e96f5946bf89c683102ec916bbbe5569c9" - }, - { - "ImportPath": "github.com/openshift/cluster-api/pkg/client/listers_generated/cluster/v1alpha1", - "Comment": "0.0.0-alpha.4-56-g628950e96", - "Rev": "628950e96f5946bf89c683102ec916bbbe5569c9" - }, - { - "ImportPath": "github.com/openshift/cluster-api/pkg/client/listers_generated/machine/v1beta1", - "Comment": "0.0.0-alpha.4-56-g628950e96", - "Rev": "628950e96f5946bf89c683102ec916bbbe5569c9" - }, { "ImportPath": "github.com/pborman/uuid", "Rev": "ca53cad383cad2479bbba7f7a1a05797ec1386e4" @@ -1507,6 +1510,14 @@ "ImportPath": "github.com/pmezard/go-difflib/difflib", "Rev": "d8ed2627bdf02c080bf22230dbb337003b7aba2d" }, + { + "ImportPath": "github.com/pquerna/ffjson/fflib/v1", + "Rev": "af8b230fcd2007c7095168ca8ab94c68b60840c6" + }, + { + "ImportPath": "github.com/pquerna/ffjson/fflib/v1/internal", + "Rev": "af8b230fcd2007c7095168ca8ab94c68b60840c6" + }, { "ImportPath": "github.com/prometheus/client_golang/prometheus", "Comment": "v0.8.0-83-ge7e9030", @@ -1549,8 +1560,8 @@ }, { "ImportPath": "github.com/rancher/go-rancher/client", - "Comment": "v0.1.0-197-ge8d5f57", - "Rev": "e8d5f577f9a7c54fed90c37ebb061e284730946a" + "Comment": "v0.1.0-196-g09693a8", + "Rev": "09693a8743ba5ee58c58c1b1e8a4abd17af00d45" }, { "ImportPath": "github.com/renstrom/dedent", @@ -1559,7 +1570,7 @@ }, { "ImportPath": "github.com/rubiojr/go-vhd/vhd", - "Rev": "002180ac9aa8962eea39c7a9130576ab00a92b1d" + "Rev": "0bfd3b39853cdde5762efda92289f14b0ac0491b" }, { "ImportPath": "github.com/satori/go.uuid", @@ -1570,6 +1581,10 @@ "ImportPath": "github.com/seccomp/libseccomp-golang", "Rev": "1b506fc7c24eec5a3693cdcbed40d9c226cfc6a1" }, + { + "ImportPath": "github.com/sigma/go-inotify", + "Rev": "c87b6cf5033d2c6486046f045eeebdc3d910fd38" + }, { "ImportPath": "github.com/sirupsen/logrus", "Comment": "v1.0.3-11-g89742ae", @@ -1623,29 +1638,25 @@ }, { "ImportPath": "github.com/stretchr/testify/assert", - "Comment": "v1.2.1-15-g083d1d2", - "Rev": "083d1d25509c9756783fbb3ca0d7092c4a7d79c0" + "Comment": "v1.2.1-14-gc679ae2", + "Rev": 
"c679ae2cc0cb27ec3293fea7e254e47386f05d69" }, { "ImportPath": "github.com/stretchr/testify/mock", - "Comment": "v1.2.1-15-g083d1d2", - "Rev": "083d1d25509c9756783fbb3ca0d7092c4a7d79c0" + "Comment": "v1.2.1-14-gc679ae2", + "Rev": "c679ae2cc0cb27ec3293fea7e254e47386f05d69" }, { "ImportPath": "github.com/syndtr/gocapability/capability", "Rev": "e7cb7fa329f456b3855136a2642b197bad7366ba" }, - { - "ImportPath": "github.com/ugorji/go/codec", - "Rev": "ded73eae5db7e7a0ef6f55aace87a2873c5d2b74" - }, { "ImportPath": "github.com/vishvananda/netlink", - "Rev": "f67b75edbf5e3bb7dfe70bb788610693a71be3d1" + "Rev": "b2de5d10e38ecce8607e6b438b6d174f389a004e" }, { "ImportPath": "github.com/vishvananda/netlink/nl", - "Rev": "f67b75edbf5e3bb7dfe70bb788610693a71be3d1" + "Rev": "b2de5d10e38ecce8607e6b438b6d174f389a004e" }, { "ImportPath": "github.com/vishvananda/netns", @@ -1653,133 +1664,133 @@ }, { "ImportPath": "github.com/vmware/govmomi/find", - "Comment": "v0.18.0-50-g8919665", - "Rev": "89196653b33d8884fc3a394d367ad1b341d18571" + "Comment": "v0.18.0-48-g22f7465", + "Rev": "22f74650cf39ba4649fba45e770df0f44df6f758" }, { "ImportPath": "github.com/vmware/govmomi/list", - "Comment": "v0.18.0-50-g8919665", - "Rev": "89196653b33d8884fc3a394d367ad1b341d18571" + "Comment": "v0.18.0-48-g22f7465", + "Rev": "22f74650cf39ba4649fba45e770df0f44df6f758" }, { "ImportPath": "github.com/vmware/govmomi/lookup", - "Comment": "v0.18.0-50-g8919665", - "Rev": "89196653b33d8884fc3a394d367ad1b341d18571" + "Comment": "v0.18.0-48-g22f7465", + "Rev": "22f74650cf39ba4649fba45e770df0f44df6f758" }, { "ImportPath": "github.com/vmware/govmomi/lookup/methods", - "Comment": "v0.18.0-50-g8919665", - "Rev": "89196653b33d8884fc3a394d367ad1b341d18571" + "Comment": "v0.18.0-48-g22f7465", + "Rev": "22f74650cf39ba4649fba45e770df0f44df6f758" }, { "ImportPath": "github.com/vmware/govmomi/lookup/types", - "Comment": "v0.18.0-50-g8919665", - "Rev": "89196653b33d8884fc3a394d367ad1b341d18571" + "Comment": "v0.18.0-48-g22f7465", + "Rev": "22f74650cf39ba4649fba45e770df0f44df6f758" }, { "ImportPath": "github.com/vmware/govmomi/nfc", - "Comment": "v0.18.0-50-g8919665", - "Rev": "89196653b33d8884fc3a394d367ad1b341d18571" + "Comment": "v0.18.0-48-g22f7465", + "Rev": "22f74650cf39ba4649fba45e770df0f44df6f758" }, { "ImportPath": "github.com/vmware/govmomi/object", - "Comment": "v0.18.0-50-g8919665", - "Rev": "89196653b33d8884fc3a394d367ad1b341d18571" + "Comment": "v0.18.0-48-g22f7465", + "Rev": "22f74650cf39ba4649fba45e770df0f44df6f758" }, { "ImportPath": "github.com/vmware/govmomi/pbm", - "Comment": "v0.18.0-50-g8919665", - "Rev": "89196653b33d8884fc3a394d367ad1b341d18571" + "Comment": "v0.18.0-48-g22f7465", + "Rev": "22f74650cf39ba4649fba45e770df0f44df6f758" }, { "ImportPath": "github.com/vmware/govmomi/pbm/methods", - "Comment": "v0.18.0-50-g8919665", - "Rev": "89196653b33d8884fc3a394d367ad1b341d18571" + "Comment": "v0.18.0-48-g22f7465", + "Rev": "22f74650cf39ba4649fba45e770df0f44df6f758" }, { "ImportPath": "github.com/vmware/govmomi/pbm/types", - "Comment": "v0.18.0-50-g8919665", - "Rev": "89196653b33d8884fc3a394d367ad1b341d18571" + "Comment": "v0.18.0-48-g22f7465", + "Rev": "22f74650cf39ba4649fba45e770df0f44df6f758" }, { "ImportPath": "github.com/vmware/govmomi/property", - "Comment": "v0.18.0-50-g8919665", - "Rev": "89196653b33d8884fc3a394d367ad1b341d18571" + "Comment": "v0.18.0-48-g22f7465", + "Rev": "22f74650cf39ba4649fba45e770df0f44df6f758" }, { "ImportPath": "github.com/vmware/govmomi/session", - "Comment": "v0.18.0-50-g8919665", - "Rev": 
"89196653b33d8884fc3a394d367ad1b341d18571" + "Comment": "v0.18.0-48-g22f7465", + "Rev": "22f74650cf39ba4649fba45e770df0f44df6f758" }, { "ImportPath": "github.com/vmware/govmomi/sts", - "Comment": "v0.18.0-50-g8919665", - "Rev": "89196653b33d8884fc3a394d367ad1b341d18571" + "Comment": "v0.18.0-48-g22f7465", + "Rev": "22f74650cf39ba4649fba45e770df0f44df6f758" }, { "ImportPath": "github.com/vmware/govmomi/sts/internal", - "Comment": "v0.18.0-50-g8919665", - "Rev": "89196653b33d8884fc3a394d367ad1b341d18571" + "Comment": "v0.18.0-48-g22f7465", + "Rev": "22f74650cf39ba4649fba45e770df0f44df6f758" }, { "ImportPath": "github.com/vmware/govmomi/task", - "Comment": "v0.18.0-50-g8919665", - "Rev": "89196653b33d8884fc3a394d367ad1b341d18571" + "Comment": "v0.18.0-48-g22f7465", + "Rev": "22f74650cf39ba4649fba45e770df0f44df6f758" }, { "ImportPath": "github.com/vmware/govmomi/vapi/internal", - "Comment": "v0.18.0-50-g8919665", - "Rev": "89196653b33d8884fc3a394d367ad1b341d18571" + "Comment": "v0.18.0-48-g22f7465", + "Rev": "22f74650cf39ba4649fba45e770df0f44df6f758" }, { "ImportPath": "github.com/vmware/govmomi/vapi/rest", - "Comment": "v0.18.0-50-g8919665", - "Rev": "89196653b33d8884fc3a394d367ad1b341d18571" + "Comment": "v0.18.0-48-g22f7465", + "Rev": "22f74650cf39ba4649fba45e770df0f44df6f758" }, { "ImportPath": "github.com/vmware/govmomi/vapi/tags", - "Comment": "v0.18.0-50-g8919665", - "Rev": "89196653b33d8884fc3a394d367ad1b341d18571" + "Comment": "v0.18.0-48-g22f7465", + "Rev": "22f74650cf39ba4649fba45e770df0f44df6f758" }, { "ImportPath": "github.com/vmware/govmomi/vim25", - "Comment": "v0.18.0-50-g8919665", - "Rev": "89196653b33d8884fc3a394d367ad1b341d18571" + "Comment": "v0.18.0-48-g22f7465", + "Rev": "22f74650cf39ba4649fba45e770df0f44df6f758" }, { "ImportPath": "github.com/vmware/govmomi/vim25/debug", - "Comment": "v0.18.0-50-g8919665", - "Rev": "89196653b33d8884fc3a394d367ad1b341d18571" + "Comment": "v0.18.0-48-g22f7465", + "Rev": "22f74650cf39ba4649fba45e770df0f44df6f758" }, { "ImportPath": "github.com/vmware/govmomi/vim25/methods", - "Comment": "v0.18.0-50-g8919665", - "Rev": "89196653b33d8884fc3a394d367ad1b341d18571" + "Comment": "v0.18.0-48-g22f7465", + "Rev": "22f74650cf39ba4649fba45e770df0f44df6f758" }, { "ImportPath": "github.com/vmware/govmomi/vim25/mo", - "Comment": "v0.18.0-50-g8919665", - "Rev": "89196653b33d8884fc3a394d367ad1b341d18571" + "Comment": "v0.18.0-48-g22f7465", + "Rev": "22f74650cf39ba4649fba45e770df0f44df6f758" }, { "ImportPath": "github.com/vmware/govmomi/vim25/progress", - "Comment": "v0.18.0-50-g8919665", - "Rev": "89196653b33d8884fc3a394d367ad1b341d18571" + "Comment": "v0.18.0-48-g22f7465", + "Rev": "22f74650cf39ba4649fba45e770df0f44df6f758" }, { "ImportPath": "github.com/vmware/govmomi/vim25/soap", - "Comment": "v0.18.0-50-g8919665", - "Rev": "89196653b33d8884fc3a394d367ad1b341d18571" + "Comment": "v0.18.0-48-g22f7465", + "Rev": "22f74650cf39ba4649fba45e770df0f44df6f758" }, { "ImportPath": "github.com/vmware/govmomi/vim25/types", - "Comment": "v0.18.0-50-g8919665", - "Rev": "89196653b33d8884fc3a394d367ad1b341d18571" + "Comment": "v0.18.0-48-g22f7465", + "Rev": "22f74650cf39ba4649fba45e770df0f44df6f758" }, { "ImportPath": "github.com/vmware/govmomi/vim25/xml", - "Comment": "v0.18.0-50-g8919665", - "Rev": "89196653b33d8884fc3a394d367ad1b341d18571" + "Comment": "v0.18.0-48-g22f7465", + "Rev": "22f74650cf39ba4649fba45e770df0f44df6f758" }, { "ImportPath": "github.com/vmware/photon-controller-go-sdk/SSPI", @@ -1841,57 +1852,53 @@ "ImportPath": 
"golang.org/x/crypto/ssh/terminal", "Rev": "de0752318171da717af4ce24d0a2e8626afaeb11" }, - { - "ImportPath": "golang.org/x/exp/inotify", - "Rev": "9e18480f546346da26e76a769954c16bba8e1605" - }, { "ImportPath": "golang.org/x/net/context", - "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f" + "Rev": "0ed95abb35c445290478a5348a7b38bb154135fd" }, { "ImportPath": "golang.org/x/net/context/ctxhttp", - "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f" + "Rev": "0ed95abb35c445290478a5348a7b38bb154135fd" }, { "ImportPath": "golang.org/x/net/html", - "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f" + "Rev": "0ed95abb35c445290478a5348a7b38bb154135fd" }, { "ImportPath": "golang.org/x/net/html/atom", - "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f" + "Rev": "0ed95abb35c445290478a5348a7b38bb154135fd" }, { "ImportPath": "golang.org/x/net/http2", - "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f" + "Rev": "0ed95abb35c445290478a5348a7b38bb154135fd" }, { "ImportPath": "golang.org/x/net/http2/hpack", - "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f" + "Rev": "0ed95abb35c445290478a5348a7b38bb154135fd" }, { "ImportPath": "golang.org/x/net/idna", - "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f" + "Rev": "0ed95abb35c445290478a5348a7b38bb154135fd" }, { "ImportPath": "golang.org/x/net/internal/timeseries", - "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f" + "Rev": "0ed95abb35c445290478a5348a7b38bb154135fd" }, { "ImportPath": "golang.org/x/net/lex/httplex", - "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f" + "Rev": "0ed95abb35c445290478a5348a7b38bb154135fd" }, { "ImportPath": "golang.org/x/net/proxy", - "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f" + "Rev": "0ed95abb35c445290478a5348a7b38bb154135fd" }, { "ImportPath": "golang.org/x/net/trace", - "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f" + "Rev": "0ed95abb35c445290478a5348a7b38bb154135fd" }, { "ImportPath": "golang.org/x/net/websocket", - "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f" + "Rev": "0ed95abb35c445290478a5348a7b38bb154135fd" }, { "ImportPath": "golang.org/x/oauth2", @@ -1999,39 +2006,39 @@ }, { "ImportPath": "google.golang.org/api/compute/v0.alpha", - "Rev": "1a9304e34f296395f1e15189db2a9f62c54f4b69" + "Rev": "90af239a3cd31991862262cace75332d7df7ae48" }, { "ImportPath": "google.golang.org/api/compute/v0.beta", - "Rev": "1a9304e34f296395f1e15189db2a9f62c54f4b69" + "Rev": "90af239a3cd31991862262cace75332d7df7ae48" }, { "ImportPath": "google.golang.org/api/compute/v1", - "Rev": "1a9304e34f296395f1e15189db2a9f62c54f4b69" + "Rev": "90af239a3cd31991862262cace75332d7df7ae48" }, { "ImportPath": "google.golang.org/api/container/v1", - "Rev": "1a9304e34f296395f1e15189db2a9f62c54f4b69" + "Rev": "90af239a3cd31991862262cace75332d7df7ae48" }, { "ImportPath": "google.golang.org/api/container/v1beta1", - "Rev": "1a9304e34f296395f1e15189db2a9f62c54f4b69" + "Rev": "90af239a3cd31991862262cace75332d7df7ae48" }, { "ImportPath": "google.golang.org/api/gensupport", - "Rev": "1a9304e34f296395f1e15189db2a9f62c54f4b69" + "Rev": "90af239a3cd31991862262cace75332d7df7ae48" }, { "ImportPath": "google.golang.org/api/googleapi", - "Rev": "1a9304e34f296395f1e15189db2a9f62c54f4b69" + "Rev": "90af239a3cd31991862262cace75332d7df7ae48" }, { "ImportPath": "google.golang.org/api/googleapi/internal/uritemplates", - "Rev": "1a9304e34f296395f1e15189db2a9f62c54f4b69" + "Rev": "90af239a3cd31991862262cace75332d7df7ae48" }, { "ImportPath": "google.golang.org/api/tpu/v1", - "Rev": "1a9304e34f296395f1e15189db2a9f62c54f4b69" + "Rev": 
"90af239a3cd31991862262cace75332d7df7ae48" }, { "ImportPath": "google.golang.org/genproto/googleapis/rpc/status", @@ -2039,88 +2046,128 @@ }, { "ImportPath": "google.golang.org/grpc", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Comment": "v1.13.0", + "Rev": "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" }, { "ImportPath": "google.golang.org/grpc/balancer", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Comment": "v1.13.0", + "Rev": "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" + }, + { + "ImportPath": "google.golang.org/grpc/balancer/base", + "Comment": "v1.13.0", + "Rev": "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" + }, + { + "ImportPath": "google.golang.org/grpc/balancer/roundrobin", + "Comment": "v1.13.0", + "Rev": "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" }, { "ImportPath": "google.golang.org/grpc/codes", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Comment": "v1.13.0", + "Rev": "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" }, { "ImportPath": "google.golang.org/grpc/connectivity", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Comment": "v1.13.0", + "Rev": "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" }, { "ImportPath": "google.golang.org/grpc/credentials", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Comment": "v1.13.0", + "Rev": "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" }, { - "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1/messages", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "ImportPath": "google.golang.org/grpc/encoding", + "Comment": "v1.13.0", + "Rev": "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" + }, + { + "ImportPath": "google.golang.org/grpc/encoding/proto", + "Comment": "v1.13.0", + "Rev": "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" }, { "ImportPath": "google.golang.org/grpc/grpclog", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Comment": "v1.13.0", + "Rev": "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" }, { "ImportPath": "google.golang.org/grpc/internal", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Comment": "v1.13.0", + "Rev": "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" + }, + { + "ImportPath": "google.golang.org/grpc/internal/backoff", + "Comment": "v1.13.0", + "Rev": "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" + }, + { + "ImportPath": "google.golang.org/grpc/internal/channelz", + "Comment": "v1.13.0", + "Rev": "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" + }, + { + "ImportPath": "google.golang.org/grpc/internal/grpcrand", + "Comment": "v1.13.0", + "Rev": "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" }, { "ImportPath": "google.golang.org/grpc/keepalive", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Comment": "v1.13.0", + "Rev": "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" }, { "ImportPath": "google.golang.org/grpc/metadata", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Comment": "v1.13.0", + "Rev": "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" }, { "ImportPath": "google.golang.org/grpc/naming", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Comment": "v1.13.0", + "Rev": "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" }, { "ImportPath": "google.golang.org/grpc/peer", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Comment": "v1.13.0", + "Rev": "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" }, { "ImportPath": 
"google.golang.org/grpc/resolver", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Comment": "v1.13.0", + "Rev": "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" + }, + { + "ImportPath": "google.golang.org/grpc/resolver/dns", + "Comment": "v1.13.0", + "Rev": "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" + }, + { + "ImportPath": "google.golang.org/grpc/resolver/passthrough", + "Comment": "v1.13.0", + "Rev": "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" }, { "ImportPath": "google.golang.org/grpc/stats", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Comment": "v1.13.0", + "Rev": "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" }, { "ImportPath": "google.golang.org/grpc/status", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Comment": "v1.13.0", + "Rev": "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" }, { "ImportPath": "google.golang.org/grpc/tap", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Comment": "v1.13.0", + "Rev": "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" }, { "ImportPath": "google.golang.org/grpc/transport", - "Comment": "v1.7.5", - "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + "Comment": "v1.13.0", + "Rev": "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" }, { "ImportPath": "gopkg.in/gcfg.v1", @@ -2174,3319 +2221,3396 @@ }, { "ImportPath": "gopkg.in/yaml.v2", - "Rev": "670d4cfef0544295bc27a114dbac37980d83185a" + "Comment": "v2.2.1", + "Rev": "5420a8b6744d3b0345ab293f6fcba19c978f1183" }, { "ImportPath": "k8s.io/api/admission/v1beta1", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": "k8s.io/api/admissionregistration/v1alpha1", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": "k8s.io/api/admissionregistration/v1beta1", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": "k8s.io/api/apps/v1", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": "k8s.io/api/apps/v1beta1", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": "k8s.io/api/apps/v1beta2", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" + }, + { + "ImportPath": "k8s.io/api/auditregistration/v1alpha1", + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": "k8s.io/api/authentication/v1", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": "k8s.io/api/authentication/v1beta1", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": "k8s.io/api/authorization/v1", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": "k8s.io/api/authorization/v1beta1", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": "k8s.io/api/autoscaling/v1", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": "k8s.io/api/autoscaling/v2beta1", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": 
"k8s.io/api/autoscaling/v2beta2", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": "k8s.io/api/batch/v1", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": "k8s.io/api/batch/v1beta1", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": "k8s.io/api/batch/v2alpha1", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": "k8s.io/api/certificates/v1beta1", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": "k8s.io/api/coordination/v1beta1", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": "k8s.io/api/core/v1", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": "k8s.io/api/events/v1beta1", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": "k8s.io/api/extensions/v1beta1", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": "k8s.io/api/imagepolicy/v1alpha1", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": "k8s.io/api/networking/v1", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": "k8s.io/api/policy/v1beta1", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": "k8s.io/api/rbac/v1", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": "k8s.io/api/rbac/v1alpha1", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": "k8s.io/api/rbac/v1beta1", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": "k8s.io/api/scheduling/v1alpha1", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": "k8s.io/api/scheduling/v1beta1", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": "k8s.io/api/settings/v1alpha1", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": "k8s.io/api/storage/v1", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": "k8s.io/api/storage/v1alpha1", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": "k8s.io/api/storage/v1beta1", - "Rev": "5d4d6f5eb46925a17a7411adad1148739b126925" + "Rev": "3fdb474e1ac5a3cdd0a6c5d40c708a4a93916853" }, { "ImportPath": "k8s.io/apiextensions-apiserver/pkg/features", - "Rev": "1d3714e44bcd2cd4482b06496523f6051ca1def2" + "Rev": "d4e06f92344e3d9fe1dbcf7d068a726f5f4926b9" }, { "ImportPath": "k8s.io/apimachinery/pkg/api/equality", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": 
"k8s.io/apimachinery/pkg/api/errors", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/api/meta", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/api/resource", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/api/validation", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/api/validation/path", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/apis/config", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/apis/config/v1alpha1", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/internalversion", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1/validation", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1beta1", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/conversion", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/conversion/queryparams", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/fields", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/labels", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/schema", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/json", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": 
"k8s.io/apimachinery/pkg/runtime/serializer/protobuf", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/recognizer", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/streaming", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/versioning", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/selection", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/types", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/cache", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/clock", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/diff", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/duration", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/errors", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/framer", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/httpstream", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/httpstream/spdy", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/intstr", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/json", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/mergepatch", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/naming", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/net", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/proxy", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/rand", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/remotecommand", - "Rev": 
"71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/runtime", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/sets", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/strategicpatch", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/uuid", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/validation", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/validation/field", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" + }, + { + "ImportPath": "k8s.io/apimachinery/pkg/util/version", + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/wait", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/yaml", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/version", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/pkg/watch", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/third_party/forked/golang/json", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/third_party/forked/golang/netutil", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apimachinery/third_party/forked/golang/reflect", - "Rev": "71f5c0df86aa210c9d58de513c88bdc2207e4132" + "Rev": "f0ee137a8dfdefe049204ac948a7a4a9765d8060" }, { "ImportPath": "k8s.io/apiserver/pkg/admission", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/apis/apiserver", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/apis/apiserver/v1alpha1", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/apis/audit", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/apis/audit/v1", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/apis/audit/v1alpha1", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/apis/audit/v1beta1", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": 
"k8s.io/apiserver/pkg/apis/config", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/audit", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/authentication/authenticator", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/authentication/authenticatorfactory", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/authentication/group", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/authentication/request/anonymous", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/authentication/request/bearertoken", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/authentication/request/headerrequest", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/authentication/request/union", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/authentication/request/websocket", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/authentication/request/x509", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/authentication/serviceaccount", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" + }, + { + "ImportPath": "k8s.io/apiserver/pkg/authentication/token/cache", + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/authentication/token/tokenfile", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/authentication/user", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/authorization/authorizer", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/authorization/authorizerfactory", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/endpoints/handlers/negotiation", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/endpoints/metrics", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/endpoints/request", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": 
"3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/features", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/registry/rest", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/server/healthz", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/server/httplog", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/server/mux", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/server/routes", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/server/routes/data/swagger", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/storage", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/storage/etcd", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/storage/etcd/metrics", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" - }, - { - "ImportPath": "k8s.io/apiserver/pkg/storage/etcd/util", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/storage/names", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/util/feature", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/util/flag", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/util/flushwriter", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/util/logs", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/util/trace", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/util/webhook", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/pkg/util/wsstream", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/plugin/pkg/authenticator/token/webhook", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/apiserver/plugin/pkg/authorizer/webhook", - "Rev": "378c77eb524c444c0ea480087933cf3b55cc5f33" + "Rev": "3cbe8d15bf3a73aa191242fbd0c0a559fb0b1983" }, { "ImportPath": "k8s.io/cli-runtime/pkg/genericclioptions", - "Rev": "ba14ad55f1ebf43ec59bef7c7e0c4affab227233" + "Rev": 
"08f673b7d7c154e5ee7e7260086d557d23bf1d58" }, { "ImportPath": "k8s.io/cli-runtime/pkg/genericclioptions/printers", - "Rev": "ba14ad55f1ebf43ec59bef7c7e0c4affab227233" + "Rev": "08f673b7d7c154e5ee7e7260086d557d23bf1d58" }, { "ImportPath": "k8s.io/cli-runtime/pkg/genericclioptions/resource", - "Rev": "ba14ad55f1ebf43ec59bef7c7e0c4affab227233" + "Rev": "08f673b7d7c154e5ee7e7260086d557d23bf1d58" }, { "ImportPath": "k8s.io/client-go/discovery", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/discovery/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/dynamic", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/admissionregistration", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/admissionregistration/v1alpha1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/admissionregistration/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/apps", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/apps/v1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/apps/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/apps/v1beta2", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" + }, + { + "ImportPath": "k8s.io/client-go/informers/auditregistration", + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" + }, + { + "ImportPath": "k8s.io/client-go/informers/auditregistration/v1alpha1", + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/autoscaling", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/autoscaling/v1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/autoscaling/v2beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/autoscaling/v2beta2", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/batch", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/batch/v1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/batch/v1beta1", - "Rev": 
"8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/batch/v2alpha1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/certificates", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/certificates/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/coordination", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/coordination/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/core", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/core/v1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/events", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/events/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/extensions", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/extensions/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/internalinterfaces", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/networking", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/networking/v1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/policy", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/policy/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/rbac", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/rbac/v1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/rbac/v1alpha1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/rbac/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/scheduling", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": 
"dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/scheduling/v1alpha1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/scheduling/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/settings", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/settings/v1alpha1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/storage", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/storage/v1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/storage/v1alpha1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/informers/storage/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/scheme", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1beta2", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": 
"k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1", + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake", + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authentication/v1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authentication/v1/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authentication/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authorization/v1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authorization/v1/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authorization/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v1/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": 
"k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v2alpha1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/certificates/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/coordination/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/core/v1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/core/v1/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/events/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/events/v1beta1/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/extensions/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/networking/v1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/networking/v1/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/policy/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": 
"k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/settings/v1alpha1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1alpha1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/listers/admissionregistration/v1alpha1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/listers/admissionregistration/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/listers/apps/v1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/listers/apps/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/listers/apps/v1beta2", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" + }, + { + "ImportPath": 
"k8s.io/client-go/listers/auditregistration/v1alpha1", + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/listers/autoscaling/v1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/listers/autoscaling/v2beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/listers/autoscaling/v2beta2", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/listers/batch/v1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/listers/batch/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/listers/batch/v2alpha1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/listers/certificates/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/listers/coordination/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/listers/core/v1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/listers/events/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/listers/extensions/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/listers/networking/v1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/listers/policy/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/listers/rbac/v1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/listers/rbac/v1alpha1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/listers/rbac/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/listers/scheduling/v1alpha1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/listers/scheduling/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/listers/settings/v1alpha1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/listers/storage/v1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/listers/storage/v1alpha1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + 
"Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/listers/storage/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/pkg/apis/clientauthentication", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/pkg/version", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/plugin/pkg/client/auth/exec", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/rest", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/rest/watch", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/restmapper", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/scale", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/scale/scheme", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/scale/scheme/appsint", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/scale/scheme/appsv1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/scale/scheme/appsv1beta2", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/scale/scheme/autoscalingv1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/scale/scheme/extensionsint", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/scale/scheme/extensionsv1beta1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/testing", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/third_party/forked/golang/template", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/tools/auth", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/tools/cache", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/tools/clientcmd", - 
"Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/tools/clientcmd/api", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/tools/clientcmd/api/latest", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/tools/clientcmd/api/v1", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/tools/leaderelection", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/tools/leaderelection/resourcelock", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/tools/metrics", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/tools/pager", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/tools/record", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/tools/reference", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/tools/remotecommand", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/tools/watch", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/transport", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/transport/spdy", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/util/buffer", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/util/cert", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/util/certificate", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/util/certificate/csr", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/util/connrotation", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/util/exec", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/util/flowcontrol", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/util/homedir", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/util/integer", - "Rev": 
"8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/util/jsonpath", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/util/retry", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" }, { "ImportPath": "k8s.io/client-go/util/workqueue", - "Rev": "8485feed9099506ca4ec5c9e5fd8ecb3e9f113f7" + "Rev": "dc29a5eb1f5fa01d02ba6bf219d28bcb2571111c" + }, + { + "ImportPath": "k8s.io/cloud-provider", + "Rev": "8708f3ec3c2f687df74680734fc6f095190db624" }, { "ImportPath": "k8s.io/csi-api/pkg/apis/csi/v1alpha1", - "Rev": "66c88e69e3b8bfc58784f0bb85d4924665a0cdf9" + "Rev": "1884a80de40fbc726d67f9fa516b962b4c9fabba" }, { "ImportPath": "k8s.io/csi-api/pkg/client/clientset/versioned", - "Rev": "66c88e69e3b8bfc58784f0bb85d4924665a0cdf9" + "Rev": "1884a80de40fbc726d67f9fa516b962b4c9fabba" }, { "ImportPath": "k8s.io/csi-api/pkg/client/clientset/versioned/scheme", - "Rev": "66c88e69e3b8bfc58784f0bb85d4924665a0cdf9" + "Rev": "1884a80de40fbc726d67f9fa516b962b4c9fabba" }, { "ImportPath": "k8s.io/csi-api/pkg/client/clientset/versioned/typed/csi/v1alpha1", - "Rev": "66c88e69e3b8bfc58784f0bb85d4924665a0cdf9" + "Rev": "1884a80de40fbc726d67f9fa516b962b4c9fabba" }, { "ImportPath": "k8s.io/csi-api/pkg/client/informers/externalversions", - "Rev": "66c88e69e3b8bfc58784f0bb85d4924665a0cdf9" + "Rev": "1884a80de40fbc726d67f9fa516b962b4c9fabba" }, { "ImportPath": "k8s.io/csi-api/pkg/client/informers/externalversions/csi", - "Rev": "66c88e69e3b8bfc58784f0bb85d4924665a0cdf9" + "Rev": "1884a80de40fbc726d67f9fa516b962b4c9fabba" }, { "ImportPath": "k8s.io/csi-api/pkg/client/informers/externalversions/csi/v1alpha1", - "Rev": "66c88e69e3b8bfc58784f0bb85d4924665a0cdf9" + "Rev": "1884a80de40fbc726d67f9fa516b962b4c9fabba" }, { "ImportPath": "k8s.io/csi-api/pkg/client/informers/externalversions/internalinterfaces", - "Rev": "66c88e69e3b8bfc58784f0bb85d4924665a0cdf9" + "Rev": "1884a80de40fbc726d67f9fa516b962b4c9fabba" }, { "ImportPath": "k8s.io/csi-api/pkg/client/listers/csi/v1alpha1", - "Rev": "66c88e69e3b8bfc58784f0bb85d4924665a0cdf9" + "Rev": "1884a80de40fbc726d67f9fa516b962b4c9fabba" }, { "ImportPath": "k8s.io/klog", - "Comment": "v0.1.0-31-g5a3af3e", - "Rev": "5a3af3e2572037ebf8b0ca75f3e6f3a8ff2ae0a8" + "Rev": "8139d8cb77af419532b33dfa7dd09fbc5f1d344f" }, { "ImportPath": "k8s.io/kube-openapi/pkg/builder", - "Rev": "0cf8f7e6ed1d2e3d47d02e3b6e559369af24d803" + "Rev": "c59034cc13d587f5ef4e85ca0ade0c1866ae8e1d" }, { "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": "0cf8f7e6ed1d2e3d47d02e3b6e559369af24d803" + "Rev": "c59034cc13d587f5ef4e85ca0ade0c1866ae8e1d" }, { "ImportPath": "k8s.io/kube-openapi/pkg/handler", - "Rev": "0cf8f7e6ed1d2e3d47d02e3b6e559369af24d803" + "Rev": "c59034cc13d587f5ef4e85ca0ade0c1866ae8e1d" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util", - "Rev": "0cf8f7e6ed1d2e3d47d02e3b6e559369af24d803" + "Rev": "c59034cc13d587f5ef4e85ca0ade0c1866ae8e1d" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", - "Rev": "0cf8f7e6ed1d2e3d47d02e3b6e559369af24d803" + "Rev": "c59034cc13d587f5ef4e85ca0ade0c1866ae8e1d" }, { "ImportPath": "k8s.io/kube-proxy/config/v1alpha1", - "Rev": "150a55da91df8d71a5b2f8dbfd515095821675df" + "Rev": "ac24c0ed3fc840396460c8da3b5038c9ec763854" }, { "ImportPath": "k8s.io/kubelet/config/v1beta1", - "Rev": "d9621112645819c52994484b4b9ad725dad9491d" + "Rev": 
"c37265953d471c2bc513d0763736dc2513cdbeef" }, { "ImportPath": "k8s.io/kubernetes/cmd/kube-proxy/app", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/cmd/kubelet/app", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/cmd/kubelet/app/options", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/events", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/legacyscheme", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/pod", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/ref", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/resource", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/service", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/testapi", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/v1/pod", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/v1/resource", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/v1/service", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admission", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admission/install", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": 
"v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admission/v1beta1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admissionregistration", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admissionregistration/install", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/apps", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/install", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/v1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/v1beta1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/v1beta2", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/validation", + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/auditregistration", + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/auditregistration/install", + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/auditregistration/v1alpha1", + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication", - "Comment": 
"v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication/install", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication/v1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication/v1beta1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/install", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/v1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/v1beta1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/install", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/v1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/v2beta2", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/batch", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + 
"Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/install", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/v1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/v1beta1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/certificates", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/certificates/install", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/certificates/v1beta1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/coordination", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/coordination/install", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/coordination/v1beta1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/core", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/core/helper", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/core/helper/qos", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/core/install", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": 
"3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/core/pods", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/core/v1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/core/v1/helper", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/core/v1/validation", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/core/validation", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/events", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/events/install", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/events/v1beta1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/install", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/validation", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/imagepolicy", - "Comment": 
"v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/imagepolicy/install", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/networking", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/networking/install", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/networking/v1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/policy", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/policy/install", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/policy/v1beta1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/policy/validation", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/install", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/v1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": 
"80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/v1beta1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/scheduling", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/scheduling/install", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/scheduling/v1beta1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/settings", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/settings/install", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/settings/v1alpha1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/storage", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/install", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/util", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/v1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/v1alpha1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/v1beta1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": 
"v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/capabilities", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/chaosclient", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/auditregistration/internalversion", + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": 
"80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/coordination/internalversion", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/leaderelectionconfig", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/client/metrics/prometheus", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/aws", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/azure", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/gce", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock", + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/openstack", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": 
"v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/photon", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/deployment/util", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/events", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/expand/cache", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/persistentvolume", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider/aws", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider/azure", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": 
"v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider/gcp", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider/rancher", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider/secrets", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/features", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/fieldpath", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/apps", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubectl/describe", + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubectl/describe/versioned", + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/scheme", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/util", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubectl/util/certificate", + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubectl/util/deployment", + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubectl/util/event", + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubectl/util/fieldpath", + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubectl/util/podutils", + "Comment": 
"v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubectl/util/qos", + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { - "ImportPath": "k8s.io/kubernetes/pkg/kubectl/util/hash", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "ImportPath": "k8s.io/kubernetes/pkg/kubectl/util/rbac", + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubectl/util/resource", + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/util/slice", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubectl/util/storage", + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/config", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/config/scheme", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/config/validation", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/cri", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": 
"80b400f60b7980558614cc3dca1cffa452d8dd57" }, { - "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1alpha1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1", + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/podresources", + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1", + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cadvisor", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/certificate", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/checkpoint", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/checkpointmanager", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cloudresource", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cm", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - 
"Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cm/cpuset", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cm/util", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/config", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/configmap", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/container", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/container/testing", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/cm", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": 
"v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/metrics", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/network", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/network/metrics", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/remote", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/envvars", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/events", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/eviction", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/eviction/api", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/images", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": 
"80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/files", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/panic", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kuberuntime", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/leaky", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/lifecycle", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/logs", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": 
"80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/metrics", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/metrics/collectors", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/mountpod", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/network/dns", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/nodelease", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/nodestatus", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/pleg", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/pod", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/preemption", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/prober", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/prober/results", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/qos", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/remote", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/runtimeclass", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": 
"3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/secret", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/server", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/server/portforward", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/server/remotecommand", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/server/stats", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/server/streaming", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/stats", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/status", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/sysctl", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/token", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/types", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/cache", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/kubelet/util/format", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/ioutils", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/manager", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/queue", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/sliceutils", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/store", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/volumemanager", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/volumemanager/populator", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/winstats", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubemark", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/master/ports", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/printers", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/printers/internalversion", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/probe", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/probe/exec", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/probe/http", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/probe/tcp", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/apis/config", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/apis/config/scheme", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/apis/config/validation", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": 
"3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/config", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/healthcheck", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/iptables", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/ipvs", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/metrics", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/userspace", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/util", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/winkernel", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/winuserspace", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/quota", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/validation", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { - "ImportPath": "k8s.io/kubernetes/pkg/scheduler", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "ImportPath": "k8s.io/kubernetes/pkg/quota/v1", + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/scheduler/algorithm", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": 
"v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/scheduler/algorithmprovider", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/scheduler/api", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/scheduler/api/validation", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/scheduler/cache", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/scheduler/core", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/scheduler/core/equivalence", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/scheduler/factory", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/scheduler/internal/cache", + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger", + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/scheduler/internal/queue", + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/scheduler/metrics", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, 
{ "ImportPath": "k8s.io/kubernetes/pkg/scheduler/util", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/scheduler/volumebinder", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/apparmor", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/securitycontext", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/serviceaccount", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/async", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/bandwidth", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/config", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/configz", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/conntrack", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/dbus", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": 
"3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/ebtables", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/env", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/file", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/filesystem", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/flag", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/flock", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/goroutinemap", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/hash", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/io", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/ipconfig", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/ipset", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/iptables", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/ipvs", - "Comment": 
"v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/keymutex", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/labels", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/mount", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/net", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/net/sets", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/netsh", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/node", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/nsenter", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/oom", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/parsers", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/pod", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/procfs", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/removeall", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/resizefs", 
- "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/resourcecontainer", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/rlimit", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/selinux", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/slice", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/strings", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/sysctl", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/tail", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/taints", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/version", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/version", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/version/verflag", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { - "ImportPath": "k8s.io/kubernetes/pkg/volume/aws_ebs", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "ImportPath": "k8s.io/kubernetes/pkg/volume/awsebs", + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/azure_dd", - 
"Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/azure_file", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/cephfs", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/cinder", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/configmap", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/csi", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/volume/csi/csiv0", + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { - "ImportPath": "k8s.io/kubernetes/pkg/volume/csi/nodeupdater", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "ImportPath": "k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager", + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/downwardapi", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { - "ImportPath": "k8s.io/kubernetes/pkg/volume/empty_dir", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "ImportPath": "k8s.io/kubernetes/pkg/volume/emptydir", + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/fc", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/flexvolume", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/flocker", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { - "ImportPath": "k8s.io/kubernetes/pkg/volume/gce_pd", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "ImportPath": "k8s.io/kubernetes/pkg/volume/gcepd", 
+ "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/git_repo", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/glusterfs", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/host_path", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/iscsi", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/local", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/nfs", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/photon_pd", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/portworx", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/projected", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/quobyte", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/rbd", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/scaleio", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/secret", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/storageos", - "Comment": 
"v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/util", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/util/fs", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/util/operationexecutor", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/util/recyclerclient", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/util/types", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/util/volumepathhandler", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/validation", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/vsphere_volume", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/pkg/windows/service", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/test/utils", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/kubernetes/third_party/forked/golang/expansion", - "Comment": "v1.13.0-alpha.0-1135-g3b6b174a5c", - "Rev": "3b6b174a5cdf1ae3649fa081458aa7d0ddbe7227" + "Comment": "v1.13.0-beta.2-12-g80b400f60b", + "Rev": "80b400f60b7980558614cc3dca1cffa452d8dd57" }, { "ImportPath": "k8s.io/utils/clock", @@ -5500,14 +5624,72 @@ "ImportPath": "k8s.io/utils/pointer", "Rev": "66066c83e385e385ccc3c964b44fd7dcd413d0ed" }, + { + "ImportPath": 
"sigs.k8s.io/cluster-api/pkg/apis/cluster/common", + "Comment": "0.0.0-alpha.3-17-g36eecfab0", + "Rev": "36eecfab0fd0702b3b0a3edf9bf8bd41996477b0" + }, + { + "ImportPath": "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1", + "Comment": "0.0.0-alpha.3-17-g36eecfab0", + "Rev": "36eecfab0fd0702b3b0a3edf9bf8bd41996477b0" + }, + { + "ImportPath": "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset", + "Comment": "0.0.0-alpha.3-17-g36eecfab0", + "Rev": "36eecfab0fd0702b3b0a3edf9bf8bd41996477b0" + }, + { + "ImportPath": "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/fake", + "Comment": "0.0.0-alpha.3-17-g36eecfab0", + "Rev": "36eecfab0fd0702b3b0a3edf9bf8bd41996477b0" + }, + { + "ImportPath": "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/scheme", + "Comment": "0.0.0-alpha.3-17-g36eecfab0", + "Rev": "36eecfab0fd0702b3b0a3edf9bf8bd41996477b0" + }, + { + "ImportPath": "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1", + "Comment": "0.0.0-alpha.3-17-g36eecfab0", + "Rev": "36eecfab0fd0702b3b0a3edf9bf8bd41996477b0" + }, + { + "ImportPath": "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake", + "Comment": "0.0.0-alpha.3-17-g36eecfab0", + "Rev": "36eecfab0fd0702b3b0a3edf9bf8bd41996477b0" + }, + { + "ImportPath": "sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions", + "Comment": "0.0.0-alpha.3-17-g36eecfab0", + "Rev": "36eecfab0fd0702b3b0a3edf9bf8bd41996477b0" + }, + { + "ImportPath": "sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster", + "Comment": "0.0.0-alpha.3-17-g36eecfab0", + "Rev": "36eecfab0fd0702b3b0a3edf9bf8bd41996477b0" + }, + { + "ImportPath": "sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1", + "Comment": "0.0.0-alpha.3-17-g36eecfab0", + "Rev": "36eecfab0fd0702b3b0a3edf9bf8bd41996477b0" + }, + { + "ImportPath": "sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces", + "Comment": "0.0.0-alpha.3-17-g36eecfab0", + "Rev": "36eecfab0fd0702b3b0a3edf9bf8bd41996477b0" + }, + { + "ImportPath": "sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1", + "Comment": "0.0.0-alpha.3-17-g36eecfab0", + "Rev": "36eecfab0fd0702b3b0a3edf9bf8bd41996477b0" + }, { "ImportPath": "sigs.k8s.io/controller-runtime/pkg/runtime/scheme", - "Comment": "v0.1.1-1-gc76f5f24", - "Rev": "c76f5f2428810f45d23095bac6df629060e3c9e0" + "Comment": "v0.1.4-25-g26e59af7", + "Rev": "26e59af72836db80f816314dc8d1c37fe5733298" }, { - "ImportPath": "vbom.ml/util/sortorder", - "Rev": "db5cfe13f5cc80a4990d98e2e1b0707a4d1a5394" } ] } diff --git a/cluster-autoscaler/Makefile b/cluster-autoscaler/Makefile index 96d0d298182c..b6ce9d08bcd4 100644 --- a/cluster-autoscaler/Makefile +++ b/cluster-autoscaler/Makefile @@ -5,30 +5,42 @@ FLAGS= ENVVAR= GOOS?=linux REGISTRY?=staging-k8s.gcr.io -BASEIMAGE?=k8s.gcr.io/debian-base-amd64:0.3.2 +ifdef BUILD_TAGS + TAGS_FLAG=--tags ${BUILD_TAGS} + PROVIDER=-${BUILD_TAGS} + FOR_PROVIDER=" for ${BUILD_TAGS}" +else + TAGS_FLAG= + PROVIDER= + FOR_PROVIDER= +endif deps: go get github.com/tools/godep build: clean deps - $(ENVVAR) GOOS=$(GOOS) godep go build ./... - $(ENVVAR) GOOS=$(GOOS) godep go build -o cluster-autoscaler + $(ENVVAR) GOOS=$(GOOS) godep go build ./... 
${TAGS_FLAG} + $(ENVVAR) GOOS=$(GOOS) godep go build -o cluster-autoscaler ${TAGS_FLAG} build-binary: clean deps - $(ENVVAR) GOOS=$(GOOS) godep go build -o cluster-autoscaler + $(ENVVAR) GOOS=$(GOOS) godep go build -o cluster-autoscaler ${TAGS_FLAG} test-unit: clean deps build - $(ENVVAR) godep go test --test.short -race ./... $(FLAGS) + $(ENVVAR) godep go test --test.short -race ./... $(FLAGS) ${TAGS_FLAG} dev-release: build-binary execute-release - echo "Release ${TAG} completed" + @echo "Release ${TAG}${FOR_PROVIDER} completed" make-image: +ifdef BASEIMAGE docker build --pull --build-arg BASEIMAGE=${BASEIMAGE} \ - -t ${REGISTRY}/cluster-autoscaler:${TAG} . + -t ${REGISTRY}/cluster-autoscaler${PROVIDER}:${TAG} . +else + docker build --pull -t ${REGISTRY}/cluster-autoscaler${PROVIDER}:${TAG} . +endif push-image: - ./push_image.sh ${REGISTRY}/cluster-autoscaler:${TAG} + ./push_image.sh ${REGISTRY}/cluster-autoscaler${PROVIDER}:${TAG} execute-release: make-image push-image @@ -46,16 +58,16 @@ docker-builder: docker build -t autoscaling-builder ../builder build-in-docker: clean docker-builder - docker run -v `pwd`:/gopath/src/k8s.io/autoscaler/cluster-autoscaler/ autoscaling-builder:latest bash -c 'cd /gopath/src/k8s.io/autoscaler/cluster-autoscaler && make build-binary' + docker run -v `pwd`:/gopath/src/k8s.io/autoscaler/cluster-autoscaler/ autoscaling-builder:latest bash -c 'cd /gopath/src/k8s.io/autoscaler/cluster-autoscaler && BUILD_TAGS=${BUILD_TAGS} make build-binary' release: build-in-docker execute-release - echo "Full in-docker release ${TAG} completed" + @echo "Full in-docker release ${TAG}${FOR_PROVIDER} completed" container: build-in-docker make-image - echo "Created in-docker image ${TAG}" + @echo "Created in-docker image ${TAG}${FOR_PROVIDER}" test-in-docker: clean docker-builder - docker run -v `pwd`:/gopath/src/k8s.io/autoscaler/cluster-autoscaler/ autoscaling-builder:latest bash -c 'cd /gopath/src/k8s.io/autoscaler/cluster-autoscaler && godep go test ./... ' + docker run -v `pwd`:/gopath/src/k8s.io/autoscaler/cluster-autoscaler/ autoscaling-builder:latest bash -c 'cd /gopath/src/k8s.io/autoscaler/cluster-autoscaler && godep go test ./... ${TAGS_FLAG}' .PHONY: all deps build test-unit clean format execute-release dev-release docker-builder build-in-docker release generate diff --git a/cluster-autoscaler/OWNERS b/cluster-autoscaler/OWNERS index d0010c1c9100..c3f20bc85294 100644 --- a/cluster-autoscaler/OWNERS +++ b/cluster-autoscaler/OWNERS @@ -1,9 +1,9 @@ approvers: - piosz -- aleksandram -- lukaszos +- aleksandra-malinowska +- losipiuk reviewers: - feiskyer - piosz -- aleksandram -- lukaszos +- aleksandra-malinowska +- losipiuk diff --git a/cluster-autoscaler/README.md b/cluster-autoscaler/README.md index 17659bdd4a5a..25c86fb347f2 100644 --- a/cluster-autoscaler/README.md +++ b/cluster-autoscaler/README.md @@ -15,8 +15,11 @@ Is available [HERE](./FAQ.md). We recommend using Cluster Autoscaler with the Kubernetes master version for which it was meant. The below combinations have been tested on GCP. We don't do cross version testing or compatibility testing in other environments. Some user reports indicate successful use of a newer version of Cluster Autoscaler with older clusters; however, there is always a chance that it won't work as expected. +Starting from Kubernetes 1.12, the versioning scheme was changed to match Kubernetes minor releases exactly. 
+ | Kubernetes Version | CA Version | |--------|--------| +| 1.12.X | 1.12.X | | 1.11.X | 1.3.X | | 1.10.X | 1.2.X | | 1.9.X | 1.1.X | @@ -126,3 +129,4 @@ Supported cloud providers: * GKE https://cloud.google.com/container-engine/docs/cluster-autoscaler * AWS https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md * Azure https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/azure/README.md +* Alibaba Cloud https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/alicloud/README.md diff --git a/cluster-autoscaler/_override/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2018-03-31/containerservice/managedclusters.go b/cluster-autoscaler/_override/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2018-03-31/containerservice/managedclusters.go index 753471bfbf24..661ab1347660 100644 --- a/cluster-autoscaler/_override/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2018-03-31/containerservice/managedclusters.go +++ b/cluster-autoscaler/_override/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2018-03-31/containerservice/managedclusters.go @@ -76,7 +76,6 @@ func (client ManagedClustersClient) CreateOrUpdate(ctx context.Context, resource {Target: "parameters.ManagedClusterProperties.AadProfile", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.AadProfile.ClientAppID", Name: validation.Null, Rule: true, Chain: nil}, {Target: "parameters.ManagedClusterProperties.AadProfile.ServerAppID", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.ManagedClusterProperties.AadProfile.ServerAppSecret", Name: validation.Null, Rule: true, Chain: nil}, }}, }}}}}); err != nil { return result, validation.NewError("containerservice.ManagedClustersClient", "CreateOrUpdate", err.Error()) @@ -609,3 +608,77 @@ func (client ManagedClustersClient) ListByResourceGroupComplete(ctx context.Cont result.page, err = client.ListByResourceGroup(ctx, resourceGroupName) return } + +// UpdateTags updates a managed cluster with the specified tags. +// Parameters: +// resourceGroupName - the name of the resource group. +// resourceName - the name of the managed cluster resource. +// parameters - parameters supplied to the Update Managed Cluster Tags operation. +func (client ManagedClustersClient) UpdateTags(ctx context.Context, resourceGroupName string, resourceName string, parameters TagsObject) (result ManagedClustersUpdateTagsFuture, err error) { + req, err := client.UpdateTagsPreparer(ctx, resourceGroupName, resourceName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "UpdateTags", nil, "Failure preparing request") + return + } + + result, err = client.UpdateTagsSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "UpdateTags", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdateTagsPreparer prepares the UpdateTags request. 
+func (client ManagedClustersClient) UpdateTagsPreparer(ctx context.Context, resourceGroupName string, resourceName string, parameters TagsObject) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-03-31" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateTagsSender sends the UpdateTags request. The method will close the +// http.Response Body if it receives an error. +func (client ManagedClustersClient) UpdateTagsSender(req *http.Request) (future ManagedClustersUpdateTagsFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// UpdateTagsResponder handles the response to the UpdateTags request. The method always +// closes the http.Response Body. +func (client ManagedClustersClient) UpdateTagsResponder(resp *http.Response) (result ManagedCluster, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/cluster-autoscaler/_override/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2018-03-31/containerservice/models.go b/cluster-autoscaler/_override/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2018-03-31/containerservice/models.go index f8b0bc6c3b8b..ba39aa50ca0c 100644 --- a/cluster-autoscaler/_override/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2018-03-31/containerservice/models.go +++ b/cluster-autoscaler/_override/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2018-03-31/containerservice/models.go @@ -1301,6 +1301,35 @@ func (future *ManagedClustersDeleteFuture) Result(client ManagedClustersClient) return } +// ManagedClustersUpdateTagsFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type ManagedClustersUpdateTagsFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *ManagedClustersUpdateTagsFuture) Result(client ManagedClustersClient) (mc ManagedCluster, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerservice.ManagedClustersUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if mc.Response.Response, err = future.GetResult(sender); err == nil && mc.Response.Response.StatusCode != http.StatusNoContent { + mc, err = client.UpdateTagsResponder(mc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersUpdateTagsFuture", "Result", mc.Response.Response, "Failure responding to request") + } + } + return +} + // ManagedClusterUpgradeProfile the list of available upgrades for compute pools. type ManagedClusterUpgradeProfile struct { autorest.Response `json:"-"` @@ -1709,6 +1738,21 @@ type SSHPublicKey struct { KeyData *string `json:"keyData,omitempty"` } +// TagsObject tags object for patch operations. +type TagsObject struct { + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for TagsObject. +func (toVar TagsObject) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if toVar.Tags != nil { + objectMap["tags"] = toVar.Tags + } + return json.Marshal(objectMap) +} + // VMDiagnostics profile for diagnostics on the container service VMs. type VMDiagnostics struct { // Enabled - Whether the VM diagnostic agent is provisioned on the VM. 
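The UpdateTags methods added above follow go-autorest's standard long-running-operation split: the Preparer builds a PATCH request, the Sender wraps the response in a future, and the Responder decodes the terminal ManagedCluster, while TagsObject.MarshalJSON ensures only the tags map is sent on the wire. A minimal, hypothetical caller might look like the following; the import paths, the environment-based authorizer, and all resource names and values are illustrative assumptions, not part of this diff:

```go
// Hypothetical usage sketch of the new UpdateTags API; nothing here is
// prescribed by the diff except the UpdateTags/Result call shapes.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2018-03-31/containerservice"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Assumed credential setup via environment variables (client ID/secret etc.).
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		panic(err)
	}
	client := containerservice.NewManagedClustersClient("<subscription-id>")
	client.Authorizer = authorizer

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
	defer cancel()

	// PATCH only the tags; TagsObject.MarshalJSON drops everything else.
	env := "staging"
	future, err := client.UpdateTags(ctx, "my-resource-group", "my-aks-cluster",
		containerservice.TagsObject{Tags: map[string]*string{"env": &env}})
	if err != nil {
		panic(err)
	}

	// Poll the long-running operation to completion, then decode the cluster.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		panic(err)
	}
	mc, err := future.Result(client)
	if err != nil {
		panic(err)
	}
	fmt.Println("tags on cluster after update:", len(mc.Tags))
}
```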
diff --git a/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/BUILD b/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/BUILD new file mode 100644 index 000000000000..e77ba34e19ab --- /dev/null +++ b/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/BUILD @@ -0,0 +1,48 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "authorization.go", + "autorest.go", + "client.go", + "error.go", + "preparer.go", + "responder.go", + "retriablerequest.go", + "retriablerequest_1.7.go", + "retriablerequest_1.8.go", + "sender.go", + "utility.go", + "version.go", + ], + importmap = "k8s.io/kubernetes/vendor/github.com/Azure/go-autorest/autorest", + importpath = "github.com/Azure/go-autorest/autorest", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library", + "//vendor/github.com/Azure/go-autorest/logger:go_default_library", + "//vendor/github.com/Azure/go-autorest/version:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//vendor/github.com/Azure/go-autorest/autorest/adal:all-srcs", + "//vendor/github.com/Azure/go-autorest/autorest/azure:all-srcs", + "//vendor/github.com/Azure/go-autorest/autorest/date:all-srcs", + "//vendor/github.com/Azure/go-autorest/autorest/to:all-srcs", + "//vendor/github.com/Azure/go-autorest/autorest/validation:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/adal/BUILD b/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/adal/BUILD new file mode 100644 index 000000000000..9d6551166477 --- /dev/null +++ b/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/adal/BUILD @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "config.go", + "devicetoken.go", + "persist.go", + "sender.go", + "token.go", + ], + importmap = "k8s.io/kubernetes/vendor/github.com/Azure/go-autorest/autorest/adal", + importpath = "github.com/Azure/go-autorest/autorest/adal", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/Azure/go-autorest/autorest/date:go_default_library", + "//vendor/github.com/Azure/go-autorest/version:go_default_library", + "//vendor/github.com/dgrijalva/jwt-go:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/adal/config.go b/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/adal/config.go index bee5e61ddb2c..8c83a917ff7e 100644 --- a/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/adal/config.go +++ b/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/adal/config.go @@ -19,10 +19,6 @@ import ( "net/url" ) -const ( - activeDirectoryAPIVersion = "1.0" -) - // OAuthConfig represents the endpoints needed // in OAuth operations type OAuthConfig struct { @@ -46,11 +42,25 @@ func validateStringParam(param, name string) error 
{ // NewOAuthConfig returns an OAuthConfig with tenant specific urls func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { + apiVer := "1.0" + return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer) +} + +// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant specific urls. +// If apiVersion is not nil the "api-version" query parameter will be appended to the endpoint URLs with the specified value. +func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) { if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil { return nil, err } + api := "" // it's legal for tenantID to be empty so don't validate it - const activeDirectoryEndpointTemplate = "%s/oauth2/%s?api-version=%s" + if apiVersion != nil { + if err := validateStringParam(*apiVersion, "apiVersion"); err != nil { + return nil, err + } + api = fmt.Sprintf("?api-version=%s", *apiVersion) + } + const activeDirectoryEndpointTemplate = "%s/oauth2/%s%s" u, err := url.Parse(activeDirectoryEndpoint) if err != nil { return nil, err @@ -59,15 +69,15 @@ func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, err if err != nil { return nil, err } - authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", activeDirectoryAPIVersion)) + authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api)) if err != nil { return nil, err } - tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", activeDirectoryAPIVersion)) + tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api)) if err != nil { return nil, err } - deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", activeDirectoryAPIVersion)) + deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api)) if err != nil { return nil, err } diff --git a/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/adal/sender.go b/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/adal/sender.go index 29f5e086cb46..834401e00dec 100644 --- a/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/adal/sender.go +++ b/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/adal/sender.go @@ -38,7 +38,7 @@ func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { return sf(r) } -// SendDecorator takes and possibility decorates, by wrapping, a Sender. Decorators may affect the +// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the // http.Request and pass it along or, first, pass the http.Request along then react to the // http.Response result. 
type SendDecorator func(Sender) Sender diff --git a/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/adal/token.go b/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/adal/token.go index eec4dced7e56..2fd340d6922f 100644 --- a/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/adal/token.go +++ b/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/adal/token.go @@ -29,12 +29,12 @@ import ( "net" "net/http" "net/url" - "strconv" "strings" "sync" "time" "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/version" "github.com/dgrijalva/jwt-go" ) @@ -96,18 +96,27 @@ type RefresherWithContext interface { type TokenRefreshCallback func(Token) error // Token encapsulates the access token used to authorize Azure requests. +// https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response type Token struct { AccessToken string `json:"access_token"` RefreshToken string `json:"refresh_token"` - ExpiresIn string `json:"expires_in"` - ExpiresOn string `json:"expires_on"` - NotBefore string `json:"not_before"` + ExpiresIn json.Number `json:"expires_in"` + ExpiresOn json.Number `json:"expires_on"` + NotBefore json.Number `json:"not_before"` Resource string `json:"resource"` Type string `json:"token_type"` } +func newToken() Token { + return Token{ + ExpiresIn: "0", + ExpiresOn: "0", + NotBefore: "0", + } +} + // IsZero returns true if the token object is zero-initialized. func (t Token) IsZero() bool { return t == Token{} @@ -115,12 +124,12 @@ func (t Token) IsZero() bool { // Expires returns the time.Time when the Token expires. func (t Token) Expires() time.Time { - s, err := strconv.Atoi(t.ExpiresOn) + s, err := t.ExpiresOn.Float64() if err != nil { s = -3600 } - expiration := date.NewUnixTimeFromSeconds(float64(s)) + expiration := date.NewUnixTimeFromSeconds(s) return time.Time(expiration).UTC() } @@ -217,6 +226,8 @@ func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalTo token := jwt.New(jwt.SigningMethodRS256) token.Header["x5t"] = thumbprint + x5c := []string{base64.StdEncoding.EncodeToString(secret.Certificate.Raw)} + token.Header["x5c"] = x5c token.Claims = jwt.MapClaims{ "aud": spt.inner.OauthConfig.TokenEndpoint.String(), "iss": spt.inner.ClientID, @@ -413,6 +424,7 @@ func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, reso } spt := &ServicePrincipalToken{ inner: servicePrincipalToken{ + Token: newToken(), OauthConfig: oauthConfig, Secret: secret, ClientID: id, @@ -652,6 +664,7 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedI spt := &ServicePrincipalToken{ inner: servicePrincipalToken{ + Token: newToken(), OauthConfig: OAuthConfig{ TokenEndpoint: *msiEndpointURL, }, @@ -778,6 +791,7 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource if err != nil { return fmt.Errorf("adal: Failed to build the refresh request. 
Error = '%v'", err) } + req.Header.Add("User-Agent", version.UserAgent()) req = req.WithContext(ctx) if !isIMDS(spt.inner.OauthConfig.TokenEndpoint) { v := url.Values{} diff --git a/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/azure/BUILD b/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/azure/BUILD new file mode 100644 index 000000000000..fd41c61c8231 --- /dev/null +++ b/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/azure/BUILD @@ -0,0 +1,30 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "async.go", + "azure.go", + "environments.go", + "metadata_environment.go", + "rp.go", + ], + importmap = "k8s.io/kubernetes/vendor/github.com/Azure/go-autorest/autorest/azure", + importpath = "github.com/Azure/go-autorest/autorest/azure", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/Azure/go-autorest/autorest:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/azure/async.go b/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/azure/async.go index e85e84fd1f21..e94e68567b2c 100644 --- a/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/azure/async.go +++ b/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/azure/async.go @@ -119,7 +119,10 @@ func (f *Future) Done(sender autorest.Sender) (bool, error) { if err := f.pt.updatePollingState(f.pt.provisioningStateApplicable()); err != nil { return false, err } - if err := f.pt.updateHeaders(); err != nil { + if err := f.pt.initPollingMethod(); err != nil { + return false, err + } + if err := f.pt.updatePollingMethod(); err != nil { return false, err } return f.pt.hasTerminated(), f.pt.pollingError() @@ -165,8 +168,12 @@ func (f Future) WaitForCompletion(ctx context.Context, client autorest.Client) e // polling duration has been exceeded. It will retry failed polling attempts based on // the retry value defined in the client up to the maximum retry attempts. func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) error { - ctx, cancel := context.WithTimeout(ctx, client.PollingDuration) - defer cancel() + if d := client.PollingDuration; d != 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, d) + defer cancel() + } + done, err := f.Done(client) for attempts := 0; !done; done, err = f.Done(client) { if attempts >= client.RetryAttempts { @@ -264,7 +271,7 @@ type pollingTracker interface { // these methods can differ per tracker // checks the response headers and status code to determine the polling mechanism - updateHeaders() error + updatePollingMethod() error // checks the response for tracker-specific error conditions checkForErrors() error @@ -274,6 +281,10 @@ type pollingTracker interface { // methods common to all trackers + // initializes a tracker's polling URL and method, called for each iteration. + // these values can be overridden by each polling tracker as required. 
+ initPollingMethod() error + // initializes the tracker's internal state, call this when the tracker is created initializeState() error @@ -348,6 +359,10 @@ func (pt *pollingTrackerBase) initializeState() error { case http.StatusOK: if ps := pt.getProvisioningState(); ps != nil { pt.State = *ps + if pt.hasFailed() { + pt.updateErrorFromResponse() + return pt.pollingError() + } } else { pt.State = operationSucceeded } @@ -364,8 +379,9 @@ func (pt *pollingTrackerBase) initializeState() error { default: pt.State = operationFailed pt.updateErrorFromResponse() + return pt.pollingError() } - return nil + return pt.initPollingMethod() } func (pt pollingTrackerBase) getProvisioningState() *string { @@ -416,12 +432,14 @@ func (pt *pollingTrackerBase) pollForStatus(sender autorest.Sender) error { } else { // check response body for error content pt.updateErrorFromResponse() + err = pt.pollingError() } return err } // attempts to unmarshal a ServiceError type from the response body. // if that fails then make a best attempt at creating something meaningful. +// NOTE: this assumes that the async operation has failed. func (pt *pollingTrackerBase) updateErrorFromResponse() { var err error if pt.resp.ContentLength != 0 { @@ -431,8 +449,7 @@ func (pt *pollingTrackerBase) updateErrorFromResponse() { re := respErr{} defer pt.resp.Body.Close() var b []byte - b, err = ioutil.ReadAll(pt.resp.Body) - if err != nil { + if b, err = ioutil.ReadAll(pt.resp.Body); err != nil { goto Default } if err = json.Unmarshal(b, &re); err != nil { @@ -445,20 +462,29 @@ func (pt *pollingTrackerBase) updateErrorFromResponse() { goto Default } } - if re.ServiceError != nil { + // the unmarshaller will ensure re.ServiceError is non-nil + // even if there was no content unmarshalled so check the code. + if re.ServiceError.Code != "" { pt.Err = re.ServiceError return } } Default: se := &ServiceError{ - Code: fmt.Sprintf("HTTP status code %v", pt.resp.StatusCode), - Message: pt.resp.Status, + Code: pt.pollingStatus(), + Message: "The async operation failed.", } if err != nil { se.InnerError = make(map[string]interface{}) se.InnerError["unmarshalError"] = err.Error() } + // stick the response body into the error object in hopes + // it contains something useful to help diagnose the failure. + if len(pt.rawBody) > 0 { + se.AdditionalInfo = []map[string]interface{}{ + pt.rawBody, + } + } pt.Err = se } @@ -538,13 +564,33 @@ func (pt pollingTrackerBase) baseCheckForErrors() error { return nil } +// default initialization of polling URL/method. each verb tracker will update this as required. 
+func (pt *pollingTrackerBase) initPollingMethod() error { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + return nil + } + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh != "" { + pt.URI = lh + pt.Pm = PollingLocation + return nil + } + // it's ok if we didn't find a polling header, this will be handled elsewhere + return nil +} + // DELETE type pollingTrackerDelete struct { pollingTrackerBase } -func (pt *pollingTrackerDelete) updateHeaders() error { +func (pt *pollingTrackerDelete) updatePollingMethod() error { // for 201 the Location header is required if pt.resp.StatusCode == http.StatusCreated { if lh, err := getURLFromLocationHeader(pt.resp); err != nil { @@ -600,7 +646,7 @@ type pollingTrackerPatch struct { pollingTrackerBase } -func (pt *pollingTrackerPatch) updateHeaders() error { +func (pt *pollingTrackerPatch) updatePollingMethod() error { // by default we can use the original URL for polling and final GET if pt.URI == "" { pt.URI = pt.resp.Request.URL.String() @@ -658,7 +704,7 @@ type pollingTrackerPost struct { pollingTrackerBase } -func (pt *pollingTrackerPost) updateHeaders() error { +func (pt *pollingTrackerPost) updatePollingMethod() error { // 201 requires Location header if pt.resp.StatusCode == http.StatusCreated { if lh, err := getURLFromLocationHeader(pt.resp); err != nil { @@ -714,7 +760,7 @@ type pollingTrackerPut struct { pollingTrackerBase } -func (pt *pollingTrackerPut) updateHeaders() error { +func (pt *pollingTrackerPut) updatePollingMethod() error { // by default we can use the original URL for polling and final GET if pt.URI == "" { pt.URI = pt.resp.Request.URL.String() @@ -808,7 +854,7 @@ func createPollingTracker(resp *http.Response) (pollingTracker, error) { // this initializes the polling header values, we do this during creation in case the // initial response sends us invalid values; this way the API call will return a non-nil // error (not doing this means the error shows up in Future.Done) - return pt, pt.updateHeaders() + return pt, pt.updatePollingMethod() } // gets the polling URL from the Azure-AsyncOperation header. diff --git a/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/azure/azure.go b/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/azure/azure.go index a702ffe75172..3a0a439ff930 100644 --- a/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/azure/azure.go +++ b/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/azure/azure.go @@ -44,11 +44,12 @@ const ( // ServiceError encapsulates the error response from an Azure service. // It adheres to the OData v4 specification for error responses. 
type ServiceError struct { - Code string `json:"code"` - Message string `json:"message"` - Target *string `json:"target"` - Details []map[string]interface{} `json:"details"` - InnerError map[string]interface{} `json:"innererror"` + Code string `json:"code"` + Message string `json:"message"` + Target *string `json:"target"` + Details []map[string]interface{} `json:"details"` + InnerError map[string]interface{} `json:"innererror"` + AdditionalInfo []map[string]interface{} `json:"additionalInfo"` } func (se ServiceError) Error() string { @@ -74,6 +75,14 @@ func (se ServiceError) Error() string { result += fmt.Sprintf(" InnerError=%v", string(d)) } + if se.AdditionalInfo != nil { + d, err := json.Marshal(se.AdditionalInfo) + if err != nil { + result += fmt.Sprintf(" AdditionalInfo=%v", se.AdditionalInfo) + } + result += fmt.Sprintf(" AdditionalInfo=%v", string(d)) + } + return result } @@ -86,44 +95,47 @@ func (se *ServiceError) UnmarshalJSON(b []byte) error { // http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091 type serviceError1 struct { - Code string `json:"code"` - Message string `json:"message"` - Target *string `json:"target"` - Details []map[string]interface{} `json:"details"` - InnerError map[string]interface{} `json:"innererror"` + Code string `json:"code"` + Message string `json:"message"` + Target *string `json:"target"` + Details []map[string]interface{} `json:"details"` + InnerError map[string]interface{} `json:"innererror"` + AdditionalInfo []map[string]interface{} `json:"additionalInfo"` } type serviceError2 struct { - Code string `json:"code"` - Message string `json:"message"` - Target *string `json:"target"` - Details map[string]interface{} `json:"details"` - InnerError map[string]interface{} `json:"innererror"` + Code string `json:"code"` + Message string `json:"message"` + Target *string `json:"target"` + Details map[string]interface{} `json:"details"` + InnerError map[string]interface{} `json:"innererror"` + AdditionalInfo []map[string]interface{} `json:"additionalInfo"` } se1 := serviceError1{} err := json.Unmarshal(b, &se1) if err == nil { - se.populate(se1.Code, se1.Message, se1.Target, se1.Details, se1.InnerError) + se.populate(se1.Code, se1.Message, se1.Target, se1.Details, se1.InnerError, se1.AdditionalInfo) return nil } se2 := serviceError2{} err = json.Unmarshal(b, &se2) if err == nil { - se.populate(se2.Code, se2.Message, se2.Target, nil, se2.InnerError) + se.populate(se2.Code, se2.Message, se2.Target, nil, se2.InnerError, se2.AdditionalInfo) se.Details = append(se.Details, se2.Details) return nil } return err } -func (se *ServiceError) populate(code, message string, target *string, details []map[string]interface{}, inner map[string]interface{}) { +func (se *ServiceError) populate(code, message string, target *string, details []map[string]interface{}, inner map[string]interface{}, additional []map[string]interface{}) { se.Code = code se.Message = message se.Target = target se.Details = details se.InnerError = inner + se.AdditionalInfo = additional } // RequestError describes an error response returned by Azure service. 
diff --git a/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/azure/rp.go b/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/azure/rp.go index bd34f0ed5a58..86ce9f2b5b1e 100644 --- a/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/azure/rp.go +++ b/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/azure/rp.go @@ -140,8 +140,8 @@ func register(client autorest.Client, originalReq *http.Request, re RequestError } // poll for registered provisioning state - now := time.Now() - for err == nil && time.Since(now) < client.PollingDuration { + registrationStartTime := time.Now() + for err == nil && (client.PollingDuration == 0 || (client.PollingDuration != 0 && time.Since(registrationStartTime) < client.PollingDuration)) { // taken from the resources SDK // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45 preparer := autorest.CreatePreparer( @@ -183,7 +183,7 @@ func register(client autorest.Client, originalReq *http.Request, re RequestError return originalReq.Context().Err() } } - if !(time.Since(now) < client.PollingDuration) { + if client.PollingDuration != 0 && !(time.Since(registrationStartTime) < client.PollingDuration) { return errors.New("polling for resource provider registration has exceeded the polling duration") } return err diff --git a/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/client.go b/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/client.go index 4e92dcad077a..48eb0e8b73bc 100644 --- a/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/client.go +++ b/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/client.go @@ -22,8 +22,11 @@ import ( "log" "net/http" "net/http/cookiejar" - "runtime" + "strings" "time" + + "github.com/Azure/go-autorest/logger" + "github.com/Azure/go-autorest/version" ) const ( @@ -41,15 +44,6 @@ const ( ) var ( - // defaultUserAgent builds a string containing the Go version, system archityecture and OS, - // and the go-autorest version. - defaultUserAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", - runtime.Version(), - runtime.GOARCH, - runtime.GOOS, - Version(), - ) - // StatusCodesForRetry are a defined group of status code for which the client will retry StatusCodesForRetry = []int{ http.StatusRequestTimeout, // 408 @@ -153,6 +147,7 @@ type Client struct { PollingDelay time.Duration // PollingDuration sets the maximum polling time after which an error is returned. + // Setting this to zero will use the provided context to control the duration. PollingDuration time.Duration // RetryAttempts sets the default number of retry attempts for client. 
@@ -179,7 +174,7 @@ func NewClientWithUserAgent(ua string) Client { PollingDuration: DefaultPollingDuration, RetryAttempts: DefaultRetryAttempts, RetryDuration: DefaultRetryDuration, - UserAgent: defaultUserAgent, + UserAgent: version.UserAgent(), } c.Sender = c.sender() c.AddToUserAgent(ua) @@ -216,8 +211,17 @@ func (c Client) Do(r *http.Request) (*http.Response, error) { } return resp, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed") } - + logger.Instance.WriteRequest(r, logger.Filter{ + Header: func(k string, v []string) (bool, []string) { + // remove the auth token from the log + if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "Ocp-Apim-Subscription-Key") { + v = []string{"**REDACTED**"} + } + return true, v + }, + }) resp, err := SendWithSender(c.sender(), r) + logger.Instance.WriteResponse(resp, logger.Filter{}) Respond(resp, c.ByInspecting()) return resp, err } diff --git a/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/date/BUILD b/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/date/BUILD new file mode 100644 index 000000000000..19fce15d84f4 --- /dev/null +++ b/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/date/BUILD @@ -0,0 +1,29 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "date.go", + "time.go", + "timerfc1123.go", + "unixtime.go", + "utility.go", + ], + importmap = "k8s.io/kubernetes/vendor/github.com/Azure/go-autorest/autorest/date", + importpath = "github.com/Azure/go-autorest/autorest/date", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/sender.go b/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/sender.go index 28c7222d6b6d..8811cb2e7f9b 100644 --- a/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/sender.go +++ b/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/sender.go @@ -234,7 +234,7 @@ func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) Se } delayed := DelayWithRetryAfter(resp, r.Context().Done()) if !delayed && !DelayForBackoff(backoff, attempt, r.Context().Done()) { - return nil, r.Context().Err() + return resp, r.Context().Err() } // don't count a 429 against the number of attempts // so that we continue to retry until it succeeds diff --git a/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/to/BUILD b/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/to/BUILD new file mode 100644 index 000000000000..e435f0e4d67e --- /dev/null +++ b/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/to/BUILD @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["convert.go"], + importmap = "k8s.io/kubernetes/vendor/github.com/Azure/go-autorest/autorest/to", + importpath = "github.com/Azure/go-autorest/autorest/to", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = 
["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/validation/BUILD b/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/validation/BUILD new file mode 100644 index 000000000000..411f4e8a24f7 --- /dev/null +++ b/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/validation/BUILD @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "error.go", + "validation.go", + ], + importmap = "k8s.io/kubernetes/vendor/github.com/Azure/go-autorest/autorest/validation", + importpath = "github.com/Azure/go-autorest/autorest/validation", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/version.go b/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/version.go index e32cd68fecac..3c6451546bae 100644 --- a/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/version.go +++ b/cluster-autoscaler/_override/github.com/Azure/go-autorest/autorest/version.go @@ -1,5 +1,7 @@ package autorest +import "github.com/Azure/go-autorest/version" + // Copyright 2017 Microsoft Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,5 +18,5 @@ package autorest // Version returns the semantic version (see http://semver.org). func Version() string { - return "v10.12.0" + return version.Number } diff --git a/cluster-autoscaler/_override/github.com/Azure/go-autorest/logger/BUILD b/cluster-autoscaler/_override/github.com/Azure/go-autorest/logger/BUILD new file mode 100644 index 000000000000..d43bee7c193f --- /dev/null +++ b/cluster-autoscaler/_override/github.com/Azure/go-autorest/logger/BUILD @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["logger.go"], + importmap = "k8s.io/kubernetes/vendor/github.com/Azure/go-autorest/logger", + importpath = "github.com/Azure/go-autorest/logger", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/_override/github.com/Azure/go-autorest/logger/logger.go b/cluster-autoscaler/_override/github.com/Azure/go-autorest/logger/logger.go new file mode 100644 index 000000000000..bd9e0a3b1ee0 --- /dev/null +++ b/cluster-autoscaler/_override/github.com/Azure/go-autorest/logger/logger.go @@ -0,0 +1,328 @@ +package logger + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "strings" + "sync" + "time" +) + +// LevelType tells a logger the minimum level to log. When code reports a log entry, +// the LogLevel indicates the level of the log entry. The logger only records entries +// whose level is at least the level it was told to log. See the Log* constants. +// For example, if a logger is configured with LogError, then LogError, LogPanic, +// and LogFatal entries will be logged; lower level entries are ignored. +type LevelType uint32 + +const ( + // LogNone tells a logger not to log any entries passed to it. + LogNone LevelType = iota + + // LogFatal tells a logger to log all LogFatal entries passed to it. + LogFatal + + // LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it. + LogPanic + + // LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it. + LogError + + // LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogWarning + + // LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogInfo + + // LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogDebug +) + +const ( + logNone = "NONE" + logFatal = "FATAL" + logPanic = "PANIC" + logError = "ERROR" + logWarning = "WARNING" + logInfo = "INFO" + logDebug = "DEBUG" + logUnknown = "UNKNOWN" +) + +// ParseLevel converts the specified string into the corresponding LevelType. +func ParseLevel(s string) (lt LevelType, err error) { + switch strings.ToUpper(s) { + case logFatal: + lt = LogFatal + case logPanic: + lt = LogPanic + case logError: + lt = LogError + case logWarning: + lt = LogWarning + case logInfo: + lt = LogInfo + case logDebug: + lt = LogDebug + default: + err = fmt.Errorf("bad log level '%s'", s) + } + return +} + +// String implements the stringer interface for LevelType. +func (lt LevelType) String() string { + switch lt { + case LogNone: + return logNone + case LogFatal: + return logFatal + case LogPanic: + return logPanic + case LogError: + return logError + case LogWarning: + return logWarning + case LogInfo: + return logInfo + case LogDebug: + return logDebug + default: + return logUnknown + } +} + +// Filter defines functions for filtering HTTP request/response content. +type Filter struct { + // URL returns a potentially modified string representation of a request URL. + URL func(u *url.URL) string + + // Header returns a potentially modified set of values for the specified key. + // To completely exclude the header key/values return false. + Header func(key string, val []string) (bool, []string) + + // Body returns a potentially modified request/response body. + Body func(b []byte) []byte +} + +func (f Filter) processURL(u *url.URL) string { + if f.URL == nil { + return u.String() + } + return f.URL(u) +} + +func (f Filter) processHeader(k string, val []string) (bool, []string) { + if f.Header == nil { + return true, val + } + return f.Header(k, val) +} + +func (f Filter) processBody(b []byte) []byte { + if f.Body == nil { + return b + } + return f.Body(b) +} + +// Writer defines methods for writing to a logging facility. +type Writer interface { + // Writeln writes the specified message with the standard log entry header and new-line character. 
+	Writeln(level LevelType, message string)
+
+	// Writef writes the specified format specifier with the standard log entry header and no new-line character.
+	Writef(level LevelType, format string, a ...interface{})
+
+	// WriteRequest writes the specified HTTP request to the logger if the log level is greater than
+	// or equal to LogInfo. The request body, if set, is logged at level LogDebug or higher.
+	// Custom filters can be specified to exclude URL, header, and/or body content from the log.
+	// By default no request content is excluded.
+	WriteRequest(req *http.Request, filter Filter)
+
+	// WriteResponse writes the specified HTTP response to the logger if the log level is greater than
+	// or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher.
+	// Custom filters can be specified to exclude URL, header, and/or body content from the log.
+	// By default no response content is excluded.
+	WriteResponse(resp *http.Response, filter Filter)
+}
+
+// Instance is the default log writer initialized during package init.
+// This can be replaced with a custom implementation as required.
+var Instance Writer
+
+// default log level
+var logLevel = LogNone
+
+// Level returns the value specified in AZURE_GO_SDK_LOG_LEVEL.
+// If no value was specified the default value is LogNone.
+// Custom loggers can call this to retrieve the configured log level.
+func Level() LevelType {
+	return logLevel
+}
+
+func init() {
+	// separated for testing purposes
+	initDefaultLogger()
+}
+
+func initDefaultLogger() {
+	// init with nilLogger so callers don't have to do a nil check on Instance
+	Instance = nilLogger{}
+	llStr := strings.ToLower(os.Getenv("AZURE_GO_SDK_LOG_LEVEL"))
+	if llStr == "" {
+		return
+	}
+	var err error
+	logLevel, err = ParseLevel(llStr)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "go-autorest: failed to parse log level: %s\n", err.Error())
+		return
+	}
+	if logLevel == LogNone {
+		return
+	}
+	// default to stderr
+	dest := os.Stderr
+	lfStr := os.Getenv("AZURE_GO_SDK_LOG_FILE")
+	if strings.EqualFold(lfStr, "stdout") {
+		dest = os.Stdout
+	} else if lfStr != "" {
+		lf, err := os.Create(lfStr)
+		if err == nil {
+			dest = lf
+		} else {
+			fmt.Fprintf(os.Stderr, "go-autorest: failed to create log file, using stderr: %s\n", err.Error())
+		}
+	}
+	Instance = fileLogger{
+		logLevel: logLevel,
+		mu:       &sync.Mutex{},
+		logFile:  dest,
+	}
+}
+
+// the nil logger does nothing
+type nilLogger struct{}
+
+func (nilLogger) Writeln(LevelType, string) {}
+
+func (nilLogger) Writef(LevelType, string, ...interface{}) {}
+
+func (nilLogger) WriteRequest(*http.Request, Filter) {}
+
+func (nilLogger) WriteResponse(*http.Response, Filter) {}
+
+// A File is used instead of a Logger so the stream can be flushed after every write.
+type fileLogger struct { + logLevel LevelType + mu *sync.Mutex // for synchronizing writes to logFile + logFile *os.File +} + +func (fl fileLogger) Writeln(level LevelType, message string) { + fl.Writef(level, "%s\n", message) +} + +func (fl fileLogger) Writef(level LevelType, format string, a ...interface{}) { + if fl.logLevel >= level { + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprintf(fl.logFile, "%s %s", entryHeader(level), fmt.Sprintf(format, a...)) + fl.logFile.Sync() + } +} + +func (fl fileLogger) WriteRequest(req *http.Request, filter Filter) { + if req == nil || fl.logLevel < LogInfo { + return + } + b := &bytes.Buffer{} + fmt.Fprintf(b, "%s REQUEST: %s %s\n", entryHeader(LogInfo), req.Method, filter.processURL(req.URL)) + // dump headers + for k, v := range req.Header { + if ok, mv := filter.processHeader(k, v); ok { + fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) + } + } + if fl.shouldLogBody(req.Header, req.Body) { + // dump body + body, err := ioutil.ReadAll(req.Body) + if err == nil { + fmt.Fprintln(b, string(filter.processBody(body))) + if nc, ok := req.Body.(io.Seeker); ok { + // rewind to the beginning + nc.Seek(0, io.SeekStart) + } else { + // recreate the body + req.Body = ioutil.NopCloser(bytes.NewReader(body)) + } + } else { + fmt.Fprintf(b, "failed to read body: %v\n", err) + } + } + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprint(fl.logFile, b.String()) + fl.logFile.Sync() +} + +func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) { + if resp == nil || fl.logLevel < LogInfo { + return + } + b := &bytes.Buffer{} + fmt.Fprintf(b, "%s RESPONSE: %d %s\n", entryHeader(LogInfo), resp.StatusCode, filter.processURL(resp.Request.URL)) + // dump headers + for k, v := range resp.Header { + if ok, mv := filter.processHeader(k, v); ok { + fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) + } + } + if fl.shouldLogBody(resp.Header, resp.Body) { + // dump body + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err == nil { + fmt.Fprintln(b, string(filter.processBody(body))) + resp.Body = ioutil.NopCloser(bytes.NewReader(body)) + } else { + fmt.Fprintf(b, "failed to read body: %v\n", err) + } + } + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprint(fl.logFile, b.String()) + fl.logFile.Sync() +} + +// returns true if the provided body should be included in the log +func (fl fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool { + ct := header.Get("Content-Type") + return fl.logLevel >= LogDebug && body != nil && strings.Index(ct, "application/octet-stream") == -1 +} + +// creates standard header for log entries, it contains a timestamp and the log level +func entryHeader(level LevelType) string { + // this format provides a fixed number of digits so the size of the timestamp is constant + return fmt.Sprintf("(%s) %s:", time.Now().Format("2006-01-02T15:04:05.0000000Z07:00"), level.String()) +} diff --git a/cluster-autoscaler/_override/github.com/Azure/go-autorest/version/BUILD b/cluster-autoscaler/_override/github.com/Azure/go-autorest/version/BUILD new file mode 100644 index 000000000000..5c8d025bde41 --- /dev/null +++ b/cluster-autoscaler/_override/github.com/Azure/go-autorest/version/BUILD @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["version.go"], + importmap = "k8s.io/kubernetes/vendor/github.com/Azure/go-autorest/version", + importpath = "github.com/Azure/go-autorest/version", + visibility = ["//visibility:public"], +) + 
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+    visibility = ["//visibility:public"],
+)
diff --git a/cluster-autoscaler/_override/github.com/Azure/go-autorest/version/version.go b/cluster-autoscaler/_override/github.com/Azure/go-autorest/version/version.go
new file mode 100644
index 000000000000..6323c288fbfc
--- /dev/null
+++ b/cluster-autoscaler/_override/github.com/Azure/go-autorest/version/version.go
@@ -0,0 +1,37 @@
+package version
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"fmt"
+	"runtime"
+)
+
+// Number contains the semantic version of this SDK.
+const Number = "v11.1.0"
+
+var (
+	userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s",
+		runtime.Version(),
+		runtime.GOARCH,
+		runtime.GOOS,
+		Number,
+	)
+)
+
+// UserAgent returns a string containing the Go version, system architecture and OS, and the go-autorest version.
+func UserAgent() string {
+	return userAgent
+}
diff --git a/cluster-autoscaler/cloudprovider/alicloud/README.md b/cluster-autoscaler/cloudprovider/alicloud/README.md
new file mode 100644
index 000000000000..1c92cf076118
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/alicloud/README.md
@@ -0,0 +1,186 @@
+# Cluster Autoscaler on AliCloud
+The cluster autoscaler on AliCloud scales worker nodes within any specified autoscaling group. It runs as a `Deployment` in your cluster. This README goes over the steps required to get the cluster autoscaler up and running.
+
+## Kubernetes Version
+Cluster Autoscaler must run on Kubernetes v1.9.3 or greater.
+
+## Instance Type Support
+- **Standard Instance**: x86 architecture, suitable for common workloads such as websites or API services.
+- **GPU/FPGA Instance**: heterogeneous computing, suitable for high-performance computing.
+- **Bare Metal Instance**: combines the elasticity of a virtual server with the high performance and comprehensive features of a physical server.
+- **Spot Instance**: on-demand instances designed to reduce your ECS costs in some cases.
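+
+Before following the deployment steps below, you may want to prepare the base64-encoded values for the `cloud-config` Secret used in the Custom Deployment section. A minimal sketch (all values shown are illustrative placeholders, not real credentials):
+
+```shell
+# Encode each value without a trailing newline and paste the output
+# into the matching field of the cloud-config Secret.
+echo -n "your_access_key_id" | base64
+echo -n "your_access_key_secret" | base64
+echo -n "cn-hangzhou" | base64   # region-id; cn-hangzhou is only an example region
+```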
+
+
+## ACS Console Deployment
+See the documentation: https://www.alibabacloud.com/help/doc-detail/89733.html
+
+## Custom Deployment
+### 1. Prepare identity authentication
+#### Use access-key-id and access-key-secret
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: cloud-config
+  namespace: kube-system
+data:
+  # insert your base64 encoded Alicloud access id and key here, ensure there's no trailing newline:
+  # such as: echo -n "your_access_key_id" | base64
+  access-key-id: ""
+  access-key-secret: ""
+  region-id: ""
+```
+#### Use STS with RAM Role
+```json
+{
+  "Version": "1",
+  "Statement": [
+    {
+      "Action": [
+        "ess:Describe*",
+        "ess:CreateScalingRule",
+        "ess:ModifyScalingGroup",
+        "ess:RemoveInstances",
+        "ess:ExecuteScalingRule",
+        "ess:ModifyScalingRule",
+        "ess:DeleteScalingRule",
+        "ess:DetachInstances",
+        "ecs:DescribeInstanceTypes"
+      ],
+      "Resource": [
+        "*"
+      ],
+      "Effect": "Allow"
+    }
+  ]
+}
+```
+
+### 2. ASG Setup
+* Create a Scaling Group in [ESS](https://essnew.console.aliyun.com) with a valid configuration.
+* Create a Scaling Configuration for this Scaling Group with a valid instance type and User Data. In User Data, you can specify the script that initializes the environment and joins the node to the Kubernetes cluster. If your Kubernetes cluster is hosted by ACS, you can use an attach script like the following.
+```shell
+#!/bin/sh
+# The token is generated by the ACS console. https://www.alibabacloud.com/help/doc-detail/64983.htm?spm=a2c63.l28256.b99.33.46395ad54ozJFq
+curl http://aliacs-k8s-cn-hangzhou.oss-cn-hangzhou.aliyuncs.com/public/pkg/run/attach/[kubernetes_cluster_version]/attach_node.sh | bash -s -- --openapi-token [token] --ess true
+```
+
+
+### 3. cluster-autoscaler deployment
+
+#### Use access-key-id and access-key-secret
+```yaml
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: cluster-autoscaler
+  namespace: kube-system
+  labels:
+    app: cluster-autoscaler
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: cluster-autoscaler
+  template:
+    metadata:
+      labels:
+        app: cluster-autoscaler
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      serviceAccountName: admin
+      containers:
+        - image: registry.cn-hangzhou.aliyuncs.com/acs/autoscaler:v1.3.1.2
+          name: cluster-autoscaler
+          resources:
+            limits:
+              cpu: 100m
+              memory: 300Mi
+            requests:
+              cpu: 100m
+              memory: 300Mi
+          command:
+            - ./cluster-autoscaler
+            - --v=4
+            - --stderrthreshold=info
+            - --cloud-provider=alicloud
+            - --nodes=[min]:[max]:[ASG_ID]
+          imagePullPolicy: "Always"
+          env:
+            - name: ACCESS_KEY_ID
+              valueFrom:
+                secretKeyRef:
+                  name: cloud-config
+                  key: access-key-id
+            - name: ACCESS_KEY_SECRET
+              valueFrom:
+                secretKeyRef:
+                  name: cloud-config
+                  key: access-key-secret
+            - name: REGION_ID
+              valueFrom:
+                secretKeyRef:
+                  name: cloud-config
+                  key: region-id
+          volumeMounts:
+            - name: ssl-certs
+              mountPath: /etc/ssl/certs/ca-certificates.crt
+              readOnly: true
+      volumes:
+        - name: ssl-certs
+          hostPath:
+            path: "/etc/ssl/certs/ca-certificates.crt"
+```
+
+#### Use STS with RAM Role
+
+```yaml
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: cluster-autoscaler
+  namespace: kube-system
+  labels:
+    app: cluster-autoscaler
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: cluster-autoscaler
+  template:
+    metadata:
+      labels:
+        app: cluster-autoscaler
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      serviceAccountName: admin
+      containers:
+        - image: registry.cn-hangzhou.aliyuncs.com/acs/autoscaler:v1.3.1.2
+          name: cluster-autoscaler
+          resources:
+            limits:
+              cpu: 100m
+              memory: 300Mi
+            requests:
+              cpu: 100m
+              memory: 300Mi
+          command:
+            - ./cluster-autoscaler
+            - --v=4
+            - --stderrthreshold=info
+            - --cloud-provider=alicloud
+            - --nodes=[min]:[max]:[ASG_ID]
+          imagePullPolicy: "Always"
+```
+
+### Auto-Discovery Setup
+Auto-discovery is not currently supported on AliCloud.
+
+## Common Notes and Gotchas:
+- The `/etc/ssl/certs/ca-certificates.crt` file should exist by default on your ECS instance.
+- By default, cluster autoscaler will not terminate nodes running pods in the kube-system namespace. You can override this default behaviour by passing in the `--skip-nodes-with-system-pods=false` flag.
+- By default, cluster autoscaler will wait 10 minutes between scale-down operations; you can adjust this using the `--scale-down-delay` flag, e.g. `--scale-down-delay=5m` to decrease the scale-down delay to 5 minutes.
+- If you're running multiple ASGs, the `--expander` flag supports three options: `random`, `most-pods` and `least-waste`. `random` will expand a random ASG on scale up. `most-pods` will scale up the ASG that can schedule the most pods. `least-waste` will expand the ASG that will waste the least amount of CPU/MEM resources. In the event of a tie, cluster-autoscaler will fall back to `random`.
diff --git a/cluster-autoscaler/cloudprovider/alicloud/acs.png b/cluster-autoscaler/cloudprovider/alicloud/acs.png
new file mode 100644
index 000000000000..1032225d3432
Binary files /dev/null and b/cluster-autoscaler/cloudprovider/alicloud/acs.png differ
diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/credential.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/credential.go
new file mode 100644
index 000000000000..bcf011b6ea0d
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/credential.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package auth
+
+// Credential is an empty marker interface implemented by all credential types
+type Credential interface{}
diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/credentials/access_key_credential.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/credentials/access_key_credential.go
new file mode 100644
index 000000000000..81aa6a0f1786
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/credentials/access_key_credential.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package credentials + +// BaseCredential is deprecated: Use AccessKeyCredential in this package instead. +type BaseCredential struct { + AccessKeyId string + AccessKeySecret string +} + +// AccessKeyCredential is kind of credential +type AccessKeyCredential struct { + AccessKeyId string + AccessKeySecret string +} + +// NewBaseCredential is deprecated: Use NewAccessKeyCredential in this package instead. +func NewBaseCredential(accessKeyId, accessKeySecret string) *BaseCredential { + return &BaseCredential{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + } +} + +// ToAccessKeyCredential returns AccessKeyCredential +func (baseCred *BaseCredential) ToAccessKeyCredential() *AccessKeyCredential { + return &AccessKeyCredential{ + AccessKeyId: baseCred.AccessKeyId, + AccessKeySecret: baseCred.AccessKeySecret, + } +} + +// NewAccessKeyCredential returns AccessKeyCredential +func NewAccessKeyCredential(accessKeyId, accessKeySecret string) *AccessKeyCredential { + return &AccessKeyCredential{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + } +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/credentials/ecs_ram_role.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/credentials/ecs_ram_role.go new file mode 100644 index 000000000000..79ba19dd85f2 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/credentials/ecs_ram_role.go @@ -0,0 +1,48 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package credentials + +// StsRoleNameOnEcsCredential is deprecated: Use EcsRamRoleCredential in this package instead. +type StsRoleNameOnEcsCredential struct { + RoleName string +} + +// NewStsRoleNameOnEcsCredential is deprecated: Use NewEcsRamRoleCredential in this package instead. 
+func NewStsRoleNameOnEcsCredential(roleName string) *StsRoleNameOnEcsCredential { + return &StsRoleNameOnEcsCredential{ + RoleName: roleName, + } +} + +// ToEcsRamRoleCredential is deprecated +func (oldCred *StsRoleNameOnEcsCredential) ToEcsRamRoleCredential() *EcsRamRoleCredential { + return &EcsRamRoleCredential{ + RoleName: oldCred.RoleName, + } +} + +// EcsRamRoleCredential is kind of credential on ECS +type EcsRamRoleCredential struct { + RoleName string +} + +// NewEcsRamRoleCredential returns EcsRamRoleCredential +func NewEcsRamRoleCredential(roleName string) *EcsRamRoleCredential { + return &EcsRamRoleCredential{ + RoleName: roleName, + } +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/credentials/rsa_key_pair_credential.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/credentials/rsa_key_pair_credential.go new file mode 100644 index 000000000000..0947e280b7a8 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/credentials/rsa_key_pair_credential.go @@ -0,0 +1,33 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package credentials + +// RsaKeyPairCredential is kind of credential +type RsaKeyPairCredential struct { + PrivateKey string + PublicKeyId string + SessionExpiration int +} + +// NewRsaKeyPairCredential returns RsaKeyPairCredential +func NewRsaKeyPairCredential(privateKey, publicKeyId string, sessionExpiration int) *RsaKeyPairCredential { + return &RsaKeyPairCredential{ + PrivateKey: privateKey, + PublicKeyId: publicKeyId, + SessionExpiration: sessionExpiration, + } +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/credentials/sts_credential.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/credentials/sts_credential.go new file mode 100644 index 000000000000..03eb455baad1 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/credentials/sts_credential.go @@ -0,0 +1,33 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package credentials + +// StsTokenCredential is kind of credential +type StsTokenCredential struct { + AccessKeyId string + AccessKeySecret string + AccessKeyStsToken string +} + +// NewStsTokenCredential returns StsTokenCredential +func NewStsTokenCredential(accessKeyId, accessKeySecret, accessKeyStsToken string) *StsTokenCredential { + return &StsTokenCredential{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + AccessKeyStsToken: accessKeyStsToken, + } +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/credentials/sts_role_arn_credential.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/credentials/sts_role_arn_credential.go new file mode 100644 index 000000000000..fed6e0a2b673 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/credentials/sts_role_arn_credential.go @@ -0,0 +1,68 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package credentials + +// StsRoleArnCredential is deprecated: Use RamRoleArnCredential in this package instead. +type StsRoleArnCredential struct { + AccessKeyId string + AccessKeySecret string + RoleArn string + RoleSessionName string + RoleSessionExpiration int +} + +// RamRoleArnCredential is going to replace StsRoleArnCredential +type RamRoleArnCredential struct { + AccessKeyId string + AccessKeySecret string + RoleArn string + RoleSessionName string + RoleSessionExpiration int +} + +// NewStsRoleArnCredential is deprecated: Use RamRoleArnCredential in this package instead. 
+func NewStsRoleArnCredential(accessKeyId, accessKeySecret, roleArn, roleSessionName string, roleSessionExpiration int) *StsRoleArnCredential { + return &StsRoleArnCredential{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + RoleArn: roleArn, + RoleSessionName: roleSessionName, + RoleSessionExpiration: roleSessionExpiration, + } +} + +// ToRamRoleArnCredential returns RamRoleArnCredential +func (oldCred *StsRoleArnCredential) ToRamRoleArnCredential() *RamRoleArnCredential { + return &RamRoleArnCredential{ + AccessKeyId: oldCred.AccessKeyId, + AccessKeySecret: oldCred.AccessKeySecret, + RoleArn: oldCred.RoleArn, + RoleSessionName: oldCred.RoleSessionName, + RoleSessionExpiration: oldCred.RoleSessionExpiration, + } +} + +// NewRamRoleArnCredential returns RamRoleArnCredential +func NewRamRoleArnCredential(accessKeyId, accessKeySecret, roleArn, roleSessionName string, roleSessionExpiration int) *RamRoleArnCredential { + return &RamRoleArnCredential{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + RoleArn: roleArn, + RoleSessionName: roleSessionName, + RoleSessionExpiration: roleSessionExpiration, + } +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/roa_signature_composer.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/roa_signature_composer.go new file mode 100644 index 000000000000..edd76c47bf41 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/roa_signature_composer.go @@ -0,0 +1,123 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package auth
+
+import (
+	"bytes"
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests"
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/utils"
+	"sort"
+	"strings"
+)
+
+func signRoaRequest(request requests.AcsRequest, signer Signer, regionId string) (err error) {
+	completeROASignParams(request, signer, regionId)
+	stringToSign := buildRoaStringToSign(request)
+	request.SetStringToSign(stringToSign)
+	signature := signer.Sign(stringToSign, "")
+	accessKeyId, err := signer.GetAccessKeyId()
+	if err != nil {
+		return err
+	}
+
+	request.GetHeaders()["Authorization"] = "acs " + accessKeyId + ":" + signature
+
+	return
+}
+
+func completeROASignParams(request requests.AcsRequest, signer Signer, regionId string) {
+	headerParams := request.GetHeaders()
+
+	// complete query params
+	queryParams := request.GetQueryParams()
+	//if _, ok := queryParams["RegionId"]; !ok {
+	//	queryParams["RegionId"] = regionId
+	//}
+	if extraParam := signer.GetExtraParam(); extraParam != nil {
+		for key, value := range extraParam {
+			if key == "SecurityToken" {
+				headerParams["x-acs-security-token"] = value
+				continue
+			}
+
+			queryParams[key] = value
+		}
+	}
+
+	// complete header params
+	headerParams["Date"] = utils.GetTimeInFormatRFC2616()
+	headerParams["x-acs-signature-method"] = signer.GetName()
+	headerParams["x-acs-signature-version"] = signer.GetVersion()
+	if request.GetFormParams() != nil && len(request.GetFormParams()) > 0 {
+		formString := utils.GetUrlFormedMap(request.GetFormParams())
+		request.SetContent([]byte(formString))
+		headerParams["Content-Type"] = requests.Form
+	}
+	contentMD5 := utils.GetMD5Base64(request.GetContent())
+	headerParams["Content-MD5"] = contentMD5
+	if _, contains := headerParams["Content-Type"]; !contains {
+		headerParams["Content-Type"] = requests.Raw
+	}
+	switch format := request.GetAcceptFormat(); format {
+	case "JSON":
+		headerParams["Accept"] = requests.Json
+	case "XML":
+		headerParams["Accept"] = requests.Xml
+	default:
+		headerParams["Accept"] = requests.Raw
+	}
+}
+
+func buildRoaStringToSign(request requests.AcsRequest) (stringToSign string) {
+
+	headers := request.GetHeaders()
+
+	stringToSignBuilder := bytes.Buffer{}
+	stringToSignBuilder.WriteString(request.GetMethod())
+	stringToSignBuilder.WriteString(requests.HeaderSeparator)
+
+	// append header keys for sign
+	appendIfContain(headers, &stringToSignBuilder, "Accept", requests.HeaderSeparator)
+	appendIfContain(headers, &stringToSignBuilder, "Content-MD5", requests.HeaderSeparator)
+	appendIfContain(headers, &stringToSignBuilder, "Content-Type", requests.HeaderSeparator)
+	appendIfContain(headers, &stringToSignBuilder, "Date", requests.HeaderSeparator)
+
+	// sort and append headers which start with 'x-acs-'
+	var acsHeaders []string
+	for key := range headers {
+		if strings.HasPrefix(key, "x-acs-") {
+			acsHeaders = append(acsHeaders, key)
+		}
+	}
+	sort.Strings(acsHeaders)
+	for _, key := range acsHeaders {
+		stringToSignBuilder.WriteString(key + ":" + headers[key])
+		stringToSignBuilder.WriteString(requests.HeaderSeparator)
+	}
+
+	// append query params
+	stringToSignBuilder.WriteString(request.BuildQueries())
+	stringToSign = stringToSignBuilder.String()
+	return
+}
+
+func appendIfContain(sourceMap map[string]string, target *bytes.Buffer, key, separator string) {
+	if value, contain := sourceMap[key]; contain && len(value) > 0 {
+		target.WriteString(sourceMap[key])
+		target.WriteString(separator)
+	}
+}
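For reference, the canonical string assembled by `buildRoaStringToSign` above is what the `Signer` implementations later in this change feed into base64-encoded HMAC-SHA1 (`ShaHmac1`). A minimal sketch of reproducing such a signature offline, assuming `requests.HeaderSeparator` is a newline; every value below, including the secret, is an illustrative placeholder:

```shell
# string_to_sign stands in for the output of buildRoaStringToSign:
# method, Accept, Content-MD5 (of an empty body here), Content-Type, Date,
# sorted x-acs-* headers, then the canonicalized query string.
string_to_sign='GET
application/json
1B2M2Y8AsgTpgAmY7PhCfg==
application/octet-stream
Thu, 15 Nov 2018 08:00:00 GMT
x-acs-signature-method:HMAC-SHA1
x-acs-signature-version:1.0
/'
# ShaHmac1 equivalent: raw HMAC-SHA1, then base64.
printf '%s' "$string_to_sign" | openssl dgst -sha1 -hmac "your_access_key_secret" -binary | base64
```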
diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/rpc_signature_composer.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/rpc_signature_composer.go new file mode 100644 index 000000000000..eb10e1c978d0 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/rpc_signature_composer.go @@ -0,0 +1,98 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package auth + +import ( + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/utils" + "net/url" + "sort" + "strings" +) + +func signRpcRequest(request requests.AcsRequest, signer Signer, regionId string) (err error) { + err = completeRpcSignParams(request, signer, regionId) + if err != nil { + return + } + // remove while retry + if _, containsSign := request.GetQueryParams()["Signature"]; containsSign { + delete(request.GetQueryParams(), "Signature") + } + stringToSign := buildRpcStringToSign(request) + request.SetStringToSign(stringToSign) + signature := signer.Sign(stringToSign, "&") + request.GetQueryParams()["Signature"] = signature + + return +} + +func completeRpcSignParams(request requests.AcsRequest, signer Signer, regionId string) (err error) { + queryParams := request.GetQueryParams() + queryParams["Version"] = request.GetVersion() + queryParams["Action"] = request.GetActionName() + queryParams["Format"] = request.GetAcceptFormat() + queryParams["Timestamp"] = utils.GetTimeInFormatISO8601() + queryParams["SignatureMethod"] = signer.GetName() + queryParams["SignatureType"] = signer.GetType() + queryParams["SignatureVersion"] = signer.GetVersion() + queryParams["SignatureNonce"] = utils.GetUUIDV4() + queryParams["AccessKeyId"], err = signer.GetAccessKeyId() + + if err != nil { + return + } + + if _, contains := queryParams["RegionId"]; !contains { + queryParams["RegionId"] = regionId + } + if extraParam := signer.GetExtraParam(); extraParam != nil { + for key, value := range extraParam { + queryParams[key] = value + } + } + + request.GetHeaders()["Content-Type"] = requests.Form + formString := utils.GetUrlFormedMap(request.GetFormParams()) + request.SetContent([]byte(formString)) + + return +} + +func buildRpcStringToSign(request requests.AcsRequest) (stringToSign string) { + signParams := make(map[string]string) + for key, value := range request.GetQueryParams() { + signParams[key] = value + } + for key, value := range request.GetFormParams() { + signParams[key] = value + } + + // sort params by key + var paramKeySlice []string + for key := range signParams { + paramKeySlice = append(paramKeySlice, key) + } + sort.Strings(paramKeySlice) + stringToSign = utils.GetUrlFormedMap(signParams) + stringToSign = strings.Replace(stringToSign, "+", "%20", -1) + stringToSign = strings.Replace(stringToSign, "*", "%2A", -1) + stringToSign = strings.Replace(stringToSign, "%7E", "~", -1) + 
stringToSign = url.QueryEscape(stringToSign) + stringToSign = request.GetMethod() + "&%2F&" + stringToSign + return +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/signer.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/signer.go new file mode 100644 index 000000000000..424db3b3cd3d --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/signer.go @@ -0,0 +1,100 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package auth + +import ( + "fmt" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/credentials" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/signers" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/errors" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/responses" + "reflect" +) + +// Signer sign client token +type Signer interface { + GetName() string + GetType() string + GetVersion() string + GetAccessKeyId() (string, error) + GetExtraParam() map[string]string + Sign(stringToSign, secretSuffix string) string + Shutdown() +} + +// NewSignerWithCredential create signer with credential +func NewSignerWithCredential(credential Credential, commonApi func(request *requests.CommonRequest, signer interface{}) (response *responses.CommonResponse, err error)) (signer Signer, err error) { + switch instance := credential.(type) { + case *credentials.AccessKeyCredential: + { + signer, err = signers.NewAccessKeySigner(instance) + } + case *credentials.StsTokenCredential: + { + signer, err = signers.NewStsTokenSigner(instance) + } + + case *credentials.RamRoleArnCredential: + { + signer, err = signers.NewRamRoleArnSigner(instance, commonApi) + } + case *credentials.RsaKeyPairCredential: + { + signer, err = signers.NewSignerKeyPair(instance, commonApi) + } + case *credentials.EcsRamRoleCredential: + { + signer, err = signers.NewEcsRamRoleSigner(instance, commonApi) + } + case *credentials.BaseCredential: // deprecated user interface + { + signer, err = signers.NewAccessKeySigner(instance.ToAccessKeyCredential()) + } + case *credentials.StsRoleArnCredential: // deprecated user interface + { + signer, err = signers.NewRamRoleArnSigner(instance.ToRamRoleArnCredential(), commonApi) + } + case *credentials.StsRoleNameOnEcsCredential: // deprecated user interface + { + signer, err = signers.NewEcsRamRoleSigner(instance.ToEcsRamRoleCredential(), commonApi) + } + default: + message := fmt.Sprintf(errors.UnsupportedCredentialErrorMessage, reflect.TypeOf(credential)) + err = errors.NewClientError(errors.UnsupportedCredentialErrorCode, message, nil) + } + return +} + +// Sign will generate signer token +func Sign(request requests.AcsRequest, signer Signer, regionId string) (err error) { + switch request.GetStyle() 
{
+	case requests.ROA:
+		{
+			err = signRoaRequest(request, signer, regionId)
+		}
+	case requests.RPC:
+		{
+			err = signRpcRequest(request, signer, regionId)
+		}
+	default:
+		message := fmt.Sprintf(errors.UnknownRequestTypeErrorMessage, reflect.TypeOf(request))
+		err = errors.NewClientError(errors.UnknownRequestTypeErrorCode, message, nil)
+	}
+
+	return
+}
diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/signers/algorithms.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/signers/algorithms.go
new file mode 100644
index 000000000000..3282488ab285
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/signers/algorithms.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package signers
+
+import (
+	"crypto"
+	"crypto/hmac"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/sha1"
+	"crypto/x509"
+	"encoding/base64"
+	"fmt"
+)
+
+// ShaHmac1 returns the base64-encoded HMAC-SHA1 signature of source keyed with secret
+func ShaHmac1(source, secret string) string {
+	key := []byte(secret)
+	hmac := hmac.New(sha1.New, key)
+	hmac.Write([]byte(source))
+	signedBytes := hmac.Sum(nil)
+	signedString := base64.StdEncoding.EncodeToString(signedBytes)
+	return signedString
+}
+
+// Sha256WithRsa returns the base64-encoded SHA256-with-RSA signature of source, using the base64-encoded PKCS#8 private key in secret
+func Sha256WithRsa(source, secret string) string {
+	decodeString, err := base64.StdEncoding.DecodeString(secret)
+	if err != nil {
+		fmt.Println("DecodeString err", err)
+	}
+	private, err := x509.ParsePKCS8PrivateKey(decodeString)
+	if err != nil {
+		fmt.Println("ParsePKCS8PrivateKey err", err)
+	}
+
+	h := crypto.Hash.New(crypto.SHA256)
+	h.Write([]byte(source))
+	hashed := h.Sum(nil)
+	signature, err := rsa.SignPKCS1v15(rand.Reader, private.(*rsa.PrivateKey),
+		crypto.SHA256, hashed)
+	if err != nil {
+		fmt.Println("Error from signing:", err)
+		return ""
+	}
+
+	signedString := base64.StdEncoding.EncodeToString(signature)
+	//fmt.Printf("Encoded: %v\n", signedString)
+	return signedString
+}
diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/signers/credential_updater.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/signers/credential_updater.go
new file mode 100644
index 000000000000..8c60db55c0f3
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/signers/credential_updater.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package signers + +import ( + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/responses" + "time" +) + +const defaultInAdvanceScale = 0.8 + +type credentialUpdater struct { + credentialExpiration int + lastUpdateTimestamp int64 + inAdvanceScale float64 + buildRequestMethod func() (*requests.CommonRequest, error) + responseCallBack func(response *responses.CommonResponse) error + refreshApi func(request *requests.CommonRequest) (response *responses.CommonResponse, err error) +} + +func (updater *credentialUpdater) needUpdateCredential() (result bool) { + if updater.inAdvanceScale == 0 { + updater.inAdvanceScale = defaultInAdvanceScale + } + return time.Now().Unix()-updater.lastUpdateTimestamp >= int64(float64(updater.credentialExpiration)*updater.inAdvanceScale) +} + +func (updater *credentialUpdater) updateCredential() (err error) { + request, err := updater.buildRequestMethod() + if err != nil { + return + } + response, err := updater.refreshApi(request) + if err != nil { + return + } + updater.lastUpdateTimestamp = time.Now().Unix() + err = updater.responseCallBack(response) + return +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/signers/session_credential.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/signers/session_credential.go new file mode 100644 index 000000000000..b0848d41aa6e --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/signers/session_credential.go @@ -0,0 +1,24 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package signers + +// SessionCredential is kind of credential +type SessionCredential struct { + AccessKeyId string + AccessKeySecret string + StsToken string +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/signers/signer_access_key.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/signers/signer_access_key.go new file mode 100644 index 000000000000..78dbe5b124ac --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/signers/signer_access_key.go @@ -0,0 +1,67 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package signers
+
+import (
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/credentials"
+)
+
+// AccessKeySigner is wrapper of AccessKeyCredential
+type AccessKeySigner struct {
+	credential *credentials.AccessKeyCredential
+}
+
+// GetExtraParam doesn't implement
+func (signer *AccessKeySigner) GetExtraParam() map[string]string {
+	return nil
+}
+
+// NewAccessKeySigner returns AccessKeySigner
+func NewAccessKeySigner(credential *credentials.AccessKeyCredential) (*AccessKeySigner, error) {
+	return &AccessKeySigner{
+		credential: credential,
+	}, nil
+}
+
+// GetName returns "HMAC-SHA1"
+func (*AccessKeySigner) GetName() string {
+	return "HMAC-SHA1"
+}
+
+// GetType returns ""
+func (*AccessKeySigner) GetType() string {
+	return ""
+}
+
+// GetVersion returns "1.0"
+func (*AccessKeySigner) GetVersion() string {
+	return "1.0"
+}
+
+// GetAccessKeyId returns accessKeyId
+func (signer *AccessKeySigner) GetAccessKeyId() (accessKeyId string, err error) {
+	return signer.credential.AccessKeyId, nil
+}
+
+// Sign returns the HMAC-SHA1 signature of stringToSign
+func (signer *AccessKeySigner) Sign(stringToSign, secretSuffix string) string {
+	secret := signer.credential.AccessKeySecret + secretSuffix
+	return ShaHmac1(stringToSign, secret)
+}
+
+// Shutdown doesn't implement
+func (signer *AccessKeySigner) Shutdown() {}
diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/signers/signer_ecs_ram_role.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/signers/signer_ecs_ram_role.go
new file mode 100644
index 000000000000..c64c40d82777
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/signers/signer_ecs_ram_role.go
@@ -0,0 +1,185 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package signers
+
+import (
+	"encoding/json"
+	"fmt"
+	"github.com/jmespath/go-jmespath"
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/credentials"
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests"
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/responses"
+	"net/http"
+	"strings"
+	"time"
+)
+
+// EcsRamRoleSigner is kind of signer
+type EcsRamRoleSigner struct {
+	*credentialUpdater
+	sessionCredential *SessionCredential
+	credential        *credentials.EcsRamRoleCredential
+	commonApi         func(request *requests.CommonRequest, signer interface{}) (response *responses.CommonResponse, err error)
+}
+
+// NewEcsRamRoleSigner returns EcsRamRoleSigner
+func NewEcsRamRoleSigner(credential *credentials.EcsRamRoleCredential, commonApi func(*requests.CommonRequest, interface{}) (response *responses.CommonResponse, err error)) (signer *EcsRamRoleSigner, err error) {
+	signer = &EcsRamRoleSigner{
+		credential: credential,
+		commonApi:  commonApi,
+	}
+
+	signer.credentialUpdater = &credentialUpdater{
+		credentialExpiration: defaultDurationSeconds / 60,
+		buildRequestMethod:   signer.buildCommonRequest,
+		responseCallBack:     signer.refreshCredential,
+		refreshApi:           signer.refreshApi,
+	}
+
+	return
+}
+
+// GetName returns "HMAC-SHA1"
+func (*EcsRamRoleSigner) GetName() string {
+	return "HMAC-SHA1"
+}
+
+// GetType returns ""
+func (*EcsRamRoleSigner) GetType() string {
+	return ""
+}
+
+// GetVersion returns "1.0"
+func (*EcsRamRoleSigner) GetVersion() string {
+	return "1.0"
+}
+
+// GetAccessKeyId returns accessKeyId
+func (signer *EcsRamRoleSigner) GetAccessKeyId() (accessKeyId string, err error) {
+	if signer.sessionCredential == nil || signer.needUpdateCredential() {
+		err = signer.updateCredential()
+	}
+	if err != nil && (signer.sessionCredential == nil || len(signer.sessionCredential.AccessKeyId) <= 0) {
+		return "", err
+	}
+	return signer.sessionCredential.AccessKeyId, nil
+}
+
+// GetExtraParam returns params in map
+func (signer *EcsRamRoleSigner) GetExtraParam() map[string]string {
+	if signer.sessionCredential == nil {
+		return make(map[string]string)
+	}
+	if len(signer.sessionCredential.StsToken) <= 0 {
+		return make(map[string]string)
+	}
+	return map[string]string{"SecurityToken": signer.sessionCredential.StsToken}
+}
+
+// Sign returns the HMAC-SHA1 signature of stringToSign
+func (signer *EcsRamRoleSigner) Sign(stringToSign, secretSuffix string) string {
+	// sign with the session access key secret, not the key id
+	secret := signer.sessionCredential.AccessKeySecret + secretSuffix
+	return ShaHmac1(stringToSign, secret)
+}
+
+func (signer *EcsRamRoleSigner) buildCommonRequest() (request *requests.CommonRequest, err error) {
+	request = requests.NewCommonRequest()
+	return
+}
+
+func (signer *EcsRamRoleSigner) refreshApi(request *requests.CommonRequest) (response *responses.CommonResponse, err error) {
+	requestUrl := "http://100.100.100.200/latest/meta-data/ram/security-credentials/" + signer.credential.RoleName
+	httpRequest, err := http.NewRequest(requests.GET, requestUrl, strings.NewReader(""))
+	if err != nil {
+		fmt.Println("refresh Ecs sts token err", err)
+		return
+	}
+	httpClient := &http.Client{}
+	httpResponse, err := httpClient.Do(httpRequest)
+	if err != nil {
+		fmt.Println("refresh Ecs sts token err", err)
+		return
+	}
+
+	response = responses.NewCommonResponse()
+	err = responses.Unmarshal(response, httpResponse, "")
+
+	return
+}
+
+func (signer *EcsRamRoleSigner) refreshCredential(response *responses.CommonResponse) (err error) {
+	if response.GetHttpStatus() != http.StatusOK {
+		fmt.Println("refresh Ecs sts token err, httpStatus:", response.GetHttpStatus(), ", message =", response.GetHttpContentString())
+		return
+	}
+	var data interface{}
+	err = json.Unmarshal(response.GetHttpContentBytes(), &data)
+	if err != nil {
+		fmt.Println("refresh Ecs sts token err, json.Unmarshal fail", err)
+		return
+	}
+	code, err := jmespath.Search("Code", data)
+	if err != nil {
+		fmt.Println("refresh Ecs sts token err, fail to get Code", err)
+		return
+	}
+	if code.(string) != "Success" {
+		fmt.Println("refresh Ecs sts token err, Code is not Success", err)
+		return
+	}
+	accessKeyId, err := jmespath.Search("AccessKeyId", data)
+	if err != nil {
+		fmt.Println("refresh Ecs sts token err, fail to get AccessKeyId", err)
+		return
+	}
+	accessKeySecret, err := jmespath.Search("AccessKeySecret", data)
+	if err != nil {
+		fmt.Println("refresh Ecs sts token err, fail to get AccessKeySecret", err)
+		return
+	}
+	securityToken, err := jmespath.Search("SecurityToken", data)
+	if err != nil {
+		fmt.Println("refresh Ecs sts token err, fail to get SecurityToken", err)
+		return
+	}
+	expiration, err := jmespath.Search("Expiration", data)
+	if err != nil {
+		fmt.Println("refresh Ecs sts token err, fail to get Expiration", err)
+		return
+	}
+	if accessKeyId == nil || accessKeySecret == nil || securityToken == nil {
+		return
+	}
+
+	expirationTime, err := time.Parse("2006-01-02T15:04:05Z", expiration.(string))
+	signer.credentialExpiration = int(expirationTime.Unix() - time.Now().Unix())
+	signer.sessionCredential = &SessionCredential{
+		AccessKeyId:     accessKeyId.(string),
+		AccessKeySecret: accessKeySecret.(string),
+		StsToken:        securityToken.(string),
+	}
+
+	return
+}
+
+// GetSessionCredential returns SessionCredential
+func (signer *EcsRamRoleSigner) GetSessionCredential() *SessionCredential {
+	return signer.sessionCredential
+}
+
+// Shutdown doesn't implement
+func (signer *EcsRamRoleSigner) Shutdown() {}
diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/signers/signer_key_pair.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/signers/signer_key_pair.go
new file mode 100644
index 000000000000..dd10a73e94eb
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/signers/signer_key_pair.go
@@ -0,0 +1,157 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package signers
+
+import (
+	"encoding/json"
+	"fmt"
+	"github.com/jmespath/go-jmespath"
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/credentials"
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/errors"
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests"
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/responses"
+	"net/http"
+	"strconv"
+)
+
+// SignerKeyPair is kind of signer
+type SignerKeyPair struct {
+	*credentialUpdater
+	sessionCredential *SessionCredential
+	credential        *credentials.RsaKeyPairCredential
+	commonApi         func(request *requests.CommonRequest, signer interface{}) (response *responses.CommonResponse, err error)
+}
+
+// NewSignerKeyPair returns SignerKeyPair
+func NewSignerKeyPair(credential *credentials.RsaKeyPairCredential, commonApi func(*requests.CommonRequest, interface{}) (response *responses.CommonResponse, err error)) (signer *SignerKeyPair, err error) {
+	signer = &SignerKeyPair{
+		credential: credential,
+		commonApi:  commonApi,
+	}
+
+	signer.credentialUpdater = &credentialUpdater{
+		credentialExpiration: credential.SessionExpiration,
+		buildRequestMethod:   signer.buildCommonRequest,
+		responseCallBack:     signer.refreshCredential,
+		refreshApi:           signer.refreshApi,
+	}
+
+	if credential.SessionExpiration > 0 {
+		if credential.SessionExpiration >= 900 && credential.SessionExpiration <= 3600 {
+			signer.credentialExpiration = credential.SessionExpiration
+		} else {
+			err = errors.NewClientError(errors.InvalidParamErrorCode, "Key Pair session duration should be in the range of 15min - 1Hr", nil)
+		}
+	} else {
+		signer.credentialExpiration = defaultDurationSeconds
+	}
+	return
+}
+
+// GetName returns "HMAC-SHA1"
+func (*SignerKeyPair) GetName() string {
+	return "HMAC-SHA1"
+}
+
+// GetType returns ""
+func (*SignerKeyPair) GetType() string {
+	return ""
+}
+
+// GetVersion returns "1.0"
+func (*SignerKeyPair) GetVersion() string {
+	return "1.0"
+}
+
+// GetAccessKeyId returns accessKeyId
+func (signer *SignerKeyPair) GetAccessKeyId() (accessKeyId string, err error) {
+	if signer.sessionCredential == nil || signer.needUpdateCredential() {
+		err = signer.updateCredential()
+	}
+	if err != nil && (signer.sessionCredential == nil || len(signer.sessionCredential.AccessKeyId) <= 0) {
+		return "", err
+	}
+	return signer.sessionCredential.AccessKeyId, err
+}
+
+// GetExtraParam returns params
+func (signer *SignerKeyPair) GetExtraParam() map[string]string {
+	if signer.sessionCredential == nil || signer.needUpdateCredential() {
+		signer.updateCredential()
+	}
+	if signer.sessionCredential == nil || len(signer.sessionCredential.AccessKeyId) <= 0 {
+		return make(map[string]string)
+	}
+	return make(map[string]string)
+}
+
+// Sign returns the HMAC-SHA1 signature of stringToSign
+func (signer *SignerKeyPair) Sign(stringToSign, secretSuffix string) string {
+	// sign with the session access key secret, not the key id
+	secret := signer.sessionCredential.AccessKeySecret + secretSuffix
+	return ShaHmac1(stringToSign, secret)
+}
+
+func (signer *SignerKeyPair) buildCommonRequest() (request *requests.CommonRequest, err error) {
+	request = requests.NewCommonRequest()
+	request.Product = "Sts"
+	request.Version = "2015-04-01"
+	request.ApiName = "GenerateSessionAccessKey"
+	request.Scheme = requests.HTTPS
+	request.QueryParams["PublicKeyId"] = signer.credential.PublicKeyId
+	request.QueryParams["DurationSeconds"] = strconv.Itoa(signer.credentialExpiration)
+	return
+}
+
+func (signer *SignerKeyPair) 
+
+func (signer *SignerKeyPair) refreshApi(request *requests.CommonRequest) (response *responses.CommonResponse, err error) {
+	signerV2, err := NewSignerV2(signer.credential)
+	if err != nil {
+		return nil, err
+	}
+	return signer.commonApi(request, signerV2)
+}
+
+func (signer *SignerKeyPair) refreshCredential(response *responses.CommonResponse) (err error) {
+	if response.GetHttpStatus() != http.StatusOK {
+		message := "refresh session AccessKey failed"
+		err = errors.NewServerError(response.GetHttpStatus(), response.GetHttpContentString(), message)
+		return
+	}
+	var data interface{}
+	err = json.Unmarshal(response.GetHttpContentBytes(), &data)
+	if err != nil {
+		fmt.Println("refresh KeyPair err, json.Unmarshal fail", err)
+		return
+	}
+	accessKeyId, err := jmespath.Search("SessionAccessKey.SessionAccessKeyId", data)
+	if err != nil {
+		fmt.Println("refresh KeyPair err, fail to get SessionAccessKeyId", err)
+		return
+	}
+	accessKeySecret, err := jmespath.Search("SessionAccessKey.SessionAccessKeySecret", data)
+	if err != nil {
+		fmt.Println("refresh KeyPair err, fail to get SessionAccessKeySecret", err)
+		return
+	}
+	if accessKeyId == nil || accessKeySecret == nil {
+		return
+	}
+	signer.sessionCredential = &SessionCredential{
+		AccessKeyId:     accessKeyId.(string),
+		AccessKeySecret: accessKeySecret.(string),
+	}
+	return
+}
+
+// Shutdown is a no-op
+func (signer *SignerKeyPair) Shutdown() {}
diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/signers/signer_ram_role_arn.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/signers/signer_ram_role_arn.go
new file mode 100644
index 000000000000..4f5b6cd60316
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/signers/signer_ram_role_arn.go
@@ -0,0 +1,184 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package signers + +import ( + "encoding/json" + "fmt" + "github.com/jmespath/go-jmespath" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/credentials" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/errors" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/responses" + "net/http" + "strconv" + "time" +) + +const ( + defaultDurationSeconds = 3600 +) + +// RamRoleArnSigner is kind of signer +type RamRoleArnSigner struct { + *credentialUpdater + roleSessionName string + sessionCredential *SessionCredential + credential *credentials.RamRoleArnCredential + commonApi func(request *requests.CommonRequest, signer interface{}) (response *responses.CommonResponse, err error) +} + +// NewRamRoleArnSigner returns RamRoleArnSigner +func NewRamRoleArnSigner(credential *credentials.RamRoleArnCredential, commonApi func(request *requests.CommonRequest, signer interface{}) (response *responses.CommonResponse, err error)) (signer *RamRoleArnSigner, err error) { + signer = &RamRoleArnSigner{ + credential: credential, + commonApi: commonApi, + } + + signer.credentialUpdater = &credentialUpdater{ + credentialExpiration: credential.RoleSessionExpiration, + buildRequestMethod: signer.buildCommonRequest, + responseCallBack: signer.refreshCredential, + refreshApi: signer.refreshApi, + } + + if len(credential.RoleSessionName) > 0 { + signer.roleSessionName = credential.RoleSessionName + } else { + signer.roleSessionName = "aliyun-go-sdk-" + strconv.FormatInt(time.Now().UnixNano()/1000, 10) + } + if credential.RoleSessionExpiration > 0 { + if credential.RoleSessionExpiration >= 900 && credential.RoleSessionExpiration <= 3600 { + signer.credentialExpiration = credential.RoleSessionExpiration + } else { + err = errors.NewClientError(errors.InvalidParamErrorCode, "Assume Role session duration should be in the range of 15min - 1Hr", nil) + } + } else { + signer.credentialExpiration = defaultDurationSeconds + } + return +} + +// GetName returns "HMAC-SHA1" +func (*RamRoleArnSigner) GetName() string { + return "HMAC-SHA1" +} + +// GetType returns "" +func (*RamRoleArnSigner) GetType() string { + return "" +} + +// GetVersion returns "1.0" +func (*RamRoleArnSigner) GetVersion() string { + return "1.0" +} + +// GetAccessKeyId returns accessKeyId +func (signer *RamRoleArnSigner) GetAccessKeyId() (accessKeyId string, err error) { + if signer.sessionCredential == nil || signer.needUpdateCredential() { + err = signer.updateCredential() + } + if err != nil && (signer.sessionCredential == nil || len(signer.sessionCredential.AccessKeyId) <= 0) { + return "", err + } + return signer.sessionCredential.AccessKeyId, nil +} + +// GetExtraParam returns params +func (signer *RamRoleArnSigner) GetExtraParam() map[string]string { + if signer.sessionCredential == nil || signer.needUpdateCredential() { + signer.updateCredential() + } + if signer.sessionCredential == nil || len(signer.sessionCredential.StsToken) <= 0 { + return make(map[string]string) + } + return map[string]string{"SecurityToken": signer.sessionCredential.StsToken} +} + +// Sign create signer +func (signer *RamRoleArnSigner) Sign(stringToSign, secretSuffix string) string { + secret := signer.sessionCredential.AccessKeySecret + secretSuffix + return ShaHmac1(stringToSign, secret) +} + +func (signer *RamRoleArnSigner) buildCommonRequest() (request 
*requests.CommonRequest, err error) {
+	request = requests.NewCommonRequest()
+	request.Product = "Sts"
+	request.Version = "2015-04-01"
+	request.ApiName = "AssumeRole"
+	request.Scheme = requests.HTTPS
+	request.QueryParams["RoleArn"] = signer.credential.RoleArn
+	// use the signer's computed session name so the generated default
+	// applies when the credential does not provide one
+	request.QueryParams["RoleSessionName"] = signer.roleSessionName
+	request.QueryParams["DurationSeconds"] = strconv.Itoa(signer.credentialExpiration)
+	return
+}
+
+func (signer *RamRoleArnSigner) refreshApi(request *requests.CommonRequest) (response *responses.CommonResponse, err error) {
+	credential := &credentials.AccessKeyCredential{
+		AccessKeyId:     signer.credential.AccessKeyId,
+		AccessKeySecret: signer.credential.AccessKeySecret,
+	}
+	signerV1, err := NewAccessKeySigner(credential)
+	if err != nil {
+		return nil, err
+	}
+	return signer.commonApi(request, signerV1)
+}
+
+func (signer *RamRoleArnSigner) refreshCredential(response *responses.CommonResponse) (err error) {
+	if response.GetHttpStatus() != http.StatusOK {
+		message := "refresh session token failed"
+		err = errors.NewServerError(response.GetHttpStatus(), response.GetHttpContentString(), message)
+		return
+	}
+	var data interface{}
+	err = json.Unmarshal(response.GetHttpContentBytes(), &data)
+	if err != nil {
+		fmt.Println("refresh RoleArn sts token err, json.Unmarshal fail", err)
+		return
+	}
+	accessKeyId, err := jmespath.Search("Credentials.AccessKeyId", data)
+	if err != nil {
+		fmt.Println("refresh RoleArn sts token err, fail to get AccessKeyId", err)
+		return
+	}
+	accessKeySecret, err := jmespath.Search("Credentials.AccessKeySecret", data)
+	if err != nil {
+		fmt.Println("refresh RoleArn sts token err, fail to get AccessKeySecret", err)
+		return
+	}
+	securityToken, err := jmespath.Search("Credentials.SecurityToken", data)
+	if err != nil {
+		fmt.Println("refresh RoleArn sts token err, fail to get SecurityToken", err)
+		return
+	}
+	if accessKeyId == nil || accessKeySecret == nil || securityToken == nil {
+		return
+	}
+	signer.sessionCredential = &SessionCredential{
+		AccessKeyId:     accessKeyId.(string),
+		AccessKeySecret: accessKeySecret.(string),
+		StsToken:        securityToken.(string),
+	}
+	return
+}
+
+// GetSessionCredential returns the cached SessionCredential
+func (signer *RamRoleArnSigner) GetSessionCredential() *SessionCredential {
+	return signer.sessionCredential
+}
+
+// Shutdown is a no-op
+func (signer *RamRoleArnSigner) Shutdown() {}
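A minimal usage sketch of the role-assuming path this signer wires up; the region, keys, and ARN below are placeholders, not values taken from this change:

```go
package main

import (
	"fmt"

	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk"
)

func main() {
	// Placeholder credentials and role ARN for illustration only.
	client, err := sdk.NewClientWithRamRoleArn(
		"cn-hangzhou",
		"<accessKeyId>",
		"<accessKeySecret>",
		"acs:ram::123456789012:role/example-role",
		"example-session",
	)
	if err != nil {
		fmt.Println("create client failed:", err)
		return
	}
	// Requests issued through this client are signed with short-lived
	// STS credentials obtained via AssumeRole and refreshed before expiry.
	defer client.Shutdown()
}
```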
diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/signers/signer_sts_token.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/signers/signer_sts_token.go
new file mode 100644
index 000000000000..a716bda677a8
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/signers/signer_sts_token.go
@@ -0,0 +1,67 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package signers
+
+import (
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/credentials"
+)
+
+// StsTokenSigner signs requests with a pre-issued STS token credential
+type StsTokenSigner struct {
+	credential *credentials.StsTokenCredential
+}
+
+// NewStsTokenSigner returns StsTokenSigner
+func NewStsTokenSigner(credential *credentials.StsTokenCredential) (*StsTokenSigner, error) {
+	return &StsTokenSigner{
+		credential: credential,
+	}, nil
+}
+
+// GetName returns "HMAC-SHA1"
+func (*StsTokenSigner) GetName() string {
+	return "HMAC-SHA1"
+}
+
+// GetType returns ""
+func (*StsTokenSigner) GetType() string {
+	return ""
+}
+
+// GetVersion returns "1.0"
+func (*StsTokenSigner) GetVersion() string {
+	return "1.0"
+}
+
+// GetAccessKeyId returns accessKeyId
+func (signer *StsTokenSigner) GetAccessKeyId() (accessKeyId string, err error) {
+	return signer.credential.AccessKeyId, nil
+}
+
+// GetExtraParam attaches the SecurityToken query param
+func (signer *StsTokenSigner) GetExtraParam() map[string]string {
+	return map[string]string{"SecurityToken": signer.credential.AccessKeyStsToken}
+}
+
+// Sign signs stringToSign with the credential's secret plus the
+// caller-supplied suffix
+func (signer *StsTokenSigner) Sign(stringToSign, secretSuffix string) string {
+	secret := signer.credential.AccessKeySecret + secretSuffix
+	return ShaHmac1(stringToSign, secret)
+}
+
+// Shutdown is a no-op
+func (signer *StsTokenSigner) Shutdown() {}
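`ShaHmac1` is defined elsewhere in this package. For orientation only, here is a self-contained sketch of the HMAC-SHA1-then-base64 scheme the signers above rely on; it is an illustration of the algorithm family, not a copy of the package's implementation, and the `"&"` suffix shown is an assumption (the actual `secretSuffix` comes from callers outside this diff):

```go
package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

// shaHmac1 sketches the scheme: the signature is the base64 encoding of
// HMAC-SHA1(secret, stringToSign).
func shaHmac1(stringToSign, secret string) string {
	mac := hmac.New(sha1.New, []byte(secret))
	mac.Write([]byte(stringToSign))
	return base64.StdEncoding.EncodeToString(mac.Sum(nil))
}

func main() {
	// The signers compute secret = AccessKeySecret + secretSuffix;
	// "&" is the conventional suffix in Alibaba Cloud RPC signing.
	fmt.Println(shaHmac1("GET&%2F&<canonicalized-query>", "exampleSecret&"))
}
```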
diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/signers/signer_v2.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/signers/signer_v2.go
new file mode 100644
index 000000000000..ae7c90d8b61e
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/signers/signer_v2.go
@@ -0,0 +1,67 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package signers

+import (
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/credentials"
+)
+
+// SignerV2 signs requests with the private key of an RSA key pair
+type SignerV2 struct {
+	credential *credentials.RsaKeyPairCredential
+}
+
+// NewSignerV2 returns SignerV2
+func NewSignerV2(credential *credentials.RsaKeyPairCredential) (*SignerV2, error) {
+	return &SignerV2{
+		credential: credential,
+	}, nil
+}
+
+// GetExtraParam returns nil: SignerV2 adds no extra query params
+func (signer *SignerV2) GetExtraParam() map[string]string {
+	return nil
+}
+
+// GetName returns "SHA256withRSA"
+func (*SignerV2) GetName() string {
+	return "SHA256withRSA"
+}
+
+// GetType returns "PRIVATEKEY"
+func (*SignerV2) GetType() string {
+	return "PRIVATEKEY"
+}
+
+// GetVersion returns "1.0"
+func (*SignerV2) GetVersion() string {
+	return "1.0"
+}
+
+// GetAccessKeyId returns the PublicKeyId, which plays the role of the
+// access key id for key-pair credentials
+func (signer *SignerV2) GetAccessKeyId() (accessKeyId string, err error) {
+	return signer.credential.PublicKeyId, nil
+}
+
+// Sign signs stringToSign with the RSA private key; secretSuffix is
+// unused in this scheme
+func (signer *SignerV2) Sign(stringToSign, secretSuffix string) string {
+	secret := signer.credential.PrivateKey
+	return Sha256WithRsa(stringToSign, secret)
+}
+
+// Shutdown is a no-op
+func (signer *SignerV2) Shutdown() {}
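`Sha256WithRsa` is likewise defined elsewhere in the package. As orientation, a self-contained sketch of the SHA256-with-RSA scheme the signer's name implies, assuming a PKCS#8-encoded key and the conventional PKCS#1 v1.5 signature (both assumptions, since the helper's body is outside this diff):

```go
package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"crypto/x509"
	"encoding/base64"
	"encoding/pem"
	"fmt"
)

// sha256WithRsa sketches the scheme: SHA-256 digest of the string-to-sign,
// signed with the RSA private key, then base64-encoded.
func sha256WithRsa(stringToSign string, pemKey []byte) (string, error) {
	block, _ := pem.Decode(pemKey)
	if block == nil {
		return "", fmt.Errorf("no PEM block found")
	}
	parsed, err := x509.ParsePKCS8PrivateKey(block.Bytes)
	if err != nil {
		return "", err
	}
	digest := sha256.Sum256([]byte(stringToSign))
	sig, err := rsa.SignPKCS1v15(rand.Reader, parsed.(*rsa.PrivateKey), crypto.SHA256, digest[:])
	if err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(sig), nil
}

func main() {
	// Throwaway demo key; real callers supply RsaKeyPairCredential.PrivateKey.
	key, _ := rsa.GenerateKey(rand.Reader, 2048)
	der, _ := x509.MarshalPKCS8PrivateKey(key)
	pemKey := pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: der})
	fmt.Println(sha256WithRsa("GET&%2F&<canonicalized-query>", pemKey))
}
```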
diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/client.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/client.go
new file mode 100644
index 000000000000..9ba0123d0475
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/client.go
@@ -0,0 +1,442 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sdk
+
+import (
+	"fmt"
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth"
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/credentials"
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/endpoints"
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/errors"
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests"
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/responses"
+	"net"
+	"net/http"
+	"strconv"
+	"sync"
+)
+
+// Version is replaced at build time via -ldflags="-X sdk.version=x.x.x"
+var Version = "0.0.1"
+
+// Client is the common SDK client
+type Client struct {
+	regionId       string
+	config         *Config
+	signer         auth.Signer
+	httpClient     *http.Client
+	asyncTaskQueue chan func()
+
+	debug     bool
+	isRunning bool
+	// guards against "send on closed channel" panics when AddAsyncTask()
+	// races with Shutdown()
+	asyncChanLock *sync.RWMutex
+}
+
+// Init is not supported yet; use InitWithOptions or one of the
+// NewClientWith* constructors instead
+func (client *Client) Init() (err error) {
+	panic("not support yet")
+}
+
+// InitWithOptions provides options such as regionId and auth
+func (client *Client) InitWithOptions(regionId string, config *Config, credential auth.Credential) (err error) {
+	client.isRunning = true
+	client.asyncChanLock = new(sync.RWMutex)
+	client.regionId = regionId
+	client.config = config
+	client.httpClient = &http.Client{}
+
+	if config.HttpTransport != nil {
+		client.httpClient.Transport = config.HttpTransport
+	}
+
+	if config.Timeout > 0 {
+		client.httpClient.Timeout = config.Timeout
+	}
+
+	if config.EnableAsync {
+		client.EnableAsync(config.GoRoutinePoolSize, config.MaxTaskQueueSize)
+	}
+
+	client.signer, err = auth.NewSignerWithCredential(credential, client.ProcessCommonRequestWithSigner)
+
+	return
+}
+
+// EnableAsync starts the async task queue and its worker goroutines
+func (client *Client) EnableAsync(routinePoolSize, maxTaskQueueSize int) {
+	client.asyncTaskQueue = make(chan func(), maxTaskQueueSize)
+	for i := 0; i < routinePoolSize; i++ {
+		go func() {
+			for client.isRunning {
+				task, notClosed := <-client.asyncTaskQueue
+				if !notClosed {
+					// queue closed by Shutdown(); stop this worker
+					return
+				}
+				task()
+			}
+		}()
+	}
+}
+
+// InitWithAccessKey initializes the client with an AccessKey pair
+func (client *Client) InitWithAccessKey(regionId, accessKeyId, accessKeySecret string) (err error) {
+	config := client.InitClientConfig()
+	credential := &credentials.BaseCredential{
+		AccessKeyId:     accessKeyId,
+		AccessKeySecret: accessKeySecret,
+	}
+	return client.InitWithOptions(regionId, config, credential)
+}
+
+// InitWithStsToken initializes the client with STS token credentials
+func (client *Client) InitWithStsToken(regionId, accessKeyId, accessKeySecret, securityToken string) (err error) {
+	config := client.InitClientConfig()
+	credential := &credentials.StsTokenCredential{
+		AccessKeyId:       accessKeyId,
+		AccessKeySecret:   accessKeySecret,
+		AccessKeyStsToken: securityToken,
+	}
+	return client.InitWithOptions(regionId, config, credential)
+}
+
+// InitWithRamRoleArn initializes the client with a RAM role ARN to assume
+func (client *Client) InitWithRamRoleArn(regionId, accessKeyId, accessKeySecret, roleArn, roleSessionName string) (err error) {
+	config := client.InitClientConfig()
+	credential := &credentials.RamRoleArnCredential{
+		AccessKeyId:     accessKeyId,
+		AccessKeySecret: accessKeySecret,
+		RoleArn:         roleArn,
+		RoleSessionName: roleSessionName,
+	}
+	return client.InitWithOptions(regionId, config, credential)
+}
+
+// InitWithRsaKeyPair initializes the client with an RSA key pair
+func (client *Client) InitWithRsaKeyPair(regionId, publicKeyId, privateKey string, sessionExpiration int) (err error) {
+	config := client.InitClientConfig()
+	credential := &credentials.RsaKeyPairCredential{
+		PrivateKey:        privateKey,
+		PublicKeyId:       publicKeyId,
+		SessionExpiration: sessionExpiration,
+	}
+	return client.InitWithOptions(regionId, config, credential)
+}
+
+// InitWithEcsRamRole initializes the client with an ECS instance RAM role
+func (client *Client) InitWithEcsRamRole(regionId, roleName string) (err error) {
+	config := client.InitClientConfig()
+	credential := &credentials.EcsRamRoleCredential{
+		RoleName: roleName,
+	}
+	return client.InitWithOptions(regionId, config, credential)
+}
+
+// InitClientConfig returns the existing client config, or a default one
+func (client *Client) InitClientConfig() (config *Config) {
+	if client.config != nil {
+		return client.config
+	}
+	return NewConfig()
+}
+
+// DoAction sends a request to the OpenAPI endpoint using the default signer
+func (client *Client) DoAction(request requests.AcsRequest, response responses.AcsResponse) (err error) {
+	return client.DoActionWithSigner(request, response, nil)
+}
+
+// BuildRequestWithSigner resolves the endpoint, initializes the request
+// params, and builds a signed HTTP request without sending it
+func (client *Client) BuildRequestWithSigner(request requests.AcsRequest, signer auth.Signer) (err error) {
+	// add clientVersion
+	request.GetHeaders()["x-sdk-core-version"] = Version
+
+	regionId := client.regionId
+	if len(request.GetRegionId()) > 0 {
+		regionId = request.GetRegionId()
+	}
+
+	// resolve endpoint
+	resolveParam := &endpoints.ResolveParam{
+		Domain:               request.GetDomain(),
+		Product:              request.GetProduct(),
+		RegionId:             regionId,
+		LocationProduct:      request.GetLocationServiceCode(),
+		LocationEndpointType: request.GetLocationEndpointType(),
+		CommonApi:            client.ProcessCommonRequest,
+	}
+	endpoint, err := endpoints.Resolve(resolveParam)
+	if err != nil {
+		return
+	}
+	request.SetDomain(endpoint)
+
+	// init request params
+	err = requests.InitParams(request)
+	if err != nil {
+		return
+	}
+
+	// signature
+	var finalSigner auth.Signer
+	if signer != nil {
+		finalSigner = signer
+	} else {
+		finalSigner = client.signer
+	}
+	httpRequest, err := buildHttpRequest(request, finalSigner, regionId)
+	if err != nil {
+		return
+	}
+	if client.config.UserAgent != "" {
+		httpRequest.Header.Set("User-Agent", client.config.UserAgent)
+	}
+	return
+}
+
+// DoActionWithSigner sends the request with the given signer, retrying
+// timeouts and server errors when AutoRetry is enabled
+func (client *Client) DoActionWithSigner(request requests.AcsRequest, response responses.AcsResponse, signer auth.Signer) (err error) {
+
+	// add clientVersion
+	request.GetHeaders()["x-sdk-core-version"] = Version
+
+	regionId := client.regionId
+	if len(request.GetRegionId()) > 0 {
+		regionId = request.GetRegionId()
+	}
+
+	// resolve endpoint
+	resolveParam := &endpoints.ResolveParam{
+		Domain:               request.GetDomain(),
+		Product:              request.GetProduct(),
+		RegionId:             regionId,
+		LocationProduct:      request.GetLocationServiceCode(),
+		LocationEndpointType: request.GetLocationEndpointType(),
+		CommonApi:            client.ProcessCommonRequest,
+	}
+	endpoint, err := endpoints.Resolve(resolveParam)
+	if err != nil {
+		return
+	}
+	request.SetDomain(endpoint)
+
+	if request.GetScheme() == "" {
+		request.SetScheme(client.config.Scheme)
+	}
+	// init request params
+	err = requests.InitParams(request)
+	if err != nil {
+		return
+	}
+
+	// signature
+	var finalSigner auth.Signer
+	if signer != nil {
+		finalSigner = signer
+	} else {
+		finalSigner = client.signer
+	}
+	httpRequest, err := buildHttpRequest(request, finalSigner, regionId)
+	if err != nil {
+		return
+	}
+	if client.config.UserAgent != "" {
+		httpRequest.Header.Set("User-Agent", client.config.UserAgent)
+	}
+	var httpResponse *http.Response
+	for retryTimes := 0; retryTimes <= client.config.MaxRetryTime; retryTimes++ {
+		httpResponse, err = client.httpClient.Do(httpRequest)
+
+		var timeout bool
+		// receive error
+		if err != nil {
+			if !client.config.AutoRetry {
+				return
+			} else if timeout = isTimeout(err); !timeout {
+				// if not a timeout error, return
+				return
+			} else if retryTimes >= client.config.MaxRetryTime {
+				// timeout, but the max retry count is reached; return
+				timeoutErrorMsg := fmt.Sprintf(errors.TimeoutErrorMessage, strconv.Itoa(retryTimes+1), strconv.Itoa(retryTimes+1))
+				err = errors.NewClientError(errors.TimeoutErrorCode, timeoutErrorMsg, err)
+				return
+			}
+		}
+		// a status code >= 500 or a timeout triggers a retry
+		if client.config.AutoRetry && (timeout || isServerError(httpResponse)) {
+			// rewrite signatureNonce and signature
+			httpRequest, err = buildHttpRequest(request, finalSigner, regionId)
+			if err != nil {
+				return
+			}
+			continue
+		}
+		break
+	}
+	err = responses.Unmarshal(response, httpResponse, request.GetAcceptFormat())
+	// wrap server errors
+	if serverErr, ok := err.(*errors.ServerError); ok {
+		var wrapInfo = map[string]string{}
+		wrapInfo["StringToSign"] = request.GetStringToSign()
+		err = errors.WrapServerError(serverErr, wrapInfo)
+	}
+	return
+}
+
+func buildHttpRequest(request requests.AcsRequest, signer auth.Signer, regionId string) (httpRequest *http.Request, err error) {
+	err = auth.Sign(request, signer, regionId)
+	if err != nil {
+		return
+	}
+	requestMethod := request.GetMethod()
+	requestUrl := request.BuildUrl()
+	body := request.GetBodyReader()
+	httpRequest, err = http.NewRequest(requestMethod, requestUrl, body)
+	if err != nil {
+		return
+	}
+	for key, value := range request.GetHeaders() {
+		httpRequest.Header[key] = []string{value}
+	}
+	// host is a special case
+	if host, containsHost := request.GetHeaders()["Host"]; containsHost {
+		httpRequest.Host = host
+	}
+	return
+}
+
+func isTimeout(err error) bool {
+	if err == nil {
+		return false
+	}
+	netErr, isNetError := err.(net.Error)
+	return isNetError && netErr.Timeout()
+}
+
+func isServerError(httpResponse *http.Response) bool {
+	return httpResponse != nil && httpResponse.StatusCode >= http.StatusInternalServerError
+}
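+
+// Retry semantics, in brief: DoActionWithSigner retries a request only
+// when AutoRetry is enabled and the failure is a network timeout
+// (isTimeout) or an HTTP 5xx response (isServerError), up to
+// MaxRetryTime additional attempts; the request is rebuilt and
+// re-signed (fresh signature nonce) before every retry.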
+
+// AddAsyncTask queues a task for asynchronous execution. It only blocks
+// when one of the following occurs:
+// 1. the asyncTaskQueue is full; increase the queue size to avoid this
+// 2. Shutdown() is in progress and the client is being closed
+func (client *Client) AddAsyncTask(task func()) (err error) {
+	if client.asyncTaskQueue != nil {
+		client.asyncChanLock.RLock()
+		defer client.asyncChanLock.RUnlock()
+		if client.isRunning {
+			client.asyncTaskQueue <- task
+		}
+	} else {
+		err = errors.NewClientError(errors.AsyncFunctionNotEnabledCode, errors.AsyncFunctionNotEnabledMessage, nil)
+	}
+	return
+}
+
+// GetConfig returns the client config
+func (client *Client) GetConfig() *Config {
+	return client.config
+}
+
+// NewClient returns a bare SDK client; Init is not supported yet, so
+// prefer the NewClientWith* constructors below
+func NewClient() (client *Client, err error) {
+	client = &Client{}
+	err = client.Init()
+	return
+}
+
+// NewClientWithOptions creates a client with a config and credential
+func NewClientWithOptions(regionId string, config *Config, credential auth.Credential) (client *Client, err error) {
+	client = &Client{}
+	err = client.InitWithOptions(regionId, config, credential)
+	return
+}
+
+// NewClientWithAccessKey creates a client with accessKeyId and accessKeySecret
+func NewClientWithAccessKey(regionId, accessKeyId, accessKeySecret string) (client *Client, err error) {
+	client = &Client{}
+	err = client.InitWithAccessKey(regionId, accessKeyId, accessKeySecret)
+	return
+}
+
+// NewClientWithStsToken creates a client with an STS token
+func NewClientWithStsToken(regionId, stsAccessKeyId, stsAccessKeySecret, stsToken string) (client *Client, err error) {
+	client = &Client{}
+	err = client.InitWithStsToken(regionId, stsAccessKeyId, stsAccessKeySecret, stsToken)
+	return
+}
+
+// NewClientWithRamRoleArn creates a client that assumes the given RAM role
+func NewClientWithRamRoleArn(regionId string, accessKeyId, accessKeySecret, roleArn, roleSessionName string) (client *Client, err error) {
+	client = &Client{}
+	err = client.InitWithRamRoleArn(regionId, accessKeyId, accessKeySecret, roleArn, roleSessionName)
+	return
+}
+
+// NewClientWithEcsRamRole creates a client with a RAM role on ECS
+func NewClientWithEcsRamRole(regionId string, roleName string) (client *Client, err error) {
+	client = &Client{}
+	err = client.InitWithEcsRamRole(regionId, roleName)
+	return
+}
+
+// NewClientWithRsaKeyPair creates a client with an RSA key pair
+func NewClientWithRsaKeyPair(regionId string, publicKeyId, privateKey string, sessionExpiration int) (client *Client, err error) {
+	client = &Client{}
+	err = client.InitWithRsaKeyPair(regionId, publicKeyId, privateKey, sessionExpiration)
+	return
+}
+
+// NewClientWithStsRoleArn is deprecated: Use NewClientWithRamRoleArn in this package instead.
+func NewClientWithStsRoleArn(regionId string, accessKeyId, accessKeySecret, roleArn, roleSessionName string) (client *Client, err error) {
+	return NewClientWithRamRoleArn(regionId, accessKeyId, accessKeySecret, roleArn, roleSessionName)
+}
+
+// NewClientWithStsRoleNameOnEcs is deprecated: Use NewClientWithEcsRamRole in this package instead.
+func NewClientWithStsRoleNameOnEcs(regionId string, roleName string) (client *Client, err error) {
+	return NewClientWithEcsRamRole(regionId, roleName)
+}
+
+// ProcessCommonRequest executes a common (untyped) request
+func (client *Client) ProcessCommonRequest(request *requests.CommonRequest) (response *responses.CommonResponse, err error) {
+	request.TransToAcsRequest()
+	response = responses.NewCommonResponse()
+	err = client.DoAction(request, response)
+	return
+}
+
+// ProcessCommonRequestWithSigner executes a common request with the given signer
+func (client *Client) ProcessCommonRequestWithSigner(request *requests.CommonRequest, signerInterface interface{}) (response *responses.CommonResponse, err error) {
+	if signer, isSigner := signerInterface.(auth.Signer); isSigner {
+		request.TransToAcsRequest()
+		response = responses.NewCommonResponse()
+		err = client.DoActionWithSigner(request, response, signer)
+		return
+	}
+	panic("should not be here")
+}
+
+// Shutdown stops the client and its async workers
+func (client *Client) Shutdown() {
+	client.signer.Shutdown()
+	// lock out AddAsyncTask() while closing
+	client.asyncChanLock.Lock()
+	defer client.asyncChanLock.Unlock()
+	client.isRunning = false
+	if client.asyncTaskQueue != nil {
+		// only close the queue when async was enabled
+		close(client.asyncTaskQueue)
+	}
+}
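Putting client.go together with the config.go that follows, a typical construction looks like this minimal sketch; the region, keys, and the ECS API parameters are placeholders, not values from this change:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk"
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth/credentials"
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests"
)

func main() {
	// Fluent config: retry timeouts/5xx up to 5 times, 20s per attempt.
	config := sdk.NewConfig().
		WithAutoRetry(true).
		WithMaxRetryTime(5).
		WithTimeout(20 * time.Second)

	// Placeholder keys for illustration only.
	credential := &credentials.BaseCredential{
		AccessKeyId:     "<accessKeyId>",
		AccessKeySecret: "<accessKeySecret>",
	}

	client, err := sdk.NewClientWithOptions("cn-hangzhou", config, credential)
	if err != nil {
		fmt.Println("create client failed:", err)
		return
	}
	defer client.Shutdown()

	// DescribeRegions is a simple read-only ECS call.
	request := requests.NewCommonRequest()
	request.Product = "Ecs"
	request.Version = "2014-05-26"
	request.ApiName = "DescribeRegions"

	response, err := client.ProcessCommonRequest(request)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	fmt.Println(response.GetHttpContentString())
}
```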
diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/config.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/config.go
new file mode 100644
index 000000000000..a50158dbc1ab
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/config.go
@@ -0,0 +1,98 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package sdk + +import ( + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/utils" + "net/http" + "time" +) + +// Config is SDK client options +type Config struct { + AutoRetry bool `default:"true"` + MaxRetryTime int `default:"3"` + UserAgent string `default:""` + Debug bool `default:"false"` + Timeout time.Duration `default:"10000000000"` + HttpTransport *http.Transport `default:""` + EnableAsync bool `default:"false"` + MaxTaskQueueSize int `default:"1000"` + GoRoutinePoolSize int `default:"5"` + Scheme string `default:"HTTP"` +} + +// NewConfig returns client config +func NewConfig() (config *Config) { + config = &Config{} + utils.InitStructWithDefaultTag(config) + return +} + +// WithTimeout set client timeout +func (c *Config) WithTimeout(timeout time.Duration) *Config { + c.Timeout = timeout + return c +} + +// WithAutoRetry set client with retry +func (c *Config) WithAutoRetry(isAutoRetry bool) *Config { + c.AutoRetry = isAutoRetry + return c +} + +// WithMaxRetryTime set client with max retry times +func (c *Config) WithMaxRetryTime(maxRetryTime int) *Config { + c.MaxRetryTime = maxRetryTime + return c +} + +// WithUserAgent set client user agent +func (c *Config) WithUserAgent(userAgent string) *Config { + c.UserAgent = userAgent + return c +} + +// WithHttpTransport set client custom http transport +func (c *Config) WithHttpTransport(httpTransport *http.Transport) *Config { + c.HttpTransport = httpTransport + return c +} + +// WithEnableAsync enable client async option +func (c *Config) WithEnableAsync(isEnableAsync bool) *Config { + c.EnableAsync = isEnableAsync + return c +} + +// WithMaxTaskQueueSize set client max task queue size +func (c *Config) WithMaxTaskQueueSize(maxTaskQueueSize int) *Config { + c.MaxTaskQueueSize = maxTaskQueueSize + return c +} + +// WithGoRoutinePoolSize set client go routine pool size +func (c *Config) WithGoRoutinePoolSize(goRoutinePoolSize int) *Config { + c.GoRoutinePoolSize = goRoutinePoolSize + return c +} + +// WithDebug set client debug mode +func (c *Config) WithDebug(isDebug bool) *Config { + c.Debug = isDebug + return c +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/endpoints/endpoints_config.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/endpoints/endpoints_config.go new file mode 100644 index 000000000000..e57f673bb500 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/endpoints/endpoints_config.go @@ -0,0 +1,521 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package endpoints + +import ( + "encoding/json" + "fmt" + "sync" +) + +const endpointsJson = "{" + + " \"products\":[" + + " {" + + " \"code\": \"aegis\"," + + " \"document_id\": \"28449\"," + + " \"location_service_code\": \"vipaegis\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"aegis.cn-hangzhou.aliyuncs.com\"," + + " \"regional_endpoint_pattern\": \"\"" + + " }," + + " {" + + " \"code\": \"alidns\"," + + " \"document_id\": \"29739\"," + + " \"location_service_code\": \"alidns\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"alidns.aliyuncs.com\"," + + " \"regional_endpoint_pattern\": \"\"" + + " }," + + " {" + + " \"code\": \"arms\"," + + " \"document_id\": \"42924\"," + + " \"location_service_code\": \"\"," + + " \"regional_endpoints\": [ {" + + " \"region\": \"ap-southeast-1\"," + + " \"endpoint\": \"arms.ap-southeast-1.aliyuncs.com\"" + + " }, {" + + " \"region\": \"cn-beijing\"," + + " \"endpoint\": \"arms.cn-beijing.aliyuncs.com\"" + + " }, {" + + " \"region\": \"cn-hangzhou\"," + + " \"endpoint\": \"arms.cn-hangzhou.aliyuncs.com\"" + + " }, {" + + " \"region\": \"cn-hongkong\"," + + " \"endpoint\": \"arms.cn-hongkong.aliyuncs.com\"" + + " }, {" + + " \"region\": \"cn-qingdao\"," + + " \"endpoint\": \"arms.cn-qingdao.aliyuncs.com\"" + + " }, {" + + " \"region\": \"cn-shanghai\"," + + " \"endpoint\": \"arms.cn-shanghai.aliyuncs.com\"" + + " }, {" + + " \"region\": \"cn-shenzhen\"," + + " \"endpoint\": \"arms.cn-shenzhen.aliyuncs.com\"" + + " }]," + + " \"global_endpoint\": \"\"," + + " \"regional_endpoint_pattern\": \"arms.[RegionId].aliyuncs.com\"" + + " }," + + " {" + + " \"code\": \"batchcompute\"," + + " \"document_id\": \"44717\"," + + " \"location_service_code\": \"\"," + + " \"regional_endpoints\": [ {" + + " \"region\": \"ap-southeast-1\"," + + " \"endpoint\": \"batchcompute.ap-southeast-1.aliyuncs.com\"" + + " }, {" + + " \"region\": \"cn-beijing\"," + + " \"endpoint\": \"batchcompute.cn-beijing.aliyuncs.com\"" + + " }, {" + + " \"region\": \"cn-hangzhou\"," + + " \"endpoint\": \"batchcompute.cn-hangzhou.aliyuncs.com\"" + + " }, {" + + " \"region\": \"cn-huhehaote\"," + + " \"endpoint\": \"batchcompute.cn-huhehaote.aliyuncs.com\"" + + " }, {" + + " \"region\": \"cn-qingdao\"," + + " \"endpoint\": \"batchcompute.cn-qingdao.aliyuncs.com\"" + + " }, {" + + " \"region\": \"cn-shanghai\"," + + " \"endpoint\": \"batchcompute.cn-shanghai.aliyuncs.com\"" + + " }, {" + + " \"region\": \"cn-shenzhen\"," + + " \"endpoint\": \"batchcompute.cn-shenzhen.aliyuncs.com\"" + + " }, {" + + " \"region\": \"cn-zhangjiakou\"," + + " \"endpoint\": \"batchcompute.cn-zhangjiakou.aliyuncs.com\"" + + " }, {" + + " \"region\": \"us-west-1\"," + + " \"endpoint\": \"batchcompute.us-west-1.aliyuncs.com\"" + + " }]," + + " \"global_endpoint\": \"\"," + + " \"regional_endpoint_pattern\": \"batchcompute.[RegionId].aliyuncs.com\"" + + " }," + + " {" + + " \"code\": \"ccc\"," + + " \"document_id\": \"63027\"," + + " \"location_service_code\": \"ccc\"," + + " \"regional_endpoints\": [ {" + + " \"region\": \"cn-hangzhou\"," + + " \"endpoint\": \"ccc.cn-hangzhou.aliyuncs.com\"" + + " }, {" + + " \"region\": \"cn-shanghai\"," + + " \"endpoint\": \"ccc.cn-shanghai.aliyuncs.com\"" + + " }]," + + " \"global_endpoint\": \"\"," + + " \"regional_endpoint_pattern\": \"ccc.[RegionId].aliyuncs.com\"" + + " }," + + " {" + + " \"code\": \"cdn\"," + + " \"document_id\": \"27148\"," + + " \"location_service_code\": \"\"," + + " \"regional_endpoints\": []," + + " 
\"global_endpoint\": \"cdn.aliyuncs.com\"," + + " \"regional_endpoint_pattern\": \"\"" + + " }," + + " {" + + " \"code\": \"cds\"," + + " \"document_id\": \"62887\"," + + " \"location_service_code\": \"\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"cds.cn-beijing.aliyuncs.com\"," + + " \"regional_endpoint_pattern\": \"\"" + + " }," + + " {" + + " \"code\": \"chatbot\"," + + " \"document_id\": \"60760\"," + + " \"location_service_code\": \"beebot\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"\"," + + " \"regional_endpoint_pattern\": \"chatbot.[RegionId].aliyuncs.com\"" + + " }," + + " {" + + " \"code\": \"cloudapi\"," + + " \"document_id\": \"43590\"," + + " \"location_service_code\": \"apigateway\"," + + " \"regional_endpoints\": [ {" + + " \"region\": \"ap-northeast-1\"," + + " \"endpoint\": \"apigateway.ap-northeast-1.aliyuncs.com\"" + + " }, {" + + " \"region\": \"us-west-1\"," + + " \"endpoint\": \"apigateway.us-west-1.aliyuncs.com\"" + + " }]," + + " \"global_endpoint\": \"\"," + + " \"regional_endpoint_pattern\": \"apigateway.[RegionId].aliyuncs.com\"" + + " }," + + " {" + + " \"code\": \"cloudauth\"," + + " \"document_id\": \"60687\"," + + " \"location_service_code\": \"cloudauth\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"cloudauth.aliyuncs.com\"," + + " \"regional_endpoint_pattern\": \"\"" + + " }," + + " {" + + " \"code\": \"cloudphoto\"," + + " \"document_id\": \"59902\"," + + " \"location_service_code\": \"cloudphoto\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"\"," + + " \"regional_endpoint_pattern\": \"cloudphoto.[RegionId].aliyuncs.com\"" + + " }," + + " {" + + " \"code\": \"cloudwf\"," + + " \"document_id\": \"58111\"," + + " \"location_service_code\": \"\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"cloudwf.aliyuncs.com\"," + + " \"regional_endpoint_pattern\": \"\"" + + " }," + + " {" + + " \"code\": \"cms\"," + + " \"document_id\": \"28615\"," + + " \"location_service_code\": \"cms\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"\"," + + " \"regional_endpoint_pattern\": \"\"" + + " }," + + " {" + + " \"code\": \"cr\"," + + " \"document_id\": \"60716\"," + + " \"location_service_code\": \"\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"cr.aliyuncs.com\"," + + " \"regional_endpoint_pattern\": \"\"" + + " }," + + " {" + + " \"code\": \"cs\"," + + " \"document_id\": \"26043\"," + + " \"location_service_code\": \"\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"cs.aliyuncs.com\"," + + " \"regional_endpoint_pattern\": \"\"" + + " }," + + " {" + + " \"code\": \"csb\"," + + " \"document_id\": \"64837\"," + + " \"location_service_code\": \"\"," + + " \"regional_endpoints\": [ {" + + " \"region\": \"cn-beijing\"," + + " \"endpoint\": \"csb.cn-beijing.aliyuncs.com\"" + + " }, {" + + " \"region\": \"cn-hangzhou\"," + + " \"endpoint\": \"csb.cn-hangzhou.aliyuncs.com\"" + + " }]," + + " \"global_endpoint\": \"\"," + + " \"regional_endpoint_pattern\": \"csb.[RegionId].aliyuncs.com\"" + + " }," + + " {" + + " \"code\": \"dds\"," + + " \"document_id\": \"61715\"," + + " \"location_service_code\": \"dds\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"mongodb.aliyuncs.com\"," + + " \"regional_endpoint_pattern\": \"mongodb.[RegionId].aliyuncs.com\"" + + " }," + + " {" + + " \"code\": \"dm\"," + + " \"document_id\": \"29434\"," + + " \"location_service_code\": \"\"," + + " \"regional_endpoints\": [ {" 
+ + " \"region\": \"ap-southeast-1\"," + + " \"endpoint\": \"dm.aliyuncs.com\"" + + " }, {" + + " \"region\": \"ap-southeast-2\"," + + " \"endpoint\": \"dm.ap-southeast-2.aliyuncs.com\"" + + " }, {" + + " \"region\": \"cn-beijing\"," + + " \"endpoint\": \"dm.aliyuncs.com\"" + + " }, {" + + " \"region\": \"cn-hangzhou\"," + + " \"endpoint\": \"dm.aliyuncs.com\"" + + " }, {" + + " \"region\": \"cn-hongkong\"," + + " \"endpoint\": \"dm.aliyuncs.com\"" + + " }, {" + + " \"region\": \"cn-qingdao\"," + + " \"endpoint\": \"dm.aliyuncs.com\"" + + " }, {" + + " \"region\": \"cn-shanghai\"," + + " \"endpoint\": \"dm.aliyuncs.com\"" + + " }, {" + + " \"region\": \"cn-shenzhen\"," + + " \"endpoint\": \"dm.aliyuncs.com\"" + + " }, {" + + " \"region\": \"us-east-1\"," + + " \"endpoint\": \"dm.aliyuncs.com\"" + + " }, {" + + " \"region\": \"us-west-1\"," + + " \"endpoint\": \"dm.aliyuncs.com\"" + + " }]," + + " \"global_endpoint\": \"dm.aliyuncs.com\"," + + " \"regional_endpoint_pattern\": \"dm.[RegionId].aliyuncs.com\"" + + " }," + + " {" + + " \"code\": \"domain\"," + + " \"document_id\": \"42875\"," + + " \"location_service_code\": \"\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"domain.aliyuncs.com\"," + + " \"regional_endpoint_pattern\": \"domain.aliyuncs.com\"" + + " }," + + " {" + + " \"code\": \"domain-intl\"," + + " \"document_id\": \"\"," + + " \"location_service_code\": \"\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"domain-intl.aliyuncs.com\"," + + " \"regional_endpoint_pattern\": \"domain-intl.aliyuncs.com\"" + + " }," + + " {" + + " \"code\": \"drds\"," + + " \"document_id\": \"51111\"," + + " \"location_service_code\": \"\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"drds.aliyuncs.com\"," + + " \"regional_endpoint_pattern\": \"drds.aliyuncs.com\"" + + " }," + + " {" + + " \"code\": \"ecs\"," + + " \"document_id\": \"25484\"," + + " \"location_service_code\": \"ecs\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"\"," + + " \"regional_endpoint_pattern\": \"\"" + + " }," + + " {" + + " \"code\": \"emr\"," + + " \"document_id\": \"28140\"," + + " \"location_service_code\": \"emr\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"\"," + + " \"regional_endpoint_pattern\": \"emr.[RegionId].aliyuncs.com\"" + + " }," + + " {" + + " \"code\": \"ess\"," + + " \"document_id\": \"25925\"," + + " \"location_service_code\": \"ess\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"\"," + + " \"regional_endpoint_pattern\": \"ess.[RegionId].aliyuncs.com\"" + + " }," + + " {" + + " \"code\": \"green\"," + + " \"document_id\": \"28427\"," + + " \"location_service_code\": \"green\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"green.aliyuncs.com\"," + + " \"regional_endpoint_pattern\": \"\"" + + " }," + + " {" + + " \"code\": \"hpc\"," + + " \"document_id\": \"35201\"," + + " \"location_service_code\": \"hpc\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"hpc.aliyuncs.com\"," + + " \"regional_endpoint_pattern\": \"\"" + + " }," + + " {" + + " \"code\": \"httpdns\"," + + " \"document_id\": \"52679\"," + + " \"location_service_code\": \"\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"httpdns-api.aliyuncs.com\"," + + " \"regional_endpoint_pattern\": \"\"" + + " }," + + " {" + + " \"code\": \"iot\"," + + " \"document_id\": \"30557\"," + + " \"location_service_code\": \"iot\"," + + " \"regional_endpoints\": []," + + " 
\"global_endpoint\": \"\"," + + " \"regional_endpoint_pattern\": \"iot.[RegionId].aliyuncs.com\"" + + " }," + + " {" + + " \"code\": \"itaas\"," + + " \"document_id\": \"55759\"," + + " \"location_service_code\": \"\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"itaas.aliyuncs.com\"," + + " \"regional_endpoint_pattern\": \"\"" + + " }," + + " {" + + " \"code\": \"jaq\"," + + " \"document_id\": \"35037\"," + + " \"location_service_code\": \"\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"jaq.aliyuncs.com\"," + + " \"regional_endpoint_pattern\": \"\"" + + " }," + + " {" + + " \"code\": \"live\"," + + " \"document_id\": \"48207\"," + + " \"location_service_code\": \"live\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"live.aliyuncs.com\"," + + " \"regional_endpoint_pattern\": \"\"" + + " }," + + " {" + + " \"code\": \"mts\"," + + " \"document_id\": \"29212\"," + + " \"location_service_code\": \"mts\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"\"," + + " \"regional_endpoint_pattern\": \"\"" + + " }," + + " {" + + " \"code\": \"nas\"," + + " \"document_id\": \"62598\"," + + " \"location_service_code\": \"nas\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"\"," + + " \"regional_endpoint_pattern\": \"\"" + + " }," + + " {" + + " \"code\": \"ons\"," + + " \"document_id\": \"44416\"," + + " \"location_service_code\": \"ons\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"\"," + + " \"regional_endpoint_pattern\": \"\"" + + " }," + + " {" + + " \"code\": \"polardb\"," + + " \"document_id\": \"58764\"," + + " \"location_service_code\": \"polardb\"," + + " \"regional_endpoints\": [ {" + + " \"region\": \"ap-south-1\"," + + " \"endpoint\": \"polardb.ap-south-1.aliyuncs.com\"" + + " }, {" + + " \"region\": \"ap-southeast-5\"," + + " \"endpoint\": \"polardb.ap-southeast-5.aliyuncs.com\"" + + " }]," + + " \"global_endpoint\": \"\"," + + " \"regional_endpoint_pattern\": \"polardb.aliyuncs.com\"" + + " }," + + " {" + + " \"code\": \"push\"," + + " \"document_id\": \"30074\"," + + " \"location_service_code\": \"\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"cloudpush.aliyuncs.com\"," + + " \"regional_endpoint_pattern\": \"\"" + + " }," + + " {" + + " \"code\": \"qualitycheck\"," + + " \"document_id\": \"50807\"," + + " \"location_service_code\": \"\"," + + " \"regional_endpoints\": [ {" + + " \"region\": \"cn-hangzhou\"," + + " \"endpoint\": \"qualitycheck.cn-hangzhou.aliyuncs.com\"" + + " }]," + + " \"global_endpoint\": \"\"," + + " \"regional_endpoint_pattern\": \"\"" + + " }," + + " {" + + " \"code\": \"r-kvstore\"," + + " \"document_id\": \"60831\"," + + " \"location_service_code\": \"redisa\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"\"," + + " \"regional_endpoint_pattern\": \"\"" + + " }," + + " {" + + " \"code\": \"ram\"," + + " \"document_id\": \"28672\"," + + " \"location_service_code\": \"\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"ram.aliyuncs.com\"," + + " \"regional_endpoint_pattern\": \"\"" + + " }," + + " {" + + " \"code\": \"rds\"," + + " \"document_id\": \"26223\"," + + " \"location_service_code\": \"rds\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"\"," + + " \"regional_endpoint_pattern\": \"\"" + + " }," + + " {" + + " \"code\": \"ros\"," + + " \"document_id\": \"28899\"," + + " \"location_service_code\": \"\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": 
\"ros.aliyuncs.com\"," + + " \"regional_endpoint_pattern\": \"\"" + + " }," + + " {" + + " \"code\": \"sas-api\"," + + " \"document_id\": \"28498\"," + + " \"location_service_code\": \"sas\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"\"," + + " \"regional_endpoint_pattern\": \"\"" + + " }," + + " {" + + " \"code\": \"slb\"," + + " \"document_id\": \"27565\"," + + " \"location_service_code\": \"slb\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"\"," + + " \"regional_endpoint_pattern\": \"\"" + + " }," + + " {" + + " \"code\": \"sts\"," + + " \"document_id\": \"28756\"," + + " \"location_service_code\": \"\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"sts.aliyuncs.com\"," + + " \"regional_endpoint_pattern\": \"\"" + + " }," + + " {" + + " \"code\": \"vod\"," + + " \"document_id\": \"60574\"," + + " \"location_service_code\": \"vod\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"\"," + + " \"regional_endpoint_pattern\": \"\"" + + " }," + + " {" + + " \"code\": \"vpc\"," + + " \"document_id\": \"34962\"," + + " \"location_service_code\": \"vpc\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"\"," + + " \"regional_endpoint_pattern\": \"\"" + + " }," + + " {" + + " \"code\": \"waf\"," + + " \"document_id\": \"62847\"," + + " \"location_service_code\": \"waf\"," + + " \"regional_endpoints\": []," + + " \"global_endpoint\": \"\"," + + " \"regional_endpoint_pattern\": \"\"" + + " }]" + + "}" + +var initOnce sync.Once +var data interface{} + +func getEndpointConfigData() interface{} { + initOnce.Do(func() { + err := json.Unmarshal([]byte(endpointsJson), &data) + if err != nil { + fmt.Println("init endpoint config data failed.", err) + } + }) + return data +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/endpoints/local_global_resolver.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/endpoints/local_global_resolver.go new file mode 100644 index 000000000000..25f1d8cdfe5f --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/endpoints/local_global_resolver.go @@ -0,0 +1,40 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package endpoints + +import ( + "fmt" + "github.com/jmespath/go-jmespath" + "strings" +) + +// LocalGlobalResolver is global resolver +type LocalGlobalResolver struct{} + +// TryResolve returns endpoint +func (resolver *LocalGlobalResolver) TryResolve(param *ResolveParam) (endpoint string, support bool, err error) { + // get the global endpoints configs + endpointExpression := fmt.Sprintf("products[?code=='%s'].global_endpoint", strings.ToLower(param.Product)) + endpointData, err := jmespath.Search(endpointExpression, getEndpointConfigData()) + if err == nil && endpointData != nil && len(endpointData.([]interface{})) > 0 { + endpoint = endpointData.([]interface{})[0].(string) + support = len(endpoint) > 0 + return endpoint, support, nil + } + support = false + return +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/endpoints/local_regional_resolver.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/endpoints/local_regional_resolver.go new file mode 100644 index 000000000000..c96f198093b8 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/endpoints/local_regional_resolver.go @@ -0,0 +1,44 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package endpoints + +import ( + "fmt" + "github.com/jmespath/go-jmespath" + "strings" +) + +// LocalRegionalResolver is regional resolver +type LocalRegionalResolver struct{} + +// TryResolve returns endpoint +func (resolver *LocalRegionalResolver) TryResolve(param *ResolveParam) (endpoint string, support bool, err error) { + // get the regional endpoints configs + regionalExpression := fmt.Sprintf("products[?code=='%s'].regional_endpoints", strings.ToLower(param.Product)) + regionalData, err := jmespath.Search(regionalExpression, getEndpointConfigData()) + if err == nil && regionalData != nil && len(regionalData.([]interface{})) > 0 { + endpointExpression := fmt.Sprintf("[0][?region=='%s'].endpoint", strings.ToLower(param.RegionId)) + endpointData, err := jmespath.Search(endpointExpression, regionalData) + if err == nil && endpointData != nil && len(endpointData.([]interface{})) > 0 { + endpoint = endpointData.([]interface{})[0].(string) + support = len(endpoint) > 0 + return endpoint, support, nil + } + } + support = false + return +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/endpoints/location_resolver.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/endpoints/location_resolver.go new file mode 100644 index 000000000000..48b9e618c4d3 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/endpoints/location_resolver.go @@ -0,0 +1,148 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package endpoints
+
+import (
+	"encoding/json"
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests"
+	"sync"
+	"time"
+)
+
+const (
+	// EndpointCacheExpireTime in seconds
+	EndpointCacheExpireTime = 3600
+)
+
+var lastClearTimePerProduct = struct {
+	sync.RWMutex
+	cache map[string]int64
+}{cache: make(map[string]int64)}
+
+var endpointCache = struct {
+	sync.RWMutex
+	cache map[string]string
+}{cache: make(map[string]string)}
+
+// LocationResolver resolves endpoints by querying the Location service
+type LocationResolver struct{}
+
+// TryResolve returns the endpoint, consulting the local cache before
+// calling the remote Location service
+func (resolver *LocationResolver) TryResolve(param *ResolveParam) (endpoint string, support bool, err error) {
+	if len(param.LocationProduct) <= 0 {
+		support = false
+		return
+	}
+
+	// get from cache
+	cacheKey := param.Product + "#" + param.RegionId
+	endpointCache.RLock()
+	cachedEndpoint := endpointCache.cache[cacheKey]
+	endpointCache.RUnlock()
+	if len(cachedEndpoint) > 0 && !CheckCacheIsExpire(cacheKey) {
+		endpoint = cachedEndpoint
+		support = true
+		return
+	}
+
+	// get from remote
+	getEndpointRequest := requests.NewCommonRequest()
+
+	getEndpointRequest.Product = "Location"
+	getEndpointRequest.Version = "2015-06-12"
+	getEndpointRequest.ApiName = "DescribeEndpoints"
+	getEndpointRequest.Domain = "location.aliyuncs.com"
+	getEndpointRequest.Method = "GET"
+	getEndpointRequest.Scheme = requests.HTTPS
+
+	getEndpointRequest.QueryParams["Id"] = param.RegionId
+	getEndpointRequest.QueryParams["ServiceCode"] = param.LocationProduct
+	if len(param.LocationEndpointType) > 0 {
+		getEndpointRequest.QueryParams["Type"] = param.LocationEndpointType
+	} else {
+		getEndpointRequest.QueryParams["Type"] = "openAPI"
+	}
+
+	response, err := param.CommonApi(getEndpointRequest)
+	if err != nil || !response.IsSuccess() {
+		support = false
+		return
+	}
+
+	var getEndpointResponse GetEndpointResponse
+	if err = json.Unmarshal([]byte(response.GetHttpContentString()), &getEndpointResponse); err != nil {
+		support = false
+		return
+	}
+	if !getEndpointResponse.Success || getEndpointResponse.Endpoints == nil {
+		support = false
+		return
+	}
+	if len(getEndpointResponse.Endpoints.Endpoint) <= 0 {
+		support = false
+		return
+	}
+	if len(getEndpointResponse.Endpoints.Endpoint[0].Endpoint) > 0 {
+		endpoint = getEndpointResponse.Endpoints.Endpoint[0].Endpoint
+		endpointCache.Lock()
+		endpointCache.cache[cacheKey] = endpoint
+		endpointCache.Unlock()
+		lastClearTimePerProduct.Lock()
+		lastClearTimePerProduct.cache[cacheKey] = time.Now().Unix()
+		lastClearTimePerProduct.Unlock()
+		support = true
+		return
+	}
+
+	support = false
+	return
+}
+
+// CheckCacheIsExpire reports whether the cached endpoint for cacheKey
+// has expired
+func CheckCacheIsExpire(cacheKey string) bool {
+	lastClearTimePerProduct.RLock()
+	lastClearTime := lastClearTimePerProduct.cache[cacheKey]
+	lastClearTimePerProduct.RUnlock()
+	if lastClearTime <= 0 {
+		lastClearTime = time.Now().Unix()
+		lastClearTimePerProduct.Lock()
+		lastClearTimePerProduct.cache[cacheKey] = lastClearTime
+		lastClearTimePerProduct.Unlock()
+	}
+
+	return time.Now().Unix()-lastClearTime > EndpointCacheExpireTime
+}
+
+// GetEndpointResponse is the response body of a DescribeEndpoints call
+type GetEndpointResponse struct {
+	Endpoints *EndpointsObj
+	RequestId string
+	Success   bool
+}
+
+// EndpointsObj wraps the Endpoint array
+type EndpointsObj struct {
+	Endpoint []EndpointObj
+}
+
+// EndpointObj wraps a single endpoint entry
+type EndpointObj struct {
+	Protocols map[string]string
+	Type      string
+	Namespace string
+	Id        string
+	// SerivceCode spelling mirrors the field name in the service response
+	SerivceCode string
+	Endpoint    string
+}
diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/endpoints/mapping_resolver.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/endpoints/mapping_resolver.go
new file mode 100644
index 000000000000..c345a87a0e13
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/endpoints/mapping_resolver.go
@@ -0,0 +1,43 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package endpoints
+
+import (
+	"fmt"
+	"strings"
+)
+
+const keyFormatter = "%s::%s"
+
+var endpointMapping = make(map[string]string)
+
+// AddEndpointMapping registers a fixed endpoint for a (region, product)
+// pair; keys are lowercased
+func AddEndpointMapping(regionId, productId, endpoint string) (err error) {
+	key := fmt.Sprintf(keyFormatter, strings.ToLower(regionId), strings.ToLower(productId))
+	endpointMapping[key] = endpoint
+	return nil
+}
+
+// MappingResolver resolves endpoints from entries registered via
+// AddEndpointMapping
+type MappingResolver struct{}
+
+// TryResolve returns the registered endpoint, if any
+func (resolver *MappingResolver) TryResolve(param *ResolveParam) (endpoint string, support bool, err error) {
+	key := fmt.Sprintf(keyFormatter, strings.ToLower(param.RegionId), strings.ToLower(param.Product))
+	endpoint, contains := endpointMapping[key]
+	return endpoint, contains, nil
+}
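A quick usage sketch for the mapping resolver; the product and endpoint values are placeholders:

```go
package main

import (
	"fmt"

	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/endpoints"
)

func main() {
	// Register a fixed endpoint for (region, product); keys are lowercased
	// internally, so the lookup below matches despite the casing.
	endpoints.AddEndpointMapping("cn-hangzhou", "Ess", "ess.cn-hangzhou.aliyuncs.com")

	resolver := &endpoints.MappingResolver{}
	endpoint, ok, err := resolver.TryResolve(&endpoints.ResolveParam{
		RegionId: "cn-hangzhou",
		Product:  "ess",
	})
	fmt.Println(endpoint, ok, err) // ess.cn-hangzhou.aliyuncs.com true <nil>
}
```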
diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/endpoints/resolver.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/endpoints/resolver.go
new file mode 100644
index 000000000000..52c0291abe4d
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/endpoints/resolver.go
@@ -0,0 +1,87 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package endpoints
+
+import (
+	"encoding/json"
+	"fmt"
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/errors"
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests"
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/responses"
+	"sync"
+)
+
+const (
+	// ResolveEndpointUserGuideLink is the user guide link appended to
+	// "can not resolve endpoint" errors; it is empty in this SDK snapshot
+	ResolveEndpointUserGuideLink = ""
+)
+
+var once sync.Once
+var resolvers []Resolver
+
+// Resolver knows how to turn a ResolveParam into an endpoint
+type Resolver interface {
+	TryResolve(param *ResolveParam) (endpoint string, support bool, err error)
+}
+
+// Resolve walks the resolver chain and returns the first endpoint a resolver supports
+func Resolve(param *ResolveParam) (endpoint string, err error) {
+	supportedResolvers := getAllResolvers()
+	for _, resolver := range supportedResolvers {
+		endpoint, supported, err := resolver.TryResolve(param)
+		if supported {
+			return endpoint, err
+		}
+	}
+
+	// no resolver supported the param
+	errorMsg := fmt.Sprintf(errors.CanNotResolveEndpointErrorMessage, param, ResolveEndpointUserGuideLink)
+	err = errors.NewClientError(errors.CanNotResolveEndpointErrorCode, errorMsg, nil)
+	return
+}
+
+func getAllResolvers() []Resolver {
+	once.Do(func() {
+		resolvers = []Resolver{
+			&SimpleHostResolver{},
+			&MappingResolver{},
+			&LocationResolver{},
+			&LocalRegionalResolver{},
+			&LocalGlobalResolver{},
+		}
+	})
+	return resolvers
+}
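+
+// Resolution order, for reference: an explicit Domain wins (SimpleHostResolver),
+// then user-registered mappings, then the remote Location service, then the
+// local regional and global endpoint tables (defined elsewhere in this change).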
+
+// ResolveParam carries everything a resolver needs to resolve an endpoint
+type ResolveParam struct {
+	Domain               string
+	Product              string
+	RegionId             string
+	LocationProduct      string
+	LocationEndpointType string
+	CommonApi            func(request *requests.CommonRequest) (response *responses.CommonResponse, err error) `json:"-"`
+}
+
+// String returns the param as a json string
+func (param *ResolveParam) String() string {
+	jsonBytes, err := json.Marshal(param)
+	if err != nil {
+		return fmt.Sprint("ResolveParam.String() process error:", err)
+	}
+	return string(jsonBytes)
+}
diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/endpoints/simple_host_resolver.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/endpoints/simple_host_resolver.go
new file mode 100644
index 000000000000..e7b1d8b7c39e
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/endpoints/simple_host_resolver.go
@@ -0,0 +1,28 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package endpoints
+
+// SimpleHostResolver resolves to the caller-supplied Domain, when one is set
+type SimpleHostResolver struct{}
+
+// TryResolve returns param.Domain as the endpoint if it is non-empty
+func (resolver *SimpleHostResolver) TryResolve(param *ResolveParam) (endpoint string, support bool, err error) {
+	if support = len(param.Domain) > 0; support {
+		endpoint = param.Domain
+	}
+	return
+}
diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/errors/client_error.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/errors/client_error.go
new file mode 100644
index 000000000000..7deaef06b095
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/errors/client_error.go
@@ -0,0 +1,102 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package errors
+
+import "fmt"
+
+/* const code and msg */
+const (
+	DefaultClientErrorStatus = 400
+	DefaultClientErrorCode   = "SDK.ClientError"
+
+	UnsupportedCredentialErrorCode    = "SDK.UnsupportedCredential"
+	UnsupportedCredentialErrorMessage = "Specified credential (type = %s) is not supported, please check"
+
+	CanNotResolveEndpointErrorCode    = "SDK.CanNotResolveEndpoint"
+	CanNotResolveEndpointErrorMessage = "Can not resolve endpoint(param = %s), please check your accessKey with secret, and read the user guide\n %s"
+
+	UnsupportedParamPositionErrorCode    = "SDK.UnsupportedParamPosition"
+	UnsupportedParamPositionErrorMessage = "Specified param position (%s) is not supported, please upgrade sdk and retry"
+
+	AsyncFunctionNotEnabledCode    = "SDK.AsyncFunctionNotEnabled"
+	AsyncFunctionNotEnabledMessage = "Async function is not enabled in client, please invoke 'client.EnableAsync' function"
+
+	UnknownRequestTypeErrorCode    = "SDK.UnknownRequestType"
+	UnknownRequestTypeErrorMessage = "Unknown Request Type: %s"
+
+	MissingParamErrorCode = "SDK.MissingParam"
+	InvalidParamErrorCode = "SDK.InvalidParam"
+
+	JsonUnmarshalErrorCode    = "SDK.JsonUnmarshalError"
+	JsonUnmarshalErrorMessage = "Failed to unmarshal response, but you can get the data via response.GetHttpStatusCode() and response.GetHttpContentString()"
+
+	TimeoutErrorCode    = "SDK.TimeoutError"
+	TimeoutErrorMessage = "The request timed out %s times(%s for retry), perhaps we should have the threshold raised a little?"
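+
+	// Usage sketch (mirrors resolver.go in this change):
+	//	msg := fmt.Sprintf(CanNotResolveEndpointErrorMessage, param, guideLink)
+	//	err := NewClientError(CanNotResolveEndpointErrorCode, msg, nil)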
+) + +// ClientError wrap error +type ClientError struct { + errorCode string + message string + originError error +} + +// NewClientError returns Error +func NewClientError(errorCode, message string, originErr error) Error { + return &ClientError{ + errorCode: errorCode, + message: message, + originError: originErr, + } +} + +// Error returns clientErrMsg +func (err *ClientError) Error() string { + clientErrMsg := fmt.Sprintf("[%s] %s", err.errorCode, err.message) + if err.originError != nil { + return clientErrMsg + "\ncaused by:\n" + err.originError.Error() + } + return clientErrMsg +} + +// OriginError returns originError +func (err *ClientError) OriginError() error { + return err.originError +} + +// HttpStatus returns DefaultClientErrorStatus +func (*ClientError) HttpStatus() int { + return DefaultClientErrorStatus +} + +// ErrorCode returns error code +func (err *ClientError) ErrorCode() string { + if err.errorCode == "" { + return DefaultClientErrorCode + } + return err.errorCode +} + +// Message returns error message +func (err *ClientError) Message() string { + return err.message +} + +// String returns error string +func (err *ClientError) String() string { + return err.Error() +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/errors/error.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/errors/error.go new file mode 100644 index 000000000000..cbf6b6177a49 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/errors/error.go @@ -0,0 +1,26 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package errors + +// Error wrap error interface +type Error interface { + error + HttpStatus() int + ErrorCode() string + Message() string + OriginError() error +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/errors/server_error.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/errors/server_error.go new file mode 100644 index 000000000000..7ad7e04efc9a --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/errors/server_error.go @@ -0,0 +1,137 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package errors + +import ( + "encoding/json" + "fmt" + "github.com/jmespath/go-jmespath" +) + +var wrapperList = []ServerErrorWrapper{ + &SignatureDostNotMatchWrapper{}, +} + +// ServerError wrap error +type ServerError struct { + httpStatus int + requestId string + hostId string + errorCode string + recommend string + message string + comment string +} + +// ServerErrorWrapper provides tryWrap func +type ServerErrorWrapper interface { + tryWrap(error *ServerError, wrapInfo map[string]string) (bool, *ServerError) +} + +// Error returns error msg +func (err *ServerError) Error() string { + return fmt.Sprintf("SDK.ServerError\nErrorCode: %s\nRecommend: %s\nRequestId: %s\nMessage: %s", + err.errorCode, err.comment+err.recommend, err.requestId, err.message) +} + +// NewServerError returns server error +func NewServerError(httpStatus int, responseContent, comment string) Error { + result := &ServerError{ + httpStatus: httpStatus, + message: responseContent, + comment: comment, + } + + var data interface{} + err := json.Unmarshal([]byte(responseContent), &data) + if err == nil { + requestId, _ := jmespath.Search("RequestId", data) + hostId, _ := jmespath.Search("HostId", data) + errorCode, _ := jmespath.Search("Code", data) + recommend, _ := jmespath.Search("Recommend", data) + message, _ := jmespath.Search("Message", data) + + if requestId != nil { + result.requestId = requestId.(string) + } + if hostId != nil { + result.hostId = hostId.(string) + } + if errorCode != nil { + result.errorCode = errorCode.(string) + } + if recommend != nil { + result.recommend = recommend.(string) + } + if message != nil { + result.message = message.(string) + } + } + + return result +} + +// WrapServerError returns ServerError +func WrapServerError(originError *ServerError, wrapInfo map[string]string) *ServerError { + for _, wrapper := range wrapperList { + ok, newError := wrapper.tryWrap(originError, wrapInfo) + if ok { + return newError + } + } + return originError +} + +// HttpStatus returns http status +func (err *ServerError) HttpStatus() int { + return err.httpStatus +} + +// ErrorCode returns error code +func (err *ServerError) ErrorCode() string { + return err.errorCode +} + +// Message returns message +func (err *ServerError) Message() string { + return err.message +} + +// OriginError returns nil +func (err *ServerError) OriginError() error { + return nil +} + +// HostId returns host id +func (err *ServerError) HostId() string { + return err.hostId +} + +// RequestId returns request id +func (err *ServerError) RequestId() string { + return err.requestId +} + +// Recommend returns error's recommend +func (err *ServerError) Recommend() string { + return err.recommend +} + +// Comment returns error's comment +func (err *ServerError) Comment() string { + return err.comment +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/errors/signature_does_not_match_wrapper.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/errors/signature_does_not_match_wrapper.go new file mode 100644 index 000000000000..9a5769a991ed --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/errors/signature_does_not_match_wrapper.go @@ -0,0 +1,47 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package errors
+
+import "strings"
+
+/* const code and msg prefix */
+const (
+	SignatureDostNotMatchErrorCode = "SignatureDoesNotMatch"
+	MessagePrefix                  = "Specified signature is not matched with our calculation. server string to sign is:"
+)
+
+// SignatureDostNotMatchWrapper wraps signature-mismatch server errors with a
+// recommendation (the name's spelling is kept from the upstream SDK)
+type SignatureDostNotMatchWrapper struct{}
+
+func (*SignatureDostNotMatchWrapper) tryWrap(error *ServerError, wrapInfo map[string]string) (bool, *ServerError) {
+	clientStringToSign := wrapInfo["StringToSign"]
+	if error.errorCode == SignatureDostNotMatchErrorCode && clientStringToSign != "" {
+		message := error.message
+		if strings.HasPrefix(message, MessagePrefix) {
+			serverStringToSign := message[len(MessagePrefix):]
+			if clientStringToSign == serverStringToSign {
+				// client and server signed the same string, so the secret itself is wrong
+				error.recommend = "Please check your AccessKeySecret"
+			} else {
+				error.recommend = "This may be a bug with the SDK and we hope you can submit this question in the " +
+					"github issue(https://github.com/aliyun/alibaba-cloud-sdk-go/issues), thanks very much"
+			}
+		}
+		return true, error
+	}
+	return false, nil
+}
diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests/acs_reqeust.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests/acs_reqeust.go
new file mode 100644
index 000000000000..af8ff4534891
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests/acs_reqeust.go
@@ -0,0 +1,335 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package requests + +import ( + "fmt" + "io" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/errors" + "reflect" + "strconv" +) + +/* const vars */ +const ( + RPC = "RPC" + ROA = "ROA" + + HTTP = "HTTP" + HTTPS = "HTTPS" + + DefaultHttpPort = "80" + + GET = "GET" + PUT = "PUT" + POST = "POST" + DELETE = "DELETE" + HEAD = "HEAD" + OPTIONS = "OPTIONS" + + Json = "application/json" + Xml = "application/xml" + Raw = "application/octet-stream" + Form = "application/x-www-form-urlencoded" + + Header = "Header" + Query = "Query" + Body = "Body" + Path = "Path" + + HeaderSeparator = "\n" +) + +// AcsRequest interface +type AcsRequest interface { + GetScheme() string + GetMethod() string + GetDomain() string + GetPort() string + GetRegionId() string + GetUrl() string + GetQueries() string + GetHeaders() map[string]string + GetQueryParams() map[string]string + GetFormParams() map[string]string + GetContent() []byte + GetBodyReader() io.Reader + GetStyle() string + GetProduct() string + GetVersion() string + GetActionName() string + GetAcceptFormat() string + GetLocationServiceCode() string + GetLocationEndpointType() string + + SetStringToSign(stringToSign string) + GetStringToSign() string + + SetDomain(domain string) + SetContent(content []byte) + SetScheme(scheme string) + BuildUrl() string + BuildQueries() string + + addHeaderParam(key, value string) + addQueryParam(key, value string) + addFormParam(key, value string) + addPathParam(key, value string) +} + +// base class +type baseRequest struct { + Scheme string + Method string + Domain string + Port string + RegionId string + + product string + version string + + actionName string + + AcceptFormat string + + QueryParams map[string]string + Headers map[string]string + FormParams map[string]string + Content []byte + + locationServiceCode string + locationEndpointType string + + queries string + + stringToSign string +} + +// GetQueryParams returns QueryParams +func (request *baseRequest) GetQueryParams() map[string]string { + return request.QueryParams +} + +// GetFormParams returns FormParams +func (request *baseRequest) GetFormParams() map[string]string { + return request.FormParams +} + +// GetContent returns Content +func (request *baseRequest) GetContent() []byte { + return request.Content +} + +// GetVersion returns version +func (request *baseRequest) GetVersion() string { + return request.version +} + +// GetActionName returns actionName +func (request *baseRequest) GetActionName() string { + return request.actionName +} + +// SetContent returns content +func (request *baseRequest) SetContent(content []byte) { + request.Content = content +} + +func (request *baseRequest) addHeaderParam(key, value string) { + request.Headers[key] = value +} + +func (request *baseRequest) addQueryParam(key, value string) { + request.QueryParams[key] = value +} + +func (request *baseRequest) addFormParam(key, value string) { + request.FormParams[key] = value +} + +// GetAcceptFormat returns AcceptFormat +func (request *baseRequest) GetAcceptFormat() string { + return request.AcceptFormat +} + +// GetLocationServiceCode returns locationServiceCode +func (request *baseRequest) GetLocationServiceCode() string { + return request.locationServiceCode +} + +// GetLocationEndpointType returns locationEndpointType +func (request *baseRequest) GetLocationEndpointType() string { + return request.locationEndpointType +} + +// GetProduct returns product +func (request *baseRequest) GetProduct() string { + return 
request.product +} + +// GetScheme returns scheme +func (request *baseRequest) GetScheme() string { + return request.Scheme +} + +// SetScheme sets scheme +func (request *baseRequest) SetScheme(scheme string) { + request.Scheme = scheme +} + +// GetMethod returns Method +func (request *baseRequest) GetMethod() string { + return request.Method +} + +// GetDomain returns Domain +func (request *baseRequest) GetDomain() string { + return request.Domain +} + +// SetDomain sets host +func (request *baseRequest) SetDomain(host string) { + request.Domain = host +} + +// GetPort returns port +func (request *baseRequest) GetPort() string { + return request.Port +} + +// GetRegionId returns regionId +func (request *baseRequest) GetRegionId() string { + return request.RegionId +} + +// GetHeaders returns headers +func (request *baseRequest) GetHeaders() map[string]string { + return request.Headers +} + +// SetContentType sets content type +func (request *baseRequest) SetContentType(contentType string) { + request.Headers["Content-Type"] = contentType +} + +// GetContentType returns content type +func (request *baseRequest) GetContentType() (contentType string, contains bool) { + contentType, contains = request.Headers["Content-Type"] + return +} + +// SetStringToSign sets stringToSign +func (request *baseRequest) SetStringToSign(stringToSign string) { + request.stringToSign = stringToSign +} + +// GetStringToSign returns stringToSign +func (request *baseRequest) GetStringToSign() string { + return request.stringToSign +} + +func defaultBaseRequest() (request *baseRequest) { + request = &baseRequest{ + Scheme: "", + AcceptFormat: "JSON", + Method: GET, + QueryParams: make(map[string]string), + Headers: map[string]string{ + "x-sdk-client": "golang/1.0.0", + "x-sdk-invoke-type": "normal", + "Accept-Encoding": "identity", + }, + FormParams: make(map[string]string), + } + return +} + +// InitParams returns params +func InitParams(request AcsRequest) (err error) { + requestValue := reflect.ValueOf(request).Elem() + err = flatRepeatedList(requestValue, request, "", "") + return +} + +func flatRepeatedList(dataValue reflect.Value, request AcsRequest, position, prefix string) (err error) { + dataType := dataValue.Type() + for i := 0; i < dataType.NumField(); i++ { + field := dataType.Field(i) + name, containsNameTag := field.Tag.Lookup("name") + fieldPosition := position + if fieldPosition == "" { + fieldPosition, _ = field.Tag.Lookup("position") + } + typeTag, containsTypeTag := field.Tag.Lookup("type") + if containsNameTag { + if !containsTypeTag { + // simple param + key := prefix + name + value := dataValue.Field(i).String() + err = addParam(request, fieldPosition, key, value) + if err != nil { + return + } + } else if typeTag == "Repeated" { + // repeated param + repeatedFieldValue := dataValue.Field(i) + if repeatedFieldValue.Kind() != reflect.Slice { + // possible value: {"[]string", "*[]struct"}, we must call Elem() in the last condition + repeatedFieldValue = repeatedFieldValue.Elem() + } + if repeatedFieldValue.IsValid() && !repeatedFieldValue.IsNil() { + for m := 0; m < repeatedFieldValue.Len(); m++ { + elementValue := repeatedFieldValue.Index(m) + key := prefix + name + "." 
+ strconv.Itoa(m+1) + if elementValue.Type().String() == "string" { + value := elementValue.String() + err = addParam(request, fieldPosition, key, value) + if err != nil { + return + } + } else { + err = flatRepeatedList(elementValue, request, fieldPosition, key+".") + if err != nil { + return + } + } + } + } + } + } + } + return +} + +func addParam(request AcsRequest, position, name, value string) (err error) { + if len(value) > 0 { + switch position { + case Header: + request.addHeaderParam(name, value) + case Query: + request.addQueryParam(name, value) + case Path: + request.addPathParam(name, value) + case Body: + request.addFormParam(name, value) + default: + errMsg := fmt.Sprintf(errors.UnsupportedParamPositionErrorMessage, position) + err = errors.NewClientError(errors.UnsupportedParamPositionErrorCode, errMsg, nil) + } + } + return +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests/common_request.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests/common_request.go new file mode 100644 index 000000000000..21f07d8b7ddc --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests/common_request.go @@ -0,0 +1,154 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package requests
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/errors"
+	"strings"
+)
+
+// CommonRequest wraps the base request and can stand in for any RPC or ROA style request
+type CommonRequest struct {
+	*baseRequest
+
+	Version string
+	ApiName string
+	Product string
+
+	// roa params
+	PathPattern string
+	PathParams  map[string]string
+
+	Ontology AcsRequest
+}
+
+// NewCommonRequest returns a CommonRequest with defaults applied
+func NewCommonRequest() (request *CommonRequest) {
+	request = &CommonRequest{
+		baseRequest: defaultBaseRequest(),
+	}
+	request.Headers["x-sdk-invoke-type"] = "common"
+	request.PathParams = make(map[string]string)
+	return
+}
+
+// String renders the request in an HTTP-message-like form, for debugging
+func (request *CommonRequest) String() string {
+	request.TransToAcsRequest()
+	request.BuildQueries()
+	request.BuildUrl()
+
+	resultBuilder := bytes.Buffer{}
+
+	mapOutput := func(m map[string]string) {
+		if len(m) > 0 {
+			for key, value := range m {
+				resultBuilder.WriteString(key + ": " + value + "\n")
+			}
+		}
+	}
+
+	// Request Line
+	resultBuilder.WriteString("\n")
+	resultBuilder.WriteString(fmt.Sprintf("%s %s %s/1.1\n", request.Method, request.GetQueries(), strings.ToUpper(request.Scheme)))
+
+	// Headers
+	resultBuilder.WriteString("Host" + ": " + request.Domain + "\n")
+	mapOutput(request.Headers)
+
+	resultBuilder.WriteString("\n")
+	// Body
+	if len(request.Content) > 0 {
+		resultBuilder.WriteString(string(request.Content) + "\n")
+	} else {
+		mapOutput(request.FormParams)
+	}
+
+	return resultBuilder.String()
+}
+
+// TransToAcsRequest converts the common request into a concrete RPC or ROA request
+func (request *CommonRequest) TransToAcsRequest() {
+	// NOTE: the validation errors below are constructed but discarded; this
+	// mirrors the upstream SDK, which does not return an error from this method
+	if len(request.Version) == 0 {
+		errors.NewClientError(errors.MissingParamErrorCode, "Common request [version] is required", nil)
+	}
+	if len(request.ApiName) == 0 && len(request.PathPattern) == 0 {
+		errors.NewClientError(errors.MissingParamErrorCode, "At least one of [ApiName] and [PathPattern] should have a value", nil)
+	}
+	if len(request.Domain) == 0 && len(request.Product) == 0 {
+		errors.NewClientError(errors.MissingParamErrorCode, "At least one of [Domain] and [Product] should have a value", nil)
+	}
+
+	if len(request.PathPattern) > 0 {
+		roaRequest := &RoaRequest{}
+		roaRequest.initWithCommonRequest(request)
+		request.Ontology = roaRequest
+	} else {
+		rpcRequest := &RpcRequest{}
+		rpcRequest.baseRequest = request.baseRequest
+		rpcRequest.product = request.Product
+		rpcRequest.version = request.Version
+		rpcRequest.actionName = request.ApiName
+		request.Ontology = rpcRequest
+	}
+}
+
+// BuildUrl returns the full request url
+func (request *CommonRequest) BuildUrl() string {
+	if len(request.Port) > 0 {
+		return strings.ToLower(request.Scheme) + "://" + request.Domain + ":" + request.Port + request.BuildQueries()
+	}
+
+	return strings.ToLower(request.Scheme) + "://" + request.Domain + request.BuildQueries()
+}
+
+// BuildQueries delegates to the underlying RPC/ROA request
+func (request *CommonRequest) BuildQueries() string {
+	return request.Ontology.BuildQueries()
+}
+
+// GetUrl returns the full request url
+func (request *CommonRequest) GetUrl() string {
+	if len(request.Port) > 0 {
+		return strings.ToLower(request.Scheme) + "://" + request.Domain + ":" + request.Port + request.GetQueries()
+	}
+
+	return strings.ToLower(request.Scheme) + "://" + request.Domain + request.GetQueries()
+}
+
+// GetQueries delegates to the underlying RPC/ROA request
+func (request *CommonRequest) GetQueries() string {
+	return request.Ontology.GetQueries()
+}
+
+// GetBodyReader delegates to the underlying RPC/ROA request
+func (request *CommonRequest) GetBodyReader() io.Reader {
+	
return request.Ontology.GetBodyReader() +} + +// GetStyle returns style +func (request *CommonRequest) GetStyle() string { + return request.Ontology.GetStyle() +} + +func (request *CommonRequest) addPathParam(key, value string) { + request.PathParams[key] = value +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests/roa_request.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests/roa_request.go new file mode 100644 index 000000000000..4340b26df372 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests/roa_request.go @@ -0,0 +1,155 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package requests + +import ( + "bytes" + "io" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/utils" + "net/url" + "sort" + "strings" +) + +// RoaRequest wrap base request +type RoaRequest struct { + *baseRequest + pathPattern string + PathParams map[string]string +} + +// GetStyle returns ROA +func (*RoaRequest) GetStyle() string { + return ROA +} + +// GetBodyReader returns body +func (request *RoaRequest) GetBodyReader() io.Reader { + if request.FormParams != nil && len(request.FormParams) > 0 { + formString := utils.GetUrlFormedMap(request.FormParams) + return strings.NewReader(formString) + } else if len(request.Content) > 0 { + return bytes.NewReader(request.Content) + } else { + return nil + } +} + +// GetQueries returns queries +func (request *RoaRequest) GetQueries() string { + return request.queries +} + +// BuildQueries for sign method, need not url encoded +func (request *RoaRequest) BuildQueries() string { + return request.buildQueries(false) +} + +func (request *RoaRequest) buildQueries(needParamEncode bool) string { + // replace path params with value + path := request.pathPattern + for key, value := range request.PathParams { + path = strings.Replace(path, "["+key+"]", value, 1) + } + + queryParams := request.QueryParams + // check if path contains params + splitArray := strings.Split(path, "?") + path = splitArray[0] + if len(splitArray) > 1 && len(splitArray[1]) > 0 { + queryParams[splitArray[1]] = "" + } + // sort QueryParams by key + var queryKeys []string + for key := range queryParams { + queryKeys = append(queryKeys, key) + } + sort.Strings(queryKeys) + + // append urlBuilder + urlBuilder := bytes.Buffer{} + urlBuilder.WriteString(path) + if len(queryKeys) > 0 { + urlBuilder.WriteString("?") + } + for i := 0; i < len(queryKeys); i++ { + queryKey := queryKeys[i] + urlBuilder.WriteString(queryKey) + if value := queryParams[queryKey]; len(value) > 0 { + urlBuilder.WriteString("=") + if needParamEncode { + urlBuilder.WriteString(url.QueryEscape(value)) + } else { + urlBuilder.WriteString(value) + } + } + if i < len(queryKeys)-1 { + urlBuilder.WriteString("&") + } + } + result := urlBuilder.String() + result = popStandardUrlencode(result) + request.queries = result + return request.queries +} + +func 
popStandardUrlencode(stringToSign string) (result string) { + result = strings.Replace(stringToSign, "+", "%20", -1) + result = strings.Replace(result, "*", "%2A", -1) + result = strings.Replace(result, "%7E", "~", -1) + return +} + +// GetUrl returns url +func (request *RoaRequest) GetUrl() string { + return strings.ToLower(request.Scheme) + "://" + request.Domain + ":" + request.Port + request.GetQueries() +} + +// BuildUrl creates url +func (request *RoaRequest) BuildUrl() string { + // for network trans, need url encoded + return strings.ToLower(request.Scheme) + "://" + request.Domain + ":" + request.Port + request.buildQueries(true) +} + +func (request *RoaRequest) addPathParam(key, value string) { + request.PathParams[key] = value +} + +// InitWithApiInfo creates api info +func (request *RoaRequest) InitWithApiInfo(product, version, action, uriPattern, serviceCode, endpointType string) { + request.baseRequest = defaultBaseRequest() + request.PathParams = make(map[string]string) + request.Headers["x-acs-version"] = version + request.pathPattern = uriPattern + request.locationServiceCode = serviceCode + request.locationEndpointType = endpointType + //request.product = product + //request.version = version + //request.actionName = action +} + +func (request *RoaRequest) initWithCommonRequest(commonRequest *CommonRequest) { + request.baseRequest = commonRequest.baseRequest + request.PathParams = commonRequest.PathParams + //request.product = commonRequest.Product + //request.version = commonRequest.Version + request.Headers["x-acs-version"] = commonRequest.Version + //request.actionName = commonRequest.ApiName + request.pathPattern = commonRequest.PathPattern + request.locationServiceCode = "" + request.locationEndpointType = "" +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests/rpc_request.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests/rpc_request.go new file mode 100644 index 000000000000..5e701d3f850b --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests/rpc_request.go @@ -0,0 +1,92 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package requests + +import ( + "io" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/utils" + "strings" +) + +// RpcRequest wrap base request +type RpcRequest struct { + *baseRequest +} + +func (request *RpcRequest) init() { + request.baseRequest = defaultBaseRequest() + request.Method = POST +} + +// GetStyle returns RPC +func (*RpcRequest) GetStyle() string { + return RPC +} + +// GetBodyReader return body +func (request *RpcRequest) GetBodyReader() io.Reader { + if request.FormParams != nil && len(request.FormParams) > 0 { + formString := utils.GetUrlFormedMap(request.FormParams) + return strings.NewReader(formString) + } + return strings.NewReader("") +} + +// BuildQueries builds queries +func (request *RpcRequest) BuildQueries() string { + request.queries = "/?" 
+ utils.GetUrlFormedMap(request.QueryParams) + return request.queries +} + +// GetQueries returns queries +func (request *RpcRequest) GetQueries() string { + return request.queries +} + +// BuildUrl creates url +func (request *RpcRequest) BuildUrl() string { + return strings.ToLower(request.Scheme) + "://" + request.Domain + ":" + request.Port + request.BuildQueries() +} + +// GetUrl returns url +func (request *RpcRequest) GetUrl() string { + return strings.ToLower(request.Scheme) + "://" + request.Domain + request.GetQueries() +} + +// GetVersion returns version +func (request *RpcRequest) GetVersion() string { + return request.version +} + +// GetActionName returns action name +func (request *RpcRequest) GetActionName() string { + return request.actionName +} + +func (request *RpcRequest) addPathParam(key, value string) { + panic("not support") +} + +// InitWithApiInfo init api info +func (request *RpcRequest) InitWithApiInfo(product, version, action, serviceCode, endpointType string) { + request.init() + request.product = product + request.version = version + request.actionName = action + request.locationServiceCode = serviceCode + request.locationEndpointType = endpointType +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests/types.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests/types.go new file mode 100644 index 000000000000..9705d9712edb --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests/types.go @@ -0,0 +1,83 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package requests
+
+import "strconv"
+
+// Integer wraps an integer value as a string
+type Integer string
+
+// NewInteger converts an int into an Integer
+func NewInteger(integer int) Integer {
+	return Integer(strconv.Itoa(integer))
+}
+
+// HasValue returns true if the integer is not empty
+func (integer Integer) HasValue() bool {
+	return integer != ""
+}
+
+// GetValue returns the int value
+func (integer Integer) GetValue() (int, error) {
+	return strconv.Atoi(string(integer))
+}
+
+// NewInteger64 converts an int64 into an Integer
+func NewInteger64(integer int64) Integer {
+	return Integer(strconv.FormatInt(integer, 10))
+}
+
+// GetValue64 returns the int64 value
+func (integer Integer) GetValue64() (int64, error) {
+	// bitSize 64, so values beyond the platform int range still parse on 32-bit builds
+	return strconv.ParseInt(string(integer), 10, 64)
+}
+
+// Boolean wraps a bool value as a string
+type Boolean string
+
+// NewBoolean converts a bool into a Boolean
+func NewBoolean(value bool) Boolean {
+	return Boolean(strconv.FormatBool(value))
+}
+
+// HasValue returns true if the boolean is not empty
+func (boolean Boolean) HasValue() bool {
+	return boolean != ""
+}
+
+// GetValue returns the bool value
+func (boolean Boolean) GetValue() (bool, error) {
+	return strconv.ParseBool(string(boolean))
+}
+
+// Float wraps a float64 value as a string
+type Float string
+
+// NewFloat converts a float64 into a Float
+func NewFloat(f float64) Float {
+	return Float(strconv.FormatFloat(f, 'f', 6, 64))
+}
+
+// HasValue returns true if the float is not empty
+func (float Float) HasValue() bool {
+	return float != ""
+}
+
+// GetValue returns the float64 value
+func (float Float) GetValue() (float64, error) {
+	return strconv.ParseFloat(string(float), 64)
+}
diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/responses/json_parser.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/responses/json_parser.go
new file mode 100644
index 000000000000..f981cf42f437
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/responses/json_parser.go
@@ -0,0 +1,357 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package responses + +import ( + "encoding/json" + "github.com/json-iterator/go" + "io" + "math" + "strconv" + "strings" + "sync" + "unsafe" +) + +const maxUint = ^uint(0) +const maxInt = int(maxUint >> 1) +const minInt = -maxInt - 1 + +var jsonParser jsoniter.API +var initJson = &sync.Once{} + +func initJsonParserOnce() { + initJson.Do(func() { + registerBetterFuzzyDecoder() + jsonParser = jsoniter.ConfigCompatibleWithStandardLibrary + }) +} + +func registerBetterFuzzyDecoder() { + jsoniter.RegisterTypeDecoder("string", &nullableFuzzyStringDecoder{}) + jsoniter.RegisterTypeDecoder("bool", &fuzzyBoolDecoder{}) + jsoniter.RegisterTypeDecoder("float32", &nullableFuzzyFloat32Decoder{}) + jsoniter.RegisterTypeDecoder("float64", &nullableFuzzyFloat64Decoder{}) + jsoniter.RegisterTypeDecoder("int", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(maxInt) || val < float64(minInt) { + iter.ReportError("fuzzy decode int", "exceed range") + return + } + *((*int)(ptr)) = int(val) + } else { + *((*int)(ptr)) = iter.ReadInt() + } + }}) + jsoniter.RegisterTypeDecoder("uint", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(maxUint) || val < 0 { + iter.ReportError("fuzzy decode uint", "exceed range") + return + } + *((*uint)(ptr)) = uint(val) + } else { + *((*uint)(ptr)) = iter.ReadUint() + } + }}) + jsoniter.RegisterTypeDecoder("int8", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxInt8) || val < float64(math.MinInt8) { + iter.ReportError("fuzzy decode int8", "exceed range") + return + } + *((*int8)(ptr)) = int8(val) + } else { + *((*int8)(ptr)) = iter.ReadInt8() + } + }}) + jsoniter.RegisterTypeDecoder("uint8", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxUint8) || val < 0 { + iter.ReportError("fuzzy decode uint8", "exceed range") + return + } + *((*uint8)(ptr)) = uint8(val) + } else { + *((*uint8)(ptr)) = iter.ReadUint8() + } + }}) + jsoniter.RegisterTypeDecoder("int16", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxInt16) || val < float64(math.MinInt16) { + iter.ReportError("fuzzy decode int16", "exceed range") + return + } + *((*int16)(ptr)) = int16(val) + } else { + *((*int16)(ptr)) = iter.ReadInt16() + } + }}) + jsoniter.RegisterTypeDecoder("uint16", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxUint16) || val < 0 { + iter.ReportError("fuzzy decode uint16", "exceed range") + return + } + *((*uint16)(ptr)) = uint16(val) + } else { + *((*uint16)(ptr)) = iter.ReadUint16() + } + }}) + jsoniter.RegisterTypeDecoder("int32", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxInt32) || val < float64(math.MinInt32) { + iter.ReportError("fuzzy decode int32", "exceed range") + return + } + *((*int32)(ptr)) = int32(val) + } else { + *((*int32)(ptr)) = iter.ReadInt32() + } + }}) + 
jsoniter.RegisterTypeDecoder("uint32", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxUint32) || val < 0 { + iter.ReportError("fuzzy decode uint32", "exceed range") + return + } + *((*uint32)(ptr)) = uint32(val) + } else { + *((*uint32)(ptr)) = iter.ReadUint32() + } + }}) + jsoniter.RegisterTypeDecoder("int64", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxInt64) || val < float64(math.MinInt64) { + iter.ReportError("fuzzy decode int64", "exceed range") + return + } + *((*int64)(ptr)) = int64(val) + } else { + *((*int64)(ptr)) = iter.ReadInt64() + } + }}) + jsoniter.RegisterTypeDecoder("uint64", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxUint64) || val < 0 { + iter.ReportError("fuzzy decode uint64", "exceed range") + return + } + *((*uint64)(ptr)) = uint64(val) + } else { + *((*uint64)(ptr)) = iter.ReadUint64() + } + }}) +} + +type nullableFuzzyStringDecoder struct { +} + +func (decoder *nullableFuzzyStringDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + valueType := iter.WhatIsNext() + switch valueType { + case jsoniter.NumberValue: + var number json.Number + iter.ReadVal(&number) + *((*string)(ptr)) = string(number) + case jsoniter.StringValue: + *((*string)(ptr)) = iter.ReadString() + case jsoniter.BoolValue: + *((*string)(ptr)) = strconv.FormatBool(iter.ReadBool()) + case jsoniter.NilValue: + iter.ReadNil() + *((*string)(ptr)) = "" + default: + iter.ReportError("fuzzyStringDecoder", "not number or string or bool") + } +} + +type fuzzyBoolDecoder struct { +} + +func (decoder *fuzzyBoolDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + valueType := iter.WhatIsNext() + switch valueType { + case jsoniter.BoolValue: + *((*bool)(ptr)) = iter.ReadBool() + case jsoniter.NumberValue: + var number json.Number + iter.ReadVal(&number) + num, err := number.Int64() + if err != nil { + iter.ReportError("fuzzyBoolDecoder", "get value from json.number failed") + } + if num == 0 { + *((*bool)(ptr)) = false + } else { + *((*bool)(ptr)) = true + } + case jsoniter.StringValue: + strValue := strings.ToLower(iter.ReadString()) + if strValue == "true" { + *((*bool)(ptr)) = true + } else if strValue == "false" || strValue == "" { + *((*bool)(ptr)) = false + } else { + iter.ReportError("fuzzyBoolDecoder", "unsupported bool value: "+strValue) + } + case jsoniter.NilValue: + iter.ReadNil() + *((*bool)(ptr)) = false + default: + iter.ReportError("fuzzyBoolDecoder", "not number or string or nil") + } +} + +type tolerateEmptyArrayDecoder struct { + valDecoder jsoniter.ValDecoder +} + +func (decoder *tolerateEmptyArrayDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if iter.WhatIsNext() == jsoniter.ArrayValue { + iter.Skip() + newIter := iter.Pool().BorrowIterator([]byte("{}")) + defer iter.Pool().ReturnIterator(newIter) + decoder.valDecoder.Decode(ptr, newIter) + } else { + decoder.valDecoder.Decode(ptr, iter) + } +} + +type nullableFuzzyIntegerDecoder struct { + fun func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) +} + +func (decoder *nullableFuzzyIntegerDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + valueType := iter.WhatIsNext() + var str string + switch valueType { + case 
jsoniter.NumberValue: + var number json.Number + iter.ReadVal(&number) + str = string(number) + case jsoniter.StringValue: + str = iter.ReadString() + // support empty string + if str == "" { + str = "0" + } + case jsoniter.BoolValue: + if iter.ReadBool() { + str = "1" + } else { + str = "0" + } + case jsoniter.NilValue: + iter.ReadNil() + str = "0" + default: + iter.ReportError("fuzzyIntegerDecoder", "not number or string") + } + newIter := iter.Pool().BorrowIterator([]byte(str)) + defer iter.Pool().ReturnIterator(newIter) + isFloat := strings.IndexByte(str, '.') != -1 + decoder.fun(isFloat, ptr, newIter) + if newIter.Error != nil && newIter.Error != io.EOF { + iter.Error = newIter.Error + } +} + +type nullableFuzzyFloat32Decoder struct { +} + +func (decoder *nullableFuzzyFloat32Decoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + valueType := iter.WhatIsNext() + var str string + switch valueType { + case jsoniter.NumberValue: + *((*float32)(ptr)) = iter.ReadFloat32() + case jsoniter.StringValue: + str = iter.ReadString() + // support empty string + if str == "" { + *((*float32)(ptr)) = 0 + return + } + newIter := iter.Pool().BorrowIterator([]byte(str)) + defer iter.Pool().ReturnIterator(newIter) + *((*float32)(ptr)) = newIter.ReadFloat32() + if newIter.Error != nil && newIter.Error != io.EOF { + iter.Error = newIter.Error + } + case jsoniter.BoolValue: + // support bool to float32 + if iter.ReadBool() { + *((*float32)(ptr)) = 1 + } else { + *((*float32)(ptr)) = 0 + } + case jsoniter.NilValue: + iter.ReadNil() + *((*float32)(ptr)) = 0 + default: + iter.ReportError("nullableFuzzyFloat32Decoder", "not number or string") + } +} + +type nullableFuzzyFloat64Decoder struct { +} + +func (decoder *nullableFuzzyFloat64Decoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + valueType := iter.WhatIsNext() + var str string + switch valueType { + case jsoniter.NumberValue: + *((*float64)(ptr)) = iter.ReadFloat64() + case jsoniter.StringValue: + str = iter.ReadString() + // support empty string + if str == "" { + *((*float64)(ptr)) = 0 + return + } + newIter := iter.Pool().BorrowIterator([]byte(str)) + defer iter.Pool().ReturnIterator(newIter) + *((*float64)(ptr)) = newIter.ReadFloat64() + if newIter.Error != nil && newIter.Error != io.EOF { + iter.Error = newIter.Error + } + case jsoniter.BoolValue: + // support bool to float64 + if iter.ReadBool() { + *((*float64)(ptr)) = 1 + } else { + *((*float64)(ptr)) = 0 + } + case jsoniter.NilValue: + // support empty string + iter.ReadNil() + *((*float64)(ptr)) = 0 + default: + iter.ReportError("nullableFuzzyFloat32Decoder", "not number or string") + } +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/responses/response.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/responses/response.go new file mode 100644 index 000000000000..22aa86d1f110 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/responses/response.go @@ -0,0 +1,156 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package responses
+
+import (
+	"bytes"
+	"encoding/xml"
+	"fmt"
+	"io/ioutil"
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/errors"
+	"net/http"
+	"strings"
+)
+
+// AcsResponse is the interface all API responses implement
+type AcsResponse interface {
+	IsSuccess() bool
+	GetHttpStatus() int
+	GetHttpHeaders() map[string][]string
+	GetHttpContentString() string
+	GetHttpContentBytes() []byte
+	GetOriginHttpResponse() *http.Response
+	parseFromHttpResponse(httpResponse *http.Response) error
+}
+
+// Unmarshal parses the http response body into the given response object
+func Unmarshal(response AcsResponse, httpResponse *http.Response, format string) (err error) {
+	err = response.parseFromHttpResponse(httpResponse)
+	if err != nil {
+		return
+	}
+	if !response.IsSuccess() {
+		err = errors.NewServerError(response.GetHttpStatus(), response.GetHttpContentString(), "")
+		return
+	}
+	if _, isCommonResponse := response.(CommonResponse); isCommonResponse {
+		// common response need not unmarshal
+		return
+	}
+
+	if len(response.GetHttpContentBytes()) == 0 {
+		return
+	}
+
+	if strings.ToUpper(format) == "JSON" {
+		initJsonParserOnce()
+		err = jsonParser.Unmarshal(response.GetHttpContentBytes(), response)
+		if err != nil {
+			err = errors.NewClientError(errors.JsonUnmarshalErrorCode, errors.JsonUnmarshalErrorMessage, err)
+		}
+	} else if strings.ToUpper(format) == "XML" {
+		err = xml.Unmarshal(response.GetHttpContentBytes(), response)
+	}
+	return
+}
+
+// BaseResponse wraps the origin http response
+type BaseResponse struct {
+	httpStatus         int
+	httpHeaders        map[string][]string
+	httpContentString  string
+	httpContentBytes   []byte
+	originHttpResponse *http.Response
+}
+
+// GetHttpStatus returns httpStatus
+func (baseResponse *BaseResponse) GetHttpStatus() int {
+	return baseResponse.httpStatus
+}
+
+// GetHttpHeaders returns httpHeaders
+func (baseResponse *BaseResponse) GetHttpHeaders() map[string][]string {
+	return baseResponse.httpHeaders
+}
+
+// GetHttpContentString returns httpContentString
+func (baseResponse *BaseResponse) GetHttpContentString() string {
+	return baseResponse.httpContentString
+}
+
+// GetHttpContentBytes returns httpContentBytes
+func (baseResponse *BaseResponse) GetHttpContentBytes() []byte {
+	return baseResponse.httpContentBytes
+}
+
+// GetOriginHttpResponse returns originHttpResponse
+func (baseResponse *BaseResponse) GetOriginHttpResponse() *http.Response {
+	return baseResponse.originHttpResponse
+}
+
+// IsSuccess reports whether the http status is a 2xx
+func (baseResponse *BaseResponse) IsSuccess() bool {
+	return baseResponse.GetHttpStatus() >= 200 && baseResponse.GetHttpStatus() < 300
+}
+
+func (baseResponse *BaseResponse) parseFromHttpResponse(httpResponse *http.Response) (err error) {
+	defer httpResponse.Body.Close()
+	body, err := ioutil.ReadAll(httpResponse.Body)
+	if err != nil {
+		return
+	}
+	baseResponse.httpStatus = httpResponse.StatusCode
+	baseResponse.httpHeaders = httpResponse.Header
+	baseResponse.httpContentBytes = body
+	baseResponse.httpContentString = string(body)
+	baseResponse.originHttpResponse = httpResponse
+	return
+}
+
+// String renders the response in an HTTP-message-like form, for debugging
+func (baseResponse *BaseResponse) String() string {
+	resultBuilder := bytes.Buffer{}
+	// statusCode
+	resultBuilder.WriteString("\n")
+	resultBuilder.WriteString(fmt.Sprintf("%s %s\n", baseResponse.originHttpResponse.Proto, baseResponse.originHttpResponse.Status))
+	// 
httpHeaders + //resultBuilder.WriteString("Headers:\n") + for key, value := range baseResponse.httpHeaders { + resultBuilder.WriteString(key + ": " + strings.Join(value, ";") + "\n") + } + resultBuilder.WriteString("\n") + // content + //resultBuilder.WriteString("Content:\n") + resultBuilder.WriteString(baseResponse.httpContentString + "\n") + return resultBuilder.String() +} + +// CommonResponse wrap base response +type CommonResponse struct { + *BaseResponse +} + +// NewCommonResponse return common response +func NewCommonResponse() (response *CommonResponse) { + return &CommonResponse{ + BaseResponse: &BaseResponse{}, + } +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/utils/utils.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/utils/utils.go new file mode 100644 index 000000000000..68da41409fa9 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/utils/utils.go @@ -0,0 +1,128 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils + +import ( + "crypto/md5" + "encoding/base64" + "encoding/hex" + "encoding/json" + "fmt" + "github.com/satori/go.uuid" + "net/url" + "reflect" + "strconv" + "time" +) + +/* if you use go 1.10 or higher, you can hack this util by these to avoid "TimeZone.zip not found" on Windows */ +var ( + LoadLocationFromTZData func(name string, data []byte) (*time.Location, error) + TZData []byte +) + +// GetUUIDV4 returns uuidHex +func GetUUIDV4() (uuidHex string) { + uuidV4 := uuid.NewV4() + uuidHex = hex.EncodeToString(uuidV4.Bytes()) + return +} + +// GetMD5Base64 returns base64Value +func GetMD5Base64(bytes []byte) (base64Value string) { + md5Ctx := md5.New() + md5Ctx.Write(bytes) + md5Value := md5Ctx.Sum(nil) + base64Value = base64.StdEncoding.EncodeToString(md5Value) + return +} + +// GetGMTLocation returns gmt location +func GetGMTLocation() (*time.Location, error) { + if LoadLocationFromTZData != nil && TZData != nil { + return LoadLocationFromTZData("GMT", TZData) + } + return time.LoadLocation("GMT") +} + +// GetTimeInFormatISO8601 returns time in ISO format +func GetTimeInFormatISO8601() (timeStr string) { + gmt, err := GetGMTLocation() + + if err != nil { + panic(err) + } + return time.Now().In(gmt).Format("2006-01-02T15:04:05Z") +} + +// GetTimeInFormatRFC2616 returns time in RFC format +func GetTimeInFormatRFC2616() (timeStr string) { + gmt, err := GetGMTLocation() + + if err != nil { + panic(err) + } + return time.Now().In(gmt).Format("Mon, 02 Jan 2006 15:04:05 GMT") +} + +// GetUrlFormedMap returns url formed map +func GetUrlFormedMap(source map[string]string) (urlEncoded string) { + urlEncoder := url.Values{} + for key, value := range source { + urlEncoder.Add(key, value) + } + urlEncoded = urlEncoder.Encode() + return +} + +// GetFromJsonString returns json string +func GetFromJsonString(jsonString, key string) (result string, err error) { + var responseMap map[string]*json.RawMessage + err = 
json.Unmarshal([]byte(jsonString), &responseMap)
+	if err != nil {
+		return
+	}
+	value, contains := responseMap[key]
+	if !contains || value == nil {
+		err = fmt.Errorf("key %q not found in json string", key)
+		return
+	}
+	err = json.Unmarshal(*value, &result)
+	return
+}
+
+// InitStructWithDefaultTag fills the struct's fields from their `default` tags
+func InitStructWithDefaultTag(bean interface{}) {
+	configType := reflect.TypeOf(bean)
+	for i := 0; i < configType.Elem().NumField(); i++ {
+		field := configType.Elem().Field(i)
+		defaultValue := field.Tag.Get("default")
+		if defaultValue == "" {
+			continue
+		}
+		setter := reflect.ValueOf(bean).Elem().Field(i)
+		switch field.Type.String() {
+		case "int":
+			intValue, _ := strconv.ParseInt(defaultValue, 10, 64)
+			setter.SetInt(intValue)
+		case "time.Duration":
+			intValue, _ := strconv.ParseInt(defaultValue, 10, 64)
+			setter.SetInt(intValue)
+		case "string":
+			setter.SetString(defaultValue)
+		case "bool":
+			boolValue, _ := strconv.ParseBool(defaultValue)
+			setter.SetBool(boolValue)
+		}
+	}
+}
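+
+// Usage sketch for InitStructWithDefaultTag (hypothetical type; duration
+// defaults are parsed as integer nanoseconds):
+//
+//	type config struct {
+//		Timeout time.Duration `default:"10000000000"` // 10s
+//		Scheme  string        `default:"HTTP"`
+//	}
+//	c := &config{}
+//	InitStructWithDefaultTag(c)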
+*/ + +package ecs + +import ( + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth" +) + +// Client is the sdk client struct, each func corresponds to an OpenAPI +type Client struct { + sdk.Client +} + +// NewClient creates a sdk client with environment variables +func NewClient() (client *Client, err error) { + client = &Client{} + err = client.Init() + return +} + +// NewClientWithOptions creates a sdk client with regionId/sdkConfig/credential +// this is the common api to create a sdk client +func NewClientWithOptions(regionId string, config *sdk.Config, credential auth.Credential) (client *Client, err error) { + client = &Client{} + err = client.InitWithOptions(regionId, config, credential) + return +} + +// NewClientWithAccessKey is a shortcut to create sdk client with accesskey +// usage: https://help.aliyun.com/document_detail/66217.html +func NewClientWithAccessKey(regionId, accessKeyId, accessKeySecret string) (client *Client, err error) { + client = &Client{} + err = client.InitWithAccessKey(regionId, accessKeyId, accessKeySecret) + return +} + +// NewClientWithStsToken is a shortcut to create sdk client with sts token +// usage: https://help.aliyun.com/document_detail/66222.html +func NewClientWithStsToken(regionId, stsAccessKeyId, stsAccessKeySecret, stsToken string) (client *Client, err error) { + client = &Client{} + err = client.InitWithStsToken(regionId, stsAccessKeyId, stsAccessKeySecret, stsToken) + return +} + +// NewClientWithRamRoleArn is a shortcut to create sdk client with ram roleArn +// usage: https://help.aliyun.com/document_detail/66222.html +func NewClientWithRamRoleArn(regionId string, accessKeyId, accessKeySecret, roleArn, roleSessionName string) (client *Client, err error) { + client = &Client{} + err = client.InitWithRamRoleArn(regionId, accessKeyId, accessKeySecret, roleArn, roleSessionName) + return +} + +// NewClientWithEcsRamRole is a shortcut to create sdk client with ecs ram role +// usage: https://help.aliyun.com/document_detail/66223.html +func NewClientWithEcsRamRole(regionId string, roleName string) (client *Client, err error) { + client = &Client{} + err = client.InitWithEcsRamRole(regionId, roleName) + return +} + +// NewClientWithRsaKeyPair is a shortcut to create sdk client with rsa key pair +// attention: rsa key pair auth is only Japan regions available +func NewClientWithRsaKeyPair(regionId string, publicKeyId, privateKey string, sessionExpiration int) (client *Client, err error) { + client = &Client{} + err = client.InitWithRsaKeyPair(regionId, publicKeyId, privateKey, sessionExpiration) + return +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ecs/describe_instance_types.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ecs/describe_instance_types.go new file mode 100644 index 000000000000..22751d384ef4 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ecs/describe_instance_types.go @@ -0,0 +1,140 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
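Each constructor above simply delegates to the corresponding `Init*` method on the embedded `sdk.Client`. A minimal, hedged sketch (not part of the diff) of building an ECS client from a static access key; the region and credential values are placeholders, and in the autoscaler they would normally come from environment variables or a RAM role instead:

```go
package example

import (
	"fmt"

	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ecs"
)

// newEcsClient builds a client from placeholder static credentials.
func newEcsClient() (*ecs.Client, error) {
	client, err := ecs.NewClientWithAccessKey("cn-hangzhou", "<accessKeyId>", "<accessKeySecret>")
	if err != nil {
		return nil, fmt.Errorf("creating ECS client: %v", err)
	}
	return client, nil // ready to issue OpenAPI calls such as DescribeInstanceTypes
}
```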
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ecs + +import ( + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/responses" +) + +// DescribeInstanceTypes invokes the ecs.DescribeInstanceTypes API synchronously +// api document: https://help.aliyun.com/api/ecs/describeinstancetypes.html +func (client *Client) DescribeInstanceTypes(request *DescribeInstanceTypesRequest) (response *DescribeInstanceTypesResponse, err error) { + response = CreateDescribeInstanceTypesResponse() + err = client.DoAction(request, response) + return +} + +// DescribeInstanceTypesWithChan invokes the ecs.DescribeInstanceTypes API asynchronously +// api document: https://help.aliyun.com/api/ecs/describeinstancetypes.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) DescribeInstanceTypesWithChan(request *DescribeInstanceTypesRequest) (<-chan *DescribeInstanceTypesResponse, <-chan error) { + responseChan := make(chan *DescribeInstanceTypesResponse, 1) + errChan := make(chan error, 1) + err := client.AddAsyncTask(func() { + defer close(responseChan) + defer close(errChan) + response, err := client.DescribeInstanceTypes(request) + if err != nil { + errChan <- err + } else { + responseChan <- response + } + }) + if err != nil { + errChan <- err + close(responseChan) + close(errChan) + } + return responseChan, errChan +} + +// DescribeInstanceTypesWithCallback invokes the ecs.DescribeInstanceTypes API asynchronously +// api document: https://help.aliyun.com/api/ecs/describeinstancetypes.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) DescribeInstanceTypesWithCallback(request *DescribeInstanceTypesRequest, callback func(response *DescribeInstanceTypesResponse, err error)) <-chan int { + result := make(chan int, 1) + err := client.AddAsyncTask(func() { + var response *DescribeInstanceTypesResponse + var err error + defer close(result) + response, err = client.DescribeInstanceTypes(request) + callback(response, err) + result <- 1 + }) + if err != nil { + defer close(result) + callback(nil, err) + result <- 0 + } + return result +} + +// DescribeInstanceTypesRequest is the request struct for api DescribeInstanceTypes +type DescribeInstanceTypesRequest struct { + *requests.RpcRequest + ResourceOwnerId requests.Integer `position:"Query" name:"ResourceOwnerId"` + ResourceOwnerAccount string `position:"Query" name:"ResourceOwnerAccount"` + OwnerAccount string `position:"Query" name:"OwnerAccount"` + InstanceTypeFamily string `position:"Query" name:"InstanceTypeFamily"` + OwnerId requests.Integer `position:"Query" name:"OwnerId"` +} + +// DescribeInstanceTypesResponse is the response struct for api DescribeInstanceTypes +type DescribeInstanceTypesResponse struct { + *responses.BaseResponse + RequestId string `json:"RequestId" xml:"RequestId"` + InstanceTypes InstanceTypesInDescribeInstanceTypes `json:"InstanceTypes" xml:"InstanceTypes"` +} + +// InstanceTypesInDescribeInstanceTypes is a nested struct in ecs response +type 
InstanceTypesInDescribeInstanceTypes struct { + InstanceType []InstanceType `json:"InstanceType" xml:"InstanceType"` +} + +// InstanceType is a nested struct in ecs response +type InstanceType struct { + MemorySize float64 `json:"MemorySize" xml:"MemorySize"` + InstancePpsRx int `json:"InstancePpsRx" xml:"InstancePpsRx"` + CpuCoreCount int `json:"CpuCoreCount" xml:"CpuCoreCount"` + Cores int `json:"Cores" xml:"Cores"` + Memory int `json:"Memory" xml:"Memory"` + InstanceTypeId string `json:"InstanceTypeId" xml:"InstanceTypeId"` + InstanceBandwidthRx int `json:"InstanceBandwidthRx" xml:"InstanceBandwidthRx"` + InstanceType string `json:"InstanceType" xml:"InstanceType"` + BaselineCredit int `json:"BaselineCredit" xml:"BaselineCredit"` + EniQuantity int `json:"EniQuantity" xml:"EniQuantity"` + Generation string `json:"Generation" xml:"Generation"` + GPUAmount int `json:"GPUAmount" xml:"GPUAmount"` + SupportIoOptimized string `json:"SupportIoOptimized" xml:"SupportIoOptimized"` + InstanceTypeFamily string `json:"InstanceTypeFamily" xml:"InstanceTypeFamily"` + InitialCredit int `json:"InitialCredit" xml:"InitialCredit"` + InstancePpsTx int `json:"InstancePpsTx" xml:"InstancePpsTx"` + LocalStorageAmount int `json:"LocalStorageAmount" xml:"LocalStorageAmount"` + InstanceFamilyLevel string `json:"InstanceFamilyLevel" xml:"InstanceFamilyLevel"` + LocalStorageCapacity int `json:"LocalStorageCapacity" xml:"LocalStorageCapacity"` + GPUSpec string `json:"GPUSpec" xml:"GPUSpec"` + LocalStorageCategory string `json:"LocalStorageCategory" xml:"LocalStorageCategory"` + InstanceBandwidthTx int `json:"InstanceBandwidthTx" xml:"InstanceBandwidthTx"` +} + +// CreateDescribeInstanceTypesRequest creates a request to invoke DescribeInstanceTypes API +func CreateDescribeInstanceTypesRequest() (request *DescribeInstanceTypesRequest) { + request = &DescribeInstanceTypesRequest{ + RpcRequest: &requests.RpcRequest{}, + } + request.InitWithApiInfo("Ecs", "2014-05-26", "DescribeInstanceTypes", "ecs", "openAPI") + return +} + +// CreateDescribeInstanceTypesResponse creates a response to parse from DescribeInstanceTypes response +func CreateDescribeInstanceTypesResponse() (response *DescribeInstanceTypesResponse) { + response = &DescribeInstanceTypesResponse{ + BaseResponse: &responses.BaseResponse{}, + } + return +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/client.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/client.go new file mode 100755 index 000000000000..cf19bb4bfdce --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/client.go @@ -0,0 +1,82 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
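`DescribeInstanceTypes` is the call the alicloud provider can use to map an instance type ID to CPU/memory/GPU capacity. A hedged sketch (not part of the diff) of the synchronous path, assuming a `*ecs.Client` created as above; the `ecs.g5` filter is a placeholder:

```go
package example

import (
	"fmt"

	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ecs"
)

// listInstanceTypes prints the capacity of every instance type in one family.
func listInstanceTypes(client *ecs.Client) error {
	request := ecs.CreateDescribeInstanceTypesRequest()
	request.InstanceTypeFamily = "ecs.g5" // optional server-side filter; placeholder value
	response, err := client.DescribeInstanceTypes(request)
	if err != nil {
		return err
	}
	for _, t := range response.InstanceTypes.InstanceType {
		fmt.Printf("%s: %d vCPU, %.1f GiB memory, %d GPU\n",
			t.InstanceTypeId, t.CpuCoreCount, t.MemorySize, t.GPUAmount)
	}
	return nil
}
```

The `WithChan` and `WithCallback` variants wrap this same synchronous call in the client's internal async task pool; the channel version returns a buffered response channel and a buffered error channel, exactly one of which receives a value per request before both are closed.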
+*/ + +package ess + +import ( + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/auth" +) + +// Client is the sdk client struct, each func corresponds to an OpenAPI +type Client struct { + sdk.Client +} + +// NewClient creates a sdk client with environment variables +func NewClient() (client *Client, err error) { + client = &Client{} + err = client.Init() + return +} + +// NewClientWithOptions creates a sdk client with regionId/sdkConfig/credential +// this is the common api to create a sdk client +func NewClientWithOptions(regionId string, config *sdk.Config, credential auth.Credential) (client *Client, err error) { + client = &Client{} + err = client.InitWithOptions(regionId, config, credential) + return +} + +// NewClientWithAccessKey is a shortcut to create sdk client with accesskey +// usage: https://help.aliyun.com/document_detail/66217.html +func NewClientWithAccessKey(regionId, accessKeyId, accessKeySecret string) (client *Client, err error) { + client = &Client{} + err = client.InitWithAccessKey(regionId, accessKeyId, accessKeySecret) + return +} + +// NewClientWithStsToken is a shortcut to create sdk client with sts token +// usage: https://help.aliyun.com/document_detail/66222.html +func NewClientWithStsToken(regionId, stsAccessKeyId, stsAccessKeySecret, stsToken string) (client *Client, err error) { + client = &Client{} + err = client.InitWithStsToken(regionId, stsAccessKeyId, stsAccessKeySecret, stsToken) + return +} + +// NewClientWithRamRoleArn is a shortcut to create sdk client with ram roleArn +// usage: https://help.aliyun.com/document_detail/66222.html +func NewClientWithRamRoleArn(regionId string, accessKeyId, accessKeySecret, roleArn, roleSessionName string) (client *Client, err error) { + client = &Client{} + err = client.InitWithRamRoleArn(regionId, accessKeyId, accessKeySecret, roleArn, roleSessionName) + return +} + +// NewClientWithEcsRamRole is a shortcut to create sdk client with ecs ram role +// usage: https://help.aliyun.com/document_detail/66223.html +func NewClientWithEcsRamRole(regionId string, roleName string) (client *Client, err error) { + client = &Client{} + err = client.InitWithEcsRamRole(regionId, roleName) + return +} + +// NewClientWithRsaKeyPair is a shortcut to create sdk client with rsa key pair +// attention: rsa key pair auth is only Japan regions available +func NewClientWithRsaKeyPair(regionId string, publicKeyId, privateKey string, sessionExpiration int) (client *Client, err error) { + client = &Client{} + err = client.InitWithRsaKeyPair(regionId, publicKeyId, privateKey, sessionExpiration) + return +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/create_scaling_rule.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/create_scaling_rule.go new file mode 100755 index 000000000000..ce19bfdfb72c --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/create_scaling_rule.go @@ -0,0 +1,113 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ess + +import ( + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/responses" +) + +// CreateScalingRule invokes the ess.CreateScalingRule API synchronously +// api document: https://help.aliyun.com/api/ess/createscalingrule.html +func (client *Client) CreateScalingRule(request *CreateScalingRuleRequest) (response *CreateScalingRuleResponse, err error) { + response = CreateCreateScalingRuleResponse() + err = client.DoAction(request, response) + return +} + +// CreateScalingRuleWithChan invokes the ess.CreateScalingRule API asynchronously +// api document: https://help.aliyun.com/api/ess/createscalingrule.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) CreateScalingRuleWithChan(request *CreateScalingRuleRequest) (<-chan *CreateScalingRuleResponse, <-chan error) { + responseChan := make(chan *CreateScalingRuleResponse, 1) + errChan := make(chan error, 1) + err := client.AddAsyncTask(func() { + defer close(responseChan) + defer close(errChan) + response, err := client.CreateScalingRule(request) + if err != nil { + errChan <- err + } else { + responseChan <- response + } + }) + if err != nil { + errChan <- err + close(responseChan) + close(errChan) + } + return responseChan, errChan +} + +// CreateScalingRuleWithCallback invokes the ess.CreateScalingRule API asynchronously +// api document: https://help.aliyun.com/api/ess/createscalingrule.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) CreateScalingRuleWithCallback(request *CreateScalingRuleRequest, callback func(response *CreateScalingRuleResponse, err error)) <-chan int { + result := make(chan int, 1) + err := client.AddAsyncTask(func() { + var response *CreateScalingRuleResponse + var err error + defer close(result) + response, err = client.CreateScalingRule(request) + callback(response, err) + result <- 1 + }) + if err != nil { + defer close(result) + callback(nil, err) + result <- 0 + } + return result +} + +// CreateScalingRuleRequest is the request struct for api CreateScalingRule +type CreateScalingRuleRequest struct { + *requests.RpcRequest + ScalingRuleName string `position:"Query" name:"ScalingRuleName"` + ResourceOwnerAccount string `position:"Query" name:"ResourceOwnerAccount"` + AdjustmentValue requests.Integer `position:"Query" name:"AdjustmentValue"` + ScalingGroupId string `position:"Query" name:"ScalingGroupId"` + OwnerAccount string `position:"Query" name:"OwnerAccount"` + Cooldown requests.Integer `position:"Query" name:"Cooldown"` + AdjustmentType string `position:"Query" name:"AdjustmentType"` + OwnerId requests.Integer `position:"Query" name:"OwnerId"` +} + +// CreateScalingRuleResponse is the response struct for api CreateScalingRule +type CreateScalingRuleResponse struct { + *responses.BaseResponse + ScalingRuleId string `json:"ScalingRuleId" xml:"ScalingRuleId"` + ScalingRuleAri string `json:"ScalingRuleAri" xml:"ScalingRuleAri"` + RequestId string 
`json:"RequestId" xml:"RequestId"` +} + +// CreateCreateScalingRuleRequest creates a request to invoke CreateScalingRule API +func CreateCreateScalingRuleRequest() (request *CreateScalingRuleRequest) { + request = &CreateScalingRuleRequest{ + RpcRequest: &requests.RpcRequest{}, + } + request.InitWithApiInfo("Ess", "2014-08-28", "CreateScalingRule", "ess", "openAPI") + return +} + +// CreateCreateScalingRuleResponse creates a response to parse from CreateScalingRule response +func CreateCreateScalingRuleResponse() (response *CreateScalingRuleResponse) { + response = &CreateScalingRuleResponse{ + BaseResponse: &responses.BaseResponse{}, + } + return +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/delete_scaling_rule.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/delete_scaling_rule.go new file mode 100755 index 000000000000..38d4ad2ae56e --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/delete_scaling_rule.go @@ -0,0 +1,107 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ess + +import ( + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/responses" +) + +// DeleteScalingRule invokes the ess.DeleteScalingRule API synchronously +// api document: https://help.aliyun.com/api/ess/deletescalingrule.html +func (client *Client) DeleteScalingRule(request *DeleteScalingRuleRequest) (response *DeleteScalingRuleResponse, err error) { + response = CreateDeleteScalingRuleResponse() + err = client.DoAction(request, response) + return +} + +// DeleteScalingRuleWithChan invokes the ess.DeleteScalingRule API asynchronously +// api document: https://help.aliyun.com/api/ess/deletescalingrule.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) DeleteScalingRuleWithChan(request *DeleteScalingRuleRequest) (<-chan *DeleteScalingRuleResponse, <-chan error) { + responseChan := make(chan *DeleteScalingRuleResponse, 1) + errChan := make(chan error, 1) + err := client.AddAsyncTask(func() { + defer close(responseChan) + defer close(errChan) + response, err := client.DeleteScalingRule(request) + if err != nil { + errChan <- err + } else { + responseChan <- response + } + }) + if err != nil { + errChan <- err + close(responseChan) + close(errChan) + } + return responseChan, errChan +} + +// DeleteScalingRuleWithCallback invokes the ess.DeleteScalingRule API asynchronously +// api document: https://help.aliyun.com/api/ess/deletescalingrule.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) DeleteScalingRuleWithCallback(request *DeleteScalingRuleRequest, callback func(response *DeleteScalingRuleResponse, err error)) <-chan int { + result := make(chan int, 1) + err := client.AddAsyncTask(func() { + var response 
*DeleteScalingRuleResponse + var err error + defer close(result) + response, err = client.DeleteScalingRule(request) + callback(response, err) + result <- 1 + }) + if err != nil { + defer close(result) + callback(nil, err) + result <- 0 + } + return result +} + +// DeleteScalingRuleRequest is the request struct for api DeleteScalingRule +type DeleteScalingRuleRequest struct { + *requests.RpcRequest + ResourceOwnerAccount string `position:"Query" name:"ResourceOwnerAccount"` + OwnerAccount string `position:"Query" name:"OwnerAccount"` + OwnerId requests.Integer `position:"Query" name:"OwnerId"` + ScalingRuleId string `position:"Query" name:"ScalingRuleId"` +} + +// DeleteScalingRuleResponse is the response struct for api DeleteScalingRule +type DeleteScalingRuleResponse struct { + *responses.BaseResponse + RequestId string `json:"RequestId" xml:"RequestId"` +} + +// CreateDeleteScalingRuleRequest creates a request to invoke DeleteScalingRule API +func CreateDeleteScalingRuleRequest() (request *DeleteScalingRuleRequest) { + request = &DeleteScalingRuleRequest{ + RpcRequest: &requests.RpcRequest{}, + } + request.InitWithApiInfo("Ess", "2014-08-28", "DeleteScalingRule", "ess", "openAPI") + return +} + +// CreateDeleteScalingRuleResponse creates a response to parse from DeleteScalingRule response +func CreateDeleteScalingRuleResponse() (response *DeleteScalingRuleResponse) { + response = &DeleteScalingRuleResponse{ + BaseResponse: &responses.BaseResponse{}, + } + return +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/describe_scaling_configurations.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/describe_scaling_configurations.go new file mode 100755 index 000000000000..1ab4bc94bba9 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/describe_scaling_configurations.go @@ -0,0 +1,214 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
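The rule lifecycle is symmetric: `CreateScalingRule` returns both a `ScalingRuleId` (used for deletion) and a `ScalingRuleAri` (used for execution). A hedged sketch (not part of the diff) pairing the two calls; `requests.NewInteger` is assumed from the SDK's `requests` package, which is not shown in this diff, and all identifier values are placeholders:

```go
package example

import (
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests"
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess"
)

// createThenDeleteRule creates a scale-out rule and immediately removes it.
func createThenDeleteRule(client *ess.Client, scalingGroupId string) error {
	create := ess.CreateCreateScalingRuleRequest()
	create.ScalingGroupId = scalingGroupId
	create.ScalingRuleName = "scale-out-by-2"
	create.AdjustmentType = "QuantityChangeInCapacity" // add AdjustmentValue instances
	create.AdjustmentValue = requests.NewInteger(2)
	create.Cooldown = requests.NewInteger(300) // seconds

	created, err := client.CreateScalingRule(create)
	if err != nil {
		return err
	}

	del := ess.CreateDeleteScalingRuleRequest()
	del.ScalingRuleId = created.ScalingRuleId
	_, err = client.DeleteScalingRule(del)
	return err
}
```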
+*/ + +package ess + +import ( + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/responses" +) + +// DescribeScalingConfigurations invokes the ess.DescribeScalingConfigurations API synchronously +// api document: https://help.aliyun.com/api/ess/describescalingconfigurations.html +func (client *Client) DescribeScalingConfigurations(request *DescribeScalingConfigurationsRequest) (response *DescribeScalingConfigurationsResponse, err error) { + response = CreateDescribeScalingConfigurationsResponse() + err = client.DoAction(request, response) + return +} + +// DescribeScalingConfigurationsWithChan invokes the ess.DescribeScalingConfigurations API asynchronously +// api document: https://help.aliyun.com/api/ess/describescalingconfigurations.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) DescribeScalingConfigurationsWithChan(request *DescribeScalingConfigurationsRequest) (<-chan *DescribeScalingConfigurationsResponse, <-chan error) { + responseChan := make(chan *DescribeScalingConfigurationsResponse, 1) + errChan := make(chan error, 1) + err := client.AddAsyncTask(func() { + defer close(responseChan) + defer close(errChan) + response, err := client.DescribeScalingConfigurations(request) + if err != nil { + errChan <- err + } else { + responseChan <- response + } + }) + if err != nil { + errChan <- err + close(responseChan) + close(errChan) + } + return responseChan, errChan +} + +// DescribeScalingConfigurationsWithCallback invokes the ess.DescribeScalingConfigurations API asynchronously +// api document: https://help.aliyun.com/api/ess/describescalingconfigurations.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) DescribeScalingConfigurationsWithCallback(request *DescribeScalingConfigurationsRequest, callback func(response *DescribeScalingConfigurationsResponse, err error)) <-chan int { + result := make(chan int, 1) + err := client.AddAsyncTask(func() { + var response *DescribeScalingConfigurationsResponse + var err error + defer close(result) + response, err = client.DescribeScalingConfigurations(request) + callback(response, err) + result <- 1 + }) + if err != nil { + defer close(result) + callback(nil, err) + result <- 0 + } + return result +} + +// DescribeScalingConfigurationsRequest is the request struct for api DescribeScalingConfigurations +type DescribeScalingConfigurationsRequest struct { + *requests.RpcRequest + ScalingConfigurationId6 string `position:"Query" name:"ScalingConfigurationId.6"` + ScalingConfigurationId7 string `position:"Query" name:"ScalingConfigurationId.7"` + ResourceOwnerId requests.Integer `position:"Query" name:"ResourceOwnerId"` + ScalingConfigurationId4 string `position:"Query" name:"ScalingConfigurationId.4"` + ScalingConfigurationId5 string `position:"Query" name:"ScalingConfigurationId.5"` + ScalingGroupId string `position:"Query" name:"ScalingGroupId"` + ScalingConfigurationId8 string `position:"Query" name:"ScalingConfigurationId.8"` + ScalingConfigurationId9 string `position:"Query" name:"ScalingConfigurationId.9"` + ScalingConfigurationId10 string `position:"Query" name:"ScalingConfigurationId.10"` + PageNumber requests.Integer `position:"Query" name:"PageNumber"` + ScalingConfigurationName2 string `position:"Query" name:"ScalingConfigurationName.2"` + ScalingConfigurationName3 string `position:"Query" 
name:"ScalingConfigurationName.3"` + ScalingConfigurationName1 string `position:"Query" name:"ScalingConfigurationName.1"` + PageSize requests.Integer `position:"Query" name:"PageSize"` + ScalingConfigurationId2 string `position:"Query" name:"ScalingConfigurationId.2"` + ScalingConfigurationId3 string `position:"Query" name:"ScalingConfigurationId.3"` + ScalingConfigurationId1 string `position:"Query" name:"ScalingConfigurationId.1"` + ResourceOwnerAccount string `position:"Query" name:"ResourceOwnerAccount"` + OwnerAccount string `position:"Query" name:"OwnerAccount"` + ScalingConfigurationName6 string `position:"Query" name:"ScalingConfigurationName.6"` + ScalingConfigurationName7 string `position:"Query" name:"ScalingConfigurationName.7"` + ScalingConfigurationName4 string `position:"Query" name:"ScalingConfigurationName.4"` + ScalingConfigurationName5 string `position:"Query" name:"ScalingConfigurationName.5"` + OwnerId requests.Integer `position:"Query" name:"OwnerId"` + ScalingConfigurationName8 string `position:"Query" name:"ScalingConfigurationName.8"` + ScalingConfigurationName9 string `position:"Query" name:"ScalingConfigurationName.9"` + ScalingConfigurationName10 string `position:"Query" name:"ScalingConfigurationName.10"` +} + +// DescribeScalingConfigurationsResponse is the response struct for api DescribeScalingConfigurations +type DescribeScalingConfigurationsResponse struct { + *responses.BaseResponse + TotalCount int `json:"TotalCount" xml:"TotalCount"` + PageNumber int `json:"PageNumber" xml:"PageNumber"` + PageSize int `json:"PageSize" xml:"PageSize"` + RequestId string `json:"RequestId" xml:"RequestId"` + ScalingConfigurations ScalingConfigurations `json:"ScalingConfigurations" xml:"ScalingConfigurations"` +} + +// CreateDescribeScalingConfigurationsRequest creates a request to invoke DescribeScalingConfigurations API +func CreateDescribeScalingConfigurationsRequest() (request *DescribeScalingConfigurationsRequest) { + request = &DescribeScalingConfigurationsRequest{ + RpcRequest: &requests.RpcRequest{}, + } + request.InitWithApiInfo("Ess", "2014-08-28", "DescribeScalingConfigurations", "ess", "openAPI") + return +} + +// CreateDescribeScalingConfigurationsResponse creates a response to parse from DescribeScalingConfigurations response +func CreateDescribeScalingConfigurationsResponse() (response *DescribeScalingConfigurationsResponse) { + response = &DescribeScalingConfigurationsResponse{ + BaseResponse: &responses.BaseResponse{}, + } + return +} + +// ScalingConfigurations is a nested struct in ess response +type ScalingConfigurations struct { + ScalingConfiguration []ScalingConfiguration `json:"ScalingConfiguration" xml:"ScalingConfiguration"` +} + +// ScalingConfiguration is a nested struct in ess response +type ScalingConfiguration struct { + ScalingConfigurationId string `json:"ScalingConfigurationId" xml:"ScalingConfigurationId"` + ScalingConfigurationName string `json:"ScalingConfigurationName" xml:"ScalingConfigurationName"` + ScalingGroupId string `json:"ScalingGroupId" xml:"ScalingGroupId"` + InstanceName string `json:"InstanceName" xml:"InstanceName"` + ImageId string `json:"ImageId" xml:"ImageId"` + ImageName string `json:"ImageName" xml:"ImageName"` + HostName string `json:"HostName" xml:"HostName"` + InstanceType string `json:"InstanceType" xml:"InstanceType"` + InstanceGeneration string `json:"InstanceGeneration" xml:"InstanceGeneration"` + SecurityGroupId string `json:"SecurityGroupId" xml:"SecurityGroupId"` + IoOptimized string `json:"IoOptimized" 
xml:"IoOptimized"` + InternetChargeType string `json:"InternetChargeType" xml:"InternetChargeType"` + InternetMaxBandwidthIn int `json:"InternetMaxBandwidthIn" xml:"InternetMaxBandwidthIn"` + InternetMaxBandwidthOut int `json:"InternetMaxBandwidthOut" xml:"InternetMaxBandwidthOut"` + SystemDiskCategory string `json:"SystemDiskCategory" xml:"SystemDiskCategory"` + SystemDiskSize int `json:"SystemDiskSize" xml:"SystemDiskSize"` + LifecycleState string `json:"LifecycleState" xml:"LifecycleState"` + CreationTime string `json:"CreationTime" xml:"CreationTime"` + LoadBalancerWeight int `json:"LoadBalancerWeight" xml:"LoadBalancerWeight"` + UserData string `json:"UserData" xml:"UserData"` + KeyPairName string `json:"KeyPairName" xml:"KeyPairName"` + RamRoleName string `json:"RamRoleName" xml:"RamRoleName"` + DeploymentSetId string `json:"DeploymentSetId" xml:"DeploymentSetId"` + SecurityEnhancementStrategy string `json:"SecurityEnhancementStrategy" xml:"SecurityEnhancementStrategy"` + SpotStrategy string `json:"SpotStrategy" xml:"SpotStrategy"` + PasswordInherit bool `json:"PasswordInherit" xml:"PasswordInherit"` + InstanceTypes InstanceTypes `json:"InstanceTypes" xml:"InstanceTypes"` + DataDisks DataDisks `json:"DataDisks" xml:"DataDisks"` + Tags Tags `json:"Tags" xml:"Tags"` + SpotPriceLimit SpotPriceLimit `json:"SpotPriceLimit" xml:"SpotPriceLimit"` +} + +// InstanceTypes is a nested struct in ess response +type InstanceTypes struct { + InstanceType []string `json:"InstanceType" xml:"InstanceType"` +} + +// DataDisks is a nested struct in ess response +type DataDisks struct { + DataDisk []DataDisk `json:"DataDisk" xml:"DataDisk"` +} + +// DataDisk is a nested struct in ess response +type DataDisk struct { + Size int `json:"Size" xml:"Size"` + Category string `json:"Category" xml:"Category"` + SnapshotId string `json:"SnapshotId" xml:"SnapshotId"` + Device string `json:"Device" xml:"Device"` + DeleteWithInstance bool `json:"DeleteWithInstance" xml:"DeleteWithInstance"` +} + +// Tags is a nested struct in ess response +type Tags struct { + Tag []Tag `json:"Tag" xml:"Tag"` +} + +// Tag is a nested struct in ess response +type Tag struct { + Key string `json:"Key" xml:"Key"` + Value string `json:"Value" xml:"Value"` +} + +// SpotPriceLimit is a nested struct in ess response +type SpotPriceLimit struct { + SpotPriceModel []SpotPriceModel `json:"SpotPriceModel" xml:"SpotPriceModel"` +} + +// SpotPriceModel is a nested struct in ess response +type SpotPriceModel struct { + InstanceType string `json:"InstanceType" xml:"InstanceType"` + PriceLimit float64 `json:"PriceLimit" xml:"PriceLimit"` +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/describe_scaling_groups.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/describe_scaling_groups.go new file mode 100755 index 000000000000..2c2c92a85144 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/describe_scaling_groups.go @@ -0,0 +1,210 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ess + +import ( + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/responses" +) + +// DescribeScalingGroups invokes the ess.DescribeScalingGroups API synchronously +// api document: https://help.aliyun.com/api/ess/describescalinggroups.html +func (client *Client) DescribeScalingGroups(request *DescribeScalingGroupsRequest) (response *DescribeScalingGroupsResponse, err error) { + response = CreateDescribeScalingGroupsResponse() + err = client.DoAction(request, response) + return +} + +// DescribeScalingGroupsWithChan invokes the ess.DescribeScalingGroups API asynchronously +// api document: https://help.aliyun.com/api/ess/describescalinggroups.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) DescribeScalingGroupsWithChan(request *DescribeScalingGroupsRequest) (<-chan *DescribeScalingGroupsResponse, <-chan error) { + responseChan := make(chan *DescribeScalingGroupsResponse, 1) + errChan := make(chan error, 1) + err := client.AddAsyncTask(func() { + defer close(responseChan) + defer close(errChan) + response, err := client.DescribeScalingGroups(request) + if err != nil { + errChan <- err + } else { + responseChan <- response + } + }) + if err != nil { + errChan <- err + close(responseChan) + close(errChan) + } + return responseChan, errChan +} + +// DescribeScalingGroupsWithCallback invokes the ess.DescribeScalingGroups API asynchronously +// api document: https://help.aliyun.com/api/ess/describescalinggroups.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) DescribeScalingGroupsWithCallback(request *DescribeScalingGroupsRequest, callback func(response *DescribeScalingGroupsResponse, err error)) <-chan int { + result := make(chan int, 1) + err := client.AddAsyncTask(func() { + var response *DescribeScalingGroupsResponse + var err error + defer close(result) + response, err = client.DescribeScalingGroups(request) + callback(response, err) + result <- 1 + }) + if err != nil { + defer close(result) + callback(nil, err) + result <- 0 + } + return result +} + +// DescribeScalingGroupsRequest is the request struct for api DescribeScalingGroups +type DescribeScalingGroupsRequest struct { + *requests.RpcRequest + ResourceOwnerId requests.Integer `position:"Query" name:"ResourceOwnerId"` + ScalingGroupId10 string `position:"Query" name:"ScalingGroupId.10"` + ScalingGroupId12 string `position:"Query" name:"ScalingGroupId.12"` + ScalingGroupId13 string `position:"Query" name:"ScalingGroupId.13"` + ScalingGroupId14 string `position:"Query" name:"ScalingGroupId.14"` + ScalingGroupId15 string `position:"Query" name:"ScalingGroupId.15"` + OwnerId requests.Integer `position:"Query" name:"OwnerId"` + PageNumber requests.Integer `position:"Query" name:"PageNumber"` + PageSize requests.Integer `position:"Query" name:"PageSize"` + ScalingGroupName20 string `position:"Query" name:"ScalingGroupName.20"` + ScalingGroupName19 string `position:"Query" name:"ScalingGroupName.19"` + ScalingGroupId20 string `position:"Query" name:"ScalingGroupId.20"` + ScalingGroupName18 string `position:"Query" name:"ScalingGroupName.18"` + ScalingGroupName17 string `position:"Query" name:"ScalingGroupName.17"` + ScalingGroupName16 string `position:"Query" 
name:"ScalingGroupName.16"` + ResourceOwnerAccount string `position:"Query" name:"ResourceOwnerAccount"` + ScalingGroupName string `position:"Query" name:"ScalingGroupName"` + OwnerAccount string `position:"Query" name:"OwnerAccount"` + ScalingGroupName1 string `position:"Query" name:"ScalingGroupName.1"` + ScalingGroupName2 string `position:"Query" name:"ScalingGroupName.2"` + ScalingGroupId2 string `position:"Query" name:"ScalingGroupId.2"` + ScalingGroupId1 string `position:"Query" name:"ScalingGroupId.1"` + ScalingGroupId6 string `position:"Query" name:"ScalingGroupId.6"` + ScalingGroupId16 string `position:"Query" name:"ScalingGroupId.16"` + ScalingGroupName7 string `position:"Query" name:"ScalingGroupName.7"` + ScalingGroupName11 string `position:"Query" name:"ScalingGroupName.11"` + ScalingGroupId5 string `position:"Query" name:"ScalingGroupId.5"` + ScalingGroupId17 string `position:"Query" name:"ScalingGroupId.17"` + ScalingGroupName8 string `position:"Query" name:"ScalingGroupName.8"` + ScalingGroupName10 string `position:"Query" name:"ScalingGroupName.10"` + ScalingGroupId4 string `position:"Query" name:"ScalingGroupId.4"` + ScalingGroupId18 string `position:"Query" name:"ScalingGroupId.18"` + ScalingGroupName9 string `position:"Query" name:"ScalingGroupName.9"` + ScalingGroupId3 string `position:"Query" name:"ScalingGroupId.3"` + ScalingGroupId19 string `position:"Query" name:"ScalingGroupId.19"` + ScalingGroupName3 string `position:"Query" name:"ScalingGroupName.3"` + ScalingGroupName15 string `position:"Query" name:"ScalingGroupName.15"` + ScalingGroupId9 string `position:"Query" name:"ScalingGroupId.9"` + ScalingGroupName4 string `position:"Query" name:"ScalingGroupName.4"` + ScalingGroupName14 string `position:"Query" name:"ScalingGroupName.14"` + ScalingGroupId8 string `position:"Query" name:"ScalingGroupId.8"` + ScalingGroupName5 string `position:"Query" name:"ScalingGroupName.5"` + ScalingGroupName13 string `position:"Query" name:"ScalingGroupName.13"` + ScalingGroupId7 string `position:"Query" name:"ScalingGroupId.7"` + ScalingGroupName6 string `position:"Query" name:"ScalingGroupName.6"` + ScalingGroupName12 string `position:"Query" name:"ScalingGroupName.12"` +} + +// DescribeScalingGroupsResponse is the response struct for api DescribeScalingGroups +type DescribeScalingGroupsResponse struct { + *responses.BaseResponse + TotalCount int `json:"TotalCount" xml:"TotalCount"` + PageNumber int `json:"PageNumber" xml:"PageNumber"` + PageSize int `json:"PageSize" xml:"PageSize"` + RequestId string `json:"RequestId" xml:"RequestId"` + ScalingGroups ScalingGroups `json:"ScalingGroups" xml:"ScalingGroups"` +} + +// CreateDescribeScalingGroupsRequest creates a request to invoke DescribeScalingGroups API +func CreateDescribeScalingGroupsRequest() (request *DescribeScalingGroupsRequest) { + request = &DescribeScalingGroupsRequest{ + RpcRequest: &requests.RpcRequest{}, + } + request.InitWithApiInfo("Ess", "2014-08-28", "DescribeScalingGroups", "ess", "openAPI") + return +} + +// CreateDescribeScalingGroupsResponse creates a response to parse from DescribeScalingGroups response +func CreateDescribeScalingGroupsResponse() (response *DescribeScalingGroupsResponse) { + response = &DescribeScalingGroupsResponse{ + BaseResponse: &responses.BaseResponse{}, + } + return +} + +// ScalingGroups is a nested struct in ess response +type ScalingGroups struct { + ScalingGroup []ScalingGroup `json:"ScalingGroup" xml:"ScalingGroup"` +} + +// ScalingGroup is a nested struct in ess response +type 
ScalingGroup struct { + DefaultCooldown int `json:"DefaultCooldown" xml:"DefaultCooldown"` + MaxSize int `json:"MaxSize" xml:"MaxSize"` + PendingWaitCapacity int `json:"PendingWaitCapacity" xml:"PendingWaitCapacity"` + RemovingWaitCapacity int `json:"RemovingWaitCapacity" xml:"RemovingWaitCapacity"` + PendingCapacity int `json:"PendingCapacity" xml:"PendingCapacity"` + RemovingCapacity int `json:"RemovingCapacity" xml:"RemovingCapacity"` + ScalingGroupName string `json:"ScalingGroupName" xml:"ScalingGroupName"` + ActiveCapacity int `json:"ActiveCapacity" xml:"ActiveCapacity"` + StandbyCapacity int `json:"StandbyCapacity" xml:"StandbyCapacity"` + ProtectedCapacity int `json:"ProtectedCapacity" xml:"ProtectedCapacity"` + ActiveScalingConfigurationId string `json:"ActiveScalingConfigurationId" xml:"ActiveScalingConfigurationId"` + LaunchTemplateId string `json:"LaunchTemplateId" xml:"LaunchTemplateId"` + LaunchTemplateVersion string `json:"LaunchTemplateVersion" xml:"LaunchTemplateVersion"` + ScalingGroupId string `json:"ScalingGroupId" xml:"ScalingGroupId"` + RegionId string `json:"RegionId" xml:"RegionId"` + TotalCapacity int `json:"TotalCapacity" xml:"TotalCapacity"` + MinSize int `json:"MinSize" xml:"MinSize"` + LifecycleState string `json:"LifecycleState" xml:"LifecycleState"` + CreationTime string `json:"CreationTime" xml:"CreationTime"` + ModificationTime string `json:"ModificationTime" xml:"ModificationTime"` + VpcId string `json:"VpcId" xml:"VpcId"` + VSwitchId string `json:"VSwitchId" xml:"VSwitchId"` + MultiAZPolicy string `json:"MultiAZPolicy" xml:"MultiAZPolicy"` + HealthCheckType string `json:"HealthCheckType" xml:"HealthCheckType"` + VSwitchIds VSwitchIds `json:"VSwitchIds" xml:"VSwitchIds"` + RemovalPolicies RemovalPolicies `json:"RemovalPolicies" xml:"RemovalPolicies"` + DBInstanceIds DBInstanceIds `json:"DBInstanceIds" xml:"DBInstanceIds"` + LoadBalancerIds LoadBalancerIds `json:"LoadBalancerIds" xml:"LoadBalancerIds"` +} + +// VSwitchIds is a nested struct in ess response +type VSwitchIds struct { + VSwitchId []string `json:"VSwitchId" xml:"VSwitchId"` +} + +// RemovalPolicies is a nested struct in ess response +type RemovalPolicies struct { + RemovalPolicy []string `json:"RemovalPolicy" xml:"RemovalPolicy"` +} + +// DBInstanceIds is a nested struct in ess response +type DBInstanceIds struct { + DBInstanceId []string `json:"DBInstanceId" xml:"DBInstanceId"` +} + +// LoadBalancerIds is a nested struct in ess response +type LoadBalancerIds struct { + LoadBalancerId []string `json:"LoadBalancerId" xml:"LoadBalancerId"` +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/describe_scaling_instances.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/describe_scaling_instances.go new file mode 100755 index 000000000000..01cec9a93fc8 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/describe_scaling_instances.go @@ -0,0 +1,155 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
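A hedged sketch (not part of the diff) of resolving a scaling group by name and reading its capacity fields; the exact-match scan over the returned page is defensive, since the server-side name filter is all the request offers:

```go
package example

import (
	"fmt"

	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess"
)

// findGroupByName resolves a scaling group by its exact name.
func findGroupByName(client *ess.Client, name string) (*ess.ScalingGroup, error) {
	request := ess.CreateDescribeScalingGroupsRequest()
	request.ScalingGroupName = name

	response, err := client.DescribeScalingGroups(request)
	if err != nil {
		return nil, err
	}
	for i := range response.ScalingGroups.ScalingGroup {
		g := &response.ScalingGroups.ScalingGroup[i]
		if g.ScalingGroupName == name {
			fmt.Printf("%s: min=%d max=%d total=%d\n",
				g.ScalingGroupId, g.MinSize, g.MaxSize, g.TotalCapacity)
			return g, nil
		}
	}
	return nil, fmt.Errorf("scaling group %q not found", name)
}
```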
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ess + +import ( + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/responses" +) + +// DescribeScalingInstances invokes the ess.DescribeScalingInstances API synchronously +// api document: https://help.aliyun.com/api/ess/describescalinginstances.html +func (client *Client) DescribeScalingInstances(request *DescribeScalingInstancesRequest) (response *DescribeScalingInstancesResponse, err error) { + response = CreateDescribeScalingInstancesResponse() + err = client.DoAction(request, response) + return +} + +// DescribeScalingInstancesWithChan invokes the ess.DescribeScalingInstances API asynchronously +// api document: https://help.aliyun.com/api/ess/describescalinginstances.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) DescribeScalingInstancesWithChan(request *DescribeScalingInstancesRequest) (<-chan *DescribeScalingInstancesResponse, <-chan error) { + responseChan := make(chan *DescribeScalingInstancesResponse, 1) + errChan := make(chan error, 1) + err := client.AddAsyncTask(func() { + defer close(responseChan) + defer close(errChan) + response, err := client.DescribeScalingInstances(request) + if err != nil { + errChan <- err + } else { + responseChan <- response + } + }) + if err != nil { + errChan <- err + close(responseChan) + close(errChan) + } + return responseChan, errChan +} + +// DescribeScalingInstancesWithCallback invokes the ess.DescribeScalingInstances API asynchronously +// api document: https://help.aliyun.com/api/ess/describescalinginstances.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) DescribeScalingInstancesWithCallback(request *DescribeScalingInstancesRequest, callback func(response *DescribeScalingInstancesResponse, err error)) <-chan int { + result := make(chan int, 1) + err := client.AddAsyncTask(func() { + var response *DescribeScalingInstancesResponse + var err error + defer close(result) + response, err = client.DescribeScalingInstances(request) + callback(response, err) + result <- 1 + }) + if err != nil { + defer close(result) + callback(nil, err) + result <- 0 + } + return result +} + +// DescribeScalingInstancesRequest is the request struct for api DescribeScalingInstances +type DescribeScalingInstancesRequest struct { + *requests.RpcRequest + InstanceId10 string `position:"Query" name:"InstanceId.10"` + ResourceOwnerId requests.Integer `position:"Query" name:"ResourceOwnerId"` + InstanceId12 string `position:"Query" name:"InstanceId.12"` + InstanceId11 string `position:"Query" name:"InstanceId.11"` + ScalingGroupId string `position:"Query" name:"ScalingGroupId"` + LifecycleState string `position:"Query" name:"LifecycleState"` + CreationType string `position:"Query" name:"CreationType"` + PageNumber requests.Integer `position:"Query" name:"PageNumber"` + PageSize requests.Integer `position:"Query" name:"PageSize"` + InstanceId20 string `position:"Query" name:"InstanceId.20"` + InstanceId1 string `position:"Query" name:"InstanceId.1"` + InstanceId3 string `position:"Query" name:"InstanceId.3"` + ResourceOwnerAccount string `position:"Query" name:"ResourceOwnerAccount"` + InstanceId2 string `position:"Query" name:"InstanceId.2"` + InstanceId5 string `position:"Query" name:"InstanceId.5"` + 
InstanceId4 string `position:"Query" name:"InstanceId.4"` + OwnerAccount string `position:"Query" name:"OwnerAccount"` + InstanceId7 string `position:"Query" name:"InstanceId.7"` + InstanceId6 string `position:"Query" name:"InstanceId.6"` + InstanceId9 string `position:"Query" name:"InstanceId.9"` + InstanceId8 string `position:"Query" name:"InstanceId.8"` + OwnerId requests.Integer `position:"Query" name:"OwnerId"` + ScalingConfigurationId string `position:"Query" name:"ScalingConfigurationId"` + HealthStatus string `position:"Query" name:"HealthStatus"` + InstanceId18 string `position:"Query" name:"InstanceId.18"` + InstanceId17 string `position:"Query" name:"InstanceId.17"` + InstanceId19 string `position:"Query" name:"InstanceId.19"` + InstanceId14 string `position:"Query" name:"InstanceId.14"` + InstanceId13 string `position:"Query" name:"InstanceId.13"` + InstanceId16 string `position:"Query" name:"InstanceId.16"` + InstanceId15 string `position:"Query" name:"InstanceId.15"` +} + +// DescribeScalingInstancesResponse is the response struct for api DescribeScalingInstances +type DescribeScalingInstancesResponse struct { + *responses.BaseResponse + TotalCount int `json:"TotalCount" xml:"TotalCount"` + PageNumber int `json:"PageNumber" xml:"PageNumber"` + PageSize int `json:"PageSize" xml:"PageSize"` + RequestId string `json:"RequestId" xml:"RequestId"` + ScalingInstances ScalingInstances `json:"ScalingInstances" xml:"ScalingInstances"` +} + +// CreateDescribeScalingInstancesRequest creates a request to invoke DescribeScalingInstances API +func CreateDescribeScalingInstancesRequest() (request *DescribeScalingInstancesRequest) { + request = &DescribeScalingInstancesRequest{ + RpcRequest: &requests.RpcRequest{}, + } + request.InitWithApiInfo("Ess", "2014-08-28", "DescribeScalingInstances", "ess", "openAPI") + return +} + +// CreateDescribeScalingInstancesResponse creates a response to parse from DescribeScalingInstances response +func CreateDescribeScalingInstancesResponse() (response *DescribeScalingInstancesResponse) { + response = &DescribeScalingInstancesResponse{ + BaseResponse: &responses.BaseResponse{}, + } + return +} + +// ScalingInstances is a nested struct in ess response +type ScalingInstances struct { + ScalingInstance []ScalingInstance `json:"ScalingInstance" xml:"ScalingInstance"` +} + +// ScalingInstance is a nested struct in ess response +type ScalingInstance struct { + InstanceId string `json:"InstanceId" xml:"InstanceId"` + ScalingConfigurationId string `json:"ScalingConfigurationId" xml:"ScalingConfigurationId"` + ScalingGroupId string `json:"ScalingGroupId" xml:"ScalingGroupId"` + HealthStatus string `json:"HealthStatus" xml:"HealthStatus"` + LoadBalancerWeight int `json:"LoadBalancerWeight" xml:"LoadBalancerWeight"` + LifecycleState string `json:"LifecycleState" xml:"LifecycleState"` + CreationTime string `json:"CreationTime" xml:"CreationTime"` + CreationType string `json:"CreationType" xml:"CreationType"` +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/describe_scaling_rules.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/describe_scaling_rules.go new file mode 100755 index 000000000000..d9c5063a5614 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/describe_scaling_rules.go @@ -0,0 +1,162 @@ +/* +Copyright 2018 The Kubernetes Authors. 
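`DescribeScalingInstances` supports server-side filters on health and lifecycle state. A hedged sketch (not part of the diff); the `Healthy` and `InService` filter values are the ones documented for ESS:

```go
package example

import (
	"fmt"

	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess"
)

// listInstances prints the healthy, in-service instances of a scaling group.
func listInstances(client *ess.Client, scalingGroupId string) error {
	request := ess.CreateDescribeScalingInstancesRequest()
	request.ScalingGroupId = scalingGroupId
	request.HealthStatus = "Healthy"
	request.LifecycleState = "InService"

	response, err := client.DescribeScalingInstances(request)
	if err != nil {
		return err
	}
	for _, inst := range response.ScalingInstances.ScalingInstance {
		fmt.Printf("%s (%s, created %s)\n", inst.InstanceId, inst.CreationType, inst.CreationTime)
	}
	return nil
}
```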
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ess + +import ( + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/responses" +) + +// DescribeScalingRules invokes the ess.DescribeScalingRules API synchronously +// api document: https://help.aliyun.com/api/ess/describescalingrules.html +func (client *Client) DescribeScalingRules(request *DescribeScalingRulesRequest) (response *DescribeScalingRulesResponse, err error) { + response = CreateDescribeScalingRulesResponse() + err = client.DoAction(request, response) + return +} + +// DescribeScalingRulesWithChan invokes the ess.DescribeScalingRules API asynchronously +// api document: https://help.aliyun.com/api/ess/describescalingrules.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) DescribeScalingRulesWithChan(request *DescribeScalingRulesRequest) (<-chan *DescribeScalingRulesResponse, <-chan error) { + responseChan := make(chan *DescribeScalingRulesResponse, 1) + errChan := make(chan error, 1) + err := client.AddAsyncTask(func() { + defer close(responseChan) + defer close(errChan) + response, err := client.DescribeScalingRules(request) + if err != nil { + errChan <- err + } else { + responseChan <- response + } + }) + if err != nil { + errChan <- err + close(responseChan) + close(errChan) + } + return responseChan, errChan +} + +// DescribeScalingRulesWithCallback invokes the ess.DescribeScalingRules API asynchronously +// api document: https://help.aliyun.com/api/ess/describescalingrules.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) DescribeScalingRulesWithCallback(request *DescribeScalingRulesRequest, callback func(response *DescribeScalingRulesResponse, err error)) <-chan int { + result := make(chan int, 1) + err := client.AddAsyncTask(func() { + var response *DescribeScalingRulesResponse + var err error + defer close(result) + response, err = client.DescribeScalingRules(request) + callback(response, err) + result <- 1 + }) + if err != nil { + defer close(result) + callback(nil, err) + result <- 0 + } + return result +} + +// DescribeScalingRulesRequest is the request struct for api DescribeScalingRules +type DescribeScalingRulesRequest struct { + *requests.RpcRequest + ScalingRuleName1 string `position:"Query" name:"ScalingRuleName.1"` + ResourceOwnerId requests.Integer `position:"Query" name:"ResourceOwnerId"` + ScalingRuleName2 string `position:"Query" name:"ScalingRuleName.2"` + ScalingRuleName3 string `position:"Query" name:"ScalingRuleName.3"` + ScalingRuleName4 string `position:"Query" name:"ScalingRuleName.4"` + ScalingRuleName5 string `position:"Query" name:"ScalingRuleName.5"` + ScalingGroupId string `position:"Query" name:"ScalingGroupId"` + ScalingRuleName6 string `position:"Query" name:"ScalingRuleName.6"` + ScalingRuleName7 string `position:"Query" name:"ScalingRuleName.7"` + 
ScalingRuleName8 string `position:"Query" name:"ScalingRuleName.8"` + ScalingRuleAri9 string `position:"Query" name:"ScalingRuleAri.9"` + ScalingRuleName9 string `position:"Query" name:"ScalingRuleName.9"` + PageNumber requests.Integer `position:"Query" name:"PageNumber"` + PageSize requests.Integer `position:"Query" name:"PageSize"` + ScalingRuleId10 string `position:"Query" name:"ScalingRuleId.10"` + ResourceOwnerAccount string `position:"Query" name:"ResourceOwnerAccount"` + OwnerAccount string `position:"Query" name:"OwnerAccount"` + OwnerId requests.Integer `position:"Query" name:"OwnerId"` + ScalingRuleAri1 string `position:"Query" name:"ScalingRuleAri.1"` + ScalingRuleAri2 string `position:"Query" name:"ScalingRuleAri.2"` + ScalingRuleName10 string `position:"Query" name:"ScalingRuleName.10"` + ScalingRuleAri3 string `position:"Query" name:"ScalingRuleAri.3"` + ScalingRuleAri4 string `position:"Query" name:"ScalingRuleAri.4"` + ScalingRuleId8 string `position:"Query" name:"ScalingRuleId.8"` + ScalingRuleAri5 string `position:"Query" name:"ScalingRuleAri.5"` + ScalingRuleId9 string `position:"Query" name:"ScalingRuleId.9"` + ScalingRuleAri6 string `position:"Query" name:"ScalingRuleAri.6"` + ScalingRuleAri7 string `position:"Query" name:"ScalingRuleAri.7"` + ScalingRuleAri10 string `position:"Query" name:"ScalingRuleAri.10"` + ScalingRuleAri8 string `position:"Query" name:"ScalingRuleAri.8"` + ScalingRuleId4 string `position:"Query" name:"ScalingRuleId.4"` + ScalingRuleId5 string `position:"Query" name:"ScalingRuleId.5"` + ScalingRuleId6 string `position:"Query" name:"ScalingRuleId.6"` + ScalingRuleId7 string `position:"Query" name:"ScalingRuleId.7"` + ScalingRuleId1 string `position:"Query" name:"ScalingRuleId.1"` + ScalingRuleId2 string `position:"Query" name:"ScalingRuleId.2"` + ScalingRuleId3 string `position:"Query" name:"ScalingRuleId.3"` +} + +// DescribeScalingRulesResponse is the response struct for api DescribeScalingRules +type DescribeScalingRulesResponse struct { + *responses.BaseResponse + TotalCount int `json:"TotalCount" xml:"TotalCount"` + PageNumber int `json:"PageNumber" xml:"PageNumber"` + PageSize int `json:"PageSize" xml:"PageSize"` + RequestId string `json:"RequestId" xml:"RequestId"` + ScalingRules ScalingRules `json:"ScalingRules" xml:"ScalingRules"` +} + +// CreateDescribeScalingRulesRequest creates a request to invoke DescribeScalingRules API +func CreateDescribeScalingRulesRequest() (request *DescribeScalingRulesRequest) { + request = &DescribeScalingRulesRequest{ + RpcRequest: &requests.RpcRequest{}, + } + request.InitWithApiInfo("Ess", "2014-08-28", "DescribeScalingRules", "ess", "openAPI") + return +} + +// CreateDescribeScalingRulesResponse creates a response to parse from DescribeScalingRules response +func CreateDescribeScalingRulesResponse() (response *DescribeScalingRulesResponse) { + response = &DescribeScalingRulesResponse{ + BaseResponse: &responses.BaseResponse{}, + } + return +} + +// ScalingRules is a nested struct in ess response +type ScalingRules struct { + ScalingRule []ScalingRule `json:"ScalingRule" xml:"ScalingRule"` +} + +// ScalingRule is a nested struct in ess response +type ScalingRule struct { + ScalingRuleId string `json:"ScalingRuleId" xml:"ScalingRuleId"` + ScalingGroupId string `json:"ScalingGroupId" xml:"ScalingGroupId"` + ScalingRuleName string `json:"ScalingRuleName" xml:"ScalingRuleName"` + Cooldown int `json:"Cooldown" xml:"Cooldown"` + AdjustmentType string `json:"AdjustmentType" xml:"AdjustmentType"` + AdjustmentValue 
int `json:"AdjustmentValue" xml:"AdjustmentValue"` + MinSize int `json:"MinSize" xml:"MinSize"` + MaxSize int `json:"MaxSize" xml:"MaxSize"` + ScalingRuleAri string `json:"ScalingRuleAri" xml:"ScalingRuleAri"` +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/execute_scaling_rule.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/execute_scaling_rule.go new file mode 100755 index 000000000000..ed8f18451a92 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/execute_scaling_rule.go @@ -0,0 +1,110 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ess + +import ( + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/responses" +) + +// ExecuteScalingRule invokes the ess.ExecuteScalingRule API synchronously +// api document: https://help.aliyun.com/api/ess/executescalingrule.html +func (client *Client) ExecuteScalingRule(request *ExecuteScalingRuleRequest) (response *ExecuteScalingRuleResponse, err error) { + response = CreateExecuteScalingRuleResponse() + err = client.DoAction(request, response) + return +} + +// ExecuteScalingRuleWithChan invokes the ess.ExecuteScalingRule API asynchronously +// api document: https://help.aliyun.com/api/ess/executescalingrule.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) ExecuteScalingRuleWithChan(request *ExecuteScalingRuleRequest) (<-chan *ExecuteScalingRuleResponse, <-chan error) { + responseChan := make(chan *ExecuteScalingRuleResponse, 1) + errChan := make(chan error, 1) + err := client.AddAsyncTask(func() { + defer close(responseChan) + defer close(errChan) + response, err := client.ExecuteScalingRule(request) + if err != nil { + errChan <- err + } else { + responseChan <- response + } + }) + if err != nil { + errChan <- err + close(responseChan) + close(errChan) + } + return responseChan, errChan +} + +// ExecuteScalingRuleWithCallback invokes the ess.ExecuteScalingRule API asynchronously +// api document: https://help.aliyun.com/api/ess/executescalingrule.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) ExecuteScalingRuleWithCallback(request *ExecuteScalingRuleRequest, callback func(response *ExecuteScalingRuleResponse, err error)) <-chan int { + result := make(chan int, 1) + err := client.AddAsyncTask(func() { + var response *ExecuteScalingRuleResponse + var err error + defer close(result) + response, err = client.ExecuteScalingRule(request) + callback(response, err) + result <- 1 + }) + if err != nil { + defer close(result) + callback(nil, err) + result <- 0 + } + return result +} + +// ExecuteScalingRuleRequest is the request struct for api ExecuteScalingRule +type ExecuteScalingRuleRequest struct { + *requests.RpcRequest + ResourceOwnerId 
requests.Integer `position:"Query" name:"ResourceOwnerId"` + ScalingRuleAri string `position:"Query" name:"ScalingRuleAri"` + ResourceOwnerAccount string `position:"Query" name:"ResourceOwnerAccount"` + ClientToken string `position:"Query" name:"ClientToken"` + OwnerAccount string `position:"Query" name:"OwnerAccount"` + OwnerId requests.Integer `position:"Query" name:"OwnerId"` +} + +// ExecuteScalingRuleResponse is the response struct for api ExecuteScalingRule +type ExecuteScalingRuleResponse struct { + *responses.BaseResponse + ScalingActivityId string `json:"ScalingActivityId" xml:"ScalingActivityId"` + RequestId string `json:"RequestId" xml:"RequestId"` +} + +// CreateExecuteScalingRuleRequest creates a request to invoke ExecuteScalingRule API +func CreateExecuteScalingRuleRequest() (request *ExecuteScalingRuleRequest) { + request = &ExecuteScalingRuleRequest{ + RpcRequest: &requests.RpcRequest{}, + } + request.InitWithApiInfo("Ess", "2014-08-28", "ExecuteScalingRule", "ess", "openAPI") + return +} + +// CreateExecuteScalingRuleResponse creates a response to parse from ExecuteScalingRule response +func CreateExecuteScalingRuleResponse() (response *ExecuteScalingRuleResponse) { + response = &ExecuteScalingRuleResponse{ + BaseResponse: &responses.BaseResponse{}, + } + return +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/modify_scaling_group.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/modify_scaling_group.go new file mode 100755 index 000000000000..783692863a76 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/modify_scaling_group.go @@ -0,0 +1,118 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package ess + +import ( + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/responses" +) + +// ModifyScalingGroup invokes the ess.ModifyScalingGroup API synchronously +// api document: https://help.aliyun.com/api/ess/modifyscalinggroup.html +func (client *Client) ModifyScalingGroup(request *ModifyScalingGroupRequest) (response *ModifyScalingGroupResponse, err error) { + response = CreateModifyScalingGroupResponse() + err = client.DoAction(request, response) + return +} + +// ModifyScalingGroupWithChan invokes the ess.ModifyScalingGroup API asynchronously +// api document: https://help.aliyun.com/api/ess/modifyscalinggroup.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) ModifyScalingGroupWithChan(request *ModifyScalingGroupRequest) (<-chan *ModifyScalingGroupResponse, <-chan error) { + responseChan := make(chan *ModifyScalingGroupResponse, 1) + errChan := make(chan error, 1) + err := client.AddAsyncTask(func() { + defer close(responseChan) + defer close(errChan) + response, err := client.ModifyScalingGroup(request) + if err != nil { + errChan <- err + } else { + responseChan <- response + } + }) + if err != nil { + errChan <- err + close(responseChan) + close(errChan) + } + return responseChan, errChan +} + +// ModifyScalingGroupWithCallback invokes the ess.ModifyScalingGroup API asynchronously +// api document: https://help.aliyun.com/api/ess/modifyscalinggroup.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) ModifyScalingGroupWithCallback(request *ModifyScalingGroupRequest, callback func(response *ModifyScalingGroupResponse, err error)) <-chan int { + result := make(chan int, 1) + err := client.AddAsyncTask(func() { + var response *ModifyScalingGroupResponse + var err error + defer close(result) + response, err = client.ModifyScalingGroup(request) + callback(response, err) + result <- 1 + }) + if err != nil { + defer close(result) + callback(nil, err) + result <- 0 + } + return result +} + +// ModifyScalingGroupRequest is the request struct for api ModifyScalingGroup +type ModifyScalingGroupRequest struct { + *requests.RpcRequest + ResourceOwnerId requests.Integer `position:"Query" name:"ResourceOwnerId"` + HealthCheckType string `position:"Query" name:"HealthCheckType"` + LaunchTemplateId string `position:"Query" name:"LaunchTemplateId"` + ResourceOwnerAccount string `position:"Query" name:"ResourceOwnerAccount"` + ScalingGroupName string `position:"Query" name:"ScalingGroupName"` + ScalingGroupId string `position:"Query" name:"ScalingGroupId"` + OwnerAccount string `position:"Query" name:"OwnerAccount"` + ActiveScalingConfigurationId string `position:"Query" name:"ActiveScalingConfigurationId"` + MinSize requests.Integer `position:"Query" name:"MinSize"` + OwnerId requests.Integer `position:"Query" name:"OwnerId"` + LaunchTemplateVersion string `position:"Query" name:"LaunchTemplateVersion"` + MaxSize requests.Integer `position:"Query" name:"MaxSize"` + DefaultCooldown requests.Integer `position:"Query" name:"DefaultCooldown"` + RemovalPolicy1 string `position:"Query" name:"RemovalPolicy.1"` + RemovalPolicy2 string `position:"Query" name:"RemovalPolicy.2"` +} + +// ModifyScalingGroupResponse is the response struct for api ModifyScalingGroup +type ModifyScalingGroupResponse struct { + *responses.BaseResponse + RequestId string 
`json:"RequestId" xml:"RequestId"` +} + +// CreateModifyScalingGroupRequest creates a request to invoke ModifyScalingGroup API +func CreateModifyScalingGroupRequest() (request *ModifyScalingGroupRequest) { + request = &ModifyScalingGroupRequest{ + RpcRequest: &requests.RpcRequest{}, + } + request.InitWithApiInfo("Ess", "2014-08-28", "ModifyScalingGroup", "ess", "openAPI") + return +} + +// CreateModifyScalingGroupResponse creates a response to parse from ModifyScalingGroup response +func CreateModifyScalingGroupResponse() (response *ModifyScalingGroupResponse) { + response = &ModifyScalingGroupResponse{ + BaseResponse: &responses.BaseResponse{}, + } + return +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/modify_scaling_rule.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/modify_scaling_rule.go new file mode 100644 index 000000000000..1c071a5ae0b6 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/modify_scaling_rule.go @@ -0,0 +1,112 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ess + +import ( + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/responses" +) + +// ModifyScalingRule invokes the ess.ModifyScalingRule API synchronously +// api document: https://help.aliyun.com/api/ess/modifyscalingrule.html +func (client *Client) ModifyScalingRule(request *ModifyScalingRuleRequest) (response *ModifyScalingRuleResponse, err error) { + response = CreateModifyScalingRuleResponse() + err = client.DoAction(request, response) + return +} + +// ModifyScalingRuleWithChan invokes the ess.ModifyScalingRule API asynchronously +// api document: https://help.aliyun.com/api/ess/modifyscalingrule.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) ModifyScalingRuleWithChan(request *ModifyScalingRuleRequest) (<-chan *ModifyScalingRuleResponse, <-chan error) { + responseChan := make(chan *ModifyScalingRuleResponse, 1) + errChan := make(chan error, 1) + err := client.AddAsyncTask(func() { + defer close(responseChan) + defer close(errChan) + response, err := client.ModifyScalingRule(request) + if err != nil { + errChan <- err + } else { + responseChan <- response + } + }) + if err != nil { + errChan <- err + close(responseChan) + close(errChan) + } + return responseChan, errChan +} + +// ModifyScalingRuleWithCallback invokes the ess.ModifyScalingRule API asynchronously +// api document: https://help.aliyun.com/api/ess/modifyscalingrule.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) ModifyScalingRuleWithCallback(request *ModifyScalingRuleRequest, callback func(response *ModifyScalingRuleResponse, err error)) <-chan int { + result := make(chan int, 1) + err := client.AddAsyncTask(func() { + var response 
*ModifyScalingRuleResponse + var err error + defer close(result) + response, err = client.ModifyScalingRule(request) + callback(response, err) + result <- 1 + }) + if err != nil { + defer close(result) + callback(nil, err) + result <- 0 + } + return result +} + +// ModifyScalingRuleRequest is the request struct for api ModifyScalingRule +type ModifyScalingRuleRequest struct { + *requests.RpcRequest + ScalingRuleName string `position:"Query" name:"ScalingRuleName"` + ResourceOwnerId requests.Integer `position:"Query" name:"ResourceOwnerId"` + ResourceOwnerAccount string `position:"Query" name:"ResourceOwnerAccount"` + AdjustmentValue requests.Integer `position:"Query" name:"AdjustmentValue"` + OwnerAccount string `position:"Query" name:"OwnerAccount"` + Cooldown requests.Integer `position:"Query" name:"Cooldown"` + AdjustmentType string `position:"Query" name:"AdjustmentType"` + OwnerId requests.Integer `position:"Query" name:"OwnerId"` + ScalingRuleId string `position:"Query" name:"ScalingRuleId"` +} + +// ModifyScalingRuleResponse is the response struct for api ModifyScalingRule +type ModifyScalingRuleResponse struct { + *responses.BaseResponse + RequestId string `json:"RequestId" xml:"RequestId"` +} + +// CreateModifyScalingRuleRequest creates a request to invoke ModifyScalingRule API +func CreateModifyScalingRuleRequest() (request *ModifyScalingRuleRequest) { + request = &ModifyScalingRuleRequest{ + RpcRequest: &requests.RpcRequest{}, + } + request.InitWithApiInfo("Ess", "2014-08-28", "ModifyScalingRule", "ess", "openAPI") + return +} + +// CreateModifyScalingRuleResponse creates a response to parse from ModifyScalingRule response +func CreateModifyScalingRuleResponse() (response *ModifyScalingRuleResponse) { + response = &ModifyScalingRuleResponse{ + BaseResponse: &responses.BaseResponse{}, + } + return +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/remove_instances.go b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/remove_instances.go new file mode 100755 index 000000000000..8fa896fe4c9b --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess/remove_instances.go @@ -0,0 +1,129 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package ess + +import ( + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/responses" +) + +// RemoveInstances invokes the ess.RemoveInstances API synchronously +// api document: https://help.aliyun.com/api/ess/removeinstances.html +func (client *Client) RemoveInstances(request *RemoveInstancesRequest) (response *RemoveInstancesResponse, err error) { + response = CreateRemoveInstancesResponse() + err = client.DoAction(request, response) + return +} + +// RemoveInstancesWithChan invokes the ess.RemoveInstances API asynchronously +// api document: https://help.aliyun.com/api/ess/removeinstances.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) RemoveInstancesWithChan(request *RemoveInstancesRequest) (<-chan *RemoveInstancesResponse, <-chan error) { + responseChan := make(chan *RemoveInstancesResponse, 1) + errChan := make(chan error, 1) + err := client.AddAsyncTask(func() { + defer close(responseChan) + defer close(errChan) + response, err := client.RemoveInstances(request) + if err != nil { + errChan <- err + } else { + responseChan <- response + } + }) + if err != nil { + errChan <- err + close(responseChan) + close(errChan) + } + return responseChan, errChan +} + +// RemoveInstancesWithCallback invokes the ess.RemoveInstances API asynchronously +// api document: https://help.aliyun.com/api/ess/removeinstances.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) RemoveInstancesWithCallback(request *RemoveInstancesRequest, callback func(response *RemoveInstancesResponse, err error)) <-chan int { + result := make(chan int, 1) + err := client.AddAsyncTask(func() { + var response *RemoveInstancesResponse + var err error + defer close(result) + response, err = client.RemoveInstances(request) + callback(response, err) + result <- 1 + }) + if err != nil { + defer close(result) + callback(nil, err) + result <- 0 + } + return result +} + +// RemoveInstancesRequest is the request struct for api RemoveInstances +type RemoveInstancesRequest struct { + *requests.RpcRequest + InstanceId10 string `position:"Query" name:"InstanceId.10"` + ResourceOwnerId requests.Integer `position:"Query" name:"ResourceOwnerId"` + InstanceId12 string `position:"Query" name:"InstanceId.12"` + InstanceId11 string `position:"Query" name:"InstanceId.11"` + ScalingGroupId string `position:"Query" name:"ScalingGroupId"` + InstanceId20 string `position:"Query" name:"InstanceId.20"` + InstanceId1 string `position:"Query" name:"InstanceId.1"` + InstanceId3 string `position:"Query" name:"InstanceId.3"` + ResourceOwnerAccount string `position:"Query" name:"ResourceOwnerAccount"` + InstanceId2 string `position:"Query" name:"InstanceId.2"` + InstanceId5 string `position:"Query" name:"InstanceId.5"` + InstanceId4 string `position:"Query" name:"InstanceId.4"` + OwnerAccount string `position:"Query" name:"OwnerAccount"` + InstanceId7 string `position:"Query" name:"InstanceId.7"` + InstanceId6 string `position:"Query" name:"InstanceId.6"` + InstanceId9 string `position:"Query" name:"InstanceId.9"` + InstanceId8 string `position:"Query" name:"InstanceId.8"` + OwnerId requests.Integer `position:"Query" name:"OwnerId"` + InstanceId18 string `position:"Query" name:"InstanceId.18"` + InstanceId17 string `position:"Query" name:"InstanceId.17"` + InstanceId19 string `position:"Query" 
name:"InstanceId.19"` + InstanceId14 string `position:"Query" name:"InstanceId.14"` + InstanceId13 string `position:"Query" name:"InstanceId.13"` + InstanceId16 string `position:"Query" name:"InstanceId.16"` + InstanceId15 string `position:"Query" name:"InstanceId.15"` +} + +// RemoveInstancesResponse is the response struct for api RemoveInstances +type RemoveInstancesResponse struct { + *responses.BaseResponse + ScalingActivityId string `json:"ScalingActivityId" xml:"ScalingActivityId"` + RequestId string `json:"RequestId" xml:"RequestId"` +} + +// CreateRemoveInstancesRequest creates a request to invoke RemoveInstances API +func CreateRemoveInstancesRequest() (request *RemoveInstancesRequest) { + request = &RemoveInstancesRequest{ + RpcRequest: &requests.RpcRequest{}, + } + request.InitWithApiInfo("Ess", "2014-08-28", "RemoveInstances", "ess", "openAPI") + return +} + +// CreateRemoveInstancesResponse creates a response to parse from RemoveInstances response +func CreateRemoveInstancesResponse() (response *RemoveInstancesResponse) { + response = &RemoveInstancesResponse{ + BaseResponse: &responses.BaseResponse{}, + } + return +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alicloud_auto_scaling.go b/cluster-autoscaler/cloudprovider/alicloud/alicloud_auto_scaling.go new file mode 100644 index 000000000000..e0a4fb4f83d3 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alicloud_auto_scaling.go @@ -0,0 +1,235 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package alicloud + +import ( + "fmt" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/sdk/requests" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess" + "k8s.io/klog" + "time" +) + +const ( + refreshClientInterval = 60 * time.Minute + acsAutogenIncreaseRules = "acs-autogen-increase-rules" + defaultAdjustmentType = "TotalCapacity" +) + +//autoScaling define the interface usage in alibaba-cloud-sdk-go. 
+type autoScaling interface {
+	DescribeScalingGroups(req *ess.DescribeScalingGroupsRequest) (*ess.DescribeScalingGroupsResponse, error)
+	DescribeScalingConfigurations(req *ess.DescribeScalingConfigurationsRequest) (*ess.DescribeScalingConfigurationsResponse, error)
+	DescribeScalingRules(req *ess.DescribeScalingRulesRequest) (*ess.DescribeScalingRulesResponse, error)
+	DescribeScalingInstances(req *ess.DescribeScalingInstancesRequest) (*ess.DescribeScalingInstancesResponse, error)
+	CreateScalingRule(req *ess.CreateScalingRuleRequest) (*ess.CreateScalingRuleResponse, error)
+	ModifyScalingGroup(req *ess.ModifyScalingGroupRequest) (*ess.ModifyScalingGroupResponse, error)
+	RemoveInstances(req *ess.RemoveInstancesRequest) (*ess.RemoveInstancesResponse, error)
+	ExecuteScalingRule(req *ess.ExecuteScalingRuleRequest) (*ess.ExecuteScalingRuleResponse, error)
+	ModifyScalingRule(req *ess.ModifyScalingRuleRequest) (*ess.ModifyScalingRuleResponse, error)
+	DeleteScalingRule(req *ess.DeleteScalingRuleRequest) (*ess.DeleteScalingRuleResponse, error)
+}
+
+func newAutoScalingWrapper(cfg *cloudConfig) (*autoScalingWrapper, error) {
+	if !cfg.isValid() {
+		// Should never be reached: callers validate the config first.
+		return nil, fmt.Errorf("your cloud config is not valid")
+	}
+	asw := &autoScalingWrapper{
+		cfg: cfg,
+	}
+	if cfg.STSEnabled {
+		// STS tokens expire, so refresh the client periodically.
+		go func(asw *autoScalingWrapper, cfg *cloudConfig) {
+			timer := time.NewTicker(refreshClientInterval)
+			defer timer.Stop()
+			for {
+				select {
+				case <-timer.C:
+					client, err := getEssClient(cfg)
+					if err == nil {
+						asw.autoScaling = client
+					}
+				}
+			}
+		}(asw, cfg)
+	}
+	client, err := getEssClient(cfg)
+	if err == nil {
+		asw.autoScaling = client
+	}
+	return asw, err
+}
+
+func getEssClient(cfg *cloudConfig) (client *ess.Client, err error) {
+	region := cfg.getRegion()
+	if cfg.STSEnabled {
+		auth, errToken := cfg.getSTSToken()
+		if errToken != nil {
+			klog.Errorf("failed to get STS token from metadata because of %s", errToken.Error())
+			return nil, errToken
+		}
+		// assign to the named return values so the error is not shadowed
+		client, err = ess.NewClientWithStsToken(region, auth.AccessKeyId, auth.AccessKeySecret, auth.SecurityToken)
+		if err != nil {
+			klog.Errorf("failed to create ESS client with STS token because of %s", err.Error())
+		}
+	} else {
+		client, err = ess.NewClientWithAccessKey(region, cfg.AccessKeyID, cfg.AccessKeySecret)
+		if err != nil {
+			klog.Errorf("failed to create ESS client with AccessKeyId and AccessKeySecret because of %s", err.Error())
+		}
+	}
+	return
+}
+
+// autoScalingWrapper wraps the ESS client with convenience methods on top of
+// the raw API and keeps the client fresh when STS tokens are in use.
+type autoScalingWrapper struct {
+	autoScaling
+	cfg *cloudConfig
+}
+
+func (m autoScalingWrapper) getInstanceTypeByConfiguration(configID string, asgId string) (string, error) {
+	params := ess.CreateDescribeScalingConfigurationsRequest()
+	params.ScalingConfigurationId1 = configID
+	params.ScalingGroupId = asgId
+
+	resp, err := m.DescribeScalingConfigurations(params)
+	if err != nil {
+		klog.Errorf("failed to get ScalingConfiguration info for %s because of %s", configID, err.Error())
+		return "", err
+	}
+
+	configurations := resp.ScalingConfigurations.ScalingConfiguration
+
+	if len(configurations) < 1 {
+		return "", fmt.Errorf("unable to get first ScalingConfiguration for %s", configID)
+	}
+	if len(configurations) > 1 {
+		klog.Warningf("more than one ScalingConfiguration found for config(%q) and ASG(%q)", configID, asgId)
+	}
+
+	return configurations[0].InstanceType, nil
+}
+
+func (m autoScalingWrapper) getScalingGroupByID(groupID string) (*ess.ScalingGroup, error) {
+	params := ess.CreateDescribeScalingGroupsRequest()
+	params.ScalingGroupId1 = groupID
+
+	resp, err := m.DescribeScalingGroups(params)
+	if err != nil {
+		return nil, err
+	}
+	groups := resp.ScalingGroups.ScalingGroup
+	if len(groups) < 1 {
+		return nil, fmt.Errorf("unable to get first ScalingGroup for %s", groupID)
+	}
+	if len(groups) > 1 {
+		klog.Warningf("more than one ScalingGroup for %s, use first one", groupID)
+	}
+	return &groups[0], nil
+}
+
+func (m autoScalingWrapper) getScalingGroupByName(groupName string) (*ess.ScalingGroup, error) {
+	params := ess.CreateDescribeScalingGroupsRequest()
+	params.ScalingGroupName = groupName
+
+	resp, err := m.DescribeScalingGroups(params)
+	if err != nil {
+		return nil, err
+	}
+	groups := resp.ScalingGroups.ScalingGroup
+	if len(groups) < 1 {
+		return nil, fmt.Errorf("unable to get first ScalingGroup for %q", groupName)
+	}
+	if len(groups) > 1 {
+		klog.Warningf("more than one ScalingGroup for %q, use first one", groupName)
+	}
+	return &groups[0], nil
+}
+
+func (m autoScalingWrapper) getScalingInstancesByGroup(asgId string) ([]ess.ScalingInstance, error) {
+	params := ess.CreateDescribeScalingInstancesRequest()
+	params.ScalingGroupId = asgId
+	resp, err := m.DescribeScalingInstances(params)
+	if err != nil {
+		klog.Errorf("failed to request scaling instances for %s because of %s", asgId, err.Error())
+		return nil, err
+	}
+	return resp.ScalingInstances.ScalingInstance, nil
+}
+
+// setCapcityInstanceSize drives the scaling group to the requested capacity by
+// creating (or reusing) a TotalCapacity scaling rule, executing it, and
+// cleaning the rule up again afterwards.
+func (m autoScalingWrapper) setCapcityInstanceSize(groupId string, capcityInstanceSize int64) error {
+	var (
+		ruleId         string
+		scalingRuleAri string
+	)
+	req := ess.CreateDescribeScalingRulesRequest()
+	req.RegionId = m.cfg.getRegion()
+	req.ScalingGroupId = groupId
+	req.ScalingRuleName1 = acsAutogenIncreaseRules
+	resp, err := m.DescribeScalingRules(req)
+	if err != nil {
+		// nothing has been created yet, so just propagate the error
+		return err
+	}
+
+	defer func() {
+		deleteReq := ess.CreateDeleteScalingRuleRequest()
+		deleteReq.ScalingRuleId = ruleId
+		deleteReq.RegionId = m.cfg.getRegion()
+		_, err := m.DeleteScalingRule(deleteReq)
+		if err != nil {
+			klog.Warningf("failed to clean up the scaling rule because of %s", err.Error())
+		}
+	}()
+
+	if len(resp.ScalingRules.ScalingRule) == 0 {
+		// no existing rule found, create one
+		createReq := ess.CreateCreateScalingRuleRequest()
+		createReq.RegionId = m.cfg.getRegion()
+		createReq.ScalingGroupId = groupId
+		createReq.AdjustmentType = defaultAdjustmentType
+		createReq.AdjustmentValue = requests.NewInteger64(capcityInstanceSize)
+		resp, err := m.CreateScalingRule(createReq)
+		if err != nil {
+			return err
+		}
+		ruleId = resp.ScalingRuleId
+		scalingRuleAri = resp.ScalingRuleAri
+	} else {
+		ruleId = resp.ScalingRules.ScalingRule[0].ScalingRuleId
+		scalingRuleAri = resp.ScalingRules.ScalingRule[0].ScalingRuleAri
+	}
+
+	modifyReq := ess.CreateModifyScalingRuleRequest()
+	modifyReq.RegionId = m.cfg.getRegion()
+	modifyReq.ScalingRuleId = ruleId
+	modifyReq.AdjustmentType = defaultAdjustmentType
+	modifyReq.AdjustmentValue = requests.NewInteger64(capcityInstanceSize)
+	_, err = m.ModifyScalingRule(modifyReq)
+	if err != nil {
+		return err
+	}
+	executeReq := ess.CreateExecuteScalingRuleRequest()
+	executeReq.RegionId = m.cfg.getRegion()
+	executeReq.ScalingRuleAri = scalingRuleAri
+
+	_, err = m.ExecuteScalingRule(executeReq)
+	if err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/cluster-autoscaler/cloudprovider/alicloud/alicloud_auto_scaling_group.go b/cluster-autoscaler/cloudprovider/alicloud/alicloud_auto_scaling_group.go
new file mode 100644
index 000000000000..cf3b20581f92
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/alicloud/alicloud_auto_scaling_group.go
@@ -0,0 +1,213 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package alicloud
+
+import (
+	"fmt"
+
+	apiv1 "k8s.io/api/core/v1"
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
+	"k8s.io/klog"
+	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+)
+
+// Asg implements the NodeGroup interface.
+type Asg struct {
+	manager  *AliCloudManager
+	minSize  int
+	maxSize  int
+	regionId string
+	id       string
+}
+
+// MaxSize returns maximum size of the node group.
+func (asg *Asg) MaxSize() int {
+	return asg.maxSize
+}
+
+// MinSize returns minimum size of the node group.
+func (asg *Asg) MinSize() int {
+	return asg.minSize
+}
+
+// TargetSize returns the current TARGET size of the node group. It is possible that the
+// number is different from the number of nodes registered in Kubernetes.
+func (asg *Asg) TargetSize() (int, error) {
+	size, err := asg.manager.GetAsgSize(asg)
+	return int(size), err
+}
+
+// IncreaseSize increases Asg size
+func (asg *Asg) IncreaseSize(delta int) error {
+	klog.Infof("increase ASG:%s with %d nodes", asg.Id(), delta)
+	if delta <= 0 {
+		return fmt.Errorf("size increase must be positive")
+	}
+	size, err := asg.manager.GetAsgSize(asg)
+	if err != nil {
+		klog.Errorf("failed to get ASG size because of %s", err.Error())
+		return err
+	}
+	if int(size)+delta > asg.MaxSize() {
+		return fmt.Errorf("size increase is too large - desired:%d max:%d", int(size)+delta, asg.MaxSize())
+	}
+	return asg.manager.SetAsgSize(asg, size+int64(delta))
+}
+
+// DecreaseTargetSize decreases the target size of the node group. This function
+// doesn't permit to delete any existing node and can be used only to reduce the
+// request for new nodes that have not been yet fulfilled. Delta should be negative.
+// It is assumed that cloud provider will not delete the existing nodes when there
+// is an option to just decrease the target.
+func (asg *Asg) DecreaseTargetSize(delta int) error {
+	klog.V(4).Infof("Aliyun: DecreaseTargetSize() with args: %v", delta)
+	if delta >= 0 {
+		return fmt.Errorf("size decrease must be negative")
+	}
+	size, err := asg.manager.GetAsgSize(asg)
+	if err != nil {
+		klog.Errorf("failed to get ASG size because of %s", err.Error())
+		return err
+	}
+	nodes, err := asg.manager.GetAsgNodes(asg)
+	if err != nil {
+		klog.Errorf("failed to get ASG nodes because of %s", err.Error())
+		return err
+	}
+	if int(size)+delta < len(nodes) {
+		return fmt.Errorf("attempt to delete existing nodes targetSize:%d delta:%d existingNodes: %d",
+			size, delta, len(nodes))
+	}
+	return asg.manager.SetAsgSize(asg, size+int64(delta))
+}
+
+// Belongs returns true if the given node belongs to the NodeGroup.
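+// Matching is done on the ECS instance id parsed out of the node's provider
+// id, which on alicloud has the form `REGION.INSTANCE_ID`, e.g. the
+// (hypothetical) id `cn-hangzhou.i-2zeabcdef12345`.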
+func (asg *Asg) Belongs(node *apiv1.Node) (bool, error) {
+	instanceId, err := ecsInstanceIdFromProviderId(node.Spec.ProviderID)
+	if err != nil {
+		return false, err
+	}
+	targetAsg, err := asg.manager.GetAsgForInstance(instanceId)
+	if err != nil {
+		return false, err
+	}
+	if targetAsg == nil {
+		return false, fmt.Errorf("%s doesn't belong to a known Asg", node.Name)
+	}
+	if targetAsg.Id() != asg.Id() {
+		return false, nil
+	}
+	return true, nil
+}
+
+// DeleteNodes deletes the nodes from the group.
+func (asg *Asg) DeleteNodes(nodes []*apiv1.Node) error {
+	size, err := asg.manager.GetAsgSize(asg)
+	if err != nil {
+		klog.Errorf("failed to get ASG size because of %s", err.Error())
+		return err
+	}
+	if int(size) <= asg.MinSize() {
+		return fmt.Errorf("min size reached, nodes will not be deleted")
+	}
+	nodeIds := make([]string, 0, len(nodes))
+	for _, node := range nodes {
+		belongs, err := asg.Belongs(node)
+		if err != nil {
+			klog.Errorf("failed to check whether node:%s belongs to asg:%s", node.GetName(), asg.Id())
+			return err
+		}
+		if !belongs {
+			return fmt.Errorf("%s belongs to a different asg than %s", node.Name, asg.Id())
+		}
+		instanceId, err := ecsInstanceIdFromProviderId(node.Spec.ProviderID)
+		if err != nil {
+			klog.Errorf("failed to find instanceId from providerId because of %s", err.Error())
+			return err
+		}
+		nodeIds = append(nodeIds, instanceId)
+	}
+	return asg.manager.DeleteInstances(nodeIds)
+}
+
+// Id returns asg id.
+func (asg *Asg) Id() string {
+	return asg.id
+}
+
+// RegionId returns regionId of asg
+func (asg *Asg) RegionId() string {
+	return asg.regionId
+}
+
+// Debug returns a debug string for the Asg.
+func (asg *Asg) Debug() string {
+	return fmt.Sprintf("%s (%d:%d)", asg.Id(), asg.MinSize(), asg.MaxSize())
+}
+
+// Nodes returns a list of all nodes that belong to this node group.
+func (asg *Asg) Nodes() ([]cloudprovider.Instance, error) {
+	instanceNames, err := asg.manager.GetAsgNodes(asg)
+	if err != nil {
+		return nil, err
+	}
+	instances := make([]cloudprovider.Instance, 0, len(instanceNames))
+	for _, instanceName := range instanceNames {
+		instances = append(instances, cloudprovider.Instance{Id: instanceName})
+	}
+	return instances, nil
+}
+
+// TemplateNodeInfo returns a node template for this node group.
+func (asg *Asg) TemplateNodeInfo() (*schedulercache.NodeInfo, error) {
+	template, err := asg.manager.getAsgTemplate(asg.id)
+	if err != nil {
+		return nil, err
+	}
+
+	node, err := asg.manager.buildNodeFromTemplate(asg, template)
+	if err != nil {
+		klog.Errorf("failed to build instanceType:%v from template in ASG:%s because of %s", template.InstanceType, asg.Id(), err.Error())
+		return nil, err
+	}
+
+	nodeInfo := schedulercache.NewNodeInfo(cloudprovider.BuildKubeProxy(asg.id))
+	nodeInfo.SetNode(node)
+	return nodeInfo, nil
+}
+
+// Exist checks if the node group really exists on the cloud provider side.
+// This allows telling a theoretical node group from a real one.
+func (asg *Asg) Exist() bool {
+	return true
+}
+
+// Create creates the node group on the cloud provider side.
+func (asg *Asg) Create() (cloudprovider.NodeGroup, error) {
+	return nil, cloudprovider.ErrNotImplemented
+}
+
+// Autoprovisioned returns true if the node group is autoprovisioned.
+func (asg *Asg) Autoprovisioned() bool {
+	return false
+}
+
+// Delete deletes the node group on the cloud provider side.
+// This will be executed only for autoprovisioned node groups, once their size drops to 0.
+func (asg *Asg) Delete() error {
+	return cloudprovider.ErrNotImplemented
+}
diff --git a/cluster-autoscaler/cloudprovider/alicloud/alicloud_auto_scaling_groups.go b/cluster-autoscaler/cloudprovider/alicloud/alicloud_auto_scaling_groups.go
new file mode 100644
index 000000000000..ec32f446f5c2
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/alicloud/alicloud_auto_scaling_groups.go
@@ -0,0 +1,103 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package alicloud
+
+import (
+	"sync"
+	"time"
+
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/klog"
+)
+
+type autoScalingGroups struct {
+	registeredAsgs           []*asgInformation
+	instanceToAsg            map[string]*Asg
+	cacheMutex               sync.Mutex
+	instancesNotInManagedAsg map[string]struct{}
+	service                  *autoScalingWrapper
+}
+
+func newAutoScalingGroups(service *autoScalingWrapper) *autoScalingGroups {
+	registry := &autoScalingGroups{
+		registeredAsgs:           make([]*asgInformation, 0),
+		service:                  service,
+		instanceToAsg:            make(map[string]*Asg),
+		instancesNotInManagedAsg: make(map[string]struct{}),
+	}
+
+	go wait.Forever(func() {
+		registry.cacheMutex.Lock()
+		defer registry.cacheMutex.Unlock()
+		if err := registry.regenerateCache(); err != nil {
+			klog.Errorf("failed to regenerate ASG cache because of %s", err.Error())
+		}
+	}, time.Hour)
+
+	return registry
+}
+
+// Register registers asg in AliCloud Manager.
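+// Registration only appends the group to the list; the instance cache picks
+// it up on the next hourly rebuild by the wait.Forever loop above, or on the
+// next FindForInstance cache miss.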
+func (m *autoScalingGroups) Register(asg *Asg) { + m.cacheMutex.Lock() + defer m.cacheMutex.Unlock() + + m.registeredAsgs = append(m.registeredAsgs, &asgInformation{ + config: asg, + }) +} + +// FindForInstance returns AsgConfig of the given Instance +func (m *autoScalingGroups) FindForInstance(instanceId string) (*Asg, error) { + m.cacheMutex.Lock() + defer m.cacheMutex.Unlock() + if config, found := m.instanceToAsg[instanceId]; found { + return config, nil + } + if _, found := m.instancesNotInManagedAsg[instanceId]; found { + // The instance is already known to not belong to any configured ASG + // Skip regenerateCache so that we won't unnecessarily call DescribeAutoScalingGroups + // See https://github.com/kubernetes/contrib/issues/2541 + return nil, nil + } + if err := m.regenerateCache(); err != nil { + return nil, err + } + if config, found := m.instanceToAsg[instanceId]; found { + return config, nil + } + // instance does not belong to any configured ASG + m.instancesNotInManagedAsg[instanceId] = struct{}{} + return nil, nil +} + +func (m *autoScalingGroups) regenerateCache() error { + newCache := make(map[string]*Asg) + + for _, asg := range m.registeredAsgs { + instances, err := m.service.getScalingInstancesByGroup(asg.config.id) + if err != nil { + return err + } + for _, instance := range instances { + newCache[instance.InstanceId] = asg.config + } + } + + m.instanceToAsg = newCache + return nil +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alicloud_cloud_config.go b/cluster-autoscaler/cloudprovider/alicloud/alicloud_cloud_config.go new file mode 100644 index 000000000000..d8297acf46b4 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alicloud_cloud_config.go @@ -0,0 +1,99 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package alicloud + +import ( + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/metadata" + "k8s.io/klog" + "os" +) + +const ( + accessKeyId = "ACCESS_KEY_ID" + accessKeyScret = "ACCESS_KEY_SECRET" + regionId = "REGION_ID" +) + +type cloudConfig struct { + RegionId string + AccessKeyID string + AccessKeySecret string + STSEnabled bool +} + +func (cc *cloudConfig) isValid() bool { + if cc.AccessKeyID == "" { + cc.AccessKeyID = os.Getenv(accessKeyId) + } + + if cc.AccessKeySecret == "" { + cc.AccessKeySecret = os.Getenv(accessKeyScret) + } + + if cc.RegionId == "" { + cc.RegionId = os.Getenv(regionId) + } + + if cc.RegionId == "" || cc.AccessKeyID == "" || cc.AccessKeySecret == "" { + klog.V(5).Infof("Failed to get AccessKeyId:%s,AccessKeySecret:%s,RegionId:%s from cloudConfig and Env\n", cc.AccessKeyID, cc.AccessKeySecret, cc.RegionId) + klog.V(5).Infof("Try to use sts token in metadata instead.\n") + if cc.validateSTSToken() == true && cc.getRegion() != "" { + //if CA is working on ECS with valid role name, use sts token instead. 
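+			// Credential resolution order: explicit cloud config first, then the
+			// ACCESS_KEY_ID / ACCESS_KEY_SECRET / REGION_ID environment variables,
+			// and only then the instance-metadata STS token tried here.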
+ cc.STSEnabled = true + return true + } + } else { + cc.STSEnabled = false + return true + } + return false +} + +func (cc *cloudConfig) validateSTSToken() bool { + m := metadata.NewMetaData(nil) + r, err := m.RoleName() + if err != nil || r == "" { + klog.Warningf("The role name %s is not valid and error is %v", r, err) + return false + } + return true +} + +func (cc *cloudConfig) getSTSToken() (metadata.RoleAuth, error) { + m := metadata.NewMetaData(nil) + r, err := m.RoleName() + if err != nil { + return metadata.RoleAuth{}, err + } + auth, err := m.RamRoleToken(r) + if err != nil { + return metadata.RoleAuth{}, err + } + return auth, nil +} + +func (cc *cloudConfig) getRegion() string { + if cc.RegionId != "" { + return cc.RegionId + } + m := metadata.NewMetaData(nil) + r, err := m.Region() + if err != nil { + klog.Errorf("Failed to get RegionId from metadata.Because of %s\n", err.Error()) + } + return r +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alicloud_cloud_provider.go b/cluster-autoscaler/cloudprovider/alicloud/alicloud_cloud_provider.go new file mode 100644 index 000000000000..d3dcdbc838d1 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alicloud_cloud_provider.go @@ -0,0 +1,214 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package alicloud + +import ( + "fmt" + "strings" + + apiv1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" + "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/config/dynamic" + "k8s.io/autoscaler/cluster-autoscaler/utils/errors" + "k8s.io/klog" + "os" +) + +const ( + // ProviderName is the cloud provider name for alicloud + ProviderName = "alicloud" +) + +type aliCloudProvider struct { + manager *AliCloudManager + asgs []*Asg + resourceLimiter *cloudprovider.ResourceLimiter +} + +// BuildAliCloudProvider builds CloudProvider implementation for AliCloud. +func BuildAliCloudProvider(manager *AliCloudManager, discoveryOpts cloudprovider.NodeGroupDiscoveryOptions, resourceLimiter *cloudprovider.ResourceLimiter) (cloudprovider.CloudProvider, error) { + // TODO add discoveryOpts parameters check. 
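+	// Static discovery consumes node-group specs of the form
+	// minNodes:maxNodes:scalingGroupId, as passed via the cluster-autoscaler
+	// --nodes flag, e.g. (with a hypothetical group id):
+	//
+	//	--nodes=1:10:asg-2zeabcdef12345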
+	if discoveryOpts.StaticDiscoverySpecified() {
+		return buildStaticallyDiscoveringProvider(manager, discoveryOpts.NodeGroupSpecs, resourceLimiter)
+	}
+	if discoveryOpts.AutoDiscoverySpecified() {
+		return nil, fmt.Errorf("alicloud only supports static discovery of scaling groups for now")
+	}
+	return nil, fmt.Errorf("failed to build alicloud provider: node group specs must be specified")
+}
+
+func buildStaticallyDiscoveringProvider(manager *AliCloudManager, specs []string, resourceLimiter *cloudprovider.ResourceLimiter) (*aliCloudProvider, error) {
+	acp := &aliCloudProvider{
+		manager:         manager,
+		asgs:            make([]*Asg, 0),
+		resourceLimiter: resourceLimiter,
+	}
+	for _, spec := range specs {
+		if err := acp.addNodeGroup(spec); err != nil {
+			klog.Warningf("failed to add node group to alicloud provider with spec: %s", spec)
+			return nil, err
+		}
+	}
+	return acp, nil
+}
+
+// addNodeGroup adds a node group defined in string spec. Format:
+// minNodes:maxNodes:asgName
+func (ali *aliCloudProvider) addNodeGroup(spec string) error {
+	asg, err := buildAsgFromSpec(spec, ali.manager)
+	if err != nil {
+		klog.Errorf("failed to build ASG from spec because of %s", err.Error())
+		return err
+	}
+	ali.addAsg(asg)
+	return nil
+}
+
+// addAsg adds and registers an asg to this cloud provider
+func (ali *aliCloudProvider) addAsg(asg *Asg) {
+	ali.asgs = append(ali.asgs, asg)
+	ali.manager.RegisterAsg(asg)
+}
+
+func (ali *aliCloudProvider) Name() string {
+	return ProviderName
+}
+
+func (ali *aliCloudProvider) NodeGroups() []cloudprovider.NodeGroup {
+	result := make([]cloudprovider.NodeGroup, 0, len(ali.asgs))
+	for _, asg := range ali.asgs {
+		result = append(result, asg)
+	}
+	return result
+}
+
+// NodeGroupForNode returns the node group for the given node.
+func (ali *aliCloudProvider) NodeGroupForNode(node *apiv1.Node) (cloudprovider.NodeGroup, error) {
+	instanceId, err := ecsInstanceIdFromProviderId(node.Spec.ProviderID)
+	if err != nil {
+		klog.Errorf("failed to get instance Id from provider Id:%s because of %s", node.Spec.ProviderID, err.Error())
+		return nil, err
+	}
+	return ali.manager.GetAsgForInstance(instanceId)
+}
+
+// Pricing returns pricing model for this cloud provider or error if not available.
+func (ali *aliCloudProvider) Pricing() (cloudprovider.PricingModel, errors.AutoscalerError) {
+	return nil, cloudprovider.ErrNotImplemented
+}
+
+// GetAvailableMachineTypes gets all machine types that can be requested from the cloud provider.
+func (ali *aliCloudProvider) GetAvailableMachineTypes() ([]string, error) {
+	return []string{}, nil
+}
+
+// NewNodeGroup builds a theoretical node group based on the node definition provided. The node group is not automatically
+// created on the cloud provider side. The node group is not returned by NodeGroups() until it is created.
+func (ali *aliCloudProvider) NewNodeGroup(machineType string, labels map[string]string, systemLabels map[string]string, taints []apiv1.Taint, extraResources map[string]resource.Quantity) (cloudprovider.NodeGroup, error) {
+	return nil, cloudprovider.ErrNotImplemented
+}
+
+// GetResourceLimiter returns struct containing limits (max, min) for resources (cores, memory etc.).
+func (ali *aliCloudProvider) GetResourceLimiter() (*cloudprovider.ResourceLimiter, error) {
+	return ali.resourceLimiter, nil
+}
+
+// Refresh is called before every main loop and can be used to dynamically update cloud provider state.
+// In particular the list of node groups returned by NodeGroups can change as a result of CloudProvider.Refresh().
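+// With statically configured scaling groups there is no provider-side state
+// to rebuild before each loop, hence the no-op below.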
+func (ali *aliCloudProvider) Refresh() error {
+	return nil
+}
+
+// Cleanup stops the go routine that is handling the current view of the ASGs in the form of a cache
+func (ali *aliCloudProvider) Cleanup() error {
+	return nil
+}
+
+// GetInstanceID gets the instance ID for the specified node.
+func (ali *aliCloudProvider) GetInstanceID(node *apiv1.Node) string {
+	return node.Spec.ProviderID
+}
+
+// AliRef contains a reference to an ECS instance.
+type AliRef struct {
+	ID     string
+	Region string
+}
+
+// ecsInstanceIdFromProviderId extracts the ECS instance id from a provider id,
+// which must be in the format `REGION.INSTANCE_ID`.
+func ecsInstanceIdFromProviderId(id string) (string, error) {
+	parts := strings.Split(id, ".")
+	if len(parts) < 2 {
+		return "", fmt.Errorf("AliCloud: unexpected ProviderID format, providerID=%s", id)
+	}
+	return parts[1], nil
+}
+
+func buildAsgFromSpec(value string, manager *AliCloudManager) (*Asg, error) {
+	spec, err := dynamic.SpecFromString(value, true)
+
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse node group spec: %v", err)
+	}
+
+	// check whether the scaling group exists
+	_, err = manager.aService.getScalingGroupByID(spec.Name)
+	if err != nil {
+		klog.Errorf("your scaling group: %s does not exist", spec.Name)
+		return nil, err
+	}
+
+	asg := buildAsg(manager, spec.MinSize, spec.MaxSize, spec.Name, manager.cfg.getRegion())
+
+	return asg, nil
+}
+
+func buildAsg(manager *AliCloudManager, minSize int, maxSize int, id string, regionId string) *Asg {
+	return &Asg{
+		manager:  manager,
+		minSize:  minSize,
+		maxSize:  maxSize,
+		regionId: regionId,
+		id:       id,
+	}
+}
+
+// BuildAlicloud builds the alicloud cloud provider.
+func BuildAlicloud(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider {
+	var aliManager *AliCloudManager
+	var aliError error
+	if opts.CloudConfig != "" {
+		config, fileErr := os.Open(opts.CloudConfig)
+		if fileErr != nil {
+			klog.Fatalf("Couldn't open cloud provider configuration %s: %#v", opts.CloudConfig, fileErr)
+		}
+		defer config.Close()
+		aliManager, aliError = CreateAliCloudManager(config)
+	} else {
+		aliManager, aliError = CreateAliCloudManager(nil)
+	}
+	if aliError != nil {
+		klog.Fatalf("Failed to create Alicloud Manager: %v", aliError)
+	}
+	cloudProvider, err := BuildAliCloudProvider(aliManager, do, rl)
+	if err != nil {
+		klog.Fatalf("Failed to create Alicloud cloud provider: %v", err)
+	}
+	return cloudProvider
+}
diff --git a/cluster-autoscaler/cloudprovider/alicloud/alicloud_cloud_provider_test.go b/cluster-autoscaler/cloudprovider/alicloud/alicloud_cloud_provider_test.go
new file mode 100644
index 000000000000..681ec600360b
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/alicloud/alicloud_cloud_provider_test.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package alicloud + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestEcsInstanceIdFromProviderId(t *testing.T) { + instanceName := "cn-hangzhou.instanceId" + instanceId, err := ecsInstanceIdFromProviderId(instanceName) + if err != nil { + t.Error(err) + return + } + assert.Equal(t, instanceId, "instanceId") +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alicloud_instance_types.go b/cluster-autoscaler/cloudprovider/alicloud/alicloud_instance_types.go new file mode 100644 index 000000000000..72f3362a0185 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alicloud_instance_types.go @@ -0,0 +1,142 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package alicloud + +import ( + "fmt" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ecs" + "k8s.io/klog" + "time" +) + +type ecsInstance interface { + DescribeInstanceTypes(req *ecs.DescribeInstanceTypesRequest) (*ecs.DescribeInstanceTypesResponse, error) +} + +type instanceType struct { + instanceTypeID string + vcpu int64 + memoryInBytes int64 + gpu int64 +} + +type instanceTypeModel struct { + instanceType + // TODO add price model . +} + +// instanceWrapper will provide functions about +// instance types,price model and so on. 
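+//
+// Lookups hit the in-memory cache first and fall back to one
+// DescribeInstanceTypes call to repopulate it, so a cache miss costs a single
+// API round trip. A sketch of the lookup flow (the type id and sizes are
+// illustrative only):
+//
+//	it, err := iw.getInstanceTypeById("ecs.g5.large")
+//	// on success, e.g.: it.vcpu == 2, it.memoryInBytes == 8 << 30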
+type instanceWrapper struct {
+	ecsInstance
+	InstanceTypeCache map[string]*instanceTypeModel
+}
+
+func (iw *instanceWrapper) getInstanceTypeById(typeId string) (*instanceType, error) {
+	if instanceTypeModel := iw.FindInstanceType(typeId); instanceTypeModel != nil {
+		return &instanceTypeModel.instanceType, nil
+	}
+	err := iw.RefreshCache()
+	if err != nil {
+		klog.Errorf("failed to refresh instance type cache because of %s", err.Error())
+		return nil, err
+	}
+	if instanceTypeModel := iw.FindInstanceType(typeId); instanceTypeModel != nil {
+		return &instanceTypeModel.instanceType, nil
+	}
+	return nil, fmt.Errorf("failed to find the specific instance type by Id: %s", typeId)
+}
+
+func (iw *instanceWrapper) FindInstanceType(typeId string) *instanceTypeModel {
+	if iw.InstanceTypeCache == nil || iw.InstanceTypeCache[typeId] == nil {
+		return nil
+	}
+	return iw.InstanceTypeCache[typeId]
+}
+
+func (iw *instanceWrapper) RefreshCache() error {
+	req := ecs.CreateDescribeInstanceTypesRequest()
+	resp, err := iw.DescribeInstanceTypes(req)
+	if err != nil {
+		return err
+	}
+	if iw.InstanceTypeCache == nil {
+		iw.InstanceTypeCache = make(map[string]*instanceTypeModel)
+	}
+
+	types := resp.InstanceTypes.InstanceType
+
+	for _, item := range types {
+		iw.InstanceTypeCache[item.InstanceTypeId] = &instanceTypeModel{
+			instanceType{
+				instanceTypeID: item.InstanceTypeId,
+				vcpu:           int64(item.CpuCoreCount),
+				memoryInBytes:  int64(item.MemorySize * 1024 * 1024 * 1024),
+				gpu:            int64(item.GPUAmount),
+			},
+		}
+	}
+	return nil
+}
+
+func newInstanceWrapper(cfg *cloudConfig) (*instanceWrapper, error) {
+	if !cfg.isValid() {
+		return nil, fmt.Errorf("your cloud config is not valid")
+	}
+	iw := &instanceWrapper{}
+	if cfg.STSEnabled {
+		// STS tokens expire, so refresh the client periodically.
+		go func(iw *instanceWrapper, cfg *cloudConfig) {
+			timer := time.NewTicker(refreshClientInterval)
+			defer timer.Stop()
+			for {
+				select {
+				case <-timer.C:
+					client, err := getEcsClient(cfg)
+					if err == nil {
+						iw.ecsInstance = client
+					}
+				}
+			}
+		}(iw, cfg)
+	}
+	client, err := getEcsClient(cfg)
+	if err == nil {
+		iw.ecsInstance = client
+	}
+	return iw, err
+}
+
+func getEcsClient(cfg *cloudConfig) (client *ecs.Client, err error) {
+	region := cfg.getRegion()
+	if cfg.STSEnabled {
+		auth, errToken := cfg.getSTSToken()
+		if errToken != nil {
+			klog.Errorf("failed to get STS token from metadata because of %s", errToken.Error())
+			return nil, errToken
+		}
+		// assign to the named return values so the error is not shadowed
+		client, err = ecs.NewClientWithStsToken(region, auth.AccessKeyId, auth.AccessKeySecret, auth.SecurityToken)
+		if err != nil {
+			klog.Errorf("failed to create ECS client with STS token because of %s", err.Error())
+		}
+	} else {
+		client, err = ecs.NewClientWithAccessKey(region, cfg.AccessKeyID, cfg.AccessKeySecret)
+		if err != nil {
+			klog.Errorf("failed to create ECS client with AccessKeyId and AccessKeySecret because of %s", err.Error())
+		}
+	}
+	return
+}
diff --git a/cluster-autoscaler/cloudprovider/alicloud/alicloud_manager.go b/cluster-autoscaler/cloudprovider/alicloud/alicloud_manager.go
new file mode 100644
index 000000000000..7a27ffe89cfd
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/alicloud/alicloud_manager.go
@@ -0,0 +1,240 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package alicloud
+
+import (
+	"errors"
+	"fmt"
+	"gopkg.in/gcfg.v1"
+	"io"
+	apiv1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud/alibaba-cloud-sdk-go/services/ess"
+	"k8s.io/klog"
+	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
+	"math/rand"
+	"time"
+)
+
+const (
+	sdkCoolDownTimeout     = 200 * time.Millisecond
+	defaultPodAmountsLimit = 110
+	//ResourceGPU GPU resource type
+	ResourceGPU apiv1.ResourceName = "nvidia.com/gpu"
+)
+
+type asgInformation struct {
+	config   *Asg
+	basename string
+}
+
+// AliCloudManager handles alicloud service communication.
+type AliCloudManager struct {
+	cfg      *cloudConfig
+	aService *autoScalingWrapper
+	iService *instanceWrapper
+	asgs     *autoScalingGroups
+}
+
+type sgTemplate struct {
+	InstanceType *instanceType
+	Region       string
+	Zone         string
+}
+
+// CreateAliCloudManager constructs the AliCloudManager object.
+func CreateAliCloudManager(configReader io.Reader) (*AliCloudManager, error) {
+	cfg := &cloudConfig{}
+	if configReader != nil {
+		if err := gcfg.ReadInto(cfg, configReader); err != nil {
+			klog.Errorf("couldn't read config: %v", err)
+			return nil, err
+		}
+	}
+	if !cfg.isValid() {
+		return nil, errors.New("please check whether you have provided a correct AccessKeyId, AccessKeySecret, RegionId or STS token")
+	}
+
+	asw, err := newAutoScalingWrapper(cfg)
+	if err != nil {
+		klog.Errorf("failed to create NewAutoScalingWrapper because of %s", err.Error())
+		return nil, err
+	}
+	iw, err := newInstanceWrapper(cfg)
+	if err != nil {
+		klog.Errorf("failed to create NewInstanceWrapper because of %s", err.Error())
+		return nil, err
+	}
+
+	manager := &AliCloudManager{
+		cfg:      cfg,
+		asgs:     newAutoScalingGroups(asw),
+		aService: asw,
+		iService: iw,
+	}
+	return manager, nil
+}
+
+// RegisterAsg registers asg in AliCloud Manager.
+func (m *AliCloudManager) RegisterAsg(asg *Asg) {
+	m.asgs.Register(asg)
+}
+
+// GetAsgForInstance returns AsgConfig of the given Instance
+func (m *AliCloudManager) GetAsgForInstance(instanceId string) (*Asg, error) {
+	return m.asgs.FindForInstance(instanceId)
+}
+
+// GetAsgSize gets ASG size.
+func (m *AliCloudManager) GetAsgSize(asgConfig *Asg) (int64, error) {
+	sg, err := m.aService.getScalingGroupByID(asgConfig.id)
+	if err != nil {
+		return -1, fmt.Errorf("failed to describe ASG %s because of %s", asgConfig.id, err.Error())
+	}
+	return int64(sg.ActiveCapacity + sg.PendingCapacity), nil
+}
+
+// SetAsgSize sets ASG size.
+func (m *AliCloudManager) SetAsgSize(asg *Asg, size int64) error {
+	return m.aService.setCapcityInstanceSize(asg.id, size)
+}
+
+// DeleteInstances deletes the given instances. All instances must be controlled by the same ASG.
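+// Removal is issued one instance per RemoveInstances call, sleeping
+// sdkCoolDownTimeout between calls so that bulk deletions stay under the ESS
+// API flow-control limits.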
+func (m *AliCloudManager) DeleteInstances(instanceIds []string) error {
+	klog.Infof("start to remove Instances from ASG %v", instanceIds)
+	if len(instanceIds) == 0 {
+		klog.Warningf("no instanceIds were provided to remove")
+		return nil
+	}
+	// Check whether instances are in the same group
+	// TODO: remove or provide more meaningful check method.
+	commonAsg, err := m.asgs.FindForInstance(instanceIds[0])
+	if err != nil {
+		klog.Errorf("failed to find instance:%s in ASG", instanceIds[0])
+		return err
+	}
+	for _, instanceId := range instanceIds {
+		asg, err := m.asgs.FindForInstance(instanceId)
+		if err != nil {
+			klog.Errorf("failed to find instanceId:%s from ASG and exit", instanceId)
+			return err
+		}
+		if asg != commonAsg {
+			return fmt.Errorf("cannot delete instances that do not belong to the same ASG")
+		}
+	}
+	// Remove instances from the ASG in a loop
+	for _, instanceId := range instanceIds {
+		req := ess.CreateRemoveInstancesRequest()
+		req.ScalingGroupId = commonAsg.id
+		req.InstanceId1 = instanceId
+
+		resp, err := m.aService.RemoveInstances(req)
+		if err != nil {
+			klog.Errorf("failed to remove instance from scaling group %s, because of %s", commonAsg.id, err.Error())
+			continue
+		}
+		klog.Infof("remove instances successfully with response: %s", resp.GetHttpContentString())
+		// prevent from triggering api flow control
+		time.Sleep(sdkCoolDownTimeout)
+	}
+	return nil
+}
+
+// GetAsgNodes returns Asg nodes.
+func (m *AliCloudManager) GetAsgNodes(sg *Asg) ([]string, error) {
+	result := make([]string, 0)
+	instances, err := m.aService.getScalingInstancesByGroup(sg.id)
+	if err != nil {
+		return []string{}, err
+	}
+	for _, instance := range instances {
+		result = append(result, getNodeProviderID(instance.InstanceId, sg.RegionId()))
+	}
+	return result, nil
+}
+
+// getNodeProviderID builds a provider id from the ecs id and region
+func getNodeProviderID(id, region string) string {
+	return fmt.Sprintf("%s.%s", region, id)
+}
+
+func (m *AliCloudManager) getAsgTemplate(asgId string) (*sgTemplate, error) {
+	sg, err := m.aService.getScalingGroupByID(asgId)
+	if err != nil {
+		klog.Errorf("failed to get ASG by id:%s, because of %s", asgId, err.Error())
+		return nil, err
+	}
+
+	typeID, err := m.aService.getInstanceTypeByConfiguration(sg.ActiveScalingConfigurationId, asgId)
+	if err != nil {
+		klog.Errorf("failed to get instanceType by configuration Id:%s from ASG:%s, because of %s", sg.ActiveScalingConfigurationId, asgId, err.Error())
+		return nil, err
+	}
+	instanceType, err := m.iService.getInstanceTypeById(typeID)
+	if err != nil {
+		klog.Errorf("failed to get instanceType by Id:%s, because of %s", typeID, err.Error())
+		return nil, err
+	}
+	// TODO: Zone is left unset here, so the failure-domain label built from it
+	// will be empty until the zone is exposed by the scaling group response.
+	return &sgTemplate{
+		InstanceType: instanceType,
+		Region:       sg.RegionId,
+	}, nil
+}
+
+func (m *AliCloudManager) buildNodeFromTemplate(sg *Asg, template *sgTemplate) (*apiv1.Node, error) {
+	node := apiv1.Node{}
+	nodeName := fmt.Sprintf("%s-asg-%d", sg.id, rand.Int63())
+
+	node.ObjectMeta = metav1.ObjectMeta{
+		Name:     nodeName,
+		SelfLink: fmt.Sprintf("/api/v1/nodes/%s", nodeName),
+		Labels:   map[string]string{},
+	}
+
+	node.Status = apiv1.NodeStatus{
+		Capacity: apiv1.ResourceList{},
+	}
+
+	node.Status.Capacity[apiv1.ResourcePods] = *resource.NewQuantity(defaultPodAmountsLimit, resource.DecimalSI)
+	node.Status.Capacity[apiv1.ResourceCPU] = *resource.NewQuantity(template.InstanceType.vcpu, resource.DecimalSI)
+	node.Status.Capacity[apiv1.ResourceMemory] = *resource.NewQuantity(template.InstanceType.memoryInBytes, resource.DecimalSI)
+	//add 
gpu resource support + node.Status.Capacity[ResourceGPU] = *resource.NewQuantity(template.InstanceType.gpu, resource.DecimalSI) + + node.Status.Allocatable = node.Status.Capacity + + node.Labels = cloudprovider.JoinStringMaps(node.Labels, buildGenericLabels(template, nodeName)) + + node.Status.Conditions = cloudprovider.BuildReadyConditions() + return &node, nil +} + +func buildGenericLabels(template *sgTemplate, nodeName string) map[string]string { + result := make(map[string]string) + result[kubeletapis.LabelArch] = cloudprovider.DefaultArch + result[kubeletapis.LabelOS] = cloudprovider.DefaultOS + + result[kubeletapis.LabelInstanceType] = template.InstanceType.instanceTypeID + + result[kubeletapis.LabelZoneRegion] = template.Region + result[kubeletapis.LabelZoneFailureDomain] = template.Zone + result[kubeletapis.LabelHostname] = nodeName + return result +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/alicloud_manager_test.go b/cluster-autoscaler/cloudprovider/alicloud/alicloud_manager_test.go new file mode 100644 index 000000000000..9d985531a6ad --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/alicloud_manager_test.go @@ -0,0 +1,39 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package alicloud + +import ( + "github.com/stretchr/testify/assert" + kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" + "testing" +) + +func TestBuildGenericLabels(t *testing.T) { + template := &sgTemplate{ + InstanceType: &instanceType{ + instanceTypeID: "gn5-4c-8g", + vcpu: 4, + memoryInBytes: 8 * 1024 * 1024 * 1024, + gpu: 1, + }, + Region: "cn-hangzhou", + Zone: "cn-hangzhou-a", + } + nodeName := "virtual-node" + labels := buildGenericLabels(template, nodeName) + assert.Equal(t, labels[kubeletapis.LabelInstanceType], template.InstanceType.instanceTypeID) +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/examples/cluster-autoscaler-standard.yaml b/cluster-autoscaler/cloudprovider/alicloud/examples/cluster-autoscaler-standard.yaml new file mode 100644 index 000000000000..ae40545dac6a --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/examples/cluster-autoscaler-standard.yaml @@ -0,0 +1,176 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-addon: cluster-autoscaler.addons.k8s.io + k8s-app: cluster-autoscaler + name: cluster-autoscaler + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: cluster-autoscaler + labels: + k8s-addon: cluster-autoscaler.addons.k8s.io + k8s-app: cluster-autoscaler +rules: +- apiGroups: [""] + resources: ["events","endpoints"] + verbs: ["create", "patch"] +- apiGroups: [""] + resources: ["pods/eviction"] + verbs: ["create"] +- apiGroups: [""] + resources: ["pods/status"] + verbs: ["update"] +- apiGroups: [""] + resources: ["endpoints"] + resourceNames: ["cluster-autoscaler"] + verbs: ["get","update"] +- apiGroups: [""] + resources: ["nodes"] + verbs: ["watch","list","get","update"] +- apiGroups: [""] + resources: 
["pods","services","replicationcontrollers","persistentvolumeclaims","persistentvolumes"] + verbs: ["watch","list","get"] +- apiGroups: ["extensions"] + resources: ["replicasets","daemonsets"] + verbs: ["watch","list","get"] +- apiGroups: ["policy"] + resources: ["poddisruptionbudgets"] + verbs: ["watch","list"] +- apiGroups: ["apps"] + resources: ["statefulsets"] + verbs: ["watch","list","get"] +- apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["watch","list","get"] + +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + name: cluster-autoscaler + namespace: kube-system + labels: + k8s-addon: cluster-autoscaler.addons.k8s.io + k8s-app: cluster-autoscaler +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["create"] +- apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["cluster-autoscaler-status"] + verbs: ["delete","get","update"] + +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: cluster-autoscaler + labels: + k8s-addon: cluster-autoscaler.addons.k8s.io + k8s-app: cluster-autoscaler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-autoscaler +subjects: + - kind: ServiceAccount + name: cluster-autoscaler + namespace: kube-system + +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: cluster-autoscaler + namespace: kube-system + labels: + k8s-addon: cluster-autoscaler.addons.k8s.io + k8s-app: cluster-autoscaler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cluster-autoscaler +subjects: + - kind: ServiceAccount + name: cluster-autoscaler + namespace: kube-system + +--- +apiVersion: v1 +kind: Secret +metadata: + name: cloud-config +type: Opaque +data: + access-key-id: [YOUR_BASE64_AK_ID] + access-key-id: [YOUR_BASE64_AK_SECRET] + region-id: [YOUR_BASE64_REGION_ID] + +--- +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: cluster-autoscaler + namespace: kube-system + labels: + app: cluster-autoscaler +spec: + replicas: 1 + selector: + matchLabels: + app: cluster-autoscaler + template: + metadata: + labels: + app: cluster-autoscaler + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + serviceAccountName: admin + containers: + - image: registry.cn-hangzhou.aliyuncs.com/acs/autoscaler:v1.3.1 + name: cluster-autoscaler + resources: + limits: + cpu: 100m + memory: 300Mi + requests: + cpu: 100m + memory: 300Mi + command: + - ./cluster-autoscaler + - --v=4 + - --stderrthreshold=info + - --cloud-provider=alicloud + - --nodes=[min]:[max]:[ASG_ID] + imagePullPolicy: "Always" + env: + - name: ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: cloud-config + key: access-key-id + - name: ACCESS_KEY_SECRET + valueFrom: + secretKeyRef: + name: cloud-config + key: access-key-secret + - name: REGION_ID + valueFrom: + secretKeyRef: + name: cloud-config + key: region-id + volumeMounts: + - name: ssl-certs + mountPath: /etc/ssl/certs/ca-certificates.crt + readOnly: true + volumes: + - name: ssl-certs + hostPath: + path: "/etc/ssl/certs/ca-certificates.crt" \ No newline at end of file diff --git a/cluster-autoscaler/cloudprovider/alicloud/metadata/attempt.go b/cluster-autoscaler/cloudprovider/alicloud/metadata/attempt.go new file mode 100644 index 000000000000..796533926c5f --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/metadata/attempt.go @@ -0,0 +1,91 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metadata + +import ( + "time" +) + +// AttemptStrategy represents a strategy for waiting for an action +// to complete successfully. This is an internal type used by the +// implementation of other packages. +type AttemptStrategy struct { + Total time.Duration // total duration of attempt. + Delay time.Duration // interval between each try in the burst. + Min int // minimum number of retries; overrides Total +} + +// Attempt struct +type Attempt struct { + strategy AttemptStrategy + last time.Time + end time.Time + force bool + count int +} + +// Start begins a new sequence of attempts for the given strategy. +func (s AttemptStrategy) Start() *Attempt { + now := time.Now() + return &Attempt{ + strategy: s, + last: now, + end: now.Add(s.Total), + force: true, + } +} + +// Next waits until it is time to perform the next attempt or returns +// false if it is time to stop trying. +func (a *Attempt) Next() bool { + now := time.Now() + sleep := a.nextSleep(now) + if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count { + return false + } + a.force = false + if sleep > 0 && a.count > 0 { + time.Sleep(sleep) + now = time.Now() + } + a.count++ + a.last = now + return true +} + +func (a *Attempt) nextSleep(now time.Time) time.Duration { + sleep := a.strategy.Delay - now.Sub(a.last) + if sleep < 0 { + return 0 + } + return sleep +} + +// HasNext returns whether another attempt will be made if the current +// one fails. If it returns true, the following call to Next is +// guaranteed to return true. +func (a *Attempt) HasNext() bool { + if a.force || a.strategy.Min > a.count { + return true + } + now := time.Now() + if now.Add(a.nextSleep(now)).Before(a.end) { + a.force = true + return true + } + return false +} diff --git a/cluster-autoscaler/cloudprovider/alicloud/metadata/metadata.go b/cluster-autoscaler/cloudprovider/alicloud/metadata/metadata.go new file mode 100644 index 000000000000..4d4f6574fbfd --- /dev/null +++ b/cluster-autoscaler/cloudprovider/alicloud/metadata/metadata.go @@ -0,0 +1,492 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package metadata
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"reflect"
+	"strings"
+	"time"
+)
+
+// Constants for the metadata service endpoint and its resource paths.
+const (
+	ENDPOINT           = "http://100.100.100.200"
+	DNS_NAMESERVERS    = "dns-conf/nameservers"
+	EIPV4              = "eipv4"
+	HOSTNAME           = "hostname"
+	IMAGE_ID           = "image-id"
+	INSTANCE_ID        = "instance-id"
+	MAC                = "mac"
+	NETWORK_TYPE       = "network-type"
+	NTP_CONF_SERVERS   = "ntp-conf/ntp-servers"
+	OWNER_ACCOUNT_ID   = "owner-account-id"
+	PRIVATE_IPV4       = "private-ipv4"
+	REGION             = "region-id"
+	SERIAL_NUMBER      = "serial-number"
+	SOURCE_ADDRESS     = "source-address"
+	VPC_CIDR_BLOCK     = "vpc-cidr-block"
+	VPC_ID             = "vpc-id"
+	VSWITCH_CIDR_BLOCK = "vswitch-cidr-block"
+	VSWITCH_ID         = "vswitch-id"
+	ZONE               = "zone-id"
+	RAM_SECURITY       = "Ram/security-credentials"
+)
+
+// IMetaDataRequest interface
+type IMetaDataRequest interface {
+	Version(version string) IMetaDataRequest
+	ResourceType(rtype string) IMetaDataRequest
+	Resource(resource string) IMetaDataRequest
+	SubResource(sub string) IMetaDataRequest
+	Url() (string, error)
+	Do(api interface{}) error
+}
+
+// MetaData wraps the http client used to query the metadata service
+type MetaData struct {
+	// mock for unit test.
+	mock   requestMock
+	client *http.Client
+}
+
+// NewMetaData returns MetaData
+func NewMetaData(client *http.Client) *MetaData {
+	if client == nil {
+		client = &http.Client{}
+	}
+	return &MetaData{
+		client: client,
+	}
+}
+
+// NewMockMetaData returns mock MetaData
+func NewMockMetaData(client *http.Client, sendRequest requestMock) *MetaData {
+	if client == nil {
+		client = &http.Client{}
+	}
+	return &MetaData{
+		client: client,
+		mock:   sendRequest,
+	}
+}
+
+// New returns MetaDataRequest
+func (m *MetaData) New() *MetaDataRequest {
+	return &MetaDataRequest{
+		client:      m.client,
+		sendRequest: m.mock,
+	}
+}
+
+// HostName returns host name
+func (m *MetaData) HostName() (string, error) {
+	var hostname ResultList
+	err := m.New().Resource(HOSTNAME).Do(&hostname)
+	if err != nil {
+		return "", err
+	}
+	return hostname.result[0], nil
+}
+
+// ImageID returns image id
+func (m *MetaData) ImageID() (string, error) {
+	var image ResultList
+	err := m.New().Resource(IMAGE_ID).Do(&image)
+	if err != nil {
+		return "", err
+	}
+	return image.result[0], err
+}
+
+// InstanceID returns instance Id
+func (m *MetaData) InstanceID() (string, error) {
+	var instanceid ResultList
+	err := m.New().Resource(INSTANCE_ID).Do(&instanceid)
+	if err != nil {
+		return "", err
+	}
+	return instanceid.result[0], err
+}
+
+// Mac returns mac address
+func (m *MetaData) Mac() (string, error) {
+	var mac ResultList
+	err := m.New().Resource(MAC).Do(&mac)
+	if err != nil {
+		return "", err
+	}
+	return mac.result[0], nil
+}
+
+// NetworkType returns network type
+func (m *MetaData) NetworkType() (string, error) {
+	var network ResultList
+	err := m.New().Resource(NETWORK_TYPE).Do(&network)
+	if err != nil {
+		return "", err
+	}
+	return network.result[0], nil
+}
+
+// OwnerAccountID returns owner account id
+func (m *MetaData) OwnerAccountID() (string, error) {
+	var owner ResultList
+	err := m.New().Resource(OWNER_ACCOUNT_ID).Do(&owner)
+	if err != nil {
+		return "", err
+	}
+	return owner.result[0], nil
+}
+
+// PrivateIPv4 returns private ipv4 ip address
+func (m *MetaData) PrivateIPv4() (string, error) {
+	var private ResultList
+	err := m.New().Resource(PRIVATE_IPV4).Do(&private)
+	if err != nil {
+		return "", err
+	}
+	return private.result[0], nil
+}
+
+// Region returns region id
+func (m *MetaData) Region() (string, error) {
+	var region ResultList
+	err := m.New().Resource(REGION).Do(&region)
+	if err != nil {
+		return "", err
+	}
+	return region.result[0], nil
+}
+
+// SerialNumber returns serial number
+func (m *MetaData) SerialNumber() (string, error) {
+	var serial ResultList
+	err := m.New().Resource(SERIAL_NUMBER).Do(&serial)
+	if err != nil {
+		return "", err
+	}
+	return serial.result[0], nil
+}
+
+// SourceAddress returns source address
+func (m *MetaData) SourceAddress() (string, error) {
+	var source ResultList
+	err := m.New().Resource(SOURCE_ADDRESS).Do(&source)
+	if err != nil {
+		return "", err
+	}
+	return source.result[0], nil
+}
+
+// VpcCIDRBlock returns vpc cidr block
+func (m *MetaData) VpcCIDRBlock() (string, error) {
+	var vpcCIDR ResultList
+	err := m.New().Resource(VPC_CIDR_BLOCK).Do(&vpcCIDR)
+	if err != nil {
+		return "", err
+	}
+	return vpcCIDR.result[0], err
+}
+
+// VpcID returns vpc id
+func (m *MetaData) VpcID() (string, error) {
+	var vpcId ResultList
+	err := m.New().Resource(VPC_ID).Do(&vpcId)
+	if err != nil {
+		return "", err
+	}
+	return vpcId.result[0], err
+}
+
+// VswitchCIDRBlock returns vswitch cidr block
+func (m *MetaData) VswitchCIDRBlock() (string, error) {
+	var cidr ResultList
+	err := m.New().Resource(VSWITCH_CIDR_BLOCK).Do(&cidr)
+	if err != nil {
+		return "", err
+	}
+	return cidr.result[0], err
+}
+
+// VswitchID returns vswitch id
+func (m *MetaData) VswitchID() (string, error) {
+	var vswitchId ResultList
+	err := m.New().Resource(VSWITCH_ID).Do(&vswitchId)
+	if err != nil {
+		return "", err
+	}
+	return vswitchId.result[0], err
+}
+
+// EIPv4 returns eip
+func (m *MetaData) EIPv4() (string, error) {
+	var eip ResultList
+	err := m.New().Resource(EIPV4).Do(&eip)
+	if err != nil {
+		return "", err
+	}
+	return eip.result[0], nil
+}
+
+// DNSNameServers returns dns servers
+func (m *MetaData) DNSNameServers() ([]string, error) {
+	var data ResultList
+	err := m.New().Resource(DNS_NAMESERVERS).Do(&data)
+	if err != nil {
+		return []string{}, err
+	}
+	return data.result, nil
+}
+
+// NTPConfigServers returns ntp servers
+func (m *MetaData) NTPConfigServers() ([]string, error) {
+	var data ResultList
+	err := m.New().Resource(NTP_CONF_SERVERS).Do(&data)
+	if err != nil {
+		return []string{}, err
+	}
+	return data.result, nil
+}
+
+// Zone returns zone id
+func (m *MetaData) Zone() (string, error) {
+	var zone ResultList
+	err := m.New().Resource(ZONE).Do(&zone)
+	if err != nil {
+		return "", err
+	}
+	return zone.result[0], nil
+}
+
+// RoleName returns the instance's RAM role name
+func (m *MetaData) RoleName() (string, error) {
+	var roleName ResultList
+	err := m.New().Resource("ram/security-credentials/").Do(&roleName)
+	if err != nil {
+		return "", err
+	}
+	return roleName.result[0], nil
+}
+
+// RamRoleToken returns ram role token
+func (m *MetaData) RamRoleToken(role string) (RoleAuth, error) {
+	var roleauth RoleAuth
+	err := m.New().Resource(RAM_SECURITY).SubResource(role).Do(&roleauth)
+	if err != nil {
+		return RoleAuth{}, err
+	}
+	return roleauth, nil
+}
+
+type requestMock func(resource string) (string, error)
+
+// MetaDataRequest struct
+type MetaDataRequest struct {
+	version      string
+	resourceType string
+	resource     string
+	subResource  string
+	client       *http.Client
+
+	sendRequest requestMock
+}
+
+// Version sets version
+func (vpc *MetaDataRequest) Version(version string) IMetaDataRequest {
+	vpc.version = version
+	return vpc
+}
+
+// ResourceType sets resource type
+func (vpc *MetaDataRequest) ResourceType(rtype string) IMetaDataRequest {
+	vpc.resourceType = rtype
+	return vpc
+}
+
+// Resource sets resource
+func (vpc *MetaDataRequest) Resource(resource string) IMetaDataRequest {
+	vpc.resource = resource
+	return vpc
+}
+
+// SubResource sets sub resource
+func (vpc *MetaDataRequest) SubResource(sub string) IMetaDataRequest {
+	vpc.subResource = sub
+	return vpc
+}
+
+var retry = AttemptStrategy{
+	Min:   5,
+	Total: 5 * time.Second,
+	Delay: 200 * time.Millisecond,
+}
+
+// Url returns the full metadata url for the request
+func (vpc *MetaDataRequest) Url() (string, error) {
+	if vpc.version == "" {
+		vpc.version = "latest"
+	}
+	if vpc.resourceType == "" {
+		vpc.resourceType = "meta-data"
+	}
+	if vpc.resource == "" {
+		return "", errors.New("the resource you want to visit must not be empty")
+	}
+	endpoint := os.Getenv("METADATA_ENDPOINT")
+	if endpoint == "" {
+		endpoint = ENDPOINT
+	}
+	r := fmt.Sprintf("%s/%s/%s/%s", endpoint, vpc.version, vpc.resourceType, vpc.resource)
+	if vpc.subResource == "" {
+		return r, nil
+	}
+	return fmt.Sprintf("%s/%s", r, vpc.subResource), nil
+}
+
+// Do performs the MetaDataRequest, retrying transient failures
+func (vpc *MetaDataRequest) Do(api interface{}) (err error) {
+	var res = ""
+	for r := retry.Start(); r.Next(); {
+		if vpc.sendRequest != nil {
+			res, err = vpc.sendRequest(vpc.resource)
+		} else {
+			res, err = vpc.send()
+		}
+		if !shouldRetry(err) {
+			break
+		}
+	}
+	if err != nil {
+		return err
+	}
+	return vpc.Decode(res, api)
+}
+
+// Decode parses the raw response into the given api value
+func (vpc *MetaDataRequest) Decode(data string, api interface{}) error {
+	if data == "" {
+		url, _ := vpc.Url()
+		return fmt.Errorf("metadata: alivpc decode data must not be empty. url=[%s]", url)
+	}
+	switch api.(type) {
+	case *ResultList:
+		api.(*ResultList).result = strings.Split(data, "\n")
+		return nil
+	case *RoleAuth:
+		return json.Unmarshal([]byte(data), api)
+	default:
+		return fmt.Errorf("metadata: unknown type to decode, type=%s", reflect.TypeOf(api))
+	}
+}
+
+func (vpc *MetaDataRequest) send() (string, error) {
+	url, err := vpc.Url()
+	if err != nil {
+		return "", err
+	}
+	req, err := http.NewRequest(http.MethodGet, url, nil)
+	if err != nil {
+		return "", err
+	}
+	resp, err := vpc.client.Do(req)
+	if err != nil {
+		return "", err
+	}
+	// Close the body unconditionally; deferring before the status check
+	// avoids leaking the connection on non-200 responses.
+	defer resp.Body.Close()
+	if resp.StatusCode != 200 {
+		return "", fmt.Errorf("Aliyun Metadata API Error: Status Code: %d", resp.StatusCode)
+	}
+
+	data, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return "", err
+	}
+	return string(data), nil
+}
+
+// TimeoutError interface
+type TimeoutError interface {
+	error
+	Timeout() bool // Is the error a timeout?
+}
+
+func shouldRetry(err error) bool {
+	if err == nil {
+		return false
+	}
+
+	_, ok := err.(TimeoutError)
+	if ok {
+		return true
+	}
+
+	switch err {
+	case io.ErrUnexpectedEOF, io.EOF:
+		return true
+	}
+	switch e := err.(type) {
+	case *net.DNSError:
+		return true
+	case *net.OpError:
+		switch e.Op {
+		case "read", "write":
+			return true
+		}
+	case *url.Error:
+		// url.Error can be returned either by net/url if a URL cannot be
+		// parsed, or by net/http if the response is closed before the headers
+		// are received or parsed correctly. In that latter case, e.Op is set to
+		// the HTTP method name with the first letter uppercased. We don't want
+		// to retry on POST operations, since those are not idempotent; all the
+		// other ones should be safe to retry.
+ switch e.Op { + case "Get", "Put", "Delete", "Head": + return shouldRetry(e.Err) + default: + return false + } + } + return false +} + +// ResultList struct +type ResultList struct { + result []string +} + +// RoleAuth struct +type RoleAuth struct { + AccessKeyId string + AccessKeySecret string + Expiration time.Time + SecurityToken string + LastUpdated time.Time + Code string +} diff --git a/cluster-autoscaler/cloudprovider/aws/README.md b/cluster-autoscaler/cloudprovider/aws/README.md index 4175c4febb8f..344ba8bc4d16 100644 --- a/cluster-autoscaler/cloudprovider/aws/README.md +++ b/cluster-autoscaler/cloudprovider/aws/README.md @@ -26,7 +26,7 @@ A minimum IAM policy would look like: } ``` -If you'd like to auto-discover node groups by specifying the `--node-group-auto-discover` flag, a `DescribeTags` permission is also required: +If you'd like to auto-discover node groups by specifying the `--node-group-auto-discovery` flag, a `DescribeTags` permission is also required: ```json { diff --git a/cluster-autoscaler/cloudprovider/aws/auto_scaling.go b/cluster-autoscaler/cloudprovider/aws/auto_scaling.go index 772d8c14c80d..02c96f03af66 100644 --- a/cluster-autoscaler/cloudprovider/aws/auto_scaling.go +++ b/cluster-autoscaler/cloudprovider/aws/auto_scaling.go @@ -21,7 +21,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/golang/glog" + "k8s.io/klog" ) // autoScaling is the interface represents a specific aspect of the auto-scaling service provided by AWS SDK for use in CA @@ -45,7 +45,7 @@ func (m autoScalingWrapper) getInstanceTypeByLCName(name string) (string, error) } launchConfigurations, err := m.DescribeLaunchConfigurations(params) if err != nil { - glog.V(4).Infof("Failed LaunchConfiguration info request for %s: %v", name, err) + klog.V(4).Infof("Failed LaunchConfiguration info request for %s: %v", name, err) return "", err } if len(launchConfigurations.LaunchConfigurations) < 1 { @@ -59,19 +59,31 @@ func (m *autoScalingWrapper) getAutoscalingGroupsByNames(names []string) ([]*aut if len(names) == 0 { return nil, nil } - input := &autoscaling.DescribeAutoScalingGroupsInput{ - AutoScalingGroupNames: aws.StringSlice(names), - MaxRecords: aws.Int64(maxRecordsReturnedByAPI), - } + asgs := make([]*autoscaling.Group, 0) - if err := m.DescribeAutoScalingGroupsPages(input, func(output *autoscaling.DescribeAutoScalingGroupsOutput, _ bool) bool { - asgs = append(asgs, output.AutoScalingGroups...) - // We return true while we want to be called with the next page of - // results, if any. - return true - }); err != nil { - return nil, err + + // AWS only accepts up to 50 ASG names as input, describe them in batches + for i := 0; i < len(names); i += maxAsgNamesPerDescribe { + end := i + maxAsgNamesPerDescribe + + if end > len(names) { + end = len(names) + } + + input := &autoscaling.DescribeAutoScalingGroupsInput{ + AutoScalingGroupNames: aws.StringSlice(names[i:end]), + MaxRecords: aws.Int64(maxRecordsReturnedByAPI), + } + if err := m.DescribeAutoScalingGroupsPages(input, func(output *autoscaling.DescribeAutoScalingGroupsOutput, _ bool) bool { + asgs = append(asgs, output.AutoScalingGroups...) + // We return true while we want to be called with the next page of + // results, if any. 
+ return true + }); err != nil { + return nil, err + } } + return asgs, nil } diff --git a/cluster-autoscaler/cloudprovider/aws/auto_scaling_groups.go b/cluster-autoscaler/cloudprovider/aws/auto_scaling_groups.go index 59425cbbc7d9..c626d267d262 100644 --- a/cluster-autoscaler/cloudprovider/aws/auto_scaling_groups.go +++ b/cluster-autoscaler/cloudprovider/aws/auto_scaling_groups.go @@ -19,6 +19,7 @@ package aws import ( "fmt" "reflect" + "strings" "sync" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" @@ -26,7 +27,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/golang/glog" + "k8s.io/klog" ) const scaleToZeroSupported = true @@ -90,15 +91,15 @@ func (m *asgCache) parseExplicitAsgs(specs []string) error { return nil } -// Register ASG. Returns true if the ASG was registered. -func (m *asgCache) register(asg *asg) bool { +// Register ASG. Returns the registered ASG. +func (m *asgCache) register(asg *asg) *asg { for i := range m.registeredAsgs { if existing := m.registeredAsgs[i]; existing.AwsRef == asg.AwsRef { if reflect.DeepEqual(existing, asg) { - return false + return existing } - glog.V(4).Infof("Updating ASG %s", asg.AwsRef.Name) + klog.V(4).Infof("Updating ASG %s", asg.AwsRef.Name) // Explicit registered groups should always use the manually provided min/max // values and the not the ones returned by the API @@ -117,22 +118,22 @@ func (m *asgCache) register(asg *asg) bool { existing.LaunchTemplateVersion = asg.LaunchTemplateVersion existing.Tags = asg.Tags - return true + return existing } } - glog.V(1).Infof("Registering ASG %s", asg.AwsRef.Name) + klog.V(1).Infof("Registering ASG %s", asg.AwsRef.Name) m.registeredAsgs = append(m.registeredAsgs, asg) - return true + return asg } -// Unregister ASG. Returns true if the ASG was unregistered. -func (m *asgCache) unregister(a *asg) bool { +// Unregister ASG. Returns the unregistered ASG. +func (m *asgCache) unregister(a *asg) *asg { updated := make([]*asg, 0, len(m.registeredAsgs)) - changed := false + var changed *asg for _, existing := range m.registeredAsgs { if existing.AwsRef == a.AwsRef { - glog.V(1).Infof("Unregistered ASG %s", a.AwsRef.Name) - changed = true + klog.V(1).Infof("Unregistered ASG %s", a.AwsRef.Name) + changed = a continue } updated = append(updated, existing) @@ -167,6 +168,10 @@ func (m *asgCache) FindForInstance(instance AwsInstanceRef) *asg { m.mutex.Lock() defer m.mutex.Unlock() + return m.findForInstance(instance) +} + +func (m *asgCache) findForInstance(instance AwsInstanceRef) *asg { if asg, found := m.instanceToAsg[instance]; found { return asg } @@ -186,6 +191,72 @@ func (m *asgCache) InstancesByAsg(ref AwsRef) ([]AwsInstanceRef, error) { return nil, fmt.Errorf("Error while looking for instances of ASG: %s", ref) } +func (m *asgCache) SetAsgSize(asg *asg, size int) error { + m.mutex.Lock() + defer m.mutex.Unlock() + + params := &autoscaling.SetDesiredCapacityInput{ + AutoScalingGroupName: aws.String(asg.Name), + DesiredCapacity: aws.Int64(int64(size)), + HonorCooldown: aws.Bool(false), + } + klog.V(0).Infof("Setting asg %s size to %d", asg.Name, size) + _, err := m.service.SetDesiredCapacity(params) + if err != nil { + return err + } + + // Proactively set the ASG size so autoscaler makes better decisions + asg.curSize = size + + return nil +} + +// DeleteInstances deletes the given instances. All instances must be controlled by the same ASG. 
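+// Each instance is terminated with ShouldDecrementDesiredCapacity set, and the
+// cached ASG size is decremented proactively so that subsequent scaling
+// decisions see the reduced size immediately.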
+func (m *asgCache) DeleteInstances(instances []*AwsInstanceRef) error { + m.mutex.Lock() + defer m.mutex.Unlock() + + if len(instances) == 0 { + return nil + } + commonAsg := m.findForInstance(*instances[0]) + if commonAsg == nil { + return fmt.Errorf("can't delete instance %s, which is not part of an ASG", instances[0].Name) + } + + for _, instance := range instances { + asg := m.findForInstance(*instance) + + if asg != commonAsg { + instanceIds := make([]string, len(instances)) + for i, instance := range instances { + instanceIds[i] = instance.Name + } + + return fmt.Errorf("can't delete instances %s as they belong to at least two different ASGs (%s and %s)", strings.Join(instanceIds, ","), commonAsg.Name, asg.Name) + } + } + + for _, instance := range instances { + params := &autoscaling.TerminateInstanceInAutoScalingGroupInput{ + InstanceId: aws.String(instance.Name), + ShouldDecrementDesiredCapacity: aws.Bool(true), + } + resp, err := m.service.TerminateInstanceInAutoScalingGroup(params) + if err != nil { + return err + } + + // Proactively decrement the size so autoscaler makes better decisions + commonAsg.curSize-- + + klog.V(4).Infof(*resp.Activity.Description) + } + + return nil +} + // Fetch automatically discovered ASGs. These ASGs should be unregistered if // they no longer exist in AWS. func (m *asgCache) fetchAutoAsgNames() ([]string, error) { @@ -246,7 +317,7 @@ func (m *asgCache) regenerate() error { } // Fetch details of all ASGs - glog.V(4).Infof("Regenerating instance to ASG map for ASGs: %v", refreshNames) + klog.V(4).Infof("Regenerating instance to ASG map for ASGs: %v", refreshNames) groups, err := m.service.getAutoscalingGroupsByNames(refreshNames) if err != nil { return err @@ -261,7 +332,7 @@ func (m *asgCache) regenerate() error { } exists[asg.AwsRef] = true - m.register(asg) + asg = m.register(asg) newAsgToInstancesCache[asg.AwsRef] = make([]AwsInstanceRef, len(group.Instances)) @@ -308,7 +379,7 @@ func (m *asgCache) buildAsgFromAWS(g *autoscaling.Group) (*asg, error) { LaunchConfigurationName: aws.StringValue(g.LaunchConfigurationName), LaunchTemplateName: launchTemplateName, LaunchTemplateVersion: launchTemplateVersion, - Tags: g.Tags, + Tags: g.Tags, } return asg, nil diff --git a/cluster-autoscaler/cloudprovider/aws/auto_scaling_test.go b/cluster-autoscaler/cloudprovider/aws/auto_scaling_test.go new file mode 100644 index 000000000000..dd9f13971d21 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/aws/auto_scaling_test.go @@ -0,0 +1,70 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestMoreThen50Groups(t *testing.T) { + service := &AutoScalingMock{} + autoScalingWrapper := &autoScalingWrapper{ + autoScaling: service, + } + + // Generate 51 ASG names + names := make([]string, 51) + for i := 0; i < len(names); i++ { + names[i] = fmt.Sprintf("asg-%d", i) + } + + // First batch, first 50 elements + service.On("DescribeAutoScalingGroupsPages", + &autoscaling.DescribeAutoScalingGroupsInput{ + AutoScalingGroupNames: aws.StringSlice(names[:50]), + MaxRecords: aws.Int64(maxRecordsReturnedByAPI), + }, + mock.AnythingOfType("func(*autoscaling.DescribeAutoScalingGroupsOutput, bool) bool"), + ).Run(func(args mock.Arguments) { + fn := args.Get(1).(func(*autoscaling.DescribeAutoScalingGroupsOutput, bool) bool) + fn(testNamedDescribeAutoScalingGroupsOutput("asg-1", 1, "test-instance-id"), false) + }).Return(nil) + + // Second batch, element 51 + service.On("DescribeAutoScalingGroupsPages", + &autoscaling.DescribeAutoScalingGroupsInput{ + AutoScalingGroupNames: aws.StringSlice([]string{"asg-50"}), + MaxRecords: aws.Int64(maxRecordsReturnedByAPI), + }, + mock.AnythingOfType("func(*autoscaling.DescribeAutoScalingGroupsOutput, bool) bool"), + ).Run(func(args mock.Arguments) { + fn := args.Get(1).(func(*autoscaling.DescribeAutoScalingGroupsOutput, bool) bool) + fn(testNamedDescribeAutoScalingGroupsOutput("asg-2", 1, "test-instance-id"), false) + }).Return(nil) + + asgs, err := autoScalingWrapper.getAutoscalingGroupsByNames(names) + assert.Nil(t, err) + assert.Equal(t, len(asgs), 2) + assert.Equal(t, *asgs[0].AutoScalingGroupName, "asg-1") + assert.Equal(t, *asgs[1].AutoScalingGroupName, "asg-2") +} diff --git a/cluster-autoscaler/cloudprovider/aws/aws_cloud_provider.go b/cluster-autoscaler/cloudprovider/aws/aws_cloud_provider.go index 08d8d04a082a..d2151cbafcd6 100644 --- a/cluster-autoscaler/cloudprovider/aws/aws_cloud_provider.go +++ b/cluster-autoscaler/cloudprovider/aws/aws_cloud_provider.go @@ -18,14 +18,17 @@ package aws import ( "fmt" + "io" + "os" "regexp" "strings" - "github.com/golang/glog" apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" + "k8s.io/autoscaler/cluster-autoscaler/config" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" + "k8s.io/klog" schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" ) @@ -77,7 +80,7 @@ func (aws *awsCloudProvider) NodeGroups() []cloudprovider.NodeGroup { // NodeGroupForNode returns the node group for the given node. func (aws *awsCloudProvider) NodeGroupForNode(node *apiv1.Node) (cloudprovider.NodeGroup, error) { if len(node.Spec.ProviderID) == 0 { - glog.Warningf("Node %v has no providerId", node.Name) + klog.Warningf("Node %v has no providerId", node.Name) return nil, nil } ref, err := AwsRefFromProviderId(node.Spec.ProviderID) @@ -124,6 +127,11 @@ func (aws *awsCloudProvider) Refresh() error { return aws.awsManager.Refresh() } +// GetInstanceID gets the instance ID for the specified node. +func (aws *awsCloudProvider) GetInstanceID(node *apiv1.Node) string { + return node.Spec.ProviderID +} + // AwsRef contains a reference to some entity in AWS world. type AwsRef struct { Name string @@ -279,18 +287,18 @@ func (ng *AwsNodeGroup) Debug() string { } // Nodes returns a list of all nodes that belong to this node group. 
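+// Each entry wraps the node's provider ID in a cloudprovider.Instance.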
-func (ng *AwsNodeGroup) Nodes() ([]string, error) { +func (ng *AwsNodeGroup) Nodes() ([]cloudprovider.Instance, error) { asgNodes, err := ng.awsManager.GetAsgNodes(ng.asg.AwsRef) if err != nil { return nil, err } - nodes := make([]string, len(asgNodes)) + instances := make([]cloudprovider.Instance, len(asgNodes)) for i, asgNode := range asgNodes { - nodes[i] = asgNode.ProviderID + instances[i] = cloudprovider.Instance{Id: asgNode.ProviderID} } - return nodes, nil + return instances, nil } // TemplateNodeInfo returns a node template for this node group. @@ -309,3 +317,27 @@ func (ng *AwsNodeGroup) TemplateNodeInfo() (*schedulercache.NodeInfo, error) { nodeInfo.SetNode(node) return nodeInfo, nil } + +// BuildAWS builds AWS cloud provider, manager etc. +func BuildAWS(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider { + var config io.ReadCloser + if opts.CloudConfig != "" { + var err error + config, err = os.Open(opts.CloudConfig) + if err != nil { + klog.Fatalf("Couldn't open cloud provider configuration %s: %#v", opts.CloudConfig, err) + } + defer config.Close() + } + + manager, err := CreateAwsManager(config, do) + if err != nil { + klog.Fatalf("Failed to create AWS Manager: %v", err) + } + + provider, err := BuildAwsCloudProvider(manager, rl) + if err != nil { + klog.Fatalf("Failed to create AWS cloud provider: %v", err) + } + return provider +} diff --git a/cluster-autoscaler/cloudprovider/aws/aws_cloud_provider_test.go b/cluster-autoscaler/cloudprovider/aws/aws_cloud_provider_test.go index 51a2e491d588..d8cde5cf953e 100644 --- a/cluster-autoscaler/cloudprovider/aws/aws_cloud_provider_test.go +++ b/cluster-autoscaler/cloudprovider/aws/aws_cloud_provider_test.go @@ -239,7 +239,7 @@ func TestNodeGroupForNode(t *testing.T) { assert.NoError(t, err) - assert.Equal(t, nodes, []string{"aws:///us-east-1a/test-instance-id"}) + assert.Equal(t, []cloudprovider.Instance{{Id: "aws:///us-east-1a/test-instance-id"}}, nodes) service.AssertNumberOfCalls(t, "DescribeAutoScalingGroupsPages", 1) // test node in cluster that is not in a group managed by cluster autoscaler @@ -330,10 +330,18 @@ func TestIncreaseSize(t *testing.T) { provider.Refresh() - err := asgs[0].IncreaseSize(1) + initialSize, err := asgs[0].TargetSize() + assert.NoError(t, err) + assert.Equal(t, 2, initialSize) + + err = asgs[0].IncreaseSize(1) assert.NoError(t, err) service.AssertNumberOfCalls(t, "SetDesiredCapacity", 1) service.AssertNumberOfCalls(t, "DescribeAutoScalingGroupsPages", 1) + + newSize, err := asgs[0].TargetSize() + assert.NoError(t, err) + assert.Equal(t, 3, newSize) } func TestBelongs(t *testing.T) { @@ -403,15 +411,61 @@ func TestDeleteNodes(t *testing.T) { provider.Refresh() + initialSize, err := asgs[0].TargetSize() + assert.NoError(t, err) + assert.Equal(t, 2, initialSize) + node := &apiv1.Node{ Spec: apiv1.NodeSpec{ ProviderID: "aws:///us-east-1a/test-instance-id", }, } - err := asgs[0].DeleteNodes([]*apiv1.Node{node}) + err = asgs[0].DeleteNodes([]*apiv1.Node{node}) assert.NoError(t, err) service.AssertNumberOfCalls(t, "TerminateInstanceInAutoScalingGroup", 1) service.AssertNumberOfCalls(t, "DescribeAutoScalingGroupsPages", 1) + + newSize, err := asgs[0].TargetSize() + assert.NoError(t, err) + assert.Equal(t, 1, newSize) +} + +func TestDeleteNodesAfterMultipleRefreshes(t *testing.T) { + service := &AutoScalingMock{} + manager := newTestAwsManagerWithAsgs(t, service, []string{"1:5:test-asg"}) + provider := testProvider(t, 
manager) + asgs := provider.NodeGroups() + + service.On("TerminateInstanceInAutoScalingGroup", &autoscaling.TerminateInstanceInAutoScalingGroupInput{ + InstanceId: aws.String("test-instance-id"), + ShouldDecrementDesiredCapacity: aws.Bool(true), + }).Return(&autoscaling.TerminateInstanceInAutoScalingGroupOutput{ + Activity: &autoscaling.Activity{Description: aws.String("Deleted instance")}, + }) + + // Look up the current number of instances... + service.On("DescribeAutoScalingGroupsPages", + &autoscaling.DescribeAutoScalingGroupsInput{ + AutoScalingGroupNames: aws.StringSlice([]string{"test-asg"}), + MaxRecords: aws.Int64(maxRecordsReturnedByAPI), + }, + mock.AnythingOfType("func(*autoscaling.DescribeAutoScalingGroupsOutput, bool) bool"), + ).Run(func(args mock.Arguments) { + fn := args.Get(1).(func(*autoscaling.DescribeAutoScalingGroupsOutput, bool) bool) + fn(testNamedDescribeAutoScalingGroupsOutput("test-asg", 2, "test-instance-id", "second-test-instance-id"), false) + }).Return(nil) + + provider.Refresh() + // Call the manager directly as otherwise the call would be a noop as its within less then 60s + manager.forceRefresh() + + node := &apiv1.Node{ + Spec: apiv1.NodeSpec{ + ProviderID: "aws:///us-east-1a/test-instance-id", + }, + } + err := asgs[0].DeleteNodes([]*apiv1.Node{node}) + assert.NoError(t, err) } func TestGetResourceLimiter(t *testing.T) { diff --git a/cluster-autoscaler/cloudprovider/aws/aws_manager.go b/cluster-autoscaler/cloudprovider/aws/aws_manager.go index 1bbfeed99e86..142b7f918877 100644 --- a/cluster-autoscaler/cloudprovider/aws/aws_manager.go +++ b/cluster-autoscaler/cloudprovider/aws/aws_manager.go @@ -19,24 +19,27 @@ limitations under the License. package aws import ( + "errors" "fmt" "io" "math/rand" + "os" "regexp" "strings" "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/autoscaling" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/golang/glog" "gopkg.in/gcfg.v1" apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" + "k8s.io/klog" provider_aws "k8s.io/kubernetes/pkg/cloudprovider/providers/aws" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" ) @@ -45,6 +48,7 @@ const ( operationWaitTimeout = 5 * time.Second operationPollInterval = 100 * time.Millisecond maxRecordsReturnedByAPI = 100 + maxAsgNamesPerDescribe = 50 refreshInterval = 10 * time.Second ) @@ -63,6 +67,18 @@ type asgTemplate struct { Tags []*autoscaling.TagDescription } +// getRegion deduces the current AWS Region. +func getRegion(cfg ...*aws.Config) string { + region, present := os.LookupEnv("AWS_REGION") + if !present { + svc := ec2metadata.New(session.New(), cfg...) 
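+		// Fall back to the region reported by the EC2 metadata service.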
+ if r, err := svc.Region(); err == nil { + region = r + } + } + return region +} + // createAwsManagerInternal allows for a customer autoScalingWrapper to be passed in by tests func createAWSManagerInternal( configReader io.Reader, @@ -73,13 +89,13 @@ func createAWSManagerInternal( if configReader != nil { var cfg provider_aws.CloudConfig if err := gcfg.ReadInto(&cfg, configReader); err != nil { - glog.Errorf("Couldn't read config: %v", err) + klog.Errorf("Couldn't read config: %v", err) return nil, err } } if autoScalingService == nil || ec2Service == nil { - sess := session.New() + sess := session.New(aws.NewConfig().WithRegion(getRegion())) if autoScalingService == nil { autoScalingService = &autoScalingWrapper{autoscaling.New(sess)} @@ -129,11 +145,11 @@ func (m *AwsManager) Refresh() error { func (m *AwsManager) forceRefresh() error { if err := m.asgCache.regenerate(); err != nil { - glog.Errorf("Failed to regenerate ASG cache: %v", err) + klog.Errorf("Failed to regenerate ASG cache: %v", err) return err } m.lastRefresh = time.Now() - glog.V(2).Infof("Refreshed ASG list, next refresh after %v", m.lastRefresh.Add(refreshInterval)) + klog.V(2).Infof("Refreshed ASG list, next refresh after %v", m.lastRefresh.Add(refreshInterval)) return nil } @@ -153,55 +169,12 @@ func (m *AwsManager) getAsgs() []*asg { // SetAsgSize sets ASG size. func (m *AwsManager) SetAsgSize(asg *asg, size int) error { - params := &autoscaling.SetDesiredCapacityInput{ - AutoScalingGroupName: aws.String(asg.Name), - DesiredCapacity: aws.Int64(int64(size)), - HonorCooldown: aws.Bool(false), - } - glog.V(0).Infof("Setting asg %s size to %d", asg.Name, size) - _, err := m.autoScalingService.SetDesiredCapacity(params) - if err != nil { - return err - } - return nil + return m.asgCache.SetAsgSize(asg, size) } // DeleteInstances deletes the given instances. All instances must be controlled by the same ASG. func (m *AwsManager) DeleteInstances(instances []*AwsInstanceRef) error { - if len(instances) == 0 { - return nil - } - commonAsg := m.asgCache.FindForInstance(*instances[0]) - if commonAsg == nil { - return fmt.Errorf("can't delete instance %s, which is not part of an ASG", instances[0].Name) - } - - for _, instance := range instances { - asg := m.asgCache.FindForInstance(*instance) - - if asg != commonAsg { - instanceIds := make([]string, len(instances)) - for i, instance := range instances { - instanceIds[i] = instance.Name - } - - return fmt.Errorf("can't delete instances %s as they belong to at least two different ASGs (%s and %s)", strings.Join(instanceIds, ","), commonAsg.Name, asg.Name) - } - } - - for _, instance := range instances { - params := &autoscaling.TerminateInstanceInAutoScalingGroupInput{ - InstanceId: aws.String(instance.Name), - ShouldDecrementDesiredCapacity: aws.Bool(true), - } - resp, err := m.autoScalingService.TerminateInstanceInAutoScalingGroup(params) - if err != nil { - return err - } - glog.V(4).Infof(*resp.Activity.Description) - } - - return nil + return m.asgCache.DeleteInstances(instances) } // GetAsgNodes returns Asg nodes. 
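For illustration, the 50-name batching used by `getAutoscalingGroupsByNames` above reduces to a small slicing idiom over `maxAsgNamesPerDescribe`. A standalone sketch (hypothetical helper name, standard library only; not part of the patch):

```go
package main

import "fmt"

// batch splits names into slices of at most size elements, mirroring how
// getAutoscalingGroupsByNames walks its input in steps of
// maxAsgNamesPerDescribe (50).
func batch(names []string, size int) [][]string {
	var out [][]string
	for i := 0; i < len(names); i += size {
		end := i + size
		if end > len(names) {
			end = len(names)
		}
		out = append(out, names[i:end])
	}
	return out
}

func main() {
	names := make([]string, 51)
	for i := range names {
		names[i] = fmt.Sprintf("asg-%d", i)
	}
	for _, b := range batch(names, 50) {
		fmt.Println(len(b)) // prints 50, then 1
	}
}
```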
@@ -211,14 +184,14 @@ func (m *AwsManager) GetAsgNodes(ref AwsRef) ([]AwsInstanceRef, error) { func (m *AwsManager) getAsgTemplate(asg *asg) (*asgTemplate, error) { if len(asg.AvailabilityZones) < 1 { - return nil, fmt.Errorf("Unable to get first AvailabilityZone for %s", asg.Name) + return nil, fmt.Errorf("Unable to get first AvailabilityZone for ASG %q", asg.Name) } az := asg.AvailabilityZones[0] region := az[0 : len(az)-1] if len(asg.AvailabilityZones) > 1 { - glog.Warningf("Found multiple availability zones, using %s\n", az) + klog.Warningf("Found multiple availability zones for ASG %q; using %s\n", asg.Name, az) } instanceTypeName, err := m.buildInstanceType(asg) @@ -226,12 +199,15 @@ func (m *AwsManager) getAsgTemplate(asg *asg) (*asgTemplate, error) { return nil, err } - return &asgTemplate{ - InstanceType: InstanceTypes[instanceTypeName], - Region: region, - Zone: az, - Tags: asg.Tags, - }, nil + if t, ok := InstanceTypes[instanceTypeName]; ok { + return &asgTemplate{ + InstanceType: t, + Region: region, + Zone: az, + Tags: asg.Tags, + }, nil + } + return nil, fmt.Errorf("ASG %q uses the unknown EC2 instance type %q", asg.Name, instanceTypeName) } func (m *AwsManager) buildInstanceType(asg *asg) (string, error) { @@ -241,7 +217,7 @@ func (m *AwsManager) buildInstanceType(asg *asg) (string, error) { return m.ec2Service.getInstanceTypeByLT(asg.LaunchTemplateName, asg.LaunchTemplateVersion) } - return "", fmt.Errorf("Unable to get instance type from launch config or launch template") + return "", errors.New("Unable to get instance type from launch config or launch template") } func (m *AwsManager) buildNodeFromTemplate(asg *asg, template *asgTemplate) (*apiv1.Node, error) { diff --git a/cluster-autoscaler/cloudprovider/aws/aws_manager_test.go b/cluster-autoscaler/cloudprovider/aws/aws_manager_test.go index 8eddd7eb53a9..ceb505865deb 100644 --- a/cluster-autoscaler/cloudprovider/aws/aws_manager_test.go +++ b/cluster-autoscaler/cloudprovider/aws/aws_manager_test.go @@ -18,6 +18,13 @@ package aws import ( "fmt" + + "net/http" + "net/http/httptest" + "os" + "reflect" + "sort" + "strings" "testing" "github.com/aws/aws-sdk-go/aws" @@ -30,6 +37,32 @@ import ( kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" ) +// TestGetRegion ensures correct source supplies AWS Region. +func TestGetRegion(t *testing.T) { + key := "AWS_REGION" + originalRegion, originalPresent := os.LookupEnv(key) + defer func(region string, present bool) { + os.Unsetenv(key) + if present { + os.Setenv(key, region) + } + }(originalRegion, originalPresent) + // Ensure environment variable retains precedence. + expected1 := "the-shire-1" + os.Setenv(key, expected1) + assert.Equal(t, expected1, getRegion()) + // Ensure without environment variable, EC2 Metadata used... and it merely + // chops the last character off the Availability Zone. + expected2 := "mordor-2" + expected2a := expected2 + "a" + os.Unsetenv(key) + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte(expected2a)) + })) + cfg := aws.NewConfig().WithEndpoint(server.URL) + assert.Equal(t, expected2, getRegion(cfg)) +} + func TestBuildGenericLabels(t *testing.T) { labels := buildGenericLabels(&asgTemplate{ InstanceType: &instanceType{ @@ -199,21 +232,96 @@ func TestBuildInstanceType(t *testing.T) { assert.Equal(t, instanceType, builtInstanceType) } -/* Disabled due to flakiness. 
See https://github.com/kubernetes/autoscaler/issues/608 +func TestGetASGTemplate(t *testing.T) { + const ( + knownInstanceType = "t3.micro" + region = "us-east-1" + az = region + "a" + ltName = "launcher" + ltVersion = "1" + ) + + tags := []*autoscaling.TagDescription{ + { + Key: aws.String("k8s.io/cluster-autoscaler/node-template/taint/dedicated"), + Value: aws.String("foo:NoSchedule"), + }, + } + + tests := []struct { + description string + instanceType string + availabilityZones []string + error bool + }{ + {"insufficient availability zones", + knownInstanceType, []string{}, true}, + {"single availability zone", + knownInstanceType, []string{az}, false}, + {"multiple availability zones", + knownInstanceType, []string{az, "us-west-1b"}, false}, + {"unknown instance type", + "nonexistent.xlarge", []string{az}, true}, + } + + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + s := &EC2Mock{} + s.On("DescribeLaunchTemplateVersions", &ec2.DescribeLaunchTemplateVersionsInput{ + LaunchTemplateName: aws.String(ltName), + Versions: []*string{aws.String(ltVersion)}, + }).Return(&ec2.DescribeLaunchTemplateVersionsOutput{ + LaunchTemplateVersions: []*ec2.LaunchTemplateVersion{ + { + LaunchTemplateData: &ec2.ResponseLaunchTemplateData{ + InstanceType: aws.String(test.instanceType), + }, + }, + }, + }) + + m, err := createAWSManagerInternal(nil, cloudprovider.NodeGroupDiscoveryOptions{}, nil, &ec2Wrapper{s}) + assert.NoError(t, err) + + asg := &asg{ + AwsRef: AwsRef{Name: "sample"}, + AvailabilityZones: test.availabilityZones, + LaunchTemplateName: ltName, + LaunchTemplateVersion: ltVersion, + Tags: tags, + } + + template, err := m.getAsgTemplate(asg) + if test.error { + assert.Error(t, err) + } else { + assert.NoError(t, err) + if assert.NotNil(t, template) { + assert.Equal(t, test.instanceType, template.InstanceType.InstanceType) + assert.Equal(t, region, template.Region) + assert.Equal(t, test.availabilityZones[0], template.Zone) + assert.Equal(t, tags, template.Tags) + } + } + }) + } +} + func TestFetchAutoAsgs(t *testing.T) { min, max := 1, 10 groupname, tags := "coolasg", []string{"tag", "anothertag"} s := &AutoScalingMock{} // Lookup groups associated with tags - s.On("DescribeTagsPages", - &autoscaling.DescribeTagsInput{ - Filters: []*autoscaling.Filter{ - {Name: aws.String("key"), Values: aws.StringSlice([]string{tags[0]})}, - {Name: aws.String("key"), Values: aws.StringSlice([]string{tags[1]})}, - }, - MaxRecords: aws.Int64(maxRecordsReturnedByAPI), + expectedTagsInput := &autoscaling.DescribeTagsInput{ + Filters: []*autoscaling.Filter{ + {Name: aws.String("key"), Values: aws.StringSlice([]string{tags[0]})}, + {Name: aws.String("key"), Values: aws.StringSlice([]string{tags[1]})}, }, + MaxRecords: aws.Int64(maxRecordsReturnedByAPI), + } + // Use MatchedBy pattern to avoid list order issue https://github.com/kubernetes/autoscaler/issues/1346 + s.On("DescribeTagsPages", mock.MatchedBy(tagsMatcher(expectedTagsInput)), mock.AnythingOfType("func(*autoscaling.DescribeTagsOutput, bool) bool"), ).Run(func(args mock.Arguments) { fn := args.Get(1).(func(*autoscaling.DescribeTagsOutput, bool) bool) @@ -247,30 +355,41 @@ func TestFetchAutoAsgs(t *testing.T) { } // fetchAutoASGs is called at manager creation time, via forceRefresh - m, err := createAWSManagerInternal(nil, do, &autoScalingWrapper{s}) + m, err := createAWSManagerInternal(nil, do, &autoScalingWrapper{s}, nil) assert.NoError(t, err) - asgs := m.asgCache.get() + asgs := m.asgCache.Get() assert.Equal(t, 1, len(asgs)) 
- validateAsg(t, asgs[0].config, groupname, min, max) + validateAsg(t, asgs[0], groupname, min, max) // Simulate the previously discovered ASG disappearing - s.On("DescribeTagsPages", - &autoscaling.DescribeTagsInput{ - Filters: []*autoscaling.Filter{ - {Name: aws.String("key"), Values: aws.StringSlice([]string{tags[0]})}, - {Name: aws.String("key"), Values: aws.StringSlice([]string{tags[1]})}, - }, - MaxRecords: aws.Int64(maxRecordsReturnedByAPI), - }, + s.On("DescribeTagsPages", mock.MatchedBy(tagsMatcher(expectedTagsInput)), mock.AnythingOfType("func(*autoscaling.DescribeTagsOutput, bool) bool"), ).Run(func(args mock.Arguments) { fn := args.Get(1).(func(*autoscaling.DescribeTagsOutput, bool) bool) fn(&autoscaling.DescribeTagsOutput{Tags: []*autoscaling.TagDescription{}}, false) }).Return(nil).Once() - err = m.fetchAutoAsgs() + err = m.asgCache.regenerate() assert.NoError(t, err) - assert.Empty(t, m.asgCache.get()) + assert.Empty(t, m.asgCache.Get()) +} + +func tagsMatcher(expected *autoscaling.DescribeTagsInput) func(*autoscaling.DescribeTagsInput) bool { + return func(actual *autoscaling.DescribeTagsInput) bool { + expectedTags := flatTagSlice(expected.Filters) + actualTags := flatTagSlice(actual.Filters) + + return *expected.MaxRecords == *actual.MaxRecords && reflect.DeepEqual(expectedTags, actualTags) + } +} + +func flatTagSlice(filters []*autoscaling.Filter) []string { + tags := []string{} + for _, filter := range filters { + tags = append(tags, aws.StringValueSlice(filter.Values)...) + } + // Sort slice for compare + sort.Strings(tags) + return tags } -*/ diff --git a/cluster-autoscaler/cloudprovider/aws/ec2_instance_types.go b/cluster-autoscaler/cloudprovider/aws/ec2_instance_types.go index 792374d568a0..faf30984af29 100644 --- a/cluster-autoscaler/cloudprovider/aws/ec2_instance_types.go +++ b/cluster-autoscaler/cloudprovider/aws/ec2_instance_types.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
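The unknown-instance-type guard added to `getAsgTemplate` above relies on Go's comma-ok map lookup against this generated table. A minimal sketch of the pattern (hypothetical names, not part of the patch):

```go
package main

import "fmt"

type instanceType struct {
	InstanceType string
	VCPU         int64
	MemoryMb     int64
}

// A tiny stand-in for the generated InstanceTypes table.
var instanceTypes = map[string]*instanceType{
	"t3.micro": {InstanceType: "t3.micro", VCPU: 2, MemoryMb: 1024},
}

// lookup returns an error instead of a nil entry for unknown types, which is
// exactly what the getAsgTemplate change guards against.
func lookup(name string) (*instanceType, error) {
	if t, ok := instanceTypes[name]; ok {
		return t, nil
	}
	return nil, fmt.Errorf("unknown EC2 instance type %q", name)
}

func main() {
	if _, err := lookup("nonexistent.xlarge"); err != nil {
		fmt.Println(err) // unknown EC2 instance type "nonexistent.xlarge"
	}
}
```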
@@ -255,6 +255,12 @@ var InstanceTypes = map[string]*instanceType{ MemoryMb: 124928, GPU: 0, }, + "f1.4xlarge": { + InstanceType: "f1.4xlarge", + VCPU: 16, + MemoryMb: 249856, + GPU: 0, + }, "g2": { InstanceType: "g2", VCPU: 32, @@ -297,6 +303,12 @@ var InstanceTypes = map[string]*instanceType{ MemoryMb: 249856, GPU: 2, }, + "g3s.xlarge": { + InstanceType: "g3s.xlarge", + VCPU: 4, + MemoryMb: 31232, + GPU: 0, + }, "h1": { InstanceType: "h1", VCPU: 64, @@ -567,6 +579,42 @@ var InstanceTypes = map[string]*instanceType{ MemoryMb: 16384, GPU: 0, }, + "m5a.12xlarge": { + InstanceType: "m5a.12xlarge", + VCPU: 48, + MemoryMb: 196608, + GPU: 0, + }, + "m5a.24xlarge": { + InstanceType: "m5a.24xlarge", + VCPU: 96, + MemoryMb: 393216, + GPU: 0, + }, + "m5a.2xlarge": { + InstanceType: "m5a.2xlarge", + VCPU: 8, + MemoryMb: 32768, + GPU: 0, + }, + "m5a.4xlarge": { + InstanceType: "m5a.4xlarge", + VCPU: 16, + MemoryMb: 65536, + GPU: 0, + }, + "m5a.large": { + InstanceType: "m5a.large", + VCPU: 2, + MemoryMb: 8192, + GPU: 0, + }, + "m5a.xlarge": { + InstanceType: "m5a.xlarge", + VCPU: 4, + MemoryMb: 16384, + GPU: 0, + }, "m5d": { InstanceType: "m5d", VCPU: 96, @@ -777,6 +825,42 @@ var InstanceTypes = map[string]*instanceType{ MemoryMb: 32768, GPU: 0, }, + "r5a.12xlarge": { + InstanceType: "r5a.12xlarge", + VCPU: 48, + MemoryMb: 393216, + GPU: 0, + }, + "r5a.24xlarge": { + InstanceType: "r5a.24xlarge", + VCPU: 96, + MemoryMb: 786432, + GPU: 0, + }, + "r5a.2xlarge": { + InstanceType: "r5a.2xlarge", + VCPU: 8, + MemoryMb: 65536, + GPU: 0, + }, + "r5a.4xlarge": { + InstanceType: "r5a.4xlarge", + VCPU: 16, + MemoryMb: 131072, + GPU: 0, + }, + "r5a.large": { + InstanceType: "r5a.large", + VCPU: 2, + MemoryMb: 16384, + GPU: 0, + }, + "r5a.xlarge": { + InstanceType: "r5a.xlarge", + VCPU: 4, + MemoryMb: 32768, + GPU: 0, + }, "r5d": { InstanceType: "r5d", VCPU: 96, @@ -867,6 +951,66 @@ var InstanceTypes = map[string]*instanceType{ MemoryMb: 16384, GPU: 0, }, + "t3.2xlarge": { + InstanceType: "t3.2xlarge", + VCPU: 8, + MemoryMb: 32768, + GPU: 0, + }, + "t3.large": { + InstanceType: "t3.large", + VCPU: 2, + MemoryMb: 8192, + GPU: 0, + }, + "t3.medium": { + InstanceType: "t3.medium", + VCPU: 2, + MemoryMb: 4096, + GPU: 0, + }, + "t3.micro": { + InstanceType: "t3.micro", + VCPU: 2, + MemoryMb: 1024, + GPU: 0, + }, + "t3.nano": { + InstanceType: "t3.nano", + VCPU: 2, + MemoryMb: 512, + GPU: 0, + }, + "t3.small": { + InstanceType: "t3.small", + VCPU: 2, + MemoryMb: 2048, + GPU: 0, + }, + "t3.xlarge": { + InstanceType: "t3.xlarge", + VCPU: 4, + MemoryMb: 16384, + GPU: 0, + }, + "u-12tb1": { + InstanceType: "u-12tb1", + VCPU: 448, + MemoryMb: 12582912, + GPU: 0, + }, + "u-6tb1": { + InstanceType: "u-6tb1", + VCPU: 448, + MemoryMb: 6291456, + GPU: 0, + }, + "u-9tb1": { + InstanceType: "u-9tb1", + VCPU: 448, + MemoryMb: 9437184, + GPU: 0, + }, "x1": { InstanceType: "x1", VCPU: 128, diff --git a/cluster-autoscaler/cloudprovider/aws/ec2_instance_types/gen.go b/cluster-autoscaler/cloudprovider/aws/ec2_instance_types/gen.go index 2bb590e99b7a..6614bcc71434 100644 --- a/cluster-autoscaler/cloudprovider/aws/ec2_instance_types/gen.go +++ b/cluster-autoscaler/cloudprovider/aws/ec2_instance_types/gen.go @@ -30,7 +30,7 @@ import ( "strings" "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/golang/glog" + "k8s.io/klog" ) type response struct { @@ -56,7 +56,7 @@ type instanceType struct { } var packageTemplate = template.Must(template.New("").Parse(`/* -Copyright 2017 The Kubernetes Authors. +Copyright The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -97,7 +97,7 @@ var InstanceTypes = map[string]*instanceType{ func main() { flag.Parse() - defer glog.Flush() + defer klog.Flush() instanceTypes := make(map[string]*instanceType) @@ -107,10 +107,10 @@ func main() { for _, p := range partitions { for _, r := range p.Regions() { url := "https://pricing.us-east-1.amazonaws.com/offers/v1.0/aws/AmazonEC2/current/" + r.ID() + "/index.json" - glog.V(1).Infof("fetching %s\n", url) + klog.V(1).Infof("fetching %s\n", url) res, err := http.Get(url) if err != nil { - glog.Warningf("Error fetching %s skipping...\n", url) + klog.Warningf("Error fetching %s skipping...\n", url) continue } @@ -118,14 +118,14 @@ func main() { body, err := ioutil.ReadAll(res.Body) if err != nil { - glog.Warningf("Error parsing %s skipping...\n", url) + klog.Warningf("Error parsing %s skipping...\n", url) continue } var unmarshalled = response{} err = json.Unmarshal(body, &unmarshalled) if err != nil { - glog.Warningf("Error unmarshaling %s skipping...\n", url) + klog.Warningf("Error unmarshaling %s skipping...\n", url) continue } @@ -151,7 +151,7 @@ func main() { f, err := os.Create("ec2_instance_types.go") if err != nil { - glog.Fatal(err) + klog.Fatal(err) } defer f.Close() @@ -163,20 +163,20 @@ func main() { }) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } } func parseMemory(memory string) int64 { reg, err := regexp.Compile("[^0-9\\.]+") if err != nil { - glog.Fatal(err) + klog.Fatal(err) } parsed := strings.TrimSpace(reg.ReplaceAllString(memory, "")) mem, err := strconv.ParseFloat(parsed, 64) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } return int64(mem * float64(1024)) @@ -185,7 +185,7 @@ func parseMemory(memory string) int64 { func parseCPU(cpu string) int64 { i, err := strconv.ParseInt(cpu, 10, 64) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } return i } diff --git a/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-autodiscover.yaml b/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-autodiscover.yaml index 14dc8e6ded2a..5179ce14e1e3 100644 --- a/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-autodiscover.yaml +++ b/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-autodiscover.yaml @@ -42,7 +42,7 @@ rules: resources: ["poddisruptionbudgets"] verbs: ["watch","list"] - apiGroups: ["apps"] - resources: ["statefulsets"] + resources: ["statefulsets", "replicasets"] verbs: ["watch","list","get"] - apiGroups: ["storage.k8s.io"] resources: ["storageclasses"] @@ -121,7 +121,7 @@ spec: spec: serviceAccountName: cluster-autoscaler containers: - - image: k8s.gcr.io/cluster-autoscaler:v1.2.2 + - image: k8s.gcr.io/cluster-autoscaler:v1.13.2 name: cluster-autoscaler resources: limits: @@ -138,9 +138,6 @@ spec: - --skip-nodes-with-local-storage=false - --expander=least-waste - --node-group-auto-discovery=asg:tag=k8s.io/cluster-autoscaler/enabled,k8s.io/cluster-autoscaler/ - env: - - name: AWS_REGION - value: us-east-1 volumeMounts: - name: ssl-certs mountPath: /etc/ssl/certs/ca-certificates.crt diff --git a/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-multi-asg.yaml b/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-multi-asg.yaml index a1cf1e986162..1091486bdcf5 100644 --- a/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-multi-asg.yaml +++ 
b/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-multi-asg.yaml @@ -42,7 +42,7 @@ rules: resources: ["poddisruptionbudgets"] verbs: ["watch","list"] - apiGroups: ["apps"] - resources: ["statefulsets"] + resources: ["statefulsets", "replicasets"] verbs: ["watch","list","get"] - apiGroups: ["storage.k8s.io"] resources: ["storageclasses"] @@ -121,7 +121,7 @@ spec: spec: serviceAccountName: cluster-autoscaler containers: - - image: k8s.gcr.io/cluster-autoscaler:v1.2.2 + - image: k8s.gcr.io/cluster-autoscaler:v1.13.2 name: cluster-autoscaler resources: limits: @@ -139,9 +139,6 @@ spec: - --expander=least-waste - --nodes=1:10:k8s-worker-asg-1 - --nodes=1:3:k8s-worker-asg-2 - env: - - name: AWS_REGION - value: us-east-1 volumeMounts: - name: ssl-certs mountPath: /etc/ssl/certs/ca-certificates.crt diff --git a/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-one-asg.yaml b/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-one-asg.yaml index f48251f26746..9ce2c322ec14 100644 --- a/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-one-asg.yaml +++ b/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-one-asg.yaml @@ -42,7 +42,7 @@ rules: resources: ["poddisruptionbudgets"] verbs: ["watch","list"] - apiGroups: ["apps"] - resources: ["statefulsets"] + resources: ["statefulsets", "replicasets"] verbs: ["watch","list","get"] - apiGroups: ["storage.k8s.io"] resources: ["storageclasses"] @@ -121,7 +121,7 @@ spec: spec: serviceAccountName: cluster-autoscaler containers: - - image: k8s.gcr.io/cluster-autoscaler:v1.2.2 + - image: k8s.gcr.io/cluster-autoscaler:v1.13.2 name: cluster-autoscaler resources: limits: @@ -137,9 +137,6 @@ spec: - --cloud-provider=aws - --skip-nodes-with-local-storage=false - --nodes=1:10:k8s-worker-asg-1 - env: - - name: AWS_REGION - value: us-east-1 volumeMounts: - name: ssl-certs mountPath: /etc/ssl/certs/ca-certificates.crt diff --git a/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-run-on-master.yaml b/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-run-on-master.yaml index 3e033c731140..9336adff9587 100644 --- a/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-run-on-master.yaml +++ b/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-run-on-master.yaml @@ -42,7 +42,7 @@ rules: resources: ["poddisruptionbudgets"] verbs: ["watch","list"] - apiGroups: ["apps"] - resources: ["statefulsets"] + resources: ["statefulsets", "replicasets"] verbs: ["watch","list","get"] - apiGroups: ["storage.k8s.io"] resources: ["storageclasses"] @@ -122,11 +122,13 @@ spec: serviceAccountName: cluster-autoscaler tolerations: - effect: NoSchedule + operator: "Equal" + value: "true" key: node-role.kubernetes.io/master nodeSelector: kubernetes.io/role: master containers: - - image: k8s.gcr.io/cluster-autoscaler:v1.2.2 + - image: k8s.gcr.io/cluster-autoscaler:v1.13.2 name: cluster-autoscaler resources: limits: @@ -142,9 +144,6 @@ spec: - --cloud-provider=aws - --skip-nodes-with-local-storage=false - --nodes={{ node_asg_min }}:{{ node_asg_max }}:{{ name }} - env: - - name: AWS_REGION - value: {{ region }} volumeMounts: - name: ssl-certs mountPath: /etc/ssl/certs/ca-certificates.crt diff --git a/cluster-autoscaler/cloudprovider/azure/azure_agent_pool.go b/cluster-autoscaler/cloudprovider/azure/azure_agent_pool.go index b15ef43a6e23..b6327069f3a2 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_agent_pool.go +++ 
b/cluster-autoscaler/cloudprovider/azure/azure_agent_pool.go @@ -20,14 +20,15 @@ import ( "fmt" "math/rand" "sort" + "strings" "sync" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources" azStorage "github.com/Azure/azure-sdk-for-go/storage" "github.com/Azure/go-autorest/autorest/to" - "github.com/golang/glog" + "k8s.io/klog" apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" @@ -75,7 +76,7 @@ func (as *AgentPool) initialize() error { template, err := as.manager.azClient.deploymentsClient.ExportTemplate(ctx, as.manager.config.ResourceGroup, as.manager.config.Deployment) if err != nil { - glog.Errorf("deploymentsClient.ExportTemplate(%s, %s) failed: %v", as.manager.config.ResourceGroup, as.manager.config.Deployment, err) + klog.Errorf("deploymentsClient.ExportTemplate(%s, %s) failed: %v", as.manager.config.ResourceGroup, as.manager.config.Deployment, err) return err } @@ -132,7 +133,7 @@ func (as *AgentPool) GetVMIndexes() ([]int, map[int]string, error) { } indexes = append(indexes, index) - indexToVM[index] = "azure://" + *instance.ID + indexToVM[index] = "azure://" + strings.ToLower(*instance.ID) } sortedIndexes := sort.IntSlice(indexes) @@ -148,12 +149,12 @@ func (as *AgentPool) getCurSize() (int64, error) { return as.curSize, nil } - glog.V(5).Infof("Get agent pool size for %q", as.Name) + klog.V(5).Infof("Get agent pool size for %q", as.Name) indexes, _, err := as.GetVMIndexes() if err != nil { return 0, err } - glog.V(5).Infof("Returning agent pool (%q) size: %d\n", as.Name, len(indexes)) + klog.V(5).Infof("Returning agent pool (%q) size: %d\n", as.Name, len(indexes)) as.curSize = int64(len(indexes)) as.lastRefresh = time.Now() @@ -209,15 +210,20 @@ func (as *AgentPool) IncreaseSize(delta int) error { } ctx, cancel := getContextWithCancel() defer cancel() - _, err = as.manager.azClient.deploymentsClient.CreateOrUpdate(ctx, as.manager.config.ResourceGroup, newDeploymentName, newDeployment) - glog.V(3).Infof("Waiting for deploymentsClient.CreateOrUpdate(%s, %s, %s)", as.manager.config.ResourceGroup, newDeploymentName, newDeployment) - if err != nil { - return err + klog.V(3).Infof("Waiting for deploymentsClient.CreateOrUpdate(%s, %s, %v)", as.manager.config.ResourceGroup, newDeploymentName, newDeployment) + resp, err := as.manager.azClient.deploymentsClient.CreateOrUpdate(ctx, as.manager.config.ResourceGroup, newDeploymentName, newDeployment) + isSuccess, realError := isSuccessHTTPResponse(resp, err) + if isSuccess { + klog.V(3).Infof("deploymentsClient.CreateOrUpdate(%s, %s, %v) success", as.manager.config.ResourceGroup, newDeploymentName, newDeployment) + + // Update cache after scale success. + as.curSize = int64(expectedSize) + as.lastRefresh = time.Now() + return nil } - as.curSize = int64(expectedSize) - as.lastRefresh = time.Now() - return err + klog.Errorf("deploymentsClient.CreateOrUpdate for deployment %q failed: %v", newDeploymentName, realError) + return realError } // GetVirtualMachines returns list of nodes for the given agent pool. 
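The reworked IncreaseSize above (and the AKS/ACS paths later in this diff) gate success on an isSuccessHTTPResponse helper that this hunk does not show. Its contract, inferred from the call sites, is: pass through a real SDK error, otherwise classify the response by HTTP status. A plausible sketch under that assumption — treat this as inferred, not the PR's actual code:

```go
package azure

import (
	"fmt"
	"net/http"
)

// Assumed shape of the helper used by IncreaseSize and the AKS/ACS update
// paths: a 2xx response counts as success even when err carries detail.
func isSuccessHTTPResponse(resp *http.Response, err error) (bool, error) {
	if err != nil {
		return false, err
	}
	if resp == nil {
		return false, fmt.Errorf("no response and no error")
	}
	if resp.StatusCode >= 200 && resp.StatusCode < 300 {
		return true, nil
	}
	return false, fmt.Errorf("failed with HTTP status code %d", resp.StatusCode)
}
```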
@@ -237,7 +243,7 @@ func (as *AgentPool) GetVirtualMachines() (instances []compute.VirtualMachine, e tags := instance.Tags vmPoolName := tags["poolName"] - if vmPoolName == nil || *vmPoolName != as.Id() { + if vmPoolName == nil || !strings.EqualFold(*vmPoolName, as.Id()) { continue } @@ -274,7 +280,7 @@ func (as *AgentPool) DecreaseTargetSize(delta int) error { // Belongs returns true if the given node belongs to the NodeGroup. func (as *AgentPool) Belongs(node *apiv1.Node) (bool, error) { - glog.V(6).Infof("Check if node belongs to this agent pool: AgentPool:%v, node:%v\n", as, node) + klog.V(6).Infof("Check if node belongs to this agent pool: AgentPool:%v, node:%v\n", as, node) ref := &azureRef{ Name: node.Spec.ProviderID, } @@ -287,7 +293,7 @@ func (as *AgentPool) Belongs(node *apiv1.Node) (bool, error) { if targetAsg == nil { return false, fmt.Errorf("%s doesn't belong to a known agent pool", node.Name) } - if targetAsg.Id() != as.Id() { + if !strings.EqualFold(targetAsg.Id(), as.Id()) { return false, nil } return true, nil @@ -310,7 +316,7 @@ func (as *AgentPool) DeleteInstances(instances []*azureRef) error { return err } - if asg != commonAsg { + if !strings.EqualFold(asg.Id(), commonAsg.Id()) { return fmt.Errorf("cannot delete instance (%s) which don't belong to the same node pool (%q)", instance.GetKey(), commonAsg) } } @@ -318,13 +324,13 @@ func (as *AgentPool) DeleteInstances(instances []*azureRef) error { for _, instance := range instances { name, err := resourceName((*instance).Name) if err != nil { - glog.Errorf("Get name for instance %q failed: %v", *instance, err) + klog.Errorf("Get name for instance %q failed: %v", *instance, err) return err } err = as.deleteVirtualMachine(name) if err != nil { - glog.Errorf("Delete virtual machine %q failed: %v", name, err) + klog.Errorf("Delete virtual machine %q failed: %v", name, err) return err } } @@ -334,7 +340,7 @@ func (as *AgentPool) DeleteInstances(instances []*azureRef) error { // DeleteNodes deletes the nodes from the group. func (as *AgentPool) DeleteNodes(nodes []*apiv1.Node) error { - glog.V(8).Infof("Delete nodes requested: %v\n", nodes) + klog.V(8).Infof("Delete nodes requested: %v\n", nodes) indexes, _, err := as.GetVMIndexes() if err != nil { return err } @@ -380,21 +386,21 @@ func (as *AgentPool) TemplateNodeInfo() (*schedulercache.NodeInfo, error) { } // Nodes returns a list of all nodes that belong to this node group. -func (as *AgentPool) Nodes() ([]string, error) { +func (as *AgentPool) Nodes() ([]cloudprovider.Instance, error) { instances, err := as.GetVirtualMachines() if err != nil { return nil, err } - nodes := make([]string, 0, len(instances)) + nodes := make([]cloudprovider.Instance, 0, len(instances)) for _, instance := range instances { if len(*instance.ID) == 0 { continue } - // To keep consistent with providerID from kubernetes cloud provider, do not convert ID to lower case. + // Lower-case the ID to keep it consistent with the providerID reported by the Azure cloud provider.
- name := "azure://" + *instance.ID - nodes = append(nodes, name) + name := "azure://" + strings.ToLower(*instance.ID) + nodes = append(nodes, cloudprovider.Instance{Id: name}) } return nodes, nil @@ -430,18 +436,18 @@ func (as *AgentPool) deleteVirtualMachine(name string) error { vm, err := as.manager.azClient.virtualMachinesClient.Get(ctx, as.manager.config.ResourceGroup, name, "") if err != nil { if exists, _ := checkResourceExistsFromError(err); !exists { - glog.V(2).Infof("VirtualMachine %s/%s has already been removed", as.manager.config.ResourceGroup, name) + klog.V(2).Infof("VirtualMachine %s/%s has already been removed", as.manager.config.ResourceGroup, name) return nil } - glog.Errorf("failed to get VM: %s/%s: %s", as.manager.config.ResourceGroup, name, err.Error()) + klog.Errorf("failed to get VM: %s/%s: %s", as.manager.config.ResourceGroup, name, err.Error()) return err } vhd := vm.VirtualMachineProperties.StorageProfile.OsDisk.Vhd managedDisk := vm.VirtualMachineProperties.StorageProfile.OsDisk.ManagedDisk if vhd == nil && managedDisk == nil { - glog.Errorf("failed to get a valid os disk URI for VM: %s/%s", as.manager.config.ResourceGroup, name) + klog.Errorf("failed to get a valid os disk URI for VM: %s/%s", as.manager.config.ResourceGroup, name) return fmt.Errorf("os disk does not have a VHD URI") } @@ -449,38 +455,38 @@ func (as *AgentPool) deleteVirtualMachine(name string) error { var nicName string nicID := (*vm.VirtualMachineProperties.NetworkProfile.NetworkInterfaces)[0].ID if nicID == nil { - glog.Warningf("NIC ID is not set for VM (%s/%s)", as.manager.config.ResourceGroup, name) + klog.Warningf("NIC ID is not set for VM (%s/%s)", as.manager.config.ResourceGroup, name) } else { nicName, err = resourceName(*nicID) if err != nil { return err } - glog.Infof("found nic name for VM (%s/%s): %s", as.manager.config.ResourceGroup, name, nicName) + klog.Infof("found nic name for VM (%s/%s): %s", as.manager.config.ResourceGroup, name, nicName) } - glog.Infof("deleting VM: %s/%s", as.manager.config.ResourceGroup, name) + klog.Infof("deleting VM: %s/%s", as.manager.config.ResourceGroup, name) deleteCtx, deleteCancel := getContextWithCancel() defer deleteCancel() - glog.Infof("waiting for VirtualMachine deletion: %s/%s", as.manager.config.ResourceGroup, name) + klog.Infof("waiting for VirtualMachine deletion: %s/%s", as.manager.config.ResourceGroup, name) _, err = as.manager.azClient.virtualMachinesClient.Delete(deleteCtx, as.manager.config.ResourceGroup, name) _, realErr := checkResourceExistsFromError(err) if realErr != nil { return realErr } - glog.V(2).Infof("VirtualMachine %s/%s removed", as.manager.config.ResourceGroup, name) + klog.V(2).Infof("VirtualMachine %s/%s removed", as.manager.config.ResourceGroup, name) if len(nicName) > 0 { - glog.Infof("deleting nic: %s/%s", as.manager.config.ResourceGroup, nicName) + klog.Infof("deleting nic: %s/%s", as.manager.config.ResourceGroup, nicName) interfaceCtx, interfaceCancel := getContextWithCancel() defer interfaceCancel() _, err = as.manager.azClient.interfacesClient.Delete(interfaceCtx, as.manager.config.ResourceGroup, nicName) - glog.Infof("waiting for nic deletion: %s/%s", as.manager.config.ResourceGroup, nicName) + klog.Infof("waiting for nic deletion: %s/%s", as.manager.config.ResourceGroup, nicName) _, realErr := checkResourceExistsFromError(err) if realErr != nil { return realErr } - glog.V(2).Infof("interface %s/%s removed", as.manager.config.ResourceGroup, nicName) + klog.V(2).Infof("interface %s/%s removed", 
as.manager.config.ResourceGroup, nicName) } if vhd != nil { @@ -489,21 +495,21 @@ func (as *AgentPool) deleteVirtualMachine(name string) error { return err } - glog.Infof("found os disk storage reference: %s %s %s", accountName, vhdContainer, vhdBlob) + klog.Infof("found os disk storage reference: %s %s %s", accountName, vhdContainer, vhdBlob) - glog.Infof("deleting blob: %s/%s", vhdContainer, vhdBlob) + klog.Infof("deleting blob: %s/%s", vhdContainer, vhdBlob) if err = as.deleteBlob(accountName, vhdContainer, vhdBlob); err != nil { _, realErr := checkResourceExistsFromError(err) if realErr != nil { return realErr } - glog.V(2).Infof("Blob %s/%s removed", as.manager.config.ResourceGroup, vhdBlob) + klog.V(2).Infof("Blob %s/%s removed", as.manager.config.ResourceGroup, vhdBlob) } } else if managedDisk != nil { if osDiskName == nil { - glog.Warningf("osDisk is not set for VM %s/%s", as.manager.config.ResourceGroup, name) + klog.Warningf("osDisk is not set for VM %s/%s", as.manager.config.ResourceGroup, name) } else { - glog.Infof("deleting managed disk: %s/%s", as.manager.config.ResourceGroup, *osDiskName) + klog.Infof("deleting managed disk: %s/%s", as.manager.config.ResourceGroup, *osDiskName) disksCtx, disksCancel := getContextWithCancel() defer disksCancel() _, err = as.manager.azClient.disksClient.Delete(disksCtx, as.manager.config.ResourceGroup, *osDiskName) @@ -511,7 +517,7 @@ func (as *AgentPool) deleteVirtualMachine(name string) error { if realErr != nil { return realErr } - glog.V(2).Infof("disk %s/%s removed", as.manager.config.ResourceGroup, *osDiskName) + klog.V(2).Infof("disk %s/%s removed", as.manager.config.ResourceGroup, *osDiskName) } } diff --git a/cluster-autoscaler/cloudprovider/azure/azure_cache.go b/cluster-autoscaler/cloudprovider/azure/azure_cache.go index 6e8fabb8f26f..b0306d54f1f0 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_cache.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_cache.go @@ -20,15 +20,16 @@ import ( "fmt" "reflect" "regexp" + "strings" "sync" "time" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" + "k8s.io/klog" ) -var virtualMachineRE = regexp.MustCompile(`^azure://(?:.*)/providers/Microsoft.Compute/virtualMachines/(.+)$`) +var virtualMachineRE = regexp.MustCompile(`^azure://(?:.*)/providers/microsoft.compute/virtualmachines/(.+)$`) type asgCache struct { registeredAsgs []cloudprovider.NodeGroup @@ -50,7 +51,7 @@ func newAsgCache() (*asgCache, error) { cache.mutex.Lock() defer cache.mutex.Unlock() if err := cache.regenerate(); err != nil { - glog.Errorf("Error while regenerating Asg cache: %v", err) + klog.Errorf("Error while regenerating Asg cache: %v", err) } }, time.Hour, cache.interrupt) @@ -63,26 +64,26 @@ func (m *asgCache) Register(asg cloudprovider.NodeGroup) bool { defer m.mutex.Unlock() for i := range m.registeredAsgs { - if existing := m.registeredAsgs[i]; existing.Id() == asg.Id() { + if existing := m.registeredAsgs[i]; strings.EqualFold(existing.Id(), asg.Id()) { if reflect.DeepEqual(existing, asg) { return false } m.registeredAsgs[i] = asg - glog.V(4).Infof("ASG %q updated", asg.Id()) + klog.V(4).Infof("ASG %q updated", asg.Id()) m.invalidateUnownedInstanceCache() return true } } - glog.V(4).Infof("Registering ASG %q", asg.Id()) + klog.V(4).Infof("Registering ASG %q", asg.Id()) m.registeredAsgs = append(m.registeredAsgs, asg) m.invalidateUnownedInstanceCache() return true } func (m *asgCache) invalidateUnownedInstanceCache() { - 
glog.V(4).Info("Invalidating unowned instance cache") + klog.V(4).Info("Invalidating unowned instance cache") m.notInRegisteredAsg = make(map[azureRef]bool) } @@ -94,8 +95,8 @@ func (m *asgCache) Unregister(asg cloudprovider.NodeGroup) bool { updated := make([]cloudprovider.NodeGroup, 0, len(m.registeredAsgs)) changed := false for _, existing := range m.registeredAsgs { - if existing.Id() == asg.Id() { - glog.V(1).Infof("Unregistered ASG %s", asg.Id()) + if strings.EqualFold(existing.Id(), asg.Id()) { + klog.V(1).Infof("Unregistered ASG %s", asg.Id()) changed = true continue } @@ -117,7 +118,8 @@ func (m *asgCache) FindForInstance(instance *azureRef, vmType string) (cloudprov m.mutex.Lock() defer m.mutex.Unlock() - if m.notInRegisteredAsg[*instance] { + inst := azureRef{Name: strings.ToLower(instance.Name)} + if m.notInRegisteredAsg[inst] { // We already know we don't own this instance. Return early and avoid // additional calls. return nil, nil @@ -125,34 +127,37 @@ func (m *asgCache) FindForInstance(instance *azureRef, vmType string) (cloudprov if vmType == vmTypeVMSS { // Omit virtual machines not managed by vmss. - if ok := virtualMachineRE.Match([]byte(instance.Name)); ok { - glog.V(3).Infof("Instance %q is not managed by vmss, omit it in autoscaler", instance.Name) - m.notInRegisteredAsg[*instance] = true + if ok := virtualMachineRE.Match([]byte(inst.Name)); ok { + klog.V(3).Infof("Instance %q is not managed by vmss, omit it in autoscaler", instance.Name) + m.notInRegisteredAsg[inst] = true return nil, nil } } if vmType == vmTypeStandard { // Omit virtual machines with providerID not in Azure resource ID format. - if ok := virtualMachineRE.Match([]byte(instance.Name)); !ok { - glog.V(3).Infof("Instance %q is not in Azure resource ID format, omit it in autoscaler", instance.Name) - m.notInRegisteredAsg[*instance] = true + if ok := virtualMachineRE.Match([]byte(inst.Name)); !ok { + klog.V(3).Infof("Instance %q is not in Azure resource ID format, omit it in autoscaler", instance.Name) + m.notInRegisteredAsg[inst] = true return nil, nil } } - if asg, found := m.instanceToAsg[*instance]; found { + // Look up caches for the instance. + if asg := m.getInstanceFromCache(inst.Name); asg != nil { return asg, nil } + // Not found, regenerate the cache and try again. if err := m.regenerate(); err != nil { - return nil, fmt.Errorf("Error while looking for ASG for instance %+v, error: %v", *instance, err) + return nil, fmt.Errorf("error while looking for ASG for instance %q, error: %v", instance.Name, err) } - if config, found := m.instanceToAsg[*instance]; found { - return config, nil + if asg := m.getInstanceFromCache(inst.Name); asg != nil { + return asg, nil } - m.notInRegisteredAsg[*instance] = true + // Add the instance to notInRegisteredAsg since it's unknown from Azure. + m.notInRegisteredAsg[inst] = true return nil, nil } @@ -171,7 +176,7 @@ func (m *asgCache) regenerate() error { } for _, instance := range instances { - ref := azureRef{Name: instance} + ref := azureRef{Name: instance.Id} newCache[ref] = nsg } } @@ -179,3 +184,15 @@ func (m *asgCache) regenerate() error { m.instanceToAsg = newCache return nil } + +// Get node group from cache. nil would be return if not found. +// Should be call with lock protected. 
+func (m *asgCache) getInstanceFromCache(providerID string) cloudprovider.NodeGroup { + for instanceID, asg := range m.instanceToAsg { + if strings.EqualFold(instanceID.GetKey(), providerID) { + return asg + } + } + + return nil +} diff --git a/cluster-autoscaler/cloudprovider/azure/azure_client.go b/cluster-autoscaler/cloudprovider/azure/azure_client.go index e36ec82c63fd..d97699671915 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_client.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_client.go @@ -23,15 +23,15 @@ import ( "net/http" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2018-03-31/containerservice" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network" "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources" - "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage" + "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/adal" "github.com/Azure/go-autorest/autorest/azure" - "github.com/golang/glog" + "k8s.io/klog" ) // VirtualMachineScaleSetsClient defines needed functions for azure compute.VirtualMachineScaleSetsClient. @@ -95,9 +95,9 @@ func newAzVirtualMachineScaleSetsClient(subscriptionID, endpoint string, service } func (az *azVirtualMachineScaleSetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet) (resp *http.Response, err error) { - glog.V(10).Infof("azVirtualMachineScaleSetsClient.CreateOrUpdate(%q,%q): start", resourceGroupName, VMScaleSetName) + klog.V(10).Infof("azVirtualMachineScaleSetsClient.CreateOrUpdate(%q,%q): start", resourceGroupName, VMScaleSetName) defer func() { - glog.V(10).Infof("azVirtualMachineScaleSetsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, VMScaleSetName) + klog.V(10).Infof("azVirtualMachineScaleSetsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, VMScaleSetName) }() future, err := az.client.CreateOrUpdate(ctx, resourceGroupName, VMScaleSetName, parameters) @@ -110,18 +110,18 @@ func (az *azVirtualMachineScaleSetsClient) CreateOrUpdate(ctx context.Context, r } func (az *azVirtualMachineScaleSetsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSet, err error) { - glog.V(10).Infof("azVirtualMachineScaleSetsClient.Get(%q,%q): start", resourceGroupName, VMScaleSetName) + klog.V(10).Infof("azVirtualMachineScaleSetsClient.Get(%q,%q): start", resourceGroupName, VMScaleSetName) defer func() { - glog.V(10).Infof("azVirtualMachineScaleSetsClient.Get(%q,%q): end", resourceGroupName, VMScaleSetName) + klog.V(10).Infof("azVirtualMachineScaleSetsClient.Get(%q,%q): end", resourceGroupName, VMScaleSetName) }() return az.client.Get(ctx, resourceGroupName, VMScaleSetName) } func (az *azVirtualMachineScaleSetsClient) List(ctx context.Context, resourceGroupName string) (result []compute.VirtualMachineScaleSet, err error) { - glog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q,%q): start", resourceGroupName) + klog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q): start", resourceGroupName) defer func() { - glog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q,%q): end", resourceGroupName) + 
klog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q): end", resourceGroupName) }() iterator, err := az.client.ListComplete(ctx, resourceGroupName) @@ -142,9 +142,9 @@ func (az *azVirtualMachineScaleSetsClient) List(ctx context.Context, resourceGro } func (az *azVirtualMachineScaleSetsClient) DeleteInstances(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) (resp *http.Response, err error) { - glog.V(10).Infof("azVirtualMachineScaleSetsClient.DeleteInstances(%q,%q,%q): start", resourceGroupName, vmScaleSetName, vmInstanceIDs) + klog.V(10).Infof("azVirtualMachineScaleSetsClient.DeleteInstances(%q,%q,%v): start", resourceGroupName, vmScaleSetName, vmInstanceIDs) defer func() { - glog.V(10).Infof("azVirtualMachineScaleSetsClient.DeleteInstances(%q,%q,%q): end", resourceGroupName, vmScaleSetName, vmInstanceIDs) + klog.V(10).Infof("azVirtualMachineScaleSetsClient.DeleteInstances(%q,%q,%v): end", resourceGroupName, vmScaleSetName, vmInstanceIDs) }() future, err := az.client.DeleteInstances(ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs) @@ -174,18 +174,18 @@ func newAzVirtualMachineScaleSetVMsClient(subscriptionID, endpoint string, servi } func (az *azVirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVM, err error) { - glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Get(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID) + klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Get(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID) defer func() { - glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Get(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID) + klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Get(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID) }() return az.client.Get(ctx, resourceGroupName, VMScaleSetName, instanceID) } func (az *azVirtualMachineScaleSetVMsClient) List(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result []compute.VirtualMachineScaleSetVM, err error) { - glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.List(%q,%q,%q): start", resourceGroupName, virtualMachineScaleSetName, filter) + klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.List(%q,%q,%q): start", resourceGroupName, virtualMachineScaleSetName, filter) defer func() { - glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.List(%q,%q,%q): end", resourceGroupName, virtualMachineScaleSetName, filter) + klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.List(%q,%q,%q): end", resourceGroupName, virtualMachineScaleSetName, filter) }() iterator, err := az.client.ListComplete(ctx, resourceGroupName, virtualMachineScaleSetName, filter, selectParameter, expand) @@ -223,18 +223,18 @@ func newAzVirtualMachinesClient(subscriptionID, endpoint string, servicePrincipa } func (az *azVirtualMachinesClient) Get(ctx context.Context, resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) { - glog.V(10).Infof("azVirtualMachinesClient.Get(%q,%q,%q): start", resourceGroupName, VMName, expand) + klog.V(10).Infof("azVirtualMachinesClient.Get(%q,%q,%q): start", resourceGroupName, VMName, expand) defer func() { - glog.V(10).Infof("azVirtualMachinesClient.Get(%q,%q,%q): end", resourceGroupName, VMName, expand) + 
klog.V(10).Infof("azVirtualMachinesClient.Get(%q,%q,%q): end", resourceGroupName, VMName, expand) }() return az.client.Get(ctx, resourceGroupName, VMName, expand) } func (az *azVirtualMachinesClient) Delete(ctx context.Context, resourceGroupName string, VMName string) (resp *http.Response, err error) { - glog.V(10).Infof("azVirtualMachinesClient.Delete(%q,%q): start", resourceGroupName, VMName) + klog.V(10).Infof("azVirtualMachinesClient.Delete(%q,%q): start", resourceGroupName, VMName) defer func() { - glog.V(10).Infof("azVirtualMachinesClient.Delete(%q,%q): end", resourceGroupName, VMName) + klog.V(10).Infof("azVirtualMachinesClient.Delete(%q,%q): end", resourceGroupName, VMName) }() future, err := az.client.Delete(ctx, resourceGroupName, VMName) @@ -247,9 +247,9 @@ func (az *azVirtualMachinesClient) Delete(ctx context.Context, resourceGroupName } func (az *azVirtualMachinesClient) List(ctx context.Context, resourceGroupName string) (result []compute.VirtualMachine, err error) { - glog.V(10).Infof("azVirtualMachinesClient.List(%q): start", resourceGroupName) + klog.V(10).Infof("azVirtualMachinesClient.List(%q): start", resourceGroupName) defer func() { - glog.V(10).Infof("azVirtualMachinesClient.List(%q): end", resourceGroupName) + klog.V(10).Infof("azVirtualMachinesClient.List(%q): end", resourceGroupName) }() iterator, err := az.client.ListComplete(ctx, resourceGroupName) @@ -286,9 +286,9 @@ func newAzInterfacesClient(subscriptionID, endpoint string, servicePrincipalToke } func (az *azInterfacesClient) Delete(ctx context.Context, resourceGroupName string, networkInterfaceName string) (resp *http.Response, err error) { - glog.V(10).Infof("azInterfacesClient.Delete(%q,%q): start", resourceGroupName, networkInterfaceName) + klog.V(10).Infof("azInterfacesClient.Delete(%q,%q): start", resourceGroupName, networkInterfaceName) defer func() { - glog.V(10).Infof("azInterfacesClient.Delete(%q,%q): end", resourceGroupName, networkInterfaceName) + klog.V(10).Infof("azInterfacesClient.Delete(%q,%q): end", resourceGroupName, networkInterfaceName) }() future, err := az.client.Delete(ctx, resourceGroupName, networkInterfaceName) @@ -317,27 +317,27 @@ func newAzDeploymentsClient(subscriptionID, endpoint string, servicePrincipalTok } func (az *azDeploymentsClient) Get(ctx context.Context, resourceGroupName string, deploymentName string) (result resources.DeploymentExtended, err error) { - glog.V(10).Infof("azDeploymentsClient.Get(%q,%q): start", resourceGroupName, deploymentName) + klog.V(10).Infof("azDeploymentsClient.Get(%q,%q): start", resourceGroupName, deploymentName) defer func() { - glog.V(10).Infof("azDeploymentsClient.Get(%q,%q): end", resourceGroupName, deploymentName) + klog.V(10).Infof("azDeploymentsClient.Get(%q,%q): end", resourceGroupName, deploymentName) }() return az.client.Get(ctx, resourceGroupName, deploymentName) } func (az *azDeploymentsClient) ExportTemplate(ctx context.Context, resourceGroupName string, deploymentName string) (result resources.DeploymentExportResult, err error) { - glog.V(10).Infof("azDeploymentsClient.ExportTemplate(%q,%q): start", resourceGroupName, deploymentName) + klog.V(10).Infof("azDeploymentsClient.ExportTemplate(%q,%q): start", resourceGroupName, deploymentName) defer func() { - glog.V(10).Infof("azDeploymentsClient.ExportTemplate(%q,%q): end", resourceGroupName, deploymentName) + klog.V(10).Infof("azDeploymentsClient.ExportTemplate(%q,%q): end", resourceGroupName, deploymentName) }() return az.client.ExportTemplate(ctx, resourceGroupName, 
deploymentName) } func (az *azDeploymentsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, deploymentName string, parameters resources.Deployment) (resp *http.Response, err error) { - glog.V(10).Infof("azDeploymentsClient.CreateOrUpdate(%q,%q): start", resourceGroupName, deploymentName) + klog.V(10).Infof("azDeploymentsClient.CreateOrUpdate(%q,%q): start", resourceGroupName, deploymentName) defer func() { - glog.V(10).Infof("azDeploymentsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, deploymentName) + klog.V(10).Infof("azDeploymentsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, deploymentName) }() future, err := az.client.CreateOrUpdate(ctx, resourceGroupName, deploymentName, parameters) @@ -366,9 +366,9 @@ func newAzDisksClient(subscriptionID, endpoint string, servicePrincipalToken *ad } func (az *azDisksClient) Delete(ctx context.Context, resourceGroupName string, diskName string) (resp *http.Response, err error) { - glog.V(10).Infof("azDisksClient.Delete(%q,%q): start", resourceGroupName, diskName) + klog.V(10).Infof("azDisksClient.Delete(%q,%q): start", resourceGroupName, diskName) defer func() { - glog.V(10).Infof("azDisksClient.Delete(%q,%q): end", resourceGroupName, diskName) + klog.V(10).Infof("azDisksClient.Delete(%q,%q): end", resourceGroupName, diskName) }() future, err := az.client.Delete(ctx, resourceGroupName, diskName) @@ -397,9 +397,9 @@ func newAzAccountsClient(subscriptionID, endpoint string, servicePrincipalToken } func (az *azAccountsClient) ListKeys(ctx context.Context, resourceGroupName string, accountName string) (result storage.AccountListKeysResult, err error) { - glog.V(10).Infof("azAccountsClient.ListKeys(%q,%q): start", resourceGroupName, accountName) + klog.V(10).Infof("azAccountsClient.ListKeys(%q,%q): start", resourceGroupName, accountName) defer func() { - glog.V(10).Infof("azAccountsClient.ListKeys(%q,%q): end", resourceGroupName, accountName) + klog.V(10).Infof("azAccountsClient.ListKeys(%q,%q): end", resourceGroupName, accountName) }() return az.client.ListKeys(ctx, resourceGroupName, accountName) @@ -426,7 +426,7 @@ func newServicePrincipalTokenFromCredentials(config *Config, env *azure.Environm } if config.UseManagedIdentityExtension { - glog.V(2).Infoln("azure: using managed identity extension to retrieve access token") + klog.V(2).Infoln("azure: using managed identity extension to retrieve access token") msiEndpoint, err := adal.GetMSIVMEndpoint() if err != nil { return nil, fmt.Errorf("Getting the managed service identity endpoint: %v", err) @@ -437,7 +437,7 @@ func newServicePrincipalTokenFromCredentials(config *Config, env *azure.Environm } if len(config.AADClientSecret) > 0 { - glog.V(2).Infoln("azure: using client_id+client_secret to retrieve access token") + klog.V(2).Infoln("azure: using client_id+client_secret to retrieve access token") return adal.NewServicePrincipalToken( *oauthConfig, config.AADClientID, @@ -446,7 +446,7 @@ func newServicePrincipalTokenFromCredentials(config *Config, env *azure.Environm } if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 { - glog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve access token") + klog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve access token") certData, err := ioutil.ReadFile(config.AADClientCertPath) if err != nil { return nil, fmt.Errorf("reading the client certificate from file %s: %v", config.AADClientCertPath, err) @@ -473,39 +473,39 @@ 
func newAzClient(cfg *Config, env *azure.Environment) (*azClient, error) { } scaleSetsClient := newAzVirtualMachineScaleSetsClient(cfg.SubscriptionID, env.ResourceManagerEndpoint, spt) - glog.V(5).Infof("Created scale set client with authorizer: %v", scaleSetsClient) + klog.V(5).Infof("Created scale set client with authorizer: %v", scaleSetsClient) scaleSetVMsClient := newAzVirtualMachineScaleSetVMsClient(cfg.SubscriptionID, env.ResourceManagerEndpoint, spt) - glog.V(5).Infof("Created scale set vm client with authorizer: %v", scaleSetVMsClient) + klog.V(5).Infof("Created scale set vm client with authorizer: %v", scaleSetVMsClient) virtualMachinesClient := newAzVirtualMachinesClient(cfg.SubscriptionID, env.ResourceManagerEndpoint, spt) - glog.V(5).Infof("Created vm client with authorizer: %v", virtualMachinesClient) + klog.V(5).Infof("Created vm client with authorizer: %v", virtualMachinesClient) deploymentsClient := newAzDeploymentsClient(cfg.SubscriptionID, env.ResourceManagerEndpoint, spt) - glog.V(5).Infof("Created deployments client with authorizer: %v", deploymentsClient) + klog.V(5).Infof("Created deployments client with authorizer: %v", deploymentsClient) interfacesClient := newAzInterfacesClient(cfg.SubscriptionID, env.ResourceManagerEndpoint, spt) - glog.V(5).Infof("Created interfaces client with authorizer: %v", interfacesClient) + klog.V(5).Infof("Created interfaces client with authorizer: %v", interfacesClient) storageAccountsClient := newAzAccountsClient(cfg.SubscriptionID, env.ResourceManagerEndpoint, spt) - glog.V(5).Infof("Created storage accounts client with authorizer: %v", storageAccountsClient) + klog.V(5).Infof("Created storage accounts client with authorizer: %v", storageAccountsClient) disksClient := newAzDisksClient(cfg.SubscriptionID, env.ResourceManagerEndpoint, spt) - glog.V(5).Infof("Created disks client with authorizer: %v", disksClient) + klog.V(5).Infof("Created disks client with authorizer: %v", disksClient) containerServicesClient := containerservice.NewContainerServicesClient(cfg.SubscriptionID) containerServicesClient.BaseURI = env.ResourceManagerEndpoint containerServicesClient.Authorizer = autorest.NewBearerAuthorizer(spt) containerServicesClient.PollingDelay = 5 * time.Second containerServicesClient.Sender = autorest.CreateSender() - glog.V(5).Infof("Created Container services client with authorizer: %v", containerServicesClient) + klog.V(5).Infof("Created Container services client with authorizer: %v", containerServicesClient) managedContainerServicesClient := containerservice.NewManagedClustersClient(cfg.SubscriptionID) managedContainerServicesClient.BaseURI = env.ResourceManagerEndpoint managedContainerServicesClient.Authorizer = autorest.NewBearerAuthorizer(spt) managedContainerServicesClient.PollingDelay = 5 * time.Second managedContainerServicesClient.Sender = autorest.CreateSender() - glog.V(5).Infof("Created Managed Container services client with authorizer: %v", managedContainerServicesClient) + klog.V(5).Infof("Created Managed Container services client with authorizer: %v", managedContainerServicesClient) return &azClient{ disksClient: disksClient, diff --git a/cluster-autoscaler/cloudprovider/azure/azure_cloud_provider.go b/cluster-autoscaler/cloudprovider/azure/azure_cloud_provider.go index 70365f15f36d..15dd6aa57357 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_cloud_provider.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_cloud_provider.go @@ -17,11 +17,16 @@ limitations under the License. 
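Before the azure_cloud_provider.go changes that follow, note the theme they share with the files above: ARM resource IDs are case-insensitive and come back from different APIs with inconsistent casing, so exact comparisons become strings.EqualFold and cache keys are normalized with strings.ToLower (the virtualMachineRE pattern was lowercased for the same reason). A tiny illustration with made-up IDs:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Two spellings of the same (made-up) Azure resource ID.
	a := "azure:///subscriptions/SUB/providers/Microsoft.Compute/virtualMachines/vm-0"
	b := "azure:///subscriptions/sub/providers/microsoft.compute/virtualmachines/vm-0"

	fmt.Println(a == b)                  // false: byte-wise comparison is case-sensitive
	fmt.Println(strings.EqualFold(a, b)) // true: the IDs differ only in case

	// Normalizing once at the cache boundary keeps plain map lookups correct.
	cache := map[string]bool{strings.ToLower(a): true}
	fmt.Println(cache[strings.ToLower(b)]) // true
}
```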
package azure import ( - "github.com/golang/glog" + "io" + "os" + "strings" + + "k8s.io/klog" apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" + "k8s.io/autoscaler/cluster-autoscaler/config" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" ) @@ -70,7 +75,7 @@ func (azure *AzureCloudProvider) NodeGroups() []cloudprovider.NodeGroup { // NodeGroupForNode returns the node group for the given node. func (azure *AzureCloudProvider) NodeGroupForNode(node *apiv1.Node) (cloudprovider.NodeGroup, error) { - glog.V(6).Infof("Searching for node group for the node: %s\n", node.Spec.ProviderID) + klog.V(6).Infof("Searching for node group for the node: %s\n", node.Spec.ProviderID) ref := &azureRef{ Name: node.Spec.ProviderID, } @@ -106,6 +111,11 @@ func (azure *AzureCloudProvider) Refresh() error { return azure.azureManager.Refresh() } +// GetInstanceID gets the instance ID for the specified node. +func (azure *AzureCloudProvider) GetInstanceID(node *apiv1.Node) string { + return strings.ToLower(node.Spec.ProviderID) +} + // azureRef contains a reference to some entity in Azure world. type azureRef struct { Name string @@ -115,3 +125,28 @@ type azureRef struct { func (m *azureRef) GetKey() string { return m.Name } + +// BuildAzure builds Azure cloud provider, manager etc. +func BuildAzure(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider { + var config io.ReadCloser + if opts.CloudConfig != "" { + klog.Infof("Creating Azure Manager using cloud-config file: %v", opts.CloudConfig) + var err error + config, err = os.Open(opts.CloudConfig) + if err != nil { + klog.Fatalf("Couldn't open cloud provider configuration %s: %#v", opts.CloudConfig, err) + } + defer config.Close() + } else { + klog.Info("Creating Azure Manager with default configuration.") + } + manager, err := CreateAzureManager(config, do) + if err != nil { + klog.Fatalf("Failed to create Azure Manager: %v", err) + } + provider, err := BuildAzureCloudProvider(manager, rl) + if err != nil { + klog.Fatalf("Failed to create Azure cloud provider: %v", err) + } + return provider +} diff --git a/cluster-autoscaler/cloudprovider/azure/azure_cloud_provider_test.go b/cluster-autoscaler/cloudprovider/azure/azure_cloud_provider_test.go index 54b4c4358095..e6a6a9bb6520 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_cloud_provider_test.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_cloud_provider_test.go @@ -19,7 +19,7 @@ package azure import ( "testing" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources" "github.com/Azure/go-autorest/autorest/azure" "github.com/stretchr/testify/assert" diff --git a/cluster-autoscaler/cloudprovider/azure/azure_container_service_pool.go b/cluster-autoscaler/cloudprovider/azure/azure_container_service_pool.go index 52f1909251da..43329e47cf86 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_container_service_pool.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_container_service_pool.go @@ -21,7 +21,7 @@ import ( "strings" "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2018-03-31/containerservice" - "github.com/golang/glog" + "k8s.io/klog" apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" @@ -77,8 +77,8 @@
func NewContainerServiceAgentPool(spec *dynamic.NodeGroupSpec, am *AzureManager) func (agentPool *ContainerServiceAgentPool) GetAKSAgentPool(agentProfiles *[]containerservice.ManagedClusterAgentPoolProfile) (ret *containerservice.ManagedClusterAgentPoolProfile) { for _, value := range *agentProfiles { profileName := *value.Name - glog.V(5).Infof("AKS AgentPool profile name: %s", profileName) - if profileName == (agentPool.azureRef.Name) { + klog.V(5).Infof("AKS AgentPool profile name: %s", profileName) + if strings.EqualFold(profileName, agentPool.azureRef.Name) { return &value } } @@ -91,8 +91,8 @@ func (agentPool *ContainerServiceAgentPool) GetAKSAgentPool(agentProfiles *[]con func (agentPool *ContainerServiceAgentPool) GetACSAgentPool(agentProfiles *[]containerservice.AgentPoolProfile) (ret *containerservice.AgentPoolProfile) { for _, value := range *agentProfiles { profileName := *value.Name - glog.V(5).Infof("ACS AgentPool profile name: %s", profileName) - if profileName == (agentPool.azureRef.Name) { + klog.V(5).Infof("ACS AgentPool profile name: %s", profileName) + if strings.EqualFold(profileName, agentPool.azureRef.Name) { return &value } } @@ -104,8 +104,8 @@ func (agentPool *ContainerServiceAgentPool) GetACSAgentPool(agentProfiles *[]con for _, value := range *agentProfiles { profileName := *value.Name poolName := agentPool.azureRef.Name + "pool0" - glog.V(5).Infof("Workaround match check - ACS AgentPool Profile: %s <=> Poolname: %s", profileName, poolName) - if profileName == poolName { + klog.V(5).Infof("Workaround match check - ACS AgentPool Profile: %s <=> Poolname: %s", profileName, poolName) + if strings.EqualFold(profileName, poolName) { return &value } } @@ -122,7 +122,7 @@ func (agentPool *ContainerServiceAgentPool) getAKSNodeCount() (count int, err er agentPool.resourceGroup, agentPool.clusterName) if err != nil { - glog.Error("Failed to get AKS cluster (name:%q): %v", agentPool.clusterName, err) + klog.Errorf("Failed to get AKS cluster (name:%q): %v", agentPool.clusterName, err) return -1, err } @@ -147,7 +147,7 @@ func (agentPool *ContainerServiceAgentPool) getACSNodeCount() (count int, err er agentPool.resourceGroup, agentPool.clusterName) if err != nil { - glog.Error("Failed to get ACS cluster (name:%q): %v", agentPool.clusterName, err) + klog.Errorf("Failed to get ACS cluster (name:%q): %v", agentPool.clusterName, err) return -1, err } @@ -172,7 +172,7 @@ func (agentPool *ContainerServiceAgentPool) setAKSNodeCount(count int) error { agentPool.resourceGroup, agentPool.clusterName) if err != nil { - glog.Error("Failed to get AKS cluster (name:%q): %v", agentPool.clusterName, err) + klog.Errorf("Failed to get AKS cluster (name:%q): %v", agentPool.clusterName, err) return err } @@ -181,7 +181,7 @@ func (agentPool *ContainerServiceAgentPool) setAKSNodeCount(count int) error { return fmt.Errorf("could not find pool with name: %s", agentPool.azureRef) } - glog.Infof("Current size: %d, Target size requested: %d", *pool.Count, count) + klog.Infof("Current size: %d, Target size requested: %d", *pool.Count, count) updateCtx, updateCancel := getContextWithCancel() defer updateCancel() @@ -190,10 +190,19 @@ func (agentPool *ContainerServiceAgentPool) setAKSNodeCount(count int) error { future, err := aksClient.CreateOrUpdate(updateCtx, agentPool.resourceGroup, agentPool.clusterName, managedCluster) if err != nil { - glog.Error("Failed to update AKS cluster (%q): %v", agentPool.clusterName, err) + klog.Errorf("Failed to update AKS cluster (%q): %v", agentPool.clusterName, err) 
return err } - return future.WaitForCompletion(updateCtx, aksClient.Client) + + err = future.WaitForCompletionRef(updateCtx, aksClient.Client) + isSuccess, realError := isSuccessHTTPResponse(future.Response(), err) + if isSuccess { + klog.V(3).Infof("aksClient.CreateOrUpdate for aks cluster %q success", agentPool.clusterName) + return nil + } + + klog.Errorf("aksClient.CreateOrUpdate for aks cluster %q failed: %v", agentPool.clusterName, realError) + return realError } // setACSNodeCount sets node count for ACS agent pool. @@ -205,7 +214,7 @@ func (agentPool *ContainerServiceAgentPool) setACSNodeCount(count int) error { agentPool.resourceGroup, agentPool.clusterName) if err != nil { - glog.Error("Failed to get ACS cluster (name:%q): %v", agentPool.clusterName, err) + klog.Errorf("Failed to get ACS cluster (name:%q): %v", agentPool.clusterName, err) return err } @@ -214,7 +223,7 @@ func (agentPool *ContainerServiceAgentPool) setACSNodeCount(count int) error { return fmt.Errorf("could not find pool with name: %s", agentPool.azureRef) } - glog.Infof("Current size: %d, Target size requested: %d", *pool.Count, count) + klog.Infof("Current size: %d, Target size requested: %d", *pool.Count, count) updateCtx, updateCancel := getContextWithCancel() defer updateCancel() @@ -223,10 +232,19 @@ func (agentPool *ContainerServiceAgentPool) setACSNodeCount(count int) error { future, err := acsClient.CreateOrUpdate(updateCtx, agentPool.resourceGroup, agentPool.clusterName, acsCluster) if err != nil { - glog.Error("Failed to update ACS cluster (%q): %v", agentPool.clusterName, err) + klog.Errorf("Failed to update ACS cluster (%q): %v", agentPool.clusterName, err) return err } - return future.WaitForCompletion(updateCtx, acsClient.Client) + + err = future.WaitForCompletionRef(updateCtx, acsClient.Client) + isSuccess, realError := isSuccessHTTPResponse(future.Response(), err) + if isSuccess { + klog.V(3).Infof("acsClient.CreateOrUpdate for acs cluster %q success", agentPool.clusterName) + return nil + } + + klog.Errorf("acsClient.CreateOrUpdate for acs cluster %q failed: %v", agentPool.clusterName, realError) + return realError } //GetNodeCount returns the count of nodes from the managed agent pool profile @@ -252,7 +270,7 @@ func (agentPool *ContainerServiceAgentPool) SetNodeCount(count int) (err error) func (agentPool *ContainerServiceAgentPool) GetProviderID(name string) string { //TODO: come with a generic way to make it work with provider id formats // in different version of k8s. - return "azure://" + name + return "azure://" + strings.ToLower(name) } //GetName extracts the name of the node (a format which underlying cloud service understands) @@ -267,7 +285,7 @@ func (agentPool *ContainerServiceAgentPool) GetName(providerID string) (string, return "", err } for _, vm := range vms { - if strings.Compare(*vm.ID, providerID) == 0 { + if strings.EqualFold(*vm.ID, providerID) { return *vm.Name, nil } } @@ -297,11 +315,11 @@ func (agentPool *ContainerServiceAgentPool) TargetSize() (int, error) { //a delete is performed from a scale down. 
func (agentPool *ContainerServiceAgentPool) SetSize(targetSize int) error { if targetSize > agentPool.MaxSize() || targetSize < agentPool.MinSize() { - glog.Errorf("Target size %d requested outside Max: %d, Min: %d", targetSize, agentPool.MaxSize(), agentPool.MaxSize) + klog.Errorf("Target size %d requested outside Max: %d, Min: %d", targetSize, agentPool.MaxSize(), agentPool.MaxSize()) return fmt.Errorf("Target size %d requested outside Max: %d, Min: %d", targetSize, agentPool.MaxSize(), agentPool.MinSize()) } - glog.V(2).Infof("Setting size for cluster (%q) with new count (%d)", agentPool.clusterName, targetSize) + klog.V(2).Infof("Setting size for cluster (%q) with new count (%d)", agentPool.clusterName, targetSize) if agentPool.serviceType == vmTypeAKS { return agentPool.setAKSNodeCount(targetSize) } @@ -339,16 +357,16 @@ func (agentPool *ContainerServiceAgentPool) DeleteNodesInternal(providerIDs []st targetSize := currentSize for _, providerID := range providerIDs { - glog.Infof("ProviderID got to delete: %s", providerID) + klog.Infof("ProviderID got to delete: %s", providerID) nodeName, err := agentPool.GetName(providerID) if err != nil { return err } - glog.Infof("VM name got to delete: %s", nodeName) + klog.Infof("VM name got to delete: %s", nodeName) err = agentPool.util.DeleteVirtualMachine(agentPool.nodeResourceGroup, nodeName) if err != nil { - glog.Error(err) + klog.Error(err) return err } targetSize-- @@ -366,11 +384,11 @@ func (agentPool *ContainerServiceAgentPool) DeleteNodesInternal(providerIDs []st func (agentPool *ContainerServiceAgentPool) DeleteNodes(nodes []*apiv1.Node) error { var providerIDs []string for _, node := range nodes { - glog.Infof("Node: %s", node.Spec.ProviderID) + klog.Infof("Node: %s", node.Spec.ProviderID) providerIDs = append(providerIDs, node.Spec.ProviderID) } for _, p := range providerIDs { - glog.Infof("ProviderID before calling acsmgr: %s", p) + klog.Infof("ProviderID before calling acsmgr: %s", p) } return agentPool.DeleteNodesInternal(providerIDs) } @@ -379,8 +397,8 @@ func (agentPool *ContainerServiceAgentPool) DeleteNodes(nodes []*apiv1.Node) err func (agentPool *ContainerServiceAgentPool) IsContainerServiceNode(tags map[string]*string) bool { poolName := tags["poolName"] if poolName != nil { - glog.V(5).Infof("Matching agentPool name: %s with tag name: %s", agentPool.azureRef.Name, *poolName) - if *poolName == agentPool.azureRef.Name { + klog.V(5).Infof("Matching agentPool name: %s with tag name: %s", agentPool.azureRef.Name, *poolName) + if strings.EqualFold(*poolName, agentPool.azureRef.Name) { return true } } @@ -394,15 +412,15 @@ func (agentPool *ContainerServiceAgentPool) GetNodes() ([]string, error) { defer cancel() vmList, err := agentPool.manager.azClient.virtualMachinesClient.List(ctx, agentPool.nodeResourceGroup) if err != nil { - glog.Error("Error", err) + klog.Errorf("Azure client list vm error : %v", err) return nil, err } var nodeArray []string for _, node := range vmList { - glog.V(5).Infof("Node Name: %s, ID: %s", *node.Name, *node.ID) + klog.V(5).Infof("Node Name: %s, ID: %s", *node.Name, *node.ID) if agentPool.IsContainerServiceNode(node.Tags) { providerID := agentPool.GetProviderID(*node.ID) - glog.V(5).Infof("Returning back the providerID: %s", providerID) + klog.V(5).Infof("Returning back the providerID: %s", providerID) nodeArray = append(nodeArray, providerID) } } @@ -412,18 +430,18 @@ func (agentPool *ContainerServiceAgentPool) GetNodes() ([]string, error) { //DecreaseTargetSize requests the underlying service to 
decrease the node count. func (agentPool *ContainerServiceAgentPool) DecreaseTargetSize(delta int) error { if delta >= 0 { - glog.Errorf("Size decrease error: %d", delta) + klog.Errorf("Size decrease error: %d", delta) return fmt.Errorf("Size decrease must be negative") } currentSize, err := agentPool.TargetSize() if err != nil { - glog.Error(err) + klog.Error(err) return err } // Get the current nodes in the list nodes, err := agentPool.GetNodes() if err != nil { - glog.Error(err) + klog.Error(err) return err } targetSize := int(currentSize) + delta @@ -445,8 +463,16 @@ func (agentPool *ContainerServiceAgentPool) Debug() string { } //Nodes returns the list of nodes in the agentPool. -func (agentPool *ContainerServiceAgentPool) Nodes() ([]string, error) { - return agentPool.GetNodes() +func (agentPool *ContainerServiceAgentPool) Nodes() ([]cloudprovider.Instance, error) { + instanceNames, err := agentPool.GetNodes() + if err != nil { + return nil, err + } + instances := make([]cloudprovider.Instance, 0, len(instanceNames)) + for _, instanceName := range instanceNames { + instances = append(instances, cloudprovider.Instance{Id: instanceName}) + } + return instances, nil } //TemplateNodeInfo is not implemented. diff --git a/cluster-autoscaler/cloudprovider/azure/azure_fakes.go b/cluster-autoscaler/cloudprovider/azure/azure_fakes.go index 147e804ca363..395f01a4c0b4 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_fakes.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_fakes.go @@ -22,15 +22,15 @@ import ( "net/http" "sync" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources" - "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage" + "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage" "github.com/Azure/go-autorest/autorest" "github.com/stretchr/testify/mock" ) const ( - fakeVirtualMachineScaleSetVMID = "/subscriptions/test-subscription-id/resourceGroups/test-asg/providers/Microsoft.Compute/virtualMachineScaleSets/agents/virtualMachines/0" + fakeVirtualMachineScaleSetVMID = "/subscriptions/test-subscription-id/resourcegroups/test-asg/providers/microsoft.compute/virtualmachinescalesets/agents/virtualmachines/0" ) // VirtualMachineScaleSetsClientMock mocks for VirtualMachineScaleSetsClient. @@ -63,7 +63,9 @@ func (client *VirtualMachineScaleSetsClientMock) CreateOrUpdate(ctx context.Cont } client.FakeStore[resourceGroupName][VMScaleSetName] = parameters - return nil, nil + return &http.Response{ + StatusCode: http.StatusOK, + }, nil } // DeleteInstances deletes a set of instances for specified VirtualMachineScaleSet. 
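Both Azure pool implementations in this diff now return []cloudprovider.Instance from Nodes() instead of bare provider-ID strings. Assuming the Instance struct carries at least the Id field used here, the adaptation is mechanical and could be factored into a helper like the following; this is a hypothetical refactoring sketch, the PR simply inlines the loop:

```go
package azure

import "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"

// toInstances wraps provider IDs in cloudprovider.Instance values, matching
// the loops inlined above. Hypothetical helper; the real Instance type may
// carry more fields than the Id used in this diff.
func toInstances(providerIDs []string) []cloudprovider.Instance {
	instances := make([]cloudprovider.Instance, 0, len(providerIDs))
	for _, id := range providerIDs {
		instances = append(instances, cloudprovider.Instance{Id: id})
	}
	return instances
}
```

Relatedly, the fake CreateOrUpdate above now returns an explicit 200 response so that the status-code based success check sketched earlier sees the mocked call as successful.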
diff --git a/cluster-autoscaler/cloudprovider/azure/azure_manager.go b/cluster-autoscaler/cloudprovider/azure/azure_manager.go index 98a326ceaa70..a113ed99c295 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_manager.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_manager.go @@ -25,10 +25,10 @@ import ( "time" "github.com/Azure/go-autorest/autorest/azure" - "github.com/golang/glog" "gopkg.in/gcfg.v1" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config/dynamic" + "k8s.io/klog" ) const ( @@ -104,7 +104,7 @@ func CreateAzureManager(configReader io.Reader, discoveryOpts cloudprovider.Node if configReader != nil { if err := gcfg.ReadInto(&cfg, configReader); err != nil { - glog.Errorf("Couldn't read config: %v", err) + klog.Errorf("Couldn't read config: %v", err) return nil, err } } else { @@ -140,7 +140,7 @@ func CreateAzureManager(configReader io.Reader, discoveryOpts cloudprovider.Node if cfg.VMType == vmTypeStandard && len(cfg.DeploymentParameters) == 0 { parameters, err := readDeploymentParameters(deploymentParametersPath) if err != nil { - glog.Errorf("readDeploymentParameters failed with error: %v", err) + klog.Errorf("readDeploymentParameters failed with error: %v", err) return nil, err } @@ -160,7 +160,7 @@ func CreateAzureManager(configReader io.Reader, discoveryOpts cloudprovider.Node return nil, err } - glog.Infof("Starting azure manager with subscription ID %q", cfg.SubscriptionID) + klog.Infof("Starting azure manager with subscription ID %q", cfg.SubscriptionID) azClient, err := newAzClient(&cfg, &env) if err != nil { @@ -254,11 +254,11 @@ func (m *AzureManager) Refresh() error { func (m *AzureManager) forceRefresh() error { if err := m.fetchAutoAsgs(); err != nil { - glog.Errorf("Failed to fetch ASGs: %v", err) + klog.Errorf("Failed to fetch ASGs: %v", err) return err } m.lastRefresh = time.Now() - glog.V(2).Infof("Refreshed ASG list, next refresh after %v", m.lastRefresh.Add(refreshInterval)) + klog.V(2).Infof("Refreshed ASG list, next refresh after %v", m.lastRefresh.Add(refreshInterval)) return nil } @@ -279,11 +279,11 @@ func (m *AzureManager) fetchAutoAsgs() error { // This ASG was explicitly configured, but would also be // autodiscovered. We want the explicitly configured min and max // nodes to take precedence. 
- glog.V(3).Infof("Ignoring explicitly configured ASG %s for autodiscovery.", asg.Id()) + klog.V(3).Infof("Ignoring explicitly configured ASG %s for autodiscovery.", asg.Id()) continue } if m.RegisterAsg(asg) { - glog.V(3).Infof("Autodiscovered ASG %s using tags %v", asg.Id(), m.asgAutoDiscoverySpecs) + klog.V(3).Infof("Autodiscovered ASG %s using tags %v", asg.Id(), m.asgAutoDiscoverySpecs) changed = true } } @@ -365,7 +365,7 @@ func (m *AzureManager) listScaleSets(filter []cloudprovider.LabelAutoDiscoveryCo result, err := m.azClient.virtualMachineScaleSetsClient.List(ctx, m.config.ResourceGroup) if err != nil { - glog.Errorf("VirtualMachineScaleSetsClient.List for %v failed: %v", m.config.ResourceGroup, err) + klog.Errorf("VirtualMachineScaleSetsClient.List for %v failed: %v", m.config.ResourceGroup, err) return nil, err } @@ -400,7 +400,7 @@ func (m *AzureManager) listAgentPools(filter []cloudprovider.LabelAutoDiscoveryC defer cancel() deploy, err := m.azClient.deploymentsClient.Get(ctx, m.config.ResourceGroup, m.config.Deployment) if err != nil { - glog.Errorf("deploymentsClient.Get(%s, %s) failed: %v", m.config.ResourceGroup, m.config.Deployment, err) + klog.Errorf("deploymentsClient.Get(%s, %s) failed: %v", m.config.ResourceGroup, m.config.Deployment, err) return nil, err } diff --git a/cluster-autoscaler/cloudprovider/azure/azure_scale_set.go b/cluster-autoscaler/cloudprovider/azure/azure_scale_set.go index 5d36d8a33194..956995d57617 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_scale_set.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_scale_set.go @@ -19,20 +19,21 @@ package azure import ( "fmt" "math/rand" + "strings" "sync" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" - "github.com/golang/glog" - apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config/dynamic" "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" + "k8s.io/klog" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" ) // ScaleSet implements NodeGroup interface. 
@@ -116,12 +117,12 @@ func (scaleSet *ScaleSet) getCurSize() (int64, error) { return scaleSet.curSize, nil } - glog.V(5).Infof("Get scale set size for %q", scaleSet.Name) + klog.V(5).Infof("Get scale set size for %q", scaleSet.Name) set, err := scaleSet.getVMSSInfo() if err != nil { return -1, err } - glog.V(5).Infof("Getting scale set (%q) capacity: %d\n", scaleSet.Name, *set.Sku.Capacity) + klog.V(5).Infof("Getting scale set (%q) capacity: %d\n", scaleSet.Name, *set.Sku.Capacity) scaleSet.curSize = *set.Sku.Capacity scaleSet.lastRefresh = time.Now() @@ -148,15 +149,18 @@ func (scaleSet *ScaleSet) SetScaleSetSize(size int64) error { op.VirtualMachineScaleSetProperties.ProvisioningState = nil updateCtx, updateCancel := getContextWithCancel() defer updateCancel() - _, err = scaleSet.manager.azClient.virtualMachineScaleSetsClient.CreateOrUpdate(updateCtx, resourceGroup, scaleSet.Name, op) - if err != nil { - glog.Errorf("virtualMachineScaleSetsClient.CreateOrUpdate for scale set %q failed: %v", scaleSet.Name, err) - return err + klog.V(3).Infof("Waiting for virtualMachineScaleSetsClient.CreateOrUpdate(%s)", scaleSet.Name) + resp, err := scaleSet.manager.azClient.virtualMachineScaleSetsClient.CreateOrUpdate(updateCtx, resourceGroup, scaleSet.Name, op) + isSuccess, realError := isSuccessHTTPResponse(resp, err) + if isSuccess { + klog.V(3).Infof("virtualMachineScaleSetsClient.CreateOrUpdate(%s) success", scaleSet.Name) + scaleSet.curSize = size + scaleSet.lastRefresh = time.Now() + return nil } - scaleSet.curSize = size - scaleSet.lastRefresh = time.Now() - return nil + klog.Errorf("virtualMachineScaleSetsClient.CreateOrUpdate for scale set %q failed: %v", scaleSet.Name, realError) + return realError } // TargetSize returns the current TARGET size of the node group. It is possible that the @@ -187,43 +191,25 @@ func (scaleSet *ScaleSet) IncreaseSize(delta int) error { // GetScaleSetVms returns list of nodes for the given scale set. // Note that the list results is not used directly because their resource ID format // is not consistent with Get results. -// TODO(feiskyer): use list results directly after the issue fixed in Azure VMSS API. 
-func (scaleSet *ScaleSet) GetScaleSetVms() ([]compute.VirtualMachineScaleSetVM, error) { - instanceIDs := make([]string, 0) +func (scaleSet *ScaleSet) GetScaleSetVms() ([]string, error) { ctx, cancel := getContextWithCancel() defer cancel() resourceGroup := scaleSet.manager.config.ResourceGroup - result, err := scaleSet.manager.azClient.virtualMachineScaleSetVMsClient.List(ctx, resourceGroup, scaleSet.Name, "", "", "") + vmList, err := scaleSet.manager.azClient.virtualMachineScaleSetVMsClient.List(ctx, resourceGroup, scaleSet.Name, "", "", "") if err != nil { - glog.Errorf("VirtualMachineScaleSetVMsClient.List failed for %s: %v", scaleSet.Name, err) + klog.Errorf("VirtualMachineScaleSetVMsClient.List failed for %s: %v", scaleSet.Name, err) return nil, err } - for _, vm := range result { - instanceIDs = append(instanceIDs, *vm.InstanceID) - } - - allVMs := make([]compute.VirtualMachineScaleSetVM, 0) - for _, instanceID := range instanceIDs { - getCtx, getCancel := getContextWithCancel() - defer getCancel() - - vm, err := scaleSet.manager.azClient.virtualMachineScaleSetVMsClient.Get(getCtx, resourceGroup, scaleSet.Name, instanceID) - if err != nil { - exists, realErr := checkResourceExistsFromError(err) - if realErr != nil { - glog.Errorf("Failed to get VirtualMachineScaleSetVM by (%s,%s), error: %v", scaleSet.Name, instanceID, err) - return nil, realErr - } - - if !exists { - glog.Warningf("Couldn't find VirtualMachineScaleSetVM by (%s,%s), assuming it has been removed", scaleSet.Name, instanceID) - continue - } + allVMs := make([]string, 0) + for _, vm := range vmList { + // The resource ID is empty string, which indicates the instance may be in deleting state. + if len(*vm.ID) == 0 { + continue } - allVMs = append(allVMs, vm) + allVMs = append(allVMs, *vm.ID) } return allVMs, nil @@ -259,7 +245,7 @@ func (scaleSet *ScaleSet) DecreaseTargetSize(delta int) error { // Belongs returns true if the given node belongs to the NodeGroup. 
func (scaleSet *ScaleSet) Belongs(node *apiv1.Node) (bool, error) { - glog.V(6).Infof("Check if node belongs to this scale set: scaleset:%v, node:%v\n", scaleSet, node) + klog.V(6).Infof("Check if node belongs to this scale set: scaleset:%v, node:%v\n", scaleSet, node) ref := &azureRef{ Name: node.Spec.ProviderID, @@ -272,7 +258,7 @@ func (scaleSet *ScaleSet) Belongs(node *apiv1.Node) (bool, error) { if targetAsg == nil { return false, fmt.Errorf("%s doesn't belong to a known scale set", node.Name) } - if targetAsg.Id() != scaleSet.Id() { + if !strings.EqualFold(targetAsg.Id(), scaleSet.Id()) { return false, nil } return true, nil @@ -284,7 +270,7 @@ func (scaleSet *ScaleSet) DeleteInstances(instances []*azureRef) error { return nil } - glog.V(3).Infof("Deleting vmss instances %q", instances) + klog.V(3).Infof("Deleting vmss instances %q", instances) commonAsg, err := scaleSet.manager.GetAsgForInstance(instances[0]) if err != nil { @@ -298,13 +284,13 @@ func (scaleSet *ScaleSet) DeleteInstances(instances []*azureRef) error { return err } - if asg != commonAsg { + if !strings.EqualFold(asg.Id(), commonAsg.Id()) { return fmt.Errorf("cannot delete instance (%s) which don't belong to the same Scale Set (%q)", instance.Name, commonAsg) } instanceID, err := getLastSegment(instance.Name) if err != nil { - glog.Errorf("getLastSegment failed with error: %v", err) + klog.Errorf("getLastSegment failed with error: %v", err) return err } @@ -323,7 +309,7 @@ func (scaleSet *ScaleSet) DeleteInstances(instances []*azureRef) error { // DeleteNodes deletes the nodes from the group. func (scaleSet *ScaleSet) DeleteNodes(nodes []*apiv1.Node) error { - glog.V(8).Infof("Delete nodes requested: %q\n", nodes) + klog.V(8).Infof("Delete nodes requested: %q\n", nodes) size, err := scaleSet.GetScaleSetSize() if err != nil { return err @@ -446,20 +432,20 @@ func (scaleSet *ScaleSet) TemplateNodeInfo() (*schedulercache.NodeInfo, error) { } // Nodes returns a list of all nodes that belong to this node group. 
-func (scaleSet *ScaleSet) Nodes() ([]string, error) { +func (scaleSet *ScaleSet) Nodes() ([]cloudprovider.Instance, error) { + scaleSet.mutex.Lock() + defer scaleSet.mutex.Unlock() + vms, err := scaleSet.GetScaleSetVms() if err != nil { return nil, err } - result := make([]string, 0, len(vms)) + instances := make([]cloudprovider.Instance, 0, len(vms)) for i := range vms { - if len(*vms[i].ID) == 0 { - continue - } - name := "azure://" + *vms[i].ID - result = append(result, name) + name := "azure://" + strings.ToLower(vms[i]) + instances = append(instances, cloudprovider.Instance{Id: name}) } - return result, nil + return instances, nil } diff --git a/cluster-autoscaler/cloudprovider/azure/azure_scale_set_test.go b/cluster-autoscaler/cloudprovider/azure/azure_scale_set_test.go index 9286f8dbbf57..d9d9451ea44a 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_scale_set_test.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_scale_set_test.go @@ -91,7 +91,7 @@ func TestBelongs(t *testing.T) { invalidNode := &apiv1.Node{ Spec: apiv1.NodeSpec{ - ProviderID: "azure:///subscriptions/test-subscrition-id/resourceGroups/invalid-asg/providers/Microsoft.Compute/virtualMachineScaleSets/agents/virtualMachines/0", + ProviderID: "azure:///subscriptions/test-subscrition-id/resourcegroups/invalid-asg/providers/microsoft.compute/virtualmachinescalesets/agents/virtualmachines/0", }, } _, err := scaleSet.Belongs(invalidNode) @@ -158,3 +158,32 @@ func TestDebug(t *testing.T) { asg.Name = "test-scale-set" assert.Equal(t, asg.Debug(), "test-scale-set (5:55)") } + +func TestScaleSetNodes(t *testing.T) { + provider := newTestProvider(t) + registered := provider.azureManager.RegisterAsg( + newTestScaleSet(provider.azureManager, "test-asg")) + assert.True(t, registered) + assert.Equal(t, len(provider.NodeGroups()), 1) + + fakeProviderID := "azure://" + fakeVirtualMachineScaleSetVMID + node := &apiv1.Node{ + Spec: apiv1.NodeSpec{ + ProviderID: fakeProviderID, + }, + } + group, err := provider.NodeGroupForNode(node) + assert.NoError(t, err) + assert.NotNil(t, group, "Group should not be nil") + assert.Equal(t, group.Id(), "test-asg") + assert.Equal(t, group.MinSize(), 1) + assert.Equal(t, group.MaxSize(), 5) + + ss, ok := group.(*ScaleSet) + assert.True(t, ok) + assert.NotNil(t, ss) + instances, err := group.Nodes() + assert.NoError(t, err) + assert.Equal(t, len(instances), 1) + assert.Equal(t, instances[0], cloudprovider.Instance{Id: fakeProviderID}) +} diff --git a/cluster-autoscaler/cloudprovider/azure/azure_util.go b/cluster-autoscaler/cloudprovider/azure/azure_util.go index 82f6e69a9905..3d4f6a650acb 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_util.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_util.go @@ -30,12 +30,12 @@ import ( "strconv" "strings" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" azStorage "github.com/Azure/azure-sdk-for-go/storage" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/to" - "github.com/golang/glog" "golang.org/x/crypto/pkcs12" + "k8s.io/klog" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/client-go/pkg/version" @@ -70,15 +70,17 @@ const ( k8sLinuxVMAgentClusterIDIndex = 2 k8sLinuxVMAgentIndexArrayIndex = 3 - k8sWindowsVMNamingFormat = "^([a-fA-F0-9]{5})([0-9a-zA-Z]{3})([a-zA-Z0-9]{4,6})$" + k8sWindowsOldVMNamingFormat = "^([a-fA-F0-9]{5})([0-9a-zA-Z]{3})([9])([a-zA-Z0-9]{3,5})$" + 
k8sWindowsVMNamingFormat = "^([a-fA-F0-9]{4})([0-9a-zA-Z]{3})([0-9]{3,8})$" k8sWindowsVMAgentPoolPrefixIndex = 1 k8sWindowsVMAgentOrchestratorNameIndex = 2 k8sWindowsVMAgentPoolInfoIndex = 3 ) var ( - vmnameLinuxRegexp = regexp.MustCompile(k8sLinuxVMNamingFormat) - vmnameWindowsRegexp = regexp.MustCompile(k8sWindowsVMNamingFormat) + vmnameLinuxRegexp = regexp.MustCompile(k8sLinuxVMNamingFormat) + vmnameWindowsRegexp = regexp.MustCompile(k8sWindowsVMNamingFormat) + oldvmnameWindowsRegexp = regexp.MustCompile(k8sWindowsOldVMNamingFormat) ) //AzUtil consists of utility functions which utilizes clients to different services. @@ -88,7 +90,7 @@ type AzUtil struct { manager *AzureManager } -// DeleteBlob deletes the blob using the storage storage client. +// DeleteBlob deletes the blob using the storage client. func (util *AzUtil) DeleteBlob(accountName, vhdContainer, vhdBlob string) error { ctx, cancel := getContextWithCancel() defer cancel() @@ -119,18 +121,18 @@ func (util *AzUtil) DeleteVirtualMachine(rg string, name string) error { vm, err := util.manager.azClient.virtualMachinesClient.Get(ctx, rg, name, "") if err != nil { if exists, _ := checkResourceExistsFromError(err); !exists { - glog.V(2).Infof("VirtualMachine %s/%s has already been removed", rg, name) + klog.V(2).Infof("VirtualMachine %s/%s has already been removed", rg, name) return nil } - glog.Errorf("failed to get VM: %s/%s: %s", rg, name, err.Error()) + klog.Errorf("failed to get VM: %s/%s: %s", rg, name, err.Error()) return err } vhd := vm.VirtualMachineProperties.StorageProfile.OsDisk.Vhd managedDisk := vm.VirtualMachineProperties.StorageProfile.OsDisk.ManagedDisk if vhd == nil && managedDisk == nil { - glog.Errorf("failed to get a valid os disk URI for VM: %s/%s", rg, name) + klog.Errorf("failed to get a valid os disk URI for VM: %s/%s", rg, name) return fmt.Errorf("os disk does not have a VHD URI") } @@ -138,38 +140,38 @@ func (util *AzUtil) DeleteVirtualMachine(rg string, name string) error { var nicName string nicID := (*vm.VirtualMachineProperties.NetworkProfile.NetworkInterfaces)[0].ID if nicID == nil { - glog.Warningf("NIC ID is not set for VM (%s/%s)", rg, name) + klog.Warningf("NIC ID is not set for VM (%s/%s)", rg, name) } else { nicName, err = resourceName(*nicID) if err != nil { return err } - glog.Infof("found nic name for VM (%s/%s): %s", rg, name, nicName) + klog.Infof("found nic name for VM (%s/%s): %s", rg, name, nicName) } - glog.Infof("deleting VM: %s/%s", rg, name) + klog.Infof("deleting VM: %s/%s", rg, name) deleteCtx, deleteCancel := getContextWithCancel() defer deleteCancel() - glog.Infof("waiting for VirtualMachine deletion: %s/%s", rg, name) + klog.Infof("waiting for VirtualMachine deletion: %s/%s", rg, name) _, err = util.manager.azClient.virtualMachinesClient.Delete(deleteCtx, rg, name) _, realErr := checkResourceExistsFromError(err) if realErr != nil { return realErr } - glog.V(2).Infof("VirtualMachine %s/%s removed", rg, name) + klog.V(2).Infof("VirtualMachine %s/%s removed", rg, name) if len(nicName) > 0 { - glog.Infof("deleting nic: %s/%s", rg, nicName) + klog.Infof("deleting nic: %s/%s", rg, nicName) interfaceCtx, interfaceCancel := getContextWithCancel() defer interfaceCancel() - glog.Infof("waiting for nic deletion: %s/%s", rg, nicName) + klog.Infof("waiting for nic deletion: %s/%s", rg, nicName) _, nicErr := util.manager.azClient.interfacesClient.Delete(interfaceCtx, rg, nicName) _, realErr := checkResourceExistsFromError(nicErr) if realErr != nil { return realErr } - 
glog.V(2).Infof("interface %s/%s removed", rg, nicName) + klog.V(2).Infof("interface %s/%s removed", rg, nicName) } if vhd != nil { @@ -178,21 +180,21 @@ func (util *AzUtil) DeleteVirtualMachine(rg string, name string) error { return err } - glog.Infof("found os disk storage reference: %s %s %s", accountName, vhdContainer, vhdBlob) + klog.Infof("found os disk storage reference: %s %s %s", accountName, vhdContainer, vhdBlob) - glog.Infof("deleting blob: %s/%s", vhdContainer, vhdBlob) + klog.Infof("deleting blob: %s/%s", vhdContainer, vhdBlob) if err = util.DeleteBlob(accountName, vhdContainer, vhdBlob); err != nil { _, realErr := checkResourceExistsFromError(err) if realErr != nil { return realErr } - glog.V(2).Infof("Blob %s/%s removed", rg, vhdBlob) + klog.V(2).Infof("Blob %s/%s removed", rg, vhdBlob) } } else if managedDisk != nil { if osDiskName == nil { - glog.Warningf("osDisk is not set for VM %s/%s", rg, name) + klog.Warningf("osDisk is not set for VM %s/%s", rg, name) } else { - glog.Infof("deleting managed disk: %s/%s", rg, *osDiskName) + klog.Infof("deleting managed disk: %s/%s", rg, *osDiskName) disksCtx, disksCancel := getContextWithCancel() defer disksCancel() _, diskErr := util.manager.azClient.disksClient.Delete(disksCtx, rg, *osDiskName) @@ -200,7 +202,7 @@ func (util *AzUtil) DeleteVirtualMachine(rg string, name string) error { if realErr != nil { return realErr } - glog.V(2).Infof("disk %s/%s removed", rg, *osDiskName) + klog.V(2).Infof("disk %s/%s removed", rg, *osDiskName) } } @@ -242,7 +244,7 @@ func normalizeForK8sVMASScalingUp(templateMap map[string]interface{}) error { for index, resource := range resources { resourceMap, ok := resource.(map[string]interface{}) if !ok { - glog.Warningf("Template improperly formatted for resource") + klog.Warning("Template improperly formatted for resource") continue } @@ -250,7 +252,7 @@ func normalizeForK8sVMASScalingUp(templateMap map[string]interface{}) error { if ok && resourceType == nsgResourceType { if nsgIndex != -1 { err := fmt.Errorf("Found 2 resources with type %s in the template. There should only be 1", nsgResourceType) - glog.Errorf(err.Error()) + klog.Errorf(err.Error()) return err } nsgIndex = index @@ -258,7 +260,7 @@ func normalizeForK8sVMASScalingUp(templateMap map[string]interface{}) error { if ok && resourceType == rtResourceType { if rtIndex != -1 { err := fmt.Errorf("Found 2 resources with type %s in the template. There should only be 1", rtResourceType) - glog.Warningf(err.Error()) + klog.Warningf(err.Error()) return err } rtIndex = index @@ -287,11 +289,11 @@ func normalizeForK8sVMASScalingUp(templateMap map[string]interface{}) error { indexesToRemove := []int{} if nsgIndex == -1 { err := fmt.Errorf("Found no resources with type %s in the template. 
There should have been 1", nsgResourceType) - glog.Errorf(err.Error()) + klog.Errorf(err.Error()) return err } if rtIndex == -1 { - glog.Infof("Found no resources with type %s in the template.", rtResourceType) + klog.Infof("Found no resources with type %s in the template.", rtResourceType) } else { indexesToRemove = append(indexesToRemove, rtIndex) } @@ -317,7 +319,7 @@ func normalizeMasterResourcesForScaling(templateMap map[string]interface{}) erro for index, resource := range resources { resourceMap, ok := resource.(map[string]interface{}) if !ok { - glog.Warningf("Template improperly formatted") + klog.Warning("Template improperly formatted") continue } @@ -325,7 +327,7 @@ func normalizeMasterResourcesForScaling(templateMap map[string]interface{}) erro if !ok || resourceType != vmResourceType { resourceName, ok := resourceMap[nameFieldName].(string) if !ok { - glog.Warningf("Template improperly formatted") + klog.Warning("Template improperly formatted") continue } if strings.Contains(resourceName, "variables('masterVMNamePrefix')") && resourceType == vmExtensionType { @@ -336,7 +338,7 @@ func normalizeMasterResourcesForScaling(templateMap map[string]interface{}) erro resourceName, ok := resourceMap[nameFieldName].(string) if !ok { - glog.Warningf("Template improperly formatted") + klog.Warning("Template improperly formatted") continue } @@ -347,13 +349,13 @@ func normalizeMasterResourcesForScaling(templateMap map[string]interface{}) erro resourceProperties, ok := resourceMap[propertiesFieldName].(map[string]interface{}) if !ok { - glog.Warningf("Template improperly formatted") + klog.Warning("Template improperly formatted") continue } hardwareProfile, ok := resourceProperties[hardwareProfileFieldName].(map[string]interface{}) if !ok { - glog.Warningf("Template improperly formatted") + klog.Warning("Template improperly formatted") continue } @@ -373,7 +375,7 @@ func normalizeMasterResourcesForScaling(templateMap map[string]interface{}) erro func removeCustomData(resourceProperties map[string]interface{}) bool { osProfile, ok := resourceProperties[osProfileFieldName].(map[string]interface{}) if !ok { - glog.Warningf("Template improperly formatted") + klog.Warning("Template improperly formatted") return ok } @@ -386,7 +388,7 @@ func removeCustomData(resourceProperties map[string]interface{}) bool { func removeImageReference(resourceProperties map[string]interface{}) bool { storageProfile, ok := resourceProperties[storageProfileFieldName].(map[string]interface{}) if !ok { - glog.Warningf("Template improperly formatted. Could not find: %s", storageProfileFieldName) + klog.Warningf("Template improperly formatted. 
Could not find: %s", storageProfileFieldName) return ok } @@ -439,28 +441,33 @@ func k8sLinuxVMNameParts(vmName string) (poolIdentifier, nameSuffix string, agen return vmNameParts[k8sLinuxVMAgentPoolNameIndex], vmNameParts[k8sLinuxVMAgentClusterIDIndex], vmNum, nil } -// windowsVMNameParts returns parts of Windows VM name e.g: 50621k8s9000 -func windowsVMNameParts(vmName string) (poolPrefix string, acsStr string, poolIndex int, agentIndex int, err error) { - vmNameParts := vmnameWindowsRegexp.FindStringSubmatch(vmName) - if len(vmNameParts) != 4 { - return "", "", -1, -1, fmt.Errorf("resource name was missing from identifier") +// windowsVMNameParts returns parts of Windows VM name +func windowsVMNameParts(vmName string) (poolPrefix string, orch string, poolIndex int, agentIndex int, err error) { + var poolInfo string + vmNameParts := oldvmnameWindowsRegexp.FindStringSubmatch(vmName) + if len(vmNameParts) != 5 { + vmNameParts = vmnameWindowsRegexp.FindStringSubmatch(vmName) + if len(vmNameParts) != 4 { + return "", "", -1, -1, fmt.Errorf("resource name was missing from identifier") + } + poolInfo = vmNameParts[3] + } else { + poolInfo = vmNameParts[4] } - poolPrefix = vmNameParts[k8sWindowsVMAgentPoolPrefixIndex] - acsStr = vmNameParts[k8sWindowsVMAgentOrchestratorNameIndex] - poolInfo := vmNameParts[k8sWindowsVMAgentPoolInfoIndex] + poolPrefix = vmNameParts[1] + orch = vmNameParts[2] - poolIndex, err = strconv.Atoi(poolInfo[:3]) + poolIndex, err = strconv.Atoi(poolInfo[:2]) if err != nil { - return "", "", -1, -1, fmt.Errorf("Error parsing VM Name: %v", err) + return "", "", -1, -1, fmt.Errorf("error parsing VM Name: %v", err) } - - agentIndex, err = strconv.Atoi(poolInfo[3:]) + agentIndex, err = strconv.Atoi(poolInfo[2:]) if err != nil { - return "", "", -1, -1, fmt.Errorf("Error parsing VM Name: %v", err) + return "", "", -1, -1, fmt.Errorf("error parsing VM Name: %v", err) } - return poolPrefix, acsStr, poolIndex, agentIndex, nil + return poolPrefix, orch, poolIndex, agentIndex, nil } // GetVMNameIndex return the index of VM in the node pools. @@ -565,13 +572,13 @@ func getLastSegment(ID string) (string, error) { func readDeploymentParameters(paramFilePath string) (map[string]interface{}, error) { contents, err := ioutil.ReadFile(paramFilePath) if err != nil { - glog.Errorf("Failed to read deployment parameters from file %q: %v", paramFilePath, err) + klog.Errorf("Failed to read deployment parameters from file %q: %v", paramFilePath, err) return nil, err } deploymentParameters := make(map[string]interface{}) if err := json.Unmarshal(contents, &deploymentParameters); err != nil { - glog.Errorf("Failed to unmarshal deployment parameters from file %q: %v", paramFilePath, err) + klog.Errorf("Failed to unmarshal deployment parameters from file %q: %v", paramFilePath, err) return nil, err } @@ -602,3 +609,22 @@ func checkResourceExistsFromError(err error) (bool, error) { } return false, v } + +// isSuccessHTTPResponse determines if the response from an HTTP request suggests success +func isSuccessHTTPResponse(resp *http.Response, err error) (isSuccess bool, realError error) { + if err != nil { + return false, err + } + + if resp != nil { + // HTTP 2xx suggests a successful response + if 199 < resp.StatusCode && resp.StatusCode < 300 { + return true, nil + } + + return false, fmt.Errorf("failed with HTTP status code %d", resp.StatusCode) + } + + // This shouldn't happen, it only ensures all exceptions are handled. 
+ return false, fmt.Errorf("failed with unknown error") +} diff --git a/cluster-autoscaler/cloudprovider/azure/azure_util_test.go b/cluster-autoscaler/cloudprovider/azure/azure_util_test.go index 8ed748e86a34..c93cb0f3825f 100644 --- a/cluster-autoscaler/cloudprovider/azure/azure_util_test.go +++ b/cluster-autoscaler/cloudprovider/azure/azure_util_test.go @@ -18,9 +18,11 @@ package azure import ( "fmt" + "net/http" "testing" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" + "github.com/stretchr/testify/assert" ) func TestSplitBlobURI(t *testing.T) { @@ -71,26 +73,33 @@ func TestK8sLinuxVMNameParts(t *testing.T) { } func TestWindowsVMNameParts(t *testing.T) { - expectedPoolPrefix := "38988" - expectedAcs := "k8s" - expectedPoolIndex := 903 - expectedAgentIndex := 12 - - poolPrefix, acs, poolIndex, agentIndex, err := windowsVMNameParts("38988k8s90312") - if poolPrefix != expectedPoolPrefix { - t.Fatalf("incorrect poolPrefix. expected=%s actual=%s", expectedPoolPrefix, poolPrefix) - } - if acs != expectedAcs { - t.Fatalf("incorrect acs string. expected=%s actual=%s", expectedAcs, acs) - } - if poolIndex != expectedPoolIndex { - t.Fatalf("incorrect poolIndex. expected=%d actual=%d", expectedPoolIndex, poolIndex) - } - if agentIndex != expectedAgentIndex { - t.Fatalf("incorrect agentIndex. expected=%d actual=%d", expectedAgentIndex, agentIndex) + data := []struct { + VMName, expectedPoolPrefix, expectedOrch string + expectedPoolIndex, expectedAgentIndex int + }{ + {"38988k8s90312", "38988", "k8s", 3, 12}, + {"4506k8s010", "4506", "k8s", 1, 0}, + {"2314k8s03000001", "2314", "k8s", 3, 1}, + {"2314k8s0310", "2314", "k8s", 3, 10}, } - if err != nil { - t.Fatalf("unexpected error: %s", err) + + for _, d := range data { + poolPrefix, orch, poolIndex, agentIndex, err := windowsVMNameParts(d.VMName) + if poolPrefix != d.expectedPoolPrefix { + t.Fatalf("incorrect poolPrefix. expected=%s actual=%s", d.expectedPoolPrefix, poolPrefix) + } + if orch != d.expectedOrch { + t.Fatalf("incorrect acs string. expected=%s actual=%s", d.expectedOrch, orch) + } + if poolIndex != d.expectedPoolIndex { + t.Fatalf("incorrect poolIndex. expected=%d actual=%d", d.expectedPoolIndex, poolIndex) + } + if agentIndex != d.expectedAgentIndex { + t.Fatalf("incorrect agentIndex. 
expected=%d actual=%d", d.expectedAgentIndex, agentIndex) + } + if err != nil { + t.Fatalf("unexpected error: %s", err) + } } } @@ -117,3 +126,66 @@ func TestGetVMNameIndexWindows(t *testing.T) { t.Fatalf("unexpected error: %s", err) } } + +func TestIsSuccessResponse(t *testing.T) { + tests := []struct { + name string + resp *http.Response + err error + expected bool + expectedError error + }{ + { + name: "both resp and err nil should report error", + expected: false, + expectedError: fmt.Errorf("failed with unknown error"), + }, + { + name: "http.StatusNotFound should report error", + resp: &http.Response{ + StatusCode: http.StatusNotFound, + }, + expected: false, + expectedError: fmt.Errorf("failed with HTTP status code %d", http.StatusNotFound), + }, + { + name: "http.StatusInternalServerError should report error", + resp: &http.Response{ + StatusCode: http.StatusInternalServerError, + }, + expected: false, + expectedError: fmt.Errorf("failed with HTTP status code %d", http.StatusInternalServerError), + }, + { + name: "http.StatusOK shouldn't report error", + resp: &http.Response{ + StatusCode: http.StatusOK, + }, + expected: true, + }, + { + name: "non-nil response error with http.StatusOK should report error", + resp: &http.Response{ + StatusCode: http.StatusOK, + }, + err: fmt.Errorf("test error"), + expected: false, + expectedError: fmt.Errorf("test error"), + }, + { + name: "non-nil response error with http.StatusInternalServerError should report error", + resp: &http.Response{ + StatusCode: http.StatusInternalServerError, + }, + err: fmt.Errorf("test error"), + expected: false, + expectedError: fmt.Errorf("test error"), + }, + } + + for _, test := range tests { + result, realError := isSuccessHTTPResponse(test.resp, test.err) + assert.Equal(t, test.expected, result, "[%s] expected: %v, saw: %v", test.name, result, test.expected) + assert.Equal(t, test.expectedError, realError, "[%s] expected: %v, saw: %v", test.name, realError, test.expectedError) + } +} diff --git a/cluster-autoscaler/cloudprovider/azure/cluster-autoscaler-containerservice.yaml b/cluster-autoscaler/cloudprovider/azure/cluster-autoscaler-containerservice.yaml index f3be49d84b01..89b5b3506708 100644 --- a/cluster-autoscaler/cloudprovider/azure/cluster-autoscaler-containerservice.yaml +++ b/cluster-autoscaler/cloudprovider/azure/cluster-autoscaler-containerservice.yaml @@ -42,7 +42,7 @@ rules: resources: ["poddisruptionbudgets"] verbs: ["watch","list"] - apiGroups: ["apps"] - resources: ["statefulsets"] + resources: ["statefulsets", "replicasets"] verbs: ["watch","list","get"] - apiGroups: ["storage.k8s.io"] resources: ["storageclasses"] diff --git a/cluster-autoscaler/cloudprovider/azure/cluster-autoscaler-standard-master.yaml b/cluster-autoscaler/cloudprovider/azure/cluster-autoscaler-standard-master.yaml index ec4203614561..7e1a9a2f972c 100644 --- a/cluster-autoscaler/cloudprovider/azure/cluster-autoscaler-standard-master.yaml +++ b/cluster-autoscaler/cloudprovider/azure/cluster-autoscaler-standard-master.yaml @@ -42,7 +42,7 @@ rules: resources: ["poddisruptionbudgets"] verbs: ["watch","list"] - apiGroups: ["apps"] - resources: ["statefulsets"] + resources: ["statefulsets", "replicasets"] verbs: ["watch","list","get"] - apiGroups: ["storage.k8s.io"] resources: ["storageclasses"] diff --git a/cluster-autoscaler/cloudprovider/azure/cluster-autoscaler-standard-msi.yaml b/cluster-autoscaler/cloudprovider/azure/cluster-autoscaler-standard-msi.yaml index 67e349e65cc6..6dec82b37440 100644 --- 
a/cluster-autoscaler/cloudprovider/azure/cluster-autoscaler-standard-msi.yaml +++ b/cluster-autoscaler/cloudprovider/azure/cluster-autoscaler-standard-msi.yaml @@ -42,7 +42,7 @@ rules: resources: ["poddisruptionbudgets"] verbs: ["watch","list"] - apiGroups: ["apps"] - resources: ["statefulsets"] + resources: ["statefulsets", "replicasets"] verbs: ["watch","list","get"] - apiGroups: ["storage.k8s.io"] resources: ["storageclasses"] diff --git a/cluster-autoscaler/cloudprovider/azure/cluster-autoscaler-standard.yaml b/cluster-autoscaler/cloudprovider/azure/cluster-autoscaler-standard.yaml index 0062e5f24acc..f43291899f26 100644 --- a/cluster-autoscaler/cloudprovider/azure/cluster-autoscaler-standard.yaml +++ b/cluster-autoscaler/cloudprovider/azure/cluster-autoscaler-standard.yaml @@ -42,7 +42,7 @@ rules: resources: ["poddisruptionbudgets"] verbs: ["watch","list"] - apiGroups: ["apps"] - resources: ["statefulsets"] + resources: ["statefulsets", "replicasets"] verbs: ["watch","list","get"] - apiGroups: ["storage.k8s.io"] resources: ["storageclasses"] diff --git a/cluster-autoscaler/cloudprovider/azure/cluster-autoscaler-vmss-master.yaml b/cluster-autoscaler/cloudprovider/azure/cluster-autoscaler-vmss-master.yaml index 5b351633501a..115cc90e9e22 100644 --- a/cluster-autoscaler/cloudprovider/azure/cluster-autoscaler-vmss-master.yaml +++ b/cluster-autoscaler/cloudprovider/azure/cluster-autoscaler-vmss-master.yaml @@ -42,7 +42,7 @@ rules: resources: ["poddisruptionbudgets"] verbs: ["watch","list"] - apiGroups: ["apps"] - resources: ["statefulsets"] + resources: ["statefulsets", "replicasets"] verbs: ["watch","list","get"] - apiGroups: ["storage.k8s.io"] resources: ["storageclasses"] diff --git a/cluster-autoscaler/cloudprovider/azure/cluster-autoscaler-vmss-msi.yaml b/cluster-autoscaler/cloudprovider/azure/cluster-autoscaler-vmss-msi.yaml index a341e897c299..274ff00a082b 100644 --- a/cluster-autoscaler/cloudprovider/azure/cluster-autoscaler-vmss-msi.yaml +++ b/cluster-autoscaler/cloudprovider/azure/cluster-autoscaler-vmss-msi.yaml @@ -42,7 +42,7 @@ rules: resources: ["poddisruptionbudgets"] verbs: ["watch","list"] - apiGroups: ["apps"] - resources: ["statefulsets"] + resources: ["statefulsets", "replicasets"] verbs: ["watch","list","get"] - apiGroups: ["storage.k8s.io"] resources: ["storageclasses"] diff --git a/cluster-autoscaler/cloudprovider/azure/cluster-autoscaler-vmss.yaml b/cluster-autoscaler/cloudprovider/azure/cluster-autoscaler-vmss.yaml index faf4cd9ba9d1..29416a115d75 100644 --- a/cluster-autoscaler/cloudprovider/azure/cluster-autoscaler-vmss.yaml +++ b/cluster-autoscaler/cloudprovider/azure/cluster-autoscaler-vmss.yaml @@ -42,7 +42,7 @@ rules: resources: ["poddisruptionbudgets"] verbs: ["watch","list"] - apiGroups: ["apps"] - resources: ["statefulsets"] + resources: ["statefulsets", "replicasets"] verbs: ["watch","list","get"] - apiGroups: ["storage.k8s.io"] resources: ["storageclasses"] diff --git a/cluster-autoscaler/cloudprovider/builder/builder_alicloud.go b/cluster-autoscaler/cloudprovider/builder/builder_alicloud.go new file mode 100644 index 000000000000..3784021c9e92 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/builder/builder_alicloud.go @@ -0,0 +1,42 @@ +// +build alicloud + +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud" + "k8s.io/autoscaler/cluster-autoscaler/config" +) + +// AvailableCloudProviders supported by the cloud provider builder. +var AvailableCloudProviders = []string{ + alicloud.ProviderName, +} + +// DefaultCloudProvider for alicloud-only build is alicloud. +const DefaultCloudProvider = alicloud.ProviderName + +func buildCloudProvider(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider { + switch opts.CloudProviderName { + case alicloud.ProviderName: + return alicloud.BuildAlicloud(opts, do, rl) + } + + return nil +} diff --git a/cluster-autoscaler/cloudprovider/builder/builder_all.go b/cluster-autoscaler/cloudprovider/builder/builder_all.go new file mode 100644 index 000000000000..fafcd15689da --- /dev/null +++ b/cluster-autoscaler/cloudprovider/builder/builder_all.go @@ -0,0 +1,61 @@ +// +build !gce,!aws,!azure,!kubemark,!alicloud,!openshiftmachineapi + +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/alicloud" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/azure" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/gce" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/gke" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/openshiftmachineapi" + "k8s.io/autoscaler/cluster-autoscaler/config" +) + +// AvailableCloudProviders supported by the cloud provider builder. +var AvailableCloudProviders = []string{ + aws.ProviderName, + azure.ProviderName, + gce.ProviderNameGCE, + gke.ProviderNameGKE, + alicloud.ProviderName, + openshiftmachineapi.ProviderName, +} + +// DefaultCloudProvider is GCE. 
+const DefaultCloudProvider = gce.ProviderNameGCE + +func buildCloudProvider(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider { + switch opts.CloudProviderName { + case gce.ProviderNameGCE: + return gce.BuildGCE(opts, do, rl) + case gke.ProviderNameGKE: + return gke.BuildGKE(opts, do, rl) + case aws.ProviderName: + return aws.BuildAWS(opts, do, rl) + case azure.ProviderName: + return azure.BuildAzure(opts, do, rl) + case alicloud.ProviderName: + return alicloud.BuildAlicloud(opts, do, rl) + case openshiftmachineapi.ProviderName: + return openshiftmachineapi.BuildOpenShiftMachineAPI(opts, do, rl) + } + return nil +} diff --git a/cluster-autoscaler/cloudprovider/builder/builder_aws.go b/cluster-autoscaler/cloudprovider/builder/builder_aws.go new file mode 100644 index 000000000000..5c9c3fb4d8f9 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/builder/builder_aws.go @@ -0,0 +1,42 @@ +// +build aws + +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws" + "k8s.io/autoscaler/cluster-autoscaler/config" +) + +// AvailableCloudProviders supported by the cloud provider builder. +var AvailableCloudProviders = []string{ + aws.ProviderName, +} + +// DefaultCloudProvider for AWS-only build is AWS. +const DefaultCloudProvider = aws.ProviderName + +func buildCloudProvider(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider { + switch opts.CloudProviderName { + case aws.ProviderName: + return aws.BuildAWS(opts, do, rl) + } + + return nil +} diff --git a/cluster-autoscaler/cloudprovider/builder/builder_azure.go b/cluster-autoscaler/cloudprovider/builder/builder_azure.go new file mode 100644 index 000000000000..358786433f9d --- /dev/null +++ b/cluster-autoscaler/cloudprovider/builder/builder_azure.go @@ -0,0 +1,42 @@ +// +build azure + +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/azure" + "k8s.io/autoscaler/cluster-autoscaler/config" +) + +// AvailableCloudProviders supported by the cloud provider builder. 
+var AvailableCloudProviders = []string{ + azure.ProviderName, +} + +// DefaultCloudProvider on Azure-only build is Azure. +const DefaultCloudProvider = azure.ProviderName + +func buildCloudProvider(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider { + switch opts.CloudProviderName { + case azure.ProviderName: + return azure.BuildAzure(opts, do, rl) + } + + return nil +} diff --git a/cluster-autoscaler/cloudprovider/builder/builder_gce.go b/cluster-autoscaler/cloudprovider/builder/builder_gce.go new file mode 100644 index 000000000000..9307043f1031 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/builder/builder_gce.go @@ -0,0 +1,46 @@ +// +build gce + +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/gce" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/gke" + "k8s.io/autoscaler/cluster-autoscaler/config" +) + +// AvailableCloudProviders supported by the cloud provider builder. +var AvailableCloudProviders = []string{ + gce.ProviderNameGCE, + gke.ProviderNameGKE, +} + +// DefaultCloudProvider is GCE. +const DefaultCloudProvider = gce.ProviderNameGCE + +func buildCloudProvider(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider { + switch opts.CloudProviderName { + case gce.ProviderNameGCE: + return gce.BuildGCE(opts, do, rl) + case gke.ProviderNameGKE: + return gke.BuildGKE(opts, do, rl) + } + + return nil +} diff --git a/cluster-autoscaler/cloudprovider/builder/builder_kubemark.go b/cluster-autoscaler/cloudprovider/builder/builder_kubemark.go new file mode 100644 index 000000000000..0f0dca8faf73 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/builder/builder_kubemark.go @@ -0,0 +1,42 @@ +// +build kubemark + +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/kubemark" + "k8s.io/autoscaler/cluster-autoscaler/config" +) + +// AvailableCloudProviders supported by the cloud provider builder. +var AvailableCloudProviders = []string{ + kubemark.ProviderName, +} + +// DefaultCloudProvider for Kubemark-only build is Kubemark. 
+const DefaultCloudProvider = kubemark.ProviderName + +func buildCloudProvider(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider { + switch opts.CloudProviderName { + case kubemark.ProviderName: + return kubemark.BuildKubemark(opts, do, rl) + } + + return nil +} diff --git a/cluster-autoscaler/cloudprovider/builder/builder_openshiftmachineapi.go b/cluster-autoscaler/cloudprovider/builder/builder_openshiftmachineapi.go new file mode 100644 index 000000000000..c25e745e03aa --- /dev/null +++ b/cluster-autoscaler/cloudprovider/builder/builder_openshiftmachineapi.go @@ -0,0 +1,42 @@ +// +build openshiftmachineapi + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/openshiftmachineapi" + "k8s.io/autoscaler/cluster-autoscaler/config" +) + +// AvailableCloudProviders supported by the cloud provider builder. +var AvailableCloudProviders = []string{ + openshiftmachineapi.ProviderName, +} + +// DefaultCloudProvider for machineapi-only build. +const DefaultCloudProvider = openshiftmachineapi.ProviderName + +func buildCloudProvider(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider { + switch opts.CloudProviderName { + case openshiftmachineapi.ProviderName: + return openshiftmachineapi.BuildOpenShiftMachineAPI(opts, do, rl) + } + + return nil +} diff --git a/cluster-autoscaler/cloudprovider/builder/cloud_provider_builder.go b/cluster-autoscaler/cloudprovider/builder/cloud_provider_builder.go index 91bbf8261abe..5fca997e910b 100644 --- a/cluster-autoscaler/cloudprovider/builder/cloud_provider_builder.go +++ b/cluster-autoscaler/cloudprovider/builder/cloud_provider_builder.go @@ -17,218 +17,36 @@ limitations under the License. package builder import ( - "io" - "os" - "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" - "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws" - "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/azure" - "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/gce" - "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/gke" - "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/kubemark" - "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/openshiftmachineapi" "k8s.io/autoscaler/cluster-autoscaler/config" "k8s.io/autoscaler/cluster-autoscaler/context" - "k8s.io/client-go/informers" - kubeclient "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" - kubemarkcontroller "k8s.io/kubernetes/pkg/kubemark" - "github.com/golang/glog" + "k8s.io/klog" ) -// AvailableCloudProviders supported by the cloud provider builder. 
-var AvailableCloudProviders = []string{ - aws.ProviderName, - azure.ProviderName, - gce.ProviderNameGCE, - gke.ProviderNameGKE, - kubemark.ProviderName, - openshiftmachineapi.ProviderName, -} - -// DefaultCloudProvider is GCE. -const DefaultCloudProvider = gce.ProviderNameGCE - // NewCloudProvider builds a cloud provider from provided parameters. func NewCloudProvider(opts config.AutoscalingOptions) cloudprovider.CloudProvider { - glog.V(1).Infof("Building %s cloud provider.", opts.CloudProviderName) + klog.V(1).Infof("Building %s cloud provider.", opts.CloudProviderName) do := cloudprovider.NodeGroupDiscoveryOptions{ NodeGroupSpecs: opts.NodeGroups, NodeGroupAutoDiscoverySpecs: opts.NodeGroupAutoDiscovery, } + rl := context.NewResourceLimiterFromAutoscalingOptions(opts) - switch opts.CloudProviderName { - case gce.ProviderNameGCE: - return buildGCE(opts, do, rl) - case gke.ProviderNameGKE: - if do.DiscoverySpecified() { - glog.Fatalf("GKE gets nodegroup specification via API, command line specs are not allowed") - } - if opts.NodeAutoprovisioningEnabled { - return buildGKE(opts, rl, gke.ModeGKENAP) - } - return buildGKE(opts, rl, gke.ModeGKE) - case aws.ProviderName: - return buildAWS(opts, do, rl) - case azure.ProviderName: - return buildAzure(opts, do, rl) - case kubemark.ProviderName: - return buildKubemark(opts, do, rl) - case openshiftmachineapi.ProviderName: - return buildClusterAPI(openshiftmachineapi.ProviderName, opts, do, rl) - case "": + if opts.CloudProviderName == "" { // Ideally this would be an error, but several unit tests of the // StaticAutoscaler depend on this behaviour. - glog.Warning("Returning a nil cloud provider") + klog.Warning("Returning a nil cloud provider") return nil } - glog.Fatalf("Unknown cloud provider: %s", opts.CloudProviderName) - return nil // This will never happen because the Fatalf will os.Exit -} - -func buildGCE(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider { - var config io.ReadCloser - if opts.CloudConfig != "" { - var err error - config, err = os.Open(opts.CloudConfig) - if err != nil { - glog.Fatalf("Couldn't open cloud provider configuration %s: %#v", opts.CloudConfig, err) - } - defer config.Close() - } - - manager, err := gce.CreateGceManager(config, do, opts.Regional) - if err != nil { - glog.Fatalf("Failed to create GCE Manager: %v", err) + provider := buildCloudProvider(opts, do, rl) + if provider != nil { + return provider } - provider, err := gce.BuildGceCloudProvider(manager, rl) - if err != nil { - glog.Fatalf("Failed to create GCE cloud provider: %v", err) - } - return provider -} - -func buildGKE(opts config.AutoscalingOptions, rl *cloudprovider.ResourceLimiter, mode gke.GcpCloudProviderMode) cloudprovider.CloudProvider { - var config io.ReadCloser - if opts.CloudConfig != "" { - var err error - config, err = os.Open(opts.CloudConfig) - if err != nil { - glog.Fatalf("Couldn't open cloud provider configuration %s: %#v", opts.CloudConfig, err) - } - defer config.Close() - } - - manager, err := gke.CreateGkeManager(config, mode, opts.ClusterName, opts.Regional) - if err != nil { - glog.Fatalf("Failed to create GKE Manager: %v", err) - } - - provider, err := gke.BuildGkeCloudProvider(manager, rl) - if err != nil { - glog.Fatalf("Failed to create GKE cloud provider: %v", err) - } - return provider -} - -func buildAWS(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) 
cloudprovider.CloudProvider { - var config io.ReadCloser - if opts.CloudConfig != "" { - var err error - config, err = os.Open(opts.CloudConfig) - if err != nil { - glog.Fatalf("Couldn't open cloud provider configuration %s: %#v", opts.CloudConfig, err) - } - defer config.Close() - } - - manager, err := aws.CreateAwsManager(config, do) - if err != nil { - glog.Fatalf("Failed to create AWS Manager: %v", err) - } - - provider, err := aws.BuildAwsCloudProvider(manager, rl) - if err != nil { - glog.Fatalf("Failed to create AWS cloud provider: %v", err) - } - return provider -} - -func buildAzure(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider { - var config io.ReadCloser - if opts.CloudConfig != "" { - glog.Info("Creating Azure Manager using cloud-config file: %v", opts.CloudConfig) - var err error - config, err := os.Open(opts.CloudConfig) - if err != nil { - glog.Fatalf("Couldn't open cloud provider configuration %s: %#v", opts.CloudConfig, err) - } - defer config.Close() - } else { - glog.Info("Creating Azure Manager with default configuration.") - } - manager, err := azure.CreateAzureManager(config, do) - if err != nil { - glog.Fatalf("Failed to create Azure Manager: %v", err) - } - provider, err := azure.BuildAzureCloudProvider(manager, rl) - if err != nil { - glog.Fatalf("Failed to create Azure cloud provider: %v", err) - } - return provider -} - -func buildKubemark(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider { - externalConfig, err := rest.InClusterConfig() - if err != nil { - glog.Fatalf("Failed to get kubeclient config for external cluster: %v", err) - } - - kubemarkConfig, err := clientcmd.BuildConfigFromFlags("", "/kubeconfig/cluster_autoscaler.kubeconfig") - if err != nil { - glog.Fatalf("Failed to get kubeclient config for kubemark cluster: %v", err) - } - - stop := make(chan struct{}) - - externalClient := kubeclient.NewForConfigOrDie(externalConfig) - kubemarkClient := kubeclient.NewForConfigOrDie(kubemarkConfig) - - externalInformerFactory := informers.NewSharedInformerFactory(externalClient, 0) - kubemarkInformerFactory := informers.NewSharedInformerFactory(kubemarkClient, 0) - kubemarkNodeInformer := kubemarkInformerFactory.Core().V1().Nodes() - go kubemarkNodeInformer.Informer().Run(stop) - - kubemarkController, err := kubemarkcontroller.NewKubemarkController(externalClient, externalInformerFactory, - kubemarkClient, kubemarkNodeInformer) - if err != nil { - glog.Fatalf("Failed to create Kubemark cloud provider: %v", err) - } - - externalInformerFactory.Start(stop) - if !kubemarkController.WaitForCacheSync(stop) { - glog.Fatalf("Failed to sync caches for kubemark controller") - } - go kubemarkController.Run(stop) - - provider, err := kubemark.BuildKubemarkCloudProvider(kubemarkController, do.NodeGroupSpecs, rl) - if err != nil { - glog.Fatalf("Failed to create Kubemark cloud provider: %v", err) - } - return provider -} - -func buildClusterAPI(name string, opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider { - provider, err := openshiftmachineapi.BuildCloudProvider(name, opts, rl) - if err != nil { - glog.Fatalf("Failed to create %q cloud provider: %v", name, err) - } - - return provider + klog.Fatalf("Unknown cloud provider: %s", opts.CloudProviderName) + return nil // This will never happen because the 
Fatalf will os.Exit } diff --git a/cluster-autoscaler/cloudprovider/cloud_provider.go b/cluster-autoscaler/cloudprovider/cloud_provider.go index 98bf5ce76aa4..4672009a3b8f 100644 --- a/cluster-autoscaler/cloudprovider/cloud_provider.go +++ b/cluster-autoscaler/cloudprovider/cloud_provider.go @@ -60,6 +60,9 @@ type CloudProvider interface { // GetResourceLimiter returns struct containing limits (max, min) for resources (cores, memory etc.). GetResourceLimiter() (*ResourceLimiter, error) + // GetInstanceID gets the instance ID for the specified node. + GetInstanceID(node *apiv1.Node) string + // Cleanup cleans up open resources before the cloud provider is destroyed, i.e. go routines etc. Cleanup() error @@ -117,7 +120,9 @@ type NodeGroup interface { Debug() string // Nodes returns a list of all nodes that belong to this node group. - Nodes() ([]string, error) + // It is required that Instance objects returned by this method have Id field set. + // Other fields are optional. + Nodes() ([]Instance, error) // TemplateNodeInfo returns a schedulercache.NodeInfo structure of an empty // (as if just started) node. This will be used in scale-up simulations to @@ -144,6 +149,58 @@ type NodeGroup interface { Autoprovisioned() bool } +// Instance represents a cloud-provider node. The node does not necessarily map to k8s node +// i.e it does not have to be registered in k8s cluster despite being returned by NodeGroup.Nodes() +// method. Also it is sane to have Instance object for nodes which are being created or deleted. +type Instance struct { + // Id is instance id. + Id string + // Status represents status of node. (Optional) + Status *InstanceStatus +} + +// InstanceStatus represents instance status. +type InstanceStatus struct { + // State tells if instance is running, being created or being deleted + State InstanceState + // ErrorInfo is not nil if there is error condition related to instance. + // E.g instance cannot be created. + ErrorInfo *InstanceErrorInfo +} + +// InstanceState tells if instance is running, being created or being deleted +type InstanceState int + +const ( + // STATE_RUNNING means instance is running + STATE_RUNNING InstanceState = 1 + // STATE_BEING_CREATED means instance is being created + STATE_BEING_CREATED InstanceState = 2 + // STATE_BEING_DELETED means instance is being deleted + STATE_BEING_DELETED InstanceState = 3 +) + +// InstanceErrorInfo provides information about error condition on instance +type InstanceErrorInfo struct { + // ErrorClass tells what is class of error on instance + ErrorClass InstanceErrorClass + // ErrorCode is cloud-provider specific error code for error condition + ErrorCode string + // ErrorMessage is human readable description of error condition + ErrorMessage string +} + +// InstanceErrorClass defines class of error condition +type InstanceErrorClass int + +const ( + // ERROR_OUT_OF_RESOURCES means that error is related to lack of resources (e.g. due to + // stockout or quota-exceeded situation) + ERROR_OUT_OF_RESOURCES InstanceErrorClass = 1 + // ERROR_OTHER means some non-specific error situation occurred + ERROR_OTHER InstanceErrorClass = 99 +) + // PricingModel contains information about the node price and how it changes in time. type PricingModel interface { // NodePrice returns a price of running the given node for a given period of time. 
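The Instance, InstanceStatus and InstanceErrorInfo types added above let NodeGroup.Nodes() report provisioning state and failure conditions instead of bare ID strings. A hedged sketch of how a provider implementation might populate them; only the type and constant names come from this diff, while the helper and its values are illustrative (it compiles inside the cluster-autoscaler module, not standalone):

    import "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"

    // stockoutInstance builds an Instance for a node whose creation failed
    // because the cloud ran out of capacity. Only Id is required; Status and
    // ErrorInfo are optional.
    func stockoutInstance(id string) cloudprovider.Instance {
    	return cloudprovider.Instance{
    		Id: id,
    		Status: &cloudprovider.InstanceStatus{
    			State: cloudprovider.STATE_BEING_CREATED,
    			ErrorInfo: &cloudprovider.InstanceErrorInfo{
    				ErrorClass:   cloudprovider.ERROR_OUT_OF_RESOURCES,
    				ErrorCode:    "stockout",            // provider-specific, illustrative
    				ErrorMessage: "no capacity in zone", // illustrative
    			},
    		},
    	}
    }

Providers that have no status to report, like the Azure node groups converted earlier in this diff, simply set Id and leave Status nil.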
diff --git a/cluster-autoscaler/cloudprovider/gce/autoscaling_gce_client.go b/cluster-autoscaler/cloudprovider/gce/autoscaling_gce_client.go index a99f41679b27..41c70c14424b 100644 --- a/cluster-autoscaler/cloudprovider/gce/autoscaling_gce_client.go +++ b/cluster-autoscaler/cloudprovider/gce/autoscaling_gce_client.go @@ -25,8 +25,8 @@ import ( "regexp" "time" - "github.com/golang/glog" gce "google.golang.org/api/compute/v1" + "k8s.io/klog" ) const ( @@ -95,10 +95,12 @@ func NewCustomAutoscalingGceClientV1(client *http.Client, projectId, serverUrl s } func (client *autoscalingGceClientV1) FetchMachineType(zone, machineType string) (*gce.MachineType, error) { + registerRequest("machine_types", "get") return client.gceService.MachineTypes.Get(client.projectId, zone, machineType).Do() } func (client *autoscalingGceClientV1) FetchMachineTypes(zone string) ([]*gce.MachineType, error) { + registerRequest("machine_types", "list") machines, err := client.gceService.MachineTypes.List(client.projectId, zone).Do() if err != nil { return nil, err @@ -107,6 +109,7 @@ func (client *autoscalingGceClientV1) FetchMachineTypes(zone string) ([]*gce.Mac } func (client *autoscalingGceClientV1) FetchMigTargetSize(migRef GceRef) (int64, error) { + registerRequest("instance_group_managers", "get") igm, err := client.gceService.InstanceGroupManagers.Get(migRef.Project, migRef.Zone, migRef.Name).Do() if err != nil { return 0, err @@ -115,6 +118,7 @@ func (client *autoscalingGceClientV1) FetchMigTargetSize(migRef GceRef) (int64, } func (client *autoscalingGceClientV1) FetchMigBasename(migRef GceRef) (string, error) { + registerRequest("instance_group_managers", "get") igm, err := client.gceService.InstanceGroupManagers.Get(migRef.Project, migRef.Zone, migRef.Name).Do() if err != nil { return "", err @@ -123,6 +127,7 @@ func (client *autoscalingGceClientV1) FetchMigBasename(migRef GceRef) (string, e } func (client *autoscalingGceClientV1) ResizeMig(migRef GceRef, size int64) error { + registerRequest("instance_group_managers", "resize") op, err := client.gceService.InstanceGroupManagers.Resize(migRef.Project, migRef.Zone, migRef.Name, size).Do() if err != nil { return err @@ -132,14 +137,15 @@ func (client *autoscalingGceClientV1) ResizeMig(migRef GceRef, size int64) error func (client *autoscalingGceClientV1) waitForOp(operation *gce.Operation, project, zone string) error { for start := time.Now(); time.Since(start) < client.operationWaitTimeout; time.Sleep(client.operationPollInterval) { - glog.V(4).Infof("Waiting for operation %s %s %s", project, zone, operation.Name) + klog.V(4).Infof("Waiting for operation %s %s %s", project, zone, operation.Name) + registerRequest("zone_operations", "get") if op, err := client.gceService.ZoneOperations.Get(project, zone, operation.Name).Do(); err == nil { - glog.V(4).Infof("Operation %s %s %s status: %s", project, zone, operation.Name, op.Status) + klog.V(4).Infof("Operation %s %s %s status: %s", project, zone, operation.Name, op.Status) if op.Status == "DONE" { return nil } } else { - glog.Warningf("Error while getting operation %s on %s: %v", operation.Name, operation.TargetLink, err) + klog.Warningf("Error while getting operation %s on %s: %v", operation.Name, operation.TargetLink, err) } } return fmt.Errorf("Timeout while waiting for operation %s on %s to complete.", operation.Name, operation.TargetLink) @@ -152,6 +158,7 @@ func (client *autoscalingGceClientV1) DeleteInstances(migRef GceRef, instances [ for _, i := range instances { req.Instances = append(req.Instances, 
GenerateInstanceUrl(*i)) } + registerRequest("instance_group_managers", "delete_instances") op, err := client.gceService.InstanceGroupManagers.DeleteInstances(migRef.Project, migRef.Zone, migRef.Name, &req).Do() if err != nil { return err @@ -160,9 +167,10 @@ func (client *autoscalingGceClientV1) DeleteInstances(migRef GceRef, instances [ } func (client *autoscalingGceClientV1) FetchMigInstances(migRef GceRef) ([]GceRef, error) { + registerRequest("instance_group_managers", "list_managed_instances") instances, err := client.gceService.InstanceGroupManagers.ListManagedInstances(migRef.Project, migRef.Zone, migRef.Name).Do() if err != nil { - glog.V(4).Infof("Failed MIG info request for %s %s %s: %v", migRef.Project, migRef.Zone, migRef.Name, err) + klog.V(4).Infof("Failed MIG info request for %s %s %s: %v", migRef.Project, migRef.Zone, migRef.Name, err) return nil, err } refs := []GceRef{} @@ -177,6 +185,7 @@ func (client *autoscalingGceClientV1) FetchMigInstances(migRef GceRef) ([]GceRef } func (client *autoscalingGceClientV1) FetchZones(region string) ([]string, error) { + registerRequest("regions", "get") r, err := client.gceService.Regions.Get(client.projectId, region).Do() if err != nil { return nil, fmt.Errorf("cannot get zones for GCE region %s: %v", region, err) @@ -189,6 +198,7 @@ func (client *autoscalingGceClientV1) FetchZones(region string) ([]string, error } func (client *autoscalingGceClientV1) FetchMigTemplate(migRef GceRef) (*gce.InstanceTemplate, error) { + registerRequest("instance_group_managers", "get") igm, err := client.gceService.InstanceGroupManagers.Get(migRef.Project, migRef.Zone, migRef.Name).Do() if err != nil { return nil, err @@ -198,17 +208,19 @@ func (client *autoscalingGceClientV1) FetchMigTemplate(migRef GceRef) (*gce.Inst return nil, err } _, templateName := path.Split(templateUrl.EscapedPath()) + registerRequest("instance_templates", "get") return client.gceService.InstanceTemplates.Get(migRef.Project, templateName).Do() } func (client *autoscalingGceClientV1) FetchMigsWithName(zone string, name *regexp.Regexp) ([]string, error) { filter := fmt.Sprintf("name eq %s", name) links := make([]string, 0) + registerRequest("instance_groups", "list") req := client.gceService.InstanceGroups.List(client.projectId, zone).Filter(filter) if err := req.Pages(context.TODO(), func(page *gce.InstanceGroupList) error { for _, ig := range page.Items { links = append(links, ig.SelfLink) - glog.V(3).Infof("found managed instance group %s matching regexp %s", ig.Name, name) + klog.V(3).Infof("found managed instance group %s matching regexp %s", ig.Name, name) } return nil }); err != nil { diff --git a/cluster-autoscaler/cloudprovider/gce/cache.go b/cluster-autoscaler/cloudprovider/gce/cache.go index 5a32698701ca..90a52434aba0 100644 --- a/cluster-autoscaler/cloudprovider/gce/cache.go +++ b/cluster-autoscaler/cloudprovider/gce/cache.go @@ -24,8 +24,8 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" - "github.com/golang/glog" gce "google.golang.org/api/compute/v1" + "k8s.io/klog" ) // MigInformation is a wrapper for Mig. 
@@ -97,14 +97,14 @@ func (gc *GceCache) RegisterMig(mig Mig) bool { if oldMig := gc.migs[i].Config; oldMig.GceRef() == mig.GceRef() { if !reflect.DeepEqual(oldMig, mig) { gc.migs[i].Config = mig - glog.V(4).Infof("Updated Mig %s", mig.GceRef().String()) + klog.V(4).Infof("Updated Mig %s", mig.GceRef().String()) return true } return false } } - glog.V(1).Infof("Registering %s", mig.GceRef().String()) + klog.V(1).Infof("Registering %s", mig.GceRef().String()) // TODO(aleksandra-malinowska): fetch and set MIG basename here. gc.migs = append(gc.migs, &MigInformation{ Config: mig, @@ -112,7 +112,7 @@ func (gc *GceCache) RegisterMig(mig Mig) bool { return true } -// UnregisterMig returns true if the node group has been removed, and false if it was alredy missing from cache. +// UnregisterMig returns true if the node group has been removed, and false if it was already missing from cache. func (gc *GceCache) UnregisterMig(toBeRemoved Mig) bool { gc.migsMutex.Lock() defer gc.migsMutex.Unlock() @@ -121,7 +121,7 @@ func (gc *GceCache) UnregisterMig(toBeRemoved Mig) bool { found := false for _, mig := range gc.migs { if mig.Config.GceRef() == toBeRemoved.GceRef() { - glog.V(1).Infof("Unregistered Mig %s", toBeRemoved.GceRef().String()) + klog.V(1).Infof("Unregistered Mig %s", toBeRemoved.GceRef().String()) found = true } else { newMigs = append(newMigs, mig) @@ -202,7 +202,7 @@ func (gc *GceCache) regenerateCache() error { for _, migInfo := range gc.GetMigs() { mig := migInfo.Config - glog.V(4).Infof("Regenerating MIG information for %s", mig.GceRef().String()) + klog.V(4).Infof("Regenerating MIG information for %s", mig.GceRef().String()) basename, err := gc.GceService.FetchMigBasename(mig.GceRef()) if err != nil { @@ -212,7 +212,7 @@ func (gc *GceCache) regenerateCache() error { instances, err := gc.GceService.FetchMigInstances(mig.GceRef()) if err != nil { - glog.V(4).Infof("Failed MIG info request for %s: %v", mig.GceRef().String(), err) + klog.V(4).Infof("Failed MIG info request for %s: %v", mig.GceRef().String(), err) return err } for _, ref := range instances { diff --git a/cluster-autoscaler/cloudprovider/gce/gce_cloud_provider.go b/cluster-autoscaler/cloudprovider/gce/gce_cloud_provider.go index c28462ec7a68..12dd61cdf591 100644 --- a/cluster-autoscaler/cloudprovider/gce/gce_cloud_provider.go +++ b/cluster-autoscaler/cloudprovider/gce/gce_cloud_provider.go @@ -18,12 +18,16 @@ package gce import ( "fmt" + "io" + "os" "strings" apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" + "k8s.io/autoscaler/cluster-autoscaler/config" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" + "k8s.io/klog" schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" ) @@ -110,6 +114,11 @@ func (gce *GceCloudProvider) Refresh() error { return gce.gceManager.Refresh() } +// GetInstanceID gets the instance ID for the specified node. +func (gce *GceCloudProvider) GetInstanceID(node *apiv1.Node) string { + return node.Spec.ProviderID +} + // GceRef contains s reference to some entity in GCE world. type GceRef struct { Project string @@ -269,8 +278,17 @@ func (mig *gceMig) Debug() string { } // Nodes returns a list of all nodes that belong to this node group. 
-func (mig *gceMig) Nodes() ([]string, error) { - return mig.gceManager.GetMigNodes(mig) +func (mig *gceMig) Nodes() ([]cloudprovider.Instance, error) { + instanceNames, err := mig.gceManager.GetMigNodes(mig) + if err != nil { + return nil, err + } + instances := make([]cloudprovider.Instance, 0, len(instanceNames)) + for _, instanceName := range instanceNames { + instances = append(instances, cloudprovider.Instance{Id: instanceName}) + } + return instances, nil + } // Exist checks if the node group really exists on the cloud provider side. @@ -303,3 +321,29 @@ func (mig *gceMig) TemplateNodeInfo() (*schedulercache.NodeInfo, error) { nodeInfo.SetNode(node) return nodeInfo, nil } + +// BuildGCE builds GCE cloud provider, manager etc. +func BuildGCE(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider { + var config io.ReadCloser + if opts.CloudConfig != "" { + var err error + config, err = os.Open(opts.CloudConfig) + if err != nil { + klog.Fatalf("Couldn't open cloud provider configuration %s: %#v", opts.CloudConfig, err) + } + defer config.Close() + } + + manager, err := CreateGceManager(config, do, opts.Regional) + if err != nil { + klog.Fatalf("Failed to create GCE Manager: %v", err) + } + + provider, err := BuildGceCloudProvider(manager, rl) + if err != nil { + klog.Fatalf("Failed to create GCE cloud provider: %v", err) + } + // Register GCE API usage metrics. + RegisterMetrics() + return provider +} diff --git a/cluster-autoscaler/cloudprovider/gce/gce_cloud_provider_test.go b/cluster-autoscaler/cloudprovider/gce/gce_cloud_provider_test.go index 5da452d15615..fd95db4537c0 100644 --- a/cluster-autoscaler/cloudprovider/gce/gce_cloud_provider_test.go +++ b/cluster-autoscaler/cloudprovider/gce/gce_cloud_provider_test.go @@ -365,8 +365,8 @@ func TestMig(t *testing.T) { "gce://project1/us-central1-b/gke-cluster-1-default-pool-f7607aac-dck1"}, nil).Once() nodes, err := mig1.Nodes() assert.NoError(t, err) - assert.Equal(t, "gce://project1/us-central1-b/gke-cluster-1-default-pool-f7607aac-9j4g", nodes[0]) - assert.Equal(t, "gce://project1/us-central1-b/gke-cluster-1-default-pool-f7607aac-dck1", nodes[1]) + assert.Equal(t, cloudprovider.Instance{Id: "gce://project1/us-central1-b/gke-cluster-1-default-pool-f7607aac-9j4g"}, nodes[0]) + assert.Equal(t, cloudprovider.Instance{Id: "gce://project1/us-central1-b/gke-cluster-1-default-pool-f7607aac-dck1"}, nodes[1]) mock.AssertExpectationsForObjects(t, gceManagerMock) // Test TemplateNodeInfo. 
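BuildGCE above moves GCE provider construction into the gce package itself, so the top-level builder (whose unknown-provider fallthrough ends in klog.Fatalf earlier in this diff) only needs to dispatch on the provider name. A sketch of that wiring under the signatures shown in this diff; the dispatcher function name is hypothetical:

```go
import (
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/gce"
	"k8s.io/autoscaler/cluster-autoscaler/config"
	"k8s.io/klog"
)

// buildProviderFor is a hypothetical dispatcher showing how a per-provider
// builder such as gce.BuildGCE plugs into name-based provider selection.
func buildProviderFor(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions,
	rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider {
	switch opts.CloudProviderName {
	case "gce":
		// Opens opts.CloudConfig, builds the manager and registers GCE API metrics.
		return gce.BuildGCE(opts, do, rl)
	default:
		klog.Fatalf("Unknown cloud provider: %s", opts.CloudProviderName)
		return nil // unreachable: Fatalf calls os.Exit
	}
}
```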
diff --git a/cluster-autoscaler/cloudprovider/gce/gce_manager.go b/cluster-autoscaler/cloudprovider/gce/gce_manager.go index d3b8a3af685e..cdcd67d204b2 100644 --- a/cluster-autoscaler/cloudprovider/gce/gce_manager.go +++ b/cluster-autoscaler/cloudprovider/gce/gce_manager.go @@ -26,17 +26,18 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/config/dynamic" + "k8s.io/autoscaler/cluster-autoscaler/utils/units" apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/wait" provider_gce "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" "cloud.google.com/go/compute/metadata" - "github.com/golang/glog" "golang.org/x/oauth2" "golang.org/x/oauth2/google" gce "google.golang.org/api/compute/v1" gcfg "gopkg.in/gcfg.v1" + "k8s.io/klog" ) const ( @@ -112,19 +113,19 @@ func CreateGceManager(configReader io.Reader, discoveryOpts cloudprovider.NodeGr if configReader != nil { var cfg provider_gce.ConfigFile if err := gcfg.ReadInto(&cfg, configReader); err != nil { - glog.Errorf("Couldn't read config: %v", err) + klog.Errorf("Couldn't read config: %v", err) return nil, err } if cfg.Global.TokenURL == "" { - glog.Warning("Empty tokenUrl in cloud config") + klog.Warning("Empty tokenUrl in cloud config") } else { tokenSource = provider_gce.NewAltTokenSource(cfg.Global.TokenURL, cfg.Global.TokenBody) - glog.V(1).Infof("Using TokenSource from config %#v", tokenSource) + klog.V(1).Infof("Using TokenSource from config %#v", tokenSource) } projectId = cfg.Global.ProjectID location = cfg.Global.LocalZone } else { - glog.V(1).Infof("Using default TokenSource %#v", tokenSource) + klog.V(1).Infof("Using default TokenSource %#v", tokenSource) } if len(projectId) == 0 || len(location) == 0 { // XXX: On GKE discoveredProjectId is hosted master project and @@ -143,7 +144,7 @@ func CreateGceManager(configReader io.Reader, discoveryOpts cloudprovider.NodeGr location = discoveredLocation } } - glog.V(1).Infof("GCE projectId=%s location=%s", projectId, location) + klog.V(1).Infof("GCE projectId=%s location=%s", projectId, location) // Create Google Compute Engine service. client := oauth2.NewClient(oauth2.NoContext, tokenSource) @@ -176,7 +177,7 @@ func CreateGceManager(configReader io.Reader, discoveryOpts cloudprovider.NodeGr go wait.Until(func() { if err := manager.cache.RegenerateInstancesCache(); err != nil { - glog.Errorf("Error while regenerating Mig cache: %v", err) + klog.Errorf("Error while regenerating Mig cache: %v", err) } }, time.Hour, manager.interrupt) @@ -197,7 +198,7 @@ func (m *gceManagerImpl) registerMig(mig Mig) bool { // can be scaled up from 0 nodes. // We may never need to do it, so just log error if it fails. if _, err := m.GetMigTemplateNode(mig); err != nil { - glog.Errorf("Can't build node from template for %s, won't be able to scale from 0: %v", mig.GceRef().String(), err) + klog.Errorf("Can't build node from template for %s, won't be able to scale from 0: %v", mig.GceRef().String(), err) } } return changed @@ -214,7 +215,7 @@ func (m *gceManagerImpl) GetMigSize(mig Mig) (int64, error) { // SetMigSize sets MIG size. 
func (m *gceManagerImpl) SetMigSize(mig Mig, size int64) error { - glog.V(0).Infof("Setting mig size %s to %d", mig.Id(), size) + klog.V(0).Infof("Setting mig size %s to %d", mig.Id(), size) return m.GceService.ResizeMig(mig.GceRef(), size) } @@ -274,11 +275,11 @@ func (m *gceManagerImpl) Refresh() error { func (m *gceManagerImpl) forceRefresh() error { m.clearMachinesCache() if err := m.fetchAutoMigs(); err != nil { - glog.Errorf("Failed to fetch MIGs: %v", err) + klog.Errorf("Failed to fetch MIGs: %v", err) return err } m.lastRefresh = time.Now() - glog.V(2).Infof("Refreshed GCE resources, next refresh after %v", m.lastRefresh.Add(refreshInterval)) + klog.V(2).Infof("Refreshed GCE resources, next refresh after %v", m.lastRefresh.Add(refreshInterval)) return nil } @@ -362,11 +363,11 @@ func (m *gceManagerImpl) fetchAutoMigs() error { // This MIG was explicitly configured, but would also be // autodiscovered. We want the explicitly configured min and max // nodes to take precedence. - glog.V(3).Infof("Ignoring explicitly configured MIG %s in autodiscovery.", mig.GceRef().String()) + klog.V(3).Infof("Ignoring explicitly configured MIG %s in autodiscovery.", mig.GceRef().String()) continue } if m.registerMig(mig) { - glog.V(3).Infof("Autodiscovered MIG %s using regexp %s", mig.GceRef().String(), cfg.Re.String()) + klog.V(3).Infof("Autodiscovered MIG %s using regexp %s", mig.GceRef().String(), cfg.Re.String()) changed = true } } @@ -400,7 +401,7 @@ func (m *gceManagerImpl) clearMachinesCache() { m.cache.SetMachinesCache(machinesCache) nextRefresh := time.Now() m.machinesCacheLastRefresh = nextRefresh - glog.V(2).Infof("Cleared machine types cache, next clear after %v", nextRefresh) + klog.V(2).Infof("Cleared machine types cache, next clear after %v", nextRefresh) } // Code borrowed from gce cloud provider. Reuse the original as soon as it becomes public. @@ -486,7 +487,7 @@ func (m *gceManagerImpl) getCpuAndMemoryForMachineType(machineType string, zone } m.cache.AddMachineToCache(machineType, zone, machine) } - return machine.GuestCpus, machine.MemoryMb * bytesPerMB, nil + return machine.GuestCpus, machine.MemoryMb * units.MiB, nil } func parseCustomMachineType(machineType string) (cpu, mem int64, err error) { @@ -500,6 +501,6 @@ func parseCustomMachineType(machineType string) (cpu, mem int64, err error) { return 0, 0, fmt.Errorf("failed to parse all params in %s", machineType) } // Mb to bytes - mem = mem * bytesPerMB + mem = mem * units.MiB return } diff --git a/cluster-autoscaler/cloudprovider/gce/gce_manager_test.go b/cluster-autoscaler/cloudprovider/gce/gce_manager_test.go index 79e7fb380936..fbe68273d7e6 100644 --- a/cluster-autoscaler/cloudprovider/gce/gce_manager_test.go +++ b/cluster-autoscaler/cloudprovider/gce/gce_manager_test.go @@ -23,6 +23,7 @@ import ( "time" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" + "k8s.io/autoscaler/cluster-autoscaler/utils/units" . 
"k8s.io/autoscaler/cluster-autoscaler/utils/test" @@ -47,146 +48,6 @@ const ( gceMigB = "gce-mig-b" ) -const allNodePools1 = `{ - "nodePools": [ - { - "name": "default-pool", - "config": { - "machineType": "n1-standard-1", - "diskSizeGb": 100, - "oauthScopes": [ - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/logging.write", - "https://www.googleapis.com/auth/monitoring.write", - "https://www.googleapis.com/auth/servicecontrol", - "https://www.googleapis.com/auth/service.management.readonly", - "https://www.googleapis.com/auth/trace.append" - ], - "imageType": "COS", - "serviceAccount": "default" - }, - "initialNodeCount": 3, - "autoscaling": { - "Enabled": true, - "MinNodeCount": 1, - "MaxNodeCount": 11 - }, - "management": {}, - "selfLink": "https://container.googleapis.com/v1/projects/project1/locations/us-central1-b/clusters/cluster-1/nodePools/default-pool", - "version": "1.6.9", - "instanceGroupUrls": [ - "https://www.googleapis.com/compute/v1/projects/project1/zones/us-central1-b/instanceGroupManagers/gke-cluster-1-default-pool" - ], - "status": "RUNNING" - } - ] -}` - -const allNodePoolsRegional = `{ - "nodePools": [ - { - "name": "default-pool", - "config": { - "machineType": "n1-standard-1", - "diskSizeGb": 100, - "oauthScopes": [ - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/logging.write", - "https://www.googleapis.com/auth/monitoring.write", - "https://www.googleapis.com/auth/servicecontrol", - "https://www.googleapis.com/auth/service.management.readonly", - "https://www.googleapis.com/auth/trace.append" - ], - "imageType": "COS", - "serviceAccount": "default" - }, - "initialNodeCount": 3, - "autoscaling": { - "Enabled": true, - "MinNodeCount": 1, - "MaxNodeCount": 11 - }, - "management": {}, - "selfLink": "https://container.googleapis.com/v1/projects/project1/locations/us-central1-b/clusters/cluster-1/nodePools/default-pool", - "version": "1.6.9", - "instanceGroupUrls": [ - "https://www.googleapis.com/compute/v1/projects/project1/zones/us-central1-b/instanceGroupManagers/gke-cluster-1-default-pool", - "https://www.googleapis.com/compute/v1/projects/project1/zones/us-central1-c/instanceGroupManagers/gke-cluster-1-default-pool", - "https://www.googleapis.com/compute/v1/projects/project1/zones/us-central1-f/instanceGroupManagers/gke-cluster-1-default-pool" - ], - "status": "RUNNING" - } - ] -}` - -const allNodePools2 = `{ - "nodePools": [ - { - "name": "default-pool", - "config": { - "machineType": "n1-standard-1", - "diskSizeGb": 100, - "oauthScopes": [ - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/logging.write", - "https://www.googleapis.com/auth/monitoring.write", - "https://www.googleapis.com/auth/servicecontrol", - "https://www.googleapis.com/auth/service.management.readonly", - "https://www.googleapis.com/auth/trace.append" - ], - "imageType": "COS", - "serviceAccount": "default" - }, - "initialNodeCount": 3, - "autoscaling": { - "Enabled": true, - "MinNodeCount": 1, - "MaxNodeCount": 11}, - "management": {}, - "selfLink": "https://container.googleapis.com/v1/projects/project1/locations/us-central1-b/clusters/cluster-1/nodePools/default-pool", - "version": "1.6.9", - "instanceGroupUrls": [ - 
"https://www.googleapis.com/compute/v1/projects/project1/zones/us-central1-b/instanceGroupManagers/gke-cluster-1-default-pool" - ], - "status": "RUNNING" - }, - { - "name": "extra-pool-323233232", - "config": { - "machineType": "n1-standard-1", - "diskSizeGb": 100, - "oauthScopes": [ - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/logging.write", - "https://www.googleapis.com/auth/monitoring.write", - "https://www.googleapis.com/auth/servicecontrol", - "https://www.googleapis.com/auth/service.management.readonly", - "https://www.googleapis.com/auth/trace.append" - ], - "imageType": "COS", - "serviceAccount": "default" - }, - "initialNodeCount": 3, - "autoscaling": { - "Enabled": true, - "MinNodeCount": 0, - "MaxNodeCount": 1000 - }, - "management": {}, - "selfLink": "https://container.googleapis.com/v1/projects/project1/locations/us-central1-b/clusters/cluster-1/nodePools/extra-pool-323233232", - "version": "1.6.9", - "instanceGroupUrls": [ - "https://www.googleapis.com/compute/v1/projects/project1/zones/us-central1-b/instanceGroupManagers/gke-cluster-1-extra-pool-323233232" - ], - "status": "RUNNING" - } - ] -}` - const instanceGroupManager = `{ "kind": "compute#instanceGroupManager", "id": "3213213219", @@ -349,109 +210,6 @@ const managedInstancesResponse2 = `{ ] }` -const getClusterResponse = `{ - "name": "usertest", - "nodeConfig": { - "machineType": "n1-standard-1", - "diskSizeGb": 100, - "oauthScopes": [ - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/service.management.readonly", - "https://www.googleapis.com/auth/servicecontrol", - "https://www.googleapis.com/auth/logging.write", - "https://www.googleapis.com/auth/monitoring" - ], - "imageType": "COS", - "serviceAccount": "default", - "diskType": "pd-standard" - }, - "masterAuth": { - "username": "admin", - "password": "pass", - "clusterCaCertificate": "cer1", - "clientCertificate": "cer1", - "clientKey": "cer1==" - }, - "loggingService": "logging.googleapis.com", - "monitoringService": "monitoring.googleapis.com", - "network": "default", - "clusterIpv4Cidr": "10.32.0.0/14", - "addonsConfig": { - "networkPolicyConfig": { - "disabled": true - } - }, - "nodePools": [ - { - "name": "default-pool", - "config": { - "machineType": "n1-standard-1", - "diskSizeGb": 100, - "oauthScopes": [ - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/service.management.readonly", - "https://www.googleapis.com/auth/servicecontrol", - "https://www.googleapis.com/auth/logging.write", - "https://www.googleapis.com/auth/monitoring" - ], - "imageType": "COS", - "serviceAccount": "default", - "diskType": "pd-standard" - }, - "initialNodeCount": 1, - "autoscaling": { - "enabled": true, - "maxNodeCount": 5 - }, - "management": {}, - "selfLink": "https:///v1beta1/projects/user-gke-dev/locations/us-central1-c/clusters/usertest/nodePools/default-pool", - "version": "1.8.0-gke.1", - "instanceGroupUrls": [ - "https://www.googleapis.com/compute/v1/projects/user-gke-dev/zones/us-central1-c/instanceGroupManagers/gke-usertest-default-pool-fdsafds2d5-grp" - ], - "status": "RUNNING" - } - ], - "locations": [ - "us-central1-b" - ], - "labelFingerprint": "fasdfds", - "legacyAbac": {}, - "autoscaling": { - "resourceLimits": [ - { - "resourceType": "cpu", - "minimum": "2", - "maximum": "3" - }, - { - 
"resourceType": "memory", - "minimum": "2000000000", - "maximum": "3000000000" - } - ] - }, - "networkConfig": { - "network": "https://www.googleapis.com/compute/v1/projects/user-gke-dev/global/networks/default" - }, - "selfLink": "https:///v1beta1/projects/user-gke-dev/locations/us-central1-c/clusters/usertest", - "zone": "us-central1-c", - "endpoint": "xxx", - "initialClusterVersion": "1.sdafsa", - "currentMasterVersion": "1fdsfdsfsauser", - "currentNodeVersion": "xxx", - "createTime": "2017-10-24T12:20:00+00:00", - "status": "RUNNING", - "nodeIpv4CidrSize": 24, - "servicesIpv4Cidr": "10.35.240.0/20", - "instanceGroupUrls": [ - "https://www.googleapis.com/compute/v1/projects/user-gke-dev/zones/us-central1-c/instanceGroupManagers/gke-usertest-default-pool-323-grp" - ], - "currentNodeCount": 1 -}` - func getInstanceGroupManager(zone string) string { return getInstanceGroupManagerNamed(defaultPoolMig, zone) } @@ -1050,14 +808,14 @@ func TestGetCpuAndMemoryForMachineType(t *testing.T) { cpu, mem, err := g.getCpuAndMemoryForMachineType("custom-8-2", zoneB) assert.NoError(t, err) assert.Equal(t, int64(8), cpu) - assert.Equal(t, int64(2*bytesPerMB), mem) + assert.Equal(t, int64(2*units.MiB), mem) mock.AssertExpectationsForObjects(t, server) // Standard machine type found in cache. cpu, mem, err = g.getCpuAndMemoryForMachineType("n1-standard-1", zoneB) assert.NoError(t, err) assert.Equal(t, int64(1), cpu) - assert.Equal(t, int64(1*bytesPerMB), mem) + assert.Equal(t, int64(1*units.MiB), mem) mock.AssertExpectationsForObjects(t, server) // Standard machine type not found in cache. @@ -1065,14 +823,14 @@ func TestGetCpuAndMemoryForMachineType(t *testing.T) { cpu, mem, err = g.getCpuAndMemoryForMachineType("n1-standard-2", zoneB) assert.NoError(t, err) assert.Equal(t, int64(2), cpu) - assert.Equal(t, int64(3840*bytesPerMB), mem) + assert.Equal(t, int64(3840*units.MiB), mem) mock.AssertExpectationsForObjects(t, server) // Standard machine type cached. cpu, mem, err = g.getCpuAndMemoryForMachineType("n1-standard-2", zoneB) assert.NoError(t, err) assert.Equal(t, int64(2), cpu) - assert.Equal(t, int64(3840*bytesPerMB), mem) + assert.Equal(t, int64(3840*units.MiB), mem) mock.AssertExpectationsForObjects(t, server) // Standard machine type not found in the zone. @@ -1087,7 +845,7 @@ func TestParseCustomMachineType(t *testing.T) { cpu, mem, err := parseCustomMachineType("custom-2-2816") assert.NoError(t, err) assert.Equal(t, int64(2), cpu) - assert.Equal(t, int64(2816*bytesPerMB), mem) + assert.Equal(t, int64(2816*units.MiB), mem) cpu, mem, err = parseCustomMachineType("other-a2-2816") assert.Error(t, err) cpu, mem, err = parseCustomMachineType("other-2-2816") diff --git a/cluster-autoscaler/cloudprovider/gce/gce_metrics.go b/cluster-autoscaler/cloudprovider/gce/gce_metrics.go new file mode 100644 index 000000000000..2aeb9e3a1acd --- /dev/null +++ b/cluster-autoscaler/cloudprovider/gce/gce_metrics.go @@ -0,0 +1,46 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package gce + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +const ( + caNamespace = "cluster_autoscaler" +) + +var ( + /**** Metrics related to GCE API usage ****/ + requestCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: caNamespace, + Name: "gce_request_count", + Help: "Counter of GCE API requests for each verb and API resource.", + }, []string{"resource", "verb"}, + ) +) + +// RegisterMetrics registers all GCE metrics. +func RegisterMetrics() { + prometheus.MustRegister(requestCounter) +} + +// registerRequest registers request to GCE API. +func registerRequest(resource string, verb string) { + requestCounter.WithLabelValues(resource, verb).Add(1.0) +} diff --git a/cluster-autoscaler/cloudprovider/gce/gce_price_model.go b/cluster-autoscaler/cloudprovider/gce/gce_price_model.go index 2031ec13ceed..c89368f3d3da 100644 --- a/cluster-autoscaler/cloudprovider/gce/gce_price_model.go +++ b/cluster-autoscaler/cloudprovider/gce/gce_price_model.go @@ -147,7 +147,7 @@ func getBasePrice(resources apiv1.ResourceList, startTime time.Time, endTime tim cpu := resources[apiv1.ResourceCPU] mem := resources[apiv1.ResourceMemory] price += float64(cpu.MilliValue()) / 1000.0 * cpuPricePerHour * hours - price += float64(mem.Value()) / float64(units.Gigabyte) * memoryPricePerHourPerGb * hours + price += float64(mem.Value()) / float64(units.GiB) * memoryPricePerHourPerGb * hours return price } diff --git a/cluster-autoscaler/cloudprovider/gce/gce_price_model_test.go b/cluster-autoscaler/cloudprovider/gce/gce_price_model_test.go index 3a6af64ea7e5..cb54ee4ecdb9 100644 --- a/cluster-autoscaler/cloudprovider/gce/gce_price_model_test.go +++ b/cluster-autoscaler/cloudprovider/gce/gce_price_model_test.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" . "k8s.io/autoscaler/cluster-autoscaler/utils/test" + "k8s.io/autoscaler/cluster-autoscaler/utils/units" "github.com/stretchr/testify/assert" ) @@ -46,13 +47,13 @@ func TestGetNodePrice(t *testing.T) { now := time.Now() // regular - node1 := BuildTestNode("sillyname1", 8000, 30*1024*1024*1024) + node1 := BuildTestNode("sillyname1", 8000, 30*units.GiB) node1.Labels = labels1 price1, err := model.NodePrice(node1, now, now.Add(time.Hour)) assert.NoError(t, err) // preemptible - node2 := BuildTestNode("sillyname2", 8000, 30*1024*1024*1024) + node2 := BuildTestNode("sillyname2", 8000, 30*units.GiB) node2.Labels = labels2 price2, err := model.NodePrice(node2, now, now.Add(time.Hour)) assert.NoError(t, err) @@ -60,7 +61,7 @@ func TestGetNodePrice(t *testing.T) { assert.True(t, price1 > 3*price2) // custom node - node3 := BuildTestNode("sillyname3", 8000, 30*1024*1024*1024) + node3 := BuildTestNode("sillyname3", 8000, 30*units.GiB) price3, err := model.NodePrice(node3, now, now.Add(time.Hour)) assert.NoError(t, err) // custom nodes should be slightly more expensive than regular. 
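Each GCE API wrapper earlier in this diff now calls registerRequest before issuing the request, so API usage is exported as the cluster_autoscaler_gce_request_count counter with resource and verb labels. A minimal sketch (not part of this diff) of asserting that behaviour from inside the gce package, assuming the prometheus testutil helper is available:

```go
import (
	"testing"

	"github.com/prometheus/client_golang/prometheus/testutil"
)

// TestRegisterRequestSketch is a hypothetical test showing that each
// registerRequest call bumps the labelled counter by exactly one.
func TestRegisterRequestSketch(t *testing.T) {
	before := testutil.ToFloat64(requestCounter.WithLabelValues("regions", "get"))
	registerRequest("regions", "get")
	after := testutil.ToFloat64(requestCounter.WithLabelValues("regions", "get"))
	if after != before+1 {
		t.Errorf("counter did not increase by 1: %v -> %v", before, after)
	}
}
```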
@@ -68,13 +69,13 @@ func TestGetNodePrice(t *testing.T) { assert.True(t, price1*1.2 > price3) // regular with gpu - node4 := BuildTestNode("sillyname4", 8000, 30*1024*1024*1024) + node4 := BuildTestNode("sillyname4", 8000, 30*units.GiB) node4.Status.Capacity[gpu.ResourceNvidiaGPU] = *resource.NewQuantity(1, resource.DecimalSI) node4.Labels = labels1 price4, err := model.NodePrice(node4, now, now.Add(time.Hour)) // preemptible with gpu - node5 := BuildTestNode("sillyname5", 8000, 30*1024*1024*1024) + node5 := BuildTestNode("sillyname5", 8000, 30*units.GiB) node5.Labels = labels2 node5.Status.Capacity[gpu.ResourceNvidiaGPU] = *resource.NewQuantity(1, resource.DecimalSI) price5, err := model.NodePrice(node5, now, now.Add(time.Hour)) @@ -86,7 +87,7 @@ func TestGetNodePrice(t *testing.T) { assert.True(t, price4 > 2*price1) // small custom node - node6 := BuildTestNode("sillyname6", 1000, 3750*1024*1024) + node6 := BuildTestNode("sillyname6", 1000, 3750*units.MiB) price6, err := model.NodePrice(node6, now, now.Add(time.Hour)) assert.NoError(t, err) // 8 times smaller node should be 8 times less expensive. @@ -94,8 +95,8 @@ func TestGetNodePrice(t *testing.T) { } func TestGetPodPrice(t *testing.T) { - pod1 := BuildTestPod("a1", 100, 500*1024*1024) - pod2 := BuildTestPod("a2", 2*100, 2*500*1024*1024) + pod1 := BuildTestPod("a1", 100, 500*units.MiB) + pod2 := BuildTestPod("a2", 2*100, 2*500*units.MiB) model := &GcePriceModel{} now := time.Now() diff --git a/cluster-autoscaler/cloudprovider/gce/reserved.go b/cluster-autoscaler/cloudprovider/gce/reserved.go new file mode 100644 index 000000000000..0b4a38cd183d --- /dev/null +++ b/cluster-autoscaler/cloudprovider/gce/reserved.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gce + +// There should be no imports as it is used standalone in e2e tests + +const ( + // MiB - MebiByte size (2^20) + MiB = 1024 * 1024 + // GiB - GibiByte size (2^30) + GiB = 1024 * 1024 * 1024 + + // KubeletEvictionHardMemory is subtracted from capacity + // when calculating allocatable (on top of kube-reserved). + // Equals kubelet "evictionHard: {memory.available}" + // We don't have a good place to get it from, but it has been hard-coded + // to 100Mi since at least k8s 1.4. + KubeletEvictionHardMemory = 100 * MiB + + // Kernel reserved memory is subtracted when calculating total memory. + kernelReservedRatio = 64 + kernelReservedMemory = 16 * MiB + // Reserved memory for software IO TLB + swiotlbReservedMemory = 64 * MiB + swiotlbThresholdMemory = 3 * GiB +) + +// CalculateKernelReserved computes how much memory Linux kernel will reserve. 
+// TODO(jkaniuk): account for crashkernel reservation on RHEL / CentOS +func CalculateKernelReserved(physicalMemory int64) int64 { + // Account for memory reserved by kernel + reserved := int64(physicalMemory / kernelReservedRatio) + reserved += kernelReservedMemory + // Account for software IO TLB allocation if memory requires 64bit addressing + if physicalMemory > swiotlbThresholdMemory { + reserved += swiotlbReservedMemory + } + return reserved +} diff --git a/cluster-autoscaler/cloudprovider/gce/reserved_test.go b/cluster-autoscaler/cloudprovider/gce/reserved_test.go new file mode 100644 index 000000000000..fd5aab531636 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/gce/reserved_test.go @@ -0,0 +1,63 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gce + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCalculateKernelReserved(t *testing.T) { + type testCase struct { + physicalMemory int64 + reservedMemory int64 + } + testCases := []testCase{ + { + physicalMemory: 256 * MiB, + reservedMemory: 4*MiB + kernelReservedMemory, + }, + { + physicalMemory: 2 * GiB, + reservedMemory: 32*MiB + kernelReservedMemory, + }, + { + physicalMemory: 3 * GiB, + reservedMemory: 48*MiB + kernelReservedMemory, + }, + { + physicalMemory: 3.25 * GiB, + reservedMemory: 52*MiB + kernelReservedMemory + swiotlbReservedMemory, + }, + { + physicalMemory: 4 * GiB, + reservedMemory: 64*MiB + kernelReservedMemory + swiotlbReservedMemory, + }, + { + physicalMemory: 128 * GiB, + reservedMemory: 2*GiB + kernelReservedMemory + swiotlbReservedMemory, + }, + } + for idx, tc := range testCases { + t.Run(fmt.Sprintf("%v", idx), func(t *testing.T) { + reserved := CalculateKernelReserved(tc.physicalMemory) + assert.Equal(t, tc.reservedMemory, reserved) + }) + } +} diff --git a/cluster-autoscaler/cloudprovider/gce/templates.go b/cluster-autoscaler/cloudprovider/gce/templates.go index 07fb428fccd7..b4400dd80413 100644 --- a/cluster-autoscaler/cloudprovider/gce/templates.go +++ b/cluster-autoscaler/cloudprovider/gce/templates.go @@ -22,28 +22,16 @@ import ( "regexp" "strings" - "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" - gce "google.golang.org/api/compute/v1" apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "github.com/ghodss/yaml" - "github.com/golang/glog" -) - -const ( - mbPerGB = 1000 - bytesPerMB = 1000 * 1000 - millicoresPerCore = 1000 - // Kubelet "evictionHard: {memory.available}" is subtracted from - // capacity when calculating allocatable (on top of kube-reserved). - // We don't have a good place to get it from, but it has been hard-coded - // to 100Mi since at least k8s 1.4. - kubeletEvictionHardMemory = 100 * 1024 * 1024 + "k8s.io/klog" ) // GceTemplateBuilder builds templates for GCE nodes. 
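The formula in reserved.go above combines a proportional component (1/64 of physical memory), a fixed 16 MiB kernel reservation, and a 64 MiB software IO TLB reservation once physical memory exceeds the 3 GiB threshold. A self-contained sketch walking through the arithmetic for a 4 GiB node, with the constants copied from the new file:

```go
package main

import "fmt"

// Constants copied from reserved.go; main walks the formula for one node size.
const (
	MiB                    = 1024 * 1024
	GiB                    = 1024 * 1024 * 1024
	kernelReservedRatio    = 64
	kernelReservedMemory   = 16 * MiB
	swiotlbReservedMemory  = 64 * MiB
	swiotlbThresholdMemory = 3 * GiB
)

func main() {
	physical := int64(4 * GiB)
	reserved := physical / kernelReservedRatio // 4 GiB / 64 = 64 MiB
	reserved += kernelReservedMemory           // + 16 MiB
	if physical > swiotlbThresholdMemory {     // 4 GiB > 3 GiB
		reserved += swiotlbReservedMemory // + 64 MiB
	}
	fmt.Printf("kernel-reserved for a 4 GiB node: %d MiB\n", reserved/MiB) // 144 MiB
}
```

This matches the 4 GiB case in reserved_test.go above (64*MiB + kernelReservedMemory + swiotlbReservedMemory).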
@@ -59,13 +47,14 @@ func (t *GceTemplateBuilder) getAcceleratorCount(accelerators []*gce.Accelerator return count } -// BuildCapacity builds a list of resource capacities for a node. -func (t *GceTemplateBuilder) BuildCapacity(machineType string, accelerators []*gce.AcceleratorConfig, zone string, cpu int64, mem int64) (apiv1.ResourceList, error) { +// BuildCapacity builds a list of resource capacities given list of hardware. +func (t *GceTemplateBuilder) BuildCapacity(cpu int64, mem int64, accelerators []*gce.AcceleratorConfig) (apiv1.ResourceList, error) { capacity := apiv1.ResourceList{} // TODO: get a real value. capacity[apiv1.ResourcePods] = *resource.NewQuantity(110, resource.DecimalSI) capacity[apiv1.ResourceCPU] = *resource.NewQuantity(cpu, resource.DecimalSI) - capacity[apiv1.ResourceMemory] = *resource.NewQuantity(mem, resource.DecimalSI) + memTotal := mem - CalculateKernelReserved(mem) + capacity[apiv1.ResourceMemory] = *resource.NewQuantity(memTotal, resource.DecimalSI) if accelerators != nil && len(accelerators) > 0 { capacity[gpu.ResourceNvidiaGPU] = *resource.NewQuantity(t.getAcceleratorCount(accelerators), resource.DecimalSI) @@ -91,33 +80,21 @@ func (t *GceTemplateBuilder) BuildAllocatableFromKubeEnv(capacity apiv1.Resource if err != nil { return nil, err } - if quantity, found := reserved[apiv1.ResourceMemory]; found { - reserved[apiv1.ResourceMemory] = *resource.NewQuantity(quantity.Value()+kubeletEvictionHardMemory, resource.BinarySI) - } - return t.getAllocatable(capacity, reserved), nil + return t.CalculateAllocatable(capacity, reserved), nil } -// BuildAllocatableFromCapacity builds node allocatable based only on node capacity. -// Calculates reserved as a ratio of capacity. See calculateReserved for more details -func (t *GceTemplateBuilder) BuildAllocatableFromCapacity(capacity apiv1.ResourceList) apiv1.ResourceList { - memoryReserved := memoryReservedMB(capacity.Memory().Value() / bytesPerMB) - cpuReserved := cpuReservedMillicores(capacity.Cpu().MilliValue()) - reserved := apiv1.ResourceList{} - reserved[apiv1.ResourceCPU] = *resource.NewMilliQuantity(cpuReserved, resource.DecimalSI) - // Duplicating an upstream bug treating MB as MiB (we need to predict the end result accurately). - memoryReserved = memoryReserved * 1024 * 1024 - memoryReserved += kubeletEvictionHardMemory - reserved[apiv1.ResourceMemory] = *resource.NewQuantity(memoryReserved, resource.BinarySI) - return t.getAllocatable(capacity, reserved) -} - -func (t *GceTemplateBuilder) getAllocatable(capacity, reserved apiv1.ResourceList) apiv1.ResourceList { +// CalculateAllocatable computes allocatable resources subtracting kube reserved values +// and kubelet eviction memory buffer from corresponding capacity. 
+func (t *GceTemplateBuilder) CalculateAllocatable(capacity, kubeReserved apiv1.ResourceList) apiv1.ResourceList { allocatable := apiv1.ResourceList{} for key, value := range capacity { quantity := *value.Copy() - if reservedQuantity, found := reserved[key]; found { + if reservedQuantity, found := kubeReserved[key]; found { quantity.Sub(reservedQuantity) } + if key == apiv1.ResourceMemory { + quantity = *resource.NewQuantity(quantity.Value()-KubeletEvictionHardMemory, resource.BinarySI) + } allocatable[key] = quantity } return allocatable @@ -139,7 +116,7 @@ func (t *GceTemplateBuilder) BuildNodeFromTemplate(mig Mig, template *gce.Instan Labels: map[string]string{}, } - capacity, err := t.BuildCapacity(template.Properties.MachineType, template.Properties.GuestAccelerators, mig.GceRef().Zone, cpu, mem) + capacity, err := t.BuildCapacity(cpu, mem, template.Properties.GuestAccelerators) if err != nil { return nil, err } @@ -176,7 +153,7 @@ func (t *GceTemplateBuilder) BuildNodeFromTemplate(mig Mig, template *gce.Instan } } if nodeAllocatable == nil { - glog.Warningf("could not extract kube-reserved from kubeEnv for mig %q, setting allocatable to capacity.", mig.GceRef().Name) + klog.Warningf("could not extract kube-reserved from kubeEnv for mig %q, setting allocatable to capacity.", mig.GceRef().Name) node.Status.Allocatable = node.Status.Capacity } else { node.Status.Allocatable = nodeAllocatable @@ -226,7 +203,7 @@ func parseKubeReserved(kubeReserved string) (apiv1.ResourceList, error) { reservedResources[apiv1.ResourceName(name)] = q } default: - glog.Warningf("ignoring resource from kube-reserved: %q", name) + klog.Warningf("ignoring resource from kube-reserved: %q", name) } } return reservedResources, nil @@ -238,7 +215,7 @@ func extractLabelsFromKubeEnv(kubeEnv string) (map[string]string, error) { // fall back to the old way. labels, err := extractAutoscalerVarFromKubeEnv(kubeEnv, "node_labels") if err != nil { - glog.Errorf("node_labels not found via AUTOSCALER_ENV_VARS due to error, will try NODE_LABELS: %v", err) + klog.Errorf("node_labels not found via AUTOSCALER_ENV_VARS due to error, will try NODE_LABELS: %v", err) labels, err = extractFromKubeEnv(kubeEnv, "NODE_LABELS") if err != nil { return nil, err @@ -253,7 +230,7 @@ func extractTaintsFromKubeEnv(kubeEnv string) ([]apiv1.Taint, error) { // fall back to the old way. taints, err := extractAutoscalerVarFromKubeEnv(kubeEnv, "node_taints") if err != nil { - glog.Errorf("node_taints not found via AUTOSCALER_ENV_VARS due to error, will try NODE_TAINTS: %v", err) + klog.Errorf("node_taints not found via AUTOSCALER_ENV_VARS due to error, will try NODE_TAINTS: %v", err) taints, err = extractFromKubeEnv(kubeEnv, "NODE_TAINTS") if err != nil { return nil, err @@ -272,7 +249,7 @@ func extractKubeReservedFromKubeEnv(kubeEnv string) (string, error) { // fall back to the old way. 
kubeReserved, err := extractAutoscalerVarFromKubeEnv(kubeEnv, "kube_reserved") if err != nil { - glog.Errorf("kube_reserved not found via AUTOSCALER_ENV_VARS due to error, will try kube-reserved in KUBELET_TEST_ARGS: %v", err) + klog.Errorf("kube_reserved not found via AUTOSCALER_ENV_VARS due to error, will try kube-reserved in KUBELET_TEST_ARGS: %v", err) kubeletArgs, err := extractFromKubeEnv(kubeEnv, "KUBELET_TEST_ARGS") if err != nil { return "", err @@ -346,81 +323,3 @@ func buildTaints(kubeEnvTaints map[string]string) ([]apiv1.Taint, error) { } return taints, nil } - -type allocatableBracket struct { - threshold int64 - marginalReservedRate float64 -} - -func memoryReservedMB(memoryCapacityMB int64) int64 { - if memoryCapacityMB <= 1*mbPerGB { - // do not set any memory reserved for nodes with less than 1 Gb of capacity - return 0 - } - return calculateReserved(memoryCapacityMB, []allocatableBracket{ - { - threshold: 0, - marginalReservedRate: 0.25, - }, - { - threshold: 4 * mbPerGB, - marginalReservedRate: 0.2, - }, - { - threshold: 8 * mbPerGB, - marginalReservedRate: 0.1, - }, - { - threshold: 16 * mbPerGB, - marginalReservedRate: 0.06, - }, - { - threshold: 128 * mbPerGB, - marginalReservedRate: 0.02, - }, - }) -} - -func cpuReservedMillicores(cpuCapacityMillicores int64) int64 { - return calculateReserved(cpuCapacityMillicores, []allocatableBracket{ - { - threshold: 0, - marginalReservedRate: 0.06, - }, - { - threshold: 1 * millicoresPerCore, - marginalReservedRate: 0.01, - }, - { - threshold: 2 * millicoresPerCore, - marginalReservedRate: 0.005, - }, - { - threshold: 4 * millicoresPerCore, - marginalReservedRate: 0.0025, - }, - }) -} - -// calculateReserved calculates reserved using capacity and a series of -// brackets as follows: the marginalReservedRate applies to all capacity -// greater than the bracket, but less than the next bracket. For example, if -// the first bracket is threshold: 0, rate:0.1, and the second bracket has -// threshold: 100, rate: 0.4, a capacity of 100 results in a reserved of -// 100*0.1 = 10, but a capacity of 200 results in a reserved of -// 10 + (200-100)*.4 = 50. Using brackets with marginal rates ensures that as -// capacity increases, reserved always increases, and never decreases. 
-func calculateReserved(capacity int64, brackets []allocatableBracket) int64 { - var reserved float64 - for i, bracket := range brackets { - c := capacity - if i < len(brackets)-1 && brackets[i+1].threshold < capacity { - c = brackets[i+1].threshold - } - additionalReserved := float64(c-bracket.threshold) * bracket.marginalReservedRate - if additionalReserved > 0 { - reserved += additionalReserved - } - } - return int64(reserved) -} diff --git a/cluster-autoscaler/cloudprovider/gce/templates_test.go b/cluster-autoscaler/cloudprovider/gce/templates_test.go index 11a31d93667f..9680d9133467 100644 --- a/cluster-autoscaler/cloudprovider/gce/templates_test.go +++ b/cluster-autoscaler/cloudprovider/gce/templates_test.go @@ -18,120 +18,111 @@ package gce import ( "fmt" + "strings" "testing" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" gpuUtils "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" + "k8s.io/autoscaler/cluster-autoscaler/utils/units" gce "google.golang.org/api/compute/v1" apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" - "k8s.io/kubernetes/pkg/quota" + quota "k8s.io/kubernetes/pkg/quota/v1" "github.com/stretchr/testify/assert" ) func TestBuildNodeFromTemplateSetsResources(t *testing.T) { type testCase struct { - kubeEnv string - name string - machineType string - accelerators []*gce.AcceleratorConfig - mig Mig - capacityCpu int64 - capacityMemory int64 - allocatableCpu string - allocatableMemory string - gpuCount int64 - expectedErr bool + scenario string + kubeEnv string + accelerators []*gce.AcceleratorConfig + mig Mig + physicalCpu int64 + physicalMemory int64 + kubeReserved bool + reservedCpu string + reservedMemory string + expectedGpuCount int64 + expectedErr bool } - testCases := []testCase{{ - kubeEnv: "ENABLE_NODE_PROBLEM_DETECTOR: 'daemonset'\n" + - "NODE_LABELS: a=b,c=d,cloud.google.com/gke-nodepool=pool-3,cloud.google.com/gke-preemptible=true\n" + - "DNS_SERVER_IP: '10.0.0.10'\n" + - fmt.Sprintf("KUBELET_TEST_ARGS: --experimental-allocatable-ignore-eviction --kube-reserved=cpu=1000m,memory=%v\n", 1024*1024) + - "NODE_TAINTS: 'dedicated=ml:NoSchedule,test=dev:PreferNoSchedule,a=b:c'\n", - name: "nodeName", - machineType: "custom-8-2", - accelerators: []*gce.AcceleratorConfig{ - {AcceleratorType: "nvidia-tesla-k80", AcceleratorCount: 3}, - {AcceleratorType: "nvidia-tesla-p100", AcceleratorCount: 8}, - }, - mig: &gceMig{ - gceRef: GceRef{ - Name: "some-name", - Project: "some-proj", - Zone: "us-central1-b", + testCases := []testCase{ + { + scenario: "kube-reserved present in kube-env", + kubeEnv: "ENABLE_NODE_PROBLEM_DETECTOR: 'daemonset'\n" + + "NODE_LABELS: a=b,c=d,cloud.google.com/gke-nodepool=pool-3,cloud.google.com/gke-preemptible=true\n" + + "DNS_SERVER_IP: '10.0.0.10'\n" + + fmt.Sprintf("KUBELET_TEST_ARGS: --experimental-allocatable-ignore-eviction --kube-reserved=cpu=1000m,memory=%v\n", 1*units.MiB) + + "NODE_TAINTS: 'dedicated=ml:NoSchedule,test=dev:PreferNoSchedule,a=b:c'\n", + accelerators: []*gce.AcceleratorConfig{ + {AcceleratorType: "nvidia-tesla-k80", AcceleratorCount: 3}, + {AcceleratorType: "nvidia-tesla-p100", AcceleratorCount: 8}, }, + physicalCpu: 8, + physicalMemory: 200 * units.MiB, + kubeReserved: true, + reservedCpu: "1000m", + reservedMemory: fmt.Sprintf("%v", 1*units.MiB), + expectedGpuCount: 11, + expectedErr: false, }, - capacityCpu: 8, - capacityMemory: 200 * 1024 * 1024, - allocatableCpu: "7000m", - allocatableMemory: fmt.Sprintf("%v", 99*1024*1024), - gpuCount: 11, - 
expectedErr: false, - }, { + scenario: "no kube-reserved in kube-env", kubeEnv: "ENABLE_NODE_PROBLEM_DETECTOR: 'daemonset'\n" + "NODE_LABELS: a=b,c=d,cloud.google.com/gke-nodepool=pool-3,cloud.google.com/gke-preemptible=true\n" + "DNS_SERVER_IP: '10.0.0.10'\n" + "NODE_TAINTS: 'dedicated=ml:NoSchedule,test=dev:PreferNoSchedule,a=b:c'\n", - name: "nodeName", - machineType: "custom-8-2", - mig: &gceMig{ - gceRef: GceRef{ - Name: "some-name", - Project: "some-proj", - Zone: "us-central1-b", - }, - }, - capacityCpu: 8, - capacityMemory: 2 * 1024 * 1024, - allocatableCpu: "8000m", - allocatableMemory: fmt.Sprintf("%v", 2*1024*1024), - expectedErr: false, + physicalCpu: 8, + physicalMemory: 200 * units.MiB, + kubeReserved: false, + expectedGpuCount: 11, + expectedErr: false, }, { + scenario: "totally messed up kube-env", kubeEnv: "This kube-env is totally messed up", - name: "nodeName", - machineType: "custom-8-2", - mig: &gceMig{ + expectedErr: true, + }, + } + for _, tc := range testCases { + t.Run(tc.scenario, func(t *testing.T) { + tb := &GceTemplateBuilder{} + mig := &gceMig{ gceRef: GceRef{ Name: "some-name", Project: "some-proj", Zone: "us-central1-b", }, - }, - expectedErr: true, - }, - } - for _, tc := range testCases { - tb := &GceTemplateBuilder{} - template := &gce.InstanceTemplate{ - Name: tc.name, - Properties: &gce.InstanceProperties{ - GuestAccelerators: tc.accelerators, - Metadata: &gce.Metadata{ - Items: []*gce.MetadataItems{{Key: "kube-env", Value: &tc.kubeEnv}}, + } + template := &gce.InstanceTemplate{ + Name: "node-name", + Properties: &gce.InstanceProperties{ + GuestAccelerators: tc.accelerators, + Metadata: &gce.Metadata{ + Items: []*gce.MetadataItems{{Key: "kube-env", Value: &tc.kubeEnv}}, + }, + MachineType: "irrelevant-type", }, - MachineType: tc.machineType, - }, - } - node, err := tb.BuildNodeFromTemplate(tc.mig, template, tc.capacityCpu, tc.capacityMemory) - if tc.expectedErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - podsQuantity, _ := resource.ParseQuantity("110") - capacity, err := makeResourceList(fmt.Sprintf("%dm", tc.capacityCpu*1000), fmt.Sprintf("%v", tc.capacityMemory), tc.gpuCount) - capacity[apiv1.ResourcePods] = podsQuantity - assert.NoError(t, err) - allocatable, err := makeResourceList(tc.allocatableCpu, tc.allocatableMemory, tc.gpuCount) - allocatable[apiv1.ResourcePods] = podsQuantity - assert.NoError(t, err) - assertEqualResourceLists(t, "Capacity", capacity, node.Status.Capacity) - assertEqualResourceLists(t, "Allocatable", allocatable, node.Status.Allocatable) - } + } + node, err := tb.BuildNodeFromTemplate(mig, template, tc.physicalCpu, tc.physicalMemory) + if tc.expectedErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + capacity, err := tb.BuildCapacity(tc.physicalCpu, tc.physicalMemory, tc.accelerators) + assert.NoError(t, err) + assertEqualResourceLists(t, "Capacity", capacity, node.Status.Capacity) + if !tc.kubeReserved { + assertEqualResourceLists(t, "Allocatable", capacity, node.Status.Allocatable) + } else { + reserved, err := makeResourceList(tc.reservedCpu, tc.reservedMemory, 0) + assert.NoError(t, err) + allocatable := tb.CalculateAllocatable(capacity, reserved) + assertEqualResourceLists(t, "Allocatable", allocatable, node.Status.Allocatable) + } + } + }) } } @@ -150,6 +141,51 @@ func TestBuildGenericLabels(t *testing.T) { assert.Equal(t, cloudprovider.DefaultOS, labels[kubeletapis.LabelOS]) } +func TestCalculateAllocatable(t *testing.T) { + type testCase struct { + scenario string + capacityCpu string 
+ reservedCpu string + allocatableCpu string + capacityMemory string + reservedMemory string + allocatableMemory string + } + testCases := []testCase{ + { + scenario: "no reservations", + capacityCpu: "8", + reservedCpu: "0", + allocatableCpu: "8", + capacityMemory: fmt.Sprintf("%v", 200*units.MiB), + reservedMemory: "0", + allocatableMemory: fmt.Sprintf("%v", 200*units.MiB-KubeletEvictionHardMemory), + }, + { + scenario: "reserved cpu and memory", + capacityCpu: "8", + reservedCpu: "1000m", + allocatableCpu: "7000m", + capacityMemory: fmt.Sprintf("%v", 200*units.MiB), + reservedMemory: fmt.Sprintf("%v", 50*units.MiB), + allocatableMemory: fmt.Sprintf("%v", 150*units.MiB-KubeletEvictionHardMemory), + }, + } + for _, tc := range testCases { + t.Run(tc.scenario, func(t *testing.T) { + tb := GceTemplateBuilder{} + capacity, err := makeResourceList(tc.capacityCpu, tc.capacityMemory, 0) + assert.NoError(t, err) + reserved, err := makeResourceList(tc.reservedCpu, tc.reservedMemory, 0) + assert.NoError(t, err) + expectedAllocatable, err := makeResourceList(tc.allocatableCpu, tc.allocatableMemory, 0) + assert.NoError(t, err) + allocatable := tb.CalculateAllocatable(capacity, reserved) + assertEqualResourceLists(t, "Allocatable", expectedAllocatable, allocatable) + }) + } +} + func TestBuildAllocatableFromKubeEnv(t *testing.T) { type testCase struct { kubeEnv string @@ -233,36 +269,38 @@ func TestGetAcceleratorCount(t *testing.T) { } } -func TestBuildAllocatableFromCapacity(t *testing.T) { +func TestBuildCapacityMemory(t *testing.T) { type testCase struct { - capacityCpu string - capacityMemory string - allocatableCpu string - allocatableMemory string - gpuCount int64 + physicalMemory int64 + capacityMemory int64 + physicalCpu int64 } - testCases := []testCase{{ - capacityCpu: "16000m", - capacityMemory: fmt.Sprintf("%v", 1*mbPerGB*bytesPerMB), - allocatableCpu: "15890m", - // Below threshold for reserving memory - allocatableMemory: fmt.Sprintf("%v", 1*mbPerGB*bytesPerMB-kubeletEvictionHardMemory), - gpuCount: 1, - }, { - capacityCpu: "500m", - capacityMemory: fmt.Sprintf("%v", 1.1*mbPerGB*bytesPerMB), - allocatableCpu: "470m", - // Final 1024*1024 because we're duplicating upstream bug using MB as MiB - allocatableMemory: fmt.Sprintf("%v", 1.1*mbPerGB*bytesPerMB-0.25*1.1*mbPerGB*1024*1024-kubeletEvictionHardMemory), - }} - for _, tc := range testCases { - tb := GceTemplateBuilder{} - capacity, err := makeResourceList(tc.capacityCpu, tc.capacityMemory, tc.gpuCount) - assert.NoError(t, err) - expectedAllocatable, err := makeResourceList(tc.allocatableCpu, tc.allocatableMemory, tc.gpuCount) - assert.NoError(t, err) - allocatable := tb.BuildAllocatableFromCapacity(capacity) - assertEqualResourceLists(t, "Allocatable", expectedAllocatable, allocatable) + testCases := []testCase{ + { + physicalMemory: 2 * units.GiB, + capacityMemory: 2*units.GiB - 32*units.MiB - kernelReservedMemory, + physicalCpu: 1, + }, + { + physicalMemory: 4 * units.GiB, + capacityMemory: 4*units.GiB - 64*units.MiB - kernelReservedMemory - swiotlbReservedMemory, + physicalCpu: 2, + }, + { + physicalMemory: 128 * units.GiB, + capacityMemory: 128*units.GiB - 2*units.GiB - kernelReservedMemory - swiotlbReservedMemory, + physicalCpu: 32, + }, + } + for idx, tc := range testCases { + t.Run(fmt.Sprintf("%v", idx), func(t *testing.T) { + tb := GceTemplateBuilder{} + capacity, err := tb.BuildCapacity(tc.physicalCpu, tc.physicalMemory, make([]*gce.AcceleratorConfig, 0)) + assert.NoError(t, err) + expected, err := 
makeResourceList2(tc.physicalCpu, tc.capacityMemory, 0, 110) + assert.NoError(t, err) + assertEqualResourceLists(t, "Capacity", capacity, expected) + }) } } @@ -531,52 +569,6 @@ func TestParseKubeReserved(t *testing.T) { } } -func TestCalculateReserved(t *testing.T) { - type testCase struct { - name string - function func(capacity int64) int64 - capacity int64 - expectedReserved int64 - } - testCases := []testCase{ - { - name: "zero memory capacity", - function: memoryReservedMB, - capacity: 0, - expectedReserved: 0, - }, - { - name: "between memory thresholds", - function: memoryReservedMB, - capacity: 2 * mbPerGB, - expectedReserved: 500, // 0.5 Gb - }, - { - name: "at a memory threshold boundary", - function: memoryReservedMB, - capacity: 8 * mbPerGB, - expectedReserved: 1800, // 1.8 Gb - }, - { - name: "exceeds highest memory threshold", - function: memoryReservedMB, - capacity: 200 * mbPerGB, - expectedReserved: 10760, // 10.8 Gb - }, - { - name: "cpu sanity check", - function: cpuReservedMillicores, - capacity: 4 * millicoresPerCore, - expectedReserved: 80, - }, - } - for _, tc := range testCases { - if actualReserved := tc.function(tc.capacity); actualReserved != tc.expectedReserved { - t.Errorf("Test case: %s, Got f(%d Mb) = %d. Want %d", tc.name, tc.capacity, actualReserved, tc.expectedReserved) - } - } -} - func makeTaintSet(taints []apiv1.Taint) map[apiv1.Taint]bool { set := make(map[apiv1.Taint]bool) for _, taint := range taints { @@ -607,6 +599,38 @@ func makeResourceList(cpu string, memory string, gpu int64) (apiv1.ResourceList, return result, nil } +func makeResourceList2(cpu int64, memory int64, gpu int64, pods int64) (apiv1.ResourceList, error) { + result := apiv1.ResourceList{} + result[apiv1.ResourceCPU] = *resource.NewQuantity(cpu, resource.DecimalSI) + result[apiv1.ResourceMemory] = *resource.NewQuantity(memory, resource.BinarySI) + if gpu > 0 { + result[gpuUtils.ResourceNvidiaGPU] = *resource.NewQuantity(gpu, resource.DecimalSI) + } + if pods > 0 { + result[apiv1.ResourcePods] = *resource.NewQuantity(pods, resource.DecimalSI) + } + return result, nil +} + func assertEqualResourceLists(t *testing.T, name string, expected, actual apiv1.ResourceList) { - assert.True(t, quota.V1Equals(expected, actual), "%q unequal:\nExpected:%v\nActual:%v", name, expected, actual) + t.Helper() + assert.True(t, quota.V1Equals(expected, actual), + "%q unequal:\nExpected: %v\nActual: %v", name, stringifyResourceList(expected), stringifyResourceList(actual)) +} + +func stringifyResourceList(resourceList apiv1.ResourceList) string { + resourceNames := []apiv1.ResourceName{ + apiv1.ResourcePods, apiv1.ResourceCPU, gpuUtils.ResourceNvidiaGPU, apiv1.ResourceMemory, apiv1.ResourceEphemeralStorage} + var results []string + for _, name := range resourceNames { + quantity, found := resourceList[name] + if found { + value := quantity.Value() + if name == apiv1.ResourceCPU { + value = quantity.MilliValue() + } + results = append(results, fmt.Sprintf("%v: %v", string(name), value)) + } + } + return strings.Join(results, ", ") } diff --git a/cluster-autoscaler/cloudprovider/gke/autoscaling_gke_client.go b/cluster-autoscaler/cloudprovider/gke/autoscaling_gke_client.go index 6c35df75a89d..e73d52c5db5d 100644 --- a/cluster-autoscaler/cloudprovider/gke/autoscaling_gke_client.go +++ b/cluster-autoscaler/cloudprovider/gke/autoscaling_gke_client.go @@ -23,9 +23,10 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" ) -// This flag is outside main as it's only useful for test/development. 
var ( - gkeAPIEndpoint = flag.String("gke-api-endpoint", "", "GKE API endpoint address. This flag is used by developers only. Users shouldn't change this flag.") + // GkeAPIEndpoint overrides default GKE API endpoint for testing. + // This flag is outside main as it's only useful for test/development. + GkeAPIEndpoint = flag.String("gke-api-endpoint", "", "GKE API endpoint address. This flag is used by developers only. Users shouldn't change this flag.") ) const ( diff --git a/cluster-autoscaler/cloudprovider/gke/autoscaling_gke_client_v1.go b/cluster-autoscaler/cloudprovider/gke/autoscaling_gke_client_v1.go index b89f9348e767..179810374a37 100644 --- a/cluster-autoscaler/cloudprovider/gke/autoscaling_gke_client_v1.go +++ b/cluster-autoscaler/cloudprovider/gke/autoscaling_gke_client_v1.go @@ -43,8 +43,8 @@ func NewAutoscalingGkeClientV1(client *http.Client, projectId, location, cluster if err != nil { return nil, err } - if *gkeAPIEndpoint != "" { - gkeService.BasePath = *gkeAPIEndpoint + if *GkeAPIEndpoint != "" { + gkeService.BasePath = *GkeAPIEndpoint } autoscalingGkeClient.gkeService = gkeService @@ -52,6 +52,7 @@ func NewAutoscalingGkeClientV1(client *http.Client, projectId, location, cluster } func (m *autoscalingGkeClientV1) GetCluster() (Cluster, error) { + registerRequest("clusters", "get") clusterResponse, err := m.gkeService.Projects.Locations.Clusters.Get(m.clusterPath).Do() if err != nil { return Cluster{}, err diff --git a/cluster-autoscaler/cloudprovider/gke/autoscaling_gke_client_v1beta1.go b/cluster-autoscaler/cloudprovider/gke/autoscaling_gke_client_v1beta1.go index 95062e14fc6e..8517fe21ca4d 100644 --- a/cluster-autoscaler/cloudprovider/gke/autoscaling_gke_client_v1beta1.go +++ b/cluster-autoscaler/cloudprovider/gke/autoscaling_gke_client_v1beta1.go @@ -27,8 +27,8 @@ import ( apiv1 "k8s.io/api/core/v1" - "github.com/golang/glog" gke_api_beta "google.golang.org/api/container/v1beta1" + "k8s.io/klog" ) var ( @@ -65,8 +65,8 @@ func NewAutoscalingGkeClientV1beta1(client *http.Client, projectId, location, cl if err != nil { return nil, err } - if *gkeAPIEndpoint != "" { - gkeBetaService.BasePath = *gkeAPIEndpoint + if *GkeAPIEndpoint != "" { + gkeBetaService.BasePath = *GkeAPIEndpoint } autoscalingGkeClient.gkeBetaService = gkeBetaService @@ -74,6 +74,7 @@ func NewAutoscalingGkeClientV1beta1(client *http.Client, projectId, location, cl } func (m *autoscalingGkeClientV1beta1) GetCluster() (Cluster, error) { + registerRequest("clusters", "get") clusterResponse, err := m.gkeBetaService.Projects.Locations.Clusters.Get(m.clusterPath).Do() if err != nil { return Cluster{}, err @@ -100,7 +101,7 @@ func (m *autoscalingGkeClientV1beta1) GetCluster() (Cluster, error) { func buildResourceLimiter(cluster *gke_api_beta.Cluster) *cloudprovider.ResourceLimiter { if cluster.Autoscaling == nil { - glog.Warningf("buildResourceLimiter called without autoscaling limits set") + klog.Warningf("buildResourceLimiter called without autoscaling limits set") return nil } @@ -108,7 +109,7 @@ func buildResourceLimiter(cluster *gke_api_beta.Cluster) *cloudprovider.Resource maxLimits := make(map[string]int64) for _, limit := range cluster.Autoscaling.ResourceLimits { if _, found := supportedResources[limit.ResourceType]; !found { - glog.Warningf("Unsupported limit defined %s: %d - %d", limit.ResourceType, limit.Minimum, limit.Maximum) + klog.Warningf("Unsupported limit defined %s: %d - %d", limit.ResourceType, limit.Minimum, limit.Maximum) } minLimits[limit.ResourceType] = limit.Minimum 
maxLimits[limit.ResourceType] = limit.Maximum @@ -116,16 +117,17 @@ func buildResourceLimiter(cluster *gke_api_beta.Cluster) *cloudprovider.Resource // GKE API provides memory in GB, but ResourceLimiter expects them in bytes if _, found := minLimits[cloudprovider.ResourceNameMemory]; found { - minLimits[cloudprovider.ResourceNameMemory] = minLimits[cloudprovider.ResourceNameMemory] * units.Gigabyte + minLimits[cloudprovider.ResourceNameMemory] = minLimits[cloudprovider.ResourceNameMemory] * units.GiB } if _, found := maxLimits[cloudprovider.ResourceNameMemory]; found { - maxLimits[cloudprovider.ResourceNameMemory] = maxLimits[cloudprovider.ResourceNameMemory] * units.Gigabyte + maxLimits[cloudprovider.ResourceNameMemory] = maxLimits[cloudprovider.ResourceNameMemory] * units.GiB } return cloudprovider.NewResourceLimiter(minLimits, maxLimits) } func (m *autoscalingGkeClientV1beta1) DeleteNodePool(toBeRemoved string) error { + registerRequest("node_pools", "delete") deleteOp, err := m.gkeBetaService.Projects.Locations.Clusters.NodePools.Delete( fmt.Sprintf(m.nodePoolPath, toBeRemoved)).Do() if err != nil { @@ -200,6 +202,7 @@ func (m *autoscalingGkeClientV1beta1) CreateNodePool(mig *GkeMig) error { Autoscaling: &autoscaling, }, } + registerRequest("node_pools", "create") createOp, err := m.gkeBetaService.Projects.Locations.Clusters.NodePools.Create( m.clusterPath, &createRequest).Do() if err != nil { @@ -210,15 +213,16 @@ func (m *autoscalingGkeClientV1beta1) CreateNodePool(mig *GkeMig) error { func (m *autoscalingGkeClientV1beta1) waitForGkeOp(op *gke_api_beta.Operation) error { for start := time.Now(); time.Since(start) < m.operationWaitTimeout; time.Sleep(m.operationPollInterval) { - glog.V(4).Infof("Waiting for operation %s %s", op.TargetLink, op.Name) + klog.V(4).Infof("Waiting for operation %s %s", op.TargetLink, op.Name) + registerRequest("operations", "get") if op, err := m.gkeBetaService.Projects.Locations.Operations.Get( fmt.Sprintf(m.operationPath, op.Name)).Do(); err == nil { - glog.V(4).Infof("Operation %s %s status: %s", op.TargetLink, op.Name, op.Status) + klog.V(4).Infof("Operation %s %s status: %s", op.TargetLink, op.Name, op.Status) if op.Status == "DONE" { return nil } } else { - glog.Warningf("Error while getting operation %s on %s: %v", op.Name, op.TargetLink, err) + klog.Warningf("Error while getting operation %s on %s: %v", op.Name, op.TargetLink, err) } } return fmt.Errorf("Timeout while waiting for operation %s on %s to complete.", op.Name, op.TargetLink) diff --git a/cluster-autoscaler/cloudprovider/gke/autoscaling_gke_client_v1beta1_test.go b/cluster-autoscaler/cloudprovider/gke/autoscaling_gke_client_v1beta1_test.go index 96a3c88bf7e1..8c8828613057 100644 --- a/cluster-autoscaler/cloudprovider/gke/autoscaling_gke_client_v1beta1_test.go +++ b/cluster-autoscaler/cloudprovider/gke/autoscaling_gke_client_v1beta1_test.go @@ -51,7 +51,7 @@ const operationDoneResponse = `{ }` func newTestAutoscalingGkeClientV1beta1(t *testing.T, project, location, clusterName, url string) *autoscalingGkeClientV1beta1 { - *gkeAPIEndpoint = url + *GkeAPIEndpoint = url client := &http.Client{} gkeClient, err := NewAutoscalingGkeClientV1beta1(client, project, location, clusterName) if !assert.NoError(t, err) { diff --git a/cluster-autoscaler/cloudprovider/gke/gke_cloud_provider.go b/cluster-autoscaler/cloudprovider/gke/gke_cloud_provider.go index 61b3a4fc4af4..c7437f3df21e 100644 --- a/cluster-autoscaler/cloudprovider/gke/gke_cloud_provider.go +++ 
b/cluster-autoscaler/cloudprovider/gke/gke_cloud_provider.go @@ -18,14 +18,18 @@ package gke import ( "fmt" + "io" + "os" "time" apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/gce" + "k8s.io/autoscaler/cluster-autoscaler/config" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" + "k8s.io/klog" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" ) @@ -210,6 +214,11 @@ func (gke *GkeCloudProvider) GetNodeLocations() []string { return gke.gkeManager.GetNodeLocations() } +// GetInstanceID gets the instance ID for the specified node. +func (gke *GkeCloudProvider) GetInstanceID(node *apiv1.Node) string { + return node.Spec.ProviderID +} + // MigSpec contains information about what machines in a MIG look like. type MigSpec struct { MachineType string @@ -361,8 +370,16 @@ func (mig *GkeMig) Debug() string { } // Nodes returns a list of all nodes that belong to this node group. -func (mig *GkeMig) Nodes() ([]string, error) { - return mig.gkeManager.GetMigNodes(mig) +func (mig *GkeMig) Nodes() ([]cloudprovider.Instance, error) { + instanceNames, err := mig.gkeManager.GetMigNodes(mig) + if err != nil { + return nil, err + } + instances := make([]cloudprovider.Instance, 0, len(instanceNames)) + for _, instanceName := range instanceNames { + instances = append(instances, cloudprovider.Instance{Id: instanceName}) + } + return instances, nil } // Exist checks if the node group really exists on the cloud provider side. Allows to tell the @@ -403,3 +420,37 @@ func (mig *GkeMig) TemplateNodeInfo() (*schedulercache.NodeInfo, error) { nodeInfo.SetNode(node) return nodeInfo, nil } + +// BuildGKE builds a new GKE cloud provider, manager etc. +func BuildGKE(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider { + if do.DiscoverySpecified() { + klog.Fatal("GKE gets nodegroup specification via API, command line specs are not allowed") + } + var config io.ReadCloser + if opts.CloudConfig != "" { + var err error + config, err = os.Open(opts.CloudConfig) + if err != nil { + klog.Fatalf("Couldn't open cloud provider configuration %s: %#v", opts.CloudConfig, err) + } + defer config.Close() + } + + mode := ModeGKE + if opts.NodeAutoprovisioningEnabled { + mode = ModeGKENAP + } + manager, err := CreateGkeManager(config, mode, opts.ClusterName, opts.Regional) + if err != nil { + klog.Fatalf("Failed to create GKE Manager: %v", err) + } + + provider, err := BuildGkeCloudProvider(manager, rl) + if err != nil { + klog.Fatalf("Failed to create GKE cloud provider: %v", err) + } + // Register GKE & GCE API usage metrics. 
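+	// registerMetrics (defined in gke_metrics.go) exposes the gke_request_count counter that the registerRequest calls in the GKE API clients increment.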
+ registerMetrics() + gce.RegisterMetrics() + return provider +} diff --git a/cluster-autoscaler/cloudprovider/gke/gke_cloud_provider_test.go b/cluster-autoscaler/cloudprovider/gke/gke_cloud_provider_test.go index 01ff589000f0..ced4d3e9cf2d 100644 --- a/cluster-autoscaler/cloudprovider/gke/gke_cloud_provider_test.go +++ b/cluster-autoscaler/cloudprovider/gke/gke_cloud_provider_test.go @@ -421,8 +421,8 @@ func TestMig(t *testing.T) { "gce://project1/us-central1-b/gke-cluster-1-default-pool-f7607aac-dck1"}, nil).Once() nodes, err := mig1.Nodes() assert.NoError(t, err) - assert.Equal(t, "gce://project1/us-central1-b/gke-cluster-1-default-pool-f7607aac-9j4g", nodes[0]) - assert.Equal(t, "gce://project1/us-central1-b/gke-cluster-1-default-pool-f7607aac-dck1", nodes[1]) + assert.Equal(t, cloudprovider.Instance{Id: "gce://project1/us-central1-b/gke-cluster-1-default-pool-f7607aac-9j4g"}, nodes[0]) + assert.Equal(t, cloudprovider.Instance{Id: "gce://project1/us-central1-b/gke-cluster-1-default-pool-f7607aac-dck1"}, nodes[1]) mock.AssertExpectationsForObjects(t, gkeManagerMock) // Test Create. diff --git a/cluster-autoscaler/cloudprovider/gke/gke_manager.go b/cluster-autoscaler/cloudprovider/gke/gke_manager.go index 757b7cd30866..a314b01fa8a4 100644 --- a/cluster-autoscaler/cloudprovider/gke/gke_manager.go +++ b/cluster-autoscaler/cloudprovider/gke/gke_manager.go @@ -29,17 +29,18 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/gce" "k8s.io/autoscaler/cluster-autoscaler/config/dynamic" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" + "k8s.io/autoscaler/cluster-autoscaler/utils/units" apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/wait" provider_gce "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" "cloud.google.com/go/compute/metadata" - "github.com/golang/glog" "golang.org/x/oauth2" "golang.org/x/oauth2/google" gce_api "google.golang.org/api/compute/v1" gcfg "gopkg.in/gcfg.v1" + "k8s.io/klog" ) // GcpCloudProviderMode allows to pass information whether the cluster is in NAP mode. @@ -82,32 +83,42 @@ func init() { } } -// GkeManager handles gce communication and data caching. +// GkeManager handles GCE and GKE communication and data caching. type GkeManager interface { - // GetMigSize gets MIG size. - GetMigSize(mig gce.Mig) (int64, error) - // SetMigSize sets MIG size. - SetMigSize(mig gce.Mig, size int64) error - // DeleteInstances deletes the given instances. All instances must be controlled by the same MIG. - DeleteInstances(instances []*gce.GceRef) error - // GetMigForInstance returns MigConfig of the given Instance - GetMigForInstance(instance *gce.GceRef) (gce.Mig, error) - // GetMigNodes returns mig nodes. - GetMigNodes(mig gce.Mig) ([]string, error) - // Refresh updates config by calling GKE API (in GKE mode only). + // Refresh triggers refresh of cached resources. Refresh() error - // GetResourceLimiter returns resource limiter. - GetResourceLimiter() (*cloudprovider.ResourceLimiter, error) // Cleanup cleans up open resources before the cloud provider is destroyed, i.e. go routines etc. Cleanup() error - GetMigs() []*gce.MigInformation - CreateNodePool(mig *GkeMig) (*GkeMig, error) - DeleteNodePool(toBeRemoved *GkeMig) error + + // GetLocation returns cluster's location. GetLocation() string + // GetProjectId returns id of GCE project to which the cluster belongs. GetProjectId() string + // GetClusterName returns the name of the GKE cluster. GetClusterName() string + // GetMigs returns a list of registered MIGs. 
+ GetMigs() []*gce.MigInformation + // GetMigNodes returns mig nodes. + GetMigNodes(mig gce.Mig) ([]string, error) + // GetMigForInstance returns MigConfig of the given Instance + GetMigForInstance(instance *gce.GceRef) (gce.Mig, error) + // GetMigTemplateNode returns a template node for MIG. GetMigTemplateNode(mig *GkeMig) (*apiv1.Node, error) + // GetMigSize gets MIG size. + GetMigSize(mig gce.Mig) (int64, error) + // GetNodeLocations returns a list of locations with nodes. GetNodeLocations() []string + // GetResourceLimiter returns resource limiter. + GetResourceLimiter() (*cloudprovider.ResourceLimiter, error) + + // SetMigSize sets MIG size. + SetMigSize(mig gce.Mig, size int64) error + // DeleteInstances deletes the given instances. All instances must be controlled by the same MIG. + DeleteInstances(instances []*gce.GceRef) error + // CreateNodePool creates a MIG based on blueprint and returns the newly created MIG. + CreateNodePool(mig *GkeMig) (*GkeMig, error) + // DeleteNodePool deletes a MIG from cloud provider. + DeleteNodePool(toBeRemoved *GkeMig) error } // gkeConfigurationCache is used for storing cached cluster configuration. @@ -133,7 +144,6 @@ func (cache *gkeConfigurationCache) getNodeLocations() []string { return locations } -// gkeManagerImpl handles gce communication and data caching. type gkeManagerImpl struct { cache gce.GceCache gkeConfigurationCache gkeConfigurationCache @@ -152,7 +162,7 @@ type gkeManagerImpl struct { regional bool } -// CreateGkeManager constructs gkeManager object. +// CreateGkeManager constructs GkeManager object. func CreateGkeManager(configReader io.Reader, mode GcpCloudProviderMode, clusterName string, regional bool) (GkeManager, error) { // Create Google Compute Engine token. var err error @@ -167,19 +177,19 @@ func CreateGkeManager(configReader io.Reader, mode GcpCloudProviderMode, cluster if configReader != nil { var cfg provider_gce.ConfigFile if err := gcfg.ReadInto(&cfg, configReader); err != nil { - glog.Errorf("Couldn't read config: %v", err) + klog.Errorf("Couldn't read config: %v", err) return nil, err } if cfg.Global.TokenURL == "" { - glog.Warning("Empty tokenUrl in cloud config") + klog.Warning("Empty tokenUrl in cloud config") } else { tokenSource = provider_gce.NewAltTokenSource(cfg.Global.TokenURL, cfg.Global.TokenBody) - glog.V(1).Infof("Using TokenSource from config %#v", tokenSource) + klog.V(1).Infof("Using TokenSource from config %#v", tokenSource) } projectId = cfg.Global.ProjectID location = cfg.Global.LocalZone } else { - glog.V(1).Infof("Using default TokenSource %#v", tokenSource) + klog.V(1).Infof("Using default TokenSource %#v", tokenSource) } if len(projectId) == 0 || len(location) == 0 { // XXX: On GKE discoveredProjectId is hosted master project and @@ -198,7 +208,7 @@ func CreateGkeManager(configReader io.Reader, mode GcpCloudProviderMode, cluster location = discoveredLocation } } - glog.V(1).Infof("GCE projectId=%s location=%s", projectId, location) + klog.V(1).Infof("GCE projectId=%s location=%s", projectId, location) // Create Google Compute Engine service. 
client := oauth2.NewClient(oauth2.NoContext, tokenSource) @@ -232,7 +242,7 @@ func CreateGkeManager(configReader io.Reader, mode GcpCloudProviderMode, cluster return nil, err } manager.GkeService = gkeBetaService - glog.V(1).Info("Using GKE-NAP mode") + klog.V(1).Info("Using GKE-NAP mode") } if err := manager.forceRefresh(); err != nil { @@ -241,14 +251,14 @@ func CreateGkeManager(configReader io.Reader, mode GcpCloudProviderMode, cluster go wait.Until(func() { if err := manager.cache.RegenerateInstancesCache(); err != nil { - glog.Errorf("Error while regenerating Mig cache: %v", err) + klog.Errorf("Error while regenerating Mig cache: %v", err) } }, time.Hour, manager.interrupt) return manager, nil } -// Cleanup closes the channel to signal the go routine to stop that is handling the cache +// Cleanup closes the channel to stop the goroutine refreshing cache. func (m *gkeManagerImpl) Cleanup() error { close(m.interrupt) return nil @@ -256,7 +266,7 @@ func (m *gkeManagerImpl) Cleanup() error { func (m *gkeManagerImpl) assertGKENAP() { if m.mode != ModeGKENAP { - glog.Fatalf("This should run only in GKE mode with autoprovisioning enabled") + klog.Fatalf("This should run only in GKE mode with autoprovisioning enabled") } } @@ -307,7 +317,6 @@ func (m *gkeManagerImpl) GetNodeLocations() []string { return m.gkeConfigurationCache.getNodeLocations() } -// RegisterMig registers mig in GceManager. Returns true if the node group didn't exist before or its config has changed. func (m *gkeManagerImpl) registerMig(mig *GkeMig) bool { changed := m.cache.RegisterMig(mig) if changed { @@ -315,19 +324,19 @@ func (m *gkeManagerImpl) registerMig(mig *GkeMig) bool { // can be scaled up from 0 nodes. // We may never need to do it, so just log error if it fails. if _, err := m.GetMigTemplateNode(mig); err != nil { - glog.Errorf("Can't build node from template for %s, won't be able to scale from 0: %v", mig.GceRef().String(), err) + klog.Errorf("Can't build node from template for %s, won't be able to scale from 0: %v", mig.GceRef().String(), err) } } return changed } +// DeleteNodePool deletes a node pool corresponding to the given MIG. func (m *gkeManagerImpl) DeleteNodePool(toBeRemoved *GkeMig) error { m.assertGKENAP() if !toBeRemoved.Autoprovisioned() { return fmt.Errorf("only autoprovisioned node pools can be deleted") } - // TODO: handle multi-zonal node pools. err := m.GkeService.DeleteNodePool(toBeRemoved.NodePoolName()) if err != nil { return err @@ -335,6 +344,7 @@ func (m *gkeManagerImpl) DeleteNodePool(toBeRemoved *GkeMig) error { return m.refreshClusterResources() } +// CreateNodePool creates a node pool based on provided spec and returns newly created MIG. func (m *gkeManagerImpl) CreateNodePool(mig *GkeMig) (*GkeMig, error) { m.assertGKENAP() @@ -346,7 +356,6 @@ func (m *gkeManagerImpl) CreateNodePool(mig *GkeMig) (*GkeMig, error) { if err != nil { return nil, err } - // TODO(aleksandra-malinowska): support multi-zonal node pools. for _, existingMig := range m.cache.GetMigs() { gkeMig, ok := existingMig.Config.(*GkeMig) if !ok { @@ -354,7 +363,7 @@ func (m *gkeManagerImpl) CreateNodePool(mig *GkeMig) (*GkeMig, error) { // Report error as InternalError since it would signify a // serious bug in autoscaler code. 
errMsg := fmt.Sprintf("Mig %s is not GkeMig: got %v, want GkeMig", existingMig.Config.GceRef().String(), reflect.TypeOf(existingMig.Config)) - glog.Error(errMsg) + klog.Error(errMsg) return nil, errors.NewAutoscalerError(errors.InternalError, errMsg) } if gkeMig.NodePoolName() == mig.NodePoolName() { @@ -384,7 +393,7 @@ func (m *gkeManagerImpl) refreshMachinesCache() error { m.cache.SetMachinesCache(machinesCache) nextRefresh := time.Now() m.machinesCacheLastRefresh = nextRefresh - glog.V(2).Infof("Refreshed machine types, next refresh after %v", nextRefresh) + klog.V(2).Infof("Refreshed machine types, next refresh after %v", nextRefresh) return nil } @@ -399,7 +408,7 @@ func (m *gkeManagerImpl) GetMigSize(mig gce.Mig) (int64, error) { // SetMigSize sets MIG size. func (m *gkeManagerImpl) SetMigSize(mig gce.Mig, size int64) error { - glog.V(0).Infof("Setting mig size %s to %d", mig.Id(), size) + klog.V(0).Infof("Setting mig size %s to %d", mig.Id(), size) return m.GceService.ResizeMig(mig.GceRef(), size) } @@ -429,12 +438,12 @@ func (m *gkeManagerImpl) GetMigs() []*gce.MigInformation { return m.cache.GetMigs() } -// GetMigForInstance returns MigConfig of the given Instance +// GetMigForInstance returns MIG to which the given instance belongs. func (m *gkeManagerImpl) GetMigForInstance(instance *gce.GceRef) (gce.Mig, error) { return m.cache.GetMigForInstance(instance) } -// GetMigNodes returns mig nodes. +// GetMigNodes returns instances that belong to a MIG. func (m *gkeManagerImpl) GetMigNodes(mig gce.Mig) ([]string, error) { instances, err := m.GceService.FetchMigInstances(mig.GceRef()) if err != nil { @@ -447,18 +456,22 @@ func (m *gkeManagerImpl) GetMigNodes(mig gce.Mig) ([]string, error) { return result, nil } +// GetLocation returns cluster's location. func (m *gkeManagerImpl) GetLocation() string { return m.location } +// GetProjectId returns id of GCE project to which the cluster belongs. func (m *gkeManagerImpl) GetProjectId() string { return m.projectId } +// GetClusterName returns the name of GKE cluster. func (m *gkeManagerImpl) GetClusterName() string { return m.clusterName } +// Refresh triggers refresh of cached resources. 
func (m *gkeManagerImpl) Refresh() error { if m.lastRefresh.Add(refreshInterval).After(time.Now()) { return nil @@ -468,15 +481,15 @@ func (m *gkeManagerImpl) Refresh() error { func (m *gkeManagerImpl) forceRefresh() error { if err := m.refreshClusterResources(); err != nil { - glog.Errorf("Failed to refresh GKE cluster resources: %v", err) + klog.Errorf("Failed to refresh GKE cluster resources: %v", err) return err } if err := m.refreshMachinesCache(); err != nil { - glog.Errorf("Failed to fetch machine types: %v", err) + klog.Errorf("Failed to fetch machine types: %v", err) return err } m.lastRefresh = time.Now() - glog.V(2).Infof("Refreshed GCE resources, next refresh after %v", m.lastRefresh.Add(refreshInterval)) + klog.V(2).Infof("Refreshed GCE resources, next refresh after %v", m.lastRefresh.Add(refreshInterval)) return nil } @@ -524,16 +537,16 @@ func (m *gkeManagerImpl) buildMigFromSpec(s *dynamic.NodeGroupSpec) (gce.Mig, er func (m *gkeManagerImpl) refreshResourceLimiter(resourceLimiter *cloudprovider.ResourceLimiter) { if m.mode == ModeGKENAP { if resourceLimiter != nil { - glog.V(2).Infof("Refreshed resource limits: %s", resourceLimiter.String()) + klog.V(2).Infof("Refreshed resource limits: %s", resourceLimiter.String()) m.cache.SetResourceLimiter(resourceLimiter) } else { oldLimits, _ := m.cache.GetResourceLimiter() - glog.Errorf("Resource limits should always be defined in NAP mode, but they appear to be empty. Using possibly outdated limits: %v", oldLimits.String()) + klog.Errorf("Resource limits should always be defined in NAP mode, but they appear to be empty. Using possibly outdated limits: %v", oldLimits.String()) } } } -// GetResourceLimiter returns resource limiter. +// GetResourceLimiter returns resource limiter from cache. func (m *gkeManagerImpl) GetResourceLimiter() (*cloudprovider.ResourceLimiter, error) { return m.cache.GetResourceLimiter() } @@ -547,7 +560,7 @@ func (m *gkeManagerImpl) clearMachinesCache() { m.cache.SetMachinesCache(machinesCache) nextRefresh := time.Now() m.machinesCacheLastRefresh = nextRefresh - glog.V(2).Infof("Cleared machine types cache, next clear after %v", nextRefresh) + klog.V(2).Infof("Cleared machine types cache, next clear after %v", nextRefresh) } // Code borrowed from gce cloud provider. Reuse the original as soon as it becomes public. @@ -574,6 +587,9 @@ func getProjectAndLocation(regional bool) (string, string, error) { return projectID, location, nil } +// GetMigTemplateNode constructs a node: +// - from GCE instance template of the given MIG, if the MIG already exists, +// - from MIG spec, if it doesn't exist, but may be autoprovisioned. 
func (m *gkeManagerImpl) GetMigTemplateNode(mig *GkeMig) (*apiv1.Node, error) { if mig.Exist() { template, err := m.GceService.FetchMigTemplate(mig.GceRef()) @@ -607,7 +623,7 @@ func (m *gkeManagerImpl) getCpuAndMemoryForMachineType(machineType string, zone } m.cache.AddMachineToCache(machineType, zone, machine) } - return machine.GuestCpus, machine.MemoryMb * bytesPerMB, nil + return machine.GuestCpus, machine.MemoryMb * units.MiB, nil } func parseCustomMachineType(machineType string) (cpu, mem int64, err error) { @@ -621,6 +637,6 @@ func parseCustomMachineType(machineType string) (cpu, mem int64, err error) { return 0, 0, fmt.Errorf("failed to parse all params in %s", machineType) } // Mb to bytes - mem = mem * bytesPerMB + mem = mem * units.MiB return } diff --git a/cluster-autoscaler/cloudprovider/gke/gke_manager_test.go b/cluster-autoscaler/cloudprovider/gke/gke_manager_test.go index a3398eb82bf2..997dc04da5cf 100644 --- a/cluster-autoscaler/cloudprovider/gke/gke_manager_test.go +++ b/cluster-autoscaler/cloudprovider/gke/gke_manager_test.go @@ -24,6 +24,7 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/gce" "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" + "k8s.io/autoscaler/cluster-autoscaler/utils/units" . "k8s.io/autoscaler/cluster-autoscaler/utils/test" @@ -526,7 +527,7 @@ func newTestGkeManager(t *testing.T, testServerURL string, mode GcpCloudProvider manager.cache.SetMachinesCache(machinesCache) client := &http.Client{} - gkeAPIEndpoint = &testServerURL + GkeAPIEndpoint = &testServerURL var err error if mode == ModeGKE { manager.GkeService, err = NewAutoscalingGkeClientV1(client, projectId, manager.location, clusterName) @@ -1158,14 +1159,14 @@ func TestGetCpuAndMemoryForMachineType(t *testing.T) { cpu, mem, err := g.getCpuAndMemoryForMachineType("custom-8-2", zoneB) assert.NoError(t, err) assert.Equal(t, int64(8), cpu) - assert.Equal(t, int64(2*bytesPerMB), mem) + assert.Equal(t, int64(2*units.MiB), mem) mock.AssertExpectationsForObjects(t, server) // Standard machine type found in cache. cpu, mem, err = g.getCpuAndMemoryForMachineType("n1-standard-1", zoneB) assert.NoError(t, err) assert.Equal(t, int64(1), cpu) - assert.Equal(t, int64(1*bytesPerMB), mem) + assert.Equal(t, int64(1*units.MiB), mem) mock.AssertExpectationsForObjects(t, server) // Standard machine type not found in cache. @@ -1173,14 +1174,14 @@ func TestGetCpuAndMemoryForMachineType(t *testing.T) { cpu, mem, err = g.getCpuAndMemoryForMachineType("n1-standard-2", zoneB) assert.NoError(t, err) assert.Equal(t, int64(2), cpu) - assert.Equal(t, int64(3840*bytesPerMB), mem) + assert.Equal(t, int64(3840*units.MiB), mem) mock.AssertExpectationsForObjects(t, server) // Standard machine type cached. cpu, mem, err = g.getCpuAndMemoryForMachineType("n1-standard-2", zoneB) assert.NoError(t, err) assert.Equal(t, int64(2), cpu) - assert.Equal(t, int64(3840*bytesPerMB), mem) + assert.Equal(t, int64(3840*units.MiB), mem) mock.AssertExpectationsForObjects(t, server) // Standard machine type not found in the zone. 
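Context for the unit changes above: the provider switches from the old decimal `bytesPerMB` constant to the binary `units.MiB`, so the `<memoryMB>` part of a custom machine type name is now deliberately converted as mebibytes. Below is a quick sketch of that arithmetic, with `parseCustom` as a hypothetical, simplified stand-in for the provider's `parseCustomMachineType`:

```go
package main

import "fmt"

// MiB mirrors cluster-autoscaler/utils/units.MiB: a binary mebibyte (2^20 bytes).
const MiB = int64(1024 * 1024)

// parseCustom is a simplified, hypothetical stand-in for the provider's
// parseCustomMachineType: it reads "custom-<cpus>-<memoryMB>" and, like the
// provider after this change, converts the memory figure using binary MiB
// rather than the old decimal bytesPerMB constant.
func parseCustom(machineType string) (cpu, mem int64, err error) {
	n, err := fmt.Sscanf(machineType, "custom-%d-%d", &cpu, &mem)
	if n != 2 || err != nil {
		return 0, 0, fmt.Errorf("failed to parse all params in %s", machineType)
	}
	return cpu, mem * MiB, nil
}

func main() {
	cpu, mem, err := parseCustom("custom-2-2816")
	fmt.Println(cpu, mem, err) // 2 2952790016 <nil>, i.e. 2816*MiB bytes
}
```

The reserved.go file added below leans on the same MB-as-MiB quirk (see its `mbPerGB` comment) so that the predicted capacity and reservations match what the node actually reports.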
@@ -1195,7 +1196,7 @@ func TestParseCustomMachineType(t *testing.T) { cpu, mem, err := parseCustomMachineType("custom-2-2816") assert.NoError(t, err) assert.Equal(t, int64(2), cpu) - assert.Equal(t, int64(2816*bytesPerMB), mem) + assert.Equal(t, int64(2816*units.MiB), mem) cpu, mem, err = parseCustomMachineType("other-a2-2816") assert.Error(t, err) cpu, mem, err = parseCustomMachineType("other-2-2816") diff --git a/cluster-autoscaler/cloudprovider/gke/gke_metrics.go b/cluster-autoscaler/cloudprovider/gke/gke_metrics.go new file mode 100644 index 000000000000..3ac72a8a95f0 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/gke/gke_metrics.go @@ -0,0 +1,46 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gke + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +const ( + caNamespace = "cluster_autoscaler" +) + +var ( + /**** Metrics related to GKE API usage ****/ + requestCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: caNamespace, + Name: "gke_request_count", + Help: "Counter of GKE API requests for each verb and API resource.", + }, []string{"resource", "verb"}, + ) +) + +// registerMetrics registers all GKE metrics. +func registerMetrics() { + prometheus.MustRegister(requestCounter) +} + +// registerRequest registers request to GKE API. +func registerRequest(resource string, verb string) { + requestCounter.WithLabelValues(resource, verb).Add(1.0) +} diff --git a/cluster-autoscaler/cloudprovider/gke/reserved.go b/cluster-autoscaler/cloudprovider/gke/reserved.go new file mode 100644 index 000000000000..9240d40b3e55 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/gke/reserved.go @@ -0,0 +1,120 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gke + +// There should be no imports as it is used standalone in e2e tests + +const ( + // MiB - MebiByte size (2^20) + MiB = 1024 * 1024 + // Duplicating an upstream bug treating GB as 1000*MiB (we need to predict the end result accurately). 
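+	// (i.e. 1 "GB" here becomes 1000*1024*1024 = 1,048,576,000 bytes rather than 10^9 bytes).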
+ mbPerGB = 1000 + millicoresPerCore = 1000 +) + +// PredictKubeReservedMemory calculates kube-reserved memory based on physical memory +func PredictKubeReservedMemory(physicalMemory int64) int64 { + return memoryReservedMiB(physicalMemory/MiB) * MiB +} + +// PredictKubeReservedCpuMillicores calculates kube-reserved cpu based on physical cpu +func PredictKubeReservedCpuMillicores(physicalCpuMillicores int64) int64 { + return cpuReservedMillicores(physicalCpuMillicores) +} + +type allocatableBracket struct { + threshold int64 + marginalReservedRate float64 +} + +func memoryReservedMiB(memoryCapacityMiB int64) int64 { + if memoryCapacityMiB <= mbPerGB { + if memoryCapacityMiB <= 0 { + return 0 + } + // The minimum reservation required for proper node operation is 255 MiB. + // For any node with less than 1 GB of memory use the minimum. Nodes with + // more memory will use the existing reservation thresholds. + return 255 + } + return calculateReserved(memoryCapacityMiB, []allocatableBracket{ + { + threshold: 0, + marginalReservedRate: 0.25, + }, + { + threshold: 4 * mbPerGB, + marginalReservedRate: 0.2, + }, + { + threshold: 8 * mbPerGB, + marginalReservedRate: 0.1, + }, + { + threshold: 16 * mbPerGB, + marginalReservedRate: 0.06, + }, + { + threshold: 128 * mbPerGB, + marginalReservedRate: 0.02, + }, + }) +} + +func cpuReservedMillicores(cpuCapacityMillicores int64) int64 { + return calculateReserved(cpuCapacityMillicores, []allocatableBracket{ + { + threshold: 0, + marginalReservedRate: 0.06, + }, + { + threshold: 1 * millicoresPerCore, + marginalReservedRate: 0.01, + }, + { + threshold: 2 * millicoresPerCore, + marginalReservedRate: 0.005, + }, + { + threshold: 4 * millicoresPerCore, + marginalReservedRate: 0.0025, + }, + }) +} + +// calculateReserved calculates reserved using capacity and a series of +// brackets as follows: the marginalReservedRate applies to all capacity +// greater than the bracket, but less than the next bracket. For example, if +// the first bracket is threshold: 0, rate:0.1, and the second bracket has +// threshold: 100, rate: 0.4, a capacity of 100 results in a reserved of +// 100*0.1 = 10, but a capacity of 200 results in a reserved of +// 10 + (200-100)*.4 = 50. Using brackets with marginal rates ensures that as +// capacity increases, reserved always increases, and never decreases. +func calculateReserved(capacity int64, brackets []allocatableBracket) int64 { + var reserved float64 + for i, bracket := range brackets { + c := capacity + if i < len(brackets)-1 && brackets[i+1].threshold < capacity { + c = brackets[i+1].threshold + } + additionalReserved := float64(c-bracket.threshold) * bracket.marginalReservedRate + if additionalReserved > 0 { + reserved += additionalReserved + } + } + return int64(reserved) +} diff --git a/cluster-autoscaler/cloudprovider/gke/reserved_test.go b/cluster-autoscaler/cloudprovider/gke/reserved_test.go new file mode 100644 index 000000000000..98592dc57219 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/gke/reserved_test.go @@ -0,0 +1,125 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gke + +import ( + "testing" +) + +func TestPredictKubeReserved(t *testing.T) { + type testCase struct { + name string + function func(capacity int64) int64 + capacity int64 + expectedReserved int64 + } + testCases := []testCase{ + { + name: "zero memory capacity", + function: PredictKubeReservedMemory, + capacity: 0, + expectedReserved: 0, + }, + { + name: "f1-micro", + function: PredictKubeReservedMemory, + capacity: 600 * MiB, + expectedReserved: 255 * MiB, + }, + { + name: "between memory thresholds", + function: PredictKubeReservedMemory, + capacity: 2000 * MiB, + expectedReserved: 500 * MiB, + }, + { + name: "at a memory threshold boundary", + function: PredictKubeReservedMemory, + capacity: 8000 * MiB, + expectedReserved: 1800 * MiB, + }, + { + name: "exceeds highest memory threshold", + function: PredictKubeReservedMemory, + capacity: 200 * 1000 * MiB, + expectedReserved: 10760 * MiB, + }, + { + name: "cpu sanity check", + function: PredictKubeReservedCpuMillicores, + capacity: 4000, + expectedReserved: 80, + }, + } + for _, tc := range testCases { + if actualReserved := tc.function(tc.capacity); actualReserved != tc.expectedReserved { + t.Errorf("Test case: %s, got f(%d) = %d, want %d", tc.name, tc.capacity, actualReserved, tc.expectedReserved) + } + } +} + +func TestCalculateReserved(t *testing.T) { + type testCase struct { + name string + function func(capacity int64) int64 + capacity int64 + expectedReserved int64 + } + testCases := []testCase{ + { + name: "zero memory capacity", + function: memoryReservedMiB, + capacity: 0, + expectedReserved: 0, + }, + { + name: "f1-micro", + function: memoryReservedMiB, + capacity: 600, + expectedReserved: 255, + }, + { + name: "between memory thresholds", + function: memoryReservedMiB, + capacity: 2000, + expectedReserved: 500, + }, + { + name: "at a memory threshold boundary", + function: memoryReservedMiB, + capacity: 8000, + expectedReserved: 1800, + }, + { + name: "exceeds highest memory threshold", + function: memoryReservedMiB, + capacity: 200 * 1000, + expectedReserved: 10760, + }, + { + name: "cpu sanity check", + function: cpuReservedMillicores, + capacity: 4 * millicoresPerCore, + expectedReserved: 80, + }, + } + for _, tc := range testCases { + if actualReserved := tc.function(tc.capacity); actualReserved != tc.expectedReserved { + t.Errorf("Test case: %s, got f(%d) = %d, want %d", tc.name, tc.capacity, actualReserved, tc.expectedReserved) + } + } +} diff --git a/cluster-autoscaler/cloudprovider/gke/templates.go b/cluster-autoscaler/cloudprovider/gke/templates.go index 158655fb79e1..773fa9442424 100644 --- a/cluster-autoscaler/cloudprovider/gke/templates.go +++ b/cluster-autoscaler/cloudprovider/gke/templates.go @@ -20,25 +20,14 @@ import ( "fmt" "math/rand" - "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" - "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/gce" - apiv1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/gce" "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" ) -const ( - mbPerGB = 1000 - bytesPerMB = 1000 * 1000 - millicoresPerCore = 1000 - // Kubelet "evictionHard: {memory.available}" is subtracted from - // capacity when calculating allocatable (on top of kube-reserved).
- // We don't have a good place to get it from, but it has been hard-coded - // to 100Mi since at least k8s 1.4. - kubeletEvictionHardMemory = 100 * 1024 * 1024 -) - // GkeTemplateBuilder builds templates for GKE cloud provider. type GkeTemplateBuilder struct { gce.GceTemplateBuilder @@ -60,7 +49,7 @@ func (t *GkeTemplateBuilder) BuildNodeFromMigSpec(mig *GkeMig, cpu int64, mem in Labels: map[string]string{}, } - capacity, err := t.BuildCapacity(mig.Spec().MachineType, nil, mig.GceRef().Zone, cpu, mem) + capacity, err := t.BuildCapacity(cpu, mem, nil) if err != nil { return nil, err } @@ -69,9 +58,11 @@ func (t *GkeTemplateBuilder) BuildNodeFromMigSpec(mig *GkeMig, cpu int64, mem in capacity[gpu.ResourceNvidiaGPU] = gpuRequest.DeepCopy() } + kubeReserved := t.BuildKubeReserved(cpu, mem) + node.Status = apiv1.NodeStatus{ Capacity: capacity, - Allocatable: t.BuildAllocatableFromCapacity(capacity), + Allocatable: t.CalculateAllocatable(capacity, kubeReserved), } labels, err := buildLabelsForAutoprovisionedMig(mig, nodeName) @@ -87,6 +78,17 @@ func (t *GkeTemplateBuilder) BuildNodeFromMigSpec(mig *GkeMig, cpu int64, mem in return &node, nil } +// BuildKubeReserved builds kube reserved resources based on node physical resources. +// See calculateReserved for more details +func (t *GkeTemplateBuilder) BuildKubeReserved(cpu, physicalMemory int64) apiv1.ResourceList { + cpuReservedMillicores := PredictKubeReservedCpuMillicores(cpu * 1000) + memoryReserved := PredictKubeReservedMemory(physicalMemory) + reserved := apiv1.ResourceList{} + reserved[apiv1.ResourceCPU] = *resource.NewMilliQuantity(cpuReservedMillicores, resource.DecimalSI) + reserved[apiv1.ResourceMemory] = *resource.NewQuantity(memoryReserved, resource.BinarySI) + return reserved +} + func buildLabelsForAutoprovisionedMig(mig *GkeMig, nodeName string) (map[string]string, error) { // GenericLabels labels, err := gce.BuildGenericLabels(mig.GceRef(), mig.Spec().MachineType, nodeName) diff --git a/cluster-autoscaler/cloudprovider/gke/templates_test.go b/cluster-autoscaler/cloudprovider/gke/templates_test.go index 22d05f14867c..1ea427abdafe 100644 --- a/cluster-autoscaler/cloudprovider/gke/templates_test.go +++ b/cluster-autoscaler/cloudprovider/gke/templates_test.go @@ -18,121 +18,112 @@ package gke import ( "fmt" + "strings" "testing" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/gce" gpuUtils "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" + "k8s.io/autoscaler/cluster-autoscaler/utils/units" gce_api "google.golang.org/api/compute/v1" apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" - "k8s.io/kubernetes/pkg/quota" + quota "k8s.io/kubernetes/pkg/quota/v1" "github.com/stretchr/testify/assert" ) func TestBuildNodeFromTemplateSetsResources(t *testing.T) { type testCase struct { - kubeEnv string - name string - machineType string - accelerators []*gce_api.AcceleratorConfig - mig gce.Mig - capacityCpu int64 - capacityMemory int64 - allocatableCpu string - allocatableMemory string - gpuCount int64 - expectedErr bool + scenario string + kubeEnv string + accelerators []*gce_api.AcceleratorConfig + mig gce.Mig + physicalCpu int64 + physicalMemory int64 + kubeReserved bool + reservedCpu string + reservedMemory string + expectedGpuCount int64 + expectedErr bool } - testCases := []testCase{{ - kubeEnv: "ENABLE_NODE_PROBLEM_DETECTOR: 'daemonset'\n" + - "NODE_LABELS: 
a=b,c=d,cloud.google.com/gke-nodepool=pool-3,cloud.google.com/gke-preemptible=true\n" + - "DNS_SERVER_IP: '10.0.0.10'\n" + - fmt.Sprintf("KUBELET_TEST_ARGS: --experimental-allocatable-ignore-eviction --kube-reserved=cpu=1000m,memory=%v\n", 1024*1024) + - "NODE_TAINTS: 'dedicated=ml:NoSchedule,test=dev:PreferNoSchedule,a=b:c'\n", - name: "nodeName", - machineType: "custom-8-2", - accelerators: []*gce_api.AcceleratorConfig{ - {AcceleratorType: "nvidia-tesla-k80", AcceleratorCount: 3}, - {AcceleratorType: "nvidia-tesla-p100", AcceleratorCount: 8}, - }, - mig: &GkeMig{ - gceRef: gce.GceRef{ - Name: "some-name", - Project: "some-proj", - Zone: "us-central1-b", + testCases := []testCase{ + { + scenario: "kube-reserved present in kube-env", + kubeEnv: "ENABLE_NODE_PROBLEM_DETECTOR: 'daemonset'\n" + + "NODE_LABELS: a=b,c=d,cloud.google.com/gke-nodepool=pool-3,cloud.google.com/gke-preemptible=true\n" + + "DNS_SERVER_IP: '10.0.0.10'\n" + + fmt.Sprintf("KUBELET_TEST_ARGS: --experimental-allocatable-ignore-eviction --kube-reserved=cpu=1000m,memory=%v\n", 1*units.MiB) + + "NODE_TAINTS: 'dedicated=ml:NoSchedule,test=dev:PreferNoSchedule,a=b:c'\n", + accelerators: []*gce_api.AcceleratorConfig{ + {AcceleratorType: "nvidia-tesla-k80", AcceleratorCount: 3}, + {AcceleratorType: "nvidia-tesla-p100", AcceleratorCount: 8}, }, + physicalCpu: 8, + physicalMemory: 200 * units.MiB, + kubeReserved: true, + reservedCpu: "1000m", + reservedMemory: fmt.Sprintf("%v", 1*units.MiB), + expectedGpuCount: 11, + expectedErr: false, }, - capacityCpu: 8, - capacityMemory: 200 * 1024 * 1024, - allocatableCpu: "7000m", - allocatableMemory: fmt.Sprintf("%v", 99*1024*1024), - gpuCount: 11, - expectedErr: false, - }, { + scenario: "no kube-reserved in kube-env", kubeEnv: "ENABLE_NODE_PROBLEM_DETECTOR: 'daemonset'\n" + "NODE_LABELS: a=b,c=d,cloud.google.com/gke-nodepool=pool-3,cloud.google.com/gke-preemptible=true\n" + "DNS_SERVER_IP: '10.0.0.10'\n" + "NODE_TAINTS: 'dedicated=ml:NoSchedule,test=dev:PreferNoSchedule,a=b:c'\n", - name: "nodeName", - machineType: "custom-8-2", - mig: &GkeMig{ - gceRef: gce.GceRef{ - Name: "some-name", - Project: "some-proj", - Zone: "us-central1-b", - }, - }, - capacityCpu: 8, - capacityMemory: 2 * 1024 * 1024, - allocatableCpu: "8000m", - allocatableMemory: fmt.Sprintf("%v", 2*1024*1024), - expectedErr: false, + physicalCpu: 8, + physicalMemory: 200 * units.MiB, + kubeReserved: false, + expectedGpuCount: 11, + expectedErr: false, }, { + scenario: "totally messed up kube-env", kubeEnv: "This kube-env is totally messed up", - name: "nodeName", - machineType: "custom-8-2", - mig: &GkeMig{ + expectedErr: true, + }, + } + for _, tc := range testCases { + t.Run(tc.scenario, func(t *testing.T) { + tb := &GkeTemplateBuilder{} + mig := &GkeMig{ gceRef: gce.GceRef{ Name: "some-name", Project: "some-proj", Zone: "us-central1-b", }, - }, - expectedErr: true, - }, - } - for _, tc := range testCases { - tb := &GkeTemplateBuilder{} - template := &gce_api.InstanceTemplate{ - Name: tc.name, - Properties: &gce_api.InstanceProperties{ - GuestAccelerators: tc.accelerators, - Metadata: &gce_api.Metadata{ - Items: []*gce_api.MetadataItems{{Key: "kube-env", Value: &tc.kubeEnv}}, + } + template := &gce_api.InstanceTemplate{ + Name: "node-name", + Properties: &gce_api.InstanceProperties{ + GuestAccelerators: tc.accelerators, + Metadata: &gce_api.Metadata{ + Items: []*gce_api.MetadataItems{{Key: "kube-env", Value: &tc.kubeEnv}}, + }, + MachineType: "irrelevant-type", }, - MachineType: tc.machineType, - }, - } - node, err := 
tb.BuildNodeFromTemplate(tc.mig, template, tc.capacityCpu, tc.capacityMemory) - if tc.expectedErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - podsQuantity, _ := resource.ParseQuantity("110") - capacity, err := makeResourceList(fmt.Sprintf("%dm", tc.capacityCpu*1000), fmt.Sprintf("%v", tc.capacityMemory), tc.gpuCount) - capacity[apiv1.ResourcePods] = podsQuantity - assert.NoError(t, err) - allocatable, err := makeResourceList(tc.allocatableCpu, tc.allocatableMemory, tc.gpuCount) - allocatable[apiv1.ResourcePods] = podsQuantity - assert.NoError(t, err) - assertEqualResourceLists(t, "Capacity", capacity, node.Status.Capacity) - assertEqualResourceLists(t, "Allocatable", allocatable, node.Status.Allocatable) - } + } + node, err := tb.BuildNodeFromTemplate(mig, template, tc.physicalCpu, tc.physicalMemory) + if tc.expectedErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + capacity, err := tb.BuildCapacity(tc.physicalCpu, tc.physicalMemory, tc.accelerators) + assert.NoError(t, err) + assertEqualResourceLists(t, "Capacity", capacity, node.Status.Capacity) + if !tc.kubeReserved { + assertEqualResourceLists(t, "Allocatable", capacity, node.Status.Allocatable) + } else { + reserved, err := makeResourceList(tc.reservedCpu, tc.reservedMemory, 0) + assert.NoError(t, err) + allocatable := tb.CalculateAllocatable(capacity, reserved) + assertEqualResourceLists(t, "Allocatable", allocatable, node.Status.Allocatable) + } + } + }) } } @@ -237,36 +228,32 @@ func TestBuildAllocatableFromKubeEnv(t *testing.T) { } } -func TestBuildAllocatableFromCapacity(t *testing.T) { +func TestBuildKubeReserved(t *testing.T) { type testCase struct { - capacityCpu string - capacityMemory string - allocatableCpu string - allocatableMemory string - gpuCount int64 + physicalCpu int64 + reservedCpu string + physicalMemory int64 + reservedMemory string } testCases := []testCase{{ - capacityCpu: "16000m", - capacityMemory: fmt.Sprintf("%v", 1*mbPerGB*bytesPerMB), - allocatableCpu: "15890m", + physicalCpu: 16, + reservedCpu: "110m", // Below threshold for reserving memory - allocatableMemory: fmt.Sprintf("%v", 1*mbPerGB*bytesPerMB-kubeletEvictionHardMemory), - gpuCount: 1, + physicalMemory: units.GB, + reservedMemory: fmt.Sprintf("%v", 255*units.MiB), }, { - capacityCpu: "500m", - capacityMemory: fmt.Sprintf("%v", 1.1*mbPerGB*bytesPerMB), - allocatableCpu: "470m", - // Final 1024*1024 because we're duplicating upstream bug using MB as MiB - allocatableMemory: fmt.Sprintf("%v", 1.1*mbPerGB*bytesPerMB-0.25*1.1*mbPerGB*1024*1024-kubeletEvictionHardMemory), + physicalCpu: 1, + reservedCpu: "60m", + // 10760Mi = 0.25*4000Mi + 0.2*4000Mi + 0.1*8000Mi + 0.06*112000Mi + 0.02*72000Mi + physicalMemory: 200 * 1000 * units.MiB, + reservedMemory: fmt.Sprintf("%v", 10760*units.MiB), }} for _, tc := range testCases { tb := GkeTemplateBuilder{} - capacity, err := makeResourceList(tc.capacityCpu, tc.capacityMemory, tc.gpuCount) - assert.NoError(t, err) - expectedAllocatable, err := makeResourceList(tc.allocatableCpu, tc.allocatableMemory, tc.gpuCount) + expectedReserved, err := makeResourceList(tc.reservedCpu, tc.reservedMemory, 0) assert.NoError(t, err) - allocatable := tb.BuildAllocatableFromCapacity(capacity) - assertEqualResourceLists(t, "Allocatable", expectedAllocatable, allocatable) + kubeReserved := tb.BuildKubeReserved(tc.physicalCpu, tc.physicalMemory) + assertEqualResourceLists(t, "Kube reserved", expectedReserved, kubeReserved) } } @@ -293,5 +280,24 @@ func makeResourceList(cpu string, memory string, gpu 
int64) (apiv1.ResourceList, } func assertEqualResourceLists(t *testing.T, name string, expected, actual apiv1.ResourceList) { - assert.True(t, quota.V1Equals(expected, actual), "%q unequal:\nExpected:%v\nActual:%v", name, expected, actual) + t.Helper() + assert.True(t, quota.V1Equals(expected, actual), + "%q unequal:\nExpected: %v\nActual: %v", name, stringifyResourceList(expected), stringifyResourceList(actual)) +} + +func stringifyResourceList(resourceList apiv1.ResourceList) string { + resourceNames := []apiv1.ResourceName{ + apiv1.ResourcePods, apiv1.ResourceCPU, gpuUtils.ResourceNvidiaGPU, apiv1.ResourceMemory, apiv1.ResourceEphemeralStorage} + var results []string + for _, name := range resourceNames { + quantity, found := resourceList[name] + if found { + value := quantity.Value() + if name == apiv1.ResourceCPU { + value = quantity.MilliValue() + } + results = append(results, fmt.Sprintf("%v: %v", string(name), value)) + } + } + return strings.Join(results, ", ") } diff --git a/cluster-autoscaler/cloudprovider/kubemark/kubemark_linux.go b/cluster-autoscaler/cloudprovider/kubemark/kubemark_linux.go index 24b5bc05d7c5..53158c4c9c6c 100644 --- a/cluster-autoscaler/cloudprovider/kubemark/kubemark_linux.go +++ b/cluster-autoscaler/cloudprovider/kubemark/kubemark_linux.go @@ -27,12 +27,17 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" + "k8s.io/autoscaler/cluster-autoscaler/config" "k8s.io/autoscaler/cluster-autoscaler/config/dynamic" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" + "k8s.io/client-go/informers" + kubeclient "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" "k8s.io/kubernetes/pkg/kubemark" schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -68,7 +73,7 @@ func (kubemark *KubemarkCloudProvider) addNodeGroup(spec string) error { if err != nil { return err } - glog.V(2).Infof("adding node group: %s", nodeGroup.Name) + klog.V(2).Infof("adding node group: %s", nodeGroup.Name) kubemark.nodeGroups = append(kubemark.nodeGroups, nodeGroup) return nil } @@ -130,6 +135,11 @@ func (kubemark *KubemarkCloudProvider) Refresh() error { return nil } +// GetInstanceID gets the instance ID for the specified node. +func (kubemark *KubemarkCloudProvider) GetInstanceID(node *apiv1.Node) string { + return node.Spec.ProviderID +} + // Cleanup cleans up all resources before the cloud provider is removed func (kubemark *KubemarkCloudProvider) Cleanup() error { return nil @@ -164,16 +174,16 @@ func (nodeGroup *NodeGroup) Debug() string { } // Nodes returns a list of all nodes that belong to this node group. -func (nodeGroup *NodeGroup) Nodes() ([]string, error) { - ids := make([]string, 0) +func (nodeGroup *NodeGroup) Nodes() ([]cloudprovider.Instance, error) { + instances := make([]cloudprovider.Instance, 0) nodes, err := nodeGroup.kubemarkController.GetNodeNamesForNodeGroup(nodeGroup.Name) if err != nil { - return ids, err + return instances, err } for _, node := range nodes { - ids = append(ids, ":////"+node) + instances = append(instances, cloudprovider.Instance{Id: ":////" + node}) } - return ids, nil + return instances, nil } // DeleteNodes deletes the specified nodes from the node group. @@ -279,3 +289,44 @@ func buildNodeGroup(value string, kubemarkController *kubemark.KubemarkControlle return nodeGroup, nil } + +// BuildKubemark builds Kubemark cloud provider. 
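+// It loads the external (real) cluster config in-cluster, loads the kubemark
+// cluster kubeconfig from /kubeconfig/cluster_autoscaler.kubeconfig, and starts
+// a node informer plus a KubemarkController before constructing the provider.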
+func BuildKubemark(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider { + externalConfig, err := rest.InClusterConfig() + if err != nil { + klog.Fatalf("Failed to get kubeclient config for external cluster: %v", err) + } + + kubemarkConfig, err := clientcmd.BuildConfigFromFlags("", "/kubeconfig/cluster_autoscaler.kubeconfig") + if err != nil { + klog.Fatalf("Failed to get kubeclient config for kubemark cluster: %v", err) + } + + stop := make(chan struct{}) + + externalClient := kubeclient.NewForConfigOrDie(externalConfig) + kubemarkClient := kubeclient.NewForConfigOrDie(kubemarkConfig) + + externalInformerFactory := informers.NewSharedInformerFactory(externalClient, 0) + kubemarkInformerFactory := informers.NewSharedInformerFactory(kubemarkClient, 0) + kubemarkNodeInformer := kubemarkInformerFactory.Core().V1().Nodes() + go kubemarkNodeInformer.Informer().Run(stop) + + kubemarkController, err := kubemark.NewKubemarkController(externalClient, externalInformerFactory, + kubemarkClient, kubemarkNodeInformer) + if err != nil { + klog.Fatalf("Failed to create Kubemark controller: %v", err) + } + + externalInformerFactory.Start(stop) + if !kubemarkController.WaitForCacheSync(stop) { + klog.Fatalf("Failed to sync caches for kubemark controller") + } + go kubemarkController.Run(stop) + + provider, err := BuildKubemarkCloudProvider(kubemarkController, do.NodeGroupSpecs, rl) + if err != nil { + klog.Fatalf("Failed to create Kubemark cloud provider: %v", err) + } + return provider +} diff --git a/cluster-autoscaler/cloudprovider/kubemark/kubemark_other.go b/cluster-autoscaler/cloudprovider/kubemark/kubemark_other.go index 397d83905b1b..e54c8a451368 100644 --- a/cluster-autoscaler/cloudprovider/kubemark/kubemark_other.go +++ b/cluster-autoscaler/cloudprovider/kubemark/kubemark_other.go @@ -24,7 +24,9 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" + "k8s.io/autoscaler/cluster-autoscaler/config" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" + "k8s.io/klog" ) const ( @@ -73,7 +75,18 @@ func (kubemark *KubemarkCloudProvider) Refresh() error { return cloudprovider.ErrNotImplemented } +// GetInstanceID gets the instance ID for the specified node. +func (kubemark *KubemarkCloudProvider) GetInstanceID(node *apiv1.Node) string { + return "" +} + // Cleanup cleans up all resources before the cloud provider is removed func (kubemark *KubemarkCloudProvider) Cleanup() error { return cloudprovider.ErrNotImplemented } + +// BuildKubemark builds Kubemark cloud provider.
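+// Kubemark is only supported on Linux, so this non-Linux stub fails fast.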
+func BuildKubemark(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider { + klog.Fatal("Failed to create Kubemark cloud provider: only supported on Linux") + return nil +} diff --git a/cluster-autoscaler/cloudprovider/openshiftmachineapi/machineapi_controller.go b/cluster-autoscaler/cloudprovider/openshiftmachineapi/machineapi_controller.go index feee68ce3fca..4a21b7e332c7 100644 --- a/cluster-autoscaler/cloudprovider/openshiftmachineapi/machineapi_controller.go +++ b/cluster-autoscaler/cloudprovider/openshiftmachineapi/machineapi_controller.go @@ -19,7 +19,6 @@ package openshiftmachineapi import ( "fmt" - "github.com/golang/glog" "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" clusterclient "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset" clusterinformers "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions" @@ -31,6 +30,7 @@ import ( kubeinformers "k8s.io/client-go/informers" kubeclient "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" + "k8s.io/klog" ) const ( @@ -133,7 +133,7 @@ func (c *machineController) run(stopCh <-chan struct{}) error { c.kubeInformerFactory.Start(stopCh) c.clusterInformerFactory.Start(stopCh) - glog.V(4).Infof("waiting for caches to sync") + klog.V(4).Infof("waiting for caches to sync") if !cache.WaitForCacheSync(stopCh, c.nodeInformer.HasSynced, c.machineInformer.Informer().HasSynced, @@ -268,11 +268,11 @@ func (c *machineController) machineSetNodeNames(machineSet *v1beta1.MachineSet) for _, machine := range machines { if machine.Status.NodeRef == nil { - glog.V(4).Infof("Status.NodeRef of machine %q is currently nil", machine.Name) + klog.V(4).Infof("Status.NodeRef of machine %q is currently nil", machine.Name) continue } if machine.Status.NodeRef.Kind != "Node" { - glog.Errorf("Status.NodeRef of machine %q does not reference a node (rather %q)", machine.Name, machine.Status.NodeRef.Kind) + klog.Errorf("Status.NodeRef of machine %q does not reference a node (rather %q)", machine.Name, machine.Status.NodeRef.Kind) continue } @@ -286,7 +286,7 @@ func (c *machineController) machineSetNodeNames(machineSet *v1beta1.MachineSet) } } - glog.V(4).Infof("nodegroup %s has nodes %v", machineSet.Name, nodes) + klog.V(4).Infof("nodegroup %s has nodes %v", machineSet.Name, nodes) return nodes, nil } @@ -422,6 +422,6 @@ func (c *machineController) nodeGroupForNode(node *apiv1.Node) (cloudprovider.No return nil, nil } - glog.V(4).Infof("node %q is in nodegroup %q", node.Name, machineSet.Name) + klog.V(4).Infof("node %q is in nodegroup %q", node.Name, machineSet.Name) return nodegroup, nil } diff --git a/cluster-autoscaler/cloudprovider/openshiftmachineapi/machineapi_nodegroup.go b/cluster-autoscaler/cloudprovider/openshiftmachineapi/machineapi_nodegroup.go index 1e966c8dc112..6ca556aeb609 100644 --- a/cluster-autoscaler/cloudprovider/openshiftmachineapi/machineapi_nodegroup.go +++ b/cluster-autoscaler/cloudprovider/openshiftmachineapi/machineapi_nodegroup.go @@ -162,8 +162,20 @@ func (ng *nodegroup) Debug() string { } // Nodes returns a list of all nodes that belong to this node group. 
-func (ng *nodegroup) Nodes() ([]string, error) { - return ng.scalableResource.Nodes() +func (ng *nodegroup) Nodes() ([]cloudprovider.Instance, error) { + nodes, err := ng.scalableResource.Nodes() + if err != nil { + return nil, err + } + + instances := make([]cloudprovider.Instance, len(nodes)) + for i := range nodes { + instances[i] = cloudprovider.Instance{ + Id: nodes[i], + } + } + + return instances, nil } // TemplateNodeInfo returns a schedulercache.NodeInfo structure of an diff --git a/cluster-autoscaler/cloudprovider/openshiftmachineapi/machineapi_nodegroup_test.go b/cluster-autoscaler/cloudprovider/openshiftmachineapi/machineapi_nodegroup_test.go index 184ac0ef9329..8e3ce06f2c9d 100644 --- a/cluster-autoscaler/cloudprovider/openshiftmachineapi/machineapi_nodegroup_test.go +++ b/cluster-autoscaler/cloudprovider/openshiftmachineapi/machineapi_nodegroup_test.go @@ -601,11 +601,13 @@ func TestNodeGroupDeleteNodes(t *testing.T) { t.Fatalf("expected len=%v, got len=%v", len(testObjs.nodes), len(nodeNames)) } - sort.Strings(nodeNames) + sort.SliceStable(nodeNames, func(i, j int) bool { + return nodeNames[i].Id < nodeNames[j].Id + }) for i := 0; i < len(nodeNames); i++ { - if nodeNames[i] != testObjs.nodes[i].Spec.ProviderID { - t.Fatalf("expected %q, got %q", testObjs.nodes[i].Spec.ProviderID, nodeNames[i]) + if nodeNames[i].Id != testObjs.nodes[i].Spec.ProviderID { + t.Fatalf("expected %q, got %q", testObjs.nodes[i].Spec.ProviderID, nodeNames[i].Id) } } diff --git a/cluster-autoscaler/cloudprovider/openshiftmachineapi/machineapi_provider.go b/cluster-autoscaler/cloudprovider/openshiftmachineapi/machineapi_provider.go index 5a67c7f9a587..250e9bc150ee 100644 --- a/cluster-autoscaler/cloudprovider/openshiftmachineapi/machineapi_provider.go +++ b/cluster-autoscaler/cloudprovider/openshiftmachineapi/machineapi_provider.go @@ -17,10 +17,8 @@ limitations under the License. package openshiftmachineapi import ( - "fmt" "os" - "github.com/golang/glog" clusterclientset "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset" apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -30,6 +28,7 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" + "k8s.io/klog" ) const ( @@ -56,11 +55,11 @@ func (p *provider) GetResourceLimiter() (*cloudprovider.ResourceLimiter, error) func (p *provider) NodeGroups() []cloudprovider.NodeGroup { nodegroups, err := p.controller.nodeGroups() if err != nil { - glog.Errorf("error getting node groups: %v", err) + klog.Errorf("error getting node groups: %v", err) return nil } for _, ng := range nodegroups { - glog.V(4).Infof("discovered node group: %s", ng.Debug()) + klog.V(4).Infof("discovered node group: %s", ng.Debug()) } return nodegroups } @@ -95,6 +94,11 @@ func (p *provider) Refresh() error { return nil } +// GetInstanceID gets the instance ID for the specified node. +func (p *provider) GetInstanceID(node *apiv1.Node) string { + return node.Spec.ProviderID +} + func newProvider( name string, rl *cloudprovider.ResourceLimiter, @@ -107,38 +111,37 @@ func newProvider( }, nil } -// BuildCloudProvider builds CloudProvider implementation for machine api. 
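`BuildCloudProvider` is replaced just below by `BuildOpenShiftMachineAPI`, matching the builder shape the other providers converge on in this patch: take the autoscaling options, discovery options, and resource limiter; return a `cloudprovider.CloudProvider`; and terminate via `klog.Fatal` on any construction failure instead of propagating an error. A sketch of that shared shape with stand-in types; the dispatch table is hypothetical, since the real wiring lives in the builder package outside this diff:

```go
package main

import "fmt"

// Stand-ins for the real types; only the shapes matter here.
type (
	AutoscalingOptions        struct{ CloudProviderName string }
	NodeGroupDiscoveryOptions struct{}
	ResourceLimiter           struct{}
	CloudProvider             interface{ Name() string }
)

// BuilderFunc is the signature every provider builder now shares. Failure
// handling happens inside the builder (klog.Fatal), so there is no error
// return for callers to thread through.
type BuilderFunc func(AutoscalingOptions, NodeGroupDiscoveryOptions, *ResourceLimiter) CloudProvider

type fakeProvider struct{ name string }

func (p fakeProvider) Name() string { return p.name }

func main() {
	// Hypothetical dispatch by provider name.
	builders := map[string]BuilderFunc{
		"openshift-machine-api": func(AutoscalingOptions, NodeGroupDiscoveryOptions, *ResourceLimiter) CloudProvider {
			return fakeProvider{"openshift-machine-api"}
		},
	}
	opts := AutoscalingOptions{CloudProviderName: "openshift-machine-api"}
	provider := builders[opts.CloudProviderName](opts, NodeGroupDiscoveryOptions{}, &ResourceLimiter{})
	fmt.Println(provider.Name())
}
```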
-func BuildCloudProvider(name string, opts config.AutoscalingOptions, rl *cloudprovider.ResourceLimiter) (cloudprovider.CloudProvider, error) { +func BuildOpenShiftMachineAPI(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider { var err error var externalConfig *rest.Config externalConfig, err = rest.InClusterConfig() if err != nil && err != rest.ErrNotInCluster { - return nil, err + klog.Fatal(err) } if opts.KubeConfigPath != "" { externalConfig, err = clientcmd.BuildConfigFromFlags("", opts.KubeConfigPath) if err != nil { - return nil, err + klog.Fatalf("cannot build config: %v", err) } } kubeclient, err := kubernetes.NewForConfig(externalConfig) if err != nil { - return nil, fmt.Errorf("create kube clientset failed: %v", err) + klog.Fatalf("create kube clientset failed: %v", err) } clusterclient, err := clusterclientset.NewForConfig(externalConfig) if err != nil { - return nil, fmt.Errorf("create cluster clientset failed: %v", err) + klog.Fatalf("create cluster clientset failed: %v", err) } enableMachineDeployments := os.Getenv("OPENSHIFT_MACHINE_API_CLOUDPROVIDER_ENABLE_MACHINE_DEPLOYMENTS") != "" controller, err := newMachineController(kubeclient, clusterclient, enableMachineDeployments) if err != nil { - return nil, err + klog.Fatal(err) } // Ideally this would be passed in but the builder is not @@ -146,8 +149,13 @@ func BuildCloudProvider(name string, opts config.AutoscalingOptions, rl *cloudpr stopCh := make(chan struct{}) if err := controller.run(stopCh); err != nil { - return nil, err + klog.Fatal(err) + } + + provider, err := newProvider(ProviderName, rl, controller) + if err != nil { + klog.Fatal(err) } - return newProvider(name, rl, controller) + return provider } diff --git a/cluster-autoscaler/cloudprovider/test/test_cloud_provider.go b/cluster-autoscaler/cloudprovider/test/test_cloud_provider.go index e1a4ff820d35..2c09cf9f25a2 100644 --- a/cluster-autoscaler/cloudprovider/test/test_cloud_provider.go +++ b/cluster-autoscaler/cloudprovider/test/test_cloud_provider.go @@ -187,6 +187,14 @@ func (tcp *TestCloudProvider) AddAutoprovisionedNodeGroup(id string, min int, ma return nodeGroup } +// DeleteNodeGroup removes node group from test cloud provider. +func (tcp *TestCloudProvider) DeleteNodeGroup(id string) { + tcp.Lock() + defer tcp.Unlock() + + delete(tcp.groups, id) +} + // AddNode adds the given node to the group. func (tcp *TestCloudProvider) AddNode(nodeGroupId string, node *apiv1.Node) { tcp.Lock() @@ -216,6 +224,11 @@ func (tcp *TestCloudProvider) Refresh() error { return nil } +// GetInstanceID gets the instance ID for the specified node. +func (tcp *TestCloudProvider) GetInstanceID(node *apiv1.Node) string { + return node.Spec.ProviderID +} + // TestNodeGroup is a node group used by TestCloudProvider. type TestNodeGroup struct { sync.Mutex @@ -296,7 +309,11 @@ func (tng *TestNodeGroup) Create() (cloudprovider.NodeGroup, error) { // Delete deletes the node group on the cloud provider side. // This will be executed only for autoprovisioned node groups, once their size drops to 0. func (tng *TestNodeGroup) Delete() error { - return tng.cloudProvider.onNodeGroupDelete(tng.id) + err := tng.cloudProvider.onNodeGroupDelete(tng.id) + if err == nil { + tng.cloudProvider.DeleteNodeGroup(tng.Id()) + } + return err } // DecreaseTargetSize decreases the target size of the node group. 
This function @@ -344,17 +361,17 @@ func (tng *TestNodeGroup) Debug() string { } // Nodes returns a list of all nodes that belong to this node group. -func (tng *TestNodeGroup) Nodes() ([]string, error) { +func (tng *TestNodeGroup) Nodes() ([]cloudprovider.Instance, error) { tng.Lock() defer tng.Unlock() - result := make([]string, 0) + instances := make([]cloudprovider.Instance, 0) for node, nodegroup := range tng.cloudProvider.nodes { if nodegroup == tng.id { - result = append(result, node) + instances = append(instances, cloudprovider.Instance{Id: node}) } } - return result, nil + return instances, nil } // Autoprovisioned returns true if the node group is autoprovisioned. diff --git a/cluster-autoscaler/clusterstate/clusterstate.go b/cluster-autoscaler/clusterstate/clusterstate.go index 4acdf03a45b1..e806918526b5 100644 --- a/cluster-autoscaler/clusterstate/clusterstate.go +++ b/cluster-autoscaler/clusterstate/clusterstate.go @@ -33,8 +33,9 @@ import ( apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" + schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -57,8 +58,8 @@ const ( // ScaleUpRequest contains information about the requested node group scale up. type ScaleUpRequest struct { - // NodeGroupName is the node group to be scaled up. - NodeGroupName string + // NodeGroup is the node group to be scaled up. + NodeGroup cloudprovider.NodeGroup // Time is the time when the request was submitted. Time time.Time // ExpectedAddTime is the time at which the request should be fulfilled. @@ -71,8 +72,8 @@ type ScaleUpRequest struct { type ScaleDownRequest struct { // NodeName is the name of the node to be deleted. NodeName string - // NodeGroupName is the node group of the deleted node. - NodeGroupName string + // NodeGroup is the node group of the deleted node. + NodeGroup cloudprovider.NodeGroup // Time is the time when the node deletion was requested. Time time.Time // ExpectedDeleteTime is the time when the node is expected to be deleted. @@ -115,9 +116,10 @@ type UnregisteredNode struct { type ClusterStateRegistry struct { sync.Mutex config ClusterStateRegistryConfig - scaleUpRequests []*ScaleUpRequest + scaleUpRequests map[string]*ScaleUpRequest // nodeGroupName -> ScaleUpRequest scaleDownRequests []*ScaleDownRequest nodes []*apiv1.Node + nodeInfosForGroups map[string]*schedulercache.NodeInfo cloudProvider cloudprovider.CloudProvider perNodeGroupReadiness map[string]Readiness totalReadiness Readiness @@ -125,20 +127,20 @@ type ClusterStateRegistry struct { incorrectNodeGroupSizes map[string]IncorrectNodeGroupSize unregisteredNodes map[string]UnregisteredNode candidatesForScaleDown map[string][]string - nodeGroupBackoffInfo *backoff.Backoff + nodeGroupBackoffInfo backoff.Backoff lastStatus *api.ClusterAutoscalerStatus lastScaleDownUpdateTime time.Time logRecorder *utils.LogEventRecorder } // NewClusterStateRegistry creates new ClusterStateRegistry. 
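Two structural changes land in the registry above: pending scale-ups move from a slice to a map keyed by node-group ID (so repeated requests for one group merge, as `RegisterScaleUp` below shows), and the backoff policy becomes an injected `backoff.Backoff` dependency, which the constructor below now accepts. A simplified single-key sketch of the exponential policy the default, `NewIdBasedExponentialBackoff`, is assumed to implement given the constants referenced here: doubling from the initial duration up to the max, cleared on success.

```go
package main

import (
	"fmt"
	"time"
)

// exponentialBackoff is a single-key simplification of the Backoff interface;
// the real one tracks state per node group and takes NodeGroup arguments.
type exponentialBackoff struct {
	initial, max time.Duration
	current      time.Duration
	until        time.Time
}

// Backoff records a failure and returns the time until which the group
// should be excluded from scale-ups.
func (b *exponentialBackoff) Backoff(now time.Time) time.Time {
	if b.current == 0 {
		b.current = b.initial
	} else if b.current < b.max {
		b.current *= 2
		if b.current > b.max {
			b.current = b.max
		}
	}
	b.until = now.Add(b.current)
	return b.until
}

func (b *exponentialBackoff) IsBackedOff(now time.Time) bool { return now.Before(b.until) }

// Reset plays the role of RemoveBackoff: a successful scale-up clears state.
func (b *exponentialBackoff) Reset() { b.current = 0; b.until = time.Time{} }

func main() {
	b := &exponentialBackoff{initial: 5 * time.Minute, max: 30 * time.Minute}
	now := time.Now()
	fmt.Println(b.Backoff(now).Sub(now)) // 5m0s
	fmt.Println(b.Backoff(now).Sub(now)) // 10m0s: consecutive failures double
	b.Reset()
	fmt.Println(b.Backoff(now).Sub(now)) // back to 5m0s
}
```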
-func NewClusterStateRegistry(cloudProvider cloudprovider.CloudProvider, config ClusterStateRegistryConfig, logRecorder *utils.LogEventRecorder) *ClusterStateRegistry { +func NewClusterStateRegistry(cloudProvider cloudprovider.CloudProvider, config ClusterStateRegistryConfig, logRecorder *utils.LogEventRecorder, backoff backoff.Backoff) *ClusterStateRegistry { emptyStatus := &api.ClusterAutoscalerStatus{ ClusterwideConditions: make([]api.ClusterAutoscalerCondition, 0), NodeGroupStatuses: make([]api.NodeGroupStatus, 0), } return &ClusterStateRegistry{ - scaleUpRequests: make([]*ScaleUpRequest, 0), + scaleUpRequests: make(map[string]*ScaleUpRequest), scaleDownRequests: make([]*ScaleDownRequest, 0), nodes: make([]*apiv1.Node, 0), cloudProvider: cloudProvider, @@ -148,7 +150,7 @@ func NewClusterStateRegistry(cloudProvider cloudprovider.CloudProvider, config C incorrectNodeGroupSizes: make(map[string]IncorrectNodeGroupSize), unregisteredNodes: make(map[string]UnregisteredNode), candidatesForScaleDown: make(map[string][]string), - nodeGroupBackoffInfo: backoff.NewBackoff(InitialNodeGroupBackoffDuration, MaxNodeGroupBackoffDuration, NodeGroupBackoffResetTimeout), + nodeGroupBackoffInfo: backoff, lastStatus: emptyStatus, logRecorder: logRecorder, } @@ -158,7 +160,17 @@ func NewClusterStateRegistry(cloudProvider cloudprovider.CloudProvider, config C func (csr *ClusterStateRegistry) RegisterScaleUp(request *ScaleUpRequest) { csr.Lock() defer csr.Unlock() - csr.scaleUpRequests = append(csr.scaleUpRequests, request) + + oldScaleUpRequest, found := csr.scaleUpRequests[request.NodeGroup.Id()] + if !found { + csr.scaleUpRequests[request.NodeGroup.Id()] = request + return + } + + // update the old request + oldScaleUpRequest.Time = request.Time + oldScaleUpRequest.ExpectedAddTime = request.ExpectedAddTime + oldScaleUpRequest.Increase += request.Increase } // RegisterScaleDown registers node scale down. @@ -173,69 +185,57 @@ func (csr *ClusterStateRegistry) updateScaleRequests(currentTime time.Time) { // clean up stale backoff info csr.nodeGroupBackoffInfo.RemoveStaleBackoffData(currentTime) - timedOutSur := make([]*ScaleUpRequest, 0) - newSur := make([]*ScaleUpRequest, 0) - for _, sur := range csr.scaleUpRequests { - if !csr.areThereUpcomingNodesInNodeGroup(sur.NodeGroupName) { + for nodeGroupName, scaleUpRequest := range csr.scaleUpRequests { + if !csr.areThereUpcomingNodesInNodeGroup(nodeGroupName) { // scale-out finished successfully // remove it and reset node group backoff - csr.nodeGroupBackoffInfo.RemoveBackoff(sur.NodeGroupName) - glog.V(4).Infof("Scale up in group %v finished successfully in %v", - sur.NodeGroupName, currentTime.Sub(sur.Time)) + delete(csr.scaleUpRequests, nodeGroupName) + csr.nodeGroupBackoffInfo.RemoveBackoff(scaleUpRequest.NodeGroup) + klog.V(4).Infof("Scale up in group %v finished successfully in %v", + nodeGroupName, currentTime.Sub(scaleUpRequest.Time)) continue } - if sur.ExpectedAddTime.After(currentTime) { - newSur = append(newSur, sur) - } else { - timedOutSur = append(timedOutSur, sur) - } - } - csr.scaleUpRequests = newSur - for _, sur := range timedOutSur { - // IsNodeGroupScalingUp returns true if there is another - // scale-up still going on for this group, so it's ok for node - // group to still have upcoming nodes. 
If there is no other - // scale-up we have VMs that failed to provision within timeout, - // so we consider it a failed scale-up - if !csr.IsNodeGroupScalingUp(sur.NodeGroupName) { - glog.Warningf("Scale-up timed out for node group %v after %v", - sur.NodeGroupName, currentTime.Sub(sur.Time)) + + if scaleUpRequest.ExpectedAddTime.Before(currentTime) { + klog.Warningf("Scale-up timed out for node group %v after %v", + nodeGroupName, currentTime.Sub(scaleUpRequest.Time)) csr.logRecorder.Eventf(apiv1.EventTypeWarning, "ScaleUpTimedOut", "Nodes added to group %s failed to register within %v", - sur.NodeGroupName, currentTime.Sub(sur.Time)) + scaleUpRequest.NodeGroup.Id(), currentTime.Sub(scaleUpRequest.Time)) metrics.RegisterFailedScaleUp(metrics.Timeout) - csr.backoffNodeGroup(sur.NodeGroupName, currentTime) + csr.backoffNodeGroup(scaleUpRequest.NodeGroup, currentTime) + delete(csr.scaleUpRequests, nodeGroupName) } } - newSdr := make([]*ScaleDownRequest, 0) - for _, sdr := range csr.scaleDownRequests { - if sdr.ExpectedDeleteTime.After(currentTime) { - newSdr = append(newSdr, sdr) + newScaleDownRequests := make([]*ScaleDownRequest, 0) + for _, scaleDownRequest := range csr.scaleDownRequests { + if scaleDownRequest.ExpectedDeleteTime.After(currentTime) { + newScaleDownRequests = append(newScaleDownRequests, scaleDownRequest) } } - csr.scaleDownRequests = newSdr + csr.scaleDownRequests = newScaleDownRequests } // To be executed under a lock. -func (csr *ClusterStateRegistry) backoffNodeGroup(nodeGroupName string, currentTime time.Time) { - backoffUntil := csr.nodeGroupBackoffInfo.Backoff(nodeGroupName, currentTime) - glog.Warningf("Disabling scale-up for node group %v until %v", nodeGroupName, backoffUntil) +func (csr *ClusterStateRegistry) backoffNodeGroup(nodeGroup cloudprovider.NodeGroup, currentTime time.Time) { + backoffUntil := csr.nodeGroupBackoffInfo.Backoff(nodeGroup, currentTime) + klog.Warningf("Disabling scale-up for node group %v until %v", nodeGroup.Id(), backoffUntil) } // RegisterFailedScaleUp should be called after getting error from cloudprovider // when trying to scale-up node group. It will mark this group as not safe to autoscale // for some time. 
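The rewritten `updateScaleRequests` above reduces each pending scale-up to a three-way decision per iteration; condensed into plain Go for readability, with names local to this sketch:

```go
package main

import (
	"fmt"
	"time"
)

type scaleUpRequest struct {
	expectedAddTime time.Time
}

// evaluate restates the branch structure of updateScaleRequests for a single
// node group: "upcoming" means the group's target still exceeds its
// provisioned node count.
func evaluate(upcoming bool, req scaleUpRequest, now time.Time) string {
	switch {
	case !upcoming:
		// Every requested node registered: drop the request, reset backoff.
		return "success"
	case req.expectedAddTime.Before(now):
		// Deadline passed with nodes still missing: emit ScaleUpTimedOut,
		// back the group off, drop the request.
		return "timeout"
	default:
		return "pending"
	}
}

func main() {
	now := time.Now()
	fmt.Println(evaluate(false, scaleUpRequest{}, now))                                      // success
	fmt.Println(evaluate(true, scaleUpRequest{expectedAddTime: now.Add(-time.Minute)}, now)) // timeout
	fmt.Println(evaluate(true, scaleUpRequest{expectedAddTime: now.Add(time.Minute)}, now))  // pending
}
```

`RegisterFailedScaleUp`, defined next, feeds the same `backoffNodeGroup` path directly when the cloud provider call itself fails, so provider errors and registration timeouts end in the same penalty.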
-func (csr *ClusterStateRegistry) RegisterFailedScaleUp(nodeGroupName string, reason metrics.FailedScaleUpReason) { +func (csr *ClusterStateRegistry) RegisterFailedScaleUp(nodeGroup cloudprovider.NodeGroup, reason metrics.FailedScaleUpReason) { csr.Lock() defer csr.Unlock() metrics.RegisterFailedScaleUp(reason) - csr.backoffNodeGroup(nodeGroupName, time.Now()) + csr.backoffNodeGroup(nodeGroup, time.Now()) } // UpdateNodes updates the state of the nodes in the ClusterStateRegistry and recalculates the stats -func (csr *ClusterStateRegistry) UpdateNodes(nodes []*apiv1.Node, currentTime time.Time) error { +func (csr *ClusterStateRegistry) UpdateNodes(nodes []*apiv1.Node, nodeInfosForGroups map[string]*schedulercache.NodeInfo, currentTime time.Time) error { csr.updateNodeGroupMetrics() targetSizes, err := getTargetSizes(csr.cloudProvider) if err != nil { @@ -250,6 +250,7 @@ func (csr *ClusterStateRegistry) UpdateNodes(nodes []*apiv1.Node, currentTime ti defer csr.Unlock() csr.nodes = nodes + csr.nodeInfosForGroups = nodeInfosForGroups csr.updateUnregisteredNodes(notRegistered) csr.updateReadinessStats(currentTime) @@ -268,7 +269,7 @@ func (csr *ClusterStateRegistry) UpdateNodes(nodes []*apiv1.Node, currentTime ti func (csr *ClusterStateRegistry) Recalculate() { targetSizes, err := getTargetSizes(csr.cloudProvider) if err != nil { - glog.Warningf("Failed to get target sizes, when trying to recalculate cluster state: %v", err) + klog.Warningf("Failed to get target sizes, when trying to recalculate cluster state: %v", err) } csr.Lock() @@ -308,7 +309,7 @@ func (csr *ClusterStateRegistry) IsClusterHealthy() bool { func (csr *ClusterStateRegistry) IsNodeGroupHealthy(nodeGroupName string) bool { acceptable, found := csr.acceptableRanges[nodeGroupName] if !found { - glog.Warningf("Failed to find acceptable ranges for %v", nodeGroupName) + klog.Warningf("Failed to find acceptable ranges for %v", nodeGroupName) return false } @@ -318,7 +319,7 @@ func (csr *ClusterStateRegistry) IsNodeGroupHealthy(nodeGroupName string) bool { if acceptable.CurrentTarget == 0 || (acceptable.MinNodes == 0 && acceptable.CurrentTarget > 0) { return true } - glog.Warningf("Failed to find readiness information for %v", nodeGroupName) + klog.Warningf("Failed to find readiness information for %v", nodeGroupName) return false } @@ -356,31 +357,49 @@ func (csr *ClusterStateRegistry) updateNodeGroupMetrics() { } // IsNodeGroupSafeToScaleUp returns true if node group can be scaled up now. 
-func (csr *ClusterStateRegistry) IsNodeGroupSafeToScaleUp(nodeGroupName string, now time.Time) bool { - if !csr.IsNodeGroupHealthy(nodeGroupName) { +func (csr *ClusterStateRegistry) IsNodeGroupSafeToScaleUp(nodeGroup cloudprovider.NodeGroup, now time.Time) bool { + if !csr.IsNodeGroupHealthy(nodeGroup.Id()) { return false } - return !csr.nodeGroupBackoffInfo.IsBackedOff(nodeGroupName, now) + return !csr.nodeGroupBackoffInfo.IsBackedOff(nodeGroup, now) } -func (csr *ClusterStateRegistry) areThereUpcomingNodesInNodeGroup(nodeGroupName string) bool { +func (csr *ClusterStateRegistry) getProvisionedAndTargetSizesForNodeGroup(nodeGroupName string) (provisioned, target int, ok bool) { acceptable, found := csr.acceptableRanges[nodeGroupName] if !found { - glog.Warningf("Failed to find acceptable ranges for %v", nodeGroupName) - return false + klog.Warningf("Failed to find acceptable ranges for %v", nodeGroupName) + return 0, 0, false } + target = acceptable.CurrentTarget readiness, found := csr.perNodeGroupReadiness[nodeGroupName] if !found { // No need to warn if node group has size 0 (was scaled to 0 before). if acceptable.MinNodes != 0 { - glog.Warningf("Failed to find readiness information for %v", nodeGroupName) + klog.Warningf("Failed to find readiness information for %v", nodeGroupName) } - return acceptable.CurrentTarget > 0 + return 0, target, true } + provisioned = readiness.Registered - readiness.NotStarted - readiness.LongNotStarted - provisioned := readiness.Registered - readiness.NotStarted - readiness.LongNotStarted - return acceptable.CurrentTarget > provisioned + return provisioned, target, true +} + +func (csr *ClusterStateRegistry) areThereUpcomingNodesInNodeGroup(nodeGroupName string) bool { + provisioned, target, ok := csr.getProvisionedAndTargetSizesForNodeGroup(nodeGroupName) + if !ok { + return false + } + return target > provisioned +} + +// IsNodeGroupAtTargetSize returns true if the number of nodes provisioned in the group is equal to the target number of nodes. +func (csr *ClusterStateRegistry) IsNodeGroupAtTargetSize(nodeGroupName string) bool { + provisioned, target, ok := csr.getProvisionedAndTargetSizesForNodeGroup(nodeGroupName) + if !ok { + return false + } + return target == provisioned } // IsNodeGroupScalingUp returns true if the node group is currently scaling up. @@ -388,13 +407,8 @@ func (csr *ClusterStateRegistry) IsNodeGroupScalingUp(nodeGroupName string) bool if !csr.areThereUpcomingNodesInNodeGroup(nodeGroupName) { return false } - // Let's check if there is an active scale up request - for _, request := range csr.scaleUpRequests { - if request.NodeGroupName == nodeGroupName { - return true - } - } - return false + _, found := csr.scaleUpRequests[nodeGroupName] + return found } // AcceptableRange contains information about acceptable size of a node group. 
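`getProvisionedAndTargetSizesForNodeGroup` above factors out the arithmetic that both `areThereUpcomingNodesInNodeGroup` and the new `IsNodeGroupAtTargetSize` consume. The same computation in isolation:

```go
package main

import "fmt"

// readiness carries the three counters "provisioned" is derived from,
// following the Readiness fields referenced in this hunk.
type readiness struct {
	Registered, NotStarted, LongNotStarted int
}

func provisioned(r readiness) int {
	return r.Registered - r.NotStarted - r.LongNotStarted
}

func main() {
	r := readiness{Registered: 5, NotStarted: 1, LongNotStarted: 1}
	target := 5
	p := provisioned(r)                           // 3
	fmt.Println("upcoming expected:", target > p) // true: scale-up still in flight
	fmt.Println("at target size:", target == p)   // false
}
```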
@@ -423,15 +437,15 @@ func (csr *ClusterStateRegistry) updateAcceptableRanges(targetSize map[string]in CurrentTarget: size, } } - for _, sur := range csr.scaleUpRequests { - val := result[sur.NodeGroupName] - val.MinNodes -= sur.Increase - result[sur.NodeGroupName] = val + for nodeGroupName, scaleUpRequest := range csr.scaleUpRequests { + acceptableRange := result[nodeGroupName] + acceptableRange.MinNodes -= scaleUpRequest.Increase + result[nodeGroupName] = acceptableRange } - for _, sdr := range csr.scaleDownRequests { - val := result[sdr.NodeGroupName] - val.MaxNodes += 1 - result[sdr.NodeGroupName] = val + for _, scaleDownRequest := range csr.scaleDownRequests { + acceptableRange := result[scaleDownRequest.NodeGroup.Id()] + acceptableRange.MaxNodes += 1 + result[scaleDownRequest.NodeGroup.Id()] = acceptableRange } csr.acceptableRanges = result } @@ -487,10 +501,10 @@ func (csr *ClusterStateRegistry) updateReadinessStats(currentTime time.Time) { // Node is most likely not autoscaled, however check the errors. if nodeGroup == nil || reflect.ValueOf(nodeGroup).IsNil() { if errNg != nil { - glog.Warningf("Failed to get nodegroup for %s: %v", node.Name, errNg) + klog.Warningf("Failed to get nodegroup for %s: %v", node.Name, errNg) } if errReady != nil { - glog.Warningf("Failed to get readiness info for %s: %v", node.Name, errReady) + klog.Warningf("Failed to get readiness info for %s: %v", node.Name, errReady) } } else { perNodeGroup[nodeGroup.Id()] = update(perNodeGroup[nodeGroup.Id()], node, ready) @@ -501,7 +515,7 @@ func (csr *ClusterStateRegistry) updateReadinessStats(currentTime time.Time) { for _, unregistered := range csr.unregisteredNodes { nodeGroup, errNg := csr.cloudProvider.NodeGroupForNode(unregistered.Node) if errNg != nil { - glog.Warningf("Failed to get nodegroup for %s: %v", unregistered.Node.Name, errNg) + klog.Warningf("Failed to get nodegroup for %s: %v", unregistered.Node.Name, errNg) continue } perNgCopy := perNodeGroup[nodeGroup.Id()] @@ -529,14 +543,14 @@ func (csr *ClusterStateRegistry) updateIncorrectNodeGroupSizes(currentTime time. for _, nodeGroup := range csr.cloudProvider.NodeGroups() { acceptableRange, found := csr.acceptableRanges[nodeGroup.Id()] if !found { - glog.Warningf("Acceptable range for node group %s not found", nodeGroup.Id()) + klog.Warningf("Acceptable range for node group %s not found", nodeGroup.Id()) continue } readiness, found := csr.perNodeGroupReadiness[nodeGroup.Id()] if !found { // if MinNodes == 0 node group has been scaled to 0 and everything's fine if acceptableRange.MinNodes != 0 { - glog.Warningf("Readiness for node group %s not found", nodeGroup.Id()) + klog.Warningf("Readiness for node group %s not found", nodeGroup.Id()) } continue } @@ -590,7 +604,7 @@ func (csr *ClusterStateRegistry) UpdateScaleDownCandidates(nodes []*apiv1.Node, for _, node := range nodes { group, err := csr.cloudProvider.NodeGroupForNode(node) if err != nil { - glog.Warningf("Failed to get node group for %s: %v", node.Name, err) + klog.Warningf("Failed to get node group for %s: %v", node.Name, err) continue } if group == nil || reflect.ValueOf(group).IsNil() { @@ -623,7 +637,7 @@ func (csr *ClusterStateRegistry) GetStatus(now time.Time) *api.ClusterAutoscaler // Scale up. 
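The scale-up condition appended just below now passes the `NodeGroup` itself to `IsNodeGroupSafeToScaleUp`. The last hunk in this file then reworks `getNotRegisteredNodes` around the new `GetInstanceID` hook and the `Instance` type: registered nodes are collected through `GetInstanceID(node)` and compared against each group's instance IDs. That set difference, sketched with local stand-in types:

```go
package main

import "fmt"

type node struct{ providerID string }

type instance struct{ Id string }

// getInstanceID mirrors the default implementations added in this patch,
// which simply surface the node's provider ID.
func getInstanceID(n node) string { return n.providerID }

// notRegistered reproduces the set difference in getNotRegisteredNodes:
// cloud instances whose IDs match no known node are considered unregistered.
func notRegistered(nodes []node, instances []instance) []string {
	registered := map[string]bool{}
	for _, n := range nodes {
		registered[getInstanceID(n)] = true
	}
	var missing []string
	for _, inst := range instances {
		if !registered[inst.Id] {
			missing = append(missing, inst.Id)
		}
	}
	return missing
}

func main() {
	nodes := []node{{providerID: "gce://proj/zone/n1"}}
	instances := []instance{{Id: "gce://proj/zone/n1"}, {Id: "gce://proj/zone/n2"}}
	fmt.Println(notRegistered(nodes, instances)) // [gce://proj/zone/n2]
}
```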
nodeGroupStatus.Conditions = append(nodeGroupStatus.Conditions, buildScaleUpStatusNodeGroup( csr.IsNodeGroupScalingUp(nodeGroup.Id()), - csr.IsNodeGroupSafeToScaleUp(nodeGroup.Id(), now), + csr.IsNodeGroupSafeToScaleUp(nodeGroup, now), readiness, acceptable)) @@ -867,7 +881,7 @@ func (csr *ClusterStateRegistry) GetUpcomingNodes() map[string]int { func getNotRegisteredNodes(allNodes []*apiv1.Node, cloudProvider cloudprovider.CloudProvider, time time.Time) ([]UnregisteredNode, error) { registered := sets.NewString() for _, node := range allNodes { - registered.Insert(node.Spec.ProviderID) + registered.Insert(cloudProvider.GetInstanceID(node)) } notRegistered := make([]UnregisteredNode, 0) for _, nodeGroup := range cloudProvider.NodeGroups() { @@ -876,14 +890,14 @@ func getNotRegisteredNodes(allNodes []*apiv1.Node, cloudProvider cloudprovider.C return []UnregisteredNode{}, err } for _, node := range nodes { - if !registered.Has(node) { + if !registered.Has(node.Id) { notRegistered = append(notRegistered, UnregisteredNode{ Node: &apiv1.Node{ ObjectMeta: metav1.ObjectMeta{ - Name: node, + Name: node.Id, }, Spec: apiv1.NodeSpec{ - ProviderID: node, + ProviderID: node.Id, }, }, UnregisteredSince: time, diff --git a/cluster-autoscaler/clusterstate/clusterstate_test.go b/cluster-autoscaler/clusterstate/clusterstate_test.go index b16968f28a3b..c5f83aa4ab61 100644 --- a/cluster-autoscaler/clusterstate/clusterstate_test.go +++ b/cluster-autoscaler/clusterstate/clusterstate_test.go @@ -30,6 +30,7 @@ import ( kube_record "k8s.io/client-go/tools/record" "github.com/stretchr/testify/assert" + "k8s.io/autoscaler/cluster-autoscaler/utils/backoff" ) func TestOKWithScaleUp(t *testing.T) { @@ -53,14 +54,14 @@ func TestOKWithScaleUp(t *testing.T) { clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{ MaxTotalUnreadyPercentage: 10, OkTotalUnreadyCount: 1, - }, fakeLogRecorder) + }, fakeLogRecorder, newBackoff()) clusterstate.RegisterScaleUp(&ScaleUpRequest{ - NodeGroupName: "ng1", + NodeGroup: provider.GetNodeGroup("ng1"), Increase: 4, Time: now, ExpectedAddTime: now.Add(time.Minute), }) - err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, now) + err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, nil, now) assert.NoError(t, err) assert.True(t, clusterstate.IsClusterHealthy()) @@ -98,8 +99,8 @@ func TestEmptyOK(t *testing.T) { clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{ MaxTotalUnreadyPercentage: 10, OkTotalUnreadyCount: 1, - }, fakeLogRecorder) - err := clusterstate.UpdateNodes([]*apiv1.Node{}, now.Add(-5*time.Second)) + }, fakeLogRecorder, newBackoff()) + err := clusterstate.UpdateNodes([]*apiv1.Node{}, nil, now.Add(-5*time.Second)) assert.NoError(t, err) assert.True(t, clusterstate.IsClusterHealthy()) assert.True(t, clusterstate.IsNodeGroupHealthy("ng1")) @@ -107,12 +108,12 @@ func TestEmptyOK(t *testing.T) { provider.AddNodeGroup("ng1", 0, 10, 3) clusterstate.RegisterScaleUp(&ScaleUpRequest{ - NodeGroupName: "ng1", + NodeGroup: provider.GetNodeGroup("ng1"), Increase: 3, Time: now.Add(-3 * time.Second), ExpectedAddTime: now.Add(1 * time.Minute), }) - err = clusterstate.UpdateNodes([]*apiv1.Node{}, now) + err = clusterstate.UpdateNodes([]*apiv1.Node{}, nil, now) assert.NoError(t, err) assert.True(t, clusterstate.IsClusterHealthy()) @@ -140,8 +141,8 @@ func TestOKOneUnreadyNode(t *testing.T) { clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{ MaxTotalUnreadyPercentage: 10, OkTotalUnreadyCount: 1, - }, 
fakeLogRecorder) - err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, now) + }, fakeLogRecorder, newBackoff()) + err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, nil, now) assert.NoError(t, err) assert.True(t, clusterstate.IsClusterHealthy()) assert.True(t, clusterstate.IsNodeGroupHealthy("ng1")) @@ -177,8 +178,8 @@ func TestNodeWithoutNodeGroupDontCrash(t *testing.T) { clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{ MaxTotalUnreadyPercentage: 10, OkTotalUnreadyCount: 1, - }, fakeLogRecorder) - err := clusterstate.UpdateNodes([]*apiv1.Node{noNgNode}, now) + }, fakeLogRecorder, newBackoff()) + err := clusterstate.UpdateNodes([]*apiv1.Node{noNgNode}, nil, now) assert.NoError(t, err) clusterstate.UpdateScaleDownCandidates([]*apiv1.Node{noNgNode}, now) } @@ -203,8 +204,8 @@ func TestOKOneUnreadyNodeWithScaleDownCandidate(t *testing.T) { clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{ MaxTotalUnreadyPercentage: 10, OkTotalUnreadyCount: 1, - }, fakeLogRecorder) - err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, now) + }, fakeLogRecorder, newBackoff()) + err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, nil, now) clusterstate.UpdateScaleDownCandidates([]*apiv1.Node{ng1_1}, now) assert.NoError(t, err) @@ -267,8 +268,8 @@ func TestMissingNodes(t *testing.T) { clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{ MaxTotalUnreadyPercentage: 10, OkTotalUnreadyCount: 1, - }, fakeLogRecorder) - err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, now) + }, fakeLogRecorder, newBackoff()) + err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, nil, now) assert.NoError(t, err) assert.True(t, clusterstate.IsClusterHealthy()) assert.False(t, clusterstate.IsNodeGroupHealthy("ng1")) @@ -308,8 +309,8 @@ func TestTooManyUnready(t *testing.T) { clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{ MaxTotalUnreadyPercentage: 10, OkTotalUnreadyCount: 1, - }, fakeLogRecorder) - err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, now) + }, fakeLogRecorder, newBackoff()) + err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, nil, now) assert.NoError(t, err) assert.False(t, clusterstate.IsClusterHealthy()) assert.True(t, clusterstate.IsNodeGroupHealthy("ng1")) @@ -331,14 +332,15 @@ func TestExpiredScaleUp(t *testing.T) { clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{ MaxTotalUnreadyPercentage: 10, OkTotalUnreadyCount: 1, - }, fakeLogRecorder) + MaxNodeProvisionTime: 2 * time.Minute, + }, fakeLogRecorder, newBackoff()) clusterstate.RegisterScaleUp(&ScaleUpRequest{ - NodeGroupName: "ng1", + NodeGroup: provider.GetNodeGroup("ng1"), Increase: 4, Time: now.Add(-3 * time.Minute), ExpectedAddTime: now.Add(-1 * time.Minute), }) - err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1}, now) + err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1}, nil, now) assert.NoError(t, err) assert.True(t, clusterstate.IsClusterHealthy()) assert.False(t, clusterstate.IsNodeGroupHealthy("ng1")) @@ -356,12 +358,12 @@ func TestRegisterScaleDown(t *testing.T) { clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{ MaxTotalUnreadyPercentage: 10, OkTotalUnreadyCount: 1, - }, fakeLogRecorder) + }, fakeLogRecorder, newBackoff()) now := time.Now() clusterstate.RegisterScaleDown(&ScaleDownRequest{ - NodeGroupName: "ng1", + NodeGroup: provider.GetNodeGroup("ng1"), NodeName: "ng1-1", ExpectedDeleteTime: 
now.Add(time.Minute), Time: now, @@ -408,8 +410,8 @@ func TestUpcomingNodes(t *testing.T) { clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{ MaxTotalUnreadyPercentage: 10, OkTotalUnreadyCount: 1, - }, fakeLogRecorder) - err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1, ng3_1, ng4_1}, now) + }, fakeLogRecorder, newBackoff()) + err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1, ng3_1, ng4_1}, nil, now) assert.NoError(t, err) upcomingNodes := clusterstate.GetUpcomingNodes() @@ -430,21 +432,21 @@ func TestIncorrectSize(t *testing.T) { clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{ MaxTotalUnreadyPercentage: 10, OkTotalUnreadyCount: 1, - }, fakeLogRecorder) + }, fakeLogRecorder, newBackoff()) now := time.Now() - clusterstate.UpdateNodes([]*apiv1.Node{ng1_1}, now.Add(-5*time.Minute)) + clusterstate.UpdateNodes([]*apiv1.Node{ng1_1}, nil, now.Add(-5*time.Minute)) incorrect := clusterstate.incorrectNodeGroupSizes["ng1"] assert.Equal(t, 5, incorrect.ExpectedSize) assert.Equal(t, 1, incorrect.CurrentSize) assert.Equal(t, now.Add(-5*time.Minute), incorrect.FirstObserved) - clusterstate.UpdateNodes([]*apiv1.Node{ng1_1}, now.Add(-4*time.Minute)) + clusterstate.UpdateNodes([]*apiv1.Node{ng1_1}, nil, now.Add(-4*time.Minute)) incorrect = clusterstate.incorrectNodeGroupSizes["ng1"] assert.Equal(t, 5, incorrect.ExpectedSize) assert.Equal(t, 1, incorrect.CurrentSize) assert.Equal(t, now.Add(-5*time.Minute), incorrect.FirstObserved) - clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng1_1}, now.Add(-3*time.Minute)) + clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng1_1}, nil, now.Add(-3*time.Minute)) incorrect = clusterstate.incorrectNodeGroupSizes["ng1"] assert.Equal(t, 5, incorrect.ExpectedSize) assert.Equal(t, 2, incorrect.CurrentSize) @@ -467,8 +469,8 @@ func TestUnregisteredNodes(t *testing.T) { MaxTotalUnreadyPercentage: 10, OkTotalUnreadyCount: 1, MaxNodeProvisionTime: 10 * time.Second, - }, fakeLogRecorder) - err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1}, time.Now().Add(-time.Minute)) + }, fakeLogRecorder, newBackoff()) + err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1}, nil, time.Now().Add(-time.Minute)) assert.NoError(t, err) assert.Equal(t, 1, len(clusterstate.GetUnregisteredNodes())) @@ -478,14 +480,14 @@ func TestUnregisteredNodes(t *testing.T) { // The node didn't come up in MaxNodeProvisionTime, it should no longer be // counted as upcoming (but it is still an unregistered node) - err = clusterstate.UpdateNodes([]*apiv1.Node{ng1_1}, time.Now().Add(time.Minute)) + err = clusterstate.UpdateNodes([]*apiv1.Node{ng1_1}, nil, time.Now().Add(time.Minute)) assert.NoError(t, err) assert.Equal(t, 1, len(clusterstate.GetUnregisteredNodes())) assert.Equal(t, "ng1-2", clusterstate.GetUnregisteredNodes()[0].Node.Name) upcomingNodes = clusterstate.GetUpcomingNodes() assert.Equal(t, 0, len(upcomingNodes)) - err = clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng1_2}, time.Now().Add(time.Minute)) + err = clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng1_2}, nil, time.Now().Add(time.Minute)) assert.NoError(t, err) assert.Equal(t, 0, len(clusterstate.GetUnregisteredNodes())) } @@ -607,6 +609,7 @@ func TestScaleUpBackoff(t *testing.T) { provider := testprovider.NewTestCloudProvider(nil, nil) provider.AddNodeGroup("ng1", 1, 10, 4) + ng1 := provider.GetNodeGroup("ng1") provider.AddNode("ng1", ng1_1) provider.AddNode("ng1", ng1_2) provider.AddNode("ng1", ng1_3) @@ -617,46 +620,46 @@ func TestScaleUpBackoff(t *testing.T) { clusterstate 
:= NewClusterStateRegistry(provider, ClusterStateRegistryConfig{ MaxTotalUnreadyPercentage: 10, OkTotalUnreadyCount: 1, - }, fakeLogRecorder) + }, fakeLogRecorder, newBackoff()) // After failed scale-up, node group should be still healthy, but should backoff from scale-ups clusterstate.RegisterScaleUp(&ScaleUpRequest{ - NodeGroupName: "ng1", + NodeGroup: provider.GetNodeGroup("ng1"), Increase: 1, Time: now.Add(-3 * time.Minute), ExpectedAddTime: now.Add(-1 * time.Minute), }) - err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng1_2, ng1_3}, now) + err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng1_2, ng1_3}, nil, now) assert.NoError(t, err) assert.True(t, clusterstate.IsClusterHealthy()) assert.True(t, clusterstate.IsNodeGroupHealthy("ng1")) - assert.False(t, clusterstate.IsNodeGroupSafeToScaleUp("ng1", now)) + assert.False(t, clusterstate.IsNodeGroupSafeToScaleUp(ng1, now)) // Backoff should expire after timeout now = now.Add(InitialNodeGroupBackoffDuration).Add(time.Second) assert.True(t, clusterstate.IsClusterHealthy()) assert.True(t, clusterstate.IsNodeGroupHealthy("ng1")) - assert.True(t, clusterstate.IsNodeGroupSafeToScaleUp("ng1", now)) + assert.True(t, clusterstate.IsNodeGroupSafeToScaleUp(ng1, now)) // Another failed scale up should cause longer backoff clusterstate.RegisterScaleUp(&ScaleUpRequest{ - NodeGroupName: "ng1", + NodeGroup: provider.GetNodeGroup("ng1"), Increase: 1, Time: now.Add(-2 * time.Minute), ExpectedAddTime: now.Add(-1 * time.Second), }) - err = clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng1_2, ng1_3}, now) + err = clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng1_2, ng1_3}, nil, now) assert.NoError(t, err) assert.True(t, clusterstate.IsClusterHealthy()) assert.True(t, clusterstate.IsNodeGroupHealthy("ng1")) - assert.False(t, clusterstate.IsNodeGroupSafeToScaleUp("ng1", now)) + assert.False(t, clusterstate.IsNodeGroupSafeToScaleUp(ng1, now)) now = now.Add(InitialNodeGroupBackoffDuration).Add(time.Second) - assert.False(t, clusterstate.IsNodeGroupSafeToScaleUp("ng1", now)) + assert.False(t, clusterstate.IsNodeGroupSafeToScaleUp(ng1, now)) // The backoff should be cleared after a successful scale-up clusterstate.RegisterScaleUp(&ScaleUpRequest{ - NodeGroupName: "ng1", + NodeGroup: provider.GetNodeGroup("ng1"), Increase: 1, Time: now, ExpectedAddTime: now.Add(time.Second), @@ -664,12 +667,12 @@ func TestScaleUpBackoff(t *testing.T) { ng1_4 := BuildTestNode("ng1-4", 1000, 1000) SetNodeReadyState(ng1_4, true, now.Add(-1*time.Minute)) provider.AddNode("ng1", ng1_4) - err = clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng1_2, ng1_3, ng1_4}, now) + err = clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng1_2, ng1_3, ng1_4}, nil, now) assert.NoError(t, err) assert.True(t, clusterstate.IsClusterHealthy()) assert.True(t, clusterstate.IsNodeGroupHealthy("ng1")) - assert.True(t, clusterstate.IsNodeGroupSafeToScaleUp("ng1", now)) - assert.False(t, clusterstate.nodeGroupBackoffInfo.IsBackedOff("ng1", now)) + assert.True(t, clusterstate.IsNodeGroupSafeToScaleUp(ng1, now)) + assert.False(t, clusterstate.nodeGroupBackoffInfo.IsBackedOff(ng1, now)) } func TestGetClusterSize(t *testing.T) { @@ -692,23 +695,23 @@ func TestGetClusterSize(t *testing.T) { clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{ MaxTotalUnreadyPercentage: 10, OkTotalUnreadyCount: 1, - }, fakeLogRecorder) + }, fakeLogRecorder, newBackoff()) // There are 2 actual nodes in 2 node groups with target sizes of 5 and 1. 
- clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, now) + clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, nil, now) currentSize, targetSize := clusterstate.GetClusterSize() assert.Equal(t, 2, currentSize) assert.Equal(t, 6, targetSize) // Current size should increase after a new node is added. - clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng1_1, ng2_1}, now.Add(time.Minute)) + clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng1_1, ng2_1}, nil, now.Add(time.Minute)) currentSize, targetSize = clusterstate.GetClusterSize() assert.Equal(t, 3, currentSize) assert.Equal(t, 6, targetSize) // Target size should increase after a new node group is added. provider.AddNodeGroup("ng3", 1, 10, 1) - clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng1_1, ng2_1}, now.Add(2*time.Minute)) + clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng1_1, ng2_1}, nil, now.Add(2*time.Minute)) currentSize, targetSize = clusterstate.GetClusterSize() assert.Equal(t, 3, currentSize) assert.Equal(t, 7, targetSize) @@ -717,7 +720,7 @@ func TestGetClusterSize(t *testing.T) { for _, ng := range provider.NodeGroups() { ng.(*testprovider.TestNodeGroup).SetTargetSize(10) } - clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng1_1, ng2_1}, now.Add(3*time.Minute)) + clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng1_1, ng2_1}, nil, now.Add(3*time.Minute)) currentSize, targetSize = clusterstate.GetClusterSize() assert.Equal(t, 3, currentSize) assert.Equal(t, 30, targetSize) @@ -754,3 +757,7 @@ func TestIsNodeStillStarting(t *testing.T) { }) } } + +func newBackoff() backoff.Backoff { + return backoff.NewIdBasedExponentialBackoff(InitialNodeGroupBackoffDuration, MaxNodeGroupBackoffDuration, NodeGroupBackoffResetTimeout) +} diff --git a/cluster-autoscaler/clusterstate/utils/status.go b/cluster-autoscaler/clusterstate/utils/status.go index c76b197aa0d1..39150480c06f 100644 --- a/cluster-autoscaler/clusterstate/utils/status.go +++ b/cluster-autoscaler/clusterstate/utils/status.go @@ -28,7 +28,7 @@ import ( kube_client "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/record" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -119,10 +119,10 @@ func WriteStatusConfigMap(kubeClient kube_client.Interface, namespace string, ms errMsg = fmt.Sprintf("Failed to write status configmap: %v", writeStatusError) } if errMsg != "" { - glog.Error(errMsg) + klog.Error(errMsg) return nil, errors.New(errMsg) } - glog.V(8).Infof("Successfully wrote status configmap with body \"%v\"", statusMsg) + klog.V(8).Infof("Successfully wrote status configmap with body \"%v\"", statusMsg) // Having this as a side-effect is somewhat ugly // But it makes error handling easier, as we get a free retry each loop if logRecorder != nil { @@ -136,7 +136,7 @@ func DeleteStatusConfigMap(kubeClient kube_client.Interface, namespace string) e maps := kubeClient.CoreV1().ConfigMaps(namespace) err := maps.Delete(StatusConfigMapName, &metav1.DeleteOptions{}) if err != nil { - glog.Error("Failed to delete status configmap") + klog.Error("Failed to delete status configmap") } return err } diff --git a/cluster-autoscaler/config/autoscaling_options.go b/cluster-autoscaler/config/autoscaling_options.go index 37b623be66cc..81c494646e76 100644 --- a/cluster-autoscaler/config/autoscaling_options.go +++ b/cluster-autoscaler/config/autoscaling_options.go @@ -60,6 +60,10 @@ type AutoscalingOptions struct { EstimatorName string // ExpanderName sets the type of node group expander to be used in scale up ExpanderName string + // IgnoreDaemonSetsUtilization is whether CA will ignore 
DaemonSet pods when calculating resource utilization for scaling down + IgnoreDaemonSetsUtilization bool + // IgnoreMirrorPodsUtilization is whether CA will ignore Mirror pods when calculating resource utilization for scaling down + IgnoreMirrorPodsUtilization bool // MaxGracefulTerminationSec is maximum number of seconds scale down waits for pods to terminate before // removing the node from cloud provider. MaxGracefulTerminationSec int @@ -115,6 +119,8 @@ type AutoscalingOptions struct { ExpendablePodsPriorityCutoff int // Regional tells whether the cluster is regional. Regional bool + // Pods newer than this will not be considered as unschedulable for scale-up. + NewPodScaleUpDelay time.Duration // Path to kube configuration if available KubeConfigPath string } diff --git a/cluster-autoscaler/context/autoscaling_context.go b/cluster-autoscaler/context/autoscaling_context.go index 88cea76834b9..45d0f2154f48 100644 --- a/cluster-autoscaler/context/autoscaling_context.go +++ b/cluster-autoscaler/context/autoscaling_context.go @@ -17,15 +17,16 @@ limitations under the License. package context import ( - "github.com/golang/glog" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils" "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/estimator" "k8s.io/autoscaler/cluster-autoscaler/expander" "k8s.io/autoscaler/cluster-autoscaler/simulator" kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" kube_client "k8s.io/client-go/kubernetes" kube_record "k8s.io/client-go/tools/record" + "k8s.io/klog" ) // AutoscalingContext contains user-configurable constant and configuration-related objects passed to @@ -42,6 +43,8 @@ type AutoscalingContext struct { PredicateChecker *simulator.PredicateChecker // ExpanderStrategy is the strategy used to choose which node group to expand when scaling up ExpanderStrategy expander.Strategy + // EstimatorBuilder is the builder function for node count estimator to be used. 
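Of the options introduced above, `NewPodScaleUpDelay` is the behavioural one: pods newer than the delay are not yet counted as unschedulable for scale-up, which avoids reacting to pods the scheduler may simply not have processed yet. A sketch of that filter under the stated semantics; the function name is illustrative, not the one CA uses:

```go
package main

import (
	"fmt"
	"time"
)

type pod struct {
	name    string
	created time.Time
}

// olderThan keeps only pods that have existed for at least the configured
// delay, mirroring the documented effect of NewPodScaleUpDelay.
func olderThan(pods []pod, delay time.Duration, now time.Time) []pod {
	var out []pod
	for _, p := range pods {
		if now.Sub(p.created) >= delay {
			out = append(out, p)
		}
	}
	return out
}

func main() {
	now := time.Now()
	pods := []pod{
		{name: "fresh", created: now.Add(-5 * time.Second)},
		{name: "stale", created: now.Add(-2 * time.Minute)},
	}
	for _, p := range olderThan(pods, time.Minute, now) {
		fmt.Println(p.name) // only "stale" survives the filter
	}
}
```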
+ EstimatorBuilder estimator.EstimatorBuilder } // AutoscalingKubeClients contains all Kubernetes API clients, @@ -78,13 +81,14 @@ func NewResourceLimiterFromAutoscalingOptions(options config.AutoscalingOptions) // NewAutoscalingContext returns an autoscaling context from all the necessary parameters passed via arguments func NewAutoscalingContext(options config.AutoscalingOptions, predicateChecker *simulator.PredicateChecker, - autoscalingKubeClients *AutoscalingKubeClients, cloudProvider cloudprovider.CloudProvider, expanderStrategy expander.Strategy) *AutoscalingContext { + autoscalingKubeClients *AutoscalingKubeClients, cloudProvider cloudprovider.CloudProvider, expanderStrategy expander.Strategy, estimatorBuilder estimator.EstimatorBuilder) *AutoscalingContext { return &AutoscalingContext{ AutoscalingOptions: options, CloudProvider: cloudProvider, AutoscalingKubeClients: *autoscalingKubeClients, PredicateChecker: predicateChecker, ExpanderStrategy: expanderStrategy, + EstimatorBuilder: estimatorBuilder, } } @@ -95,7 +99,7 @@ func NewAutoscalingKubeClients(opts config.AutoscalingOptions, kubeClient kube_c kubeEventRecorder := kube_util.CreateEventRecorder(kubeClient) logRecorder, err := utils.NewStatusMapRecorder(kubeClient, opts.ConfigNamespace, kubeEventRecorder, opts.WriteStatusConfigMap) if err != nil { - glog.Error("Failed to initialize status configmap, unable to write status events") + klog.Error("Failed to initialize status configmap, unable to write status events") // Get a dummy, so we can at least safely call the methods // TODO(maciekpytel): recover from this after successful status configmap update? logRecorder, _ = utils.NewStatusMapRecorder(kubeClient, opts.ConfigNamespace, kubeEventRecorder, false) diff --git a/cluster-autoscaler/core/autoscaler.go b/cluster-autoscaler/core/autoscaler.go index c0d537825966..de245902cb8d 100644 --- a/cluster-autoscaler/core/autoscaler.go +++ b/cluster-autoscaler/core/autoscaler.go @@ -21,12 +21,15 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" cloudBuilder "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/builder" + "k8s.io/autoscaler/cluster-autoscaler/clusterstate" "k8s.io/autoscaler/cluster-autoscaler/config" "k8s.io/autoscaler/cluster-autoscaler/context" + "k8s.io/autoscaler/cluster-autoscaler/estimator" "k8s.io/autoscaler/cluster-autoscaler/expander" "k8s.io/autoscaler/cluster-autoscaler/expander/factory" ca_processors "k8s.io/autoscaler/cluster-autoscaler/processors" "k8s.io/autoscaler/cluster-autoscaler/simulator" + "k8s.io/autoscaler/cluster-autoscaler/utils/backoff" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" kube_client "k8s.io/client-go/kubernetes" ) @@ -39,7 +42,9 @@ type AutoscalerOptions struct { CloudProvider cloudprovider.CloudProvider PredicateChecker *simulator.PredicateChecker ExpanderStrategy expander.Strategy + EstimatorBuilder estimator.EstimatorBuilder Processors *ca_processors.AutoscalingProcessors + Backoff backoff.Backoff } // Autoscaler is the main component of CA which scales up/down node groups according to its configuration @@ -57,7 +62,14 @@ func NewAutoscaler(opts AutoscalerOptions) (Autoscaler, errors.AutoscalerError) if err != nil { return nil, errors.ToAutoscalerError(errors.InternalError, err) } - return NewStaticAutoscaler(opts.AutoscalingOptions, opts.PredicateChecker, opts.AutoscalingKubeClients, opts.Processors, opts.CloudProvider, opts.ExpanderStrategy), nil + return NewStaticAutoscaler( + opts.AutoscalingOptions, + opts.PredicateChecker, + opts.AutoscalingKubeClients, 
+ opts.Processors, opts.CloudProvider, + opts.ExpanderStrategy, + opts.EstimatorBuilder, + opts.Backoff), nil } // Initialize default options if not provided. @@ -87,6 +99,17 @@ func initializeDefaultOptions(opts *AutoscalerOptions) error { } opts.ExpanderStrategy = expanderStrategy } + if opts.EstimatorBuilder == nil { + estimatorBuilder, err := estimator.NewEstimatorBuilder(opts.EstimatorName) + if err != nil { + return err + } + opts.EstimatorBuilder = estimatorBuilder + } + if opts.Backoff == nil { + opts.Backoff = + backoff.NewIdBasedExponentialBackoff(clusterstate.InitialNodeGroupBackoffDuration, clusterstate.MaxNodeGroupBackoffDuration, clusterstate.NodeGroupBackoffResetTimeout) + } return nil } diff --git a/cluster-autoscaler/core/scale_down.go b/cluster-autoscaler/core/scale_down.go index 56f3861796e3..4b7c41ace918 100644 --- a/cluster-autoscaler/core/scale_down.go +++ b/cluster-autoscaler/core/scale_down.go @@ -41,25 +41,10 @@ import ( kube_client "k8s.io/client-go/kubernetes" kube_record "k8s.io/client-go/tools/record" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/autoscaler/cluster-autoscaler/processors/status" "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" -) - -// ScaleDownResult represents the state of scale down. -type ScaleDownResult int - -const ( - // ScaleDownError - scale down finished with error. - ScaleDownError ScaleDownResult = iota - // ScaleDownNoUnneeded - no unneeded nodes and no errors. - ScaleDownNoUnneeded - // ScaleDownNoNodeDeleted - unneeded nodes present but not available for deletion. - ScaleDownNoNodeDeleted - // ScaleDownNodeDeleted - a node was deleted. - ScaleDownNodeDeleted - // ScaleDownNodeDeleteStarted - a node deletion process was started. - ScaleDownNodeDeleteStarted + "k8s.io/klog" ) const ( @@ -85,6 +70,10 @@ const ( type NodeDeleteStatus struct { sync.Mutex deleteInProgress bool + // A map of node delete results by node name. It contains nil if the delete was successful and an error otherwise. + // It's being constantly drained into ScaleDownStatus objects in order to notify the ScaleDownStatusProcessor that + // the node drain has ended or that an error occurred during the deletion process. + nodeDeleteResults map[string]error } // IsDeleteInProgress returns true if a node is being deleted. @@ -101,6 +90,22 @@ func (n *NodeDeleteStatus) SetDeleteInProgress(status bool) { n.deleteInProgress = status } +// AddNodeDeleteResult adds a node delete result to the result map. +func (n *NodeDeleteStatus) AddNodeDeleteResult(nodeName string, result error) { + n.Lock() + defer n.Unlock() + n.nodeDeleteResults[nodeName] = result +} + +// DrainNodeDeleteResults returns the whole result map and replaces it with a new empty one. 
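`DrainNodeDeleteResults`, defined next, swaps the map while holding the lock, so concurrent deleters never race the drain and each result is observed exactly once. The same contract at usage level, with types local to the example:

```go
package main

import (
	"fmt"
	"sync"
)

// resultSink mirrors NodeDeleteStatus's drain pattern: producers record
// per-node delete results under a lock, and the consumer atomically takes
// the whole map while installing a fresh empty one.
type resultSink struct {
	sync.Mutex
	results map[string]error
}

func (s *resultSink) Add(node string, err error) {
	s.Lock()
	defer s.Unlock()
	s.results[node] = err
}

func (s *resultSink) Drain() map[string]error {
	s.Lock()
	defer s.Unlock()
	out := s.results
	s.results = make(map[string]error)
	return out
}

func main() {
	s := &resultSink{results: make(map[string]error)}
	s.Add("node-1", nil)
	fmt.Println(len(s.Drain())) // 1
	fmt.Println(len(s.Drain())) // 0: already drained, never reported twice
}
```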
+func (n *NodeDeleteStatus) DrainNodeDeleteResults() map[string]error { + n.Lock() + defer n.Unlock() + results := n.nodeDeleteResults + n.nodeDeleteResults = make(map[string]error) + return results +} + type scaleDownResourcesLimits map[string]int64 type scaleDownResourcesDelta map[string]int64 @@ -134,7 +139,7 @@ func computeScaleDownResourcesLeftLimits(nodes []*apiv1.Node, resourceLimiter *c resultScaleDownLimits[resource] = computeAboveMin(totalGpus[resource], min) } default: - glog.Errorf("Scale down limits defined for unsupported resource '%s'", resource) + klog.Errorf("Scale down limits defined for unsupported resource '%s'", resource) } } } @@ -301,7 +306,7 @@ type ScaleDown struct { unneededNodesList []*apiv1.Node unremovableNodes map[string]time.Time podLocationHints map[string]string - nodeUtilizationMap map[string]float64 + nodeUtilizationMap map[string]simulator.UtilizationInfo usageTracker *simulator.UsageTracker nodeDeleteStatus *NodeDeleteStatus } @@ -314,10 +319,10 @@ func NewScaleDown(context *context.AutoscalingContext, clusterStateRegistry *clu unneededNodes: make(map[string]time.Time), unremovableNodes: make(map[string]time.Time), podLocationHints: make(map[string]string), - nodeUtilizationMap: make(map[string]float64), + nodeUtilizationMap: make(map[string]simulator.UtilizationInfo), usageTracker: simulator.NewUsageTracker(), unneededNodesList: make([]*apiv1.Node, 0), - nodeDeleteStatus: &NodeDeleteStatus{}, + nodeDeleteStatus: &NodeDeleteStatus{nodeDeleteResults: make(map[string]error)}, } } @@ -352,7 +357,7 @@ func (sd *ScaleDown) UpdateUnneededNodes( // Only scheduled non expendable pods and pods waiting for lower priority pods preemption can prevent node delete. nonExpendablePods := FilterOutExpendablePods(pods, sd.context.ExpendablePodsPriorityCutoff) nodeNameToNodeInfo := scheduler_util.CreateNodeNameToInfoMap(nonExpendablePods, nodes) - utilizationMap := make(map[string]float64) + utilizationMap := make(map[string]simulator.UtilizationInfo) sd.updateUnremovableNodes(nodes) // Filter out nodes that were recently checked @@ -368,7 +373,7 @@ func (sd *ScaleDown) UpdateUnneededNodes( } skipped := len(nodesToCheck) - len(filteredNodesToCheck) if skipped > 0 { - glog.V(1).Infof("Scale-down calculation: ignoring %v nodes unremovable in the last %v", skipped, sd.context.AutoscalingOptions.UnremovableNodeRecheckTimeout) + klog.V(1).Infof("Scale-down calculation: ignoring %v nodes unremovable in the last %v", skipped, sd.context.AutoscalingOptions.UnremovableNodeRecheckTimeout) } // Phase1 - look at the nodes utilization. Calculate the utilization @@ -379,31 +384,31 @@ func (sd *ScaleDown) UpdateUnneededNodes( // Old-time marked nodes are again eligible for deletion - something went wrong with them // and they have not been deleted. 
if isNodeBeingDeleted(node, timestamp) { - glog.V(1).Infof("Skipping %s from delete considerations - the node is currently being deleted", node.Name) + klog.V(1).Infof("Skipping %s from delete considerations - the node is currently being deleted", node.Name) continue } // Skip nodes marked with no scale down annotation if hasNoScaleDownAnnotation(node) { - glog.V(1).Infof("Skipping %s from delete consideration - the node is marked as no scale down", node.Name) + klog.V(1).Infof("Skipping %s from delete consideration - the node is marked as no scale down", node.Name) continue } nodeInfo, found := nodeNameToNodeInfo[node.Name] if !found { - glog.Errorf("Node info for %s not found", node.Name) + klog.Errorf("Node info for %s not found", node.Name) continue } - utilization, err := simulator.CalculateUtilization(node, nodeInfo) + utilInfo, err := simulator.CalculateUtilization(node, nodeInfo, sd.context.IgnoreDaemonSetsUtilization, sd.context.IgnoreMirrorPodsUtilization) if err != nil { - glog.Warningf("Failed to calculate utilization for %s: %v", node.Name, err) + klog.Warningf("Failed to calculate utilization for %s: %v", node.Name, err) } - glog.V(4).Infof("Node %s - utilization %f", node.Name, utilization) - utilizationMap[node.Name] = utilization + klog.V(4).Infof("Node %s - utilization %f", node.Name, utilInfo.Utilization) + utilizationMap[node.Name] = utilInfo - if utilization >= sd.context.ScaleDownUtilizationThreshold { - glog.V(4).Infof("Node %s is not suitable for removal - utilization too big (%f)", node.Name, utilization) + if utilInfo.Utilization >= sd.context.ScaleDownUtilizationThreshold { + klog.V(4).Infof("Node %s is not suitable for removal - utilization too big (%f)", node.Name, utilInfo.Utilization) continue } currentlyUnneededNodes = append(currentlyUnneededNodes, node) @@ -448,7 +453,7 @@ func (sd *ScaleDown) UpdateUnneededNodes( } if additionalCandidatesCount > 0 { // Look for additional nodes to remove among the rest of nodes. - glog.V(3).Infof("Finding additional %v candidates for scale down.", additionalCandidatesCount) + klog.V(3).Infof("Finding additional %v candidates for scale down.", additionalCandidatesCount) additionalNodesToRemove, additionalUnremovable, additionalNewHints, simulatorErr := simulator.FindNodesToRemove(currentNonCandidates[:additionalCandidatesPoolSize], nodes, nonExpendablePods, nil, sd.context.PredicateChecker, additionalCandidatesCount, true, @@ -485,7 +490,7 @@ func (sd *ScaleDown) UpdateUnneededNodes( for _, node := range unremovable { sd.unremovableNodes[node.Name] = unremovableTimeout } - glog.V(1).Infof("%v nodes found to be unremovable in simulation, will re-check them at %v", len(unremovable), unremovableTimeout) + klog.V(1).Infof("%v nodes found to be unremovable in simulation, will re-check them at %v", len(unremovable), unremovableTimeout) } // Update state and metrics @@ -525,10 +530,10 @@ func (sd *ScaleDown) updateUnremovableNodes(nodes []*apiv1.Node) { // down state and returning an appropriate error. 
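As the loop above shows, `simulator.CalculateUtilization` now takes the two ignore flags and returns a `UtilizationInfo` rather than a bare float, and `nodeUtilizationMap` stores those richer values. The effect of the flags on the CPU component, sketched; the real function also accounts for memory, and how it combines the two is not visible in this diff:

```go
package main

import "fmt"

type podInfo struct {
	cpuRequestMillis int64
	daemonSet        bool
	mirror           bool
}

// utilization sketches what IgnoreDaemonSetsUtilization and
// IgnoreMirrorPodsUtilization do: requests of DaemonSet and mirror pods can
// be excluded before dividing by the node's allocatable.
func utilization(pods []podInfo, allocatableMillis int64, ignoreDS, ignoreMirror bool) float64 {
	var requested int64
	for _, p := range pods {
		if ignoreDS && p.daemonSet {
			continue
		}
		if ignoreMirror && p.mirror {
			continue
		}
		requested += p.cpuRequestMillis
	}
	return float64(requested) / float64(allocatableMillis)
}

func main() {
	pods := []podInfo{
		{cpuRequestMillis: 100, daemonSet: true},
		{cpuRequestMillis: 200, mirror: true},
		{cpuRequestMillis: 300},
	}
	fmt.Println(utilization(pods, 1000, false, false)) // 0.6
	fmt.Println(utilization(pods, 1000, true, true))   // 0.3: node may now qualify for removal
}
```

`markSimulationError`, defined next, resets this utilization map along with the rest of the scale-down state.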
 func (sd *ScaleDown) markSimulationError(simulatorErr errors.AutoscalerError,
 	timestamp time.Time) errors.AutoscalerError {
-	glog.Errorf("Error while simulating node drains: %v", simulatorErr)
+	klog.Errorf("Error while simulating node drains: %v", simulatorErr)
 	sd.unneededNodesList = make([]*apiv1.Node, 0)
 	sd.unneededNodes = make(map[string]time.Time)
-	sd.nodeUtilizationMap = make(map[string]float64)
+	sd.nodeUtilizationMap = make(map[string]simulator.UtilizationInfo)
 	sd.clusterStateRegistry.UpdateScaleDownCandidates(sd.unneededNodesList, timestamp)
 	return simulatorErr.AddPrefix("error while simulating node drains: ")
 }
@@ -554,9 +559,23 @@ func (sd *ScaleDown) chooseCandidates(nodes []*apiv1.Node) ([]*apiv1.Node, []*ap
 	return currentCandidates, currentNonCandidates
 }

-// TryToScaleDown tries to scale down the cluster. It returns ScaleDownResult indicating if any node was
+func (sd *ScaleDown) mapNodesToStatusScaleDownNodes(nodes []*apiv1.Node, nodeGroups map[string]cloudprovider.NodeGroup, evictedPodLists map[string][]*apiv1.Pod) []*status.ScaleDownNode {
+	var result []*status.ScaleDownNode
+	for _, node := range nodes {
+		result = append(result, &status.ScaleDownNode{
+			Node:        node,
+			NodeGroup:   nodeGroups[node.Name],
+			UtilInfo:    sd.nodeUtilizationMap[node.Name],
+			EvictedPods: evictedPodLists[node.Name],
+		})
+	}
+	return result
+}
+
+// TryToScaleDown tries to scale down the cluster. It returns a result inside a ScaleDownStatus indicating if any node was
 // removed and error if such occurred.
-func (sd *ScaleDown) TryToScaleDown(allNodes []*apiv1.Node, pods []*apiv1.Pod, pdbs []*policyv1.PodDisruptionBudget, currentTime time.Time) (ScaleDownResult, errors.AutoscalerError) {
+func (sd *ScaleDown) TryToScaleDown(allNodes []*apiv1.Node, pods []*apiv1.Pod, pdbs []*policyv1.PodDisruptionBudget, currentTime time.Time) (*status.ScaleDownStatus, errors.AutoscalerError) {
+	scaleDownStatus := &status.ScaleDownStatus{NodeDeleteResults: sd.nodeDeleteStatus.DrainNodeDeleteResults()}
 	nodeDeletionDuration := time.Duration(0)
 	findNodesToRemoveDuration := time.Duration(0)
 	defer updateScaleDownMetrics(time.Now(), &findNodesToRemoveDuration, &nodeDeletionDuration)
@@ -567,9 +586,8 @@ func (sd *ScaleDown) TryToScaleDown(allNodes []*apiv1.Node, pods []*apiv1.Pod, p
 	resourceLimiter, errCP := sd.context.CloudProvider.GetResourceLimiter()
 	if errCP != nil {
-		return ScaleDownError, errors.ToAutoscalerError(
-			errors.CloudProviderError,
-			errCP)
+		scaleDownStatus.Result = status.ScaleDownError
+		return scaleDownStatus, errors.ToAutoscalerError(errors.CloudProviderError, errCP)
 	}

 	scaleDownResourcesLeft := computeScaleDownResourcesLeftLimits(nodesWithoutMaster, resourceLimiter, sd.context.CloudProvider, currentTime)
@@ -579,11 +597,11 @@ func (sd *ScaleDown) TryToScaleDown(allNodes []*apiv1.Node, pods []*apiv1.Pod, p

 	for _, node := range nodesWithoutMaster {
 		if val, found := sd.unneededNodes[node.Name]; found {
-			glog.V(2).Infof("%s was unneeded for %s", node.Name, currentTime.Sub(val).String())
+			klog.V(2).Infof("%s was unneeded for %s", node.Name, currentTime.Sub(val).String())

 			// Check if node is marked with no scale down annotation.
 			if hasNoScaleDownAnnotation(node) {
-				glog.V(4).Infof("Skipping %s - scale down disabled annotation found", node.Name)
+				klog.V(4).Infof("Skipping %s - scale down disabled annotation found", node.Name)
 				continue
 			}
@@ -602,34 +620,34 @@ func (sd *ScaleDown) TryToScaleDown(allNodes []*apiv1.Node, pods []*apiv1.Pod, p

 			nodeGroup, err := sd.context.CloudProvider.NodeGroupForNode(node)
 			if err != nil {
-				glog.Errorf("Error while checking node group for %s: %v", node.Name, err)
+				klog.Errorf("Error while checking node group for %s: %v", node.Name, err)
 				continue
 			}
 			if nodeGroup == nil || reflect.ValueOf(nodeGroup).IsNil() {
-				glog.V(4).Infof("Skipping %s - no node group config", node.Name)
+				klog.V(4).Infof("Skipping %s - no node group config", node.Name)
 				continue
 			}

 			size, found := nodeGroupSize[nodeGroup.Id()]
 			if !found {
-				glog.Errorf("Error while checking node group size %s: group size not found in cache", nodeGroup.Id())
+				klog.Errorf("Error while checking node group size %s: group size not found in cache", nodeGroup.Id())
 				continue
 			}

 			if size <= nodeGroup.MinSize() {
-				glog.V(1).Infof("Skipping %s - node group min size reached", node.Name)
+				klog.V(1).Infof("Skipping %s - node group min size reached", node.Name)
 				continue
 			}

 			scaleDownResourcesDelta, err := computeScaleDownResourcesDelta(node, nodeGroup, resourcesWithLimits)
 			if err != nil {
-				glog.Errorf("Error getting node resources: %v", err)
+				klog.Errorf("Error getting node resources: %v", err)
 				continue
 			}

 			checkResult := scaleDownResourcesLeft.checkScaleDownDeltaWithinLimits(scaleDownResourcesDelta)
 			if checkResult.exceeded {
-				glog.V(4).Infof("Skipping %s - minimal limit exceeded for %v", node.Name, checkResult.exceededResources)
+				klog.V(4).Infof("Skipping %s - minimal limit exceeded for %v", node.Name, checkResult.exceededResources)
 				continue
 			}
@@ -638,8 +656,9 @@ func (sd *ScaleDown) TryToScaleDown(allNodes []*apiv1.Node, pods []*apiv1.Pod, p
 		}
 	}
 	if len(candidates) == 0 {
-		glog.V(1).Infof("No candidates for scale down")
-		return ScaleDownNoUnneeded, nil
+		klog.V(1).Infof("No candidates for scale down")
+		scaleDownStatus.Result = status.ScaleDownNoUnneeded
+		return scaleDownStatus, nil
 	}

 	// Trying to delete empty nodes in bulk. If there are no empty nodes then CA will
@@ -653,9 +672,12 @@ func (sd *ScaleDown) TryToScaleDown(allNodes []*apiv1.Node, pods []*apiv1.Pod, p
 		err := sd.waitForEmptyNodesDeleted(emptyNodes, confirmation)
 		nodeDeletionDuration = time.Now().Sub(nodeDeletionStart)
 		if err == nil {
-			return ScaleDownNodeDeleted, nil
+			scaleDownStatus.ScaledDownNodes = sd.mapNodesToStatusScaleDownNodes(emptyNodes, candidateNodeGroups, make(map[string][]*apiv1.Pod))
+			scaleDownStatus.Result = status.ScaleDownNodeDeleted
+			return scaleDownStatus, nil
 		}
-		return ScaleDownError, err.AddPrefix("failed to delete at least one empty node: ")
+		scaleDownStatus.Result = status.ScaleDownError
+		return scaleDownStatus, err.AddPrefix("failed to delete at least one empty node: ")
 	}

 	findNodesToRemoveStart := time.Now()
@@ -668,11 +690,13 @@ func (sd *ScaleDown) TryToScaleDown(allNodes []*apiv1.Node, pods []*apiv1.Pod, p
 	findNodesToRemoveDuration = time.Now().Sub(findNodesToRemoveStart)

 	if err != nil {
-		return ScaleDownError, err.AddPrefix("Find node to remove failed: ")
+		scaleDownStatus.Result = status.ScaleDownError
+		return scaleDownStatus, err.AddPrefix("Find node to remove failed: ")
 	}
 	if len(nodesToRemove) == 0 {
-		glog.V(1).Infof("No node to remove")
-		return ScaleDownNoNodeDeleted, nil
+		klog.V(1).Infof("No node to remove")
+		scaleDownStatus.Result = status.ScaleDownNoNodeDeleted
+		return scaleDownStatus, nil
 	}
 	toRemove := nodesToRemove[0]
 	utilization := sd.nodeUtilizationMap[toRemove.Node.Name]
@@ -680,7 +704,7 @@ func (sd *ScaleDown) TryToScaleDown(allNodes []*apiv1.Node, pods []*apiv1.Pod, p
 	for _, pod := range toRemove.PodsToReschedule {
 		podNames = append(podNames, pod.Namespace+"/"+pod.Name)
 	}
-	glog.V(0).Infof("Scale-down: removing node %s, utilization: %v, pods to reschedule: %s", toRemove.Node.Name, utilization,
+	klog.V(0).Infof("Scale-down: removing node %s, utilization: %v, pods to reschedule: %s", toRemove.Node.Name, utilization,
 		strings.Join(podNames, ","))
 	sd.context.LogRecorder.Eventf(apiv1.EventTypeNormal, "ScaleDown", "Scale-down: removing node %s, utilization: %v, pods to reschedule: %s",
 		toRemove.Node.Name, utilization, strings.Join(podNames, ","))
@@ -695,10 +719,12 @@ func (sd *ScaleDown) TryToScaleDown(allNodes []*apiv1.Node, pods []*apiv1.Pod, p

 	go func() {
 		// Finishing the delete process once this goroutine is over.
+		var err error
+		defer func() { sd.nodeDeleteStatus.AddNodeDeleteResult(toRemove.Node.Name, err) }()
 		defer sd.nodeDeleteStatus.SetDeleteInProgress(false)
-		err := sd.deleteNode(toRemove.Node, toRemove.PodsToReschedule)
+		err = sd.deleteNode(toRemove.Node, toRemove.PodsToReschedule)
 		if err != nil {
-			glog.Errorf("Failed to delete %s: %v", toRemove.Node.Name, err)
+			klog.Errorf("Failed to delete %s: %v", toRemove.Node.Name, err)
 			return
 		}
 		nodeGroup := candidateNodeGroups[toRemove.Node.Name]
@@ -709,7 +735,9 @@ func (sd *ScaleDown) TryToScaleDown(allNodes []*apiv1.Node, pods []*apiv1.Pod, p
 		}
 	}()

-	return ScaleDownNodeDeleteStarted, nil
+	scaleDownStatus.ScaledDownNodes = sd.mapNodesToStatusScaleDownNodes([]*apiv1.Node{toRemove.Node}, candidateNodeGroups, map[string][]*apiv1.Pod{toRemove.Node.Name: toRemove.PodsToReschedule})
+	scaleDownStatus.Result = status.ScaleDownNodeDeleteStarted
+	return scaleDownStatus, nil
 }

 // updateScaleDownMetrics registers duration of different parts of scale down.
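// Editor's note - a minimal, runnable sketch (not part of the patch) of the
// drain-and-report pattern the scale_down.go changes above rely on: the
// deletion goroutine records its final error through a deferred closure, and
// the next TryToScaleDown call drains all accumulated results exactly once.
// Names below (deleteStatus, add, drain) are illustrative, not the patch's
// identifiers.
package main

import (
	"errors"
	"fmt"
	"sync"
)

type deleteStatus struct {
	sync.Mutex
	results map[string]error
}

// add records the outcome of one node deletion.
func (s *deleteStatus) add(node string, err error) {
	s.Lock()
	defer s.Unlock()
	s.results[node] = err
}

// drain returns all accumulated results and resets the map,
// so each result is reported exactly once.
func (s *deleteStatus) drain() map[string]error {
	s.Lock()
	defer s.Unlock()
	out := s.results
	s.results = make(map[string]error)
	return out
}

func main() {
	status := &deleteStatus{results: make(map[string]error)}
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		// err is declared up front and assigned with '=' below; a ':=' here
		// would create a fresh variable and the deferred closure would always
		// report nil - the same reason the patch changes 'err :=' to 'err ='
		// inside the deletion goroutine.
		var err error
		defer func() { status.add("node-1", err) }()
		err = errors.New("simulated deletion failure")
	}()
	wg.Wait()
	fmt.Println(status.drain()) // map[node-1:simulated deletion failure]
}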
@@ -741,7 +769,7 @@ func getEmptyNodes(candidates []*apiv1.Node, pods []*apiv1.Pod, maxEmptyBulkDele
 	for _, node := range emptyNodes {
 		nodeGroup, err := cloudProvider.NodeGroupForNode(node)
 		if err != nil {
-			glog.Errorf("Failed to get group for %s", node.Name)
+			klog.Errorf("Failed to get group for %s", node.Name)
 			continue
 		}
 		if nodeGroup == nil || reflect.ValueOf(nodeGroup).IsNil() {
@@ -753,7 +781,7 @@ func getEmptyNodes(candidates []*apiv1.Node, pods []*apiv1.Pod, maxEmptyBulkDele
 			// Will be cached.
 			size, err := nodeGroup.TargetSize()
 			if err != nil {
-				glog.Errorf("Failed to get size for %s: %v ", nodeGroup.Id(), err)
+				klog.Errorf("Failed to get size for %s: %v ", nodeGroup.Id(), err)
 				continue
 			}
 			available = size - nodeGroup.MinSize()
@@ -765,7 +793,7 @@ func getEmptyNodes(candidates []*apiv1.Node, pods []*apiv1.Pod, maxEmptyBulkDele
 		if available > 0 {
 			resourcesDelta, err := computeScaleDownResourcesDelta(node, nodeGroup, resourcesNames)
 			if err != nil {
-				glog.Errorf("Error: %v", err)
+				klog.Errorf("Error: %v", err)
 				continue
 			}
 			checkResult := resourcesLimitsCopy.tryDecrementLimitsByDelta(resourcesDelta)
@@ -788,7 +816,7 @@ func (sd *ScaleDown) scheduleDeleteEmptyNodes(emptyNodes []*apiv1.Node, client k
 	recorder kube_record.EventRecorder, readinessMap map[string]bool,
 	candidateNodeGroups map[string]cloudprovider.NodeGroup, confirmation chan errors.AutoscalerError) {
 	for _, node := range emptyNodes {
-		glog.V(0).Infof("Scale-down: removing empty node %s", node.Name)
+		klog.V(0).Infof("Scale-down: removing empty node %s", node.Name)
 		sd.context.LogRecorder.Eventf(apiv1.EventTypeNormal, "ScaleDownEmpty", "Scale-down: removing empty node %s", node.Name)
 		simulator.RemoveNodeFromTracker(sd.usageTracker, node.Name, sd.unneededNodes)
 		go func(nodeToDelete *apiv1.Node) {
@@ -838,7 +866,7 @@ func (sd *ScaleDown) waitForEmptyNodesDeleted(emptyNodes []*apiv1.Node, confirma
 		select {
 		case err := <-confirmation:
 			if err != nil {
-				glog.Errorf("Problem with empty node deletion: %v", err)
+				klog.Errorf("Problem with empty node deletion: %v", err)
 				finalError = err
 			}
 		case <-time.After(timeLeft):
@@ -917,7 +945,7 @@ func evictPod(podToEvict *apiv1.Pod, client kube_client.Interface, recorder kube
 			return nil
 		}
 	}
-	glog.Errorf("Failed to evict pod %s, error: %v", podToEvict.Name, lastError)
+	klog.Errorf("Failed to evict pod %s, error: %v", podToEvict.Name, lastError)
 	recorder.Eventf(podToEvict, apiv1.EventTypeWarning, "ScaleDownFailed", "failed to delete pod for ScaleDown")
 	return fmt.Errorf("Failed to evict pod %s/%s within allowed timeout (last error: %v)", podToEvict.Namespace, podToEvict.Name, lastError)
 }
@@ -963,18 +991,18 @@ func drainNode(node *apiv1.Node, pods []*apiv1.Pod, client kube_client.Interface
 		for _, pod := range pods {
 			podreturned, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
 			if err == nil && (podreturned == nil || podreturned.Spec.NodeName == node.Name) {
-				glog.Errorf("Not deleted yet %v", podreturned)
+				klog.Errorf("Not deleted yet %v", podreturned)
 				allGone = false
 				break
 			}
 			if err != nil && !kube_errors.IsNotFound(err) {
-				glog.Errorf("Failed to check pod %s/%s: %v", pod.Namespace, pod.Name, err)
+				klog.Errorf("Failed to check pod %s/%s: %v", pod.Namespace, pod.Name, err)
 				allGone = false
 				break
 			}
 		}
 		if allGone {
-			glog.V(1).Infof("All pods removed from %s", node.Name)
+			klog.V(1).Infof("All pods removed from %s", node.Name)
 			// Let the deferred function know there is no need for cleanup
 			return nil
 		}
@@ -988,11 +1016,11 @@ func cleanToBeDeleted(nodes []*apiv1.Node, client kube_client.Interface, recorde
 	for _, node := range nodes {
 		cleaned, err := deletetaint.CleanToBeDeleted(node, client)
 		if err != nil {
-			glog.Warningf("Error while releasing taints on node %v: %v", node.Name, err)
+			klog.Warningf("Error while releasing taints on node %v: %v", node.Name, err)
 			recorder.Eventf(node, apiv1.EventTypeWarning, "ClusterAutoscalerCleanup",
 				"failed to clean toBeDeletedTaint: %v", err)
 		} else if cleaned {
-			glog.V(1).Infof("Successfully released toBeDeletedTaint on node %v", node.Name)
+			klog.V(1).Infof("Successfully released toBeDeletedTaint on node %v", node.Name)
 			recorder.Eventf(node, apiv1.EventTypeNormal, "ClusterAutoscalerCleanup", "marking the node as schedulable")
 		}
 	}
@@ -1015,7 +1043,7 @@ func deleteNodeFromCloudProvider(node *apiv1.Node, cloudProvider cloudprovider.C
 	}
 	recorder.Eventf(node, apiv1.EventTypeNormal, "ScaleDown", "node removed by cluster autoscaler")
 	registry.RegisterScaleDown(&clusterstate.ScaleDownRequest{
-		NodeGroupName:      nodeGroup.Id(),
+		NodeGroup:          nodeGroup,
 		NodeName:           node.Name,
 		Time:               time.Now(),
 		ExpectedDeleteTime: time.Now().Add(MaxCloudProviderNodeDeletionTime),
diff --git a/cluster-autoscaler/core/scale_down_test.go b/cluster-autoscaler/core/scale_down_test.go
index c32b5998926f..5491498582fc 100644
--- a/cluster-autoscaler/core/scale_down_test.go
+++ b/cluster-autoscaler/core/scale_down_test.go
@@ -41,10 +41,11 @@ import (

 	"strconv"

-	"github.com/golang/glog"
 	"github.com/stretchr/testify/assert"
+	"k8s.io/autoscaler/cluster-autoscaler/processors/status"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/deletetaint"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
+	"k8s.io/klog"
 )

 func TestFindUnneededNodes(t *testing.T) {
@@ -126,7 +127,7 @@ func TestFindUnneededNodes(t *testing.T) {
 	}
 	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, provider)

-	clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder)
+	clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
 	sd := NewScaleDown(&context, clusterStateRegistry)

 	sd.UpdateUnneededNodes([]*apiv1.Node{n1, n2, n3, n4, n5, n7, n8, n9}, []*apiv1.Node{n1, n2, n3, n4, n5, n6, n7, n8, n9},
 		[]*apiv1.Pod{p1, p2, p3, p4, p5, p6}, time.Now(), nil)
@@ -237,13 +238,13 @@ func TestPodsWithPrioritiesFindUnneededNodes(t *testing.T) {
 	}
 	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, provider)

-	clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder)
+	clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
 	sd := NewScaleDown(&context, clusterStateRegistry)

 	sd.UpdateUnneededNodes([]*apiv1.Node{n1, n2, n3, n4}, []*apiv1.Node{n1, n2, n3, n4},
 		[]*apiv1.Pod{p1, p2, p3, p4, p5, p6, p7}, time.Now(), nil)

 	assert.Equal(t, 2, len(sd.unneededNodes))
-	glog.Warningf("Unneeded nodes %v", sd.unneededNodes)
+	klog.Warningf("Unneeded nodes %v", sd.unneededNodes)
 	_, found := sd.unneededNodes["n2"]
 	assert.True(t, found)
 	_, found = sd.unneededNodes["n3"]
@@ -287,7 +288,7 @@ func TestFindUnneededMaxCandidates(t *testing.T) {
 	}
 	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, provider)

-	clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder)
+	clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
 	sd := NewScaleDown(&context, clusterStateRegistry)

 	sd.UpdateUnneededNodes(nodes, nodes, pods, time.Now(), nil)
@@ -353,7 +354,7 @@ func TestFindUnneededEmptyNodes(t *testing.T) {
 	}
 	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, provider)

-	clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder)
+	clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
 	sd := NewScaleDown(&context, clusterStateRegistry)

 	sd.UpdateUnneededNodes(nodes, nodes, pods, time.Now(), nil)
@@ -397,7 +398,7 @@ func TestFindUnneededNodePool(t *testing.T) {
 	}
 	context := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, provider)

-	clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder)
+	clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
 	sd := NewScaleDown(&context, clusterStateRegistry)

 	sd.UpdateUnneededNodes(nodes, nodes, pods, time.Now(), nil)
@@ -531,7 +532,7 @@ func TestDeleteNode(t *testing.T) {

 			// build context
 			context := NewScaleTestAutoscalingContext(config.AutoscalingOptions{}, fakeClient, provider)
-			clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder)
+			clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
 			sd := NewScaleDown(&context, clusterStateRegistry)

 			// attempt delete
@@ -760,14 +761,14 @@ func TestScaleDown(t *testing.T) {
 	}
 	context := NewScaleTestAutoscalingContext(options, fakeClient, provider)

-	clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder)
+	clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
 	scaleDown := NewScaleDown(&context, clusterStateRegistry)
 	scaleDown.UpdateUnneededNodes([]*apiv1.Node{n1, n2}, []*apiv1.Node{n1, n2}, []*apiv1.Pod{p1, p2, p3}, time.Now().Add(-5*time.Minute), nil)
-	result, err := scaleDown.TryToScaleDown([]*apiv1.Node{n1, n2}, []*apiv1.Pod{p1, p2, p3}, nil, time.Now())
+	scaleDownStatus, err := scaleDown.TryToScaleDown([]*apiv1.Node{n1, n2}, []*apiv1.Pod{p1, p2, p3}, nil, time.Now())
 	waitForDeleteToFinish(t, scaleDown)
 	assert.NoError(t, err)
-	assert.Equal(t, ScaleDownNodeDeleteStarted, result)
+	assert.Equal(t, status.ScaleDownNodeDeleteStarted, scaleDownStatus.Result)
 	assert.Equal(t, n1.Name, getStringFromChan(deletedNodes))
 	assert.Equal(t, n1.Name, getStringFromChan(updatedNodes))
 }
@@ -811,7 +812,7 @@ var defaultScaleDownOptions = config.AutoscalingOptions{
 	MinCoresTotal:  0,
 	MinMemoryTotal: 0,
 	MaxCoresTotal:  config.DefaultMaxClusterCores,
-	MaxMemoryTotal: config.DefaultMaxClusterMemory * units.Gigabyte,
+	MaxMemoryTotal: config.DefaultMaxClusterMemory * units.GiB,
 }

 func TestScaleDownEmptyMultipleNodeGroups(t *testing.T) {
@@ -856,13 +857,13 @@ func TestScaleDownEmptyMinCoresLimitHit(t *testing.T) {

 func TestScaleDownEmptyMinMemoryLimitHit(t *testing.T) {
 	options := defaultScaleDownOptions
-	options.MinMemoryTotal = 4000 * MB
+	options.MinMemoryTotal = 4000 * MiB
 	config := &scaleTestConfig{
 		nodes: []nodeConfig{
-			{"n1", 2000, 1000 * MB, 0, true, "ng1"},
-			{"n2", 1000, 1000 * MB, 0, true, "ng1"},
-			{"n3", 1000, 1000 * MB, 0, true, "ng1"},
-			{"n4", 1000, 3000 * MB, 0, true, "ng1"},
+			{"n1", 2000, 1000 * MiB, 0, true, "ng1"},
+			{"n2", 1000, 1000 * MiB, 0, true, "ng1"},
+			{"n3", 1000, 1000 * MiB, 0, true, "ng1"},
+			{"n4", 1000, 3000 * MiB, 0, true, "ng1"},
 		},
 		options:            options,
 		expectedScaleDowns: []string{"n1", "n2"},
@@ -886,12 +887,12 @@ func TestScaleDownEmptyMinGpuLimitHit(t *testing.T) {
 	}
 	config := &scaleTestConfig{
 		nodes: []nodeConfig{
-			{"n1", 1000, 1000 * MB, 1, true, "ng1"},
-			{"n2", 1000, 1000 * MB, 1, true, "ng1"},
-			{"n3", 1000, 1000 * MB, 1, true, "ng1"},
-			{"n4", 1000, 1000 * MB, 1, true, "ng1"},
-			{"n5", 1000, 1000 * MB, 1, true, "ng1"},
-			{"n6", 1000, 1000 * MB, 1, true, "ng1"},
+			{"n1", 1000, 1000 * MiB, 1, true, "ng1"},
+			{"n2", 1000, 1000 * MiB, 1, true, "ng1"},
+			{"n3", 1000, 1000 * MiB, 1, true, "ng1"},
+			{"n4", 1000, 1000 * MiB, 1, true, "ng1"},
+			{"n5", 1000, 1000 * MiB, 1, true, "ng1"},
+			{"n6", 1000, 1000 * MiB, 1, true, "ng1"},
 		},
 		options:            options,
 		expectedScaleDowns: []string{"n1", "n2"},
@@ -969,24 +970,24 @@ func simpleScaleDownEmpty(t *testing.T, config *scaleTestConfig) {

 	context := NewScaleTestAutoscalingContext(config.options, fakeClient, provider)

-	clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder)
+	clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
 	scaleDown := NewScaleDown(&context, clusterStateRegistry)
 	scaleDown.UpdateUnneededNodes(nodes, nodes, []*apiv1.Pod{}, time.Now().Add(-5*time.Minute), nil)
-	result, err := scaleDown.TryToScaleDown(nodes, []*apiv1.Pod{}, nil, time.Now())
+	scaleDownStatus, err := scaleDown.TryToScaleDown(nodes, []*apiv1.Pod{}, nil, time.Now())
 	waitForDeleteToFinish(t, scaleDown)
 	// This helps to verify that TryToScaleDown doesn't attempt to remove anything
 	// after delete in progress status is gone.
 	close(deletedNodes)

 	assert.NoError(t, err)
-	var expectedScaleDownResult ScaleDownResult
+	var expectedScaleDownResult status.ScaleDownResult
 	if len(config.expectedScaleDowns) > 0 {
-		expectedScaleDownResult = ScaleDownNodeDeleted
+		expectedScaleDownResult = status.ScaleDownNodeDeleted
 	} else {
-		expectedScaleDownResult = ScaleDownNoUnneeded
+		expectedScaleDownResult = status.ScaleDownNoUnneeded
 	}
-	assert.Equal(t, expectedScaleDownResult, result)
+	assert.Equal(t, expectedScaleDownResult, scaleDownStatus.Result)

 	// Check the channel (and make sure there isn't more than there should be).
 	// Report only up to 10 extra nodes found.
@@ -1045,15 +1046,15 @@ func TestNoScaleDownUnready(t *testing.T) {
 	context := NewScaleTestAutoscalingContext(options, fakeClient, provider)

 	// N1 is unready so it requires a bigger unneeded time.
-	clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder)
+	clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
 	scaleDown := NewScaleDown(&context, clusterStateRegistry)
 	scaleDown.UpdateUnneededNodes([]*apiv1.Node{n1, n2}, []*apiv1.Node{n1, n2}, []*apiv1.Pod{p2}, time.Now().Add(-5*time.Minute), nil)
-	result, err := scaleDown.TryToScaleDown([]*apiv1.Node{n1, n2}, []*apiv1.Pod{p2}, nil, time.Now())
+	scaleDownStatus, err := scaleDown.TryToScaleDown([]*apiv1.Node{n1, n2}, []*apiv1.Pod{p2}, nil, time.Now())
 	waitForDeleteToFinish(t, scaleDown)

 	assert.NoError(t, err)
-	assert.Equal(t, ScaleDownNoUnneeded, result)
+	assert.Equal(t, status.ScaleDownNoUnneeded, scaleDownStatus.Result)

 	deletedNodes := make(chan string, 10)

@@ -1071,11 +1072,11 @@ func TestNoScaleDownUnready(t *testing.T) {
 	scaleDown = NewScaleDown(&context, clusterStateRegistry)
 	scaleDown.UpdateUnneededNodes([]*apiv1.Node{n1, n2}, []*apiv1.Node{n1, n2},
 		[]*apiv1.Pod{p2}, time.Now().Add(-2*time.Hour), nil)
-	result, err = scaleDown.TryToScaleDown([]*apiv1.Node{n1, n2}, []*apiv1.Pod{p2}, nil, time.Now())
+	scaleDownStatus, err = scaleDown.TryToScaleDown([]*apiv1.Node{n1, n2}, []*apiv1.Pod{p2}, nil, time.Now())
 	waitForDeleteToFinish(t, scaleDown)

 	assert.NoError(t, err)
-	assert.Equal(t, ScaleDownNodeDeleteStarted, result)
+	assert.Equal(t, status.ScaleDownNodeDeleteStarted, scaleDownStatus.Result)
 	assert.Equal(t, n1.Name, getStringFromChan(deletedNodes))
 }

@@ -1144,15 +1145,15 @@ func TestScaleDownNoMove(t *testing.T) {
 	}
 	context := NewScaleTestAutoscalingContext(options, fakeClient, provider)

-	clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder)
+	clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
 	scaleDown := NewScaleDown(&context, clusterStateRegistry)
 	scaleDown.UpdateUnneededNodes([]*apiv1.Node{n1, n2}, []*apiv1.Node{n1, n2},
 		[]*apiv1.Pod{p1, p2}, time.Now().Add(5*time.Minute), nil)
-	result, err := scaleDown.TryToScaleDown([]*apiv1.Node{n1, n2}, []*apiv1.Pod{p1, p2}, nil, time.Now())
+	scaleDownStatus, err := scaleDown.TryToScaleDown([]*apiv1.Node{n1, n2}, []*apiv1.Pod{p1, p2}, nil, time.Now())
 	waitForDeleteToFinish(t, scaleDown)

 	assert.NoError(t, err)
-	assert.Equal(t, ScaleDownNoUnneeded, result)
+	assert.Equal(t, status.ScaleDownNoUnneeded, scaleDownStatus.Result)
 }

 func getStringFromChan(c chan string) string {
@@ -1210,13 +1211,13 @@ func TestCleanToBeDeleted(t *testing.T) {

 func TestCalculateCoresAndMemoryTotal(t *testing.T) {
 	nodeConfigs := []nodeConfig{
-		{"n1", 2000, 7500 * MB, 0, true, "ng1"},
-		{"n2", 2000, 7500 * MB, 0, true, "ng1"},
-		{"n3", 2000, 7500 * MB, 0, true, "ng1"},
-		{"n4", 12000, 8000 * MB, 0, true, "ng1"},
-		{"n5", 16000, 7500 * MB, 0, true, "ng1"},
-		{"n6", 8000, 6000 * MB, 0, true, "ng1"},
-		{"n7", 6000, 16000 * MB, 0, true, "ng1"},
+		{"n1", 2000, 7500 * MiB, 0, true, "ng1"},
+		{"n2", 2000, 7500 * MiB, 0, true, "ng1"},
+		{"n3", 2000, 7500 * MiB, 0, true, "ng1"},
+		{"n4", 12000, 8000 * MiB, 0, true, "ng1"},
+		{"n5", 16000, 7500 * MiB, 0, true, "ng1"},
+		{"n6", 8000, 6000 * MiB, 0, true, "ng1"},
+		{"n7", 6000, 16000 * MiB, 0, true, "ng1"},
 	}
 	nodes := make([]*apiv1.Node, len(nodeConfigs))
 	for i, n := range nodeConfigs {
@@ -1236,7 +1237,7 @@ func TestCalculateCoresAndMemoryTotal(t *testing.T) {
 	coresTotal, memoryTotal := calculateScaleDownCoresMemoryTotal(nodes, time.Now())

 	assert.Equal(t, int64(42), coresTotal)
-	assert.Equal(t, int64(44000*MB), memoryTotal)
+	assert.Equal(t, int64(44000*MiB), memoryTotal)
 }

 func TestFilterOutMasters(t *testing.T) {
diff --git a/cluster-autoscaler/core/scale_test_common.go b/cluster-autoscaler/core/scale_test_common.go
index 57fd9215c19c..9d6fd2ba413d 100644
--- a/cluster-autoscaler/core/scale_test_common.go
+++ b/cluster-autoscaler/core/scale_test_common.go
@@ -24,8 +24,10 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils"
 	"k8s.io/autoscaler/cluster-autoscaler/config"
 	"k8s.io/autoscaler/cluster-autoscaler/context"
+	"k8s.io/autoscaler/cluster-autoscaler/estimator"
 	"k8s.io/autoscaler/cluster-autoscaler/expander/random"
 	"k8s.io/autoscaler/cluster-autoscaler/metrics"
+	"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/labels"
@@ -33,6 +35,8 @@ import (

 	"github.com/stretchr/testify/assert"
 	apiv1 "k8s.io/api/core/v1"
+	"k8s.io/autoscaler/cluster-autoscaler/clusterstate"
+	"k8s.io/autoscaler/cluster-autoscaler/utils/backoff"
 	kube_client "k8s.io/client-go/kubernetes"
 	kube_record "k8s.io/client-go/tools/record"
 	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
@@ -75,6 +79,9 @@ type scaleTestConfig struct {
 func NewScaleTestAutoscalingContext(options config.AutoscalingOptions, fakeClient kube_client.Interface, provider cloudprovider.CloudProvider) context.AutoscalingContext {
 	fakeRecorder := kube_record.NewFakeRecorder(5)
 	fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
+	// Ignoring the error here is safe - if a test doesn't specify a valid estimatorName,
+	// it either doesn't need one, or should fail when it turns out to be nil.
+	estimatorBuilder, _ := estimator.NewEstimatorBuilder(options.EstimatorName)
 	return context.AutoscalingContext{
 		AutoscalingOptions: options,
 		AutoscalingKubeClients: context.AutoscalingKubeClients{
@@ -85,6 +92,7 @@ func NewScaleTestAutoscalingContext(options config.AutoscalingOptions, fakeClien
 		CloudProvider:    provider,
 		PredicateChecker: simulator.NewTestPredicateChecker(),
 		ExpanderStrategy: random.NewStrategy(),
+		EstimatorBuilder: estimatorBuilder,
 	}
 }

@@ -92,11 +100,14 @@ type mockAutoprovisioningNodeGroupManager struct {
 	t *testing.T
 }

-func (p *mockAutoprovisioningNodeGroupManager) CreateNodeGroup(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup) (cloudprovider.NodeGroup, errors.AutoscalerError) {
+func (p *mockAutoprovisioningNodeGroupManager) CreateNodeGroup(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup) (nodegroups.CreateNodeGroupResult, errors.AutoscalerError) {
 	newNodeGroup, err := nodeGroup.Create()
 	assert.NoError(p.t, err)
 	metrics.RegisterNodeGroupCreation()
-	return newNodeGroup, nil
+	result := nodegroups.CreateNodeGroupResult{
+		MainCreatedNodeGroup: newNodeGroup,
+	}
+	return result, nil
 }

 func (p *mockAutoprovisioningNodeGroupManager) RemoveUnneededNodeGroups(context *context.AutoscalingContext) error {
@@ -151,3 +162,7 @@ func (p *mockAutoprovisioningNodeGroupListProcessor) Process(context *context.Au

 func (p *mockAutoprovisioningNodeGroupListProcessor) CleanUp() {
 }
+
+func newBackoff() backoff.Backoff {
+	return backoff.NewIdBasedExponentialBackoff(clusterstate.InitialNodeGroupBackoffDuration, clusterstate.MaxNodeGroupBackoffDuration, clusterstate.NodeGroupBackoffResetTimeout)
+}
diff --git a/cluster-autoscaler/core/scale_up.go b/cluster-autoscaler/core/scale_up.go
index a6110c72ed9e..d06423c41e3f 100644
--- a/cluster-autoscaler/core/scale_up.go
+++ b/cluster-autoscaler/core/scale_up.go
@@ -28,18 +28,17 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/clusterstate"
 	"k8s.io/autoscaler/cluster-autoscaler/context"
-	"k8s.io/autoscaler/cluster-autoscaler/estimator"
 	"k8s.io/autoscaler/cluster-autoscaler/expander"
 	"k8s.io/autoscaler/cluster-autoscaler/metrics"
 	ca_processors "k8s.io/autoscaler/cluster-autoscaler/processors"
+	"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupset"
 	"k8s.io/autoscaler/cluster-autoscaler/processors/status"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/glogx"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
-	"k8s.io/autoscaler/cluster-autoscaler/utils/nodegroupset"
 	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"

-	"github.com/golang/glog"
+	"k8s.io/klog"
 )

 type scaleUpResourcesLimits map[string]int64
@@ -94,7 +93,7 @@ func computeScaleUpResourcesLeftLimits(
 			}

 		default:
-			glog.Errorf("Scale up limits defined for unsupported resource '%s'", resource)
+			klog.Errorf("Scale up limits defined for unsupported resource '%s'", resource)
 		}
 	}
 }
@@ -245,12 +244,12 @@ var (
 // false if it didn't and error if an error occurred. Assumes that all nodes in the cluster are
 // ready and in sync with instance groups.
 func ScaleUp(context *context.AutoscalingContext, processors *ca_processors.AutoscalingProcessors, clusterStateRegistry *clusterstate.ClusterStateRegistry, unschedulablePods []*apiv1.Pod,
-	nodes []*apiv1.Node, daemonSets []*extensionsv1.DaemonSet) (*status.ScaleUpStatus, errors.AutoscalerError) {
+	nodes []*apiv1.Node, daemonSets []*extensionsv1.DaemonSet, nodeInfos map[string]*schedulercache.NodeInfo) (*status.ScaleUpStatus, errors.AutoscalerError) {
 	// From now on we only care about unschedulable pods that were marked after the newest
 	// node became available for the scheduler.
 	if len(unschedulablePods) == 0 {
-		glog.V(1).Info("No unschedulable pods")
-		return &status.ScaleUpStatus{ScaledUp: false}, nil
+		klog.V(1).Info("No unschedulable pods")
+		return &status.ScaleUpStatus{Result: status.ScaleUpNotNeeded}, nil
 	}

 	now := time.Now()
@@ -264,36 +263,31 @@ func ScaleUp(context *context.AutoscalingContext, processors *ca_processors.Auto
 		podsRemainUnschedulable[pod] = make(map[string]status.Reasons)
 	}
 	glogx.V(1).Over(loggingQuota).Infof("%v other pods are also unschedulable", -loggingQuota.Left())
-	nodeInfos, err := GetNodeInfosForGroups(nodes, context.CloudProvider, context.ClientSet,
-		daemonSets, context.PredicateChecker)
-	if err != nil {
-		return nil, err.AddPrefix("failed to build node infos for node groups: ")
-	}

 	nodesFromNotAutoscaledGroups, err := FilterOutNodesFromNotAutoscaledGroups(nodes, context.CloudProvider)
 	if err != nil {
-		return nil, err.AddPrefix("failed to filter out nodes which are from not autoscaled groups: ")
+		return &status.ScaleUpStatus{Result: status.ScaleUpError}, err.AddPrefix("failed to filter out nodes which are from not autoscaled groups: ")
 	}

 	nodeGroups := context.CloudProvider.NodeGroups()

 	resourceLimiter, errCP := context.CloudProvider.GetResourceLimiter()
 	if errCP != nil {
-		return nil, errors.ToAutoscalerError(
+		return &status.ScaleUpStatus{Result: status.ScaleUpError}, errors.ToAutoscalerError(
 			errors.CloudProviderError,
 			errCP)
 	}

 	scaleUpResourcesLeft, errLimits := computeScaleUpResourcesLeftLimits(nodeGroups, nodeInfos, nodesFromNotAutoscaledGroups, resourceLimiter)
 	if errLimits != nil {
-		return nil, errLimits.AddPrefix("Could not compute total resources: ")
+		return &status.ScaleUpStatus{Result: status.ScaleUpError}, errLimits.AddPrefix("Could not compute total resources: ")
 	}

 	upcomingNodes := make([]*schedulercache.NodeInfo, 0)
 	for nodeGroup, numberOfNodes := range clusterStateRegistry.GetUpcomingNodes() {
 		nodeTemplate, found := nodeInfos[nodeGroup]
 		if !found {
-			return nil, errors.NewAutoscalerError(
+			return &status.ScaleUpStatus{Result: status.ScaleUpError}, errors.NewAutoscalerError(
 				errors.InternalError,
 				"failed to find template node for node group %s",
 				nodeGroup)
@@ -302,29 +296,32 @@ func ScaleUp(context *context.AutoscalingContext, processors *ca_processors.Auto
 			upcomingNodes = append(upcomingNodes, nodeTemplate)
 		}
 	}
-	glog.V(4).Infof("Upcoming %d nodes", len(upcomingNodes))
+	klog.V(4).Infof("Upcoming %d nodes", len(upcomingNodes))

-	podsPassingPredicates := make(map[string][]*apiv1.Pod)
 	expansionOptions := make([]expander.Option, 0)

 	if processors != nil && processors.NodeGroupListProcessor != nil {
 		var errProc error
 		nodeGroups, nodeInfos, errProc = processors.NodeGroupListProcessor.Process(context, nodeGroups, nodeInfos, unschedulablePods)
 		if errProc != nil {
-			return nil, errors.ToAutoscalerError(errors.InternalError, errProc)
+			return &status.ScaleUpStatus{Result: status.ScaleUpError}, errors.ToAutoscalerError(errors.InternalError, errProc)
 		}
 	}

+	podsPredicatePassingCheckFunctions := getPodsPredicatePassingCheckFunctions(context, unschedulablePods, nodeInfos)
+	getPodsPassingPredicates := podsPredicatePassingCheckFunctions.getPodsPassingPredicates
+	getPodsNotPassingPredicates := podsPredicatePassingCheckFunctions.getPodsNotPassingPredicates
+
 	skippedNodeGroups := map[string]status.Reasons{}
 	for _, nodeGroup := range nodeGroups {
 		// Autoprovisioned node groups without nodes are created later so skip check for them.
-		if nodeGroup.Exist() && !clusterStateRegistry.IsNodeGroupSafeToScaleUp(nodeGroup.Id(), now) {
+		if nodeGroup.Exist() && !clusterStateRegistry.IsNodeGroupSafeToScaleUp(nodeGroup, now) {
 			// Hack that depends on internals of IsNodeGroupSafeToScaleUp.
 			if !clusterStateRegistry.IsNodeGroupHealthy(nodeGroup.Id()) {
-				glog.Warningf("Node group %s is not ready for scaleup - unhealthy", nodeGroup.Id())
+				klog.Warningf("Node group %s is not ready for scaleup - unhealthy", nodeGroup.Id())
 				skippedNodeGroups[nodeGroup.Id()] = notReadyReason
 			} else {
-				glog.Warningf("Node group %s is not ready for scaleup - backoff", nodeGroup.Id())
+				klog.Warningf("Node group %s is not ready for scaleup - backoff", nodeGroup.Id())
 				skippedNodeGroups[nodeGroup.Id()] = backoffReason
 			}
 			continue
@@ -332,32 +329,32 @@ func ScaleUp(context *context.AutoscalingContext, processors *ca_processors.Auto

 		currentTargetSize, err := nodeGroup.TargetSize()
 		if err != nil {
-			glog.Errorf("Failed to get node group size: %v", err)
+			klog.Errorf("Failed to get node group size: %v", err)
 			skippedNodeGroups[nodeGroup.Id()] = notReadyReason
 			continue
 		}
 		if currentTargetSize >= nodeGroup.MaxSize() {
-			glog.V(4).Infof("Skipping node group %s - max size reached", nodeGroup.Id())
+			klog.V(4).Infof("Skipping node group %s - max size reached", nodeGroup.Id())
 			skippedNodeGroups[nodeGroup.Id()] = maxLimitReachedReason
 			continue
 		}

 		nodeInfo, found := nodeInfos[nodeGroup.Id()]
 		if !found {
-			glog.Errorf("No node info for: %s", nodeGroup.Id())
+			klog.Errorf("No node info for: %s", nodeGroup.Id())
 			skippedNodeGroups[nodeGroup.Id()] = notReadyReason
 			continue
 		}

 		scaleUpResourcesDelta, err := computeScaleUpResourcesDelta(nodeInfo, nodeGroup, resourceLimiter)
 		if err != nil {
-			glog.Errorf("Skipping node group %s; error getting node group resources: %v", nodeGroup.Id(), err)
+			klog.Errorf("Skipping node group %s; error getting node group resources: %v", nodeGroup.Id(), err)
 			skippedNodeGroups[nodeGroup.Id()] = notReadyReason
 			continue
 		}
 		checkResult := scaleUpResourcesLeft.checkScaleUpDeltaWithinLimits(scaleUpResourcesDelta)
 		if checkResult.exceeded {
-			glog.V(4).Infof("Skipping node group %s; maximal limit exceeded for %v", nodeGroup.Id(), checkResult.exceededResources)
+			klog.V(4).Infof("Skipping node group %s; maximal limit exceeded for %v", nodeGroup.Id(), checkResult.exceededResources)
 			skippedNodeGroups[nodeGroup.Id()] = maxLimitReachedReason
 			continue
 		}
@@ -367,72 +364,74 @@ func ScaleUp(context *context.AutoscalingContext, processors *ca_processors.Auto
 			Pods: make([]*apiv1.Pod, 0),
 		}

-		schedulableOnNode := CheckPodsSchedulableOnNode(context, unschedulablePods, nodeGroup.Id(), nodeInfo)
-		for pod, err := range schedulableOnNode {
-			if err != nil {
+		// add the list of pods which pass predicates to the option
+		podsPassing, err := getPodsPassingPredicates(nodeGroup.Id())
+		if err != nil {
+			klog.V(4).Infof("Skipping node group %s; cannot compute pods passing predicates", nodeGroup.Id())
+			skippedNodeGroups[nodeGroup.Id()] = notReadyReason
+			continue
+		} else {
+			option.Pods = make([]*apiv1.Pod, len(podsPassing))
+			copy(option.Pods, podsPassing)
+		}
+
+		// record the reasons why pods for which no working expansion option has been found so far cannot be scheduled
+		podsNotPassing, err := getPodsNotPassingPredicates(nodeGroup.Id())
+		if err != nil {
+			klog.V(4).Infof("Skipping node group %s; cannot compute pods not passing predicates", nodeGroup.Id())
+			skippedNodeGroups[nodeGroup.Id()] = notReadyReason
+			continue
+		}
+
+		// mark that there is a scheduling option for pods which can be scheduled to a node from the currently analyzed node group
+		for _, pod := range podsPassing {
+			delete(podsRemainUnschedulable, pod)
+		}
+
+		for pod, err := range podsNotPassing {
+			_, found := podsRemainUnschedulable[pod]
+			if found && nodeGroup.Exist() {
 				// Aggregate errors across existing node groups.
 				// TODO(aleksandra-malinowska): figure out how to communicate
 				// reasons NAP can't create a node-pool, if it's enabled.
-				if nodeGroup.Exist() {
-					if _, found := podsRemainUnschedulable[pod]; found {
-						podsRemainUnschedulable[pod][nodeGroup.Id()] = err
-					}
-				}
-			} else {
-				option.Pods = append(option.Pods, pod)
+				podsRemainUnschedulable[pod][nodeGroup.Id()] = err
 			}
 		}
-		for _, pod := range option.Pods {
-			delete(podsRemainUnschedulable, pod)
-		}
-		passingPods := make([]*apiv1.Pod, len(option.Pods))
-		copy(passingPods, option.Pods)
-		podsPassingPredicates[nodeGroup.Id()] = passingPods

 		if len(option.Pods) > 0 {
-			if context.EstimatorName == estimator.BinpackingEstimatorName {
-				binpackingEstimator := estimator.NewBinpackingNodeEstimator(context.PredicateChecker)
-				option.NodeCount = binpackingEstimator.Estimate(option.Pods, nodeInfo, upcomingNodes)
-			} else if context.EstimatorName == estimator.BasicEstimatorName {
-				basicEstimator := estimator.NewBasicNodeEstimator()
-				for _, pod := range option.Pods {
-					basicEstimator.Add(pod)
-				}
-				option.NodeCount, option.Debug = basicEstimator.Estimate(nodeInfo.Node(), upcomingNodes)
-			} else {
-				glog.Fatalf("Unrecognized estimator: %s", context.EstimatorName)
-			}
+			estimator := context.EstimatorBuilder(context.PredicateChecker)
+			option.NodeCount = estimator.Estimate(option.Pods, nodeInfo, upcomingNodes)
 			if option.NodeCount > 0 {
 				expansionOptions = append(expansionOptions, option)
 			} else {
-				glog.V(2).Infof("No need for any nodes in %s", nodeGroup.Id())
+				klog.V(2).Infof("No need for any nodes in %s", nodeGroup.Id())
 			}
 		} else {
-			glog.V(4).Infof("No pod can fit to %s", nodeGroup.Id())
+			klog.V(4).Infof("No pod can fit to %s", nodeGroup.Id())
 		}
 	}

 	if len(expansionOptions) == 0 {
-		glog.V(1).Info("No expansion options")
-		return &status.ScaleUpStatus{ScaledUp: false, PodsRemainUnschedulable: getRemainingPods(podsRemainUnschedulable, skippedNodeGroups)}, nil
+		klog.V(1).Info("No expansion options")
+		return &status.ScaleUpStatus{Result: status.ScaleUpNoOptionsAvailable, PodsRemainUnschedulable: getRemainingPods(podsRemainUnschedulable, skippedNodeGroups)}, nil
 	}

 	// Pick some expansion option.
 	bestOption := context.ExpanderStrategy.BestOption(expansionOptions, nodeInfos)
 	if bestOption != nil && bestOption.NodeCount > 0 {
-		glog.V(1).Infof("Best option to resize: %s", bestOption.NodeGroup.Id())
+		klog.V(1).Infof("Best option to resize: %s", bestOption.NodeGroup.Id())
 		if len(bestOption.Debug) > 0 {
-			glog.V(1).Info(bestOption.Debug)
+			klog.V(1).Info(bestOption.Debug)
 		}
-		glog.V(1).Infof("Estimated %d nodes needed in %s", bestOption.NodeCount, bestOption.NodeGroup.Id())
+		klog.V(1).Infof("Estimated %d nodes needed in %s", bestOption.NodeCount, bestOption.NodeGroup.Id())

 		newNodes := bestOption.NodeCount

 		if context.MaxNodesTotal > 0 && len(nodes)+newNodes+len(upcomingNodes) > context.MaxNodesTotal {
-			glog.V(1).Infof("Capping size to max cluster total size (%d)", context.MaxNodesTotal)
+			klog.V(1).Infof("Capping size to max cluster total size (%d)", context.MaxNodesTotal)
 			newNodes = context.MaxNodesTotal - len(nodes) - len(upcomingNodes)
 			if newNodes < 1 {
-				return nil, errors.NewAutoscalerError(
+				return &status.ScaleUpStatus{Result: status.ScaleUpError}, errors.NewAutoscalerError(
 					errors.TransientError,
 					"max node total count already reached")
 			}
@@ -440,26 +439,49 @@ func ScaleUp(context *context.AutoscalingContext, processors *ca_processors.Auto

 		if !bestOption.NodeGroup.Exist() {
 			oldId := bestOption.NodeGroup.Id()
-			bestOption.NodeGroup, err = processors.NodeGroupManager.CreateNodeGroup(context, bestOption.NodeGroup)
+			createNodeGroupResult, err := processors.NodeGroupManager.CreateNodeGroup(context, bestOption.NodeGroup)
 			if err != nil {
-				return nil, err
+				return &status.ScaleUpStatus{Result: status.ScaleUpError}, err
 			}
-			// Node group id may change when we create node group and we need to update
-			// our data structures.
-			if oldId != bestOption.NodeGroup.Id() {
-				podsPassingPredicates[bestOption.NodeGroup.Id()] = podsPassingPredicates[oldId]
-				delete(podsPassingPredicates, oldId)
+			bestOption.NodeGroup = createNodeGroupResult.MainCreatedNodeGroup
+
+			// If possible, replace the candidate node-info with node info based on the created node group. The latter
+			// one should be more in line with the nodes which will be created by the node group.
+			mainCreatedNodeInfo, err := GetNodeInfoFromTemplate(createNodeGroupResult.MainCreatedNodeGroup, daemonSets, context.PredicateChecker)
+			if err == nil {
+				nodeInfos[createNodeGroupResult.MainCreatedNodeGroup.Id()] = mainCreatedNodeInfo
+			} else {
+				klog.Warningf("Cannot build node info for newly created main node group %v; balancing similar node groups may not work; err=%v", createNodeGroupResult.MainCreatedNodeGroup.Id(), err)
+				// Use node info based on the expansion candidate, but update the Id, which likely changed when the node group was created.
 				nodeInfos[bestOption.NodeGroup.Id()] = nodeInfos[oldId]
+			}
+
+			if oldId != createNodeGroupResult.MainCreatedNodeGroup.Id() {
 				delete(nodeInfos, oldId)
 			}
+
+			for _, nodeGroup := range createNodeGroupResult.ExtraCreatedNodeGroups {
+				nodeInfo, err := GetNodeInfoFromTemplate(nodeGroup, daemonSets, context.PredicateChecker)
+
+				if err != nil {
+					klog.Warningf("Cannot build node info for newly created extra node group %v; balancing similar node groups will not work; err=%v", nodeGroup.Id(), err)
+					continue
+				}
+				nodeInfos[nodeGroup.Id()] = nodeInfo
+			}
+
+			// Update ClusterStateRegistry so that rebalancing between similar node groups works.
+			// TODO(lukaszos) when pursuing scalability, update this call with one which takes a list of changed node groups
+			// so we do not do extra API calls. (the call at the bottom of ScaleUp() could also be changed then)
+			clusterStateRegistry.Recalculate()
 		}

 		nodeInfo, found := nodeInfos[bestOption.NodeGroup.Id()]
 		if !found {
 			// This should never happen, as we already should have retrieved
 			// nodeInfo for any considered nodegroup.
-			glog.Errorf("No node info for: %s", bestOption.NodeGroup.Id())
-			return nil, errors.NewAutoscalerError(
+			klog.Errorf("No node info for: %s", bestOption.NodeGroup.Id())
+			return &status.ScaleUpStatus{Result: status.ScaleUpError}, errors.NewAutoscalerError(
 				errors.CloudProviderError,
 				"No node info for best expansion option!")
 		}
@@ -467,24 +489,24 @@ func ScaleUp(context *context.AutoscalingContext, processors *ca_processors.Auto
 		// apply upper limits for CPU and memory
 		newNodes, err = applyScaleUpResourcesLimits(newNodes, scaleUpResourcesLeft, nodeInfo, bestOption.NodeGroup, resourceLimiter)
 		if err != nil {
-			return nil, err
+			return &status.ScaleUpStatus{Result: status.ScaleUpError}, err
 		}

 		targetNodeGroups := []cloudprovider.NodeGroup{bestOption.NodeGroup}
 		if context.BalanceSimilarNodeGroups {
-			similarNodeGroups, typedErr := nodegroupset.FindSimilarNodeGroups(bestOption.NodeGroup, context.CloudProvider, nodeInfos)
+			similarNodeGroups, typedErr := processors.NodeGroupSetProcessor.FindSimilarNodeGroups(context, bestOption.NodeGroup, nodeInfos)
 			if typedErr != nil {
-				return nil, typedErr.AddPrefix("Failed to find matching node groups: ")
+				return &status.ScaleUpStatus{Result: status.ScaleUpError}, typedErr.AddPrefix("Failed to find matching node groups: ")
 			}
-			similarNodeGroups = filterNodeGroupsByPods(similarNodeGroups, bestOption.Pods, podsPassingPredicates)
+			similarNodeGroups = filterNodeGroupsByPods(similarNodeGroups, bestOption.Pods, getPodsPassingPredicates)
 			for _, ng := range similarNodeGroups {
-				if clusterStateRegistry.IsNodeGroupSafeToScaleUp(ng.Id(), now) {
+				if clusterStateRegistry.IsNodeGroupSafeToScaleUp(ng, now) {
 					targetNodeGroups = append(targetNodeGroups, ng)
 				} else {
 					// This should never happen, as we will filter out the node group earlier on
 					// because of missing entry in podsPassingPredicates, but double checking doesn't
 					// really cost us anything
-					glog.V(2).Infof("Ignoring node group %s when balancing: group is not ready for scaleup", ng.Id())
+					klog.V(2).Infof("Ignoring node group %s when balancing: group is not ready for scaleup", ng.Id())
 				}
 			}
 			if len(targetNodeGroups) > 1 {
@@ -495,25 +517,25 @@ func ScaleUp(context *context.AutoscalingContext, processors *ca_processors.Auto
 					}
 					buffer.WriteString(ng.Id())
 				}
-				glog.V(1).Infof("Splitting scale-up between %v similar node groups: {%v}", len(targetNodeGroups), buffer.String())
+				klog.V(1).Infof("Splitting scale-up between %v similar node groups: {%v}", len(targetNodeGroups), buffer.String())
 			}
 		}
-		scaleUpInfos, typedErr := nodegroupset.BalanceScaleUpBetweenGroups(
-			targetNodeGroups, newNodes)
+		scaleUpInfos, typedErr := processors.NodeGroupSetProcessor.BalanceScaleUpBetweenGroups(
+			context, targetNodeGroups, newNodes)
 		if typedErr != nil {
-			return nil, typedErr
+			return &status.ScaleUpStatus{Result: status.ScaleUpError}, typedErr
 		}
-		glog.V(1).Infof("Final scale-up plan: %v", scaleUpInfos)
+		klog.V(1).Infof("Final scale-up plan: %v", scaleUpInfos)
 		for _, info := range scaleUpInfos {
 			typedErr := executeScaleUp(context, clusterStateRegistry, info, gpu.GetGpuTypeForMetrics(nodeInfo.Node(), nil))
 			if typedErr != nil {
-				return nil, typedErr
+				return &status.ScaleUpStatus{Result: status.ScaleUpError}, typedErr
 			}
 		}

 		clusterStateRegistry.Recalculate()
 		return &status.ScaleUpStatus{
-				ScaledUp:                true,
+				Result:                  status.ScaleUpSuccessful,
 				ScaleUpInfos:            scaleUpInfos,
 				PodsRemainUnschedulable: getRemainingPods(podsRemainUnschedulable, skippedNodeGroups),
 				PodsTriggeredScaleUp:    bestOption.Pods,
@@ -521,7 +543,82 @@
 			nil
 	}

-	return &status.ScaleUpStatus{ScaledUp: false, PodsRemainUnschedulable: getRemainingPods(podsRemainUnschedulable, skippedNodeGroups)}, nil
+	return &status.ScaleUpStatus{Result: status.ScaleUpNoOptionsAvailable, PodsRemainUnschedulable: getRemainingPods(podsRemainUnschedulable, skippedNodeGroups)}, nil
+}
+
+type podsPredicatePassingCheckFunctions struct {
+	getPodsPassingPredicates    func(nodeGroupId string) ([]*apiv1.Pod, error)
+	getPodsNotPassingPredicates func(nodeGroupId string) (map[*apiv1.Pod]status.Reasons, error)
+}
+
+func getPodsPredicatePassingCheckFunctions(
+	context *context.AutoscalingContext,
+	unschedulablePods []*apiv1.Pod,
+	nodeInfos map[string]*schedulercache.NodeInfo) podsPredicatePassingCheckFunctions {
+
+	podsPassingPredicatesCache := make(map[string][]*apiv1.Pod)
+	podsNotPassingPredicatesCache := make(map[string]map[*apiv1.Pod]status.Reasons)
+	errorsCache := make(map[string]error)
+
+	computeCaches := func(nodeGroupId string) {
+		nodeInfo, found := nodeInfos[nodeGroupId]
+		if !found {
+			errorsCache[nodeGroupId] = errors.NewAutoscalerError(errors.InternalError, "NodeInfo not found for node group %v", nodeGroupId)
+			return
+		}
+
+		podsPassing := make([]*apiv1.Pod, 0)
+		podsNotPassing := make(map[*apiv1.Pod]status.Reasons)
+		schedulableOnNode := CheckPodsSchedulableOnNode(context, unschedulablePods, nodeGroupId, nodeInfo)
+		for pod, err := range schedulableOnNode {
+			if err == nil {
+				podsPassing = append(podsPassing, pod)
+			} else {
+				podsNotPassing[pod] = err
+			}
+		}
+		podsPassingPredicatesCache[nodeGroupId] = podsPassing
+		podsNotPassingPredicatesCache[nodeGroupId] = podsNotPassing
+	}
+
+	return podsPredicatePassingCheckFunctions{
+
+		getPodsPassingPredicates: func(nodeGroupId string) ([]*apiv1.Pod, error) {
+			_, passingFound := podsPassingPredicatesCache[nodeGroupId]
+			_, errorFound := errorsCache[nodeGroupId]
+
+			if !passingFound && !errorFound {
+				computeCaches(nodeGroupId)
+			}
+			err, found := errorsCache[nodeGroupId]
+			if found {
+				return []*apiv1.Pod{}, err
+			}
+			pods, found := podsPassingPredicatesCache[nodeGroupId]
+			if found {
+				return pods, nil
+			}
+			return []*apiv1.Pod{}, errors.NewAutoscalerError(errors.InternalError, "Pods passing predicate entry not found in cache for node group %s", nodeGroupId)
+		},
+
+		getPodsNotPassingPredicates: func(nodeGroupId string) (map[*apiv1.Pod]status.Reasons, error) {
+			_, notPassingFound := podsNotPassingPredicatesCache[nodeGroupId]
+			_, errorFound := errorsCache[nodeGroupId]
+
+			if !notPassingFound && !errorFound {
+				computeCaches(nodeGroupId)
+			}
+			err, found := errorsCache[nodeGroupId]
+			if found {
+				return map[*apiv1.Pod]status.Reasons{}, err
+			}
+			pods, found := podsNotPassingPredicatesCache[nodeGroupId]
+			if found {
+				return pods, nil
+			}
+			return map[*apiv1.Pod]status.Reasons{}, errors.NewAutoscalerError(errors.InternalError, "Pods not passing predicate entry not found in cache for node group %s", nodeGroupId)
+		},
+	}
 }

 func getRemainingPods(schedulingErrors map[*apiv1.Pod]map[string]status.Reasons, skipped map[string]status.Reasons) []status.NoScaleUpInfo {
@@ -555,14 +652,18 @@ func getPodsAwaitingEvaluation(allPods []*apiv1.Pod, unschedulable map[*apiv1.Po
 	return result
 }

-func filterNodeGroupsByPods(groups []cloudprovider.NodeGroup, podsRequiredToFit []*apiv1.Pod,
-	fittingPodsPerNodeGroup map[string][]*apiv1.Pod) []cloudprovider.NodeGroup {
+func filterNodeGroupsByPods(
+	groups []cloudprovider.NodeGroup,
+	podsRequiredToFit []*apiv1.Pod,
+	fittingPodsPerNodeGroup func(groupId string) ([]*apiv1.Pod, error)) []cloudprovider.NodeGroup {
+
 	result := make([]cloudprovider.NodeGroup, 0)
+
 groupsloop:
 	for _, group := range groups {
-		fittingPods, found := fittingPodsPerNodeGroup[group.Id()]
-		if !found {
-			glog.V(1).Infof("No info about pods passing predicates found for group %v, skipping it from scale-up consideration", group.Id())
+		fittingPods, err := fittingPodsPerNodeGroup(group.Id())
+		if err != nil {
+			klog.V(1).Infof("No info about pods passing predicates found for group %v, skipping it from scale-up consideration; err=%v", group.Id(), err)
 			continue
 		}
 		podSet := make(map[*apiv1.Pod]bool, len(fittingPods))
@@ -570,8 +671,8 @@ groupsloop:
 			podSet[pod] = true
 		}
 		for _, pod := range podsRequiredToFit {
-			if _, found = podSet[pod]; !found {
-				glog.V(1).Infof("Group %v, can't fit pod %v/%v, removing from scale-up consideration", group.Id(), pod.Namespace, pod.Name)
+			if _, found := podSet[pod]; !found {
+				klog.V(1).Infof("Group %v, can't fit pod %v/%v, removing from scale-up consideration", group.Id(), pod.Namespace, pod.Name)
 				continue groupsloop
 			}
 		}
@@ -581,19 +682,19 @@ groupsloop:
 }

 func executeScaleUp(context *context.AutoscalingContext, clusterStateRegistry *clusterstate.ClusterStateRegistry, info nodegroupset.ScaleUpInfo, gpuType string) errors.AutoscalerError {
-	glog.V(0).Infof("Scale-up: setting group %s size to %d", info.Group.Id(), info.NewSize)
+	klog.V(0).Infof("Scale-up: setting group %s size to %d", info.Group.Id(), info.NewSize)
 	context.LogRecorder.Eventf(apiv1.EventTypeNormal, "ScaledUpGroup", "Scale-up: setting group %s size to %d", info.Group.Id(), info.NewSize)
 	increase := info.NewSize - info.CurrentSize
 	if err := info.Group.IncreaseSize(increase); err != nil {
 		context.LogRecorder.Eventf(apiv1.EventTypeWarning, "FailedToScaleUpGroup", "Scale-up failed for group %s: %v", info.Group.Id(), err)
-		clusterStateRegistry.RegisterFailedScaleUp(info.Group.Id(), metrics.APIError)
+		clusterStateRegistry.RegisterFailedScaleUp(info.Group, metrics.APIError)
 		return errors.NewAutoscalerError(errors.CloudProviderError,
 			"failed to increase node group size: %v", err)
 	}
 	clusterStateRegistry.RegisterScaleUp(
 		&clusterstate.ScaleUpRequest{
-			NodeGroupName:   info.Group.Id(),
+			NodeGroup:       info.Group,
 			Increase:        increase,
 			Time:            time.Now(),
 			ExpectedAddTime: time.Now().Add(context.MaxNodeProvisionTime),
@@ -633,7 +734,7 @@ func applyScaleUpResourcesLimits(
 			}

 			newNodes = int(limit / resourceDelta)
-			glog.V(1).Infof("Capping scale-up size due to limit for resource %s", resource)
+			klog.V(1).Infof("Capping scale-up size due to limit for resource %s", resource)
 			if newNodes < 1 {
 				// should never happen - checked before
 				return 0, errors.NewAutoscalerError(
diff --git a/cluster-autoscaler/core/scale_up_test.go b/cluster-autoscaler/core/scale_up_test.go
index 1a29c9541c87..06201b5019a4 100644
--- a/cluster-autoscaler/core/scale_up_test.go
+++ b/cluster-autoscaler/core/scale_up_test.go
@@ -47,7 +47,7 @@ import (
 var defaultOptions = config.AutoscalingOptions{
 	EstimatorName:  estimator.BinpackingEstimatorName,
 	MaxCoresTotal:  config.DefaultMaxClusterCores,
-	MaxMemoryTotal: config.DefaultMaxClusterMemory * units.Gigabyte,
+	MaxMemoryTotal: config.DefaultMaxClusterMemory * units.GiB,
 	MinCoresTotal:  0,
 	MinMemoryTotal: 0,
 }
@@ -121,24 +121,22 @@ func TestScaleUpMaxCoresLimitHitWithNotAutoscaledGroup(t *testing.T) {
 	simpleScaleUpTest(t, config)
 }

-const MB = 1024 * 1024
-
 func TestScaleUpMaxMemoryLimitHit(t *testing.T) {
 	options := defaultOptions
-	options.MaxMemoryTotal = 1300 * MB
+	options.MaxMemoryTotal = 1300 * MiB
 	config := &scaleTestConfig{
 		nodes: []nodeConfig{
-			{"n1", 2000, 100 * MB, 0, true, "ng1"},
-			{"n2", 4000, 1000 * MB, 0, true, "ng2"},
+			{"n1", 2000, 100 * MiB, 0, true, "ng1"},
+			{"n2", 4000, 1000 * MiB, 0, true, "ng2"},
 		},
 		pods: []podConfig{
 			{"p1", 1000, 0, 0, "n1"},
 			{"p2", 3000, 0, 0, "n2"},
 		},
 		extraPods: []podConfig{
-			{"p-new-1", 2000, 100 * MB, 0, ""},
-			{"p-new-2", 2000, 100 * MB, 0, ""},
-			{"p-new-3", 2000, 100 * MB, 0, ""},
+			{"p-new-1", 2000, 100 * MiB, 0, ""},
+			{"p-new-2", 2000, 100 * MiB, 0, ""},
+			{"p-new-3", 2000, 100 * MiB, 0, ""},
 		},
 		scaleUpOptionToChoose: groupSizeChange{groupName: "ng1", sizeChange: 3},
 		expectedFinalScaleUp:  groupSizeChange{groupName: "ng1", sizeChange: 2},
@@ -150,20 +148,20 @@ func TestScaleUpMaxMemoryLimitHit(t *testing.T) {

 func TestScaleUpMaxMemoryLimitHitWithNotAutoscaledGroup(t *testing.T) {
 	options := defaultOptions
-	options.MaxMemoryTotal = 1300 * MB
+	options.MaxMemoryTotal = 1300 * MiB
 	config := &scaleTestConfig{
 		nodes: []nodeConfig{
-			{"n1", 2000, 100 * MB, 0, true, "ng1"},
-			{"n2", 4000, 1000 * MB, 0, true, ""},
+			{"n1", 2000, 100 * MiB, 0, true, "ng1"},
+			{"n2", 4000, 1000 * MiB, 0, true, ""},
 		},
 		pods: []podConfig{
 			{"p1", 1000, 0, 0, "n1"},
 			{"p2", 3000, 0, 0, "n2"},
 		},
 		extraPods: []podConfig{
-			{"p-new-1", 2000, 100 * MB, 0, ""},
-			{"p-new-2", 2000, 100 * MB, 0, ""},
-			{"p-new-3", 2000, 100 * MB, 0, ""},
+			{"p-new-1", 2000, 100 * MiB, 0, ""},
+			{"p-new-2", 2000, 100 * MiB, 0, ""},
+			{"p-new-3", 2000, 100 * MiB, 0, ""},
 		},
 		scaleUpOptionToChoose: groupSizeChange{groupName: "ng1", sizeChange: 3},
 		expectedFinalScaleUp:  groupSizeChange{groupName: "ng1", sizeChange: 2},
@@ -178,17 +176,17 @@ func TestScaleUpCapToMaxTotalNodesLimit(t *testing.T) {
 	options.MaxNodesTotal = 3
 	config := &scaleTestConfig{
 		nodes: []nodeConfig{
-			{"n1", 2000, 100 * MB, 0, true, "ng1"},
-			{"n2", 4000, 1000 * MB, 0, true, "ng2"},
+			{"n1", 2000, 100 * MiB, 0, true, "ng1"},
+			{"n2", 4000, 1000 * MiB, 0, true, "ng2"},
 		},
 		pods: []podConfig{
 			{"p1", 1000, 0, 0, "n1"},
 			{"p2", 3000, 0, 0, "n2"},
 		},
 		extraPods: []podConfig{
-			{"p-new-1", 4000, 100 * MB, 0, ""},
-			{"p-new-2", 4000, 100 * MB, 0, ""},
-			{"p-new-3", 4000, 100 * MB, 0, ""},
+			{"p-new-1", 4000, 100 * MiB, 0, ""},
+			{"p-new-2", 4000, 100 * MiB, 0, ""},
+			{"p-new-3", 4000, 100 * MiB, 0, ""},
 		},
 		scaleUpOptionToChoose: groupSizeChange{groupName: "ng2", sizeChange: 3},
 		expectedFinalScaleUp:  groupSizeChange{groupName: "ng2", sizeChange: 1},
@@ -203,17 +201,17 @@ func TestScaleUpCapToMaxTotalNodesLimitWithNotAutoscaledGroup(t *testing.T) {
 	options.MaxNodesTotal = 3
 	config := &scaleTestConfig{
 		nodes: []nodeConfig{
-			{"n1", 2000, 100 * MB, 0, true, ""},
-			{"n2", 4000, 1000 * MB, 0, true, "ng2"},
+			{"n1", 2000, 100 * MiB, 0, true, ""},
+			{"n2", 4000, 1000 * MiB, 0, true, "ng2"},
 		},
 		pods: []podConfig{
 			{"p1", 1000, 0, 0, "n1"},
 			{"p2", 3000, 0, 0, "n2"},
 		},
 		extraPods: []podConfig{
-			{"p-new-1", 4000, 100 * MB, 0, ""},
-			{"p-new-2", 4000, 100 * MB, 0, ""},
-			{"p-new-3", 4000, 100 * MB, 0, ""},
+			{"p-new-1", 4000, 100 * MiB, 0, ""},
+			{"p-new-2", 4000, 100 * MiB, 0, ""},
+			{"p-new-3", 4000, 100 * MiB, 0, ""},
 		},
 		scaleUpOptionToChoose: groupSizeChange{groupName: "ng2", sizeChange: 3},
expectedFinalScaleUp: groupSizeChange{groupName: "ng2", sizeChange: 1}, @@ -228,15 +226,15 @@ func TestWillConsiderGpuAndStandardPoolForPodWhichDoesNotRequireGpu(t *testing.T options.MaxNodesTotal = 100 config := &scaleTestConfig{ nodes: []nodeConfig{ - {"gpu-node-1", 2000, 1000 * MB, 1, true, "gpu-pool"}, - {"std-node-1", 2000, 1000 * MB, 0, true, "std-pool"}, + {"gpu-node-1", 2000, 1000 * MiB, 1, true, "gpu-pool"}, + {"std-node-1", 2000, 1000 * MiB, 0, true, "std-pool"}, }, pods: []podConfig{ - {"gpu-pod-1", 2000, 1000 * MB, 1, "gpu-node-1"}, - {"std-pod-1", 2000, 1000 * MB, 0, "std-node-1"}, + {"gpu-pod-1", 2000, 1000 * MiB, 1, "gpu-node-1"}, + {"std-pod-1", 2000, 1000 * MiB, 0, "std-node-1"}, }, extraPods: []podConfig{ - {"extra-std-pod", 2000, 1000 * MB, 0, ""}, + {"extra-std-pod", 2000, 1000 * MiB, 0, ""}, }, expectedScaleUpOptions: []groupSizeChange{ {groupName: "std-pool", sizeChange: 1}, @@ -255,15 +253,15 @@ func TestWillConsiderOnlyGpuPoolForPodWhichDoesRequiresGpu(t *testing.T) { options.MaxNodesTotal = 100 config := &scaleTestConfig{ nodes: []nodeConfig{ - {"gpu-node-1", 2000, 1000 * MB, 1, true, "gpu-pool"}, - {"std-node-1", 2000, 1000 * MB, 0, true, "std-pool"}, + {"gpu-node-1", 2000, 1000 * MiB, 1, true, "gpu-pool"}, + {"std-node-1", 2000, 1000 * MiB, 0, true, "std-pool"}, }, pods: []podConfig{ - {"gpu-pod-1", 2000, 1000 * MB, 1, "gpu-node-1"}, - {"std-pod-1", 2000, 1000 * MB, 0, "std-node-1"}, + {"gpu-pod-1", 2000, 1000 * MiB, 1, "gpu-node-1"}, + {"std-pod-1", 2000, 1000 * MiB, 0, "std-node-1"}, }, extraPods: []podConfig{ - {"extra-gpu-pod", 2000, 1000 * MB, 1, ""}, + {"extra-gpu-pod", 2000, 1000 * MiB, 1, ""}, }, expectedScaleUpOptions: []groupSizeChange{ {groupName: "gpu-pool", sizeChange: 1}, @@ -281,21 +279,21 @@ func TestWillConsiderAllPoolsWhichFitTwoPodsRequiringGpus(t *testing.T) { options.MaxNodesTotal = 100 config := &scaleTestConfig{ nodes: []nodeConfig{ - {"gpu-1-node-1", 2000, 1000 * MB, 1, true, "gpu-1-pool"}, - {"gpu-2-node-1", 2000, 1000 * MB, 2, true, "gpu-2-pool"}, - {"gpu-4-node-1", 2000, 1000 * MB, 4, true, "gpu-4-pool"}, - {"std-node-1", 2000, 1000 * MB, 0, true, "std-pool"}, + {"gpu-1-node-1", 2000, 1000 * MiB, 1, true, "gpu-1-pool"}, + {"gpu-2-node-1", 2000, 1000 * MiB, 2, true, "gpu-2-pool"}, + {"gpu-4-node-1", 2000, 1000 * MiB, 4, true, "gpu-4-pool"}, + {"std-node-1", 2000, 1000 * MiB, 0, true, "std-pool"}, }, pods: []podConfig{ - {"gpu-pod-1", 2000, 1000 * MB, 1, "gpu-1-node-1"}, - {"gpu-pod-2", 2000, 1000 * MB, 2, "gpu-2-node-1"}, - {"gpu-pod-3", 2000, 1000 * MB, 4, "gpu-4-node-1"}, - {"std-pod-1", 2000, 1000 * MB, 0, "std-node-1"}, + {"gpu-pod-1", 2000, 1000 * MiB, 1, "gpu-1-node-1"}, + {"gpu-pod-2", 2000, 1000 * MiB, 2, "gpu-2-node-1"}, + {"gpu-pod-3", 2000, 1000 * MiB, 4, "gpu-4-node-1"}, + {"std-pod-1", 2000, 1000 * MiB, 0, "std-node-1"}, }, extraPods: []podConfig{ - {"extra-gpu-pod-1", 1, 1 * MB, 1, ""}, // CPU and mem negligible - {"extra-gpu-pod-2", 1, 1 * MB, 1, ""}, // CPU and mem negligible - {"extra-gpu-pod-3", 1, 1 * MB, 1, ""}, // CPU and mem negligible + {"extra-gpu-pod-1", 1, 1 * MiB, 1, ""}, // CPU and mem negligible + {"extra-gpu-pod-2", 1, 1 * MiB, 1, ""}, // CPU and mem negligible + {"extra-gpu-pod-3", 1, 1 * MiB, 1, ""}, // CPU and mem negligible }, expectedScaleUpOptions: []groupSizeChange{ {groupName: "gpu-1-pool", sizeChange: 3}, @@ -421,12 +419,13 @@ func simpleScaleUpTest(t *testing.T, config *scaleTestConfig) { initialNodeConfigs: config.nodes, expectedScaleUpOptions: config.expectedScaleUpOptions, scaleUpOptionToChoose: 
config.scaleUpOptionToChoose, - t: t, + t: t, } context.ExpanderStrategy = expander - clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder) - clusterState.UpdateNodes(nodes, time.Now()) + nodeInfos, _ := GetNodeInfosForGroups(nodes, nil, provider, fakeClient, []*extensionsv1.DaemonSet{}, context.PredicateChecker) + clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff()) + clusterState.UpdateNodes(nodes, nodeInfos, time.Now()) extraPods := make([]*apiv1.Pod, len(config.extraPods)) for i, p := range config.extraPods { @@ -436,10 +435,10 @@ func simpleScaleUpTest(t *testing.T, config *scaleTestConfig) { processors := ca_processors.TestProcessors() - status, err := ScaleUp(&context, processors, clusterState, extraPods, nodes, []*extensionsv1.DaemonSet{}) - processors.ScaleUpStatusProcessor.Process(&context, status) + scaleUpStatus, err := ScaleUp(&context, processors, clusterState, extraPods, nodes, []*extensionsv1.DaemonSet{}, nodeInfos) + processors.ScaleUpStatusProcessor.Process(&context, scaleUpStatus) assert.NoError(t, err) - assert.True(t, status.ScaledUp) + assert.True(t, scaleUpStatus.WasSuccessful()) expandedGroup := getGroupSizeChangeFromChan(expandedGroups) assert.NotNil(t, expandedGroup, "Expected scale up event") @@ -520,23 +519,25 @@ func TestScaleUpNodeComingNoScale(t *testing.T) { } context := NewScaleTestAutoscalingContext(options, fakeClient, provider) - clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder) + nodes := []*apiv1.Node{n1, n2} + nodeInfos, _ := GetNodeInfosForGroups(nodes, nil, provider, fakeClient, []*extensionsv1.DaemonSet{}, context.PredicateChecker) + clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff()) clusterState.RegisterScaleUp(&clusterstate.ScaleUpRequest{ - NodeGroupName: "ng2", + NodeGroup: provider.GetNodeGroup("ng2"), Increase: 1, Time: time.Now(), ExpectedAddTime: time.Now().Add(5 * time.Minute), }) - clusterState.UpdateNodes([]*apiv1.Node{n1, n2}, time.Now()) + clusterState.UpdateNodes([]*apiv1.Node{n1, n2}, nodeInfos, time.Now()) p3 := BuildTestPod("p-new", 550, 0) processors := ca_processors.TestProcessors() - status, err := ScaleUp(&context, processors, clusterState, []*apiv1.Pod{p3}, []*apiv1.Node{n1, n2}, []*extensionsv1.DaemonSet{}) + scaleUpStatus, err := ScaleUp(&context, processors, clusterState, []*apiv1.Pod{p3}, []*apiv1.Node{n1, n2}, []*extensionsv1.DaemonSet{}, nodeInfos) assert.NoError(t, err) // A node is already coming - no need for scale up. 
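The request registered in the test above reflects an API change threaded through this diff: `clusterstate.ScaleUpRequest` now carries the `cloudprovider.NodeGroup` handle instead of a bare group name, which is why `executeScaleUp` can pass `info.Group` directly. A sketch of the new shape, showing only the fields this diff exercises (the real struct may carry more):

```go
package clusterstate

import (
	"time"

	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
)

// ScaleUpRequest as sketched from this diff: the NodeGroup handle replaces
// the former NodeGroupName string, so callers no longer need to resolve the
// group by name before registering a scale-up.
type ScaleUpRequest struct {
	NodeGroup       cloudprovider.NodeGroup
	Increase        int
	Time            time.Time
	ExpectedAddTime time.Time
}
```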
- assert.False(t, status.ScaledUp) + assert.False(t, scaleUpStatus.WasSuccessful()) } func TestScaleUpNodeComingHasScale(t *testing.T) { @@ -575,24 +576,26 @@ func TestScaleUpNodeComingHasScale(t *testing.T) { context := NewScaleTestAutoscalingContext(defaultOptions, fakeClient, provider) - clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder) + nodes := []*apiv1.Node{n1, n2} + nodeInfos, _ := GetNodeInfosForGroups(nodes, nil, provider, fakeClient, []*extensionsv1.DaemonSet{}, context.PredicateChecker) + clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff()) clusterState.RegisterScaleUp(&clusterstate.ScaleUpRequest{ - NodeGroupName: "ng2", + NodeGroup: provider.GetNodeGroup("ng2"), Increase: 1, Time: time.Now(), ExpectedAddTime: time.Now().Add(5 * time.Minute), }) - clusterState.UpdateNodes([]*apiv1.Node{n1, n2}, time.Now()) + clusterState.UpdateNodes([]*apiv1.Node{n1, n2}, nodeInfos, time.Now()) p3 := BuildTestPod("p-new", 550, 0) p4 := BuildTestPod("p-new", 550, 0) processors := ca_processors.TestProcessors() - status, err := ScaleUp(&context, processors, clusterState, []*apiv1.Pod{p3, p4}, []*apiv1.Node{n1, n2}, []*extensionsv1.DaemonSet{}) + scaleUpStatus, err := ScaleUp(&context, processors, clusterState, []*apiv1.Pod{p3, p4}, []*apiv1.Node{n1, n2}, []*extensionsv1.DaemonSet{}, nodeInfos) assert.NoError(t, err) // Two nodes needed but one node is already coming, so it should increase by one. - assert.True(t, status.ScaledUp) + assert.True(t, scaleUpStatus.WasSuccessful()) assert.Equal(t, "ng2-1", getStringFromChan(expandedGroups)) } @@ -636,16 +639,18 @@ func TestScaleUpUnhealthy(t *testing.T) { } context := NewScaleTestAutoscalingContext(options, fakeClient, provider) - clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder) - clusterState.UpdateNodes([]*apiv1.Node{n1, n2}, time.Now()) + nodes := []*apiv1.Node{n1, n2} + nodeInfos, _ := GetNodeInfosForGroups(nodes, nil, provider, fakeClient, []*extensionsv1.DaemonSet{}, context.PredicateChecker) + clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff()) + clusterState.UpdateNodes([]*apiv1.Node{n1, n2}, nodeInfos, time.Now()) p3 := BuildTestPod("p-new", 550, 0) processors := ca_processors.TestProcessors() - status, err := ScaleUp(&context, processors, clusterState, []*apiv1.Pod{p3}, []*apiv1.Node{n1, n2}, []*extensionsv1.DaemonSet{}) + scaleUpStatus, err := ScaleUp(&context, processors, clusterState, []*apiv1.Pod{p3}, []*apiv1.Node{n1, n2}, []*extensionsv1.DaemonSet{}, nodeInfos) assert.NoError(t, err) // Node group is unhealthy. 
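The assertions here switch from the old `status.ScaledUp` boolean to `scaleUpStatus.WasSuccessful()`. The result constants are referenced later in this diff (`ScaleUpNotTried`, `ScaleUpInCooldown`, `ScaleUpNoOptionsAvailable`, and friends); the layout below is an illustrative guess at the status type, not its verbatim definition:

```go
package status

// ScaleUpResult distinguishes the outcomes the old boolean conflated.
type ScaleUpResult int

// Constant names as referenced elsewhere in this diff; ordering assumed.
const (
	ScaleUpSuccessful ScaleUpResult = iota
	ScaleUpNoOptionsAvailable
	ScaleUpNotNeeded
	ScaleUpNotTried
	ScaleUpInCooldown
)

// ScaleUpStatus is sketched with just the field this diff exercises.
type ScaleUpStatus struct {
	Result ScaleUpResult
}

// WasSuccessful reports whether a scale-up actually happened.
func (s *ScaleUpStatus) WasSuccessful() bool {
	return s.Result == ScaleUpSuccessful
}
```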
- assert.False(t, status.ScaledUp) + assert.False(t, scaleUpStatus.WasSuccessful()) } func TestScaleUpNoHelp(t *testing.T) { @@ -680,16 +685,18 @@ func TestScaleUpNoHelp(t *testing.T) { } context := NewScaleTestAutoscalingContext(options, fakeClient, provider) - clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder) - clusterState.UpdateNodes([]*apiv1.Node{n1}, time.Now()) + nodes := []*apiv1.Node{n1} + nodeInfos, _ := GetNodeInfosForGroups(nodes, nil, provider, fakeClient, []*extensionsv1.DaemonSet{}, context.PredicateChecker) + clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff()) + clusterState.UpdateNodes([]*apiv1.Node{n1}, nodeInfos, time.Now()) p3 := BuildTestPod("p-new", 500, 0) processors := ca_processors.TestProcessors() - status, err := ScaleUp(&context, processors, clusterState, []*apiv1.Pod{p3}, []*apiv1.Node{n1}, []*extensionsv1.DaemonSet{}) - processors.ScaleUpStatusProcessor.Process(&context, status) + scaleUpStatus, err := ScaleUp(&context, processors, clusterState, []*apiv1.Pod{p3}, []*apiv1.Node{n1}, []*extensionsv1.DaemonSet{}, nodeInfos) + processors.ScaleUpStatusProcessor.Process(&context, scaleUpStatus) assert.NoError(t, err) - assert.False(t, status.ScaledUp) + assert.False(t, scaleUpStatus.WasSuccessful()) var event string select { case event = <-context.Recorder.(*kube_record.FakeRecorder).Events: @@ -755,8 +762,9 @@ func TestScaleUpBalanceGroups(t *testing.T) { } context := NewScaleTestAutoscalingContext(options, fakeClient, provider) - clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder) - clusterState.UpdateNodes(nodes, time.Now()) + nodeInfos, _ := GetNodeInfosForGroups(nodes, nil, provider, fakeClient, []*extensionsv1.DaemonSet{}, context.PredicateChecker) + clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff()) + clusterState.UpdateNodes(nodes, nodeInfos, time.Now()) pods := make([]*apiv1.Pod, 0) for i := 0; i < 2; i++ { @@ -764,10 +772,10 @@ func TestScaleUpBalanceGroups(t *testing.T) { } processors := ca_processors.TestProcessors() - status, typedErr := ScaleUp(&context, processors, clusterState, pods, nodes, []*extensionsv1.DaemonSet{}) + scaleUpStatus, typedErr := ScaleUp(&context, processors, clusterState, pods, nodes, []*extensionsv1.DaemonSet{}, nodeInfos) assert.NoError(t, typedErr) - assert.True(t, status.ScaledUp) + assert.True(t, scaleUpStatus.WasSuccessful()) groupMap := make(map[string]cloudprovider.NodeGroup, 3) for _, group := range provider.NodeGroups() { groupMap[group.Id()] = group @@ -812,15 +820,18 @@ func TestScaleUpAutoprovisionedNodeGroup(t *testing.T) { } context := NewScaleTestAutoscalingContext(options, fakeClient, provider) - clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder) + clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff()) processors := ca_processors.TestProcessors() processors.NodeGroupListProcessor = &mockAutoprovisioningNodeGroupListProcessor{t} processors.NodeGroupManager = &mockAutoprovisioningNodeGroupManager{t} - status, err := ScaleUp(&context, processors, clusterState, []*apiv1.Pod{p1}, []*apiv1.Node{}, []*extensionsv1.DaemonSet{}) + nodes := 
[]*apiv1.Node{} + nodeInfos, _ := GetNodeInfosForGroups(nodes, nil, provider, fakeClient, []*extensionsv1.DaemonSet{}, context.PredicateChecker) + + scaleUpStatus, err := ScaleUp(&context, processors, clusterState, []*apiv1.Pod{p1}, []*apiv1.Node{}, []*extensionsv1.DaemonSet{}, nodeInfos) assert.NoError(t, err) - assert.True(t, status.ScaledUp) + assert.True(t, scaleUpStatus.WasSuccessful()) assert.Equal(t, "autoprovisioned-T1", getStringFromChan(createdGroups)) assert.Equal(t, "autoprovisioned-T1-1", getStringFromChan(expandedGroups)) } diff --git a/cluster-autoscaler/core/static_autoscaler.go b/cluster-autoscaler/core/static_autoscaler.go index d90cdb51efbd..338d1f69b356 100644 --- a/cluster-autoscaler/core/static_autoscaler.go +++ b/cluster-autoscaler/core/static_autoscaler.go @@ -24,6 +24,7 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils" "k8s.io/autoscaler/cluster-autoscaler/config" "k8s.io/autoscaler/cluster-autoscaler/context" + "k8s.io/autoscaler/cluster-autoscaler/estimator" "k8s.io/autoscaler/cluster-autoscaler/expander" "k8s.io/autoscaler/cluster-autoscaler/metrics" ca_processors "k8s.io/autoscaler/cluster-autoscaler/processors" @@ -34,7 +35,10 @@ import ( apiv1 "k8s.io/api/core/v1" - "github.com/golang/glog" + "k8s.io/autoscaler/cluster-autoscaler/processors/status" + "k8s.io/autoscaler/cluster-autoscaler/utils/backoff" + "k8s.io/klog" + schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" ) const ( @@ -61,18 +65,28 @@ type StaticAutoscaler struct { scaleDown *ScaleDown processors *ca_processors.AutoscalingProcessors initialized bool + // Caches nodeInfo computed for previously seen nodes + nodeInfoCache map[string]*schedulercache.NodeInfo } // NewStaticAutoscaler creates an instance of Autoscaler filled with provided parameters -func NewStaticAutoscaler(opts config.AutoscalingOptions, predicateChecker *simulator.PredicateChecker, - autoscalingKubeClients *context.AutoscalingKubeClients, processors *ca_processors.AutoscalingProcessors, cloudProvider cloudprovider.CloudProvider, expanderStrategy expander.Strategy) *StaticAutoscaler { - autoscalingContext := context.NewAutoscalingContext(opts, predicateChecker, autoscalingKubeClients, cloudProvider, expanderStrategy) +func NewStaticAutoscaler( + opts config.AutoscalingOptions, + predicateChecker *simulator.PredicateChecker, + autoscalingKubeClients *context.AutoscalingKubeClients, + processors *ca_processors.AutoscalingProcessors, + cloudProvider cloudprovider.CloudProvider, + expanderStrategy expander.Strategy, + estimatorBuilder estimator.EstimatorBuilder, + backoff backoff.Backoff) *StaticAutoscaler { + autoscalingContext := context.NewAutoscalingContext(opts, predicateChecker, autoscalingKubeClients, cloudProvider, expanderStrategy, estimatorBuilder) + clusterStateConfig := clusterstate.ClusterStateRegistryConfig{ MaxTotalUnreadyPercentage: opts.MaxTotalUnreadyPercentage, OkTotalUnreadyCount: opts.OkTotalUnreadyCount, MaxNodeProvisionTime: opts.MaxNodeProvisionTime, } - clusterStateRegistry := clusterstate.NewClusterStateRegistry(autoscalingContext.CloudProvider, clusterStateConfig, autoscalingContext.LogRecorder) + clusterStateRegistry := clusterstate.NewClusterStateRegistry(autoscalingContext.CloudProvider, clusterStateConfig, autoscalingContext.LogRecorder, backoff) scaleDown := NewScaleDown(autoscalingContext, clusterStateRegistry) @@ -85,6 +99,7 @@ func NewStaticAutoscaler(opts config.AutoscalingOptions, predicateChecker *simul scaleDown: scaleDown, processors: processors, clusterStateRegistry: 
clusterStateRegistry, + nodeInfoCache: make(map[string]*schedulercache.NodeInfo), } } @@ -97,7 +112,7 @@ func (a *StaticAutoscaler) cleanUpIfRequired() { // CA can die at any time. Removing taints that might have been left from the previous run. if readyNodes, err := a.ReadyNodeLister().List(); err != nil { - glog.Errorf("Failed to list ready nodes, not cleaning up taints: %v", err) + klog.Errorf("Failed to list ready nodes, not cleaning up taints: %v", err) } else { cleanToBeDeleted(readyNodes, a.AutoscalingContext.ClientSet, a.Recorder) } @@ -114,7 +129,7 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) errors.AutoscalerError scaleDown := a.scaleDown autoscalingContext := a.AutoscalingContext - glog.V(4).Info("Starting main loop") + klog.V(4).Info("Starting main loop") stateUpdateStart := time.Now() allNodes, readyNodes, typedErr := a.obtainNodeLists() @@ -125,12 +140,29 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) errors.AutoscalerError return nil } - typedErr = a.updateClusterState(allNodes, currentTime) + daemonsets, err := a.ListerRegistry.DaemonSetLister().List() + if err != nil { + klog.Errorf("Failed to get daemonset list: %v", err) + return errors.ToAutoscalerError(errors.ApiCallError, err) + } + + nodeInfosForGroups, autoscalerError := GetNodeInfosForGroups( + readyNodes, a.nodeInfoCache, autoscalingContext.CloudProvider, autoscalingContext.ClientSet, daemonsets, autoscalingContext.PredicateChecker) + if autoscalerError != nil { + return autoscalerError.AddPrefix("failed to build node infos for node groups: ") + } + + typedErr = a.updateClusterState(allNodes, nodeInfosForGroups, currentTime) if typedErr != nil { return typedErr } metrics.UpdateDurationFromStart(metrics.UpdateState, stateUpdateStart) + scaleUpStatus := &status.ScaleUpStatus{Result: status.ScaleUpNotTried} + scaleUpStatusProcessorAlreadyCalled := false + scaleDownStatus := &status.ScaleDownStatus{Result: status.ScaleDownNotTried} + scaleDownStatusProcessorAlreadyCalled := false + defer func() { // Update status information when the loop is done (regardless of reason) if autoscalingContext.WriteStatusConfigMap { @@ -139,9 +171,18 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) errors.AutoscalerError status.GetReadableString(), a.AutoscalingContext.LogRecorder) } + // This deferred processor execution allows the processors to handle a situation when a scale-(up|down) + // wasn't even attempted because e.g. the iteration exited earlier. + if !scaleUpStatusProcessorAlreadyCalled && a.processors != nil && a.processors.ScaleUpStatusProcessor != nil { + a.processors.ScaleUpStatusProcessor.Process(a.AutoscalingContext, scaleUpStatus) + } + if !scaleDownStatusProcessorAlreadyCalled && a.processors != nil && a.processors.ScaleDownStatusProcessor != nil { + a.processors.ScaleDownStatusProcessor.Process(a.AutoscalingContext, scaleDownStatus) + } + err := a.processors.AutoscalingStatusProcessor.Process(a.AutoscalingContext, a.clusterStateRegistry, currentTime) if err != nil { - glog.Errorf("AutoscalingStatusProcessor error: %v.", err) + klog.Errorf("AutoscalingStatusProcessor error: %v.", err) } }() @@ -149,26 +190,26 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) errors.AutoscalerError // master. 
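The defer block added to `RunOnce` above is the load-bearing piece of the new status plumbing: it guarantees each status processor runs exactly once per iteration even when the loop exits early. A self-contained toy of the same pattern (names shortened; not the autoscaler's actual types):

```go
package main

import "fmt"

// process stands in for a ScaleUpStatusProcessor.Process call.
func process(result string) { fmt.Println("processed:", result) }

// runOnce mirrors the pattern: the status starts at "NotTried", the body may
// overwrite it and mark it handled, and the defer covers every early return.
func runOnce(earlyExit bool) {
	result := "NotTried"
	alreadyCalled := false
	defer func() {
		if !alreadyCalled {
			process(result)
		}
	}()

	if earlyExit {
		return // the defer still reports NotTried
	}
	result = "Successful"
	process(result)
	alreadyCalled = true
}

func main() {
	runOnce(true)  // processed: NotTried
	runOnce(false) // processed: Successful
}
```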
unregisteredNodes := a.clusterStateRegistry.GetUnregisteredNodes() if len(unregisteredNodes) > 0 { - glog.V(1).Infof("%d unregistered nodes present", len(unregisteredNodes)) + klog.V(1).Infof("%d unregistered nodes present", len(unregisteredNodes)) removedAny, err := removeOldUnregisteredNodes(unregisteredNodes, autoscalingContext, currentTime, autoscalingContext.LogRecorder) // There was a problem with removing unregistered nodes. Retry in the next loop. if err != nil { if removedAny { - glog.Warningf("Some unregistered nodes were removed, but got error: %v", err) + klog.Warningf("Some unregistered nodes were removed, but got error: %v", err) } else { - glog.Errorf("Failed to remove unregistered nodes: %v", err) + klog.Errorf("Failed to remove unregistered nodes: %v", err) } return errors.ToAutoscalerError(errors.CloudProviderError, err) } // Some nodes were removed. Let's skip this iteration, the next one should be better. if removedAny { - glog.V(0).Infof("Some unregistered nodes were removed, skipping iteration") + klog.V(0).Infof("Some unregistered nodes were removed, skipping iteration") return nil } } if !a.clusterStateRegistry.IsClusterHealthy() { - glog.Warning("Cluster is not ready for autoscaling") + klog.Warning("Cluster is not ready for autoscaling") scaleDown.CleanUpUnneededNodes() autoscalingContext.LogRecorder.Eventf(apiv1.EventTypeWarning, "ClusterUnhealthy", "Cluster is unhealthy") return nil @@ -179,11 +220,11 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) errors.AutoscalerError // TODO: andrewskim - add protection for ready AWS nodes. fixedSomething, err := fixNodeGroupSize(autoscalingContext, a.clusterStateRegistry, currentTime) if err != nil { - glog.Errorf("Failed to fix node group sizes: %v", err) + klog.Errorf("Failed to fix node group sizes: %v", err) return errors.ToAutoscalerError(errors.CloudProviderError, err) } if fixedSomething { - glog.V(0).Infof("Some node group target size was fixed, skipping the iteration") + klog.V(0).Infof("Some node group target size was fixed, skipping the iteration") return nil } @@ -191,20 +232,20 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) errors.AutoscalerError allUnschedulablePods, err := unschedulablePodLister.List() if err != nil { - glog.Errorf("Failed to list unscheduled pods: %v", err) + klog.Errorf("Failed to list unscheduled pods: %v", err) return errors.ToAutoscalerError(errors.ApiCallError, err) } metrics.UpdateUnschedulablePodsCount(len(allUnschedulablePods)) allScheduled, err := scheduledPodLister.List() if err != nil { - glog.Errorf("Failed to list scheduled pods: %v", err) + klog.Errorf("Failed to list scheduled pods: %v", err) return errors.ToAutoscalerError(errors.ApiCallError, err) } allUnschedulablePods, allScheduled, err = a.processors.PodListProcessor.Process(a.AutoscalingContext, allUnschedulablePods, allScheduled, allNodes) if err != nil { - glog.Errorf("Failed to process pod list: %v", err) + klog.Errorf("Failed to process pod list: %v", err) return errors.ToAutoscalerError(errors.InternalError, err) } @@ -233,54 +274,57 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) errors.AutoscalerError // Such pods don't require scale up but should be considered during scale down. 
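The `FilterOutExpendableAndSplit` call that follows (its definition sits in the utils.go hunks further down) buckets unschedulable pods three ways. An illustrative consolidation; the annotation key constant below is a placeholder for `scheduler_util.NominatedNodeAnnotationKey`:

```go
package main

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
)

// Placeholder for scheduler_util.NominatedNodeAnnotationKey used in the diff.
const nominatedNodeAnnotationKey = "NominatedNodeName"

// split mirrors FilterOutExpendableAndSplit: expendable pods are dropped,
// pods already nominated onto a node wait for preemption, and only the
// remainder can justify a scale-up.
func split(pods []*apiv1.Pod, cutoff int) (needScaleUp, waitingForPreemption []*apiv1.Pod) {
	for _, pod := range pods {
		switch {
		case pod.Spec.Priority != nil && int(*pod.Spec.Priority) < cutoff:
			// expendable: scheduled later, once resources free up
		case len(pod.Annotations[nominatedNodeAnnotationKey]) > 0:
			waitingForPreemption = append(waitingForPreemption, pod)
		default:
			needScaleUp = append(needScaleUp, pod)
		}
	}
	return needScaleUp, waitingForPreemption
}

func main() {
	prio := int32(-5)
	pods := []*apiv1.Pod{{Spec: apiv1.PodSpec{Priority: &prio}}}
	up, waiting := split(pods, 0)
	fmt.Println(len(up), len(waiting)) // 0 0: the pod is expendable
}
```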
unschedulablePods, unschedulableWaitingForLowerPriorityPreemption := FilterOutExpendableAndSplit(unschedulablePodsWithoutTPUs, a.ExpendablePodsPriorityCutoff) - glog.V(4).Infof("Filtering out schedulables") + klog.V(4).Infof("Filtering out schedulables") filterOutSchedulableStart := time.Now() unschedulablePodsToHelp := FilterOutSchedulable(unschedulablePods, readyNodes, allScheduled, unschedulableWaitingForLowerPriorityPreemption, a.PredicateChecker, a.ExpendablePodsPriorityCutoff) metrics.UpdateDurationFromStart(metrics.FilterOutSchedulable, filterOutSchedulableStart) if len(unschedulablePodsToHelp) != len(unschedulablePods) { - glog.V(2).Info("Schedulable pods present") + klog.V(2).Info("Schedulable pods present") scaleDownForbidden = true } else { - glog.V(4).Info("No schedulable pods") + klog.V(4).Info("No schedulable pods") } + // finally, filter out pods that are too "young" to safely be considered for a scale-up (delay is configurable) + unschedulablePodsToHelp = a.filterOutYoungPods(unschedulablePodsToHelp, currentTime) + if len(unschedulablePodsToHelp) == 0 { - glog.V(1).Info("No unschedulable pods") + scaleUpStatus.Result = status.ScaleUpNotNeeded + klog.V(1).Info("No unschedulable pods") } else if a.MaxNodesTotal > 0 && len(readyNodes) >= a.MaxNodesTotal { - glog.V(1).Info("Max total nodes in cluster reached") + scaleUpStatus.Result = status.ScaleUpNoOptionsAvailable + klog.V(1).Info("Max total nodes in cluster reached") } else if allPodsAreNew(unschedulablePodsToHelp, currentTime) { // The assumption here is that these pods have been created very recently and probably there // is more pods to come. In theory we could check the newest pod time but then if pod were created // slowly but at the pace of 1 every 2 seconds then no scale up would be triggered for long time. // We also want to skip a real scale down (just like if the pods were handled). scaleDownForbidden = true - glog.V(1).Info("Unschedulable pods are very new, waiting one iteration for more") + scaleUpStatus.Result = status.ScaleUpInCooldown + klog.V(1).Info("Unschedulable pods are very new, waiting one iteration for more") } else { - daemonsets, err := a.ListerRegistry.DaemonSetLister().List() - if err != nil { - glog.Errorf("Failed to get daemonset list") - return errors.ToAutoscalerError(errors.ApiCallError, err) - } - scaleUpStart := time.Now() metrics.UpdateLastTime(metrics.ScaleUp, scaleUpStart) - scaleUpStatus, typedErr := ScaleUp(autoscalingContext, a.processors, a.clusterStateRegistry, unschedulablePodsToHelp, readyNodes, daemonsets) + scaleUpStatus, typedErr := ScaleUp(autoscalingContext, a.processors, a.clusterStateRegistry, unschedulablePodsToHelp, readyNodes, daemonsets, nodeInfosForGroups) metrics.UpdateDurationFromStart(metrics.ScaleUp, scaleUpStart) - if typedErr != nil { - glog.Errorf("Failed to scale up: %v", typedErr) - return typedErr - } if a.processors != nil && a.processors.ScaleUpStatusProcessor != nil { a.processors.ScaleUpStatusProcessor.Process(autoscalingContext, scaleUpStatus) + scaleUpStatusProcessorAlreadyCalled = true + } + + if typedErr != nil { + klog.Errorf("Failed to scale up: %v", typedErr) + return typedErr } - if scaleUpStatus.ScaledUp { + if scaleUpStatus.Result == status.ScaleUpSuccessful { a.lastScaleUpTime = currentTime // No scale down in this iteration. 
+ scaleDownStatus.Result = status.ScaleDownInCooldown return nil } } @@ -288,45 +332,51 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) errors.AutoscalerError if a.ScaleDownEnabled { pdbs, err := pdbLister.List() if err != nil { - glog.Errorf("Failed to list pod disruption budgets: %v", err) + scaleDownStatus.Result = status.ScaleDownError + klog.Errorf("Failed to list pod disruption budgets: %v", err) return errors.ToAutoscalerError(errors.ApiCallError, err) } unneededStart := time.Now() - glog.V(4).Infof("Calculating unneeded nodes") + klog.V(4).Infof("Calculating unneeded nodes") scaleDown.CleanUp(currentTime) potentiallyUnneeded := getPotentiallyUnneededNodes(autoscalingContext, allNodes) typedErr := scaleDown.UpdateUnneededNodes(allNodes, potentiallyUnneeded, append(allScheduled, unschedulableWaitingForLowerPriorityPreemption...), currentTime, pdbs) if typedErr != nil { - glog.Errorf("Failed to scale down: %v", typedErr) + scaleDownStatus.Result = status.ScaleDownError + klog.Errorf("Failed to scale down: %v", typedErr) return typedErr } metrics.UpdateDurationFromStart(metrics.FindUnneeded, unneededStart) - if glog.V(4) { + if klog.V(4) { for key, val := range scaleDown.unneededNodes { - glog.Infof("%s is unneeded since %s duration %s", key, val.String(), currentTime.Sub(val).String()) + klog.Infof("%s is unneeded since %s duration %s", key, val.String(), currentTime.Sub(val).String()) } } - // In dry run only utilization is updated - calculateUnneededOnly := scaleDownForbidden || + scaleDownInCooldown := scaleDownForbidden || a.lastScaleUpTime.Add(a.ScaleDownDelayAfterAdd).After(currentTime) || a.lastScaleDownFailTime.Add(a.ScaleDownDelayAfterFailure).After(currentTime) || - a.lastScaleDownDeleteTime.Add(a.ScaleDownDelayAfterDelete).After(currentTime) || - scaleDown.nodeDeleteStatus.IsDeleteInProgress() + a.lastScaleDownDeleteTime.Add(a.ScaleDownDelayAfterDelete).After(currentTime) + // In dry run only utilization is updated + calculateUnneededOnly := scaleDownInCooldown || scaleDown.nodeDeleteStatus.IsDeleteInProgress() - glog.V(4).Infof("Scale down status: unneededOnly=%v lastScaleUpTime=%s "+ + klog.V(4).Infof("Scale down status: unneededOnly=%v lastScaleUpTime=%s "+ "lastScaleDownDeleteTime=%v lastScaleDownFailTime=%s scaleDownForbidden=%v isDeleteInProgress=%v", calculateUnneededOnly, a.lastScaleUpTime, a.lastScaleDownDeleteTime, a.lastScaleDownFailTime, scaleDownForbidden, scaleDown.nodeDeleteStatus.IsDeleteInProgress()) - if !calculateUnneededOnly { - glog.V(4).Infof("Starting scale down") + if scaleDownInCooldown { + scaleDownStatus.Result = status.ScaleDownInCooldown + } else if scaleDown.nodeDeleteStatus.IsDeleteInProgress() { + scaleDownStatus.Result = status.ScaleDownInProgress + } else { + klog.V(4).Infof("Starting scale down") // We want to delete unneeded Node Groups only if there was no recent scale up, // and there is no current delete in progress and there was no recent errors. 
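The branch completed in the next hunk reduces to a three-way classification before any node is touched: cooldown wins, then an in-flight delete, and only the default path reaches `TryToScaleDown`. As a runnable toy:

```go
package main

import "fmt"

// classify mirrors the scale-down branch above: cooldown takes precedence
// over an in-flight delete, and only the default case attempts deletions.
func classify(inCooldown, deleteInProgress bool) string {
	switch {
	case inCooldown:
		return "ScaleDownInCooldown"
	case deleteInProgress:
		return "ScaleDownInProgress"
	default:
		return "TryToScaleDown"
	}
}

func main() {
	fmt.Println(classify(true, true))   // ScaleDownInCooldown
	fmt.Println(classify(false, true))  // ScaleDownInProgress
	fmt.Println(classify(false, false)) // TryToScaleDown
}
```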
@@ -334,22 +384,45 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) errors.AutoscalerError scaleDownStart := time.Now() metrics.UpdateLastTime(metrics.ScaleDown, scaleDownStart) - result, typedErr := scaleDown.TryToScaleDown(allNodes, allScheduled, pdbs, currentTime) + scaleDownStatus, typedErr := scaleDown.TryToScaleDown(allNodes, allScheduled, pdbs, currentTime) metrics.UpdateDurationFromStart(metrics.ScaleDown, scaleDownStart) + if scaleDownStatus.Result == status.ScaleDownNodeDeleted { + a.lastScaleDownDeleteTime = currentTime + a.clusterStateRegistry.Recalculate() + } + + if a.processors != nil && a.processors.ScaleDownStatusProcessor != nil { + a.processors.ScaleDownStatusProcessor.Process(autoscalingContext, scaleDownStatus) + scaleDownStatusProcessorAlreadyCalled = true + } + if typedErr != nil { - glog.Errorf("Failed to scale down: %v", typedErr) + klog.Errorf("Failed to scale down: %v", typedErr) a.lastScaleDownFailTime = currentTime return typedErr } - if result == ScaleDownNodeDeleted { - a.lastScaleDownDeleteTime = currentTime - } } } return nil } +// filterOutYoungPods drops unschedulable pods younger than newPodScaleUpDelay, so they are not yet considered for scale-up +func (a *StaticAutoscaler) filterOutYoungPods(allUnschedulablePods []*apiv1.Pod, currentTime time.Time) []*apiv1.Pod { + var oldUnschedulablePods []*apiv1.Pod + newPodScaleUpDelay := a.AutoscalingOptions.NewPodScaleUpDelay + for _, pod := range allUnschedulablePods { + podAge := currentTime.Sub(pod.CreationTimestamp.Time) + if podAge > newPodScaleUpDelay { + oldUnschedulablePods = append(oldUnschedulablePods, pod) + } else { + klog.V(3).Infof("Pod %s is %.3f seconds old, too new to consider unschedulable", pod.Name, podAge.Seconds()) + } + } + return oldUnschedulablePods +} + // ExitCleanUp performs all necessary clean-ups when the autoscaler's exiting. 
func (a *StaticAutoscaler) ExitCleanUp() { a.processors.CleanUp() @@ -363,12 +436,12 @@ func (a *StaticAutoscaler) ExitCleanUp() { func (a *StaticAutoscaler) obtainNodeLists() ([]*apiv1.Node, []*apiv1.Node, errors.AutoscalerError) { allNodes, err := a.AllNodeLister().List() if err != nil { - glog.Errorf("Failed to list all nodes: %v", err) + klog.Errorf("Failed to list all nodes: %v", err) return nil, nil, errors.ToAutoscalerError(errors.ApiCallError, err) } readyNodes, err := a.ReadyNodeLister().List() if err != nil { - glog.Errorf("Failed to list ready nodes: %v", err) + klog.Errorf("Failed to list ready nodes: %v", err) return nil, nil, errors.ToAutoscalerError(errors.ApiCallError, err) } @@ -397,16 +470,16 @@ func (a *StaticAutoscaler) actOnEmptyCluster(allNodes, readyNodes []*apiv1.Node, return false } -func (a *StaticAutoscaler) updateClusterState(allNodes []*apiv1.Node, currentTime time.Time) errors.AutoscalerError { +func (a *StaticAutoscaler) updateClusterState(allNodes []*apiv1.Node, nodeInfosForGroups map[string]*schedulercache.NodeInfo, currentTime time.Time) errors.AutoscalerError { err := a.AutoscalingContext.CloudProvider.Refresh() if err != nil { - glog.Errorf("Failed to refresh cloud provider config: %v", err) + klog.Errorf("Failed to refresh cloud provider config: %v", err) return errors.ToAutoscalerError(errors.CloudProviderError, err) } - err = a.clusterStateRegistry.UpdateNodes(allNodes, currentTime) + err = a.clusterStateRegistry.UpdateNodes(allNodes, nodeInfosForGroups, currentTime) if err != nil { - glog.Errorf("Failed to update node registry: %v", err) + klog.Errorf("Failed to update node registry: %v", err) a.scaleDown.CleanUpUnneededNodes() return errors.ToAutoscalerError(errors.CloudProviderError, err) } @@ -416,7 +489,7 @@ func (a *StaticAutoscaler) updateClusterState(allNodes []*apiv1.Node, currentTim } func (a *StaticAutoscaler) onEmptyCluster(status string, emitEvent bool) { - glog.Warningf(status) + klog.Warningf(status) a.scaleDown.CleanUpUnneededNodes() UpdateEmptyClusterStateMetrics() if a.AutoscalingContext.WriteStatusConfigMap { diff --git a/cluster-autoscaler/core/static_autoscaler_test.go b/cluster-autoscaler/core/static_autoscaler_test.go index 12efb1d9d534..d9e2007706a3 100644 --- a/cluster-autoscaler/core/static_autoscaler_test.go +++ b/cluster-autoscaler/core/static_autoscaler_test.go @@ -36,9 +36,9 @@ import ( "k8s.io/client-go/kubernetes/fake" schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" - "github.com/golang/glog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" + "k8s.io/klog" ) type nodeListerMock struct { @@ -82,7 +82,7 @@ type onScaleUpMock struct { } func (m *onScaleUpMock) ScaleUp(id string, delta int) error { - glog.Infof("Scale up: %v %v", id, delta) + klog.Infof("Scale up: %v %v", id, delta) args := m.Called(id, delta) return args.Error(0) } @@ -92,7 +92,7 @@ type onScaleDownMock struct { } func (m *onScaleDownMock) ScaleDown(id string, name string) error { - glog.Infof("Scale down: %v %v", id, name) + klog.Infof("Scale down: %v %v", id, name) args := m.Called(id, name) return args.Error(0) } @@ -102,7 +102,7 @@ type onNodeGroupCreateMock struct { } func (m *onNodeGroupCreateMock) Create(id string) error { - glog.Infof("Create group: %v", id) + klog.Infof("Create group: %v", id) args := m.Called(id) return args.Error(0) } @@ -112,7 +112,7 @@ type onNodeGroupDeleteMock struct { } func (m *onNodeGroupDeleteMock) Delete(id string) error { - glog.Infof("Delete group: %v", id) + klog.Infof("Delete group: 
%v", id) args := m.Called(id) return args.Error(0) } @@ -176,8 +176,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) { MaxNodeProvisionTime: 10 * time.Second, } - clusterState := clusterstate.NewClusterStateRegistry(provider, clusterStateConfig, context.LogRecorder) - clusterState.UpdateNodes([]*apiv1.Node{n1, n2}, time.Now()) + clusterState := clusterstate.NewClusterStateRegistry(provider, clusterStateConfig, context.LogRecorder, newBackoff()) sd := NewScaleDown(&context, clusterState) @@ -196,6 +195,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) { allNodeListerMock.On("List").Return([]*apiv1.Node{n1}, nil).Once() scheduledPodMock.On("List").Return([]*apiv1.Pod{p1}, nil).Once() unschedulablePodMock.On("List").Return([]*apiv1.Pod{p2}, nil).Once() + daemonSetListerMock.On("List").Return([]*extensionsv1.DaemonSet{}, nil).Once() podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once() err := autoscaler.RunOnce(time.Now()) @@ -222,6 +222,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) { allNodeListerMock.On("List").Return([]*apiv1.Node{n1, n2}, nil).Once() scheduledPodMock.On("List").Return([]*apiv1.Pod{p1}, nil).Once() unschedulablePodMock.On("List").Return([]*apiv1.Pod{}, nil).Once() + daemonSetListerMock.On("List").Return([]*extensionsv1.DaemonSet{}, nil).Once() podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once() provider.AddNode("ng1", n2) @@ -237,6 +238,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) { allNodeListerMock.On("List").Return([]*apiv1.Node{n1, n2}, nil).Once() scheduledPodMock.On("List").Return([]*apiv1.Pod{p1}, nil).Once() unschedulablePodMock.On("List").Return([]*apiv1.Pod{}, nil).Once() + daemonSetListerMock.On("List").Return([]*extensionsv1.DaemonSet{}, nil).Once() podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once() onScaleDownMock.On("ScaleDown", "ng1", "n2").Return(nil).Once() @@ -251,6 +253,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) { allNodeListerMock.On("List").Return([]*apiv1.Node{n1, n2}, nil).Once() scheduledPodMock.On("List").Return([]*apiv1.Pod{p1}, nil).Once() unschedulablePodMock.On("List").Return([]*apiv1.Pod{p2}, nil).Once() + daemonSetListerMock.On("List").Return([]*extensionsv1.DaemonSet{}, nil).Once() podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once() provider.AddNode("ng1", n3) @@ -264,6 +267,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) { // Remove unregistered nodes. 
readyNodeListerMock.On("List").Return([]*apiv1.Node{n1, n2}, nil).Once() allNodeListerMock.On("List").Return([]*apiv1.Node{n1, n2}, nil).Once() + daemonSetListerMock.On("List").Return([]*extensionsv1.DaemonSet{}, nil).Once() onScaleDownMock.On("ScaleDown", "ng1", "n3").Return(nil).Once() err = autoscaler.RunOnce(time.Now().Add(5 * time.Hour)) @@ -354,8 +358,7 @@ func TestStaticAutoscalerRunOnceWithAutoprovisionedEnabled(t *testing.T) { OkTotalUnreadyCount: 0, MaxNodeProvisionTime: 10 * time.Second, } - clusterState := clusterstate.NewClusterStateRegistry(provider, clusterStateConfig, context.LogRecorder) - clusterState.UpdateNodes([]*apiv1.Node{n1}, time.Now()) + clusterState := clusterstate.NewClusterStateRegistry(provider, clusterStateConfig, context.LogRecorder, newBackoff()) sd := NewScaleDown(&context, clusterState) @@ -392,6 +395,7 @@ func TestStaticAutoscalerRunOnceWithAutoprovisionedEnabled(t *testing.T) { scheduledPodMock.On("List").Return([]*apiv1.Pod{p1}, nil).Once() unschedulablePodMock.On("List").Return([]*apiv1.Pod{}, nil).Once() podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once() + daemonSetListerMock.On("List").Return([]*extensionsv1.DaemonSet{}, nil).Once() onNodeGroupDeleteMock.On("Delete", "autoprovisioned-TN1").Return(nil).Once() provider.AddAutoprovisionedNodeGroup("autoprovisioned-TN2", 0, 10, 1, "TN1") @@ -408,6 +412,7 @@ func TestStaticAutoscalerRunOnceWithAutoprovisionedEnabled(t *testing.T) { scheduledPodMock.On("List").Return([]*apiv1.Pod{p1}, nil).Once() unschedulablePodMock.On("List").Return([]*apiv1.Pod{}, nil).Once() podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once() + daemonSetListerMock.On("List").Return([]*extensionsv1.DaemonSet{}, nil).Once() onNodeGroupDeleteMock.On("Delete", "autoprovisioned-"+ "TN1").Return(nil).Once() onScaleDownMock.On("ScaleDown", "autoprovisioned-TN2", "n2").Return(nil).Once() @@ -480,12 +485,15 @@ func TestStaticAutoscalerRunOnceWithALongUnregisteredNode(t *testing.T) { OkTotalUnreadyCount: 1, MaxNodeProvisionTime: 10 * time.Second, } - clusterState := clusterstate.NewClusterStateRegistry(provider, clusterStateConfig, context.LogRecorder) + clusterState := clusterstate.NewClusterStateRegistry(provider, clusterStateConfig, context.LogRecorder, newBackoff()) // broken node detected as unregistered - clusterState.UpdateNodes([]*apiv1.Node{n1}, now) + + nodes := []*apiv1.Node{n1} + //nodeInfos, _ := GetNodeInfosForGroups(nodes, provider, listerRegistry, []*extensionsv1.DaemonSet{}, context.PredicateChecker) + clusterState.UpdateNodes(nodes, nil, now) // broken node failed to register in time - clusterState.UpdateNodes([]*apiv1.Node{n1}, later) + clusterState.UpdateNodes([]*apiv1.Node{n1}, nil, later) sd := NewScaleDown(&context, clusterState) @@ -518,6 +526,7 @@ func TestStaticAutoscalerRunOnceWithALongUnregisteredNode(t *testing.T) { readyNodeListerMock.On("List").Return([]*apiv1.Node{n1, n2}, nil).Once() allNodeListerMock.On("List").Return([]*apiv1.Node{n1, n2}, nil).Once() onScaleDownMock.On("ScaleDown", "ng1", "broken").Return(nil).Once() + daemonSetListerMock.On("List").Return([]*extensionsv1.DaemonSet{}, nil).Once() err = autoscaler.RunOnce(later.Add(2 * time.Hour)) waitForDeleteToFinish(t, autoscaler.scaleDown) @@ -613,8 +622,7 @@ func TestStaticAutoscalerRunOncePodsWithPriorities(t *testing.T) { MaxNodeProvisionTime: 10 * time.Second, } - clusterState := clusterstate.NewClusterStateRegistry(provider, clusterStateConfig, context.LogRecorder) 
- clusterState.UpdateNodes([]*apiv1.Node{n1, n2}, time.Now()) + clusterState := clusterstate.NewClusterStateRegistry(provider, clusterStateConfig, context.LogRecorder, newBackoff()) sd := NewScaleDown(&context, clusterState) @@ -645,6 +653,7 @@ func TestStaticAutoscalerRunOncePodsWithPriorities(t *testing.T) { allNodeListerMock.On("List").Return([]*apiv1.Node{n1, n2, n3}, nil).Once() scheduledPodMock.On("List").Return([]*apiv1.Pod{p1, p2, p3}, nil).Once() unschedulablePodMock.On("List").Return([]*apiv1.Pod{p4, p5}, nil).Once() + daemonSetListerMock.On("List").Return([]*extensionsv1.DaemonSet{}, nil).Once() podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once() ng2.SetTargetSize(2) @@ -659,6 +668,7 @@ func TestStaticAutoscalerRunOncePodsWithPriorities(t *testing.T) { allNodeListerMock.On("List").Return([]*apiv1.Node{n1, n2, n3}, nil).Once() scheduledPodMock.On("List").Return([]*apiv1.Pod{p1, p2, p3, p4}, nil).Once() unschedulablePodMock.On("List").Return([]*apiv1.Pod{p5}, nil).Once() + daemonSetListerMock.On("List").Return([]*extensionsv1.DaemonSet{}, nil).Once() podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once() onScaleDownMock.On("ScaleDown", "ng1", "n1").Return(nil).Once() diff --git a/cluster-autoscaler/core/utils.go b/cluster-autoscaler/core/utils.go index 99ee048ad623..83006745fb90 100644 --- a/cluster-autoscaler/core/utils.go +++ b/cluster-autoscaler/core/utils.go @@ -44,7 +44,7 @@ import ( kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -136,7 +136,7 @@ func FilterOutSchedulable(unschedulableCandidates []*apiv1.Pod, nodes []*apiv1.N // Hello, ugly hack. I wish you weren't here. var predicateError *simulator.PredicateError if err != nil { - predicateError = simulator.NewPredicateError("FitsAny", err, nil) + predicateError = simulator.NewPredicateError("FitsAny", err, nil, nil) unschedulablePods = append(unschedulablePods, pod) } else { glogx.V(4).UpTo(loggingQuota).Infof("Pod %s marked as unschedulable can be scheduled on %s. Ignoring in scale up.", pod.Name, nodeName) @@ -156,10 +156,10 @@ func FilterOutExpendableAndSplit(unschedulableCandidates []*apiv1.Pod, expendabl waitingForLowerPriorityPreemption := []*apiv1.Pod{} for _, pod := range unschedulableCandidates { if pod.Spec.Priority != nil && int(*pod.Spec.Priority) < expendablePodsPriorityCutoff { - glog.V(4).Infof("Pod %s has priority below %d (%d) and will scheduled when enough resources is free. Ignoring in scale up.", pod.Name, expendablePodsPriorityCutoff, *pod.Spec.Priority) + klog.V(4).Infof("Pod %s has priority below %d (%d) and will be scheduled when enough resources are free. Ignoring in scale up.", pod.Name, expendablePodsPriorityCutoff, *pod.Spec.Priority) } else if annot, found := pod.Annotations[scheduler_util.NominatedNodeAnnotationKey]; found && len(annot) > 0 { waitingForLowerPriorityPreemption = append(waitingForLowerPriorityPreemption, pod) - glog.V(4).Infof("Pod %s will be scheduled after low prioity pods are preempted on %s. Ignoring in scale up.", pod.Name, annot) + klog.V(4).Infof("Pod %s will be scheduled after low priority pods are preempted on %s. Ignoring in scale up.", pod.Name, annot) } else { unschedulableNonExpendable = append(unschedulableNonExpendable, pod) } @@ -188,7 +188,7 @@ func CheckPodsSchedulableOnNode(context *context.AutoscalingContext, pods []*api // Check if pod isn't repeated before overwriting result for it. if _, repeated := schedulingErrors[pod]; repeated { // This shouldn't really happen. - glog.Warningf("Pod %v appears multiple time on pods list, will only count it once in scale-up simulation", pod) + klog.Warningf("Pod %v appears multiple times on pods list, will only count it once in scale-up simulation", pod) } // Try to get result from cache. err, found := podSchedulable.get(pod) @@ -205,7 +205,7 @@ func CheckPodsSchedulableOnNode(context *context.AutoscalingContext, pods []*api schedulingErrors[pod] = err if err != nil { // Always log for the first pod in a controller. - glog.V(2).Infof("Pod %s can't be scheduled on %s, predicate failed: %v", pod.Name, nodeGroupId, err.VerboseError()) + klog.V(2).Infof("Pod %s can't be scheduled on %s, predicate failed: %v", pod.Name, nodeGroupId, err.VerboseError()) } } } @@ -218,34 +218,35 @@ func CheckPodsSchedulableOnNode(context *context.AutoscalingContext, pods []*api // TODO(mwielgus): This returns map keyed by url, while most code (including scheduler) uses node.Name for a key. // // TODO(mwielgus): Review error policy - sometimes we may continue with partial errors. -func GetNodeInfosForGroups(nodes []*apiv1.Node, cloudProvider cloudprovider.CloudProvider, kubeClient kube_client.Interface, +func GetNodeInfosForGroups(nodes []*apiv1.Node, nodeInfoCache map[string]*schedulercache.NodeInfo, cloudProvider cloudprovider.CloudProvider, kubeClient kube_client.Interface, daemonsets []*extensionsv1.DaemonSet, predicateChecker *simulator.PredicateChecker) (map[string]*schedulercache.NodeInfo, errors.AutoscalerError) { result := make(map[string]*schedulercache.NodeInfo) + seenGroups := make(map[string]bool) // processNode returns information whether the nodeTemplate was generated and if there was an error. - processNode := func(node *apiv1.Node) (bool, errors.AutoscalerError) { + processNode := func(node *apiv1.Node) (bool, string, errors.AutoscalerError) { nodeGroup, err := cloudProvider.NodeGroupForNode(node) if err != nil { - return false, errors.ToAutoscalerError(errors.CloudProviderError, err) + return false, "", errors.ToAutoscalerError(errors.CloudProviderError, err) } if nodeGroup == nil || reflect.ValueOf(nodeGroup).IsNil() { - return false, nil + return false, "", nil } id := nodeGroup.Id() if _, found := result[id]; !found { // Build nodeInfo. 
nodeInfo, err := simulator.BuildNodeInfoForNode(node, kubeClient) if err != nil { - return false, err + return false, "", err } sanitizedNodeInfo, err := sanitizeNodeInfo(nodeInfo, id) if err != nil { - return false, err + return false, "", err } result[id] = sanitizedNodeInfo - return true, nil + return true, id, nil } - return false, nil + return false, "", nil } for _, node := range nodes { @@ -253,45 +254,59 @@ func GetNodeInfosForGroups(nodes []*apiv1.Node, cloudProvider cloudprovider.Clou if !kube_util.IsNodeReadyAndSchedulable(node) { continue } - _, typedErr := processNode(node) + added, id, typedErr := processNode(node) if typedErr != nil { return map[string]*schedulercache.NodeInfo{}, typedErr } + if added && nodeInfoCache != nil { + if nodeInfoCopy, err := deepCopyNodeInfo(result[id]); err == nil { + nodeInfoCache[id] = nodeInfoCopy + } + } } for _, nodeGroup := range cloudProvider.NodeGroups() { id := nodeGroup.Id() + seenGroups[id] = true if _, found := result[id]; found { continue } + // No good template, check cache of previously running nodes. + if nodeInfoCache != nil { + if nodeInfo, found := nodeInfoCache[id]; found { + if nodeInfoCopy, err := deepCopyNodeInfo(nodeInfo); err == nil { + result[id] = nodeInfoCopy + continue + } + } + } + // No good template, trying to generate one. This is called only if there are no // working nodes in the node groups. By default CA tries to use a real-world example. - baseNodeInfo, err := nodeGroup.TemplateNodeInfo() + nodeInfo, err := GetNodeInfoFromTemplate(nodeGroup, daemonsets, predicateChecker) if err != nil { if err == cloudprovider.ErrNotImplemented { continue } else { - glog.Errorf("Unable to build proper template node for %s: %v", id, err) - return map[string]*schedulercache.NodeInfo{}, errors.ToAutoscalerError( - errors.CloudProviderError, err) + klog.Errorf("Unable to build proper template node for %s: %v", id, err) + return map[string]*schedulercache.NodeInfo{}, errors.ToAutoscalerError(errors.CloudProviderError, err) } } - pods := daemonset.GetDaemonSetPodsForNode(baseNodeInfo, daemonsets, predicateChecker) - pods = append(pods, baseNodeInfo.Pods()...) - fullNodeInfo := schedulercache.NewNodeInfo(pods...) - fullNodeInfo.SetNode(baseNodeInfo.Node()) - sanitizedNodeInfo, typedErr := sanitizeNodeInfo(fullNodeInfo, id) - if typedErr != nil { - return map[string]*schedulercache.NodeInfo{}, typedErr + result[id] = nodeInfo + } + + // Remove invalid node groups from cache + for id := range nodeInfoCache { + if _, ok := seenGroups[id]; !ok { + delete(nodeInfoCache, id) } - result[id] = sanitizedNodeInfo } // Last resort - unready/unschedulable nodes. for _, node := range nodes { // Allowing broken nodes if !kube_util.IsNodeReadyAndSchedulable(node) { - added, typedErr := processNode(node) + added, _, typedErr := processNode(node) if typedErr != nil { return map[string]*schedulercache.NodeInfo{}, typedErr } @@ -301,7 +316,7 @@ func GetNodeInfosForGroups(nodes []*apiv1.Node, cloudProvider cloudprovider.Clou errors.CloudProviderError, err) } if added { - glog.Warningf("Built template for %s based on unready/unschedulable node %s", nodeGroup.Id(), node.Name) + klog.Warningf("Built template for %s based on unready/unschedulable node %s", nodeGroup.Id(), node.Name) } } } @@ -309,6 +324,25 @@ func GetNodeInfosForGroups(nodes []*apiv1.Node, cloudProvider cloudprovider.Clou return result, nil } +// GetNodeInfoFromTemplate returns NodeInfo object built based on TemplateNodeInfo returned by NodeGroup.TemplateNodeInfo(). 
+func GetNodeInfoFromTemplate(nodeGroup cloudprovider.NodeGroup, daemonsets []*extensionsv1.DaemonSet, predicateChecker *simulator.PredicateChecker) (*schedulercache.NodeInfo, errors.AutoscalerError) { + id := nodeGroup.Id() + baseNodeInfo, err := nodeGroup.TemplateNodeInfo() + if err != nil { + return nil, errors.ToAutoscalerError(errors.CloudProviderError, err) + } + + pods := daemonset.GetDaemonSetPodsForNode(baseNodeInfo, daemonsets, predicateChecker) + pods = append(pods, baseNodeInfo.Pods()...) + fullNodeInfo := schedulercache.NewNodeInfo(pods...) + fullNodeInfo.SetNode(baseNodeInfo.Node()) + sanitizedNodeInfo, typedErr := sanitizeNodeInfo(fullNodeInfo, id) + if typedErr != nil { + return nil, typedErr + } + return sanitizedNodeInfo, nil +} + // FilterOutNodesFromNotAutoscaledGroups return subset of input nodes for which cloud provider does not // return autoscaled node group. func FilterOutNodesFromNotAutoscaledGroups(nodes []*apiv1.Node, cloudProvider cloudprovider.CloudProvider) ([]*apiv1.Node, errors.AutoscalerError) { @@ -326,6 +360,20 @@ func FilterOutNodesFromNotAutoscaledGroups(nodes []*apiv1.Node, cloudProvider cl return result, nil } +func deepCopyNodeInfo(nodeInfo *schedulercache.NodeInfo) (*schedulercache.NodeInfo, errors.AutoscalerError) { + newPods := make([]*apiv1.Pod, 0) + for _, pod := range nodeInfo.Pods() { + newPods = append(newPods, pod.DeepCopy()) + } + + // Build a new node info. + newNodeInfo := schedulercache.NewNodeInfo(newPods...) + if err := newNodeInfo.SetNode(nodeInfo.Node().DeepCopy()); err != nil { + return nil, errors.ToAutoscalerError(errors.InternalError, err) + } + return newNodeInfo, nil +} + func sanitizeNodeInfo(nodeInfo *schedulercache.NodeInfo, nodeGroupName string) (*schedulercache.NodeInfo, errors.AutoscalerError) { // Sanitize node name. sanitizedNode, err := sanitizeTemplateNode(nodeInfo.Node(), nodeGroupName) @@ -368,9 +416,9 @@ func sanitizeTemplateNode(node *apiv1.Node, nodeGroup string) (*apiv1.Node, erro // template node. 
switch taint.Key { case ReschedulerTaintKey: - glog.V(4).Infof("Removing rescheduler taint when creating template from node %s", node.Name) + klog.V(4).Infof("Removing rescheduler taint when creating template from node %s", node.Name) case deletetaint.ToBeDeletedTaint: - glog.V(4).Infof("Removing autoscaler taint when creating template from node %s", node.Name) + klog.V(4).Infof("Removing autoscaler taint when creating template from node %s", node.Name) default: newTaints = append(newTaints, taint) } @@ -385,32 +433,34 @@ func removeOldUnregisteredNodes(unregisteredNodes []clusterstate.UnregisteredNod removedAny := false for _, unregisteredNode := range unregisteredNodes { if unregisteredNode.UnregisteredSince.Add(context.MaxNodeProvisionTime).Before(currentTime) { - glog.V(0).Infof("Removing unregistered node %v", unregisteredNode.Node.Name) + klog.V(0).Infof("Removing unregistered node %v", unregisteredNode.Node.Name) nodeGroup, err := context.CloudProvider.NodeGroupForNode(unregisteredNode.Node) if err != nil { - glog.Warningf("Failed to get node group for %s: %v", unregisteredNode.Node.Name, err) + klog.Warningf("Failed to get node group for %s: %v", unregisteredNode.Node.Name, err) return removedAny, err } if nodeGroup == nil || reflect.ValueOf(nodeGroup).IsNil() { - glog.Warningf("No node group for node %s, skipping", unregisteredNode.Node.Name) + klog.Warningf("No node group for node %s, skipping", unregisteredNode.Node.Name) continue } size, err := nodeGroup.TargetSize() if err != nil { - glog.Warningf("Failed to get node group size, err: %v", err) + klog.Warningf("Failed to get node group size, err: %v", err) continue } if nodeGroup.MinSize() >= size { - glog.Warningf("Failed to remove node %s: node group min size reached, skipping unregistered node removal", unregisteredNode.Node.Name) + klog.Warningf("Failed to remove node %s: node group min size reached, skipping unregistered node removal", unregisteredNode.Node.Name) continue } - logRecorder.Eventf(apiv1.EventTypeNormal, "DeleteUnregistered", - "Removing unregistered node %v", unregisteredNode.Node.Name) err = nodeGroup.DeleteNodes([]*apiv1.Node{unregisteredNode.Node}) if err != nil { - glog.Warningf("Failed to remove node %s: %v", unregisteredNode.Node.Name, err) + klog.Warningf("Failed to remove node %s: %v", unregisteredNode.Node.Name, err) + logRecorder.Eventf(apiv1.EventTypeWarning, "DeleteUnregisteredFailed", + "Failed to remove node %s: %v", unregisteredNode.Node.Name, err) return removedAny, err } + logRecorder.Eventf(apiv1.EventTypeNormal, "DeleteUnregistered", + "Removed unregistered node %v", unregisteredNode.Node.Name) removedAny = true } } @@ -430,7 +480,7 @@ func fixNodeGroupSize(context *context.AutoscalingContext, clusterStateRegistry if incorrectSize.FirstObserved.Add(context.MaxNodeProvisionTime).Before(currentTime) { delta := incorrectSize.CurrentSize - incorrectSize.ExpectedSize if delta < 0 { - glog.V(0).Infof("Decreasing size of %s, expected=%d current=%d delta=%d", nodeGroup.Id(), + klog.V(0).Infof("Decreasing size of %s, expected=%d current=%d delta=%d", nodeGroup.Id(), incorrectSize.ExpectedSize, incorrectSize.CurrentSize, delta) @@ -455,20 +505,20 @@ func getPotentiallyUnneededNodes(context *context.AutoscalingContext, nodes []*a for _, node := range nodes { nodeGroup, err := context.CloudProvider.NodeGroupForNode(node) if err != nil { - glog.Warningf("Error while checking node group for %s: %v", node.Name, err) + klog.Warningf("Error while checking node group for %s: %v", node.Name, err) continue } 
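Stepping back from the hunks above: `GetNodeInfosForGroups` now resolves a group's template by a strict precedence of sources. The toy below restates that order with strings standing in for `*schedulercache.NodeInfo` (the real code deep-copies entries in and out of the cache):

```go
package main

import "fmt"

// templateFor restates the precedence in GetNodeInfosForGroups: a ready node
// from the group, then the per-group cache, then the provider template; the
// real code finally falls back to unready/unschedulable nodes.
func templateFor(group string, live, cache map[string]string,
	fromProvider func(string) (string, bool)) (string, bool) {
	if t, ok := live[group]; ok {
		return t, true
	}
	if t, ok := cache[group]; ok {
		return t, true // real code deep-copies the cached NodeInfo
	}
	return fromProvider(group)
}

func main() {
	cache := map[string]string{"ng4": "cached-template"}
	noTemplate := func(string) (string, bool) { return "", false }
	t, ok := templateFor("ng4", nil, cache, noTemplate)
	fmt.Println(t, ok) // cached-template true
}
```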
if nodeGroup == nil || reflect.ValueOf(nodeGroup).IsNil() { - glog.V(4).Infof("Skipping %s - no node group config", node.Name) + klog.V(4).Infof("Skipping %s - no node group config", node.Name) continue } size, found := nodeGroupSize[nodeGroup.Id()] if !found { - glog.Errorf("Error while checking node group size %s: group size not found", nodeGroup.Id()) + klog.Errorf("Error while checking node group size %s: group size not found", nodeGroup.Id()) continue } if size <= nodeGroup.MinSize() { - glog.V(1).Infof("Skipping %s - node group min size reached", node.Name) + klog.V(1).Infof("Skipping %s - node group min size reached", node.Name) continue } result = append(result, node) @@ -511,7 +561,7 @@ func ConfigurePredicateCheckerForLoop(unschedulablePods []*apiv1.Pod, schedulabl } predicateChecker.SetAffinityPredicateEnabled(podsWithAffinityFound) if !podsWithAffinityFound { - glog.V(1).Info("No pod using affinity / antiaffinity found in cluster, disabling affinity predicate for this loop") + klog.V(1).Info("No pod using affinity / antiaffinity found in cluster, disabling affinity predicate for this loop") } } @@ -540,7 +590,7 @@ func getNodeGroupSizeMap(cloudProvider cloudprovider.CloudProvider) map[string]i for _, nodeGroup := range cloudProvider.NodeGroups() { size, err := nodeGroup.TargetSize() if err != nil { - glog.Errorf("Error while checking node group size %s: %v", nodeGroup.Id(), err) + klog.Errorf("Error while checking node group size %s: %v", nodeGroup.Id(), err) continue } nodeGroupSize[nodeGroup.Id()] = size diff --git a/cluster-autoscaler/core/utils_test.go b/cluster-autoscaler/core/utils_test.go index 6bd807760506..52e076b03fac 100644 --- a/cluster-autoscaler/core/utils_test.go +++ b/cluster-autoscaler/core/utils_test.go @@ -45,6 +45,8 @@ import ( schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" ) +const MiB = 1024 * 1024 + func TestPodSchedulableMap(t *testing.T) { rc1 := apiv1.ReplicationController{ ObjectMeta: metav1.ObjectMeta{ @@ -298,44 +300,35 @@ func TestFilterSchedulablePodsForNode(t *testing.T) { } func TestGetNodeInfosForGroups(t *testing.T) { - n1 := BuildTestNode("n1", 100, 1000) - SetNodeReadyState(n1, true, time.Now()) - n2 := BuildTestNode("n2", 1000, 1000) - SetNodeReadyState(n2, true, time.Now()) - n3 := BuildTestNode("n3", 1000, 1000) - SetNodeReadyState(n3, false, time.Now()) - n4 := BuildTestNode("n4", 1000, 1000) - SetNodeReadyState(n4, false, time.Now()) - - p1 := BuildTestPod("p1", 80, 0) - p2 := BuildTestPod("p2", 800, 0) - p3 := BuildTestPod("p3", 800, 0) - p1.Spec.NodeName = "n1" - p2.Spec.NodeName = "n2" - p3.Spec.NodeName = "n4" - - tn := BuildTestNode("T1-abc", 4000, 1000000) + ready1 := BuildTestNode("n1", 1000, 1000) + SetNodeReadyState(ready1, true, time.Now()) + ready2 := BuildTestNode("n2", 2000, 2000) + SetNodeReadyState(ready2, true, time.Now()) + unready3 := BuildTestNode("n3", 3000, 3000) + SetNodeReadyState(unready3, false, time.Now()) + unready4 := BuildTestNode("n4", 4000, 4000) + SetNodeReadyState(unready4, false, time.Now()) + + tn := BuildTestNode("tn", 5000, 5000) tni := schedulercache.NewNodeInfo() tni.SetNode(tn) // Cloud provider with TemplateNodeInfo implemented. - provider1 := testprovider.NewTestAutoprovisioningCloudProvider(nil, nil, - nil, nil, - nil, map[string]*schedulercache.NodeInfo{"n3": tni, "n4": tni}) - provider1.AddNodeGroup("n1", 1, 10, 1) // Nodegroup with ready node. - provider1.AddNodeGroup("n2", 1, 10, 1) // Nodegroup with ready and unready node. 
- provider1.AddNodeGroup("n3", 1, 10, 1) // Nodegroup with unready node. - provider1.AddNodeGroup("n4", 0, 1000, 0) // Nodegroup without nodes. - provider1.AddNode("n1", n1) - provider1.AddNode("n2", n2) - provider1.AddNode("n2", n3) - provider1.AddNode("n3", n4) + provider1 := testprovider.NewTestAutoprovisioningCloudProvider( + nil, nil, nil, nil, nil, + map[string]*schedulercache.NodeInfo{"ng3": tni, "ng4": tni}) + provider1.AddNodeGroup("ng1", 1, 10, 1) // Nodegroup with ready node. + provider1.AddNode("ng1", ready1) + provider1.AddNodeGroup("ng2", 1, 10, 1) // Nodegroup with ready and unready node. + provider1.AddNode("ng2", ready2) + provider1.AddNode("ng2", unready3) + provider1.AddNodeGroup("ng3", 1, 10, 1) // Nodegroup with unready node. + provider1.AddNode("ng3", unready4) + provider1.AddNodeGroup("ng4", 0, 1000, 0) // Nodegroup without nodes. // Cloud provider with TemplateNodeInfo not implemented. - provider2 := testprovider.NewTestAutoprovisioningCloudProvider(nil, nil, - nil, nil, - nil, nil) - provider2.AddNodeGroup("n5", 1, 10, 1) // Nodegroup without nodes. + provider2 := testprovider.NewTestAutoprovisioningCloudProvider(nil, nil, nil, nil, nil, nil) + provider2.AddNodeGroup("ng5", 1, 10, 1) // Nodegroup without nodes. fakeClient := &fake.Clientset{} fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) { @@ -344,26 +337,157 @@ func TestGetNodeInfosForGroups(t *testing.T) { predicateChecker := simulator.NewTestPredicateChecker() - res, err := GetNodeInfosForGroups([]*apiv1.Node{n1, n2, n3, n4}, provider1, fakeClient, - []*extensionsv1.DaemonSet{}, predicateChecker) + res, err := GetNodeInfosForGroups([]*apiv1.Node{unready4, unready3, ready2, ready1}, nil, + provider1, fakeClient, []*extensionsv1.DaemonSet{}, predicateChecker) assert.NoError(t, err) assert.Equal(t, 4, len(res)) - _, found := res["n1"] + info, found := res["ng1"] assert.True(t, found) - _, found = res["n2"] + assertEqualNodeCapacities(t, ready1, info.Node()) + info, found = res["ng2"] assert.True(t, found) - _, found = res["n3"] + assertEqualNodeCapacities(t, ready2, info.Node()) + info, found = res["ng3"] assert.True(t, found) - _, found = res["n4"] + assertEqualNodeCapacities(t, tn, info.Node()) + info, found = res["ng4"] assert.True(t, found) + assertEqualNodeCapacities(t, tn, info.Node()) // Test for a nodegroup without nodes and TemplateNodeInfo not implemented by cloud proivder - res, err = GetNodeInfosForGroups([]*apiv1.Node{}, provider2, fakeClient, + res, err = GetNodeInfosForGroups([]*apiv1.Node{}, nil, provider2, fakeClient, []*extensionsv1.DaemonSet{}, predicateChecker) assert.NoError(t, err) assert.Equal(t, 0, len(res)) } +func TestGetNodeInfosForGroupsCache(t *testing.T) { + ready1 := BuildTestNode("n1", 1000, 1000) + SetNodeReadyState(ready1, true, time.Now()) + ready2 := BuildTestNode("n2", 2000, 2000) + SetNodeReadyState(ready2, true, time.Now()) + unready3 := BuildTestNode("n3", 3000, 3000) + SetNodeReadyState(unready3, false, time.Now()) + unready4 := BuildTestNode("n4", 4000, 4000) + SetNodeReadyState(unready4, false, time.Now()) + ready5 := BuildTestNode("n5", 5000, 5000) + SetNodeReadyState(ready5, true, time.Now()) + ready6 := BuildTestNode("n6", 6000, 6000) + SetNodeReadyState(ready6, true, time.Now()) + + tn := BuildTestNode("tn", 10000, 10000) + tni := schedulercache.NewNodeInfo() + tni.SetNode(tn) + + lastDeletedGroup := "" + onDeleteGroup := func(id string) error { + lastDeletedGroup = id + return nil + } + + // Cloud provider with 
TemplateNodeInfo implemented. + provider1 := testprovider.NewTestAutoprovisioningCloudProvider( + nil, nil, nil, onDeleteGroup, nil, + map[string]*schedulercache.NodeInfo{"ng3": tni, "ng4": tni}) + provider1.AddNodeGroup("ng1", 1, 10, 1) // Nodegroup with ready node. + provider1.AddNode("ng1", ready1) + provider1.AddNodeGroup("ng2", 1, 10, 1) // Nodegroup with ready and unready node. + provider1.AddNode("ng2", ready2) + provider1.AddNode("ng2", unready3) + provider1.AddNodeGroup("ng3", 1, 10, 1) // Nodegroup with unready node (and 1 previously ready node). + provider1.AddNode("ng3", unready4) + provider1.AddNode("ng3", ready5) + provider1.AddNodeGroup("ng4", 0, 1000, 0) // Nodegroup without nodes (and 1 previously ready node). + provider1.AddNode("ng4", ready6) + + fakeClient := &fake.Clientset{} + fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) { + return true, &apiv1.PodList{Items: []apiv1.Pod{}}, nil + }) + + predicateChecker := simulator.NewTestPredicateChecker() + + nodeInfoCache := make(map[string]*schedulercache.NodeInfo) + + // Fill cache + res, err := GetNodeInfosForGroups([]*apiv1.Node{unready4, unready3, ready2, ready1}, nodeInfoCache, + provider1, fakeClient, []*extensionsv1.DaemonSet{}, predicateChecker) + assert.NoError(t, err) + // Check results + assert.Equal(t, 4, len(res)) + info, found := res["ng1"] + assert.True(t, found) + assertEqualNodeCapacities(t, ready1, info.Node()) + info, found = res["ng2"] + assert.True(t, found) + assertEqualNodeCapacities(t, ready2, info.Node()) + info, found = res["ng3"] + assert.True(t, found) + assertEqualNodeCapacities(t, tn, info.Node()) + info, found = res["ng4"] + assert.True(t, found) + assertEqualNodeCapacities(t, tn, info.Node()) + // Check cache + cachedInfo, found := nodeInfoCache["ng1"] + assert.True(t, found) + assertEqualNodeCapacities(t, ready1, cachedInfo.Node()) + cachedInfo, found = nodeInfoCache["ng2"] + assert.True(t, found) + assertEqualNodeCapacities(t, ready2, cachedInfo.Node()) + cachedInfo, found = nodeInfoCache["ng3"] + assert.False(t, found) + cachedInfo, found = nodeInfoCache["ng4"] + assert.False(t, found) + + // Invalidate part of cache in two different ways + provider1.DeleteNodeGroup("ng1") + provider1.GetNodeGroup("ng3").Delete() + assert.Equal(t, "ng3", lastDeletedGroup) + + // Check cache with all nodes removed + res, err = GetNodeInfosForGroups([]*apiv1.Node{}, nodeInfoCache, + provider1, fakeClient, []*extensionsv1.DaemonSet{}, predicateChecker) + assert.NoError(t, err) + // Check results + assert.Equal(t, 2, len(res)) + info, found = res["ng2"] + assert.True(t, found) + assertEqualNodeCapacities(t, ready2, info.Node()) + // Check ng4 result and cache + info, found = res["ng4"] + assert.True(t, found) + assertEqualNodeCapacities(t, tn, info.Node()) + // Check cache + cachedInfo, found = nodeInfoCache["ng2"] + assert.True(t, found) + assertEqualNodeCapacities(t, ready2, cachedInfo.Node()) + cachedInfo, found = nodeInfoCache["ng4"] + assert.False(t, found) + + // Fill cache manually + infoNg4Node6 := schedulercache.NewNodeInfo() + err2 := infoNg4Node6.SetNode(ready6.DeepCopy()) + assert.NoError(t, err2) + nodeInfoCache = map[string]*schedulercache.NodeInfo{"ng4": infoNg4Node6} + // Check if cache was used + res, err = GetNodeInfosForGroups([]*apiv1.Node{ready1, ready2}, nodeInfoCache, + provider1, fakeClient, []*extensionsv1.DaemonSet{}, predicateChecker) + assert.NoError(t, err) + assert.Equal(t, 2, len(res)) + info, found = res["ng2"] + assert.True(t, 
found) + assertEqualNodeCapacities(t, ready2, info.Node()) + info, found = res["ng4"] + assert.True(t, found) + assertEqualNodeCapacities(t, ready6, info.Node()) +} + +func assertEqualNodeCapacities(t *testing.T, expected, actual *apiv1.Node) { + t.Helper() + assert.Equal(t, getNodeResource(expected, apiv1.ResourceCPU), getNodeResource(actual, apiv1.ResourceCPU), "CPU should be the same") + assert.Equal(t, getNodeResource(expected, apiv1.ResourceMemory), getNodeResource(actual, apiv1.ResourceMemory), "Memory should be the same") +} + func TestRemoveOldUnregisteredNodes(t *testing.T) { deletedNodes := make(chan string, 10) @@ -386,8 +510,8 @@ func TestRemoveOldUnregisteredNodes(t *testing.T) { clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{ MaxTotalUnreadyPercentage: 10, OkTotalUnreadyCount: 1, - }, fakeLogRecorder) - err := clusterState.UpdateNodes([]*apiv1.Node{ng1_1}, now.Add(-time.Hour)) + }, fakeLogRecorder, newBackoff()) + err := clusterState.UpdateNodes([]*apiv1.Node{ng1_1}, nil, now.Add(-time.Hour)) assert.NoError(t, err) context := &context.AutoscalingContext{ @@ -430,7 +554,7 @@ func TestSanitizeLabels(t *testing.T) { oldNode := BuildTestNode("ng1-1", 1000, 1000) oldNode.Labels = map[string]string{ kubeletapis.LabelHostname: "abc", - "x": "y", + "x": "y", } node, err := sanitizeTemplateNode(oldNode, "bzium") assert.NoError(t, err) @@ -483,8 +607,8 @@ func TestRemoveFixNodeTargetSize(t *testing.T) { clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{ MaxTotalUnreadyPercentage: 10, OkTotalUnreadyCount: 1, - }, fakeLogRecorder) - err := clusterState.UpdateNodes([]*apiv1.Node{ng1_1}, now.Add(-time.Hour)) + }, fakeLogRecorder, newBackoff()) + err := clusterState.UpdateNodes([]*apiv1.Node{ng1_1}, nil, now.Add(-time.Hour)) assert.NoError(t, err) context := &context.AutoscalingContext{ @@ -584,19 +708,19 @@ func TestConfigurePredicateCheckerForLoop(t *testing.T) { } func TestGetNodeResource(t *testing.T) { - node := BuildTestNode("n1", 1000, 2*MB) + node := BuildTestNode("n1", 1000, 2*MiB) cores := getNodeResource(node, apiv1.ResourceCPU) assert.Equal(t, int64(1), cores) memory := getNodeResource(node, apiv1.ResourceMemory) - assert.Equal(t, int64(2*MB), memory) + assert.Equal(t, int64(2*MiB), memory) unknownResourceValue := getNodeResource(node, "unknown resource") assert.Equal(t, int64(0), unknownResourceValue) // if we have no resources in capacity we expect getNodeResource to return 0 - nodeWithMissingCapacity := BuildTestNode("n1", 1000, 2*MB) + nodeWithMissingCapacity := BuildTestNode("n1", 1000, 2*MiB) nodeWithMissingCapacity.Status.Capacity = apiv1.ResourceList{} cores = getNodeResource(nodeWithMissingCapacity, apiv1.ResourceCPU) @@ -606,7 +730,7 @@ func TestGetNodeResource(t *testing.T) { assert.Equal(t, int64(0), memory) // if we have negative values in resources we expect getNodeResource to return 0 - nodeWithNegativeCapacity := BuildTestNode("n1", -1000, -2*MB) + nodeWithNegativeCapacity := BuildTestNode("n1", -1000, -2*MiB) nodeWithNegativeCapacity.Status.Capacity = apiv1.ResourceList{} cores = getNodeResource(nodeWithNegativeCapacity, apiv1.ResourceCPU) @@ -618,14 +742,14 @@ func TestGetNodeResource(t *testing.T) { } func TestGetNodeCoresAndMemory(t *testing.T) { - node := BuildTestNode("n1", 2000, 2048*MB) + node := BuildTestNode("n1", 2000, 2048*MiB) cores, memory := getNodeCoresAndMemory(node) assert.Equal(t, int64(2), cores) - assert.Equal(t, int64(2048*MB), 
memory) + assert.Equal(t, int64(2048*MiB), memory) // if we have no cpu/memory defined in capacity we expect getNodeCoresAndMemory to return 0s - nodeWithMissingCapacity := BuildTestNode("n1", 1000, 2*MB) + nodeWithMissingCapacity := BuildTestNode("n1", 1000, 2*MiB) nodeWithMissingCapacity.Status.Capacity = apiv1.ResourceList{} cores, memory = getNodeCoresAndMemory(nodeWithMissingCapacity) diff --git a/cluster-autoscaler/estimator/basic_estimator.go b/cluster-autoscaler/estimator/basic_estimator.go new file mode 100644 index 000000000000..235ea42e9263 --- /dev/null +++ b/cluster-autoscaler/estimator/basic_estimator.go @@ -0,0 +1,153 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package estimator + +import ( + "bytes" + "fmt" + "math" + + apiv1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/klog" + schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" +) + +const basicEstimatorDeprecationMessage = "WARNING: basic estimator is deprecated. It will be removed in Cluster Autoscaler 1.5." + +// BasicNodeEstimator estimates the number of needed nodes to handle the given amount of pods. +// It will never overestimate the number of nodes but is quite likely to provide a number that +// is too small. +// +// Deprecated. +// TODO(aleksandra-malinowska): remove this in 1.5. +type BasicNodeEstimator struct { + cpuSum resource.Quantity + memorySum resource.Quantity + portSum map[int32]int + FittingPods map[*apiv1.Pod]struct{} +} + +// NewBasicNodeEstimator builds BasicNodeEstimator. +func NewBasicNodeEstimator() *BasicNodeEstimator { + klog.Warning(basicEstimatorDeprecationMessage) + return &BasicNodeEstimator{ + portSum: make(map[int32]int), + FittingPods: make(map[*apiv1.Pod]struct{}), + } +} + +// Add adds Pod to the estimation. 
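The MB-to-MiB renames running through the tests above are more than cosmetic: the constant is binary, and confusing it with the decimal megabyte silently skews capacity math by roughly 5%. For reference, a sketch of the values involved; the MiB/GiB constants mirror the utils/units package used elsewhere in this diff, while the decimal MB is shown only for contrast:

```go
const (
	MiB = 1024 * 1024 // 1048576 bytes; what the tests above now use
	GiB = 1024 * MiB  // 1073741824 bytes
	MB  = 1000 * 1000 // decimal megabyte, about 4.9% smaller than MiB
)
```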
+func (basicEstimator *BasicNodeEstimator) Add(pod *apiv1.Pod) error { + ports := make(map[int32]struct{}) + for _, container := range pod.Spec.Containers { + if request, ok := container.Resources.Requests[apiv1.ResourceCPU]; ok { + basicEstimator.cpuSum.Add(request) + } + if request, ok := container.Resources.Requests[apiv1.ResourceMemory]; ok { + basicEstimator.memorySum.Add(request) + } + for _, port := range container.Ports { + if port.HostPort > 0 { + ports[port.HostPort] = struct{}{} + } + } + } + for port := range ports { + if sum, ok := basicEstimator.portSum[port]; ok { + basicEstimator.portSum[port] = sum + 1 + } else { + basicEstimator.portSum[port] = 1 + } + } + basicEstimator.FittingPods[pod] = struct{}{} + return nil +} + +func maxInt(a, b int) int { + if a > b { + return a + } + return b +} + +// GetDebug returns debug information about the current state of BasicNodeEstimator +func (basicEstimator *BasicNodeEstimator) GetDebug() string { + var buffer bytes.Buffer + buffer.WriteString("Resources needed:\n") + buffer.WriteString(fmt.Sprintf("CPU: %s\n", basicEstimator.cpuSum.String())) + buffer.WriteString(fmt.Sprintf("Mem: %s\n", basicEstimator.memorySum.String())) + for port, count := range basicEstimator.portSum { + buffer.WriteString(fmt.Sprintf("Port %d: %d\n", port, count)) + } + return buffer.String() +} + +// Estimate estimates the number needed of nodes of the given shape. +func (basicEstimator *BasicNodeEstimator) Estimate(pods []*apiv1.Pod, nodeInfo *schedulercache.NodeInfo, upcomingNodes []*schedulercache.NodeInfo) int { + for _, pod := range pods { + basicEstimator.Add(pod) + } + + result := 0 + resources := apiv1.ResourceList{} + for _, node := range upcomingNodes { + cpu := resources[apiv1.ResourceCPU] + cpu.Add(node.Node().Status.Capacity[apiv1.ResourceCPU]) + resources[apiv1.ResourceCPU] = cpu + + mem := resources[apiv1.ResourceMemory] + mem.Add(node.Node().Status.Capacity[apiv1.ResourceMemory]) + resources[apiv1.ResourceMemory] = mem + + pods := resources[apiv1.ResourcePods] + pods.Add(node.Node().Status.Capacity[apiv1.ResourcePods]) + resources[apiv1.ResourcePods] = pods + } + + node := nodeInfo.Node() + if cpuCapacity, ok := node.Status.Capacity[apiv1.ResourceCPU]; ok { + comingCpu := resources[apiv1.ResourceCPU] + prop := int(math.Ceil(float64( + basicEstimator.cpuSum.MilliValue()-comingCpu.MilliValue()) / + float64(cpuCapacity.MilliValue()))) + + result = maxInt(result, prop) + } + if memCapacity, ok := node.Status.Capacity[apiv1.ResourceMemory]; ok { + comingMem := resources[apiv1.ResourceMemory] + prop := int(math.Ceil(float64( + basicEstimator.memorySum.Value()-comingMem.Value()) / + float64(memCapacity.Value()))) + result = maxInt(result, prop) + } + if podCapacity, ok := node.Status.Capacity[apiv1.ResourcePods]; ok { + comingPods := resources[apiv1.ResourcePods] + prop := int(math.Ceil(float64(basicEstimator.GetCount()-int(comingPods.Value())) / + float64(podCapacity.Value()))) + result = maxInt(result, prop) + } + for _, count := range basicEstimator.portSum { + result = maxInt(result, count-len(upcomingNodes)) + } + return result +} + +// GetCount returns number of pods included in the estimation. 
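To make the ceiling arithmetic in Estimate above concrete, here is a hypothetical trace using the values from TestEstimate in the new test file below (5 pods of 500m CPU and 1000MiB each, a node shape offering 1500m CPU, 2000MiB and 10 pods, and no upcoming nodes); the fragment reuses the maxInt helper defined above:

```go
cpuNodes := int(math.Ceil(2500.0 / 1500.0)) // 5 pods * 500m = 2500m -> 2 nodes
memNodes := int(math.Ceil(5000.0 / 2000.0)) // 5 pods * 1000MiB     -> 3 nodes
podNodes := int(math.Ceil(5.0 / 10.0))      // 5 pods, 10 per node  -> 1 node
estimate := maxInt(maxInt(cpuNodes, memNodes), podNodes)
_ = estimate // == 3: memory is the binding resource, matching the test's assertion
```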
+func (basicEstimator *BasicNodeEstimator) GetCount() int { + return len(basicEstimator.FittingPods) +} diff --git a/cluster-autoscaler/estimator/basic_estimator_test.go b/cluster-autoscaler/estimator/basic_estimator_test.go new file mode 100644 index 000000000000..46c56378f145 --- /dev/null +++ b/cluster-autoscaler/estimator/basic_estimator_test.go @@ -0,0 +1,152 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package estimator + +import ( + "testing" + + apiv1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/autoscaler/cluster-autoscaler/utils/units" + schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + + "github.com/stretchr/testify/assert" +) + +func makePod(cpuPerPod, memoryPerPod int64) *apiv1.Pod { + return &apiv1.Pod{ + Spec: apiv1.PodSpec{ + Containers: []apiv1.Container{ + { + Resources: apiv1.ResourceRequirements{ + Requests: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewMilliQuantity(cpuPerPod, resource.DecimalSI), + apiv1.ResourceMemory: *resource.NewQuantity(memoryPerPod, resource.DecimalSI), + }, + }, + }, + }, + }, + } +} + +func TestEstimate(t *testing.T) { + cpuPerPod := int64(500) + memoryPerPod := int64(1000 * units.MiB) + pod := makePod(cpuPerPod, memoryPerPod) + + pods := []*apiv1.Pod{} + for i := 0; i < 5; i++ { + podCopy := *pod + pods = append(pods, &podCopy) + } + + node := &apiv1.Node{ + Status: apiv1.NodeStatus{ + Capacity: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewMilliQuantity(3*cpuPerPod, resource.DecimalSI), + apiv1.ResourceMemory: *resource.NewQuantity(2*memoryPerPod, resource.DecimalSI), + apiv1.ResourcePods: *resource.NewQuantity(10, resource.DecimalSI), + }, + }, + } + nodeInfo := schedulercache.NewNodeInfo() + nodeInfo.SetNode(node) + + estimator := NewBasicNodeEstimator() + estimate := estimator.Estimate(pods, nodeInfo, []*schedulercache.NodeInfo{}) + + // Check result. + assert.Equal(t, 3, estimate) + + // Check internal state of estimator. 
+ assert.Equal(t, int64(500*5), estimator.cpuSum.MilliValue()) + assert.Equal(t, int64(5*memoryPerPod), estimator.memorySum.Value()) + assert.Equal(t, 5, estimator.GetCount()) + assert.Contains(t, estimator.GetDebug(), "CPU") +} + +func TestEstimateWithComing(t *testing.T) { + cpuPerPod := int64(500) + memoryPerPod := int64(1000 * units.MiB) + + pod := makePod(cpuPerPod, memoryPerPod) + pods := []*apiv1.Pod{} + for i := 0; i < 5; i++ { + podCopy := *pod + pods = append(pods, &podCopy) + } + + node := &apiv1.Node{ + Status: apiv1.NodeStatus{ + Capacity: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewMilliQuantity(3*cpuPerPod, resource.DecimalSI), + apiv1.ResourceMemory: *resource.NewQuantity(2*memoryPerPod, resource.DecimalSI), + apiv1.ResourcePods: *resource.NewQuantity(10, resource.DecimalSI), + }, + }, + } + node.Status.Allocatable = node.Status.Capacity + nodeInfo := schedulercache.NewNodeInfo() + nodeInfo.SetNode(node) + + estimator := NewBasicNodeEstimator() + estimate := estimator.Estimate(pods, nodeInfo, []*schedulercache.NodeInfo{nodeInfo, nodeInfo}) + + // Check result. + assert.Equal(t, 1, estimate) + + // Check internal state of estimator. + assert.Contains(t, estimator.GetDebug(), "CPU") + assert.Equal(t, int64(500*5), estimator.cpuSum.MilliValue()) + assert.Equal(t, int64(5*memoryPerPod), estimator.memorySum.Value()) + assert.Equal(t, 5, estimator.GetCount()) + +} + +func TestEstimateWithPorts(t *testing.T) { + cpuPerPod := int64(500) + memoryPerPod := int64(1000 * units.MiB) + + pod := makePod(cpuPerPod, memoryPerPod) + pod.Spec.Containers[0].Ports = []apiv1.ContainerPort{ + { + HostPort: 5555, + }, + } + + pods := []*apiv1.Pod{} + for i := 0; i < 5; i++ { + pods = append(pods, pod) + } + node := &apiv1.Node{ + Status: apiv1.NodeStatus{ + Capacity: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewMilliQuantity(3*cpuPerPod, resource.DecimalSI), + apiv1.ResourceMemory: *resource.NewQuantity(2*memoryPerPod, resource.DecimalSI), + apiv1.ResourcePods: *resource.NewQuantity(10, resource.DecimalSI), + }, + }, + } + nodeInfo := schedulercache.NewNodeInfo() + nodeInfo.SetNode(node) + + estimator := NewBasicNodeEstimator() + estimate := estimator.Estimate(pods, nodeInfo, []*schedulercache.NodeInfo{}) + assert.Contains(t, estimator.GetDebug(), "CPU") + assert.Equal(t, 5, estimate) +} diff --git a/cluster-autoscaler/estimator/binpacking_estimator.go b/cluster-autoscaler/estimator/binpacking_estimator.go index 0dcf8b37bea2..6fbce8106a2c 100644 --- a/cluster-autoscaler/estimator/binpacking_estimator.go +++ b/cluster-autoscaler/estimator/binpacking_estimator.go @@ -51,7 +51,7 @@ func NewBinpackingNodeEstimator(predicateChecker *simulator.PredicateChecker) *B // It is assumed that all pods from the given list can fit to nodeTemplate. // Returns the number of nodes needed to accommodate all pods from the list. func (estimator *BinpackingNodeEstimator) Estimate(pods []*apiv1.Pod, nodeTemplate *schedulercache.NodeInfo, - comingNodes []*schedulercache.NodeInfo) int { + upcomingNodes []*schedulercache.NodeInfo) int { podInfos := calculatePodScore(pods, nodeTemplate) sort.Slice(podInfos, func(i, j int) bool { return podInfos[i].score > podInfos[j].score }) @@ -66,7 +66,7 @@ func (estimator *BinpackingNodeEstimator) Estimate(pods []*apiv1.Pod, nodeTempla } newNodes := make([]*schedulercache.NodeInfo, 0) - newNodes = append(newNodes, comingNodes...) + newNodes = append(newNodes, upcomingNodes...) 
for _, podInfo := range podInfos { found := false @@ -81,7 +81,7 @@ func (estimator *BinpackingNodeEstimator) Estimate(pods []*apiv1.Pod, nodeTempla newNodes = append(newNodes, nodeWithPod(nodeTemplate, podInfo.pod)) } } - return len(newNodes) - len(comingNodes) + return len(newNodes) - len(upcomingNodes) } // Calculates score for all pods and returns podInfo structure. diff --git a/cluster-autoscaler/estimator/binpacking_estimator_test.go b/cluster-autoscaler/estimator/binpacking_estimator_test.go index 6a5463371a84..e71fc1fe49f3 100644 --- a/cluster-autoscaler/estimator/binpacking_estimator_test.go +++ b/cluster-autoscaler/estimator/binpacking_estimator_test.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "k8s.io/autoscaler/cluster-autoscaler/simulator" . "k8s.io/autoscaler/cluster-autoscaler/utils/test" + "k8s.io/autoscaler/cluster-autoscaler/utils/units" schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" "github.com/stretchr/testify/assert" @@ -33,7 +34,7 @@ func TestBinpackingEstimate(t *testing.T) { estimator := NewBinpackingNodeEstimator(simulator.NewTestPredicateChecker()) cpuPerPod := int64(350) - memoryPerPod := int64(1000 * 1024 * 1024) + memoryPerPod := int64(1000 * units.MiB) pod := makePod(cpuPerPod, memoryPerPod) pods := make([]*apiv1.Pod, 0) @@ -62,7 +63,7 @@ func TestBinpackingEstimateComingNodes(t *testing.T) { estimator := NewBinpackingNodeEstimator(simulator.NewTestPredicateChecker()) cpuPerPod := int64(350) - memoryPerPod := int64(1000 * 1024 * 1024) + memoryPerPod := int64(1000 * units.MiB) pod := makePod(cpuPerPod, memoryPerPod) pods := make([]*apiv1.Pod, 0) @@ -92,7 +93,7 @@ func TestBinpackingEstimateWithPorts(t *testing.T) { estimator := NewBinpackingNodeEstimator(simulator.NewTestPredicateChecker()) cpuPerPod := int64(200) - memoryPerPod := int64(1000 * 1024 * 1024) + memoryPerPod := int64(1000 * units.MiB) pod := makePod(cpuPerPod, memoryPerPod) pod.Spec.Containers[0].Ports = []apiv1.ContainerPort{ { diff --git a/cluster-autoscaler/estimator/estimator.go b/cluster-autoscaler/estimator/estimator.go index 364f172ea3df..54818e17764b 100644 --- a/cluster-autoscaler/estimator/estimator.go +++ b/cluster-autoscaler/estimator/estimator.go @@ -17,12 +17,11 @@ limitations under the License. package estimator import ( - "bytes" "fmt" - "math" apiv1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/autoscaler/cluster-autoscaler/simulator" + "k8s.io/klog" schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" ) @@ -33,126 +32,35 @@ const ( BinpackingEstimatorName = "binpacking" ) -// AvailableEstimators is a list of available estimators. -var AvailableEstimators = []string{BasicEstimatorName, BinpackingEstimatorName} - -// BasicNodeEstimator estimates the number of needed nodes to handle the given amount of pods. -// It will never overestimate the number of nodes but is quite likely to provide a number that -// is too small. -type BasicNodeEstimator struct { - cpuSum resource.Quantity - memorySum resource.Quantity - portSum map[int32]int - FittingPods map[*apiv1.Pod]struct{} -} - -// NewBasicNodeEstimator builds BasicNodeEstimator. -func NewBasicNodeEstimator() *BasicNodeEstimator { - return &BasicNodeEstimator{ - portSum: make(map[int32]int), - FittingPods: make(map[*apiv1.Pod]struct{}), - } -} - -// Add adds Pod to the estimation. 
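Note that the comingNodes-to-upcomingNodes rename above is purely cosmetic: the binpacking estimator still seeds its simulated node list with the upcoming nodes and reports only the delta, len(newNodes) - len(upcomingNodes), so upcoming capacity is never double-counted.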
-func (basicEstimator *BasicNodeEstimator) Add(pod *apiv1.Pod) error { - ports := make(map[int32]struct{}) - for _, container := range pod.Spec.Containers { - if request, ok := container.Resources.Requests[apiv1.ResourceCPU]; ok { - basicEstimator.cpuSum.Add(request) - } - if request, ok := container.Resources.Requests[apiv1.ResourceMemory]; ok { - basicEstimator.memorySum.Add(request) - } - for _, port := range container.Ports { - if port.HostPort > 0 { - ports[port.HostPort] = struct{}{} - } - } - } - for port := range ports { - if sum, ok := basicEstimator.portSum[port]; ok { - basicEstimator.portSum[port] = sum + 1 - } else { - basicEstimator.portSum[port] = 1 - } - } - basicEstimator.FittingPods[pod] = struct{}{} - return nil +func deprecated(name string) string { + return fmt.Sprintf("%s (DEPRECATED)", name) } -func maxInt(a, b int) int { - if a > b { - return a - } - return b -} +// AvailableEstimators is a list of available estimators. +var AvailableEstimators = []string{BinpackingEstimatorName, deprecated(BasicEstimatorName)} -// GetDebug returns debug information about the current state of BasicNodeEstimator -func (basicEstimator *BasicNodeEstimator) GetDebug() string { - var buffer bytes.Buffer - buffer.WriteString("Resources needed:\n") - buffer.WriteString(fmt.Sprintf("CPU: %s\n", basicEstimator.cpuSum.String())) - buffer.WriteString(fmt.Sprintf("Mem: %s\n", basicEstimator.memorySum.String())) - for port, count := range basicEstimator.portSum { - buffer.WriteString(fmt.Sprintf("Port %d: %d\n", port, count)) - } - return buffer.String() +// Estimator calculates the number of nodes of given type needed to schedule pods. +type Estimator interface { + Estimate([]*apiv1.Pod, *schedulercache.NodeInfo, []*schedulercache.NodeInfo) int } -// Estimate estimates the number needed of nodes of the given shape. 
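The estimator.go rewrite unfolding in this hunk replaces the old inline implementation with a small Estimator interface plus a builder factory; selecting `--estimator=basic` still works, but now routes through NewEstimatorBuilder and emits the deprecation warning. A hedged sketch of the intended call pattern, with all variable names illustrative rather than taken from the patch:

```go
builder, err := estimator.NewEstimatorBuilder(estimator.BinpackingEstimatorName)
if err != nil {
	klog.Fatalf("Failed to build estimator: %v", err) // unknown estimator name
}
est := builder(predicateChecker) // a *simulator.PredicateChecker from the autoscaling context
needed := est.Estimate(pendingPods, templateNodeInfo, upcomingNodeInfos)
_ = needed
```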
-func (basicEstimator *BasicNodeEstimator) Estimate(node *apiv1.Node, comingNodes []*schedulercache.NodeInfo) (int, string) { - var buffer bytes.Buffer - buffer.WriteString("Needed nodes according to:\n") - result := 0 - - resources := apiv1.ResourceList{} - for _, node := range comingNodes { - cpu := resources[apiv1.ResourceCPU] - cpu.Add(node.Node().Status.Capacity[apiv1.ResourceCPU]) - resources[apiv1.ResourceCPU] = cpu - - mem := resources[apiv1.ResourceMemory] - mem.Add(node.Node().Status.Capacity[apiv1.ResourceMemory]) - resources[apiv1.ResourceMemory] = mem - - pods := resources[apiv1.ResourcePods] - pods.Add(node.Node().Status.Capacity[apiv1.ResourcePods]) - resources[apiv1.ResourcePods] = pods - } - - if cpuCapacity, ok := node.Status.Capacity[apiv1.ResourceCPU]; ok { - comingCpu := resources[apiv1.ResourceCPU] - prop := int(math.Ceil(float64( - basicEstimator.cpuSum.MilliValue()-comingCpu.MilliValue()) / - float64(cpuCapacity.MilliValue()))) - - buffer.WriteString(fmt.Sprintf("CPU: %d\n", prop)) - result = maxInt(result, prop) - } - if memCapacity, ok := node.Status.Capacity[apiv1.ResourceMemory]; ok { - comingMem := resources[apiv1.ResourceMemory] - prop := int(math.Ceil(float64( - basicEstimator.memorySum.Value()-comingMem.Value()) / - float64(memCapacity.Value()))) - buffer.WriteString(fmt.Sprintf("Mem: %d\n", prop)) - result = maxInt(result, prop) - } - if podCapacity, ok := node.Status.Capacity[apiv1.ResourcePods]; ok { - comingPods := resources[apiv1.ResourcePods] - prop := int(math.Ceil(float64(basicEstimator.GetCount()-int(comingPods.Value())) / - float64(podCapacity.Value()))) - buffer.WriteString(fmt.Sprintf("Pods: %d\n", prop)) - result = maxInt(result, prop) +// EstimatorBuilder creates a new estimator object. +type EstimatorBuilder func(*simulator.PredicateChecker) Estimator + +// NewEstimatorBuilder creates a new estimator object from flag. +func NewEstimatorBuilder(name string) (EstimatorBuilder, error) { + switch name { + case BinpackingEstimatorName: + return func(predicateChecker *simulator.PredicateChecker) Estimator { + return NewBinpackingNodeEstimator(predicateChecker) + }, nil + // Deprecated. + // TODO(aleksandra-malinowska): remove in 1.5. + case BasicEstimatorName: + klog.Warning(basicEstimatorDeprecationMessage) + return func(_ *simulator.PredicateChecker) Estimator { + return NewBasicNodeEstimator() + }, nil } - for port, count := range basicEstimator.portSum { - buffer.WriteString(fmt.Sprintf("Port %d: %d\n", port, count)) - result = maxInt(result, count-len(comingNodes)) - } - return result, buffer.String() -} - -// GetCount returns number of pods included in the estimation. -func (basicEstimator *BasicNodeEstimator) GetCount() int { - return len(basicEstimator.FittingPods) + return nil, fmt.Errorf("Unknown estimator: %s", name) } diff --git a/cluster-autoscaler/estimator/estimator_test.go b/cluster-autoscaler/estimator/estimator_test.go deleted file mode 100644 index ce29df3db13d..000000000000 --- a/cluster-autoscaler/estimator/estimator_test.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package estimator - -import ( - "testing" - - apiv1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" - - "github.com/stretchr/testify/assert" -) - -func makePod(cpuPerPod, memoryPerPod int64) *apiv1.Pod { - return &apiv1.Pod{ - Spec: apiv1.PodSpec{ - Containers: []apiv1.Container{ - { - Resources: apiv1.ResourceRequirements{ - Requests: apiv1.ResourceList{ - apiv1.ResourceCPU: *resource.NewMilliQuantity(cpuPerPod, resource.DecimalSI), - apiv1.ResourceMemory: *resource.NewQuantity(memoryPerPod, resource.DecimalSI), - }, - }, - }, - }, - }, - } -} - -func TestEstimate(t *testing.T) { - cpuPerPod := int64(500) - memoryPerPod := int64(1000 * 1024 * 1024) - pod := makePod(cpuPerPod, memoryPerPod) - - estimator := NewBasicNodeEstimator() - for i := 0; i < 5; i++ { - podCopy := *pod - estimator.Add(&podCopy) - } - - assert.Equal(t, int64(500*5), estimator.cpuSum.MilliValue()) - assert.Equal(t, int64(5*memoryPerPod), estimator.memorySum.Value()) - assert.Equal(t, 5, estimator.GetCount()) - - node := &apiv1.Node{ - Status: apiv1.NodeStatus{ - Capacity: apiv1.ResourceList{ - apiv1.ResourceCPU: *resource.NewMilliQuantity(3*cpuPerPod, resource.DecimalSI), - apiv1.ResourceMemory: *resource.NewQuantity(2*memoryPerPod, resource.DecimalSI), - apiv1.ResourcePods: *resource.NewQuantity(10, resource.DecimalSI), - }, - }, - } - estimate, report := estimator.Estimate(node, []*schedulercache.NodeInfo{}) - assert.Contains(t, estimator.GetDebug(), "CPU") - assert.Contains(t, report, "CPU") - assert.Equal(t, 3, estimate) -} - -func TestEstimateWithComing(t *testing.T) { - cpuPerPod := int64(500) - memoryPerPod := int64(1000 * 1024 * 1024) - - pod := makePod(cpuPerPod, memoryPerPod) - estimator := NewBasicNodeEstimator() - - for i := 0; i < 5; i++ { - podCopy := *pod - estimator.Add(&podCopy) - } - - assert.Equal(t, int64(500*5), estimator.cpuSum.MilliValue()) - assert.Equal(t, int64(5*memoryPerPod), estimator.memorySum.Value()) - assert.Equal(t, 5, estimator.GetCount()) - - node := &apiv1.Node{ - Status: apiv1.NodeStatus{ - Capacity: apiv1.ResourceList{ - apiv1.ResourceCPU: *resource.NewMilliQuantity(3*cpuPerPod, resource.DecimalSI), - apiv1.ResourceMemory: *resource.NewQuantity(2*memoryPerPod, resource.DecimalSI), - apiv1.ResourcePods: *resource.NewQuantity(10, resource.DecimalSI), - }, - }, - } - node.Status.Allocatable = node.Status.Capacity - nodeInfo := schedulercache.NewNodeInfo() - nodeInfo.SetNode(node) - - estimate, report := estimator.Estimate(node, []*schedulercache.NodeInfo{nodeInfo, nodeInfo}) - assert.Contains(t, estimator.GetDebug(), "CPU") - assert.Contains(t, report, "CPU") - assert.Equal(t, 1, estimate) -} - -func TestEstimateWithPorts(t *testing.T) { - cpuPerPod := int64(500) - memoryPerPod := int64(1000 * 1024 * 1024) - - pod := makePod(cpuPerPod, memoryPerPod) - pod.Spec.Containers[0].Ports = []apiv1.ContainerPort{ - { - HostPort: 5555, - }, - } - - estimator := NewBasicNodeEstimator() - for i := 0; i < 5; i++ { - estimator.Add(pod) - } - node := &apiv1.Node{ - Status: apiv1.NodeStatus{ - Capacity: apiv1.ResourceList{ - apiv1.ResourceCPU: *resource.NewMilliQuantity(3*cpuPerPod, resource.DecimalSI), - apiv1.ResourceMemory: *resource.NewQuantity(2*memoryPerPod, resource.DecimalSI), - apiv1.ResourcePods: *resource.NewQuantity(10, resource.DecimalSI), - }, - }, - } - - estimate, report := estimator.Estimate(node, 
[]*schedulercache.NodeInfo{}) - assert.Contains(t, estimator.GetDebug(), "CPU") - assert.Contains(t, report, "CPU") - assert.Equal(t, 5, estimate) -} diff --git a/cluster-autoscaler/expander/price/preferred.go b/cluster-autoscaler/expander/price/preferred.go index c20c35c5fabf..6b257882cde7 100644 --- a/cluster-autoscaler/expander/price/preferred.go +++ b/cluster-autoscaler/expander/price/preferred.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" + "k8s.io/autoscaler/cluster-autoscaler/utils/units" ) // SimplePreferredNodeProvider returns preferred node based on the cluster size. @@ -46,22 +47,21 @@ func (spnp *SimplePreferredNodeProvider) Node() (*apiv1.Node, error) { } size := len(nodes) - mb := int64(1024 * 1024) cpu := int64(1000) // Double node size with every time the cluster size increases 3x. if size <= 2 { - return buildNode(1*cpu, 3750*mb), nil + return buildNode(1*cpu, 3750*units.MiB), nil } else if size <= 6 { - return buildNode(2*cpu, 7500*mb), nil + return buildNode(2*cpu, 7500*units.MiB), nil } else if size <= 20 { - return buildNode(4*cpu, 15000*mb), nil + return buildNode(4*cpu, 15000*units.MiB), nil } else if size <= 60 { - return buildNode(8*cpu, 30000*mb), nil + return buildNode(8*cpu, 30000*units.MiB), nil } else if size <= 200 { - return buildNode(16*cpu, 60000*mb), nil + return buildNode(16*cpu, 60000*units.MiB), nil } - return buildNode(32*cpu, 120000*mb), nil + return buildNode(32*cpu, 120000*units.MiB), nil } func buildNode(millicpu int64, mem int64) *apiv1.Node { diff --git a/cluster-autoscaler/expander/price/price.go b/cluster-autoscaler/expander/price/price.go index d0ac669013a9..9d4f8486dda2 100644 --- a/cluster-autoscaler/expander/price/price.go +++ b/cluster-autoscaler/expander/price/price.go @@ -27,9 +27,10 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/expander" "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" + "k8s.io/autoscaler/cluster-autoscaler/utils/units" schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" - "github.com/golang/glog" + "k8s.io/klog" ) // ************* @@ -47,11 +48,11 @@ type priceBased struct { var ( // defaultPreferredNode is the node that is preferred if PreferredNodeProvider fails. // 4 cpu, 16gb ram. - defaultPreferredNode = buildNode(4*1000, 4*4*1024*1024*1024) + defaultPreferredNode = buildNode(4*1000, 4*4*units.GiB) // priceStabilizationPod is the pod cost to stabilize node_cost/pod_cost ratio a bit. // 0.5 cpu, 500 mb ram - priceStabilizationPod = buildPod("stabilize", 500, 500*1024*1024) + priceStabilizationPod = buildPod("stabilize", 500, 500*units.MiB) // Penalty given to node groups that are yet to be created. // TODO: make it a flag @@ -94,12 +95,12 @@ func (p *priceBased) BestOption(expansionOptions []expander.Option, nodeInfos ma preferredNode, err := p.preferredNodeProvider.Node() if err != nil { - glog.Errorf("Failed to get preferred node, switching to default: %v", err) + klog.Errorf("Failed to get preferred node, switching to default: %v", err) preferredNode = defaultPreferredNode } stabilizationPrice, err := p.pricingModel.PodPrice(priceStabilizationPod, now, then) if err != nil { - glog.Errorf("Failed to get price for stabilization pod: %v", err) + klog.Errorf("Failed to get price for stabilization pod: %v", err) // continuing without stabilization. 
} @@ -107,12 +108,12 @@ nextoption: for _, option := range expansionOptions { nodeInfo, found := nodeInfos[option.NodeGroup.Id()] if !found { - glog.Warningf("No node info for %s", option.NodeGroup.Id()) + klog.Warningf("No node info for %s", option.NodeGroup.Id()) continue } nodePrice, err := p.pricingModel.NodePrice(nodeInfo.Node(), now, then) if err != nil { - glog.Warningf("Failed to calculate node price for %s: %v", option.NodeGroup.Id(), err) + klog.Warningf("Failed to calculate node price for %s: %v", option.NodeGroup.Id(), err) continue } totalNodePrice := nodePrice * float64(option.NodeCount) @@ -120,7 +121,7 @@ nextoption: for _, pod := range option.Pods { podPrice, err := p.pricingModel.PodPrice(pod, now, then) if err != nil { - glog.Warningf("Failed to calculate pod price for %s/%s: %v", pod.Namespace, pod.Name, err) + klog.Warningf("Failed to calculate pod price for %s/%s: %v", pod.Namespace, pod.Name, err) continue nextoption } totalPodPrice += podPrice @@ -141,7 +142,7 @@ nextoption: // Set constant, very high unfitness to make them unattractive for pods that doesn't need GPU and // avoid optimizing them for CPU utilization. if gpu.NodeHasGpu(nodeInfo.Node()) { - glog.V(4).Infof("Price expander overriding unfitness for node group with GPU %s", option.NodeGroup.Id()) + klog.V(4).Infof("Price expander overriding unfitness for node group with GPU %s", option.NodeGroup.Id()) supressedUnfitness = gpuUnfitnessOverride } @@ -160,7 +161,7 @@ nextoption: optionScore, ) - glog.V(5).Infof("Price expander for %s: %s", option.NodeGroup.Id(), debug) + klog.V(5).Infof("Price expander for %s: %s", option.NodeGroup.Id(), debug) if bestOption == nil || bestOptionScore > optionScore { bestOption = &expander.Option{ diff --git a/cluster-autoscaler/expander/price/price_test.go b/cluster-autoscaler/expander/price/price_test.go index b9ccf80e9850..b3b8a62de3d6 100644 --- a/cluster-autoscaler/expander/price/price_test.go +++ b/cluster-autoscaler/expander/price/price_test.go @@ -22,6 +22,7 @@ import ( "time" "k8s.io/autoscaler/cluster-autoscaler/expander" + "k8s.io/autoscaler/cluster-autoscaler/utils/units" apiv1 "k8s.io/api/core/v1" testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test" @@ -115,7 +116,7 @@ func TestPriceExpander(t *testing.T) { }, }, &testPreferredNodeProvider{ - preferred: buildNode(2000, 1024*1024*1024), + preferred: buildNode(2000, units.GiB), }, SimpleNodeUnfitness, ).BestOption(options, nodeInfosForGroups).Debug, "ng1") @@ -134,7 +135,7 @@ func TestPriceExpander(t *testing.T) { }, }, &testPreferredNodeProvider{ - preferred: buildNode(4000, 1024*1024*1024), + preferred: buildNode(4000, units.GiB), }, SimpleNodeUnfitness, ).BestOption(options, nodeInfosForGroups).Debug, "ng2") @@ -169,7 +170,7 @@ func TestPriceExpander(t *testing.T) { }, }, &testPreferredNodeProvider{ - preferred: buildNode(4000, 1024*1024*1024), + preferred: buildNode(4000, units.GiB), }, SimpleNodeUnfitness, ).BestOption(options1b, nodeInfosForGroups).Debug, "ng1") @@ -188,7 +189,7 @@ func TestPriceExpander(t *testing.T) { }, }, &testPreferredNodeProvider{ - preferred: buildNode(2000, 1024*1024*1024), + preferred: buildNode(2000, units.GiB), }, SimpleNodeUnfitness, ).BestOption(options, nodeInfosForGroups).Debug, "ng2") @@ -224,7 +225,7 @@ func TestPriceExpander(t *testing.T) { }, }, &testPreferredNodeProvider{ - preferred: buildNode(2000, 1024*1024*1024), + preferred: buildNode(2000, units.GiB), }, SimpleNodeUnfitness, ).BestOption(options2, nodeInfosForGroups).Debug, "ng2") @@ -236,7 +237,7 
@@ func TestPriceExpander(t *testing.T) { nodePrice: map[string]float64{}, }, &testPreferredNodeProvider{ - preferred: buildNode(2000, 1024*1024*1024), + preferred: buildNode(2000, units.GiB), }, SimpleNodeUnfitness, ).BestOption(options2, nodeInfosForGroups)) @@ -280,7 +281,7 @@ func TestPriceExpander(t *testing.T) { }, }, &testPreferredNodeProvider{ - preferred: buildNode(2000, 1024*1024*1024), + preferred: buildNode(2000, units.GiB), }, SimpleNodeUnfitness, ).BestOption(options3, nodeInfosForGroups).Debug, "ng2") @@ -300,7 +301,7 @@ func TestPriceExpander(t *testing.T) { }, }, &testPreferredNodeProvider{ - preferred: buildNode(2000, 1024*1024*1024), + preferred: buildNode(2000, units.GiB), }, SimpleNodeUnfitness, ).BestOption(options3, nodeInfosForGroups).Debug, "ng3") diff --git a/cluster-autoscaler/expander/waste/waste.go b/cluster-autoscaler/expander/waste/waste.go index 749262c8b850..916e80712f11 100644 --- a/cluster-autoscaler/expander/waste/waste.go +++ b/cluster-autoscaler/expander/waste/waste.go @@ -17,11 +17,11 @@ limitations under the License. package waste import ( - "github.com/golang/glog" apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/autoscaler/cluster-autoscaler/expander" "k8s.io/autoscaler/cluster-autoscaler/expander/random" + "k8s.io/klog" schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" ) @@ -43,7 +43,7 @@ func (l *leastwaste) BestOption(expansionOptions []expander.Option, nodeInfo map requestedCPU, requestedMemory := resourcesForPods(option.Pods) node, found := nodeInfo[option.NodeGroup.Id()] if !found { - glog.Errorf("No node info for: %s", option.NodeGroup.Id()) + klog.Errorf("No node info for: %s", option.NodeGroup.Id()) continue } @@ -54,7 +54,7 @@ func (l *leastwaste) BestOption(expansionOptions []expander.Option, nodeInfo map wastedMemory := float64(availMemory-requestedMemory.Value()) / float64(availMemory) wastedScore := wastedCPU + wastedMemory - glog.V(1).Infof("Expanding Node Group %s would waste %0.2f%% CPU, %0.2f%% Memory, %0.2f%% Blended\n", option.NodeGroup.Id(), wastedCPU*100.0, wastedMemory*100.0, wastedScore*50.0) + klog.V(1).Infof("Expanding Node Group %s would waste %0.2f%% CPU, %0.2f%% Memory, %0.2f%% Blended\n", option.NodeGroup.Id(), wastedCPU*100.0, wastedMemory*100.0, wastedScore*50.0) if wastedScore == leastWastedScore { leastWastedOptions = append(leastWastedOptions, option) diff --git a/cluster-autoscaler/expander/waste/waste_test.go b/cluster-autoscaler/expander/waste/waste_test.go index 958e6d4d33d2..7902d654c1a7 100644 --- a/cluster-autoscaler/expander/waste/waste_test.go +++ b/cluster-autoscaler/expander/waste/waste_test.go @@ -42,7 +42,9 @@ func (f *FakeNodeGroup) DecreaseTargetSize(delta int) error { return nil } func (f *FakeNodeGroup) DeleteNodes([]*apiv1.Node) error { return nil } func (f *FakeNodeGroup) Id() string { return f.id } func (f *FakeNodeGroup) Debug() string { return f.id } -func (f *FakeNodeGroup) Nodes() ([]string, error) { return []string{}, nil } +func (f *FakeNodeGroup) Nodes() ([]cloudprovider.Instance, error) { + return []cloudprovider.Instance{}, nil +} func (f *FakeNodeGroup) TemplateNodeInfo() (*schedulercache.NodeInfo, error) { return nil, cloudprovider.ErrNotImplemented } diff --git a/cluster-autoscaler/kubernetes.sync b/cluster-autoscaler/kubernetes.sync index 86fa15378037..b980677cccc2 100644 --- a/cluster-autoscaler/kubernetes.sync +++ b/cluster-autoscaler/kubernetes.sync @@ -1,26 +1,6 @@ -commit 8e329f1c3cbb1354c27b6135990c5544f14d2a17 -Merge: 4bc9e94fee 07ebe323fa 
-Author: Kubernetes Submit Queue -Date: Thu Sep 6 00:02:02 2018 -0700 - - Merge pull request #68312 from msau42/fix-e2e - - Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md. - - Fix gce localssd pv tests - - **What this PR does / why we need it**: - When running local PV tests against GCE local SSD, it directly uses the disk so doesn't need to create a tmp dir like the other test formats. Fsgroup tests do not create test-file so don't error on cleanup if the file doesn't exist. - - - **Which issue(s) this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close the issue(s) when PR gets merged)*: - Fixes #68308 - - **Special notes for your reviewer**: - - **Release note**: - - ```release-note - NONE - ``` +commit 80b400f60b7980558614cc3dca1cffa452d8dd57 +Merge: 40f8a2022c 7098f1ad38 +Author: Yang Li +Date: Sat Nov 24 23:54:49 2018 +0800 + Merge remote-tracking branch 'origin/master' into release-1.13 diff --git a/cluster-autoscaler/main.go b/cluster-autoscaler/main.go index c47d66ef5d2a..56d361e6d291 100644 --- a/cluster-autoscaler/main.go +++ b/cluster-autoscaler/main.go @@ -38,6 +38,8 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/estimator" "k8s.io/autoscaler/cluster-autoscaler/expander" "k8s.io/autoscaler/cluster-autoscaler/metrics" + ca_processors "k8s.io/autoscaler/cluster-autoscaler/processors" + "k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupset" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" "k8s.io/autoscaler/cluster-autoscaler/utils/units" @@ -48,9 +50,9 @@ import ( "k8s.io/client-go/tools/leaderelection/resourcelock" "k8s.io/kubernetes/pkg/client/leaderelectionconfig" - "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" "github.com/spf13/pflag" + "k8s.io/klog" ) // MultiStringFlag is a flag for passing multiple parameters using same flag @@ -97,7 +99,7 @@ var ( "Maximum number of non empty nodes considered in one iteration as candidates for scale down with drain."+ "Lower value means better CA responsiveness but possible slower scale down latency."+ "Higher value can affect CA performance with big clusters (hundreds of nodes)."+ - "Set to non posistive value to turn this heuristic off - CA will not limit the number of nodes it considers.") + "Set to non positive value to turn this heuristic off - CA will not limit the number of nodes it considers.") scaleDownCandidatesPoolRatio = flag.Float64("scale-down-candidates-pool-ratio", 0.1, "A ratio of nodes that are considered as additional non empty candidates for"+ "scale down when some candidates from previous iteration are no longer valid."+ @@ -138,6 +140,11 @@ var ( expanderFlag = flag.String("expander", expander.RandomExpanderName, "Type of node group expander to be used in scale up. 
Available values: ["+strings.Join(expander.AvailableExpanders, ",")+"]") + ignoreDaemonSetsUtilization = flag.Bool("ignore-daemonsets-utilization", false, + "Should CA ignore DaemonSet pods when calculating resource utilization for scaling down") + ignoreMirrorPodsUtilization = flag.Bool("ignore-mirror-pods-utilization", false, + "Should CA ignore Mirror pods when calculating resource utilization for scaling down") + writeStatusConfigMapFlag = flag.Bool("write-status-configmap", true, "Should CA write status information to a configmap") maxInactivityTimeFlag = flag.Duration("max-inactivity", 10*time.Minute, "Maximum time from last recorded autoscaler activity before automatic restart") maxFailingTimeFlag = flag.Duration("max-failing-time", 15*time.Minute, "Maximum time from last recorded successful autoscaler run before automatic restart") @@ -148,24 +155,25 @@ var ( unremovableNodeRecheckTimeout = flag.Duration("unremovable-node-recheck-timeout", 5*time.Minute, "The timeout before we check again a node that couldn't be removed before") expendablePodsPriorityCutoff = flag.Int("expendable-pods-priority-cutoff", -10, "Pods with priority below cutoff will be expendable. They can be killed without any consideration during scale down and they don't cause scale up. Pods with null priority (PodPriority disabled) are non expendable.") regional = flag.Bool("regional", false, "Cluster is regional.") + newPodScaleUpDelay = flag.Duration("new-pod-scale-up-delay", 0*time.Second, "Pods less than this old will not be considered for scale-up.") ) func createAutoscalingOptions() config.AutoscalingOptions { minCoresTotal, maxCoresTotal, err := parseMinMaxFlag(*coresTotal) if err != nil { - glog.Fatalf("Failed to parse flags: %v", err) + klog.Fatalf("Failed to parse flags: %v", err) } minMemoryTotal, maxMemoryTotal, err := parseMinMaxFlag(*memoryTotal) if err != nil { - glog.Fatalf("Failed to parse flags: %v", err) + klog.Fatalf("Failed to parse flags: %v", err) } // Convert memory limits to bytes. 
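The utilization and scale-up-delay knobs added above are ordinary flags, so they compose with the existing ones. An illustrative invocation, where the binary path and the chosen values are placeholders rather than recommendations:

```sh
./cluster-autoscaler \
  --ignore-daemonsets-utilization=true \
  --ignore-mirror-pods-utilization=true \
  --new-pod-scale-up-delay=30s \
  --expendable-pods-priority-cutoff=-10
```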
- minMemoryTotal = minMemoryTotal * units.Gigabyte - maxMemoryTotal = maxMemoryTotal * units.Gigabyte + minMemoryTotal = minMemoryTotal * units.GiB + maxMemoryTotal = maxMemoryTotal * units.GiB parsedGpuTotal, err := parseMultipleGpuLimits(*gpuTotal) if err != nil { - glog.Fatalf("Failed to parse flags: %v", err) + klog.Fatalf("Failed to parse flags: %v", err) } return config.AutoscalingOptions{ @@ -176,6 +184,8 @@ func createAutoscalingOptions() config.AutoscalingOptions { OkTotalUnreadyCount: *okTotalUnreadyCount, EstimatorName: *estimatorFlag, ExpanderName: *expanderFlag, + IgnoreDaemonSetsUtilization: *ignoreDaemonSetsUtilization, + IgnoreMirrorPodsUtilization: *ignoreMirrorPodsUtilization, MaxEmptyBulkDelete: *maxEmptyBulkDeleteFlag, MaxGracefulTerminationSec: *maxGracefulTerminationFlag, MaxNodeProvisionTime: *maxNodeProvisionTime, @@ -205,28 +215,29 @@ func createAutoscalingOptions() config.AutoscalingOptions { UnremovableNodeRecheckTimeout: *unremovableNodeRecheckTimeout, ExpendablePodsPriorityCutoff: *expendablePodsPriorityCutoff, Regional: *regional, + NewPodScaleUpDelay: *newPodScaleUpDelay, KubeConfigPath: *kubeConfigFile, } } func getKubeConfig() *rest.Config { if *kubeConfigFile != "" { - glog.V(1).Infof("Using kubeconfig file: %s", *kubeConfigFile) + klog.V(1).Infof("Using kubeconfig file: %s", *kubeConfigFile) // use the current context in kubeconfig config, err := clientcmd.BuildConfigFromFlags("", *kubeConfigFile) if err != nil { - glog.Fatalf("Failed to build config: %v", err) + klog.Fatalf("Failed to build config: %v", err) } return config } url, err := url.Parse(*kubernetes) if err != nil { - glog.Fatalf("Failed to parse Kubernetes url: %v", err) + klog.Fatalf("Failed to parse Kubernetes url: %v", err) } kubeConfig, err := config.GetKubeClientConfig(url) if err != nil { - glog.Fatalf("Failed to build Kubernetes client configuration: %v", err) + klog.Fatalf("Failed to build Kubernetes client configuration: %v", err) } return kubeConfig @@ -239,14 +250,14 @@ func createKubeClient(kubeConfig *rest.Config) kube_client.Interface { func registerSignalHandlers(autoscaler core.Autoscaler) { sigs := make(chan os.Signal, 1) signal.Notify(sigs, os.Interrupt, os.Kill, syscall.SIGTERM, syscall.SIGQUIT) - glog.V(1).Info("Registered cleanup signal handler") + klog.V(1).Info("Registered cleanup signal handler") go func() { <-sigs - glog.V(1).Info("Received signal, attempting cleanup") + klog.V(1).Info("Received signal, attempting cleanup") autoscaler.ExitCleanUp() - glog.V(1).Info("Cleaned up, exiting...") - glog.Flush() + klog.V(1).Info("Cleaned up, exiting...") + klog.Flush() os.Exit(0) }() } @@ -255,9 +266,16 @@ func buildAutoscaler() (core.Autoscaler, error) { // Create basic config from flags. autoscalingOptions := createAutoscalingOptions() kubeClient := createKubeClient(getKubeConfig()) + processors := ca_processors.DefaultProcessors() + if autoscalingOptions.CloudProviderName == "gke" { + processors.NodeGroupSetProcessor = &nodegroupset.BalancingNodeGroupSetProcessor{ + Comparator: nodegroupset.IsGkeNodeInfoSimilar} + + } opts := core.AutoscalerOptions{ AutoscalingOptions: autoscalingOptions, KubeClient: kubeClient, + Processors: processors, } // This metric should be published only once. @@ -272,7 +290,7 @@ func run(healthCheck *metrics.HealthCheck) { autoscaler, err := buildAutoscaler() if err != nil { - glog.Fatalf("Failed to create autoscaler: %v", err) + klog.Fatalf("Failed to create autoscaler: %v", err) } // Register signal handlers for graceful shutdown. 
@@ -304,6 +322,8 @@ func run(healthCheck *metrics.HealthCheck) { } func main() { + klog.InitFlags(nil) + leaderElection := defaultLeaderElectionConfiguration() leaderElection.LeaderElect = true @@ -311,23 +331,13 @@ func main() { kube_flag.InitFlags() healthCheck := metrics.NewHealthCheck(*maxInactivityTimeFlag, *maxFailingTimeFlag) - glog.V(1).Infof("Cluster Autoscaler %s", ClusterAutoscalerVersion) - - correctEstimator := false - for _, availableEstimator := range estimator.AvailableEstimators { - if *estimatorFlag == availableEstimator { - correctEstimator = true - } - } - if !correctEstimator { - glog.Fatalf("Unrecognized estimator: %v", *estimatorFlag) - } + klog.V(1).Infof("Cluster Autoscaler %s", ClusterAutoscalerVersion) go func() { http.Handle("/metrics", prometheus.Handler()) http.Handle("/health-check", healthCheck) err := http.ListenAndServe(*address, nil) - glog.Fatalf("Failed to start metrics: %v", err) + klog.Fatalf("Failed to start metrics: %v", err) }() if !leaderElection.LeaderElect { @@ -335,7 +345,7 @@ func main() { } else { id, err := os.Hostname() if err != nil { - glog.Fatalf("Unable to get hostname: %v", err) + klog.Fatalf("Unable to get hostname: %v", err) } kubeClient := createKubeClient(getKubeConfig()) @@ -343,7 +353,7 @@ func main() { // Validate that the client is ok. _, err = kubeClient.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { - glog.Fatalf("Failed to get nodes from apiserver: %v", err) + klog.Fatalf("Failed to get nodes from apiserver: %v", err) } lock, err := resourcelock.New( @@ -357,7 +367,7 @@ func main() { }, ) if err != nil { - glog.Fatalf("Unable to create leader election lock: %v", err) + klog.Fatalf("Unable to create leader election lock: %v", err) } leaderelection.RunOrDie(ctx.TODO(), leaderelection.LeaderElectionConfig{ @@ -372,7 +382,7 @@ func main() { run(healthCheck) }, OnStoppedLeading: func() { - glog.Fatalf("lost master") + klog.Fatalf("lost master") }, }, }) diff --git a/cluster-autoscaler/metrics/metrics.go b/cluster-autoscaler/metrics/metrics.go index 1e255318c920..77853f9f5275 100644 --- a/cluster-autoscaler/metrics/metrics.go +++ b/cluster-autoscaler/metrics/metrics.go @@ -23,8 +23,8 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" _ "k8s.io/kubernetes/pkg/client/metrics/prometheus" // for client-go metrics registration - "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" + "k8s.io/klog" ) // NodeScaleDownReason describes reason for removing node @@ -265,7 +265,7 @@ func UpdateDuration(label FunctionLabel, duration time.Duration) { // TODO(maciekpytel): remove second condition if we manage to get // asynchronous node drain if duration > LogLongDurationThreshold && label != ScaleDown { - glog.V(4).Infof("Function %s took %v to complete", label, duration) + klog.V(4).Infof("Function %s took %v to complete", label, duration) } functionDuration.WithLabelValues(string(label)).Observe(duration.Seconds()) } diff --git a/cluster-autoscaler/processors/nodegroups/nodegroup_manager.go b/cluster-autoscaler/processors/nodegroups/nodegroup_manager.go index c918ff22ef18..baedfe173477 100644 --- a/cluster-autoscaler/processors/nodegroups/nodegroup_manager.go +++ b/cluster-autoscaler/processors/nodegroups/nodegroup_manager.go @@ -24,7 +24,7 @@ import ( // NodeGroupManager is responsible for creating/deleting node groups. 
type NodeGroupManager interface {
-	CreateNodeGroup(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup) (cloudprovider.NodeGroup, errors.AutoscalerError)
+	CreateNodeGroup(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup) (CreateNodeGroupResult, errors.AutoscalerError)
 	RemoveUnneededNodeGroups(context *context.AutoscalingContext) error
 	CleanUp()
 }
@@ -35,9 +35,20 @@ type NodeGroupManager interface {
 type NoOpNodeGroupManager struct {
 }
 
+// CreateNodeGroupResult captures the result of a successful NodeGroupManager.CreateNodeGroup call.
+type CreateNodeGroupResult struct {
+	// Main created node group, matching the requested node group passed to the CreateNodeGroup call
+	MainCreatedNodeGroup cloudprovider.NodeGroup
+
+	// List of extra node groups created by the CreateNodeGroup call. Non-empty if, due to manager-specific
+	// constraints, creating one node group requires creating others (e.g. a matching node group
+	// must exist in each zone for multizonal deployments)
+	ExtraCreatedNodeGroups []cloudprovider.NodeGroup
+}
+
 // CreateNodeGroup always returns internal error. It must not be called on NoOpNodeGroupManager.
-func (*NoOpNodeGroupManager) CreateNodeGroup(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup) (cloudprovider.NodeGroup, errors.AutoscalerError) {
-	return nil, errors.NewAutoscalerError(errors.InternalError, "not implemented")
+func (*NoOpNodeGroupManager) CreateNodeGroup(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup) (CreateNodeGroupResult, errors.AutoscalerError) {
+	return CreateNodeGroupResult{}, errors.NewAutoscalerError(errors.InternalError, "not implemented")
 }
 
 // RemoveUnneededNodeGroups does nothing in NoOpNodeGroupManager
diff --git a/cluster-autoscaler/processors/nodegroupset/balancing_processor.go b/cluster-autoscaler/processors/nodegroupset/balancing_processor.go
new file mode 100644
index 000000000000..7cd0893a029a
--- /dev/null
+++ b/cluster-autoscaler/processors/nodegroupset/balancing_processor.go
@@ -0,0 +1,181 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package nodegroupset
+
+import (
+	"sort"
+
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
+	"k8s.io/autoscaler/cluster-autoscaler/context"
+	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
+	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+
+	"k8s.io/klog"
+)
+
+// BalancingNodeGroupSetProcessor tries to keep similar node groups balanced on scale-up.
+type BalancingNodeGroupSetProcessor struct {
+	Comparator NodeInfoComparator
+}
+
+// FindSimilarNodeGroups returns a list of NodeGroups similar to the given one.
+// Two groups are similar if the NodeInfos for them compare equal using IsNodeInfoSimilar.
+func (b *BalancingNodeGroupSetProcessor) FindSimilarNodeGroups(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup,
+	nodeInfosForGroups map[string]*schedulercache.NodeInfo) ([]cloudprovider.NodeGroup, errors.AutoscalerError) {
+
+	result := []cloudprovider.NodeGroup{}
+	nodeGroupId := nodeGroup.Id()
+	nodeInfo, found := nodeInfosForGroups[nodeGroupId]
+	if !found {
+		return []cloudprovider.NodeGroup{}, errors.NewAutoscalerError(
+			errors.InternalError,
+			"failed to find template node for node group %s",
+			nodeGroupId)
+	}
+	for _, ng := range context.CloudProvider.NodeGroups() {
+		ngId := ng.Id()
+		if ngId == nodeGroupId {
+			continue
+		}
+		ngNodeInfo, found := nodeInfosForGroups[ngId]
+		if !found {
+			klog.Warningf("Failed to find nodeInfo for group %v", ngId)
+			continue
+		}
+		comparator := b.Comparator
+		if comparator == nil {
+			comparator = IsNodeInfoSimilar
+		}
+		if comparator(nodeInfo, ngNodeInfo) {
+			result = append(result, ng)
+		}
+	}
+	return result, nil
+}
+
+// BalanceScaleUpBetweenGroups distributes a given number of nodes between
+// the given set of NodeGroups. The nodes are added to the smallest group first,
+// trying to make the group sizes as evenly balanced as possible.
+//
+// Returns ScaleUpInfos for groups that need to be resized.
+//
+// MaxSize of each group will be respected. If newNodes > total free capacity
+// of all NodeGroups it will be capped to total capacity. In particular, if all
+// groups are already at MaxSize, an empty list will be returned.
+func (b *BalancingNodeGroupSetProcessor) BalanceScaleUpBetweenGroups(context *context.AutoscalingContext, groups []cloudprovider.NodeGroup, newNodes int) ([]ScaleUpInfo, errors.AutoscalerError) {
+	if len(groups) == 0 {
+		return []ScaleUpInfo{}, errors.NewAutoscalerError(
+			errors.InternalError, "Can't balance scale up between 0 groups")
+	}
+
+	// get all data from cloudprovider, build data structure
+	scaleUpInfos := make([]ScaleUpInfo, 0)
+	totalCapacity := 0
+	for _, ng := range groups {
+		currentSize, err := ng.TargetSize()
+		if err != nil {
+			return []ScaleUpInfo{}, errors.NewAutoscalerError(
+				errors.CloudProviderError,
+				"failed to get node group size: %v", err)
+		}
+		maxSize := ng.MaxSize()
+		if currentSize == maxSize {
+			// group already maxed, ignore it
+			continue
+		}
+		info := ScaleUpInfo{
+			Group:       ng,
+			CurrentSize: currentSize,
+			NewSize:     currentSize,
+			MaxSize:     maxSize}
+		scaleUpInfos = append(scaleUpInfos, info)
+		totalCapacity += maxSize - currentSize
+	}
+	if totalCapacity < newNodes {
+		klog.V(2).Infof("Requested scale-up (%v) exceeds node group set capacity, capping to %v", newNodes, totalCapacity)
+		newNodes = totalCapacity
+	}
+
+	// The actual balancing algorithm.
+	// Sort the node groups by current size and just loop over nodes, adding
+	// to the smallest group. If a group hits max size, remove it from the list
+	// (by moving it to the start of the list and increasing startIndex).
+	//
+	// In each iteration we either allocate one node, or 'remove' a maxed out
+	// node group, so this will terminate in O(#nodes + #node groups) steps.
+	// We already know that newNodes <= total capacity, so we don't have to
+	// worry about accidentally removing all node groups while we still
+	// have nodes to allocate.
+	//
+	// Loop invariants:
+	// 1. i < startIndex -> scaleUpInfos[i].NewSize == scaleUpInfos[i].MaxSize
+	// 2. i >= startIndex -> scaleUpInfos[i].NewSize < scaleUpInfos[i].MaxSize
+	// 3. startIndex <= currentIndex < len(scaleUpInfos)
+	// 4. currentIndex <= i < j -> scaleUpInfos[i].NewSize <= scaleUpInfos[j].NewSize
+	// 5. startIndex <= i < j < currentIndex -> scaleUpInfos[i].NewSize == scaleUpInfos[j].NewSize
+	// 6. startIndex <= i < currentIndex <= j -> scaleUpInfos[i].NewSize <= scaleUpInfos[j].NewSize + 1
+	sort.Slice(scaleUpInfos, func(i, j int) bool {
+		return scaleUpInfos[i].CurrentSize < scaleUpInfos[j].CurrentSize
+	})
+	startIndex := 0
+	currentIndex := 0
+	for newNodes > 0 {
+		currentInfo := &scaleUpInfos[currentIndex]
+
+		if currentInfo.NewSize < currentInfo.MaxSize {
+			// Add a node to the group at currentIndex
+			currentInfo.NewSize++
+			newNodes--
+		} else {
+			// Group at currentIndex is full. Remove it from the array.
+			// Removing is done by swapping the group with the first
+			// group still in the array and moving the start of the array.
+			// Every group between startIndex and currentIndex has the
+			// same size, so we can swap without breaking ordering.
+			scaleUpInfos[startIndex], scaleUpInfos[currentIndex] = scaleUpInfos[currentIndex], scaleUpInfos[startIndex]
+			startIndex++
+		}
+
+		// Update currentIndex.
+		// If we removed a group in this loop currentIndex may be equal to startIndex-1,
+		// in which case both branches of the if below will make currentIndex == startIndex.
+		if currentIndex < len(scaleUpInfos)-1 && currentInfo.NewSize > scaleUpInfos[currentIndex+1].NewSize {
+			// The next group has exactly one node less than the current one.
+			// We will increase it in the next iteration.
+			currentIndex++
+		} else {
+			// We reached the end of the array, or a group larger than the current one.
+			// All groups from startIndex to currentIndex have the same size,
+			// so we move back to the beginning of the array to loop over all of
+			// them once again.
+			currentIndex = startIndex
+		}
+	}
+
+	// Filter out groups that haven't changed size
+	result := make([]ScaleUpInfo, 0)
+	for _, info := range scaleUpInfos {
+		if info.NewSize != info.CurrentSize {
+			result = append(result, info)
+		}
+	}
+
+	return result, nil
+}
+
+// CleanUp performs final clean up of processor state.
+func (b *BalancingNodeGroupSetProcessor) CleanUp() {}
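To make the balancing loop above concrete, here is a standalone sketch of the same algorithm on plain ints (illustrative only, not the production code: it omits the capacity-capping and result-filtering steps, so it assumes newNodes does not exceed the total free capacity):

```go
package main

import (
	"fmt"
	"sort"
)

type group struct{ size, max int }

// balance mirrors the loop above: sort by size, then hand out nodes one at a
// time to the smallest group, retiring groups that hit their max size.
func balance(groups []group, newNodes int) []group {
	sort.Slice(groups, func(i, j int) bool { return groups[i].size < groups[j].size })
	start, cur := 0, 0
	for newNodes > 0 {
		if groups[cur].size < groups[cur].max {
			groups[cur].size++ // add a node to the group at cur
			newNodes--
		} else {
			// Retire the maxed-out group by swapping it to the front.
			groups[start], groups[cur] = groups[cur], groups[start]
			start++
		}
		if cur < len(groups)-1 && groups[cur].size > groups[cur+1].size {
			cur++ // the next group is one node behind; grow it next
		} else {
			cur = start // wrap around to the smallest group still in play
		}
	}
	return groups
}

func main() {
	// Three groups of current size 1, 3 and 5 (max 10 each) plus 5 new nodes
	// end up with target sizes {4, 5, 5} in some order.
	fmt.Println(balance([]group{{1, 10}, {3, 10}, {5, 10}}, 5))
}
```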
"k8s.io/autoscaler/cluster-autoscaler/utils/test" + schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + + "github.com/stretchr/testify/assert" +) + +func basicSimilarNodeGroupsTest(t *testing.T, processor NodeGroupSetProcessor) { + context := &context.AutoscalingContext{} + + n1 := BuildTestNode("n1", 1000, 1000) + n2 := BuildTestNode("n2", 1000, 1000) + n3 := BuildTestNode("n3", 2000, 2000) + provider := testprovider.NewTestCloudProvider(nil, nil) + provider.AddNodeGroup("ng1", 1, 10, 1) + provider.AddNodeGroup("ng2", 1, 10, 1) + provider.AddNodeGroup("ng3", 1, 10, 1) + provider.AddNode("ng1", n1) + provider.AddNode("ng2", n2) + provider.AddNode("ng3", n3) + + ni1 := schedulercache.NewNodeInfo() + ni1.SetNode(n1) + ni2 := schedulercache.NewNodeInfo() + ni2.SetNode(n2) + ni3 := schedulercache.NewNodeInfo() + ni3.SetNode(n3) + + nodeInfosForGroups := map[string]*schedulercache.NodeInfo{ + "ng1": ni1, "ng2": ni2, "ng3": ni3, + } + + ng1, _ := provider.NodeGroupForNode(n1) + ng2, _ := provider.NodeGroupForNode(n2) + ng3, _ := provider.NodeGroupForNode(n3) + context.CloudProvider = provider + + similar, err := processor.FindSimilarNodeGroups(context, ng1, nodeInfosForGroups) + assert.NoError(t, err) + assert.Equal(t, similar, []cloudprovider.NodeGroup{ng2}) + + similar, err = processor.FindSimilarNodeGroups(context, ng2, nodeInfosForGroups) + assert.NoError(t, err) + assert.Equal(t, similar, []cloudprovider.NodeGroup{ng1}) + + similar, err = processor.FindSimilarNodeGroups(context, ng3, nodeInfosForGroups) + assert.NoError(t, err) + assert.Equal(t, similar, []cloudprovider.NodeGroup{}) +} + +func TestFindSimilarNodeGroups(t *testing.T) { + processor := &BalancingNodeGroupSetProcessor{} + basicSimilarNodeGroupsTest(t, processor) +} + +func TestBalanceSingleGroup(t *testing.T) { + processor := &BalancingNodeGroupSetProcessor{} + context := &context.AutoscalingContext{} + + provider := testprovider.NewTestCloudProvider(nil, nil) + provider.AddNodeGroup("ng1", 1, 10, 1) + + // just one node + scaleUpInfo, err := processor.BalanceScaleUpBetweenGroups(context, provider.NodeGroups(), 1) + assert.NoError(t, err) + assert.Equal(t, 1, len(scaleUpInfo)) + assert.Equal(t, 2, scaleUpInfo[0].NewSize) + + // multiple nodes + scaleUpInfo, err = processor.BalanceScaleUpBetweenGroups(context, provider.NodeGroups(), 4) + assert.NoError(t, err) + assert.Equal(t, 1, len(scaleUpInfo)) + assert.Equal(t, 5, scaleUpInfo[0].NewSize) +} + +func TestBalanceUnderMaxSize(t *testing.T) { + processor := &BalancingNodeGroupSetProcessor{} + context := &context.AutoscalingContext{} + + provider := testprovider.NewTestCloudProvider(nil, nil) + provider.AddNodeGroup("ng1", 1, 10, 1) + provider.AddNodeGroup("ng2", 1, 10, 3) + provider.AddNodeGroup("ng3", 1, 10, 5) + provider.AddNodeGroup("ng4", 1, 10, 5) + + // add a single node + scaleUpInfo, err := processor.BalanceScaleUpBetweenGroups(context, provider.NodeGroups(), 1) + assert.NoError(t, err) + assert.Equal(t, 1, len(scaleUpInfo)) + assert.Equal(t, 2, scaleUpInfo[0].NewSize) + + // add multiple nodes to single group + scaleUpInfo, err = processor.BalanceScaleUpBetweenGroups(context, provider.NodeGroups(), 2) + assert.NoError(t, err) + assert.Equal(t, 1, len(scaleUpInfo)) + assert.Equal(t, 3, scaleUpInfo[0].NewSize) + + // add nodes to groups of different sizes, divisible + scaleUpInfo, err = processor.BalanceScaleUpBetweenGroups(context, provider.NodeGroups(), 4) + assert.NoError(t, err) + assert.Equal(t, 2, len(scaleUpInfo)) + assert.Equal(t, 4, scaleUpInfo[0].NewSize) + 
assert.Equal(t, 4, scaleUpInfo[1].NewSize) + assert.True(t, scaleUpInfo[0].Group.Id() == "ng1" || scaleUpInfo[1].Group.Id() == "ng1") + assert.True(t, scaleUpInfo[0].Group.Id() == "ng2" || scaleUpInfo[1].Group.Id() == "ng2") + + // add nodes to groups of different sizes, non-divisible + // we expect new sizes to be 4 and 5, doesn't matter which group gets how many + scaleUpInfo, err = processor.BalanceScaleUpBetweenGroups(context, provider.NodeGroups(), 5) + assert.NoError(t, err) + assert.Equal(t, 2, len(scaleUpInfo)) + assert.Equal(t, 9, scaleUpInfo[0].NewSize+scaleUpInfo[1].NewSize) + assert.True(t, scaleUpInfo[0].NewSize == 4 || scaleUpInfo[0].NewSize == 5) + assert.True(t, scaleUpInfo[0].Group.Id() == "ng1" || scaleUpInfo[1].Group.Id() == "ng1") + assert.True(t, scaleUpInfo[0].Group.Id() == "ng2" || scaleUpInfo[1].Group.Id() == "ng2") + + // add nodes to all groups, divisible + scaleUpInfo, err = processor.BalanceScaleUpBetweenGroups(context, provider.NodeGroups(), 10) + assert.NoError(t, err) + assert.Equal(t, 4, len(scaleUpInfo)) + for _, info := range scaleUpInfo { + assert.Equal(t, 6, info.NewSize) + } +} + +func TestBalanceHittingMaxSize(t *testing.T) { + processor := &BalancingNodeGroupSetProcessor{} + context := &context.AutoscalingContext{} + + provider := testprovider.NewTestCloudProvider(nil, nil) + provider.AddNodeGroup("ng1", 1, 1, 1) + provider.AddNodeGroup("ng2", 1, 3, 1) + provider.AddNodeGroup("ng3", 1, 10, 3) + provider.AddNodeGroup("ng4", 1, 7, 5) + groupsMap := make(map[string]cloudprovider.NodeGroup) + for _, group := range provider.NodeGroups() { + groupsMap[group.Id()] = group + } + + getGroups := func(names ...string) []cloudprovider.NodeGroup { + result := make([]cloudprovider.NodeGroup, 0) + for _, n := range names { + result = append(result, groupsMap[n]) + } + return result + } + + toMap := func(suiList []ScaleUpInfo) map[string]ScaleUpInfo { + result := make(map[string]ScaleUpInfo, 0) + for _, sui := range suiList { + result[sui.Group.Id()] = sui + } + return result + } + + // Just one maxed out group + scaleUpInfo, err := processor.BalanceScaleUpBetweenGroups(context, getGroups("ng1"), 1) + assert.NoError(t, err) + assert.Equal(t, 0, len(scaleUpInfo)) + + // Smallest group already maxed out, add one node + scaleUpInfo, err = processor.BalanceScaleUpBetweenGroups(context, getGroups("ng1", "ng2"), 1) + assert.NoError(t, err) + assert.Equal(t, 1, len(scaleUpInfo)) + assert.Equal(t, "ng2", scaleUpInfo[0].Group.Id()) + assert.Equal(t, 2, scaleUpInfo[0].NewSize) + + // Smallest group already maxed out, too many nodes (should cap to max capacity) + scaleUpInfo, err = processor.BalanceScaleUpBetweenGroups(context, getGroups("ng1", "ng2"), 5) + assert.NoError(t, err) + assert.Equal(t, 1, len(scaleUpInfo)) + assert.Equal(t, "ng2", scaleUpInfo[0].Group.Id()) + assert.Equal(t, 3, scaleUpInfo[0].NewSize) + + // First group maxes out before proceeding to next one + scaleUpInfo, err = processor.BalanceScaleUpBetweenGroups(context, getGroups("ng2", "ng3"), 4) + assert.Equal(t, 2, len(scaleUpInfo)) + scaleUpMap := toMap(scaleUpInfo) + assert.Equal(t, 3, scaleUpMap["ng2"].NewSize) + assert.Equal(t, 5, scaleUpMap["ng3"].NewSize) + + // Last group maxes out before previous one + scaleUpInfo, err = processor.BalanceScaleUpBetweenGroups(context, getGroups("ng2", "ng3", "ng4"), 9) + assert.Equal(t, 3, len(scaleUpInfo)) + scaleUpMap = toMap(scaleUpInfo) + assert.Equal(t, 3, scaleUpMap["ng2"].NewSize) + assert.Equal(t, 8, scaleUpMap["ng3"].NewSize) + assert.Equal(t, 7, 
scaleUpMap["ng4"].NewSize) + + // Use all capacity, cap to max + scaleUpInfo, err = processor.BalanceScaleUpBetweenGroups(context, getGroups("ng2", "ng3", "ng4"), 900) + assert.Equal(t, 3, len(scaleUpInfo)) + scaleUpMap = toMap(scaleUpInfo) + assert.Equal(t, 3, scaleUpMap["ng2"].NewSize) + assert.Equal(t, 10, scaleUpMap["ng3"].NewSize) + assert.Equal(t, 7, scaleUpMap["ng4"].NewSize) +} diff --git a/cluster-autoscaler/utils/nodegroupset/compare_nodegroups.go b/cluster-autoscaler/processors/nodegroupset/compare_nodegroups.go similarity index 84% rename from cluster-autoscaler/utils/nodegroupset/compare_nodegroups.go rename to cluster-autoscaler/processors/nodegroupset/compare_nodegroups.go index 3627025e5a6a..c408069dae2a 100644 --- a/cluster-autoscaler/utils/nodegroupset/compare_nodegroups.go +++ b/cluster-autoscaler/processors/nodegroupset/compare_nodegroups.go @@ -34,6 +34,10 @@ const ( MaxFreeDifferenceRatio = 0.05 ) +// NodeInfoComparator is a function that tells if two nodes are from NodeGroups +// similar enough to be considered a part of a single NodeGroupSet. +type NodeInfoComparator func(n1, n2 *schedulercache.NodeInfo) bool + func compareResourceMapsWithTolerance(resources map[apiv1.ResourceName][]resource.Quantity, maxDifferenceRatio float64) bool { for _, qtyList := range resources { @@ -89,19 +93,20 @@ func IsNodeInfoSimilar(n1, n2 *schedulercache.NodeInfo) bool { return false } + ignoredLabels := map[string]bool{ + kubeletapis.LabelHostname: true, + kubeletapis.LabelZoneFailureDomain: true, + kubeletapis.LabelZoneRegion: true, + "beta.kubernetes.io/fluentd-ds-ready": true, // this is internal label used for determining if fluentd should be installed as deamon set. Used for migration 1.8 to 1.9. + } + labels := make(map[string][]string) for _, node := range nodes { for label, value := range node.Node().ObjectMeta.Labels { - if label == kubeletapis.LabelHostname { - continue - } - if label == kubeletapis.LabelZoneFailureDomain { - continue - } - if label == kubeletapis.LabelZoneRegion { - continue + ignore, _ := ignoredLabels[label] + if !ignore { + labels[label] = append(labels[label], value) } - labels[label] = append(labels[label], value) } } for _, labelValues := range labels { diff --git a/cluster-autoscaler/utils/nodegroupset/compare_nodegroups_test.go b/cluster-autoscaler/processors/nodegroupset/compare_nodegroups_test.go similarity index 75% rename from cluster-autoscaler/utils/nodegroupset/compare_nodegroups_test.go rename to cluster-autoscaler/processors/nodegroupset/compare_nodegroups_test.go index 35d3f519bcb8..6f15c8a26e44 100644 --- a/cluster-autoscaler/utils/nodegroupset/compare_nodegroups_test.go +++ b/cluster-autoscaler/processors/nodegroupset/compare_nodegroups_test.go @@ -29,22 +29,22 @@ import ( "github.com/stretchr/testify/assert" ) -func checkNodesSimilar(t *testing.T, n1, n2 *apiv1.Node, shouldEqual bool) { - checkNodesSimilarWithPods(t, n1, n2, []*apiv1.Pod{}, []*apiv1.Pod{}, shouldEqual) +func checkNodesSimilar(t *testing.T, n1, n2 *apiv1.Node, comparator NodeInfoComparator, shouldEqual bool) { + checkNodesSimilarWithPods(t, n1, n2, []*apiv1.Pod{}, []*apiv1.Pod{}, comparator, shouldEqual) } -func checkNodesSimilarWithPods(t *testing.T, n1, n2 *apiv1.Node, pods1, pods2 []*apiv1.Pod, shouldEqual bool) { +func checkNodesSimilarWithPods(t *testing.T, n1, n2 *apiv1.Node, pods1, pods2 []*apiv1.Pod, comparator NodeInfoComparator, shouldEqual bool) { ni1 := schedulercache.NewNodeInfo(pods1...) ni1.SetNode(n1) ni2 := schedulercache.NewNodeInfo(pods2...) 
ni2.SetNode(n2) - assert.Equal(t, shouldEqual, IsNodeInfoSimilar(ni1, ni2)) + assert.Equal(t, shouldEqual, comparator(ni1, ni2)) } func TestIdenticalNodesSimilar(t *testing.T) { n1 := BuildTestNode("node1", 1000, 2000) n2 := BuildTestNode("node2", 1000, 2000) - checkNodesSimilar(t, n1, n2, true) + checkNodesSimilar(t, n1, n2, IsNodeInfoSimilar, true) } func TestNodesSimilarVariousRequirements(t *testing.T) { @@ -53,23 +53,23 @@ func TestNodesSimilarVariousRequirements(t *testing.T) { // Different CPU capacity n2 := BuildTestNode("node2", 1000, 2000) n2.Status.Capacity[apiv1.ResourceCPU] = *resource.NewMilliQuantity(1001, resource.DecimalSI) - checkNodesSimilar(t, n1, n2, false) + checkNodesSimilar(t, n1, n2, IsNodeInfoSimilar, false) // Same CPU capacity, but slightly different allocatable n3 := BuildTestNode("node3", 1000, 2000) n3.Status.Allocatable[apiv1.ResourceCPU] = *resource.NewMilliQuantity(999, resource.DecimalSI) - checkNodesSimilar(t, n1, n3, true) + checkNodesSimilar(t, n1, n3, IsNodeInfoSimilar, true) // Same CPU capacity, significantly different allocatable n4 := BuildTestNode("node4", 1000, 2000) n4.Status.Allocatable[apiv1.ResourceCPU] = *resource.NewMilliQuantity(500, resource.DecimalSI) - checkNodesSimilar(t, n1, n4, false) + checkNodesSimilar(t, n1, n4, IsNodeInfoSimilar, false) // One with GPU, one without n5 := BuildTestNode("node5", 1000, 2000) n5.Status.Capacity[gpu.ResourceNvidiaGPU] = *resource.NewQuantity(1, resource.DecimalSI) n5.Status.Allocatable[gpu.ResourceNvidiaGPU] = n5.Status.Capacity[gpu.ResourceNvidiaGPU] - checkNodesSimilar(t, n1, n5, false) + checkNodesSimilar(t, n1, n5, IsNodeInfoSimilar, false) } func TestNodesSimilarVariousRequirementsAndPods(t *testing.T) { @@ -81,20 +81,20 @@ func TestNodesSimilarVariousRequirementsAndPods(t *testing.T) { n2 := BuildTestNode("node2", 1000, 2000) n2.Status.Allocatable[apiv1.ResourceCPU] = *resource.NewMilliQuantity(500, resource.DecimalSI) n2.Status.Allocatable[apiv1.ResourceMemory] = *resource.NewQuantity(1000, resource.DecimalSI) - checkNodesSimilarWithPods(t, n1, n2, []*apiv1.Pod{p1}, []*apiv1.Pod{}, false) + checkNodesSimilarWithPods(t, n1, n2, []*apiv1.Pod{p1}, []*apiv1.Pod{}, IsNodeInfoSimilar, false) // Same requests of pods n3 := BuildTestNode("node3", 1000, 2000) p3 := BuildTestPod("pod3", 500, 1000) p3.Spec.NodeName = "node3" - checkNodesSimilarWithPods(t, n1, n3, []*apiv1.Pod{p1}, []*apiv1.Pod{p3}, true) + checkNodesSimilarWithPods(t, n1, n3, []*apiv1.Pod{p1}, []*apiv1.Pod{p3}, IsNodeInfoSimilar, true) // Similar allocatable, similar pods n4 := BuildTestNode("node4", 1000, 2000) n4.Status.Allocatable[apiv1.ResourceCPU] = *resource.NewMilliQuantity(999, resource.DecimalSI) p4 := BuildTestPod("pod4", 501, 1001) p4.Spec.NodeName = "node4" - checkNodesSimilarWithPods(t, n1, n4, []*apiv1.Pod{p1}, []*apiv1.Pod{p4}, true) + checkNodesSimilarWithPods(t, n1, n4, []*apiv1.Pod{p1}, []*apiv1.Pod{p4}, IsNodeInfoSimilar, true) } func TestNodesSimilarVariousLabels(t *testing.T) { @@ -106,18 +106,27 @@ func TestNodesSimilarVariousLabels(t *testing.T) { n2.ObjectMeta.Labels["test-label"] = "test-value" // Missing character label - checkNodesSimilar(t, n1, n2, false) + checkNodesSimilar(t, n1, n2, IsNodeInfoSimilar, false) n2.ObjectMeta.Labels["character"] = "winnie the pooh" - checkNodesSimilar(t, n1, n2, true) + checkNodesSimilar(t, n1, n2, IsNodeInfoSimilar, true) // Different hostname labels shouldn't matter n1.ObjectMeta.Labels[kubeletapis.LabelHostname] = "node1" n2.ObjectMeta.Labels[kubeletapis.LabelHostname] = 
"node2" - checkNodesSimilar(t, n1, n2, true) + checkNodesSimilar(t, n1, n2, IsNodeInfoSimilar, true) // Different zone shouldn't matter either n1.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] = "mars-olympus-mons1-b" n2.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] = "us-houston1-a" - checkNodesSimilar(t, n1, n2, true) + checkNodesSimilar(t, n1, n2, IsNodeInfoSimilar, true) + + // Different beta.kubernetes.io/fluentd-ds-ready should not matter + n1.ObjectMeta.Labels["beta.kubernetes.io/fluentd-ds-ready"] = "true" + n2.ObjectMeta.Labels["beta.kubernetes.io/fluentd-ds-ready"] = "false" + checkNodesSimilar(t, n1, n2, IsNodeInfoSimilar, true) + + n1.ObjectMeta.Labels["beta.kubernetes.io/fluentd-ds-ready"] = "true" + delete(n2.ObjectMeta.Labels, "beta.kubernetes.io/fluentd-ds-ready") + checkNodesSimilar(t, n1, n2, IsNodeInfoSimilar, true) } diff --git a/cluster-autoscaler/processors/nodegroupset/gke_compare_nodegroups.go b/cluster-autoscaler/processors/nodegroupset/gke_compare_nodegroups.go new file mode 100644 index 000000000000..be1ffbbb7aeb --- /dev/null +++ b/cluster-autoscaler/processors/nodegroupset/gke_compare_nodegroups.go @@ -0,0 +1,40 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nodegroupset + +import ( + schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" +) + +// GkeNodepoolLabel is a label specifying GKE node pool particular node belongs to. +const GkeNodepoolLabel = "cloud.google.com/gke-nodepool" + +func nodesFromSameGkeNodePool(n1, n2 *schedulercache.NodeInfo) bool { + n1GkeNodePool := n1.Node().Labels[GkeNodepoolLabel] + n2GkeNodePool := n2.Node().Labels[GkeNodepoolLabel] + return n1GkeNodePool != "" && n1GkeNodePool == n2GkeNodePool +} + +// IsGkeNodeInfoSimilar compares if two nodes should be considered part of the +// same NodeGroupSet. This is true if they either belong to the same GKE nodepool +// or match usual conditions checked by IsNodeInfoSimilar. +func IsGkeNodeInfoSimilar(n1, n2 *schedulercache.NodeInfo) bool { + if nodesFromSameGkeNodePool(n1, n2) { + return true + } + return IsNodeInfoSimilar(n1, n2) +} diff --git a/cluster-autoscaler/processors/nodegroupset/gke_compare_nodegroups_test.go b/cluster-autoscaler/processors/nodegroupset/gke_compare_nodegroups_test.go new file mode 100644 index 000000000000..7245c978b51d --- /dev/null +++ b/cluster-autoscaler/processors/nodegroupset/gke_compare_nodegroups_test.go @@ -0,0 +1,103 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/cluster-autoscaler/processors/nodegroupset/gke_compare_nodegroups_test.go b/cluster-autoscaler/processors/nodegroupset/gke_compare_nodegroups_test.go
new file mode 100644
index 000000000000..7245c978b51d
--- /dev/null
+++ b/cluster-autoscaler/processors/nodegroupset/gke_compare_nodegroups_test.go
@@ -0,0 +1,103 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package nodegroupset
+
+import (
+	"testing"
+
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
+	testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
+	"k8s.io/autoscaler/cluster-autoscaler/context"
+	. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
+	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestIsGkeNodeInfoSimilar(t *testing.T) {
+	n1 := BuildTestNode("node1", 1000, 2000)
+	n1.ObjectMeta.Labels["test-label"] = "test-value"
+	n1.ObjectMeta.Labels["character"] = "winnie the pooh"
+	n2 := BuildTestNode("node2", 1000, 2000)
+	n2.ObjectMeta.Labels["test-label"] = "test-value"
+	// No node-pool labels.
+	checkNodesSimilar(t, n1, n2, IsGkeNodeInfoSimilar, false)
+	// Empty node-pool labels
+	n1.ObjectMeta.Labels["cloud.google.com/gke-nodepool"] = ""
+	n2.ObjectMeta.Labels["cloud.google.com/gke-nodepool"] = ""
+	checkNodesSimilar(t, n1, n2, IsGkeNodeInfoSimilar, false)
+	// Only one non-empty
+	n1.ObjectMeta.Labels["cloud.google.com/gke-nodepool"] = ""
+	n2.ObjectMeta.Labels["cloud.google.com/gke-nodepool"] = "blah"
+	checkNodesSimilar(t, n1, n2, IsGkeNodeInfoSimilar, false)
+	// Only one present
+	delete(n1.ObjectMeta.Labels, "cloud.google.com/gke-nodepool")
+	n2.ObjectMeta.Labels["cloud.google.com/gke-nodepool"] = "blah"
+	checkNodesSimilar(t, n1, n2, IsGkeNodeInfoSimilar, false)
+	// Different values
+	n1.ObjectMeta.Labels["cloud.google.com/gke-nodepool"] = "blah1"
+	n2.ObjectMeta.Labels["cloud.google.com/gke-nodepool"] = "blah2"
+	checkNodesSimilar(t, n1, n2, IsGkeNodeInfoSimilar, false)
+	// Same values
+	n1.ObjectMeta.Labels["cloud.google.com/gke-nodepool"] = "blah"
+	n2.ObjectMeta.Labels["cloud.google.com/gke-nodepool"] = "blah"
+	checkNodesSimilar(t, n1, n2, IsGkeNodeInfoSimilar, true)
+}
+
+func TestFindSimilarNodeGroupsGkeBasic(t *testing.T) {
+	processor := &BalancingNodeGroupSetProcessor{Comparator: IsGkeNodeInfoSimilar}
+	basicSimilarNodeGroupsTest(t, processor)
+}
+
+func TestFindSimilarNodeGroupsGkeByLabel(t *testing.T) {
+	processor := &BalancingNodeGroupSetProcessor{Comparator: IsGkeNodeInfoSimilar}
+	context := &context.AutoscalingContext{}
+
+	n1 := BuildTestNode("n1", 1000, 1000)
+	n2 := BuildTestNode("n2", 2000, 2000)
+
+	provider := testprovider.NewTestCloudProvider(nil, nil)
+	provider.AddNodeGroup("ng1", 1, 10, 1)
+	provider.AddNodeGroup("ng2", 1, 10, 1)
+	provider.AddNode("ng1", n1)
+	provider.AddNode("ng2", n2)
+
+	ni1 := schedulercache.NewNodeInfo()
+	ni1.SetNode(n1)
+	ni2 := schedulercache.NewNodeInfo()
+	ni2.SetNode(n2)
+
+	nodeInfosForGroups := map[string]*schedulercache.NodeInfo{
+		"ng1": ni1, "ng2": ni2,
+	}
+
+	ng1, _ := provider.NodeGroupForNode(n1)
+	ng2, _ := provider.NodeGroupForNode(n2)
+	context.CloudProvider = provider
+
+	// Groups with different cpu and mem are not similar
+	similar, err := processor.FindSimilarNodeGroups(context, ng1, nodeInfosForGroups)
+	assert.NoError(t, err)
+	assert.Equal(t, similar, []cloudprovider.NodeGroup{})
+
+	// Unless we give them the nodepool label
+	n1.ObjectMeta.Labels["cloud.google.com/gke-nodepool"] = "blah"
+	n2.ObjectMeta.Labels["cloud.google.com/gke-nodepool"] = "blah"
+	similar, err = processor.FindSimilarNodeGroups(context, ng1, nodeInfosForGroups)
+	assert.NoError(t, err)
+	assert.Equal(t, similar, []cloudprovider.NodeGroup{ng2})
+}
diff --git a/cluster-autoscaler/processors/nodegroupset/nodegroup_set_processor.go b/cluster-autoscaler/processors/nodegroupset/nodegroup_set_processor.go
new file mode 100644
index
000000000000..8e1f0636ec0e
--- /dev/null
+++ b/cluster-autoscaler/processors/nodegroupset/nodegroup_set_processor.go
@@ -0,0 +1,75 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package nodegroupset
+
+import (
+	"fmt"
+
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
+	"k8s.io/autoscaler/cluster-autoscaler/context"
+	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
+	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+)
+
+// ScaleUpInfo contains information about planned scale-up of a single NodeGroup
+type ScaleUpInfo struct {
+	// Group is the group to be scaled-up
+	Group cloudprovider.NodeGroup
+	// CurrentSize is the current size of the Group
+	CurrentSize int
+	// NewSize is the size the Group will be scaled-up to
+	NewSize int
+	// MaxSize is the maximum allowed size of the Group
+	MaxSize int
+}
+
+// String is used for printing ScaleUpInfo for logging, etc.
+func (s ScaleUpInfo) String() string {
+	return fmt.Sprintf("{%v %v->%v (max: %v)}", s.Group.Id(), s.CurrentSize, s.NewSize, s.MaxSize)
+}
+
+// NodeGroupSetProcessor finds node groups that are similar and allows balancing scale-up between them.
+type NodeGroupSetProcessor interface {
+	FindSimilarNodeGroups(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup,
+		nodeInfosForGroups map[string]*schedulercache.NodeInfo) ([]cloudprovider.NodeGroup, errors.AutoscalerError)
+
+	BalanceScaleUpBetweenGroups(context *context.AutoscalingContext, groups []cloudprovider.NodeGroup, newNodes int) ([]ScaleUpInfo, errors.AutoscalerError)
+	CleanUp()
+}
+
+// NoOpNodeGroupSetProcessor returns no similar node groups and doesn't do any balancing.
+type NoOpNodeGroupSetProcessor struct {
+}
+
+// FindSimilarNodeGroups returns a list of NodeGroups similar to the one provided as a parameter.
+func (n *NoOpNodeGroupSetProcessor) FindSimilarNodeGroups(context *context.AutoscalingContext, nodeGroup cloudprovider.NodeGroup,
+	nodeInfosForGroups map[string]*schedulercache.NodeInfo) ([]cloudprovider.NodeGroup, errors.AutoscalerError) {
+	return []cloudprovider.NodeGroup{}, nil
+}
+
+// BalanceScaleUpBetweenGroups splits a scale-up between provided NodeGroups.
+func (n *NoOpNodeGroupSetProcessor) BalanceScaleUpBetweenGroups(context *context.AutoscalingContext, groups []cloudprovider.NodeGroup, newNodes int) ([]ScaleUpInfo, errors.AutoscalerError) {
+	return []ScaleUpInfo{}, nil
+}
+
+// CleanUp performs final clean up of processor state.
+func (n *NoOpNodeGroupSetProcessor) CleanUp() {}
+
+// NewDefaultNodeGroupSetProcessor creates an instance of NodeGroupSetProcessor.
+func NewDefaultNodeGroupSetProcessor() NodeGroupSetProcessor { + return &BalancingNodeGroupSetProcessor{} +} diff --git a/cluster-autoscaler/processors/processors.go b/cluster-autoscaler/processors/processors.go index 91e504bb152b..ccde1b1b8ce6 100644 --- a/cluster-autoscaler/processors/processors.go +++ b/cluster-autoscaler/processors/processors.go @@ -18,6 +18,7 @@ package processors import ( "k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups" + "k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupset" "k8s.io/autoscaler/cluster-autoscaler/processors/pods" "k8s.io/autoscaler/cluster-autoscaler/processors/status" ) @@ -29,8 +30,12 @@ type AutoscalingProcessors struct { PodListProcessor pods.PodListProcessor // NodeGroupListProcessor is used to process list of NodeGroups that can be used in scale-up. NodeGroupListProcessor nodegroups.NodeGroupListProcessor + // NodeGroupSetProcessor is used to divide scale-up between similar NodeGroups. + NodeGroupSetProcessor nodegroupset.NodeGroupSetProcessor // ScaleUpStatusProcessor is used to process the state of the cluster after a scale-up. ScaleUpStatusProcessor status.ScaleUpStatusProcessor + // ScaleDownStatusProcessor is used to process the state of the cluster after a scale-down. + ScaleDownStatusProcessor status.ScaleDownStatusProcessor // AutoscalingStatusProcessor is used to process the state of the cluster after each autoscaling iteration. AutoscalingStatusProcessor status.AutoscalingStatusProcessor // NodeGroupManager is responsible for creating/deleting node groups. @@ -42,7 +47,9 @@ func DefaultProcessors() *AutoscalingProcessors { return &AutoscalingProcessors{ PodListProcessor: pods.NewDefaultPodListProcessor(), NodeGroupListProcessor: nodegroups.NewDefaultNodeGroupListProcessor(), + NodeGroupSetProcessor: nodegroupset.NewDefaultNodeGroupSetProcessor(), ScaleUpStatusProcessor: status.NewDefaultScaleUpStatusProcessor(), + ScaleDownStatusProcessor: status.NewDefaultScaleDownStatusProcessor(), AutoscalingStatusProcessor: status.NewDefaultAutoscalingStatusProcessor(), NodeGroupManager: nodegroups.NewDefaultNodeGroupManager(), } @@ -53,8 +60,10 @@ func TestProcessors() *AutoscalingProcessors { return &AutoscalingProcessors{ PodListProcessor: &pods.NoOpPodListProcessor{}, NodeGroupListProcessor: &nodegroups.NoOpNodeGroupListProcessor{}, + NodeGroupSetProcessor: &nodegroupset.BalancingNodeGroupSetProcessor{}, // TODO(bskiba): change scale up test so that this can be a NoOpProcessor ScaleUpStatusProcessor: &status.EventingScaleUpStatusProcessor{}, + ScaleDownStatusProcessor: &status.NoOpScaleDownStatusProcessor{}, AutoscalingStatusProcessor: &status.NoOpAutoscalingStatusProcessor{}, NodeGroupManager: nodegroups.NewDefaultNodeGroupManager(), } @@ -64,7 +73,9 @@ func TestProcessors() *AutoscalingProcessors { func (ap *AutoscalingProcessors) CleanUp() { ap.PodListProcessor.CleanUp() ap.NodeGroupListProcessor.CleanUp() + ap.NodeGroupSetProcessor.CleanUp() ap.ScaleUpStatusProcessor.CleanUp() + ap.ScaleDownStatusProcessor.CleanUp() ap.AutoscalingStatusProcessor.CleanUp() ap.NodeGroupManager.CleanUp() } diff --git a/cluster-autoscaler/processors/status/eventing_scale_up_processor_test.go b/cluster-autoscaler/processors/status/eventing_scale_up_processor_test.go index bc5936fa91f1..5e684b7833ff 100644 --- a/cluster-autoscaler/processors/status/eventing_scale_up_processor_test.go +++ b/cluster-autoscaler/processors/status/eventing_scale_up_processor_test.go @@ -22,7 +22,7 @@ import ( apiv1 "k8s.io/api/core/v1" 
"k8s.io/autoscaler/cluster-autoscaler/context" - "k8s.io/autoscaler/cluster-autoscaler/utils/nodegroupset" + "k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupset" . "k8s.io/autoscaler/cluster-autoscaler/utils/test" kube_record "k8s.io/client-go/tools/record" diff --git a/cluster-autoscaler/processors/status/scale_down_status_processor.go b/cluster-autoscaler/processors/status/scale_down_status_processor.go new file mode 100644 index 000000000000..bb141793a109 --- /dev/null +++ b/cluster-autoscaler/processors/status/scale_down_status_processor.go @@ -0,0 +1,84 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package status + +import ( + apiv1 "k8s.io/api/core/v1" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" + "k8s.io/autoscaler/cluster-autoscaler/context" + "k8s.io/autoscaler/cluster-autoscaler/simulator" +) + +// ScaleDownStatus represents the state of scale down. +type ScaleDownStatus struct { + Result ScaleDownResult + ScaledDownNodes []*ScaleDownNode + NodeDeleteResults map[string]error +} + +// ScaleDownNode represents the state of a node that's being scaled down. +type ScaleDownNode struct { + Node *apiv1.Node + NodeGroup cloudprovider.NodeGroup + EvictedPods []*apiv1.Pod + UtilInfo simulator.UtilizationInfo +} + +// ScaleDownResult represents the result of scale down. +type ScaleDownResult int + +const ( + // ScaleDownError - scale down finished with error. + ScaleDownError ScaleDownResult = iota + // ScaleDownNoUnneeded - no unneeded nodes and no errors. + ScaleDownNoUnneeded + // ScaleDownNoNodeDeleted - unneeded nodes present but not available for deletion. + ScaleDownNoNodeDeleted + // ScaleDownNodeDeleted - a node was deleted. + ScaleDownNodeDeleted + // ScaleDownNodeDeleteStarted - a node deletion process was started. + ScaleDownNodeDeleteStarted + // ScaleDownNotTried - the scale down wasn't even attempted, e.g. an autoscaling iteration was skipped, or + // an error occurred before the scale up logic. + ScaleDownNotTried + // ScaleDownInCooldown - the scale down wasn't even attempted, because it's in a cooldown state (it's suspended for a scheduled period of time). + ScaleDownInCooldown + // ScaleDownInProgress - the scale down wasn't attempted, because a previous scale-down was still in progress. + ScaleDownInProgress +) + +// ScaleDownStatusProcessor processes the status of the cluster after a scale-down. +type ScaleDownStatusProcessor interface { + Process(context *context.AutoscalingContext, status *ScaleDownStatus) + CleanUp() +} + +// NewDefaultScaleDownStatusProcessor creates a default instance of ScaleUpStatusProcessor. +func NewDefaultScaleDownStatusProcessor() ScaleDownStatusProcessor { + return &NoOpScaleDownStatusProcessor{} +} + +// NoOpScaleDownStatusProcessor is a ScaleDownStatusProcessor implementations useful for testing. +type NoOpScaleDownStatusProcessor struct{} + +// Process processes the status of the cluster after a scale-down. 
diff --git a/cluster-autoscaler/processors/status/scale_up_status_processor.go b/cluster-autoscaler/processors/status/scale_up_status_processor.go
index c4ba4e22c88a..c5fc6aeb2454 100644
--- a/cluster-autoscaler/processors/status/scale_up_status_processor.go
+++ b/cluster-autoscaler/processors/status/scale_up_status_processor.go
@@ -19,14 +19,14 @@ package status
 import (
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/autoscaler/cluster-autoscaler/context"
-	"k8s.io/autoscaler/cluster-autoscaler/utils/nodegroupset"
+	"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupset"
 )
 
 // ScaleUpStatus is the status of a scale-up attempt. This includes information
 // on if scale-up happened, description of scale-up operation performed and
 // status of pods that took part in the scale-up evaluation.
 type ScaleUpStatus struct {
-	ScaledUp                bool
+	Result                  ScaleUpResult
 	ScaleUpInfos            []nodegroupset.ScaleUpInfo
 	PodsTriggeredScaleUp    []*apiv1.Pod
 	PodsRemainUnschedulable []NoScaleUpInfo
@@ -40,6 +40,30 @@ type NoScaleUpInfo struct {
 	SkippedNodeGroups map[string]Reasons
 }
 
+// ScaleUpResult represents the result of a scale up.
+type ScaleUpResult int
+
+const (
+	// ScaleUpSuccessful - a scale-up successfully occurred.
+	ScaleUpSuccessful ScaleUpResult = iota
+	// ScaleUpError - an unexpected error occurred during the scale-up attempt.
+	ScaleUpError
+	// ScaleUpNoOptionsAvailable - there were no node groups that could be considered for the scale-up.
+	ScaleUpNoOptionsAvailable
+	// ScaleUpNotNeeded - there was no need for a scale-up, e.g. because there were no unschedulable pods.
+	ScaleUpNotNeeded
+	// ScaleUpNotTried - the scale up wasn't even attempted, e.g. an autoscaling iteration was skipped, or
+	// an error occurred before the scale up logic.
+	ScaleUpNotTried
+	// ScaleUpInCooldown - the scale up wasn't even attempted, because it's in a cooldown state (it's suspended for a scheduled period of time).
+	ScaleUpInCooldown
+)
+
+// WasSuccessful returns true if the scale-up was successful.
+func (s *ScaleUpStatus) WasSuccessful() bool {
+	return s.Result == ScaleUpSuccessful
+}
+
 // Reasons interface provides a list of reasons for why something happened or didn't happen.
 type Reasons interface {
 	Reasons() []string
diff --git a/cluster-autoscaler/simulator/cluster.go b/cluster-autoscaler/simulator/cluster.go
index aee2cb283564..326f55b8aea8 100644
--- a/cluster-autoscaler/simulator/cluster.go
+++ b/cluster-autoscaler/simulator/cluster.go
@@ -23,6 +23,7 @@ import (
 	"math/rand"
 	"time"
 
+	"k8s.io/autoscaler/cluster-autoscaler/utils/drain"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/glogx"
 	scheduler_util "k8s.io/autoscaler/cluster-autoscaler/utils/scheduler"
@@ -35,7 +36,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/algorithm"
 	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 )
 
 var (
@@ -57,6 +58,14 @@ type NodeToBeRemoved struct {
 	PodsToReschedule []*apiv1.Pod
 }
 
+// UtilizationInfo contains utilization information for a node.
+type UtilizationInfo struct {
+	CpuUtil float64
+	MemUtil float64
+	// Max(CpuUtil, MemUtil).
+	Utilization float64
+}
+
 // FindNodesToRemove finds nodes that can be removed. It also returns information about a good
 // rescheduling location for each of the pods.
func FindNodesToRemove(candidates []*apiv1.Node, allNodes []*apiv1.Node, pods []*apiv1.Pod, @@ -78,7 +87,7 @@ func FindNodesToRemove(candidates []*apiv1.Node, allNodes []*apiv1.Node, pods [] candidateloop: for _, node := range candidates { - glog.V(2).Infof("%s: %s for removal", evaluationType, node.Name) + klog.V(2).Infof("%s: %s for removal", evaluationType, node.Name) var podsToRemove []*apiv1.Pod var err error @@ -92,12 +101,12 @@ candidateloop: podDisruptionBudgets) } if err != nil { - glog.V(2).Infof("%s: node %s cannot be removed: %v", evaluationType, node.Name, err) + klog.V(2).Infof("%s: node %s cannot be removed: %v", evaluationType, node.Name, err) unremovable = append(unremovable, node) continue candidateloop } } else { - glog.V(2).Infof("%s: nodeInfo for %s not found", evaluationType, node.Name) + klog.V(2).Infof("%s: nodeInfo for %s not found", evaluationType, node.Name) unremovable = append(unremovable, node) continue candidateloop } @@ -109,12 +118,12 @@ candidateloop: Node: node, PodsToReschedule: podsToRemove, }) - glog.V(2).Infof("%s: node %s may be removed", evaluationType, node.Name) + klog.V(2).Infof("%s: node %s may be removed", evaluationType, node.Name) if len(result) >= maxCount { break candidateloop } } else { - glog.V(2).Infof("%s: node %s is not suitable for removal: %v", evaluationType, node.Name, findProblems) + klog.V(2).Infof("%s: node %s is not suitable for removal: %v", evaluationType, node.Name, findProblems) unremovable = append(unremovable, node) } } @@ -141,20 +150,21 @@ func FindEmptyNodesToRemove(candidates []*apiv1.Node, pods []*apiv1.Pod) []*apiv } // CalculateUtilization calculates utilization of a node, defined as maximum of (cpu, memory) utilization. -// Per resource utilization is the sum of requests for it divided by allocatable. -func CalculateUtilization(node *apiv1.Node, nodeInfo *schedulercache.NodeInfo) (float64, error) { - cpu, err := calculateUtilizationOfResource(node, nodeInfo, apiv1.ResourceCPU) +// Per resource utilization is the sum of requests for it divided by allocatable. It also returns the individual +// cpu and memory utilization. 
+func CalculateUtilization(node *apiv1.Node, nodeInfo *schedulercache.NodeInfo, skipDaemonSetPods, skipMirrorPods bool) (utilInfo UtilizationInfo, err error) { + cpu, err := calculateUtilizationOfResource(node, nodeInfo, apiv1.ResourceCPU, skipDaemonSetPods, skipMirrorPods) if err != nil { - return 0, err + return UtilizationInfo{}, err } - mem, err := calculateUtilizationOfResource(node, nodeInfo, apiv1.ResourceMemory) + mem, err := calculateUtilizationOfResource(node, nodeInfo, apiv1.ResourceMemory, skipDaemonSetPods, skipMirrorPods) if err != nil { - return 0, err + return UtilizationInfo{}, err } - return math.Max(cpu, mem), nil + return UtilizationInfo{CpuUtil: cpu, MemUtil: mem, Utilization: math.Max(cpu, mem)}, nil } -func calculateUtilizationOfResource(node *apiv1.Node, nodeInfo *schedulercache.NodeInfo, resourceName apiv1.ResourceName) (float64, error) { +func calculateUtilizationOfResource(node *apiv1.Node, nodeInfo *schedulercache.NodeInfo, resourceName apiv1.ResourceName, skipDaemonSetPods, skipMirrorPods bool) (float64, error) { nodeAllocatable, found := node.Status.Allocatable[resourceName] if !found { return 0, fmt.Errorf("Failed to get %v from %s", resourceName, node.Name) @@ -164,6 +174,14 @@ func calculateUtilizationOfResource(node *apiv1.Node, nodeInfo *schedulercache.N } podsRequest := resource.MustParse("0") for _, pod := range nodeInfo.Pods() { + // factor daemonset pods out of the utilization calculations + if skipDaemonSetPods && isDaemonSet(pod) { + continue + } + // factor mirror pods out of the utilization calculations + if skipMirrorPods && drain.IsMirrorPod(pod) { + continue + } for _, container := range pod.Spec.Containers { if resourceValue, found := container.Resources.Requests[resourceName]; found { podsRequest.Add(resourceValue) @@ -196,7 +214,7 @@ func findPlaceFor(removedNode string, pods []*apiv1.Pod, nodes []*apiv1.Node, no // NodeInfo is generated based on pods. It is possible that node is removed from // an api server faster than the pod that were running on them. In such a case // we have to skip this nodeInfo. It should go away pretty soon. - glog.Warningf("No node in nodeInfo %s -> %v", nodename, nodeInfo) + klog.Warningf("No node in nodeInfo %s -> %v", nodename, nodeInfo) return false } err := predicateChecker.CheckPredicates(pod, predicateMeta, nodeInfo) @@ -204,7 +222,7 @@ func findPlaceFor(removedNode string, pods []*apiv1.Pod, nodes []*apiv1.Node, no glogx.V(4).UpTo(loggingQuota).Infof("Evaluation %s for %s/%s -> %v", nodename, pod.Namespace, pod.Name, err.VerboseError()) } else { // TODO(mwielgus): Optimize it. - glog.V(4).Infof("Pod %s/%s can be moved to %s", pod.Namespace, pod.Name, nodename) + klog.V(4).Infof("Pod %s/%s can be moved to %s", pod.Namespace, pod.Name, nodename) podsOnNode := nodeInfo.Pods() podsOnNode = append(podsOnNode, pod) newNodeInfo := schedulercache.NewNodeInfo(podsOnNode...) 
@@ -232,7 +250,7 @@ func findPlaceFor(removedNode string, pods []*apiv1.Pod, nodes []*apiv1.Node, no predicateMeta := predicateChecker.GetPredicateMetadata(pod, newNodeInfos) loggingQuota.Reset() - glog.V(5).Infof("Looking for place for %s/%s", pod.Namespace, pod.Name) + klog.V(5).Infof("Looking for place for %s/%s", pod.Namespace, pod.Name) hintedNode, hasHint := oldHints[podKey(pod)] if hasHint { @@ -274,3 +292,12 @@ func shuffleNodes(nodes []*apiv1.Node) []*apiv1.Node { } return result } + +func isDaemonSet(pod *apiv1.Pod) bool { + for _, ownerReference := range pod.ObjectMeta.OwnerReferences { + if ownerReference.Kind == "DaemonSet" { + return true + } + } + return false +} diff --git a/cluster-autoscaler/simulator/cluster_test.go b/cluster-autoscaler/simulator/cluster_test.go index d294fb4e8c09..2897a94a0de3 100644 --- a/cluster-autoscaler/simulator/cluster_test.go +++ b/cluster-autoscaler/simulator/cluster_test.go @@ -38,14 +38,42 @@ func TestUtilization(t *testing.T) { node := BuildTestNode("node1", 2000, 2000000) SetNodeReadyState(node, true, time.Time{}) - utilization, err := CalculateUtilization(node, nodeInfo) + utilInfo, err := CalculateUtilization(node, nodeInfo, false, false) assert.NoError(t, err) - assert.InEpsilon(t, 2.0/10, utilization, 0.01) + assert.InEpsilon(t, 2.0/10, utilInfo.Utilization, 0.01) node2 := BuildTestNode("node1", 2000, -1) - _, err = CalculateUtilization(node2, nodeInfo) + _, err = CalculateUtilization(node2, nodeInfo, false, false) assert.Error(t, err) + + daemonSetPod3 := BuildTestPod("p3", 100, 200000) + daemonSetPod3.OwnerReferences = GenerateOwnerReferences("ds", "DaemonSet", "apps/v1", "") + + nodeInfo = schedulercache.NewNodeInfo(pod, pod, pod2, daemonSetPod3) + utilInfo, err = CalculateUtilization(node, nodeInfo, true, false) + assert.NoError(t, err) + assert.InEpsilon(t, 2.0/10, utilInfo.Utilization, 0.01) + + nodeInfo = schedulercache.NewNodeInfo(pod, pod2, daemonSetPod3) + utilInfo, err = CalculateUtilization(node, nodeInfo, false, false) + assert.NoError(t, err) + assert.InEpsilon(t, 2.0/10, utilInfo.Utilization, 0.01) + + mirrorPod4 := BuildTestPod("p4", 100, 200000) + mirrorPod4.Annotations = map[string]string{ + types.ConfigMirrorAnnotationKey: "", + } + + nodeInfo = schedulercache.NewNodeInfo(pod, pod, pod2, mirrorPod4) + utilInfo, err = CalculateUtilization(node, nodeInfo, false, true) + assert.NoError(t, err) + assert.InEpsilon(t, 2.0/10, utilInfo.Utilization, 0.01) + + nodeInfo = schedulercache.NewNodeInfo(pod, pod2, mirrorPod4) + utilInfo, err = CalculateUtilization(node, nodeInfo, false, false) + assert.NoError(t, err) + assert.InEpsilon(t, 2.0/10, utilInfo.Utilization, 0.01) } func TestFindPlaceAllOk(t *testing.T) { diff --git a/cluster-autoscaler/simulator/drain.go b/cluster-autoscaler/simulator/drain.go index c1c517078d62..ced7c256e3a8 100644 --- a/cluster-autoscaler/simulator/drain.go +++ b/cluster-autoscaler/simulator/drain.go @@ -94,7 +94,7 @@ func checkPdbs(pods []*apiv1.Pod, pdbs []*policyv1.PodDisruptionBudget) error { for _, pod := range pods { if pod.Namespace == pdb.Namespace && selector.Matches(labels.Set(pod.Labels)) { if pdb.Status.PodDisruptionsAllowed < 1 { - return fmt.Errorf("no enough pod disruption budget to move %s/%s", pod.Namespace, pod.Name) + return fmt.Errorf("not enough pod disruption budget to move %s/%s", pod.Namespace, pod.Name) } } } diff --git a/cluster-autoscaler/simulator/nodes.go b/cluster-autoscaler/simulator/nodes.go index 8d0a9e78a6b8..3df4ee840a05 100644 --- 
a/cluster-autoscaler/simulator/nodes.go
+++ b/cluster-autoscaler/simulator/nodes.go
@@ -48,7 +48,7 @@ func GetRequiredPodsForNode(nodename string, client kube_client.Interface) ([]*a
 	podsToRemoveList, err := drain.GetPodsForDeletionOnNodeDrain(
 		allPods,
 		[]*policyv1.PodDisruptionBudget{}, // PDBs are irrelevant when considering new node.
-		true,  // Force all removals.
+		true, // Force all removals.
 		false,
 		false,
 		false,
 		// Setting this to true requires client to be not-null.
diff --git a/cluster-autoscaler/simulator/predicates.go b/cluster-autoscaler/simulator/predicates.go
index ca6fa9c37e52..083e94e2fa8f 100644
--- a/cluster-autoscaler/simulator/predicates.go
+++ b/cluster-autoscaler/simulator/predicates.go
@@ -30,9 +30,9 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/factory"
 
 	// We need to import provider to initialize default scheduler.
-	_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
+	"k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 )
 
 const (
@@ -56,6 +56,15 @@ type PredicateChecker struct {
 // There are no const arrays in Go, this is meant to be used as a const.
 var priorityPredicates = []string{"PodFitsResources", "GeneralPredicates", "PodToleratesNodeTaints"}
 
+func init() {
+	// This results in filtering out some predicate functions registered by the defaults.init() method.
+	// In the scheduler this method is run from app.runCommand().
+	// We also need to call it in CA to keep simulation behaviour consistent with the scheduler.
+	// Note: the logic of this method is conditional and depends on the feature gates enabled. To get the
+	// same behaviour in CA and the scheduler, both need to be run with the same set of feature gates.
+	algorithmprovider.ApplyFeatureGates()
+}
+
 // NewPredicateChecker builds PredicateChecker.
 func NewPredicateChecker(kubeClient kube_client.Interface, stop <-chan struct{}) (*PredicateChecker, error) {
 	provider, err := factory.GetAlgorithmProvider(factory.DefaultProvider)
@@ -110,7 +119,7 @@ func NewPredicateChecker(kubeClient kube_client.Interface, stop <-chan struct{})
 	}
 
 	for _, predInfo := range predicateList {
-		glog.V(1).Infof("Using predicate %s", predInfo.name)
+		klog.V(1).Infof("Using predicate %s", predInfo.name)
 	}
 
 	// TODO: Verify that run is not needed anymore.
@@ -222,11 +231,12 @@ func (pe *PredicateError) VerboseError() string {
 }
 
 // NewPredicateError creates a new predicate error from error and reasons.
-func NewPredicateError(name string, err error, reasons []string) *PredicateError {
+func NewPredicateError(name string, err error, reasons []string, originalReasons []algorithm.PredicateFailureReason) *PredicateError {
 	return &PredicateError{
-		predicateName: name,
-		err:           err,
-		reasons:       reasons,
+		predicateName:  name,
+		err:            err,
+		reasons:        reasons,
+		failureReasons: originalReasons,
 	}
 }
 
@@ -242,6 +252,11 @@ func (pe *PredicateError) Reasons() []string {
 	return pe.reasons
 }
 
+// OriginalReasons returns the original failure reasons from the failed predicate as a slice of PredicateFailureReason.
+func (pe *PredicateError) OriginalReasons() []algorithm.PredicateFailureReason {
+	return pe.failureReasons
+}
+
 // PredicateName returns the name of failed predicate.
func (pe *PredicateError) PredicateName() string {
 	return pe.predicateName
diff --git a/cluster-autoscaler/utils/backoff/backoff.go b/cluster-autoscaler/utils/backoff/backoff.go
index fad0589a2732..076efecf3400 100644
--- a/cluster-autoscaler/utils/backoff/backoff.go
+++ b/cluster-autoscaler/utils/backoff/backoff.go
@@ -18,66 +18,18 @@ package backoff
 
 import (
 	"time"
-)
-
-type backoffInfo struct {
-	duration time.Duration
-	backoffUntil time.Time
-	lastFailedExecution time.Time
-}
-
-// Backoff handles backing off executions.
-type Backoff struct {
-	maxBackoffDuration time.Duration
-	initialBackoffDuration time.Duration
-	backoffResetTimeout time.Duration
-	backoffInfo map[string]backoffInfo
-}
-
-// NewBackoff creates an instance of Backoff.
-func NewBackoff(initialBackoffDuration time.Duration, maxBackoffDuration time.Duration, backoffResetTimeout time.Duration) *Backoff {
-	return &Backoff{maxBackoffDuration, initialBackoffDuration, backoffResetTimeout, make(map[string]backoffInfo)}
-}
-// RemoveStaleBackoffData removes stale backoff data.
-func (b *Backoff) RemoveStaleBackoffData(currentTime time.Time) {
-	for key, backoffInfo := range b.backoffInfo {
-		if backoffInfo.lastFailedExecution.Add(b.backoffResetTimeout).Before(currentTime) {
-			delete(b.backoffInfo, key)
-		}
-	}
-}
-
-// Backoff execution for the given key. Returns time till execution is backed off.
-func (b *Backoff) Backoff(key string, currentTime time.Time) time.Time {
-	duration := b.initialBackoffDuration
-	if backoffInfo, found := b.backoffInfo[key]; found {
-		// Multiple concurrent scale-ups failing shouldn't cause backoff
-		// duration to increase, so we only increase it if we're not in
-		// backoff right now.
-		if backoffInfo.backoffUntil.Before(currentTime) {
-			duration = 2 * backoffInfo.duration
-			if duration > b.maxBackoffDuration {
-				duration = b.maxBackoffDuration
-			}
-		}
-	}
-	backoffUntil := currentTime.Add(duration)
-	b.backoffInfo[key] = backoffInfo{
-		duration:            duration,
-		backoffUntil:        backoffUntil,
-		lastFailedExecution: currentTime,
-	}
-	return backoffUntil
-}
-
-// RemoveBackoff removes backoff data for the given key.
-func (b *Backoff) RemoveBackoff(key string) {
-	delete(b.backoffInfo, key)
-}
+
+	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
+)
 
-// IsBackedOff returns true if execution is backed off for the given key.
-func (b *Backoff) IsBackedOff(key string, currentTime time.Time) bool {
-	backoffInfo, found := b.backoffInfo[key]
-	return found && backoffInfo.backoffUntil.After(currentTime)
+// Backoff allows time-based backing off of node groups considered in the scale-up algorithm
+type Backoff interface {
+	// Backoff execution for the given node group. Returns the time until which execution is backed off.
+	Backoff(nodeGroup cloudprovider.NodeGroup, currentTime time.Time) time.Time
+	// IsBackedOff returns true if execution is backed off for the given node group.
+	IsBackedOff(nodeGroup cloudprovider.NodeGroup, currentTime time.Time) bool
+	// RemoveBackoff removes backoff data for the given node group.
+	RemoveBackoff(nodeGroup cloudprovider.NodeGroup)
+	// RemoveStaleBackoffData removes stale backoff data.
+	RemoveStaleBackoffData(currentTime time.Time)
 }
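A short sketch of how the new interface is meant to be used, based on the exponential implementation added below (illustrative; nodeGroup stands for any cloudprovider.NodeGroup in scope, and the durations are arbitrary):

```go
// Back off a node group after a failed scale-up; repeated failures double the
// backoff duration, up to the configured maximum.
b := backoff.NewIdBasedExponentialBackoff(5*time.Minute, 30*time.Minute, 3*time.Hour)

now := time.Now()
b.Backoff(nodeGroup, now)                            // backed off until now+5m
_ = b.IsBackedOff(nodeGroup, now.Add(time.Minute))   // true
_ = b.IsBackedOff(nodeGroup, now.Add(6*time.Minute)) // false, window has passed

b.Backoff(nodeGroup, now.Add(6*time.Minute)) // second failure: 10m this time
b.RemoveBackoff(nodeGroup)                   // e.g. after a successful scale-up
```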
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package backoff - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -func TestBackoffTwoKeys(t *testing.T) { - backoff := NewBackoff(10*time.Minute, time.Hour, 3*time.Hour) - startTime := time.Now() - assert.False(t, backoff.IsBackedOff("key1", startTime)) - assert.False(t, backoff.IsBackedOff("key2", startTime)) - backoff.Backoff("key1", startTime.Add(time.Minute)) - assert.True(t, backoff.IsBackedOff("key1", startTime.Add(2*time.Minute))) - assert.False(t, backoff.IsBackedOff("key2", startTime)) - assert.False(t, backoff.IsBackedOff("key1", startTime.Add(11*time.Minute))) -} - -func TestMaxBackoff(t *testing.T) { - backoff := NewBackoff(1*time.Minute, 3*time.Minute, 3*time.Hour) - startTime := time.Now() - backoff.Backoff("key1", startTime) - assert.True(t, backoff.IsBackedOff("key1", startTime)) - assert.False(t, backoff.IsBackedOff("key1", startTime.Add(1*time.Minute))) - backoff.Backoff("key1", startTime.Add(1*time.Minute)) - assert.True(t, backoff.IsBackedOff("key1", startTime.Add(1*time.Minute))) - assert.False(t, backoff.IsBackedOff("key1", startTime.Add(3*time.Minute))) - backoff.Backoff("key1", startTime.Add(3*time.Minute)) - assert.True(t, backoff.IsBackedOff("key1", startTime.Add(3*time.Minute))) - assert.False(t, backoff.IsBackedOff("key1", startTime.Add(6*time.Minute))) -} - -func TestRemoveBackoff(t *testing.T) { - backoff := NewBackoff(1*time.Minute, 3*time.Minute, 3*time.Hour) - startTime := time.Now() - backoff.Backoff("key1", startTime) - assert.True(t, backoff.IsBackedOff("key1", startTime)) - backoff.RemoveBackoff("key1") - assert.False(t, backoff.IsBackedOff("key1", startTime)) -} - -func TestResetStaleBackoffData(t *testing.T) { - backoff := NewBackoff(1*time.Minute, 3*time.Minute, 3*time.Hour) - startTime := time.Now() - backoff.Backoff("key1", startTime) - backoff.Backoff("key2", startTime.Add(time.Hour)) - backoff.RemoveStaleBackoffData(startTime.Add(time.Hour)) - assert.Equal(t, 2, len(backoff.backoffInfo)) - backoff.RemoveStaleBackoffData(startTime.Add(4 * time.Hour)) - assert.Equal(t, 1, len(backoff.backoffInfo)) - backoff.RemoveStaleBackoffData(startTime.Add(5 * time.Hour)) - assert.Equal(t, 0, len(backoff.backoffInfo)) -} diff --git a/cluster-autoscaler/utils/backoff/exponential_backoff.go b/cluster-autoscaler/utils/backoff/exponential_backoff.go new file mode 100644 index 000000000000..5ee0e6ba3dae --- /dev/null +++ b/cluster-autoscaler/utils/backoff/exponential_backoff.go @@ -0,0 +1,108 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package backoff + +import ( + "time" + + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" +) + +// exponentialBackoff handles backing off executions. +type exponentialBackoff struct { + maxBackoffDuration time.Duration + initialBackoffDuration time.Duration + backoffResetTimeout time.Duration + backoffInfo map[string]exponentialBackoffInfo + nodeGroupKey func(nodeGroup cloudprovider.NodeGroup) string +} + +type exponentialBackoffInfo struct { + duration time.Duration + backoffUntil time.Time + lastFailedExecution time.Time +} + +// NewExponentialBackoff creates an instance of exponential backoff. +func NewExponentialBackoff( + initialBackoffDuration time.Duration, + maxBackoffDuration time.Duration, + backoffResetTimeout time.Duration, + nodeGroupKey func(nodeGroup cloudprovider.NodeGroup) string) Backoff { + return &exponentialBackoff{ + maxBackoffDuration: maxBackoffDuration, + initialBackoffDuration: initialBackoffDuration, + backoffResetTimeout: backoffResetTimeout, + backoffInfo: make(map[string]exponentialBackoffInfo), + nodeGroupKey: nodeGroupKey, + } +} + +// NewIdBasedExponentialBackoff creates an instance of exponential backoff with node group Id used as a key. +func NewIdBasedExponentialBackoff(initialBackoffDuration time.Duration, maxBackoffDuration time.Duration, backoffResetTimeout time.Duration) Backoff { + return NewExponentialBackoff( + initialBackoffDuration, + maxBackoffDuration, + backoffResetTimeout, + func(nodeGroup cloudprovider.NodeGroup) string { + return nodeGroup.Id() + }) +} + +// Backoff execution for the given node group. Returns time till execution is backed off. +func (b *exponentialBackoff) Backoff(nodeGroup cloudprovider.NodeGroup, currentTime time.Time) time.Time { + duration := b.initialBackoffDuration + key := b.nodeGroupKey(nodeGroup) + if backoffInfo, found := b.backoffInfo[key]; found { + // Multiple concurrent scale-ups failing shouldn't cause backoff + // duration to increase, so we only increase it if we're not in + // backoff right now. + if backoffInfo.backoffUntil.Before(currentTime) { + duration = 2 * backoffInfo.duration + if duration > b.maxBackoffDuration { + duration = b.maxBackoffDuration + } + } + } + backoffUntil := currentTime.Add(duration) + b.backoffInfo[key] = exponentialBackoffInfo{ + duration: duration, + backoffUntil: backoffUntil, + lastFailedExecution: currentTime, + } + return backoffUntil +} + +// IsBackedOff returns true if execution is backed off for the given node group. +func (b *exponentialBackoff) IsBackedOff(nodeGroup cloudprovider.NodeGroup, currentTime time.Time) bool { + backoffInfo, found := b.backoffInfo[b.nodeGroupKey(nodeGroup)] + return found && backoffInfo.backoffUntil.After(currentTime) +} + +// RemoveBackoff removes backoff data for the given node group. +func (b *exponentialBackoff) RemoveBackoff(nodeGroup cloudprovider.NodeGroup) { + delete(b.backoffInfo, b.nodeGroupKey(nodeGroup)) +} + +// RemoveStaleBackoffData removes stale backoff data.
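For orientation, a short usage sketch of the new interface under the semantics implemented above: the first failure backs a node group off for the initial duration, and a failure after that backoff has expired doubles the duration, up to the cap. The test cloud provider setup mirrors the unit tests below; none of this is part of the patch itself:

```go
package main

import (
	"fmt"
	"time"

	testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
	"k8s.io/autoscaler/cluster-autoscaler/utils/backoff"
)

func main() {
	// Throwaway node group, built the same way as in the unit tests.
	provider := testprovider.NewTestCloudProvider(nil, nil)
	provider.AddNodeGroup("ng1", 1, 10, 1)
	ng := provider.GetNodeGroup("ng1")

	b := backoff.NewIdBasedExponentialBackoff(5*time.Minute, 30*time.Minute, 3*time.Hour)

	now := time.Now()
	until := b.Backoff(ng, now)                         // first failure: 5m backoff
	fmt.Println(b.IsBackedOff(ng, now), until.Sub(now)) // true 5m0s

	// Failing again after the previous backoff has expired doubles the duration.
	later := until.Add(time.Second)
	fmt.Println(b.Backoff(ng, later).Sub(later)) // 10m0s
}
```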
+func (b *exponentialBackoff) RemoveStaleBackoffData(currentTime time.Time) { + for key, backoffInfo := range b.backoffInfo { + if backoffInfo.lastFailedExecution.Add(b.backoffResetTimeout).Before(currentTime) { + delete(b.backoffInfo, key) + } + } +} diff --git a/cluster-autoscaler/utils/backoff/exponential_backoff_test.go b/cluster-autoscaler/utils/backoff/exponential_backoff_test.go new file mode 100644 index 000000000000..ea65b83de2d1 --- /dev/null +++ b/cluster-autoscaler/utils/backoff/exponential_backoff_test.go @@ -0,0 +1,83 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package backoff + +import ( + "testing" + "time" + + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" + testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test" + + "github.com/stretchr/testify/assert" +) + +func nodeGroup(id string) cloudprovider.NodeGroup { + provider := testprovider.NewTestCloudProvider(nil, nil) + provider.AddNodeGroup(id, 1, 10, 1) + return provider.GetNodeGroup(id) +} + +var nodeGroup1 = nodeGroup("id1") +var nodeGroup2 = nodeGroup("id2") + +func TestBackoffTwoKeys(t *testing.T) { + backoff := NewIdBasedExponentialBackoff(10*time.Minute, time.Hour, 3*time.Hour) + startTime := time.Now() + assert.False(t, backoff.IsBackedOff(nodeGroup1, startTime)) + assert.False(t, backoff.IsBackedOff(nodeGroup2, startTime)) + backoff.Backoff(nodeGroup1, startTime.Add(time.Minute)) + assert.True(t, backoff.IsBackedOff(nodeGroup1, startTime.Add(2*time.Minute))) + assert.False(t, backoff.IsBackedOff(nodeGroup2, startTime)) + assert.False(t, backoff.IsBackedOff(nodeGroup1, startTime.Add(11*time.Minute))) +} + +func TestMaxBackoff(t *testing.T) { + backoff := NewIdBasedExponentialBackoff(1*time.Minute, 3*time.Minute, 3*time.Hour) + startTime := time.Now() + backoff.Backoff(nodeGroup1, startTime) + assert.True(t, backoff.IsBackedOff(nodeGroup1, startTime)) + assert.False(t, backoff.IsBackedOff(nodeGroup1, startTime.Add(1*time.Minute))) + backoff.Backoff(nodeGroup1, startTime.Add(1*time.Minute)) + assert.True(t, backoff.IsBackedOff(nodeGroup1, startTime.Add(1*time.Minute))) + assert.False(t, backoff.IsBackedOff(nodeGroup1, startTime.Add(3*time.Minute))) + backoff.Backoff(nodeGroup1, startTime.Add(3*time.Minute)) + assert.True(t, backoff.IsBackedOff(nodeGroup1, startTime.Add(3*time.Minute))) + assert.False(t, backoff.IsBackedOff(nodeGroup1, startTime.Add(6*time.Minute))) +} + +func TestRemoveBackoff(t *testing.T) { + backoff := NewIdBasedExponentialBackoff(1*time.Minute, 3*time.Minute, 3*time.Hour) + startTime := time.Now() + backoff.Backoff(nodeGroup1, startTime) + assert.True(t, backoff.IsBackedOff(nodeGroup1, startTime)) + backoff.RemoveBackoff(nodeGroup1) + assert.False(t, backoff.IsBackedOff(nodeGroup1, startTime)) +} + +func TestResetStaleBackoffData(t *testing.T) { + backoff := NewIdBasedExponentialBackoff(1*time.Minute, 3*time.Minute, 3*time.Hour) + startTime := time.Now() + backoff.Backoff(nodeGroup1, startTime) + backoff.Backoff(nodeGroup2, 
startTime.Add(time.Hour)) + backoff.RemoveStaleBackoffData(startTime.Add(time.Hour)) + assert.Equal(t, 2, len(backoff.(*exponentialBackoff).backoffInfo)) + backoff.RemoveStaleBackoffData(startTime.Add(4 * time.Hour)) + assert.Equal(t, 1, len(backoff.(*exponentialBackoff).backoffInfo)) + backoff.RemoveStaleBackoffData(startTime.Add(5 * time.Hour)) + assert.Equal(t, 0, len(backoff.(*exponentialBackoff).backoffInfo)) +} diff --git a/cluster-autoscaler/utils/deletetaint/delete.go b/cluster-autoscaler/utils/deletetaint/delete.go index f17ccda0a7f6..3fc87f11f037 100644 --- a/cluster-autoscaler/utils/deletetaint/delete.go +++ b/cluster-autoscaler/utils/deletetaint/delete.go @@ -22,42 +22,54 @@ import ( "time" apiv1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kube_client "k8s.io/client-go/kubernetes" - "github.com/golang/glog" + "k8s.io/klog" ) const ( // ToBeDeletedTaint is a taint used to make the node unschedulable. ToBeDeletedTaint = "ToBeDeletedByClusterAutoscaler" + + maxRetryDeadline = 5 * time.Second + conflictRetryInterval = 750 * time.Millisecond ) // MarkToBeDeleted sets a taint that makes the node unschedulable. func MarkToBeDeleted(node *apiv1.Node, client kube_client.Interface) error { - // Get the newest version of the node. - freshNode, err := client.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{}) - if err != nil || freshNode == nil { - return fmt.Errorf("failed to get node %v: %v", node.Name, err) - } + retryDeadline := time.Now().Add(maxRetryDeadline) + for { + // Get the newest version of the node. + freshNode, err := client.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{}) + if err != nil || freshNode == nil { + return fmt.Errorf("failed to get node %v: %v", node.Name, err) + } - added, err := addToBeDeletedTaint(freshNode) - if added == false { - return err - } - _, err = client.CoreV1().Nodes().Update(freshNode) - if err != nil { - glog.Warningf("Error while adding taints on node %v: %v", node.Name, err) - return err + added, err := addToBeDeletedTaint(freshNode) + if added == false { + return err + } + _, err = client.CoreV1().Nodes().Update(freshNode) + if err != nil && errors.IsConflict(err) && time.Now().Before(retryDeadline) { + time.Sleep(conflictRetryInterval) + continue + } + + if err != nil { + klog.Warningf("Error while adding taints on node %v: %v", node.Name, err) + return err + } + klog.V(1).Infof("Successfully added toBeDeletedTaint on node %v", node.Name) + return nil } - glog.V(1).Infof("Successfully added toBeDeletedTaint on node %v", node.Name) - return nil } func addToBeDeletedTaint(node *apiv1.Node) (bool, error) { for _, taint := range node.Spec.Taints { if taint.Key == ToBeDeletedTaint { - glog.V(2).Infof("ToBeDeletedTaint already present on node %v, taint: %v", node.Name, taint) + klog.V(2).Infof("ToBeDeletedTaint already present on node %v, taint: %v", node.Name, taint) return false, nil } } @@ -96,28 +108,37 @@ func GetToBeDeletedTime(node *apiv1.Node) (*time.Time, error) { // CleanToBeDeleted cleans ToBeDeleted taint. 
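The retry loops added to this file hand-roll what client-go also ships as `retry.RetryOnConflict`. Below is a hedged sketch of the same get-mutate-update shape using that helper, if `k8s.io/client-go/util/retry` is vendored; note the patch deliberately bounds retries by a fixed 5-second deadline with 750ms sleeps rather than `retry.DefaultRetry`'s schedule, so this is an alternative expression, not the author's method, and `markToBeDeletedWithRetryHelper` is a hypothetical name:

```go
package deletetaint

import (
	"fmt"
	"time"

	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kube_client "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// markToBeDeletedWithRetryHelper is a hypothetical variant of MarkToBeDeleted
// built on client-go's conflict-retry helper.
func markToBeDeletedWithRetryHelper(node *apiv1.Node, client kube_client.Interface) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		// Re-read the node on every attempt so the update carries a fresh
		// resourceVersion.
		freshNode, err := client.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		freshNode.Spec.Taints = append(freshNode.Spec.Taints, apiv1.Taint{
			Key:    ToBeDeletedTaint,
			Value:  fmt.Sprint(time.Now().Unix()),
			Effect: apiv1.TaintEffectNoSchedule,
		})
		_, err = client.CoreV1().Nodes().Update(freshNode)
		return err
	})
}
```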
func CleanToBeDeleted(node *apiv1.Node, client kube_client.Interface) (bool, error) { - freshNode, err := client.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{}) - if err != nil || freshNode == nil { - return false, fmt.Errorf("failed to get node %v: %v", node.Name, err) - } - newTaints := make([]apiv1.Taint, 0) - for _, taint := range freshNode.Spec.Taints { - if taint.Key == ToBeDeletedTaint { - glog.V(1).Infof("Releasing taint %+v on node %v", taint, node.Name) - } else { - newTaints = append(newTaints, taint) + retryDeadline := time.Now().Add(maxRetryDeadline) + for { + freshNode, err := client.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{}) + if err != nil || freshNode == nil { + return false, fmt.Errorf("failed to get node %v: %v", node.Name, err) + } + newTaints := make([]apiv1.Taint, 0) + for _, taint := range freshNode.Spec.Taints { + if taint.Key == ToBeDeletedTaint { + klog.V(1).Infof("Releasing taint %+v on node %v", taint, node.Name) + } else { + newTaints = append(newTaints, taint) + } } - } - if len(newTaints) != len(freshNode.Spec.Taints) { - freshNode.Spec.Taints = newTaints - _, err := client.CoreV1().Nodes().Update(freshNode) - if err != nil { - glog.Warningf("Error while releasing taints on node %v: %v", node.Name, err) - return false, err + if len(newTaints) != len(freshNode.Spec.Taints) { + freshNode.Spec.Taints = newTaints + _, err := client.CoreV1().Nodes().Update(freshNode) + + if err != nil && errors.IsConflict(err) && time.Now().Before(retryDeadline) { + time.Sleep(conflictRetryInterval) + continue + } + + if err != nil { + klog.Warningf("Error while releasing taints on node %v: %v", node.Name, err) + return false, err + } + klog.V(1).Infof("Successfully released toBeDeletedTaint on node %v", node.Name) + return true, nil } - glog.V(1).Infof("Successfully released toBeDeletedTaint on node %v", node.Name) - return true, nil + return false, nil } - return false, nil } diff --git a/cluster-autoscaler/utils/deletetaint/delete_test.go b/cluster-autoscaler/utils/deletetaint/delete_test.go index 1ad25bb8f3ba..8763d71de9bc 100644 --- a/cluster-autoscaler/utils/deletetaint/delete_test.go +++ b/cluster-autoscaler/utils/deletetaint/delete_test.go @@ -17,6 +17,8 @@ limitations under the License. 
package deletetaint import ( + "fmt" + "sync/atomic" "testing" "time" @@ -24,6 +26,7 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" @@ -33,22 +36,26 @@ import ( func TestMarkNodes(t *testing.T) { node := BuildTestNode("node", 1000, 1000) - fakeClient, updatedNodes := buildFakeClientAndUpdateChannel(node) + fakeClient := buildFakeClient(t, node) err := MarkToBeDeleted(node, fakeClient) assert.NoError(t, err) - assert.Equal(t, node.Name, getStringFromChan(updatedNodes)) - assert.True(t, HasToBeDeletedTaint(node)) + + updatedNode, err := fakeClient.Core().Nodes().Get("node", metav1.GetOptions{}) + assert.NoError(t, err) + assert.True(t, HasToBeDeletedTaint(updatedNode)) } func TestCheckNodes(t *testing.T) { node := BuildTestNode("node", 1000, 1000) - fakeClient, updatedNodes := buildFakeClientAndUpdateChannel(node) + fakeClient := buildFakeClient(t, node) err := MarkToBeDeleted(node, fakeClient) assert.NoError(t, err) - assert.Equal(t, node.Name, getStringFromChan(updatedNodes)) - assert.True(t, HasToBeDeletedTaint(node)) - val, err := GetToBeDeletedTime(node) + updatedNode, err := fakeClient.Core().Nodes().Get("node", metav1.GetOptions{}) + assert.NoError(t, err) + assert.True(t, HasToBeDeletedTaint(updatedNode)) + + val, err := GetToBeDeletedTime(updatedNode) assert.NoError(t, err) assert.True(t, time.Now().Sub(*val) < 10*time.Second) } @@ -56,39 +63,39 @@ func TestCheckNodes(t *testing.T) { func TestCleanNodes(t *testing.T) { node := BuildTestNode("node", 1000, 1000) addToBeDeletedTaint(node) - fakeClient, updatedNodes := buildFakeClientAndUpdateChannel(node) + fakeClient := buildFakeClient(t, node) cleaned, err := CleanToBeDeleted(node, fakeClient) assert.True(t, cleaned) assert.NoError(t, err) - assert.Equal(t, node.Name, getStringFromChan(updatedNodes)) - assert.False(t, HasToBeDeletedTaint(node)) + + updatedNode, err := fakeClient.Core().Nodes().Get("node", metav1.GetOptions{}) + assert.NoError(t, err) + assert.False(t, HasToBeDeletedTaint(updatedNode)) } -func buildFakeClientAndUpdateChannel(node *apiv1.Node) (*fake.Clientset, chan string) { - fakeClient := &fake.Clientset{} - updatedNodes := make(chan string, 10) - fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) { - get := action.(core.GetAction) - if get.GetName() == node.Name { - return true, node, nil - } - return true, nil, errors.NewNotFound(apiv1.Resource("node"), get.GetName()) - }) - fakeClient.Fake.AddReactor("update", "nodes", func(action core.Action) (bool, runtime.Object, error) { +func buildFakeClient(t *testing.T, node *apiv1.Node) *fake.Clientset { + fakeClient := fake.NewSimpleClientset() + + _, err := fakeClient.CoreV1().Nodes().Create(node) + assert.NoError(t, err) + + // return a 'Conflict' error on the first update, then pass it through, then return a Conflict again + var returnedConflict int32 + fakeClient.Fake.PrependReactor("update", "nodes", func(action core.Action) (bool, runtime.Object, error) { update := action.(core.UpdateAction) obj := update.GetObject().(*apiv1.Node) - updatedNodes <- obj.Name - return true, obj, nil + + if atomic.LoadInt32(&returnedConflict) == 0 { + // allow the next update + atomic.StoreInt32(&returnedConflict, 1) + return true, nil, errors.NewConflict(apiv1.Resource("node"), obj.GetName(), fmt.Errorf("concurrent update on %s", obj.GetName())) + } + + // return 
a conflict on next update + atomic.StoreInt32(&returnedConflict, 0) + return false, nil, nil }) - return fakeClient, updatedNodes -} -func getStringFromChan(c chan string) string { - select { - case val := <-c: - return val - case <-time.After(time.Second * 10): - return "Nothing returned" - } + return fakeClient } diff --git a/cluster-autoscaler/utils/glogx/defaults.go b/cluster-autoscaler/utils/glogx/defaults.go index a5202c64df56..2ba8d2282704 100644 --- a/cluster-autoscaler/utils/glogx/defaults.go +++ b/cluster-autoscaler/utils/glogx/defaults.go @@ -16,7 +16,7 @@ limitations under the License. package glogx -import "github.com/golang/glog" +import "k8s.io/klog" const ( // MaxPodsLogged is the maximum number of pods for which we will @@ -29,7 +29,7 @@ const ( // PodsLoggingQuota returns a new quota with default limit for pods at current verbosity. func PodsLoggingQuota() *quota { - if glog.V(5) { + if klog.V(5) { return NewLoggingQuota(MaxPodsLoggedV5) } return NewLoggingQuota(MaxPodsLogged) diff --git a/cluster-autoscaler/utils/glogx/glogx.go b/cluster-autoscaler/utils/glogx/glogx.go index c33ba030393d..1b274ca04425 100644 --- a/cluster-autoscaler/utils/glogx/glogx.go +++ b/cluster-autoscaler/utils/glogx/glogx.go @@ -17,7 +17,7 @@ limitations under the License. package glogx import ( - "github.com/golang/glog" + "k8s.io/klog" ) type quota struct { @@ -42,39 +42,39 @@ func (q *quota) Reset() { // UpTo decreases quota for logging and reports whether there was any left. // The returned value is a boolean of type glogx.Verbose. -func UpTo(quota *quota) glog.Verbose { +func UpTo(quota *quota) klog.Verbose { quota.left-- return quota.left >= 0 } // Over reports whether quota for logging was exceeded. // The returned value is a boolean of type glogx.Verbose. -func Over(quota *quota) glog.Verbose { +func Over(quota *quota) klog.Verbose { return quota.left < 0 } // V calls V from glog and wraps the result into glogx.Verbose. -func V(n glog.Level) Verbose { - return Verbose(glog.V(n)) +func V(n klog.Level) Verbose { + return Verbose(klog.V(n)) } -// Verbose is a wrapper for glog.Verbose that implements UpTo and Over. -type Verbose glog.Verbose +// Verbose is a wrapper for klog.Verbose that implements UpTo and Over. +type Verbose klog.Verbose // UpTo calls UpTo from this package if called on true object. -// The returned value is a boolean of type glog.Verbose. -func (v Verbose) UpTo(quota *quota) glog.Verbose { +// The returned value is a boolean of type klog.Verbose. +func (v Verbose) UpTo(quota *quota) klog.Verbose { if v { return UpTo(quota) } - return glog.Verbose(false) + return klog.Verbose(false) } // Over calls Over from this package if called on true object. -// The returned value is a boolean of type glog.Verbose. -func (v Verbose) Over(quota *quota) glog.Verbose { +// The returned value is a boolean of type klog.Verbose. +func (v Verbose) Over(quota *quota) klog.Verbose { if v { return Over(quota) } - return glog.Verbose(false) + return klog.Verbose(false) } diff --git a/cluster-autoscaler/utils/glogx/glogx_test.go b/cluster-autoscaler/utils/glogx/glogx_test.go index c59c6b58c31d..3f952ace307e 100644 --- a/cluster-autoscaler/utils/glogx/glogx_test.go +++ b/cluster-autoscaler/utils/glogx/glogx_test.go @@ -19,8 +19,8 @@ package glogx import ( "testing" - "github.com/golang/glog" "github.com/stretchr/testify/assert" + "k8s.io/klog" ) // Left, UpTo and Over should work as expected. 
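After the switch, the quota helpers return `klog.Verbose` instead of `glog.Verbose`, but call sites keep the same shape. A hedged sketch of the typical pattern (the surrounding function and its `pods` argument are illustrative, not from this patch):

```go
package example

import (
	apiv1 "k8s.io/api/core/v1"

	"k8s.io/autoscaler/cluster-autoscaler/utils/glogx"
)

// logUnschedulable logs the first few pods in full at V(2), then one summary
// line for the remainder, keeping verbose logs bounded on large clusters.
func logUnschedulable(pods []*apiv1.Pod) {
	quota := glogx.PodsLoggingQuota()
	for _, pod := range pods {
		glogx.V(2).UpTo(quota).Infof("Pod %s/%s is unschedulable", pod.Namespace, pod.Name)
	}
	// Left() goes negative once the quota is exceeded, so -Left() is the
	// number of pods that were not logged individually.
	glogx.V(2).Over(quota).Infof("%v other pods are also unschedulable", -quota.Left())
}
```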
@@ -58,9 +58,9 @@ func TestVFalse(t *testing.T) { assert.Equal(t, 3, q.Left()) } -// Tests that V limits calls based on verbosity the same way as glog.V. +// Tests that V limits calls based on verbosity the same way as klog.V. func TestV(t *testing.T) { - for i := glog.Level(0); i <= 10; i++ { - assert.Equal(t, bool(glog.V(i)), bool(V(i))) + for i := klog.Level(0); i <= 10; i++ { + assert.Equal(t, bool(klog.V(i)), bool(V(i))) } } diff --git a/cluster-autoscaler/utils/gpu/gpu.go b/cluster-autoscaler/utils/gpu/gpu.go index 8f5af2ca1b78..0af015e45d16 100644 --- a/cluster-autoscaler/utils/gpu/gpu.go +++ b/cluster-autoscaler/utils/gpu/gpu.go @@ -21,7 +21,7 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -74,7 +74,7 @@ func FilterOutNodesWithUnreadyGpus(allNodes, readyNodes []*apiv1.Node) ([]*apiv1 // on node object. Assume the node is still not fully started (installing // GPU drivers). if hasGpuLabel && (!hasGpuAllocatable || gpuAllocatable.IsZero()) { - glog.V(3).Infof("Overriding status of node %v, which seems to have unready GPU", + klog.V(3).Infof("Overriding status of node %v, which seems to have unready GPU", node.Name) nodesWithUnreadyGpu[node.Name] = getUnreadyNodeCopy(node) } else { @@ -119,7 +119,7 @@ func GetGpuTypeForMetrics(node *apiv1.Node, nodeGroup cloudprovider.NodeGroup) s if nodeGroup != nil { template, err := nodeGroup.TemplateNodeInfo() if err != nil { - glog.Warningf("Failed to build template for getting GPU metrics for node %v: %v", node.Name, err) + klog.Warningf("Failed to build template for getting GPU metrics for node %v: %v", node.Name, err) return MetricsErrorGPU } @@ -128,7 +128,7 @@ func GetGpuTypeForMetrics(node *apiv1.Node, nodeGroup cloudprovider.NodeGroup) s } // if template does not define GPUs we assume node will not have any even if it has gpu label - glog.Warningf("Template does not define GPUs even though node from its node group does; node=%v", node.Name) + klog.Warningf("Template does not define GPUs even though node from its node group does; node=%v", node.Name) return MetricsUnexpectedLabelGPU } @@ -213,7 +213,7 @@ func GetNodeTargetGpus(node *apiv1.Node, nodeGroup cloudprovider.NodeGroup) (gpu template, err := nodeGroup.TemplateNodeInfo() if err != nil { - glog.Errorf("Failed to build template for getting GPU estimation for node %v: %v", node.Name, err) + klog.Errorf("Failed to build template for getting GPU estimation for node %v: %v", node.Name, err) return "", 0, errors.ToAutoscalerError(errors.CloudProviderError, err) } if gpuCapacity, found := template.Node().Status.Capacity[ResourceNvidiaGPU]; found { @@ -221,6 +221,6 @@ func GetNodeTargetGpus(node *apiv1.Node, nodeGroup cloudprovider.NodeGroup) (gpu } // if template does not define gpus we assume node will not have any even if ith has gpu label - glog.Warningf("Template does not define gpus even though node from its node group does; node=%v", node.Name) + klog.Warningf("Template does not define gpus even though node from its node group does; node=%v", node.Name) return "", 0, nil } diff --git a/cluster-autoscaler/utils/kubernetes/factory.go b/cluster-autoscaler/utils/kubernetes/factory.go index 11fff07fcff2..2ab478b5f2db 100644 --- a/cluster-autoscaler/utils/kubernetes/factory.go +++ b/cluster-autoscaler/utils/kubernetes/factory.go @@ -24,13 +24,13 @@ import ( v1core "k8s.io/client-go/kubernetes/typed/core/v1" kube_record "k8s.io/client-go/tools/record" - 
"github.com/golang/glog" + "k8s.io/klog" ) // CreateEventRecorder creates an event recorder to send custom events to Kubernetes to be recorded for targeted Kubernetes objects func CreateEventRecorder(kubeClient clientset.Interface) kube_record.EventRecorder { eventBroadcaster := kube_record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.V(4).Infof) + eventBroadcaster.StartLogging(klog.V(4).Infof) if _, isfake := kubeClient.(*fake.Clientset); !isfake { eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")}) } diff --git a/cluster-autoscaler/utils/labels/labels_test.go b/cluster-autoscaler/utils/labels/labels_test.go index fd8ac37aab5c..e82b784fa869 100644 --- a/cluster-autoscaler/utils/labels/labels_test.go +++ b/cluster-autoscaler/utils/labels/labels_test.go @@ -74,7 +74,7 @@ func TestBestLabelSet(t *testing.T) { p4 := BuildTestPod("p3", 100, 0) p4.Spec.NodeSelector = map[string]string{ - "A": "X", + "A": "X", "cloud.google.com/gke": "true", } diff --git a/cluster-autoscaler/utils/nodegroupset/node_group_set.go b/cluster-autoscaler/utils/nodegroupset/node_group_set.go deleted file mode 100644 index 27951fb27934..000000000000 --- a/cluster-autoscaler/utils/nodegroupset/node_group_set.go +++ /dev/null @@ -1,55 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nodegroupset - -import ( - "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" - "k8s.io/autoscaler/cluster-autoscaler/utils/errors" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" - - "github.com/golang/glog" -) - -// FindSimilarNodeGroups returns a list of NodeGroups similar to the given one. -// Two groups are similar if the NodeInfos for them compare equal using IsNodeInfoSimilar. -func FindSimilarNodeGroups(nodeGroup cloudprovider.NodeGroup, cloudProvider cloudprovider.CloudProvider, - nodeInfosForGroups map[string]*schedulercache.NodeInfo) ([]cloudprovider.NodeGroup, errors.AutoscalerError) { - result := []cloudprovider.NodeGroup{} - nodeGroupId := nodeGroup.Id() - nodeInfo, found := nodeInfosForGroups[nodeGroupId] - if !found { - return []cloudprovider.NodeGroup{}, errors.NewAutoscalerError( - errors.InternalError, - "failed to find template node for node group %s", - nodeGroupId) - } - for _, ng := range cloudProvider.NodeGroups() { - ngId := ng.Id() - if ngId == nodeGroupId { - continue - } - ngNodeInfo, found := nodeInfosForGroups[ngId] - if !found { - glog.Warningf("Failed to find nodeInfo for group %v", ngId) - continue - } - if IsNodeInfoSimilar(nodeInfo, ngNodeInfo) { - result = append(result, ng) - } - } - return result, nil -} diff --git a/cluster-autoscaler/utils/nodegroupset/node_group_set_test.go b/cluster-autoscaler/utils/nodegroupset/node_group_set_test.go deleted file mode 100644 index eb4fe272243f..000000000000 --- a/cluster-autoscaler/utils/nodegroupset/node_group_set_test.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nodegroupset - -import ( - "testing" - - "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" - testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test" - . "k8s.io/autoscaler/cluster-autoscaler/utils/test" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" - - "github.com/stretchr/testify/assert" -) - -func TestFindSimilarNodeGroups(t *testing.T) { - n1 := BuildTestNode("n1", 1000, 1000) - n2 := BuildTestNode("n2", 1000, 1000) - n3 := BuildTestNode("n3", 2000, 2000) - provider := testprovider.NewTestCloudProvider(nil, nil) - provider.AddNodeGroup("ng1", 1, 10, 1) - provider.AddNodeGroup("ng2", 1, 10, 1) - provider.AddNodeGroup("ng3", 1, 10, 1) - provider.AddNode("ng1", n1) - provider.AddNode("ng2", n2) - provider.AddNode("ng3", n3) - - ni1 := schedulercache.NewNodeInfo() - ni1.SetNode(n1) - ni2 := schedulercache.NewNodeInfo() - ni2.SetNode(n2) - ni3 := schedulercache.NewNodeInfo() - ni3.SetNode(n3) - - nodeInfosForGroups := map[string]*schedulercache.NodeInfo{ - "ng1": ni1, "ng2": ni2, "ng3": ni3, - } - - ng1, _ := provider.NodeGroupForNode(n1) - ng2, _ := provider.NodeGroupForNode(n2) - ng3, _ := provider.NodeGroupForNode(n3) - - similar, err := FindSimilarNodeGroups(ng1, provider, nodeInfosForGroups) - assert.NoError(t, err) - assert.Equal(t, similar, []cloudprovider.NodeGroup{ng2}) - - similar, err = FindSimilarNodeGroups(ng2, provider, nodeInfosForGroups) - assert.NoError(t, err) - assert.Equal(t, similar, []cloudprovider.NodeGroup{ng1}) - - similar, err = FindSimilarNodeGroups(ng3, provider, nodeInfosForGroups) - assert.NoError(t, err) - assert.Equal(t, similar, []cloudprovider.NodeGroup{}) -} diff --git a/cluster-autoscaler/utils/nodegroupset/scale_up.go b/cluster-autoscaler/utils/nodegroupset/scale_up.go deleted file mode 100644 index 272fd2316de2..000000000000 --- a/cluster-autoscaler/utils/nodegroupset/scale_up.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package nodegroupset - -import ( - "fmt" - "sort" - - "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" - "k8s.io/autoscaler/cluster-autoscaler/utils/errors" - - "github.com/golang/glog" -) - -// ScaleUpInfo contains information about planned scale-up of a single NodeGroup -type ScaleUpInfo struct { - // Group is the group to be scaled-up - Group cloudprovider.NodeGroup - // CurrentSize is the current size of the Group - CurrentSize int - // NewSize is the size the Group will be scaled-up to - NewSize int - // MaxSize is the maximum allowed size of the Group - MaxSize int -} - -// String is used for printing ScaleUpInfo for logging, etc -func (s ScaleUpInfo) String() string { - return fmt.Sprintf("{%v %v->%v (max: %v)}", s.Group.Id(), s.CurrentSize, s.NewSize, s.MaxSize) -} - -// BalanceScaleUpBetweenGroups distributes a given number of nodes between -// given set of NodeGroups. The nodes are added to smallest group first, trying -// to make the group sizes as evenly balanced as possible. -// -// Returns ScaleUpInfos for groups that need to be resized. -// -// MaxSize of each group will be respected. If newNodes > total free capacity -// of all NodeGroups it will be capped to total capacity. In particular if all -// group already have MaxSize, empty list will be returned. -func BalanceScaleUpBetweenGroups(groups []cloudprovider.NodeGroup, newNodes int) ([]ScaleUpInfo, errors.AutoscalerError) { - if len(groups) == 0 { - return []ScaleUpInfo{}, errors.NewAutoscalerError( - errors.InternalError, "Can't balance scale up between 0 groups") - } - - // get all data from cloudprovider, build data structure - scaleUpInfos := make([]ScaleUpInfo, 0) - totalCapacity := 0 - for _, ng := range groups { - currentSize, err := ng.TargetSize() - if err != nil { - return []ScaleUpInfo{}, errors.NewAutoscalerError( - errors.CloudProviderError, - "failed to get node group size: %v", err) - } - maxSize := ng.MaxSize() - if currentSize == maxSize { - // group already maxed, ignore it - continue - } - info := ScaleUpInfo{ - Group: ng, - CurrentSize: currentSize, - NewSize: currentSize, - MaxSize: maxSize} - scaleUpInfos = append(scaleUpInfos, info) - totalCapacity += maxSize - currentSize - } - if totalCapacity < newNodes { - glog.V(2).Infof("Requested scale-up (%v) exceeds node group set capacity, capping to %v", newNodes, totalCapacity) - newNodes = totalCapacity - } - - // The actual balancing algorithm. - // Sort the node groups by current size and just loop over nodes adding - // to smallest group. If a group hits max size remove it from the list - // (by moving it to start of the list and increasing startIndex). - // - // In each iteration we either allocate one node, or 'remove' a maxed out - // node group, so this will terminate in O(#nodes + #node groups) steps. - // We already know that newNodes <= total capacity, so we don't have to - // worry about accidentally removing all node groups while we still - // have nodes to allocate. - // - // Loop invariants: - // 1. i < startIndex -> scaleUpInfos[i].CurrentSize == scaleUpInfos[i].MaxSize - // 2. i >= startIndex -> scaleUpInfos[i].CurrentSize < scaleUpInfos[i].MaxSize - // 3. startIndex <= currentIndex < len(scaleUpInfos) - // 4. currentIndex <= i < j -> scaleUpInfos[i].CurrentSize <= scaleUpInfos[j].CurrentSize - // 5. startIndex <= i < j < currentIndex -> scaleUpInfos[i].CurrentSize == scaleUpInfos[j].CurrentSize - // 6. 
startIndex <= i < currentIndex <= j -> scaleUpInfos[i].CurrentSize <= scaleUpInfos[j].CurrentSize + 1 - sort.Slice(scaleUpInfos, func(i, j int) bool { - return scaleUpInfos[i].CurrentSize < scaleUpInfos[j].CurrentSize - }) - startIndex := 0 - currentIndex := 0 - for newNodes > 0 { - currentInfo := &scaleUpInfos[currentIndex] - - if currentInfo.NewSize < currentInfo.MaxSize { - // Add a node to group on currentIndex - currentInfo.NewSize++ - newNodes-- - } else { - // Group on currentIndex is full. Remove it from the array. - // Removing is done by swapping the group with the first - // group still in array and moving the start of the array. - // Every group between startIndex and currentIndex has the - // same size, so we can swap without breaking ordering. - scaleUpInfos[startIndex], scaleUpInfos[currentIndex] = scaleUpInfos[currentIndex], scaleUpInfos[startIndex] - startIndex++ - } - - // Update currentIndex. - // If we removed a group in this loop currentIndex may be equal to startIndex-1, - // in which case both branches of below if will make currentIndex == startIndex. - if currentIndex < len(scaleUpInfos)-1 && currentInfo.NewSize > scaleUpInfos[currentIndex+1].NewSize { - // Next group has exactly one less node, than current one. - // We will increase it in next iteration. - currentIndex++ - } else { - // We reached end of array, or a group larger than the current one. - // All groups from startIndex to currentIndex have the same size. - // So we're moving to the beginning of array to loop over all of - // them once again. - currentIndex = startIndex - } - } - - // Filter out groups that haven't changed size - result := make([]ScaleUpInfo, 0) - for _, info := range scaleUpInfos { - if info.NewSize != info.CurrentSize { - result = append(result, info) - } - } - - return result, nil -} diff --git a/cluster-autoscaler/utils/nodegroupset/scale_up_test.go b/cluster-autoscaler/utils/nodegroupset/scale_up_test.go deleted file mode 100644 index 43eacf8f8b4f..000000000000 --- a/cluster-autoscaler/utils/nodegroupset/scale_up_test.go +++ /dev/null @@ -1,160 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package nodegroupset - -import ( - "testing" - - "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" - testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test" - - "github.com/stretchr/testify/assert" -) - -func TestBalanceSingleGroup(t *testing.T) { - provider := testprovider.NewTestCloudProvider(nil, nil) - provider.AddNodeGroup("ng1", 1, 10, 1) - - // just one node - scaleUpInfo, err := BalanceScaleUpBetweenGroups(provider.NodeGroups(), 1) - assert.NoError(t, err) - assert.Equal(t, 1, len(scaleUpInfo)) - assert.Equal(t, 2, scaleUpInfo[0].NewSize) - - // multiple nodes - scaleUpInfo, err = BalanceScaleUpBetweenGroups(provider.NodeGroups(), 4) - assert.NoError(t, err) - assert.Equal(t, 1, len(scaleUpInfo)) - assert.Equal(t, 5, scaleUpInfo[0].NewSize) -} - -func TestBalanceUnderMaxSize(t *testing.T) { - provider := testprovider.NewTestCloudProvider(nil, nil) - provider.AddNodeGroup("ng1", 1, 10, 1) - provider.AddNodeGroup("ng2", 1, 10, 3) - provider.AddNodeGroup("ng3", 1, 10, 5) - provider.AddNodeGroup("ng4", 1, 10, 5) - - // add a single node - scaleUpInfo, err := BalanceScaleUpBetweenGroups(provider.NodeGroups(), 1) - assert.NoError(t, err) - assert.Equal(t, 1, len(scaleUpInfo)) - assert.Equal(t, 2, scaleUpInfo[0].NewSize) - - // add multiple nodes to single group - scaleUpInfo, err = BalanceScaleUpBetweenGroups(provider.NodeGroups(), 2) - assert.NoError(t, err) - assert.Equal(t, 1, len(scaleUpInfo)) - assert.Equal(t, 3, scaleUpInfo[0].NewSize) - - // add nodes to groups of different sizes, divisible - scaleUpInfo, err = BalanceScaleUpBetweenGroups(provider.NodeGroups(), 4) - assert.NoError(t, err) - assert.Equal(t, 2, len(scaleUpInfo)) - assert.Equal(t, 4, scaleUpInfo[0].NewSize) - assert.Equal(t, 4, scaleUpInfo[1].NewSize) - assert.True(t, scaleUpInfo[0].Group.Id() == "ng1" || scaleUpInfo[1].Group.Id() == "ng1") - assert.True(t, scaleUpInfo[0].Group.Id() == "ng2" || scaleUpInfo[1].Group.Id() == "ng2") - - // add nodes to groups of different sizes, non-divisible - // we expect new sizes to be 4 and 5, doesn't matter which group gets how many - scaleUpInfo, err = BalanceScaleUpBetweenGroups(provider.NodeGroups(), 5) - assert.NoError(t, err) - assert.Equal(t, 2, len(scaleUpInfo)) - assert.Equal(t, 9, scaleUpInfo[0].NewSize+scaleUpInfo[1].NewSize) - assert.True(t, scaleUpInfo[0].NewSize == 4 || scaleUpInfo[0].NewSize == 5) - assert.True(t, scaleUpInfo[0].Group.Id() == "ng1" || scaleUpInfo[1].Group.Id() == "ng1") - assert.True(t, scaleUpInfo[0].Group.Id() == "ng2" || scaleUpInfo[1].Group.Id() == "ng2") - - // add nodes to all groups, divisible - scaleUpInfo, err = BalanceScaleUpBetweenGroups(provider.NodeGroups(), 10) - assert.NoError(t, err) - assert.Equal(t, 4, len(scaleUpInfo)) - for _, info := range scaleUpInfo { - assert.Equal(t, 6, info.NewSize) - } -} - -func TestBalanceHittingMaxSize(t *testing.T) { - provider := testprovider.NewTestCloudProvider(nil, nil) - provider.AddNodeGroup("ng1", 1, 1, 1) - provider.AddNodeGroup("ng2", 1, 3, 1) - provider.AddNodeGroup("ng3", 1, 10, 3) - provider.AddNodeGroup("ng4", 1, 7, 5) - groupsMap := make(map[string]cloudprovider.NodeGroup) - for _, group := range provider.NodeGroups() { - groupsMap[group.Id()] = group - } - - getGroups := func(names ...string) []cloudprovider.NodeGroup { - result := make([]cloudprovider.NodeGroup, 0) - for _, n := range names { - result = append(result, groupsMap[n]) - } - return result - } - - toMap := func(suiList []ScaleUpInfo) map[string]ScaleUpInfo { - result := make(map[string]ScaleUpInfo, 0) 
- for _, sui := range suiList { - result[sui.Group.Id()] = sui - } - return result - } - - // Just one maxed out group - scaleUpInfo, err := BalanceScaleUpBetweenGroups(getGroups("ng1"), 1) - assert.NoError(t, err) - assert.Equal(t, 0, len(scaleUpInfo)) - - // Smallest group already maxed out, add one node - scaleUpInfo, err = BalanceScaleUpBetweenGroups(getGroups("ng1", "ng2"), 1) - assert.NoError(t, err) - assert.Equal(t, 1, len(scaleUpInfo)) - assert.Equal(t, "ng2", scaleUpInfo[0].Group.Id()) - assert.Equal(t, 2, scaleUpInfo[0].NewSize) - - // Smallest group already maxed out, too many nodes (should cap to max capacity) - scaleUpInfo, err = BalanceScaleUpBetweenGroups(getGroups("ng1", "ng2"), 5) - assert.NoError(t, err) - assert.Equal(t, 1, len(scaleUpInfo)) - assert.Equal(t, "ng2", scaleUpInfo[0].Group.Id()) - assert.Equal(t, 3, scaleUpInfo[0].NewSize) - - // First group maxes out before proceeding to next one - scaleUpInfo, err = BalanceScaleUpBetweenGroups(getGroups("ng2", "ng3"), 4) - assert.Equal(t, 2, len(scaleUpInfo)) - scaleUpMap := toMap(scaleUpInfo) - assert.Equal(t, 3, scaleUpMap["ng2"].NewSize) - assert.Equal(t, 5, scaleUpMap["ng3"].NewSize) - - // Last group maxes out before previous one - scaleUpInfo, err = BalanceScaleUpBetweenGroups(getGroups("ng2", "ng3", "ng4"), 9) - assert.Equal(t, 3, len(scaleUpInfo)) - scaleUpMap = toMap(scaleUpInfo) - assert.Equal(t, 3, scaleUpMap["ng2"].NewSize) - assert.Equal(t, 8, scaleUpMap["ng3"].NewSize) - assert.Equal(t, 7, scaleUpMap["ng4"].NewSize) - - // Use all capacity, cap to max - scaleUpInfo, err = BalanceScaleUpBetweenGroups(getGroups("ng2", "ng3", "ng4"), 900) - assert.Equal(t, 3, len(scaleUpInfo)) - scaleUpMap = toMap(scaleUpInfo) - assert.Equal(t, 3, scaleUpMap["ng2"].NewSize) - assert.Equal(t, 10, scaleUpMap["ng3"].NewSize) - assert.Equal(t, 7, scaleUpMap["ng4"].NewSize) -} diff --git a/cluster-autoscaler/utils/tpu/tpu_test.go b/cluster-autoscaler/utils/tpu/tpu_test.go index 6446a3bd3f31..e3af7f4467e2 100644 --- a/cluster-autoscaler/utils/tpu/tpu_test.go +++ b/cluster-autoscaler/utils/tpu/tpu_test.go @@ -83,7 +83,7 @@ func TestClearTPURequests(t *testing.T) { expected: podsWithoutTPUs, }, { - desc: "Pods with & without TPU reqest", + desc: "Pods with & without TPU request", pods: mixedPods, expected: sanitizedMixedPods, }, diff --git a/cluster-autoscaler/utils/units/units.go b/cluster-autoscaler/utils/units/units.go index 16a04e183e5c..ec03fcfe2871 100644 --- a/cluster-autoscaler/utils/units/units.go +++ b/cluster-autoscaler/utils/units/units.go @@ -17,6 +17,12 @@ limitations under the License. package units const ( - // Gigabyte is 2^30 bytes. - Gigabyte = 1024 * 1024 * 1024 + // GB - GigaByte size (10^9) + GB = 1000 * 1000 * 1000 + // GiB - GibiByte size (2^30) + GiB = 1024 * 1024 * 1024 + // MB - MegaByte size (10^6) + MB = 1000 * 1000 + // MiB - MebiByte size (2^20) + MiB = 1024 * 1024 ) diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/client.go deleted file mode 100644 index 49391ea5dd02..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/client.go +++ /dev/null @@ -1,51 +0,0 @@ -// Package compute implements the Azure ARM Compute service API version 2018-04-01. -// -// Compute Client -package compute - -// Copyright (c) Microsoft and contributors. All rights reserved. 
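Referring back to the `units` change above: the single `Gigabyte` constant is split into decimal (`GB`, `MB`) and binary (`GiB`, `MiB`) sizes, which matters when converting provider-advertised capacities (usually decimal) into binary resource quantities. A small illustration with a made-up 16 GB machine type; only the constants come from the patch:

```go
package example

import (
	"fmt"

	"k8s.io/autoscaler/cluster-autoscaler/utils/units"
)

// printMemory shows the roughly 7% gap between decimal and binary gigabytes.
func printMemory() {
	memBytes := int64(16 * units.GB) // provider-advertised, decimal
	fmt.Printf("%d bytes = %.2f GiB\n", memBytes, float64(memBytes)/units.GiB)
	// Prints: 16000000000 bytes = 14.90 GiB
}
```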
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "github.com/Azure/go-autorest/autorest" -) - -const ( - // DefaultBaseURI is the default URI used for the service Compute - DefaultBaseURI = "https://management.azure.com" -) - -// BaseClient is the base client for Compute. -type BaseClient struct { - autorest.Client - BaseURI string - SubscriptionID string -} - -// New creates an instance of the BaseClient client. -func New(subscriptionID string) BaseClient { - return NewWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewWithBaseURI creates an instance of the BaseClient client. -func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient { - return BaseClient{ - Client: autorest.NewClientWithUserAgent(UserAgent()), - BaseURI: baseURI, - SubscriptionID: subscriptionID, - } -} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/models.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/models.go deleted file mode 100644 index 166790d037ad..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/models.go +++ /dev/null @@ -1,7818 +0,0 @@ -package compute - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "encoding/json" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/date" - "github.com/Azure/go-autorest/autorest/to" - "net/http" -) - -// AccessLevel enumerates the values for access level. -type AccessLevel string - -const ( - // None ... - None AccessLevel = "None" - // Read ... - Read AccessLevel = "Read" -) - -// PossibleAccessLevelValues returns an array of possible values for the AccessLevel const type. -func PossibleAccessLevelValues() []AccessLevel { - return []AccessLevel{None, Read} -} - -// CachingTypes enumerates the values for caching types. -type CachingTypes string - -const ( - // CachingTypesNone ... - CachingTypesNone CachingTypes = "None" - // CachingTypesReadOnly ... 
- CachingTypesReadOnly CachingTypes = "ReadOnly" - // CachingTypesReadWrite ... - CachingTypesReadWrite CachingTypes = "ReadWrite" -) - -// PossibleCachingTypesValues returns an array of possible values for the CachingTypes const type. -func PossibleCachingTypesValues() []CachingTypes { - return []CachingTypes{CachingTypesNone, CachingTypesReadOnly, CachingTypesReadWrite} -} - -// ComponentNames enumerates the values for component names. -type ComponentNames string - -const ( - // MicrosoftWindowsShellSetup ... - MicrosoftWindowsShellSetup ComponentNames = "Microsoft-Windows-Shell-Setup" -) - -// PossibleComponentNamesValues returns an array of possible values for the ComponentNames const type. -func PossibleComponentNamesValues() []ComponentNames { - return []ComponentNames{MicrosoftWindowsShellSetup} -} - -// DiskCreateOption enumerates the values for disk create option. -type DiskCreateOption string - -const ( - // Attach ... - Attach DiskCreateOption = "Attach" - // Copy ... - Copy DiskCreateOption = "Copy" - // Empty ... - Empty DiskCreateOption = "Empty" - // FromImage ... - FromImage DiskCreateOption = "FromImage" - // Import ... - Import DiskCreateOption = "Import" - // Restore ... - Restore DiskCreateOption = "Restore" -) - -// PossibleDiskCreateOptionValues returns an array of possible values for the DiskCreateOption const type. -func PossibleDiskCreateOptionValues() []DiskCreateOption { - return []DiskCreateOption{Attach, Copy, Empty, FromImage, Import, Restore} -} - -// DiskCreateOptionTypes enumerates the values for disk create option types. -type DiskCreateOptionTypes string - -const ( - // DiskCreateOptionTypesAttach ... - DiskCreateOptionTypesAttach DiskCreateOptionTypes = "Attach" - // DiskCreateOptionTypesEmpty ... - DiskCreateOptionTypesEmpty DiskCreateOptionTypes = "Empty" - // DiskCreateOptionTypesFromImage ... - DiskCreateOptionTypesFromImage DiskCreateOptionTypes = "FromImage" -) - -// PossibleDiskCreateOptionTypesValues returns an array of possible values for the DiskCreateOptionTypes const type. -func PossibleDiskCreateOptionTypesValues() []DiskCreateOptionTypes { - return []DiskCreateOptionTypes{DiskCreateOptionTypesAttach, DiskCreateOptionTypesEmpty, DiskCreateOptionTypesFromImage} -} - -// InstanceViewTypes enumerates the values for instance view types. -type InstanceViewTypes string - -const ( - // InstanceView ... - InstanceView InstanceViewTypes = "instanceView" -) - -// PossibleInstanceViewTypesValues returns an array of possible values for the InstanceViewTypes const type. -func PossibleInstanceViewTypesValues() []InstanceViewTypes { - return []InstanceViewTypes{InstanceView} -} - -// IntervalInMins enumerates the values for interval in mins. -type IntervalInMins string - -const ( - // FiveMins ... - FiveMins IntervalInMins = "FiveMins" - // SixtyMins ... - SixtyMins IntervalInMins = "SixtyMins" - // ThirtyMins ... - ThirtyMins IntervalInMins = "ThirtyMins" - // ThreeMins ... - ThreeMins IntervalInMins = "ThreeMins" -) - -// PossibleIntervalInMinsValues returns an array of possible values for the IntervalInMins const type. -func PossibleIntervalInMinsValues() []IntervalInMins { - return []IntervalInMins{FiveMins, SixtyMins, ThirtyMins, ThreeMins} -} - -// IPVersion enumerates the values for ip version. -type IPVersion string - -const ( - // IPv4 ... - IPv4 IPVersion = "IPv4" - // IPv6 ... - IPv6 IPVersion = "IPv6" -) - -// PossibleIPVersionValues returns an array of possible values for the IPVersion const type. 
-func PossibleIPVersionValues() []IPVersion { - return []IPVersion{IPv4, IPv6} -} - -// MaintenanceOperationResultCodeTypes enumerates the values for maintenance operation result code types. -type MaintenanceOperationResultCodeTypes string - -const ( - // MaintenanceOperationResultCodeTypesMaintenanceAborted ... - MaintenanceOperationResultCodeTypesMaintenanceAborted MaintenanceOperationResultCodeTypes = "MaintenanceAborted" - // MaintenanceOperationResultCodeTypesMaintenanceCompleted ... - MaintenanceOperationResultCodeTypesMaintenanceCompleted MaintenanceOperationResultCodeTypes = "MaintenanceCompleted" - // MaintenanceOperationResultCodeTypesNone ... - MaintenanceOperationResultCodeTypesNone MaintenanceOperationResultCodeTypes = "None" - // MaintenanceOperationResultCodeTypesRetryLater ... - MaintenanceOperationResultCodeTypesRetryLater MaintenanceOperationResultCodeTypes = "RetryLater" -) - -// PossibleMaintenanceOperationResultCodeTypesValues returns an array of possible values for the MaintenanceOperationResultCodeTypes const type. -func PossibleMaintenanceOperationResultCodeTypesValues() []MaintenanceOperationResultCodeTypes { - return []MaintenanceOperationResultCodeTypes{MaintenanceOperationResultCodeTypesMaintenanceAborted, MaintenanceOperationResultCodeTypesMaintenanceCompleted, MaintenanceOperationResultCodeTypesNone, MaintenanceOperationResultCodeTypesRetryLater} -} - -// OperatingSystemStateTypes enumerates the values for operating system state types. -type OperatingSystemStateTypes string - -const ( - // Generalized ... - Generalized OperatingSystemStateTypes = "Generalized" - // Specialized ... - Specialized OperatingSystemStateTypes = "Specialized" -) - -// PossibleOperatingSystemStateTypesValues returns an array of possible values for the OperatingSystemStateTypes const type. -func PossibleOperatingSystemStateTypesValues() []OperatingSystemStateTypes { - return []OperatingSystemStateTypes{Generalized, Specialized} -} - -// OperatingSystemTypes enumerates the values for operating system types. -type OperatingSystemTypes string - -const ( - // Linux ... - Linux OperatingSystemTypes = "Linux" - // Windows ... - Windows OperatingSystemTypes = "Windows" -) - -// PossibleOperatingSystemTypesValues returns an array of possible values for the OperatingSystemTypes const type. -func PossibleOperatingSystemTypesValues() []OperatingSystemTypes { - return []OperatingSystemTypes{Linux, Windows} -} - -// PassNames enumerates the values for pass names. -type PassNames string - -const ( - // OobeSystem ... - OobeSystem PassNames = "OobeSystem" -) - -// PossiblePassNamesValues returns an array of possible values for the PassNames const type. -func PossiblePassNamesValues() []PassNames { - return []PassNames{OobeSystem} -} - -// ProtocolTypes enumerates the values for protocol types. -type ProtocolTypes string - -const ( - // HTTP ... - HTTP ProtocolTypes = "Http" - // HTTPS ... - HTTPS ProtocolTypes = "Https" -) - -// PossibleProtocolTypesValues returns an array of possible values for the ProtocolTypes const type. -func PossibleProtocolTypesValues() []ProtocolTypes { - return []ProtocolTypes{HTTP, HTTPS} -} - -// ResourceIdentityType enumerates the values for resource identity type. -type ResourceIdentityType string - -const ( - // ResourceIdentityTypeNone ... - ResourceIdentityTypeNone ResourceIdentityType = "None" - // ResourceIdentityTypeSystemAssigned ... - ResourceIdentityTypeSystemAssigned ResourceIdentityType = "SystemAssigned" - // ResourceIdentityTypeSystemAssignedUserAssigned ... 
- ResourceIdentityTypeSystemAssignedUserAssigned ResourceIdentityType = "SystemAssigned, UserAssigned" - // ResourceIdentityTypeUserAssigned ... - ResourceIdentityTypeUserAssigned ResourceIdentityType = "UserAssigned" -) - -// PossibleResourceIdentityTypeValues returns an array of possible values for the ResourceIdentityType const type. -func PossibleResourceIdentityTypeValues() []ResourceIdentityType { - return []ResourceIdentityType{ResourceIdentityTypeNone, ResourceIdentityTypeSystemAssigned, ResourceIdentityTypeSystemAssignedUserAssigned, ResourceIdentityTypeUserAssigned} -} - -// RollingUpgradeActionType enumerates the values for rolling upgrade action type. -type RollingUpgradeActionType string - -const ( - // Cancel ... - Cancel RollingUpgradeActionType = "Cancel" - // Start ... - Start RollingUpgradeActionType = "Start" -) - -// PossibleRollingUpgradeActionTypeValues returns an array of possible values for the RollingUpgradeActionType const type. -func PossibleRollingUpgradeActionTypeValues() []RollingUpgradeActionType { - return []RollingUpgradeActionType{Cancel, Start} -} - -// RollingUpgradeStatusCode enumerates the values for rolling upgrade status code. -type RollingUpgradeStatusCode string - -const ( - // Cancelled ... - Cancelled RollingUpgradeStatusCode = "Cancelled" - // Completed ... - Completed RollingUpgradeStatusCode = "Completed" - // Faulted ... - Faulted RollingUpgradeStatusCode = "Faulted" - // RollingForward ... - RollingForward RollingUpgradeStatusCode = "RollingForward" -) - -// PossibleRollingUpgradeStatusCodeValues returns an array of possible values for the RollingUpgradeStatusCode const type. -func PossibleRollingUpgradeStatusCodeValues() []RollingUpgradeStatusCode { - return []RollingUpgradeStatusCode{Cancelled, Completed, Faulted, RollingForward} -} - -// SettingNames enumerates the values for setting names. -type SettingNames string - -const ( - // AutoLogon ... - AutoLogon SettingNames = "AutoLogon" - // FirstLogonCommands ... - FirstLogonCommands SettingNames = "FirstLogonCommands" -) - -// PossibleSettingNamesValues returns an array of possible values for the SettingNames const type. -func PossibleSettingNamesValues() []SettingNames { - return []SettingNames{AutoLogon, FirstLogonCommands} -} - -// SnapshotStorageAccountTypes enumerates the values for snapshot storage account types. -type SnapshotStorageAccountTypes string - -const ( - // PremiumLRS ... - PremiumLRS SnapshotStorageAccountTypes = "Premium_LRS" - // StandardLRS ... - StandardLRS SnapshotStorageAccountTypes = "Standard_LRS" - // StandardZRS ... - StandardZRS SnapshotStorageAccountTypes = "Standard_ZRS" -) - -// PossibleSnapshotStorageAccountTypesValues returns an array of possible values for the SnapshotStorageAccountTypes const type. -func PossibleSnapshotStorageAccountTypesValues() []SnapshotStorageAccountTypes { - return []SnapshotStorageAccountTypes{PremiumLRS, StandardLRS, StandardZRS} -} - -// StatusLevelTypes enumerates the values for status level types. -type StatusLevelTypes string - -const ( - // Error ... - Error StatusLevelTypes = "Error" - // Info ... - Info StatusLevelTypes = "Info" - // Warning ... - Warning StatusLevelTypes = "Warning" -) - -// PossibleStatusLevelTypesValues returns an array of possible values for the StatusLevelTypes const type. -func PossibleStatusLevelTypesValues() []StatusLevelTypes { - return []StatusLevelTypes{Error, Info, Warning} -} - -// StorageAccountTypes enumerates the values for storage account types. 
-type StorageAccountTypes string - -const ( - // StorageAccountTypesPremiumLRS ... - StorageAccountTypesPremiumLRS StorageAccountTypes = "Premium_LRS" - // StorageAccountTypesStandardLRS ... - StorageAccountTypesStandardLRS StorageAccountTypes = "Standard_LRS" - // StorageAccountTypesStandardSSDLRS ... - StorageAccountTypesStandardSSDLRS StorageAccountTypes = "StandardSSD_LRS" -) - -// PossibleStorageAccountTypesValues returns an array of possible values for the StorageAccountTypes const type. -func PossibleStorageAccountTypesValues() []StorageAccountTypes { - return []StorageAccountTypes{StorageAccountTypesPremiumLRS, StorageAccountTypesStandardLRS, StorageAccountTypesStandardSSDLRS} -} - -// UpgradeMode enumerates the values for upgrade mode. -type UpgradeMode string - -const ( - // Automatic ... - Automatic UpgradeMode = "Automatic" - // Manual ... - Manual UpgradeMode = "Manual" - // Rolling ... - Rolling UpgradeMode = "Rolling" -) - -// PossibleUpgradeModeValues returns an array of possible values for the UpgradeMode const type. -func PossibleUpgradeModeValues() []UpgradeMode { - return []UpgradeMode{Automatic, Manual, Rolling} -} - -// UpgradeOperationInvoker enumerates the values for upgrade operation invoker. -type UpgradeOperationInvoker string - -const ( - // Platform ... - Platform UpgradeOperationInvoker = "Platform" - // Unknown ... - Unknown UpgradeOperationInvoker = "Unknown" - // User ... - User UpgradeOperationInvoker = "User" -) - -// PossibleUpgradeOperationInvokerValues returns an array of possible values for the UpgradeOperationInvoker const type. -func PossibleUpgradeOperationInvokerValues() []UpgradeOperationInvoker { - return []UpgradeOperationInvoker{Platform, Unknown, User} -} - -// UpgradeState enumerates the values for upgrade state. -type UpgradeState string - -const ( - // UpgradeStateCancelled ... - UpgradeStateCancelled UpgradeState = "Cancelled" - // UpgradeStateCompleted ... - UpgradeStateCompleted UpgradeState = "Completed" - // UpgradeStateFaulted ... - UpgradeStateFaulted UpgradeState = "Faulted" - // UpgradeStateRollingForward ... - UpgradeStateRollingForward UpgradeState = "RollingForward" -) - -// PossibleUpgradeStateValues returns an array of possible values for the UpgradeState const type. -func PossibleUpgradeStateValues() []UpgradeState { - return []UpgradeState{UpgradeStateCancelled, UpgradeStateCompleted, UpgradeStateFaulted, UpgradeStateRollingForward} -} - -// VirtualMachineEvictionPolicyTypes enumerates the values for virtual machine eviction policy types. -type VirtualMachineEvictionPolicyTypes string - -const ( - // Deallocate ... - Deallocate VirtualMachineEvictionPolicyTypes = "Deallocate" - // Delete ... - Delete VirtualMachineEvictionPolicyTypes = "Delete" -) - -// PossibleVirtualMachineEvictionPolicyTypesValues returns an array of possible values for the VirtualMachineEvictionPolicyTypes const type. -func PossibleVirtualMachineEvictionPolicyTypesValues() []VirtualMachineEvictionPolicyTypes { - return []VirtualMachineEvictionPolicyTypes{Deallocate, Delete} -} - -// VirtualMachinePriorityTypes enumerates the values for virtual machine priority types. -type VirtualMachinePriorityTypes string - -const ( - // Low ... - Low VirtualMachinePriorityTypes = "Low" - // Regular ... - Regular VirtualMachinePriorityTypes = "Regular" -) - -// PossibleVirtualMachinePriorityTypesValues returns an array of possible values for the VirtualMachinePriorityTypes const type. 
-func PossibleVirtualMachinePriorityTypesValues() []VirtualMachinePriorityTypes { - return []VirtualMachinePriorityTypes{Low, Regular} -} - -// VirtualMachineScaleSetSkuScaleType enumerates the values for virtual machine scale set sku scale type. -type VirtualMachineScaleSetSkuScaleType string - -const ( - // VirtualMachineScaleSetSkuScaleTypeAutomatic ... - VirtualMachineScaleSetSkuScaleTypeAutomatic VirtualMachineScaleSetSkuScaleType = "Automatic" - // VirtualMachineScaleSetSkuScaleTypeNone ... - VirtualMachineScaleSetSkuScaleTypeNone VirtualMachineScaleSetSkuScaleType = "None" -) - -// PossibleVirtualMachineScaleSetSkuScaleTypeValues returns an array of possible values for the VirtualMachineScaleSetSkuScaleType const type. -func PossibleVirtualMachineScaleSetSkuScaleTypeValues() []VirtualMachineScaleSetSkuScaleType { - return []VirtualMachineScaleSetSkuScaleType{VirtualMachineScaleSetSkuScaleTypeAutomatic, VirtualMachineScaleSetSkuScaleTypeNone} -} - -// VirtualMachineSizeTypes enumerates the values for virtual machine size types. -type VirtualMachineSizeTypes string - -const ( - // BasicA0 ... - BasicA0 VirtualMachineSizeTypes = "Basic_A0" - // BasicA1 ... - BasicA1 VirtualMachineSizeTypes = "Basic_A1" - // BasicA2 ... - BasicA2 VirtualMachineSizeTypes = "Basic_A2" - // BasicA3 ... - BasicA3 VirtualMachineSizeTypes = "Basic_A3" - // BasicA4 ... - BasicA4 VirtualMachineSizeTypes = "Basic_A4" - // StandardA0 ... - StandardA0 VirtualMachineSizeTypes = "Standard_A0" - // StandardA1 ... - StandardA1 VirtualMachineSizeTypes = "Standard_A1" - // StandardA10 ... - StandardA10 VirtualMachineSizeTypes = "Standard_A10" - // StandardA11 ... - StandardA11 VirtualMachineSizeTypes = "Standard_A11" - // StandardA1V2 ... - StandardA1V2 VirtualMachineSizeTypes = "Standard_A1_v2" - // StandardA2 ... - StandardA2 VirtualMachineSizeTypes = "Standard_A2" - // StandardA2mV2 ... - StandardA2mV2 VirtualMachineSizeTypes = "Standard_A2m_v2" - // StandardA2V2 ... - StandardA2V2 VirtualMachineSizeTypes = "Standard_A2_v2" - // StandardA3 ... - StandardA3 VirtualMachineSizeTypes = "Standard_A3" - // StandardA4 ... - StandardA4 VirtualMachineSizeTypes = "Standard_A4" - // StandardA4mV2 ... - StandardA4mV2 VirtualMachineSizeTypes = "Standard_A4m_v2" - // StandardA4V2 ... - StandardA4V2 VirtualMachineSizeTypes = "Standard_A4_v2" - // StandardA5 ... - StandardA5 VirtualMachineSizeTypes = "Standard_A5" - // StandardA6 ... - StandardA6 VirtualMachineSizeTypes = "Standard_A6" - // StandardA7 ... - StandardA7 VirtualMachineSizeTypes = "Standard_A7" - // StandardA8 ... - StandardA8 VirtualMachineSizeTypes = "Standard_A8" - // StandardA8mV2 ... - StandardA8mV2 VirtualMachineSizeTypes = "Standard_A8m_v2" - // StandardA8V2 ... - StandardA8V2 VirtualMachineSizeTypes = "Standard_A8_v2" - // StandardA9 ... - StandardA9 VirtualMachineSizeTypes = "Standard_A9" - // StandardB1ms ... - StandardB1ms VirtualMachineSizeTypes = "Standard_B1ms" - // StandardB1s ... - StandardB1s VirtualMachineSizeTypes = "Standard_B1s" - // StandardB2ms ... - StandardB2ms VirtualMachineSizeTypes = "Standard_B2ms" - // StandardB2s ... - StandardB2s VirtualMachineSizeTypes = "Standard_B2s" - // StandardB4ms ... - StandardB4ms VirtualMachineSizeTypes = "Standard_B4ms" - // StandardB8ms ... - StandardB8ms VirtualMachineSizeTypes = "Standard_B8ms" - // StandardD1 ... - StandardD1 VirtualMachineSizeTypes = "Standard_D1" - // StandardD11 ... - StandardD11 VirtualMachineSizeTypes = "Standard_D11" - // StandardD11V2 ... 
- StandardD11V2 VirtualMachineSizeTypes = "Standard_D11_v2" - // StandardD12 ... - StandardD12 VirtualMachineSizeTypes = "Standard_D12" - // StandardD12V2 ... - StandardD12V2 VirtualMachineSizeTypes = "Standard_D12_v2" - // StandardD13 ... - StandardD13 VirtualMachineSizeTypes = "Standard_D13" - // StandardD13V2 ... - StandardD13V2 VirtualMachineSizeTypes = "Standard_D13_v2" - // StandardD14 ... - StandardD14 VirtualMachineSizeTypes = "Standard_D14" - // StandardD14V2 ... - StandardD14V2 VirtualMachineSizeTypes = "Standard_D14_v2" - // StandardD15V2 ... - StandardD15V2 VirtualMachineSizeTypes = "Standard_D15_v2" - // StandardD16sV3 ... - StandardD16sV3 VirtualMachineSizeTypes = "Standard_D16s_v3" - // StandardD16V3 ... - StandardD16V3 VirtualMachineSizeTypes = "Standard_D16_v3" - // StandardD1V2 ... - StandardD1V2 VirtualMachineSizeTypes = "Standard_D1_v2" - // StandardD2 ... - StandardD2 VirtualMachineSizeTypes = "Standard_D2" - // StandardD2sV3 ... - StandardD2sV3 VirtualMachineSizeTypes = "Standard_D2s_v3" - // StandardD2V2 ... - StandardD2V2 VirtualMachineSizeTypes = "Standard_D2_v2" - // StandardD2V3 ... - StandardD2V3 VirtualMachineSizeTypes = "Standard_D2_v3" - // StandardD3 ... - StandardD3 VirtualMachineSizeTypes = "Standard_D3" - // StandardD32sV3 ... - StandardD32sV3 VirtualMachineSizeTypes = "Standard_D32s_v3" - // StandardD32V3 ... - StandardD32V3 VirtualMachineSizeTypes = "Standard_D32_v3" - // StandardD3V2 ... - StandardD3V2 VirtualMachineSizeTypes = "Standard_D3_v2" - // StandardD4 ... - StandardD4 VirtualMachineSizeTypes = "Standard_D4" - // StandardD4sV3 ... - StandardD4sV3 VirtualMachineSizeTypes = "Standard_D4s_v3" - // StandardD4V2 ... - StandardD4V2 VirtualMachineSizeTypes = "Standard_D4_v2" - // StandardD4V3 ... - StandardD4V3 VirtualMachineSizeTypes = "Standard_D4_v3" - // StandardD5V2 ... - StandardD5V2 VirtualMachineSizeTypes = "Standard_D5_v2" - // StandardD64sV3 ... - StandardD64sV3 VirtualMachineSizeTypes = "Standard_D64s_v3" - // StandardD64V3 ... - StandardD64V3 VirtualMachineSizeTypes = "Standard_D64_v3" - // StandardD8sV3 ... - StandardD8sV3 VirtualMachineSizeTypes = "Standard_D8s_v3" - // StandardD8V3 ... - StandardD8V3 VirtualMachineSizeTypes = "Standard_D8_v3" - // StandardDS1 ... - StandardDS1 VirtualMachineSizeTypes = "Standard_DS1" - // StandardDS11 ... - StandardDS11 VirtualMachineSizeTypes = "Standard_DS11" - // StandardDS11V2 ... - StandardDS11V2 VirtualMachineSizeTypes = "Standard_DS11_v2" - // StandardDS12 ... - StandardDS12 VirtualMachineSizeTypes = "Standard_DS12" - // StandardDS12V2 ... - StandardDS12V2 VirtualMachineSizeTypes = "Standard_DS12_v2" - // StandardDS13 ... - StandardDS13 VirtualMachineSizeTypes = "Standard_DS13" - // StandardDS132V2 ... - StandardDS132V2 VirtualMachineSizeTypes = "Standard_DS13-2_v2" - // StandardDS134V2 ... - StandardDS134V2 VirtualMachineSizeTypes = "Standard_DS13-4_v2" - // StandardDS13V2 ... - StandardDS13V2 VirtualMachineSizeTypes = "Standard_DS13_v2" - // StandardDS14 ... - StandardDS14 VirtualMachineSizeTypes = "Standard_DS14" - // StandardDS144V2 ... - StandardDS144V2 VirtualMachineSizeTypes = "Standard_DS14-4_v2" - // StandardDS148V2 ... - StandardDS148V2 VirtualMachineSizeTypes = "Standard_DS14-8_v2" - // StandardDS14V2 ... - StandardDS14V2 VirtualMachineSizeTypes = "Standard_DS14_v2" - // StandardDS15V2 ... - StandardDS15V2 VirtualMachineSizeTypes = "Standard_DS15_v2" - // StandardDS1V2 ... - StandardDS1V2 VirtualMachineSizeTypes = "Standard_DS1_v2" - // StandardDS2 ... 
- StandardDS2 VirtualMachineSizeTypes = "Standard_DS2" - // StandardDS2V2 ... - StandardDS2V2 VirtualMachineSizeTypes = "Standard_DS2_v2" - // StandardDS3 ... - StandardDS3 VirtualMachineSizeTypes = "Standard_DS3" - // StandardDS3V2 ... - StandardDS3V2 VirtualMachineSizeTypes = "Standard_DS3_v2" - // StandardDS4 ... - StandardDS4 VirtualMachineSizeTypes = "Standard_DS4" - // StandardDS4V2 ... - StandardDS4V2 VirtualMachineSizeTypes = "Standard_DS4_v2" - // StandardDS5V2 ... - StandardDS5V2 VirtualMachineSizeTypes = "Standard_DS5_v2" - // StandardE16sV3 ... - StandardE16sV3 VirtualMachineSizeTypes = "Standard_E16s_v3" - // StandardE16V3 ... - StandardE16V3 VirtualMachineSizeTypes = "Standard_E16_v3" - // StandardE2sV3 ... - StandardE2sV3 VirtualMachineSizeTypes = "Standard_E2s_v3" - // StandardE2V3 ... - StandardE2V3 VirtualMachineSizeTypes = "Standard_E2_v3" - // StandardE3216V3 ... - StandardE3216V3 VirtualMachineSizeTypes = "Standard_E32-16_v3" - // StandardE328sV3 ... - StandardE328sV3 VirtualMachineSizeTypes = "Standard_E32-8s_v3" - // StandardE32sV3 ... - StandardE32sV3 VirtualMachineSizeTypes = "Standard_E32s_v3" - // StandardE32V3 ... - StandardE32V3 VirtualMachineSizeTypes = "Standard_E32_v3" - // StandardE4sV3 ... - StandardE4sV3 VirtualMachineSizeTypes = "Standard_E4s_v3" - // StandardE4V3 ... - StandardE4V3 VirtualMachineSizeTypes = "Standard_E4_v3" - // StandardE6416sV3 ... - StandardE6416sV3 VirtualMachineSizeTypes = "Standard_E64-16s_v3" - // StandardE6432sV3 ... - StandardE6432sV3 VirtualMachineSizeTypes = "Standard_E64-32s_v3" - // StandardE64sV3 ... - StandardE64sV3 VirtualMachineSizeTypes = "Standard_E64s_v3" - // StandardE64V3 ... - StandardE64V3 VirtualMachineSizeTypes = "Standard_E64_v3" - // StandardE8sV3 ... - StandardE8sV3 VirtualMachineSizeTypes = "Standard_E8s_v3" - // StandardE8V3 ... - StandardE8V3 VirtualMachineSizeTypes = "Standard_E8_v3" - // StandardF1 ... - StandardF1 VirtualMachineSizeTypes = "Standard_F1" - // StandardF16 ... - StandardF16 VirtualMachineSizeTypes = "Standard_F16" - // StandardF16s ... - StandardF16s VirtualMachineSizeTypes = "Standard_F16s" - // StandardF16sV2 ... - StandardF16sV2 VirtualMachineSizeTypes = "Standard_F16s_v2" - // StandardF1s ... - StandardF1s VirtualMachineSizeTypes = "Standard_F1s" - // StandardF2 ... - StandardF2 VirtualMachineSizeTypes = "Standard_F2" - // StandardF2s ... - StandardF2s VirtualMachineSizeTypes = "Standard_F2s" - // StandardF2sV2 ... - StandardF2sV2 VirtualMachineSizeTypes = "Standard_F2s_v2" - // StandardF32sV2 ... - StandardF32sV2 VirtualMachineSizeTypes = "Standard_F32s_v2" - // StandardF4 ... - StandardF4 VirtualMachineSizeTypes = "Standard_F4" - // StandardF4s ... - StandardF4s VirtualMachineSizeTypes = "Standard_F4s" - // StandardF4sV2 ... - StandardF4sV2 VirtualMachineSizeTypes = "Standard_F4s_v2" - // StandardF64sV2 ... - StandardF64sV2 VirtualMachineSizeTypes = "Standard_F64s_v2" - // StandardF72sV2 ... - StandardF72sV2 VirtualMachineSizeTypes = "Standard_F72s_v2" - // StandardF8 ... - StandardF8 VirtualMachineSizeTypes = "Standard_F8" - // StandardF8s ... - StandardF8s VirtualMachineSizeTypes = "Standard_F8s" - // StandardF8sV2 ... - StandardF8sV2 VirtualMachineSizeTypes = "Standard_F8s_v2" - // StandardG1 ... - StandardG1 VirtualMachineSizeTypes = "Standard_G1" - // StandardG2 ... - StandardG2 VirtualMachineSizeTypes = "Standard_G2" - // StandardG3 ... - StandardG3 VirtualMachineSizeTypes = "Standard_G3" - // StandardG4 ... 
- StandardG4 VirtualMachineSizeTypes = "Standard_G4" - // StandardG5 ... - StandardG5 VirtualMachineSizeTypes = "Standard_G5" - // StandardGS1 ... - StandardGS1 VirtualMachineSizeTypes = "Standard_GS1" - // StandardGS2 ... - StandardGS2 VirtualMachineSizeTypes = "Standard_GS2" - // StandardGS3 ... - StandardGS3 VirtualMachineSizeTypes = "Standard_GS3" - // StandardGS4 ... - StandardGS4 VirtualMachineSizeTypes = "Standard_GS4" - // StandardGS44 ... - StandardGS44 VirtualMachineSizeTypes = "Standard_GS4-4" - // StandardGS48 ... - StandardGS48 VirtualMachineSizeTypes = "Standard_GS4-8" - // StandardGS5 ... - StandardGS5 VirtualMachineSizeTypes = "Standard_GS5" - // StandardGS516 ... - StandardGS516 VirtualMachineSizeTypes = "Standard_GS5-16" - // StandardGS58 ... - StandardGS58 VirtualMachineSizeTypes = "Standard_GS5-8" - // StandardH16 ... - StandardH16 VirtualMachineSizeTypes = "Standard_H16" - // StandardH16m ... - StandardH16m VirtualMachineSizeTypes = "Standard_H16m" - // StandardH16mr ... - StandardH16mr VirtualMachineSizeTypes = "Standard_H16mr" - // StandardH16r ... - StandardH16r VirtualMachineSizeTypes = "Standard_H16r" - // StandardH8 ... - StandardH8 VirtualMachineSizeTypes = "Standard_H8" - // StandardH8m ... - StandardH8m VirtualMachineSizeTypes = "Standard_H8m" - // StandardL16s ... - StandardL16s VirtualMachineSizeTypes = "Standard_L16s" - // StandardL32s ... - StandardL32s VirtualMachineSizeTypes = "Standard_L32s" - // StandardL4s ... - StandardL4s VirtualMachineSizeTypes = "Standard_L4s" - // StandardL8s ... - StandardL8s VirtualMachineSizeTypes = "Standard_L8s" - // StandardM12832ms ... - StandardM12832ms VirtualMachineSizeTypes = "Standard_M128-32ms" - // StandardM12864ms ... - StandardM12864ms VirtualMachineSizeTypes = "Standard_M128-64ms" - // StandardM128ms ... - StandardM128ms VirtualMachineSizeTypes = "Standard_M128ms" - // StandardM128s ... - StandardM128s VirtualMachineSizeTypes = "Standard_M128s" - // StandardM6416ms ... - StandardM6416ms VirtualMachineSizeTypes = "Standard_M64-16ms" - // StandardM6432ms ... - StandardM6432ms VirtualMachineSizeTypes = "Standard_M64-32ms" - // StandardM64ms ... - StandardM64ms VirtualMachineSizeTypes = "Standard_M64ms" - // StandardM64s ... - StandardM64s VirtualMachineSizeTypes = "Standard_M64s" - // StandardNC12 ... - StandardNC12 VirtualMachineSizeTypes = "Standard_NC12" - // StandardNC12sV2 ... - StandardNC12sV2 VirtualMachineSizeTypes = "Standard_NC12s_v2" - // StandardNC12sV3 ... - StandardNC12sV3 VirtualMachineSizeTypes = "Standard_NC12s_v3" - // StandardNC24 ... - StandardNC24 VirtualMachineSizeTypes = "Standard_NC24" - // StandardNC24r ... - StandardNC24r VirtualMachineSizeTypes = "Standard_NC24r" - // StandardNC24rsV2 ... - StandardNC24rsV2 VirtualMachineSizeTypes = "Standard_NC24rs_v2" - // StandardNC24rsV3 ... - StandardNC24rsV3 VirtualMachineSizeTypes = "Standard_NC24rs_v3" - // StandardNC24sV2 ... - StandardNC24sV2 VirtualMachineSizeTypes = "Standard_NC24s_v2" - // StandardNC24sV3 ... - StandardNC24sV3 VirtualMachineSizeTypes = "Standard_NC24s_v3" - // StandardNC6 ... - StandardNC6 VirtualMachineSizeTypes = "Standard_NC6" - // StandardNC6sV2 ... - StandardNC6sV2 VirtualMachineSizeTypes = "Standard_NC6s_v2" - // StandardNC6sV3 ... - StandardNC6sV3 VirtualMachineSizeTypes = "Standard_NC6s_v3" - // StandardND12s ... - StandardND12s VirtualMachineSizeTypes = "Standard_ND12s" - // StandardND24rs ... - StandardND24rs VirtualMachineSizeTypes = "Standard_ND24rs" - // StandardND24s ... 
- StandardND24s VirtualMachineSizeTypes = "Standard_ND24s" - // StandardND6s ... - StandardND6s VirtualMachineSizeTypes = "Standard_ND6s" - // StandardNV12 ... - StandardNV12 VirtualMachineSizeTypes = "Standard_NV12" - // StandardNV24 ... - StandardNV24 VirtualMachineSizeTypes = "Standard_NV24" - // StandardNV6 ... - StandardNV6 VirtualMachineSizeTypes = "Standard_NV6" -) - -// PossibleVirtualMachineSizeTypesValues returns an array of possible values for the VirtualMachineSizeTypes const type. -func PossibleVirtualMachineSizeTypesValues() []VirtualMachineSizeTypes { - return []VirtualMachineSizeTypes{BasicA0, BasicA1, BasicA2, BasicA3, BasicA4, StandardA0, StandardA1, StandardA10, StandardA11, StandardA1V2, StandardA2, StandardA2mV2, StandardA2V2, StandardA3, StandardA4, StandardA4mV2, StandardA4V2, StandardA5, StandardA6, StandardA7, StandardA8, StandardA8mV2, StandardA8V2, StandardA9, StandardB1ms, StandardB1s, StandardB2ms, StandardB2s, StandardB4ms, StandardB8ms, StandardD1, StandardD11, StandardD11V2, StandardD12, StandardD12V2, StandardD13, StandardD13V2, StandardD14, StandardD14V2, StandardD15V2, StandardD16sV3, StandardD16V3, StandardD1V2, StandardD2, StandardD2sV3, StandardD2V2, StandardD2V3, StandardD3, StandardD32sV3, StandardD32V3, StandardD3V2, StandardD4, StandardD4sV3, StandardD4V2, StandardD4V3, StandardD5V2, StandardD64sV3, StandardD64V3, StandardD8sV3, StandardD8V3, StandardDS1, StandardDS11, StandardDS11V2, StandardDS12, StandardDS12V2, StandardDS13, StandardDS132V2, StandardDS134V2, StandardDS13V2, StandardDS14, StandardDS144V2, StandardDS148V2, StandardDS14V2, StandardDS15V2, StandardDS1V2, StandardDS2, StandardDS2V2, StandardDS3, StandardDS3V2, StandardDS4, StandardDS4V2, StandardDS5V2, StandardE16sV3, StandardE16V3, StandardE2sV3, StandardE2V3, StandardE3216V3, StandardE328sV3, StandardE32sV3, StandardE32V3, StandardE4sV3, StandardE4V3, StandardE6416sV3, StandardE6432sV3, StandardE64sV3, StandardE64V3, StandardE8sV3, StandardE8V3, StandardF1, StandardF16, StandardF16s, StandardF16sV2, StandardF1s, StandardF2, StandardF2s, StandardF2sV2, StandardF32sV2, StandardF4, StandardF4s, StandardF4sV2, StandardF64sV2, StandardF72sV2, StandardF8, StandardF8s, StandardF8sV2, StandardG1, StandardG2, StandardG3, StandardG4, StandardG5, StandardGS1, StandardGS2, StandardGS3, StandardGS4, StandardGS44, StandardGS48, StandardGS5, StandardGS516, StandardGS58, StandardH16, StandardH16m, StandardH16mr, StandardH16r, StandardH8, StandardH8m, StandardL16s, StandardL32s, StandardL4s, StandardL8s, StandardM12832ms, StandardM12864ms, StandardM128ms, StandardM128s, StandardM6416ms, StandardM6432ms, StandardM64ms, StandardM64s, StandardNC12, StandardNC12sV2, StandardNC12sV3, StandardNC24, StandardNC24r, StandardNC24rsV2, StandardNC24rsV3, StandardNC24sV2, StandardNC24sV3, StandardNC6, StandardNC6sV2, StandardNC6sV3, StandardND12s, StandardND24rs, StandardND24s, StandardND6s, StandardNV12, StandardNV24, StandardNV6} -} - -// AccessURI a disk access SAS uri. -type AccessURI struct { - autorest.Response `json:"-"` - // AccessURIOutput - Operation output data (raw JSON) - *AccessURIOutput `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for AccessURI. -func (au AccessURI) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if au.AccessURIOutput != nil { - objectMap["properties"] = au.AccessURIOutput - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for AccessURI struct. 
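The string-backed enum types deleted above each pair with a generated `Possible...Values` helper. Caller code that takes SKU names from configuration can lean on these helpers to reject typos before a request ever reaches ARM. A minimal sketch; the import path assumes the 2018-04-01 compute package and may differ in the tree that actually vendors this file:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
)

// toVMSize validates a raw SKU string against the generated enum values and
// returns the typed constant, so callers cannot smuggle arbitrary names in.
func toVMSize(raw string) (compute.VirtualMachineSizeTypes, error) {
	for _, size := range compute.PossibleVirtualMachineSizeTypesValues() {
		if string(size) == raw {
			return size, nil
		}
	}
	return "", fmt.Errorf("unknown VM size %q", raw)
}

func main() {
	size, err := toVMSize("Standard_DS2_v2")
	if err != nil {
		panic(err)
	}
	fmt.Println(size) // prints: Standard_DS2_v2
}
```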
-func (au *AccessURI) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var accessURIOutput AccessURIOutput - err = json.Unmarshal(*v, &accessURIOutput) - if err != nil { - return err - } - au.AccessURIOutput = &accessURIOutput - } - } - } - - return nil -} - -// AccessURIOutput azure properties, including output. -type AccessURIOutput struct { - // AccessURIRaw - Operation output data (raw JSON) - *AccessURIRaw `json:"output,omitempty"` -} - -// MarshalJSON is the custom marshaler for AccessURIOutput. -func (auo AccessURIOutput) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if auo.AccessURIRaw != nil { - objectMap["output"] = auo.AccessURIRaw - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for AccessURIOutput struct. -func (auo *AccessURIOutput) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "output": - if v != nil { - var accessURIRaw AccessURIRaw - err = json.Unmarshal(*v, &accessURIRaw) - if err != nil { - return err - } - auo.AccessURIRaw = &accessURIRaw - } - } - } - - return nil -} - -// AccessURIRaw a disk access SAS uri. -type AccessURIRaw struct { - // AccessSAS - A SAS uri for accessing a disk. - AccessSAS *string `json:"accessSAS,omitempty"` -} - -// AdditionalUnattendContent specifies additional XML formatted information that can be included in the -// Unattend.xml file, which is used by Windows Setup. Contents are defined by setting name, component name, and the -// pass in which the content is applied. -type AdditionalUnattendContent struct { - // PassName - The pass name. Currently, the only allowable value is OobeSystem. Possible values include: 'OobeSystem' - PassName PassNames `json:"passName,omitempty"` - // ComponentName - The component name. Currently, the only allowable value is Microsoft-Windows-Shell-Setup. Possible values include: 'MicrosoftWindowsShellSetup' - ComponentName ComponentNames `json:"componentName,omitempty"` - // SettingName - Specifies the name of the setting to which the content applies. Possible values are: FirstLogonCommands and AutoLogon. Possible values include: 'AutoLogon', 'FirstLogonCommands' - SettingName SettingNames `json:"settingName,omitempty"` - // Content - Specifies the XML formatted content that is added to the unattend.xml file for the specified path and component. The XML must be less than 4KB and must include the root element for the setting or feature that is being inserted. - Content *string `json:"content,omitempty"` -} - -// APIEntityReference the API entity reference. -type APIEntityReference struct { - // ID - The ARM resource id in the form of /subscriptions/{SubcriptionId}/resourceGroups/{ResourceGroupName}/... - ID *string `json:"id,omitempty"` -} - -// APIError api error. -type APIError struct { - // Details - The Api error details - Details *[]APIErrorBase `json:"details,omitempty"` - // Innererror - The Api inner error - Innererror *InnerError `json:"innererror,omitempty"` - // Code - The error code. - Code *string `json:"code,omitempty"` - // Target - The target of the particular error. - Target *string `json:"target,omitempty"` - // Message - The error message. - Message *string `json:"message,omitempty"` -} - -// APIErrorBase api error base. 
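The custom `AccessURI` marshaler and unmarshaler above exist because ARM wraps payloads in a `properties` envelope while the Go structs embed the inner type directly; `UnmarshalJSON` flattens one onto the other. A small round-trip sketch (the SAS value and import path are placeholders):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
)

func main() {
	// ARM nests the SAS under properties.output; the custom unmarshalers
	// flatten the envelope onto the embedded AccessURIOutput/AccessURIRaw.
	body := []byte(`{"properties":{"output":{"accessSAS":"https://sas.example.invalid/disk"}}}`)

	var uri compute.AccessURI
	if err := json.Unmarshal(body, &uri); err != nil {
		panic(err)
	}
	fmt.Println(*uri.AccessURIOutput.AccessURIRaw.AccessSAS)
}
```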
-type APIErrorBase struct {
-	// Code - The error code.
-	Code *string `json:"code,omitempty"`
-	// Target - The target of the particular error.
-	Target *string `json:"target,omitempty"`
-	// Message - The error message.
-	Message *string `json:"message,omitempty"`
-}
-
-// AutoOSUpgradePolicy the configuration parameters used for performing automatic OS upgrade.
-type AutoOSUpgradePolicy struct {
-	// DisableAutoRollback - Whether OS image rollback feature should be disabled. Default value is false.
-	DisableAutoRollback *bool `json:"disableAutoRollback,omitempty"`
-}
-
-// AvailabilitySet specifies information about the availability set that the virtual machine should be assigned to.
-// Virtual machines specified in the same availability set are allocated to different nodes to maximize
-// availability. For more information about availability sets, see [Manage the availability of virtual
-// machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
-// <br><br> For more information on Azure planned maintenance, see [Planned maintenance for virtual machines in
-// Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)
-// <br><br>
Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added -// to an availability set. -type AvailabilitySet struct { - autorest.Response `json:"-"` - *AvailabilitySetProperties `json:"properties,omitempty"` - // Sku - Sku of the availability set - Sku *Sku `json:"sku,omitempty"` - // ID - Resource Id - ID *string `json:"id,omitempty"` - // Name - Resource name - Name *string `json:"name,omitempty"` - // Type - Resource type - Type *string `json:"type,omitempty"` - // Location - Resource location - Location *string `json:"location,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for AvailabilitySet. -func (as AvailabilitySet) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if as.AvailabilitySetProperties != nil { - objectMap["properties"] = as.AvailabilitySetProperties - } - if as.Sku != nil { - objectMap["sku"] = as.Sku - } - if as.ID != nil { - objectMap["id"] = as.ID - } - if as.Name != nil { - objectMap["name"] = as.Name - } - if as.Type != nil { - objectMap["type"] = as.Type - } - if as.Location != nil { - objectMap["location"] = as.Location - } - if as.Tags != nil { - objectMap["tags"] = as.Tags - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for AvailabilitySet struct. -func (as *AvailabilitySet) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var availabilitySetProperties AvailabilitySetProperties - err = json.Unmarshal(*v, &availabilitySetProperties) - if err != nil { - return err - } - as.AvailabilitySetProperties = &availabilitySetProperties - } - case "sku": - if v != nil { - var sku Sku - err = json.Unmarshal(*v, &sku) - if err != nil { - return err - } - as.Sku = &sku - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - as.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - as.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - as.Type = &typeVar - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - as.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - as.Tags = tags - } - } - } - - return nil -} - -// AvailabilitySetListResult the List Availability Set operation response. -type AvailabilitySetListResult struct { - autorest.Response `json:"-"` - // Value - The list of availability sets - Value *[]AvailabilitySet `json:"value,omitempty"` - // NextLink - The URI to fetch the next page of AvailabilitySets. Call ListNext() with this URI to fetch the next page of AvailabilitySets. - NextLink *string `json:"nextLink,omitempty"` -} - -// AvailabilitySetListResultIterator provides access to a complete listing of AvailabilitySet values. -type AvailabilitySetListResultIterator struct { - i int - page AvailabilitySetListResultPage -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. 
-func (iter *AvailabilitySetListResultIterator) Next() error { - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err := iter.page.Next() - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter AvailabilitySetListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter AvailabilitySetListResultIterator) Response() AvailabilitySetListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter AvailabilitySetListResultIterator) Value() AvailabilitySet { - if !iter.page.NotDone() { - return AvailabilitySet{} - } - return iter.page.Values()[iter.i] -} - -// IsEmpty returns true if the ListResult contains no values. -func (aslr AvailabilitySetListResult) IsEmpty() bool { - return aslr.Value == nil || len(*aslr.Value) == 0 -} - -// availabilitySetListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (aslr AvailabilitySetListResult) availabilitySetListResultPreparer() (*http.Request, error) { - if aslr.NextLink == nil || len(to.String(aslr.NextLink)) < 1 { - return nil, nil - } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(aslr.NextLink))) -} - -// AvailabilitySetListResultPage contains a page of AvailabilitySet values. -type AvailabilitySetListResultPage struct { - fn func(AvailabilitySetListResult) (AvailabilitySetListResult, error) - aslr AvailabilitySetListResult -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *AvailabilitySetListResultPage) Next() error { - next, err := page.fn(page.aslr) - if err != nil { - return err - } - page.aslr = next - return nil -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page AvailabilitySetListResultPage) NotDone() bool { - return !page.aslr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page AvailabilitySetListResultPage) Response() AvailabilitySetListResult { - return page.aslr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page AvailabilitySetListResultPage) Values() []AvailabilitySet { - if page.aslr.IsEmpty() { - return nil - } - return *page.aslr.Value -} - -// AvailabilitySetProperties the instance view of a resource. -type AvailabilitySetProperties struct { - // PlatformUpdateDomainCount - Update Domain count. - PlatformUpdateDomainCount *int32 `json:"platformUpdateDomainCount,omitempty"` - // PlatformFaultDomainCount - Fault Domain count. - PlatformFaultDomainCount *int32 `json:"platformFaultDomainCount,omitempty"` - // VirtualMachines - A list of references to all virtual machines in the availability set. - VirtualMachines *[]SubResource `json:"virtualMachines,omitempty"` - // Statuses - The resource status information. - Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` -} - -// AvailabilitySetUpdate specifies information about the availability set that the virtual machine should be -// assigned to. Only tags may be updated. 
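The iterator above follows the SDK-wide pagination pattern: `NotDone` reports whether a value is available, `Value` returns it, and `Next` advances, fetching the following page through `NextLink` when the current one is exhausted. A sketch of the canonical loop; how the iterator is obtained is left out, since the client methods that return it (`ListComplete` and friends) live outside this file:

```go
package example

import "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"

// collectNames drains a paginated listing into a plain slice. Next fetches
// subsequent pages transparently; the loop ends when NotDone reports false.
func collectNames(iter compute.AvailabilitySetListResultIterator) ([]string, error) {
	var names []string
	for iter.NotDone() {
		if as := iter.Value(); as.Name != nil {
			names = append(names, *as.Name)
		}
		if err := iter.Next(); err != nil {
			return nil, err
		}
	}
	return names, nil
}
```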
-type AvailabilitySetUpdate struct { - *AvailabilitySetProperties `json:"properties,omitempty"` - // Sku - Sku of the availability set - Sku *Sku `json:"sku,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for AvailabilitySetUpdate. -func (asu AvailabilitySetUpdate) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if asu.AvailabilitySetProperties != nil { - objectMap["properties"] = asu.AvailabilitySetProperties - } - if asu.Sku != nil { - objectMap["sku"] = asu.Sku - } - if asu.Tags != nil { - objectMap["tags"] = asu.Tags - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for AvailabilitySetUpdate struct. -func (asu *AvailabilitySetUpdate) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var availabilitySetProperties AvailabilitySetProperties - err = json.Unmarshal(*v, &availabilitySetProperties) - if err != nil { - return err - } - asu.AvailabilitySetProperties = &availabilitySetProperties - } - case "sku": - if v != nil { - var sku Sku - err = json.Unmarshal(*v, &sku) - if err != nil { - return err - } - asu.Sku = &sku - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - asu.Tags = tags - } - } - } - - return nil -} - -// BootDiagnostics boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot -// to diagnose VM status.

For Linux Virtual Machines, you can easily view the output of your console log. -//

For both Windows and Linux virtual machines, Azure also enables you to see a screenshot of the VM from -// the hypervisor. -type BootDiagnostics struct { - // Enabled - Whether boot diagnostics should be enabled on the Virtual Machine. - Enabled *bool `json:"enabled,omitempty"` - // StorageURI - Uri of the storage account to use for placing the console output and screenshot. - StorageURI *string `json:"storageUri,omitempty"` -} - -// BootDiagnosticsInstanceView the instance view of a virtual machine boot diagnostics. -type BootDiagnosticsInstanceView struct { - // ConsoleScreenshotBlobURI - The console screenshot blob URI. - ConsoleScreenshotBlobURI *string `json:"consoleScreenshotBlobUri,omitempty"` - // SerialConsoleLogBlobURI - The Linux serial console log blob Uri. - SerialConsoleLogBlobURI *string `json:"serialConsoleLogBlobUri,omitempty"` -} - -// CreationData data used when creating a disk. -type CreationData struct { - // CreateOption - This enumerates the possible sources of a disk's creation. Possible values include: 'Empty', 'Attach', 'FromImage', 'Import', 'Copy', 'Restore' - CreateOption DiskCreateOption `json:"createOption,omitempty"` - // StorageAccountID - If createOption is Import, the Azure Resource Manager identifier of the storage account containing the blob to import as a disk. Required only if the blob is in a different subscription - StorageAccountID *string `json:"storageAccountId,omitempty"` - // ImageReference - Disk source information. - ImageReference *ImageDiskReference `json:"imageReference,omitempty"` - // SourceURI - If createOption is Import, this is the URI of a blob to be imported into a managed disk. - SourceURI *string `json:"sourceUri,omitempty"` - // SourceResourceID - If createOption is Copy, this is the ARM id of the source snapshot or disk. - SourceResourceID *string `json:"sourceResourceId,omitempty"` -} - -// DataDisk describes a data disk. -type DataDisk struct { - // Lun - Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. - Lun *int32 `json:"lun,omitempty"` - // Name - The disk name. - Name *string `json:"name,omitempty"` - // Vhd - The virtual hard disk. - Vhd *VirtualHardDisk `json:"vhd,omitempty"` - // Image - The source user image virtual hard disk. The virtual hard disk will be copied before being attached to the virtual machine. If SourceImage is provided, the destination virtual hard drive must not exist. - Image *VirtualHardDisk `json:"image,omitempty"` - // Caching - Specifies the caching requirements.

Possible values are:

**None**

**ReadOnly**

**ReadWrite**

Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' - Caching CachingTypes `json:"caching,omitempty"` - // WriteAcceleratorEnabled - Specifies whether writeAccelerator should be enabled or disabled on the disk. - WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"` - // CreateOption - Specifies how the virtual machine should be created.

Possible values are:

**Attach** \u2013 This value is used when you are using a specialized disk to create the virtual machine.

**FromImage** \u2013 This value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imageReference element described above. If you are using a marketplace image, you also use the plan element previously described. Possible values include: 'DiskCreateOptionTypesFromImage', 'DiskCreateOptionTypesEmpty', 'DiskCreateOptionTypesAttach' - CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` - // DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image.

This value cannot be larger than 1023 GB - DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` - // ManagedDisk - The managed disk parameters. - ManagedDisk *ManagedDiskParameters `json:"managedDisk,omitempty"` -} - -// DataDiskImage contains the data disk images information. -type DataDiskImage struct { - // Lun - Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. - Lun *int32 `json:"lun,omitempty"` -} - -// DiagnosticsProfile specifies the boot diagnostic settings state.

Minimum api-version: 2015-06-15. -type DiagnosticsProfile struct { - // BootDiagnostics - Boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot to diagnose VM status.

For Linux Virtual Machines, you can easily view the output of your console log.

For both Windows and Linux virtual machines, Azure also enables you to see a screenshot of the VM from the hypervisor. - BootDiagnostics *BootDiagnostics `json:"bootDiagnostics,omitempty"` -} - -// Disk disk resource. -type Disk struct { - autorest.Response `json:"-"` - // ManagedBy - A relative URI containing the ID of the VM that has the disk attached. - ManagedBy *string `json:"managedBy,omitempty"` - Sku *DiskSku `json:"sku,omitempty"` - // Zones - The Logical zone list for Disk. - Zones *[]string `json:"zones,omitempty"` - *DiskProperties `json:"properties,omitempty"` - // ID - Resource Id - ID *string `json:"id,omitempty"` - // Name - Resource name - Name *string `json:"name,omitempty"` - // Type - Resource type - Type *string `json:"type,omitempty"` - // Location - Resource location - Location *string `json:"location,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for Disk. -func (d Disk) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if d.ManagedBy != nil { - objectMap["managedBy"] = d.ManagedBy - } - if d.Sku != nil { - objectMap["sku"] = d.Sku - } - if d.Zones != nil { - objectMap["zones"] = d.Zones - } - if d.DiskProperties != nil { - objectMap["properties"] = d.DiskProperties - } - if d.ID != nil { - objectMap["id"] = d.ID - } - if d.Name != nil { - objectMap["name"] = d.Name - } - if d.Type != nil { - objectMap["type"] = d.Type - } - if d.Location != nil { - objectMap["location"] = d.Location - } - if d.Tags != nil { - objectMap["tags"] = d.Tags - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for Disk struct. -func (d *Disk) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "managedBy": - if v != nil { - var managedBy string - err = json.Unmarshal(*v, &managedBy) - if err != nil { - return err - } - d.ManagedBy = &managedBy - } - case "sku": - if v != nil { - var sku DiskSku - err = json.Unmarshal(*v, &sku) - if err != nil { - return err - } - d.Sku = &sku - } - case "zones": - if v != nil { - var zones []string - err = json.Unmarshal(*v, &zones) - if err != nil { - return err - } - d.Zones = &zones - } - case "properties": - if v != nil { - var diskProperties DiskProperties - err = json.Unmarshal(*v, &diskProperties) - if err != nil { - return err - } - d.DiskProperties = &diskProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - d.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - d.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - d.Type = &typeVar - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - d.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - d.Tags = tags - } - } - } - - return nil -} - -// DiskEncryptionSettings describes a Encryption Settings for a Disk -type DiskEncryptionSettings struct { - // DiskEncryptionKey - Specifies the location of the disk encryption key, which is a Key Vault Secret. 
-	DiskEncryptionKey *KeyVaultSecretReference `json:"diskEncryptionKey,omitempty"`
-	// KeyEncryptionKey - Specifies the location of the key encryption key in Key Vault.
-	KeyEncryptionKey *KeyVaultKeyReference `json:"keyEncryptionKey,omitempty"`
-	// Enabled - Specifies whether disk encryption should be enabled on the virtual machine.
-	Enabled *bool `json:"enabled,omitempty"`
-}
-
-// DiskInstanceView the instance view of the disk.
-type DiskInstanceView struct {
-	// Name - The disk name.
-	Name *string `json:"name,omitempty"`
-	// EncryptionSettings - Specifies the encryption settings for the OS Disk. <br><br>
Minimum api-version: 2015-06-15 - EncryptionSettings *[]DiskEncryptionSettings `json:"encryptionSettings,omitempty"` - // Statuses - The resource status information. - Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` -} - -// DiskList the List Disks operation response. -type DiskList struct { - autorest.Response `json:"-"` - // Value - A list of disks. - Value *[]Disk `json:"value,omitempty"` - // NextLink - The uri to fetch the next page of disks. Call ListNext() with this to fetch the next page of disks. - NextLink *string `json:"nextLink,omitempty"` -} - -// DiskListIterator provides access to a complete listing of Disk values. -type DiskListIterator struct { - i int - page DiskListPage -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *DiskListIterator) Next() error { - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err := iter.page.Next() - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter DiskListIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter DiskListIterator) Response() DiskList { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter DiskListIterator) Value() Disk { - if !iter.page.NotDone() { - return Disk{} - } - return iter.page.Values()[iter.i] -} - -// IsEmpty returns true if the ListResult contains no values. -func (dl DiskList) IsEmpty() bool { - return dl.Value == nil || len(*dl.Value) == 0 -} - -// diskListPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (dl DiskList) diskListPreparer() (*http.Request, error) { - if dl.NextLink == nil || len(to.String(dl.NextLink)) < 1 { - return nil, nil - } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(dl.NextLink))) -} - -// DiskListPage contains a page of Disk values. -type DiskListPage struct { - fn func(DiskList) (DiskList, error) - dl DiskList -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *DiskListPage) Next() error { - next, err := page.fn(page.dl) - if err != nil { - return err - } - page.dl = next - return nil -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page DiskListPage) NotDone() bool { - return !page.dl.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page DiskListPage) Response() DiskList { - return page.dl -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page DiskListPage) Values() []Disk { - if page.dl.IsEmpty() { - return nil - } - return *page.dl.Value -} - -// DiskProperties disk resource properties. -type DiskProperties struct { - // TimeCreated - The time when the disk was created. - TimeCreated *date.Time `json:"timeCreated,omitempty"` - // OsType - The Operating System type. 
Possible values include: 'Windows', 'Linux' - OsType OperatingSystemTypes `json:"osType,omitempty"` - // CreationData - Disk source information. CreationData information cannot be changed after the disk has been created. - CreationData *CreationData `json:"creationData,omitempty"` - // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size. - DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` - // EncryptionSettings - Encryption settings for disk or snapshot - EncryptionSettings *EncryptionSettings `json:"encryptionSettings,omitempty"` - // ProvisioningState - The disk provisioning state. - ProvisioningState *string `json:"provisioningState,omitempty"` -} - -// DisksCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. -type DisksCreateOrUpdateFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *DisksCreateOrUpdateFuture) Result(client DisksClient) (d Disk, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.DisksCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if d.Response.Response, err = future.GetResult(sender); err == nil && d.Response.Response.StatusCode != http.StatusNoContent { - d, err = client.CreateOrUpdateResponder(d.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksCreateOrUpdateFuture", "Result", d.Response.Response, "Failure responding to request") - } - } - return -} - -// DisksDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. -type DisksDeleteFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *DisksDeleteFuture) Result(client DisksClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.DisksDeleteFuture") - return - } - ar.Response = future.Response() - return -} - -// DisksGrantAccessFuture an abstraction for monitoring and retrieving the results of a long-running operation. -type DisksGrantAccessFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. 
-func (future *DisksGrantAccessFuture) Result(client DisksClient) (au AccessURI, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksGrantAccessFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.DisksGrantAccessFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if au.Response.Response, err = future.GetResult(sender); err == nil && au.Response.Response.StatusCode != http.StatusNoContent { - au, err = client.GrantAccessResponder(au.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksGrantAccessFuture", "Result", au.Response.Response, "Failure responding to request") - } - } - return -} - -// DiskSku the disks sku name. Can be Standard_LRS, Premium_LRS, or StandardSSD_LRS. -type DiskSku struct { - // Name - The sku name. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS' - Name StorageAccountTypes `json:"name,omitempty"` - // Tier - The sku tier. - Tier *string `json:"tier,omitempty"` -} - -// DisksRevokeAccessFuture an abstraction for monitoring and retrieving the results of a long-running operation. -type DisksRevokeAccessFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *DisksRevokeAccessFuture) Result(client DisksClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksRevokeAccessFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.DisksRevokeAccessFuture") - return - } - ar.Response = future.Response() - return -} - -// DisksUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. -type DisksUpdateFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *DisksUpdateFuture) Result(client DisksClient) (d Disk, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.DisksUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if d.Response.Response, err = future.GetResult(sender); err == nil && d.Response.Response.StatusCode != http.StatusNoContent { - d, err = client.UpdateResponder(d.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksUpdateFuture", "Result", d.Response.Response, "Failure responding to request") - } - } - return -} - -// DiskUpdate disk update resource. -type DiskUpdate struct { - *DiskUpdateProperties `json:"properties,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` - Sku *DiskSku `json:"sku,omitempty"` -} - -// MarshalJSON is the custom marshaler for DiskUpdate. 
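The `...Future` types above wrap ARM long-running operations: the initial call returns immediately, the future is polled until the operation finishes, and `Result` decodes the final payload. A sketch of the create-then-grant-access flow, assuming the `go-autorest` vintage vendored alongside this file still exposes `WaitForCompletion`; the `AccessLevel("Read")` cast sidesteps the generated constant name, which is not shown in this excerpt, and real code should nil-check the returned SAS pointer:

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

// createAndExport creates a managed disk, waits for the operation to finish,
// then requests a one-hour read SAS on it via a second long-running call.
func createAndExport(ctx context.Context, client compute.DisksClient, rg, name string, model compute.Disk) (string, error) {
	future, err := client.CreateOrUpdate(ctx, rg, name, model)
	if err != nil {
		return "", err
	}
	if err := future.WaitForCompletion(ctx, client.Client); err != nil {
		return "", err
	}
	if _, err := future.Result(client); err != nil {
		return "", err
	}

	grant, err := client.GrantAccess(ctx, rg, name, compute.GrantAccessData{
		Access:            compute.AccessLevel("Read"),
		DurationInSeconds: to.Int32Ptr(3600),
	})
	if err != nil {
		return "", err
	}
	if err := grant.WaitForCompletion(ctx, client.Client); err != nil {
		return "", err
	}
	uri, err := grant.Result(client)
	if err != nil {
		return "", err
	}
	return *uri.AccessSAS, nil
}
```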
-func (du DiskUpdate) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if du.DiskUpdateProperties != nil { - objectMap["properties"] = du.DiskUpdateProperties - } - if du.Tags != nil { - objectMap["tags"] = du.Tags - } - if du.Sku != nil { - objectMap["sku"] = du.Sku - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for DiskUpdate struct. -func (du *DiskUpdate) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var diskUpdateProperties DiskUpdateProperties - err = json.Unmarshal(*v, &diskUpdateProperties) - if err != nil { - return err - } - du.DiskUpdateProperties = &diskUpdateProperties - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - du.Tags = tags - } - case "sku": - if v != nil { - var sku DiskSku - err = json.Unmarshal(*v, &sku) - if err != nil { - return err - } - du.Sku = &sku - } - } - } - - return nil -} - -// DiskUpdateProperties disk resource update properties. -type DiskUpdateProperties struct { - // OsType - the Operating System type. Possible values include: 'Windows', 'Linux' - OsType OperatingSystemTypes `json:"osType,omitempty"` - // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size. - DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` - // EncryptionSettings - Encryption settings for disk or snapshot - EncryptionSettings *EncryptionSettings `json:"encryptionSettings,omitempty"` -} - -// EncryptionSettings encryption settings for disk or snapshot -type EncryptionSettings struct { - // Enabled - Set this flag to true and provide DiskEncryptionKey and optional KeyEncryptionKey to enable encryption. Set this flag to false and remove DiskEncryptionKey and KeyEncryptionKey to disable encryption. If EncryptionSettings is null in the request object, the existing settings remain unchanged. - Enabled *bool `json:"enabled,omitempty"` - // DiskEncryptionKey - Key Vault Secret Url and vault id of the disk encryption key - DiskEncryptionKey *KeyVaultAndSecretReference `json:"diskEncryptionKey,omitempty"` - // KeyEncryptionKey - Key Vault Key Url and vault id of the key encryption key - KeyEncryptionKey *KeyVaultAndKeyReference `json:"keyEncryptionKey,omitempty"` -} - -// GrantAccessData data used for requesting a SAS. -type GrantAccessData struct { - // Access - Possible values include: 'None', 'Read' - Access AccessLevel `json:"access,omitempty"` - // DurationInSeconds - Time duration in seconds until the SAS access expires. - DurationInSeconds *int32 `json:"durationInSeconds,omitempty"` -} - -// HardwareProfile specifies the hardware settings for the virtual machine. -type HardwareProfile struct { - // VMSize - Specifies the size of the virtual machine. For more information about virtual machine sizes, see [Sizes for virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-sizes?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).

-	// The available VM sizes depend on region and availability set. For a list of available sizes use these APIs:
-	// [List all available virtual machine sizes in an availability set](https://docs.microsoft.com/rest/api/compute/availabilitysets/listavailablesizes)
-	// [List all available virtual machine sizes in a region](https://docs.microsoft.com/rest/api/compute/virtualmachinesizes/list)
[List all available virtual machine sizes for resizing](https://docs.microsoft.com/rest/api/compute/virtualmachines/listavailablesizes). Possible values include: 'BasicA0', 'BasicA1', 'BasicA2', 'BasicA3', 'BasicA4', 'StandardA0', 'StandardA1', 'StandardA2', 'StandardA3', 'StandardA4', 'StandardA5', 'StandardA6', 'StandardA7', 'StandardA8', 'StandardA9', 'StandardA10', 'StandardA11', 'StandardA1V2', 'StandardA2V2', 'StandardA4V2', 'StandardA8V2', 'StandardA2mV2', 'StandardA4mV2', 'StandardA8mV2', 'StandardB1s', 'StandardB1ms', 'StandardB2s', 'StandardB2ms', 'StandardB4ms', 'StandardB8ms', 'StandardD1', 'StandardD2', 'StandardD3', 'StandardD4', 'StandardD11', 'StandardD12', 'StandardD13', 'StandardD14', 'StandardD1V2', 'StandardD2V2', 'StandardD3V2', 'StandardD4V2', 'StandardD5V2', 'StandardD2V3', 'StandardD4V3', 'StandardD8V3', 'StandardD16V3', 'StandardD32V3', 'StandardD64V3', 'StandardD2sV3', 'StandardD4sV3', 'StandardD8sV3', 'StandardD16sV3', 'StandardD32sV3', 'StandardD64sV3', 'StandardD11V2', 'StandardD12V2', 'StandardD13V2', 'StandardD14V2', 'StandardD15V2', 'StandardDS1', 'StandardDS2', 'StandardDS3', 'StandardDS4', 'StandardDS11', 'StandardDS12', 'StandardDS13', 'StandardDS14', 'StandardDS1V2', 'StandardDS2V2', 'StandardDS3V2', 'StandardDS4V2', 'StandardDS5V2', 'StandardDS11V2', 'StandardDS12V2', 'StandardDS13V2', 'StandardDS14V2', 'StandardDS15V2', 'StandardDS134V2', 'StandardDS132V2', 'StandardDS148V2', 'StandardDS144V2', 'StandardE2V3', 'StandardE4V3', 'StandardE8V3', 'StandardE16V3', 'StandardE32V3', 'StandardE64V3', 'StandardE2sV3', 'StandardE4sV3', 'StandardE8sV3', 'StandardE16sV3', 'StandardE32sV3', 'StandardE64sV3', 'StandardE3216V3', 'StandardE328sV3', 'StandardE6432sV3', 'StandardE6416sV3', 'StandardF1', 'StandardF2', 'StandardF4', 'StandardF8', 'StandardF16', 'StandardF1s', 'StandardF2s', 'StandardF4s', 'StandardF8s', 'StandardF16s', 'StandardF2sV2', 'StandardF4sV2', 'StandardF8sV2', 'StandardF16sV2', 'StandardF32sV2', 'StandardF64sV2', 'StandardF72sV2', 'StandardG1', 'StandardG2', 'StandardG3', 'StandardG4', 'StandardG5', 'StandardGS1', 'StandardGS2', 'StandardGS3', 'StandardGS4', 'StandardGS5', 'StandardGS48', 'StandardGS44', 'StandardGS516', 'StandardGS58', 'StandardH8', 'StandardH16', 'StandardH8m', 'StandardH16m', 'StandardH16r', 'StandardH16mr', 'StandardL4s', 'StandardL8s', 'StandardL16s', 'StandardL32s', 'StandardM64s', 'StandardM64ms', 'StandardM128s', 'StandardM128ms', 'StandardM6432ms', 'StandardM6416ms', 'StandardM12864ms', 'StandardM12832ms', 'StandardNC6', 'StandardNC12', 'StandardNC24', 'StandardNC24r', 'StandardNC6sV2', 'StandardNC12sV2', 'StandardNC24sV2', 'StandardNC24rsV2', 'StandardNC6sV3', 'StandardNC12sV3', 'StandardNC24sV3', 'StandardNC24rsV3', 'StandardND6s', 'StandardND12s', 'StandardND24s', 'StandardND24rs', 'StandardNV6', 'StandardNV12', 'StandardNV24' - VMSize VirtualMachineSizeTypes `json:"vmSize,omitempty"` -} - -// Image the source user image virtual hard disk. The virtual hard disk will be copied before being attached to the -// virtual machine. If SourceImage is provided, the destination virtual hard drive must not exist. 
-type Image struct { - autorest.Response `json:"-"` - *ImageProperties `json:"properties,omitempty"` - // ID - Resource Id - ID *string `json:"id,omitempty"` - // Name - Resource name - Name *string `json:"name,omitempty"` - // Type - Resource type - Type *string `json:"type,omitempty"` - // Location - Resource location - Location *string `json:"location,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for Image. -func (i Image) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if i.ImageProperties != nil { - objectMap["properties"] = i.ImageProperties - } - if i.ID != nil { - objectMap["id"] = i.ID - } - if i.Name != nil { - objectMap["name"] = i.Name - } - if i.Type != nil { - objectMap["type"] = i.Type - } - if i.Location != nil { - objectMap["location"] = i.Location - } - if i.Tags != nil { - objectMap["tags"] = i.Tags - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for Image struct. -func (i *Image) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var imageProperties ImageProperties - err = json.Unmarshal(*v, &imageProperties) - if err != nil { - return err - } - i.ImageProperties = &imageProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - i.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - i.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - i.Type = &typeVar - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - i.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - i.Tags = tags - } - } - } - - return nil -} - -// ImageDataDisk describes a data disk. -type ImageDataDisk struct { - // Lun - Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. - Lun *int32 `json:"lun,omitempty"` - // Snapshot - The snapshot. - Snapshot *SubResource `json:"snapshot,omitempty"` - // ManagedDisk - The managedDisk. - ManagedDisk *SubResource `json:"managedDisk,omitempty"` - // BlobURI - The Virtual Hard Disk. - BlobURI *string `json:"blobUri,omitempty"` - // Caching - Specifies the caching requirements.

-	// Possible values are: **None**, **ReadOnly**, **ReadWrite**. Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite'
-	Caching CachingTypes `json:"caching,omitempty"`
-	// DiskSizeGB - Specifies the size of empty data disks in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image.

This value cannot be larger than 1023 GB - DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` - // StorageAccountType - Specifies the storage account type for the managed disk. Possible values are: Standard_LRS, Premium_LRS, and StandardSSD_LRS. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS' - StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` -} - -// ImageDiskReference the source image used for creating the disk. -type ImageDiskReference struct { - // ID - A relative uri containing either a Platform Imgage Repository or user image reference. - ID *string `json:"id,omitempty"` - // Lun - If the disk is created from an image's data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null. - Lun *int32 `json:"lun,omitempty"` -} - -// ImageListResult the List Image operation response. -type ImageListResult struct { - autorest.Response `json:"-"` - // Value - The list of Images. - Value *[]Image `json:"value,omitempty"` - // NextLink - The uri to fetch the next page of Images. Call ListNext() with this to fetch the next page of Images. - NextLink *string `json:"nextLink,omitempty"` -} - -// ImageListResultIterator provides access to a complete listing of Image values. -type ImageListResultIterator struct { - i int - page ImageListResultPage -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *ImageListResultIterator) Next() error { - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err := iter.page.Next() - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter ImageListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter ImageListResultIterator) Response() ImageListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter ImageListResultIterator) Value() Image { - if !iter.page.NotDone() { - return Image{} - } - return iter.page.Values()[iter.i] -} - -// IsEmpty returns true if the ListResult contains no values. -func (ilr ImageListResult) IsEmpty() bool { - return ilr.Value == nil || len(*ilr.Value) == 0 -} - -// imageListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (ilr ImageListResult) imageListResultPreparer() (*http.Request, error) { - if ilr.NextLink == nil || len(to.String(ilr.NextLink)) < 1 { - return nil, nil - } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(ilr.NextLink))) -} - -// ImageListResultPage contains a page of Image values. -type ImageListResultPage struct { - fn func(ImageListResult) (ImageListResult, error) - ilr ImageListResult -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. 
-func (page *ImageListResultPage) Next() error { - next, err := page.fn(page.ilr) - if err != nil { - return err - } - page.ilr = next - return nil -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page ImageListResultPage) NotDone() bool { - return !page.ilr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page ImageListResultPage) Response() ImageListResult { - return page.ilr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page ImageListResultPage) Values() []Image { - if page.ilr.IsEmpty() { - return nil - } - return *page.ilr.Value -} - -// ImageOSDisk describes an Operating System disk. -type ImageOSDisk struct { - // OsType - This property allows you to specify the type of the OS that is included in the disk if creating a VM from a custom image.
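Setting the `ImageOSDisk` fields aside for a moment: the `ImageListResultIterator`/`ImageListResultPage` pair above is the generated paging idiom, where `fn` fetches the next page from `NextLink` and an empty page ends the enumeration. A standalone sketch of the consumption loop, with toy types in place of the generated ones:

```go
package main

import "fmt"

// page mirrors the generated ListResultPage types: fn fetches the next
// page given the current one, and an empty page ends the enumeration.
type page struct {
	fn     func(cur []string) ([]string, error)
	values []string
}

func (p *page) NotDone() bool { return len(p.values) > 0 }

func (p *page) Next() error {
	next, err := p.fn(p.values)
	if err != nil {
		return err
	}
	p.values = next
	return nil
}

func main() {
	// Pretend the service returns two pages and then an empty one.
	pages := [][]string{{"img-a", "img-b"}, {"img-c"}, {}}
	i := 0
	p := page{fn: func([]string) ([]string, error) { i++; return pages[i], nil }, values: pages[0]}

	// The canonical consumption loop for the generated pages.
	for p.NotDone() {
		for _, v := range p.values {
			fmt.Println(v)
		}
		if err := p.Next(); err != nil {
			fmt.Println("paging error:", err)
			return
		}
	}
}
```

The iterator type layered on top of the page simply flattens this two-level loop into a single `Next()`/`Value()` walk.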

-	// Possible values are: **Windows**, **Linux**. Possible values include: 'Windows', 'Linux'
-	OsType OperatingSystemTypes `json:"osType,omitempty"`
-	// OsState - The OS State. Possible values include: 'Generalized', 'Specialized'
-	OsState OperatingSystemStateTypes `json:"osState,omitempty"`
-	// Snapshot - The snapshot.
-	Snapshot *SubResource `json:"snapshot,omitempty"`
-	// ManagedDisk - The managedDisk.
-	ManagedDisk *SubResource `json:"managedDisk,omitempty"`
-	// BlobURI - The Virtual Hard Disk.
-	BlobURI *string `json:"blobUri,omitempty"`
-	// Caching - Specifies the caching requirements.

-	// Possible values are: **None**, **ReadOnly**, **ReadWrite**.
Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' - Caching CachingTypes `json:"caching,omitempty"` - // DiskSizeGB - Specifies the size of empty data disks in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image.

This value cannot be larger than 1023 GB - DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` - // StorageAccountType - Specifies the storage account type for the managed disk. Possible values are: Standard_LRS, Premium_LRS, and StandardSSD_LRS. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS' - StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` -} - -// ImageProperties describes the properties of an Image. -type ImageProperties struct { - // SourceVirtualMachine - The source virtual machine from which Image is created. - SourceVirtualMachine *SubResource `json:"sourceVirtualMachine,omitempty"` - // StorageProfile - Specifies the storage settings for the virtual machine disks. - StorageProfile *ImageStorageProfile `json:"storageProfile,omitempty"` - // ProvisioningState - The provisioning state. - ProvisioningState *string `json:"provisioningState,omitempty"` -} - -// ImageReference specifies information about the image to use. You can specify information about platform images, -// marketplace images, or virtual machine images. This element is required when you want to use a platform image, -// marketplace image, or virtual machine image, but is not used in other creation operations. -type ImageReference struct { - // Publisher - The image publisher. - Publisher *string `json:"publisher,omitempty"` - // Offer - Specifies the offer of the platform image or marketplace image used to create the virtual machine. - Offer *string `json:"offer,omitempty"` - // Sku - The image SKU. - Sku *string `json:"sku,omitempty"` - // Version - Specifies the version of the platform image or marketplace image used to create the virtual machine. The allowed formats are Major.Minor.Build or 'latest'. Major, Minor, and Build are decimal numbers. Specify 'latest' to use the latest version of an image available at deploy time. Even if you use 'latest', the VM image will not automatically update after deploy time even if a new version becomes available. - Version *string `json:"version,omitempty"` - // ID - Resource Id - ID *string `json:"id,omitempty"` -} - -// ImagesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. -type ImagesCreateOrUpdateFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *ImagesCreateOrUpdateFuture) Result(client ImagesClient) (i Image, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.ImagesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.ImagesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if i.Response.Response, err = future.GetResult(sender); err == nil && i.Response.Response.StatusCode != http.StatusNoContent { - i, err = client.CreateOrUpdateResponder(i.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.ImagesCreateOrUpdateFuture", "Result", i.Response.Response, "Failure responding to request") - } - } - return -} - -// ImagesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. 
-type ImagesDeleteFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *ImagesDeleteFuture) Result(client ImagesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.ImagesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.ImagesDeleteFuture") - return - } - ar.Response = future.Response() - return -} - -// ImageStorageProfile describes a storage profile. -type ImageStorageProfile struct { - // OsDisk - Specifies information about the operating system disk used by the virtual machine.

-	// For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
-	OsDisk *ImageOSDisk `json:"osDisk,omitempty"`
-	// DataDisks - Specifies the parameters that are used to add a data disk to a virtual machine.

For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). - DataDisks *[]ImageDataDisk `json:"dataDisks,omitempty"` - // ZoneResilient - Specifies whether an image is zone resilient or not. Default is false. Zone resilient images can be created only in regions that provide Zone Redundant Storage (ZRS). - ZoneResilient *bool `json:"zoneResilient,omitempty"` -} - -// ImagesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. -type ImagesUpdateFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *ImagesUpdateFuture) Result(client ImagesClient) (i Image, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.ImagesUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.ImagesUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if i.Response.Response, err = future.GetResult(sender); err == nil && i.Response.Response.StatusCode != http.StatusNoContent { - i, err = client.UpdateResponder(i.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.ImagesUpdateFuture", "Result", i.Response.Response, "Failure responding to request") - } - } - return -} - -// ImageUpdate the source user image virtual hard disk. Only tags may be updated. -type ImageUpdate struct { - *ImageProperties `json:"properties,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for ImageUpdate. -func (iu ImageUpdate) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if iu.ImageProperties != nil { - objectMap["properties"] = iu.ImageProperties - } - if iu.Tags != nil { - objectMap["tags"] = iu.Tags - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for ImageUpdate struct. -func (iu *ImageUpdate) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var imageProperties ImageProperties - err = json.Unmarshal(*v, &imageProperties) - if err != nil { - return err - } - iu.ImageProperties = &imageProperties - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - iu.Tags = tags - } - } - } - - return nil -} - -// InnerError inner error details. -type InnerError struct { - // Exceptiontype - The exception type. - Exceptiontype *string `json:"exceptiontype,omitempty"` - // Errordetail - The internal error message or exception dump. - Errordetail *string `json:"errordetail,omitempty"` -} - -// InstanceViewStatus instance view status. -type InstanceViewStatus struct { - // Code - The status code. - Code *string `json:"code,omitempty"` - // Level - The level code. 
Possible values include: 'Info', 'Warning', 'Error' - Level StatusLevelTypes `json:"level,omitempty"` - // DisplayStatus - The short localizable label for the status. - DisplayStatus *string `json:"displayStatus,omitempty"` - // Message - The detailed status message, including for alerts and error messages. - Message *string `json:"message,omitempty"` - // Time - The time of the status. - Time *date.Time `json:"time,omitempty"` -} - -// KeyVaultAndKeyReference key Vault Key Url and vault id of KeK, KeK is optional and when provided is used to -// unwrap the encryptionKey -type KeyVaultAndKeyReference struct { - // SourceVault - Resource id of the KeyVault containing the key or secret - SourceVault *SourceVault `json:"sourceVault,omitempty"` - // KeyURL - Url pointing to a key or secret in KeyVault - KeyURL *string `json:"keyUrl,omitempty"` -} - -// KeyVaultAndSecretReference key Vault Secret Url and vault id of the encryption key -type KeyVaultAndSecretReference struct { - // SourceVault - Resource id of the KeyVault containing the key or secret - SourceVault *SourceVault `json:"sourceVault,omitempty"` - // SecretURL - Url pointing to a key or secret in KeyVault - SecretURL *string `json:"secretUrl,omitempty"` -} - -// KeyVaultKeyReference describes a reference to Key Vault Key -type KeyVaultKeyReference struct { - // KeyURL - The URL referencing a key encryption key in Key Vault. - KeyURL *string `json:"keyUrl,omitempty"` - // SourceVault - The relative URL of the Key Vault containing the key. - SourceVault *SubResource `json:"sourceVault,omitempty"` -} - -// KeyVaultSecretReference describes a reference to Key Vault Secret -type KeyVaultSecretReference struct { - // SecretURL - The URL referencing a secret in a Key Vault. - SecretURL *string `json:"secretUrl,omitempty"` - // SourceVault - The relative URL of the Key Vault containing the secret. - SourceVault *SubResource `json:"sourceVault,omitempty"` -} - -// LinuxConfiguration specifies the Linux operating system settings on the virtual machine.
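The Key Vault reference types above pair a vault resource id (`SourceVault`) with a secret or key URL, and the `EncryptionSettings` documented earlier combines them: a disk encryption key is required when enabling encryption, while the key encryption key (KEK) is optional and used to unwrap it. A rough sketch of how the pieces compose, using local stand-in structs and made-up vault and URL values:

```go
package main

import "fmt"

// Local stand-ins for the SDK's KeyVaultAndSecretReference and
// KeyVaultAndKeyReference shapes (names abbreviated for the sketch).
type sourceVault struct{ ID string }

type secretRef struct {
	SourceVault sourceVault
	SecretURL   string
}

type keyRef struct {
	SourceVault sourceVault
	KeyURL      string
}

// encryptionSettings mirrors the documented contract: Enabled plus a
// disk encryption key, with the key-encryption key (KEK) optional.
type encryptionSettings struct {
	Enabled           bool
	DiskEncryptionKey *secretRef
	KeyEncryptionKey  *keyRef // optional; used to unwrap the encryption key
}

func main() {
	// Illustrative vault id and URLs, not real resources.
	vault := sourceVault{ID: "/subscriptions/0000/resourceGroups/rg/providers/Microsoft.KeyVault/vaults/myvault"}
	es := encryptionSettings{
		Enabled:           true,
		DiskEncryptionKey: &secretRef{vault, "https://myvault.vault.azure.net/secrets/dek/1"},
		KeyEncryptionKey:  &keyRef{vault, "https://myvault.vault.azure.net/keys/kek/1"},
	}
	fmt.Printf("encrypt=%v dek=%s kek=%s\n", es.Enabled, es.DiskEncryptionKey.SecretURL, es.KeyEncryptionKey.KeyURL)
}
```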

-// For a list of supported Linux distributions, see [Linux on Azure-Endorsed Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-endorsed-distros?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json).
For running non-endorsed distributions, see [Information for Non-Endorsed -// Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-create-upload-generic?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json). -type LinuxConfiguration struct { - // DisablePasswordAuthentication - Specifies whether password authentication should be disabled. - DisablePasswordAuthentication *bool `json:"disablePasswordAuthentication,omitempty"` - // SSH - Specifies the ssh key configuration for a Linux OS. - SSH *SSHConfiguration `json:"ssh,omitempty"` -} - -// ListUsagesResult the List Usages operation response. -type ListUsagesResult struct { - autorest.Response `json:"-"` - // Value - The list of compute resource usages. - Value *[]Usage `json:"value,omitempty"` - // NextLink - The URI to fetch the next page of compute resource usage information. Call ListNext() with this to fetch the next page of compute resource usage information. - NextLink *string `json:"nextLink,omitempty"` -} - -// ListUsagesResultIterator provides access to a complete listing of Usage values. -type ListUsagesResultIterator struct { - i int - page ListUsagesResultPage -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *ListUsagesResultIterator) Next() error { - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err := iter.page.Next() - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter ListUsagesResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter ListUsagesResultIterator) Response() ListUsagesResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter ListUsagesResultIterator) Value() Usage { - if !iter.page.NotDone() { - return Usage{} - } - return iter.page.Values()[iter.i] -} - -// IsEmpty returns true if the ListResult contains no values. -func (lur ListUsagesResult) IsEmpty() bool { - return lur.Value == nil || len(*lur.Value) == 0 -} - -// listUsagesResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (lur ListUsagesResult) listUsagesResultPreparer() (*http.Request, error) { - if lur.NextLink == nil || len(to.String(lur.NextLink)) < 1 { - return nil, nil - } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(lur.NextLink))) -} - -// ListUsagesResultPage contains a page of Usage values. -type ListUsagesResultPage struct { - fn func(ListUsagesResult) (ListUsagesResult, error) - lur ListUsagesResult -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *ListUsagesResultPage) Next() error { - next, err := page.fn(page.lur) - if err != nil { - return err - } - page.lur = next - return nil -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page ListUsagesResultPage) NotDone() bool { - return !page.lur.IsEmpty() -} - -// Response returns the raw server response from the last page request. 
-func (page ListUsagesResultPage) Response() ListUsagesResult { - return page.lur -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page ListUsagesResultPage) Values() []Usage { - if page.lur.IsEmpty() { - return nil - } - return *page.lur.Value -} - -// ListVirtualMachineExtensionImage ... -type ListVirtualMachineExtensionImage struct { - autorest.Response `json:"-"` - Value *[]VirtualMachineExtensionImage `json:"value,omitempty"` -} - -// ListVirtualMachineImageResource ... -type ListVirtualMachineImageResource struct { - autorest.Response `json:"-"` - Value *[]VirtualMachineImageResource `json:"value,omitempty"` -} - -// LogAnalyticsExportRequestRateByIntervalFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type LogAnalyticsExportRequestRateByIntervalFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *LogAnalyticsExportRequestRateByIntervalFuture) Result(client LogAnalyticsClient) (laor LogAnalyticsOperationResult, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportRequestRateByIntervalFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.LogAnalyticsExportRequestRateByIntervalFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if laor.Response.Response, err = future.GetResult(sender); err == nil && laor.Response.Response.StatusCode != http.StatusNoContent { - laor, err = client.ExportRequestRateByIntervalResponder(laor.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportRequestRateByIntervalFuture", "Result", laor.Response.Response, "Failure responding to request") - } - } - return -} - -// LogAnalyticsExportThrottledRequestsFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type LogAnalyticsExportThrottledRequestsFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *LogAnalyticsExportThrottledRequestsFuture) Result(client LogAnalyticsClient) (laor LogAnalyticsOperationResult, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportThrottledRequestsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.LogAnalyticsExportThrottledRequestsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if laor.Response.Response, err = future.GetResult(sender); err == nil && laor.Response.Response.StatusCode != http.StatusNoContent { - laor, err = client.ExportThrottledRequestsResponder(laor.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportThrottledRequestsFuture", "Result", laor.Response.Response, "Failure responding to request") - } - } - return -} - -// LogAnalyticsInputBase api input base class for LogAnalytics Api. 
-type LogAnalyticsInputBase struct { - // BlobContainerSasURI - SAS Uri of the logging blob container to which LogAnalytics Api writes output logs to. - BlobContainerSasURI *string `json:"blobContainerSasUri,omitempty"` - // FromTime - From time of the query - FromTime *date.Time `json:"fromTime,omitempty"` - // ToTime - To time of the query - ToTime *date.Time `json:"toTime,omitempty"` - // GroupByThrottlePolicy - Group query result by Throttle Policy applied. - GroupByThrottlePolicy *bool `json:"groupByThrottlePolicy,omitempty"` - // GroupByOperationName - Group query result by by Operation Name. - GroupByOperationName *bool `json:"groupByOperationName,omitempty"` - // GroupByResourceName - Group query result by Resource Name. - GroupByResourceName *bool `json:"groupByResourceName,omitempty"` -} - -// LogAnalyticsOperationResult logAnalytics operation status response -type LogAnalyticsOperationResult struct { - autorest.Response `json:"-"` - // Properties - LogAnalyticsOutput - Properties *LogAnalyticsOutput `json:"properties,omitempty"` -} - -// LogAnalyticsOutput logAnalytics output properties -type LogAnalyticsOutput struct { - // Output - Output file Uri path to blob container. - Output *string `json:"output,omitempty"` -} - -// MaintenanceRedeployStatus maintenance Operation Status. -type MaintenanceRedeployStatus struct { - // IsCustomerInitiatedMaintenanceAllowed - True, if customer is allowed to perform Maintenance. - IsCustomerInitiatedMaintenanceAllowed *bool `json:"isCustomerInitiatedMaintenanceAllowed,omitempty"` - // PreMaintenanceWindowStartTime - Start Time for the Pre Maintenance Window. - PreMaintenanceWindowStartTime *date.Time `json:"preMaintenanceWindowStartTime,omitempty"` - // PreMaintenanceWindowEndTime - End Time for the Pre Maintenance Window. - PreMaintenanceWindowEndTime *date.Time `json:"preMaintenanceWindowEndTime,omitempty"` - // MaintenanceWindowStartTime - Start Time for the Maintenance Window. - MaintenanceWindowStartTime *date.Time `json:"maintenanceWindowStartTime,omitempty"` - // MaintenanceWindowEndTime - End Time for the Maintenance Window. - MaintenanceWindowEndTime *date.Time `json:"maintenanceWindowEndTime,omitempty"` - // LastOperationResultCode - The Last Maintenance Operation Result Code. Possible values include: 'MaintenanceOperationResultCodeTypesNone', 'MaintenanceOperationResultCodeTypesRetryLater', 'MaintenanceOperationResultCodeTypesMaintenanceAborted', 'MaintenanceOperationResultCodeTypesMaintenanceCompleted' - LastOperationResultCode MaintenanceOperationResultCodeTypes `json:"lastOperationResultCode,omitempty"` - // LastOperationMessage - Message returned for the last Maintenance Operation. - LastOperationMessage *string `json:"lastOperationMessage,omitempty"` -} - -// ManagedDiskParameters the parameters of a managed disk. -type ManagedDiskParameters struct { - // StorageAccountType - Specifies the storage account type for the managed disk. Possible values are: Standard_LRS, Premium_LRS, and StandardSSD_LRS. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS' - StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` - // ID - Resource Id - ID *string `json:"id,omitempty"` -} - -// NetworkInterfaceReference describes a network interface reference. 
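`LogAnalyticsInputBase` describes a windowed query: results are written to the SAS-addressed blob container, bounded by `FromTime`/`ToTime`, with optional group-by toggles. A small sketch of assembling such a query with stand-in types; the SAS URI shown is a placeholder, not a working one:

```go
package main

import (
	"fmt"
	"time"
)

// logQuery mirrors LogAnalyticsInputBase: a SAS-addressed output
// container, a [from, to] window, and group-by toggles.
type logQuery struct {
	BlobContainerSasURI   string
	FromTime, ToTime      time.Time
	GroupByOperationName  bool
	GroupByResourceName   bool
	GroupByThrottlePolicy bool
}

func main() {
	now := time.Now().UTC()
	q := logQuery{
		BlobContainerSasURI:  "https://acct.blob.core.windows.net/logs?sig=REDACTED", // placeholder SAS URI
		FromTime:             now.Add(-24 * time.Hour),                               // last 24 hours
		ToTime:               now,
		GroupByOperationName: true,
	}
	fmt.Printf("query window %s .. %s, groupByOperationName=%v\n",
		q.FromTime.Format(time.RFC3339), q.ToTime.Format(time.RFC3339), q.GroupByOperationName)
}
```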
-type NetworkInterfaceReference struct { - *NetworkInterfaceReferenceProperties `json:"properties,omitempty"` - // ID - Resource Id - ID *string `json:"id,omitempty"` -} - -// MarshalJSON is the custom marshaler for NetworkInterfaceReference. -func (nir NetworkInterfaceReference) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if nir.NetworkInterfaceReferenceProperties != nil { - objectMap["properties"] = nir.NetworkInterfaceReferenceProperties - } - if nir.ID != nil { - objectMap["id"] = nir.ID - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for NetworkInterfaceReference struct. -func (nir *NetworkInterfaceReference) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var networkInterfaceReferenceProperties NetworkInterfaceReferenceProperties - err = json.Unmarshal(*v, &networkInterfaceReferenceProperties) - if err != nil { - return err - } - nir.NetworkInterfaceReferenceProperties = &networkInterfaceReferenceProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - nir.ID = &ID - } - } - } - - return nil -} - -// NetworkInterfaceReferenceProperties describes a network interface reference properties. -type NetworkInterfaceReferenceProperties struct { - // Primary - Specifies the primary network interface in case the virtual machine has more than 1 network interface. - Primary *bool `json:"primary,omitempty"` -} - -// NetworkProfile specifies the network interfaces of the virtual machine. -type NetworkProfile struct { - // NetworkInterfaces - Specifies the list of resource Ids for the network interfaces associated with the virtual machine. - NetworkInterfaces *[]NetworkInterfaceReference `json:"networkInterfaces,omitempty"` -} - -// OperationListResult the List Compute Operation operation response. -type OperationListResult struct { - autorest.Response `json:"-"` - // Value - The list of compute operations - Value *[]OperationValue `json:"value,omitempty"` -} - -// OperationValue describes the properties of a Compute Operation value. -type OperationValue struct { - // Origin - The origin of the compute operation. - Origin *string `json:"origin,omitempty"` - // Name - The name of the compute operation. - Name *string `json:"name,omitempty"` - *OperationValueDisplay `json:"display,omitempty"` -} - -// MarshalJSON is the custom marshaler for OperationValue. -func (ov OperationValue) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ov.Origin != nil { - objectMap["origin"] = ov.Origin - } - if ov.Name != nil { - objectMap["name"] = ov.Name - } - if ov.OperationValueDisplay != nil { - objectMap["display"] = ov.OperationValueDisplay - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for OperationValue struct. 
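Per the `NetworkInterfaceReferenceProperties` comment, `Primary` only matters once a VM carries more than one NIC. A standalone sketch of the selection rule that comment implies (my reading, not SDK logic):

```go
package main

import "fmt"

// nicRef mirrors NetworkInterfaceReference: a resource id plus an
// optional Primary flag that only matters with more than one NIC.
type nicRef struct {
	ID      string
	Primary *bool
}

// primaryNIC returns the NIC flagged primary, or the sole NIC when
// only one is attached (in which case the flag is unnecessary).
func primaryNIC(nics []nicRef) (nicRef, error) {
	if len(nics) == 1 {
		return nics[0], nil
	}
	for _, n := range nics {
		if n.Primary != nil && *n.Primary {
			return n, nil
		}
	}
	return nicRef{}, fmt.Errorf("no primary NIC among %d interfaces", len(nics))
}

func main() {
	yes := true
	nics := []nicRef{{ID: "nic-0"}, {ID: "nic-1", Primary: &yes}}
	p, err := primaryNIC(nics)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("primary:", p.ID)
}
```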
-func (ov *OperationValue) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "origin": - if v != nil { - var origin string - err = json.Unmarshal(*v, &origin) - if err != nil { - return err - } - ov.Origin = &origin - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - ov.Name = &name - } - case "display": - if v != nil { - var operationValueDisplay OperationValueDisplay - err = json.Unmarshal(*v, &operationValueDisplay) - if err != nil { - return err - } - ov.OperationValueDisplay = &operationValueDisplay - } - } - } - - return nil -} - -// OperationValueDisplay describes the properties of a Compute Operation Value Display. -type OperationValueDisplay struct { - // Operation - The display name of the compute operation. - Operation *string `json:"operation,omitempty"` - // Resource - The display name of the resource the operation applies to. - Resource *string `json:"resource,omitempty"` - // Description - The description of the operation. - Description *string `json:"description,omitempty"` - // Provider - The resource provider for the operation. - Provider *string `json:"provider,omitempty"` -} - -// OSDisk specifies information about the operating system disk used by the virtual machine.

-// For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
-type OSDisk struct {
-	// OsType - This property allows you to specify the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD.

-	// Possible values are: **Windows**, **Linux**. Possible values include: 'Windows', 'Linux'
-	OsType OperatingSystemTypes `json:"osType,omitempty"`
-	// EncryptionSettings - Specifies the encryption settings for the OS Disk.

-	// Minimum api-version: 2015-06-15
-	EncryptionSettings *DiskEncryptionSettings `json:"encryptionSettings,omitempty"`
-	// Name - The disk name.
-	Name *string `json:"name,omitempty"`
-	// Vhd - The virtual hard disk.
-	Vhd *VirtualHardDisk `json:"vhd,omitempty"`
-	// Image - The source user image virtual hard disk. The virtual hard disk will be copied before being attached to the virtual machine. If SourceImage is provided, the destination virtual hard drive must not exist.
-	Image *VirtualHardDisk `json:"image,omitempty"`
-	// Caching - Specifies the caching requirements.

-	// Possible values are: **None**, **ReadOnly**, **ReadWrite**. Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite'
-	Caching CachingTypes `json:"caching,omitempty"`
-	// WriteAcceleratorEnabled - Specifies whether writeAccelerator should be enabled or disabled on the disk.
-	WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"`
-	// CreateOption - Specifies how the virtual machine should be created.

-	// Possible values are:
-	// **Attach** – This value is used when you are using a specialized disk to create the virtual machine.
-	// **FromImage** – This value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imageReference element described above. If you are using a marketplace image, you also use the plan element previously described. Possible values include: 'DiskCreateOptionTypesFromImage', 'DiskCreateOptionTypesEmpty', 'DiskCreateOptionTypesAttach'
-	CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"`
-	// DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image.

-	// This value cannot be larger than 1023 GB
-	DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
-	// ManagedDisk - The managed disk parameters.
-	ManagedDisk *ManagedDiskParameters `json:"managedDisk,omitempty"`
-}
-
-// OSDiskImage contains the os disk image information.
-type OSDiskImage struct {
-	// OperatingSystem - The operating system of the osDiskImage. Possible values include: 'Windows', 'Linux'
-	OperatingSystem OperatingSystemTypes `json:"operatingSystem,omitempty"`
-}
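Taken together, the `OSDisk` comments encode a small decision table: `FromImage` expects an image source (the `imageReference` element or a source image VHD), while `Attach` expects an existing specialized disk. A rough standalone validator capturing that reading; the types and rule are my paraphrase of the comments, not SDK behavior:

```go
package main

import (
	"errors"
	"fmt"
)

type createOption string

const (
	fromImage createOption = "FromImage"
	attach    createOption = "Attach"
)

// osDiskSpec is a pared-down OSDisk: just the fields the create-option
// rules in the comments above talk about.
type osDiskSpec struct {
	CreateOption    createOption
	HasImageRef     bool // imageReference or source image provided
	HasExistingDisk bool // vhd or managedDisk pointing at an existing disk
}

// validate applies the documented pairing of create options and sources.
func validate(d osDiskSpec) error {
	switch d.CreateOption {
	case fromImage:
		if !d.HasImageRef {
			return errors.New("FromImage requires an image source")
		}
	case attach:
		if !d.HasExistingDisk {
			return errors.New("Attach requires an existing specialized disk")
		}
	default:
		return fmt.Errorf("unknown create option %q", d.CreateOption)
	}
	return nil
}

func main() {
	fmt.Println(validate(osDiskSpec{CreateOption: fromImage, HasImageRef: true})) // <nil>
	fmt.Println(validate(osDiskSpec{CreateOption: attach}))                       // error
}
```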

-// OSProfile specifies the operating system settings for the virtual machine.
-type OSProfile struct {
-	// ComputerName - Specifies the host OS name of the virtual machine. **Max-length (Windows):** 15 characters. **Max-length (Linux):** 64 characters. For naming conventions and restrictions see [Azure infrastructure services implementation guidelines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-infrastructure-subscription-accounts-guidelines?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json#1-naming-conventions).
-	ComputerName *string `json:"computerName,omitempty"`
-	// AdminUsername - Specifies the name of the administrator account.

-	// **Windows-only restriction:** Cannot end in "."
-	// **Disallowed values:** "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", "guest", "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", "test2", "test3", "user4", "user5".
-	// **Minimum-length (Linux):** 1 character. **Max-length (Linux):** 64 characters. **Max-length (Windows):** 20 characters.
-	// For root access to the Linux VM, see [Using root privileges on Linux virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-use-root-privileges?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)
-	// For a list of built-in system users on Linux that should not be used in this field, see [Selecting User Names for Linux on Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-usernames?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)
-	AdminUsername *string `json:"adminUsername,omitempty"`
-	// AdminPassword - Specifies the password of the administrator account.

-	// **Minimum-length (Windows):** 8 characters. **Minimum-length (Linux):** 6 characters.
-	// **Max-length (Windows):** 123 characters. **Max-length (Linux):** 72 characters.
-	// **Complexity requirements:** 3 out of 4 conditions below need to be fulfilled:
-	// has lower characters, has upper characters, has a digit, has a special character (regex match [\W_]).
-	// **Disallowed values:** "abc@123", "P@$$w0rd", "P@ssw0rd", "P@ssword123", "Pa$$word", "pass@word1", "Password!", "Password1", "Password22", "iloveyou!"
-	// For resetting the password, see [How to reset the Remote Desktop service or its login password in a Windows VM](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-reset-rdp?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)
-	// For resetting root password, see [Manage users, SSH, and check or repair disks on Azure Linux VMs using the VMAccess Extension](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-vmaccess-extension?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json#reset-root-password)
-	AdminPassword *string `json:"adminPassword,omitempty"`
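The `AdminPassword` complexity rule above (three of four character classes) is easy to get wrong by hand; here is a self-contained checker for just that rule, with the per-OS length bounds passed in as parameters. The `[\W_]` class is approximated as "anything that is not a letter or digit":

```go
package main

import (
	"fmt"
	"unicode"
)

// meetsComplexity checks the documented rule: at least 3 of the 4
// character classes (lower, upper, digit, special) must be present.
// Length bounds differ per OS, so they are parameters here. Byte
// length is used, which is fine for ASCII passwords.
func meetsComplexity(pw string, minLen, maxLen int) bool {
	if len(pw) < minLen || len(pw) > maxLen {
		return false
	}
	var lower, upper, digit, special bool
	for _, r := range pw {
		switch {
		case unicode.IsLower(r):
			lower = true
		case unicode.IsUpper(r):
			upper = true
		case unicode.IsDigit(r):
			digit = true
		default:
			special = true // approximates the documented [\W_] match
		}
	}
	classes := 0
	for _, ok := range []bool{lower, upper, digit, special} {
		if ok {
			classes++
		}
	}
	return classes >= 3
}

func main() {
	// Windows bounds per the comment above: 8..123 characters.
	fmt.Println(meetsComplexity("Spr1ngTime", 8, 123)) // true: lower+upper+digit
	fmt.Println(meetsComplexity("password", 8, 123))   // false: one class only
}
```

The disallowed-value blacklist above would be a separate lookup on top of this check.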

-	// CustomData - Specifies a base-64 encoded string of custom data. The base-64 encoded string is decoded to a binary array that is saved as a file on the Virtual Machine. The maximum length of the binary array is 65535 bytes. For using cloud-init for your VM, see [Using cloud-init to customize a Linux VM during creation](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-cloud-init?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)
-	CustomData *string `json:"customData,omitempty"`
-	// WindowsConfiguration - Specifies Windows operating system settings on the virtual machine.
-	WindowsConfiguration *WindowsConfiguration `json:"windowsConfiguration,omitempty"`
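Since `CustomData` must be base-64 text whose decoded form fits in 65535 bytes, a tiny helper can both encode and enforce the limit. A sketch; the helper name is illustrative:

```go
package main

import (
	"encoding/base64"
	"fmt"
)

// encodeCustomData base64-encodes user data (e.g. a cloud-init file)
// and enforces the documented 65535-byte limit on the decoded payload.
func encodeCustomData(raw []byte) (string, error) {
	if len(raw) > 65535 {
		return "", fmt.Errorf("custom data is %d bytes; decoded payload must be <= 65535", len(raw))
	}
	return base64.StdEncoding.EncodeToString(raw), nil
}

func main() {
	cloudInit := []byte("#cloud-config\npackages:\n  - nginx\n")
	s, err := encodeCustomData(cloudInit)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(s)
}
```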

-	// LinuxConfiguration - Specifies the Linux operating system settings on the virtual machine. For a list of supported Linux distributions, see [Linux on Azure-Endorsed Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-endorsed-distros?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json). For running non-endorsed distributions, see [Information for Non-Endorsed Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-create-upload-generic?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json).
-	LinuxConfiguration *LinuxConfiguration `json:"linuxConfiguration,omitempty"`
-	// Secrets - Specifies the set of certificates that should be installed onto the virtual machine.
-	Secrets *[]VaultSecretGroup `json:"secrets,omitempty"`
-}
-
-// Plan specifies information about the marketplace image used to create the virtual machine. This element is only
-// used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for
-// programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to
-// deploy programmatically, Get Started ->**. Enter any required information and then click **Save**.
-type Plan struct {
-	// Name - The plan ID.
-	Name *string `json:"name,omitempty"`
-	// Publisher - The publisher ID.
-	Publisher *string `json:"publisher,omitempty"`
-	// Product - Specifies the product of the image from the marketplace. This is the same value as Offer under the imageReference element.
-	Product *string `json:"product,omitempty"`
-	// PromotionCode - The promotion code.
-	PromotionCode *string `json:"promotionCode,omitempty"`
-}
-
-// PurchasePlan used for establishing the purchase context of any 3rd Party artifact through MarketPlace.
-type PurchasePlan struct {
-	// Publisher - The publisher ID.
-	Publisher *string `json:"publisher,omitempty"`
-	// Name - The plan ID.
-	Name *string `json:"name,omitempty"`
-	// Product - Specifies the product of the image from the marketplace. This is the same value as Offer under the imageReference element.
-	Product *string `json:"product,omitempty"`
-}
-
-// RecoveryWalkResponse response after calling a manual recovery walk
-type RecoveryWalkResponse struct {
-	autorest.Response `json:"-"`
-	// WalkPerformed - Whether the recovery walk was performed
-	WalkPerformed *bool `json:"walkPerformed,omitempty"`
-	// NextPlatformUpdateDomain - The next update domain that needs to be walked. Null means walk spanning all update domains has been completed
-	NextPlatformUpdateDomain *int32 `json:"nextPlatformUpdateDomain,omitempty"`
-}
-
-// RequestRateByIntervalInput api request input for LogAnalytics getRequestRateByInterval Api.
-type RequestRateByIntervalInput struct {
-	// IntervalLength - Interval value in minutes used to create LogAnalytics call rate logs. Possible values include: 'ThreeMins', 'FiveMins', 'ThirtyMins', 'SixtyMins'
-	IntervalLength IntervalInMins `json:"intervalLength,omitempty"`
-	// BlobContainerSasURI - SAS Uri of the logging blob container to which LogAnalytics Api writes output logs to.
-	BlobContainerSasURI *string `json:"blobContainerSasUri,omitempty"`
-	// FromTime - From time of the query
-	FromTime *date.Time `json:"fromTime,omitempty"`
-	// ToTime - To time of the query
-	ToTime *date.Time `json:"toTime,omitempty"`
-	// GroupByThrottlePolicy - Group query result by Throttle Policy applied.
-	GroupByThrottlePolicy *bool `json:"groupByThrottlePolicy,omitempty"`
-	// GroupByOperationName - Group query result by Operation Name.
-	GroupByOperationName *bool `json:"groupByOperationName,omitempty"`
-	// GroupByResourceName - Group query result by Resource Name.
-	GroupByResourceName *bool `json:"groupByResourceName,omitempty"`
-}
-
-// Resource the Resource model definition.
-type Resource struct { - // ID - Resource Id - ID *string `json:"id,omitempty"` - // Name - Resource name - Name *string `json:"name,omitempty"` - // Type - Resource type - Type *string `json:"type,omitempty"` - // Location - Resource location - Location *string `json:"location,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for Resource. -func (r Resource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if r.ID != nil { - objectMap["id"] = r.ID - } - if r.Name != nil { - objectMap["name"] = r.Name - } - if r.Type != nil { - objectMap["type"] = r.Type - } - if r.Location != nil { - objectMap["location"] = r.Location - } - if r.Tags != nil { - objectMap["tags"] = r.Tags - } - return json.Marshal(objectMap) -} - -// RollbackStatusInfo information about rollback on failed VM instances after a OS Upgrade operation. -type RollbackStatusInfo struct { - // SuccessfullyRolledbackInstanceCount - The number of instances which have been successfully rolled back. - SuccessfullyRolledbackInstanceCount *int32 `json:"successfullyRolledbackInstanceCount,omitempty"` - // FailedRolledbackInstanceCount - The number of instances which failed to rollback. - FailedRolledbackInstanceCount *int32 `json:"failedRolledbackInstanceCount,omitempty"` - // RollbackError - Error details if OS rollback failed. - RollbackError *APIError `json:"rollbackError,omitempty"` -} - -// RollingUpgradePolicy the configuration parameters used while performing a rolling upgrade. -type RollingUpgradePolicy struct { - // MaxBatchInstancePercent - The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. As this is a maximum, unhealthy instances in previous or future batches can cause the percentage of instances in a batch to decrease to ensure higher reliability. The default value for this parameter is 20%. - MaxBatchInstancePercent *int32 `json:"maxBatchInstancePercent,omitempty"` - // MaxUnhealthyInstancePercent - The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy, either as a result of being upgraded, or by being found in an unhealthy state by the virtual machine health checks before the rolling upgrade aborts. This constraint will be checked prior to starting any batch. The default value for this parameter is 20%. - MaxUnhealthyInstancePercent *int32 `json:"maxUnhealthyInstancePercent,omitempty"` - // MaxUnhealthyUpgradedInstancePercent - The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. This check will happen after each batch is upgraded. If this percentage is ever exceeded, the rolling update aborts. The default value for this parameter is 20%. - MaxUnhealthyUpgradedInstancePercent *int32 `json:"maxUnhealthyUpgradedInstancePercent,omitempty"` - // PauseTimeBetweenBatches - The wait time between completing the update for all virtual machines in one batch and starting the next batch. The time duration should be specified in ISO 8601 format. The default value is 0 seconds (PT0S). - PauseTimeBetweenBatches *string `json:"pauseTimeBetweenBatches,omitempty"` -} - -// RollingUpgradeProgressInfo information about the number of virtual machine instances in each upgrade state. -type RollingUpgradeProgressInfo struct { - // SuccessfulInstanceCount - The number of instances that have been successfully upgraded. 
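`RollingUpgradePolicy` above documents defaults of 20% for each percentage knob and `PT0S` for the inter-batch pause; because the generated fields are pointers, `nil` means "use the default". A sketch that materializes those documented defaults locally (illustrative helper, not SDK behavior):

```go
package main

import "fmt"

// rollingUpgradePolicy mirrors the documented knobs; nil means
// "use the service default" for the generated pointer fields.
type rollingUpgradePolicy struct {
	MaxBatchInstancePercent             *int32
	MaxUnhealthyInstancePercent         *int32
	MaxUnhealthyUpgradedInstancePercent *int32
	PauseTimeBetweenBatches             *string // ISO 8601 duration
}

// withDefaults fills unset fields with the defaults named in the
// comments above: 20% for the three percentages, PT0S for the pause.
func withDefaults(p rollingUpgradePolicy) rollingUpgradePolicy {
	def := int32(20)
	pause := "PT0S"
	if p.MaxBatchInstancePercent == nil {
		p.MaxBatchInstancePercent = &def
	}
	if p.MaxUnhealthyInstancePercent == nil {
		p.MaxUnhealthyInstancePercent = &def
	}
	if p.MaxUnhealthyUpgradedInstancePercent == nil {
		p.MaxUnhealthyUpgradedInstancePercent = &def
	}
	if p.PauseTimeBetweenBatches == nil {
		p.PauseTimeBetweenBatches = &pause
	}
	return p
}

func main() {
	p := withDefaults(rollingUpgradePolicy{})
	fmt.Printf("batch=%d%% unhealthy=%d%% upgradedUnhealthy=%d%% pause=%s\n",
		*p.MaxBatchInstancePercent, *p.MaxUnhealthyInstancePercent,
		*p.MaxUnhealthyUpgradedInstancePercent, *p.PauseTimeBetweenBatches)
}
```

Leaving the pointers nil and letting the service apply its own defaults is equally valid; the helper just makes the effective values visible.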
- SuccessfulInstanceCount *int32 `json:"successfulInstanceCount,omitempty"` - // FailedInstanceCount - The number of instances that have failed to be upgraded successfully. - FailedInstanceCount *int32 `json:"failedInstanceCount,omitempty"` - // InProgressInstanceCount - The number of instances that are currently being upgraded. - InProgressInstanceCount *int32 `json:"inProgressInstanceCount,omitempty"` - // PendingInstanceCount - The number of instances that have not yet begun to be upgraded. - PendingInstanceCount *int32 `json:"pendingInstanceCount,omitempty"` -} - -// RollingUpgradeRunningStatus information about the current running state of the overall upgrade. -type RollingUpgradeRunningStatus struct { - // Code - Code indicating the current status of the upgrade. Possible values include: 'RollingForward', 'Cancelled', 'Completed', 'Faulted' - Code RollingUpgradeStatusCode `json:"code,omitempty"` - // StartTime - Start time of the upgrade. - StartTime *date.Time `json:"startTime,omitempty"` - // LastAction - The last action performed on the rolling upgrade. Possible values include: 'Start', 'Cancel' - LastAction RollingUpgradeActionType `json:"lastAction,omitempty"` - // LastActionTime - Last action time of the upgrade. - LastActionTime *date.Time `json:"lastActionTime,omitempty"` -} - -// RollingUpgradeStatusInfo the status of the latest virtual machine scale set rolling upgrade. -type RollingUpgradeStatusInfo struct { - autorest.Response `json:"-"` - *RollingUpgradeStatusInfoProperties `json:"properties,omitempty"` - // ID - Resource Id - ID *string `json:"id,omitempty"` - // Name - Resource name - Name *string `json:"name,omitempty"` - // Type - Resource type - Type *string `json:"type,omitempty"` - // Location - Resource location - Location *string `json:"location,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for RollingUpgradeStatusInfo. -func (rusi RollingUpgradeStatusInfo) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if rusi.RollingUpgradeStatusInfoProperties != nil { - objectMap["properties"] = rusi.RollingUpgradeStatusInfoProperties - } - if rusi.ID != nil { - objectMap["id"] = rusi.ID - } - if rusi.Name != nil { - objectMap["name"] = rusi.Name - } - if rusi.Type != nil { - objectMap["type"] = rusi.Type - } - if rusi.Location != nil { - objectMap["location"] = rusi.Location - } - if rusi.Tags != nil { - objectMap["tags"] = rusi.Tags - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for RollingUpgradeStatusInfo struct. 
-func (rusi *RollingUpgradeStatusInfo) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var rollingUpgradeStatusInfoProperties RollingUpgradeStatusInfoProperties - err = json.Unmarshal(*v, &rollingUpgradeStatusInfoProperties) - if err != nil { - return err - } - rusi.RollingUpgradeStatusInfoProperties = &rollingUpgradeStatusInfoProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - rusi.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - rusi.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - rusi.Type = &typeVar - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - rusi.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - rusi.Tags = tags - } - } - } - - return nil -} - -// RollingUpgradeStatusInfoProperties the status of the latest virtual machine scale set rolling upgrade. -type RollingUpgradeStatusInfoProperties struct { - // Policy - The rolling upgrade policies applied for this upgrade. - Policy *RollingUpgradePolicy `json:"policy,omitempty"` - // RunningStatus - Information about the current running state of the overall upgrade. - RunningStatus *RollingUpgradeRunningStatus `json:"runningStatus,omitempty"` - // Progress - Information about the number of virtual machine instances in each upgrade state. - Progress *RollingUpgradeProgressInfo `json:"progress,omitempty"` - // Error - Error details for this upgrade, if there are any. - Error *APIError `json:"error,omitempty"` -} - -// RunCommandDocument describes the properties of a Run Command. -type RunCommandDocument struct { - autorest.Response `json:"-"` - // Script - The script to be executed. - Script *[]string `json:"script,omitempty"` - // Parameters - The parameters used by the script. - Parameters *[]RunCommandParameterDefinition `json:"parameters,omitempty"` - // Schema - The VM run command schema. - Schema *string `json:"$schema,omitempty"` - // ID - The VM run command id. - ID *string `json:"id,omitempty"` - // OsType - The Operating System type. Possible values include: 'Windows', 'Linux' - OsType OperatingSystemTypes `json:"osType,omitempty"` - // Label - The VM run command label. - Label *string `json:"label,omitempty"` - // Description - The VM run command description. - Description *string `json:"description,omitempty"` -} - -// RunCommandDocumentBase describes the properties of a Run Command metadata. -type RunCommandDocumentBase struct { - // Schema - The VM run command schema. - Schema *string `json:"$schema,omitempty"` - // ID - The VM run command id. - ID *string `json:"id,omitempty"` - // OsType - The Operating System type. Possible values include: 'Windows', 'Linux' - OsType OperatingSystemTypes `json:"osType,omitempty"` - // Label - The VM run command label. - Label *string `json:"label,omitempty"` - // Description - The VM run command description. - Description *string `json:"description,omitempty"` -} - -// RunCommandInput capture Virtual Machine parameters. 
-type RunCommandInput struct { - // CommandID - The run command id. - CommandID *string `json:"commandId,omitempty"` - // Script - Optional. The script to be executed. When this value is given, the given script will override the default script of the command. - Script *[]string `json:"script,omitempty"` - // Parameters - The run command parameters. - Parameters *[]RunCommandInputParameter `json:"parameters,omitempty"` -} - -// RunCommandInputParameter describes the properties of a run command parameter. -type RunCommandInputParameter struct { - // Name - The run command parameter name. - Name *string `json:"name,omitempty"` - // Value - The run command parameter value. - Value *string `json:"value,omitempty"` -} - -// RunCommandListResult the List Virtual Machine operation response. -type RunCommandListResult struct { - autorest.Response `json:"-"` - // Value - The list of virtual machine run commands. - Value *[]RunCommandDocumentBase `json:"value,omitempty"` - // NextLink - The uri to fetch the next page of run commands. Call ListNext() with this to fetch the next page of run commands. - NextLink *string `json:"nextLink,omitempty"` -} - -// RunCommandListResultIterator provides access to a complete listing of RunCommandDocumentBase values. -type RunCommandListResultIterator struct { - i int - page RunCommandListResultPage -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *RunCommandListResultIterator) Next() error { - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err := iter.page.Next() - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter RunCommandListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter RunCommandListResultIterator) Response() RunCommandListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter RunCommandListResultIterator) Value() RunCommandDocumentBase { - if !iter.page.NotDone() { - return RunCommandDocumentBase{} - } - return iter.page.Values()[iter.i] -} - -// IsEmpty returns true if the ListResult contains no values. -func (rclr RunCommandListResult) IsEmpty() bool { - return rclr.Value == nil || len(*rclr.Value) == 0 -} - -// runCommandListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (rclr RunCommandListResult) runCommandListResultPreparer() (*http.Request, error) { - if rclr.NextLink == nil || len(to.String(rclr.NextLink)) < 1 { - return nil, nil - } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(rclr.NextLink))) -} - -// RunCommandListResultPage contains a page of RunCommandDocumentBase values. -type RunCommandListResultPage struct { - fn func(RunCommandListResult) (RunCommandListResult, error) - rclr RunCommandListResult -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. 
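The RunCommandInput/RunCommandInputParameter pair above is everything a caller needs to describe a run-command invocation. A minimal caller-side sketch, hedged: the `compute` import alias, the `RunShellScript` command id, and the parameter names are assumptions for illustration, not taken from this file:

```go
// Illustrative only; assumes this file's package is imported as `compute`.
// "RunShellScript" is the conventional command id for Linux VMs (assumption).
cmdID := "RunShellScript"
script := []string{"echo hello from $(hostname)"}
name, value := "arg1", "value1" // hypothetical parameter
input := compute.RunCommandInput{
	CommandID:  &cmdID,
	Script:     &script, // overrides the command's default script, per the field comment
	Parameters: &[]compute.RunCommandInputParameter{{Name: &name, Value: &value}},
}
_ = input // would typically be passed to a VirtualMachinesClient run-command call
```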
-func (page *RunCommandListResultPage) Next() error { - next, err := page.fn(page.rclr) - if err != nil { - return err - } - page.rclr = next - return nil -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page RunCommandListResultPage) NotDone() bool { - return !page.rclr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page RunCommandListResultPage) Response() RunCommandListResult { - return page.rclr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page RunCommandListResultPage) Values() []RunCommandDocumentBase { - if page.rclr.IsEmpty() { - return nil - } - return *page.rclr.Value -} - -// RunCommandParameterDefinition describes the properties of a run command parameter. -type RunCommandParameterDefinition struct { - // Name - The run command parameter name. - Name *string `json:"name,omitempty"` - // Type - The run command parameter type. - Type *string `json:"type,omitempty"` - // DefaultValue - The run command parameter default value. - DefaultValue *string `json:"defaultValue,omitempty"` - // Required - The run command parameter required. - Required *bool `json:"required,omitempty"` -} - -// RunCommandResult ... -type RunCommandResult struct { - autorest.Response `json:"-"` - // Value - Run command operation response. - Value *[]InstanceViewStatus `json:"value,omitempty"` -} - -// Sku describes a virtual machine scale set sku. -type Sku struct { - // Name - The sku name. - Name *string `json:"name,omitempty"` - // Tier - Specifies the tier of virtual machines in a scale set.

    Possible Values: **Standard**,
    **Basic** - Tier *string `json:"tier,omitempty"` - // Capacity - Specifies the number of virtual machines in the scale set. - Capacity *int64 `json:"capacity,omitempty"` -} - -// Snapshot snapshot resource. -type Snapshot struct { - autorest.Response `json:"-"` - // ManagedBy - Unused. Always Null. - ManagedBy *string `json:"managedBy,omitempty"` - Sku *SnapshotSku `json:"sku,omitempty"` - *DiskProperties `json:"properties,omitempty"` - // ID - Resource Id - ID *string `json:"id,omitempty"` - // Name - Resource name - Name *string `json:"name,omitempty"` - // Type - Resource type - Type *string `json:"type,omitempty"` - // Location - Resource location - Location *string `json:"location,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for Snapshot. -func (s Snapshot) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if s.ManagedBy != nil { - objectMap["managedBy"] = s.ManagedBy - } - if s.Sku != nil { - objectMap["sku"] = s.Sku - } - if s.DiskProperties != nil { - objectMap["properties"] = s.DiskProperties - } - if s.ID != nil { - objectMap["id"] = s.ID - } - if s.Name != nil { - objectMap["name"] = s.Name - } - if s.Type != nil { - objectMap["type"] = s.Type - } - if s.Location != nil { - objectMap["location"] = s.Location - } - if s.Tags != nil { - objectMap["tags"] = s.Tags - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for Snapshot struct. -func (s *Snapshot) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "managedBy": - if v != nil { - var managedBy string - err = json.Unmarshal(*v, &managedBy) - if err != nil { - return err - } - s.ManagedBy = &managedBy - } - case "sku": - if v != nil { - var sku SnapshotSku - err = json.Unmarshal(*v, &sku) - if err != nil { - return err - } - s.Sku = &sku - } - case "properties": - if v != nil { - var diskProperties DiskProperties - err = json.Unmarshal(*v, &diskProperties) - if err != nil { - return err - } - s.DiskProperties = &diskProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - s.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - s.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - s.Type = &typeVar - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - s.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - s.Tags = tags - } - } - } - - return nil -} - -// SnapshotList the List Snapshots operation response. -type SnapshotList struct { - autorest.Response `json:"-"` - // Value - A list of snapshots. - Value *[]Snapshot `json:"value,omitempty"` - // NextLink - The uri to fetch the next page of snapshots. Call ListNext() with this to fetch the next page of snapshots. - NextLink *string `json:"nextLink,omitempty"` -} - -// SnapshotListIterator provides access to a complete listing of Snapshot values. -type SnapshotListIterator struct { - i int - page SnapshotListPage -} - -// Next advances to the next value. 
If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *SnapshotListIterator) Next() error { - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err := iter.page.Next() - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter SnapshotListIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter SnapshotListIterator) Response() SnapshotList { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter SnapshotListIterator) Value() Snapshot { - if !iter.page.NotDone() { - return Snapshot{} - } - return iter.page.Values()[iter.i] -} - -// IsEmpty returns true if the ListResult contains no values. -func (sl SnapshotList) IsEmpty() bool { - return sl.Value == nil || len(*sl.Value) == 0 -} - -// snapshotListPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (sl SnapshotList) snapshotListPreparer() (*http.Request, error) { - if sl.NextLink == nil || len(to.String(sl.NextLink)) < 1 { - return nil, nil - } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(sl.NextLink))) -} - -// SnapshotListPage contains a page of Snapshot values. -type SnapshotListPage struct { - fn func(SnapshotList) (SnapshotList, error) - sl SnapshotList -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *SnapshotListPage) Next() error { - next, err := page.fn(page.sl) - if err != nil { - return err - } - page.sl = next - return nil -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page SnapshotListPage) NotDone() bool { - return !page.sl.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page SnapshotListPage) Response() SnapshotList { - return page.sl -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page SnapshotListPage) Values() []Snapshot { - if page.sl.IsEmpty() { - return nil - } - return *page.sl.Value -} - -// SnapshotsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type SnapshotsCreateOrUpdateFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. 
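Every `*Future` type in this file wraps `azure.Future`, and its `Result` method re-checks completion before decoding the terminal payload. A hedged sketch of driving such a future from a caller; the `CreateOrUpdate` signature and `WaitForCompletion` on `azure.Future` are assumptions about this SDK vintage:

```go
// Sketch only: `client` is a configured compute.SnapshotsClient and
// `snapshot` a compute.Snapshot; error handling is abbreviated.
future, err := client.CreateOrUpdate(ctx, "my-rg", "my-snapshot", snapshot)
if err != nil {
	return err
}
// Poll the operation to a terminal state (assumed azure.Future API).
if err := future.WaitForCompletion(ctx, client.Client); err != nil {
	return err
}
// Result re-verifies Done and decodes the final Snapshot.
result, err := future.Result(client)
if err != nil {
	return err
}
fmt.Println(*result.ID)
```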
-func (future *SnapshotsCreateOrUpdateFuture) Result(client SnapshotsClient) (s Snapshot, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.SnapshotsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.SnapshotsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if s.Response.Response, err = future.GetResult(sender); err == nil && s.Response.Response.StatusCode != http.StatusNoContent { - s, err = client.CreateOrUpdateResponder(s.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.SnapshotsCreateOrUpdateFuture", "Result", s.Response.Response, "Failure responding to request") - } - } - return -} - -// SnapshotsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. -type SnapshotsDeleteFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *SnapshotsDeleteFuture) Result(client SnapshotsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.SnapshotsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.SnapshotsDeleteFuture") - return - } - ar.Response = future.Response() - return -} - -// SnapshotsGrantAccessFuture an abstraction for monitoring and retrieving the results of a long-running operation. -type SnapshotsGrantAccessFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *SnapshotsGrantAccessFuture) Result(client SnapshotsClient) (au AccessURI, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.SnapshotsGrantAccessFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.SnapshotsGrantAccessFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if au.Response.Response, err = future.GetResult(sender); err == nil && au.Response.Response.StatusCode != http.StatusNoContent { - au, err = client.GrantAccessResponder(au.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.SnapshotsGrantAccessFuture", "Result", au.Response.Response, "Failure responding to request") - } - } - return -} - -// SnapshotSku the snapshots sku name. Can be Standard_LRS, Premium_LRS, or Standard_ZRS. -type SnapshotSku struct { - // Name - The sku name. Possible values include: 'StandardLRS', 'PremiumLRS', 'StandardZRS' - Name SnapshotStorageAccountTypes `json:"name,omitempty"` - // Tier - The sku tier. - Tier *string `json:"tier,omitempty"` -} - -// SnapshotsRevokeAccessFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type SnapshotsRevokeAccessFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. 
-// If the operation has not completed it will return an error. -func (future *SnapshotsRevokeAccessFuture) Result(client SnapshotsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.SnapshotsRevokeAccessFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.SnapshotsRevokeAccessFuture") - return - } - ar.Response = future.Response() - return -} - -// SnapshotsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. -type SnapshotsUpdateFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *SnapshotsUpdateFuture) Result(client SnapshotsClient) (s Snapshot, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.SnapshotsUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.SnapshotsUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if s.Response.Response, err = future.GetResult(sender); err == nil && s.Response.Response.StatusCode != http.StatusNoContent { - s, err = client.UpdateResponder(s.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.SnapshotsUpdateFuture", "Result", s.Response.Response, "Failure responding to request") - } - } - return -} - -// SnapshotUpdate snapshot update resource. -type SnapshotUpdate struct { - *DiskUpdateProperties `json:"properties,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` - Sku *SnapshotSku `json:"sku,omitempty"` -} - -// MarshalJSON is the custom marshaler for SnapshotUpdate. -func (su SnapshotUpdate) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if su.DiskUpdateProperties != nil { - objectMap["properties"] = su.DiskUpdateProperties - } - if su.Tags != nil { - objectMap["tags"] = su.Tags - } - if su.Sku != nil { - objectMap["sku"] = su.Sku - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for SnapshotUpdate struct. 
-func (su *SnapshotUpdate) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var diskUpdateProperties DiskUpdateProperties - err = json.Unmarshal(*v, &diskUpdateProperties) - if err != nil { - return err - } - su.DiskUpdateProperties = &diskUpdateProperties - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - su.Tags = tags - } - case "sku": - if v != nil { - var sku SnapshotSku - err = json.Unmarshal(*v, &sku) - if err != nil { - return err - } - su.Sku = &sku - } - } - } - - return nil -} - -// SourceVault the vault id is an Azure Resource Manager Resoure id in the form -// /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName} -type SourceVault struct { - // ID - Resource Id - ID *string `json:"id,omitempty"` -} - -// SSHConfiguration SSH configuration for Linux based VMs running on Azure -type SSHConfiguration struct { - // PublicKeys - The list of SSH public keys used to authenticate with linux based VMs. - PublicKeys *[]SSHPublicKey `json:"publicKeys,omitempty"` -} - -// SSHPublicKey contains information about SSH certificate public key and the path on the Linux VM where the public -// key is placed. -type SSHPublicKey struct { - // Path - Specifies the full path on the created VM where ssh public key is stored. If the file already exists, the specified key is appended to the file. Example: /home/user/.ssh/authorized_keys - Path *string `json:"path,omitempty"` - // KeyData - SSH public key certificate used to authenticate with the VM through ssh. The key needs to be at least 2048-bit and in ssh-rsa format.
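SSHConfiguration and SSHPublicKey above are the declarative shape for key-based Linux login. A hedged sketch of populating them; the path and key material are placeholders:

```go
// Placeholder values; KeyData must be an ssh-rsa public key of at
// least 2048 bits, per the field comment above.
path := "/home/azureuser/.ssh/authorized_keys"
key := "ssh-rsa AAAAB3NzaC1yc2E... user@example"
ssh := compute.SSHConfiguration{
	PublicKeys: &[]compute.SSHPublicKey{{Path: &path, KeyData: &key}},
}
_ = ssh // typically wired into OSProfile.LinuxConfiguration.SSH
```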

    For creating ssh keys, see [Create SSH keys on Linux and Mac for Linux VMs in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-mac-create-ssh-keys?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json). - KeyData *string `json:"keyData,omitempty"` -} - -// StorageProfile specifies the storage settings for the virtual machine disks. -type StorageProfile struct { - // ImageReference - Specifies information about the image to use. You can specify information about platform images, marketplace images, or virtual machine images. This element is required when you want to use a platform image, marketplace image, or virtual machine image, but is not used in other creation operations. - ImageReference *ImageReference `json:"imageReference,omitempty"` - // OsDisk - Specifies information about the operating system disk used by the virtual machine.

    For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). - OsDisk *OSDisk `json:"osDisk,omitempty"` - // DataDisks - Specifies the parameters that are used to add a data disk to a virtual machine.

    For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). - DataDisks *[]DataDisk `json:"dataDisks,omitempty"` -} - -// SubResource ... -type SubResource struct { - // ID - Resource Id - ID *string `json:"id,omitempty"` -} - -// SubResourceReadOnly ... -type SubResourceReadOnly struct { - // ID - Resource Id - ID *string `json:"id,omitempty"` -} - -// ThrottledRequestsInput api request input for LogAnalytics getThrottledRequests Api. -type ThrottledRequestsInput struct { - // BlobContainerSasURI - SAS Uri of the logging blob container to which LogAnalytics Api writes output logs to. - BlobContainerSasURI *string `json:"blobContainerSasUri,omitempty"` - // FromTime - From time of the query - FromTime *date.Time `json:"fromTime,omitempty"` - // ToTime - To time of the query - ToTime *date.Time `json:"toTime,omitempty"` - // GroupByThrottlePolicy - Group query result by Throttle Policy applied. - GroupByThrottlePolicy *bool `json:"groupByThrottlePolicy,omitempty"` - // GroupByOperationName - Group query result by by Operation Name. - GroupByOperationName *bool `json:"groupByOperationName,omitempty"` - // GroupByResourceName - Group query result by Resource Name. - GroupByResourceName *bool `json:"groupByResourceName,omitempty"` -} - -// UpdateResource the Update Resource model definition. -type UpdateResource struct { - // Tags - Resource tags - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for UpdateResource. -func (ur UpdateResource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ur.Tags != nil { - objectMap["tags"] = ur.Tags - } - return json.Marshal(objectMap) -} - -// UpgradeOperationHistoricalStatusInfo virtual Machine Scale Set OS Upgrade History operation response. -type UpgradeOperationHistoricalStatusInfo struct { - // Properties - Information about the properties of the upgrade operation. - Properties *UpgradeOperationHistoricalStatusInfoProperties `json:"properties,omitempty"` - // Type - Resource type - Type *string `json:"type,omitempty"` - // Location - Resource location - Location *string `json:"location,omitempty"` -} - -// UpgradeOperationHistoricalStatusInfoProperties describes each OS upgrade on the Virtual Machine Scale Set. -type UpgradeOperationHistoricalStatusInfoProperties struct { - // RunningStatus - Information about the overall status of the upgrade operation. - RunningStatus *UpgradeOperationHistoryStatus `json:"runningStatus,omitempty"` - // Progress - Counts of the VM's in each state. - Progress *RollingUpgradeProgressInfo `json:"progress,omitempty"` - // Error - Error Details for this upgrade if there are any. - Error *APIError `json:"error,omitempty"` - // StartedBy - Invoker of the Upgrade Operation. Possible values include: 'Unknown', 'User', 'Platform' - StartedBy UpgradeOperationInvoker `json:"startedBy,omitempty"` - // TargetImageReference - Image Reference details - TargetImageReference *ImageReference `json:"targetImageReference,omitempty"` - // RollbackInfo - Information about OS rollback if performed - RollbackInfo *RollbackStatusInfo `json:"rollbackInfo,omitempty"` -} - -// UpgradeOperationHistoryStatus information about the current running state of the overall upgrade. -type UpgradeOperationHistoryStatus struct { - // Code - Code indicating the current status of the upgrade. 
Possible values include: 'UpgradeStateRollingForward', 'UpgradeStateCancelled', 'UpgradeStateCompleted', 'UpgradeStateFaulted' - Code UpgradeState `json:"code,omitempty"` - // StartTime - Start time of the upgrade. - StartTime *date.Time `json:"startTime,omitempty"` - // EndTime - End time of the upgrade. - EndTime *date.Time `json:"endTime,omitempty"` -} - -// UpgradePolicy describes an upgrade policy - automatic, manual, or rolling. -type UpgradePolicy struct { - // Mode - Specifies the mode of an upgrade to virtual machines in the scale set.

    Possible values are:
    **Manual** - You control the application of updates to virtual machines in the scale set. You do this by using the manualUpgrade action.
    **Automatic** - All virtual machines in the scale set are automatically updated at the same time. Possible values include: 'Automatic', 'Manual', 'Rolling' - Mode UpgradeMode `json:"mode,omitempty"` - // RollingUpgradePolicy - The configuration parameters used while performing a rolling upgrade. - RollingUpgradePolicy *RollingUpgradePolicy `json:"rollingUpgradePolicy,omitempty"` - // AutomaticOSUpgrade - Whether OS upgrades should automatically be applied to scale set instances in a rolling fashion when a newer version of the image becomes available. - AutomaticOSUpgrade *bool `json:"automaticOSUpgrade,omitempty"` - // AutoOSUpgradePolicy - Configuration parameters used for performing automatic OS Upgrade. - AutoOSUpgradePolicy *AutoOSUpgradePolicy `json:"autoOSUpgradePolicy,omitempty"` -} - -// Usage describes Compute Resource Usage. -type Usage struct { - // Unit - An enum describing the unit of usage measurement. - Unit *string `json:"unit,omitempty"` - // CurrentValue - The current usage of the resource. - CurrentValue *int32 `json:"currentValue,omitempty"` - // Limit - The maximum permitted usage of the resource. - Limit *int64 `json:"limit,omitempty"` - // Name - The name of the type of usage. - Name *UsageName `json:"name,omitempty"` -} - -// UsageName the Usage Names. -type UsageName struct { - // Value - The name of the resource. - Value *string `json:"value,omitempty"` - // LocalizedValue - The localized name of the resource. - LocalizedValue *string `json:"localizedValue,omitempty"` -} - -// VaultCertificate describes a single certificate reference in a Key Vault, and where the certificate should -// reside on the VM. -type VaultCertificate struct { - // CertificateURL - This is the URL of a certificate that has been uploaded to Key Vault as a secret. For adding a secret to the Key Vault, see [Add a key or secret to the key vault](https://docs.microsoft.com/azure/key-vault/key-vault-get-started/#add). In this case, your certificate needs to be It is the Base64 encoding of the following JSON Object which is encoded in UTF-8:

    {
    "data":"<Base64-encoded-certificate>",
    "dataType":"pfx",
    "password":"<pfx-file-password>"
    } - CertificateURL *string `json:"certificateUrl,omitempty"` - // CertificateStore - For Windows VMs, specifies the certificate store on the Virtual Machine to which the certificate should be added. The specified certificate store is implicitly in the LocalMachine account.
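The CertificateURL comment above describes the Key Vault secret format: a UTF-8 JSON envelope around the base64-encoded PFX, with the envelope itself base64-encoded. A hedged helper sketch; the function name and inputs are invented for illustration:

```go
import (
	"encoding/base64"
	"encoding/json"
)

// pfxToKeyVaultSecret is hypothetical; it builds the secret value the
// CertificateURL comment describes: base64(UTF-8 JSON{data,dataType,password}).
func pfxToKeyVaultSecret(pfx []byte, password string) (string, error) {
	envelope := map[string]string{
		"data":     base64.StdEncoding.EncodeToString(pfx),
		"dataType": "pfx",
		"password": password,
	}
	raw, err := json.Marshal(envelope) // Go strings marshal as UTF-8
	if err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(raw), nil
}
```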

    For Linux VMs, the certificate file is placed under the /var/lib/waagent directory, with the file name .crt for the X509 certificate file and .prv for private key. Both of these files are .pem formatted. - CertificateStore *string `json:"certificateStore,omitempty"` -} - -// VaultSecretGroup describes a set of certificates which are all in the same Key Vault. -type VaultSecretGroup struct { - // SourceVault - The relative URL of the Key Vault containing all of the certificates in VaultCertificates. - SourceVault *SubResource `json:"sourceVault,omitempty"` - // VaultCertificates - The list of key vault references in SourceVault which contain certificates. - VaultCertificates *[]VaultCertificate `json:"vaultCertificates,omitempty"` -} - -// VirtualHardDisk describes the uri of a disk. -type VirtualHardDisk struct { - // URI - Specifies the virtual hard disk's uri. - URI *string `json:"uri,omitempty"` -} - -// VirtualMachine describes a Virtual Machine. -type VirtualMachine struct { - autorest.Response `json:"-"` - // Plan - Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**. - Plan *Plan `json:"plan,omitempty"` - *VirtualMachineProperties `json:"properties,omitempty"` - // Resources - The virtual machine child extension resources. - Resources *[]VirtualMachineExtension `json:"resources,omitempty"` - // Identity - The identity of the virtual machine, if configured. - Identity *VirtualMachineIdentity `json:"identity,omitempty"` - // Zones - The virtual machine zones. - Zones *[]string `json:"zones,omitempty"` - // ID - Resource Id - ID *string `json:"id,omitempty"` - // Name - Resource name - Name *string `json:"name,omitempty"` - // Type - Resource type - Type *string `json:"type,omitempty"` - // Location - Resource location - Location *string `json:"location,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for VirtualMachine. -func (VM VirtualMachine) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if VM.Plan != nil { - objectMap["plan"] = VM.Plan - } - if VM.VirtualMachineProperties != nil { - objectMap["properties"] = VM.VirtualMachineProperties - } - if VM.Resources != nil { - objectMap["resources"] = VM.Resources - } - if VM.Identity != nil { - objectMap["identity"] = VM.Identity - } - if VM.Zones != nil { - objectMap["zones"] = VM.Zones - } - if VM.ID != nil { - objectMap["id"] = VM.ID - } - if VM.Name != nil { - objectMap["name"] = VM.Name - } - if VM.Type != nil { - objectMap["type"] = VM.Type - } - if VM.Location != nil { - objectMap["location"] = VM.Location - } - if VM.Tags != nil { - objectMap["tags"] = VM.Tags - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for VirtualMachine struct. 
-func (VM *VirtualMachine) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "plan": - if v != nil { - var plan Plan - err = json.Unmarshal(*v, &plan) - if err != nil { - return err - } - VM.Plan = &plan - } - case "properties": - if v != nil { - var virtualMachineProperties VirtualMachineProperties - err = json.Unmarshal(*v, &virtualMachineProperties) - if err != nil { - return err - } - VM.VirtualMachineProperties = &virtualMachineProperties - } - case "resources": - if v != nil { - var resources []VirtualMachineExtension - err = json.Unmarshal(*v, &resources) - if err != nil { - return err - } - VM.Resources = &resources - } - case "identity": - if v != nil { - var identity VirtualMachineIdentity - err = json.Unmarshal(*v, &identity) - if err != nil { - return err - } - VM.Identity = &identity - } - case "zones": - if v != nil { - var zones []string - err = json.Unmarshal(*v, &zones) - if err != nil { - return err - } - VM.Zones = &zones - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - VM.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - VM.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - VM.Type = &typeVar - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - VM.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - VM.Tags = tags - } - } - } - - return nil -} - -// VirtualMachineAgentInstanceView the instance view of the VM Agent running on the virtual machine. -type VirtualMachineAgentInstanceView struct { - // VMAgentVersion - The VM Agent full version. - VMAgentVersion *string `json:"vmAgentVersion,omitempty"` - // ExtensionHandlers - The virtual machine extension handler instance view. - ExtensionHandlers *[]VirtualMachineExtensionHandlerInstanceView `json:"extensionHandlers,omitempty"` - // Statuses - The resource status information. - Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` -} - -// VirtualMachineCaptureParameters capture Virtual Machine parameters. -type VirtualMachineCaptureParameters struct { - // VhdPrefix - The captured virtual hard disk's name prefix. - VhdPrefix *string `json:"vhdPrefix,omitempty"` - // DestinationContainerName - The destination container name. - DestinationContainerName *string `json:"destinationContainerName,omitempty"` - // OverwriteVhds - Specifies whether to overwrite the destination virtual hard disk, in case of conflict. - OverwriteVhds *bool `json:"overwriteVhds,omitempty"` -} - -// VirtualMachineCaptureResult output of virtual machine capture operation. 
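VirtualMachine's custom marshaler and unmarshaler above exist because ARM nests mutable fields under a `properties` envelope on the wire, while the Go type flattens them through an embedded pointer. A self-contained toy version of the same round-trip pattern (all names invented):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// widgetProperties mirrors the wire-side "properties" envelope.
type widgetProperties struct {
	Size *int32 `json:"size,omitempty"`
}

// Widget flattens the envelope by embedding a pointer, as the
// generated models above do.
type Widget struct {
	*widgetProperties `json:"properties,omitempty"`
	ID                *string `json:"id,omitempty"`
}

// MarshalJSON re-nests the embedded struct under "properties".
func (w Widget) MarshalJSON() ([]byte, error) {
	m := map[string]interface{}{}
	if w.widgetProperties != nil {
		m["properties"] = w.widgetProperties
	}
	if w.ID != nil {
		m["id"] = w.ID
	}
	return json.Marshal(m)
}

// UnmarshalJSON is the inverse: pull "properties" out of the raw map.
func (w *Widget) UnmarshalJSON(body []byte) error {
	var m map[string]*json.RawMessage
	if err := json.Unmarshal(body, &m); err != nil {
		return err
	}
	if v := m["properties"]; v != nil {
		var p widgetProperties
		if err := json.Unmarshal(*v, &p); err != nil {
			return err
		}
		w.widgetProperties = &p
	}
	if v := m["id"]; v != nil {
		if err := json.Unmarshal(*v, &w.ID); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	var w Widget
	_ = json.Unmarshal([]byte(`{"id":"a","properties":{"size":3}}`), &w)
	fmt.Println(*w.ID, *w.Size) // prints "a 3"; Size is promoted through embedding
}
```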
-type VirtualMachineCaptureResult struct { - autorest.Response `json:"-"` - // Schema - the schema of the captured virtual machine - Schema *string `json:"$schema,omitempty"` - // ContentVersion - the version of the content - ContentVersion *string `json:"contentVersion,omitempty"` - // Parameters - parameters of the captured virtual machine - Parameters interface{} `json:"parameters,omitempty"` - // Resources - a list of resource items of the captured virtual machine - Resources *[]interface{} `json:"resources,omitempty"` - // ID - Resource Id - ID *string `json:"id,omitempty"` -} - -// VirtualMachineExtension describes a Virtual Machine Extension. -type VirtualMachineExtension struct { - autorest.Response `json:"-"` - *VirtualMachineExtensionProperties `json:"properties,omitempty"` - // ID - Resource Id - ID *string `json:"id,omitempty"` - // Name - Resource name - Name *string `json:"name,omitempty"` - // Type - Resource type - Type *string `json:"type,omitempty"` - // Location - Resource location - Location *string `json:"location,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for VirtualMachineExtension. -func (vme VirtualMachineExtension) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if vme.VirtualMachineExtensionProperties != nil { - objectMap["properties"] = vme.VirtualMachineExtensionProperties - } - if vme.ID != nil { - objectMap["id"] = vme.ID - } - if vme.Name != nil { - objectMap["name"] = vme.Name - } - if vme.Type != nil { - objectMap["type"] = vme.Type - } - if vme.Location != nil { - objectMap["location"] = vme.Location - } - if vme.Tags != nil { - objectMap["tags"] = vme.Tags - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for VirtualMachineExtension struct. -func (vme *VirtualMachineExtension) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var virtualMachineExtensionProperties VirtualMachineExtensionProperties - err = json.Unmarshal(*v, &virtualMachineExtensionProperties) - if err != nil { - return err - } - vme.VirtualMachineExtensionProperties = &virtualMachineExtensionProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - vme.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - vme.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - vme.Type = &typeVar - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - vme.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - vme.Tags = tags - } - } - } - - return nil -} - -// VirtualMachineExtensionHandlerInstanceView the instance view of a virtual machine extension handler. -type VirtualMachineExtensionHandlerInstanceView struct { - // Type - Specifies the type of the extension; an example is "CustomScriptExtension". - Type *string `json:"type,omitempty"` - // TypeHandlerVersion - Specifies the version of the script handler. 
- TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"` - // Status - The extension handler status. - Status *InstanceViewStatus `json:"status,omitempty"` -} - -// VirtualMachineExtensionImage describes a Virtual Machine Extension Image. -type VirtualMachineExtensionImage struct { - autorest.Response `json:"-"` - *VirtualMachineExtensionImageProperties `json:"properties,omitempty"` - // ID - Resource Id - ID *string `json:"id,omitempty"` - // Name - Resource name - Name *string `json:"name,omitempty"` - // Type - Resource type - Type *string `json:"type,omitempty"` - // Location - Resource location - Location *string `json:"location,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for VirtualMachineExtensionImage. -func (vmei VirtualMachineExtensionImage) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if vmei.VirtualMachineExtensionImageProperties != nil { - objectMap["properties"] = vmei.VirtualMachineExtensionImageProperties - } - if vmei.ID != nil { - objectMap["id"] = vmei.ID - } - if vmei.Name != nil { - objectMap["name"] = vmei.Name - } - if vmei.Type != nil { - objectMap["type"] = vmei.Type - } - if vmei.Location != nil { - objectMap["location"] = vmei.Location - } - if vmei.Tags != nil { - objectMap["tags"] = vmei.Tags - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for VirtualMachineExtensionImage struct. -func (vmei *VirtualMachineExtensionImage) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var virtualMachineExtensionImageProperties VirtualMachineExtensionImageProperties - err = json.Unmarshal(*v, &virtualMachineExtensionImageProperties) - if err != nil { - return err - } - vmei.VirtualMachineExtensionImageProperties = &virtualMachineExtensionImageProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - vmei.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - vmei.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - vmei.Type = &typeVar - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - vmei.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - vmei.Tags = tags - } - } - } - - return nil -} - -// VirtualMachineExtensionImageProperties describes the properties of a Virtual Machine Extension Image. -type VirtualMachineExtensionImageProperties struct { - // OperatingSystem - The operating system this extension supports. - OperatingSystem *string `json:"operatingSystem,omitempty"` - // ComputeRole - The type of role (IaaS or PaaS) this extension supports. - ComputeRole *string `json:"computeRole,omitempty"` - // HandlerSchema - The schema defined by publisher, where extension consumers should provide settings in a matching schema. - HandlerSchema *string `json:"handlerSchema,omitempty"` - // VMScaleSetEnabled - Whether the extension can be used on xRP VMScaleSets. 
By default existing extensions are usable on scalesets, but there might be cases where a publisher wants to explicitly indicate the extension is only enabled for CRP VMs but not VMSS. - VMScaleSetEnabled *bool `json:"vmScaleSetEnabled,omitempty"` - // SupportsMultipleExtensions - Whether the handler can support multiple extensions. - SupportsMultipleExtensions *bool `json:"supportsMultipleExtensions,omitempty"` -} - -// VirtualMachineExtensionInstanceView the instance view of a virtual machine extension. -type VirtualMachineExtensionInstanceView struct { - // Name - The virtual machine extension name. - Name *string `json:"name,omitempty"` - // Type - Specifies the type of the extension; an example is "CustomScriptExtension". - Type *string `json:"type,omitempty"` - // TypeHandlerVersion - Specifies the version of the script handler. - TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"` - // Substatuses - The resource status information. - Substatuses *[]InstanceViewStatus `json:"substatuses,omitempty"` - // Statuses - The resource status information. - Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` -} - -// VirtualMachineExtensionProperties describes the properties of a Virtual Machine Extension. -type VirtualMachineExtensionProperties struct { - // ForceUpdateTag - How the extension handler should be forced to update even if the extension configuration has not changed. - ForceUpdateTag *string `json:"forceUpdateTag,omitempty"` - // Publisher - The name of the extension handler publisher. - Publisher *string `json:"publisher,omitempty"` - // Type - Specifies the type of the extension; an example is "CustomScriptExtension". - Type *string `json:"type,omitempty"` - // TypeHandlerVersion - Specifies the version of the script handler. - TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"` - // AutoUpgradeMinorVersion - Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true. - AutoUpgradeMinorVersion *bool `json:"autoUpgradeMinorVersion,omitempty"` - // Settings - Json formatted public settings for the extension. - Settings interface{} `json:"settings,omitempty"` - // ProtectedSettings - The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all. - ProtectedSettings interface{} `json:"protectedSettings,omitempty"` - // ProvisioningState - The provisioning state, which only appears in the response. - ProvisioningState *string `json:"provisioningState,omitempty"` - // InstanceView - The virtual machine extension instance view. - InstanceView *VirtualMachineExtensionInstanceView `json:"instanceView,omitempty"` -} - -// VirtualMachineExtensionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type VirtualMachineExtensionsCreateOrUpdateFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. 
-func (future *VirtualMachineExtensionsCreateOrUpdateFuture) Result(client VirtualMachineExtensionsClient) (vme VirtualMachineExtension, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineExtensionsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if vme.Response.Response, err = future.GetResult(sender); err == nil && vme.Response.Response.StatusCode != http.StatusNoContent { - vme, err = client.CreateOrUpdateResponder(vme.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsCreateOrUpdateFuture", "Result", vme.Response.Response, "Failure responding to request") - } - } - return -} - -// VirtualMachineExtensionsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type VirtualMachineExtensionsDeleteFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *VirtualMachineExtensionsDeleteFuture) Result(client VirtualMachineExtensionsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineExtensionsDeleteFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualMachineExtensionsListResult the List Extension operation response -type VirtualMachineExtensionsListResult struct { - autorest.Response `json:"-"` - // Value - The list of extensions - Value *[]VirtualMachineExtension `json:"value,omitempty"` -} - -// VirtualMachineExtensionsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type VirtualMachineExtensionsUpdateFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. 
-func (future *VirtualMachineExtensionsUpdateFuture) Result(client VirtualMachineExtensionsClient) (vme VirtualMachineExtension, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineExtensionsUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if vme.Response.Response, err = future.GetResult(sender); err == nil && vme.Response.Response.StatusCode != http.StatusNoContent { - vme, err = client.UpdateResponder(vme.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsUpdateFuture", "Result", vme.Response.Response, "Failure responding to request") - } - } - return -} - -// VirtualMachineExtensionUpdate describes a Virtual Machine Extension. -type VirtualMachineExtensionUpdate struct { - *VirtualMachineExtensionUpdateProperties `json:"properties,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for VirtualMachineExtensionUpdate. -func (vmeu VirtualMachineExtensionUpdate) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if vmeu.VirtualMachineExtensionUpdateProperties != nil { - objectMap["properties"] = vmeu.VirtualMachineExtensionUpdateProperties - } - if vmeu.Tags != nil { - objectMap["tags"] = vmeu.Tags - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for VirtualMachineExtensionUpdate struct. -func (vmeu *VirtualMachineExtensionUpdate) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var virtualMachineExtensionUpdateProperties VirtualMachineExtensionUpdateProperties - err = json.Unmarshal(*v, &virtualMachineExtensionUpdateProperties) - if err != nil { - return err - } - vmeu.VirtualMachineExtensionUpdateProperties = &virtualMachineExtensionUpdateProperties - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - vmeu.Tags = tags - } - } - } - - return nil -} - -// VirtualMachineExtensionUpdateProperties describes the properties of a Virtual Machine Extension. -type VirtualMachineExtensionUpdateProperties struct { - // ForceUpdateTag - How the extension handler should be forced to update even if the extension configuration has not changed. - ForceUpdateTag *string `json:"forceUpdateTag,omitempty"` - // Publisher - The name of the extension handler publisher. - Publisher *string `json:"publisher,omitempty"` - // Type - Specifies the type of the extension; an example is "CustomScriptExtension". - Type *string `json:"type,omitempty"` - // TypeHandlerVersion - Specifies the version of the script handler. - TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"` - // AutoUpgradeMinorVersion - Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true. 
- AutoUpgradeMinorVersion *bool `json:"autoUpgradeMinorVersion,omitempty"` - // Settings - Json formatted public settings for the extension. - Settings interface{} `json:"settings,omitempty"` - // ProtectedSettings - The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all. - ProtectedSettings interface{} `json:"protectedSettings,omitempty"` -} - -// VirtualMachineHealthStatus the health status of the VM. -type VirtualMachineHealthStatus struct { - // Status - The health status information for the VM. - Status *InstanceViewStatus `json:"status,omitempty"` -} - -// VirtualMachineIdentity identity for the virtual machine. -type VirtualMachineIdentity struct { - // PrincipalID - The principal id of virtual machine identity. This property will only be provided for a system assigned identity. - PrincipalID *string `json:"principalId,omitempty"` - // TenantID - The tenant id associated with the virtual machine. This property will only be provided for a system assigned identity. - TenantID *string `json:"tenantId,omitempty"` - // Type - The type of identity used for the virtual machine. The type 'SystemAssigned, UserAssigned' includes both an implicitly created identity and a set of user assigned identities. The type 'None' will remove any identities from the virtual machine. Possible values include: 'ResourceIdentityTypeSystemAssigned', 'ResourceIdentityTypeUserAssigned', 'ResourceIdentityTypeSystemAssignedUserAssigned', 'ResourceIdentityTypeNone' - Type ResourceIdentityType `json:"type,omitempty"` - // IdentityIds - The list of user identities associated with the Virtual Machine. The user identity references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/identities/{identityName}'. - IdentityIds *[]string `json:"identityIds,omitempty"` -} - -// VirtualMachineImage describes a Virtual Machine Image. -type VirtualMachineImage struct { - autorest.Response `json:"-"` - *VirtualMachineImageProperties `json:"properties,omitempty"` - // Name - The name of the resource. - Name *string `json:"name,omitempty"` - // Location - The supported Azure location of the resource. - Location *string `json:"location,omitempty"` - // Tags - Specifies the tags that are assigned to the virtual machine. For more information about using tags, see [Using tags to organize your Azure resources](https://docs.microsoft.com/azure/azure-resource-manager/resource-group-using-tags.md). - Tags map[string]*string `json:"tags"` - // ID - Resource Id - ID *string `json:"id,omitempty"` -} - -// MarshalJSON is the custom marshaler for VirtualMachineImage. -func (vmi VirtualMachineImage) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if vmi.VirtualMachineImageProperties != nil { - objectMap["properties"] = vmi.VirtualMachineImageProperties - } - if vmi.Name != nil { - objectMap["name"] = vmi.Name - } - if vmi.Location != nil { - objectMap["location"] = vmi.Location - } - if vmi.Tags != nil { - objectMap["tags"] = vmi.Tags - } - if vmi.ID != nil { - objectMap["id"] = vmi.ID - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for VirtualMachineImage struct. 
-func (vmi *VirtualMachineImage) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var virtualMachineImageProperties VirtualMachineImageProperties - err = json.Unmarshal(*v, &virtualMachineImageProperties) - if err != nil { - return err - } - vmi.VirtualMachineImageProperties = &virtualMachineImageProperties - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - vmi.Name = &name - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - vmi.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - vmi.Tags = tags - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - vmi.ID = &ID - } - } - } - - return nil -} - -// VirtualMachineImageProperties describes the properties of a Virtual Machine Image. -type VirtualMachineImageProperties struct { - Plan *PurchasePlan `json:"plan,omitempty"` - OsDiskImage *OSDiskImage `json:"osDiskImage,omitempty"` - DataDiskImages *[]DataDiskImage `json:"dataDiskImages,omitempty"` -} - -// VirtualMachineImageResource virtual machine image resource information. -type VirtualMachineImageResource struct { - // Name - The name of the resource. - Name *string `json:"name,omitempty"` - // Location - The supported Azure location of the resource. - Location *string `json:"location,omitempty"` - // Tags - Specifies the tags that are assigned to the virtual machine. For more information about using tags, see [Using tags to organize your Azure resources](https://docs.microsoft.com/azure/azure-resource-manager/resource-group-using-tags.md). - Tags map[string]*string `json:"tags"` - // ID - Resource Id - ID *string `json:"id,omitempty"` -} - -// MarshalJSON is the custom marshaler for VirtualMachineImageResource. -func (vmir VirtualMachineImageResource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if vmir.Name != nil { - objectMap["name"] = vmir.Name - } - if vmir.Location != nil { - objectMap["location"] = vmir.Location - } - if vmir.Tags != nil { - objectMap["tags"] = vmir.Tags - } - if vmir.ID != nil { - objectMap["id"] = vmir.ID - } - return json.Marshal(objectMap) -} - -// VirtualMachineInstanceView the instance view of a virtual machine. -type VirtualMachineInstanceView struct { - autorest.Response `json:"-"` - // PlatformUpdateDomain - Specifies the update domain of the virtual machine. - PlatformUpdateDomain *int32 `json:"platformUpdateDomain,omitempty"` - // PlatformFaultDomain - Specifies the fault domain of the virtual machine. - PlatformFaultDomain *int32 `json:"platformFaultDomain,omitempty"` - // ComputerName - The computer name assigned to the virtual machine. - ComputerName *string `json:"computerName,omitempty"` - // OsName - The Operating System running on the virtual machine. - OsName *string `json:"osName,omitempty"` - // OsVersion - The version of Operating System running on the virtual machine. - OsVersion *string `json:"osVersion,omitempty"` - // RdpThumbPrint - The Remote desktop certificate thumbprint. - RdpThumbPrint *string `json:"rdpThumbPrint,omitempty"` - // VMAgent - The VM Agent running on the virtual machine. 
- VMAgent *VirtualMachineAgentInstanceView `json:"vmAgent,omitempty"` - // MaintenanceRedeployStatus - The Maintenance Operation status on the virtual machine. - MaintenanceRedeployStatus *MaintenanceRedeployStatus `json:"maintenanceRedeployStatus,omitempty"` - // Disks - The virtual machine disk information. - Disks *[]DiskInstanceView `json:"disks,omitempty"` - // Extensions - The extensions information. - Extensions *[]VirtualMachineExtensionInstanceView `json:"extensions,omitempty"` - // BootDiagnostics - Boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot to diagnose VM status.

    For Linux Virtual Machines, you can easily view the output of your console log.

    For both Windows and Linux virtual machines, Azure also enables you to see a screenshot of the VM from the hypervisor. - BootDiagnostics *BootDiagnosticsInstanceView `json:"bootDiagnostics,omitempty"` - // Statuses - The resource status information. - Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` -} - -// VirtualMachineListResult the List Virtual Machine operation response. -type VirtualMachineListResult struct { - autorest.Response `json:"-"` - // Value - The list of virtual machines. - Value *[]VirtualMachine `json:"value,omitempty"` - // NextLink - The URI to fetch the next page of VMs. Call ListNext() with this URI to fetch the next page of Virtual Machines. - NextLink *string `json:"nextLink,omitempty"` -} - -// VirtualMachineListResultIterator provides access to a complete listing of VirtualMachine values. -type VirtualMachineListResultIterator struct { - i int - page VirtualMachineListResultPage -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *VirtualMachineListResultIterator) Next() error { - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err := iter.page.Next() - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter VirtualMachineListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter VirtualMachineListResultIterator) Response() VirtualMachineListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter VirtualMachineListResultIterator) Value() VirtualMachine { - if !iter.page.NotDone() { - return VirtualMachine{} - } - return iter.page.Values()[iter.i] -} - -// IsEmpty returns true if the ListResult contains no values. -func (vmlr VirtualMachineListResult) IsEmpty() bool { - return vmlr.Value == nil || len(*vmlr.Value) == 0 -} - -// virtualMachineListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (vmlr VirtualMachineListResult) virtualMachineListResultPreparer() (*http.Request, error) { - if vmlr.NextLink == nil || len(to.String(vmlr.NextLink)) < 1 { - return nil, nil - } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(vmlr.NextLink))) -} - -// VirtualMachineListResultPage contains a page of VirtualMachine values. -type VirtualMachineListResultPage struct { - fn func(VirtualMachineListResult) (VirtualMachineListResult, error) - vmlr VirtualMachineListResult -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *VirtualMachineListResultPage) Next() error { - next, err := page.fn(page.vmlr) - if err != nil { - return err - } - page.vmlr = next - return nil -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page VirtualMachineListResultPage) NotDone() bool { - return !page.vmlr.IsEmpty() -} - -// Response returns the raw server response from the last page request. 
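The iterator and page plumbing above follows autorest's standard paging contract: `NotDone` guards the loop, `Value` yields the current element, and `Next` advances, fetching the next page via `NextLink` when the current one is exhausted. A minimal consumption sketch, assuming it sits in the same `compute` package as these types and that the iterator came from a List call (not shown in this file):

```go
// collectVMs drains a VirtualMachineListResultIterator into a slice.
// Hypothetical helper for illustration; it uses only methods defined above.
func collectVMs(iter VirtualMachineListResultIterator) ([]VirtualMachine, error) {
	var vms []VirtualMachine
	for iter.NotDone() {
		vms = append(vms, iter.Value()) // current element of the current page
		if err := iter.Next(); err != nil {
			return nil, err // a failed page fetch aborts the enumeration
		}
	}
	return vms, nil
}
```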
-func (page VirtualMachineListResultPage) Response() VirtualMachineListResult { - return page.vmlr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page VirtualMachineListResultPage) Values() []VirtualMachine { - if page.vmlr.IsEmpty() { - return nil - } - return *page.vmlr.Value -} - -// VirtualMachineProperties describes the properties of a Virtual Machine. -type VirtualMachineProperties struct { - // HardwareProfile - Specifies the hardware settings for the virtual machine. - HardwareProfile *HardwareProfile `json:"hardwareProfile,omitempty"` - // StorageProfile - Specifies the storage settings for the virtual machine disks. - StorageProfile *StorageProfile `json:"storageProfile,omitempty"` - // OsProfile - Specifies the operating system settings for the virtual machine. - OsProfile *OSProfile `json:"osProfile,omitempty"` - // NetworkProfile - Specifies the network interfaces of the virtual machine. - NetworkProfile *NetworkProfile `json:"networkProfile,omitempty"` - // DiagnosticsProfile - Specifies the boot diagnostic settings state.

    Minimum api-version: 2015-06-15. - DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"` - // AvailabilitySet - Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Manage the availability of virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).

    For more information on Azure planned maintenance, see [Planned maintenance for virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)

    Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added to an availability set. - AvailabilitySet *SubResource `json:"availabilitySet,omitempty"` - // ProvisioningState - The provisioning state, which only appears in the response. - ProvisioningState *string `json:"provisioningState,omitempty"` - // InstanceView - The virtual machine instance view. - InstanceView *VirtualMachineInstanceView `json:"instanceView,omitempty"` - // LicenseType - Specifies that the image or disk that is being used was licensed on-premises. This element is only used for images that contain the Windows Server operating system.

    Possible values are: Windows_Client, Windows_Server.

    If this element is included in a request for an update, the value must match the initial value. This value cannot be updated.

    For more information, see [Azure Hybrid Use Benefit for Windows Server](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-hybrid-use-benefit-licensing?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)

    Minimum api-version: 2015-06-15 - LicenseType *string `json:"licenseType,omitempty"` - // VMID - Specifies the VM unique ID which is a 128-bits identifier that is encoded and stored in all Azure IaaS VMs SMBIOS and can be read using platform BIOS commands. - VMID *string `json:"vmId,omitempty"` -} - -// VirtualMachineScaleSet describes a Virtual Machine Scale Set. -type VirtualMachineScaleSet struct { - autorest.Response `json:"-"` - // Sku - The virtual machine scale set sku. - Sku *Sku `json:"sku,omitempty"` - // Plan - Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**. - Plan *Plan `json:"plan,omitempty"` - *VirtualMachineScaleSetProperties `json:"properties,omitempty"` - // Identity - The identity of the virtual machine scale set, if configured. - Identity *VirtualMachineScaleSetIdentity `json:"identity,omitempty"` - // Zones - The virtual machine scale set zones. - Zones *[]string `json:"zones,omitempty"` - // ID - Resource Id - ID *string `json:"id,omitempty"` - // Name - Resource name - Name *string `json:"name,omitempty"` - // Type - Resource type - Type *string `json:"type,omitempty"` - // Location - Resource location - Location *string `json:"location,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for VirtualMachineScaleSet. -func (vmss VirtualMachineScaleSet) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if vmss.Sku != nil { - objectMap["sku"] = vmss.Sku - } - if vmss.Plan != nil { - objectMap["plan"] = vmss.Plan - } - if vmss.VirtualMachineScaleSetProperties != nil { - objectMap["properties"] = vmss.VirtualMachineScaleSetProperties - } - if vmss.Identity != nil { - objectMap["identity"] = vmss.Identity - } - if vmss.Zones != nil { - objectMap["zones"] = vmss.Zones - } - if vmss.ID != nil { - objectMap["id"] = vmss.ID - } - if vmss.Name != nil { - objectMap["name"] = vmss.Name - } - if vmss.Type != nil { - objectMap["type"] = vmss.Type - } - if vmss.Location != nil { - objectMap["location"] = vmss.Location - } - if vmss.Tags != nil { - objectMap["tags"] = vmss.Tags - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSet struct. 
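One point worth noting about the marshaler above: the anonymous `*VirtualMachineScaleSetProperties` field is not inlined the way Go's default encoder treats an embedded struct; it is re-nested under a top-level `properties` key, matching the ARM wire format. A minimal same-package sketch with invented values:

```go
// exampleFlattenedMarshal shows the "properties" re-nesting performed by
// the custom MarshalJSON above. All field values are invented.
func exampleFlattenedMarshal() ([]byte, error) {
	name := "example-vmss" // hypothetical scale set name
	overprovision := true
	vmss := VirtualMachineScaleSet{
		Name: &name,
		VirtualMachineScaleSetProperties: &VirtualMachineScaleSetProperties{
			Overprovision: &overprovision,
		},
	}
	// Yields: {"name":"example-vmss","properties":{"overprovision":true}}
	return json.Marshal(vmss)
}
```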
-func (vmss *VirtualMachineScaleSet) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "sku": - if v != nil { - var sku Sku - err = json.Unmarshal(*v, &sku) - if err != nil { - return err - } - vmss.Sku = &sku - } - case "plan": - if v != nil { - var plan Plan - err = json.Unmarshal(*v, &plan) - if err != nil { - return err - } - vmss.Plan = &plan - } - case "properties": - if v != nil { - var virtualMachineScaleSetProperties VirtualMachineScaleSetProperties - err = json.Unmarshal(*v, &virtualMachineScaleSetProperties) - if err != nil { - return err - } - vmss.VirtualMachineScaleSetProperties = &virtualMachineScaleSetProperties - } - case "identity": - if v != nil { - var identity VirtualMachineScaleSetIdentity - err = json.Unmarshal(*v, &identity) - if err != nil { - return err - } - vmss.Identity = &identity - } - case "zones": - if v != nil { - var zones []string - err = json.Unmarshal(*v, &zones) - if err != nil { - return err - } - vmss.Zones = &zones - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - vmss.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - vmss.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - vmss.Type = &typeVar - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - vmss.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - vmss.Tags = tags - } - } - } - - return nil -} - -// VirtualMachineScaleSetDataDisk describes a virtual machine scale set data disk. -type VirtualMachineScaleSetDataDisk struct { - // Name - The disk name. - Name *string `json:"name,omitempty"` - // Lun - Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. - Lun *int32 `json:"lun,omitempty"` - // Caching - Specifies the caching requirements.

    Possible values are: **None**, **ReadOnly**, **ReadWrite**.

    Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' - Caching CachingTypes `json:"caching,omitempty"` - // WriteAcceleratorEnabled - Specifies whether writeAccelerator should be enabled or disabled on the disk. - WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"` - // CreateOption - The create option. Possible values include: 'DiskCreateOptionTypesFromImage', 'DiskCreateOptionTypesEmpty', 'DiskCreateOptionTypesAttach' - CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` - // DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image.

    This value cannot be larger than 1023 GB - DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` - // ManagedDisk - The managed disk parameters. - ManagedDisk *VirtualMachineScaleSetManagedDiskParameters `json:"managedDisk,omitempty"` -} - -// VirtualMachineScaleSetExtension describes a Virtual Machine Scale Set Extension. -type VirtualMachineScaleSetExtension struct { - autorest.Response `json:"-"` - // Name - The name of the extension. - Name *string `json:"name,omitempty"` - *VirtualMachineScaleSetExtensionProperties `json:"properties,omitempty"` - // ID - Resource Id - ID *string `json:"id,omitempty"` -} - -// MarshalJSON is the custom marshaler for VirtualMachineScaleSetExtension. -func (vmsse VirtualMachineScaleSetExtension) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if vmsse.Name != nil { - objectMap["name"] = vmsse.Name - } - if vmsse.VirtualMachineScaleSetExtensionProperties != nil { - objectMap["properties"] = vmsse.VirtualMachineScaleSetExtensionProperties - } - if vmsse.ID != nil { - objectMap["id"] = vmsse.ID - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSetExtension struct. -func (vmsse *VirtualMachineScaleSetExtension) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - vmsse.Name = &name - } - case "properties": - if v != nil { - var virtualMachineScaleSetExtensionProperties VirtualMachineScaleSetExtensionProperties - err = json.Unmarshal(*v, &virtualMachineScaleSetExtensionProperties) - if err != nil { - return err - } - vmsse.VirtualMachineScaleSetExtensionProperties = &virtualMachineScaleSetExtensionProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - vmsse.ID = &ID - } - } - } - - return nil -} - -// VirtualMachineScaleSetExtensionListResult the List VM scale set extension operation response. -type VirtualMachineScaleSetExtensionListResult struct { - autorest.Response `json:"-"` - // Value - The list of VM scale set extensions. - Value *[]VirtualMachineScaleSetExtension `json:"value,omitempty"` - // NextLink - The uri to fetch the next page of VM scale set extensions. Call ListNext() with this to fetch the next page of VM scale set extensions. - NextLink *string `json:"nextLink,omitempty"` -} - -// VirtualMachineScaleSetExtensionListResultIterator provides access to a complete listing of -// VirtualMachineScaleSetExtension values. -type VirtualMachineScaleSetExtensionListResultIterator struct { - i int - page VirtualMachineScaleSetExtensionListResultPage -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *VirtualMachineScaleSetExtensionListResultIterator) Next() error { - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err := iter.page.Next() - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter VirtualMachineScaleSetExtensionListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. 
-func (iter VirtualMachineScaleSetExtensionListResultIterator) Response() VirtualMachineScaleSetExtensionListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter VirtualMachineScaleSetExtensionListResultIterator) Value() VirtualMachineScaleSetExtension { - if !iter.page.NotDone() { - return VirtualMachineScaleSetExtension{} - } - return iter.page.Values()[iter.i] -} - -// IsEmpty returns true if the ListResult contains no values. -func (vmsselr VirtualMachineScaleSetExtensionListResult) IsEmpty() bool { - return vmsselr.Value == nil || len(*vmsselr.Value) == 0 -} - -// virtualMachineScaleSetExtensionListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (vmsselr VirtualMachineScaleSetExtensionListResult) virtualMachineScaleSetExtensionListResultPreparer() (*http.Request, error) { - if vmsselr.NextLink == nil || len(to.String(vmsselr.NextLink)) < 1 { - return nil, nil - } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(vmsselr.NextLink))) -} - -// VirtualMachineScaleSetExtensionListResultPage contains a page of VirtualMachineScaleSetExtension values. -type VirtualMachineScaleSetExtensionListResultPage struct { - fn func(VirtualMachineScaleSetExtensionListResult) (VirtualMachineScaleSetExtensionListResult, error) - vmsselr VirtualMachineScaleSetExtensionListResult -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *VirtualMachineScaleSetExtensionListResultPage) Next() error { - next, err := page.fn(page.vmsselr) - if err != nil { - return err - } - page.vmsselr = next - return nil -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page VirtualMachineScaleSetExtensionListResultPage) NotDone() bool { - return !page.vmsselr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page VirtualMachineScaleSetExtensionListResultPage) Response() VirtualMachineScaleSetExtensionListResult { - return page.vmsselr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page VirtualMachineScaleSetExtensionListResultPage) Values() []VirtualMachineScaleSetExtension { - if page.vmsselr.IsEmpty() { - return nil - } - return *page.vmsselr.Value -} - -// VirtualMachineScaleSetExtensionProfile describes a virtual machine scale set extension profile. -type VirtualMachineScaleSetExtensionProfile struct { - // Extensions - The virtual machine scale set child extension resources. - Extensions *[]VirtualMachineScaleSetExtension `json:"extensions,omitempty"` -} - -// VirtualMachineScaleSetExtensionProperties describes the properties of a Virtual Machine Scale Set Extension. -type VirtualMachineScaleSetExtensionProperties struct { - // ForceUpdateTag - If a value is provided and is different from the previous value, the extension handler will be forced to update even if the extension configuration has not changed. - ForceUpdateTag *string `json:"forceUpdateTag,omitempty"` - // Publisher - The name of the extension handler publisher. - Publisher *string `json:"publisher,omitempty"` - // Type - Specifies the type of the extension; an example is "CustomScriptExtension". 
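For callers that want whole pages rather than single elements, the page type above exposes the same contract one level up. A minimal sketch under the same assumptions as the earlier one:

```go
// collectExtensions walks every page of a VM scale set extension listing.
// Hypothetical helper; the first page is presumed to come from a List call.
func collectExtensions(page VirtualMachineScaleSetExtensionListResultPage) ([]VirtualMachineScaleSetExtension, error) {
	var exts []VirtualMachineScaleSetExtension
	for page.NotDone() {
		exts = append(exts, page.Values()...) // consume the whole page at once
		if err := page.Next(); err != nil {
			return nil, err
		}
	}
	return exts, nil
}
```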
- Type *string `json:"type,omitempty"` - // TypeHandlerVersion - Specifies the version of the script handler. - TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"` - // AutoUpgradeMinorVersion - Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true. - AutoUpgradeMinorVersion *bool `json:"autoUpgradeMinorVersion,omitempty"` - // Settings - Json formatted public settings for the extension. - Settings interface{} `json:"settings,omitempty"` - // ProtectedSettings - The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all. - ProtectedSettings interface{} `json:"protectedSettings,omitempty"` - // ProvisioningState - The provisioning state, which only appears in the response. - ProvisioningState *string `json:"provisioningState,omitempty"` -} - -// VirtualMachineScaleSetExtensionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of -// a long-running operation. -type VirtualMachineScaleSetExtensionsCreateOrUpdateFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *VirtualMachineScaleSetExtensionsCreateOrUpdateFuture) Result(client VirtualMachineScaleSetExtensionsClient) (vmsse VirtualMachineScaleSetExtension, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetExtensionsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if vmsse.Response.Response, err = future.GetResult(sender); err == nil && vmsse.Response.Response.StatusCode != http.StatusNoContent { - vmsse, err = client.CreateOrUpdateResponder(vmsse.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsCreateOrUpdateFuture", "Result", vmsse.Response.Response, "Failure responding to request") - } - } - return -} - -// VirtualMachineScaleSetExtensionsDeleteFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type VirtualMachineScaleSetExtensionsDeleteFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *VirtualMachineScaleSetExtensionsDeleteFuture) Result(client VirtualMachineScaleSetExtensionsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetExtensionsDeleteFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualMachineScaleSetIdentity identity for the virtual machine scale set. 
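Since `Settings` and `ProtectedSettings` above are plain `interface{}`, any JSON-serializable value works; a `map[string]interface{}` literal is the usual choice. A sketch with an invented publisher, handler version, and setting key (the extension type is the example named in the `Type` comment above):

```go
// exampleExtensionProperties builds an extension payload; every concrete
// value here is illustrative rather than taken from this file.
func exampleExtensionProperties() VirtualMachineScaleSetExtensionProperties {
	publisher := "Microsoft.Compute"   // hypothetical publisher
	extType := "CustomScriptExtension" // per the example in the Type comment
	version := "1.9"                   // hypothetical handler version
	autoUpgrade := true
	return VirtualMachineScaleSetExtensionProperties{
		Publisher:               &publisher,
		Type:                    &extType,
		TypeHandlerVersion:      &version,
		AutoUpgradeMinorVersion: &autoUpgrade,
		Settings: map[string]interface{}{
			"commandToExecute": "echo hello", // arbitrary JSON settings blob
		},
	}
}
```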
-type VirtualMachineScaleSetIdentity struct { - // PrincipalID - The principal id of virtual machine scale set identity. This property will only be provided for a system assigned identity. - PrincipalID *string `json:"principalId,omitempty"` - // TenantID - The tenant id associated with the virtual machine scale set. This property will only be provided for a system assigned identity. - TenantID *string `json:"tenantId,omitempty"` - // Type - The type of identity used for the virtual machine scale set. The type 'SystemAssigned, UserAssigned' includes both an implicitly created identity and a set of user assigned identities. The type 'None' will remove any identities from the virtual machine scale set. Possible values include: 'ResourceIdentityTypeSystemAssigned', 'ResourceIdentityTypeUserAssigned', 'ResourceIdentityTypeSystemAssignedUserAssigned', 'ResourceIdentityTypeNone' - Type ResourceIdentityType `json:"type,omitempty"` - // IdentityIds - The list of user identities associated with the virtual machine scale set. The user identity references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/identities/{identityName}'. - IdentityIds *[]string `json:"identityIds,omitempty"` -} - -// VirtualMachineScaleSetInstanceView the instance view of a virtual machine scale set. -type VirtualMachineScaleSetInstanceView struct { - autorest.Response `json:"-"` - // VirtualMachine - The instance view status summary for the virtual machine scale set. - VirtualMachine *VirtualMachineScaleSetInstanceViewStatusesSummary `json:"virtualMachine,omitempty"` - // Extensions - The extensions information. - Extensions *[]VirtualMachineScaleSetVMExtensionsSummary `json:"extensions,omitempty"` - // Statuses - The resource status information. - Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` -} - -// VirtualMachineScaleSetInstanceViewStatusesSummary instance view statuses summary for virtual machines of a -// virtual machine scale set. -type VirtualMachineScaleSetInstanceViewStatusesSummary struct { - // StatusesSummary - The extensions information. - StatusesSummary *[]VirtualMachineStatusCodeCount `json:"statusesSummary,omitempty"` -} - -// VirtualMachineScaleSetIPConfiguration describes a virtual machine scale set network profile's IP configuration. -type VirtualMachineScaleSetIPConfiguration struct { - // Name - The IP configuration name. - Name *string `json:"name,omitempty"` - *VirtualMachineScaleSetIPConfigurationProperties `json:"properties,omitempty"` - // ID - Resource Id - ID *string `json:"id,omitempty"` -} - -// MarshalJSON is the custom marshaler for VirtualMachineScaleSetIPConfiguration. -func (vmssic VirtualMachineScaleSetIPConfiguration) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if vmssic.Name != nil { - objectMap["name"] = vmssic.Name - } - if vmssic.VirtualMachineScaleSetIPConfigurationProperties != nil { - objectMap["properties"] = vmssic.VirtualMachineScaleSetIPConfigurationProperties - } - if vmssic.ID != nil { - objectMap["id"] = vmssic.ID - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSetIPConfiguration struct. 
-func (vmssic *VirtualMachineScaleSetIPConfiguration) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - vmssic.Name = &name - } - case "properties": - if v != nil { - var virtualMachineScaleSetIPConfigurationProperties VirtualMachineScaleSetIPConfigurationProperties - err = json.Unmarshal(*v, &virtualMachineScaleSetIPConfigurationProperties) - if err != nil { - return err - } - vmssic.VirtualMachineScaleSetIPConfigurationProperties = &virtualMachineScaleSetIPConfigurationProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - vmssic.ID = &ID - } - } - } - - return nil -} - -// VirtualMachineScaleSetIPConfigurationProperties describes a virtual machine scale set network profile's IP -// configuration properties. -type VirtualMachineScaleSetIPConfigurationProperties struct { - // Subnet - Specifies the identifier of the subnet. - Subnet *APIEntityReference `json:"subnet,omitempty"` - // Primary - Specifies the primary network interface in case the virtual machine has more than 1 network interface. - Primary *bool `json:"primary,omitempty"` - // PublicIPAddressConfiguration - The publicIPAddressConfiguration. - PublicIPAddressConfiguration *VirtualMachineScaleSetPublicIPAddressConfiguration `json:"publicIPAddressConfiguration,omitempty"` - // PrivateIPAddressVersion - Available from Api-Version 2017-03-30 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'. Possible values include: 'IPv4', 'IPv6' - PrivateIPAddressVersion IPVersion `json:"privateIPAddressVersion,omitempty"` - // ApplicationGatewayBackendAddressPools - Specifies an array of references to backend address pools of application gateways. A scale set can reference backend address pools of multiple application gateways. Multiple scale sets cannot use the same application gateway. - ApplicationGatewayBackendAddressPools *[]SubResource `json:"applicationGatewayBackendAddressPools,omitempty"` - // LoadBalancerBackendAddressPools - Specifies an array of references to backend address pools of load balancers. A scale set can reference backend address pools of one public and one internal load balancer. Multiple scale sets cannot use the same load balancer. - LoadBalancerBackendAddressPools *[]SubResource `json:"loadBalancerBackendAddressPools,omitempty"` - // LoadBalancerInboundNatPools - Specifies an array of references to inbound Nat pools of the load balancers. A scale set can reference inbound nat pools of one public and one internal load balancer. Multiple scale sets cannot use the same load balancer - LoadBalancerInboundNatPools *[]SubResource `json:"loadBalancerInboundNatPools,omitempty"` -} - -// VirtualMachineScaleSetIPTag contains the IP tag associated with the public IP address. -type VirtualMachineScaleSetIPTag struct { - // IPTagType - IP tag type. Example: FirstPartyUsage. - IPTagType *string `json:"ipTagType,omitempty"` - // Tag - IP tag associated with the public IP. Example: SQL, Storage etc. - Tag *string `json:"tag,omitempty"` -} - -// VirtualMachineScaleSetListOSUpgradeHistory list of Virtual Machine Scale Set OS Upgrade History operation -// response. 
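A sketch of composing the IP configuration defined above. It assumes the `IPv4` constant of `IPVersion` and an `ID` field on `APIEntityReference`, both defined elsewhere in this package; the subnet ID and names are invented:

```go
// exampleIPConfiguration wires the flattened properties struct by hand.
func exampleIPConfiguration() VirtualMachineScaleSetIPConfiguration {
	name := "ipconfig-0" // hypothetical
	primary := true
	subnetID := "/subscriptions/.../virtualNetworks/vnet/subnets/default" // hypothetical, elided
	return VirtualMachineScaleSetIPConfiguration{
		Name: &name,
		VirtualMachineScaleSetIPConfigurationProperties: &VirtualMachineScaleSetIPConfigurationProperties{
			Primary:                 &primary,
			Subnet:                  &APIEntityReference{ID: &subnetID}, // assumed field
			PrivateIPAddressVersion: IPv4,                               // assumed constant
		},
	}
}
```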
-type VirtualMachineScaleSetListOSUpgradeHistory struct { - autorest.Response `json:"-"` - // Value - The list of OS upgrades performed on the virtual machine scale set. - Value *[]UpgradeOperationHistoricalStatusInfo `json:"value,omitempty"` - // NextLink - The uri to fetch the next page of OS Upgrade History. Call ListNext() with this to fetch the next page of history of upgrades. - NextLink *string `json:"nextLink,omitempty"` -} - -// VirtualMachineScaleSetListOSUpgradeHistoryIterator provides access to a complete listing of -// UpgradeOperationHistoricalStatusInfo values. -type VirtualMachineScaleSetListOSUpgradeHistoryIterator struct { - i int - page VirtualMachineScaleSetListOSUpgradeHistoryPage -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *VirtualMachineScaleSetListOSUpgradeHistoryIterator) Next() error { - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err := iter.page.Next() - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter VirtualMachineScaleSetListOSUpgradeHistoryIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter VirtualMachineScaleSetListOSUpgradeHistoryIterator) Response() VirtualMachineScaleSetListOSUpgradeHistory { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter VirtualMachineScaleSetListOSUpgradeHistoryIterator) Value() UpgradeOperationHistoricalStatusInfo { - if !iter.page.NotDone() { - return UpgradeOperationHistoricalStatusInfo{} - } - return iter.page.Values()[iter.i] -} - -// IsEmpty returns true if the ListResult contains no values. -func (vmsslouh VirtualMachineScaleSetListOSUpgradeHistory) IsEmpty() bool { - return vmsslouh.Value == nil || len(*vmsslouh.Value) == 0 -} - -// virtualMachineScaleSetListOSUpgradeHistoryPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (vmsslouh VirtualMachineScaleSetListOSUpgradeHistory) virtualMachineScaleSetListOSUpgradeHistoryPreparer() (*http.Request, error) { - if vmsslouh.NextLink == nil || len(to.String(vmsslouh.NextLink)) < 1 { - return nil, nil - } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(vmsslouh.NextLink))) -} - -// VirtualMachineScaleSetListOSUpgradeHistoryPage contains a page of UpgradeOperationHistoricalStatusInfo values. -type VirtualMachineScaleSetListOSUpgradeHistoryPage struct { - fn func(VirtualMachineScaleSetListOSUpgradeHistory) (VirtualMachineScaleSetListOSUpgradeHistory, error) - vmsslouh VirtualMachineScaleSetListOSUpgradeHistory -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *VirtualMachineScaleSetListOSUpgradeHistoryPage) Next() error { - next, err := page.fn(page.vmsslouh) - if err != nil { - return err - } - page.vmsslouh = next - return nil -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. 
-func (page VirtualMachineScaleSetListOSUpgradeHistoryPage) NotDone() bool { - return !page.vmsslouh.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page VirtualMachineScaleSetListOSUpgradeHistoryPage) Response() VirtualMachineScaleSetListOSUpgradeHistory { - return page.vmsslouh -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page VirtualMachineScaleSetListOSUpgradeHistoryPage) Values() []UpgradeOperationHistoricalStatusInfo { - if page.vmsslouh.IsEmpty() { - return nil - } - return *page.vmsslouh.Value -} - -// VirtualMachineScaleSetListResult the List Virtual Machine operation response. -type VirtualMachineScaleSetListResult struct { - autorest.Response `json:"-"` - // Value - The list of virtual machine scale sets. - Value *[]VirtualMachineScaleSet `json:"value,omitempty"` - // NextLink - The uri to fetch the next page of Virtual Machine Scale Sets. Call ListNext() with this to fetch the next page of VMSS. - NextLink *string `json:"nextLink,omitempty"` -} - -// VirtualMachineScaleSetListResultIterator provides access to a complete listing of VirtualMachineScaleSet values. -type VirtualMachineScaleSetListResultIterator struct { - i int - page VirtualMachineScaleSetListResultPage -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *VirtualMachineScaleSetListResultIterator) Next() error { - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err := iter.page.Next() - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter VirtualMachineScaleSetListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter VirtualMachineScaleSetListResultIterator) Response() VirtualMachineScaleSetListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter VirtualMachineScaleSetListResultIterator) Value() VirtualMachineScaleSet { - if !iter.page.NotDone() { - return VirtualMachineScaleSet{} - } - return iter.page.Values()[iter.i] -} - -// IsEmpty returns true if the ListResult contains no values. -func (vmsslr VirtualMachineScaleSetListResult) IsEmpty() bool { - return vmsslr.Value == nil || len(*vmsslr.Value) == 0 -} - -// virtualMachineScaleSetListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (vmsslr VirtualMachineScaleSetListResult) virtualMachineScaleSetListResultPreparer() (*http.Request, error) { - if vmsslr.NextLink == nil || len(to.String(vmsslr.NextLink)) < 1 { - return nil, nil - } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(vmsslr.NextLink))) -} - -// VirtualMachineScaleSetListResultPage contains a page of VirtualMachineScaleSet values. -type VirtualMachineScaleSetListResultPage struct { - fn func(VirtualMachineScaleSetListResult) (VirtualMachineScaleSetListResult, error) - vmsslr VirtualMachineScaleSetListResult -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. 
-func (page *VirtualMachineScaleSetListResultPage) Next() error { - next, err := page.fn(page.vmsslr) - if err != nil { - return err - } - page.vmsslr = next - return nil -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page VirtualMachineScaleSetListResultPage) NotDone() bool { - return !page.vmsslr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page VirtualMachineScaleSetListResultPage) Response() VirtualMachineScaleSetListResult { - return page.vmsslr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page VirtualMachineScaleSetListResultPage) Values() []VirtualMachineScaleSet { - if page.vmsslr.IsEmpty() { - return nil - } - return *page.vmsslr.Value -} - -// VirtualMachineScaleSetListSkusResult the Virtual Machine Scale Set List Skus operation response. -type VirtualMachineScaleSetListSkusResult struct { - autorest.Response `json:"-"` - // Value - The list of skus available for the virtual machine scale set. - Value *[]VirtualMachineScaleSetSku `json:"value,omitempty"` - // NextLink - The uri to fetch the next page of Virtual Machine Scale Set Skus. Call ListNext() with this to fetch the next page of VMSS Skus. - NextLink *string `json:"nextLink,omitempty"` -} - -// VirtualMachineScaleSetListSkusResultIterator provides access to a complete listing of VirtualMachineScaleSetSku -// values. -type VirtualMachineScaleSetListSkusResultIterator struct { - i int - page VirtualMachineScaleSetListSkusResultPage -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *VirtualMachineScaleSetListSkusResultIterator) Next() error { - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err := iter.page.Next() - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter VirtualMachineScaleSetListSkusResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter VirtualMachineScaleSetListSkusResultIterator) Response() VirtualMachineScaleSetListSkusResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter VirtualMachineScaleSetListSkusResultIterator) Value() VirtualMachineScaleSetSku { - if !iter.page.NotDone() { - return VirtualMachineScaleSetSku{} - } - return iter.page.Values()[iter.i] -} - -// IsEmpty returns true if the ListResult contains no values. -func (vmsslsr VirtualMachineScaleSetListSkusResult) IsEmpty() bool { - return vmsslsr.Value == nil || len(*vmsslsr.Value) == 0 -} - -// virtualMachineScaleSetListSkusResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. 
-func (vmsslsr VirtualMachineScaleSetListSkusResult) virtualMachineScaleSetListSkusResultPreparer() (*http.Request, error) { - if vmsslsr.NextLink == nil || len(to.String(vmsslsr.NextLink)) < 1 { - return nil, nil - } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(vmsslsr.NextLink))) -} - -// VirtualMachineScaleSetListSkusResultPage contains a page of VirtualMachineScaleSetSku values. -type VirtualMachineScaleSetListSkusResultPage struct { - fn func(VirtualMachineScaleSetListSkusResult) (VirtualMachineScaleSetListSkusResult, error) - vmsslsr VirtualMachineScaleSetListSkusResult -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *VirtualMachineScaleSetListSkusResultPage) Next() error { - next, err := page.fn(page.vmsslsr) - if err != nil { - return err - } - page.vmsslsr = next - return nil -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page VirtualMachineScaleSetListSkusResultPage) NotDone() bool { - return !page.vmsslsr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page VirtualMachineScaleSetListSkusResultPage) Response() VirtualMachineScaleSetListSkusResult { - return page.vmsslsr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page VirtualMachineScaleSetListSkusResultPage) Values() []VirtualMachineScaleSetSku { - if page.vmsslsr.IsEmpty() { - return nil - } - return *page.vmsslsr.Value -} - -// VirtualMachineScaleSetListWithLinkResult the List Virtual Machine operation response. -type VirtualMachineScaleSetListWithLinkResult struct { - autorest.Response `json:"-"` - // Value - The list of virtual machine scale sets. - Value *[]VirtualMachineScaleSet `json:"value,omitempty"` - // NextLink - The uri to fetch the next page of Virtual Machine Scale Sets. Call ListNext() with this to fetch the next page of Virtual Machine Scale Sets. - NextLink *string `json:"nextLink,omitempty"` -} - -// VirtualMachineScaleSetListWithLinkResultIterator provides access to a complete listing of VirtualMachineScaleSet -// values. -type VirtualMachineScaleSetListWithLinkResultIterator struct { - i int - page VirtualMachineScaleSetListWithLinkResultPage -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *VirtualMachineScaleSetListWithLinkResultIterator) Next() error { - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err := iter.page.Next() - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter VirtualMachineScaleSetListWithLinkResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter VirtualMachineScaleSetListWithLinkResultIterator) Response() VirtualMachineScaleSetListWithLinkResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. 
-func (iter VirtualMachineScaleSetListWithLinkResultIterator) Value() VirtualMachineScaleSet { - if !iter.page.NotDone() { - return VirtualMachineScaleSet{} - } - return iter.page.Values()[iter.i] -} - -// IsEmpty returns true if the ListResult contains no values. -func (vmsslwlr VirtualMachineScaleSetListWithLinkResult) IsEmpty() bool { - return vmsslwlr.Value == nil || len(*vmsslwlr.Value) == 0 -} - -// virtualMachineScaleSetListWithLinkResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (vmsslwlr VirtualMachineScaleSetListWithLinkResult) virtualMachineScaleSetListWithLinkResultPreparer() (*http.Request, error) { - if vmsslwlr.NextLink == nil || len(to.String(vmsslwlr.NextLink)) < 1 { - return nil, nil - } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(vmsslwlr.NextLink))) -} - -// VirtualMachineScaleSetListWithLinkResultPage contains a page of VirtualMachineScaleSet values. -type VirtualMachineScaleSetListWithLinkResultPage struct { - fn func(VirtualMachineScaleSetListWithLinkResult) (VirtualMachineScaleSetListWithLinkResult, error) - vmsslwlr VirtualMachineScaleSetListWithLinkResult -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *VirtualMachineScaleSetListWithLinkResultPage) Next() error { - next, err := page.fn(page.vmsslwlr) - if err != nil { - return err - } - page.vmsslwlr = next - return nil -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page VirtualMachineScaleSetListWithLinkResultPage) NotDone() bool { - return !page.vmsslwlr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page VirtualMachineScaleSetListWithLinkResultPage) Response() VirtualMachineScaleSetListWithLinkResult { - return page.vmsslwlr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page VirtualMachineScaleSetListWithLinkResultPage) Values() []VirtualMachineScaleSet { - if page.vmsslwlr.IsEmpty() { - return nil - } - return *page.vmsslwlr.Value -} - -// VirtualMachineScaleSetManagedDiskParameters describes the parameters of a ScaleSet managed disk. -type VirtualMachineScaleSetManagedDiskParameters struct { - // StorageAccountType - Specifies the storage account type for the managed disk. Possible values are: Standard_LRS, Premium_LRS, and StandardSSD_LRS. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS' - StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` -} - -// VirtualMachineScaleSetNetworkConfiguration describes a virtual machine scale set network profile's network -// configurations. -type VirtualMachineScaleSetNetworkConfiguration struct { - // Name - The network configuration name. - Name *string `json:"name,omitempty"` - *VirtualMachineScaleSetNetworkConfigurationProperties `json:"properties,omitempty"` - // ID - Resource Id - ID *string `json:"id,omitempty"` -} - -// MarshalJSON is the custom marshaler for VirtualMachineScaleSetNetworkConfiguration. 
-func (vmssnc VirtualMachineScaleSetNetworkConfiguration) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if vmssnc.Name != nil { - objectMap["name"] = vmssnc.Name - } - if vmssnc.VirtualMachineScaleSetNetworkConfigurationProperties != nil { - objectMap["properties"] = vmssnc.VirtualMachineScaleSetNetworkConfigurationProperties - } - if vmssnc.ID != nil { - objectMap["id"] = vmssnc.ID - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSetNetworkConfiguration struct. -func (vmssnc *VirtualMachineScaleSetNetworkConfiguration) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - vmssnc.Name = &name - } - case "properties": - if v != nil { - var virtualMachineScaleSetNetworkConfigurationProperties VirtualMachineScaleSetNetworkConfigurationProperties - err = json.Unmarshal(*v, &virtualMachineScaleSetNetworkConfigurationProperties) - if err != nil { - return err - } - vmssnc.VirtualMachineScaleSetNetworkConfigurationProperties = &virtualMachineScaleSetNetworkConfigurationProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - vmssnc.ID = &ID - } - } - } - - return nil -} - -// VirtualMachineScaleSetNetworkConfigurationDNSSettings describes a virtual machines scale sets network -// configuration's DNS settings. -type VirtualMachineScaleSetNetworkConfigurationDNSSettings struct { - // DNSServers - List of DNS servers IP addresses - DNSServers *[]string `json:"dnsServers,omitempty"` -} - -// VirtualMachineScaleSetNetworkConfigurationProperties describes a virtual machine scale set network profile's IP -// configuration. -type VirtualMachineScaleSetNetworkConfigurationProperties struct { - // Primary - Specifies the primary network interface in case the virtual machine has more than 1 network interface. - Primary *bool `json:"primary,omitempty"` - // EnableAcceleratedNetworking - Specifies whether the network interface is accelerated networking-enabled. - EnableAcceleratedNetworking *bool `json:"enableAcceleratedNetworking,omitempty"` - // NetworkSecurityGroup - The network security group. - NetworkSecurityGroup *SubResource `json:"networkSecurityGroup,omitempty"` - // DNSSettings - The dns settings to be applied on the network interfaces. - DNSSettings *VirtualMachineScaleSetNetworkConfigurationDNSSettings `json:"dnsSettings,omitempty"` - // IPConfigurations - Specifies the IP configurations of the network interface. - IPConfigurations *[]VirtualMachineScaleSetIPConfiguration `json:"ipConfigurations,omitempty"` - // EnableIPForwarding - Whether IP forwarding enabled on this NIC. - EnableIPForwarding *bool `json:"enableIPForwarding,omitempty"` -} - -// VirtualMachineScaleSetNetworkProfile describes a virtual machine scale set network profile. -type VirtualMachineScaleSetNetworkProfile struct { - // HealthProbe - A reference to a load balancer probe used to determine the health of an instance in the virtual machine scale set. The reference will be in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes/{probeName}'. 
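Building on the previous sketch, a single primary NIC configuration with custom DNS servers, again same-package and with invented values:

```go
// exampleNetworkConfiguration composes a NIC definition from the types above,
// reusing the exampleIPConfiguration sketch.
func exampleNetworkConfiguration() VirtualMachineScaleSetNetworkConfiguration {
	name := "nic-0" // hypothetical
	primary := true
	return VirtualMachineScaleSetNetworkConfiguration{
		Name: &name,
		VirtualMachineScaleSetNetworkConfigurationProperties: &VirtualMachineScaleSetNetworkConfigurationProperties{
			Primary: &primary,
			DNSSettings: &VirtualMachineScaleSetNetworkConfigurationDNSSettings{
				DNSServers: &[]string{"10.0.0.4", "10.0.0.5"}, // hypothetical resolvers
			},
			IPConfigurations: &[]VirtualMachineScaleSetIPConfiguration{exampleIPConfiguration()},
		},
	}
}
```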
- HealthProbe *APIEntityReference `json:"healthProbe,omitempty"` - // NetworkInterfaceConfigurations - The list of network configurations. - NetworkInterfaceConfigurations *[]VirtualMachineScaleSetNetworkConfiguration `json:"networkInterfaceConfigurations,omitempty"` -} - -// VirtualMachineScaleSetOSDisk describes a virtual machine scale set operating system disk. -type VirtualMachineScaleSetOSDisk struct { - // Name - The disk name. - Name *string `json:"name,omitempty"` - // Caching - Specifies the caching requirements.

    Possible values are: **None**, **ReadOnly**, **ReadWrite**.

    Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' - Caching CachingTypes `json:"caching,omitempty"` - // WriteAcceleratorEnabled - Specifies whether writeAccelerator should be enabled or disabled on the disk. - WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"` - // CreateOption - Specifies how the virtual machines in the scale set should be created.

    The only allowed value is: **FromImage** – This value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imageReference element described above. If you are using a marketplace image, you also use the plan element previously described. Possible values include: 'DiskCreateOptionTypesFromImage', 'DiskCreateOptionTypesEmpty', 'DiskCreateOptionTypesAttach' - CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` - // DiskSizeGB - Specifies the size of the operating system disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image.

    This value cannot be larger than 1023 GB - DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` - // OsType - This property allows you to specify the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD.

    Possible values are: **Windows**, **Linux**. Possible values include: 'Windows', 'Linux' - OsType OperatingSystemTypes `json:"osType,omitempty"` - // Image - Specifies information about the unmanaged user image to base the scale set on. - Image *VirtualHardDisk `json:"image,omitempty"` - // VhdContainers - Specifies the container URLs that are used to store operating system disks for the scale set. - VhdContainers *[]string `json:"vhdContainers,omitempty"` - // ManagedDisk - The managed disk parameters. - ManagedDisk *VirtualMachineScaleSetManagedDiskParameters `json:"managedDisk,omitempty"` -} - -// VirtualMachineScaleSetOSProfile describes a virtual machine scale set OS profile. -type VirtualMachineScaleSetOSProfile struct { - // ComputerNamePrefix - Specifies the computer name prefix for all of the virtual machines in the scale set. Computer name prefixes must be 1 to 15 characters long. - ComputerNamePrefix *string `json:"computerNamePrefix,omitempty"` - // AdminUsername - Specifies the name of the administrator account.

    **Windows-only restriction:** Cannot end in "."

    **Disallowed values:** "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", "guest", "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", "test2", "test3", "user4", "user5".

    **Minimum-length (Linux):** 1 character

    **Max-length (Linux):** 64 characters

    **Max-length (Windows):** 20 characters

  • For root access to the Linux VM, see [Using root privileges on Linux virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-use-root-privileges?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)
  • For a list of built-in system users on Linux that should not be used in this field, see [Selecting User Names for Linux on Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-usernames?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json) - AdminUsername *string `json:"adminUsername,omitempty"` - // AdminPassword - Specifies the password of the administrator account.

    **Minimum-length (Windows):** 8 characters

    **Minimum-length (Linux):** 6 characters

    **Max-length (Windows):** 123 characters

    **Max-length (Linux):** 72 characters

    **Complexity requirements:** 3 out of 4 of the conditions below must be fulfilled:
    Has lowercase characters
    Has uppercase characters
    Has a digit
    Has a special character (regex match [\W_])

    **Disallowed values:** "abc@123", "P@$$w0rd", "P@ssw0rd", "P@ssword123", "Pa$$word", "pass@word1", "Password!", "Password1", "Password22", "iloveyou!"

    For resetting the password, see [How to reset the Remote Desktop service or its login password in a Windows VM](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-reset-rdp?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)

    For resetting root password, see [Manage users, SSH, and check or repair disks on Azure Linux VMs using the VMAccess Extension](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-vmaccess-extension?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json#reset-root-password) - AdminPassword *string `json:"adminPassword,omitempty"` - // CustomData - Specifies a base-64 encoded string of custom data. The base-64 encoded string is decoded to a binary array that is saved as a file on the Virtual Machine. The maximum length of the binary array is 65535 bytes.

    For using cloud-init for your VM, see [Using cloud-init to customize a Linux VM during creation](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-cloud-init?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json) - CustomData *string `json:"customData,omitempty"` - // WindowsConfiguration - Specifies Windows operating system settings on the virtual machine. - WindowsConfiguration *WindowsConfiguration `json:"windowsConfiguration,omitempty"` - // LinuxConfiguration - Specifies the Linux operating system settings on the virtual machine.

    For a list of supported Linux distributions, see [Linux on Azure-Endorsed Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-endorsed-distros?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)

    For running non-endorsed distributions, see [Information for Non-Endorsed Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-create-upload-generic?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json). - LinuxConfiguration *LinuxConfiguration `json:"linuxConfiguration,omitempty"` - // Secrets - Specifies the set of certificates that should be installed onto the virtual machines in the scale set. - Secrets *[]VaultSecretGroup `json:"secrets,omitempty"` -} - -// VirtualMachineScaleSetProperties describes the properties of a Virtual Machine Scale Set. -type VirtualMachineScaleSetProperties struct { - // UpgradePolicy - The upgrade policy. - UpgradePolicy *UpgradePolicy `json:"upgradePolicy,omitempty"` - // VirtualMachineProfile - The virtual machine profile. - VirtualMachineProfile *VirtualMachineScaleSetVMProfile `json:"virtualMachineProfile,omitempty"` - // ProvisioningState - The provisioning state, which only appears in the response. - ProvisioningState *string `json:"provisioningState,omitempty"` - // Overprovision - Specifies whether the Virtual Machine Scale Set should be overprovisioned. - Overprovision *bool `json:"overprovision,omitempty"` - // UniqueID - Specifies the ID which uniquely identifies a Virtual Machine Scale Set. - UniqueID *string `json:"uniqueId,omitempty"` - // SinglePlacementGroup - When true this limits the scale set to a single placement group, of max size 100 virtual machines. - SinglePlacementGroup *bool `json:"singlePlacementGroup,omitempty"` - // ZoneBalance - Whether to force strictly even Virtual Machine distribution across x-zones in case there is a zone outage. - ZoneBalance *bool `json:"zoneBalance,omitempty"` - // PlatformFaultDomainCount - Fault Domain count for each placement group. - PlatformFaultDomainCount *int32 `json:"platformFaultDomainCount,omitempty"` -} - -// VirtualMachineScaleSetPublicIPAddressConfiguration describes a virtual machines scale set IP Configuration's -// PublicIPAddress configuration -type VirtualMachineScaleSetPublicIPAddressConfiguration struct { - // Name - The publicIP address configuration name. - Name *string `json:"name,omitempty"` - *VirtualMachineScaleSetPublicIPAddressConfigurationProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for VirtualMachineScaleSetPublicIPAddressConfiguration. -func (vmsspiac VirtualMachineScaleSetPublicIPAddressConfiguration) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if vmsspiac.Name != nil { - objectMap["name"] = vmsspiac.Name - } - if vmsspiac.VirtualMachineScaleSetPublicIPAddressConfigurationProperties != nil { - objectMap["properties"] = vmsspiac.VirtualMachineScaleSetPublicIPAddressConfigurationProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSetPublicIPAddressConfiguration struct.
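On the `CustomData` field in the OS profile above: the string must already be base-64 encoded when set. A sketch, assuming `encoding/base64` is imported; the cloud-init payload is invented:

```go
// exampleCustomData encodes an illustrative cloud-init document for the
// CustomData field of VirtualMachineScaleSetOSProfile.
func exampleCustomData() *string {
	cloudInit := "#cloud-config\npackages:\n  - nginx\n" // hypothetical payload
	encoded := base64.StdEncoding.EncodeToString([]byte(cloudInit))
	return &encoded
}
```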
-func (vmsspiac *VirtualMachineScaleSetPublicIPAddressConfiguration) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - vmsspiac.Name = &name - } - case "properties": - if v != nil { - var virtualMachineScaleSetPublicIPAddressConfigurationProperties VirtualMachineScaleSetPublicIPAddressConfigurationProperties - err = json.Unmarshal(*v, &virtualMachineScaleSetPublicIPAddressConfigurationProperties) - if err != nil { - return err - } - vmsspiac.VirtualMachineScaleSetPublicIPAddressConfigurationProperties = &virtualMachineScaleSetPublicIPAddressConfigurationProperties - } - } - } - - return nil -} - -// VirtualMachineScaleSetPublicIPAddressConfigurationDNSSettings describes a virtual machines scale sets network -// configuration's DNS settings. -type VirtualMachineScaleSetPublicIPAddressConfigurationDNSSettings struct { - // DomainNameLabel - The Domain name label.The concatenation of the domain name label and vm index will be the domain name labels of the PublicIPAddress resources that will be created - DomainNameLabel *string `json:"domainNameLabel,omitempty"` -} - -// VirtualMachineScaleSetPublicIPAddressConfigurationProperties describes a virtual machines scale set IP -// Configuration's PublicIPAddress configuration -type VirtualMachineScaleSetPublicIPAddressConfigurationProperties struct { - // IdleTimeoutInMinutes - The idle timeout of the public IP address. - IdleTimeoutInMinutes *int32 `json:"idleTimeoutInMinutes,omitempty"` - // DNSSettings - The dns settings to be applied on the publicIP addresses . - DNSSettings *VirtualMachineScaleSetPublicIPAddressConfigurationDNSSettings `json:"dnsSettings,omitempty"` - // IPTags - The list of IP tags associated with the public IP address. - IPTags *[]VirtualMachineScaleSetIPTag `json:"ipTags,omitempty"` -} - -// VirtualMachineScaleSetRollingUpgradesCancelFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type VirtualMachineScaleSetRollingUpgradesCancelFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *VirtualMachineScaleSetRollingUpgradesCancelFuture) Result(client VirtualMachineScaleSetRollingUpgradesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesCancelFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetRollingUpgradesCancelFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture an abstraction for monitoring and retrieving the -// results of a long-running operation. -type VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. 
-func (future *VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture) Result(client VirtualMachineScaleSetRollingUpgradesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualMachineScaleSetsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type VirtualMachineScaleSetsCreateOrUpdateFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *VirtualMachineScaleSetsCreateOrUpdateFuture) Result(client VirtualMachineScaleSetsClient) (vmss VirtualMachineScaleSet, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if vmss.Response.Response, err = future.GetResult(sender); err == nil && vmss.Response.Response.StatusCode != http.StatusNoContent { - vmss, err = client.CreateOrUpdateResponder(vmss.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsCreateOrUpdateFuture", "Result", vmss.Response.Response, "Failure responding to request") - } - } - return -} - -// VirtualMachineScaleSetsDeallocateFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type VirtualMachineScaleSetsDeallocateFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *VirtualMachineScaleSetsDeallocateFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsDeallocateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsDeallocateFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualMachineScaleSetsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type VirtualMachineScaleSetsDeleteFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. 
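
Every `*Future` wrapper deleted in this file follows the same contract: `Done` makes one polling round-trip against the operation's status URL, and `Result` may only decode the payload once `Done` reports completion. Below is a minimal sketch of driving one of these futures by hand; the import path is assumed from the API version of this vendored SDK, and real callers would typically prefer the blocking helpers on `azure.Future`.

```go
package example

import (
	"time"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
)

// waitForVMSS polls a CreateOrUpdate future until the service reports the
// operation finished, then decodes the final VirtualMachineScaleSet payload.
func waitForVMSS(future compute.VirtualMachineScaleSetsCreateOrUpdateFuture,
	client compute.VirtualMachineScaleSetsClient) (compute.VirtualMachineScaleSet, error) {
	for {
		done, err := future.Done(client) // one status round-trip
		if err != nil {
			return compute.VirtualMachineScaleSet{}, err
		}
		if done {
			break // Result is now safe to call
		}
		time.Sleep(10 * time.Second) // back off between polls
	}
	return future.Result(client)
}
```

The same loop works for every future in this file; only the payload type returned by `Result` changes.
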
-func (future *VirtualMachineScaleSetsDeleteFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsDeleteFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualMachineScaleSetsDeleteInstancesFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type VirtualMachineScaleSetsDeleteInstancesFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *VirtualMachineScaleSetsDeleteInstancesFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsDeleteInstancesFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsDeleteInstancesFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualMachineScaleSetSku describes an available virtual machine scale set sku. -type VirtualMachineScaleSetSku struct { - // ResourceType - The type of resource the sku applies to. - ResourceType *string `json:"resourceType,omitempty"` - // Sku - The Sku. - Sku *Sku `json:"sku,omitempty"` - // Capacity - Specifies the number of virtual machines in the scale set. - Capacity *VirtualMachineScaleSetSkuCapacity `json:"capacity,omitempty"` -} - -// VirtualMachineScaleSetSkuCapacity describes scaling information of a sku. -type VirtualMachineScaleSetSkuCapacity struct { - // Minimum - The minimum capacity. - Minimum *int64 `json:"minimum,omitempty"` - // Maximum - The maximum capacity that can be set. - Maximum *int64 `json:"maximum,omitempty"` - // DefaultCapacity - The default capacity. - DefaultCapacity *int64 `json:"defaultCapacity,omitempty"` - // ScaleType - The scale type applicable to the sku. Possible values include: 'VirtualMachineScaleSetSkuScaleTypeAutomatic', 'VirtualMachineScaleSetSkuScaleTypeNone' - ScaleType VirtualMachineScaleSetSkuScaleType `json:"scaleType,omitempty"` -} - -// VirtualMachineScaleSetsPerformMaintenanceFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type VirtualMachineScaleSetsPerformMaintenanceFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *VirtualMachineScaleSetsPerformMaintenanceFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsPerformMaintenanceFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsPerformMaintenanceFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualMachineScaleSetsPowerOffFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. 
-type VirtualMachineScaleSetsPowerOffFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *VirtualMachineScaleSetsPowerOffFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsPowerOffFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsPowerOffFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualMachineScaleSetsRedeployFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type VirtualMachineScaleSetsRedeployFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *VirtualMachineScaleSetsRedeployFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsRedeployFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsRedeployFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualMachineScaleSetsReimageAllFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type VirtualMachineScaleSetsReimageAllFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *VirtualMachineScaleSetsReimageAllFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsReimageAllFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsReimageAllFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualMachineScaleSetsReimageFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type VirtualMachineScaleSetsReimageFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *VirtualMachineScaleSetsReimageFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsReimageFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsReimageFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualMachineScaleSetsRestartFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type VirtualMachineScaleSetsRestartFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. 
-// If the operation has not completed it will return an error. -func (future *VirtualMachineScaleSetsRestartFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsRestartFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsRestartFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualMachineScaleSetsStartFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type VirtualMachineScaleSetsStartFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *VirtualMachineScaleSetsStartFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsStartFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsStartFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualMachineScaleSetStorageProfile describes a virtual machine scale set storage profile. -type VirtualMachineScaleSetStorageProfile struct { - // ImageReference - Specifies information about the image to use. You can specify information about platform images, marketplace images, or virtual machine images. This element is required when you want to use a platform image, marketplace image, or virtual machine image, but is not used in other creation operations. - ImageReference *ImageReference `json:"imageReference,omitempty"` - // OsDisk - Specifies information about the operating system disk used by the virtual machines in the scale set.

    For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
- OsDisk *VirtualMachineScaleSetOSDisk `json:"osDisk,omitempty"`
- // DataDisks - Specifies the parameters that are used to add data disks to the virtual machines in the scale set.

    For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). - DataDisks *[]VirtualMachineScaleSetDataDisk `json:"dataDisks,omitempty"` -} - -// VirtualMachineScaleSetsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type VirtualMachineScaleSetsUpdateFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *VirtualMachineScaleSetsUpdateFuture) Result(client VirtualMachineScaleSetsClient) (vmss VirtualMachineScaleSet, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if vmss.Response.Response, err = future.GetResult(sender); err == nil && vmss.Response.Response.StatusCode != http.StatusNoContent { - vmss, err = client.UpdateResponder(vmss.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsUpdateFuture", "Result", vmss.Response.Response, "Failure responding to request") - } - } - return -} - -// VirtualMachineScaleSetsUpdateInstancesFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type VirtualMachineScaleSetsUpdateInstancesFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *VirtualMachineScaleSetsUpdateInstancesFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsUpdateInstancesFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsUpdateInstancesFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualMachineScaleSetUpdate describes a Virtual Machine Scale Set. -type VirtualMachineScaleSetUpdate struct { - // Sku - The virtual machine scale set sku. - Sku *Sku `json:"sku,omitempty"` - // Plan - The purchase plan when deploying a virtual machine scale set from VM Marketplace images. - Plan *Plan `json:"plan,omitempty"` - *VirtualMachineScaleSetUpdateProperties `json:"properties,omitempty"` - // Identity - The identity of the virtual machine scale set, if configured. - Identity *VirtualMachineScaleSetIdentity `json:"identity,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for VirtualMachineScaleSetUpdate. 
-func (vmssu VirtualMachineScaleSetUpdate) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if vmssu.Sku != nil { - objectMap["sku"] = vmssu.Sku - } - if vmssu.Plan != nil { - objectMap["plan"] = vmssu.Plan - } - if vmssu.VirtualMachineScaleSetUpdateProperties != nil { - objectMap["properties"] = vmssu.VirtualMachineScaleSetUpdateProperties - } - if vmssu.Identity != nil { - objectMap["identity"] = vmssu.Identity - } - if vmssu.Tags != nil { - objectMap["tags"] = vmssu.Tags - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSetUpdate struct. -func (vmssu *VirtualMachineScaleSetUpdate) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "sku": - if v != nil { - var sku Sku - err = json.Unmarshal(*v, &sku) - if err != nil { - return err - } - vmssu.Sku = &sku - } - case "plan": - if v != nil { - var plan Plan - err = json.Unmarshal(*v, &plan) - if err != nil { - return err - } - vmssu.Plan = &plan - } - case "properties": - if v != nil { - var virtualMachineScaleSetUpdateProperties VirtualMachineScaleSetUpdateProperties - err = json.Unmarshal(*v, &virtualMachineScaleSetUpdateProperties) - if err != nil { - return err - } - vmssu.VirtualMachineScaleSetUpdateProperties = &virtualMachineScaleSetUpdateProperties - } - case "identity": - if v != nil { - var identity VirtualMachineScaleSetIdentity - err = json.Unmarshal(*v, &identity) - if err != nil { - return err - } - vmssu.Identity = &identity - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - vmssu.Tags = tags - } - } - } - - return nil -} - -// VirtualMachineScaleSetUpdateIPConfiguration describes a virtual machine scale set network profile's IP -// configuration. -type VirtualMachineScaleSetUpdateIPConfiguration struct { - // Name - The IP configuration name. - Name *string `json:"name,omitempty"` - *VirtualMachineScaleSetUpdateIPConfigurationProperties `json:"properties,omitempty"` - // ID - Resource Id - ID *string `json:"id,omitempty"` -} - -// MarshalJSON is the custom marshaler for VirtualMachineScaleSetUpdateIPConfiguration. -func (vmssuic VirtualMachineScaleSetUpdateIPConfiguration) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if vmssuic.Name != nil { - objectMap["name"] = vmssuic.Name - } - if vmssuic.VirtualMachineScaleSetUpdateIPConfigurationProperties != nil { - objectMap["properties"] = vmssuic.VirtualMachineScaleSetUpdateIPConfigurationProperties - } - if vmssuic.ID != nil { - objectMap["id"] = vmssuic.ID - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSetUpdateIPConfiguration struct. 
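
Because every field of `VirtualMachineScaleSetUpdate` is a pointer and the marshaler above drops nil fields, callers can express sparse, PATCH-style updates; omitted properties are left as they are on the service side. A hypothetical capacity-only change (the import path is assumed, as above) serializes to just the `sku` object:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
)

func main() {
	capacity := int64(5)
	update := compute.VirtualMachineScaleSetUpdate{
		Sku: &compute.Sku{Capacity: &capacity},
	}
	// The custom MarshalJSON above skips every nil field.
	b, _ := json.Marshal(update)
	fmt.Println(string(b)) // {"sku":{"capacity":5}}
}
```
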
-func (vmssuic *VirtualMachineScaleSetUpdateIPConfiguration) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - vmssuic.Name = &name - } - case "properties": - if v != nil { - var virtualMachineScaleSetUpdateIPConfigurationProperties VirtualMachineScaleSetUpdateIPConfigurationProperties - err = json.Unmarshal(*v, &virtualMachineScaleSetUpdateIPConfigurationProperties) - if err != nil { - return err - } - vmssuic.VirtualMachineScaleSetUpdateIPConfigurationProperties = &virtualMachineScaleSetUpdateIPConfigurationProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - vmssuic.ID = &ID - } - } - } - - return nil -} - -// VirtualMachineScaleSetUpdateIPConfigurationProperties describes a virtual machine scale set network profile's IP -// configuration properties. -type VirtualMachineScaleSetUpdateIPConfigurationProperties struct { - // Subnet - The subnet. - Subnet *APIEntityReference `json:"subnet,omitempty"` - // Primary - Specifies the primary IP Configuration in case the network interface has more than one IP Configuration. - Primary *bool `json:"primary,omitempty"` - // PublicIPAddressConfiguration - The publicIPAddressConfiguration. - PublicIPAddressConfiguration *VirtualMachineScaleSetUpdatePublicIPAddressConfiguration `json:"publicIPAddressConfiguration,omitempty"` - // PrivateIPAddressVersion - Available from Api-Version 2017-03-30 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'. Possible values include: 'IPv4', 'IPv6' - PrivateIPAddressVersion IPVersion `json:"privateIPAddressVersion,omitempty"` - // ApplicationGatewayBackendAddressPools - The application gateway backend address pools. - ApplicationGatewayBackendAddressPools *[]SubResource `json:"applicationGatewayBackendAddressPools,omitempty"` - // LoadBalancerBackendAddressPools - The load balancer backend address pools. - LoadBalancerBackendAddressPools *[]SubResource `json:"loadBalancerBackendAddressPools,omitempty"` - // LoadBalancerInboundNatPools - The load balancer inbound nat pools. - LoadBalancerInboundNatPools *[]SubResource `json:"loadBalancerInboundNatPools,omitempty"` -} - -// VirtualMachineScaleSetUpdateNetworkConfiguration describes a virtual machine scale set network profile's network -// configurations. -type VirtualMachineScaleSetUpdateNetworkConfiguration struct { - // Name - The network configuration name. - Name *string `json:"name,omitempty"` - *VirtualMachineScaleSetUpdateNetworkConfigurationProperties `json:"properties,omitempty"` - // ID - Resource Id - ID *string `json:"id,omitempty"` -} - -// MarshalJSON is the custom marshaler for VirtualMachineScaleSetUpdateNetworkConfiguration. 
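
The Marshal/Unmarshal pairs in this file all implement one generated idiom: the REST payload nests mutable fields under a `properties` key, while the Go type embeds the corresponding `*...Properties` struct so callers read `cfg.Primary` rather than `cfg.Properties.Primary`. A stripped-down illustration of the marshaling half, with invented type names:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// miniProperties and miniConfig are hypothetical stand-ins for the generated
// pairs above (e.g. VirtualMachineScaleSetUpdateIPConfiguration and its
// Properties type).
type miniProperties struct {
	Primary *bool `json:"primary,omitempty"`
}

type miniConfig struct {
	Name *string `json:"name,omitempty"`
	*miniProperties
}

// MarshalJSON re-nests the embedded properties under a "properties" key,
// mirroring the generated marshalers; the unmarshalers invert this.
func (c miniConfig) MarshalJSON() ([]byte, error) {
	objectMap := make(map[string]interface{})
	if c.Name != nil {
		objectMap["name"] = c.Name
	}
	if c.miniProperties != nil {
		objectMap["properties"] = c.miniProperties
	}
	return json.Marshal(objectMap)
}

func main() {
	primary, name := true, "nic-0"
	b, _ := json.Marshal(miniConfig{Name: &name, miniProperties: &miniProperties{Primary: &primary}})
	fmt.Println(string(b)) // {"name":"nic-0","properties":{"primary":true}}
}
```
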
-func (vmssunc VirtualMachineScaleSetUpdateNetworkConfiguration) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if vmssunc.Name != nil { - objectMap["name"] = vmssunc.Name - } - if vmssunc.VirtualMachineScaleSetUpdateNetworkConfigurationProperties != nil { - objectMap["properties"] = vmssunc.VirtualMachineScaleSetUpdateNetworkConfigurationProperties - } - if vmssunc.ID != nil { - objectMap["id"] = vmssunc.ID - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSetUpdateNetworkConfiguration struct. -func (vmssunc *VirtualMachineScaleSetUpdateNetworkConfiguration) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - vmssunc.Name = &name - } - case "properties": - if v != nil { - var virtualMachineScaleSetUpdateNetworkConfigurationProperties VirtualMachineScaleSetUpdateNetworkConfigurationProperties - err = json.Unmarshal(*v, &virtualMachineScaleSetUpdateNetworkConfigurationProperties) - if err != nil { - return err - } - vmssunc.VirtualMachineScaleSetUpdateNetworkConfigurationProperties = &virtualMachineScaleSetUpdateNetworkConfigurationProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - vmssunc.ID = &ID - } - } - } - - return nil -} - -// VirtualMachineScaleSetUpdateNetworkConfigurationProperties describes a virtual machine scale set updatable -// network profile's IP configuration.Use this object for updating network profile's IP Configuration. -type VirtualMachineScaleSetUpdateNetworkConfigurationProperties struct { - // Primary - Whether this is a primary NIC on a virtual machine. - Primary *bool `json:"primary,omitempty"` - // EnableAcceleratedNetworking - Specifies whether the network interface is accelerated networking-enabled. - EnableAcceleratedNetworking *bool `json:"enableAcceleratedNetworking,omitempty"` - // NetworkSecurityGroup - The network security group. - NetworkSecurityGroup *SubResource `json:"networkSecurityGroup,omitempty"` - // DNSSettings - The dns settings to be applied on the network interfaces. - DNSSettings *VirtualMachineScaleSetNetworkConfigurationDNSSettings `json:"dnsSettings,omitempty"` - // IPConfigurations - The virtual machine scale set IP Configuration. - IPConfigurations *[]VirtualMachineScaleSetUpdateIPConfiguration `json:"ipConfigurations,omitempty"` - // EnableIPForwarding - Whether IP forwarding enabled on this NIC. - EnableIPForwarding *bool `json:"enableIPForwarding,omitempty"` -} - -// VirtualMachineScaleSetUpdateNetworkProfile describes a virtual machine scale set network profile. -type VirtualMachineScaleSetUpdateNetworkProfile struct { - // NetworkInterfaceConfigurations - The list of network configurations. - NetworkInterfaceConfigurations *[]VirtualMachineScaleSetUpdateNetworkConfiguration `json:"networkInterfaceConfigurations,omitempty"` -} - -// VirtualMachineScaleSetUpdateOSDisk describes virtual machine scale set operating system disk Update Object. This -// should be used for Updating VMSS OS Disk. -type VirtualMachineScaleSetUpdateOSDisk struct { - // Caching - The caching type. 
Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite'
- Caching CachingTypes `json:"caching,omitempty"`
- // WriteAcceleratorEnabled - Specifies whether writeAccelerator should be enabled or disabled on the disk.
- WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"`
- // DiskSizeGB - Specifies the size of the operating system disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image.

    This value cannot be larger than 1023 GB - DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` - // Image - The Source User Image VirtualHardDisk. This VirtualHardDisk will be copied before using it to attach to the Virtual Machine. If SourceImage is provided, the destination VirtualHardDisk should not exist. - Image *VirtualHardDisk `json:"image,omitempty"` - // VhdContainers - The list of virtual hard disk container uris. - VhdContainers *[]string `json:"vhdContainers,omitempty"` - // ManagedDisk - The managed disk parameters. - ManagedDisk *VirtualMachineScaleSetManagedDiskParameters `json:"managedDisk,omitempty"` -} - -// VirtualMachineScaleSetUpdateOSProfile describes a virtual machine scale set OS profile. -type VirtualMachineScaleSetUpdateOSProfile struct { - // CustomData - A base-64 encoded string of custom data. - CustomData *string `json:"customData,omitempty"` - // WindowsConfiguration - The Windows Configuration of the OS profile. - WindowsConfiguration *WindowsConfiguration `json:"windowsConfiguration,omitempty"` - // LinuxConfiguration - The Linux Configuration of the OS profile. - LinuxConfiguration *LinuxConfiguration `json:"linuxConfiguration,omitempty"` - // Secrets - The List of certificates for addition to the VM. - Secrets *[]VaultSecretGroup `json:"secrets,omitempty"` -} - -// VirtualMachineScaleSetUpdateProperties describes the properties of a Virtual Machine Scale Set. -type VirtualMachineScaleSetUpdateProperties struct { - // UpgradePolicy - The upgrade policy. - UpgradePolicy *UpgradePolicy `json:"upgradePolicy,omitempty"` - // VirtualMachineProfile - The virtual machine profile. - VirtualMachineProfile *VirtualMachineScaleSetUpdateVMProfile `json:"virtualMachineProfile,omitempty"` - // Overprovision - Specifies whether the Virtual Machine Scale Set should be overprovisioned. - Overprovision *bool `json:"overprovision,omitempty"` - // SinglePlacementGroup - When true this limits the scale set to a single placement group, of max size 100 virtual machines. - SinglePlacementGroup *bool `json:"singlePlacementGroup,omitempty"` -} - -// VirtualMachineScaleSetUpdatePublicIPAddressConfiguration describes a virtual machines scale set IP -// Configuration's PublicIPAddress configuration -type VirtualMachineScaleSetUpdatePublicIPAddressConfiguration struct { - // Name - The publicIP address configuration name. - Name *string `json:"name,omitempty"` - *VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for VirtualMachineScaleSetUpdatePublicIPAddressConfiguration. -func (vmssupiac VirtualMachineScaleSetUpdatePublicIPAddressConfiguration) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if vmssupiac.Name != nil { - objectMap["name"] = vmssupiac.Name - } - if vmssupiac.VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties != nil { - objectMap["properties"] = vmssupiac.VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSetUpdatePublicIPAddressConfiguration struct. 
-func (vmssupiac *VirtualMachineScaleSetUpdatePublicIPAddressConfiguration) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - vmssupiac.Name = &name - } - case "properties": - if v != nil { - var virtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties - err = json.Unmarshal(*v, &virtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties) - if err != nil { - return err - } - vmssupiac.VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties = &virtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties - } - } - } - - return nil -} - -// VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties describes a virtual machines scale set IP -// Configuration's PublicIPAddress configuration -type VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties struct { - // IdleTimeoutInMinutes - The idle timeout of the public IP address. - IdleTimeoutInMinutes *int32 `json:"idleTimeoutInMinutes,omitempty"` - // DNSSettings - The dns settings to be applied on the publicIP addresses . - DNSSettings *VirtualMachineScaleSetPublicIPAddressConfigurationDNSSettings `json:"dnsSettings,omitempty"` -} - -// VirtualMachineScaleSetUpdateStorageProfile describes a virtual machine scale set storage profile. -type VirtualMachineScaleSetUpdateStorageProfile struct { - // ImageReference - The image reference. - ImageReference *ImageReference `json:"imageReference,omitempty"` - // OsDisk - The OS disk. - OsDisk *VirtualMachineScaleSetUpdateOSDisk `json:"osDisk,omitempty"` - // DataDisks - The data disks. - DataDisks *[]VirtualMachineScaleSetDataDisk `json:"dataDisks,omitempty"` -} - -// VirtualMachineScaleSetUpdateVMProfile describes a virtual machine scale set virtual machine profile. -type VirtualMachineScaleSetUpdateVMProfile struct { - // OsProfile - The virtual machine scale set OS profile. - OsProfile *VirtualMachineScaleSetUpdateOSProfile `json:"osProfile,omitempty"` - // StorageProfile - The virtual machine scale set storage profile. - StorageProfile *VirtualMachineScaleSetUpdateStorageProfile `json:"storageProfile,omitempty"` - // NetworkProfile - The virtual machine scale set network profile. - NetworkProfile *VirtualMachineScaleSetUpdateNetworkProfile `json:"networkProfile,omitempty"` - // DiagnosticsProfile - The virtual machine scale set diagnostics profile. - DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"` - // ExtensionProfile - The virtual machine scale set extension profile. - ExtensionProfile *VirtualMachineScaleSetExtensionProfile `json:"extensionProfile,omitempty"` - // LicenseType - The license type, which is for bring your own license scenario. - LicenseType *string `json:"licenseType,omitempty"` -} - -// VirtualMachineScaleSetVM describes a virtual machine scale set virtual machine. -type VirtualMachineScaleSetVM struct { - autorest.Response `json:"-"` - // InstanceID - The virtual machine instance ID. - InstanceID *string `json:"instanceId,omitempty"` - // Sku - The virtual machine SKU. - Sku *Sku `json:"sku,omitempty"` - *VirtualMachineScaleSetVMProperties `json:"properties,omitempty"` - // Plan - Specifies information about the marketplace image used to create the virtual machine. 
This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**. - Plan *Plan `json:"plan,omitempty"` - // Resources - The virtual machine child extension resources. - Resources *[]VirtualMachineExtension `json:"resources,omitempty"` - // Zones - The virtual machine zones. - Zones *[]string `json:"zones,omitempty"` - // ID - Resource Id - ID *string `json:"id,omitempty"` - // Name - Resource name - Name *string `json:"name,omitempty"` - // Type - Resource type - Type *string `json:"type,omitempty"` - // Location - Resource location - Location *string `json:"location,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for VirtualMachineScaleSetVM. -func (vmssv VirtualMachineScaleSetVM) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if vmssv.InstanceID != nil { - objectMap["instanceId"] = vmssv.InstanceID - } - if vmssv.Sku != nil { - objectMap["sku"] = vmssv.Sku - } - if vmssv.VirtualMachineScaleSetVMProperties != nil { - objectMap["properties"] = vmssv.VirtualMachineScaleSetVMProperties - } - if vmssv.Plan != nil { - objectMap["plan"] = vmssv.Plan - } - if vmssv.Resources != nil { - objectMap["resources"] = vmssv.Resources - } - if vmssv.Zones != nil { - objectMap["zones"] = vmssv.Zones - } - if vmssv.ID != nil { - objectMap["id"] = vmssv.ID - } - if vmssv.Name != nil { - objectMap["name"] = vmssv.Name - } - if vmssv.Type != nil { - objectMap["type"] = vmssv.Type - } - if vmssv.Location != nil { - objectMap["location"] = vmssv.Location - } - if vmssv.Tags != nil { - objectMap["tags"] = vmssv.Tags - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSetVM struct. 
-func (vmssv *VirtualMachineScaleSetVM) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "instanceId": - if v != nil { - var instanceID string - err = json.Unmarshal(*v, &instanceID) - if err != nil { - return err - } - vmssv.InstanceID = &instanceID - } - case "sku": - if v != nil { - var sku Sku - err = json.Unmarshal(*v, &sku) - if err != nil { - return err - } - vmssv.Sku = &sku - } - case "properties": - if v != nil { - var virtualMachineScaleSetVMProperties VirtualMachineScaleSetVMProperties - err = json.Unmarshal(*v, &virtualMachineScaleSetVMProperties) - if err != nil { - return err - } - vmssv.VirtualMachineScaleSetVMProperties = &virtualMachineScaleSetVMProperties - } - case "plan": - if v != nil { - var plan Plan - err = json.Unmarshal(*v, &plan) - if err != nil { - return err - } - vmssv.Plan = &plan - } - case "resources": - if v != nil { - var resources []VirtualMachineExtension - err = json.Unmarshal(*v, &resources) - if err != nil { - return err - } - vmssv.Resources = &resources - } - case "zones": - if v != nil { - var zones []string - err = json.Unmarshal(*v, &zones) - if err != nil { - return err - } - vmssv.Zones = &zones - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - vmssv.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - vmssv.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - vmssv.Type = &typeVar - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - vmssv.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - vmssv.Tags = tags - } - } - } - - return nil -} - -// VirtualMachineScaleSetVMExtensionsSummary extensions summary for virtual machines of a virtual machine scale -// set. -type VirtualMachineScaleSetVMExtensionsSummary struct { - // Name - The extension name. - Name *string `json:"name,omitempty"` - // StatusesSummary - The extensions information. - StatusesSummary *[]VirtualMachineStatusCodeCount `json:"statusesSummary,omitempty"` -} - -// VirtualMachineScaleSetVMInstanceIDs specifies a list of virtual machine instance IDs from the VM scale set. -type VirtualMachineScaleSetVMInstanceIDs struct { - // InstanceIds - The virtual machine scale set instance ids. Omitting the virtual machine scale set instance ids will result in the operation being performed on all virtual machines in the virtual machine scale set. - InstanceIds *[]string `json:"instanceIds,omitempty"` -} - -// VirtualMachineScaleSetVMInstanceRequiredIDs specifies a list of virtual machine instance IDs from the VM scale -// set. -type VirtualMachineScaleSetVMInstanceRequiredIDs struct { - // InstanceIds - The virtual machine scale set instance ids. - InstanceIds *[]string `json:"instanceIds,omitempty"` -} - -// VirtualMachineScaleSetVMInstanceView the instance view of a virtual machine scale set VM. -type VirtualMachineScaleSetVMInstanceView struct { - autorest.Response `json:"-"` - // PlatformUpdateDomain - The Update Domain count. 
- PlatformUpdateDomain *int32 `json:"platformUpdateDomain,omitempty"`
- // PlatformFaultDomain - The Fault Domain count.
- PlatformFaultDomain *int32 `json:"platformFaultDomain,omitempty"`
- // RdpThumbPrint - The Remote desktop certificate thumbprint.
- RdpThumbPrint *string `json:"rdpThumbPrint,omitempty"`
- // VMAgent - The VM Agent running on the virtual machine.
- VMAgent *VirtualMachineAgentInstanceView `json:"vmAgent,omitempty"`
- // MaintenanceRedeployStatus - The Maintenance Operation status on the virtual machine.
- MaintenanceRedeployStatus *MaintenanceRedeployStatus `json:"maintenanceRedeployStatus,omitempty"`
- // Disks - The disks information.
- Disks *[]DiskInstanceView `json:"disks,omitempty"`
- // Extensions - The extensions information.
- Extensions *[]VirtualMachineExtensionInstanceView `json:"extensions,omitempty"`
- // VMHealth - The health status for the VM.
- VMHealth *VirtualMachineHealthStatus `json:"vmHealth,omitempty"`
- // BootDiagnostics - Boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot to diagnose VM status.

    For Linux Virtual Machines, you can easily view the output of your console log.

    For both Windows and Linux virtual machines, Azure also enables you to see a screenshot of the VM from the hypervisor. - BootDiagnostics *BootDiagnosticsInstanceView `json:"bootDiagnostics,omitempty"` - // Statuses - The resource status information. - Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` - // PlacementGroupID - The placement group in which the VM is running. If the VM is deallocated it will not have a placementGroupId. - PlacementGroupID *string `json:"placementGroupId,omitempty"` -} - -// VirtualMachineScaleSetVMListResult the List Virtual Machine Scale Set VMs operation response. -type VirtualMachineScaleSetVMListResult struct { - autorest.Response `json:"-"` - // Value - The list of virtual machine scale sets VMs. - Value *[]VirtualMachineScaleSetVM `json:"value,omitempty"` - // NextLink - The uri to fetch the next page of Virtual Machine Scale Set VMs. Call ListNext() with this to fetch the next page of VMSS VMs - NextLink *string `json:"nextLink,omitempty"` -} - -// VirtualMachineScaleSetVMListResultIterator provides access to a complete listing of VirtualMachineScaleSetVM -// values. -type VirtualMachineScaleSetVMListResultIterator struct { - i int - page VirtualMachineScaleSetVMListResultPage -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *VirtualMachineScaleSetVMListResultIterator) Next() error { - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err := iter.page.Next() - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter VirtualMachineScaleSetVMListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter VirtualMachineScaleSetVMListResultIterator) Response() VirtualMachineScaleSetVMListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter VirtualMachineScaleSetVMListResultIterator) Value() VirtualMachineScaleSetVM { - if !iter.page.NotDone() { - return VirtualMachineScaleSetVM{} - } - return iter.page.Values()[iter.i] -} - -// IsEmpty returns true if the ListResult contains no values. -func (vmssvlr VirtualMachineScaleSetVMListResult) IsEmpty() bool { - return vmssvlr.Value == nil || len(*vmssvlr.Value) == 0 -} - -// virtualMachineScaleSetVMListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (vmssvlr VirtualMachineScaleSetVMListResult) virtualMachineScaleSetVMListResultPreparer() (*http.Request, error) { - if vmssvlr.NextLink == nil || len(to.String(vmssvlr.NextLink)) < 1 { - return nil, nil - } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(vmssvlr.NextLink))) -} - -// VirtualMachineScaleSetVMListResultPage contains a page of VirtualMachineScaleSetVM values. -type VirtualMachineScaleSetVMListResultPage struct { - fn func(VirtualMachineScaleSetVMListResult) (VirtualMachineScaleSetVMListResult, error) - vmssvlr VirtualMachineScaleSetVMListResult -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. 
-func (page *VirtualMachineScaleSetVMListResultPage) Next() error { - next, err := page.fn(page.vmssvlr) - if err != nil { - return err - } - page.vmssvlr = next - return nil -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page VirtualMachineScaleSetVMListResultPage) NotDone() bool { - return !page.vmssvlr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page VirtualMachineScaleSetVMListResultPage) Response() VirtualMachineScaleSetVMListResult { - return page.vmssvlr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page VirtualMachineScaleSetVMListResultPage) Values() []VirtualMachineScaleSetVM { - if page.vmssvlr.IsEmpty() { - return nil - } - return *page.vmssvlr.Value -} - -// VirtualMachineScaleSetVMProfile describes a virtual machine scale set virtual machine profile. -type VirtualMachineScaleSetVMProfile struct { - // OsProfile - Specifies the operating system settings for the virtual machines in the scale set. - OsProfile *VirtualMachineScaleSetOSProfile `json:"osProfile,omitempty"` - // StorageProfile - Specifies the storage settings for the virtual machine disks. - StorageProfile *VirtualMachineScaleSetStorageProfile `json:"storageProfile,omitempty"` - // NetworkProfile - Specifies properties of the network interfaces of the virtual machines in the scale set. - NetworkProfile *VirtualMachineScaleSetNetworkProfile `json:"networkProfile,omitempty"` - // DiagnosticsProfile - Specifies the boot diagnostic settings state.
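
The iterator and page types above hide the `nextLink` bookkeeping: `Next` advances within the current page and only issues a new request when the page is exhausted. A sketch of draining the iterator follows; how the iterator is obtained (e.g. from a `ListComplete`-style call) is left out, and the import path is assumed as before.

```go
package example

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
)

// printInstanceIDs walks every VM the iterator can reach, printing each
// instance ID. Next() transparently fetches further pages via nextLink.
func printInstanceIDs(iter compute.VirtualMachineScaleSetVMListResultIterator) error {
	for iter.NotDone() {
		if vm := iter.Value(); vm.InstanceID != nil {
			fmt.Println(*vm.InstanceID)
		}
		if err := iter.Next(); err != nil {
			return err
		}
	}
	return nil
}
```
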

    Minimum api-version: 2015-06-15.
- DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"`
- // ExtensionProfile - Specifies a collection of settings for extensions installed on virtual machines in the scale set.
- ExtensionProfile *VirtualMachineScaleSetExtensionProfile `json:"extensionProfile,omitempty"`
- // LicenseType - Specifies that the image or disk that is being used was licensed on-premises. This element is only used for images that contain the Windows Server operating system.

    Possible values are:

    Windows_Client

    Windows_Server

    If this element is included in a request for an update, the value must match the initial value. This value cannot be updated.

    For more information, see [Azure Hybrid Use Benefit for Windows Server](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-hybrid-use-benefit-licensing?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)

    Minimum api-version: 2015-06-15
- LicenseType *string `json:"licenseType,omitempty"`
- // Priority - Specifies the priority for the virtual machines in the scale set.

    Minimum api-version: 2017-10-30-preview. Possible values include: 'Regular', 'Low'
- Priority VirtualMachinePriorityTypes `json:"priority,omitempty"`
- // EvictionPolicy - Specifies the eviction policy for virtual machines in a low priority scale set.

    Minimum api-version: 2017-10-30-preview. Possible values include: 'Deallocate', 'Delete'
- EvictionPolicy VirtualMachineEvictionPolicyTypes `json:"evictionPolicy,omitempty"`
-}
-
-// VirtualMachineScaleSetVMProperties describes the properties of a virtual machine scale set virtual machine.
-type VirtualMachineScaleSetVMProperties struct {
- // LatestModelApplied - Specifies whether the latest model has been applied to the virtual machine.
- LatestModelApplied *bool `json:"latestModelApplied,omitempty"`
- // VMID - Azure VM unique ID.
- VMID *string `json:"vmId,omitempty"`
- // InstanceView - The virtual machine instance view.
- InstanceView *VirtualMachineScaleSetVMInstanceView `json:"instanceView,omitempty"`
- // HardwareProfile - Specifies the hardware settings for the virtual machine.
- HardwareProfile *HardwareProfile `json:"hardwareProfile,omitempty"`
- // StorageProfile - Specifies the storage settings for the virtual machine disks.
- StorageProfile *StorageProfile `json:"storageProfile,omitempty"`
- // OsProfile - Specifies the operating system settings for the virtual machine.
- OsProfile *OSProfile `json:"osProfile,omitempty"`
- // NetworkProfile - Specifies the network interfaces of the virtual machine.
- NetworkProfile *NetworkProfile `json:"networkProfile,omitempty"`
- // DiagnosticsProfile - Specifies the boot diagnostic settings state.

    Minimum api-version: 2015-06-15.
- DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"`
- // AvailabilitySet - Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Manage the availability of virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).

    For more information on Azure planned maintenance, see [Planned maintenance for virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)

    Currently, a VM can only be added to an availability set at creation time. An existing VM cannot be added to an availability set.
- AvailabilitySet *SubResource `json:"availabilitySet,omitempty"`
- // ProvisioningState - The provisioning state, which only appears in the response.
- ProvisioningState *string `json:"provisioningState,omitempty"`
- // LicenseType - Specifies that the image or disk that is being used was licensed on-premises. This element is only used for images that contain the Windows Server operating system.

    Possible values are:

    Windows_Client

    Windows_Server

    If this element is included in a request for an update, the value must match the initial value. This value cannot be updated.

    For more information, see [Azure Hybrid Use Benefit for Windows Server](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-hybrid-use-benefit-licensing?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)

    Minimum api-version: 2015-06-15 - LicenseType *string `json:"licenseType,omitempty"` -} - -// VirtualMachineScaleSetVMsDeallocateFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type VirtualMachineScaleSetVMsDeallocateFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *VirtualMachineScaleSetVMsDeallocateFuture) Result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsDeallocateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsDeallocateFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualMachineScaleSetVMsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type VirtualMachineScaleSetVMsDeleteFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *VirtualMachineScaleSetVMsDeleteFuture) Result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsDeleteFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualMachineScaleSetVMsPerformMaintenanceFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type VirtualMachineScaleSetVMsPerformMaintenanceFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *VirtualMachineScaleSetVMsPerformMaintenanceFuture) Result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsPerformMaintenanceFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsPerformMaintenanceFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualMachineScaleSetVMsPowerOffFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type VirtualMachineScaleSetVMsPowerOffFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. 
-func (future *VirtualMachineScaleSetVMsPowerOffFuture) Result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsPowerOffFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsPowerOffFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualMachineScaleSetVMsRedeployFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type VirtualMachineScaleSetVMsRedeployFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *VirtualMachineScaleSetVMsRedeployFuture) Result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsRedeployFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsRedeployFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualMachineScaleSetVMsReimageAllFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type VirtualMachineScaleSetVMsReimageAllFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *VirtualMachineScaleSetVMsReimageAllFuture) Result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsReimageAllFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsReimageAllFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualMachineScaleSetVMsReimageFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type VirtualMachineScaleSetVMsReimageFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *VirtualMachineScaleSetVMsReimageFuture) Result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsReimageFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsReimageFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualMachineScaleSetVMsRestartFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type VirtualMachineScaleSetVMsRestartFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. 
-func (future *VirtualMachineScaleSetVMsRestartFuture) Result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsRestartFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsRestartFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualMachineScaleSetVMsRunCommandFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type VirtualMachineScaleSetVMsRunCommandFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *VirtualMachineScaleSetVMsRunCommandFuture) Result(client VirtualMachineScaleSetVMsClient) (rcr RunCommandResult, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsRunCommandFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsRunCommandFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if rcr.Response.Response, err = future.GetResult(sender); err == nil && rcr.Response.Response.StatusCode != http.StatusNoContent { - rcr, err = client.RunCommandResponder(rcr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsRunCommandFuture", "Result", rcr.Response.Response, "Failure responding to request") - } - } - return -} - -// VirtualMachineScaleSetVMsStartFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type VirtualMachineScaleSetVMsStartFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *VirtualMachineScaleSetVMsStartFuture) Result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsStartFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsStartFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualMachineScaleSetVMsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type VirtualMachineScaleSetVMsUpdateFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. 
-func (future *VirtualMachineScaleSetVMsUpdateFuture) Result(client VirtualMachineScaleSetVMsClient) (vmssv VirtualMachineScaleSetVM, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if vmssv.Response.Response, err = future.GetResult(sender); err == nil && vmssv.Response.Response.StatusCode != http.StatusNoContent { - vmssv, err = client.UpdateResponder(vmssv.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsUpdateFuture", "Result", vmssv.Response.Response, "Failure responding to request") - } - } - return -} - -// VirtualMachinesCaptureFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type VirtualMachinesCaptureFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *VirtualMachinesCaptureFuture) Result(client VirtualMachinesClient) (vmcr VirtualMachineCaptureResult, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesCaptureFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesCaptureFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if vmcr.Response.Response, err = future.GetResult(sender); err == nil && vmcr.Response.Response.StatusCode != http.StatusNoContent { - vmcr, err = client.CaptureResponder(vmcr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesCaptureFuture", "Result", vmcr.Response.Response, "Failure responding to request") - } - } - return -} - -// VirtualMachinesConvertToManagedDisksFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type VirtualMachinesConvertToManagedDisksFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *VirtualMachinesConvertToManagedDisksFuture) Result(client VirtualMachinesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesConvertToManagedDisksFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesConvertToManagedDisksFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualMachinesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type VirtualMachinesCreateOrUpdateFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. 
-func (future *VirtualMachinesCreateOrUpdateFuture) Result(client VirtualMachinesClient) (VM VirtualMachine, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if VM.Response.Response, err = future.GetResult(sender); err == nil && VM.Response.Response.StatusCode != http.StatusNoContent { - VM, err = client.CreateOrUpdateResponder(VM.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesCreateOrUpdateFuture", "Result", VM.Response.Response, "Failure responding to request") - } - } - return -} - -// VirtualMachinesDeallocateFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type VirtualMachinesDeallocateFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *VirtualMachinesDeallocateFuture) Result(client VirtualMachinesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesDeallocateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesDeallocateFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualMachinesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type VirtualMachinesDeleteFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *VirtualMachinesDeleteFuture) Result(client VirtualMachinesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesDeleteFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualMachineSize describes the properties of a VM size. -type VirtualMachineSize struct { - // Name - The name of the virtual machine size. - Name *string `json:"name,omitempty"` - // NumberOfCores - The number of cores supported by the virtual machine size. - NumberOfCores *int32 `json:"numberOfCores,omitempty"` - // OsDiskSizeInMB - The OS disk size, in MB, allowed by the virtual machine size. - OsDiskSizeInMB *int32 `json:"osDiskSizeInMB,omitempty"` - // ResourceDiskSizeInMB - The resource disk size, in MB, allowed by the virtual machine size. - ResourceDiskSizeInMB *int32 `json:"resourceDiskSizeInMB,omitempty"` - // MemoryInMB - The amount of memory, in MB, supported by the virtual machine size. - MemoryInMB *int32 `json:"memoryInMB,omitempty"` - // MaxDataDiskCount - The maximum number of data disks that can be attached to the virtual machine size. 
- MaxDataDiskCount *int32 `json:"maxDataDiskCount,omitempty"` -} - -// VirtualMachineSizeListResult the List Virtual Machine operation response. -type VirtualMachineSizeListResult struct { - autorest.Response `json:"-"` - // Value - The list of virtual machine sizes. - Value *[]VirtualMachineSize `json:"value,omitempty"` -} - -// VirtualMachinesPerformMaintenanceFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type VirtualMachinesPerformMaintenanceFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *VirtualMachinesPerformMaintenanceFuture) Result(client VirtualMachinesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesPerformMaintenanceFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesPerformMaintenanceFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualMachinesPowerOffFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type VirtualMachinesPowerOffFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *VirtualMachinesPowerOffFuture) Result(client VirtualMachinesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesPowerOffFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesPowerOffFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualMachinesRedeployFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type VirtualMachinesRedeployFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *VirtualMachinesRedeployFuture) Result(client VirtualMachinesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesRedeployFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesRedeployFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualMachinesRestartFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type VirtualMachinesRestartFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. 
-func (future *VirtualMachinesRestartFuture) Result(client VirtualMachinesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesRestartFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesRestartFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualMachinesRunCommandFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type VirtualMachinesRunCommandFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *VirtualMachinesRunCommandFuture) Result(client VirtualMachinesClient) (rcr RunCommandResult, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesRunCommandFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesRunCommandFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if rcr.Response.Response, err = future.GetResult(sender); err == nil && rcr.Response.Response.StatusCode != http.StatusNoContent { - rcr, err = client.RunCommandResponder(rcr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesRunCommandFuture", "Result", rcr.Response.Response, "Failure responding to request") - } - } - return -} - -// VirtualMachinesStartFuture an abstraction for monitoring and retrieving the results of a long-running operation. -type VirtualMachinesStartFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *VirtualMachinesStartFuture) Result(client VirtualMachinesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesStartFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesStartFuture") - return - } - ar.Response = future.Response() - return -} - -// VirtualMachineStatusCodeCount the status code and count of the virtual machine scale set instance view status -// summary. -type VirtualMachineStatusCodeCount struct { - // Code - The instance view status code. - Code *string `json:"code,omitempty"` - // Count - The number of instances having a particular status code. - Count *int32 `json:"count,omitempty"` -} - -// VirtualMachinesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type VirtualMachinesUpdateFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. 
-func (future *VirtualMachinesUpdateFuture) Result(client VirtualMachinesClient) (VM VirtualMachine, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if VM.Response.Response, err = future.GetResult(sender); err == nil && VM.Response.Response.StatusCode != http.StatusNoContent { - VM, err = client.UpdateResponder(VM.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesUpdateFuture", "Result", VM.Response.Response, "Failure responding to request") - } - } - return -} - -// VirtualMachineUpdate describes a Virtual Machine Update. -type VirtualMachineUpdate struct { - // Plan - Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**. - Plan *Plan `json:"plan,omitempty"` - *VirtualMachineProperties `json:"properties,omitempty"` - // Identity - The identity of the virtual machine, if configured. - Identity *VirtualMachineIdentity `json:"identity,omitempty"` - // Zones - The virtual machine zones. - Zones *[]string `json:"zones,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for VirtualMachineUpdate. -func (vmu VirtualMachineUpdate) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if vmu.Plan != nil { - objectMap["plan"] = vmu.Plan - } - if vmu.VirtualMachineProperties != nil { - objectMap["properties"] = vmu.VirtualMachineProperties - } - if vmu.Identity != nil { - objectMap["identity"] = vmu.Identity - } - if vmu.Zones != nil { - objectMap["zones"] = vmu.Zones - } - if vmu.Tags != nil { - objectMap["tags"] = vmu.Tags - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for VirtualMachineUpdate struct. 
-func (vmu *VirtualMachineUpdate) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "plan": - if v != nil { - var plan Plan - err = json.Unmarshal(*v, &plan) - if err != nil { - return err - } - vmu.Plan = &plan - } - case "properties": - if v != nil { - var virtualMachineProperties VirtualMachineProperties - err = json.Unmarshal(*v, &virtualMachineProperties) - if err != nil { - return err - } - vmu.VirtualMachineProperties = &virtualMachineProperties - } - case "identity": - if v != nil { - var identity VirtualMachineIdentity - err = json.Unmarshal(*v, &identity) - if err != nil { - return err - } - vmu.Identity = &identity - } - case "zones": - if v != nil { - var zones []string - err = json.Unmarshal(*v, &zones) - if err != nil { - return err - } - vmu.Zones = &zones - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - vmu.Tags = tags - } - } - } - - return nil -} - -// WindowsConfiguration specifies Windows operating system settings on the virtual machine. -type WindowsConfiguration struct { - // ProvisionVMAgent - Indicates whether virtual machine agent should be provisioned on the virtual machine.

    When this property is not specified in the request body, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later. - ProvisionVMAgent *bool `json:"provisionVMAgent,omitempty"` - // EnableAutomaticUpdates - Indicates whether the virtual machine is enabled for automatic updates. - EnableAutomaticUpdates *bool `json:"enableAutomaticUpdates,omitempty"` - // TimeZone - Specifies the time zone of the virtual machine. e.g. "Pacific Standard Time" - TimeZone *string `json:"timeZone,omitempty"` - // AdditionalUnattendContent - Specifies additional base-64 encoded XML formatted information that can be included in the Unattend.xml file, which is used by Windows Setup. - AdditionalUnattendContent *[]AdditionalUnattendContent `json:"additionalUnattendContent,omitempty"` - // WinRM - Specifies the Windows Remote Management listeners. This enables remote Windows PowerShell. - WinRM *WinRMConfiguration `json:"winRM,omitempty"` -} - -// WinRMConfiguration describes the Windows Remote Management configuration of the VM -type WinRMConfiguration struct { - // Listeners - The list of Windows Remote Management listeners - Listeners *[]WinRMListener `json:"listeners,omitempty"` -} - -// WinRMListener describes the protocol and thumbprint of a Windows Remote Management listener -type WinRMListener struct { - // Protocol - Specifies the protocol of the listener.

    Possible values are:
    **http**

    **https**. Possible values include: 'HTTP', 'HTTPS' - Protocol ProtocolTypes `json:"protocol,omitempty"` - // CertificateURL - This is the URL of a certificate that has been uploaded to Key Vault as a secret. For adding a secret to the Key Vault, see [Add a key or secret to the key vault](https://docs.microsoft.com/azure/key-vault/key-vault-get-started/#add). In this case, your certificate needs to be the Base64 encoding of the following JSON Object, which is encoded in UTF-8:

    {
    "data":"",
    "dataType":"pfx",
    "password":""
    } - CertificateURL *string `json:"certificateUrl,omitempty"` -} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/operations.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/operations.go deleted file mode 100644 index 14c892fe10ac..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/operations.go +++ /dev/null @@ -1,98 +0,0 @@ -package compute - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "net/http" -) - -// OperationsClient is the compute Client -type OperationsClient struct { - BaseClient -} - -// NewOperationsClient creates an instance of the OperationsClient client. -func NewOperationsClient(subscriptionID string) OperationsClient { - return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client. -func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient { - return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// List gets a list of compute operations. -func (client OperationsClient) List(ctx context.Context) (result OperationListResult, err error) { - req, err := client.ListPreparer(ctx) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.OperationsClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "compute.OperationsClient", "List", resp, "Failure sending request") - return - } - - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.OperationsClient", "List", resp, "Failure responding to request") - } - - return -} - -// ListPreparer prepares the List request. -func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) { - const APIVersion = "2018-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/providers/Microsoft.Compute/operations"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. 
-func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/usage.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/usage.go deleted file mode 100644 index fd007d01ea34..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/usage.go +++ /dev/null @@ -1,141 +0,0 @@ -package compute - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "net/http" -) - -// UsageClient is the compute Client -type UsageClient struct { - BaseClient -} - -// NewUsageClient creates an instance of the UsageClient client. -func NewUsageClient(subscriptionID string) UsageClient { - return NewUsageClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewUsageClientWithBaseURI creates an instance of the UsageClient client. -func NewUsageClientWithBaseURI(baseURI string, subscriptionID string) UsageClient { - return UsageClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// List gets, for the specified location, the current compute resource usage information as well as the limits for -// compute resources under the subscription. -// Parameters: -// location - the location for which resource usage is queried. 
-func (client UsageClient) List(ctx context.Context, location string) (result ListUsagesResultPage, err error) { - if err := validation.Validate([]validation.Validation{ - {TargetValue: location, - Constraints: []validation.Constraint{{Target: "location", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("compute.UsageClient", "List", err.Error()) - } - - result.fn = client.listNextResults - req, err := client.ListPreparer(ctx, location) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.UsageClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.lur.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "compute.UsageClient", "List", resp, "Failure sending request") - return - } - - result.lur, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.UsageClient", "List", resp, "Failure responding to request") - } - - return -} - -// ListPreparer prepares the List request. -func (client UsageClient) ListPreparer(ctx context.Context, location string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "location": autorest.Encode("path", location), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/usages", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client UsageClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client UsageClient) ListResponder(resp *http.Response) (result ListUsagesResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listNextResults retrieves the next set of results, if any. 
-func (client UsageClient) listNextResults(lastResults ListUsagesResult) (result ListUsagesResult, err error) { - req, err := lastResults.listUsagesResultPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "compute.UsageClient", "listNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute.UsageClient", "listNextResults", resp, "Failure sending next results request") - } - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.UsageClient", "listNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListComplete enumerates all values, automatically crossing page boundaries as required. -func (client UsageClient) ListComplete(ctx context.Context, location string) (result ListUsagesResultIterator, err error) { - result.page, err = client.List(ctx, location) - return -} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/version.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/version.go deleted file mode 100644 index 87d03ecf9490..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/version.go +++ /dev/null @@ -1,30 +0,0 @@ -package compute - -import "github.com/Azure/azure-sdk-for-go/version" - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -// UserAgent returns the UserAgent string to use when sending http.Requests. -func UserAgent() string { - return "Azure-SDK-For-Go/" + version.Number + " compute/2018-04-01" -} - -// Version returns the semantic version (see http://semver.org) of the client. 
-func Version() string { - return version.Number -} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/availabilitysets.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/availabilitysets.go similarity index 99% rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/availabilitysets.go rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/availabilitysets.go index 095954766e76..afad9559b6d9 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/availabilitysets.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/availabilitysets.go @@ -74,7 +74,7 @@ func (client AvailabilitySetsClient) CreateOrUpdatePreparer(ctx context.Context, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -143,7 +143,7 @@ func (client AvailabilitySetsClient) DeletePreparer(ctx context.Context, resourc "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -209,7 +209,7 @@ func (client AvailabilitySetsClient) GetPreparer(ctx context.Context, resourceGr "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -275,7 +275,7 @@ func (client AvailabilitySetsClient) ListPreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -370,7 +370,7 @@ func (client AvailabilitySetsClient) ListAvailableSizesPreparer(ctx context.Cont "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -433,7 +433,7 @@ func (client AvailabilitySetsClient) ListBySubscriptionPreparer(ctx context.Cont "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -528,7 +528,7 @@ func (client AvailabilitySetsClient) UpdatePreparer(ctx context.Context, resourc "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/client.go new file mode 100644 index 000000000000..b23c9ca74268 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/client.go @@ -0,0 +1,51 @@ +// Package compute implements the Azure ARM Compute service API 
version . +// +// Compute Client +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" +) + +const ( + // DefaultBaseURI is the default URI used for the service Compute + DefaultBaseURI = "https://management.azure.com" +) + +// BaseClient is the base client for Compute. +type BaseClient struct { + autorest.Client + BaseURI string + SubscriptionID string +} + +// New creates an instance of the BaseClient client. +func New(subscriptionID string) BaseClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the BaseClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient { + return BaseClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + SubscriptionID: subscriptionID, + } +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/containerservices.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/containerservices.go new file mode 100644 index 000000000000..b16788bfa558 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/containerservices.go @@ -0,0 +1,475 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// ContainerServicesClient is the compute Client +type ContainerServicesClient struct { + BaseClient +} + +// NewContainerServicesClient creates an instance of the ContainerServicesClient client. +func NewContainerServicesClient(subscriptionID string) ContainerServicesClient { + return NewContainerServicesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewContainerServicesClientWithBaseURI creates an instance of the ContainerServicesClient client. 
+func NewContainerServicesClientWithBaseURI(baseURI string, subscriptionID string) ContainerServicesClient { + return ContainerServicesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a container service with the specified configuration of orchestrator, masters, and +// agents. +// Parameters: +// resourceGroupName - the name of the resource group. +// containerServiceName - the name of the container service in the specified subscription and resource group. +// parameters - parameters supplied to the Create or Update a Container Service operation. +func (client ContainerServicesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, containerServiceName string, parameters ContainerService) (result ContainerServicesCreateOrUpdateFuture, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.ContainerServiceProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.CustomProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.CustomProfile.Orchestrator", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "parameters.ContainerServiceProperties.ServicePrincipalProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.ServicePrincipalProfile.ClientID", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.ContainerServiceProperties.ServicePrincipalProfile.Secret", Name: validation.Null, Rule: true, Chain: nil}, + }}, + {Target: "parameters.ContainerServiceProperties.MasterProfile", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.MasterProfile.DNSPrefix", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "parameters.ContainerServiceProperties.AgentPoolProfiles", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.ContainerServiceProperties.WindowsProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.WindowsProfile.AdminUsername", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.WindowsProfile.AdminUsername", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([._]?[a-zA-Z0-9]+)*$`, Chain: nil}}}, + {Target: "parameters.ContainerServiceProperties.WindowsProfile.AdminPassword", Name: validation.Null, Rule: true, Chain: nil}, + }}, + {Target: "parameters.ContainerServiceProperties.LinuxProfile", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.LinuxProfile.AdminUsername", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.LinuxProfile.AdminUsername", Name: validation.Pattern, Rule: `^[a-z][a-z0-9_-]*$`, Chain: nil}}}, + {Target: "parameters.ContainerServiceProperties.LinuxProfile.SSH", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.LinuxProfile.SSH.PublicKeys", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + {Target: "parameters.ContainerServiceProperties.DiagnosticsProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: 
"parameters.ContainerServiceProperties.DiagnosticsProfile.VMDiagnostics", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.DiagnosticsProfile.VMDiagnostics.Enabled", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + }}}}}); err != nil { + return result, validation.NewError("compute.ContainerServicesClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, containerServiceName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client ContainerServicesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, containerServiceName string, parameters ContainerService) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "containerServiceName": autorest.Encode("path", containerServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-01-31" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices/{containerServiceName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client ContainerServicesClient) CreateOrUpdateSender(req *http.Request) (future ContainerServicesCreateOrUpdateFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client ContainerServicesClient) CreateOrUpdateResponder(resp *http.Response) (result ContainerService, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the specified container service in the specified subscription and resource group. The operation does +// not delete other resources created as part of creating a container service, including storage accounts, VMs, and +// availability sets. 
All the other resources created with the container service are part of the same resource group +// and can be deleted individually. +// Parameters: +// resourceGroupName - the name of the resource group. +// containerServiceName - the name of the container service in the specified subscription and resource group. +func (client ContainerServicesClient) Delete(ctx context.Context, resourceGroupName string, containerServiceName string) (result ContainerServicesDeleteFuture, err error) { + req, err := client.DeletePreparer(ctx, resourceGroupName, containerServiceName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client ContainerServicesClient) DeletePreparer(ctx context.Context, resourceGroupName string, containerServiceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "containerServiceName": autorest.Encode("path", containerServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-01-31" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices/{containerServiceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ContainerServicesClient) DeleteSender(req *http.Request) (future ContainerServicesDeleteFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client ContainerServicesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the properties of the specified container service in the specified subscription and resource group. The +// operation returns the properties including state, orchestrator, number of masters and agents, and FQDNs of masters +// and agents. +// Parameters: +// resourceGroupName - the name of the resource group. +// containerServiceName - the name of the container service in the specified subscription and resource group. 
+func (client ContainerServicesClient) Get(ctx context.Context, resourceGroupName string, containerServiceName string) (result ContainerService, err error) { + req, err := client.GetPreparer(ctx, resourceGroupName, containerServiceName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client ContainerServicesClient) GetPreparer(ctx context.Context, resourceGroupName string, containerServiceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "containerServiceName": autorest.Encode("path", containerServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-01-31" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices/{containerServiceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ContainerServicesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ContainerServicesClient) GetResponder(resp *http.Response) (result ContainerService, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of container services in the specified subscription. The operation returns properties of each +// container service including state, orchestrator, number of masters and agents, and FQDNs of masters and agents. 
+func (client ContainerServicesClient) List(ctx context.Context) (result ContainerServiceListResultPage, err error) { + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.cslr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "List", resp, "Failure sending request") + return + } + + result.cslr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ContainerServicesClient) ListPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-01-31" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/containerServices", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ContainerServicesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ContainerServicesClient) ListResponder(resp *http.Response) (result ContainerServiceListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client ContainerServicesClient) listNextResults(lastResults ContainerServiceListResult) (result ContainerServiceListResult, err error) { + req, err := lastResults.containerServiceListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client ContainerServicesClient) ListComplete(ctx context.Context) (result ContainerServiceListResultIterator, err error) { + result.page, err = client.List(ctx) + return +} + +// ListByResourceGroup gets a list of container services in the specified subscription and resource group. The +// operation returns properties of each container service including state, orchestrator, number of masters and agents, +// and FQDNs of masters and agents. +// Parameters: +// resourceGroupName - the name of the resource group. +func (client ContainerServicesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result ContainerServiceListResultPage, err error) { + result.fn = client.listByResourceGroupNextResults + req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.cslr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result.cslr, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client ContainerServicesClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-01-31" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client ContainerServicesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client ContainerServicesClient) ListByResourceGroupResponder(resp *http.Response) (result ContainerServiceListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByResourceGroupNextResults retrieves the next set of results, if any. 
+func (client ContainerServicesClient) listByResourceGroupNextResults(lastResults ContainerServiceListResult) (result ContainerServiceListResult, err error) { + req, err := lastResults.containerServiceListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "listByResourceGroupNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required. +func (client ContainerServicesClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result ContainerServiceListResultIterator, err error) { + result.page, err = client.ListByResourceGroup(ctx, resourceGroupName) + return +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/disks.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/disks.go similarity index 99% rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/disks.go rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/disks.go index cdc570377d01..9cbbd79fd6fc 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/disks.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/disks.go @@ -92,7 +92,7 @@ func (client DisksClient) CreateOrUpdatePreparer(ctx context.Context, resourceGr "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-06-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -167,7 +167,7 @@ func (client DisksClient) DeletePreparer(ctx context.Context, resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-06-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -245,7 +245,7 @@ func (client DisksClient) GetPreparer(ctx context.Context, resourceGroupName str "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-06-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -315,7 +315,7 @@ func (client DisksClient) GrantAccessPreparer(ctx context.Context, resourceGroup "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-06-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -390,7 +390,7 @@ func (client DisksClient) ListPreparer(ctx context.Context) (*http.Request, erro "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = 
"2018-04-01" + const APIVersion = "2018-06-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -483,7 +483,7 @@ func (client DisksClient) ListByResourceGroupPreparer(ctx context.Context, resou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-06-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -573,7 +573,7 @@ func (client DisksClient) RevokeAccessPreparer(ctx context.Context, resourceGrou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-06-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -646,7 +646,7 @@ func (client DisksClient) UpdatePreparer(ctx context.Context, resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-06-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleries.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleries.go new file mode 100644 index 000000000000..bc550686859f --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleries.go @@ -0,0 +1,435 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// GalleriesClient is the compute Client +type GalleriesClient struct { + BaseClient +} + +// NewGalleriesClient creates an instance of the GalleriesClient client. +func NewGalleriesClient(subscriptionID string) GalleriesClient { + return NewGalleriesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewGalleriesClientWithBaseURI creates an instance of the GalleriesClient client. +func NewGalleriesClientWithBaseURI(baseURI string, subscriptionID string) GalleriesClient { + return GalleriesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate create or update a Shared Image Gallery. +// Parameters: +// resourceGroupName - the name of the resource group. +// galleryName - the name of the Shared Image Gallery. The allowed characters are alphabets and numbers with +// dots and periods allowed in the middle. The maximum length is 80 characters. +// gallery - parameters supplied to the create or update Shared Image Gallery operation. 
+func (client GalleriesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, gallery Gallery) (result GalleriesCreateOrUpdateFuture, err error) { + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, galleryName, gallery) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client GalleriesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, galleryName string, gallery Gallery) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryName": autorest.Encode("path", galleryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}", pathParameters), + autorest.WithJSON(gallery), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client GalleriesClient) CreateOrUpdateSender(req *http.Request) (future GalleriesCreateOrUpdateFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client GalleriesClient) CreateOrUpdateResponder(resp *http.Response) (result Gallery, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete delete a Shared Image Gallery. +// Parameters: +// resourceGroupName - the name of the resource group. +// galleryName - the name of the Shared Image Gallery to be deleted. 
+func (client GalleriesClient) Delete(ctx context.Context, resourceGroupName string, galleryName string) (result GalleriesDeleteFuture, err error) { + req, err := client.DeletePreparer(ctx, resourceGroupName, galleryName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client GalleriesClient) DeletePreparer(ctx context.Context, resourceGroupName string, galleryName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryName": autorest.Encode("path", galleryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client GalleriesClient) DeleteSender(req *http.Request) (future GalleriesDeleteFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client GalleriesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get retrieves information about a Shared Image Gallery. +// Parameters: +// resourceGroupName - the name of the resource group. +// galleryName - the name of the Shared Image Gallery. +func (client GalleriesClient) Get(ctx context.Context, resourceGroupName string, galleryName string) (result Gallery, err error) { + req, err := client.GetPreparer(ctx, resourceGroupName, galleryName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
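+// Together with GetSender and GetResponder below it forms the request
+// pipeline, which can also be driven by hand when the raw *http.Request needs
+// to be inspected or adjusted first (a sketch; intermediate error checks
+// elided):
+//
+//	req, err := client.GetPreparer(ctx, "my-rg", "myGallery")
+//	resp, err := client.GetSender(req)
+//	gallery, err := client.GetResponder(resp)
+//	if err == nil {
+//		fmt.Println(*gallery.Name)
+//	}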
+func (client GalleriesClient) GetPreparer(ctx context.Context, resourceGroupName string, galleryName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryName": autorest.Encode("path", galleryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client GalleriesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client GalleriesClient) GetResponder(resp *http.Response) (result Gallery, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List list galleries under a subscription. +func (client GalleriesClient) List(ctx context.Context) (result GalleryListPage, err error) { + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.gl.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "List", resp, "Failure sending request") + return + } + + result.gl, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client GalleriesClient) ListPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/galleries", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client GalleriesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client GalleriesClient) ListResponder(resp *http.Response) (result GalleryList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client GalleriesClient) listNextResults(lastResults GalleryList) (result GalleryList, err error) { + req, err := lastResults.galleryListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.GalleriesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.GalleriesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client GalleriesClient) ListComplete(ctx context.Context) (result GalleryListIterator, err error) { + result.page, err = client.List(ctx) + return +} + +// ListByResourceGroup list galleries under a resource group. +// Parameters: +// resourceGroupName - the name of the resource group. +func (client GalleriesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result GalleryListPage, err error) { + result.fn = client.listByResourceGroupNextResults + req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.gl.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result.gl, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client GalleriesClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. 
+func (client GalleriesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client GalleriesClient) ListByResourceGroupResponder(resp *http.Response) (result GalleryList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByResourceGroupNextResults retrieves the next set of results, if any. +func (client GalleriesClient) listByResourceGroupNextResults(lastResults GalleryList) (result GalleryList, err error) { + req, err := lastResults.galleryListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.GalleriesClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.GalleriesClient", "listByResourceGroupNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required. +func (client GalleriesClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result GalleryListIterator, err error) { + result.page, err = client.ListByResourceGroup(ctx, resourceGroupName) + return +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleryimages.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleryimages.go new file mode 100644 index 000000000000..670ea7ee29a3 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleryimages.go @@ -0,0 +1,367 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// GalleryImagesClient is the compute Client +type GalleryImagesClient struct { + BaseClient +} + +// NewGalleryImagesClient creates an instance of the GalleryImagesClient client. +func NewGalleryImagesClient(subscriptionID string) GalleryImagesClient { + return NewGalleryImagesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewGalleryImagesClientWithBaseURI creates an instance of the GalleryImagesClient client. +func NewGalleryImagesClientWithBaseURI(baseURI string, subscriptionID string) GalleryImagesClient { + return GalleryImagesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate create or update a gallery Image Definition. +// Parameters: +// resourceGroupName - the name of the resource group. +// galleryName - the name of the Shared Image Gallery in which the Image Definition is to be created. +// galleryImageName - the name of the gallery Image Definition to be created or updated. The allowed characters +// are alphabets and numbers with dots, dashes, and periods allowed in the middle. The maximum length is 80 +// characters. +// galleryImage - parameters supplied to the create or update gallery image operation. +func (client GalleryImagesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImage GalleryImage) (result GalleryImagesCreateOrUpdateFuture, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: galleryImage, + Constraints: []validation.Constraint{{Target: "galleryImage.GalleryImageProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "galleryImage.GalleryImageProperties.Identifier", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "galleryImage.GalleryImageProperties.Identifier.Publisher", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "galleryImage.GalleryImageProperties.Identifier.Offer", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "galleryImage.GalleryImageProperties.Identifier.Sku", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}}}}); err != nil { + return result, validation.NewError("compute.GalleryImagesClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, galleryName, galleryImageName, galleryImage) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
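+// The client-side validation in CreateOrUpdate above only enforces the
+// publisher/offer/SKU identifier triple. A rough minimal payload, assuming the
+// GalleryImage and GalleryImageIdentifier model names from this package and
+// the go-autorest "to" pointer helpers (all values are placeholders):
+//
+//	img := compute.GalleryImage{
+//		Location: to.StringPtr("westus"),
+//		GalleryImageProperties: &compute.GalleryImageProperties{
+//			Identifier: &compute.GalleryImageIdentifier{
+//				Publisher: to.StringPtr("myPublisher"),
+//				Offer:     to.StringPtr("myOffer"),
+//				Sku:       to.StringPtr("mySku"),
+//			},
+//		},
+//	}
+//	if _, err := client.CreateOrUpdate(ctx, "my-rg", "myGallery", "myImage", img); err != nil {
+//		// handle the error
+//	}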
+func (client GalleryImagesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImage GalleryImage) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryImageName": autorest.Encode("path", galleryImageName), + "galleryName": autorest.Encode("path", galleryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}", pathParameters), + autorest.WithJSON(galleryImage), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client GalleryImagesClient) CreateOrUpdateSender(req *http.Request) (future GalleryImagesCreateOrUpdateFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client GalleryImagesClient) CreateOrUpdateResponder(resp *http.Response) (result GalleryImage, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete delete a gallery image. +// Parameters: +// resourceGroupName - the name of the resource group. +// galleryName - the name of the Shared Image Gallery in which the Image Definition is to be deleted. +// galleryImageName - the name of the gallery Image Definition to be deleted. +func (client GalleryImagesClient) Delete(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string) (result GalleryImagesDeleteFuture, err error) { + req, err := client.DeletePreparer(ctx, resourceGroupName, galleryName, galleryImageName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client GalleryImagesClient) DeletePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryImageName": autorest.Encode("path", galleryImageName), + "galleryName": autorest.Encode("path", galleryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client GalleryImagesClient) DeleteSender(req *http.Request) (future GalleryImagesDeleteFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client GalleryImagesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get retrieves information about a gallery Image Definition. +// Parameters: +// resourceGroupName - the name of the resource group. +// galleryName - the name of the Shared Image Gallery from which the Image Definitions are to be retrieved. +// galleryImageName - the name of the gallery Image Definition to be retrieved. +func (client GalleryImagesClient) Get(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string) (result GalleryImage, err error) { + req, err := client.GetPreparer(ctx, resourceGroupName, galleryName, galleryImageName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client GalleryImagesClient) GetPreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryImageName": autorest.Encode("path", galleryImageName), + "galleryName": autorest.Encode("path", galleryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client GalleryImagesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client GalleryImagesClient) GetResponder(resp *http.Response) (result GalleryImage, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByGallery list gallery Image Definitions in a gallery. +// Parameters: +// resourceGroupName - the name of the resource group. +// galleryName - the name of the Shared Image Gallery from which Image Definitions are to be listed. +func (client GalleryImagesClient) ListByGallery(ctx context.Context, resourceGroupName string, galleryName string) (result GalleryImageListPage, err error) { + result.fn = client.listByGalleryNextResults + req, err := client.ListByGalleryPreparer(ctx, resourceGroupName, galleryName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "ListByGallery", nil, "Failure preparing request") + return + } + + resp, err := client.ListByGallerySender(req) + if err != nil { + result.gil.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "ListByGallery", resp, "Failure sending request") + return + } + + result.gil, err = client.ListByGalleryResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "ListByGallery", resp, "Failure responding to request") + } + + return +} + +// ListByGalleryPreparer prepares the ListByGallery request. 
+func (client GalleryImagesClient) ListByGalleryPreparer(ctx context.Context, resourceGroupName string, galleryName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryName": autorest.Encode("path", galleryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByGallerySender sends the ListByGallery request. The method will close the +// http.Response Body if it receives an error. +func (client GalleryImagesClient) ListByGallerySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListByGalleryResponder handles the response to the ListByGallery request. The method always +// closes the http.Response Body. +func (client GalleryImagesClient) ListByGalleryResponder(resp *http.Response) (result GalleryImageList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByGalleryNextResults retrieves the next set of results, if any. +func (client GalleryImagesClient) listByGalleryNextResults(lastResults GalleryImageList) (result GalleryImageList, err error) { + req, err := lastResults.galleryImageListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "listByGalleryNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByGallerySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "listByGalleryNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByGalleryResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "listByGalleryNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByGalleryComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client GalleryImagesClient) ListByGalleryComplete(ctx context.Context, resourceGroupName string, galleryName string) (result GalleryImageListIterator, err error) {
+	result.page, err = client.ListByGallery(ctx, resourceGroupName, galleryName)
+	return
+}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleryimageversions.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleryimageversions.go
new file mode 100644
index 000000000000..d179443ade51
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/galleryimageversions.go
@@ -0,0 +1,375 @@
+package compute
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+	"context"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/autorest/validation"
+	"net/http"
+)
+
+// GalleryImageVersionsClient is the compute Client
+type GalleryImageVersionsClient struct {
+	BaseClient
+}
+
+// NewGalleryImageVersionsClient creates an instance of the GalleryImageVersionsClient client.
+func NewGalleryImageVersionsClient(subscriptionID string) GalleryImageVersionsClient {
+	return NewGalleryImageVersionsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewGalleryImageVersionsClientWithBaseURI creates an instance of the GalleryImageVersionsClient client.
+func NewGalleryImageVersionsClientWithBaseURI(baseURI string, subscriptionID string) GalleryImageVersionsClient {
+	return GalleryImageVersionsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate create or update a gallery Image Version.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// galleryName - the name of the Shared Image Gallery in which the Image Definition resides.
+// galleryImageName - the name of the gallery Image Definition in which the Image Version is to be created.
+// galleryImageVersionName - the name of the gallery Image Version to be created. Needs to follow semantic
+// version name pattern: The allowed characters are digit and period. Digits must be within the range of a
+// 32-bit integer. Format: <MajorVersion>.<MinorVersion>.<Patch>
+// galleryImageVersion - parameters supplied to the create or update gallery Image Version operation.
+func (client GalleryImageVersionsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion GalleryImageVersion) (result GalleryImageVersionsCreateOrUpdateFuture, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: galleryImageVersion, + Constraints: []validation.Constraint{{Target: "galleryImageVersion.GalleryImageVersionProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "galleryImageVersion.GalleryImageVersionProperties.PublishingProfile", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil { + return result, validation.NewError("compute.GalleryImageVersionsClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, galleryName, galleryImageName, galleryImageVersionName, galleryImageVersion) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client GalleryImageVersionsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion GalleryImageVersion) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryImageName": autorest.Encode("path", galleryImageName), + "galleryImageVersionName": autorest.Encode("path", galleryImageVersionName), + "galleryName": autorest.Encode("path", galleryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}", pathParameters), + autorest.WithJSON(galleryImageVersion), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client GalleryImageVersionsClient) CreateOrUpdateSender(req *http.Request) (future GalleryImageVersionsCreateOrUpdateFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. 
+func (client GalleryImageVersionsClient) CreateOrUpdateResponder(resp *http.Response) (result GalleryImageVersion, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete delete a gallery Image Version. +// Parameters: +// resourceGroupName - the name of the resource group. +// galleryName - the name of the Shared Image Gallery in which the Image Definition resides. +// galleryImageName - the name of the gallery Image Definition in which the Image Version resides. +// galleryImageVersionName - the name of the gallery Image Version to be deleted. +func (client GalleryImageVersionsClient) Delete(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string) (result GalleryImageVersionsDeleteFuture, err error) { + req, err := client.DeletePreparer(ctx, resourceGroupName, galleryName, galleryImageName, galleryImageVersionName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client GalleryImageVersionsClient) DeletePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryImageName": autorest.Encode("path", galleryImageName), + "galleryImageVersionName": autorest.Encode("path", galleryImageVersionName), + "galleryName": autorest.Encode("path", galleryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client GalleryImageVersionsClient) DeleteSender(req *http.Request) (future GalleryImageVersionsDeleteFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. 
+func (client GalleryImageVersionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get retrieves information about a gallery Image Version. +// Parameters: +// resourceGroupName - the name of the resource group. +// galleryName - the name of the Shared Image Gallery in which the Image Definition resides. +// galleryImageName - the name of the gallery Image Definition in which the Image Version resides. +// galleryImageVersionName - the name of the gallery Image Version to be retrieved. +// expand - the expand expression to apply on the operation. +func (client GalleryImageVersionsClient) Get(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, expand ReplicationStatusTypes) (result GalleryImageVersion, err error) { + req, err := client.GetPreparer(ctx, resourceGroupName, galleryName, galleryImageName, galleryImageVersionName, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client GalleryImageVersionsClient) GetPreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, expand ReplicationStatusTypes) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryImageName": autorest.Encode("path", galleryImageName), + "galleryImageVersionName": autorest.Encode("path", galleryImageVersionName), + "galleryName": autorest.Encode("path", galleryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(string(expand)) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client GalleryImageVersionsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
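+//
+// For Get above, a non-empty expand value is sent as the $expand query
+// parameter, while passing the zero value omits it. A short sketch, assuming
+// the ReplicationStatusTypesReplicationStatus constant from this package's
+// models (names are placeholders):
+//
+//	v, err := client.Get(ctx, "my-rg", "myGallery", "myImage", "1.0.0",
+//		compute.ReplicationStatusTypesReplicationStatus)
+//	if err == nil {
+//		fmt.Println(*v.Name)
+//	}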
+func (client GalleryImageVersionsClient) GetResponder(resp *http.Response) (result GalleryImageVersion, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByGalleryImage list gallery Image Versions in a gallery Image Definition. +// Parameters: +// resourceGroupName - the name of the resource group. +// galleryName - the name of the Shared Image Gallery in which the Image Definition resides. +// galleryImageName - the name of the Shared Image Gallery Image Definition from which the Image Versions are +// to be listed. +func (client GalleryImageVersionsClient) ListByGalleryImage(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string) (result GalleryImageVersionListPage, err error) { + result.fn = client.listByGalleryImageNextResults + req, err := client.ListByGalleryImagePreparer(ctx, resourceGroupName, galleryName, galleryImageName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "ListByGalleryImage", nil, "Failure preparing request") + return + } + + resp, err := client.ListByGalleryImageSender(req) + if err != nil { + result.givl.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "ListByGalleryImage", resp, "Failure sending request") + return + } + + result.givl, err = client.ListByGalleryImageResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "ListByGalleryImage", resp, "Failure responding to request") + } + + return +} + +// ListByGalleryImagePreparer prepares the ListByGalleryImage request. +func (client GalleryImageVersionsClient) ListByGalleryImagePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryImageName": autorest.Encode("path", galleryImageName), + "galleryName": autorest.Encode("path", galleryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByGalleryImageSender sends the ListByGalleryImage request. The method will close the +// http.Response Body if it receives an error. +func (client GalleryImageVersionsClient) ListByGalleryImageSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListByGalleryImageResponder handles the response to the ListByGalleryImage request. The method always +// closes the http.Response Body. 
+func (client GalleryImageVersionsClient) ListByGalleryImageResponder(resp *http.Response) (result GalleryImageVersionList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByGalleryImageNextResults retrieves the next set of results, if any. +func (client GalleryImageVersionsClient) listByGalleryImageNextResults(lastResults GalleryImageVersionList) (result GalleryImageVersionList, err error) { + req, err := lastResults.galleryImageVersionListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "listByGalleryImageNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByGalleryImageSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "listByGalleryImageNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByGalleryImageResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "listByGalleryImageNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByGalleryImageComplete enumerates all values, automatically crossing page boundaries as required. +func (client GalleryImageVersionsClient) ListByGalleryImageComplete(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string) (result GalleryImageVersionListIterator, err error) { + result.page, err = client.ListByGalleryImage(ctx, resourceGroupName, galleryName, galleryImageName) + return +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/images.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/images.go similarity index 99% rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/images.go rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/images.go index f468d6fd0c3f..2afe53551f0b 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/images.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/images.go @@ -68,7 +68,7 @@ func (client ImagesClient) CreateOrUpdatePreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -141,7 +141,7 @@ func (client ImagesClient) DeletePreparer(ctx context.Context, resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -218,7 +218,7 @@ func (client ImagesClient) GetPreparer(ctx context.Context, resourceGroupName st "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ 
"api-version": APIVersion, } @@ -285,7 +285,7 @@ func (client ImagesClient) ListPreparer(ctx context.Context) (*http.Request, err "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -378,7 +378,7 @@ func (client ImagesClient) ListByResourceGroupPreparer(ctx context.Context, reso "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -467,7 +467,7 @@ func (client ImagesClient) UpdatePreparer(ctx context.Context, resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/loganalytics.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/loganalytics.go similarity index 99% rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/loganalytics.go rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/loganalytics.go index e958f84b1003..f72c02202e3c 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/loganalytics.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/loganalytics.go @@ -74,7 +74,7 @@ func (client LogAnalyticsClient) ExportRequestRateByIntervalPreparer(ctx context "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -153,7 +153,7 @@ func (client LogAnalyticsClient) ExportThrottledRequestsPreparer(ctx context.Con "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/models.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/models.go new file mode 100644 index 000000000000..6499e4d052a3 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/models.go @@ -0,0 +1,9776 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. 
+// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "encoding/json" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/to" + "net/http" +) + +// AccessLevel enumerates the values for access level. +type AccessLevel string + +const ( + // None ... + None AccessLevel = "None" + // Read ... + Read AccessLevel = "Read" +) + +// PossibleAccessLevelValues returns an array of possible values for the AccessLevel const type. +func PossibleAccessLevelValues() []AccessLevel { + return []AccessLevel{None, Read} +} + +// AggregatedReplicationState enumerates the values for aggregated replication state. +type AggregatedReplicationState string + +const ( + // Completed ... + Completed AggregatedReplicationState = "Completed" + // Failed ... + Failed AggregatedReplicationState = "Failed" + // InProgress ... + InProgress AggregatedReplicationState = "InProgress" + // Unknown ... + Unknown AggregatedReplicationState = "Unknown" +) + +// PossibleAggregatedReplicationStateValues returns an array of possible values for the AggregatedReplicationState const type. +func PossibleAggregatedReplicationStateValues() []AggregatedReplicationState { + return []AggregatedReplicationState{Completed, Failed, InProgress, Unknown} +} + +// AvailabilitySetSkuTypes enumerates the values for availability set sku types. +type AvailabilitySetSkuTypes string + +const ( + // Aligned ... + Aligned AvailabilitySetSkuTypes = "Aligned" + // Classic ... + Classic AvailabilitySetSkuTypes = "Classic" +) + +// PossibleAvailabilitySetSkuTypesValues returns an array of possible values for the AvailabilitySetSkuTypes const type. +func PossibleAvailabilitySetSkuTypesValues() []AvailabilitySetSkuTypes { + return []AvailabilitySetSkuTypes{Aligned, Classic} +} + +// CachingTypes enumerates the values for caching types. +type CachingTypes string + +const ( + // CachingTypesNone ... + CachingTypesNone CachingTypes = "None" + // CachingTypesReadOnly ... + CachingTypesReadOnly CachingTypes = "ReadOnly" + // CachingTypesReadWrite ... + CachingTypesReadWrite CachingTypes = "ReadWrite" +) + +// PossibleCachingTypesValues returns an array of possible values for the CachingTypes const type. +func PossibleCachingTypesValues() []CachingTypes { + return []CachingTypes{CachingTypesNone, CachingTypesReadOnly, CachingTypesReadWrite} +} + +// ComponentNames enumerates the values for component names. +type ComponentNames string + +const ( + // MicrosoftWindowsShellSetup ... + MicrosoftWindowsShellSetup ComponentNames = "Microsoft-Windows-Shell-Setup" +) + +// PossibleComponentNamesValues returns an array of possible values for the ComponentNames const type. +func PossibleComponentNamesValues() []ComponentNames { + return []ComponentNames{MicrosoftWindowsShellSetup} +} + +// ContainerServiceOrchestratorTypes enumerates the values for container service orchestrator types. +type ContainerServiceOrchestratorTypes string + +const ( + // Custom ... + Custom ContainerServiceOrchestratorTypes = "Custom" + // DCOS ... + DCOS ContainerServiceOrchestratorTypes = "DCOS" + // Kubernetes ... + Kubernetes ContainerServiceOrchestratorTypes = "Kubernetes" + // Swarm ... + Swarm ContainerServiceOrchestratorTypes = "Swarm" +) + +// PossibleContainerServiceOrchestratorTypesValues returns an array of possible values for the ContainerServiceOrchestratorTypes const type. 
+func PossibleContainerServiceOrchestratorTypesValues() []ContainerServiceOrchestratorTypes { + return []ContainerServiceOrchestratorTypes{Custom, DCOS, Kubernetes, Swarm} +} + +// ContainerServiceVMSizeTypes enumerates the values for container service vm size types. +type ContainerServiceVMSizeTypes string + +const ( + // StandardA0 ... + StandardA0 ContainerServiceVMSizeTypes = "Standard_A0" + // StandardA1 ... + StandardA1 ContainerServiceVMSizeTypes = "Standard_A1" + // StandardA10 ... + StandardA10 ContainerServiceVMSizeTypes = "Standard_A10" + // StandardA11 ... + StandardA11 ContainerServiceVMSizeTypes = "Standard_A11" + // StandardA2 ... + StandardA2 ContainerServiceVMSizeTypes = "Standard_A2" + // StandardA3 ... + StandardA3 ContainerServiceVMSizeTypes = "Standard_A3" + // StandardA4 ... + StandardA4 ContainerServiceVMSizeTypes = "Standard_A4" + // StandardA5 ... + StandardA5 ContainerServiceVMSizeTypes = "Standard_A5" + // StandardA6 ... + StandardA6 ContainerServiceVMSizeTypes = "Standard_A6" + // StandardA7 ... + StandardA7 ContainerServiceVMSizeTypes = "Standard_A7" + // StandardA8 ... + StandardA8 ContainerServiceVMSizeTypes = "Standard_A8" + // StandardA9 ... + StandardA9 ContainerServiceVMSizeTypes = "Standard_A9" + // StandardD1 ... + StandardD1 ContainerServiceVMSizeTypes = "Standard_D1" + // StandardD11 ... + StandardD11 ContainerServiceVMSizeTypes = "Standard_D11" + // StandardD11V2 ... + StandardD11V2 ContainerServiceVMSizeTypes = "Standard_D11_v2" + // StandardD12 ... + StandardD12 ContainerServiceVMSizeTypes = "Standard_D12" + // StandardD12V2 ... + StandardD12V2 ContainerServiceVMSizeTypes = "Standard_D12_v2" + // StandardD13 ... + StandardD13 ContainerServiceVMSizeTypes = "Standard_D13" + // StandardD13V2 ... + StandardD13V2 ContainerServiceVMSizeTypes = "Standard_D13_v2" + // StandardD14 ... + StandardD14 ContainerServiceVMSizeTypes = "Standard_D14" + // StandardD14V2 ... + StandardD14V2 ContainerServiceVMSizeTypes = "Standard_D14_v2" + // StandardD1V2 ... + StandardD1V2 ContainerServiceVMSizeTypes = "Standard_D1_v2" + // StandardD2 ... + StandardD2 ContainerServiceVMSizeTypes = "Standard_D2" + // StandardD2V2 ... + StandardD2V2 ContainerServiceVMSizeTypes = "Standard_D2_v2" + // StandardD3 ... + StandardD3 ContainerServiceVMSizeTypes = "Standard_D3" + // StandardD3V2 ... + StandardD3V2 ContainerServiceVMSizeTypes = "Standard_D3_v2" + // StandardD4 ... + StandardD4 ContainerServiceVMSizeTypes = "Standard_D4" + // StandardD4V2 ... + StandardD4V2 ContainerServiceVMSizeTypes = "Standard_D4_v2" + // StandardD5V2 ... + StandardD5V2 ContainerServiceVMSizeTypes = "Standard_D5_v2" + // StandardDS1 ... + StandardDS1 ContainerServiceVMSizeTypes = "Standard_DS1" + // StandardDS11 ... + StandardDS11 ContainerServiceVMSizeTypes = "Standard_DS11" + // StandardDS12 ... + StandardDS12 ContainerServiceVMSizeTypes = "Standard_DS12" + // StandardDS13 ... + StandardDS13 ContainerServiceVMSizeTypes = "Standard_DS13" + // StandardDS14 ... + StandardDS14 ContainerServiceVMSizeTypes = "Standard_DS14" + // StandardDS2 ... + StandardDS2 ContainerServiceVMSizeTypes = "Standard_DS2" + // StandardDS3 ... + StandardDS3 ContainerServiceVMSizeTypes = "Standard_DS3" + // StandardDS4 ... + StandardDS4 ContainerServiceVMSizeTypes = "Standard_DS4" + // StandardG1 ... + StandardG1 ContainerServiceVMSizeTypes = "Standard_G1" + // StandardG2 ... + StandardG2 ContainerServiceVMSizeTypes = "Standard_G2" + // StandardG3 ... + StandardG3 ContainerServiceVMSizeTypes = "Standard_G3" + // StandardG4 ... 
+ StandardG4 ContainerServiceVMSizeTypes = "Standard_G4" + // StandardG5 ... + StandardG5 ContainerServiceVMSizeTypes = "Standard_G5" + // StandardGS1 ... + StandardGS1 ContainerServiceVMSizeTypes = "Standard_GS1" + // StandardGS2 ... + StandardGS2 ContainerServiceVMSizeTypes = "Standard_GS2" + // StandardGS3 ... + StandardGS3 ContainerServiceVMSizeTypes = "Standard_GS3" + // StandardGS4 ... + StandardGS4 ContainerServiceVMSizeTypes = "Standard_GS4" + // StandardGS5 ... + StandardGS5 ContainerServiceVMSizeTypes = "Standard_GS5" +) + +// PossibleContainerServiceVMSizeTypesValues returns an array of possible values for the ContainerServiceVMSizeTypes const type. +func PossibleContainerServiceVMSizeTypesValues() []ContainerServiceVMSizeTypes { + return []ContainerServiceVMSizeTypes{StandardA0, StandardA1, StandardA10, StandardA11, StandardA2, StandardA3, StandardA4, StandardA5, StandardA6, StandardA7, StandardA8, StandardA9, StandardD1, StandardD11, StandardD11V2, StandardD12, StandardD12V2, StandardD13, StandardD13V2, StandardD14, StandardD14V2, StandardD1V2, StandardD2, StandardD2V2, StandardD3, StandardD3V2, StandardD4, StandardD4V2, StandardD5V2, StandardDS1, StandardDS11, StandardDS12, StandardDS13, StandardDS14, StandardDS2, StandardDS3, StandardDS4, StandardG1, StandardG2, StandardG3, StandardG4, StandardG5, StandardGS1, StandardGS2, StandardGS3, StandardGS4, StandardGS5} +} + +// DiffDiskOptions enumerates the values for diff disk options. +type DiffDiskOptions string + +const ( + // Local ... + Local DiffDiskOptions = "Local" +) + +// PossibleDiffDiskOptionsValues returns an array of possible values for the DiffDiskOptions const type. +func PossibleDiffDiskOptionsValues() []DiffDiskOptions { + return []DiffDiskOptions{Local} +} + +// DiskCreateOption enumerates the values for disk create option. +type DiskCreateOption string + +const ( + // Attach ... + Attach DiskCreateOption = "Attach" + // Copy ... + Copy DiskCreateOption = "Copy" + // Empty ... + Empty DiskCreateOption = "Empty" + // FromImage ... + FromImage DiskCreateOption = "FromImage" + // Import ... + Import DiskCreateOption = "Import" + // Restore ... + Restore DiskCreateOption = "Restore" +) + +// PossibleDiskCreateOptionValues returns an array of possible values for the DiskCreateOption const type. +func PossibleDiskCreateOptionValues() []DiskCreateOption { + return []DiskCreateOption{Attach, Copy, Empty, FromImage, Import, Restore} +} + +// DiskCreateOptionTypes enumerates the values for disk create option types. +type DiskCreateOptionTypes string + +const ( + // DiskCreateOptionTypesAttach ... + DiskCreateOptionTypesAttach DiskCreateOptionTypes = "Attach" + // DiskCreateOptionTypesEmpty ... + DiskCreateOptionTypesEmpty DiskCreateOptionTypes = "Empty" + // DiskCreateOptionTypesFromImage ... + DiskCreateOptionTypesFromImage DiskCreateOptionTypes = "FromImage" +) + +// PossibleDiskCreateOptionTypesValues returns an array of possible values for the DiskCreateOptionTypes const type. +func PossibleDiskCreateOptionTypesValues() []DiskCreateOptionTypes { + return []DiskCreateOptionTypes{DiskCreateOptionTypesAttach, DiskCreateOptionTypesEmpty, DiskCreateOptionTypesFromImage} +} + +// DiskStorageAccountTypes enumerates the values for disk storage account types. +type DiskStorageAccountTypes string + +const ( + // PremiumLRS ... + PremiumLRS DiskStorageAccountTypes = "Premium_LRS" + // StandardLRS ... + StandardLRS DiskStorageAccountTypes = "Standard_LRS" + // StandardSSDLRS ... 
+ StandardSSDLRS DiskStorageAccountTypes = "StandardSSD_LRS" + // UltraSSDLRS ... + UltraSSDLRS DiskStorageAccountTypes = "UltraSSD_LRS" +) + +// PossibleDiskStorageAccountTypesValues returns an array of possible values for the DiskStorageAccountTypes const type. +func PossibleDiskStorageAccountTypesValues() []DiskStorageAccountTypes { + return []DiskStorageAccountTypes{PremiumLRS, StandardLRS, StandardSSDLRS, UltraSSDLRS} +} + +// HostCaching enumerates the values for host caching. +type HostCaching string + +const ( + // HostCachingNone ... + HostCachingNone HostCaching = "None" + // HostCachingReadOnly ... + HostCachingReadOnly HostCaching = "ReadOnly" + // HostCachingReadWrite ... + HostCachingReadWrite HostCaching = "ReadWrite" +) + +// PossibleHostCachingValues returns an array of possible values for the HostCaching const type. +func PossibleHostCachingValues() []HostCaching { + return []HostCaching{HostCachingNone, HostCachingReadOnly, HostCachingReadWrite} +} + +// InstanceViewTypes enumerates the values for instance view types. +type InstanceViewTypes string + +const ( + // InstanceView ... + InstanceView InstanceViewTypes = "instanceView" +) + +// PossibleInstanceViewTypesValues returns an array of possible values for the InstanceViewTypes const type. +func PossibleInstanceViewTypesValues() []InstanceViewTypes { + return []InstanceViewTypes{InstanceView} +} + +// IntervalInMins enumerates the values for interval in mins. +type IntervalInMins string + +const ( + // FiveMins ... + FiveMins IntervalInMins = "FiveMins" + // SixtyMins ... + SixtyMins IntervalInMins = "SixtyMins" + // ThirtyMins ... + ThirtyMins IntervalInMins = "ThirtyMins" + // ThreeMins ... + ThreeMins IntervalInMins = "ThreeMins" +) + +// PossibleIntervalInMinsValues returns an array of possible values for the IntervalInMins const type. +func PossibleIntervalInMinsValues() []IntervalInMins { + return []IntervalInMins{FiveMins, SixtyMins, ThirtyMins, ThreeMins} +} + +// IPVersion enumerates the values for ip version. +type IPVersion string + +const ( + // IPv4 ... + IPv4 IPVersion = "IPv4" + // IPv6 ... + IPv6 IPVersion = "IPv6" +) + +// PossibleIPVersionValues returns an array of possible values for the IPVersion const type. +func PossibleIPVersionValues() []IPVersion { + return []IPVersion{IPv4, IPv6} +} + +// MaintenanceOperationResultCodeTypes enumerates the values for maintenance operation result code types. +type MaintenanceOperationResultCodeTypes string + +const ( + // MaintenanceOperationResultCodeTypesMaintenanceAborted ... + MaintenanceOperationResultCodeTypesMaintenanceAborted MaintenanceOperationResultCodeTypes = "MaintenanceAborted" + // MaintenanceOperationResultCodeTypesMaintenanceCompleted ... + MaintenanceOperationResultCodeTypesMaintenanceCompleted MaintenanceOperationResultCodeTypes = "MaintenanceCompleted" + // MaintenanceOperationResultCodeTypesNone ... + MaintenanceOperationResultCodeTypesNone MaintenanceOperationResultCodeTypes = "None" + // MaintenanceOperationResultCodeTypesRetryLater ... + MaintenanceOperationResultCodeTypesRetryLater MaintenanceOperationResultCodeTypes = "RetryLater" +) + +// PossibleMaintenanceOperationResultCodeTypesValues returns an array of possible values for the MaintenanceOperationResultCodeTypes const type. 
+func PossibleMaintenanceOperationResultCodeTypesValues() []MaintenanceOperationResultCodeTypes { + return []MaintenanceOperationResultCodeTypes{MaintenanceOperationResultCodeTypesMaintenanceAborted, MaintenanceOperationResultCodeTypesMaintenanceCompleted, MaintenanceOperationResultCodeTypesNone, MaintenanceOperationResultCodeTypesRetryLater} +} + +// OperatingSystemStateTypes enumerates the values for operating system state types. +type OperatingSystemStateTypes string + +const ( + // Generalized ... + Generalized OperatingSystemStateTypes = "Generalized" + // Specialized ... + Specialized OperatingSystemStateTypes = "Specialized" +) + +// PossibleOperatingSystemStateTypesValues returns an array of possible values for the OperatingSystemStateTypes const type. +func PossibleOperatingSystemStateTypesValues() []OperatingSystemStateTypes { + return []OperatingSystemStateTypes{Generalized, Specialized} +} + +// OperatingSystemTypes enumerates the values for operating system types. +type OperatingSystemTypes string + +const ( + // Linux ... + Linux OperatingSystemTypes = "Linux" + // Windows ... + Windows OperatingSystemTypes = "Windows" +) + +// PossibleOperatingSystemTypesValues returns an array of possible values for the OperatingSystemTypes const type. +func PossibleOperatingSystemTypesValues() []OperatingSystemTypes { + return []OperatingSystemTypes{Linux, Windows} +} + +// PassNames enumerates the values for pass names. +type PassNames string + +const ( + // OobeSystem ... + OobeSystem PassNames = "OobeSystem" +) + +// PossiblePassNamesValues returns an array of possible values for the PassNames const type. +func PossiblePassNamesValues() []PassNames { + return []PassNames{OobeSystem} +} + +// ProtocolTypes enumerates the values for protocol types. +type ProtocolTypes string + +const ( + // HTTP ... + HTTP ProtocolTypes = "Http" + // HTTPS ... + HTTPS ProtocolTypes = "Https" +) + +// PossibleProtocolTypesValues returns an array of possible values for the ProtocolTypes const type. +func PossibleProtocolTypesValues() []ProtocolTypes { + return []ProtocolTypes{HTTP, HTTPS} +} + +// ProvisioningState enumerates the values for provisioning state. +type ProvisioningState string + +const ( + // ProvisioningStateCreating ... + ProvisioningStateCreating ProvisioningState = "Creating" + // ProvisioningStateDeleting ... + ProvisioningStateDeleting ProvisioningState = "Deleting" + // ProvisioningStateFailed ... + ProvisioningStateFailed ProvisioningState = "Failed" + // ProvisioningStateMigrating ... + ProvisioningStateMigrating ProvisioningState = "Migrating" + // ProvisioningStateSucceeded ... + ProvisioningStateSucceeded ProvisioningState = "Succeeded" + // ProvisioningStateUpdating ... + ProvisioningStateUpdating ProvisioningState = "Updating" +) + +// PossibleProvisioningStateValues returns an array of possible values for the ProvisioningState const type. +func PossibleProvisioningStateValues() []ProvisioningState { + return []ProvisioningState{ProvisioningStateCreating, ProvisioningStateDeleting, ProvisioningStateFailed, ProvisioningStateMigrating, ProvisioningStateSucceeded, ProvisioningStateUpdating} +} + +// ProvisioningState1 enumerates the values for provisioning state 1. +type ProvisioningState1 string + +const ( + // ProvisioningState1Creating ... + ProvisioningState1Creating ProvisioningState1 = "Creating" + // ProvisioningState1Deleting ... + ProvisioningState1Deleting ProvisioningState1 = "Deleting" + // ProvisioningState1Failed ... 
+ ProvisioningState1Failed ProvisioningState1 = "Failed" + // ProvisioningState1Migrating ... + ProvisioningState1Migrating ProvisioningState1 = "Migrating" + // ProvisioningState1Succeeded ... + ProvisioningState1Succeeded ProvisioningState1 = "Succeeded" + // ProvisioningState1Updating ... + ProvisioningState1Updating ProvisioningState1 = "Updating" +) + +// PossibleProvisioningState1Values returns an array of possible values for the ProvisioningState1 const type. +func PossibleProvisioningState1Values() []ProvisioningState1 { + return []ProvisioningState1{ProvisioningState1Creating, ProvisioningState1Deleting, ProvisioningState1Failed, ProvisioningState1Migrating, ProvisioningState1Succeeded, ProvisioningState1Updating} +} + +// ProvisioningState2 enumerates the values for provisioning state 2. +type ProvisioningState2 string + +const ( + // ProvisioningState2Creating ... + ProvisioningState2Creating ProvisioningState2 = "Creating" + // ProvisioningState2Deleting ... + ProvisioningState2Deleting ProvisioningState2 = "Deleting" + // ProvisioningState2Failed ... + ProvisioningState2Failed ProvisioningState2 = "Failed" + // ProvisioningState2Migrating ... + ProvisioningState2Migrating ProvisioningState2 = "Migrating" + // ProvisioningState2Succeeded ... + ProvisioningState2Succeeded ProvisioningState2 = "Succeeded" + // ProvisioningState2Updating ... + ProvisioningState2Updating ProvisioningState2 = "Updating" +) + +// PossibleProvisioningState2Values returns an array of possible values for the ProvisioningState2 const type. +func PossibleProvisioningState2Values() []ProvisioningState2 { + return []ProvisioningState2{ProvisioningState2Creating, ProvisioningState2Deleting, ProvisioningState2Failed, ProvisioningState2Migrating, ProvisioningState2Succeeded, ProvisioningState2Updating} +} + +// ReplicationState enumerates the values for replication state. +type ReplicationState string + +const ( + // ReplicationStateCompleted ... + ReplicationStateCompleted ReplicationState = "Completed" + // ReplicationStateFailed ... + ReplicationStateFailed ReplicationState = "Failed" + // ReplicationStateReplicating ... + ReplicationStateReplicating ReplicationState = "Replicating" + // ReplicationStateUnknown ... + ReplicationStateUnknown ReplicationState = "Unknown" +) + +// PossibleReplicationStateValues returns an array of possible values for the ReplicationState const type. +func PossibleReplicationStateValues() []ReplicationState { + return []ReplicationState{ReplicationStateCompleted, ReplicationStateFailed, ReplicationStateReplicating, ReplicationStateUnknown} +} + +// ReplicationStatusTypes enumerates the values for replication status types. +type ReplicationStatusTypes string + +const ( + // ReplicationStatusTypesReplicationStatus ... + ReplicationStatusTypesReplicationStatus ReplicationStatusTypes = "ReplicationStatus" +) + +// PossibleReplicationStatusTypesValues returns an array of possible values for the ReplicationStatusTypes const type. +func PossibleReplicationStatusTypesValues() []ReplicationStatusTypes { + return []ReplicationStatusTypes{ReplicationStatusTypesReplicationStatus} +} + +// ResourceIdentityType enumerates the values for resource identity type. +type ResourceIdentityType string + +const ( + // ResourceIdentityTypeNone ... + ResourceIdentityTypeNone ResourceIdentityType = "None" + // ResourceIdentityTypeSystemAssigned ... + ResourceIdentityTypeSystemAssigned ResourceIdentityType = "SystemAssigned" + // ResourceIdentityTypeSystemAssignedUserAssigned ... 
+ ResourceIdentityTypeSystemAssignedUserAssigned ResourceIdentityType = "SystemAssigned, UserAssigned" + // ResourceIdentityTypeUserAssigned ... + ResourceIdentityTypeUserAssigned ResourceIdentityType = "UserAssigned" +) + +// PossibleResourceIdentityTypeValues returns an array of possible values for the ResourceIdentityType const type. +func PossibleResourceIdentityTypeValues() []ResourceIdentityType { + return []ResourceIdentityType{ResourceIdentityTypeNone, ResourceIdentityTypeSystemAssigned, ResourceIdentityTypeSystemAssignedUserAssigned, ResourceIdentityTypeUserAssigned} +} + +// ResourceSkuCapacityScaleType enumerates the values for resource sku capacity scale type. +type ResourceSkuCapacityScaleType string + +const ( + // ResourceSkuCapacityScaleTypeAutomatic ... + ResourceSkuCapacityScaleTypeAutomatic ResourceSkuCapacityScaleType = "Automatic" + // ResourceSkuCapacityScaleTypeManual ... + ResourceSkuCapacityScaleTypeManual ResourceSkuCapacityScaleType = "Manual" + // ResourceSkuCapacityScaleTypeNone ... + ResourceSkuCapacityScaleTypeNone ResourceSkuCapacityScaleType = "None" +) + +// PossibleResourceSkuCapacityScaleTypeValues returns an array of possible values for the ResourceSkuCapacityScaleType const type. +func PossibleResourceSkuCapacityScaleTypeValues() []ResourceSkuCapacityScaleType { + return []ResourceSkuCapacityScaleType{ResourceSkuCapacityScaleTypeAutomatic, ResourceSkuCapacityScaleTypeManual, ResourceSkuCapacityScaleTypeNone} +} + +// ResourceSkuRestrictionsReasonCode enumerates the values for resource sku restrictions reason code. +type ResourceSkuRestrictionsReasonCode string + +const ( + // NotAvailableForSubscription ... + NotAvailableForSubscription ResourceSkuRestrictionsReasonCode = "NotAvailableForSubscription" + // QuotaID ... + QuotaID ResourceSkuRestrictionsReasonCode = "QuotaId" +) + +// PossibleResourceSkuRestrictionsReasonCodeValues returns an array of possible values for the ResourceSkuRestrictionsReasonCode const type. +func PossibleResourceSkuRestrictionsReasonCodeValues() []ResourceSkuRestrictionsReasonCode { + return []ResourceSkuRestrictionsReasonCode{NotAvailableForSubscription, QuotaID} +} + +// ResourceSkuRestrictionsType enumerates the values for resource sku restrictions type. +type ResourceSkuRestrictionsType string + +const ( + // Location ... + Location ResourceSkuRestrictionsType = "Location" + // Zone ... + Zone ResourceSkuRestrictionsType = "Zone" +) + +// PossibleResourceSkuRestrictionsTypeValues returns an array of possible values for the ResourceSkuRestrictionsType const type. +func PossibleResourceSkuRestrictionsTypeValues() []ResourceSkuRestrictionsType { + return []ResourceSkuRestrictionsType{Location, Zone} +} + +// RollingUpgradeActionType enumerates the values for rolling upgrade action type. +type RollingUpgradeActionType string + +const ( + // Cancel ... + Cancel RollingUpgradeActionType = "Cancel" + // Start ... + Start RollingUpgradeActionType = "Start" +) + +// PossibleRollingUpgradeActionTypeValues returns an array of possible values for the RollingUpgradeActionType const type. +func PossibleRollingUpgradeActionTypeValues() []RollingUpgradeActionType { + return []RollingUpgradeActionType{Cancel, Start} +} + +// RollingUpgradeStatusCode enumerates the values for rolling upgrade status code. +type RollingUpgradeStatusCode string + +const ( + // RollingUpgradeStatusCodeCancelled ... + RollingUpgradeStatusCodeCancelled RollingUpgradeStatusCode = "Cancelled" + // RollingUpgradeStatusCodeCompleted ... 
+ RollingUpgradeStatusCodeCompleted RollingUpgradeStatusCode = "Completed" + // RollingUpgradeStatusCodeFaulted ... + RollingUpgradeStatusCodeFaulted RollingUpgradeStatusCode = "Faulted" + // RollingUpgradeStatusCodeRollingForward ... + RollingUpgradeStatusCodeRollingForward RollingUpgradeStatusCode = "RollingForward" +) + +// PossibleRollingUpgradeStatusCodeValues returns an array of possible values for the RollingUpgradeStatusCode const type. +func PossibleRollingUpgradeStatusCodeValues() []RollingUpgradeStatusCode { + return []RollingUpgradeStatusCode{RollingUpgradeStatusCodeCancelled, RollingUpgradeStatusCodeCompleted, RollingUpgradeStatusCodeFaulted, RollingUpgradeStatusCodeRollingForward} +} + +// SettingNames enumerates the values for setting names. +type SettingNames string + +const ( + // AutoLogon ... + AutoLogon SettingNames = "AutoLogon" + // FirstLogonCommands ... + FirstLogonCommands SettingNames = "FirstLogonCommands" +) + +// PossibleSettingNamesValues returns an array of possible values for the SettingNames const type. +func PossibleSettingNamesValues() []SettingNames { + return []SettingNames{AutoLogon, FirstLogonCommands} +} + +// SnapshotStorageAccountTypes enumerates the values for snapshot storage account types. +type SnapshotStorageAccountTypes string + +const ( + // SnapshotStorageAccountTypesPremiumLRS ... + SnapshotStorageAccountTypesPremiumLRS SnapshotStorageAccountTypes = "Premium_LRS" + // SnapshotStorageAccountTypesStandardLRS ... + SnapshotStorageAccountTypesStandardLRS SnapshotStorageAccountTypes = "Standard_LRS" + // SnapshotStorageAccountTypesStandardZRS ... + SnapshotStorageAccountTypesStandardZRS SnapshotStorageAccountTypes = "Standard_ZRS" +) + +// PossibleSnapshotStorageAccountTypesValues returns an array of possible values for the SnapshotStorageAccountTypes const type. +func PossibleSnapshotStorageAccountTypesValues() []SnapshotStorageAccountTypes { + return []SnapshotStorageAccountTypes{SnapshotStorageAccountTypesPremiumLRS, SnapshotStorageAccountTypesStandardLRS, SnapshotStorageAccountTypesStandardZRS} +} + +// StatusLevelTypes enumerates the values for status level types. +type StatusLevelTypes string + +const ( + // Error ... + Error StatusLevelTypes = "Error" + // Info ... + Info StatusLevelTypes = "Info" + // Warning ... + Warning StatusLevelTypes = "Warning" +) + +// PossibleStatusLevelTypesValues returns an array of possible values for the StatusLevelTypes const type. +func PossibleStatusLevelTypesValues() []StatusLevelTypes { + return []StatusLevelTypes{Error, Info, Warning} +} + +// StorageAccountTypes enumerates the values for storage account types. +type StorageAccountTypes string + +const ( + // StorageAccountTypesPremiumLRS ... + StorageAccountTypesPremiumLRS StorageAccountTypes = "Premium_LRS" + // StorageAccountTypesStandardLRS ... + StorageAccountTypesStandardLRS StorageAccountTypes = "Standard_LRS" + // StorageAccountTypesStandardSSDLRS ... + StorageAccountTypesStandardSSDLRS StorageAccountTypes = "StandardSSD_LRS" + // StorageAccountTypesUltraSSDLRS ... + StorageAccountTypesUltraSSDLRS StorageAccountTypes = "UltraSSD_LRS" +) + +// PossibleStorageAccountTypesValues returns an array of possible values for the StorageAccountTypes const type. +func PossibleStorageAccountTypesValues() []StorageAccountTypes { + return []StorageAccountTypes{StorageAccountTypesPremiumLRS, StorageAccountTypesStandardLRS, StorageAccountTypesStandardSSDLRS, StorageAccountTypesUltraSSDLRS} +} + +// UpgradeMode enumerates the values for upgrade mode. 
+type UpgradeMode string + +const ( + // Automatic ... + Automatic UpgradeMode = "Automatic" + // Manual ... + Manual UpgradeMode = "Manual" + // Rolling ... + Rolling UpgradeMode = "Rolling" +) + +// PossibleUpgradeModeValues returns an array of possible values for the UpgradeMode const type. +func PossibleUpgradeModeValues() []UpgradeMode { + return []UpgradeMode{Automatic, Manual, Rolling} +} + +// UpgradeOperationInvoker enumerates the values for upgrade operation invoker. +type UpgradeOperationInvoker string + +const ( + // UpgradeOperationInvokerPlatform ... + UpgradeOperationInvokerPlatform UpgradeOperationInvoker = "Platform" + // UpgradeOperationInvokerUnknown ... + UpgradeOperationInvokerUnknown UpgradeOperationInvoker = "Unknown" + // UpgradeOperationInvokerUser ... + UpgradeOperationInvokerUser UpgradeOperationInvoker = "User" +) + +// PossibleUpgradeOperationInvokerValues returns an array of possible values for the UpgradeOperationInvoker const type. +func PossibleUpgradeOperationInvokerValues() []UpgradeOperationInvoker { + return []UpgradeOperationInvoker{UpgradeOperationInvokerPlatform, UpgradeOperationInvokerUnknown, UpgradeOperationInvokerUser} +} + +// UpgradeState enumerates the values for upgrade state. +type UpgradeState string + +const ( + // UpgradeStateCancelled ... + UpgradeStateCancelled UpgradeState = "Cancelled" + // UpgradeStateCompleted ... + UpgradeStateCompleted UpgradeState = "Completed" + // UpgradeStateFaulted ... + UpgradeStateFaulted UpgradeState = "Faulted" + // UpgradeStateRollingForward ... + UpgradeStateRollingForward UpgradeState = "RollingForward" +) + +// PossibleUpgradeStateValues returns an array of possible values for the UpgradeState const type. +func PossibleUpgradeStateValues() []UpgradeState { + return []UpgradeState{UpgradeStateCancelled, UpgradeStateCompleted, UpgradeStateFaulted, UpgradeStateRollingForward} +} + +// VirtualMachineEvictionPolicyTypes enumerates the values for virtual machine eviction policy types. +type VirtualMachineEvictionPolicyTypes string + +const ( + // Deallocate ... + Deallocate VirtualMachineEvictionPolicyTypes = "Deallocate" + // Delete ... + Delete VirtualMachineEvictionPolicyTypes = "Delete" +) + +// PossibleVirtualMachineEvictionPolicyTypesValues returns an array of possible values for the VirtualMachineEvictionPolicyTypes const type. +func PossibleVirtualMachineEvictionPolicyTypesValues() []VirtualMachineEvictionPolicyTypes { + return []VirtualMachineEvictionPolicyTypes{Deallocate, Delete} +} + +// VirtualMachinePriorityTypes enumerates the values for virtual machine priority types. +type VirtualMachinePriorityTypes string + +const ( + // Low ... + Low VirtualMachinePriorityTypes = "Low" + // Regular ... + Regular VirtualMachinePriorityTypes = "Regular" +) + +// PossibleVirtualMachinePriorityTypesValues returns an array of possible values for the VirtualMachinePriorityTypes const type. +func PossibleVirtualMachinePriorityTypesValues() []VirtualMachinePriorityTypes { + return []VirtualMachinePriorityTypes{Low, Regular} +} + +// VirtualMachineScaleSetSkuScaleType enumerates the values for virtual machine scale set sku scale type. +type VirtualMachineScaleSetSkuScaleType string + +const ( + // VirtualMachineScaleSetSkuScaleTypeAutomatic ... + VirtualMachineScaleSetSkuScaleTypeAutomatic VirtualMachineScaleSetSkuScaleType = "Automatic" + // VirtualMachineScaleSetSkuScaleTypeNone ... 
+ VirtualMachineScaleSetSkuScaleTypeNone VirtualMachineScaleSetSkuScaleType = "None" +) + +// PossibleVirtualMachineScaleSetSkuScaleTypeValues returns an array of possible values for the VirtualMachineScaleSetSkuScaleType const type. +func PossibleVirtualMachineScaleSetSkuScaleTypeValues() []VirtualMachineScaleSetSkuScaleType { + return []VirtualMachineScaleSetSkuScaleType{VirtualMachineScaleSetSkuScaleTypeAutomatic, VirtualMachineScaleSetSkuScaleTypeNone} +} + +// VirtualMachineSizeTypes enumerates the values for virtual machine size types. +type VirtualMachineSizeTypes string + +const ( + // VirtualMachineSizeTypesBasicA0 ... + VirtualMachineSizeTypesBasicA0 VirtualMachineSizeTypes = "Basic_A0" + // VirtualMachineSizeTypesBasicA1 ... + VirtualMachineSizeTypesBasicA1 VirtualMachineSizeTypes = "Basic_A1" + // VirtualMachineSizeTypesBasicA2 ... + VirtualMachineSizeTypesBasicA2 VirtualMachineSizeTypes = "Basic_A2" + // VirtualMachineSizeTypesBasicA3 ... + VirtualMachineSizeTypesBasicA3 VirtualMachineSizeTypes = "Basic_A3" + // VirtualMachineSizeTypesBasicA4 ... + VirtualMachineSizeTypesBasicA4 VirtualMachineSizeTypes = "Basic_A4" + // VirtualMachineSizeTypesStandardA0 ... + VirtualMachineSizeTypesStandardA0 VirtualMachineSizeTypes = "Standard_A0" + // VirtualMachineSizeTypesStandardA1 ... + VirtualMachineSizeTypesStandardA1 VirtualMachineSizeTypes = "Standard_A1" + // VirtualMachineSizeTypesStandardA10 ... + VirtualMachineSizeTypesStandardA10 VirtualMachineSizeTypes = "Standard_A10" + // VirtualMachineSizeTypesStandardA11 ... + VirtualMachineSizeTypesStandardA11 VirtualMachineSizeTypes = "Standard_A11" + // VirtualMachineSizeTypesStandardA1V2 ... + VirtualMachineSizeTypesStandardA1V2 VirtualMachineSizeTypes = "Standard_A1_v2" + // VirtualMachineSizeTypesStandardA2 ... + VirtualMachineSizeTypesStandardA2 VirtualMachineSizeTypes = "Standard_A2" + // VirtualMachineSizeTypesStandardA2mV2 ... + VirtualMachineSizeTypesStandardA2mV2 VirtualMachineSizeTypes = "Standard_A2m_v2" + // VirtualMachineSizeTypesStandardA2V2 ... + VirtualMachineSizeTypesStandardA2V2 VirtualMachineSizeTypes = "Standard_A2_v2" + // VirtualMachineSizeTypesStandardA3 ... + VirtualMachineSizeTypesStandardA3 VirtualMachineSizeTypes = "Standard_A3" + // VirtualMachineSizeTypesStandardA4 ... + VirtualMachineSizeTypesStandardA4 VirtualMachineSizeTypes = "Standard_A4" + // VirtualMachineSizeTypesStandardA4mV2 ... + VirtualMachineSizeTypesStandardA4mV2 VirtualMachineSizeTypes = "Standard_A4m_v2" + // VirtualMachineSizeTypesStandardA4V2 ... + VirtualMachineSizeTypesStandardA4V2 VirtualMachineSizeTypes = "Standard_A4_v2" + // VirtualMachineSizeTypesStandardA5 ... + VirtualMachineSizeTypesStandardA5 VirtualMachineSizeTypes = "Standard_A5" + // VirtualMachineSizeTypesStandardA6 ... + VirtualMachineSizeTypesStandardA6 VirtualMachineSizeTypes = "Standard_A6" + // VirtualMachineSizeTypesStandardA7 ... + VirtualMachineSizeTypesStandardA7 VirtualMachineSizeTypes = "Standard_A7" + // VirtualMachineSizeTypesStandardA8 ... + VirtualMachineSizeTypesStandardA8 VirtualMachineSizeTypes = "Standard_A8" + // VirtualMachineSizeTypesStandardA8mV2 ... + VirtualMachineSizeTypesStandardA8mV2 VirtualMachineSizeTypes = "Standard_A8m_v2" + // VirtualMachineSizeTypesStandardA8V2 ... + VirtualMachineSizeTypesStandardA8V2 VirtualMachineSizeTypes = "Standard_A8_v2" + // VirtualMachineSizeTypesStandardA9 ... + VirtualMachineSizeTypesStandardA9 VirtualMachineSizeTypes = "Standard_A9" + // VirtualMachineSizeTypesStandardB1ms ... 
+ VirtualMachineSizeTypesStandardB1ms VirtualMachineSizeTypes = "Standard_B1ms" + // VirtualMachineSizeTypesStandardB1s ... + VirtualMachineSizeTypesStandardB1s VirtualMachineSizeTypes = "Standard_B1s" + // VirtualMachineSizeTypesStandardB2ms ... + VirtualMachineSizeTypesStandardB2ms VirtualMachineSizeTypes = "Standard_B2ms" + // VirtualMachineSizeTypesStandardB2s ... + VirtualMachineSizeTypesStandardB2s VirtualMachineSizeTypes = "Standard_B2s" + // VirtualMachineSizeTypesStandardB4ms ... + VirtualMachineSizeTypesStandardB4ms VirtualMachineSizeTypes = "Standard_B4ms" + // VirtualMachineSizeTypesStandardB8ms ... + VirtualMachineSizeTypesStandardB8ms VirtualMachineSizeTypes = "Standard_B8ms" + // VirtualMachineSizeTypesStandardD1 ... + VirtualMachineSizeTypesStandardD1 VirtualMachineSizeTypes = "Standard_D1" + // VirtualMachineSizeTypesStandardD11 ... + VirtualMachineSizeTypesStandardD11 VirtualMachineSizeTypes = "Standard_D11" + // VirtualMachineSizeTypesStandardD11V2 ... + VirtualMachineSizeTypesStandardD11V2 VirtualMachineSizeTypes = "Standard_D11_v2" + // VirtualMachineSizeTypesStandardD12 ... + VirtualMachineSizeTypesStandardD12 VirtualMachineSizeTypes = "Standard_D12" + // VirtualMachineSizeTypesStandardD12V2 ... + VirtualMachineSizeTypesStandardD12V2 VirtualMachineSizeTypes = "Standard_D12_v2" + // VirtualMachineSizeTypesStandardD13 ... + VirtualMachineSizeTypesStandardD13 VirtualMachineSizeTypes = "Standard_D13" + // VirtualMachineSizeTypesStandardD13V2 ... + VirtualMachineSizeTypesStandardD13V2 VirtualMachineSizeTypes = "Standard_D13_v2" + // VirtualMachineSizeTypesStandardD14 ... + VirtualMachineSizeTypesStandardD14 VirtualMachineSizeTypes = "Standard_D14" + // VirtualMachineSizeTypesStandardD14V2 ... + VirtualMachineSizeTypesStandardD14V2 VirtualMachineSizeTypes = "Standard_D14_v2" + // VirtualMachineSizeTypesStandardD15V2 ... + VirtualMachineSizeTypesStandardD15V2 VirtualMachineSizeTypes = "Standard_D15_v2" + // VirtualMachineSizeTypesStandardD16sV3 ... + VirtualMachineSizeTypesStandardD16sV3 VirtualMachineSizeTypes = "Standard_D16s_v3" + // VirtualMachineSizeTypesStandardD16V3 ... + VirtualMachineSizeTypesStandardD16V3 VirtualMachineSizeTypes = "Standard_D16_v3" + // VirtualMachineSizeTypesStandardD1V2 ... + VirtualMachineSizeTypesStandardD1V2 VirtualMachineSizeTypes = "Standard_D1_v2" + // VirtualMachineSizeTypesStandardD2 ... + VirtualMachineSizeTypesStandardD2 VirtualMachineSizeTypes = "Standard_D2" + // VirtualMachineSizeTypesStandardD2sV3 ... + VirtualMachineSizeTypesStandardD2sV3 VirtualMachineSizeTypes = "Standard_D2s_v3" + // VirtualMachineSizeTypesStandardD2V2 ... + VirtualMachineSizeTypesStandardD2V2 VirtualMachineSizeTypes = "Standard_D2_v2" + // VirtualMachineSizeTypesStandardD2V3 ... + VirtualMachineSizeTypesStandardD2V3 VirtualMachineSizeTypes = "Standard_D2_v3" + // VirtualMachineSizeTypesStandardD3 ... + VirtualMachineSizeTypesStandardD3 VirtualMachineSizeTypes = "Standard_D3" + // VirtualMachineSizeTypesStandardD32sV3 ... + VirtualMachineSizeTypesStandardD32sV3 VirtualMachineSizeTypes = "Standard_D32s_v3" + // VirtualMachineSizeTypesStandardD32V3 ... + VirtualMachineSizeTypesStandardD32V3 VirtualMachineSizeTypes = "Standard_D32_v3" + // VirtualMachineSizeTypesStandardD3V2 ... + VirtualMachineSizeTypesStandardD3V2 VirtualMachineSizeTypes = "Standard_D3_v2" + // VirtualMachineSizeTypesStandardD4 ... + VirtualMachineSizeTypesStandardD4 VirtualMachineSizeTypes = "Standard_D4" + // VirtualMachineSizeTypesStandardD4sV3 ... 
+ VirtualMachineSizeTypesStandardD4sV3 VirtualMachineSizeTypes = "Standard_D4s_v3" + // VirtualMachineSizeTypesStandardD4V2 ... + VirtualMachineSizeTypesStandardD4V2 VirtualMachineSizeTypes = "Standard_D4_v2" + // VirtualMachineSizeTypesStandardD4V3 ... + VirtualMachineSizeTypesStandardD4V3 VirtualMachineSizeTypes = "Standard_D4_v3" + // VirtualMachineSizeTypesStandardD5V2 ... + VirtualMachineSizeTypesStandardD5V2 VirtualMachineSizeTypes = "Standard_D5_v2" + // VirtualMachineSizeTypesStandardD64sV3 ... + VirtualMachineSizeTypesStandardD64sV3 VirtualMachineSizeTypes = "Standard_D64s_v3" + // VirtualMachineSizeTypesStandardD64V3 ... + VirtualMachineSizeTypesStandardD64V3 VirtualMachineSizeTypes = "Standard_D64_v3" + // VirtualMachineSizeTypesStandardD8sV3 ... + VirtualMachineSizeTypesStandardD8sV3 VirtualMachineSizeTypes = "Standard_D8s_v3" + // VirtualMachineSizeTypesStandardD8V3 ... + VirtualMachineSizeTypesStandardD8V3 VirtualMachineSizeTypes = "Standard_D8_v3" + // VirtualMachineSizeTypesStandardDS1 ... + VirtualMachineSizeTypesStandardDS1 VirtualMachineSizeTypes = "Standard_DS1" + // VirtualMachineSizeTypesStandardDS11 ... + VirtualMachineSizeTypesStandardDS11 VirtualMachineSizeTypes = "Standard_DS11" + // VirtualMachineSizeTypesStandardDS11V2 ... + VirtualMachineSizeTypesStandardDS11V2 VirtualMachineSizeTypes = "Standard_DS11_v2" + // VirtualMachineSizeTypesStandardDS12 ... + VirtualMachineSizeTypesStandardDS12 VirtualMachineSizeTypes = "Standard_DS12" + // VirtualMachineSizeTypesStandardDS12V2 ... + VirtualMachineSizeTypesStandardDS12V2 VirtualMachineSizeTypes = "Standard_DS12_v2" + // VirtualMachineSizeTypesStandardDS13 ... + VirtualMachineSizeTypesStandardDS13 VirtualMachineSizeTypes = "Standard_DS13" + // VirtualMachineSizeTypesStandardDS132V2 ... + VirtualMachineSizeTypesStandardDS132V2 VirtualMachineSizeTypes = "Standard_DS13-2_v2" + // VirtualMachineSizeTypesStandardDS134V2 ... + VirtualMachineSizeTypesStandardDS134V2 VirtualMachineSizeTypes = "Standard_DS13-4_v2" + // VirtualMachineSizeTypesStandardDS13V2 ... + VirtualMachineSizeTypesStandardDS13V2 VirtualMachineSizeTypes = "Standard_DS13_v2" + // VirtualMachineSizeTypesStandardDS14 ... + VirtualMachineSizeTypesStandardDS14 VirtualMachineSizeTypes = "Standard_DS14" + // VirtualMachineSizeTypesStandardDS144V2 ... + VirtualMachineSizeTypesStandardDS144V2 VirtualMachineSizeTypes = "Standard_DS14-4_v2" + // VirtualMachineSizeTypesStandardDS148V2 ... + VirtualMachineSizeTypesStandardDS148V2 VirtualMachineSizeTypes = "Standard_DS14-8_v2" + // VirtualMachineSizeTypesStandardDS14V2 ... + VirtualMachineSizeTypesStandardDS14V2 VirtualMachineSizeTypes = "Standard_DS14_v2" + // VirtualMachineSizeTypesStandardDS15V2 ... + VirtualMachineSizeTypesStandardDS15V2 VirtualMachineSizeTypes = "Standard_DS15_v2" + // VirtualMachineSizeTypesStandardDS1V2 ... + VirtualMachineSizeTypesStandardDS1V2 VirtualMachineSizeTypes = "Standard_DS1_v2" + // VirtualMachineSizeTypesStandardDS2 ... + VirtualMachineSizeTypesStandardDS2 VirtualMachineSizeTypes = "Standard_DS2" + // VirtualMachineSizeTypesStandardDS2V2 ... + VirtualMachineSizeTypesStandardDS2V2 VirtualMachineSizeTypes = "Standard_DS2_v2" + // VirtualMachineSizeTypesStandardDS3 ... + VirtualMachineSizeTypesStandardDS3 VirtualMachineSizeTypes = "Standard_DS3" + // VirtualMachineSizeTypesStandardDS3V2 ... + VirtualMachineSizeTypesStandardDS3V2 VirtualMachineSizeTypes = "Standard_DS3_v2" + // VirtualMachineSizeTypesStandardDS4 ... 
+ VirtualMachineSizeTypesStandardDS4 VirtualMachineSizeTypes = "Standard_DS4" + // VirtualMachineSizeTypesStandardDS4V2 ... + VirtualMachineSizeTypesStandardDS4V2 VirtualMachineSizeTypes = "Standard_DS4_v2" + // VirtualMachineSizeTypesStandardDS5V2 ... + VirtualMachineSizeTypesStandardDS5V2 VirtualMachineSizeTypes = "Standard_DS5_v2" + // VirtualMachineSizeTypesStandardE16sV3 ... + VirtualMachineSizeTypesStandardE16sV3 VirtualMachineSizeTypes = "Standard_E16s_v3" + // VirtualMachineSizeTypesStandardE16V3 ... + VirtualMachineSizeTypesStandardE16V3 VirtualMachineSizeTypes = "Standard_E16_v3" + // VirtualMachineSizeTypesStandardE2sV3 ... + VirtualMachineSizeTypesStandardE2sV3 VirtualMachineSizeTypes = "Standard_E2s_v3" + // VirtualMachineSizeTypesStandardE2V3 ... + VirtualMachineSizeTypesStandardE2V3 VirtualMachineSizeTypes = "Standard_E2_v3" + // VirtualMachineSizeTypesStandardE3216V3 ... + VirtualMachineSizeTypesStandardE3216V3 VirtualMachineSizeTypes = "Standard_E32-16_v3" + // VirtualMachineSizeTypesStandardE328sV3 ... + VirtualMachineSizeTypesStandardE328sV3 VirtualMachineSizeTypes = "Standard_E32-8s_v3" + // VirtualMachineSizeTypesStandardE32sV3 ... + VirtualMachineSizeTypesStandardE32sV3 VirtualMachineSizeTypes = "Standard_E32s_v3" + // VirtualMachineSizeTypesStandardE32V3 ... + VirtualMachineSizeTypesStandardE32V3 VirtualMachineSizeTypes = "Standard_E32_v3" + // VirtualMachineSizeTypesStandardE4sV3 ... + VirtualMachineSizeTypesStandardE4sV3 VirtualMachineSizeTypes = "Standard_E4s_v3" + // VirtualMachineSizeTypesStandardE4V3 ... + VirtualMachineSizeTypesStandardE4V3 VirtualMachineSizeTypes = "Standard_E4_v3" + // VirtualMachineSizeTypesStandardE6416sV3 ... + VirtualMachineSizeTypesStandardE6416sV3 VirtualMachineSizeTypes = "Standard_E64-16s_v3" + // VirtualMachineSizeTypesStandardE6432sV3 ... + VirtualMachineSizeTypesStandardE6432sV3 VirtualMachineSizeTypes = "Standard_E64-32s_v3" + // VirtualMachineSizeTypesStandardE64sV3 ... + VirtualMachineSizeTypesStandardE64sV3 VirtualMachineSizeTypes = "Standard_E64s_v3" + // VirtualMachineSizeTypesStandardE64V3 ... + VirtualMachineSizeTypesStandardE64V3 VirtualMachineSizeTypes = "Standard_E64_v3" + // VirtualMachineSizeTypesStandardE8sV3 ... + VirtualMachineSizeTypesStandardE8sV3 VirtualMachineSizeTypes = "Standard_E8s_v3" + // VirtualMachineSizeTypesStandardE8V3 ... + VirtualMachineSizeTypesStandardE8V3 VirtualMachineSizeTypes = "Standard_E8_v3" + // VirtualMachineSizeTypesStandardF1 ... + VirtualMachineSizeTypesStandardF1 VirtualMachineSizeTypes = "Standard_F1" + // VirtualMachineSizeTypesStandardF16 ... + VirtualMachineSizeTypesStandardF16 VirtualMachineSizeTypes = "Standard_F16" + // VirtualMachineSizeTypesStandardF16s ... + VirtualMachineSizeTypesStandardF16s VirtualMachineSizeTypes = "Standard_F16s" + // VirtualMachineSizeTypesStandardF16sV2 ... + VirtualMachineSizeTypesStandardF16sV2 VirtualMachineSizeTypes = "Standard_F16s_v2" + // VirtualMachineSizeTypesStandardF1s ... + VirtualMachineSizeTypesStandardF1s VirtualMachineSizeTypes = "Standard_F1s" + // VirtualMachineSizeTypesStandardF2 ... + VirtualMachineSizeTypesStandardF2 VirtualMachineSizeTypes = "Standard_F2" + // VirtualMachineSizeTypesStandardF2s ... + VirtualMachineSizeTypesStandardF2s VirtualMachineSizeTypes = "Standard_F2s" + // VirtualMachineSizeTypesStandardF2sV2 ... + VirtualMachineSizeTypesStandardF2sV2 VirtualMachineSizeTypes = "Standard_F2s_v2" + // VirtualMachineSizeTypesStandardF32sV2 ... 
+ VirtualMachineSizeTypesStandardF32sV2 VirtualMachineSizeTypes = "Standard_F32s_v2" + // VirtualMachineSizeTypesStandardF4 ... + VirtualMachineSizeTypesStandardF4 VirtualMachineSizeTypes = "Standard_F4" + // VirtualMachineSizeTypesStandardF4s ... + VirtualMachineSizeTypesStandardF4s VirtualMachineSizeTypes = "Standard_F4s" + // VirtualMachineSizeTypesStandardF4sV2 ... + VirtualMachineSizeTypesStandardF4sV2 VirtualMachineSizeTypes = "Standard_F4s_v2" + // VirtualMachineSizeTypesStandardF64sV2 ... + VirtualMachineSizeTypesStandardF64sV2 VirtualMachineSizeTypes = "Standard_F64s_v2" + // VirtualMachineSizeTypesStandardF72sV2 ... + VirtualMachineSizeTypesStandardF72sV2 VirtualMachineSizeTypes = "Standard_F72s_v2" + // VirtualMachineSizeTypesStandardF8 ... + VirtualMachineSizeTypesStandardF8 VirtualMachineSizeTypes = "Standard_F8" + // VirtualMachineSizeTypesStandardF8s ... + VirtualMachineSizeTypesStandardF8s VirtualMachineSizeTypes = "Standard_F8s" + // VirtualMachineSizeTypesStandardF8sV2 ... + VirtualMachineSizeTypesStandardF8sV2 VirtualMachineSizeTypes = "Standard_F8s_v2" + // VirtualMachineSizeTypesStandardG1 ... + VirtualMachineSizeTypesStandardG1 VirtualMachineSizeTypes = "Standard_G1" + // VirtualMachineSizeTypesStandardG2 ... + VirtualMachineSizeTypesStandardG2 VirtualMachineSizeTypes = "Standard_G2" + // VirtualMachineSizeTypesStandardG3 ... + VirtualMachineSizeTypesStandardG3 VirtualMachineSizeTypes = "Standard_G3" + // VirtualMachineSizeTypesStandardG4 ... + VirtualMachineSizeTypesStandardG4 VirtualMachineSizeTypes = "Standard_G4" + // VirtualMachineSizeTypesStandardG5 ... + VirtualMachineSizeTypesStandardG5 VirtualMachineSizeTypes = "Standard_G5" + // VirtualMachineSizeTypesStandardGS1 ... + VirtualMachineSizeTypesStandardGS1 VirtualMachineSizeTypes = "Standard_GS1" + // VirtualMachineSizeTypesStandardGS2 ... + VirtualMachineSizeTypesStandardGS2 VirtualMachineSizeTypes = "Standard_GS2" + // VirtualMachineSizeTypesStandardGS3 ... + VirtualMachineSizeTypesStandardGS3 VirtualMachineSizeTypes = "Standard_GS3" + // VirtualMachineSizeTypesStandardGS4 ... + VirtualMachineSizeTypesStandardGS4 VirtualMachineSizeTypes = "Standard_GS4" + // VirtualMachineSizeTypesStandardGS44 ... + VirtualMachineSizeTypesStandardGS44 VirtualMachineSizeTypes = "Standard_GS4-4" + // VirtualMachineSizeTypesStandardGS48 ... + VirtualMachineSizeTypesStandardGS48 VirtualMachineSizeTypes = "Standard_GS4-8" + // VirtualMachineSizeTypesStandardGS5 ... + VirtualMachineSizeTypesStandardGS5 VirtualMachineSizeTypes = "Standard_GS5" + // VirtualMachineSizeTypesStandardGS516 ... + VirtualMachineSizeTypesStandardGS516 VirtualMachineSizeTypes = "Standard_GS5-16" + // VirtualMachineSizeTypesStandardGS58 ... + VirtualMachineSizeTypesStandardGS58 VirtualMachineSizeTypes = "Standard_GS5-8" + // VirtualMachineSizeTypesStandardH16 ... + VirtualMachineSizeTypesStandardH16 VirtualMachineSizeTypes = "Standard_H16" + // VirtualMachineSizeTypesStandardH16m ... + VirtualMachineSizeTypesStandardH16m VirtualMachineSizeTypes = "Standard_H16m" + // VirtualMachineSizeTypesStandardH16mr ... + VirtualMachineSizeTypesStandardH16mr VirtualMachineSizeTypes = "Standard_H16mr" + // VirtualMachineSizeTypesStandardH16r ... + VirtualMachineSizeTypesStandardH16r VirtualMachineSizeTypes = "Standard_H16r" + // VirtualMachineSizeTypesStandardH8 ... + VirtualMachineSizeTypesStandardH8 VirtualMachineSizeTypes = "Standard_H8" + // VirtualMachineSizeTypesStandardH8m ... 
+ VirtualMachineSizeTypesStandardH8m VirtualMachineSizeTypes = "Standard_H8m" + // VirtualMachineSizeTypesStandardL16s ... + VirtualMachineSizeTypesStandardL16s VirtualMachineSizeTypes = "Standard_L16s" + // VirtualMachineSizeTypesStandardL32s ... + VirtualMachineSizeTypesStandardL32s VirtualMachineSizeTypes = "Standard_L32s" + // VirtualMachineSizeTypesStandardL4s ... + VirtualMachineSizeTypesStandardL4s VirtualMachineSizeTypes = "Standard_L4s" + // VirtualMachineSizeTypesStandardL8s ... + VirtualMachineSizeTypesStandardL8s VirtualMachineSizeTypes = "Standard_L8s" + // VirtualMachineSizeTypesStandardM12832ms ... + VirtualMachineSizeTypesStandardM12832ms VirtualMachineSizeTypes = "Standard_M128-32ms" + // VirtualMachineSizeTypesStandardM12864ms ... + VirtualMachineSizeTypesStandardM12864ms VirtualMachineSizeTypes = "Standard_M128-64ms" + // VirtualMachineSizeTypesStandardM128ms ... + VirtualMachineSizeTypesStandardM128ms VirtualMachineSizeTypes = "Standard_M128ms" + // VirtualMachineSizeTypesStandardM128s ... + VirtualMachineSizeTypesStandardM128s VirtualMachineSizeTypes = "Standard_M128s" + // VirtualMachineSizeTypesStandardM6416ms ... + VirtualMachineSizeTypesStandardM6416ms VirtualMachineSizeTypes = "Standard_M64-16ms" + // VirtualMachineSizeTypesStandardM6432ms ... + VirtualMachineSizeTypesStandardM6432ms VirtualMachineSizeTypes = "Standard_M64-32ms" + // VirtualMachineSizeTypesStandardM64ms ... + VirtualMachineSizeTypesStandardM64ms VirtualMachineSizeTypes = "Standard_M64ms" + // VirtualMachineSizeTypesStandardM64s ... + VirtualMachineSizeTypesStandardM64s VirtualMachineSizeTypes = "Standard_M64s" + // VirtualMachineSizeTypesStandardNC12 ... + VirtualMachineSizeTypesStandardNC12 VirtualMachineSizeTypes = "Standard_NC12" + // VirtualMachineSizeTypesStandardNC12sV2 ... + VirtualMachineSizeTypesStandardNC12sV2 VirtualMachineSizeTypes = "Standard_NC12s_v2" + // VirtualMachineSizeTypesStandardNC12sV3 ... + VirtualMachineSizeTypesStandardNC12sV3 VirtualMachineSizeTypes = "Standard_NC12s_v3" + // VirtualMachineSizeTypesStandardNC24 ... + VirtualMachineSizeTypesStandardNC24 VirtualMachineSizeTypes = "Standard_NC24" + // VirtualMachineSizeTypesStandardNC24r ... + VirtualMachineSizeTypesStandardNC24r VirtualMachineSizeTypes = "Standard_NC24r" + // VirtualMachineSizeTypesStandardNC24rsV2 ... + VirtualMachineSizeTypesStandardNC24rsV2 VirtualMachineSizeTypes = "Standard_NC24rs_v2" + // VirtualMachineSizeTypesStandardNC24rsV3 ... + VirtualMachineSizeTypesStandardNC24rsV3 VirtualMachineSizeTypes = "Standard_NC24rs_v3" + // VirtualMachineSizeTypesStandardNC24sV2 ... + VirtualMachineSizeTypesStandardNC24sV2 VirtualMachineSizeTypes = "Standard_NC24s_v2" + // VirtualMachineSizeTypesStandardNC24sV3 ... + VirtualMachineSizeTypesStandardNC24sV3 VirtualMachineSizeTypes = "Standard_NC24s_v3" + // VirtualMachineSizeTypesStandardNC6 ... + VirtualMachineSizeTypesStandardNC6 VirtualMachineSizeTypes = "Standard_NC6" + // VirtualMachineSizeTypesStandardNC6sV2 ... + VirtualMachineSizeTypesStandardNC6sV2 VirtualMachineSizeTypes = "Standard_NC6s_v2" + // VirtualMachineSizeTypesStandardNC6sV3 ... + VirtualMachineSizeTypesStandardNC6sV3 VirtualMachineSizeTypes = "Standard_NC6s_v3" + // VirtualMachineSizeTypesStandardND12s ... + VirtualMachineSizeTypesStandardND12s VirtualMachineSizeTypes = "Standard_ND12s" + // VirtualMachineSizeTypesStandardND24rs ... + VirtualMachineSizeTypesStandardND24rs VirtualMachineSizeTypes = "Standard_ND24rs" + // VirtualMachineSizeTypesStandardND24s ... 
+ VirtualMachineSizeTypesStandardND24s VirtualMachineSizeTypes = "Standard_ND24s" + // VirtualMachineSizeTypesStandardND6s ... + VirtualMachineSizeTypesStandardND6s VirtualMachineSizeTypes = "Standard_ND6s" + // VirtualMachineSizeTypesStandardNV12 ... + VirtualMachineSizeTypesStandardNV12 VirtualMachineSizeTypes = "Standard_NV12" + // VirtualMachineSizeTypesStandardNV24 ... + VirtualMachineSizeTypesStandardNV24 VirtualMachineSizeTypes = "Standard_NV24" + // VirtualMachineSizeTypesStandardNV6 ... + VirtualMachineSizeTypesStandardNV6 VirtualMachineSizeTypes = "Standard_NV6" +) + +// PossibleVirtualMachineSizeTypesValues returns an array of possible values for the VirtualMachineSizeTypes const type. +func PossibleVirtualMachineSizeTypesValues() []VirtualMachineSizeTypes { + return []VirtualMachineSizeTypes{VirtualMachineSizeTypesBasicA0, VirtualMachineSizeTypesBasicA1, VirtualMachineSizeTypesBasicA2, VirtualMachineSizeTypesBasicA3, VirtualMachineSizeTypesBasicA4, VirtualMachineSizeTypesStandardA0, VirtualMachineSizeTypesStandardA1, VirtualMachineSizeTypesStandardA10, VirtualMachineSizeTypesStandardA11, VirtualMachineSizeTypesStandardA1V2, VirtualMachineSizeTypesStandardA2, VirtualMachineSizeTypesStandardA2mV2, VirtualMachineSizeTypesStandardA2V2, VirtualMachineSizeTypesStandardA3, VirtualMachineSizeTypesStandardA4, VirtualMachineSizeTypesStandardA4mV2, VirtualMachineSizeTypesStandardA4V2, VirtualMachineSizeTypesStandardA5, VirtualMachineSizeTypesStandardA6, VirtualMachineSizeTypesStandardA7, VirtualMachineSizeTypesStandardA8, VirtualMachineSizeTypesStandardA8mV2, VirtualMachineSizeTypesStandardA8V2, VirtualMachineSizeTypesStandardA9, VirtualMachineSizeTypesStandardB1ms, VirtualMachineSizeTypesStandardB1s, VirtualMachineSizeTypesStandardB2ms, VirtualMachineSizeTypesStandardB2s, VirtualMachineSizeTypesStandardB4ms, VirtualMachineSizeTypesStandardB8ms, VirtualMachineSizeTypesStandardD1, VirtualMachineSizeTypesStandardD11, VirtualMachineSizeTypesStandardD11V2, VirtualMachineSizeTypesStandardD12, VirtualMachineSizeTypesStandardD12V2, VirtualMachineSizeTypesStandardD13, VirtualMachineSizeTypesStandardD13V2, VirtualMachineSizeTypesStandardD14, VirtualMachineSizeTypesStandardD14V2, VirtualMachineSizeTypesStandardD15V2, VirtualMachineSizeTypesStandardD16sV3, VirtualMachineSizeTypesStandardD16V3, VirtualMachineSizeTypesStandardD1V2, VirtualMachineSizeTypesStandardD2, VirtualMachineSizeTypesStandardD2sV3, VirtualMachineSizeTypesStandardD2V2, VirtualMachineSizeTypesStandardD2V3, VirtualMachineSizeTypesStandardD3, VirtualMachineSizeTypesStandardD32sV3, VirtualMachineSizeTypesStandardD32V3, VirtualMachineSizeTypesStandardD3V2, VirtualMachineSizeTypesStandardD4, VirtualMachineSizeTypesStandardD4sV3, VirtualMachineSizeTypesStandardD4V2, VirtualMachineSizeTypesStandardD4V3, VirtualMachineSizeTypesStandardD5V2, VirtualMachineSizeTypesStandardD64sV3, VirtualMachineSizeTypesStandardD64V3, VirtualMachineSizeTypesStandardD8sV3, VirtualMachineSizeTypesStandardD8V3, VirtualMachineSizeTypesStandardDS1, VirtualMachineSizeTypesStandardDS11, VirtualMachineSizeTypesStandardDS11V2, VirtualMachineSizeTypesStandardDS12, VirtualMachineSizeTypesStandardDS12V2, VirtualMachineSizeTypesStandardDS13, VirtualMachineSizeTypesStandardDS132V2, VirtualMachineSizeTypesStandardDS134V2, VirtualMachineSizeTypesStandardDS13V2, VirtualMachineSizeTypesStandardDS14, VirtualMachineSizeTypesStandardDS144V2, VirtualMachineSizeTypesStandardDS148V2, VirtualMachineSizeTypesStandardDS14V2, VirtualMachineSizeTypesStandardDS15V2, 
VirtualMachineSizeTypesStandardDS1V2, VirtualMachineSizeTypesStandardDS2, VirtualMachineSizeTypesStandardDS2V2, VirtualMachineSizeTypesStandardDS3, VirtualMachineSizeTypesStandardDS3V2, VirtualMachineSizeTypesStandardDS4, VirtualMachineSizeTypesStandardDS4V2, VirtualMachineSizeTypesStandardDS5V2, VirtualMachineSizeTypesStandardE16sV3, VirtualMachineSizeTypesStandardE16V3, VirtualMachineSizeTypesStandardE2sV3, VirtualMachineSizeTypesStandardE2V3, VirtualMachineSizeTypesStandardE3216V3, VirtualMachineSizeTypesStandardE328sV3, VirtualMachineSizeTypesStandardE32sV3, VirtualMachineSizeTypesStandardE32V3, VirtualMachineSizeTypesStandardE4sV3, VirtualMachineSizeTypesStandardE4V3, VirtualMachineSizeTypesStandardE6416sV3, VirtualMachineSizeTypesStandardE6432sV3, VirtualMachineSizeTypesStandardE64sV3, VirtualMachineSizeTypesStandardE64V3, VirtualMachineSizeTypesStandardE8sV3, VirtualMachineSizeTypesStandardE8V3, VirtualMachineSizeTypesStandardF1, VirtualMachineSizeTypesStandardF16, VirtualMachineSizeTypesStandardF16s, VirtualMachineSizeTypesStandardF16sV2, VirtualMachineSizeTypesStandardF1s, VirtualMachineSizeTypesStandardF2, VirtualMachineSizeTypesStandardF2s, VirtualMachineSizeTypesStandardF2sV2, VirtualMachineSizeTypesStandardF32sV2, VirtualMachineSizeTypesStandardF4, VirtualMachineSizeTypesStandardF4s, VirtualMachineSizeTypesStandardF4sV2, VirtualMachineSizeTypesStandardF64sV2, VirtualMachineSizeTypesStandardF72sV2, VirtualMachineSizeTypesStandardF8, VirtualMachineSizeTypesStandardF8s, VirtualMachineSizeTypesStandardF8sV2, VirtualMachineSizeTypesStandardG1, VirtualMachineSizeTypesStandardG2, VirtualMachineSizeTypesStandardG3, VirtualMachineSizeTypesStandardG4, VirtualMachineSizeTypesStandardG5, VirtualMachineSizeTypesStandardGS1, VirtualMachineSizeTypesStandardGS2, VirtualMachineSizeTypesStandardGS3, VirtualMachineSizeTypesStandardGS4, VirtualMachineSizeTypesStandardGS44, VirtualMachineSizeTypesStandardGS48, VirtualMachineSizeTypesStandardGS5, VirtualMachineSizeTypesStandardGS516, VirtualMachineSizeTypesStandardGS58, VirtualMachineSizeTypesStandardH16, VirtualMachineSizeTypesStandardH16m, VirtualMachineSizeTypesStandardH16mr, VirtualMachineSizeTypesStandardH16r, VirtualMachineSizeTypesStandardH8, VirtualMachineSizeTypesStandardH8m, VirtualMachineSizeTypesStandardL16s, VirtualMachineSizeTypesStandardL32s, VirtualMachineSizeTypesStandardL4s, VirtualMachineSizeTypesStandardL8s, VirtualMachineSizeTypesStandardM12832ms, VirtualMachineSizeTypesStandardM12864ms, VirtualMachineSizeTypesStandardM128ms, VirtualMachineSizeTypesStandardM128s, VirtualMachineSizeTypesStandardM6416ms, VirtualMachineSizeTypesStandardM6432ms, VirtualMachineSizeTypesStandardM64ms, VirtualMachineSizeTypesStandardM64s, VirtualMachineSizeTypesStandardNC12, VirtualMachineSizeTypesStandardNC12sV2, VirtualMachineSizeTypesStandardNC12sV3, VirtualMachineSizeTypesStandardNC24, VirtualMachineSizeTypesStandardNC24r, VirtualMachineSizeTypesStandardNC24rsV2, VirtualMachineSizeTypesStandardNC24rsV3, VirtualMachineSizeTypesStandardNC24sV2, VirtualMachineSizeTypesStandardNC24sV3, VirtualMachineSizeTypesStandardNC6, VirtualMachineSizeTypesStandardNC6sV2, VirtualMachineSizeTypesStandardNC6sV3, VirtualMachineSizeTypesStandardND12s, VirtualMachineSizeTypesStandardND24rs, VirtualMachineSizeTypesStandardND24s, VirtualMachineSizeTypesStandardND6s, VirtualMachineSizeTypesStandardNV12, VirtualMachineSizeTypesStandardNV24, VirtualMachineSizeTypesStandardNV6} +} + +// AccessURI a disk access SAS uri. 
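The `VirtualMachineSizeTypes` constants above form a string-backed enumeration, and `PossibleVirtualMachineSizeTypesValues()` returns the full set. A minimal sketch of validating a size string against it, assuming the package is imported from the usual Azure SDK path (the import path and alias are assumptions; the function and constants are as generated above):

```go
package main

import (
	"fmt"

	// Assumed import path; the exact vendored location/version may differ.
	compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
)

// isKnownVMSize reports whether size matches one of the generated constants.
func isKnownVMSize(size string) bool {
	for _, s := range compute.PossibleVirtualMachineSizeTypesValues() {
		if string(s) == size {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isKnownVMSize("Standard_DS2_v2")) // true
	fmt.Println(isKnownVMSize("Standard_Z99"))    // false: not in the enumeration
}
```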
+type AccessURI struct { + autorest.Response `json:"-"` + // AccessSAS - A SAS uri for accessing a disk. + AccessSAS *string `json:"accessSAS,omitempty"` +} + +// AdditionalCapabilities enables or disables a capability on the virtual machine or virtual machine scale set. +type AdditionalCapabilities struct { + // UltraSSDEnabled - The flag that enables or disables a capability to have one or more managed data disks with UltraSSD_LRS storage account type on the VM or VMSS. Managed disks with storage account type UltraSSD_LRS can be added to a virtual machine or virtual machine scale set only if this property is enabled. + UltraSSDEnabled *bool `json:"ultraSSDEnabled,omitempty"` +} + +// AdditionalUnattendContent specifies additional XML formatted information that can be included in the +// Unattend.xml file, which is used by Windows Setup. Contents are defined by setting name, component name, and the +// pass in which the content is applied. +type AdditionalUnattendContent struct { + // PassName - The pass name. Currently, the only allowable value is OobeSystem. Possible values include: 'OobeSystem' + PassName PassNames `json:"passName,omitempty"` + // ComponentName - The component name. Currently, the only allowable value is Microsoft-Windows-Shell-Setup. Possible values include: 'MicrosoftWindowsShellSetup' + ComponentName ComponentNames `json:"componentName,omitempty"` + // SettingName - Specifies the name of the setting to which the content applies. Possible values are: FirstLogonCommands and AutoLogon. Possible values include: 'AutoLogon', 'FirstLogonCommands' + SettingName SettingNames `json:"settingName,omitempty"` + // Content - Specifies the XML formatted content that is added to the unattend.xml file for the specified path and component. The XML must be less than 4KB and must include the root element for the setting or feature that is being inserted. + Content *string `json:"content,omitempty"` +} + +// APIEntityReference the API entity reference. +type APIEntityReference struct { + // ID - The ARM resource id in the form of /subscriptions/{SubcriptionId}/resourceGroups/{ResourceGroupName}/... + ID *string `json:"id,omitempty"` +} + +// APIError api error. +type APIError struct { + // Details - The Api error details + Details *[]APIErrorBase `json:"details,omitempty"` + // Innererror - The Api inner error + Innererror *InnerError `json:"innererror,omitempty"` + // Code - The error code. + Code *string `json:"code,omitempty"` + // Target - The target of the particular error. + Target *string `json:"target,omitempty"` + // Message - The error message. + Message *string `json:"message,omitempty"` +} + +// APIErrorBase api error base. +type APIErrorBase struct { + // Code - The error code. + Code *string `json:"code,omitempty"` + // Target - The target of the particular error. + Target *string `json:"target,omitempty"` + // Message - The error message. + Message *string `json:"message,omitempty"` +} + +// AutomaticOSUpgradePolicy the configuration parameters used for performing automatic OS upgrade. +type AutomaticOSUpgradePolicy struct { + // EnableAutomaticOSUpgrade - Whether OS upgrades should automatically be applied to scale set instances in a rolling fashion when a newer version of the image becomes available. Default value is false. + EnableAutomaticOSUpgrade *bool `json:"enableAutomaticOSUpgrade,omitempty"` + // DisableAutomaticRollback - Whether OS image rollback feature should be disabled. Default value is false. 
+ DisableAutomaticRollback *bool `json:"disableAutomaticRollback,omitempty"` +} + +// AvailabilitySet specifies information about the availability set that the virtual machine should be assigned to. +// Virtual machines specified in the same availability set are allocated to different nodes to maximize +// availability. For more information about availability sets, see [Manage the availability of virtual +// machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). +// For more information on Azure planned maintenance, see [Planned maintenance for virtual machines in +// Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json) +//
    Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added +// to an availability set. +type AvailabilitySet struct { + autorest.Response `json:"-"` + *AvailabilitySetProperties `json:"properties,omitempty"` + // Sku - Sku of the availability set, only name is required to be set. See AvailabilitySetSkuTypes for possible set of values. Use 'Aligned' for virtual machines with managed disks and 'Classic' for virtual machines with unmanaged disks. Default value is 'Classic'. + Sku *Sku `json:"sku,omitempty"` + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - Resource name + Name *string `json:"name,omitempty"` + // Type - Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for AvailabilitySet. +func (as AvailabilitySet) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if as.AvailabilitySetProperties != nil { + objectMap["properties"] = as.AvailabilitySetProperties + } + if as.Sku != nil { + objectMap["sku"] = as.Sku + } + if as.ID != nil { + objectMap["id"] = as.ID + } + if as.Name != nil { + objectMap["name"] = as.Name + } + if as.Type != nil { + objectMap["type"] = as.Type + } + if as.Location != nil { + objectMap["location"] = as.Location + } + if as.Tags != nil { + objectMap["tags"] = as.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for AvailabilitySet struct. +func (as *AvailabilitySet) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var availabilitySetProperties AvailabilitySetProperties + err = json.Unmarshal(*v, &availabilitySetProperties) + if err != nil { + return err + } + as.AvailabilitySetProperties = &availabilitySetProperties + } + case "sku": + if v != nil { + var sku Sku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + as.Sku = &sku + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + as.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + as.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + as.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + as.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + as.Tags = tags + } + } + } + + return nil +} + +// AvailabilitySetListResult the List Availability Set operation response. +type AvailabilitySetListResult struct { + autorest.Response `json:"-"` + // Value - The list of availability sets + Value *[]AvailabilitySet `json:"value,omitempty"` + // NextLink - The URI to fetch the next page of AvailabilitySets. Call ListNext() with this URI to fetch the next page of AvailabilitySets. + NextLink *string `json:"nextLink,omitempty"` +} + +// AvailabilitySetListResultIterator provides access to a complete listing of AvailabilitySet values. 
+type AvailabilitySetListResultIterator struct { + i int + page AvailabilitySetListResultPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *AvailabilitySetListResultIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter AvailabilitySetListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter AvailabilitySetListResultIterator) Response() AvailabilitySetListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter AvailabilitySetListResultIterator) Value() AvailabilitySet { + if !iter.page.NotDone() { + return AvailabilitySet{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (aslr AvailabilitySetListResult) IsEmpty() bool { + return aslr.Value == nil || len(*aslr.Value) == 0 +} + +// availabilitySetListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (aslr AvailabilitySetListResult) availabilitySetListResultPreparer() (*http.Request, error) { + if aslr.NextLink == nil || len(to.String(aslr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(aslr.NextLink))) +} + +// AvailabilitySetListResultPage contains a page of AvailabilitySet values. +type AvailabilitySetListResultPage struct { + fn func(AvailabilitySetListResult) (AvailabilitySetListResult, error) + aslr AvailabilitySetListResult +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *AvailabilitySetListResultPage) Next() error { + next, err := page.fn(page.aslr) + if err != nil { + return err + } + page.aslr = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page AvailabilitySetListResultPage) NotDone() bool { + return !page.aslr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page AvailabilitySetListResultPage) Response() AvailabilitySetListResult { + return page.aslr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page AvailabilitySetListResultPage) Values() []AvailabilitySet { + if page.aslr.IsEmpty() { + return nil + } + return *page.aslr.Value +} + +// AvailabilitySetProperties the instance view of a resource. +type AvailabilitySetProperties struct { + // PlatformUpdateDomainCount - Update Domain count. + PlatformUpdateDomainCount *int32 `json:"platformUpdateDomainCount,omitempty"` + // PlatformFaultDomainCount - Fault Domain count. + PlatformFaultDomainCount *int32 `json:"platformFaultDomainCount,omitempty"` + // VirtualMachines - A list of references to all virtual machines in the availability set. + VirtualMachines *[]SubResource `json:"virtualMachines,omitempty"` + // Statuses - The resource status information. 
+ Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// AvailabilitySetUpdate specifies information about the availability set that the virtual machine should be +// assigned to. Only tags may be updated. +type AvailabilitySetUpdate struct { + *AvailabilitySetProperties `json:"properties,omitempty"` + // Sku - Sku of the availability set + Sku *Sku `json:"sku,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for AvailabilitySetUpdate. +func (asu AvailabilitySetUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if asu.AvailabilitySetProperties != nil { + objectMap["properties"] = asu.AvailabilitySetProperties + } + if asu.Sku != nil { + objectMap["sku"] = asu.Sku + } + if asu.Tags != nil { + objectMap["tags"] = asu.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for AvailabilitySetUpdate struct. +func (asu *AvailabilitySetUpdate) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var availabilitySetProperties AvailabilitySetProperties + err = json.Unmarshal(*v, &availabilitySetProperties) + if err != nil { + return err + } + asu.AvailabilitySetProperties = &availabilitySetProperties + } + case "sku": + if v != nil { + var sku Sku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + asu.Sku = &sku + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + asu.Tags = tags + } + } + } + + return nil +} + +// BootDiagnostics boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot +// to diagnose VM status.
For Linux Virtual Machines, you can easily view the output of your console log. +//
    For both Windows and Linux virtual machines, Azure also enables you to see a screenshot of the VM from +// the hypervisor. +type BootDiagnostics struct { + // Enabled - Whether boot diagnostics should be enabled on the Virtual Machine. + Enabled *bool `json:"enabled,omitempty"` + // StorageURI - Uri of the storage account to use for placing the console output and screenshot. + StorageURI *string `json:"storageUri,omitempty"` +} + +// BootDiagnosticsInstanceView the instance view of a virtual machine boot diagnostics. +type BootDiagnosticsInstanceView struct { + // ConsoleScreenshotBlobURI - The console screenshot blob URI. + ConsoleScreenshotBlobURI *string `json:"consoleScreenshotBlobUri,omitempty"` + // SerialConsoleLogBlobURI - The Linux serial console log blob Uri. + SerialConsoleLogBlobURI *string `json:"serialConsoleLogBlobUri,omitempty"` +} + +// CloudError an error response from the Gallery service. +type CloudError struct { + Error *APIError `json:"error,omitempty"` +} + +// ContainerService container service. +type ContainerService struct { + autorest.Response `json:"-"` + *ContainerServiceProperties `json:"properties,omitempty"` + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - Resource name + Name *string `json:"name,omitempty"` + // Type - Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for ContainerService. +func (cs ContainerService) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if cs.ContainerServiceProperties != nil { + objectMap["properties"] = cs.ContainerServiceProperties + } + if cs.ID != nil { + objectMap["id"] = cs.ID + } + if cs.Name != nil { + objectMap["name"] = cs.Name + } + if cs.Type != nil { + objectMap["type"] = cs.Type + } + if cs.Location != nil { + objectMap["location"] = cs.Location + } + if cs.Tags != nil { + objectMap["tags"] = cs.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ContainerService struct. +func (cs *ContainerService) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var containerServiceProperties ContainerServiceProperties + err = json.Unmarshal(*v, &containerServiceProperties) + if err != nil { + return err + } + cs.ContainerServiceProperties = &containerServiceProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + cs.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + cs.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + cs.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + cs.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + cs.Tags = tags + } + } + } + + return nil +} + +// ContainerServiceAgentPoolProfile profile for the container service agent pool. 
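`BootDiagnostics` above is a plain options struct: both fields are pointers so that unset values are omitted on the wire. A small sketch of populating it with the `autorest/to` pointer helpers (import paths and the storage account URI are assumptions; the field names match the struct above):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/to"
	// Assumed import path for the package defined in this diff.
	compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
)

func main() {
	// Hypothetical diagnostics storage account; the console log and
	// screenshot blobs are written under this URI.
	bd := compute.BootDiagnostics{
		Enabled:    to.BoolPtr(true),
		StorageURI: to.StringPtr("https://examplediag.blob.core.windows.net/"),
	}
	b, _ := json.Marshal(bd)
	fmt.Println(string(b)) // {"enabled":true,"storageUri":"https://examplediag.blob.core.windows.net/"}
}
```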
+type ContainerServiceAgentPoolProfile struct { + // Name - Unique name of the agent pool profile in the context of the subscription and resource group. + Name *string `json:"name,omitempty"` + // Count - Number of agents (VMs) to host docker containers. Allowed values must be in the range of 1 to 100 (inclusive). The default value is 1. + Count *int32 `json:"count,omitempty"` + // VMSize - Size of agent VMs. Possible values include: 'StandardA0', 'StandardA1', 'StandardA2', 'StandardA3', 'StandardA4', 'StandardA5', 'StandardA6', 'StandardA7', 'StandardA8', 'StandardA9', 'StandardA10', 'StandardA11', 'StandardD1', 'StandardD2', 'StandardD3', 'StandardD4', 'StandardD11', 'StandardD12', 'StandardD13', 'StandardD14', 'StandardD1V2', 'StandardD2V2', 'StandardD3V2', 'StandardD4V2', 'StandardD5V2', 'StandardD11V2', 'StandardD12V2', 'StandardD13V2', 'StandardD14V2', 'StandardG1', 'StandardG2', 'StandardG3', 'StandardG4', 'StandardG5', 'StandardDS1', 'StandardDS2', 'StandardDS3', 'StandardDS4', 'StandardDS11', 'StandardDS12', 'StandardDS13', 'StandardDS14', 'StandardGS1', 'StandardGS2', 'StandardGS3', 'StandardGS4', 'StandardGS5' + VMSize ContainerServiceVMSizeTypes `json:"vmSize,omitempty"` + // DNSPrefix - DNS prefix to be used to create the FQDN for the agent pool. + DNSPrefix *string `json:"dnsPrefix,omitempty"` + // Fqdn - FDQN for the agent pool. + Fqdn *string `json:"fqdn,omitempty"` +} + +// ContainerServiceCustomProfile properties to configure a custom container service cluster. +type ContainerServiceCustomProfile struct { + // Orchestrator - The name of the custom orchestrator to use. + Orchestrator *string `json:"orchestrator,omitempty"` +} + +// ContainerServiceDiagnosticsProfile ... +type ContainerServiceDiagnosticsProfile struct { + // VMDiagnostics - Profile for the container service VM diagnostic agent. + VMDiagnostics *ContainerServiceVMDiagnostics `json:"vmDiagnostics,omitempty"` +} + +// ContainerServiceLinuxProfile profile for Linux VMs in the container service cluster. +type ContainerServiceLinuxProfile struct { + // AdminUsername - The administrator username to use for Linux VMs. + AdminUsername *string `json:"adminUsername,omitempty"` + // SSH - The ssh key configuration for Linux VMs. + SSH *ContainerServiceSSHConfiguration `json:"ssh,omitempty"` +} + +// ContainerServiceListResult the response from the List Container Services operation. +type ContainerServiceListResult struct { + autorest.Response `json:"-"` + // Value - the list of container services. + Value *[]ContainerService `json:"value,omitempty"` + // NextLink - The URL to get the next set of container service results. + NextLink *string `json:"nextLink,omitempty"` +} + +// ContainerServiceListResultIterator provides access to a complete listing of ContainerService values. +type ContainerServiceListResultIterator struct { + i int + page ContainerServiceListResultPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *ContainerServiceListResultIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. 
+func (iter ContainerServiceListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ContainerServiceListResultIterator) Response() ContainerServiceListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ContainerServiceListResultIterator) Value() ContainerService { + if !iter.page.NotDone() { + return ContainerService{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (cslr ContainerServiceListResult) IsEmpty() bool { + return cslr.Value == nil || len(*cslr.Value) == 0 +} + +// containerServiceListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (cslr ContainerServiceListResult) containerServiceListResultPreparer() (*http.Request, error) { + if cslr.NextLink == nil || len(to.String(cslr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(cslr.NextLink))) +} + +// ContainerServiceListResultPage contains a page of ContainerService values. +type ContainerServiceListResultPage struct { + fn func(ContainerServiceListResult) (ContainerServiceListResult, error) + cslr ContainerServiceListResult +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *ContainerServiceListResultPage) Next() error { + next, err := page.fn(page.cslr) + if err != nil { + return err + } + page.cslr = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ContainerServiceListResultPage) NotDone() bool { + return !page.cslr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page ContainerServiceListResultPage) Response() ContainerServiceListResult { + return page.cslr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page ContainerServiceListResultPage) Values() []ContainerService { + if page.cslr.IsEmpty() { + return nil + } + return *page.cslr.Value +} + +// ContainerServiceMasterProfile profile for the container service master. +type ContainerServiceMasterProfile struct { + // Count - Number of masters (VMs) in the container service cluster. Allowed values are 1, 3, and 5. The default value is 1. + Count *int32 `json:"count,omitempty"` + // DNSPrefix - DNS prefix to be used to create the FQDN for master. + DNSPrefix *string `json:"dnsPrefix,omitempty"` + // Fqdn - FDQN for the master. + Fqdn *string `json:"fqdn,omitempty"` +} + +// ContainerServiceOrchestratorProfile profile for the container service orchestrator. +type ContainerServiceOrchestratorProfile struct { + // OrchestratorType - The orchestrator to use to manage container service cluster resources. Valid values are Swarm, DCOS, and Custom. Possible values include: 'Swarm', 'DCOS', 'Custom', 'Kubernetes' + OrchestratorType ContainerServiceOrchestratorTypes `json:"orchestratorType,omitempty"` +} + +// ContainerServiceProperties properties of the container service. 
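The `ContainerServiceListResultIterator`/`ContainerServiceListResultPage` pair above follows the SDK-wide paging protocol: `NotDone` gates the loop, `Value` yields the current element, and `Next` advances, fetching the following page when the current one is exhausted. A consumption sketch, assuming the vendored import path; a real iterator would typically come from the generated client's `ListComplete`-style method:

```go
package main

import (
	"fmt"

	// Assumed import path for the package defined in this diff.
	compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
)

// printContainerServices drains a paged listing using the generated
// NotDone/Value/Next protocol; Next fetches the following page on demand.
func printContainerServices(iter compute.ContainerServiceListResultIterator) error {
	for iter.NotDone() {
		cs := iter.Value()
		if cs.Name != nil {
			fmt.Println(*cs.Name)
		}
		if err := iter.Next(); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	// A zero-value iterator reports NotDone() == false, so this is a safe no-op.
	var iter compute.ContainerServiceListResultIterator
	_ = printContainerServices(iter)
}
```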
+type ContainerServiceProperties struct { + // ProvisioningState - the current deployment or provisioning state, which only appears in the response. + ProvisioningState *string `json:"provisioningState,omitempty"` + // OrchestratorProfile - Properties of the orchestrator. + OrchestratorProfile *ContainerServiceOrchestratorProfile `json:"orchestratorProfile,omitempty"` + // CustomProfile - Properties for custom clusters. + CustomProfile *ContainerServiceCustomProfile `json:"customProfile,omitempty"` + // ServicePrincipalProfile - Properties for cluster service principals. + ServicePrincipalProfile *ContainerServiceServicePrincipalProfile `json:"servicePrincipalProfile,omitempty"` + // MasterProfile - Properties of master agents. + MasterProfile *ContainerServiceMasterProfile `json:"masterProfile,omitempty"` + // AgentPoolProfiles - Properties of the agent pool. + AgentPoolProfiles *[]ContainerServiceAgentPoolProfile `json:"agentPoolProfiles,omitempty"` + // WindowsProfile - Properties of Windows VMs. + WindowsProfile *ContainerServiceWindowsProfile `json:"windowsProfile,omitempty"` + // LinuxProfile - Properties of Linux VMs. + LinuxProfile *ContainerServiceLinuxProfile `json:"linuxProfile,omitempty"` + // DiagnosticsProfile - Properties of the diagnostic agent. + DiagnosticsProfile *ContainerServiceDiagnosticsProfile `json:"diagnosticsProfile,omitempty"` +} + +// ContainerServicesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type ContainerServicesCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *ContainerServicesCreateOrUpdateFuture) Result(client ContainerServicesClient) (cs ContainerService, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.ContainerServicesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if cs.Response.Response, err = future.GetResult(sender); err == nil && cs.Response.Response.StatusCode != http.StatusNoContent { + cs, err = client.CreateOrUpdateResponder(cs.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesCreateOrUpdateFuture", "Result", cs.Response.Response, "Failure responding to request") + } + } + return +} + +// ContainerServicesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type ContainerServicesDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *ContainerServicesDeleteFuture) Result(client ContainerServicesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.ContainerServicesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// ContainerServiceServicePrincipalProfile information about a service principal identity for the cluster to use +// for manipulating Azure APIs. +type ContainerServiceServicePrincipalProfile struct { + // ClientID - The ID for the service principal. + ClientID *string `json:"clientId,omitempty"` + // Secret - The secret password associated with the service principal. + Secret *string `json:"secret,omitempty"` +} + +// ContainerServiceSSHConfiguration SSH configuration for Linux-based VMs running on Azure. +type ContainerServiceSSHConfiguration struct { + // PublicKeys - the list of SSH public keys used to authenticate with Linux-based VMs. + PublicKeys *[]ContainerServiceSSHPublicKey `json:"publicKeys,omitempty"` +} + +// ContainerServiceSSHPublicKey contains information about SSH certificate public key data. +type ContainerServiceSSHPublicKey struct { + // KeyData - Certificate public key used to authenticate with VMs through SSH. The certificate must be in PEM format with or without headers. + KeyData *string `json:"keyData,omitempty"` +} + +// ContainerServiceVMDiagnostics profile for diagnostics on the container service VMs. +type ContainerServiceVMDiagnostics struct { + // Enabled - Whether the VM diagnostic agent is provisioned on the VM. + Enabled *bool `json:"enabled,omitempty"` + // StorageURI - The URI of the storage account where diagnostics are stored. + StorageURI *string `json:"storageUri,omitempty"` +} + +// ContainerServiceWindowsProfile profile for Windows VMs in the container service cluster. +type ContainerServiceWindowsProfile struct { + // AdminUsername - The administrator username to use for Windows VMs. + AdminUsername *string `json:"adminUsername,omitempty"` + // AdminPassword - The administrator password to use for Windows VMs. + AdminPassword *string `json:"adminPassword,omitempty"` +} + +// CreationData data used when creating a disk. +type CreationData struct { + // CreateOption - This enumerates the possible sources of a disk's creation. Possible values include: 'Empty', 'Attach', 'FromImage', 'Import', 'Copy', 'Restore' + CreateOption DiskCreateOption `json:"createOption,omitempty"` + // StorageAccountID - If createOption is Import, the Azure Resource Manager identifier of the storage account containing the blob to import as a disk. Required only if the blob is in a different subscription + StorageAccountID *string `json:"storageAccountId,omitempty"` + // ImageReference - Disk source information. + ImageReference *ImageDiskReference `json:"imageReference,omitempty"` + // SourceURI - If createOption is Import, this is the URI of a blob to be imported into a managed disk. + SourceURI *string `json:"sourceUri,omitempty"` + // SourceResourceID - If createOption is Copy, this is the ARM id of the source snapshot or disk. + SourceResourceID *string `json:"sourceResourceId,omitempty"` +} + +// DataDisk describes a data disk. +type DataDisk struct { + // Lun - Specifies the logical unit number of the data disk. 
This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. + Lun *int32 `json:"lun,omitempty"` + // Name - The disk name. + Name *string `json:"name,omitempty"` + // Vhd - The virtual hard disk. + Vhd *VirtualHardDisk `json:"vhd,omitempty"` + // Image - The source user image virtual hard disk. The virtual hard disk will be copied before being attached to the virtual machine. If SourceImage is provided, the destination virtual hard drive must not exist. + Image *VirtualHardDisk `json:"image,omitempty"` + // Caching - Specifies the caching requirements. Possible values are: **None**, **ReadOnly**, **ReadWrite**. Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' + Caching CachingTypes `json:"caching,omitempty"` + // WriteAcceleratorEnabled - Specifies whether writeAccelerator should be enabled or disabled on the disk. + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"` + // CreateOption - Specifies how the virtual machine should be created. Possible values are: **Attach** – This value is used when you are using a specialized disk to create the virtual machine. **FromImage** – This value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imageReference element described above. If you are using a marketplace image, you also use the plan element previously described. Possible values include: 'DiskCreateOptionTypesFromImage', 'DiskCreateOptionTypesEmpty', 'DiskCreateOptionTypesAttach' + CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` + // DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image. This value cannot be larger than 1023 GB. + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // ManagedDisk - The managed disk parameters. + ManagedDisk *ManagedDiskParameters `json:"managedDisk,omitempty"` +} + +// DataDiskImage contains the data disk images information. +type DataDiskImage struct { + // Lun - Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. + Lun *int32 `json:"lun,omitempty"` +} + +// DiagnosticsProfile specifies the boot diagnostic settings state. Minimum api-version: 2015-06-15. +type DiagnosticsProfile struct { + // BootDiagnostics - Boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot to diagnose VM status. For Linux Virtual Machines, you can easily view the output of your console log. For both Windows and Linux virtual machines, Azure also enables you to see a screenshot of the VM from the hypervisor. + BootDiagnostics *BootDiagnostics `json:"bootDiagnostics,omitempty"` +} + +// DiffDiskSettings describes the parameters of differencing disk settings that can be specified for operating +// system disk.
    NOTE: The differencing disk settings can only be specified for managed disk. +type DiffDiskSettings struct { + // Option - Specifies the differencing disk settings for operating system disk. Possible values include: 'Local' + Option DiffDiskOptions `json:"option,omitempty"` +} + +// Disallowed describes the disallowed disk types. +type Disallowed struct { + // DiskTypes - A list of disk types. + DiskTypes *[]string `json:"diskTypes,omitempty"` +} + +// Disk disk resource. +type Disk struct { + autorest.Response `json:"-"` + // ManagedBy - A relative URI containing the ID of the VM that has the disk attached. + ManagedBy *string `json:"managedBy,omitempty"` + Sku *DiskSku `json:"sku,omitempty"` + // Zones - The Logical zone list for Disk. + Zones *[]string `json:"zones,omitempty"` + *DiskProperties `json:"properties,omitempty"` + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - Resource name + Name *string `json:"name,omitempty"` + // Type - Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for Disk. +func (d Disk) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if d.ManagedBy != nil { + objectMap["managedBy"] = d.ManagedBy + } + if d.Sku != nil { + objectMap["sku"] = d.Sku + } + if d.Zones != nil { + objectMap["zones"] = d.Zones + } + if d.DiskProperties != nil { + objectMap["properties"] = d.DiskProperties + } + if d.ID != nil { + objectMap["id"] = d.ID + } + if d.Name != nil { + objectMap["name"] = d.Name + } + if d.Type != nil { + objectMap["type"] = d.Type + } + if d.Location != nil { + objectMap["location"] = d.Location + } + if d.Tags != nil { + objectMap["tags"] = d.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Disk struct. 
+func (d *Disk) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "managedBy": + if v != nil { + var managedBy string + err = json.Unmarshal(*v, &managedBy) + if err != nil { + return err + } + d.ManagedBy = &managedBy + } + case "sku": + if v != nil { + var sku DiskSku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + d.Sku = &sku + } + case "zones": + if v != nil { + var zones []string + err = json.Unmarshal(*v, &zones) + if err != nil { + return err + } + d.Zones = &zones + } + case "properties": + if v != nil { + var diskProperties DiskProperties + err = json.Unmarshal(*v, &diskProperties) + if err != nil { + return err + } + d.DiskProperties = &diskProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + d.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + d.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + d.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + d.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + d.Tags = tags + } + } + } + + return nil +} + +// DiskEncryptionSettings describes a Encryption Settings for a Disk +type DiskEncryptionSettings struct { + // DiskEncryptionKey - Specifies the location of the disk encryption key, which is a Key Vault Secret. + DiskEncryptionKey *KeyVaultSecretReference `json:"diskEncryptionKey,omitempty"` + // KeyEncryptionKey - Specifies the location of the key encryption key in Key Vault. + KeyEncryptionKey *KeyVaultKeyReference `json:"keyEncryptionKey,omitempty"` + // Enabled - Specifies whether disk encryption should be enabled on the virtual machine. + Enabled *bool `json:"enabled,omitempty"` +} + +// DiskInstanceView the instance view of the disk. +type DiskInstanceView struct { + // Name - The disk name. + Name *string `json:"name,omitempty"` + // EncryptionSettings - Specifies the encryption settings for the OS Disk.
    Minimum api-version: 2015-06-15 + EncryptionSettings *[]DiskEncryptionSettings `json:"encryptionSettings,omitempty"` + // Statuses - The resource status information. + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// DiskList the List Disks operation response. +type DiskList struct { + autorest.Response `json:"-"` + // Value - A list of disks. + Value *[]Disk `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of disks. Call ListNext() with this to fetch the next page of disks. + NextLink *string `json:"nextLink,omitempty"` +} + +// DiskListIterator provides access to a complete listing of Disk values. +type DiskListIterator struct { + i int + page DiskListPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *DiskListIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter DiskListIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter DiskListIterator) Response() DiskList { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter DiskListIterator) Value() Disk { + if !iter.page.NotDone() { + return Disk{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (dl DiskList) IsEmpty() bool { + return dl.Value == nil || len(*dl.Value) == 0 +} + +// diskListPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (dl DiskList) diskListPreparer() (*http.Request, error) { + if dl.NextLink == nil || len(to.String(dl.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(dl.NextLink))) +} + +// DiskListPage contains a page of Disk values. +type DiskListPage struct { + fn func(DiskList) (DiskList, error) + dl DiskList +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *DiskListPage) Next() error { + next, err := page.fn(page.dl) + if err != nil { + return err + } + page.dl = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page DiskListPage) NotDone() bool { + return !page.dl.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page DiskListPage) Response() DiskList { + return page.dl +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page DiskListPage) Values() []Disk { + if page.dl.IsEmpty() { + return nil + } + return *page.dl.Value +} + +// DiskProperties disk resource properties. +type DiskProperties struct { + // TimeCreated - The time when the disk was created. + TimeCreated *date.Time `json:"timeCreated,omitempty"` + // OsType - The Operating System type. 
Possible values include: 'Windows', 'Linux' + OsType OperatingSystemTypes `json:"osType,omitempty"` + // CreationData - Disk source information. CreationData information cannot be changed after the disk has been created. + CreationData *CreationData `json:"creationData,omitempty"` + // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size. + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // EncryptionSettings - Encryption settings for disk or snapshot + EncryptionSettings *EncryptionSettings `json:"encryptionSettings,omitempty"` + // ProvisioningState - The disk provisioning state. + ProvisioningState *string `json:"provisioningState,omitempty"` + // DiskIOPSReadWrite - The number of IOPS allowed for this disk; only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes. + DiskIOPSReadWrite *int64 `json:"diskIOPSReadWrite,omitempty"` + // DiskMBpsReadWrite - The bandwidth allowed for this disk; only settable for UltraSSD disks. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10. + DiskMBpsReadWrite *int32 `json:"diskMBpsReadWrite,omitempty"` +} + +// DisksCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type DisksCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *DisksCreateOrUpdateFuture) Result(client DisksClient) (d Disk, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DisksCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if d.Response.Response, err = future.GetResult(sender); err == nil && d.Response.Response.StatusCode != http.StatusNoContent { + d, err = client.CreateOrUpdateResponder(d.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksCreateOrUpdateFuture", "Result", d.Response.Response, "Failure responding to request") + } + } + return +} + +// DisksDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type DisksDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *DisksDeleteFuture) Result(client DisksClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DisksDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// DisksGrantAccessFuture an abstraction for monitoring and retrieving the results of a long-running operation. 
+type DisksGrantAccessFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *DisksGrantAccessFuture) Result(client DisksClient) (au AccessURI, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksGrantAccessFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DisksGrantAccessFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if au.Response.Response, err = future.GetResult(sender); err == nil && au.Response.Response.StatusCode != http.StatusNoContent { + au, err = client.GrantAccessResponder(au.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksGrantAccessFuture", "Result", au.Response.Response, "Failure responding to request") + } + } + return +} + +// DiskSku the disks sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, or UltraSSD_LRS. +type DiskSku struct { + // Name - The sku name. Possible values include: 'StandardLRS', 'PremiumLRS', 'StandardSSDLRS', 'UltraSSDLRS' + Name DiskStorageAccountTypes `json:"name,omitempty"` + // Tier - The sku tier. + Tier *string `json:"tier,omitempty"` +} + +// DisksRevokeAccessFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type DisksRevokeAccessFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *DisksRevokeAccessFuture) Result(client DisksClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksRevokeAccessFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DisksRevokeAccessFuture") + return + } + ar.Response = future.Response() + return +} + +// DisksUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type DisksUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *DisksUpdateFuture) Result(client DisksClient) (d Disk, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DisksUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if d.Response.Response, err = future.GetResult(sender); err == nil && d.Response.Response.StatusCode != http.StatusNoContent { + d, err = client.UpdateResponder(d.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksUpdateFuture", "Result", d.Response.Response, "Failure responding to request") + } + } + return +} + +// DiskUpdate disk update resource. 
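The `*Future` wrappers above all follow one shape: a client method returns the future, the caller blocks until the long-running operation finishes, and the generated `Result` method unpacks the terminal response. A sketch under stated assumptions: the import path, a pre-configured `DisksClient`, and `WaitForCompletionRef` being present on this autorest vintage (older releases expose `WaitForCompletion` instead):

```go
package main

import (
	"context"

	// Assumed import path for the package defined in this diff.
	compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
)

// createDisk starts the long-running CreateOrUpdate, polls it to completion,
// and returns the resulting Disk.
func createDisk(ctx context.Context, client compute.DisksClient, rg, name string, disk compute.Disk) (compute.Disk, error) {
	future, err := client.CreateOrUpdate(ctx, rg, name, disk) // returns DisksCreateOrUpdateFuture
	if err != nil {
		return compute.Disk{}, err
	}
	// Block until the operation reaches a terminal state...
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return compute.Disk{}, err
	}
	// ...then decode the final response via the generated Result method.
	return future.Result(client)
}

func main() {} // client setup (authorizer, subscription ID) elided
```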
+type DiskUpdate struct { + *DiskUpdateProperties `json:"properties,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` + Sku *DiskSku `json:"sku,omitempty"` +} + +// MarshalJSON is the custom marshaler for DiskUpdate. +func (du DiskUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if du.DiskUpdateProperties != nil { + objectMap["properties"] = du.DiskUpdateProperties + } + if du.Tags != nil { + objectMap["tags"] = du.Tags + } + if du.Sku != nil { + objectMap["sku"] = du.Sku + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for DiskUpdate struct. +func (du *DiskUpdate) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var diskUpdateProperties DiskUpdateProperties + err = json.Unmarshal(*v, &diskUpdateProperties) + if err != nil { + return err + } + du.DiskUpdateProperties = &diskUpdateProperties + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + du.Tags = tags + } + case "sku": + if v != nil { + var sku DiskSku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + du.Sku = &sku + } + } + } + + return nil +} + +// DiskUpdateProperties disk resource update properties. +type DiskUpdateProperties struct { + // OsType - the Operating System type. Possible values include: 'Windows', 'Linux' + OsType OperatingSystemTypes `json:"osType,omitempty"` + // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size. + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // EncryptionSettings - Encryption settings for disk or snapshot + EncryptionSettings *EncryptionSettings `json:"encryptionSettings,omitempty"` + // DiskIOPSReadWrite - The number of IOPS allowed for this disk; only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes. + DiskIOPSReadWrite *int64 `json:"diskIOPSReadWrite,omitempty"` + // DiskMBpsReadWrite - The bandwidth allowed for this disk; only settable for UltraSSD disks. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10. + DiskMBpsReadWrite *int32 `json:"diskMBpsReadWrite,omitempty"` +} + +// EncryptionSettings encryption settings for disk or snapshot +type EncryptionSettings struct { + // Enabled - Set this flag to true and provide DiskEncryptionKey and optional KeyEncryptionKey to enable encryption. Set this flag to false and remove DiskEncryptionKey and KeyEncryptionKey to disable encryption. If EncryptionSettings is null in the request object, the existing settings remain unchanged. + Enabled *bool `json:"enabled,omitempty"` + // DiskEncryptionKey - Key Vault Secret Url and vault id of the disk encryption key + DiskEncryptionKey *KeyVaultAndSecretReference `json:"diskEncryptionKey,omitempty"` + // KeyEncryptionKey - Key Vault Key Url and vault id of the key encryption key + KeyEncryptionKey *KeyVaultAndKeyReference `json:"keyEncryptionKey,omitempty"` +} + +// GalleriesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. 
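`DiskUpdate` embeds `*DiskUpdateProperties`, and its custom `MarshalJSON` nests those properties under a `properties` key, matching the ARM PATCH payload shape. Per the `DiskSizeGB` comment, a resize is honored only while the disk is detached and may only increase the size. A minimal sketch (import paths assumed):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/to"
	// Assumed import path for the package defined in this diff.
	compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
)

func main() {
	// Grow a managed disk to 256 GB; shrinking is rejected by the service.
	du := compute.DiskUpdate{
		DiskUpdateProperties: &compute.DiskUpdateProperties{
			DiskSizeGB: to.Int32Ptr(256),
		},
	}
	// The custom MarshalJSON nests the embedded properties under "properties".
	b, _ := json.Marshal(du)
	fmt.Println(string(b)) // {"properties":{"diskSizeGB":256}}
}
```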
+type GalleriesCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *GalleriesCreateOrUpdateFuture) Result(client GalleriesClient) (g Gallery, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleriesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if g.Response.Response, err = future.GetResult(sender); err == nil && g.Response.Response.StatusCode != http.StatusNoContent { + g, err = client.CreateOrUpdateResponder(g.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesCreateOrUpdateFuture", "Result", g.Response.Response, "Failure responding to request") + } + } + return +} + +// GalleriesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type GalleriesDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *GalleriesDeleteFuture) Result(client GalleriesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleriesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// Gallery specifies information about the Shared Image Gallery that you want to create or update. +type Gallery struct { + autorest.Response `json:"-"` + *GalleryProperties `json:"properties,omitempty"` + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - Resource name + Name *string `json:"name,omitempty"` + // Type - Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for Gallery. +func (g Gallery) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if g.GalleryProperties != nil { + objectMap["properties"] = g.GalleryProperties + } + if g.ID != nil { + objectMap["id"] = g.ID + } + if g.Name != nil { + objectMap["name"] = g.Name + } + if g.Type != nil { + objectMap["type"] = g.Type + } + if g.Location != nil { + objectMap["location"] = g.Location + } + if g.Tags != nil { + objectMap["tags"] = g.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Gallery struct. 
+func (g *Gallery) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var galleryProperties GalleryProperties + err = json.Unmarshal(*v, &galleryProperties) + if err != nil { + return err + } + g.GalleryProperties = &galleryProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + g.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + g.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + g.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + g.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + g.Tags = tags + } + } + } + + return nil +} + +// GalleryArtifactPublishingProfileBase describes the basic gallery artifact publishing profile. +type GalleryArtifactPublishingProfileBase struct { + // TargetRegions - The target regions where the Image Version is going to be replicated to. This property is updateable. + TargetRegions *[]TargetRegion `json:"targetRegions,omitempty"` + Source *GalleryArtifactSource `json:"source,omitempty"` +} + +// GalleryArtifactSource the source image from which the Image Version is going to be created. +type GalleryArtifactSource struct { + ManagedImage *ManagedArtifact `json:"managedImage,omitempty"` +} + +// GalleryDataDiskImage this is the data disk image. +type GalleryDataDiskImage struct { + // Lun - This property specifies the logical unit number of the data disk. This value is used to identify data disks within the Virtual Machine and therefore must be unique for each data disk attached to the Virtual Machine. + Lun *int32 `json:"lun,omitempty"` + // SizeInGB - This property indicates the size of the VHD to be created. + SizeInGB *int32 `json:"sizeInGB,omitempty"` + // HostCaching - The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite'. Possible values include: 'HostCachingNone', 'HostCachingReadOnly', 'HostCachingReadWrite' + HostCaching HostCaching `json:"hostCaching,omitempty"` +} + +// GalleryDiskImage this is the disk image base class. +type GalleryDiskImage struct { + // SizeInGB - This property indicates the size of the VHD to be created. + SizeInGB *int32 `json:"sizeInGB,omitempty"` + // HostCaching - The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite'. Possible values include: 'HostCachingNone', 'HostCachingReadOnly', 'HostCachingReadWrite' + HostCaching HostCaching `json:"hostCaching,omitempty"` +} + +// GalleryIdentifier describes the gallery unique name. +type GalleryIdentifier struct { + // UniqueName - The unique name of the Shared Image Gallery. This name is generated automatically by Azure. + UniqueName *string `json:"uniqueName,omitempty"` +} + +// GalleryImage specifies information about the gallery Image Definition that you want to create or update. 
+type GalleryImage struct { + autorest.Response `json:"-"` + *GalleryImageProperties `json:"properties,omitempty"` + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - Resource name + Name *string `json:"name,omitempty"` + // Type - Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for GalleryImage. +func (gi GalleryImage) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if gi.GalleryImageProperties != nil { + objectMap["properties"] = gi.GalleryImageProperties + } + if gi.ID != nil { + objectMap["id"] = gi.ID + } + if gi.Name != nil { + objectMap["name"] = gi.Name + } + if gi.Type != nil { + objectMap["type"] = gi.Type + } + if gi.Location != nil { + objectMap["location"] = gi.Location + } + if gi.Tags != nil { + objectMap["tags"] = gi.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for GalleryImage struct. +func (gi *GalleryImage) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var galleryImageProperties GalleryImageProperties + err = json.Unmarshal(*v, &galleryImageProperties) + if err != nil { + return err + } + gi.GalleryImageProperties = &galleryImageProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + gi.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + gi.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + gi.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + gi.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + gi.Tags = tags + } + } + } + + return nil +} + +// GalleryImageIdentifier this is the gallery Image Definition identifier. +type GalleryImageIdentifier struct { + // Publisher - The name of the gallery Image Definition publisher. + Publisher *string `json:"publisher,omitempty"` + // Offer - The name of the gallery Image Definition offer. + Offer *string `json:"offer,omitempty"` + // Sku - The name of the gallery Image Definition SKU. + Sku *string `json:"sku,omitempty"` +} + +// GalleryImageList the List Gallery Images operation response. +type GalleryImageList struct { + autorest.Response `json:"-"` + // Value - A list of Shared Image Gallery images. + Value *[]GalleryImage `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of Image Definitions in the Shared Image Gallery. Call ListNext() with this to fetch the next page of gallery Image Definitions. + NextLink *string `json:"nextLink,omitempty"` +} + +// GalleryImageListIterator provides access to a complete listing of GalleryImage values. +type GalleryImageListIterator struct { + i int + page GalleryImageListPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. 
+func (iter *GalleryImageListIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter GalleryImageListIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter GalleryImageListIterator) Response() GalleryImageList { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter GalleryImageListIterator) Value() GalleryImage { + if !iter.page.NotDone() { + return GalleryImage{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (gil GalleryImageList) IsEmpty() bool { + return gil.Value == nil || len(*gil.Value) == 0 +} + +// galleryImageListPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (gil GalleryImageList) galleryImageListPreparer() (*http.Request, error) { + if gil.NextLink == nil || len(to.String(gil.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(gil.NextLink))) +} + +// GalleryImageListPage contains a page of GalleryImage values. +type GalleryImageListPage struct { + fn func(GalleryImageList) (GalleryImageList, error) + gil GalleryImageList +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *GalleryImageListPage) Next() error { + next, err := page.fn(page.gil) + if err != nil { + return err + } + page.gil = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page GalleryImageListPage) NotDone() bool { + return !page.gil.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page GalleryImageListPage) Response() GalleryImageList { + return page.gil +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page GalleryImageListPage) Values() []GalleryImage { + if page.gil.IsEmpty() { + return nil + } + return *page.gil.Value +} + +// GalleryImageProperties describes the properties of a gallery Image Definition. +type GalleryImageProperties struct { + // Description - The description of this gallery Image Definition resource. This property is updateable. + Description *string `json:"description,omitempty"` + // Eula - The Eula agreement for the gallery Image Definition. + Eula *string `json:"eula,omitempty"` + // PrivacyStatementURI - The privacy statement uri. + PrivacyStatementURI *string `json:"privacyStatementUri,omitempty"` + // ReleaseNoteURI - The release note uri. + ReleaseNoteURI *string `json:"releaseNoteUri,omitempty"` + // OsType - This property allows you to specify the type of the OS that is included in the disk when creating a VM from a managed image.

    Possible values are: **Windows**,
    **Linux**. Possible values include: 'Windows', 'Linux' + OsType OperatingSystemTypes `json:"osType,omitempty"` + // OsState - The allowed values for OS State are 'Generalized'. Possible values include: 'Generalized', 'Specialized' + OsState OperatingSystemStateTypes `json:"osState,omitempty"` + // EndOfLifeDate - The end of life date of the gallery Image Definition. This property can be used for decommissioning purposes. This property is updateable. + EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"` + Identifier *GalleryImageIdentifier `json:"identifier,omitempty"` + Recommended *RecommendedMachineConfiguration `json:"recommended,omitempty"` + Disallowed *Disallowed `json:"disallowed,omitempty"` + PurchasePlan *ImagePurchasePlan `json:"purchasePlan,omitempty"` + // ProvisioningState - The provisioning state, which only appears in the response. Possible values include: 'ProvisioningState1Creating', 'ProvisioningState1Updating', 'ProvisioningState1Failed', 'ProvisioningState1Succeeded', 'ProvisioningState1Deleting', 'ProvisioningState1Migrating' + ProvisioningState ProvisioningState1 `json:"provisioningState,omitempty"` +} + +// GalleryImagesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type GalleryImagesCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *GalleryImagesCreateOrUpdateFuture) Result(client GalleryImagesClient) (gi GalleryImage, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImagesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleryImagesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if gi.Response.Response, err = future.GetResult(sender); err == nil && gi.Response.Response.StatusCode != http.StatusNoContent { + gi, err = client.CreateOrUpdateResponder(gi.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImagesCreateOrUpdateFuture", "Result", gi.Response.Response, "Failure responding to request") + } + } + return +} + +// GalleryImagesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type GalleryImagesDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *GalleryImagesDeleteFuture) Result(client GalleryImagesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImagesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleryImagesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// GalleryImageVersion specifies information about the gallery Image Version that you want to create or update. 
+type GalleryImageVersion struct { + autorest.Response `json:"-"` + *GalleryImageVersionProperties `json:"properties,omitempty"` + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - Resource name + Name *string `json:"name,omitempty"` + // Type - Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for GalleryImageVersion. +func (giv GalleryImageVersion) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if giv.GalleryImageVersionProperties != nil { + objectMap["properties"] = giv.GalleryImageVersionProperties + } + if giv.ID != nil { + objectMap["id"] = giv.ID + } + if giv.Name != nil { + objectMap["name"] = giv.Name + } + if giv.Type != nil { + objectMap["type"] = giv.Type + } + if giv.Location != nil { + objectMap["location"] = giv.Location + } + if giv.Tags != nil { + objectMap["tags"] = giv.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for GalleryImageVersion struct. +func (giv *GalleryImageVersion) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var galleryImageVersionProperties GalleryImageVersionProperties + err = json.Unmarshal(*v, &galleryImageVersionProperties) + if err != nil { + return err + } + giv.GalleryImageVersionProperties = &galleryImageVersionProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + giv.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + giv.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + giv.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + giv.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + giv.Tags = tags + } + } + } + + return nil +} + +// GalleryImageVersionList the List Gallery Image version operation response. +type GalleryImageVersionList struct { + autorest.Response `json:"-"` + // Value - A list of gallery Image Versions. + Value *[]GalleryImageVersion `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of gallery Image Versions. Call ListNext() with this to fetch the next page of gallery Image Versions. + NextLink *string `json:"nextLink,omitempty"` +} + +// GalleryImageVersionListIterator provides access to a complete listing of GalleryImageVersion values. +type GalleryImageVersionListIterator struct { + i int + page GalleryImageVersionListPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *GalleryImageVersionListIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. 
+func (iter GalleryImageVersionListIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter GalleryImageVersionListIterator) Response() GalleryImageVersionList { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter GalleryImageVersionListIterator) Value() GalleryImageVersion { + if !iter.page.NotDone() { + return GalleryImageVersion{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (givl GalleryImageVersionList) IsEmpty() bool { + return givl.Value == nil || len(*givl.Value) == 0 +} + +// galleryImageVersionListPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (givl GalleryImageVersionList) galleryImageVersionListPreparer() (*http.Request, error) { + if givl.NextLink == nil || len(to.String(givl.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(givl.NextLink))) +} + +// GalleryImageVersionListPage contains a page of GalleryImageVersion values. +type GalleryImageVersionListPage struct { + fn func(GalleryImageVersionList) (GalleryImageVersionList, error) + givl GalleryImageVersionList +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *GalleryImageVersionListPage) Next() error { + next, err := page.fn(page.givl) + if err != nil { + return err + } + page.givl = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page GalleryImageVersionListPage) NotDone() bool { + return !page.givl.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page GalleryImageVersionListPage) Response() GalleryImageVersionList { + return page.givl +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page GalleryImageVersionListPage) Values() []GalleryImageVersion { + if page.givl.IsEmpty() { + return nil + } + return *page.givl.Value +} + +// GalleryImageVersionProperties describes the properties of a gallery Image Version. +type GalleryImageVersionProperties struct { + PublishingProfile *GalleryImageVersionPublishingProfile `json:"publishingProfile,omitempty"` + // ProvisioningState - The provisioning state, which only appears in the response. Possible values include: 'ProvisioningState2Creating', 'ProvisioningState2Updating', 'ProvisioningState2Failed', 'ProvisioningState2Succeeded', 'ProvisioningState2Deleting', 'ProvisioningState2Migrating' + ProvisioningState ProvisioningState2 `json:"provisioningState,omitempty"` + StorageProfile *GalleryImageVersionStorageProfile `json:"storageProfile,omitempty"` + ReplicationStatus *ReplicationStatus `json:"replicationStatus,omitempty"` +} + +// GalleryImageVersionPublishingProfile the publishing profile of a gallery Image Version. +type GalleryImageVersionPublishingProfile struct { + // ReplicaCount - The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updateable. 
+ ReplicaCount *int32 `json:"replicaCount,omitempty"` + // ExcludeFromLatest - If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version. + ExcludeFromLatest *bool `json:"excludeFromLatest,omitempty"` + // PublishedDate - The timestamp for when the gallery Image Version is published. + PublishedDate *date.Time `json:"publishedDate,omitempty"` + // EndOfLifeDate - The end of life date of the gallery Image Version. This property can be used for decommissioning purposes. This property is updateable. + EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"` + // TargetRegions - The target regions where the Image Version is going to be replicated to. This property is updateable. + TargetRegions *[]TargetRegion `json:"targetRegions,omitempty"` + Source *GalleryArtifactSource `json:"source,omitempty"` +} + +// GalleryImageVersionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type GalleryImageVersionsCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *GalleryImageVersionsCreateOrUpdateFuture) Result(client GalleryImageVersionsClient) (giv GalleryImageVersion, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleryImageVersionsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if giv.Response.Response, err = future.GetResult(sender); err == nil && giv.Response.Response.StatusCode != http.StatusNoContent { + giv, err = client.CreateOrUpdateResponder(giv.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsCreateOrUpdateFuture", "Result", giv.Response.Response, "Failure responding to request") + } + } + return +} + +// GalleryImageVersionsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type GalleryImageVersionsDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *GalleryImageVersionsDeleteFuture) Result(client GalleryImageVersionsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleryImageVersionsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// GalleryImageVersionStorageProfile this is the storage profile of a gallery Image Version. +type GalleryImageVersionStorageProfile struct { + OsDiskImage *GalleryOSDiskImage `json:"osDiskImage,omitempty"` + // DataDiskImages - A list of data disk images. + DataDiskImages *[]GalleryDataDiskImage `json:"dataDiskImages,omitempty"` +} + +// GalleryList the List Galleries operation response. +type GalleryList struct { + autorest.Response `json:"-"` + // Value - A list of galleries. 
+ Value *[]Gallery `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of galleries. Call ListNext() with this to fetch the next page of galleries. + NextLink *string `json:"nextLink,omitempty"` +} + +// GalleryListIterator provides access to a complete listing of Gallery values. +type GalleryListIterator struct { + i int + page GalleryListPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *GalleryListIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter GalleryListIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter GalleryListIterator) Response() GalleryList { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter GalleryListIterator) Value() Gallery { + if !iter.page.NotDone() { + return Gallery{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (gl GalleryList) IsEmpty() bool { + return gl.Value == nil || len(*gl.Value) == 0 +} + +// galleryListPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (gl GalleryList) galleryListPreparer() (*http.Request, error) { + if gl.NextLink == nil || len(to.String(gl.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(gl.NextLink))) +} + +// GalleryListPage contains a page of Gallery values. +type GalleryListPage struct { + fn func(GalleryList) (GalleryList, error) + gl GalleryList +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *GalleryListPage) Next() error { + next, err := page.fn(page.gl) + if err != nil { + return err + } + page.gl = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page GalleryListPage) NotDone() bool { + return !page.gl.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page GalleryListPage) Response() GalleryList { + return page.gl +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page GalleryListPage) Values() []Gallery { + if page.gl.IsEmpty() { + return nil + } + return *page.gl.Value +} + +// GalleryOSDiskImage this is the OS disk image. +type GalleryOSDiskImage struct { + // SizeInGB - This property indicates the size of the VHD to be created. + SizeInGB *int32 `json:"sizeInGB,omitempty"` + // HostCaching - The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite'. Possible values include: 'HostCachingNone', 'HostCachingReadOnly', 'HostCachingReadWrite' + HostCaching HostCaching `json:"hostCaching,omitempty"` +} + +// GalleryProperties describes the properties of a Shared Image Gallery. 
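`GalleryList`, its `...Page`, and its `...Iterator` above follow the SDK's standard two-level paging scheme: a page holds one server response and uses `NextLink` to prepare the following request, while the iterator flattens pages into individual values. A paging sketch using only the `Page` methods shown here; it assumes `GalleriesClient.List` returns a `GalleryListPage` as other list operations of this generation do, and that the client is already authorized.

```go
package gallerylist

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-06-01/compute" // assumed vendored path
)

// GalleryNames walks every page of a GalleryList using only the Page methods
// defined above. The List signature (returning GalleryListPage) is assumed
// from this SDK generation; client must already be authorized.
func GalleryNames(ctx context.Context, client compute.GalleriesClient) ([]string, error) {
	var names []string
	page, err := client.List(ctx)
	if err != nil {
		return nil, err
	}
	for page.NotDone() { // false once a fetched page comes back empty
		for _, g := range page.Values() {
			if g.Name != nil { // resource fields are pointers and may be nil
				names = append(names, *g.Name)
			}
		}
		if err := page.Next(); err != nil { // prepares and sends the NextLink request
			return nil, err
		}
	}
	return names, nil
}
```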
+type GalleryProperties struct { + // Description - The description of this Shared Image Gallery resource. This property is updateable. + Description *string `json:"description,omitempty"` + Identifier *GalleryIdentifier `json:"identifier,omitempty"` + // ProvisioningState - The provisioning state, which only appears in the response. Possible values include: 'ProvisioningStateCreating', 'ProvisioningStateUpdating', 'ProvisioningStateFailed', 'ProvisioningStateSucceeded', 'ProvisioningStateDeleting', 'ProvisioningStateMigrating' + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` +} + +// GrantAccessData data used for requesting a SAS. +type GrantAccessData struct { + // Access - Possible values include: 'None', 'Read' + Access AccessLevel `json:"access,omitempty"` + // DurationInSeconds - Time duration in seconds until the SAS access expires. + DurationInSeconds *int32 `json:"durationInSeconds,omitempty"` +} + +// HardwareProfile specifies the hardware settings for the virtual machine. +type HardwareProfile struct { + // VMSize - Specifies the size of the virtual machine. For more information about virtual machine sizes, see [Sizes for virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-sizes?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).

    The available VM sizes depend on region and availability set. For a list of available sizes use these APIs: [List all available virtual machine sizes in an availability set](https://docs.microsoft.com/rest/api/compute/availabilitysets/listavailablesizes), [List all available virtual machine sizes in a region](https://docs.microsoft.com/rest/api/compute/virtualmachinesizes/list), and
    [List all available virtual machine sizes for resizing](https://docs.microsoft.com/rest/api/compute/virtualmachines/listavailablesizes). Possible values include: 'VirtualMachineSizeTypesBasicA0', 'VirtualMachineSizeTypesBasicA1', 'VirtualMachineSizeTypesBasicA2', 'VirtualMachineSizeTypesBasicA3', 'VirtualMachineSizeTypesBasicA4', 'VirtualMachineSizeTypesStandardA0', 'VirtualMachineSizeTypesStandardA1', 'VirtualMachineSizeTypesStandardA2', 'VirtualMachineSizeTypesStandardA3', 'VirtualMachineSizeTypesStandardA4', 'VirtualMachineSizeTypesStandardA5', 'VirtualMachineSizeTypesStandardA6', 'VirtualMachineSizeTypesStandardA7', 'VirtualMachineSizeTypesStandardA8', 'VirtualMachineSizeTypesStandardA9', 'VirtualMachineSizeTypesStandardA10', 'VirtualMachineSizeTypesStandardA11', 'VirtualMachineSizeTypesStandardA1V2', 'VirtualMachineSizeTypesStandardA2V2', 'VirtualMachineSizeTypesStandardA4V2', 'VirtualMachineSizeTypesStandardA8V2', 'VirtualMachineSizeTypesStandardA2mV2', 'VirtualMachineSizeTypesStandardA4mV2', 'VirtualMachineSizeTypesStandardA8mV2', 'VirtualMachineSizeTypesStandardB1s', 'VirtualMachineSizeTypesStandardB1ms', 'VirtualMachineSizeTypesStandardB2s', 'VirtualMachineSizeTypesStandardB2ms', 'VirtualMachineSizeTypesStandardB4ms', 'VirtualMachineSizeTypesStandardB8ms', 'VirtualMachineSizeTypesStandardD1', 'VirtualMachineSizeTypesStandardD2', 'VirtualMachineSizeTypesStandardD3', 'VirtualMachineSizeTypesStandardD4', 'VirtualMachineSizeTypesStandardD11', 'VirtualMachineSizeTypesStandardD12', 'VirtualMachineSizeTypesStandardD13', 'VirtualMachineSizeTypesStandardD14', 'VirtualMachineSizeTypesStandardD1V2', 'VirtualMachineSizeTypesStandardD2V2', 'VirtualMachineSizeTypesStandardD3V2', 'VirtualMachineSizeTypesStandardD4V2', 'VirtualMachineSizeTypesStandardD5V2', 'VirtualMachineSizeTypesStandardD2V3', 'VirtualMachineSizeTypesStandardD4V3', 'VirtualMachineSizeTypesStandardD8V3', 'VirtualMachineSizeTypesStandardD16V3', 'VirtualMachineSizeTypesStandardD32V3', 'VirtualMachineSizeTypesStandardD64V3', 'VirtualMachineSizeTypesStandardD2sV3', 'VirtualMachineSizeTypesStandardD4sV3', 'VirtualMachineSizeTypesStandardD8sV3', 'VirtualMachineSizeTypesStandardD16sV3', 'VirtualMachineSizeTypesStandardD32sV3', 'VirtualMachineSizeTypesStandardD64sV3', 'VirtualMachineSizeTypesStandardD11V2', 'VirtualMachineSizeTypesStandardD12V2', 'VirtualMachineSizeTypesStandardD13V2', 'VirtualMachineSizeTypesStandardD14V2', 'VirtualMachineSizeTypesStandardD15V2', 'VirtualMachineSizeTypesStandardDS1', 'VirtualMachineSizeTypesStandardDS2', 'VirtualMachineSizeTypesStandardDS3', 'VirtualMachineSizeTypesStandardDS4', 'VirtualMachineSizeTypesStandardDS11', 'VirtualMachineSizeTypesStandardDS12', 'VirtualMachineSizeTypesStandardDS13', 'VirtualMachineSizeTypesStandardDS14', 'VirtualMachineSizeTypesStandardDS1V2', 'VirtualMachineSizeTypesStandardDS2V2', 'VirtualMachineSizeTypesStandardDS3V2', 'VirtualMachineSizeTypesStandardDS4V2', 'VirtualMachineSizeTypesStandardDS5V2', 'VirtualMachineSizeTypesStandardDS11V2', 'VirtualMachineSizeTypesStandardDS12V2', 'VirtualMachineSizeTypesStandardDS13V2', 'VirtualMachineSizeTypesStandardDS14V2', 'VirtualMachineSizeTypesStandardDS15V2', 'VirtualMachineSizeTypesStandardDS134V2', 'VirtualMachineSizeTypesStandardDS132V2', 'VirtualMachineSizeTypesStandardDS148V2', 'VirtualMachineSizeTypesStandardDS144V2', 'VirtualMachineSizeTypesStandardE2V3', 'VirtualMachineSizeTypesStandardE4V3', 'VirtualMachineSizeTypesStandardE8V3', 'VirtualMachineSizeTypesStandardE16V3', 'VirtualMachineSizeTypesStandardE32V3', 
'VirtualMachineSizeTypesStandardE64V3', 'VirtualMachineSizeTypesStandardE2sV3', 'VirtualMachineSizeTypesStandardE4sV3', 'VirtualMachineSizeTypesStandardE8sV3', 'VirtualMachineSizeTypesStandardE16sV3', 'VirtualMachineSizeTypesStandardE32sV3', 'VirtualMachineSizeTypesStandardE64sV3', 'VirtualMachineSizeTypesStandardE3216V3', 'VirtualMachineSizeTypesStandardE328sV3', 'VirtualMachineSizeTypesStandardE6432sV3', 'VirtualMachineSizeTypesStandardE6416sV3', 'VirtualMachineSizeTypesStandardF1', 'VirtualMachineSizeTypesStandardF2', 'VirtualMachineSizeTypesStandardF4', 'VirtualMachineSizeTypesStandardF8', 'VirtualMachineSizeTypesStandardF16', 'VirtualMachineSizeTypesStandardF1s', 'VirtualMachineSizeTypesStandardF2s', 'VirtualMachineSizeTypesStandardF4s', 'VirtualMachineSizeTypesStandardF8s', 'VirtualMachineSizeTypesStandardF16s', 'VirtualMachineSizeTypesStandardF2sV2', 'VirtualMachineSizeTypesStandardF4sV2', 'VirtualMachineSizeTypesStandardF8sV2', 'VirtualMachineSizeTypesStandardF16sV2', 'VirtualMachineSizeTypesStandardF32sV2', 'VirtualMachineSizeTypesStandardF64sV2', 'VirtualMachineSizeTypesStandardF72sV2', 'VirtualMachineSizeTypesStandardG1', 'VirtualMachineSizeTypesStandardG2', 'VirtualMachineSizeTypesStandardG3', 'VirtualMachineSizeTypesStandardG4', 'VirtualMachineSizeTypesStandardG5', 'VirtualMachineSizeTypesStandardGS1', 'VirtualMachineSizeTypesStandardGS2', 'VirtualMachineSizeTypesStandardGS3', 'VirtualMachineSizeTypesStandardGS4', 'VirtualMachineSizeTypesStandardGS5', 'VirtualMachineSizeTypesStandardGS48', 'VirtualMachineSizeTypesStandardGS44', 'VirtualMachineSizeTypesStandardGS516', 'VirtualMachineSizeTypesStandardGS58', 'VirtualMachineSizeTypesStandardH8', 'VirtualMachineSizeTypesStandardH16', 'VirtualMachineSizeTypesStandardH8m', 'VirtualMachineSizeTypesStandardH16m', 'VirtualMachineSizeTypesStandardH16r', 'VirtualMachineSizeTypesStandardH16mr', 'VirtualMachineSizeTypesStandardL4s', 'VirtualMachineSizeTypesStandardL8s', 'VirtualMachineSizeTypesStandardL16s', 'VirtualMachineSizeTypesStandardL32s', 'VirtualMachineSizeTypesStandardM64s', 'VirtualMachineSizeTypesStandardM64ms', 'VirtualMachineSizeTypesStandardM128s', 'VirtualMachineSizeTypesStandardM128ms', 'VirtualMachineSizeTypesStandardM6432ms', 'VirtualMachineSizeTypesStandardM6416ms', 'VirtualMachineSizeTypesStandardM12864ms', 'VirtualMachineSizeTypesStandardM12832ms', 'VirtualMachineSizeTypesStandardNC6', 'VirtualMachineSizeTypesStandardNC12', 'VirtualMachineSizeTypesStandardNC24', 'VirtualMachineSizeTypesStandardNC24r', 'VirtualMachineSizeTypesStandardNC6sV2', 'VirtualMachineSizeTypesStandardNC12sV2', 'VirtualMachineSizeTypesStandardNC24sV2', 'VirtualMachineSizeTypesStandardNC24rsV2', 'VirtualMachineSizeTypesStandardNC6sV3', 'VirtualMachineSizeTypesStandardNC12sV3', 'VirtualMachineSizeTypesStandardNC24sV3', 'VirtualMachineSizeTypesStandardNC24rsV3', 'VirtualMachineSizeTypesStandardND6s', 'VirtualMachineSizeTypesStandardND12s', 'VirtualMachineSizeTypesStandardND24s', 'VirtualMachineSizeTypesStandardND24rs', 'VirtualMachineSizeTypesStandardNV6', 'VirtualMachineSizeTypesStandardNV12', 'VirtualMachineSizeTypesStandardNV24' + VMSize VirtualMachineSizeTypes `json:"vmSize,omitempty"` +} + +// Image the source user image virtual hard disk. The virtual hard disk will be copied before being attached to the +// virtual machine. If SourceImage is provided, the destination virtual hard drive must not exist. 
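`VMSize` is a typed enumeration: callers select one of the string-backed constants listed above rather than passing a free-form string. A trivial sketch, with the import path assumed and Standard_DS2_v2 chosen arbitrarily:

```go
package sizes

import "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-06-01/compute" // assumed vendored path

// DefaultProfile returns a HardwareProfile built from one of the typed size
// constants enumerated above; Standard_DS2_v2 is an arbitrary example.
func DefaultProfile() compute.HardwareProfile {
	return compute.HardwareProfile{VMSize: compute.VirtualMachineSizeTypesStandardDS2V2}
}
```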
+type Image struct { + autorest.Response `json:"-"` + *ImageProperties `json:"properties,omitempty"` + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - Resource name + Name *string `json:"name,omitempty"` + // Type - Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for Image. +func (i Image) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if i.ImageProperties != nil { + objectMap["properties"] = i.ImageProperties + } + if i.ID != nil { + objectMap["id"] = i.ID + } + if i.Name != nil { + objectMap["name"] = i.Name + } + if i.Type != nil { + objectMap["type"] = i.Type + } + if i.Location != nil { + objectMap["location"] = i.Location + } + if i.Tags != nil { + objectMap["tags"] = i.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Image struct. +func (i *Image) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var imageProperties ImageProperties + err = json.Unmarshal(*v, &imageProperties) + if err != nil { + return err + } + i.ImageProperties = &imageProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + i.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + i.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + i.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + i.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + i.Tags = tags + } + } + } + + return nil +} + +// ImageDataDisk describes a data disk. +type ImageDataDisk struct { + // Lun - Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. + Lun *int32 `json:"lun,omitempty"` + // Snapshot - The snapshot. + Snapshot *SubResource `json:"snapshot,omitempty"` + // ManagedDisk - The managedDisk. + ManagedDisk *SubResource `json:"managedDisk,omitempty"` + // BlobURI - The Virtual Hard Disk. + BlobURI *string `json:"blobUri,omitempty"` + // Caching - Specifies the caching requirements.

    Possible values are: **None**, **ReadOnly**, **ReadWrite**.
    Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' + Caching CachingTypes `json:"caching,omitempty"` + // DiskSizeGB - Specifies the size of empty data disks in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image.

    This value cannot be larger than 1023 GB + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS' + StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` +} + +// ImageDiskReference the source image used for creating the disk. +type ImageDiskReference struct { + // ID - A relative uri containing either a Platform Image Repository or user image reference. + ID *string `json:"id,omitempty"` + // Lun - If the disk is created from an image's data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null. + Lun *int32 `json:"lun,omitempty"` +} + +// ImageListResult the List Image operation response. +type ImageListResult struct { + autorest.Response `json:"-"` + // Value - The list of Images. + Value *[]Image `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of Images. Call ListNext() with this to fetch the next page of Images. + NextLink *string `json:"nextLink,omitempty"` +} + +// ImageListResultIterator provides access to a complete listing of Image values. +type ImageListResultIterator struct { + i int + page ImageListResultPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *ImageListResultIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ImageListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ImageListResultIterator) Response() ImageListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ImageListResultIterator) Value() Image { + if !iter.page.NotDone() { + return Image{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (ilr ImageListResult) IsEmpty() bool { + return ilr.Value == nil || len(*ilr.Value) == 0 +} + +// imageListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (ilr ImageListResult) imageListResultPreparer() (*http.Request, error) { + if ilr.NextLink == nil || len(to.String(ilr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(ilr.NextLink))) +} + +// ImageListResultPage contains a page of Image values. +type ImageListResultPage struct { + fn func(ImageListResult) (ImageListResult, error) + ilr ImageListResult +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned.
+func (page *ImageListResultPage) Next() error { + next, err := page.fn(page.ilr) + if err != nil { + return err + } + page.ilr = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ImageListResultPage) NotDone() bool { + return !page.ilr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page ImageListResultPage) Response() ImageListResult { + return page.ilr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page ImageListResultPage) Values() []Image { + if page.ilr.IsEmpty() { + return nil + } + return *page.ilr.Value +} + +// ImageOSDisk describes an Operating System disk. +type ImageOSDisk struct { + // OsType - This property allows you to specify the type of the OS that is included in the disk if creating a VM from a custom image.

    Possible values are: **Windows**,
    **Linux**. Possible values include: 'Windows', 'Linux' + OsType OperatingSystemTypes `json:"osType,omitempty"` + // OsState - The OS State. Possible values include: 'Generalized', 'Specialized' + OsState OperatingSystemStateTypes `json:"osState,omitempty"` + // Snapshot - The snapshot. + Snapshot *SubResource `json:"snapshot,omitempty"` + // ManagedDisk - The managedDisk. + ManagedDisk *SubResource `json:"managedDisk,omitempty"` + // BlobURI - The Virtual Hard Disk. + BlobURI *string `json:"blobUri,omitempty"` + // Caching - Specifies the caching requirements.

    Possible values are: **None**, **ReadOnly**, **ReadWrite**.
    Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' + Caching CachingTypes `json:"caching,omitempty"` + // DiskSizeGB - Specifies the size of empty data disks in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image.

    This value cannot be larger than 1023 GB + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // StorageAccountType - Specifies the storage account type for the managed disk. UltraSSD_LRS cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS' + StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` +} + +// ImageProperties describes the properties of an Image. +type ImageProperties struct { + // SourceVirtualMachine - The source virtual machine from which Image is created. + SourceVirtualMachine *SubResource `json:"sourceVirtualMachine,omitempty"` + // StorageProfile - Specifies the storage settings for the virtual machine disks. + StorageProfile *ImageStorageProfile `json:"storageProfile,omitempty"` + // ProvisioningState - The provisioning state. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ImagePurchasePlan describes the gallery Image Definition purchase plan. This is used by marketplace images. +type ImagePurchasePlan struct { + // Name - The plan ID. + Name *string `json:"name,omitempty"` + // Publisher - The publisher ID. + Publisher *string `json:"publisher,omitempty"` + // Product - The product ID. + Product *string `json:"product,omitempty"` +} + +// ImageReference specifies information about the image to use. You can specify information about platform images, +// marketplace images, or virtual machine images. This element is required when you want to use a platform image, +// marketplace image, or virtual machine image, but is not used in other creation operations. +type ImageReference struct { + // Publisher - The image publisher. + Publisher *string `json:"publisher,omitempty"` + // Offer - Specifies the offer of the platform image or marketplace image used to create the virtual machine. + Offer *string `json:"offer,omitempty"` + // Sku - The image SKU. + Sku *string `json:"sku,omitempty"` + // Version - Specifies the version of the platform image or marketplace image used to create the virtual machine. The allowed formats are Major.Minor.Build or 'latest'. Major, Minor, and Build are decimal numbers. Specify 'latest' to use the latest version of an image available at deploy time. Even if you use 'latest', the VM image will not automatically update after deploy time even if a new version becomes available. + Version *string `json:"version,omitempty"` + // ID - Resource Id + ID *string `json:"id,omitempty"` +} + +// ImagesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type ImagesCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *ImagesCreateOrUpdateFuture) Result(client ImagesClient) (i Image, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.ImagesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if i.Response.Response, err = future.GetResult(sender); err == nil && i.Response.Response.StatusCode != http.StatusNoContent { + i, err = client.CreateOrUpdateResponder(i.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesCreateOrUpdateFuture", "Result", i.Response.Response, "Failure responding to request") + } + } + return +} + +// ImagesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type ImagesDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *ImagesDeleteFuture) Result(client ImagesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.ImagesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// ImageStorageProfile describes a storage profile. +type ImageStorageProfile struct { + // OsDisk - Specifies information about the operating system disk used by the virtual machine.

    For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). + OsDisk *ImageOSDisk `json:"osDisk,omitempty"` + // DataDisks - Specifies the parameters that are used to add a data disk to a virtual machine.

    For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). + DataDisks *[]ImageDataDisk `json:"dataDisks,omitempty"` + // ZoneResilient - Specifies whether an image is zone resilient or not. Default is false. Zone resilient images can be created only in regions that provide Zone Redundant Storage (ZRS). + ZoneResilient *bool `json:"zoneResilient,omitempty"` +} + +// ImagesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type ImagesUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *ImagesUpdateFuture) Result(client ImagesClient) (i Image, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.ImagesUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if i.Response.Response, err = future.GetResult(sender); err == nil && i.Response.Response.StatusCode != http.StatusNoContent { + i, err = client.UpdateResponder(i.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesUpdateFuture", "Result", i.Response.Response, "Failure responding to request") + } + } + return +} + +// ImageUpdate the source user image virtual hard disk. Only tags may be updated. +type ImageUpdate struct { + *ImageProperties `json:"properties,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for ImageUpdate. +func (iu ImageUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if iu.ImageProperties != nil { + objectMap["properties"] = iu.ImageProperties + } + if iu.Tags != nil { + objectMap["tags"] = iu.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ImageUpdate struct. +func (iu *ImageUpdate) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var imageProperties ImageProperties + err = json.Unmarshal(*v, &imageProperties) + if err != nil { + return err + } + iu.ImageProperties = &imageProperties + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + iu.Tags = tags + } + } + } + + return nil +} + +// InnerError inner error details. +type InnerError struct { + // Exceptiontype - The exception type. + Exceptiontype *string `json:"exceptiontype,omitempty"` + // Errordetail - The internal error message or exception dump. + Errordetail *string `json:"errordetail,omitempty"` +} + +// InstanceViewStatus instance view status. +type InstanceViewStatus struct { + // Code - The status code. + Code *string `json:"code,omitempty"` + // Level - The level code. 
Possible values include: 'Info', 'Warning', 'Error' + Level StatusLevelTypes `json:"level,omitempty"` + // DisplayStatus - The short localizable label for the status. + DisplayStatus *string `json:"displayStatus,omitempty"` + // Message - The detailed status message, including for alerts and error messages. + Message *string `json:"message,omitempty"` + // Time - The time of the status. + Time *date.Time `json:"time,omitempty"` +} + +// KeyVaultAndKeyReference key Vault Key Url and vault id of KeK, KeK is optional and when provided is used to +// unwrap the encryptionKey +type KeyVaultAndKeyReference struct { + // SourceVault - Resource id of the KeyVault containing the key or secret + SourceVault *SourceVault `json:"sourceVault,omitempty"` + // KeyURL - Url pointing to a key or secret in KeyVault + KeyURL *string `json:"keyUrl,omitempty"` +} + +// KeyVaultAndSecretReference key Vault Secret Url and vault id of the encryption key +type KeyVaultAndSecretReference struct { + // SourceVault - Resource id of the KeyVault containing the key or secret + SourceVault *SourceVault `json:"sourceVault,omitempty"` + // SecretURL - Url pointing to a key or secret in KeyVault + SecretURL *string `json:"secretUrl,omitempty"` +} + +// KeyVaultKeyReference describes a reference to Key Vault Key +type KeyVaultKeyReference struct { + // KeyURL - The URL referencing a key encryption key in Key Vault. + KeyURL *string `json:"keyUrl,omitempty"` + // SourceVault - The relative URL of the Key Vault containing the key. + SourceVault *SubResource `json:"sourceVault,omitempty"` +} + +// KeyVaultSecretReference describes a reference to Key Vault Secret +type KeyVaultSecretReference struct { + // SecretURL - The URL referencing a secret in a Key Vault. + SecretURL *string `json:"secretUrl,omitempty"` + // SourceVault - The relative URL of the Key Vault containing the secret. + SourceVault *SubResource `json:"sourceVault,omitempty"` +} + +// LinuxConfiguration specifies the Linux operating system settings on the virtual machine.

    For a list of +// supported Linux distributions, see [Linux on Azure-Endorsed +// Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-endorsed-distros?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json) +//

    For running non-endorsed distributions, see [Information for Non-Endorsed +// Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-create-upload-generic?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json). +type LinuxConfiguration struct { + // DisablePasswordAuthentication - Specifies whether password authentication should be disabled. + DisablePasswordAuthentication *bool `json:"disablePasswordAuthentication,omitempty"` + // SSH - Specifies the ssh key configuration for a Linux OS. + SSH *SSHConfiguration `json:"ssh,omitempty"` + // ProvisionVMAgent - Indicates whether virtual machine agent should be provisioned on the virtual machine.

    When this property is not specified in the request body, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later. + ProvisionVMAgent *bool `json:"provisionVMAgent,omitempty"` +} + +// ListUsagesResult the List Usages operation response. +type ListUsagesResult struct { + autorest.Response `json:"-"` + // Value - The list of compute resource usages. + Value *[]Usage `json:"value,omitempty"` + // NextLink - The URI to fetch the next page of compute resource usage information. Call ListNext() with this to fetch the next page of compute resource usage information. + NextLink *string `json:"nextLink,omitempty"` +} + +// ListUsagesResultIterator provides access to a complete listing of Usage values. +type ListUsagesResultIterator struct { + i int + page ListUsagesResultPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *ListUsagesResultIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ListUsagesResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ListUsagesResultIterator) Response() ListUsagesResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ListUsagesResultIterator) Value() Usage { + if !iter.page.NotDone() { + return Usage{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (lur ListUsagesResult) IsEmpty() bool { + return lur.Value == nil || len(*lur.Value) == 0 +} + +// listUsagesResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (lur ListUsagesResult) listUsagesResultPreparer() (*http.Request, error) { + if lur.NextLink == nil || len(to.String(lur.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(lur.NextLink))) +} + +// ListUsagesResultPage contains a page of Usage values. +type ListUsagesResultPage struct { + fn func(ListUsagesResult) (ListUsagesResult, error) + lur ListUsagesResult +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *ListUsagesResultPage) Next() error { + next, err := page.fn(page.lur) + if err != nil { + return err + } + page.lur = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ListUsagesResultPage) NotDone() bool { + return !page.lur.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page ListUsagesResultPage) Response() ListUsagesResult { + return page.lur +} + +// Values returns the slice of values for the current page or nil if there are no values. 
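// The iterator/page pair above follows the standard autorest enumeration
// pattern. A minimal consumption sketch, assuming an iterator already
// obtained from a usage-listing client call (client wiring not shown):
//
//	func collectUsages(iter ListUsagesResultIterator) ([]Usage, error) {
//		var all []Usage
//		for iter.NotDone() {
//			all = append(all, iter.Value()) // current element of the current page
//			if err := iter.Next(); err != nil {
//				return nil, err // a failed page fetch stops the enumeration
//			}
//		}
//		return all, nil
//	}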
+func (page ListUsagesResultPage) Values() []Usage { + if page.lur.IsEmpty() { + return nil + } + return *page.lur.Value +} + +// ListVirtualMachineExtensionImage ... +type ListVirtualMachineExtensionImage struct { + autorest.Response `json:"-"` + Value *[]VirtualMachineExtensionImage `json:"value,omitempty"` +} + +// ListVirtualMachineImageResource ... +type ListVirtualMachineImageResource struct { + autorest.Response `json:"-"` + Value *[]VirtualMachineImageResource `json:"value,omitempty"` +} + +// LogAnalyticsExportRequestRateByIntervalFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type LogAnalyticsExportRequestRateByIntervalFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *LogAnalyticsExportRequestRateByIntervalFuture) Result(client LogAnalyticsClient) (laor LogAnalyticsOperationResult, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportRequestRateByIntervalFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.LogAnalyticsExportRequestRateByIntervalFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if laor.Response.Response, err = future.GetResult(sender); err == nil && laor.Response.Response.StatusCode != http.StatusNoContent { + laor, err = client.ExportRequestRateByIntervalResponder(laor.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportRequestRateByIntervalFuture", "Result", laor.Response.Response, "Failure responding to request") + } + } + return +} + +// LogAnalyticsExportThrottledRequestsFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type LogAnalyticsExportThrottledRequestsFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *LogAnalyticsExportThrottledRequestsFuture) Result(client LogAnalyticsClient) (laor LogAnalyticsOperationResult, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportThrottledRequestsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.LogAnalyticsExportThrottledRequestsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if laor.Response.Response, err = future.GetResult(sender); err == nil && laor.Response.Response.StatusCode != http.StatusNoContent { + laor, err = client.ExportThrottledRequestsResponder(laor.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.LogAnalyticsExportThrottledRequestsFuture", "Result", laor.Response.Response, "Failure responding to request") + } + } + return +} + +// LogAnalyticsInputBase api input base class for LogAnalytics Api. +type LogAnalyticsInputBase struct { + // BlobContainerSasURI - SAS Uri of the logging blob container to which the LogAnalytics Api writes output logs.
+ BlobContainerSasURI *string `json:"blobContainerSasUri,omitempty"` + // FromTime - From time of the query + FromTime *date.Time `json:"fromTime,omitempty"` + // ToTime - To time of the query + ToTime *date.Time `json:"toTime,omitempty"` + // GroupByThrottlePolicy - Group query result by Throttle Policy applied. + GroupByThrottlePolicy *bool `json:"groupByThrottlePolicy,omitempty"` + // GroupByOperationName - Group query result by Operation Name. + GroupByOperationName *bool `json:"groupByOperationName,omitempty"` + // GroupByResourceName - Group query result by Resource Name. + GroupByResourceName *bool `json:"groupByResourceName,omitempty"` +} + +// LogAnalyticsOperationResult logAnalytics operation status response +type LogAnalyticsOperationResult struct { + autorest.Response `json:"-"` + // Properties - LogAnalyticsOutput + Properties *LogAnalyticsOutput `json:"properties,omitempty"` +} + +// LogAnalyticsOutput logAnalytics output properties +type LogAnalyticsOutput struct { + // Output - Output file Uri path to blob container. + Output *string `json:"output,omitempty"` +} + +// MaintenanceRedeployStatus maintenance Operation Status. +type MaintenanceRedeployStatus struct { + // IsCustomerInitiatedMaintenanceAllowed - True, if customer is allowed to perform Maintenance. + IsCustomerInitiatedMaintenanceAllowed *bool `json:"isCustomerInitiatedMaintenanceAllowed,omitempty"` + // PreMaintenanceWindowStartTime - Start Time for the Pre Maintenance Window. + PreMaintenanceWindowStartTime *date.Time `json:"preMaintenanceWindowStartTime,omitempty"` + // PreMaintenanceWindowEndTime - End Time for the Pre Maintenance Window. + PreMaintenanceWindowEndTime *date.Time `json:"preMaintenanceWindowEndTime,omitempty"` + // MaintenanceWindowStartTime - Start Time for the Maintenance Window. + MaintenanceWindowStartTime *date.Time `json:"maintenanceWindowStartTime,omitempty"` + // MaintenanceWindowEndTime - End Time for the Maintenance Window. + MaintenanceWindowEndTime *date.Time `json:"maintenanceWindowEndTime,omitempty"` + // LastOperationResultCode - The Last Maintenance Operation Result Code. Possible values include: 'MaintenanceOperationResultCodeTypesNone', 'MaintenanceOperationResultCodeTypesRetryLater', 'MaintenanceOperationResultCodeTypesMaintenanceAborted', 'MaintenanceOperationResultCodeTypesMaintenanceCompleted' + LastOperationResultCode MaintenanceOperationResultCodeTypes `json:"lastOperationResultCode,omitempty"` + // LastOperationMessage - Message returned for the last Maintenance Operation. + LastOperationMessage *string `json:"lastOperationMessage,omitempty"` +} + +// ManagedArtifact the managed artifact. +type ManagedArtifact struct { + // ID - The managed artifact id. + ID *string `json:"id,omitempty"` +} + +// ManagedDiskParameters the parameters of a managed disk. +type ManagedDiskParameters struct { + // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS' + StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` + // ID - Resource Id + ID *string `json:"id,omitempty"` +} + +// NetworkInterfaceReference describes a network interface reference.
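// A minimal sketch of populating the ManagedDiskParameters type above; the
// storage account type is illustrative, and note the documented caveat that
// UltraSSD_LRS is only valid for data disks:
//
//	params := ManagedDiskParameters{
//		StorageAccountType: StorageAccountTypesPremiumLRS, // UltraSSD_LRS would be invalid for an OS disk
//	}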
+type NetworkInterfaceReference struct { + *NetworkInterfaceReferenceProperties `json:"properties,omitempty"` + // ID - Resource Id + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for NetworkInterfaceReference. +func (nir NetworkInterfaceReference) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if nir.NetworkInterfaceReferenceProperties != nil { + objectMap["properties"] = nir.NetworkInterfaceReferenceProperties + } + if nir.ID != nil { + objectMap["id"] = nir.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for NetworkInterfaceReference struct. +func (nir *NetworkInterfaceReference) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var networkInterfaceReferenceProperties NetworkInterfaceReferenceProperties + err = json.Unmarshal(*v, &networkInterfaceReferenceProperties) + if err != nil { + return err + } + nir.NetworkInterfaceReferenceProperties = &networkInterfaceReferenceProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + nir.ID = &ID + } + } + } + + return nil +} + +// NetworkInterfaceReferenceProperties describes a network interface reference properties. +type NetworkInterfaceReferenceProperties struct { + // Primary - Specifies the primary network interface in case the virtual machine has more than 1 network interface. + Primary *bool `json:"primary,omitempty"` +} + +// NetworkProfile specifies the network interfaces of the virtual machine. +type NetworkProfile struct { + // NetworkInterfaces - Specifies the list of resource Ids for the network interfaces associated with the virtual machine. + NetworkInterfaces *[]NetworkInterfaceReference `json:"networkInterfaces,omitempty"` +} + +// OperationListResult the List Compute Operation operation response. +type OperationListResult struct { + autorest.Response `json:"-"` + // Value - The list of compute operations + Value *[]OperationValue `json:"value,omitempty"` +} + +// OperationValue describes the properties of a Compute Operation value. +type OperationValue struct { + // Origin - The origin of the compute operation. + Origin *string `json:"origin,omitempty"` + // Name - The name of the compute operation. + Name *string `json:"name,omitempty"` + *OperationValueDisplay `json:"display,omitempty"` +} + +// MarshalJSON is the custom marshaler for OperationValue. +func (ov OperationValue) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if ov.Origin != nil { + objectMap["origin"] = ov.Origin + } + if ov.Name != nil { + objectMap["name"] = ov.Name + } + if ov.OperationValueDisplay != nil { + objectMap["display"] = ov.OperationValueDisplay + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for OperationValue struct. 
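// A minimal sketch tying the network types above together: one NIC reference
// marked primary inside a NetworkProfile. The resource ID is a placeholder,
// and to.StringPtr/to.BoolPtr are the autorest/to helpers:
//
//	nic := NetworkInterfaceReference{
//		ID: to.StringPtr("/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Network/networkInterfaces/<nic>"), // placeholder
//		NetworkInterfaceReferenceProperties: &NetworkInterfaceReferenceProperties{
//			Primary: to.BoolPtr(true), // meaningful when the VM has more than one NIC
//		},
//	}
//	profile := NetworkProfile{NetworkInterfaces: &[]NetworkInterfaceReference{nic}}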
+func (ov *OperationValue) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "origin": + if v != nil { + var origin string + err = json.Unmarshal(*v, &origin) + if err != nil { + return err + } + ov.Origin = &origin + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + ov.Name = &name + } + case "display": + if v != nil { + var operationValueDisplay OperationValueDisplay + err = json.Unmarshal(*v, &operationValueDisplay) + if err != nil { + return err + } + ov.OperationValueDisplay = &operationValueDisplay + } + } + } + + return nil +} + +// OperationValueDisplay describes the properties of a Compute Operation Value Display. +type OperationValueDisplay struct { + // Operation - The display name of the compute operation. + Operation *string `json:"operation,omitempty"` + // Resource - The display name of the resource the operation applies to. + Resource *string `json:"resource,omitempty"` + // Description - The description of the operation. + Description *string `json:"description,omitempty"` + // Provider - The resource provider for the operation. + Provider *string `json:"provider,omitempty"` +} + +// OSDisk specifies information about the operating system disk used by the virtual machine.

    For more +// information about disks, see [About disks and VHDs for Azure virtual +// machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). +type OSDisk struct { + // OsType - This property allows you to specify the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD.

    Possible values are:

    **Windows**

    **Linux**. Possible values include: 'Windows', 'Linux' + OsType OperatingSystemTypes `json:"osType,omitempty"` + // EncryptionSettings - Specifies the encryption settings for the OS Disk.

    Minimum api-version: 2015-06-15 + EncryptionSettings *DiskEncryptionSettings `json:"encryptionSettings,omitempty"` + // Name - The disk name. + Name *string `json:"name,omitempty"` + // Vhd - The virtual hard disk. + Vhd *VirtualHardDisk `json:"vhd,omitempty"` + // Image - The source user image virtual hard disk. The virtual hard disk will be copied before being attached to the virtual machine. If SourceImage is provided, the destination virtual hard drive must not exist. + Image *VirtualHardDisk `json:"image,omitempty"` + // Caching - Specifies the caching requirements.

    Possible values are:

    **None**

    **ReadOnly**

    **ReadWrite**

    Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' + Caching CachingTypes `json:"caching,omitempty"` + // WriteAcceleratorEnabled - Specifies whether writeAccelerator should be enabled or disabled on the disk. + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"` + // DiffDiskSettings - Specifies the differencing Disk Settings for the operating system disk used by the virtual machine. + DiffDiskSettings *DiffDiskSettings `json:"diffDiskSettings,omitempty"` + // CreateOption - Specifies how the virtual machine should be created.

    Possible values are:

    **Attach** – This value is used when you are using a specialized disk to create the virtual machine.

    **FromImage** – This value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imageReference element described above. If you are using a marketplace image, you also use the plan element previously described. Possible values include: 'DiskCreateOptionTypesFromImage', 'DiskCreateOptionTypesEmpty', 'DiskCreateOptionTypesAttach' + CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` + // DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image.

    This value cannot be larger than 1023 GB + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // ManagedDisk - The managed disk parameters. + ManagedDisk *ManagedDiskParameters `json:"managedDisk,omitempty"` +} + +// OSDiskImage contains the os disk image information. +type OSDiskImage struct { + // OperatingSystem - The operating system of the osDiskImage. Possible values include: 'Windows', 'Linux' + OperatingSystem OperatingSystemTypes `json:"operatingSystem,omitempty"` +} + +// OSProfile specifies the operating system settings for the virtual machine. +type OSProfile struct { + // ComputerName - Specifies the host OS name of the virtual machine.

    **Max-length (Windows):** 15 characters

    **Max-length (Linux):** 64 characters.

    For naming conventions and restrictions see [Azure infrastructure services implementation guidelines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-infrastructure-subscription-accounts-guidelines?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json#1-naming-conventions). + ComputerName *string `json:"computerName,omitempty"` + // AdminUsername - Specifies the name of the administrator account.

    **Windows-only restriction:** Cannot end in "."

    **Disallowed values:** "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", "guest", "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", "test2", "test3", "user4", "user5".

    **Minimum-length (Linux):** 1 character

    **Max-length (Linux):** 64 characters

    **Max-length (Windows):** 20 characters

  • For root access to the Linux VM, see [Using root privileges on Linux virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-use-root-privileges?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)
  • For a list of built-in system users on Linux that should not be used in this field, see [Selecting User Names for Linux on Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-usernames?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json) + AdminUsername *string `json:"adminUsername,omitempty"` + // AdminPassword - Specifies the password of the administrator account.

    **Minimum-length (Windows):** 8 characters

    **Minimum-length (Linux):** 6 characters

    **Max-length (Windows):** 123 characters

    **Max-length (Linux):** 72 characters

    **Complexity requirements:** 3 out of 4 conditions below need to be fulfilled
    Has lower characters
    Has upper characters
    Has a digit
    Has a special character (Regex match [\W_])

    **Disallowed values:** "abc@123", "P@$$w0rd", "P@ssw0rd", "P@ssword123", "Pa$$word", "pass@word1", "Password!", "Password1", "Password22", "iloveyou!"

    For resetting the password, see [How to reset the Remote Desktop service or its login password in a Windows VM](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-reset-rdp?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)

    For resetting root password, see [Manage users, SSH, and check or repair disks on Azure Linux VMs using the VMAccess Extension](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-vmaccess-extension?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json#reset-root-password) + AdminPassword *string `json:"adminPassword,omitempty"` + // CustomData - Specifies a base-64 encoded string of custom data. The base-64 encoded string is decoded to a binary array that is saved as a file on the Virtual Machine. The maximum length of the binary array is 65535 bytes.

    For using cloud-init for your VM, see [Using cloud-init to customize a Linux VM during creation](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-cloud-init?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json) + CustomData *string `json:"customData,omitempty"` + // WindowsConfiguration - Specifies Windows operating system settings on the virtual machine. + WindowsConfiguration *WindowsConfiguration `json:"windowsConfiguration,omitempty"` + // LinuxConfiguration - Specifies the Linux operating system settings on the virtual machine.

    For a list of supported Linux distributions, see [Linux on Azure-Endorsed Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-endorsed-distros?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)

    For running non-endorsed distributions, see [Information for Non-Endorsed Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-create-upload-generic?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json). + LinuxConfiguration *LinuxConfiguration `json:"linuxConfiguration,omitempty"` + // Secrets - Specifies set of certificates that should be installed onto the virtual machine. + Secrets *[]VaultSecretGroup `json:"secrets,omitempty"` + // AllowExtensionOperations - Specifies whether extension operations should be allowed on the virtual machine.

    This may only be set to False when no extensions are present on the virtual machine. + AllowExtensionOperations *bool `json:"allowExtensionOperations,omitempty"` +} + +// Plan specifies information about the marketplace image used to create the virtual machine. This element is only +// used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for +// programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to +// deploy programmatically, Get Started ->**. Enter any required information and then click **Save**. +type Plan struct { + // Name - The plan ID. + Name *string `json:"name,omitempty"` + // Publisher - The publisher ID. + Publisher *string `json:"publisher,omitempty"` + // Product - Specifies the product of the image from the marketplace. This is the same value as Offer under the imageReference element. + Product *string `json:"product,omitempty"` + // PromotionCode - The promotion code. + PromotionCode *string `json:"promotionCode,omitempty"` +} + +// PurchasePlan used for establishing the purchase context of any 3rd Party artifact through MarketPlace. +type PurchasePlan struct { + // Publisher - The publisher ID. + Publisher *string `json:"publisher,omitempty"` + // Name - The plan ID. + Name *string `json:"name,omitempty"` + // Product - Specifies the product of the image from the marketplace. This is the same value as Offer under the imageReference element. + Product *string `json:"product,omitempty"` +} + +// RecommendedMachineConfiguration the properties describe the recommended machine configuration for this Image +// Definition. These properties are updateable. +type RecommendedMachineConfiguration struct { + VCPUs *ResourceRange `json:"vCPUs,omitempty"` + Memory *ResourceRange `json:"memory,omitempty"` +} + +// RecoveryWalkResponse response after calling a manual recovery walk +type RecoveryWalkResponse struct { + autorest.Response `json:"-"` + // WalkPerformed - Whether the recovery walk was performed + WalkPerformed *bool `json:"walkPerformed,omitempty"` + // NextPlatformUpdateDomain - The next update domain that needs to be walked. Null means walk spanning all update domains has been completed + NextPlatformUpdateDomain *int32 `json:"nextPlatformUpdateDomain,omitempty"` +} + +// RegionalReplicationStatus this is the regional replication status. +type RegionalReplicationStatus struct { + // Region - The region to which the gallery Image Version is being replicated. + Region *string `json:"region,omitempty"` + // State - This is the regional replication state. Possible values include: 'ReplicationStateUnknown', 'ReplicationStateReplicating', 'ReplicationStateCompleted', 'ReplicationStateFailed' + State ReplicationState `json:"state,omitempty"` + // Details - The details of the replication status. + Details *string `json:"details,omitempty"` + // Progress - It indicates progress of the replication job. + Progress *int32 `json:"progress,omitempty"` +} + +// ReplicationStatus this is the replication status of the gallery Image Version. +type ReplicationStatus struct { + // AggregatedState - This is the aggregated replication status based on all the regional replication status flags. Possible values include: 'Unknown', 'InProgress', 'Completed', 'Failed' + AggregatedState AggregatedReplicationState `json:"aggregatedState,omitempty"` + // Summary - This is a summary of replication status for each region.
+ Summary *[]RegionalReplicationStatus `json:"summary,omitempty"` +} + +// RequestRateByIntervalInput api request input for LogAnalytics getRequestRateByInterval Api. +type RequestRateByIntervalInput struct { + // IntervalLength - Interval value in minutes used to create LogAnalytics call rate logs. Possible values include: 'ThreeMins', 'FiveMins', 'ThirtyMins', 'SixtyMins' + IntervalLength IntervalInMins `json:"intervalLength,omitempty"` + // BlobContainerSasURI - SAS Uri of the logging blob container to which the LogAnalytics Api writes output logs. + BlobContainerSasURI *string `json:"blobContainerSasUri,omitempty"` + // FromTime - From time of the query + FromTime *date.Time `json:"fromTime,omitempty"` + // ToTime - To time of the query + ToTime *date.Time `json:"toTime,omitempty"` + // GroupByThrottlePolicy - Group query result by Throttle Policy applied. + GroupByThrottlePolicy *bool `json:"groupByThrottlePolicy,omitempty"` + // GroupByOperationName - Group query result by Operation Name. + GroupByOperationName *bool `json:"groupByOperationName,omitempty"` + // GroupByResourceName - Group query result by Resource Name. + GroupByResourceName *bool `json:"groupByResourceName,omitempty"` +} + +// Resource the Resource model definition. +type Resource struct { + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - Resource name + Name *string `json:"name,omitempty"` + // Type - Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for Resource. +func (r Resource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if r.ID != nil { + objectMap["id"] = r.ID + } + if r.Name != nil { + objectMap["name"] = r.Name + } + if r.Type != nil { + objectMap["type"] = r.Type + } + if r.Location != nil { + objectMap["location"] = r.Location + } + if r.Tags != nil { + objectMap["tags"] = r.Tags + } + return json.Marshal(objectMap) +} + +// ResourceRange describes the resource range. +type ResourceRange struct { + // Min - The minimum number of the resource. + Min *int32 `json:"min,omitempty"` + // Max - The maximum number of the resource. + Max *int32 `json:"max,omitempty"` +} + +// ResourceSku describes an available Compute SKU. +type ResourceSku struct { + // ResourceType - The type of resource the SKU applies to. + ResourceType *string `json:"resourceType,omitempty"` + // Name - The name of SKU. + Name *string `json:"name,omitempty"` + // Tier - Specifies the tier of virtual machines in a scale set.

    Possible Values:

    **Standard**

    **Basic** + Tier *string `json:"tier,omitempty"` + // Size - The Size of the SKU. + Size *string `json:"size,omitempty"` + // Family - The Family of this particular SKU. + Family *string `json:"family,omitempty"` + // Kind - The Kind of resources that are supported in this SKU. + Kind *string `json:"kind,omitempty"` + // Capacity - Specifies the number of virtual machines in the scale set. + Capacity *ResourceSkuCapacity `json:"capacity,omitempty"` + // Locations - The set of locations that the SKU is available. + Locations *[]string `json:"locations,omitempty"` + // LocationInfo - A list of locations and availability zones in those locations where the SKU is available. + LocationInfo *[]ResourceSkuLocationInfo `json:"locationInfo,omitempty"` + // APIVersions - The api versions that support this SKU. + APIVersions *[]string `json:"apiVersions,omitempty"` + // Costs - Metadata for retrieving price info. + Costs *[]ResourceSkuCosts `json:"costs,omitempty"` + // Capabilities - A name value pair to describe the capability. + Capabilities *[]ResourceSkuCapabilities `json:"capabilities,omitempty"` + // Restrictions - The restrictions because of which the SKU cannot be used. This is empty if there are no restrictions. + Restrictions *[]ResourceSkuRestrictions `json:"restrictions,omitempty"` +} + +// ResourceSkuCapabilities describes the SKU capabilities object. +type ResourceSkuCapabilities struct { + // Name - An invariant to describe the feature. + Name *string `json:"name,omitempty"` + // Value - An invariant if the feature is measured by quantity. + Value *string `json:"value,omitempty"` +} + +// ResourceSkuCapacity describes scaling information of a SKU. +type ResourceSkuCapacity struct { + // Minimum - The minimum capacity. + Minimum *int64 `json:"minimum,omitempty"` + // Maximum - The maximum capacity that can be set. + Maximum *int64 `json:"maximum,omitempty"` + // Default - The default capacity. + Default *int64 `json:"default,omitempty"` + // ScaleType - The scale type applicable to the sku. Possible values include: 'ResourceSkuCapacityScaleTypeAutomatic', 'ResourceSkuCapacityScaleTypeManual', 'ResourceSkuCapacityScaleTypeNone' + ScaleType ResourceSkuCapacityScaleType `json:"scaleType,omitempty"` +} + +// ResourceSkuCosts describes metadata for retrieving price info. +type ResourceSkuCosts struct { + // MeterID - Used for querying price from commerce. + MeterID *string `json:"meterID,omitempty"` + // Quantity - The multiplier needed to extend the base metered cost. + Quantity *int64 `json:"quantity,omitempty"` + // ExtendedUnit - An invariant to show the extended unit. + ExtendedUnit *string `json:"extendedUnit,omitempty"` +} + +// ResourceSkuLocationInfo ... +type ResourceSkuLocationInfo struct { + // Location - Location of the SKU + Location *string `json:"location,omitempty"` + // Zones - List of availability zones where the SKU is supported. + Zones *[]string `json:"zones,omitempty"` +} + +// ResourceSkuRestrictionInfo ... +type ResourceSkuRestrictionInfo struct { + // Locations - Locations where the SKU is restricted + Locations *[]string `json:"locations,omitempty"` + // Zones - List of availability zones where the SKU is restricted. + Zones *[]string `json:"zones,omitempty"` +} + +// ResourceSkuRestrictions describes the restrictions of a SKU. +type ResourceSkuRestrictions struct { + // Type - The type of restrictions. Possible values include: 'Location', 'Zone' + Type ResourceSkuRestrictionsType `json:"type,omitempty"` + // Values - The value of restrictions.
If the restriction type is set to location, this would be the different locations where the SKU is restricted. + Values *[]string `json:"values,omitempty"` + // RestrictionInfo - The information about the restriction where the SKU cannot be used. + RestrictionInfo *ResourceSkuRestrictionInfo `json:"restrictionInfo,omitempty"` + // ReasonCode - The reason for restriction. Possible values include: 'QuotaID', 'NotAvailableForSubscription' + ReasonCode ResourceSkuRestrictionsReasonCode `json:"reasonCode,omitempty"` +} + +// ResourceSkusResult the Compute List Skus operation response. +type ResourceSkusResult struct { + autorest.Response `json:"-"` + // Value - The list of skus available for the subscription. + Value *[]ResourceSku `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of Compute Skus. Call ListNext() with this to fetch the next page of VMSS Skus. + NextLink *string `json:"nextLink,omitempty"` +} + +// ResourceSkusResultIterator provides access to a complete listing of ResourceSku values. +type ResourceSkusResultIterator struct { + i int + page ResourceSkusResultPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *ResourceSkusResultIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ResourceSkusResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ResourceSkusResultIterator) Response() ResourceSkusResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ResourceSkusResultIterator) Value() ResourceSku { + if !iter.page.NotDone() { + return ResourceSku{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (rsr ResourceSkusResult) IsEmpty() bool { + return rsr.Value == nil || len(*rsr.Value) == 0 +} + +// resourceSkusResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (rsr ResourceSkusResult) resourceSkusResultPreparer() (*http.Request, error) { + if rsr.NextLink == nil || len(to.String(rsr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(rsr.NextLink))) +} + +// ResourceSkusResultPage contains a page of ResourceSku values. +type ResourceSkusResultPage struct { + fn func(ResourceSkusResult) (ResourceSkusResult, error) + rsr ResourceSkusResult +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *ResourceSkusResultPage) Next() error { + next, err := page.fn(page.rsr) + if err != nil { + return err + } + page.rsr = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ResourceSkusResultPage) NotDone() bool { + return !page.rsr.IsEmpty() +} + +// Response returns the raw server response from the last page request.
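// Where the iterator abstracts individual values, the page type above can
// also be consumed directly. A minimal sketch, assuming a first page already
// obtained from a SKU-listing client call (not shown):
//
//	func allSkus(page ResourceSkusResultPage) ([]ResourceSku, error) {
//		var skus []ResourceSku
//		for page.NotDone() {
//			skus = append(skus, page.Values()...) // every value on the current page
//			if err := page.Next(); err != nil {
//				return nil, err
//			}
//		}
//		return skus, nil
//	}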
+func (page ResourceSkusResultPage) Response() ResourceSkusResult { + return page.rsr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page ResourceSkusResultPage) Values() []ResourceSku { + if page.rsr.IsEmpty() { + return nil + } + return *page.rsr.Value +} + +// RollbackStatusInfo information about rollback on failed VM instances after an OS Upgrade operation. +type RollbackStatusInfo struct { + // SuccessfullyRolledbackInstanceCount - The number of instances which have been successfully rolled back. + SuccessfullyRolledbackInstanceCount *int32 `json:"successfullyRolledbackInstanceCount,omitempty"` + // FailedRolledbackInstanceCount - The number of instances which failed to roll back. + FailedRolledbackInstanceCount *int32 `json:"failedRolledbackInstanceCount,omitempty"` + // RollbackError - Error details if OS rollback failed. + RollbackError *APIError `json:"rollbackError,omitempty"` +} + +// RollingUpgradePolicy the configuration parameters used while performing a rolling upgrade. +type RollingUpgradePolicy struct { + // MaxBatchInstancePercent - The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. As this is a maximum, unhealthy instances in previous or future batches can cause the percentage of instances in a batch to decrease to ensure higher reliability. The default value for this parameter is 20%. + MaxBatchInstancePercent *int32 `json:"maxBatchInstancePercent,omitempty"` + // MaxUnhealthyInstancePercent - The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy, either as a result of being upgraded, or by being found in an unhealthy state by the virtual machine health checks before the rolling upgrade aborts. This constraint will be checked prior to starting any batch. The default value for this parameter is 20%. + MaxUnhealthyInstancePercent *int32 `json:"maxUnhealthyInstancePercent,omitempty"` + // MaxUnhealthyUpgradedInstancePercent - The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. This check will happen after each batch is upgraded. If this percentage is ever exceeded, the rolling update aborts. The default value for this parameter is 20%. + MaxUnhealthyUpgradedInstancePercent *int32 `json:"maxUnhealthyUpgradedInstancePercent,omitempty"` + // PauseTimeBetweenBatches - The wait time between completing the update for all virtual machines in one batch and starting the next batch. The time duration should be specified in ISO 8601 format. The default value is 0 seconds (PT0S). + PauseTimeBetweenBatches *string `json:"pauseTimeBetweenBatches,omitempty"` +} + +// RollingUpgradeProgressInfo information about the number of virtual machine instances in each upgrade state. +type RollingUpgradeProgressInfo struct { + // SuccessfulInstanceCount - The number of instances that have been successfully upgraded. + SuccessfulInstanceCount *int32 `json:"successfulInstanceCount,omitempty"` + // FailedInstanceCount - The number of instances that have failed to be upgraded successfully. + FailedInstanceCount *int32 `json:"failedInstanceCount,omitempty"` + // InProgressInstanceCount - The number of instances that are currently being upgraded. + InProgressInstanceCount *int32 `json:"inProgressInstanceCount,omitempty"` + // PendingInstanceCount - The number of instances that have not yet begun to be upgraded.
+ PendingInstanceCount *int32 `json:"pendingInstanceCount,omitempty"` +} + +// RollingUpgradeRunningStatus information about the current running state of the overall upgrade. +type RollingUpgradeRunningStatus struct { + // Code - Code indicating the current status of the upgrade. Possible values include: 'RollingUpgradeStatusCodeRollingForward', 'RollingUpgradeStatusCodeCancelled', 'RollingUpgradeStatusCodeCompleted', 'RollingUpgradeStatusCodeFaulted' + Code RollingUpgradeStatusCode `json:"code,omitempty"` + // StartTime - Start time of the upgrade. + StartTime *date.Time `json:"startTime,omitempty"` + // LastAction - The last action performed on the rolling upgrade. Possible values include: 'Start', 'Cancel' + LastAction RollingUpgradeActionType `json:"lastAction,omitempty"` + // LastActionTime - Last action time of the upgrade. + LastActionTime *date.Time `json:"lastActionTime,omitempty"` +} + +// RollingUpgradeStatusInfo the status of the latest virtual machine scale set rolling upgrade. +type RollingUpgradeStatusInfo struct { + autorest.Response `json:"-"` + *RollingUpgradeStatusInfoProperties `json:"properties,omitempty"` + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - Resource name + Name *string `json:"name,omitempty"` + // Type - Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for RollingUpgradeStatusInfo. +func (rusi RollingUpgradeStatusInfo) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if rusi.RollingUpgradeStatusInfoProperties != nil { + objectMap["properties"] = rusi.RollingUpgradeStatusInfoProperties + } + if rusi.ID != nil { + objectMap["id"] = rusi.ID + } + if rusi.Name != nil { + objectMap["name"] = rusi.Name + } + if rusi.Type != nil { + objectMap["type"] = rusi.Type + } + if rusi.Location != nil { + objectMap["location"] = rusi.Location + } + if rusi.Tags != nil { + objectMap["tags"] = rusi.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for RollingUpgradeStatusInfo struct. 
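// The custom marshaler/unmarshaler pair above exists to flatten the embedded
// RollingUpgradeStatusInfoProperties under a "properties" key. A minimal
// sketch of the shape produced (values illustrative; to.StringPtr is the
// autorest/to helper):
//
//	rusi := RollingUpgradeStatusInfo{
//		Name: to.StringPtr("latest"),
//		RollingUpgradeStatusInfoProperties: &RollingUpgradeStatusInfoProperties{},
//	}
//	b, _ := json.Marshal(rusi)
//	// b is roughly {"name":"latest","properties":{}}; nil resource fields are omitted.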
+func (rusi *RollingUpgradeStatusInfo) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var rollingUpgradeStatusInfoProperties RollingUpgradeStatusInfoProperties + err = json.Unmarshal(*v, &rollingUpgradeStatusInfoProperties) + if err != nil { + return err + } + rusi.RollingUpgradeStatusInfoProperties = &rollingUpgradeStatusInfoProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + rusi.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + rusi.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + rusi.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + rusi.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + rusi.Tags = tags + } + } + } + + return nil +} + +// RollingUpgradeStatusInfoProperties the status of the latest virtual machine scale set rolling upgrade. +type RollingUpgradeStatusInfoProperties struct { + // Policy - The rolling upgrade policies applied for this upgrade. + Policy *RollingUpgradePolicy `json:"policy,omitempty"` + // RunningStatus - Information about the current running state of the overall upgrade. + RunningStatus *RollingUpgradeRunningStatus `json:"runningStatus,omitempty"` + // Progress - Information about the number of virtual machine instances in each upgrade state. + Progress *RollingUpgradeProgressInfo `json:"progress,omitempty"` + // Error - Error details for this upgrade, if there are any. + Error *APIError `json:"error,omitempty"` +} + +// RunCommandDocument describes the properties of a Run Command. +type RunCommandDocument struct { + autorest.Response `json:"-"` + // Script - The script to be executed. + Script *[]string `json:"script,omitempty"` + // Parameters - The parameters used by the script. + Parameters *[]RunCommandParameterDefinition `json:"parameters,omitempty"` + // Schema - The VM run command schema. + Schema *string `json:"$schema,omitempty"` + // ID - The VM run command id. + ID *string `json:"id,omitempty"` + // OsType - The Operating System type. Possible values include: 'Windows', 'Linux' + OsType OperatingSystemTypes `json:"osType,omitempty"` + // Label - The VM run command label. + Label *string `json:"label,omitempty"` + // Description - The VM run command description. + Description *string `json:"description,omitempty"` +} + +// RunCommandDocumentBase describes the properties of a Run Command's metadata. +type RunCommandDocumentBase struct { + // Schema - The VM run command schema. + Schema *string `json:"$schema,omitempty"` + // ID - The VM run command id. + ID *string `json:"id,omitempty"` + // OsType - The Operating System type. Possible values include: 'Windows', 'Linux' + OsType OperatingSystemTypes `json:"osType,omitempty"` + // Label - The VM run command label. + Label *string `json:"label,omitempty"` + // Description - The VM run command description. + Description *string `json:"description,omitempty"` +} + +// RunCommandInput describes the input for a virtual machine run command.
+type RunCommandInput struct { + // CommandID - The run command id. + CommandID *string `json:"commandId,omitempty"` + // Script - Optional. The script to be executed. When this value is given, the given script will override the default script of the command. + Script *[]string `json:"script,omitempty"` + // Parameters - The run command parameters. + Parameters *[]RunCommandInputParameter `json:"parameters,omitempty"` +} + +// RunCommandInputParameter describes the properties of a run command parameter. +type RunCommandInputParameter struct { + // Name - The run command parameter name. + Name *string `json:"name,omitempty"` + // Value - The run command parameter value. + Value *string `json:"value,omitempty"` +} + +// RunCommandListResult the List Virtual Machine operation response. +type RunCommandListResult struct { + autorest.Response `json:"-"` + // Value - The list of virtual machine run commands. + Value *[]RunCommandDocumentBase `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of run commands. Call ListNext() with this to fetch the next page of run commands. + NextLink *string `json:"nextLink,omitempty"` +} + +// RunCommandListResultIterator provides access to a complete listing of RunCommandDocumentBase values. +type RunCommandListResultIterator struct { + i int + page RunCommandListResultPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *RunCommandListResultIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter RunCommandListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter RunCommandListResultIterator) Response() RunCommandListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter RunCommandListResultIterator) Value() RunCommandDocumentBase { + if !iter.page.NotDone() { + return RunCommandDocumentBase{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (rclr RunCommandListResult) IsEmpty() bool { + return rclr.Value == nil || len(*rclr.Value) == 0 +} + +// runCommandListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (rclr RunCommandListResult) runCommandListResultPreparer() (*http.Request, error) { + if rclr.NextLink == nil || len(to.String(rclr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(rclr.NextLink))) +} + +// RunCommandListResultPage contains a page of RunCommandDocumentBase values. +type RunCommandListResultPage struct { + fn func(RunCommandListResult) (RunCommandListResult, error) + rclr RunCommandListResult +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
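// A minimal sketch of building the run-command input defined above; the
// command id and script are illustrative, and to.StringPtr is the autorest/to
// helper:
//
//	input := RunCommandInput{
//		CommandID: to.StringPtr("RunShellScript"), // assumed command id
//		Script:    &[]string{"echo $arg1"},        // overrides the command's default script
//		Parameters: &[]RunCommandInputParameter{{
//			Name:  to.StringPtr("arg1"),
//			Value: to.StringPtr("hello"),
//		}},
//	}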
+func (page *RunCommandListResultPage) Next() error { + next, err := page.fn(page.rclr) + if err != nil { + return err + } + page.rclr = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page RunCommandListResultPage) NotDone() bool { + return !page.rclr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page RunCommandListResultPage) Response() RunCommandListResult { + return page.rclr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page RunCommandListResultPage) Values() []RunCommandDocumentBase { + if page.rclr.IsEmpty() { + return nil + } + return *page.rclr.Value +} + +// RunCommandParameterDefinition describes the properties of a run command parameter. +type RunCommandParameterDefinition struct { + // Name - The run command parameter name. + Name *string `json:"name,omitempty"` + // Type - The run command parameter type. + Type *string `json:"type,omitempty"` + // DefaultValue - The run command parameter default value. + DefaultValue *string `json:"defaultValue,omitempty"` + // Required - The run command parameter required. + Required *bool `json:"required,omitempty"` +} + +// RunCommandResult ... +type RunCommandResult struct { + autorest.Response `json:"-"` + // Value - Run command operation response. + Value *[]InstanceViewStatus `json:"value,omitempty"` +} + +// Sku describes a virtual machine scale set sku. +type Sku struct { + // Name - The sku name. + Name *string `json:"name,omitempty"` + // Tier - Specifies the tier of virtual machines in a scale set.

    Possible Values:

    **Standard**

    **Basic** + Tier *string `json:"tier,omitempty"` + // Capacity - Specifies the number of virtual machines in the scale set. + Capacity *int64 `json:"capacity,omitempty"` +} + +// Snapshot snapshot resource. +type Snapshot struct { + autorest.Response `json:"-"` + // ManagedBy - Unused. Always Null. + ManagedBy *string `json:"managedBy,omitempty"` + Sku *SnapshotSku `json:"sku,omitempty"` + *SnapshotProperties `json:"properties,omitempty"` + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - Resource name + Name *string `json:"name,omitempty"` + // Type - Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for Snapshot. +func (s Snapshot) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if s.ManagedBy != nil { + objectMap["managedBy"] = s.ManagedBy + } + if s.Sku != nil { + objectMap["sku"] = s.Sku + } + if s.SnapshotProperties != nil { + objectMap["properties"] = s.SnapshotProperties + } + if s.ID != nil { + objectMap["id"] = s.ID + } + if s.Name != nil { + objectMap["name"] = s.Name + } + if s.Type != nil { + objectMap["type"] = s.Type + } + if s.Location != nil { + objectMap["location"] = s.Location + } + if s.Tags != nil { + objectMap["tags"] = s.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Snapshot struct. +func (s *Snapshot) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "managedBy": + if v != nil { + var managedBy string + err = json.Unmarshal(*v, &managedBy) + if err != nil { + return err + } + s.ManagedBy = &managedBy + } + case "sku": + if v != nil { + var sku SnapshotSku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + s.Sku = &sku + } + case "properties": + if v != nil { + var snapshotProperties SnapshotProperties + err = json.Unmarshal(*v, &snapshotProperties) + if err != nil { + return err + } + s.SnapshotProperties = &snapshotProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + s.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + s.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + s.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + s.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + s.Tags = tags + } + } + } + + return nil +} + +// SnapshotList the List Snapshots operation response. +type SnapshotList struct { + autorest.Response `json:"-"` + // Value - A list of snapshots. + Value *[]Snapshot `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of snapshots. Call ListNext() with this to fetch the next page of snapshots. + NextLink *string `json:"nextLink,omitempty"` +} + +// SnapshotListIterator provides access to a complete listing of Snapshot values. +type SnapshotListIterator struct { + i int + page SnapshotListPage +} + +// Next advances to the next value. 
If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *SnapshotListIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter SnapshotListIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter SnapshotListIterator) Response() SnapshotList { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter SnapshotListIterator) Value() Snapshot { + if !iter.page.NotDone() { + return Snapshot{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (sl SnapshotList) IsEmpty() bool { + return sl.Value == nil || len(*sl.Value) == 0 +} + +// snapshotListPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (sl SnapshotList) snapshotListPreparer() (*http.Request, error) { + if sl.NextLink == nil || len(to.String(sl.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(sl.NextLink))) +} + +// SnapshotListPage contains a page of Snapshot values. +type SnapshotListPage struct { + fn func(SnapshotList) (SnapshotList, error) + sl SnapshotList +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *SnapshotListPage) Next() error { + next, err := page.fn(page.sl) + if err != nil { + return err + } + page.sl = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page SnapshotListPage) NotDone() bool { + return !page.sl.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page SnapshotListPage) Response() SnapshotList { + return page.sl +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page SnapshotListPage) Values() []Snapshot { + if page.sl.IsEmpty() { + return nil + } + return *page.sl.Value +} + +// SnapshotProperties snapshot resource properties. +type SnapshotProperties struct { + // TimeCreated - The time when the disk was created. + TimeCreated *date.Time `json:"timeCreated,omitempty"` + // OsType - The Operating System type. Possible values include: 'Windows', 'Linux' + OsType OperatingSystemTypes `json:"osType,omitempty"` + // CreationData - Disk source information. CreationData information cannot be changed after the disk has been created. + CreationData *CreationData `json:"creationData,omitempty"` + // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size. 
+ DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // EncryptionSettings - Encryption settings for disk or snapshot + EncryptionSettings *EncryptionSettings `json:"encryptionSettings,omitempty"` + // ProvisioningState - The disk provisioning state. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// SnapshotsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type SnapshotsCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *SnapshotsCreateOrUpdateFuture) Result(client SnapshotsClient) (s Snapshot, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.SnapshotsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if s.Response.Response, err = future.GetResult(sender); err == nil && s.Response.Response.StatusCode != http.StatusNoContent { + s, err = client.CreateOrUpdateResponder(s.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsCreateOrUpdateFuture", "Result", s.Response.Response, "Failure responding to request") + } + } + return +} + +// SnapshotsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type SnapshotsDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *SnapshotsDeleteFuture) Result(client SnapshotsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.SnapshotsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// SnapshotsGrantAccessFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type SnapshotsGrantAccessFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *SnapshotsGrantAccessFuture) Result(client SnapshotsClient) (au AccessURI, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsGrantAccessFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.SnapshotsGrantAccessFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if au.Response.Response, err = future.GetResult(sender); err == nil && au.Response.Response.StatusCode != http.StatusNoContent { + au, err = client.GrantAccessResponder(au.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsGrantAccessFuture", "Result", au.Response.Response, "Failure responding to request") + } + } + return +} + +// SnapshotSku the snapshots sku name. Can be Standard_LRS, Premium_LRS, or Standard_ZRS. +type SnapshotSku struct { + // Name - The sku name. Possible values include: 'SnapshotStorageAccountTypesStandardLRS', 'SnapshotStorageAccountTypesPremiumLRS', 'SnapshotStorageAccountTypesStandardZRS' + Name SnapshotStorageAccountTypes `json:"name,omitempty"` + // Tier - The sku tier. + Tier *string `json:"tier,omitempty"` +} + +// SnapshotsRevokeAccessFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type SnapshotsRevokeAccessFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *SnapshotsRevokeAccessFuture) Result(client SnapshotsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsRevokeAccessFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.SnapshotsRevokeAccessFuture") + return + } + ar.Response = future.Response() + return +} + +// SnapshotsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type SnapshotsUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *SnapshotsUpdateFuture) Result(client SnapshotsClient) (s Snapshot, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.SnapshotsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if s.Response.Response, err = future.GetResult(sender); err == nil && s.Response.Response.StatusCode != http.StatusNoContent { + s, err = client.UpdateResponder(s.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsUpdateFuture", "Result", s.Response.Response, "Failure responding to request") + } + } + return +} + +// SnapshotUpdate snapshot update resource. 
+type SnapshotUpdate struct { + *SnapshotUpdateProperties `json:"properties,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` + Sku *SnapshotSku `json:"sku,omitempty"` +} + +// MarshalJSON is the custom marshaler for SnapshotUpdate. +func (su SnapshotUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if su.SnapshotUpdateProperties != nil { + objectMap["properties"] = su.SnapshotUpdateProperties + } + if su.Tags != nil { + objectMap["tags"] = su.Tags + } + if su.Sku != nil { + objectMap["sku"] = su.Sku + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for SnapshotUpdate struct. +func (su *SnapshotUpdate) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var snapshotUpdateProperties SnapshotUpdateProperties + err = json.Unmarshal(*v, &snapshotUpdateProperties) + if err != nil { + return err + } + su.SnapshotUpdateProperties = &snapshotUpdateProperties + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + su.Tags = tags + } + case "sku": + if v != nil { + var sku SnapshotSku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + su.Sku = &sku + } + } + } + + return nil +} + +// SnapshotUpdateProperties snapshot resource update properties. +type SnapshotUpdateProperties struct { + // OsType - the Operating System type. Possible values include: 'Windows', 'Linux' + OsType OperatingSystemTypes `json:"osType,omitempty"` + // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size. + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // EncryptionSettings - Encryption settings for disk or snapshot + EncryptionSettings *EncryptionSettings `json:"encryptionSettings,omitempty"` +} + +// SourceVault the vault id is an Azure Resource Manager Resource id in the form +// /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName} +type SourceVault struct { + // ID - Resource Id + ID *string `json:"id,omitempty"` +} + +// SSHConfiguration SSH configuration for Linux based VMs running on Azure +type SSHConfiguration struct { + // PublicKeys - The list of SSH public keys used to authenticate with linux based VMs. + PublicKeys *[]SSHPublicKey `json:"publicKeys,omitempty"` +} + +// SSHPublicKey contains information about SSH certificate public key and the path on the Linux VM where the public +// key is placed. +type SSHPublicKey struct { + // Path - Specifies the full path on the created VM where ssh public key is stored. If the file already exists, the specified key is appended to the file. Example: /home/user/.ssh/authorized_keys + Path *string `json:"path,omitempty"` + // KeyData - SSH public key certificate used to authenticate with the VM through ssh. The key needs to be at least 2048-bit and in ssh-rsa format.
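As a quick illustration, a minimal sketch of how these SSH types compose; the import path (SDK API version) and the autorest `to` pointer helpers are assumptions about the surrounding module layout, and the key material is a placeholder:

```go
package example

import (
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

// buildSSHConfig appends one public key to the default authorized_keys
// file of the given user, following the Path convention documented above.
func buildSSHConfig(user, keyData string) *compute.SSHConfiguration {
	return &compute.SSHConfiguration{
		PublicKeys: &[]compute.SSHPublicKey{{
			Path:    to.StringPtr("/home/" + user + "/.ssh/authorized_keys"),
			KeyData: to.StringPtr(keyData), // an "ssh-rsa AAAA..." string, at least 2048-bit
		}},
	}
}
```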

    For creating ssh keys, see [Create SSH keys on Linux and Mac for Linux VMs in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-mac-create-ssh-keys?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json). + KeyData *string `json:"keyData,omitempty"` +} + +// StorageProfile specifies the storage settings for the virtual machine disks. +type StorageProfile struct { + // ImageReference - Specifies information about the image to use. You can specify information about platform images, marketplace images, or virtual machine images. This element is required when you want to use a platform image, marketplace image, or virtual machine image, but is not used in other creation operations. + ImageReference *ImageReference `json:"imageReference,omitempty"` + // OsDisk - Specifies information about the operating system disk used by the virtual machine.

    For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). + OsDisk *OSDisk `json:"osDisk,omitempty"` + // DataDisks - Specifies the parameters that are used to add a data disk to a virtual machine.
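For concreteness, a sketch of a StorageProfile pairing a platform image with one empty data disk; the image names and size are arbitrary placeholders, and the import paths are assumed as in the sketch above:

```go
package example

import (
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

// exampleStorageProfile selects a platform image and attaches one empty
// 128 GB data disk at LUN 0; the Empty create option requires DiskSizeGB.
func exampleStorageProfile() *compute.StorageProfile {
	return &compute.StorageProfile{
		ImageReference: &compute.ImageReference{
			Publisher: to.StringPtr("Canonical"),
			Offer:     to.StringPtr("UbuntuServer"),
			Sku:       to.StringPtr("18.04-LTS"),
			Version:   to.StringPtr("latest"),
		},
		DataDisks: &[]compute.DataDisk{{
			Lun:          to.Int32Ptr(0),
			CreateOption: compute.DiskCreateOptionTypesEmpty,
			DiskSizeGB:   to.Int32Ptr(128),
		}},
	}
}
```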

For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). + DataDisks *[]DataDisk `json:"dataDisks,omitempty"` +} + +// SubResource ... +type SubResource struct { + // ID - Resource Id + ID *string `json:"id,omitempty"` +} + +// SubResourceReadOnly ... +type SubResourceReadOnly struct { + // ID - Resource Id + ID *string `json:"id,omitempty"` +} + +// TargetRegion describes the target region information. +type TargetRegion struct { + // Name - The name of the region. + Name *string `json:"name,omitempty"` + // RegionalReplicaCount - The number of replicas of the Image Version to be created per region. This property is updateable. + RegionalReplicaCount *int32 `json:"regionalReplicaCount,omitempty"` +} + +// ThrottledRequestsInput api request input for LogAnalytics getThrottledRequests Api. +type ThrottledRequestsInput struct { + // BlobContainerSasURI - SAS Uri of the logging blob container to which LogAnalytics Api writes output logs. + BlobContainerSasURI *string `json:"blobContainerSasUri,omitempty"` + // FromTime - From time of the query + FromTime *date.Time `json:"fromTime,omitempty"` + // ToTime - To time of the query + ToTime *date.Time `json:"toTime,omitempty"` + // GroupByThrottlePolicy - Group query result by Throttle Policy applied. + GroupByThrottlePolicy *bool `json:"groupByThrottlePolicy,omitempty"` + // GroupByOperationName - Group query result by Operation Name. + GroupByOperationName *bool `json:"groupByOperationName,omitempty"` + // GroupByResourceName - Group query result by Resource Name. + GroupByResourceName *bool `json:"groupByResourceName,omitempty"` +} + +// UpdateResource the Update Resource model definition. +type UpdateResource struct { + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for UpdateResource. +func (ur UpdateResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if ur.Tags != nil { + objectMap["tags"] = ur.Tags + } + return json.Marshal(objectMap) +} + +// UpgradeOperationHistoricalStatusInfo virtual Machine Scale Set OS Upgrade History operation response. +type UpgradeOperationHistoricalStatusInfo struct { + // Properties - Information about the properties of the upgrade operation. + Properties *UpgradeOperationHistoricalStatusInfoProperties `json:"properties,omitempty"` + // Type - Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` +} + +// UpgradeOperationHistoricalStatusInfoProperties describes each OS upgrade on the Virtual Machine Scale Set. +type UpgradeOperationHistoricalStatusInfoProperties struct { + // RunningStatus - Information about the overall status of the upgrade operation. + RunningStatus *UpgradeOperationHistoryStatus `json:"runningStatus,omitempty"` + // Progress - Counts of the VMs in each state. + Progress *RollingUpgradeProgressInfo `json:"progress,omitempty"` + // Error - Error Details for this upgrade if there are any. + Error *APIError `json:"error,omitempty"` + // StartedBy - Invoker of the Upgrade Operation.
Possible values include: 'UpgradeOperationInvokerUnknown', 'UpgradeOperationInvokerUser', 'UpgradeOperationInvokerPlatform' + StartedBy UpgradeOperationInvoker `json:"startedBy,omitempty"` + // TargetImageReference - Image Reference details + TargetImageReference *ImageReference `json:"targetImageReference,omitempty"` + // RollbackInfo - Information about OS rollback if performed + RollbackInfo *RollbackStatusInfo `json:"rollbackInfo,omitempty"` +} + +// UpgradeOperationHistoryStatus information about the current running state of the overall upgrade. +type UpgradeOperationHistoryStatus struct { + // Code - Code indicating the current status of the upgrade. Possible values include: 'UpgradeStateRollingForward', 'UpgradeStateCancelled', 'UpgradeStateCompleted', 'UpgradeStateFaulted' + Code UpgradeState `json:"code,omitempty"` + // StartTime - Start time of the upgrade. + StartTime *date.Time `json:"startTime,omitempty"` + // EndTime - End time of the upgrade. + EndTime *date.Time `json:"endTime,omitempty"` +} + +// UpgradePolicy describes an upgrade policy - automatic, manual, or rolling. +type UpgradePolicy struct { + // Mode - Specifies the mode of an upgrade to virtual machines in the scale set.

    Possible values are:

    **Manual** - You control the application of updates to virtual machines in the scale set. You do this by using the manualUpgrade action.

**Automatic** - All virtual machines in the scale set are automatically updated at the same time. Possible values include: 'Automatic', 'Manual', 'Rolling' + Mode UpgradeMode `json:"mode,omitempty"` + // RollingUpgradePolicy - The configuration parameters used while performing a rolling upgrade. + RollingUpgradePolicy *RollingUpgradePolicy `json:"rollingUpgradePolicy,omitempty"` + // AutomaticOSUpgradePolicy - Configuration parameters used for performing automatic OS Upgrade. + AutomaticOSUpgradePolicy *AutomaticOSUpgradePolicy `json:"automaticOSUpgradePolicy,omitempty"` +} + +// Usage describes Compute Resource Usage. +type Usage struct { + // Unit - An enum describing the unit of usage measurement. + Unit *string `json:"unit,omitempty"` + // CurrentValue - The current usage of the resource. + CurrentValue *int32 `json:"currentValue,omitempty"` + // Limit - The maximum permitted usage of the resource. + Limit *int64 `json:"limit,omitempty"` + // Name - The name of the type of usage. + Name *UsageName `json:"name,omitempty"` +} + +// UsageName the Usage Names. +type UsageName struct { + // Value - The name of the resource. + Value *string `json:"value,omitempty"` + // LocalizedValue - The localized name of the resource. + LocalizedValue *string `json:"localizedValue,omitempty"` +} + +// VaultCertificate describes a single certificate reference in a Key Vault, and where the certificate should +// reside on the VM. +type VaultCertificate struct { + // CertificateURL - This is the URL of a certificate that has been uploaded to Key Vault as a secret. For adding a secret to the Key Vault, see [Add a key or secret to the key vault](https://docs.microsoft.com/azure/key-vault/key-vault-get-started/#add). In this case, your certificate needs to be the Base64 encoding of the following JSON Object which is encoded in UTF-8:

    {
    "data":"",
    "dataType":"pfx",
    "password":""
    } + CertificateURL *string `json:"certificateUrl,omitempty"` + // CertificateStore - For Windows VMs, specifies the certificate store on the Virtual Machine to which the certificate should be added. The specified certificate store is implicitly in the LocalMachine account.
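A short sketch of how a VaultCertificate is typically wrapped in the VaultSecretGroup defined below; the vault ID and secret URL are placeholders, and the import paths are assumed as in the earlier sketches:

```go
package example

import (
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

// windowsVaultSecrets pulls one Key Vault certificate into the VM's
// implicit LocalMachine "My" store, per the CertificateStore semantics above.
func windowsVaultSecrets(vaultID, secretURL string) []compute.VaultSecretGroup {
	return []compute.VaultSecretGroup{{
		SourceVault: &compute.SubResource{ID: to.StringPtr(vaultID)},
		VaultCertificates: &[]compute.VaultCertificate{{
			CertificateURL:   to.StringPtr(secretURL),
			CertificateStore: to.StringPtr("My"),
		}},
	}}
}
```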

For Linux VMs, the certificate file is placed under the /var/lib/waagent directory, with the file name <UppercaseThumbprint>.crt for the X509 certificate file and <UppercaseThumbprint>.prv for private key. Both of these files are .pem formatted. + CertificateStore *string `json:"certificateStore,omitempty"` +} + +// VaultSecretGroup describes a set of certificates which are all in the same Key Vault. +type VaultSecretGroup struct { + // SourceVault - The relative URL of the Key Vault containing all of the certificates in VaultCertificates. + SourceVault *SubResource `json:"sourceVault,omitempty"` + // VaultCertificates - The list of key vault references in SourceVault which contain certificates. + VaultCertificates *[]VaultCertificate `json:"vaultCertificates,omitempty"` +} + +// VirtualHardDisk describes the uri of a disk. +type VirtualHardDisk struct { + // URI - Specifies the virtual hard disk's uri. + URI *string `json:"uri,omitempty"` +} + +// VirtualMachine describes a Virtual Machine. +type VirtualMachine struct { + autorest.Response `json:"-"` + // Plan - Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**. + Plan *Plan `json:"plan,omitempty"` + *VirtualMachineProperties `json:"properties,omitempty"` + // Resources - The virtual machine child extension resources. + Resources *[]VirtualMachineExtension `json:"resources,omitempty"` + // Identity - The identity of the virtual machine, if configured. + Identity *VirtualMachineIdentity `json:"identity,omitempty"` + // Zones - The virtual machine zones. + Zones *[]string `json:"zones,omitempty"` + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - Resource name + Name *string `json:"name,omitempty"` + // Type - Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for VirtualMachine. +func (VM VirtualMachine) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if VM.Plan != nil { + objectMap["plan"] = VM.Plan + } + if VM.VirtualMachineProperties != nil { + objectMap["properties"] = VM.VirtualMachineProperties + } + if VM.Resources != nil { + objectMap["resources"] = VM.Resources + } + if VM.Identity != nil { + objectMap["identity"] = VM.Identity + } + if VM.Zones != nil { + objectMap["zones"] = VM.Zones + } + if VM.ID != nil { + objectMap["id"] = VM.ID + } + if VM.Name != nil { + objectMap["name"] = VM.Name + } + if VM.Type != nil { + objectMap["type"] = VM.Type + } + if VM.Location != nil { + objectMap["location"] = VM.Location + } + if VM.Tags != nil { + objectMap["tags"] = VM.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for VirtualMachine struct.
+func (VM *VirtualMachine) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "plan": + if v != nil { + var plan Plan + err = json.Unmarshal(*v, &plan) + if err != nil { + return err + } + VM.Plan = &plan + } + case "properties": + if v != nil { + var virtualMachineProperties VirtualMachineProperties + err = json.Unmarshal(*v, &virtualMachineProperties) + if err != nil { + return err + } + VM.VirtualMachineProperties = &virtualMachineProperties + } + case "resources": + if v != nil { + var resources []VirtualMachineExtension + err = json.Unmarshal(*v, &resources) + if err != nil { + return err + } + VM.Resources = &resources + } + case "identity": + if v != nil { + var identity VirtualMachineIdentity + err = json.Unmarshal(*v, &identity) + if err != nil { + return err + } + VM.Identity = &identity + } + case "zones": + if v != nil { + var zones []string + err = json.Unmarshal(*v, &zones) + if err != nil { + return err + } + VM.Zones = &zones + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + VM.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + VM.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + VM.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + VM.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + VM.Tags = tags + } + } + } + + return nil +} + +// VirtualMachineAgentInstanceView the instance view of the VM Agent running on the virtual machine. +type VirtualMachineAgentInstanceView struct { + // VMAgentVersion - The VM Agent full version. + VMAgentVersion *string `json:"vmAgentVersion,omitempty"` + // ExtensionHandlers - The virtual machine extension handler instance view. + ExtensionHandlers *[]VirtualMachineExtensionHandlerInstanceView `json:"extensionHandlers,omitempty"` + // Statuses - The resource status information. + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// VirtualMachineCaptureParameters capture Virtual Machine parameters. +type VirtualMachineCaptureParameters struct { + // VhdPrefix - The captured virtual hard disk's name prefix. + VhdPrefix *string `json:"vhdPrefix,omitempty"` + // DestinationContainerName - The destination container name. + DestinationContainerName *string `json:"destinationContainerName,omitempty"` + // OverwriteVhds - Specifies whether to overwrite the destination virtual hard disk, in case of conflict. + OverwriteVhds *bool `json:"overwriteVhds,omitempty"` +} + +// VirtualMachineCaptureResult output of virtual machine capture operation. 
+type VirtualMachineCaptureResult struct { + autorest.Response `json:"-"` + // Schema - the schema of the captured virtual machine + Schema *string `json:"$schema,omitempty"` + // ContentVersion - the version of the content + ContentVersion *string `json:"contentVersion,omitempty"` + // Parameters - parameters of the captured virtual machine + Parameters interface{} `json:"parameters,omitempty"` + // Resources - a list of resource items of the captured virtual machine + Resources *[]interface{} `json:"resources,omitempty"` + // ID - Resource Id + ID *string `json:"id,omitempty"` +} + +// VirtualMachineExtension describes a Virtual Machine Extension. +type VirtualMachineExtension struct { + autorest.Response `json:"-"` + *VirtualMachineExtensionProperties `json:"properties,omitempty"` + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - Resource name + Name *string `json:"name,omitempty"` + // Type - Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for VirtualMachineExtension. +func (vme VirtualMachineExtension) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if vme.VirtualMachineExtensionProperties != nil { + objectMap["properties"] = vme.VirtualMachineExtensionProperties + } + if vme.ID != nil { + objectMap["id"] = vme.ID + } + if vme.Name != nil { + objectMap["name"] = vme.Name + } + if vme.Type != nil { + objectMap["type"] = vme.Type + } + if vme.Location != nil { + objectMap["location"] = vme.Location + } + if vme.Tags != nil { + objectMap["tags"] = vme.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for VirtualMachineExtension struct. +func (vme *VirtualMachineExtension) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var virtualMachineExtensionProperties VirtualMachineExtensionProperties + err = json.Unmarshal(*v, &virtualMachineExtensionProperties) + if err != nil { + return err + } + vme.VirtualMachineExtensionProperties = &virtualMachineExtensionProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + vme.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + vme.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + vme.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + vme.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + vme.Tags = tags + } + } + } + + return nil +} + +// VirtualMachineExtensionHandlerInstanceView the instance view of a virtual machine extension handler. +type VirtualMachineExtensionHandlerInstanceView struct { + // Type - Specifies the type of the extension; an example is "CustomScriptExtension". + Type *string `json:"type,omitempty"` + // TypeHandlerVersion - Specifies the version of the script handler. 
+ TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"` + // Status - The extension handler status. + Status *InstanceViewStatus `json:"status,omitempty"` +} + +// VirtualMachineExtensionImage describes a Virtual Machine Extension Image. +type VirtualMachineExtensionImage struct { + autorest.Response `json:"-"` + *VirtualMachineExtensionImageProperties `json:"properties,omitempty"` + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - Resource name + Name *string `json:"name,omitempty"` + // Type - Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for VirtualMachineExtensionImage. +func (vmei VirtualMachineExtensionImage) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if vmei.VirtualMachineExtensionImageProperties != nil { + objectMap["properties"] = vmei.VirtualMachineExtensionImageProperties + } + if vmei.ID != nil { + objectMap["id"] = vmei.ID + } + if vmei.Name != nil { + objectMap["name"] = vmei.Name + } + if vmei.Type != nil { + objectMap["type"] = vmei.Type + } + if vmei.Location != nil { + objectMap["location"] = vmei.Location + } + if vmei.Tags != nil { + objectMap["tags"] = vmei.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for VirtualMachineExtensionImage struct. +func (vmei *VirtualMachineExtensionImage) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var virtualMachineExtensionImageProperties VirtualMachineExtensionImageProperties + err = json.Unmarshal(*v, &virtualMachineExtensionImageProperties) + if err != nil { + return err + } + vmei.VirtualMachineExtensionImageProperties = &virtualMachineExtensionImageProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + vmei.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + vmei.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + vmei.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + vmei.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + vmei.Tags = tags + } + } + } + + return nil +} + +// VirtualMachineExtensionImageProperties describes the properties of a Virtual Machine Extension Image. +type VirtualMachineExtensionImageProperties struct { + // OperatingSystem - The operating system this extension supports. + OperatingSystem *string `json:"operatingSystem,omitempty"` + // ComputeRole - The type of role (IaaS or PaaS) this extension supports. + ComputeRole *string `json:"computeRole,omitempty"` + // HandlerSchema - The schema defined by publisher, where extension consumers should provide settings in a matching schema. + HandlerSchema *string `json:"handlerSchema,omitempty"` + // VMScaleSetEnabled - Whether the extension can be used on xRP VMScaleSets. 
By default existing extensions are usable on scalesets, but there might be cases where a publisher wants to explicitly indicate the extension is only enabled for CRP VMs but not VMSS. + VMScaleSetEnabled *bool `json:"vmScaleSetEnabled,omitempty"` + // SupportsMultipleExtensions - Whether the handler can support multiple extensions. + SupportsMultipleExtensions *bool `json:"supportsMultipleExtensions,omitempty"` +} + +// VirtualMachineExtensionInstanceView the instance view of a virtual machine extension. +type VirtualMachineExtensionInstanceView struct { + // Name - The virtual machine extension name. + Name *string `json:"name,omitempty"` + // Type - Specifies the type of the extension; an example is "CustomScriptExtension". + Type *string `json:"type,omitempty"` + // TypeHandlerVersion - Specifies the version of the script handler. + TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"` + // Substatuses - The resource status information. + Substatuses *[]InstanceViewStatus `json:"substatuses,omitempty"` + // Statuses - The resource status information. + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// VirtualMachineExtensionProperties describes the properties of a Virtual Machine Extension. +type VirtualMachineExtensionProperties struct { + // ForceUpdateTag - How the extension handler should be forced to update even if the extension configuration has not changed. + ForceUpdateTag *string `json:"forceUpdateTag,omitempty"` + // Publisher - The name of the extension handler publisher. + Publisher *string `json:"publisher,omitempty"` + // Type - Specifies the type of the extension; an example is "CustomScriptExtension". + Type *string `json:"type,omitempty"` + // TypeHandlerVersion - Specifies the version of the script handler. + TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"` + // AutoUpgradeMinorVersion - Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true. + AutoUpgradeMinorVersion *bool `json:"autoUpgradeMinorVersion,omitempty"` + // Settings - Json formatted public settings for the extension. + Settings interface{} `json:"settings,omitempty"` + // ProtectedSettings - The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all. + ProtectedSettings interface{} `json:"protectedSettings,omitempty"` + // ProvisioningState - The provisioning state, which only appears in the response. + ProvisioningState *string `json:"provisioningState,omitempty"` + // InstanceView - The virtual machine extension instance view. + InstanceView *VirtualMachineExtensionInstanceView `json:"instanceView,omitempty"` +} + +// VirtualMachineExtensionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type VirtualMachineExtensionsCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *VirtualMachineExtensionsCreateOrUpdateFuture) Result(client VirtualMachineExtensionsClient) (vme VirtualMachineExtension, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineExtensionsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vme.Response.Response, err = future.GetResult(sender); err == nil && vme.Response.Response.StatusCode != http.StatusNoContent { + vme, err = client.CreateOrUpdateResponder(vme.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsCreateOrUpdateFuture", "Result", vme.Response.Response, "Failure responding to request") + } + } + return +} + +// VirtualMachineExtensionsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachineExtensionsDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachineExtensionsDeleteFuture) Result(client VirtualMachineExtensionsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineExtensionsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachineExtensionsListResult the List Extension operation response +type VirtualMachineExtensionsListResult struct { + autorest.Response `json:"-"` + // Value - The list of extensions + Value *[]VirtualMachineExtension `json:"value,omitempty"` +} + +// VirtualMachineExtensionsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachineExtensionsUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *VirtualMachineExtensionsUpdateFuture) Result(client VirtualMachineExtensionsClient) (vme VirtualMachineExtension, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineExtensionsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vme.Response.Response, err = future.GetResult(sender); err == nil && vme.Response.Response.StatusCode != http.StatusNoContent { + vme, err = client.UpdateResponder(vme.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsUpdateFuture", "Result", vme.Response.Response, "Failure responding to request") + } + } + return +} + +// VirtualMachineExtensionUpdate describes a Virtual Machine Extension. +type VirtualMachineExtensionUpdate struct { + *VirtualMachineExtensionUpdateProperties `json:"properties,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for VirtualMachineExtensionUpdate. +func (vmeu VirtualMachineExtensionUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if vmeu.VirtualMachineExtensionUpdateProperties != nil { + objectMap["properties"] = vmeu.VirtualMachineExtensionUpdateProperties + } + if vmeu.Tags != nil { + objectMap["tags"] = vmeu.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for VirtualMachineExtensionUpdate struct. +func (vmeu *VirtualMachineExtensionUpdate) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var virtualMachineExtensionUpdateProperties VirtualMachineExtensionUpdateProperties + err = json.Unmarshal(*v, &virtualMachineExtensionUpdateProperties) + if err != nil { + return err + } + vmeu.VirtualMachineExtensionUpdateProperties = &virtualMachineExtensionUpdateProperties + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + vmeu.Tags = tags + } + } + } + + return nil +} + +// VirtualMachineExtensionUpdateProperties describes the properties of a Virtual Machine Extension. +type VirtualMachineExtensionUpdateProperties struct { + // ForceUpdateTag - How the extension handler should be forced to update even if the extension configuration has not changed. + ForceUpdateTag *string `json:"forceUpdateTag,omitempty"` + // Publisher - The name of the extension handler publisher. + Publisher *string `json:"publisher,omitempty"` + // Type - Specifies the type of the extension; an example is "CustomScriptExtension". + Type *string `json:"type,omitempty"` + // TypeHandlerVersion - Specifies the version of the script handler. + TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"` + // AutoUpgradeMinorVersion - Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true. 
+ AutoUpgradeMinorVersion *bool `json:"autoUpgradeMinorVersion,omitempty"` + // Settings - Json formatted public settings for the extension. + Settings interface{} `json:"settings,omitempty"` + // ProtectedSettings - The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all. + ProtectedSettings interface{} `json:"protectedSettings,omitempty"` +} + +// VirtualMachineHealthStatus the health status of the VM. +type VirtualMachineHealthStatus struct { + // Status - The health status information for the VM. + Status *InstanceViewStatus `json:"status,omitempty"` +} + +// VirtualMachineIdentity identity for the virtual machine. +type VirtualMachineIdentity struct { + // PrincipalID - The principal id of virtual machine identity. This property will only be provided for a system assigned identity. + PrincipalID *string `json:"principalId,omitempty"` + // TenantID - The tenant id associated with the virtual machine. This property will only be provided for a system assigned identity. + TenantID *string `json:"tenantId,omitempty"` + // Type - The type of identity used for the virtual machine. The type 'SystemAssigned, UserAssigned' includes both an implicitly created identity and a set of user assigned identities. The type 'None' will remove any identities from the virtual machine. Possible values include: 'ResourceIdentityTypeSystemAssigned', 'ResourceIdentityTypeUserAssigned', 'ResourceIdentityTypeSystemAssignedUserAssigned', 'ResourceIdentityTypeNone' + Type ResourceIdentityType `json:"type,omitempty"` + // UserAssignedIdentities - The list of user identities associated with the Virtual Machine. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. + UserAssignedIdentities map[string]*VirtualMachineIdentityUserAssignedIdentitiesValue `json:"userAssignedIdentities"` +} + +// MarshalJSON is the custom marshaler for VirtualMachineIdentity. +func (vmi VirtualMachineIdentity) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if vmi.PrincipalID != nil { + objectMap["principalId"] = vmi.PrincipalID + } + if vmi.TenantID != nil { + objectMap["tenantId"] = vmi.TenantID + } + if vmi.Type != "" { + objectMap["type"] = vmi.Type + } + if vmi.UserAssignedIdentities != nil { + objectMap["userAssignedIdentities"] = vmi.UserAssignedIdentities + } + return json.Marshal(objectMap) +} + +// VirtualMachineIdentityUserAssignedIdentitiesValue ... +type VirtualMachineIdentityUserAssignedIdentitiesValue struct { + // PrincipalID - The principal id of user assigned identity. + PrincipalID *string `json:"principalId,omitempty"` + // ClientID - The client id of user assigned identity. + ClientID *string `json:"clientId,omitempty"` +} + +// VirtualMachineImage describes a Virtual Machine Image. +type VirtualMachineImage struct { + autorest.Response `json:"-"` + *VirtualMachineImageProperties `json:"properties,omitempty"` + // Name - The name of the resource. + Name *string `json:"name,omitempty"` + // Location - The supported Azure location of the resource. + Location *string `json:"location,omitempty"` + // Tags - Specifies the tags that are assigned to the virtual machine. For more information about using tags, see [Using tags to organize your Azure resources](https://docs.microsoft.com/azure/azure-resource-manager/resource-group-using-tags.md). 
+ Tags map[string]*string `json:"tags"` + // ID - Resource Id + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for VirtualMachineImage. +func (vmi VirtualMachineImage) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if vmi.VirtualMachineImageProperties != nil { + objectMap["properties"] = vmi.VirtualMachineImageProperties + } + if vmi.Name != nil { + objectMap["name"] = vmi.Name + } + if vmi.Location != nil { + objectMap["location"] = vmi.Location + } + if vmi.Tags != nil { + objectMap["tags"] = vmi.Tags + } + if vmi.ID != nil { + objectMap["id"] = vmi.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for VirtualMachineImage struct. +func (vmi *VirtualMachineImage) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var virtualMachineImageProperties VirtualMachineImageProperties + err = json.Unmarshal(*v, &virtualMachineImageProperties) + if err != nil { + return err + } + vmi.VirtualMachineImageProperties = &virtualMachineImageProperties + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + vmi.Name = &name + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + vmi.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + vmi.Tags = tags + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + vmi.ID = &ID + } + } + } + + return nil +} + +// VirtualMachineImageProperties describes the properties of a Virtual Machine Image. +type VirtualMachineImageProperties struct { + Plan *PurchasePlan `json:"plan,omitempty"` + OsDiskImage *OSDiskImage `json:"osDiskImage,omitempty"` + DataDiskImages *[]DataDiskImage `json:"dataDiskImages,omitempty"` +} + +// VirtualMachineImageResource virtual machine image resource information. +type VirtualMachineImageResource struct { + // Name - The name of the resource. + Name *string `json:"name,omitempty"` + // Location - The supported Azure location of the resource. + Location *string `json:"location,omitempty"` + // Tags - Specifies the tags that are assigned to the virtual machine. For more information about using tags, see [Using tags to organize your Azure resources](https://docs.microsoft.com/azure/azure-resource-manager/resource-group-using-tags.md). + Tags map[string]*string `json:"tags"` + // ID - Resource Id + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for VirtualMachineImageResource. +func (vmir VirtualMachineImageResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if vmir.Name != nil { + objectMap["name"] = vmir.Name + } + if vmir.Location != nil { + objectMap["location"] = vmir.Location + } + if vmir.Tags != nil { + objectMap["tags"] = vmir.Tags + } + if vmir.ID != nil { + objectMap["id"] = vmir.ID + } + return json.Marshal(objectMap) +} + +// VirtualMachineInstanceView the instance view of a virtual machine. +type VirtualMachineInstanceView struct { + autorest.Response `json:"-"` + // PlatformUpdateDomain - Specifies the update domain of the virtual machine. 
+ PlatformUpdateDomain *int32 `json:"platformUpdateDomain,omitempty"` + // PlatformFaultDomain - Specifies the fault domain of the virtual machine. + PlatformFaultDomain *int32 `json:"platformFaultDomain,omitempty"` + // ComputerName - The computer name assigned to the virtual machine. + ComputerName *string `json:"computerName,omitempty"` + // OsName - The Operating System running on the virtual machine. + OsName *string `json:"osName,omitempty"` + // OsVersion - The version of Operating System running on the virtual machine. + OsVersion *string `json:"osVersion,omitempty"` + // RdpThumbPrint - The Remote desktop certificate thumbprint. + RdpThumbPrint *string `json:"rdpThumbPrint,omitempty"` + // VMAgent - The VM Agent running on the virtual machine. + VMAgent *VirtualMachineAgentInstanceView `json:"vmAgent,omitempty"` + // MaintenanceRedeployStatus - The Maintenance Operation status on the virtual machine. + MaintenanceRedeployStatus *MaintenanceRedeployStatus `json:"maintenanceRedeployStatus,omitempty"` + // Disks - The virtual machine disk information. + Disks *[]DiskInstanceView `json:"disks,omitempty"` + // Extensions - The extensions information. + Extensions *[]VirtualMachineExtensionInstanceView `json:"extensions,omitempty"` + // BootDiagnostics - Boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot to diagnose VM status.
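Because every field in these models is a pointer, reading the instance view takes layered nil checks; a sketch follows (the blob-URI fields on BootDiagnosticsInstanceView are assumed from the wider generated package, not shown in this hunk):

```go
package example

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
)

// printBootDiagnostics guards each level of the instance view before
// dereferencing, since any field may be absent in a given response.
func printBootDiagnostics(iv *compute.VirtualMachineInstanceView) {
	if iv == nil || iv.BootDiagnostics == nil {
		fmt.Println("no boot diagnostics in instance view")
		return
	}
	if uri := iv.BootDiagnostics.ConsoleScreenshotBlobURI; uri != nil {
		fmt.Println("screenshot:", *uri)
	}
	if uri := iv.BootDiagnostics.SerialConsoleLogBlobURI; uri != nil {
		fmt.Println("console log:", *uri)
	}
}
```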

    For Linux Virtual Machines, you can easily view the output of your console log.

    For both Windows and Linux virtual machines, Azure also enables you to see a screenshot of the VM from the hypervisor. + BootDiagnostics *BootDiagnosticsInstanceView `json:"bootDiagnostics,omitempty"` + // Statuses - The resource status information. + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// VirtualMachineListResult the List Virtual Machine operation response. +type VirtualMachineListResult struct { + autorest.Response `json:"-"` + // Value - The list of virtual machines. + Value *[]VirtualMachine `json:"value,omitempty"` + // NextLink - The URI to fetch the next page of VMs. Call ListNext() with this URI to fetch the next page of Virtual Machines. + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualMachineListResultIterator provides access to a complete listing of VirtualMachine values. +type VirtualMachineListResultIterator struct { + i int + page VirtualMachineListResultPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *VirtualMachineListResultIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter VirtualMachineListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter VirtualMachineListResultIterator) Response() VirtualMachineListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter VirtualMachineListResultIterator) Value() VirtualMachine { + if !iter.page.NotDone() { + return VirtualMachine{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (vmlr VirtualMachineListResult) IsEmpty() bool { + return vmlr.Value == nil || len(*vmlr.Value) == 0 +} + +// virtualMachineListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (vmlr VirtualMachineListResult) virtualMachineListResultPreparer() (*http.Request, error) { + if vmlr.NextLink == nil || len(to.String(vmlr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(vmlr.NextLink))) +} + +// VirtualMachineListResultPage contains a page of VirtualMachine values. +type VirtualMachineListResultPage struct { + fn func(VirtualMachineListResult) (VirtualMachineListResult, error) + vmlr VirtualMachineListResult +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *VirtualMachineListResultPage) Next() error { + next, err := page.fn(page.vmlr) + if err != nil { + return err + } + page.vmlr = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page VirtualMachineListResultPage) NotDone() bool { + return !page.vmlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. 
+func (page VirtualMachineListResultPage) Response() VirtualMachineListResult { + return page.vmlr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page VirtualMachineListResultPage) Values() []VirtualMachine { + if page.vmlr.IsEmpty() { + return nil + } + return *page.vmlr.Value +} + +// VirtualMachineProperties describes the properties of a Virtual Machine. +type VirtualMachineProperties struct { + // HardwareProfile - Specifies the hardware settings for the virtual machine. + HardwareProfile *HardwareProfile `json:"hardwareProfile,omitempty"` + // StorageProfile - Specifies the storage settings for the virtual machine disks. + StorageProfile *StorageProfile `json:"storageProfile,omitempty"` + // AdditionalCapabilities - Specifies additional capabilities enabled or disabled on the virtual machine. + AdditionalCapabilities *AdditionalCapabilities `json:"additionalCapabilities,omitempty"` + // OsProfile - Specifies the operating system settings for the virtual machine. + OsProfile *OSProfile `json:"osProfile,omitempty"` + // NetworkProfile - Specifies the network interfaces of the virtual machine. + NetworkProfile *NetworkProfile `json:"networkProfile,omitempty"` + // DiagnosticsProfile - Specifies the boot diagnostic settings state.

    Minimum api-version: 2015-06-15. + DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"` + // AvailabilitySet - Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Manage the availability of virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
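As a sketch, attaching a VM to an availability set is just a SubResource reference by ARM ID, set before the initial create (per the creation-time restriction noted below); the ID is a placeholder and import paths are assumed as in the earlier sketches:

```go
package example

import (
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

// withAvailabilitySet points the VM at an existing availability set;
// availabilitySetID is a placeholder ARM resource ID.
func withAvailabilitySet(props *compute.VirtualMachineProperties, availabilitySetID string) {
	props.AvailabilitySet = &compute.SubResource{
		ID: to.StringPtr(availabilitySetID),
	}
}
```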

For more information on Azure planned maintenance, see [Planned maintenance for virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)

    Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added to an availability set. + AvailabilitySet *SubResource `json:"availabilitySet,omitempty"` + // ProvisioningState - The provisioning state, which only appears in the response. + ProvisioningState *string `json:"provisioningState,omitempty"` + // InstanceView - The virtual machine instance view. + InstanceView *VirtualMachineInstanceView `json:"instanceView,omitempty"` + // LicenseType - Specifies that the image or disk that is being used was licensed on-premises. This element is only used for images that contain the Windows Server operating system.

    Possible values are:

    Windows_Client

    Windows_Server

    If this element is included in a request for an update, the value must match the initial value. This value cannot be updated.

    For more information, see [Azure Hybrid Use Benefit for Windows Server](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-hybrid-use-benefit-licensing?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)
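A one-line sketch of opting into the benefit with one of the documented values (import paths as assumed in the earlier sketches):

```go
package example

import (
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

// withHybridBenefit marks a Windows Server VM as licensed on-premises;
// "Windows_Client" is the other documented value.
func withHybridBenefit(props *compute.VirtualMachineProperties) {
	props.LicenseType = to.StringPtr("Windows_Server")
}
```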

Minimum api-version: 2015-06-15 + LicenseType *string `json:"licenseType,omitempty"` + // VMID - Specifies the VM unique ID which is a 128-bit identifier that is encoded and stored in all Azure IaaS VMs SMBIOS and can be read using platform BIOS commands. + VMID *string `json:"vmId,omitempty"` +} + +// VirtualMachineScaleSet describes a Virtual Machine Scale Set. +type VirtualMachineScaleSet struct { + autorest.Response `json:"-"` + // Sku - The virtual machine scale set sku. + Sku *Sku `json:"sku,omitempty"` + // Plan - Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**. + Plan *Plan `json:"plan,omitempty"` + *VirtualMachineScaleSetProperties `json:"properties,omitempty"` + // Identity - The identity of the virtual machine scale set, if configured. + Identity *VirtualMachineScaleSetIdentity `json:"identity,omitempty"` + // Zones - The virtual machine scale set zones. + Zones *[]string `json:"zones,omitempty"` + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - Resource name + Name *string `json:"name,omitempty"` + // Type - Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for VirtualMachineScaleSet. +func (vmss VirtualMachineScaleSet) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if vmss.Sku != nil { + objectMap["sku"] = vmss.Sku + } + if vmss.Plan != nil { + objectMap["plan"] = vmss.Plan + } + if vmss.VirtualMachineScaleSetProperties != nil { + objectMap["properties"] = vmss.VirtualMachineScaleSetProperties + } + if vmss.Identity != nil { + objectMap["identity"] = vmss.Identity + } + if vmss.Zones != nil { + objectMap["zones"] = vmss.Zones + } + if vmss.ID != nil { + objectMap["id"] = vmss.ID + } + if vmss.Name != nil { + objectMap["name"] = vmss.Name + } + if vmss.Type != nil { + objectMap["type"] = vmss.Type + } + if vmss.Location != nil { + objectMap["location"] = vmss.Location + } + if vmss.Tags != nil { + objectMap["tags"] = vmss.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSet struct.
+func (vmss *VirtualMachineScaleSet) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "sku": + if v != nil { + var sku Sku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + vmss.Sku = &sku + } + case "plan": + if v != nil { + var plan Plan + err = json.Unmarshal(*v, &plan) + if err != nil { + return err + } + vmss.Plan = &plan + } + case "properties": + if v != nil { + var virtualMachineScaleSetProperties VirtualMachineScaleSetProperties + err = json.Unmarshal(*v, &virtualMachineScaleSetProperties) + if err != nil { + return err + } + vmss.VirtualMachineScaleSetProperties = &virtualMachineScaleSetProperties + } + case "identity": + if v != nil { + var identity VirtualMachineScaleSetIdentity + err = json.Unmarshal(*v, &identity) + if err != nil { + return err + } + vmss.Identity = &identity + } + case "zones": + if v != nil { + var zones []string + err = json.Unmarshal(*v, &zones) + if err != nil { + return err + } + vmss.Zones = &zones + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + vmss.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + vmss.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + vmss.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + vmss.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + vmss.Tags = tags + } + } + } + + return nil +} + +// VirtualMachineScaleSetDataDisk describes a virtual machine scale set data disk. +type VirtualMachineScaleSetDataDisk struct { + // Name - The disk name. + Name *string `json:"name,omitempty"` + // Lun - Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. + Lun *int32 `json:"lun,omitempty"` + // Caching - Specifies the caching requirements.

    Possible values are:

    **None**

    **ReadOnly**

    **ReadWrite**

    Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' + Caching CachingTypes `json:"caching,omitempty"` + // WriteAcceleratorEnabled - Specifies whether writeAccelerator should be enabled or disabled on the disk. + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"` + // CreateOption - The create option. Possible values include: 'DiskCreateOptionTypesFromImage', 'DiskCreateOptionTypesEmpty', 'DiskCreateOptionTypesAttach' + CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` + // DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image.

    This value cannot be larger than 1023 GB + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // ManagedDisk - The managed disk parameters. + ManagedDisk *VirtualMachineScaleSetManagedDiskParameters `json:"managedDisk,omitempty"` +} + +// VirtualMachineScaleSetExtension describes a Virtual Machine Scale Set Extension. +type VirtualMachineScaleSetExtension struct { + autorest.Response `json:"-"` + // Name - The name of the extension. + Name *string `json:"name,omitempty"` + *VirtualMachineScaleSetExtensionProperties `json:"properties,omitempty"` + // ID - Resource Id + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for VirtualMachineScaleSetExtension. +func (vmsse VirtualMachineScaleSetExtension) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if vmsse.Name != nil { + objectMap["name"] = vmsse.Name + } + if vmsse.VirtualMachineScaleSetExtensionProperties != nil { + objectMap["properties"] = vmsse.VirtualMachineScaleSetExtensionProperties + } + if vmsse.ID != nil { + objectMap["id"] = vmsse.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSetExtension struct. +func (vmsse *VirtualMachineScaleSetExtension) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + vmsse.Name = &name + } + case "properties": + if v != nil { + var virtualMachineScaleSetExtensionProperties VirtualMachineScaleSetExtensionProperties + err = json.Unmarshal(*v, &virtualMachineScaleSetExtensionProperties) + if err != nil { + return err + } + vmsse.VirtualMachineScaleSetExtensionProperties = &virtualMachineScaleSetExtensionProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + vmsse.ID = &ID + } + } + } + + return nil +} + +// VirtualMachineScaleSetExtensionListResult the List VM scale set extension operation response. +type VirtualMachineScaleSetExtensionListResult struct { + autorest.Response `json:"-"` + // Value - The list of VM scale set extensions. + Value *[]VirtualMachineScaleSetExtension `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of VM scale set extensions. Call ListNext() with this to fetch the next page of VM scale set extensions. + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualMachineScaleSetExtensionListResultIterator provides access to a complete listing of +// VirtualMachineScaleSetExtension values. +type VirtualMachineScaleSetExtensionListResultIterator struct { + i int + page VirtualMachineScaleSetExtensionListResultPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *VirtualMachineScaleSetExtensionListResultIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter VirtualMachineScaleSetExtensionListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. 
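The iterator just defined follows the generated SDK's usual enumeration contract: NotDone/Value/Next walk individual items and fetch successive pages transparently. A usage sketch, assuming iter was obtained from a list call (wiring omitted) and fmt plus the vendored to helpers are imported:

for iter.NotDone() {
	ext := iter.Value() // a VirtualMachineScaleSetExtension
	fmt.Println(to.String(ext.Name))
	if err := iter.Next(); err != nil {
		return err // on error the iterator does not advance
	}
}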
+func (iter VirtualMachineScaleSetExtensionListResultIterator) Response() VirtualMachineScaleSetExtensionListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter VirtualMachineScaleSetExtensionListResultIterator) Value() VirtualMachineScaleSetExtension { + if !iter.page.NotDone() { + return VirtualMachineScaleSetExtension{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (vmsselr VirtualMachineScaleSetExtensionListResult) IsEmpty() bool { + return vmsselr.Value == nil || len(*vmsselr.Value) == 0 +} + +// virtualMachineScaleSetExtensionListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (vmsselr VirtualMachineScaleSetExtensionListResult) virtualMachineScaleSetExtensionListResultPreparer() (*http.Request, error) { + if vmsselr.NextLink == nil || len(to.String(vmsselr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(vmsselr.NextLink))) +} + +// VirtualMachineScaleSetExtensionListResultPage contains a page of VirtualMachineScaleSetExtension values. +type VirtualMachineScaleSetExtensionListResultPage struct { + fn func(VirtualMachineScaleSetExtensionListResult) (VirtualMachineScaleSetExtensionListResult, error) + vmsselr VirtualMachineScaleSetExtensionListResult +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *VirtualMachineScaleSetExtensionListResultPage) Next() error { + next, err := page.fn(page.vmsselr) + if err != nil { + return err + } + page.vmsselr = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page VirtualMachineScaleSetExtensionListResultPage) NotDone() bool { + return !page.vmsselr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page VirtualMachineScaleSetExtensionListResultPage) Response() VirtualMachineScaleSetExtensionListResult { + return page.vmsselr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page VirtualMachineScaleSetExtensionListResultPage) Values() []VirtualMachineScaleSetExtension { + if page.vmsselr.IsEmpty() { + return nil + } + return *page.vmsselr.Value +} + +// VirtualMachineScaleSetExtensionProfile describes a virtual machine scale set extension profile. +type VirtualMachineScaleSetExtensionProfile struct { + // Extensions - The virtual machine scale set child extension resources. + Extensions *[]VirtualMachineScaleSetExtension `json:"extensions,omitempty"` +} + +// VirtualMachineScaleSetExtensionProperties describes the properties of a Virtual Machine Scale Set Extension. +type VirtualMachineScaleSetExtensionProperties struct { + // ForceUpdateTag - If a value is provided and is different from the previous value, the extension handler will be forced to update even if the extension configuration has not changed. + ForceUpdateTag *string `json:"forceUpdateTag,omitempty"` + // Publisher - The name of the extension handler publisher. + Publisher *string `json:"publisher,omitempty"` + // Type - Specifies the type of the extension; an example is "CustomScriptExtension". 
+ Type *string `json:"type,omitempty"` + // TypeHandlerVersion - Specifies the version of the script handler. + TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"` + // AutoUpgradeMinorVersion - Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true. + AutoUpgradeMinorVersion *bool `json:"autoUpgradeMinorVersion,omitempty"` + // Settings - Json formatted public settings for the extension. + Settings interface{} `json:"settings,omitempty"` + // ProtectedSettings - The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all. + ProtectedSettings interface{} `json:"protectedSettings,omitempty"` + // ProvisioningState - The provisioning state, which only appears in the response. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// VirtualMachineScaleSetExtensionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of +// a long-running operation. +type VirtualMachineScaleSetExtensionsCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachineScaleSetExtensionsCreateOrUpdateFuture) Result(client VirtualMachineScaleSetExtensionsClient) (vmsse VirtualMachineScaleSetExtension, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetExtensionsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vmsse.Response.Response, err = future.GetResult(sender); err == nil && vmsse.Response.Response.StatusCode != http.StatusNoContent { + vmsse, err = client.CreateOrUpdateResponder(vmsse.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsCreateOrUpdateFuture", "Result", vmsse.Response.Response, "Failure responding to request") + } + } + return +} + +// VirtualMachineScaleSetExtensionsDeleteFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type VirtualMachineScaleSetExtensionsDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachineScaleSetExtensionsDeleteFuture) Result(client VirtualMachineScaleSetExtensionsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetExtensionsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachineScaleSetIdentity identity for the virtual machine scale set. 
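A sketch of how these extension properties compose in practice; the publisher, type, and settings payload below are illustrative (the type echoes the "CustomScriptExtension" example mentioned above):

ext := compute.VirtualMachineScaleSetExtension{
	Name: to.StringPtr("bootstrap"),
	VirtualMachineScaleSetExtensionProperties: &compute.VirtualMachineScaleSetExtensionProperties{
		Publisher:               to.StringPtr("Microsoft.Azure.Extensions"),
		Type:                    to.StringPtr("CustomScript"),
		TypeHandlerVersion:      to.StringPtr("2.0"),
		AutoUpgradeMinorVersion: to.BoolPtr(true),
		// Settings is an untyped JSON object; secrets belong in ProtectedSettings instead.
		Settings: map[string]interface{}{"commandToExecute": "echo hello"},
	},
}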
+type VirtualMachineScaleSetIdentity struct { + // PrincipalID - The principal id of virtual machine scale set identity. This property will only be provided for a system assigned identity. + PrincipalID *string `json:"principalId,omitempty"` + // TenantID - The tenant id associated with the virtual machine scale set. This property will only be provided for a system assigned identity. + TenantID *string `json:"tenantId,omitempty"` + // Type - The type of identity used for the virtual machine scale set. The type 'SystemAssigned, UserAssigned' includes both an implicitly created identity and a set of user assigned identities. The type 'None' will remove any identities from the virtual machine scale set. Possible values include: 'ResourceIdentityTypeSystemAssigned', 'ResourceIdentityTypeUserAssigned', 'ResourceIdentityTypeSystemAssignedUserAssigned', 'ResourceIdentityTypeNone' + Type ResourceIdentityType `json:"type,omitempty"` + // UserAssignedIdentities - The list of user identities associated with the virtual machine scale set. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. + UserAssignedIdentities map[string]*VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue `json:"userAssignedIdentities"` +} + +// MarshalJSON is the custom marshaler for VirtualMachineScaleSetIdentity. +func (vmssi VirtualMachineScaleSetIdentity) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if vmssi.PrincipalID != nil { + objectMap["principalId"] = vmssi.PrincipalID + } + if vmssi.TenantID != nil { + objectMap["tenantId"] = vmssi.TenantID + } + if vmssi.Type != "" { + objectMap["type"] = vmssi.Type + } + if vmssi.UserAssignedIdentities != nil { + objectMap["userAssignedIdentities"] = vmssi.UserAssignedIdentities + } + return json.Marshal(objectMap) +} + +// VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue ... +type VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue struct { + // PrincipalID - The principal id of user assigned identity. + PrincipalID *string `json:"principalId,omitempty"` + // ClientID - The client id of user assigned identity. + ClientID *string `json:"clientId,omitempty"` +} + +// VirtualMachineScaleSetInstanceView the instance view of a virtual machine scale set. +type VirtualMachineScaleSetInstanceView struct { + autorest.Response `json:"-"` + // VirtualMachine - The instance view status summary for the virtual machine scale set. + VirtualMachine *VirtualMachineScaleSetInstanceViewStatusesSummary `json:"virtualMachine,omitempty"` + // Extensions - The extensions information. + Extensions *[]VirtualMachineScaleSetVMExtensionsSummary `json:"extensions,omitempty"` + // Statuses - The resource status information. + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// VirtualMachineScaleSetInstanceViewStatusesSummary instance view statuses summary for virtual machines of a +// virtual machine scale set. +type VirtualMachineScaleSetInstanceViewStatusesSummary struct { + // StatusesSummary - The extensions information. + StatusesSummary *[]VirtualMachineStatusCodeCount `json:"statusesSummary,omitempty"` +} + +// VirtualMachineScaleSetIPConfiguration describes a virtual machine scale set network profile's IP configuration. +type VirtualMachineScaleSetIPConfiguration struct { + // Name - The IP configuration name. 
+ Name *string `json:"name,omitempty"` + *VirtualMachineScaleSetIPConfigurationProperties `json:"properties,omitempty"` + // ID - Resource Id + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for VirtualMachineScaleSetIPConfiguration. +func (vmssic VirtualMachineScaleSetIPConfiguration) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if vmssic.Name != nil { + objectMap["name"] = vmssic.Name + } + if vmssic.VirtualMachineScaleSetIPConfigurationProperties != nil { + objectMap["properties"] = vmssic.VirtualMachineScaleSetIPConfigurationProperties + } + if vmssic.ID != nil { + objectMap["id"] = vmssic.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSetIPConfiguration struct. +func (vmssic *VirtualMachineScaleSetIPConfiguration) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + vmssic.Name = &name + } + case "properties": + if v != nil { + var virtualMachineScaleSetIPConfigurationProperties VirtualMachineScaleSetIPConfigurationProperties + err = json.Unmarshal(*v, &virtualMachineScaleSetIPConfigurationProperties) + if err != nil { + return err + } + vmssic.VirtualMachineScaleSetIPConfigurationProperties = &virtualMachineScaleSetIPConfigurationProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + vmssic.ID = &ID + } + } + } + + return nil +} + +// VirtualMachineScaleSetIPConfigurationProperties describes a virtual machine scale set network profile's IP +// configuration properties. +type VirtualMachineScaleSetIPConfigurationProperties struct { + // Subnet - Specifies the identifier of the subnet. + Subnet *APIEntityReference `json:"subnet,omitempty"` + // Primary - Specifies the primary network interface in case the virtual machine has more than 1 network interface. + Primary *bool `json:"primary,omitempty"` + // PublicIPAddressConfiguration - The publicIPAddressConfiguration. + PublicIPAddressConfiguration *VirtualMachineScaleSetPublicIPAddressConfiguration `json:"publicIPAddressConfiguration,omitempty"` + // PrivateIPAddressVersion - Available from Api-Version 2017-03-30 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'. Possible values include: 'IPv4', 'IPv6' + PrivateIPAddressVersion IPVersion `json:"privateIPAddressVersion,omitempty"` + // ApplicationGatewayBackendAddressPools - Specifies an array of references to backend address pools of application gateways. A scale set can reference backend address pools of multiple application gateways. Multiple scale sets cannot use the same application gateway. + ApplicationGatewayBackendAddressPools *[]SubResource `json:"applicationGatewayBackendAddressPools,omitempty"` + // ApplicationSecurityGroups - Specifies an array of references to application security group. + ApplicationSecurityGroups *[]SubResource `json:"applicationSecurityGroups,omitempty"` + // LoadBalancerBackendAddressPools - Specifies an array of references to backend address pools of load balancers. A scale set can reference backend address pools of one public and one internal load balancer. Multiple scale sets cannot use the same load balancer. 
+ LoadBalancerBackendAddressPools *[]SubResource `json:"loadBalancerBackendAddressPools,omitempty"` + // LoadBalancerInboundNatPools - Specifies an array of references to inbound Nat pools of the load balancers. A scale set can reference inbound nat pools of one public and one internal load balancer. Multiple scale sets cannot use the same load balancer + LoadBalancerInboundNatPools *[]SubResource `json:"loadBalancerInboundNatPools,omitempty"` +} + +// VirtualMachineScaleSetIPTag contains the IP tag associated with the public IP address. +type VirtualMachineScaleSetIPTag struct { + // IPTagType - IP tag type. Example: FirstPartyUsage. + IPTagType *string `json:"ipTagType,omitempty"` + // Tag - IP tag associated with the public IP. Example: SQL, Storage etc. + Tag *string `json:"tag,omitempty"` +} + +// VirtualMachineScaleSetListOSUpgradeHistory list of Virtual Machine Scale Set OS Upgrade History operation +// response. +type VirtualMachineScaleSetListOSUpgradeHistory struct { + autorest.Response `json:"-"` + // Value - The list of OS upgrades performed on the virtual machine scale set. + Value *[]UpgradeOperationHistoricalStatusInfo `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of OS Upgrade History. Call ListNext() with this to fetch the next page of history of upgrades. + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualMachineScaleSetListOSUpgradeHistoryIterator provides access to a complete listing of +// UpgradeOperationHistoricalStatusInfo values. +type VirtualMachineScaleSetListOSUpgradeHistoryIterator struct { + i int + page VirtualMachineScaleSetListOSUpgradeHistoryPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *VirtualMachineScaleSetListOSUpgradeHistoryIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter VirtualMachineScaleSetListOSUpgradeHistoryIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter VirtualMachineScaleSetListOSUpgradeHistoryIterator) Response() VirtualMachineScaleSetListOSUpgradeHistory { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter VirtualMachineScaleSetListOSUpgradeHistoryIterator) Value() UpgradeOperationHistoricalStatusInfo { + if !iter.page.NotDone() { + return UpgradeOperationHistoricalStatusInfo{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (vmsslouh VirtualMachineScaleSetListOSUpgradeHistory) IsEmpty() bool { + return vmsslouh.Value == nil || len(*vmsslouh.Value) == 0 +} + +// virtualMachineScaleSetListOSUpgradeHistoryPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. 
+func (vmsslouh VirtualMachineScaleSetListOSUpgradeHistory) virtualMachineScaleSetListOSUpgradeHistoryPreparer() (*http.Request, error) { + if vmsslouh.NextLink == nil || len(to.String(vmsslouh.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(vmsslouh.NextLink))) +} + +// VirtualMachineScaleSetListOSUpgradeHistoryPage contains a page of UpgradeOperationHistoricalStatusInfo values. +type VirtualMachineScaleSetListOSUpgradeHistoryPage struct { + fn func(VirtualMachineScaleSetListOSUpgradeHistory) (VirtualMachineScaleSetListOSUpgradeHistory, error) + vmsslouh VirtualMachineScaleSetListOSUpgradeHistory +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *VirtualMachineScaleSetListOSUpgradeHistoryPage) Next() error { + next, err := page.fn(page.vmsslouh) + if err != nil { + return err + } + page.vmsslouh = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page VirtualMachineScaleSetListOSUpgradeHistoryPage) NotDone() bool { + return !page.vmsslouh.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page VirtualMachineScaleSetListOSUpgradeHistoryPage) Response() VirtualMachineScaleSetListOSUpgradeHistory { + return page.vmsslouh +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page VirtualMachineScaleSetListOSUpgradeHistoryPage) Values() []UpgradeOperationHistoricalStatusInfo { + if page.vmsslouh.IsEmpty() { + return nil + } + return *page.vmsslouh.Value +} + +// VirtualMachineScaleSetListResult the List Virtual Machine operation response. +type VirtualMachineScaleSetListResult struct { + autorest.Response `json:"-"` + // Value - The list of virtual machine scale sets. + Value *[]VirtualMachineScaleSet `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of Virtual Machine Scale Sets. Call ListNext() with this to fetch the next page of VMSS. + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualMachineScaleSetListResultIterator provides access to a complete listing of VirtualMachineScaleSet values. +type VirtualMachineScaleSetListResultIterator struct { + i int + page VirtualMachineScaleSetListResultPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *VirtualMachineScaleSetListResultIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter VirtualMachineScaleSetListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter VirtualMachineScaleSetListResultIterator) Response() VirtualMachineScaleSetListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. 
+func (iter VirtualMachineScaleSetListResultIterator) Value() VirtualMachineScaleSet { + if !iter.page.NotDone() { + return VirtualMachineScaleSet{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (vmsslr VirtualMachineScaleSetListResult) IsEmpty() bool { + return vmsslr.Value == nil || len(*vmsslr.Value) == 0 +} + +// virtualMachineScaleSetListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (vmsslr VirtualMachineScaleSetListResult) virtualMachineScaleSetListResultPreparer() (*http.Request, error) { + if vmsslr.NextLink == nil || len(to.String(vmsslr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(vmsslr.NextLink))) +} + +// VirtualMachineScaleSetListResultPage contains a page of VirtualMachineScaleSet values. +type VirtualMachineScaleSetListResultPage struct { + fn func(VirtualMachineScaleSetListResult) (VirtualMachineScaleSetListResult, error) + vmsslr VirtualMachineScaleSetListResult +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *VirtualMachineScaleSetListResultPage) Next() error { + next, err := page.fn(page.vmsslr) + if err != nil { + return err + } + page.vmsslr = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page VirtualMachineScaleSetListResultPage) NotDone() bool { + return !page.vmsslr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page VirtualMachineScaleSetListResultPage) Response() VirtualMachineScaleSetListResult { + return page.vmsslr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page VirtualMachineScaleSetListResultPage) Values() []VirtualMachineScaleSet { + if page.vmsslr.IsEmpty() { + return nil + } + return *page.vmsslr.Value +} + +// VirtualMachineScaleSetListSkusResult the Virtual Machine Scale Set List Skus operation response. +type VirtualMachineScaleSetListSkusResult struct { + autorest.Response `json:"-"` + // Value - The list of skus available for the virtual machine scale set. + Value *[]VirtualMachineScaleSetSku `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of Virtual Machine Scale Set Skus. Call ListNext() with this to fetch the next page of VMSS Skus. + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualMachineScaleSetListSkusResultIterator provides access to a complete listing of VirtualMachineScaleSetSku +// values. +type VirtualMachineScaleSetListSkusResultIterator struct { + i int + page VirtualMachineScaleSetListSkusResultPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *VirtualMachineScaleSetListSkusResultIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. 
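Where the iterator yields single values, the page type above exposes whole pages. A sketch of draining scale sets page by page (the initial page value would come from a List call, omitted here):

for page.NotDone() {
	for _, vmss := range page.Values() {
		fmt.Println(to.String(vmss.Name))
	}
	if err := page.Next(); err != nil { // follows NextLink via the preparer shown above
		return err
	}
}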
+func (iter VirtualMachineScaleSetListSkusResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter VirtualMachineScaleSetListSkusResultIterator) Response() VirtualMachineScaleSetListSkusResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter VirtualMachineScaleSetListSkusResultIterator) Value() VirtualMachineScaleSetSku { + if !iter.page.NotDone() { + return VirtualMachineScaleSetSku{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (vmsslsr VirtualMachineScaleSetListSkusResult) IsEmpty() bool { + return vmsslsr.Value == nil || len(*vmsslsr.Value) == 0 +} + +// virtualMachineScaleSetListSkusResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (vmsslsr VirtualMachineScaleSetListSkusResult) virtualMachineScaleSetListSkusResultPreparer() (*http.Request, error) { + if vmsslsr.NextLink == nil || len(to.String(vmsslsr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(vmsslsr.NextLink))) +} + +// VirtualMachineScaleSetListSkusResultPage contains a page of VirtualMachineScaleSetSku values. +type VirtualMachineScaleSetListSkusResultPage struct { + fn func(VirtualMachineScaleSetListSkusResult) (VirtualMachineScaleSetListSkusResult, error) + vmsslsr VirtualMachineScaleSetListSkusResult +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *VirtualMachineScaleSetListSkusResultPage) Next() error { + next, err := page.fn(page.vmsslsr) + if err != nil { + return err + } + page.vmsslsr = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page VirtualMachineScaleSetListSkusResultPage) NotDone() bool { + return !page.vmsslsr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page VirtualMachineScaleSetListSkusResultPage) Response() VirtualMachineScaleSetListSkusResult { + return page.vmsslsr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page VirtualMachineScaleSetListSkusResultPage) Values() []VirtualMachineScaleSetSku { + if page.vmsslsr.IsEmpty() { + return nil + } + return *page.vmsslsr.Value +} + +// VirtualMachineScaleSetListWithLinkResult the List Virtual Machine operation response. +type VirtualMachineScaleSetListWithLinkResult struct { + autorest.Response `json:"-"` + // Value - The list of virtual machine scale sets. + Value *[]VirtualMachineScaleSet `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of Virtual Machine Scale Sets. Call ListNext() with this to fetch the next page of Virtual Machine Scale Sets. + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualMachineScaleSetListWithLinkResultIterator provides access to a complete listing of VirtualMachineScaleSet +// values. +type VirtualMachineScaleSetListWithLinkResultIterator struct { + i int + page VirtualMachineScaleSetListWithLinkResultPage +} + +// Next advances to the next value. 
If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *VirtualMachineScaleSetListWithLinkResultIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter VirtualMachineScaleSetListWithLinkResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter VirtualMachineScaleSetListWithLinkResultIterator) Response() VirtualMachineScaleSetListWithLinkResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter VirtualMachineScaleSetListWithLinkResultIterator) Value() VirtualMachineScaleSet { + if !iter.page.NotDone() { + return VirtualMachineScaleSet{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (vmsslwlr VirtualMachineScaleSetListWithLinkResult) IsEmpty() bool { + return vmsslwlr.Value == nil || len(*vmsslwlr.Value) == 0 +} + +// virtualMachineScaleSetListWithLinkResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (vmsslwlr VirtualMachineScaleSetListWithLinkResult) virtualMachineScaleSetListWithLinkResultPreparer() (*http.Request, error) { + if vmsslwlr.NextLink == nil || len(to.String(vmsslwlr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(vmsslwlr.NextLink))) +} + +// VirtualMachineScaleSetListWithLinkResultPage contains a page of VirtualMachineScaleSet values. +type VirtualMachineScaleSetListWithLinkResultPage struct { + fn func(VirtualMachineScaleSetListWithLinkResult) (VirtualMachineScaleSetListWithLinkResult, error) + vmsslwlr VirtualMachineScaleSetListWithLinkResult +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *VirtualMachineScaleSetListWithLinkResultPage) Next() error { + next, err := page.fn(page.vmsslwlr) + if err != nil { + return err + } + page.vmsslwlr = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page VirtualMachineScaleSetListWithLinkResultPage) NotDone() bool { + return !page.vmsslwlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page VirtualMachineScaleSetListWithLinkResultPage) Response() VirtualMachineScaleSetListWithLinkResult { + return page.vmsslwlr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page VirtualMachineScaleSetListWithLinkResultPage) Values() []VirtualMachineScaleSet { + if page.vmsslwlr.IsEmpty() { + return nil + } + return *page.vmsslwlr.Value +} + +// VirtualMachineScaleSetManagedDiskParameters describes the parameters of a ScaleSet managed disk. +type VirtualMachineScaleSetManagedDiskParameters struct { + // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. 
Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS' + StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` +} + +// VirtualMachineScaleSetNetworkConfiguration describes a virtual machine scale set network profile's network +// configurations. +type VirtualMachineScaleSetNetworkConfiguration struct { + // Name - The network configuration name. + Name *string `json:"name,omitempty"` + *VirtualMachineScaleSetNetworkConfigurationProperties `json:"properties,omitempty"` + // ID - Resource Id + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for VirtualMachineScaleSetNetworkConfiguration. +func (vmssnc VirtualMachineScaleSetNetworkConfiguration) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if vmssnc.Name != nil { + objectMap["name"] = vmssnc.Name + } + if vmssnc.VirtualMachineScaleSetNetworkConfigurationProperties != nil { + objectMap["properties"] = vmssnc.VirtualMachineScaleSetNetworkConfigurationProperties + } + if vmssnc.ID != nil { + objectMap["id"] = vmssnc.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSetNetworkConfiguration struct. +func (vmssnc *VirtualMachineScaleSetNetworkConfiguration) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + vmssnc.Name = &name + } + case "properties": + if v != nil { + var virtualMachineScaleSetNetworkConfigurationProperties VirtualMachineScaleSetNetworkConfigurationProperties + err = json.Unmarshal(*v, &virtualMachineScaleSetNetworkConfigurationProperties) + if err != nil { + return err + } + vmssnc.VirtualMachineScaleSetNetworkConfigurationProperties = &virtualMachineScaleSetNetworkConfigurationProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + vmssnc.ID = &ID + } + } + } + + return nil +} + +// VirtualMachineScaleSetNetworkConfigurationDNSSettings describes a virtual machines scale sets network +// configuration's DNS settings. +type VirtualMachineScaleSetNetworkConfigurationDNSSettings struct { + // DNSServers - List of DNS servers IP addresses + DNSServers *[]string `json:"dnsServers,omitempty"` +} + +// VirtualMachineScaleSetNetworkConfigurationProperties describes a virtual machine scale set network profile's IP +// configuration. +type VirtualMachineScaleSetNetworkConfigurationProperties struct { + // Primary - Specifies the primary network interface in case the virtual machine has more than 1 network interface. + Primary *bool `json:"primary,omitempty"` + // EnableAcceleratedNetworking - Specifies whether the network interface is accelerated networking-enabled. + EnableAcceleratedNetworking *bool `json:"enableAcceleratedNetworking,omitempty"` + // NetworkSecurityGroup - The network security group. + NetworkSecurityGroup *SubResource `json:"networkSecurityGroup,omitempty"` + // DNSSettings - The dns settings to be applied on the network interfaces. + DNSSettings *VirtualMachineScaleSetNetworkConfigurationDNSSettings `json:"dnsSettings,omitempty"` + // IPConfigurations - Specifies the IP configurations of the network interface. 
+ IPConfigurations *[]VirtualMachineScaleSetIPConfiguration `json:"ipConfigurations,omitempty"` + // EnableIPForwarding - Whether IP forwarding enabled on this NIC. + EnableIPForwarding *bool `json:"enableIPForwarding,omitempty"` +} + +// VirtualMachineScaleSetNetworkProfile describes a virtual machine scale set network profile. +type VirtualMachineScaleSetNetworkProfile struct { + // HealthProbe - A reference to a load balancer probe used to determine the health of an instance in the virtual machine scale set. The reference will be in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes/{probeName}'. + HealthProbe *APIEntityReference `json:"healthProbe,omitempty"` + // NetworkInterfaceConfigurations - The list of network configurations. + NetworkInterfaceConfigurations *[]VirtualMachineScaleSetNetworkConfiguration `json:"networkInterfaceConfigurations,omitempty"` +} + +// VirtualMachineScaleSetOSDisk describes a virtual machine scale set operating system disk. +type VirtualMachineScaleSetOSDisk struct { + // Name - The disk name. + Name *string `json:"name,omitempty"` + // Caching - Specifies the caching requirements.

    Possible values are:

    **None**

    **ReadOnly**

    **ReadWrite**

    Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' + Caching CachingTypes `json:"caching,omitempty"` + // WriteAcceleratorEnabled - Specifies whether writeAccelerator should be enabled or disabled on the disk. + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"` + // CreateOption - Specifies how the virtual machines in the scale set should be created.

    The only allowed value is: **FromImage** \u2013 This value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imageReference element described above. If you are using a marketplace image, you also use the plan element previously described. Possible values include: 'DiskCreateOptionTypesFromImage', 'DiskCreateOptionTypesEmpty', 'DiskCreateOptionTypesAttach' + CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` + // DiffDiskSettings - Specifies the differencing Disk Settings for the operating system disk used by the virtual machine scale set. + DiffDiskSettings *DiffDiskSettings `json:"diffDiskSettings,omitempty"` + // DiskSizeGB - Specifies the size of the operating system disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image.

    This value cannot be larger than 1023 GB + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // OsType - This property allows you to specify the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD.

    Possible values are:

    **Windows**

    **Linux**. Possible values include: 'Windows', 'Linux' + OsType OperatingSystemTypes `json:"osType,omitempty"` + // Image - Specifies information about the unmanaged user image to base the scale set on. + Image *VirtualHardDisk `json:"image,omitempty"` + // VhdContainers - Specifies the container urls that are used to store operating system disks for the scale set. + VhdContainers *[]string `json:"vhdContainers,omitempty"` + // ManagedDisk - The managed disk parameters. + ManagedDisk *VirtualMachineScaleSetManagedDiskParameters `json:"managedDisk,omitempty"` +} + +// VirtualMachineScaleSetOSProfile describes a virtual machine scale set OS profile. +type VirtualMachineScaleSetOSProfile struct { + // ComputerNamePrefix - Specifies the computer name prefix for all of the virtual machines in the scale set. Computer name prefixes must be 1 to 15 characters long. + ComputerNamePrefix *string `json:"computerNamePrefix,omitempty"` + // AdminUsername - Specifies the name of the administrator account.

    **Windows-only restriction:** Cannot end in "."

    **Disallowed values:** "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", "guest", "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", "test2", "test3", "user4", "user5".

    **Minimum-length (Linux):** 1 character

    **Max-length (Linux):** 64 characters

    **Max-length (Windows):** 20 characters

  • For root access to the Linux VM, see [Using root privileges on Linux virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-use-root-privileges?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)
  • For a list of built-in system users on Linux that should not be used in this field, see [Selecting User Names for Linux on Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-usernames?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json) + AdminUsername *string `json:"adminUsername,omitempty"` + // AdminPassword - Specifies the password of the administrator account.

    **Minimum-length (Windows):** 8 characters

    **Minimum-length (Linux):** 6 characters

    **Max-length (Windows):** 123 characters

    **Max-length (Linux):** 72 characters

    **Complexity requirements:** 3 out of 4 conditions below need to be fulfilled
    Has lower characters
    Has upper characters
    Has a digit
    Has a special character (Regex match [\W_])

    **Disallowed values:** "abc@123", "P@$$w0rd", "P@ssw0rd", "P@ssword123", "Pa$$word", "pass@word1", "Password!", "Password1", "Password22", "iloveyou!"
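    These rules are enforced server-side; a client-side pre-check can still fail fast before a deployment round-trips. A hedged sketch of the 3-out-of-4 complexity test (the helper name is hypothetical, and the character classes approximate the regex quoted above):

    // meetsComplexity reports whether pw satisfies at least 3 of the 4 character classes.
    func meetsComplexity(pw string) bool {
    	n := 0
    	for _, class := range []string{`[a-z]`, `[A-Z]`, `[0-9]`, `[\W_]`} {
    		if regexp.MustCompile(class).MatchString(pw) {
    			n++
    		}
    	}
    	return n >= 3
    }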

    For resetting the password, see [How to reset the Remote Desktop service or its login password in a Windows VM](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-reset-rdp?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)

    For resetting root password, see [Manage users, SSH, and check or repair disks on Azure Linux VMs using the VMAccess Extension](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-vmaccess-extension?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json#reset-root-password) + AdminPassword *string `json:"adminPassword,omitempty"` + // CustomData - Specifies a base-64 encoded string of custom data. The base-64 encoded string is decoded to a binary array that is saved as a file on the Virtual Machine. The maximum length of the binary array is 65535 bytes.

    For using cloud-init for your VM, see [Using cloud-init to customize a Linux VM during creation](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-cloud-init?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json) + CustomData *string `json:"customData,omitempty"` + // WindowsConfiguration - Specifies Windows operating system settings on the virtual machine. + WindowsConfiguration *WindowsConfiguration `json:"windowsConfiguration,omitempty"` + // LinuxConfiguration - Specifies the Linux operating system settings on the virtual machine.

    For a list of supported Linux distributions, see [Linux on Azure-Endorsed Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-endorsed-distros?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)

    For running non-endorsed distributions, see [Information for Non-Endorsed Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-create-upload-generic?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json). + LinuxConfiguration *LinuxConfiguration `json:"linuxConfiguration,omitempty"` + // Secrets - Specifies set of certificates that should be installed onto the virtual machines in the scale set. + Secrets *[]VaultSecretGroup `json:"secrets,omitempty"` +} + +// VirtualMachineScaleSetProperties describes the properties of a Virtual Machine Scale Set. +type VirtualMachineScaleSetProperties struct { + // UpgradePolicy - The upgrade policy. + UpgradePolicy *UpgradePolicy `json:"upgradePolicy,omitempty"` + // VirtualMachineProfile - The virtual machine profile. + VirtualMachineProfile *VirtualMachineScaleSetVMProfile `json:"virtualMachineProfile,omitempty"` + // ProvisioningState - The provisioning state, which only appears in the response. + ProvisioningState *string `json:"provisioningState,omitempty"` + // Overprovision - Specifies whether the Virtual Machine Scale Set should be overprovisioned. + Overprovision *bool `json:"overprovision,omitempty"` + // UniqueID - Specifies the ID which uniquely identifies a Virtual Machine Scale Set. + UniqueID *string `json:"uniqueId,omitempty"` + // SinglePlacementGroup - When true this limits the scale set to a single placement group, of max size 100 virtual machines. + SinglePlacementGroup *bool `json:"singlePlacementGroup,omitempty"` + // ZoneBalance - Whether to force strictly even Virtual Machine distribution cross x-zones in case there is zone outage. + ZoneBalance *bool `json:"zoneBalance,omitempty"` + // PlatformFaultDomainCount - Fault Domain count for each placement group. + PlatformFaultDomainCount *int32 `json:"platformFaultDomainCount,omitempty"` +} + +// VirtualMachineScaleSetPublicIPAddressConfiguration describes a virtual machines scale set IP Configuration's +// PublicIPAddress configuration +type VirtualMachineScaleSetPublicIPAddressConfiguration struct { + // Name - The publicIP address configuration name. + Name *string `json:"name,omitempty"` + *VirtualMachineScaleSetPublicIPAddressConfigurationProperties `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for VirtualMachineScaleSetPublicIPAddressConfiguration. +func (vmsspiac VirtualMachineScaleSetPublicIPAddressConfiguration) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if vmsspiac.Name != nil { + objectMap["name"] = vmsspiac.Name + } + if vmsspiac.VirtualMachineScaleSetPublicIPAddressConfigurationProperties != nil { + objectMap["properties"] = vmsspiac.VirtualMachineScaleSetPublicIPAddressConfigurationProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSetPublicIPAddressConfiguration struct.
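To ground the placement knobs in the properties struct above, a sketch of a zonal, zone-balanced scale set definition (all values illustrative):

props := compute.VirtualMachineScaleSetProperties{
	Overprovision:            to.BoolPtr(true),
	SinglePlacementGroup:     to.BoolPtr(false), // allow growth beyond one 100-VM placement group
	ZoneBalance:              to.BoolPtr(true),  // keep zones evenly populated on outage
	PlatformFaultDomainCount: to.Int32Ptr(5),
}
vmss := compute.VirtualMachineScaleSet{
	Location:                         to.StringPtr("westus2"),
	Zones:                            &[]string{"1", "2", "3"},
	VirtualMachineScaleSetProperties: &props,
}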
+func (vmsspiac *VirtualMachineScaleSetPublicIPAddressConfiguration) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + vmsspiac.Name = &name + } + case "properties": + if v != nil { + var virtualMachineScaleSetPublicIPAddressConfigurationProperties VirtualMachineScaleSetPublicIPAddressConfigurationProperties + err = json.Unmarshal(*v, &virtualMachineScaleSetPublicIPAddressConfigurationProperties) + if err != nil { + return err + } + vmsspiac.VirtualMachineScaleSetPublicIPAddressConfigurationProperties = &virtualMachineScaleSetPublicIPAddressConfigurationProperties + } + } + } + + return nil +} + +// VirtualMachineScaleSetPublicIPAddressConfigurationDNSSettings describes a virtual machines scale sets network +// configuration's DNS settings. +type VirtualMachineScaleSetPublicIPAddressConfigurationDNSSettings struct { + // DomainNameLabel - The Domain name label.The concatenation of the domain name label and vm index will be the domain name labels of the PublicIPAddress resources that will be created + DomainNameLabel *string `json:"domainNameLabel,omitempty"` +} + +// VirtualMachineScaleSetPublicIPAddressConfigurationProperties describes a virtual machines scale set IP +// Configuration's PublicIPAddress configuration +type VirtualMachineScaleSetPublicIPAddressConfigurationProperties struct { + // IdleTimeoutInMinutes - The idle timeout of the public IP address. + IdleTimeoutInMinutes *int32 `json:"idleTimeoutInMinutes,omitempty"` + // DNSSettings - The dns settings to be applied on the publicIP addresses . + DNSSettings *VirtualMachineScaleSetPublicIPAddressConfigurationDNSSettings `json:"dnsSettings,omitempty"` + // IPTags - The list of IP tags associated with the public IP address. + IPTags *[]VirtualMachineScaleSetIPTag `json:"ipTags,omitempty"` + // PublicIPPrefix - The PublicIPPrefix from which to allocate publicIP addresses. + PublicIPPrefix *SubResource `json:"publicIPPrefix,omitempty"` +} + +// VirtualMachineScaleSetRollingUpgradesCancelFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type VirtualMachineScaleSetRollingUpgradesCancelFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachineScaleSetRollingUpgradesCancelFuture) Result(client VirtualMachineScaleSetRollingUpgradesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesCancelFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetRollingUpgradesCancelFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture an abstraction for monitoring and retrieving +// the results of a long-running operation. +type VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture) Result(client VirtualMachineScaleSetRollingUpgradesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture an abstraction for monitoring and retrieving the +// results of a long-running operation. +type VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture) Result(client VirtualMachineScaleSetRollingUpgradesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachineScaleSetsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type VirtualMachineScaleSetsCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachineScaleSetsCreateOrUpdateFuture) Result(client VirtualMachineScaleSetsClient) (vmss VirtualMachineScaleSet, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vmss.Response.Response, err = future.GetResult(sender); err == nil && vmss.Response.Response.StatusCode != http.StatusNoContent { + vmss, err = client.CreateOrUpdateResponder(vmss.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsCreateOrUpdateFuture", "Result", vmss.Response.Response, "Failure responding to request") + } + } + return +} + +// VirtualMachineScaleSetsDeallocateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type VirtualMachineScaleSetsDeallocateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
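All of these futures share one contract: Done polls the service, and Result may only be called once Done reports true. A polling sketch against the scale-set create/update future, assuming the generated client's CreateOrUpdate returned it and time is imported:

// waitForVMSS blocks until the long-running operation finishes, then fetches the result.
func waitForVMSS(client compute.VirtualMachineScaleSetsClient, future compute.VirtualMachineScaleSetsCreateOrUpdateFuture) (compute.VirtualMachineScaleSet, error) {
	for {
		done, err := future.Done(client)
		if err != nil {
			return compute.VirtualMachineScaleSet{}, err
		}
		if done {
			break
		}
		time.Sleep(10 * time.Second) // crude interval; production code would honor Retry-After
	}
	return future.Result(client)
}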
+func (future *VirtualMachineScaleSetsDeallocateFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsDeallocateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsDeallocateFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachineScaleSetsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachineScaleSetsDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachineScaleSetsDeleteFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachineScaleSetsDeleteInstancesFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type VirtualMachineScaleSetsDeleteInstancesFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachineScaleSetsDeleteInstancesFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsDeleteInstancesFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsDeleteInstancesFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachineScaleSetSku describes an available virtual machine scale set sku. +type VirtualMachineScaleSetSku struct { + // ResourceType - The type of resource the sku applies to. + ResourceType *string `json:"resourceType,omitempty"` + // Sku - The Sku. + Sku *Sku `json:"sku,omitempty"` + // Capacity - Specifies the number of virtual machines in the scale set. + Capacity *VirtualMachineScaleSetSkuCapacity `json:"capacity,omitempty"` +} + +// VirtualMachineScaleSetSkuCapacity describes scaling information of a sku. +type VirtualMachineScaleSetSkuCapacity struct { + // Minimum - The minimum capacity. + Minimum *int64 `json:"minimum,omitempty"` + // Maximum - The maximum capacity that can be set. + Maximum *int64 `json:"maximum,omitempty"` + // DefaultCapacity - The default capacity. + DefaultCapacity *int64 `json:"defaultCapacity,omitempty"` + // ScaleType - The scale type applicable to the sku. Possible values include: 'VirtualMachineScaleSetSkuScaleTypeAutomatic', 'VirtualMachineScaleSetSkuScaleTypeNone' + ScaleType VirtualMachineScaleSetSkuScaleType `json:"scaleType,omitempty"` +} + +// VirtualMachineScaleSetsPerformMaintenanceFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. 
+type VirtualMachineScaleSetsPerformMaintenanceFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachineScaleSetsPerformMaintenanceFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsPerformMaintenanceFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsPerformMaintenanceFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachineScaleSetsPowerOffFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachineScaleSetsPowerOffFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachineScaleSetsPowerOffFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsPowerOffFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsPowerOffFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachineScaleSetsRedeployFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachineScaleSetsRedeployFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachineScaleSetsRedeployFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsRedeployFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsRedeployFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachineScaleSetsReimageAllFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type VirtualMachineScaleSetsReimageAllFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachineScaleSetsReimageAllFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsReimageAllFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsReimageAllFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachineScaleSetsReimageFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. 
+type VirtualMachineScaleSetsReimageFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachineScaleSetsReimageFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsReimageFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsReimageFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachineScaleSetsRestartFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachineScaleSetsRestartFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachineScaleSetsRestartFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsRestartFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsRestartFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachineScaleSetsStartFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachineScaleSetsStartFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachineScaleSetsStartFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsStartFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsStartFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachineScaleSetStorageProfile describes a virtual machine scale set storage profile. +type VirtualMachineScaleSetStorageProfile struct { + // ImageReference - Specifies information about the image to use. You can specify information about platform images, marketplace images, or virtual machine images. This element is required when you want to use a platform image, marketplace image, or virtual machine image, but is not used in other creation operations. + ImageReference *ImageReference `json:"imageReference,omitempty"` + // OsDisk - Specifies information about the operating system disk used by the virtual machines in the scale set.
+ // For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). + OsDisk *VirtualMachineScaleSetOSDisk `json:"osDisk,omitempty"` + // DataDisks - Specifies the parameters that are used to add data disks to the virtual machines in the scale set.
+ // For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). + DataDisks *[]VirtualMachineScaleSetDataDisk `json:"dataDisks,omitempty"` +} + +// VirtualMachineScaleSetsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachineScaleSetsUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachineScaleSetsUpdateFuture) Result(client VirtualMachineScaleSetsClient) (vmss VirtualMachineScaleSet, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vmss.Response.Response, err = future.GetResult(sender); err == nil && vmss.Response.Response.StatusCode != http.StatusNoContent { + vmss, err = client.UpdateResponder(vmss.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsUpdateFuture", "Result", vmss.Response.Response, "Failure responding to request") + } + } + return +} + +// VirtualMachineScaleSetsUpdateInstancesFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type VirtualMachineScaleSetsUpdateInstancesFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachineScaleSetsUpdateInstancesFuture) Result(client VirtualMachineScaleSetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsUpdateInstancesFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetsUpdateInstancesFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachineScaleSetUpdate describes a Virtual Machine Scale Set. +type VirtualMachineScaleSetUpdate struct { + // Sku - The virtual machine scale set sku. + Sku *Sku `json:"sku,omitempty"` + // Plan - The purchase plan when deploying a virtual machine scale set from VM Marketplace images. + Plan *Plan `json:"plan,omitempty"` + *VirtualMachineScaleSetUpdateProperties `json:"properties,omitempty"` + // Identity - The identity of the virtual machine scale set, if configured. + Identity *VirtualMachineScaleSetIdentity `json:"identity,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for VirtualMachineScaleSetUpdate.
+func (vmssu VirtualMachineScaleSetUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if vmssu.Sku != nil { + objectMap["sku"] = vmssu.Sku + } + if vmssu.Plan != nil { + objectMap["plan"] = vmssu.Plan + } + if vmssu.VirtualMachineScaleSetUpdateProperties != nil { + objectMap["properties"] = vmssu.VirtualMachineScaleSetUpdateProperties + } + if vmssu.Identity != nil { + objectMap["identity"] = vmssu.Identity + } + if vmssu.Tags != nil { + objectMap["tags"] = vmssu.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSetUpdate struct. +func (vmssu *VirtualMachineScaleSetUpdate) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "sku": + if v != nil { + var sku Sku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + vmssu.Sku = &sku + } + case "plan": + if v != nil { + var plan Plan + err = json.Unmarshal(*v, &plan) + if err != nil { + return err + } + vmssu.Plan = &plan + } + case "properties": + if v != nil { + var virtualMachineScaleSetUpdateProperties VirtualMachineScaleSetUpdateProperties + err = json.Unmarshal(*v, &virtualMachineScaleSetUpdateProperties) + if err != nil { + return err + } + vmssu.VirtualMachineScaleSetUpdateProperties = &virtualMachineScaleSetUpdateProperties + } + case "identity": + if v != nil { + var identity VirtualMachineScaleSetIdentity + err = json.Unmarshal(*v, &identity) + if err != nil { + return err + } + vmssu.Identity = &identity + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + vmssu.Tags = tags + } + } + } + + return nil +} + +// VirtualMachineScaleSetUpdateIPConfiguration describes a virtual machine scale set network profile's IP +// configuration. +type VirtualMachineScaleSetUpdateIPConfiguration struct { + // Name - The IP configuration name. + Name *string `json:"name,omitempty"` + *VirtualMachineScaleSetUpdateIPConfigurationProperties `json:"properties,omitempty"` + // ID - Resource Id + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for VirtualMachineScaleSetUpdateIPConfiguration. +func (vmssuic VirtualMachineScaleSetUpdateIPConfiguration) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if vmssuic.Name != nil { + objectMap["name"] = vmssuic.Name + } + if vmssuic.VirtualMachineScaleSetUpdateIPConfigurationProperties != nil { + objectMap["properties"] = vmssuic.VirtualMachineScaleSetUpdateIPConfigurationProperties + } + if vmssuic.ID != nil { + objectMap["id"] = vmssuic.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSetUpdateIPConfiguration struct. 
+func (vmssuic *VirtualMachineScaleSetUpdateIPConfiguration) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + vmssuic.Name = &name + } + case "properties": + if v != nil { + var virtualMachineScaleSetUpdateIPConfigurationProperties VirtualMachineScaleSetUpdateIPConfigurationProperties + err = json.Unmarshal(*v, &virtualMachineScaleSetUpdateIPConfigurationProperties) + if err != nil { + return err + } + vmssuic.VirtualMachineScaleSetUpdateIPConfigurationProperties = &virtualMachineScaleSetUpdateIPConfigurationProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + vmssuic.ID = &ID + } + } + } + + return nil +} + +// VirtualMachineScaleSetUpdateIPConfigurationProperties describes a virtual machine scale set network profile's IP +// configuration properties. +type VirtualMachineScaleSetUpdateIPConfigurationProperties struct { + // Subnet - The subnet. + Subnet *APIEntityReference `json:"subnet,omitempty"` + // Primary - Specifies the primary IP Configuration in case the network interface has more than one IP Configuration. + Primary *bool `json:"primary,omitempty"` + // PublicIPAddressConfiguration - The publicIPAddressConfiguration. + PublicIPAddressConfiguration *VirtualMachineScaleSetUpdatePublicIPAddressConfiguration `json:"publicIPAddressConfiguration,omitempty"` + // PrivateIPAddressVersion - Available from Api-Version 2017-03-30 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'. Possible values include: 'IPv4', 'IPv6' + PrivateIPAddressVersion IPVersion `json:"privateIPAddressVersion,omitempty"` + // ApplicationGatewayBackendAddressPools - The application gateway backend address pools. + ApplicationGatewayBackendAddressPools *[]SubResource `json:"applicationGatewayBackendAddressPools,omitempty"` + // ApplicationSecurityGroups - Specifies an array of references to application security group. + ApplicationSecurityGroups *[]SubResource `json:"applicationSecurityGroups,omitempty"` + // LoadBalancerBackendAddressPools - The load balancer backend address pools. + LoadBalancerBackendAddressPools *[]SubResource `json:"loadBalancerBackendAddressPools,omitempty"` + // LoadBalancerInboundNatPools - The load balancer inbound nat pools. + LoadBalancerInboundNatPools *[]SubResource `json:"loadBalancerInboundNatPools,omitempty"` +} + +// VirtualMachineScaleSetUpdateNetworkConfiguration describes a virtual machine scale set network profile's network +// configurations. +type VirtualMachineScaleSetUpdateNetworkConfiguration struct { + // Name - The network configuration name. + Name *string `json:"name,omitempty"` + *VirtualMachineScaleSetUpdateNetworkConfigurationProperties `json:"properties,omitempty"` + // ID - Resource Id + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for VirtualMachineScaleSetUpdateNetworkConfiguration. 
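The `MarshalJSON`/`UnmarshalJSON` pairs in this file, including the marshaler that follows, all implement the same flattening: the anonymously embedded `*...Properties` struct is nested under a `"properties"` key on the wire rather than inlined. A round-trip sketch (import path assumed as above, field values hypothetical):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" // assumed vendored path
)

func main() {
	name, primary := "ipcfg-1", true
	cfg := compute.VirtualMachineScaleSetUpdateIPConfiguration{
		Name: &name,
		VirtualMachineScaleSetUpdateIPConfigurationProperties: &compute.VirtualMachineScaleSetUpdateIPConfigurationProperties{
			Primary: &primary,
		},
	}

	b, _ := json.Marshal(cfg) // routes through the custom MarshalJSON
	fmt.Println(string(b))    // {"name":"ipcfg-1","properties":{"primary":true}}

	var back compute.VirtualMachineScaleSetUpdateIPConfiguration
	_ = json.Unmarshal(b, &back) // custom UnmarshalJSON re-inflates the embedded struct
	fmt.Println(*back.Primary)   // true, promoted through the embedded pointer
}
```

Because each key is emitted only when the corresponding field is non-nil, a zero-value struct marshals to `{}`, which is what makes these Update types usable as sparse PATCH payloads.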
+func (vmssunc VirtualMachineScaleSetUpdateNetworkConfiguration) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if vmssunc.Name != nil { + objectMap["name"] = vmssunc.Name + } + if vmssunc.VirtualMachineScaleSetUpdateNetworkConfigurationProperties != nil { + objectMap["properties"] = vmssunc.VirtualMachineScaleSetUpdateNetworkConfigurationProperties + } + if vmssunc.ID != nil { + objectMap["id"] = vmssunc.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSetUpdateNetworkConfiguration struct. +func (vmssunc *VirtualMachineScaleSetUpdateNetworkConfiguration) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + vmssunc.Name = &name + } + case "properties": + if v != nil { + var virtualMachineScaleSetUpdateNetworkConfigurationProperties VirtualMachineScaleSetUpdateNetworkConfigurationProperties + err = json.Unmarshal(*v, &virtualMachineScaleSetUpdateNetworkConfigurationProperties) + if err != nil { + return err + } + vmssunc.VirtualMachineScaleSetUpdateNetworkConfigurationProperties = &virtualMachineScaleSetUpdateNetworkConfigurationProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + vmssunc.ID = &ID + } + } + } + + return nil +} + +// VirtualMachineScaleSetUpdateNetworkConfigurationProperties describes a virtual machine scale set updatable +// network profile's IP configuration.Use this object for updating network profile's IP Configuration. +type VirtualMachineScaleSetUpdateNetworkConfigurationProperties struct { + // Primary - Whether this is a primary NIC on a virtual machine. + Primary *bool `json:"primary,omitempty"` + // EnableAcceleratedNetworking - Specifies whether the network interface is accelerated networking-enabled. + EnableAcceleratedNetworking *bool `json:"enableAcceleratedNetworking,omitempty"` + // NetworkSecurityGroup - The network security group. + NetworkSecurityGroup *SubResource `json:"networkSecurityGroup,omitempty"` + // DNSSettings - The dns settings to be applied on the network interfaces. + DNSSettings *VirtualMachineScaleSetNetworkConfigurationDNSSettings `json:"dnsSettings,omitempty"` + // IPConfigurations - The virtual machine scale set IP Configuration. + IPConfigurations *[]VirtualMachineScaleSetUpdateIPConfiguration `json:"ipConfigurations,omitempty"` + // EnableIPForwarding - Whether IP forwarding enabled on this NIC. + EnableIPForwarding *bool `json:"enableIPForwarding,omitempty"` +} + +// VirtualMachineScaleSetUpdateNetworkProfile describes a virtual machine scale set network profile. +type VirtualMachineScaleSetUpdateNetworkProfile struct { + // NetworkInterfaceConfigurations - The list of network configurations. + NetworkInterfaceConfigurations *[]VirtualMachineScaleSetUpdateNetworkConfiguration `json:"networkInterfaceConfigurations,omitempty"` +} + +// VirtualMachineScaleSetUpdateOSDisk describes virtual machine scale set operating system disk Update Object. This +// should be used for Updating VMSS OS Disk. +type VirtualMachineScaleSetUpdateOSDisk struct { + // Caching - The caching type. 
+ // Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' + Caching CachingTypes `json:"caching,omitempty"` + // WriteAcceleratorEnabled - Specifies whether writeAccelerator should be enabled or disabled on the disk. + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"` + // DiskSizeGB - Specifies the size of the operating system disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image.
+ // This value cannot be larger than 1023 GB + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // Image - The Source User Image VirtualHardDisk. This VirtualHardDisk will be copied before using it to attach to the Virtual Machine. If SourceImage is provided, the destination VirtualHardDisk should not exist. + Image *VirtualHardDisk `json:"image,omitempty"` + // VhdContainers - The list of virtual hard disk container uris. + VhdContainers *[]string `json:"vhdContainers,omitempty"` + // ManagedDisk - The managed disk parameters. + ManagedDisk *VirtualMachineScaleSetManagedDiskParameters `json:"managedDisk,omitempty"` +} + +// VirtualMachineScaleSetUpdateOSProfile describes a virtual machine scale set OS profile. +type VirtualMachineScaleSetUpdateOSProfile struct { + // CustomData - A base-64 encoded string of custom data. + CustomData *string `json:"customData,omitempty"` + // WindowsConfiguration - The Windows Configuration of the OS profile. + WindowsConfiguration *WindowsConfiguration `json:"windowsConfiguration,omitempty"` + // LinuxConfiguration - The Linux Configuration of the OS profile. + LinuxConfiguration *LinuxConfiguration `json:"linuxConfiguration,omitempty"` + // Secrets - The List of certificates for addition to the VM. + Secrets *[]VaultSecretGroup `json:"secrets,omitempty"` +} + +// VirtualMachineScaleSetUpdateProperties describes the properties of a Virtual Machine Scale Set. +type VirtualMachineScaleSetUpdateProperties struct { + // UpgradePolicy - The upgrade policy. + UpgradePolicy *UpgradePolicy `json:"upgradePolicy,omitempty"` + // VirtualMachineProfile - The virtual machine profile. + VirtualMachineProfile *VirtualMachineScaleSetUpdateVMProfile `json:"virtualMachineProfile,omitempty"` + // Overprovision - Specifies whether the Virtual Machine Scale Set should be overprovisioned. + Overprovision *bool `json:"overprovision,omitempty"` + // SinglePlacementGroup - When true this limits the scale set to a single placement group, of max size 100 virtual machines. + SinglePlacementGroup *bool `json:"singlePlacementGroup,omitempty"` +} + +// VirtualMachineScaleSetUpdatePublicIPAddressConfiguration describes a virtual machines scale set IP +// Configuration's PublicIPAddress configuration +type VirtualMachineScaleSetUpdatePublicIPAddressConfiguration struct { + // Name - The publicIP address configuration name. + Name *string `json:"name,omitempty"` + *VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for VirtualMachineScaleSetUpdatePublicIPAddressConfiguration. +func (vmssupiac VirtualMachineScaleSetUpdatePublicIPAddressConfiguration) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if vmssupiac.Name != nil { + objectMap["name"] = vmssupiac.Name + } + if vmssupiac.VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties != nil { + objectMap["properties"] = vmssupiac.VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSetUpdatePublicIPAddressConfiguration struct.
+func (vmssupiac *VirtualMachineScaleSetUpdatePublicIPAddressConfiguration) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + vmssupiac.Name = &name + } + case "properties": + if v != nil { + var virtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties + err = json.Unmarshal(*v, &virtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties) + if err != nil { + return err + } + vmssupiac.VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties = &virtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties + } + } + } + + return nil +} + +// VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties describes a virtual machines scale set IP +// Configuration's PublicIPAddress configuration +type VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties struct { + // IdleTimeoutInMinutes - The idle timeout of the public IP address. + IdleTimeoutInMinutes *int32 `json:"idleTimeoutInMinutes,omitempty"` + // DNSSettings - The dns settings to be applied on the publicIP addresses . + DNSSettings *VirtualMachineScaleSetPublicIPAddressConfigurationDNSSettings `json:"dnsSettings,omitempty"` +} + +// VirtualMachineScaleSetUpdateStorageProfile describes a virtual machine scale set storage profile. +type VirtualMachineScaleSetUpdateStorageProfile struct { + // ImageReference - The image reference. + ImageReference *ImageReference `json:"imageReference,omitempty"` + // OsDisk - The OS disk. + OsDisk *VirtualMachineScaleSetUpdateOSDisk `json:"osDisk,omitempty"` + // DataDisks - The data disks. + DataDisks *[]VirtualMachineScaleSetDataDisk `json:"dataDisks,omitempty"` +} + +// VirtualMachineScaleSetUpdateVMProfile describes a virtual machine scale set virtual machine profile. +type VirtualMachineScaleSetUpdateVMProfile struct { + // OsProfile - The virtual machine scale set OS profile. + OsProfile *VirtualMachineScaleSetUpdateOSProfile `json:"osProfile,omitempty"` + // StorageProfile - The virtual machine scale set storage profile. + StorageProfile *VirtualMachineScaleSetUpdateStorageProfile `json:"storageProfile,omitempty"` + // NetworkProfile - The virtual machine scale set network profile. + NetworkProfile *VirtualMachineScaleSetUpdateNetworkProfile `json:"networkProfile,omitempty"` + // DiagnosticsProfile - The virtual machine scale set diagnostics profile. + DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"` + // ExtensionProfile - The virtual machine scale set extension profile. + ExtensionProfile *VirtualMachineScaleSetExtensionProfile `json:"extensionProfile,omitempty"` + // LicenseType - The license type, which is for bring your own license scenario. + LicenseType *string `json:"licenseType,omitempty"` +} + +// VirtualMachineScaleSetVM describes a virtual machine scale set virtual machine. +type VirtualMachineScaleSetVM struct { + autorest.Response `json:"-"` + // InstanceID - The virtual machine instance ID. + InstanceID *string `json:"instanceId,omitempty"` + // Sku - The virtual machine SKU. + Sku *Sku `json:"sku,omitempty"` + *VirtualMachineScaleSetVMProperties `json:"properties,omitempty"` + // Plan - Specifies information about the marketplace image used to create the virtual machine. 
+ // This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**. + Plan *Plan `json:"plan,omitempty"` + // Resources - The virtual machine child extension resources. + Resources *[]VirtualMachineExtension `json:"resources,omitempty"` + // Zones - The virtual machine zones. + Zones *[]string `json:"zones,omitempty"` + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - Resource name + Name *string `json:"name,omitempty"` + // Type - Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for VirtualMachineScaleSetVM. +func (vmssv VirtualMachineScaleSetVM) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if vmssv.InstanceID != nil { + objectMap["instanceId"] = vmssv.InstanceID + } + if vmssv.Sku != nil { + objectMap["sku"] = vmssv.Sku + } + if vmssv.VirtualMachineScaleSetVMProperties != nil { + objectMap["properties"] = vmssv.VirtualMachineScaleSetVMProperties + } + if vmssv.Plan != nil { + objectMap["plan"] = vmssv.Plan + } + if vmssv.Resources != nil { + objectMap["resources"] = vmssv.Resources + } + if vmssv.Zones != nil { + objectMap["zones"] = vmssv.Zones + } + if vmssv.ID != nil { + objectMap["id"] = vmssv.ID + } + if vmssv.Name != nil { + objectMap["name"] = vmssv.Name + } + if vmssv.Type != nil { + objectMap["type"] = vmssv.Type + } + if vmssv.Location != nil { + objectMap["location"] = vmssv.Location + } + if vmssv.Tags != nil { + objectMap["tags"] = vmssv.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSetVM struct.
+func (vmssv *VirtualMachineScaleSetVM) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "instanceId": + if v != nil { + var instanceID string + err = json.Unmarshal(*v, &instanceID) + if err != nil { + return err + } + vmssv.InstanceID = &instanceID + } + case "sku": + if v != nil { + var sku Sku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + vmssv.Sku = &sku + } + case "properties": + if v != nil { + var virtualMachineScaleSetVMProperties VirtualMachineScaleSetVMProperties + err = json.Unmarshal(*v, &virtualMachineScaleSetVMProperties) + if err != nil { + return err + } + vmssv.VirtualMachineScaleSetVMProperties = &virtualMachineScaleSetVMProperties + } + case "plan": + if v != nil { + var plan Plan + err = json.Unmarshal(*v, &plan) + if err != nil { + return err + } + vmssv.Plan = &plan + } + case "resources": + if v != nil { + var resources []VirtualMachineExtension + err = json.Unmarshal(*v, &resources) + if err != nil { + return err + } + vmssv.Resources = &resources + } + case "zones": + if v != nil { + var zones []string + err = json.Unmarshal(*v, &zones) + if err != nil { + return err + } + vmssv.Zones = &zones + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + vmssv.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + vmssv.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + vmssv.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + vmssv.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + vmssv.Tags = tags + } + } + } + + return nil +} + +// VirtualMachineScaleSetVMExtensionsSummary extensions summary for virtual machines of a virtual machine scale +// set. +type VirtualMachineScaleSetVMExtensionsSummary struct { + // Name - The extension name. + Name *string `json:"name,omitempty"` + // StatusesSummary - The extensions information. + StatusesSummary *[]VirtualMachineStatusCodeCount `json:"statusesSummary,omitempty"` +} + +// VirtualMachineScaleSetVMInstanceIDs specifies a list of virtual machine instance IDs from the VM scale set. +type VirtualMachineScaleSetVMInstanceIDs struct { + // InstanceIds - The virtual machine scale set instance ids. Omitting the virtual machine scale set instance ids will result in the operation being performed on all virtual machines in the virtual machine scale set. + InstanceIds *[]string `json:"instanceIds,omitempty"` +} + +// VirtualMachineScaleSetVMInstanceRequiredIDs specifies a list of virtual machine instance IDs from the VM scale +// set. +type VirtualMachineScaleSetVMInstanceRequiredIDs struct { + // InstanceIds - The virtual machine scale set instance ids. + InstanceIds *[]string `json:"instanceIds,omitempty"` +} + +// VirtualMachineScaleSetVMInstanceView the instance view of a virtual machine scale set VM. +type VirtualMachineScaleSetVMInstanceView struct { + autorest.Response `json:"-"` + // PlatformUpdateDomain - The Update Domain count. 
+ PlatformUpdateDomain *int32 `json:"platformUpdateDomain,omitempty"` + // PlatformFaultDomain - The Fault Domain count. + PlatformFaultDomain *int32 `json:"platformFaultDomain,omitempty"` + // RdpThumbPrint - The Remote desktop certificate thumbprint. + RdpThumbPrint *string `json:"rdpThumbPrint,omitempty"` + // VMAgent - The VM Agent running on the virtual machine. + VMAgent *VirtualMachineAgentInstanceView `json:"vmAgent,omitempty"` + // MaintenanceRedeployStatus - The Maintenance Operation status on the virtual machine. + MaintenanceRedeployStatus *MaintenanceRedeployStatus `json:"maintenanceRedeployStatus,omitempty"` + // Disks - The disks information. + Disks *[]DiskInstanceView `json:"disks,omitempty"` + // Extensions - The extensions information. + Extensions *[]VirtualMachineExtensionInstanceView `json:"extensions,omitempty"` + // VMHealth - The health status for the VM. + VMHealth *VirtualMachineHealthStatus `json:"vmHealth,omitempty"` + // BootDiagnostics - Boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot to diagnose VM status.
+ // For Linux Virtual Machines, you can easily view the output of your console log. + // For both Windows and Linux virtual machines, Azure also enables you to see a screenshot of the VM from the hypervisor. + BootDiagnostics *BootDiagnosticsInstanceView `json:"bootDiagnostics,omitempty"` + // Statuses - The resource status information. + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` + // PlacementGroupID - The placement group in which the VM is running. If the VM is deallocated it will not have a placementGroupId. + PlacementGroupID *string `json:"placementGroupId,omitempty"` +} + +// VirtualMachineScaleSetVMListResult the List Virtual Machine Scale Set VMs operation response. +type VirtualMachineScaleSetVMListResult struct { + autorest.Response `json:"-"` + // Value - The list of virtual machine scale sets VMs. + Value *[]VirtualMachineScaleSetVM `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of Virtual Machine Scale Set VMs. Call ListNext() with this to fetch the next page of VMSS VMs + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualMachineScaleSetVMListResultIterator provides access to a complete listing of VirtualMachineScaleSetVM +// values. +type VirtualMachineScaleSetVMListResultIterator struct { + i int + page VirtualMachineScaleSetVMListResultPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *VirtualMachineScaleSetVMListResultIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter VirtualMachineScaleSetVMListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter VirtualMachineScaleSetVMListResultIterator) Response() VirtualMachineScaleSetVMListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter VirtualMachineScaleSetVMListResultIterator) Value() VirtualMachineScaleSetVM { + if !iter.page.NotDone() { + return VirtualMachineScaleSetVM{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (vmssvlr VirtualMachineScaleSetVMListResult) IsEmpty() bool { + return vmssvlr.Value == nil || len(*vmssvlr.Value) == 0 +} + +// virtualMachineScaleSetVMListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (vmssvlr VirtualMachineScaleSetVMListResult) virtualMachineScaleSetVMListResultPreparer() (*http.Request, error) { + if vmssvlr.NextLink == nil || len(to.String(vmssvlr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(vmssvlr.NextLink))) +} + +// VirtualMachineScaleSetVMListResultPage contains a page of VirtualMachineScaleSetVM values. +type VirtualMachineScaleSetVMListResultPage struct { + fn func(VirtualMachineScaleSetVMListResult) (VirtualMachineScaleSetVMListResult, error) + vmssvlr VirtualMachineScaleSetVMListResult +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned.
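The iterator/page pair above implements the standard autorest paging shape: the page follows `NextLink` to fetch result batches, while the iterator flattens those batches into a single element stream. A consumption sketch; the `ListComplete` constructor and its exact parameters follow the usual codegen convention but are not shown in this diff, so treat them as assumptions:

```go
package azureutil

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" // assumed vendored path
)

// printInstanceIDs walks every VM in a scale set across all result pages.
func printInstanceIDs(ctx context.Context, c compute.VirtualMachineScaleSetVMsClient, rg, vmssName string) error {
	iter, err := c.ListComplete(ctx, rg, vmssName, "", "", "") // filter/select/expand left empty (assumed signature)
	if err != nil {
		return err
	}
	for iter.NotDone() {
		if vm := iter.Value(); vm.InstanceID != nil {
			fmt.Println("instance:", *vm.InstanceID)
		}
		if err := iter.Next(); err != nil { // transparently fetches the next page when needed
			return err
		}
	}
	return nil
}
```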
+func (page *VirtualMachineScaleSetVMListResultPage) Next() error { + next, err := page.fn(page.vmssvlr) + if err != nil { + return err + } + page.vmssvlr = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page VirtualMachineScaleSetVMListResultPage) NotDone() bool { + return !page.vmssvlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page VirtualMachineScaleSetVMListResultPage) Response() VirtualMachineScaleSetVMListResult { + return page.vmssvlr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page VirtualMachineScaleSetVMListResultPage) Values() []VirtualMachineScaleSetVM { + if page.vmssvlr.IsEmpty() { + return nil + } + return *page.vmssvlr.Value +} + +// VirtualMachineScaleSetVMProfile describes a virtual machine scale set virtual machine profile. +type VirtualMachineScaleSetVMProfile struct { + // OsProfile - Specifies the operating system settings for the virtual machines in the scale set. + OsProfile *VirtualMachineScaleSetOSProfile `json:"osProfile,omitempty"` + // StorageProfile - Specifies the storage settings for the virtual machine disks. + StorageProfile *VirtualMachineScaleSetStorageProfile `json:"storageProfile,omitempty"` + // AdditionalCapabilities - Specifies additional capabilities enabled or disabled on the virtual machine in the scale set. For instance: whether the virtual machine has the capability to support attaching managed data disks with UltraSSD_LRS storage account type. + AdditionalCapabilities *AdditionalCapabilities `json:"additionalCapabilities,omitempty"` + // NetworkProfile - Specifies properties of the network interfaces of the virtual machines in the scale set. + NetworkProfile *VirtualMachineScaleSetNetworkProfile `json:"networkProfile,omitempty"` + // DiagnosticsProfile - Specifies the boot diagnostic settings state.
+ // Minimum api-version: 2015-06-15. + DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"` + // ExtensionProfile - Specifies a collection of settings for extensions installed on virtual machines in the scale set. + ExtensionProfile *VirtualMachineScaleSetExtensionProfile `json:"extensionProfile,omitempty"` + // LicenseType - Specifies that the image or disk that is being used was licensed on-premises. This element is only used for images that contain the Windows Server operating system.
+ // Possible values are: Windows_Client, Windows_Server. + // If this element is included in a request for an update, the value must match the initial value. This value cannot be updated. + // For more information, see [Azure Hybrid Use Benefit for Windows Server](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-hybrid-use-benefit-licensing?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json) + // Minimum api-version: 2015-06-15 + LicenseType *string `json:"licenseType,omitempty"` + // Priority - Specifies the priority for the virtual machines in the scale set.
+ // Minimum api-version: 2017-10-30-preview. Possible values include: 'Regular', 'Low' + Priority VirtualMachinePriorityTypes `json:"priority,omitempty"` + // EvictionPolicy - Specifies the eviction policy for virtual machines in a low priority scale set.
+ // Minimum api-version: 2017-10-30-preview. Possible values include: 'Deallocate', 'Delete' + EvictionPolicy VirtualMachineEvictionPolicyTypes `json:"evictionPolicy,omitempty"` +} + +// VirtualMachineScaleSetVMProperties describes the properties of a virtual machine scale set virtual machine. +type VirtualMachineScaleSetVMProperties struct { + // LatestModelApplied - Specifies whether the latest model has been applied to the virtual machine. + LatestModelApplied *bool `json:"latestModelApplied,omitempty"` + // VMID - Azure VM unique ID. + VMID *string `json:"vmId,omitempty"` + // InstanceView - The virtual machine instance view. + InstanceView *VirtualMachineScaleSetVMInstanceView `json:"instanceView,omitempty"` + // HardwareProfile - Specifies the hardware settings for the virtual machine. + HardwareProfile *HardwareProfile `json:"hardwareProfile,omitempty"` + // StorageProfile - Specifies the storage settings for the virtual machine disks. + StorageProfile *StorageProfile `json:"storageProfile,omitempty"` + // AdditionalCapabilities - Specifies additional capabilities enabled or disabled on the virtual machine in the scale set. For instance: whether the virtual machine has the capability to support attaching managed data disks with UltraSSD_LRS storage account type. + AdditionalCapabilities *AdditionalCapabilities `json:"additionalCapabilities,omitempty"` + // OsProfile - Specifies the operating system settings for the virtual machine. + OsProfile *OSProfile `json:"osProfile,omitempty"` + // NetworkProfile - Specifies the network interfaces of the virtual machine. + NetworkProfile *NetworkProfile `json:"networkProfile,omitempty"` + // DiagnosticsProfile - Specifies the boot diagnostic settings state.
+ // Minimum api-version: 2015-06-15. + DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"` + // AvailabilitySet - Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Manage the availability of virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
+ // For more information on Azure planned maintenance, see [Planned maintenance for virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)
+ // Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added to an availability set. + AvailabilitySet *SubResource `json:"availabilitySet,omitempty"` + // ProvisioningState - The provisioning state, which only appears in the response. + ProvisioningState *string `json:"provisioningState,omitempty"` + // LicenseType - Specifies that the image or disk that is being used was licensed on-premises. This element is only used for images that contain the Windows Server operating system.
+ // Possible values are: Windows_Client, Windows_Server. + // If this element is included in a request for an update, the value must match the initial value. This value cannot be updated. + // For more information, see [Azure Hybrid Use Benefit for Windows Server](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-hybrid-use-benefit-licensing?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)
+ // Minimum api-version: 2015-06-15 + LicenseType *string `json:"licenseType,omitempty"` +} + +// VirtualMachineScaleSetVMsDeallocateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type VirtualMachineScaleSetVMsDeallocateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachineScaleSetVMsDeallocateFuture) Result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsDeallocateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsDeallocateFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachineScaleSetVMsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachineScaleSetVMsDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachineScaleSetVMsDeleteFuture) Result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachineScaleSetVMsPerformMaintenanceFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type VirtualMachineScaleSetVMsPerformMaintenanceFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachineScaleSetVMsPerformMaintenanceFuture) Result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsPerformMaintenanceFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsPerformMaintenanceFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachineScaleSetVMsPowerOffFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type VirtualMachineScaleSetVMsPowerOffFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error.
+func (future *VirtualMachineScaleSetVMsPowerOffFuture) Result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsPowerOffFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsPowerOffFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachineScaleSetVMsRedeployFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type VirtualMachineScaleSetVMsRedeployFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachineScaleSetVMsRedeployFuture) Result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsRedeployFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsRedeployFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachineScaleSetVMsReimageAllFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type VirtualMachineScaleSetVMsReimageAllFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachineScaleSetVMsReimageAllFuture) Result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsReimageAllFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsReimageAllFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachineScaleSetVMsReimageFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type VirtualMachineScaleSetVMsReimageFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachineScaleSetVMsReimageFuture) Result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsReimageFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsReimageFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachineScaleSetVMsRestartFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type VirtualMachineScaleSetVMsRestartFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *VirtualMachineScaleSetVMsRestartFuture) Result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsRestartFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsRestartFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachineScaleSetVMsRunCommandFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type VirtualMachineScaleSetVMsRunCommandFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachineScaleSetVMsRunCommandFuture) Result(client VirtualMachineScaleSetVMsClient) (rcr RunCommandResult, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsRunCommandFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsRunCommandFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if rcr.Response.Response, err = future.GetResult(sender); err == nil && rcr.Response.Response.StatusCode != http.StatusNoContent { + rcr, err = client.RunCommandResponder(rcr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsRunCommandFuture", "Result", rcr.Response.Response, "Failure responding to request") + } + } + return +} + +// VirtualMachineScaleSetVMsStartFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachineScaleSetVMsStartFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachineScaleSetVMsStartFuture) Result(client VirtualMachineScaleSetVMsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsStartFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsStartFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachineScaleSetVMsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachineScaleSetVMsUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *VirtualMachineScaleSetVMsUpdateFuture) Result(client VirtualMachineScaleSetVMsClient) (vmssv VirtualMachineScaleSetVM, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetVMsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vmssv.Response.Response, err = future.GetResult(sender); err == nil && vmssv.Response.Response.StatusCode != http.StatusNoContent { + vmssv, err = client.UpdateResponder(vmssv.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsUpdateFuture", "Result", vmssv.Response.Response, "Failure responding to request") + } + } + return +} + +// VirtualMachinesCaptureFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachinesCaptureFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachinesCaptureFuture) Result(client VirtualMachinesClient) (vmcr VirtualMachineCaptureResult, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesCaptureFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesCaptureFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vmcr.Response.Response, err = future.GetResult(sender); err == nil && vmcr.Response.Response.StatusCode != http.StatusNoContent { + vmcr, err = client.CaptureResponder(vmcr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesCaptureFuture", "Result", vmcr.Response.Response, "Failure responding to request") + } + } + return +} + +// VirtualMachinesConvertToManagedDisksFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type VirtualMachinesConvertToManagedDisksFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachinesConvertToManagedDisksFuture) Result(client VirtualMachinesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesConvertToManagedDisksFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesConvertToManagedDisksFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachinesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachinesCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *VirtualMachinesCreateOrUpdateFuture) Result(client VirtualMachinesClient) (VM VirtualMachine, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if VM.Response.Response, err = future.GetResult(sender); err == nil && VM.Response.Response.StatusCode != http.StatusNoContent { + VM, err = client.CreateOrUpdateResponder(VM.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesCreateOrUpdateFuture", "Result", VM.Response.Response, "Failure responding to request") + } + } + return +} + +// VirtualMachinesDeallocateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachinesDeallocateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachinesDeallocateFuture) Result(client VirtualMachinesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesDeallocateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesDeallocateFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachinesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachinesDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachinesDeleteFuture) Result(client VirtualMachinesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachineSize describes the properties of a VM size. +type VirtualMachineSize struct { + // Name - The name of the virtual machine size. + Name *string `json:"name,omitempty"` + // NumberOfCores - The number of cores supported by the virtual machine size. + NumberOfCores *int32 `json:"numberOfCores,omitempty"` + // OsDiskSizeInMB - The OS disk size, in MB, allowed by the virtual machine size. + OsDiskSizeInMB *int32 `json:"osDiskSizeInMB,omitempty"` + // ResourceDiskSizeInMB - The resource disk size, in MB, allowed by the virtual machine size. + ResourceDiskSizeInMB *int32 `json:"resourceDiskSizeInMB,omitempty"` + // MemoryInMB - The amount of memory, in MB, supported by the virtual machine size. + MemoryInMB *int32 `json:"memoryInMB,omitempty"` + // MaxDataDiskCount - The maximum number of data disks that can be attached to the virtual machine size. 
+ MaxDataDiskCount *int32 `json:"maxDataDiskCount,omitempty"` +} + +// VirtualMachineSizeListResult the List Virtual Machine operation response. +type VirtualMachineSizeListResult struct { + autorest.Response `json:"-"` + // Value - The list of virtual machine sizes. + Value *[]VirtualMachineSize `json:"value,omitempty"` +} + +// VirtualMachinesPerformMaintenanceFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type VirtualMachinesPerformMaintenanceFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachinesPerformMaintenanceFuture) Result(client VirtualMachinesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesPerformMaintenanceFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesPerformMaintenanceFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachinesPowerOffFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachinesPowerOffFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachinesPowerOffFuture) Result(client VirtualMachinesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesPowerOffFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesPowerOffFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachinesRedeployFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachinesRedeployFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachinesRedeployFuture) Result(client VirtualMachinesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesRedeployFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesRedeployFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachinesRestartFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachinesRestartFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
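// Editor's sketch, not part of the vendored patch: every VirtualMachines* future above
// that yields a bare autorest.Response can be wrapped identically; PowerOff is used
// here purely as the example. Assumes imports "context" and this compute package;
// WaitForCompletionRef on the vendored azure.Future and all names are assumptions.
func powerOffVMAndWait(ctx context.Context, client compute.VirtualMachinesClient,
	resourceGroup, vmName string) error {
	future, err := client.PowerOff(ctx, resourceGroup, vmName)
	if err != nil {
		return err
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return err
	}
	_, err = future.Result(client) // the autorest.Response carries only the final HTTP status
	return err
}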
+func (future *VirtualMachinesRestartFuture) Result(client VirtualMachinesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesRestartFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesRestartFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachinesRunCommandFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachinesRunCommandFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachinesRunCommandFuture) Result(client VirtualMachinesClient) (rcr RunCommandResult, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesRunCommandFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesRunCommandFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if rcr.Response.Response, err = future.GetResult(sender); err == nil && rcr.Response.Response.StatusCode != http.StatusNoContent { + rcr, err = client.RunCommandResponder(rcr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesRunCommandFuture", "Result", rcr.Response.Response, "Failure responding to request") + } + } + return +} + +// VirtualMachinesStartFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type VirtualMachinesStartFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachinesStartFuture) Result(client VirtualMachinesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesStartFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesStartFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualMachineStatusCodeCount the status code and count of the virtual machine scale set instance view status +// summary. +type VirtualMachineStatusCodeCount struct { + // Code - The instance view status code. + Code *string `json:"code,omitempty"` + // Count - The number of instances having a particular status code. + Count *int32 `json:"count,omitempty"` +} + +// VirtualMachinesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachinesUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *VirtualMachinesUpdateFuture) Result(client VirtualMachinesClient) (VM VirtualMachine, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if VM.Response.Response, err = future.GetResult(sender); err == nil && VM.Response.Response.StatusCode != http.StatusNoContent { + VM, err = client.UpdateResponder(VM.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesUpdateFuture", "Result", VM.Response.Response, "Failure responding to request") + } + } + return +} + +// VirtualMachineUpdate describes a Virtual Machine Update. +type VirtualMachineUpdate struct { + // Plan - Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**. + Plan *Plan `json:"plan,omitempty"` + *VirtualMachineProperties `json:"properties,omitempty"` + // Identity - The identity of the virtual machine, if configured. + Identity *VirtualMachineIdentity `json:"identity,omitempty"` + // Zones - The virtual machine zones. + Zones *[]string `json:"zones,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for VirtualMachineUpdate. +func (vmu VirtualMachineUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if vmu.Plan != nil { + objectMap["plan"] = vmu.Plan + } + if vmu.VirtualMachineProperties != nil { + objectMap["properties"] = vmu.VirtualMachineProperties + } + if vmu.Identity != nil { + objectMap["identity"] = vmu.Identity + } + if vmu.Zones != nil { + objectMap["zones"] = vmu.Zones + } + if vmu.Tags != nil { + objectMap["tags"] = vmu.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for VirtualMachineUpdate struct. 
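// Editor's sketch, not part of the vendored patch: the custom MarshalJSON above emits
// only the fields that are actually set, so a sparse PATCH-style payload stays sparse,
// and the UnmarshalJSON below restores the same shape. Assumes import "encoding/json";
// the tag value is a placeholder.
func exampleUpdatePayload() ([]byte, error) {
	owner := "cluster-autoscaler"
	update := compute.VirtualMachineUpdate{
		Tags: map[string]*string{"managed-by": &owner},
	}
	body, err := json.Marshal(update) // -> {"tags":{"managed-by":"cluster-autoscaler"}}
	if err != nil {
		return nil, err
	}
	var decoded compute.VirtualMachineUpdate // round-trips through UnmarshalJSON below
	if err := json.Unmarshal(body, &decoded); err != nil {
		return nil, err
	}
	return body, nil
}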
+func (vmu *VirtualMachineUpdate) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "plan": + if v != nil { + var plan Plan + err = json.Unmarshal(*v, &plan) + if err != nil { + return err + } + vmu.Plan = &plan + } + case "properties": + if v != nil { + var virtualMachineProperties VirtualMachineProperties + err = json.Unmarshal(*v, &virtualMachineProperties) + if err != nil { + return err + } + vmu.VirtualMachineProperties = &virtualMachineProperties + } + case "identity": + if v != nil { + var identity VirtualMachineIdentity + err = json.Unmarshal(*v, &identity) + if err != nil { + return err + } + vmu.Identity = &identity + } + case "zones": + if v != nil { + var zones []string + err = json.Unmarshal(*v, &zones) + if err != nil { + return err + } + vmu.Zones = &zones + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + vmu.Tags = tags + } + } + } + + return nil +} + +// WindowsConfiguration specifies Windows operating system settings on the virtual machine. +type WindowsConfiguration struct { + // ProvisionVMAgent - Indicates whether virtual machine agent should be provisioned on the virtual machine.

    When this property is not specified in the request body, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later. + ProvisionVMAgent *bool `json:"provisionVMAgent,omitempty"` + // EnableAutomaticUpdates - Indicates whether virtual machine is enabled for automatic updates. + EnableAutomaticUpdates *bool `json:"enableAutomaticUpdates,omitempty"` + // TimeZone - Specifies the time zone of the virtual machine. e.g. "Pacific Standard Time" + TimeZone *string `json:"timeZone,omitempty"` + // AdditionalUnattendContent - Specifies additional base-64 encoded XML formatted information that can be included in the Unattend.xml file, which is used by Windows Setup. + AdditionalUnattendContent *[]AdditionalUnattendContent `json:"additionalUnattendContent,omitempty"` + // WinRM - Specifies the Windows Remote Management listeners. This enables remote Windows PowerShell. + WinRM *WinRMConfiguration `json:"winRM,omitempty"` +} + +// WinRMConfiguration describes Windows Remote Management configuration of the VM +type WinRMConfiguration struct { + // Listeners - The list of Windows Remote Management listeners + Listeners *[]WinRMListener `json:"listeners,omitempty"` +} + +// WinRMListener describes Protocol and thumbprint of Windows Remote Management listener +type WinRMListener struct { + // Protocol - Specifies the protocol of listener.

    Possible values are:
    **http**

    **https**. Possible values include: 'HTTP', 'HTTPS' + Protocol ProtocolTypes `json:"protocol,omitempty"` + // CertificateURL - This is the URL of a certificate that has been uploaded to Key Vault as a secret. For adding a secret to the Key Vault, see [Add a key or secret to the key vault](https://docs.microsoft.com/azure/key-vault/key-vault-get-started/#add). In this case, your certificate needs to be the Base64 encoding of the following JSON Object, which is encoded in UTF-8:

    {
    "data":"",
    "dataType":"pfx",
    "password":""
    } + CertificateURL *string `json:"certificateUrl,omitempty"` +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/operations.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/operations.go new file mode 100644 index 000000000000..e8fb3415b48b --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/operations.go @@ -0,0 +1,98 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// OperationsClient is the compute Client +type OperationsClient struct { + BaseClient +} + +// NewOperationsClient creates an instance of the OperationsClient client. +func NewOperationsClient(subscriptionID string) OperationsClient { + return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client. +func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient { + return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List gets a list of compute operations. +func (client OperationsClient) List(ctx context.Context) (result OperationListResult, err error) { + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.OperationsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.OperationsClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.OperationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) { + const APIVersion = "2018-10-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/providers/Microsoft.Compute/operations"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. 
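// Editor's sketch, not part of the vendored patch: building the Key Vault secret value
// that the WinRMListener CertificateURL above must reference - the Base64 encoding of
// the documented {"data","dataType","password"} JSON object, serialized as UTF-8.
// Assumes imports "encoding/base64" and "encoding/json"; parameter names are placeholders.
func winRMSecretValue(pfxBase64, password string) (string, error) {
	payload := map[string]string{
		"data":     pfxBase64, // the certificate, itself already Base64-encoded
		"dataType": "pfx",
		"password": password,
	}
	raw, err := json.Marshal(payload) // json.Marshal produces UTF-8 output
	if err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(raw), nil
}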
+func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/resourceskus.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/resourceskus.go new file mode 100644 index 000000000000..de32a3df699d --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/resourceskus.go @@ -0,0 +1,130 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// ResourceSkusClient is the compute Client +type ResourceSkusClient struct { + BaseClient +} + +// NewResourceSkusClient creates an instance of the ResourceSkusClient client. +func NewResourceSkusClient(subscriptionID string) ResourceSkusClient { + return NewResourceSkusClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewResourceSkusClientWithBaseURI creates an instance of the ResourceSkusClient client. +func NewResourceSkusClientWithBaseURI(baseURI string, subscriptionID string) ResourceSkusClient { + return ResourceSkusClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List gets the list of Microsoft.Compute SKUs available for your Subscription. 
+func (client ResourceSkusClient) List(ctx context.Context) (result ResourceSkusResultPage, err error) { + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.rsr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "List", resp, "Failure sending request") + return + } + + result.rsr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ResourceSkusClient) ListPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/skus", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ResourceSkusClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ResourceSkusClient) ListResponder(resp *http.Response) (result ResourceSkusResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client ResourceSkusClient) listNextResults(lastResults ResourceSkusResult) (result ResourceSkusResult, err error) { + req, err := lastResults.resourceSkusResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. 
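// Editor's sketch, not part of the vendored patch: consuming ListComplete through the
// generated iterator. NotDone/Value/Next are the iterator methods AutoRest emits for
// this SDK vintage (an assumption); assumes imports "context" and "fmt".
func printSkuNames(ctx context.Context, client compute.ResourceSkusClient) error {
	iter, err := client.ListComplete(ctx)
	if err != nil {
		return err
	}
	for iter.NotDone() {
		if sku := iter.Value(); sku.Name != nil {
			fmt.Println(*sku.Name)
		}
		if err := iter.Next(); err != nil { // fetches the next page on demand
			return err
		}
	}
	return nil
}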
+func (client ResourceSkusClient) ListComplete(ctx context.Context) (result ResourceSkusResultIterator, err error) { + result.page, err = client.List(ctx) + return +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/snapshots.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/snapshots.go similarity index 93% rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/snapshots.go rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/snapshots.go index b87359354942..89e57104c4f3 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/snapshots.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/snapshots.go @@ -49,19 +49,19 @@ func NewSnapshotsClientWithBaseURI(baseURI string, subscriptionID string) Snapsh func (client SnapshotsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, snapshotName string, snapshot Snapshot) (result SnapshotsCreateOrUpdateFuture, err error) { if err := validation.Validate([]validation.Validation{ {TargetValue: snapshot, - Constraints: []validation.Constraint{{Target: "snapshot.DiskProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.CreationData", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.CreationData.ImageReference", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.CreationData.ImageReference.ID", Name: validation.Null, Rule: true, Chain: nil}}}, + Constraints: []validation.Constraint{{Target: "snapshot.SnapshotProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "snapshot.SnapshotProperties.CreationData", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "snapshot.SnapshotProperties.CreationData.ImageReference", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "snapshot.SnapshotProperties.CreationData.ImageReference.ID", Name: validation.Null, Rule: true, Chain: nil}}}, }}, - {Target: "snapshot.DiskProperties.EncryptionSettings", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.EncryptionSettings.DiskEncryptionKey", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.EncryptionSettings.DiskEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "snapshot.DiskProperties.EncryptionSettings.DiskEncryptionKey.SecretURL", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "snapshot.SnapshotProperties.EncryptionSettings", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "snapshot.SnapshotProperties.EncryptionSettings.DiskEncryptionKey", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "snapshot.SnapshotProperties.EncryptionSettings.DiskEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "snapshot.SnapshotProperties.EncryptionSettings.DiskEncryptionKey.SecretURL", Name: validation.Null, Rule: true, Chain: nil}, }}, - {Target: "snapshot.DiskProperties.EncryptionSettings.KeyEncryptionKey", Name: validation.Null, 
Rule: false, - Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.EncryptionSettings.KeyEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "snapshot.DiskProperties.EncryptionSettings.KeyEncryptionKey.KeyURL", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "snapshot.SnapshotProperties.EncryptionSettings.KeyEncryptionKey", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "snapshot.SnapshotProperties.EncryptionSettings.KeyEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "snapshot.SnapshotProperties.EncryptionSettings.KeyEncryptionKey.KeyURL", Name: validation.Null, Rule: true, Chain: nil}, }}, }}, }}}}}); err != nil { @@ -91,7 +91,7 @@ func (client SnapshotsClient) CreateOrUpdatePreparer(ctx context.Context, resour "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-06-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -165,7 +165,7 @@ func (client SnapshotsClient) DeletePreparer(ctx context.Context, resourceGroupN "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-06-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -242,7 +242,7 @@ func (client SnapshotsClient) GetPreparer(ctx context.Context, resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-06-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -311,7 +311,7 @@ func (client SnapshotsClient) GrantAccessPreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-06-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -386,7 +386,7 @@ func (client SnapshotsClient) ListPreparer(ctx context.Context) (*http.Request, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-06-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -479,7 +479,7 @@ func (client SnapshotsClient) ListByResourceGroupPreparer(ctx context.Context, r "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-06-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -568,7 +568,7 @@ func (client SnapshotsClient) RevokeAccessPreparer(ctx context.Context, resource "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-06-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -640,7 +640,7 @@ func (client SnapshotsClient) UpdatePreparer(ctx context.Context, resourceGroupN "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-06-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/usage.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/usage.go new file mode 100644 index 000000000000..6e433603f898 --- /dev/null +++ 
b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/usage.go @@ -0,0 +1,141 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// UsageClient is the compute Client +type UsageClient struct { + BaseClient +} + +// NewUsageClient creates an instance of the UsageClient client. +func NewUsageClient(subscriptionID string) UsageClient { + return NewUsageClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewUsageClientWithBaseURI creates an instance of the UsageClient client. +func NewUsageClientWithBaseURI(baseURI string, subscriptionID string) UsageClient { + return UsageClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List gets, for the specified location, the current compute resource usage information as well as the limits for +// compute resources under the subscription. +// Parameters: +// location - the location for which resource usage is queried. +func (client UsageClient) List(ctx context.Context, location string) (result ListUsagesResultPage, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: location, + Constraints: []validation.Constraint{{Target: "location", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("compute.UsageClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, location) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.UsageClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.lur.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.UsageClient", "List", resp, "Failure sending request") + return + } + + result.lur, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.UsageClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
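// Editor's sketch, not part of the vendored patch: walking the paged usage result page
// by page; the ListComplete helper further down in this file offers the same data
// through an iterator instead. Assumes imports "context" and "fmt"; the Usage model's
// Name/CurrentValue/Limit fields are assumptions from the generated models, and all
// names are illustrative.
func logComputeQuota(ctx context.Context, client compute.UsageClient, location string) error {
	page, err := client.List(ctx, location)
	if err != nil {
		return err
	}
	for page.NotDone() {
		for _, u := range page.Values() {
			if u.Name != nil && u.Name.Value != nil && u.CurrentValue != nil && u.Limit != nil {
				fmt.Printf("%s: %d of %d used\n", *u.Name.Value, *u.CurrentValue, *u.Limit)
			}
		}
		if err := page.Next(); err != nil { // advances via the listNextResults helper defined below
			return err
		}
	}
	return nil
}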
+func (client UsageClient) ListPreparer(ctx context.Context, location string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-10-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/usages", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client UsageClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client UsageClient) ListResponder(resp *http.Response) (result ListUsagesResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client UsageClient) listNextResults(lastResults ListUsagesResult) (result ListUsagesResult, err error) { + req, err := lastResults.listUsagesResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.UsageClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.UsageClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.UsageClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client UsageClient) ListComplete(ctx context.Context, location string) (result ListUsagesResultIterator, err error) { + result.page, err = client.List(ctx, location) + return +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/version.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/version.go new file mode 100644 index 000000000000..e28392b5b8e4 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/version.go @@ -0,0 +1,30 @@ +package compute + +import "github.com/Azure/azure-sdk-for-go/version" + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return "Azure-SDK-For-Go/" + version.Number + " compute/2018-10-01" +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return version.Number +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachineextensionimages.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineextensionimages.go similarity index 99% rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachineextensionimages.go rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineextensionimages.go index 45714d55e79c..636160044851 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachineextensionimages.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineextensionimages.go @@ -75,7 +75,7 @@ func (client VirtualMachineExtensionImagesClient) GetPreparer(ctx context.Contex "version": autorest.Encode("path", version), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -141,7 +141,7 @@ func (client VirtualMachineExtensionImagesClient) ListTypesPreparer(ctx context. 
"subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -209,7 +209,7 @@ func (client VirtualMachineExtensionImagesClient) ListVersionsPreparer(ctx conte "type": autorest.Encode("path", typeParameter), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachineextensions.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineextensions.go similarity index 99% rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachineextensions.go rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineextensions.go index 06673a73d63e..6bfd80ac3a17 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachineextensions.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineextensions.go @@ -70,7 +70,7 @@ func (client VirtualMachineExtensionsClient) CreateOrUpdatePreparer(ctx context. "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -145,7 +145,7 @@ func (client VirtualMachineExtensionsClient) DeletePreparer(ctx context.Context, "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -224,7 +224,7 @@ func (client VirtualMachineExtensionsClient) GetPreparer(ctx context.Context, re "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -295,7 +295,7 @@ func (client VirtualMachineExtensionsClient) ListPreparer(ctx context.Context, r "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -362,7 +362,7 @@ func (client VirtualMachineExtensionsClient) UpdatePreparer(ctx context.Context, "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachineimages.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineimages.go similarity index 99% rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachineimages.go rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineimages.go index aeaa39b04918..23dab447a750 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachineimages.go +++ 
b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineimages.go @@ -79,7 +79,7 @@ func (client VirtualMachineImagesClient) GetPreparer(ctx context.Context, locati "version": autorest.Encode("path", version), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -151,7 +151,7 @@ func (client VirtualMachineImagesClient) ListPreparer(ctx context.Context, locat "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -227,7 +227,7 @@ func (client VirtualMachineImagesClient) ListOffersPreparer(ctx context.Context, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -292,7 +292,7 @@ func (client VirtualMachineImagesClient) ListPublishersPreparer(ctx context.Cont "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -361,7 +361,7 @@ func (client VirtualMachineImagesClient) ListSkusPreparer(ctx context.Context, l "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachineruncommands.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineruncommands.go similarity index 99% rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachineruncommands.go rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineruncommands.go index 3777e8b86703..525fc948b845 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachineruncommands.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachineruncommands.go @@ -80,7 +80,7 @@ func (client VirtualMachineRunCommandsClient) GetPreparer(ctx context.Context, l "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -152,7 +152,7 @@ func (client VirtualMachineRunCommandsClient) ListPreparer(ctx context.Context, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachines.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachines.go similarity index 98% rename from 
cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachines.go rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachines.go index 40a6166a1f34..e8149617e6e9 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachines.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachines.go @@ -78,7 +78,7 @@ func (client VirtualMachinesClient) CapturePreparer(ctx context.Context, resourc "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -152,7 +152,7 @@ func (client VirtualMachinesClient) ConvertToManagedDisksPreparer(ctx context.Co "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -244,7 +244,7 @@ func (client VirtualMachinesClient) CreateOrUpdatePreparer(ctx context.Context, "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -318,7 +318,7 @@ func (client VirtualMachinesClient) DeallocatePreparer(ctx context.Context, reso "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -388,7 +388,7 @@ func (client VirtualMachinesClient) DeletePreparer(ctx context.Context, resource "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -464,7 +464,7 @@ func (client VirtualMachinesClient) GeneralizePreparer(ctx context.Context, reso "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -531,7 +531,7 @@ func (client VirtualMachinesClient) GetPreparer(ctx context.Context, resourceGro "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -601,7 +601,7 @@ func (client VirtualMachinesClient) InstanceViewPreparer(ctx context.Context, re "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -668,7 +668,7 @@ func (client VirtualMachinesClient) ListPreparer(ctx context.Context, resourceGr "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -759,7 +759,7 @@ func (client VirtualMachinesClient) ListAllPreparer(ctx context.Context) (*http. 
"subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -853,7 +853,7 @@ func (client VirtualMachinesClient) ListAvailableSizesPreparer(ctx context.Conte "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -914,7 +914,7 @@ func (client VirtualMachinesClient) PerformMaintenancePreparer(ctx context.Conte "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -985,7 +985,7 @@ func (client VirtualMachinesClient) PowerOffPreparer(ctx context.Context, resour "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1055,7 +1055,7 @@ func (client VirtualMachinesClient) RedeployPreparer(ctx context.Context, resour "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1125,7 +1125,7 @@ func (client VirtualMachinesClient) RestartPreparer(ctx context.Context, resourc "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1202,7 +1202,7 @@ func (client VirtualMachinesClient) RunCommandPreparer(ctx context.Context, reso "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1275,7 +1275,7 @@ func (client VirtualMachinesClient) StartPreparer(ctx context.Context, resourceG "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1346,7 +1346,7 @@ func (client VirtualMachinesClient) UpdatePreparer(ctx context.Context, resource "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachinescalesetextensions.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetextensions.go similarity index 99% rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachinescalesetextensions.go rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetextensions.go index c2f1187f8596..43e443249b7e 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachinescalesetextensions.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetextensions.go @@ -71,7 +71,7 @@ func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdatePreparer(ctx 
"vmssExtensionName": autorest.Encode("path", vmssExtensionName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -146,7 +146,7 @@ func (client VirtualMachineScaleSetExtensionsClient) DeletePreparer(ctx context. "vmssExtensionName": autorest.Encode("path", vmssExtensionName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -225,7 +225,7 @@ func (client VirtualMachineScaleSetExtensionsClient) GetPreparer(ctx context.Con "vmssExtensionName": autorest.Encode("path", vmssExtensionName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -296,7 +296,7 @@ func (client VirtualMachineScaleSetExtensionsClient) ListPreparer(ctx context.Co "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachinescalesetrollingupgrades.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetrollingupgrades.go similarity index 75% rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachinescalesetrollingupgrades.go rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetrollingupgrades.go index 381cb0cb6327..ca98956cfd04 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachinescalesetrollingupgrades.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetrollingupgrades.go @@ -69,7 +69,7 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) CancelPreparer(ctx con "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -145,7 +145,7 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) GetLatestPreparer(ctx "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -178,6 +178,78 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) GetLatestResponder(res return } +// StartExtensionUpgrade starts a rolling upgrade to move all extensions for all virtual machine scale set instances to +// the latest available extension version. Instances which are already running the latest extension versions are not +// affected. +// Parameters: +// resourceGroupName - the name of the resource group. +// VMScaleSetName - the name of the VM scale set. 
+func (client VirtualMachineScaleSetRollingUpgradesClient) StartExtensionUpgrade(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture, err error) { + req, err := client.StartExtensionUpgradePreparer(ctx, resourceGroupName, VMScaleSetName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "StartExtensionUpgrade", nil, "Failure preparing request") + return + } + + result, err = client.StartExtensionUpgradeSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "StartExtensionUpgrade", result.Response(), "Failure sending request") + return + } + + return +} + +// StartExtensionUpgradePreparer prepares the StartExtensionUpgrade request. +func (client VirtualMachineScaleSetRollingUpgradesClient) StartExtensionUpgradePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2018-10-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensionRollingUpgrade", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// StartExtensionUpgradeSender sends the StartExtensionUpgrade request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetRollingUpgradesClient) StartExtensionUpgradeSender(req *http.Request) (future VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// StartExtensionUpgradeResponder handles the response to the StartExtensionUpgrade request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetRollingUpgradesClient) StartExtensionUpgradeResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + // StartOSUpgrade starts a rolling upgrade to move all virtual machine scale set instances to the latest available // Platform Image OS version. Instances which are already running the latest available OS version are not affected. 
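The `StartExtensionUpgrade` operation added above POSTs to the scale set's `extensionRollingUpgrade` endpoint and returns a future, mirroring the `StartOSUpgrade` flow that follows. A minimal usage sketch, with placeholder subscription, resource group, and scale set names; it also assumes the vendored autorest exposes `WaitForCompletionRef`:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
)

func main() {
	client := compute.NewVirtualMachineScaleSetRollingUpgradesClient("<subscription-id>")
	// client.Authorizer must be set (service principal, MSI, ...);
	// credential wiring is omitted from this sketch.

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute)
	defer cancel()

	// Kicks off a rolling upgrade of every extension on the scale set;
	// instances already running the latest extension versions are skipped.
	future, err := client.StartExtensionUpgrade(ctx, "<resource-group>", "<vmss-name>")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		fmt.Println("upgrade failed:", err)
		return
	}
	fmt.Println("extension rolling upgrade completed")
}
```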
// Parameters: @@ -207,7 +279,7 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgradePreparer "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachinescalesets.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesets.go similarity index 99% rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachinescalesets.go rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesets.go index ef73c1417756..f78d824fdfef 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachinescalesets.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesets.go @@ -92,7 +92,7 @@ func (client VirtualMachineScaleSetsClient) CreateOrUpdatePreparer(ctx context.C "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -167,7 +167,7 @@ func (client VirtualMachineScaleSetsClient) DeallocatePreparer(ctx context.Conte "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -242,7 +242,7 @@ func (client VirtualMachineScaleSetsClient) DeletePreparer(ctx context.Context, "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -319,7 +319,7 @@ func (client VirtualMachineScaleSetsClient) DeleteInstancesPreparer(ctx context. "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -399,7 +399,7 @@ func (client VirtualMachineScaleSetsClient) ForceRecoveryServiceFabricPlatformUp "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, "platformUpdateDomain": autorest.Encode("query", platformUpdateDomain), @@ -467,7 +467,7 @@ func (client VirtualMachineScaleSetsClient) GetPreparer(ctx context.Context, res "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -534,7 +534,7 @@ func (client VirtualMachineScaleSetsClient) GetInstanceViewPreparer(ctx context. 
"vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -602,7 +602,7 @@ func (client VirtualMachineScaleSetsClient) GetOSUpgradeHistoryPreparer(ctx cont "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -695,7 +695,7 @@ func (client VirtualMachineScaleSetsClient) ListPreparer(ctx context.Context, re "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -787,7 +787,7 @@ func (client VirtualMachineScaleSetsClient) ListAllPreparer(ctx context.Context) "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -883,7 +883,7 @@ func (client VirtualMachineScaleSetsClient) ListSkusPreparer(ctx context.Context "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -974,7 +974,7 @@ func (client VirtualMachineScaleSetsClient) PerformMaintenancePreparer(ctx conte "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1051,7 +1051,7 @@ func (client VirtualMachineScaleSetsClient) PowerOffPreparer(ctx context.Context "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1127,7 +1127,7 @@ func (client VirtualMachineScaleSetsClient) RedeployPreparer(ctx context.Context "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1203,7 +1203,7 @@ func (client VirtualMachineScaleSetsClient) ReimagePreparer(ctx context.Context, "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1280,7 +1280,7 @@ func (client VirtualMachineScaleSetsClient) ReimageAllPreparer(ctx context.Conte "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1356,7 +1356,7 @@ func (client VirtualMachineScaleSetsClient) RestartPreparer(ctx context.Context, "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1432,7 +1432,7 @@ func (client VirtualMachineScaleSetsClient) StartPreparer(ctx context.Context, r "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ 
-1508,7 +1508,7 @@ func (client VirtualMachineScaleSetsClient) UpdatePreparer(ctx context.Context, "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1588,7 +1588,7 @@ func (client VirtualMachineScaleSetsClient) UpdateInstancesPreparer(ctx context. "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachinescalesetvms.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetvms.go similarity index 99% rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachinescalesetvms.go rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetvms.go index a068ec453f0e..2742fe91126e 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachinescalesetvms.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinescalesetvms.go @@ -72,7 +72,7 @@ func (client VirtualMachineScaleSetVMsClient) DeallocatePreparer(ctx context.Con "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -144,7 +144,7 @@ func (client VirtualMachineScaleSetVMsClient) DeletePreparer(ctx context.Context "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -222,7 +222,7 @@ func (client VirtualMachineScaleSetVMsClient) GetPreparer(ctx context.Context, r "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -291,7 +291,7 @@ func (client VirtualMachineScaleSetVMsClient) GetInstanceViewPreparer(ctx contex "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -362,7 +362,7 @@ func (client VirtualMachineScaleSetVMsClient) ListPreparer(ctx context.Context, "virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -461,7 +461,7 @@ func (client VirtualMachineScaleSetVMsClient) PerformMaintenancePreparer(ctx con "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -534,7 +534,7 @@ func (client VirtualMachineScaleSetVMsClient) PowerOffPreparer(ctx context.Conte "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = 
"2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -606,7 +606,7 @@ func (client VirtualMachineScaleSetVMsClient) RedeployPreparer(ctx context.Conte "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -678,7 +678,7 @@ func (client VirtualMachineScaleSetVMsClient) ReimagePreparer(ctx context.Contex "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -751,7 +751,7 @@ func (client VirtualMachineScaleSetVMsClient) ReimageAllPreparer(ctx context.Con "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -823,7 +823,7 @@ func (client VirtualMachineScaleSetVMsClient) RestartPreparer(ctx context.Contex "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -902,7 +902,7 @@ func (client VirtualMachineScaleSetVMsClient) RunCommandPreparer(ctx context.Con "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -977,7 +977,7 @@ func (client VirtualMachineScaleSetVMsClient) StartPreparer(ctx context.Context, "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1071,7 +1071,7 @@ func (client VirtualMachineScaleSetVMsClient) UpdatePreparer(ctx context.Context "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachinesizes.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinesizes.go similarity index 96% rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachinesizes.go rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinesizes.go index d339b5dee2d0..91343db94206 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachinesizes.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute/virtualmachinesizes.go @@ -40,7 +40,8 @@ func NewVirtualMachineSizesClientWithBaseURI(baseURI string, subscriptionID stri return VirtualMachineSizesClient{NewWithBaseURI(baseURI, subscriptionID)} } -// List lists all available virtual machine sizes for a subscription in a location. +// List this API is deprecated. Use [Resources +// Skus](https://docs.microsoft.com/en-us/rest/api/compute/resourceskus/list) // Parameters: // location - the location upon which virtual-machine-sizes is queried. 
func (client VirtualMachineSizesClient) List(ctx context.Context, location string) (result VirtualMachineSizeListResult, err error) { @@ -78,7 +79,7 @@ func (client VirtualMachineSizesClient) ListPreparer(ctx context.Context, locati "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2017-10-01/containerregistry/models.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2017-10-01/containerregistry/models.go index dd5019e53557..8e5ee3778cba 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2017-10-01/containerregistry/models.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2017-10-01/containerregistry/models.go @@ -441,8 +441,12 @@ type ImportImageParameters struct { // ImportSource ... type ImportSource struct { - // ResourceID - The resource identifier of the target Azure Container Registry. + // ResourceID - The resource identifier of the source Azure Container Registry. ResourceID *string `json:"resourceId,omitempty"` + // RegistryURI - The address of the source registry (e.g. 'mcr.microsoft.com'). + RegistryURI *string `json:"registryUri,omitempty"` + // Credentials - Credentials used when importing from a registry uri. + Credentials *ImportSourceCredentials `json:"credentials,omitempty"` // SourceImage - Repository name of the source image. // Specify an image by repository ('hello-world'). This will use the 'latest' tag. // Specify an image by tag ('hello-world:latest'). @@ -450,6 +454,14 @@ type ImportSource struct { SourceImage *string `json:"sourceImage,omitempty"` } +// ImportSourceCredentials ... +type ImportSourceCredentials struct { + // Username - The username to authenticate with the source registry. + Username *string `json:"username,omitempty"` + // Password - The password used to authenticate with the source registry. + Password *string `json:"password,omitempty"` +} + // OperationDefinition the definition of a container registry operation. type OperationDefinition struct { // Origin - The origin information of the container registry operation. @@ -820,6 +832,8 @@ type Registry struct { autorest.Response `json:"-"` // Sku - The SKU of the container registry. Sku *Sku `json:"sku,omitempty"` + // Identity - The identity of the container registry. + Identity *RegistryIdentity `json:"identity,omitempty"` // RegistryProperties - The properties of the container registry. *RegistryProperties `json:"properties,omitempty"` // ID - The resource ID. 
@@ -840,6 +854,9 @@ func (r Registry) MarshalJSON() ([]byte, error) { if r.Sku != nil { objectMap["sku"] = r.Sku } + if r.Identity != nil { + objectMap["identity"] = r.Identity + } if r.RegistryProperties != nil { objectMap["properties"] = r.RegistryProperties } @@ -879,6 +896,15 @@ func (r *Registry) UnmarshalJSON(body []byte) error { } r.Sku = &sku } + case "identity": + if v != nil { + var identity RegistryIdentity + err = json.Unmarshal(*v, &identity) + if err != nil { + return err + } + r.Identity = &identity + } case "properties": if v != nil { var registryProperties RegistryProperties @@ -939,6 +965,16 @@ func (r *Registry) UnmarshalJSON(body []byte) error { return nil } +// RegistryIdentity the identity of the container registry. +type RegistryIdentity struct { + // Type - The type of identity used for the registry. + Type *string `json:"type,omitempty"` + // PrincipalID - The principal ID of registry identity. + PrincipalID *string `json:"principalId,omitempty"` + // TenantID - The tenant ID associated with the registry. + TenantID *string `json:"tenantId,omitempty"` +} + // RegistryListCredentialsResult the response from the ListCredentials operation. type RegistryListCredentialsResult struct { autorest.Response `json:"-"` @@ -1116,6 +1152,8 @@ type RegistryUpdateParameters struct { Tags map[string]*string `json:"tags"` // Sku - The SKU of the container registry. Sku *Sku `json:"sku,omitempty"` + // Identity - The identity of the container registry. + Identity *RegistryIdentity `json:"identity,omitempty"` // RegistryPropertiesUpdateParameters - The properties that the container registry will be updated with. *RegistryPropertiesUpdateParameters `json:"properties,omitempty"` } @@ -1129,6 +1167,9 @@ func (rup RegistryUpdateParameters) MarshalJSON() ([]byte, error) { if rup.Sku != nil { objectMap["sku"] = rup.Sku } + if rup.Identity != nil { + objectMap["identity"] = rup.Identity + } if rup.RegistryPropertiesUpdateParameters != nil { objectMap["properties"] = rup.RegistryPropertiesUpdateParameters } @@ -1162,6 +1203,15 @@ func (rup *RegistryUpdateParameters) UnmarshalJSON(body []byte) error { } rup.Sku = &sku } + case "identity": + if v != nil { + var identity RegistryIdentity + err = json.Unmarshal(*v, &identity) + if err != nil { + return err + } + rup.Identity = &identity + } case "properties": if v != nil { var registryPropertiesUpdateParameters RegistryPropertiesUpdateParameters diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2017-10-01/containerregistry/registries.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2017-10-01/containerregistry/registries.go index 4e6598d12c3c..b02ca13b2ea2 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2017-10-01/containerregistry/registries.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2017-10-01/containerregistry/registries.go @@ -372,7 +372,8 @@ func (client RegistriesClient) ImportImage(ctx context.Context, resourceGroupNam {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}, {TargetValue: parameters, Constraints: []validation.Constraint{{Target: "parameters.Source", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "parameters.Source.ResourceID", Name: validation.Null, Rule: true, Chain: nil}, + Chain: []validation.Constraint{{Target: 
"parameters.Source.Credentials", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.Source.Credentials.Password", Name: validation.Null, Rule: true, Chain: nil}}}, {Target: "parameters.Source.SourceImage", Name: validation.Null, Rule: true, Chain: nil}, }}}}}); err != nil { return result, validation.NewError("containerregistry.RegistriesClient", "ImportImage", err.Error()) diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2018-03-31/containerservice/managedclusters.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2018-03-31/containerservice/managedclusters.go index 753471bfbf24..661ab1347660 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2018-03-31/containerservice/managedclusters.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2018-03-31/containerservice/managedclusters.go @@ -76,7 +76,6 @@ func (client ManagedClustersClient) CreateOrUpdate(ctx context.Context, resource {Target: "parameters.ManagedClusterProperties.AadProfile", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.AadProfile.ClientAppID", Name: validation.Null, Rule: true, Chain: nil}, {Target: "parameters.ManagedClusterProperties.AadProfile.ServerAppID", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.ManagedClusterProperties.AadProfile.ServerAppSecret", Name: validation.Null, Rule: true, Chain: nil}, }}, }}}}}); err != nil { return result, validation.NewError("containerservice.ManagedClustersClient", "CreateOrUpdate", err.Error()) @@ -609,3 +608,77 @@ func (client ManagedClustersClient) ListByResourceGroupComplete(ctx context.Cont result.page, err = client.ListByResourceGroup(ctx, resourceGroupName) return } + +// UpdateTags updates a managed cluster with the specified tags. +// Parameters: +// resourceGroupName - the name of the resource group. +// resourceName - the name of the managed cluster resource. +// parameters - parameters supplied to the Update Managed Cluster Tags operation. +func (client ManagedClustersClient) UpdateTags(ctx context.Context, resourceGroupName string, resourceName string, parameters TagsObject) (result ManagedClustersUpdateTagsFuture, err error) { + req, err := client.UpdateTagsPreparer(ctx, resourceGroupName, resourceName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "UpdateTags", nil, "Failure preparing request") + return + } + + result, err = client.UpdateTagsSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "UpdateTags", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdateTagsPreparer prepares the UpdateTags request. 
+func (client ManagedClustersClient) UpdateTagsPreparer(ctx context.Context, resourceGroupName string, resourceName string, parameters TagsObject) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-03-31" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateTagsSender sends the UpdateTags request. The method will close the +// http.Response Body if it receives an error. +func (client ManagedClustersClient) UpdateTagsSender(req *http.Request) (future ManagedClustersUpdateTagsFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// UpdateTagsResponder handles the response to the UpdateTags request. The method always +// closes the http.Response Body. +func (client ManagedClustersClient) UpdateTagsResponder(resp *http.Response) (result ManagedCluster, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2018-03-31/containerservice/models.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2018-03-31/containerservice/models.go index f8b0bc6c3b8b..ba39aa50ca0c 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2018-03-31/containerservice/models.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2018-03-31/containerservice/models.go @@ -1301,6 +1301,35 @@ func (future *ManagedClustersDeleteFuture) Result(client ManagedClustersClient) return } +// ManagedClustersUpdateTagsFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type ManagedClustersUpdateTagsFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *ManagedClustersUpdateTagsFuture) Result(client ManagedClustersClient) (mc ManagedCluster, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersUpdateTagsFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("containerservice.ManagedClustersUpdateTagsFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if mc.Response.Response, err = future.GetResult(sender); err == nil && mc.Response.Response.StatusCode != http.StatusNoContent { + mc, err = client.UpdateTagsResponder(mc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersUpdateTagsFuture", "Result", mc.Response.Response, "Failure responding to request") + } + } + return +} + // ManagedClusterUpgradeProfile the list of available upgrades for compute pools. type ManagedClusterUpgradeProfile struct { autorest.Response `json:"-"` @@ -1709,6 +1738,21 @@ type SSHPublicKey struct { KeyData *string `json:"keyData,omitempty"` } +// TagsObject tags object for patch operations. +type TagsObject struct { + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for TagsObject. +func (toVar TagsObject) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if toVar.Tags != nil { + objectMap["tags"] = toVar.Tags + } + return json.Marshal(objectMap) +} + // VMDiagnostics profile for diagnostics on the container service VMs. type VMDiagnostics struct { // Enabled - Whether the VM diagnostic agent is provisioned on the VM. 
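The `UpdateTags` operation added to `ManagedClustersClient` above follows the standard autorest long-running-operation split: the Preparer builds a PATCH whose body is just a `TagsObject`, the Sender wraps the initial response in a `ManagedClustersUpdateTagsFuture`, and the Responder decodes the final `ManagedCluster`. A minimal usage sketch; the subscription, resource group, and cluster names are placeholders, credential setup is omitted, and it assumes the vendored autorest exposes `WaitForCompletionRef`:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2018-03-31/containerservice"
)

func main() {
	client := containerservice.NewManagedClustersClient("<subscription-id>")
	// client.Authorizer must be populated (service principal, MSI, ...);
	// credential setup is omitted from this sketch.

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
	defer cancel()

	env := "staging"
	// TagsObject marshals to {"tags": {...}}, so the PATCH replaces the
	// cluster's tags without touching any other managed-cluster property.
	future, err := client.UpdateTags(ctx, "<resource-group>", "<cluster-name>",
		containerservice.TagsObject{Tags: map[string]*string{"env": &env}})
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	// Poll until the long-running operation finishes, then decode the
	// updated cluster from the final response via the Responder.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		fmt.Println("polling failed:", err)
		return
	}
	mc, err := future.Result(client)
	if err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Printf("updated cluster %s\n", *mc.Name)
}
```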
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/applicationgateways.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/applicationgateways.go index fe19916146da..89b0b01ff991 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/applicationgateways.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/applicationgateways.go @@ -94,10 +94,6 @@ func (client ApplicationGatewaysClient) BackendHealthSender(req *http.Request) ( if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -180,10 +176,6 @@ func (client ApplicationGatewaysClient) CreateOrUpdateSender(req *http.Request) if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -251,10 +243,6 @@ func (client ApplicationGatewaysClient) DeleteSender(req *http.Request) (future if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -850,10 +838,6 @@ func (client ApplicationGatewaysClient) StartSender(req *http.Request) (future A if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -920,10 +904,6 @@ func (client ApplicationGatewaysClient) StopSender(req *http.Request) (future Ap if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -993,10 +973,6 @@ func (client ApplicationGatewaysClient) UpdateTagsSender(req *http.Request) (fut if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/applicationsecuritygroups.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/applicationsecuritygroups.go index 4d1a255a47be..7c2ebe4692e5 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/applicationsecuritygroups.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/applicationsecuritygroups.go @@ -92,10 +92,6 @@ func (client ApplicationSecurityGroupsClient) CreateOrUpdateSender(req *http.Req if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -163,10 +159,6 @@ func (client ApplicationSecurityGroupsClient) DeleteSender(req *http.Request) (f if err != nil { return } - err = autorest.Respond(resp, 
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/expressroutecircuitauthorizations.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/expressroutecircuitauthorizations.go index 2860cd697334..0a15c4b4bd94 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/expressroutecircuitauthorizations.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/expressroutecircuitauthorizations.go @@ -97,10 +97,6 @@ func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdateSender(req * if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -170,10 +166,6 @@ func (client ExpressRouteCircuitAuthorizationsClient) DeleteSender(req *http.Req if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/expressroutecircuitpeerings.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/expressroutecircuitpeerings.go index b129f3c2ed80..7f397879c1d8 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/expressroutecircuitpeerings.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/expressroutecircuitpeerings.go @@ -94,10 +94,6 @@ func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdateSender(req *http.R if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -167,10 +163,6 @@ func (client ExpressRouteCircuitPeeringsClient) DeleteSender(req *http.Request) if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/expressroutecircuits.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/expressroutecircuits.go index 0a11eb94914a..d2932342de9c 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/expressroutecircuits.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/expressroutecircuits.go @@ -92,10 +92,6 @@ func (client ExpressRouteCircuitsClient) CreateOrUpdateSender(req *http.Request) if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated)) - if err != nil { - return - } 
future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -163,10 +159,6 @@ func (client ExpressRouteCircuitsClient) DeleteSender(req *http.Request) (future if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -623,10 +615,6 @@ func (client ExpressRouteCircuitsClient) ListArpTableSender(req *http.Request) ( if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -699,10 +687,6 @@ func (client ExpressRouteCircuitsClient) ListRoutesTableSender(req *http.Request if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -775,10 +759,6 @@ func (client ExpressRouteCircuitsClient) ListRoutesTableSummarySender(req *http. if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -849,10 +829,6 @@ func (client ExpressRouteCircuitsClient) UpdateTagsSender(req *http.Request) (fu if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/inboundnatrules.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/inboundnatrules.go index 64c604441c6a..806e21e3f9d2 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/inboundnatrules.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/inboundnatrules.go @@ -114,10 +114,6 @@ func (client InboundNatRulesClient) CreateOrUpdateSender(req *http.Request) (fut if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -187,10 +183,6 @@ func (client InboundNatRulesClient) DeleteSender(req *http.Request) (future Inbo if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/interfaces.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/interfaces.go index 5632e32b2276..3b345a136fb8 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/interfaces.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/interfaces.go @@ -92,10 +92,6 @@ func (client InterfacesClient) CreateOrUpdateSender(req *http.Request) (future I if err != nil { return } - err = 
autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -163,10 +159,6 @@ func (client InterfacesClient) DeleteSender(req *http.Request) (future Interface if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -304,10 +296,6 @@ func (client InterfacesClient) GetEffectiveRouteTableSender(req *http.Request) ( if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -711,10 +699,6 @@ func (client InterfacesClient) ListEffectiveNetworkSecurityGroupsSender(req *htt if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -1082,10 +1066,6 @@ func (client InterfacesClient) UpdateTagsSender(req *http.Request) (future Inter if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/loadbalancers.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/loadbalancers.go index f62ad7b99d64..27b6955976ee 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/loadbalancers.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/loadbalancers.go @@ -92,10 +92,6 @@ func (client LoadBalancersClient) CreateOrUpdateSender(req *http.Request) (futur if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -163,10 +159,6 @@ func (client LoadBalancersClient) DeleteSender(req *http.Request) (future LoadBa if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -490,10 +482,6 @@ func (client LoadBalancersClient) UpdateTagsSender(req *http.Request) (future Lo if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/localnetworkgateways.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/localnetworkgateways.go index b20ca1df7b68..1933dc937668 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/localnetworkgateways.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/localnetworkgateways.go @@ 
-101,10 +101,6 @@ func (client LocalNetworkGatewaysClient) CreateOrUpdateSender(req *http.Request) if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -178,10 +174,6 @@ func (client LocalNetworkGatewaysClient) DeleteSender(req *http.Request) (future if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -423,10 +415,6 @@ func (client LocalNetworkGatewaysClient) UpdateTagsSender(req *http.Request) (fu if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/packetcaptures.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/packetcaptures.go index 78b6b80d90b5..e44a38221dbb 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/packetcaptures.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/packetcaptures.go @@ -104,10 +104,6 @@ func (client PacketCapturesClient) CreateSender(req *http.Request) (future Packe if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -177,10 +173,6 @@ func (client PacketCapturesClient) DeleteSender(req *http.Request) (future Packe if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -318,10 +310,6 @@ func (client PacketCapturesClient) GetStatusSender(req *http.Request) (future Pa if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -458,10 +446,6 @@ func (client PacketCapturesClient) StopSender(req *http.Request) (future PacketC if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/publicipaddresses.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/publicipaddresses.go index c49d425fe4e3..0b1bb489f877 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/publicipaddresses.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/publicipaddresses.go @@ -104,10 +104,6 @@ func (client PublicIPAddressesClient) CreateOrUpdateSender(req *http.Request) (f if err != nil { return } - err = autorest.Respond(resp, 
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -175,10 +171,6 @@ func (client PublicIPAddressesClient) DeleteSender(req *http.Request) (future Pu if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -779,10 +771,6 @@ func (client PublicIPAddressesClient) UpdateTagsSender(req *http.Request) (futur if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/routefilterrules.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/routefilterrules.go index 70181ed68c97..a9e3ae623226 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/routefilterrules.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/routefilterrules.go @@ -104,10 +104,6 @@ func (client RouteFilterRulesClient) CreateOrUpdateSender(req *http.Request) (fu if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -177,10 +173,6 @@ func (client RouteFilterRulesClient) DeleteSender(req *http.Request) (future Rou if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -416,10 +408,6 @@ func (client RouteFilterRulesClient) UpdateSender(req *http.Request) (future Rou if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/routefilters.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/routefilters.go index 5c4f503fd698..245ad832aec2 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/routefilters.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/routefilters.go @@ -92,10 +92,6 @@ func (client RouteFiltersClient) CreateOrUpdateSender(req *http.Request) (future if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -163,10 +159,6 @@ func (client RouteFiltersClient) DeleteSender(req *http.Request) (future RouteFi if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -490,10 +482,6 @@ func 
(client RouteFiltersClient) UpdateSender(req *http.Request) (future RouteFi if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/routes.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/routes.go index a1ac17b7decf..013576182ec9 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/routes.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/routes.go @@ -94,10 +94,6 @@ func (client RoutesClient) CreateOrUpdateSender(req *http.Request) (future Route if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -167,10 +163,6 @@ func (client RoutesClient) DeleteSender(req *http.Request) (future RoutesDeleteF if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/routetables.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/routetables.go index c839f21b0606..490fe3032f78 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/routetables.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/routetables.go @@ -92,10 +92,6 @@ func (client RouteTablesClient) CreateOrUpdateSender(req *http.Request) (future if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -163,10 +159,6 @@ func (client RouteTablesClient) DeleteSender(req *http.Request) (future RouteTab if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -490,10 +482,6 @@ func (client RouteTablesClient) UpdateTagsSender(req *http.Request) (future Rout if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/securitygroups.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/securitygroups.go index e20eaeef3928..fa4d471b3bab 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/securitygroups.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/securitygroups.go @@ -92,10 +92,6 @@ func (client SecurityGroupsClient) 
CreateOrUpdateSender(req *http.Request) (futu if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -163,10 +159,6 @@ func (client SecurityGroupsClient) DeleteSender(req *http.Request) (future Secur if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -490,10 +482,6 @@ func (client SecurityGroupsClient) UpdateTagsSender(req *http.Request) (future S if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/securityrules.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/securityrules.go index e1761f25235a..cab4c1f3f14b 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/securityrules.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/securityrules.go @@ -94,10 +94,6 @@ func (client SecurityRulesClient) CreateOrUpdateSender(req *http.Request) (futur if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -167,10 +163,6 @@ func (client SecurityRulesClient) DeleteSender(req *http.Request) (future Securi if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/subnets.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/subnets.go index 13f8a8bf15ab..10a16654f61c 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/subnets.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/subnets.go @@ -94,10 +94,6 @@ func (client SubnetsClient) CreateOrUpdateSender(req *http.Request) (future Subn if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -167,10 +163,6 @@ func (client SubnetsClient) DeleteSender(req *http.Request) (future SubnetsDelet if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/virtualnetworkgatewayconnections.go 
b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/virtualnetworkgatewayconnections.go index 5facf960ef78..230392d5b330 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/virtualnetworkgatewayconnections.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/virtualnetworkgatewayconnections.go @@ -107,10 +107,6 @@ func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdateSender(req *h if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -178,10 +174,6 @@ func (client VirtualNetworkGatewayConnectionsClient) DeleteSender(req *http.Requ if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -492,10 +484,6 @@ func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeySender(req *h if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -575,10 +563,6 @@ func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeySender(req *htt if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -649,10 +633,6 @@ func (client VirtualNetworkGatewayConnectionsClient) UpdateTagsSender(req *http. 
if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/virtualnetworkgateways.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/virtualnetworkgateways.go index b45b6bd82c04..8e17e0a62e87 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/virtualnetworkgateways.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/virtualnetworkgateways.go @@ -99,10 +99,6 @@ func (client VirtualNetworkGatewaysClient) CreateOrUpdateSender(req *http.Reques if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -170,10 +166,6 @@ func (client VirtualNetworkGatewaysClient) DeleteSender(req *http.Request) (futu if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -244,10 +236,6 @@ func (client VirtualNetworkGatewaysClient) GeneratevpnclientpackageSender(req *h if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -319,10 +307,6 @@ func (client VirtualNetworkGatewaysClient) GenerateVpnProfileSender(req *http.Re if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -460,10 +444,6 @@ func (client VirtualNetworkGatewaysClient) GetAdvertisedRoutesSender(req *http.R if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -535,10 +515,6 @@ func (client VirtualNetworkGatewaysClient) GetBgpPeerStatusSender(req *http.Requ if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -607,10 +583,6 @@ func (client VirtualNetworkGatewaysClient) GetLearnedRoutesSender(req *http.Requ if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -679,10 +651,6 @@ func (client VirtualNetworkGatewaysClient) GetVpnProfilePackageURLSender(req *ht if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -943,10 +911,6 @@ func (client VirtualNetworkGatewaysClient) ResetSender(req *http.Request) (futur if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, 
http.StatusAccepted)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -1084,10 +1048,6 @@ func (client VirtualNetworkGatewaysClient) UpdateTagsSender(req *http.Request) ( if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/virtualnetworkpeerings.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/virtualnetworkpeerings.go index a0a54eea9ffc..b5824eca546f 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/virtualnetworkpeerings.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/virtualnetworkpeerings.go @@ -95,10 +95,6 @@ func (client VirtualNetworkPeeringsClient) CreateOrUpdateSender(req *http.Reques if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -168,10 +164,6 @@ func (client VirtualNetworkPeeringsClient) DeleteSender(req *http.Request) (futu if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/virtualnetworks.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/virtualnetworks.go index afcd4b75c620..9a7007ac7f94 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/virtualnetworks.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/virtualnetworks.go @@ -163,10 +163,6 @@ func (client VirtualNetworksClient) CreateOrUpdateSender(req *http.Request) (fut if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -234,10 +230,6 @@ func (client VirtualNetworksClient) DeleteSender(req *http.Request) (future Virt if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -656,10 +648,6 @@ func (client VirtualNetworksClient) UpdateTagsSender(req *http.Request) (future if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/watchers.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/watchers.go index 992df7b254ad..475ec5932693 100644 --- 
a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/watchers.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network/watchers.go @@ -102,10 +102,6 @@ func (client WatchersClient) CheckConnectivitySender(req *http.Request) (future if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -243,10 +239,6 @@ func (client WatchersClient) DeleteSender(req *http.Request) (future WatchersDel if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -393,10 +385,6 @@ func (client WatchersClient) GetAzureReachabilityReportSender(req *http.Request) if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -473,10 +461,6 @@ func (client WatchersClient) GetFlowLogStatusSender(req *http.Request) (future W if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -555,10 +539,6 @@ func (client WatchersClient) GetNextHopSender(req *http.Request) (future Watcher if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -715,10 +695,6 @@ func (client WatchersClient) GetTroubleshootingSender(req *http.Request) (future if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -795,10 +771,6 @@ func (client WatchersClient) GetTroubleshootingResultSender(req *http.Request) ( if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -875,10 +847,6 @@ func (client WatchersClient) GetVMSecurityRulesSender(req *http.Request) (future if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -1076,10 +1044,6 @@ func (client WatchersClient) ListAvailableProvidersSender(req *http.Request) (fu if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -1160,10 +1124,6 @@ func (client WatchersClient) SetFlowLogConfigurationSender(req *http.Request) (f if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -1314,10 +1274,6 @@ func (client WatchersClient) VerifyIPFlowSender(req 
*http.Request) (future Watch if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources/deployments.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources/deployments.go index d27a542c0f74..1c767e9d1018 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources/deployments.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources/deployments.go @@ -272,10 +272,6 @@ func (client DeploymentsClient) CreateOrUpdateSender(req *http.Request) (future if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -362,10 +358,6 @@ func (client DeploymentsClient) DeleteSender(req *http.Request) (future Deployme if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources/groups.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources/groups.go index 012de7818c65..15650baac745 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources/groups.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources/groups.go @@ -247,10 +247,6 @@ func (client GroupsClient) DeleteSender(req *http.Request) (future GroupsDeleteF if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources/resources.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources/resources.go index bd8987cf1f64..455d5a882360 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources/resources.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources/resources.go @@ -256,10 +256,6 @@ func (client Client) CreateOrUpdateSender(req *http.Request) (future CreateOrUpd if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -336,10 +332,6 @@ func (client Client) CreateOrUpdateByIDSender(req *http.Request) (future CreateO if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -422,10 +414,6 @@ 
func (client Client) DeleteSender(req *http.Request) (future DeleteFuture, err e if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -491,10 +479,6 @@ func (client Client) DeleteByIDSender(req *http.Request) (future DeleteByIDFutur if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -936,10 +920,6 @@ func (client Client) MoveResourcesSender(req *http.Request) (future MoveResource if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -1023,10 +1003,6 @@ func (client Client) UpdateSender(req *http.Request) (future UpdateFuture, err e if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -1096,10 +1072,6 @@ func (client Client) UpdateByIDSender(req *http.Request) (future UpdateByIDFutur if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } @@ -1180,10 +1152,6 @@ func (client Client) ValidateMoveResourcesSender(req *http.Request) (future Vali if err != nil { return } - err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent, http.StatusConflict)) - if err != nil { - return - } future.Future, err = azure.NewFutureFromResponse(resp) return } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage/client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage/client.go deleted file mode 100644 index 2be951c81f13..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage/client.go +++ /dev/null @@ -1,51 +0,0 @@ -// Package storage implements the Azure ARM Storage service API version 2017-10-01. -// -// The Azure Storage Management API. -package storage - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
- -import ( - "github.com/Azure/go-autorest/autorest" -) - -const ( - // DefaultBaseURI is the default URI used for the service Storage - DefaultBaseURI = "https://management.azure.com" -) - -// BaseClient is the base client for Storage. -type BaseClient struct { - autorest.Client - BaseURI string - SubscriptionID string -} - -// New creates an instance of the BaseClient client. -func New(subscriptionID string) BaseClient { - return NewWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewWithBaseURI creates an instance of the BaseClient client. -func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient { - return BaseClient{ - Client: autorest.NewClientWithUserAgent(UserAgent()), - BaseURI: baseURI, - SubscriptionID: subscriptionID, - } -} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage/models.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage/models.go deleted file mode 100644 index c215a75c8e59..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage/models.go +++ /dev/null @@ -1,1288 +0,0 @@ -package storage - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "encoding/json" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/date" - "net/http" -) - -// AccessTier enumerates the values for access tier. -type AccessTier string - -const ( - // Cool ... - Cool AccessTier = "Cool" - // Hot ... - Hot AccessTier = "Hot" -) - -// PossibleAccessTierValues returns an array of possible values for the AccessTier const type. -func PossibleAccessTierValues() []AccessTier { - return []AccessTier{Cool, Hot} -} - -// AccountStatus enumerates the values for account status. -type AccountStatus string - -const ( - // Available ... - Available AccountStatus = "available" - // Unavailable ... - Unavailable AccountStatus = "unavailable" -) - -// PossibleAccountStatusValues returns an array of possible values for the AccountStatus const type. -func PossibleAccountStatusValues() []AccountStatus { - return []AccountStatus{Available, Unavailable} -} - -// Action enumerates the values for action. -type Action string - -const ( - // Allow ... - Allow Action = "Allow" -) - -// PossibleActionValues returns an array of possible values for the Action const type. -func PossibleActionValues() []Action { - return []Action{Allow} -} - -// Bypass enumerates the values for bypass. -type Bypass string - -const ( - // AzureServices ... - AzureServices Bypass = "AzureServices" - // Logging ... - Logging Bypass = "Logging" - // Metrics ... - Metrics Bypass = "Metrics" - // None ... 
- None Bypass = "None" -) - -// PossibleBypassValues returns an array of possible values for the Bypass const type. -func PossibleBypassValues() []Bypass { - return []Bypass{AzureServices, Logging, Metrics, None} -} - -// DefaultAction enumerates the values for default action. -type DefaultAction string - -const ( - // DefaultActionAllow ... - DefaultActionAllow DefaultAction = "Allow" - // DefaultActionDeny ... - DefaultActionDeny DefaultAction = "Deny" -) - -// PossibleDefaultActionValues returns an array of possible values for the DefaultAction const type. -func PossibleDefaultActionValues() []DefaultAction { - return []DefaultAction{DefaultActionAllow, DefaultActionDeny} -} - -// HTTPProtocol enumerates the values for http protocol. -type HTTPProtocol string - -const ( - // HTTPS ... - HTTPS HTTPProtocol = "https" - // Httpshttp ... - Httpshttp HTTPProtocol = "https,http" -) - -// PossibleHTTPProtocolValues returns an array of possible values for the HTTPProtocol const type. -func PossibleHTTPProtocolValues() []HTTPProtocol { - return []HTTPProtocol{HTTPS, Httpshttp} -} - -// KeyPermission enumerates the values for key permission. -type KeyPermission string - -const ( - // Full ... - Full KeyPermission = "Full" - // Read ... - Read KeyPermission = "Read" -) - -// PossibleKeyPermissionValues returns an array of possible values for the KeyPermission const type. -func PossibleKeyPermissionValues() []KeyPermission { - return []KeyPermission{Full, Read} -} - -// KeySource enumerates the values for key source. -type KeySource string - -const ( - // MicrosoftKeyvault ... - MicrosoftKeyvault KeySource = "Microsoft.Keyvault" - // MicrosoftStorage ... - MicrosoftStorage KeySource = "Microsoft.Storage" -) - -// PossibleKeySourceValues returns an array of possible values for the KeySource const type. -func PossibleKeySourceValues() []KeySource { - return []KeySource{MicrosoftKeyvault, MicrosoftStorage} -} - -// Kind enumerates the values for kind. -type Kind string - -const ( - // BlobStorage ... - BlobStorage Kind = "BlobStorage" - // Storage ... - Storage Kind = "Storage" - // StorageV2 ... - StorageV2 Kind = "StorageV2" -) - -// PossibleKindValues returns an array of possible values for the Kind const type. -func PossibleKindValues() []Kind { - return []Kind{BlobStorage, Storage, StorageV2} -} - -// Permissions enumerates the values for permissions. -type Permissions string - -const ( - // A ... - A Permissions = "a" - // C ... - C Permissions = "c" - // D ... - D Permissions = "d" - // L ... - L Permissions = "l" - // P ... - P Permissions = "p" - // R ... - R Permissions = "r" - // U ... - U Permissions = "u" - // W ... - W Permissions = "w" -) - -// PossiblePermissionsValues returns an array of possible values for the Permissions const type. -func PossiblePermissionsValues() []Permissions { - return []Permissions{A, C, D, L, P, R, U, W} -} - -// ProvisioningState enumerates the values for provisioning state. -type ProvisioningState string - -const ( - // Creating ... - Creating ProvisioningState = "Creating" - // ResolvingDNS ... - ResolvingDNS ProvisioningState = "ResolvingDNS" - // Succeeded ... - Succeeded ProvisioningState = "Succeeded" -) - -// PossibleProvisioningStateValues returns an array of possible values for the ProvisioningState const type. -func PossibleProvisioningStateValues() []ProvisioningState { - return []ProvisioningState{Creating, ResolvingDNS, Succeeded} -} - -// Reason enumerates the values for reason. -type Reason string - -const ( - // AccountNameInvalid ... 
- AccountNameInvalid Reason = "AccountNameInvalid" - // AlreadyExists ... - AlreadyExists Reason = "AlreadyExists" -) - -// PossibleReasonValues returns an array of possible values for the Reason const type. -func PossibleReasonValues() []Reason { - return []Reason{AccountNameInvalid, AlreadyExists} -} - -// ReasonCode enumerates the values for reason code. -type ReasonCode string - -const ( - // NotAvailableForSubscription ... - NotAvailableForSubscription ReasonCode = "NotAvailableForSubscription" - // QuotaID ... - QuotaID ReasonCode = "QuotaId" -) - -// PossibleReasonCodeValues returns an array of possible values for the ReasonCode const type. -func PossibleReasonCodeValues() []ReasonCode { - return []ReasonCode{NotAvailableForSubscription, QuotaID} -} - -// Services enumerates the values for services. -type Services string - -const ( - // B ... - B Services = "b" - // F ... - F Services = "f" - // Q ... - Q Services = "q" - // T ... - T Services = "t" -) - -// PossibleServicesValues returns an array of possible values for the Services const type. -func PossibleServicesValues() []Services { - return []Services{B, F, Q, T} -} - -// SignedResource enumerates the values for signed resource. -type SignedResource string - -const ( - // SignedResourceB ... - SignedResourceB SignedResource = "b" - // SignedResourceC ... - SignedResourceC SignedResource = "c" - // SignedResourceF ... - SignedResourceF SignedResource = "f" - // SignedResourceS ... - SignedResourceS SignedResource = "s" -) - -// PossibleSignedResourceValues returns an array of possible values for the SignedResource const type. -func PossibleSignedResourceValues() []SignedResource { - return []SignedResource{SignedResourceB, SignedResourceC, SignedResourceF, SignedResourceS} -} - -// SignedResourceTypes enumerates the values for signed resource types. -type SignedResourceTypes string - -const ( - // SignedResourceTypesC ... - SignedResourceTypesC SignedResourceTypes = "c" - // SignedResourceTypesO ... - SignedResourceTypesO SignedResourceTypes = "o" - // SignedResourceTypesS ... - SignedResourceTypesS SignedResourceTypes = "s" -) - -// PossibleSignedResourceTypesValues returns an array of possible values for the SignedResourceTypes const type. -func PossibleSignedResourceTypesValues() []SignedResourceTypes { - return []SignedResourceTypes{SignedResourceTypesC, SignedResourceTypesO, SignedResourceTypesS} -} - -// SkuName enumerates the values for sku name. -type SkuName string - -const ( - // PremiumLRS ... - PremiumLRS SkuName = "Premium_LRS" - // StandardGRS ... - StandardGRS SkuName = "Standard_GRS" - // StandardLRS ... - StandardLRS SkuName = "Standard_LRS" - // StandardRAGRS ... - StandardRAGRS SkuName = "Standard_RAGRS" - // StandardZRS ... - StandardZRS SkuName = "Standard_ZRS" -) - -// PossibleSkuNameValues returns an array of possible values for the SkuName const type. -func PossibleSkuNameValues() []SkuName { - return []SkuName{PremiumLRS, StandardGRS, StandardLRS, StandardRAGRS, StandardZRS} -} - -// SkuTier enumerates the values for sku tier. -type SkuTier string - -const ( - // Premium ... - Premium SkuTier = "Premium" - // Standard ... - Standard SkuTier = "Standard" -) - -// PossibleSkuTierValues returns an array of possible values for the SkuTier const type. -func PossibleSkuTierValues() []SkuTier { - return []SkuTier{Premium, Standard} -} - -// State enumerates the values for state. -type State string - -const ( - // StateDeprovisioning ... - StateDeprovisioning State = "deprovisioning" - // StateFailed ... 
- StateFailed State = "failed" - // StateNetworkSourceDeleted ... - StateNetworkSourceDeleted State = "networkSourceDeleted" - // StateProvisioning ... - StateProvisioning State = "provisioning" - // StateSucceeded ... - StateSucceeded State = "succeeded" -) - -// PossibleStateValues returns an array of possible values for the State const type. -func PossibleStateValues() []State { - return []State{StateDeprovisioning, StateFailed, StateNetworkSourceDeleted, StateProvisioning, StateSucceeded} -} - -// UsageUnit enumerates the values for usage unit. -type UsageUnit string - -const ( - // Bytes ... - Bytes UsageUnit = "Bytes" - // BytesPerSecond ... - BytesPerSecond UsageUnit = "BytesPerSecond" - // Count ... - Count UsageUnit = "Count" - // CountsPerSecond ... - CountsPerSecond UsageUnit = "CountsPerSecond" - // Percent ... - Percent UsageUnit = "Percent" - // Seconds ... - Seconds UsageUnit = "Seconds" -) - -// PossibleUsageUnitValues returns an array of possible values for the UsageUnit const type. -func PossibleUsageUnitValues() []UsageUnit { - return []UsageUnit{Bytes, BytesPerSecond, Count, CountsPerSecond, Percent, Seconds} -} - -// Account the storage account. -type Account struct { - autorest.Response `json:"-"` - // Sku - Gets the SKU. - Sku *Sku `json:"sku,omitempty"` - // Kind - Gets the Kind. Possible values include: 'Storage', 'StorageV2', 'BlobStorage' - Kind Kind `json:"kind,omitempty"` - // Identity - The identity of the resource. - Identity *Identity `json:"identity,omitempty"` - // AccountProperties - Properties of the storage account. - *AccountProperties `json:"properties,omitempty"` - // ID - Resource Id - ID *string `json:"id,omitempty"` - // Name - Resource name - Name *string `json:"name,omitempty"` - // Type - Resource type - Type *string `json:"type,omitempty"` - // Location - Resource location - Location *string `json:"location,omitempty"` - // Tags - Tags assigned to a resource; can be used for viewing and grouping a resource (across resource groups). - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for Account. -func (a Account) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if a.Sku != nil { - objectMap["sku"] = a.Sku - } - if a.Kind != "" { - objectMap["kind"] = a.Kind - } - if a.Identity != nil { - objectMap["identity"] = a.Identity - } - if a.AccountProperties != nil { - objectMap["properties"] = a.AccountProperties - } - if a.ID != nil { - objectMap["id"] = a.ID - } - if a.Name != nil { - objectMap["name"] = a.Name - } - if a.Type != nil { - objectMap["type"] = a.Type - } - if a.Location != nil { - objectMap["location"] = a.Location - } - if a.Tags != nil { - objectMap["tags"] = a.Tags - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for Account struct. 
-func (a *Account) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "sku": - if v != nil { - var sku Sku - err = json.Unmarshal(*v, &sku) - if err != nil { - return err - } - a.Sku = &sku - } - case "kind": - if v != nil { - var kind Kind - err = json.Unmarshal(*v, &kind) - if err != nil { - return err - } - a.Kind = kind - } - case "identity": - if v != nil { - var identity Identity - err = json.Unmarshal(*v, &identity) - if err != nil { - return err - } - a.Identity = &identity - } - case "properties": - if v != nil { - var accountProperties AccountProperties - err = json.Unmarshal(*v, &accountProperties) - if err != nil { - return err - } - a.AccountProperties = &accountProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - a.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - a.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - a.Type = &typeVar - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - a.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - a.Tags = tags - } - } - } - - return nil -} - -// AccountCheckNameAvailabilityParameters the parameters used to check the availabity of the storage account name. -type AccountCheckNameAvailabilityParameters struct { - // Name - The storage account name. - Name *string `json:"name,omitempty"` - // Type - The type of resource, Microsoft.Storage/storageAccounts - Type *string `json:"type,omitempty"` -} - -// AccountCreateParameters the parameters used when creating a storage account. -type AccountCreateParameters struct { - // Sku - Required. Gets or sets the sku name. - Sku *Sku `json:"sku,omitempty"` - // Kind - Required. Indicates the type of storage account. Possible values include: 'Storage', 'StorageV2', 'BlobStorage' - Kind Kind `json:"kind,omitempty"` - // Location - Required. Gets or sets the location of the resource. This will be one of the supported and registered Azure Geo Regions (e.g. West US, East US, Southeast Asia, etc.). The geo region of a resource cannot be changed once it is created, but if an identical geo region is specified on update, the request will succeed. - Location *string `json:"location,omitempty"` - // Tags - Gets or sets a list of key value pairs that describe the resource. These tags can be used for viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key with a length no greater than 128 characters and a value with a length no greater than 256 characters. - Tags map[string]*string `json:"tags"` - // Identity - The identity of the resource. - Identity *Identity `json:"identity,omitempty"` - // AccountPropertiesCreateParameters - The parameters used to create the storage account. - *AccountPropertiesCreateParameters `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for AccountCreateParameters. 
-func (acp AccountCreateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if acp.Sku != nil { - objectMap["sku"] = acp.Sku - } - if acp.Kind != "" { - objectMap["kind"] = acp.Kind - } - if acp.Location != nil { - objectMap["location"] = acp.Location - } - if acp.Tags != nil { - objectMap["tags"] = acp.Tags - } - if acp.Identity != nil { - objectMap["identity"] = acp.Identity - } - if acp.AccountPropertiesCreateParameters != nil { - objectMap["properties"] = acp.AccountPropertiesCreateParameters - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for AccountCreateParameters struct. -func (acp *AccountCreateParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "sku": - if v != nil { - var sku Sku - err = json.Unmarshal(*v, &sku) - if err != nil { - return err - } - acp.Sku = &sku - } - case "kind": - if v != nil { - var kind Kind - err = json.Unmarshal(*v, &kind) - if err != nil { - return err - } - acp.Kind = kind - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - acp.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - acp.Tags = tags - } - case "identity": - if v != nil { - var identity Identity - err = json.Unmarshal(*v, &identity) - if err != nil { - return err - } - acp.Identity = &identity - } - case "properties": - if v != nil { - var accountPropertiesCreateParameters AccountPropertiesCreateParameters - err = json.Unmarshal(*v, &accountPropertiesCreateParameters) - if err != nil { - return err - } - acp.AccountPropertiesCreateParameters = &accountPropertiesCreateParameters - } - } - } - - return nil -} - -// AccountKey an access key for the storage account. -type AccountKey struct { - // KeyName - Name of the key. - KeyName *string `json:"keyName,omitempty"` - // Value - Base 64-encoded value of the key. - Value *string `json:"value,omitempty"` - // Permissions - Permissions for the key -- read-only or full permissions. Possible values include: 'Read', 'Full' - Permissions KeyPermission `json:"permissions,omitempty"` -} - -// AccountListKeysResult the response from the ListKeys operation. -type AccountListKeysResult struct { - autorest.Response `json:"-"` - // Keys - Gets the list of storage account keys and their properties for the specified storage account. - Keys *[]AccountKey `json:"keys,omitempty"` -} - -// AccountListResult the response from the List Storage Accounts operation. -type AccountListResult struct { - autorest.Response `json:"-"` - // Value - Gets the list of storage accounts and their properties. - Value *[]Account `json:"value,omitempty"` -} - -// AccountProperties properties of the storage account. -type AccountProperties struct { - // ProvisioningState - Gets the status of the storage account at the time the operation was called. Possible values include: 'Creating', 'ResolvingDNS', 'Succeeded' - ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` - // PrimaryEndpoints - Gets the URLs that are used to perform a retrieval of a public blob, queue, or table object. Note that Standard_ZRS and Premium_LRS accounts only return the blob endpoint. 
- PrimaryEndpoints *Endpoints `json:"primaryEndpoints,omitempty"` - // PrimaryLocation - Gets the location of the primary data center for the storage account. - PrimaryLocation *string `json:"primaryLocation,omitempty"` - // StatusOfPrimary - Gets the status indicating whether the primary location of the storage account is available or unavailable. Possible values include: 'Available', 'Unavailable' - StatusOfPrimary AccountStatus `json:"statusOfPrimary,omitempty"` - // LastGeoFailoverTime - Gets the timestamp of the most recent instance of a failover to the secondary location. Only the most recent timestamp is retained. This element is not returned if there has never been a failover instance. Only available if the accountType is Standard_GRS or Standard_RAGRS. - LastGeoFailoverTime *date.Time `json:"lastGeoFailoverTime,omitempty"` - // SecondaryLocation - Gets the location of the geo-replicated secondary for the storage account. Only available if the accountType is Standard_GRS or Standard_RAGRS. - SecondaryLocation *string `json:"secondaryLocation,omitempty"` - // StatusOfSecondary - Gets the status indicating whether the secondary location of the storage account is available or unavailable. Only available if the SKU name is Standard_GRS or Standard_RAGRS. Possible values include: 'Available', 'Unavailable' - StatusOfSecondary AccountStatus `json:"statusOfSecondary,omitempty"` - // CreationTime - Gets the creation date and time of the storage account in UTC. - CreationTime *date.Time `json:"creationTime,omitempty"` - // CustomDomain - Gets the custom domain the user assigned to this storage account. - CustomDomain *CustomDomain `json:"customDomain,omitempty"` - // SecondaryEndpoints - Gets the URLs that are used to perform a retrieval of a public blob, queue, or table object from the secondary location of the storage account. Only available if the SKU name is Standard_RAGRS. - SecondaryEndpoints *Endpoints `json:"secondaryEndpoints,omitempty"` - // Encryption - Gets the encryption settings on the account. If unspecified, the account is unencrypted. - Encryption *Encryption `json:"encryption,omitempty"` - // AccessTier - Required for storage accounts where kind = BlobStorage. The access tier used for billing. Possible values include: 'Hot', 'Cool' - AccessTier AccessTier `json:"accessTier,omitempty"` - // EnableHTTPSTrafficOnly - Allows https traffic only to storage service if sets to true. - EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"` - // NetworkRuleSet - Network rule set - NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"` -} - -// AccountPropertiesCreateParameters the parameters used to create the storage account. -type AccountPropertiesCreateParameters struct { - // CustomDomain - User domain assigned to the storage account. Name is the CNAME source. Only one custom domain is supported per storage account at this time. To clear the existing custom domain, use an empty string for the custom domain name property. - CustomDomain *CustomDomain `json:"customDomain,omitempty"` - // Encryption - Provides the encryption settings on the account. If left unspecified the account encryption settings will remain the same. The default setting is unencrypted. - Encryption *Encryption `json:"encryption,omitempty"` - // NetworkRuleSet - Network rule set - NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"` - // AccessTier - Required for storage accounts where kind = BlobStorage. The access tier used for billing. 
Possible values include: 'Hot', 'Cool' - AccessTier AccessTier `json:"accessTier,omitempty"` - // EnableHTTPSTrafficOnly - Allows https traffic only to storage service if sets to true. - EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"` -} - -// AccountPropertiesUpdateParameters the parameters used when updating a storage account. -type AccountPropertiesUpdateParameters struct { - // CustomDomain - Custom domain assigned to the storage account by the user. Name is the CNAME source. Only one custom domain is supported per storage account at this time. To clear the existing custom domain, use an empty string for the custom domain name property. - CustomDomain *CustomDomain `json:"customDomain,omitempty"` - // Encryption - Provides the encryption settings on the account. The default setting is unencrypted. - Encryption *Encryption `json:"encryption,omitempty"` - // AccessTier - Required for storage accounts where kind = BlobStorage. The access tier used for billing. Possible values include: 'Hot', 'Cool' - AccessTier AccessTier `json:"accessTier,omitempty"` - // EnableHTTPSTrafficOnly - Allows https traffic only to storage service if sets to true. - EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"` - // NetworkRuleSet - Network rule set - NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"` -} - -// AccountRegenerateKeyParameters the parameters used to regenerate the storage account key. -type AccountRegenerateKeyParameters struct { - // KeyName - The name of storage keys that want to be regenerated, possible vaules are key1, key2. - KeyName *string `json:"keyName,omitempty"` -} - -// AccountSasParameters the parameters to list SAS credentials of a storage account. -type AccountSasParameters struct { - // Services - The signed services accessible with the account SAS. Possible values include: Blob (b), Queue (q), Table (t), File (f). Possible values include: 'B', 'Q', 'T', 'F' - Services Services `json:"signedServices,omitempty"` - // ResourceTypes - The signed resource types that are accessible with the account SAS. Service (s): Access to service-level APIs; Container (c): Access to container-level APIs; Object (o): Access to object-level APIs for blobs, queue messages, table entities, and files. Possible values include: 'SignedResourceTypesS', 'SignedResourceTypesC', 'SignedResourceTypesO' - ResourceTypes SignedResourceTypes `json:"signedResourceTypes,omitempty"` - // Permissions - The signed permissions for the account SAS. Possible values include: Read (r), Write (w), Delete (d), List (l), Add (a), Create (c), Update (u) and Process (p). Possible values include: 'R', 'D', 'W', 'L', 'A', 'C', 'U', 'P' - Permissions Permissions `json:"signedPermission,omitempty"` - // IPAddressOrRange - An IP address or a range of IP addresses from which to accept requests. - IPAddressOrRange *string `json:"signedIp,omitempty"` - // Protocols - The protocol permitted for a request made with the account SAS. Possible values include: 'Httpshttp', 'HTTPS' - Protocols HTTPProtocol `json:"signedProtocol,omitempty"` - // SharedAccessStartTime - The time at which the SAS becomes valid. - SharedAccessStartTime *date.Time `json:"signedStart,omitempty"` - // SharedAccessExpiryTime - The time at which the shared access signature becomes invalid. - SharedAccessExpiryTime *date.Time `json:"signedExpiry,omitempty"` - // KeyToSign - The key to sign the account SAS token with. 
- KeyToSign *string `json:"keyToSign,omitempty"` -} - -// AccountsCreateFuture an abstraction for monitoring and retrieving the results of a long-running operation. -type AccountsCreateFuture struct { - azure.Future -} - -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. -func (future *AccountsCreateFuture) Result(client AccountsClient) (a Account, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsCreateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("storage.AccountsCreateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if a.Response.Response, err = future.GetResult(sender); err == nil && a.Response.Response.StatusCode != http.StatusNoContent { - a, err = client.CreateResponder(a.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsCreateFuture", "Result", a.Response.Response, "Failure responding to request") - } - } - return -} - -// AccountUpdateParameters the parameters that can be provided when updating the storage account properties. -type AccountUpdateParameters struct { - // Sku - Gets or sets the SKU name. Note that the SKU name cannot be updated to Standard_ZRS or Premium_LRS, nor can accounts of those sku names be updated to any other value. - Sku *Sku `json:"sku,omitempty"` - // Tags - Gets or sets a list of key value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater in length than 128 characters and a value no greater in length than 256 characters. - Tags map[string]*string `json:"tags"` - // Identity - The identity of the resource. - Identity *Identity `json:"identity,omitempty"` - // AccountPropertiesUpdateParameters - The parameters used when updating a storage account. - *AccountPropertiesUpdateParameters `json:"properties,omitempty"` - // Kind - Optional. Indicates the type of storage account. Currently only StorageV2 value supported by server. Possible values include: 'Storage', 'StorageV2', 'BlobStorage' - Kind Kind `json:"kind,omitempty"` -} - -// MarshalJSON is the custom marshaler for AccountUpdateParameters. -func (aup AccountUpdateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if aup.Sku != nil { - objectMap["sku"] = aup.Sku - } - if aup.Tags != nil { - objectMap["tags"] = aup.Tags - } - if aup.Identity != nil { - objectMap["identity"] = aup.Identity - } - if aup.AccountPropertiesUpdateParameters != nil { - objectMap["properties"] = aup.AccountPropertiesUpdateParameters - } - if aup.Kind != "" { - objectMap["kind"] = aup.Kind - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for AccountUpdateParameters struct. 
-func (aup *AccountUpdateParameters) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "sku": - if v != nil { - var sku Sku - err = json.Unmarshal(*v, &sku) - if err != nil { - return err - } - aup.Sku = &sku - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - aup.Tags = tags - } - case "identity": - if v != nil { - var identity Identity - err = json.Unmarshal(*v, &identity) - if err != nil { - return err - } - aup.Identity = &identity - } - case "properties": - if v != nil { - var accountPropertiesUpdateParameters AccountPropertiesUpdateParameters - err = json.Unmarshal(*v, &accountPropertiesUpdateParameters) - if err != nil { - return err - } - aup.AccountPropertiesUpdateParameters = &accountPropertiesUpdateParameters - } - case "kind": - if v != nil { - var kind Kind - err = json.Unmarshal(*v, &kind) - if err != nil { - return err - } - aup.Kind = kind - } - } - } - - return nil -} - -// CheckNameAvailabilityResult the CheckNameAvailability operation response. -type CheckNameAvailabilityResult struct { - autorest.Response `json:"-"` - // NameAvailable - Gets a boolean value that indicates whether the name is available for you to use. If true, the name is available. If false, the name has already been taken or is invalid and cannot be used. - NameAvailable *bool `json:"nameAvailable,omitempty"` - // Reason - Gets the reason that a storage account name could not be used. The Reason element is only returned if NameAvailable is false. Possible values include: 'AccountNameInvalid', 'AlreadyExists' - Reason Reason `json:"reason,omitempty"` - // Message - Gets an error message explaining the Reason value in more detail. - Message *string `json:"message,omitempty"` -} - -// CustomDomain the custom domain assigned to this storage account. This can be set via Update. -type CustomDomain struct { - // Name - Gets or sets the custom domain name assigned to the storage account. Name is the CNAME source. - Name *string `json:"name,omitempty"` - // UseSubDomain - Indicates whether indirect CName validation is enabled. Default value is false. This should only be set on updates. - UseSubDomain *bool `json:"useSubDomain,omitempty"` -} - -// Dimension dimension of blobs, possiblly be blob type or access tier. -type Dimension struct { - // Name - Display name of dimension. - Name *string `json:"name,omitempty"` - // DisplayName - Display name of dimension. - DisplayName *string `json:"displayName,omitempty"` -} - -// Encryption the encryption settings on the storage account. -type Encryption struct { - // Services - List of services which support encryption. - Services *EncryptionServices `json:"services,omitempty"` - // KeySource - The encryption keySource (provider). Possible values (case-insensitive): Microsoft.Storage, Microsoft.Keyvault. Possible values include: 'MicrosoftStorage', 'MicrosoftKeyvault' - KeySource KeySource `json:"keySource,omitempty"` - // KeyVaultProperties - Properties provided by key vault. - KeyVaultProperties *KeyVaultProperties `json:"keyvaultproperties,omitempty"` -} - -// EncryptionService a service that allows server-side encryption to be used. -type EncryptionService struct { - // Enabled - A boolean indicating whether or not the service encrypts the data as it is stored. 
- Enabled *bool `json:"enabled,omitempty"` - // LastEnabledTime - Gets a rough estimate of the date/time when the encryption was last enabled by the user. Only returned when encryption is enabled. There might be some unencrypted blobs which were written after this time, as it is just a rough estimate. - LastEnabledTime *date.Time `json:"lastEnabledTime,omitempty"` -} - -// EncryptionServices a list of services that support encryption. -type EncryptionServices struct { - // Blob - The encryption function of the blob storage service. - Blob *EncryptionService `json:"blob,omitempty"` - // File - The encryption function of the file storage service. - File *EncryptionService `json:"file,omitempty"` - // Table - The encryption function of the table storage service. - Table *EncryptionService `json:"table,omitempty"` - // Queue - The encryption function of the queue storage service. - Queue *EncryptionService `json:"queue,omitempty"` -} - -// Endpoints the URIs that are used to perform a retrieval of a public blob, queue, or table object. -type Endpoints struct { - // Blob - Gets the blob endpoint. - Blob *string `json:"blob,omitempty"` - // Queue - Gets the queue endpoint. - Queue *string `json:"queue,omitempty"` - // Table - Gets the table endpoint. - Table *string `json:"table,omitempty"` - // File - Gets the file endpoint. - File *string `json:"file,omitempty"` -} - -// Identity identity for the resource. -type Identity struct { - // PrincipalID - The principal ID of resource identity. - PrincipalID *string `json:"principalId,omitempty"` - // TenantID - The tenant ID of resource. - TenantID *string `json:"tenantId,omitempty"` - // Type - The identity type. - Type *string `json:"type,omitempty"` -} - -// IPRule IP rule with specific IP or IP range in CIDR format. -type IPRule struct { - // IPAddressOrRange - Specifies the IP or IP range in CIDR format. Only IPV4 address is allowed. - IPAddressOrRange *string `json:"value,omitempty"` - // Action - The action of IP ACL rule. Possible values include: 'Allow' - Action Action `json:"action,omitempty"` -} - -// KeyVaultProperties properties of key vault. -type KeyVaultProperties struct { - // KeyName - The name of KeyVault key. - KeyName *string `json:"keyname,omitempty"` - // KeyVersion - The version of KeyVault key. - KeyVersion *string `json:"keyversion,omitempty"` - // KeyVaultURI - The Uri of KeyVault. - KeyVaultURI *string `json:"keyvaulturi,omitempty"` -} - -// ListAccountSasResponse the List SAS credentials operation response. -type ListAccountSasResponse struct { - autorest.Response `json:"-"` - // AccountSasToken - List SAS credentials of storage account. - AccountSasToken *string `json:"accountSasToken,omitempty"` -} - -// ListServiceSasResponse the List service SAS credentials operation response. -type ListServiceSasResponse struct { - autorest.Response `json:"-"` - // ServiceSasToken - List service SAS credentials of speicific resource. - ServiceSasToken *string `json:"serviceSasToken,omitempty"` -} - -// MetricSpecification metric specification of operation. -type MetricSpecification struct { - // Name - Name of metric specification. - Name *string `json:"name,omitempty"` - // DisplayName - Display name of metric specification. - DisplayName *string `json:"displayName,omitempty"` - // DisplayDescription - Display description of metric specification. - DisplayDescription *string `json:"displayDescription,omitempty"` - // Unit - Unit could be Bytes or Count. 
- Unit *string `json:"unit,omitempty"` - // Dimensions - Dimensions of blobs, including blob type and access tier. - Dimensions *[]Dimension `json:"dimensions,omitempty"` - // AggregationType - Aggregation type could be Average. - AggregationType *string `json:"aggregationType,omitempty"` - // FillGapWithZero - The property to decide fill gap with zero or not. - FillGapWithZero *bool `json:"fillGapWithZero,omitempty"` - // Category - The category this metric specification belong to, could be Capacity. - Category *string `json:"category,omitempty"` - // ResourceIDDimensionNameOverride - Account Resource Id. - ResourceIDDimensionNameOverride *string `json:"resourceIdDimensionNameOverride,omitempty"` -} - -// NetworkRuleSet network rule set -type NetworkRuleSet struct { - // Bypass - Specifies whether traffic is bypassed for Logging/Metrics/AzureServices. Possible values are any combination of Logging|Metrics|AzureServices (For example, "Logging, Metrics"), or None to bypass none of those traffics. Possible values include: 'None', 'Logging', 'Metrics', 'AzureServices' - Bypass Bypass `json:"bypass,omitempty"` - // VirtualNetworkRules - Sets the virtual network rules - VirtualNetworkRules *[]VirtualNetworkRule `json:"virtualNetworkRules,omitempty"` - // IPRules - Sets the IP ACL rules - IPRules *[]IPRule `json:"ipRules,omitempty"` - // DefaultAction - Specifies the default action of allow or deny when no other rules match. Possible values include: 'DefaultActionAllow', 'DefaultActionDeny' - DefaultAction DefaultAction `json:"defaultAction,omitempty"` -} - -// Operation storage REST API operation definition. -type Operation struct { - // Name - Operation name: {provider}/{resource}/{operation} - Name *string `json:"name,omitempty"` - // Display - Display metadata associated with the operation. - Display *OperationDisplay `json:"display,omitempty"` - // Origin - The origin of operations. - Origin *string `json:"origin,omitempty"` - // OperationProperties - Properties of operation, include metric specifications. - *OperationProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for Operation. -func (o Operation) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if o.Name != nil { - objectMap["name"] = o.Name - } - if o.Display != nil { - objectMap["display"] = o.Display - } - if o.Origin != nil { - objectMap["origin"] = o.Origin - } - if o.OperationProperties != nil { - objectMap["properties"] = o.OperationProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for Operation struct. 
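The MarshalJSON above, and the UnmarshalJSON that follows, exist because the embedded *OperationProperties must be flattened into and out of a top-level "properties" key, which struct tags alone cannot express for an embedded pointer. A minimal, self-contained sketch of the same round-trip pattern, using hypothetical Widget/WidgetProperties types rather than this package's models:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// WidgetProperties stands in for OperationProperties: a nested payload
// that the wire format exposes under a "properties" key.
type WidgetProperties struct {
	Size *int `json:"size,omitempty"`
}

// Widget stands in for Operation: the embedded pointer is flattened by hand.
type Widget struct {
	Name              *string `json:"name,omitempty"`
	*WidgetProperties `json:"properties,omitempty"`
}

// MarshalJSON writes only the non-nil fields, placing the embedded
// properties under the "properties" key.
func (w Widget) MarshalJSON() ([]byte, error) {
	objectMap := make(map[string]interface{})
	if w.Name != nil {
		objectMap["name"] = w.Name
	}
	if w.WidgetProperties != nil {
		objectMap["properties"] = w.WidgetProperties
	}
	return json.Marshal(objectMap)
}

// UnmarshalJSON decodes each raw key individually, mirroring the
// generated unmarshaler above.
func (w *Widget) UnmarshalJSON(body []byte) error {
	var m map[string]*json.RawMessage
	if err := json.Unmarshal(body, &m); err != nil {
		return err
	}
	for k, v := range m {
		if v == nil {
			continue
		}
		switch k {
		case "name":
			var name string
			if err := json.Unmarshal(*v, &name); err != nil {
				return err
			}
			w.Name = &name
		case "properties":
			var p WidgetProperties
			if err := json.Unmarshal(*v, &p); err != nil {
				return err
			}
			w.WidgetProperties = &p
		}
	}
	return nil
}

func main() {
	var w Widget
	if err := json.Unmarshal([]byte(`{"name":"w1","properties":{"size":3}}`), &w); err != nil {
		panic(err)
	}
	out, _ := json.Marshal(w)
	fmt.Println(string(out)) // {"name":"w1","properties":{"size":3}}
}
```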
-func (o *Operation) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - o.Name = &name - } - case "display": - if v != nil { - var display OperationDisplay - err = json.Unmarshal(*v, &display) - if err != nil { - return err - } - o.Display = &display - } - case "origin": - if v != nil { - var origin string - err = json.Unmarshal(*v, &origin) - if err != nil { - return err - } - o.Origin = &origin - } - case "properties": - if v != nil { - var operationProperties OperationProperties - err = json.Unmarshal(*v, &operationProperties) - if err != nil { - return err - } - o.OperationProperties = &operationProperties - } - } - } - - return nil -} - -// OperationDisplay display metadata associated with the operation. -type OperationDisplay struct { - // Provider - Service provider: Microsoft Storage. - Provider *string `json:"provider,omitempty"` - // Resource - Resource on which the operation is performed etc. - Resource *string `json:"resource,omitempty"` - // Operation - Type of operation: get, read, delete, etc. - Operation *string `json:"operation,omitempty"` -} - -// OperationListResult result of the request to list Storage operations. It contains a list of operations and a URL -// link to get the next set of results. -type OperationListResult struct { - autorest.Response `json:"-"` - // Value - List of Storage operations supported by the Storage resource provider. - Value *[]Operation `json:"value,omitempty"` -} - -// OperationProperties properties of operation, include metric specifications. -type OperationProperties struct { - // ServiceSpecification - One property of operation, include metric specifications. - ServiceSpecification *ServiceSpecification `json:"serviceSpecification,omitempty"` -} - -// Resource describes a storage resource. -type Resource struct { - // ID - Resource Id - ID *string `json:"id,omitempty"` - // Name - Resource name - Name *string `json:"name,omitempty"` - // Type - Resource type - Type *string `json:"type,omitempty"` - // Location - Resource location - Location *string `json:"location,omitempty"` - // Tags - Tags assigned to a resource; can be used for viewing and grouping a resource (across resource groups). - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for Resource. -func (r Resource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if r.ID != nil { - objectMap["id"] = r.ID - } - if r.Name != nil { - objectMap["name"] = r.Name - } - if r.Type != nil { - objectMap["type"] = r.Type - } - if r.Location != nil { - objectMap["location"] = r.Location - } - if r.Tags != nil { - objectMap["tags"] = r.Tags - } - return json.Marshal(objectMap) -} - -// Restriction the restriction because of which SKU cannot be used. -type Restriction struct { - // Type - The type of restrictions. As of now only possible value for this is location. - Type *string `json:"type,omitempty"` - // Values - The value of restrictions. If the restriction type is set to location. This would be different locations where the SKU is restricted. - Values *[]string `json:"values,omitempty"` - // ReasonCode - The reason for the restriction. As of now this can be “QuotaId” or “NotAvailableForSubscription”. 
Quota Id is set when the SKU has requiredQuotas parameter as the subscription does not belong to that quota. The “NotAvailableForSubscription” is related to capacity at DC. Possible values include: 'QuotaID', 'NotAvailableForSubscription' - ReasonCode ReasonCode `json:"reasonCode,omitempty"` -} - -// ServiceSasParameters the parameters to list service SAS credentials of a speicific resource. -type ServiceSasParameters struct { - // CanonicalizedResource - The canonical path to the signed resource. - CanonicalizedResource *string `json:"canonicalizedResource,omitempty"` - // Resource - The signed services accessible with the service SAS. Possible values include: Blob (b), Container (c), File (f), Share (s). Possible values include: 'SignedResourceB', 'SignedResourceC', 'SignedResourceF', 'SignedResourceS' - Resource SignedResource `json:"signedResource,omitempty"` - // Permissions - The signed permissions for the service SAS. Possible values include: Read (r), Write (w), Delete (d), List (l), Add (a), Create (c), Update (u) and Process (p). Possible values include: 'R', 'D', 'W', 'L', 'A', 'C', 'U', 'P' - Permissions Permissions `json:"signedPermission,omitempty"` - // IPAddressOrRange - An IP address or a range of IP addresses from which to accept requests. - IPAddressOrRange *string `json:"signedIp,omitempty"` - // Protocols - The protocol permitted for a request made with the account SAS. Possible values include: 'Httpshttp', 'HTTPS' - Protocols HTTPProtocol `json:"signedProtocol,omitempty"` - // SharedAccessStartTime - The time at which the SAS becomes valid. - SharedAccessStartTime *date.Time `json:"signedStart,omitempty"` - // SharedAccessExpiryTime - The time at which the shared access signature becomes invalid. - SharedAccessExpiryTime *date.Time `json:"signedExpiry,omitempty"` - // Identifier - A unique value up to 64 characters in length that correlates to an access policy specified for the container, queue, or table. - Identifier *string `json:"signedIdentifier,omitempty"` - // PartitionKeyStart - The start of partition key. - PartitionKeyStart *string `json:"startPk,omitempty"` - // PartitionKeyEnd - The end of partition key. - PartitionKeyEnd *string `json:"endPk,omitempty"` - // RowKeyStart - The start of row key. - RowKeyStart *string `json:"startRk,omitempty"` - // RowKeyEnd - The end of row key. - RowKeyEnd *string `json:"endRk,omitempty"` - // KeyToSign - The key to sign the account SAS token with. - KeyToSign *string `json:"keyToSign,omitempty"` - // CacheControl - The response header override for cache control. - CacheControl *string `json:"rscc,omitempty"` - // ContentDisposition - The response header override for content disposition. - ContentDisposition *string `json:"rscd,omitempty"` - // ContentEncoding - The response header override for content encoding. - ContentEncoding *string `json:"rsce,omitempty"` - // ContentLanguage - The response header override for content language. - ContentLanguage *string `json:"rscl,omitempty"` - // ContentType - The response header override for content type. - ContentType *string `json:"rsct,omitempty"` -} - -// ServiceSpecification one property of operation, include metric specifications. -type ServiceSpecification struct { - // MetricSpecifications - Metric specifications of operation. - MetricSpecifications *[]MetricSpecification `json:"metricSpecifications,omitempty"` -} - -// Sku the SKU of the storage account. -type Sku struct { - // Name - Gets or sets the sku name. Required for account creation; optional for update. 
Note that in older versions, sku name was called accountType. Possible values include: 'StandardLRS', 'StandardGRS', 'StandardRAGRS', 'StandardZRS', 'PremiumLRS' - Name SkuName `json:"name,omitempty"` - // Tier - Gets the sku tier. This is based on the SKU name. Possible values include: 'Standard', 'Premium' - Tier SkuTier `json:"tier,omitempty"` - // ResourceType - The type of the resource, usually it is 'storageAccounts'. - ResourceType *string `json:"resourceType,omitempty"` - // Kind - Indicates the type of storage account. Possible values include: 'Storage', 'StorageV2', 'BlobStorage' - Kind Kind `json:"kind,omitempty"` - // Locations - The set of locations that the SKU is available. This will be supported and registered Azure Geo Regions (e.g. West US, East US, Southeast Asia, etc.). - Locations *[]string `json:"locations,omitempty"` - // Capabilities - The capability information in the specified sku, including file encryption, network acls, change notification, etc. - Capabilities *[]SKUCapability `json:"capabilities,omitempty"` - // Restrictions - The restrictions because of which SKU cannot be used. This is empty if there are no restrictions. - Restrictions *[]Restriction `json:"restrictions,omitempty"` -} - -// SKUCapability the capability information in the specified sku, including file encryption, network acls, change -// notification, etc. -type SKUCapability struct { - // Name - The name of capability, The capability information in the specified sku, including file encryption, network acls, change notification, etc. - Name *string `json:"name,omitempty"` - // Value - A string value to indicate states of given capability. Possibly 'true' or 'false'. - Value *string `json:"value,omitempty"` -} - -// SkuListResult the response from the List Storage SKUs operation. -type SkuListResult struct { - autorest.Response `json:"-"` - // Value - Get the list result of storage SKUs and their properties. - Value *[]Sku `json:"value,omitempty"` -} - -// Usage describes Storage Resource Usage. -type Usage struct { - // Unit - Gets the unit of measurement. Possible values include: 'Count', 'Bytes', 'Seconds', 'Percent', 'CountsPerSecond', 'BytesPerSecond' - Unit UsageUnit `json:"unit,omitempty"` - // CurrentValue - Gets the current count of the allocated resources in the subscription. - CurrentValue *int32 `json:"currentValue,omitempty"` - // Limit - Gets the maximum count of the resources that can be allocated in the subscription. - Limit *int32 `json:"limit,omitempty"` - // Name - Gets the name of the type of usage. - Name *UsageName `json:"name,omitempty"` -} - -// UsageListResult the response from the List Usages operation. -type UsageListResult struct { - autorest.Response `json:"-"` - // Value - Gets or sets the list of Storage Resource Usages. - Value *[]Usage `json:"value,omitempty"` -} - -// UsageName the usage names that can be used; currently limited to StorageAccount. -type UsageName struct { - // Value - Gets a string describing the resource name. - Value *string `json:"value,omitempty"` - // LocalizedValue - Gets a localized string describing the resource name. - LocalizedValue *string `json:"localizedValue,omitempty"` -} - -// VirtualNetworkRule virtual Network rule. -type VirtualNetworkRule struct { - // VirtualNetworkResourceID - Resource ID of a subnet, for example: /subscriptions/{subscriptionId}/resourceGroups/{groupName}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName}. 
- VirtualNetworkResourceID *string `json:"id,omitempty"` - // Action - The action of virtual network rule. Possible values include: 'Allow' - Action Action `json:"action,omitempty"` - // State - Gets the state of virtual network rule. Possible values include: 'StateProvisioning', 'StateDeprovisioning', 'StateSucceeded', 'StateFailed', 'StateNetworkSourceDeleted' - State State `json:"state,omitempty"` -} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage/operations.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage/operations.go deleted file mode 100644 index b36bc6d68447..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage/operations.go +++ /dev/null @@ -1,98 +0,0 @@ -package storage - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "net/http" -) - -// OperationsClient is the the Azure Storage Management API. -type OperationsClient struct { - BaseClient -} - -// NewOperationsClient creates an instance of the OperationsClient client. -func NewOperationsClient(subscriptionID string) OperationsClient { - return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client. -func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient { - return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// List lists all of the available Storage Rest API operations. -func (client OperationsClient) List(ctx context.Context) (result OperationListResult, err error) { - req, err := client.ListPreparer(ctx) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", resp, "Failure sending request") - return - } - - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", resp, "Failure responding to request") - } - - return -} - -// ListPreparer prepares the List request. 
-func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) { - const APIVersion = "2017-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/providers/Microsoft.Storage/operations"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage/usage.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage/usage.go deleted file mode 100644 index 16942e6f7542..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage/usage.go +++ /dev/null @@ -1,102 +0,0 @@ -package storage - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "net/http" -) - -// UsageClient is the the Azure Storage Management API. -type UsageClient struct { - BaseClient -} - -// NewUsageClient creates an instance of the UsageClient client. -func NewUsageClient(subscriptionID string) UsageClient { - return NewUsageClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewUsageClientWithBaseURI creates an instance of the UsageClient client. -func NewUsageClientWithBaseURI(baseURI string, subscriptionID string) UsageClient { - return UsageClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// List gets the current usage count and the limit for the resources under the subscription. 
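Every method in these deleted clients, and in their 2018-07-01 replacements later in this diff, is split into the same three stages: a Preparer builds the *http.Request, a Sender executes it behind retry decoration, and a Responder checks the status code and unmarshals the body. A stdlib-only sketch of that decomposition; the URL, types, and function names here are illustrative and are not autorest APIs:

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
)

// usageList loosely mirrors UsageListResult's wire shape.
type usageList struct {
	Value []struct {
		CurrentValue int32 `json:"currentValue"`
		Limit        int32 `json:"limit"`
	} `json:"value"`
}

// listPreparer builds the request: base URL, path, api-version. No I/O yet.
func listPreparer(ctx context.Context, baseURL, subscriptionID string) (*http.Request, error) {
	url := fmt.Sprintf("%s/subscriptions/%s/providers/Microsoft.Storage/usages?api-version=2017-10-01",
		baseURL, subscriptionID)
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	return req.WithContext(ctx), nil
}

// listSender executes the request; retry and auth decoration would wrap
// the transport at this stage.
func listSender(req *http.Request) (*http.Response, error) {
	return http.DefaultClient.Do(req)
}

// listResponder enforces the expected status code and decodes the body,
// always closing it.
func listResponder(resp *http.Response) (usageList, error) {
	defer resp.Body.Close()
	var out usageList
	if resp.StatusCode != http.StatusOK {
		return out, fmt.Errorf("unexpected status %d", resp.StatusCode)
	}
	err := json.NewDecoder(resp.Body).Decode(&out)
	return out, err
}

func main() {
	req, err := listPreparer(context.Background(), "https://management.azure.com", "<subscription-id>")
	if err != nil {
		panic(err)
	}
	resp, err := listSender(req) // fails without real credentials; shape only
	if err != nil {
		fmt.Println("send:", err)
		return
	}
	result, err := listResponder(resp)
	fmt.Println(result, err)
}
```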
-func (client UsageClient) List(ctx context.Context) (result UsageListResult, err error) { - req, err := client.ListPreparer(ctx) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.UsageClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "storage.UsageClient", "List", resp, "Failure sending request") - return - } - - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.UsageClient", "List", resp, "Failure responding to request") - } - - return -} - -// ListPreparer prepares the List request. -func (client UsageClient) ListPreparer(ctx context.Context) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/usages", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client UsageClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client UsageClient) ListResponder(resp *http.Response) (result UsageListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage/version.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage/version.go deleted file mode 100644 index 884a2478f8d8..000000000000 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage/version.go +++ /dev/null @@ -1,30 +0,0 @@ -package storage - -import "github.com/Azure/azure-sdk-for-go/version" - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -// UserAgent returns the UserAgent string to use when sending http.Requests. 
-func UserAgent() string { - return "Azure-SDK-For-Go/" + version.Number + " storage/2017-10-01" -} - -// Version returns the semantic version (see http://semver.org) of the client. -func Version() string { - return version.Number -} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage/accounts.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage/accounts.go similarity index 93% rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage/accounts.go rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage/accounts.go index 56dc300ec18f..1af73b7c9c89 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage/accounts.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage/accounts.go @@ -48,7 +48,9 @@ func (client AccountsClient) CheckNameAvailability(ctx context.Context, accountN if err := validation.Validate([]validation.Validation{ {TargetValue: accountName, Constraints: []validation.Constraint{{Target: "accountName.Name", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "accountName.Type", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + {Target: "accountName.Type", Name: validation.Null, Rule: true, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { return result, validation.NewError("storage.AccountsClient", "CheckNameAvailability", err.Error()) } @@ -79,7 +81,7 @@ func (client AccountsClient) CheckNameAvailabilityPreparer(ctx context.Context, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-10-01" + const APIVersion = "2018-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -141,7 +143,9 @@ func (client AccountsClient) Create(ctx context.Context, resourceGroupName strin {Target: "parameters.AccountPropertiesCreateParameters", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.CustomDomain", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.CustomDomain.Name", Name: validation.Null, Rule: true, Chain: nil}}}, - }}}}}); err != nil { + }}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { return result, validation.NewError("storage.AccountsClient", "Create", err.Error()) } @@ -168,7 +172,7 @@ func (client AccountsClient) CreatePreparer(ctx context.Context, resourceGroupNa "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-10-01" + const APIVersion = "2018-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -227,7 +231,9 @@ func (client AccountsClient) Delete(ctx context.Context, resourceGroupName strin {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, {TargetValue: accountName, Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: 
validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { return result, validation.NewError("storage.AccountsClient", "Delete", err.Error()) } @@ -260,7 +266,7 @@ func (client AccountsClient) DeletePreparer(ctx context.Context, resourceGroupNa "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-10-01" + const APIVersion = "2018-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -307,7 +313,9 @@ func (client AccountsClient) GetProperties(ctx context.Context, resourceGroupNam {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, {TargetValue: accountName, Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { return result, validation.NewError("storage.AccountsClient", "GetProperties", err.Error()) } @@ -340,7 +348,7 @@ func (client AccountsClient) GetPropertiesPreparer(ctx context.Context, resource "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-10-01" + const APIVersion = "2018-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -376,6 +384,12 @@ func (client AccountsClient) GetPropertiesResponder(resp *http.Response) (result // List lists all the storage accounts available under the subscription. Note that storage keys are not returned; use // the ListKeys operation for this. 
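The hunk below adds a client-side guard: List now rejects an empty SubscriptionID before any request leaves the process, using the same validation package as the rest of this file. A hedged sketch of what a caller would observe, assuming the vendored 2018-07-01 package path and the usual generated NewAccountsClient constructor:

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage"
)

func main() {
	client := storage.NewAccountsClient("") // empty subscription ID on purpose

	// The MinLength(1) constraint on client.SubscriptionID trips before any
	// HTTP traffic; the message follows validation.NewError's
	// "storage.AccountsClient#List: Invalid input: ..." format.
	_, err := client.List(context.Background())
	fmt.Println(err)
}
```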
func (client AccountsClient) List(ctx context.Context) (result AccountListResult, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.AccountsClient", "List", err.Error()) + } + req, err := client.ListPreparer(ctx) if err != nil { err = autorest.NewErrorWithError(err, "storage.AccountsClient", "List", nil, "Failure preparing request") @@ -403,7 +417,7 @@ func (client AccountsClient) ListPreparer(ctx context.Context) (*http.Request, e "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-10-01" + const APIVersion = "2018-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -453,7 +467,9 @@ func (client AccountsClient) ListAccountSAS(ctx context.Context, resourceGroupNa Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.SharedAccessExpiryTime", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + Constraints: []validation.Constraint{{Target: "parameters.SharedAccessExpiryTime", Name: validation.Null, Rule: true, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { return result, validation.NewError("storage.AccountsClient", "ListAccountSAS", err.Error()) } @@ -486,7 +502,7 @@ func (client AccountsClient) ListAccountSASPreparer(ctx context.Context, resourc "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-10-01" + const APIVersion = "2018-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -531,7 +547,9 @@ func (client AccountsClient) ListByResourceGroup(ctx context.Context, resourceGr {TargetValue: resourceGroupName, Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { return result, validation.NewError("storage.AccountsClient", "ListByResourceGroup", err.Error()) } @@ -563,7 +581,7 @@ func (client AccountsClient) ListByResourceGroupPreparer(ctx context.Context, re "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-10-01" + const APIVersion = "2018-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -610,7 +628,9 @@ func (client AccountsClient) ListKeys(ctx context.Context, resourceGroupName str {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, {TargetValue: accountName, Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: 
"accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { return result, validation.NewError("storage.AccountsClient", "ListKeys", err.Error()) } @@ -643,7 +663,7 @@ func (client AccountsClient) ListKeysPreparer(ctx context.Context, resourceGroup "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-10-01" + const APIVersion = "2018-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -695,7 +715,9 @@ func (client AccountsClient) ListServiceSAS(ctx context.Context, resourceGroupNa {TargetValue: parameters, Constraints: []validation.Constraint{{Target: "parameters.CanonicalizedResource", Name: validation.Null, Rule: true, Chain: nil}, {Target: "parameters.Identifier", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.Identifier", Name: validation.MaxLength, Rule: 64, Chain: nil}}}}}}); err != nil { + Chain: []validation.Constraint{{Target: "parameters.Identifier", Name: validation.MaxLength, Rule: 64, Chain: nil}}}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { return result, validation.NewError("storage.AccountsClient", "ListServiceSAS", err.Error()) } @@ -728,7 +750,7 @@ func (client AccountsClient) ListServiceSASPreparer(ctx context.Context, resourc "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-10-01" + const APIVersion = "2018-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -780,7 +802,9 @@ func (client AccountsClient) RegenerateKey(ctx context.Context, resourceGroupNam Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, {TargetValue: regenerateKey, - Constraints: []validation.Constraint{{Target: "regenerateKey.KeyName", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + Constraints: []validation.Constraint{{Target: "regenerateKey.KeyName", Name: validation.Null, Rule: true, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { return result, validation.NewError("storage.AccountsClient", "RegenerateKey", err.Error()) } @@ -813,7 +837,7 @@ func (client AccountsClient) RegenerateKeyPreparer(ctx context.Context, resource "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-10-01" + const APIVersion = "2018-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -868,7 +892,9 @@ func (client AccountsClient) Update(ctx context.Context, resourceGroupName strin {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, {TargetValue: accountName, Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, - {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { + {Target: "accountName", Name: validation.MinLength, Rule: 
3, Chain: nil}}},
+		{TargetValue: client.SubscriptionID,
+			Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
 		return result, validation.NewError("storage.AccountsClient", "Update", err.Error())
 	}
 
@@ -901,7 +927,7 @@ func (client AccountsClient) UpdatePreparer(ctx context.Context, resourceGroupNa
 		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
 	}
 
-	const APIVersion = "2017-10-01"
+	const APIVersion = "2018-07-01"
 	queryParameters := map[string]interface{}{
 		"api-version": APIVersion,
 	}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage/blobcontainers.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage/blobcontainers.go
new file mode 100644
index 000000000000..d8d5cdafced0
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage/blobcontainers.go
@@ -0,0 +1,1188 @@
+package storage
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+	"context"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/autorest/validation"
+	"net/http"
+)
+
+// BlobContainersClient is the Azure Storage Management API.
+type BlobContainersClient struct {
+	BaseClient
+}
+
+// NewBlobContainersClient creates an instance of the BlobContainersClient client.
+func NewBlobContainersClient(subscriptionID string) BlobContainersClient {
+	return NewBlobContainersClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewBlobContainersClientWithBaseURI creates an instance of the BlobContainersClient client.
+func NewBlobContainersClientWithBaseURI(baseURI string, subscriptionID string) BlobContainersClient {
+	return BlobContainersClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// ClearLegalHold clears legal hold tags. Clearing the same or non-existent tag results in an idempotent operation.
+// ClearLegalHold clears out only the specified tags in the request.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// containerName - the name of the blob container within the specified storage account. Blob container names
+// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every
+// dash (-) character must be immediately preceded and followed by a letter or number.
+// legalHold - the LegalHold property that will be cleared from a blob container.
+func (client BlobContainersClient) ClearLegalHold(ctx context.Context, resourceGroupName string, accountName string, containerName string, legalHold LegalHold) (result LegalHold, err error) {
+	if err := validation.Validate([]validation.Validation{
+		{TargetValue: resourceGroupName,
+			Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+				{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+				{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+		{TargetValue: accountName,
+			Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+				{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+		{TargetValue: containerName,
+			Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+				{Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+		{TargetValue: client.SubscriptionID,
+			Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+		{TargetValue: legalHold,
+			Constraints: []validation.Constraint{{Target: "legalHold.Tags", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+		return result, validation.NewError("storage.BlobContainersClient", "ClearLegalHold", err.Error())
+	}
+
+	req, err := client.ClearLegalHoldPreparer(ctx, resourceGroupName, accountName, containerName, legalHold)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ClearLegalHold", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.ClearLegalHoldSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ClearLegalHold", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.ClearLegalHoldResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ClearLegalHold", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ClearLegalHoldPreparer prepares the ClearLegalHold request.
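A hedged caller sketch for the ClearLegalHold operation above. The LegalHold model, whose Tags field is the one required by the validation block, lives in this package's generated models file; the resource names and credential wiring are placeholders:

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage"
)

func main() {
	client := storage.NewBlobContainersClient("<subscription-id>")
	// client.Authorizer = ...  // credential wiring omitted in this sketch

	tags := []string{"audit-2018"}
	hold, err := client.ClearLegalHold(context.Background(),
		"my-rg", "mystorageacct", "my-container",
		storage.LegalHold{Tags: &tags})
	if err != nil {
		fmt.Println(err)
		return
	}
	if hold.Tags != nil {
		fmt.Println(*hold.Tags) // tags still held after the clear, if any
	}
}
```

Because clearing an absent tag is documented as idempotent, retrying this call after a timeout is safe.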
+func (client BlobContainersClient) ClearLegalHoldPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, legalHold LegalHold) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "containerName": autorest.Encode("path", containerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/clearLegalHold", pathParameters), + autorest.WithJSON(legalHold), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ClearLegalHoldSender sends the ClearLegalHold request. The method will close the +// http.Response Body if it receives an error. +func (client BlobContainersClient) ClearLegalHoldSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ClearLegalHoldResponder handles the response to the ClearLegalHold request. The method always +// closes the http.Response Body. +func (client BlobContainersClient) ClearLegalHoldResponder(resp *http.Response) (result LegalHold, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Create creates a new container under the specified account as described by request body. The container resource +// includes metadata and properties for that container. It does not include a list of the blobs contained by the +// container. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// containerName - the name of the blob container within the specified storage account. Blob container names +// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every +// dash (-) character must be immediately preceded and followed by a letter or number. +// blobContainer - properties of the blob container to create. 
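A hedged sketch of the Create call documented above. An empty BlobContainer body passes the client-side validation, since all of its constraints sit behind Rule: false chains, and creates a container with service defaults; per the CreateResponder further below, both 200 and 201 count as success:

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage"
)

func main() {
	client := storage.NewBlobContainersClient("<subscription-id>")
	// client.Authorizer = ...  // credential wiring omitted in this sketch

	_, err := client.Create(context.Background(),
		"my-rg", "mystorageacct", "my-container", storage.BlobContainer{})
	fmt.Println(err) // nil on HTTP 200 or 201
}
```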
+func (client BlobContainersClient) Create(ctx context.Context, resourceGroupName string, accountName string, containerName string, blobContainer BlobContainer) (result BlobContainer, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: containerName, + Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: blobContainer, + Constraints: []validation.Constraint{{Target: "blobContainer.ContainerProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "blobContainer.ContainerProperties.ImmutabilityPolicy", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "blobContainer.ContainerProperties.ImmutabilityPolicy.ImmutabilityPolicyProperty", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "blobContainer.ContainerProperties.ImmutabilityPolicy.ImmutabilityPolicyProperty.ImmutabilityPeriodSinceCreationInDays", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + }}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.BlobContainersClient", "Create", err.Error()) + } + + req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, containerName, blobContainer) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Create", nil, "Failure preparing request") + return + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Create", resp, "Failure sending request") + return + } + + result, err = client.CreateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Create", resp, "Failure responding to request") + } + + return +} + +// CreatePreparer prepares the Create request. 
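The nested Constraint chains in Create above encode optionality: Rule: false on an outer Null constraint means the chain is skipped when the value is nil, while Rule: true on a chained Null constraint makes that inner field mandatory once its parent is present. A hedged, standalone demonstration against the same go-autorest validation package; the outer/inner types here are illustrative:

```go
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/validation"
)

type inner struct {
	Days *int32 // required once Inner is supplied
}

type outer struct {
	Inner *inner // optional as a whole
}

func check(o outer) error {
	return validation.Validate([]validation.Validation{
		{TargetValue: o,
			Constraints: []validation.Constraint{{Target: "o.Inner", Name: validation.Null, Rule: false,
				Chain: []validation.Constraint{{Target: "o.Inner.Days", Name: validation.Null, Rule: true, Chain: nil}}}}}})
}

func main() {
	// Inner omitted entirely: the Rule:false outer constraint passes and
	// the chain never runs.
	fmt.Println(check(outer{}))

	// Inner present but Days missing: the chained Null/Rule:true
	// constraint should now report o.Inner.Days as required.
	fmt.Println(check(outer{Inner: &inner{}}))
}
```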
+func (client BlobContainersClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, blobContainer BlobContainer) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "containerName": autorest.Encode("path", containerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}", pathParameters), + autorest.WithJSON(blobContainer), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client BlobContainersClient) CreateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client BlobContainersClient) CreateResponder(resp *http.Response) (result BlobContainer, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateOrUpdateImmutabilityPolicy creates or updates an unlocked immutability policy. ETag in If-Match is honored if +// given but not required for this operation. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// containerName - the name of the blob container within the specified storage account. Blob container names +// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every +// dash (-) character must be immediately preceded and followed by a letter or number. +// parameters - the ImmutabilityPolicy Properties that will be created or updated to a blob container. +// ifMatch - the entity state (ETag) version of the immutability policy to update. A value of "*" can be used +// to apply the operation only if the immutability policy already exists. If omitted, this operation will +// always be applied. 
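ETag handling for the unlocked-policy write documented above: its preparer, further below, sets If-Match only for a non-empty ifMatch, so "" creates or overwrites unconditionally, while echoing a previously returned ETag turns the second write into a compare-and-swap. A hedged sketch; the Etag field on ImmutabilityPolicy is assumed from the generated models:

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage"
)

func main() {
	client := storage.NewBlobContainersClient("<subscription-id>")
	ctx := context.Background()

	days := int32(30)
	policy := &storage.ImmutabilityPolicy{
		ImmutabilityPolicyProperty: &storage.ImmutabilityPolicyProperty{
			ImmutabilityPeriodSinceCreationInDays: &days,
		},
	}

	// First write: empty ifMatch, applied unconditionally.
	created, err := client.CreateOrUpdateImmutabilityPolicy(ctx,
		"my-rg", "mystorageacct", "my-container", policy, "")
	if err != nil {
		fmt.Println(err)
		return
	}

	// Second write: only applies if nobody changed the policy in between;
	// a concurrent writer surfaces as an HTTP 412 error rather than a
	// silently lost update.
	days = 60
	_, err = client.CreateOrUpdateImmutabilityPolicy(ctx,
		"my-rg", "mystorageacct", "my-container", policy, *created.Etag)
	fmt.Println(err)
}
```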
+func (client BlobContainersClient) CreateOrUpdateImmutabilityPolicy(ctx context.Context, resourceGroupName string, accountName string, containerName string, parameters *ImmutabilityPolicy, ifMatch string) (result ImmutabilityPolicy, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: containerName, + Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ImmutabilityPolicyProperty", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.ImmutabilityPolicyProperty.ImmutabilityPeriodSinceCreationInDays", Name: validation.Null, Rule: true, Chain: nil}}}, + }}}}}); err != nil { + return result, validation.NewError("storage.BlobContainersClient", "CreateOrUpdateImmutabilityPolicy", err.Error()) + } + + req, err := client.CreateOrUpdateImmutabilityPolicyPreparer(ctx, resourceGroupName, accountName, containerName, parameters, ifMatch) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "CreateOrUpdateImmutabilityPolicy", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateImmutabilityPolicySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "CreateOrUpdateImmutabilityPolicy", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateImmutabilityPolicyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "CreateOrUpdateImmutabilityPolicy", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdateImmutabilityPolicyPreparer prepares the CreateOrUpdateImmutabilityPolicy request. 
+func (client BlobContainersClient) CreateOrUpdateImmutabilityPolicyPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, parameters *ImmutabilityPolicy, ifMatch string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "containerName": autorest.Encode("path", containerName), + "immutabilityPolicyName": autorest.Encode("path", "default"), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if parameters != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(parameters)) + } + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateImmutabilityPolicySender sends the CreateOrUpdateImmutabilityPolicy request. The method will close the +// http.Response Body if it receives an error. +func (client BlobContainersClient) CreateOrUpdateImmutabilityPolicySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// CreateOrUpdateImmutabilityPolicyResponder handles the response to the CreateOrUpdateImmutabilityPolicy request. The method always +// closes the http.Response Body. +func (client BlobContainersClient) CreateOrUpdateImmutabilityPolicyResponder(resp *http.Response) (result ImmutabilityPolicy, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes specified container under its account. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// containerName - the name of the blob container within the specified storage account. Blob container names +// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every +// dash (-) character must be immediately preceded and followed by a letter or number. 
+func (client BlobContainersClient) Delete(ctx context.Context, resourceGroupName string, accountName string, containerName string) (result autorest.Response, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: containerName, + Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.BlobContainersClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, containerName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client BlobContainersClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "containerName": autorest.Encode("path", containerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client BlobContainersClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. 
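Because the DeleteResponder just below accepts both http.StatusOK and http.StatusNoContent, callers of the Delete operation above need no special case when the service answers 204 because there was nothing to delete. A hedged sketch:

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage"
)

func main() {
	client := storage.NewBlobContainersClient("<subscription-id>")

	resp, err := client.Delete(context.Background(),
		"my-rg", "mystorageacct", "my-container")
	if err != nil {
		fmt.Println(err) // other status codes surface here as errors
		return
	}
	// autorest.Response embeds *http.Response, so the raw code is visible:
	fmt.Println(resp.StatusCode) // 200, or 204 when nothing was there
}
```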
+func (client BlobContainersClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// DeleteImmutabilityPolicy aborts an unlocked immutability policy. The response of delete has
+// immutabilityPeriodSinceCreationInDays set to 0. ETag in If-Match is required for this operation. Deleting a locked
+// immutability policy is not allowed; the only way is to delete the container after deleting all blobs inside the
+// container.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// containerName - the name of the blob container within the specified storage account. Blob container names
+// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every
+// dash (-) character must be immediately preceded and followed by a letter or number.
+// ifMatch - the entity state (ETag) version of the immutability policy to update. A value of "*" can be used
+// to apply the operation only if the immutability policy already exists. If omitted, this operation will
+// always be applied.
+func (client BlobContainersClient) DeleteImmutabilityPolicy(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (result ImmutabilityPolicy, err error) {
+	if err := validation.Validate([]validation.Validation{
+		{TargetValue: resourceGroupName,
+			Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+				{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+				{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+		{TargetValue: accountName,
+			Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+				{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+		{TargetValue: containerName,
+			Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+				{Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+		{TargetValue: client.SubscriptionID,
+			Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+		return result, validation.NewError("storage.BlobContainersClient", "DeleteImmutabilityPolicy", err.Error())
+	}
+
+	req, err := client.DeleteImmutabilityPolicyPreparer(ctx, resourceGroupName, accountName, containerName, ifMatch)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "DeleteImmutabilityPolicy", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.DeleteImmutabilityPolicySender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "DeleteImmutabilityPolicy", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.DeleteImmutabilityPolicyResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "DeleteImmutabilityPolicy", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// DeleteImmutabilityPolicyPreparer prepares the DeleteImmutabilityPolicy request.
+func (client BlobContainersClient) DeleteImmutabilityPolicyPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"accountName":            autorest.Encode("path", accountName),
+		"containerName":          autorest.Encode("path", containerName),
+		"immutabilityPolicyName": autorest.Encode("path", "default"),
+		"resourceGroupName":      autorest.Encode("path", resourceGroupName),
+		"subscriptionId":         autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2018-07-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters),
+		autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteImmutabilityPolicySender sends the DeleteImmutabilityPolicy request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobContainersClient) DeleteImmutabilityPolicySender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req,
+		azure.DoRetryWithRegistration(client.Client))
+}
+
+// DeleteImmutabilityPolicyResponder handles the response to the DeleteImmutabilityPolicy request. The method always
+// closes the http.Response Body.
+func (client BlobContainersClient) DeleteImmutabilityPolicyResponder(resp *http.Response) (result ImmutabilityPolicy, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ExtendImmutabilityPolicy extends the immutabilityPeriodSinceCreationInDays of a locked immutabilityPolicy. The only
+// action allowed on a Locked policy is this action. ETag in If-Match is required for this operation.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// containerName - the name of the blob container within the specified storage account. Blob container names
+// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every
+// dash (-) character must be immediately preceded and followed by a letter or number.
+// ifMatch - the entity state (ETag) version of the immutability policy to update. A value of "*" can be used
+// to apply the operation only if the immutability policy already exists. If omitted, this operation will
+// always be applied.
+// parameters - the ImmutabilityPolicy Properties that will be extended for a blob container.
+func (client BlobContainersClient) ExtendImmutabilityPolicy(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string, parameters *ImmutabilityPolicy) (result ImmutabilityPolicy, err error) {
+	if err := validation.Validate([]validation.Validation{
+		{TargetValue: resourceGroupName,
+			Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+				{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+				{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+		{TargetValue: accountName,
+			Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+				{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+		{TargetValue: containerName,
+			Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+				{Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+		{TargetValue: client.SubscriptionID,
+			Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+		{TargetValue: parameters,
+			Constraints: []validation.Constraint{{Target: "parameters", Name: validation.Null, Rule: false,
+				Chain: []validation.Constraint{{Target: "parameters.ImmutabilityPolicyProperty", Name: validation.Null, Rule: true,
+					Chain: []validation.Constraint{{Target: "parameters.ImmutabilityPolicyProperty.ImmutabilityPeriodSinceCreationInDays", Name: validation.Null, Rule: true, Chain: nil}}},
+				}}}}}); err != nil {
+		return result, validation.NewError("storage.BlobContainersClient", "ExtendImmutabilityPolicy", err.Error())
+	}
+
+	req, err := client.ExtendImmutabilityPolicyPreparer(ctx, resourceGroupName, accountName, containerName, ifMatch, parameters)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ExtendImmutabilityPolicy", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.ExtendImmutabilityPolicySender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ExtendImmutabilityPolicy", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.ExtendImmutabilityPolicyResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ExtendImmutabilityPolicy", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ExtendImmutabilityPolicyPreparer prepares the ExtendImmutabilityPolicy request.
+func (client BlobContainersClient) ExtendImmutabilityPolicyPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string, parameters *ImmutabilityPolicy) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "containerName": autorest.Encode("path", containerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/default/extend", pathParameters), + autorest.WithQueryParameters(queryParameters), + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + if parameters != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(parameters)) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ExtendImmutabilityPolicySender sends the ExtendImmutabilityPolicy request. The method will close the +// http.Response Body if it receives an error. +func (client BlobContainersClient) ExtendImmutabilityPolicySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ExtendImmutabilityPolicyResponder handles the response to the ExtendImmutabilityPolicy request. The method always +// closes the http.Response Body. +func (client BlobContainersClient) ExtendImmutabilityPolicyResponder(resp *http.Response) (result ImmutabilityPolicy, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get gets properties of a specified container. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// containerName - the name of the blob container within the specified storage account. Blob container names +// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every +// dash (-) character must be immediately preceded and followed by a letter or number. 
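+//
+// A minimal usage sketch (hypothetical names; client construction and
+// authorization as for the other operations on this client):
+//
+//	container, err := client.Get(context.Background(), "myResourceGroup", "mystorageacct", "mycontainer")
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = container // a BlobContainer describing the container's properties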
+func (client BlobContainersClient) Get(ctx context.Context, resourceGroupName string, accountName string, containerName string) (result BlobContainer, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: containerName, + Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.BlobContainersClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, accountName, containerName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client BlobContainersClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "containerName": autorest.Encode("path", containerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client BlobContainersClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client BlobContainersClient) GetResponder(resp *http.Response) (result BlobContainer, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetImmutabilityPolicy gets the existing immutability policy along with the corresponding ETag in response headers +// and body. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// containerName - the name of the blob container within the specified storage account. Blob container names +// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every +// dash (-) character must be immediately preceded and followed by a letter or number. +// ifMatch - the entity state (ETag) version of the immutability policy to update. A value of "*" can be used +// to apply the operation only if the immutability policy already exists. If omitted, this operation will +// always be applied. +func (client BlobContainersClient) GetImmutabilityPolicy(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (result ImmutabilityPolicy, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: containerName, + Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.BlobContainersClient", "GetImmutabilityPolicy", err.Error()) + } + + req, err := client.GetImmutabilityPolicyPreparer(ctx, resourceGroupName, accountName, containerName, ifMatch) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "GetImmutabilityPolicy", nil, "Failure preparing request") + return + } + + resp, err := client.GetImmutabilityPolicySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "GetImmutabilityPolicy", resp, "Failure sending request") + return + } + + result, err = client.GetImmutabilityPolicyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "GetImmutabilityPolicy", resp, "Failure responding to request") + } + + return +} + +// GetImmutabilityPolicyPreparer prepares the 
GetImmutabilityPolicy request.
+func (client BlobContainersClient) GetImmutabilityPolicyPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"accountName":            autorest.Encode("path", accountName),
+		"containerName":          autorest.Encode("path", containerName),
+		"immutabilityPolicyName": autorest.Encode("path", "default"),
+		"resourceGroupName":      autorest.Encode("path", resourceGroupName),
+		"subscriptionId":         autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2018-07-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	if len(ifMatch) > 0 {
+		preparer = autorest.DecoratePreparer(preparer,
+			autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+	}
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetImmutabilityPolicySender sends the GetImmutabilityPolicy request. The method will close the
+// http.Response Body if it receives an error.
+func (client BlobContainersClient) GetImmutabilityPolicySender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req,
+		azure.DoRetryWithRegistration(client.Client))
+}
+
+// GetImmutabilityPolicyResponder handles the response to the GetImmutabilityPolicy request. The method always
+// closes the http.Response Body.
+func (client BlobContainersClient) GetImmutabilityPolicyResponder(resp *http.Response) (result ImmutabilityPolicy, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// List lists all containers and does not support a prefix like the data plane API does. Also SRP (the Storage
+// Resource Provider) today does not return a continuation token.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
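+//
+// A minimal usage sketch (hypothetical names; it assumes the generated
+// ListContainerItems model exposes its entries as a Value slice, and no
+// paging loop is needed because no continuation token is returned):
+//
+//	items, err := client.List(context.Background(), "myResourceGroup", "mystorageacct")
+//	if err != nil {
+//		// handle the error
+//	}
+//	if items.Value != nil {
+//		for _, item := range *items.Value {
+//			_ = item // one ListContainerItem per container
+//		}
+//	}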
+func (client BlobContainersClient) List(ctx context.Context, resourceGroupName string, accountName string) (result ListContainerItems, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.BlobContainersClient", "List", err.Error()) + } + + req, err := client.ListPreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client BlobContainersClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client BlobContainersClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client BlobContainersClient) ListResponder(resp *http.Response) (result ListContainerItems, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// LockImmutabilityPolicy sets the ImmutabilityPolicy to Locked state. 
The only action allowed on a Locked policy is the
+// ExtendImmutabilityPolicy action. ETag in If-Match is required for this operation.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// containerName - the name of the blob container within the specified storage account. Blob container names
+// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every
+// dash (-) character must be immediately preceded and followed by a letter or number.
+// ifMatch - the entity state (ETag) version of the immutability policy to update. A value of "*" can be used
+// to apply the operation only if the immutability policy already exists. If omitted, this operation will
+// always be applied.
+func (client BlobContainersClient) LockImmutabilityPolicy(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (result ImmutabilityPolicy, err error) {
+	if err := validation.Validate([]validation.Validation{
+		{TargetValue: resourceGroupName,
+			Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+				{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+				{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+		{TargetValue: accountName,
+			Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+				{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+		{TargetValue: containerName,
+			Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+				{Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+		{TargetValue: client.SubscriptionID,
+			Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+		return result, validation.NewError("storage.BlobContainersClient", "LockImmutabilityPolicy", err.Error())
+	}
+
+	req, err := client.LockImmutabilityPolicyPreparer(ctx, resourceGroupName, accountName, containerName, ifMatch)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "LockImmutabilityPolicy", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.LockImmutabilityPolicySender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "LockImmutabilityPolicy", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.LockImmutabilityPolicyResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "LockImmutabilityPolicy", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// LockImmutabilityPolicyPreparer prepares the LockImmutabilityPolicy request.
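+//
+// For context, an end-to-end lock typically fetches the current policy first,
+// because If-Match is mandatory for this operation (a sketch with
+// hypothetical names, assuming the generated ImmutabilityPolicy model carries
+// its Etag):
+//
+//	policy, err := client.GetImmutabilityPolicy(ctx, "myResourceGroup", "mystorageacct", "mycontainer", "")
+//	if err != nil {
+//		// handle the error
+//	}
+//	locked, err := client.LockImmutabilityPolicy(ctx, "myResourceGroup", "mystorageacct", "mycontainer", *policy.Etag)
+//	_ = locked // the policy returned in Locked state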
+func (client BlobContainersClient) LockImmutabilityPolicyPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "containerName": autorest.Encode("path", containerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/default/lock", pathParameters), + autorest.WithQueryParameters(queryParameters), + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// LockImmutabilityPolicySender sends the LockImmutabilityPolicy request. The method will close the +// http.Response Body if it receives an error. +func (client BlobContainersClient) LockImmutabilityPolicySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// LockImmutabilityPolicyResponder handles the response to the LockImmutabilityPolicy request. The method always +// closes the http.Response Body. +func (client BlobContainersClient) LockImmutabilityPolicyResponder(resp *http.Response) (result ImmutabilityPolicy, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// SetLegalHold sets legal hold tags. Setting the same tag results in an idempotent operation. SetLegalHold follows an +// append pattern and does not clear out the existing tags that are not specified in the request. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// containerName - the name of the blob container within the specified storage account. Blob container names +// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every +// dash (-) character must be immediately preceded and followed by a letter or number. +// legalHold - the LegalHold property that will be set to a blob container. 
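+//
+// A minimal usage sketch (hypothetical names; the Tags field is required, as
+// the validation below enforces, and is assumed to be the *[]string field of
+// the generated LegalHold model):
+//
+//	hold := LegalHold{Tags: &[]string{"audit-2018"}}
+//	res, err := client.SetLegalHold(ctx, "myResourceGroup", "mystorageacct", "mycontainer", hold)
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = res // the LegalHold state after the append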
+func (client BlobContainersClient) SetLegalHold(ctx context.Context, resourceGroupName string, accountName string, containerName string, legalHold LegalHold) (result LegalHold, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: containerName, + Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, + {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, + {TargetValue: legalHold, + Constraints: []validation.Constraint{{Target: "legalHold.Tags", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.BlobContainersClient", "SetLegalHold", err.Error()) + } + + req, err := client.SetLegalHoldPreparer(ctx, resourceGroupName, accountName, containerName, legalHold) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "SetLegalHold", nil, "Failure preparing request") + return + } + + resp, err := client.SetLegalHoldSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "SetLegalHold", resp, "Failure sending request") + return + } + + result, err = client.SetLegalHoldResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "SetLegalHold", resp, "Failure responding to request") + } + + return +} + +// SetLegalHoldPreparer prepares the SetLegalHold request. +func (client BlobContainersClient) SetLegalHoldPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, legalHold LegalHold) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "containerName": autorest.Encode("path", containerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/setLegalHold", pathParameters), + autorest.WithJSON(legalHold), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// SetLegalHoldSender sends the SetLegalHold request. The method will close the +// http.Response Body if it receives an error. 
+func (client BlobContainersClient) SetLegalHoldSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req,
+		azure.DoRetryWithRegistration(client.Client))
+}
+
+// SetLegalHoldResponder handles the response to the SetLegalHold request. The method always
+// closes the http.Response Body.
+func (client BlobContainersClient) SetLegalHoldResponder(resp *http.Response) (result LegalHold, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// Update updates container properties as specified in the request body. Properties not mentioned in the request will be
+// unchanged. Update fails if the specified container doesn't already exist.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// containerName - the name of the blob container within the specified storage account. Blob container names
+// must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every
+// dash (-) character must be immediately preceded and followed by a letter or number.
+// blobContainer - properties to update for the blob container.
+func (client BlobContainersClient) Update(ctx context.Context, resourceGroupName string, accountName string, containerName string, blobContainer BlobContainer) (result BlobContainer, err error) {
+	if err := validation.Validate([]validation.Validation{
+		{TargetValue: resourceGroupName,
+			Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+				{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+				{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+		{TargetValue: accountName,
+			Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+				{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+		{TargetValue: containerName,
+			Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil},
+				{Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+		{TargetValue: client.SubscriptionID,
+			Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
+		return result, validation.NewError("storage.BlobContainersClient", "Update", err.Error())
+	}
+
+	req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, containerName, blobContainer)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Update", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.UpdateSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Update", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.UpdateResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Update", resp, "Failure responding to request")
"Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client BlobContainersClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, blobContainer BlobContainer) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "containerName": autorest.Encode("path", containerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}", pathParameters), + autorest.WithJSON(blobContainer), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client BlobContainersClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client BlobContainersClient) UpdateResponder(resp *http.Response) (result BlobContainer, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage/client.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage/client.go new file mode 100644 index 000000000000..4ffb5928a839 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage/client.go @@ -0,0 +1,51 @@ +// Package storage implements the Azure ARM Storage service API version . +// +// The Azure Storage Management API. +package storage + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+
+import (
+	"github.com/Azure/go-autorest/autorest"
+)
+
+const (
+	// DefaultBaseURI is the default URI used for the service Storage
+	DefaultBaseURI = "https://management.azure.com"
+)
+
+// BaseClient is the base client for Storage.
+type BaseClient struct {
+	autorest.Client
+	BaseURI        string
+	SubscriptionID string
+}
+
+// New creates an instance of the BaseClient client.
+func New(subscriptionID string) BaseClient {
+	return NewWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewWithBaseURI creates an instance of the BaseClient client.
+func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
+	return BaseClient{
+		Client:         autorest.NewClientWithUserAgent(UserAgent()),
+		BaseURI:        baseURI,
+		SubscriptionID: subscriptionID,
+	}
+}
diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage/managementpolicies.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage/managementpolicies.go
new file mode 100644
index 000000000000..69eac469d7ea
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage/managementpolicies.go
@@ -0,0 +1,292 @@
+package storage
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+	"context"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/autorest/validation"
+	"net/http"
+)
+
+// ManagementPoliciesClient is the client for the Azure Storage Management API.
+type ManagementPoliciesClient struct {
+	BaseClient
+}
+
+// NewManagementPoliciesClient creates an instance of the ManagementPoliciesClient client.
+func NewManagementPoliciesClient(subscriptionID string) ManagementPoliciesClient {
+	return NewManagementPoliciesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewManagementPoliciesClientWithBaseURI creates an instance of the ManagementPoliciesClient client.
+func NewManagementPoliciesClientWithBaseURI(baseURI string, subscriptionID string) ManagementPoliciesClient {
+	return ManagementPoliciesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate sets the data policy rules associated with the specified storage account.
+// Parameters:
+// resourceGroupName - the name of the resource group within the user's subscription. The name is case
+// insensitive.
+// accountName - the name of the storage account within the specified resource group. Storage account names
+// must be between 3 and 24 characters in length and use numbers and lower-case letters only.
+// properties - the data policy rules to set to a storage account.
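+//
+// A minimal usage sketch (hypothetical names; the rules value is left
+// zero-valued here, since its exact shape is defined by the
+// ManagementPoliciesRulesSetParameter model in this package):
+//
+//	mpClient := NewManagementPoliciesClient("<subscription-id>")
+//	mpClient.Authorizer = authorizer // assumed configured as for other clients
+//	var rules ManagementPoliciesRulesSetParameter // populate per models.go
+//	result, err := mpClient.CreateOrUpdate(ctx, "myResourceGroup", "mystorageacct", rules)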
+func (client ManagementPoliciesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, accountName string, properties ManagementPoliciesRulesSetParameter) (result AccountManagementPolicies, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.ManagementPoliciesClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, accountName, properties) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client ManagementPoliciesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, properties ManagementPoliciesRulesSetParameter) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "managementPolicyName": autorest.Encode("path", "default"), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-03-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}", pathParameters), + autorest.WithJSON(properties), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementPoliciesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. 
+func (client ManagementPoliciesClient) CreateOrUpdateResponder(resp *http.Response) (result AccountManagementPolicies, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the data policy rules associated with the specified storage account. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +func (client ManagementPoliciesClient) Delete(ctx context.Context, resourceGroupName string, accountName string) (result autorest.Response, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.ManagementPoliciesClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client ManagementPoliciesClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "managementPolicyName": autorest.Encode("path", "default"), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-03-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementPoliciesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client ManagementPoliciesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the data policy rules associated with the specified storage account. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. The name is case +// insensitive. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. 
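+//
+// A minimal usage sketch (hypothetical names):
+//
+//	policies, err := mpClient.Get(context.Background(), "myResourceGroup", "mystorageacct")
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = policies // an AccountManagementPolicies value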
+func (client ManagementPoliciesClient) Get(ctx context.Context, resourceGroupName string, accountName string) (result AccountManagementPolicies, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.ManagementPoliciesClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.ManagementPoliciesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client ManagementPoliciesClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "managementPolicyName": autorest.Encode("path", "default"), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-03-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementPoliciesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client ManagementPoliciesClient) GetResponder(resp *http.Response) (result AccountManagementPolicies, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage/models.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage/models.go new file mode 100644 index 000000000000..39f4d414d635 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage/models.go @@ -0,0 +1,2057 @@ +package storage + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "encoding/json" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/date" + "net/http" +) + +// AccessTier enumerates the values for access tier. +type AccessTier string + +const ( + // Cool ... + Cool AccessTier = "Cool" + // Hot ... + Hot AccessTier = "Hot" +) + +// PossibleAccessTierValues returns an array of possible values for the AccessTier const type. +func PossibleAccessTierValues() []AccessTier { + return []AccessTier{Cool, Hot} +} + +// AccountStatus enumerates the values for account status. +type AccountStatus string + +const ( + // Available ... + Available AccountStatus = "available" + // Unavailable ... + Unavailable AccountStatus = "unavailable" +) + +// PossibleAccountStatusValues returns an array of possible values for the AccountStatus const type. +func PossibleAccountStatusValues() []AccountStatus { + return []AccountStatus{Available, Unavailable} +} + +// Action enumerates the values for action. +type Action string + +const ( + // Allow ... + Allow Action = "Allow" +) + +// PossibleActionValues returns an array of possible values for the Action const type. +func PossibleActionValues() []Action { + return []Action{Allow} +} + +// Bypass enumerates the values for bypass. +type Bypass string + +const ( + // AzureServices ... + AzureServices Bypass = "AzureServices" + // Logging ... + Logging Bypass = "Logging" + // Metrics ... + Metrics Bypass = "Metrics" + // None ... + None Bypass = "None" +) + +// PossibleBypassValues returns an array of possible values for the Bypass const type. +func PossibleBypassValues() []Bypass { + return []Bypass{AzureServices, Logging, Metrics, None} +} + +// DefaultAction enumerates the values for default action. +type DefaultAction string + +const ( + // DefaultActionAllow ... + DefaultActionAllow DefaultAction = "Allow" + // DefaultActionDeny ... 
+ DefaultActionDeny DefaultAction = "Deny" +) + +// PossibleDefaultActionValues returns an array of possible values for the DefaultAction const type. +func PossibleDefaultActionValues() []DefaultAction { + return []DefaultAction{DefaultActionAllow, DefaultActionDeny} +} + +// HTTPProtocol enumerates the values for http protocol. +type HTTPProtocol string + +const ( + // HTTPS ... + HTTPS HTTPProtocol = "https" + // Httpshttp ... + Httpshttp HTTPProtocol = "https,http" +) + +// PossibleHTTPProtocolValues returns an array of possible values for the HTTPProtocol const type. +func PossibleHTTPProtocolValues() []HTTPProtocol { + return []HTTPProtocol{HTTPS, Httpshttp} +} + +// ImmutabilityPolicyState enumerates the values for immutability policy state. +type ImmutabilityPolicyState string + +const ( + // Locked ... + Locked ImmutabilityPolicyState = "Locked" + // Unlocked ... + Unlocked ImmutabilityPolicyState = "Unlocked" +) + +// PossibleImmutabilityPolicyStateValues returns an array of possible values for the ImmutabilityPolicyState const type. +func PossibleImmutabilityPolicyStateValues() []ImmutabilityPolicyState { + return []ImmutabilityPolicyState{Locked, Unlocked} +} + +// ImmutabilityPolicyUpdateType enumerates the values for immutability policy update type. +type ImmutabilityPolicyUpdateType string + +const ( + // Extend ... + Extend ImmutabilityPolicyUpdateType = "extend" + // Lock ... + Lock ImmutabilityPolicyUpdateType = "lock" + // Put ... + Put ImmutabilityPolicyUpdateType = "put" +) + +// PossibleImmutabilityPolicyUpdateTypeValues returns an array of possible values for the ImmutabilityPolicyUpdateType const type. +func PossibleImmutabilityPolicyUpdateTypeValues() []ImmutabilityPolicyUpdateType { + return []ImmutabilityPolicyUpdateType{Extend, Lock, Put} +} + +// KeyPermission enumerates the values for key permission. +type KeyPermission string + +const ( + // Full ... + Full KeyPermission = "Full" + // Read ... + Read KeyPermission = "Read" +) + +// PossibleKeyPermissionValues returns an array of possible values for the KeyPermission const type. +func PossibleKeyPermissionValues() []KeyPermission { + return []KeyPermission{Full, Read} +} + +// KeySource enumerates the values for key source. +type KeySource string + +const ( + // MicrosoftKeyvault ... + MicrosoftKeyvault KeySource = "Microsoft.Keyvault" + // MicrosoftStorage ... + MicrosoftStorage KeySource = "Microsoft.Storage" +) + +// PossibleKeySourceValues returns an array of possible values for the KeySource const type. +func PossibleKeySourceValues() []KeySource { + return []KeySource{MicrosoftKeyvault, MicrosoftStorage} +} + +// Kind enumerates the values for kind. +type Kind string + +const ( + // BlobStorage ... + BlobStorage Kind = "BlobStorage" + // BlockBlobStorage ... + BlockBlobStorage Kind = "BlockBlobStorage" + // FileStorage ... + FileStorage Kind = "FileStorage" + // Storage ... + Storage Kind = "Storage" + // StorageV2 ... + StorageV2 Kind = "StorageV2" +) + +// PossibleKindValues returns an array of possible values for the Kind const type. +func PossibleKindValues() []Kind { + return []Kind{BlobStorage, BlockBlobStorage, FileStorage, Storage, StorageV2} +} + +// LeaseDuration enumerates the values for lease duration. +type LeaseDuration string + +const ( + // Fixed ... + Fixed LeaseDuration = "Fixed" + // Infinite ... + Infinite LeaseDuration = "Infinite" +) + +// PossibleLeaseDurationValues returns an array of possible values for the LeaseDuration const type. 
+func PossibleLeaseDurationValues() []LeaseDuration { + return []LeaseDuration{Fixed, Infinite} +} + +// LeaseState enumerates the values for lease state. +type LeaseState string + +const ( + // LeaseStateAvailable ... + LeaseStateAvailable LeaseState = "Available" + // LeaseStateBreaking ... + LeaseStateBreaking LeaseState = "Breaking" + // LeaseStateBroken ... + LeaseStateBroken LeaseState = "Broken" + // LeaseStateExpired ... + LeaseStateExpired LeaseState = "Expired" + // LeaseStateLeased ... + LeaseStateLeased LeaseState = "Leased" +) + +// PossibleLeaseStateValues returns an array of possible values for the LeaseState const type. +func PossibleLeaseStateValues() []LeaseState { + return []LeaseState{LeaseStateAvailable, LeaseStateBreaking, LeaseStateBroken, LeaseStateExpired, LeaseStateLeased} +} + +// LeaseStatus enumerates the values for lease status. +type LeaseStatus string + +const ( + // LeaseStatusLocked ... + LeaseStatusLocked LeaseStatus = "Locked" + // LeaseStatusUnlocked ... + LeaseStatusUnlocked LeaseStatus = "Unlocked" +) + +// PossibleLeaseStatusValues returns an array of possible values for the LeaseStatus const type. +func PossibleLeaseStatusValues() []LeaseStatus { + return []LeaseStatus{LeaseStatusLocked, LeaseStatusUnlocked} +} + +// Permissions enumerates the values for permissions. +type Permissions string + +const ( + // A ... + A Permissions = "a" + // C ... + C Permissions = "c" + // D ... + D Permissions = "d" + // L ... + L Permissions = "l" + // P ... + P Permissions = "p" + // R ... + R Permissions = "r" + // U ... + U Permissions = "u" + // W ... + W Permissions = "w" +) + +// PossiblePermissionsValues returns an array of possible values for the Permissions const type. +func PossiblePermissionsValues() []Permissions { + return []Permissions{A, C, D, L, P, R, U, W} +} + +// ProvisioningState enumerates the values for provisioning state. +type ProvisioningState string + +const ( + // Creating ... + Creating ProvisioningState = "Creating" + // ResolvingDNS ... + ResolvingDNS ProvisioningState = "ResolvingDNS" + // Succeeded ... + Succeeded ProvisioningState = "Succeeded" +) + +// PossibleProvisioningStateValues returns an array of possible values for the ProvisioningState const type. +func PossibleProvisioningStateValues() []ProvisioningState { + return []ProvisioningState{Creating, ResolvingDNS, Succeeded} +} + +// PublicAccess enumerates the values for public access. +type PublicAccess string + +const ( + // PublicAccessBlob ... + PublicAccessBlob PublicAccess = "Blob" + // PublicAccessContainer ... + PublicAccessContainer PublicAccess = "Container" + // PublicAccessNone ... + PublicAccessNone PublicAccess = "None" +) + +// PossiblePublicAccessValues returns an array of possible values for the PublicAccess const type. +func PossiblePublicAccessValues() []PublicAccess { + return []PublicAccess{PublicAccessBlob, PublicAccessContainer, PublicAccessNone} +} + +// Reason enumerates the values for reason. +type Reason string + +const ( + // AccountNameInvalid ... + AccountNameInvalid Reason = "AccountNameInvalid" + // AlreadyExists ... + AlreadyExists Reason = "AlreadyExists" +) + +// PossibleReasonValues returns an array of possible values for the Reason const type. +func PossibleReasonValues() []Reason { + return []Reason{AccountNameInvalid, AlreadyExists} +} + +// ReasonCode enumerates the values for reason code. +type ReasonCode string + +const ( + // NotAvailableForSubscription ... 
+ NotAvailableForSubscription ReasonCode = "NotAvailableForSubscription" + // QuotaID ... + QuotaID ReasonCode = "QuotaId" +) + +// PossibleReasonCodeValues returns an array of possible values for the ReasonCode const type. +func PossibleReasonCodeValues() []ReasonCode { + return []ReasonCode{NotAvailableForSubscription, QuotaID} +} + +// Services enumerates the values for services. +type Services string + +const ( + // B ... + B Services = "b" + // F ... + F Services = "f" + // Q ... + Q Services = "q" + // T ... + T Services = "t" +) + +// PossibleServicesValues returns an array of possible values for the Services const type. +func PossibleServicesValues() []Services { + return []Services{B, F, Q, T} +} + +// SignedResource enumerates the values for signed resource. +type SignedResource string + +const ( + // SignedResourceB ... + SignedResourceB SignedResource = "b" + // SignedResourceC ... + SignedResourceC SignedResource = "c" + // SignedResourceF ... + SignedResourceF SignedResource = "f" + // SignedResourceS ... + SignedResourceS SignedResource = "s" +) + +// PossibleSignedResourceValues returns an array of possible values for the SignedResource const type. +func PossibleSignedResourceValues() []SignedResource { + return []SignedResource{SignedResourceB, SignedResourceC, SignedResourceF, SignedResourceS} +} + +// SignedResourceTypes enumerates the values for signed resource types. +type SignedResourceTypes string + +const ( + // SignedResourceTypesC ... + SignedResourceTypesC SignedResourceTypes = "c" + // SignedResourceTypesO ... + SignedResourceTypesO SignedResourceTypes = "o" + // SignedResourceTypesS ... + SignedResourceTypesS SignedResourceTypes = "s" +) + +// PossibleSignedResourceTypesValues returns an array of possible values for the SignedResourceTypes const type. +func PossibleSignedResourceTypesValues() []SignedResourceTypes { + return []SignedResourceTypes{SignedResourceTypesC, SignedResourceTypesO, SignedResourceTypesS} +} + +// SkuName enumerates the values for sku name. +type SkuName string + +const ( + // PremiumLRS ... + PremiumLRS SkuName = "Premium_LRS" + // PremiumZRS ... + PremiumZRS SkuName = "Premium_ZRS" + // StandardGRS ... + StandardGRS SkuName = "Standard_GRS" + // StandardLRS ... + StandardLRS SkuName = "Standard_LRS" + // StandardRAGRS ... + StandardRAGRS SkuName = "Standard_RAGRS" + // StandardZRS ... + StandardZRS SkuName = "Standard_ZRS" +) + +// PossibleSkuNameValues returns an array of possible values for the SkuName const type. +func PossibleSkuNameValues() []SkuName { + return []SkuName{PremiumLRS, PremiumZRS, StandardGRS, StandardLRS, StandardRAGRS, StandardZRS} +} + +// SkuTier enumerates the values for sku tier. +type SkuTier string + +const ( + // Premium ... + Premium SkuTier = "Premium" + // Standard ... + Standard SkuTier = "Standard" +) + +// PossibleSkuTierValues returns an array of possible values for the SkuTier const type. +func PossibleSkuTierValues() []SkuTier { + return []SkuTier{Premium, Standard} +} + +// State enumerates the values for state. +type State string + +const ( + // StateDeprovisioning ... + StateDeprovisioning State = "deprovisioning" + // StateFailed ... + StateFailed State = "failed" + // StateNetworkSourceDeleted ... + StateNetworkSourceDeleted State = "networkSourceDeleted" + // StateProvisioning ... + StateProvisioning State = "provisioning" + // StateSucceeded ... + StateSucceeded State = "succeeded" +) + +// PossibleStateValues returns an array of possible values for the State const type. 
+func PossibleStateValues() []State { + return []State{StateDeprovisioning, StateFailed, StateNetworkSourceDeleted, StateProvisioning, StateSucceeded} +} + +// UsageUnit enumerates the values for usage unit. +type UsageUnit string + +const ( + // Bytes ... + Bytes UsageUnit = "Bytes" + // BytesPerSecond ... + BytesPerSecond UsageUnit = "BytesPerSecond" + // Count ... + Count UsageUnit = "Count" + // CountsPerSecond ... + CountsPerSecond UsageUnit = "CountsPerSecond" + // Percent ... + Percent UsageUnit = "Percent" + // Seconds ... + Seconds UsageUnit = "Seconds" +) + +// PossibleUsageUnitValues returns an array of possible values for the UsageUnit const type. +func PossibleUsageUnitValues() []UsageUnit { + return []UsageUnit{Bytes, BytesPerSecond, Count, CountsPerSecond, Percent, Seconds} +} + +// Account the storage account. +type Account struct { + autorest.Response `json:"-"` + // Sku - Gets the SKU. + Sku *Sku `json:"sku,omitempty"` + // Kind - Gets the Kind. Possible values include: 'Storage', 'StorageV2', 'BlobStorage', 'FileStorage', 'BlockBlobStorage' + Kind Kind `json:"kind,omitempty"` + // Identity - The identity of the resource. + Identity *Identity `json:"identity,omitempty"` + // AccountProperties - Properties of the storage account. + *AccountProperties `json:"properties,omitempty"` + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` + // Location - The geo-location where the resource lives + Location *string `json:"location,omitempty"` + // ID - Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - The name of the resource + Name *string `json:"name,omitempty"` + // Type - The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for Account. +func (a Account) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if a.Sku != nil { + objectMap["sku"] = a.Sku + } + if a.Kind != "" { + objectMap["kind"] = a.Kind + } + if a.Identity != nil { + objectMap["identity"] = a.Identity + } + if a.AccountProperties != nil { + objectMap["properties"] = a.AccountProperties + } + if a.Tags != nil { + objectMap["tags"] = a.Tags + } + if a.Location != nil { + objectMap["location"] = a.Location + } + if a.ID != nil { + objectMap["id"] = a.ID + } + if a.Name != nil { + objectMap["name"] = a.Name + } + if a.Type != nil { + objectMap["type"] = a.Type + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Account struct. 
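+// It exists because the embedded *AccountProperties field is flattened into a
+// nested "properties" object on the wire (the ARM envelope), which the default
+// encoding/json mapping cannot express; each recognized key is therefore
+// decoded individually below.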
+func (a *Account) UnmarshalJSON(body []byte) error {
+    var m map[string]*json.RawMessage
+    err := json.Unmarshal(body, &m)
+    if err != nil {
+        return err
+    }
+    for k, v := range m {
+        switch k {
+        case "sku":
+            if v != nil {
+                var sku Sku
+                err = json.Unmarshal(*v, &sku)
+                if err != nil {
+                    return err
+                }
+                a.Sku = &sku
+            }
+        case "kind":
+            if v != nil {
+                var kind Kind
+                err = json.Unmarshal(*v, &kind)
+                if err != nil {
+                    return err
+                }
+                a.Kind = kind
+            }
+        case "identity":
+            if v != nil {
+                var identity Identity
+                err = json.Unmarshal(*v, &identity)
+                if err != nil {
+                    return err
+                }
+                a.Identity = &identity
+            }
+        case "properties":
+            if v != nil {
+                var accountProperties AccountProperties
+                err = json.Unmarshal(*v, &accountProperties)
+                if err != nil {
+                    return err
+                }
+                a.AccountProperties = &accountProperties
+            }
+        case "tags":
+            if v != nil {
+                var tags map[string]*string
+                err = json.Unmarshal(*v, &tags)
+                if err != nil {
+                    return err
+                }
+                a.Tags = tags
+            }
+        case "location":
+            if v != nil {
+                var location string
+                err = json.Unmarshal(*v, &location)
+                if err != nil {
+                    return err
+                }
+                a.Location = &location
+            }
+        case "id":
+            if v != nil {
+                var ID string
+                err = json.Unmarshal(*v, &ID)
+                if err != nil {
+                    return err
+                }
+                a.ID = &ID
+            }
+        case "name":
+            if v != nil {
+                var name string
+                err = json.Unmarshal(*v, &name)
+                if err != nil {
+                    return err
+                }
+                a.Name = &name
+            }
+        case "type":
+            if v != nil {
+                var typeVar string
+                err = json.Unmarshal(*v, &typeVar)
+                if err != nil {
+                    return err
+                }
+                a.Type = &typeVar
+            }
+        }
+    }
+
+    return nil
+}
+
+// AccountCheckNameAvailabilityParameters the parameters used to check the availability of the storage account name.
+type AccountCheckNameAvailabilityParameters struct {
+    // Name - The storage account name.
+    Name *string `json:"name,omitempty"`
+    // Type - The type of resource, Microsoft.Storage/storageAccounts
+    Type *string `json:"type,omitempty"`
+}
+
+// AccountCreateParameters the parameters used when creating a storage account.
+type AccountCreateParameters struct {
+    // Sku - Required. Gets or sets the sku name.
+    Sku *Sku `json:"sku,omitempty"`
+    // Kind - Required. Indicates the type of storage account. Possible values include: 'Storage', 'StorageV2', 'BlobStorage', 'FileStorage', 'BlockBlobStorage'
+    Kind Kind `json:"kind,omitempty"`
+    // Location - Required. Gets or sets the location of the resource. This will be one of the supported and registered Azure Geo Regions (e.g. West US, East US, Southeast Asia, etc.). The geo region of a resource cannot be changed once it is created, but if an identical geo region is specified on update, the request will succeed.
+    Location *string `json:"location,omitempty"`
+    // Tags - Gets or sets a list of key value pairs that describe the resource. These tags can be used for viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key with a length no greater than 128 characters and a value with a length no greater than 256 characters.
+    Tags map[string]*string `json:"tags"`
+    // Identity - The identity of the resource.
+    Identity *Identity `json:"identity,omitempty"`
+    // AccountPropertiesCreateParameters - The parameters used to create the storage account.
+    *AccountPropertiesCreateParameters `json:"properties,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AccountCreateParameters.
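+// A value passed through this marshaler is typically assembled as below
+// (illustrative sketch; assumes the package's Sku type exposes a Name field of
+// type SkuName, as elsewhere in this SDK):
+//
+//   location := "eastus"
+//   params := AccountCreateParameters{
+//       Sku:      &Sku{Name: StandardLRS},
+//       Kind:     StorageV2,
+//       Location: &location,
+//   }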
+func (acp AccountCreateParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if acp.Sku != nil { + objectMap["sku"] = acp.Sku + } + if acp.Kind != "" { + objectMap["kind"] = acp.Kind + } + if acp.Location != nil { + objectMap["location"] = acp.Location + } + if acp.Tags != nil { + objectMap["tags"] = acp.Tags + } + if acp.Identity != nil { + objectMap["identity"] = acp.Identity + } + if acp.AccountPropertiesCreateParameters != nil { + objectMap["properties"] = acp.AccountPropertiesCreateParameters + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for AccountCreateParameters struct. +func (acp *AccountCreateParameters) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "sku": + if v != nil { + var sku Sku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + acp.Sku = &sku + } + case "kind": + if v != nil { + var kind Kind + err = json.Unmarshal(*v, &kind) + if err != nil { + return err + } + acp.Kind = kind + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + acp.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + acp.Tags = tags + } + case "identity": + if v != nil { + var identity Identity + err = json.Unmarshal(*v, &identity) + if err != nil { + return err + } + acp.Identity = &identity + } + case "properties": + if v != nil { + var accountPropertiesCreateParameters AccountPropertiesCreateParameters + err = json.Unmarshal(*v, &accountPropertiesCreateParameters) + if err != nil { + return err + } + acp.AccountPropertiesCreateParameters = &accountPropertiesCreateParameters + } + } + } + + return nil +} + +// AccountKey an access key for the storage account. +type AccountKey struct { + // KeyName - Name of the key. + KeyName *string `json:"keyName,omitempty"` + // Value - Base 64-encoded value of the key. + Value *string `json:"value,omitempty"` + // Permissions - Permissions for the key -- read-only or full permissions. Possible values include: 'Read', 'Full' + Permissions KeyPermission `json:"permissions,omitempty"` +} + +// AccountListKeysResult the response from the ListKeys operation. +type AccountListKeysResult struct { + autorest.Response `json:"-"` + // Keys - Gets the list of storage account keys and their properties for the specified storage account. + Keys *[]AccountKey `json:"keys,omitempty"` +} + +// AccountListResult the response from the List Storage Accounts operation. +type AccountListResult struct { + autorest.Response `json:"-"` + // Value - Gets the list of storage accounts and their properties. + Value *[]Account `json:"value,omitempty"` +} + +// AccountManagementPolicies the Get Storage Account ManagementPolicies operation response. +type AccountManagementPolicies struct { + autorest.Response `json:"-"` + // AccountManagementPoliciesRulesProperty - Returns the Storage Account Data Policies Rules. + *AccountManagementPoliciesRulesProperty `json:"properties,omitempty"` + // ID - Fully qualified resource Id for the resource. 
Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - The name of the resource + Name *string `json:"name,omitempty"` + // Type - The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for AccountManagementPolicies. +func (amp AccountManagementPolicies) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if amp.AccountManagementPoliciesRulesProperty != nil { + objectMap["properties"] = amp.AccountManagementPoliciesRulesProperty + } + if amp.ID != nil { + objectMap["id"] = amp.ID + } + if amp.Name != nil { + objectMap["name"] = amp.Name + } + if amp.Type != nil { + objectMap["type"] = amp.Type + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for AccountManagementPolicies struct. +func (amp *AccountManagementPolicies) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var accountManagementPoliciesRulesProperty AccountManagementPoliciesRulesProperty + err = json.Unmarshal(*v, &accountManagementPoliciesRulesProperty) + if err != nil { + return err + } + amp.AccountManagementPoliciesRulesProperty = &accountManagementPoliciesRulesProperty + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + amp.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + amp.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + amp.Type = &typeVar + } + } + } + + return nil +} + +// AccountManagementPoliciesRulesProperty the Storage Account Data Policies properties. +type AccountManagementPoliciesRulesProperty struct { + // LastModifiedTime - Returns the date and time the ManagementPolicies was last modified. + LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"` + // Policy - The Storage Account ManagementPolicies Rules, in JSON format. See more details in: https://docs.microsoft.com/en-us/azure/storage/common/storage-lifecycle-managment-concepts. + Policy interface{} `json:"policy,omitempty"` +} + +// AccountProperties properties of the storage account. +type AccountProperties struct { + // ProvisioningState - Gets the status of the storage account at the time the operation was called. Possible values include: 'Creating', 'ResolvingDNS', 'Succeeded' + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` + // PrimaryEndpoints - Gets the URLs that are used to perform a retrieval of a public blob, queue, or table object. Note that Standard_ZRS and Premium_LRS accounts only return the blob endpoint. + PrimaryEndpoints *Endpoints `json:"primaryEndpoints,omitempty"` + // PrimaryLocation - Gets the location of the primary data center for the storage account. + PrimaryLocation *string `json:"primaryLocation,omitempty"` + // StatusOfPrimary - Gets the status indicating whether the primary location of the storage account is available or unavailable. 
Possible values include: 'Available', 'Unavailable'
+    StatusOfPrimary AccountStatus `json:"statusOfPrimary,omitempty"`
+    // LastGeoFailoverTime - Gets the timestamp of the most recent instance of a failover to the secondary location. Only the most recent timestamp is retained. This element is not returned if there has never been a failover instance. Only available if the accountType is Standard_GRS or Standard_RAGRS.
+    LastGeoFailoverTime *date.Time `json:"lastGeoFailoverTime,omitempty"`
+    // SecondaryLocation - Gets the location of the geo-replicated secondary for the storage account. Only available if the accountType is Standard_GRS or Standard_RAGRS.
+    SecondaryLocation *string `json:"secondaryLocation,omitempty"`
+    // StatusOfSecondary - Gets the status indicating whether the secondary location of the storage account is available or unavailable. Only available if the SKU name is Standard_GRS or Standard_RAGRS. Possible values include: 'Available', 'Unavailable'
+    StatusOfSecondary AccountStatus `json:"statusOfSecondary,omitempty"`
+    // CreationTime - Gets the creation date and time of the storage account in UTC.
+    CreationTime *date.Time `json:"creationTime,omitempty"`
+    // CustomDomain - Gets the custom domain the user assigned to this storage account.
+    CustomDomain *CustomDomain `json:"customDomain,omitempty"`
+    // SecondaryEndpoints - Gets the URLs that are used to perform a retrieval of a public blob, queue, or table object from the secondary location of the storage account. Only available if the SKU name is Standard_RAGRS.
+    SecondaryEndpoints *Endpoints `json:"secondaryEndpoints,omitempty"`
+    // Encryption - Gets the encryption settings on the account. If unspecified, the account is unencrypted.
+    Encryption *Encryption `json:"encryption,omitempty"`
+    // AccessTier - Required for storage accounts where kind = BlobStorage. The access tier used for billing. Possible values include: 'Hot', 'Cool'
+    AccessTier AccessTier `json:"accessTier,omitempty"`
+    // EnableAzureFilesAadIntegration - Enables Azure Files AAD Integration for SMB if set to true.
+    EnableAzureFilesAadIntegration *bool `json:"azureFilesAadIntegration,omitempty"`
+    // EnableHTTPSTrafficOnly - Allows https traffic only to storage service if set to true.
+    EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"`
+    // NetworkRuleSet - Network rule set
+    NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"`
+    // IsHnsEnabled - Account HierarchicalNamespace enabled if set to true.
+    IsHnsEnabled *bool `json:"isHnsEnabled,omitempty"`
+}
+
+// AccountPropertiesCreateParameters the parameters used to create the storage account.
+type AccountPropertiesCreateParameters struct {
+    // CustomDomain - User domain assigned to the storage account. Name is the CNAME source. Only one custom domain is supported per storage account at this time. To clear the existing custom domain, use an empty string for the custom domain name property.
+    CustomDomain *CustomDomain `json:"customDomain,omitempty"`
+    // Encryption - Provides the encryption settings on the account. If left unspecified the account encryption settings will remain the same. The default setting is unencrypted.
+    Encryption *Encryption `json:"encryption,omitempty"`
+    // NetworkRuleSet - Network rule set
+    NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"`
+    // AccessTier - Required for storage accounts where kind = BlobStorage. The access tier used for billing. Possible values include: 'Hot', 'Cool'
+    AccessTier AccessTier `json:"accessTier,omitempty"`
+    // EnableAzureFilesAadIntegration - Enables Azure Files AAD Integration for SMB if set to true.
+    EnableAzureFilesAadIntegration *bool `json:"azureFilesAadIntegration,omitempty"`
+    // EnableHTTPSTrafficOnly - Allows https traffic only to storage service if set to true.
+    EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"`
+    // IsHnsEnabled - Account HierarchicalNamespace enabled if set to true.
+    IsHnsEnabled *bool `json:"isHnsEnabled,omitempty"`
+}
+
+// AccountPropertiesUpdateParameters the parameters used when updating a storage account.
+type AccountPropertiesUpdateParameters struct {
+    // CustomDomain - Custom domain assigned to the storage account by the user. Name is the CNAME source. Only one custom domain is supported per storage account at this time. To clear the existing custom domain, use an empty string for the custom domain name property.
+    CustomDomain *CustomDomain `json:"customDomain,omitempty"`
+    // Encryption - Provides the encryption settings on the account. The default setting is unencrypted.
+    Encryption *Encryption `json:"encryption,omitempty"`
+    // AccessTier - Required for storage accounts where kind = BlobStorage. The access tier used for billing. Possible values include: 'Hot', 'Cool'
+    AccessTier AccessTier `json:"accessTier,omitempty"`
+    // EnableAzureFilesAadIntegration - Enables Azure Files AAD Integration for SMB if set to true.
+    EnableAzureFilesAadIntegration *bool `json:"azureFilesAadIntegration,omitempty"`
+    // EnableHTTPSTrafficOnly - Allows https traffic only to storage service if set to true.
+    EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"`
+    // NetworkRuleSet - Network rule set
+    NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"`
+}
+
+// AccountRegenerateKeyParameters the parameters used to regenerate the storage account key.
+type AccountRegenerateKeyParameters struct {
+    // KeyName - The name of the storage key to be regenerated; possible values are key1, key2.
+    KeyName *string `json:"keyName,omitempty"`
+}
+
+// AccountSasParameters the parameters to list SAS credentials of a storage account.
+type AccountSasParameters struct {
+    // Services - The signed services accessible with the account SAS. Possible values include: Blob (b), Queue (q), Table (t), File (f). Possible values include: 'B', 'Q', 'T', 'F'
+    Services Services `json:"signedServices,omitempty"`
+    // ResourceTypes - The signed resource types that are accessible with the account SAS. Service (s): Access to service-level APIs; Container (c): Access to container-level APIs; Object (o): Access to object-level APIs for blobs, queue messages, table entities, and files. Possible values include: 'SignedResourceTypesS', 'SignedResourceTypesC', 'SignedResourceTypesO'
+    ResourceTypes SignedResourceTypes `json:"signedResourceTypes,omitempty"`
+    // Permissions - The signed permissions for the account SAS. Possible values include: Read (r), Write (w), Delete (d), List (l), Add (a), Create (c), Update (u) and Process (p). Possible values include: 'R', 'D', 'W', 'L', 'A', 'C', 'U', 'P'
+    Permissions Permissions `json:"signedPermission,omitempty"`
+    // IPAddressOrRange - An IP address or a range of IP addresses from which to accept requests.
+    IPAddressOrRange *string `json:"signedIp,omitempty"`
+    // Protocols - The protocol permitted for a request made with the account SAS. Possible values include: 'Httpshttp', 'HTTPS'
+    Protocols HTTPProtocol `json:"signedProtocol,omitempty"`
+    // SharedAccessStartTime - The time at which the SAS becomes valid.
+    SharedAccessStartTime *date.Time `json:"signedStart,omitempty"`
+    // SharedAccessExpiryTime - The time at which the shared access signature becomes invalid.
+    SharedAccessExpiryTime *date.Time `json:"signedExpiry,omitempty"`
+    // KeyToSign - The key to sign the account SAS token with.
+    KeyToSign *string `json:"keyToSign,omitempty"`
+}
+
+// AccountsCreateFuture an abstraction for monitoring and retrieving the results of a long-running operation.
+type AccountsCreateFuture struct {
+    azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *AccountsCreateFuture) Result(client AccountsClient) (a Account, err error) {
+    var done bool
+    done, err = future.Done(client)
+    if err != nil {
+        err = autorest.NewErrorWithError(err, "storage.AccountsCreateFuture", "Result", future.Response(), "Polling failure")
+        return
+    }
+    if !done {
+        err = azure.NewAsyncOpIncompleteError("storage.AccountsCreateFuture")
+        return
+    }
+    sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+    if a.Response.Response, err = future.GetResult(sender); err == nil && a.Response.Response.StatusCode != http.StatusNoContent {
+        a, err = client.CreateResponder(a.Response.Response)
+        if err != nil {
+            err = autorest.NewErrorWithError(err, "storage.AccountsCreateFuture", "Result", a.Response.Response, "Failure responding to request")
+        }
+    }
+    return
+}
+
+// AccountUpdateParameters the parameters that can be provided when updating the storage account properties.
+type AccountUpdateParameters struct {
+    // Sku - Gets or sets the SKU name. Note that the SKU name cannot be updated to Standard_ZRS, Premium_LRS or Premium_ZRS, nor can accounts of those sku names be updated to any other value.
+    Sku *Sku `json:"sku,omitempty"`
+    // Tags - Gets or sets a list of key value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater in length than 128 characters and a value no greater in length than 256 characters.
+    Tags map[string]*string `json:"tags"`
+    // Identity - The identity of the resource.
+    Identity *Identity `json:"identity,omitempty"`
+    // AccountPropertiesUpdateParameters - The parameters used when updating a storage account.
+    *AccountPropertiesUpdateParameters `json:"properties,omitempty"`
+    // Kind - Optional. Indicates the type of storage account. Currently only the StorageV2 value is supported by the server. Possible values include: 'Storage', 'StorageV2', 'BlobStorage', 'FileStorage', 'BlockBlobStorage'
+    Kind Kind `json:"kind,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AccountUpdateParameters.
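+// Only fields that are actually set make it onto the wire: nil pointers and
+// empty enum values are skipped below, so an update request can carry a sparse
+// payload instead of JSON nulls or zero values.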
+func (aup AccountUpdateParameters) MarshalJSON() ([]byte, error) {
+    objectMap := make(map[string]interface{})
+    if aup.Sku != nil {
+        objectMap["sku"] = aup.Sku
+    }
+    if aup.Tags != nil {
+        objectMap["tags"] = aup.Tags
+    }
+    if aup.Identity != nil {
+        objectMap["identity"] = aup.Identity
+    }
+    if aup.AccountPropertiesUpdateParameters != nil {
+        objectMap["properties"] = aup.AccountPropertiesUpdateParameters
+    }
+    if aup.Kind != "" {
+        objectMap["kind"] = aup.Kind
+    }
+    return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for AccountUpdateParameters struct.
+func (aup *AccountUpdateParameters) UnmarshalJSON(body []byte) error {
+    var m map[string]*json.RawMessage
+    err := json.Unmarshal(body, &m)
+    if err != nil {
+        return err
+    }
+    for k, v := range m {
+        switch k {
+        case "sku":
+            if v != nil {
+                var sku Sku
+                err = json.Unmarshal(*v, &sku)
+                if err != nil {
+                    return err
+                }
+                aup.Sku = &sku
+            }
+        case "tags":
+            if v != nil {
+                var tags map[string]*string
+                err = json.Unmarshal(*v, &tags)
+                if err != nil {
+                    return err
+                }
+                aup.Tags = tags
+            }
+        case "identity":
+            if v != nil {
+                var identity Identity
+                err = json.Unmarshal(*v, &identity)
+                if err != nil {
+                    return err
+                }
+                aup.Identity = &identity
+            }
+        case "properties":
+            if v != nil {
+                var accountPropertiesUpdateParameters AccountPropertiesUpdateParameters
+                err = json.Unmarshal(*v, &accountPropertiesUpdateParameters)
+                if err != nil {
+                    return err
+                }
+                aup.AccountPropertiesUpdateParameters = &accountPropertiesUpdateParameters
+            }
+        case "kind":
+            if v != nil {
+                var kind Kind
+                err = json.Unmarshal(*v, &kind)
+                if err != nil {
+                    return err
+                }
+                aup.Kind = kind
+            }
+        }
+    }
+
+    return nil
+}
+
+// AzureEntityResource the resource model definition for an Azure Resource Manager resource with an etag.
+type AzureEntityResource struct {
+    // Etag - Resource Etag.
+    Etag *string `json:"etag,omitempty"`
+    // ID - Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+    ID *string `json:"id,omitempty"`
+    // Name - The name of the resource
+    Name *string `json:"name,omitempty"`
+    // Type - The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
+    Type *string `json:"type,omitempty"`
+}
+
+// BlobContainer properties of the blob container, including Id, resource name, resource type, Etag.
+type BlobContainer struct {
+    autorest.Response `json:"-"`
+    // ContainerProperties - Properties of the blob container.
+    *ContainerProperties `json:"properties,omitempty"`
+    // Etag - Resource Etag.
+    Etag *string `json:"etag,omitempty"`
+    // ID - Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+    ID *string `json:"id,omitempty"`
+    // Name - The name of the resource
+    Name *string `json:"name,omitempty"`
+    // Type - The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
+    Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for BlobContainer.
+func (bc BlobContainer) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if bc.ContainerProperties != nil { + objectMap["properties"] = bc.ContainerProperties + } + if bc.Etag != nil { + objectMap["etag"] = bc.Etag + } + if bc.ID != nil { + objectMap["id"] = bc.ID + } + if bc.Name != nil { + objectMap["name"] = bc.Name + } + if bc.Type != nil { + objectMap["type"] = bc.Type + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for BlobContainer struct. +func (bc *BlobContainer) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var containerProperties ContainerProperties + err = json.Unmarshal(*v, &containerProperties) + if err != nil { + return err + } + bc.ContainerProperties = &containerProperties + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + bc.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + bc.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + bc.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + bc.Type = &typeVar + } + } + } + + return nil +} + +// CheckNameAvailabilityResult the CheckNameAvailability operation response. +type CheckNameAvailabilityResult struct { + autorest.Response `json:"-"` + // NameAvailable - Gets a boolean value that indicates whether the name is available for you to use. If true, the name is available. If false, the name has already been taken or is invalid and cannot be used. + NameAvailable *bool `json:"nameAvailable,omitempty"` + // Reason - Gets the reason that a storage account name could not be used. The Reason element is only returned if NameAvailable is false. Possible values include: 'AccountNameInvalid', 'AlreadyExists' + Reason Reason `json:"reason,omitempty"` + // Message - Gets an error message explaining the Reason value in more detail. + Message *string `json:"message,omitempty"` +} + +// ContainerProperties the properties of a container. +type ContainerProperties struct { + // PublicAccess - Specifies whether data in the container may be accessed publicly and the level of access. Possible values include: 'PublicAccessContainer', 'PublicAccessBlob', 'PublicAccessNone' + PublicAccess PublicAccess `json:"publicAccess,omitempty"` + // LastModifiedTime - Returns the date and time the container was last modified. + LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"` + // LeaseStatus - The lease status of the container. Possible values include: 'LeaseStatusLocked', 'LeaseStatusUnlocked' + LeaseStatus LeaseStatus `json:"leaseStatus,omitempty"` + // LeaseState - Lease state of the container. Possible values include: 'LeaseStateAvailable', 'LeaseStateLeased', 'LeaseStateExpired', 'LeaseStateBreaking', 'LeaseStateBroken' + LeaseState LeaseState `json:"leaseState,omitempty"` + // LeaseDuration - Specifies whether the lease on a container is of infinite or fixed duration, only when the container is leased. Possible values include: 'Infinite', 'Fixed' + LeaseDuration LeaseDuration `json:"leaseDuration,omitempty"` + // Metadata - A name-value pair to associate with the container as metadata. 
+    Metadata map[string]*string `json:"metadata"`
+    // ImmutabilityPolicy - The ImmutabilityPolicy property of the container.
+    ImmutabilityPolicy *ImmutabilityPolicyProperties `json:"immutabilityPolicy,omitempty"`
+    // LegalHold - The LegalHold property of the container.
+    LegalHold *LegalHoldProperties `json:"legalHold,omitempty"`
+    // HasLegalHold - The hasLegalHold public property is set to true by SRP if there is at least one existing tag. The hasLegalHold public property is set to false by SRP if all existing legal hold tags are cleared out. There can be a maximum of 1000 blob containers with hasLegalHold=true for a given account.
+    HasLegalHold *bool `json:"hasLegalHold,omitempty"`
+    // HasImmutabilityPolicy - The hasImmutabilityPolicy public property is set to true by SRP if ImmutabilityPolicy has been created for this container. The hasImmutabilityPolicy public property is set to false by SRP if ImmutabilityPolicy has not been created for this container.
+    HasImmutabilityPolicy *bool `json:"hasImmutabilityPolicy,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ContainerProperties.
+func (cp ContainerProperties) MarshalJSON() ([]byte, error) {
+    objectMap := make(map[string]interface{})
+    if cp.PublicAccess != "" {
+        objectMap["publicAccess"] = cp.PublicAccess
+    }
+    if cp.LastModifiedTime != nil {
+        objectMap["lastModifiedTime"] = cp.LastModifiedTime
+    }
+    if cp.LeaseStatus != "" {
+        objectMap["leaseStatus"] = cp.LeaseStatus
+    }
+    if cp.LeaseState != "" {
+        objectMap["leaseState"] = cp.LeaseState
+    }
+    if cp.LeaseDuration != "" {
+        objectMap["leaseDuration"] = cp.LeaseDuration
+    }
+    if cp.Metadata != nil {
+        objectMap["metadata"] = cp.Metadata
+    }
+    if cp.ImmutabilityPolicy != nil {
+        objectMap["immutabilityPolicy"] = cp.ImmutabilityPolicy
+    }
+    if cp.LegalHold != nil {
+        objectMap["legalHold"] = cp.LegalHold
+    }
+    if cp.HasLegalHold != nil {
+        objectMap["hasLegalHold"] = cp.HasLegalHold
+    }
+    if cp.HasImmutabilityPolicy != nil {
+        objectMap["hasImmutabilityPolicy"] = cp.HasImmutabilityPolicy
+    }
+    return json.Marshal(objectMap)
+}
+
+// CustomDomain the custom domain assigned to this storage account. This can be set via Update.
+type CustomDomain struct {
+    // Name - Gets or sets the custom domain name assigned to the storage account. Name is the CNAME source.
+    Name *string `json:"name,omitempty"`
+    // UseSubDomain - Indicates whether indirect CName validation is enabled. Default value is false. This should only be set on updates.
+    UseSubDomain *bool `json:"useSubDomain,omitempty"`
+}
+
+// Dimension dimension of blobs, possibly blob type or access tier.
+type Dimension struct {
+    // Name - Display name of dimension.
+    Name *string `json:"name,omitempty"`
+    // DisplayName - Display name of dimension.
+    DisplayName *string `json:"displayName,omitempty"`
+}
+
+// Encryption the encryption settings on the storage account.
+type Encryption struct {
+    // Services - List of services which support encryption.
+    Services *EncryptionServices `json:"services,omitempty"`
+    // KeySource - The encryption keySource (provider). Possible values (case-insensitive): Microsoft.Storage, Microsoft.Keyvault. Possible values include: 'MicrosoftStorage', 'MicrosoftKeyvault'
+    KeySource KeySource `json:"keySource,omitempty"`
+    // KeyVaultProperties - Properties provided by key vault.
+    KeyVaultProperties *KeyVaultProperties `json:"keyvaultproperties,omitempty"`
+}
+
+// EncryptionService a service that allows server-side encryption to be used.
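+// Individual EncryptionService values are grouped into an EncryptionServices
+// set and attached to an Encryption value, for example (illustrative only):
+//
+//   enabled := true
+//   enc := Encryption{
+//       Services:  &EncryptionServices{Blob: &EncryptionService{Enabled: &enabled}},
+//       KeySource: MicrosoftStorage,
+//   }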
+type EncryptionService struct { + // Enabled - A boolean indicating whether or not the service encrypts the data as it is stored. + Enabled *bool `json:"enabled,omitempty"` + // LastEnabledTime - Gets a rough estimate of the date/time when the encryption was last enabled by the user. Only returned when encryption is enabled. There might be some unencrypted blobs which were written after this time, as it is just a rough estimate. + LastEnabledTime *date.Time `json:"lastEnabledTime,omitempty"` +} + +// EncryptionServices a list of services that support encryption. +type EncryptionServices struct { + // Blob - The encryption function of the blob storage service. + Blob *EncryptionService `json:"blob,omitempty"` + // File - The encryption function of the file storage service. + File *EncryptionService `json:"file,omitempty"` + // Table - The encryption function of the table storage service. + Table *EncryptionService `json:"table,omitempty"` + // Queue - The encryption function of the queue storage service. + Queue *EncryptionService `json:"queue,omitempty"` +} + +// Endpoints the URIs that are used to perform a retrieval of a public blob, queue, table, web or dfs object. +type Endpoints struct { + // Blob - Gets the blob endpoint. + Blob *string `json:"blob,omitempty"` + // Queue - Gets the queue endpoint. + Queue *string `json:"queue,omitempty"` + // Table - Gets the table endpoint. + Table *string `json:"table,omitempty"` + // File - Gets the file endpoint. + File *string `json:"file,omitempty"` + // Web - Gets the web endpoint. + Web *string `json:"web,omitempty"` + // Dfs - Gets the dfs endpoint. + Dfs *string `json:"dfs,omitempty"` +} + +// Identity identity for the resource. +type Identity struct { + // PrincipalID - The principal ID of resource identity. + PrincipalID *string `json:"principalId,omitempty"` + // TenantID - The tenant ID of resource. + TenantID *string `json:"tenantId,omitempty"` + // Type - The identity type. + Type *string `json:"type,omitempty"` +} + +// ImmutabilityPolicy the ImmutabilityPolicy property of a blob container, including Id, resource name, resource +// type, Etag. +type ImmutabilityPolicy struct { + autorest.Response `json:"-"` + // ImmutabilityPolicyProperty - The properties of an ImmutabilityPolicy of a blob container. + *ImmutabilityPolicyProperty `json:"properties,omitempty"` + // Etag - Resource Etag. + Etag *string `json:"etag,omitempty"` + // ID - Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - The name of the resource + Name *string `json:"name,omitempty"` + // Type - The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for ImmutabilityPolicy. +func (IP ImmutabilityPolicy) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if IP.ImmutabilityPolicyProperty != nil { + objectMap["properties"] = IP.ImmutabilityPolicyProperty + } + if IP.Etag != nil { + objectMap["etag"] = IP.Etag + } + if IP.ID != nil { + objectMap["id"] = IP.ID + } + if IP.Name != nil { + objectMap["name"] = IP.Name + } + if IP.Type != nil { + objectMap["type"] = IP.Type + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ImmutabilityPolicy struct. 
+func (IP *ImmutabilityPolicy) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var immutabilityPolicyProperty ImmutabilityPolicyProperty + err = json.Unmarshal(*v, &immutabilityPolicyProperty) + if err != nil { + return err + } + IP.ImmutabilityPolicyProperty = &immutabilityPolicyProperty + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + IP.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + IP.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + IP.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + IP.Type = &typeVar + } + } + } + + return nil +} + +// ImmutabilityPolicyProperties the properties of an ImmutabilityPolicy of a blob container. +type ImmutabilityPolicyProperties struct { + // ImmutabilityPolicyProperty - The properties of an ImmutabilityPolicy of a blob container. + *ImmutabilityPolicyProperty `json:"properties,omitempty"` + // Etag - ImmutabilityPolicy Etag. + Etag *string `json:"etag,omitempty"` + // UpdateHistory - The ImmutabilityPolicy update history of the blob container. + UpdateHistory *[]UpdateHistoryProperty `json:"updateHistory,omitempty"` +} + +// MarshalJSON is the custom marshaler for ImmutabilityPolicyProperties. +func (ipp ImmutabilityPolicyProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if ipp.ImmutabilityPolicyProperty != nil { + objectMap["properties"] = ipp.ImmutabilityPolicyProperty + } + if ipp.Etag != nil { + objectMap["etag"] = ipp.Etag + } + if ipp.UpdateHistory != nil { + objectMap["updateHistory"] = ipp.UpdateHistory + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ImmutabilityPolicyProperties struct. +func (ipp *ImmutabilityPolicyProperties) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var immutabilityPolicyProperty ImmutabilityPolicyProperty + err = json.Unmarshal(*v, &immutabilityPolicyProperty) + if err != nil { + return err + } + ipp.ImmutabilityPolicyProperty = &immutabilityPolicyProperty + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + ipp.Etag = &etag + } + case "updateHistory": + if v != nil { + var updateHistory []UpdateHistoryProperty + err = json.Unmarshal(*v, &updateHistory) + if err != nil { + return err + } + ipp.UpdateHistory = &updateHistory + } + } + } + + return nil +} + +// ImmutabilityPolicyProperty the properties of an ImmutabilityPolicy of a blob container. +type ImmutabilityPolicyProperty struct { + // ImmutabilityPeriodSinceCreationInDays - The immutability period for the blobs in the container since the policy creation, in days. + ImmutabilityPeriodSinceCreationInDays *int32 `json:"immutabilityPeriodSinceCreationInDays,omitempty"` + // State - The ImmutabilityPolicy state of a blob container, possible values include: Locked and Unlocked. 
Possible values include: 'Locked', 'Unlocked'
+    State ImmutabilityPolicyState `json:"state,omitempty"`
+}
+
+// IPRule IP rule with specific IP or IP range in CIDR format.
+type IPRule struct {
+    // IPAddressOrRange - Specifies the IP or IP range in CIDR format. Only IPV4 address is allowed.
+    IPAddressOrRange *string `json:"value,omitempty"`
+    // Action - The action of IP ACL rule. Possible values include: 'Allow'
+    Action Action `json:"action,omitempty"`
+}
+
+// KeyVaultProperties properties of key vault.
+type KeyVaultProperties struct {
+    // KeyName - The name of KeyVault key.
+    KeyName *string `json:"keyname,omitempty"`
+    // KeyVersion - The version of KeyVault key.
+    KeyVersion *string `json:"keyversion,omitempty"`
+    // KeyVaultURI - The Uri of KeyVault.
+    KeyVaultURI *string `json:"keyvaulturi,omitempty"`
+}
+
+// LegalHold the LegalHold property of a blob container.
+type LegalHold struct {
+    autorest.Response `json:"-"`
+    // HasLegalHold - The hasLegalHold public property is set to true by SRP if there is at least one existing tag. The hasLegalHold public property is set to false by SRP if all existing legal hold tags are cleared out. There can be a maximum of 1000 blob containers with hasLegalHold=true for a given account.
+    HasLegalHold *bool `json:"hasLegalHold,omitempty"`
+    // Tags - Each tag should be 3 to 23 alphanumeric characters and is normalized to lower case at SRP.
+    Tags *[]string `json:"tags,omitempty"`
+}
+
+// LegalHoldProperties the LegalHold property of a blob container.
+type LegalHoldProperties struct {
+    // HasLegalHold - The hasLegalHold public property is set to true by SRP if there is at least one existing tag. The hasLegalHold public property is set to false by SRP if all existing legal hold tags are cleared out. There can be a maximum of 1000 blob containers with hasLegalHold=true for a given account.
+    HasLegalHold *bool `json:"hasLegalHold,omitempty"`
+    // Tags - The list of LegalHold tags of a blob container.
+    Tags *[]TagProperty `json:"tags,omitempty"`
+}
+
+// ListAccountSasResponse the List SAS credentials operation response.
+type ListAccountSasResponse struct {
+    autorest.Response `json:"-"`
+    // AccountSasToken - List SAS credentials of storage account.
+    AccountSasToken *string `json:"accountSasToken,omitempty"`
+}
+
+// ListContainerItem the blob container properties to be listed out.
+type ListContainerItem struct {
+    // ContainerProperties - The blob container properties to be listed out.
+    *ContainerProperties `json:"properties,omitempty"`
+    // Etag - Resource Etag.
+    Etag *string `json:"etag,omitempty"`
+    // ID - Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+    ID *string `json:"id,omitempty"`
+    // Name - The name of the resource
+    Name *string `json:"name,omitempty"`
+    // Type - The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
+    Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for ListContainerItem.
+func (lci ListContainerItem) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if lci.ContainerProperties != nil { + objectMap["properties"] = lci.ContainerProperties + } + if lci.Etag != nil { + objectMap["etag"] = lci.Etag + } + if lci.ID != nil { + objectMap["id"] = lci.ID + } + if lci.Name != nil { + objectMap["name"] = lci.Name + } + if lci.Type != nil { + objectMap["type"] = lci.Type + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ListContainerItem struct. +func (lci *ListContainerItem) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var containerProperties ContainerProperties + err = json.Unmarshal(*v, &containerProperties) + if err != nil { + return err + } + lci.ContainerProperties = &containerProperties + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + lci.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + lci.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + lci.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + lci.Type = &typeVar + } + } + } + + return nil +} + +// ListContainerItems the list of blob containers. +type ListContainerItems struct { + autorest.Response `json:"-"` + // Value - The list of blob containers. + Value *[]ListContainerItem `json:"value,omitempty"` +} + +// ListServiceSasResponse the List service SAS credentials operation response. +type ListServiceSasResponse struct { + autorest.Response `json:"-"` + // ServiceSasToken - List service SAS credentials of specific resource. + ServiceSasToken *string `json:"serviceSasToken,omitempty"` +} + +// ManagementPoliciesRules the Storage Account ManagementPolicies Rules, in JSON format. See more details in: +// https://docs.microsoft.com/en-us/azure/storage/common/storage-lifecycle-managment-concepts. +type ManagementPoliciesRules struct { + // Policy - The Storage Account ManagementPolicies Rules, in JSON format. See more details in: https://docs.microsoft.com/en-us/azure/storage/common/storage-lifecycle-managment-concepts. + Policy interface{} `json:"policy,omitempty"` +} + +// ManagementPoliciesRulesSetParameter the Storage Account ManagementPolicies Rules, in JSON format. See more +// details in: https://docs.microsoft.com/en-us/azure/storage/common/storage-lifecycle-managment-concepts. +type ManagementPoliciesRulesSetParameter struct { + // ManagementPoliciesRules - The Storage Account ManagementPolicies Rules, in JSON format. See more details in: https://docs.microsoft.com/en-us/azure/storage/common/storage-lifecycle-managment-concepts. + *ManagementPoliciesRules `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for ManagementPoliciesRulesSetParameter. +func (mprsp ManagementPoliciesRulesSetParameter) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if mprsp.ManagementPoliciesRules != nil { + objectMap["properties"] = mprsp.ManagementPoliciesRules + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ManagementPoliciesRulesSetParameter struct.
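These generated marshaler pairs exist because ARM nests a resource's own fields under a "properties" key, which plain struct tags on an embedded pointer cannot express. As a rough round-trip sketch against the vendored 2018-07-01 package (the resource ID and etag values below are made up for illustration):

package main

import (
	"encoding/json"
	"fmt"

	storage "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage"
)

func main() {
	// ARM nests the container's own fields under "properties"; the custom
	// unmarshaler lifts them into the embedded ContainerProperties struct.
	body := []byte(`{"id":"/subscriptions/0000/resourceGroups/rg/providers/Microsoft.Storage/storageAccounts/acct/blobServices/default/containers/logs","name":"logs","type":"Microsoft.Storage/storageAccounts/blobServices/containers","etag":"\"0x8D74B\"","properties":{}}`)

	var item storage.ListContainerItem
	if err := json.Unmarshal(body, &item); err != nil {
		panic(err)
	}
	fmt.Println(*item.Name, *item.Etag)

	// Marshaling goes back out through the same flattened "properties" shape.
	out, _ := json.Marshal(item)
	fmt.Println(string(out))
}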
+func (mprsp *ManagementPoliciesRulesSetParameter) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var managementPoliciesRules ManagementPoliciesRules + err = json.Unmarshal(*v, &managementPoliciesRules) + if err != nil { + return err + } + mprsp.ManagementPoliciesRules = &managementPoliciesRules + } + } + } + + return nil +} + +// MetricSpecification metric specification of operation. +type MetricSpecification struct { + // Name - Name of metric specification. + Name *string `json:"name,omitempty"` + // DisplayName - Display name of metric specification. + DisplayName *string `json:"displayName,omitempty"` + // DisplayDescription - Display description of metric specification. + DisplayDescription *string `json:"displayDescription,omitempty"` + // Unit - Unit could be Bytes or Count. + Unit *string `json:"unit,omitempty"` + // Dimensions - Dimensions of blobs, including blob type and access tier. + Dimensions *[]Dimension `json:"dimensions,omitempty"` + // AggregationType - Aggregation type could be Average. + AggregationType *string `json:"aggregationType,omitempty"` + // FillGapWithZero - The property to decide whether to fill gaps with zero. + FillGapWithZero *bool `json:"fillGapWithZero,omitempty"` + // Category - The category this metric specification belongs to, could be Capacity. + Category *string `json:"category,omitempty"` + // ResourceIDDimensionNameOverride - Account Resource Id. + ResourceIDDimensionNameOverride *string `json:"resourceIdDimensionNameOverride,omitempty"` +} + +// NetworkRuleSet network rule set +type NetworkRuleSet struct { + // Bypass - Specifies whether traffic is bypassed for Logging/Metrics/AzureServices. Possible values are any combination of Logging|Metrics|AzureServices (For example, "Logging, Metrics"), or None to bypass none of those traffics. Possible values include: 'None', 'Logging', 'Metrics', 'AzureServices' + Bypass Bypass `json:"bypass,omitempty"` + // VirtualNetworkRules - Sets the virtual network rules + VirtualNetworkRules *[]VirtualNetworkRule `json:"virtualNetworkRules,omitempty"` + // IPRules - Sets the IP ACL rules + IPRules *[]IPRule `json:"ipRules,omitempty"` + // DefaultAction - Specifies the default action of allow or deny when no other rules match. Possible values include: 'DefaultActionAllow', 'DefaultActionDeny' + DefaultAction DefaultAction `json:"defaultAction,omitempty"` +} + +// Operation storage REST API operation definition. +type Operation struct { + // Name - Operation name: {provider}/{resource}/{operation} + Name *string `json:"name,omitempty"` + // Display - Display metadata associated with the operation. + Display *OperationDisplay `json:"display,omitempty"` + // Origin - The origin of operations. + Origin *string `json:"origin,omitempty"` + // OperationProperties - Properties of operation, include metric specifications. + *OperationProperties `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for Operation.
+func (o Operation) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if o.Name != nil { + objectMap["name"] = o.Name + } + if o.Display != nil { + objectMap["display"] = o.Display + } + if o.Origin != nil { + objectMap["origin"] = o.Origin + } + if o.OperationProperties != nil { + objectMap["properties"] = o.OperationProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Operation struct. +func (o *Operation) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + o.Name = &name + } + case "display": + if v != nil { + var display OperationDisplay + err = json.Unmarshal(*v, &display) + if err != nil { + return err + } + o.Display = &display + } + case "origin": + if v != nil { + var origin string + err = json.Unmarshal(*v, &origin) + if err != nil { + return err + } + o.Origin = &origin + } + case "properties": + if v != nil { + var operationProperties OperationProperties + err = json.Unmarshal(*v, &operationProperties) + if err != nil { + return err + } + o.OperationProperties = &operationProperties + } + } + } + + return nil +} + +// OperationDisplay display metadata associated with the operation. +type OperationDisplay struct { + // Provider - Service provider: Microsoft Storage. + Provider *string `json:"provider,omitempty"` + // Resource - Resource on which the operation is performed etc. + Resource *string `json:"resource,omitempty"` + // Operation - Type of operation: get, read, delete, etc. + Operation *string `json:"operation,omitempty"` + // Description - Description of the operation. + Description *string `json:"description,omitempty"` +} + +// OperationListResult result of the request to list Storage operations. It contains a list of operations and a URL +// link to get the next set of results. +type OperationListResult struct { + autorest.Response `json:"-"` + // Value - List of Storage operations supported by the Storage resource provider. + Value *[]Operation `json:"value,omitempty"` +} + +// OperationProperties properties of operation, include metric specifications. +type OperationProperties struct { + // ServiceSpecification - One property of operation, include metric specifications. + ServiceSpecification *ServiceSpecification `json:"serviceSpecification,omitempty"` +} + +// ProxyResource the resource model definition for an ARM proxy resource. It will have everything other than +// required location and tags +type ProxyResource struct { + // ID - Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - The name of the resource + Name *string `json:"name,omitempty"` + // Type - The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// Resource ... +type Resource struct { + // ID - Fully qualified resource Id for the resource.
Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - The name of the resource + Name *string `json:"name,omitempty"` + // Type - The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// Restriction the restriction because of which SKU cannot be used. +type Restriction struct { + // Type - The type of restrictions. As of now the only possible value for this is location. + Type *string `json:"type,omitempty"` + // Values - The value of restrictions. If the restriction type is set to location, this would be the different locations where the SKU is restricted. + Values *[]string `json:"values,omitempty"` + // ReasonCode - The reason for the restriction. As of now this can be "QuotaId" or "NotAvailableForSubscription". Quota Id is set when the SKU has requiredQuotas parameter as the subscription does not belong to that quota. The "NotAvailableForSubscription" is related to capacity at DC. Possible values include: 'QuotaID', 'NotAvailableForSubscription' + ReasonCode ReasonCode `json:"reasonCode,omitempty"` +} + +// ServiceSasParameters the parameters to list service SAS credentials of a specific resource. +type ServiceSasParameters struct { + // CanonicalizedResource - The canonical path to the signed resource. + CanonicalizedResource *string `json:"canonicalizedResource,omitempty"` + // Resource - The signed services accessible with the service SAS. Possible values include: Blob (b), Container (c), File (f), Share (s). Possible values include: 'SignedResourceB', 'SignedResourceC', 'SignedResourceF', 'SignedResourceS' + Resource SignedResource `json:"signedResource,omitempty"` + // Permissions - The signed permissions for the service SAS. Possible values include: Read (r), Write (w), Delete (d), List (l), Add (a), Create (c), Update (u) and Process (p). Possible values include: 'R', 'D', 'W', 'L', 'A', 'C', 'U', 'P' + Permissions Permissions `json:"signedPermission,omitempty"` + // IPAddressOrRange - An IP address or a range of IP addresses from which to accept requests. + IPAddressOrRange *string `json:"signedIp,omitempty"` + // Protocols - The protocol permitted for a request made with the account SAS. Possible values include: 'Httpshttp', 'HTTPS' + Protocols HTTPProtocol `json:"signedProtocol,omitempty"` + // SharedAccessStartTime - The time at which the SAS becomes valid. + SharedAccessStartTime *date.Time `json:"signedStart,omitempty"` + // SharedAccessExpiryTime - The time at which the shared access signature becomes invalid. + SharedAccessExpiryTime *date.Time `json:"signedExpiry,omitempty"` + // Identifier - A unique value up to 64 characters in length that correlates to an access policy specified for the container, queue, or table. + Identifier *string `json:"signedIdentifier,omitempty"` + // PartitionKeyStart - The start of partition key. + PartitionKeyStart *string `json:"startPk,omitempty"` + // PartitionKeyEnd - The end of partition key. + PartitionKeyEnd *string `json:"endPk,omitempty"` + // RowKeyStart - The start of row key. + RowKeyStart *string `json:"startRk,omitempty"` + // RowKeyEnd - The end of row key. + RowKeyEnd *string `json:"endRk,omitempty"` + // KeyToSign - The key to sign the account SAS token with. + KeyToSign *string `json:"keyToSign,omitempty"` + // CacheControl - The response header override for cache control.
+ CacheControl *string `json:"rscc,omitempty"` + // ContentDisposition - The response header override for content disposition. + ContentDisposition *string `json:"rscd,omitempty"` + // ContentEncoding - The response header override for content encoding. + ContentEncoding *string `json:"rsce,omitempty"` + // ContentLanguage - The response header override for content language. + ContentLanguage *string `json:"rscl,omitempty"` + // ContentType - The response header override for content type. + ContentType *string `json:"rsct,omitempty"` +} + +// ServiceSpecification one property of operation, include metric specifications. +type ServiceSpecification struct { + // MetricSpecifications - Metric specifications of operation. + MetricSpecifications *[]MetricSpecification `json:"metricSpecifications,omitempty"` +} + +// Sku the SKU of the storage account. +type Sku struct { + // Name - Gets or sets the sku name. Required for account creation; optional for update. Note that in older versions, sku name was called accountType. Possible values include: 'StandardLRS', 'StandardGRS', 'StandardRAGRS', 'StandardZRS', 'PremiumLRS', 'PremiumZRS' + Name SkuName `json:"name,omitempty"` + // Tier - Gets the sku tier. This is based on the SKU name. Possible values include: 'Standard', 'Premium' + Tier SkuTier `json:"tier,omitempty"` + // ResourceType - The type of the resource, usually it is 'storageAccounts'. + ResourceType *string `json:"resourceType,omitempty"` + // Kind - Indicates the type of storage account. Possible values include: 'Storage', 'StorageV2', 'BlobStorage', 'FileStorage', 'BlockBlobStorage' + Kind Kind `json:"kind,omitempty"` + // Locations - The set of locations in which the SKU is available. This will be supported and registered Azure Geo Regions (e.g. West US, East US, Southeast Asia, etc.). + Locations *[]string `json:"locations,omitempty"` + // Capabilities - The capability information in the specified sku, including file encryption, network acls, change notification, etc. + Capabilities *[]SKUCapability `json:"capabilities,omitempty"` + // Restrictions - The restrictions because of which SKU cannot be used. This is empty if there are no restrictions. + Restrictions *[]Restriction `json:"restrictions,omitempty"` +} + +// SKUCapability the capability information in the specified sku, including file encryption, network acls, change +// notification, etc. +type SKUCapability struct { + // Name - The name of the capability, e.g. file encryption, network acls, change notification, etc. + Name *string `json:"name,omitempty"` + // Value - A string value to indicate the state of the given capability. Possibly 'true' or 'false'. + Value *string `json:"value,omitempty"` +} + +// SkuListResult the response from the List Storage SKUs operation. +type SkuListResult struct { + autorest.Response `json:"-"` + // Value - Get the list result of storage SKUs and their properties. + Value *[]Sku `json:"value,omitempty"` +} + +// TagProperty a tag of the LegalHold of a blob container. +type TagProperty struct { + // Tag - The tag value. + Tag *string `json:"tag,omitempty"` + // Timestamp - Returns the date and time the tag was added. + Timestamp *date.Time `json:"timestamp,omitempty"` + // ObjectIdentifier - Returns the Object ID of the user who added the tag. + ObjectIdentifier *string `json:"objectIdentifier,omitempty"` + // TenantID - Returns the Tenant ID that issued the token for the user who added the tag.
+ TenantID *string `json:"tenantId,omitempty"` + // Upn - Returns the User Principal Name of the user who added the tag. + Upn *string `json:"upn,omitempty"` +} + +// TrackedResource the resource model definition for an ARM tracked top level resource +type TrackedResource struct { + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` + // Location - The geo-location where the resource lives + Location *string `json:"location,omitempty"` + // ID - Fully qualified resource Id for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string `json:"id,omitempty"` + // Name - The name of the resource + Name *string `json:"name,omitempty"` + // Type - The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts. + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for TrackedResource. +func (tr TrackedResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if tr.Tags != nil { + objectMap["tags"] = tr.Tags + } + if tr.Location != nil { + objectMap["location"] = tr.Location + } + if tr.ID != nil { + objectMap["id"] = tr.ID + } + if tr.Name != nil { + objectMap["name"] = tr.Name + } + if tr.Type != nil { + objectMap["type"] = tr.Type + } + return json.Marshal(objectMap) +} + +// UpdateHistoryProperty an update history of the ImmutabilityPolicy of a blob container. +type UpdateHistoryProperty struct { + // Update - The ImmutabilityPolicy update type of a blob container, possible values include: put, lock and extend. Possible values include: 'Put', 'Lock', 'Extend' + Update ImmutabilityPolicyUpdateType `json:"update,omitempty"` + // ImmutabilityPeriodSinceCreationInDays - The immutability period for the blobs in the container since the policy creation, in days. + ImmutabilityPeriodSinceCreationInDays *int32 `json:"immutabilityPeriodSinceCreationInDays,omitempty"` + // Timestamp - Returns the date and time the ImmutabilityPolicy was updated. + Timestamp *date.Time `json:"timestamp,omitempty"` + // ObjectIdentifier - Returns the Object ID of the user who updated the ImmutabilityPolicy. + ObjectIdentifier *string `json:"objectIdentifier,omitempty"` + // TenantID - Returns the Tenant ID that issued the token for the user who updated the ImmutabilityPolicy. + TenantID *string `json:"tenantId,omitempty"` + // Upn - Returns the User Principal Name of the user who updated the ImmutabilityPolicy. + Upn *string `json:"upn,omitempty"` +} + +// Usage describes Storage Resource Usage. +type Usage struct { + // Unit - Gets the unit of measurement. Possible values include: 'Count', 'Bytes', 'Seconds', 'Percent', 'CountsPerSecond', 'BytesPerSecond' + Unit UsageUnit `json:"unit,omitempty"` + // CurrentValue - Gets the current count of the allocated resources in the subscription. + CurrentValue *int32 `json:"currentValue,omitempty"` + // Limit - Gets the maximum count of the resources that can be allocated in the subscription. + Limit *int32 `json:"limit,omitempty"` + // Name - Gets the name of the type of usage. + Name *UsageName `json:"name,omitempty"` +} + +// UsageListResult the response from the List Usages operation. +type UsageListResult struct { + autorest.Response `json:"-"` + // Value - Gets or sets the list of Storage Resource Usages. + Value *[]Usage `json:"value,omitempty"` +} + +// UsageName the usage names that can be used; currently limited to StorageAccount.
+type UsageName struct { + // Value - Gets a string describing the resource name. + Value *string `json:"value,omitempty"` + // LocalizedValue - Gets a localized string describing the resource name. + LocalizedValue *string `json:"localizedValue,omitempty"` +} + +// VirtualNetworkRule virtual Network rule. +type VirtualNetworkRule struct { + // VirtualNetworkResourceID - Resource ID of a subnet, for example: /subscriptions/{subscriptionId}/resourceGroups/{groupName}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName}. + VirtualNetworkResourceID *string `json:"id,omitempty"` + // Action - The action of virtual network rule. Possible values include: 'Allow' + Action Action `json:"action,omitempty"` + // State - Gets the state of virtual network rule. Possible values include: 'StateProvisioning', 'StateDeprovisioning', 'StateSucceeded', 'StateFailed', 'StateNetworkSourceDeleted' + State State `json:"state,omitempty"` +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage/operations.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage/operations.go new file mode 100644 index 000000000000..20b1db7df6ce --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage/operations.go @@ -0,0 +1,98 @@ +package storage + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// OperationsClient is the Azure Storage Management API. +type OperationsClient struct { + BaseClient +} + +// NewOperationsClient creates an instance of the OperationsClient client. +func NewOperationsClient(subscriptionID string) OperationsClient { + return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client. +func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient { + return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List lists all of the available Storage Rest API operations.
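A rough usage sketch for the operations client introduced above (the subscription ID is a placeholder, and a real call would also need an Authorizer on the embedded autorest client):

package main

import (
	"context"
	"fmt"

	storage "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage"
)

func main() {
	// The /providers/Microsoft.Storage/operations endpoint is not
	// subscription-scoped, but the generated constructor still takes a
	// subscription ID for the shared BaseClient.
	client := storage.NewOperationsClient("00000000-0000-0000-0000-000000000000")
	// client.Authorizer = ... // e.g. a bearer authorizer built from an ADAL token

	result, err := client.List(context.Background())
	if err != nil {
		fmt.Println("listing operations failed:", err)
		return
	}
	if result.Value != nil {
		for _, op := range *result.Value {
			if op.Name != nil {
				fmt.Println(*op.Name) // {provider}/{resource}/{operation}
			}
		}
	}
}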
+func (client OperationsClient) List(ctx context.Context) (result OperationListResult, err error) { + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) { + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/providers/Microsoft.Storage/operations"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage/skus.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage/skus.go similarity index 89% rename from cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage/skus.go rename to cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage/skus.go index 20abb8c90f98..c5e9bd5c624a 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage/skus.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage/skus.go @@ -21,6 +21,7 @@ import ( "context" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" "net/http" ) @@ -41,6 +42,12 @@ func NewSkusClientWithBaseURI(baseURI string, subscriptionID string) SkusClient // List lists the available SKUs supported by Microsoft.Storage for given subscription. 
func (client SkusClient) List(ctx context.Context) (result SkuListResult, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.SkusClient", "List", err.Error()) + } + + req, err := client.ListPreparer(ctx) if err != nil { err = autorest.NewErrorWithError(err, "storage.SkusClient", "List", nil, "Failure preparing request") @@ -68,7 +75,7 @@ func (client SkusClient) ListPreparer(ctx context.Context) (*http.Request, error "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-10-01" + const APIVersion = "2018-07-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage/usages.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage/usages.go new file mode 100644 index 000000000000..82b291b5fb3b --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage/usages.go @@ -0,0 +1,112 @@ +package storage + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// UsagesClient is the Azure Storage Management API. +type UsagesClient struct { + BaseClient +} + +// NewUsagesClient creates an instance of the UsagesClient client. +func NewUsagesClient(subscriptionID string) UsagesClient { + return NewUsagesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewUsagesClientWithBaseURI creates an instance of the UsagesClient client. +func NewUsagesClientWithBaseURI(baseURI string, subscriptionID string) UsagesClient { + return UsagesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// ListByLocation gets the current usage count and the limit for the resources of the location under the subscription. +// Parameters: +// location - the location of the Azure Storage resource.
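One practical effect of the validation.Validate calls added to these 2018-07-01 clients is that a misconfigured client now fails before any request is prepared. A minimal sketch (subscription ID and location are placeholders):

package main

import (
	"context"
	"fmt"

	storage "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage"
)

func main() {
	// An empty subscription ID now fails fast in validation.Validate,
	// rather than producing a malformed request URL.
	client := storage.NewUsagesClient("") // placeholder: use a real subscription ID
	_, err := client.ListByLocation(context.Background(), "eastus")
	fmt.Println(err) // a validation error; no HTTP request was sent

	// With a real subscription ID and authorizer, result.Value would hold
	// per-resource Usage entries exposing Unit, CurrentValue and Limit.
}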
+func (client UsagesClient) ListByLocation(ctx context.Context, location string) (result UsageListResult, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: client.SubscriptionID, + Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.UsagesClient", "ListByLocation", err.Error()) + } + + req, err := client.ListByLocationPreparer(ctx, location) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.UsagesClient", "ListByLocation", nil, "Failure preparing request") + return + } + + resp, err := client.ListByLocationSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.UsagesClient", "ListByLocation", resp, "Failure sending request") + return + } + + result, err = client.ListByLocationResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.UsagesClient", "ListByLocation", resp, "Failure responding to request") + } + + return +} + +// ListByLocationPreparer prepares the ListByLocation request. +func (client UsagesClient) ListByLocationPreparer(ctx context.Context, location string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/locations/{location}/usages", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByLocationSender sends the ListByLocation request. The method will close the +// http.Response Body if it receives an error. +func (client UsagesClient) ListByLocationSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListByLocationResponder handles the response to the ListByLocation request. The method always +// closes the http.Response Body. +func (client UsagesClient) ListByLocationResponder(resp *http.Response) (result UsageListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage/version.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage/version.go new file mode 100644 index 000000000000..2b30253f6b5a --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage/version.go @@ -0,0 +1,30 @@ +package storage + +import "github.com/Azure/azure-sdk-for-go/version" + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return "Azure-SDK-For-Go/" + version.Number + " storage/2018-07-01" +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return version.Number +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go index 55238ab15ffc..f90050cb0029 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go @@ -169,7 +169,7 @@ func (q *Queue) GetMetadata(options *QueueServiceOptions) error { params = addTimeout(params, options.Timeout) headers = mergeHeaders(headers, headersFromStruct(*options)) } - uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), url.Values{"comp": {"metadata"}}) + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth) if err != nil { diff --git a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/version/version.go b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/version/version.go index f0ba0326824e..7f0f6f2b660e 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/version/version.go +++ b/cluster-autoscaler/vendor/github.com/Azure/azure-sdk-for-go/version/version.go @@ -18,4 +18,4 @@ package version // Changes may cause incorrect behavior and will be lost if the code is regenerated. // Number contains the semantic version of this SDK. 
-const Number = "v19.0.0" +const Number = "v21.3.0" diff --git a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/BUILD b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/BUILD new file mode 100644 index 000000000000..e77ba34e19ab --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/BUILD @@ -0,0 +1,48 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "authorization.go", + "autorest.go", + "client.go", + "error.go", + "preparer.go", + "responder.go", + "retriablerequest.go", + "retriablerequest_1.7.go", + "retriablerequest_1.8.go", + "sender.go", + "utility.go", + "version.go", + ], + importmap = "k8s.io/kubernetes/vendor/github.com/Azure/go-autorest/autorest", + importpath = "github.com/Azure/go-autorest/autorest", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library", + "//vendor/github.com/Azure/go-autorest/logger:go_default_library", + "//vendor/github.com/Azure/go-autorest/version:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//vendor/github.com/Azure/go-autorest/autorest/adal:all-srcs", + "//vendor/github.com/Azure/go-autorest/autorest/azure:all-srcs", + "//vendor/github.com/Azure/go-autorest/autorest/date:all-srcs", + "//vendor/github.com/Azure/go-autorest/autorest/to:all-srcs", + "//vendor/github.com/Azure/go-autorest/autorest/validation:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/BUILD b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/BUILD new file mode 100644 index 000000000000..9d6551166477 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/BUILD @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "config.go", + "devicetoken.go", + "persist.go", + "sender.go", + "token.go", + ], + importmap = "k8s.io/kubernetes/vendor/github.com/Azure/go-autorest/autorest/adal", + importpath = "github.com/Azure/go-autorest/autorest/adal", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/Azure/go-autorest/autorest/date:go_default_library", + "//vendor/github.com/Azure/go-autorest/version:go_default_library", + "//vendor/github.com/dgrijalva/jwt-go:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/config.go index bee5e61ddb2c..8c83a917ff7e 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/config.go +++ b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/config.go @@ -19,10 +19,6 @@ import ( "net/url" ) -const ( - activeDirectoryAPIVersion = "1.0" -) - // OAuthConfig represents the endpoints needed // in OAuth operations type OAuthConfig struct { @@ -46,11 +42,25 @@ func 
validateStringParam(param, name string) error { // NewOAuthConfig returns an OAuthConfig with tenant specific urls func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { + apiVer := "1.0" + return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer) +} + +// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant specific urls. +// If apiVersion is not nil the "api-version" query parameter will be appended to the endpoint URLs with the specified value. +func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) { if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil { return nil, err } + api := "" // it's legal for tenantID to be empty so don't validate it - const activeDirectoryEndpointTemplate = "%s/oauth2/%s?api-version=%s" + if apiVersion != nil { + if err := validateStringParam(*apiVersion, "apiVersion"); err != nil { + return nil, err + } + api = fmt.Sprintf("?api-version=%s", *apiVersion) + } + const activeDirectoryEndpointTemplate = "%s/oauth2/%s%s" u, err := url.Parse(activeDirectoryEndpoint) if err != nil { return nil, err @@ -59,15 +69,15 @@ func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, err if err != nil { return nil, err } - authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", activeDirectoryAPIVersion)) + authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api)) if err != nil { return nil, err } - tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", activeDirectoryAPIVersion)) + tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api)) if err != nil { return nil, err } - deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", activeDirectoryAPIVersion)) + deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api)) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go index 29f5e086cb46..834401e00dec 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go +++ b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go @@ -38,7 +38,7 @@ func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { return sf(r) } -// SendDecorator takes and possibility decorates, by wrapping, a Sender. Decorators may affect the +// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the // http.Request and pass it along or, first, pass the http.Request along then react to the // http.Response result. 
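To make the api-version change above concrete, here is a hedged sketch of the two adal entry points (the tenant ID is a placeholder; the endpoint comments show the expected shapes rather than verified output):

package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	// Existing callers keep the old behaviour: api-version=1.0 is appended.
	legacy, err := adal.NewOAuthConfig("https://login.microsoftonline.com", "my-tenant-id")
	if err != nil {
		panic(err)
	}
	// e.g. https://login.microsoftonline.com/my-tenant-id/oauth2/token?api-version=1.0
	fmt.Println(legacy.TokenEndpoint.String())

	// Passing nil omits the api-version query parameter entirely.
	bare, err := adal.NewOAuthConfigWithAPIVersion("https://login.microsoftonline.com", "my-tenant-id", nil)
	if err != nil {
		panic(err)
	}
	// e.g. https://login.microsoftonline.com/my-tenant-id/oauth2/token
	fmt.Println(bare.TokenEndpoint.String())
}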
type SendDecorator func(Sender) Sender diff --git a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/token.go index eec4dced7e56..2fd340d6922f 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/token.go +++ b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/adal/token.go @@ -29,12 +29,12 @@ import ( "net" "net/http" "net/url" - "strconv" "strings" "sync" "time" "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/version" "github.com/dgrijalva/jwt-go" ) @@ -96,18 +96,27 @@ type RefresherWithContext interface { type TokenRefreshCallback func(Token) error // Token encapsulates the access token used to authorize Azure requests. +// https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response type Token struct { AccessToken string `json:"access_token"` RefreshToken string `json:"refresh_token"` - ExpiresIn string `json:"expires_in"` - ExpiresOn string `json:"expires_on"` - NotBefore string `json:"not_before"` + ExpiresIn json.Number `json:"expires_in"` + ExpiresOn json.Number `json:"expires_on"` + NotBefore json.Number `json:"not_before"` Resource string `json:"resource"` Type string `json:"token_type"` } +func newToken() Token { + return Token{ + ExpiresIn: "0", + ExpiresOn: "0", + NotBefore: "0", + } +} + // IsZero returns true if the token object is zero-initialized. func (t Token) IsZero() bool { return t == Token{} @@ -115,12 +124,12 @@ func (t Token) IsZero() bool { // Expires returns the time.Time when the Token expires. func (t Token) Expires() time.Time { - s, err := strconv.Atoi(t.ExpiresOn) + s, err := t.ExpiresOn.Float64() if err != nil { s = -3600 } - expiration := date.NewUnixTimeFromSeconds(float64(s)) + expiration := date.NewUnixTimeFromSeconds(s) return time.Time(expiration).UTC() } @@ -217,6 +226,8 @@ func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalTo token := jwt.New(jwt.SigningMethodRS256) token.Header["x5t"] = thumbprint + x5c := []string{base64.StdEncoding.EncodeToString(secret.Certificate.Raw)} + token.Header["x5c"] = x5c token.Claims = jwt.MapClaims{ "aud": spt.inner.OauthConfig.TokenEndpoint.String(), "iss": spt.inner.ClientID, @@ -413,6 +424,7 @@ func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, reso } spt := &ServicePrincipalToken{ inner: servicePrincipalToken{ + Token: newToken(), OauthConfig: oauthConfig, Secret: secret, ClientID: id, @@ -652,6 +664,7 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedI spt := &ServicePrincipalToken{ inner: servicePrincipalToken{ + Token: newToken(), OauthConfig: OAuthConfig{ TokenEndpoint: *msiEndpointURL, }, @@ -778,6 +791,7 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource if err != nil { return fmt.Errorf("adal: Failed to build the refresh request. 
Error = '%v'", err) } + req.Header.Add("User-Agent", version.UserAgent()) req = req.WithContext(ctx) if !isIMDS(spt.inner.OauthConfig.TokenEndpoint) { v := url.Values{} diff --git a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/BUILD b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/BUILD new file mode 100644 index 000000000000..fd41c61c8231 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/BUILD @@ -0,0 +1,30 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "async.go", + "azure.go", + "environments.go", + "metadata_environment.go", + "rp.go", + ], + importmap = "k8s.io/kubernetes/vendor/github.com/Azure/go-autorest/autorest/azure", + importpath = "github.com/Azure/go-autorest/autorest/azure", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/Azure/go-autorest/autorest:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/async.go index e85e84fd1f21..e94e68567b2c 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/async.go +++ b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -119,7 +119,10 @@ func (f *Future) Done(sender autorest.Sender) (bool, error) { if err := f.pt.updatePollingState(f.pt.provisioningStateApplicable()); err != nil { return false, err } - if err := f.pt.updateHeaders(); err != nil { + if err := f.pt.initPollingMethod(); err != nil { + return false, err + } + if err := f.pt.updatePollingMethod(); err != nil { return false, err } return f.pt.hasTerminated(), f.pt.pollingError() @@ -165,8 +168,12 @@ func (f Future) WaitForCompletion(ctx context.Context, client autorest.Client) e // polling duration has been exceeded. It will retry failed polling attempts based on // the retry value defined in the client up to the maximum retry attempts. func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) error { - ctx, cancel := context.WithTimeout(ctx, client.PollingDuration) - defer cancel() + if d := client.PollingDuration; d != 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, d) + defer cancel() + } + done, err := f.Done(client) for attempts := 0; !done; done, err = f.Done(client) { if attempts >= client.RetryAttempts { @@ -264,7 +271,7 @@ type pollingTracker interface { // these methods can differ per tracker // checks the response headers and status code to determine the polling mechanism - updateHeaders() error + updatePollingMethod() error // checks the response for tracker-specific error conditions checkForErrors() error @@ -274,6 +281,10 @@ type pollingTracker interface { // methods common to all trackers + // initializes a tracker's polling URL and method, called for each iteration. + // these values can be overridden by each polling tracker as required. 
+ initPollingMethod() error + // initializes the tracker's internal state, call this when the tracker is created initializeState() error @@ -348,6 +359,10 @@ func (pt *pollingTrackerBase) initializeState() error { case http.StatusOK: if ps := pt.getProvisioningState(); ps != nil { pt.State = *ps + if pt.hasFailed() { + pt.updateErrorFromResponse() + return pt.pollingError() + } } else { pt.State = operationSucceeded } @@ -364,8 +379,9 @@ func (pt *pollingTrackerBase) initializeState() error { default: pt.State = operationFailed pt.updateErrorFromResponse() + return pt.pollingError() } - return nil + return pt.initPollingMethod() } func (pt pollingTrackerBase) getProvisioningState() *string { @@ -416,12 +432,14 @@ func (pt *pollingTrackerBase) pollForStatus(sender autorest.Sender) error { } else { // check response body for error content pt.updateErrorFromResponse() + err = pt.pollingError() } return err } // attempts to unmarshal a ServiceError type from the response body. // if that fails then make a best attempt at creating something meaningful. +// NOTE: this assumes that the async operation has failed. func (pt *pollingTrackerBase) updateErrorFromResponse() { var err error if pt.resp.ContentLength != 0 { @@ -431,8 +449,7 @@ func (pt *pollingTrackerBase) updateErrorFromResponse() { re := respErr{} defer pt.resp.Body.Close() var b []byte - b, err = ioutil.ReadAll(pt.resp.Body) - if err != nil { + if b, err = ioutil.ReadAll(pt.resp.Body); err != nil { goto Default } if err = json.Unmarshal(b, &re); err != nil { @@ -445,20 +462,29 @@ func (pt *pollingTrackerBase) updateErrorFromResponse() { goto Default } } - if re.ServiceError != nil { + // the unmarshaller will ensure re.ServiceError is non-nil + // even if there was no content unmarshalled so check the code. + if re.ServiceError.Code != "" { pt.Err = re.ServiceError return } } Default: se := &ServiceError{ - Code: fmt.Sprintf("HTTP status code %v", pt.resp.StatusCode), - Message: pt.resp.Status, + Code: pt.pollingStatus(), + Message: "The async operation failed.", } if err != nil { se.InnerError = make(map[string]interface{}) se.InnerError["unmarshalError"] = err.Error() } + // stick the response body into the error object in hopes + // it contains something useful to help diagnose the failure. + if len(pt.rawBody) > 0 { + se.AdditionalInfo = []map[string]interface{}{ + pt.rawBody, + } + } pt.Err = se } @@ -538,13 +564,33 @@ func (pt pollingTrackerBase) baseCheckForErrors() error { return nil } +// default initialization of polling URL/method. each verb tracker will update this as required. 
+func (pt *pollingTrackerBase) initPollingMethod() error { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + return nil + } + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh != "" { + pt.URI = lh + pt.Pm = PollingLocation + return nil + } + // it's ok if we didn't find a polling header, this will be handled elsewhere + return nil +} + // DELETE type pollingTrackerDelete struct { pollingTrackerBase } -func (pt *pollingTrackerDelete) updateHeaders() error { +func (pt *pollingTrackerDelete) updatePollingMethod() error { // for 201 the Location header is required if pt.resp.StatusCode == http.StatusCreated { if lh, err := getURLFromLocationHeader(pt.resp); err != nil { @@ -600,7 +646,7 @@ type pollingTrackerPatch struct { pollingTrackerBase } -func (pt *pollingTrackerPatch) updateHeaders() error { +func (pt *pollingTrackerPatch) updatePollingMethod() error { // by default we can use the original URL for polling and final GET if pt.URI == "" { pt.URI = pt.resp.Request.URL.String() @@ -658,7 +704,7 @@ type pollingTrackerPost struct { pollingTrackerBase } -func (pt *pollingTrackerPost) updateHeaders() error { +func (pt *pollingTrackerPost) updatePollingMethod() error { // 201 requires Location header if pt.resp.StatusCode == http.StatusCreated { if lh, err := getURLFromLocationHeader(pt.resp); err != nil { @@ -714,7 +760,7 @@ type pollingTrackerPut struct { pollingTrackerBase } -func (pt *pollingTrackerPut) updateHeaders() error { +func (pt *pollingTrackerPut) updatePollingMethod() error { // by default we can use the original URL for polling and final GET if pt.URI == "" { pt.URI = pt.resp.Request.URL.String() @@ -808,7 +854,7 @@ func createPollingTracker(resp *http.Response) (pollingTracker, error) { // this initializes the polling header values, we do this during creation in case the // initial response send us invalid values; this way the API call will return a non-nil // error (not doing this means the error shows up in Future.Done) - return pt, pt.updateHeaders() + return pt, pt.updatePollingMethod() } // gets the polling URL from the Azure-AsyncOperation header. diff --git a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go index a702ffe75172..3a0a439ff930 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go +++ b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go @@ -44,11 +44,12 @@ const ( // ServiceError encapsulates the error response from an Azure service. // It adhears to the OData v4 specification for error responses. 
type ServiceError struct { - Code string `json:"code"` - Message string `json:"message"` - Target *string `json:"target"` - Details []map[string]interface{} `json:"details"` - InnerError map[string]interface{} `json:"innererror"` + Code string `json:"code"` + Message string `json:"message"` + Target *string `json:"target"` + Details []map[string]interface{} `json:"details"` + InnerError map[string]interface{} `json:"innererror"` + AdditionalInfo []map[string]interface{} `json:"additionalInfo"` } func (se ServiceError) Error() string { @@ -74,6 +75,14 @@ func (se ServiceError) Error() string { result += fmt.Sprintf(" InnerError=%v", string(d)) } + if se.AdditionalInfo != nil { + d, err := json.Marshal(se.AdditionalInfo) + if err != nil { + result += fmt.Sprintf(" AdditionalInfo=%v", se.AdditionalInfo) + } + result += fmt.Sprintf(" AdditionalInfo=%v", string(d)) + } + return result } @@ -86,44 +95,47 @@ func (se *ServiceError) UnmarshalJSON(b []byte) error { // http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091 type serviceError1 struct { - Code string `json:"code"` - Message string `json:"message"` - Target *string `json:"target"` - Details []map[string]interface{} `json:"details"` - InnerError map[string]interface{} `json:"innererror"` + Code string `json:"code"` + Message string `json:"message"` + Target *string `json:"target"` + Details []map[string]interface{} `json:"details"` + InnerError map[string]interface{} `json:"innererror"` + AdditionalInfo []map[string]interface{} `json:"additionalInfo"` } type serviceError2 struct { - Code string `json:"code"` - Message string `json:"message"` - Target *string `json:"target"` - Details map[string]interface{} `json:"details"` - InnerError map[string]interface{} `json:"innererror"` + Code string `json:"code"` + Message string `json:"message"` + Target *string `json:"target"` + Details map[string]interface{} `json:"details"` + InnerError map[string]interface{} `json:"innererror"` + AdditionalInfo []map[string]interface{} `json:"additionalInfo"` } se1 := serviceError1{} err := json.Unmarshal(b, &se1) if err == nil { - se.populate(se1.Code, se1.Message, se1.Target, se1.Details, se1.InnerError) + se.populate(se1.Code, se1.Message, se1.Target, se1.Details, se1.InnerError, se1.AdditionalInfo) return nil } se2 := serviceError2{} err = json.Unmarshal(b, &se2) if err == nil { - se.populate(se2.Code, se2.Message, se2.Target, nil, se2.InnerError) + se.populate(se2.Code, se2.Message, se2.Target, nil, se2.InnerError, se2.AdditionalInfo) se.Details = append(se.Details, se2.Details) return nil } return err } -func (se *ServiceError) populate(code, message string, target *string, details []map[string]interface{}, inner map[string]interface{}) { +func (se *ServiceError) populate(code, message string, target *string, details []map[string]interface{}, inner map[string]interface{}, additional []map[string]interface{}) { se.Code = code se.Message = message se.Target = target se.Details = details se.InnerError = inner + se.AdditionalInfo = additional } // RequestError describes an error response returned by Azure service. 
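A small sketch of the new AdditionalInfo field surviving the custom unmarshaler; the error body below is fabricated in the OData style for illustration:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	// A made-up OData-style error body carrying the additionalInfo block
	// that ARM attaches to some failures.
	body := []byte(`{
		"code": "Conflict",
		"message": "The resource is locked.",
		"additionalInfo": [{"type": "ResourceLock", "info": {"lockId": "lock-1"}}]
	}`)

	var se azure.ServiceError
	if err := json.Unmarshal(body, &se); err != nil {
		panic(err)
	}
	// Error() now renders the AdditionalInfo payload alongside code/message,
	// which is what makes the failed-LRO diagnostics above more useful.
	fmt.Println(se.Error())
}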
diff --git a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go index bd34f0ed5a58..86ce9f2b5b1e 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go +++ b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go @@ -140,8 +140,8 @@ func register(client autorest.Client, originalReq *http.Request, re RequestError } // poll for registered provisioning state - now := time.Now() - for err == nil && time.Since(now) < client.PollingDuration { + registrationStartTime := time.Now() + for err == nil && (client.PollingDuration == 0 || (client.PollingDuration != 0 && time.Since(registrationStartTime) < client.PollingDuration)) { // taken from the resources SDK // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45 preparer := autorest.CreatePreparer( @@ -183,7 +183,7 @@ func register(client autorest.Client, originalReq *http.Request, re RequestError return originalReq.Context().Err() } } - if !(time.Since(now) < client.PollingDuration) { + if client.PollingDuration != 0 && !(time.Since(registrationStartTime) < client.PollingDuration) { return errors.New("polling for resource provider registration has exceeded the polling duration") } return err diff --git a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/client.go b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/client.go index 4e92dcad077a..48eb0e8b73bc 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/client.go +++ b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/client.go @@ -22,8 +22,11 @@ import ( "log" "net/http" "net/http/cookiejar" - "runtime" + "strings" "time" + + "github.com/Azure/go-autorest/logger" + "github.com/Azure/go-autorest/version" ) const ( @@ -41,15 +44,6 @@ const ( ) var ( - // defaultUserAgent builds a string containing the Go version, system archityecture and OS, - // and the go-autorest version. - defaultUserAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", - runtime.Version(), - runtime.GOARCH, - runtime.GOOS, - Version(), - ) - // StatusCodesForRetry are a defined group of status code for which the client will retry StatusCodesForRetry = []int{ http.StatusRequestTimeout, // 408 @@ -153,6 +147,7 @@ type Client struct { PollingDelay time.Duration // PollingDuration sets the maximum polling time after which an error is returned. + // Setting this to zero will use the provided context to control the duration. PollingDuration time.Duration // RetryAttempts sets the default number of retry attempts for client. 
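The zero-PollingDuration behaviour introduced above changes how long-running-operation waits are bounded. A minimal sketch of a caller opting in (package and function names here are hypothetical):

package lropoll

import (
	"context"
	"time"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

// waitWithCallerTimeout leans on the new zero-value semantics: with
// client.PollingDuration set to 0, WaitForCompletionRef no longer wraps the
// context in its own timeout, so the caller's deadline is the only bound.
func waitWithCallerTimeout(future *azure.Future, client autorest.Client) error {
	client.PollingDuration = 0 // 0 == let the provided context control the duration

	ctx, cancel := context.WithTimeout(context.Background(), 45*time.Minute)
	defer cancel()
	return future.WaitForCompletionRef(ctx, client)
}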
@@ -179,7 +174,7 @@ func NewClientWithUserAgent(ua string) Client { PollingDuration: DefaultPollingDuration, RetryAttempts: DefaultRetryAttempts, RetryDuration: DefaultRetryDuration, - UserAgent: defaultUserAgent, + UserAgent: version.UserAgent(), } c.Sender = c.sender() c.AddToUserAgent(ua) @@ -216,8 +211,17 @@ func (c Client) Do(r *http.Request) (*http.Response, error) { } return resp, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed") } - + logger.Instance.WriteRequest(r, logger.Filter{ + Header: func(k string, v []string) (bool, []string) { + // remove the auth token from the log + if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "Ocp-Apim-Subscription-Key") { + v = []string{"**REDACTED**"} + } + return true, v + }, + }) resp, err := SendWithSender(c.sender(), r) + logger.Instance.WriteResponse(resp, logger.Filter{}) Respond(resp, c.ByInspecting()) return resp, err } diff --git a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/date/BUILD b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/date/BUILD new file mode 100644 index 000000000000..19fce15d84f4 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/date/BUILD @@ -0,0 +1,29 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "date.go", + "time.go", + "timerfc1123.go", + "unixtime.go", + "utility.go", + ], + importmap = "k8s.io/kubernetes/vendor/github.com/Azure/go-autorest/autorest/date", + importpath = "github.com/Azure/go-autorest/autorest/date", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/sender.go b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/sender.go index 28c7222d6b6d..8811cb2e7f9b 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/sender.go +++ b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/sender.go @@ -234,7 +234,7 @@ func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) Se } delayed := DelayWithRetryAfter(resp, r.Context().Done()) if !delayed && !DelayForBackoff(backoff, attempt, r.Context().Done()) { - return nil, r.Context().Err() + return resp, r.Context().Err() } // don't count a 429 against the number of attempts // so that we continue to retry until it succeeds diff --git a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/to/BUILD b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/to/BUILD new file mode 100644 index 000000000000..e435f0e4d67e --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/to/BUILD @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["convert.go"], + importmap = "k8s.io/kubernetes/vendor/github.com/Azure/go-autorest/autorest/to", + importpath = "github.com/Azure/go-autorest/autorest/to", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = 
["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/validation/BUILD b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/validation/BUILD new file mode 100644 index 000000000000..411f4e8a24f7 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/validation/BUILD @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "error.go", + "validation.go", + ], + importmap = "k8s.io/kubernetes/vendor/github.com/Azure/go-autorest/autorest/validation", + importpath = "github.com/Azure/go-autorest/autorest/validation", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/version.go b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/version.go index e32cd68fecac..3c6451546bae 100644 --- a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/version.go +++ b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/autorest/version.go @@ -1,5 +1,7 @@ package autorest +import "github.com/Azure/go-autorest/version" + // Copyright 2017 Microsoft Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,5 +18,5 @@ package autorest // Version returns the semantic version (see http://semver.org). func Version() string { - return "v10.12.0" + return version.Number } diff --git a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/logger/BUILD b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/logger/BUILD new file mode 100644 index 000000000000..d43bee7c193f --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/logger/BUILD @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["logger.go"], + importmap = "k8s.io/kubernetes/vendor/github.com/Azure/go-autorest/logger", + importpath = "github.com/Azure/go-autorest/logger", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/logger/logger.go b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/logger/logger.go new file mode 100644 index 000000000000..bd9e0a3b1ee0 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/logger/logger.go @@ -0,0 +1,328 @@ +package logger + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "strings" + "sync" + "time" +) + +// LevelType tells a logger the minimum level to log. When code reports a log entry, +// the LogLevel indicates the level of the log entry. The logger only records entries +// whose level is at least the level it was told to log. See the Log* constants. +// For example, if a logger is configured with LogError, then LogError, LogPanic, +// and LogFatal entries will be logged; lower level entries are ignored. +type LevelType uint32 + +const ( + // LogNone tells a logger not to log any entries passed to it. + LogNone LevelType = iota + + // LogFatal tells a logger to log all LogFatal entries passed to it. + LogFatal + + // LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it. + LogPanic + + // LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it. + LogError + + // LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogWarning + + // LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogInfo + + // LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogDebug +) + +const ( + logNone = "NONE" + logFatal = "FATAL" + logPanic = "PANIC" + logError = "ERROR" + logWarning = "WARNING" + logInfo = "INFO" + logDebug = "DEBUG" + logUnknown = "UNKNOWN" +) + +// ParseLevel converts the specified string into the corresponding LevelType. +func ParseLevel(s string) (lt LevelType, err error) { + switch strings.ToUpper(s) { + case logFatal: + lt = LogFatal + case logPanic: + lt = LogPanic + case logError: + lt = LogError + case logWarning: + lt = LogWarning + case logInfo: + lt = LogInfo + case logDebug: + lt = LogDebug + default: + err = fmt.Errorf("bad log level '%s'", s) + } + return +} + +// String implements the stringer interface for LevelType. +func (lt LevelType) String() string { + switch lt { + case LogNone: + return logNone + case LogFatal: + return logFatal + case LogPanic: + return logPanic + case LogError: + return logError + case LogWarning: + return logWarning + case LogInfo: + return logInfo + case LogDebug: + return logDebug + default: + return logUnknown + } +} + +// Filter defines functions for filtering HTTP request/response content. +type Filter struct { + // URL returns a potentially modified string representation of a request URL. + URL func(u *url.URL) string + + // Header returns a potentially modified set of values for the specified key. + // To completely exclude the header key/values return false. + Header func(key string, val []string) (bool, []string) + + // Body returns a potentially modified request/response body. + Body func(b []byte) []byte +} + +func (f Filter) processURL(u *url.URL) string { + if f.URL == nil { + return u.String() + } + return f.URL(u) +} + +func (f Filter) processHeader(k string, val []string) (bool, []string) { + if f.Header == nil { + return true, val + } + return f.Header(k, val) +} + +func (f Filter) processBody(b []byte) []byte { + if f.Body == nil { + return b + } + return f.Body(b) +} + +// Writer defines methods for writing to a logging facility. +type Writer interface { + // Writeln writes the specified message with the standard log entry header and new-line character. 
+ Writeln(level LevelType, message string) + + // Writef writes the specified format specifier with the standard log entry header and no new-line character. + Writef(level LevelType, format string, a ...interface{}) + + // WriteRequest writes the specified HTTP request to the logger if the log level is greater than + // or equal to LogInfo. The request body, if set, is logged at level LogDebug or higher. + // Custom filters can be specified to exclude URL, header, and/or body content from the log. + // By default no request content is excluded. + WriteRequest(req *http.Request, filter Filter) + + // WriteResponse writes the specified HTTP response to the logger if the log level is greater than + // or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher. + // Custom filters can be specified to exclude URL, header, and/or body content from the log. + // By default no response content is excluded. + WriteResponse(resp *http.Response, filter Filter) +} + +// Instance is the default log writer initialized during package init. +// This can be replaced with a custom implementation as required. +var Instance Writer + +// default log level +var logLevel = LogNone + +// Level returns the value specified in AZURE_GO_AUTOREST_LOG_LEVEL. +// If no value was specified the default value is LogNone. +// Custom loggers can call this to retrieve the configured log level. +func Level() LevelType { + return logLevel +} + +func init() { + // separated for testing purposes + initDefaultLogger() +} + +func initDefaultLogger() { + // init with nilLogger so callers don't have to do a nil check on Default + Instance = nilLogger{} + llStr := strings.ToLower(os.Getenv("AZURE_GO_SDK_LOG_LEVEL")) + if llStr == "" { + return + } + var err error + logLevel, err = ParseLevel(llStr) + if err != nil { + fmt.Fprintf(os.Stderr, "go-autorest: failed to parse log level: %s\n", err.Error()) + return + } + if logLevel == LogNone { + return + } + // default to stderr + dest := os.Stderr + lfStr := os.Getenv("AZURE_GO_SDK_LOG_FILE") + if strings.EqualFold(lfStr, "stdout") { + dest = os.Stdout + } else if lfStr != "" { + lf, err := os.Create(lfStr) + if err == nil { + dest = lf + } else { + fmt.Fprintf(os.Stderr, "go-autorest: failed to create log file, using stderr: %s\n", err.Error()) + } + } + Instance = fileLogger{ + logLevel: logLevel, + mu: &sync.Mutex{}, + logFile: dest, + } +} + +// the nil logger does nothing +type nilLogger struct{} + +func (nilLogger) Writeln(LevelType, string) {} + +func (nilLogger) Writef(LevelType, string, ...interface{}) {} + +func (nilLogger) WriteRequest(*http.Request, Filter) {} + +func (nilLogger) WriteResponse(*http.Response, Filter) {} + +// A File is used instead of a Logger so the stream can be flushed after every write. 
+type fileLogger struct { + logLevel LevelType + mu *sync.Mutex // for synchronizing writes to logFile + logFile *os.File +} + +func (fl fileLogger) Writeln(level LevelType, message string) { + fl.Writef(level, "%s\n", message) +} + +func (fl fileLogger) Writef(level LevelType, format string, a ...interface{}) { + if fl.logLevel >= level { + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprintf(fl.logFile, "%s %s", entryHeader(level), fmt.Sprintf(format, a...)) + fl.logFile.Sync() + } +} + +func (fl fileLogger) WriteRequest(req *http.Request, filter Filter) { + if req == nil || fl.logLevel < LogInfo { + return + } + b := &bytes.Buffer{} + fmt.Fprintf(b, "%s REQUEST: %s %s\n", entryHeader(LogInfo), req.Method, filter.processURL(req.URL)) + // dump headers + for k, v := range req.Header { + if ok, mv := filter.processHeader(k, v); ok { + fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) + } + } + if fl.shouldLogBody(req.Header, req.Body) { + // dump body + body, err := ioutil.ReadAll(req.Body) + if err == nil { + fmt.Fprintln(b, string(filter.processBody(body))) + if nc, ok := req.Body.(io.Seeker); ok { + // rewind to the beginning + nc.Seek(0, io.SeekStart) + } else { + // recreate the body + req.Body = ioutil.NopCloser(bytes.NewReader(body)) + } + } else { + fmt.Fprintf(b, "failed to read body: %v\n", err) + } + } + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprint(fl.logFile, b.String()) + fl.logFile.Sync() +} + +func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) { + if resp == nil || fl.logLevel < LogInfo { + return + } + b := &bytes.Buffer{} + fmt.Fprintf(b, "%s RESPONSE: %d %s\n", entryHeader(LogInfo), resp.StatusCode, filter.processURL(resp.Request.URL)) + // dump headers + for k, v := range resp.Header { + if ok, mv := filter.processHeader(k, v); ok { + fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) + } + } + if fl.shouldLogBody(resp.Header, resp.Body) { + // dump body + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err == nil { + fmt.Fprintln(b, string(filter.processBody(body))) + resp.Body = ioutil.NopCloser(bytes.NewReader(body)) + } else { + fmt.Fprintf(b, "failed to read body: %v\n", err) + } + } + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprint(fl.logFile, b.String()) + fl.logFile.Sync() +} + +// returns true if the provided body should be included in the log +func (fl fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool { + ct := header.Get("Content-Type") + return fl.logLevel >= LogDebug && body != nil && strings.Index(ct, "application/octet-stream") == -1 +} + +// creates standard header for log entries, it contains a timestamp and the log level +func entryHeader(level LevelType) string { + // this format provides a fixed number of digits so the size of the timestamp is constant + return fmt.Sprintf("(%s) %s:", time.Now().Format("2006-01-02T15:04:05.0000000Z07:00"), level.String()) +} diff --git a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/version/BUILD b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/version/BUILD new file mode 100644 index 000000000000..5c8d025bde41 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/version/BUILD @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["version.go"], + importmap = "k8s.io/kubernetes/vendor/github.com/Azure/go-autorest/version", + importpath = "github.com/Azure/go-autorest/version", + visibility = ["//visibility:public"], +) + +filegroup( + name 
= "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/github.com/Azure/go-autorest/version/version.go b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/version/version.go new file mode 100644 index 000000000000..6323c288fbfc --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Azure/go-autorest/version/version.go @@ -0,0 +1,37 @@ +package version + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "runtime" +) + +// Number contains the semantic version of this SDK. +const Number = "v11.1.0" + +var ( + userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + Number, + ) +) + +// UserAgent returns a string containing the Go version, system archityecture and OS, and the go-autorest version. +func UserAgent() string { + return userAgent +} diff --git a/cluster-autoscaler/vendor/github.com/Rican7/retry/.travis.yml b/cluster-autoscaler/vendor/github.com/Rican7/retry/.travis.yml new file mode 100644 index 000000000000..399e7b64f27d --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Rican7/retry/.travis.yml @@ -0,0 +1,39 @@ +language: go + +go: + - 1.6 + - tip + +sudo: false + +before_install: + # Install tools necessary to report code-coverage to Coveralls.io + - go get github.com/mattn/goveralls + + # Export some environment variables + - export GO_TEST_COVERAGE_FILE_NAME='coverage.out' + +install: + # Get all imported packages + - make install-deps install-deps-dev + + # Basic build errors + - make build + +script: + # Lint + - make format-lint + - make import-lint + - make copyright-lint + + # Run tests + - make test-with-coverage-profile + +after_success: + # Report our code-coverage to Coveralls.io + - goveralls -service=travis-ci -coverprofile="${GO_TEST_COVERAGE_FILE_NAME}" + +matrix: + allow_failures: + - go: tip + fast_finish: true diff --git a/cluster-autoscaler/vendor/github.com/Rican7/retry/LICENSE b/cluster-autoscaler/vendor/github.com/Rican7/retry/LICENSE new file mode 100644 index 000000000000..361507628d9d --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Rican7/retry/LICENSE @@ -0,0 +1,19 @@ +Copyright (C) 2016 Trevor N. Suarez (Rican7) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE +OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/cluster-autoscaler/vendor/github.com/Rican7/retry/Makefile b/cluster-autoscaler/vendor/github.com/Rican7/retry/Makefile new file mode 100644 index 000000000000..77d4bcd7f722 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Rican7/retry/Makefile @@ -0,0 +1,83 @@ +# Define some VCS context +PARENT_BRANCH ?= master + +# Set the mode for code-coverage +GO_TEST_COVERAGE_MODE ?= count +GO_TEST_COVERAGE_FILE_NAME ?= coverage.out + +# Set flags for `gofmt` +GOFMT_FLAGS ?= -s + +# Set a default `min_confidence` value for `golint` +GOLINT_MIN_CONFIDENCE ?= 0.3 + + +all: install-deps build install + +clean: + go clean -i -x ./... + +build: + go build -v ./... + +install: + go install ./... + +install-deps: + go get -d -t ./... + +install-deps-dev: install-deps + go get github.com/golang/lint/golint + go get golang.org/x/tools/cmd/goimports + +update-deps: + go get -d -t -u ./... + +update-deps-dev: update-deps + go get -u github.com/golang/lint/golint + go get -u golang.org/x/tools/cmd/goimports + +test: + go test -v ./... + +test-with-coverage: + go test -cover ./... + +test-with-coverage-formatted: + go test -cover ./... | column -t | sort -r + +test-with-coverage-profile: + echo "mode: ${GO_TEST_COVERAGE_MODE}" > ${GO_TEST_COVERAGE_FILE_NAME} + for package in $$(go list ./...); do \ + go test -covermode ${GO_TEST_COVERAGE_MODE} -coverprofile "coverage_$${package##*/}.out" "$${package}"; \ + sed '1d' "coverage_$${package##*/}.out" >> ${GO_TEST_COVERAGE_FILE_NAME}; \ + done + +format-lint: + errors=$$(gofmt -l ${GOFMT_FLAGS} .); if [ "$${errors}" != "" ]; then echo "$${errors}"; exit 1; fi + +import-lint: + errors=$$(goimports -l .); if [ "$${errors}" != "" ]; then echo "$${errors}"; exit 1; fi + +style-lint: + errors=$$(golint -min_confidence=${GOLINT_MIN_CONFIDENCE} ./...); if [ "$${errors}" != "" ]; then echo "$${errors}"; exit 1; fi + +copyright-lint: + @old_dates=$$(git diff --diff-filter=ACMRTUXB --name-only "${PARENT_BRANCH}" | xargs grep -E '[Cc]opyright(\s+)[©Cc]?(\s+)[0-9]{4}' | grep -E -v "[Cc]opyright(\s+)[©Cc]?(\s+)$$(date '+%Y')"); if [ "$${old_dates}" != "" ]; then printf "The following files contain outdated copyrights:\n$${old_dates}\n\nThis can be fixed with 'make copyright-fix'\n"; exit 1; fi + +lint: install-deps-dev format-lint import-lint style-lint copyright-lint + +format-fix: + gofmt -w ${GOFMT_FLAGS} . + +import-fix: + goimports -w . + +copyright-fix: + @git diff --diff-filter=ACMRTUXB --name-only "${PARENT_BRANCH}" | xargs -I '_FILENAME' -- sh -c 'sed -i.bak "s/\([Cc]opyright\([[:space:]][©Cc]\{0,1\}[[:space:]]*\)\)[0-9]\{4\}/\1"$$(date '+%Y')"/g" _FILENAME && rm _FILENAME.bak' + +vet: + go vet ./... 
+ + +.PHONY: all clean build install install-deps install-deps-dev update-deps update-deps-dev test test-with-coverage test-with-coverage-formatted test-with-coverage-profile format-lint import-lint style-lint copyright-lint lint format-fix import-fix copyright-fix vet diff --git a/cluster-autoscaler/vendor/github.com/Rican7/retry/README.md b/cluster-autoscaler/vendor/github.com/Rican7/retry/README.md new file mode 100644 index 000000000000..bccf4dec76c0 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Rican7/retry/README.md @@ -0,0 +1,101 @@ +# retry + +[![Build Status](https://travis-ci.org/Rican7/retry.svg?branch=master)](https://travis-ci.org/Rican7/retry) +[![Coverage Status](https://coveralls.io/repos/github/Rican7/retry/badge.svg)](https://coveralls.io/github/Rican7/retry) +[![Go Report Card](https://goreportcard.com/badge/Rican7/retry)](http://goreportcard.com/report/Rican7/retry) +[![GoDoc](https://godoc.org/github.com/Rican7/retry?status.png)](https://godoc.org/github.com/Rican7/retry) +[![Latest Stable Version](https://img.shields.io/github/release/Rican7/retry.svg?style=flat)](https://github.com/Rican7/retry/releases) + +A simple, stateless, functional mechanism to perform actions repetitively until successful. + + +## Project Status + +This project is currently in "pre-release". While the code is heavily tested, the API may change. +Vendor (commit or lock) this dependency if you plan on using it. + + +## Install + +`go get github.com/Rican7/retry` + + +## Examples + +### Basic + +```go +retry.Retry(func(attempt uint) error { + return nil // Do something that may or may not cause an error +}) +``` + +### File Open + +```go +const logFilePath = "/var/log/myapp.log" + +var logFile *os.File + +err := retry.Retry(func(attempt uint) error { + var err error + + logFile, err = os.Open(logFilePath) + + return err +}) + +if nil != err { + log.Fatalf("Unable to open file %q with error %q", logFilePath, err) +} + +logFile.Chdir() // Do something with the file +``` + +### HTTP request with strategies and backoff + +```go +var response *http.Response + +action := func(attempt uint) error { + var err error + + response, err = http.Get("https://api.github.com/repos/Rican7/retry") + + if nil == err && nil != response && response.StatusCode > 200 { + err = fmt.Errorf("failed to fetch (attempt #%d) with status code: %d", attempt, response.StatusCode) + } + + return err +} + +err := retry.Retry( + action, + strategy.Limit(5), + strategy.Backoff(backoff.Fibonacci(10*time.Millisecond)), +) + +if nil != err { + log.Fatalf("Failed to fetch repository with error %q", err) +} +``` + +### Retry with backoff jitter + +```go +action := func(attempt uint) error { + return errors.New("something happened") +} + +seed := time.Now().UnixNano() +random := rand.New(rand.NewSource(seed)) + +retry.Retry( + action, + strategy.Limit(5), + strategy.BackoffWithJitter( + backoff.BinaryExponential(10*time.Millisecond), + jitter.Deviation(random, 0.5), + ), +) +``` diff --git a/cluster-autoscaler/vendor/github.com/Rican7/retry/backoff/backoff.go b/cluster-autoscaler/vendor/github.com/Rican7/retry/backoff/backoff.go new file mode 100644 index 000000000000..5369a75a1c6f --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Rican7/retry/backoff/backoff.go @@ -0,0 +1,67 @@ +// Package backoff provides stateless methods of calculating durations based on +// a number of attempts made. +// +// Copyright © 2016 Trevor N. 
Suarez (Rican7) +package backoff + +import ( + "math" + "time" +) + +// Algorithm defines a function that calculates a time.Duration based on +// the given retry attempt number. +type Algorithm func(attempt uint) time.Duration + +// Incremental creates a Algorithm that increments the initial duration +// by the given increment for each attempt. +func Incremental(initial, increment time.Duration) Algorithm { + return func(attempt uint) time.Duration { + return initial + (increment * time.Duration(attempt)) + } +} + +// Linear creates a Algorithm that linearly multiplies the factor +// duration by the attempt number for each attempt. +func Linear(factor time.Duration) Algorithm { + return func(attempt uint) time.Duration { + return (factor * time.Duration(attempt)) + } +} + +// Exponential creates a Algorithm that multiplies the factor duration by +// an exponentially increasing factor for each attempt, where the factor is +// calculated as the given base raised to the attempt number. +func Exponential(factor time.Duration, base float64) Algorithm { + return func(attempt uint) time.Duration { + return (factor * time.Duration(math.Pow(base, float64(attempt)))) + } +} + +// BinaryExponential creates a Algorithm that multiplies the factor +// duration by an exponentially increasing factor for each attempt, where the +// factor is calculated as `2` raised to the attempt number (2^attempt). +func BinaryExponential(factor time.Duration) Algorithm { + return Exponential(factor, 2) +} + +// Fibonacci creates a Algorithm that multiplies the factor duration by +// an increasing factor for each attempt, where the factor is the Nth number in +// the Fibonacci sequence. +func Fibonacci(factor time.Duration) Algorithm { + return func(attempt uint) time.Duration { + return (factor * time.Duration(fibonacciNumber(attempt))) + } +} + +// fibonacciNumber calculates the Fibonacci sequence number for the given +// sequence position. +func fibonacciNumber(n uint) uint { + if 0 == n { + return 0 + } else if 1 == n { + return 1 + } else { + return fibonacciNumber(n-1) + fibonacciNumber(n-2) + } +} diff --git a/cluster-autoscaler/vendor/github.com/Rican7/retry/jitter/jitter.go b/cluster-autoscaler/vendor/github.com/Rican7/retry/jitter/jitter.go new file mode 100644 index 000000000000..e94ad8927960 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Rican7/retry/jitter/jitter.go @@ -0,0 +1,89 @@ +// Package jitter provides methods of transforming durations. +// +// Copyright © 2016 Trevor N. Suarez (Rican7) +package jitter + +import ( + "math" + "math/rand" + "time" +) + +// Transformation defines a function that calculates a time.Duration based on +// the given duration. +type Transformation func(duration time.Duration) time.Duration + +// Full creates a Transformation that transforms a duration into a result +// duration in [0, n) randomly, where n is the given duration. +// +// The given generator is what is used to determine the random transformation. +// If a nil generator is passed, a default one will be provided. +// +// Inspired by https://www.awsarchitectureblog.com/2015/03/backoff.html +func Full(generator *rand.Rand) Transformation { + random := fallbackNewRandom(generator) + + return func(duration time.Duration) time.Duration { + return time.Duration(random.Int63n(int64(duration))) + } +} + +// Equal creates a Transformation that transforms a duration into a result +// duration in [n/2, n) randomly, where n is the given duration. 
+// +// The given generator is what is used to determine the random transformation. +// If a nil generator is passed, a default one will be provided. +// +// Inspired by https://www.awsarchitectureblog.com/2015/03/backoff.html +func Equal(generator *rand.Rand) Transformation { + random := fallbackNewRandom(generator) + + return func(duration time.Duration) time.Duration { + return (duration / 2) + time.Duration(random.Int63n(int64(duration))/2) + } +} + +// Deviation creates a Transformation that transforms a duration into a result +// duration that deviates from the input randomly by a given factor. +// +// The given generator is what is used to determine the random transformation. +// If a nil generator is passed, a default one will be provided. +// +// Inspired by https://developers.google.com/api-client-library/java/google-http-java-client/backoff +func Deviation(generator *rand.Rand, factor float64) Transformation { + random := fallbackNewRandom(generator) + + return func(duration time.Duration) time.Duration { + min := int64(math.Floor(float64(duration) * (1 - factor))) + max := int64(math.Ceil(float64(duration) * (1 + factor))) + + return time.Duration(random.Int63n(max-min) + min) + } +} + +// NormalDistribution creates a Transformation that transforms a duration into a +// result duration based on a normal distribution of the input and the given +// standard deviation. +// +// The given generator is what is used to determine the random transformation. +// If a nil generator is passed, a default one will be provided. +func NormalDistribution(generator *rand.Rand, standardDeviation float64) Transformation { + random := fallbackNewRandom(generator) + + return func(duration time.Duration) time.Duration { + return time.Duration(random.NormFloat64()*standardDeviation + float64(duration)) + } +} + +// fallbackNewRandom returns the passed in random instance if it's not nil, +// and otherwise returns a new random instance seeded with the current time. +func fallbackNewRandom(random *rand.Rand) *rand.Rand { + // Return the passed in value if it's already not null + if nil != random { + return random + } + + seed := time.Now().UnixNano() + + return rand.New(rand.NewSource(seed)) +} diff --git a/cluster-autoscaler/vendor/github.com/Rican7/retry/retry.go b/cluster-autoscaler/vendor/github.com/Rican7/retry/retry.go new file mode 100644 index 000000000000..15015db257fb --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Rican7/retry/retry.go @@ -0,0 +1,36 @@ +// Package retry provides a simple, stateless, functional mechanism to perform +// actions repetitively until successful. +// +// Copyright © 2016 Trevor N. Suarez (Rican7) +package retry + +import "github.com/Rican7/retry/strategy" + +// Action defines a callable function that package retry can handle. +type Action func(attempt uint) error + +// Retry takes an action and performs it, repetitively, until successful. +// +// Optionally, strategies may be passed that assess whether or not an attempt +// should be made. +func Retry(action Action, strategies ...strategy.Strategy) error { + var err error + + for attempt := uint(0); (0 == attempt || nil != err) && shouldAttempt(attempt, strategies...); attempt++ { + err = action(attempt) + } + + return err +} + +// shouldAttempt evaluates the provided strategies with the given attempt to +// determine if the Retry loop should make another attempt. 
+func shouldAttempt(attempt uint, strategies ...strategy.Strategy) bool { + shouldAttempt := true + + for i := 0; shouldAttempt && i < len(strategies); i++ { + shouldAttempt = shouldAttempt && strategies[i](attempt) + } + + return shouldAttempt +} diff --git a/cluster-autoscaler/vendor/github.com/Rican7/retry/strategy/strategy.go b/cluster-autoscaler/vendor/github.com/Rican7/retry/strategy/strategy.go new file mode 100644 index 000000000000..a315fa02cbfe --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/Rican7/retry/strategy/strategy.go @@ -0,0 +1,85 @@ +// Package strategy provides a way to change the way that retry is performed. +// +// Copyright © 2016 Trevor N. Suarez (Rican7) +package strategy + +import ( + "time" + + "github.com/Rican7/retry/backoff" + "github.com/Rican7/retry/jitter" +) + +// Strategy defines a function that Retry calls before every successive attempt +// to determine whether it should make the next attempt or not. Returning `true` +// allows for the next attempt to be made. Returning `false` halts the retrying +// process and returns the last error returned by the called Action. +// +// The strategy will be passed an "attempt" number on each successive retry +// iteration, starting with a `0` value before the first attempt is actually +// made. This allows for a pre-action delay, etc. +type Strategy func(attempt uint) bool + +// Limit creates a Strategy that limits the number of attempts that Retry will +// make. +func Limit(attemptLimit uint) Strategy { + return func(attempt uint) bool { + return (attempt <= attemptLimit) + } +} + +// Delay creates a Strategy that waits the given duration before the first +// attempt is made. +func Delay(duration time.Duration) Strategy { + return func(attempt uint) bool { + if 0 == attempt { + time.Sleep(duration) + } + + return true + } +} + +// Wait creates a Strategy that waits the given durations for each attempt after +// the first. If the number of attempts is greater than the number of durations +// provided, then the strategy uses the last duration provided. +func Wait(durations ...time.Duration) Strategy { + return func(attempt uint) bool { + if 0 < attempt && 0 < len(durations) { + durationIndex := int(attempt - 1) + + if len(durations) <= durationIndex { + durationIndex = len(durations) - 1 + } + + time.Sleep(durations[durationIndex]) + } + + return true + } +} + +// Backoff creates a Strategy that waits before each attempt, with a duration as +// defined by the given backoff.Algorithm. +func Backoff(algorithm backoff.Algorithm) Strategy { + return BackoffWithJitter(algorithm, noJitter()) +} + +// BackoffWithJitter creates a Strategy that waits before each attempt, with a +// duration as defined by the given backoff.Algorithm and jitter.Transformation. +func BackoffWithJitter(algorithm backoff.Algorithm, transformation jitter.Transformation) Strategy { + return func(attempt uint) bool { + if 0 < attempt { + time.Sleep(transformation(algorithm(attempt))) + } + + return true + } +} + +// noJitter creates a jitter.Transformation that simply returns the input. 
+func noJitter() jitter.Transformation {
+ return func(duration time.Duration) time.Duration {
+ return duration
+ }
+}
diff --git a/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/.travis.yml b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/.travis.yml
new file mode 100644
index 000000000000..e29f8eef5efd
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+
+go:
+ - 1.1
+ - 1.2
+ - 1.3
+ - 1.4
+ - 1.5
+ - 1.6
+ - tip
+
+notifications:
+ email:
+ - bwatas@gmail.com
diff --git a/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md
new file mode 100644
index 000000000000..f0f7e3a8add0
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md
@@ -0,0 +1,63 @@
+#### Support
+If you have a contribution to the package, feel free to create a Pull Request or an Issue.
+
+#### What to contribute
+If you don't know what to do, here are some features and functions that still need to be done:
+
+- [ ] Refactor code
+- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
+- [ ] Create an up-to-date list of contributors and projects that are currently using this package
+- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
+- [ ] Keep the [list of functions](https://github.com/asaskevich/govalidator#list-of-functions) up to date
+- [ ] Update the [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that are available for `ValidateStruct` and add new ones
+- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc
+- [ ] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
+- [ ] Implement fuzzing testing
+- [ ] Implement some struct/map/array utilities
+- [ ] Implement map/array validation
+- [ ] Implement benchmarking
+- [ ] Implement batch of examples
+- [ ] Look at forks for new features and fixes
+
+#### Advice
+Feel free to create what you want, but keep the following in mind when you implement new features:
+- Code must be clear and readable, and names of variables/constants must clearly describe what they are doing
+- Public functions must be documented and described in the source file and added to the list of available functions in README.md
+- There must be unit tests for any new functions and improvements
+
+## Financial contributions
+
+We also welcome financial contributions in full transparency on our [open collective](https://opencollective.com/govalidator).
+Anyone can file an expense. If the expense makes sense for the development of the community, it will be "merged" in the ledger of our open collective by the core contributors and the person who filed the expense will be reimbursed.
+
+
+## Credits
+
+
+### Contributors
+
+Thank you to all the people who have already contributed to govalidator!
+
+
+
+### Backers
+
+Thank you to all our backers! [[Become a backer](https://opencollective.com/govalidator#backer)]
+
+
+
+
+### Sponsors
+
+Thank you to all our sponsors!
(please ask your company to also support this open source project by [becoming a sponsor](https://opencollective.com/govalidator#sponsor)) + + + + + + + + + + + \ No newline at end of file diff --git a/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/LICENSE b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/LICENSE new file mode 100644 index 000000000000..2f9a31fadf67 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Alex Saskevich + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/README.md b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/README.md new file mode 100644 index 000000000000..0e8793f719a8 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/README.md @@ -0,0 +1,496 @@ +govalidator +=========== +[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![GoDoc](https://godoc.org/github.com/asaskevich/govalidator?status.png)](https://godoc.org/github.com/asaskevich/govalidator) [![Coverage Status](https://img.shields.io/coveralls/asaskevich/govalidator.svg)](https://coveralls.io/r/asaskevich/govalidator?branch=master) [![wercker status](https://app.wercker.com/status/1ec990b09ea86c910d5f08b0e02c6043/s "wercker status")](https://app.wercker.com/project/bykey/1ec990b09ea86c910d5f08b0e02c6043) +[![Build Status](https://travis-ci.org/asaskevich/govalidator.svg?branch=master)](https://travis-ci.org/asaskevich/govalidator) [![Go Report Card](https://goreportcard.com/badge/github.com/asaskevich/govalidator)](https://goreportcard.com/report/github.com/asaskevich/govalidator) [![GoSearch](http://go-search.org/badge?id=github.com%2Fasaskevich%2Fgovalidator)](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [![Backers on Open Collective](https://opencollective.com/govalidator/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/govalidator/sponsors/badge.svg)](#sponsors) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_shield) + +A package of validators and sanitizers for strings, structs and collections. 
Based on [validator.js](https://github.com/chriso/validator.js).
+
+#### Installation
+Make sure that Go is installed on your computer.
+Type the following command in your terminal:
+
+ go get github.com/asaskevich/govalidator
+
+or you can get a specific release of the package with `gopkg.in`:
+
+ go get gopkg.in/asaskevich/govalidator.v4
+
+After that, the package is ready to use.
+
+
+#### Import package in your project
+Add the following line to your `*.go` file:
+```go
+import "github.com/asaskevich/govalidator"
+```
+If you don't want to use the long `govalidator` package name, you can alias it like this:
+```go
+import (
+ valid "github.com/asaskevich/govalidator"
+)
+```
+
+#### Activate behavior to require all fields have a validation tag by default
+`SetFieldsRequiredByDefault` causes validation to fail when struct fields do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). A good place to activate this is a package init function or the main() function.
+
+`SetNilPtrAllowedByRequired` causes validation to pass when struct fields marked by `required` are set to nil. This is disabled by default for consistency, but some packages that need to be able to distinguish between `nil` and `zero value` states can use this. If disabled, both `nil` and `zero` values cause validation errors.
+
+```go
+import "github.com/asaskevich/govalidator"
+
+func init() {
+ govalidator.SetFieldsRequiredByDefault(true)
+}
+```
+
+Here's some code to explain it:
+```go
+// this struct definition will fail govalidator.ValidateStruct() (and the field values do not matter):
+type exampleStruct struct {
+ Name string ``
+ Email string `valid:"email"`
+}
+
+// this, however, will only fail when Email is empty or an invalid email address:
+type exampleStruct2 struct {
+ Name string `valid:"-"`
+ Email string `valid:"email"`
+}
+
+// lastly, this will only fail when Email is an invalid email address but not when it's empty:
+type exampleStruct2 struct {
+ Name string `valid:"-"`
+ Email string `valid:"email,optional"`
+}
+```
+
+#### Recent breaking changes (see [#123](https://github.com/asaskevich/govalidator/pull/123))
+##### Custom validator function signature
+A context was added as the second parameter; for structs this is the object being validated, which makes dependent validation possible.
+```go
+import "github.com/asaskevich/govalidator"
+
+// old signature
+func(i interface{}) bool
+
+// new signature
+func(i interface{}, o interface{}) bool
+```
+
+##### Adding a custom validator
+This was changed to prevent data races when accessing custom validators.
+```go
+import "github.com/asaskevich/govalidator"
+
+// before
+govalidator.CustomTypeTagMap["customByteArrayValidator"] = CustomTypeValidator(func(i interface{}, o interface{}) bool {
+ // ...
+})
+
+// after
+govalidator.CustomTypeTagMap.Set("customByteArrayValidator", CustomTypeValidator(func(i interface{}, o interface{}) bool {
+ // ...
+})) +``` + +#### List of functions: +```go +func Abs(value float64) float64 +func BlackList(str, chars string) string +func ByteLength(str string, params ...string) bool +func CamelCaseToUnderscore(str string) string +func Contains(str, substring string) bool +func Count(array []interface{}, iterator ConditionIterator) int +func Each(array []interface{}, iterator Iterator) +func ErrorByField(e error, field string) string +func ErrorsByField(e error) map[string]string +func Filter(array []interface{}, iterator ConditionIterator) []interface{} +func Find(array []interface{}, iterator ConditionIterator) interface{} +func GetLine(s string, index int) (string, error) +func GetLines(s string) []string +func InRange(value, left, right float64) bool +func IsASCII(str string) bool +func IsAlpha(str string) bool +func IsAlphanumeric(str string) bool +func IsBase64(str string) bool +func IsByteLength(str string, min, max int) bool +func IsCIDR(str string) bool +func IsCreditCard(str string) bool +func IsDNSName(str string) bool +func IsDataURI(str string) bool +func IsDialString(str string) bool +func IsDivisibleBy(str, num string) bool +func IsEmail(str string) bool +func IsFilePath(str string) (bool, int) +func IsFloat(str string) bool +func IsFullWidth(str string) bool +func IsHalfWidth(str string) bool +func IsHexadecimal(str string) bool +func IsHexcolor(str string) bool +func IsHost(str string) bool +func IsIP(str string) bool +func IsIPv4(str string) bool +func IsIPv6(str string) bool +func IsISBN(str string, version int) bool +func IsISBN10(str string) bool +func IsISBN13(str string) bool +func IsISO3166Alpha2(str string) bool +func IsISO3166Alpha3(str string) bool +func IsISO693Alpha2(str string) bool +func IsISO693Alpha3b(str string) bool +func IsISO4217(str string) bool +func IsIn(str string, params ...string) bool +func IsInt(str string) bool +func IsJSON(str string) bool +func IsLatitude(str string) bool +func IsLongitude(str string) bool +func IsLowerCase(str string) bool +func IsMAC(str string) bool +func IsMongoID(str string) bool +func IsMultibyte(str string) bool +func IsNatural(value float64) bool +func IsNegative(value float64) bool +func IsNonNegative(value float64) bool +func IsNonPositive(value float64) bool +func IsNull(str string) bool +func IsNumeric(str string) bool +func IsPort(str string) bool +func IsPositive(value float64) bool +func IsPrintableASCII(str string) bool +func IsRFC3339(str string) bool +func IsRFC3339WithoutZone(str string) bool +func IsRGBcolor(str string) bool +func IsRequestURI(rawurl string) bool +func IsRequestURL(rawurl string) bool +func IsSSN(str string) bool +func IsSemver(str string) bool +func IsTime(str string, format string) bool +func IsURL(str string) bool +func IsUTFDigit(str string) bool +func IsUTFLetter(str string) bool +func IsUTFLetterNumeric(str string) bool +func IsUTFNumeric(str string) bool +func IsUUID(str string) bool +func IsUUIDv3(str string) bool +func IsUUIDv4(str string) bool +func IsUUIDv5(str string) bool +func IsUpperCase(str string) bool +func IsVariableWidth(str string) bool +func IsWhole(value float64) bool +func LeftTrim(str, chars string) string +func Map(array []interface{}, iterator ResultIterator) []interface{} +func Matches(str, pattern string) bool +func NormalizeEmail(str string) (string, error) +func PadBoth(str string, padStr string, padLen int) string +func PadLeft(str string, padStr string, padLen int) string +func PadRight(str string, padStr string, padLen int) string +func Range(str string, params 
...string) bool +func RemoveTags(s string) string +func ReplacePattern(str, pattern, replace string) string +func Reverse(s string) string +func RightTrim(str, chars string) string +func RuneLength(str string, params ...string) bool +func SafeFileName(str string) string +func SetFieldsRequiredByDefault(value bool) +func Sign(value float64) float64 +func StringLength(str string, params ...string) bool +func StringMatches(s string, params ...string) bool +func StripLow(str string, keepNewLines bool) string +func ToBoolean(str string) (bool, error) +func ToFloat(str string) (float64, error) +func ToInt(str string) (int64, error) +func ToJSON(obj interface{}) (string, error) +func ToString(obj interface{}) string +func Trim(str, chars string) string +func Truncate(str string, length int, ending string) string +func UnderscoreToCamelCase(s string) string +func ValidateStruct(s interface{}) (bool, error) +func WhiteList(str, chars string) string +type ConditionIterator +type CustomTypeValidator +type Error +func (e Error) Error() string +type Errors +func (es Errors) Error() string +func (es Errors) Errors() []error +type ISO3166Entry +type Iterator +type ParamValidator +type ResultIterator +type UnsupportedTypeError +func (e *UnsupportedTypeError) Error() string +type Validator +``` + +#### Examples +###### IsURL +```go +println(govalidator.IsURL(`http://user@pass:domain.com/path/page`)) +``` +###### ToString +```go +type User struct { + FirstName string + LastName string +} + +str := govalidator.ToString(&User{"John", "Juan"}) +println(str) +``` +###### Each, Map, Filter, Count for slices +Each iterates over the slice/array and calls Iterator for every item +```go +data := []interface{}{1, 2, 3, 4, 5} +var fn govalidator.Iterator = func(value interface{}, index int) { + println(value.(int)) +} +govalidator.Each(data, fn) +``` +```go +data := []interface{}{1, 2, 3, 4, 5} +var fn govalidator.ResultIterator = func(value interface{}, index int) interface{} { + return value.(int) * 3 +} +_ = govalidator.Map(data, fn) // result = []interface{}{1, 6, 9, 12, 15} +``` +```go +data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} +var fn govalidator.ConditionIterator = func(value interface{}, index int) bool { + return value.(int)%2 == 0 +} +_ = govalidator.Filter(data, fn) // result = []interface{}{2, 4, 6, 8, 10} +_ = govalidator.Count(data, fn) // result = 5 +``` +###### ValidateStruct [#2](https://github.com/asaskevich/govalidator/pull/2) +If you want to validate structs, you can use tag `valid` for any field in your structure. All validators used with this field in one tag are separated by comma. If you want to skip validation, place `-` in your tag. If you need a validator that is not on the list below, you can add it like this: +```go +govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool { + return str == "duck" +}) +``` +For completely custom validators (interface-based), see below. 
+ +Here is a list of available validators for struct fields (validator - used function): +```go +"email": IsEmail, +"url": IsURL, +"dialstring": IsDialString, +"requrl": IsRequestURL, +"requri": IsRequestURI, +"alpha": IsAlpha, +"utfletter": IsUTFLetter, +"alphanum": IsAlphanumeric, +"utfletternum": IsUTFLetterNumeric, +"numeric": IsNumeric, +"utfnumeric": IsUTFNumeric, +"utfdigit": IsUTFDigit, +"hexadecimal": IsHexadecimal, +"hexcolor": IsHexcolor, +"rgbcolor": IsRGBcolor, +"lowercase": IsLowerCase, +"uppercase": IsUpperCase, +"int": IsInt, +"float": IsFloat, +"null": IsNull, +"uuid": IsUUID, +"uuidv3": IsUUIDv3, +"uuidv4": IsUUIDv4, +"uuidv5": IsUUIDv5, +"creditcard": IsCreditCard, +"isbn10": IsISBN10, +"isbn13": IsISBN13, +"json": IsJSON, +"multibyte": IsMultibyte, +"ascii": IsASCII, +"printableascii": IsPrintableASCII, +"fullwidth": IsFullWidth, +"halfwidth": IsHalfWidth, +"variablewidth": IsVariableWidth, +"base64": IsBase64, +"datauri": IsDataURI, +"ip": IsIP, +"port": IsPort, +"ipv4": IsIPv4, +"ipv6": IsIPv6, +"dns": IsDNSName, +"host": IsHost, +"mac": IsMAC, +"latitude": IsLatitude, +"longitude": IsLongitude, +"ssn": IsSSN, +"semver": IsSemver, +"rfc3339": IsRFC3339, +"rfc3339WithoutZone": IsRFC3339WithoutZone, +"ISO3166Alpha2": IsISO3166Alpha2, +"ISO3166Alpha3": IsISO3166Alpha3, +``` +Validators with parameters + +```go +"range(min|max)": Range, +"length(min|max)": ByteLength, +"runelength(min|max)": RuneLength, +"matches(pattern)": StringMatches, +"in(string1|string2|...|stringN)": IsIn, +``` + +And here is small example of usage: +```go +type Post struct { + Title string `valid:"alphanum,required"` + Message string `valid:"duck,ascii"` + AuthorIP string `valid:"ipv4"` + Date string `valid:"-"` +} +post := &Post{ + Title: "My Example Post", + Message: "duck", + AuthorIP: "123.234.54.3", +} + +// Add your own struct validation tags +govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool { + return str == "duck" +}) + +result, err := govalidator.ValidateStruct(post) +if err != nil { + println("error: " + err.Error()) +} +println(result) +``` +###### WhiteList +```go +// Remove all characters from string ignoring characters between "a" and "z" +println(govalidator.WhiteList("a3a43a5a4a3a2a23a4a5a4a3a4", "a-z") == "aaaaaaaaaaaa") +``` + +###### Custom validation functions +Custom validation using your own domain specific validators is also available - here's an example of how to use it: +```go +import "github.com/asaskevich/govalidator" + +type CustomByteArray [6]byte // custom types are supported and can be validated + +type StructWithCustomByteArray struct { + ID CustomByteArray `valid:"customByteArrayValidator,customMinLengthValidator"` // multiple custom validators are possible as well and will be evaluated in sequence + Email string `valid:"email"` + CustomMinLength int `valid:"-"` +} + +govalidator.CustomTypeTagMap.Set("customByteArrayValidator", CustomTypeValidator(func(i interface{}, context interface{}) bool { + switch v := context.(type) { // you can type switch on the context interface being validated + case StructWithCustomByteArray: + // you can check and validate against some other field in the context, + // return early or not validate against the context at all – your choice + case SomeOtherType: + // ... + default: + // expecting some other type? 
Throw/panic here or continue
+ }
+
+ switch v := i.(type) { // type switch on the struct field being validated
+ case CustomByteArray:
+ for _, e := range v { // this validator checks that the byte array is not empty, i.e. not all zeroes
+ if e != 0 {
+ return true
+ }
+ }
+ }
+ return false
+}))
+govalidator.CustomTypeTagMap.Set("customMinLengthValidator", CustomTypeValidator(func(i interface{}, context interface{}) bool {
+ switch v := context.(type) { // this validates a field against the value in another field, i.e. dependent validation
+ case StructWithCustomByteArray:
+ return len(v.ID) >= v.CustomMinLength
+ }
+ return false
+}))
+```
+
+###### Custom error messages
+Custom error messages are supported via annotations by adding the `~` separator - here's an example of how to use it:
+```go
+type Ticket struct {
+ Id int64 `json:"id"`
+ FirstName string `json:"firstname" valid:"required~First name is blank"`
+}
+```
+
+#### Notes
+Documentation is available here: [godoc.org](https://godoc.org/github.com/asaskevich/govalidator).
+Full information about code coverage is also available here: [govalidator on gocover.io](http://gocover.io/github.com/asaskevich/govalidator).
+
+#### Support
+If you have a contribution to the package, feel free to create a Pull Request or an Issue.
+
+#### What to contribute
+If you don't know what to do, here are some features and functions that still need to be done:
+
+- [ ] Refactor code
+- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
+- [ ] Create an up-to-date list of contributors and projects that are currently using this package
+- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
+- [ ] Keep the [list of functions](https://github.com/asaskevich/govalidator#list-of-functions) up to date
+- [ ] Update the [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that are available for `ValidateStruct` and add new ones
+- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc
+- [ ] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
+- [ ] Implement fuzzing testing
+- [ ] Implement some struct/map/array utilities
+- [ ] Implement map/array validation
+- [ ] Implement benchmarking
+- [ ] Implement batch of examples
+- [ ] Look at forks for new features and fixes
+
+#### Advice
+Feel free to create what you want, but keep the following in mind when you implement new features:
+- Code must be clear and readable, and names of variables/constants must clearly describe what they are doing
+- Public functions must be documented and described in the source file and added to the list of available functions in README.md
+- There must be unit tests for any new functions and improvements
+
+## Credits
+### Contributors
+
+This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)].
+
+#### Special thanks to [contributors](https://github.com/asaskevich/govalidator/graphs/contributors)
+* [Daniel Lohse](https://github.com/annismckenzie)
+* [Attila Oláh](https://github.com/attilaolah)
+* [Daniel Korner](https://github.com/Dadie)
+* [Steven Wilkin](https://github.com/stevenwilkin)
+* [Deiwin Sarjas](https://github.com/deiwin)
+* [Noah Shibley](https://github.com/slugmobile)
+* [Nathan Davies](https://github.com/nathj07)
+* [Matt Sanford](https://github.com/mzsanford)
+* [Simon ccl1115](https://github.com/ccl1115)
+
+
+
+
+### Backers
+
+Thank you to all our backers!
🙏 [[Become a backer](https://opencollective.com/govalidator#backer)] + + + + +### Sponsors + +Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/govalidator#sponsor)] + + + + + + + + + + + + + + + +## License +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_large) \ No newline at end of file diff --git a/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/arrays.go b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/arrays.go new file mode 100644 index 000000000000..5bace2654d3b --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/arrays.go @@ -0,0 +1,58 @@ +package govalidator + +// Iterator is the function that accepts element of slice/array and its index +type Iterator func(interface{}, int) + +// ResultIterator is the function that accepts element of slice/array and its index and returns any result +type ResultIterator func(interface{}, int) interface{} + +// ConditionIterator is the function that accepts element of slice/array and its index and returns boolean +type ConditionIterator func(interface{}, int) bool + +// Each iterates over the slice and apply Iterator to every item +func Each(array []interface{}, iterator Iterator) { + for index, data := range array { + iterator(data, index) + } +} + +// Map iterates over the slice and apply ResultIterator to every item. Returns new slice as a result. +func Map(array []interface{}, iterator ResultIterator) []interface{} { + var result = make([]interface{}, len(array)) + for index, data := range array { + result[index] = iterator(data, index) + } + return result +} + +// Find iterates over the slice and apply ConditionIterator to every item. Returns first item that meet ConditionIterator or nil otherwise. +func Find(array []interface{}, iterator ConditionIterator) interface{} { + for index, data := range array { + if iterator(data, index) { + return data + } + } + return nil +} + +// Filter iterates over the slice and apply ConditionIterator to every item. Returns new slice. +func Filter(array []interface{}, iterator ConditionIterator) []interface{} { + var result = make([]interface{}, 0) + for index, data := range array { + if iterator(data, index) { + result = append(result, data) + } + } + return result +} + +// Count iterates over the slice and apply ConditionIterator to every item. Returns count of items that meets ConditionIterator. +func Count(array []interface{}, iterator ConditionIterator) int { + count := 0 + for index, data := range array { + if iterator(data, index) { + count = count + 1 + } + } + return count +} diff --git a/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/converter.go b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/converter.go new file mode 100644 index 000000000000..cf1e5d569ba0 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/converter.go @@ -0,0 +1,64 @@ +package govalidator + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" +) + +// ToString convert the input to a string. 
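+// For example (illustrative): ToString(42) == "42" and ToString([]int{1, 2}) == "[1 2]".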
+func ToString(obj interface{}) string {
+	res := fmt.Sprintf("%v", obj)
+	return res // res is already a string; no further conversion is needed
+}
+
+// ToJSON converts the input to a valid JSON string
+func ToJSON(obj interface{}) (string, error) {
+	res, err := json.Marshal(obj)
+	if err != nil {
+		res = []byte("")
+	}
+	return string(res), err
+}
+
+// ToFloat converts the input string to a float, or 0.0 if the input is not a float.
+func ToFloat(str string) (float64, error) {
+	res, err := strconv.ParseFloat(str, 64)
+	if err != nil {
+		res = 0.0
+	}
+	return res, err
+}
+
+// ToInt converts the input string or any integer type to an int64, or 0 if the input is not an integer.
+func ToInt(value interface{}) (res int64, err error) {
+	val := reflect.ValueOf(value)
+
+	switch value.(type) {
+	case int, int8, int16, int32, int64:
+		res = val.Int()
+	case uint, uint8, uint16, uint32, uint64:
+		res = int64(val.Uint())
+	case string:
+		if IsInt(val.String()) {
+			res, err = strconv.ParseInt(val.String(), 0, 64)
+			if err != nil {
+				res = 0
+			}
+		} else {
+			err = fmt.Errorf("ToInt: invalid numeric string %q", value)
+			res = 0
+		}
+	default:
+		err = fmt.Errorf("ToInt: unknown type %T", value)
+		res = 0
+	}
+
+	return
+}
+
+// ToBoolean converts the input string to a boolean.
+func ToBoolean(str string) (bool, error) {
+	return strconv.ParseBool(str)
+}
diff --git a/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/error.go b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/error.go
new file mode 100644
index 000000000000..655b750cb8f6
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/error.go
@@ -0,0 +1,43 @@
+package govalidator
+
+import "strings"
+
+// Errors is an array of multiple errors and conforms to the error interface.
+type Errors []error
+
+// Errors returns itself.
+func (es Errors) Errors() []error {
+	return es
+}
+
+// Error joins the messages of all contained errors with ";".
+func (es Errors) Error() string {
+	var errs []string
+	for _, e := range es {
+		errs = append(errs, e.Error())
+	}
+	return strings.Join(errs, ";")
+}
+
+// Error encapsulates a name, an error and whether there's a custom error message or not.
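+// The rendered message is either the custom message alone (when CustomErrorMessageExists
+// is true) or the underlying error prefixed with the field's dot-joined Path and Name;
+// see Error() below.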
+type Error struct {
+	Name                     string
+	Err                      error
+	CustomErrorMessageExists bool
+
+	// Validator indicates the name of the validator that failed
+	Validator string
+	Path      []string
+}
+
+// Error returns the custom message if one is set, otherwise the underlying error
+// prefixed with the field name (and its path, if any).
+func (e Error) Error() string {
+	if e.CustomErrorMessageExists {
+		return e.Err.Error()
+	}
+
+	errName := e.Name
+	if len(e.Path) > 0 {
+		errName = strings.Join(append(e.Path, e.Name), ".")
+	}
+
+	return errName + ": " + e.Err.Error()
+}
diff --git a/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/numerics.go b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/numerics.go
new file mode 100644
index 000000000000..7e6c652e140c
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/numerics.go
@@ -0,0 +1,97 @@
+package govalidator
+
+import (
+	"math"
+	"reflect"
+)
+
+// Abs returns the absolute value of the number
+func Abs(value float64) float64 {
+	return math.Abs(value)
+}
+
+// Sign returns the signum of the number: 1 in case of value > 0, -1 in case of value < 0, 0 otherwise
+func Sign(value float64) float64 {
+	if value > 0 {
+		return 1
+	} else if value < 0 {
+		return -1
+	} else {
+		return 0
+	}
+}
+
+// IsNegative returns true if value < 0
+func IsNegative(value float64) bool {
+	return value < 0
+}
+
+// IsPositive returns true if value > 0
+func IsPositive(value float64) bool {
+	return value > 0
+}
+
+// IsNonNegative returns true if value >= 0
+func IsNonNegative(value float64) bool {
+	return value >= 0
+}
+
+// IsNonPositive returns true if value <= 0
+func IsNonPositive(value float64) bool {
+	return value <= 0
+}
+
+// InRangeInt returns true if value lies between left and right border (borders are swapped if left > right)
+func InRangeInt(value, left, right interface{}) bool {
+	value64, _ := ToInt(value)
+	left64, _ := ToInt(left)
+	right64, _ := ToInt(right)
+	if left64 > right64 {
+		left64, right64 = right64, left64
+	}
+	return value64 >= left64 && value64 <= right64
+}
+
+// InRangeFloat32 returns true if value lies between left and right border (borders are swapped if left > right)
+func InRangeFloat32(value, left, right float32) bool {
+	if left > right {
+		left, right = right, left
+	}
+	return value >= left && value <= right
+}
+
+// InRangeFloat64 returns true if value lies between left and right border (borders are swapped if left > right)
+func InRangeFloat64(value, left, right float64) bool {
+	if left > right {
+		left, right = right, left
+	}
+	return value >= left && value <= right
+}
+
+// InRange returns true if value lies between left and right border; it handles int, float32 and float64,
+// and all three arguments must be of the same type
+func InRange(value interface{}, left interface{}, right interface{}) bool {
+
+	reflectValue := reflect.TypeOf(value).Kind()
+	reflectLeft := reflect.TypeOf(left).Kind()
+	reflectRight := reflect.TypeOf(right).Kind()
+
+	if reflectValue == reflect.Int && reflectLeft == reflect.Int && reflectRight == reflect.Int {
+		return InRangeInt(value.(int), left.(int), right.(int))
+	} else if reflectValue == reflect.Float32 && reflectLeft == reflect.Float32 && reflectRight == reflect.Float32 {
+		return InRangeFloat32(value.(float32), left.(float32), right.(float32))
+	} else if reflectValue == reflect.Float64 && reflectLeft == reflect.Float64 && reflectRight == reflect.Float64 {
+		return InRangeFloat64(value.(float64), left.(float64), right.(float64))
+	} else {
+		return false
+	}
+}
+
+// IsWhole returns true if value is a whole number
+func IsWhole(value float64) bool {
+	return math.Remainder(value, 1) == 0
+}
+
+// IsNatural returns true if value is a natural number (positive and whole)
+func IsNatural(value float64) bool {
+	return IsWhole(value) &&
IsPositive(value) +} diff --git a/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/patterns.go b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/patterns.go new file mode 100644 index 000000000000..61a05d438e18 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/patterns.go @@ -0,0 +1,101 @@ +package govalidator + +import "regexp" + +// Basic regular expressions for validating strings +const ( + Email string = "^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$" + CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$" + ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$" + ISBN13 string = "^(?:[0-9]{13})$" + UUID3 string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$" + UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + UUID5 string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + UUID string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" + Alpha string = "^[a-zA-Z]+$" + Alphanumeric string = "^[a-zA-Z0-9]+$" + Numeric string = "^[0-9]+$" + Int string = "^(?:[-+]?(?:0|[1-9][0-9]*))$" + Float string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$" + Hexadecimal string = "^[0-9a-fA-F]+$" + Hexcolor string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$" + RGBcolor string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$" + ASCII string = "^[\x00-\x7F]+$" + Multibyte string = "[^\x00-\x7F]" + FullWidth string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" + HalfWidth string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" + Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$" + PrintableASCII string = "^[\x20-\x7E]+$" + DataURI string = "^data:.+\\/(.+);base64$" + Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$" + Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$" + DNSName string = `^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$` + IP string = 
`(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))` + URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)` + URLUsername string = `(\S+(:\S*)?@)` + URLPath string = `((\/|\?|#)[^\s]*)` + URLPort string = `(:(\d{1,5}))` + URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3])(\.(1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-4]))` + URLSubdomain string = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))` + URL string = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$` + SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$` + WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$` + UnixPath string = `^(/[^/\x00]*)+/?$` + Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$" + tagName string = "valid" + hasLowerCase string = ".*[[:lower:]]" + hasUpperCase string = ".*[[:upper:]]" + hasWhitespace string = ".*[[:space:]]" + hasWhitespaceOnly string = "^[[:space:]]+$" +) + +// Used by IsFilePath func +const ( + // Unknown is unresolved OS type + Unknown = iota + // Win is Windows type + Win + // Unix is *nix OS types + Unix +) + +var ( + userRegexp = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$") + hostRegexp = regexp.MustCompile("^[^\\s]+\\.[^\\s]+$") + userDotRegexp = regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})") + rxEmail = regexp.MustCompile(Email) + rxCreditCard = regexp.MustCompile(CreditCard) + rxISBN10 = regexp.MustCompile(ISBN10) + rxISBN13 = regexp.MustCompile(ISBN13) + rxUUID3 = regexp.MustCompile(UUID3) + rxUUID4 = regexp.MustCompile(UUID4) + rxUUID5 = regexp.MustCompile(UUID5) + rxUUID = regexp.MustCompile(UUID) + rxAlpha = regexp.MustCompile(Alpha) + rxAlphanumeric = regexp.MustCompile(Alphanumeric) + rxNumeric = regexp.MustCompile(Numeric) + rxInt = regexp.MustCompile(Int) + rxFloat = regexp.MustCompile(Float) + rxHexadecimal = regexp.MustCompile(Hexadecimal) + rxHexcolor = regexp.MustCompile(Hexcolor) + rxRGBcolor = regexp.MustCompile(RGBcolor) + rxASCII = regexp.MustCompile(ASCII) + rxPrintableASCII = regexp.MustCompile(PrintableASCII) + rxMultibyte = regexp.MustCompile(Multibyte) + rxFullWidth = regexp.MustCompile(FullWidth) + rxHalfWidth = regexp.MustCompile(HalfWidth) + rxBase64 = regexp.MustCompile(Base64) + rxDataURI = regexp.MustCompile(DataURI) + rxLatitude = regexp.MustCompile(Latitude) + rxLongitude = regexp.MustCompile(Longitude) + rxDNSName = regexp.MustCompile(DNSName) + rxURL = regexp.MustCompile(URL) + rxSSN = regexp.MustCompile(SSN) + rxWinPath = regexp.MustCompile(WinPath) + rxUnixPath = 
regexp.MustCompile(UnixPath) + rxSemver = regexp.MustCompile(Semver) + rxHasLowerCase = regexp.MustCompile(hasLowerCase) + rxHasUpperCase = regexp.MustCompile(hasUpperCase) + rxHasWhitespace = regexp.MustCompile(hasWhitespace) + rxHasWhitespaceOnly = regexp.MustCompile(hasWhitespaceOnly) +) diff --git a/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/types.go b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/types.go new file mode 100644 index 000000000000..4f7e9274ade0 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/types.go @@ -0,0 +1,636 @@ +package govalidator + +import ( + "reflect" + "regexp" + "sort" + "sync" +) + +// Validator is a wrapper for a validator function that returns bool and accepts string. +type Validator func(str string) bool + +// CustomTypeValidator is a wrapper for validator functions that returns bool and accepts any type. +// The second parameter should be the context (in the case of validating a struct: the whole object being validated). +type CustomTypeValidator func(i interface{}, o interface{}) bool + +// ParamValidator is a wrapper for validator functions that accepts additional parameters. +type ParamValidator func(str string, params ...string) bool +type tagOptionsMap map[string]tagOption + +func (t tagOptionsMap) orderedKeys() []string { + var keys []string + for k := range t { + keys = append(keys, k) + } + + sort.Slice(keys, func(a, b int) bool { + return t[keys[a]].order < t[keys[b]].order + }) + + return keys +} + +type tagOption struct { + name string + customErrorMessage string + order int +} + +// UnsupportedTypeError is a wrapper for reflect.Type +type UnsupportedTypeError struct { + Type reflect.Type +} + +// stringValues is a slice of reflect.Value holding *reflect.StringValue. +// It implements the methods to sort by string. +type stringValues []reflect.Value + +// ParamTagMap is a map of functions accept variants parameters +var ParamTagMap = map[string]ParamValidator{ + "length": ByteLength, + "range": Range, + "runelength": RuneLength, + "stringlength": StringLength, + "matches": StringMatches, + "in": isInRaw, + "rsapub": IsRsaPub, +} + +// ParamTagRegexMap maps param tags to their respective regexes. +var ParamTagRegexMap = map[string]*regexp.Regexp{ + "range": regexp.MustCompile("^range\\((\\d+)\\|(\\d+)\\)$"), + "length": regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"), + "runelength": regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"), + "stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"), + "in": regexp.MustCompile(`^in\((.*)\)`), + "matches": regexp.MustCompile(`^matches\((.+)\)$`), + "rsapub": regexp.MustCompile("^rsapub\\((\\d+)\\)$"), +} + +type customTypeTagMap struct { + validators map[string]CustomTypeValidator + + sync.RWMutex +} + +func (tm *customTypeTagMap) Get(name string) (CustomTypeValidator, bool) { + tm.RLock() + defer tm.RUnlock() + v, ok := tm.validators[name] + return v, ok +} + +func (tm *customTypeTagMap) Set(name string, ctv CustomTypeValidator) { + tm.Lock() + defer tm.Unlock() + tm.validators[name] = ctv +} + +// CustomTypeTagMap is a map of functions that can be used as tags for ValidateStruct function. +// Use this to validate compound or custom types that need to be handled as a whole, e.g. +// `type UUID [16]byte` (this would be handled as an array of bytes). 
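+// A minimal registration sketch (the tag name "nonZeroID" is illustrative):
+//
+//	govalidator.CustomTypeTagMap.Set("nonZeroID", CustomTypeValidator(func(i interface{}, o interface{}) bool {
+//		id, ok := i.(int)
+//		return ok && id != 0
+//	}))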
+var CustomTypeTagMap = &customTypeTagMap{validators: make(map[string]CustomTypeValidator)} + +// TagMap is a map of functions, that can be used as tags for ValidateStruct function. +var TagMap = map[string]Validator{ + "email": IsEmail, + "url": IsURL, + "dialstring": IsDialString, + "requrl": IsRequestURL, + "requri": IsRequestURI, + "alpha": IsAlpha, + "utfletter": IsUTFLetter, + "alphanum": IsAlphanumeric, + "utfletternum": IsUTFLetterNumeric, + "numeric": IsNumeric, + "utfnumeric": IsUTFNumeric, + "utfdigit": IsUTFDigit, + "hexadecimal": IsHexadecimal, + "hexcolor": IsHexcolor, + "rgbcolor": IsRGBcolor, + "lowercase": IsLowerCase, + "uppercase": IsUpperCase, + "int": IsInt, + "float": IsFloat, + "null": IsNull, + "uuid": IsUUID, + "uuidv3": IsUUIDv3, + "uuidv4": IsUUIDv4, + "uuidv5": IsUUIDv5, + "creditcard": IsCreditCard, + "isbn10": IsISBN10, + "isbn13": IsISBN13, + "json": IsJSON, + "multibyte": IsMultibyte, + "ascii": IsASCII, + "printableascii": IsPrintableASCII, + "fullwidth": IsFullWidth, + "halfwidth": IsHalfWidth, + "variablewidth": IsVariableWidth, + "base64": IsBase64, + "datauri": IsDataURI, + "ip": IsIP, + "port": IsPort, + "ipv4": IsIPv4, + "ipv6": IsIPv6, + "dns": IsDNSName, + "host": IsHost, + "mac": IsMAC, + "latitude": IsLatitude, + "longitude": IsLongitude, + "ssn": IsSSN, + "semver": IsSemver, + "rfc3339": IsRFC3339, + "rfc3339WithoutZone": IsRFC3339WithoutZone, + "ISO3166Alpha2": IsISO3166Alpha2, + "ISO3166Alpha3": IsISO3166Alpha3, + "ISO4217": IsISO4217, +} + +// ISO3166Entry stores country codes +type ISO3166Entry struct { + EnglishShortName string + FrenchShortName string + Alpha2Code string + Alpha3Code string + Numeric string +} + +//ISO3166List based on https://www.iso.org/obp/ui/#search/code/ Code Type "Officially Assigned Codes" +var ISO3166List = []ISO3166Entry{ + {"Afghanistan", "Afghanistan (l')", "AF", "AFG", "004"}, + {"Albania", "Albanie (l')", "AL", "ALB", "008"}, + {"Antarctica", "Antarctique (l')", "AQ", "ATA", "010"}, + {"Algeria", "Algérie (l')", "DZ", "DZA", "012"}, + {"American Samoa", "Samoa américaines (les)", "AS", "ASM", "016"}, + {"Andorra", "Andorre (l')", "AD", "AND", "020"}, + {"Angola", "Angola (l')", "AO", "AGO", "024"}, + {"Antigua and Barbuda", "Antigua-et-Barbuda", "AG", "ATG", "028"}, + {"Azerbaijan", "Azerbaïdjan (l')", "AZ", "AZE", "031"}, + {"Argentina", "Argentine (l')", "AR", "ARG", "032"}, + {"Australia", "Australie (l')", "AU", "AUS", "036"}, + {"Austria", "Autriche (l')", "AT", "AUT", "040"}, + {"Bahamas (the)", "Bahamas (les)", "BS", "BHS", "044"}, + {"Bahrain", "Bahreïn", "BH", "BHR", "048"}, + {"Bangladesh", "Bangladesh (le)", "BD", "BGD", "050"}, + {"Armenia", "Arménie (l')", "AM", "ARM", "051"}, + {"Barbados", "Barbade (la)", "BB", "BRB", "052"}, + {"Belgium", "Belgique (la)", "BE", "BEL", "056"}, + {"Bermuda", "Bermudes (les)", "BM", "BMU", "060"}, + {"Bhutan", "Bhoutan (le)", "BT", "BTN", "064"}, + {"Bolivia (Plurinational State of)", "Bolivie (État plurinational de)", "BO", "BOL", "068"}, + {"Bosnia and Herzegovina", "Bosnie-Herzégovine (la)", "BA", "BIH", "070"}, + {"Botswana", "Botswana (le)", "BW", "BWA", "072"}, + {"Bouvet Island", "Bouvet (l'Île)", "BV", "BVT", "074"}, + {"Brazil", "Brésil (le)", "BR", "BRA", "076"}, + {"Belize", "Belize (le)", "BZ", "BLZ", "084"}, + {"British Indian Ocean Territory (the)", "Indien (le Territoire britannique de l'océan)", "IO", "IOT", "086"}, + {"Solomon Islands", "Salomon (Îles)", "SB", "SLB", "090"}, + {"Virgin Islands (British)", "Vierges britanniques (les Îles)", "VG", 
"VGB", "092"}, + {"Brunei Darussalam", "Brunéi Darussalam (le)", "BN", "BRN", "096"}, + {"Bulgaria", "Bulgarie (la)", "BG", "BGR", "100"}, + {"Myanmar", "Myanmar (le)", "MM", "MMR", "104"}, + {"Burundi", "Burundi (le)", "BI", "BDI", "108"}, + {"Belarus", "Bélarus (le)", "BY", "BLR", "112"}, + {"Cambodia", "Cambodge (le)", "KH", "KHM", "116"}, + {"Cameroon", "Cameroun (le)", "CM", "CMR", "120"}, + {"Canada", "Canada (le)", "CA", "CAN", "124"}, + {"Cabo Verde", "Cabo Verde", "CV", "CPV", "132"}, + {"Cayman Islands (the)", "Caïmans (les Îles)", "KY", "CYM", "136"}, + {"Central African Republic (the)", "République centrafricaine (la)", "CF", "CAF", "140"}, + {"Sri Lanka", "Sri Lanka", "LK", "LKA", "144"}, + {"Chad", "Tchad (le)", "TD", "TCD", "148"}, + {"Chile", "Chili (le)", "CL", "CHL", "152"}, + {"China", "Chine (la)", "CN", "CHN", "156"}, + {"Taiwan (Province of China)", "Taïwan (Province de Chine)", "TW", "TWN", "158"}, + {"Christmas Island", "Christmas (l'Île)", "CX", "CXR", "162"}, + {"Cocos (Keeling) Islands (the)", "Cocos (les Îles)/ Keeling (les Îles)", "CC", "CCK", "166"}, + {"Colombia", "Colombie (la)", "CO", "COL", "170"}, + {"Comoros (the)", "Comores (les)", "KM", "COM", "174"}, + {"Mayotte", "Mayotte", "YT", "MYT", "175"}, + {"Congo (the)", "Congo (le)", "CG", "COG", "178"}, + {"Congo (the Democratic Republic of the)", "Congo (la République démocratique du)", "CD", "COD", "180"}, + {"Cook Islands (the)", "Cook (les Îles)", "CK", "COK", "184"}, + {"Costa Rica", "Costa Rica (le)", "CR", "CRI", "188"}, + {"Croatia", "Croatie (la)", "HR", "HRV", "191"}, + {"Cuba", "Cuba", "CU", "CUB", "192"}, + {"Cyprus", "Chypre", "CY", "CYP", "196"}, + {"Czech Republic (the)", "tchèque (la République)", "CZ", "CZE", "203"}, + {"Benin", "Bénin (le)", "BJ", "BEN", "204"}, + {"Denmark", "Danemark (le)", "DK", "DNK", "208"}, + {"Dominica", "Dominique (la)", "DM", "DMA", "212"}, + {"Dominican Republic (the)", "dominicaine (la République)", "DO", "DOM", "214"}, + {"Ecuador", "Équateur (l')", "EC", "ECU", "218"}, + {"El Salvador", "El Salvador", "SV", "SLV", "222"}, + {"Equatorial Guinea", "Guinée équatoriale (la)", "GQ", "GNQ", "226"}, + {"Ethiopia", "Éthiopie (l')", "ET", "ETH", "231"}, + {"Eritrea", "Érythrée (l')", "ER", "ERI", "232"}, + {"Estonia", "Estonie (l')", "EE", "EST", "233"}, + {"Faroe Islands (the)", "Féroé (les Îles)", "FO", "FRO", "234"}, + {"Falkland Islands (the) [Malvinas]", "Falkland (les Îles)/Malouines (les Îles)", "FK", "FLK", "238"}, + {"South Georgia and the South Sandwich Islands", "Géorgie du Sud-et-les Îles Sandwich du Sud (la)", "GS", "SGS", "239"}, + {"Fiji", "Fidji (les)", "FJ", "FJI", "242"}, + {"Finland", "Finlande (la)", "FI", "FIN", "246"}, + {"Åland Islands", "Åland(les Îles)", "AX", "ALA", "248"}, + {"France", "France (la)", "FR", "FRA", "250"}, + {"French Guiana", "Guyane française (la )", "GF", "GUF", "254"}, + {"French Polynesia", "Polynésie française (la)", "PF", "PYF", "258"}, + {"French Southern Territories (the)", "Terres australes françaises (les)", "TF", "ATF", "260"}, + {"Djibouti", "Djibouti", "DJ", "DJI", "262"}, + {"Gabon", "Gabon (le)", "GA", "GAB", "266"}, + {"Georgia", "Géorgie (la)", "GE", "GEO", "268"}, + {"Gambia (the)", "Gambie (la)", "GM", "GMB", "270"}, + {"Palestine, State of", "Palestine, État de", "PS", "PSE", "275"}, + {"Germany", "Allemagne (l')", "DE", "DEU", "276"}, + {"Ghana", "Ghana (le)", "GH", "GHA", "288"}, + {"Gibraltar", "Gibraltar", "GI", "GIB", "292"}, + {"Kiribati", "Kiribati", "KI", "KIR", "296"}, + {"Greece", "Grèce (la)", 
"GR", "GRC", "300"}, + {"Greenland", "Groenland (le)", "GL", "GRL", "304"}, + {"Grenada", "Grenade (la)", "GD", "GRD", "308"}, + {"Guadeloupe", "Guadeloupe (la)", "GP", "GLP", "312"}, + {"Guam", "Guam", "GU", "GUM", "316"}, + {"Guatemala", "Guatemala (le)", "GT", "GTM", "320"}, + {"Guinea", "Guinée (la)", "GN", "GIN", "324"}, + {"Guyana", "Guyana (le)", "GY", "GUY", "328"}, + {"Haiti", "Haïti", "HT", "HTI", "332"}, + {"Heard Island and McDonald Islands", "Heard-et-Îles MacDonald (l'Île)", "HM", "HMD", "334"}, + {"Holy See (the)", "Saint-Siège (le)", "VA", "VAT", "336"}, + {"Honduras", "Honduras (le)", "HN", "HND", "340"}, + {"Hong Kong", "Hong Kong", "HK", "HKG", "344"}, + {"Hungary", "Hongrie (la)", "HU", "HUN", "348"}, + {"Iceland", "Islande (l')", "IS", "ISL", "352"}, + {"India", "Inde (l')", "IN", "IND", "356"}, + {"Indonesia", "Indonésie (l')", "ID", "IDN", "360"}, + {"Iran (Islamic Republic of)", "Iran (République Islamique d')", "IR", "IRN", "364"}, + {"Iraq", "Iraq (l')", "IQ", "IRQ", "368"}, + {"Ireland", "Irlande (l')", "IE", "IRL", "372"}, + {"Israel", "Israël", "IL", "ISR", "376"}, + {"Italy", "Italie (l')", "IT", "ITA", "380"}, + {"Côte d'Ivoire", "Côte d'Ivoire (la)", "CI", "CIV", "384"}, + {"Jamaica", "Jamaïque (la)", "JM", "JAM", "388"}, + {"Japan", "Japon (le)", "JP", "JPN", "392"}, + {"Kazakhstan", "Kazakhstan (le)", "KZ", "KAZ", "398"}, + {"Jordan", "Jordanie (la)", "JO", "JOR", "400"}, + {"Kenya", "Kenya (le)", "KE", "KEN", "404"}, + {"Korea (the Democratic People's Republic of)", "Corée (la République populaire démocratique de)", "KP", "PRK", "408"}, + {"Korea (the Republic of)", "Corée (la République de)", "KR", "KOR", "410"}, + {"Kuwait", "Koweït (le)", "KW", "KWT", "414"}, + {"Kyrgyzstan", "Kirghizistan (le)", "KG", "KGZ", "417"}, + {"Lao People's Democratic Republic (the)", "Lao, République démocratique populaire", "LA", "LAO", "418"}, + {"Lebanon", "Liban (le)", "LB", "LBN", "422"}, + {"Lesotho", "Lesotho (le)", "LS", "LSO", "426"}, + {"Latvia", "Lettonie (la)", "LV", "LVA", "428"}, + {"Liberia", "Libéria (le)", "LR", "LBR", "430"}, + {"Libya", "Libye (la)", "LY", "LBY", "434"}, + {"Liechtenstein", "Liechtenstein (le)", "LI", "LIE", "438"}, + {"Lithuania", "Lituanie (la)", "LT", "LTU", "440"}, + {"Luxembourg", "Luxembourg (le)", "LU", "LUX", "442"}, + {"Macao", "Macao", "MO", "MAC", "446"}, + {"Madagascar", "Madagascar", "MG", "MDG", "450"}, + {"Malawi", "Malawi (le)", "MW", "MWI", "454"}, + {"Malaysia", "Malaisie (la)", "MY", "MYS", "458"}, + {"Maldives", "Maldives (les)", "MV", "MDV", "462"}, + {"Mali", "Mali (le)", "ML", "MLI", "466"}, + {"Malta", "Malte", "MT", "MLT", "470"}, + {"Martinique", "Martinique (la)", "MQ", "MTQ", "474"}, + {"Mauritania", "Mauritanie (la)", "MR", "MRT", "478"}, + {"Mauritius", "Maurice", "MU", "MUS", "480"}, + {"Mexico", "Mexique (le)", "MX", "MEX", "484"}, + {"Monaco", "Monaco", "MC", "MCO", "492"}, + {"Mongolia", "Mongolie (la)", "MN", "MNG", "496"}, + {"Moldova (the Republic of)", "Moldova , République de", "MD", "MDA", "498"}, + {"Montenegro", "Monténégro (le)", "ME", "MNE", "499"}, + {"Montserrat", "Montserrat", "MS", "MSR", "500"}, + {"Morocco", "Maroc (le)", "MA", "MAR", "504"}, + {"Mozambique", "Mozambique (le)", "MZ", "MOZ", "508"}, + {"Oman", "Oman", "OM", "OMN", "512"}, + {"Namibia", "Namibie (la)", "NA", "NAM", "516"}, + {"Nauru", "Nauru", "NR", "NRU", "520"}, + {"Nepal", "Népal (le)", "NP", "NPL", "524"}, + {"Netherlands (the)", "Pays-Bas (les)", "NL", "NLD", "528"}, + {"Curaçao", "Curaçao", "CW", "CUW", "531"}, + 
{"Aruba", "Aruba", "AW", "ABW", "533"}, + {"Sint Maarten (Dutch part)", "Saint-Martin (partie néerlandaise)", "SX", "SXM", "534"}, + {"Bonaire, Sint Eustatius and Saba", "Bonaire, Saint-Eustache et Saba", "BQ", "BES", "535"}, + {"New Caledonia", "Nouvelle-Calédonie (la)", "NC", "NCL", "540"}, + {"Vanuatu", "Vanuatu (le)", "VU", "VUT", "548"}, + {"New Zealand", "Nouvelle-Zélande (la)", "NZ", "NZL", "554"}, + {"Nicaragua", "Nicaragua (le)", "NI", "NIC", "558"}, + {"Niger (the)", "Niger (le)", "NE", "NER", "562"}, + {"Nigeria", "Nigéria (le)", "NG", "NGA", "566"}, + {"Niue", "Niue", "NU", "NIU", "570"}, + {"Norfolk Island", "Norfolk (l'Île)", "NF", "NFK", "574"}, + {"Norway", "Norvège (la)", "NO", "NOR", "578"}, + {"Northern Mariana Islands (the)", "Mariannes du Nord (les Îles)", "MP", "MNP", "580"}, + {"United States Minor Outlying Islands (the)", "Îles mineures éloignées des États-Unis (les)", "UM", "UMI", "581"}, + {"Micronesia (Federated States of)", "Micronésie (États fédérés de)", "FM", "FSM", "583"}, + {"Marshall Islands (the)", "Marshall (Îles)", "MH", "MHL", "584"}, + {"Palau", "Palaos (les)", "PW", "PLW", "585"}, + {"Pakistan", "Pakistan (le)", "PK", "PAK", "586"}, + {"Panama", "Panama (le)", "PA", "PAN", "591"}, + {"Papua New Guinea", "Papouasie-Nouvelle-Guinée (la)", "PG", "PNG", "598"}, + {"Paraguay", "Paraguay (le)", "PY", "PRY", "600"}, + {"Peru", "Pérou (le)", "PE", "PER", "604"}, + {"Philippines (the)", "Philippines (les)", "PH", "PHL", "608"}, + {"Pitcairn", "Pitcairn", "PN", "PCN", "612"}, + {"Poland", "Pologne (la)", "PL", "POL", "616"}, + {"Portugal", "Portugal (le)", "PT", "PRT", "620"}, + {"Guinea-Bissau", "Guinée-Bissau (la)", "GW", "GNB", "624"}, + {"Timor-Leste", "Timor-Leste (le)", "TL", "TLS", "626"}, + {"Puerto Rico", "Porto Rico", "PR", "PRI", "630"}, + {"Qatar", "Qatar (le)", "QA", "QAT", "634"}, + {"Réunion", "Réunion (La)", "RE", "REU", "638"}, + {"Romania", "Roumanie (la)", "RO", "ROU", "642"}, + {"Russian Federation (the)", "Russie (la Fédération de)", "RU", "RUS", "643"}, + {"Rwanda", "Rwanda (le)", "RW", "RWA", "646"}, + {"Saint Barthélemy", "Saint-Barthélemy", "BL", "BLM", "652"}, + {"Saint Helena, Ascension and Tristan da Cunha", "Sainte-Hélène, Ascension et Tristan da Cunha", "SH", "SHN", "654"}, + {"Saint Kitts and Nevis", "Saint-Kitts-et-Nevis", "KN", "KNA", "659"}, + {"Anguilla", "Anguilla", "AI", "AIA", "660"}, + {"Saint Lucia", "Sainte-Lucie", "LC", "LCA", "662"}, + {"Saint Martin (French part)", "Saint-Martin (partie française)", "MF", "MAF", "663"}, + {"Saint Pierre and Miquelon", "Saint-Pierre-et-Miquelon", "PM", "SPM", "666"}, + {"Saint Vincent and the Grenadines", "Saint-Vincent-et-les Grenadines", "VC", "VCT", "670"}, + {"San Marino", "Saint-Marin", "SM", "SMR", "674"}, + {"Sao Tome and Principe", "Sao Tomé-et-Principe", "ST", "STP", "678"}, + {"Saudi Arabia", "Arabie saoudite (l')", "SA", "SAU", "682"}, + {"Senegal", "Sénégal (le)", "SN", "SEN", "686"}, + {"Serbia", "Serbie (la)", "RS", "SRB", "688"}, + {"Seychelles", "Seychelles (les)", "SC", "SYC", "690"}, + {"Sierra Leone", "Sierra Leone (la)", "SL", "SLE", "694"}, + {"Singapore", "Singapour", "SG", "SGP", "702"}, + {"Slovakia", "Slovaquie (la)", "SK", "SVK", "703"}, + {"Viet Nam", "Viet Nam (le)", "VN", "VNM", "704"}, + {"Slovenia", "Slovénie (la)", "SI", "SVN", "705"}, + {"Somalia", "Somalie (la)", "SO", "SOM", "706"}, + {"South Africa", "Afrique du Sud (l')", "ZA", "ZAF", "710"}, + {"Zimbabwe", "Zimbabwe (le)", "ZW", "ZWE", "716"}, + {"Spain", "Espagne (l')", "ES", "ESP", "724"}, + 
{"South Sudan", "Soudan du Sud (le)", "SS", "SSD", "728"}, + {"Sudan (the)", "Soudan (le)", "SD", "SDN", "729"}, + {"Western Sahara*", "Sahara occidental (le)*", "EH", "ESH", "732"}, + {"Suriname", "Suriname (le)", "SR", "SUR", "740"}, + {"Svalbard and Jan Mayen", "Svalbard et l'Île Jan Mayen (le)", "SJ", "SJM", "744"}, + {"Swaziland", "Swaziland (le)", "SZ", "SWZ", "748"}, + {"Sweden", "Suède (la)", "SE", "SWE", "752"}, + {"Switzerland", "Suisse (la)", "CH", "CHE", "756"}, + {"Syrian Arab Republic", "République arabe syrienne (la)", "SY", "SYR", "760"}, + {"Tajikistan", "Tadjikistan (le)", "TJ", "TJK", "762"}, + {"Thailand", "Thaïlande (la)", "TH", "THA", "764"}, + {"Togo", "Togo (le)", "TG", "TGO", "768"}, + {"Tokelau", "Tokelau (les)", "TK", "TKL", "772"}, + {"Tonga", "Tonga (les)", "TO", "TON", "776"}, + {"Trinidad and Tobago", "Trinité-et-Tobago (la)", "TT", "TTO", "780"}, + {"United Arab Emirates (the)", "Émirats arabes unis (les)", "AE", "ARE", "784"}, + {"Tunisia", "Tunisie (la)", "TN", "TUN", "788"}, + {"Turkey", "Turquie (la)", "TR", "TUR", "792"}, + {"Turkmenistan", "Turkménistan (le)", "TM", "TKM", "795"}, + {"Turks and Caicos Islands (the)", "Turks-et-Caïcos (les Îles)", "TC", "TCA", "796"}, + {"Tuvalu", "Tuvalu (les)", "TV", "TUV", "798"}, + {"Uganda", "Ouganda (l')", "UG", "UGA", "800"}, + {"Ukraine", "Ukraine (l')", "UA", "UKR", "804"}, + {"Macedonia (the former Yugoslav Republic of)", "Macédoine (l'ex‑République yougoslave de)", "MK", "MKD", "807"}, + {"Egypt", "Égypte (l')", "EG", "EGY", "818"}, + {"United Kingdom of Great Britain and Northern Ireland (the)", "Royaume-Uni de Grande-Bretagne et d'Irlande du Nord (le)", "GB", "GBR", "826"}, + {"Guernsey", "Guernesey", "GG", "GGY", "831"}, + {"Jersey", "Jersey", "JE", "JEY", "832"}, + {"Isle of Man", "Île de Man", "IM", "IMN", "833"}, + {"Tanzania, United Republic of", "Tanzanie, République-Unie de", "TZ", "TZA", "834"}, + {"United States of America (the)", "États-Unis d'Amérique (les)", "US", "USA", "840"}, + {"Virgin Islands (U.S.)", "Vierges des États-Unis (les Îles)", "VI", "VIR", "850"}, + {"Burkina Faso", "Burkina Faso (le)", "BF", "BFA", "854"}, + {"Uruguay", "Uruguay (l')", "UY", "URY", "858"}, + {"Uzbekistan", "Ouzbékistan (l')", "UZ", "UZB", "860"}, + {"Venezuela (Bolivarian Republic of)", "Venezuela (République bolivarienne du)", "VE", "VEN", "862"}, + {"Wallis and Futuna", "Wallis-et-Futuna", "WF", "WLF", "876"}, + {"Samoa", "Samoa (le)", "WS", "WSM", "882"}, + {"Yemen", "Yémen (le)", "YE", "YEM", "887"}, + {"Zambia", "Zambie (la)", "ZM", "ZMB", "894"}, +} + +// ISO4217List is the list of ISO currency codes +var ISO4217List = []string{ + "AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN", + "BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV", "BRL", "BSD", "BTN", "BWP", "BYN", "BZD", + "CAD", "CDF", "CHE", "CHF", "CHW", "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CUC", "CUP", "CVE", "CZK", + "DJF", "DKK", "DOP", "DZD", + "EGP", "ERN", "ETB", "EUR", + "FJD", "FKP", + "GBP", "GEL", "GHS", "GIP", "GMD", "GNF", "GTQ", "GYD", + "HKD", "HNL", "HRK", "HTG", "HUF", + "IDR", "ILS", "INR", "IQD", "IRR", "ISK", + "JMD", "JOD", "JPY", + "KES", "KGS", "KHR", "KMF", "KPW", "KRW", "KWD", "KYD", "KZT", + "LAK", "LBP", "LKR", "LRD", "LSL", "LYD", + "MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP", "MRO", "MUR", "MVR", "MWK", "MXN", "MXV", "MYR", "MZN", + "NAD", "NGN", "NIO", "NOK", "NPR", "NZD", + "OMR", + "PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG", + "QAR", + "RON", "RSD", "RUB", "RWF", + 
"SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STD", "SVC", "SYP", "SZL", + "THB", "TJS", "TMT", "TND", "TOP", "TRY", "TTD", "TWD", "TZS", + "UAH", "UGX", "USD", "USN", "UYI", "UYU", "UZS", + "VEF", "VND", "VUV", + "WST", + "XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XOF", "XPD", "XPF", "XPT", "XSU", "XTS", "XUA", "XXX", + "YER", + "ZAR", "ZMW", "ZWL", +} + +// ISO693Entry stores ISO language codes +type ISO693Entry struct { + Alpha3bCode string + Alpha2Code string + English string +} + +//ISO693List based on http://data.okfn.org/data/core/language-codes/r/language-codes-3b2.json +var ISO693List = []ISO693Entry{ + {Alpha3bCode: "aar", Alpha2Code: "aa", English: "Afar"}, + {Alpha3bCode: "abk", Alpha2Code: "ab", English: "Abkhazian"}, + {Alpha3bCode: "afr", Alpha2Code: "af", English: "Afrikaans"}, + {Alpha3bCode: "aka", Alpha2Code: "ak", English: "Akan"}, + {Alpha3bCode: "alb", Alpha2Code: "sq", English: "Albanian"}, + {Alpha3bCode: "amh", Alpha2Code: "am", English: "Amharic"}, + {Alpha3bCode: "ara", Alpha2Code: "ar", English: "Arabic"}, + {Alpha3bCode: "arg", Alpha2Code: "an", English: "Aragonese"}, + {Alpha3bCode: "arm", Alpha2Code: "hy", English: "Armenian"}, + {Alpha3bCode: "asm", Alpha2Code: "as", English: "Assamese"}, + {Alpha3bCode: "ava", Alpha2Code: "av", English: "Avaric"}, + {Alpha3bCode: "ave", Alpha2Code: "ae", English: "Avestan"}, + {Alpha3bCode: "aym", Alpha2Code: "ay", English: "Aymara"}, + {Alpha3bCode: "aze", Alpha2Code: "az", English: "Azerbaijani"}, + {Alpha3bCode: "bak", Alpha2Code: "ba", English: "Bashkir"}, + {Alpha3bCode: "bam", Alpha2Code: "bm", English: "Bambara"}, + {Alpha3bCode: "baq", Alpha2Code: "eu", English: "Basque"}, + {Alpha3bCode: "bel", Alpha2Code: "be", English: "Belarusian"}, + {Alpha3bCode: "ben", Alpha2Code: "bn", English: "Bengali"}, + {Alpha3bCode: "bih", Alpha2Code: "bh", English: "Bihari languages"}, + {Alpha3bCode: "bis", Alpha2Code: "bi", English: "Bislama"}, + {Alpha3bCode: "bos", Alpha2Code: "bs", English: "Bosnian"}, + {Alpha3bCode: "bre", Alpha2Code: "br", English: "Breton"}, + {Alpha3bCode: "bul", Alpha2Code: "bg", English: "Bulgarian"}, + {Alpha3bCode: "bur", Alpha2Code: "my", English: "Burmese"}, + {Alpha3bCode: "cat", Alpha2Code: "ca", English: "Catalan; Valencian"}, + {Alpha3bCode: "cha", Alpha2Code: "ch", English: "Chamorro"}, + {Alpha3bCode: "che", Alpha2Code: "ce", English: "Chechen"}, + {Alpha3bCode: "chi", Alpha2Code: "zh", English: "Chinese"}, + {Alpha3bCode: "chu", Alpha2Code: "cu", English: "Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic"}, + {Alpha3bCode: "chv", Alpha2Code: "cv", English: "Chuvash"}, + {Alpha3bCode: "cor", Alpha2Code: "kw", English: "Cornish"}, + {Alpha3bCode: "cos", Alpha2Code: "co", English: "Corsican"}, + {Alpha3bCode: "cre", Alpha2Code: "cr", English: "Cree"}, + {Alpha3bCode: "cze", Alpha2Code: "cs", English: "Czech"}, + {Alpha3bCode: "dan", Alpha2Code: "da", English: "Danish"}, + {Alpha3bCode: "div", Alpha2Code: "dv", English: "Divehi; Dhivehi; Maldivian"}, + {Alpha3bCode: "dut", Alpha2Code: "nl", English: "Dutch; Flemish"}, + {Alpha3bCode: "dzo", Alpha2Code: "dz", English: "Dzongkha"}, + {Alpha3bCode: "eng", Alpha2Code: "en", English: "English"}, + {Alpha3bCode: "epo", Alpha2Code: "eo", English: "Esperanto"}, + {Alpha3bCode: "est", Alpha2Code: "et", English: "Estonian"}, + {Alpha3bCode: "ewe", Alpha2Code: "ee", English: "Ewe"}, + {Alpha3bCode: "fao", Alpha2Code: "fo", English: "Faroese"}, + {Alpha3bCode: "fij", 
Alpha2Code: "fj", English: "Fijian"}, + {Alpha3bCode: "fin", Alpha2Code: "fi", English: "Finnish"}, + {Alpha3bCode: "fre", Alpha2Code: "fr", English: "French"}, + {Alpha3bCode: "fry", Alpha2Code: "fy", English: "Western Frisian"}, + {Alpha3bCode: "ful", Alpha2Code: "ff", English: "Fulah"}, + {Alpha3bCode: "geo", Alpha2Code: "ka", English: "Georgian"}, + {Alpha3bCode: "ger", Alpha2Code: "de", English: "German"}, + {Alpha3bCode: "gla", Alpha2Code: "gd", English: "Gaelic; Scottish Gaelic"}, + {Alpha3bCode: "gle", Alpha2Code: "ga", English: "Irish"}, + {Alpha3bCode: "glg", Alpha2Code: "gl", English: "Galician"}, + {Alpha3bCode: "glv", Alpha2Code: "gv", English: "Manx"}, + {Alpha3bCode: "gre", Alpha2Code: "el", English: "Greek, Modern (1453-)"}, + {Alpha3bCode: "grn", Alpha2Code: "gn", English: "Guarani"}, + {Alpha3bCode: "guj", Alpha2Code: "gu", English: "Gujarati"}, + {Alpha3bCode: "hat", Alpha2Code: "ht", English: "Haitian; Haitian Creole"}, + {Alpha3bCode: "hau", Alpha2Code: "ha", English: "Hausa"}, + {Alpha3bCode: "heb", Alpha2Code: "he", English: "Hebrew"}, + {Alpha3bCode: "her", Alpha2Code: "hz", English: "Herero"}, + {Alpha3bCode: "hin", Alpha2Code: "hi", English: "Hindi"}, + {Alpha3bCode: "hmo", Alpha2Code: "ho", English: "Hiri Motu"}, + {Alpha3bCode: "hrv", Alpha2Code: "hr", English: "Croatian"}, + {Alpha3bCode: "hun", Alpha2Code: "hu", English: "Hungarian"}, + {Alpha3bCode: "ibo", Alpha2Code: "ig", English: "Igbo"}, + {Alpha3bCode: "ice", Alpha2Code: "is", English: "Icelandic"}, + {Alpha3bCode: "ido", Alpha2Code: "io", English: "Ido"}, + {Alpha3bCode: "iii", Alpha2Code: "ii", English: "Sichuan Yi; Nuosu"}, + {Alpha3bCode: "iku", Alpha2Code: "iu", English: "Inuktitut"}, + {Alpha3bCode: "ile", Alpha2Code: "ie", English: "Interlingue; Occidental"}, + {Alpha3bCode: "ina", Alpha2Code: "ia", English: "Interlingua (International Auxiliary Language Association)"}, + {Alpha3bCode: "ind", Alpha2Code: "id", English: "Indonesian"}, + {Alpha3bCode: "ipk", Alpha2Code: "ik", English: "Inupiaq"}, + {Alpha3bCode: "ita", Alpha2Code: "it", English: "Italian"}, + {Alpha3bCode: "jav", Alpha2Code: "jv", English: "Javanese"}, + {Alpha3bCode: "jpn", Alpha2Code: "ja", English: "Japanese"}, + {Alpha3bCode: "kal", Alpha2Code: "kl", English: "Kalaallisut; Greenlandic"}, + {Alpha3bCode: "kan", Alpha2Code: "kn", English: "Kannada"}, + {Alpha3bCode: "kas", Alpha2Code: "ks", English: "Kashmiri"}, + {Alpha3bCode: "kau", Alpha2Code: "kr", English: "Kanuri"}, + {Alpha3bCode: "kaz", Alpha2Code: "kk", English: "Kazakh"}, + {Alpha3bCode: "khm", Alpha2Code: "km", English: "Central Khmer"}, + {Alpha3bCode: "kik", Alpha2Code: "ki", English: "Kikuyu; Gikuyu"}, + {Alpha3bCode: "kin", Alpha2Code: "rw", English: "Kinyarwanda"}, + {Alpha3bCode: "kir", Alpha2Code: "ky", English: "Kirghiz; Kyrgyz"}, + {Alpha3bCode: "kom", Alpha2Code: "kv", English: "Komi"}, + {Alpha3bCode: "kon", Alpha2Code: "kg", English: "Kongo"}, + {Alpha3bCode: "kor", Alpha2Code: "ko", English: "Korean"}, + {Alpha3bCode: "kua", Alpha2Code: "kj", English: "Kuanyama; Kwanyama"}, + {Alpha3bCode: "kur", Alpha2Code: "ku", English: "Kurdish"}, + {Alpha3bCode: "lao", Alpha2Code: "lo", English: "Lao"}, + {Alpha3bCode: "lat", Alpha2Code: "la", English: "Latin"}, + {Alpha3bCode: "lav", Alpha2Code: "lv", English: "Latvian"}, + {Alpha3bCode: "lim", Alpha2Code: "li", English: "Limburgan; Limburger; Limburgish"}, + {Alpha3bCode: "lin", Alpha2Code: "ln", English: "Lingala"}, + {Alpha3bCode: "lit", Alpha2Code: "lt", English: "Lithuanian"}, + {Alpha3bCode: "ltz", Alpha2Code: 
"lb", English: "Luxembourgish; Letzeburgesch"}, + {Alpha3bCode: "lub", Alpha2Code: "lu", English: "Luba-Katanga"}, + {Alpha3bCode: "lug", Alpha2Code: "lg", English: "Ganda"}, + {Alpha3bCode: "mac", Alpha2Code: "mk", English: "Macedonian"}, + {Alpha3bCode: "mah", Alpha2Code: "mh", English: "Marshallese"}, + {Alpha3bCode: "mal", Alpha2Code: "ml", English: "Malayalam"}, + {Alpha3bCode: "mao", Alpha2Code: "mi", English: "Maori"}, + {Alpha3bCode: "mar", Alpha2Code: "mr", English: "Marathi"}, + {Alpha3bCode: "may", Alpha2Code: "ms", English: "Malay"}, + {Alpha3bCode: "mlg", Alpha2Code: "mg", English: "Malagasy"}, + {Alpha3bCode: "mlt", Alpha2Code: "mt", English: "Maltese"}, + {Alpha3bCode: "mon", Alpha2Code: "mn", English: "Mongolian"}, + {Alpha3bCode: "nau", Alpha2Code: "na", English: "Nauru"}, + {Alpha3bCode: "nav", Alpha2Code: "nv", English: "Navajo; Navaho"}, + {Alpha3bCode: "nbl", Alpha2Code: "nr", English: "Ndebele, South; South Ndebele"}, + {Alpha3bCode: "nde", Alpha2Code: "nd", English: "Ndebele, North; North Ndebele"}, + {Alpha3bCode: "ndo", Alpha2Code: "ng", English: "Ndonga"}, + {Alpha3bCode: "nep", Alpha2Code: "ne", English: "Nepali"}, + {Alpha3bCode: "nno", Alpha2Code: "nn", English: "Norwegian Nynorsk; Nynorsk, Norwegian"}, + {Alpha3bCode: "nob", Alpha2Code: "nb", English: "Bokmål, Norwegian; Norwegian Bokmål"}, + {Alpha3bCode: "nor", Alpha2Code: "no", English: "Norwegian"}, + {Alpha3bCode: "nya", Alpha2Code: "ny", English: "Chichewa; Chewa; Nyanja"}, + {Alpha3bCode: "oci", Alpha2Code: "oc", English: "Occitan (post 1500); Provençal"}, + {Alpha3bCode: "oji", Alpha2Code: "oj", English: "Ojibwa"}, + {Alpha3bCode: "ori", Alpha2Code: "or", English: "Oriya"}, + {Alpha3bCode: "orm", Alpha2Code: "om", English: "Oromo"}, + {Alpha3bCode: "oss", Alpha2Code: "os", English: "Ossetian; Ossetic"}, + {Alpha3bCode: "pan", Alpha2Code: "pa", English: "Panjabi; Punjabi"}, + {Alpha3bCode: "per", Alpha2Code: "fa", English: "Persian"}, + {Alpha3bCode: "pli", Alpha2Code: "pi", English: "Pali"}, + {Alpha3bCode: "pol", Alpha2Code: "pl", English: "Polish"}, + {Alpha3bCode: "por", Alpha2Code: "pt", English: "Portuguese"}, + {Alpha3bCode: "pus", Alpha2Code: "ps", English: "Pushto; Pashto"}, + {Alpha3bCode: "que", Alpha2Code: "qu", English: "Quechua"}, + {Alpha3bCode: "roh", Alpha2Code: "rm", English: "Romansh"}, + {Alpha3bCode: "rum", Alpha2Code: "ro", English: "Romanian; Moldavian; Moldovan"}, + {Alpha3bCode: "run", Alpha2Code: "rn", English: "Rundi"}, + {Alpha3bCode: "rus", Alpha2Code: "ru", English: "Russian"}, + {Alpha3bCode: "sag", Alpha2Code: "sg", English: "Sango"}, + {Alpha3bCode: "san", Alpha2Code: "sa", English: "Sanskrit"}, + {Alpha3bCode: "sin", Alpha2Code: "si", English: "Sinhala; Sinhalese"}, + {Alpha3bCode: "slo", Alpha2Code: "sk", English: "Slovak"}, + {Alpha3bCode: "slv", Alpha2Code: "sl", English: "Slovenian"}, + {Alpha3bCode: "sme", Alpha2Code: "se", English: "Northern Sami"}, + {Alpha3bCode: "smo", Alpha2Code: "sm", English: "Samoan"}, + {Alpha3bCode: "sna", Alpha2Code: "sn", English: "Shona"}, + {Alpha3bCode: "snd", Alpha2Code: "sd", English: "Sindhi"}, + {Alpha3bCode: "som", Alpha2Code: "so", English: "Somali"}, + {Alpha3bCode: "sot", Alpha2Code: "st", English: "Sotho, Southern"}, + {Alpha3bCode: "spa", Alpha2Code: "es", English: "Spanish; Castilian"}, + {Alpha3bCode: "srd", Alpha2Code: "sc", English: "Sardinian"}, + {Alpha3bCode: "srp", Alpha2Code: "sr", English: "Serbian"}, + {Alpha3bCode: "ssw", Alpha2Code: "ss", English: "Swati"}, + {Alpha3bCode: "sun", Alpha2Code: "su", English: 
"Sundanese"}, + {Alpha3bCode: "swa", Alpha2Code: "sw", English: "Swahili"}, + {Alpha3bCode: "swe", Alpha2Code: "sv", English: "Swedish"}, + {Alpha3bCode: "tah", Alpha2Code: "ty", English: "Tahitian"}, + {Alpha3bCode: "tam", Alpha2Code: "ta", English: "Tamil"}, + {Alpha3bCode: "tat", Alpha2Code: "tt", English: "Tatar"}, + {Alpha3bCode: "tel", Alpha2Code: "te", English: "Telugu"}, + {Alpha3bCode: "tgk", Alpha2Code: "tg", English: "Tajik"}, + {Alpha3bCode: "tgl", Alpha2Code: "tl", English: "Tagalog"}, + {Alpha3bCode: "tha", Alpha2Code: "th", English: "Thai"}, + {Alpha3bCode: "tib", Alpha2Code: "bo", English: "Tibetan"}, + {Alpha3bCode: "tir", Alpha2Code: "ti", English: "Tigrinya"}, + {Alpha3bCode: "ton", Alpha2Code: "to", English: "Tonga (Tonga Islands)"}, + {Alpha3bCode: "tsn", Alpha2Code: "tn", English: "Tswana"}, + {Alpha3bCode: "tso", Alpha2Code: "ts", English: "Tsonga"}, + {Alpha3bCode: "tuk", Alpha2Code: "tk", English: "Turkmen"}, + {Alpha3bCode: "tur", Alpha2Code: "tr", English: "Turkish"}, + {Alpha3bCode: "twi", Alpha2Code: "tw", English: "Twi"}, + {Alpha3bCode: "uig", Alpha2Code: "ug", English: "Uighur; Uyghur"}, + {Alpha3bCode: "ukr", Alpha2Code: "uk", English: "Ukrainian"}, + {Alpha3bCode: "urd", Alpha2Code: "ur", English: "Urdu"}, + {Alpha3bCode: "uzb", Alpha2Code: "uz", English: "Uzbek"}, + {Alpha3bCode: "ven", Alpha2Code: "ve", English: "Venda"}, + {Alpha3bCode: "vie", Alpha2Code: "vi", English: "Vietnamese"}, + {Alpha3bCode: "vol", Alpha2Code: "vo", English: "Volapük"}, + {Alpha3bCode: "wel", Alpha2Code: "cy", English: "Welsh"}, + {Alpha3bCode: "wln", Alpha2Code: "wa", English: "Walloon"}, + {Alpha3bCode: "wol", Alpha2Code: "wo", English: "Wolof"}, + {Alpha3bCode: "xho", Alpha2Code: "xh", English: "Xhosa"}, + {Alpha3bCode: "yid", Alpha2Code: "yi", English: "Yiddish"}, + {Alpha3bCode: "yor", Alpha2Code: "yo", English: "Yoruba"}, + {Alpha3bCode: "zha", Alpha2Code: "za", English: "Zhuang; Chuang"}, + {Alpha3bCode: "zul", Alpha2Code: "zu", English: "Zulu"}, +} diff --git a/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/utils.go b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/utils.go new file mode 100644 index 000000000000..a0b706a743ce --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/utils.go @@ -0,0 +1,270 @@ +package govalidator + +import ( + "errors" + "fmt" + "html" + "math" + "path" + "regexp" + "strings" + "unicode" + "unicode/utf8" +) + +// Contains check if the string contains the substring. +func Contains(str, substring string) bool { + return strings.Contains(str, substring) +} + +// Matches check if string matches the pattern (pattern is regular expression) +// In case of error return false +func Matches(str, pattern string) bool { + match, _ := regexp.MatchString(pattern, str) + return match +} + +// LeftTrim trim characters from the left-side of the input. +// If second argument is empty, it's will be remove leading spaces. +func LeftTrim(str, chars string) string { + if chars == "" { + return strings.TrimLeftFunc(str, unicode.IsSpace) + } + r, _ := regexp.Compile("^[" + chars + "]+") + return r.ReplaceAllString(str, "") +} + +// RightTrim trim characters from the right-side of the input. +// If second argument is empty, it's will be remove spaces. +func RightTrim(str, chars string) string { + if chars == "" { + return strings.TrimRightFunc(str, unicode.IsSpace) + } + r, _ := regexp.Compile("[" + chars + "]+$") + return r.ReplaceAllString(str, "") +} + +// Trim trim characters from both sides of the input. 
+// If the second argument is empty, spaces are removed.
+func Trim(str, chars string) string {
+	return LeftTrim(RightTrim(str, chars), chars)
+}
+
+// WhiteList removes characters that do not appear in the whitelist.
+func WhiteList(str, chars string) string {
+	pattern := "[^" + chars + "]+"
+	r, _ := regexp.Compile(pattern)
+	return r.ReplaceAllString(str, "")
+}
+
+// BlackList removes characters that appear in the blacklist.
+func BlackList(str, chars string) string {
+	pattern := "[" + chars + "]+"
+	r, _ := regexp.Compile(pattern)
+	return r.ReplaceAllString(str, "")
+}
+
+// StripLow removes characters with a numerical value < 32 or equal to 127, i.e. most control characters.
+// If keepNewLines is true, newline characters are preserved (\n and \r, hex 0xA and 0xD).
+func StripLow(str string, keepNewLines bool) string {
+	chars := ""
+	if keepNewLines {
+		chars = "\x00-\x09\x0B\x0C\x0E-\x1F\x7F"
+	} else {
+		chars = "\x00-\x1F\x7F"
+	}
+	return BlackList(str, chars)
+}
+
+// ReplacePattern replaces a regular expression pattern in the string
+func ReplacePattern(str, pattern, replace string) string {
+	r, _ := regexp.Compile(pattern)
+	return r.ReplaceAllString(str, replace)
+}
+
+// Escape replaces <, >, & and " with HTML entities.
+var Escape = html.EscapeString
+
+// addSegment appends segment to inrune, inserting an underscore separator between segments.
+func addSegment(inrune, segment []rune) []rune {
+	if len(segment) == 0 {
+		return inrune
+	}
+	if len(inrune) != 0 {
+		inrune = append(inrune, '_')
+	}
+	inrune = append(inrune, segment...)
+	return inrune
+}
+
+// UnderscoreToCamelCase converts from underscore separated form to camel case form.
+// Ex.: my_func => MyFunc
+func UnderscoreToCamelCase(s string) string {
+	return strings.Replace(strings.Title(strings.Replace(strings.ToLower(s), "_", " ", -1)), " ", "", -1)
+}
+
+// CamelCaseToUnderscore converts from camel case form to underscore separated form.
+// Ex.: MyFunc => my_func +func CamelCaseToUnderscore(str string) string { + var output []rune + var segment []rune + for _, r := range str { + + // not treat number as separate segment + if !unicode.IsLower(r) && string(r) != "_" && !unicode.IsNumber(r) { + output = addSegment(output, segment) + segment = nil + } + segment = append(segment, unicode.ToLower(r)) + } + output = addSegment(output, segment) + return string(output) +} + +// Reverse return reversed string +func Reverse(s string) string { + r := []rune(s) + for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 { + r[i], r[j] = r[j], r[i] + } + return string(r) +} + +// GetLines split string by "\n" and return array of lines +func GetLines(s string) []string { + return strings.Split(s, "\n") +} + +// GetLine return specified line of multiline string +func GetLine(s string, index int) (string, error) { + lines := GetLines(s) + if index < 0 || index >= len(lines) { + return "", errors.New("line index out of bounds") + } + return lines[index], nil +} + +// RemoveTags remove all tags from HTML string +func RemoveTags(s string) string { + return ReplacePattern(s, "<[^>]*>", "") +} + +// SafeFileName return safe string that can be used in file names +func SafeFileName(str string) string { + name := strings.ToLower(str) + name = path.Clean(path.Base(name)) + name = strings.Trim(name, " ") + separators, err := regexp.Compile(`[ &_=+:]`) + if err == nil { + name = separators.ReplaceAllString(name, "-") + } + legal, err := regexp.Compile(`[^[:alnum:]-.]`) + if err == nil { + name = legal.ReplaceAllString(name, "") + } + for strings.Contains(name, "--") { + name = strings.Replace(name, "--", "-", -1) + } + return name +} + +// NormalizeEmail canonicalize an email address. +// The local part of the email address is lowercased for all domains; the hostname is always lowercased and +// the local part of the email address is always lowercased for hosts that are known to be case-insensitive (currently only GMail). +// Normalization follows special rules for known providers: currently, GMail addresses have dots removed in the local part and +// are stripped of tags (e.g. some.one+tag@gmail.com becomes someone@gmail.com) and all @googlemail.com addresses are +// normalized to @gmail.com. +func NormalizeEmail(str string) (string, error) { + if !IsEmail(str) { + return "", fmt.Errorf("%s is not an email", str) + } + parts := strings.Split(str, "@") + parts[0] = strings.ToLower(parts[0]) + parts[1] = strings.ToLower(parts[1]) + if parts[1] == "gmail.com" || parts[1] == "googlemail.com" { + parts[1] = "gmail.com" + parts[0] = strings.Split(ReplacePattern(parts[0], `\.`, ""), "+")[0] + } + return strings.Join(parts, "@"), nil +} + +// Truncate a string to the closest length without breaking words. 
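+// For example, Truncate("The quick brown fox jumps", 10, "...") returns "The quick...":
+// the cut falls on the nearest word boundary and the ending is appended.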
+func Truncate(str string, length int, ending string) string {
+	var aftstr, befstr string
+	if len(str) > length {
+		words := strings.Fields(str)
+		before, present := 0, 0
+		for i := range words {
+			befstr = aftstr
+			before = present
+			aftstr = aftstr + words[i] + " "
+			present = len(aftstr)
+			if present > length && i != 0 {
+				if (length - before) < (present - length) {
+					return Trim(befstr, " /\\.,\"'#!?&@+-") + ending
+				}
+				return Trim(aftstr, " /\\.,\"'#!?&@+-") + ending
+			}
+		}
+	}
+
+	return str
+}
+
+// PadLeft pads the left side of the string if its size is less than the indicated pad length
+func PadLeft(str string, padStr string, padLen int) string {
+	return buildPadStr(str, padStr, padLen, true, false)
+}
+
+// PadRight pads the right side of the string if its size is less than the indicated pad length
+func PadRight(str string, padStr string, padLen int) string {
+	return buildPadStr(str, padStr, padLen, false, true)
+}
+
+// PadBoth pads both sides of the string if its size is less than the indicated pad length
+func PadBoth(str string, padStr string, padLen int) string {
+	return buildPadStr(str, padStr, padLen, true, true)
+}
+
+// buildPadStr pads the string on the left, the right or both sides; note that the padding
+// string can be unicode and more than one character long
+func buildPadStr(str string, padStr string, padLen int, padLeft bool, padRight bool) string {
+
+	// When the padded length is less than the current string size
+	if padLen < utf8.RuneCountInString(str) {
+		return str
+	}
+
+	padLen -= utf8.RuneCountInString(str)
+
+	targetLen := padLen
+
+	targetLenLeft := targetLen
+	targetLenRight := targetLen
+	if padLeft && padRight {
+		targetLenLeft = padLen / 2
+		targetLenRight = padLen - targetLenLeft
+	}
+
+	strToRepeatLen := utf8.RuneCountInString(padStr)
+
+	repeatTimes := int(math.Ceil(float64(targetLen) / float64(strToRepeatLen)))
+	repeatedString := strings.Repeat(padStr, repeatTimes)
+
+	leftSide := ""
+	if padLeft {
+		// NOTE: this slice indexes bytes, not runes, so a multi-byte padStr may be cut mid-rune
+		leftSide = repeatedString[0:targetLenLeft]
+	}
+
+	rightSide := ""
+	if padRight {
+		rightSide = repeatedString[0:targetLenRight]
+	}
+
+	return leftSide + str + rightSide
+}
+
+// TruncatingErrorf drops the extra args passed to fmt.Errorf when they have no matching %s verb in str
+func TruncatingErrorf(str string, args ...interface{}) error {
+	n := strings.Count(str, "%s")
+	return fmt.Errorf(str, args[:n]...)
+}
diff --git a/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/validator.go b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/validator.go
new file mode 100644
index 000000000000..b18bbcb4c99f
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/validator.go
@@ -0,0 +1,1278 @@
+// Package govalidator is a package of validators and sanitizers for strings, structs and collections.
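+//
+// The main entry point for struct validation is ValidateStruct; the individual
+// string checks such as IsEmail and IsURL can also be called directly.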
+package govalidator
+
+import (
+	"bytes"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/json"
+	"encoding/pem"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/url"
+	"reflect"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+	"unicode"
+	"unicode/utf8"
+)
+
+var (
+	fieldsRequiredByDefault bool
+	nilPtrAllowedByRequired = false
+	notNumberRegexp         = regexp.MustCompile("[^0-9]+")
+	whiteSpacesAndMinus     = regexp.MustCompile(`[\s-]+`)
+	paramsRegexp            = regexp.MustCompile(`\(.*\)$`)
+)
+
+const maxURLRuneCount = 2083
+const minURLRuneCount = 3
+
+// RF3339WithoutZone is the RFC3339 time format with the timezone omitted.
+const RF3339WithoutZone = "2006-01-02T15:04:05"
+
+// SetFieldsRequiredByDefault causes validation to fail when struct fields
+// do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`).
+// This struct definition will fail govalidator.ValidateStruct() (and the field values do not matter):
+//     type exampleStruct struct {
+//         Name  string ``
+//         Email string `valid:"email"`
+//     }
+// This, however, will only fail when Email is empty or an invalid email address:
+//     type exampleStruct2 struct {
+//         Name  string `valid:"-"`
+//         Email string `valid:"email"`
+//     }
+// Lastly, this will only fail when Email is an invalid email address but not when it's empty:
+//     type exampleStruct3 struct {
+//         Name  string `valid:"-"`
+//         Email string `valid:"email,optional"`
+//     }
+func SetFieldsRequiredByDefault(value bool) {
+	fieldsRequiredByDefault = value
+}
+
+// SetNilPtrAllowedByRequired causes validation to pass for nil ptrs when a field is set to required.
+// The validation will still reject ptr fields in their zero value state. Example with this enabled:
+//     type exampleStruct struct {
+//         Name *string `valid:"required"`
+//     }
+// With `Name` set to "", this will be considered invalid input and will cause a validation error.
+// With `Name` set to nil, this will be considered valid by validation.
+// By default this is disabled.
+func SetNilPtrAllowedByRequired(value bool) {
+	nilPtrAllowedByRequired = value
+}
+
+// IsEmail checks if the string is an email.
+func IsEmail(str string) bool {
+	// TODO uppercase letters are not supported
+	return rxEmail.MatchString(str)
+}
+
+// IsExistingEmail checks if the string is an email of an existing domain
+func IsExistingEmail(email string) bool {
+
+	if len(email) < 6 || len(email) > 254 {
+		return false
+	}
+	at := strings.LastIndex(email, "@")
+	if at <= 0 || at > len(email)-3 {
+		return false
+	}
+	user := email[:at]
+	host := email[at+1:]
+	if len(user) > 64 {
+		return false
+	}
+	if userDotRegexp.MatchString(user) || !userRegexp.MatchString(user) || !hostRegexp.MatchString(host) {
+		return false
+	}
+	switch host {
+	case "localhost", "example.com":
+		return true
+	}
+	if _, err := net.LookupMX(host); err != nil {
+		if _, err := net.LookupIP(host); err != nil {
+			return false
+		}
+	}
+
+	return true
+}
+
+// IsURL checks if the string is a URL.
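The guards at the top of `IsURL` below reject a few cases outright before the regular expression is consulted. A hedged sketch of the expected behavior (illustrative outputs; this assumes the package's `rxURL` pattern, defined elsewhere in the library, accepts ordinary http URLs):

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.IsURL("http://example.com")) // true
	fmt.Println(govalidator.IsURL(""))                   // false: empty strings are rejected outright
	fmt.Println(govalidator.IsURL(".example.com"))       // false: a leading dot is rejected
}
```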
+func IsURL(str string) bool {
+	if str == "" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, ".") {
+		return false
+	}
+	strTemp := str
+	if strings.Contains(str, ":") && !strings.Contains(str, "://") {
+		// support no indicated urlscheme but with colon for port number
+		// http:// is appended so url.Parse will succeed, strTemp used so it does not impact rxURL.MatchString
+		strTemp = "http://" + str
+	}
+	u, err := url.Parse(strTemp)
+	if err != nil {
+		return false
+	}
+	if strings.HasPrefix(u.Host, ".") {
+		return false
+	}
+	if u.Host == "" && (u.Path != "" && !strings.Contains(u.Path, ".")) {
+		return false
+	}
+	return rxURL.MatchString(str)
+}
+
+// IsRequestURL checks if the string rawurl, assuming
+// it was received in an HTTP request, is a valid
+// URL conforming to RFC 3986
+func IsRequestURL(rawurl string) bool {
+	url, err := url.ParseRequestURI(rawurl)
+	if err != nil {
+		return false // couldn't even parse the rawurl
+	}
+	if len(url.Scheme) == 0 {
+		return false // no scheme found
+	}
+	return true
+}
+
+// IsRequestURI checks if the string rawurl, assuming
+// it was received in an HTTP request, is an
+// absolute URI or an absolute path.
+func IsRequestURI(rawurl string) bool {
+	_, err := url.ParseRequestURI(rawurl)
+	return err == nil
+}
+
+// IsAlpha checks if the string contains only letters (a-zA-Z). Empty string is valid.
+func IsAlpha(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxAlpha.MatchString(str)
+}
+
+// IsUTFLetter checks if the string contains only unicode letter characters.
+// Similar to IsAlpha but for all languages. Empty string is valid.
+func IsUTFLetter(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+
+	for _, c := range str {
+		if !unicode.IsLetter(c) {
+			return false
+		}
+	}
+	return true
+}
+
+// IsAlphanumeric checks if the string contains only letters and numbers. Empty string is valid.
+func IsAlphanumeric(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxAlphanumeric.MatchString(str)
+}
+
+// IsUTFLetterNumeric checks if the string contains only unicode letters and numbers. Empty string is valid.
+func IsUTFLetterNumeric(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	for _, c := range str {
+		if !unicode.IsLetter(c) && !unicode.IsNumber(c) { // letters && numbers are ok
+			return false
+		}
+	}
+	return true
+}
+
+// IsNumeric checks if the string contains only numbers. Empty string is valid.
+func IsNumeric(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxNumeric.MatchString(str)
+}
+
+// IsUTFNumeric checks if the string contains only unicode numbers of any kind.
+// Numbers can be 0-9 but also Fractions ¾, Roman Ⅸ and Hangzhou 〩. Empty string is valid.
+func IsUTFNumeric(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	if strings.IndexAny(str, "+-") > 0 {
+		return false
+	}
+	if len(str) > 1 {
+		str = strings.TrimPrefix(str, "-")
+		str = strings.TrimPrefix(str, "+")
+	}
+	for _, c := range str {
+		if !unicode.IsNumber(c) { // numbers && minus sign are ok
+			return false
+		}
+	}
+	return true
+}
+
+// IsUTFDigit checks if the string contains only unicode radix-10 decimal digits. Empty string is valid.
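The difference between the ASCII-only and unicode-aware variants above is easiest to see side by side. A hedged sketch (illustrative outputs, resting on `unicode.IsLetter`/`IsNumber`/`IsDigit` as used in the implementations):

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.IsAlpha("abcXYZ"))    // true: ASCII letters only
	fmt.Println(govalidator.IsUTFLetter("日本語")) // true: any unicode letters
	fmt.Println(govalidator.IsNumeric("12345"))   // true
	fmt.Println(govalidator.IsUTFNumeric("Ⅸ"))    // true: unicode.IsNumber accepts Roman numerals
	fmt.Println(govalidator.IsUTFDigit("٣"))      // true: Arabic-Indic digits are radix-10 digits
}
```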
+func IsUTFDigit(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	if strings.IndexAny(str, "+-") > 0 {
+		return false
+	}
+	if len(str) > 1 {
+		str = strings.TrimPrefix(str, "-")
+		str = strings.TrimPrefix(str, "+")
+	}
+	for _, c := range str {
+		if !unicode.IsDigit(c) { // digits && minus sign are ok
+			return false
+		}
+	}
+	return true
+}
+
+// IsHexadecimal checks if the string is a hexadecimal number.
+func IsHexadecimal(str string) bool {
+	return rxHexadecimal.MatchString(str)
+}
+
+// IsHexcolor checks if the string is a hexadecimal color.
+func IsHexcolor(str string) bool {
+	return rxHexcolor.MatchString(str)
+}
+
+// IsRGBcolor checks if the string is a valid RGB color in the form rgb(RRR, GGG, BBB).
+func IsRGBcolor(str string) bool {
+	return rxRGBcolor.MatchString(str)
+}
+
+// IsLowerCase checks if the string is lowercase. Empty string is valid.
+func IsLowerCase(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return str == strings.ToLower(str)
+}
+
+// IsUpperCase checks if the string is uppercase. Empty string is valid.
+func IsUpperCase(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return str == strings.ToUpper(str)
+}
+
+// HasLowerCase checks if the string contains at least 1 lowercase character. Empty string is valid.
+func HasLowerCase(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxHasLowerCase.MatchString(str)
+}
+
+// HasUpperCase checks if the string contains at least 1 uppercase character. Empty string is valid.
+func HasUpperCase(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxHasUpperCase.MatchString(str)
+}
+
+// IsInt checks if the string is an integer. Empty string is valid.
+func IsInt(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxInt.MatchString(str)
+}
+
+// IsFloat checks if the string is a float.
+func IsFloat(str string) bool {
+	return str != "" && rxFloat.MatchString(str)
+}
+
+// IsDivisibleBy checks if the string is a number that is divisible by another.
+// If the second argument is not a valid integer or is zero, it returns false.
+// Otherwise, if the first argument is not a valid integer, it returns true
+// (an invalid string converts to zero, which is divisible by everything).
+func IsDivisibleBy(str, num string) bool {
+	f, _ := ToFloat(str)
+	p := int64(f)
+	q, _ := ToInt(num)
+	if q == 0 {
+		return false
+	}
+	return (p == 0) || (p%q == 0)
+}
+
+// IsNull checks if the string is null (has zero length).
+func IsNull(str string) bool {
+	return len(str) == 0
+}
+
+// HasWhitespaceOnly checks if the string contains only whitespace
+func HasWhitespaceOnly(str string) bool {
+	return len(str) > 0 && rxHasWhitespaceOnly.MatchString(str)
+}
+
+// HasWhitespace checks if the string contains any whitespace
+func HasWhitespace(str string) bool {
+	return len(str) > 0 && rxHasWhitespace.MatchString(str)
+}
+
+// IsByteLength checks if the string's length (in bytes) falls in a range.
+func IsByteLength(str string, min, max int) bool {
+	return len(str) >= min && len(str) <= max
+}
+
+// IsUUIDv3 checks if the string is a UUID version 3.
+func IsUUIDv3(str string) bool {
+	return rxUUID3.MatchString(str)
+}
+
+// IsUUIDv4 checks if the string is a UUID version 4.
+func IsUUIDv4(str string) bool {
+	return rxUUID4.MatchString(str)
+}
+
+// IsUUIDv5 checks if the string is a UUID version 5.
+func IsUUIDv5(str string) bool {
+	return rxUUID5.MatchString(str)
+}
+
+// IsUUID checks if the string is a UUID (version 3, 4 or 5).
+func IsUUID(str string) bool {
+	return rxUUID.MatchString(str)
+}
+
+// IsCreditCard checks if the string is a credit card number.
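The `IsCreditCard` implementation that follows is a Luhn (mod-10) checksum: walking right to left, every second digit is doubled, and a two-digit result contributes the sum of its digits. As a worked example, the well-known test number 4242 4242 4242 4242 passes: each doubled 4 becomes 8, each pair contributes 8+2, and eight pairs sum to 80, a multiple of 10. A minimal standalone sketch of the same algorithm (editorial, digits-only, no regexp pre-screening as in the vendored function):

```go
package main

import "fmt"

// luhnValid mirrors the mod-10 checksum used by IsCreditCard below.
func luhnValid(digits string) bool {
	sum, double := 0, false
	for i := len(digits) - 1; i >= 0; i-- {
		d := int(digits[i] - '0')
		if double {
			d *= 2
			if d >= 10 {
				d = d%10 + 1 // same as summing the two digits of 10..18
			}
		}
		sum += d
		double = !double
	}
	return sum%10 == 0
}

func main() {
	fmt.Println(luhnValid("4242424242424242")) // true: a standard test number
	fmt.Println(luhnValid("4242424242424241")) // false: check digit is off by one
}
```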
+func IsCreditCard(str string) bool {
+	sanitized := notNumberRegexp.ReplaceAllString(str, "")
+	if !rxCreditCard.MatchString(sanitized) {
+		return false
+	}
+	// Luhn checksum: walking right to left, double every second digit; when
+	// doubling yields a two-digit number (10..18), add its digits instead,
+	// which equals tmpNum%10 + 1.
+	var sum int64
+	var digit string
+	var tmpNum int64
+	var shouldDouble bool
+	for i := len(sanitized) - 1; i >= 0; i-- {
+		digit = sanitized[i:(i + 1)]
+		tmpNum, _ = ToInt(digit)
+		if shouldDouble {
+			tmpNum *= 2
+			if tmpNum >= 10 {
+				sum += ((tmpNum % 10) + 1)
+			} else {
+				sum += tmpNum
+			}
+		} else {
+			sum += tmpNum
+		}
+		shouldDouble = !shouldDouble
+	}
+
+	return sum%10 == 0
+}
+
+// IsISBN10 checks if the string is an ISBN version 10.
+func IsISBN10(str string) bool {
+	return IsISBN(str, 10)
+}
+
+// IsISBN13 checks if the string is an ISBN version 13.
+func IsISBN13(str string) bool {
+	return IsISBN(str, 13)
+}
+
+// IsISBN checks if the string is an ISBN (version 10 or 13).
+// If the version value is not equal to 10 or 13, it will check both variants.
+func IsISBN(str string, version int) bool {
+	sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "")
+	var checksum int32
+	var i int32
+	if version == 10 {
+		if !rxISBN10.MatchString(sanitized) {
+			return false
+		}
+		// ISBN-10: the sum of digits weighted 1..10 must be divisible by 11;
+		// a trailing 'X' stands for the value 10.
+		for i = 0; i < 9; i++ {
+			checksum += (i + 1) * int32(sanitized[i]-'0')
+		}
+		if sanitized[9] == 'X' {
+			checksum += 10 * 10
+		} else {
+			checksum += 10 * int32(sanitized[9]-'0')
+		}
+		if checksum%11 == 0 {
+			return true
+		}
+		return false
+	} else if version == 13 {
+		if !rxISBN13.MatchString(sanitized) {
+			return false
+		}
+		// ISBN-13: digits are weighted alternately 1 and 3; the check digit
+		// must bring the total to a multiple of 10.
+		factor := []int32{1, 3}
+		for i = 0; i < 12; i++ {
+			checksum += factor[i%2] * int32(sanitized[i]-'0')
+		}
+		return (int32(sanitized[12]-'0'))-((10-(checksum%10))%10) == 0
+	}
+	return IsISBN(str, 10) || IsISBN(str, 13)
+}
+
+// IsJSON checks if the string is valid JSON (note: uses json.Unmarshal).
+func IsJSON(str string) bool {
+	var js json.RawMessage
+	return json.Unmarshal([]byte(str), &js) == nil
+}
+
+// IsMultibyte checks if the string contains one or more multibyte chars. Empty string is valid.
+func IsMultibyte(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxMultibyte.MatchString(str)
+}
+
+// IsASCII checks if the string contains ASCII chars only. Empty string is valid.
+func IsASCII(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxASCII.MatchString(str)
+}
+
+// IsPrintableASCII checks if the string contains printable ASCII chars only. Empty string is valid.
+func IsPrintableASCII(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxPrintableASCII.MatchString(str)
+}
+
+// IsFullWidth checks if the string contains any full-width chars. Empty string is valid.
+func IsFullWidth(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxFullWidth.MatchString(str)
+}
+
+// IsHalfWidth checks if the string contains any half-width chars. Empty string is valid.
+func IsHalfWidth(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxHalfWidth.MatchString(str)
+}
+
+// IsVariableWidth checks if the string contains a mixture of full and half-width chars. Empty string is valid.
+func IsVariableWidth(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxHalfWidth.MatchString(str) && rxFullWidth.MatchString(str)
+}
+
+// IsBase64 checks if a string is base64 encoded.
+func IsBase64(str string) bool {
+	return rxBase64.MatchString(str)
+}
+
+// IsFilePath checks if a string is a Windows or Unix file path and returns its type.
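`IsFilePath`, defined next, reports the path flavor via the package-level `Win`, `Unix` and `Unknown` constants it references (declared elsewhere in the package). A hedged usage sketch, assuming `rxWinPath` matches drive-letter paths:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	// The second return value is one of the package's Win/Unix/Unknown constants.
	if ok, pathType := govalidator.IsFilePath(`c:\Users\example`); ok {
		fmt.Println(pathType == govalidator.Win) // true, assuming rxWinPath matches drive-letter paths
	}
	ok, pathType := govalidator.IsFilePath("/var/lib/kubelet")
	fmt.Println(ok, pathType == govalidator.Unix) // true true
}
```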
+func IsFilePath(str string) (bool, int) { + if rxWinPath.MatchString(str) { + //check windows path limit see: + // http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath + if len(str[3:]) > 32767 { + return false, Win + } + return true, Win + } else if rxUnixPath.MatchString(str) { + return true, Unix + } + return false, Unknown +} + +// IsDataURI checks if a string is base64 encoded data URI such as an image +func IsDataURI(str string) bool { + dataURI := strings.Split(str, ",") + if !rxDataURI.MatchString(dataURI[0]) { + return false + } + return IsBase64(dataURI[1]) +} + +// IsISO3166Alpha2 checks if a string is valid two-letter country code +func IsISO3166Alpha2(str string) bool { + for _, entry := range ISO3166List { + if str == entry.Alpha2Code { + return true + } + } + return false +} + +// IsISO3166Alpha3 checks if a string is valid three-letter country code +func IsISO3166Alpha3(str string) bool { + for _, entry := range ISO3166List { + if str == entry.Alpha3Code { + return true + } + } + return false +} + +// IsISO693Alpha2 checks if a string is valid two-letter language code +func IsISO693Alpha2(str string) bool { + for _, entry := range ISO693List { + if str == entry.Alpha2Code { + return true + } + } + return false +} + +// IsISO693Alpha3b checks if a string is valid three-letter language code +func IsISO693Alpha3b(str string) bool { + for _, entry := range ISO693List { + if str == entry.Alpha3bCode { + return true + } + } + return false +} + +// IsDNSName will validate the given string as a DNS name +func IsDNSName(str string) bool { + if str == "" || len(strings.Replace(str, ".", "", -1)) > 255 { + // constraints already violated + return false + } + return !IsIP(str) && rxDNSName.MatchString(str) +} + +// IsHash checks if a string is a hash of type algorithm. +// Algorithm is one of ['md4', 'md5', 'sha1', 'sha256', 'sha384', 'sha512', 'ripemd128', 'ripemd160', 'tiger128', 'tiger160', 'tiger192', 'crc32', 'crc32b'] +func IsHash(str string, algorithm string) bool { + len := "0" + algo := strings.ToLower(algorithm) + + if algo == "crc32" || algo == "crc32b" { + len = "8" + } else if algo == "md5" || algo == "md4" || algo == "ripemd128" || algo == "tiger128" { + len = "32" + } else if algo == "sha1" || algo == "ripemd160" || algo == "tiger160" { + len = "40" + } else if algo == "tiger192" { + len = "48" + } else if algo == "sha256" { + len = "64" + } else if algo == "sha384" { + len = "96" + } else if algo == "sha512" { + len = "128" + } else { + return false + } + + return Matches(str, "^[a-f0-9]{"+len+"}$") +} + +// IsDialString validates the given string for usage with the various Dial() functions +func IsDialString(str string) bool { + + if h, p, err := net.SplitHostPort(str); err == nil && h != "" && p != "" && (IsDNSName(h) || IsIP(h)) && IsPort(p) { + return true + } + + return false +} + +// IsIP checks if a string is either IP version 4 or 6. +func IsIP(str string) bool { + return net.ParseIP(str) != nil +} + +// IsPort checks if a string represents a valid port +func IsPort(str string) bool { + if i, err := strconv.Atoi(str); err == nil && i > 0 && i < 65536 { + return true + } + return false +} + +// IsIPv4 check if the string is an IP version 4. +func IsIPv4(str string) bool { + ip := net.ParseIP(str) + return ip != nil && strings.Contains(str, ".") +} + +// IsIPv6 check if the string is an IP version 6. 
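A few expected results for the network- and hash-oriented checks above (an illustrative sketch; `IsHash` simply enforces the hex length registered for each algorithm, e.g. 32 chars for md5):

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.IsIP("192.168.0.1"))   // true
	fmt.Println(govalidator.IsIPv4("192.168.0.1")) // true
	fmt.Println(govalidator.IsPort("8080"))        // true: valid ports are 1..65535
	fmt.Println(govalidator.IsPort("0"))           // false

	// md5 of the empty string: 32 lowercase hex characters.
	fmt.Println(govalidator.IsHash("d41d8cd98f00b204e9800998ecf8427e", "md5")) // true
}
```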
+func IsIPv6(str string) bool {
+	ip := net.ParseIP(str)
+	return ip != nil && strings.Contains(str, ":")
+}
+
+// IsCIDR checks if the string is a valid CIDR notation (IPv4 & IPv6)
+func IsCIDR(str string) bool {
+	_, _, err := net.ParseCIDR(str)
+	return err == nil
+}
+
+// IsMAC checks if a string is a valid MAC address.
+// Possible MAC formats:
+// 01:23:45:67:89:ab
+// 01:23:45:67:89:ab:cd:ef
+// 01-23-45-67-89-ab
+// 01-23-45-67-89-ab-cd-ef
+// 0123.4567.89ab
+// 0123.4567.89ab.cdef
+func IsMAC(str string) bool {
+	_, err := net.ParseMAC(str)
+	return err == nil
+}
+
+// IsHost checks if the string is a valid IP (both v4 and v6) or a valid DNS name
+func IsHost(str string) bool {
+	return IsIP(str) || IsDNSName(str)
+}
+
+// IsMongoID checks if the string is a valid hex-encoded representation of a MongoDB ObjectId.
+func IsMongoID(str string) bool {
+	return rxHexadecimal.MatchString(str) && (len(str) == 24)
+}
+
+// IsLatitude checks if a string is a valid latitude.
+func IsLatitude(str string) bool {
+	return rxLatitude.MatchString(str)
+}
+
+// IsLongitude checks if a string is a valid longitude.
+func IsLongitude(str string) bool {
+	return rxLongitude.MatchString(str)
+}
+
+// IsRsaPublicKey checks if a string is a valid RSA public key with the provided bit length
+func IsRsaPublicKey(str string, keylen int) bool {
+	bb := bytes.NewBufferString(str)
+	pemBytes, err := ioutil.ReadAll(bb)
+	if err != nil {
+		return false
+	}
+	block, _ := pem.Decode(pemBytes)
+	if block != nil && block.Type != "PUBLIC KEY" {
+		return false
+	}
+	var der []byte
+
+	if block != nil {
+		der = block.Bytes
+	} else {
+		// not PEM encoded; try raw base64-encoded DER
+		der, err = base64.StdEncoding.DecodeString(str)
+		if err != nil {
+			return false
+		}
+	}
+
+	key, err := x509.ParsePKIXPublicKey(der)
+	if err != nil {
+		return false
+	}
+	pubkey, ok := key.(*rsa.PublicKey)
+	if !ok {
+		return false
+	}
+	bitlen := len(pubkey.N.Bytes()) * 8
+	return bitlen == int(keylen)
+}
+
+func toJSONName(tag string) string {
+	if tag == "" {
+		return ""
+	}
+
+	// JSON name always comes first. If there's no options then split[0] is
+	// JSON name, if JSON name is not set, then split[0] is an empty string.
+	split := strings.SplitN(tag, ",", 2)
+
+	name := split[0]
+
+	// However it is possible that the field is skipped when
+	// (de-)serializing from/to JSON, in which case assume that there is no
+	// tag name to use
+	if name == "-" {
+		return ""
+	}
+	return name
+}
+
+// PrependPathToErrors prefixes the given path onto the Path of every Error in err.
+func PrependPathToErrors(err error, path string) error {
+	switch err2 := err.(type) {
+	case Error:
+		err2.Path = append([]string{path}, err2.Path...)
+		return err2
+	case Errors:
+		errors := err2.Errors()
+		for i, err3 := range errors {
+			errors[i] = PrependPathToErrors(err3, path)
+		}
+		return err2
+	}
+	fmt.Println(err)
+	return err
+}
+
+// ValidateStruct validates struct fields by their `valid` tags;
+// the result will be `false` if there are any errors.
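Tag-driven validation in practice, before the implementation below: a hedged sketch of calling `ValidateStruct` on an annotated struct. The `~` custom-message syntax matches the `parseTagIntoMap` comment further down; the `email` tag name is registered in this package's tag maps elsewhere in the file, so treat the exact tag vocabulary as an assumption:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

type user struct {
	Name  string `valid:"required~name is mandatory"` // `~` sets a custom error message
	Email string `valid:"email"`
	Notes string `valid:"-"` // exempt from validation
}

func main() {
	ok, err := govalidator.ValidateStruct(user{Name: "", Email: "not-an-email"})
	fmt.Println(ok)  // false
	fmt.Println(err) // aggregated Errors: "name is mandatory" plus the email failure
}
```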
+func ValidateStruct(s interface{}) (bool, error) { + if s == nil { + return true, nil + } + result := true + var err error + val := reflect.ValueOf(s) + if val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr { + val = val.Elem() + } + // we only accept structs + if val.Kind() != reflect.Struct { + return false, fmt.Errorf("function only accepts structs; got %s", val.Kind()) + } + var errs Errors + for i := 0; i < val.NumField(); i++ { + valueField := val.Field(i) + typeField := val.Type().Field(i) + if typeField.PkgPath != "" { + continue // Private field + } + structResult := true + if valueField.Kind() == reflect.Interface { + valueField = valueField.Elem() + } + if (valueField.Kind() == reflect.Struct || + (valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) && + typeField.Tag.Get(tagName) != "-" { + var err error + structResult, err = ValidateStruct(valueField.Interface()) + if err != nil { + err = PrependPathToErrors(err, typeField.Name) + errs = append(errs, err) + } + } + resultField, err2 := typeCheck(valueField, typeField, val, nil) + if err2 != nil { + + // Replace structure name with JSON name if there is a tag on the variable + jsonTag := toJSONName(typeField.Tag.Get("json")) + if jsonTag != "" { + switch jsonError := err2.(type) { + case Error: + jsonError.Name = jsonTag + err2 = jsonError + case Errors: + for i2, err3 := range jsonError { + switch customErr := err3.(type) { + case Error: + customErr.Name = jsonTag + jsonError[i2] = customErr + } + } + + err2 = jsonError + } + } + + errs = append(errs, err2) + } + result = result && resultField && structResult + } + if len(errs) > 0 { + err = errs + } + return result, err +} + +// parseTagIntoMap parses a struct tag `valid:required~Some error message,length(2|3)` into map[string]string{"required": "Some error message", "length(2|3)": ""} +func parseTagIntoMap(tag string) tagOptionsMap { + optionsMap := make(tagOptionsMap) + options := strings.Split(tag, ",") + + for i, option := range options { + option = strings.TrimSpace(option) + + validationOptions := strings.Split(option, "~") + if !isValidTag(validationOptions[0]) { + continue + } + if len(validationOptions) == 2 { + optionsMap[validationOptions[0]] = tagOption{validationOptions[0], validationOptions[1], i} + } else { + optionsMap[validationOptions[0]] = tagOption{validationOptions[0], "", i} + } + } + return optionsMap +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("\\'\"!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +// IsSSN will validate the given string as a U.S. 
Social Security Number +func IsSSN(str string) bool { + if str == "" || len(str) != 11 { + return false + } + return rxSSN.MatchString(str) +} + +// IsSemver check if string is valid semantic version +func IsSemver(str string) bool { + return rxSemver.MatchString(str) +} + +// IsTime check if string is valid according to given format +func IsTime(str string, format string) bool { + _, err := time.Parse(format, str) + return err == nil +} + +// IsRFC3339 check if string is valid timestamp value according to RFC3339 +func IsRFC3339(str string) bool { + return IsTime(str, time.RFC3339) +} + +// IsRFC3339WithoutZone check if string is valid timestamp value according to RFC3339 which excludes the timezone. +func IsRFC3339WithoutZone(str string) bool { + return IsTime(str, RF3339WithoutZone) +} + +// IsISO4217 check if string is valid ISO currency code +func IsISO4217(str string) bool { + for _, currency := range ISO4217List { + if str == currency { + return true + } + } + + return false +} + +// ByteLength check string's length +func ByteLength(str string, params ...string) bool { + if len(params) == 2 { + min, _ := ToInt(params[0]) + max, _ := ToInt(params[1]) + return len(str) >= int(min) && len(str) <= int(max) + } + + return false +} + +// RuneLength check string's length +// Alias for StringLength +func RuneLength(str string, params ...string) bool { + return StringLength(str, params...) +} + +// IsRsaPub check whether string is valid RSA key +// Alias for IsRsaPublicKey +func IsRsaPub(str string, params ...string) bool { + if len(params) == 1 { + len, _ := ToInt(params[0]) + return IsRsaPublicKey(str, int(len)) + } + + return false +} + +// StringMatches checks if a string matches a given pattern. +func StringMatches(s string, params ...string) bool { + if len(params) == 1 { + pattern := params[0] + return Matches(s, pattern) + } + return false +} + +// StringLength check string's length (including multi byte strings) +func StringLength(str string, params ...string) bool { + + if len(params) == 2 { + strLength := utf8.RuneCountInString(str) + min, _ := ToInt(params[0]) + max, _ := ToInt(params[1]) + return strLength >= int(min) && strLength <= int(max) + } + + return false +} + +// Range check string's length +func Range(str string, params ...string) bool { + if len(params) == 2 { + value, _ := ToFloat(str) + min, _ := ToFloat(params[0]) + max, _ := ToFloat(params[1]) + return InRange(value, min, max) + } + + return false +} + +func isInRaw(str string, params ...string) bool { + if len(params) == 1 { + rawParams := params[0] + + parsedParams := strings.Split(rawParams, "|") + + return IsIn(str, parsedParams...) 
+ } + + return false +} + +// IsIn check if string str is a member of the set of strings params +func IsIn(str string, params ...string) bool { + for _, param := range params { + if str == param { + return true + } + } + + return false +} + +func checkRequired(v reflect.Value, t reflect.StructField, options tagOptionsMap) (bool, error) { + if nilPtrAllowedByRequired { + k := v.Kind() + if (k == reflect.Ptr || k == reflect.Interface) && v.IsNil() { + return true, nil + } + } + + if requiredOption, isRequired := options["required"]; isRequired { + if len(requiredOption.customErrorMessage) > 0 { + return false, Error{t.Name, fmt.Errorf(requiredOption.customErrorMessage), true, "required", []string{}} + } + return false, Error{t.Name, fmt.Errorf("non zero value required"), false, "required", []string{}} + } else if _, isOptional := options["optional"]; fieldsRequiredByDefault && !isOptional { + return false, Error{t.Name, fmt.Errorf("Missing required field"), false, "required", []string{}} + } + // not required and empty is valid + return true, nil +} + +func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options tagOptionsMap) (isValid bool, resultErr error) { + if !v.IsValid() { + return false, nil + } + + tag := t.Tag.Get(tagName) + + // Check if the field should be ignored + switch tag { + case "": + if v.Kind() != reflect.Slice && v.Kind() != reflect.Map { + if !fieldsRequiredByDefault { + return true, nil + } + return false, Error{t.Name, fmt.Errorf("All fields are required to at least have one validation defined"), false, "required", []string{}} + } + case "-": + return true, nil + } + + isRootType := false + if options == nil { + isRootType = true + options = parseTagIntoMap(tag) + } + + if isEmptyValue(v) { + // an empty value is not validated, check only required + isValid, resultErr = checkRequired(v, t, options) + for key := range options { + delete(options, key) + } + return isValid, resultErr + } + + var customTypeErrors Errors + optionsOrder := options.orderedKeys() + for _, validatorName := range optionsOrder { + validatorStruct := options[validatorName] + if validatefunc, ok := CustomTypeTagMap.Get(validatorName); ok { + delete(options, validatorName) + + if result := validatefunc(v.Interface(), o.Interface()); !result { + if len(validatorStruct.customErrorMessage) > 0 { + customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: TruncatingErrorf(validatorStruct.customErrorMessage, fmt.Sprint(v), validatorName), CustomErrorMessageExists: true, Validator: stripParams(validatorName)}) + continue + } + customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: fmt.Errorf("%s does not validate as %s", fmt.Sprint(v), validatorName), CustomErrorMessageExists: false, Validator: stripParams(validatorName)}) + } + } + } + + if len(customTypeErrors.Errors()) > 0 { + return false, customTypeErrors + } + + if isRootType { + // Ensure that we've checked the value by all specified validators before report that the value is valid + defer func() { + delete(options, "optional") + delete(options, "required") + + if isValid && resultErr == nil && len(options) != 0 { + optionsOrder := options.orderedKeys() + for _, validator := range optionsOrder { + isValid = false + resultErr = Error{t.Name, fmt.Errorf( + "The following validator is invalid or can't be applied to the field: %q", validator), false, stripParams(validator), []string{}} + return + } + } + }() + } + + switch v.Kind() { + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, 
reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Float32, reflect.Float64, + reflect.String: + // for each tag option check the map of validator functions + for _, validatorSpec := range optionsOrder { + validatorStruct := options[validatorSpec] + var negate bool + validator := validatorSpec + customMsgExists := len(validatorStruct.customErrorMessage) > 0 + + // Check whether the tag looks like '!something' or 'something' + if validator[0] == '!' { + validator = validator[1:] + negate = true + } + + // Check for param validators + for key, value := range ParamTagRegexMap { + ps := value.FindStringSubmatch(validator) + if len(ps) == 0 { + continue + } + + validatefunc, ok := ParamTagMap[key] + if !ok { + continue + } + + delete(options, validatorSpec) + + switch v.Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64: + + field := fmt.Sprint(v) // make value into string, then validate with regex + if result := validatefunc(field, ps[1:]...); (!result && !negate) || (result && negate) { + if customMsgExists { + return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + if negate { + return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + default: + // type not yet supported, fail + return false, Error{t.Name, fmt.Errorf("Validator %s doesn't support kind %s", validator, v.Kind()), false, stripParams(validatorSpec), []string{}} + } + } + + if validatefunc, ok := TagMap[validator]; ok { + delete(options, validatorSpec) + + switch v.Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64: + field := fmt.Sprint(v) // make value into string, then validate with regex + if result := validatefunc(field); !result && !negate || result && negate { + if customMsgExists { + return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + if negate { + return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + default: + //Not Yet Supported Types (Fail here!) 
+ err := fmt.Errorf("Validator %s doesn't support kind %s for value %v", validator, v.Kind(), v) + return false, Error{t.Name, err, false, stripParams(validatorSpec), []string{}} + } + } + } + return true, nil + case reflect.Map: + if v.Type().Key().Kind() != reflect.String { + return false, &UnsupportedTypeError{v.Type()} + } + var sv stringValues + sv = v.MapKeys() + sort.Sort(sv) + result := true + for i, k := range sv { + var resultItem bool + var err error + if v.MapIndex(k).Kind() != reflect.Struct { + resultItem, err = typeCheck(v.MapIndex(k), t, o, options) + if err != nil { + return false, err + } + } else { + resultItem, err = ValidateStruct(v.MapIndex(k).Interface()) + if err != nil { + err = PrependPathToErrors(err, t.Name+"."+sv[i].Interface().(string)) + return false, err + } + } + result = result && resultItem + } + return result, nil + case reflect.Slice, reflect.Array: + result := true + for i := 0; i < v.Len(); i++ { + var resultItem bool + var err error + if v.Index(i).Kind() != reflect.Struct { + resultItem, err = typeCheck(v.Index(i), t, o, options) + if err != nil { + return false, err + } + } else { + resultItem, err = ValidateStruct(v.Index(i).Interface()) + if err != nil { + err = PrependPathToErrors(err, t.Name+"."+strconv.Itoa(i)) + return false, err + } + } + result = result && resultItem + } + return result, nil + case reflect.Interface: + // If the value is an interface then encode its element + if v.IsNil() { + return true, nil + } + return ValidateStruct(v.Interface()) + case reflect.Ptr: + // If the value is a pointer then check its element + if v.IsNil() { + return true, nil + } + return typeCheck(v.Elem(), t, o, options) + case reflect.Struct: + return ValidateStruct(v.Interface()) + default: + return false, &UnsupportedTypeError{v.Type()} + } +} + +func stripParams(validatorString string) string { + return paramsRegexp.ReplaceAllString(validatorString, "") +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.String, reflect.Array: + return v.Len() == 0 + case reflect.Map, reflect.Slice: + return v.Len() == 0 || v.IsNil() + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + + return reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) +} + +// ErrorByField returns error for specified field of the struct +// validated by ValidateStruct or empty string if there are no errors +// or this field doesn't exists or doesn't have any errors. +func ErrorByField(e error, field string) string { + if e == nil { + return "" + } + return ErrorsByField(e)[field] +} + +// ErrorsByField returns map of errors of the struct validated +// by ValidateStruct or empty map if there are no errors. 
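`ErrorByField` and `ErrorsByField` flatten the aggregated validation error into per-field messages. A hedged usage sketch (the exact message text follows the "%s does not validate as %s" format used by typeCheck above):

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

type signup struct {
	Email string `valid:"email"`
}

func main() {
	_, err := govalidator.ValidateStruct(signup{Email: "nope"})
	// Flatten the aggregated error into a field -> message map:
	for field, msg := range govalidator.ErrorsByField(err) {
		fmt.Printf("%s: %s\n", field, msg) // e.g. "Email: nope does not validate as email"
	}
}
```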
+func ErrorsByField(e error) map[string]string { + m := make(map[string]string) + if e == nil { + return m + } + // prototype for ValidateStruct + + switch e.(type) { + case Error: + m[e.(Error).Name] = e.(Error).Err.Error() + case Errors: + for _, item := range e.(Errors).Errors() { + n := ErrorsByField(item) + for k, v := range n { + m[k] = v + } + } + } + + return m +} + +// Error returns string equivalent for reflect.Type +func (e *UnsupportedTypeError) Error() string { + return "validator: unsupported type: " + e.Type.String() +} + +func (sv stringValues) Len() int { return len(sv) } +func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) } +func (sv stringValues) get(i int) string { return sv[i].String() } diff --git a/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/wercker.yml b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/wercker.yml new file mode 100644 index 000000000000..cac7a5fcf063 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/asaskevich/govalidator/wercker.yml @@ -0,0 +1,15 @@ +box: golang +build: + steps: + - setup-go-workspace + + - script: + name: go get + code: | + go version + go get -t ./... + + - script: + name: go test + code: | + go test -race ./... diff --git a/cluster-autoscaler/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go b/cluster-autoscaler/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go new file mode 100644 index 000000000000..0652b822f774 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go @@ -0,0 +1,5277 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/container-storage-interface/spec/csi.proto + +package csi + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" +import wrappers "github.com/golang/protobuf/ptypes/wrappers" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type PluginCapability_Service_Type int32 + +const ( + PluginCapability_Service_UNKNOWN PluginCapability_Service_Type = 0 + // CONTROLLER_SERVICE indicates that the Plugin provides RPCs for + // the ControllerService. Plugins SHOULD provide this capability. + // In rare cases certain plugins MAY wish to omit the + // ControllerService entirely from their implementation, but such + // SHOULD NOT be the common case. + // The presence of this capability determines whether the CO will + // attempt to invoke the REQUIRED ControllerService RPCs, as well + // as specific RPCs as indicated by ControllerGetCapabilities. + PluginCapability_Service_CONTROLLER_SERVICE PluginCapability_Service_Type = 1 + // VOLUME_ACCESSIBILITY_CONSTRAINTS indicates that the volumes for + // this plugin MAY NOT be equally accessible by all nodes in the + // cluster. 
The CO MUST use the topology information returned by + // CreateVolumeRequest along with the topology information + // returned by NodeGetInfo to ensure that a given volume is + // accessible from a given node when scheduling workloads. + PluginCapability_Service_VOLUME_ACCESSIBILITY_CONSTRAINTS PluginCapability_Service_Type = 2 +) + +var PluginCapability_Service_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CONTROLLER_SERVICE", + 2: "VOLUME_ACCESSIBILITY_CONSTRAINTS", +} +var PluginCapability_Service_Type_value = map[string]int32{ + "UNKNOWN": 0, + "CONTROLLER_SERVICE": 1, + "VOLUME_ACCESSIBILITY_CONSTRAINTS": 2, +} + +func (x PluginCapability_Service_Type) String() string { + return proto.EnumName(PluginCapability_Service_Type_name, int32(x)) +} +func (PluginCapability_Service_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{4, 0, 0} +} + +type VolumeCapability_AccessMode_Mode int32 + +const ( + VolumeCapability_AccessMode_UNKNOWN VolumeCapability_AccessMode_Mode = 0 + // Can only be published once as read/write on a single node, at + // any given time. + VolumeCapability_AccessMode_SINGLE_NODE_WRITER VolumeCapability_AccessMode_Mode = 1 + // Can only be published once as readonly on a single node, at + // any given time. + VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY VolumeCapability_AccessMode_Mode = 2 + // Can be published as readonly at multiple nodes simultaneously. + VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY VolumeCapability_AccessMode_Mode = 3 + // Can be published at multiple nodes simultaneously. Only one of + // the node can be used as read/write. The rest will be readonly. + VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER VolumeCapability_AccessMode_Mode = 4 + // Can be published as read/write at multiple nodes + // simultaneously. + VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER VolumeCapability_AccessMode_Mode = 5 +) + +var VolumeCapability_AccessMode_Mode_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SINGLE_NODE_WRITER", + 2: "SINGLE_NODE_READER_ONLY", + 3: "MULTI_NODE_READER_ONLY", + 4: "MULTI_NODE_SINGLE_WRITER", + 5: "MULTI_NODE_MULTI_WRITER", +} +var VolumeCapability_AccessMode_Mode_value = map[string]int32{ + "UNKNOWN": 0, + "SINGLE_NODE_WRITER": 1, + "SINGLE_NODE_READER_ONLY": 2, + "MULTI_NODE_READER_ONLY": 3, + "MULTI_NODE_SINGLE_WRITER": 4, + "MULTI_NODE_MULTI_WRITER": 5, +} + +func (x VolumeCapability_AccessMode_Mode) String() string { + return proto.EnumName(VolumeCapability_AccessMode_Mode_name, int32(x)) +} +func (VolumeCapability_AccessMode_Mode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{10, 2, 0} +} + +type ControllerServiceCapability_RPC_Type int32 + +const ( + ControllerServiceCapability_RPC_UNKNOWN ControllerServiceCapability_RPC_Type = 0 + ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME ControllerServiceCapability_RPC_Type = 1 + ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME ControllerServiceCapability_RPC_Type = 2 + ControllerServiceCapability_RPC_LIST_VOLUMES ControllerServiceCapability_RPC_Type = 3 + ControllerServiceCapability_RPC_GET_CAPACITY ControllerServiceCapability_RPC_Type = 4 + // Currently the only way to consume a snapshot is to create + // a volume from it. Therefore plugins supporting + // CREATE_DELETE_SNAPSHOT MUST support creating volume from + // snapshot. 
+ ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT ControllerServiceCapability_RPC_Type = 5 + ControllerServiceCapability_RPC_LIST_SNAPSHOTS ControllerServiceCapability_RPC_Type = 6 + // Plugins supporting volume cloning at the storage level MAY + // report this capability. The source volume MUST be managed by + // the same plugin. Not all volume sources and parameters + // combinations MAY work. + ControllerServiceCapability_RPC_CLONE_VOLUME ControllerServiceCapability_RPC_Type = 7 + // Indicates the SP supports ControllerPublishVolume.readonly + // field. + ControllerServiceCapability_RPC_PUBLISH_READONLY ControllerServiceCapability_RPC_Type = 8 +) + +var ControllerServiceCapability_RPC_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CREATE_DELETE_VOLUME", + 2: "PUBLISH_UNPUBLISH_VOLUME", + 3: "LIST_VOLUMES", + 4: "GET_CAPACITY", + 5: "CREATE_DELETE_SNAPSHOT", + 6: "LIST_SNAPSHOTS", + 7: "CLONE_VOLUME", + 8: "PUBLISH_READONLY", +} +var ControllerServiceCapability_RPC_Type_value = map[string]int32{ + "UNKNOWN": 0, + "CREATE_DELETE_VOLUME": 1, + "PUBLISH_UNPUBLISH_VOLUME": 2, + "LIST_VOLUMES": 3, + "GET_CAPACITY": 4, + "CREATE_DELETE_SNAPSHOT": 5, + "LIST_SNAPSHOTS": 6, + "CLONE_VOLUME": 7, + "PUBLISH_READONLY": 8, +} + +func (x ControllerServiceCapability_RPC_Type) String() string { + return proto.EnumName(ControllerServiceCapability_RPC_Type_name, int32(x)) +} +func (ControllerServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{29, 0, 0} +} + +type VolumeUsage_Unit int32 + +const ( + VolumeUsage_UNKNOWN VolumeUsage_Unit = 0 + VolumeUsage_BYTES VolumeUsage_Unit = 1 + VolumeUsage_INODES VolumeUsage_Unit = 2 +) + +var VolumeUsage_Unit_name = map[int32]string{ + 0: "UNKNOWN", + 1: "BYTES", + 2: "INODES", +} +var VolumeUsage_Unit_value = map[string]int32{ + "UNKNOWN": 0, + "BYTES": 1, + "INODES": 2, +} + +func (x VolumeUsage_Unit) String() string { + return proto.EnumName(VolumeUsage_Unit_name, int32(x)) +} +func (VolumeUsage_Unit) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{47, 0} +} + +type NodeServiceCapability_RPC_Type int32 + +const ( + NodeServiceCapability_RPC_UNKNOWN NodeServiceCapability_RPC_Type = 0 + NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME NodeServiceCapability_RPC_Type = 1 + // If Plugin implements GET_VOLUME_STATS capability + // then it MUST implement NodeGetVolumeStats RPC + // call for fetching volume statistics. 
+ NodeServiceCapability_RPC_GET_VOLUME_STATS NodeServiceCapability_RPC_Type = 2 +) + +var NodeServiceCapability_RPC_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "STAGE_UNSTAGE_VOLUME", + 2: "GET_VOLUME_STATS", +} +var NodeServiceCapability_RPC_Type_value = map[string]int32{ + "UNKNOWN": 0, + "STAGE_UNSTAGE_VOLUME": 1, + "GET_VOLUME_STATS": 2, +} + +func (x NodeServiceCapability_RPC_Type) String() string { + return proto.EnumName(NodeServiceCapability_RPC_Type_name, int32(x)) +} +func (NodeServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{50, 0, 0} +} + +type GetPluginInfoRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPluginInfoRequest) Reset() { *m = GetPluginInfoRequest{} } +func (m *GetPluginInfoRequest) String() string { return proto.CompactTextString(m) } +func (*GetPluginInfoRequest) ProtoMessage() {} +func (*GetPluginInfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{0} +} +func (m *GetPluginInfoRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPluginInfoRequest.Unmarshal(m, b) +} +func (m *GetPluginInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPluginInfoRequest.Marshal(b, m, deterministic) +} +func (dst *GetPluginInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPluginInfoRequest.Merge(dst, src) +} +func (m *GetPluginInfoRequest) XXX_Size() int { + return xxx_messageInfo_GetPluginInfoRequest.Size(m) +} +func (m *GetPluginInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetPluginInfoRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPluginInfoRequest proto.InternalMessageInfo + +type GetPluginInfoResponse struct { + // The name MUST follow domain name notation format + // (https://tools.ietf.org/html/rfc1035#section-2.3.1). It SHOULD + // include the plugin's host company name and the plugin name, + // to minimize the possibility of collisions. It MUST be 63 + // characters or less, beginning and ending with an alphanumeric + // character ([a-z0-9A-Z]) with dashes (-), dots (.), and + // alphanumerics between. This field is REQUIRED. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // This field is REQUIRED. Value of this field is opaque to the CO. + VendorVersion string `protobuf:"bytes,2,opt,name=vendor_version,json=vendorVersion,proto3" json:"vendor_version,omitempty"` + // This field is OPTIONAL. Values are opaque to the CO. 
+ Manifest map[string]string `protobuf:"bytes,3,rep,name=manifest,proto3" json:"manifest,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPluginInfoResponse) Reset() { *m = GetPluginInfoResponse{} } +func (m *GetPluginInfoResponse) String() string { return proto.CompactTextString(m) } +func (*GetPluginInfoResponse) ProtoMessage() {} +func (*GetPluginInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{1} +} +func (m *GetPluginInfoResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPluginInfoResponse.Unmarshal(m, b) +} +func (m *GetPluginInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPluginInfoResponse.Marshal(b, m, deterministic) +} +func (dst *GetPluginInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPluginInfoResponse.Merge(dst, src) +} +func (m *GetPluginInfoResponse) XXX_Size() int { + return xxx_messageInfo_GetPluginInfoResponse.Size(m) +} +func (m *GetPluginInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetPluginInfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPluginInfoResponse proto.InternalMessageInfo + +func (m *GetPluginInfoResponse) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GetPluginInfoResponse) GetVendorVersion() string { + if m != nil { + return m.VendorVersion + } + return "" +} + +func (m *GetPluginInfoResponse) GetManifest() map[string]string { + if m != nil { + return m.Manifest + } + return nil +} + +type GetPluginCapabilitiesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPluginCapabilitiesRequest) Reset() { *m = GetPluginCapabilitiesRequest{} } +func (m *GetPluginCapabilitiesRequest) String() string { return proto.CompactTextString(m) } +func (*GetPluginCapabilitiesRequest) ProtoMessage() {} +func (*GetPluginCapabilitiesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{2} +} +func (m *GetPluginCapabilitiesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPluginCapabilitiesRequest.Unmarshal(m, b) +} +func (m *GetPluginCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPluginCapabilitiesRequest.Marshal(b, m, deterministic) +} +func (dst *GetPluginCapabilitiesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPluginCapabilitiesRequest.Merge(dst, src) +} +func (m *GetPluginCapabilitiesRequest) XXX_Size() int { + return xxx_messageInfo_GetPluginCapabilitiesRequest.Size(m) +} +func (m *GetPluginCapabilitiesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetPluginCapabilitiesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPluginCapabilitiesRequest proto.InternalMessageInfo + +type GetPluginCapabilitiesResponse struct { + // All the capabilities that the controller service supports. This + // field is OPTIONAL. 
+ Capabilities []*PluginCapability `protobuf:"bytes,1,rep,name=capabilities,proto3" json:"capabilities,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPluginCapabilitiesResponse) Reset() { *m = GetPluginCapabilitiesResponse{} } +func (m *GetPluginCapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*GetPluginCapabilitiesResponse) ProtoMessage() {} +func (*GetPluginCapabilitiesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{3} +} +func (m *GetPluginCapabilitiesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPluginCapabilitiesResponse.Unmarshal(m, b) +} +func (m *GetPluginCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPluginCapabilitiesResponse.Marshal(b, m, deterministic) +} +func (dst *GetPluginCapabilitiesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPluginCapabilitiesResponse.Merge(dst, src) +} +func (m *GetPluginCapabilitiesResponse) XXX_Size() int { + return xxx_messageInfo_GetPluginCapabilitiesResponse.Size(m) +} +func (m *GetPluginCapabilitiesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetPluginCapabilitiesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPluginCapabilitiesResponse proto.InternalMessageInfo + +func (m *GetPluginCapabilitiesResponse) GetCapabilities() []*PluginCapability { + if m != nil { + return m.Capabilities + } + return nil +} + +// Specifies a capability of the plugin. +type PluginCapability struct { + // Types that are valid to be assigned to Type: + // *PluginCapability_Service_ + Type isPluginCapability_Type `protobuf_oneof:"type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PluginCapability) Reset() { *m = PluginCapability{} } +func (m *PluginCapability) String() string { return proto.CompactTextString(m) } +func (*PluginCapability) ProtoMessage() {} +func (*PluginCapability) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{4} +} +func (m *PluginCapability) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PluginCapability.Unmarshal(m, b) +} +func (m *PluginCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PluginCapability.Marshal(b, m, deterministic) +} +func (dst *PluginCapability) XXX_Merge(src proto.Message) { + xxx_messageInfo_PluginCapability.Merge(dst, src) +} +func (m *PluginCapability) XXX_Size() int { + return xxx_messageInfo_PluginCapability.Size(m) +} +func (m *PluginCapability) XXX_DiscardUnknown() { + xxx_messageInfo_PluginCapability.DiscardUnknown(m) +} + +var xxx_messageInfo_PluginCapability proto.InternalMessageInfo + +type isPluginCapability_Type interface { + isPluginCapability_Type() +} + +type PluginCapability_Service_ struct { + Service *PluginCapability_Service `protobuf:"bytes,1,opt,name=service,proto3,oneof"` +} + +func (*PluginCapability_Service_) isPluginCapability_Type() {} + +func (m *PluginCapability) GetType() isPluginCapability_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *PluginCapability) GetService() *PluginCapability_Service { + if x, ok := m.GetType().(*PluginCapability_Service_); ok { + return x.Service + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*PluginCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _PluginCapability_OneofMarshaler, _PluginCapability_OneofUnmarshaler, _PluginCapability_OneofSizer, []interface{}{ + (*PluginCapability_Service_)(nil), + } +} + +func _PluginCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*PluginCapability) + // type + switch x := m.Type.(type) { + case *PluginCapability_Service_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Service); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("PluginCapability.Type has unexpected type %T", x) + } + return nil +} + +func _PluginCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*PluginCapability) + switch tag { + case 1: // type.service + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PluginCapability_Service) + err := b.DecodeMessage(msg) + m.Type = &PluginCapability_Service_{msg} + return true, err + default: + return false, nil + } +} + +func _PluginCapability_OneofSizer(msg proto.Message) (n int) { + m := msg.(*PluginCapability) + // type + switch x := m.Type.(type) { + case *PluginCapability_Service_: + s := proto.Size(x.Service) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type PluginCapability_Service struct { + Type PluginCapability_Service_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.PluginCapability_Service_Type" json:"type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PluginCapability_Service) Reset() { *m = PluginCapability_Service{} } +func (m *PluginCapability_Service) String() string { return proto.CompactTextString(m) } +func (*PluginCapability_Service) ProtoMessage() {} +func (*PluginCapability_Service) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{4, 0} +} +func (m *PluginCapability_Service) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PluginCapability_Service.Unmarshal(m, b) +} +func (m *PluginCapability_Service) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PluginCapability_Service.Marshal(b, m, deterministic) +} +func (dst *PluginCapability_Service) XXX_Merge(src proto.Message) { + xxx_messageInfo_PluginCapability_Service.Merge(dst, src) +} +func (m *PluginCapability_Service) XXX_Size() int { + return xxx_messageInfo_PluginCapability_Service.Size(m) +} +func (m *PluginCapability_Service) XXX_DiscardUnknown() { + xxx_messageInfo_PluginCapability_Service.DiscardUnknown(m) +} + +var xxx_messageInfo_PluginCapability_Service proto.InternalMessageInfo + +func (m *PluginCapability_Service) GetType() PluginCapability_Service_Type { + if m != nil { + return m.Type + } + return PluginCapability_Service_UNKNOWN +} + +type ProbeRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProbeRequest) Reset() { *m = ProbeRequest{} } +func (m *ProbeRequest) String() string { return proto.CompactTextString(m) } +func (*ProbeRequest) ProtoMessage() {} +func (*ProbeRequest) Descriptor() ([]byte, []int) { + return 
fileDescriptor_csi_1092d4f3f3c8dc30, []int{5} +} +func (m *ProbeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProbeRequest.Unmarshal(m, b) +} +func (m *ProbeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProbeRequest.Marshal(b, m, deterministic) +} +func (dst *ProbeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProbeRequest.Merge(dst, src) +} +func (m *ProbeRequest) XXX_Size() int { + return xxx_messageInfo_ProbeRequest.Size(m) +} +func (m *ProbeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ProbeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ProbeRequest proto.InternalMessageInfo + +type ProbeResponse struct { + // Readiness allows a plugin to report its initialization status back + // to the CO. Initialization for some plugins MAY be time consuming + // and it is important for a CO to distinguish between the following + // cases: + // + // 1) The plugin is in an unhealthy state and MAY need restarting. In + // this case a gRPC error code SHALL be returned. + // 2) The plugin is still initializing, but is otherwise perfectly + // healthy. In this case a successful response SHALL be returned + // with a readiness value of `false`. Calls to the plugin's + // Controller and/or Node services MAY fail due to an incomplete + // initialization state. + // 3) The plugin has finished initializing and is ready to service + // calls to its Controller and/or Node services. A successful + // response is returned with a readiness value of `true`. + // + // This field is OPTIONAL. If not present, the caller SHALL assume + // that the plugin is in a ready state and is accepting calls to its + // Controller and/or Node services (according to the plugin's reported + // capabilities). + Ready *wrappers.BoolValue `protobuf:"bytes,1,opt,name=ready,proto3" json:"ready,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProbeResponse) Reset() { *m = ProbeResponse{} } +func (m *ProbeResponse) String() string { return proto.CompactTextString(m) } +func (*ProbeResponse) ProtoMessage() {} +func (*ProbeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{6} +} +func (m *ProbeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProbeResponse.Unmarshal(m, b) +} +func (m *ProbeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProbeResponse.Marshal(b, m, deterministic) +} +func (dst *ProbeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProbeResponse.Merge(dst, src) +} +func (m *ProbeResponse) XXX_Size() int { + return xxx_messageInfo_ProbeResponse.Size(m) +} +func (m *ProbeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ProbeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ProbeResponse proto.InternalMessageInfo + +func (m *ProbeResponse) GetReady() *wrappers.BoolValue { + if m != nil { + return m.Ready + } + return nil +} + +type CreateVolumeRequest struct { + // The suggested name for the storage space. This field is REQUIRED. + // It serves two purposes: + // 1) Idempotency - This name is generated by the CO to achieve + // idempotency. The Plugin SHOULD ensure that multiple + // `CreateVolume` calls for the same name do not result in more + // than one piece of storage provisioned corresponding to that + // name. 
If a Plugin is unable to enforce idempotency, the CO's + // error recovery logic could result in multiple (unused) volumes + // being provisioned. + // In the case of error, the CO MUST handle the gRPC error codes + // per the recovery behavior defined in the "CreateVolume Errors" + // section below. + // The CO is responsible for cleaning up volumes it provisioned + // that it no longer needs. If the CO is uncertain whether a volume + // was provisioned or not when a `CreateVolume` call fails, the CO + // MAY call `CreateVolume` again, with the same name, to ensure the + // volume exists and to retrieve the volume's `volume_id` (unless + // otherwise prohibited by "CreateVolume Errors"). + // 2) Suggested name - Some storage systems allow callers to specify + // an identifier by which to refer to the newly provisioned + // storage. If a storage system supports this, it can optionally + // use this name as the identifier for the new volume. + // Any Unicode string that conforms to the length limit is allowed + // except those containing the following banned characters: + // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F. + // (These are control characters other than commonly used whitespace.) + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // This field is OPTIONAL. This allows the CO to specify the capacity + // requirement of the volume to be provisioned. If not specified, the + // Plugin MAY choose an implementation-defined capacity range. If + // specified it MUST always be honored, even when creating volumes + // from a source; which MAY force some backends to internally extend + // the volume after creating it. + CapacityRange *CapacityRange `protobuf:"bytes,2,opt,name=capacity_range,json=capacityRange,proto3" json:"capacity_range,omitempty"` + // The capabilities that the provisioned volume MUST have. SP MUST + // provision a volume that will satisfy ALL of the capabilities + // specified in this list. Otherwise SP MUST return the appropriate + // gRPC error code. + // The Plugin MUST assume that the CO MAY use the provisioned volume + // with ANY of the capabilities specified in this list. + // For example, a CO MAY specify two volume capabilities: one with + // access mode SINGLE_NODE_WRITER and another with access mode + // MULTI_NODE_READER_ONLY. In this case, the SP MUST verify that the + // provisioned volume can be used in either mode. + // This also enables the CO to do early validation: If ANY of the + // specified volume capabilities are not supported by the SP, the call + // MUST return the appropriate gRPC error code. + // This field is REQUIRED. + VolumeCapabilities []*VolumeCapability `protobuf:"bytes,3,rep,name=volume_capabilities,json=volumeCapabilities,proto3" json:"volume_capabilities,omitempty"` + // Plugin specific parameters passed in as opaque key-value pairs. + // This field is OPTIONAL. The Plugin is responsible for parsing and + // validating these parameters. COs will treat these as opaque. + Parameters map[string]string `protobuf:"bytes,4,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Secrets required by plugin to complete volume creation request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. 
+ Secrets map[string]string `protobuf:"bytes,5,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // If specified, the new volume will be pre-populated with data from + // this source. This field is OPTIONAL. + VolumeContentSource *VolumeContentSource `protobuf:"bytes,6,opt,name=volume_content_source,json=volumeContentSource,proto3" json:"volume_content_source,omitempty"` + // Specifies where (regions, zones, racks, etc.) the provisioned + // volume MUST be accessible from. + // An SP SHALL advertise the requirements for topological + // accessibility information in documentation. COs SHALL only specify + // topological accessibility information supported by the SP. + // This field is OPTIONAL. + // This field SHALL NOT be specified unless the SP has the + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. + // If this field is not specified and the SP has the + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY + // choose where the provisioned volume is accessible from. + AccessibilityRequirements *TopologyRequirement `protobuf:"bytes,7,opt,name=accessibility_requirements,json=accessibilityRequirements,proto3" json:"accessibility_requirements,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateVolumeRequest) Reset() { *m = CreateVolumeRequest{} } +func (m *CreateVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*CreateVolumeRequest) ProtoMessage() {} +func (*CreateVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{7} +} +func (m *CreateVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateVolumeRequest.Unmarshal(m, b) +} +func (m *CreateVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateVolumeRequest.Marshal(b, m, deterministic) +} +func (dst *CreateVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateVolumeRequest.Merge(dst, src) +} +func (m *CreateVolumeRequest) XXX_Size() int { + return xxx_messageInfo_CreateVolumeRequest.Size(m) +} +func (m *CreateVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateVolumeRequest proto.InternalMessageInfo + +func (m *CreateVolumeRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateVolumeRequest) GetCapacityRange() *CapacityRange { + if m != nil { + return m.CapacityRange + } + return nil +} + +func (m *CreateVolumeRequest) GetVolumeCapabilities() []*VolumeCapability { + if m != nil { + return m.VolumeCapabilities + } + return nil +} + +func (m *CreateVolumeRequest) GetParameters() map[string]string { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *CreateVolumeRequest) GetSecrets() map[string]string { + if m != nil { + return m.Secrets + } + return nil +} + +func (m *CreateVolumeRequest) GetVolumeContentSource() *VolumeContentSource { + if m != nil { + return m.VolumeContentSource + } + return nil +} + +func (m *CreateVolumeRequest) GetAccessibilityRequirements() *TopologyRequirement { + if m != nil { + return m.AccessibilityRequirements + } + return nil +} + +// Specifies what source the volume will be created from. One of the +// type fields MUST be specified. 
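Before moving on to `VolumeContentSource`, it may help to see the request message above assembled end to end. A hedged sketch (`conn` and `ctx` are assumed in scope; the volume name and the `"type"` parameter key are purely illustrative) of a CO asking for an ext4-mounted, single-writer volume of exactly 10 GiB:

```go
// required_bytes == limit_bytes pins the exact size, per CapacityRange.
req := &csi.CreateVolumeRequest{
	// Reusing the same name on retry is what makes the call idempotent.
	Name: "pv-example",
	CapacityRange: &csi.CapacityRange{
		RequiredBytes: 10 << 30, // 10 GiB
		LimitBytes:    10 << 30,
	},
	VolumeCapabilities: []*csi.VolumeCapability{{
		AccessType: &csi.VolumeCapability_Mount{
			Mount: &csi.VolumeCapability_MountVolume{FsType: "ext4"},
		},
		AccessMode: &csi.VolumeCapability_AccessMode{
			Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
		},
	}},
	// Opaque SP-specific parameters; the key below is hypothetical.
	Parameters: map[string]string{"type": "fast"},
}
resp, err := csi.NewControllerClient(conn).CreateVolume(ctx, req)
```

If the call fails and the CO cannot tell whether the volume was provisioned, repeating the call with the same `Name` is the documented recovery path.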
+type VolumeContentSource struct { + // Types that are valid to be assigned to Type: + // *VolumeContentSource_Snapshot + // *VolumeContentSource_Volume + Type isVolumeContentSource_Type `protobuf_oneof:"type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VolumeContentSource) Reset() { *m = VolumeContentSource{} } +func (m *VolumeContentSource) String() string { return proto.CompactTextString(m) } +func (*VolumeContentSource) ProtoMessage() {} +func (*VolumeContentSource) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{8} +} +func (m *VolumeContentSource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VolumeContentSource.Unmarshal(m, b) +} +func (m *VolumeContentSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VolumeContentSource.Marshal(b, m, deterministic) +} +func (dst *VolumeContentSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeContentSource.Merge(dst, src) +} +func (m *VolumeContentSource) XXX_Size() int { + return xxx_messageInfo_VolumeContentSource.Size(m) +} +func (m *VolumeContentSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeContentSource.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeContentSource proto.InternalMessageInfo + +type isVolumeContentSource_Type interface { + isVolumeContentSource_Type() +} + +type VolumeContentSource_Snapshot struct { + Snapshot *VolumeContentSource_SnapshotSource `protobuf:"bytes,1,opt,name=snapshot,proto3,oneof"` +} + +type VolumeContentSource_Volume struct { + Volume *VolumeContentSource_VolumeSource `protobuf:"bytes,2,opt,name=volume,proto3,oneof"` +} + +func (*VolumeContentSource_Snapshot) isVolumeContentSource_Type() {} + +func (*VolumeContentSource_Volume) isVolumeContentSource_Type() {} + +func (m *VolumeContentSource) GetType() isVolumeContentSource_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *VolumeContentSource) GetSnapshot() *VolumeContentSource_SnapshotSource { + if x, ok := m.GetType().(*VolumeContentSource_Snapshot); ok { + return x.Snapshot + } + return nil +} + +func (m *VolumeContentSource) GetVolume() *VolumeContentSource_VolumeSource { + if x, ok := m.GetType().(*VolumeContentSource_Volume); ok { + return x.Volume + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
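As the note above says, the oneof marshaling plumbing that follows is internal to the proto package; application code instead assigns one of the exported wrapper types and reads the field back through `GetType()` or the nil-safe accessors. A small sketch (the snapshot ID is illustrative):

```go
// Setting the oneof: wrap the concrete source in its wrapper type.
src := &csi.VolumeContentSource{
	Type: &csi.VolumeContentSource_Snapshot{
		Snapshot: &csi.VolumeContentSource_SnapshotSource{SnapshotId: "snap-1"},
	},
}

// Reading the oneof: a type switch covers all variants explicitly.
switch t := src.GetType().(type) {
case *csi.VolumeContentSource_Snapshot:
	fmt.Println("pre-populate from snapshot", t.Snapshot.GetSnapshotId())
case *csi.VolumeContentSource_Volume:
	fmt.Println("clone of volume", t.Volume.GetVolumeId())
default:
	fmt.Println("no content source set")
}
```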
+func (*VolumeContentSource) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _VolumeContentSource_OneofMarshaler, _VolumeContentSource_OneofUnmarshaler, _VolumeContentSource_OneofSizer, []interface{}{ + (*VolumeContentSource_Snapshot)(nil), + (*VolumeContentSource_Volume)(nil), + } +} + +func _VolumeContentSource_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*VolumeContentSource) + // type + switch x := m.Type.(type) { + case *VolumeContentSource_Snapshot: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Snapshot); err != nil { + return err + } + case *VolumeContentSource_Volume: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Volume); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("VolumeContentSource.Type has unexpected type %T", x) + } + return nil +} + +func _VolumeContentSource_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*VolumeContentSource) + switch tag { + case 1: // type.snapshot + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(VolumeContentSource_SnapshotSource) + err := b.DecodeMessage(msg) + m.Type = &VolumeContentSource_Snapshot{msg} + return true, err + case 2: // type.volume + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(VolumeContentSource_VolumeSource) + err := b.DecodeMessage(msg) + m.Type = &VolumeContentSource_Volume{msg} + return true, err + default: + return false, nil + } +} + +func _VolumeContentSource_OneofSizer(msg proto.Message) (n int) { + m := msg.(*VolumeContentSource) + // type + switch x := m.Type.(type) { + case *VolumeContentSource_Snapshot: + s := proto.Size(x.Snapshot) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *VolumeContentSource_Volume: + s := proto.Size(x.Volume) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type VolumeContentSource_SnapshotSource struct { + // Contains identity information for the existing source snapshot. + // This field is REQUIRED. Plugin is REQUIRED to support creating + // volume from snapshot if it supports the capability + // CREATE_DELETE_SNAPSHOT. 
+ SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VolumeContentSource_SnapshotSource) Reset() { *m = VolumeContentSource_SnapshotSource{} } +func (m *VolumeContentSource_SnapshotSource) String() string { return proto.CompactTextString(m) } +func (*VolumeContentSource_SnapshotSource) ProtoMessage() {} +func (*VolumeContentSource_SnapshotSource) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{8, 0} +} +func (m *VolumeContentSource_SnapshotSource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VolumeContentSource_SnapshotSource.Unmarshal(m, b) +} +func (m *VolumeContentSource_SnapshotSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VolumeContentSource_SnapshotSource.Marshal(b, m, deterministic) +} +func (dst *VolumeContentSource_SnapshotSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeContentSource_SnapshotSource.Merge(dst, src) +} +func (m *VolumeContentSource_SnapshotSource) XXX_Size() int { + return xxx_messageInfo_VolumeContentSource_SnapshotSource.Size(m) +} +func (m *VolumeContentSource_SnapshotSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeContentSource_SnapshotSource.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeContentSource_SnapshotSource proto.InternalMessageInfo + +func (m *VolumeContentSource_SnapshotSource) GetSnapshotId() string { + if m != nil { + return m.SnapshotId + } + return "" +} + +type VolumeContentSource_VolumeSource struct { + // Contains identity information for the existing source volume. + // This field is REQUIRED. Plugins reporting CLONE_VOLUME + // capability MUST support creating a volume from another volume. 
+ VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VolumeContentSource_VolumeSource) Reset() { *m = VolumeContentSource_VolumeSource{} } +func (m *VolumeContentSource_VolumeSource) String() string { return proto.CompactTextString(m) } +func (*VolumeContentSource_VolumeSource) ProtoMessage() {} +func (*VolumeContentSource_VolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{8, 1} +} +func (m *VolumeContentSource_VolumeSource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VolumeContentSource_VolumeSource.Unmarshal(m, b) +} +func (m *VolumeContentSource_VolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VolumeContentSource_VolumeSource.Marshal(b, m, deterministic) +} +func (dst *VolumeContentSource_VolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeContentSource_VolumeSource.Merge(dst, src) +} +func (m *VolumeContentSource_VolumeSource) XXX_Size() int { + return xxx_messageInfo_VolumeContentSource_VolumeSource.Size(m) +} +func (m *VolumeContentSource_VolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeContentSource_VolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeContentSource_VolumeSource proto.InternalMessageInfo + +func (m *VolumeContentSource_VolumeSource) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +type CreateVolumeResponse struct { + // Contains all attributes of the newly created volume that are + // relevant to the CO along with information required by the Plugin + // to uniquely identify the volume. This field is REQUIRED. + Volume *Volume `protobuf:"bytes,1,opt,name=volume,proto3" json:"volume,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateVolumeResponse) Reset() { *m = CreateVolumeResponse{} } +func (m *CreateVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*CreateVolumeResponse) ProtoMessage() {} +func (*CreateVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{9} +} +func (m *CreateVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateVolumeResponse.Unmarshal(m, b) +} +func (m *CreateVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateVolumeResponse.Marshal(b, m, deterministic) +} +func (dst *CreateVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateVolumeResponse.Merge(dst, src) +} +func (m *CreateVolumeResponse) XXX_Size() int { + return xxx_messageInfo_CreateVolumeResponse.Size(m) +} +func (m *CreateVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CreateVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateVolumeResponse proto.InternalMessageInfo + +func (m *CreateVolumeResponse) GetVolume() *Volume { + if m != nil { + return m.Volume + } + return nil +} + +// Specify a capability of a volume. +type VolumeCapability struct { + // Specifies what API the volume will be accessed using. One of the + // following fields MUST be specified. 
+ // + // Types that are valid to be assigned to AccessType: + // *VolumeCapability_Block + // *VolumeCapability_Mount + AccessType isVolumeCapability_AccessType `protobuf_oneof:"access_type"` + // This is a REQUIRED field. + AccessMode *VolumeCapability_AccessMode `protobuf:"bytes,3,opt,name=access_mode,json=accessMode,proto3" json:"access_mode,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VolumeCapability) Reset() { *m = VolumeCapability{} } +func (m *VolumeCapability) String() string { return proto.CompactTextString(m) } +func (*VolumeCapability) ProtoMessage() {} +func (*VolumeCapability) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{10} +} +func (m *VolumeCapability) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VolumeCapability.Unmarshal(m, b) +} +func (m *VolumeCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VolumeCapability.Marshal(b, m, deterministic) +} +func (dst *VolumeCapability) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeCapability.Merge(dst, src) +} +func (m *VolumeCapability) XXX_Size() int { + return xxx_messageInfo_VolumeCapability.Size(m) +} +func (m *VolumeCapability) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeCapability.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeCapability proto.InternalMessageInfo + +type isVolumeCapability_AccessType interface { + isVolumeCapability_AccessType() +} + +type VolumeCapability_Block struct { + Block *VolumeCapability_BlockVolume `protobuf:"bytes,1,opt,name=block,proto3,oneof"` +} + +type VolumeCapability_Mount struct { + Mount *VolumeCapability_MountVolume `protobuf:"bytes,2,opt,name=mount,proto3,oneof"` +} + +func (*VolumeCapability_Block) isVolumeCapability_AccessType() {} + +func (*VolumeCapability_Mount) isVolumeCapability_AccessType() {} + +func (m *VolumeCapability) GetAccessType() isVolumeCapability_AccessType { + if m != nil { + return m.AccessType + } + return nil +} + +func (m *VolumeCapability) GetBlock() *VolumeCapability_BlockVolume { + if x, ok := m.GetAccessType().(*VolumeCapability_Block); ok { + return x.Block + } + return nil +} + +func (m *VolumeCapability) GetMount() *VolumeCapability_MountVolume { + if x, ok := m.GetAccessType().(*VolumeCapability_Mount); ok { + return x.Mount + } + return nil +} + +func (m *VolumeCapability) GetAccessMode() *VolumeCapability_AccessMode { + if m != nil { + return m.AccessMode + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*VolumeCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _VolumeCapability_OneofMarshaler, _VolumeCapability_OneofUnmarshaler, _VolumeCapability_OneofSizer, []interface{}{ + (*VolumeCapability_Block)(nil), + (*VolumeCapability_Mount)(nil), + } +} + +func _VolumeCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*VolumeCapability) + // access_type + switch x := m.AccessType.(type) { + case *VolumeCapability_Block: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Block); err != nil { + return err + } + case *VolumeCapability_Mount: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Mount); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("VolumeCapability.AccessType has unexpected type %T", x) + } + return nil +} + +func _VolumeCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*VolumeCapability) + switch tag { + case 1: // access_type.block + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(VolumeCapability_BlockVolume) + err := b.DecodeMessage(msg) + m.AccessType = &VolumeCapability_Block{msg} + return true, err + case 2: // access_type.mount + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(VolumeCapability_MountVolume) + err := b.DecodeMessage(msg) + m.AccessType = &VolumeCapability_Mount{msg} + return true, err + default: + return false, nil + } +} + +func _VolumeCapability_OneofSizer(msg proto.Message) (n int) { + m := msg.(*VolumeCapability) + // access_type + switch x := m.AccessType.(type) { + case *VolumeCapability_Block: + s := proto.Size(x.Block) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *VolumeCapability_Mount: + s := proto.Size(x.Mount) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Indicate that the volume will be accessed via the block device API. 
+type VolumeCapability_BlockVolume struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VolumeCapability_BlockVolume) Reset() { *m = VolumeCapability_BlockVolume{} } +func (m *VolumeCapability_BlockVolume) String() string { return proto.CompactTextString(m) } +func (*VolumeCapability_BlockVolume) ProtoMessage() {} +func (*VolumeCapability_BlockVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{10, 0} +} +func (m *VolumeCapability_BlockVolume) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VolumeCapability_BlockVolume.Unmarshal(m, b) +} +func (m *VolumeCapability_BlockVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VolumeCapability_BlockVolume.Marshal(b, m, deterministic) +} +func (dst *VolumeCapability_BlockVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeCapability_BlockVolume.Merge(dst, src) +} +func (m *VolumeCapability_BlockVolume) XXX_Size() int { + return xxx_messageInfo_VolumeCapability_BlockVolume.Size(m) +} +func (m *VolumeCapability_BlockVolume) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeCapability_BlockVolume.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeCapability_BlockVolume proto.InternalMessageInfo + +// Indicate that the volume will be accessed via the filesystem API. +type VolumeCapability_MountVolume struct { + // The filesystem type. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. + FsType string `protobuf:"bytes,1,opt,name=fs_type,json=fsType,proto3" json:"fs_type,omitempty"` + // The mount options that can be used for the volume. This field is + // OPTIONAL. `mount_flags` MAY contain sensitive information. + // Therefore, the CO and the Plugin MUST NOT leak this information + // to untrusted entities. The total size of this repeated field + // SHALL NOT exceed 4 KiB. 
+ MountFlags []string `protobuf:"bytes,2,rep,name=mount_flags,json=mountFlags,proto3" json:"mount_flags,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VolumeCapability_MountVolume) Reset() { *m = VolumeCapability_MountVolume{} } +func (m *VolumeCapability_MountVolume) String() string { return proto.CompactTextString(m) } +func (*VolumeCapability_MountVolume) ProtoMessage() {} +func (*VolumeCapability_MountVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{10, 1} +} +func (m *VolumeCapability_MountVolume) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VolumeCapability_MountVolume.Unmarshal(m, b) +} +func (m *VolumeCapability_MountVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VolumeCapability_MountVolume.Marshal(b, m, deterministic) +} +func (dst *VolumeCapability_MountVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeCapability_MountVolume.Merge(dst, src) +} +func (m *VolumeCapability_MountVolume) XXX_Size() int { + return xxx_messageInfo_VolumeCapability_MountVolume.Size(m) +} +func (m *VolumeCapability_MountVolume) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeCapability_MountVolume.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeCapability_MountVolume proto.InternalMessageInfo + +func (m *VolumeCapability_MountVolume) GetFsType() string { + if m != nil { + return m.FsType + } + return "" +} + +func (m *VolumeCapability_MountVolume) GetMountFlags() []string { + if m != nil { + return m.MountFlags + } + return nil +} + +// Specify how a volume can be accessed. +type VolumeCapability_AccessMode struct { + // This field is REQUIRED. + Mode VolumeCapability_AccessMode_Mode `protobuf:"varint,1,opt,name=mode,proto3,enum=csi.v1.VolumeCapability_AccessMode_Mode" json:"mode,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VolumeCapability_AccessMode) Reset() { *m = VolumeCapability_AccessMode{} } +func (m *VolumeCapability_AccessMode) String() string { return proto.CompactTextString(m) } +func (*VolumeCapability_AccessMode) ProtoMessage() {} +func (*VolumeCapability_AccessMode) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{10, 2} +} +func (m *VolumeCapability_AccessMode) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VolumeCapability_AccessMode.Unmarshal(m, b) +} +func (m *VolumeCapability_AccessMode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VolumeCapability_AccessMode.Marshal(b, m, deterministic) +} +func (dst *VolumeCapability_AccessMode) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeCapability_AccessMode.Merge(dst, src) +} +func (m *VolumeCapability_AccessMode) XXX_Size() int { + return xxx_messageInfo_VolumeCapability_AccessMode.Size(m) +} +func (m *VolumeCapability_AccessMode) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeCapability_AccessMode.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeCapability_AccessMode proto.InternalMessageInfo + +func (m *VolumeCapability_AccessMode) GetMode() VolumeCapability_AccessMode_Mode { + if m != nil { + return m.Mode + } + return VolumeCapability_AccessMode_UNKNOWN +} + +// The capacity of the storage space in bytes. To specify an exact size, +// `required_bytes` and `limit_bytes` SHALL be set to the same value. 
At
+// least one of these fields MUST be specified.
+type CapacityRange struct {
+ // Volume MUST be at least this big. This field is OPTIONAL.
+ // A value of 0 is equal to an unspecified field value.
+ // The value of this field MUST NOT be negative.
+ RequiredBytes int64 `protobuf:"varint,1,opt,name=required_bytes,json=requiredBytes,proto3" json:"required_bytes,omitempty"`
+ // Volume MUST not be bigger than this. This field is OPTIONAL.
+ // A value of 0 is equal to an unspecified field value.
+ // The value of this field MUST NOT be negative.
+ LimitBytes int64 `protobuf:"varint,2,opt,name=limit_bytes,json=limitBytes,proto3" json:"limit_bytes,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CapacityRange) Reset() { *m = CapacityRange{} }
+func (m *CapacityRange) String() string { return proto.CompactTextString(m) }
+func (*CapacityRange) ProtoMessage() {}
+func (*CapacityRange) Descriptor() ([]byte, []int) {
+ return fileDescriptor_csi_1092d4f3f3c8dc30, []int{11}
+}
+func (m *CapacityRange) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CapacityRange.Unmarshal(m, b)
+}
+func (m *CapacityRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CapacityRange.Marshal(b, m, deterministic)
+}
+func (dst *CapacityRange) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CapacityRange.Merge(dst, src)
+}
+func (m *CapacityRange) XXX_Size() int {
+ return xxx_messageInfo_CapacityRange.Size(m)
+}
+func (m *CapacityRange) XXX_DiscardUnknown() {
+ xxx_messageInfo_CapacityRange.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CapacityRange proto.InternalMessageInfo
+
+func (m *CapacityRange) GetRequiredBytes() int64 {
+ if m != nil {
+ return m.RequiredBytes
+ }
+ return 0
+}
+
+func (m *CapacityRange) GetLimitBytes() int64 {
+ if m != nil {
+ return m.LimitBytes
+ }
+ return 0
+}
+
+// Information about a specific volume.
+type Volume struct {
+ // The capacity of the volume in bytes. This field is OPTIONAL. If not
+ // set (value of 0), it indicates that the capacity of the volume is
+ // unknown (e.g., NFS share).
+ // The value of this field MUST NOT be negative.
+ CapacityBytes int64 `protobuf:"varint,1,opt,name=capacity_bytes,json=capacityBytes,proto3" json:"capacity_bytes,omitempty"`
+ // The identifier for this volume, generated by the plugin.
+ // This field is REQUIRED.
+ // This field MUST contain enough information to uniquely identify
+ // this specific volume vs all other volumes supported by this plugin.
+ // This field SHALL be used by the CO in subsequent calls to refer to
+ // this volume.
+ // The SP is NOT responsible for global uniqueness of volume_id across
+ // multiple SPs.
+ VolumeId string `protobuf:"bytes,2,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+ // Opaque static properties of the volume. SP MAY use this field to
+ // ensure subsequent volume validation and publishing calls have
+ // contextual information.
+ // The contents of this field SHALL be opaque to a CO.
+ // The contents of this field SHALL NOT be mutable.
+ // The contents of this field SHALL be safe for the CO to cache.
+ // The contents of this field SHOULD NOT contain sensitive
+ // information.
+ // The contents of this field SHOULD NOT be used for uniquely
+ // identifying a volume. The `volume_id` alone SHOULD be sufficient to
+ // identify the volume.
+ // A volume uniquely identified by `volume_id` SHALL always report the + // same volume_context. + // This field is OPTIONAL and when present MUST be passed to volume + // validation and publishing calls. + VolumeContext map[string]string `protobuf:"bytes,3,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // If specified, indicates that the volume is not empty and is + // pre-populated with data from the specified source. + // This field is OPTIONAL. + ContentSource *VolumeContentSource `protobuf:"bytes,4,opt,name=content_source,json=contentSource,proto3" json:"content_source,omitempty"` + // Specifies where (regions, zones, racks, etc.) the provisioned + // volume is accessible from. + // A plugin that returns this field MUST also set the + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. + // An SP MAY specify multiple topologies to indicate the volume is + // accessible from multiple locations. + // COs MAY use this information along with the topology information + // returned by NodeGetInfo to ensure that a given volume is accessible + // from a given node when scheduling workloads. + // This field is OPTIONAL. If it is not specified, the CO MAY assume + // the volume is equally accessible from all nodes in the cluster and + // MAY schedule workloads referencing the volume on any available + // node. + // + // Example 1: + // accessible_topology = {"region": "R1", "zone": "Z2"} + // Indicates a volume accessible only from the "region" "R1" and the + // "zone" "Z2". + // + // Example 2: + // accessible_topology = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // Indicates a volume accessible from both "zone" "Z2" and "zone" "Z3" + // in the "region" "R1". 
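The scheduling check hinted at above (is this volume reachable from a given node?) reduces to segment matching: a node can reach the volume if the node's topology, as returned by `NodeGetInfo`, contains every segment of at least one entry in `accessible_topology`. A simplified sketch of that containment test, not taken from any particular CO:

```go
// topologyMatches reports whether nodeSegments satisfies at least one
// of the volume's accessible topologies.
func topologyMatches(accessible []*csi.Topology, nodeSegments map[string]string) bool {
	if len(accessible) == 0 {
		return true // no constraint reported: assume accessible everywhere
	}
	for _, topo := range accessible {
		matched := true
		for k, v := range topo.GetSegments() {
			if nodeSegments[k] != v {
				matched = false
				break
			}
		}
		if matched {
			return true
		}
	}
	return false
}
```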
+ AccessibleTopology []*Topology `protobuf:"bytes,5,rep,name=accessible_topology,json=accessibleTopology,proto3" json:"accessible_topology,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Volume) Reset() { *m = Volume{} }
+func (m *Volume) String() string { return proto.CompactTextString(m) }
+func (*Volume) ProtoMessage() {}
+func (*Volume) Descriptor() ([]byte, []int) {
+ return fileDescriptor_csi_1092d4f3f3c8dc30, []int{12}
+}
+func (m *Volume) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Volume.Unmarshal(m, b)
+}
+func (m *Volume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Volume.Marshal(b, m, deterministic)
+}
+func (dst *Volume) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Volume.Merge(dst, src)
+}
+func (m *Volume) XXX_Size() int {
+ return xxx_messageInfo_Volume.Size(m)
+}
+func (m *Volume) XXX_DiscardUnknown() {
+ xxx_messageInfo_Volume.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Volume proto.InternalMessageInfo
+
+func (m *Volume) GetCapacityBytes() int64 {
+ if m != nil {
+ return m.CapacityBytes
+ }
+ return 0
+}
+
+func (m *Volume) GetVolumeId() string {
+ if m != nil {
+ return m.VolumeId
+ }
+ return ""
+}
+
+func (m *Volume) GetVolumeContext() map[string]string {
+ if m != nil {
+ return m.VolumeContext
+ }
+ return nil
+}
+
+func (m *Volume) GetContentSource() *VolumeContentSource {
+ if m != nil {
+ return m.ContentSource
+ }
+ return nil
+}
+
+func (m *Volume) GetAccessibleTopology() []*Topology {
+ if m != nil {
+ return m.AccessibleTopology
+ }
+ return nil
+}
+
+type TopologyRequirement struct {
+ // Specifies the list of topologies the provisioned volume MUST be
+ // accessible from.
+ // This field is OPTIONAL. If TopologyRequirement is specified either
+ // requisite or preferred or both MUST be specified.
+ //
+ // If requisite is specified, the provisioned volume MUST be
+ // accessible from at least one of the requisite topologies.
+ //
+ // Given
+ // x = number of topologies provisioned volume is accessible from
+ // n = number of requisite topologies
+ // The CO MUST ensure n >= 1. The SP MUST ensure x >= 1
+ // If x==n, then the SP MUST make the provisioned volume available to
+ // all topologies from the list of requisite topologies. If it is
+ // unable to do so, the SP MUST fail the CreateVolume call.
+ // For example, if a volume should be accessible from a single zone,
+ // and requisite =
+ // {"region": "R1", "zone": "Z2"}
+ // then the provisioned volume MUST be accessible from the "region"
+ // "R1" and the "zone" "Z2".
+ // Similarly, if a volume should be accessible from two zones, and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"}
+ // then the provisioned volume MUST be accessible from the "region"
+ // "R1" and both "zone" "Z2" and "zone" "Z3".
+ //
+ // If x<n, then the SP SHALL choose x unique topologies from the list
+ // of requisite topologies. If it is unable to do so, the SP MUST fail
+ // the CreateVolume call.
+ // For example, if a volume should be accessible from a single zone,
+ // and requisite =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"}
+ // then the SP may choose to make the provisioned volume available in
+ // either the "zone" "Z2" or the "zone" "Z3" in the "region" "R1".
+ //
+ // If x>n, then the SP MUST make the provisioned volume available from
+ // all topologies from the list of requisite topologies and MAY choose
+ // the remaining x-n unique topologies from the list of all possible
+ // topologies. If it is unable to do so, the SP MUST fail the
+ // CreateVolume call.
+ // For example, if a volume should be accessible from two zones, and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"}
+ // then the provisioned volume MUST be accessible from the "region"
+ // "R1" and the "zone" "Z2" and the SP may select the second zone
+ // independently, e.g. "R1/Z4".
+ Requisite []*Topology `protobuf:"bytes,1,rep,name=requisite,proto3" json:"requisite,omitempty"`
+ // Specifies the list of topologies the CO would prefer the volume to
+ // be provisioned in.
+ //
+ // This field is OPTIONAL. If TopologyRequirement is specified either
+ // requisite or preferred or both MUST be specified.
+ //
+ // An SP MUST attempt to make the provisioned volume available using
+ // the preferred topologies in order from first to last.
+ //
+ // If requisite is specified, all topologies in preferred list MUST
+ // also be present in the list of requisite topologies.
+ //
+ // If the SP is unable to make the provisioned volume available
+ // from any of the preferred topologies, the SP MAY choose a topology
+ // from the list of requisite topologies.
+ // If the list of requisite topologies is not specified, then the SP
+ // MAY choose from the list of all possible topologies.
+ // If the list of requisite topologies is specified and the SP is
+ // unable to make the provisioned volume available from any of the
+ // requisite topologies it MUST fail the CreateVolume call.
+ //
+ // Example 1:
+ // Given a volume should be accessible from a single zone, and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"}
+ // preferred =
+ // {"region": "R1", "zone": "Z3"}
+ // then the SP SHOULD first attempt to make the provisioned volume
+ // available from "zone" "Z3" in the "region" "R1" and fall back to
+ // "zone" "Z2" in the "region" "R1" if that is not possible.
+ //
+ // Example 2:
+ // Given a volume should be accessible from a single zone, and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"},
+ // {"region": "R1", "zone": "Z4"},
+ // {"region": "R1", "zone": "Z5"}
+ // preferred =
+ // {"region": "R1", "zone": "Z4"},
+ // {"region": "R1", "zone": "Z2"}
+ // then the SP SHOULD first attempt to make the provisioned volume
+ // accessible from "zone" "Z4" in the "region" "R1" and fall back to
+ // "zone" "Z2" in the "region" "R1" if that is not possible. If that
+ // is not possible, the SP may choose between either the "zone"
+ // "Z3" or "Z5" in the "region" "R1".
+ //
+ // Example 3:
+ // Given a volume should be accessible from TWO zones (because an
+ // opaque parameter in CreateVolumeRequest, for example, specifies
+ // the volume is accessible from two zones, aka synchronously
+ // replicated), and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"},
+ // {"region": "R1", "zone": "Z4"},
+ // {"region": "R1", "zone": "Z5"}
+ // preferred =
+ // {"region": "R1", "zone": "Z5"},
+ // {"region": "R1", "zone": "Z3"}
+ // then the SP SHOULD first attempt to make the provisioned volume
+ // accessible from the combination of the two "zones" "Z5" and "Z3" in
+ // the "region" "R1". If that's not possible, it should fall back to
+ // a combination of "Z5" and other possibilities from the list of
+ // requisite. If that's not possible, it should fall back to a
+ // combination of "Z3" and other possibilities from the list of
+ // requisite. If that's not possible, it should fall back to a
+ // combination of other possibilities from the list of requisite.
+ Preferred []*Topology `protobuf:"bytes,2,rep,name=preferred,proto3" json:"preferred,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TopologyRequirement) Reset() { *m = TopologyRequirement{} } +func (m *TopologyRequirement) String() string { return proto.CompactTextString(m) } +func (*TopologyRequirement) ProtoMessage() {} +func (*TopologyRequirement) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{13} +} +func (m *TopologyRequirement) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TopologyRequirement.Unmarshal(m, b) +} +func (m *TopologyRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TopologyRequirement.Marshal(b, m, deterministic) +} +func (dst *TopologyRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_TopologyRequirement.Merge(dst, src) +} +func (m *TopologyRequirement) XXX_Size() int { + return xxx_messageInfo_TopologyRequirement.Size(m) +} +func (m *TopologyRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_TopologyRequirement.DiscardUnknown(m) +} + +var xxx_messageInfo_TopologyRequirement proto.InternalMessageInfo + +func (m *TopologyRequirement) GetRequisite() []*Topology { + if m != nil { + return m.Requisite + } + return nil +} + +func (m *TopologyRequirement) GetPreferred() []*Topology { + if m != nil { + return m.Preferred + } + return nil +} + +// Topology is a map of topological domains to topological segments. +// A topological domain is a sub-division of a cluster, like "region", +// "zone", "rack", etc. +// A topological segment is a specific instance of a topological domain, +// like "zone3", "rack3", etc. +// For example {"com.company/zone": "Z1", "com.company/rack": "R3"} +// Valid keys have two segments: an OPTIONAL prefix and name, separated +// by a slash (/), for example: "com.company.example/zone". +// The key name segment is REQUIRED. The prefix is OPTIONAL. +// The key name MUST be 63 characters or less, begin and end with an +// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), +// underscores (_), dots (.), or alphanumerics in between, for example +// "zone". +// The key prefix MUST be 63 characters or less, begin and end with a +// lower-case alphanumeric character ([a-z0-9]), contain only +// dashes (-), dots (.), or lower-case alphanumerics in between, and +// follow domain name notation format +// (https://tools.ietf.org/html/rfc1035#section-2.3.1). +// The key prefix SHOULD include the plugin's host company name and/or +// the plugin name, to minimize the possibility of collisions with keys +// from other plugins. +// If a key prefix is specified, it MUST be identical across all +// topology keys returned by the SP (across all RPCs). +// Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone" +// MUST not both exist. +// Each value (topological segment) MUST contain 1 or more strings. +// Each string MUST be 63 characters or less and begin and end with an +// alphanumeric character with '-', '_', '.', or alphanumerics in +// between. 
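To make Example 1 from the `preferred` comment concrete: the requisite/preferred pair translates directly into the generated types (the region and zone values are the ones used in the comment). The `Topology` message defined next carries these segment maps.

```go
// Example 1: volume needed in one of zones Z2/Z3 of region R1,
// preferring Z3.
req := &csi.TopologyRequirement{
	Requisite: []*csi.Topology{
		{Segments: map[string]string{"region": "R1", "zone": "Z2"}},
		{Segments: map[string]string{"region": "R1", "zone": "Z3"}},
	},
	Preferred: []*csi.Topology{
		{Segments: map[string]string{"region": "R1", "zone": "Z3"}},
	},
}
```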
+type Topology struct { + Segments map[string]string `protobuf:"bytes,1,rep,name=segments,proto3" json:"segments,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Topology) Reset() { *m = Topology{} } +func (m *Topology) String() string { return proto.CompactTextString(m) } +func (*Topology) ProtoMessage() {} +func (*Topology) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{14} +} +func (m *Topology) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Topology.Unmarshal(m, b) +} +func (m *Topology) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Topology.Marshal(b, m, deterministic) +} +func (dst *Topology) XXX_Merge(src proto.Message) { + xxx_messageInfo_Topology.Merge(dst, src) +} +func (m *Topology) XXX_Size() int { + return xxx_messageInfo_Topology.Size(m) +} +func (m *Topology) XXX_DiscardUnknown() { + xxx_messageInfo_Topology.DiscardUnknown(m) +} + +var xxx_messageInfo_Topology proto.InternalMessageInfo + +func (m *Topology) GetSegments() map[string]string { + if m != nil { + return m.Segments + } + return nil +} + +type DeleteVolumeRequest struct { + // The ID of the volume to be deprovisioned. + // This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // Secrets required by plugin to complete volume deletion request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + Secrets map[string]string `protobuf:"bytes,2,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteVolumeRequest) Reset() { *m = DeleteVolumeRequest{} } +func (m *DeleteVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteVolumeRequest) ProtoMessage() {} +func (*DeleteVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{15} +} +func (m *DeleteVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteVolumeRequest.Unmarshal(m, b) +} +func (m *DeleteVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteVolumeRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteVolumeRequest.Merge(dst, src) +} +func (m *DeleteVolumeRequest) XXX_Size() int { + return xxx_messageInfo_DeleteVolumeRequest.Size(m) +} +func (m *DeleteVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteVolumeRequest proto.InternalMessageInfo + +func (m *DeleteVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *DeleteVolumeRequest) GetSecrets() map[string]string { + if m != nil { + return m.Secrets + } + return nil +} + +type DeleteVolumeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteVolumeResponse) Reset() { *m = DeleteVolumeResponse{} } +func (m *DeleteVolumeResponse) String() string { return proto.CompactTextString(m) } 
+func (*DeleteVolumeResponse) ProtoMessage() {} +func (*DeleteVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{16} +} +func (m *DeleteVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteVolumeResponse.Unmarshal(m, b) +} +func (m *DeleteVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteVolumeResponse.Marshal(b, m, deterministic) +} +func (dst *DeleteVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteVolumeResponse.Merge(dst, src) +} +func (m *DeleteVolumeResponse) XXX_Size() int { + return xxx_messageInfo_DeleteVolumeResponse.Size(m) +} +func (m *DeleteVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteVolumeResponse proto.InternalMessageInfo + +type ControllerPublishVolumeRequest struct { + // The ID of the volume to be used on a node. + // This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // The ID of the node. This field is REQUIRED. The CO SHALL set this + // field to match the node ID returned by `NodeGetInfo`. + NodeId string `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the published volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. + // This is a REQUIRED field. + VolumeCapability *VolumeCapability `protobuf:"bytes,3,opt,name=volume_capability,json=volumeCapability,proto3" json:"volume_capability,omitempty"` + // Indicates SP MUST publish the volume in readonly mode. + // CO MUST set this field to false if SP does not have the + // PUBLISH_READONLY controller capability. + // This is a REQUIRED field. + Readonly bool `protobuf:"varint,4,opt,name=readonly,proto3" json:"readonly,omitempty"` + // Secrets required by plugin to complete controller publish volume + // request. This field is OPTIONAL. Refer to the + // `Secrets Requirements` section on how to use this field. + Secrets map[string]string `protobuf:"bytes,5,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. 
+ VolumeContext map[string]string `protobuf:"bytes,6,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ControllerPublishVolumeRequest) Reset() { *m = ControllerPublishVolumeRequest{} }
+func (m *ControllerPublishVolumeRequest) String() string { return proto.CompactTextString(m) }
+func (*ControllerPublishVolumeRequest) ProtoMessage() {}
+func (*ControllerPublishVolumeRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_csi_1092d4f3f3c8dc30, []int{17}
+}
+func (m *ControllerPublishVolumeRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ControllerPublishVolumeRequest.Unmarshal(m, b)
+}
+func (m *ControllerPublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ControllerPublishVolumeRequest.Marshal(b, m, deterministic)
+}
+func (dst *ControllerPublishVolumeRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ControllerPublishVolumeRequest.Merge(dst, src)
+}
+func (m *ControllerPublishVolumeRequest) XXX_Size() int {
+ return xxx_messageInfo_ControllerPublishVolumeRequest.Size(m)
+}
+func (m *ControllerPublishVolumeRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ControllerPublishVolumeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerPublishVolumeRequest proto.InternalMessageInfo
+
+func (m *ControllerPublishVolumeRequest) GetVolumeId() string {
+ if m != nil {
+ return m.VolumeId
+ }
+ return ""
+}
+
+func (m *ControllerPublishVolumeRequest) GetNodeId() string {
+ if m != nil {
+ return m.NodeId
+ }
+ return ""
+}
+
+func (m *ControllerPublishVolumeRequest) GetVolumeCapability() *VolumeCapability {
+ if m != nil {
+ return m.VolumeCapability
+ }
+ return nil
+}
+
+func (m *ControllerPublishVolumeRequest) GetReadonly() bool {
+ if m != nil {
+ return m.Readonly
+ }
+ return false
+}
+
+func (m *ControllerPublishVolumeRequest) GetSecrets() map[string]string {
+ if m != nil {
+ return m.Secrets
+ }
+ return nil
+}
+
+func (m *ControllerPublishVolumeRequest) GetVolumeContext() map[string]string {
+ if m != nil {
+ return m.VolumeContext
+ }
+ return nil
+}
+
+type ControllerPublishVolumeResponse struct {
+ // Opaque static publish properties of the volume. SP MAY use this
+ // field to ensure subsequent `NodeStageVolume` or `NodePublishVolume`
+ // calls have contextual information.
+ // The contents of this field SHALL be opaque to a CO.
+ // The contents of this field SHALL NOT be mutable.
+ // The contents of this field SHALL be safe for the CO to cache.
+ // The contents of this field SHOULD NOT contain sensitive
+ // information.
+ // The contents of this field SHOULD NOT be used for uniquely
+ // identifying a volume. The `volume_id` alone SHOULD be sufficient to
+ // identify the volume.
+ // This field is OPTIONAL and when present MUST be passed to + // subsequent `NodeStageVolume` or `NodePublishVolume` calls + PublishContext map[string]string `protobuf:"bytes,1,rep,name=publish_context,json=publishContext,proto3" json:"publish_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerPublishVolumeResponse) Reset() { *m = ControllerPublishVolumeResponse{} } +func (m *ControllerPublishVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*ControllerPublishVolumeResponse) ProtoMessage() {} +func (*ControllerPublishVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{18} +} +func (m *ControllerPublishVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerPublishVolumeResponse.Unmarshal(m, b) +} +func (m *ControllerPublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerPublishVolumeResponse.Marshal(b, m, deterministic) +} +func (dst *ControllerPublishVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerPublishVolumeResponse.Merge(dst, src) +} +func (m *ControllerPublishVolumeResponse) XXX_Size() int { + return xxx_messageInfo_ControllerPublishVolumeResponse.Size(m) +} +func (m *ControllerPublishVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerPublishVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerPublishVolumeResponse proto.InternalMessageInfo + +func (m *ControllerPublishVolumeResponse) GetPublishContext() map[string]string { + if m != nil { + return m.PublishContext + } + return nil +} + +type ControllerUnpublishVolumeRequest struct { + // The ID of the volume. This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // The ID of the node. This field is OPTIONAL. The CO SHOULD set this + // field to match the node ID returned by `NodeGetInfo` or leave it + // unset. If the value is set, the SP MUST unpublish the volume from + // the specified node. If the value is unset, the SP MUST unpublish + // the volume from all nodes it is published to. + NodeId string `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + // Secrets required by plugin to complete controller unpublish volume + // request. This SHOULD be the same secrets passed to the + // ControllerPublishVolume call for the specified volume. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. 
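Taken together, the publish and unpublish messages above form the attach/detach leg of the volume lifecycle. A CO-side sketch of the round trip (`vol`, `volCap`, and `nodeID` are illustrative inputs; `nodeID` would come from `NodeGetInfo` on the target node), with the `Secrets` field defined next attached to either request when the SP requires it:

```go
// attachDetach publishes a volume to a node, keeps the PublishContext
// for the later node calls, then detaches it again.
func attachDetach(ctx context.Context, conn *grpc.ClientConn,
	vol *csi.Volume, volCap *csi.VolumeCapability, nodeID string) error {
	cc := csi.NewControllerClient(conn)
	pub, err := cc.ControllerPublishVolume(ctx, &csi.ControllerPublishVolumeRequest{
		VolumeId:         vol.GetVolumeId(),
		NodeId:           nodeID,
		VolumeCapability: volCap, // how the CO intends to use the volume
		Readonly:         false,
	})
	if err != nil {
		return err
	}
	// PublishContext MUST travel to NodeStageVolume/NodePublishVolume.
	_ = pub.GetPublishContext()

	// Later, once the volume is no longer needed on that node:
	_, err = cc.ControllerUnpublishVolume(ctx, &csi.ControllerUnpublishVolumeRequest{
		VolumeId: vol.GetVolumeId(),
		NodeId:   nodeID,
	})
	return err
}
```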
+ Secrets map[string]string `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerUnpublishVolumeRequest) Reset() { *m = ControllerUnpublishVolumeRequest{} } +func (m *ControllerUnpublishVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*ControllerUnpublishVolumeRequest) ProtoMessage() {} +func (*ControllerUnpublishVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{19} +} +func (m *ControllerUnpublishVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerUnpublishVolumeRequest.Unmarshal(m, b) +} +func (m *ControllerUnpublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerUnpublishVolumeRequest.Marshal(b, m, deterministic) +} +func (dst *ControllerUnpublishVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerUnpublishVolumeRequest.Merge(dst, src) +} +func (m *ControllerUnpublishVolumeRequest) XXX_Size() int { + return xxx_messageInfo_ControllerUnpublishVolumeRequest.Size(m) +} +func (m *ControllerUnpublishVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerUnpublishVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerUnpublishVolumeRequest proto.InternalMessageInfo + +func (m *ControllerUnpublishVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *ControllerUnpublishVolumeRequest) GetNodeId() string { + if m != nil { + return m.NodeId + } + return "" +} + +func (m *ControllerUnpublishVolumeRequest) GetSecrets() map[string]string { + if m != nil { + return m.Secrets + } + return nil +} + +type ControllerUnpublishVolumeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerUnpublishVolumeResponse) Reset() { *m = ControllerUnpublishVolumeResponse{} } +func (m *ControllerUnpublishVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*ControllerUnpublishVolumeResponse) ProtoMessage() {} +func (*ControllerUnpublishVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{20} +} +func (m *ControllerUnpublishVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerUnpublishVolumeResponse.Unmarshal(m, b) +} +func (m *ControllerUnpublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerUnpublishVolumeResponse.Marshal(b, m, deterministic) +} +func (dst *ControllerUnpublishVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerUnpublishVolumeResponse.Merge(dst, src) +} +func (m *ControllerUnpublishVolumeResponse) XXX_Size() int { + return xxx_messageInfo_ControllerUnpublishVolumeResponse.Size(m) +} +func (m *ControllerUnpublishVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerUnpublishVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerUnpublishVolumeResponse proto.InternalMessageInfo + +type ValidateVolumeCapabilitiesRequest struct { + // The ID of the volume to check. This field is REQUIRED. 
+ VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + VolumeContext map[string]string `protobuf:"bytes,2,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The capabilities that the CO wants to check for the volume. This + // call SHALL return "confirmed" only if all the volume capabilities + // specified below are supported. This field is REQUIRED. + VolumeCapabilities []*VolumeCapability `protobuf:"bytes,3,rep,name=volume_capabilities,json=volumeCapabilities,proto3" json:"volume_capabilities,omitempty"` + // See CreateVolumeRequest.parameters. + // This field is OPTIONAL. + Parameters map[string]string `protobuf:"bytes,4,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Secrets required by plugin to complete volume validation request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + Secrets map[string]string `protobuf:"bytes,5,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateVolumeCapabilitiesRequest) Reset() { *m = ValidateVolumeCapabilitiesRequest{} } +func (m *ValidateVolumeCapabilitiesRequest) String() string { return proto.CompactTextString(m) } +func (*ValidateVolumeCapabilitiesRequest) ProtoMessage() {} +func (*ValidateVolumeCapabilitiesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{21} +} +func (m *ValidateVolumeCapabilitiesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Unmarshal(m, b) +} +func (m *ValidateVolumeCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Marshal(b, m, deterministic) +} +func (dst *ValidateVolumeCapabilitiesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Merge(dst, src) +} +func (m *ValidateVolumeCapabilitiesRequest) XXX_Size() int { + return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Size(m) +} +func (m *ValidateVolumeCapabilitiesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateVolumeCapabilitiesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateVolumeCapabilitiesRequest proto.InternalMessageInfo + +func (m *ValidateVolumeCapabilitiesRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *ValidateVolumeCapabilitiesRequest) GetVolumeContext() map[string]string { + if m != nil { + return m.VolumeContext + } + return nil +} + +func (m *ValidateVolumeCapabilitiesRequest) GetVolumeCapabilities() []*VolumeCapability { + if m != nil { + return m.VolumeCapabilities + } + return nil +} + +func (m *ValidateVolumeCapabilitiesRequest) GetParameters() map[string]string { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *ValidateVolumeCapabilitiesRequest) GetSecrets() map[string]string { + if m != nil { + return m.Secrets + } + return 
nil +} + +type ValidateVolumeCapabilitiesResponse struct { + // Confirmed indicates to the CO the set of capabilities that the + // plugin has validated. This field SHALL only be set to a non-empty + // value for successful validation responses. + // For successful validation responses, the CO SHALL compare the + // fields of this message to the originally requested capabilities in + // order to guard against an older plugin reporting "valid" for newer + // capability fields that it does not yet understand. + // This field is OPTIONAL. + Confirmed *ValidateVolumeCapabilitiesResponse_Confirmed `protobuf:"bytes,1,opt,name=confirmed,proto3" json:"confirmed,omitempty"` + // Message to the CO if `confirmed` above is empty. This field is + // OPTIONAL. + // An empty string is equal to an unspecified field value. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateVolumeCapabilitiesResponse) Reset() { *m = ValidateVolumeCapabilitiesResponse{} } +func (m *ValidateVolumeCapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*ValidateVolumeCapabilitiesResponse) ProtoMessage() {} +func (*ValidateVolumeCapabilitiesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{22} +} +func (m *ValidateVolumeCapabilitiesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Unmarshal(m, b) +} +func (m *ValidateVolumeCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Marshal(b, m, deterministic) +} +func (dst *ValidateVolumeCapabilitiesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Merge(dst, src) +} +func (m *ValidateVolumeCapabilitiesResponse) XXX_Size() int { + return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Size(m) +} +func (m *ValidateVolumeCapabilitiesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateVolumeCapabilitiesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateVolumeCapabilitiesResponse proto.InternalMessageInfo + +func (m *ValidateVolumeCapabilitiesResponse) GetConfirmed() *ValidateVolumeCapabilitiesResponse_Confirmed { + if m != nil { + return m.Confirmed + } + return nil +} + +func (m *ValidateVolumeCapabilitiesResponse) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +type ValidateVolumeCapabilitiesResponse_Confirmed struct { + // Volume context validated by the plugin. + // This field is OPTIONAL. + VolumeContext map[string]string `protobuf:"bytes,1,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Volume capabilities supported by the plugin. + // This field is REQUIRED. + VolumeCapabilities []*VolumeCapability `protobuf:"bytes,2,rep,name=volume_capabilities,json=volumeCapabilities,proto3" json:"volume_capabilities,omitempty"` + // The volume creation parameters validated by the plugin. + // This field is OPTIONAL. 
+ Parameters map[string]string `protobuf:"bytes,3,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateVolumeCapabilitiesResponse_Confirmed) Reset() { + *m = ValidateVolumeCapabilitiesResponse_Confirmed{} +} +func (m *ValidateVolumeCapabilitiesResponse_Confirmed) String() string { + return proto.CompactTextString(m) +} +func (*ValidateVolumeCapabilitiesResponse_Confirmed) ProtoMessage() {} +func (*ValidateVolumeCapabilitiesResponse_Confirmed) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{22, 0} +} +func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.Unmarshal(m, b) +} +func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.Marshal(b, m, deterministic) +} +func (dst *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.Merge(dst, src) +} +func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_Size() int { + return xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.Size(m) +} +func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed proto.InternalMessageInfo + +func (m *ValidateVolumeCapabilitiesResponse_Confirmed) GetVolumeContext() map[string]string { + if m != nil { + return m.VolumeContext + } + return nil +} + +func (m *ValidateVolumeCapabilitiesResponse_Confirmed) GetVolumeCapabilities() []*VolumeCapability { + if m != nil { + return m.VolumeCapabilities + } + return nil +} + +func (m *ValidateVolumeCapabilitiesResponse_Confirmed) GetParameters() map[string]string { + if m != nil { + return m.Parameters + } + return nil +} + +type ListVolumesRequest struct { + // If specified (non-zero value), the Plugin MUST NOT return more + // entries than this number in the response. If the actual number of + // entries is more than this number, the Plugin MUST set `next_token` + // in the response which can be used to get the next page of entries + // in the subsequent `ListVolumes` call. This field is OPTIONAL. If + // not specified (zero value), it means there is no restriction on the + // number of entries that can be returned. + // The value of this field MUST NOT be negative. + MaxEntries int32 `protobuf:"varint,1,opt,name=max_entries,json=maxEntries,proto3" json:"max_entries,omitempty"` + // A token to specify where to start paginating. Set this field to + // `next_token` returned by a previous `ListVolumes` call to get the + // next page of entries. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. 
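A sketch of a `ValidateVolumeCapabilities` round trip using the messages above; it assumes the `VolumeCapability_Mount`/`VolumeCapability_AccessMode` wrapper types defined earlier in this file, and the function name is illustrative:

```go
import (
	"context"
	"fmt"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// checkMountCapability asks the SP whether volID supports a single-writer
// ext4 mount. Per the Confirmed comment above, an empty `confirmed` message
// means validation failed and `message` may say why.
func checkMountCapability(ctx context.Context, cc csi.ControllerClient, volID string) error {
	caps := []*csi.VolumeCapability{{
		AccessType: &csi.VolumeCapability_Mount{
			Mount: &csi.VolumeCapability_MountVolume{FsType: "ext4"},
		},
		AccessMode: &csi.VolumeCapability_AccessMode{
			Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
		},
	}}
	resp, err := cc.ValidateVolumeCapabilities(ctx, &csi.ValidateVolumeCapabilitiesRequest{
		VolumeId:           volID,
		VolumeCapabilities: caps,
	})
	if err != nil {
		return err
	}
	if resp.GetConfirmed() == nil {
		return fmt.Errorf("capabilities not confirmed: %s", resp.GetMessage())
	}
	// A careful CO would also compare resp.GetConfirmed().GetVolumeCapabilities()
	// field by field against `caps`, to guard against an older plugin that
	// "confirms" capability fields it does not actually understand.
	return nil
}
```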
+ StartingToken string `protobuf:"bytes,2,opt,name=starting_token,json=startingToken,proto3" json:"starting_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListVolumesRequest) Reset() { *m = ListVolumesRequest{} } +func (m *ListVolumesRequest) String() string { return proto.CompactTextString(m) } +func (*ListVolumesRequest) ProtoMessage() {} +func (*ListVolumesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{23} +} +func (m *ListVolumesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListVolumesRequest.Unmarshal(m, b) +} +func (m *ListVolumesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListVolumesRequest.Marshal(b, m, deterministic) +} +func (dst *ListVolumesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListVolumesRequest.Merge(dst, src) +} +func (m *ListVolumesRequest) XXX_Size() int { + return xxx_messageInfo_ListVolumesRequest.Size(m) +} +func (m *ListVolumesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListVolumesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListVolumesRequest proto.InternalMessageInfo + +func (m *ListVolumesRequest) GetMaxEntries() int32 { + if m != nil { + return m.MaxEntries + } + return 0 +} + +func (m *ListVolumesRequest) GetStartingToken() string { + if m != nil { + return m.StartingToken + } + return "" +} + +type ListVolumesResponse struct { + Entries []*ListVolumesResponse_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` + // This token allows you to get the next page of entries for + // `ListVolumes` request. If the number of entries is larger than + // `max_entries`, use the `next_token` as a value for the + // `starting_token` field in the next `ListVolumes` request. This + // field is OPTIONAL. + // An empty string is equal to an unspecified field value. 
+ NextToken string `protobuf:"bytes,2,opt,name=next_token,json=nextToken,proto3" json:"next_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListVolumesResponse) Reset() { *m = ListVolumesResponse{} } +func (m *ListVolumesResponse) String() string { return proto.CompactTextString(m) } +func (*ListVolumesResponse) ProtoMessage() {} +func (*ListVolumesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{24} +} +func (m *ListVolumesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListVolumesResponse.Unmarshal(m, b) +} +func (m *ListVolumesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListVolumesResponse.Marshal(b, m, deterministic) +} +func (dst *ListVolumesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListVolumesResponse.Merge(dst, src) +} +func (m *ListVolumesResponse) XXX_Size() int { + return xxx_messageInfo_ListVolumesResponse.Size(m) +} +func (m *ListVolumesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListVolumesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListVolumesResponse proto.InternalMessageInfo + +func (m *ListVolumesResponse) GetEntries() []*ListVolumesResponse_Entry { + if m != nil { + return m.Entries + } + return nil +} + +func (m *ListVolumesResponse) GetNextToken() string { + if m != nil { + return m.NextToken + } + return "" +} + +type ListVolumesResponse_Entry struct { + Volume *Volume `protobuf:"bytes,1,opt,name=volume,proto3" json:"volume,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListVolumesResponse_Entry) Reset() { *m = ListVolumesResponse_Entry{} } +func (m *ListVolumesResponse_Entry) String() string { return proto.CompactTextString(m) } +func (*ListVolumesResponse_Entry) ProtoMessage() {} +func (*ListVolumesResponse_Entry) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{24, 0} +} +func (m *ListVolumesResponse_Entry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListVolumesResponse_Entry.Unmarshal(m, b) +} +func (m *ListVolumesResponse_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListVolumesResponse_Entry.Marshal(b, m, deterministic) +} +func (dst *ListVolumesResponse_Entry) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListVolumesResponse_Entry.Merge(dst, src) +} +func (m *ListVolumesResponse_Entry) XXX_Size() int { + return xxx_messageInfo_ListVolumesResponse_Entry.Size(m) +} +func (m *ListVolumesResponse_Entry) XXX_DiscardUnknown() { + xxx_messageInfo_ListVolumesResponse_Entry.DiscardUnknown(m) +} + +var xxx_messageInfo_ListVolumesResponse_Entry proto.InternalMessageInfo + +func (m *ListVolumesResponse_Entry) GetVolume() *Volume { + if m != nil { + return m.Volume + } + return nil +} + +type GetCapacityRequest struct { + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes that satisfy ALL of the + // specified `volume_capabilities`. These are the same + // `volume_capabilities` the CO will use in `CreateVolumeRequest`. + // This field is OPTIONAL. 
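The `starting_token`/`next_token` pagination contract described above works out to a simple loop on the CO side; a sketch (illustrative helper name, upstream import path assumed):

```go
import (
	"context"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// listAllVolumeIDs walks ListVolumes pages until the SP stops returning
// a next_token, exactly as the field comments above describe.
func listAllVolumeIDs(ctx context.Context, cc csi.ControllerClient) ([]string, error) {
	var ids []string
	token := "" // empty string == unspecified, i.e. start at the beginning
	for {
		resp, err := cc.ListVolumes(ctx, &csi.ListVolumesRequest{
			MaxEntries:    100, // OPTIONAL page-size cap; 0 would mean no limit
			StartingToken: token,
		})
		if err != nil {
			return nil, err
		}
		for _, e := range resp.GetEntries() {
			ids = append(ids, e.GetVolume().GetVolumeId())
		}
		token = resp.GetNextToken()
		if token == "" { // no more pages
			return ids, nil
		}
	}
}
```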
+ VolumeCapabilities []*VolumeCapability `protobuf:"bytes,1,rep,name=volume_capabilities,json=volumeCapabilities,proto3" json:"volume_capabilities,omitempty"` + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes with the given Plugin + // specific `parameters`. These are the same `parameters` the CO will + // use in `CreateVolumeRequest`. This field is OPTIONAL. + Parameters map[string]string `protobuf:"bytes,2,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes that in the specified + // `accessible_topology`. This is the same as the + // `accessible_topology` the CO returns in a `CreateVolumeResponse`. + // This field is OPTIONAL. This field SHALL NOT be set unless the + // plugin advertises the VOLUME_ACCESSIBILITY_CONSTRAINTS capability. + AccessibleTopology *Topology `protobuf:"bytes,3,opt,name=accessible_topology,json=accessibleTopology,proto3" json:"accessible_topology,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetCapacityRequest) Reset() { *m = GetCapacityRequest{} } +func (m *GetCapacityRequest) String() string { return proto.CompactTextString(m) } +func (*GetCapacityRequest) ProtoMessage() {} +func (*GetCapacityRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{25} +} +func (m *GetCapacityRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetCapacityRequest.Unmarshal(m, b) +} +func (m *GetCapacityRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetCapacityRequest.Marshal(b, m, deterministic) +} +func (dst *GetCapacityRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetCapacityRequest.Merge(dst, src) +} +func (m *GetCapacityRequest) XXX_Size() int { + return xxx_messageInfo_GetCapacityRequest.Size(m) +} +func (m *GetCapacityRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetCapacityRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetCapacityRequest proto.InternalMessageInfo + +func (m *GetCapacityRequest) GetVolumeCapabilities() []*VolumeCapability { + if m != nil { + return m.VolumeCapabilities + } + return nil +} + +func (m *GetCapacityRequest) GetParameters() map[string]string { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *GetCapacityRequest) GetAccessibleTopology() *Topology { + if m != nil { + return m.AccessibleTopology + } + return nil +} + +type GetCapacityResponse struct { + // The available capacity, in bytes, of the storage that can be used + // to provision volumes. If `volume_capabilities` or `parameters` is + // specified in the request, the Plugin SHALL take those into + // consideration when calculating the available capacity of the + // storage. This field is REQUIRED. + // The value of this field MUST NOT be negative. 
+ AvailableCapacity int64 `protobuf:"varint,1,opt,name=available_capacity,json=availableCapacity,proto3" json:"available_capacity,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetCapacityResponse) Reset() { *m = GetCapacityResponse{} } +func (m *GetCapacityResponse) String() string { return proto.CompactTextString(m) } +func (*GetCapacityResponse) ProtoMessage() {} +func (*GetCapacityResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{26} +} +func (m *GetCapacityResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetCapacityResponse.Unmarshal(m, b) +} +func (m *GetCapacityResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetCapacityResponse.Marshal(b, m, deterministic) +} +func (dst *GetCapacityResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetCapacityResponse.Merge(dst, src) +} +func (m *GetCapacityResponse) XXX_Size() int { + return xxx_messageInfo_GetCapacityResponse.Size(m) +} +func (m *GetCapacityResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetCapacityResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetCapacityResponse proto.InternalMessageInfo + +func (m *GetCapacityResponse) GetAvailableCapacity() int64 { + if m != nil { + return m.AvailableCapacity + } + return 0 +} + +type ControllerGetCapabilitiesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerGetCapabilitiesRequest) Reset() { *m = ControllerGetCapabilitiesRequest{} } +func (m *ControllerGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) } +func (*ControllerGetCapabilitiesRequest) ProtoMessage() {} +func (*ControllerGetCapabilitiesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{27} +} +func (m *ControllerGetCapabilitiesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerGetCapabilitiesRequest.Unmarshal(m, b) +} +func (m *ControllerGetCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerGetCapabilitiesRequest.Marshal(b, m, deterministic) +} +func (dst *ControllerGetCapabilitiesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerGetCapabilitiesRequest.Merge(dst, src) +} +func (m *ControllerGetCapabilitiesRequest) XXX_Size() int { + return xxx_messageInfo_ControllerGetCapabilitiesRequest.Size(m) +} +func (m *ControllerGetCapabilitiesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerGetCapabilitiesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerGetCapabilitiesRequest proto.InternalMessageInfo + +type ControllerGetCapabilitiesResponse struct { + // All the capabilities that the controller service supports. This + // field is OPTIONAL. 
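`GetCapacity` is a single request/response pair; a sketch of the call (helper name illustrative):

```go
import (
	"context"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// availableBytes reports how much storage the SP could still provision for
// volumes matching the given capabilities. All request fields are OPTIONAL,
// so passing nil capabilities asks about the storage pool as a whole.
func availableBytes(ctx context.Context, cc csi.ControllerClient, caps []*csi.VolumeCapability) (int64, error) {
	resp, err := cc.GetCapacity(ctx, &csi.GetCapacityRequest{VolumeCapabilities: caps})
	if err != nil {
		return 0, err
	}
	return resp.GetAvailableCapacity(), nil
}
```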
+ Capabilities []*ControllerServiceCapability `protobuf:"bytes,1,rep,name=capabilities,proto3" json:"capabilities,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerGetCapabilitiesResponse) Reset() { *m = ControllerGetCapabilitiesResponse{} } +func (m *ControllerGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*ControllerGetCapabilitiesResponse) ProtoMessage() {} +func (*ControllerGetCapabilitiesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{28} +} +func (m *ControllerGetCapabilitiesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerGetCapabilitiesResponse.Unmarshal(m, b) +} +func (m *ControllerGetCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerGetCapabilitiesResponse.Marshal(b, m, deterministic) +} +func (dst *ControllerGetCapabilitiesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerGetCapabilitiesResponse.Merge(dst, src) +} +func (m *ControllerGetCapabilitiesResponse) XXX_Size() int { + return xxx_messageInfo_ControllerGetCapabilitiesResponse.Size(m) +} +func (m *ControllerGetCapabilitiesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerGetCapabilitiesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerGetCapabilitiesResponse proto.InternalMessageInfo + +func (m *ControllerGetCapabilitiesResponse) GetCapabilities() []*ControllerServiceCapability { + if m != nil { + return m.Capabilities + } + return nil +} + +// Specifies a capability of the controller service. +type ControllerServiceCapability struct { + // Types that are valid to be assigned to Type: + // *ControllerServiceCapability_Rpc + Type isControllerServiceCapability_Type `protobuf_oneof:"type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerServiceCapability) Reset() { *m = ControllerServiceCapability{} } +func (m *ControllerServiceCapability) String() string { return proto.CompactTextString(m) } +func (*ControllerServiceCapability) ProtoMessage() {} +func (*ControllerServiceCapability) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{29} +} +func (m *ControllerServiceCapability) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerServiceCapability.Unmarshal(m, b) +} +func (m *ControllerServiceCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerServiceCapability.Marshal(b, m, deterministic) +} +func (dst *ControllerServiceCapability) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerServiceCapability.Merge(dst, src) +} +func (m *ControllerServiceCapability) XXX_Size() int { + return xxx_messageInfo_ControllerServiceCapability.Size(m) +} +func (m *ControllerServiceCapability) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerServiceCapability.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerServiceCapability proto.InternalMessageInfo + +type isControllerServiceCapability_Type interface { + isControllerServiceCapability_Type() +} + +type ControllerServiceCapability_Rpc struct { + Rpc *ControllerServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,proto3,oneof"` +} + +func (*ControllerServiceCapability_Rpc) isControllerServiceCapability_Type() {} + +func (m *ControllerServiceCapability) GetType() 
isControllerServiceCapability_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *ControllerServiceCapability) GetRpc() *ControllerServiceCapability_RPC { + if x, ok := m.GetType().(*ControllerServiceCapability_Rpc); ok { + return x.Rpc + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ControllerServiceCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ControllerServiceCapability_OneofMarshaler, _ControllerServiceCapability_OneofUnmarshaler, _ControllerServiceCapability_OneofSizer, []interface{}{ + (*ControllerServiceCapability_Rpc)(nil), + } +} + +func _ControllerServiceCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ControllerServiceCapability) + // type + switch x := m.Type.(type) { + case *ControllerServiceCapability_Rpc: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Rpc); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ControllerServiceCapability.Type has unexpected type %T", x) + } + return nil +} + +func _ControllerServiceCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ControllerServiceCapability) + switch tag { + case 1: // type.rpc + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ControllerServiceCapability_RPC) + err := b.DecodeMessage(msg) + m.Type = &ControllerServiceCapability_Rpc{msg} + return true, err + default: + return false, nil + } +} + +func _ControllerServiceCapability_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ControllerServiceCapability) + // type + switch x := m.Type.(type) { + case *ControllerServiceCapability_Rpc: + s := proto.Size(x.Rpc) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type ControllerServiceCapability_RPC struct { + Type ControllerServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.ControllerServiceCapability_RPC_Type" json:"type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerServiceCapability_RPC) Reset() { *m = ControllerServiceCapability_RPC{} } +func (m *ControllerServiceCapability_RPC) String() string { return proto.CompactTextString(m) } +func (*ControllerServiceCapability_RPC) ProtoMessage() {} +func (*ControllerServiceCapability_RPC) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{29, 0} +} +func (m *ControllerServiceCapability_RPC) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerServiceCapability_RPC.Unmarshal(m, b) +} +func (m *ControllerServiceCapability_RPC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerServiceCapability_RPC.Marshal(b, m, deterministic) +} +func (dst *ControllerServiceCapability_RPC) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerServiceCapability_RPC.Merge(dst, src) +} +func (m *ControllerServiceCapability_RPC) XXX_Size() int { + return xxx_messageInfo_ControllerServiceCapability_RPC.Size(m) +} +func (m *ControllerServiceCapability_RPC) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerServiceCapability_RPC.DiscardUnknown(m) +} + +var 
xxx_messageInfo_ControllerServiceCapability_RPC proto.InternalMessageInfo + +func (m *ControllerServiceCapability_RPC) GetType() ControllerServiceCapability_RPC_Type { + if m != nil { + return m.Type + } + return ControllerServiceCapability_RPC_UNKNOWN +} + +type CreateSnapshotRequest struct { + // The ID of the source volume to be snapshotted. + // This field is REQUIRED. + SourceVolumeId string `protobuf:"bytes,1,opt,name=source_volume_id,json=sourceVolumeId,proto3" json:"source_volume_id,omitempty"` + // The suggested name for the snapshot. This field is REQUIRED for + // idempotency. + // Any Unicode string that conforms to the length limit is allowed + // except those containing the following banned characters: + // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F. + // (These are control characters other than commonly used whitespace.) + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Secrets required by plugin to complete snapshot creation request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + Secrets map[string]string `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Plugin specific parameters passed in as opaque key-value pairs. + // This field is OPTIONAL. The Plugin is responsible for parsing and + // validating these parameters. COs will treat these as opaque. + // Use cases for opaque parameters: + // - Specify a policy to automatically clean up the snapshot. + // - Specify an expiration date for the snapshot. + // - Specify whether the snapshot is readonly or read/write. + // - Specify if the snapshot should be replicated to some place. + // - Specify primary or secondary for replication systems that + // support snapshotting only on primary. 
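Capability discovery ties the oneof machinery above together. Because the generated getters are nil-safe (`GetType` above checks `m != nil`), the `Type` oneof can be inspected without a type switch; a sketch, assuming the `CREATE_DELETE_SNAPSHOT` enum value declared elsewhere in this file:

```go
import (
	"context"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// supportsSnapshots reports whether the controller service advertises the
// CREATE_DELETE_SNAPSHOT capability. GetRpc() returns nil for other oneof
// variants, and GetType() on nil yields the UNKNOWN sentinel.
func supportsSnapshots(ctx context.Context, cc csi.ControllerClient) (bool, error) {
	resp, err := cc.ControllerGetCapabilities(ctx, &csi.ControllerGetCapabilitiesRequest{})
	if err != nil {
		return false, err
	}
	for _, c := range resp.GetCapabilities() {
		if c.GetRpc().GetType() == csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT {
			return true, nil
		}
	}
	return false, nil
}
```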
+ Parameters map[string]string `protobuf:"bytes,4,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateSnapshotRequest) Reset() { *m = CreateSnapshotRequest{} } +func (m *CreateSnapshotRequest) String() string { return proto.CompactTextString(m) } +func (*CreateSnapshotRequest) ProtoMessage() {} +func (*CreateSnapshotRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{30} +} +func (m *CreateSnapshotRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateSnapshotRequest.Unmarshal(m, b) +} +func (m *CreateSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateSnapshotRequest.Marshal(b, m, deterministic) +} +func (dst *CreateSnapshotRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateSnapshotRequest.Merge(dst, src) +} +func (m *CreateSnapshotRequest) XXX_Size() int { + return xxx_messageInfo_CreateSnapshotRequest.Size(m) +} +func (m *CreateSnapshotRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateSnapshotRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateSnapshotRequest proto.InternalMessageInfo + +func (m *CreateSnapshotRequest) GetSourceVolumeId() string { + if m != nil { + return m.SourceVolumeId + } + return "" +} + +func (m *CreateSnapshotRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateSnapshotRequest) GetSecrets() map[string]string { + if m != nil { + return m.Secrets + } + return nil +} + +func (m *CreateSnapshotRequest) GetParameters() map[string]string { + if m != nil { + return m.Parameters + } + return nil +} + +type CreateSnapshotResponse struct { + // Contains all attributes of the newly created snapshot that are + // relevant to the CO along with information required by the Plugin + // to uniquely identify the snapshot. This field is REQUIRED. + Snapshot *Snapshot `protobuf:"bytes,1,opt,name=snapshot,proto3" json:"snapshot,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateSnapshotResponse) Reset() { *m = CreateSnapshotResponse{} } +func (m *CreateSnapshotResponse) String() string { return proto.CompactTextString(m) } +func (*CreateSnapshotResponse) ProtoMessage() {} +func (*CreateSnapshotResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{31} +} +func (m *CreateSnapshotResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateSnapshotResponse.Unmarshal(m, b) +} +func (m *CreateSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateSnapshotResponse.Marshal(b, m, deterministic) +} +func (dst *CreateSnapshotResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateSnapshotResponse.Merge(dst, src) +} +func (m *CreateSnapshotResponse) XXX_Size() int { + return xxx_messageInfo_CreateSnapshotResponse.Size(m) +} +func (m *CreateSnapshotResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CreateSnapshotResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateSnapshotResponse proto.InternalMessageInfo + +func (m *CreateSnapshotResponse) GetSnapshot() *Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +// Information about a specific snapshot. 
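A sketch of the corresponding `CreateSnapshot` call (illustrative helper name). As the `name` field comment above notes, the suggested name is what makes the request idempotent: repeating it with the same name for the same source volume should hand back the existing snapshot rather than cut another one.

```go
import (
	"context"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// snapshotVolume cuts (or re-fetches) a snapshot of srcVolID keyed by name.
func snapshotVolume(ctx context.Context, cc csi.ControllerClient, srcVolID, name string) (*csi.Snapshot, error) {
	resp, err := cc.CreateSnapshot(ctx, &csi.CreateSnapshotRequest{
		SourceVolumeId: srcVolID,
		Name:           name,
	})
	if err != nil {
		return nil, err
	}
	return resp.GetSnapshot(), nil
}
```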
+type Snapshot struct { + // This is the complete size of the snapshot in bytes. The purpose of + // this field is to give CO guidance on how much space is needed to + // create a volume from this snapshot. The size of the volume MUST NOT + // be less than the size of the source snapshot. This field is + // OPTIONAL. If this field is not set, it indicates that this size is + // unknown. The value of this field MUST NOT be negative and a size of + // zero means it is unspecified. + SizeBytes int64 `protobuf:"varint,1,opt,name=size_bytes,json=sizeBytes,proto3" json:"size_bytes,omitempty"` + // The identifier for this snapshot, generated by the plugin. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific snapshot vs all other snapshots supported by this + // plugin. + // This field SHALL be used by the CO in subsequent calls to refer to + // this snapshot. + // The SP is NOT responsible for global uniqueness of snapshot_id + // across multiple SPs. + SnapshotId string `protobuf:"bytes,2,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` + // Identity information for the source volume. Note that creating a + // snapshot from a snapshot is not supported here so the source has to + // be a volume. This field is REQUIRED. + SourceVolumeId string `protobuf:"bytes,3,opt,name=source_volume_id,json=sourceVolumeId,proto3" json:"source_volume_id,omitempty"` + // Timestamp when the point-in-time snapshot is taken on the storage + // system. This field is REQUIRED. + CreationTime *timestamp.Timestamp `protobuf:"bytes,4,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"` + // Indicates if a snapshot is ready to use as a + // `volume_content_source` in a `CreateVolumeRequest`. The default + // value is false. This field is REQUIRED. 
+ ReadyToUse bool `protobuf:"varint,5,opt,name=ready_to_use,json=readyToUse,proto3" json:"ready_to_use,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{32} +} +func (m *Snapshot) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Snapshot.Unmarshal(m, b) +} +func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic) +} +func (dst *Snapshot) XXX_Merge(src proto.Message) { + xxx_messageInfo_Snapshot.Merge(dst, src) +} +func (m *Snapshot) XXX_Size() int { + return xxx_messageInfo_Snapshot.Size(m) +} +func (m *Snapshot) XXX_DiscardUnknown() { + xxx_messageInfo_Snapshot.DiscardUnknown(m) +} + +var xxx_messageInfo_Snapshot proto.InternalMessageInfo + +func (m *Snapshot) GetSizeBytes() int64 { + if m != nil { + return m.SizeBytes + } + return 0 +} + +func (m *Snapshot) GetSnapshotId() string { + if m != nil { + return m.SnapshotId + } + return "" +} + +func (m *Snapshot) GetSourceVolumeId() string { + if m != nil { + return m.SourceVolumeId + } + return "" +} + +func (m *Snapshot) GetCreationTime() *timestamp.Timestamp { + if m != nil { + return m.CreationTime + } + return nil +} + +func (m *Snapshot) GetReadyToUse() bool { + if m != nil { + return m.ReadyToUse + } + return false +} + +type DeleteSnapshotRequest struct { + // The ID of the snapshot to be deleted. + // This field is REQUIRED. + SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` + // Secrets required by plugin to complete snapshot deletion request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. 
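Reading a `Snapshot` back is mostly plain getters, but `creation_time` is a protobuf `Timestamp` and needs converting before use. A sketch, assuming the `ptypes` helpers from the same `github.com/golang/protobuf` module this file already imports:

```go
import (
	"fmt"
	"time"

	"github.com/container-storage-interface/spec/lib/go/csi"
	"github.com/golang/protobuf/ptypes"
)

// describeSnapshot renders the snapshot fields exposed by the accessors
// above; ready_to_use gates whether it can serve as a volume_content_source.
func describeSnapshot(s *csi.Snapshot) (string, error) {
	var created time.Time
	if ts := s.GetCreationTime(); ts != nil {
		t, err := ptypes.Timestamp(ts) // convert *timestamp.Timestamp to time.Time
		if err != nil {
			return "", err
		}
		created = t
	}
	return fmt.Sprintf("snapshot %s of volume %s, cut %s, ready=%v",
		s.GetSnapshotId(), s.GetSourceVolumeId(), created, s.GetReadyToUse()), nil
}
```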
+ Secrets map[string]string `protobuf:"bytes,2,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteSnapshotRequest) Reset() { *m = DeleteSnapshotRequest{} } +func (m *DeleteSnapshotRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteSnapshotRequest) ProtoMessage() {} +func (*DeleteSnapshotRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{33} +} +func (m *DeleteSnapshotRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteSnapshotRequest.Unmarshal(m, b) +} +func (m *DeleteSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteSnapshotRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteSnapshotRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteSnapshotRequest.Merge(dst, src) +} +func (m *DeleteSnapshotRequest) XXX_Size() int { + return xxx_messageInfo_DeleteSnapshotRequest.Size(m) +} +func (m *DeleteSnapshotRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteSnapshotRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteSnapshotRequest proto.InternalMessageInfo + +func (m *DeleteSnapshotRequest) GetSnapshotId() string { + if m != nil { + return m.SnapshotId + } + return "" +} + +func (m *DeleteSnapshotRequest) GetSecrets() map[string]string { + if m != nil { + return m.Secrets + } + return nil +} + +type DeleteSnapshotResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteSnapshotResponse) Reset() { *m = DeleteSnapshotResponse{} } +func (m *DeleteSnapshotResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteSnapshotResponse) ProtoMessage() {} +func (*DeleteSnapshotResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{34} +} +func (m *DeleteSnapshotResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteSnapshotResponse.Unmarshal(m, b) +} +func (m *DeleteSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteSnapshotResponse.Marshal(b, m, deterministic) +} +func (dst *DeleteSnapshotResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteSnapshotResponse.Merge(dst, src) +} +func (m *DeleteSnapshotResponse) XXX_Size() int { + return xxx_messageInfo_DeleteSnapshotResponse.Size(m) +} +func (m *DeleteSnapshotResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteSnapshotResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteSnapshotResponse proto.InternalMessageInfo + +// List all snapshots on the storage system regardless of how they were +// created. +type ListSnapshotsRequest struct { + // If specified (non-zero value), the Plugin MUST NOT return more + // entries than this number in the response. If the actual number of + // entries is more than this number, the Plugin MUST set `next_token` + // in the response which can be used to get the next page of entries + // in the subsequent `ListSnapshots` call. This field is OPTIONAL. If + // not specified (zero value), it means there is no restriction on the + // number of entries that can be returned. + // The value of this field MUST NOT be negative. 
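Deletion mirrors creation; a sketch (illustrative helper name). The CSI spec requires this RPC to be idempotent, so deleting an already-removed snapshot is expected to return success with the empty response:

```go
import (
	"context"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// deleteSnapshot removes a snapshot by the plugin-generated snapshot_id.
func deleteSnapshot(ctx context.Context, cc csi.ControllerClient, snapID string) error {
	_, err := cc.DeleteSnapshot(ctx, &csi.DeleteSnapshotRequest{SnapshotId: snapID})
	return err
}
```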
+ MaxEntries int32 `protobuf:"varint,1,opt,name=max_entries,json=maxEntries,proto3" json:"max_entries,omitempty"` + // A token to specify where to start paginating. Set this field to + // `next_token` returned by a previous `ListSnapshots` call to get the + // next page of entries. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. + StartingToken string `protobuf:"bytes,2,opt,name=starting_token,json=startingToken,proto3" json:"starting_token,omitempty"` + // Identity information for the source volume. This field is OPTIONAL. + // It can be used to list snapshots by volume. + SourceVolumeId string `protobuf:"bytes,3,opt,name=source_volume_id,json=sourceVolumeId,proto3" json:"source_volume_id,omitempty"` + // Identity information for a specific snapshot. This field is + // OPTIONAL. It can be used to list only a specific snapshot. + // ListSnapshots will return with current snapshot information + // and will not block if the snapshot is being processed after + // it is cut. + SnapshotId string `protobuf:"bytes,4,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListSnapshotsRequest) Reset() { *m = ListSnapshotsRequest{} } +func (m *ListSnapshotsRequest) String() string { return proto.CompactTextString(m) } +func (*ListSnapshotsRequest) ProtoMessage() {} +func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{35} +} +func (m *ListSnapshotsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListSnapshotsRequest.Unmarshal(m, b) +} +func (m *ListSnapshotsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListSnapshotsRequest.Marshal(b, m, deterministic) +} +func (dst *ListSnapshotsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListSnapshotsRequest.Merge(dst, src) +} +func (m *ListSnapshotsRequest) XXX_Size() int { + return xxx_messageInfo_ListSnapshotsRequest.Size(m) +} +func (m *ListSnapshotsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListSnapshotsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListSnapshotsRequest proto.InternalMessageInfo + +func (m *ListSnapshotsRequest) GetMaxEntries() int32 { + if m != nil { + return m.MaxEntries + } + return 0 +} + +func (m *ListSnapshotsRequest) GetStartingToken() string { + if m != nil { + return m.StartingToken + } + return "" +} + +func (m *ListSnapshotsRequest) GetSourceVolumeId() string { + if m != nil { + return m.SourceVolumeId + } + return "" +} + +func (m *ListSnapshotsRequest) GetSnapshotId() string { + if m != nil { + return m.SnapshotId + } + return "" +} + +type ListSnapshotsResponse struct { + Entries []*ListSnapshotsResponse_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` + // This token allows you to get the next page of entries for + // `ListSnapshots` request. If the number of entries is larger than + // `max_entries`, use the `next_token` as a value for the + // `starting_token` field in the next `ListSnapshots` request. This + // field is OPTIONAL. + // An empty string is equal to an unspecified field value. 
+ NextToken string `protobuf:"bytes,2,opt,name=next_token,json=nextToken,proto3" json:"next_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListSnapshotsResponse) Reset() { *m = ListSnapshotsResponse{} } +func (m *ListSnapshotsResponse) String() string { return proto.CompactTextString(m) } +func (*ListSnapshotsResponse) ProtoMessage() {} +func (*ListSnapshotsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{36} +} +func (m *ListSnapshotsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListSnapshotsResponse.Unmarshal(m, b) +} +func (m *ListSnapshotsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListSnapshotsResponse.Marshal(b, m, deterministic) +} +func (dst *ListSnapshotsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListSnapshotsResponse.Merge(dst, src) +} +func (m *ListSnapshotsResponse) XXX_Size() int { + return xxx_messageInfo_ListSnapshotsResponse.Size(m) +} +func (m *ListSnapshotsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListSnapshotsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListSnapshotsResponse proto.InternalMessageInfo + +func (m *ListSnapshotsResponse) GetEntries() []*ListSnapshotsResponse_Entry { + if m != nil { + return m.Entries + } + return nil +} + +func (m *ListSnapshotsResponse) GetNextToken() string { + if m != nil { + return m.NextToken + } + return "" +} + +type ListSnapshotsResponse_Entry struct { + Snapshot *Snapshot `protobuf:"bytes,1,opt,name=snapshot,proto3" json:"snapshot,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListSnapshotsResponse_Entry) Reset() { *m = ListSnapshotsResponse_Entry{} } +func (m *ListSnapshotsResponse_Entry) String() string { return proto.CompactTextString(m) } +func (*ListSnapshotsResponse_Entry) ProtoMessage() {} +func (*ListSnapshotsResponse_Entry) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{36, 0} +} +func (m *ListSnapshotsResponse_Entry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListSnapshotsResponse_Entry.Unmarshal(m, b) +} +func (m *ListSnapshotsResponse_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListSnapshotsResponse_Entry.Marshal(b, m, deterministic) +} +func (dst *ListSnapshotsResponse_Entry) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListSnapshotsResponse_Entry.Merge(dst, src) +} +func (m *ListSnapshotsResponse_Entry) XXX_Size() int { + return xxx_messageInfo_ListSnapshotsResponse_Entry.Size(m) +} +func (m *ListSnapshotsResponse_Entry) XXX_DiscardUnknown() { + xxx_messageInfo_ListSnapshotsResponse_Entry.DiscardUnknown(m) +} + +var xxx_messageInfo_ListSnapshotsResponse_Entry proto.InternalMessageInfo + +func (m *ListSnapshotsResponse_Entry) GetSnapshot() *Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +type NodeStageVolumeRequest struct { + // The ID of the volume to publish. This field is REQUIRED. 
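`ListSnapshots` pages the same way `ListVolumes` does, with the extra OPTIONAL `source_volume_id`/`snapshot_id` filters described above. A sketch narrowing the listing to one source volume:

```go
import (
	"context"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// snapshotsOfVolume pages through ListSnapshots for a single source volume.
func snapshotsOfVolume(ctx context.Context, cc csi.ControllerClient, srcVolID string) ([]*csi.Snapshot, error) {
	var out []*csi.Snapshot
	token := ""
	for {
		resp, err := cc.ListSnapshots(ctx, &csi.ListSnapshotsRequest{
			SourceVolumeId: srcVolID, // OPTIONAL filter; leave "" to list everything
			StartingToken:  token,
		})
		if err != nil {
			return nil, err
		}
		for _, e := range resp.GetEntries() {
			out = append(out, e.GetSnapshot())
		}
		token = resp.GetNextToken()
		if token == "" {
			return out, nil
		}
	}
}
```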
+ VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // The CO SHALL set this field to the value returned by + // `ControllerPublishVolume` if the corresponding Controller Plugin + // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be + // left unset if the corresponding Controller Plugin does not have + // this capability. This is an OPTIONAL field. + PublishContext map[string]string `protobuf:"bytes,2,rep,name=publish_context,json=publishContext,proto3" json:"publish_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The path to which the volume MAY be staged. It MUST be an + // absolute path in the root filesystem of the process serving this + // request, and MUST be a directory. The CO SHALL ensure that there + // is only one `staging_target_path` per volume. The CO SHALL ensure + // that the path is directory and that the process serving the + // request has `read` and `write` permission to that directory. The + // CO SHALL be responsible for creating the directory if it does not + // exist. + // This is a REQUIRED field. + StagingTargetPath string `protobuf:"bytes,3,opt,name=staging_target_path,json=stagingTargetPath,proto3" json:"staging_target_path,omitempty"` + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the staged volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. + // This is a REQUIRED field. + VolumeCapability *VolumeCapability `protobuf:"bytes,4,opt,name=volume_capability,json=volumeCapability,proto3" json:"volume_capability,omitempty"` + // Secrets required by plugin to complete node stage volume request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + Secrets map[string]string `protobuf:"bytes,5,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. 
+ VolumeContext map[string]string `protobuf:"bytes,6,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeStageVolumeRequest) Reset() { *m = NodeStageVolumeRequest{} } +func (m *NodeStageVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*NodeStageVolumeRequest) ProtoMessage() {} +func (*NodeStageVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{37} +} +func (m *NodeStageVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeStageVolumeRequest.Unmarshal(m, b) +} +func (m *NodeStageVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeStageVolumeRequest.Marshal(b, m, deterministic) +} +func (dst *NodeStageVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeStageVolumeRequest.Merge(dst, src) +} +func (m *NodeStageVolumeRequest) XXX_Size() int { + return xxx_messageInfo_NodeStageVolumeRequest.Size(m) +} +func (m *NodeStageVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeStageVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeStageVolumeRequest proto.InternalMessageInfo + +func (m *NodeStageVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *NodeStageVolumeRequest) GetPublishContext() map[string]string { + if m != nil { + return m.PublishContext + } + return nil +} + +func (m *NodeStageVolumeRequest) GetStagingTargetPath() string { + if m != nil { + return m.StagingTargetPath + } + return "" +} + +func (m *NodeStageVolumeRequest) GetVolumeCapability() *VolumeCapability { + if m != nil { + return m.VolumeCapability + } + return nil +} + +func (m *NodeStageVolumeRequest) GetSecrets() map[string]string { + if m != nil { + return m.Secrets + } + return nil +} + +func (m *NodeStageVolumeRequest) GetVolumeContext() map[string]string { + if m != nil { + return m.VolumeContext + } + return nil +} + +type NodeStageVolumeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeStageVolumeResponse) Reset() { *m = NodeStageVolumeResponse{} } +func (m *NodeStageVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*NodeStageVolumeResponse) ProtoMessage() {} +func (*NodeStageVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{38} +} +func (m *NodeStageVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeStageVolumeResponse.Unmarshal(m, b) +} +func (m *NodeStageVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeStageVolumeResponse.Marshal(b, m, deterministic) +} +func (dst *NodeStageVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeStageVolumeResponse.Merge(dst, src) +} +func (m *NodeStageVolumeResponse) XXX_Size() int { + return xxx_messageInfo_NodeStageVolumeResponse.Size(m) +} +func (m *NodeStageVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeStageVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeStageVolumeResponse proto.InternalMessageInfo + +type NodeUnstageVolumeRequest struct { + // The ID of the volume. This field is REQUIRED. 
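A sketch of staging through the generated `NodeClient` (helper name illustrative). The publish context threaded in here is the `PublishContext` map a `ControllerPublishVolumeResponse` handed back earlier, as its field comment requires:

```go
import (
	"context"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// stageVolume stages volID at stagingPath. publishCtx is set only when the
// controller plugin has the PUBLISH_UNPUBLISH_VOLUME capability, per the
// publish_context comment above.
func stageVolume(ctx context.Context, nc csi.NodeClient, volID, stagingPath string,
	publishCtx map[string]string, vcap *csi.VolumeCapability) error {
	_, err := nc.NodeStageVolume(ctx, &csi.NodeStageVolumeRequest{
		VolumeId:          volID,
		PublishContext:    publishCtx,
		StagingTargetPath: stagingPath, // the CO creates this directory beforehand
		VolumeCapability:  vcap,
	})
	return err
}
```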
+ VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // The path at which the volume was staged. It MUST be an absolute + // path in the root filesystem of the process serving this request. + // This is a REQUIRED field. + StagingTargetPath string `protobuf:"bytes,2,opt,name=staging_target_path,json=stagingTargetPath,proto3" json:"staging_target_path,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeUnstageVolumeRequest) Reset() { *m = NodeUnstageVolumeRequest{} } +func (m *NodeUnstageVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*NodeUnstageVolumeRequest) ProtoMessage() {} +func (*NodeUnstageVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{39} +} +func (m *NodeUnstageVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeUnstageVolumeRequest.Unmarshal(m, b) +} +func (m *NodeUnstageVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeUnstageVolumeRequest.Marshal(b, m, deterministic) +} +func (dst *NodeUnstageVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeUnstageVolumeRequest.Merge(dst, src) +} +func (m *NodeUnstageVolumeRequest) XXX_Size() int { + return xxx_messageInfo_NodeUnstageVolumeRequest.Size(m) +} +func (m *NodeUnstageVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeUnstageVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeUnstageVolumeRequest proto.InternalMessageInfo + +func (m *NodeUnstageVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *NodeUnstageVolumeRequest) GetStagingTargetPath() string { + if m != nil { + return m.StagingTargetPath + } + return "" +} + +type NodeUnstageVolumeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeUnstageVolumeResponse) Reset() { *m = NodeUnstageVolumeResponse{} } +func (m *NodeUnstageVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*NodeUnstageVolumeResponse) ProtoMessage() {} +func (*NodeUnstageVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{40} +} +func (m *NodeUnstageVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeUnstageVolumeResponse.Unmarshal(m, b) +} +func (m *NodeUnstageVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeUnstageVolumeResponse.Marshal(b, m, deterministic) +} +func (dst *NodeUnstageVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeUnstageVolumeResponse.Merge(dst, src) +} +func (m *NodeUnstageVolumeResponse) XXX_Size() int { + return xxx_messageInfo_NodeUnstageVolumeResponse.Size(m) +} +func (m *NodeUnstageVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeUnstageVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeUnstageVolumeResponse proto.InternalMessageInfo + +type NodePublishVolumeRequest struct { + // The ID of the volume to publish. This field is REQUIRED. 
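The matching teardown is symmetric; a sketch (illustrative helper name):

```go
import (
	"context"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// unstageVolume reverses NodeStageVolume using the same staging path the
// volume was staged at; both request fields are REQUIRED.
func unstageVolume(ctx context.Context, nc csi.NodeClient, volID, stagingPath string) error {
	_, err := nc.NodeUnstageVolume(ctx, &csi.NodeUnstageVolumeRequest{
		VolumeId:          volID,
		StagingTargetPath: stagingPath,
	})
	return err
}
```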
+ VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // The CO SHALL set this field to the value returned by + // `ControllerPublishVolume` if the corresponding Controller Plugin + // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be + // left unset if the corresponding Controller Plugin does not have + // this capability. This is an OPTIONAL field. + PublishContext map[string]string `protobuf:"bytes,2,rep,name=publish_context,json=publishContext,proto3" json:"publish_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The path to which the volume was staged by `NodeStageVolume`. + // It MUST be an absolute path in the root filesystem of the process + // serving this request. + // It MUST be set if the Node Plugin implements the + // `STAGE_UNSTAGE_VOLUME` node capability. + // This is an OPTIONAL field. + StagingTargetPath string `protobuf:"bytes,3,opt,name=staging_target_path,json=stagingTargetPath,proto3" json:"staging_target_path,omitempty"` + // The path to which the volume will be published. It MUST be an + // absolute path in the root filesystem of the process serving this + // request. The CO SHALL ensure uniqueness of target_path per volume. + // The CO SHALL ensure that the parent directory of this path exists + // and that the process serving the request has `read` and `write` + // permissions to that parent directory. + // For volumes with an access type of block, the SP SHALL place the + // block device at target_path. + // For volumes with an access type of mount, the SP SHALL place the + // mounted directory at target_path. + // Creation of target_path is the responsibility of the SP. + // This is a REQUIRED field. + TargetPath string `protobuf:"bytes,4,opt,name=target_path,json=targetPath,proto3" json:"target_path,omitempty"` + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the published volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. + // This is a REQUIRED field. + VolumeCapability *VolumeCapability `protobuf:"bytes,5,opt,name=volume_capability,json=volumeCapability,proto3" json:"volume_capability,omitempty"` + // Indicates SP MUST publish the volume in readonly mode. + // This field is REQUIRED. + Readonly bool `protobuf:"varint,6,opt,name=readonly,proto3" json:"readonly,omitempty"` + // Secrets required by plugin to complete node publish volume request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + Secrets map[string]string `protobuf:"bytes,7,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. 
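Finally, a sketch of publishing a staged volume at its workload-visible path (helper name illustrative). `staging_target_path` is only set when the node plugin advertises `STAGE_UNSTAGE_VOLUME`; `readonly` is REQUIRED either way:

```go
import (
	"context"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// publishVolume publishes a previously staged volume at targetPath.
func publishVolume(ctx context.Context, nc csi.NodeClient, volID, stagingPath, targetPath string,
	vcap *csi.VolumeCapability, readOnly bool) error {
	_, err := nc.NodePublishVolume(ctx, &csi.NodePublishVolumeRequest{
		VolumeId:          volID,
		StagingTargetPath: stagingPath, // "" when STAGE_UNSTAGE_VOLUME is unsupported
		TargetPath:        targetPath,  // created by the SP, per the field comment
		VolumeCapability:  vcap,
		Readonly:          readOnly,
	})
	return err
}
```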
+ VolumeContext map[string]string `protobuf:"bytes,8,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodePublishVolumeRequest) Reset() { *m = NodePublishVolumeRequest{} } +func (m *NodePublishVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*NodePublishVolumeRequest) ProtoMessage() {} +func (*NodePublishVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{41} +} +func (m *NodePublishVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodePublishVolumeRequest.Unmarshal(m, b) +} +func (m *NodePublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodePublishVolumeRequest.Marshal(b, m, deterministic) +} +func (dst *NodePublishVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodePublishVolumeRequest.Merge(dst, src) +} +func (m *NodePublishVolumeRequest) XXX_Size() int { + return xxx_messageInfo_NodePublishVolumeRequest.Size(m) +} +func (m *NodePublishVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodePublishVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodePublishVolumeRequest proto.InternalMessageInfo + +func (m *NodePublishVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *NodePublishVolumeRequest) GetPublishContext() map[string]string { + if m != nil { + return m.PublishContext + } + return nil +} + +func (m *NodePublishVolumeRequest) GetStagingTargetPath() string { + if m != nil { + return m.StagingTargetPath + } + return "" +} + +func (m *NodePublishVolumeRequest) GetTargetPath() string { + if m != nil { + return m.TargetPath + } + return "" +} + +func (m *NodePublishVolumeRequest) GetVolumeCapability() *VolumeCapability { + if m != nil { + return m.VolumeCapability + } + return nil +} + +func (m *NodePublishVolumeRequest) GetReadonly() bool { + if m != nil { + return m.Readonly + } + return false +} + +func (m *NodePublishVolumeRequest) GetSecrets() map[string]string { + if m != nil { + return m.Secrets + } + return nil +} + +func (m *NodePublishVolumeRequest) GetVolumeContext() map[string]string { + if m != nil { + return m.VolumeContext + } + return nil +} + +type NodePublishVolumeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodePublishVolumeResponse) Reset() { *m = NodePublishVolumeResponse{} } +func (m *NodePublishVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*NodePublishVolumeResponse) ProtoMessage() {} +func (*NodePublishVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{42} +} +func (m *NodePublishVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodePublishVolumeResponse.Unmarshal(m, b) +} +func (m *NodePublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodePublishVolumeResponse.Marshal(b, m, deterministic) +} +func (dst *NodePublishVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodePublishVolumeResponse.Merge(dst, src) +} +func (m *NodePublishVolumeResponse) XXX_Size() int { + return 
xxx_messageInfo_NodePublishVolumeResponse.Size(m) +} +func (m *NodePublishVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodePublishVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodePublishVolumeResponse proto.InternalMessageInfo + +type NodeUnpublishVolumeRequest struct { + // The ID of the volume. This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // The path at which the volume was published. It MUST be an absolute + // path in the root filesystem of the process serving this request. + // The SP MUST delete the file or directory it created at this path. + // This is a REQUIRED field. + TargetPath string `protobuf:"bytes,2,opt,name=target_path,json=targetPath,proto3" json:"target_path,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeUnpublishVolumeRequest) Reset() { *m = NodeUnpublishVolumeRequest{} } +func (m *NodeUnpublishVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*NodeUnpublishVolumeRequest) ProtoMessage() {} +func (*NodeUnpublishVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{43} +} +func (m *NodeUnpublishVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeUnpublishVolumeRequest.Unmarshal(m, b) +} +func (m *NodeUnpublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeUnpublishVolumeRequest.Marshal(b, m, deterministic) +} +func (dst *NodeUnpublishVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeUnpublishVolumeRequest.Merge(dst, src) +} +func (m *NodeUnpublishVolumeRequest) XXX_Size() int { + return xxx_messageInfo_NodeUnpublishVolumeRequest.Size(m) +} +func (m *NodeUnpublishVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeUnpublishVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeUnpublishVolumeRequest proto.InternalMessageInfo + +func (m *NodeUnpublishVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *NodeUnpublishVolumeRequest) GetTargetPath() string { + if m != nil { + return m.TargetPath + } + return "" +} + +type NodeUnpublishVolumeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeUnpublishVolumeResponse) Reset() { *m = NodeUnpublishVolumeResponse{} } +func (m *NodeUnpublishVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*NodeUnpublishVolumeResponse) ProtoMessage() {} +func (*NodeUnpublishVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{44} +} +func (m *NodeUnpublishVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeUnpublishVolumeResponse.Unmarshal(m, b) +} +func (m *NodeUnpublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeUnpublishVolumeResponse.Marshal(b, m, deterministic) +} +func (dst *NodeUnpublishVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeUnpublishVolumeResponse.Merge(dst, src) +} +func (m *NodeUnpublishVolumeResponse) XXX_Size() int { + return xxx_messageInfo_NodeUnpublishVolumeResponse.Size(m) +} +func (m *NodeUnpublishVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeUnpublishVolumeResponse.DiscardUnknown(m) +} + +var 
xxx_messageInfo_NodeUnpublishVolumeResponse proto.InternalMessageInfo + +type NodeGetVolumeStatsRequest struct { + // The ID of the volume. This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // It can be any valid path where volume was previously + // staged or published. + // It MUST be an absolute path in the root filesystem of + // the process serving this request. + // This is a REQUIRED field. + VolumePath string `protobuf:"bytes,2,opt,name=volume_path,json=volumePath,proto3" json:"volume_path,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeGetVolumeStatsRequest) Reset() { *m = NodeGetVolumeStatsRequest{} } +func (m *NodeGetVolumeStatsRequest) String() string { return proto.CompactTextString(m) } +func (*NodeGetVolumeStatsRequest) ProtoMessage() {} +func (*NodeGetVolumeStatsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{45} +} +func (m *NodeGetVolumeStatsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeGetVolumeStatsRequest.Unmarshal(m, b) +} +func (m *NodeGetVolumeStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeGetVolumeStatsRequest.Marshal(b, m, deterministic) +} +func (dst *NodeGetVolumeStatsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetVolumeStatsRequest.Merge(dst, src) +} +func (m *NodeGetVolumeStatsRequest) XXX_Size() int { + return xxx_messageInfo_NodeGetVolumeStatsRequest.Size(m) +} +func (m *NodeGetVolumeStatsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeGetVolumeStatsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeGetVolumeStatsRequest proto.InternalMessageInfo + +func (m *NodeGetVolumeStatsRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *NodeGetVolumeStatsRequest) GetVolumePath() string { + if m != nil { + return m.VolumePath + } + return "" +} + +type NodeGetVolumeStatsResponse struct { + // This field is OPTIONAL. 
+ Usage []*VolumeUsage `protobuf:"bytes,1,rep,name=usage,proto3" json:"usage,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeGetVolumeStatsResponse) Reset() { *m = NodeGetVolumeStatsResponse{} } +func (m *NodeGetVolumeStatsResponse) String() string { return proto.CompactTextString(m) } +func (*NodeGetVolumeStatsResponse) ProtoMessage() {} +func (*NodeGetVolumeStatsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{46} +} +func (m *NodeGetVolumeStatsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeGetVolumeStatsResponse.Unmarshal(m, b) +} +func (m *NodeGetVolumeStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeGetVolumeStatsResponse.Marshal(b, m, deterministic) +} +func (dst *NodeGetVolumeStatsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetVolumeStatsResponse.Merge(dst, src) +} +func (m *NodeGetVolumeStatsResponse) XXX_Size() int { + return xxx_messageInfo_NodeGetVolumeStatsResponse.Size(m) +} +func (m *NodeGetVolumeStatsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeGetVolumeStatsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeGetVolumeStatsResponse proto.InternalMessageInfo + +func (m *NodeGetVolumeStatsResponse) GetUsage() []*VolumeUsage { + if m != nil { + return m.Usage + } + return nil +} + +type VolumeUsage struct { + // The available capacity in specified Unit. This field is OPTIONAL. + // The value of this field MUST NOT be negative. + Available int64 `protobuf:"varint,1,opt,name=available,proto3" json:"available,omitempty"` + // The total capacity in specified Unit. This field is REQUIRED. + // The value of this field MUST NOT be negative. + Total int64 `protobuf:"varint,2,opt,name=total,proto3" json:"total,omitempty"` + // The used capacity in specified Unit. This field is OPTIONAL. + // The value of this field MUST NOT be negative. + Used int64 `protobuf:"varint,3,opt,name=used,proto3" json:"used,omitempty"` + // Units by which values are measured. This field is REQUIRED. 
+ Unit VolumeUsage_Unit `protobuf:"varint,4,opt,name=unit,proto3,enum=csi.v1.VolumeUsage_Unit" json:"unit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VolumeUsage) Reset() { *m = VolumeUsage{} } +func (m *VolumeUsage) String() string { return proto.CompactTextString(m) } +func (*VolumeUsage) ProtoMessage() {} +func (*VolumeUsage) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{47} +} +func (m *VolumeUsage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VolumeUsage.Unmarshal(m, b) +} +func (m *VolumeUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VolumeUsage.Marshal(b, m, deterministic) +} +func (dst *VolumeUsage) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeUsage.Merge(dst, src) +} +func (m *VolumeUsage) XXX_Size() int { + return xxx_messageInfo_VolumeUsage.Size(m) +} +func (m *VolumeUsage) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeUsage.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeUsage proto.InternalMessageInfo + +func (m *VolumeUsage) GetAvailable() int64 { + if m != nil { + return m.Available + } + return 0 +} + +func (m *VolumeUsage) GetTotal() int64 { + if m != nil { + return m.Total + } + return 0 +} + +func (m *VolumeUsage) GetUsed() int64 { + if m != nil { + return m.Used + } + return 0 +} + +func (m *VolumeUsage) GetUnit() VolumeUsage_Unit { + if m != nil { + return m.Unit + } + return VolumeUsage_UNKNOWN +} + +type NodeGetCapabilitiesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeGetCapabilitiesRequest) Reset() { *m = NodeGetCapabilitiesRequest{} } +func (m *NodeGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) } +func (*NodeGetCapabilitiesRequest) ProtoMessage() {} +func (*NodeGetCapabilitiesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{48} +} +func (m *NodeGetCapabilitiesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeGetCapabilitiesRequest.Unmarshal(m, b) +} +func (m *NodeGetCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeGetCapabilitiesRequest.Marshal(b, m, deterministic) +} +func (dst *NodeGetCapabilitiesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetCapabilitiesRequest.Merge(dst, src) +} +func (m *NodeGetCapabilitiesRequest) XXX_Size() int { + return xxx_messageInfo_NodeGetCapabilitiesRequest.Size(m) +} +func (m *NodeGetCapabilitiesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeGetCapabilitiesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeGetCapabilitiesRequest proto.InternalMessageInfo + +type NodeGetCapabilitiesResponse struct { + // All the capabilities that the node service supports. This field + // is OPTIONAL. 
+ Capabilities []*NodeServiceCapability `protobuf:"bytes,1,rep,name=capabilities,proto3" json:"capabilities,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeGetCapabilitiesResponse) Reset() { *m = NodeGetCapabilitiesResponse{} } +func (m *NodeGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*NodeGetCapabilitiesResponse) ProtoMessage() {} +func (*NodeGetCapabilitiesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{49} +} +func (m *NodeGetCapabilitiesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeGetCapabilitiesResponse.Unmarshal(m, b) +} +func (m *NodeGetCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeGetCapabilitiesResponse.Marshal(b, m, deterministic) +} +func (dst *NodeGetCapabilitiesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetCapabilitiesResponse.Merge(dst, src) +} +func (m *NodeGetCapabilitiesResponse) XXX_Size() int { + return xxx_messageInfo_NodeGetCapabilitiesResponse.Size(m) +} +func (m *NodeGetCapabilitiesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeGetCapabilitiesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeGetCapabilitiesResponse proto.InternalMessageInfo + +func (m *NodeGetCapabilitiesResponse) GetCapabilities() []*NodeServiceCapability { + if m != nil { + return m.Capabilities + } + return nil +} + +// Specifies a capability of the node service. +type NodeServiceCapability struct { + // Types that are valid to be assigned to Type: + // *NodeServiceCapability_Rpc + Type isNodeServiceCapability_Type `protobuf_oneof:"type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeServiceCapability) Reset() { *m = NodeServiceCapability{} } +func (m *NodeServiceCapability) String() string { return proto.CompactTextString(m) } +func (*NodeServiceCapability) ProtoMessage() {} +func (*NodeServiceCapability) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{50} +} +func (m *NodeServiceCapability) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeServiceCapability.Unmarshal(m, b) +} +func (m *NodeServiceCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeServiceCapability.Marshal(b, m, deterministic) +} +func (dst *NodeServiceCapability) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeServiceCapability.Merge(dst, src) +} +func (m *NodeServiceCapability) XXX_Size() int { + return xxx_messageInfo_NodeServiceCapability.Size(m) +} +func (m *NodeServiceCapability) XXX_DiscardUnknown() { + xxx_messageInfo_NodeServiceCapability.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeServiceCapability proto.InternalMessageInfo + +type isNodeServiceCapability_Type interface { + isNodeServiceCapability_Type() +} + +type NodeServiceCapability_Rpc struct { + Rpc *NodeServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,proto3,oneof"` +} + +func (*NodeServiceCapability_Rpc) isNodeServiceCapability_Type() {} + +func (m *NodeServiceCapability) GetType() isNodeServiceCapability_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *NodeServiceCapability) GetRpc() *NodeServiceCapability_RPC { + if x, ok := m.GetType().(*NodeServiceCapability_Rpc); ok { + return x.Rpc + } + return nil +} + +// XXX_OneofFuncs is for the 
internal use of the proto package. +func (*NodeServiceCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _NodeServiceCapability_OneofMarshaler, _NodeServiceCapability_OneofUnmarshaler, _NodeServiceCapability_OneofSizer, []interface{}{ + (*NodeServiceCapability_Rpc)(nil), + } +} + +func _NodeServiceCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*NodeServiceCapability) + // type + switch x := m.Type.(type) { + case *NodeServiceCapability_Rpc: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Rpc); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("NodeServiceCapability.Type has unexpected type %T", x) + } + return nil +} + +func _NodeServiceCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*NodeServiceCapability) + switch tag { + case 1: // type.rpc + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(NodeServiceCapability_RPC) + err := b.DecodeMessage(msg) + m.Type = &NodeServiceCapability_Rpc{msg} + return true, err + default: + return false, nil + } +} + +func _NodeServiceCapability_OneofSizer(msg proto.Message) (n int) { + m := msg.(*NodeServiceCapability) + // type + switch x := m.Type.(type) { + case *NodeServiceCapability_Rpc: + s := proto.Size(x.Rpc) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type NodeServiceCapability_RPC struct { + Type NodeServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.NodeServiceCapability_RPC_Type" json:"type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeServiceCapability_RPC) Reset() { *m = NodeServiceCapability_RPC{} } +func (m *NodeServiceCapability_RPC) String() string { return proto.CompactTextString(m) } +func (*NodeServiceCapability_RPC) ProtoMessage() {} +func (*NodeServiceCapability_RPC) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{50, 0} +} +func (m *NodeServiceCapability_RPC) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeServiceCapability_RPC.Unmarshal(m, b) +} +func (m *NodeServiceCapability_RPC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeServiceCapability_RPC.Marshal(b, m, deterministic) +} +func (dst *NodeServiceCapability_RPC) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeServiceCapability_RPC.Merge(dst, src) +} +func (m *NodeServiceCapability_RPC) XXX_Size() int { + return xxx_messageInfo_NodeServiceCapability_RPC.Size(m) +} +func (m *NodeServiceCapability_RPC) XXX_DiscardUnknown() { + xxx_messageInfo_NodeServiceCapability_RPC.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeServiceCapability_RPC proto.InternalMessageInfo + +func (m *NodeServiceCapability_RPC) GetType() NodeServiceCapability_RPC_Type { + if m != nil { + return m.Type + } + return NodeServiceCapability_RPC_UNKNOWN +} + +type NodeGetInfoRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeGetInfoRequest) Reset() { *m = NodeGetInfoRequest{} } +func (m *NodeGetInfoRequest) String() string { return 
proto.CompactTextString(m) } +func (*NodeGetInfoRequest) ProtoMessage() {} +func (*NodeGetInfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{51} +} +func (m *NodeGetInfoRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeGetInfoRequest.Unmarshal(m, b) +} +func (m *NodeGetInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeGetInfoRequest.Marshal(b, m, deterministic) +} +func (dst *NodeGetInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetInfoRequest.Merge(dst, src) +} +func (m *NodeGetInfoRequest) XXX_Size() int { + return xxx_messageInfo_NodeGetInfoRequest.Size(m) +} +func (m *NodeGetInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeGetInfoRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeGetInfoRequest proto.InternalMessageInfo + +type NodeGetInfoResponse struct { + // The identifier of the node as understood by the SP. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific node vs all other nodes supported by this plugin. + // This field SHALL be used by the CO in subsequent calls, including + // `ControllerPublishVolume`, to refer to this node. + // The SP is NOT responsible for global uniqueness of node_id across + // multiple SPs. + NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + // Maximum number of volumes that controller can publish to the node. + // If value is not set or zero CO SHALL decide how many volumes of + // this type can be published by the controller to the node. The + // plugin MUST NOT set negative values here. + // This field is OPTIONAL. + MaxVolumesPerNode int64 `protobuf:"varint,2,opt,name=max_volumes_per_node,json=maxVolumesPerNode,proto3" json:"max_volumes_per_node,omitempty"` + // Specifies where (regions, zones, racks, etc.) the node is + // accessible from. + // A plugin that returns this field MUST also set the + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. + // COs MAY use this information along with the topology information + // returned in CreateVolumeResponse to ensure that a given volume is + // accessible from a given node when scheduling workloads. + // This field is OPTIONAL. If it is not specified, the CO MAY assume + // the node is not subject to any topological constraint, and MAY + // schedule workloads that reference any volume V, such that there are + // no topological constraints declared for V. + // + // Example 1: + // accessible_topology = + // {"region": "R1", "zone": "Z2"} + // Indicates the node exists within the "region" "R1" and the "zone" + // "Z2". 
+ AccessibleTopology *Topology `protobuf:"bytes,3,opt,name=accessible_topology,json=accessibleTopology,proto3" json:"accessible_topology,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeGetInfoResponse) Reset() { *m = NodeGetInfoResponse{} } +func (m *NodeGetInfoResponse) String() string { return proto.CompactTextString(m) } +func (*NodeGetInfoResponse) ProtoMessage() {} +func (*NodeGetInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_1092d4f3f3c8dc30, []int{52} +} +func (m *NodeGetInfoResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeGetInfoResponse.Unmarshal(m, b) +} +func (m *NodeGetInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeGetInfoResponse.Marshal(b, m, deterministic) +} +func (dst *NodeGetInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetInfoResponse.Merge(dst, src) +} +func (m *NodeGetInfoResponse) XXX_Size() int { + return xxx_messageInfo_NodeGetInfoResponse.Size(m) +} +func (m *NodeGetInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeGetInfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeGetInfoResponse proto.InternalMessageInfo + +func (m *NodeGetInfoResponse) GetNodeId() string { + if m != nil { + return m.NodeId + } + return "" +} + +func (m *NodeGetInfoResponse) GetMaxVolumesPerNode() int64 { + if m != nil { + return m.MaxVolumesPerNode + } + return 0 +} + +func (m *NodeGetInfoResponse) GetAccessibleTopology() *Topology { + if m != nil { + return m.AccessibleTopology + } + return nil +} + +var E_CsiSecret = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 1059, + Name: "csi.v1.csi_secret", + Tag: "varint,1059,opt,name=csi_secret,json=csiSecret", + Filename: "github.com/container-storage-interface/spec/csi.proto", +} + +func init() { + proto.RegisterType((*GetPluginInfoRequest)(nil), "csi.v1.GetPluginInfoRequest") + proto.RegisterType((*GetPluginInfoResponse)(nil), "csi.v1.GetPluginInfoResponse") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.GetPluginInfoResponse.ManifestEntry") + proto.RegisterType((*GetPluginCapabilitiesRequest)(nil), "csi.v1.GetPluginCapabilitiesRequest") + proto.RegisterType((*GetPluginCapabilitiesResponse)(nil), "csi.v1.GetPluginCapabilitiesResponse") + proto.RegisterType((*PluginCapability)(nil), "csi.v1.PluginCapability") + proto.RegisterType((*PluginCapability_Service)(nil), "csi.v1.PluginCapability.Service") + proto.RegisterType((*ProbeRequest)(nil), "csi.v1.ProbeRequest") + proto.RegisterType((*ProbeResponse)(nil), "csi.v1.ProbeResponse") + proto.RegisterType((*CreateVolumeRequest)(nil), "csi.v1.CreateVolumeRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.CreateVolumeRequest.ParametersEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.CreateVolumeRequest.SecretsEntry") + proto.RegisterType((*VolumeContentSource)(nil), "csi.v1.VolumeContentSource") + proto.RegisterType((*VolumeContentSource_SnapshotSource)(nil), "csi.v1.VolumeContentSource.SnapshotSource") + proto.RegisterType((*VolumeContentSource_VolumeSource)(nil), "csi.v1.VolumeContentSource.VolumeSource") + proto.RegisterType((*CreateVolumeResponse)(nil), "csi.v1.CreateVolumeResponse") + proto.RegisterType((*VolumeCapability)(nil), "csi.v1.VolumeCapability") + proto.RegisterType((*VolumeCapability_BlockVolume)(nil), "csi.v1.VolumeCapability.BlockVolume") + 
proto.RegisterType((*VolumeCapability_MountVolume)(nil), "csi.v1.VolumeCapability.MountVolume") + proto.RegisterType((*VolumeCapability_AccessMode)(nil), "csi.v1.VolumeCapability.AccessMode") + proto.RegisterType((*CapacityRange)(nil), "csi.v1.CapacityRange") + proto.RegisterType((*Volume)(nil), "csi.v1.Volume") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.Volume.VolumeContextEntry") + proto.RegisterType((*TopologyRequirement)(nil), "csi.v1.TopologyRequirement") + proto.RegisterType((*Topology)(nil), "csi.v1.Topology") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.Topology.SegmentsEntry") + proto.RegisterType((*DeleteVolumeRequest)(nil), "csi.v1.DeleteVolumeRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.DeleteVolumeRequest.SecretsEntry") + proto.RegisterType((*DeleteVolumeResponse)(nil), "csi.v1.DeleteVolumeResponse") + proto.RegisterType((*ControllerPublishVolumeRequest)(nil), "csi.v1.ControllerPublishVolumeRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.ControllerPublishVolumeRequest.SecretsEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.ControllerPublishVolumeRequest.VolumeContextEntry") + proto.RegisterType((*ControllerPublishVolumeResponse)(nil), "csi.v1.ControllerPublishVolumeResponse") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.ControllerPublishVolumeResponse.PublishContextEntry") + proto.RegisterType((*ControllerUnpublishVolumeRequest)(nil), "csi.v1.ControllerUnpublishVolumeRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.ControllerUnpublishVolumeRequest.SecretsEntry") + proto.RegisterType((*ControllerUnpublishVolumeResponse)(nil), "csi.v1.ControllerUnpublishVolumeResponse") + proto.RegisterType((*ValidateVolumeCapabilitiesRequest)(nil), "csi.v1.ValidateVolumeCapabilitiesRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.ValidateVolumeCapabilitiesRequest.ParametersEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.ValidateVolumeCapabilitiesRequest.SecretsEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.ValidateVolumeCapabilitiesRequest.VolumeContextEntry") + proto.RegisterType((*ValidateVolumeCapabilitiesResponse)(nil), "csi.v1.ValidateVolumeCapabilitiesResponse") + proto.RegisterType((*ValidateVolumeCapabilitiesResponse_Confirmed)(nil), "csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.ParametersEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.VolumeContextEntry") + proto.RegisterType((*ListVolumesRequest)(nil), "csi.v1.ListVolumesRequest") + proto.RegisterType((*ListVolumesResponse)(nil), "csi.v1.ListVolumesResponse") + proto.RegisterType((*ListVolumesResponse_Entry)(nil), "csi.v1.ListVolumesResponse.Entry") + proto.RegisterType((*GetCapacityRequest)(nil), "csi.v1.GetCapacityRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.GetCapacityRequest.ParametersEntry") + proto.RegisterType((*GetCapacityResponse)(nil), "csi.v1.GetCapacityResponse") + proto.RegisterType((*ControllerGetCapabilitiesRequest)(nil), "csi.v1.ControllerGetCapabilitiesRequest") + proto.RegisterType((*ControllerGetCapabilitiesResponse)(nil), "csi.v1.ControllerGetCapabilitiesResponse") + proto.RegisterType((*ControllerServiceCapability)(nil), "csi.v1.ControllerServiceCapability") + proto.RegisterType((*ControllerServiceCapability_RPC)(nil), 
"csi.v1.ControllerServiceCapability.RPC") + proto.RegisterType((*CreateSnapshotRequest)(nil), "csi.v1.CreateSnapshotRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.CreateSnapshotRequest.ParametersEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.CreateSnapshotRequest.SecretsEntry") + proto.RegisterType((*CreateSnapshotResponse)(nil), "csi.v1.CreateSnapshotResponse") + proto.RegisterType((*Snapshot)(nil), "csi.v1.Snapshot") + proto.RegisterType((*DeleteSnapshotRequest)(nil), "csi.v1.DeleteSnapshotRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.DeleteSnapshotRequest.SecretsEntry") + proto.RegisterType((*DeleteSnapshotResponse)(nil), "csi.v1.DeleteSnapshotResponse") + proto.RegisterType((*ListSnapshotsRequest)(nil), "csi.v1.ListSnapshotsRequest") + proto.RegisterType((*ListSnapshotsResponse)(nil), "csi.v1.ListSnapshotsResponse") + proto.RegisterType((*ListSnapshotsResponse_Entry)(nil), "csi.v1.ListSnapshotsResponse.Entry") + proto.RegisterType((*NodeStageVolumeRequest)(nil), "csi.v1.NodeStageVolumeRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodeStageVolumeRequest.PublishContextEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodeStageVolumeRequest.SecretsEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodeStageVolumeRequest.VolumeContextEntry") + proto.RegisterType((*NodeStageVolumeResponse)(nil), "csi.v1.NodeStageVolumeResponse") + proto.RegisterType((*NodeUnstageVolumeRequest)(nil), "csi.v1.NodeUnstageVolumeRequest") + proto.RegisterType((*NodeUnstageVolumeResponse)(nil), "csi.v1.NodeUnstageVolumeResponse") + proto.RegisterType((*NodePublishVolumeRequest)(nil), "csi.v1.NodePublishVolumeRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodePublishVolumeRequest.PublishContextEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodePublishVolumeRequest.SecretsEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodePublishVolumeRequest.VolumeContextEntry") + proto.RegisterType((*NodePublishVolumeResponse)(nil), "csi.v1.NodePublishVolumeResponse") + proto.RegisterType((*NodeUnpublishVolumeRequest)(nil), "csi.v1.NodeUnpublishVolumeRequest") + proto.RegisterType((*NodeUnpublishVolumeResponse)(nil), "csi.v1.NodeUnpublishVolumeResponse") + proto.RegisterType((*NodeGetVolumeStatsRequest)(nil), "csi.v1.NodeGetVolumeStatsRequest") + proto.RegisterType((*NodeGetVolumeStatsResponse)(nil), "csi.v1.NodeGetVolumeStatsResponse") + proto.RegisterType((*VolumeUsage)(nil), "csi.v1.VolumeUsage") + proto.RegisterType((*NodeGetCapabilitiesRequest)(nil), "csi.v1.NodeGetCapabilitiesRequest") + proto.RegisterType((*NodeGetCapabilitiesResponse)(nil), "csi.v1.NodeGetCapabilitiesResponse") + proto.RegisterType((*NodeServiceCapability)(nil), "csi.v1.NodeServiceCapability") + proto.RegisterType((*NodeServiceCapability_RPC)(nil), "csi.v1.NodeServiceCapability.RPC") + proto.RegisterType((*NodeGetInfoRequest)(nil), "csi.v1.NodeGetInfoRequest") + proto.RegisterType((*NodeGetInfoResponse)(nil), "csi.v1.NodeGetInfoResponse") + proto.RegisterEnum("csi.v1.PluginCapability_Service_Type", PluginCapability_Service_Type_name, PluginCapability_Service_Type_value) + proto.RegisterEnum("csi.v1.VolumeCapability_AccessMode_Mode", VolumeCapability_AccessMode_Mode_name, VolumeCapability_AccessMode_Mode_value) + proto.RegisterEnum("csi.v1.ControllerServiceCapability_RPC_Type", ControllerServiceCapability_RPC_Type_name, ControllerServiceCapability_RPC_Type_value) + 
proto.RegisterEnum("csi.v1.VolumeUsage_Unit", VolumeUsage_Unit_name, VolumeUsage_Unit_value) + proto.RegisterEnum("csi.v1.NodeServiceCapability_RPC_Type", NodeServiceCapability_RPC_Type_name, NodeServiceCapability_RPC_Type_value) + proto.RegisterExtension(E_CsiSecret) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// IdentityClient is the client API for Identity service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type IdentityClient interface { + GetPluginInfo(ctx context.Context, in *GetPluginInfoRequest, opts ...grpc.CallOption) (*GetPluginInfoResponse, error) + GetPluginCapabilities(ctx context.Context, in *GetPluginCapabilitiesRequest, opts ...grpc.CallOption) (*GetPluginCapabilitiesResponse, error) + Probe(ctx context.Context, in *ProbeRequest, opts ...grpc.CallOption) (*ProbeResponse, error) +} + +type identityClient struct { + cc *grpc.ClientConn +} + +func NewIdentityClient(cc *grpc.ClientConn) IdentityClient { + return &identityClient{cc} +} + +func (c *identityClient) GetPluginInfo(ctx context.Context, in *GetPluginInfoRequest, opts ...grpc.CallOption) (*GetPluginInfoResponse, error) { + out := new(GetPluginInfoResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Identity/GetPluginInfo", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *identityClient) GetPluginCapabilities(ctx context.Context, in *GetPluginCapabilitiesRequest, opts ...grpc.CallOption) (*GetPluginCapabilitiesResponse, error) { + out := new(GetPluginCapabilitiesResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Identity/GetPluginCapabilities", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *identityClient) Probe(ctx context.Context, in *ProbeRequest, opts ...grpc.CallOption) (*ProbeResponse, error) { + out := new(ProbeResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Identity/Probe", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// IdentityServer is the server API for Identity service. 
+type IdentityServer interface { + GetPluginInfo(context.Context, *GetPluginInfoRequest) (*GetPluginInfoResponse, error) + GetPluginCapabilities(context.Context, *GetPluginCapabilitiesRequest) (*GetPluginCapabilitiesResponse, error) + Probe(context.Context, *ProbeRequest) (*ProbeResponse, error) +} + +func RegisterIdentityServer(s *grpc.Server, srv IdentityServer) { + s.RegisterService(&_Identity_serviceDesc, srv) +} + +func _Identity_GetPluginInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPluginInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IdentityServer).GetPluginInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Identity/GetPluginInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IdentityServer).GetPluginInfo(ctx, req.(*GetPluginInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Identity_GetPluginCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPluginCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IdentityServer).GetPluginCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Identity/GetPluginCapabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IdentityServer).GetPluginCapabilities(ctx, req.(*GetPluginCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Identity_Probe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ProbeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IdentityServer).Probe(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Identity/Probe", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IdentityServer).Probe(ctx, req.(*ProbeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Identity_serviceDesc = grpc.ServiceDesc{ + ServiceName: "csi.v1.Identity", + HandlerType: (*IdentityServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetPluginInfo", + Handler: _Identity_GetPluginInfo_Handler, + }, + { + MethodName: "GetPluginCapabilities", + Handler: _Identity_GetPluginCapabilities_Handler, + }, + { + MethodName: "Probe", + Handler: _Identity_Probe_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/container-storage-interface/spec/csi.proto", +} + +// ControllerClient is the client API for Controller service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
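For the server half of Identity, a hedged sketch of a stub that satisfies `IdentityServer` and is wired up through `RegisterIdentityServer`. The plugin name, version, and the choice to advertise `CONTROLLER_SERVICE` are illustrative assumptions; imports (`context`, the `csi` package, `google.golang.org/grpc`) are as in the previous sketch. The `ControllerClient` interface follows.

```go
// Hedged sketch: a minimal IdentityServer. Name, version and the
// advertised CONTROLLER_SERVICE capability are illustrative assumptions.
type identitySrv struct{}

func (identitySrv) GetPluginInfo(ctx context.Context, _ *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) {
	return &csi.GetPluginInfoResponse{Name: "example.csi.driver", VendorVersion: "0.1.0"}, nil
}

func (identitySrv) GetPluginCapabilities(ctx context.Context, _ *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) {
	// Advertise CONTROLLER_SERVICE so the CO knows the Controller RPCs
	// are implemented by this endpoint. The oneof is set through its
	// generated wrapper struct.
	return &csi.GetPluginCapabilitiesResponse{
		Capabilities: []*csi.PluginCapability{{
			Type: &csi.PluginCapability_Service_{
				Service: &csi.PluginCapability_Service{
					Type: csi.PluginCapability_Service_CONTROLLER_SERVICE,
				},
			},
		}},
	}, nil
}

func (identitySrv) Probe(ctx context.Context, _ *csi.ProbeRequest) (*csi.ProbeResponse, error) {
	// An unset ready field is treated as "ready" by the caller.
	return &csi.ProbeResponse{}, nil
}

func registerIdentity(s *grpc.Server) {
	csi.RegisterIdentityServer(s, identitySrv{})
}
```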
+type ControllerClient interface { + CreateVolume(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error) + DeleteVolume(ctx context.Context, in *DeleteVolumeRequest, opts ...grpc.CallOption) (*DeleteVolumeResponse, error) + ControllerPublishVolume(ctx context.Context, in *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error) + ControllerUnpublishVolume(ctx context.Context, in *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*ControllerUnpublishVolumeResponse, error) + ValidateVolumeCapabilities(ctx context.Context, in *ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*ValidateVolumeCapabilitiesResponse, error) + ListVolumes(ctx context.Context, in *ListVolumesRequest, opts ...grpc.CallOption) (*ListVolumesResponse, error) + GetCapacity(ctx context.Context, in *GetCapacityRequest, opts ...grpc.CallOption) (*GetCapacityResponse, error) + ControllerGetCapabilities(ctx context.Context, in *ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*ControllerGetCapabilitiesResponse, error) + CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*CreateSnapshotResponse, error) + DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*DeleteSnapshotResponse, error) + ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) +} + +type controllerClient struct { + cc *grpc.ClientConn +} + +func NewControllerClient(cc *grpc.ClientConn) ControllerClient { + return &controllerClient{cc} +} + +func (c *controllerClient) CreateVolume(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error) { + out := new(CreateVolumeResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/CreateVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) DeleteVolume(ctx context.Context, in *DeleteVolumeRequest, opts ...grpc.CallOption) (*DeleteVolumeResponse, error) { + out := new(DeleteVolumeResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/DeleteVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerPublishVolume(ctx context.Context, in *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error) { + out := new(ControllerPublishVolumeResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/ControllerPublishVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerUnpublishVolume(ctx context.Context, in *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*ControllerUnpublishVolumeResponse, error) { + out := new(ControllerUnpublishVolumeResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/ControllerUnpublishVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ValidateVolumeCapabilities(ctx context.Context, in *ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*ValidateVolumeCapabilitiesResponse, error) { + out := new(ValidateVolumeCapabilitiesResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/ValidateVolumeCapabilities", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ListVolumes(ctx context.Context, in *ListVolumesRequest, opts ...grpc.CallOption) (*ListVolumesResponse, error) { + out := new(ListVolumesResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/ListVolumes", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) GetCapacity(ctx context.Context, in *GetCapacityRequest, opts ...grpc.CallOption) (*GetCapacityResponse, error) { + out := new(GetCapacityResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/GetCapacity", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerGetCapabilities(ctx context.Context, in *ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*ControllerGetCapabilitiesResponse, error) { + out := new(ControllerGetCapabilitiesResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/ControllerGetCapabilities", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*CreateSnapshotResponse, error) { + out := new(CreateSnapshotResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/CreateSnapshot", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*DeleteSnapshotResponse, error) { + out := new(DeleteSnapshotResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/DeleteSnapshot", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) { + out := new(ListSnapshotsResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/ListSnapshots", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ControllerServer is the server API for Controller service. 
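As a usage sketch for the `ControllerClient` above (the `ControllerServer` interface follows): provisioning means building a `CreateVolumeRequest` whose `AccessType` oneof is populated through its generated wrapper struct. The volume name, size, and filesystem type below are assumptions, not anything the generated code prescribes.

```go
// Hedged sketch: asking the Controller service for a 1GiB ext4 volume
// mountable read-write on a single node. Name, size and fstype are
// illustrative assumptions.
func createDemoVolume(ctx context.Context, cc csi.ControllerClient) (string, error) {
	resp, err := cc.CreateVolume(ctx, &csi.CreateVolumeRequest{
		Name: "pvc-demo", // idempotency key chosen by the CO
		CapacityRange: &csi.CapacityRange{
			RequiredBytes: 1 << 30, // 1 GiB
		},
		VolumeCapabilities: []*csi.VolumeCapability{{
			// The AccessType oneof is set via its wrapper struct.
			AccessType: &csi.VolumeCapability_Mount{
				Mount: &csi.VolumeCapability_MountVolume{FsType: "ext4"},
			},
			AccessMode: &csi.VolumeCapability_AccessMode{
				Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
			},
		}},
	})
	if err != nil {
		return "", err // CSI error semantics arrive as gRPC status codes
	}
	return resp.GetVolume().GetVolumeId(), nil
}
```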
+type ControllerServer interface { + CreateVolume(context.Context, *CreateVolumeRequest) (*CreateVolumeResponse, error) + DeleteVolume(context.Context, *DeleteVolumeRequest) (*DeleteVolumeResponse, error) + ControllerPublishVolume(context.Context, *ControllerPublishVolumeRequest) (*ControllerPublishVolumeResponse, error) + ControllerUnpublishVolume(context.Context, *ControllerUnpublishVolumeRequest) (*ControllerUnpublishVolumeResponse, error) + ValidateVolumeCapabilities(context.Context, *ValidateVolumeCapabilitiesRequest) (*ValidateVolumeCapabilitiesResponse, error) + ListVolumes(context.Context, *ListVolumesRequest) (*ListVolumesResponse, error) + GetCapacity(context.Context, *GetCapacityRequest) (*GetCapacityResponse, error) + ControllerGetCapabilities(context.Context, *ControllerGetCapabilitiesRequest) (*ControllerGetCapabilitiesResponse, error) + CreateSnapshot(context.Context, *CreateSnapshotRequest) (*CreateSnapshotResponse, error) + DeleteSnapshot(context.Context, *DeleteSnapshotRequest) (*DeleteSnapshotResponse, error) + ListSnapshots(context.Context, *ListSnapshotsRequest) (*ListSnapshotsResponse, error) +} + +func RegisterControllerServer(s *grpc.Server, srv ControllerServer) { + s.RegisterService(&_Controller_serviceDesc, srv) +} + +func _Controller_CreateVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).CreateVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/CreateVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).CreateVolume(ctx, req.(*CreateVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_DeleteVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).DeleteVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/DeleteVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).DeleteVolume(ctx, req.(*DeleteVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ControllerPublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerPublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ControllerPublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/ControllerPublishVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ControllerPublishVolume(ctx, req.(*ControllerPublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ControllerUnpublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerUnpublishVolumeRequest) + if err := dec(in); err != 
nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ControllerUnpublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/ControllerUnpublishVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ControllerUnpublishVolume(ctx, req.(*ControllerUnpublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ValidateVolumeCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateVolumeCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ValidateVolumeCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/ValidateVolumeCapabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ValidateVolumeCapabilities(ctx, req.(*ValidateVolumeCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ListVolumes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListVolumesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ListVolumes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/ListVolumes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ListVolumes(ctx, req.(*ListVolumesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_GetCapacity_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetCapacityRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).GetCapacity(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/GetCapacity", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).GetCapacity(ctx, req.(*GetCapacityRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ControllerGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerGetCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ControllerGetCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/ControllerGetCapabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ControllerGetCapabilities(ctx, req.(*ControllerGetCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_CreateSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(ControllerServer).CreateSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/CreateSnapshot", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).CreateSnapshot(ctx, req.(*CreateSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_DeleteSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).DeleteSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/DeleteSnapshot", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).DeleteSnapshot(ctx, req.(*DeleteSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ListSnapshots_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListSnapshotsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ListSnapshots(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/ListSnapshots", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ListSnapshots(ctx, req.(*ListSnapshotsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Controller_serviceDesc = grpc.ServiceDesc{ + ServiceName: "csi.v1.Controller", + HandlerType: (*ControllerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateVolume", + Handler: _Controller_CreateVolume_Handler, + }, + { + MethodName: "DeleteVolume", + Handler: _Controller_DeleteVolume_Handler, + }, + { + MethodName: "ControllerPublishVolume", + Handler: _Controller_ControllerPublishVolume_Handler, + }, + { + MethodName: "ControllerUnpublishVolume", + Handler: _Controller_ControllerUnpublishVolume_Handler, + }, + { + MethodName: "ValidateVolumeCapabilities", + Handler: _Controller_ValidateVolumeCapabilities_Handler, + }, + { + MethodName: "ListVolumes", + Handler: _Controller_ListVolumes_Handler, + }, + { + MethodName: "GetCapacity", + Handler: _Controller_GetCapacity_Handler, + }, + { + MethodName: "ControllerGetCapabilities", + Handler: _Controller_ControllerGetCapabilities_Handler, + }, + { + MethodName: "CreateSnapshot", + Handler: _Controller_CreateSnapshot_Handler, + }, + { + MethodName: "DeleteSnapshot", + Handler: _Controller_DeleteSnapshot_Handler, + }, + { + MethodName: "ListSnapshots", + Handler: _Controller_ListSnapshots_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/container-storage-interface/spec/csi.proto", +} + +// NodeClient is the client API for Node service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
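Ahead of the `NodeClient` interface below, a hedged sketch of the publish half of the node flow: stage the volume to a node-global path, then publish it to the workload-specific target path. The paths and volume ID are assumptions, and a CO only calls `NodeStageVolume` when the plugin advertises the `STAGE_UNSTAGE_VOLUME` capability.

```go
// Hedged sketch: staging then publishing a volume on a node. The paths
// are assumptions; the stage step is skipped by COs when the plugin
// lacks the STAGE_UNSTAGE_VOLUME node capability.
func publishOnNode(ctx context.Context, nc csi.NodeClient, volumeID string, vc *csi.VolumeCapability) error {
	staging := "/var/lib/kubelet/plugins/demo/staging"            // assumed node-global path
	target := "/var/lib/kubelet/pods/pod-1/volumes/demo/mount"    // assumed per-workload path

	if _, err := nc.NodeStageVolume(ctx, &csi.NodeStageVolumeRequest{
		VolumeId:          volumeID,
		StagingTargetPath: staging,
		VolumeCapability:  vc,
	}); err != nil {
		return err
	}
	_, err := nc.NodePublishVolume(ctx, &csi.NodePublishVolumeRequest{
		VolumeId:          volumeID,
		StagingTargetPath: staging,
		TargetPath:        target,
		VolumeCapability:  vc,
		Readonly:          false,
	})
	return err
}
```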
+type NodeClient interface { + NodeStageVolume(ctx context.Context, in *NodeStageVolumeRequest, opts ...grpc.CallOption) (*NodeStageVolumeResponse, error) + NodeUnstageVolume(ctx context.Context, in *NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*NodeUnstageVolumeResponse, error) + NodePublishVolume(ctx context.Context, in *NodePublishVolumeRequest, opts ...grpc.CallOption) (*NodePublishVolumeResponse, error) + NodeUnpublishVolume(ctx context.Context, in *NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*NodeUnpublishVolumeResponse, error) + NodeGetVolumeStats(ctx context.Context, in *NodeGetVolumeStatsRequest, opts ...grpc.CallOption) (*NodeGetVolumeStatsResponse, error) + NodeGetCapabilities(ctx context.Context, in *NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*NodeGetCapabilitiesResponse, error) + NodeGetInfo(ctx context.Context, in *NodeGetInfoRequest, opts ...grpc.CallOption) (*NodeGetInfoResponse, error) +} + +type nodeClient struct { + cc *grpc.ClientConn +} + +func NewNodeClient(cc *grpc.ClientConn) NodeClient { + return &nodeClient{cc} +} + +func (c *nodeClient) NodeStageVolume(ctx context.Context, in *NodeStageVolumeRequest, opts ...grpc.CallOption) (*NodeStageVolumeResponse, error) { + out := new(NodeStageVolumeResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeStageVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeUnstageVolume(ctx context.Context, in *NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*NodeUnstageVolumeResponse, error) { + out := new(NodeUnstageVolumeResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeUnstageVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodePublishVolume(ctx context.Context, in *NodePublishVolumeRequest, opts ...grpc.CallOption) (*NodePublishVolumeResponse, error) { + out := new(NodePublishVolumeResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Node/NodePublishVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeUnpublishVolume(ctx context.Context, in *NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*NodeUnpublishVolumeResponse, error) { + out := new(NodeUnpublishVolumeResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeUnpublishVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeGetVolumeStats(ctx context.Context, in *NodeGetVolumeStatsRequest, opts ...grpc.CallOption) (*NodeGetVolumeStatsResponse, error) { + out := new(NodeGetVolumeStatsResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeGetVolumeStats", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeGetCapabilities(ctx context.Context, in *NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*NodeGetCapabilitiesResponse, error) { + out := new(NodeGetCapabilitiesResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeGetCapabilities", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeGetInfo(ctx context.Context, in *NodeGetInfoRequest, opts ...grpc.CallOption) (*NodeGetInfoResponse, error) { + out := new(NodeGetInfoResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeGetInfo", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// NodeServer is the server API for Node service. 
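And the matching server half (the `NodeServer` interface follows): a skeleton that registers through `RegisterNodeServer`, answers the informational RPCs, and rejects the rest with `codes.Unimplemented`, the conventional way to signal an unsupported optional RPC. The node ID is an assumption; `codes` and `status` come from `google.golang.org/grpc/codes` and `google.golang.org/grpc/status`, other imports as in the earlier sketches.

```go
// Hedged skeleton of a NodeServer: informational RPCs answered,
// everything else Unimplemented. The node ID is an assumption.
type stubNode struct{}

func (stubNode) NodeStageVolume(ctx context.Context, _ *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
	return nil, status.Error(codes.Unimplemented, "NodeStageVolume")
}
func (stubNode) NodeUnstageVolume(ctx context.Context, _ *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
	return nil, status.Error(codes.Unimplemented, "NodeUnstageVolume")
}
func (stubNode) NodePublishVolume(ctx context.Context, _ *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
	return nil, status.Error(codes.Unimplemented, "NodePublishVolume")
}
func (stubNode) NodeUnpublishVolume(ctx context.Context, _ *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
	return nil, status.Error(codes.Unimplemented, "NodeUnpublishVolume")
}
func (stubNode) NodeGetVolumeStats(ctx context.Context, _ *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {
	return nil, status.Error(codes.Unimplemented, "NodeGetVolumeStats")
}
func (stubNode) NodeGetCapabilities(ctx context.Context, _ *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
	// Empty capabilities: the CO will not call NodeStage/NodeUnstage.
	return &csi.NodeGetCapabilitiesResponse{}, nil
}
func (stubNode) NodeGetInfo(ctx context.Context, _ *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {
	return &csi.NodeGetInfoResponse{NodeId: "node-1"}, nil // assumed ID
}

func registerNode(s *grpc.Server) { csi.RegisterNodeServer(s, stubNode{}) }
```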
+type NodeServer interface { + NodeStageVolume(context.Context, *NodeStageVolumeRequest) (*NodeStageVolumeResponse, error) + NodeUnstageVolume(context.Context, *NodeUnstageVolumeRequest) (*NodeUnstageVolumeResponse, error) + NodePublishVolume(context.Context, *NodePublishVolumeRequest) (*NodePublishVolumeResponse, error) + NodeUnpublishVolume(context.Context, *NodeUnpublishVolumeRequest) (*NodeUnpublishVolumeResponse, error) + NodeGetVolumeStats(context.Context, *NodeGetVolumeStatsRequest) (*NodeGetVolumeStatsResponse, error) + NodeGetCapabilities(context.Context, *NodeGetCapabilitiesRequest) (*NodeGetCapabilitiesResponse, error) + NodeGetInfo(context.Context, *NodeGetInfoRequest) (*NodeGetInfoResponse, error) +} + +func RegisterNodeServer(s *grpc.Server, srv NodeServer) { + s.RegisterService(&_Node_serviceDesc, srv) +} + +func _Node_NodeStageVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeStageVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeStageVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Node/NodeStageVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeStageVolume(ctx, req.(*NodeStageVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeUnstageVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeUnstageVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeUnstageVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Node/NodeUnstageVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeUnstageVolume(ctx, req.(*NodeUnstageVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodePublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodePublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodePublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Node/NodePublishVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodePublishVolume(ctx, req.(*NodePublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeUnpublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeUnpublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeUnpublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Node/NodeUnpublishVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeUnpublishVolume(ctx, req.(*NodeUnpublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeGetVolumeStats_Handler(srv interface{}, ctx context.Context, 
dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeGetVolumeStatsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeGetVolumeStats(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Node/NodeGetVolumeStats", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeGetVolumeStats(ctx, req.(*NodeGetVolumeStatsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeGetCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeGetCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Node/NodeGetCapabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeGetCapabilities(ctx, req.(*NodeGetCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeGetInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeGetInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeGetInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Node/NodeGetInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeGetInfo(ctx, req.(*NodeGetInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Node_serviceDesc = grpc.ServiceDesc{ + ServiceName: "csi.v1.Node", + HandlerType: (*NodeServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "NodeStageVolume", + Handler: _Node_NodeStageVolume_Handler, + }, + { + MethodName: "NodeUnstageVolume", + Handler: _Node_NodeUnstageVolume_Handler, + }, + { + MethodName: "NodePublishVolume", + Handler: _Node_NodePublishVolume_Handler, + }, + { + MethodName: "NodeUnpublishVolume", + Handler: _Node_NodeUnpublishVolume_Handler, + }, + { + MethodName: "NodeGetVolumeStats", + Handler: _Node_NodeGetVolumeStats_Handler, + }, + { + MethodName: "NodeGetCapabilities", + Handler: _Node_NodeGetCapabilities_Handler, + }, + { + MethodName: "NodeGetInfo", + Handler: _Node_NodeGetInfo_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/container-storage-interface/spec/csi.proto", +} + +func init() { + proto.RegisterFile("github.com/container-storage-interface/spec/csi.proto", fileDescriptor_csi_1092d4f3f3c8dc30) +} + +var fileDescriptor_csi_1092d4f3f3c8dc30 = []byte{ + // 3070 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x1a, 0x4d, 0x6f, 0xe3, 0xc6, + 0xd5, 0xd4, 0x87, 0x3f, 0x9e, 0x6d, 0x45, 0x3b, 0xfe, 0x58, 0x2d, 0x6d, 0xef, 0x7a, 0xb9, 0xd9, + 0xc4, 0xd9, 0x64, 0xe5, 0xc6, 0xc9, 0x06, 0xcd, 0xee, 0xa6, 0x8d, 0x24, 0x2b, 0xb6, 0xb2, 0x5a, + 0xd9, 0xa1, 0x64, 0xa7, 0xbb, 0x6d, 0xc0, 0xd0, 0xd2, 0x58, 0x4b, 0x44, 0x22, 0x15, 0x72, 0xe4, + 0xae, 0x7b, 0x2b, 0x0a, 0xf4, 0xd2, 0x53, 0x4f, 0xed, 0xad, 0x40, 0x7a, 0x6a, 0xd1, 0xa0, 0xa7, + 0xa2, 0xc7, 0x02, 0xbd, 0x14, 0xe8, 0x1f, 0x68, 0x6f, 0xb9, 0x07, 0x2d, 0x10, 
0xf4, 0xd0, 0x43, + 0x81, 0x02, 0x05, 0x39, 0x43, 0x8a, 0x43, 0x91, 0x94, 0xb4, 0xde, 0x20, 0x87, 0x9e, 0x24, 0xbe, + 0x79, 0x5f, 0xf3, 0xe6, 0xbd, 0x37, 0xef, 0x3d, 0x12, 0xee, 0xb4, 0x35, 0xf2, 0xa4, 0x7f, 0x92, + 0x6f, 0x1a, 0xdd, 0xed, 0xa6, 0xa1, 0x13, 0x55, 0xd3, 0xb1, 0x79, 0xdb, 0x22, 0x86, 0xa9, 0xb6, + 0xf1, 0x6d, 0x4d, 0x27, 0xd8, 0x3c, 0x55, 0x9b, 0x78, 0xdb, 0xea, 0xe1, 0xe6, 0x76, 0xd3, 0xd2, + 0xf2, 0x3d, 0xd3, 0x20, 0x06, 0x9a, 0xb6, 0xff, 0x9e, 0xbd, 0x2e, 0x6e, 0xb6, 0x0d, 0xa3, 0xdd, + 0xc1, 0xdb, 0x0e, 0xf4, 0xa4, 0x7f, 0xba, 0xdd, 0xc2, 0x56, 0xd3, 0xd4, 0x7a, 0xc4, 0x30, 0x29, + 0xa6, 0x78, 0x2d, 0x88, 0x41, 0xb4, 0x2e, 0xb6, 0x88, 0xda, 0xed, 0x31, 0x84, 0xab, 0x41, 0x84, + 0x1f, 0x9a, 0x6a, 0xaf, 0x87, 0x4d, 0x8b, 0xae, 0x4b, 0xab, 0xb0, 0xbc, 0x87, 0xc9, 0x61, 0xa7, + 0xdf, 0xd6, 0xf4, 0x8a, 0x7e, 0x6a, 0xc8, 0xf8, 0xd3, 0x3e, 0xb6, 0x88, 0xf4, 0x77, 0x01, 0x56, + 0x02, 0x0b, 0x56, 0xcf, 0xd0, 0x2d, 0x8c, 0x10, 0xa4, 0x74, 0xb5, 0x8b, 0x73, 0xc2, 0xa6, 0xb0, + 0x35, 0x27, 0x3b, 0xff, 0xd1, 0x4d, 0xc8, 0x9c, 0x61, 0xbd, 0x65, 0x98, 0xca, 0x19, 0x36, 0x2d, + 0xcd, 0xd0, 0x73, 0x09, 0x67, 0x75, 0x91, 0x42, 0x8f, 0x29, 0x10, 0xed, 0xc1, 0x6c, 0x57, 0xd5, + 0xb5, 0x53, 0x6c, 0x91, 0x5c, 0x72, 0x33, 0xb9, 0x35, 0xbf, 0xf3, 0x6a, 0x9e, 0x6e, 0x35, 0x1f, + 0x2a, 0x2b, 0xff, 0x90, 0x61, 0x97, 0x75, 0x62, 0x9e, 0xcb, 0x1e, 0xb1, 0x78, 0x0f, 0x16, 0xb9, + 0x25, 0x94, 0x85, 0xe4, 0x27, 0xf8, 0x9c, 0xe9, 0x64, 0xff, 0x45, 0xcb, 0x90, 0x3e, 0x53, 0x3b, + 0x7d, 0xcc, 0x34, 0xa1, 0x0f, 0x77, 0x13, 0xdf, 0x16, 0xa4, 0xab, 0xb0, 0xee, 0x49, 0x2b, 0xa9, + 0x3d, 0xf5, 0x44, 0xeb, 0x68, 0x44, 0xc3, 0x96, 0xbb, 0xf5, 0x8f, 0x60, 0x23, 0x62, 0x9d, 0x59, + 0xe0, 0x3e, 0x2c, 0x34, 0x7d, 0xf0, 0x9c, 0xe0, 0x6c, 0x25, 0xe7, 0x6e, 0x25, 0x40, 0x79, 0x2e, + 0x73, 0xd8, 0xd2, 0xbf, 0x04, 0xc8, 0x06, 0x51, 0xd0, 0x7d, 0x98, 0xb1, 0xb0, 0x79, 0xa6, 0x35, + 0xa9, 0x5d, 0xe7, 0x77, 0x36, 0xa3, 0xb8, 0xe5, 0xeb, 0x14, 0x6f, 0x7f, 0x4a, 0x76, 0x49, 0xc4, + 0x5f, 0x08, 0x30, 0xc3, 0xc0, 0xe8, 0x6d, 0x48, 0x91, 0xf3, 0x1e, 0x65, 0x93, 0xd9, 0xb9, 0x39, + 0x8a, 0x4d, 0xbe, 0x71, 0xde, 0xc3, 0xb2, 0x43, 0x22, 0x7d, 0x00, 0x29, 0xfb, 0x09, 0xcd, 0xc3, + 0xcc, 0x51, 0xed, 0x41, 0xed, 0xe0, 0xc3, 0x5a, 0x76, 0x0a, 0xad, 0x02, 0x2a, 0x1d, 0xd4, 0x1a, + 0xf2, 0x41, 0xb5, 0x5a, 0x96, 0x95, 0x7a, 0x59, 0x3e, 0xae, 0x94, 0xca, 0x59, 0x01, 0xbd, 0x08, + 0x9b, 0xc7, 0x07, 0xd5, 0xa3, 0x87, 0x65, 0xa5, 0x50, 0x2a, 0x95, 0xeb, 0xf5, 0x4a, 0xb1, 0x52, + 0xad, 0x34, 0x1e, 0x29, 0xa5, 0x83, 0x5a, 0xbd, 0x21, 0x17, 0x2a, 0xb5, 0x46, 0x3d, 0x9b, 0x28, + 0x4e, 0x53, 0x6d, 0xa4, 0x0c, 0x2c, 0x1c, 0x9a, 0xc6, 0x09, 0x76, 0x6d, 0x5c, 0x80, 0x45, 0xf6, + 0xcc, 0x6c, 0xfa, 0x2d, 0x48, 0x9b, 0x58, 0x6d, 0x9d, 0xb3, 0xed, 0x8b, 0x79, 0xea, 0xb7, 0x79, + 0xd7, 0x6f, 0xf3, 0x45, 0xc3, 0xe8, 0x1c, 0xdb, 0x67, 0x28, 0x53, 0x44, 0xe9, 0xab, 0x14, 0x2c, + 0x95, 0x4c, 0xac, 0x12, 0x7c, 0x6c, 0x74, 0xfa, 0x5d, 0x97, 0x75, 0xa8, 0x7f, 0xde, 0x87, 0x8c, + 0x7d, 0x06, 0x4d, 0x8d, 0x9c, 0x2b, 0xa6, 0xaa, 0xb7, 0xa9, 0x57, 0xcc, 0xef, 0xac, 0xb8, 0xe6, + 0x29, 0xb1, 0x55, 0xd9, 0x5e, 0x94, 0x17, 0x9b, 0xfe, 0x47, 0x54, 0x81, 0xa5, 0x33, 0x47, 0x84, + 0xc2, 0x1d, 0x7b, 0x92, 0x3f, 0x76, 0xaa, 0x85, 0xef, 0xd8, 0xd1, 0x19, 0x0f, 0xd1, 0xb0, 0x85, + 0x1e, 0x00, 0xf4, 0x54, 0x53, 0xed, 0x62, 0x82, 0x4d, 0x2b, 0x97, 0xe2, 0x63, 0x20, 0x64, 0x37, + 0xf9, 0x43, 0x0f, 0x9b, 0xc6, 0x80, 0x8f, 0x1c, 0xed, 0xd9, 0x4e, 0xd3, 0x34, 0x31, 0xb1, 0x72, + 0x69, 0x87, 0xd3, 0x56, 0x1c, 0xa7, 0x3a, 0x45, 0x75, 0xd8, 0x14, 0x93, 0xbf, 0x2c, 0x0a, 0xb2, + 0x4b, 
0x8d, 0x0e, 0x60, 0xc5, 0xdd, 0xa0, 0xa1, 0x13, 0xac, 0x13, 0xc5, 0x32, 0xfa, 0x66, 0x13, + 0xe7, 0xa6, 0x1d, 0x2b, 0xad, 0x05, 0xb6, 0x48, 0x71, 0xea, 0x0e, 0x8a, 0xcc, 0x4c, 0xc3, 0x01, + 0xd1, 0x63, 0x10, 0xd5, 0x66, 0x13, 0x5b, 0x96, 0x46, 0x6d, 0xa1, 0x98, 0xf8, 0xd3, 0xbe, 0x66, + 0xe2, 0x2e, 0xd6, 0x89, 0x95, 0x9b, 0xe1, 0xb9, 0x36, 0x8c, 0x9e, 0xd1, 0x31, 0xda, 0xe7, 0xf2, + 0x00, 0x47, 0xbe, 0xc2, 0x91, 0xfb, 0x56, 0x2c, 0xf1, 0x1d, 0x78, 0x21, 0x60, 0x94, 0x49, 0xa2, + 0x5f, 0xbc, 0x0b, 0x0b, 0x7e, 0x4b, 0x4c, 0x94, 0x39, 0x7e, 0x96, 0x80, 0xa5, 0x10, 0x1b, 0xa0, + 0x7d, 0x98, 0xb5, 0x74, 0xb5, 0x67, 0x3d, 0x31, 0x08, 0xf3, 0xdf, 0x5b, 0x31, 0x26, 0xcb, 0xd7, + 0x19, 0x2e, 0x7d, 0xdc, 0x9f, 0x92, 0x3d, 0x6a, 0x54, 0x84, 0x69, 0x6a, 0x4f, 0xe6, 0xa0, 0x5b, + 0x71, 0x7c, 0x28, 0xcc, 0xe3, 0xc2, 0x28, 0xc5, 0xd7, 0x21, 0xc3, 0x4b, 0x40, 0xd7, 0x60, 0xde, + 0x95, 0xa0, 0x68, 0x2d, 0xb6, 0x57, 0x70, 0x41, 0x95, 0x96, 0xf8, 0x2a, 0x2c, 0xf8, 0x99, 0xa1, + 0x35, 0x98, 0x63, 0x0e, 0xe1, 0xa1, 0xcf, 0x52, 0x40, 0xa5, 0xe5, 0xc5, 0xf4, 0x77, 0x60, 0x99, + 0xf7, 0x33, 0x16, 0xca, 0x2f, 0x79, 0x7b, 0xa0, 0xb6, 0xc8, 0xf0, 0x7b, 0x70, 0xf5, 0x94, 0x7e, + 0x9b, 0x82, 0x6c, 0x30, 0x68, 0xd0, 0x7d, 0x48, 0x9f, 0x74, 0x8c, 0xe6, 0x27, 0x8c, 0xf6, 0xc5, + 0xa8, 0xe8, 0xca, 0x17, 0x6d, 0x2c, 0x0a, 0xdd, 0x9f, 0x92, 0x29, 0x91, 0x4d, 0xdd, 0x35, 0xfa, + 0x3a, 0x61, 0xd6, 0x8b, 0xa6, 0x7e, 0x68, 0x63, 0x0d, 0xa8, 0x1d, 0x22, 0xb4, 0x0b, 0xf3, 0xd4, + 0xed, 0x94, 0xae, 0xd1, 0xc2, 0xb9, 0xa4, 0xc3, 0xe3, 0x46, 0x24, 0x8f, 0x82, 0x83, 0xfb, 0xd0, + 0x68, 0x61, 0x19, 0x54, 0xef, 0xbf, 0xb8, 0x08, 0xf3, 0x3e, 0xdd, 0xc4, 0x3d, 0x98, 0xf7, 0x09, + 0x43, 0x97, 0x61, 0xe6, 0xd4, 0x52, 0xbc, 0x0c, 0x3d, 0x27, 0x4f, 0x9f, 0x5a, 0x4e, 0xd2, 0xbd, + 0x06, 0xf3, 0x8e, 0x16, 0xca, 0x69, 0x47, 0x6d, 0x5b, 0xb9, 0xc4, 0x66, 0xd2, 0x3e, 0x23, 0x07, + 0xf4, 0x9e, 0x0d, 0x11, 0xff, 0x21, 0x00, 0x0c, 0x44, 0xa2, 0xfb, 0x90, 0x72, 0xb4, 0xa4, 0x79, + 0x7e, 0x6b, 0x0c, 0x2d, 0xf3, 0x8e, 0xaa, 0x0e, 0x95, 0xf4, 0x2b, 0x01, 0x52, 0x0e, 0x9b, 0x60, + 0xae, 0xaf, 0x57, 0x6a, 0x7b, 0xd5, 0xb2, 0x52, 0x3b, 0xd8, 0x2d, 0x2b, 0x1f, 0xca, 0x95, 0x46, + 0x59, 0xce, 0x0a, 0x68, 0x0d, 0x2e, 0xfb, 0xe1, 0x72, 0xb9, 0xb0, 0x5b, 0x96, 0x95, 0x83, 0x5a, + 0xf5, 0x51, 0x36, 0x81, 0x44, 0x58, 0x7d, 0x78, 0x54, 0x6d, 0x54, 0x86, 0xd7, 0x92, 0x68, 0x1d, + 0x72, 0xbe, 0x35, 0xc6, 0x83, 0xb1, 0x4d, 0xd9, 0x6c, 0x7d, 0xab, 0xf4, 0x2f, 0x5b, 0x4c, 0x17, + 0x17, 0xbd, 0xc3, 0x70, 0x9c, 0xed, 0x43, 0x58, 0xe4, 0x72, 0xb4, 0x5d, 0x72, 0xb0, 0xa4, 0xd2, + 0x52, 0x4e, 0xce, 0x89, 0x73, 0x0d, 0x0b, 0x5b, 0x49, 0x79, 0xd1, 0x85, 0x16, 0x6d, 0xa0, 0x6d, + 0xd6, 0x8e, 0xd6, 0xd5, 0x08, 0xc3, 0x49, 0x38, 0x38, 0xe0, 0x80, 0x1c, 0x04, 0xe9, 0x8b, 0x04, + 0x4c, 0xb3, 0xb3, 0xb9, 0xe9, 0xbb, 0x25, 0x38, 0x96, 0x2e, 0x94, 0xb2, 0xe4, 0x82, 0x23, 0xc1, + 0x07, 0x07, 0xda, 0x87, 0x8c, 0x3f, 0x95, 0x3e, 0x75, 0x0b, 0x9d, 0xeb, 0xfc, 0x01, 0xf9, 0xe3, + 0xf9, 0x29, 0x2b, 0x6f, 0x16, 0xcf, 0xfc, 0x30, 0x54, 0x84, 0x4c, 0x20, 0x1b, 0xa7, 0x46, 0x67, + 0xe3, 0xc5, 0x26, 0x97, 0x98, 0x0a, 0xb0, 0xe4, 0x26, 0xd2, 0x0e, 0x56, 0x08, 0x4b, 0xb4, 0xec, + 0xb6, 0xc8, 0x0e, 0x25, 0x60, 0x34, 0x40, 0x76, 0x61, 0xe2, 0xbb, 0x80, 0x86, 0x75, 0x9d, 0x28, + 0x6b, 0xf6, 0x61, 0x29, 0x24, 0xc5, 0xa3, 0x3c, 0xcc, 0x39, 0x47, 0x65, 0x69, 0x04, 0xb3, 0x12, + 0x6a, 0x58, 0xa3, 0x01, 0x8a, 0x8d, 0xdf, 0x33, 0xf1, 0x29, 0x36, 0x4d, 0xdc, 0x72, 0xc2, 0x23, + 0x14, 0xdf, 0x43, 0x91, 0x7e, 0x22, 0xc0, 0xac, 0x0b, 0x47, 0x77, 0x61, 0xd6, 0xc2, 0x6d, 0x7a, + 0xfd, 0x50, 0x59, 0x57, 0x83, 
0xb4, 0xf9, 0x3a, 0x43, 0x60, 0xc5, 0xa6, 0x8b, 0x6f, 0x17, 0x9b, + 0xdc, 0xd2, 0x44, 0x9b, 0xff, 0xa3, 0x00, 0x4b, 0xbb, 0xb8, 0x83, 0x83, 0x55, 0x4a, 0x5c, 0x86, + 0xf5, 0x5f, 0xec, 0x09, 0xfe, 0x62, 0x0f, 0x61, 0x15, 0x73, 0xb1, 0x5f, 0xe8, 0xb2, 0x5b, 0x85, + 0x65, 0x5e, 0x1a, 0x4d, 0xef, 0xd2, 0x3f, 0x93, 0x70, 0xd5, 0xf6, 0x05, 0xd3, 0xe8, 0x74, 0xb0, + 0x79, 0xd8, 0x3f, 0xe9, 0x68, 0xd6, 0x93, 0x09, 0x36, 0x77, 0x19, 0x66, 0x74, 0xa3, 0xe5, 0x0b, + 0x9e, 0x69, 0xfb, 0xb1, 0xd2, 0x42, 0x65, 0xb8, 0x14, 0x2c, 0xb3, 0xce, 0x59, 0x12, 0x8e, 0x2e, + 0xb2, 0xb2, 0x67, 0xc1, 0x1b, 0x44, 0x84, 0x59, 0xbb, 0x40, 0x34, 0xf4, 0xce, 0xb9, 0x13, 0x31, + 0xb3, 0xb2, 0xf7, 0x8c, 0xe4, 0x60, 0xc5, 0xf4, 0x86, 0x57, 0x31, 0xc5, 0xee, 0x28, 0xae, 0x78, + 0xfa, 0x78, 0x28, 0xe2, 0xa7, 0x1d, 0xd6, 0x6f, 0x8f, 0xc9, 0x7a, 0x64, 0x26, 0xb8, 0xc8, 0x29, + 0x3e, 0x87, 0xf0, 0xfd, 0xab, 0x00, 0xd7, 0x22, 0xb7, 0xc0, 0xae, 0xfc, 0x16, 0xbc, 0xd0, 0xa3, + 0x0b, 0x9e, 0x11, 0x68, 0x94, 0xdd, 0x1b, 0x69, 0x04, 0xd6, 0xe9, 0x31, 0x28, 0x67, 0x86, 0x4c, + 0x8f, 0x03, 0x8a, 0x05, 0x58, 0x0a, 0x41, 0x9b, 0x68, 0x33, 0x5f, 0x0a, 0xb0, 0x39, 0x50, 0xe5, + 0x48, 0xef, 0x3d, 0x3f, 0xf7, 0x6d, 0x0c, 0x7c, 0x8b, 0xa6, 0xfc, 0x3b, 0xc3, 0x7b, 0x0f, 0x17, + 0xf8, 0x75, 0x45, 0xf0, 0x0d, 0xb8, 0x1e, 0x23, 0x9a, 0x85, 0xf3, 0x17, 0x29, 0xb8, 0x7e, 0xac, + 0x76, 0xb4, 0x96, 0x57, 0xc8, 0x85, 0xf4, 0xc4, 0xf1, 0x26, 0x69, 0x0e, 0x45, 0x00, 0xcd, 0x5a, + 0xf7, 0xbd, 0xa8, 0x1d, 0xc5, 0x7f, 0x8c, 0xeb, 0xf0, 0x39, 0x36, 0x61, 0x8f, 0x42, 0x9a, 0xb0, + 0xb7, 0xc7, 0xd7, 0x35, 0xae, 0x25, 0x3b, 0x0a, 0x26, 0x98, 0xb7, 0xc6, 0xe7, 0x1b, 0xe3, 0x05, + 0x17, 0x8e, 0xe2, 0x6f, 0xb2, 0x6b, 0xfa, 0x73, 0x0a, 0xa4, 0xb8, 0xdd, 0xb3, 0x1c, 0x22, 0xc3, + 0x5c, 0xd3, 0xd0, 0x4f, 0x35, 0xb3, 0x8b, 0x5b, 0xac, 0xfa, 0x7f, 0x73, 0x1c, 0xe3, 0xb1, 0x04, + 0x52, 0x72, 0x69, 0xe5, 0x01, 0x1b, 0x94, 0x83, 0x99, 0x2e, 0xb6, 0x2c, 0xb5, 0xed, 0xaa, 0xe5, + 0x3e, 0x8a, 0x9f, 0x27, 0x61, 0xce, 0x23, 0x41, 0xfa, 0x90, 0x07, 0xd3, 0xf4, 0xb5, 0xf7, 0x2c, + 0x0a, 0x3c, 0xbb, 0x33, 0x27, 0x9e, 0xc1, 0x99, 0x5b, 0x9c, 0x33, 0xd3, 0x70, 0xd8, 0x7d, 0x26, + 0xb5, 0x63, 0xfc, 0xfa, 0x1b, 0x77, 0x40, 0xe9, 0x07, 0x80, 0xaa, 0x9a, 0xc5, 0xba, 0x28, 0x2f, + 0x2d, 0xd9, 0x4d, 0x93, 0xfa, 0x54, 0xc1, 0x3a, 0x31, 0x35, 0x56, 0xae, 0xa7, 0x65, 0xe8, 0xaa, + 0x4f, 0xcb, 0x14, 0x62, 0x97, 0xf4, 0x16, 0x51, 0x4d, 0xa2, 0xe9, 0x6d, 0x85, 0x18, 0x9f, 0x60, + 0x6f, 0x30, 0xe9, 0x42, 0x1b, 0x36, 0x50, 0xfa, 0x4c, 0x80, 0x25, 0x8e, 0x3d, 0xf3, 0xc9, 0x7b, + 0x30, 0x33, 0xe0, 0xcd, 0x95, 0xf1, 0x21, 0xd8, 0x79, 0x6a, 0x36, 0x97, 0x02, 0x6d, 0x00, 0xe8, + 0xf8, 0x29, 0xe1, 0xe4, 0xce, 0xd9, 0x10, 0x47, 0xa6, 0xb8, 0x0d, 0x69, 0x6a, 0x86, 0x71, 0xfb, + 0xe5, 0xcf, 0x13, 0x80, 0xf6, 0x30, 0xf1, 0xda, 0x20, 0x66, 0x83, 0x08, 0x5f, 0x12, 0x9e, 0xc1, + 0x97, 0xde, 0xe7, 0x7c, 0x89, 0x7a, 0xe3, 0x2d, 0xdf, 0x84, 0x36, 0x20, 0x3a, 0x36, 0x13, 0x46, + 0xb4, 0x1e, 0xb4, 0x9e, 0x1b, 0xaf, 0xf5, 0xb8, 0xa0, 0xcb, 0xec, 0xc2, 0x12, 0xa7, 0x33, 0x3b, + 0xd3, 0xdb, 0x80, 0xd4, 0x33, 0x55, 0xeb, 0xa8, 0xb6, 0x5e, 0x6e, 0x67, 0xc7, 0x3a, 0xbd, 0x4b, + 0xde, 0x8a, 0x4b, 0x26, 0x49, 0xfe, 0x82, 0x81, 0xf1, 0x0b, 0x4e, 0x8c, 0x3b, 0xfe, 0x8b, 0x76, + 0x08, 0x87, 0xc9, 0xdd, 0x0b, 0x9d, 0x1a, 0xdf, 0x18, 0x2e, 0x12, 0xd8, 0x64, 0x36, 0x72, 0x80, + 0xfc, 0xef, 0x04, 0xac, 0xc5, 0x60, 0xa3, 0x7b, 0x90, 0x34, 0x7b, 0x4d, 0xe6, 0x4c, 0x2f, 0x8f, + 0xc1, 0x3f, 0x2f, 0x1f, 0x96, 0xf6, 0xa7, 0x64, 0x9b, 0x4a, 0xfc, 0x79, 0x02, 0x92, 0xf2, 0x61, + 0x09, 0xbd, 0xcb, 0x8d, 0x91, 0x5f, 0x1b, 0x93, 0x8b, 
0x7f, 0x9a, 0xfc, 0x17, 0x21, 0x6c, 0x9c, + 0x9c, 0x83, 0xe5, 0x92, 0x5c, 0x2e, 0x34, 0xca, 0xca, 0x6e, 0xb9, 0x5a, 0x6e, 0x94, 0x15, 0x3a, + 0x44, 0xce, 0x0a, 0x68, 0x1d, 0x72, 0x87, 0x47, 0xc5, 0x6a, 0xa5, 0xbe, 0xaf, 0x1c, 0xd5, 0xdc, + 0x7f, 0x6c, 0x35, 0x81, 0xb2, 0xb0, 0x50, 0xad, 0xd4, 0x1b, 0x0c, 0x50, 0xcf, 0x26, 0x6d, 0xc8, + 0x5e, 0xb9, 0xa1, 0x94, 0x0a, 0x87, 0x85, 0x52, 0xa5, 0xf1, 0x28, 0x9b, 0x42, 0x22, 0xac, 0xf2, + 0xbc, 0xeb, 0xb5, 0xc2, 0x61, 0x7d, 0xff, 0xa0, 0x91, 0x4d, 0x23, 0x04, 0x19, 0x87, 0xde, 0x05, + 0xd5, 0xb3, 0xd3, 0x36, 0x87, 0x52, 0xf5, 0xa0, 0xe6, 0xe9, 0x30, 0x83, 0x96, 0x21, 0xeb, 0x4a, + 0x96, 0xcb, 0x85, 0x5d, 0x67, 0x8a, 0x31, 0xeb, 0x0d, 0xbc, 0xbe, 0x4c, 0xc0, 0x0a, 0x9d, 0x78, + 0xb9, 0xf3, 0x35, 0x37, 0x06, 0xb7, 0x20, 0x4b, 0x7b, 0x74, 0x25, 0x58, 0x25, 0x65, 0x28, 0xfc, + 0xd8, 0xad, 0x95, 0xdc, 0xe9, 0x74, 0xc2, 0x37, 0x9d, 0xae, 0x04, 0x2b, 0xc7, 0x5b, 0xfc, 0x1c, + 0x37, 0x20, 0x2d, 0xae, 0x19, 0x79, 0x18, 0x52, 0xda, 0xdc, 0x8e, 0xe7, 0x16, 0x97, 0xf6, 0x2f, + 0xd2, 0x79, 0x5c, 0x30, 0x7a, 0xdf, 0x83, 0xd5, 0xa0, 0xbe, 0x2c, 0x90, 0x5e, 0x1b, 0x9a, 0xb6, + 0x7a, 0xe9, 0xc4, 0xc3, 0xf5, 0x30, 0xa4, 0xbf, 0x09, 0x30, 0xeb, 0x82, 0xed, 0x94, 0x6c, 0x69, + 0x3f, 0xc2, 0xdc, 0x74, 0x67, 0xce, 0x86, 0x78, 0xc3, 0x22, 0xff, 0x9c, 0x34, 0x11, 0x9c, 0x93, + 0x86, 0x9e, 0x73, 0x32, 0xf4, 0x9c, 0xbf, 0x0b, 0x8b, 0x4d, 0x5b, 0x7d, 0xcd, 0xd0, 0x15, 0xa2, + 0x75, 0xdd, 0xe1, 0xcd, 0xf0, 0x7b, 0x8d, 0x86, 0xfb, 0xc2, 0x4e, 0x5e, 0x70, 0x09, 0x6c, 0x10, + 0xda, 0x84, 0x05, 0xe7, 0x3d, 0x87, 0x42, 0x0c, 0xa5, 0x6f, 0xe1, 0x5c, 0xda, 0x69, 0x65, 0xc1, + 0x81, 0x35, 0x8c, 0x23, 0x0b, 0x4b, 0x7f, 0x12, 0x60, 0x85, 0x76, 0xe8, 0x41, 0x77, 0x1c, 0x35, + 0xef, 0xf5, 0x7b, 0x5c, 0x20, 0xcb, 0x87, 0x32, 0xfc, 0xba, 0x1a, 0x94, 0x1c, 0xac, 0x06, 0xe5, + 0xb1, 0xae, 0xe4, 0x37, 0x02, 0x2c, 0xdb, 0x57, 0xac, 0xbb, 0xf0, 0xbc, 0x6f, 0xfc, 0x09, 0x4e, + 0x32, 0x60, 0xcc, 0x54, 0xd0, 0x98, 0xd2, 0xef, 0x04, 0x58, 0x09, 0xe8, 0xca, 0x3c, 0xf5, 0x9d, + 0x60, 0xf9, 0x70, 0xc3, 0x5f, 0x3e, 0x0c, 0xe1, 0x4f, 0x58, 0x40, 0xdc, 0x71, 0x0b, 0x88, 0xc9, + 0x02, 0xe2, 0xab, 0x14, 0xac, 0xd6, 0x8c, 0x16, 0xae, 0x13, 0xb5, 0x3d, 0xc9, 0x50, 0xea, 0xfb, + 0xc3, 0x3d, 0x3e, 0xf5, 0x9d, 0x1d, 0x57, 0x58, 0x38, 0xd7, 0x71, 0x5a, 0x7b, 0x94, 0x87, 0x25, + 0x8b, 0xa8, 0x6d, 0xe7, 0xd0, 0x54, 0xb3, 0x8d, 0x89, 0xd2, 0x53, 0xc9, 0x13, 0x76, 0x22, 0x97, + 0xd8, 0x52, 0xc3, 0x59, 0x39, 0x54, 0xc9, 0x93, 0xf0, 0x59, 0x51, 0x6a, 0xe2, 0x59, 0xd1, 0xfb, + 0xc1, 0x76, 0xed, 0xd5, 0x11, 0x7b, 0x89, 0x49, 0xbd, 0xdf, 0x8b, 0x98, 0x03, 0xbd, 0x3e, 0x82, + 0xe5, 0xe8, 0xf9, 0xcf, 0xc5, 0xe7, 0x1e, 0xdf, 0xf0, 0x08, 0xe9, 0x0a, 0x5c, 0x1e, 0xda, 0x3c, + 0x0b, 0xf4, 0x36, 0xe4, 0xec, 0xa5, 0x23, 0xdd, 0x9a, 0xd0, 0x1d, 0x23, 0x3c, 0x26, 0x11, 0xe1, + 0x31, 0xd2, 0x1a, 0x5c, 0x09, 0x11, 0xc4, 0xb4, 0xf8, 0x43, 0x9a, 0xaa, 0x31, 0xf9, 0x34, 0xf3, + 0xa3, 0xa8, 0xa8, 0x78, 0xd3, 0x7f, 0xec, 0xa1, 0x83, 0xbf, 0xaf, 0x23, 0x2e, 0xae, 0xc1, 0xbc, + 0x1f, 0x8f, 0x25, 0x2b, 0x32, 0x22, 0x70, 0xd2, 0x17, 0x1a, 0xb2, 0x4e, 0x07, 0x86, 0xac, 0xd5, + 0x41, 0x50, 0xcd, 0xf0, 0x05, 0x48, 0xa4, 0x29, 0x62, 0xc2, 0xea, 0xf1, 0x50, 0x58, 0xcd, 0xf2, + 0x93, 0xdb, 0x48, 0xa6, 0xff, 0x07, 0x81, 0xc5, 0x9c, 0x3a, 0x74, 0xa4, 0x2a, 0x3d, 0x06, 0x91, + 0x7a, 0xfc, 0xe4, 0x43, 0xce, 0x80, 0x1b, 0x25, 0x82, 0x6e, 0x24, 0x6d, 0xc0, 0x5a, 0x28, 0x6f, + 0x26, 0xfa, 0x11, 0xd5, 0x6b, 0x0f, 0xb3, 0x1e, 0xb9, 0x4e, 0x54, 0x62, 0x8d, 0x2b, 0x99, 0x2d, + 0xfa, 0x25, 0x53, 0x90, 0x23, 0x79, 0x8f, 0xee, 0x2a, 0xc8, 0x9a, 0xdd, 0xb8, 
0xaf, 0x40, 0xba, + 0xef, 0x8c, 0x7b, 0xe8, 0x7d, 0xbb, 0xc4, 0xbb, 0xf4, 0x91, 0xbd, 0x24, 0x53, 0x0c, 0xe9, 0xf7, + 0x02, 0xcc, 0xfb, 0xc0, 0x68, 0x1d, 0xe6, 0xbc, 0xee, 0xcf, 0x2d, 0x0d, 0x3d, 0x80, 0x7d, 0x06, + 0xc4, 0x20, 0x6a, 0x87, 0xbd, 0x41, 0xa4, 0x0f, 0x76, 0x35, 0xdf, 0xb7, 0x30, 0xad, 0x1c, 0x92, + 0xb2, 0xf3, 0x1f, 0xbd, 0x06, 0xa9, 0xbe, 0xae, 0x11, 0x27, 0xf6, 0x32, 0xc1, 0xa0, 0x72, 0x44, + 0xe5, 0x8f, 0x74, 0x8d, 0xc8, 0x0e, 0x96, 0x74, 0x0b, 0x52, 0xf6, 0x13, 0xdf, 0x24, 0xcd, 0x41, + 0xba, 0xf8, 0xa8, 0x51, 0xae, 0x67, 0x05, 0x04, 0x30, 0x5d, 0xa9, 0x1d, 0xec, 0x96, 0xeb, 0xd9, + 0x84, 0xb4, 0xee, 0x6d, 0x3d, 0xac, 0x09, 0xfd, 0x98, 0x1e, 0x49, 0x54, 0xfb, 0x59, 0x08, 0x6d, + 0x3f, 0x37, 0xb8, 0xcb, 0x69, 0x44, 0xe3, 0xf9, 0x85, 0x00, 0x2b, 0xa1, 0x78, 0xe8, 0x8e, 0xbf, + 0xe5, 0xbc, 0x1e, 0xcb, 0xd3, 0xdf, 0x6c, 0xfe, 0x54, 0xa0, 0xcd, 0xe6, 0x5d, 0xae, 0xd9, 0x7c, + 0x69, 0x24, 0xbd, 0xbf, 0xcd, 0x2c, 0x45, 0x74, 0x99, 0xf5, 0x46, 0x61, 0xaf, 0xac, 0x1c, 0xd5, + 0xe8, 0xaf, 0xd7, 0x65, 0x2e, 0x43, 0xd6, 0xee, 0x1a, 0xd9, 0xa7, 0x4b, 0xf5, 0x46, 0x81, 0xfb, + 0x4c, 0x69, 0x19, 0x10, 0xb3, 0xa1, 0xff, 0x5b, 0xb8, 0xcf, 0x04, 0x58, 0xe2, 0xc0, 0xcc, 0xa4, + 0xbe, 0x57, 0x01, 0x02, 0xf7, 0x2a, 0x60, 0x1b, 0x96, 0xed, 0x22, 0x95, 0x7a, 0xad, 0xa5, 0xf4, + 0xb0, 0xa9, 0xd8, 0x2b, 0xcc, 0x77, 0x2e, 0x75, 0xd5, 0xa7, 0x6c, 0x74, 0x74, 0x88, 0x4d, 0x9b, + 0xf1, 0x73, 0x18, 0x96, 0xec, 0xfc, 0x47, 0x80, 0xd9, 0x4a, 0x0b, 0xeb, 0xc4, 0x3e, 0x8f, 0x1a, + 0x2c, 0x72, 0x1f, 0xd4, 0xa1, 0xf5, 0x88, 0xef, 0xec, 0x9c, 0x0d, 0x8a, 0x1b, 0xb1, 0x5f, 0xe1, + 0x49, 0x53, 0xe8, 0xd4, 0xf7, 0x31, 0x20, 0x37, 0x31, 0x7a, 0x71, 0x88, 0x32, 0xc4, 0x35, 0xc5, + 0x9b, 0x23, 0xb0, 0x3c, 0x39, 0x6f, 0x41, 0xda, 0xf9, 0x2c, 0x0c, 0x2d, 0x7b, 0xdf, 0xad, 0xf9, + 0xbe, 0x1a, 0x13, 0x57, 0x02, 0x50, 0x97, 0x6e, 0xe7, 0xbf, 0x33, 0x00, 0x83, 0xd1, 0x04, 0x7a, + 0x00, 0x0b, 0xfe, 0x2f, 0x53, 0xd0, 0x5a, 0xcc, 0x77, 0x51, 0xe2, 0x7a, 0xf8, 0xa2, 0xa7, 0xd3, + 0x03, 0x58, 0xf0, 0xbf, 0x07, 0x1d, 0x30, 0x0b, 0x79, 0x17, 0x3b, 0x60, 0x16, 0xfa, 0xea, 0x74, + 0x0a, 0x75, 0xe0, 0x72, 0xc4, 0x9b, 0x30, 0xf4, 0xd2, 0x78, 0xef, 0x0b, 0xc5, 0x97, 0xc7, 0x7c, + 0xa5, 0x26, 0x4d, 0x21, 0x13, 0xae, 0x44, 0xbe, 0x00, 0x42, 0x5b, 0xe3, 0xbe, 0x9e, 0x12, 0x5f, + 0x19, 0x03, 0xd3, 0x93, 0xd9, 0x07, 0x31, 0x7a, 0xea, 0x8c, 0x5e, 0x19, 0xfb, 0x75, 0x88, 0x78, + 0x6b, 0xfc, 0x21, 0xb6, 0x34, 0x85, 0xf6, 0x61, 0xde, 0x37, 0x92, 0x45, 0x62, 0xe8, 0x9c, 0x96, + 0x32, 0x5e, 0x8b, 0x99, 0xe1, 0x52, 0x4e, 0xbe, 0xb1, 0xe1, 0x80, 0xd3, 0xf0, 0xfc, 0x73, 0xc0, + 0x29, 0x64, 0xce, 0x18, 0x34, 0x7f, 0x20, 0x2f, 0x87, 0x99, 0x3f, 0x3c, 0xb1, 0x87, 0x99, 0x3f, + 0x22, 0xc9, 0x4b, 0x53, 0xe8, 0x03, 0xc8, 0xf0, 0x63, 0x13, 0xb4, 0x11, 0x3b, 0xfe, 0x11, 0xaf, + 0x46, 0x2d, 0xfb, 0x59, 0xf2, 0x5d, 0xfa, 0x80, 0x65, 0xe8, 0xb4, 0x60, 0xc0, 0x32, 0xa2, 0xb9, + 0x9f, 0xb2, 0xf3, 0x13, 0xd7, 0x01, 0x0f, 0xf2, 0x53, 0x58, 0xd3, 0x3f, 0xc8, 0x4f, 0xa1, 0x6d, + 0xb3, 0x34, 0xb5, 0xf3, 0xe3, 0x34, 0xa4, 0x9c, 0x44, 0xda, 0x80, 0x17, 0x02, 0x9d, 0x06, 0xba, + 0x1a, 0xdf, 0x7f, 0x89, 0xd7, 0x22, 0xd7, 0x3d, 0x75, 0x1f, 0xc3, 0xa5, 0xa1, 0xde, 0x01, 0x6d, + 0xfa, 0xe9, 0xc2, 0xfa, 0x17, 0xf1, 0x7a, 0x0c, 0x46, 0x90, 0x37, 0x9f, 0x0b, 0x36, 0x47, 0x15, + 0xb7, 0x3c, 0xef, 0xa8, 0xf8, 0xff, 0x98, 0xde, 0x5b, 0xc1, 0xc8, 0x97, 0x78, 0xbd, 0x42, 0x63, + 0xfe, 0x46, 0x2c, 0x8e, 0x27, 0xe1, 0x23, 0xef, 0xc2, 0xf4, 0x55, 0x63, 0x88, 0x53, 0x2e, 0xb4, + 0x08, 0x14, 0xa5, 0x38, 0x94, 0xe0, 0x06, 0x82, 0xb1, 0x13, 0x24, 0x0e, 0x8b, 0x9a, 0x1b, 0xb1, + 0x38, 
0xfe, 0x68, 0xf7, 0x5d, 0xed, 0x83, 0x68, 0x1f, 0x2e, 0x03, 0x06, 0xd1, 0x1e, 0x52, 0x0b, + 0x48, 0x53, 0x77, 0xdf, 0x01, 0x68, 0x5a, 0x9a, 0x42, 0xfb, 0x16, 0xb4, 0x31, 0x34, 0xe8, 0x7b, + 0x4f, 0xc3, 0x9d, 0xd6, 0x41, 0x8f, 0x68, 0x86, 0x6e, 0xe5, 0x7e, 0x3d, 0xeb, 0x34, 0x4d, 0x73, + 0x4d, 0x4b, 0xa3, 0xed, 0x43, 0x31, 0xfd, 0x38, 0xd9, 0xb4, 0xb4, 0x93, 0x69, 0x07, 0xff, 0x8d, + 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x47, 0x5d, 0xbf, 0x76, 0x3a, 0x30, 0x00, 0x00, +} diff --git a/cluster-autoscaler/vendor/github.com/container-storage-interface/spec/lib/go/csi/v0/csi.pb.go b/cluster-autoscaler/vendor/github.com/container-storage-interface/spec/lib/go/csi/v0/csi.pb.go deleted file mode 100644 index 612cfdd71dc8..000000000000 --- a/cluster-autoscaler/vendor/github.com/container-storage-interface/spec/lib/go/csi/v0/csi.pb.go +++ /dev/null @@ -1,4991 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: csi.proto - -package csi - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import wrappers "github.com/golang/protobuf/ptypes/wrappers" - -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type PluginCapability_Service_Type int32 - -const ( - PluginCapability_Service_UNKNOWN PluginCapability_Service_Type = 0 - // CONTROLLER_SERVICE indicates that the Plugin provides RPCs for - // the ControllerService. Plugins SHOULD provide this capability. - // In rare cases certain plugins may wish to omit the - // ControllerService entirely from their implementation, but such - // SHOULD NOT be the common case. - // The presence of this capability determines whether the CO will - // attempt to invoke the REQUIRED ControllerService RPCs, as well - // as specific RPCs as indicated by ControllerGetCapabilities. - PluginCapability_Service_CONTROLLER_SERVICE PluginCapability_Service_Type = 1 - // ACCESSIBILITY_CONSTRAINTS indicates that the volumes for this - // plugin may not be equally accessible by all nodes in the - // cluster. The CO MUST use the topology information returned by - // CreateVolumeRequest along with the topology information - // returned by NodeGetInfo to ensure that a given volume is - // accessible from a given node when scheduling workloads. 
- PluginCapability_Service_ACCESSIBILITY_CONSTRAINTS PluginCapability_Service_Type = 2 -) - -var PluginCapability_Service_Type_name = map[int32]string{ - 0: "UNKNOWN", - 1: "CONTROLLER_SERVICE", - 2: "ACCESSIBILITY_CONSTRAINTS", -} -var PluginCapability_Service_Type_value = map[string]int32{ - "UNKNOWN": 0, - "CONTROLLER_SERVICE": 1, - "ACCESSIBILITY_CONSTRAINTS": 2, -} - -func (x PluginCapability_Service_Type) String() string { - return proto.EnumName(PluginCapability_Service_Type_name, int32(x)) -} -func (PluginCapability_Service_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{4, 0, 0} -} - -type VolumeCapability_AccessMode_Mode int32 - -const ( - VolumeCapability_AccessMode_UNKNOWN VolumeCapability_AccessMode_Mode = 0 - // Can only be published once as read/write on a single node, at - // any given time. - VolumeCapability_AccessMode_SINGLE_NODE_WRITER VolumeCapability_AccessMode_Mode = 1 - // Can only be published once as readonly on a single node, at - // any given time. - VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY VolumeCapability_AccessMode_Mode = 2 - // Can be published as readonly at multiple nodes simultaneously. - VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY VolumeCapability_AccessMode_Mode = 3 - // Can be published at multiple nodes simultaneously. Only one of - // the node can be used as read/write. The rest will be readonly. - VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER VolumeCapability_AccessMode_Mode = 4 - // Can be published as read/write at multiple nodes - // simultaneously. - VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER VolumeCapability_AccessMode_Mode = 5 -) - -var VolumeCapability_AccessMode_Mode_name = map[int32]string{ - 0: "UNKNOWN", - 1: "SINGLE_NODE_WRITER", - 2: "SINGLE_NODE_READER_ONLY", - 3: "MULTI_NODE_READER_ONLY", - 4: "MULTI_NODE_SINGLE_WRITER", - 5: "MULTI_NODE_MULTI_WRITER", -} -var VolumeCapability_AccessMode_Mode_value = map[string]int32{ - "UNKNOWN": 0, - "SINGLE_NODE_WRITER": 1, - "SINGLE_NODE_READER_ONLY": 2, - "MULTI_NODE_READER_ONLY": 3, - "MULTI_NODE_SINGLE_WRITER": 4, - "MULTI_NODE_MULTI_WRITER": 5, -} - -func (x VolumeCapability_AccessMode_Mode) String() string { - return proto.EnumName(VolumeCapability_AccessMode_Mode_name, int32(x)) -} -func (VolumeCapability_AccessMode_Mode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{10, 2, 0} -} - -type ControllerServiceCapability_RPC_Type int32 - -const ( - ControllerServiceCapability_RPC_UNKNOWN ControllerServiceCapability_RPC_Type = 0 - ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME ControllerServiceCapability_RPC_Type = 1 - ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME ControllerServiceCapability_RPC_Type = 2 - ControllerServiceCapability_RPC_LIST_VOLUMES ControllerServiceCapability_RPC_Type = 3 - ControllerServiceCapability_RPC_GET_CAPACITY ControllerServiceCapability_RPC_Type = 4 - // Currently the only way to consume a snapshot is to create - // a volume from it. Therefore plugins supporting - // CREATE_DELETE_SNAPSHOT MUST support creating volume from - // snapshot. - ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT ControllerServiceCapability_RPC_Type = 5 - // LIST_SNAPSHOTS is NOT REQUIRED. For plugins that need to upload - // a snapshot after it is being cut, LIST_SNAPSHOTS COULD be used - // with the snapshot_id as the filter to query whether the - // uploading process is complete or not. 
- ControllerServiceCapability_RPC_LIST_SNAPSHOTS ControllerServiceCapability_RPC_Type = 6 -) - -var ControllerServiceCapability_RPC_Type_name = map[int32]string{ - 0: "UNKNOWN", - 1: "CREATE_DELETE_VOLUME", - 2: "PUBLISH_UNPUBLISH_VOLUME", - 3: "LIST_VOLUMES", - 4: "GET_CAPACITY", - 5: "CREATE_DELETE_SNAPSHOT", - 6: "LIST_SNAPSHOTS", -} -var ControllerServiceCapability_RPC_Type_value = map[string]int32{ - "UNKNOWN": 0, - "CREATE_DELETE_VOLUME": 1, - "PUBLISH_UNPUBLISH_VOLUME": 2, - "LIST_VOLUMES": 3, - "GET_CAPACITY": 4, - "CREATE_DELETE_SNAPSHOT": 5, - "LIST_SNAPSHOTS": 6, -} - -func (x ControllerServiceCapability_RPC_Type) String() string { - return proto.EnumName(ControllerServiceCapability_RPC_Type_name, int32(x)) -} -func (ControllerServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{29, 0, 0} -} - -type SnapshotStatus_Type int32 - -const ( - SnapshotStatus_UNKNOWN SnapshotStatus_Type = 0 - // A snapshot is ready for use. - SnapshotStatus_READY SnapshotStatus_Type = 1 - // A snapshot is cut and is now being uploaded. - // Some cloud providers and storage systems uploads the snapshot - // to the cloud after the snapshot is cut. During this phase, - // `thaw` can be done so the application can be running again if - // `freeze` was done before taking the snapshot. - SnapshotStatus_UPLOADING SnapshotStatus_Type = 2 - // An error occurred during the snapshot uploading process. - // This error status is specific for uploading because - // `CreateSnaphot` is a blocking call before the snapshot is - // cut and therefore it SHOULD NOT come back with an error - // status when an error occurs. Instead a gRPC error code SHALL - // be returned by `CreateSnapshot` when an error occurs before - // a snapshot is cut. 
- SnapshotStatus_ERROR_UPLOADING SnapshotStatus_Type = 3 -) - -var SnapshotStatus_Type_name = map[int32]string{ - 0: "UNKNOWN", - 1: "READY", - 2: "UPLOADING", - 3: "ERROR_UPLOADING", -} -var SnapshotStatus_Type_value = map[string]int32{ - "UNKNOWN": 0, - "READY": 1, - "UPLOADING": 2, - "ERROR_UPLOADING": 3, -} - -func (x SnapshotStatus_Type) String() string { - return proto.EnumName(SnapshotStatus_Type_name, int32(x)) -} -func (SnapshotStatus_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{33, 0} -} - -type NodeServiceCapability_RPC_Type int32 - -const ( - NodeServiceCapability_RPC_UNKNOWN NodeServiceCapability_RPC_Type = 0 - NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME NodeServiceCapability_RPC_Type = 1 -) - -var NodeServiceCapability_RPC_Type_name = map[int32]string{ - 0: "UNKNOWN", - 1: "STAGE_UNSTAGE_VOLUME", -} -var NodeServiceCapability_RPC_Type_value = map[string]int32{ - "UNKNOWN": 0, - "STAGE_UNSTAGE_VOLUME": 1, -} - -func (x NodeServiceCapability_RPC_Type) String() string { - return proto.EnumName(NodeServiceCapability_RPC_Type_name, int32(x)) -} -func (NodeServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{50, 0, 0} -} - -type GetPluginInfoRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetPluginInfoRequest) Reset() { *m = GetPluginInfoRequest{} } -func (m *GetPluginInfoRequest) String() string { return proto.CompactTextString(m) } -func (*GetPluginInfoRequest) ProtoMessage() {} -func (*GetPluginInfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{0} -} -func (m *GetPluginInfoRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPluginInfoRequest.Unmarshal(m, b) -} -func (m *GetPluginInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPluginInfoRequest.Marshal(b, m, deterministic) -} -func (dst *GetPluginInfoRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPluginInfoRequest.Merge(dst, src) -} -func (m *GetPluginInfoRequest) XXX_Size() int { - return xxx_messageInfo_GetPluginInfoRequest.Size(m) -} -func (m *GetPluginInfoRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetPluginInfoRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetPluginInfoRequest proto.InternalMessageInfo - -type GetPluginInfoResponse struct { - // The name MUST follow reverse domain name notation format - // (https://en.wikipedia.org/wiki/Reverse_domain_name_notation). - // It SHOULD include the plugin's host company name and the plugin - // name, to minimize the possibility of collisions. It MUST be 63 - // characters or less, beginning and ending with an alphanumeric - // character ([a-z0-9A-Z]) with dashes (-), underscores (_), - // dots (.), and alphanumerics between. This field is REQUIRED. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // This field is REQUIRED. Value of this field is opaque to the CO. - VendorVersion string `protobuf:"bytes,2,opt,name=vendor_version,json=vendorVersion" json:"vendor_version,omitempty"` - // This field is OPTIONAL. Values are opaque to the CO. 
- Manifest map[string]string `protobuf:"bytes,3,rep,name=manifest" json:"manifest,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetPluginInfoResponse) Reset() { *m = GetPluginInfoResponse{} } -func (m *GetPluginInfoResponse) String() string { return proto.CompactTextString(m) } -func (*GetPluginInfoResponse) ProtoMessage() {} -func (*GetPluginInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{1} -} -func (m *GetPluginInfoResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPluginInfoResponse.Unmarshal(m, b) -} -func (m *GetPluginInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPluginInfoResponse.Marshal(b, m, deterministic) -} -func (dst *GetPluginInfoResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPluginInfoResponse.Merge(dst, src) -} -func (m *GetPluginInfoResponse) XXX_Size() int { - return xxx_messageInfo_GetPluginInfoResponse.Size(m) -} -func (m *GetPluginInfoResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetPluginInfoResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetPluginInfoResponse proto.InternalMessageInfo - -func (m *GetPluginInfoResponse) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *GetPluginInfoResponse) GetVendorVersion() string { - if m != nil { - return m.VendorVersion - } - return "" -} - -func (m *GetPluginInfoResponse) GetManifest() map[string]string { - if m != nil { - return m.Manifest - } - return nil -} - -type GetPluginCapabilitiesRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetPluginCapabilitiesRequest) Reset() { *m = GetPluginCapabilitiesRequest{} } -func (m *GetPluginCapabilitiesRequest) String() string { return proto.CompactTextString(m) } -func (*GetPluginCapabilitiesRequest) ProtoMessage() {} -func (*GetPluginCapabilitiesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{2} -} -func (m *GetPluginCapabilitiesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPluginCapabilitiesRequest.Unmarshal(m, b) -} -func (m *GetPluginCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPluginCapabilitiesRequest.Marshal(b, m, deterministic) -} -func (dst *GetPluginCapabilitiesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPluginCapabilitiesRequest.Merge(dst, src) -} -func (m *GetPluginCapabilitiesRequest) XXX_Size() int { - return xxx_messageInfo_GetPluginCapabilitiesRequest.Size(m) -} -func (m *GetPluginCapabilitiesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetPluginCapabilitiesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetPluginCapabilitiesRequest proto.InternalMessageInfo - -type GetPluginCapabilitiesResponse struct { - // All the capabilities that the controller service supports. This - // field is OPTIONAL. 
- Capabilities []*PluginCapability `protobuf:"bytes,2,rep,name=capabilities" json:"capabilities,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetPluginCapabilitiesResponse) Reset() { *m = GetPluginCapabilitiesResponse{} } -func (m *GetPluginCapabilitiesResponse) String() string { return proto.CompactTextString(m) } -func (*GetPluginCapabilitiesResponse) ProtoMessage() {} -func (*GetPluginCapabilitiesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{3} -} -func (m *GetPluginCapabilitiesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPluginCapabilitiesResponse.Unmarshal(m, b) -} -func (m *GetPluginCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPluginCapabilitiesResponse.Marshal(b, m, deterministic) -} -func (dst *GetPluginCapabilitiesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPluginCapabilitiesResponse.Merge(dst, src) -} -func (m *GetPluginCapabilitiesResponse) XXX_Size() int { - return xxx_messageInfo_GetPluginCapabilitiesResponse.Size(m) -} -func (m *GetPluginCapabilitiesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetPluginCapabilitiesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetPluginCapabilitiesResponse proto.InternalMessageInfo - -func (m *GetPluginCapabilitiesResponse) GetCapabilities() []*PluginCapability { - if m != nil { - return m.Capabilities - } - return nil -} - -// Specifies a capability of the plugin. -type PluginCapability struct { - // Types that are valid to be assigned to Type: - // *PluginCapability_Service_ - Type isPluginCapability_Type `protobuf_oneof:"type"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PluginCapability) Reset() { *m = PluginCapability{} } -func (m *PluginCapability) String() string { return proto.CompactTextString(m) } -func (*PluginCapability) ProtoMessage() {} -func (*PluginCapability) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{4} -} -func (m *PluginCapability) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PluginCapability.Unmarshal(m, b) -} -func (m *PluginCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PluginCapability.Marshal(b, m, deterministic) -} -func (dst *PluginCapability) XXX_Merge(src proto.Message) { - xxx_messageInfo_PluginCapability.Merge(dst, src) -} -func (m *PluginCapability) XXX_Size() int { - return xxx_messageInfo_PluginCapability.Size(m) -} -func (m *PluginCapability) XXX_DiscardUnknown() { - xxx_messageInfo_PluginCapability.DiscardUnknown(m) -} - -var xxx_messageInfo_PluginCapability proto.InternalMessageInfo - -type isPluginCapability_Type interface { - isPluginCapability_Type() -} - -type PluginCapability_Service_ struct { - Service *PluginCapability_Service `protobuf:"bytes,1,opt,name=service,oneof"` -} - -func (*PluginCapability_Service_) isPluginCapability_Type() {} - -func (m *PluginCapability) GetType() isPluginCapability_Type { - if m != nil { - return m.Type - } - return nil -} - -func (m *PluginCapability) GetService() *PluginCapability_Service { - if x, ok := m.GetType().(*PluginCapability_Service_); ok { - return x.Service - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
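// Editor's note (not part of the generated file): a sketch of how a CO
// might inspect these v0 capability wrappers after calling
// GetPluginCapabilities. The generated getters are nil-safe, so
// GetService().GetType() can be chained even when the oneof holds no
// Service variant. `caps` is a hypothetical []*PluginCapability taken from
// a GetPluginCapabilitiesResponse.
//
//	hasControllerService := false
//	for _, c := range caps {
//		switch c.GetService().GetType() {
//		case PluginCapability_Service_CONTROLLER_SERVICE:
//			hasControllerService = true
//		case PluginCapability_Service_ACCESSIBILITY_CONSTRAINTS:
//			// Volumes may not be reachable from every node; the CO must
//			// respect the topology reported by CreateVolume and NodeGetInfo.
//		}
//	}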
-func (*PluginCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _PluginCapability_OneofMarshaler, _PluginCapability_OneofUnmarshaler, _PluginCapability_OneofSizer, []interface{}{ - (*PluginCapability_Service_)(nil), - } -} - -func _PluginCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*PluginCapability) - // type - switch x := m.Type.(type) { - case *PluginCapability_Service_: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Service); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("PluginCapability.Type has unexpected type %T", x) - } - return nil -} - -func _PluginCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*PluginCapability) - switch tag { - case 1: // type.service - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(PluginCapability_Service) - err := b.DecodeMessage(msg) - m.Type = &PluginCapability_Service_{msg} - return true, err - default: - return false, nil - } -} - -func _PluginCapability_OneofSizer(msg proto.Message) (n int) { - m := msg.(*PluginCapability) - // type - switch x := m.Type.(type) { - case *PluginCapability_Service_: - s := proto.Size(x.Service) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type PluginCapability_Service struct { - Type PluginCapability_Service_Type `protobuf:"varint,1,opt,name=type,enum=csi.v0.PluginCapability_Service_Type" json:"type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PluginCapability_Service) Reset() { *m = PluginCapability_Service{} } -func (m *PluginCapability_Service) String() string { return proto.CompactTextString(m) } -func (*PluginCapability_Service) ProtoMessage() {} -func (*PluginCapability_Service) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{4, 0} -} -func (m *PluginCapability_Service) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PluginCapability_Service.Unmarshal(m, b) -} -func (m *PluginCapability_Service) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PluginCapability_Service.Marshal(b, m, deterministic) -} -func (dst *PluginCapability_Service) XXX_Merge(src proto.Message) { - xxx_messageInfo_PluginCapability_Service.Merge(dst, src) -} -func (m *PluginCapability_Service) XXX_Size() int { - return xxx_messageInfo_PluginCapability_Service.Size(m) -} -func (m *PluginCapability_Service) XXX_DiscardUnknown() { - xxx_messageInfo_PluginCapability_Service.DiscardUnknown(m) -} - -var xxx_messageInfo_PluginCapability_Service proto.InternalMessageInfo - -func (m *PluginCapability_Service) GetType() PluginCapability_Service_Type { - if m != nil { - return m.Type - } - return PluginCapability_Service_UNKNOWN -} - -type ProbeRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ProbeRequest) Reset() { *m = ProbeRequest{} } -func (m *ProbeRequest) String() string { return proto.CompactTextString(m) } -func (*ProbeRequest) ProtoMessage() {} -func (*ProbeRequest) Descriptor() ([]byte, []int) { - return 
fileDescriptor_csi_31237507707d37ec, []int{5} -} -func (m *ProbeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ProbeRequest.Unmarshal(m, b) -} -func (m *ProbeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ProbeRequest.Marshal(b, m, deterministic) -} -func (dst *ProbeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProbeRequest.Merge(dst, src) -} -func (m *ProbeRequest) XXX_Size() int { - return xxx_messageInfo_ProbeRequest.Size(m) -} -func (m *ProbeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ProbeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ProbeRequest proto.InternalMessageInfo - -type ProbeResponse struct { - // Readiness allows a plugin to report its initialization status back - // to the CO. Initialization for some plugins MAY be time consuming - // and it is important for a CO to distinguish between the following - // cases: - // - // 1) The plugin is in an unhealthy state and MAY need restarting. In - // this case a gRPC error code SHALL be returned. - // 2) The plugin is still initializing, but is otherwise perfectly - // healthy. In this case a successful response SHALL be returned - // with a readiness value of `false`. Calls to the plugin's - // Controller and/or Node services MAY fail due to an incomplete - // initialization state. - // 3) The plugin has finished initializing and is ready to service - // calls to its Controller and/or Node services. A successful - // response is returned with a readiness value of `true`. - // - // This field is OPTIONAL. If not present, the caller SHALL assume - // that the plugin is in a ready state and is accepting calls to its - // Controller and/or Node services (according to the plugin's reported - // capabilities). - Ready *wrappers.BoolValue `protobuf:"bytes,1,opt,name=ready" json:"ready,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ProbeResponse) Reset() { *m = ProbeResponse{} } -func (m *ProbeResponse) String() string { return proto.CompactTextString(m) } -func (*ProbeResponse) ProtoMessage() {} -func (*ProbeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{6} -} -func (m *ProbeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ProbeResponse.Unmarshal(m, b) -} -func (m *ProbeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ProbeResponse.Marshal(b, m, deterministic) -} -func (dst *ProbeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProbeResponse.Merge(dst, src) -} -func (m *ProbeResponse) XXX_Size() int { - return xxx_messageInfo_ProbeResponse.Size(m) -} -func (m *ProbeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ProbeResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ProbeResponse proto.InternalMessageInfo - -func (m *ProbeResponse) GetReady() *wrappers.BoolValue { - if m != nil { - return m.Ready - } - return nil -} - -type CreateVolumeRequest struct { - // The suggested name for the storage space. This field is REQUIRED. - // It serves two purposes: - // 1) Idempotency - This name is generated by the CO to achieve - // idempotency. If `CreateVolume` fails, the volume may or may not - // be provisioned. In this case, the CO may call `CreateVolume` - // again, with the same name, to ensure the volume exists. 
The - // Plugin should ensure that multiple `CreateVolume` calls for the - // same name do not result in more than one piece of storage - // provisioned corresponding to that name. If a Plugin is unable to - // enforce idempotency, the CO's error recovery logic could result - // in multiple (unused) volumes being provisioned. - // 2) Suggested name - Some storage systems allow callers to specify - // an identifier by which to refer to the newly provisioned - // storage. If a storage system supports this, it can optionally - // use this name as the identifier for the new volume. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - CapacityRange *CapacityRange `protobuf:"bytes,2,opt,name=capacity_range,json=capacityRange" json:"capacity_range,omitempty"` - // The capabilities that the provisioned volume MUST have: the Plugin - // MUST provision a volume that could satisfy ALL of the - // capabilities specified in this list. The Plugin MUST assume that - // the CO MAY use the provisioned volume later with ANY of the - // capabilities specified in this list. This also enables the CO to do - // early validation: if ANY of the specified volume capabilities are - // not supported by the Plugin, the call SHALL fail. This field is - // REQUIRED. - VolumeCapabilities []*VolumeCapability `protobuf:"bytes,3,rep,name=volume_capabilities,json=volumeCapabilities" json:"volume_capabilities,omitempty"` - // Plugin specific parameters passed in as opaque key-value pairs. - // This field is OPTIONAL. The Plugin is responsible for parsing and - // validating these parameters. COs will treat these as opaque. - Parameters map[string]string `protobuf:"bytes,4,rep,name=parameters" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // Secrets required by plugin to complete volume creation request. - // This field is OPTIONAL. Refer to the `Secrets Requirements` - // section on how to use this field. - ControllerCreateSecrets map[string]string `protobuf:"bytes,5,rep,name=controller_create_secrets,json=controllerCreateSecrets" json:"controller_create_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // If specified, the new volume will be pre-populated with data from - // this source. This field is OPTIONAL. - VolumeContentSource *VolumeContentSource `protobuf:"bytes,6,opt,name=volume_content_source,json=volumeContentSource" json:"volume_content_source,omitempty"` - // Specifies where (regions, zones, racks, etc.) the provisioned - // volume MUST be accessible from. - // An SP SHALL advertise the requirements for topological - // accessibility information in documentation. COs SHALL only specify - // topological accessibility information supported by the SP. - // This field is OPTIONAL. - // This field SHALL NOT be specified unless the SP has the - // ACCESSIBILITY_CONSTRAINTS plugin capability. - // If this field is not specified and the SP has the - // ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY choose - // where the provisioned volume is accessible from. 
- AccessibilityRequirements *TopologyRequirement `protobuf:"bytes,7,opt,name=accessibility_requirements,json=accessibilityRequirements" json:"accessibility_requirements,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CreateVolumeRequest) Reset() { *m = CreateVolumeRequest{} } -func (m *CreateVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*CreateVolumeRequest) ProtoMessage() {} -func (*CreateVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{7} -} -func (m *CreateVolumeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CreateVolumeRequest.Unmarshal(m, b) -} -func (m *CreateVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CreateVolumeRequest.Marshal(b, m, deterministic) -} -func (dst *CreateVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateVolumeRequest.Merge(dst, src) -} -func (m *CreateVolumeRequest) XXX_Size() int { - return xxx_messageInfo_CreateVolumeRequest.Size(m) -} -func (m *CreateVolumeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CreateVolumeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CreateVolumeRequest proto.InternalMessageInfo - -func (m *CreateVolumeRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *CreateVolumeRequest) GetCapacityRange() *CapacityRange { - if m != nil { - return m.CapacityRange - } - return nil -} - -func (m *CreateVolumeRequest) GetVolumeCapabilities() []*VolumeCapability { - if m != nil { - return m.VolumeCapabilities - } - return nil -} - -func (m *CreateVolumeRequest) GetParameters() map[string]string { - if m != nil { - return m.Parameters - } - return nil -} - -func (m *CreateVolumeRequest) GetControllerCreateSecrets() map[string]string { - if m != nil { - return m.ControllerCreateSecrets - } - return nil -} - -func (m *CreateVolumeRequest) GetVolumeContentSource() *VolumeContentSource { - if m != nil { - return m.VolumeContentSource - } - return nil -} - -func (m *CreateVolumeRequest) GetAccessibilityRequirements() *TopologyRequirement { - if m != nil { - return m.AccessibilityRequirements - } - return nil -} - -// Specifies what source the volume will be created from. One of the -// type fields MUST be specified. 
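// Editor's note (not part of the generated file): a sketch of a CO
// assembling a v0 CreateVolumeRequest. Per the idempotency contract
// documented above, a retry reuses the same Name so the plugin provisions
// at most one volume for it. All field values are illustrative, and `ctrl`
// is a hypothetical ControllerClient; CapacityRange and the
// VolumeCapability oneof wrappers are defined elsewhere in this file.
//
//	req := &CreateVolumeRequest{
//		Name: "pvc-2b4e8d1a", // kept stable across retries for idempotency
//		CapacityRange: &CapacityRange{
//			RequiredBytes: 10 * 1024 * 1024 * 1024, // at least 10 GiB
//		},
//		VolumeCapabilities: []*VolumeCapability{{
//			AccessType: &VolumeCapability_Mount{
//				Mount: &VolumeCapability_MountVolume{FsType: "ext4"},
//			},
//			AccessMode: &VolumeCapability_AccessMode{
//				Mode: VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
//			},
//		}},
//		Parameters: map[string]string{"type": "fast"}, // opaque to the CO
//	}
//	vol, err := ctrl.CreateVolume(ctx, req)
//	// On success, vol.GetVolume() carries the plugin's identity for the
//	// provisioned volume.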
-type VolumeContentSource struct { - // Types that are valid to be assigned to Type: - // *VolumeContentSource_Snapshot - Type isVolumeContentSource_Type `protobuf_oneof:"type"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VolumeContentSource) Reset() { *m = VolumeContentSource{} } -func (m *VolumeContentSource) String() string { return proto.CompactTextString(m) } -func (*VolumeContentSource) ProtoMessage() {} -func (*VolumeContentSource) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{8} -} -func (m *VolumeContentSource) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VolumeContentSource.Unmarshal(m, b) -} -func (m *VolumeContentSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VolumeContentSource.Marshal(b, m, deterministic) -} -func (dst *VolumeContentSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeContentSource.Merge(dst, src) -} -func (m *VolumeContentSource) XXX_Size() int { - return xxx_messageInfo_VolumeContentSource.Size(m) -} -func (m *VolumeContentSource) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeContentSource.DiscardUnknown(m) -} - -var xxx_messageInfo_VolumeContentSource proto.InternalMessageInfo - -type isVolumeContentSource_Type interface { - isVolumeContentSource_Type() -} - -type VolumeContentSource_Snapshot struct { - Snapshot *VolumeContentSource_SnapshotSource `protobuf:"bytes,1,opt,name=snapshot,oneof"` -} - -func (*VolumeContentSource_Snapshot) isVolumeContentSource_Type() {} - -func (m *VolumeContentSource) GetType() isVolumeContentSource_Type { - if m != nil { - return m.Type - } - return nil -} - -func (m *VolumeContentSource) GetSnapshot() *VolumeContentSource_SnapshotSource { - if x, ok := m.GetType().(*VolumeContentSource_Snapshot); ok { - return x.Snapshot - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
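// Editor's note (not part of the generated file): a sketch of selecting the
// oneof above so that the new volume is pre-populated from an existing
// snapshot. The wrapper struct VolumeContentSource_Snapshot picks the
// variant, and a plugin reads it back through the nil-safe
// GetSnapshot().GetId() chain; the snapshot ID and `req` (a
// CreateVolumeRequest, as sketched earlier) are hypothetical.
//
//	req.VolumeContentSource = &VolumeContentSource{
//		Type: &VolumeContentSource_Snapshot{
//			Snapshot: &VolumeContentSource_SnapshotSource{Id: "snap-0042"},
//		},
//	}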
-func (*VolumeContentSource) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _VolumeContentSource_OneofMarshaler, _VolumeContentSource_OneofUnmarshaler, _VolumeContentSource_OneofSizer, []interface{}{ - (*VolumeContentSource_Snapshot)(nil), - } -} - -func _VolumeContentSource_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*VolumeContentSource) - // type - switch x := m.Type.(type) { - case *VolumeContentSource_Snapshot: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Snapshot); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("VolumeContentSource.Type has unexpected type %T", x) - } - return nil -} - -func _VolumeContentSource_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*VolumeContentSource) - switch tag { - case 1: // type.snapshot - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(VolumeContentSource_SnapshotSource) - err := b.DecodeMessage(msg) - m.Type = &VolumeContentSource_Snapshot{msg} - return true, err - default: - return false, nil - } -} - -func _VolumeContentSource_OneofSizer(msg proto.Message) (n int) { - m := msg.(*VolumeContentSource) - // type - switch x := m.Type.(type) { - case *VolumeContentSource_Snapshot: - s := proto.Size(x.Snapshot) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type VolumeContentSource_SnapshotSource struct { - // Contains identity information for the existing source snapshot. - // This field is REQUIRED. Plugin is REQUIRED to support creating - // volume from snapshot if it supports the capability - // CREATE_DELETE_SNAPSHOT. 
- Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VolumeContentSource_SnapshotSource) Reset() { *m = VolumeContentSource_SnapshotSource{} } -func (m *VolumeContentSource_SnapshotSource) String() string { return proto.CompactTextString(m) } -func (*VolumeContentSource_SnapshotSource) ProtoMessage() {} -func (*VolumeContentSource_SnapshotSource) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{8, 0} -} -func (m *VolumeContentSource_SnapshotSource) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VolumeContentSource_SnapshotSource.Unmarshal(m, b) -} -func (m *VolumeContentSource_SnapshotSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VolumeContentSource_SnapshotSource.Marshal(b, m, deterministic) -} -func (dst *VolumeContentSource_SnapshotSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeContentSource_SnapshotSource.Merge(dst, src) -} -func (m *VolumeContentSource_SnapshotSource) XXX_Size() int { - return xxx_messageInfo_VolumeContentSource_SnapshotSource.Size(m) -} -func (m *VolumeContentSource_SnapshotSource) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeContentSource_SnapshotSource.DiscardUnknown(m) -} - -var xxx_messageInfo_VolumeContentSource_SnapshotSource proto.InternalMessageInfo - -func (m *VolumeContentSource_SnapshotSource) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -type CreateVolumeResponse struct { - // Contains all attributes of the newly created volume that are - // relevant to the CO along with information required by the Plugin - // to uniquely identify the volume. This field is REQUIRED. - Volume *Volume `protobuf:"bytes,1,opt,name=volume" json:"volume,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CreateVolumeResponse) Reset() { *m = CreateVolumeResponse{} } -func (m *CreateVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*CreateVolumeResponse) ProtoMessage() {} -func (*CreateVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{9} -} -func (m *CreateVolumeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CreateVolumeResponse.Unmarshal(m, b) -} -func (m *CreateVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CreateVolumeResponse.Marshal(b, m, deterministic) -} -func (dst *CreateVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateVolumeResponse.Merge(dst, src) -} -func (m *CreateVolumeResponse) XXX_Size() int { - return xxx_messageInfo_CreateVolumeResponse.Size(m) -} -func (m *CreateVolumeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CreateVolumeResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_CreateVolumeResponse proto.InternalMessageInfo - -func (m *CreateVolumeResponse) GetVolume() *Volume { - if m != nil { - return m.Volume - } - return nil -} - -// Specify a capability of a volume. -type VolumeCapability struct { - // Specifies what API the volume will be accessed using. One of the - // following fields MUST be specified. - // - // Types that are valid to be assigned to AccessType: - // *VolumeCapability_Block - // *VolumeCapability_Mount - AccessType isVolumeCapability_AccessType `protobuf_oneof:"access_type"` - // This is a REQUIRED field. 
- AccessMode *VolumeCapability_AccessMode `protobuf:"bytes,3,opt,name=access_mode,json=accessMode" json:"access_mode,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VolumeCapability) Reset() { *m = VolumeCapability{} } -func (m *VolumeCapability) String() string { return proto.CompactTextString(m) } -func (*VolumeCapability) ProtoMessage() {} -func (*VolumeCapability) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{10} -} -func (m *VolumeCapability) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VolumeCapability.Unmarshal(m, b) -} -func (m *VolumeCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VolumeCapability.Marshal(b, m, deterministic) -} -func (dst *VolumeCapability) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeCapability.Merge(dst, src) -} -func (m *VolumeCapability) XXX_Size() int { - return xxx_messageInfo_VolumeCapability.Size(m) -} -func (m *VolumeCapability) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeCapability.DiscardUnknown(m) -} - -var xxx_messageInfo_VolumeCapability proto.InternalMessageInfo - -type isVolumeCapability_AccessType interface { - isVolumeCapability_AccessType() -} - -type VolumeCapability_Block struct { - Block *VolumeCapability_BlockVolume `protobuf:"bytes,1,opt,name=block,oneof"` -} -type VolumeCapability_Mount struct { - Mount *VolumeCapability_MountVolume `protobuf:"bytes,2,opt,name=mount,oneof"` -} - -func (*VolumeCapability_Block) isVolumeCapability_AccessType() {} -func (*VolumeCapability_Mount) isVolumeCapability_AccessType() {} - -func (m *VolumeCapability) GetAccessType() isVolumeCapability_AccessType { - if m != nil { - return m.AccessType - } - return nil -} - -func (m *VolumeCapability) GetBlock() *VolumeCapability_BlockVolume { - if x, ok := m.GetAccessType().(*VolumeCapability_Block); ok { - return x.Block - } - return nil -} - -func (m *VolumeCapability) GetMount() *VolumeCapability_MountVolume { - if x, ok := m.GetAccessType().(*VolumeCapability_Mount); ok { - return x.Mount - } - return nil -} - -func (m *VolumeCapability) GetAccessMode() *VolumeCapability_AccessMode { - if m != nil { - return m.AccessMode - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
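Again, the oneof plumbing below is internal; a plugin validating a capability instead branches on the generated accessors, where exactly one of `GetBlock`/`GetMount` is non-nil for a well-formed capability and `GetAccessMode` is expected on both. Another hedged sketch in the same hypothetical package:

```go
// capabilityKind reports whether a capability requests raw block or
// filesystem (mount) access, using the generated oneof accessors.
func capabilityKind(vc *csi.VolumeCapability) string {
	switch {
	case vc.GetBlock() != nil:
		return "block"
	case vc.GetMount() != nil:
		// An empty FsType means the filesystem is unspecified.
		return "mount (" + vc.GetMount().GetFsType() + ")"
	default:
		return "invalid: no access_type set"
	}
}
```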
-func (*VolumeCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _VolumeCapability_OneofMarshaler, _VolumeCapability_OneofUnmarshaler, _VolumeCapability_OneofSizer, []interface{}{ - (*VolumeCapability_Block)(nil), - (*VolumeCapability_Mount)(nil), - } -} - -func _VolumeCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*VolumeCapability) - // access_type - switch x := m.AccessType.(type) { - case *VolumeCapability_Block: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Block); err != nil { - return err - } - case *VolumeCapability_Mount: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Mount); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("VolumeCapability.AccessType has unexpected type %T", x) - } - return nil -} - -func _VolumeCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*VolumeCapability) - switch tag { - case 1: // access_type.block - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(VolumeCapability_BlockVolume) - err := b.DecodeMessage(msg) - m.AccessType = &VolumeCapability_Block{msg} - return true, err - case 2: // access_type.mount - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(VolumeCapability_MountVolume) - err := b.DecodeMessage(msg) - m.AccessType = &VolumeCapability_Mount{msg} - return true, err - default: - return false, nil - } -} - -func _VolumeCapability_OneofSizer(msg proto.Message) (n int) { - m := msg.(*VolumeCapability) - // access_type - switch x := m.AccessType.(type) { - case *VolumeCapability_Block: - s := proto.Size(x.Block) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *VolumeCapability_Mount: - s := proto.Size(x.Mount) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -// Indicate that the volume will be accessed via the block device API. 
-type VolumeCapability_BlockVolume struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VolumeCapability_BlockVolume) Reset() { *m = VolumeCapability_BlockVolume{} } -func (m *VolumeCapability_BlockVolume) String() string { return proto.CompactTextString(m) } -func (*VolumeCapability_BlockVolume) ProtoMessage() {} -func (*VolumeCapability_BlockVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{10, 0} -} -func (m *VolumeCapability_BlockVolume) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VolumeCapability_BlockVolume.Unmarshal(m, b) -} -func (m *VolumeCapability_BlockVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VolumeCapability_BlockVolume.Marshal(b, m, deterministic) -} -func (dst *VolumeCapability_BlockVolume) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeCapability_BlockVolume.Merge(dst, src) -} -func (m *VolumeCapability_BlockVolume) XXX_Size() int { - return xxx_messageInfo_VolumeCapability_BlockVolume.Size(m) -} -func (m *VolumeCapability_BlockVolume) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeCapability_BlockVolume.DiscardUnknown(m) -} - -var xxx_messageInfo_VolumeCapability_BlockVolume proto.InternalMessageInfo - -// Indicate that the volume will be accessed via the filesystem API. -type VolumeCapability_MountVolume struct { - // The filesystem type. This field is OPTIONAL. - // An empty string is equal to an unspecified field value. - FsType string `protobuf:"bytes,1,opt,name=fs_type,json=fsType" json:"fs_type,omitempty"` - // The mount options that can be used for the volume. This field is - // OPTIONAL. `mount_flags` MAY contain sensitive information. - // Therefore, the CO and the Plugin MUST NOT leak this information - // to untrusted entities. The total size of this repeated field - // SHALL NOT exceed 4 KiB. 
- MountFlags []string `protobuf:"bytes,2,rep,name=mount_flags,json=mountFlags" json:"mount_flags,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *VolumeCapability_MountVolume) Reset() { *m = VolumeCapability_MountVolume{} }
-func (m *VolumeCapability_MountVolume) String() string { return proto.CompactTextString(m) }
-func (*VolumeCapability_MountVolume) ProtoMessage() {}
-func (*VolumeCapability_MountVolume) Descriptor() ([]byte, []int) {
- return fileDescriptor_csi_31237507707d37ec, []int{10, 1}
-}
-func (m *VolumeCapability_MountVolume) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_VolumeCapability_MountVolume.Unmarshal(m, b)
-}
-func (m *VolumeCapability_MountVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_VolumeCapability_MountVolume.Marshal(b, m, deterministic)
-}
-func (dst *VolumeCapability_MountVolume) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeCapability_MountVolume.Merge(dst, src)
-}
-func (m *VolumeCapability_MountVolume) XXX_Size() int {
- return xxx_messageInfo_VolumeCapability_MountVolume.Size(m)
-}
-func (m *VolumeCapability_MountVolume) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeCapability_MountVolume.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_VolumeCapability_MountVolume proto.InternalMessageInfo
-
-func (m *VolumeCapability_MountVolume) GetFsType() string {
- if m != nil {
- return m.FsType
- }
- return ""
-}
-
-func (m *VolumeCapability_MountVolume) GetMountFlags() []string {
- if m != nil {
- return m.MountFlags
- }
- return nil
-}
-
-// Specify how a volume can be accessed.
-type VolumeCapability_AccessMode struct {
- // This field is REQUIRED.
- Mode VolumeCapability_AccessMode_Mode `protobuf:"varint,1,opt,name=mode,enum=csi.v0.VolumeCapability_AccessMode_Mode" json:"mode,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *VolumeCapability_AccessMode) Reset() { *m = VolumeCapability_AccessMode{} }
-func (m *VolumeCapability_AccessMode) String() string { return proto.CompactTextString(m) }
-func (*VolumeCapability_AccessMode) ProtoMessage() {}
-func (*VolumeCapability_AccessMode) Descriptor() ([]byte, []int) {
- return fileDescriptor_csi_31237507707d37ec, []int{10, 2}
-}
-func (m *VolumeCapability_AccessMode) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_VolumeCapability_AccessMode.Unmarshal(m, b)
-}
-func (m *VolumeCapability_AccessMode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_VolumeCapability_AccessMode.Marshal(b, m, deterministic)
-}
-func (dst *VolumeCapability_AccessMode) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeCapability_AccessMode.Merge(dst, src)
-}
-func (m *VolumeCapability_AccessMode) XXX_Size() int {
- return xxx_messageInfo_VolumeCapability_AccessMode.Size(m)
-}
-func (m *VolumeCapability_AccessMode) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeCapability_AccessMode.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_VolumeCapability_AccessMode proto.InternalMessageInfo
-
-func (m *VolumeCapability_AccessMode) GetMode() VolumeCapability_AccessMode_Mode {
- if m != nil {
- return m.Mode
- }
- return VolumeCapability_AccessMode_UNKNOWN
-}
-
-// The capacity of the storage space in bytes. To specify an exact size,
-// `required_bytes` and `limit_bytes` SHALL be set to the same value. At
-// least one of these fields MUST be specified.
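As the comment above spells out, an exact size is requested by setting `required_bytes` and `limit_bytes` to the same value, while leaving a field at zero leaves that bound unspecified. A one-line illustration in the same hypothetical package:

```go
// exactCapacity pins a volume to exactly n bytes by setting both
// bounds of the CapacityRange to the same value.
func exactCapacity(n int64) *csi.CapacityRange {
	return &csi.CapacityRange{RequiredBytes: n, LimitBytes: n}
}
```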
-type CapacityRange struct { - // Volume MUST be at least this big. This field is OPTIONAL. - // A value of 0 is equal to an unspecified field value. - // The value of this field MUST NOT be negative. - RequiredBytes int64 `protobuf:"varint,1,opt,name=required_bytes,json=requiredBytes" json:"required_bytes,omitempty"` - // Volume MUST not be bigger than this. This field is OPTIONAL. - // A value of 0 is equal to an unspecified field value. - // The value of this field MUST NOT be negative. - LimitBytes int64 `protobuf:"varint,2,opt,name=limit_bytes,json=limitBytes" json:"limit_bytes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CapacityRange) Reset() { *m = CapacityRange{} } -func (m *CapacityRange) String() string { return proto.CompactTextString(m) } -func (*CapacityRange) ProtoMessage() {} -func (*CapacityRange) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{11} -} -func (m *CapacityRange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CapacityRange.Unmarshal(m, b) -} -func (m *CapacityRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CapacityRange.Marshal(b, m, deterministic) -} -func (dst *CapacityRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_CapacityRange.Merge(dst, src) -} -func (m *CapacityRange) XXX_Size() int { - return xxx_messageInfo_CapacityRange.Size(m) -} -func (m *CapacityRange) XXX_DiscardUnknown() { - xxx_messageInfo_CapacityRange.DiscardUnknown(m) -} - -var xxx_messageInfo_CapacityRange proto.InternalMessageInfo - -func (m *CapacityRange) GetRequiredBytes() int64 { - if m != nil { - return m.RequiredBytes - } - return 0 -} - -func (m *CapacityRange) GetLimitBytes() int64 { - if m != nil { - return m.LimitBytes - } - return 0 -} - -// The information about a provisioned volume. -type Volume struct { - // The capacity of the volume in bytes. This field is OPTIONAL. If not - // set (value of 0), it indicates that the capacity of the volume is - // unknown (e.g., NFS share). - // The value of this field MUST NOT be negative. - CapacityBytes int64 `protobuf:"varint,1,opt,name=capacity_bytes,json=capacityBytes" json:"capacity_bytes,omitempty"` - // Contains identity information for the created volume. This field is - // REQUIRED. The identity information will be used by the CO in - // subsequent calls to refer to the provisioned volume. - Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` - // Attributes reflect static properties of a volume and MUST be passed - // to volume validation and publishing calls. - // Attributes SHALL be opaque to a CO. Attributes SHALL NOT be mutable - // and SHALL be safe for the CO to cache. Attributes SHOULD NOT - // contain sensitive information. Attributes MAY NOT uniquely identify - // a volume. A volume uniquely identified by `id` SHALL always report - // the same attributes. This field is OPTIONAL and when present MUST - // be passed to volume validation and publishing calls. - Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // If specified, indicates that the volume is not empty and is - // pre-populated with data from the specified source. - // This field is OPTIONAL. 
- ContentSource *VolumeContentSource `protobuf:"bytes,4,opt,name=content_source,json=contentSource" json:"content_source,omitempty"` - // Specifies where (regions, zones, racks, etc.) the provisioned - // volume is accessible from. - // A plugin that returns this field MUST also set the - // ACCESSIBILITY_CONSTRAINTS plugin capability. - // An SP MAY specify multiple topologies to indicate the volume is - // accessible from multiple locations. - // COs MAY use this information along with the topology information - // returned by NodeGetInfo to ensure that a given volume is accessible - // from a given node when scheduling workloads. - // This field is OPTIONAL. If it is not specified, the CO MAY assume - // the volume is equally accessible from all nodes in the cluster and - // may schedule workloads referencing the volume on any available - // node. - // - // Example 1: - // accessible_topology = {"region": "R1", "zone": "Z2"} - // Indicates a volume accessible only from the "region" "R1" and the - // "zone" "Z2". - // - // Example 2: - // accessible_topology = - // {"region": "R1", "zone": "Z2"}, - // {"region": "R1", "zone": "Z3"} - // Indicates a volume accessible from both "zone" "Z2" and "zone" "Z3" - // in the "region" "R1". - AccessibleTopology []*Topology `protobuf:"bytes,5,rep,name=accessible_topology,json=accessibleTopology" json:"accessible_topology,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Volume) Reset() { *m = Volume{} } -func (m *Volume) String() string { return proto.CompactTextString(m) } -func (*Volume) ProtoMessage() {} -func (*Volume) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{12} -} -func (m *Volume) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Volume.Unmarshal(m, b) -} -func (m *Volume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Volume.Marshal(b, m, deterministic) -} -func (dst *Volume) XXX_Merge(src proto.Message) { - xxx_messageInfo_Volume.Merge(dst, src) -} -func (m *Volume) XXX_Size() int { - return xxx_messageInfo_Volume.Size(m) -} -func (m *Volume) XXX_DiscardUnknown() { - xxx_messageInfo_Volume.DiscardUnknown(m) -} - -var xxx_messageInfo_Volume proto.InternalMessageInfo - -func (m *Volume) GetCapacityBytes() int64 { - if m != nil { - return m.CapacityBytes - } - return 0 -} - -func (m *Volume) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -func (m *Volume) GetAttributes() map[string]string { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *Volume) GetContentSource() *VolumeContentSource { - if m != nil { - return m.ContentSource - } - return nil -} - -func (m *Volume) GetAccessibleTopology() []*Topology { - if m != nil { - return m.AccessibleTopology - } - return nil -} - -type TopologyRequirement struct { - // Specifies the list of topologies the provisioned volume MUST be - // accessible from. - // This field is OPTIONAL. If TopologyRequirement is specified either - // requisite or preferred or both MUST be specified. - // - // If requisite is specified, the provisioned volume MUST be - // accessible from at least one of the requisite topologies. - // - // Given - // x = number of topologies provisioned volume is accessible from - // n = number of requisite topologies - // The CO MUST ensure n >= 1. 
The SP MUST ensure x >= 1.
- // If x==n, then the SP MUST make the provisioned volume available to
- // all topologies from the list of requisite topologies. If it is
- // unable to do so, the SP MUST fail the CreateVolume call.
- // For example, if a volume should be accessible from a single zone,
- // and requisite =
- // {"region": "R1", "zone": "Z2"}
- // then the provisioned volume MUST be accessible from the "region"
- // "R1" and the "zone" "Z2".
- // Similarly, if a volume should be accessible from two zones, and
- // requisite =
- // {"region": "R1", "zone": "Z2"},
- // {"region": "R1", "zone": "Z3"}
- // then the provisioned volume MUST be accessible from the "region"
- // "R1" and both "zone" "Z2" and "zone" "Z3".
- //
- // If x>n, then the SP MUST make the provisioned volume available from
- // all topologies from the list of requisite topologies and MAY choose
- // the remaining x-n unique topologies from the list of all possible
- // topologies. If it is unable to do so, the SP MUST fail the
- // CreateVolume call.
- // For example, if a volume should be accessible from two zones, and
- // requisite =
- // {"region": "R1", "zone": "Z2"}
- // then the provisioned volume MUST be accessible from the "region"
- // "R1" and the "zone" "Z2" and the SP may select the second zone
- // independently, e.g. "R1/Z4".
- Requisite []*Topology `protobuf:"bytes,1,rep,name=requisite" json:"requisite,omitempty"`
- // Specifies the list of topologies the CO would prefer the volume to
- // be provisioned in.
- //
- // This field is OPTIONAL. If TopologyRequirement is specified either
- // requisite or preferred or both MUST be specified.
- //
- // An SP MUST attempt to make the provisioned volume available using
- // the preferred topologies in order from first to last.
- //
- // If requisite is specified, all topologies in the preferred list MUST
- // also be present in the list of requisite topologies.
- //
- // If the SP is unable to make the provisioned volume available
- // from any of the preferred topologies, the SP MAY choose a topology
- // from the list of requisite topologies.
- // If the list of requisite topologies is not specified, then the SP
- // MAY choose from the list of all possible topologies.
- // If the list of requisite topologies is specified and the SP is
- // unable to make the provisioned volume available from any of the
- // requisite topologies, it MUST fail the CreateVolume call.
- //
- // Example 1:
- // Given a volume should be accessible from a single zone, and
- // requisite =
- // {"region": "R1", "zone": "Z2"},
- // {"region": "R1", "zone": "Z3"}
- // preferred =
- // {"region": "R1", "zone": "Z3"}
- // then the SP SHOULD first attempt to make the provisioned volume
- // available from "zone" "Z3" in the "region" "R1" and fall back to
- // "zone" "Z2" in the "region" "R1" if that is not possible.
- //
- // Example 2:
- // Given a volume should be accessible from a single zone, and
- // requisite =
- // {"region": "R1", "zone": "Z2"},
- // {"region": "R1", "zone": "Z3"},
- // {"region": "R1", "zone": "Z4"},
- // {"region": "R1", "zone": "Z5"}
- // preferred =
- // {"region": "R1", "zone": "Z4"},
- // {"region": "R1", "zone": "Z2"}
- // then the SP SHOULD first attempt to make the provisioned volume
- // accessible from "zone" "Z4" in the "region" "R1" and fall back to
- // "zone" "Z2" in the "region" "R1" if that is not possible. If that
- // is not possible, the SP may choose between either the "zone"
- // "Z3" or "Z5" in the "region" "R1".
- //
- // Example 3:
- // Given a volume should be accessible from TWO zones (because an
- // opaque parameter in CreateVolumeRequest, for example, specifies
- // the volume is accessible from two zones, aka synchronously
- // replicated), and
- // requisite =
- // {"region": "R1", "zone": "Z2"},
- // {"region": "R1", "zone": "Z3"},
- // {"region": "R1", "zone": "Z4"},
- // {"region": "R1", "zone": "Z5"}
- // preferred =
- // {"region": "R1", "zone": "Z5"},
- // {"region": "R1", "zone": "Z3"}
- // then the SP SHOULD first attempt to make the provisioned volume
- // accessible from the combination of the two "zones" "Z5" and "Z3" in
- // the "region" "R1". If that's not possible, it should fall back to
- // a combination of "Z5" and other possibilities from the list of
- // requisite. If that's not possible, it should fall back to a
- // combination of "Z3" and other possibilities from the list of
- // requisite. If that's not possible, it should fall back to a
- // combination of other possibilities from the list of requisite.
- Preferred []*Topology `protobuf:"bytes,2,rep,name=preferred" json:"preferred,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *TopologyRequirement) Reset() { *m = TopologyRequirement{} }
-func (m *TopologyRequirement) String() string { return proto.CompactTextString(m) }
-func (*TopologyRequirement) ProtoMessage() {}
-func (*TopologyRequirement) Descriptor() ([]byte, []int) {
- return fileDescriptor_csi_31237507707d37ec, []int{13}
-}
-func (m *TopologyRequirement) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_TopologyRequirement.Unmarshal(m, b)
-}
-func (m *TopologyRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_TopologyRequirement.Marshal(b, m, deterministic)
-}
-func (dst *TopologyRequirement) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TopologyRequirement.Merge(dst, src)
-}
-func (m *TopologyRequirement) XXX_Size() int {
- return xxx_messageInfo_TopologyRequirement.Size(m)
-}
-func (m *TopologyRequirement) XXX_DiscardUnknown() {
- xxx_messageInfo_TopologyRequirement.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TopologyRequirement proto.InternalMessageInfo
-
-func (m *TopologyRequirement) GetRequisite() []*Topology {
- if m != nil {
- return m.Requisite
- }
- return nil
-}
-
-func (m *TopologyRequirement) GetPreferred() []*Topology {
- if m != nil {
- return m.Preferred
- }
- return nil
-}
-
-// Topology is a map of topological domains to topological segments.
-// A topological domain is a sub-division of a cluster, like "region",
-// "zone", "rack", etc.
-// A topological segment is a specific instance of a topological domain,
-// like "zone3", "rack3", etc.
-// For example {"com.company/zone": "Z1", "com.company/rack": "R3"}
-// Valid keys have two segments: an optional prefix and name, separated
-// by a slash (/), for example: "com.company.example/zone".
-// The key name segment is required. The prefix is optional.
-// Both the key name and the prefix MUST each be 63 characters or less,
-// begin and end with an alphanumeric character ([a-z0-9A-Z]) and
-// contain only dashes (-), underscores (_), dots (.), or alphanumerics
-// in between, for example "zone".
-// The key prefix MUST follow reverse domain name notation format
-// (https://en.wikipedia.org/wiki/Reverse_domain_name_notation).
-// The key prefix SHOULD include the plugin's host company name and/or
-// the plugin name, to minimize the possibility of collisions with keys
-// from other plugins.
-// If a key prefix is specified, it MUST be identical across all
-// topology keys returned by the SP (across all RPCs).
-// Keys MUST be case-insensitive, meaning the keys "Zone" and "zone"
-// MUST NOT both exist.
-// Each value (topological segment) MUST contain 1 or more strings.
-// Each string MUST be 63 characters or less and begin and end with an
-// alphanumeric character with '-', '_', '.', or alphanumerics in
-// between.
-type Topology struct {
- Segments map[string]string `protobuf:"bytes,1,rep,name=segments" json:"segments,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Topology) Reset() { *m = Topology{} }
-func (m *Topology) String() string { return proto.CompactTextString(m) }
-func (*Topology) ProtoMessage() {}
-func (*Topology) Descriptor() ([]byte, []int) {
- return fileDescriptor_csi_31237507707d37ec, []int{14}
-}
-func (m *Topology) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Topology.Unmarshal(m, b)
-}
-func (m *Topology) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Topology.Marshal(b, m, deterministic)
-}
-func (dst *Topology) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Topology.Merge(dst, src)
-}
-func (m *Topology) XXX_Size() int {
- return xxx_messageInfo_Topology.Size(m)
-}
-func (m *Topology) XXX_DiscardUnknown() {
- xxx_messageInfo_Topology.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Topology proto.InternalMessageInfo
-
-func (m *Topology) GetSegments() map[string]string {
- if m != nil {
- return m.Segments
- }
- return nil
-}
-
-type DeleteVolumeRequest struct {
- // The ID of the volume to be deprovisioned.
- // This field is REQUIRED.
- VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
- // Secrets required by plugin to complete volume deletion request.
- // This field is OPTIONAL. Refer to the `Secrets Requirements`
- // section on how to use this field.
- ControllerDeleteSecrets map[string]string `protobuf:"bytes,2,rep,name=controller_delete_secrets,json=controllerDeleteSecrets" json:"controller_delete_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteVolumeRequest) Reset() { *m = DeleteVolumeRequest{} } -func (m *DeleteVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteVolumeRequest) ProtoMessage() {} -func (*DeleteVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{15} -} -func (m *DeleteVolumeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteVolumeRequest.Unmarshal(m, b) -} -func (m *DeleteVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteVolumeRequest.Marshal(b, m, deterministic) -} -func (dst *DeleteVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteVolumeRequest.Merge(dst, src) -} -func (m *DeleteVolumeRequest) XXX_Size() int { - return xxx_messageInfo_DeleteVolumeRequest.Size(m) -} -func (m *DeleteVolumeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteVolumeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteVolumeRequest proto.InternalMessageInfo - -func (m *DeleteVolumeRequest) GetVolumeId() string { - if m != nil { - return m.VolumeId - } - return "" -} - -func (m *DeleteVolumeRequest) GetControllerDeleteSecrets() map[string]string { - if m != nil { - return m.ControllerDeleteSecrets - } - return nil -} - -type DeleteVolumeResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteVolumeResponse) Reset() { *m = DeleteVolumeResponse{} } -func (m *DeleteVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteVolumeResponse) ProtoMessage() {} -func (*DeleteVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{16} -} -func (m *DeleteVolumeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteVolumeResponse.Unmarshal(m, b) -} -func (m *DeleteVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteVolumeResponse.Marshal(b, m, deterministic) -} -func (dst *DeleteVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteVolumeResponse.Merge(dst, src) -} -func (m *DeleteVolumeResponse) XXX_Size() int { - return xxx_messageInfo_DeleteVolumeResponse.Size(m) -} -func (m *DeleteVolumeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteVolumeResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteVolumeResponse proto.InternalMessageInfo - -type ControllerPublishVolumeRequest struct { - // The ID of the volume to be used on a node. - // This field is REQUIRED. - VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - // The ID of the node. This field is REQUIRED. The CO SHALL set this - // field to match the node ID returned by `NodeGetInfo`. - NodeId string `protobuf:"bytes,2,opt,name=node_id,json=nodeId" json:"node_id,omitempty"` - // The capability of the volume the CO expects the volume to have. - // This is a REQUIRED field. 
- VolumeCapability *VolumeCapability `protobuf:"bytes,3,opt,name=volume_capability,json=volumeCapability" json:"volume_capability,omitempty"` - // Whether to publish the volume in readonly mode. This field is - // REQUIRED. - Readonly bool `protobuf:"varint,4,opt,name=readonly" json:"readonly,omitempty"` - // Secrets required by plugin to complete controller publish volume - // request. This field is OPTIONAL. Refer to the - // `Secrets Requirements` section on how to use this field. - ControllerPublishSecrets map[string]string `protobuf:"bytes,5,rep,name=controller_publish_secrets,json=controllerPublishSecrets" json:"controller_publish_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // Attributes of the volume to be used on a node. This field is - // OPTIONAL and MUST match the attributes of the Volume identified - // by `volume_id`. - VolumeAttributes map[string]string `protobuf:"bytes,6,rep,name=volume_attributes,json=volumeAttributes" json:"volume_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ControllerPublishVolumeRequest) Reset() { *m = ControllerPublishVolumeRequest{} } -func (m *ControllerPublishVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*ControllerPublishVolumeRequest) ProtoMessage() {} -func (*ControllerPublishVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{17} -} -func (m *ControllerPublishVolumeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ControllerPublishVolumeRequest.Unmarshal(m, b) -} -func (m *ControllerPublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ControllerPublishVolumeRequest.Marshal(b, m, deterministic) -} -func (dst *ControllerPublishVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerPublishVolumeRequest.Merge(dst, src) -} -func (m *ControllerPublishVolumeRequest) XXX_Size() int { - return xxx_messageInfo_ControllerPublishVolumeRequest.Size(m) -} -func (m *ControllerPublishVolumeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerPublishVolumeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ControllerPublishVolumeRequest proto.InternalMessageInfo - -func (m *ControllerPublishVolumeRequest) GetVolumeId() string { - if m != nil { - return m.VolumeId - } - return "" -} - -func (m *ControllerPublishVolumeRequest) GetNodeId() string { - if m != nil { - return m.NodeId - } - return "" -} - -func (m *ControllerPublishVolumeRequest) GetVolumeCapability() *VolumeCapability { - if m != nil { - return m.VolumeCapability - } - return nil -} - -func (m *ControllerPublishVolumeRequest) GetReadonly() bool { - if m != nil { - return m.Readonly - } - return false -} - -func (m *ControllerPublishVolumeRequest) GetControllerPublishSecrets() map[string]string { - if m != nil { - return m.ControllerPublishSecrets - } - return nil -} - -func (m *ControllerPublishVolumeRequest) GetVolumeAttributes() map[string]string { - if m != nil { - return m.VolumeAttributes - } - return nil -} - -type ControllerPublishVolumeResponse struct { - // The SP specific information that will be passed to the Plugin in - // the subsequent `NodeStageVolume` or `NodePublishVolume` calls - // for the given volume. - // This information is opaque to the CO. This field is OPTIONAL. 
- PublishInfo map[string]string `protobuf:"bytes,1,rep,name=publish_info,json=publishInfo" json:"publish_info,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ControllerPublishVolumeResponse) Reset() { *m = ControllerPublishVolumeResponse{} } -func (m *ControllerPublishVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*ControllerPublishVolumeResponse) ProtoMessage() {} -func (*ControllerPublishVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{18} -} -func (m *ControllerPublishVolumeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ControllerPublishVolumeResponse.Unmarshal(m, b) -} -func (m *ControllerPublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ControllerPublishVolumeResponse.Marshal(b, m, deterministic) -} -func (dst *ControllerPublishVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerPublishVolumeResponse.Merge(dst, src) -} -func (m *ControllerPublishVolumeResponse) XXX_Size() int { - return xxx_messageInfo_ControllerPublishVolumeResponse.Size(m) -} -func (m *ControllerPublishVolumeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerPublishVolumeResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ControllerPublishVolumeResponse proto.InternalMessageInfo - -func (m *ControllerPublishVolumeResponse) GetPublishInfo() map[string]string { - if m != nil { - return m.PublishInfo - } - return nil -} - -type ControllerUnpublishVolumeRequest struct { - // The ID of the volume. This field is REQUIRED. - VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - // The ID of the node. This field is OPTIONAL. The CO SHOULD set this - // field to match the node ID returned by `NodeGetInfo` or leave it - // unset. If the value is set, the SP MUST unpublish the volume from - // the specified node. If the value is unset, the SP MUST unpublish - // the volume from all nodes it is published to. - NodeId string `protobuf:"bytes,2,opt,name=node_id,json=nodeId" json:"node_id,omitempty"` - // Secrets required by plugin to complete controller unpublish volume - // request. This SHOULD be the same secrets passed to the - // ControllerPublishVolume call for the specified volume. - // This field is OPTIONAL. Refer to the `Secrets Requirements` - // section on how to use this field. 
- ControllerUnpublishSecrets map[string]string `protobuf:"bytes,3,rep,name=controller_unpublish_secrets,json=controllerUnpublishSecrets" json:"controller_unpublish_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ControllerUnpublishVolumeRequest) Reset() { *m = ControllerUnpublishVolumeRequest{} } -func (m *ControllerUnpublishVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*ControllerUnpublishVolumeRequest) ProtoMessage() {} -func (*ControllerUnpublishVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{19} -} -func (m *ControllerUnpublishVolumeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ControllerUnpublishVolumeRequest.Unmarshal(m, b) -} -func (m *ControllerUnpublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ControllerUnpublishVolumeRequest.Marshal(b, m, deterministic) -} -func (dst *ControllerUnpublishVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerUnpublishVolumeRequest.Merge(dst, src) -} -func (m *ControllerUnpublishVolumeRequest) XXX_Size() int { - return xxx_messageInfo_ControllerUnpublishVolumeRequest.Size(m) -} -func (m *ControllerUnpublishVolumeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerUnpublishVolumeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ControllerUnpublishVolumeRequest proto.InternalMessageInfo - -func (m *ControllerUnpublishVolumeRequest) GetVolumeId() string { - if m != nil { - return m.VolumeId - } - return "" -} - -func (m *ControllerUnpublishVolumeRequest) GetNodeId() string { - if m != nil { - return m.NodeId - } - return "" -} - -func (m *ControllerUnpublishVolumeRequest) GetControllerUnpublishSecrets() map[string]string { - if m != nil { - return m.ControllerUnpublishSecrets - } - return nil -} - -type ControllerUnpublishVolumeResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ControllerUnpublishVolumeResponse) Reset() { *m = ControllerUnpublishVolumeResponse{} } -func (m *ControllerUnpublishVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*ControllerUnpublishVolumeResponse) ProtoMessage() {} -func (*ControllerUnpublishVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{20} -} -func (m *ControllerUnpublishVolumeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ControllerUnpublishVolumeResponse.Unmarshal(m, b) -} -func (m *ControllerUnpublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ControllerUnpublishVolumeResponse.Marshal(b, m, deterministic) -} -func (dst *ControllerUnpublishVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerUnpublishVolumeResponse.Merge(dst, src) -} -func (m *ControllerUnpublishVolumeResponse) XXX_Size() int { - return xxx_messageInfo_ControllerUnpublishVolumeResponse.Size(m) -} -func (m *ControllerUnpublishVolumeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerUnpublishVolumeResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ControllerUnpublishVolumeResponse proto.InternalMessageInfo - -type ValidateVolumeCapabilitiesRequest struct { - // The ID of the volume to check. This field is REQUIRED. 
- VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - // The capabilities that the CO wants to check for the volume. This - // call SHALL return "supported" only if all the volume capabilities - // specified below are supported. This field is REQUIRED. - VolumeCapabilities []*VolumeCapability `protobuf:"bytes,2,rep,name=volume_capabilities,json=volumeCapabilities" json:"volume_capabilities,omitempty"` - // Attributes of the volume to check. This field is OPTIONAL and MUST - // match the attributes of the Volume identified by `volume_id`. - VolumeAttributes map[string]string `protobuf:"bytes,3,rep,name=volume_attributes,json=volumeAttributes" json:"volume_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // Specifies where (regions, zones, racks, etc.) the caller believes - // the volume is accessible from. - // A caller MAY specify multiple topologies to indicate they believe - // the volume to be accessible from multiple locations. - // This field is OPTIONAL. This field SHALL NOT be set unless the - // plugin advertises the ACCESSIBILITY_CONSTRAINTS capability. - AccessibleTopology []*Topology `protobuf:"bytes,4,rep,name=accessible_topology,json=accessibleTopology" json:"accessible_topology,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ValidateVolumeCapabilitiesRequest) Reset() { *m = ValidateVolumeCapabilitiesRequest{} } -func (m *ValidateVolumeCapabilitiesRequest) String() string { return proto.CompactTextString(m) } -func (*ValidateVolumeCapabilitiesRequest) ProtoMessage() {} -func (*ValidateVolumeCapabilitiesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{21} -} -func (m *ValidateVolumeCapabilitiesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Unmarshal(m, b) -} -func (m *ValidateVolumeCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Marshal(b, m, deterministic) -} -func (dst *ValidateVolumeCapabilitiesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Merge(dst, src) -} -func (m *ValidateVolumeCapabilitiesRequest) XXX_Size() int { - return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Size(m) -} -func (m *ValidateVolumeCapabilitiesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ValidateVolumeCapabilitiesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ValidateVolumeCapabilitiesRequest proto.InternalMessageInfo - -func (m *ValidateVolumeCapabilitiesRequest) GetVolumeId() string { - if m != nil { - return m.VolumeId - } - return "" -} - -func (m *ValidateVolumeCapabilitiesRequest) GetVolumeCapabilities() []*VolumeCapability { - if m != nil { - return m.VolumeCapabilities - } - return nil -} - -func (m *ValidateVolumeCapabilitiesRequest) GetVolumeAttributes() map[string]string { - if m != nil { - return m.VolumeAttributes - } - return nil -} - -func (m *ValidateVolumeCapabilitiesRequest) GetAccessibleTopology() []*Topology { - if m != nil { - return m.AccessibleTopology - } - return nil -} - -type ValidateVolumeCapabilitiesResponse struct { - // True if the Plugin supports the specified capabilities for the - // given volume. This field is REQUIRED. 
- Supported bool `protobuf:"varint,1,opt,name=supported" json:"supported,omitempty"` - // Message to the CO if `supported` above is false. This field is - // OPTIONAL. - // An empty string is equal to an unspecified field value. - Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ValidateVolumeCapabilitiesResponse) Reset() { *m = ValidateVolumeCapabilitiesResponse{} } -func (m *ValidateVolumeCapabilitiesResponse) String() string { return proto.CompactTextString(m) } -func (*ValidateVolumeCapabilitiesResponse) ProtoMessage() {} -func (*ValidateVolumeCapabilitiesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{22} -} -func (m *ValidateVolumeCapabilitiesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Unmarshal(m, b) -} -func (m *ValidateVolumeCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Marshal(b, m, deterministic) -} -func (dst *ValidateVolumeCapabilitiesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Merge(dst, src) -} -func (m *ValidateVolumeCapabilitiesResponse) XXX_Size() int { - return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Size(m) -} -func (m *ValidateVolumeCapabilitiesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ValidateVolumeCapabilitiesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ValidateVolumeCapabilitiesResponse proto.InternalMessageInfo - -func (m *ValidateVolumeCapabilitiesResponse) GetSupported() bool { - if m != nil { - return m.Supported - } - return false -} - -func (m *ValidateVolumeCapabilitiesResponse) GetMessage() string { - if m != nil { - return m.Message - } - return "" -} - -type ListVolumesRequest struct { - // If specified (non-zero value), the Plugin MUST NOT return more - // entries than this number in the response. If the actual number of - // entries is more than this number, the Plugin MUST set `next_token` - // in the response which can be used to get the next page of entries - // in the subsequent `ListVolumes` call. This field is OPTIONAL. If - // not specified (zero value), it means there is no restriction on the - // number of entries that can be returned. - // The value of this field MUST NOT be negative. - MaxEntries int32 `protobuf:"varint,1,opt,name=max_entries,json=maxEntries" json:"max_entries,omitempty"` - // A token to specify where to start paginating. Set this field to - // `next_token` returned by a previous `ListVolumes` call to get the - // next page of entries. This field is OPTIONAL. - // An empty string is equal to an unspecified field value. 
- StartingToken string `protobuf:"bytes,2,opt,name=starting_token,json=startingToken" json:"starting_token,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListVolumesRequest) Reset() { *m = ListVolumesRequest{} } -func (m *ListVolumesRequest) String() string { return proto.CompactTextString(m) } -func (*ListVolumesRequest) ProtoMessage() {} -func (*ListVolumesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{23} -} -func (m *ListVolumesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListVolumesRequest.Unmarshal(m, b) -} -func (m *ListVolumesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListVolumesRequest.Marshal(b, m, deterministic) -} -func (dst *ListVolumesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListVolumesRequest.Merge(dst, src) -} -func (m *ListVolumesRequest) XXX_Size() int { - return xxx_messageInfo_ListVolumesRequest.Size(m) -} -func (m *ListVolumesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListVolumesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ListVolumesRequest proto.InternalMessageInfo - -func (m *ListVolumesRequest) GetMaxEntries() int32 { - if m != nil { - return m.MaxEntries - } - return 0 -} - -func (m *ListVolumesRequest) GetStartingToken() string { - if m != nil { - return m.StartingToken - } - return "" -} - -type ListVolumesResponse struct { - Entries []*ListVolumesResponse_Entry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"` - // This token allows you to get the next page of entries for - // `ListVolumes` request. If the number of entries is larger than - // `max_entries`, use the `next_token` as a value for the - // `starting_token` field in the next `ListVolumes` request. This - // field is OPTIONAL. - // An empty string is equal to an unspecified field value. 
- NextToken string `protobuf:"bytes,2,opt,name=next_token,json=nextToken" json:"next_token,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListVolumesResponse) Reset() { *m = ListVolumesResponse{} } -func (m *ListVolumesResponse) String() string { return proto.CompactTextString(m) } -func (*ListVolumesResponse) ProtoMessage() {} -func (*ListVolumesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{24} -} -func (m *ListVolumesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListVolumesResponse.Unmarshal(m, b) -} -func (m *ListVolumesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListVolumesResponse.Marshal(b, m, deterministic) -} -func (dst *ListVolumesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListVolumesResponse.Merge(dst, src) -} -func (m *ListVolumesResponse) XXX_Size() int { - return xxx_messageInfo_ListVolumesResponse.Size(m) -} -func (m *ListVolumesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListVolumesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ListVolumesResponse proto.InternalMessageInfo - -func (m *ListVolumesResponse) GetEntries() []*ListVolumesResponse_Entry { - if m != nil { - return m.Entries - } - return nil -} - -func (m *ListVolumesResponse) GetNextToken() string { - if m != nil { - return m.NextToken - } - return "" -} - -type ListVolumesResponse_Entry struct { - Volume *Volume `protobuf:"bytes,1,opt,name=volume" json:"volume,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListVolumesResponse_Entry) Reset() { *m = ListVolumesResponse_Entry{} } -func (m *ListVolumesResponse_Entry) String() string { return proto.CompactTextString(m) } -func (*ListVolumesResponse_Entry) ProtoMessage() {} -func (*ListVolumesResponse_Entry) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{24, 0} -} -func (m *ListVolumesResponse_Entry) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListVolumesResponse_Entry.Unmarshal(m, b) -} -func (m *ListVolumesResponse_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListVolumesResponse_Entry.Marshal(b, m, deterministic) -} -func (dst *ListVolumesResponse_Entry) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListVolumesResponse_Entry.Merge(dst, src) -} -func (m *ListVolumesResponse_Entry) XXX_Size() int { - return xxx_messageInfo_ListVolumesResponse_Entry.Size(m) -} -func (m *ListVolumesResponse_Entry) XXX_DiscardUnknown() { - xxx_messageInfo_ListVolumesResponse_Entry.DiscardUnknown(m) -} - -var xxx_messageInfo_ListVolumesResponse_Entry proto.InternalMessageInfo - -func (m *ListVolumesResponse_Entry) GetVolume() *Volume { - if m != nil { - return m.Volume - } - return nil -} - -type GetCapacityRequest struct { - // If specified, the Plugin SHALL report the capacity of the storage - // that can be used to provision volumes that satisfy ALL of the - // specified `volume_capabilities`. These are the same - // `volume_capabilities` the CO will use in `CreateVolumeRequest`. - // This field is OPTIONAL. 
- VolumeCapabilities []*VolumeCapability `protobuf:"bytes,1,rep,name=volume_capabilities,json=volumeCapabilities" json:"volume_capabilities,omitempty"`
- // If specified, the Plugin SHALL report the capacity of the storage
- // that can be used to provision volumes with the given Plugin
- // specific `parameters`. These are the same `parameters` the CO will
- // use in `CreateVolumeRequest`. This field is OPTIONAL.
- Parameters map[string]string `protobuf:"bytes,2,rep,name=parameters" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
- // If specified, the Plugin SHALL report the capacity of the storage
- // that can be used to provision volumes in the specified
- // `accessible_topology`. This is the same as the
- // `accessible_topology` the SP returns in a `CreateVolumeResponse`.
- // This field is OPTIONAL. This field SHALL NOT be set unless the
- // plugin advertises the ACCESSIBILITY_CONSTRAINTS capability.
- AccessibleTopology *Topology `protobuf:"bytes,3,opt,name=accessible_topology,json=accessibleTopology" json:"accessible_topology,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GetCapacityRequest) Reset() { *m = GetCapacityRequest{} }
-func (m *GetCapacityRequest) String() string { return proto.CompactTextString(m) }
-func (*GetCapacityRequest) ProtoMessage() {}
-func (*GetCapacityRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_csi_31237507707d37ec, []int{25}
-}
-func (m *GetCapacityRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GetCapacityRequest.Unmarshal(m, b)
-}
-func (m *GetCapacityRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GetCapacityRequest.Marshal(b, m, deterministic)
-}
-func (dst *GetCapacityRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GetCapacityRequest.Merge(dst, src)
-}
-func (m *GetCapacityRequest) XXX_Size() int {
- return xxx_messageInfo_GetCapacityRequest.Size(m)
-}
-func (m *GetCapacityRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_GetCapacityRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetCapacityRequest proto.InternalMessageInfo
-
-func (m *GetCapacityRequest) GetVolumeCapabilities() []*VolumeCapability {
- if m != nil {
- return m.VolumeCapabilities
- }
- return nil
-}
-
-func (m *GetCapacityRequest) GetParameters() map[string]string {
- if m != nil {
- return m.Parameters
- }
- return nil
-}
-
-func (m *GetCapacityRequest) GetAccessibleTopology() *Topology {
- if m != nil {
- return m.AccessibleTopology
- }
- return nil
-}
-
-type GetCapacityResponse struct {
- // The available capacity, in bytes, of the storage that can be used
- // to provision volumes. If `volume_capabilities` or `parameters` is
- // specified in the request, the Plugin SHALL take those into
- // consideration when calculating the available capacity of the
- // storage. This field is REQUIRED.
- // The value of this field MUST NOT be negative.
- AvailableCapacity int64 `protobuf:"varint,1,opt,name=available_capacity,json=availableCapacity" json:"available_capacity,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetCapacityResponse) Reset() { *m = GetCapacityResponse{} } -func (m *GetCapacityResponse) String() string { return proto.CompactTextString(m) } -func (*GetCapacityResponse) ProtoMessage() {} -func (*GetCapacityResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{26} -} -func (m *GetCapacityResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetCapacityResponse.Unmarshal(m, b) -} -func (m *GetCapacityResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetCapacityResponse.Marshal(b, m, deterministic) -} -func (dst *GetCapacityResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetCapacityResponse.Merge(dst, src) -} -func (m *GetCapacityResponse) XXX_Size() int { - return xxx_messageInfo_GetCapacityResponse.Size(m) -} -func (m *GetCapacityResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetCapacityResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetCapacityResponse proto.InternalMessageInfo - -func (m *GetCapacityResponse) GetAvailableCapacity() int64 { - if m != nil { - return m.AvailableCapacity - } - return 0 -} - -type ControllerGetCapabilitiesRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ControllerGetCapabilitiesRequest) Reset() { *m = ControllerGetCapabilitiesRequest{} } -func (m *ControllerGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) } -func (*ControllerGetCapabilitiesRequest) ProtoMessage() {} -func (*ControllerGetCapabilitiesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{27} -} -func (m *ControllerGetCapabilitiesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ControllerGetCapabilitiesRequest.Unmarshal(m, b) -} -func (m *ControllerGetCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ControllerGetCapabilitiesRequest.Marshal(b, m, deterministic) -} -func (dst *ControllerGetCapabilitiesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerGetCapabilitiesRequest.Merge(dst, src) -} -func (m *ControllerGetCapabilitiesRequest) XXX_Size() int { - return xxx_messageInfo_ControllerGetCapabilitiesRequest.Size(m) -} -func (m *ControllerGetCapabilitiesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerGetCapabilitiesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ControllerGetCapabilitiesRequest proto.InternalMessageInfo - -type ControllerGetCapabilitiesResponse struct { - // All the capabilities that the controller service supports. This - // field is OPTIONAL. 
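Before issuing the snapshot RPCs defined below, a CO would normally check the controller's advertised capabilities. A minimal sketch, not part of the generated file, assuming the generated `ControllerClient` and the v0 `CREATE_DELETE_SNAPSHOT` capability value:

```go
// Hypothetical CO-side helper (not generated code): true when the controller
// advertises snapshot support. Assumed imports: "context".
func supportsSnapshots(ctx context.Context, ctrl ControllerClient) (bool, error) {
	resp, err := ctrl.ControllerGetCapabilities(ctx, &ControllerGetCapabilitiesRequest{})
	if err != nil {
		return false, err
	}
	for _, c := range resp.GetCapabilities() {
		// GetRpc and GetType are nil-safe generated getters, so a
		// capability of a different oneof type is skipped harmlessly.
		if c.GetRpc().GetType() == ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT {
			return true, nil
		}
	}
	return false, nil
}
```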
- Capabilities []*ControllerServiceCapability `protobuf:"bytes,2,rep,name=capabilities" json:"capabilities,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ControllerGetCapabilitiesResponse) Reset() { *m = ControllerGetCapabilitiesResponse{} } -func (m *ControllerGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) } -func (*ControllerGetCapabilitiesResponse) ProtoMessage() {} -func (*ControllerGetCapabilitiesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{28} -} -func (m *ControllerGetCapabilitiesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ControllerGetCapabilitiesResponse.Unmarshal(m, b) -} -func (m *ControllerGetCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ControllerGetCapabilitiesResponse.Marshal(b, m, deterministic) -} -func (dst *ControllerGetCapabilitiesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerGetCapabilitiesResponse.Merge(dst, src) -} -func (m *ControllerGetCapabilitiesResponse) XXX_Size() int { - return xxx_messageInfo_ControllerGetCapabilitiesResponse.Size(m) -} -func (m *ControllerGetCapabilitiesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerGetCapabilitiesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ControllerGetCapabilitiesResponse proto.InternalMessageInfo - -func (m *ControllerGetCapabilitiesResponse) GetCapabilities() []*ControllerServiceCapability { - if m != nil { - return m.Capabilities - } - return nil -} - -// Specifies a capability of the controller service. -type ControllerServiceCapability struct { - // Types that are valid to be assigned to Type: - // *ControllerServiceCapability_Rpc - Type isControllerServiceCapability_Type `protobuf_oneof:"type"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ControllerServiceCapability) Reset() { *m = ControllerServiceCapability{} } -func (m *ControllerServiceCapability) String() string { return proto.CompactTextString(m) } -func (*ControllerServiceCapability) ProtoMessage() {} -func (*ControllerServiceCapability) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{29} -} -func (m *ControllerServiceCapability) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ControllerServiceCapability.Unmarshal(m, b) -} -func (m *ControllerServiceCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ControllerServiceCapability.Marshal(b, m, deterministic) -} -func (dst *ControllerServiceCapability) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerServiceCapability.Merge(dst, src) -} -func (m *ControllerServiceCapability) XXX_Size() int { - return xxx_messageInfo_ControllerServiceCapability.Size(m) -} -func (m *ControllerServiceCapability) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerServiceCapability.DiscardUnknown(m) -} - -var xxx_messageInfo_ControllerServiceCapability proto.InternalMessageInfo - -type isControllerServiceCapability_Type interface { - isControllerServiceCapability_Type() -} - -type ControllerServiceCapability_Rpc struct { - Rpc *ControllerServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,oneof"` -} - -func (*ControllerServiceCapability_Rpc) isControllerServiceCapability_Type() {} - -func (m *ControllerServiceCapability) GetType() 
isControllerServiceCapability_Type { - if m != nil { - return m.Type - } - return nil -} - -func (m *ControllerServiceCapability) GetRpc() *ControllerServiceCapability_RPC { - if x, ok := m.GetType().(*ControllerServiceCapability_Rpc); ok { - return x.Rpc - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*ControllerServiceCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _ControllerServiceCapability_OneofMarshaler, _ControllerServiceCapability_OneofUnmarshaler, _ControllerServiceCapability_OneofSizer, []interface{}{ - (*ControllerServiceCapability_Rpc)(nil), - } -} - -func _ControllerServiceCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*ControllerServiceCapability) - // type - switch x := m.Type.(type) { - case *ControllerServiceCapability_Rpc: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Rpc); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("ControllerServiceCapability.Type has unexpected type %T", x) - } - return nil -} - -func _ControllerServiceCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*ControllerServiceCapability) - switch tag { - case 1: // type.rpc - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ControllerServiceCapability_RPC) - err := b.DecodeMessage(msg) - m.Type = &ControllerServiceCapability_Rpc{msg} - return true, err - default: - return false, nil - } -} - -func _ControllerServiceCapability_OneofSizer(msg proto.Message) (n int) { - m := msg.(*ControllerServiceCapability) - // type - switch x := m.Type.(type) { - case *ControllerServiceCapability_Rpc: - s := proto.Size(x.Rpc) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type ControllerServiceCapability_RPC struct { - Type ControllerServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,enum=csi.v0.ControllerServiceCapability_RPC_Type" json:"type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ControllerServiceCapability_RPC) Reset() { *m = ControllerServiceCapability_RPC{} } -func (m *ControllerServiceCapability_RPC) String() string { return proto.CompactTextString(m) } -func (*ControllerServiceCapability_RPC) ProtoMessage() {} -func (*ControllerServiceCapability_RPC) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{29, 0} -} -func (m *ControllerServiceCapability_RPC) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ControllerServiceCapability_RPC.Unmarshal(m, b) -} -func (m *ControllerServiceCapability_RPC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ControllerServiceCapability_RPC.Marshal(b, m, deterministic) -} -func (dst *ControllerServiceCapability_RPC) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerServiceCapability_RPC.Merge(dst, src) -} -func (m *ControllerServiceCapability_RPC) XXX_Size() int { - return xxx_messageInfo_ControllerServiceCapability_RPC.Size(m) -} -func (m *ControllerServiceCapability_RPC) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerServiceCapability_RPC.DiscardUnknown(m) -} - -var 
xxx_messageInfo_ControllerServiceCapability_RPC proto.InternalMessageInfo - -func (m *ControllerServiceCapability_RPC) GetType() ControllerServiceCapability_RPC_Type { - if m != nil { - return m.Type - } - return ControllerServiceCapability_RPC_UNKNOWN -} - -type CreateSnapshotRequest struct { - // The ID of the source volume to be snapshotted. - // This field is REQUIRED. - SourceVolumeId string `protobuf:"bytes,1,opt,name=source_volume_id,json=sourceVolumeId" json:"source_volume_id,omitempty"` - // The suggested name for the snapshot. This field is REQUIRED for - // idempotency. - Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` - // Secrets required by plugin to complete snapshot creation request. - // This field is OPTIONAL. Refer to the `Secrets Requirements` - // section on how to use this field. - CreateSnapshotSecrets map[string]string `protobuf:"bytes,3,rep,name=create_snapshot_secrets,json=createSnapshotSecrets" json:"create_snapshot_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // Plugin specific parameters passed in as opaque key-value pairs. - // This field is OPTIONAL. The Plugin is responsible for parsing and - // validating these parameters. COs will treat these as opaque. - // Use cases for opaque parameters: - // - Specify a policy to automatically clean up the snapshot. - // - Specify an expiration date for the snapshot. - // - Specify whether the snapshot is readonly or read/write. - // - Specify if the snapshot should be replicated to some place. - // - Specify primary or secondary for replication systems that - // support snapshotting only on primary. - Parameters map[string]string `protobuf:"bytes,4,rep,name=parameters" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CreateSnapshotRequest) Reset() { *m = CreateSnapshotRequest{} } -func (m *CreateSnapshotRequest) String() string { return proto.CompactTextString(m) } -func (*CreateSnapshotRequest) ProtoMessage() {} -func (*CreateSnapshotRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{30} -} -func (m *CreateSnapshotRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CreateSnapshotRequest.Unmarshal(m, b) -} -func (m *CreateSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CreateSnapshotRequest.Marshal(b, m, deterministic) -} -func (dst *CreateSnapshotRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateSnapshotRequest.Merge(dst, src) -} -func (m *CreateSnapshotRequest) XXX_Size() int { - return xxx_messageInfo_CreateSnapshotRequest.Size(m) -} -func (m *CreateSnapshotRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CreateSnapshotRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CreateSnapshotRequest proto.InternalMessageInfo - -func (m *CreateSnapshotRequest) GetSourceVolumeId() string { - if m != nil { - return m.SourceVolumeId - } - return "" -} - -func (m *CreateSnapshotRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *CreateSnapshotRequest) GetCreateSnapshotSecrets() map[string]string { - if m != nil { - return m.CreateSnapshotSecrets - } - return nil -} - -func (m *CreateSnapshotRequest) GetParameters() map[string]string { - if m != nil { - return m.Parameters - } - return nil -} - -type 
CreateSnapshotResponse struct { - // Contains all attributes of the newly created snapshot that are - // relevant to the CO along with information required by the Plugin - // to uniquely identify the snapshot. This field is REQUIRED. - Snapshot *Snapshot `protobuf:"bytes,1,opt,name=snapshot" json:"snapshot,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CreateSnapshotResponse) Reset() { *m = CreateSnapshotResponse{} } -func (m *CreateSnapshotResponse) String() string { return proto.CompactTextString(m) } -func (*CreateSnapshotResponse) ProtoMessage() {} -func (*CreateSnapshotResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{31} -} -func (m *CreateSnapshotResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CreateSnapshotResponse.Unmarshal(m, b) -} -func (m *CreateSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CreateSnapshotResponse.Marshal(b, m, deterministic) -} -func (dst *CreateSnapshotResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateSnapshotResponse.Merge(dst, src) -} -func (m *CreateSnapshotResponse) XXX_Size() int { - return xxx_messageInfo_CreateSnapshotResponse.Size(m) -} -func (m *CreateSnapshotResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CreateSnapshotResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_CreateSnapshotResponse proto.InternalMessageInfo - -func (m *CreateSnapshotResponse) GetSnapshot() *Snapshot { - if m != nil { - return m.Snapshot - } - return nil -} - -// The information about a provisioned snapshot. -type Snapshot struct { - // This is the complete size of the snapshot in bytes. The purpose of - // this field is to give CO guidance on how much space is needed to - // create a volume from this snapshot. The size of the volume MUST NOT - // be less than the size of the source snapshot. This field is - // OPTIONAL. If this field is not set, it indicates that this size is - // unknown. The value of this field MUST NOT be negative and a size of - // zero means it is unspecified. - SizeBytes int64 `protobuf:"varint,1,opt,name=size_bytes,json=sizeBytes" json:"size_bytes,omitempty"` - // Uniquely identifies a snapshot and is generated by the plugin. It - // will not change over time. This field is REQUIRED. The identity - // information will be used by the CO in subsequent calls to refer to - // the provisioned snapshot. - Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` - // Identity information for the source volume. Note that creating a - // snapshot from a snapshot is not supported here so the source has to - // be a volume. This field is REQUIRED. - SourceVolumeId string `protobuf:"bytes,3,opt,name=source_volume_id,json=sourceVolumeId" json:"source_volume_id,omitempty"` - // Timestamp when the point-in-time snapshot is taken on the storage - // system. The format of this field should be a Unix nanoseconds time - // encoded as an int64. On Unix, the command `date +%s%N` returns the - // current time in nanoseconds since 1970-01-01 00:00:00 UTC. This - // field is REQUIRED. - CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt" json:"created_at,omitempty"` - // The status of a snapshot. 
- Status *SnapshotStatus `protobuf:"bytes,5,opt,name=status" json:"status,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Snapshot) Reset() { *m = Snapshot{} } -func (m *Snapshot) String() string { return proto.CompactTextString(m) } -func (*Snapshot) ProtoMessage() {} -func (*Snapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{32} -} -func (m *Snapshot) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Snapshot.Unmarshal(m, b) -} -func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic) -} -func (dst *Snapshot) XXX_Merge(src proto.Message) { - xxx_messageInfo_Snapshot.Merge(dst, src) -} -func (m *Snapshot) XXX_Size() int { - return xxx_messageInfo_Snapshot.Size(m) -} -func (m *Snapshot) XXX_DiscardUnknown() { - xxx_messageInfo_Snapshot.DiscardUnknown(m) -} - -var xxx_messageInfo_Snapshot proto.InternalMessageInfo - -func (m *Snapshot) GetSizeBytes() int64 { - if m != nil { - return m.SizeBytes - } - return 0 -} - -func (m *Snapshot) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -func (m *Snapshot) GetSourceVolumeId() string { - if m != nil { - return m.SourceVolumeId - } - return "" -} - -func (m *Snapshot) GetCreatedAt() int64 { - if m != nil { - return m.CreatedAt - } - return 0 -} - -func (m *Snapshot) GetStatus() *SnapshotStatus { - if m != nil { - return m.Status - } - return nil -} - -// The status of a snapshot. -type SnapshotStatus struct { - // This field is REQUIRED. - Type SnapshotStatus_Type `protobuf:"varint,1,opt,name=type,enum=csi.v0.SnapshotStatus_Type" json:"type,omitempty"` - // Additional information to describe why a snapshot ended up in the - // `ERROR_UPLOADING` status. This field is OPTIONAL. - Details string `protobuf:"bytes,2,opt,name=details" json:"details,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SnapshotStatus) Reset() { *m = SnapshotStatus{} } -func (m *SnapshotStatus) String() string { return proto.CompactTextString(m) } -func (*SnapshotStatus) ProtoMessage() {} -func (*SnapshotStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{33} -} -func (m *SnapshotStatus) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SnapshotStatus.Unmarshal(m, b) -} -func (m *SnapshotStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SnapshotStatus.Marshal(b, m, deterministic) -} -func (dst *SnapshotStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_SnapshotStatus.Merge(dst, src) -} -func (m *SnapshotStatus) XXX_Size() int { - return xxx_messageInfo_SnapshotStatus.Size(m) -} -func (m *SnapshotStatus) XXX_DiscardUnknown() { - xxx_messageInfo_SnapshotStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_SnapshotStatus proto.InternalMessageInfo - -func (m *SnapshotStatus) GetType() SnapshotStatus_Type { - if m != nil { - return m.Type - } - return SnapshotStatus_UNKNOWN -} - -func (m *SnapshotStatus) GetDetails() string { - if m != nil { - return m.Details - } - return "" -} - -type DeleteSnapshotRequest struct { - // The ID of the snapshot to be deleted. - // This field is REQUIRED. 
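Because `CreateSnapshot` is idempotent on (`source_volume_id`, `name`) and `SnapshotStatus` distinguishes `UPLOADING` from `READY`, a CO can poll simply by re-issuing the call. A minimal sketch, not part of the generated file, assuming the generated `ControllerClient` and the v0 status values:

```go
// Hypothetical CO-side helper (not generated code): create a snapshot and
// poll until the plugin reports it READY or failed.
// Assumed imports: "context", "fmt", "time".
func snapshotAndWait(ctx context.Context, ctrl ControllerClient, volumeID, name string) (*Snapshot, error) {
	for {
		resp, err := ctrl.CreateSnapshot(ctx, &CreateSnapshotRequest{
			SourceVolumeId: volumeID,
			Name:           name, // same name on every retry => idempotent
		})
		if err != nil {
			return nil, err
		}
		snap := resp.GetSnapshot()
		switch snap.GetStatus().GetType() {
		case SnapshotStatus_READY:
			return snap, nil
		case SnapshotStatus_ERROR_UPLOADING:
			return nil, fmt.Errorf("snapshot %s: %s", snap.GetId(), snap.GetStatus().GetDetails())
		}
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-time.After(2 * time.Second): // still cutting/uploading; retry
		}
	}
}
```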
- SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId" json:"snapshot_id,omitempty"` - // Secrets required by plugin to complete snapshot deletion request. - // This field is OPTIONAL. Refer to the `Secrets Requirements` - // section on how to use this field. - DeleteSnapshotSecrets map[string]string `protobuf:"bytes,2,rep,name=delete_snapshot_secrets,json=deleteSnapshotSecrets" json:"delete_snapshot_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteSnapshotRequest) Reset() { *m = DeleteSnapshotRequest{} } -func (m *DeleteSnapshotRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteSnapshotRequest) ProtoMessage() {} -func (*DeleteSnapshotRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{34} -} -func (m *DeleteSnapshotRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteSnapshotRequest.Unmarshal(m, b) -} -func (m *DeleteSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteSnapshotRequest.Marshal(b, m, deterministic) -} -func (dst *DeleteSnapshotRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteSnapshotRequest.Merge(dst, src) -} -func (m *DeleteSnapshotRequest) XXX_Size() int { - return xxx_messageInfo_DeleteSnapshotRequest.Size(m) -} -func (m *DeleteSnapshotRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteSnapshotRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteSnapshotRequest proto.InternalMessageInfo - -func (m *DeleteSnapshotRequest) GetSnapshotId() string { - if m != nil { - return m.SnapshotId - } - return "" -} - -func (m *DeleteSnapshotRequest) GetDeleteSnapshotSecrets() map[string]string { - if m != nil { - return m.DeleteSnapshotSecrets - } - return nil -} - -type DeleteSnapshotResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteSnapshotResponse) Reset() { *m = DeleteSnapshotResponse{} } -func (m *DeleteSnapshotResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteSnapshotResponse) ProtoMessage() {} -func (*DeleteSnapshotResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{35} -} -func (m *DeleteSnapshotResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteSnapshotResponse.Unmarshal(m, b) -} -func (m *DeleteSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteSnapshotResponse.Marshal(b, m, deterministic) -} -func (dst *DeleteSnapshotResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteSnapshotResponse.Merge(dst, src) -} -func (m *DeleteSnapshotResponse) XXX_Size() int { - return xxx_messageInfo_DeleteSnapshotResponse.Size(m) -} -func (m *DeleteSnapshotResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteSnapshotResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteSnapshotResponse proto.InternalMessageInfo - -// List all snapshots on the storage system regardless of how they were -// created. -type ListSnapshotsRequest struct { - // If specified (non-zero value), the Plugin MUST NOT return more - // entries than this number in the response. 
If the actual number of - // entries is more than this number, the Plugin MUST set `next_token` - // in the response which can be used to get the next page of entries - // in the subsequent `ListSnapshots` call. This field is OPTIONAL. If - // not specified (zero value), it means there is no restriction on the - // number of entries that can be returned. - // The value of this field MUST NOT be negative. - MaxEntries int32 `protobuf:"varint,1,opt,name=max_entries,json=maxEntries" json:"max_entries,omitempty"` - // A token to specify where to start paginating. Set this field to - // `next_token` returned by a previous `ListSnapshots` call to get the - // next page of entries. This field is OPTIONAL. - // An empty string is equal to an unspecified field value. - StartingToken string `protobuf:"bytes,2,opt,name=starting_token,json=startingToken" json:"starting_token,omitempty"` - // Identity information for the source volume. This field is OPTIONAL. - // It can be used to list snapshots by volume. - SourceVolumeId string `protobuf:"bytes,3,opt,name=source_volume_id,json=sourceVolumeId" json:"source_volume_id,omitempty"` - // Identity information for a specific snapshot. This field is - // OPTIONAL. It can be used to list only a specific snapshot. - // ListSnapshots will return with current snapshot information - // and will not block if the snapshot is being uploaded. - SnapshotId string `protobuf:"bytes,4,opt,name=snapshot_id,json=snapshotId" json:"snapshot_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListSnapshotsRequest) Reset() { *m = ListSnapshotsRequest{} } -func (m *ListSnapshotsRequest) String() string { return proto.CompactTextString(m) } -func (*ListSnapshotsRequest) ProtoMessage() {} -func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{36} -} -func (m *ListSnapshotsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListSnapshotsRequest.Unmarshal(m, b) -} -func (m *ListSnapshotsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListSnapshotsRequest.Marshal(b, m, deterministic) -} -func (dst *ListSnapshotsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListSnapshotsRequest.Merge(dst, src) -} -func (m *ListSnapshotsRequest) XXX_Size() int { - return xxx_messageInfo_ListSnapshotsRequest.Size(m) -} -func (m *ListSnapshotsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListSnapshotsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ListSnapshotsRequest proto.InternalMessageInfo - -func (m *ListSnapshotsRequest) GetMaxEntries() int32 { - if m != nil { - return m.MaxEntries - } - return 0 -} - -func (m *ListSnapshotsRequest) GetStartingToken() string { - if m != nil { - return m.StartingToken - } - return "" -} - -func (m *ListSnapshotsRequest) GetSourceVolumeId() string { - if m != nil { - return m.SourceVolumeId - } - return "" -} - -func (m *ListSnapshotsRequest) GetSnapshotId() string { - if m != nil { - return m.SnapshotId - } - return "" -} - -type ListSnapshotsResponse struct { - Entries []*ListSnapshotsResponse_Entry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"` - // This token allows you to get the next page of entries for - // `ListSnapshots` request. If the number of entries is larger than - // `max_entries`, use the `next_token` as a value for the - // `starting_token` field in the next `ListSnapshots` request. 
This - // field is OPTIONAL. - // An empty string is equal to an unspecified field value. - NextToken string `protobuf:"bytes,2,opt,name=next_token,json=nextToken" json:"next_token,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListSnapshotsResponse) Reset() { *m = ListSnapshotsResponse{} } -func (m *ListSnapshotsResponse) String() string { return proto.CompactTextString(m) } -func (*ListSnapshotsResponse) ProtoMessage() {} -func (*ListSnapshotsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{37} -} -func (m *ListSnapshotsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListSnapshotsResponse.Unmarshal(m, b) -} -func (m *ListSnapshotsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListSnapshotsResponse.Marshal(b, m, deterministic) -} -func (dst *ListSnapshotsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListSnapshotsResponse.Merge(dst, src) -} -func (m *ListSnapshotsResponse) XXX_Size() int { - return xxx_messageInfo_ListSnapshotsResponse.Size(m) -} -func (m *ListSnapshotsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListSnapshotsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ListSnapshotsResponse proto.InternalMessageInfo - -func (m *ListSnapshotsResponse) GetEntries() []*ListSnapshotsResponse_Entry { - if m != nil { - return m.Entries - } - return nil -} - -func (m *ListSnapshotsResponse) GetNextToken() string { - if m != nil { - return m.NextToken - } - return "" -} - -type ListSnapshotsResponse_Entry struct { - Snapshot *Snapshot `protobuf:"bytes,1,opt,name=snapshot" json:"snapshot,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListSnapshotsResponse_Entry) Reset() { *m = ListSnapshotsResponse_Entry{} } -func (m *ListSnapshotsResponse_Entry) String() string { return proto.CompactTextString(m) } -func (*ListSnapshotsResponse_Entry) ProtoMessage() {} -func (*ListSnapshotsResponse_Entry) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{37, 0} -} -func (m *ListSnapshotsResponse_Entry) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListSnapshotsResponse_Entry.Unmarshal(m, b) -} -func (m *ListSnapshotsResponse_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListSnapshotsResponse_Entry.Marshal(b, m, deterministic) -} -func (dst *ListSnapshotsResponse_Entry) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListSnapshotsResponse_Entry.Merge(dst, src) -} -func (m *ListSnapshotsResponse_Entry) XXX_Size() int { - return xxx_messageInfo_ListSnapshotsResponse_Entry.Size(m) -} -func (m *ListSnapshotsResponse_Entry) XXX_DiscardUnknown() { - xxx_messageInfo_ListSnapshotsResponse_Entry.DiscardUnknown(m) -} - -var xxx_messageInfo_ListSnapshotsResponse_Entry proto.InternalMessageInfo - -func (m *ListSnapshotsResponse_Entry) GetSnapshot() *Snapshot { - if m != nil { - return m.Snapshot - } - return nil -} - -type NodeStageVolumeRequest struct { - // The ID of the volume to publish. This field is REQUIRED. 
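Since the `snapshot_id` filter above restricts the listing to a single snapshot, it doubles as a cheap existence probe with no pagination needed. A sketch under the same assumptions as before (generated `ControllerClient`; `snapshotExists` is a hypothetical helper):

```go
// Hypothetical CO-side helper (not generated code): probe one snapshot by ID.
// Assumed imports: "context".
func snapshotExists(ctx context.Context, ctrl ControllerClient, snapshotID string) (bool, error) {
	resp, err := ctrl.ListSnapshots(ctx, &ListSnapshotsRequest{SnapshotId: snapshotID})
	if err != nil {
		return false, err
	}
	return len(resp.GetEntries()) > 0, nil
}
```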
- VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - // The CO SHALL set this field to the value returned by - // `ControllerPublishVolume` if the corresponding Controller Plugin - // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be - // left unset if the corresponding Controller Plugin does not have - // this capability. This is an OPTIONAL field. - PublishInfo map[string]string `protobuf:"bytes,2,rep,name=publish_info,json=publishInfo" json:"publish_info,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // The path to which the volume will be staged. It MUST be an - // absolute path in the root filesystem of the process serving this - // request. The CO SHALL ensure that there is only one - // staging_target_path per volume. - // This is a REQUIRED field. - StagingTargetPath string `protobuf:"bytes,3,opt,name=staging_target_path,json=stagingTargetPath" json:"staging_target_path,omitempty"` - // The capability of the volume the CO expects the volume to have. - // This is a REQUIRED field. - VolumeCapability *VolumeCapability `protobuf:"bytes,4,opt,name=volume_capability,json=volumeCapability" json:"volume_capability,omitempty"` - // Secrets required by plugin to complete node stage volume request. - // This field is OPTIONAL. Refer to the `Secrets Requirements` - // section on how to use this field. - NodeStageSecrets map[string]string `protobuf:"bytes,5,rep,name=node_stage_secrets,json=nodeStageSecrets" json:"node_stage_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // Attributes of the volume to publish. This field is OPTIONAL and - // MUST match the attributes of the `Volume` identified by - // `volume_id`.
- VolumeAttributes map[string]string `protobuf:"bytes,6,rep,name=volume_attributes,json=volumeAttributes" json:"volume_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeStageVolumeRequest) Reset() { *m = NodeStageVolumeRequest{} } -func (m *NodeStageVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*NodeStageVolumeRequest) ProtoMessage() {} -func (*NodeStageVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{38} -} -func (m *NodeStageVolumeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeStageVolumeRequest.Unmarshal(m, b) -} -func (m *NodeStageVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeStageVolumeRequest.Marshal(b, m, deterministic) -} -func (dst *NodeStageVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeStageVolumeRequest.Merge(dst, src) -} -func (m *NodeStageVolumeRequest) XXX_Size() int { - return xxx_messageInfo_NodeStageVolumeRequest.Size(m) -} -func (m *NodeStageVolumeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodeStageVolumeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeStageVolumeRequest proto.InternalMessageInfo - -func (m *NodeStageVolumeRequest) GetVolumeId() string { - if m != nil { - return m.VolumeId - } - return "" -} - -func (m *NodeStageVolumeRequest) GetPublishInfo() map[string]string { - if m != nil { - return m.PublishInfo - } - return nil -} - -func (m *NodeStageVolumeRequest) GetStagingTargetPath() string { - if m != nil { - return m.StagingTargetPath - } - return "" -} - -func (m *NodeStageVolumeRequest) GetVolumeCapability() *VolumeCapability { - if m != nil { - return m.VolumeCapability - } - return nil -} - -func (m *NodeStageVolumeRequest) GetNodeStageSecrets() map[string]string { - if m != nil { - return m.NodeStageSecrets - } - return nil -} - -func (m *NodeStageVolumeRequest) GetVolumeAttributes() map[string]string { - if m != nil { - return m.VolumeAttributes - } - return nil -} - -type NodeStageVolumeResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeStageVolumeResponse) Reset() { *m = NodeStageVolumeResponse{} } -func (m *NodeStageVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*NodeStageVolumeResponse) ProtoMessage() {} -func (*NodeStageVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{39} -} -func (m *NodeStageVolumeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeStageVolumeResponse.Unmarshal(m, b) -} -func (m *NodeStageVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeStageVolumeResponse.Marshal(b, m, deterministic) -} -func (dst *NodeStageVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeStageVolumeResponse.Merge(dst, src) -} -func (m *NodeStageVolumeResponse) XXX_Size() int { - return xxx_messageInfo_NodeStageVolumeResponse.Size(m) -} -func (m *NodeStageVolumeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeStageVolumeResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeStageVolumeResponse proto.InternalMessageInfo - -type NodeUnstageVolumeRequest struct { - // The ID of the volume. This field is REQUIRED. 
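Tying the staging fields together: the CO forwards `publish_info` from `ControllerPublishVolume` (nil when the controller lacks `PUBLISH_UNPUBLISH_VOLUME`) and picks one staging path per volume. A sketch, not part of the generated file, assuming the `NodeClient` interface generated further down:

```go
// Hypothetical CO-side helper (not generated code): stage a volume on the
// node; unstaging later needs only VolumeId plus the same StagingTargetPath.
// Assumed imports: "context".
func stageVolume(ctx context.Context, node NodeClient, volumeID, stagingPath string,
	publishInfo map[string]string, volCap *VolumeCapability) error {
	_, err := node.NodeStageVolume(ctx, &NodeStageVolumeRequest{
		VolumeId:          volumeID,
		PublishInfo:       publishInfo, // from ControllerPublishVolume, may be nil
		StagingTargetPath: stagingPath, // one staging path per volume
		VolumeCapability:  volCap,
	})
	return err
}
```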
- VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - // The path at which the volume was staged. It MUST be an absolute - // path in the root filesystem of the process serving this request. - // This is a REQUIRED field. - StagingTargetPath string `protobuf:"bytes,2,opt,name=staging_target_path,json=stagingTargetPath" json:"staging_target_path,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeUnstageVolumeRequest) Reset() { *m = NodeUnstageVolumeRequest{} } -func (m *NodeUnstageVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*NodeUnstageVolumeRequest) ProtoMessage() {} -func (*NodeUnstageVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{40} -} -func (m *NodeUnstageVolumeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeUnstageVolumeRequest.Unmarshal(m, b) -} -func (m *NodeUnstageVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeUnstageVolumeRequest.Marshal(b, m, deterministic) -} -func (dst *NodeUnstageVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeUnstageVolumeRequest.Merge(dst, src) -} -func (m *NodeUnstageVolumeRequest) XXX_Size() int { - return xxx_messageInfo_NodeUnstageVolumeRequest.Size(m) -} -func (m *NodeUnstageVolumeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodeUnstageVolumeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeUnstageVolumeRequest proto.InternalMessageInfo - -func (m *NodeUnstageVolumeRequest) GetVolumeId() string { - if m != nil { - return m.VolumeId - } - return "" -} - -func (m *NodeUnstageVolumeRequest) GetStagingTargetPath() string { - if m != nil { - return m.StagingTargetPath - } - return "" -} - -type NodeUnstageVolumeResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeUnstageVolumeResponse) Reset() { *m = NodeUnstageVolumeResponse{} } -func (m *NodeUnstageVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*NodeUnstageVolumeResponse) ProtoMessage() {} -func (*NodeUnstageVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{41} -} -func (m *NodeUnstageVolumeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeUnstageVolumeResponse.Unmarshal(m, b) -} -func (m *NodeUnstageVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeUnstageVolumeResponse.Marshal(b, m, deterministic) -} -func (dst *NodeUnstageVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeUnstageVolumeResponse.Merge(dst, src) -} -func (m *NodeUnstageVolumeResponse) XXX_Size() int { - return xxx_messageInfo_NodeUnstageVolumeResponse.Size(m) -} -func (m *NodeUnstageVolumeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeUnstageVolumeResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeUnstageVolumeResponse proto.InternalMessageInfo - -type NodePublishVolumeRequest struct { - // The ID of the volume to publish. This field is REQUIRED.
- VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - // The CO SHALL set this field to the value returned by - // `ControllerPublishVolume` if the corresponding Controller Plugin - // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be - // left unset if the corresponding Controller Plugin does not have - // this capability. This is an OPTIONAL field. - PublishInfo map[string]string `protobuf:"bytes,2,rep,name=publish_info,json=publishInfo" json:"publish_info,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // The path to which the device was mounted by `NodeStageVolume`. - // It MUST be an absolute path in the root filesystem of the process - // serving this request. - // It MUST be set if the Node Plugin implements the - // `STAGE_UNSTAGE_VOLUME` node capability. - // This is an OPTIONAL field. - StagingTargetPath string `protobuf:"bytes,3,opt,name=staging_target_path,json=stagingTargetPath" json:"staging_target_path,omitempty"` - // The path to which the volume will be published. It MUST be an - // absolute path in the root filesystem of the process serving this - // request. The CO SHALL ensure uniqueness of target_path per volume. - // The CO SHALL ensure that the path exists, and that the process - // serving the request has `read` and `write` permissions to the path. - // This is a REQUIRED field. - TargetPath string `protobuf:"bytes,4,opt,name=target_path,json=targetPath" json:"target_path,omitempty"` - // The capability of the volume the CO expects the volume to have. - // This is a REQUIRED field. - VolumeCapability *VolumeCapability `protobuf:"bytes,5,opt,name=volume_capability,json=volumeCapability" json:"volume_capability,omitempty"` - // Whether to publish the volume in readonly mode. This field is - // REQUIRED. - Readonly bool `protobuf:"varint,6,opt,name=readonly" json:"readonly,omitempty"` - // Secrets required by plugin to complete node publish volume request. - // This field is OPTIONAL. Refer to the `Secrets Requirements` - // section on how to use this field. - NodePublishSecrets map[string]string `protobuf:"bytes,7,rep,name=node_publish_secrets,json=nodePublishSecrets" json:"node_publish_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // Attributes of the volume to publish. This field is OPTIONAL and - // MUST match the attributes of the Volume identified by - // `volume_id`. 
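The publish call then moves the volume from the shared staging path to the per-workload target path. A sketch under the same assumptions as above (generated `NodeClient`, plugin with the `STAGE_UNSTAGE_VOLUME` node capability), not part of the generated file:

```go
// Hypothetical CO-side helper (not generated code): publish a staged volume
// at its target path, optionally read-only. Assumed imports: "context".
func publishVolume(ctx context.Context, node NodeClient, volumeID, stagingPath, targetPath string,
	volCap *VolumeCapability, readonly bool) error {
	_, err := node.NodePublishVolume(ctx, &NodePublishVolumeRequest{
		VolumeId:          volumeID,
		StagingTargetPath: stagingPath, // set because STAGE_UNSTAGE_VOLUME is assumed
		TargetPath:        targetPath,
		VolumeCapability:  volCap,
		Readonly:          readonly,
	})
	return err
}
```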
- VolumeAttributes map[string]string `protobuf:"bytes,8,rep,name=volume_attributes,json=volumeAttributes" json:"volume_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodePublishVolumeRequest) Reset() { *m = NodePublishVolumeRequest{} } -func (m *NodePublishVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*NodePublishVolumeRequest) ProtoMessage() {} -func (*NodePublishVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{42} -} -func (m *NodePublishVolumeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodePublishVolumeRequest.Unmarshal(m, b) -} -func (m *NodePublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodePublishVolumeRequest.Marshal(b, m, deterministic) -} -func (dst *NodePublishVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodePublishVolumeRequest.Merge(dst, src) -} -func (m *NodePublishVolumeRequest) XXX_Size() int { - return xxx_messageInfo_NodePublishVolumeRequest.Size(m) -} -func (m *NodePublishVolumeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodePublishVolumeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NodePublishVolumeRequest proto.InternalMessageInfo - -func (m *NodePublishVolumeRequest) GetVolumeId() string { - if m != nil { - return m.VolumeId - } - return "" -} - -func (m *NodePublishVolumeRequest) GetPublishInfo() map[string]string { - if m != nil { - return m.PublishInfo - } - return nil -} - -func (m *NodePublishVolumeRequest) GetStagingTargetPath() string { - if m != nil { - return m.StagingTargetPath - } - return "" -} - -func (m *NodePublishVolumeRequest) GetTargetPath() string { - if m != nil { - return m.TargetPath - } - return "" -} - -func (m *NodePublishVolumeRequest) GetVolumeCapability() *VolumeCapability { - if m != nil { - return m.VolumeCapability - } - return nil -} - -func (m *NodePublishVolumeRequest) GetReadonly() bool { - if m != nil { - return m.Readonly - } - return false -} - -func (m *NodePublishVolumeRequest) GetNodePublishSecrets() map[string]string { - if m != nil { - return m.NodePublishSecrets - } - return nil -} - -func (m *NodePublishVolumeRequest) GetVolumeAttributes() map[string]string { - if m != nil { - return m.VolumeAttributes - } - return nil -} - -type NodePublishVolumeResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodePublishVolumeResponse) Reset() { *m = NodePublishVolumeResponse{} } -func (m *NodePublishVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*NodePublishVolumeResponse) ProtoMessage() {} -func (*NodePublishVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{43} -} -func (m *NodePublishVolumeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodePublishVolumeResponse.Unmarshal(m, b) -} -func (m *NodePublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodePublishVolumeResponse.Marshal(b, m, deterministic) -} -func (dst *NodePublishVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodePublishVolumeResponse.Merge(dst, src) -} -func (m *NodePublishVolumeResponse) XXX_Size() int { - return 
xxx_messageInfo_NodePublishVolumeResponse.Size(m) -} -func (m *NodePublishVolumeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodePublishVolumeResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NodePublishVolumeResponse proto.InternalMessageInfo - -type NodeUnpublishVolumeRequest struct { - // The ID of the volume. This field is REQUIRED. - VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - // The path at which the volume was published. It MUST be an absolute - // path in the root filesystem of the process serving this request. - // This is a REQUIRED field. - TargetPath string `protobuf:"bytes,2,opt,name=target_path,json=targetPath" json:"target_path,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeUnpublishVolumeRequest) Reset() { *m = NodeUnpublishVolumeRequest{} } -func (m *NodeUnpublishVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*NodeUnpublishVolumeRequest) ProtoMessage() {} -func (*NodeUnpublishVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{44} -} -func (m *NodeUnpublishVolumeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeUnpublishVolumeRequest.Unmarshal(m, b) -} -func (m *NodeUnpublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeUnpublishVolumeRequest.Marshal(b, m, deterministic) -} -func (dst *NodeUnpublishVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeUnpublishVolumeRequest.Merge(dst, src) -} -func (m *NodeUnpublishVolumeRequest) XXX_Size() int { - return xxx_messageInfo_NodeUnpublishVolumeRequest.Size(m) -} -func (m *NodeUnpublishVolumeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodeUnpublishVolumeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeUnpublishVolumeRequest proto.InternalMessageInfo - -func (m *NodeUnpublishVolumeRequest) GetVolumeId() string { - if m != nil { - return m.VolumeId - } - return "" -} - -func (m *NodeUnpublishVolumeRequest) GetTargetPath() string { - if m != nil { - return m.TargetPath - } - return "" -} - -type NodeUnpublishVolumeResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeUnpublishVolumeResponse) Reset() { *m = NodeUnpublishVolumeResponse{} } -func (m *NodeUnpublishVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*NodeUnpublishVolumeResponse) ProtoMessage() {} -func (*NodeUnpublishVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{45} -} -func (m *NodeUnpublishVolumeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeUnpublishVolumeResponse.Unmarshal(m, b) -} -func (m *NodeUnpublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeUnpublishVolumeResponse.Marshal(b, m, deterministic) -} -func (dst *NodeUnpublishVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeUnpublishVolumeResponse.Merge(dst, src) -} -func (m *NodeUnpublishVolumeResponse) XXX_Size() int { - return xxx_messageInfo_NodeUnpublishVolumeResponse.Size(m) -} -func (m *NodeUnpublishVolumeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeUnpublishVolumeResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeUnpublishVolumeResponse proto.InternalMessageInfo - -type 
NodeGetIdRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeGetIdRequest) Reset() { *m = NodeGetIdRequest{} } -func (m *NodeGetIdRequest) String() string { return proto.CompactTextString(m) } -func (*NodeGetIdRequest) ProtoMessage() {} -func (*NodeGetIdRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{46} -} -func (m *NodeGetIdRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeGetIdRequest.Unmarshal(m, b) -} -func (m *NodeGetIdRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeGetIdRequest.Marshal(b, m, deterministic) -} -func (dst *NodeGetIdRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeGetIdRequest.Merge(dst, src) -} -func (m *NodeGetIdRequest) XXX_Size() int { - return xxx_messageInfo_NodeGetIdRequest.Size(m) -} -func (m *NodeGetIdRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodeGetIdRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeGetIdRequest proto.InternalMessageInfo - -type NodeGetIdResponse struct { - // The ID of the node as understood by the SP which SHALL be used by - // CO in subsequent `ControllerPublishVolume`. - // This is a REQUIRED field. - NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId" json:"node_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeGetIdResponse) Reset() { *m = NodeGetIdResponse{} } -func (m *NodeGetIdResponse) String() string { return proto.CompactTextString(m) } -func (*NodeGetIdResponse) ProtoMessage() {} -func (*NodeGetIdResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{47} -} -func (m *NodeGetIdResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeGetIdResponse.Unmarshal(m, b) -} -func (m *NodeGetIdResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeGetIdResponse.Marshal(b, m, deterministic) -} -func (dst *NodeGetIdResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeGetIdResponse.Merge(dst, src) -} -func (m *NodeGetIdResponse) XXX_Size() int { - return xxx_messageInfo_NodeGetIdResponse.Size(m) -} -func (m *NodeGetIdResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeGetIdResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeGetIdResponse proto.InternalMessageInfo - -func (m *NodeGetIdResponse) GetNodeId() string { - if m != nil { - return m.NodeId - } - return "" -} - -type NodeGetCapabilitiesRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeGetCapabilitiesRequest) Reset() { *m = NodeGetCapabilitiesRequest{} } -func (m *NodeGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) } -func (*NodeGetCapabilitiesRequest) ProtoMessage() {} -func (*NodeGetCapabilitiesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{48} -} -func (m *NodeGetCapabilitiesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeGetCapabilitiesRequest.Unmarshal(m, b) -} -func (m *NodeGetCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeGetCapabilitiesRequest.Marshal(b, m, deterministic) -} -func (dst *NodeGetCapabilitiesRequest) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_NodeGetCapabilitiesRequest.Merge(dst, src) -} -func (m *NodeGetCapabilitiesRequest) XXX_Size() int { - return xxx_messageInfo_NodeGetCapabilitiesRequest.Size(m) -} -func (m *NodeGetCapabilitiesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodeGetCapabilitiesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeGetCapabilitiesRequest proto.InternalMessageInfo - -type NodeGetCapabilitiesResponse struct { - // All the capabilities that the node service supports. This field - // is OPTIONAL. - Capabilities []*NodeServiceCapability `protobuf:"bytes,1,rep,name=capabilities" json:"capabilities,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeGetCapabilitiesResponse) Reset() { *m = NodeGetCapabilitiesResponse{} } -func (m *NodeGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) } -func (*NodeGetCapabilitiesResponse) ProtoMessage() {} -func (*NodeGetCapabilitiesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{49} -} -func (m *NodeGetCapabilitiesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeGetCapabilitiesResponse.Unmarshal(m, b) -} -func (m *NodeGetCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeGetCapabilitiesResponse.Marshal(b, m, deterministic) -} -func (dst *NodeGetCapabilitiesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeGetCapabilitiesResponse.Merge(dst, src) -} -func (m *NodeGetCapabilitiesResponse) XXX_Size() int { - return xxx_messageInfo_NodeGetCapabilitiesResponse.Size(m) -} -func (m *NodeGetCapabilitiesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeGetCapabilitiesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeGetCapabilitiesResponse proto.InternalMessageInfo - -func (m *NodeGetCapabilitiesResponse) GetCapabilities() []*NodeServiceCapability { - if m != nil { - return m.Capabilities - } - return nil -} - -// Specifies a capability of the node service. 
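Whether the stage/publish split above applies at all is itself advertised through these node capabilities. A minimal sketch, not part of the generated file, assuming the generated `NodeClient` and the v0 `STAGE_UNSTAGE_VOLUME` value:

```go
// Hypothetical CO-side helper (not generated code): true when the node
// plugin expects NodeStageVolume/NodeUnstageVolume calls.
// Assumed imports: "context".
func supportsStaging(ctx context.Context, node NodeClient) (bool, error) {
	resp, err := node.NodeGetCapabilities(ctx, &NodeGetCapabilitiesRequest{})
	if err != nil {
		return false, err
	}
	for _, c := range resp.GetCapabilities() {
		// GetRpc and GetType are nil-safe generated getters.
		if c.GetRpc().GetType() == NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME {
			return true, nil
		}
	}
	return false, nil
}
```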
-type NodeServiceCapability struct { - // Types that are valid to be assigned to Type: - // *NodeServiceCapability_Rpc - Type isNodeServiceCapability_Type `protobuf_oneof:"type"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeServiceCapability) Reset() { *m = NodeServiceCapability{} } -func (m *NodeServiceCapability) String() string { return proto.CompactTextString(m) } -func (*NodeServiceCapability) ProtoMessage() {} -func (*NodeServiceCapability) Descriptor() ([]byte, []int) { - return fileDescriptor_csi_31237507707d37ec, []int{50} -} -func (m *NodeServiceCapability) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeServiceCapability.Unmarshal(m, b) -} -func (m *NodeServiceCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeServiceCapability.Marshal(b, m, deterministic) -} -func (dst *NodeServiceCapability) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeServiceCapability.Merge(dst, src) -} -func (m *NodeServiceCapability) XXX_Size() int { - return xxx_messageInfo_NodeServiceCapability.Size(m) -} -func (m *NodeServiceCapability) XXX_DiscardUnknown() { - xxx_messageInfo_NodeServiceCapability.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeServiceCapability proto.InternalMessageInfo - -type isNodeServiceCapability_Type interface { - isNodeServiceCapability_Type() -} - -type NodeServiceCapability_Rpc struct { - Rpc *NodeServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,oneof"` -} - -func (*NodeServiceCapability_Rpc) isNodeServiceCapability_Type() {} - -func (m *NodeServiceCapability) GetType() isNodeServiceCapability_Type { - if m != nil { - return m.Type - } - return nil -} - -func (m *NodeServiceCapability) GetRpc() *NodeServiceCapability_RPC { - if x, ok := m.GetType().(*NodeServiceCapability_Rpc); ok { - return x.Rpc - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*NodeServiceCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
-	return _NodeServiceCapability_OneofMarshaler, _NodeServiceCapability_OneofUnmarshaler, _NodeServiceCapability_OneofSizer, []interface{}{
-		(*NodeServiceCapability_Rpc)(nil),
-	}
-}
-
-func _NodeServiceCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
-	m := msg.(*NodeServiceCapability)
-	// type
-	switch x := m.Type.(type) {
-	case *NodeServiceCapability_Rpc:
-		b.EncodeVarint(1<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.Rpc); err != nil {
-			return err
-		}
-	case nil:
-	default:
-		return fmt.Errorf("NodeServiceCapability.Type has unexpected type %T", x)
-	}
-	return nil
-}
-
-func _NodeServiceCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
-	m := msg.(*NodeServiceCapability)
-	switch tag {
-	case 1: // type.rpc
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(NodeServiceCapability_RPC)
-		err := b.DecodeMessage(msg)
-		m.Type = &NodeServiceCapability_Rpc{msg}
-		return true, err
-	default:
-		return false, nil
-	}
-}
-
-func _NodeServiceCapability_OneofSizer(msg proto.Message) (n int) {
-	m := msg.(*NodeServiceCapability)
-	// type
-	switch x := m.Type.(type) {
-	case *NodeServiceCapability_Rpc:
-		s := proto.Size(x.Rpc)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case nil:
-	default:
-		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
-	}
-	return n
-}
-
-type NodeServiceCapability_RPC struct {
-	Type                 NodeServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,enum=csi.v0.NodeServiceCapability_RPC_Type" json:"type,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}                       `json:"-"`
-	XXX_unrecognized     []byte                         `json:"-"`
-	XXX_sizecache        int32                          `json:"-"`
-}
-
-func (m *NodeServiceCapability_RPC) Reset()         { *m = NodeServiceCapability_RPC{} }
-func (m *NodeServiceCapability_RPC) String() string { return proto.CompactTextString(m) }
-func (*NodeServiceCapability_RPC) ProtoMessage()    {}
-func (*NodeServiceCapability_RPC) Descriptor() ([]byte, []int) {
-	return fileDescriptor_csi_31237507707d37ec, []int{50, 0}
-}
-func (m *NodeServiceCapability_RPC) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_NodeServiceCapability_RPC.Unmarshal(m, b)
-}
-func (m *NodeServiceCapability_RPC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_NodeServiceCapability_RPC.Marshal(b, m, deterministic)
-}
-func (dst *NodeServiceCapability_RPC) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_NodeServiceCapability_RPC.Merge(dst, src)
-}
-func (m *NodeServiceCapability_RPC) XXX_Size() int {
-	return xxx_messageInfo_NodeServiceCapability_RPC.Size(m)
-}
-func (m *NodeServiceCapability_RPC) XXX_DiscardUnknown() {
-	xxx_messageInfo_NodeServiceCapability_RPC.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NodeServiceCapability_RPC proto.InternalMessageInfo
-
-func (m *NodeServiceCapability_RPC) GetType() NodeServiceCapability_RPC_Type {
-	if m != nil {
-		return m.Type
-	}
-	return NodeServiceCapability_RPC_UNKNOWN
-}
-
-type NodeGetInfoRequest struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *NodeGetInfoRequest) Reset()         { *m = NodeGetInfoRequest{} }
-func (m *NodeGetInfoRequest) String() string { return proto.CompactTextString(m) }
-func (*NodeGetInfoRequest) ProtoMessage()    {}
-func (*NodeGetInfoRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_csi_31237507707d37ec, []int{51}
-}
-func (m *NodeGetInfoRequest) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_NodeGetInfoRequest.Unmarshal(m, b)
-}
-func (m *NodeGetInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_NodeGetInfoRequest.Marshal(b, m, deterministic)
-}
-func (dst *NodeGetInfoRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_NodeGetInfoRequest.Merge(dst, src)
-}
-func (m *NodeGetInfoRequest) XXX_Size() int {
-	return xxx_messageInfo_NodeGetInfoRequest.Size(m)
-}
-func (m *NodeGetInfoRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_NodeGetInfoRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NodeGetInfoRequest proto.InternalMessageInfo
-
-type NodeGetInfoResponse struct {
-	// The ID of the node as understood by the SP which SHALL be used by
-	// CO in subsequent calls to `ControllerPublishVolume`.
-	// This is a REQUIRED field.
-	NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId" json:"node_id,omitempty"`
-	// Maximum number of volumes that controller can publish to the node.
-	// If value is not set or zero CO SHALL decide how many volumes of
-	// this type can be published by the controller to the node. The
-	// plugin MUST NOT set negative values here.
-	// This field is OPTIONAL.
-	MaxVolumesPerNode int64 `protobuf:"varint,2,opt,name=max_volumes_per_node,json=maxVolumesPerNode" json:"max_volumes_per_node,omitempty"`
-	// Specifies where (regions, zones, racks, etc.) the node is
-	// accessible from.
-	// A plugin that returns this field MUST also set the
-	// ACCESSIBILITY_CONSTRAINTS plugin capability.
-	// COs MAY use this information along with the topology information
-	// returned in CreateVolumeResponse to ensure that a given volume is
-	// accessible from a given node when scheduling workloads.
-	// This field is OPTIONAL. If it is not specified, the CO MAY assume
-	// the node is not subject to any topological constraint, and MAY
-	// schedule workloads that reference any volume V, such that there are
-	// no topological constraints declared for V.
-	//
-	// Example 1:
-	//   accessible_topology =
-	//     {"region": "R1", "zone": "Z2"}
-	// Indicates the node exists within the "region" "R1" and the "zone"
-	// "Z2".
-	AccessibleTopology   *Topology `protobuf:"bytes,3,opt,name=accessible_topology,json=accessibleTopology" json:"accessible_topology,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
-	XXX_unrecognized     []byte    `json:"-"`
-	XXX_sizecache        int32     `json:"-"`
-}
-
-func (m *NodeGetInfoResponse) Reset()         { *m = NodeGetInfoResponse{} }
-func (m *NodeGetInfoResponse) String() string { return proto.CompactTextString(m) }
-func (*NodeGetInfoResponse) ProtoMessage()    {}
-func (*NodeGetInfoResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_csi_31237507707d37ec, []int{52}
-}
-func (m *NodeGetInfoResponse) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_NodeGetInfoResponse.Unmarshal(m, b)
-}
-func (m *NodeGetInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_NodeGetInfoResponse.Marshal(b, m, deterministic)
-}
-func (dst *NodeGetInfoResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_NodeGetInfoResponse.Merge(dst, src)
-}
-func (m *NodeGetInfoResponse) XXX_Size() int {
-	return xxx_messageInfo_NodeGetInfoResponse.Size(m)
-}
-func (m *NodeGetInfoResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_NodeGetInfoResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NodeGetInfoResponse proto.InternalMessageInfo
-
-func (m *NodeGetInfoResponse) GetNodeId() string {
-	if m != nil {
-		return m.NodeId
-	}
-	return ""
-}
-
-func (m *NodeGetInfoResponse) GetMaxVolumesPerNode() int64 {
-	if m != nil {
-		return m.MaxVolumesPerNode
-	}
-	return 0
-}
-
-func (m *NodeGetInfoResponse) GetAccessibleTopology() *Topology {
-	if m != nil {
-		return m.AccessibleTopology
-	}
-	return nil
-}
-
-func init() {
-	proto.RegisterType((*GetPluginInfoRequest)(nil), "csi.v0.GetPluginInfoRequest")
-	proto.RegisterType((*GetPluginInfoResponse)(nil), "csi.v0.GetPluginInfoResponse")
-	proto.RegisterMapType((map[string]string)(nil), "csi.v0.GetPluginInfoResponse.ManifestEntry")
-	proto.RegisterType((*GetPluginCapabilitiesRequest)(nil), "csi.v0.GetPluginCapabilitiesRequest")
-	proto.RegisterType((*GetPluginCapabilitiesResponse)(nil), "csi.v0.GetPluginCapabilitiesResponse")
-	proto.RegisterType((*PluginCapability)(nil), "csi.v0.PluginCapability")
-	proto.RegisterType((*PluginCapability_Service)(nil), "csi.v0.PluginCapability.Service")
-	proto.RegisterType((*ProbeRequest)(nil), "csi.v0.ProbeRequest")
-	proto.RegisterType((*ProbeResponse)(nil), "csi.v0.ProbeResponse")
-	proto.RegisterType((*CreateVolumeRequest)(nil), "csi.v0.CreateVolumeRequest")
-	proto.RegisterMapType((map[string]string)(nil), "csi.v0.CreateVolumeRequest.ControllerCreateSecretsEntry")
-	proto.RegisterMapType((map[string]string)(nil), "csi.v0.CreateVolumeRequest.ParametersEntry")
-	proto.RegisterType((*VolumeContentSource)(nil), "csi.v0.VolumeContentSource")
-	proto.RegisterType((*VolumeContentSource_SnapshotSource)(nil), "csi.v0.VolumeContentSource.SnapshotSource")
-	proto.RegisterType((*CreateVolumeResponse)(nil), "csi.v0.CreateVolumeResponse")
-	proto.RegisterType((*VolumeCapability)(nil), "csi.v0.VolumeCapability")
-	proto.RegisterType((*VolumeCapability_BlockVolume)(nil), "csi.v0.VolumeCapability.BlockVolume")
-	proto.RegisterType((*VolumeCapability_MountVolume)(nil), "csi.v0.VolumeCapability.MountVolume")
-	proto.RegisterType((*VolumeCapability_AccessMode)(nil), "csi.v0.VolumeCapability.AccessMode")
-	proto.RegisterType((*CapacityRange)(nil), "csi.v0.CapacityRange")
-	proto.RegisterType((*Volume)(nil), "csi.v0.Volume")
-	proto.RegisterMapType((map[string]string)(nil), "csi.v0.Volume.AttributesEntry")
-	proto.RegisterType((*TopologyRequirement)(nil), "csi.v0.TopologyRequirement")
-	proto.RegisterType((*Topology)(nil), "csi.v0.Topology")
-	proto.RegisterMapType((map[string]string)(nil), "csi.v0.Topology.SegmentsEntry")
-	proto.RegisterType((*DeleteVolumeRequest)(nil), "csi.v0.DeleteVolumeRequest")
-	proto.RegisterMapType((map[string]string)(nil), "csi.v0.DeleteVolumeRequest.ControllerDeleteSecretsEntry")
-	proto.RegisterType((*DeleteVolumeResponse)(nil), "csi.v0.DeleteVolumeResponse")
-	proto.RegisterType((*ControllerPublishVolumeRequest)(nil), "csi.v0.ControllerPublishVolumeRequest")
-	proto.RegisterMapType((map[string]string)(nil), "csi.v0.ControllerPublishVolumeRequest.ControllerPublishSecretsEntry")
-	proto.RegisterMapType((map[string]string)(nil), "csi.v0.ControllerPublishVolumeRequest.VolumeAttributesEntry")
-	proto.RegisterType((*ControllerPublishVolumeResponse)(nil), "csi.v0.ControllerPublishVolumeResponse")
-	proto.RegisterMapType((map[string]string)(nil), "csi.v0.ControllerPublishVolumeResponse.PublishInfoEntry")
-	proto.RegisterType((*ControllerUnpublishVolumeRequest)(nil), "csi.v0.ControllerUnpublishVolumeRequest")
-	proto.RegisterMapType((map[string]string)(nil), "csi.v0.ControllerUnpublishVolumeRequest.ControllerUnpublishSecretsEntry")
-	proto.RegisterType((*ControllerUnpublishVolumeResponse)(nil), "csi.v0.ControllerUnpublishVolumeResponse")
-	proto.RegisterType((*ValidateVolumeCapabilitiesRequest)(nil), "csi.v0.ValidateVolumeCapabilitiesRequest")
-	proto.RegisterMapType((map[string]string)(nil), "csi.v0.ValidateVolumeCapabilitiesRequest.VolumeAttributesEntry")
-	proto.RegisterType((*ValidateVolumeCapabilitiesResponse)(nil), "csi.v0.ValidateVolumeCapabilitiesResponse")
-	proto.RegisterType((*ListVolumesRequest)(nil), "csi.v0.ListVolumesRequest")
-	proto.RegisterType((*ListVolumesResponse)(nil), "csi.v0.ListVolumesResponse")
-	proto.RegisterType((*ListVolumesResponse_Entry)(nil), "csi.v0.ListVolumesResponse.Entry")
-	proto.RegisterType((*GetCapacityRequest)(nil), "csi.v0.GetCapacityRequest")
-	proto.RegisterMapType((map[string]string)(nil), "csi.v0.GetCapacityRequest.ParametersEntry")
-	proto.RegisterType((*GetCapacityResponse)(nil), "csi.v0.GetCapacityResponse")
-	proto.RegisterType((*ControllerGetCapabilitiesRequest)(nil), "csi.v0.ControllerGetCapabilitiesRequest")
-	proto.RegisterType((*ControllerGetCapabilitiesResponse)(nil), "csi.v0.ControllerGetCapabilitiesResponse")
-	proto.RegisterType((*ControllerServiceCapability)(nil), "csi.v0.ControllerServiceCapability")
-	proto.RegisterType((*ControllerServiceCapability_RPC)(nil), "csi.v0.ControllerServiceCapability.RPC")
-	proto.RegisterType((*CreateSnapshotRequest)(nil), "csi.v0.CreateSnapshotRequest")
-	proto.RegisterMapType((map[string]string)(nil), "csi.v0.CreateSnapshotRequest.CreateSnapshotSecretsEntry")
-	proto.RegisterMapType((map[string]string)(nil), "csi.v0.CreateSnapshotRequest.ParametersEntry")
-	proto.RegisterType((*CreateSnapshotResponse)(nil), "csi.v0.CreateSnapshotResponse")
-	proto.RegisterType((*Snapshot)(nil), "csi.v0.Snapshot")
-	proto.RegisterType((*SnapshotStatus)(nil), "csi.v0.SnapshotStatus")
-	proto.RegisterType((*DeleteSnapshotRequest)(nil), "csi.v0.DeleteSnapshotRequest")
-	proto.RegisterMapType((map[string]string)(nil), "csi.v0.DeleteSnapshotRequest.DeleteSnapshotSecretsEntry")
-	proto.RegisterType((*DeleteSnapshotResponse)(nil), "csi.v0.DeleteSnapshotResponse")
-	proto.RegisterType((*ListSnapshotsRequest)(nil), "csi.v0.ListSnapshotsRequest")
-	proto.RegisterType((*ListSnapshotsResponse)(nil), "csi.v0.ListSnapshotsResponse")
-	proto.RegisterType((*ListSnapshotsResponse_Entry)(nil), "csi.v0.ListSnapshotsResponse.Entry")
-	proto.RegisterType((*NodeStageVolumeRequest)(nil), "csi.v0.NodeStageVolumeRequest")
-	proto.RegisterMapType((map[string]string)(nil), "csi.v0.NodeStageVolumeRequest.NodeStageSecretsEntry")
-	proto.RegisterMapType((map[string]string)(nil), "csi.v0.NodeStageVolumeRequest.PublishInfoEntry")
-	proto.RegisterMapType((map[string]string)(nil), "csi.v0.NodeStageVolumeRequest.VolumeAttributesEntry")
-	proto.RegisterType((*NodeStageVolumeResponse)(nil), "csi.v0.NodeStageVolumeResponse")
-	proto.RegisterType((*NodeUnstageVolumeRequest)(nil), "csi.v0.NodeUnstageVolumeRequest")
-	proto.RegisterType((*NodeUnstageVolumeResponse)(nil), "csi.v0.NodeUnstageVolumeResponse")
-	proto.RegisterType((*NodePublishVolumeRequest)(nil), "csi.v0.NodePublishVolumeRequest")
-	proto.RegisterMapType((map[string]string)(nil), "csi.v0.NodePublishVolumeRequest.NodePublishSecretsEntry")
-	proto.RegisterMapType((map[string]string)(nil), "csi.v0.NodePublishVolumeRequest.PublishInfoEntry")
-	proto.RegisterMapType((map[string]string)(nil), "csi.v0.NodePublishVolumeRequest.VolumeAttributesEntry")
-	proto.RegisterType((*NodePublishVolumeResponse)(nil), "csi.v0.NodePublishVolumeResponse")
-	proto.RegisterType((*NodeUnpublishVolumeRequest)(nil), "csi.v0.NodeUnpublishVolumeRequest")
-	proto.RegisterType((*NodeUnpublishVolumeResponse)(nil), "csi.v0.NodeUnpublishVolumeResponse")
-	proto.RegisterType((*NodeGetIdRequest)(nil), "csi.v0.NodeGetIdRequest")
-	proto.RegisterType((*NodeGetIdResponse)(nil), "csi.v0.NodeGetIdResponse")
-	proto.RegisterType((*NodeGetCapabilitiesRequest)(nil), "csi.v0.NodeGetCapabilitiesRequest")
-	proto.RegisterType((*NodeGetCapabilitiesResponse)(nil), "csi.v0.NodeGetCapabilitiesResponse")
-	proto.RegisterType((*NodeServiceCapability)(nil), "csi.v0.NodeServiceCapability")
-	proto.RegisterType((*NodeServiceCapability_RPC)(nil), "csi.v0.NodeServiceCapability.RPC")
-	proto.RegisterType((*NodeGetInfoRequest)(nil), "csi.v0.NodeGetInfoRequest")
-	proto.RegisterType((*NodeGetInfoResponse)(nil), "csi.v0.NodeGetInfoResponse")
-	proto.RegisterEnum("csi.v0.PluginCapability_Service_Type", PluginCapability_Service_Type_name, PluginCapability_Service_Type_value)
-	proto.RegisterEnum("csi.v0.VolumeCapability_AccessMode_Mode", VolumeCapability_AccessMode_Mode_name, VolumeCapability_AccessMode_Mode_value)
-	proto.RegisterEnum("csi.v0.ControllerServiceCapability_RPC_Type", ControllerServiceCapability_RPC_Type_name, ControllerServiceCapability_RPC_Type_value)
-	proto.RegisterEnum("csi.v0.SnapshotStatus_Type", SnapshotStatus_Type_name, SnapshotStatus_Type_value)
-	proto.RegisterEnum("csi.v0.NodeServiceCapability_RPC_Type", NodeServiceCapability_RPC_Type_name, NodeServiceCapability_RPC_Type_value)
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4 - -// Client API for Identity service - -type IdentityClient interface { - GetPluginInfo(ctx context.Context, in *GetPluginInfoRequest, opts ...grpc.CallOption) (*GetPluginInfoResponse, error) - GetPluginCapabilities(ctx context.Context, in *GetPluginCapabilitiesRequest, opts ...grpc.CallOption) (*GetPluginCapabilitiesResponse, error) - Probe(ctx context.Context, in *ProbeRequest, opts ...grpc.CallOption) (*ProbeResponse, error) -} - -type identityClient struct { - cc *grpc.ClientConn -} - -func NewIdentityClient(cc *grpc.ClientConn) IdentityClient { - return &identityClient{cc} -} - -func (c *identityClient) GetPluginInfo(ctx context.Context, in *GetPluginInfoRequest, opts ...grpc.CallOption) (*GetPluginInfoResponse, error) { - out := new(GetPluginInfoResponse) - err := grpc.Invoke(ctx, "/csi.v0.Identity/GetPluginInfo", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *identityClient) GetPluginCapabilities(ctx context.Context, in *GetPluginCapabilitiesRequest, opts ...grpc.CallOption) (*GetPluginCapabilitiesResponse, error) { - out := new(GetPluginCapabilitiesResponse) - err := grpc.Invoke(ctx, "/csi.v0.Identity/GetPluginCapabilities", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *identityClient) Probe(ctx context.Context, in *ProbeRequest, opts ...grpc.CallOption) (*ProbeResponse, error) { - out := new(ProbeResponse) - err := grpc.Invoke(ctx, "/csi.v0.Identity/Probe", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for Identity service - -type IdentityServer interface { - GetPluginInfo(context.Context, *GetPluginInfoRequest) (*GetPluginInfoResponse, error) - GetPluginCapabilities(context.Context, *GetPluginCapabilitiesRequest) (*GetPluginCapabilitiesResponse, error) - Probe(context.Context, *ProbeRequest) (*ProbeResponse, error) -} - -func RegisterIdentityServer(s *grpc.Server, srv IdentityServer) { - s.RegisterService(&_Identity_serviceDesc, srv) -} - -func _Identity_GetPluginInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetPluginInfoRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IdentityServer).GetPluginInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Identity/GetPluginInfo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IdentityServer).GetPluginInfo(ctx, req.(*GetPluginInfoRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Identity_GetPluginCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetPluginCapabilitiesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IdentityServer).GetPluginCapabilities(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Identity/GetPluginCapabilities", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IdentityServer).GetPluginCapabilities(ctx, req.(*GetPluginCapabilitiesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Identity_Probe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ProbeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IdentityServer).Probe(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Identity/Probe", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IdentityServer).Probe(ctx, req.(*ProbeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Identity_serviceDesc = grpc.ServiceDesc{ - ServiceName: "csi.v0.Identity", - HandlerType: (*IdentityServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetPluginInfo", - Handler: _Identity_GetPluginInfo_Handler, - }, - { - MethodName: "GetPluginCapabilities", - Handler: _Identity_GetPluginCapabilities_Handler, - }, - { - MethodName: "Probe", - Handler: _Identity_Probe_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "csi.proto", -} - -// Client API for Controller service - -type ControllerClient interface { - CreateVolume(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error) - DeleteVolume(ctx context.Context, in *DeleteVolumeRequest, opts ...grpc.CallOption) (*DeleteVolumeResponse, error) - ControllerPublishVolume(ctx context.Context, in *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error) - ControllerUnpublishVolume(ctx context.Context, in *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*ControllerUnpublishVolumeResponse, error) - ValidateVolumeCapabilities(ctx context.Context, in *ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*ValidateVolumeCapabilitiesResponse, error) - ListVolumes(ctx context.Context, in *ListVolumesRequest, opts ...grpc.CallOption) (*ListVolumesResponse, error) - GetCapacity(ctx context.Context, in *GetCapacityRequest, opts ...grpc.CallOption) (*GetCapacityResponse, error) - ControllerGetCapabilities(ctx context.Context, in *ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*ControllerGetCapabilitiesResponse, error) - CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*CreateSnapshotResponse, error) - DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*DeleteSnapshotResponse, error) - ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) -} - -type controllerClient struct { - cc *grpc.ClientConn -} - -func NewControllerClient(cc *grpc.ClientConn) ControllerClient { - return &controllerClient{cc} -} - -func (c *controllerClient) CreateVolume(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error) { - out := new(CreateVolumeResponse) - err := grpc.Invoke(ctx, "/csi.v0.Controller/CreateVolume", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controllerClient) DeleteVolume(ctx context.Context, in *DeleteVolumeRequest, opts ...grpc.CallOption) (*DeleteVolumeResponse, error) { - out := new(DeleteVolumeResponse) - err := grpc.Invoke(ctx, "/csi.v0.Controller/DeleteVolume", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *controllerClient) ControllerPublishVolume(ctx context.Context, in *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error) { - out := new(ControllerPublishVolumeResponse) - err := grpc.Invoke(ctx, "/csi.v0.Controller/ControllerPublishVolume", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controllerClient) ControllerUnpublishVolume(ctx context.Context, in *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*ControllerUnpublishVolumeResponse, error) { - out := new(ControllerUnpublishVolumeResponse) - err := grpc.Invoke(ctx, "/csi.v0.Controller/ControllerUnpublishVolume", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controllerClient) ValidateVolumeCapabilities(ctx context.Context, in *ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*ValidateVolumeCapabilitiesResponse, error) { - out := new(ValidateVolumeCapabilitiesResponse) - err := grpc.Invoke(ctx, "/csi.v0.Controller/ValidateVolumeCapabilities", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controllerClient) ListVolumes(ctx context.Context, in *ListVolumesRequest, opts ...grpc.CallOption) (*ListVolumesResponse, error) { - out := new(ListVolumesResponse) - err := grpc.Invoke(ctx, "/csi.v0.Controller/ListVolumes", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controllerClient) GetCapacity(ctx context.Context, in *GetCapacityRequest, opts ...grpc.CallOption) (*GetCapacityResponse, error) { - out := new(GetCapacityResponse) - err := grpc.Invoke(ctx, "/csi.v0.Controller/GetCapacity", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controllerClient) ControllerGetCapabilities(ctx context.Context, in *ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*ControllerGetCapabilitiesResponse, error) { - out := new(ControllerGetCapabilitiesResponse) - err := grpc.Invoke(ctx, "/csi.v0.Controller/ControllerGetCapabilities", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controllerClient) CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*CreateSnapshotResponse, error) { - out := new(CreateSnapshotResponse) - err := grpc.Invoke(ctx, "/csi.v0.Controller/CreateSnapshot", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controllerClient) DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*DeleteSnapshotResponse, error) { - out := new(DeleteSnapshotResponse) - err := grpc.Invoke(ctx, "/csi.v0.Controller/DeleteSnapshot", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controllerClient) ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) { - out := new(ListSnapshotsResponse) - err := grpc.Invoke(ctx, "/csi.v0.Controller/ListSnapshots", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -// Server API for Controller service - -type ControllerServer interface { - CreateVolume(context.Context, *CreateVolumeRequest) (*CreateVolumeResponse, error) - DeleteVolume(context.Context, *DeleteVolumeRequest) (*DeleteVolumeResponse, error) - ControllerPublishVolume(context.Context, *ControllerPublishVolumeRequest) (*ControllerPublishVolumeResponse, error) - ControllerUnpublishVolume(context.Context, *ControllerUnpublishVolumeRequest) (*ControllerUnpublishVolumeResponse, error) - ValidateVolumeCapabilities(context.Context, *ValidateVolumeCapabilitiesRequest) (*ValidateVolumeCapabilitiesResponse, error) - ListVolumes(context.Context, *ListVolumesRequest) (*ListVolumesResponse, error) - GetCapacity(context.Context, *GetCapacityRequest) (*GetCapacityResponse, error) - ControllerGetCapabilities(context.Context, *ControllerGetCapabilitiesRequest) (*ControllerGetCapabilitiesResponse, error) - CreateSnapshot(context.Context, *CreateSnapshotRequest) (*CreateSnapshotResponse, error) - DeleteSnapshot(context.Context, *DeleteSnapshotRequest) (*DeleteSnapshotResponse, error) - ListSnapshots(context.Context, *ListSnapshotsRequest) (*ListSnapshotsResponse, error) -} - -func RegisterControllerServer(s *grpc.Server, srv ControllerServer) { - s.RegisterService(&_Controller_serviceDesc, srv) -} - -func _Controller_CreateVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateVolumeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).CreateVolume(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Controller/CreateVolume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).CreateVolume(ctx, req.(*CreateVolumeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Controller_DeleteVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteVolumeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).DeleteVolume(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Controller/DeleteVolume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).DeleteVolume(ctx, req.(*DeleteVolumeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Controller_ControllerPublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ControllerPublishVolumeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).ControllerPublishVolume(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Controller/ControllerPublishVolume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).ControllerPublishVolume(ctx, req.(*ControllerPublishVolumeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Controller_ControllerUnpublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ControllerUnpublishVolumeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).ControllerUnpublishVolume(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Controller/ControllerUnpublishVolume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).ControllerUnpublishVolume(ctx, req.(*ControllerUnpublishVolumeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Controller_ValidateVolumeCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ValidateVolumeCapabilitiesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).ValidateVolumeCapabilities(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Controller/ValidateVolumeCapabilities", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).ValidateVolumeCapabilities(ctx, req.(*ValidateVolumeCapabilitiesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Controller_ListVolumes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListVolumesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).ListVolumes(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Controller/ListVolumes", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).ListVolumes(ctx, req.(*ListVolumesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Controller_GetCapacity_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetCapacityRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).GetCapacity(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Controller/GetCapacity", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).GetCapacity(ctx, req.(*GetCapacityRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Controller_ControllerGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ControllerGetCapabilitiesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).ControllerGetCapabilities(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Controller/ControllerGetCapabilities", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).ControllerGetCapabilities(ctx, req.(*ControllerGetCapabilitiesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Controller_CreateSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := 
new(CreateSnapshotRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).CreateSnapshot(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Controller/CreateSnapshot", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).CreateSnapshot(ctx, req.(*CreateSnapshotRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Controller_DeleteSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteSnapshotRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).DeleteSnapshot(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Controller/DeleteSnapshot", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).DeleteSnapshot(ctx, req.(*DeleteSnapshotRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Controller_ListSnapshots_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListSnapshotsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).ListSnapshots(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Controller/ListSnapshots", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).ListSnapshots(ctx, req.(*ListSnapshotsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Controller_serviceDesc = grpc.ServiceDesc{ - ServiceName: "csi.v0.Controller", - HandlerType: (*ControllerServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "CreateVolume", - Handler: _Controller_CreateVolume_Handler, - }, - { - MethodName: "DeleteVolume", - Handler: _Controller_DeleteVolume_Handler, - }, - { - MethodName: "ControllerPublishVolume", - Handler: _Controller_ControllerPublishVolume_Handler, - }, - { - MethodName: "ControllerUnpublishVolume", - Handler: _Controller_ControllerUnpublishVolume_Handler, - }, - { - MethodName: "ValidateVolumeCapabilities", - Handler: _Controller_ValidateVolumeCapabilities_Handler, - }, - { - MethodName: "ListVolumes", - Handler: _Controller_ListVolumes_Handler, - }, - { - MethodName: "GetCapacity", - Handler: _Controller_GetCapacity_Handler, - }, - { - MethodName: "ControllerGetCapabilities", - Handler: _Controller_ControllerGetCapabilities_Handler, - }, - { - MethodName: "CreateSnapshot", - Handler: _Controller_CreateSnapshot_Handler, - }, - { - MethodName: "DeleteSnapshot", - Handler: _Controller_DeleteSnapshot_Handler, - }, - { - MethodName: "ListSnapshots", - Handler: _Controller_ListSnapshots_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "csi.proto", -} - -// Client API for Node service - -type NodeClient interface { - NodeStageVolume(ctx context.Context, in *NodeStageVolumeRequest, opts ...grpc.CallOption) (*NodeStageVolumeResponse, error) - NodeUnstageVolume(ctx context.Context, in *NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*NodeUnstageVolumeResponse, error) - NodePublishVolume(ctx context.Context, in *NodePublishVolumeRequest, opts ...grpc.CallOption) (*NodePublishVolumeResponse, error) - NodeUnpublishVolume(ctx 
context.Context, in *NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*NodeUnpublishVolumeResponse, error) - // NodeGetId is being deprecated in favor of NodeGetInfo and will be - // removed in CSI 1.0. Existing drivers, however, may depend on this - // RPC call and hence this RPC call MUST be implemented by the CSI - // plugin prior to v1.0. - NodeGetId(ctx context.Context, in *NodeGetIdRequest, opts ...grpc.CallOption) (*NodeGetIdResponse, error) - NodeGetCapabilities(ctx context.Context, in *NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*NodeGetCapabilitiesResponse, error) - // Prior to CSI 1.0 - CSI plugins MUST implement both NodeGetId and - // NodeGetInfo RPC calls. - NodeGetInfo(ctx context.Context, in *NodeGetInfoRequest, opts ...grpc.CallOption) (*NodeGetInfoResponse, error) -} - -type nodeClient struct { - cc *grpc.ClientConn -} - -func NewNodeClient(cc *grpc.ClientConn) NodeClient { - return &nodeClient{cc} -} - -func (c *nodeClient) NodeStageVolume(ctx context.Context, in *NodeStageVolumeRequest, opts ...grpc.CallOption) (*NodeStageVolumeResponse, error) { - out := new(NodeStageVolumeResponse) - err := grpc.Invoke(ctx, "/csi.v0.Node/NodeStageVolume", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *nodeClient) NodeUnstageVolume(ctx context.Context, in *NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*NodeUnstageVolumeResponse, error) { - out := new(NodeUnstageVolumeResponse) - err := grpc.Invoke(ctx, "/csi.v0.Node/NodeUnstageVolume", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *nodeClient) NodePublishVolume(ctx context.Context, in *NodePublishVolumeRequest, opts ...grpc.CallOption) (*NodePublishVolumeResponse, error) { - out := new(NodePublishVolumeResponse) - err := grpc.Invoke(ctx, "/csi.v0.Node/NodePublishVolume", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *nodeClient) NodeUnpublishVolume(ctx context.Context, in *NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*NodeUnpublishVolumeResponse, error) { - out := new(NodeUnpublishVolumeResponse) - err := grpc.Invoke(ctx, "/csi.v0.Node/NodeUnpublishVolume", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Deprecated: Do not use. -func (c *nodeClient) NodeGetId(ctx context.Context, in *NodeGetIdRequest, opts ...grpc.CallOption) (*NodeGetIdResponse, error) { - out := new(NodeGetIdResponse) - err := grpc.Invoke(ctx, "/csi.v0.Node/NodeGetId", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *nodeClient) NodeGetCapabilities(ctx context.Context, in *NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*NodeGetCapabilitiesResponse, error) { - out := new(NodeGetCapabilitiesResponse) - err := grpc.Invoke(ctx, "/csi.v0.Node/NodeGetCapabilities", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *nodeClient) NodeGetInfo(ctx context.Context, in *NodeGetInfoRequest, opts ...grpc.CallOption) (*NodeGetInfoResponse, error) { - out := new(NodeGetInfoResponse) - err := grpc.Invoke(ctx, "/csi.v0.Node/NodeGetInfo", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -// Server API for Node service - -type NodeServer interface { - NodeStageVolume(context.Context, *NodeStageVolumeRequest) (*NodeStageVolumeResponse, error) - NodeUnstageVolume(context.Context, *NodeUnstageVolumeRequest) (*NodeUnstageVolumeResponse, error) - NodePublishVolume(context.Context, *NodePublishVolumeRequest) (*NodePublishVolumeResponse, error) - NodeUnpublishVolume(context.Context, *NodeUnpublishVolumeRequest) (*NodeUnpublishVolumeResponse, error) - // NodeGetId is being deprecated in favor of NodeGetInfo and will be - // removed in CSI 1.0. Existing drivers, however, may depend on this - // RPC call and hence this RPC call MUST be implemented by the CSI - // plugin prior to v1.0. - NodeGetId(context.Context, *NodeGetIdRequest) (*NodeGetIdResponse, error) - NodeGetCapabilities(context.Context, *NodeGetCapabilitiesRequest) (*NodeGetCapabilitiesResponse, error) - // Prior to CSI 1.0 - CSI plugins MUST implement both NodeGetId and - // NodeGetInfo RPC calls. - NodeGetInfo(context.Context, *NodeGetInfoRequest) (*NodeGetInfoResponse, error) -} - -func RegisterNodeServer(s *grpc.Server, srv NodeServer) { - s.RegisterService(&_Node_serviceDesc, srv) -} - -func _Node_NodeStageVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodeStageVolumeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NodeServer).NodeStageVolume(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Node/NodeStageVolume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).NodeStageVolume(ctx, req.(*NodeStageVolumeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Node_NodeUnstageVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodeUnstageVolumeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NodeServer).NodeUnstageVolume(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Node/NodeUnstageVolume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).NodeUnstageVolume(ctx, req.(*NodeUnstageVolumeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Node_NodePublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodePublishVolumeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NodeServer).NodePublishVolume(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Node/NodePublishVolume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).NodePublishVolume(ctx, req.(*NodePublishVolumeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Node_NodeUnpublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodeUnpublishVolumeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NodeServer).NodeUnpublishVolume(ctx, 
in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Node/NodeUnpublishVolume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).NodeUnpublishVolume(ctx, req.(*NodeUnpublishVolumeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Node_NodeGetId_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodeGetIdRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NodeServer).NodeGetId(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Node/NodeGetId", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).NodeGetId(ctx, req.(*NodeGetIdRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Node_NodeGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodeGetCapabilitiesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NodeServer).NodeGetCapabilities(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Node/NodeGetCapabilities", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).NodeGetCapabilities(ctx, req.(*NodeGetCapabilitiesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Node_NodeGetInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodeGetInfoRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NodeServer).NodeGetInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v0.Node/NodeGetInfo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).NodeGetInfo(ctx, req.(*NodeGetInfoRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Node_serviceDesc = grpc.ServiceDesc{ - ServiceName: "csi.v0.Node", - HandlerType: (*NodeServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "NodeStageVolume", - Handler: _Node_NodeStageVolume_Handler, - }, - { - MethodName: "NodeUnstageVolume", - Handler: _Node_NodeUnstageVolume_Handler, - }, - { - MethodName: "NodePublishVolume", - Handler: _Node_NodePublishVolume_Handler, - }, - { - MethodName: "NodeUnpublishVolume", - Handler: _Node_NodeUnpublishVolume_Handler, - }, - { - MethodName: "NodeGetId", - Handler: _Node_NodeGetId_Handler, - }, - { - MethodName: "NodeGetCapabilities", - Handler: _Node_NodeGetCapabilities_Handler, - }, - { - MethodName: "NodeGetInfo", - Handler: _Node_NodeGetInfo_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "csi.proto", -} - -func init() { proto.RegisterFile("csi.proto", fileDescriptor_csi_31237507707d37ec) } - -var fileDescriptor_csi_31237507707d37ec = []byte{ - // 2932 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0x4d, 0x73, 0x23, 0x47, - 0xd5, 0xa3, 0x0f, 0xdb, 0x7a, 0x5e, 0x3b, 0xda, 0xf6, 0x97, 0x3c, 0xb6, 0x77, 0xbd, 0xb3, 0xd9, - 0x64, 0x13, 0x12, 0x6d, 0x30, 0x24, 0x15, 0x92, 0x4d, 0x40, 0x96, 0x15, 0x5b, 0x59, 0x5b, 0x36, - 0x23, 0xd9, 0xa9, 0x5d, 0x42, 
0x4d, 0xc6, 0x52, 0x5b, 0x3b, 0xac, 0x3c, 0xa3, 0xcc, 0x8c, 0xcc, - 0x9a, 0x1b, 0x70, 0x01, 0x4e, 0xf0, 0x0b, 0x52, 0x95, 0x1b, 0x14, 0xb9, 0x50, 0xdc, 0xa8, 0xe2, - 0x46, 0x15, 0x27, 0xce, 0x9c, 0xb8, 0xa7, 0xe0, 0xc8, 0x89, 0x2a, 0xaa, 0xa8, 0x9e, 0xee, 0x19, - 0x4d, 0xb7, 0x7a, 0xf4, 0x91, 0xdd, 0x4a, 0x71, 0x92, 0xe6, 0x7d, 0xf5, 0xeb, 0xd7, 0xef, 0xbd, - 0x7e, 0xef, 0xcd, 0x40, 0xae, 0xe9, 0x59, 0xc5, 0xae, 0xeb, 0xf8, 0x0e, 0x9a, 0x26, 0x7f, 0x2f, - 0xdf, 0x50, 0x6f, 0xb4, 0x1d, 0xa7, 0xdd, 0xc1, 0xf7, 0x02, 0xe8, 0x59, 0xef, 0xfc, 0xde, 0x8f, - 0x5d, 0xb3, 0xdb, 0xc5, 0xae, 0x47, 0xe9, 0xb4, 0x15, 0x58, 0xda, 0xc3, 0xfe, 0x71, 0xa7, 0xd7, - 0xb6, 0xec, 0xaa, 0x7d, 0xee, 0xe8, 0xf8, 0xd3, 0x1e, 0xf6, 0x7c, 0xed, 0xef, 0x0a, 0x2c, 0x0b, - 0x08, 0xaf, 0xeb, 0xd8, 0x1e, 0x46, 0x08, 0x32, 0xb6, 0x79, 0x81, 0x0b, 0xca, 0x96, 0x72, 0x37, - 0xa7, 0x07, 0xff, 0xd1, 0x1d, 0x58, 0xb8, 0xc4, 0x76, 0xcb, 0x71, 0x8d, 0x4b, 0xec, 0x7a, 0x96, - 0x63, 0x17, 0x52, 0x01, 0x76, 0x9e, 0x42, 0x4f, 0x29, 0x10, 0xed, 0xc1, 0xec, 0x85, 0x69, 0x5b, - 0xe7, 0xd8, 0xf3, 0x0b, 0xe9, 0xad, 0xf4, 0xdd, 0xb9, 0xed, 0x6f, 0x14, 0xa9, 0x9e, 0x45, 0xe9, - 0x5a, 0xc5, 0x43, 0x46, 0x5d, 0xb1, 0x7d, 0xf7, 0x4a, 0x8f, 0x98, 0xd5, 0x77, 0x61, 0x9e, 0x43, - 0xa1, 0x3c, 0xa4, 0x9f, 0xe0, 0x2b, 0xa6, 0x13, 0xf9, 0x8b, 0x96, 0x20, 0x7b, 0x69, 0x76, 0x7a, - 0x98, 0x69, 0x42, 0x1f, 0xde, 0x49, 0xbd, 0xad, 0x68, 0x37, 0x60, 0x23, 0x5a, 0xad, 0x6c, 0x76, - 0xcd, 0x33, 0xab, 0x63, 0xf9, 0x16, 0xf6, 0xc2, 0xad, 0xff, 0x10, 0x36, 0x13, 0xf0, 0xcc, 0x02, - 0xf7, 0xe1, 0x5a, 0x33, 0x06, 0x2f, 0xa4, 0x82, 0xad, 0x14, 0xc2, 0xad, 0x08, 0x9c, 0x57, 0x3a, - 0x47, 0xad, 0xfd, 0x53, 0x81, 0xbc, 0x48, 0x82, 0xee, 0xc3, 0x8c, 0x87, 0xdd, 0x4b, 0xab, 0x49, - 0xed, 0x3a, 0xb7, 0xbd, 0x95, 0x24, 0xad, 0x58, 0xa7, 0x74, 0xfb, 0x53, 0x7a, 0xc8, 0xa2, 0xfe, - 0x5a, 0x81, 0x19, 0x06, 0x46, 0xdf, 0x81, 0x8c, 0x7f, 0xd5, 0xa5, 0x62, 0x16, 0xb6, 0xef, 0x8c, - 0x12, 0x53, 0x6c, 0x5c, 0x75, 0xb1, 0x1e, 0xb0, 0x68, 0x1f, 0x42, 0x86, 0x3c, 0xa1, 0x39, 0x98, - 0x39, 0xa9, 0x3d, 0xa8, 0x1d, 0x7d, 0x54, 0xcb, 0x4f, 0xa1, 0x15, 0x40, 0xe5, 0xa3, 0x5a, 0x43, - 0x3f, 0x3a, 0x38, 0xa8, 0xe8, 0x46, 0xbd, 0xa2, 0x9f, 0x56, 0xcb, 0x95, 0xbc, 0x82, 0x36, 0x61, - 0xad, 0x54, 0x2e, 0x57, 0xea, 0xf5, 0xea, 0x4e, 0xf5, 0xa0, 0xda, 0x78, 0x68, 0x94, 0x8f, 0x6a, - 0xf5, 0x86, 0x5e, 0xaa, 0xd6, 0x1a, 0xf5, 0x7c, 0x6a, 0x67, 0x9a, 0xaa, 0xa1, 0x2d, 0xc0, 0xb5, - 0x63, 0xd7, 0x39, 0xc3, 0xa1, 0x71, 0x4b, 0x30, 0xcf, 0x9e, 0x99, 0x31, 0xdf, 0x80, 0xac, 0x8b, - 0xcd, 0xd6, 0x15, 0xdb, 0xb7, 0x5a, 0xa4, 0x0e, 0x5b, 0x0c, 0x1d, 0xb6, 0xb8, 0xe3, 0x38, 0x9d, - 0x53, 0x72, 0x78, 0x3a, 0x25, 0xd4, 0xbe, 0xc8, 0xc2, 0x62, 0xd9, 0xc5, 0xa6, 0x8f, 0x4f, 0x9d, - 0x4e, 0xef, 0x22, 0x14, 0x2d, 0x75, 0xcc, 0xfb, 0xb0, 0x40, 0x8c, 0xdf, 0xb4, 0xfc, 0x2b, 0xc3, - 0x35, 0xed, 0x36, 0x75, 0x87, 0xb9, 0xed, 0xe5, 0xd0, 0x2e, 0x65, 0x86, 0xd5, 0x09, 0x52, 0x9f, - 0x6f, 0xc6, 0x1f, 0x51, 0x15, 0x16, 0x2f, 0x83, 0x25, 0x0c, 0xee, 0xbc, 0xd3, 0xfc, 0x79, 0x53, - 0x2d, 0x62, 0xe7, 0x8d, 0x2e, 0x79, 0x88, 0x85, 0x3d, 0xf4, 0x00, 0xa0, 0x6b, 0xba, 0xe6, 0x05, - 0xf6, 0xb1, 0xeb, 0x15, 0x32, 0xbc, 0xf3, 0x4b, 0x76, 0x53, 0x3c, 0x8e, 0xa8, 0xa9, 0xf3, 0xc7, - 0xd8, 0x91, 0x0f, 0x6b, 0x4d, 0xc7, 0xf6, 0x5d, 0xa7, 0xd3, 0xc1, 0xae, 0xd1, 0x0c, 0xb8, 0x0d, - 0x0f, 0x37, 0x5d, 0xec, 0x7b, 0x85, 0x6c, 0x20, 0xfb, 0xed, 0x61, 0xb2, 0xcb, 0x11, 0x33, 0xc5, - 0xd6, 0x29, 0x2b, 0x5d, 0x68, 0xb5, 0x29, 0xc7, 0xa2, 0x23, 0x58, 0x0e, 0xad, 0xe1, 0xd8, 0x3e, - 0xb6, 0x7d, 0xc3, 0x73, 0x7a, 0x6e, 0x13, 0x17, 0xa6, 
0x03, 0x93, 0xae, 0x0b, 0xf6, 0xa0, 0x34, - 0xf5, 0x80, 0x44, 0x67, 0x76, 0xe4, 0x80, 0xe8, 0x11, 0xa8, 0x66, 0xb3, 0x89, 0x3d, 0xcf, 0xa2, - 0x86, 0x33, 0x5c, 0xfc, 0x69, 0xcf, 0x72, 0xf1, 0x05, 0xb6, 0x7d, 0xaf, 0x30, 0xc3, 0x4b, 0x6d, - 0x38, 0x5d, 0xa7, 0xe3, 0xb4, 0xaf, 0xf4, 0x3e, 0x8d, 0xbe, 0xc6, 0xb1, 0xc7, 0x30, 0x9e, 0xfa, - 0x1e, 0xbc, 0x20, 0x58, 0x70, 0x92, 0x1c, 0xa1, 0x7e, 0x08, 0x1b, 0xc3, 0x8c, 0x34, 0x51, 0xbe, - 0xf9, 0xa5, 0x02, 0x8b, 0x12, 0x9b, 0xa0, 0x7d, 0x98, 0xf5, 0x6c, 0xb3, 0xeb, 0x3d, 0x76, 0x7c, - 0xe6, 0xfc, 0xaf, 0x0e, 0x31, 0x61, 0xb1, 0xce, 0x68, 0xe9, 0xe3, 0xfe, 0x94, 0x1e, 0x71, 0xab, - 0x5b, 0xb0, 0xc0, 0x63, 0xd1, 0x02, 0xa4, 0xac, 0x16, 0x53, 0x2f, 0x65, 0xb5, 0xa2, 0x70, 0x7c, - 0x1f, 0x96, 0x78, 0x87, 0x60, 0x51, 0xf8, 0x12, 0x4c, 0xd3, 0x13, 0x62, 0x9a, 0x2c, 0xf0, 0x9a, - 0xe8, 0x0c, 0xab, 0xfd, 0x2e, 0x03, 0x79, 0xd1, 0xdf, 0xd1, 0x7d, 0xc8, 0x9e, 0x75, 0x9c, 0xe6, - 0x13, 0xc6, 0xfb, 0x62, 0x52, 0x60, 0x14, 0x77, 0x08, 0x15, 0x85, 0xee, 0x4f, 0xe9, 0x94, 0x89, - 0x70, 0x5f, 0x38, 0x3d, 0xdb, 0x67, 0x91, 0x99, 0xcc, 0x7d, 0x48, 0xa8, 0xfa, 0xdc, 0x01, 0x13, - 0xda, 0x85, 0x39, 0xea, 0x04, 0xc6, 0x85, 0xd3, 0xc2, 0x85, 0x74, 0x20, 0xe3, 0x76, 0xa2, 0x8c, - 0x52, 0x40, 0x7b, 0xe8, 0xb4, 0xb0, 0x0e, 0x66, 0xf4, 0x5f, 0x9d, 0x87, 0xb9, 0x98, 0x6e, 0xea, - 0x1e, 0xcc, 0xc5, 0x16, 0x43, 0xab, 0x30, 0x73, 0xee, 0x19, 0x51, 0x56, 0xcd, 0xe9, 0xd3, 0xe7, - 0x5e, 0x90, 0x28, 0x6f, 0xc2, 0x5c, 0xa0, 0x85, 0x71, 0xde, 0x31, 0xdb, 0xf4, 0x1e, 0xc8, 0xe9, - 0x10, 0x80, 0x3e, 0x20, 0x10, 0xf5, 0x5f, 0x0a, 0x40, 0x7f, 0x49, 0x74, 0x1f, 0x32, 0x81, 0x96, - 0x34, 0x37, 0xdf, 0x1d, 0x43, 0xcb, 0x62, 0xa0, 0x6a, 0xc0, 0xa5, 0x7d, 0xa6, 0x40, 0x26, 0x10, - 0x23, 0xe6, 0xe7, 0x7a, 0xb5, 0xb6, 0x77, 0x50, 0x31, 0x6a, 0x47, 0xbb, 0x15, 0xe3, 0x23, 0xbd, - 0xda, 0xa8, 0xe8, 0x79, 0x05, 0xad, 0xc3, 0x6a, 0x1c, 0xae, 0x57, 0x4a, 0xbb, 0x15, 0xdd, 0x38, - 0xaa, 0x1d, 0x3c, 0xcc, 0xa7, 0x90, 0x0a, 0x2b, 0x87, 0x27, 0x07, 0x8d, 0xea, 0x20, 0x2e, 0x8d, - 0x36, 0xa0, 0x10, 0xc3, 0x31, 0x19, 0x4c, 0x6c, 0x86, 0x88, 0x8d, 0x61, 0xe9, 0x5f, 0x86, 0xcc, - 0xee, 0xcc, 0x47, 0x87, 0x11, 0x38, 0xdb, 0x47, 0x30, 0xcf, 0xa5, 0x57, 0x52, 0x26, 0xb0, 0x10, - 0x6f, 0x19, 0x67, 0x57, 0x3e, 0xf6, 0x02, 0x4b, 0xa4, 0xf5, 0xf9, 0x10, 0xba, 0x43, 0x80, 0xc4, - 0xac, 0x1d, 0xeb, 0xc2, 0xf2, 0x19, 0x4d, 0x2a, 0xa0, 0x81, 0x00, 0x14, 0x10, 0x68, 0x7f, 0x49, - 0xc1, 0x34, 0x3b, 0x9b, 0x3b, 0xb1, 0x04, 0xcf, 0x89, 0x0c, 0xa1, 0x54, 0x24, 0x8d, 0x87, 0x54, - 0x18, 0x0f, 0xe8, 0x7d, 0x00, 0xd3, 0xf7, 0x5d, 0xeb, 0xac, 0xe7, 0x47, 0x09, 0xfd, 0x06, 0x7f, - 0x1e, 0xc5, 0x52, 0x44, 0xc0, 0x32, 0x70, 0x9f, 0x03, 0xed, 0xc0, 0x82, 0x90, 0x04, 0x33, 0xa3, - 0x93, 0xe0, 0x7c, 0x93, 0x8b, 0xff, 0x12, 0x2c, 0x86, 0xf9, 0xab, 0x83, 0x0d, 0x9f, 0xe5, 0x37, - 0x96, 0xbf, 0xf3, 0x03, 0x79, 0x0f, 0xf5, 0x89, 0x43, 0x18, 0xc9, 0x72, 0x82, 0x96, 0x13, 0x65, - 0xa6, 0x1e, 0x2c, 0x4a, 0xd2, 0x2a, 0x2a, 0x42, 0x2e, 0x38, 0x10, 0xcf, 0xf2, 0x89, 0xaf, 0xca, - 0xd5, 0xe9, 0x93, 0x10, 0xfa, 0xae, 0x8b, 0xcf, 0xb1, 0xeb, 0xe2, 0x16, 0x2b, 0x86, 0x24, 0xf4, - 0x11, 0x89, 0xf6, 0x73, 0x05, 0x66, 0x43, 0x38, 0x7a, 0x07, 0x66, 0x3d, 0xdc, 0xa6, 0x29, 0x5f, - 0xe1, 0xcf, 0x21, 0xa4, 0x29, 0xd6, 0x19, 0x01, 0x2b, 0x03, 0x43, 0x7a, 0x52, 0x06, 0x72, 0xa8, - 0x89, 0x36, 0xff, 0x6f, 0x05, 0x16, 0x77, 0x71, 0x07, 0x8b, 0x65, 0xc4, 0x3a, 0xe4, 0xd8, 0x35, - 0x17, 0x65, 0xd0, 0x59, 0x0a, 0xa8, 0xb6, 0x84, 0x9b, 0xb7, 0x15, 0xb0, 0x47, 0x37, 0x6f, 0x8a, - 0xbf, 0x79, 0x25, 0xc2, 0x63, 0x37, 0x2f, 0xc5, 0x26, 0xdd, 0xbc, 0x1c, 0x96, 
[remaining fileDescriptor bytes of this deleted generated protobuf file omitted] -} diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/NOTICE b/cluster-autoscaler/vendor/github.com/coreos/etcd/NOTICE
deleted file mode 100644 index b39ddfa5cbde..000000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -CoreOS Project -Copyright 2014 CoreOS, Inc - -This product includes software developed at CoreOS, Inc. -(http://www.coreos.com/). diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/client/README.md b/cluster-autoscaler/vendor/github.com/coreos/etcd/client/README.md deleted file mode 100644 index c0ec81901638..000000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/client/README.md +++ /dev/null @@ -1,117 +0,0 @@ -# etcd/client - -etcd/client is the Go client library for etcd. - -[![GoDoc](https://godoc.org/github.com/coreos/etcd/client?status.png)](https://godoc.org/github.com/coreos/etcd/client) - -etcd uses the `cmd/vendor` directory to store external dependencies, which are -to be compiled into etcd release binaries. `client` can be imported without -vendoring. For full compatibility, it is recommended to vendor builds using -etcd's vendored packages, with tools like godep, as in -[vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories). -For more detail, please read the [Go vendor design](https://golang.org/s/go15vendor). - -## Install - -```bash -go get github.com/coreos/etcd/client -``` - -## Usage - -```go -package main - -import ( - "log" - "time" - - "golang.org/x/net/context" - "github.com/coreos/etcd/client" -) - -func main() { - cfg := client.Config{ - Endpoints: []string{"http://127.0.0.1:2379"}, - Transport: client.DefaultTransport, - // set a per-request timeout to fail fast when the target endpoint is unavailable - HeaderTimeoutPerRequest: time.Second, - } - c, err := client.New(cfg) - if err != nil { - log.Fatal(err) - } - kapi := client.NewKeysAPI(c) - // set the "/foo" key to the value "bar" - log.Print("Setting '/foo' key with 'bar' value") - resp, err := kapi.Set(context.Background(), "/foo", "bar", nil) - if err != nil { - log.Fatal(err) - } else { - // print common key info - log.Printf("Set is done. Metadata is %q\n", resp) - } - // get the "/foo" key's value - log.Print("Getting '/foo' key value") - resp, err = kapi.Get(context.Background(), "/foo", nil) - if err != nil { - log.Fatal(err) - } else { - // print common key info - log.Printf("Get is done. Metadata is %q\n", resp) - // print value - log.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value) - } -} -``` - -## Error Handling - -The etcd client might return three types of errors. - -- context error - -Each API call takes a `context` as its first parameter. A context can be canceled or have an attached deadline. If the context is canceled or reaches its deadline, the corresponding context error will be returned no matter what internal errors the API call has already encountered. - -- cluster error - -Each API call tries to send a request to the cluster endpoints one by one until it successfully gets a response. If a request to an endpoint fails, due to an exceeded per-request timeout or connection issues, the error is added to a list of errors. If all possible endpoints fail, a cluster error that includes all encountered errors will be returned. - -- response error - -If the response received from the cluster is invalid, a plain string error will be returned. For example, it might be an invalid JSON error.
- -Here is the example code to handle client errors: - -```go -cfg := client.Config{Endpoints: []string{"http://etcd1:2379","http://etcd2:2379","http://etcd3:2379"}} -c, err := client.New(cfg) -if err != nil { - log.Fatal(err) -} - -kapi := client.NewKeysAPI(c) -resp, err := kapi.Set(ctx, "test", "bar", nil) -if err != nil { - if err == context.Canceled { - // ctx was canceled by another routine - } else if err == context.DeadlineExceeded { - // ctx has an attached deadline that was exceeded - } else if cerr, ok := err.(*client.ClusterError); ok { - // process (cerr.Errors) - } else { - // bad cluster endpoints, which are not etcd servers - } -} -``` - - -## Caveat - -1. etcd/client prefers to use the same endpoint as long as that endpoint continues to work well. This saves socket resources and improves efficiency on both the client and server side. This preference doesn't remove consistency from the data consumed by the client, because data replicated to each etcd member has already passed through the consensus process. - -2. etcd/client does round-robin rotation over the other available endpoints if the preferred endpoint isn't functioning properly. For example, if the member that etcd/client connects to is hard-killed, etcd/client will fail on the first attempt with the killed member and succeed on the second attempt with another member. If it fails to talk to all available endpoints, it will return all of the errors it encountered. - -3. The default etcd/client currently cannot handle the case where the remote server is SIGSTOPed. The TCP keepalive mechanism doesn't help in this scenario because the operating system may still send TCP keep-alive packets. Over time we'd like to improve this functionality, but solving this issue isn't high priority because a real-life case in which a server is stopped, but the connection is kept alive, hasn't been brought to our attention. - -4. etcd/client cannot detect whether a member is healthy with watches and non-quorum read requests. If the member is isolated from the cluster, etcd/client may retrieve outdated data. Instead, users can either issue quorum read requests or monitor the /health endpoint for member health information. diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/client/auth_role.go b/cluster-autoscaler/vendor/github.com/coreos/etcd/client/auth_role.go deleted file mode 100644 index d15e00dd7160..000000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/client/auth_role.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
- -package client - -import ( - "bytes" - "encoding/json" - "net/http" - "net/url" - - "golang.org/x/net/context" -) - -type Role struct { - Role string `json:"role"` - Permissions Permissions `json:"permissions"` - Grant *Permissions `json:"grant,omitempty"` - Revoke *Permissions `json:"revoke,omitempty"` -} - -type Permissions struct { - KV rwPermission `json:"kv"` -} - -type rwPermission struct { - Read []string `json:"read"` - Write []string `json:"write"` -} - -type PermissionType int - -const ( - ReadPermission PermissionType = iota - WritePermission - ReadWritePermission -) - -// NewAuthRoleAPI constructs a new AuthRoleAPI that uses HTTP to -// interact with etcd's role creation and modification features. -func NewAuthRoleAPI(c Client) AuthRoleAPI { - return &httpAuthRoleAPI{ - client: c, - } -} - -type AuthRoleAPI interface { - // AddRole adds a role. - AddRole(ctx context.Context, role string) error - - // RemoveRole removes a role. - RemoveRole(ctx context.Context, role string) error - - // GetRole retrieves role details. - GetRole(ctx context.Context, role string) (*Role, error) - - // GrantRoleKV grants a role some permission prefixes for the KV store. - GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error) - - // RevokeRoleKV revokes some permission prefixes for a role on the KV store. - RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error) - - // ListRoles lists roles. - ListRoles(ctx context.Context) ([]string, error) -} - -type httpAuthRoleAPI struct { - client httpClient -} - -type authRoleAPIAction struct { - verb string - name string - role *Role -} - -type authRoleAPIList struct{} - -func (list *authRoleAPIList) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "roles", "") - req, _ := http.NewRequest("GET", u.String(), nil) - req.Header.Set("Content-Type", "application/json") - return req -} - -func (l *authRoleAPIAction) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "roles", l.name) - if l.role == nil { - req, _ := http.NewRequest(l.verb, u.String(), nil) - return req - } - b, err := json.Marshal(l.role) - if err != nil { - panic(err) - } - body := bytes.NewReader(b) - req, _ := http.NewRequest(l.verb, u.String(), body) - req.Header.Set("Content-Type", "application/json") - return req -} - -func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) { - resp, body, err := r.client.Do(ctx, &authRoleAPIList{}) - if err != nil { - return nil, err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - return nil, err - } - var roleList struct { - Roles []Role `json:"roles"` - } - if err = json.Unmarshal(body, &roleList); err != nil { - return nil, err - } - ret := make([]string, 0, len(roleList.Roles)) - for _, r := range roleList.Roles { - ret = append(ret, r.Role) - } - return ret, nil -} - -func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error { - role := &Role{ - Role: rolename, - } - return r.addRemoveRole(ctx, &authRoleAPIAction{ - verb: "PUT", - name: rolename, - role: role, - }) -} - -func (r *httpAuthRoleAPI) RemoveRole(ctx context.Context, rolename string) error { - return r.addRemoveRole(ctx, &authRoleAPIAction{ - verb: "DELETE", - name: rolename, - }) -} - -func (r *httpAuthRoleAPI) addRemoveRole(ctx context.Context, req *authRoleAPIAction) error { - resp, body, err := r.client.Do(ctx, req) - if err != nil { - return err - } - if err := assertStatusCode(resp.StatusCode, 
http.StatusOK, http.StatusCreated); err != nil { - var sec authError - err := json.Unmarshal(body, &sec) - if err != nil { - return err - } - return sec - } - return nil -} - -func (r *httpAuthRoleAPI) GetRole(ctx context.Context, rolename string) (*Role, error) { - return r.modRole(ctx, &authRoleAPIAction{ - verb: "GET", - name: rolename, - }) -} - -func buildRWPermission(prefixes []string, permType PermissionType) rwPermission { - var out rwPermission - switch permType { - case ReadPermission: - out.Read = prefixes - case WritePermission: - out.Write = prefixes - case ReadWritePermission: - out.Read = prefixes - out.Write = prefixes - } - return out -} - -func (r *httpAuthRoleAPI) GrantRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) { - rwp := buildRWPermission(prefixes, permType) - role := &Role{ - Role: rolename, - Grant: &Permissions{ - KV: rwp, - }, - } - return r.modRole(ctx, &authRoleAPIAction{ - verb: "PUT", - name: rolename, - role: role, - }) -} - -func (r *httpAuthRoleAPI) RevokeRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) { - rwp := buildRWPermission(prefixes, permType) - role := &Role{ - Role: rolename, - Revoke: &Permissions{ - KV: rwp, - }, - } - return r.modRole(ctx, &authRoleAPIAction{ - verb: "PUT", - name: rolename, - role: role, - }) -} - -func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (*Role, error) { - resp, body, err := r.client.Do(ctx, req) - if err != nil { - return nil, err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return nil, err - } - return nil, sec - } - var role Role - if err = json.Unmarshal(body, &role); err != nil { - return nil, err - } - return &role, nil -} diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/client/auth_user.go b/cluster-autoscaler/vendor/github.com/coreos/etcd/client/auth_user.go deleted file mode 100644 index 97c3f31813b5..000000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/client/auth_user.go +++ /dev/null @@ -1,320 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package client - -import ( - "bytes" - "encoding/json" - "net/http" - "net/url" - "path" - - "golang.org/x/net/context" -) - -var ( - defaultV2AuthPrefix = "/v2/auth" -) - -type User struct { - User string `json:"user"` - Password string `json:"password,omitempty"` - Roles []string `json:"roles"` - Grant []string `json:"grant,omitempty"` - Revoke []string `json:"revoke,omitempty"` -} - -// userListEntry is the user representation given by the server for ListUsers -type userListEntry struct { - User string `json:"user"` - Roles []Role `json:"roles"` -} - -type UserRoles struct { - User string `json:"user"` - Roles []Role `json:"roles"` -} - -func v2AuthURL(ep url.URL, action string, name string) *url.URL { - if name != "" { - ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name) - return &ep - } - ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action) - return &ep -} - -// NewAuthAPI constructs a new AuthAPI that uses HTTP to -// interact with etcd's general auth features. -func NewAuthAPI(c Client) AuthAPI { - return &httpAuthAPI{ - client: c, - } -} - -type AuthAPI interface { - // Enable auth. - Enable(ctx context.Context) error - - // Disable auth. - Disable(ctx context.Context) error -} - -type httpAuthAPI struct { - client httpClient -} - -func (s *httpAuthAPI) Enable(ctx context.Context) error { - return s.enableDisable(ctx, &authAPIAction{"PUT"}) -} - -func (s *httpAuthAPI) Disable(ctx context.Context) error { - return s.enableDisable(ctx, &authAPIAction{"DELETE"}) -} - -func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error { - resp, body, err := s.client.Do(ctx, req) - if err != nil { - return err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return err - } - return sec - } - return nil -} - -type authAPIAction struct { - verb string -} - -func (l *authAPIAction) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "enable", "") - req, _ := http.NewRequest(l.verb, u.String(), nil) - return req -} - -type authError struct { - Message string `json:"message"` - Code int `json:"-"` -} - -func (e authError) Error() string { - return e.Message -} - -// NewAuthUserAPI constructs a new AuthUserAPI that uses HTTP to -// interact with etcd's user creation and modification features. -func NewAuthUserAPI(c Client) AuthUserAPI { - return &httpAuthUserAPI{ - client: c, - } -} - -type AuthUserAPI interface { - // AddUser adds a user. - AddUser(ctx context.Context, username string, password string) error - - // RemoveUser removes a user. - RemoveUser(ctx context.Context, username string) error - - // GetUser retrieves user details. - GetUser(ctx context.Context, username string) (*User, error) - - // GrantUser grants a user some permission roles. - GrantUser(ctx context.Context, username string, roles []string) (*User, error) - - // RevokeUser revokes some permission roles from a user. - RevokeUser(ctx context.Context, username string, roles []string) (*User, error) - - // ChangePassword changes the user's password. - ChangePassword(ctx context.Context, username string, password string) (*User, error) - - // ListUsers lists the users. 
- ListUsers(ctx context.Context) ([]string, error) -} - -type httpAuthUserAPI struct { - client httpClient -} - -type authUserAPIAction struct { - verb string - username string - user *User -} - -type authUserAPIList struct{} - -func (list *authUserAPIList) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "users", "") - req, _ := http.NewRequest("GET", u.String(), nil) - req.Header.Set("Content-Type", "application/json") - return req -} - -func (l *authUserAPIAction) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "users", l.username) - if l.user == nil { - req, _ := http.NewRequest(l.verb, u.String(), nil) - return req - } - b, err := json.Marshal(l.user) - if err != nil { - panic(err) - } - body := bytes.NewReader(b) - req, _ := http.NewRequest(l.verb, u.String(), body) - req.Header.Set("Content-Type", "application/json") - return req -} - -func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) { - resp, body, err := u.client.Do(ctx, &authUserAPIList{}) - if err != nil { - return nil, err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return nil, err - } - return nil, sec - } - - var userList struct { - Users []userListEntry `json:"users"` - } - - if err = json.Unmarshal(body, &userList); err != nil { - return nil, err - } - - ret := make([]string, 0, len(userList.Users)) - for _, u := range userList.Users { - ret = append(ret, u.User) - } - return ret, nil -} - -func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password string) error { - user := &User{ - User: username, - Password: password, - } - return u.addRemoveUser(ctx, &authUserAPIAction{ - verb: "PUT", - username: username, - user: user, - }) -} - -func (u *httpAuthUserAPI) RemoveUser(ctx context.Context, username string) error { - return u.addRemoveUser(ctx, &authUserAPIAction{ - verb: "DELETE", - username: username, - }) -} - -func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAction) error { - resp, body, err := u.client.Do(ctx, req) - if err != nil { - return err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return err - } - return sec - } - return nil -} - -func (u *httpAuthUserAPI) GetUser(ctx context.Context, username string) (*User, error) { - return u.modUser(ctx, &authUserAPIAction{ - verb: "GET", - username: username, - }) -} - -func (u *httpAuthUserAPI) GrantUser(ctx context.Context, username string, roles []string) (*User, error) { - user := &User{ - User: username, - Grant: roles, - } - return u.modUser(ctx, &authUserAPIAction{ - verb: "PUT", - username: username, - user: user, - }) -} - -func (u *httpAuthUserAPI) RevokeUser(ctx context.Context, username string, roles []string) (*User, error) { - user := &User{ - User: username, - Revoke: roles, - } - return u.modUser(ctx, &authUserAPIAction{ - verb: "PUT", - username: username, - user: user, - }) -} - -func (u *httpAuthUserAPI) ChangePassword(ctx context.Context, username string, password string) (*User, error) { - user := &User{ - User: username, - Password: password, - } - return u.modUser(ctx, &authUserAPIAction{ - verb: "PUT", - username: username, - user: user, - }) -} - -func (u *httpAuthUserAPI) modUser(ctx context.Context, req *authUserAPIAction) (*User, error) { - resp, body, err := u.client.Do(ctx, req) - if err != nil { - return nil, 
err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return nil, err - } - return nil, sec - } - var user User - if err = json.Unmarshal(body, &user); err != nil { - var userR UserRoles - if urerr := json.Unmarshal(body, &userR); urerr != nil { - return nil, err - } - user.User = userR.User - for _, r := range userR.Roles { - user.Roles = append(user.Roles, r.Role) - } - } - return &user, nil -} diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/client/cancelreq.go b/cluster-autoscaler/vendor/github.com/coreos/etcd/client/cancelreq.go deleted file mode 100644 index 76d1f040198b..000000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/client/cancelreq.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// borrowed from golang/net/context/ctxhttp/cancelreq.go - -package client - -import "net/http" - -func requestCanceler(tr CancelableTransport, req *http.Request) func() { - ch := make(chan struct{}) - req.Cancel = ch - - return func() { - close(ch) - } -} diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/client/client.go b/cluster-autoscaler/vendor/github.com/coreos/etcd/client/client.go deleted file mode 100644 index 19ce2ec01dad..000000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/client/client.go +++ /dev/null @@ -1,704 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "math/rand" - "net" - "net/http" - "net/url" - "sort" - "strconv" - "sync" - "time" - - "github.com/coreos/etcd/version" - - "golang.org/x/net/context" -) - -var ( - ErrNoEndpoints = errors.New("client: no endpoints available") - ErrTooManyRedirects = errors.New("client: too many redirects") - ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured") - ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available") - errTooManyRedirectChecks = errors.New("client: too many redirect checks") - - // oneShotCtxValue is set on a context using WithValue(&oneShotCtxValue) so - // that Do() will not retry a request - oneShotCtxValue interface{} -) - -var DefaultRequestTimeout = 5 * time.Second - -var DefaultTransport CancelableTransport = &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, -} - -type EndpointSelectionMode int - -const ( - // EndpointSelectionRandom is the default value of the 'SelectionMode'. - // As the name implies, the client object will pick a node from the members - // of the cluster in a random fashion.
If the cluster has three members, A, B, - // and C, the client picks any node from the three members as its request - // destination. - EndpointSelectionRandom EndpointSelectionMode = iota - - // If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader', - // requests are sent directly to the cluster leader. This reduces - // forwarding roundtrips compared to making requests to etcd followers - // who then forward them to the cluster leader. In the event of a leader - // failure, however, clients configured this way cannot prioritize among - // the remaining etcd followers. Therefore, when a client sets 'SelectionMode' - // to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to - // maintain its knowledge of the current cluster state. - // - // This mode should be used with Client.AutoSync(). - EndpointSelectionPrioritizeLeader -) - -type Config struct { - // Endpoints defines a set of URLs (schemes, hosts and ports only) - // that can be used to communicate with a logical etcd cluster. For - // example, a three-node cluster could be provided like so: - // - // Endpoints: []string{ - // "http://node1.example.com:2379", - // "http://node2.example.com:2379", - // "http://node3.example.com:2379", - // } - // - // If multiple endpoints are provided, the Client will attempt to - // use them all in the event that one or more of them are unusable. - // - // If Client.Sync is ever called, the Client may cache an alternate - // set of endpoints to continue operation. - Endpoints []string - - // Transport is used by the Client to drive HTTP requests. If not - // provided, DefaultTransport will be used. - Transport CancelableTransport - - // CheckRedirect specifies the policy for handling HTTP redirects. - // If CheckRedirect is not nil, the Client calls it before - // following an HTTP redirect. The sole argument is the number of - // requests that have already been made. If CheckRedirect returns - // an error, Client.Do will not make any further requests and will return - // the error back to the caller. - // - // If CheckRedirect is nil, the Client uses its default policy, - // which is to stop after 10 consecutive requests. - CheckRedirect CheckRedirectFunc - - // Username specifies the user credential to add as an authorization header. - Username string - - // Password is the password for the specified user to add as an authorization header - // to the request. - Password string - - // HeaderTimeoutPerRequest specifies the time limit to wait for the response - // header in a single request made by the Client. The timeout includes - // connection time, any redirects, and header wait time. - // - // For a non-watch GET request, the server returns the response body immediately. - // For a PUT/POST/DELETE request, the server will attempt to commit the request - // before responding, which is expected to take `100ms + 2 * RTT`. - // For a watch request, the server returns the header immediately to notify the - // Client that the watch has started. But if the server is behind some kind of proxy, - // the response header may be cached at the proxy, and the Client cannot rely on this behavior. - // - // In particular, a wait request will ignore this timeout. - // - // One API call may send multiple requests to different etcd servers until it - // succeeds. Use the context of the API call to specify the overall timeout. - // - // A HeaderTimeoutPerRequest of zero means no timeout.
- HeaderTimeoutPerRequest time.Duration - - // SelectionMode is an EndpointSelectionMode enum that specifies the - // policy for choosing the etcd cluster node to which requests are sent. - SelectionMode EndpointSelectionMode -} - -func (cfg *Config) transport() CancelableTransport { - if cfg.Transport == nil { - return DefaultTransport - } - return cfg.Transport -} - -func (cfg *Config) checkRedirect() CheckRedirectFunc { - if cfg.CheckRedirect == nil { - return DefaultCheckRedirect - } - return cfg.CheckRedirect -} - -// CancelableTransport mimics net/http.Transport, but requires that -// the object also support request cancellation. -type CancelableTransport interface { - http.RoundTripper - CancelRequest(req *http.Request) -} - -type CheckRedirectFunc func(via int) error - -// DefaultCheckRedirect follows up to 10 redirects, but no more. -var DefaultCheckRedirect CheckRedirectFunc = func(via int) error { - if via > 10 { - return ErrTooManyRedirects - } - return nil -} - -type Client interface { - // Sync updates the internal cache of the etcd cluster's membership. - Sync(context.Context) error - - // AutoSync periodically calls Sync() at the given interval. - // The recommended sync interval is 10 seconds to 1 minute, which does - // not add too much overhead to the server and lets the client catch up - // with cluster changes in time. - // - // Example usage: - // - // for { - // err := client.AutoSync(ctx, 10*time.Second) - // if err == context.DeadlineExceeded || err == context.Canceled { - // break - // } - // log.Print(err) - // } - AutoSync(context.Context, time.Duration) error - - // Endpoints returns a copy of the current set of API endpoints used - // by Client to resolve HTTP requests. If Sync has ever been called, - // this may differ from the initial Endpoints provided in the Config. - Endpoints() []string - - // SetEndpoints sets the set of API endpoints used by Client to resolve - // HTTP requests.
If the given endpoints are not valid, an error will be - // returned. - SetEndpoints(eps []string) error - - // GetVersion retrieves the current etcd server and cluster version. - GetVersion(ctx context.Context) (*version.Versions, error) - - httpClient -} - -func New(cfg Config) (Client, error) { - c := &httpClusterClient{ - clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest), - rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))), - selectionMode: cfg.SelectionMode, - } - if cfg.Username != "" { - c.credentials = &credentials{ - username: cfg.Username, - password: cfg.Password, - } - } - if err := c.SetEndpoints(cfg.Endpoints); err != nil { - return nil, err - } - return c, nil -} - -type httpClient interface { - Do(context.Context, httpAction) (*http.Response, []byte, error) -} - -func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc, headerTimeout time.Duration) httpClientFactory { - return func(ep url.URL) httpClient { - return &redirectFollowingHTTPClient{ - checkRedirect: cr, - client: &simpleHTTPClient{ - transport: tr, - endpoint: ep, - headerTimeout: headerTimeout, - }, - } - } -} - -type credentials struct { - username string - password string -} - -type httpClientFactory func(url.URL) httpClient - -type httpAction interface { - HTTPRequest(url.URL) *http.Request -} - -type httpClusterClient struct { - clientFactory httpClientFactory - endpoints []url.URL - pinned int - credentials *credentials - sync.RWMutex - rand *rand.Rand - selectionMode EndpointSelectionMode -} - -func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) { - ceps := make([]url.URL, len(eps)) - copy(ceps, eps) - - // To perform a lookup on the new endpoint list without using the current - // client, we'll copy it - clientCopy := &httpClusterClient{ - clientFactory: c.clientFactory, - credentials: c.credentials, - rand: c.rand, - - pinned: 0, - endpoints: ceps, - } - - mAPI := NewMembersAPI(clientCopy) - leader, err := mAPI.Leader(ctx) - if err != nil { - return "", err - } - if len(leader.ClientURLs) == 0 { - return "", ErrNoLeaderEndpoint - } - - return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs? -} - -func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) { - if len(eps) == 0 { - return []url.URL{}, ErrNoEndpoints - } - - neps := make([]url.URL, len(eps)) - for i, ep := range eps { - u, err := url.Parse(ep) - if err != nil { - return []url.URL{}, err - } - neps[i] = *u - } - return neps, nil -} - -func (c *httpClusterClient) SetEndpoints(eps []string) error { - neps, err := c.parseEndpoints(eps) - if err != nil { - return err - } - - c.Lock() - defer c.Unlock() - - c.endpoints = shuffleEndpoints(c.rand, neps) - // We're not doing anything for PrioritizeLeader here. This is - // because we don't have a context, meaning we can't call getLeaderEndpoint. - // However, if you're using PrioritizeLeader, you've already been told - // to regularly call Sync, where we do have a ctx and can figure out the - // leader.
PrioritizeLeader is also quite a loose guarantee, so deal - // with it - c.pinned = 0 - - return nil -} - -func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { - action := act - c.RLock() - leps := len(c.endpoints) - eps := make([]url.URL, leps) - n := copy(eps, c.endpoints) - pinned := c.pinned - - if c.credentials != nil { - action = &authedAction{ - act: act, - credentials: *c.credentials, - } - } - c.RUnlock() - - if leps == 0 { - return nil, nil, ErrNoEndpoints - } - - if leps != n { - return nil, nil, errors.New("unable to pick endpoint: copy failed") - } - - var resp *http.Response - var body []byte - var err error - cerr := &ClusterError{} - isOneShot := ctx.Value(&oneShotCtxValue) != nil - - for i := pinned; i < leps+pinned; i++ { - k := i % leps - hc := c.clientFactory(eps[k]) - resp, body, err = hc.Do(ctx, action) - if err != nil { - cerr.Errors = append(cerr.Errors, err) - if err == ctx.Err() { - return nil, nil, ctx.Err() - } - if err == context.Canceled || err == context.DeadlineExceeded { - return nil, nil, err - } - } else if resp.StatusCode/100 == 5 { - switch resp.StatusCode { - case http.StatusInternalServerError, http.StatusServiceUnavailable: - // TODO: make sure this is a no leader response - cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s has no leader", eps[k].String())) - default: - cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode))) - } - err = cerr.Errors[0] - } - if err != nil { - if !isOneShot { - continue - } - c.Lock() - c.pinned = (k + 1) % leps - c.Unlock() - return nil, nil, err - } - if k != pinned { - c.Lock() - c.pinned = k - c.Unlock() - } - return resp, body, nil - } - - return nil, nil, cerr -} - -func (c *httpClusterClient) Endpoints() []string { - c.RLock() - defer c.RUnlock() - - eps := make([]string, len(c.endpoints)) - for i, ep := range c.endpoints { - eps[i] = ep.String() - } - - return eps -} - -func (c *httpClusterClient) Sync(ctx context.Context) error { - mAPI := NewMembersAPI(c) - ms, err := mAPI.List(ctx) - if err != nil { - return err - } - - var eps []string - for _, m := range ms { - eps = append(eps, m.ClientURLs...) 
- } - - neps, err := c.parseEndpoints(eps) - if err != nil { - return err - } - - npin := 0 - - switch c.selectionMode { - case EndpointSelectionRandom: - c.RLock() - eq := endpointsEqual(c.endpoints, neps) - c.RUnlock() - - if eq { - return nil - } - // When items in the endpoint list change, we choose a new pin - neps = shuffleEndpoints(c.rand, neps) - case EndpointSelectionPrioritizeLeader: - nle, err := c.getLeaderEndpoint(ctx, neps) - if err != nil { - return ErrNoLeaderEndpoint - } - - for i, n := range neps { - if n.String() == nle { - npin = i - break - } - } - default: - return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode) - } - - c.Lock() - defer c.Unlock() - c.endpoints = neps - c.pinned = npin - - return nil -} - -func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error { - ticker := time.NewTicker(interval) - defer ticker.Stop() - for { - err := c.Sync(ctx) - if err != nil { - return err - } - select { - case <-ctx.Done(): - return ctx.Err() - case <-ticker.C: - } - } -} - -func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) { - act := &getAction{Prefix: "/version"} - - resp, body, err := c.Do(ctx, act) - if err != nil { - return nil, err - } - - switch resp.StatusCode { - case http.StatusOK: - if len(body) == 0 { - return nil, ErrEmptyBody - } - var vresp version.Versions - if err := json.Unmarshal(body, &vresp); err != nil { - return nil, ErrInvalidJSON - } - return &vresp, nil - default: - var etcdErr Error - if err := json.Unmarshal(body, &etcdErr); err != nil { - return nil, ErrInvalidJSON - } - return nil, etcdErr - } -} - -type roundTripResponse struct { - resp *http.Response - err error -} - -type simpleHTTPClient struct { - transport CancelableTransport - endpoint url.URL - headerTimeout time.Duration -} - -func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { - req := act.HTTPRequest(c.endpoint) - - if err := printcURL(req); err != nil { - return nil, nil, err - } - - isWait := false - if req != nil && req.URL != nil { - ws := req.URL.Query().Get("wait") - if len(ws) != 0 { - var err error - isWait, err = strconv.ParseBool(ws) - if err != nil { - return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req) - } - } - } - - var hctx context.Context - var hcancel context.CancelFunc - if !isWait && c.headerTimeout > 0 { - hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout) - } else { - hctx, hcancel = context.WithCancel(ctx) - } - defer hcancel() - - reqcancel := requestCanceler(c.transport, req) - - rtchan := make(chan roundTripResponse, 1) - go func() { - resp, err := c.transport.RoundTrip(req) - rtchan <- roundTripResponse{resp: resp, err: err} - close(rtchan) - }() - - var resp *http.Response - var err error - - select { - case rtresp := <-rtchan: - resp, err = rtresp.resp, rtresp.err - case <-hctx.Done(): - // cancel and wait for request to actually exit before continuing - reqcancel() - rtresp := <-rtchan - resp = rtresp.resp - switch { - case ctx.Err() != nil: - err = ctx.Err() - case hctx.Err() != nil: - err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String()) - default: - panic("failed to get error from context") - } - } - - // always check for resp nil-ness to deal with possible - // race conditions between channels above - defer func() { - if resp != nil { - resp.Body.Close() - } - }() - - if err != nil { - return nil, nil, err - } - - var body []byte - done := make(chan
struct{}) - go func() { - body, err = ioutil.ReadAll(resp.Body) - done <- struct{}{} - }() - - select { - case <-ctx.Done(): - resp.Body.Close() - <-done - return nil, nil, ctx.Err() - case <-done: - } - - return resp, body, err -} - -type authedAction struct { - act httpAction - credentials credentials -} - -func (a *authedAction) HTTPRequest(url url.URL) *http.Request { - r := a.act.HTTPRequest(url) - r.SetBasicAuth(a.credentials.username, a.credentials.password) - return r -} - -type redirectFollowingHTTPClient struct { - client httpClient - checkRedirect CheckRedirectFunc -} - -func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { - next := act - for i := 0; i < 100; i++ { - if i > 0 { - if err := r.checkRedirect(i); err != nil { - return nil, nil, err - } - } - resp, body, err := r.client.Do(ctx, next) - if err != nil { - return nil, nil, err - } - if resp.StatusCode/100 == 3 { - hdr := resp.Header.Get("Location") - if hdr == "" { - return nil, nil, fmt.Errorf("Location header not set") - } - loc, err := url.Parse(hdr) - if err != nil { - return nil, nil, fmt.Errorf("Location header not valid URL: %s", hdr) - } - next = &redirectedHTTPAction{ - action: act, - location: *loc, - } - continue - } - return resp, body, nil - } - - return nil, nil, errTooManyRedirectChecks -} - -type redirectedHTTPAction struct { - action httpAction - location url.URL -} - -func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request { - orig := r.action.HTTPRequest(ep) - orig.URL = &r.location - return orig -} - -func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL { - p := r.Perm(len(eps)) - neps := make([]url.URL, len(eps)) - for i, k := range p { - neps[i] = eps[k] - } - return neps -} - -func endpointsEqual(left, right []url.URL) bool { - if len(left) != len(right) { - return false - } - - sLeft := make([]string, len(left)) - sRight := make([]string, len(right)) - for i, l := range left { - sLeft[i] = l.String() - } - for i, r := range right { - sRight[i] = r.String() - } - - sort.Strings(sLeft) - sort.Strings(sRight) - for i := range sLeft { - if sLeft[i] != sRight[i] { - return false - } - } - return true -} diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/client/cluster_error.go b/cluster-autoscaler/vendor/github.com/coreos/etcd/client/cluster_error.go deleted file mode 100644 index 34618cdbd9e6..000000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/client/cluster_error.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package client - -import "fmt" - -type ClusterError struct { - Errors []error -} - -func (ce *ClusterError) Error() string { - s := ErrClusterUnavailable.Error() - for i, e := range ce.Errors { - s += fmt.Sprintf("; error #%d: %s\n", i, e) - } - return s -} - -func (ce *ClusterError) Detail() string { - s := "" - for i, e := range ce.Errors { - s += fmt.Sprintf("error #%d: %s\n", i, e) - } - return s -} diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/client/curl.go b/cluster-autoscaler/vendor/github.com/coreos/etcd/client/curl.go deleted file mode 100644 index c8bc9fba20ec..000000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/client/curl.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "os" -) - -var ( - cURLDebug = false -) - -func EnablecURLDebug() { - cURLDebug = true -} - -func DisablecURLDebug() { - cURLDebug = false -} - -// printcURL prints the cURL equivalent request to stderr. -// It returns an error if the body of the request cannot -// be read. -// The caller MUST cancel the request if there is an error. -func printcURL(req *http.Request) error { - if !cURLDebug { - return nil - } - var ( - command string - b []byte - err error - ) - - if req.URL != nil { - command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String()) - } - - if req.Body != nil { - b, err = ioutil.ReadAll(req.Body) - if err != nil { - return err - } - command += fmt.Sprintf(" -d %q", string(b)) - } - - fmt.Fprintf(os.Stderr, "cURL Command: %s\n", command) - - // reset body - body := bytes.NewBuffer(b) - req.Body = ioutil.NopCloser(body) - - return nil -} diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/client/discover.go b/cluster-autoscaler/vendor/github.com/coreos/etcd/client/discover.go deleted file mode 100644 index 442e35fe543b..000000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/client/discover.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "github.com/coreos/etcd/pkg/srv" -) - -// Discoverer is an interface that wraps the Discover method. -type Discoverer interface { - // Discover looks up the etcd servers for the domain. 
- Discover(domain string) ([]string, error) -} - -type srvDiscover struct{} - -// NewSRVDiscover constructs a new Discoverer that uses the stdlib to look up SRV records. -func NewSRVDiscover() Discoverer { - return &srvDiscover{} -} - -func (d *srvDiscover) Discover(domain string) ([]string, error) { - srvs, err := srv.GetClient("etcd-client", domain) - if err != nil { - return nil, err - } - return srvs.Endpoints, nil -} diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/client/doc.go b/cluster-autoscaler/vendor/github.com/coreos/etcd/client/doc.go deleted file mode 100644 index 32fdfb52c670..000000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/client/doc.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package client provides bindings for the etcd APIs. - -Create a Config and exchange it for a Client: - - import ( - "net/http" - - "github.com/coreos/etcd/client" - "golang.org/x/net/context" - ) - - cfg := client.Config{ - Endpoints: []string{"http://127.0.0.1:2379"}, - Transport: DefaultTransport, - } - - c, err := client.New(cfg) - if err != nil { - // handle error - } - -Clients are safe for concurrent use by multiple goroutines. - -Create a KeysAPI using the Client, then use it to interact with etcd: - - kAPI := client.NewKeysAPI(c) - - // create a new key /foo with the value "bar" - _, err = kAPI.Create(context.Background(), "/foo", "bar") - if err != nil { - // handle error - } - - // delete the newly created key only if the value is still "bar" - _, err = kAPI.Delete(context.Background(), "/foo", &DeleteOptions{PrevValue: "bar"}) - if err != nil { - // handle error - } - -Use a custom context to set timeouts on your operations: - - import "time" - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - // set a new key, ignoring its previous state - _, err := kAPI.Set(ctx, "/ping", "pong", nil) - if err != nil { - if err == context.DeadlineExceeded { - // request took longer than 5s - } else { - // handle error - } - } - -*/ -package client diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/client/keys.generated.go b/cluster-autoscaler/vendor/github.com/coreos/etcd/client/keys.generated.go deleted file mode 100644 index 216139c9cc71..000000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/client/keys.generated.go +++ /dev/null @@ -1,1087 +0,0 @@ -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen.
-// ************************************************************ - -package client - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81819 = 1 - codecSelferC_RAW1819 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1819 = 10 - codecSelferValueTypeMap1819 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1819 = 2 - codecSelfer_containerMapValue1819 = 3 - codecSelfer_containerMapEnd1819 = 4 - codecSelfer_containerArrayElem1819 = 6 - codecSelfer_containerArrayEnd1819 = 7 -) - -var ( - codecSelferBitsize1819 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1819 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1819 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 time.Time - _ = v0 - } -} - -func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 3 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Action)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("action")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Action)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if x.Node == nil { - r.EncodeNil() - } else { - x.Node.CodecEncodeSelf(e) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("node")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - if x.Node == nil { - r.EncodeNil() - } else { - x.Node.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if x.PrevNode == nil { - r.EncodeNil() - } else { - x.PrevNode.CodecEncodeSelf(e) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("prevNode")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - if x.PrevNode == nil { - r.EncodeNil() - } else { - x.PrevNode.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1819) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1819) - } - } - } -} - -func (x *Response) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, 
z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1819 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1819) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1819 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1819) - } - } -} - -func (x *Response) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1819) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1819) - switch yys3 { - case "action": - if r.TryDecodeAsNil() { - x.Action = "" - } else { - yyv4 := &x.Action - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "node": - if r.TryDecodeAsNil() { - if x.Node != nil { - x.Node = nil - } - } else { - if x.Node == nil { - x.Node = new(Node) - } - x.Node.CodecDecodeSelf(d) - } - case "prevNode": - if r.TryDecodeAsNil() { - if x.PrevNode != nil { - x.PrevNode = nil - } - } else { - if x.PrevNode == nil { - x.PrevNode = new(Node) - } - x.PrevNode.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1819) -} - -func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.Action = "" - } else { - yyv9 := &x.Action - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(yyv9)) = r.DecodeString() - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - if x.Node != nil { - x.Node = nil - } - } else { - if x.Node == nil { - x.Node = new(Node) - } - x.Node.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - if x.PrevNode != nil { - x.PrevNode = nil - } - } else { - if x.PrevNode == nil { - x.PrevNode = new(Node) - } - x.PrevNode.CodecDecodeSelf(d) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - 
z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) -} - -func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [8]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Dir != false - yyq2[6] = x.Expiration != nil - yyq2[7] = x.TTL != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(8) - } else { - yynn2 = 5 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Key)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Key)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeBool(bool(x.Dir)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("dir")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeBool(bool(x.Dir)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Value)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Value)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if x.Nodes == nil { - r.EncodeNil() - } else { - x.Nodes.CodecEncodeSelf(e) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("nodes")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - if x.Nodes == nil { - r.EncodeNil() - } else { - x.Nodes.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeUint(uint64(x.CreatedIndex)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("createdIndex")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeUint(uint64(x.CreatedIndex)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeUint(uint64(x.ModifiedIndex)) - } - } else { - 
z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("modifiedIndex")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeUint(uint64(x.ModifiedIndex)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if yyq2[6] { - if x.Expiration == nil { - r.EncodeNil() - } else { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else if yym23 := z.TimeRtidIfBinc(); yym23 != 0 { - r.EncodeBuiltin(yym23, x.Expiration) - } else if z.HasExtensions() && z.EncExt(x.Expiration) { - } else if yym22 { - z.EncBinaryMarshal(x.Expiration) - } else if !yym22 && z.IsJSONHandle() { - z.EncJSONMarshal(x.Expiration) - } else { - z.EncFallback(x.Expiration) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("expiration")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - if x.Expiration == nil { - r.EncodeNil() - } else { - yym24 := z.EncBinary() - _ = yym24 - if false { - } else if yym25 := z.TimeRtidIfBinc(); yym25 != 0 { - r.EncodeBuiltin(yym25, x.Expiration) - } else if z.HasExtensions() && z.EncExt(x.Expiration) { - } else if yym24 { - z.EncBinaryMarshal(x.Expiration) - } else if !yym24 && z.IsJSONHandle() { - z.EncJSONMarshal(x.Expiration) - } else { - z.EncFallback(x.Expiration) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if yyq2[7] { - yym27 := z.EncBinary() - _ = yym27 - if false { - } else { - r.EncodeInt(int64(x.TTL)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("ttl")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - r.EncodeInt(int64(x.TTL)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1819) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1819) - } - } - } -} - -func (x *Node) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1819 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1819) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1819 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1819) - } - } -} - -func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1819) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1819) - switch 
yys3 { - case "key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - yyv4 := &x.Key - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "dir": - if r.TryDecodeAsNil() { - x.Dir = false - } else { - yyv6 := &x.Dir - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*bool)(yyv6)) = r.DecodeBool() - } - } - case "value": - if r.TryDecodeAsNil() { - x.Value = "" - } else { - yyv8 := &x.Value - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - case "nodes": - if r.TryDecodeAsNil() { - x.Nodes = nil - } else { - yyv10 := &x.Nodes - yyv10.CodecDecodeSelf(d) - } - case "createdIndex": - if r.TryDecodeAsNil() { - x.CreatedIndex = 0 - } else { - yyv11 := &x.CreatedIndex - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*uint64)(yyv11)) = uint64(r.DecodeUint(64)) - } - } - case "modifiedIndex": - if r.TryDecodeAsNil() { - x.ModifiedIndex = 0 - } else { - yyv13 := &x.ModifiedIndex - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*uint64)(yyv13)) = uint64(r.DecodeUint(64)) - } - } - case "expiration": - if r.TryDecodeAsNil() { - if x.Expiration != nil { - x.Expiration = nil - } - } else { - if x.Expiration == nil { - x.Expiration = new(time.Time) - } - yym16 := z.DecBinary() - _ = yym16 - if false { - } else if yym17 := z.TimeRtidIfBinc(); yym17 != 0 { - r.DecodeBuiltin(yym17, x.Expiration) - } else if z.HasExtensions() && z.DecExt(x.Expiration) { - } else if yym16 { - z.DecBinaryUnmarshal(x.Expiration) - } else if !yym16 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.Expiration) - } else { - z.DecFallback(x.Expiration, false) - } - } - case "ttl": - if r.TryDecodeAsNil() { - x.TTL = 0 - } else { - yyv18 := &x.TTL - yym19 := z.DecBinary() - _ = yym19 - if false { - } else { - *((*int64)(yyv18)) = int64(r.DecodeInt(64)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1819) -} - -func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj20 int - var yyb20 bool - var yyhl20 bool = l >= 0 - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.Key = "" - } else { - yyv21 := &x.Key - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*string)(yyv21)) = r.DecodeString() - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.Dir = false - } else { - yyv23 := &x.Dir - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*bool)(yyv23)) = r.DecodeBool() - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.Value = "" - } else { - yyv25 := &x.Value - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - *((*string)(yyv25)) = r.DecodeString() - } - } - yyj20++ - if yyhl20 { - yyb20 = 
yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.Nodes = nil - } else { - yyv27 := &x.Nodes - yyv27.CodecDecodeSelf(d) - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.CreatedIndex = 0 - } else { - yyv28 := &x.CreatedIndex - yym29 := z.DecBinary() - _ = yym29 - if false { - } else { - *((*uint64)(yyv28)) = uint64(r.DecodeUint(64)) - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.ModifiedIndex = 0 - } else { - yyv30 := &x.ModifiedIndex - yym31 := z.DecBinary() - _ = yym31 - if false { - } else { - *((*uint64)(yyv30)) = uint64(r.DecodeUint(64)) - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - if x.Expiration != nil { - x.Expiration = nil - } - } else { - if x.Expiration == nil { - x.Expiration = new(time.Time) - } - yym33 := z.DecBinary() - _ = yym33 - if false { - } else if yym34 := z.TimeRtidIfBinc(); yym34 != 0 { - r.DecodeBuiltin(yym34, x.Expiration) - } else if z.HasExtensions() && z.DecExt(x.Expiration) { - } else if yym33 { - z.DecBinaryUnmarshal(x.Expiration) - } else if !yym33 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.Expiration) - } else { - z.DecFallback(x.Expiration, false) - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.TTL = 0 - } else { - yyv35 := &x.TTL - yym36 := z.DecBinary() - _ = yym36 - if false { - } else { - *((*int64)(yyv35)) = int64(r.DecodeInt(64)) - } - } - for { - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - z.DecStructFieldNotFound(yyj20-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) -} - -func (x Nodes) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - h.encNodes((Nodes)(x), e) - } - } -} - -func (x *Nodes) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - h.decNodes((*Nodes)(x), d) - } -} - -func (x codecSelfer1819) encNodes(v Nodes, e *codec1978.Encoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if yyv1 == 
nil { - r.EncodeNil() - } else { - yyv1.CodecEncodeSelf(e) - } - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1819) -} - -func (x codecSelfer1819) decNodes(v *Nodes, d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []*Node{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]*Node, yyrl1) - } - } else { - yyv1 = make([]*Node, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - if yyv1[yyj1] != nil { - *yyv1[yyj1] = Node{} - } - } else { - if yyv1[yyj1] == nil { - yyv1[yyj1] = new(Node) - } - yyw2 := yyv1[yyj1] - yyw2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, nil) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - if yyv1[yyj1] != nil { - *yyv1[yyj1] = Node{} - } - } else { - if yyv1[yyj1] == nil { - yyv1[yyj1] = new(Node) - } - yyw3 := yyv1[yyj1] - yyw3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, nil) // var yyz1 *Node - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - if yyv1[yyj1] != nil { - *yyv1[yyj1] = Node{} - } - } else { - if yyv1[yyj1] == nil { - yyv1[yyj1] = new(Node) - } - yyw4 := yyv1[yyj1] - yyw4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []*Node{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/client/keys.go b/cluster-autoscaler/vendor/github.com/coreos/etcd/client/keys.go deleted file mode 100644 index 4a6c41a78bc8..000000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/client/keys.go +++ /dev/null @@ -1,682 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
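[Editor's note] The file deleted above is codecgen output for the v2 client's keys.go (note the `//go:generate codecgen` directive at the top of the next deleted file): it pre-generates `CodecEncodeSelf`/`CodecDecodeSelf` for `Response`, `Node`, and `Nodes` so that `github.com/ugorji/go/codec` never falls back to reflection on the hot decode path. A minimal sketch of that decode path, using trimmed local stand-ins rather than the vendored client types:

```go
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

// Trimmed stand-ins for the vendored client.Node and client.Response.
type node struct {
	Key           string `json:"key"`
	Value         string `json:"value"`
	CreatedIndex  uint64 `json:"createdIndex"`
	ModifiedIndex uint64 `json:"modifiedIndex"`
}

type response struct {
	Action   string `json:"action"`
	Node     *node  `json:"node"`
	PrevNode *node  `json:"prevNode"`
}

func main() {
	body := []byte(`{"action":"get","node":{"key":"/foo","value":"bar","createdIndex":7,"modifiedIndex":7}}`)
	var res response
	// This is the same call unmarshalSuccessfulKeysResponse makes in keys.go;
	// without generated CodecDecodeSelf methods it falls back to reflection,
	// which is exactly the cost the generated file exists to avoid.
	if err := codec.NewDecoderBytes(body, new(codec.JsonHandle)).Decode(&res); err != nil {
		panic(err)
	}
	fmt.Println(res.Action, res.Node.Key, res.Node.Value) // get /foo bar
}
```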
- -package client - -//go:generate codecgen -d 1819 -r "Node|Response|Nodes" -o keys.generated.go keys.go - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/coreos/etcd/pkg/pathutil" - "github.com/ugorji/go/codec" - "golang.org/x/net/context" -) - -const ( - ErrorCodeKeyNotFound = 100 - ErrorCodeTestFailed = 101 - ErrorCodeNotFile = 102 - ErrorCodeNotDir = 104 - ErrorCodeNodeExist = 105 - ErrorCodeRootROnly = 107 - ErrorCodeDirNotEmpty = 108 - ErrorCodeUnauthorized = 110 - - ErrorCodePrevValueRequired = 201 - ErrorCodeTTLNaN = 202 - ErrorCodeIndexNaN = 203 - ErrorCodeInvalidField = 209 - ErrorCodeInvalidForm = 210 - - ErrorCodeRaftInternal = 300 - ErrorCodeLeaderElect = 301 - - ErrorCodeWatcherCleared = 400 - ErrorCodeEventIndexCleared = 401 -) - -type Error struct { - Code int `json:"errorCode"` - Message string `json:"message"` - Cause string `json:"cause"` - Index uint64 `json:"index"` -} - -func (e Error) Error() string { - return fmt.Sprintf("%v: %v (%v) [%v]", e.Code, e.Message, e.Cause, e.Index) -} - -var ( - ErrInvalidJSON = errors.New("client: response is invalid json. The endpoint is probably not valid etcd cluster endpoint.") - ErrEmptyBody = errors.New("client: response body is empty") -) - -// PrevExistType is used to define an existence condition when setting -// or deleting Nodes. -type PrevExistType string - -const ( - PrevIgnore = PrevExistType("") - PrevExist = PrevExistType("true") - PrevNoExist = PrevExistType("false") -) - -var ( - defaultV2KeysPrefix = "/v2/keys" -) - -// NewKeysAPI builds a KeysAPI that interacts with etcd's key-value -// API over HTTP. -func NewKeysAPI(c Client) KeysAPI { - return NewKeysAPIWithPrefix(c, defaultV2KeysPrefix) -} - -// NewKeysAPIWithPrefix acts like NewKeysAPI, but allows the caller -// to provide a custom base URL path. This should only be used in -// very rare cases. -func NewKeysAPIWithPrefix(c Client, p string) KeysAPI { - return &httpKeysAPI{ - client: c, - prefix: p, - } -} - -type KeysAPI interface { - // Get retrieves a set of Nodes from etcd - Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) - - // Set assigns a new value to a Node identified by a given key. The caller - // may define a set of conditions in the SetOptions. If SetOptions.Dir=true - // then value is ignored. - Set(ctx context.Context, key, value string, opts *SetOptions) (*Response, error) - - // Delete removes a Node identified by the given key, optionally destroying - // all of its children as well. The caller may define a set of required - // conditions in an DeleteOptions object. - Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) - - // Create is an alias for Set w/ PrevExist=false - Create(ctx context.Context, key, value string) (*Response, error) - - // CreateInOrder is used to atomically create in-order keys within the given directory. - CreateInOrder(ctx context.Context, dir, value string, opts *CreateInOrderOptions) (*Response, error) - - // Update is an alias for Set w/ PrevExist=true - Update(ctx context.Context, key, value string) (*Response, error) - - // Watcher builds a new Watcher targeted at a specific Node identified - // by the given key. The Watcher may be configured at creation time - // through a WatcherOptions object. The returned Watcher is designed - // to emit events that happen to a Node, and optionally to its children. 
- Watcher(key string, opts *WatcherOptions) Watcher -} - -type WatcherOptions struct { - // AfterIndex defines the index after-which the Watcher should - // start emitting events. For example, if a value of 5 is - // provided, the first event will have an index >= 6. - // - // Setting AfterIndex to 0 (default) means that the Watcher - // should start watching for events starting at the current - // index, whatever that may be. - AfterIndex uint64 - - // Recursive specifies whether or not the Watcher should emit - // events that occur in children of the given keyspace. If set - // to false (default), events will be limited to those that - // occur for the exact key. - Recursive bool -} - -type CreateInOrderOptions struct { - // TTL defines a period of time after-which the Node should - // expire and no longer exist. Values <= 0 are ignored. Given - // that the zero-value is ignored, TTL cannot be used to set - // a TTL of 0. - TTL time.Duration -} - -type SetOptions struct { - // PrevValue specifies what the current value of the Node must - // be in order for the Set operation to succeed. - // - // Leaving this field empty means that the caller wishes to - // ignore the current value of the Node. This cannot be used - // to compare the Node's current value to an empty string. - // - // PrevValue is ignored if Dir=true - PrevValue string - - // PrevIndex indicates what the current ModifiedIndex of the - // Node must be in order for the Set operation to succeed. - // - // If PrevIndex is set to 0 (default), no comparison is made. - PrevIndex uint64 - - // PrevExist specifies whether the Node must currently exist - // (PrevExist) or not (PrevNoExist). If the caller does not - // care about existence, set PrevExist to PrevIgnore, or simply - // leave it unset. - PrevExist PrevExistType - - // TTL defines a period of time after-which the Node should - // expire and no longer exist. Values <= 0 are ignored. Given - // that the zero-value is ignored, TTL cannot be used to set - // a TTL of 0. - TTL time.Duration - - // Refresh set to true means a TTL value can be updated - // without firing a watch or changing the node value. A - // value must not be provided when refreshing a key. - Refresh bool - - // Dir specifies whether or not this Node should be created as a directory. - Dir bool - - // NoValueOnSuccess specifies whether the response contains the current value of the Node. - // If set, the response will only contain the current value when the request fails. - NoValueOnSuccess bool -} - -type GetOptions struct { - // Recursive defines whether or not all children of the Node - // should be returned. - Recursive bool - - // Sort instructs the server whether or not to sort the Nodes. - // If true, the Nodes are sorted alphabetically by key in - // ascending order (A to z). If false (default), the Nodes will - // not be sorted and the ordering used should not be considered - // predictable. - Sort bool - - // Quorum specifies whether it gets the latest committed value that - // has been applied in quorum of members, which ensures external - // consistency (or linearizability). - Quorum bool -} - -type DeleteOptions struct { - // PrevValue specifies what the current value of the Node must - // be in order for the Delete operation to succeed. - // - // Leaving this field empty means that the caller wishes to - // ignore the current value of the Node. This cannot be used - // to compare the Node's current value to an empty string. 
- PrevValue string - - // PrevIndex indicates what the current ModifiedIndex of the - // Node must be in order for the Delete operation to succeed. - // - // If PrevIndex is set to 0 (default), no comparison is made. - PrevIndex uint64 - - // Recursive defines whether or not all children of the Node - // should be deleted. If set to true, all children of the Node - // identified by the given key will be deleted. If left unset - // or explicitly set to false, only a single Node will be - // deleted. - Recursive bool - - // Dir specifies whether or not this Node should be removed as a directory. - Dir bool -} - -type Watcher interface { - // Next blocks until an etcd event occurs, then returns a Response - // representing that event. The behavior of Next depends on the - // WatcherOptions used to construct the Watcher. Next is designed to - // be called repeatedly, each time blocking until a subsequent event - // is available. - // - // If the provided context is cancelled, Next will return a non-nil - // error. Any other failures encountered while waiting for the next - // event (connection issues, deserialization failures, etc) will - // also result in a non-nil error. - Next(context.Context) (*Response, error) -} - -type Response struct { - // Action is the name of the operation that occurred. Possible values - // include get, set, delete, update, create, compareAndSwap, - // compareAndDelete and expire. - Action string `json:"action"` - - // Node represents the state of the relevant etcd Node. - Node *Node `json:"node"` - - // PrevNode represents the previous state of the Node. PrevNode is non-nil - // only if the Node existed before the action occurred and the action - // caused a change to the Node. - PrevNode *Node `json:"prevNode"` - - // Index holds the cluster-level index at the time the Response was generated. - // This index is not tied to the Node(s) contained in this Response. - Index uint64 `json:"-"` - - // ClusterID holds the cluster-level ID reported by the server. This - // should be different for different etcd clusters. - ClusterID string `json:"-"` -} - -type Node struct { - // Key represents the unique location of this Node (e.g. "/foo/bar"). - Key string `json:"key"` - - // Dir reports whether node describes a directory. - Dir bool `json:"dir,omitempty"` - - // Value is the current data stored on this Node. If this Node - // is a directory, Value will be empty. - Value string `json:"value"` - - // Nodes holds the children of this Node, only if this Node is a directory. - // This slice of will be arbitrarily deep (children, grandchildren, great- - // grandchildren, etc.) if a recursive Get or Watch request were made. - Nodes Nodes `json:"nodes"` - - // CreatedIndex is the etcd index at-which this Node was created. - CreatedIndex uint64 `json:"createdIndex"` - - // ModifiedIndex is the etcd index at-which this Node was last modified. - ModifiedIndex uint64 `json:"modifiedIndex"` - - // Expiration is the server side expiration time of the key. - Expiration *time.Time `json:"expiration,omitempty"` - - // TTL is the time to live of the key in second. 
- TTL int64 `json:"ttl,omitempty"` -} - -func (n *Node) String() string { - return fmt.Sprintf("{Key: %s, CreatedIndex: %d, ModifiedIndex: %d, TTL: %d}", n.Key, n.CreatedIndex, n.ModifiedIndex, n.TTL) -} - -// TTLDuration returns the Node's TTL as a time.Duration object -func (n *Node) TTLDuration() time.Duration { - return time.Duration(n.TTL) * time.Second -} - -type Nodes []*Node - -// interfaces for sorting - -func (ns Nodes) Len() int { return len(ns) } -func (ns Nodes) Less(i, j int) bool { return ns[i].Key < ns[j].Key } -func (ns Nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] } - -type httpKeysAPI struct { - client httpClient - prefix string -} - -func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions) (*Response, error) { - act := &setAction{ - Prefix: k.prefix, - Key: key, - Value: val, - } - - if opts != nil { - act.PrevValue = opts.PrevValue - act.PrevIndex = opts.PrevIndex - act.PrevExist = opts.PrevExist - act.TTL = opts.TTL - act.Refresh = opts.Refresh - act.Dir = opts.Dir - act.NoValueOnSuccess = opts.NoValueOnSuccess - } - - doCtx := ctx - if act.PrevExist == PrevNoExist { - doCtx = context.WithValue(doCtx, &oneShotCtxValue, &oneShotCtxValue) - } - resp, body, err := k.client.Do(doCtx, act) - if err != nil { - return nil, err - } - - return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) -} - -func (k *httpKeysAPI) Create(ctx context.Context, key, val string) (*Response, error) { - return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevNoExist}) -} - -func (k *httpKeysAPI) CreateInOrder(ctx context.Context, dir, val string, opts *CreateInOrderOptions) (*Response, error) { - act := &createInOrderAction{ - Prefix: k.prefix, - Dir: dir, - Value: val, - } - - if opts != nil { - act.TTL = opts.TTL - } - - resp, body, err := k.client.Do(ctx, act) - if err != nil { - return nil, err - } - - return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) -} - -func (k *httpKeysAPI) Update(ctx context.Context, key, val string) (*Response, error) { - return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevExist}) -} - -func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) { - act := &deleteAction{ - Prefix: k.prefix, - Key: key, - } - - if opts != nil { - act.PrevValue = opts.PrevValue - act.PrevIndex = opts.PrevIndex - act.Dir = opts.Dir - act.Recursive = opts.Recursive - } - - doCtx := context.WithValue(ctx, &oneShotCtxValue, &oneShotCtxValue) - resp, body, err := k.client.Do(doCtx, act) - if err != nil { - return nil, err - } - - return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) -} - -func (k *httpKeysAPI) Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) { - act := &getAction{ - Prefix: k.prefix, - Key: key, - } - - if opts != nil { - act.Recursive = opts.Recursive - act.Sorted = opts.Sort - act.Quorum = opts.Quorum - } - - resp, body, err := k.client.Do(ctx, act) - if err != nil { - return nil, err - } - - return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) -} - -func (k *httpKeysAPI) Watcher(key string, opts *WatcherOptions) Watcher { - act := waitAction{ - Prefix: k.prefix, - Key: key, - } - - if opts != nil { - act.Recursive = opts.Recursive - if opts.AfterIndex > 0 { - act.WaitIndex = opts.AfterIndex + 1 - } - } - - return &httpWatcher{ - client: k.client, - nextWait: act, - } -} - -type httpWatcher struct { - client httpClient - nextWait waitAction -} - -func (hw *httpWatcher) Next(ctx context.Context) (*Response, error) { - for { - 
httpresp, body, err := hw.client.Do(ctx, &hw.nextWait) - if err != nil { - return nil, err - } - - resp, err := unmarshalHTTPResponse(httpresp.StatusCode, httpresp.Header, body) - if err != nil { - if err == ErrEmptyBody { - continue - } - return nil, err - } - - hw.nextWait.WaitIndex = resp.Node.ModifiedIndex + 1 - return resp, nil - } -} - -// v2KeysURL forms a URL representing the location of a key. -// The endpoint argument represents the base URL of an etcd -// server. The prefix is the path needed to route from the -// provided endpoint's path to the root of the keys API -// (typically "/v2/keys"). -func v2KeysURL(ep url.URL, prefix, key string) *url.URL { - // We concatenate all parts together manually. We cannot use - // path.Join because it does not reserve trailing slash. - // We call CanonicalURLPath to further cleanup the path. - if prefix != "" && prefix[0] != '/' { - prefix = "/" + prefix - } - if key != "" && key[0] != '/' { - key = "/" + key - } - ep.Path = pathutil.CanonicalURLPath(ep.Path + prefix + key) - return &ep -} - -type getAction struct { - Prefix string - Key string - Recursive bool - Sorted bool - Quorum bool -} - -func (g *getAction) HTTPRequest(ep url.URL) *http.Request { - u := v2KeysURL(ep, g.Prefix, g.Key) - - params := u.Query() - params.Set("recursive", strconv.FormatBool(g.Recursive)) - params.Set("sorted", strconv.FormatBool(g.Sorted)) - params.Set("quorum", strconv.FormatBool(g.Quorum)) - u.RawQuery = params.Encode() - - req, _ := http.NewRequest("GET", u.String(), nil) - return req -} - -type waitAction struct { - Prefix string - Key string - WaitIndex uint64 - Recursive bool -} - -func (w *waitAction) HTTPRequest(ep url.URL) *http.Request { - u := v2KeysURL(ep, w.Prefix, w.Key) - - params := u.Query() - params.Set("wait", "true") - params.Set("waitIndex", strconv.FormatUint(w.WaitIndex, 10)) - params.Set("recursive", strconv.FormatBool(w.Recursive)) - u.RawQuery = params.Encode() - - req, _ := http.NewRequest("GET", u.String(), nil) - return req -} - -type setAction struct { - Prefix string - Key string - Value string - PrevValue string - PrevIndex uint64 - PrevExist PrevExistType - TTL time.Duration - Refresh bool - Dir bool - NoValueOnSuccess bool -} - -func (a *setAction) HTTPRequest(ep url.URL) *http.Request { - u := v2KeysURL(ep, a.Prefix, a.Key) - - params := u.Query() - form := url.Values{} - - // we're either creating a directory or setting a key - if a.Dir { - params.Set("dir", strconv.FormatBool(a.Dir)) - } else { - // These options are only valid for setting a key - if a.PrevValue != "" { - params.Set("prevValue", a.PrevValue) - } - form.Add("value", a.Value) - } - - // Options which apply to both setting a key and creating a dir - if a.PrevIndex != 0 { - params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10)) - } - if a.PrevExist != PrevIgnore { - params.Set("prevExist", string(a.PrevExist)) - } - if a.TTL > 0 { - form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10)) - } - - if a.Refresh { - form.Add("refresh", "true") - } - if a.NoValueOnSuccess { - params.Set("noValueOnSuccess", strconv.FormatBool(a.NoValueOnSuccess)) - } - - u.RawQuery = params.Encode() - body := strings.NewReader(form.Encode()) - - req, _ := http.NewRequest("PUT", u.String(), body) - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - - return req -} - -type deleteAction struct { - Prefix string - Key string - PrevValue string - PrevIndex uint64 - Dir bool - Recursive bool -} - -func (a *deleteAction) HTTPRequest(ep url.URL) 
*http.Request { - u := v2KeysURL(ep, a.Prefix, a.Key) - - params := u.Query() - if a.PrevValue != "" { - params.Set("prevValue", a.PrevValue) - } - if a.PrevIndex != 0 { - params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10)) - } - if a.Dir { - params.Set("dir", "true") - } - if a.Recursive { - params.Set("recursive", "true") - } - u.RawQuery = params.Encode() - - req, _ := http.NewRequest("DELETE", u.String(), nil) - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - - return req -} - -type createInOrderAction struct { - Prefix string - Dir string - Value string - TTL time.Duration -} - -func (a *createInOrderAction) HTTPRequest(ep url.URL) *http.Request { - u := v2KeysURL(ep, a.Prefix, a.Dir) - - form := url.Values{} - form.Add("value", a.Value) - if a.TTL > 0 { - form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10)) - } - body := strings.NewReader(form.Encode()) - - req, _ := http.NewRequest("POST", u.String(), body) - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - return req -} - -func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Response, err error) { - switch code { - case http.StatusOK, http.StatusCreated: - if len(body) == 0 { - return nil, ErrEmptyBody - } - res, err = unmarshalSuccessfulKeysResponse(header, body) - default: - err = unmarshalFailedKeysResponse(body) - } - - return -} - -func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) { - var res Response - err := codec.NewDecoderBytes(body, new(codec.JsonHandle)).Decode(&res) - if err != nil { - return nil, ErrInvalidJSON - } - if header.Get("X-Etcd-Index") != "" { - res.Index, err = strconv.ParseUint(header.Get("X-Etcd-Index"), 10, 64) - if err != nil { - return nil, err - } - } - res.ClusterID = header.Get("X-Etcd-Cluster-ID") - return &res, nil -} - -func unmarshalFailedKeysResponse(body []byte) error { - var etcdErr Error - if err := json.Unmarshal(body, &etcdErr); err != nil { - return ErrInvalidJSON - } - return etcdErr -} diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/client/members.go b/cluster-autoscaler/vendor/github.com/coreos/etcd/client/members.go deleted file mode 100644 index 23adf07ad91b..000000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/client/members.go +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" - "net/url" - "path" - - "golang.org/x/net/context" - - "github.com/coreos/etcd/pkg/types" -) - -var ( - defaultV2MembersPrefix = "/v2/members" - defaultLeaderSuffix = "/leader" -) - -type Member struct { - // ID is the unique identifier of this Member. - ID string `json:"id"` - - // Name is a human-readable, non-unique identifier of this Member. - Name string `json:"name"` - - // PeerURLs represents the HTTP(S) endpoints this Member uses to - // participate in etcd's consensus protocol. 
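[Editor's note] keys.go, deleted just above, is the entire v2 key-value surface: the `KeysAPI` interface, its option structs, and the HTTP actions that serialize them. For readers auditing this vendor removal, a hedged usage sketch of that API; `client.New` and `client.Config` live in the package's client.go, outside this hunk, so their exact shape is an assumption here:

```go
package main

import (
	"fmt"
	"time"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

func main() {
	// client.New / client.Config are assumed from the vendored package's
	// client.go; only keys.go appears in this hunk.
	c, err := client.New(client.Config{Endpoints: []string{"http://127.0.0.1:2379"}})
	if err != nil {
		panic(err)
	}
	kapi := client.NewKeysAPI(c)
	ctx := context.Background()

	// Create-only write with a TTL: PrevNoExist makes the Set fail with
	// ErrorCodeNodeExist (105) if /foo is already present.
	if _, err := kapi.Set(ctx, "/foo", "bar", &client.SetOptions{
		PrevExist: client.PrevNoExist,
		TTL:       30 * time.Second,
	}); err != nil {
		panic(err)
	}

	// Quorum read: the linearizable variant described on GetOptions above.
	resp, err := kapi.Get(ctx, "/foo", &client.GetOptions{Quorum: true})
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Node.Value)
}
```

Watching is the remaining piece: `kapi.Watcher(key, &client.WatcherOptions{Recursive: true})` returns a `Watcher`, and as the `httpWatcher.Next` loop above shows, each successful event re-arms the long poll with `WaitIndex = resp.Node.ModifiedIndex + 1`.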
- PeerURLs []string `json:"peerURLs"` - - // ClientURLs represents the HTTP(S) endpoints on which this Member - // serves it's client-facing APIs. - ClientURLs []string `json:"clientURLs"` -} - -type memberCollection []Member - -func (c *memberCollection) UnmarshalJSON(data []byte) error { - d := struct { - Members []Member - }{} - - if err := json.Unmarshal(data, &d); err != nil { - return err - } - - if d.Members == nil { - *c = make([]Member, 0) - return nil - } - - *c = d.Members - return nil -} - -type memberCreateOrUpdateRequest struct { - PeerURLs types.URLs -} - -func (m *memberCreateOrUpdateRequest) MarshalJSON() ([]byte, error) { - s := struct { - PeerURLs []string `json:"peerURLs"` - }{ - PeerURLs: make([]string, len(m.PeerURLs)), - } - - for i, u := range m.PeerURLs { - s.PeerURLs[i] = u.String() - } - - return json.Marshal(&s) -} - -// NewMembersAPI constructs a new MembersAPI that uses HTTP to -// interact with etcd's membership API. -func NewMembersAPI(c Client) MembersAPI { - return &httpMembersAPI{ - client: c, - } -} - -type MembersAPI interface { - // List enumerates the current cluster membership. - List(ctx context.Context) ([]Member, error) - - // Add instructs etcd to accept a new Member into the cluster. - Add(ctx context.Context, peerURL string) (*Member, error) - - // Remove demotes an existing Member out of the cluster. - Remove(ctx context.Context, mID string) error - - // Update instructs etcd to update an existing Member in the cluster. - Update(ctx context.Context, mID string, peerURLs []string) error - - // Leader gets current leader of the cluster - Leader(ctx context.Context) (*Member, error) -} - -type httpMembersAPI struct { - client httpClient -} - -func (m *httpMembersAPI) List(ctx context.Context) ([]Member, error) { - req := &membersAPIActionList{} - resp, body, err := m.client.Do(ctx, req) - if err != nil { - return nil, err - } - - if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - return nil, err - } - - var mCollection memberCollection - if err := json.Unmarshal(body, &mCollection); err != nil { - return nil, err - } - - return []Member(mCollection), nil -} - -func (m *httpMembersAPI) Add(ctx context.Context, peerURL string) (*Member, error) { - urls, err := types.NewURLs([]string{peerURL}) - if err != nil { - return nil, err - } - - req := &membersAPIActionAdd{peerURLs: urls} - resp, body, err := m.client.Do(ctx, req) - if err != nil { - return nil, err - } - - if err := assertStatusCode(resp.StatusCode, http.StatusCreated, http.StatusConflict); err != nil { - return nil, err - } - - if resp.StatusCode != http.StatusCreated { - var merr membersError - if err := json.Unmarshal(body, &merr); err != nil { - return nil, err - } - return nil, merr - } - - var memb Member - if err := json.Unmarshal(body, &memb); err != nil { - return nil, err - } - - return &memb, nil -} - -func (m *httpMembersAPI) Update(ctx context.Context, memberID string, peerURLs []string) error { - urls, err := types.NewURLs(peerURLs) - if err != nil { - return err - } - - req := &membersAPIActionUpdate{peerURLs: urls, memberID: memberID} - resp, body, err := m.client.Do(ctx, req) - if err != nil { - return err - } - - if err := assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusNotFound, http.StatusConflict); err != nil { - return err - } - - if resp.StatusCode != http.StatusNoContent { - var merr membersError - if err := json.Unmarshal(body, &merr); err != nil { - return err - } - return merr - } - - return nil -} - -func (m 
*httpMembersAPI) Remove(ctx context.Context, memberID string) error { - req := &membersAPIActionRemove{memberID: memberID} - resp, _, err := m.client.Do(ctx, req) - if err != nil { - return err - } - - return assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusGone) -} - -func (m *httpMembersAPI) Leader(ctx context.Context) (*Member, error) { - req := &membersAPIActionLeader{} - resp, body, err := m.client.Do(ctx, req) - if err != nil { - return nil, err - } - - if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - return nil, err - } - - var leader Member - if err := json.Unmarshal(body, &leader); err != nil { - return nil, err - } - - return &leader, nil -} - -type membersAPIActionList struct{} - -func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request { - u := v2MembersURL(ep) - req, _ := http.NewRequest("GET", u.String(), nil) - return req -} - -type membersAPIActionRemove struct { - memberID string -} - -func (d *membersAPIActionRemove) HTTPRequest(ep url.URL) *http.Request { - u := v2MembersURL(ep) - u.Path = path.Join(u.Path, d.memberID) - req, _ := http.NewRequest("DELETE", u.String(), nil) - return req -} - -type membersAPIActionAdd struct { - peerURLs types.URLs -} - -func (a *membersAPIActionAdd) HTTPRequest(ep url.URL) *http.Request { - u := v2MembersURL(ep) - m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs} - b, _ := json.Marshal(&m) - req, _ := http.NewRequest("POST", u.String(), bytes.NewReader(b)) - req.Header.Set("Content-Type", "application/json") - return req -} - -type membersAPIActionUpdate struct { - memberID string - peerURLs types.URLs -} - -func (a *membersAPIActionUpdate) HTTPRequest(ep url.URL) *http.Request { - u := v2MembersURL(ep) - m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs} - u.Path = path.Join(u.Path, a.memberID) - b, _ := json.Marshal(&m) - req, _ := http.NewRequest("PUT", u.String(), bytes.NewReader(b)) - req.Header.Set("Content-Type", "application/json") - return req -} - -func assertStatusCode(got int, want ...int) (err error) { - for _, w := range want { - if w == got { - return nil - } - } - return fmt.Errorf("unexpected status code %d", got) -} - -type membersAPIActionLeader struct{} - -func (l *membersAPIActionLeader) HTTPRequest(ep url.URL) *http.Request { - u := v2MembersURL(ep) - u.Path = path.Join(u.Path, defaultLeaderSuffix) - req, _ := http.NewRequest("GET", u.String(), nil) - return req -} - -// v2MembersURL add the necessary path to the provided endpoint -// to route requests to the default v2 members API. -func v2MembersURL(ep url.URL) *url.URL { - ep.Path = path.Join(ep.Path, defaultV2MembersPrefix) - return &ep -} - -type membersError struct { - Message string `json:"message"` - Code int `json:"-"` -} - -func (e membersError) Error() string { - return e.Message -} diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/client/util.go b/cluster-autoscaler/vendor/github.com/coreos/etcd/client/util.go deleted file mode 100644 index 15a8babff4d4..000000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/client/util.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
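[Editor's note] members.go, ending above, wraps the v2 membership endpoints under `/v2/members`. A sketch of listing members through it, under the same assumption about `client.New`/`client.Config` as before:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

func main() {
	c, err := client.New(client.Config{Endpoints: []string{"http://127.0.0.1:2379"}})
	if err != nil {
		panic(err)
	}
	mapi := client.NewMembersAPI(c)

	// List hits GET /v2/members; Add, Remove, Update, and Leader follow the
	// same one-action-per-request pattern shown in the deleted file.
	members, err := mapi.List(context.Background())
	if err != nil {
		panic(err)
	}
	for _, m := range members {
		fmt.Printf("%s %s peers=%v clients=%v\n", m.ID, m.Name, m.PeerURLs, m.ClientURLs)
	}
}
```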
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "regexp" -) - -var ( - roleNotFoundRegExp *regexp.Regexp - userNotFoundRegExp *regexp.Regexp -) - -func init() { - roleNotFoundRegExp = regexp.MustCompile("auth: Role .* does not exist.") - userNotFoundRegExp = regexp.MustCompile("auth: User .* does not exist.") -} - -// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound. -func IsKeyNotFound(err error) bool { - if cErr, ok := err.(Error); ok { - return cErr.Code == ErrorCodeKeyNotFound - } - return false -} - -// IsRoleNotFound returns true if the error means role not found of v2 API. -func IsRoleNotFound(err error) bool { - if ae, ok := err.(authError); ok { - return roleNotFoundRegExp.MatchString(ae.Message) - } - return false -} - -// IsUserNotFound returns true if the error means user not found of v2 API. -func IsUserNotFound(err error) bool { - if ae, ok := err.(authError); ok { - return userNotFoundRegExp.MatchString(ae.Message) - } - return false -} diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/pathutil/path.go b/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/pathutil/path.go deleted file mode 100644 index f26254ba933e..000000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/pathutil/path.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package pathutil implements utility functions for handling slash-separated -// paths. -package pathutil - -import "path" - -// CanonicalURLPath returns the canonical url path for p, which follows the rules: -// 1. the path always starts with "/" -// 2. replace multiple slashes with a single slash -// 3. replace each '.' '..' path name element with equivalent one -// 4. keep the trailing slash -// The function is borrowed from stdlib http.cleanPath in server.go. -func CanonicalURLPath(p string) string { - if p == "" { - return "/" - } - if p[0] != '/' { - p = "/" + p - } - np := path.Clean(p) - // path.Clean removes trailing slash except for root, - // put the trailing slash back if necessary. - if p[len(p)-1] == '/' && np != "/" { - np += "/" - } - return np -} diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/srv/srv.go b/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/srv/srv.go deleted file mode 100644 index fefcbcb4b889..000000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/srv/srv.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
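[Editor's note] util.go, just removed, exists so callers can match etcd's structured errors instead of string-matching, and pathutil.CanonicalURLPath (also deleted above) is the URL-cleaning helper the HTTP actions rely on (e.g. it maps `"v2/keys//foo/"` to `"/v2/keys/foo/"`, keeping the trailing slash). A small sketch of the intended use of `IsKeyNotFound`:

```go
package example

import (
	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

// getOrDefault reads a key and treats ErrorCodeKeyNotFound (100) as simple
// absence, returning a fallback value instead of surfacing an error.
func getOrDefault(ctx context.Context, kapi client.KeysAPI, key, def string) (string, error) {
	resp, err := kapi.Get(ctx, key, nil)
	if client.IsKeyNotFound(err) {
		return def, nil
	}
	if err != nil {
		return "", err
	}
	return resp.Node.Value, nil
}
```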
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Package srv looks up DNS SRV records. -package srv - -import ( - "fmt" - "net" - "net/url" - "strings" - - "github.com/coreos/etcd/pkg/types" -) - -var ( - // indirection for testing - lookupSRV = net.LookupSRV // net.DefaultResolver.LookupSRV when ctxs don't conflict - resolveTCPAddr = net.ResolveTCPAddr -) - -// GetCluster gets the cluster information via DNS discovery. -// Also sees each entry as a separate instance. -func GetCluster(service, name, dns string, apurls types.URLs) ([]string, error) { - tempName := int(0) - tcp2ap := make(map[string]url.URL) - - // First, resolve the apurls - for _, url := range apurls { - tcpAddr, err := resolveTCPAddr("tcp", url.Host) - if err != nil { - return nil, err - } - tcp2ap[tcpAddr.String()] = url - } - - stringParts := []string{} - updateNodeMap := func(service, scheme string) error { - _, addrs, err := lookupSRV(service, "tcp", dns) - if err != nil { - return err - } - for _, srv := range addrs { - port := fmt.Sprintf("%d", srv.Port) - host := net.JoinHostPort(srv.Target, port) - tcpAddr, terr := resolveTCPAddr("tcp", host) - if terr != nil { - err = terr - continue - } - n := "" - url, ok := tcp2ap[tcpAddr.String()] - if ok { - n = name - } - if n == "" { - n = fmt.Sprintf("%d", tempName) - tempName++ - } - // SRV records have a trailing dot but URL shouldn't. - shortHost := strings.TrimSuffix(srv.Target, ".") - urlHost := net.JoinHostPort(shortHost, port) - stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost)) - if ok && url.Scheme != scheme { - err = fmt.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String()) - } - } - if len(stringParts) == 0 { - return err - } - return nil - } - - failCount := 0 - err := updateNodeMap(service+"-ssl", "https") - srvErr := make([]string, 2) - if err != nil { - srvErr[0] = fmt.Sprintf("error querying DNS SRV records for _%s-ssl %s", service, err) - failCount++ - } - err = updateNodeMap(service, "http") - if err != nil { - srvErr[1] = fmt.Sprintf("error querying DNS SRV records for _%s %s", service, err) - failCount++ - } - if failCount == 2 { - return nil, fmt.Errorf("srv: too many errors querying DNS SRV records (%q, %q)", srvErr[0], srvErr[1]) - } - return stringParts, nil -} - -type SRVClients struct { - Endpoints []string - SRVs []*net.SRV -} - -// GetClient looks up the client endpoints for a service and domain. -func GetClient(service, domain string) (*SRVClients, error) { - var urls []*url.URL - var srvs []*net.SRV - - updateURLs := func(service, scheme string) error { - _, addrs, err := lookupSRV(service, "tcp", domain) - if err != nil { - return err - } - for _, srv := range addrs { - urls = append(urls, &url.URL{ - Scheme: scheme, - Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)), - }) - } - srvs = append(srvs, addrs...) 
- return nil - } - - errHTTPS := updateURLs(service+"-ssl", "https") - errHTTP := updateURLs(service, "http") - - if errHTTPS != nil && errHTTP != nil { - return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP) - } - - endpoints := make([]string, len(urls)) - for i := range urls { - endpoints[i] = urls[i].String() - } - return &SRVClients{Endpoints: endpoints, SRVs: srvs}, nil -} diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/types/doc.go b/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/types/doc.go deleted file mode 100644 index de8ef0bd7126..000000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/types/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package types declares various data types and implements type-checking -// functions. -package types diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/types/id.go b/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/types/id.go deleted file mode 100644 index 1b042d9ce652..000000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/types/id.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "strconv" -) - -// ID represents a generic identifier which is canonically -// stored as a uint64 but is typically represented as a -// base-16 string for input/output -type ID uint64 - -func (i ID) String() string { - return strconv.FormatUint(uint64(i), 16) -} - -// IDFromString attempts to create an ID from a base-16 string. -func IDFromString(s string) (ID, error) { - i, err := strconv.ParseUint(s, 16, 64) - return ID(i), err -} - -// IDSlice implements the sort interface -type IDSlice []ID - -func (p IDSlice) Len() int { return len(p) } -func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) } -func (p IDSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/types/set.go b/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/types/set.go deleted file mode 100644 index 73ef431bef1f..000000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/types/set.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
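[Editor's note] pkg/srv, removed above, implements DNS SRV discovery: `GetClient` queries the `_<service>-ssl._tcp.<domain>` and `_<service>._tcp.<domain>` records and merges whatever answers into client endpoints. A sketch, assuming etcd's conventional `etcd-client` service name:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/srv"
)

func main() {
	// For "example.com" this resolves _etcd-client-ssl._tcp.example.com
	// (https endpoints) and _etcd-client._tcp.example.com (http endpoints);
	// per the code above, it only errors if both lookups fail.
	clients, err := srv.GetClient("etcd-client", "example.com")
	if err != nil {
		panic(err)
	}
	fmt.Println(clients.Endpoints)
}
```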
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "reflect" - "sort" - "sync" -) - -type Set interface { - Add(string) - Remove(string) - Contains(string) bool - Equals(Set) bool - Length() int - Values() []string - Copy() Set - Sub(Set) Set -} - -func NewUnsafeSet(values ...string) *unsafeSet { - set := &unsafeSet{make(map[string]struct{})} - for _, v := range values { - set.Add(v) - } - return set -} - -func NewThreadsafeSet(values ...string) *tsafeSet { - us := NewUnsafeSet(values...) - return &tsafeSet{us, sync.RWMutex{}} -} - -type unsafeSet struct { - d map[string]struct{} -} - -// Add adds a new value to the set (no-op if the value is already present) -func (us *unsafeSet) Add(value string) { - us.d[value] = struct{}{} -} - -// Remove removes the given value from the set -func (us *unsafeSet) Remove(value string) { - delete(us.d, value) -} - -// Contains returns whether the set contains the given value -func (us *unsafeSet) Contains(value string) (exists bool) { - _, exists = us.d[value] - return -} - -// ContainsAll returns whether the set contains all given values -func (us *unsafeSet) ContainsAll(values []string) bool { - for _, s := range values { - if !us.Contains(s) { - return false - } - } - return true -} - -// Equals returns whether the contents of two sets are identical -func (us *unsafeSet) Equals(other Set) bool { - v1 := sort.StringSlice(us.Values()) - v2 := sort.StringSlice(other.Values()) - v1.Sort() - v2.Sort() - return reflect.DeepEqual(v1, v2) -} - -// Length returns the number of elements in the set -func (us *unsafeSet) Length() int { - return len(us.d) -} - -// Values returns the values of the Set in an unspecified order. 
-func (us *unsafeSet) Values() (values []string) { - values = make([]string, 0) - for val := range us.d { - values = append(values, val) - } - return -} - -// Copy creates a new Set containing the values of the first -func (us *unsafeSet) Copy() Set { - cp := NewUnsafeSet() - for val := range us.d { - cp.Add(val) - } - - return cp -} - -// Sub removes all elements in other from the set -func (us *unsafeSet) Sub(other Set) Set { - oValues := other.Values() - result := us.Copy().(*unsafeSet) - - for _, val := range oValues { - if _, ok := result.d[val]; !ok { - continue - } - delete(result.d, val) - } - - return result -} - -type tsafeSet struct { - us *unsafeSet - m sync.RWMutex -} - -func (ts *tsafeSet) Add(value string) { - ts.m.Lock() - defer ts.m.Unlock() - ts.us.Add(value) -} - -func (ts *tsafeSet) Remove(value string) { - ts.m.Lock() - defer ts.m.Unlock() - ts.us.Remove(value) -} - -func (ts *tsafeSet) Contains(value string) (exists bool) { - ts.m.RLock() - defer ts.m.RUnlock() - return ts.us.Contains(value) -} - -func (ts *tsafeSet) Equals(other Set) bool { - ts.m.RLock() - defer ts.m.RUnlock() - return ts.us.Equals(other) -} - -func (ts *tsafeSet) Length() int { - ts.m.RLock() - defer ts.m.RUnlock() - return ts.us.Length() -} - -func (ts *tsafeSet) Values() (values []string) { - ts.m.RLock() - defer ts.m.RUnlock() - return ts.us.Values() -} - -func (ts *tsafeSet) Copy() Set { - ts.m.RLock() - defer ts.m.RUnlock() - usResult := ts.us.Copy().(*unsafeSet) - return &tsafeSet{usResult, sync.RWMutex{}} -} - -func (ts *tsafeSet) Sub(other Set) Set { - ts.m.RLock() - defer ts.m.RUnlock() - usResult := ts.us.Sub(other).(*unsafeSet) - return &tsafeSet{usResult, sync.RWMutex{}} -} diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/types/slice.go b/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/types/slice.go deleted file mode 100644 index 0dd9ca798ae4..000000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/types/slice.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -// Uint64Slice implements sort interface -type Uint64Slice []uint64 - -func (p Uint64Slice) Len() int { return len(p) } -func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/types/urls.go b/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/types/urls.go deleted file mode 100644 index 9e5d03ff6457..000000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/types/urls.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "errors" - "fmt" - "net" - "net/url" - "sort" - "strings" -) - -type URLs []url.URL - -func NewURLs(strs []string) (URLs, error) { - all := make([]url.URL, len(strs)) - if len(all) == 0 { - return nil, errors.New("no valid URLs given") - } - for i, in := range strs { - in = strings.TrimSpace(in) - u, err := url.Parse(in) - if err != nil { - return nil, err - } - if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" && u.Scheme != "unixs" { - return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in) - } - if _, _, err := net.SplitHostPort(u.Host); err != nil { - return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in) - } - if u.Path != "" { - return nil, fmt.Errorf("URL must not contain a path: %s", in) - } - all[i] = *u - } - us := URLs(all) - us.Sort() - - return us, nil -} - -func MustNewURLs(strs []string) URLs { - urls, err := NewURLs(strs) - if err != nil { - panic(err) - } - return urls -} - -func (us URLs) String() string { - return strings.Join(us.StringSlice(), ",") -} - -func (us *URLs) Sort() { - sort.Sort(us) -} -func (us URLs) Len() int { return len(us) } -func (us URLs) Less(i, j int) bool { return us[i].String() < us[j].String() } -func (us URLs) Swap(i, j int) { us[i], us[j] = us[j], us[i] } - -func (us URLs) StringSlice() []string { - out := make([]string, len(us)) - for i := range us { - out[i] = us[i].String() - } - - return out -} diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go b/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go deleted file mode 100644 index 47690cc381a3..000000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "fmt" - "sort" - "strings" -) - -// URLsMap is a map from a name to its URLs. -type URLsMap map[string]URLs - -// NewURLsMap returns a URLsMap instantiated from the given string, -// which consists of discovery-formatted names-to-URLs, like: -// mach0=http://1.1.1.1:2380,mach0=http://2.2.2.2::2380,mach1=http://3.3.3.3:2380,mach2=http://4.4.4.4:2380 -func NewURLsMap(s string) (URLsMap, error) { - m := parse(s) - - cl := URLsMap{} - for name, urls := range m { - us, err := NewURLs(urls) - if err != nil { - return nil, err - } - cl[name] = us - } - return cl, nil -} - -// NewURLsMapFromStringMap takes a map of strings and returns a URLsMap. 
The -// string values in the map can be multiple values separated by the sep string. -func NewURLsMapFromStringMap(m map[string]string, sep string) (URLsMap, error) { - var err error - um := URLsMap{} - for k, v := range m { - um[k], err = NewURLs(strings.Split(v, sep)) - if err != nil { - return nil, err - } - } - return um, nil -} - -// String turns URLsMap into discovery-formatted name-to-URLs sorted by name. -func (c URLsMap) String() string { - var pairs []string - for name, urls := range c { - for _, url := range urls { - pairs = append(pairs, fmt.Sprintf("%s=%s", name, url.String())) - } - } - sort.Strings(pairs) - return strings.Join(pairs, ",") -} - -// URLs returns a list of all URLs. -// The returned list is sorted in ascending lexicographical order. -func (c URLsMap) URLs() []string { - var urls []string - for _, us := range c { - for _, u := range us { - urls = append(urls, u.String()) - } - } - sort.Strings(urls) - return urls -} - -// Len returns the size of URLsMap. -func (c URLsMap) Len() int { - return len(c) -} - -// parse parses the given string and returns a map listing the values specified for each key. -func parse(s string) map[string][]string { - m := make(map[string][]string) - for s != "" { - key := s - if i := strings.IndexAny(key, ","); i >= 0 { - key, s = key[:i], key[i+1:] - } else { - s = "" - } - if key == "" { - continue - } - value := "" - if i := strings.Index(key, "="); i >= 0 { - key, value = key[:i], key[i+1:] - } - m[key] = append(m[key], value) - } - return m -} diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/version/version.go b/cluster-autoscaler/vendor/github.com/coreos/etcd/version/version.go deleted file mode 100644 index e09b88332a9e..000000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/etcd/version/version.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package version implements etcd version parsing and contains latest version -// information. -package version - -import ( - "fmt" - "strings" - - "github.com/coreos/go-semver/semver" -) - -var ( - // MinClusterVersion is the min cluster version this etcd binary is compatible with. - MinClusterVersion = "3.0.0" - Version = "3.2.24" - APIVersion = "unknown" - - // Git SHA Value will be set during build - GitSHA = "Not provided (use ./build instead of go build)" -) - -func init() { - ver, err := semver.NewVersion(Version) - if err == nil { - APIVersion = fmt.Sprintf("%d.%d", ver.Major, ver.Minor) - } -} - -type Versions struct { - Server string `json:"etcdserver"` - Cluster string `json:"etcdcluster"` - // TODO: raft state machine version -} - -// Cluster only keeps the major.minor. 
-func Cluster(v string) string { - vs := strings.Split(v, ".") - if len(vs) <= 2 { - return v - } - return fmt.Sprintf("%s.%s", vs[0], vs[1]) -} diff --git a/cluster-autoscaler/vendor/github.com/coreos/go-semver/LICENSE b/cluster-autoscaler/vendor/github.com/coreos/go-semver/LICENSE deleted file mode 100644 index d64569567334..000000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/go-semver/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/cluster-autoscaler/vendor/github.com/coreos/go-semver/semver/semver.go b/cluster-autoscaler/vendor/github.com/coreos/go-semver/semver/semver.go deleted file mode 100644 index f1f8ab797390..000000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/go-semver/semver/semver.go +++ /dev/null @@ -1,209 +0,0 @@ -package semver - -import ( - "bytes" - "errors" - "fmt" - "strconv" - "strings" -) - -type Version struct { - Major int64 - Minor int64 - Patch int64 - PreRelease PreRelease - Metadata string -} - -type PreRelease string - -func splitOff(input *string, delim string) (val string) { - parts := strings.SplitN(*input, delim, 2) - - if len(parts) == 2 { - *input = parts[0] - val = parts[1] - } - - return val -} - -func NewVersion(version string) (*Version, error) { - v := Version{} - - dotParts := strings.SplitN(version, ".", 3) - - if len(dotParts) != 3 { - return nil, errors.New(fmt.Sprintf("%s is not in dotted-tri format", version)) - } - - v.Metadata = splitOff(&dotParts[2], "+") - v.PreRelease = PreRelease(splitOff(&dotParts[2], "-")) - - parsed := make([]int64, 3, 3) - - for i, v := range dotParts[:3] { - val, err := strconv.ParseInt(v, 10, 64) - parsed[i] = val - if err != nil { - return nil, err - } - } - - v.Major = parsed[0] - v.Minor = parsed[1] - v.Patch = parsed[2] - - return &v, nil -} - -func Must(v *Version, err error) *Version { - if err != nil { - panic(err) - } - return v -} - -func (v *Version) String() string { - var buffer bytes.Buffer - - base := fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch) - buffer.WriteString(base) - - if v.PreRelease != "" { - buffer.WriteString(fmt.Sprintf("-%s", v.PreRelease)) - } - - if v.Metadata != "" { - buffer.WriteString(fmt.Sprintf("+%s", v.Metadata)) - } - - return buffer.String() -} - -func (v *Version) LessThan(versionB Version) bool { - versionA := *v - cmp := recursiveCompare(versionA.Slice(), versionB.Slice()) - - if cmp == 0 { - cmp = preReleaseCompare(versionA, versionB) - } - - if cmp == -1 { - return true - } - - return false -} - -/* Slice converts the comparable parts of the semver into a slice of strings */ -func (v *Version) Slice() []int64 { - return []int64{v.Major, v.Minor, v.Patch} -} - -func (p *PreRelease) Slice() []string { - preRelease := string(*p) - return strings.Split(preRelease, ".") -} - -func preReleaseCompare(versionA Version, versionB Version) int { - a := versionA.PreRelease - b := versionB.PreRelease - - /* Handle the case where if two versions are otherwise equal it is the - * one without a PreRelease that is greater */ - if len(a) == 0 && (len(b) > 0) { - return 1 - } else if len(b) == 0 && (len(a) > 0) { - return -1 - } - - // If there is a prelease, check and compare each part. 
- return recursivePreReleaseCompare(a.Slice(), b.Slice()) -} - -func recursiveCompare(versionA []int64, versionB []int64) int { - if len(versionA) == 0 { - return 0 - } - - a := versionA[0] - b := versionB[0] - - if a > b { - return 1 - } else if a < b { - return -1 - } - - return recursiveCompare(versionA[1:], versionB[1:]) -} - -func recursivePreReleaseCompare(versionA []string, versionB []string) int { - // Handle slice length disparity. - if len(versionA) == 0 { - // Nothing to compare too, so we return 0 - return 0 - } else if len(versionB) == 0 { - // We're longer than versionB so return 1. - return 1 - } - - a := versionA[0] - b := versionB[0] - - aInt := false; bInt := false - - aI, err := strconv.Atoi(versionA[0]) - if err == nil { - aInt = true - } - - bI, err := strconv.Atoi(versionB[0]) - if err == nil { - bInt = true - } - - // Handle Integer Comparison - if aInt && bInt { - if aI > bI { - return 1 - } else if aI < bI { - return -1 - } - } - - // Handle String Comparison - if a > b { - return 1 - } else if a < b { - return -1 - } - - return recursivePreReleaseCompare(versionA[1:], versionB[1:]) -} - -// BumpMajor increments the Major field by 1 and resets all other fields to their default values -func (v *Version) BumpMajor() { - v.Major += 1 - v.Minor = 0 - v.Patch = 0 - v.PreRelease = PreRelease("") - v.Metadata = "" -} - -// BumpMinor increments the Minor field by 1 and resets all other fields to their default values -func (v *Version) BumpMinor() { - v.Minor += 1 - v.Patch = 0 - v.PreRelease = PreRelease("") - v.Metadata = "" -} - -// BumpPatch increments the Patch field by 1 and resets all other fields to their default values -func (v *Version) BumpPatch() { - v.Patch += 1 - v.PreRelease = PreRelease("") - v.Metadata = "" -} diff --git a/cluster-autoscaler/vendor/github.com/coreos/go-semver/semver/sort.go b/cluster-autoscaler/vendor/github.com/coreos/go-semver/semver/sort.go deleted file mode 100644 index 86203007ae9f..000000000000 --- a/cluster-autoscaler/vendor/github.com/coreos/go-semver/semver/sort.go +++ /dev/null @@ -1,24 +0,0 @@ -package semver - -import ( - "sort" -) - -type Versions []*Version - -func (s Versions) Len() int { - return len(s) -} - -func (s Versions) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s Versions) Less(i, j int) bool { - return s[i].LessThan(*s[j]) -} - -// Sort sorts the given slice of Version -func Sort(versions []*Version) { - sort.Sort(Versions(versions)) -} diff --git a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/NOTICE b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/NOTICE new file mode 100644 index 000000000000..23a0ada2fbb5 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2018 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go index ba6d41d85baa..ba4ae31f19ba 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go +++ b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go @@ -1,4 +1,5 @@ // Copyright 2014 Docker, Inc. +// Copyright 2015-2018 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -13,7 +14,11 @@ // limitations under the License. 
// -// Code forked from Docker project +// Package daemon provides a Go implementation of the sd_notify protocol. +// It can be used to inform systemd of service start-up completion, watchdog +// events, and other status changes. +// +// https://www.freedesktop.org/software/systemd/man/sd_notify.html#Description package daemon import ( @@ -21,6 +26,25 @@ import ( "os" ) +const ( + // SdNotifyReady tells the service manager that service startup is finished + // or the service finished loading its configuration. + SdNotifyReady = "READY=1" + + // SdNotifyStopping tells the service manager that the service is beginning + // its shutdown. + SdNotifyStopping = "STOPPING=1" + + // SdNotifyReloading tells the service manager that this service is + // reloading its configuration. Note that you must call SdNotifyReady when + // it completed reloading. + SdNotifyReloading = "RELOADING=1" + + // SdNotifyWatchdog tells the service manager to update the watchdog + // timestamp for the service. + SdNotifyWatchdog = "WATCHDOG=1" +) + // SdNotify sends a message to the init daemon. It is common to ignore the error. // If `unsetEnvironment` is true, the environment variable `NOTIFY_SOCKET` // will be unconditionally unset. @@ -29,7 +53,7 @@ import ( // (false, nil) - notification not supported (i.e. NOTIFY_SOCKET is unset) // (false, err) - notification supported, but failure happened (e.g. error connecting to NOTIFY_SOCKET or while sending data) // (true, nil) - notification supported, data has been sent -func SdNotify(unsetEnvironment bool, state string) (sent bool, err error) { +func SdNotify(unsetEnvironment bool, state string) (bool, error) { socketAddr := &net.UnixAddr{ Name: os.Getenv("NOTIFY_SOCKET"), Net: "unixgram", @@ -41,10 +65,9 @@ func SdNotify(unsetEnvironment bool, state string) (sent bool, err error) { } if unsetEnvironment { - err = os.Unsetenv("NOTIFY_SOCKET") - } - if err != nil { - return false, err + if err := os.Unsetenv("NOTIFY_SOCKET"); err != nil { + return false, err + } } conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr) @@ -54,9 +77,7 @@ func SdNotify(unsetEnvironment bool, state string) (sent bool, err error) { } defer conn.Close() - _, err = conn.Write([]byte(state)) - // Error sending the message - if err != nil { + if _, err = conn.Write([]byte(state)); err != nil { return false, err } return true, nil diff --git a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/daemon/watchdog.go b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/daemon/watchdog.go index 35a92e6e67db..7a0e0d3a51b1 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/daemon/watchdog.go +++ b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/daemon/watchdog.go @@ -21,10 +21,11 @@ import ( "time" ) -// SdWatchdogEnabled return watchdog information for a service. -// Process should send daemon.SdNotify("WATCHDOG=1") every time / 2. -// If `unsetEnvironment` is true, the environment variables `WATCHDOG_USEC` -// and `WATCHDOG_PID` will be unconditionally unset. +// SdWatchdogEnabled returns watchdog information for a service. +// Processes should call daemon.SdNotify(false, daemon.SdNotifyWatchdog) every +// time / 2. +// If `unsetEnvironment` is true, the environment variables `WATCHDOG_USEC` and +// `WATCHDOG_PID` will be unconditionally unset. // // It returns one of the following: // (0, nil) - watchdog isn't enabled or we aren't the watched PID. 
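The go-systemd hunks above expand the vendored `daemon` package: `sdnotify.go` gains named state constants (`SdNotifyReady`, `SdNotifyStopping`, `SdNotifyReloading`, `SdNotifyWatchdog`) and `watchdog.go`'s doc comment now spells out the contract that a watched process should send `WATCHDOG=1` every `time / 2`. A minimal sketch of how a consumer might drive that API, assuming the upstream `github.com/coreos/go-systemd/daemon` import path and the `SdWatchdogEnabled(bool) (time.Duration, error)` signature the doc comment above describes; this is illustrative only and not part of the diff:

```go
package main

import (
	"log"
	"time"

	"github.com/coreos/go-systemd/daemon"
)

func main() {
	// Tell the service manager that start-up is finished.
	if _, err := daemon.SdNotify(false, daemon.SdNotifyReady); err != nil {
		log.Printf("sd_notify READY failed: %v", err)
	}

	// interval is derived from WATCHDOG_USEC; 0 means no watchdog is
	// configured for this service (or we are not the watched PID).
	interval, err := daemon.SdWatchdogEnabled(false)
	if err != nil || interval == 0 {
		return
	}

	// Per the doc comment above, update the watchdog timestamp every
	// interval / 2 so systemd does not consider the service hung.
	for range time.Tick(interval / 2) {
		daemon.SdNotify(false, daemon.SdNotifyWatchdog)
	}
}
```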
diff --git a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/dbus/dbus.go b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/dbus/dbus.go index c1694fb522e7..1d54810aff4e 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/dbus/dbus.go +++ b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/dbus/dbus.go @@ -16,6 +16,7 @@ package dbus import ( + "encoding/hex" "fmt" "os" "strconv" @@ -60,6 +61,27 @@ func PathBusEscape(path string) string { return string(n) } +// pathBusUnescape is the inverse of PathBusEscape. +func pathBusUnescape(path string) string { + if path == "_" { + return "" + } + n := []byte{} + for i := 0; i < len(path); i++ { + c := path[i] + if c == '_' && i+2 < len(path) { + res, err := hex.DecodeString(path[i+1 : i+3]) + if err == nil { + n = append(n, res...) + } + i += 2 + } else { + n = append(n, c) + } + } + return string(n) +} + // Conn is a connection to systemd's dbus endpoint. type Conn struct { // sysconn/sysobj are only used to call dbus methods @@ -74,13 +96,18 @@ type Conn struct { jobs map[dbus.ObjectPath]chan<- string sync.Mutex } - subscriber struct { + subStateSubscriber struct { updateCh chan<- *SubStateUpdate errCh chan<- error sync.Mutex ignore map[dbus.ObjectPath]int64 cleanIgnore int64 } + propertiesSubscriber struct { + updateCh chan<- *PropertiesUpdate + errCh chan<- error + sync.Mutex + } } // New establishes a connection to any available bus and authenticates. @@ -152,7 +179,7 @@ func NewConnection(dialBus func() (*dbus.Conn, error)) (*Conn, error) { sigobj: systemdObject(sigconn), } - c.subscriber.ignore = make(map[dbus.ObjectPath]int64) + c.subStateSubscriber.ignore = make(map[dbus.ObjectPath]int64) c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string) // Setup the listeners on jobs so that we can get completions diff --git a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/dbus/methods.go b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/dbus/methods.go index ab17f7cc75a4..0b4207229f77 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/dbus/methods.go +++ b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/dbus/methods.go @@ -1,4 +1,4 @@ -// Copyright 2015 CoreOS, Inc. +// Copyright 2015, 2018 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,6 +16,7 @@ package dbus import ( "errors" + "fmt" "path" "strconv" @@ -148,14 +149,27 @@ func (c *Conn) ResetFailedUnit(name string) error { return c.sysobj.Call("org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store() } -// getProperties takes the unit name and returns all of its dbus object properties, for the given dbus interface -func (c *Conn) getProperties(unit string, dbusInterface string) (map[string]interface{}, error) { +// SystemState returns the systemd state. Equivalent to `systemctl is-system-running`. 
+func (c *Conn) SystemState() (*Property, error) { + var err error + var prop dbus.Variant + + obj := c.sysconn.Object("org.freedesktop.systemd1", "/org/freedesktop/systemd1") + err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, "org.freedesktop.systemd1.Manager", "SystemState").Store(&prop) + if err != nil { + return nil, err + } + + return &Property{Name: "SystemState", Value: prop}, nil +} + +// getProperties takes the unit path and returns all of its dbus object properties, for the given dbus interface +func (c *Conn) getProperties(path dbus.ObjectPath, dbusInterface string) (map[string]interface{}, error) { var err error var props map[string]dbus.Variant - path := unitPath(unit) if !path.IsValid() { - return nil, errors.New("invalid unit name: " + unit) + return nil, fmt.Errorf("invalid unit name: %v", path) } obj := c.sysconn.Object("org.freedesktop.systemd1", path) @@ -172,9 +186,15 @@ func (c *Conn) getProperties(unit string, dbusInterface string) (map[string]inte return out, nil } -// GetUnitProperties takes the unit name and returns all of its dbus object properties. +// GetUnitProperties takes the (unescaped) unit name and returns all of its dbus object properties. func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) { - return c.getProperties(unit, "org.freedesktop.systemd1.Unit") + path := unitPath(unit) + return c.getProperties(path, "org.freedesktop.systemd1.Unit") +} + +// GetUnitProperties takes the (escaped) unit path and returns all of its dbus object properties. +func (c *Conn) GetUnitPathProperties(path dbus.ObjectPath) (map[string]interface{}, error) { + return c.getProperties(path, "org.freedesktop.systemd1.Unit") } func (c *Conn) getProperty(unit string, dbusInterface string, propertyName string) (*Property, error) { @@ -208,7 +228,8 @@ func (c *Conn) GetServiceProperty(service string, propertyName string) (*Propert // Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope // return "dbus.Error: Unknown interface" if the unitType is not the correct type of the unit func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) { - return c.getProperties(unit, "org.freedesktop.systemd1."+unitType) + path := unitPath(unit) + return c.getProperties(path, "org.freedesktop.systemd1."+unitType) } // SetUnitProperties() may be used to modify certain unit properties at runtime. @@ -292,6 +313,7 @@ func (c *Conn) ListUnitsByPatterns(states []string, patterns []string) ([]UnitSt // names and returns an UnitStatus array. Comparing to ListUnitsByPatterns // method, this method returns statuses even for inactive or non-existing // units. Input array should contain exact unit names, but not patterns. 
+// Note: Requires systemd v230 or higher func (c *Conn) ListUnitsByNames(units []string) ([]UnitStatus, error) { return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByNames", 0, units).Store) } @@ -563,3 +585,8 @@ func (c *Conn) Reload() error { func unitPath(name string) dbus.ObjectPath { return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name)) } + +// unitName returns the unescaped base element of the supplied escaped path +func unitName(dpath dbus.ObjectPath) string { + return pathBusUnescape(path.Base(string(dpath))) +} diff --git a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/dbus/set.go b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/dbus/set.go index f92e6fbed1ea..17c5d485657d 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/dbus/set.go +++ b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/dbus/set.go @@ -36,7 +36,7 @@ func (s *set) Length() int { } func (s *set) Values() (values []string) { - for val, _ := range s.data { + for val := range s.data { values = append(values, val) } return diff --git a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/dbus/subscription.go b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/dbus/subscription.go index 996451445c06..70e63a6f16e1 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/dbus/subscription.go +++ b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/dbus/subscription.go @@ -16,6 +16,7 @@ package dbus import ( "errors" + "log" "time" "github.com/godbus/dbus" @@ -36,22 +37,12 @@ func (c *Conn) Subscribe() error { c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, "type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'") - err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store() - if err != nil { - return err - } - - return nil + return c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store() } // Unsubscribe this connection from systemd dbus events. func (c *Conn) Unsubscribe() error { - err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store() - if err != nil { - return err - } - - return nil + return c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store() } func (c *Conn) dispatch() { @@ -70,7 +61,8 @@ func (c *Conn) dispatch() { c.jobComplete(signal) } - if c.subscriber.updateCh == nil { + if c.subStateSubscriber.updateCh == nil && + c.propertiesSubscriber.updateCh == nil { continue } @@ -84,6 +76,12 @@ func (c *Conn) dispatch() { case "org.freedesktop.DBus.Properties.PropertiesChanged": if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" { unitPath = signal.Path + + if len(signal.Body) >= 2 { + if changed, ok := signal.Body[1].(map[string]dbus.Variant); ok { + c.sendPropertiesUpdate(unitPath, changed) + } + } } } @@ -169,42 +167,80 @@ type SubStateUpdate struct { // is full, it attempts to write an error to errCh; if errCh is full, the error // passes silently. 
func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) { - c.subscriber.Lock() - defer c.subscriber.Unlock() - c.subscriber.updateCh = updateCh - c.subscriber.errCh = errCh + if c == nil { + msg := "nil receiver" + select { + case errCh <- errors.New(msg): + default: + log.Printf("full error channel while reporting: %s\n", msg) + } + return + } + + c.subStateSubscriber.Lock() + defer c.subStateSubscriber.Unlock() + c.subStateSubscriber.updateCh = updateCh + c.subStateSubscriber.errCh = errCh } -func (c *Conn) sendSubStateUpdate(path dbus.ObjectPath) { - c.subscriber.Lock() - defer c.subscriber.Unlock() +func (c *Conn) sendSubStateUpdate(unitPath dbus.ObjectPath) { + c.subStateSubscriber.Lock() + defer c.subStateSubscriber.Unlock() + + if c.subStateSubscriber.updateCh == nil { + return + } - if c.shouldIgnore(path) { + isIgnored := c.shouldIgnore(unitPath) + defer c.cleanIgnore() + if isIgnored { return } - info, err := c.GetUnitProperties(string(path)) + info, err := c.GetUnitPathProperties(unitPath) if err != nil { select { - case c.subscriber.errCh <- err: + case c.subStateSubscriber.errCh <- err: default: + log.Printf("full error channel while reporting: %s\n", err) } + return } + defer c.updateIgnore(unitPath, info) - name := info["Id"].(string) - substate := info["SubState"].(string) + name, ok := info["Id"].(string) + if !ok { + msg := "failed to cast info.Id" + select { + case c.subStateSubscriber.errCh <- errors.New(msg): + default: + log.Printf("full error channel while reporting: %s\n", err) + } + return + } + substate, ok := info["SubState"].(string) + if !ok { + msg := "failed to cast info.SubState" + select { + case c.subStateSubscriber.errCh <- errors.New(msg): + default: + log.Printf("full error channel while reporting: %s\n", msg) + } + return + } update := &SubStateUpdate{name, substate} select { - case c.subscriber.updateCh <- update: + case c.subStateSubscriber.updateCh <- update: default: + msg := "update channel is full" select { - case c.subscriber.errCh <- errors.New("update channel full!"): + case c.subStateSubscriber.errCh <- errors.New(msg): default: + log.Printf("full error channel while reporting: %s\n", msg) } + return } - - c.updateIgnore(path, info) } // The ignore functions work around a wart in the systemd dbus interface. @@ -222,29 +258,76 @@ func (c *Conn) sendSubStateUpdate(path dbus.ObjectPath) { // the properties). 
func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool { - t, ok := c.subscriber.ignore[path] + t, ok := c.subStateSubscriber.ignore[path] return ok && t >= time.Now().UnixNano() } func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) { - c.cleanIgnore() + loadState, ok := info["LoadState"].(string) + if !ok { + return + } // unit is unloaded - it will trigger bad systemd dbus behavior - if info["LoadState"].(string) == "not-found" { - c.subscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval + if loadState == "not-found" { + c.subStateSubscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval } } // without this, ignore would grow unboundedly over time func (c *Conn) cleanIgnore() { now := time.Now().UnixNano() - if c.subscriber.cleanIgnore < now { - c.subscriber.cleanIgnore = now + cleanIgnoreInterval + if c.subStateSubscriber.cleanIgnore < now { + c.subStateSubscriber.cleanIgnore = now + cleanIgnoreInterval - for p, t := range c.subscriber.ignore { + for p, t := range c.subStateSubscriber.ignore { if t < now { - delete(c.subscriber.ignore, p) + delete(c.subStateSubscriber.ignore, p) } } } } + +// PropertiesUpdate holds a map of a unit's changed properties +type PropertiesUpdate struct { + UnitName string + Changed map[string]dbus.Variant +} + +// SetPropertiesSubscriber writes to updateCh when any unit's properties +// change. Every property change reported by systemd will be sent; that is, no +// transitions will be "missed" (as they might be with SetSubStateSubscriber). +// However, state changes will only be written to the channel with non-blocking +// writes. If updateCh is full, it attempts to write an error to errCh; if +// errCh is full, the error passes silently. +func (c *Conn) SetPropertiesSubscriber(updateCh chan<- *PropertiesUpdate, errCh chan<- error) { + c.propertiesSubscriber.Lock() + defer c.propertiesSubscriber.Unlock() + c.propertiesSubscriber.updateCh = updateCh + c.propertiesSubscriber.errCh = errCh +} + +// we don't need to worry about shouldIgnore() here because +// sendPropertiesUpdate doesn't call GetProperties() +func (c *Conn) sendPropertiesUpdate(unitPath dbus.ObjectPath, changedProps map[string]dbus.Variant) { + c.propertiesSubscriber.Lock() + defer c.propertiesSubscriber.Unlock() + + if c.propertiesSubscriber.updateCh == nil { + return + } + + update := &PropertiesUpdate{unitName(unitPath), changedProps} + + select { + case c.propertiesSubscriber.updateCh <- update: + default: + msg := "update channel is full" + select { + case c.propertiesSubscriber.errCh <- errors.New(msg): + default: + log.Printf("full error channel while reporting: %s\n", msg) + } + return + } +} diff --git a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/util/util_cgo.go b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/util/util_cgo.go index 22c0d6099df5..6269bc73236e 100644 --- a/cluster-autoscaler/vendor/github.com/coreos/go-systemd/util/util_cgo.go +++ b/cluster-autoscaler/vendor/github.com/coreos/go-systemd/util/util_cgo.go @@ -122,11 +122,12 @@ func runningFromSystemService() (ret bool, err error) { errno := C.my_sd_pid_get_owner_uid(sd_pid_get_owner_uid, 0, &uid) serrno := syscall.Errno(-errno) // when we're running from a unit file, sd_pid_get_owner_uid returns - // ENOENT (systemd <220) or ENXIO (systemd >=220) + // ENOENT (systemd <220), ENXIO (systemd 220-223), or ENODATA + // (systemd >=234) switch { case errno >= 0: ret = false - case serrno == syscall.ENOENT, serrno == syscall.ENXIO: + case serrno == syscall.ENOENT, 
serrno == syscall.ENXIO, serrno == syscall.ENODATA: // Since the implementation of sessions in systemd relies on // the `pam_systemd` module, using the sd_pid_get_owner_uid // heuristic alone can result in false positives if that module diff --git a/cluster-autoscaler/vendor/github.com/docker/libnetwork/ipvs/ipvs.go b/cluster-autoscaler/vendor/github.com/docker/libnetwork/ipvs/ipvs.go index ebcdd808c34a..ab10717089ef 100644 --- a/cluster-autoscaler/vendor/github.com/docker/libnetwork/ipvs/ipvs.go +++ b/cluster-autoscaler/vendor/github.com/docker/libnetwork/ipvs/ipvs.go @@ -5,12 +5,19 @@ package ipvs import ( "net" "syscall" + "time" "fmt" + "github.com/vishvananda/netlink/nl" "github.com/vishvananda/netns" ) +const ( + netlinkRecvSocketsTimeout = 3 * time.Second + netlinkSendSocketTimeout = 30 * time.Second +) + // Service defines an IPVS service in its entirety. type Service struct { // Virtual service address. @@ -46,13 +53,15 @@ type SvcStats struct { // Destination defines an IPVS destination (real server) in its // entirety. type Destination struct { - Address net.IP - Port uint16 - Weight int - ConnectionFlags uint32 - AddressFamily uint16 - UpperThreshold uint32 - LowerThreshold uint32 + Address net.IP + Port uint16 + Weight int + ConnectionFlags uint32 + AddressFamily uint16 + UpperThreshold uint32 + LowerThreshold uint32 + ActiveConnections int + InactiveConnections int } // Handle provides a namespace specific ipvs handle to program ipvs @@ -82,6 +91,15 @@ func New(path string) (*Handle, error) { if err != nil { return nil, err } + // Add operation timeout to avoid deadlocks + tv := syscall.NsecToTimeval(netlinkSendSocketTimeout.Nanoseconds()) + if err := sock.SetSendTimeout(&tv); err != nil { + return nil, err + } + tv = syscall.NsecToTimeval(netlinkRecvSocketsTimeout.Nanoseconds()) + if err := sock.SetReceiveTimeout(&tv); err != nil { + return nil, err + } return &Handle{sock: sock}, nil } diff --git a/cluster-autoscaler/vendor/github.com/docker/libnetwork/ipvs/netlink.go b/cluster-autoscaler/vendor/github.com/docker/libnetwork/ipvs/netlink.go index 2089283d14e4..3c7b1a562ae9 100644 --- a/cluster-autoscaler/vendor/github.com/docker/libnetwork/ipvs/netlink.go +++ b/cluster-autoscaler/vendor/github.com/docker/libnetwork/ipvs/netlink.go @@ -100,7 +100,7 @@ func fillService(s *Service) nl.NetlinkRequestData { return cmdAttr } -func fillDestinaton(d *Destination) nl.NetlinkRequestData { +func fillDestination(d *Destination) nl.NetlinkRequestData { cmdAttr := nl.NewRtAttr(ipvsCmdAttrDest, nil) nl.NewRtAttrChild(cmdAttr, ipvsDestAttrAddress, rawIPData(d.Address)) @@ -134,7 +134,7 @@ func (i *Handle) doCmdwithResponse(s *Service, d *Destination, cmd uint8) ([][]b } } else { - req.AddData(fillDestinaton(d)) + req.AddData(fillDestination(d)) } res, err := execute(i.sock, req, 0) @@ -203,10 +203,6 @@ func newGenlRequest(familyID int, cmd uint8) *nl.NetlinkRequest { } func execute(s *nl.NetlinkSocket, req *nl.NetlinkRequest, resType uint16) ([][]byte, error) { - var ( - err error - ) - if err := s.Send(req); err != nil { return nil, err } @@ -222,6 +218,13 @@ done: for { msgs, err := s.Receive() if err != nil { + if s.GetFd() == -1 { + return nil, fmt.Errorf("Socket got closed on receive") + } + if err == syscall.EAGAIN { + // timeout fired + continue + } return nil, err } for _, m := range msgs { @@ -436,6 +439,10 @@ func assembleDestination(attrs []syscall.NetlinkRouteAttr) (*Destination, error) d.LowerThreshold = native.Uint32(attr.Value) case ipvsDestAttrAddressFamily: d.AddressFamily 
= native.Uint16(attr.Value) + case ipvsDestAttrActiveConnections: + d.ActiveConnections = int(native.Uint16(attr.Value)) + case ipvsDestAttrInactiveConnections: + d.InactiveConnections = int(native.Uint16(attr.Value)) } } return &d, nil diff --git a/cluster-autoscaler/vendor/github.com/evanphx/json-patch/README.md b/cluster-autoscaler/vendor/github.com/evanphx/json-patch/README.md index 078629004d76..13b90420ca14 100644 --- a/cluster-autoscaler/vendor/github.com/evanphx/json-patch/README.md +++ b/cluster-autoscaler/vendor/github.com/evanphx/json-patch/README.md @@ -15,7 +15,7 @@ go get -u github.com/evanphx/json-patch ``` **Stable Versions**: -* Version 3: `go get -u gopkg.in/evanphx/json-patch.v3` +* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4` (previous versions below `v3` are unavailable) diff --git a/cluster-autoscaler/vendor/github.com/evanphx/json-patch/patch.go b/cluster-autoscaler/vendor/github.com/evanphx/json-patch/patch.go index 1a3aa387eef8..3d76e9e38b0c 100644 --- a/cluster-autoscaler/vendor/github.com/evanphx/json-patch/patch.go +++ b/cluster-autoscaler/vendor/github.com/evanphx/json-patch/patch.go @@ -204,7 +204,7 @@ func (n *lazyNode) equal(o *lazyNode) bool { } func (o operation) kind() string { - if obj, ok := o["op"]; ok { + if obj, ok := o["op"]; ok && obj != nil { var op string err := json.Unmarshal(*obj, &op) @@ -220,7 +220,7 @@ func (o operation) kind() string { } func (o operation) path() string { - if obj, ok := o["path"]; ok { + if obj, ok := o["path"]; ok && obj != nil { var op string err := json.Unmarshal(*obj, &op) @@ -236,7 +236,7 @@ func (o operation) path() string { } func (o operation) from() string { - if obj, ok := o["from"]; ok { + if obj, ok := o["from"]; ok && obj != nil { var op string err := json.Unmarshal(*obj, &op) @@ -389,17 +389,13 @@ func (d *partialArray) add(key string, val *lazyNode) error { cur := *d - if idx < 0 { - idx *= -1 - - if idx > len(ary) { - return fmt.Errorf("Unable to access invalid index: %d", idx) - } - idx = len(ary) - idx - } - if idx < 0 || idx >= len(ary) || idx > len(cur) { + if idx < -len(ary) || idx >= len(ary) { return fmt.Errorf("Unable to access invalid index: %d", idx) } + + if idx < 0 { + idx += len(ary) + } copy(ary[0:idx], cur[0:idx]) ary[idx] = val copy(ary[idx+1:], cur[idx:]) @@ -430,9 +426,12 @@ func (d *partialArray) remove(key string) error { cur := *d - if idx >= len(cur) { + if idx < -len(cur) || idx >= len(cur) { return fmt.Errorf("Unable to remove invalid index: %d", idx) } + if idx < 0 { + idx += len(cur) + } ary := make([]*lazyNode, len(cur)-1) @@ -450,7 +449,7 @@ func (p Patch) add(doc *container, op operation) error { con, key := findObject(doc, path) if con == nil { - return fmt.Errorf("jsonpatch add operation does not apply: doc is missing path: %s", path) + return fmt.Errorf("jsonpatch add operation does not apply: doc is missing path: \"%s\"", path) } return con.add(key, op.value()) @@ -462,7 +461,7 @@ func (p Patch) remove(doc *container, op operation) error { con, key := findObject(doc, path) if con == nil { - return fmt.Errorf("jsonpatch remove operation does not apply: doc is missing path: %s", path) + return fmt.Errorf("jsonpatch remove operation does not apply: doc is missing path: \"%s\"", path) } return con.remove(key) @@ -477,8 +476,8 @@ func (p Patch) replace(doc *container, op operation) error { return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing path: %s", path) } - val, ok := con.get(key) - if val == nil || ok != nil { + _, ok := con.get(key) + if 
ok != nil { return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing key: %s", path) } @@ -535,6 +534,8 @@ func (p Patch) test(doc *container, op operation) error { return nil } return fmt.Errorf("Testing value %s failed", path) + } else if op.value() == nil { + return fmt.Errorf("Testing value %s failed", path) } if val.equal(op.value()) { diff --git a/cluster-autoscaler/vendor/github.com/ghodss/yaml/.travis.yml b/cluster-autoscaler/vendor/github.com/ghodss/yaml/.travis.yml index 0e9d6edc010a..930860e0a808 100644 --- a/cluster-autoscaler/vendor/github.com/ghodss/yaml/.travis.yml +++ b/cluster-autoscaler/vendor/github.com/ghodss/yaml/.travis.yml @@ -1,7 +1,8 @@ language: go go: - - 1.3 - - 1.4 + - "1.3" + - "1.4" + - "1.10" script: - go test - go build diff --git a/cluster-autoscaler/vendor/github.com/ghodss/yaml/README.md b/cluster-autoscaler/vendor/github.com/ghodss/yaml/README.md index f8f7e369549c..0200f75b4d12 100644 --- a/cluster-autoscaler/vendor/github.com/ghodss/yaml/README.md +++ b/cluster-autoscaler/vendor/github.com/ghodss/yaml/README.md @@ -4,13 +4,13 @@ ## Introduction -A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. +A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). ## Compatibility -This package uses [go-yaml v2](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility). +This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility). ## Caveats @@ -44,6 +44,8 @@ import "github.com/ghodss/yaml" Usage is very similar to the JSON library: ```go +package main + import ( "fmt" @@ -51,8 +53,8 @@ import ( ) type Person struct { - Name string `json:"name"` // Affects YAML field names too. - Age int `json:"name"` + Name string `json:"name"` // Affects YAML field names too. + Age int `json:"age"` } func main() { @@ -65,13 +67,13 @@ func main() { } fmt.Println(string(y)) /* Output: - name: John age: 30 + name: John */ // Unmarshal the YAML back into a Person struct. 
var p2 Person - err := yaml.Unmarshal(y, &p2) + err = yaml.Unmarshal(y, &p2) if err != nil { fmt.Printf("err: %v\n", err) return @@ -86,11 +88,14 @@ func main() { `yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available: ```go +package main + import ( "fmt" "github.com/ghodss/yaml" ) + func main() { j := []byte(`{"name": "John", "age": 30}`) y, err := yaml.JSONToYAML(j) diff --git a/cluster-autoscaler/vendor/github.com/ghodss/yaml/fields.go b/cluster-autoscaler/vendor/github.com/ghodss/yaml/fields.go index 0bd3c2b46267..58600740266c 100644 --- a/cluster-autoscaler/vendor/github.com/ghodss/yaml/fields.go +++ b/cluster-autoscaler/vendor/github.com/ghodss/yaml/fields.go @@ -45,7 +45,11 @@ func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.Te break } if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) + if v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } else { + v = reflect.New(v.Type().Elem()) + } } if v.Type().NumMethod() > 0 { if u, ok := v.Interface().(json.Unmarshaler); ok { diff --git a/cluster-autoscaler/vendor/github.com/ghodss/yaml/yaml.go b/cluster-autoscaler/vendor/github.com/ghodss/yaml/yaml.go index c02beacb9a41..6e7f14fc7fb7 100644 --- a/cluster-autoscaler/vendor/github.com/ghodss/yaml/yaml.go +++ b/cluster-autoscaler/vendor/github.com/ghodss/yaml/yaml.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/json" "fmt" + "io" "reflect" "strconv" @@ -15,26 +16,30 @@ import ( func Marshal(o interface{}) ([]byte, error) { j, err := json.Marshal(o) if err != nil { - return nil, fmt.Errorf("error marshaling into JSON: ", err) + return nil, fmt.Errorf("error marshaling into JSON: %v", err) } y, err := JSONToYAML(j) if err != nil { - return nil, fmt.Errorf("error converting JSON to YAML: ", err) + return nil, fmt.Errorf("error converting JSON to YAML: %v", err) } return y, nil } -// Converts YAML to JSON then uses JSON to unmarshal into an object. -func Unmarshal(y []byte, o interface{}) error { +// JSONOpt is a decoding option for decoding from JSON format. +type JSONOpt func(*json.Decoder) *json.Decoder + +// Unmarshal converts YAML to JSON then uses JSON to unmarshal into an object, +// optionally configuring the behavior of the JSON unmarshal. +func Unmarshal(y []byte, o interface{}, opts ...JSONOpt) error { vo := reflect.ValueOf(o) - j, err := yamlToJSON(y, &vo) + j, err := yamlToJSON(y, &vo, yaml.Unmarshal) if err != nil { return fmt.Errorf("error converting YAML to JSON: %v", err) } - err = json.Unmarshal(j, o) + err = jsonUnmarshal(bytes.NewReader(j), o, opts...) if err != nil { return fmt.Errorf("error unmarshaling JSON: %v", err) } @@ -42,13 +47,28 @@ func Unmarshal(y []byte, o interface{}) error { return nil } +// jsonUnmarshal unmarshals the JSON byte stream from the given reader into the +// object, optionally applying decoder options prior to decoding. We are not +// using json.Unmarshal directly as we want the chance to pass in non-default +// options. +func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error { + d := json.NewDecoder(r) + for _, opt := range opts { + d = opt(d) + } + if err := d.Decode(&o); err != nil { + return fmt.Errorf("while decoding JSON: %v", err) + } + return nil +} + // Convert JSON to YAML. func JSONToYAML(j []byte) ([]byte, error) { // Convert the JSON to an object. var jsonObj interface{} // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the // Go JSON library doesn't try to pick the right number type (int, float, - // etc.) 
when unmarshling to interface{}, it just picks float64 + // etc.) when unmarshalling to interface{}, it just picks float64 // universally. go-yaml does go through the effort of picking the right // number type, so we can preserve number type throughout this process. err := yaml.Unmarshal(j, &jsonObj) @@ -60,8 +80,8 @@ func JSONToYAML(j []byte) ([]byte, error) { return yaml.Marshal(jsonObj) } -// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through -// this method should be a no-op. +// YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML, +// passing JSON through this method should be a no-op. // // Things YAML can do that are not supported by JSON: // * In YAML you can have binary and null keys in your maps. These are invalid @@ -70,14 +90,22 @@ func JSONToYAML(j []byte) ([]byte, error) { // use binary data with this library, encode the data as base64 as usual but do // not use the !!binary tag in your YAML. This will ensure the original base64 // encoded data makes it all the way through to the JSON. +// +// For strict decoding of YAML, use YAMLToJSONStrict. func YAMLToJSON(y []byte) ([]byte, error) { - return yamlToJSON(y, nil) + return yamlToJSON(y, nil, yaml.Unmarshal) +} + +// YAMLToJSONStrict is like YAMLToJSON but enables strict YAML decoding, +// returning an error on any duplicate field names. +func YAMLToJSONStrict(y []byte) ([]byte, error) { + return yamlToJSON(y, nil, yaml.UnmarshalStrict) } -func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) { +func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, interface{}) error) ([]byte, error) { // Convert the YAML to an object. var yamlObj interface{} - err := yaml.Unmarshal(y, &yamlObj) + err := yamlUnmarshal(y, &yamlObj) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/github.com/ghodss/yaml/yaml_go110.go b/cluster-autoscaler/vendor/github.com/ghodss/yaml/yaml_go110.go new file mode 100644 index 000000000000..ab3e06a222a6 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/ghodss/yaml/yaml_go110.go @@ -0,0 +1,14 @@ +// This file contains changes that are only compatible with go 1.10 and onwards. + +// +build go1.10 + +package yaml + +import "encoding/json" + +// DisallowUnknownFields configures the JSON decoder to error out if unknown +// fields come along, instead of dropping them by default. +func DisallowUnknownFields(d *json.Decoder) *json.Decoder { + d.DisallowUnknownFields() + return d +} diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/.drone.yml b/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/.drone.yml deleted file mode 100644 index cb8c7b50ad14..000000000000 --- a/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/.drone.yml +++ /dev/null @@ -1,32 +0,0 @@ -clone: - path: github.com/go-openapi/jsonpointer - -matrix: - GO_VERSION: - - "1.6" - -build: - integration: - image: golang:$$GO_VERSION - pull: true - commands: - - go get -u github.com/stretchr/testify/assert - - go get -u github.com/go-openapi/swag - - go test -race - - go test -v -cover -coverprofile=coverage.out -covermode=count ./... 
- -notify: - slack: - channel: bots - webhook_url: $$SLACK_URL - username: drone - -publish: - coverage: - server: https://coverage.vmware.run - token: $$GITHUB_TOKEN - # threshold: 70 - # must_increase: true - when: - matrix: - GO_VERSION: "1.6" diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/.editorconfig b/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/.editorconfig new file mode 100644 index 000000000000..3152da69a5d7 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/.pullapprove.yml b/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/.pullapprove.yml deleted file mode 100644 index 5ec183e22442..000000000000 --- a/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/.pullapprove.yml +++ /dev/null @@ -1,13 +0,0 @@ -approve_by_comment: true -approve_regex: '^(:shipit:|:\+1:|\+1|LGTM|lgtm|Approved)' -reject_regex: ^[Rr]ejected -reset_on_push: false -reviewers: - members: - - casualjim - - chancez - - frapposelli - - vburenin - - pytlesk4 - name: pullapprove - required: 1 diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/.travis.yml b/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/.travis.yml new file mode 100644 index 000000000000..3436c4590c31 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/.travis.yml @@ -0,0 +1,15 @@ +after_success: +- bash <(curl -s https://codecov.io/bash) +go: +- '1.9' +- 1.10.x +- 1.11.x +install: +- go get -u github.com/stretchr/testify/assert +- go get -u github.com/go-openapi/swag +language: go +notifications: + slack: + secure: a5VgoiwB1G/AZqzmephPZIhEB9avMlsWSlVnM1dSAtYAwdrQHGTQxAmpOxYIoSPDhWNN5bfZmjd29++UlTwLcHSR+e0kJhH6IfDlsHj/HplNCJ9tyI0zYc7XchtdKgeMxMzBKCzgwFXGSbQGydXTliDNBo0HOzmY3cou/daMFTP60K+offcjS+3LRAYb1EroSRXZqrk1nuF/xDL3792DZUdPMiFR/L/Df6y74D6/QP4sTkTDFQitz4Wy/7jbsfj8dG6qK2zivgV6/l+w4OVjFkxVpPXogDWY10vVXNVynqxfJ7to2d1I9lNCHE2ilBCkWMIPdyJF7hjF8pKW+82yP4EzRh0vu8Xn0HT5MZpQxdRY/YMxNrWaG7SxsoEaO4q5uhgdzAqLYY3TRa7MjIK+7Ur+aqOeTXn6OKwVi0CjvZ6mIU3WUKSwiwkFZMbjRAkSb5CYwMEfGFO/z964xz83qGt6WAtBXNotqCQpTIiKtDHQeLOMfksHImCg6JLhQcWBVxamVgu0G3Pdh8Y6DyPnxraXY95+QDavbjqv7TeYT9T/FNnrkXaTTK0s4iWE5H4ACU0Qvz0wUYgfQrZv0/Hp7V17+rabUwnzYySHCy9SWX/7OV9Cfh31iMp9ZIffr76xmmThtOEqs8TrTtU6BWI3rWwvA9cXQipZTVtL0oswrGw= +script: +- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... 
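Stepping back from the raw hunks for a moment: the ghodss/yaml changes above add an opt-in strict mode. `Unmarshal` now accepts variadic `JSONOpt` decoder options, the go1.10-only `DisallowUnknownFields` option surfaces `json.Decoder.DisallowUnknownFields`, and `YAMLToJSONStrict` runs the first YAML pass through `yaml.UnmarshalStrict`. A minimal sketch of how the new surface fits together, assuming a made-up `Config` struct and sample inputs (only the `yaml` package functions are from the hunks above):

```go
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

// Config is a hypothetical target type for this sketch.
type Config struct {
	Name string `json:"name"`
}

func main() {
	// "replicas" is not a field of Config.
	data := []byte("name: test\nreplicas: 3\n")

	var c Config
	// Default behavior: the unknown field is silently dropped.
	if err := yaml.Unmarshal(data, &c); err != nil {
		fmt.Printf("err: %v\n", err)
	}

	// With the DisallowUnknownFields option (go1.10+), the same input
	// should fail instead of being silently accepted.
	if err := yaml.Unmarshal(data, &c, yaml.DisallowUnknownFields); err != nil {
		fmt.Printf("strict err: %v\n", err)
	}

	// YAMLToJSONStrict rejects YAML that go-yaml's strict mode rejects,
	// e.g. duplicate keys in the same mapping.
	if _, err := yaml.YAMLToJSONStrict([]byte("name: a\nname: b\n")); err != nil {
		fmt.Printf("duplicate key err: %v\n", err)
	}
}
```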
diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/README.md b/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/README.md index 9c9b1fd488e2..813788aff1c2 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/README.md +++ b/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/README.md @@ -1,4 +1,4 @@ -# gojsonpointer [![Build Status](https://ci.vmware.run/api/badges/go-openapi/jsonpointer/status.svg)](https://ci.vmware.run/go-openapi/jsonpointer) [![Coverage](https://coverage.vmware.run/badges/go-openapi/jsonpointer/coverage.svg)](https://coverage.vmware.run/go-openapi/jsonpointer) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +# gojsonpointer [![Build Status](https://travis-ci.org/go-openapi/jsonpointer.svg?branch=master)](https://travis-ci.org/go-openapi/jsonpointer) [![codecov](https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonpointer) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonpointer?status.svg)](http://godoc.org/github.com/go-openapi/jsonpointer) An implementation of JSON Pointer - Go language diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/go.mod b/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/go.mod new file mode 100644 index 000000000000..eb4d623c590c --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/go.mod @@ -0,0 +1,10 @@ +module github.com/go-openapi/jsonpointer + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/go-openapi/swag v0.17.0 + github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/testify v1.2.2 + gopkg.in/yaml.v2 v2.2.1 // indirect +) diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/go.sum b/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/go.sum new file mode 100644 index 000000000000..c71f4d7a294f --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/go.sum @@ -0,0 +1,11 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-openapi/swag v0.17.0 h1:7wu+dZ5k83kvUWeAb+WUkFiUhDzwGqzTR/NhWzeo1JU= +github.com/go-openapi/swag v0.17.0/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/pointer.go 
b/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/pointer.go index 39dd012c2a12..fe2d6ee574f1 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/pointer.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/jsonpointer/pointer.go @@ -43,6 +43,7 @@ const ( ) var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem() +var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem() // JSONPointable is an interface for structs to implement when they need to customize the // json pointer process @@ -50,16 +51,10 @@ type JSONPointable interface { JSONLookup(string) (interface{}, error) } -type implStruct struct { - mode string // "SET" or "GET" - - inDocument interface{} - - setInValue interface{} - - getOutNode interface{} - getOutKind reflect.Kind - outError error +// JSONSetable is an interface for structs to implement when they need to customize the +// json pointer process +type JSONSetable interface { + JSONSet(string, interface{}) error } // New creates a new json pointer for the given string @@ -100,15 +95,25 @@ func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) { return p.get(document, swag.DefaultJSONNameProvider) } +// Set uses the pointer to set a value in a JSON document +func (p *Pointer) Set(document interface{}, value interface{}) (interface{}, error) { + return document, p.set(document, value, swag.DefaultJSONNameProvider) +} + // GetForToken gets a value for a json pointer token 1 level deep func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) { return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider) } +// SetForToken sets a value for a json pointer token 1 level deep +func SetForToken(document interface{}, decodedToken string, value interface{}) (interface{}, error) { + return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider) +} + func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { - kind := reflect.Invalid rValue := reflect.Indirect(reflect.ValueOf(node)) - kind = rValue.Kind() + kind := rValue.Kind() + switch kind { case reflect.Struct: @@ -129,6 +134,7 @@ func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.Nam case reflect.Map: kv := reflect.ValueOf(decodedToken) mv := rValue.MapIndex(kv) + + if mv.IsValid() && !swag.IsZero(mv) { return mv.Interface(), kind, nil } @@ -141,7 +147,7 @@ func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.Nam } sLength := rValue.Len() if tokenIndex < 0 || tokenIndex >= sLength { - return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex) + return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength-1, tokenIndex) } elem := rValue.Index(tokenIndex) @@ -153,6 +159,57 @@ func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.Nam } +func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *swag.NameProvider) error { + rValue := reflect.Indirect(reflect.ValueOf(node)) + switch rValue.Kind() { + + case reflect.Struct: + if ns, ok := node.(JSONSetable); ok { // pointer impl + return ns.JSONSet(decodedToken, data) + } + + if rValue.Type().Implements(jsonSetableType) { + return node.(JSONSetable).JSONSet(decodedToken, data) + } + + nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) + if !ok { + return fmt.Errorf("object has no field %q", 
decodedToken) + } + fld := rValue.FieldByName(nm) + if fld.IsValid() { + fld.Set(reflect.ValueOf(data)) + } + return nil + + case reflect.Map: + kv := reflect.ValueOf(decodedToken) + rValue.SetMapIndex(kv, reflect.ValueOf(data)) + return nil + + case reflect.Slice: + tokenIndex, err := strconv.Atoi(decodedToken) + if err != nil { + return err + } + sLength := rValue.Len() + if tokenIndex < 0 || tokenIndex >= sLength { + return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex) + } + + elem := rValue.Index(tokenIndex) + if !elem.CanSet() { + return fmt.Errorf("can't set slice index %s to %v", decodedToken, data) + } + elem.Set(reflect.ValueOf(data)) + return nil + + default: + return fmt.Errorf("invalid token reference %q", decodedToken) + } + +} + func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { if nameProvider == nil { @@ -184,6 +241,101 @@ func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interf return node, kind, nil } +func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) error { + knd := reflect.ValueOf(node).Kind() + + if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array { + return fmt.Errorf("only structs, pointers, maps and slices are supported for setting values") + } + + if nameProvider == nil { + nameProvider = swag.DefaultJSONNameProvider + } + + // Full document when empty + if len(p.referenceTokens) == 0 { + return nil + } + + lastI := len(p.referenceTokens) - 1 + for i, token := range p.referenceTokens { + isLastToken := i == lastI + decodedToken := Unescape(token) + + if isLastToken { + + return setSingleImpl(node, data, decodedToken, nameProvider) + } + + rValue := reflect.Indirect(reflect.ValueOf(node)) + kind := rValue.Kind() + + switch kind { + + case reflect.Struct: + if rValue.Type().Implements(jsonPointableType) { + r, err := node.(JSONPointable).JSONLookup(decodedToken) + if err != nil { + return err + } + fld := reflect.ValueOf(r) + if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr { + node = fld.Addr().Interface() + continue + } + node = r + continue + } + nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) + if !ok { + return fmt.Errorf("object has no field %q", decodedToken) + } + fld := rValue.FieldByName(nm) + if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr { + node = fld.Addr().Interface() + continue + } + node = fld.Interface() + + case reflect.Map: + kv := reflect.ValueOf(decodedToken) + mv := rValue.MapIndex(kv) + + if !mv.IsValid() { + return fmt.Errorf("object has no key %q", decodedToken) + } + if mv.CanAddr() && mv.Kind() != reflect.Interface && mv.Kind() != reflect.Map && mv.Kind() != reflect.Slice && mv.Kind() != reflect.Ptr { + node = mv.Addr().Interface() + continue + } + node = mv.Interface() + + case reflect.Slice: + tokenIndex, err := strconv.Atoi(decodedToken) + if err != nil { + return err + } + sLength := rValue.Len() + if tokenIndex < 0 || tokenIndex >= sLength { + return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex) + } + + elem := rValue.Index(tokenIndex) + if elem.CanAddr() && elem.Kind() != reflect.Interface && elem.Kind() != reflect.Map && elem.Kind() != reflect.Slice && elem.Kind() != reflect.Ptr { + node = 
elem.Addr().Interface() + continue + } + node = elem.Interface() + + default: + return fmt.Errorf("invalid token reference %q", decodedToken) + } + + } + + return nil +} + // DecodedTokens returns the decoded tokens func (p *Pointer) DecodedTokens() []string { result := make([]string, 0, len(p.referenceTokens)) diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/.drone.yml b/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/.drone.yml deleted file mode 100644 index 157ffe5798e8..000000000000 --- a/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/.drone.yml +++ /dev/null @@ -1,33 +0,0 @@ -clone: - path: github.com/go-openapi/jsonreference - -matrix: - GO_VERSION: - - "1.6" - -build: - integration: - image: golang:$$GO_VERSION - pull: true - commands: - - go get -u github.com/stretchr/testify/assert - - go get -u github.com/PuerkitoBio/purell - - go get -u github.com/go-openapi/jsonpointer - - go test -race - - go test -v -cover -coverprofile=coverage.out -covermode=count ./... - -notify: - slack: - channel: bots - webhook_url: $$SLACK_URL - username: drone - -publish: - coverage: - server: https://coverage.vmware.run - token: $$GITHUB_TOKEN - # threshold: 70 - # must_increase: true - when: - matrix: - GO_VERSION: "1.6" diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/.pullapprove.yml b/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/.pullapprove.yml deleted file mode 100644 index 5ec183e22442..000000000000 --- a/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/.pullapprove.yml +++ /dev/null @@ -1,13 +0,0 @@ -approve_by_comment: true -approve_regex: '^(:shipit:|:\+1:|\+1|LGTM|lgtm|Approved)' -reject_regex: ^[Rr]ejected -reset_on_push: false -reviewers: - members: - - casualjim - - chancez - - frapposelli - - vburenin - - pytlesk4 - name: pullapprove - required: 1 diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/.travis.yml b/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/.travis.yml new file mode 100644 index 000000000000..40034d28dc23 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/.travis.yml @@ -0,0 +1,16 @@ +after_success: +- bash <(curl -s https://codecov.io/bash) +go: +- '1.9' +- 1.10.x +- 1.11.x +install: +- go get -u github.com/stretchr/testify/assert +- go get -u github.com/PuerkitoBio/purell +- go get -u github.com/go-openapi/jsonpointer +language: go +notifications: + slack: + secure: OpQG/36F7DSF00HLm9WZMhyqFCYYyYTsVDObW226cWiR8PWYiNfLZiSEvIzT1Gx4dDjhigKTIqcLhG34CkL5iNXDjm9Yyo2RYhQPlK8NErNqUEXuBqn4RqYHW48VGhEhOyDd4Ei0E2FN5ZbgpvHgtpkdZ6XDi64r3Ac89isP9aPHXQTuv2Jog6b4/OKKiUTftLcTIst0p4Cp3gqOJWf1wnoj+IadWiECNVQT6zb47IYjtyw6+uV8iUjTzdKcRB6Zc6b4Dq7JAg1Zd7Jfxkql3hlKp4PNlRf9Cy7y5iA3G7MLyg3FcPX5z2kmcyPt2jOTRMBWUJ5zIQpOxizAcN8WsT3WWBL5KbuYK6k0PzujrIDLqdxGpNmjkkMfDBT9cKmZpm2FdW+oZgPFJP+oKmAo4u4KJz/vjiPTXgQlN5bmrLuRMCp+AwC5wkIohTqWZVPE2TK6ZSnMYcg/W39s+RP/9mJoyryAvPSpBOLTI+biCgaUCTOAZxNTWpMFc3tPYntc41WWkdKcooZ9JA5DwfcaVFyTGQ3YXz+HvX6G1z/gW0Q/A4dBi9mj2iE1xm7tRTT+4VQ2AXFvSEI1HJpfPgYnwAtwOD1v3Qm2EUHk9sCdtEDR4wVGEPIVn44GnwFMnGKx9JWppMPYwFu3SVDdHt+E+LOlhZUply11Aa+IVrT2KUQ= +script: +- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... 
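The pointer.go hunks above give go-openapi/jsonpointer a write path mirroring `Get`: a `JSONSetable` interface for custom types, `SetForToken` for a single token, and `Pointer.Set`, which walks all but the last reference token much like `get` does and then hands the final token to `setSingleImpl` (struct fields via the swag name provider, map keys via `SetMapIndex`, slice indices via `reflect.Value.Set`). A minimal sketch of the new API against a plain decoded-JSON document; the document shape, the pointer path, and the value 42 are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/jsonpointer"
)

func main() {
	// A document shaped the way json.Unmarshal would produce it:
	// nested maps and slices of interface{}.
	doc := map[string]interface{}{
		"spec": map[string]interface{}{
			"replicas": []interface{}{1, 2, 3},
		},
	}

	p, err := jsonpointer.New("/spec/replicas/1")
	if err != nil {
		fmt.Printf("err: %v\n", err)
		return
	}

	// Read path, unchanged by this diff.
	v, _, _ := p.Get(doc)
	fmt.Println(v) // 2

	// New write path: mutates the addressed slice element in place.
	if _, err := p.Set(doc, 42); err != nil {
		fmt.Printf("set err: %v\n", err)
		return
	}

	v, _, _ = p.Get(doc)
	fmt.Println(v) // 42
}
```

Because maps and slice backing arrays are shared, `Set` can mutate them through the interface values it traverses; writing to struct fields instead requires an addressable document, e.g. a pointer to the root struct.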
diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/README.md b/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/README.md index 5f7881274ee4..66345f4c61fb 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/README.md +++ b/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/README.md @@ -1,4 +1,4 @@ -# gojsonreference [![Build Status](https://ci.vmware.run/api/badges/go-openapi/jsonreference/status.svg)](https://ci.vmware.run/go-openapi/jsonreference) [![Coverage](https://coverage.vmware.run/badges/go-openapi/jsonreference/coverage.svg)](https://coverage.vmware.run/go-openapi/jsonreference) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +# gojsonreference [![Build Status](https://travis-ci.org/go-openapi/jsonreference.svg?branch=master)](https://travis-ci.org/go-openapi/jsonreference) [![codecov](https://codecov.io/gh/go-openapi/jsonreference/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonreference) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonreference?status.svg)](http://godoc.org/github.com/go-openapi/jsonreference) An implementation of JSON Reference - Go language @@ -7,7 +7,7 @@ An implementation of JSON Reference - Go language Work in progress ( 90% done ) ## Dependencies -https://github.com/xeipuuv/gojsonpointer +https://github.com/go-openapi/jsonpointer ## References http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/go.mod b/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/go.mod new file mode 100644 index 000000000000..6d15a70503ec --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/go.mod @@ -0,0 +1,15 @@ +module github.com/go-openapi/jsonreference + +require ( + github.com/PuerkitoBio/purell v1.1.0 + github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/go-openapi/jsonpointer v0.17.0 + github.com/go-openapi/swag v0.17.0 // indirect + github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/testify v1.2.2 + golang.org/x/net v0.0.0-20181005035420-146acd28ed58 // indirect + golang.org/x/text v0.3.0 // indirect + gopkg.in/yaml.v2 v2.2.1 // indirect +) diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/go.sum b/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/go.sum new file mode 100644 index 000000000000..ec9bdbc28651 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-openapi/jsonreference/go.sum @@ -0,0 +1,20 @@ +github.com/PuerkitoBio/purell v1.1.0 h1:rmGxhojJlM0tuKtfdvliR84CFHljx9ag64t2xmVkjK4= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-openapi/jsonpointer v0.17.0 h1:Bpl2DtZ6k7wKqfFs7e+4P08+M9I3FQgn09a1UsRUQbk= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/swag v0.17.0 h1:7wu+dZ5k83kvUWeAb+WUkFiUhDzwGqzTR/NhWzeo1JU= +github.com/go-openapi/swag v0.17.0/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58 h1:otZG8yDCO4LVps5+9bxOeNiCvgmOyt96J3roHTYs7oE= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/spec/.golangci.yml b/cluster-autoscaler/vendor/github.com/go-openapi/spec/.golangci.yml new file mode 100644 index 000000000000..ed53e5cd761e --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-openapi/spec/.golangci.yml @@ -0,0 +1,21 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 25 + maligned: + suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 2 + min-occurrences: 2 + +linters: + enable-all: true + disable: + - maligned + - unparam + - lll diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/spec/.travis.yml b/cluster-autoscaler/vendor/github.com/go-openapi/spec/.travis.yml index ea80ef2a71e0..a4f03484b76c 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/spec/.travis.yml +++ b/cluster-autoscaler/vendor/github.com/go-openapi/spec/.travis.yml @@ -1,16 +1,18 @@ -language: go +after_success: +- bash <(curl -s https://codecov.io/bash) go: -- 1.7 +- '1.9' +- 1.10.x +- 1.11.x install: - go get -u github.com/stretchr/testify - go get -u github.com/go-openapi/swag - go get -u gopkg.in/yaml.v2 - go get -u github.com/go-openapi/jsonpointer - go get -u github.com/go-openapi/jsonreference -script: -- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... 
-after_success: -- bash <(curl -s https://codecov.io/bash) +language: go notifications: slack: secure: QUWvCkBBK09GF7YtEvHHVt70JOkdlNBG0nIKu/5qc4/nW5HP8I2w0SEf/XR2je0eED1Qe3L/AfMCWwrEj+IUZc3l4v+ju8X8R3Lomhme0Eb0jd1MTMCuPcBT47YCj0M7RON7vXtbFfm1hFJ/jLe5+9FXz0hpXsR24PJc5ZIi/ogNwkaPqG4BmndzecpSh0vc2FJPZUD9LT0I09REY/vXR0oQAalLkW0asGD5taHZTUZq/kBpsNxaAFrLM23i4mUcf33M5fjLpvx5LRICrX/57XpBrDh2TooBU6Qj3CgoY0uPRYUmSNxbVx1czNzl2JtEpb5yjoxfVPQeg0BvQM00G8LJINISR+ohrjhkZmAqchDupAX+yFrxTtORa78CtnIL6z/aTNlgwwVD8kvL/1pFA/JWYmKDmz93mV/+6wubGzNSQCstzjkFA4/iZEKewKUoRIAi/fxyscP6L/rCpmY/4llZZvrnyTqVbt6URWpopUpH4rwYqreXAtJxJsfBJIeSmUIiDIOMGkCTvyTEW3fWGmGoqWtSHLoaWDyAIGb7azb+KvfpWtEcoPFWfSWU+LGee0A/YsUhBl7ADB9A0CJEuR8q4BPpKpfLwPKSiKSAXL7zDkyjExyhtgqbSl2jS+rKIHOZNL8JkCcTP2MKMVd563C5rC5FMKqu3S9m2b6380E= +script: +- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/spec/README.md b/cluster-autoscaler/vendor/github.com/go-openapi/spec/README.md index 1d1622082fe5..6354742cbf62 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/spec/README.md +++ b/cluster-autoscaler/vendor/github.com/go-openapi/spec/README.md @@ -1,5 +1,10 @@ # OAI object model [![Build Status](https://travis-ci.org/go-openapi/spec.svg?branch=master)](https://travis-ci.org/go-openapi/spec) [![codecov](https://codecov.io/gh/go-openapi/spec/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/spec) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/spec?status.svg)](http://godoc.org/github.com/go-openapi/spec) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) +[![GoDoc](https://godoc.org/github.com/go-openapi/spec?status.svg)](http://godoc.org/github.com/go-openapi/spec) +[![GolangCI](https://golangci.com/badges/github.com/go-openapi/spec.svg)](https://golangci.com) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/spec)](https://goreportcard.com/report/github.com/go-openapi/spec) -The object model for OpenAPI specification documents +The object model for OpenAPI specification documents. + +Currently supports Swagger 2.0. 
diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/spec/bindata.go b/cluster-autoscaler/vendor/github.com/go-openapi/spec/bindata.go index 9afb5df194ed..1717ea1052eb 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/spec/bindata.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/spec/bindata.go @@ -69,7 +69,7 @@ func (fi bindataFileInfo) Sys() interface{} { return nil } -var _jsonschemaDraft04JSON = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xc4\x57\x3b\x6f\xdb\x3e\x10\xdf\xf3\x29\x08\x26\x63\xf2\x97\xff\x40\x27\x6f\x45\xbb\x18\x68\xd1\x0c\xdd\x0c\x0f\xb4\x75\xb2\x19\x50\xa4\x42\x51\x81\x0d\x43\xdf\xbd\xa0\xa8\x07\x29\x91\x92\x2d\xbb\x8d\x97\x28\xbc\xd7\xef\x8e\xf7\xe2\xf9\x01\x21\x84\x30\x8d\xf1\x12\xe1\x83\x52\xd9\x32\x8a\xde\x72\xc1\x5f\xf2\xdd\x01\x52\xf2\x9f\x90\xfb\x28\x96\x24\x51\x2f\x8b\x2f\x91\x39\x7b\xc4\xcf\x46\xe8\xc9\xfc\x3f\x43\x32\x86\x7c\x27\x69\xa6\xa8\xe0\x5a\xfa\x9b\x90\x80\x0c\x0b\x4a\x41\x91\x5a\x45\xc7\x9d\x50\x4e\x35\x73\x8e\x97\xc8\x20\xae\x08\x86\xed\xab\x94\xe4\xe4\x10\x2a\xa2\x3a\x65\xa0\x95\x93\x8a\xfc\xec\x12\x53\xca\x57\x0a\x52\xad\xef\xff\x1e\x89\xd6\xe7\x67\x84\x9f\x24\x24\x5a\xc5\x23\x46\x65\xcb\x54\x76\xfc\x38\x13\x39\x55\xf4\x03\x56\x5c\xc1\x1e\x64\x18\x04\xad\x19\x86\x30\x68\x5a\xa4\x78\x89\x16\x97\xe8\xff\x0e\x09\x29\x98\x5a\x0c\xed\x10\xc6\x7e\x69\xa8\x6b\x07\x76\x64\x45\x2e\xea\x63\x45\xe5\xb3\x66\x8e\x8d\x4e\x0d\x01\x95\x68\xe3\x85\x91\xd3\x34\x63\xf0\xfb\x94\x41\x3e\x34\x0d\xbc\x72\x60\xdd\x46\x1a\xe1\xad\x10\x0c\x08\xd7\x9f\xad\xe3\x08\xf3\x82\x31\xf3\x37\xdd\x9a\x13\xb1\x7d\x83\x9d\xd2\x5f\xb9\x92\x94\xef\x71\xc8\x7e\x45\x9d\x73\xcf\xd6\x65\x36\x7c\x8d\xa9\xf2\xf2\x94\x28\x38\x7d\x2f\xa0\xa1\x2a\x59\x40\x07\xf3\xc1\x02\xdb\xda\x68\x1c\x33\xa7\x99\x14\x19\x48\x45\x7b\xd1\x33\x45\x17\xf0\xa6\x46\xd9\x03\x92\x08\x99\x12\x7d\x57\xb8\x90\x14\x7b\x63\xd5\x15\xe5\xbd\x35\x2b\xaa\x18\x4c\xea\xf5\x8a\xba\xf5\x3e\x4b\x41\x93\xa5\x67\xfb\x38\x2d\x98\xa2\x19\x83\x2a\xf7\x03\x6a\x9b\x74\x0b\x56\x5e\x8f\x02\xc7\x1d\x2b\x72\xfa\x01\x3f\x5b\x16\xf7\xc6\x6d\xfb\xe4\x58\xb3\x8c\x1b\xf7\x0a\x77\x86\xa6\xb4\xb4\xf5\xe4\x92\xbb\xa0\x24\x84\xe5\x01\x84\xad\x13\x37\x21\x9c\xd2\x72\x0b\x42\x72\xfc\x01\x7c\xaf\x0e\xbd\x9e\x3b\xd5\xbc\x1c\x1f\xaf\xd6\xd0\xb6\x52\xb7\xdf\x12\xa5\x40\x4e\xe7\x68\xb0\x78\x24\xec\xe1\xe8\x0f\x26\x89\xe3\x0a\x0a\x61\x4d\x23\xe9\xf7\x70\x7e\x32\x3d\xdc\x39\xd6\xbf\xf3\x30\xd0\xfd\xf6\x55\xb3\x79\x27\x96\xfe\x6d\x82\x37\x73\xf6\x8f\x36\x3a\x03\xa4\x6d\x7d\x1c\x9e\x73\x35\xf6\x18\xbf\x15\x76\x4a\x8e\x2b\xcf\x00\xbf\x2a\x99\xae\x55\xe0\xcf\x25\x77\x68\xfc\x95\xba\x79\x75\x06\xcb\x5c\x77\x67\x69\xf1\xfb\x2c\xe1\xbd\xa0\x12\xe2\x31\x45\xf6\x30\x0f\x14\xc8\xab\x7f\x60\x4e\x27\xe0\x3f\xaf\x92\xd0\x6a\x8a\x82\xdb\xc0\xa4\xbb\x63\x65\x34\x0d\x28\xb0\x6b\x7c\x1e\x1e\xd3\x51\xc7\x6e\xf4\x33\x60\xc5\x90\x01\x8f\x81\xef\xee\x88\x68\x90\x69\x23\xb9\x8a\x2e\x69\x98\x7d\xa6\x91\x32\x1a\xc8\x6e\x9c\x13\x7f\x10\xea\xcd\xfd\x4e\xef\xa6\xb1\x25\xd9\xde\x22\x8d\xfa\x59\x63\xc5\x0d\x80\xf5\x28\xf1\xd6\xb9\x37\x9e\xa3\xee\xb5\x4c\xbe\x37\xe0\x55\xc6\x27\x82\x75\x49\xd0\xda\xe0\xb9\x1d\xca\xbf\x5b\xd4\xcf\xbf\x0b\x47\xac\x2d\x59\x07\xfe\x7a\x49\xc1\x61\xa6\x24\x17\x2a\xf0\xbe\x2e\xdb\x17\x7f\xa0\x3c\x7d\x4b\xf3\xba\xdb\xc3\xed\x06\xee\xdb\x5e\xd7\xdd\x42\x5c\x47\xb2\xb3\x68\x75\x8c\xf2\xe1\x4f\x00\x00\x00\xff\xff\x4e\x9b\x8d\xdf\x17\x11\x00\x00") +var _jsonschemaDraft04JSON = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x57\x3d\x6f\xdb\x3c\x10\xde\xf3\x2b\x08\x26\x63\xf2\x2a\x2f\xd0\xc9\x5b\xd1\x2e\x01\x5a\x34\x43\x37\x23\x03\x6d\x9d\x6c\x06\x14\xa9\x50\x54\x60\xc3\xd0\x7f\x2f\x28\x4a\x14\x29\x91\x92\x2d\xa7\x8d\x97\x28\xbc\xaf\xe7\x8e\xf7\xc5\xd3\x0d\x42\x08\x61\x9a\xe2\x15\xc2\x7b\xa5\x8a\x55\x92\xbc\x96\x82\x3f\x94\xdb\x3d\xe4\xe4\x3f\x21\x77\x49\x2a\x49\xa6\x1e\x1e\xbf\x24\xe6\xec\x16\xdf\x1b\xa1\x3b\xf3\xff\x02\xc9\x14\xca\xad\xa4\x85\xa2\x82\x6b\xe9\x6f\x42\x02\x32\x2c\x28\x07\x45\x5a\x15\x3d\x77\x46\x39\xd5\xcc\x25\x5e\x21\x83\xb8\x21\x18\xb6\xaf\x52\x92\xa3\x47\x68\x88\xea\x58\x80\x56\x4e\x1a\xf2\xbd\x4f\xcc\x29\x7f\x52\x90\x6b\x7d\xff\x0f\x48\xb4\x3d\x3f\x21\x7c\x27\x21\xd3\x2a\x6e\x31\xaa\x2d\x53\xdd\xf3\xe3\x42\x94\x54\xd1\x77\x78\xe2\x0a\x76\x20\xe3\x20\x68\xcb\x30\x86\x41\xf3\x2a\xc7\x2b\xf4\x78\x8e\xfe\xef\x90\x91\x8a\xa9\xc7\xb1\x1d\xc2\xd8\x2f\x0d\x75\xed\xc1\x4e\x9c\xc8\x25\x43\xac\xa8\xbe\xd7\xcc\xa9\xd1\xa9\x21\xa0\x1a\xbd\x04\x61\x94\x34\x2f\x18\xfc\x3e\x16\x50\x8e\x4d\x03\x6f\x1c\x58\xdb\x48\x23\xbc\x11\x82\x01\xe1\xfa\xd3\x3a\x8e\x30\xaf\x18\x33\x7f\xf3\x8d\x39\x11\x9b\x57\xd8\x2a\xfd\x55\x2a\x49\xf9\x0e\xc7\xec\x37\xd4\x25\xf7\xec\x5c\x66\xc7\xd7\x99\xaa\xcf\x4f\x89\x8a\xd3\xb7\x0a\x3a\xaa\x92\x15\xf4\x30\x6f\x1c\xb0\xd6\x46\xe7\x98\x39\x2d\xa4\x28\x40\x2a\x3a\x88\x9e\x29\xba\x88\x37\x2d\xca\x60\x38\xfa\xba\x5b\x20\xac\xa8\x62\xb0\x4c\xd4\xaf\xda\x45\x0a\xba\x5c\x3b\xb9\xc7\x79\xc5\x14\x2d\x18\x34\x19\x1c\x51\xdb\x25\x4d\xb4\x7e\x06\x14\x38\x6c\x59\x55\xd2\x77\xf8\x69\x59\xfc\x7b\x73\xed\x93\x43\xcb\x32\x6d\x3c\x28\xdc\x1b\x9a\xd3\x62\xab\xc2\x27\xf7\x41\xc9\x08\x2b\x23\x08\xad\x13\x57\x21\x9c\xd3\x72\x0d\x42\x72\xf8\x01\x7c\xa7\xf6\x83\xce\x39\xd7\x82\x3c\x1f\x2f\xd6\x60\x1b\xa2\xdf\x35\x89\x52\x20\xe7\x73\x74\xe0\x66\x26\x64\x4e\xb4\x97\x58\xc2\x0e\x0e\xe1\x60\x92\x34\x6d\xa0\x10\xd6\xb5\x83\x61\x27\xe6\x47\xd3\x89\xbd\x63\xfd\x3b\x8d\x03\x3d\x6c\x42\x2d\x5b\x70\xee\xe8\xdf\x4b\xf4\x66\x4e\xe1\x01\x45\x17\x80\x74\xad\x4f\xc3\xf3\xae\xc6\x1d\xc6\xd7\xc2\xce\xc9\xe1\x29\x30\x86\x2f\x4a\xa6\x4b\x15\x84\x73\xc9\x6f\xfd\x7f\xa5\x6e\x9e\xbd\xf1\xb0\xd4\xdd\x45\x5a\xc2\x3e\x4b\x78\xab\xa8\x84\x74\x4a\x91\x3b\x92\x23\x05\xf2\x1c\x1e\x7b\xf3\x09\xf8\xcf\xab\x24\xb6\x60\xa2\xe8\x4c\x9f\x75\x77\xaa\x8c\xe6\x01\x45\x36\x86\xcf\xc3\x63\x3a\xea\xd4\x8d\x7e\x06\xac\x14\x0a\xe0\x29\xf0\xed\x07\x22\x1a\x65\xda\x44\xae\xa2\x73\x1a\xe6\x90\x69\xa2\x8c\x46\xb2\x2f\xde\x49\x38\x08\xed\xfe\xfd\x41\xaf\x9f\xa9\x55\xd7\xdd\x22\x8d\xfa\x45\x63\xc5\x0f\x80\xf3\xb4\x08\xd6\x79\x30\x9e\x93\xee\x59\xa6\xd0\x4b\xee\x22\xe3\x33\xc1\x3a\x27\x68\x36\x78\x7e\x87\x0a\x06\xd5\x2e\x20\xd3\xaf\x15\xfb\xd8\x3b\x73\x14\xbb\x92\xed\x05\x5d\x2e\x29\x38\x2c\x94\xe4\x42\x45\x5e\xd3\xb5\x7d\xdf\x47\xca\x38\xb4\x5c\xaf\xfb\x7d\xdd\x6d\xf4\xa1\x2d\x77\xdd\x2f\xce\x6d\xc4\x7b\x8b\x4e\x67\xa9\x6f\xfe\x04\x00\x00\xff\xff\xb1\xd1\x27\x78\x05\x11\x00\x00") func jsonschemaDraft04JSONBytes() ([]byte, error) { return bindataRead( @@ -84,12 +84,12 @@ func jsonschemaDraft04JSON() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4375, mode: os.FileMode(420), modTime: time.Unix(1482389892, 0)} + info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4357, mode: os.FileMode(420), modTime: time.Unix(1523760398, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var _v2SchemaJSON = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xec\x5d\x4f\x93\xdb\x36\xb2\xbf\xfb\x53\xa0\x14\x57\xd9\xae\xd8\x92\xe3\xf7\x2e\xcf\x97\xd4\xbc\xd8\x49\x66\x37\x5e\x4f\x79\x26\xbb\x87\x78\x5c\x05\x91\x2d\x09\x09\x09\x30\x00\x38\x33\x5a\xef\x7c\xf7\x2d\xf0\x9f\x08\x02\x20\x41\x8a\xd2\xc8\x0e\x0f\xa9\x78\x28\xa0\xd1\xdd\x68\x34\x7e\xdd\xf8\xf7\xf9\x11\x42\x33\x49\x64\x04\xb3\xd7\x68\x76\x86\xfe\x76\xf9\xfe\x1f\xe8\x32\xd8\x40\x8c\xd1\x8a\x71\x74\x79\x8b\xd7\x6b\xe0\xe8\xd5\xfc\x25\x3a\xbb\x38\x9f\xcf\x9e\xab\x0a\x24\x54\xa5\x37\x52\x26\xaf\x17\x0b\x91\x17\x99\x13\xb6\xb8\x79\xb5\x10\x59\xdd\xf9\xef\x82\xd1\x6f\xf2\xc2\x8f\xf3\x4f\xb5\x1a\xea\xc7\x17\x45\x41\xc6\xd7\x8b\x90\xe3\x95\x7c\xf1\xf2\x7f\x8b\xca\x45\x3d\xb9\x4d\x32\xa6\xd8\xf2\x77\x08\x64\xfe\x8d\xc3\x9f\x29\xe1\xa0\x9a\xff\xed\x11\x42\x08\xcd\x8a\xd6\xb3\x9f\x15\x67\x74\xc5\xca\x7f\x27\x58\x6e\xc4\xec\x11\x42\xd7\x59\x5d\x1c\x86\x44\x12\x46\x71\x74\xc1\x59\x02\x5c\x12\x10\xb3\xd7\x68\x85\x23\x01\x59\x81\x04\x4b\x09\x9c\x6a\xbf\x7e\xce\x49\x7d\xba\x7b\x51\xfd\xa1\x44\xe2\xb0\x52\xac\x7d\xb3\x08\x61\x45\x68\x46\x56\x2c\x6e\x80\x86\x8c\xbf\xbd\x93\x40\x05\x61\x74\x96\x95\xbe\x7f\x84\xd0\x7d\x4e\xde\x42\xb7\xe4\xbe\x46\xbb\x14\x5b\x48\x4e\xe8\xba\x90\x05\xa1\x19\xd0\x34\xae\xc4\xce\xbe\xbc\x9a\xbf\x9c\x15\x7f\x5d\x57\xc5\x42\x10\x01\x27\x89\xe2\x48\x51\xb9\xda\x40\xd5\x87\x37\xc0\x15\x5f\x88\xad\x90\xdc\x10\x81\x42\x16\xa4\x31\x50\x39\x2f\x38\xad\xab\xb0\x53\xd8\xac\x94\x56\x6f\xc3\x84\xf4\x11\xa4\x50\xb3\xfa\xe9\xd3\x6f\x9f\x3e\xdf\x2f\xd0\xeb\x8f\x1f\x3f\x7e\xbc\xfe\xf6\xe9\xf7\xaf\x5f\x7f\xfc\x18\x7e\xfb\xec\xfb\xc7\xb3\x36\x79\x54\x43\xe8\x29\xc5\x31\x20\xc6\x11\x49\x9e\xe5\x12\x41\x66\xa0\xe8\xed\x1d\x8e\x93\x08\x5e\xa3\x27\x3b\xc3\x7c\xa2\x73\xba\xc4\x02\x2e\xb0\xdc\xf4\xe5\x76\xd1\xca\x96\xa2\x8a\x94\xcd\x21\xc9\x6c\xec\x2c\x70\x42\x9e\x34\x74\x9d\x19\x7c\xcd\x20\x9c\xea\x2e\x0a\xfe\x42\x84\xd4\x29\x04\x8c\x8a\xb4\x41\xa2\xc1\xdc\x19\x8a\x88\x90\x4a\x49\xef\xce\xdf\xbd\x45\x4a\x52\x81\x70\x10\x40\x22\x21\x44\xcb\x6d\xc5\xec\x4e\x3c\x1c\x45\xef\x57\x9a\xb5\x7d\xae\xfe\xe5\xe4\x31\x86\x90\xe0\xab\x6d\x02\x3b\x2e\xcb\x11\x90\xd9\xa8\xc6\x77\xc2\x59\x98\x06\xfd\xf9\x2e\x78\x45\x01\xa6\xa8\xa0\x71\x5c\xbe\x33\xa7\xd2\xd9\x5f\x95\xef\xd9\xd5\xac\xfd\xdc\x5d\xbf\x5e\xb8\xd1\x3e\xc7\x31\x48\xe0\x5e\x4c\x14\x65\xdf\xb8\xa8\x71\x10\x09\xa3\xc2\xc7\x02\xcb\xa2\x4e\x5a\x02\x82\x94\x13\xb9\xf5\x30\xe6\xb2\xa4\xb5\xfe\x9b\x3e\x7a\xb2\x55\xd2\xa8\x4a\xbc\x16\xb6\x71\x8e\x39\xc7\xdb\x9d\xe1\x10\x09\x71\xbd\x9c\xb3\x41\x89\xd7\xa5\x89\xdc\x57\xb5\x53\x4a\xfe\x4c\xe1\xbc\xa0\x21\x79\x0a\x1a\x0f\x70\xa7\x5c\x08\x8e\xde\xb0\xc0\x43\x24\xad\x74\x63\x0e\xb1\xd9\x90\xe1\xb0\x2d\x13\xa7\x6d\x78\xfd\x04\x14\x38\x8e\x90\xaa\xce\x63\xac\x3e\x23\xbc\x64\xa9\xb4\xf8\x03\x63\xde\xcd\xbe\x16\x13\x4a\x55\xac\x82\x12\xc6\xac\xd4\x35\xf7\x22\xd4\x3a\xff\x22\x73\x0e\x6e\x51\xa0\x75\x1e\xae\x8f\xe8\x5d\xc7\x59\xe6\xe4\x9a\x18\x8d\xd6\x1c\x53\x84\x4d\xb7\x67\x28\x37\x09\x84\x69\x88\x12\x0e\x01\x11\x80\x32\xa2\xf5\xb9\xaa\xc6\xd9\x73\x53\xab\xfb\xb4\x2e\x20\xc6\x54\x92\xa0\x9a\xf3\x69\x1a\x2f\x81\x77\x37\xae\x53\x1a\xce\x40\xc4\xa8\x82\x1c\xb5\xef\xda\x24\x7d\xb9\x61\x69\x14\xa2\x25\xa0\x90\xac\x56\xc0\x81\x4a\xb4\xe2\x2c\xce\x4a\x64\x7a\x9a\x23\xf4\x13\x91\x3f\xa7\x4b\xf4\x63\x84\x6f\x18\x87\x10\xbd\xc3\xfc\x8f\x90\xdd\x52\x44\x04\xc2\x51\xc4\x6e\x21\x74\x48\x21\x81\xc7\xe2\xfd\xea\x12\xf8\x0d\x09\xf6\xe9\x47\x35\xaf\x67\xc4\x14\xf7\x22\x27\x97\xe1\xe2\x76\x2d\x06\x8c\x4a\x1c\x48\x3f\x73\x2d\x0b\x5b\x29\x45\x24\x00\x2a\x0c\x11\xec\x94\xca\xc2\xa6\xc1\x37\x21\x43\x8
3\x3b\x5f\x97\xf1\x43\x5e\x53\x73\x19\xa5\x36\xd8\x2d\x05\x2e\x34\x0b\xeb\x39\xfc\x1d\x63\x51\x01\xbd\x3d\xbb\x90\x84\x40\x25\x59\x6d\x09\x5d\xa3\x1c\x37\xe6\x5c\x16\x9a\x40\x09\x70\xc1\xe8\x82\xf1\x35\xa6\xe4\xdf\x99\x5c\x8e\x9e\x4d\x79\xb4\x27\x2f\xbf\x7e\xf8\x05\x25\x8c\x50\xa9\x98\x29\x90\x62\x60\xea\x75\xae\x13\xca\xbf\x2b\x1a\x29\x27\x76\xd6\x20\xc6\x64\x5f\xe6\x32\x1a\x08\x87\x21\x07\x21\xbc\xb4\xe4\xe0\x32\x67\xa6\xcd\xf3\x1e\xcd\xd9\x6b\xb6\x6f\x8e\x27\xa7\xed\xdb\xe7\xbc\xcc\x1a\x07\xce\x6f\x87\x33\xf0\xba\x51\x17\x22\x66\x78\x79\x8e\xce\xe5\x13\x81\x80\x06\x2c\xe5\x78\x0d\xa1\xb2\xb8\x54\xa8\x79\x09\xbd\xbf\x3c\x47\x01\x8b\x13\x2c\xc9\x32\xaa\xaa\x1d\xd5\xee\xab\x36\xbd\x6c\xfd\x54\x6c\xc8\x08\x01\x3c\xbd\xe7\x07\x88\xb0\x24\x37\x79\x90\x28\x4a\x1d\x10\x1a\x92\x1b\x12\xa6\x38\x42\x40\xc3\x4c\x43\x62\x8e\xae\x36\xb0\x45\x71\x2a\xa4\x9a\x23\x79\x59\xb1\xa8\xf2\xa4\x0c\x60\x9f\xcc\x8d\x40\xf5\x80\xca\xa8\x99\xc3\xa7\x85\x1f\x31\x25\xa9\x82\xc5\x6d\xbd\xd8\x36\x76\x7c\x02\x28\x97\xf6\x1d\x74\x3b\x11\x7e\x91\xae\x32\xf8\x6c\xf4\xe6\x7b\x9a\xa5\x1f\x62\xc6\x21\xcf\x9a\xe5\xed\x8b\x02\xf3\x2c\x33\x33\xdf\x00\xca\xc9\x09\xb4\x04\xf5\xa5\x08\xd7\xc3\x02\x18\x66\xf1\xab\x1e\x83\x37\x4c\xcd\x12\xc1\x1d\x50\xf6\xaa\xbd\xfe\xe2\x73\x48\x38\x08\xa0\x32\x9b\x18\x44\x86\x0b\x6a\xc1\xaa\x26\x96\x2d\x96\x3c\xa0\x54\x65\x73\x87\x15\xca\x15\xe5\xf5\x94\x46\x9f\x33\x1a\x0c\x9a\xb1\x5a\xd9\x6a\x95\xcd\xcb\x7e\xec\x9a\xc5\x94\x3b\x37\x26\x31\xd7\xfc\xe4\x1f\x13\x8c\x31\x75\x9c\xba\xf7\x87\x3c\xa1\xb7\x4f\x17\x1b\x09\x82\x98\xc4\x70\x95\xd3\xe8\x4c\x48\x5a\xa6\xd6\x2a\x3d\x56\x42\x80\x9f\xaf\xae\x2e\x50\x0c\x42\xe0\x35\x34\x3c\x8a\x62\x03\x37\xba\xb2\x27\x04\xda\x25\x8d\x06\xe2\xa0\x13\x8a\xf3\xf5\xec\x10\x72\x67\x88\x90\x3d\x4b\x64\xeb\xaa\xda\x8f\xf7\x5a\x75\x47\x9a\xa8\x51\x70\x26\xd2\x38\xc6\x7c\xbb\x57\xfc\xbd\xe4\x04\x56\xa8\xa0\x54\x9a\x45\xd5\xf7\x0f\x16\xfc\x57\x1c\x3c\xdf\x23\xba\x77\x38\xda\x16\x4b\x31\x53\x6a\x4d\x9a\x15\x63\xe7\xe1\x18\x69\x9f\x22\xe0\x24\xbb\x94\x4b\x97\xee\x2d\xf9\x70\x87\x72\x7b\xe6\xc4\x33\x2a\x66\x5e\x1c\x35\x72\xe3\x2d\xda\x73\xe4\xc7\x51\x6d\xa4\xa1\x2a\x4f\xde\x94\xcb\xb2\x3e\x31\x48\xae\x82\xce\xc9\xc8\x65\xcd\xc3\xb7\x34\xb6\x2b\xdf\x58\x65\x78\x6e\x73\xac\x5e\x24\x0d\x3f\xdc\x70\x23\xc6\xda\x52\x0b\x2d\x63\x7d\xa9\x49\x2d\x54\x48\x28\xc0\x12\x9c\xe3\x63\xc9\x58\x04\x98\x36\x07\xc8\x0a\xa7\x91\xd4\xf0\xbc\xc1\xa8\xb9\x70\xd0\xc6\xa9\xb6\x78\x80\x5a\xa3\xb4\x2c\xf4\x18\x0b\x8a\x9d\xd0\xb4\x55\x10\xee\x0d\xc5\xd6\xe0\x99\x93\xdc\xa1\x04\xbb\xf1\xa7\x23\xd1\xd1\x97\x8c\x87\x13\x0a\x21\x02\xe9\x99\x25\xed\x20\xc5\x92\x66\x3c\x32\x9c\xd6\x06\xb0\x31\x5c\x86\x29\x0a\xcb\x60\x33\x12\xa5\x91\xfc\x96\x75\xd0\x59\xd7\x13\xbd\xd3\x23\x79\xdd\x2a\x90\xa6\x38\x06\x91\x39\x7f\x20\x72\x03\x1c\x2d\x01\x61\xba\x45\x37\x38\x22\x61\x8e\x71\x85\xc4\x32\x15\x28\x60\x61\x16\xb8\x3d\x29\xdc\x4d\x3d\x2f\x12\x13\x7d\xc8\x7e\x37\xee\xa8\x7f\xfa\xdb\xcb\x17\xff\x77\xfd\xf9\x7f\xee\x9f\x3d\xfe\xcf\xa7\xa7\x45\xfb\xcf\x1e\xf7\xf3\xe0\xff\xc4\x51\x0a\x8e\x4c\xcb\x01\xdc\x0a\x65\xb2\x01\x83\xed\x3d\xe4\xa9\xa3\x4e\x2d\x59\xc5\xe8\x2f\x48\x7d\x5a\x6e\x37\xbf\x5c\x9f\x35\x13\x64\x14\xfa\xef\x0b\x68\xa6\x0d\xb4\x8e\xf1\xa8\xff\xbb\x60\xf4\x03\x64\xab\x5b\x81\x65\x51\xe6\xda\xca\xfa\xf0\xb0\xac\x3e\x9c\xca\x26\x0e\x1d\xdb\x57\x5b\xbb\xb4\x9a\xa6\xb6\x9b\x1a\x6b\xd1\x9a\x9e\x7e\x33\x9a\xec\x41\x69\x45\x22\xb8\xb4\x51\xeb\x04\x77\xca\x6f\x7b\x7b\xc8\xb2\xb0\x95\x92\x25\x5b\xd0\x42\xaa\x2a\xdd\x32\x78\x4f\x0c\xab\x68\x46\x6c\xea\x6d\xf4\x5c\x5e\xde\xc4\xac\xa5\xf9\xd1\x00\x9f\x7d\x98\x65\x24\xbd\xc7\x
97\xd4\xb3\x3a\xa8\x2b\xa0\x34\x76\xf9\x65\x5f\x2d\x25\x95\x1b\xcf\xd6\xf4\x9b\x5f\x09\x95\xb0\x36\x3f\xdb\xd0\x39\x2a\x93\x1c\x9d\x03\xa2\x4a\xca\xf5\xf6\x10\xb6\x94\x89\x0b\x6a\x70\x12\x13\x49\x6e\x40\xe4\x29\x12\x2b\xbd\x80\x45\x11\x04\xaa\xc2\x8f\x56\x9e\x5c\x6b\xec\x8d\x5a\x0e\x14\x59\x06\x2b\x1e\x24\xcb\xc2\x56\x4a\x31\xbe\x23\x71\x1a\xfb\x51\x2a\x0b\x3b\x1c\x48\x10\xa5\x82\xdc\xc0\xbb\x3e\x24\x8d\x5a\x76\x2e\x09\xed\xc1\x65\x51\xb8\x83\xcb\x3e\x24\x8d\x5a\x2e\x5d\xfe\x02\x74\x2d\x3d\xf1\xef\xae\xb8\x4b\xe6\x5e\xd4\xaa\xe2\x2e\x5c\x5e\xec\x0e\xf5\x5b\x0c\xcb\x0a\xbb\xa4\x3c\xf7\x1f\x2a\x55\x69\x97\x8c\x7d\x68\x95\xa5\xad\xb4\xf4\x9c\xa5\x07\xb9\x7a\x05\xbb\xad\x50\x6f\xfb\xa0\x4e\x9b\x48\x23\x49\x92\x28\x87\x19\x3e\x32\xee\xca\x3b\x46\x7e\x7f\x18\x64\xcc\xcc\x0f\x34\xe9\x36\x8b\xb7\x6c\xa8\xa5\x5b\x54\x4c\x54\x5b\x15\x3a\xf1\x6c\x2d\xfe\x96\xc8\x0d\xba\x7b\x81\x88\xc8\x23\xab\xee\x7d\x3b\x92\xa7\x60\x29\xe3\xdc\xff\xb8\x64\xe1\xf6\xa2\x5a\x59\xdc\x6f\xeb\x45\x7d\x6a\xd1\x76\x1e\xea\xb8\xf1\xfa\x14\xd3\x36\x63\xe5\xd7\xf3\xe4\xbe\x25\xbd\x5e\x05\xeb\x73\x74\xb5\x21\x2a\x2e\x4e\xa3\x30\xdf\xbf\x43\x28\x2a\xd1\xa5\x2a\x9d\x8a\xfd\x76\xd8\x8d\xbc\x67\x65\xc7\xb8\x03\x45\xec\xa3\xb0\x37\x8a\x70\x4c\x68\x91\x51\x8e\x58\x80\xed\x4a\xf3\x81\x62\xca\x96\xbb\xf1\x52\xcd\x80\xfb\xe4\x4a\x5d\x6c\xdf\x6e\x20\x4b\x80\x30\x8e\x28\x93\xf9\xe9\x8d\x8a\x6d\xd5\x59\x65\x7b\xaa\x44\x9e\xc0\xc2\xd1\x7c\x40\x26\xd6\x1a\xce\xf9\xc5\x69\x7b\x6c\xec\xc8\x71\x7b\xe5\x21\x2e\xd3\xe5\x65\x93\x91\x53\x0b\x7b\x3a\xc7\xfa\x17\x6a\x01\xa7\x33\xd0\xf4\x40\x0f\x39\x87\xda\xe4\x54\x87\x3a\xd5\xe3\xc7\xa6\x8e\x20\xd4\x11\xb2\x4e\xb1\xe9\x14\x9b\x4e\xb1\xe9\x14\x9b\xfe\x15\x63\xd3\x47\xf5\xff\x97\x38\xe9\xcf\x14\xf8\x76\x82\x49\x13\x4c\xaa\x7d\xcd\x6c\x62\x42\x49\x87\x43\x49\x19\x33\x6f\xe3\x44\x6e\x9b\xab\x8a\x3e\x86\xaa\x99\x52\x1b\x5b\x59\x33\x02\x09\xa0\x21\xa1\x6b\x84\x6b\x66\xbb\xdc\x16\x0c\xd3\x68\xab\xec\x36\x4b\xd8\x60\x8a\x40\x31\x85\x6e\x14\x57\x13\xc2\xfb\x92\x10\xde\xbf\x88\xdc\xbc\x53\x5e\x7f\x82\x7a\x13\xd4\x9b\xa0\xde\x04\xf5\x90\x01\xf5\x94\xcb\x7b\x83\x25\x9e\xd0\xde\x84\xf6\x6a\x5f\x4b\xb3\x98\x00\xdf\x04\xf8\x6c\xbc\x7f\x19\x80\xaf\xf1\x71\x45\x22\x98\x40\xe0\x04\x02\x27\x10\xd8\x29\xf5\x04\x02\xff\x4a\x20\x30\xc1\x72\xf3\x65\x02\x40\xd7\xc1\xd1\xe2\x6b\xf1\xa9\x7b\xfb\xe4\x20\xc0\x68\x9d\xd4\xb4\xd3\x96\xb5\xa6\xd1\x41\x20\xe6\x89\xc3\x48\x65\x58\x13\x84\x9c\x56\x56\x3b\x0c\xe0\x6b\x83\x5c\x13\xd2\x9a\x90\xd6\x84\xb4\x26\xa4\x85\x0c\xa4\x45\x19\xfd\xff\x63\x6c\x52\xb5\x1f\x1e\x19\x74\x3a\xcd\xb9\x69\xce\xa6\x3a\x0f\x7a\x2d\x19\xc7\x81\x14\x5d\xcb\xd5\x03\xc9\x39\xd0\xb0\xd1\xb3\xcd\xfb\x7a\x2d\x5d\x3a\x48\xe1\xfa\x2e\xe6\x81\x42\x18\x86\xd6\xc1\xbe\xb1\x23\xd3\xf7\x34\xed\x19\x0a\x0b\xc4\x48\x44\xfd\x22\x50\xb6\x42\x58\xbb\xe5\x3d\xa7\x73\xd4\x8b\xc4\x8c\x70\x61\xec\x73\xee\xc3\x81\x8b\xf5\xe2\xd7\x52\x3e\xcf\xeb\xeb\x17\x3b\x71\x16\xda\x7d\xb8\xde\xf0\x7a\x8f\x06\x2d\xa7\x40\x7b\xc1\x9d\x41\x4d\xb6\x61\xa2\x4e\x9f\x3d\xa0\xc5\xae\xe3\x1c\x1d\x40\x6c\x48\x8b\x63\xa0\xb5\x01\xed\x8e\x02\xe9\x86\xc8\x3b\x06\xee\xdb\x4b\xde\xbd\xc0\xa1\x6f\xcb\xda\xfc\xc2\x44\x16\x87\x9c\x17\x31\xd3\x30\x20\x39\x42\xcb\x6f\xf2\xf1\xf4\x72\x10\xf8\x1c\xa0\xf3\xbd\x10\xea\x21\x35\x7d\xe8\x86\xdb\x15\xed\x81\x81\x07\x28\xbb\x13\x28\xc7\xf8\xce\x7d\x8d\xc2\x31\xb4\x7e\x94\xd6\xdb\x55\xef\x4a\xfb\xed\xc3\x40\x3e\xeb\x9f\xe9\x99\x0f\xdf\x08\x65\x88\x27\x73\x86\x31\x9d\x47\xdf\x55\x19\xba\x3d\xee\x15\x0a\xcd\x8c\xaa\x5e\xb9\xf6\x57\x33\x73\x5a\xa1\x89\x7b\x3b\xa0\xb2\xa4\xc2\xf6\xc1\x53\xb5\x00\xca\x23\xe5\xf4\x60\x6a\xb4\x2d\
x74\xea\x4e\xed\x3b\xe3\x47\xfb\xed\x82\x3d\x19\xd4\x3b\x6b\xaf\xae\x2b\x2f\x57\xb3\x82\x68\xcb\xed\x88\x2e\xe1\x5c\xd7\x26\xfa\x0a\x65\xe7\xce\x11\x33\xb4\xdd\x66\xe3\x37\xf6\xfa\x70\xd6\x4f\xa1\x21\x51\xd8\x3c\x26\x14\x4b\xc6\x87\x44\x27\x1c\x70\xf8\x9e\x46\xce\xab\x21\x07\x5f\xc1\x76\x17\x1b\x77\xb4\xda\x75\xa0\x0a\x3a\x30\xe1\xf8\x97\x32\x16\x2b\x00\x75\x85\xee\x62\x46\xef\xd3\x85\xb5\x6b\x60\xbe\xf2\x30\x7a\x8c\x0b\x4b\xa6\xd0\xf9\x64\x42\xe7\x07\x41\x41\xe3\x2c\x5d\xf9\x6d\xe9\x39\x98\x3b\x3b\x5d\x67\xd4\x5c\xed\xf2\xf0\x48\x7b\xbd\x2d\x31\xdd\x3f\x34\xad\x44\x76\x51\x9a\x56\x22\xa7\x95\xc8\x69\x25\xf2\xe1\x56\x22\x1f\x00\x32\x6a\x73\x92\xed\xe1\xc6\x7d\x9f\x49\x2c\x69\x7e\xc8\x31\x4c\x0c\xb4\xf2\x54\x3b\x79\x3b\x9e\x4d\xb4\xd1\x18\x3e\x5f\x9a\x93\xa2\x11\xc3\xda\x27\x0b\xaf\x37\x2e\x5c\x37\xfb\xeb\x9a\xd6\xc3\xac\xc3\xcc\xf8\x1e\x5b\x9d\xac\x22\x64\xb7\xed\x26\xb8\xf3\xb9\x3c\xbb\x1f\xe2\xb0\x22\x77\x43\x6a\x62\x29\x39\x59\xa6\xe6\xe5\xcd\x7b\x83\xc0\x5b\x8e\x93\x64\xac\xeb\xca\x4f\x65\xac\x4a\xbc\x1e\xcd\x82\xfa\x3c\x70\x36\xb6\xb5\xed\x79\xef\xec\x68\x00\xff\x54\xfa\xb5\xe3\xf1\xdb\xe1\xbe\xce\x76\x17\xaf\x57\xb6\x6b\x89\x05\x09\xce\x52\xb9\x01\x2a\x49\xbe\xd9\xf4\xd2\xb8\x7a\xbf\x91\x02\xf3\x22\x8c\x13\xf2\x77\xd8\x8e\x43\x8b\xe1\x54\x6e\x5e\x9d\xc7\x49\x44\x02\x22\xc7\xa4\x79\x81\x85\xb8\x65\x3c\x1c\x93\xe6\x59\xa2\xf8\x1c\x51\x95\x05\xd9\x20\x00\x21\x7e\x60\x21\x58\xa9\x56\xff\xbe\xb6\x5a\x5e\x5b\x3f\x1f\xd6\xd3\x3c\xc4\x4d\xba\x99\xb4\x63\x6e\x7d\x3e\x3d\x57\xd2\x18\x5f\x47\xe8\xc3\x06\x8a\x68\x6c\x7f\x3b\x72\x0f\xe7\xe2\x77\x77\xf1\xd0\x99\xab\xdf\x2e\xfe\xd6\xbb\xcd\x1a\xb9\x90\xd1\xaf\xf2\x38\x3d\xdb\x74\xf8\xeb\xe3\xda\xe8\x2a\x62\xb7\xda\x1b\x07\xa9\xdc\x30\x5e\xbc\x68\xfb\x6b\x9f\x97\xf1\xc6\xb1\xd8\x5c\x29\x1e\x49\x30\xc5\xf7\xde\xad\x91\x42\xf9\xdd\xed\x89\x80\x25\xbe\x37\xd7\xe7\x32\x5c\xe6\x35\xac\xd4\x0c\x2d\xf7\x90\xc4\xe3\xf5\xe3\x2f\x7f\x54\x18\x88\xe3\x61\x47\x85\x64\x7f\xc0\xd7\x3f\x1a\x92\x42\xe9\xc7\x1e\x0d\x95\x76\xa7\x51\xa0\x8f\x02\x1b\x46\x9e\x06\x42\xd1\xf2\x01\x07\x02\xde\xe9\x7d\x1a\x0b\xa7\x32\x16\xcc\xc0\xee\xc4\x90\xd2\x5f\x6f\x98\x54\x5d\xf2\x95\xe1\xa7\x69\x10\x3a\x06\xe1\x65\xb3\x17\x47\x58\x78\xd0\x45\xd6\x5b\xd5\x5f\x25\x1d\x71\x49\xa6\x7a\x64\xda\xd0\x6f\xc7\x3a\x4c\xe3\x09\xc0\x6e\x96\x2c\xa7\xa7\x77\x34\x10\x05\x08\x21\x44\x92\x65\x77\xdf\x20\x5c\xbc\xe7\x97\x3f\xf4\x1a\x45\xd6\xe7\x27\x4a\xde\x74\x27\x66\x11\x7d\x70\xba\xd3\x78\xf9\x1e\x0d\xca\xc8\x39\xde\x7c\xb3\xa6\xe1\xbc\xd7\xc1\x6a\x6f\xb3\x0e\x52\xbe\xe4\x98\x8a\x15\x70\x94\x70\x26\x59\xc0\xa2\xf2\x1c\xfb\xd9\xc5\xf9\xbc\xd5\x92\x9c\xa3\xdf\xe6\x1e\xb3\x0d\x49\xba\x87\x50\x5f\x84\xfe\xe9\xd6\xf8\xbb\xe6\xf0\x7a\xeb\xa6\x65\x3b\x86\x8b\x79\x93\xf5\x59\x20\x6e\xb4\xa7\x44\xf4\x3f\xa5\xfe\x67\x42\x12\xdb\xd3\xe7\xbb\xa5\xa3\x8c\x5c\x2b\x97\xbb\xbb\x7f\x8e\xc5\x6e\xed\x43\x5c\xbf\x74\xc8\x8f\xff\xe6\xd6\xbe\x91\xb6\xf5\x95\xe4\xed\x93\xc4\xa8\x5b\xf9\x76\x4d\x35\xb7\xd8\x8c\xb6\x7d\xaf\x72\xe0\xb6\xbd\x01\x63\x9e\x76\xab\x1a\x32\x76\xe4\x8c\x76\xc2\xad\x6c\xa2\x65\xf7\xcf\xf8\xa7\xda\x2a\xb9\x8c\x3d\x3c\xa3\x9d\x64\x33\xe5\x1a\xb5\x2d\xfb\x86\xa2\x5a\x7f\x19\x5b\x7f\xc6\x3f\xd1\x53\xd3\xe2\x41\x5b\xd3\x4f\xf0\xec\xb0\x42\x73\x43\xd2\x68\x27\xd3\x6a\x6a\x34\xf6\x4e\x1e\x52\x8b\x87\x6c\xcc\xae\x44\xfb\x9e\xa7\x51\x4f\x9d\x55\x03\x81\x8e\x67\xfc\xb4\x69\xf0\x3a\x18\xf2\x40\xd0\xf6\xa8\x34\xe3\xc9\x98\xaf\xf6\xda\x24\xd3\xeb\x60\xb9\x0e\xd3\x1f\xa9\xff\xee\x1f\xfd\x37\x00\x00\xff\xff\x69\x5d\x0a\x6a\x39\x9d\x00\x00") +var _v2SchemaJSON = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x5d\x4f\x93\xdb\x36\xb2\xbf\xfb\x53\xa0\x14\x57\xd9\xae\xd8\x92\xe3\xf7\x2e\xcf\x97\xd4\xbc\xd8\x49\x66\x37\x5e\x4f\x79\x26\xbb\x87\x78\x5c\x05\x91\x2d\x09\x09\x09\x30\x00\x38\x33\x5a\xef\x7c\xf7\x2d\xf0\x9f\x08\x02\x20\x41\x8a\xd2\xc8\x0e\x0f\xa9\x78\x28\xa0\xd1\xdd\x68\x34\x7e\xdd\xf8\xf7\xf9\x11\x42\x33\x49\x64\x04\xb3\xd7\x68\x76\x86\xfe\x76\xf9\xfe\x1f\xe8\x32\xd8\x40\x8c\xd1\x8a\x71\x74\x79\x8b\xd7\x6b\xe0\xe8\xd5\xfc\x25\x3a\xbb\x38\x9f\xcf\x9e\xab\x0a\x24\x54\xa5\x37\x52\x26\xaf\x17\x0b\x91\x17\x99\x13\xb6\xb8\x79\xb5\x10\x59\xdd\xf9\xef\x82\xd1\x6f\xf2\xc2\x8f\xf3\x4f\xb5\x1a\xea\xc7\x17\x45\x41\xc6\xd7\x8b\x90\xe3\x95\x7c\xf1\xf2\x7f\x8b\xca\x45\x3d\xb9\x4d\x32\xa6\xd8\xf2\x77\x08\x64\xfe\x8d\xc3\x9f\x29\xe1\xa0\x9a\xff\xed\x11\x42\x08\xcd\x8a\xd6\xb3\x9f\x15\x67\x74\xc5\xca\x7f\x27\x58\x6e\xc4\xec\x11\x42\xd7\x59\x5d\x1c\x86\x44\x12\x46\x71\x74\xc1\x59\x02\x5c\x12\x10\xb3\xd7\x68\x85\x23\x01\x59\x81\x04\x4b\x09\x9c\x6a\xbf\x7e\xce\x49\x7d\xba\x7b\x51\xfd\xa1\x44\xe2\xb0\x52\xac\x7d\xb3\x08\x61\x45\x68\x46\x56\x2c\x6e\x80\x86\x8c\xbf\xbd\x93\x40\x05\x61\x74\x96\x95\xbe\x7f\x84\xd0\x7d\x4e\xde\x42\xb7\xe4\xbe\x46\xbb\x14\x5b\x48\x4e\xe8\xba\x90\x05\xa1\x19\xd0\x34\xae\xc4\xce\xbe\xbc\x9a\xbf\x9c\x15\x7f\x5d\x57\xc5\x42\x10\x01\x27\x89\xe2\x48\x51\xb9\xda\x40\xd5\x87\x37\xc0\x15\x5f\x88\xad\x90\xdc\x10\x81\x42\x16\xa4\x31\x50\x39\x2f\x38\xad\xab\xb0\x53\xd8\xac\x94\x56\x6f\xc3\x84\xf4\x11\xa4\x50\xb3\xfa\xe9\xd3\x6f\x9f\x3e\xdf\x2f\xd0\xeb\x8f\x1f\x3f\x7e\xbc\xfe\xf6\xe9\xf7\xaf\x5f\x7f\xfc\x18\x7e\xfb\xec\xfb\xc7\xb3\x36\x79\x54\x43\xe8\x29\xc5\x31\x20\xc6\x11\x49\x9e\xe5\x12\x41\x66\xa0\xe8\xed\x1d\x8e\x93\x08\x5e\xa3\x27\x3b\xc3\x7c\xa2\x73\xba\xc4\x02\x2e\xb0\xdc\xf4\xe5\x76\xd1\xca\x96\xa2\x8a\x94\xcd\x21\xc9\x6c\xec\x2c\x70\x42\x9e\x34\x74\x9d\x19\x7c\xcd\x20\x9c\xea\x2e\x0a\xfe\x42\x84\xd4\x29\x04\x8c\x8a\xb4\x41\xa2\xc1\xdc\x19\x8a\x88\x90\x4a\x49\xef\xce\xdf\xbd\x45\x4a\x52\x81\x70\x10\x40\x22\x21\x44\xcb\x6d\xc5\xec\x4e\x3c\x1c\x45\xef\x57\x9a\xb5\x7d\xae\xfe\xe5\xe4\x31\x86\x90\xe0\xab\x6d\x02\x3b\x2e\xcb\x11\x90\xd9\xa8\xc6\x77\xc2\x59\x98\x06\xfd\xf9\x2e\x78\x45\x01\xa6\xa8\xa0\x71\x5c\xbe\x33\xa7\xd2\xd9\x5f\x95\xef\xd9\xd5\xac\xfd\xdc\x5d\xbf\x5e\xb8\xd1\x3e\xc7\x31\x48\xe0\x5e\x4c\x14\x65\xdf\xb8\xa8\x71\x10\x09\xa3\xc2\xc7\x02\xcb\xa2\x4e\x5a\x02\x82\x94\x13\xb9\xf5\x30\xe6\xb2\xa4\xb5\xfe\x9b\x3e\x7a\xb2\x55\xd2\xa8\x4a\xbc\x16\xb6\x71\x8e\x39\xc7\xdb\x9d\xe1\x10\x09\x71\xbd\x9c\xb3\x41\x89\xd7\xa5\x89\xdc\x57\xb5\x53\x4a\xfe\x4c\xe1\xbc\xa0\x21\x79\x0a\x1a\x0f\x70\xa7\x5c\x08\x8e\xde\xb0\xc0\x43\x24\xad\x74\x63\x0e\xb1\xd9\x90\xe1\xb0\x2d\x13\xa7\x6d\x78\xfd\x04\x14\x38\x8e\x90\xaa\xce\x63\xac\x3e\x23\xbc\x64\xa9\xb4\xf8\x03\x63\xde\xcd\xbe\x16\x13\x4a\x55\xac\x82\x12\xc6\xac\xd4\x35\xf7\x22\xd4\x3a\xff\x22\x73\x0e\x6e\x51\xa0\x75\x1e\xae\x8f\xe8\x5d\xc7\x59\xe6\xe4\x9a\x18\x8d\xd6\x1c\x53\x84\x4d\xb7\x67\x28\x37\x09\x84\x69\x88\x12\x0e\x01\x11\x80\x32\xa2\xf5\xb9\xaa\xc6\xd9\x73\x53\xab\xfb\xb4\x2e\x20\xc6\x54\x92\xa0\x9a\xf3\x69\x1a\x2f\x81\x77\x37\xae\x53\x1a\xce\x40\xc4\xa8\x82\x1c\xb5\xef\xda\x24\x7d\xb9\x61\x69\x14\xa2\x25\xa0\x90\xac\x56\xc0\x81\x4a\xb4\xe2\x2c\xce\x4a\x64\x7a\x9a\x23\xf4\x13\x91\x3f\xa7\x4b\xf4\x63\x84\x6f\x18\x87\x10\xbd\xc3\xfc\x8f\x90\xdd\x52\x44\x04\xc2\x51\xc4\x6e\x21\x74\x48\x21\x81\xc7\xe2\xfd\xea\x12\xf8\x0d\x09\xf6\xe9\x47\x35\xaf\x67\xc4\x14\xf7\x22\x27\x97\xe1\xe2\x76\x2d\x06\x8c\x4a\x1c\x48\x3f\x73\x2d\x0b\x5b\x29\x45\x24\x00\x2a\x0c\x11\xec\x94\xca\xc2\xa6\xc1\x37\x21\x43\x8
[several kilobytes of escaped, gzip-compressed bytes elided here: the remainder of the embedded v2/schema.json asset literal in this generated go-bindata file]") func v2SchemaJSONBytes() ([]byte, error) { return bindataRead( @@ -104,7
+104,7 @@ func v2SchemaJSON() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "v2/schema.json", size: 40249, mode: os.FileMode(420), modTime: time.Unix(1482389892, 0)} + info := bindataFileInfo{name: "v2/schema.json", size: 40249, mode: os.FileMode(420), modTime: time.Unix(1523760397, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -162,7 +162,7 @@ func AssetNames() []string { // _bindata is a table, holding each asset generator, mapped to its name. var _bindata = map[string]func() (*asset, error){ "jsonschema-draft-04.json": jsonschemaDraft04JSON, - "v2/schema.json": v2SchemaJSON, + "v2/schema.json": v2SchemaJSON, } // AssetDir returns the file names below a certain @@ -204,6 +204,7 @@ type bintree struct { Func func() (*asset, error) Children map[string]*bintree } + var _bintree = &bintree{nil, map[string]*bintree{ "jsonschema-draft-04.json": &bintree{jsonschemaDraft04JSON, map[string]*bintree{}}, "v2": &bintree{nil, map[string]*bintree{ @@ -257,4 +258,3 @@ func _filePath(dir, name string) string { cannonicalName := strings.Replace(name, "\\", "/", -1) return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) } - diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/spec/debug.go b/cluster-autoscaler/vendor/github.com/go-openapi/spec/debug.go new file mode 100644 index 000000000000..7edb95a61427 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-openapi/spec/debug.go @@ -0,0 +1,47 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "fmt" + "log" + "os" + "path/filepath" + "runtime" +) + +var ( + // Debug is true when the SWAGGER_DEBUG env var is not empty. + // It enables a more verbose logging of validators. + Debug = os.Getenv("SWAGGER_DEBUG") != "" + // validateLogger is a debug logger for this package + specLogger *log.Logger +) + +func init() { + debugOptions() +} + +func debugOptions() { + specLogger = log.New(os.Stdout, "spec:", log.LstdFlags) +} + +func debugLog(msg string, args ...interface{}) { + // A private, trivial trace logger, based on go-openapi/spec/expander.go:debugLog() + if Debug { + _, file1, pos1, _ := runtime.Caller(1) + specLogger.Printf("%s:%d: %s", filepath.Base(file1), pos1, fmt.Sprintf(msg, args...)) + } +} diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/spec/expander.go b/cluster-autoscaler/vendor/github.com/go-openapi/spec/expander.go index 5054a7be670d..456a9dd7efb9 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/spec/expander.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/spec/expander.go @@ -30,16 +30,12 @@ import ( "github.com/go-openapi/swag" ) -var ( - // Debug enables logging when SWAGGER_DEBUG env var is not empty - Debug = os.Getenv("SWAGGER_DEBUG") != "" -) - // ExpandOptions provides options for expand. 
type ExpandOptions struct { - RelativeBase string - SkipSchemas bool - ContinueOnError bool + RelativeBase string + SkipSchemas bool + ContinueOnError bool + AbsoluteCircularRef bool } // ResolutionCache a cache for resolving urls @@ -49,7 +45,7 @@ type ResolutionCache interface { } type simpleCache struct { - lock sync.Mutex + lock sync.RWMutex store map[string]interface{} } @@ -59,6 +55,7 @@ func init() { resCache = initResolutionCache() } +// initResolutionCache initializes the URI resolution cache func initResolutionCache() ResolutionCache { return &simpleCache{store: map[string]interface{}{ "http://swagger.io/v2/schema.json": MustLoadSwagger20Schema(), @@ -66,16 +63,37 @@ func initResolutionCache() ResolutionCache { }} } +// resolverContext allows to share a context during spec processing. +// At the moment, it just holds the index of circular references found. +type resolverContext struct { + // circulars holds all visited circular references, which allows shortcuts. + // NOTE: this is not just a performance improvement: it is required to figure out + // circular references which participate several cycles. + // This structure is privately instantiated and needs not be locked against + // concurrent access, unless we chose to implement a parallel spec walking. + circulars map[string]bool + basePath string +} + +func newResolverContext(originalBasePath string) *resolverContext { + return &resolverContext{ + circulars: make(map[string]bool), + basePath: originalBasePath, // keep the root base path in context + } +} + +// Get retrieves a cached URI func (s *simpleCache) Get(uri string) (interface{}, bool) { debugLog("getting %q from resolution cache", uri) - s.lock.Lock() + s.lock.RLock() v, ok := s.store[uri] debugLog("got %q from resolution cache: %t", uri, ok) - s.lock.Unlock() + s.lock.RUnlock() return v, ok } +// Set caches a URI func (s *simpleCache) Set(uri string, data interface{}) { s.lock.Lock() s.store[uri] = data @@ -84,7 +102,7 @@ func (s *simpleCache) Set(uri string, data interface{}) { // ResolveRefWithBase resolves a reference against a context root with preservation of base path func ResolveRefWithBase(root interface{}, ref *Ref, opts *ExpandOptions) (*Schema, error) { - resolver, err := defaultSchemaLoader(root, opts, nil) + resolver, err := defaultSchemaLoader(root, opts, nil, nil) if err != nil { return nil, err } @@ -116,21 +134,21 @@ func ResolveRef(root interface{}, ref *Ref) (*Schema, error) { case map[string]interface{}: b, _ := json.Marshal(sch) newSch := new(Schema) - json.Unmarshal(b, newSch) + _ = json.Unmarshal(b, newSch) return newSch, nil default: return nil, fmt.Errorf("unknown type for the resolved reference") } } -// ResolveParameter resolves a paramter reference against a context root +// ResolveParameter resolves a parameter reference against a context root func ResolveParameter(root interface{}, ref Ref) (*Parameter, error) { return ResolveParameterWithBase(root, ref, nil) } -// ResolveParameterWithBase resolves a paramter reference against a context root and base path +// ResolveParameterWithBase resolves a parameter reference against a context root and base path func ResolveParameterWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*Parameter, error) { - resolver, err := defaultSchemaLoader(root, opts, nil) + resolver, err := defaultSchemaLoader(root, opts, nil, nil) if err != nil { return nil, err } @@ -149,7 +167,7 @@ func ResolveResponse(root interface{}, ref Ref) (*Response, error) { // ResolveResponseWithBase resolves response a 
reference against a context root and base path func ResolveResponseWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*Response, error) { - resolver, err := defaultSchemaLoader(root, opts, nil) + resolver, err := defaultSchemaLoader(root, opts, nil, nil) if err != nil { return nil, err } @@ -163,7 +181,7 @@ func ResolveResponseWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*R // ResolveItems resolves header and parameter items reference against a context root and base path func ResolveItems(root interface{}, ref Ref, opts *ExpandOptions) (*Items, error) { - resolver, err := defaultSchemaLoader(root, opts, nil) + resolver, err := defaultSchemaLoader(root, opts, nil, nil) if err != nil { return nil, err } @@ -180,7 +198,7 @@ func ResolveItems(root interface{}, ref Ref, opts *ExpandOptions) (*Items, error // ResolvePathItem resolves response a path item against a context root and base path func ResolvePathItem(root interface{}, ref Ref, opts *ExpandOptions) (*PathItem, error) { - resolver, err := defaultSchemaLoader(root, opts, nil) + resolver, err := defaultSchemaLoader(root, opts, nil, nil) if err != nil { return nil, err } @@ -199,6 +217,7 @@ type schemaLoader struct { root interface{} options *ExpandOptions cache ResolutionCache + context *resolverContext loadDoc func(string) (json.RawMessage, error) } @@ -221,7 +240,8 @@ func init() { func defaultSchemaLoader( root interface{}, expandOptions *ExpandOptions, - cache ResolutionCache) (*schemaLoader, error) { + cache ResolutionCache, + context *resolverContext) (*schemaLoader, error) { if cache == nil { cache = resCache @@ -229,11 +249,15 @@ func defaultSchemaLoader( if expandOptions == nil { expandOptions = &ExpandOptions{} } - + absBase, _ := absPath(expandOptions.RelativeBase) + if context == nil { + context = newResolverContext(absBase) + } return &schemaLoader{ root: root, options: expandOptions, cache: cache, + context: context, loadDoc: func(path string) (json.RawMessage, error) { debugLog("fetching document at %q", path) return PathLoader(path) @@ -312,12 +336,6 @@ func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointe return ret } -func debugLog(msg string, args ...interface{}) { - if Debug { - log.Printf(msg, args...) - } -} - // normalize absolute path for cache. // on Windows, drive letters should be converted to lower as scheme in net/url.URL func normalizeAbsPath(path string) string { @@ -336,12 +354,17 @@ func normalizeAbsPath(path string) string { // base could be a directory or a full file path func normalizePaths(refPath, base string) string { refURL, _ := url.Parse(refPath) - if path.IsAbs(refURL.Path) { + if path.IsAbs(refURL.Path) || filepath.IsAbs(refPath) { // refPath is actually absolute if refURL.Host != "" { return refPath } - return filepath.FromSlash(refPath) + parts := strings.Split(refPath, "#") + result := filepath.FromSlash(parts[0]) + if len(parts) == 2 { + result += "#" + parts[1] + } + return result } // relative refPath @@ -361,6 +384,59 @@ func normalizePaths(refPath, base string) string { return baseURL.String() } +// denormalizePaths returns to simplest notation on file $ref, +// i.e. strips the absolute path and sets a path relative to the base path. 
+// +// This is currently used when we rewrite ref after a circular ref has been detected +func denormalizeFileRef(ref *Ref, relativeBase, originalRelativeBase string) *Ref { + debugLog("denormalizeFileRef for: %s", ref.String()) + + if ref.String() == "" || ref.IsRoot() || ref.HasFragmentOnly { + return ref + } + // strip relativeBase from URI + relativeBaseURL, _ := url.Parse(relativeBase) + relativeBaseURL.Fragment = "" + + if relativeBaseURL.IsAbs() && strings.HasPrefix(ref.String(), relativeBase) { + // this should work for absolute URI (e.g. http://...): we have an exact match, just trim prefix + r, _ := NewRef(strings.TrimPrefix(ref.String(), relativeBase)) + return &r + } + + if relativeBaseURL.IsAbs() { + // other absolute URL get unchanged (i.e. with a non-empty scheme) + return ref + } + + // for relative file URIs: + originalRelativeBaseURL, _ := url.Parse(originalRelativeBase) + originalRelativeBaseURL.Fragment = "" + if strings.HasPrefix(ref.String(), originalRelativeBaseURL.String()) { + // the resulting ref is in the expanded spec: return a local ref + r, _ := NewRef(strings.TrimPrefix(ref.String(), originalRelativeBaseURL.String())) + return &r + } + + // check if we may set a relative path, considering the original base path for this spec. + // Example: + // spec is located at /mypath/spec.json + // my normalized ref points to: /mypath/item.json#/target + // expected result: item.json#/target + parts := strings.Split(ref.String(), "#") + relativePath, err := filepath.Rel(path.Dir(originalRelativeBaseURL.String()), parts[0]) + if err != nil { + // there is no common ancestor (e.g. different drives on windows) + // leaves the ref unchanged + return ref + } + if len(parts) == 2 { + relativePath += "#" + parts[1] + } + r, _ := NewRef(relativePath) + return &r +} + // relativeBase could be an ABSOLUTE file path or an ABSOLUTE URL func normalizeFileRef(ref *Ref, relativeBase string) *Ref { // This is important for when the reference is pointing to the root schema @@ -369,8 +445,7 @@ func normalizeFileRef(ref *Ref, relativeBase string) *Ref { return &r } - refURL := ref.GetURL() - debugLog("normalizing %s against %s (%s)", ref.String(), relativeBase, refURL.String()) + debugLog("normalizing %s against %s", ref.String(), relativeBase) s := normalizePaths(ref.String(), relativeBase) r, _ := NewRef(s) @@ -395,7 +470,7 @@ func (r *schemaLoader) resolveRef(ref *Ref, target interface{}, basePath string) // it is pointing somewhere in the root. 
root := r.root if (ref.IsRoot() || ref.HasFragmentOnly) && root == nil && basePath != "" { - if baseRef, err := NewRef(basePath); err == nil { + if baseRef, erb := NewRef(basePath); erb == nil { root, _, _, _ = r.load(baseRef.GetURL()) } } @@ -430,9 +505,11 @@ func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) toFetch := *refURL toFetch.Fragment = "" - data, fromCache := r.cache.Get(toFetch.String()) + normalized := normalizeAbsPath(toFetch.String()) + + data, fromCache := r.cache.Get(normalized) if !fromCache { - b, err := r.loadDoc(toFetch.String()) + b, err := r.loadDoc(normalized) if err != nil { return nil, url.URL{}, false, err } @@ -440,7 +517,7 @@ func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) if err := json.Unmarshal(b, &data); err != nil { return nil, url.URL{}, false, err } - r.cache.Set(toFetch.String(), data) + r.cache.Set(normalized, data) } return data, toFetch, fromCache, nil @@ -468,7 +545,7 @@ func absPath(fname string) (string, error) { // ExpandSpec expands the references in a swagger spec func ExpandSpec(spec *Swagger, options *ExpandOptions) error { - resolver, err := defaultSchemaLoader(spec, options, nil) + resolver, err := defaultSchemaLoader(spec, options, nil, nil) // Just in case this ever returns an error. if shouldStopOnError(err, resolver.options) { return err @@ -484,7 +561,7 @@ func ExpandSpec(spec *Swagger, options *ExpandOptions) error { for key, definition := range spec.Definitions { var def *Schema var err error - if def, err = expandSchema(definition, []string{fmt.Sprintf("#/defintions/%s", key)}, resolver, specBasePath); shouldStopOnError(err, resolver.options) { + if def, err = expandSchema(definition, []string{fmt.Sprintf("#/definitions/%s", key)}, resolver, specBasePath); shouldStopOnError(err, resolver.options) { return err } if def != nil { @@ -531,25 +608,35 @@ func shouldStopOnError(err error, opts *ExpandOptions) bool { return false } -// ExpandSchema expands the refs in the schema object with reference to the root object -// go-openapi/validate uses this function -// notice that it is impossible to reference a json scema in a different file other than root -func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error { - // Only save the root to a tmp file if it isn't nil. 
- var base string +// baseForRoot loads in the cache the root document and produces a fake "root" base path entry +// for further $ref resolution +func baseForRoot(root interface{}, cache ResolutionCache) string { + // cache the root document to resolve $ref's + const rootBase = "root" if root != nil { - base, _ = absPath("root") + base, _ := absPath(rootBase) + normalizedBase := normalizeAbsPath(base) + debugLog("setting root doc in cache at: %s", normalizedBase) if cache == nil { cache = resCache } - cache.Set(normalizeAbsPath(base), root) - base = "root" + cache.Set(normalizedBase, root) + return rootBase } + return "" +} +// ExpandSchema expands the refs in the schema object with reference to the root object +// go-openapi/validate uses this function +// notice that it is impossible to reference a json schema in a different file other than root +func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error { opts := &ExpandOptions{ - RelativeBase: base, + // when a root is specified, cache the root as an in-memory document for $ref retrieval + RelativeBase: baseForRoot(root, cache), SkipSchemas: false, ContinueOnError: false, + // when no base path is specified, remaining $ref (circular) are rendered with an absolute path + AbsoluteCircularRef: true, } return ExpandSchemaWithBasePath(schema, cache, opts) } @@ -565,7 +652,7 @@ func ExpandSchemaWithBasePath(schema *Schema, cache ResolutionCache, opts *Expan basePath, _ = absPath(opts.RelativeBase) } - resolver, err := defaultSchemaLoader(nil, opts, cache) + resolver, err := defaultSchemaLoader(nil, opts, cache, nil) if err != nil { return err } @@ -617,8 +704,32 @@ func basePathFromSchemaID(oldBasePath, id string) string { return u.String() } -func isCircular(ref *Ref, basePath string, parentRefs ...string) bool { - return basePath != "" && swag.ContainsStringsCI(parentRefs, ref.String()) +// isCircular detects cycles in sequences of $ref. +// It relies on a private context (which needs not be locked). 
+func (r *schemaLoader) isCircular(ref *Ref, basePath string, parentRefs ...string) (foundCycle bool) { + normalizedRef := normalizePaths(ref.String(), basePath) + if _, ok := r.context.circulars[normalizedRef]; ok { + // circular $ref has been already detected in another explored cycle + foundCycle = true + return + } + foundCycle = swag.ContainsStringsCI(parentRefs, normalizedRef) + if foundCycle { + r.context.circulars[normalizedRef] = true + } + return +} + +func updateBasePath(transitive *schemaLoader, resolver *schemaLoader, basePath string) string { + if transitive != resolver { + debugLog("got a new resolver") + if transitive.options != nil && transitive.options.RelativeBase != "" { + basePath, _ = absPath(transitive.options.RelativeBase) + debugLog("new basePath = %s", basePath) + } + } + + return basePath } func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { @@ -634,6 +745,7 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba otherwise the basePath should inherit the parent's */ // important: ID can be relative path if target.ID != "" { + debugLog("schema has ID: %s", target.ID) // handling the case when id is a folder // remember that basePath has to be a file refPath := target.ID @@ -645,7 +757,6 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba } /* Explain here what this function does */ - var t *Schema /* if Ref is found, everything else doesn't matter */ /* Ref also changes the resolution scope of children expandSchema */ @@ -654,14 +765,21 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba normalizedRef := normalizeFileRef(&target.Ref, basePath) normalizedBasePath := normalizedRef.RemoteURI() - /* this means there is a circle in the recursion tree */ - /* return the Ref */ - if isCircular(normalizedRef, basePath, parentRefs...) { - target.Ref = *normalizedRef + if resolver.isCircular(normalizedRef, basePath, parentRefs...) { + // this means there is a cycle in the recursion tree: return the Ref + // - circular refs cannot be expanded. We leave them as ref. 
+ // - denormalization means that a new local file ref is set relative to the original basePath + debugLog("shortcut circular ref: basePath: %s, normalizedPath: %s, normalized ref: %s", + basePath, normalizedBasePath, normalizedRef.String()) + if !resolver.options.AbsoluteCircularRef { + target.Ref = *denormalizeFileRef(normalizedRef, normalizedBasePath, resolver.context.basePath) + } else { + target.Ref = *normalizedRef + } return &target, nil } - debugLog("\nbasePath: %s", basePath) + debugLog("basePath: %s", basePath) if Debug { b, _ := json.Marshal(target) debugLog("calling Resolve with target: %s", string(b)) @@ -672,7 +790,15 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba if t != nil { parentRefs = append(parentRefs, normalizedRef.String()) - return expandSchema(*t, parentRefs, resolver, normalizedBasePath) + var err error + transitiveResolver, err := transitiveResolver(basePath, target.Ref, resolver) + if shouldStopOnError(err, resolver.options) { + return nil, err + } + + basePath = updateBasePath(transitiveResolver, resolver, normalizedBasePath) + + return expandSchema(*t, parentRefs, transitiveResolver, basePath) } } @@ -781,7 +907,7 @@ func derefPathItem(pathItem *PathItem, parentRefs []string, resolver *schemaLoad normalizedRef := normalizeFileRef(&pathItem.Ref, basePath) normalizedBasePath := normalizedRef.RemoteURI() - if isCircular(normalizedRef, basePath, parentRefs...) { + if resolver.isCircular(normalizedRef, basePath, parentRefs...) { return nil } @@ -807,9 +933,17 @@ func expandPathItem(pathItem *PathItem, resolver *schemaLoader, basePath string) if err := derefPathItem(pathItem, parentRefs, resolver, basePath); shouldStopOnError(err, resolver.options) { return err } + if pathItem.Ref.String() != "" { + var err error + resolver, err = transitiveResolver(basePath, pathItem.Ref, resolver) + if shouldStopOnError(err, resolver.options) { + return err + } + } pathItem.Ref = Ref{} - parentRefs = parentRefs[0:] + // Currently unused: + //parentRefs = parentRefs[0:] for idx := range pathItem.Parameters { if err := expandParameter(&(pathItem.Parameters[idx]), resolver, basePath); shouldStopOnError(err, resolver.options) { @@ -867,19 +1001,68 @@ func expandOperation(op *Operation, resolver *schemaLoader, basePath string) err return nil } +func transitiveResolver(basePath string, ref Ref, resolver *schemaLoader) (*schemaLoader, error) { + if ref.IsRoot() || ref.HasFragmentOnly { + return resolver, nil + } + + baseRef, _ := NewRef(basePath) + currentRef := normalizeFileRef(&ref, basePath) + // Set a new root to resolve against + if !strings.HasPrefix(currentRef.String(), baseRef.String()) { + rootURL := currentRef.GetURL() + rootURL.Fragment = "" + root, _ := resolver.cache.Get(rootURL.String()) + var err error + + // shallow copy of resolver options to set a new RelativeBase when + // traversing multiple documents + newOptions := resolver.options + newOptions.RelativeBase = rootURL.String() + debugLog("setting new root: %s", newOptions.RelativeBase) + resolver, err = defaultSchemaLoader(root, newOptions, resolver.cache, resolver.context) + if err != nil { + return nil, err + } + } + + return resolver, nil +} + +// ExpandResponseWithRoot expands a response based on a root document, not a fetchable document +func ExpandResponseWithRoot(response *Response, root interface{}, cache ResolutionCache) error { + opts := &ExpandOptions{ + RelativeBase: baseForRoot(root, cache), + SkipSchemas: false, + ContinueOnError: false, + // when no base path is 
specified, remaining $ref (circular) are rendered with an absolute path + AbsoluteCircularRef: true, + } + resolver, err := defaultSchemaLoader(root, opts, nil, nil) + if err != nil { + return err + } + + return expandResponse(response, resolver, opts.RelativeBase) +} + // ExpandResponse expands a response based on a basepath // This is the exported version of expandResponse // all refs inside response will be resolved relative to basePath func ExpandResponse(response *Response, basePath string) error { + var specBasePath string + if basePath != "" { + specBasePath, _ = absPath(basePath) + } opts := &ExpandOptions{ - RelativeBase: basePath, + RelativeBase: specBasePath, } - resolver, err := defaultSchemaLoader(nil, opts, nil) + resolver, err := defaultSchemaLoader(nil, opts, nil, nil) if err != nil { return err } - return expandResponse(response, resolver, basePath) + return expandResponse(response, resolver, opts.RelativeBase) } func derefResponse(response *Response, parentRefs []string, resolver *schemaLoader, basePath string) error { @@ -889,7 +1072,7 @@ func derefResponse(response *Response, parentRefs []string, resolver *schemaLoad normalizedRef := normalizeFileRef(&response.Ref, basePath) normalizedBasePath := normalizedRef.RemoteURI() - if isCircular(normalizedRef, basePath, parentRefs...) { + if resolver.isCircular(normalizedRef, basePath, parentRefs...) { return nil } @@ -910,16 +1093,31 @@ func expandResponse(response *Response, resolver *schemaLoader, basePath string) if response == nil { return nil } - parentRefs := []string{} if err := derefResponse(response, parentRefs, resolver, basePath); shouldStopOnError(err, resolver.options) { return err } + if response.Ref.String() != "" { + transitiveResolver, err := transitiveResolver(basePath, response.Ref, resolver) + if shouldStopOnError(err, transitiveResolver.options) { + return err + } + basePath = updateBasePath(transitiveResolver, resolver, basePath) + resolver = transitiveResolver + } + if response.Schema != nil && response.Schema.Ref.String() != "" { + // schema expanded to a $ref in another root + var ern error + response.Schema.Ref, ern = NewRef(normalizePaths(response.Schema.Ref.String(), response.Ref.RemoteURI())) + if ern != nil { + return ern + } + } response.Ref = Ref{} parentRefs = parentRefs[0:] if !resolver.options.SkipSchemas && response.Schema != nil { - parentRefs = append(parentRefs, response.Schema.Ref.String()) + // parentRefs = append(parentRefs, response.Schema.Ref.String()) s, err := expandSchema(*response.Schema, parentRefs, resolver, basePath) if shouldStopOnError(err, resolver.options) { return err @@ -930,19 +1128,40 @@ func expandResponse(response *Response, resolver *schemaLoader, basePath string) return nil } +// ExpandParameterWithRoot expands a parameter based on a root document, not a fetchable document +func ExpandParameterWithRoot(parameter *Parameter, root interface{}, cache ResolutionCache) error { + opts := &ExpandOptions{ + RelativeBase: baseForRoot(root, cache), + SkipSchemas: false, + ContinueOnError: false, + // when no base path is specified, remaining $ref (circular) are rendered with an absolute path + AbsoluteCircularRef: true, + } + resolver, err := defaultSchemaLoader(root, opts, nil, nil) + if err != nil { + return err + } + + return expandParameter(parameter, resolver, opts.RelativeBase) +} + // ExpandParameter expands a parameter based on a basepath // This is the exported version of expandParameter // all refs inside parameter will be resolved relative to basePath func 
ExpandParameter(parameter *Parameter, basePath string) error { + var specBasePath string + if basePath != "" { + specBasePath, _ = absPath(basePath) + } opts := &ExpandOptions{ - RelativeBase: basePath, + RelativeBase: specBasePath, } - resolver, err := defaultSchemaLoader(nil, opts, nil) + resolver, err := defaultSchemaLoader(nil, opts, nil, nil) if err != nil { return err } - return expandParameter(parameter, resolver, basePath) + return expandParameter(parameter, resolver, opts.RelativeBase) } func derefParameter(parameter *Parameter, parentRefs []string, resolver *schemaLoader, basePath string) error { @@ -951,7 +1170,7 @@ func derefParameter(parameter *Parameter, parentRefs []string, resolver *schemaL normalizedRef := normalizeFileRef(&parameter.Ref, basePath) normalizedBasePath := normalizedRef.RemoteURI() - if isCircular(normalizedRef, basePath, parentRefs...) { + if resolver.isCircular(normalizedRef, basePath, parentRefs...) { return nil } @@ -977,11 +1196,27 @@ func expandParameter(parameter *Parameter, resolver *schemaLoader, basePath stri if err := derefParameter(parameter, parentRefs, resolver, basePath); shouldStopOnError(err, resolver.options) { return err } + if parameter.Ref.String() != "" { + transitiveResolver, err := transitiveResolver(basePath, parameter.Ref, resolver) + if shouldStopOnError(err, transitiveResolver.options) { + return err + } + basePath = updateBasePath(transitiveResolver, resolver, basePath) + resolver = transitiveResolver + } + + if parameter.Schema != nil && parameter.Schema.Ref.String() != "" { + // schema expanded to a $ref in another root + var ern error + parameter.Schema.Ref, ern = NewRef(normalizePaths(parameter.Schema.Ref.String(), parameter.Ref.RemoteURI())) + if ern != nil { + return ern + } + } parameter.Ref = Ref{} parentRefs = parentRefs[0:] if !resolver.options.SkipSchemas && parameter.Schema != nil { - parentRefs = append(parentRefs, parameter.Schema.Ref.String()) s, err := expandSchema(*parameter.Schema, parentRefs, resolver, basePath) if shouldStopOnError(err, resolver.options) { return err diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/spec/go.mod b/cluster-autoscaler/vendor/github.com/go-openapi/spec/go.mod new file mode 100644 index 000000000000..5af64c10b574 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-openapi/spec/go.mod @@ -0,0 +1,16 @@ +module github.com/go-openapi/spec + +require ( + github.com/PuerkitoBio/purell v1.1.0 // indirect + github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/go-openapi/jsonpointer v0.17.0 + github.com/go-openapi/jsonreference v0.17.0 + github.com/go-openapi/swag v0.17.0 + github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/testify v1.2.2 + golang.org/x/net v0.0.0-20181005035420-146acd28ed58 // indirect + golang.org/x/text v0.3.0 // indirect + gopkg.in/yaml.v2 v2.2.1 +) diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/spec/go.sum b/cluster-autoscaler/vendor/github.com/go-openapi/spec/go.sum new file mode 100644 index 000000000000..ab6bfb6086ad --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-openapi/spec/go.sum @@ -0,0 +1,22 @@ +github.com/PuerkitoBio/purell v1.1.0 h1:rmGxhojJlM0tuKtfdvliR84CFHljx9ag64t2xmVkjK4= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578
h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-openapi/jsonpointer v0.17.0 h1:Bpl2DtZ6k7wKqfFs7e+4P08+M9I3FQgn09a1UsRUQbk= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonreference v0.17.0 h1:d/o7/fsLWWQZACbihvZxcyLQ59jfUVs7WOJv/ak7T7A= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/swag v0.17.0 h1:7wu+dZ5k83kvUWeAb+WUkFiUhDzwGqzTR/NhWzeo1JU= +github.com/go-openapi/swag v0.17.0/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58 h1:otZG8yDCO4LVps5+9bxOeNiCvgmOyt96J3roHTYs7oE= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/spec/header.go b/cluster-autoscaler/vendor/github.com/go-openapi/spec/header.go index 85c4d454c1e8..82f77f7709b1 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/spec/header.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/spec/header.go @@ -22,6 +22,7 @@ import ( "github.com/go-openapi/swag" ) +// HeaderProps describes a response header type HeaderProps struct { Description string `json:"description,omitempty"` } @@ -153,7 +154,7 @@ func (h Header) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } -// UnmarshalJSON marshal this from JSON +// UnmarshalJSON unmarshals this header from JSON func (h *Header) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &h.CommonValidations); err != nil { return err @@ -164,32 +165,29 @@ func (h *Header) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &h.VendorExtensible); err != nil { return err } - if err := json.Unmarshal(data, &h.HeaderProps); err != nil { - return err - } - return nil + return json.Unmarshal(data, &h.HeaderProps) } // JSONLookup look up a value by the json property name -func (p Header) JSONLookup(token string) (interface{}, error) { - if ex, ok := p.Extensions[token]; ok { +func (h Header) JSONLookup(token string) (interface{}, error) { + if ex, ok := h.Extensions[token]; ok { return &ex, nil } - r, _, err := jsonpointer.GetForToken(p.CommonValidations, token) + r, _, err := jsonpointer.GetForToken(h.CommonValidations, token) if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { return nil, err } if r != nil { return r, nil } - r, _, err 
= jsonpointer.GetForToken(p.SimpleSchema, token) + r, _, err = jsonpointer.GetForToken(h.SimpleSchema, token) if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { return nil, err } if r != nil { return r, nil } - r, _, err = jsonpointer.GetForToken(p.HeaderProps, token) + r, _, err = jsonpointer.GetForToken(h.HeaderProps, token) return r, err } diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/spec/info.go b/cluster-autoscaler/vendor/github.com/go-openapi/spec/info.go index fb8b7c4ac541..cfb37ec12a64 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/spec/info.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/spec/info.go @@ -52,14 +52,14 @@ func (e Extensions) GetBool(key string) (bool, bool) { // GetStringSlice gets a string value from the extensions func (e Extensions) GetStringSlice(key string) ([]string, bool) { if v, ok := e[strings.ToLower(key)]; ok { - arr, ok := v.([]interface{}) - if !ok { + arr, isSlice := v.([]interface{}) + if !isSlice { return nil, false } var strs []string for _, iface := range arr { - str, ok := iface.(string) - if !ok { + str, isString := iface.(string) + if !isString { return nil, false } strs = append(strs, str) diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/spec/items.go b/cluster-autoscaler/vendor/github.com/go-openapi/spec/items.go index 492423ef7fd5..cf4298971664 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/spec/items.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/spec/items.go @@ -22,6 +22,7 @@ import ( "github.com/go-openapi/swag" ) +// SimpleSchema describe swagger simple schemas for parameters and headers type SimpleSchema struct { Type string `json:"type,omitempty"` Format string `json:"format,omitempty"` @@ -31,6 +32,7 @@ type SimpleSchema struct { Example interface{} `json:"example,omitempty"` } +// TypeName return the type (or format) of a simple schema func (s *SimpleSchema) TypeName() string { if s.Format != "" { return s.Format @@ -38,6 +40,7 @@ func (s *SimpleSchema) TypeName() string { return s.Type } +// ItemsTypeName yields the type of items in a simple schema array func (s *SimpleSchema) ItemsTypeName() string { if s.Items == nil { return "" @@ -45,6 +48,7 @@ func (s *SimpleSchema) ItemsTypeName() string { return s.Items.TypeName() } +// CommonValidations describe common JSON-schema validations type CommonValidations struct { Maximum *float64 `json:"maximum,omitempty"` ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` @@ -212,18 +216,18 @@ func (i Items) MarshalJSON() ([]byte, error) { } // JSONLookup look up a value by the json property name -func (p Items) JSONLookup(token string) (interface{}, error) { +func (i Items) JSONLookup(token string) (interface{}, error) { if token == "$ref" { - return &p.Ref, nil + return &i.Ref, nil } - r, _, err := jsonpointer.GetForToken(p.CommonValidations, token) + r, _, err := jsonpointer.GetForToken(i.CommonValidations, token) if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { return nil, err } if r != nil { return r, nil } - r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token) + r, _, err = jsonpointer.GetForToken(i.SimpleSchema, token) return r, err } diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/spec/operation.go b/cluster-autoscaler/vendor/github.com/go-openapi/spec/operation.go index e698f9e8acfb..32f7d8fe7251 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/spec/operation.go +++ 
b/cluster-autoscaler/vendor/github.com/go-openapi/spec/operation.go @@ -21,6 +21,7 @@ import ( "github.com/go-openapi/swag" ) +// OperationProps describes an operation type OperationProps struct { Description string `json:"description,omitempty"` Consumes []string `json:"consumes,omitempty"` @@ -38,9 +39,9 @@ type OperationProps struct { // MarshalJSON takes care of serializing operation properties to JSON // -// We use a custom marhaller here to handle a special cases related +// We use a custom marhaller here to handle a special cases related to // the Security field. We need to preserve zero length slice -// while omiting the field when the value is nil/unset. +// while omitting the field when the value is nil/unset. func (op OperationProps) MarshalJSON() ([]byte, error) { type Alias OperationProps if op.Security == nil { diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/spec/parameter.go b/cluster-autoscaler/vendor/github.com/go-openapi/spec/parameter.go index 71aee1e80638..cb1a88d252a4 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/spec/parameter.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/spec/parameter.go @@ -64,6 +64,7 @@ func ParamRef(uri string) *Parameter { return p } +// ParamProps describes the specific attributes of an operation parameter type ParamProps struct { Description string `json:"description,omitempty"` Name string `json:"name,omitempty"` diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/spec/path_item.go b/cluster-autoscaler/vendor/github.com/go-openapi/spec/path_item.go index 9ab3ec53832a..a8ae63ece52b 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/spec/path_item.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/spec/path_item.go @@ -21,7 +21,7 @@ import ( "github.com/go-openapi/swag" ) -// pathItemProps the path item specific properties +// PathItemProps the path item specific properties type PathItemProps struct { Get *Operation `json:"get,omitempty"` Put *Operation `json:"put,omitempty"` diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/spec/response.go b/cluster-autoscaler/vendor/github.com/go-openapi/spec/response.go index a32b039eaf6d..586db0d78017 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/spec/response.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/spec/response.go @@ -39,15 +39,15 @@ type Response struct { } // JSONLookup look up a value by the json property name -func (p Response) JSONLookup(token string) (interface{}, error) { - if ex, ok := p.Extensions[token]; ok { +func (r Response) JSONLookup(token string) (interface{}, error) { + if ex, ok := r.Extensions[token]; ok { return &ex, nil } if token == "$ref" { - return &p.Ref, nil + return &r.Ref, nil } - r, _, err := jsonpointer.GetForToken(p.ResponseProps, token) - return r, err + ptr, _, err := jsonpointer.GetForToken(r.ResponseProps, token) + return ptr, err } // UnmarshalJSON hydrates this items instance with the data from JSON diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/spec/responses.go b/cluster-autoscaler/vendor/github.com/go-openapi/spec/responses.go index 3ab06697f2ea..4efb6f868bd0 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/spec/responses.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/spec/responses.go @@ -85,11 +85,15 @@ func (r Responses) MarshalJSON() ([]byte, error) { return concated, nil } +// ResponsesProps describes all responses for an operation. 
+// It tells what is the default response and maps all responses with a +// HTTP status code. type ResponsesProps struct { Default *Response StatusCodeResponses map[int]Response } +// MarshalJSON marshals responses as JSON func (r ResponsesProps) MarshalJSON() ([]byte, error) { toser := map[string]Response{} if r.Default != nil { @@ -101,6 +105,7 @@ func (r ResponsesProps) MarshalJSON() ([]byte, error) { return json.Marshal(toser) } +// UnmarshalJSON unmarshals responses from JSON func (r *ResponsesProps) UnmarshalJSON(data []byte) error { var res map[string]Response if err := json.Unmarshal(data, &res); err != nil { diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/spec/schema.go b/cluster-autoscaler/vendor/github.com/go-openapi/spec/schema.go index 05c1a4aa0eef..b9481e29bcdb 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/spec/schema.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/spec/schema.go @@ -203,6 +203,7 @@ func (r *SchemaURL) fromMap(v map[string]interface{}) error { // return nil // } +// SchemaProps describes a JSON schema (draft 4) type SchemaProps struct { ID string `json:"id,omitempty"` Ref Ref `json:"-"` @@ -240,6 +241,7 @@ type SchemaProps struct { Definitions Definitions `json:"definitions,omitempty"` } +// SwaggerSchemaProps are additional properties supported by swagger schemas, but not JSON-schema (draft 4) type SwaggerSchemaProps struct { Discriminator string `json:"discriminator,omitempty"` ReadOnly bool `json:"readOnly,omitempty"` @@ -604,8 +606,8 @@ func (s *Schema) UnmarshalJSON(data []byte) error { return err } - sch.Ref.fromMap(d) - sch.Schema.fromMap(d) + _ = sch.Ref.fromMap(d) + _ = sch.Schema.fromMap(d) delete(d, "$ref") delete(d, "$schema") diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/spec/security_scheme.go b/cluster-autoscaler/vendor/github.com/go-openapi/spec/security_scheme.go index 22d4f10af245..9f1b454eac91 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/spec/security_scheme.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/spec/security_scheme.go @@ -78,6 +78,7 @@ func OAuth2AccessToken(authorizationURL, tokenURL string) *SecurityScheme { }} } +// SecuritySchemeProps describes a swagger security scheme in the securityDefinitions section type SecuritySchemeProps struct { Description string `json:"description,omitempty"` Type string `json:"type"` diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/spec/swagger.go b/cluster-autoscaler/vendor/github.com/go-openapi/spec/swagger.go index 23780c78a20b..4586a21c8602 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/spec/swagger.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/spec/swagger.go @@ -67,6 +67,7 @@ func (s *Swagger) UnmarshalJSON(data []byte) error { return nil } +// SwaggerProps captures the top-level properties of an Api specification type SwaggerProps struct { ID string `json:"id,omitempty"` Consumes []string `json:"consumes,omitempty"` diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/spec/tag.go b/cluster-autoscaler/vendor/github.com/go-openapi/spec/tag.go index 97f555840c60..25256c4beca0 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/spec/tag.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/spec/tag.go @@ -21,6 +21,7 @@ import ( "github.com/go-openapi/swag" ) +// TagProps describe a tag entry in the top level tags section of a swagger spec type TagProps struct { Description string `json:"description,omitempty"` Name string `json:"name,omitempty"` diff --git 
a/cluster-autoscaler/vendor/github.com/go-openapi/swag/.gitignore b/cluster-autoscaler/vendor/github.com/go-openapi/swag/.gitignore index 769c244007b5..5862205eec94 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/swag/.gitignore +++ b/cluster-autoscaler/vendor/github.com/go-openapi/swag/.gitignore @@ -1 +1,3 @@ secrets.yml +vendor +Godeps diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/swag/.golangci.yml b/cluster-autoscaler/vendor/github.com/go-openapi/swag/.golangci.yml new file mode 100644 index 000000000000..6b237e4a795b --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-openapi/swag/.golangci.yml @@ -0,0 +1,20 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 25 + maligned: + suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 3 + min-occurrences: 2 + +linters: + enable-all: true + disable: + - maligned + - lll diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/swag/.travis.yml b/cluster-autoscaler/vendor/github.com/go-openapi/swag/.travis.yml index 24c69bdf36fd..bd3a2e527dd0 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/swag/.travis.yml +++ b/cluster-autoscaler/vendor/github.com/go-openapi/swag/.travis.yml @@ -1,14 +1,16 @@ -language: go +after_success: +- bash <(curl -s https://codecov.io/bash) go: -- 1.8 +- '1.9' +- 1.10.x +- 1.11.x install: - go get -u github.com/stretchr/testify - go get -u github.com/mailru/easyjson - go get -u gopkg.in/yaml.v2 -script: -- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... -after_success: -- bash <(curl -s https://codecov.io/bash) +language: go notifications: slack: secure: QUWvCkBBK09GF7YtEvHHVt70JOkdlNBG0nIKu/5qc4/nW5HP8I2w0SEf/XR2je0eED1Qe3L/AfMCWwrEj+IUZc3l4v+ju8X8R3Lomhme0Eb0jd1MTMCuPcBT47YCj0M7RON7vXtbFfm1hFJ/jLe5+9FXz0hpXsR24PJc5ZIi/ogNwkaPqG4BmndzecpSh0vc2FJPZUD9LT0I09REY/vXR0oQAalLkW0asGD5taHZTUZq/kBpsNxaAFrLM23i4mUcf33M5fjLpvx5LRICrX/57XpBrDh2TooBU6Qj3CgoY0uPRYUmSNxbVx1czNzl2JtEpb5yjoxfVPQeg0BvQM00G8LJINISR+ohrjhkZmAqchDupAX+yFrxTtORa78CtnIL6z/aTNlgwwVD8kvL/1pFA/JWYmKDmz93mV/+6wubGzNSQCstzjkFA4/iZEKewKUoRIAi/fxyscP6L/rCpmY/4llZZvrnyTqVbt6URWpopUpH4rwYqreXAtJxJsfBJIeSmUIiDIOMGkCTvyTEW3fWGmGoqWtSHLoaWDyAIGb7azb+KvfpWtEcoPFWfSWU+LGee0A/YsUhBl7ADB9A0CJEuR8q4BPpKpfLwPKSiKSAXL7zDkyjExyhtgqbSl2jS+rKIHOZNL8JkCcTP2MKMVd563C5rC5FMKqu3S9m2b6380E= +script: +- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... 
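The expander rework above (resolverContext, schemaLoader.isCircular, denormalizeFileRef, and the new AbsoluteCircularRef option) is easiest to see end to end on a self-referential schema. The following is a minimal usage sketch, not part of this patch: the document and its "Node" definition are invented for illustration, and the import path is the module path declared in the new go.mod. A definition that references itself cannot be inlined, so expansion keeps the cycle as a $ref instead of recursing forever:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/go-openapi/spec"
)

func main() {
	// A root document whose "Node" definition refers back to itself.
	raw := []byte(`{
	  "swagger": "2.0",
	  "definitions": {
	    "Node": {
	      "type": "object",
	      "properties": {
	        "children": {
	          "type": "array",
	          "items": { "$ref": "#/definitions/Node" }
	        }
	      }
	    }
	  }
	}`)

	var root spec.Swagger
	if err := json.Unmarshal(raw, &root); err != nil {
		log.Fatal(err)
	}

	// ExpandSchema resolves $refs against the in-memory root: baseForRoot
	// caches the document under the fake "root" base path, and isCircular
	// shortcuts the Node -> Node cycle, leaving it as a $ref (rendered as
	// an absolute ref because ExpandSchema sets AbsoluteCircularRef).
	node := root.Definitions["Node"]
	if err := spec.ExpandSchema(&node, &root, nil); err != nil {
		log.Fatal(err)
	}

	out, _ := json.MarshalIndent(node, "", "  ")
	fmt.Println(string(out))
}
```

Without the circulars map in resolverContext, a $ref participating in several cycles could be revisited along a path where it is not yet among the parentRefs, so the parent-ref check alone would not terminate; recording every detected circular ref makes the shortcut global.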
diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/swag/README.md b/cluster-autoscaler/vendor/github.com/go-openapi/swag/README.md index 5d43728e8768..459a3e18d795 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/swag/README.md +++ b/cluster-autoscaler/vendor/github.com/go-openapi/swag/README.md @@ -1,12 +1,23 @@ # Swag [![Build Status](https://travis-ci.org/go-openapi/swag.svg?branch=master)](https://travis-ci.org/go-openapi/swag) [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/swag?status.svg)](http://godoc.org/github.com/go-openapi/swag) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) +[![GoDoc](https://godoc.org/github.com/go-openapi/swag?status.svg)](http://godoc.org/github.com/go-openapi/swag) +[![GolangCI](https://golangci.com/badges/github.com/go-openapi/swag.svg)](https://golangci.com) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/swag)](https://goreportcard.com/report/github.com/go-openapi/swag) -Contains a bunch of helper functions: +Contains a bunch of helper functions for go-openapi and go-swagger projects. -* convert between value and pointers for builtins -* convert from string to builtin +You may also use it standalone for your projects. + +* convert between value and pointers for builtin types +* convert from string to builtin types (wraps strconv) * fast json concatenation * search in path * load from file or http -* name manglin \ No newline at end of file +* name mangling + + +This repo has only few dependencies outside of the standard library: + +* JSON utilities depend on github.com/mailru/easyjson +* YAML utilities depend on gopkg.in/yaml.v2 diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/swag/convert.go b/cluster-autoscaler/vendor/github.com/go-openapi/swag/convert.go index 2bf5ecbba2c6..4e446ff703f2 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/swag/convert.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/swag/convert.go @@ -22,8 +22,9 @@ import ( // same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER const ( - maxJSONFloat = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1 - minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1 + maxJSONFloat = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1 + minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1 + epsilon float64 = 1e-9 ) // IsFloat64AJSONInteger allow for integers [-2^53, 2^53-1] inclusive @@ -31,21 +32,39 @@ func IsFloat64AJSONInteger(f float64) bool { if math.IsNaN(f) || math.IsInf(f, 0) || f < minJSONFloat || f > maxJSONFloat { return false } - - return f == float64(int64(f)) || f == float64(uint64(f)) -} - -var evaluatesAsTrue = map[string]struct{}{ - "true": struct{}{}, - "1": struct{}{}, - "yes": struct{}{}, - "ok": struct{}{}, - "y": struct{}{}, - "on": struct{}{}, - "selected": struct{}{}, - "checked": struct{}{}, - "t": struct{}{}, - "enabled": struct{}{}, + fa := math.Abs(f) + g := float64(uint64(f)) + ga := math.Abs(g) + + diff := math.Abs(f - g) + + // more info: https://floating-point-gui.de/errors/comparison/#look-out-for-edge-cases + if f 
== g { // best case + return true + } else if f == float64(int64(f)) || f == float64(uint64(f)) { // optimistic case + return true + } else if f == 0 || g == 0 || diff < math.SmallestNonzeroFloat64 { // very close to 0 values + return diff < (epsilon * math.SmallestNonzeroFloat64) + } + // check the relative error + return diff/math.Min(fa+ga, math.MaxFloat64) < epsilon +} + +var evaluatesAsTrue map[string]struct{} + +func init() { + evaluatesAsTrue = map[string]struct{}{ + "true": {}, + "1": {}, + "yes": {}, + "ok": {}, + "y": {}, + "on": {}, + "selected": {}, + "checked": {}, + "t": {}, + "enabled": {}, + } } // ConvertBool turn a string into a boolean diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/swag/doc.go b/cluster-autoscaler/vendor/github.com/go-openapi/swag/doc.go new file mode 100644 index 000000000000..e01e1a02347b --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-openapi/swag/doc.go @@ -0,0 +1,33 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package swag contains a bunch of helper functions for go-openapi and go-swagger projects. + +You may also use it standalone for your projects. + + * convert between value and pointers for builtin types + * convert from string to builtin types (wraps strconv) + * fast json concatenation + * search in path + * load from file or http + * name mangling + + +This repo has only few dependencies outside of the standard library: + + * JSON utilities depend on github.com/mailru/easyjson + * YAML utilities depend on gopkg.in/yaml.v2 +*/ +package swag diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/swag/go.mod b/cluster-autoscaler/vendor/github.com/go-openapi/swag/go.mod new file mode 100644 index 000000000000..9eb936a19bb9 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-openapi/swag/go.mod @@ -0,0 +1,9 @@ +module github.com/go-openapi/swag + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/testify v1.2.2 + gopkg.in/yaml.v2 v2.2.1 +) diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/swag/go.sum b/cluster-autoscaler/vendor/github.com/go-openapi/swag/go.sum new file mode 100644 index 000000000000..d6e717bd4e18 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-openapi/swag/go.sum @@ -0,0 +1,9 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= 
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/swag/json.go b/cluster-autoscaler/vendor/github.com/go-openapi/swag/json.go index cb20a6a0f79a..33da5e4e7d6b 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/swag/json.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/swag/json.go @@ -26,14 +26,21 @@ import ( "github.com/mailru/easyjson/jwriter" ) +// nullJSON represents a JSON object with null type +var nullJSON = []byte("null") + // DefaultJSONNameProvider the default cache for types var DefaultJSONNameProvider = NewNameProvider() const comma = byte(',') -var closers = map[byte]byte{ - '{': '}', - '[': ']', +var closers map[byte]byte + +func init() { + closers = map[byte]byte{ + '{': '}', + '[': ']', + } } type ejMarshaler interface { @@ -79,10 +86,7 @@ func DynamicJSONToStruct(data interface{}, target interface{}) error { if err != nil { return err } - if err := ReadJSON(b, target); err != nil { - return err - } - return nil + return ReadJSON(b, target) } // ConcatJSON concatenates multiple json objects efficiently @@ -90,17 +94,29 @@ func ConcatJSON(blobs ...[]byte) []byte { if len(blobs) == 0 { return nil } - if len(blobs) == 1 { + + last := len(blobs) - 1 + for blobs[last] == nil || bytes.Equal(blobs[last], nullJSON) { + // strips trailing null objects + last = last - 1 + if last < 0 { + // there was nothing but "null"s or nil... + return nil + } + } + if last == 0 { return blobs[0] } - last := len(blobs) - 1 var opening, closing byte - a := 0 - idx := 0 + var idx, a int buf := bytes.NewBuffer(nil) - for i, b := range blobs { + for i, b := range blobs[:last+1] { + if b == nil || bytes.Equal(b, nullJSON) { + // a null object is in the list: skip it + continue + } if len(b) > 0 && opening == 0 { // is this an array or an object? 
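+			// The first non-null blob decides the container type: closers maps '{' to '}' and '[' to ']',
+			// so the merged result keeps matching delimiters.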
opening, closing = b[0], closers[b[0]] } @@ -245,7 +261,7 @@ func (n *NameProvider) GetJSONNames(subject interface{}) []string { names = n.makeNameIndex(tpe) } - var res []string + res := make([]string, 0, len(names.jsonNames)) for k := range names.jsonNames { res = append(res, k) } diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/swag/loading.go b/cluster-autoscaler/vendor/github.com/go-openapi/swag/loading.go index 62ed1e80ab56..70f4fb361c13 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/swag/loading.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/swag/loading.go @@ -43,7 +43,13 @@ func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func( if strings.HasPrefix(path, "http") { return remote } - return func(pth string) ([]byte, error) { return local(filepath.FromSlash(pth)) } + return func(pth string) ([]byte, error) { + upth, err := pathUnescape(pth) + if err != nil { + return nil, err + } + return local(filepath.FromSlash(upth)) + } } func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, error) { diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/swag/post_go18.go b/cluster-autoscaler/vendor/github.com/go-openapi/swag/post_go18.go new file mode 100644 index 000000000000..ef48086db38d --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-openapi/swag/post_go18.go @@ -0,0 +1,9 @@ +// +build go1.8 + +package swag + +import "net/url" + +func pathUnescape(path string) (string, error) { + return url.PathUnescape(path) +} diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/swag/post_go19.go b/cluster-autoscaler/vendor/github.com/go-openapi/swag/post_go19.go new file mode 100644 index 000000000000..567680c793a7 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-openapi/swag/post_go19.go @@ -0,0 +1,53 @@ +// +build go1.9 + +package swag + +import ( + "sort" + "sync" +) + +// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. +// Since go1.9, this may be implemented with sync.Map. 
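+// The sync.Map is safe for concurrent add/lookup on its own; sortMutex only
+// serializes the bulk operations (load and sorted) below.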
+type indexOfInitialisms struct { + sortMutex *sync.Mutex + index *sync.Map +} + +func newIndexOfInitialisms() *indexOfInitialisms { + return &indexOfInitialisms{ + sortMutex: new(sync.Mutex), + index: new(sync.Map), + } +} + +func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { + m.sortMutex.Lock() + defer m.sortMutex.Unlock() + for k, v := range initial { + m.index.Store(k, v) + } + return m +} + +func (m *indexOfInitialisms) isInitialism(key string) bool { + _, ok := m.index.Load(key) + return ok +} + +func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { + m.index.Store(key, true) + return m +} + +func (m *indexOfInitialisms) sorted() (result []string) { + m.sortMutex.Lock() + defer m.sortMutex.Unlock() + m.index.Range(func(key, value interface{}) bool { + k := key.(string) + result = append(result, k) + return true + }) + sort.Sort(sort.Reverse(byLength(result))) + return +} diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/swag/pre_go18.go b/cluster-autoscaler/vendor/github.com/go-openapi/swag/pre_go18.go new file mode 100644 index 000000000000..860bb2bbb1e5 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-openapi/swag/pre_go18.go @@ -0,0 +1,9 @@ +// +build !go1.8 + +package swag + +import "net/url" + +func pathUnescape(path string) (string, error) { + return url.QueryUnescape(path) +} diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/swag/pre_go19.go b/cluster-autoscaler/vendor/github.com/go-openapi/swag/pre_go19.go new file mode 100644 index 000000000000..72c48ae7520b --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-openapi/swag/pre_go19.go @@ -0,0 +1,55 @@ +// +build !go1.9 + +package swag + +import ( + "sort" + "sync" +) + +// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. +// Before go1.9, this may be implemented with a mutex on the map. 
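+// Here every operation, including lookups, takes getMutex, because plain Go
+// maps are not safe for concurrent use.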
+type indexOfInitialisms struct { + getMutex *sync.Mutex + index map[string]bool +} + +func newIndexOfInitialisms() *indexOfInitialisms { + return &indexOfInitialisms{ + getMutex: new(sync.Mutex), + index: make(map[string]bool, 50), + } +} + +func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { + m.getMutex.Lock() + defer m.getMutex.Unlock() + for k, v := range initial { + m.index[k] = v + } + return m +} + +func (m *indexOfInitialisms) isInitialism(key string) bool { + m.getMutex.Lock() + defer m.getMutex.Unlock() + _, ok := m.index[key] + return ok +} + +func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { + m.getMutex.Lock() + defer m.getMutex.Unlock() + m.index[key] = true + return m +} + +func (m *indexOfInitialisms) sorted() (result []string) { + m.getMutex.Lock() + defer m.getMutex.Unlock() + for k := range m.index { + result = append(result, k) + } + sort.Sort(sort.Reverse(byLength(result))) + return +} diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/swag/util.go b/cluster-autoscaler/vendor/github.com/go-openapi/swag/util.go index 40751aab441a..e659968fb10c 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/swag/util.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/swag/util.go @@ -18,62 +18,85 @@ import ( "math" "reflect" "regexp" - "sort" "strings" + "sync" "unicode" ) -// Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 -var commonInitialisms = map[string]bool{ - "ACL": true, - "API": true, - "ASCII": true, - "CPU": true, - "CSS": true, - "DNS": true, - "EOF": true, - "GUID": true, - "HTML": true, - "HTTPS": true, - "HTTP": true, - "ID": true, - "IP": true, - "JSON": true, - "LHS": true, - "QPS": true, - "RAM": true, - "RHS": true, - "RPC": true, - "SLA": true, - "SMTP": true, - "SQL": true, - "SSH": true, - "TCP": true, - "TLS": true, - "TTL": true, - "UDP": true, - "UI": true, - "UID": true, - "UUID": true, - "URI": true, - "URL": true, - "UTF8": true, - "VM": true, - "XML": true, - "XMPP": true, - "XSRF": true, - "XSS": true, -} +// commonInitialisms are common acronyms that are kept as whole uppercased words. 
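+// It is populated in init() below; use AddInitialisms to extend it at runtime.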
+var commonInitialisms *indexOfInitialisms + +// initialisms is a slice of sorted initialisms var initialisms []string +var once sync.Once + +var isInitialism func(string) bool + func init() { - for k := range commonInitialisms { - initialisms = append(initialisms, k) + // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 + var configuredInitialisms = map[string]bool{ + "ACL": true, + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTPS": true, + "HTTP": true, + "ID": true, + "IP": true, + "JSON": true, + "LHS": true, + "OAI": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SQL": true, + "SSH": true, + "TCP": true, + "TLS": true, + "TTL": true, + "UDP": true, + "UI": true, + "UID": true, + "UUID": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XMPP": true, + "XSRF": true, + "XSS": true, } - sort.Sort(sort.Reverse(byLength(initialisms))) + + // a thread-safe index of initialisms + commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) + + // a test function + isInitialism = commonInitialisms.isInitialism +} + +func ensureSorted() { + initialisms = commonInitialisms.sorted() } -// JoinByFormat joins a string array by a known format: +const ( + //collectionFormatComma = "csv" + collectionFormatSpace = "ssv" + collectionFormatTab = "tsv" + collectionFormatPipe = "pipes" + collectionFormatMulti = "multi" +) + +// JoinByFormat joins a string array by a known format (e.g. swagger's collectionFormat attribute): // ssv: space separated value // tsv: tab separated value // pipes: pipe (|) separated value @@ -84,13 +107,13 @@ func JoinByFormat(data []string, format string) []string { } var sep string switch format { - case "ssv": + case collectionFormatSpace: sep = " " - case "tsv": + case collectionFormatTab: sep = "\t" - case "pipes": + case collectionFormatPipe: sep = "|" - case "multi": + case collectionFormatMulti: return data default: sep = "," @@ -103,19 +126,20 @@ func JoinByFormat(data []string, format string) []string { // tsv: tab separated value // pipes: pipe (|) separated value // csv: comma separated value (default) +// func SplitByFormat(data, format string) []string { if data == "" { return nil } var sep string switch format { - case "ssv": + case collectionFormatSpace: sep = " " - case "tsv": + case collectionFormatTab: sep = "\t" - case "pipes": + case collectionFormatPipe: sep = "|" - case "multi": + case collectionFormatMulti: return nil default: sep = "," @@ -142,7 +166,7 @@ func (s byLength) Less(i, j int) bool { } // Prepares strings by splitting by caps, spaces, dashes, and underscore -func split(str string) (words []string) { +func split(str string) []string { repl := strings.NewReplacer( "@", "At ", "&", "And ", @@ -163,15 +187,15 @@ func split(str string) (words []string) { // Split when uppercase is found (needed for Snake) str = rex1.ReplaceAllString(str, " $1") - // check if consecutive single char things make up an initialism + // check if consecutive single char things make up an initialism + once.Do(ensureSorted) for _, k := range initialisms { str = strings.Replace(str, rex1.ReplaceAllString(k, " $1"), " "+k, -1) } // Get the final list of words - words = rex2.FindAllString(str, -1) - - return + //words = rex2.FindAllString(str, -1) + return rex2.FindAllString(str, -1) } // Removes leading whitespaces @@ -189,19 +213,36 @@ func 
lower(str string) string { return strings.ToLower(trim(str)) } +// Camelize an uppercased word +func Camelize(word string) (camelized string) { + for pos, ru := range word { + if pos > 0 { + camelized += string(unicode.ToLower(ru)) + } else { + camelized += string(unicode.ToUpper(ru)) + } + } + return +} + // ToFileName lowercases and underscores a go type name func ToFileName(name string) string { - var out []string - for _, w := range split(name) { + in := split(name) + out := make([]string, 0, len(in)) + + for _, w := range in { out = append(out, lower(w)) } + return strings.Join(out, "_") } // ToCommandName lowercases and underscores a go type name func ToCommandName(name string) string { - var out []string - for _, w := range split(name) { + in := split(name) + out := make([]string, 0, len(in)) + + for _, w := range in { out = append(out, lower(w)) } return strings.Join(out, "-") @@ -209,9 +250,11 @@ func ToCommandName(name string) string { // ToHumanNameLower represents a code name as a human series of words func ToHumanNameLower(name string) string { - var out []string - for _, w := range split(name) { - if !commonInitialisms[upper(w)] { + in := split(name) + out := make([]string, 0, len(in)) + + for _, w := range in { + if !isInitialism(upper(w)) { out = append(out, lower(w)) } else { out = append(out, w) @@ -222,10 +265,12 @@ func ToHumanNameLower(name string) string { // ToHumanNameTitle represents a code name as a human series of words with the first letters titleized func ToHumanNameTitle(name string) string { - var out []string - for _, w := range split(name) { + in := split(name) + out := make([]string, 0, len(in)) + + for _, w := range in { uw := upper(w) - if !commonInitialisms[uw] { + if !isInitialism(uw) { out = append(out, upper(w[:1])+lower(w[1:])) } else { out = append(out, w) @@ -236,8 +281,10 @@ func ToHumanNameTitle(name string) string { // ToJSONName camelcases a name which can be underscored or pascal cased func ToJSONName(name string) string { - var out []string - for i, w := range split(name) { + in := split(name) + out := make([]string, 0, len(in)) + + for i, w := range in { if i == 0 { out = append(out, lower(w)) continue @@ -250,7 +297,7 @@ func ToJSONName(name string) string { // ToVarName camelcases a name which can be underscored or pascal cased func ToVarName(name string) string { res := ToGoName(name) - if _, ok := commonInitialisms[res]; ok { + if isInitialism(res) { return lower(res) } if len(res) <= 1 { @@ -261,11 +308,13 @@ func ToVarName(name string) string { // ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes func ToGoName(name string) string { - var out []string - for _, w := range split(name) { + in := split(name) + out := make([]string, 0, len(in)) + + for _, w := range in { uw := upper(w) mod := int(math.Min(float64(len(uw)), 2)) - if !commonInitialisms[uw] && !commonInitialisms[uw[:len(uw)-mod]] { + if !isInitialism(uw) && !isInitialism(uw[:len(uw)-mod]) { uw = upper(w[:1]) + lower(w[1:]) } out = append(out, uw) @@ -284,6 +333,16 @@ func ToGoName(name string) string { return result } +// ContainsStrings searches a slice of strings for a case-sensitive match +func ContainsStrings(coll []string, item string) bool { + for _, a := range coll { + if a == item { + return true + } + } + return false +} + // ContainsStringsCI searches a slice of strings for a case-insensitive match func ContainsStringsCI(coll []string, item string) bool { for _, a := range coll { @@ -328,6 +387,16 @@ func 
IsZero(data interface{}) bool { return false } +// AddInitialisms add additional initialisms +func AddInitialisms(words ...string) { + for _, word := range words { + //commonInitialisms[upper(word)] = true + commonInitialisms.add(upper(word)) + } + // sort again + initialisms = commonInitialisms.sorted() +} + // CommandLineOptionsGroup represents a group of user-defined command line options type CommandLineOptionsGroup struct { ShortDescription string diff --git a/cluster-autoscaler/vendor/github.com/go-openapi/swag/yaml.go b/cluster-autoscaler/vendor/github.com/go-openapi/swag/yaml.go index 26502f21d542..f458c81a84a8 100644 --- a/cluster-autoscaler/vendor/github.com/go-openapi/swag/yaml.go +++ b/cluster-autoscaler/vendor/github.com/go-openapi/swag/yaml.go @@ -42,6 +42,7 @@ func YAMLToJSON(data interface{}) (json.RawMessage, error) { return json.RawMessage(b), err } +// BytesToYAMLDoc converts a byte slice into a YAML document func BytesToYAMLDoc(data []byte) (interface{}, error) { var canary map[interface{}]interface{} // validate this is an object and not a different type if err := yaml.Unmarshal(data, &canary); err != nil { @@ -55,14 +56,17 @@ func BytesToYAMLDoc(data []byte) (interface{}, error) { return document, nil } +// JSONMapSlice represent a JSON object, with the order of keys maintained type JSONMapSlice []JSONMapItem +// MarshalJSON renders a JSONMapSlice as JSON func (s JSONMapSlice) MarshalJSON() ([]byte, error) { w := &jwriter.Writer{Flags: jwriter.NilMapAsEmpty | jwriter.NilSliceAsEmpty} s.MarshalEasyJSON(w) return w.BuildBytes() } +// MarshalEasyJSON renders a JSONMapSlice as JSON, using easyJSON func (s JSONMapSlice) MarshalEasyJSON(w *jwriter.Writer) { w.RawByte('{') @@ -78,11 +82,14 @@ func (s JSONMapSlice) MarshalEasyJSON(w *jwriter.Writer) { w.RawByte('}') } +// UnmarshalJSON makes a JSONMapSlice from JSON func (s *JSONMapSlice) UnmarshalJSON(data []byte) error { l := jlexer.Lexer{Data: data} s.UnmarshalEasyJSON(&l) return l.Error() } + +// UnmarshalEasyJSON makes a JSONMapSlice from JSON, using easyJSON func (s *JSONMapSlice) UnmarshalEasyJSON(in *jlexer.Lexer) { if in.IsNull() { in.Skip() @@ -99,23 +106,34 @@ func (s *JSONMapSlice) UnmarshalEasyJSON(in *jlexer.Lexer) { *s = result } +// JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice type JSONMapItem struct { Key string Value interface{} } +// MarshalJSON renders a JSONMapItem as JSON func (s JSONMapItem) MarshalJSON() ([]byte, error) { w := &jwriter.Writer{Flags: jwriter.NilMapAsEmpty | jwriter.NilSliceAsEmpty} s.MarshalEasyJSON(w) return w.BuildBytes() } +// MarshalEasyJSON renders a JSONMapItem as JSON, using easyJSON func (s JSONMapItem) MarshalEasyJSON(w *jwriter.Writer) { w.String(s.Key) w.RawByte(':') w.Raw(WriteJSON(s.Value)) } +// UnmarshalJSON makes a JSONMapItem from JSON +func (s *JSONMapItem) UnmarshalJSON(data []byte) error { + l := jlexer.Lexer{Data: data} + s.UnmarshalEasyJSON(&l) + return l.Error() +} + +// UnmarshalEasyJSON makes a JSONMapItem from JSON, using easyJSON func (s *JSONMapItem) UnmarshalEasyJSON(in *jlexer.Lexer) { key := in.UnsafeString() in.WantColon() @@ -124,11 +142,6 @@ func (s *JSONMapItem) UnmarshalEasyJSON(in *jlexer.Lexer) { s.Key = key s.Value = value } -func (s *JSONMapItem) UnmarshalJSON(data []byte) error { - l := jlexer.Lexer{Data: data} - s.UnmarshalEasyJSON(&l) - return l.Error() -} func transformData(input interface{}) (out interface{}, err error) { switch in := input.(type) { @@ -146,9 +159,9 @@ func transformData(input interface{}) 
(out interface{}, err error) { return nil, fmt.Errorf("types don't match expect map key string or int got: %T", mi.Key) } - v, err := transformData(mi.Value) - if err != nil { - return nil, err + v, ert := transformData(mi.Value) + if ert != nil { + return nil, ert } nmi.Value = v o[i] = nmi @@ -167,9 +180,9 @@ func transformData(input interface{}) (out interface{}, err error) { return nil, fmt.Errorf("types don't match expect map key string or int got: %T", ke) } - v, err := transformData(va) - if err != nil { - return nil, err + v, ert := transformData(va) + if ert != nil { + return nil, ert } nmi.Value = v o = append(o, nmi) @@ -201,7 +214,7 @@ func YAMLDoc(path string) (json.RawMessage, error) { return nil, err } - return json.RawMessage(data), nil + return data, nil } // YAMLData loads a yaml document from either http or a file diff --git a/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/.gitignore b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/.gitignore new file mode 100644 index 000000000000..5a3865c74f70 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +.DS_Store diff --git a/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/.travis.yml b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/.travis.yml new file mode 100644 index 000000000000..fc2af5986180 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/.travis.yml @@ -0,0 +1,16 @@ +language: go + +go: + - 1.8 + - 1.9 + - tip + +install: + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls + - go list -f '{{range .Imports}}{{.}} {{end}}' ./... | xargs go get -v + - go list -f '{{range .TestImports}}{{.}} {{end}}' ./... | xargs go get -v + +script: + - go test -v -covermode=count -coverprofile=coverage.out + - $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci diff --git a/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/LICENSE b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/LICENSE new file mode 100644 index 000000000000..d235be9cc6df --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/LICENSE @@ -0,0 +1,17 @@ +The MIT License (MIT) +Copyright (c) 2016, Qiang Xue + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software +and associated documentation files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or +substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/README.md b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/README.md new file mode 100644 index 000000000000..ca9445d2cf82 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/README.md @@ -0,0 +1,534 @@ +# ozzo-validation + +[![GoDoc](https://godoc.org/github.com/go-ozzo/ozzo-validation?status.png)](http://godoc.org/github.com/go-ozzo/ozzo-validation) +[![Build Status](https://travis-ci.org/go-ozzo/ozzo-validation.svg?branch=master)](https://travis-ci.org/go-ozzo/ozzo-validation) +[![Coverage Status](https://coveralls.io/repos/github/go-ozzo/ozzo-validation/badge.svg?branch=master)](https://coveralls.io/github/go-ozzo/ozzo-validation?branch=master) +[![Go Report](https://goreportcard.com/badge/github.com/go-ozzo/ozzo-validation)](https://goreportcard.com/report/github.com/go-ozzo/ozzo-validation) + +## Description + +ozzo-validation is a Go package that provides configurable and extensible data validation capabilities. +It has the following features: + +* use normal programming constructs rather than error-prone struct tags to specify how data should be validated. +* can validate data of different types, e.g., structs, strings, byte slices, slices, maps, arrays. +* can validate custom data types as long as they implement the `Validatable` interface. +* can validate data types that implement the `sql.Valuer` interface (e.g. `sql.NullString`). +* customizable and well-formatted validation errors. +* provide a rich set of validation rules right out of box. +* extremely easy to create and use custom validation rules. + + +## Requirements + +Go 1.8 or above. + + +## Getting Started + +The ozzo-validation package mainly includes a set of validation rules and two validation methods. You use +validation rules to describe how a value should be considered valid, and you call either `validation.Validate()` +or `validation.ValidateStruct()` to validate the value. + + +### Installation + +Run the following command to install the package: + +``` +go get github.com/go-ozzo/ozzo-validation +go get github.com/go-ozzo/ozzo-validation/is +``` + +### Validating a Simple Value + +For a simple value, such as a string or an integer, you may use `validation.Validate()` to validate it. For example, + +```go +package main + +import ( + "fmt" + + "github.com/go-ozzo/ozzo-validation" + "github.com/go-ozzo/ozzo-validation/is" +) + +func main() { + data := "example" + err := validation.Validate(data, + validation.Required, // not empty + validation.Length(5, 100), // length between 5 and 100 + is.URL, // is a valid URL + ) + fmt.Println(err) + // Output: + // must be a valid URL +} +``` + +The method `validation.Validate()` will run through the rules in the order that they are listed. If a rule fails +the validation, the method will return the corresponding error and skip the rest of the rules. The method will +return nil if the value passes all validation rules. + + +### Validating a Struct + +For a struct value, you usually want to check if its fields are valid. For example, in a RESTful application, you +may unmarshal the request payload into a struct and then validate the struct fields. 
If one or multiple fields +are invalid, you may want to get an error describing which fields are invalid. You can use `validation.ValidateStruct()` +to achieve this purpose. A single struct can have rules for multiple fields, and a field can be associated with multiple +rules. For example, + +```go +package main + +import ( + "fmt" + "regexp" + + "github.com/go-ozzo/ozzo-validation" + "github.com/go-ozzo/ozzo-validation/is" +) + +type Address struct { + Street string + City string + State string + Zip string +} + +func (a Address) Validate() error { + return validation.ValidateStruct(&a, + // Street cannot be empty, and the length must between 5 and 50 + validation.Field(&a.Street, validation.Required, validation.Length(5, 50)), + // City cannot be empty, and the length must between 5 and 50 + validation.Field(&a.City, validation.Required, validation.Length(5, 50)), + // State cannot be empty, and must be a string consisting of two letters in upper case + validation.Field(&a.State, validation.Required, validation.Match(regexp.MustCompile("^[A-Z]{2}$"))), + // State cannot be empty, and must be a string consisting of five digits + validation.Field(&a.Zip, validation.Required, validation.Match(regexp.MustCompile("^[0-9]{5}$"))), + ) +} + +func main() { + a := Address{ + Street: "123", + City: "Unknown", + State: "Virginia", + Zip: "12345", + } + + err := a.Validate() + fmt.Println(err) + // Output: + // Street: the length must be between 5 and 50; State: must be in a valid format. +} +``` + +Note that when calling `validation.ValidateStruct` to validate a struct, you should pass to the method a pointer +to the struct instead of the struct itself. Similarly, when calling `validation.Field` to specify the rules +for a struct field, you should use a pointer to the struct field. + +When the struct validation is performed, the fields are validated in the order they are specified in `ValidateStruct`. +And when each field is validated, its rules are also evaluated in the order they are associated with the field. +If a rule fails, an error is recorded for that field, and the validation will continue with the next field. + + +### Validation Errors + +The `validation.ValidateStruct` method returns validation errors found in struct fields in terms of `validation.Errors` +which is a map of fields and their corresponding errors. Nil is returned if validation passes. + +By default, `validation.Errors` uses the struct tags named `json` to determine what names should be used to +represent the invalid fields. The type also implements the `json.Marshaler` interface so that it can be marshaled +into a proper JSON object. For example, + +```go +type Address struct { + Street string `json:"street"` + City string `json:"city"` + State string `json:"state"` + Zip string `json:"zip"` +} + +// ...perform validation here... + +err := a.Validate() +b, _ := json.Marshal(err) +fmt.Println(string(b)) +// Output: +// {"street":"the length must be between 5 and 50","state":"must be in a valid format"} +``` + +You may modify `validation.ErrorTag` to use a different struct tag name. 
+ +If you do not like the magic that `ValidateStruct` determines error keys based on struct field names or corresponding +tag values, you may use the following alternative approach: + +```go +c := Customer{ + Name: "Qiang Xue", + Email: "q", + Address: Address{ + State: "Virginia", + }, +} + +err := validation.Errors{ + "name": validation.Validate(c.Name, validation.Required, validation.Length(5, 20)), + "email": validation.Validate(c.Name, validation.Required, is.Email), + "zip": validation.Validate(c.Address.Zip, validation.Required, validation.Match(regexp.MustCompile("^[0-9]{5}$"))), +}.Filter() +fmt.Println(err) +// Output: +// email: must be a valid email address; zip: cannot be blank. +``` + +In the above example, we build a `validation.Errors` by a list of names and the corresponding validation results. +At the end we call `Errors.Filter()` to remove from `Errors` all nils which correspond to those successful validation +results. The method will return nil if `Errors` is empty. + +The above approach is very flexible as it allows you to freely build up your validation error structure. You can use +it to validate both struct and non-struct values. Compared to using `ValidateStruct` to validate a struct, +it has the drawback that you have to redundantly specify the error keys while `ValidateStruct` can automatically +find them out. + + +### Internal Errors + +Internal errors are different from validation errors in that internal errors are caused by malfunctioning code (e.g. +a validator making a remote call to validate some data when the remote service is down) rather +than the data being validated. When an internal error happens during data validation, you may allow the user to resubmit +the same data to perform validation again, hoping the program resumes functioning. On the other hand, if data validation +fails due to data error, the user should generally not resubmit the same data again. + +To differentiate internal errors from validation errors, when an internal error occurs in a validator, wrap it +into `validation.InternalError` by calling `validation.NewInternalError()`. The user of the validator can then check +if a returned error is an internal error or not. For example, + +```go +if err := a.Validate(); err != nil { + if e, ok := err.(validation.InternalError); ok { + // an internal error happened + fmt.Println(e.InternalError()) + } +} +``` + + +## Validatable Types + +A type is validatable if it implements the `validation.Validatable` interface. + +When `validation.Validate` is used to validate a validatable value, if it does not find any error with the +given validation rules, it will further call the value's `Validate()` method. + +Similarly, when `validation.ValidateStruct` is validating a struct field whose type is validatable, it will call +the field's `Validate` method after it passes the listed rules. + +In the following example, the `Address` field of `Customer` is validatable because `Address` implements +`validation.Validatable`. Therefore, when validating a `Customer` struct with `validation.ValidateStruct`, +validation will "dive" into the `Address` field. + +```go +type Customer struct { + Name string + Gender string + Email string + Address Address +} + +func (c Customer) Validate() error { + return validation.ValidateStruct(&c, + // Name cannot be empty, and the length must be between 5 and 20. + validation.Field(&c.Name, validation.Required, validation.Length(5, 20)), + // Gender is optional, and should be either "Female" or "Male". 
+ validation.Field(&c.Gender, validation.In("Female", "Male")), + // Email cannot be empty and should be in a valid email format. + validation.Field(&c.Email, validation.Required, is.Email), + // Validate Address using its own validation rules + validation.Field(&c.Address), + ) +} + +c := Customer{ + Name: "Qiang Xue", + Email: "q", + Address: Address{ + Street: "123 Main Street", + City: "Unknown", + State: "Virginia", + Zip: "12345", + }, +} + +err := c.Validate() +fmt.Println(err) +// Output: +// Address: (State: must be in a valid format.); Email: must be a valid email address. +``` + +Sometimes, you may want to skip the invocation of a type's `Validate` method. To do so, simply associate +a `validation.Skip` rule with the value being validated. + + +### Maps/Slices/Arrays of Validatables + +When validating a map, slice, or array, whose element type implements the `validation.Validatable` interface, +the `validation.Validate` method will call the `Validate` method of every non-nil element. +The validation errors of the elements will be returned as `validation.Errors` which maps the keys of the +invalid elements to their corresponding validation errors. For example, + +```go +addresses := []Address{ + Address{State: "MD", Zip: "12345"}, + Address{Street: "123 Main St", City: "Vienna", State: "VA", Zip: "12345"}, + Address{City: "Unknown", State: "NC", Zip: "123"}, +} +err := validation.Validate(addresses) +fmt.Println(err) +// Output: +// 0: (City: cannot be blank; Street: cannot be blank.); 2: (Street: cannot be blank; Zip: must be in a valid format.). +``` + +When using `validation.ValidateStruct` to validate a struct, the above validation procedure also applies to those struct +fields which are map/slices/arrays of validatables. + + +### Pointers + +When a value being validated is a pointer, most validation rules will validate the actual value pointed to by the pointer. +If the pointer is nil, these rules will skip the validation. + +An exception is the `validation.Required` and `validation.NotNil` rules. When a pointer is nil, they +will report a validation error. + + +### Types Implementing `sql.Valuer` + +If a data type implements the `sql.Valuer` interface (e.g. `sql.NullString`), the built-in validation rules will handle +it properly. In particular, when a rule is validating such data, it will call the `Value()` method and validate +the returned value instead. + + +### Required vs. Not Nil + +When validating input values, there are two different scenarios about checking if input values are provided or not. + +In the first scenario, an input value is considered missing if it is not entered or it is entered as a zero value +(e.g. an empty string, a zero integer). You can use the `validation.Required` rule in this case. + +In the second scenario, an input value is considered missing only if it is not entered. A pointer field is usually +used in this case so that you can detect if a value is entered or not by checking if the pointer is nil or not. +You can use the `validation.NotNil` rule to ensure a value is entered (even if it is a zero value). + + +### Embedded Structs + +The `validation.ValidateStruct` method will properly validate a struct that contains embedded structs. In particular, +the fields of an embedded struct are treated as if they belong directly to the containing struct. 
For example,
+
+```go
+type Employee struct {
+    Name string
+}
+
+type Manager struct {
+    Employee
+    Level int
+}
+
+m := Manager{}
+err := validation.ValidateStruct(&m,
+    validation.Field(&m.Name, validation.Required),
+    validation.Field(&m.Level, validation.Required),
+)
+fmt.Println(err)
+// Output:
+// Level: cannot be blank; Name: cannot be blank.
+```
+
+In the above code, we use `&m.Name` to specify the validation of the `Name` field of the embedded struct `Employee`.
+And the validation error uses `Name` as the key for the error associated with the `Name` field, as if `Name` were a field
+directly belonging to `Manager`.
+
+If `Employee` implements the `validation.Validatable` interface, we can also use the following code to validate
+`Manager`, which generates the same validation result:
+
+```go
+func (e Employee) Validate() error {
+    return validation.ValidateStruct(&e,
+        validation.Field(&e.Name, validation.Required),
+    )
+}
+
+err := validation.ValidateStruct(&m,
+    validation.Field(&m.Employee),
+    validation.Field(&m.Level, validation.Required),
+)
+fmt.Println(err)
+// Output:
+// Level: cannot be blank; Name: cannot be blank.
+```
+
+
+## Built-in Validation Rules
+
+The following rules are provided in the `validation` package:
+
+* `In(...interface{})`: checks if a value can be found in the given list of values.
+* `Length(min, max int)`: checks if the length of a value is within the specified range.
+  This rule should only be used for validating strings, slices, maps, and arrays.
+* `RuneLength(min, max int)`: checks if the length of a string is within the specified range.
+  This rule is similar to `Length` except that when the value being validated is a string, it checks
+  its rune length instead of byte length.
+* `Min(min interface{})` and `Max(max interface{})`: checks if a value is within the specified range.
+  These two rules should only be used for validating int, uint, float and time.Time types.
+* `Match(*regexp.Regexp)`: checks if a value matches the specified regular expression.
+  This rule should only be used for strings and byte slices.
+* `Date(layout string)`: checks if a string value is a date whose format is specified by the layout.
+  By calling `Min()` and/or `Max()`, you can additionally check if the date is within the specified range.
+* `Required`: checks if a value is not empty (neither nil nor zero).
+* `NotNil`: checks if a pointer value is not nil. Non-pointer values are considered valid.
+* `NilOrNotEmpty`: checks if a value is a nil pointer or a non-empty value. This differs from `Required` in that it treats a nil pointer as valid.
+* `Skip`: this is a special rule used to indicate that all rules following it should be skipped (including the nested ones).
+* `MultipleOf`: checks if the value is a multiple of the specified value.
+
+The `is` sub-package provides a list of commonly used string validation rules that can be used to check if the format
+of a value satisfies certain requirements. Note that these rules only handle strings and byte slices, and if a string
+or byte slice is empty, it is considered valid. You may use a `Required` rule to ensure a value is not empty.
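+
+As a quick sketch of that interplay (illustrative, not taken from the package docs):
+
+```go
+// An empty string passes the format check on its own, because `is`
+// rules treat empty input as valid...
+err := validation.Validate("", is.Email)
+fmt.Println(err) // Output: <nil>
+
+// ...so add Required when the value must actually be present.
+err = validation.Validate("", validation.Required, is.Email)
+fmt.Println(err) // Output: cannot be blank
+```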
+Below is the whole list of the rules provided by the `is` package: + +* `Email`: validates if a string is an email or not +* `URL`: validates if a string is a valid URL +* `RequestURL`: validates if a string is a valid request URL +* `RequestURI`: validates if a string is a valid request URI +* `Alpha`: validates if a string contains English letters only (a-zA-Z) +* `Digit`: validates if a string contains digits only (0-9) +* `Alphanumeric`: validates if a string contains English letters and digits only (a-zA-Z0-9) +* `UTFLetter`: validates if a string contains unicode letters only +* `UTFDigit`: validates if a string contains unicode decimal digits only +* `UTFLetterNumeric`: validates if a string contains unicode letters and numbers only +* `UTFNumeric`: validates if a string contains unicode number characters (category N) only +* `LowerCase`: validates if a string contains lower case unicode letters only +* `UpperCase`: validates if a string contains upper case unicode letters only +* `Hexadecimal`: validates if a string is a valid hexadecimal number +* `HexColor`: validates if a string is a valid hexadecimal color code +* `RGBColor`: validates if a string is a valid RGB color in the form of rgb(R, G, B) +* `Int`: validates if a string is a valid integer number +* `Float`: validates if a string is a floating point number +* `UUIDv3`: validates if a string is a valid version 3 UUID +* `UUIDv4`: validates if a string is a valid version 4 UUID +* `UUIDv5`: validates if a string is a valid version 5 UUID +* `UUID`: validates if a string is a valid UUID +* `CreditCard`: validates if a string is a valid credit card number +* `ISBN10`: validates if a string is an ISBN version 10 +* `ISBN13`: validates if a string is an ISBN version 13 +* `ISBN`: validates if a string is an ISBN (either version 10 or 13) +* `JSON`: validates if a string is in valid JSON format +* `ASCII`: validates if a string contains ASCII characters only +* `PrintableASCII`: validates if a string contains printable ASCII characters only +* `Multibyte`: validates if a string contains multibyte characters +* `FullWidth`: validates if a string contains full-width characters +* `HalfWidth`: validates if a string contains half-width characters +* `VariableWidth`: validates if a string contains both full-width and half-width characters +* `Base64`: validates if a string is encoded in Base64 +* `DataURI`: validates if a string is a valid base64-encoded data URI +* `E164`: validates if a string is a valid E164 phone number (+19251232233) +* `CountryCode2`: validates if a string is a valid ISO3166 Alpha 2 country code +* `CountryCode3`: validates if a string is a valid ISO3166 Alpha 3 country code +* `DialString`: validates if a string is a valid dial string that can be passed to Dial() +* `MAC`: validates if a string is a MAC address +* `IP`: validates if a string is a valid IP address (either version 4 or 6) +* `IPv4`: validates if a string is a valid version 4 IP address +* `IPv6`: validates if a string is a valid version 6 IP address +* `Subdomain`: validates if a string is valid subdomain +* `Domain`: validates if a string is valid domain +* `DNSName`: validates if a string is valid DNS name +* `Host`: validates if a string is a valid IP (both v4 and v6) or a valid DNS name +* `Port`: validates if a string is a valid port number +* `MongoID`: validates if a string is a valid Mongo ID +* `Latitude`: validates if a string is a valid latitude +* `Longitude`: validates if a string is a valid longitude +* `SSN`: validates if a 
string is a social security number (SSN) +* `Semver`: validates if a string is a valid semantic version + +### Customizing Error Messages + +All built-in validation rules allow you to customize error messages. To do so, simply call the `Error()` method +of the rules. For example, + +```go +data := "2123" +err := validation.Validate(data, + validation.Required.Error("is required"), + validation.Match(regexp.MustCompile("^[0-9]{5}$")).Error("must be a string with five digits"), +) +fmt.Println(err) +// Output: +// must be a string with five digits +``` + + +## Creating Custom Rules + +Creating a custom rule is as simple as implementing the `validation.Rule` interface. The interface contains a single +method as shown below, which should validate the value and return the validation error, if any: + +```go +// Validate validates a value and returns an error if validation fails. +Validate(value interface{}) error +``` + +If you already have a function with the same signature as shown above, you can call `validation.By()` to turn +it into a validation rule. For example, + +```go +func checkAbc(value interface{}) error { + s, _ := value.(string) + if s != "abc" { + return errors.New("must be abc") + } + return nil +} + +err := validation.Validate("xyz", validation.By(checkAbc)) +fmt.Println(err) +// Output: must be abc +``` + + +### Rule Groups + +When a combination of several rules are used in multiple places, you may use the following trick to create a +rule group so that your code is more maintainable. + +```go +var NameRule = []validation.Rule{ + validation.Required, + validation.Length(5, 20), +} + +type User struct { + FirstName string + LastName string +} + +func (u User) Validate() error { + return validation.ValidateStruct(&u, + validation.Field(&u.FirstName, NameRule...), + validation.Field(&u.LastName, NameRule...), + ) +} +``` + +In the above example, we create a rule group `NameRule` which consists of two validation rules. We then use this rule +group to validate both `FirstName` and `LastName`. + + +## Credits + +The `is` sub-package wraps the excellent validators provided by the [govalidator](https://github.com/asaskevich/govalidator) package. diff --git a/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/UPGRADE.md b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/UPGRADE.md new file mode 100644 index 000000000000..8f11d03eaa95 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/UPGRADE.md @@ -0,0 +1,46 @@ +# Upgrade Instructions + +## Upgrade from 2.x to 3.x + +* Instead of using `StructRules` to define struct validation rules, use `ValidateStruct()` to declare and perform + struct validation. The following code snippet shows how to modify your code: +```go +// 2.x usage +err := validation.StructRules{}. + Add("Street", validation.Required, validation.Length(5, 50)). + Add("City", validation.Required, validation.Length(5, 50)). + Add("State", validation.Required, validation.Match(regexp.MustCompile("^[A-Z]{2}$"))). + Add("Zip", validation.Required, validation.Match(regexp.MustCompile("^[0-9]{5}$"))). 
+ Validate(a) + +// 3.x usage +err := validation.ValidateStruct(&a, + validation.Field(&a.Street, validation.Required, validation.Length(5, 50)), + validation.Field(&a.City, validation.Required, validation.Length(5, 50)), + validation.Field(&a.State, validation.Required, validation.Match(regexp.MustCompile("^[A-Z]{2}$"))), + validation.Field(&a.Zip, validation.Required, validation.Match(regexp.MustCompile("^[0-9]{5}$"))), +) +``` + +* Instead of using `Rules` to declare a rule list and use it to validate a value, call `Validate()` with the rules directly. +```go +data := "example" + +// 2.x usage +rules := validation.Rules{ + validation.Required, + validation.Length(5, 100), + is.URL, +} +err := rules.Validate(data) + +// 3.x usage +err := validation.Validate(data, + validation.Required, + validation.Length(5, 100), + is.URL, +) +``` + +* The default struct tags used for determining error keys is changed from `validation` to `json`. You may modify + `validation.ErrorTag` to change it back. \ No newline at end of file diff --git a/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/date.go b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/date.go new file mode 100644 index 000000000000..432e035155e9 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/date.go @@ -0,0 +1,84 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package validation + +import ( + "errors" + "time" +) + +type DateRule struct { + layout string + min, max time.Time + message string + rangeMessage string +} + +// Date returns a validation rule that checks if a string value is in a format that can be parsed into a date. +// The format of the date should be specified as the layout parameter which accepts the same value as that for time.Parse. +// For example, +// validation.Date(time.ANSIC) +// validation.Date("02 Jan 06 15:04 MST") +// validation.Date("2006-01-02") +// +// By calling Min() and/or Max(), you can let the Date rule to check if a parsed date value is within +// the specified date range. +// +// An empty value is considered valid. Use the Required rule to make sure a value is not empty. +func Date(layout string) *DateRule { + return &DateRule{ + layout: layout, + message: "must be a valid date", + rangeMessage: "the data is out of range", + } +} + +// Error sets the error message that is used when the value being validated is not a valid date. +func (r *DateRule) Error(message string) *DateRule { + r.message = message + return r +} + +// RangeError sets the error message that is used when the value being validated is out of the specified Min/Max date range. +func (r *DateRule) RangeError(message string) *DateRule { + r.rangeMessage = message + return r +} + +// Min sets the minimum date range. A zero value means skipping the minimum range validation. +func (r *DateRule) Min(min time.Time) *DateRule { + r.min = min + return r +} + +// Max sets the maximum date range. A zero value means skipping the maximum range validation. +func (r *DateRule) Max(max time.Time) *DateRule { + r.max = max + return r +} + +// Validate checks if the given value is a valid date. 
+func (r *DateRule) Validate(value interface{}) error { + value, isNil := Indirect(value) + if isNil || IsEmpty(value) { + return nil + } + + str, err := EnsureString(value) + if err != nil { + return err + } + + date, err := time.Parse(r.layout, str) + if err != nil { + return errors.New(r.message) + } + + if !r.min.IsZero() && r.min.After(date) || !r.max.IsZero() && date.After(r.max) { + return errors.New(r.rangeMessage) + } + + return nil +} diff --git a/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/error.go b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/error.go new file mode 100644 index 000000000000..d89d62857379 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/error.go @@ -0,0 +1,89 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package validation + +import ( + "encoding/json" + "fmt" + "sort" +) + +type ( + // Errors represents the validation errors that are indexed by struct field names, map or slice keys. + Errors map[string]error + + // InternalError represents an error that should NOT be treated as a validation error. + InternalError interface { + error + InternalError() error + } + + internalError struct { + error + } +) + +// NewInternalError wraps a given error into an InternalError. +func NewInternalError(err error) InternalError { + return &internalError{error: err} +} + +// InternalError returns the actual error that it wraps around. +func (e *internalError) InternalError() error { + return e.error +} + +// Error returns the error string of Errors. +func (es Errors) Error() string { + if len(es) == 0 { + return "" + } + + keys := []string{} + for key := range es { + keys = append(keys, key) + } + sort.Strings(keys) + + s := "" + for i, key := range keys { + if i > 0 { + s += "; " + } + if errs, ok := es[key].(Errors); ok { + s += fmt.Sprintf("%v: (%v)", key, errs) + } else { + s += fmt.Sprintf("%v: %v", key, es[key].Error()) + } + } + return s + "." +} + +// MarshalJSON converts the Errors into a valid JSON. +func (es Errors) MarshalJSON() ([]byte, error) { + errs := map[string]interface{}{} + for key, err := range es { + if ms, ok := err.(json.Marshaler); ok { + errs[key] = ms + } else { + errs[key] = err.Error() + } + } + return json.Marshal(errs) +} + +// Filter removes all nils from Errors and returns back the updated Errors as an error. +// If the length of Errors becomes 0, it will return nil. +func (es Errors) Filter() error { + for key, value := range es { + if value == nil { + delete(es, key) + } + } + if len(es) == 0 { + return nil + } + return es +} diff --git a/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/in.go b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/in.go new file mode 100644 index 000000000000..33ee5e534422 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/in.go @@ -0,0 +1,43 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package validation + +import "errors" + +// In returns a validation rule that checks if a value can be found in the given list of values. +// Note that the value being checked and the possible range of values must be of the same type. +// An empty value is considered valid. Use the Required rule to make sure a value is not empty. 
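+// Candidates are compared with Go's == operator, so they must be comparable types.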
+func In(values ...interface{}) *InRule { + return &InRule{ + elements: values, + message: "must be a valid value", + } +} + +type InRule struct { + elements []interface{} + message string +} + +// Validate checks if the given value is valid or not. +func (r *InRule) Validate(value interface{}) error { + value, isNil := Indirect(value) + if isNil || IsEmpty(value) { + return nil + } + + for _, e := range r.elements { + if e == value { + return nil + } + } + return errors.New(r.message) +} + +// Error sets the error message for the rule. +func (r *InRule) Error(message string) *InRule { + r.message = message + return r +} diff --git a/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/is/rules.go b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/is/rules.go new file mode 100644 index 000000000000..73cfb7e4de72 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/is/rules.go @@ -0,0 +1,171 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package is provides a list of commonly used string validation rules. +package is + +import ( + "regexp" + "unicode" + + "github.com/asaskevich/govalidator" + "github.com/go-ozzo/ozzo-validation" +) + +var ( + // Email validates if a string is an email or not. + Email = validation.NewStringRule(govalidator.IsEmail, "must be a valid email address") + // URL validates if a string is a valid URL + URL = validation.NewStringRule(govalidator.IsURL, "must be a valid URL") + // RequestURL validates if a string is a valid request URL + RequestURL = validation.NewStringRule(govalidator.IsRequestURL, "must be a valid request URL") + // RequestURI validates if a string is a valid request URI + RequestURI = validation.NewStringRule(govalidator.IsRequestURI, "must be a valid request URI") + // Alpha validates if a string contains English letters only (a-zA-Z) + Alpha = validation.NewStringRule(govalidator.IsAlpha, "must contain English letters only") + // Digit validates if a string contains digits only (0-9) + Digit = validation.NewStringRule(isDigit, "must contain digits only") + // Alphanumeric validates if a string contains English letters and digits only (a-zA-Z0-9) + Alphanumeric = validation.NewStringRule(govalidator.IsAlphanumeric, "must contain English letters and digits only") + // UTFLetter validates if a string contains unicode letters only + UTFLetter = validation.NewStringRule(govalidator.IsUTFLetter, "must contain unicode letter characters only") + // UTFDigit validates if a string contains unicode decimal digits only + UTFDigit = validation.NewStringRule(govalidator.IsUTFDigit, "must contain unicode decimal digits only") + // UTFLetterNumeric validates if a string contains unicode letters and numbers only + UTFLetterNumeric = validation.NewStringRule(govalidator.IsUTFLetterNumeric, "must contain unicode letters and numbers only") + // UTFNumeric validates if a string contains unicode number characters (category N) only + UTFNumeric = validation.NewStringRule(isUTFNumeric, "must contain unicode number characters only") + // LowerCase validates if a string contains lower case unicode letters only + LowerCase = validation.NewStringRule(govalidator.IsLowerCase, "must be in lower case") + // UpperCase validates if a string contains upper case unicode letters only + UpperCase = validation.NewStringRule(govalidator.IsUpperCase, "must be in upper case") + // Hexadecimal validates if a string is a 
valid hexadecimal number
+	Hexadecimal = validation.NewStringRule(govalidator.IsHexadecimal, "must be a valid hexadecimal number")
+	// HexColor validates if a string is a valid hexadecimal color code
+	HexColor = validation.NewStringRule(govalidator.IsHexcolor, "must be a valid hexadecimal color code")
+	// RGBColor validates if a string is a valid RGB color in the form of rgb(R, G, B)
+	RGBColor = validation.NewStringRule(govalidator.IsRGBcolor, "must be a valid RGB color code")
+	// Int validates if a string is a valid integer number
+	Int = validation.NewStringRule(govalidator.IsInt, "must be an integer number")
+	// Float validates if a string is a floating point number
+	Float = validation.NewStringRule(govalidator.IsFloat, "must be a floating point number")
+	// UUIDv3 validates if a string is a valid version 3 UUID
+	UUIDv3 = validation.NewStringRule(govalidator.IsUUIDv3, "must be a valid UUID v3")
+	// UUIDv4 validates if a string is a valid version 4 UUID
+	UUIDv4 = validation.NewStringRule(govalidator.IsUUIDv4, "must be a valid UUID v4")
+	// UUIDv5 validates if a string is a valid version 5 UUID
+	UUIDv5 = validation.NewStringRule(govalidator.IsUUIDv5, "must be a valid UUID v5")
+	// UUID validates if a string is a valid UUID
+	UUID = validation.NewStringRule(govalidator.IsUUID, "must be a valid UUID")
+	// CreditCard validates if a string is a valid credit card number
+	CreditCard = validation.NewStringRule(govalidator.IsCreditCard, "must be a valid credit card number")
+	// ISBN10 validates if a string is an ISBN version 10
+	ISBN10 = validation.NewStringRule(govalidator.IsISBN10, "must be a valid ISBN-10")
+	// ISBN13 validates if a string is an ISBN version 13
+	ISBN13 = validation.NewStringRule(govalidator.IsISBN13, "must be a valid ISBN-13")
+	// ISBN validates if a string is an ISBN (either version 10 or 13)
+	ISBN = validation.NewStringRule(isISBN, "must be a valid ISBN")
+	// JSON validates if a string is in valid JSON format
+	JSON = validation.NewStringRule(govalidator.IsJSON, "must be in valid JSON format")
+	// ASCII validates if a string contains ASCII characters only
+	ASCII = validation.NewStringRule(govalidator.IsASCII, "must contain ASCII characters only")
+	// PrintableASCII validates if a string contains printable ASCII characters only
+	PrintableASCII = validation.NewStringRule(govalidator.IsPrintableASCII, "must contain printable ASCII characters only")
+	// Multibyte validates if a string contains multibyte characters
+	Multibyte = validation.NewStringRule(govalidator.IsMultibyte, "must contain multibyte characters")
+	// FullWidth validates if a string contains full-width characters
+	FullWidth = validation.NewStringRule(govalidator.IsFullWidth, "must contain full-width characters")
+	// HalfWidth validates if a string contains half-width characters
+	HalfWidth = validation.NewStringRule(govalidator.IsHalfWidth, "must contain half-width characters")
+	// VariableWidth validates if a string contains both full-width and half-width characters
+	VariableWidth = validation.NewStringRule(govalidator.IsVariableWidth, "must contain both full-width and half-width characters")
+	// Base64 validates if a string is encoded in Base64
+	Base64 = validation.NewStringRule(govalidator.IsBase64, "must be encoded in Base64")
+	// DataURI validates if a string is a valid base64-encoded data URI
+	DataURI = validation.NewStringRule(govalidator.IsDataURI, "must be a Base64-encoded data URI")
+	// E164 validates if a string is a valid E.164 phone number
+	E164 = 
validation.NewStringRule(isE164Number, "must be a valid E164 number")
+	// CountryCode2 validates if a string is a valid ISO3166 Alpha 2 country code
+	CountryCode2 = validation.NewStringRule(govalidator.IsISO3166Alpha2, "must be a valid two-letter country code")
+	// CountryCode3 validates if a string is a valid ISO3166 Alpha 3 country code
+	CountryCode3 = validation.NewStringRule(govalidator.IsISO3166Alpha3, "must be a valid three-letter country code")
+	// DialString validates if a string is a valid dial string that can be passed to Dial()
+	DialString = validation.NewStringRule(govalidator.IsDialString, "must be a valid dial string")
+	// MAC validates if a string is a MAC address
+	MAC = validation.NewStringRule(govalidator.IsMAC, "must be a valid MAC address")
+	// IP validates if a string is a valid IP address (either version 4 or 6)
+	IP = validation.NewStringRule(govalidator.IsIP, "must be a valid IP address")
+	// IPv4 validates if a string is a valid version 4 IP address
+	IPv4 = validation.NewStringRule(govalidator.IsIPv4, "must be a valid IPv4 address")
+	// IPv6 validates if a string is a valid version 6 IP address
+	IPv6 = validation.NewStringRule(govalidator.IsIPv6, "must be a valid IPv6 address")
+	// Subdomain validates if a string is a valid subdomain
+	Subdomain = validation.NewStringRule(isSubdomain, "must be a valid subdomain")
+	// Domain validates if a string is a valid domain
+	Domain = validation.NewStringRule(isDomain, "must be a valid domain")
+	// DNSName validates if a string is a valid DNS name
+	DNSName = validation.NewStringRule(govalidator.IsDNSName, "must be a valid DNS name")
+	// Host validates if a string is a valid IP (both v4 and v6) or a valid DNS name
+	Host = validation.NewStringRule(govalidator.IsHost, "must be a valid IP address or DNS name")
+	// Port validates if a string is a valid port number
+	Port = validation.NewStringRule(govalidator.IsPort, "must be a valid port number")
+	// MongoID validates if a string is a valid Mongo ID
+	MongoID = validation.NewStringRule(govalidator.IsMongoID, "must be a valid hex-encoded MongoDB ObjectId")
+	// Latitude validates if a string is a valid latitude
+	Latitude = validation.NewStringRule(govalidator.IsLatitude, "must be a valid latitude")
+	// Longitude validates if a string is a valid longitude
+	Longitude = validation.NewStringRule(govalidator.IsLongitude, "must be a valid longitude")
+	// SSN validates if a string is a social security number (SSN)
+	SSN = validation.NewStringRule(govalidator.IsSSN, "must be a valid social security number")
+	// Semver validates if a string is a valid semantic version
+	Semver = validation.NewStringRule(govalidator.IsSemver, "must be a valid semantic version")
+)
+
+var (
+	reDigit = regexp.MustCompile("^[0-9]+$")
+	// Subdomain regex source: https://stackoverflow.com/a/7933253
+	reSubdomain = regexp.MustCompile(`^[A-Za-z0-9](?:[A-Za-z0-9\-]{0,61}[A-Za-z0-9])?$`)
+)
+
+func isISBN(value string) bool {
+	return govalidator.IsISBN(value, 10) || govalidator.IsISBN(value, 13)
+}
+
+func isDigit(value string) bool {
+	return reDigit.MatchString(value)
+}
+
+func isE164Number(value string) bool {
+	// E164 regex source: https://stackoverflow.com/a/23299989
+	reE164 := regexp.MustCompile(`^\+?[1-9]\d{1,14}$`)
+	return reE164.MatchString(value)
+}
+
+func isSubdomain(value string) bool {
+	// reSubdomain is precompiled at package level above, so it is not
+	// recompiled on every call.
+	return reSubdomain.MatchString(value)
+}
+
+func isDomain(value string) bool {
+	if len(value) > 255 {
+		return false
+	}
+
+	// Domain regex source: https://stackoverflow.com/a/7933253
+	// Slightly modified: Removed 255 max length validation since Go regex does not
+	// support lookarounds. More info: https://stackoverflow.com/a/38935027
+	reDomain := regexp.MustCompile(`^(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\.)+(?:[a-z]{1,63}|xn--[a-z0-9]{1,59})$`)
+
+	return reDomain.MatchString(value)
+}
+
+func isUTFNumeric(value string) bool {
+	for _, c := range value {
+		if !unicode.IsNumber(c) {
+			return false
+		}
+	}
+	return true
+} diff --git a/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/length.go b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/length.go new file mode 100644 index 000000000000..d5effdf0f72b --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/length.go @@ -0,0 +1,81 @@ +// Copyright 2016 Qiang Xue. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package validation
+
+import (
+	"errors"
+	"fmt"
+	"unicode/utf8"
+)
+
+// Length returns a validation rule that checks if a value's length is within the specified range.
+// If max is 0, it means there is no upper bound for the length.
+// This rule should only be used for validating strings, slices, maps, and arrays.
+// An empty value is considered valid. Use the Required rule to make sure a value is not empty.
+func Length(min, max int) *LengthRule {
+	message := "the value must be empty"
+	if min == 0 && max > 0 {
+		message = fmt.Sprintf("the length must be no more than %v", max)
+	} else if min > 0 && max == 0 {
+		message = fmt.Sprintf("the length must be no less than %v", min)
+	} else if min > 0 && max > 0 {
+		if min == max {
+			message = fmt.Sprintf("the length must be exactly %v", min)
+		} else {
+			message = fmt.Sprintf("the length must be between %v and %v", min, max)
+		}
+	}
+	return &LengthRule{
+		min:     min,
+		max:     max,
+		message: message,
+	}
+}
+
+// RuneLength returns a validation rule that checks if a string's rune length is within the specified range.
+// If max is 0, it means there is no upper bound for the length.
+// This rule should only be used for validating strings, slices, maps, and arrays.
+// An empty value is considered valid. Use the Required rule to make sure a value is not empty.
+// If the value being validated is not a string, the rule works the same as Length.
+func RuneLength(min, max int) *LengthRule {
+	r := Length(min, max)
+	r.rune = true
+	return r
+}
+
+type LengthRule struct {
+	min, max int
+	message  string
+	rune     bool
+}
+
+// Validate checks if the given value is valid or not.
+func (v *LengthRule) Validate(value interface{}) error {
+	value, isNil := Indirect(value)
+	if isNil || IsEmpty(value) {
+		return nil
+	}
+
+	var (
+		l   int
+		err error
+	)
+	if s, ok := value.(string); ok && v.rune {
+		l = utf8.RuneCountInString(s)
+	} else if l, err = LengthOfValue(value); err != nil {
+		return err
+	}
+
+	if v.min > 0 && l < v.min || v.max > 0 && l > v.max {
+		return errors.New(v.message)
+	}
+	return nil
+}
+
+// Error sets the error message for the rule.
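+// A usage sketch (illustrative only): validation.Length(2, 10).Error("invalid length")
+// replaces the default message "the length must be between 2 and 10".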
+func (v *LengthRule) Error(message string) *LengthRule {
+	v.message = message
+	return v
+} diff --git a/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/match.go b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/match.go new file mode 100644 index 000000000000..4a842bedecb5 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/match.go @@ -0,0 +1,47 @@ +// Copyright 2016 Qiang Xue. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package validation
+
+import (
+	"errors"
+	"regexp"
+)
+
+// Match returns a validation rule that checks if a value matches the specified regular expression.
+// This rule should only be used for validating strings and byte slices, or a validation error will be reported.
+// An empty value is considered valid. Use the Required rule to make sure a value is not empty.
+func Match(re *regexp.Regexp) *MatchRule {
+	return &MatchRule{
+		re:      re,
+		message: "must be in a valid format",
+	}
+}
+
+type MatchRule struct {
+	re      *regexp.Regexp
+	message string
+}
+
+// Validate checks if the given value is valid or not.
+func (v *MatchRule) Validate(value interface{}) error {
+	value, isNil := Indirect(value)
+	if isNil {
+		return nil
+	}
+
+	isString, str, isBytes, bs := StringOrBytes(value)
+	if isString && (str == "" || v.re.MatchString(str)) {
+		return nil
+	} else if isBytes && (len(bs) == 0 || v.re.Match(bs)) {
+		return nil
+	}
+	return errors.New(v.message)
+}
+
+// Error sets the error message for the rule.
+func (v *MatchRule) Error(message string) *MatchRule {
+	v.message = message
+	return v
+} diff --git a/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/minmax.go b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/minmax.go new file mode 100644 index 000000000000..5eded0a54b6f --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/minmax.go @@ -0,0 +1,177 @@ +// Copyright 2016 Qiang Xue. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package validation
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"time"
+)
+
+type ThresholdRule struct {
+	threshold interface{}
+	operator  int
+	message   string
+}
+
+const (
+	greaterThan = iota
+	greaterEqualThan
+	lessThan
+	lessEqualThan
+)
+
+// Min is a validation rule that checks if a value is greater than or equal to the specified value.
+// By calling Exclusive, the rule will check if the value is strictly greater than the specified value.
+// Note that the value being checked and the threshold value must be of the same type.
+// Only int, uint, float and time.Time types are supported.
+// An empty value is considered valid. Please use the Required rule to make sure a value is not empty.
+func Min(min interface{}) *ThresholdRule {
+	return &ThresholdRule{
+		threshold: min,
+		operator:  greaterEqualThan,
+		message:   fmt.Sprintf("must be no less than %v", min),
+	}
+}
+
+// Max is a validation rule that checks if a value is less than or equal to the specified value.
+// By calling Exclusive, the rule will check if the value is strictly less than the specified value.
+// Note that the value being checked and the threshold value must be of the same type.
+// Only int, uint, float and time.Time types are supported.
+// An empty value is considered valid. Please use the Required rule to make sure a value is not empty.
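+//
+// A combined usage sketch (illustrative only, not part of the upstream docs):
+//
+//    err := validation.Validate(25, validation.Min(1), validation.Max(10))
+//    // err.Error() == "must be no greater than 10"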
+func Max(max interface{}) *ThresholdRule { + return &ThresholdRule{ + threshold: max, + operator: lessEqualThan, + message: fmt.Sprintf("must be no greater than %v", max), + } +} + +// Exclusive sets the comparison to exclude the boundary value. +func (r *ThresholdRule) Exclusive() *ThresholdRule { + if r.operator == greaterEqualThan { + r.operator = greaterThan + r.message = fmt.Sprintf("must be greater than %v", r.threshold) + } else if r.operator == lessEqualThan { + r.operator = lessThan + r.message = fmt.Sprintf("must be less than %v", r.threshold) + } + return r +} + +// Validate checks if the given value is valid or not. +func (r *ThresholdRule) Validate(value interface{}) error { + value, isNil := Indirect(value) + if isNil || IsEmpty(value) { + return nil + } + + rv := reflect.ValueOf(r.threshold) + switch rv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + v, err := ToInt(value) + if err != nil { + return err + } + if r.compareInt(rv.Int(), v) { + return nil + } + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + v, err := ToUint(value) + if err != nil { + return err + } + if r.compareUint(rv.Uint(), v) { + return nil + } + + case reflect.Float32, reflect.Float64: + v, err := ToFloat(value) + if err != nil { + return err + } + if r.compareFloat(rv.Float(), v) { + return nil + } + + case reflect.Struct: + t, ok := r.threshold.(time.Time) + if !ok { + return fmt.Errorf("type not supported: %v", rv.Type()) + } + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("cannot convert %v to time.Time", reflect.TypeOf(value)) + } + if v.IsZero() || r.compareTime(t, v) { + return nil + } + + default: + return fmt.Errorf("type not supported: %v", rv.Type()) + } + + return errors.New(r.message) +} + +// Error sets the error message for the rule. 
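+// A usage sketch (illustrative only): validation.Min(18).Exclusive() rejects 18
+// with "must be greater than 18", while the plain Min(18) accepts it.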
+func (r *ThresholdRule) Error(message string) *ThresholdRule {
+	r.message = message
+	return r
+}
+
+func (r *ThresholdRule) compareInt(threshold, value int64) bool {
+	switch r.operator {
+	case greaterThan:
+		return value > threshold
+	case greaterEqualThan:
+		return value >= threshold
+	case lessThan:
+		return value < threshold
+	default:
+		return value <= threshold
+	}
+}
+
+func (r *ThresholdRule) compareUint(threshold, value uint64) bool {
+	switch r.operator {
+	case greaterThan:
+		return value > threshold
+	case greaterEqualThan:
+		return value >= threshold
+	case lessThan:
+		return value < threshold
+	default:
+		return value <= threshold
+	}
+}
+
+func (r *ThresholdRule) compareFloat(threshold, value float64) bool {
+	switch r.operator {
+	case greaterThan:
+		return value > threshold
+	case greaterEqualThan:
+		return value >= threshold
+	case lessThan:
+		return value < threshold
+	default:
+		return value <= threshold
+	}
+}
+
+func (r *ThresholdRule) compareTime(threshold, value time.Time) bool {
+	switch r.operator {
+	case greaterThan:
+		return value.After(threshold)
+	case greaterEqualThan:
+		return value.After(threshold) || value.Equal(threshold)
+	case lessThan:
+		return value.Before(threshold)
+	default:
+		return value.Before(threshold) || value.Equal(threshold)
+	}
+} diff --git a/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/multipleof.go b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/multipleof.go new file mode 100644 index 000000000000..c40fcfa72c8f --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/multipleof.go @@ -0,0 +1,55 @@ +package validation
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+)
+
+// MultipleOf returns a validation rule that checks if a value is a multiple of the given threshold.
+// Only int and uint types are supported.
+func MultipleOf(threshold interface{}) *multipleOfRule {
+	return &multipleOfRule{
+		threshold,
+		fmt.Sprintf("must be multiple of %v", threshold),
+	}
+}
+
+type multipleOfRule struct {
+	threshold interface{}
+	message   string
+}
+
+// Error sets the error message for the rule.
+func (r *multipleOfRule) Error(message string) *multipleOfRule {
+	r.message = message
+	return r
+}
+
+// Validate checks if the given value is valid or not.
+func (r *multipleOfRule) Validate(value interface{}) error {
+	rv := reflect.ValueOf(r.threshold)
+	switch rv.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		v, err := ToInt(value)
+		if err != nil {
+			return err
+		}
+		if v%rv.Int() == 0 {
+			return nil
+		}
+
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		v, err := ToUint(value)
+		if err != nil {
+			return err
+		}
+
+		if v%rv.Uint() == 0 {
+			return nil
+		}
+	default:
+		return fmt.Errorf("type not supported: %v", rv.Type())
+	}
+
+	return errors.New(r.message)
+} diff --git a/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/not_in.go b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/not_in.go new file mode 100644 index 000000000000..18cf4a0ffc4c --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/not_in.go @@ -0,0 +1,45 @@ +// Copyright 2018 Qiang Xue, Google LLC. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package validation
+
+import (
+	"errors"
+)
+
+// NotIn returns a validation rule that checks if a value is absent from the given list of values.
+// Note that the value being checked and the possible range of values must be of the same type.
+// An empty value is considered valid. Use the Required rule to make sure a value is not empty.
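+//
+// A minimal usage sketch (illustrative only):
+//
+//    err := validation.Validate("root", validation.NotIn("root", "admin"))
+//    // err.Error() == "must not be in list"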
+func NotIn(values ...interface{}) *NotInRule {
+	return &NotInRule{
+		elements: values,
+		message:  "must not be in list",
+	}
+}
+
+type NotInRule struct {
+	elements []interface{}
+	message  string
+}
+
+// Validate checks if the given value is valid or not.
+func (r *NotInRule) Validate(value interface{}) error {
+	value, isNil := Indirect(value)
+	if isNil || IsEmpty(value) {
+		return nil
+	}
+
+	for _, e := range r.elements {
+		if e == value {
+			return errors.New(r.message)
+		}
+	}
+	return nil
+}
+
+// Error sets the error message for the rule.
+func (r *NotInRule) Error(message string) *NotInRule {
+	r.message = message
+	return r
+} diff --git a/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/not_nil.go b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/not_nil.go new file mode 100644 index 000000000000..6cfca1204af7 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/not_nil.go @@ -0,0 +1,32 @@ +// Copyright 2016 Qiang Xue. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package validation
+
+import "errors"
+
+// NotNil is a validation rule that checks if a value is not nil.
+// NotNil only handles types including interface, pointer, slice, and map.
+// All other types are considered valid.
+var NotNil = &notNilRule{message: "is required"}
+
+type notNilRule struct {
+	message string
+}
+
+// Validate checks if the given value is valid or not.
+func (r *notNilRule) Validate(value interface{}) error {
+	_, isNil := Indirect(value)
+	if isNil {
+		return errors.New(r.message)
+	}
+	return nil
+}
+
+// Error sets the error message for the rule.
+func (r *notNilRule) Error(message string) *notNilRule {
+	return &notNilRule{
+		message: message,
+	}
+} diff --git a/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/required.go b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/required.go new file mode 100644 index 000000000000..ef9558e02546 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/required.go @@ -0,0 +1,42 @@ +// Copyright 2016 Qiang Xue. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package validation
+
+import "errors"
+
+// Required is a validation rule that checks if a value is not empty.
+// A value is considered not empty if
+// - integer, float: not zero
+// - bool: true
+// - string, array, slice, map: len() > 0
+// - interface, pointer: not nil and the referenced value is not empty
+// - any other types: always considered not empty
var Required = &requiredRule{message: "cannot be blank", skipNil: false}
+
+// NilOrNotEmpty checks if a value is a nil pointer or a value that is not empty.
+// NilOrNotEmpty differs from Required in that it treats a nil pointer as valid.
+var NilOrNotEmpty = &requiredRule{message: "cannot be blank", skipNil: true}
+
+type requiredRule struct {
+	message string
+	skipNil bool
+}
+
+// Validate checks if the given value is valid or not.
+func (v *requiredRule) Validate(value interface{}) error {
+	value, isNil := Indirect(value)
+	if v.skipNil && !isNil && IsEmpty(value) || !v.skipNil && (isNil || IsEmpty(value)) {
+		return errors.New(v.message)
+	}
+	return nil
+}
+
+// Error sets the error message for the rule.
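+// A usage sketch (illustrative only): validation.Required.Error("is mandatory")
+// returns a copy of the rule that reports "is mandatory" instead of "cannot be blank".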
+func (v *requiredRule) Error(message string) *requiredRule { + return &requiredRule{ + message: message, + skipNil: v.skipNil, + } +} diff --git a/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/string.go b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/string.go new file mode 100644 index 000000000000..e8f2a5e749af --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/string.go @@ -0,0 +1,48 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package validation + +import "errors" + +type stringValidator func(string) bool + +// StringRule is a rule that checks a string variable using a specified stringValidator. +type StringRule struct { + validate stringValidator + message string +} + +// NewStringRule creates a new validation rule using a function that takes a string value and returns a bool. +// The rule returned will use the function to check if a given string or byte slice is valid or not. +// An empty value is considered to be valid. Please use the Required rule to make sure a value is not empty. +func NewStringRule(validator stringValidator, message string) *StringRule { + return &StringRule{ + validate: validator, + message: message, + } +} + +// Error sets the error message for the rule. +func (v *StringRule) Error(message string) *StringRule { + return NewStringRule(v.validate, message) +} + +// Validate checks if the given value is valid or not. +func (v *StringRule) Validate(value interface{}) error { + value, isNil := Indirect(value) + if isNil || IsEmpty(value) { + return nil + } + + str, err := EnsureString(value) + if err != nil { + return err + } + + if v.validate(str) { + return nil + } + return errors.New(v.message) +} diff --git a/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/struct.go b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/struct.go new file mode 100644 index 000000000000..2ff852ad5dc8 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/struct.go @@ -0,0 +1,154 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package validation + +import ( + "errors" + "fmt" + "reflect" + "strings" +) + +var ( + // ErrStructPointer is the error that a struct being validated is not specified as a pointer. + ErrStructPointer = errors.New("only a pointer to a struct can be validated") +) + +type ( + // ErrFieldPointer is the error that a field is not specified as a pointer. + ErrFieldPointer int + + // ErrFieldNotFound is the error that a field cannot be found in the struct. + ErrFieldNotFound int + + // FieldRules represents a rule set associated with a struct field. + FieldRules struct { + fieldPtr interface{} + rules []Rule + } +) + +// Error returns the error string of ErrFieldPointer. +func (e ErrFieldPointer) Error() string { + return fmt.Sprintf("field #%v must be specified as a pointer", int(e)) +} + +// Error returns the error string of ErrFieldNotFound. +func (e ErrFieldNotFound) Error() string { + return fmt.Sprintf("field #%v cannot be found in the struct", int(e)) +} + +// ValidateStruct validates a struct by checking the specified struct fields against the corresponding validation rules. +// Note that the struct being validated must be specified as a pointer to it. If the pointer is nil, it is considered valid. 
+// Use Field() to specify struct fields that need to be validated. Each Field() call specifies a single field which
+// should be specified as a pointer to the field. A field can be associated with multiple rules.
+// For example,
+//
+//    value := struct {
+//        Name  string
+//        Value string
+//    }{"name", "demo"}
+//    err := validation.ValidateStruct(&value,
+//        validation.Field(&value.Name, validation.Required),
+//        validation.Field(&value.Value, validation.Required, validation.Length(5, 10)),
+//    )
+//    fmt.Println(err)
+//    // Value: the length must be between 5 and 10.
+//
+// An error will be returned if validation fails.
+func ValidateStruct(structPtr interface{}, fields ...*FieldRules) error {
+	value := reflect.ValueOf(structPtr)
+	if value.Kind() != reflect.Ptr || !value.IsNil() && value.Elem().Kind() != reflect.Struct {
+		// must be a pointer to a struct
+		return NewInternalError(ErrStructPointer)
+	}
+	if value.IsNil() {
+		// treat a nil struct pointer as valid
+		return nil
+	}
+	value = value.Elem()
+
+	errs := Errors{}
+
+	for i, fr := range fields {
+		fv := reflect.ValueOf(fr.fieldPtr)
+		if fv.Kind() != reflect.Ptr {
+			return NewInternalError(ErrFieldPointer(i))
+		}
+		ft := findStructField(value, fv)
+		if ft == nil {
+			return NewInternalError(ErrFieldNotFound(i))
+		}
+		if err := Validate(fv.Elem().Interface(), fr.rules...); err != nil {
+			if ie, ok := err.(InternalError); ok && ie.InternalError() != nil {
+				return err
+			}
+			if ft.Anonymous {
+				// merge errors from anonymous struct field
+				if es, ok := err.(Errors); ok {
+					for name, value := range es {
+						errs[name] = value
+					}
+					continue
+				}
+			}
+			errs[getErrorFieldName(ft)] = err
+		}
+	}
+
+	if len(errs) > 0 {
+		return errs
+	}
+	return nil
+}
+
+// Field specifies a struct field and the corresponding validation rules.
+// The struct field must be specified as a pointer to it.
+func Field(fieldPtr interface{}, rules ...Rule) *FieldRules {
+	return &FieldRules{
+		fieldPtr: fieldPtr,
+		rules:    rules,
+	}
+}
+
+// findStructField looks for a field in the given struct.
+// The field being looked for should be a pointer to the actual struct field.
+// If found, the field info will be returned. Otherwise, nil will be returned.
+func findStructField(structValue reflect.Value, fieldValue reflect.Value) *reflect.StructField {
+	ptr := fieldValue.Pointer()
+	for i := structValue.NumField() - 1; i >= 0; i-- {
+		sf := structValue.Type().Field(i)
+		if ptr == structValue.Field(i).UnsafeAddr() {
+			// do additional type comparison because it's possible that the address of
+			// an embedded struct is the same as the first field of the embedded struct
+			if sf.Type == fieldValue.Elem().Type() {
+				return &sf
+			}
+		}
+		if sf.Anonymous {
+			// delve into anonymous struct to look for the field
+			fi := structValue.Field(i)
+			if sf.Type.Kind() == reflect.Ptr {
+				fi = fi.Elem()
+			}
+			if fi.Kind() == reflect.Struct {
+				if f := findStructField(fi, fieldValue); f != nil {
+					return f
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// getErrorFieldName returns the name that should be used to represent the validation error of a struct field.
+func getErrorFieldName(f *reflect.StructField) string {
+	if tag := f.Tag.Get(ErrorTag); tag != "" {
+		if cps := strings.SplitN(tag, ",", 2); cps[0] != "" {
+			return cps[0]
+		}
+	}
+	return f.Name
+} diff --git a/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/util.go b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/util.go new file mode 100644 index 000000000000..b15fd9a2979c --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/util.go @@ -0,0 +1,163 @@ +// Copyright 2016 Qiang Xue. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package validation
+
+import (
+	"database/sql/driver"
+	"errors"
+	"fmt"
+	"reflect"
+	"time"
+)
+
+var (
+	bytesType  = reflect.TypeOf([]byte(nil))
+	valuerType = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
+)
+
+// EnsureString ensures the given value is a string.
+// If the value is a byte slice, it will be typecast into a string.
+// An error is returned otherwise.
+func EnsureString(value interface{}) (string, error) {
+	v := reflect.ValueOf(value)
+	if v.Kind() == reflect.String {
+		return v.String(), nil
+	}
+	if v.Type() == bytesType {
+		return string(v.Interface().([]byte)), nil
+	}
+	return "", errors.New("must be either a string or byte slice")
+}
+
+// StringOrBytes typecasts a value into a string or byte slice.
+// Boolean flags are returned to indicate if the typecasting succeeds or not.
+func StringOrBytes(value interface{}) (isString bool, str string, isBytes bool, bs []byte) {
+	v := reflect.ValueOf(value)
+	if v.Kind() == reflect.String {
+		str = v.String()
+		isString = true
+	} else if v.Kind() == reflect.Slice && v.Type() == bytesType {
+		bs = v.Interface().([]byte)
+		isBytes = true
+	}
+	return
+}
+
+// LengthOfValue returns the length of a value that is a string, slice, map, or array.
+// An error is returned for all other types.
+func LengthOfValue(value interface{}) (int, error) {
+	v := reflect.ValueOf(value)
+	switch v.Kind() {
+	case reflect.String, reflect.Slice, reflect.Map, reflect.Array:
+		return v.Len(), nil
+	}
+	return 0, fmt.Errorf("cannot get the length of %v", v.Kind())
+}
+
+// ToInt converts the given value to an int64.
+// An error is returned for all incompatible types.
+func ToInt(value interface{}) (int64, error) {
+	v := reflect.ValueOf(value)
+	switch v.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int(), nil
+	}
+	return 0, fmt.Errorf("cannot convert %v to int64", v.Kind())
+}
+
+// ToUint converts the given value to a uint64.
+// An error is returned for all incompatible types.
+func ToUint(value interface{}) (uint64, error) {
+	v := reflect.ValueOf(value)
+	switch v.Kind() {
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint(), nil
+	}
+	return 0, fmt.Errorf("cannot convert %v to uint64", v.Kind())
}
+
+// ToFloat converts the given value to a float64.
+// An error is returned for all incompatible types.
+func ToFloat(value interface{}) (float64, error) {
+	v := reflect.ValueOf(value)
+	switch v.Kind() {
+	case reflect.Float32, reflect.Float64:
+		return v.Float(), nil
+	}
+	return 0, fmt.Errorf("cannot convert %v to float64", v.Kind())
+}
+
+// IsEmpty checks if a value is empty or not.
+// A value is considered empty if
+// - integer, float: zero
+// - bool: false
+// - string, array: len() == 0
+// - slice, map: nil or len() == 0
+// - interface, pointer: nil or the referenced value is empty
+func IsEmpty(value interface{}) bool {
+	v := reflect.ValueOf(value)
+	switch v.Kind() {
+	case reflect.String, reflect.Array, reflect.Map, reflect.Slice:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Invalid:
+		return true
+	case reflect.Interface, reflect.Ptr:
+		if v.IsNil() {
+			return true
+		}
+		return IsEmpty(v.Elem().Interface())
+	case reflect.Struct:
+		v, ok := value.(time.Time)
+		if ok && v.IsZero() {
+			return true
+		}
+	}
+
+	return false
+}
+
+// Indirect returns the value that the given interface or pointer references.
+// If the value implements driver.Valuer, it will deal with the value returned by
+// the Value() method instead. A boolean value is also returned to indicate if
+// the value is nil or not (only applicable to interface, pointer, map, and slice).
+// If the value is neither an interface nor a pointer, it will be returned back.
+func Indirect(value interface{}) (interface{}, bool) {
+	rv := reflect.ValueOf(value)
+	kind := rv.Kind()
+	switch kind {
+	case reflect.Invalid:
+		return nil, true
+	case reflect.Ptr, reflect.Interface:
+		if rv.IsNil() {
+			return nil, true
+		}
+		return Indirect(rv.Elem().Interface())
+	case reflect.Slice, reflect.Map, reflect.Func, reflect.Chan:
+		if rv.IsNil() {
+			return nil, true
+		}
+	}
+
+	if rv.Type().Implements(valuerType) {
+		return indirectValuer(value.(driver.Valuer))
+	}
+
+	return value, false
+}
+
+func indirectValuer(valuer driver.Valuer) (interface{}, bool) {
+	if value, err := valuer.Value(); value != nil && err == nil {
+		return Indirect(value)
+	}
+	return nil, true
+} diff --git a/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/validation.go b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/validation.go new file mode 100644 index 000000000000..1633258178d5 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/go-ozzo/ozzo-validation/validation.go @@ -0,0 +1,133 @@ +// Copyright 2016 Qiang Xue. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+// Package validation provides configurable and extensible rules for validating data of various types.
+package validation
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+)
+
+type (
+	// Validatable is the interface indicating the type implementing it supports data validation.
+	Validatable interface {
+		// Validate validates the data and returns an error if validation fails.
+		Validate() error
+	}
+
+	// Rule represents a validation rule.
+	Rule interface {
+		// Validate validates a value and returns an error if validation fails.
+		Validate(value interface{}) error
+	}
+
+	// RuleFunc represents a validator function.
+	// You may wrap it as a Rule by calling By().
+	RuleFunc func(value interface{}) error
+)
+
+var (
+	// ErrorTag is the struct tag name used to customize the error field name for a struct field.
+	ErrorTag = "json"
+
+	// Skip is a special validation rule that indicates all rules following it should be skipped.
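+	// For example (illustrative only; v and someRule are hypothetical):
+	// validation.Validate(v, validation.Skip, someRule) returns nil without
+	// ever evaluating someRule.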
+	Skip = &skipRule{}
+
+	validatableType = reflect.TypeOf((*Validatable)(nil)).Elem()
+)
+
+// Validate validates the given value and returns the validation error, if any.
+//
+// Validate performs validation using the following steps:
+// - validate the value against the rules passed in as parameters
+// - if the value is a map and the map values implement `Validatable`, call `Validate` of every map value
+// - if the value is a slice or array whose values implement `Validatable`, call `Validate` of every element
+func Validate(value interface{}, rules ...Rule) error {
+	for _, rule := range rules {
+		if _, ok := rule.(*skipRule); ok {
+			return nil
+		}
+		if err := rule.Validate(value); err != nil {
+			return err
+		}
+	}
+
+	rv := reflect.ValueOf(value)
+	if (rv.Kind() == reflect.Ptr || rv.Kind() == reflect.Interface) && rv.IsNil() {
+		return nil
+	}
+
+	if v, ok := value.(Validatable); ok {
+		return v.Validate()
+	}
+
+	switch rv.Kind() {
+	case reflect.Map:
+		if rv.Type().Elem().Implements(validatableType) {
+			return validateMap(rv)
+		}
+	case reflect.Slice, reflect.Array:
+		if rv.Type().Elem().Implements(validatableType) {
+			return validateSlice(rv)
+		}
+	case reflect.Ptr, reflect.Interface:
+		return Validate(rv.Elem().Interface())
+	}
+
+	return nil
+}
+
+// validateMap validates a map of validatable elements
+func validateMap(rv reflect.Value) error {
+	errs := Errors{}
+	for _, key := range rv.MapKeys() {
+		if mv := rv.MapIndex(key).Interface(); mv != nil {
+			if err := mv.(Validatable).Validate(); err != nil {
+				errs[fmt.Sprintf("%v", key.Interface())] = err
+			}
+		}
+	}
+	if len(errs) > 0 {
+		return errs
+	}
+	return nil
+}
+
+// validateSlice validates a slice/array of validatable elements
+func validateSlice(rv reflect.Value) error {
+	errs := Errors{}
+	l := rv.Len()
+	for i := 0; i < l; i++ {
+		if ev := rv.Index(i).Interface(); ev != nil {
+			if err := ev.(Validatable).Validate(); err != nil {
+				errs[strconv.Itoa(i)] = err
+			}
+		}
+	}
+	if len(errs) > 0 {
+		return errs
+	}
+	return nil
+}
+
+type skipRule struct{}
+
+func (r *skipRule) Validate(interface{}) error {
+	return nil
+}
+
+type inlineRule struct {
+	f RuleFunc
+}
+
+func (r *inlineRule) Validate(value interface{}) error {
+	return r.f(value)
+}
+
+// By wraps a RuleFunc into a Rule.
+func By(f RuleFunc) Rule {
+	return &inlineRule{f}
+} diff --git a/cluster-autoscaler/vendor/github.com/gogo/protobuf/AUTHORS b/cluster-autoscaler/vendor/github.com/gogo/protobuf/AUTHORS index 2eaf4d53a0c1..3d97fc7a29f8 100644 --- a/cluster-autoscaler/vendor/github.com/gogo/protobuf/AUTHORS +++ b/cluster-autoscaler/vendor/github.com/gogo/protobuf/AUTHORS @@ -10,5 +10,6 @@ # Please keep the list sorted.
+Sendgrid, Inc Vastech SA (PTY) LTD Walter Schulze diff --git a/cluster-autoscaler/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/cluster-autoscaler/vendor/github.com/gogo/protobuf/CONTRIBUTORS index 84a85b1e88db..b1abc4d305df 100644 --- a/cluster-autoscaler/vendor/github.com/gogo/protobuf/CONTRIBUTORS +++ b/cluster-autoscaler/vendor/github.com/gogo/protobuf/CONTRIBUTORS @@ -1,4 +1,5 @@ Anton Povarov +Brian Goff Clayton Coleman Denis Smirnov DongYun Kang @@ -10,9 +11,12 @@ John Shahid John Tuley Laurent Patrick Lee +Roger Johansson +Sam Nguyen Sergio Arbeo Stephen J Day Tamir Duberstein Todd Eisenberger Tormod Erevik Lea +Vyacheslav Kim Walter Schulze diff --git a/cluster-autoscaler/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/cluster-autoscaler/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go index 9506b6fb2052..fa88040f1171 100644 --- a/cluster-autoscaler/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go +++ b/cluster-autoscaler/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: gogo.proto -// DO NOT EDIT! /* Package gogoproto is a generated protocol buffer package. diff --git a/cluster-autoscaler/vendor/github.com/gogo/protobuf/proto/encode.go b/cluster-autoscaler/vendor/github.com/gogo/protobuf/proto/encode.go index 2b30f84626ad..8b84d1b22d4c 100644 --- a/cluster-autoscaler/vendor/github.com/gogo/protobuf/proto/encode.go +++ b/cluster-autoscaler/vendor/github.com/gogo/protobuf/proto/encode.go @@ -174,11 +174,11 @@ func sizeFixed32(x uint64) int { // This is the format used for the sint64 protocol buffer type. func (p *Buffer) EncodeZigzag64(x uint64) error { // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63))) } func sizeZigzag64(x uint64) int { - return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + return sizeVarint((x << 1) ^ uint64((int64(x) >> 63))) } // EncodeZigzag32 writes a zigzag-encoded 32-bit integer diff --git a/cluster-autoscaler/vendor/github.com/gogo/protobuf/proto/lib.go b/cluster-autoscaler/vendor/github.com/gogo/protobuf/proto/lib.go index 7580bb45c617..c98d73da49ea 100644 --- a/cluster-autoscaler/vendor/github.com/gogo/protobuf/proto/lib.go +++ b/cluster-autoscaler/vendor/github.com/gogo/protobuf/proto/lib.go @@ -73,7 +73,6 @@ for a protocol buffer variable v: When the .proto file specifies `syntax="proto3"`, there are some differences: - Non-repeated fields of non-message type are values instead of pointers. - - Getters are only generated for message and oneof fields. - Enum types do not get an Enum method. The simplest way to describe this is to see an example. 
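The two hunks below teach the gogo/protobuf Properties parser and text marshaler about the `casttype=` struct-tag option. As a rough sketch (the field and type names here are hypothetical, chosen only for illustration), a generated field carrying the option looks like:

    type Example struct {
    	// Properties.Parse extracts "OrderID" from the casttype= entry below
    	// and stores it in Properties.CastType; OrderID stands for a
    	// hypothetical integer alias such as `type OrderID uint64`.
    	Id OrderID `protobuf:"varint,1,opt,name=id,casttype=OrderID"`
    }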
diff --git a/cluster-autoscaler/vendor/github.com/gogo/protobuf/proto/properties.go b/cluster-autoscaler/vendor/github.com/gogo/protobuf/proto/properties.go index 44b332052ef5..2a69e8862d56 100644 --- a/cluster-autoscaler/vendor/github.com/gogo/protobuf/proto/properties.go +++ b/cluster-autoscaler/vendor/github.com/gogo/protobuf/proto/properties.go @@ -193,6 +193,7 @@ type Properties struct { Default string // default value HasDefault bool // whether an explicit default was provided CustomType string + CastType string StdTime bool StdDuration bool @@ -341,6 +342,8 @@ func (p *Properties) Parse(s string) { p.OrigName = strings.Split(f, "=")[1] case strings.HasPrefix(f, "customtype="): p.CustomType = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "casttype="): + p.CastType = strings.Split(f, "=")[1] case f == "stdtime": p.StdTime = true case f == "stdduration": diff --git a/cluster-autoscaler/vendor/github.com/gogo/protobuf/proto/text.go b/cluster-autoscaler/vendor/github.com/gogo/protobuf/proto/text.go index d63732fcbda5..f609d1d453b0 100644 --- a/cluster-autoscaler/vendor/github.com/gogo/protobuf/proto/text.go +++ b/cluster-autoscaler/vendor/github.com/gogo/protobuf/proto/text.go @@ -522,6 +522,17 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert } return nil } + } else if len(props.CastType) > 0 { + if _, ok := v.Interface().(interface { + String() string + }); ok { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + _, err := fmt.Fprintf(w, "%d", v.Interface()) + return err + } + } } else if props.StdTime { t, ok := v.Interface().(time.Time) if !ok { @@ -531,9 +542,9 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert if err != nil { return err } - props.StdTime = false - err = tm.writeAny(w, reflect.ValueOf(tproto), props) - props.StdTime = true + propsCopy := *props // Make a copy so that this is goroutine-safe + propsCopy.StdTime = false + err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy) return err } else if props.StdDuration { d, ok := v.Interface().(time.Duration) @@ -541,9 +552,9 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface()) } dproto := durationProto(d) - props.StdDuration = false - err := tm.writeAny(w, reflect.ValueOf(dproto), props) - props.StdDuration = true + propsCopy := *props // Make a copy so that this is goroutine-safe + propsCopy.StdDuration = false + err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy) return err } } diff --git a/cluster-autoscaler/vendor/github.com/gogo/protobuf/proto/text_parser.go b/cluster-autoscaler/vendor/github.com/gogo/protobuf/proto/text_parser.go index 9db12e96018e..f1276729a354 100644 --- a/cluster-autoscaler/vendor/github.com/gogo/protobuf/proto/text_parser.go +++ b/cluster-autoscaler/vendor/github.com/gogo/protobuf/proto/text_parser.go @@ -983,7 +983,7 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { return p.readStruct(fv, terminator) case reflect.Uint32: if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) + fv.SetUint(x) return nil } case reflect.Uint64: diff --git a/cluster-autoscaler/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/cluster-autoscaler/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go 
index e2703901b01e..82623f0497bc 100644 --- a/cluster-autoscaler/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go +++ b/cluster-autoscaler/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: descriptor.proto -// DO NOT EDIT! /* Package descriptor is a generated protocol buffer package. @@ -12,6 +11,7 @@ It has these top-level messages: FileDescriptorSet FileDescriptorProto DescriptorProto + ExtensionRangeOptions FieldDescriptorProto OneofDescriptorProto EnumDescriptorProto @@ -139,7 +139,7 @@ func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { return nil } func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{3, 0} + return fileDescriptorDescriptor, []int{4, 0} } type FieldDescriptorProto_Label int32 @@ -179,7 +179,7 @@ func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { return nil } func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{3, 1} + return fileDescriptorDescriptor, []int{4, 1} } // Generated classes can be optimized for speed or code size. @@ -220,7 +220,7 @@ func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { return nil } func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{9, 0} + return fileDescriptorDescriptor, []int{10, 0} } type FieldOptions_CType int32 @@ -260,7 +260,7 @@ func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { return nil } func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{11, 0} + return fileDescriptorDescriptor, []int{12, 0} } type FieldOptions_JSType int32 @@ -302,7 +302,7 @@ func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { return nil } func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{11, 1} + return fileDescriptorDescriptor, []int{12, 1} } // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, @@ -344,7 +344,7 @@ func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { return nil } func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{16, 0} + return fileDescriptorDescriptor, []int{17, 0} } // The protocol compiler can output a FileDescriptorSet containing the .proto @@ -576,9 +576,10 @@ func (m *DescriptorProto) GetReservedName() []string { } type DescriptorProto_ExtensionRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - XXX_unrecognized []byte `json:"-"` + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } @@ -602,6 +603,13 @@ func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { return 0 } +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + // Range of reserved tag numbers. 
Reserved tag numbers may not be used by // fields or extension ranges in the same message. Reserved ranges may // not overlap. @@ -632,6 +640,33 @@ func (m *DescriptorProto_ReservedRange) GetEnd() int32 { return 0 } +type ExtensionRangeOptions struct { + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{3} } + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + // Describes a field within a message. type FieldDescriptorProto struct { Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` @@ -670,7 +705,7 @@ type FieldDescriptorProto struct { func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } func (*FieldDescriptorProto) ProtoMessage() {} -func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{3} } +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{4} } func (m *FieldDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -752,7 +787,7 @@ type OneofDescriptorProto struct { func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } func (*OneofDescriptorProto) ProtoMessage() {} -func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{4} } +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{5} } func (m *OneofDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -779,7 +814,7 @@ type EnumDescriptorProto struct { func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } func (*EnumDescriptorProto) ProtoMessage() {} -func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{5} } +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{6} } func (m *EnumDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -814,7 +849,7 @@ func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorPro func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } func (*EnumValueDescriptorProto) ProtoMessage() {} func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{6} + return fileDescriptorDescriptor, []int{7} } func (m *EnumValueDescriptorProto) GetName() string { @@ -849,7 +884,7 @@ type ServiceDescriptorProto struct { func (m 
*ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } func (*ServiceDescriptorProto) ProtoMessage() {} -func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{7} } +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{8} } func (m *ServiceDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -890,7 +925,7 @@ type MethodDescriptorProto struct { func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } func (*MethodDescriptorProto) ProtoMessage() {} -func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{8} } +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{9} } const Default_MethodDescriptorProto_ClientStreaming bool = false const Default_MethodDescriptorProto_ServerStreaming bool = false @@ -985,6 +1020,7 @@ type FileOptions struct { CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,19,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` // Is this file deprecated? // Depending on the target platform, this can emit Deprecated annotations // for everything in the file, or it will be completely ignored; in the very @@ -1003,6 +1039,13 @@ type FileOptions struct { // defined. When this options is provided, they will use this value instead // to prefix the types/symbols defined. SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` proto.XXX_InternalExtensions `json:"-"` @@ -1012,7 +1055,7 @@ type FileOptions struct { func (m *FileOptions) Reset() { *m = FileOptions{} } func (m *FileOptions) String() string { return proto.CompactTextString(m) } func (*FileOptions) ProtoMessage() {} -func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{9} } +func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{10} } var extRange_FileOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -1028,6 +1071,7 @@ const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPE const Default_FileOptions_CcGenericServices bool = false const Default_FileOptions_JavaGenericServices bool = false const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_PhpGenericServices bool = false const Default_FileOptions_Deprecated bool = false const Default_FileOptions_CcEnableArenas bool = false @@ -1101,6 +1145,13 @@ func (m *FileOptions) GetPyGenericServices() bool { return Default_FileOptions_PyGenericServices } +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + func (m *FileOptions) GetDeprecated() bool { if m != nil && m.Deprecated != nil { return *m.Deprecated @@ -1136,6 +1187,20 @@ func (m *FileOptions) GetSwiftPrefix() string { return "" } +func (m *FileOptions) GetPhpClassPrefix() string { + if m != nil && m.PhpClassPrefix != nil { + return *m.PhpClassPrefix + } + return "" +} + +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" +} + func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { if m != nil { return m.UninterpretedOption @@ -1203,7 +1268,7 @@ type MessageOptions struct { func (m *MessageOptions) Reset() { *m = MessageOptions{} } func (m *MessageOptions) String() string { return proto.CompactTextString(m) } func (*MessageOptions) ProtoMessage() {} -func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{10} } +func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{11} } var extRange_MessageOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -1266,13 +1331,15 @@ type FieldOptions struct { Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` // The jstype option determines the JavaScript type used for values of the // field. The option is permitted only for 64 bit integral and fixed types - // (int64, uint64, sint64, fixed64, sfixed64). By default these types are - // represented as JavaScript strings. This avoids loss of precision that can - // happen when a large value is converted to a floating point JavaScript - // numbers. Specifying JS_NUMBER for the jstype causes the generated - // JavaScript code to use the JavaScript "number" type instead of strings. - // This option is an enum to permit additional types to be added, - // e.g. goog.math.Integer. + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. 
+ // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` // Should this field be parsed lazily? Lazy applies only to message-type // fields. It means that when the outer message is initially parsed, the @@ -1319,7 +1386,7 @@ type FieldOptions struct { func (m *FieldOptions) Reset() { *m = FieldOptions{} } func (m *FieldOptions) String() string { return proto.CompactTextString(m) } func (*FieldOptions) ProtoMessage() {} -func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{11} } +func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{12} } var extRange_FieldOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -1394,7 +1461,7 @@ type OneofOptions struct { func (m *OneofOptions) Reset() { *m = OneofOptions{} } func (m *OneofOptions) String() string { return proto.CompactTextString(m) } func (*OneofOptions) ProtoMessage() {} -func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{12} } +func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{13} } var extRange_OneofOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -1429,7 +1496,7 @@ type EnumOptions struct { func (m *EnumOptions) Reset() { *m = EnumOptions{} } func (m *EnumOptions) String() string { return proto.CompactTextString(m) } func (*EnumOptions) ProtoMessage() {} -func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{13} } +func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{14} } var extRange_EnumOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -1477,7 +1544,7 @@ type EnumValueOptions struct { func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } func (*EnumValueOptions) ProtoMessage() {} -func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{14} } +func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{15} } var extRange_EnumValueOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -1518,7 +1585,7 @@ type ServiceOptions struct { func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } func (*ServiceOptions) ProtoMessage() {} -func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{15} } +func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{16} } var extRange_ServiceOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -1560,7 +1627,7 @@ type MethodOptions struct { func (m *MethodOptions) Reset() { *m = MethodOptions{} } func (m *MethodOptions) String() string { return proto.CompactTextString(m) } func (*MethodOptions) ProtoMessage() {} -func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{16} } +func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{17} } var 
extRange_MethodOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -1616,7 +1683,7 @@ type UninterpretedOption struct { func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } func (*UninterpretedOption) ProtoMessage() {} -func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{17} } +func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{18} } func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { if m != nil { @@ -1682,7 +1749,7 @@ func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOptio func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } func (*UninterpretedOption_NamePart) ProtoMessage() {} func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{17, 0} + return fileDescriptorDescriptor, []int{18, 0} } func (m *UninterpretedOption_NamePart) GetNamePart() string { @@ -1752,7 +1819,7 @@ type SourceCodeInfo struct { func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } func (*SourceCodeInfo) ProtoMessage() {} -func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{18} } +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{19} } func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { if m != nil { @@ -1849,7 +1916,7 @@ func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } func (*SourceCodeInfo_Location) ProtoMessage() {} func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{18, 0} + return fileDescriptorDescriptor, []int{19, 0} } func (m *SourceCodeInfo_Location) GetPath() []int32 { @@ -1900,7 +1967,7 @@ type GeneratedCodeInfo struct { func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } func (*GeneratedCodeInfo) ProtoMessage() {} -func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{19} } +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{20} } func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { if m != nil { @@ -1929,7 +1996,7 @@ func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_ func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{19, 0} + return fileDescriptorDescriptor, []int{20, 0} } func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { @@ -1966,6 +2033,7 @@ func init() { proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") proto.RegisterType((*FieldDescriptorProto)(nil), 
"google.protobuf.FieldDescriptorProto") proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") @@ -1997,154 +2065,159 @@ func init() { func init() { proto.RegisterFile("descriptor.proto", fileDescriptorDescriptor) } var fileDescriptorDescriptor = []byte{ - // 2379 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x73, 0xdb, 0xc6, - 0x15, 0x37, 0xc1, 0x0f, 0x91, 0x8f, 0x14, 0xb5, 0x5a, 0x29, 0x36, 0x2c, 0xc7, 0xb1, 0xcc, 0xd8, - 0xb5, 0x6c, 0xb7, 0x74, 0x46, 0xfe, 0x88, 0xa3, 0x74, 0xd2, 0xa1, 0x48, 0x58, 0xa1, 0x4b, 0x91, - 0x2c, 0x48, 0x35, 0x76, 0x2e, 0x98, 0x15, 0xb0, 0xa4, 0x60, 0x83, 0x00, 0x02, 0x80, 0xb6, 0x95, - 0x93, 0x67, 0x7a, 0xea, 0x7f, 0xd0, 0xe9, 0x74, 0x7a, 0xc8, 0x25, 0x33, 0xed, 0xbd, 0x87, 0xde, - 0x7b, 0xed, 0x4c, 0xef, 0x3d, 0xf4, 0xd0, 0x99, 0xf6, 0x3f, 0xe8, 0xb5, 0xb3, 0xbb, 0x00, 0x08, - 0x7e, 0xd9, 0x4a, 0x66, 0x9c, 0x9c, 0xa4, 0xfd, 0xbd, 0xdf, 0x7b, 0x78, 0xfb, 0xf6, 0x87, 0xdd, - 0x87, 0x25, 0x20, 0x83, 0xfa, 0xba, 0x67, 0xba, 0x81, 0xe3, 0x55, 0x5d, 0xcf, 0x09, 0x1c, 0xbc, - 0x36, 0x74, 0x9c, 0xa1, 0x45, 0xc5, 0xe8, 0x78, 0x3c, 0xa8, 0x1c, 0xc2, 0xfa, 0x23, 0xd3, 0xa2, - 0x8d, 0x98, 0xd8, 0xa3, 0x01, 0x7e, 0x08, 0x99, 0x81, 0x69, 0x51, 0x39, 0xb5, 0x9d, 0xde, 0x29, - 0xee, 0x5e, 0xab, 0xce, 0x38, 0x55, 0xa7, 0x3d, 0xba, 0x0c, 0x56, 0xb9, 0x47, 0xe5, 0xdf, 0x19, - 0xd8, 0x58, 0x60, 0xc5, 0x18, 0x32, 0x36, 0x19, 0xb1, 0x88, 0xa9, 0x9d, 0x82, 0xca, 0xff, 0xc7, - 0x32, 0xac, 0xb8, 0x44, 0x7f, 0x4e, 0x86, 0x54, 0x96, 0x38, 0x1c, 0x0d, 0xf1, 0x07, 0x00, 0x06, - 0x75, 0xa9, 0x6d, 0x50, 0x5b, 0x3f, 0x95, 0xd3, 0xdb, 0xe9, 0x9d, 0x82, 0x9a, 0x40, 0xf0, 0x6d, - 0x58, 0x77, 0xc7, 0xc7, 0x96, 0xa9, 0x6b, 0x09, 0x1a, 0x6c, 0xa7, 0x77, 0xb2, 0x2a, 0x12, 0x86, - 0xc6, 0x84, 0x7c, 0x03, 0xd6, 0x5e, 0x52, 0xf2, 0x3c, 0x49, 0x2d, 0x72, 0x6a, 0x99, 0xc1, 0x09, - 0x62, 0x1d, 0x4a, 0x23, 0xea, 0xfb, 0x64, 0x48, 0xb5, 0xe0, 0xd4, 0xa5, 0x72, 0x86, 0xcf, 0x7e, - 0x7b, 0x6e, 0xf6, 0xb3, 0x33, 0x2f, 0x86, 0x5e, 0xfd, 0x53, 0x97, 0xe2, 0x1a, 0x14, 0xa8, 0x3d, - 0x1e, 0x89, 0x08, 0xd9, 0x25, 0xf5, 0x53, 0xec, 0xf1, 0x68, 0x36, 0x4a, 0x9e, 0xb9, 0x85, 0x21, - 0x56, 0x7c, 0xea, 0xbd, 0x30, 0x75, 0x2a, 0xe7, 0x78, 0x80, 0x1b, 0x73, 0x01, 0x7a, 0xc2, 0x3e, - 0x1b, 0x23, 0xf2, 0xc3, 0x75, 0x28, 0xd0, 0x57, 0x01, 0xb5, 0x7d, 0xd3, 0xb1, 0xe5, 0x15, 0x1e, - 0xe4, 0xfa, 0x82, 0x55, 0xa4, 0x96, 0x31, 0x1b, 0x62, 0xe2, 0x87, 0x1f, 0xc0, 0x8a, 0xe3, 0x06, - 0xa6, 0x63, 0xfb, 0x72, 0x7e, 0x3b, 0xb5, 0x53, 0xdc, 0x7d, 0x7f, 0xa1, 0x10, 0x3a, 0x82, 0xa3, - 0x46, 0x64, 0xdc, 0x04, 0xe4, 0x3b, 0x63, 0x4f, 0xa7, 0x9a, 0xee, 0x18, 0x54, 0x33, 0xed, 0x81, - 0x23, 0x17, 0x78, 0x80, 0x2b, 0xf3, 0x13, 0xe1, 0xc4, 0xba, 0x63, 0xd0, 0xa6, 0x3d, 0x70, 0xd4, - 0xb2, 0x3f, 0x35, 0xc6, 0xe7, 0x21, 0xe7, 0x9f, 0xda, 0x01, 0x79, 0x25, 0x97, 0xb8, 0x42, 0xc2, - 0x51, 0xe5, 0x7f, 0x59, 0x58, 0x3b, 0x8b, 0xc4, 0x3e, 0x85, 0xec, 0x80, 0xcd, 0x52, 0x96, 0xbe, - 0x4b, 0x0d, 0x84, 0xcf, 0x74, 0x11, 0x73, 0xdf, 0xb3, 0x88, 0x35, 0x28, 0xda, 0xd4, 0x0f, 0xa8, - 0x21, 0x14, 0x91, 0x3e, 0xa3, 0xa6, 0x40, 0x38, 0xcd, 0x4b, 0x2a, 0xf3, 0xbd, 0x24, 0xf5, 0x04, - 0xd6, 0xe2, 0x94, 0x34, 0x8f, 0xd8, 0xc3, 0x48, 0x9b, 0x77, 0xde, 0x96, 0x49, 0x55, 0x89, 0xfc, - 0x54, 0xe6, 0xa6, 0x96, 0xe9, 0xd4, 0x18, 0x37, 0x00, 0x1c, 0x9b, 0x3a, 0x03, 0xcd, 0xa0, 0xba, - 0x25, 0xe7, 0x97, 0x54, 0xa9, 0xc3, 0x28, 0x73, 0x55, 0x72, 0x04, 0xaa, 0x5b, 0xf8, 0x93, 
0x89, - 0xd4, 0x56, 0x96, 0x28, 0xe5, 0x50, 0xbc, 0x64, 0x73, 0x6a, 0x3b, 0x82, 0xb2, 0x47, 0x99, 0xee, - 0xa9, 0x11, 0xce, 0xac, 0xc0, 0x93, 0xa8, 0xbe, 0x75, 0x66, 0x6a, 0xe8, 0x26, 0x26, 0xb6, 0xea, - 0x25, 0x87, 0xf8, 0x43, 0x88, 0x01, 0x8d, 0xcb, 0x0a, 0xf8, 0x2e, 0x54, 0x8a, 0xc0, 0x36, 0x19, - 0xd1, 0xad, 0x87, 0x50, 0x9e, 0x2e, 0x0f, 0xde, 0x84, 0xac, 0x1f, 0x10, 0x2f, 0xe0, 0x2a, 0xcc, - 0xaa, 0x62, 0x80, 0x11, 0xa4, 0xa9, 0x6d, 0xf0, 0x5d, 0x2e, 0xab, 0xb2, 0x7f, 0xb7, 0x3e, 0x86, - 0xd5, 0xa9, 0xc7, 0x9f, 0xd5, 0xb1, 0xf2, 0xbb, 0x1c, 0x6c, 0x2e, 0xd2, 0xdc, 0x42, 0xf9, 0x9f, - 0x87, 0x9c, 0x3d, 0x1e, 0x1d, 0x53, 0x4f, 0x4e, 0xf3, 0x08, 0xe1, 0x08, 0xd7, 0x20, 0x6b, 0x91, - 0x63, 0x6a, 0xc9, 0x99, 0xed, 0xd4, 0x4e, 0x79, 0xf7, 0xf6, 0x99, 0x54, 0x5d, 0x6d, 0x31, 0x17, - 0x55, 0x78, 0xe2, 0xcf, 0x20, 0x13, 0x6e, 0x71, 0x2c, 0xc2, 0xad, 0xb3, 0x45, 0x60, 0x5a, 0x54, - 0xb9, 0x1f, 0xbe, 0x04, 0x05, 0xf6, 0x57, 0xd4, 0x36, 0xc7, 0x73, 0xce, 0x33, 0x80, 0xd5, 0x15, - 0x6f, 0x41, 0x9e, 0xcb, 0xcc, 0xa0, 0xd1, 0xd1, 0x10, 0x8f, 0xd9, 0xc2, 0x18, 0x74, 0x40, 0xc6, - 0x56, 0xa0, 0xbd, 0x20, 0xd6, 0x98, 0x72, 0xc1, 0x14, 0xd4, 0x52, 0x08, 0xfe, 0x9a, 0x61, 0xf8, - 0x0a, 0x14, 0x85, 0x2a, 0x4d, 0xdb, 0xa0, 0xaf, 0xf8, 0xee, 0x93, 0x55, 0x85, 0x50, 0x9b, 0x0c, - 0x61, 0x8f, 0x7f, 0xe6, 0x3b, 0x76, 0xb4, 0xb4, 0xfc, 0x11, 0x0c, 0xe0, 0x8f, 0xff, 0x78, 0x76, - 0xe3, 0xbb, 0xbc, 0x78, 0x7a, 0xb3, 0x5a, 0xac, 0xfc, 0x45, 0x82, 0x0c, 0x7f, 0xdf, 0xd6, 0xa0, - 0xd8, 0x7f, 0xda, 0x55, 0xb4, 0x46, 0xe7, 0x68, 0xbf, 0xa5, 0xa0, 0x14, 0x2e, 0x03, 0x70, 0xe0, - 0x51, 0xab, 0x53, 0xeb, 0x23, 0x29, 0x1e, 0x37, 0xdb, 0xfd, 0x07, 0xf7, 0x50, 0x3a, 0x76, 0x38, - 0x12, 0x40, 0x26, 0x49, 0xb8, 0xbb, 0x8b, 0xb2, 0x18, 0x41, 0x49, 0x04, 0x68, 0x3e, 0x51, 0x1a, - 0x0f, 0xee, 0xa1, 0xdc, 0x34, 0x72, 0x77, 0x17, 0xad, 0xe0, 0x55, 0x28, 0x70, 0x64, 0xbf, 0xd3, - 0x69, 0xa1, 0x7c, 0x1c, 0xb3, 0xd7, 0x57, 0x9b, 0xed, 0x03, 0x54, 0x88, 0x63, 0x1e, 0xa8, 0x9d, - 0xa3, 0x2e, 0x82, 0x38, 0xc2, 0xa1, 0xd2, 0xeb, 0xd5, 0x0e, 0x14, 0x54, 0x8c, 0x19, 0xfb, 0x4f, - 0xfb, 0x4a, 0x0f, 0x95, 0xa6, 0xd2, 0xba, 0xbb, 0x8b, 0x56, 0xe3, 0x47, 0x28, 0xed, 0xa3, 0x43, - 0x54, 0xc6, 0xeb, 0xb0, 0x2a, 0x1e, 0x11, 0x25, 0xb1, 0x36, 0x03, 0x3d, 0xb8, 0x87, 0xd0, 0x24, - 0x11, 0x11, 0x65, 0x7d, 0x0a, 0x78, 0x70, 0x0f, 0xe1, 0x4a, 0x1d, 0xb2, 0x5c, 0x5d, 0x18, 0x43, - 0xb9, 0x55, 0xdb, 0x57, 0x5a, 0x5a, 0xa7, 0xdb, 0x6f, 0x76, 0xda, 0xb5, 0x16, 0x4a, 0x4d, 0x30, - 0x55, 0xf9, 0xd5, 0x51, 0x53, 0x55, 0x1a, 0x48, 0x4a, 0x62, 0x5d, 0xa5, 0xd6, 0x57, 0x1a, 0x28, - 0x5d, 0xd1, 0x61, 0x73, 0xd1, 0x3e, 0xb3, 0xf0, 0xcd, 0x48, 0x2c, 0xb1, 0xb4, 0x64, 0x89, 0x79, - 0xac, 0xb9, 0x25, 0xfe, 0x26, 0x05, 0x1b, 0x0b, 0xf6, 0xda, 0x85, 0x0f, 0xf9, 0x05, 0x64, 0x85, - 0x44, 0xc5, 0xe9, 0x73, 0x73, 0xe1, 0xa6, 0xcd, 0x05, 0x3b, 0x77, 0x02, 0x71, 0xbf, 0xe4, 0x09, - 0x9c, 0x5e, 0x72, 0x02, 0xb3, 0x10, 0x73, 0x49, 0xfe, 0x26, 0x05, 0xf2, 0xb2, 0xd8, 0x6f, 0xd9, - 0x28, 0xa4, 0xa9, 0x8d, 0xe2, 0xd3, 0xd9, 0x04, 0xae, 0x2e, 0x9f, 0xc3, 0x5c, 0x16, 0xdf, 0xa6, - 0xe0, 0xfc, 0xe2, 0x46, 0x65, 0x61, 0x0e, 0x9f, 0x41, 0x6e, 0x44, 0x83, 0x13, 0x27, 0x3a, 0xac, - 0x7f, 0xb2, 0xe0, 0x08, 0x60, 0xe6, 0xd9, 0x5a, 0x85, 0x5e, 0xc9, 0x33, 0x24, 0xbd, 0xac, 0xdb, - 0x10, 0xd9, 0xcc, 0x65, 0xfa, 0x5b, 0x09, 0xde, 0x5b, 0x18, 0x7c, 0x61, 0xa2, 0x97, 0x01, 0x4c, - 0xdb, 0x1d, 0x07, 0xe2, 0x40, 0x16, 0xfb, 0x53, 0x81, 0x23, 0xfc, 0xdd, 0x67, 0x7b, 0xcf, 0x38, - 0x88, 0xed, 0x69, 0x6e, 0x07, 0x01, 0x71, 0xc2, 0xc3, 0x49, 0xa2, 0x19, 0x9e, 0xe8, 0x07, 0x4b, - 0x66, 0x3a, 0x77, 
0xd6, 0x7d, 0x04, 0x48, 0xb7, 0x4c, 0x6a, 0x07, 0x9a, 0x1f, 0x78, 0x94, 0x8c, - 0x4c, 0x7b, 0xc8, 0x37, 0xe0, 0xfc, 0x5e, 0x76, 0x40, 0x2c, 0x9f, 0xaa, 0x6b, 0xc2, 0xdc, 0x8b, - 0xac, 0xcc, 0x83, 0x9f, 0x32, 0x5e, 0xc2, 0x23, 0x37, 0xe5, 0x21, 0xcc, 0xb1, 0x47, 0xe5, 0xcf, - 0x2b, 0x50, 0x4c, 0xb4, 0x75, 0xf8, 0x2a, 0x94, 0x9e, 0x91, 0x17, 0x44, 0x8b, 0x5a, 0x75, 0x51, - 0x89, 0x22, 0xc3, 0xba, 0x61, 0xbb, 0xfe, 0x11, 0x6c, 0x72, 0x8a, 0x33, 0x0e, 0xa8, 0xa7, 0xe9, - 0x16, 0xf1, 0x7d, 0x5e, 0xb4, 0x3c, 0xa7, 0x62, 0x66, 0xeb, 0x30, 0x53, 0x3d, 0xb2, 0xe0, 0xfb, - 0xb0, 0xc1, 0x3d, 0x46, 0x63, 0x2b, 0x30, 0x5d, 0x8b, 0x6a, 0xec, 0xe3, 0xc1, 0xe7, 0x1b, 0x71, - 0x9c, 0xd9, 0x3a, 0x63, 0x1c, 0x86, 0x04, 0x96, 0x91, 0x8f, 0x1b, 0x70, 0x99, 0xbb, 0x0d, 0xa9, - 0x4d, 0x3d, 0x12, 0x50, 0x8d, 0x7e, 0x35, 0x26, 0x96, 0xaf, 0x11, 0xdb, 0xd0, 0x4e, 0x88, 0x7f, - 0x22, 0x6f, 0xb2, 0x00, 0xfb, 0x92, 0x9c, 0x52, 0x2f, 0x32, 0xe2, 0x41, 0xc8, 0x53, 0x38, 0xad, - 0x66, 0x1b, 0x9f, 0x13, 0xff, 0x04, 0xef, 0xc1, 0x79, 0x1e, 0xc5, 0x0f, 0x3c, 0xd3, 0x1e, 0x6a, - 0xfa, 0x09, 0xd5, 0x9f, 0x6b, 0xe3, 0x60, 0xf0, 0x50, 0xbe, 0x94, 0x7c, 0x3e, 0xcf, 0xb0, 0xc7, - 0x39, 0x75, 0x46, 0x39, 0x0a, 0x06, 0x0f, 0x71, 0x0f, 0x4a, 0x6c, 0x31, 0x46, 0xe6, 0xd7, 0x54, - 0x1b, 0x38, 0x1e, 0x3f, 0x59, 0xca, 0x0b, 0xde, 0xec, 0x44, 0x05, 0xab, 0x9d, 0xd0, 0xe1, 0xd0, - 0x31, 0xe8, 0x5e, 0xb6, 0xd7, 0x55, 0x94, 0x86, 0x5a, 0x8c, 0xa2, 0x3c, 0x72, 0x3c, 0x26, 0xa8, - 0xa1, 0x13, 0x17, 0xb8, 0x28, 0x04, 0x35, 0x74, 0xa2, 0xf2, 0xde, 0x87, 0x0d, 0x5d, 0x17, 0x73, - 0x36, 0x75, 0x2d, 0x6c, 0xf1, 0x7d, 0x19, 0x4d, 0x15, 0x4b, 0xd7, 0x0f, 0x04, 0x21, 0xd4, 0xb8, - 0x8f, 0x3f, 0x81, 0xf7, 0x26, 0xc5, 0x4a, 0x3a, 0xae, 0xcf, 0xcd, 0x72, 0xd6, 0xf5, 0x3e, 0x6c, - 0xb8, 0xa7, 0xf3, 0x8e, 0x78, 0xea, 0x89, 0xee, 0xe9, 0xac, 0xdb, 0x75, 0xfe, 0xd9, 0xe6, 0x51, - 0x9d, 0x04, 0xd4, 0x90, 0x2f, 0x24, 0xd9, 0x09, 0x03, 0xbe, 0x03, 0x48, 0xd7, 0x35, 0x6a, 0x93, - 0x63, 0x8b, 0x6a, 0xc4, 0xa3, 0x36, 0xf1, 0xe5, 0x2b, 0x49, 0x72, 0x59, 0xd7, 0x15, 0x6e, 0xad, - 0x71, 0x23, 0xbe, 0x05, 0xeb, 0xce, 0xf1, 0x33, 0x5d, 0x28, 0x4b, 0x73, 0x3d, 0x3a, 0x30, 0x5f, - 0xc9, 0xd7, 0x78, 0x99, 0xd6, 0x98, 0x81, 0xeb, 0xaa, 0xcb, 0x61, 0x7c, 0x13, 0x90, 0xee, 0x9f, - 0x10, 0xcf, 0xe5, 0x47, 0xbb, 0xef, 0x12, 0x9d, 0xca, 0xd7, 0x05, 0x55, 0xe0, 0xed, 0x08, 0x66, - 0xca, 0xf6, 0x5f, 0x9a, 0x83, 0x20, 0x8a, 0x78, 0x43, 0x28, 0x9b, 0x63, 0x61, 0xb4, 0x27, 0xb0, - 0x39, 0xb6, 0x4d, 0x3b, 0xa0, 0x9e, 0xeb, 0x51, 0xd6, 0xc4, 0x8b, 0x37, 0x51, 0xfe, 0xcf, 0xca, - 0x92, 0x36, 0xfc, 0x28, 0xc9, 0x16, 0x02, 0x50, 0x37, 0xc6, 0xf3, 0x60, 0x65, 0x0f, 0x4a, 0x49, - 0x5d, 0xe0, 0x02, 0x08, 0x65, 0xa0, 0x14, 0x3b, 0x63, 0xeb, 0x9d, 0x06, 0x3b, 0x1d, 0xbf, 0x54, - 0x90, 0xc4, 0x4e, 0xe9, 0x56, 0xb3, 0xaf, 0x68, 0xea, 0x51, 0xbb, 0xdf, 0x3c, 0x54, 0x50, 0xfa, - 0x56, 0x21, 0xff, 0xdf, 0x15, 0xf4, 0xfa, 0xf5, 0xeb, 0xd7, 0x52, 0xe5, 0x6f, 0x12, 0x94, 0xa7, - 0x3b, 0x63, 0xfc, 0x73, 0xb8, 0x10, 0x7d, 0xc6, 0xfa, 0x34, 0xd0, 0x5e, 0x9a, 0x1e, 0x97, 0xea, - 0x88, 0x88, 0xde, 0x32, 0xae, 0xf2, 0x66, 0xc8, 0xea, 0xd1, 0xe0, 0x0b, 0xd3, 0x63, 0x42, 0x1c, - 0x91, 0x00, 0xb7, 0xe0, 0x8a, 0xed, 0x68, 0x7e, 0x40, 0x6c, 0x83, 0x78, 0x86, 0x36, 0xb9, 0x40, - 0xd0, 0x88, 0xae, 0x53, 0xdf, 0x77, 0xc4, 0x11, 0x11, 0x47, 0x79, 0xdf, 0x76, 0x7a, 0x21, 0x79, - 0xb2, 0x77, 0xd6, 0x42, 0xea, 0x8c, 0x22, 0xd2, 0xcb, 0x14, 0x71, 0x09, 0x0a, 0x23, 0xe2, 0x6a, - 0xd4, 0x0e, 0xbc, 0x53, 0xde, 0xcf, 0xe5, 0xd5, 0xfc, 0x88, 0xb8, 0x0a, 0x1b, 0xbf, 0xbb, 0x35, - 0x48, 0xd6, 0xf1, 0x9f, 0x69, 0x28, 0x25, 
0x7b, 0x3a, 0xd6, 0x22, 0xeb, 0x7c, 0xff, 0x4e, 0xf1, - 0x37, 0xfc, 0xc3, 0x37, 0x76, 0x80, 0xd5, 0x3a, 0xdb, 0xd8, 0xf7, 0x72, 0xa2, 0xd3, 0x52, 0x85, - 0x27, 0x3b, 0x54, 0xd9, 0x3b, 0x4d, 0x45, 0xff, 0x9e, 0x57, 0xc3, 0x11, 0x3e, 0x80, 0xdc, 0x33, - 0x9f, 0xc7, 0xce, 0xf1, 0xd8, 0xd7, 0xde, 0x1c, 0xfb, 0x71, 0x8f, 0x07, 0x2f, 0x3c, 0xee, 0x69, - 0xed, 0x8e, 0x7a, 0x58, 0x6b, 0xa9, 0xa1, 0x3b, 0xbe, 0x08, 0x19, 0x8b, 0x7c, 0x7d, 0x3a, 0x7d, - 0x04, 0x70, 0xe8, 0xac, 0x85, 0xbf, 0x08, 0x99, 0x97, 0x94, 0x3c, 0x9f, 0xde, 0x78, 0x39, 0xf4, - 0x0e, 0xa5, 0x7f, 0x07, 0xb2, 0xbc, 0x5e, 0x18, 0x20, 0xac, 0x18, 0x3a, 0x87, 0xf3, 0x90, 0xa9, - 0x77, 0x54, 0x26, 0x7f, 0x04, 0x25, 0x81, 0x6a, 0xdd, 0xa6, 0x52, 0x57, 0x90, 0x54, 0xb9, 0x0f, - 0x39, 0x51, 0x04, 0xf6, 0x6a, 0xc4, 0x65, 0x40, 0xe7, 0xc2, 0x61, 0x18, 0x23, 0x15, 0x59, 0x8f, - 0x0e, 0xf7, 0x15, 0x15, 0x49, 0xc9, 0xe5, 0xf5, 0xa1, 0x94, 0x6c, 0xe7, 0x7e, 0x18, 0x4d, 0xfd, - 0x35, 0x05, 0xc5, 0x44, 0x7b, 0xc6, 0x1a, 0x03, 0x62, 0x59, 0xce, 0x4b, 0x8d, 0x58, 0x26, 0xf1, - 0x43, 0x51, 0x00, 0x87, 0x6a, 0x0c, 0x39, 0xeb, 0xa2, 0xfd, 0x20, 0xc9, 0xff, 0x31, 0x05, 0x68, - 0xb6, 0xb5, 0x9b, 0x49, 0x30, 0xf5, 0xa3, 0x26, 0xf8, 0x87, 0x14, 0x94, 0xa7, 0xfb, 0xb9, 0x99, - 0xf4, 0xae, 0xfe, 0xa8, 0xe9, 0xfd, 0x4b, 0x82, 0xd5, 0xa9, 0x2e, 0xee, 0xac, 0xd9, 0x7d, 0x05, - 0xeb, 0xa6, 0x41, 0x47, 0xae, 0x13, 0x50, 0x5b, 0x3f, 0xd5, 0x2c, 0xfa, 0x82, 0x5a, 0x72, 0x85, - 0x6f, 0x14, 0x77, 0xde, 0xdc, 0x27, 0x56, 0x9b, 0x13, 0xbf, 0x16, 0x73, 0xdb, 0xdb, 0x68, 0x36, - 0x94, 0xc3, 0x6e, 0xa7, 0xaf, 0xb4, 0xeb, 0x4f, 0xb5, 0xa3, 0xf6, 0x2f, 0xdb, 0x9d, 0x2f, 0xda, - 0x2a, 0x32, 0x67, 0x68, 0xef, 0xf0, 0x55, 0xef, 0x02, 0x9a, 0x4d, 0x0a, 0x5f, 0x80, 0x45, 0x69, - 0xa1, 0x73, 0x78, 0x03, 0xd6, 0xda, 0x1d, 0xad, 0xd7, 0x6c, 0x28, 0x9a, 0xf2, 0xe8, 0x91, 0x52, - 0xef, 0xf7, 0xc4, 0x87, 0x73, 0xcc, 0xee, 0x4f, 0xbf, 0xd4, 0xbf, 0x4f, 0xc3, 0xc6, 0x82, 0x4c, - 0x70, 0x2d, 0xec, 0xd9, 0xc5, 0x67, 0xc4, 0xcf, 0xce, 0x92, 0x7d, 0x95, 0x75, 0x05, 0x5d, 0xe2, - 0x05, 0x61, 0x8b, 0x7f, 0x13, 0x58, 0x95, 0xec, 0xc0, 0x1c, 0x98, 0xd4, 0x0b, 0xef, 0x19, 0x44, - 0x23, 0xbf, 0x36, 0xc1, 0xc5, 0x55, 0xc3, 0x4f, 0x01, 0xbb, 0x8e, 0x6f, 0x06, 0xe6, 0x0b, 0xaa, - 0x99, 0x76, 0x74, 0x29, 0xc1, 0x1a, 0xfb, 0x8c, 0x8a, 0x22, 0x4b, 0xd3, 0x0e, 0x62, 0xb6, 0x4d, - 0x87, 0x64, 0x86, 0xcd, 0x36, 0xf0, 0xb4, 0x8a, 0x22, 0x4b, 0xcc, 0xbe, 0x0a, 0x25, 0xc3, 0x19, - 0xb3, 0x36, 0x49, 0xf0, 0xd8, 0x79, 0x91, 0x52, 0x8b, 0x02, 0x8b, 0x29, 0x61, 0x1f, 0x3b, 0xb9, - 0x0d, 0x29, 0xa9, 0x45, 0x81, 0x09, 0xca, 0x0d, 0x58, 0x23, 0xc3, 0xa1, 0xc7, 0x82, 0x47, 0x81, - 0x44, 0x67, 0x5e, 0x8e, 0x61, 0x4e, 0xdc, 0x7a, 0x0c, 0xf9, 0xa8, 0x0e, 0xec, 0x48, 0x66, 0x95, - 0xd0, 0x5c, 0x71, 0x27, 0x25, 0xed, 0x14, 0xd4, 0xbc, 0x1d, 0x19, 0xaf, 0x42, 0xc9, 0xf4, 0xb5, - 0xc9, 0xe5, 0xa8, 0xb4, 0x2d, 0xed, 0xe4, 0xd5, 0xa2, 0xe9, 0xc7, 0xb7, 0x61, 0x95, 0x6f, 0x25, - 0x28, 0x4f, 0x5f, 0xee, 0xe2, 0x06, 0xe4, 0x2d, 0x47, 0x27, 0x5c, 0x5a, 0xe2, 0x97, 0x85, 0x9d, - 0xb7, 0xdc, 0x07, 0x57, 0x5b, 0x21, 0x5f, 0x8d, 0x3d, 0xb7, 0xfe, 0x9e, 0x82, 0x7c, 0x04, 0xe3, - 0xf3, 0x90, 0x71, 0x49, 0x70, 0xc2, 0xc3, 0x65, 0xf7, 0x25, 0x94, 0x52, 0xf9, 0x98, 0xe1, 0xbe, - 0x4b, 0x6c, 0x2e, 0x81, 0x10, 0x67, 0x63, 0xb6, 0xae, 0x16, 0x25, 0x06, 0x6f, 0xfb, 0x9d, 0xd1, - 0x88, 0xda, 0x81, 0x1f, 0xad, 0x6b, 0x88, 0xd7, 0x43, 0x18, 0xdf, 0x86, 0xf5, 0xc0, 0x23, 0xa6, - 0x35, 0xc5, 0xcd, 0x70, 0x2e, 0x8a, 0x0c, 0x31, 0x79, 0x0f, 0x2e, 0x46, 0x71, 0x0d, 0x1a, 0x10, - 0xfd, 0x84, 0x1a, 0x13, 0xa7, 0x1c, 0xbf, 0x39, 0xbc, 0x10, 0x12, 
0x1a, 0xa1, 0x3d, 0xf2, 0xad, - 0xfc, 0x23, 0x05, 0xeb, 0xd1, 0x87, 0x8a, 0x11, 0x17, 0xeb, 0x10, 0x80, 0xd8, 0xb6, 0x13, 0x24, - 0xcb, 0x35, 0x2f, 0xe5, 0x39, 0xbf, 0x6a, 0x2d, 0x76, 0x52, 0x13, 0x01, 0xb6, 0x46, 0x00, 0x13, - 0xcb, 0xd2, 0xb2, 0x5d, 0x81, 0x62, 0x78, 0x73, 0xcf, 0x7f, 0xfe, 0x11, 0x9f, 0xb6, 0x20, 0x20, - 0xf6, 0x45, 0x83, 0x37, 0x21, 0x7b, 0x4c, 0x87, 0xa6, 0x1d, 0xde, 0x27, 0x8a, 0x41, 0x74, 0x4b, - 0x99, 0x89, 0x6f, 0x29, 0xf7, 0x9f, 0xc0, 0x86, 0xee, 0x8c, 0x66, 0xd3, 0xdd, 0x47, 0x33, 0x9f, - 0xd7, 0xfe, 0xe7, 0xa9, 0x2f, 0x61, 0xd2, 0x62, 0x7e, 0x23, 0xa5, 0x0f, 0xba, 0xfb, 0x7f, 0x92, - 0xb6, 0x0e, 0x84, 0x5f, 0x37, 0x9a, 0xa6, 0x4a, 0x07, 0x16, 0xd5, 0x59, 0xea, 0xff, 0x0f, 0x00, - 0x00, 0xff, 0xff, 0xa0, 0xbf, 0x63, 0x15, 0xd3, 0x1a, 0x00, 0x00, + // 2451 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0xdb, 0xc8, + 0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0xe5, 0xf1, 0xd8, 0x9b, 0x30, 0xce, 0x66, 0xe3, 0x28, 0xc9, + 0xc6, 0x49, 0x5a, 0x65, 0xe1, 0x7c, 0xae, 0xb7, 0xd8, 0x56, 0x96, 0x18, 0xaf, 0x52, 0x59, 0x52, + 0x29, 0xb9, 0x9b, 0xec, 0x85, 0x18, 0x93, 0x23, 0x89, 0x09, 0x45, 0x72, 0x49, 0x2a, 0x89, 0x83, + 0x1e, 0x02, 0xf4, 0xd4, 0xff, 0xa0, 0x28, 0x8a, 0x1e, 0xf6, 0xb2, 0x40, 0xaf, 0x05, 0x0a, 0xb4, + 0xf7, 0x5e, 0x0b, 0xf4, 0xde, 0x43, 0x0f, 0x05, 0xda, 0x3f, 0xa3, 0x98, 0x19, 0x92, 0xa2, 0xbe, + 0x12, 0x77, 0x81, 0x64, 0x4f, 0xf6, 0xfc, 0xde, 0xef, 0xbd, 0x79, 0xf3, 0xf8, 0x66, 0xde, 0x9b, + 0x11, 0x20, 0x9d, 0x7a, 0x9a, 0x6b, 0x38, 0xbe, 0xed, 0x56, 0x1c, 0xd7, 0xf6, 0x6d, 0xbc, 0x36, + 0xb0, 0xed, 0x81, 0x49, 0xc5, 0xe8, 0x78, 0xdc, 0x2f, 0x1f, 0xc2, 0xfa, 0x43, 0xc3, 0xa4, 0xf5, + 0x88, 0xd8, 0xa5, 0x3e, 0x7e, 0x00, 0xe9, 0xbe, 0x61, 0x52, 0x29, 0xb1, 0x9d, 0xda, 0x29, 0xec, + 0x5e, 0xa9, 0xcc, 0x28, 0x55, 0xa6, 0x35, 0x3a, 0x0c, 0x56, 0xb8, 0x46, 0xf9, 0xdf, 0x69, 0xd8, + 0x58, 0x20, 0xc5, 0x18, 0xd2, 0x16, 0x19, 0x31, 0x8b, 0x89, 0x9d, 0xbc, 0xc2, 0xff, 0xc7, 0x12, + 0xac, 0x38, 0x44, 0x7b, 0x46, 0x06, 0x54, 0x4a, 0x72, 0x38, 0x1c, 0xe2, 0x8f, 0x01, 0x74, 0xea, + 0x50, 0x4b, 0xa7, 0x96, 0x76, 0x22, 0xa5, 0xb6, 0x53, 0x3b, 0x79, 0x25, 0x86, 0xe0, 0x9b, 0xb0, + 0xee, 0x8c, 0x8f, 0x4d, 0x43, 0x53, 0x63, 0x34, 0xd8, 0x4e, 0xed, 0x64, 0x14, 0x24, 0x04, 0xf5, + 0x09, 0xf9, 0x1a, 0xac, 0xbd, 0xa0, 0xe4, 0x59, 0x9c, 0x5a, 0xe0, 0xd4, 0x12, 0x83, 0x63, 0xc4, + 0x1a, 0x14, 0x47, 0xd4, 0xf3, 0xc8, 0x80, 0xaa, 0xfe, 0x89, 0x43, 0xa5, 0x34, 0x5f, 0xfd, 0xf6, + 0xdc, 0xea, 0x67, 0x57, 0x5e, 0x08, 0xb4, 0x7a, 0x27, 0x0e, 0xc5, 0x55, 0xc8, 0x53, 0x6b, 0x3c, + 0x12, 0x16, 0x32, 0x4b, 0xe2, 0x27, 0x5b, 0xe3, 0xd1, 0xac, 0x95, 0x1c, 0x53, 0x0b, 0x4c, 0xac, + 0x78, 0xd4, 0x7d, 0x6e, 0x68, 0x54, 0xca, 0x72, 0x03, 0xd7, 0xe6, 0x0c, 0x74, 0x85, 0x7c, 0xd6, + 0x46, 0xa8, 0x87, 0x6b, 0x90, 0xa7, 0x2f, 0x7d, 0x6a, 0x79, 0x86, 0x6d, 0x49, 0x2b, 0xdc, 0xc8, + 0xd5, 0x05, 0x5f, 0x91, 0x9a, 0xfa, 0xac, 0x89, 0x89, 0x1e, 0xbe, 0x07, 0x2b, 0xb6, 0xe3, 0x1b, + 0xb6, 0xe5, 0x49, 0xb9, 0xed, 0xc4, 0x4e, 0x61, 0xf7, 0xa3, 0x85, 0x89, 0xd0, 0x16, 0x1c, 0x25, + 0x24, 0xe3, 0x06, 0x20, 0xcf, 0x1e, 0xbb, 0x1a, 0x55, 0x35, 0x5b, 0xa7, 0xaa, 0x61, 0xf5, 0x6d, + 0x29, 0xcf, 0x0d, 0x5c, 0x9c, 0x5f, 0x08, 0x27, 0xd6, 0x6c, 0x9d, 0x36, 0xac, 0xbe, 0xad, 0x94, + 0xbc, 0xa9, 0x31, 0x3e, 0x03, 0x59, 0xef, 0xc4, 0xf2, 0xc9, 0x4b, 0xa9, 0xc8, 0x33, 0x24, 0x18, + 0x95, 0xff, 0x92, 0x85, 0xb5, 0xd3, 0xa4, 0xd8, 0xe7, 0x90, 0xe9, 0xb3, 0x55, 0x4a, 0xc9, 0xff, + 0x27, 0x06, 0x42, 0x67, 0x3a, 0x88, 0xd9, 0xef, 0x19, 0xc4, 0x2a, 0x14, 
0x2c, 0xea, 0xf9, 0x54, + 0x17, 0x19, 0x91, 0x3a, 0x65, 0x4e, 0x81, 0x50, 0x9a, 0x4f, 0xa9, 0xf4, 0xf7, 0x4a, 0xa9, 0xc7, + 0xb0, 0x16, 0xb9, 0xa4, 0xba, 0xc4, 0x1a, 0x84, 0xb9, 0x79, 0xeb, 0x6d, 0x9e, 0x54, 0xe4, 0x50, + 0x4f, 0x61, 0x6a, 0x4a, 0x89, 0x4e, 0x8d, 0x71, 0x1d, 0xc0, 0xb6, 0xa8, 0xdd, 0x57, 0x75, 0xaa, + 0x99, 0x52, 0x6e, 0x49, 0x94, 0xda, 0x8c, 0x32, 0x17, 0x25, 0x5b, 0xa0, 0x9a, 0x89, 0x3f, 0x9b, + 0xa4, 0xda, 0xca, 0x92, 0x4c, 0x39, 0x14, 0x9b, 0x6c, 0x2e, 0xdb, 0x8e, 0xa0, 0xe4, 0x52, 0x96, + 0xf7, 0x54, 0x0f, 0x56, 0x96, 0xe7, 0x4e, 0x54, 0xde, 0xba, 0x32, 0x25, 0x50, 0x13, 0x0b, 0x5b, + 0x75, 0xe3, 0x43, 0x7c, 0x19, 0x22, 0x40, 0xe5, 0x69, 0x05, 0xfc, 0x14, 0x2a, 0x86, 0x60, 0x8b, + 0x8c, 0xe8, 0xd6, 0x2b, 0x28, 0x4d, 0x87, 0x07, 0x6f, 0x42, 0xc6, 0xf3, 0x89, 0xeb, 0xf3, 0x2c, + 0xcc, 0x28, 0x62, 0x80, 0x11, 0xa4, 0xa8, 0xa5, 0xf3, 0x53, 0x2e, 0xa3, 0xb0, 0x7f, 0xf1, 0xcf, + 0x26, 0x0b, 0x4e, 0xf1, 0x05, 0x7f, 0x32, 0xff, 0x45, 0xa7, 0x2c, 0xcf, 0xae, 0x7b, 0xeb, 0x3e, + 0xac, 0x4e, 0x2d, 0xe0, 0xb4, 0x53, 0x97, 0x7f, 0x05, 0x1f, 0x2e, 0x34, 0x8d, 0x1f, 0xc3, 0xe6, + 0xd8, 0x32, 0x2c, 0x9f, 0xba, 0x8e, 0x4b, 0x59, 0xc6, 0x8a, 0xa9, 0xa4, 0xff, 0xac, 0x2c, 0xc9, + 0xb9, 0xa3, 0x38, 0x5b, 0x58, 0x51, 0x36, 0xc6, 0xf3, 0xe0, 0x8d, 0x7c, 0xee, 0xbf, 0x2b, 0xe8, + 0xf5, 0xeb, 0xd7, 0xaf, 0x93, 0xe5, 0xdf, 0x66, 0x61, 0x73, 0xd1, 0x9e, 0x59, 0xb8, 0x7d, 0xcf, + 0x40, 0xd6, 0x1a, 0x8f, 0x8e, 0xa9, 0xcb, 0x83, 0x94, 0x51, 0x82, 0x11, 0xae, 0x42, 0xc6, 0x24, + 0xc7, 0xd4, 0x94, 0xd2, 0xdb, 0x89, 0x9d, 0xd2, 0xee, 0xcd, 0x53, 0xed, 0xca, 0x4a, 0x93, 0xa9, + 0x28, 0x42, 0x13, 0x7f, 0x01, 0xe9, 0xe0, 0x88, 0x66, 0x16, 0x6e, 0x9c, 0xce, 0x02, 0xdb, 0x4b, + 0x0a, 0xd7, 0xc3, 0xe7, 0x21, 0xcf, 0xfe, 0x8a, 0xdc, 0xc8, 0x72, 0x9f, 0x73, 0x0c, 0x60, 0x79, + 0x81, 0xb7, 0x20, 0xc7, 0xb7, 0x89, 0x4e, 0xc3, 0xd2, 0x16, 0x8d, 0x59, 0x62, 0xe9, 0xb4, 0x4f, + 0xc6, 0xa6, 0xaf, 0x3e, 0x27, 0xe6, 0x98, 0xf2, 0x84, 0xcf, 0x2b, 0xc5, 0x00, 0xfc, 0x25, 0xc3, + 0xf0, 0x45, 0x28, 0x88, 0x5d, 0x65, 0x58, 0x3a, 0x7d, 0xc9, 0x4f, 0xcf, 0x8c, 0x22, 0x36, 0x5a, + 0x83, 0x21, 0x6c, 0xfa, 0xa7, 0x9e, 0x6d, 0x85, 0xa9, 0xc9, 0xa7, 0x60, 0x00, 0x9f, 0xfe, 0xfe, + 0xec, 0xc1, 0x7d, 0x61, 0xf1, 0xf2, 0x66, 0x73, 0xaa, 0xfc, 0xe7, 0x24, 0xa4, 0xf9, 0x79, 0xb1, + 0x06, 0x85, 0xde, 0x93, 0x8e, 0xac, 0xd6, 0xdb, 0x47, 0xfb, 0x4d, 0x19, 0x25, 0x70, 0x09, 0x80, + 0x03, 0x0f, 0x9b, 0xed, 0x6a, 0x0f, 0x25, 0xa3, 0x71, 0xa3, 0xd5, 0xbb, 0x77, 0x07, 0xa5, 0x22, + 0x85, 0x23, 0x01, 0xa4, 0xe3, 0x84, 0xdb, 0xbb, 0x28, 0x83, 0x11, 0x14, 0x85, 0x81, 0xc6, 0x63, + 0xb9, 0x7e, 0xef, 0x0e, 0xca, 0x4e, 0x23, 0xb7, 0x77, 0xd1, 0x0a, 0x5e, 0x85, 0x3c, 0x47, 0xf6, + 0xdb, 0xed, 0x26, 0xca, 0x45, 0x36, 0xbb, 0x3d, 0xa5, 0xd1, 0x3a, 0x40, 0xf9, 0xc8, 0xe6, 0x81, + 0xd2, 0x3e, 0xea, 0x20, 0x88, 0x2c, 0x1c, 0xca, 0xdd, 0x6e, 0xf5, 0x40, 0x46, 0x85, 0x88, 0xb1, + 0xff, 0xa4, 0x27, 0x77, 0x51, 0x71, 0xca, 0xad, 0xdb, 0xbb, 0x68, 0x35, 0x9a, 0x42, 0x6e, 0x1d, + 0x1d, 0xa2, 0x12, 0x5e, 0x87, 0x55, 0x31, 0x45, 0xe8, 0xc4, 0xda, 0x0c, 0x74, 0xef, 0x0e, 0x42, + 0x13, 0x47, 0x84, 0x95, 0xf5, 0x29, 0xe0, 0xde, 0x1d, 0x84, 0xcb, 0x35, 0xc8, 0xf0, 0xec, 0xc2, + 0x18, 0x4a, 0xcd, 0xea, 0xbe, 0xdc, 0x54, 0xdb, 0x9d, 0x5e, 0xa3, 0xdd, 0xaa, 0x36, 0x51, 0x62, + 0x82, 0x29, 0xf2, 0x2f, 0x8e, 0x1a, 0x8a, 0x5c, 0x47, 0xc9, 0x38, 0xd6, 0x91, 0xab, 0x3d, 0xb9, + 0x8e, 0x52, 0x65, 0x0d, 0x36, 0x17, 0x9d, 0x93, 0x0b, 0x77, 0x46, 0xec, 0x13, 0x27, 0x97, 0x7c, + 0x62, 0x6e, 0x6b, 0xee, 0x13, 0x7f, 0x9b, 0x80, 0x8d, 0x05, 0xb5, 0x62, 0xe1, 0x24, 0x3f, 0x85, + 
0x8c, 0x48, 0x51, 0x51, 0x3d, 0xaf, 0x2f, 0x2c, 0x3a, 0x3c, 0x61, 0xe7, 0x2a, 0x28, 0xd7, 0x8b, + 0x77, 0x10, 0xa9, 0x25, 0x1d, 0x04, 0x33, 0x31, 0xe7, 0xe4, 0xaf, 0x13, 0x20, 0x2d, 0xb3, 0xfd, + 0x96, 0x83, 0x22, 0x39, 0x75, 0x50, 0x7c, 0x3e, 0xeb, 0xc0, 0xa5, 0xe5, 0x6b, 0x98, 0xf3, 0xe2, + 0xbb, 0x04, 0x9c, 0x59, 0xdc, 0x68, 0x2d, 0xf4, 0xe1, 0x0b, 0xc8, 0x8e, 0xa8, 0x3f, 0xb4, 0xc3, + 0x66, 0xe3, 0x93, 0x05, 0x25, 0x8c, 0x89, 0x67, 0x63, 0x15, 0x68, 0xc5, 0x6b, 0x60, 0x6a, 0x59, + 0xb7, 0x24, 0xbc, 0x99, 0xf3, 0xf4, 0x37, 0x49, 0xf8, 0x70, 0xa1, 0xf1, 0x85, 0x8e, 0x5e, 0x00, + 0x30, 0x2c, 0x67, 0xec, 0x8b, 0x86, 0x42, 0x9c, 0x4f, 0x79, 0x8e, 0xf0, 0xbd, 0xcf, 0xce, 0x9e, + 0xb1, 0x1f, 0xc9, 0x53, 0x5c, 0x0e, 0x02, 0xe2, 0x84, 0x07, 0x13, 0x47, 0xd3, 0xdc, 0xd1, 0x8f, + 0x97, 0xac, 0x74, 0xae, 0x56, 0x7f, 0x0a, 0x48, 0x33, 0x0d, 0x6a, 0xf9, 0xaa, 0xe7, 0xbb, 0x94, + 0x8c, 0x0c, 0x6b, 0xc0, 0x0f, 0xe0, 0xdc, 0x5e, 0xa6, 0x4f, 0x4c, 0x8f, 0x2a, 0x6b, 0x42, 0xdc, + 0x0d, 0xa5, 0x4c, 0x83, 0xd7, 0x38, 0x37, 0xa6, 0x91, 0x9d, 0xd2, 0x10, 0xe2, 0x48, 0xa3, 0xfc, + 0xa7, 0x1c, 0x14, 0x62, 0x6d, 0x29, 0xbe, 0x04, 0xc5, 0xa7, 0xe4, 0x39, 0x51, 0xc3, 0xab, 0x86, + 0x88, 0x44, 0x81, 0x61, 0x9d, 0xe0, 0xba, 0xf1, 0x29, 0x6c, 0x72, 0x8a, 0x3d, 0xf6, 0xa9, 0xab, + 0x6a, 0x26, 0xf1, 0x3c, 0x1e, 0xb4, 0x1c, 0xa7, 0x62, 0x26, 0x6b, 0x33, 0x51, 0x2d, 0x94, 0xe0, + 0xbb, 0xb0, 0xc1, 0x35, 0x46, 0x63, 0xd3, 0x37, 0x1c, 0x93, 0xaa, 0xec, 0xf2, 0xe3, 0xf1, 0x83, + 0x38, 0xf2, 0x6c, 0x9d, 0x31, 0x0e, 0x03, 0x02, 0xf3, 0xc8, 0xc3, 0x75, 0xb8, 0xc0, 0xd5, 0x06, + 0xd4, 0xa2, 0x2e, 0xf1, 0xa9, 0x4a, 0xbf, 0x19, 0x13, 0xd3, 0x53, 0x89, 0xa5, 0xab, 0x43, 0xe2, + 0x0d, 0xa5, 0x4d, 0x66, 0x60, 0x3f, 0x29, 0x25, 0x94, 0x73, 0x8c, 0x78, 0x10, 0xf0, 0x64, 0x4e, + 0xab, 0x5a, 0xfa, 0x97, 0xc4, 0x1b, 0xe2, 0x3d, 0x38, 0xc3, 0xad, 0x78, 0xbe, 0x6b, 0x58, 0x03, + 0x55, 0x1b, 0x52, 0xed, 0x99, 0x3a, 0xf6, 0xfb, 0x0f, 0xa4, 0xf3, 0xf1, 0xf9, 0xb9, 0x87, 0x5d, + 0xce, 0xa9, 0x31, 0xca, 0x91, 0xdf, 0x7f, 0x80, 0xbb, 0x50, 0x64, 0x1f, 0x63, 0x64, 0xbc, 0xa2, + 0x6a, 0xdf, 0x76, 0x79, 0x65, 0x29, 0x2d, 0xd8, 0xd9, 0xb1, 0x08, 0x56, 0xda, 0x81, 0xc2, 0xa1, + 0xad, 0xd3, 0xbd, 0x4c, 0xb7, 0x23, 0xcb, 0x75, 0xa5, 0x10, 0x5a, 0x79, 0x68, 0xbb, 0x2c, 0xa1, + 0x06, 0x76, 0x14, 0xe0, 0x82, 0x48, 0xa8, 0x81, 0x1d, 0x86, 0xf7, 0x2e, 0x6c, 0x68, 0x9a, 0x58, + 0xb3, 0xa1, 0xa9, 0xc1, 0x15, 0xc5, 0x93, 0xd0, 0x54, 0xb0, 0x34, 0xed, 0x40, 0x10, 0x82, 0x1c, + 0xf7, 0xf0, 0x67, 0xf0, 0xe1, 0x24, 0x58, 0x71, 0xc5, 0xf5, 0xb9, 0x55, 0xce, 0xaa, 0xde, 0x85, + 0x0d, 0xe7, 0x64, 0x5e, 0x11, 0x4f, 0xcd, 0xe8, 0x9c, 0xcc, 0xaa, 0xdd, 0x87, 0x4d, 0x67, 0xe8, + 0xcc, 0xeb, 0x6d, 0xc4, 0xf5, 0xb0, 0x33, 0x74, 0x66, 0x15, 0xaf, 0xf2, 0xfb, 0xaa, 0x4b, 0x35, + 0xe2, 0x53, 0x5d, 0x3a, 0x1b, 0xa7, 0xc7, 0x04, 0xf8, 0x16, 0x20, 0x4d, 0x53, 0xa9, 0x45, 0x8e, + 0x4d, 0xaa, 0x12, 0x97, 0x5a, 0xc4, 0x93, 0x2e, 0xc6, 0xc9, 0x25, 0x4d, 0x93, 0xb9, 0xb4, 0xca, + 0x85, 0xf8, 0x06, 0xac, 0xdb, 0xc7, 0x4f, 0x35, 0x91, 0x92, 0xaa, 0xe3, 0xd2, 0xbe, 0xf1, 0x52, + 0xba, 0xc2, 0xe3, 0xbb, 0xc6, 0x04, 0x3c, 0x21, 0x3b, 0x1c, 0xc6, 0xd7, 0x01, 0x69, 0xde, 0x90, + 0xb8, 0x0e, 0xef, 0x09, 0x3c, 0x87, 0x68, 0x54, 0xba, 0x2a, 0xa8, 0x02, 0x6f, 0x85, 0x30, 0xdb, + 0x12, 0xde, 0x0b, 0xa3, 0xef, 0x87, 0x16, 0xaf, 0x89, 0x2d, 0xc1, 0xb1, 0xc0, 0xda, 0x0e, 0x20, + 0x16, 0x8a, 0xa9, 0x89, 0x77, 0x38, 0xad, 0xe4, 0x0c, 0x9d, 0xf8, 0xbc, 0x97, 0x61, 0x95, 0x31, + 0x27, 0x93, 0x5e, 0x17, 0xfd, 0x8c, 0x33, 0x8c, 0xcd, 0xf8, 0xce, 0x5a, 0xcb, 0xf2, 0x1e, 0x14, + 0xe3, 0xf9, 0x89, 0xf3, 
0x20, 0x32, 0x14, 0x25, 0x58, 0xad, 0xaf, 0xb5, 0xeb, 0xac, 0x4a, 0x7f, + 0x2d, 0xa3, 0x24, 0xeb, 0x16, 0x9a, 0x8d, 0x9e, 0xac, 0x2a, 0x47, 0xad, 0x5e, 0xe3, 0x50, 0x46, + 0xa9, 0x78, 0x5b, 0xfa, 0xb7, 0x24, 0x94, 0xa6, 0x6f, 0x18, 0xf8, 0x27, 0x70, 0x36, 0x7c, 0x0e, + 0xf0, 0xa8, 0xaf, 0xbe, 0x30, 0x5c, 0xbe, 0x65, 0x46, 0x44, 0x74, 0xd8, 0xd1, 0x47, 0xdb, 0x0c, + 0x58, 0x5d, 0xea, 0x7f, 0x65, 0xb8, 0x6c, 0x43, 0x8c, 0x88, 0x8f, 0x9b, 0x70, 0xd1, 0xb2, 0x55, + 0xcf, 0x27, 0x96, 0x4e, 0x5c, 0x5d, 0x9d, 0x3c, 0xc4, 0xa8, 0x44, 0xd3, 0xa8, 0xe7, 0xd9, 0xa2, + 0x54, 0x45, 0x56, 0x3e, 0xb2, 0xec, 0x6e, 0x40, 0x9e, 0x9c, 0xe1, 0xd5, 0x80, 0x3a, 0x93, 0x60, + 0xa9, 0x65, 0x09, 0x76, 0x1e, 0xf2, 0x23, 0xe2, 0xa8, 0xd4, 0xf2, 0xdd, 0x13, 0xde, 0x57, 0xe6, + 0x94, 0xdc, 0x88, 0x38, 0x32, 0x1b, 0xbf, 0x9f, 0xf6, 0xfe, 0x9f, 0x29, 0x28, 0xc6, 0x7b, 0x4b, + 0xd6, 0xaa, 0x6b, 0xbc, 0x8e, 0x24, 0xf8, 0x49, 0x73, 0xf9, 0x8d, 0x9d, 0x68, 0xa5, 0xc6, 0x0a, + 0xcc, 0x5e, 0x56, 0x74, 0x7c, 0x8a, 0xd0, 0x64, 0xc5, 0x9d, 0x9d, 0x2d, 0x54, 0xdc, 0x62, 0x72, + 0x4a, 0x30, 0xc2, 0x07, 0x90, 0x7d, 0xea, 0x71, 0xdb, 0x59, 0x6e, 0xfb, 0xca, 0x9b, 0x6d, 0x3f, + 0xea, 0x72, 0xe3, 0xf9, 0x47, 0x5d, 0xb5, 0xd5, 0x56, 0x0e, 0xab, 0x4d, 0x25, 0x50, 0xc7, 0xe7, + 0x20, 0x6d, 0x92, 0x57, 0x27, 0xd3, 0xa5, 0x88, 0x43, 0xa7, 0x0d, 0xfc, 0x39, 0x48, 0xbf, 0xa0, + 0xe4, 0xd9, 0x74, 0x01, 0xe0, 0xd0, 0x3b, 0x4c, 0xfd, 0x5b, 0x90, 0xe1, 0xf1, 0xc2, 0x00, 0x41, + 0xc4, 0xd0, 0x07, 0x38, 0x07, 0xe9, 0x5a, 0x5b, 0x61, 0xe9, 0x8f, 0xa0, 0x28, 0x50, 0xb5, 0xd3, + 0x90, 0x6b, 0x32, 0x4a, 0x96, 0xef, 0x42, 0x56, 0x04, 0x81, 0x6d, 0x8d, 0x28, 0x0c, 0xe8, 0x83, + 0x60, 0x18, 0xd8, 0x48, 0x84, 0xd2, 0xa3, 0xc3, 0x7d, 0x59, 0x41, 0xc9, 0xf8, 0xe7, 0xf5, 0xa0, + 0x18, 0x6f, 0x2b, 0xdf, 0x4f, 0x4e, 0xfd, 0x35, 0x01, 0x85, 0x58, 0x9b, 0xc8, 0x1a, 0x14, 0x62, + 0x9a, 0xf6, 0x0b, 0x95, 0x98, 0x06, 0xf1, 0x82, 0xa4, 0x00, 0x0e, 0x55, 0x19, 0x72, 0xda, 0x8f, + 0xf6, 0x5e, 0x9c, 0xff, 0x43, 0x02, 0xd0, 0x6c, 0x8b, 0x39, 0xe3, 0x60, 0xe2, 0x07, 0x75, 0xf0, + 0xf7, 0x09, 0x28, 0x4d, 0xf7, 0x95, 0x33, 0xee, 0x5d, 0xfa, 0x41, 0xdd, 0xfb, 0x57, 0x12, 0x56, + 0xa7, 0xba, 0xc9, 0xd3, 0x7a, 0xf7, 0x0d, 0xac, 0x1b, 0x3a, 0x1d, 0x39, 0xb6, 0x4f, 0x2d, 0xed, + 0x44, 0x35, 0xe9, 0x73, 0x6a, 0x4a, 0x65, 0x7e, 0x50, 0xdc, 0x7a, 0x73, 0xbf, 0x5a, 0x69, 0x4c, + 0xf4, 0x9a, 0x4c, 0x6d, 0x6f, 0xa3, 0x51, 0x97, 0x0f, 0x3b, 0xed, 0x9e, 0xdc, 0xaa, 0x3d, 0x51, + 0x8f, 0x5a, 0x3f, 0x6f, 0xb5, 0xbf, 0x6a, 0x29, 0xc8, 0x98, 0xa1, 0xbd, 0xc3, 0xad, 0xde, 0x01, + 0x34, 0xeb, 0x14, 0x3e, 0x0b, 0x8b, 0xdc, 0x42, 0x1f, 0xe0, 0x0d, 0x58, 0x6b, 0xb5, 0xd5, 0x6e, + 0xa3, 0x2e, 0xab, 0xf2, 0xc3, 0x87, 0x72, 0xad, 0xd7, 0x15, 0x17, 0xf8, 0x88, 0xdd, 0x9b, 0xde, + 0xd4, 0xbf, 0x4b, 0xc1, 0xc6, 0x02, 0x4f, 0x70, 0x35, 0xb8, 0x3b, 0x88, 0xeb, 0xcc, 0x8f, 0x4f, + 0xe3, 0x7d, 0x85, 0x95, 0xfc, 0x0e, 0x71, 0xfd, 0xe0, 0xaa, 0x71, 0x1d, 0x58, 0x94, 0x2c, 0xdf, + 0xe8, 0x1b, 0xd4, 0x0d, 0xde, 0x3b, 0xc4, 0x85, 0x62, 0x6d, 0x82, 0x8b, 0x27, 0x8f, 0x1f, 0x01, + 0x76, 0x6c, 0xcf, 0xf0, 0x8d, 0xe7, 0x54, 0x35, 0xac, 0xf0, 0x71, 0x84, 0x5d, 0x30, 0xd2, 0x0a, + 0x0a, 0x25, 0x0d, 0xcb, 0x8f, 0xd8, 0x16, 0x1d, 0x90, 0x19, 0x36, 0x3b, 0xc0, 0x53, 0x0a, 0x0a, + 0x25, 0x11, 0xfb, 0x12, 0x14, 0x75, 0x7b, 0xcc, 0xba, 0x2e, 0xc1, 0x63, 0xf5, 0x22, 0xa1, 0x14, + 0x04, 0x16, 0x51, 0x82, 0x7e, 0x7a, 0xf2, 0x2a, 0x53, 0x54, 0x0a, 0x02, 0x13, 0x94, 0x6b, 0xb0, + 0x46, 0x06, 0x03, 0x97, 0x19, 0x0f, 0x0d, 0x89, 0x1b, 0x42, 0x29, 0x82, 0x39, 0x71, 0xeb, 0x11, + 0xe4, 0xc2, 0x38, 0xb0, 0x92, 0xcc, 0x22, 0xa1, 
0x3a, 0xe2, 0x65, 0x2e, 0xb9, 0x93, 0x57, 0x72, + 0x56, 0x28, 0xbc, 0x04, 0x45, 0xc3, 0x53, 0x27, 0x8f, 0xcc, 0xc9, 0xed, 0xe4, 0x4e, 0x4e, 0x29, + 0x18, 0x5e, 0xf4, 0x40, 0x57, 0xfe, 0x2e, 0x09, 0xa5, 0xe9, 0x47, 0x72, 0x5c, 0x87, 0x9c, 0x69, + 0x6b, 0x84, 0xa7, 0x96, 0xf8, 0x85, 0x66, 0xe7, 0x2d, 0xef, 0xea, 0x95, 0x66, 0xc0, 0x57, 0x22, + 0xcd, 0xad, 0xbf, 0x27, 0x20, 0x17, 0xc2, 0xf8, 0x0c, 0xa4, 0x1d, 0xe2, 0x0f, 0xb9, 0xb9, 0xcc, + 0x7e, 0x12, 0x25, 0x14, 0x3e, 0x66, 0xb8, 0xe7, 0x10, 0x8b, 0xa7, 0x40, 0x80, 0xb3, 0x31, 0xfb, + 0xae, 0x26, 0x25, 0x3a, 0xbf, 0x7e, 0xd8, 0xa3, 0x11, 0xb5, 0x7c, 0x2f, 0xfc, 0xae, 0x01, 0x5e, + 0x0b, 0x60, 0x7c, 0x13, 0xd6, 0x7d, 0x97, 0x18, 0xe6, 0x14, 0x37, 0xcd, 0xb9, 0x28, 0x14, 0x44, + 0xe4, 0x3d, 0x38, 0x17, 0xda, 0xd5, 0xa9, 0x4f, 0xb4, 0x21, 0xd5, 0x27, 0x4a, 0x59, 0xfe, 0x02, + 0x7b, 0x36, 0x20, 0xd4, 0x03, 0x79, 0xa8, 0x5b, 0xfe, 0x47, 0x02, 0xd6, 0xc3, 0x0b, 0x93, 0x1e, + 0x05, 0xeb, 0x10, 0x80, 0x58, 0x96, 0xed, 0xc7, 0xc3, 0x35, 0x9f, 0xca, 0x73, 0x7a, 0x95, 0x6a, + 0xa4, 0xa4, 0xc4, 0x0c, 0x6c, 0x8d, 0x00, 0x26, 0x92, 0xa5, 0x61, 0xbb, 0x08, 0x85, 0xe0, 0x17, + 0x10, 0xfe, 0x33, 0x9a, 0xb8, 0x62, 0x83, 0x80, 0xd8, 0xcd, 0x0a, 0x6f, 0x42, 0xe6, 0x98, 0x0e, + 0x0c, 0x2b, 0x78, 0xd7, 0x14, 0x83, 0xf0, 0xad, 0x36, 0x1d, 0xbd, 0xd5, 0xee, 0x3f, 0x86, 0x0d, + 0xcd, 0x1e, 0xcd, 0xba, 0xbb, 0x8f, 0x66, 0xae, 0xf9, 0xde, 0x97, 0x89, 0xaf, 0x61, 0xd2, 0x62, + 0x7e, 0x9b, 0x4c, 0x1d, 0x74, 0xf6, 0xff, 0x98, 0xdc, 0x3a, 0x10, 0x7a, 0x9d, 0x70, 0x99, 0x0a, + 0xed, 0x9b, 0x54, 0x63, 0xae, 0xff, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x6b, 0x03, 0xf1, 0x99, 0x1b, + 0x1c, 0x00, 0x00, } diff --git a/cluster-autoscaler/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/cluster-autoscaler/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go index bac9913e019c..be534f0fa1c2 100644 --- a/cluster-autoscaler/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go +++ b/cluster-autoscaler/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: descriptor.proto -// DO NOT EDIT! /* Package descriptor is a generated protocol buffer package. 
@@ -12,6 +11,7 @@ It has these top-level messages: FileDescriptorSet FileDescriptorProto DescriptorProto + ExtensionRangeOptions FieldDescriptorProto OneofDescriptorProto EnumDescriptorProto @@ -155,7 +155,7 @@ func (this *DescriptorProto_ExtensionRange) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 6) + s := make([]string, 0, 7) s = append(s, "&descriptor.DescriptorProto_ExtensionRange{") if this.Start != nil { s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") @@ -163,6 +163,9 @@ func (this *DescriptorProto_ExtensionRange) GoString() string { if this.End != nil { s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } if this.XXX_unrecognized != nil { s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") } @@ -187,6 +190,22 @@ func (this *DescriptorProto_ReservedRange) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *ExtensionRangeOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.ExtensionRangeOptions{") + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} func (this *FieldDescriptorProto) GoString() string { if this == nil { return "nil" @@ -200,10 +219,10 @@ func (this *FieldDescriptorProto) GoString() string { s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") } if this.Label != nil { - s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "descriptor.FieldDescriptorProto_Label")+",\n") + s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "FieldDescriptorProto_Label")+",\n") } if this.Type != nil { - s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "descriptor.FieldDescriptorProto_Type")+",\n") + s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "FieldDescriptorProto_Type")+",\n") } if this.TypeName != nil { s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n") @@ -344,7 +363,7 @@ func (this *FileOptions) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 20) + s := make([]string, 0, 23) s = append(s, "&descriptor.FileOptions{") if this.JavaPackage != nil { s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n") @@ -362,7 +381,7 @@ func (this *FileOptions) GoString() string { s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n") } if this.OptimizeFor != nil { - s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "descriptor.FileOptions_OptimizeMode")+",\n") + s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "FileOptions_OptimizeMode")+",\n") } if this.GoPackage != nil { s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n") @@ -376,6 +395,9 @@ func (this *FileOptions) GoString() string { if this.PyGenericServices != nil { s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n") } + if this.PhpGenericServices != 
nil { + s = append(s, "PhpGenericServices: "+valueToGoStringDescriptor(this.PhpGenericServices, "bool")+",\n") + } if this.Deprecated != nil { s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") } @@ -391,6 +413,12 @@ func (this *FileOptions) GoString() string { if this.SwiftPrefix != nil { s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n") } + if this.PhpClassPrefix != nil { + s = append(s, "PhpClassPrefix: "+valueToGoStringDescriptor(this.PhpClassPrefix, "string")+",\n") + } + if this.PhpNamespace != nil { + s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n") + } if this.UninterpretedOption != nil { s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") } @@ -436,13 +464,13 @@ func (this *FieldOptions) GoString() string { s := make([]string, 0, 11) s = append(s, "&descriptor.FieldOptions{") if this.Ctype != nil { - s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "descriptor.FieldOptions_CType")+",\n") + s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "FieldOptions_CType")+",\n") } if this.Packed != nil { s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n") } if this.Jstype != nil { - s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "descriptor.FieldOptions_JSType")+",\n") + s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "FieldOptions_JSType")+",\n") } if this.Lazy != nil { s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n") @@ -549,7 +577,7 @@ func (this *MethodOptions) GoString() string { s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") } if this.IdempotencyLevel != nil { - s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "descriptor.MethodOptions_IdempotencyLevel")+",\n") + s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "MethodOptions_IdempotencyLevel")+",\n") } if this.UninterpretedOption != nil { s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") diff --git a/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/any.go b/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/any.go index c10caf405576..d83c3ad0076a 100644 --- a/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/any.go +++ b/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/any.go @@ -50,6 +50,9 @@ const googleApis = "type.googleapis.com/" // function. AnyMessageName is provided for less common use cases like filtering a // sequence of Any messages based on a set of allowed message type names. func AnyMessageName(any *Any) (string, error) { + if any == nil { + return "", fmt.Errorf("message is nil") + } slash := strings.LastIndex(any.TypeUrl, "/") if slash < 0 { return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) diff --git a/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/any.pb.go b/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/any.pb.go index 902d6d62aaeb..017d8de4d81a 100644 --- a/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/any.pb.go +++ b/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/any.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: any.proto -// DO NOT EDIT! /* Package types is a generated protocol buffer package. 
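Editor's note: the hand-written `any.go` change above guards `AnyMessageName` against a nil `*Any`, returning an error instead of dereferencing `any.TypeUrl`. A short sketch of the observable difference, using the exported function and error text shown in this diff:

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/types"
)

func main() {
	any := &types.Any{TypeUrl: "type.googleapis.com/google.protobuf.Duration"}
	name, err := types.AnyMessageName(any)
	fmt.Println(name, err) // google.protobuf.Duration <nil>

	// Before this change, a nil *Any panicked when the function read
	// any.TypeUrl; now it returns ("", error) instead.
	name, err = types.AnyMessageName(nil)
	fmt.Println(name, err) // "" message is nil
}
```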
@@ -70,6 +69,16 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package // any.Unpack(foo) // ... // +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// // The pack methods provided by protobuf library will by default use // 'type.googleapis.com/full.type.name' as the type URL and the unpack // methods only use the fully qualified type name after the last '/' @@ -273,24 +282,6 @@ func (m *Any) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Any(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Any(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintAny(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/duration.pb.go b/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/duration.pb.go index 9d7fa8f3130f..806d6d8de52d 100644 --- a/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/duration.pb.go +++ b/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/duration.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: duration.proto -// DO NOT EDIT! /* Package types is a generated protocol buffer package. @@ -40,6 +39,8 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package // two Timestamp values is a Duration and it can be added or subtracted // from a Timestamp. Range is approximately +-10,000 years. // +// # Examples +// // Example 1: Compute Duration from two Timestamps in pseudo code. // // Timestamp start = ...; @@ -80,10 +81,21 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package // duration = Duration() // duration.FromTimedelta(td) // +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// // type Duration struct { // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` // Signed fractions of a second at nanosecond resolution of the span // of time. 
Durations less than one second are represented with a 0 @@ -235,24 +247,6 @@ func (m *Duration) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Duration(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Duration(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintDuration(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/empty.pb.go b/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/empty.pb.go index 4cfac13261ad..1289ac237cb9 100644 --- a/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/empty.pb.go +++ b/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/empty.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: empty.proto -// DO NOT EDIT! /* Package types is a generated protocol buffer package. @@ -142,24 +141,6 @@ func (m *Empty) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Empty(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Empty(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintEmpty(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/field_mask.pb.go b/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/field_mask.pb.go index 3bf0ff36999a..af684101a911 100644 --- a/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/field_mask.pb.go +++ b/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/field_mask.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: field_mask.proto -// DO NOT EDIT! /* Package types is a generated protocol buffer package. 
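Editor's note: the same mechanical change recurs in `any.pb.go`, `duration.pb.go`, and `empty.pb.go` above (and again in `field_mask.pb.go`, `struct.pb.go`, `timestamp.pb.go`, and `wrappers.pb.go` below): the per-file `encodeFixed64*`/`encodeFixed32*` helpers are deleted, and where fixed-width fields actually exist the regenerated marshalers call `encoding/binary` directly. Both forms write identical little-endian bytes; a self-contained comparison:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// oldEncodeFixed64 mirrors the deleted encodeFixed64* helpers:
// eight explicit byte stores, least-significant byte first.
func oldEncodeFixed64(dAtA []byte, offset int, v uint64) int {
	dAtA[offset] = uint8(v)
	dAtA[offset+1] = uint8(v >> 8)
	dAtA[offset+2] = uint8(v >> 16)
	dAtA[offset+3] = uint8(v >> 24)
	dAtA[offset+4] = uint8(v >> 32)
	dAtA[offset+5] = uint8(v >> 40)
	dAtA[offset+6] = uint8(v >> 48)
	dAtA[offset+7] = uint8(v >> 56)
	return offset + 8
}

func main() {
	old := make([]byte, 8)
	oldEncodeFixed64(old, 0, 0x1122334455667788)

	// The replacement used by the regenerated code.
	neu := make([]byte, 8)
	binary.LittleEndian.PutUint64(neu, 0x1122334455667788)

	fmt.Println(bytes.Equal(old, neu)) // true
}
```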
@@ -380,24 +379,6 @@ func (m *FieldMask) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64FieldMask(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32FieldMask(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintFieldMask(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/struct.pb.go b/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/struct.pb.go index 366ea48e7470..aec2117d5fda 100644 --- a/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/struct.pb.go +++ b/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/struct.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: struct.proto -// DO NOT EDIT! /* Package types is a generated protocol buffer package. @@ -25,6 +24,8 @@ import strings "strings" import reflect "reflect" import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" +import encoding_binary "encoding/binary" + import io "io" // Reference imports to suppress errors if they are not otherwise used. @@ -828,7 +829,8 @@ func (m *Value_NumberValue) MarshalTo(dAtA []byte) (int, error) { i := 0 dAtA[i] = 0x11 i++ - i = encodeFixed64Struct(dAtA, i, uint64(math.Float64bits(float64(m.NumberValue)))) + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.NumberValue)))) + i += 8 return i, nil } func (m *Value_StringValue) MarshalTo(dAtA []byte) (int, error) { @@ -909,24 +911,6 @@ func (m *ListValue) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Struct(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Struct(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintStruct(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1350,51 +1334,14 @@ func (m *Struct) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStruct - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStruct - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return 
ErrInvalidLengthStruct - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Fields == nil { m.Fields = make(map[string]*Value) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue *Value + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStruct @@ -1404,46 +1351,85 @@ func (m *Struct) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStruct + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthStruct + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthStruct + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthStruct + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Value{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthStruct - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthStruct - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &Value{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Fields[mapkey] = mapvalue - } else { - var mapvalue *Value - m.Fields[mapkey] = mapvalue } + m.Fields[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -1523,15 +1509,8 @@ func (m *Value) Unmarshal(dAtA []byte) error { if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) iNdEx += 8 - v = uint64(dAtA[iNdEx-8]) - v |= uint64(dAtA[iNdEx-7]) << 8 - v |= uint64(dAtA[iNdEx-6]) << 16 - v |= uint64(dAtA[iNdEx-5]) << 24 - v |= uint64(dAtA[iNdEx-4]) << 32 - v |= uint64(dAtA[iNdEx-3]) << 40 - v |= uint64(dAtA[iNdEx-2]) << 48 - v |= uint64(dAtA[iNdEx-1]) << 56 m.Kind = &Value_NumberValue{float64(math.Float64frombits(v))} case 3: if wireType != 2 { diff 
--git a/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/timestamp.go b/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/timestamp.go index 521b62d925ec..7ae54d8b3ff6 100644 --- a/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/timestamp.go +++ b/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/timestamp.go @@ -97,6 +97,15 @@ func TimestampFromProto(ts *Timestamp) (time.Time, error) { return t, validateTimestamp(ts) } +// TimestampNow returns a google.protobuf.Timestamp for the current time. +func TimestampNow() *Timestamp { + ts, err := TimestampProto(time.Now()) + if err != nil { + panic("ptypes: time.Now() out of Timestamp range") + } + return ts +} + // TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. // It returns an error if the resulting Timestamp is invalid. func TimestampProto(t time.Time) (*Timestamp, error) { diff --git a/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/timestamp.pb.go b/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/timestamp.pb.go index 8c705a3a3048..265133fd6711 100644 --- a/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/timestamp.pb.go +++ b/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/timestamp.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: timestamp.proto -// DO NOT EDIT! /* Package types is a generated protocol buffer package. @@ -45,6 +44,8 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package // and from RFC 3339 date strings. // See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). // +// # Examples +// // Example 1: Compute Timestamp from POSIX `time()`. // // Timestamp timestamp; @@ -85,6 +86,29 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package // timestamp = Timestamp() // timestamp.GetCurrentTime() // +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required, though only UTC (as indicated by "Z") is presently supported. +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) +// to obtain a formatter capable of generating timestamps in this format. 
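`TimestampNow`, added above, is a convenience wrapper around `TimestampProto(time.Now())` that panics instead of returning an error (the current time is always in range). A minimal usage sketch, assuming the vendored `github.com/gogo/protobuf/types` package is importable; `time.RFC3339Nano` matches the JSON mapping described in the new doc comment:

```go
package main

import (
	"fmt"
	"time"

	"github.com/gogo/protobuf/types"
)

func main() {
	// Capture the current time as a google.protobuf.Timestamp.
	ts := types.TimestampNow()

	// Round-trip back to time.Time; errors only for out-of-range values.
	t, err := types.TimestampFromProto(ts)
	if err != nil {
		panic(err)
	}

	// Prints an RFC 3339 string such as "2017-01-15T01:30:15.01Z".
	fmt.Println(t.UTC().Format(time.RFC3339Nano))
}
```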
+// // type Timestamp struct { // Represents seconds of UTC time since Unix epoch @@ -239,24 +263,6 @@ func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Timestamp(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Timestamp(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintTimestamp(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/wrappers.pb.go b/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/wrappers.pb.go index 125e35c8f4cc..60edd60d9e35 100644 --- a/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/wrappers.pb.go +++ b/cluster-autoscaler/vendor/github.com/gogo/protobuf/types/wrappers.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: wrappers.proto -// DO NOT EDIT! /* Package types is a generated protocol buffer package. @@ -30,6 +29,8 @@ import bytes "bytes" import strings "strings" import reflect "reflect" +import encoding_binary "encoding/binary" + import io "io" // Reference imports to suppress errors if they are not otherwise used. @@ -914,7 +915,8 @@ func (m *DoubleValue) MarshalTo(dAtA []byte) (int, error) { if m.Value != 0 { dAtA[i] = 0x9 i++ - i = encodeFixed64Wrappers(dAtA, i, uint64(math.Float64bits(float64(m.Value)))) + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i += 8 } return i, nil } @@ -937,7 +939,8 @@ func (m *FloatValue) MarshalTo(dAtA []byte) (int, error) { if m.Value != 0 { dAtA[i] = 0xd i++ - i = encodeFixed32Wrappers(dAtA, i, uint32(math.Float32bits(float32(m.Value)))) + encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.Value)))) + i += 4 } return i, nil } @@ -1110,24 +1113,6 @@ func (m *BytesValue) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Wrappers(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Wrappers(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintWrappers(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1528,15 +1513,8 @@ func (m *DoubleValue) Unmarshal(dAtA []byte) error { if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) iNdEx += 8 - v = uint64(dAtA[iNdEx-8]) - v |= uint64(dAtA[iNdEx-7]) << 8 - v |= uint64(dAtA[iNdEx-6]) << 16 - v |= uint64(dAtA[iNdEx-5]) << 24 - v |= uint64(dAtA[iNdEx-4]) << 32 - v |= uint64(dAtA[iNdEx-3]) << 40 - v |= uint64(dAtA[iNdEx-2]) << 48 - v |= uint64(dAtA[iNdEx-1]) << 
56 m.Value = float64(math.Float64frombits(v)) default: iNdEx = preIndex @@ -1596,11 +1574,8 @@ func (m *FloatValue) Unmarshal(dAtA []byte) error { if (iNdEx + 4) > l { return io.ErrUnexpectedEOF } + v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) iNdEx += 4 - v = uint32(dAtA[iNdEx-4]) - v |= uint32(dAtA[iNdEx-3]) << 8 - v |= uint32(dAtA[iNdEx-2]) << 16 - v |= uint32(dAtA[iNdEx-1]) << 24 m.Value = float32(math.Float32frombits(v)) default: iNdEx = preIndex diff --git a/cluster-autoscaler/vendor/github.com/golang/glog/LICENSE b/cluster-autoscaler/vendor/github.com/golang/glog/LICENSE deleted file mode 100644 index 37ec93a14fdc..000000000000 --- a/cluster-autoscaler/vendor/github.com/golang/glog/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. 
For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. 
You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/cluster-autoscaler/vendor/github.com/golang/glog/README b/cluster-autoscaler/vendor/github.com/golang/glog/README deleted file mode 100644 index 5f9c11485e03..000000000000 --- a/cluster-autoscaler/vendor/github.com/golang/glog/README +++ /dev/null @@ -1,44 +0,0 @@ -glog -==== - -Leveled execution logs for Go. - -This is an efficient pure Go implementation of leveled logs in the -manner of the open source C++ package - http://code.google.com/p/google-glog - -By binding methods to booleans it is possible to use the log package -without paying the expense of evaluating the arguments to the log. -Through the -vmodule flag, the package also provides fine-grained -control over logging at the file level. - -The comment from glog.go introduces the ideas: - - Package glog implements logging analogous to the Google-internal - C++ INFO/ERROR/V setup. It provides functions Info, Warning, - Error, Fatal, plus formatting variants such as Infof. It - also provides V-style logging controlled by the -v and - -vmodule=file=2 flags. - - Basic examples: - - glog.Info("Prepare to repel boarders") - - glog.Fatalf("Initialization failed: %s", err) - - See the documentation for the V function for an explanation - of these examples: - - if glog.V(2) { - glog.Info("Starting transaction...") - } - - glog.V(2).Infoln("Processed", nItems, "elements") - - -The repository contains an open source version of the log package -used inside Google. The master copy of the source lives inside -Google, not here. The code in this repo is for export only and is not itself -under development. Feature requests will be ignored. - -Send bug reports to golang-nuts@googlegroups.com. diff --git a/cluster-autoscaler/vendor/github.com/golang/glog/glog.go b/cluster-autoscaler/vendor/github.com/golang/glog/glog.go deleted file mode 100644 index 3e63fffd5ecb..000000000000 --- a/cluster-autoscaler/vendor/github.com/golang/glog/glog.go +++ /dev/null @@ -1,1177 +0,0 @@ -// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ -// -// Copyright 2013 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. -// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as -// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. -// -// Basic examples: -// -// glog.Info("Prepare to repel boarders") -// -// glog.Fatalf("Initialization failed: %s", err) -// -// See the documentation for the V function for an explanation of these examples: -// -// if glog.V(2) { -// glog.Info("Starting transaction...") -// } -// -// glog.V(2).Infoln("Processed", nItems, "elements") -// -// Log output is buffered and written periodically using Flush. Programs -// should call Flush before exiting to guarantee all log output is written. -// -// By default, all log statements write to files in a temporary directory. -// This package provides several flags that modify this behavior. -// As a result, flag.Parse must be called before any logging is done. -// -// -logtostderr=false -// Logs are written to standard error instead of to files. -// -alsologtostderr=false -// Logs are written to standard error as well as to files. -// -stderrthreshold=ERROR -// Log events at or above this severity are logged to standard -// error as well as to files. -// -log_dir="" -// Log files will be written to this directory instead of the -// default temporary directory. -// -// Other flags provide aids to debugging. -// -// -log_backtrace_at="" -// When set to a file and line number holding a logging statement, -// such as -// -log_backtrace_at=gopherflakes.go:234 -// a stack trace will be written to the Info log whenever execution -// hits that statement. (Unlike with -vmodule, the ".go" must be -// present.) -// -v=0 -// Enable V-leveled logging at the specified level. -// -vmodule="" -// The syntax of the argument is a comma-separated list of pattern=N, -// where pattern is a literal file name (minus the ".go" suffix) or -// "glob" pattern and N is a V level. For instance, -// -vmodule=gopher*=3 -// sets the V level to 3 in all Go files whose names begin "gopher". -// -package glog - -import ( - "bufio" - "bytes" - "errors" - "flag" - "fmt" - "io" - stdLog "log" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" -) - -// severity identifies the sort of log: info, warning etc. It also implements -// the flag.Value interface. The -stderrthreshold flag is of type severity and -// should be modified only through the flag.Value interface. The values match -// the corresponding constants in C++. -type severity int32 // sync/atomic int32 - -// These constants identify the log levels in order of increasing severity. -// A message written to a high-severity log file is also written to each -// lower-severity log file. -const ( - infoLog severity = iota - warningLog - errorLog - fatalLog - numSeverity = 4 -) - -const severityChar = "IWEF" - -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// get returns the value of the severity. 
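The package doc being deleted above describes a flag-driven, buffered logger. For reference, a minimal sketch of the documented API of the removed package (callers of the vendored copy are presumably expected to migrate to a replacement such as k8s.io/klog):

```go
package main

import (
	"flag"

	"github.com/golang/glog"
)

func main() {
	// glog registers -logtostderr, -v, -vmodule, etc. on the default
	// flag set in init, so flag.Parse must run before any logging.
	flag.Parse()
	defer glog.Flush() // output is buffered; flush before exiting

	glog.Info("Prepare to repel boarders")
	glog.Warningf("%d droids on the port bow", 3)
}
```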
-func (s *severity) get() severity { - return severity(atomic.LoadInt32((*int32)(s))) -} - -// set sets the value of the severity. -func (s *severity) set(val severity) { - atomic.StoreInt32((*int32)(s), int32(val)) -} - -// String is part of the flag.Value interface. -func (s *severity) String() string { - return strconv.FormatInt(int64(*s), 10) -} - -// Get is part of the flag.Value interface. -func (s *severity) Get() interface{} { - return *s -} - -// Set is part of the flag.Value interface. -func (s *severity) Set(value string) error { - var threshold severity - // Is it a known name? - if v, ok := severityByName(value); ok { - threshold = v - } else { - v, err := strconv.Atoi(value) - if err != nil { - return err - } - threshold = severity(v) - } - logging.stderrThreshold.set(threshold) - return nil -} - -func severityByName(s string) (severity, bool) { - s = strings.ToUpper(s) - for i, name := range severityName { - if name == s { - return severity(i), true - } - } - return 0, false -} - -// OutputStats tracks the number of output lines and bytes written. -type OutputStats struct { - lines int64 - bytes int64 -} - -// Lines returns the number of lines written. -func (s *OutputStats) Lines() int64 { - return atomic.LoadInt64(&s.lines) -} - -// Bytes returns the number of bytes written. -func (s *OutputStats) Bytes() int64 { - return atomic.LoadInt64(&s.bytes) -} - -// Stats tracks the number of lines of output and number of bytes -// per severity level. Values must be read with atomic.LoadInt64. -var Stats struct { - Info, Warning, Error OutputStats -} - -var severityStats = [numSeverity]*OutputStats{ - infoLog: &Stats.Info, - warningLog: &Stats.Warning, - errorLog: &Stats.Error, -} - -// Level is exported because it appears in the arguments to V and is -// the type of the v flag, which can be set programmatically. -// It's a distinct type because we want to discriminate it from logType. -// Variables of type level are only changed under logging.mu. -// The -v flag is read only with atomic ops, so the state of the logging -// module is consistent. - -// Level is treated as a sync/atomic int32. - -// Level specifies a level of verbosity for V logs. *Level implements -// flag.Value; the -v flag is of type Level and should be modified -// only through the flag.Value interface. -type Level int32 - -// get returns the value of the Level. -func (l *Level) get() Level { - return Level(atomic.LoadInt32((*int32)(l))) -} - -// set sets the value of the Level. -func (l *Level) set(val Level) { - atomic.StoreInt32((*int32)(l), int32(val)) -} - -// String is part of the flag.Value interface. -func (l *Level) String() string { - return strconv.FormatInt(int64(*l), 10) -} - -// Get is part of the flag.Value interface. -func (l *Level) Get() interface{} { - return *l -} - -// Set is part of the flag.Value interface. -func (l *Level) Set(value string) error { - v, err := strconv.Atoi(value) - if err != nil { - return err - } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(Level(v), logging.vmodule.filter, false) - return nil -} - -// moduleSpec represents the setting of the -vmodule flag. -type moduleSpec struct { - filter []modulePat -} - -// modulePat contains a filter for the -vmodule flag. -// It holds a verbosity level and a file pattern to match. -type modulePat struct { - pattern string - literal bool // The pattern is a literal string - level Level -} - -// match reports whether the file matches the pattern. 
It uses a string -// comparison if the pattern contains no metacharacters. -func (m *modulePat) match(file string) bool { - if m.literal { - return file == m.pattern - } - match, _ := filepath.Match(m.pattern, file) - return match -} - -func (m *moduleSpec) String() string { - // Lock because the type is not atomic. TODO: clean this up. - logging.mu.Lock() - defer logging.mu.Unlock() - var b bytes.Buffer - for i, f := range m.filter { - if i > 0 { - b.WriteRune(',') - } - fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) - } - return b.String() -} - -// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the -// struct is not exported. -func (m *moduleSpec) Get() interface{} { - return nil -} - -var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") - -// Syntax: -vmodule=recordio=2,file=1,gfs*=3 -func (m *moduleSpec) Set(value string) error { - var filter []modulePat - for _, pat := range strings.Split(value, ",") { - if len(pat) == 0 { - // Empty strings such as from a trailing comma can be ignored. - continue - } - patLev := strings.Split(pat, "=") - if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { - return errVmoduleSyntax - } - pattern := patLev[0] - v, err := strconv.Atoi(patLev[1]) - if err != nil { - return errors.New("syntax error: expect comma-separated list of filename=N") - } - if v < 0 { - return errors.New("negative value for vmodule level") - } - if v == 0 { - continue // Ignore. It's harmless but no point in paying the overhead. - } - // TODO: check syntax of filter? - filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) - } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(logging.verbosity, filter, true) - return nil -} - -// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters -// that require filepath.Match to be called to match the pattern. -func isLiteral(pattern string) bool { - return !strings.ContainsAny(pattern, `\*?[]`) -} - -// traceLocation represents the setting of the -log_backtrace_at flag. -type traceLocation struct { - file string - line int -} - -// isSet reports whether the trace location has been specified. -// logging.mu is held. -func (t *traceLocation) isSet() bool { - return t.line > 0 -} - -// match reports whether the specified file and line matches the trace location. -// The argument file name is the full path, not the basename specified in the flag. -// logging.mu is held. -func (t *traceLocation) match(file string, line int) bool { - if t.line != line { - return false - } - if i := strings.LastIndex(file, "/"); i >= 0 { - file = file[i+1:] - } - return t.file == file -} - -func (t *traceLocation) String() string { - // Lock because the type is not atomic. TODO: clean this up. - logging.mu.Lock() - defer logging.mu.Unlock() - return fmt.Sprintf("%s:%d", t.file, t.line) -} - -// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the -// struct is not exported -func (t *traceLocation) Get() interface{} { - return nil -} - -var errTraceSyntax = errors.New("syntax error: expect file.go:234") - -// Syntax: -log_backtrace_at=gopherflakes.go:234 -// Note that unlike vmodule the file extension is included here. -func (t *traceLocation) Set(value string) error { - if value == "" { - // Unset. 
- t.line = 0 - t.file = "" - } - fields := strings.Split(value, ":") - if len(fields) != 2 { - return errTraceSyntax - } - file, line := fields[0], fields[1] - if !strings.Contains(file, ".") { - return errTraceSyntax - } - v, err := strconv.Atoi(line) - if err != nil { - return errTraceSyntax - } - if v <= 0 { - return errors.New("negative or zero value for level") - } - logging.mu.Lock() - defer logging.mu.Unlock() - t.line = v - t.file = file - return nil -} - -// flushSyncWriter is the interface satisfied by logging destinations. -type flushSyncWriter interface { - Flush() error - Sync() error - io.Writer -} - -func init() { - flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files") - flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") - flag.Var(&logging.verbosity, "v", "log level for V logs") - flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") - flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") - flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") - - // Default stderrThreshold is ERROR. - logging.stderrThreshold = errorLog - - logging.setVState(0, nil, false) - go logging.flushDaemon() -} - -// Flush flushes all pending log I/O. -func Flush() { - logging.lockAndFlushAll() -} - -// loggingT collects all the global state of the logging setup. -type loggingT struct { - // Boolean flags. Not handled atomically because the flag.Value interface - // does not let us avoid the =true, and that shorthand is necessary for - // compatibility. TODO: does this matter enough to fix? Seems unlikely. - toStderr bool // The -logtostderr flag. - alsoToStderr bool // The -alsologtostderr flag. - - // Level flag. Handled atomically. - stderrThreshold severity // The -stderrthreshold flag. - - // freeList is a list of byte buffers, maintained under freeListMu. - freeList *buffer - // freeListMu maintains the free list. It is separate from the main mutex - // so buffers can be grabbed and printed to without holding the main lock, - // for better parallelization. - freeListMu sync.Mutex - - // mu protects the remaining elements of this structure and is - // used to synchronize logging. - mu sync.Mutex - // file holds writer for each of the log types. - file [numSeverity]flushSyncWriter - // pcs is used in V to avoid an allocation when computing the caller's PC. - pcs [1]uintptr - // vmap is a cache of the V Level for each V() call site, identified by PC. - // It is wiped whenever the vmodule flag changes state. - vmap map[uintptr]Level - // filterLength stores the length of the vmodule filter chain. If greater - // than zero, it means vmodule is enabled. It may be read safely - // using sync.LoadInt32, but is only modified under mu. - filterLength int32 - // traceLocation is the state of the -log_backtrace_at flag. - traceLocation traceLocation - // These flags are modified only under lock, although verbosity may be fetched - // safely using atomic.LoadInt32. - vmodule moduleSpec // The state of the -vmodule flag. - verbosity Level // V logging level, the value of the -v flag/ -} - -// buffer holds a byte Buffer for reuse. The zero value is ready for use. -type buffer struct { - bytes.Buffer - tmp [64]byte // temporary byte array for creating headers. - next *buffer -} - -var logging loggingT - -// setVState sets a consistent state for V logging. 
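`loggingT` keeps its own free list of buffers behind `freeListMu` so a header can be formatted without holding the main logging lock; today `sync.Pool` amortizes allocations the same way. A sketch of the idea (not glog's actual implementation):

```go
package main

import (
	"bytes"
	"os"
	"sync"
)

var bufPool = sync.Pool{
	New: func() interface{} { return new(bytes.Buffer) },
}

func emit(msg string) {
	b := bufPool.Get().(*bytes.Buffer)
	b.Reset()
	b.WriteString(msg)
	b.WriteByte('\n')
	os.Stderr.Write(b.Bytes())
	if b.Len() < 256 { // let big buffers die, as putBuffer below does
		bufPool.Put(b)
	}
}

func main() { emit("hello") }
```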
-// l.mu is held. -func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) { - // Turn verbosity off so V will not fire while we are in transition. - logging.verbosity.set(0) - // Ditto for filter length. - atomic.StoreInt32(&logging.filterLength, 0) - - // Set the new filters and wipe the pc->Level map if the filter has changed. - if setFilter { - logging.vmodule.filter = filter - logging.vmap = make(map[uintptr]Level) - } - - // Things are consistent now, so enable filtering and verbosity. - // They are enabled in order opposite to that in V. - atomic.StoreInt32(&logging.filterLength, int32(len(filter))) - logging.verbosity.set(verbosity) -} - -// getBuffer returns a new, ready-to-use buffer. -func (l *loggingT) getBuffer() *buffer { - l.freeListMu.Lock() - b := l.freeList - if b != nil { - l.freeList = b.next - } - l.freeListMu.Unlock() - if b == nil { - b = new(buffer) - } else { - b.next = nil - b.Reset() - } - return b -} - -// putBuffer returns a buffer to the free list. -func (l *loggingT) putBuffer(b *buffer) { - if b.Len() >= 256 { - // Let big buffers die a natural death. - return - } - l.freeListMu.Lock() - b.next = l.freeList - l.freeList = b - l.freeListMu.Unlock() -} - -var timeNow = time.Now // Stubbed out for testing. - -/* -header formats a log header as defined by the C++ implementation. -It returns a buffer containing the formatted header and the user's file and line number. -The depth specifies how many stack frames above lives the source line to be identified in the log message. - -Log lines have this form: - Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... -where the fields are defined as follows: - L A single character, representing the log level (eg 'I' for INFO) - mm The month (zero padded; ie May is '05') - dd The day (zero padded) - hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds - threadid The space-padded thread ID as returned by GetTID() - file The file name - line The line number - msg The user-supplied message -*/ -func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { - _, file, line, ok := runtime.Caller(3 + depth) - if !ok { - file = "???" - line = 1 - } else { - slash := strings.LastIndex(file, "/") - if slash >= 0 { - file = file[slash+1:] - } - } - return l.formatHeader(s, file, line), file, line -} - -// formatHeader formats a log header using the provided file name and line number. -func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { - now := timeNow() - if line < 0 { - line = 0 // not a real line number, but acceptable to someDigits - } - if s > fatalLog { - s = infoLog // for safety. - } - buf := l.getBuffer() - - // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. - // It's worth about 3X. Fprintf is hard. - _, month, day := now.Date() - hour, minute, second := now.Clock() - // Lmmdd hh:mm:ss.uuuuuu threadid file:line] - buf.tmp[0] = severityChar[s] - buf.twoDigits(1, int(month)) - buf.twoDigits(3, day) - buf.tmp[5] = ' ' - buf.twoDigits(6, hour) - buf.tmp[8] = ':' - buf.twoDigits(9, minute) - buf.tmp[11] = ':' - buf.twoDigits(12, second) - buf.tmp[14] = '.' 
- buf.nDigits(6, 15, now.Nanosecond()/1000, '0') - buf.tmp[21] = ' ' - buf.nDigits(7, 22, pid, ' ') // TODO: should be TID - buf.tmp[29] = ' ' - buf.Write(buf.tmp[:30]) - buf.WriteString(file) - buf.tmp[0] = ':' - n := buf.someDigits(1, line) - buf.tmp[n+1] = ']' - buf.tmp[n+2] = ' ' - buf.Write(buf.tmp[:n+3]) - return buf -} - -// Some custom tiny helper functions to print the log header efficiently. - -const digits = "0123456789" - -// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. -func (buf *buffer) twoDigits(i, d int) { - buf.tmp[i+1] = digits[d%10] - d /= 10 - buf.tmp[i] = digits[d%10] -} - -// nDigits formats an n-digit integer at buf.tmp[i], -// padding with pad on the left. -// It assumes d >= 0. -func (buf *buffer) nDigits(n, i, d int, pad byte) { - j := n - 1 - for ; j >= 0 && d > 0; j-- { - buf.tmp[i+j] = digits[d%10] - d /= 10 - } - for ; j >= 0; j-- { - buf.tmp[i+j] = pad - } -} - -// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. -func (buf *buffer) someDigits(i, d int) int { - // Print into the top, then copy down. We know there's space for at least - // a 10-digit number. - j := len(buf.tmp) - for { - j-- - buf.tmp[j] = digits[d%10] - d /= 10 - if d == 0 { - break - } - } - return copy(buf.tmp[i:], buf.tmp[j:]) -} - -func (l *loggingT) println(s severity, args ...interface{}) { - buf, file, line := l.header(s, 0) - fmt.Fprintln(buf, args...) - l.output(s, buf, file, line, false) -} - -func (l *loggingT) print(s severity, args ...interface{}) { - l.printDepth(s, 1, args...) -} - -func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) { - buf, file, line := l.header(s, depth) - fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, false) -} - -func (l *loggingT) printf(s severity, format string, args ...interface{}) { - buf, file, line := l.header(s, 0) - fmt.Fprintf(buf, format, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, false) -} - -// printWithFileLine behaves like print but uses the provided file and line number. If -// alsoLogToStderr is true, the log message always appears on standard error; it -// will also appear in the log file unless --logtostderr is set. -func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) { - buf := l.formatHeader(s, file, line) - fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, alsoToStderr) -} - -// output writes the data to the log files and releases the buffer. -func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) { - l.mu.Lock() - if l.traceLocation.isSet() { - if l.traceLocation.match(file, line) { - buf.Write(stacks(false)) - } - } - data := buf.Bytes() - if l.toStderr { - os.Stderr.Write(data) - } else { - if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { - os.Stderr.Write(data) - } - if l.file[s] == nil { - if err := l.createFiles(s); err != nil { - os.Stderr.Write(data) // Make sure the message appears somewhere. 
- l.exit(err) - } - } - switch s { - case fatalLog: - l.file[fatalLog].Write(data) - fallthrough - case errorLog: - l.file[errorLog].Write(data) - fallthrough - case warningLog: - l.file[warningLog].Write(data) - fallthrough - case infoLog: - l.file[infoLog].Write(data) - } - } - if s == fatalLog { - // If we got here via Exit rather than Fatal, print no stacks. - if atomic.LoadUint32(&fatalNoStacks) > 0 { - l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(1) - } - // Dump all goroutine stacks before exiting. - // First, make sure we see the trace for the current goroutine on standard error. - // If -logtostderr has been specified, the loop below will do that anyway - // as the first stack in the full dump. - if !l.toStderr { - os.Stderr.Write(stacks(false)) - } - // Write the stack trace for all goroutines to the files. - trace := stacks(true) - logExitFunc = func(error) {} // If we get a write error, we'll still exit below. - for log := fatalLog; log >= infoLog; log-- { - if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. - f.Write(trace) - } - } - l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. - } - l.putBuffer(buf) - l.mu.Unlock() - if stats := severityStats[s]; stats != nil { - atomic.AddInt64(&stats.lines, 1) - atomic.AddInt64(&stats.bytes, int64(len(data))) - } -} - -// timeoutFlush calls Flush and returns when it completes or after timeout -// elapses, whichever happens first. This is needed because the hooks invoked -// by Flush may deadlock when glog.Fatal is called from a hook that holds -// a lock. -func timeoutFlush(timeout time.Duration) { - done := make(chan bool, 1) - go func() { - Flush() // calls logging.lockAndFlushAll() - done <- true - }() - select { - case <-done: - case <-time.After(timeout): - fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) - } -} - -// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. -func stacks(all bool) []byte { - // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. - n := 10000 - if all { - n = 100000 - } - var trace []byte - for i := 0; i < 5; i++ { - trace = make([]byte, n) - nbytes := runtime.Stack(trace, all) - if nbytes < len(trace) { - return trace[:nbytes] - } - n *= 2 - } - return trace -} - -// logExitFunc provides a simple mechanism to override the default behavior -// of exiting on error. Used in testing and to guarantee we reach a required exit -// for fatal logs. Instead, exit could be a function rather than a method but that -// would make its use clumsier. -var logExitFunc func(error) - -// exit is called if there is trouble creating or writing log files. -// It flushes the logs and exits the program; there's no point in hanging around. -// l.mu is held. -func (l *loggingT) exit(err error) { - fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) - // If logExitFunc is set, we do that instead of exiting. - if logExitFunc != nil { - logExitFunc(err) - return - } - l.flushAll() - os.Exit(2) -} - -// syncBuffer joins a bufio.Writer to its underlying file, providing access to the -// file's Sync method and providing a wrapper for the Write method that provides log -// file rotation. There are conflicting methods, so the file cannot be embedded. -// l.mu is held for all its methods. 
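The `syncBuffer` described above rotates by size: `Write` checks the running byte count against `MaxSize` before appending. A stripped-down sketch of that check, minus the bufio layer, file headers, and symlink juggling of the real code:

```go
package logsketch

import "os"

// rotatingWriter captures the size check in syncBuffer.Write below.
type rotatingWriter struct {
	f      *os.File
	nbytes uint64
	max    uint64
	open   func() (*os.File, error) // opens the next log file
}

func (w *rotatingWriter) Write(p []byte) (int, error) {
	// Rotate before the write would push the file past the limit.
	if w.f == nil || w.nbytes+uint64(len(p)) >= w.max {
		if w.f != nil {
			w.f.Close()
		}
		f, err := w.open()
		if err != nil {
			return 0, err
		}
		w.f, w.nbytes = f, 0
	}
	n, err := w.f.Write(p)
	w.nbytes += uint64(n)
	return n, err
}
```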
-type syncBuffer struct { - logger *loggingT - *bufio.Writer - file *os.File - sev severity - nbytes uint64 // The number of bytes written to this file -} - -func (sb *syncBuffer) Sync() error { - return sb.file.Sync() -} - -func (sb *syncBuffer) Write(p []byte) (n int, err error) { - if sb.nbytes+uint64(len(p)) >= MaxSize { - if err := sb.rotateFile(time.Now()); err != nil { - sb.logger.exit(err) - } - } - n, err = sb.Writer.Write(p) - sb.nbytes += uint64(n) - if err != nil { - sb.logger.exit(err) - } - return -} - -// rotateFile closes the syncBuffer's file and starts a new one. -func (sb *syncBuffer) rotateFile(now time.Time) error { - if sb.file != nil { - sb.Flush() - sb.file.Close() - } - var err error - sb.file, _, err = create(severityName[sb.sev], now) - sb.nbytes = 0 - if err != nil { - return err - } - - sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) - - // Write header. - var buf bytes.Buffer - fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) - fmt.Fprintf(&buf, "Running on machine: %s\n", host) - fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) - fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") - n, err := sb.file.Write(buf.Bytes()) - sb.nbytes += uint64(n) - return err -} - -// bufferSize sizes the buffer associated with each log file. It's large -// so that log records can accumulate without the logging thread blocking -// on disk I/O. The flushDaemon will block instead. -const bufferSize = 256 * 1024 - -// createFiles creates all the log files for severity from sev down to infoLog. -// l.mu is held. -func (l *loggingT) createFiles(sev severity) error { - now := time.Now() - // Files are created in decreasing severity order, so as soon as we find one - // has already been created, we can stop. - for s := sev; s >= infoLog && l.file[s] == nil; s-- { - sb := &syncBuffer{ - logger: l, - sev: s, - } - if err := sb.rotateFile(now); err != nil { - return err - } - l.file[s] = sb - } - return nil -} - -const flushInterval = 30 * time.Second - -// flushDaemon periodically flushes the log file buffers. -func (l *loggingT) flushDaemon() { - for _ = range time.NewTicker(flushInterval).C { - l.lockAndFlushAll() - } -} - -// lockAndFlushAll is like flushAll but locks l.mu first. -func (l *loggingT) lockAndFlushAll() { - l.mu.Lock() - l.flushAll() - l.mu.Unlock() -} - -// flushAll flushes all the logs and attempts to "sync" their data to disk. -// l.mu is held. -func (l *loggingT) flushAll() { - // Flush from fatal down, in case there's trouble flushing. - for s := fatalLog; s >= infoLog; s-- { - file := l.file[s] - if file != nil { - file.Flush() // ignore error - file.Sync() // ignore error - } - } -} - -// CopyStandardLogTo arranges for messages written to the Go "log" package's -// default logs to also appear in the Google logs for the named and lower -// severities. Subsequent changes to the standard log's default output location -// or format may break this behavior. -// -// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not -// recognized, CopyStandardLogTo panics. 
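`CopyStandardLogTo`, defined just below, bridges the standard library's default logger into glog by installing a `logBridge` as its output. A short usage sketch:

```go
package main

import (
	stdlog "log"

	"github.com/golang/glog"
)

func main() {
	// Valid names are "INFO", "WARNING", "ERROR", "FATAL";
	// anything else panics.
	glog.CopyStandardLogTo("INFO")

	stdlog.Print("this line now also lands in glog's INFO log")
	glog.Flush()
}
```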
-func CopyStandardLogTo(name string) { - sev, ok := severityByName(name) - if !ok { - panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) - } - // Set a log format that captures the user's file and line: - // d.go:23: message - stdLog.SetFlags(stdLog.Lshortfile) - stdLog.SetOutput(logBridge(sev)) -} - -// logBridge provides the Write method that enables CopyStandardLogTo to connect -// Go's standard logs to the logs provided by this package. -type logBridge severity - -// Write parses the standard logging line and passes its components to the -// logger for severity(lb). -func (lb logBridge) Write(b []byte) (n int, err error) { - var ( - file = "???" - line = 1 - text string - ) - // Split "d.go:23: message" into "d.go", "23", and "message". - if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 { - text = fmt.Sprintf("bad log format: %s", b) - } else { - file = string(parts[0]) - text = string(parts[2][1:]) // skip leading space - line, err = strconv.Atoi(string(parts[1])) - if err != nil { - text = fmt.Sprintf("bad line number: %s", b) - line = 1 - } - } - // printWithFileLine with alsoToStderr=true, so standard log messages - // always appear on standard error. - logging.printWithFileLine(severity(lb), file, line, true, text) - return len(b), nil -} - -// setV computes and remembers the V level for a given PC -// when vmodule is enabled. -// File pattern matching takes the basename of the file, stripped -// of its .go suffix, and uses filepath.Match, which is a little more -// general than the *? matching used in C++. -// l.mu is held. -func (l *loggingT) setV(pc uintptr) Level { - fn := runtime.FuncForPC(pc) - file, _ := fn.FileLine(pc) - // The file is something like /a/b/c/d.go. We want just the d. - if strings.HasSuffix(file, ".go") { - file = file[:len(file)-3] - } - if slash := strings.LastIndex(file, "/"); slash >= 0 { - file = file[slash+1:] - } - for _, filter := range l.vmodule.filter { - if filter.match(file) { - l.vmap[pc] = filter.level - return filter.level - } - } - l.vmap[pc] = 0 - return 0 -} - -// Verbose is a boolean type that implements Infof (like Printf) etc. -// See the documentation of V for more information. -type Verbose bool - -// V reports whether verbosity at the call site is at least the requested level. -// The returned value is a boolean of type Verbose, which implements Info, Infoln -// and Infof. These methods will write to the Info log if called. -// Thus, one may write either -// if glog.V(2) { glog.Info("log this") } -// or -// glog.V(2).Info("log this") -// The second form is shorter but the first is cheaper if logging is off because it does -// not evaluate its arguments. -// -// Whether an individual call to V generates a log record depends on the setting of -// the -v and --vmodule flags; both are off by default. If the level in the call to -// V is at least the value of -v, or of -vmodule for the source file containing the -// call, the V call will log. -func V(level Level) Verbose { - // This function tries hard to be cheap unless there's work to do. - // The fast path is two atomic loads and compares. - - // Here is a cheap but safe test to see if V logging is enabled globally. - if logging.verbosity.get() >= level { - return Verbose(true) - } - - // It's off globally but it vmodule may still be set. - // Here is another cheap but safe test to see if vmodule is enabled. 
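`V` above is engineered so the disabled case costs only atomic loads. The two documented call forms, shown runnable (flag.Set here stands in for passing `-logtostderr -v=2` on the command line):

```go
package main

import (
	"flag"

	"github.com/golang/glog"
)

func main() {
	flag.Set("logtostderr", "true")
	flag.Set("v", "2")
	flag.Parse()

	// Guarded form: arguments are not even evaluated unless V(2) holds.
	if glog.V(2) {
		glog.Info("Starting transaction...")
	}
	// Shorter form: V returns a Verbose bool whose Info* methods are
	// no-ops when false.
	glog.V(2).Infoln("Processed", 42, "elements")
	glog.Flush()
}
```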
- if atomic.LoadInt32(&logging.filterLength) > 0 { - // Now we need a proper lock to use the logging structure. The pcs field - // is shared so we must lock before accessing it. This is fairly expensive, - // but if V logging is enabled we're slow anyway. - logging.mu.Lock() - defer logging.mu.Unlock() - if runtime.Callers(2, logging.pcs[:]) == 0 { - return Verbose(false) - } - v, ok := logging.vmap[logging.pcs[0]] - if !ok { - v = logging.setV(logging.pcs[0]) - } - return Verbose(v >= level) - } - return Verbose(false) -} - -// Info is equivalent to the global Info function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Info(args ...interface{}) { - if v { - logging.print(infoLog, args...) - } -} - -// Infoln is equivalent to the global Infoln function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Infoln(args ...interface{}) { - if v { - logging.println(infoLog, args...) - } -} - -// Infof is equivalent to the global Infof function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Infof(format string, args ...interface{}) { - if v { - logging.printf(infoLog, format, args...) - } -} - -// Info logs to the INFO log. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Info(args ...interface{}) { - logging.print(infoLog, args...) -} - -// InfoDepth acts as Info but uses depth to determine which call frame to log. -// InfoDepth(0, "msg") is the same as Info("msg"). -func InfoDepth(depth int, args ...interface{}) { - logging.printDepth(infoLog, depth, args...) -} - -// Infoln logs to the INFO log. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Infoln(args ...interface{}) { - logging.println(infoLog, args...) -} - -// Infof logs to the INFO log. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Infof(format string, args ...interface{}) { - logging.printf(infoLog, format, args...) -} - -// Warning logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Warning(args ...interface{}) { - logging.print(warningLog, args...) -} - -// WarningDepth acts as Warning but uses depth to determine which call frame to log. -// WarningDepth(0, "msg") is the same as Warning("msg"). -func WarningDepth(depth int, args ...interface{}) { - logging.printDepth(warningLog, depth, args...) -} - -// Warningln logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Warningln(args ...interface{}) { - logging.println(warningLog, args...) -} - -// Warningf logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Warningf(format string, args ...interface{}) { - logging.printf(warningLog, format, args...) -} - -// Error logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Error(args ...interface{}) { - logging.print(errorLog, args...) -} - -// ErrorDepth acts as Error but uses depth to determine which call frame to log. -// ErrorDepth(0, "msg") is the same as Error("msg"). -func ErrorDepth(depth int, args ...interface{}) { - logging.printDepth(errorLog, depth, args...) -} - -// Errorln logs to the ERROR, WARNING, and INFO logs. 
-// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Errorln(args ...interface{}) { - logging.println(errorLog, args...) -} - -// Errorf logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Errorf(format string, args ...interface{}) { - logging.printf(errorLog, format, args...) -} - -// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Fatal(args ...interface{}) { - logging.print(fatalLog, args...) -} - -// FatalDepth acts as Fatal but uses depth to determine which call frame to log. -// FatalDepth(0, "msg") is the same as Fatal("msg"). -func FatalDepth(depth int, args ...interface{}) { - logging.printDepth(fatalLog, depth, args...) -} - -// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Fatalln(args ...interface{}) { - logging.println(fatalLog, args...) -} - -// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Fatalf(format string, args ...interface{}) { - logging.printf(fatalLog, format, args...) -} - -// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. -// It allows Exit and relatives to use the Fatal logs. -var fatalNoStacks uint32 - -// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Exit(args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.print(fatalLog, args...) -} - -// ExitDepth acts as Exit but uses depth to determine which call frame to log. -// ExitDepth(0, "msg") is the same as Exit("msg"). -func ExitDepth(depth int, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.printDepth(fatalLog, depth, args...) -} - -// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -func Exitln(args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.println(fatalLog, args...) -} - -// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Exitf(format string, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.printf(fatalLog, format, args...) -} diff --git a/cluster-autoscaler/vendor/github.com/golang/glog/glog_file.go b/cluster-autoscaler/vendor/github.com/golang/glog/glog_file.go deleted file mode 100644 index 65075d281110..000000000000 --- a/cluster-autoscaler/vendor/github.com/golang/glog/glog_file.go +++ /dev/null @@ -1,124 +0,0 @@ -// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ -// -// Copyright 2013 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// File I/O for logs. - -package glog - -import ( - "errors" - "flag" - "fmt" - "os" - "os/user" - "path/filepath" - "strings" - "sync" - "time" -) - -// MaxSize is the maximum size of a log file in bytes. -var MaxSize uint64 = 1024 * 1024 * 1800 - -// logDirs lists the candidate directories for new log files. -var logDirs []string - -// If non-empty, overrides the choice of directory in which to write logs. -// See createLogDirs for the full list of possible destinations. -var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory") - -func createLogDirs() { - if *logDir != "" { - logDirs = append(logDirs, *logDir) - } - logDirs = append(logDirs, os.TempDir()) -} - -var ( - pid = os.Getpid() - program = filepath.Base(os.Args[0]) - host = "unknownhost" - userName = "unknownuser" -) - -func init() { - h, err := os.Hostname() - if err == nil { - host = shortHostname(h) - } - - current, err := user.Current() - if err == nil { - userName = current.Username - } - - // Sanitize userName since it may contain filepath separators on Windows. - userName = strings.Replace(userName, `\`, "_", -1) -} - -// shortHostname returns its argument, truncating at the first period. -// For instance, given "www.google.com" it returns "www". -func shortHostname(hostname string) string { - if i := strings.Index(hostname, "."); i >= 0 { - return hostname[:i] - } - return hostname -} - -// logName returns a new log file name containing tag, with start time t, and -// the name for the symlink for tag. -func logName(tag string, t time.Time) (name, link string) { - name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d", - program, - host, - userName, - tag, - t.Year(), - t.Month(), - t.Day(), - t.Hour(), - t.Minute(), - t.Second(), - pid) - return name, program + "." + tag -} - -var onceLogDirs sync.Once - -// create creates a new log file and returns the file and its filename, which -// contains tag ("INFO", "FATAL", etc.) and t. If the file is created -// successfully, create also attempts to update the symlink for that tag, ignoring -// errors. -func create(tag string, t time.Time) (f *os.File, filename string, err error) { - onceLogDirs.Do(createLogDirs) - if len(logDirs) == 0 { - return nil, "", errors.New("log: no log dirs") - } - name, link := logName(tag, t) - var lastErr error - for _, dir := range logDirs { - fname := filepath.Join(dir, name) - f, err := os.Create(fname) - if err == nil { - symlink := filepath.Join(dir, link) - os.Remove(symlink) // ignore err - os.Symlink(name, symlink) // ignore err - return f, fname, nil - } - lastErr = err - } - return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) -} diff --git a/cluster-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/cluster-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go new file mode 100644 index 000000000000..9e029e8cb44b --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go @@ -0,0 +1,2812 @@ +// Code generated by protoc-gen-go. 
DO NOT EDIT. +// source: google/protobuf/descriptor.proto + +package descriptor + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. 
+ FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} +func (x FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{4, 0} +} + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(value) + return nil +} +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. 
+ FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{10, 0} +} + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{12, 0} +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. + FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{12, 1} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. 
+type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{0} +} +func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) +} +func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) +} +func (dst *FileDescriptorSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorSet.Merge(dst, src) +} +func (m *FileDescriptorSet) XXX_Size() int { + return xxx_messageInfo_FileDescriptorSet.Size(m) +} +func (m *FileDescriptorSet) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. 
+ WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{1} +} +func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) +} +func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *FileDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorProto.Merge(dst, src) +} +func (m *FileDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FileDescriptorProto.Size(m) +} +func (m *FileDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m 
*FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. +type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{2} +} +func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) +} +func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) +} +func (dst *DescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto.Merge(dst, src) +} +func (m *DescriptorProto) XXX_Size() int { + return xxx_messageInfo_DescriptorProto.Size(m) +} +func (m *DescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) 
GetOneofDecl() []*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{2, 0} +} +func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) +} +func (dst *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(dst, src) +} +func (m *DescriptorProto_ExtensionRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) +} +func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. 
+type DescriptorProto_ReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ReservedRange) ProtoMessage() {} +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{2, 1} +} +func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) +} +func (dst *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ReservedRange.Merge(dst, src) +} +func (m *DescriptorProto_ReservedRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) +} +func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo + +func (m *DescriptorProto_ReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +type ExtensionRangeOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{3} +} + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} +func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) +} +func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) +} +func (dst *ExtensionRangeOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionRangeOptions.Merge(dst, src) +} +func (m *ExtensionRangeOptions) XXX_Size() int { + return xxx_messageInfo_ExtensionRangeOptions.Size(m) +} +func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. 
This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{4} +} +func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) +} +func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *FieldDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldDescriptorProto.Merge(dst, src) +} +func (m *FieldDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FieldDescriptorProto.Size(m) +} +func (m *FieldDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo + +func (m *FieldDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return "" +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. 
+type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{5} +} +func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) +} +func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *OneofDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofDescriptorProto.Merge(dst, src) +} +func (m *OneofDescriptorProto) XXX_Size() int { + return xxx_messageInfo_OneofDescriptorProto.Size(m) +} +func (m *OneofDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes an enum type. +type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. 
+ ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{6} +} +func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) +} +func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *EnumDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto.Merge(dst, src) +} +func (m *EnumDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto.Size(m) +} +func (m *EnumDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. 
+type EnumDescriptorProto_EnumReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } +func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} +func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{6, 0} +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) +} +func (dst *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(dst, src) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo + +func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +// Describes a value within an enum. 
+type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{7} +} +func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) +} +func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueDescriptorProto.Merge(dst, src) +} +func (m *EnumValueDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumValueDescriptorProto.Size(m) +} +func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. 
+type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{8} +} +func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) +} +func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *ServiceDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceDescriptorProto.Merge(dst, src) +} +func (m *ServiceDescriptorProto) XXX_Size() int { + return xxx_messageInfo_ServiceDescriptorProto.Size(m) +} +func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo + +func (m *ServiceDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. 
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{9} +} +func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) +} +func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *MethodDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodDescriptorProto.Merge(dst, src) +} +func (m *MethodDescriptorProto) XXX_Size() int { + return xxx_messageInfo_MethodDescriptorProto.Size(m) +} +func (m *MethodDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m *MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil { + return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. 
This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + a .proto always translates to a single class, but you may want to + explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use. + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect when used with the lite runtime. + JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` + // Is this file deprecated? 
+ // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this option is provided, they will use this value instead + // to prefix the types/symbols defined. + SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{10} +} + +var extRange_FileOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FileOptions +} +func (m *FileOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileOptions.Unmarshal(m, b) +} +func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) +} +func (dst *FileOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileOptions.Merge(dst, src) +} +func (m *FileOptions) XXX_Size() int { + return xxx_messageInfo_FileOptions.Size(m) +} +func (m *FileOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FileOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FileOptions proto.InternalMessageInfo + +const Default_FileOptions_JavaMultipleFiles bool = false +const Default_FileOptions_JavaStringCheckUtf8 bool = false +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED +const Default_FileOptions_CcGenericServices bool = false +const Default_FileOptions_JavaGenericServices bool = false +const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_PhpGenericServices bool = false +const Default_FileOptions_Deprecated bool = false +const Default_FileOptions_CcEnableArenas bool = false + +func (m *FileOptions) GetJavaPackage() string { + if m != nil && m.JavaPackage != nil { + return *m.JavaPackage + } + return "" +} + +func (m *FileOptions) GetJavaOuterClassname() string { + if m != nil && m.JavaOuterClassname != nil { + return *m.JavaOuterClassname + } + return "" +} + +func (m *FileOptions) GetJavaMultipleFiles() bool { + if m != nil && m.JavaMultipleFiles != nil { + return *m.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +// Deprecated: Do not use. 
+func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if m != nil && m.JavaGenerateEqualsAndHash != nil { + return *m.JavaGenerateEqualsAndHash + } + return false +} + +func (m *FileOptions) GetJavaStringCheckUtf8() bool { + if m != nil && m.JavaStringCheckUtf8 != nil { + return *m.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if m != nil && m.OptimizeFor != nil { + return *m.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (m *FileOptions) GetGoPackage() string { + if m != nil && m.GoPackage != nil { + return *m.GoPackage + } + return "" +} + +func (m *FileOptions) GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m != nil && m.PyGenericServices != nil { + return *m.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (m *FileOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (m *FileOptions) GetCcEnableArenas() bool { + if m != nil && m.CcEnableArenas != nil { + return *m.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + +func (m *FileOptions) GetSwiftPrefix() string { + if m != nil && m.SwiftPrefix != nil { + return *m.SwiftPrefix + } + return "" +} + +func (m *FileOptions) GetPhpClassPrefix() string { + if m != nil && m.PhpClassPrefix != nil { + return *m.PhpClassPrefix + } + return "" +} + +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" +} + +func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. 
+ MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"`
+ // Disables the generation of the standard "descriptor()" accessor, which can
+ // conflict with a field of the same name. This is meant to make migration
+ // from proto1 easier; new code should avoid fields named "descriptor".
+ NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"`
+ // Is this message deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the message, or it will be completely ignored; in the very least,
+ // this is a formalization for deprecating messages.
+ Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // Whether the message is an automatically generated map entry type for the
+ // maps field.
+ //
+ // For maps fields:
+ //     map<KeyType, ValueType> map_field = 1;
+ // The parsed descriptor looks like:
+ //     message MapFieldEntry {
+ //         option map_entry = true;
+ //         optional KeyType key = 1;
+ //         optional ValueType value = 2;
+ //     }
+ //     repeated MapFieldEntry map_field = 1;
+ //
+ // Implementations may choose not to generate the map_entry=true message, but
+ // use a native map in the target language to hold the keys and values.
+ // The reflection APIs in such implementations still need to work as
+ // if the field is a repeated message field.
+ //
+ // NOTE: Do not set the option in .proto files. Always use the maps syntax
+ // instead. The option should only be implicitly set by the proto compiler
+ // parser.
+ MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
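The generated accessors in this file all share one pattern: a nil check on both the receiver and the optional field, then a fall-through to the Default_* constant derived from the field's def= tag. That makes chained calls nil-safe end to end. A minimal sketch of a caller relying on this (md is a hypothetical *descriptor.DescriptorProto from the vendored github.com/golang/protobuf/protoc-gen-go/descriptor package):

    opts := md.GetOptions()             // may return nil; safe even if md itself is nil
    if opts.GetMessageSetWireFormat() { // nil-safe; yields the def=0 default (false) when unset
        // handle the legacy MessageSet wire format
    }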
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{11} +} + +var extRange_MessageOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MessageOptions +} +func (m *MessageOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageOptions.Unmarshal(m, b) +} +func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) +} +func (dst *MessageOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageOptions.Merge(dst, src) +} +func (m *MessageOptions) XXX_Size() int { + return xxx_messageInfo_MessageOptions.Size(m) +} +func (m *MessageOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MessageOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageOptions proto.InternalMessageInfo + +const Default_MessageOptions_MessageSetWireFormat bool = false +const Default_MessageOptions_NoStandardDescriptorAccessor bool = false +const Default_MessageOptions_Deprecated bool = false + +func (m *MessageOptions) GetMessageSetWireFormat() bool { + if m != nil && m.MessageSetWireFormat != nil { + return *m.MessageSetWireFormat + } + return Default_MessageOptions_MessageSetWireFormat +} + +func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if m != nil && m.NoStandardDescriptorAccessor != nil { + return *m.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (m *MessageOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (m *MessageOptions) GetMapEntry() bool { + if m != nil && m.MapEntry != nil { + return *m.MapEntry + } + return false +} + +func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. 
The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
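As one illustration of how these field-level options are consumed, a code generator might branch on jstype and lazy through the getters defined further below (a sketch; fd is a hypothetical *descriptor.FieldDescriptorProto):

    fopts := fd.GetOptions() // *descriptor.FieldOptions, possibly nil
    if fopts.GetJstype() == descriptor.FieldOptions_JS_STRING {
        // emit string-typed JS accessors to avoid 64-bit precision loss
    }
    if fopts.GetLazy() {
        // keep the sub-message encoded until first access
    }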
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{12} +} + +var extRange_FieldOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} +func (m *FieldOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldOptions.Unmarshal(m, b) +} +func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) +} +func (dst *FieldOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldOptions.Merge(dst, src) +} +func (m *FieldOptions) XXX_Size() int { + return xxx_messageInfo_FieldOptions.Size(m) +} +func (m *FieldOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FieldOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldOptions proto.InternalMessageInfo + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func (m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. 
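Every *Options message in this file reserves tags 1000-536870911 (see the extRange_* tables) so user-defined custom options can extend it. A hedged sketch of reading one with the proto v1 API; E_MyOption stands in for a generated extension descriptor and is not part of this file:

    ext, err := proto.GetExtension(fopts, mypkg.E_MyOption)
    if err == nil {
        s := ext.(*string) // concrete type depends on the extension's declared type
        _ = s
    }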
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{13} +} + +var extRange_OneofOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} +func (m *OneofOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofOptions.Unmarshal(m, b) +} +func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) +} +func (dst *OneofOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofOptions.Merge(dst, src) +} +func (m *OneofOptions) XXX_Size() int { + return xxx_messageInfo_OneofOptions.Size(m) +} +func (m *OneofOptions) XXX_DiscardUnknown() { + xxx_messageInfo_OneofOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofOptions proto.InternalMessageInfo + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
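allow_alias, when true, lets two enum values share a number; a consumer can detect that through the getter below (ed is a hypothetical *descriptor.EnumDescriptorProto):

    if ed.GetOptions().GetAllowAlias() {
        // duplicate numbers among ed.GetValue() are deliberate aliases
    }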
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{14} +} + +var extRange_EnumOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} +func (m *EnumOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumOptions.Unmarshal(m, b) +} +func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) +} +func (dst *EnumOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumOptions.Merge(dst, src) +} +func (m *EnumOptions) XXX_Size() int { + return xxx_messageInfo_EnumOptions.Size(m) +} +func (m *EnumOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumOptions proto.InternalMessageInfo + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{15} +} + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} +func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) +} +func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) +} +func (dst *EnumValueOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueOptions.Merge(dst, src) +} +func (m *EnumValueOptions) XXX_Size() int { + return xxx_messageInfo_EnumValueOptions.Size(m) +} +func (m *EnumValueOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
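Deprecated sits at tag 33 rather than 1 because descriptor.proto reserves the low field numbers of ServiceOptions and MethodOptions for Google's internal RPC framework, so shared options start at 33. A nil-safe deprecation check (sketch; sd is a hypothetical *descriptor.ServiceDescriptorProto):

    if sd.GetOptions().GetDeprecated() {
        log.Printf("service %s is deprecated", sd.GetName())
    }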
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{16} +} + +var extRange_ServiceOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ServiceOptions +} +func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) +} +func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) +} +func (dst *ServiceOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceOptions.Merge(dst, src) +} +func (m *ServiceOptions) XXX_Size() int { + return xxx_messageInfo_ServiceOptions.Size(m) +} +func (m *ServiceOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo + +const Default_ServiceOptions_Deprecated bool = false + +func (m *ServiceOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
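idempotency_level gives transports a machine-readable retry hint. For example (mo is a hypothetical *descriptor.MethodDescriptorProto):

    switch mo.GetOptions().GetIdempotencyLevel() {
    case descriptor.MethodOptions_NO_SIDE_EFFECTS:
        // safe to retry and to serve from a cache
    case descriptor.MethodOptions_IDEMPOTENT:
        // safe to retry, though it may mutate state
    default:
        // IDEMPOTENCY_UNKNOWN: make no assumptions
    }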
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MethodOptions) Reset() { *m = MethodOptions{} }
+func (m *MethodOptions) String() string { return proto.CompactTextString(m) }
+func (*MethodOptions) ProtoMessage() {}
+func (*MethodOptions) Descriptor() ([]byte, []int) {
+ return fileDescriptor_descriptor_4df4cb5f42392df6, []int{17}
+}
+
+var extRange_MethodOptions = []proto.ExtensionRange{
+ {Start: 1000, End: 536870911},
+}
+
+func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_MethodOptions
+}
+func (m *MethodOptions) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_MethodOptions.Unmarshal(m, b)
+}
+func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic)
+}
+func (dst *MethodOptions) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MethodOptions.Merge(dst, src)
+}
+func (m *MethodOptions) XXX_Size() int {
+ return xxx_messageInfo_MethodOptions.Size(m)
+}
+func (m *MethodOptions) XXX_DiscardUnknown() {
+ xxx_messageInfo_MethodOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MethodOptions proto.InternalMessageInfo
+
+const Default_MethodOptions_Deprecated bool = false
+const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN
+
+func (m *MethodOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+  return *m.Deprecated
+ }
+ return Default_MethodOptions_Deprecated
+}
+
+func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel {
+ if m != nil && m.IdempotencyLevel != nil {
+  return *m.IdempotencyLevel
+ }
+ return Default_MethodOptions_IdempotencyLevel
+}
+
+func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+  return m.UninterpretedOption
+ }
+ return nil
+}
+
+// A message representing an option the parser does not recognize. This only
+// appears in options protos created by the compiler::Parser class.
+// DescriptorPool resolves these when building Descriptor objects. Therefore,
+// options protos in descriptor objects (e.g. returned by Descriptor::options(),
+// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
+// in them.
+type UninterpretedOption struct {
+ Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"`
+ // The value of the uninterpreted option, in whatever type the tokenizer
+ // identified it as during parsing. Exactly one of these should be set.
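Which of the value fields below gets populated mirrors the token kind the parser saw. A sketch of dispatching on a received *UninterpretedOption uo:

    switch {
    case uo.IdentifierValue != nil:  // option o = FOO;
    case uo.PositiveIntValue != nil: // option o = 5;
    case uo.NegativeIntValue != nil: // option o = -5;
    case uo.DoubleValue != nil:      // option o = 1.5;
    case uo.StringValue != nil:      // option o = "x";
    case uo.AggregateValue != nil:   // option o = { key: value };
    }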
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{18} +} +func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) +} +func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) +} +func (dst *UninterpretedOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption.Merge(dst, src) +} +func (m *UninterpretedOption) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption.Size(m) +} +func (m *UninterpretedOption) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". 
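Continuing the comment's example, "foo.(bar.baz).qux" would be built as (sketch; proto.String and proto.Bool are the v1 pointer helpers):

    name := []*descriptor.UninterpretedOption_NamePart{
        {NamePart: proto.String("foo"), IsExtension: proto.Bool(false)},
        {NamePart: proto.String("bar.baz"), IsExtension: proto.Bool(true)},
        {NamePart: proto.String("qux"), IsExtension: proto.Bool(false)},
    }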
+type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{18, 0} +} +func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) +} +func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) +} +func (dst *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption_NamePart.Merge(dst, src) +} +func (m *UninterpretedOption_NamePart) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) +} +func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo + +func (m *UninterpretedOption_NamePart) GetNamePart() string { + if m != nil && m.NamePart != nil { + return *m.NamePart + } + return "" +} + +func (m *UninterpretedOption_NamePart) GetIsExtension() bool { + if m != nil && m.IsExtension != nil { + return *m.IsExtension + } + return false +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. 
For
+ // example, the "extendee" of an extension declaration appears at the
+ // beginning of the "extend" block and is shared by all extensions within
+ // the block.
+ // - Just because a location's span is a subset of some other location's span
+ // does not mean that it is a descendant. For example, a "group" defines
+ // both a type and a field in a single declaration. Thus, the locations
+ // corresponding to the type and field and their components will overlap.
+ // - Code which tries to interpret locations should probably be designed to
+ // ignore those that it doesn't understand, as more types of locations could
+ // be recorded in the future.
+ Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} }
+func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) }
+func (*SourceCodeInfo) ProtoMessage() {}
+func (*SourceCodeInfo) Descriptor() ([]byte, []int) {
+ return fileDescriptor_descriptor_4df4cb5f42392df6, []int{19}
+}
+func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b)
+}
+func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic)
+}
+func (dst *SourceCodeInfo) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SourceCodeInfo.Merge(dst, src)
+}
+func (m *SourceCodeInfo) XXX_Size() int {
+ return xxx_messageInfo_SourceCodeInfo.Size(m)
+}
+func (m *SourceCodeInfo) XXX_DiscardUnknown() {
+ xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo
+
+func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location {
+ if m != nil {
+  return m.Location
+ }
+ return nil
+}
+
+type SourceCodeInfo_Location struct {
+ // Identifies which part of the FileDescriptorProto was defined at this
+ // location.
+ //
+ // Each element is a field number or an index. They form a path from
+ // the root FileDescriptorProto to the place where the definition appears. For
+ // example, this path:
+ //     [ 4, 3, 2, 7, 1 ]
+ // refers to:
+ //     file.message_type(3)  // 4, 3
+ //         .field(7)         // 2, 7
+ //         .name()           // 1
+ // This is because FileDescriptorProto.message_type has field number 4:
+ //     repeated DescriptorProto message_type = 4;
+ // and DescriptorProto.field has field number 2:
+ //     repeated FieldDescriptorProto field = 2;
+ // and FieldDescriptorProto.name has field number 1:
+ //     optional string name = 1;
+ //
+ // Thus, the above path gives the location of a field name. If we removed
+ // the last element:
+ //     [ 4, 3, 2, 7 ]
+ // this path refers to the whole field declaration (from the beginning
+ // of the label to the terminating semicolon).
+ Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
+ // Always has exactly three or four elements: start line, start column,
+ // end line (optional, otherwise assumed same as start line), end column.
+ // These are packed into a single field for efficiency. Note that line
+ // and column numbers are zero-based -- typically you will want to add
+ // 1 to each before displaying to a user.
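Concretely, decoding a span back into 1-based editor coordinates looks like (sketch; loc is a hypothetical *descriptor.SourceCodeInfo_Location):

    span := loc.GetSpan()                       // e.g. []int32{3, 4, 21}
    startLine, startCol := span[0]+1, span[1]+1 // zero-based -> one-based
    endLine := startLine                        // three-element form ends on the same line
    if len(span) == 4 {
        endLine = span[2] + 1
    }
    endCol := span[len(span)-1] + 1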
+ Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{19, 0} +} +func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) +} +func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) +} +func (dst *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo_Location.Merge(dst, src) +} +func (m *SourceCodeInfo_Location) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo_Location.Size(m) +} +func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m 
*SourceCodeInfo_Location) GetSpan() []int32 { + if m != nil { + return m.Span + } + return nil +} + +func (m *SourceCodeInfo_Location) GetLeadingComments() string { + if m != nil && m.LeadingComments != nil { + return *m.LeadingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetTrailingComments() string { + if m != nil && m.TrailingComments != nil { + return *m.TrailingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{20} +} +func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) +} +func (dst *GeneratedCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo.Merge(dst, src) +} +func (m *GeneratedCodeInfo) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo.Size(m) +} +func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
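Because the range is half-open, recovering the annotated text is a plain slice (sketch; generated holds the generated file's bytes, ann is a hypothetical *descriptor.GeneratedCodeInfo_Annotation):

    snippet := generated[ann.GetBegin():ann.GetEnd()] // end is one past the last relevant byte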
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{20, 0} +} +func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) +} +func (dst *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(dst, src) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) +} +func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func init() { + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), "google.protobuf.EnumDescriptorProto.EnumReservedRange") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + 
proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) +} + +func init() { + proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor_descriptor_4df4cb5f42392df6) +} + +var fileDescriptor_descriptor_4df4cb5f42392df6 = []byte{ + // 2555 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x6e, 0x1b, 0xc7, + 0xf5, 0xcf, 0xf2, 0x4b, 0xe4, 0x21, 0x45, 0x8d, 0x46, 0x8a, 0xbd, 0x56, 0x3e, 0x2c, 0x33, 0x1f, + 0x96, 0x9d, 0x7f, 0xa8, 0xc0, 0xb1, 0x1d, 0x47, 0xfe, 0x23, 0x2d, 0x45, 0xae, 0x15, 0xaa, 0x12, + 0xc9, 0x2e, 0xa9, 0xe6, 0x03, 0x28, 0x16, 0xa3, 0xdd, 0x21, 0xb9, 0xf6, 0x72, 0x77, 0xb3, 0xbb, + 0xb4, 0xad, 0xa0, 0x17, 0x06, 0x7a, 0xd5, 0xab, 0xde, 0x16, 0x45, 0xd1, 0x8b, 0xde, 0x04, 0xe8, + 0x03, 0x14, 0xc8, 0x5d, 0x9f, 0xa0, 0x40, 0xde, 0xa0, 0x68, 0x0b, 0xb4, 0x8f, 0xd0, 0xcb, 0x62, + 0x66, 0x76, 0x97, 0xbb, 0x24, 0x15, 0x2b, 0x01, 0xe2, 0x5c, 0x91, 0xf3, 0x9b, 0xdf, 0x39, 0x73, + 0xe6, 0xcc, 0x99, 0x33, 0x67, 0x66, 0x61, 0x7b, 0xe4, 0x38, 0x23, 0x8b, 0xee, 0xba, 0x9e, 0x13, + 0x38, 0xa7, 0xd3, 0xe1, 0xae, 0x41, 0x7d, 0xdd, 0x33, 0xdd, 0xc0, 0xf1, 0xea, 0x1c, 0xc3, 0x6b, + 0x82, 0x51, 0x8f, 0x18, 0xb5, 0x63, 0x58, 0x7f, 0x60, 0x5a, 0xb4, 0x15, 0x13, 0xfb, 0x34, 0xc0, + 0xf7, 0x20, 0x37, 0x34, 0x2d, 0x2a, 0x4b, 0xdb, 0xd9, 0x9d, 0xf2, 0xad, 0x37, 0xeb, 0x73, 0x42, + 0xf5, 0xb4, 0x44, 0x8f, 0xc1, 0x2a, 0x97, 0xa8, 0xfd, 0x2b, 0x07, 0x1b, 0x4b, 0x7a, 0x31, 0x86, + 0x9c, 0x4d, 0x26, 0x4c, 0xa3, 0xb4, 0x53, 0x52, 0xf9, 0x7f, 0x2c, 0xc3, 0x8a, 0x4b, 0xf4, 0x47, + 0x64, 0x44, 0xe5, 0x0c, 0x87, 0xa3, 0x26, 0x7e, 0x1d, 0xc0, 0xa0, 0x2e, 0xb5, 0x0d, 0x6a, 0xeb, + 0x67, 0x72, 0x76, 0x3b, 0xbb, 0x53, 0x52, 0x13, 0x08, 0x7e, 0x07, 0xd6, 0xdd, 0xe9, 0xa9, 0x65, + 0xea, 0x5a, 0x82, 0x06, 0xdb, 0xd9, 0x9d, 0xbc, 0x8a, 0x44, 0x47, 0x6b, 0x46, 0xbe, 0x0e, 0x6b, + 0x4f, 0x28, 0x79, 0x94, 0xa4, 0x96, 0x39, 0xb5, 0xca, 0xe0, 0x04, 0xb1, 0x09, 0x95, 0x09, 0xf5, + 0x7d, 0x32, 0xa2, 0x5a, 0x70, 0xe6, 0x52, 0x39, 0xc7, 0x67, 0xbf, 0xbd, 0x30, 0xfb, 0xf9, 
0x99, + 0x97, 0x43, 0xa9, 0xc1, 0x99, 0x4b, 0x71, 0x03, 0x4a, 0xd4, 0x9e, 0x4e, 0x84, 0x86, 0xfc, 0x39, + 0xfe, 0x53, 0xec, 0xe9, 0x64, 0x5e, 0x4b, 0x91, 0x89, 0x85, 0x2a, 0x56, 0x7c, 0xea, 0x3d, 0x36, + 0x75, 0x2a, 0x17, 0xb8, 0x82, 0xeb, 0x0b, 0x0a, 0xfa, 0xa2, 0x7f, 0x5e, 0x47, 0x24, 0x87, 0x9b, + 0x50, 0xa2, 0x4f, 0x03, 0x6a, 0xfb, 0xa6, 0x63, 0xcb, 0x2b, 0x5c, 0xc9, 0x5b, 0x4b, 0x56, 0x91, + 0x5a, 0xc6, 0xbc, 0x8a, 0x99, 0x1c, 0xbe, 0x0b, 0x2b, 0x8e, 0x1b, 0x98, 0x8e, 0xed, 0xcb, 0xc5, + 0x6d, 0x69, 0xa7, 0x7c, 0xeb, 0xd5, 0xa5, 0x81, 0xd0, 0x15, 0x1c, 0x35, 0x22, 0xe3, 0x36, 0x20, + 0xdf, 0x99, 0x7a, 0x3a, 0xd5, 0x74, 0xc7, 0xa0, 0x9a, 0x69, 0x0f, 0x1d, 0xb9, 0xc4, 0x15, 0x5c, + 0x5d, 0x9c, 0x08, 0x27, 0x36, 0x1d, 0x83, 0xb6, 0xed, 0xa1, 0xa3, 0x56, 0xfd, 0x54, 0x1b, 0x5f, + 0x82, 0x82, 0x7f, 0x66, 0x07, 0xe4, 0xa9, 0x5c, 0xe1, 0x11, 0x12, 0xb6, 0x6a, 0x5f, 0x17, 0x60, + 0xed, 0x22, 0x21, 0x76, 0x1f, 0xf2, 0x43, 0x36, 0x4b, 0x39, 0xf3, 0x5d, 0x7c, 0x20, 0x64, 0xd2, + 0x4e, 0x2c, 0x7c, 0x4f, 0x27, 0x36, 0xa0, 0x6c, 0x53, 0x3f, 0xa0, 0x86, 0x88, 0x88, 0xec, 0x05, + 0x63, 0x0a, 0x84, 0xd0, 0x62, 0x48, 0xe5, 0xbe, 0x57, 0x48, 0x7d, 0x0a, 0x6b, 0xb1, 0x49, 0x9a, + 0x47, 0xec, 0x51, 0x14, 0x9b, 0xbb, 0xcf, 0xb3, 0xa4, 0xae, 0x44, 0x72, 0x2a, 0x13, 0x53, 0xab, + 0x34, 0xd5, 0xc6, 0x2d, 0x00, 0xc7, 0xa6, 0xce, 0x50, 0x33, 0xa8, 0x6e, 0xc9, 0xc5, 0x73, 0xbc, + 0xd4, 0x65, 0x94, 0x05, 0x2f, 0x39, 0x02, 0xd5, 0x2d, 0xfc, 0xe1, 0x2c, 0xd4, 0x56, 0xce, 0x89, + 0x94, 0x63, 0xb1, 0xc9, 0x16, 0xa2, 0xed, 0x04, 0xaa, 0x1e, 0x65, 0x71, 0x4f, 0x8d, 0x70, 0x66, + 0x25, 0x6e, 0x44, 0xfd, 0xb9, 0x33, 0x53, 0x43, 0x31, 0x31, 0xb1, 0x55, 0x2f, 0xd9, 0xc4, 0x6f, + 0x40, 0x0c, 0x68, 0x3c, 0xac, 0x80, 0x67, 0xa1, 0x4a, 0x04, 0x76, 0xc8, 0x84, 0x6e, 0x7d, 0x09, + 0xd5, 0xb4, 0x7b, 0xf0, 0x26, 0xe4, 0xfd, 0x80, 0x78, 0x01, 0x8f, 0xc2, 0xbc, 0x2a, 0x1a, 0x18, + 0x41, 0x96, 0xda, 0x06, 0xcf, 0x72, 0x79, 0x95, 0xfd, 0xc5, 0x3f, 0x9d, 0x4d, 0x38, 0xcb, 0x27, + 0xfc, 0xf6, 0xe2, 0x8a, 0xa6, 0x34, 0xcf, 0xcf, 0x7b, 0xeb, 0x03, 0x58, 0x4d, 0x4d, 0xe0, 0xa2, + 0x43, 0xd7, 0x7e, 0x05, 0x2f, 0x2f, 0x55, 0x8d, 0x3f, 0x85, 0xcd, 0xa9, 0x6d, 0xda, 0x01, 0xf5, + 0x5c, 0x8f, 0xb2, 0x88, 0x15, 0x43, 0xc9, 0xff, 0x5e, 0x39, 0x27, 0xe6, 0x4e, 0x92, 0x6c, 0xa1, + 0x45, 0xdd, 0x98, 0x2e, 0x82, 0x37, 0x4b, 0xc5, 0xff, 0xac, 0xa0, 0x67, 0xcf, 0x9e, 0x3d, 0xcb, + 0xd4, 0x7e, 0x57, 0x80, 0xcd, 0x65, 0x7b, 0x66, 0xe9, 0xf6, 0xbd, 0x04, 0x05, 0x7b, 0x3a, 0x39, + 0xa5, 0x1e, 0x77, 0x52, 0x5e, 0x0d, 0x5b, 0xb8, 0x01, 0x79, 0x8b, 0x9c, 0x52, 0x4b, 0xce, 0x6d, + 0x4b, 0x3b, 0xd5, 0x5b, 0xef, 0x5c, 0x68, 0x57, 0xd6, 0x8f, 0x98, 0x88, 0x2a, 0x24, 0xf1, 0x47, + 0x90, 0x0b, 0x53, 0x34, 0xd3, 0x70, 0xf3, 0x62, 0x1a, 0xd8, 0x5e, 0x52, 0xb9, 0x1c, 0x7e, 0x05, + 0x4a, 0xec, 0x57, 0xc4, 0x46, 0x81, 0xdb, 0x5c, 0x64, 0x00, 0x8b, 0x0b, 0xbc, 0x05, 0x45, 0xbe, + 0x4d, 0x0c, 0x1a, 0x1d, 0x6d, 0x71, 0x9b, 0x05, 0x96, 0x41, 0x87, 0x64, 0x6a, 0x05, 0xda, 0x63, + 0x62, 0x4d, 0x29, 0x0f, 0xf8, 0x92, 0x5a, 0x09, 0xc1, 0x5f, 0x30, 0x0c, 0x5f, 0x85, 0xb2, 0xd8, + 0x55, 0xa6, 0x6d, 0xd0, 0xa7, 0x3c, 0x7b, 0xe6, 0x55, 0xb1, 0xd1, 0xda, 0x0c, 0x61, 0xc3, 0x3f, + 0xf4, 0x1d, 0x3b, 0x0a, 0x4d, 0x3e, 0x04, 0x03, 0xf8, 0xf0, 0x1f, 0xcc, 0x27, 0xee, 0xd7, 0x96, + 0x4f, 0x6f, 0x3e, 0xa6, 0x6a, 0x7f, 0xc9, 0x40, 0x8e, 0xe7, 0x8b, 0x35, 0x28, 0x0f, 0x3e, 0xeb, + 0x29, 0x5a, 0xab, 0x7b, 0xb2, 0x7f, 0xa4, 0x20, 0x09, 0x57, 0x01, 0x38, 0xf0, 0xe0, 0xa8, 0xdb, + 0x18, 0xa0, 0x4c, 0xdc, 0x6e, 0x77, 0x06, 0x77, 0x6f, 0xa3, 0x6c, 0x2c, 0x70, 0x22, 0x80, 0x5c, + 0x92, 0xf0, 0xfe, 
0x2d, 0x94, 0xc7, 0x08, 0x2a, 0x42, 0x41, 0xfb, 0x53, 0xa5, 0x75, 0xf7, 0x36, + 0x2a, 0xa4, 0x91, 0xf7, 0x6f, 0xa1, 0x15, 0xbc, 0x0a, 0x25, 0x8e, 0xec, 0x77, 0xbb, 0x47, 0xa8, + 0x18, 0xeb, 0xec, 0x0f, 0xd4, 0x76, 0xe7, 0x00, 0x95, 0x62, 0x9d, 0x07, 0x6a, 0xf7, 0xa4, 0x87, + 0x20, 0xd6, 0x70, 0xac, 0xf4, 0xfb, 0x8d, 0x03, 0x05, 0x95, 0x63, 0xc6, 0xfe, 0x67, 0x03, 0xa5, + 0x8f, 0x2a, 0x29, 0xb3, 0xde, 0xbf, 0x85, 0x56, 0xe3, 0x21, 0x94, 0xce, 0xc9, 0x31, 0xaa, 0xe2, + 0x75, 0x58, 0x15, 0x43, 0x44, 0x46, 0xac, 0xcd, 0x41, 0x77, 0x6f, 0x23, 0x34, 0x33, 0x44, 0x68, + 0x59, 0x4f, 0x01, 0x77, 0x6f, 0x23, 0x5c, 0x6b, 0x42, 0x9e, 0x47, 0x17, 0xc6, 0x50, 0x3d, 0x6a, + 0xec, 0x2b, 0x47, 0x5a, 0xb7, 0x37, 0x68, 0x77, 0x3b, 0x8d, 0x23, 0x24, 0xcd, 0x30, 0x55, 0xf9, + 0xf9, 0x49, 0x5b, 0x55, 0x5a, 0x28, 0x93, 0xc4, 0x7a, 0x4a, 0x63, 0xa0, 0xb4, 0x50, 0xb6, 0xa6, + 0xc3, 0xe6, 0xb2, 0x3c, 0xb9, 0x74, 0x67, 0x24, 0x96, 0x38, 0x73, 0xce, 0x12, 0x73, 0x5d, 0x0b, + 0x4b, 0xfc, 0xcf, 0x0c, 0x6c, 0x2c, 0x39, 0x2b, 0x96, 0x0e, 0xf2, 0x13, 0xc8, 0x8b, 0x10, 0x15, + 0xa7, 0xe7, 0x8d, 0xa5, 0x87, 0x0e, 0x0f, 0xd8, 0x85, 0x13, 0x94, 0xcb, 0x25, 0x2b, 0x88, 0xec, + 0x39, 0x15, 0x04, 0x53, 0xb1, 0x90, 0xd3, 0x7f, 0xb9, 0x90, 0xd3, 0xc5, 0xb1, 0x77, 0xf7, 0x22, + 0xc7, 0x1e, 0xc7, 0xbe, 0x5b, 0x6e, 0xcf, 0x2f, 0xc9, 0xed, 0xf7, 0x61, 0x7d, 0x41, 0xd1, 0x85, + 0x73, 0xec, 0xaf, 0x25, 0x90, 0xcf, 0x73, 0xce, 0x73, 0x32, 0x5d, 0x26, 0x95, 0xe9, 0xee, 0xcf, + 0x7b, 0xf0, 0xda, 0xf9, 0x8b, 0xb0, 0xb0, 0xd6, 0x5f, 0x49, 0x70, 0x69, 0x79, 0xa5, 0xb8, 0xd4, + 0x86, 0x8f, 0xa0, 0x30, 0xa1, 0xc1, 0xd8, 0x89, 0xaa, 0xa5, 0xb7, 0x97, 0x9c, 0xc1, 0xac, 0x7b, + 0x7e, 0xb1, 0x43, 0xa9, 0xe4, 0x21, 0x9e, 0x3d, 0xaf, 0xdc, 0x13, 0xd6, 0x2c, 0x58, 0xfa, 0x9b, + 0x0c, 0xbc, 0xbc, 0x54, 0xf9, 0x52, 0x43, 0x5f, 0x03, 0x30, 0x6d, 0x77, 0x1a, 0x88, 0x8a, 0x48, + 0x24, 0xd8, 0x12, 0x47, 0x78, 0xf2, 0x62, 0xc9, 0x73, 0x1a, 0xc4, 0xfd, 0x59, 0xde, 0x0f, 0x02, + 0xe2, 0x84, 0x7b, 0x33, 0x43, 0x73, 0xdc, 0xd0, 0xd7, 0xcf, 0x99, 0xe9, 0x42, 0x60, 0xbe, 0x07, + 0x48, 0xb7, 0x4c, 0x6a, 0x07, 0x9a, 0x1f, 0x78, 0x94, 0x4c, 0x4c, 0x7b, 0xc4, 0x4f, 0x90, 0xe2, + 0x5e, 0x7e, 0x48, 0x2c, 0x9f, 0xaa, 0x6b, 0xa2, 0xbb, 0x1f, 0xf5, 0x32, 0x09, 0x1e, 0x40, 0x5e, + 0x42, 0xa2, 0x90, 0x92, 0x10, 0xdd, 0xb1, 0x44, 0xed, 0xeb, 0x22, 0x94, 0x13, 0x75, 0x35, 0xbe, + 0x06, 0x95, 0x87, 0xe4, 0x31, 0xd1, 0xa2, 0xbb, 0x92, 0xf0, 0x44, 0x99, 0x61, 0xbd, 0xf0, 0xbe, + 0xf4, 0x1e, 0x6c, 0x72, 0x8a, 0x33, 0x0d, 0xa8, 0xa7, 0xe9, 0x16, 0xf1, 0x7d, 0xee, 0xb4, 0x22, + 0xa7, 0x62, 0xd6, 0xd7, 0x65, 0x5d, 0xcd, 0xa8, 0x07, 0xdf, 0x81, 0x0d, 0x2e, 0x31, 0x99, 0x5a, + 0x81, 0xe9, 0x5a, 0x54, 0x63, 0xb7, 0x37, 0x9f, 0x9f, 0x24, 0xb1, 0x65, 0xeb, 0x8c, 0x71, 0x1c, + 0x12, 0x98, 0x45, 0x3e, 0x6e, 0xc1, 0x6b, 0x5c, 0x6c, 0x44, 0x6d, 0xea, 0x91, 0x80, 0x6a, 0xf4, + 0x8b, 0x29, 0xb1, 0x7c, 0x8d, 0xd8, 0x86, 0x36, 0x26, 0xfe, 0x58, 0xde, 0x64, 0x0a, 0xf6, 0x33, + 0xb2, 0xa4, 0x5e, 0x61, 0xc4, 0x83, 0x90, 0xa7, 0x70, 0x5a, 0xc3, 0x36, 0x3e, 0x26, 0xfe, 0x18, + 0xef, 0xc1, 0x25, 0xae, 0xc5, 0x0f, 0x3c, 0xd3, 0x1e, 0x69, 0xfa, 0x98, 0xea, 0x8f, 0xb4, 0x69, + 0x30, 0xbc, 0x27, 0xbf, 0x92, 0x1c, 0x9f, 0x5b, 0xd8, 0xe7, 0x9c, 0x26, 0xa3, 0x9c, 0x04, 0xc3, + 0x7b, 0xb8, 0x0f, 0x15, 0xb6, 0x18, 0x13, 0xf3, 0x4b, 0xaa, 0x0d, 0x1d, 0x8f, 0x1f, 0x8d, 0xd5, + 0x25, 0xa9, 0x29, 0xe1, 0xc1, 0x7a, 0x37, 0x14, 0x38, 0x76, 0x0c, 0xba, 0x97, 0xef, 0xf7, 0x14, + 0xa5, 0xa5, 0x96, 0x23, 0x2d, 0x0f, 0x1c, 0x8f, 0x05, 0xd4, 0xc8, 0x89, 0x1d, 0x5c, 0x16, 0x01, + 0x35, 0x72, 0x22, 0xf7, 0xde, 0x81, 0x0d, 
0x5d, 0x17, 0x73, 0x36, 0x75, 0x2d, 0xbc, 0x63, 0xf9, + 0x32, 0x4a, 0x39, 0x4b, 0xd7, 0x0f, 0x04, 0x21, 0x8c, 0x71, 0x1f, 0x7f, 0x08, 0x2f, 0xcf, 0x9c, + 0x95, 0x14, 0x5c, 0x5f, 0x98, 0xe5, 0xbc, 0xe8, 0x1d, 0xd8, 0x70, 0xcf, 0x16, 0x05, 0x71, 0x6a, + 0x44, 0xf7, 0x6c, 0x5e, 0xec, 0x03, 0xd8, 0x74, 0xc7, 0xee, 0xa2, 0xdc, 0xcd, 0xa4, 0x1c, 0x76, + 0xc7, 0xee, 0xbc, 0xe0, 0x5b, 0xfc, 0xc2, 0xed, 0x51, 0x9d, 0x04, 0xd4, 0x90, 0x2f, 0x27, 0xe9, + 0x89, 0x0e, 0xbc, 0x0b, 0x48, 0xd7, 0x35, 0x6a, 0x93, 0x53, 0x8b, 0x6a, 0xc4, 0xa3, 0x36, 0xf1, + 0xe5, 0xab, 0x49, 0x72, 0x55, 0xd7, 0x15, 0xde, 0xdb, 0xe0, 0x9d, 0xf8, 0x26, 0xac, 0x3b, 0xa7, + 0x0f, 0x75, 0x11, 0x92, 0x9a, 0xeb, 0xd1, 0xa1, 0xf9, 0x54, 0x7e, 0x93, 0xfb, 0x77, 0x8d, 0x75, + 0xf0, 0x80, 0xec, 0x71, 0x18, 0xdf, 0x00, 0xa4, 0xfb, 0x63, 0xe2, 0xb9, 0x3c, 0x27, 0xfb, 0x2e, + 0xd1, 0xa9, 0xfc, 0x96, 0xa0, 0x0a, 0xbc, 0x13, 0xc1, 0x6c, 0x4b, 0xf8, 0x4f, 0xcc, 0x61, 0x10, + 0x69, 0xbc, 0x2e, 0xb6, 0x04, 0xc7, 0x42, 0x6d, 0x3b, 0x80, 0x98, 0x2b, 0x52, 0x03, 0xef, 0x70, + 0x5a, 0xd5, 0x1d, 0xbb, 0xc9, 0x71, 0xdf, 0x80, 0x55, 0xc6, 0x9c, 0x0d, 0x7a, 0x43, 0x14, 0x64, + 0xee, 0x38, 0x31, 0xe2, 0x0f, 0x56, 0x1b, 0xd7, 0xf6, 0xa0, 0x92, 0x8c, 0x4f, 0x5c, 0x02, 0x11, + 0xa1, 0x48, 0x62, 0xc5, 0x4a, 0xb3, 0xdb, 0x62, 0x65, 0xc6, 0xe7, 0x0a, 0xca, 0xb0, 0x72, 0xe7, + 0xa8, 0x3d, 0x50, 0x34, 0xf5, 0xa4, 0x33, 0x68, 0x1f, 0x2b, 0x28, 0x9b, 0xa8, 0xab, 0x0f, 0x73, + 0xc5, 0xb7, 0xd1, 0xf5, 0xda, 0x37, 0x19, 0xa8, 0xa6, 0x2f, 0x4a, 0xf8, 0xff, 0xe1, 0x72, 0xf4, + 0xaa, 0xe1, 0xd3, 0x40, 0x7b, 0x62, 0x7a, 0x7c, 0xe3, 0x4c, 0x88, 0x38, 0xc4, 0xe2, 0xa5, 0xdb, + 0x0c, 0x59, 0x7d, 0x1a, 0x7c, 0x62, 0x7a, 0x6c, 0x5b, 0x4c, 0x48, 0x80, 0x8f, 0xe0, 0xaa, 0xed, + 0x68, 0x7e, 0x40, 0x6c, 0x83, 0x78, 0x86, 0x36, 0x7b, 0x4f, 0xd2, 0x88, 0xae, 0x53, 0xdf, 0x77, + 0xc4, 0x81, 0x15, 0x6b, 0x79, 0xd5, 0x76, 0xfa, 0x21, 0x79, 0x96, 0xc9, 0x1b, 0x21, 0x75, 0x2e, + 0xcc, 0xb2, 0xe7, 0x85, 0xd9, 0x2b, 0x50, 0x9a, 0x10, 0x57, 0xa3, 0x76, 0xe0, 0x9d, 0xf1, 0xf2, + 0xb8, 0xa8, 0x16, 0x27, 0xc4, 0x55, 0x58, 0xfb, 0x85, 0xdc, 0x52, 0x0e, 0x73, 0xc5, 0x22, 0x2a, + 0x1d, 0xe6, 0x8a, 0x25, 0x04, 0xb5, 0x7f, 0x64, 0xa1, 0x92, 0x2c, 0x97, 0xd9, 0xed, 0x43, 0xe7, + 0x27, 0x8b, 0xc4, 0x73, 0xcf, 0x1b, 0xdf, 0x5a, 0x5c, 0xd7, 0x9b, 0xec, 0xc8, 0xd9, 0x2b, 0x88, + 0x22, 0x56, 0x15, 0x92, 0xec, 0xb8, 0x67, 0xd9, 0x86, 0x8a, 0xa2, 0xa1, 0xa8, 0x86, 0x2d, 0x7c, + 0x00, 0x85, 0x87, 0x3e, 0xd7, 0x5d, 0xe0, 0xba, 0xdf, 0xfc, 0x76, 0xdd, 0x87, 0x7d, 0xae, 0xbc, + 0x74, 0xd8, 0xd7, 0x3a, 0x5d, 0xf5, 0xb8, 0x71, 0xa4, 0x86, 0xe2, 0xf8, 0x0a, 0xe4, 0x2c, 0xf2, + 0xe5, 0x59, 0xfa, 0x70, 0xe2, 0xd0, 0x45, 0x17, 0xe1, 0x0a, 0xe4, 0x9e, 0x50, 0xf2, 0x28, 0x7d, + 0x24, 0x70, 0xe8, 0x07, 0xdc, 0x0c, 0xbb, 0x90, 0xe7, 0xfe, 0xc2, 0x00, 0xa1, 0xc7, 0xd0, 0x4b, + 0xb8, 0x08, 0xb9, 0x66, 0x57, 0x65, 0x1b, 0x02, 0x41, 0x45, 0xa0, 0x5a, 0xaf, 0xad, 0x34, 0x15, + 0x94, 0xa9, 0xdd, 0x81, 0x82, 0x70, 0x02, 0xdb, 0x2c, 0xb1, 0x1b, 0xd0, 0x4b, 0x61, 0x33, 0xd4, + 0x21, 0x45, 0xbd, 0x27, 0xc7, 0xfb, 0x8a, 0x8a, 0x32, 0xe9, 0xa5, 0xce, 0xa1, 0x7c, 0xcd, 0x87, + 0x4a, 0xb2, 0x5e, 0x7e, 0x31, 0x77, 0xe1, 0xbf, 0x4a, 0x50, 0x4e, 0xd4, 0xbf, 0xac, 0x70, 0x21, + 0x96, 0xe5, 0x3c, 0xd1, 0x88, 0x65, 0x12, 0x3f, 0x0c, 0x0d, 0xe0, 0x50, 0x83, 0x21, 0x17, 0x5d, + 0xba, 0x17, 0xb4, 0x45, 0xf2, 0xa8, 0x50, 0xfb, 0xa3, 0x04, 0x68, 0xbe, 0x00, 0x9d, 0x33, 0x53, + 0xfa, 0x31, 0xcd, 0xac, 0xfd, 0x41, 0x82, 0x6a, 0xba, 0xea, 0x9c, 0x33, 0xef, 0xda, 0x8f, 0x6a, + 0xde, 0xdf, 0x33, 0xb0, 0x9a, 0xaa, 0x35, 0x2f, 0x6a, 0xdd, 0x17, 
0xb0, 0x6e, 0x1a, 0x74, 0xe2, + 0x3a, 0x01, 0xb5, 0xf5, 0x33, 0xcd, 0xa2, 0x8f, 0xa9, 0x25, 0xd7, 0x78, 0xd2, 0xd8, 0xfd, 0xf6, + 0x6a, 0xb6, 0xde, 0x9e, 0xc9, 0x1d, 0x31, 0xb1, 0xbd, 0x8d, 0x76, 0x4b, 0x39, 0xee, 0x75, 0x07, + 0x4a, 0xa7, 0xf9, 0x99, 0x76, 0xd2, 0xf9, 0x59, 0xa7, 0xfb, 0x49, 0x47, 0x45, 0xe6, 0x1c, 0xed, + 0x07, 0xdc, 0xf6, 0x3d, 0x40, 0xf3, 0x46, 0xe1, 0xcb, 0xb0, 0xcc, 0x2c, 0xf4, 0x12, 0xde, 0x80, + 0xb5, 0x4e, 0x57, 0xeb, 0xb7, 0x5b, 0x8a, 0xa6, 0x3c, 0x78, 0xa0, 0x34, 0x07, 0x7d, 0xf1, 0x3e, + 0x11, 0xb3, 0x07, 0xa9, 0x0d, 0x5e, 0xfb, 0x7d, 0x16, 0x36, 0x96, 0x58, 0x82, 0x1b, 0xe1, 0xcd, + 0x42, 0x5c, 0x76, 0xde, 0xbd, 0x88, 0xf5, 0x75, 0x56, 0x10, 0xf4, 0x88, 0x17, 0x84, 0x17, 0x91, + 0x1b, 0xc0, 0xbc, 0x64, 0x07, 0xe6, 0xd0, 0xa4, 0x5e, 0xf8, 0x9c, 0x23, 0xae, 0x1b, 0x6b, 0x33, + 0x5c, 0xbc, 0xe8, 0xfc, 0x1f, 0x60, 0xd7, 0xf1, 0xcd, 0xc0, 0x7c, 0x4c, 0x35, 0xd3, 0x8e, 0xde, + 0x7e, 0xd8, 0xf5, 0x23, 0xa7, 0xa2, 0xa8, 0xa7, 0x6d, 0x07, 0x31, 0xdb, 0xa6, 0x23, 0x32, 0xc7, + 0x66, 0xc9, 0x3c, 0xab, 0xa2, 0xa8, 0x27, 0x66, 0x5f, 0x83, 0x8a, 0xe1, 0x4c, 0x59, 0x4d, 0x26, + 0x78, 0xec, 0xec, 0x90, 0xd4, 0xb2, 0xc0, 0x62, 0x4a, 0x58, 0x6d, 0xcf, 0x1e, 0x9d, 0x2a, 0x6a, + 0x59, 0x60, 0x82, 0x72, 0x1d, 0xd6, 0xc8, 0x68, 0xe4, 0x31, 0xe5, 0x91, 0x22, 0x71, 0x7f, 0xa8, + 0xc6, 0x30, 0x27, 0x6e, 0x1d, 0x42, 0x31, 0xf2, 0x03, 0x3b, 0xaa, 0x99, 0x27, 0x34, 0x57, 0x5c, + 0x8a, 0x33, 0x3b, 0x25, 0xb5, 0x68, 0x47, 0x9d, 0xd7, 0xa0, 0x62, 0xfa, 0xda, 0xec, 0x0d, 0x3d, + 0xb3, 0x9d, 0xd9, 0x29, 0xaa, 0x65, 0xd3, 0x8f, 0xdf, 0x1f, 0x6b, 0x5f, 0x65, 0xa0, 0x9a, 0xfe, + 0x06, 0x80, 0x5b, 0x50, 0xb4, 0x1c, 0x9d, 0xf0, 0xd0, 0x12, 0x1f, 0xa0, 0x76, 0x9e, 0xf3, 0xd9, + 0xa0, 0x7e, 0x14, 0xf2, 0xd5, 0x58, 0x72, 0xeb, 0x6f, 0x12, 0x14, 0x23, 0x18, 0x5f, 0x82, 0x9c, + 0x4b, 0x82, 0x31, 0x57, 0x97, 0xdf, 0xcf, 0x20, 0x49, 0xe5, 0x6d, 0x86, 0xfb, 0x2e, 0xb1, 0x79, + 0x08, 0x84, 0x38, 0x6b, 0xb3, 0x75, 0xb5, 0x28, 0x31, 0xf8, 0xe5, 0xc4, 0x99, 0x4c, 0xa8, 0x1d, + 0xf8, 0xd1, 0xba, 0x86, 0x78, 0x33, 0x84, 0xf1, 0x3b, 0xb0, 0x1e, 0x78, 0xc4, 0xb4, 0x52, 0xdc, + 0x1c, 0xe7, 0xa2, 0xa8, 0x23, 0x26, 0xef, 0xc1, 0x95, 0x48, 0xaf, 0x41, 0x03, 0xa2, 0x8f, 0xa9, + 0x31, 0x13, 0x2a, 0xf0, 0x47, 0x88, 0xcb, 0x21, 0xa1, 0x15, 0xf6, 0x47, 0xb2, 0xb5, 0x6f, 0x24, + 0x58, 0x8f, 0xae, 0x53, 0x46, 0xec, 0xac, 0x63, 0x00, 0x62, 0xdb, 0x4e, 0x90, 0x74, 0xd7, 0x62, + 0x28, 0x2f, 0xc8, 0xd5, 0x1b, 0xb1, 0x90, 0x9a, 0x50, 0xb0, 0x35, 0x01, 0x98, 0xf5, 0x9c, 0xeb, + 0xb6, 0xab, 0x50, 0x0e, 0x3f, 0xf0, 0xf0, 0xaf, 0x84, 0xe2, 0x02, 0x0e, 0x02, 0x62, 0xf7, 0x2e, + 0xbc, 0x09, 0xf9, 0x53, 0x3a, 0x32, 0xed, 0xf0, 0xd9, 0x56, 0x34, 0xa2, 0x67, 0x92, 0x5c, 0xfc, + 0x4c, 0xb2, 0xff, 0x5b, 0x09, 0x36, 0x74, 0x67, 0x32, 0x6f, 0xef, 0x3e, 0x9a, 0x7b, 0x05, 0xf0, + 0x3f, 0x96, 0x3e, 0xff, 0x68, 0x64, 0x06, 0xe3, 0xe9, 0x69, 0x5d, 0x77, 0x26, 0xbb, 0x23, 0xc7, + 0x22, 0xf6, 0x68, 0xf6, 0x99, 0x93, 0xff, 0xd1, 0xdf, 0x1d, 0x51, 0xfb, 0xdd, 0x91, 0x93, 0xf8, + 0xe8, 0x79, 0x7f, 0xf6, 0xf7, 0xbf, 0x92, 0xf4, 0xa7, 0x4c, 0xf6, 0xa0, 0xb7, 0xff, 0xe7, 0xcc, + 0xd6, 0x81, 0x18, 0xae, 0x17, 0xb9, 0x47, 0xa5, 0x43, 0x8b, 0xea, 0x6c, 0xca, 0xff, 0x0b, 0x00, + 0x00, 0xff, 0xff, 0x1a, 0x28, 0x25, 0x79, 0x42, 0x1d, 0x00, 0x00, +} diff --git a/cluster-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto b/cluster-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto new file mode 100644 index 000000000000..8697a50de4a9 --- /dev/null +++ 
b/cluster-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto @@ -0,0 +1,872 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// The messages in this file describe the definitions found in .proto files. +// A valid .proto file can be translated directly to a FileDescriptorProto +// without any other information (e.g. without reading its imports). + + +syntax = "proto2"; + +package google.protobuf; +option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DescriptorProtos"; +option csharp_namespace = "Google.Protobuf.Reflection"; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// descriptor.proto must be optimized for speed because reflection-based +// algorithms don't work during bootstrapping. +option optimize_for = SPEED; + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +message FileDescriptorSet { + repeated FileDescriptorProto file = 1; +} + +// Describes a complete .proto file. +message FileDescriptorProto { + optional string name = 1; // file name, relative to root of source tree + optional string package = 2; // e.g. "foo", "foo.bar", etc. + + // Names of files imported by this file. + repeated string dependency = 3; + // Indexes of the public imported files in the dependency list above. + repeated int32 public_dependency = 10; + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + repeated int32 weak_dependency = 11; + + // All top-level definitions in this file. 
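The byte array registered in the generated `descriptor.pb.go` above is not opaque at runtime: it is a gzip-compressed, serialized `FileDescriptorProto` — the very message this .proto file is in the middle of defining. As a hedged aside (the registration name `google/protobuf/descriptor.proto` and the pre-APIv2 `proto.FileDescriptor` helper are assumptions based on how this vintage of golang/protobuf generates code), a sketch of decoding the blob back into its own schema:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/golang/protobuf/proto"
	descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	// proto.FileDescriptor returns the gzip-compressed FileDescriptorProto
	// that generated code registers at init time (the hex blob above).
	gz := proto.FileDescriptor("google/protobuf/descriptor.proto")
	if gz == nil {
		log.Fatal("descriptor not registered")
	}
	zr, err := gzip.NewReader(bytes.NewReader(gz))
	if err != nil {
		log.Fatal(err)
	}
	raw, err := ioutil.ReadAll(zr)
	if err != nil {
		log.Fatal(err)
	}

	var fd descriptor.FileDescriptorProto
	if err := proto.Unmarshal(raw, &fd); err != nil {
		log.Fatal(err)
	}
	// Walk the top-level definitions that FileDescriptorProto enumerates.
	fmt.Println("package:", fd.GetPackage())
	for _, m := range fd.GetMessageType() {
		fmt.Println("message:", m.GetName())
	}
}
```

The .proto listing continues below with the field list those getters correspond to.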
+ repeated DescriptorProto message_type = 4; + repeated EnumDescriptorProto enum_type = 5; + repeated ServiceDescriptorProto service = 6; + repeated FieldDescriptorProto extension = 7; + + optional FileOptions options = 8; + + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + optional SourceCodeInfo source_code_info = 9; + + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + optional string syntax = 12; +} + +// Describes a message type. +message DescriptorProto { + optional string name = 1; + + repeated FieldDescriptorProto field = 2; + repeated FieldDescriptorProto extension = 6; + + repeated DescriptorProto nested_type = 3; + repeated EnumDescriptorProto enum_type = 4; + + message ExtensionRange { + optional int32 start = 1; + optional int32 end = 2; + + optional ExtensionRangeOptions options = 3; + } + repeated ExtensionRange extension_range = 5; + + repeated OneofDescriptorProto oneof_decl = 8; + + optional MessageOptions options = 7; + + // Range of reserved tag numbers. Reserved tag numbers may not be used by + // fields or extension ranges in the same message. Reserved ranges may + // not overlap. + message ReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Exclusive. + } + repeated ReservedRange reserved_range = 9; + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + repeated string reserved_name = 10; +} + +message ExtensionRangeOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +// Describes a field within a message. +message FieldDescriptorProto { + enum Type { + // 0 is reserved for errors. + // Order is weird for historical reasons. + TYPE_DOUBLE = 1; + TYPE_FLOAT = 2; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + TYPE_INT64 = 3; + TYPE_UINT64 = 4; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + TYPE_INT32 = 5; + TYPE_FIXED64 = 6; + TYPE_FIXED32 = 7; + TYPE_BOOL = 8; + TYPE_STRING = 9; + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + TYPE_GROUP = 10; + TYPE_MESSAGE = 11; // Length-delimited aggregate. + + // New in version 2. + TYPE_BYTES = 12; + TYPE_UINT32 = 13; + TYPE_ENUM = 14; + TYPE_SFIXED32 = 15; + TYPE_SFIXED64 = 16; + TYPE_SINT32 = 17; // Uses ZigZag encoding. + TYPE_SINT64 = 18; // Uses ZigZag encoding. + }; + + enum Label { + // 0 is reserved for errors + LABEL_OPTIONAL = 1; + LABEL_REQUIRED = 2; + LABEL_REPEATED = 3; + }; + + optional string name = 1; + optional int32 number = 3; + optional Label label = 4; + + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + optional Type type = 5; + + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. 
Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + optional string type_name = 6; + + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + optional string extendee = 2; + + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + optional string default_value = 7; + + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + optional int32 oneof_index = 9; + + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + optional string json_name = 10; + + optional FieldOptions options = 8; +} + +// Describes a oneof. +message OneofDescriptorProto { + optional string name = 1; + optional OneofOptions options = 2; +} + +// Describes an enum type. +message EnumDescriptorProto { + optional string name = 1; + + repeated EnumValueDescriptorProto value = 2; + + optional EnumOptions options = 3; + + // Range of reserved numeric values. Reserved values may not be used by + // entries in the same enum. Reserved ranges may not overlap. + // + // Note that this is distinct from DescriptorProto.ReservedRange in that it + // is inclusive such that it can appropriately represent the entire int32 + // domain. + message EnumReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Inclusive. + } + + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + repeated EnumReservedRange reserved_range = 4; + + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. + repeated string reserved_name = 5; +} + +// Describes a value within an enum. +message EnumValueDescriptorProto { + optional string name = 1; + optional int32 number = 2; + + optional EnumValueOptions options = 3; +} + +// Describes a service. +message ServiceDescriptorProto { + optional string name = 1; + repeated MethodDescriptorProto method = 2; + + optional ServiceOptions options = 3; +} + +// Describes a method of a service. +message MethodDescriptorProto { + optional string name = 1; + + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. + optional string input_type = 2; + optional string output_type = 3; + + optional MethodOptions options = 4; + + // Identifies if client streams multiple client messages + optional bool client_streaming = 5 [default=false]; + // Identifies if server streams multiple server messages + optional bool server_streaming = 6 [default=false]; +} + + +// =================================================================== +// Options + +// Each of the definitions above may have "options" attached. 
These are +// just annotations which may cause code to be generated slightly differently +// or may contain hints for code that manipulates protocol messages. +// +// Clients may define custom options as extensions of the *Options messages. +// These extensions may not yet be known at parsing time, so the parser cannot +// store the values in them. Instead it stores them in a field in the *Options +// message called uninterpreted_option. This field must have the same name +// across all *Options messages. We then use this field to populate the +// extensions when we build a descriptor, at which point all protos have been +// parsed and so all extensions are known. +// +// Extension numbers for custom options may be chosen as follows: +// * For options which will only be used within a single application or +// organization, or for experimental options, use field numbers 50000 +// through 99999. It is up to you to ensure that you do not use the +// same number for multiple options. +// * For options which will be published and used publicly by multiple +// independent entities, e-mail protobuf-global-extension-registry@google.com +// to reserve extension numbers. Simply provide your project name (e.g. +// Objective-C plugin) and your project website (if available) -- there's no +// need to explain how you intend to use them. Usually you only need one +// extension number. You can declare multiple options with only one extension +// number by putting them in a sub-message. See the Custom Options section of +// the docs for examples: +// https://developers.google.com/protocol-buffers/docs/proto#options +// If this turns out to be popular, a web service will be set up +// to automatically assign option numbers. + + +message FileOptions { + + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + optional string java_package = 1; + + + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + optional string java_outer_classname = 8; + + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + optional bool java_multiple_files = 10 [default=false]; + + // This option does nothing. + optional bool java_generate_equals_and_hash = 20 [deprecated=true]; + + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + optional bool java_string_check_utf8 = 27 [default=false]; + + + // Generated classes can be optimized for speed or code size. 
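Because this is proto2, every scalar option field has explicit presence and is generated as a pointer in Go, so constructing these options goes through the `proto.String`/`proto.Bool` wrappers. A small sketch using the generated types from the vendored descriptor package (the field values are illustrative; `FileOptions_SPEED` belongs to the `OptimizeMode` enum declared just below):

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	// proto2 scalar fields are pointers, hence the helper wrappers.
	opts := &descriptor.FileOptions{
		JavaPackage:       proto.String("com.example.foo"),
		JavaMultipleFiles: proto.Bool(true),
		GoPackage:         proto.String("github.com/example/foo;foo"),
		OptimizeFor:       descriptor.FileOptions_SPEED.Enum(),
	}
	fmt.Println(opts.GetOptimizeFor()) // SPEED
}
```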
+ enum OptimizeMode { + SPEED = 1; // Generate complete code for parsing, serialization, + // etc. + CODE_SIZE = 2; // Use ReflectionOps to implement these methods. + LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. + } + optional OptimizeMode optimize_for = 9 [default=SPEED]; + + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + optional string go_package = 11; + + + + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + optional bool cc_generic_services = 16 [default=false]; + optional bool java_generic_services = 17 [default=false]; + optional bool py_generic_services = 18 [default=false]; + optional bool php_generic_services = 42 [default=false]; + + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + optional bool deprecated = 23 [default=false]; + + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + optional bool cc_enable_arenas = 31 [default=false]; + + + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + optional string objc_class_prefix = 36; + + // Namespace for generated classes; defaults to the package. + optional string csharp_namespace = 37; + + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + optional string swift_prefix = 39; + + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + optional string php_class_prefix = 40; + + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + optional string php_namespace = 41; + + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. + // See the documentation for the "Options" section above. + extensions 1000 to max; + + reserved 38; +} + +message MessageOptions { + // Set true to use the old proto1 MessageSet wire format for extensions. 
+ // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + optional bool message_set_wire_format = 1 [default=false]; + + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + optional bool no_standard_descriptor_accessor = 2 [default=false]; + + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + optional bool deprecated = 3 [default=false]; + + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementions still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + optional bool map_entry = 7; + + reserved 8; // javalite_serializable + reserved 9; // javanano_as_lite + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message FieldOptions { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + optional CType ctype = 1 [default = STRING]; + enum CType { + // Default mode. + STRING = 0; + + CORD = 1; + + STRING_PIECE = 2; + } + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + optional bool packed = 2; + + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). 
A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + optional JSType jstype = 6 [default = JS_NORMAL]; + enum JSType { + // Use the default type. + JS_NORMAL = 0; + + // Use JavaScript strings. + JS_STRING = 1; + + // Use JavaScript numbers. + JS_NUMBER = 2; + } + + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + optional bool lazy = 5 [default=false]; + + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + optional bool deprecated = 3 [default=false]; + + // For Google-internal migration only. Do not use. + optional bool weak = 10 [default=false]; + + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; + + reserved 4; // removed jtype +} + +message OneofOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumOptions { + + // Set this option to true to allow mapping different tag names to the same + // value. 
+ optional bool allow_alias = 2; + + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + optional bool deprecated = 3 [default=false]; + + reserved 5; // javanano_as_lite + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumValueOptions { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + optional bool deprecated = 1 [default=false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message ServiceOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + optional bool deprecated = 33 [default=false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message MethodOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + optional bool deprecated = 33 [default=false]; + + // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, + // or neither? HTTP based RPC implementation may choose GET verb for safe + // methods, and PUT verb for idempotent methods instead of the default POST. + enum IdempotencyLevel { + IDEMPOTENCY_UNKNOWN = 0; + NO_SIDE_EFFECTS = 1; // implies idempotent + IDEMPOTENT = 2; // idempotent, but may have side effects + } + optional IdempotencyLevel idempotency_level = + 34 [default=IDEMPOTENCY_UNKNOWN]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. 
returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +message UninterpretedOption { + // The name of the uninterpreted option. Each string represents a segment in + // a dot-separated name. is_extension is true iff a segment represents an + // extension (denoted with parentheses in options specs in .proto files). + // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents + // "foo.(bar.baz).qux". + message NamePart { + required string name_part = 1; + required bool is_extension = 2; + } + repeated NamePart name = 2; + + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. + optional string identifier_value = 3; + optional uint64 positive_int_value = 4; + optional int64 negative_int_value = 5; + optional double double_value = 6; + optional bytes string_value = 7; + optional string aggregate_value = 8; +} + +// =================================================================== +// Optional source code info + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +message SourceCodeInfo { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendent. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. 
+ repeated Location location = 1; + message Location { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + repeated int32 path = 1 [packed=true]; + + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + repeated int32 span = 2 [packed=true]; + + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + optional string leading_comments = 3; + optional string trailing_comments = 4; + repeated string leading_detached_comments = 6; + } +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. 
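The `[ 4, 3, 2, 7, 1 ]` walkthrough above translates mechanically into the generated Go getters. A hypothetical helper — not part of the library — that resolves a top-level message-field path of that shape:

```go
package descutil

import (
	descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

// fieldForPath resolves a SourceCodeInfo path of the form [4, m, 2, f]
// (4 = FileDescriptorProto.message_type, 2 = DescriptorProto.field) to the
// FieldDescriptorProto it names; appending 1 would point at the name span.
func fieldForPath(fd *descriptor.FileDescriptorProto, path []int32) *descriptor.FieldDescriptorProto {
	if len(path) < 4 || path[0] != 4 || path[2] != 2 {
		return nil // not a top-level message-field path
	}
	msgs := fd.GetMessageType()
	if int(path[1]) >= len(msgs) {
		return nil
	}
	fields := msgs[path[1]].GetField()
	if int(path[3]) >= len(fields) {
		return nil
	}
	return fields[path[3]]
}
```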
+message GeneratedCodeInfo { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + repeated Annotation annotation = 1; + message Annotation { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + repeated int32 path = 1 [packed=true]; + + // Identifies the filesystem path to the original source .proto. + optional string source_file = 2; + + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + optional int32 begin = 3; + + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). + optional int32 end = 4; + } +} diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/accelerators/nvidia.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/accelerators/nvidia.go index 054d206b2ac8..b0c3c0c5af9f 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/accelerators/nvidia.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/accelerators/nvidia.go @@ -26,12 +26,15 @@ import ( info "github.com/google/cadvisor/info/v1" - "github.com/golang/glog" "github.com/mindprince/gonvml" + "k8s.io/klog" ) type NvidiaManager struct { - sync.RWMutex + sync.Mutex + + // true if there are NVIDIA devices present on the node + devicesPresent bool // true if the NVML library (libnvidia-ml.so.1) was loaded successfully nvmlInitialized bool @@ -47,31 +50,20 @@ const nvidiaVendorId = "0x10de" // Setup initializes NVML if nvidia devices are present on the node. func (nm *NvidiaManager) Setup() { if !detectDevices(nvidiaVendorId) { - glog.V(4).Info("No NVIDIA devices found.") + klog.V(4).Info("No NVIDIA devices found.") return } - nm.initializeNVML() - if nm.nvmlInitialized { - return - } - go func() { - glog.V(2).Info("Starting goroutine to initialize NVML") - // TODO: use globalHousekeepingInterval - for range time.Tick(time.Minute) { - nm.initializeNVML() - if nm.nvmlInitialized { - return - } - } - }() + nm.devicesPresent = true + + initializeNVML(nm) } // detectDevices returns true if a device with given pci id is present on the node. func detectDevices(vendorId string) bool { devices, err := ioutil.ReadDir(sysFsPCIDevicesPath) if err != nil { - glog.Warningf("Error reading %q: %v", sysFsPCIDevicesPath, err) + klog.Warningf("Error reading %q: %v", sysFsPCIDevicesPath, err) return false } @@ -79,11 +71,11 @@ func detectDevices(vendorId string) bool { vendorPath := filepath.Join(sysFsPCIDevicesPath, device.Name(), "vendor") content, err := ioutil.ReadFile(vendorPath) if err != nil { - glog.V(4).Infof("Error while reading %q: %v", vendorPath, err) + klog.V(4).Infof("Error while reading %q: %v", vendorPath, err) continue } if strings.EqualFold(strings.TrimSpace(string(content)), vendorId) { - glog.V(3).Infof("Found device with vendorId %q", vendorId) + klog.V(3).Infof("Found device with vendorId %q", vendorId) return true } } @@ -91,41 +83,35 @@ func detectDevices(vendorId string) bool { } // initializeNVML initializes the NVML library and sets up the nvmlDevices map. -func (nm *NvidiaManager) initializeNVML() { +// This is defined as a variable to help in testing. 
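The nvidia.go hunk below turns `initializeNVML` into a package-level variable precisely to create a test seam: a test can swap in a stub and restore the original afterwards. A sketch of an in-package test exercising the lazy-init logic this diff adds to `GetCollector` (the test name and assertions are illustrative):

```go
package accelerators // sketch: would live beside nvidia.go to reach unexported names

import "testing"

func TestLazyNVMLInit(t *testing.T) {
	orig := initializeNVML
	defer func() { initializeNVML = orig }()

	calls := 0
	initializeNVML = func(nm *NvidiaManager) {
		calls++
		nm.nvmlInitialized = true // pretend libnvidia-ml.so.1 loaded
	}

	nm := &NvidiaManager{devicesPresent: true}
	nm.GetCollector("/unused") // first call runs the stubbed init
	nm.GetCollector("/unused") // already initialized: init not repeated
	if calls != 1 {
		t.Fatalf("initializeNVML called %d times, want 1", calls)
	}
}
```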
+var initializeNVML = func(nm *NvidiaManager) { if err := gonvml.Initialize(); err != nil { // This is under a logging level because otherwise we may cause // log spam if the drivers/nvml is not installed on the system. - glog.V(4).Infof("Could not initialize NVML: %v", err) + klog.V(4).Infof("Could not initialize NVML: %v", err) return } + nm.nvmlInitialized = true numDevices, err := gonvml.DeviceCount() if err != nil { - glog.Warningf("GPU metrics would not be available. Failed to get the number of nvidia devices: %v", err) - nm.Lock() - // Even though we won't have GPU metrics, the library was initialized and should be shutdown when exiting. - nm.nvmlInitialized = true - nm.Unlock() + klog.Warningf("GPU metrics would not be available. Failed to get the number of nvidia devices: %v", err) return } - glog.V(1).Infof("NVML initialized. Number of nvidia devices: %v", numDevices) + klog.V(1).Infof("NVML initialized. Number of nvidia devices: %v", numDevices) nm.nvidiaDevices = make(map[int]gonvml.Device, numDevices) for i := 0; i < int(numDevices); i++ { device, err := gonvml.DeviceHandleByIndex(uint(i)) if err != nil { - glog.Warningf("Failed to get nvidia device handle %d: %v", i, err) + klog.Warningf("Failed to get nvidia device handle %d: %v", i, err) continue } minorNumber, err := device.MinorNumber() if err != nil { - glog.Warningf("Failed to get nvidia device minor number: %v", err) + klog.Warningf("Failed to get nvidia device minor number: %v", err) continue } nm.nvidiaDevices[int(minorNumber)] = device } - nm.Lock() - // Doing this at the end to avoid race in accessing nvidiaDevices in GetCollector. - nm.nvmlInitialized = true - nm.Unlock() } // Destroy shuts down NVML. @@ -139,12 +125,21 @@ func (nm *NvidiaManager) Destroy() { // present in the devices.list file in the given devicesCgroupPath. func (nm *NvidiaManager) GetCollector(devicesCgroupPath string) (AcceleratorCollector, error) { nc := &NvidiaCollector{} - nm.RLock() + + if !nm.devicesPresent { + return nc, nil + } + // Makes sure that we don't call initializeNVML() concurrently and + // that we only call initializeNVML() when it's not initialized. + nm.Lock() + if !nm.nvmlInitialized { + initializeNVML(nm) + } if !nm.nvmlInitialized || len(nm.nvidiaDevices) == 0 { - nm.RUnlock() + nm.Unlock() return nc, nil } - nm.RUnlock() + nm.Unlock() nvidiaMinorNumbers, err := parseDevicesCgroup(devicesCgroupPath) if err != nil { return nc, err diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/cache/memory/memory.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/cache/memory/memory.go index 038f9b17aa74..fbeb2070d84c 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/cache/memory/memory.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/cache/memory/memory.go @@ -15,7 +15,7 @@ package memory import ( - "fmt" + "errors" "sync" "time" @@ -23,10 +23,13 @@ import ( "github.com/google/cadvisor/storage" "github.com/google/cadvisor/utils" - "github.com/golang/glog" + "k8s.io/klog" ) -// TODO(vmarmol): See about refactoring this class, we have an unecessary redirection of containerCache and InMemoryCache. +// ErrDataNotFound is the error resulting if failed to find a container in memory cache. +var ErrDataNotFound = errors.New("unable to find data in memory cache") + +// TODO(vmarmol): See about refactoring this class, we have an unnecessary redirection of containerCache and InMemoryCache. 
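With `ErrDataNotFound` exported from the memory cache, callers can distinguish "container not cached" from real failures by identity comparison rather than matching an error string built with `fmt.Errorf`. A hedged sketch of such a call site (the wrapper function and window size are assumptions):

```go
package statsutil

import (
	"time"

	"github.com/google/cadvisor/cache/memory"
	info "github.com/google/cadvisor/info/v1"
)

// recentOrEmpty treats a cache miss as "no data yet" instead of an error,
// which the exported sentinel makes possible without string matching.
func recentOrEmpty(c *memory.InMemoryCache, name string, start, end time.Time) ([]*info.ContainerStats, error) {
	stats, err := c.RecentStats(name, start, end, 60)
	if err == memory.ErrDataNotFound { // or errors.Is on newer Go
		return nil, nil
	}
	return stats, err
}
```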
// containerCache is used to store per-container information type containerCache struct { ref info.ContainerReference @@ -88,7 +91,7 @@ func (self *InMemoryCache) AddStats(cInfo *info.ContainerInfo, stats *info.Conta // may want to start a pool of goroutines to do write // operations. if err := self.backend.AddStats(cInfo, stats); err != nil { - glog.Error(err) + klog.Error(err) } } return cstore.AddStats(stats) @@ -101,7 +104,7 @@ func (self *InMemoryCache) RecentStats(name string, start, end time.Time, maxSta self.lock.RLock() defer self.lock.RUnlock() if cstore, ok = self.containerCacheMap[name]; !ok { - return fmt.Errorf("unable to find data for container %v", name) + return ErrDataNotFound } return nil }() diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/common/fsHandler.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/common/fsHandler.go index d261c983a609..2a14358eb559 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/common/fsHandler.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/common/fsHandler.go @@ -22,7 +22,7 @@ import ( "github.com/google/cadvisor/fs" - "github.com/golang/glog" + "k8s.io/klog" ) type FsHandler interface { @@ -118,7 +118,7 @@ func (fh *realFsHandler) trackUsage() { case <-time.After(fh.period): start := time.Now() if err := fh.update(); err != nil { - glog.Errorf("failed to collect filesystem stats - %v", err) + klog.Errorf("failed to collect filesystem stats - %v", err) fh.period = fh.period * 2 if fh.period > maxBackoffFactor*fh.minPeriod { fh.period = maxBackoffFactor * fh.minPeriod @@ -132,7 +132,7 @@ func (fh *realFsHandler) trackUsage() { // if the long duration is persistent either because of slow // disk or lots of containers. 
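The fsHandler hunk above doubles the polling period after each failed `du`/`find` pass and caps it at `maxBackoffFactor * minPeriod` (the success path resets to the base period elsewhere in the file). A distilled, standalone sketch of that backoff rule — the factor of 20 is an assumption here; cadvisor defines its own constant:

```go
package main

import (
	"fmt"
	"time"
)

// nextPeriod distills the rule from trackUsage: double the polling period
// on each failure, capped at maxBackoffFactor*minPeriod; reset on success.
func nextPeriod(period, minPeriod time.Duration, failed bool) time.Duration {
	const maxBackoffFactor = 20 // assumed value for illustration
	if !failed {
		return minPeriod // healthy updates poll at the base rate
	}
	period *= 2
	if cap := maxBackoffFactor * minPeriod; period > cap {
		period = cap
	}
	return period
}

func main() {
	p := time.Minute
	for i := 0; i < 6; i++ {
		p = nextPeriod(p, time.Minute, true)
		fmt.Println(p) // 2m, 4m, 8m, 16m, 20m, 20m
	}
}
```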
longOp = longOp + time.Second - glog.V(2).Infof("du and find on following dirs took %v: %v; will not log again for this container unless duration exceeds %v", duration, []string{fh.rootfs, fh.extraDir}, longOp) + klog.V(2).Infof("du and find on following dirs took %v: %v; will not log again for this container unless duration exceeds %v", duration, []string{fh.rootfs, fh.extraDir}, longOp) } } } diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/common/helpers.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/common/helpers.go index f5539b5d1ac9..d38777f93bfc 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/common/helpers.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/common/helpers.go @@ -26,8 +26,10 @@ import ( "github.com/google/cadvisor/container" info "github.com/google/cadvisor/info/v1" "github.com/google/cadvisor/utils" + "github.com/karrick/godirwalk" + "github.com/pkg/errors" - "github.com/golang/glog" + "k8s.io/klog" ) func DebugInfo(watches map[string][]string) map[string][]string { @@ -85,7 +87,7 @@ func GetSpec(cgroupPaths map[string]string, machineInfoFactory info.MachineInfoF if quota != "" && quota != "-1" { val, err := strconv.ParseUint(quota, 10, 64) if err != nil { - glog.Errorf("GetSpec: Failed to parse CPUQuota from %q: %s", path.Join(cpuRoot, "cpu.cfs_quota_us"), err) + klog.Errorf("GetSpec: Failed to parse CPUQuota from %q: %s", path.Join(cpuRoot, "cpu.cfs_quota_us"), err) } spec.Cpu.Quota = val } @@ -132,7 +134,7 @@ func readString(dirpath string, file string) string { if err != nil { // Ignore non-existent files if !os.IsNotExist(err) { - glog.Errorf("readString: Failed to read %q: %s", cgroupFile, err) + klog.Errorf("readString: Failed to read %q: %s", cgroupFile, err) } return "" } @@ -147,7 +149,7 @@ func readUInt64(dirpath string, file string) uint64 { val, err := strconv.ParseUint(out, 10, 64) if err != nil { - glog.Errorf("readUInt64: Failed to parse int %q from file %q: %s", out, path.Join(dirpath, file), err) + klog.Errorf("readUInt64: Failed to parse int %q from file %q: %s", out, path.Join(dirpath, file), err) return 0 } @@ -156,26 +158,34 @@ func readUInt64(dirpath string, file string) uint64 { // Lists all directories under "path" and outputs the results as children of "parent". func ListDirectories(dirpath string, parent string, recursive bool, output map[string]struct{}) error { - entries, err := ioutil.ReadDir(dirpath) + buf := make([]byte, godirwalk.DefaultScratchBufferSize) + return listDirectories(dirpath, parent, recursive, output, buf) +} + +func listDirectories(dirpath string, parent string, recursive bool, output map[string]struct{}, buf []byte) error { + dirents, err := godirwalk.ReadDirents(dirpath, buf) if err != nil { // Ignore if this hierarchy does not exist. - if os.IsNotExist(err) { + if os.IsNotExist(errors.Cause(err)) { err = nil } return err } - for _, entry := range entries { + for _, dirent := range dirents { // We only grab directories. - if entry.IsDir() { - name := path.Join(parent, entry.Name()) - output[name] = struct{}{} + if !dirent.IsDir() { + continue + } + dirname := dirent.Name() - // List subcontainers if asked to. - if recursive { - err := ListDirectories(path.Join(dirpath, entry.Name()), name, true, output) - if err != nil { - return err - } + name := path.Join(parent, dirname) + output[name] = struct{}{} + + // List subcontainers if asked to. 
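The helpers.go hunk above replaces `ioutil.ReadDir` with `godirwalk.ReadDirents` plus a reused scratch buffer, which reads directory-entry types straight from the readdir syscall instead of stat-ing every entry — a meaningful saving when walking deep cgroup trees. A standalone sketch of that API (the cgroup path is an assumption):

```go
package main

import (
	"fmt"
	"log"

	"github.com/karrick/godirwalk"
)

func main() {
	// One scratch buffer is allocated up front and reused across calls,
	// mirroring what listDirectories threads through its recursion.
	buf := make([]byte, godirwalk.DefaultScratchBufferSize)

	dirents, err := godirwalk.ReadDirents("/sys/fs/cgroup", buf) // path assumed
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range dirents {
		if d.IsDir() { // directories only, as in ListDirectories
			fmt.Println(d.Name())
		}
	}
}
```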
+ if recursive { + err := listDirectories(path.Join(dirpath, dirname), name, true, output, buf) + if err != nil { + return err } } } diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/common/inotify_watcher.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/common/inotify_watcher.go index 16e2f2c9b0f1..787f599a22b7 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/common/inotify_watcher.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/common/inotify_watcher.go @@ -17,7 +17,7 @@ package common import ( "sync" - "golang.org/x/exp/inotify" + inotify "github.com/sigma/go-inotify" ) // Watcher for container-related inotify events in the cgroup hierarchy. @@ -78,7 +78,7 @@ func (iw *InotifyWatcher) RemoveWatch(containerName, dir string) (bool, error) { iw.lock.Lock() defer iw.lock.Unlock() - // If we don't have a watch registed for this, just return. + // If we don't have a watch registered for this, just return. cgroupsWatched, ok := iw.containersWatched[containerName] if !ok { return false, nil diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/container.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/container.go index 4550452481ef..37c0a7d2ab56 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/container.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/container.go @@ -36,6 +36,7 @@ const ( ContainerTypeSystemd ContainerTypeCrio ContainerTypeContainerd + ContainerTypeMesos ) // Interface for container operation handlers. diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/containerd/factory.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/containerd/factory.go index dba43ef32e21..2e2bb33346ac 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/containerd/factory.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/containerd/factory.go @@ -21,8 +21,8 @@ import ( "regexp" "strings" - "github.com/golang/glog" "golang.org/x/net/context" + "k8s.io/klog" "github.com/google/cadvisor/container" "github.com/google/cadvisor/container/libcontainer" @@ -47,8 +47,8 @@ type containerdFactory struct { // Information about the mounted cgroup subsystems. cgroupSubsystems libcontainer.CgroupSubsystems // Information about mounted filesystems. - fsInfo fs.FsInfo - ignoreMetrics container.MetricSet + fsInfo fs.FsInfo + includedMetrics container.MetricSet } func (self *containerdFactory) String() string { @@ -70,7 +70,7 @@ func (self *containerdFactory) NewContainerHandler(name string, inHostNamespace &self.cgroupSubsystems, inHostNamespace, metadataEnvs, - self.ignoreMetrics, + self.includedMetrics, ) } @@ -117,7 +117,7 @@ func (self *containerdFactory) DebugInfo() map[string][]string { } // Register root container before running this function! 
-func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics container.MetricSet) error { +func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics container.MetricSet) error { client, err := Client() if err != nil { return fmt.Errorf("unable to create containerd client: %v", err) @@ -133,14 +133,14 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c return fmt.Errorf("failed to get cgroup subsystems: %v", err) } - glog.V(1).Infof("Registering containerd factory") + klog.V(1).Infof("Registering containerd factory") f := &containerdFactory{ cgroupSubsystems: cgroupSubsystems, client: client, fsInfo: fsInfo, machineInfoFactory: factory, version: containerdVersion, - ignoreMetrics: ignoreMetrics, + includedMetrics: includedMetrics, } container.RegisterContainerHandlerFactory(f, []watcher.ContainerWatchSource{watcher.Raw}) diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/containerd/handler.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/containerd/handler.go index a8095f42703d..82aa8082c440 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/containerd/handler.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/containerd/handler.go @@ -48,7 +48,7 @@ type containerdContainerHandler struct { // Image name used for this container. image string // Filesystem handler. - ignoreMetrics container.MetricSet + includedMetrics container.MetricSet libcontainerHandler *containerlibcontainer.Handler } @@ -64,7 +64,7 @@ func newContainerdContainerHandler( cgroupSubsystems *containerlibcontainer.CgroupSubsystems, inHostNamespace bool, metadataEnvs []string, - ignoreMetrics container.MetricSet, + includedMetrics container.MetricSet, ) (container.ContainerHandler, error) { // Create the cgroup paths. cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints)) @@ -127,7 +127,7 @@ func newContainerdContainerHandler( Aliases: []string{id, name}, } - libcontainerHandler := containerlibcontainer.NewHandler(cgroupManager, rootfs, int(taskPid), ignoreMetrics) + libcontainerHandler := containerlibcontainer.NewHandler(cgroupManager, rootfs, int(taskPid), includedMetrics) handler := &containerdContainerHandler{ machineInfoFactory: machineInfoFactory, @@ -135,7 +135,7 @@ func newContainerdContainerHandler( fsInfo: fsInfo, envs: make(map[string]string), labels: cntr.Labels, - ignoreMetrics: ignoreMetrics, + includedMetrics: includedMetrics, reference: containerReference, libcontainerHandler: libcontainerHandler, } @@ -159,9 +159,9 @@ func (self *containerdContainerHandler) ContainerReference() (info.ContainerRefe func (self *containerdContainerHandler) needNet() bool { // Since containerd does not handle networking ideally we need to return based - // on ignoreMetrics list. Here the assumption is the presence of cri-containerd + // on includedMetrics list. 
Here the assumption is the presence of cri-containerd // label - if !self.ignoreMetrics.Has(container.NetworkUsageMetrics) { + if self.includedMetrics.Has(container.NetworkUsageMetrics) { //TODO change it to exported cri-containerd constants return self.labels["io.cri-containerd.kind"] == "sandbox" } @@ -186,7 +186,7 @@ func (self *containerdContainerHandler) getFsStats(stats *info.ContainerStats) e return err } - if !self.ignoreMetrics.Has(container.DiskIOMetrics) { + if self.includedMetrics.Has(container.DiskIOMetrics) { common.AssignDeviceNamesToDiskStats((*common.MachineInfoNamer)(mi), &stats.DiskIo) } return nil diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/crio/factory.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/crio/factory.go index 0c77db69ed10..510b66f91c52 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/crio/factory.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/crio/factory.go @@ -26,7 +26,7 @@ import ( info "github.com/google/cadvisor/info/v1" "github.com/google/cadvisor/manager/watcher" - "github.com/golang/glog" + "k8s.io/klog" ) // The namespace under which crio aliases are unique. @@ -55,7 +55,7 @@ type crioFactory struct { // Information about mounted filesystems. fsInfo fs.FsInfo - ignoreMetrics container.MetricSet + includedMetrics container.MetricSet client crioClient } @@ -81,7 +81,7 @@ func (self *crioFactory) NewContainerHandler(name string, inHostNamespace bool) &self.cgroupSubsystems, inHostNamespace, metadataEnvs, - self.ignoreMetrics, + self.includedMetrics, ) return } @@ -136,7 +136,7 @@ var ( ) // Register root container before running this function! -func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics container.MetricSet) error { +func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics container.MetricSet) error { client, err := Client() if err != nil { return err @@ -154,7 +154,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c return fmt.Errorf("failed to get cgroup subsystems: %v", err) } - glog.V(1).Infof("Registering CRI-O factory") + klog.V(1).Infof("Registering CRI-O factory") f := &crioFactory{ client: client, cgroupSubsystems: cgroupSubsystems, @@ -162,7 +162,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c machineInfoFactory: factory, storageDriver: storageDriver(info.StorageDriver), storageDir: info.StorageRoot, - ignoreMetrics: ignoreMetrics, + includedMetrics: includedMetrics, } container.RegisterContainerHandlerFactory(f, []watcher.ContainerWatchSource{watcher.Raw}) diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/crio/handler.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/crio/handler.go index 4d7efbf4002f..d17ba6d932f3 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/crio/handler.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/crio/handler.go @@ -63,7 +63,7 @@ type crioContainerHandler struct { // The IP address of the container ipAddress string - ignoreMetrics container.MetricSet + includedMetrics container.MetricSet reference info.ContainerReference @@ -83,7 +83,7 @@ func newCrioContainerHandler( cgroupSubsystems *containerlibcontainer.CgroupSubsystems, inHostNamespace bool, metadataEnvs []string, - ignoreMetrics container.MetricSet, + includedMetrics container.MetricSet, ) (container.ContainerHandler, error) { // 
Create the cgroup paths. cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints)) @@ -141,7 +141,7 @@ func newCrioContainerHandler( Namespace: CrioNamespace, } - libcontainerHandler := containerlibcontainer.NewHandler(cgroupManager, rootFs, cInfo.Pid, ignoreMetrics) + libcontainerHandler := containerlibcontainer.NewHandler(cgroupManager, rootFs, cInfo.Pid, includedMetrics) // TODO: extract object mother method handler := &crioContainerHandler{ @@ -152,7 +152,7 @@ func newCrioContainerHandler( rootfsStorageDir: rootfsStorageDir, envs: make(map[string]string), labels: cInfo.Labels, - ignoreMetrics: ignoreMetrics, + includedMetrics: includedMetrics, reference: containerReference, libcontainerHandler: libcontainerHandler, } @@ -171,12 +171,12 @@ func newCrioContainerHandler( handler.ipAddress = cInfo.IP // we optionally collect disk usage metrics - if !ignoreMetrics.Has(container.DiskUsageMetrics) { + if includedMetrics.Has(container.DiskUsageMetrics) { handler.fsHandler = common.NewFsHandler(common.DefaultPeriod, rootfsStorageDir, storageLogDir, fsInfo) } // TODO for env vars we wanted to show from container.Config.Env from whitelist //for _, exposedEnv := range metadataEnvs { - //glog.V(4).Infof("TODO env whitelist: %v", exposedEnv) + //klog.V(4).Infof("TODO env whitelist: %v", exposedEnv) //} return handler, nil @@ -199,14 +199,14 @@ func (self *crioContainerHandler) ContainerReference() (info.ContainerReference, } func (self *crioContainerHandler) needNet() bool { - if !self.ignoreMetrics.Has(container.NetworkUsageMetrics) { + if self.includedMetrics.Has(container.NetworkUsageMetrics) { return self.labels["io.kubernetes.container.name"] == "POD" } return false } func (self *crioContainerHandler) GetSpec() (info.ContainerSpec, error) { - hasFilesystem := !self.ignoreMetrics.Has(container.DiskUsageMetrics) + hasFilesystem := self.includedMetrics.Has(container.DiskUsageMetrics) spec, err := common.GetSpec(self.cgroupPaths, self.machineInfoFactory, self.needNet(), hasFilesystem) spec.Labels = self.labels @@ -222,11 +222,11 @@ func (self *crioContainerHandler) getFsStats(stats *info.ContainerStats) error { return err } - if !self.ignoreMetrics.Has(container.DiskIOMetrics) { + if self.includedMetrics.Has(container.DiskIOMetrics) { common.AssignDeviceNamesToDiskStats((*common.MachineInfoNamer)(mi), &stats.DiskIo) } - if self.ignoreMetrics.Has(container.DiskUsageMetrics) { + if !self.includedMetrics.Has(container.DiskUsageMetrics) { return nil } var device string diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/docker/factory.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/docker/factory.go index 9eb1ff526d08..7502781c43cb 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/docker/factory.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/docker/factory.go @@ -36,8 +36,8 @@ import ( "github.com/google/cadvisor/zfs" docker "github.com/docker/docker/client" - "github.com/golang/glog" "golang.org/x/net/context" + "k8s.io/klog" ) var ArgDockerEndpoint = flag.String("docker", "unix:///var/run/docker.sock", "docker endpoint") @@ -110,7 +110,7 @@ type dockerFactory struct { dockerAPIVersion []int - ignoreMetrics container.MetricSet + includedMetrics container.MetricSet thinPoolName string thinPoolWatcher *devicemapper.ThinPoolWatcher @@ -141,7 +141,7 @@ func (self *dockerFactory) NewContainerHandler(name string, inHostNamespace bool inHostNamespace, metadataEnvs, self.dockerVersion, - 
self.ignoreMetrics, + self.includedMetrics, self.thinPoolName, self.thinPoolWatcher, self.zfsWatcher, @@ -309,7 +309,7 @@ func ensureThinLsKernelVersion(kernelVersion string) error { } // Register root container before running this function! -func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics container.MetricSet) error { +func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics container.MetricSet) error { client, err := Client() if err != nil { return fmt.Errorf("unable to communicate with docker daemon: %v", err) @@ -337,7 +337,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c if storageDriver(dockerInfo.Driver) == devicemapperStorageDriver { thinPoolWatcher, err = startThinPoolWatcher(dockerInfo) if err != nil { - glog.Errorf("devicemapper filesystem stats will not be reported: %v", err) + klog.Errorf("devicemapper filesystem stats will not be reported: %v", err) } // Safe to ignore error - driver status should always be populated. @@ -349,11 +349,11 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c if storageDriver(dockerInfo.Driver) == zfsStorageDriver { zfsWatcher, err = startZfsWatcher(dockerInfo) if err != nil { - glog.Errorf("zfs filesystem stats will not be reported: %v", err) + klog.Errorf("zfs filesystem stats will not be reported: %v", err) } } - glog.V(1).Infof("Registering Docker factory") + klog.V(1).Infof("Registering Docker factory") f := &dockerFactory{ cgroupSubsystems: cgroupSubsystems, client: client, @@ -363,7 +363,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c machineInfoFactory: factory, storageDriver: storageDriver(dockerInfo.Driver), storageDir: RootDir(), - ignoreMetrics: ignoreMetrics, + includedMetrics: includedMetrics, thinPoolName: thinPoolName, thinPoolWatcher: thinPoolWatcher, zfsWatcher: zfsWatcher, diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/docker/handler.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/docker/handler.go index 64726053a755..638350c09293 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/docker/handler.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/docker/handler.go @@ -34,10 +34,10 @@ import ( dockercontainer "github.com/docker/docker/api/types/container" docker "github.com/docker/docker/client" - "github.com/golang/glog" cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs" libcontainerconfigs "github.com/opencontainers/runc/libcontainer/configs" "golang.org/x/net/context" + "k8s.io/klog" ) const ( @@ -83,7 +83,7 @@ type dockerContainerHandler struct { // The IP address of the container ipAddress string - ignoreMetrics container.MetricSet + includedMetrics container.MetricSet // the devicemapper poolname poolName string @@ -128,7 +128,7 @@ func newDockerContainerHandler( inHostNamespace bool, metadataEnvs []string, dockerVersion []int, - ignoreMetrics container.MetricSet, + includedMetrics container.MetricSet, thinPoolName string, thinPoolWatcher *devicemapper.ThinPoolWatcher, zfsWatcher *zfs.ZfsWatcher, @@ -203,7 +203,7 @@ func newDockerContainerHandler( rootfsStorageDir: rootfsStorageDir, envs: make(map[string]string), labels: ctnr.Config.Labels, - ignoreMetrics: ignoreMetrics, + includedMetrics: includedMetrics, zfsParent: zfsParent, } // Timestamp returned by Docker is in time.RFC3339Nano format. 
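[Editor's aside: the hunk just below guards against a malformed `Created` timestamp; Docker reports creation time in `time.RFC3339Nano`. A minimal, self-contained illustration of that parse step — the names here are illustrative, not the vendored handler's:]

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Docker's Created field uses RFC3339Nano, e.g. "2018-11-26T10:00:00.123456789Z".
	created := "2018-11-26T10:00:00.123456789Z"
	ts, err := time.Parse(time.RFC3339Nano, created)
	if err != nil {
		// Mirrors the handler's behavior: report a descriptive error rather than panic.
		fmt.Printf("failed to parse the create timestamp %q: %v\n", created, err)
		return
	}
	fmt.Println("container created at:", ts.UTC())
}
```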
@@ -212,7 +212,7 @@ func newDockerContainerHandler( // This should not happen, report the error just in case return nil, fmt.Errorf("failed to parse the create timestamp %q for container %q: %v", ctnr.Created, id, err) } - handler.libcontainerHandler = containerlibcontainer.NewHandler(cgroupManager, rootFs, ctnr.State.Pid, ignoreMetrics) + handler.libcontainerHandler = containerlibcontainer.NewHandler(cgroupManager, rootFs, ctnr.State.Pid, includedMetrics) // Add the name and bare ID as aliases of the container. handler.reference = info.ContainerReference{ @@ -228,7 +228,7 @@ func newDockerContainerHandler( handler.labels["restartcount"] = strconv.Itoa(ctnr.RestartCount) } - // Obtain the IP address for the contianer. + // Obtain the IP address for the container. // If the NetworkMode starts with 'container:' then we need to use the IP address of the container specified. // This happens in cases such as kubernetes where the containers doesn't have an IP address itself and we need to use the pod's address ipAddress := ctnr.NetworkSettings.IPAddress @@ -244,7 +244,7 @@ func newDockerContainerHandler( handler.ipAddress = ipAddress - if !ignoreMetrics.Has(container.DiskUsageMetrics) { + if includedMetrics.Has(container.DiskUsageMetrics) { handler.fsHandler = &dockerFsHandler{ fsHandler: common.NewFsHandler(common.DefaultPeriod, rootfsStorageDir, otherStorageDir, fsInfo), thinPoolWatcher: thinPoolWatcher, @@ -309,7 +309,7 @@ func (h *dockerFsHandler) Usage() common.FsUsage { // TODO: ideally we should keep track of how many times we failed to get the usage for this // device vs how many refreshes of the cache there have been, and display an error e.g. if we've // had at least 1 refresh and we still can't find the device. - glog.V(5).Infof("unable to get fs usage from thin pool for device %s: %v", h.deviceID, err) + klog.V(5).Infof("unable to get fs usage from thin pool for device %s: %v", h.deviceID, err) } else { usage.BaseUsageBytes = thinPoolUsage usage.TotalUsageBytes += thinPoolUsage @@ -319,7 +319,7 @@ func (h *dockerFsHandler) Usage() common.FsUsage { if h.zfsWatcher != nil { zfsUsage, err := h.zfsWatcher.GetUsage(h.zfsFilesystem) if err != nil { - glog.V(5).Infof("unable to get fs usage from zfs for filesystem %s: %v", h.zfsFilesystem, err) + klog.V(5).Infof("unable to get fs usage from zfs for filesystem %s: %v", h.zfsFilesystem, err) } else { usage.BaseUsageBytes = zfsUsage usage.TotalUsageBytes += zfsUsage @@ -345,14 +345,14 @@ func (self *dockerContainerHandler) ContainerReference() (info.ContainerReferenc } func (self *dockerContainerHandler) needNet() bool { - if !self.ignoreMetrics.Has(container.NetworkUsageMetrics) { + if self.includedMetrics.Has(container.NetworkUsageMetrics) { return !self.networkMode.IsContainer() } return false } func (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) { - hasFilesystem := !self.ignoreMetrics.Has(container.DiskUsageMetrics) + hasFilesystem := self.includedMetrics.Has(container.DiskUsageMetrics) spec, err := common.GetSpec(self.cgroupPaths, self.machineInfoFactory, self.needNet(), hasFilesystem) spec.Labels = self.labels @@ -369,11 +369,11 @@ func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error return err } - if !self.ignoreMetrics.Has(container.DiskIOMetrics) { + if self.includedMetrics.Has(container.DiskIOMetrics) { common.AssignDeviceNamesToDiskStats((*common.MachineInfoNamer)(mi), &stats.DiskIo) } - if self.ignoreMetrics.Has(container.DiskUsageMetrics) { + if 
!self.includedMetrics.Has(container.DiskUsageMetrics) { return nil } var device string diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/factory.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/factory.go index cf5b463e0e42..ae03960ea098 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/factory.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/factory.go @@ -20,7 +20,7 @@ import ( "github.com/google/cadvisor/manager/watcher" - "github.com/golang/glog" + "k8s.io/klog" ) type ContainerHandlerFactory interface { @@ -51,7 +51,9 @@ const ( NetworkUsageMetrics MetricKind = "network" NetworkTcpUsageMetrics MetricKind = "tcp" NetworkUdpUsageMetrics MetricKind = "udp" + AcceleratorUsageMetrics MetricKind = "accelerator" AppMetrics MetricKind = "app" + ProcessMetrics MetricKind = "process" ) func (mk MetricKind) String() string { @@ -104,18 +106,18 @@ func NewContainerHandler(name string, watchType watcher.ContainerWatchSource, in for _, factory := range factories[watchType] { canHandle, canAccept, err := factory.CanHandleAndAccept(name) if err != nil { - glog.V(4).Infof("Error trying to work out if we can handle %s: %v", name, err) + klog.V(4).Infof("Error trying to work out if we can handle %s: %v", name, err) } if canHandle { if !canAccept { - glog.V(3).Infof("Factory %q can handle container %q, but ignoring.", factory, name) + klog.V(3).Infof("Factory %q can handle container %q, but ignoring.", factory, name) return nil, false, nil } - glog.V(3).Infof("Using factory %q for container %q", factory, name) + klog.V(3).Infof("Using factory %q for container %q", factory, name) handle, err := factory.NewContainerHandler(name, inHostNamespace) return handle, canAccept, err } else { - glog.V(4).Infof("Factory %q was unable to handle container %q", factory, name) + klog.V(4).Infof("Factory %q was unable to handle container %q", factory, name) } } diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/libcontainer/handler.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/libcontainer/handler.go index 9bc5f653ba8c..5b44fb7437bf 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/libcontainer/handler.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/libcontainer/handler.go @@ -29,9 +29,9 @@ import ( info "github.com/google/cadvisor/info/v1" "bytes" - "github.com/golang/glog" "github.com/opencontainers/runc/libcontainer" "github.com/opencontainers/runc/libcontainer/cgroups" + "k8s.io/klog" ) /* @@ -43,16 +43,16 @@ type Handler struct { cgroupManager cgroups.Manager rootFs string pid int - ignoreMetrics container.MetricSet + includedMetrics container.MetricSet pidMetricsCache map[int]*info.CpuSchedstat } -func NewHandler(cgroupManager cgroups.Manager, rootFs string, pid int, ignoreMetrics container.MetricSet) *Handler { +func NewHandler(cgroupManager cgroups.Manager, rootFs string, pid int, includedMetrics container.MetricSet) *Handler { return &Handler{ cgroupManager: cgroupManager, rootFs: rootFs, pid: pid, - ignoreMetrics: ignoreMetrics, + includedMetrics: includedMetrics, pidMetricsCache: make(map[int]*info.CpuSchedstat), } } @@ -66,17 +66,17 @@ func (h *Handler) GetStats() (*info.ContainerStats, error) { libcontainerStats := &libcontainer.Stats{ CgroupStats: cgroupStats, } - withPerCPU := !h.ignoreMetrics.Has(container.PerCpuUsageMetrics) + withPerCPU := h.includedMetrics.Has(container.PerCpuUsageMetrics) stats := 
newContainerStats(libcontainerStats, withPerCPU) - if !h.ignoreMetrics.Has(container.ProcessSchedulerMetrics) { + if h.includedMetrics.Has(container.ProcessSchedulerMetrics) { pids, err := h.cgroupManager.GetAllPids() if err != nil { - glog.V(4).Infof("Could not get PIDs for container %d: %v", h.pid, err) + klog.V(4).Infof("Could not get PIDs for container %d: %v", h.pid, err) } else { stats.Cpu.Schedstat, err = schedulerStatsFromProcs(h.rootFs, pids, h.pidMetricsCache) if err != nil { - glog.V(4).Infof("Unable to get Process Scheduler Stats: %v", err) + klog.V(4).Infof("Unable to get Process Scheduler Stats: %v", err) } } } @@ -85,44 +85,56 @@ func (h *Handler) GetStats() (*info.ContainerStats, error) { if h.pid == 0 { return stats, nil } - if !h.ignoreMetrics.Has(container.NetworkUsageMetrics) { + if h.includedMetrics.Has(container.NetworkUsageMetrics) { netStats, err := networkStatsFromProc(h.rootFs, h.pid) if err != nil { - glog.V(4).Infof("Unable to get network stats from pid %d: %v", h.pid, err) + klog.V(4).Infof("Unable to get network stats from pid %d: %v", h.pid, err) } else { stats.Network.Interfaces = append(stats.Network.Interfaces, netStats...) } } - if !h.ignoreMetrics.Has(container.NetworkTcpUsageMetrics) { + if h.includedMetrics.Has(container.NetworkTcpUsageMetrics) { t, err := tcpStatsFromProc(h.rootFs, h.pid, "net/tcp") if err != nil { - glog.V(4).Infof("Unable to get tcp stats from pid %d: %v", h.pid, err) + klog.V(4).Infof("Unable to get tcp stats from pid %d: %v", h.pid, err) } else { stats.Network.Tcp = t } t6, err := tcpStatsFromProc(h.rootFs, h.pid, "net/tcp6") if err != nil { - glog.V(4).Infof("Unable to get tcp6 stats from pid %d: %v", h.pid, err) + klog.V(4).Infof("Unable to get tcp6 stats from pid %d: %v", h.pid, err) } else { stats.Network.Tcp6 = t6 } } - if !h.ignoreMetrics.Has(container.NetworkUdpUsageMetrics) { + if h.includedMetrics.Has(container.NetworkUdpUsageMetrics) { u, err := udpStatsFromProc(h.rootFs, h.pid, "net/udp") if err != nil { - glog.V(4).Infof("Unable to get udp stats from pid %d: %v", h.pid, err) + klog.V(4).Infof("Unable to get udp stats from pid %d: %v", h.pid, err) } else { stats.Network.Udp = u } u6, err := udpStatsFromProc(h.rootFs, h.pid, "net/udp6") if err != nil { - glog.V(4).Infof("Unable to get udp6 stats from pid %d: %v", h.pid, err) + klog.V(4).Infof("Unable to get udp6 stats from pid %d: %v", h.pid, err) } else { stats.Network.Udp6 = u6 } } + if h.includedMetrics.Has(container.ProcessMetrics) { + paths := h.cgroupManager.GetPaths() + path, ok := paths["cpu"] + if !ok { + klog.V(4).Infof("Could not find cgroups CPU for container %d", h.pid) + } else { + stats.Processes, err = processStatsFromProcs(h.rootFs, path) + if err != nil { + klog.V(4).Infof("Unable to get Process Stats: %v", err) + } + } + } // For backwards compatibility. if len(stats.Network.Interfaces) > 0 { @@ -132,6 +144,41 @@ func (h *Handler) GetStats() (*info.ContainerStats, error) { return stats, nil } +func processStatsFromProcs(rootFs string, cgroupPath string) (info.ProcessStats, error) { + var fdCount uint64 + filePath := path.Join(cgroupPath, "cgroup.procs") + out, err := ioutil.ReadFile(filePath) + if err != nil { + return info.ProcessStats{}, fmt.Errorf("couldn't open cpu cgroup procs file %v : %v", filePath, err) + } + + pids := strings.Split(string(out), "\n") + + // EOL is also treated as a new line while reading "cgroup.procs" file with ioutil.ReadFile. + // The last value is an empty string "". 
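[Editor's aside: the new `processStatsFromProcs` around this point hinges on one subtlety, whose worked example continues in the comment below: `ioutil.ReadFile` of `cgroup.procs` returns content with a trailing newline, so splitting on `"\n"` yields a final empty element that must be dropped before counting PIDs. A standalone sketch of that parse, under the same assumption:]

```go
package main

import (
	"fmt"
	"strings"
)

// splitPids mimics how the handler turns the raw contents of "cgroup.procs"
// into a PID list: split on newlines, then drop the empty element produced
// by the file's trailing newline.
func splitPids(raw string) []string {
	pids := strings.Split(raw, "\n")
	if len(pids) != 0 && pids[len(pids)-1] == "" {
		pids = pids[:len(pids)-1]
	}
	return pids
}

func main() {
	fmt.Println(splitPids("22\n1223\n")) // [22 1223], not [22 1223 ""]
}
```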
Ex: pids = ["22", "1223", ""] + // Trim the last value + if len(pids) != 0 && pids[len(pids)-1] == "" { + pids = pids[:len(pids)-1] + } + + for _, pid := range pids { + dirPath := path.Join(rootFs, "/proc", pid, "fd") + fds, err := ioutil.ReadDir(dirPath) + if err != nil { + klog.V(4).Infof("error while listing directory %q to measure fd count: %v", dirPath, err) + continue + } + fdCount += uint64(len(fds)) + } + + processStats := info.ProcessStats{ + ProcessCount: uint64(len(pids)), + FdCount: fdCount, + } + + return processStats, nil +} + func schedulerStatsFromProcs(rootFs string, pids []int, pidMetricsCache map[int]*info.CpuSchedstat) (info.CpuSchedstat, error) { for _, pid := range pids { f, err := os.Open(path.Join(rootFs, "proc", strconv.Itoa(pid), "schedstat")) @@ -451,13 +498,13 @@ func setCpuStats(s *cgroups.Stats, ret *info.ContainerStats, withPerCPU bool) { // We intentionally ignore these extra zeroes. numActual, err := numCpusFunc() if err != nil { - glog.Errorf("unable to determine number of actual cpus; defaulting to maximum possible number: errno %v", err) + klog.Errorf("unable to determine number of actual cpus; defaulting to maximum possible number: errno %v", err) numActual = numPossible } if numActual > numPossible { // The real number of cores should never be greater than the number of // datapoints reported in cpu usage. - glog.Errorf("PercpuUsage had %v cpus, but the actual number is %v; ignoring extra CPUs", numPossible, numActual) + klog.Errorf("PercpuUsage had %v cpus, but the actual number is %v; ignoring extra CPUs", numPossible, numActual) } numActual = minUint32(numPossible, numActual) ret.Cpu.Usage.PerCpu = make([]uint64, numActual) @@ -498,14 +545,17 @@ func setMemoryStats(s *cgroups.Stats, ret *info.ContainerStats) { ret.Memory.Usage = s.MemoryStats.Usage.Usage ret.Memory.MaxUsage = s.MemoryStats.Usage.MaxUsage ret.Memory.Failcnt = s.MemoryStats.Usage.Failcnt - ret.Memory.Cache = s.MemoryStats.Stats["cache"] if s.MemoryStats.UseHierarchy { + ret.Memory.Cache = s.MemoryStats.Stats["total_cache"] ret.Memory.RSS = s.MemoryStats.Stats["total_rss"] ret.Memory.Swap = s.MemoryStats.Stats["total_swap"] + ret.Memory.MappedFile = s.MemoryStats.Stats["total_mapped_file"] } else { + ret.Memory.Cache = s.MemoryStats.Stats["cache"] ret.Memory.RSS = s.MemoryStats.Stats["rss"] ret.Memory.Swap = s.MemoryStats.Stats["swap"] + ret.Memory.MappedFile = s.MemoryStats.Stats["mapped_file"] } if v, ok := s.MemoryStats.Stats["pgfault"]; ok { ret.Memory.ContainerData.Pgfault = v diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/libcontainer/helpers.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/libcontainer/helpers.go index d033f7bfc502..05554465caa7 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/libcontainer/helpers.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/libcontainer/helpers.go @@ -19,8 +19,8 @@ import ( info "github.com/google/cadvisor/info/v1" - "github.com/golang/glog" "github.com/opencontainers/runc/libcontainer/cgroups" + "k8s.io/klog" ) type CgroupSubsystems struct { @@ -61,7 +61,7 @@ func getCgroupSubsystemsHelper(allCgroups []cgroups.Mount) (CgroupSubsystems, er } if _, ok := mountPoints[subsystem]; ok { // duplicate mount for this subsystem; use the first one we saw - glog.V(5).Infof("skipping %s, already using mount at %s", mount.Mountpoint, mountPoints[subsystem]) + klog.V(5).Infof("skipping %s, already using mount at %s", mount.Mountpoint, mountPoints[subsystem]) 
continue } if _, ok := recordedMountpoints[mount.Mountpoint]; !ok { diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/mesos/client.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/mesos/client.go new file mode 100644 index 000000000000..fa1beb90b41b --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/mesos/client.go @@ -0,0 +1,213 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mesos + +import ( + "fmt" + "github.com/Rican7/retry" + "github.com/Rican7/retry/strategy" + "github.com/mesos/mesos-go/api/v1/lib" + "github.com/mesos/mesos-go/api/v1/lib/agent" + "github.com/mesos/mesos-go/api/v1/lib/agent/calls" + mclient "github.com/mesos/mesos-go/api/v1/lib/client" + "github.com/mesos/mesos-go/api/v1/lib/encoding/codecs" + "github.com/mesos/mesos-go/api/v1/lib/httpcli" + "net/url" + "sync" +) + +const ( + maxRetryAttempts = 3 + invalidPID = -1 +) + +var ( + mesosClientOnce sync.Once + mesosClient *client +) + +type client struct { + hc *httpcli.Client +} + +type mesosAgentClient interface { + ContainerInfo(id string) (*containerInfo, error) + ContainerPid(id string) (int, error) +} + +type containerInfo struct { + cntr *mContainer + labels map[string]string +} + +// Client is an interface to query mesos agent http endpoints +func Client() (mesosAgentClient, error) { + mesosClientOnce.Do(func() { + // Start Client + apiURL := url.URL{ + Scheme: "http", + Host: *MesosAgentAddress, + Path: "/api/v1", + } + + mesosClient = &client{ + hc: httpcli.New( + httpcli.Endpoint(apiURL.String()), + httpcli.Codec(codecs.ByMediaType[codecs.MediaTypeProtobuf]), + httpcli.Do(httpcli.With(httpcli.Timeout(*MesosAgentTimeout))), + ), + } + }) + + _, err := mesosClient.getVersion() + if err != nil { + return nil, fmt.Errorf("failed to get version") + } + return mesosClient, nil +} + +// ContainerInfo returns the container information of the given container id +func (self *client) ContainerInfo(id string) (*containerInfo, error) { + c, err := self.getContainer(id) + if err != nil { + return nil, err + } + + // Get labels of the container + l, err := self.getLabels(c) + if err != nil { + return nil, err + } + + return &containerInfo{ + cntr: c, + labels: l, + }, nil +} + +// Get the Pid of the container +func (self *client) ContainerPid(id string) (int, error) { + var pid int + var err error + err = retry.Retry( + func(attempt uint) error { + c, err := self.ContainerInfo(id) + if err != nil { + return err + } + + if c.cntr.ContainerStatus != nil { + pid = int(*c.cntr.ContainerStatus.ExecutorPID) + } else { + err = fmt.Errorf("error fetching Pid") + } + return err + }, + strategy.Limit(maxRetryAttempts), + ) + if err != nil { + return invalidPID, fmt.Errorf("failed to fetch pid") + } + return pid, err +} + +func (self *client) getContainer(id string) (*mContainer, error) { + // Get all containers + cntrs, err := self.getContainers() + if err != nil { + return nil, err + } 
+ + // Check if there is a container with given id and return the container + for _, c := range cntrs.Containers { + if c.ContainerID.Value == id { + return &c, nil + } + } + return nil, fmt.Errorf("can't locate container %s", id) +} + +func (self *client) getVersion() (string, error) { + req := calls.NonStreaming(calls.GetVersion()) + result, err := self.fetchAndDecode(req) + if err != nil { + return "", fmt.Errorf("failed to get mesos version: %v", err) + } + version := result.GetVersion + + if version == nil { + return "", fmt.Errorf("failed to get mesos version") + } + return version.VersionInfo.Version, nil +} + +func (self *client) getContainers() (mContainers, error) { + req := calls.NonStreaming(calls.GetContainers()) + result, err := self.fetchAndDecode(req) + if err != nil { + return nil, fmt.Errorf("failed to get mesos containers: %v", err) + } + cntrs := result.GetContainers + + if cntrs == nil { + return nil, fmt.Errorf("failed to get mesos containers") + } + return cntrs, nil +} + +func (self *client) getLabels(c *mContainer) (map[string]string, error) { + // Get mesos agent state which contains all containers labels + var s state + req := calls.NonStreaming(calls.GetState()) + result, err := self.fetchAndDecode(req) + if err != nil { + return map[string]string{}, fmt.Errorf("failed to get mesos agent state: %v", err) + } + s.st = result.GetState + + // Fetch labels from state object + labels, err := s.FetchLabels(c.FrameworkID.Value, c.ExecutorID.Value) + if err != nil { + return labels, fmt.Errorf("error while fetching labels from executor: %v", err) + } + + return labels, nil +} + +func (self *client) fetchAndDecode(req calls.RequestFunc) (*agent.Response, error) { + var res mesos.Response + var err error + + // Send request + err = retry.Retry( + func(attempt uint) error { + res, err = mesosClient.hc.Send(req, mclient.ResponseClassSingleton, nil) + return err + }, + strategy.Limit(maxRetryAttempts), + ) + if err != nil { + return nil, fmt.Errorf("error fetching %s: %s", req.Call(), err) + } + + // Decode the result + var target agent.Response + err = res.Decode(&target) + if err != nil { + return nil, fmt.Errorf("error while decoding response body from %s: %s", res, err) + } + + return &target, nil +} diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/mesos/factory.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/mesos/factory.go new file mode 100644 index 000000000000..b2c61bb3fa9d --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/mesos/factory.go @@ -0,0 +1,148 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
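[Editor's aside: the Mesos client above wraps each agent call in `github.com/Rican7/retry` with a bounded `strategy.Limit`, both for `ContainerPid` and for `fetchAndDecode`. A minimal sketch of that retry pattern in isolation — the flaky action is a hypothetical stand-in for the HTTP call to the agent:]

```go
package main

import (
	"fmt"

	"github.com/Rican7/retry"
	"github.com/Rican7/retry/strategy"
)

func main() {
	calls := 0
	// strategy.Limit bounds the number of attempts, as the client does
	// with its maxRetryAttempts constant.
	err := retry.Retry(
		func(attempt uint) error {
			calls++
			if calls < 3 {
				return fmt.Errorf("transient failure on attempt %d", attempt)
			}
			return nil // hypothetical stand-in for a successful agent fetch
		},
		strategy.Limit(3),
	)
	fmt.Println("calls:", calls, "err:", err)
}
```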
+ +package mesos + +import ( + "flag" + "fmt" + "path" + "regexp" + "strings" + "time" + + "github.com/google/cadvisor/container" + "github.com/google/cadvisor/container/libcontainer" + "github.com/google/cadvisor/fs" + info "github.com/google/cadvisor/info/v1" + "github.com/google/cadvisor/manager/watcher" + "k8s.io/klog" +) + +var MesosAgentAddress = flag.String("mesos_agent", "127.0.0.1:5051", "Mesos agent address") +var MesosAgentTimeout = flag.Duration("mesos_agent_timeout", 10*time.Second, "Mesos agent timeout") + +// The namespace under which mesos aliases are unique. +const MesosNamespace = "mesos" + +// Regexp that identifies mesos cgroups, containers started with +// --cgroup-parent have another prefix than 'mesos' +var mesosCgroupRegexp = regexp.MustCompile(`([a-z-0-9]{36})`) + +// mesosFactory implements the interface ContainerHandlerFactory +type mesosFactory struct { + machineInfoFactory info.MachineInfoFactory + + // Information about the cgroup subsystems. + cgroupSubsystems libcontainer.CgroupSubsystems + + // Information about mounted filesystems. + fsInfo fs.FsInfo + + includedMetrics map[container.MetricKind]struct{} + + client mesosAgentClient +} + +func (self *mesosFactory) String() string { + return MesosNamespace +} + +func (self *mesosFactory) NewContainerHandler(name string, inHostNamespace bool) (container.ContainerHandler, error) { + client, err := Client() + if err != nil { + return nil, err + } + + return newMesosContainerHandler( + name, + &self.cgroupSubsystems, + self.machineInfoFactory, + self.fsInfo, + self.includedMetrics, + inHostNamespace, + client, + ) +} + +// ContainerNameToMesosId returns the Mesos ID from the full container name. +func ContainerNameToMesosId(name string) string { + id := path.Base(name) + + if matches := mesosCgroupRegexp.FindStringSubmatch(id); matches != nil { + return matches[1] + } + + return id +} + +// isContainerName returns true if the cgroup with associated name +// corresponds to a mesos container. +func isContainerName(name string) bool { + // always ignore .mount cgroup even if associated with mesos and delegate to systemd + if strings.HasSuffix(name, ".mount") { + return false + } + return mesosCgroupRegexp.MatchString(path.Base(name)) +} + +// The mesos factory can handle any container. +func (self *mesosFactory) CanHandleAndAccept(name string) (handle bool, accept bool, err error) { + // if the container is not associated with mesos, we can't handle it or accept it. + if !isContainerName(name) { + return false, false, nil + } + + // Check if the container is known to mesos and it is active. 
+ id := ContainerNameToMesosId(name) + + _, err = self.client.ContainerInfo(id) + if err != nil { + return false, true, fmt.Errorf("error getting running container: %v", err) + } + + return true, true, nil +} + +func (self *mesosFactory) DebugInfo() map[string][]string { + return map[string][]string{} +} + +func Register( + machineInfoFactory info.MachineInfoFactory, + fsInfo fs.FsInfo, + includedMetrics container.MetricSet, +) error { + client, err := Client() + + if err != nil { + return fmt.Errorf("unable to create mesos agent client: %v", err) + } + + cgroupSubsystems, err := libcontainer.GetCgroupSubsystems() + if err != nil { + return fmt.Errorf("failed to get cgroup subsystems: %v", err) + } + + klog.V(1).Infof("Registering mesos factory") + factory := &mesosFactory{ + machineInfoFactory: machineInfoFactory, + cgroupSubsystems: cgroupSubsystems, + fsInfo: fsInfo, + includedMetrics: includedMetrics, + client: client, + } + container.RegisterContainerHandlerFactory(factory, []watcher.ContainerWatchSource{watcher.Raw}) + return nil +} diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/mesos/handler.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/mesos/handler.go new file mode 100644 index 000000000000..65d0b9878eda --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/mesos/handler.go @@ -0,0 +1,213 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Handler for "mesos" containers. +package mesos + +import ( + "fmt" + "path" + + "github.com/google/cadvisor/container" + "github.com/google/cadvisor/container/common" + containerlibcontainer "github.com/google/cadvisor/container/libcontainer" + "github.com/google/cadvisor/fs" + info "github.com/google/cadvisor/info/v1" + + cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs" + libcontainerconfigs "github.com/opencontainers/runc/libcontainer/configs" +) + +type mesosContainerHandler struct { + // Name of the container for this handler. + name string + + // machineInfoFactory provides info.MachineInfo + machineInfoFactory info.MachineInfoFactory + + // Absolute path to the cgroup hierarchies of this container. + // (e.g.: "cpu" -> "/sys/fs/cgroup/cpu/test") + cgroupPaths map[string]string + + // File System Info + fsInfo fs.FsInfo + + // Metrics to be included. 
+ includedMetrics container.MetricSet + + labels map[string]string + + // Reference to the container + reference info.ContainerReference + + libcontainerHandler *containerlibcontainer.Handler +} + +func isRootCgroup(name string) bool { + return name == "/" +} + +func newMesosContainerHandler( + name string, + cgroupSubsystems *containerlibcontainer.CgroupSubsystems, + machineInfoFactory info.MachineInfoFactory, + fsInfo fs.FsInfo, + includedMetrics container.MetricSet, + inHostNamespace bool, + client mesosAgentClient, +) (container.ContainerHandler, error) { + cgroupPaths := common.MakeCgroupPaths(cgroupSubsystems.MountPoints, name) + for key, val := range cgroupSubsystems.MountPoints { + cgroupPaths[key] = path.Join(val, name) + } + + // Generate the equivalent cgroup manager for this container. + cgroupManager := &cgroupfs.Manager{ + Cgroups: &libcontainerconfigs.Cgroup{ + Name: name, + }, + Paths: cgroupPaths, + } + + rootFs := "/" + if !inHostNamespace { + rootFs = "/rootfs" + } + + id := ContainerNameToMesosId(name) + + cinfo, err := client.ContainerInfo(id) + + if err != nil { + return nil, err + } + + labels := cinfo.labels + pid, err := client.ContainerPid(id) + if err != nil { + return nil, err + } + + libcontainerHandler := containerlibcontainer.NewHandler(cgroupManager, rootFs, pid, includedMetrics) + + reference := info.ContainerReference{ + Id: id, + Name: name, + Namespace: MesosNamespace, + Aliases: []string{id, name}, + } + + handler := &mesosContainerHandler{ + name: name, + machineInfoFactory: machineInfoFactory, + cgroupPaths: cgroupPaths, + fsInfo: fsInfo, + includedMetrics: includedMetrics, + labels: labels, + reference: reference, + libcontainerHandler: libcontainerHandler, + } + + return handler, nil +} + +func (self *mesosContainerHandler) ContainerReference() (info.ContainerReference, error) { + // We only know the container by its one name. + return self.reference, nil +} + +// Nothing to start up. +func (self *mesosContainerHandler) Start() {} + +// Nothing to clean up. +func (self *mesosContainerHandler) Cleanup() {} + +func (self *mesosContainerHandler) GetSpec() (info.ContainerSpec, error) { + // TODO: Since we dont collect disk usage and network stats for mesos containers, we set + // hasFilesystem and hasNetwork to false. Revisit when we support disk usage, network + // stats for mesos containers. + hasNetwork := false + hasFilesystem := false + + spec, err := common.GetSpec(self.cgroupPaths, self.machineInfoFactory, hasNetwork, hasFilesystem) + if err != nil { + return spec, err + } + + spec.Labels = self.labels + + return spec, nil +} + +func (self *mesosContainerHandler) getFsStats(stats *info.ContainerStats) error { + + mi, err := self.machineInfoFactory.GetMachineInfo() + if err != nil { + return err + } + + if self.includedMetrics.Has(container.DiskIOMetrics) { + common.AssignDeviceNamesToDiskStats((*common.MachineInfoNamer)(mi), &stats.DiskIo) + } + + return nil +} + +func (self *mesosContainerHandler) GetStats() (*info.ContainerStats, error) { + stats, err := self.libcontainerHandler.GetStats() + if err != nil { + return stats, err + } + + // Get filesystem stats. 
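[Editor's aside: the theme running through this whole vendor bump is that cAdvisor's metric gating flips from an ignore-list to an include-list, so every `!ignoreMetrics.Has(x)` becomes `includedMetrics.Has(x)`. A self-contained sketch of the set type and the inverted check — these are local re-declarations for illustration, not the vendored definitions:]

```go
package main

import "fmt"

type MetricKind string

// MetricSet mirrors cAdvisor's set of metric kinds; after this change,
// membership means "collect this metric" rather than "skip this metric".
type MetricSet map[MetricKind]struct{}

func (ms MetricSet) Has(mk MetricKind) bool { _, ok := ms[mk]; return ok }
func (ms MetricSet) Add(mk MetricKind)      { ms[mk] = struct{}{} }

func main() {
	const DiskUsageMetrics MetricKind = "disk"
	included := MetricSet{}
	included.Add(DiskUsageMetrics)

	// Old gate: !ignoreMetrics.Has(DiskUsageMetrics)
	// New gate:
	if included.Has(DiskUsageMetrics) {
		fmt.Println("collecting disk usage metrics")
	}
}
```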
+ err = self.getFsStats(stats) + if err != nil { + return stats, err + } + + return stats, nil +} + +func (self *mesosContainerHandler) GetCgroupPath(resource string) (string, error) { + path, ok := self.cgroupPaths[resource] + if !ok { + return "", fmt.Errorf("could not find path for resource %q for container %q\n", resource, self.name) + } + return path, nil +} + +func (self *mesosContainerHandler) GetContainerLabels() map[string]string { + return self.labels +} + +func (self *mesosContainerHandler) GetContainerIPAddress() string { + // the IP address for the mesos container corresponds to the system ip address. + return "127.0.0.1" +} + +func (self *mesosContainerHandler) ListContainers(listType container.ListType) ([]info.ContainerReference, error) { + return common.ListContainers(self.name, self.cgroupPaths, listType) +} + +func (self *mesosContainerHandler) ListProcesses(listType container.ListType) ([]int, error) { + return self.libcontainerHandler.GetProcesses() +} + +func (self *mesosContainerHandler) Exists() bool { + return common.CgroupExists(self.cgroupPaths) +} + +func (self *mesosContainerHandler) Type() container.ContainerType { + return container.ContainerTypeMesos +} diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/mesos/mesos_agent.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/mesos/mesos_agent.go new file mode 100644 index 000000000000..20f921b9f67e --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/mesos/mesos_agent.go @@ -0,0 +1,147 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mesos + +import ( + "fmt" + "github.com/mesos/mesos-go/api/v1/lib" + "github.com/mesos/mesos-go/api/v1/lib/agent" +) + +const ( + cpus = "cpus" + schedulerSLA = "scheduler_sla" + framework = "framework" + source = "source" + revocable = "revocable" + nonRevocable = "non_revocable" +) + +type mContainers *agent.Response_GetContainers +type mContainer = agent.Response_GetContainers_Container + +type ( + state struct { + st *agent.Response_GetState + } +) + +// GetFramework finds a framework with the given id and returns nil if not found. Note that +// this is different from the framework name. +func (s *state) GetFramework(id string) (*mesos.FrameworkInfo, error) { + for _, fw := range s.st.GetFrameworks.Frameworks { + if fw.FrameworkInfo.ID.Value == id { + return &fw.FrameworkInfo, nil + } + } + return nil, fmt.Errorf("unable to find framework id %s", id) +} + +// GetExecutor finds an executor with the given ID and returns nil if not found. Note that +// this is different from the executor name. +func (s *state) GetExecutor(id string) (*mesos.ExecutorInfo, error) { + for _, exec := range s.st.GetExecutors.Executors { + if exec.ExecutorInfo.ExecutorID.Value == id { + return &exec.ExecutorInfo, nil + } + } + return nil, fmt.Errorf("unable to find executor with id %s", id) +} + +// GetTask returns a task launched by given executor. 
+func (s *state) GetTask(exID string) (*mesos.Task, error) { + // Check if task is in Launched Tasks list + for _, t := range s.st.GetTasks.LaunchedTasks { + if s.isMatchingTask(&t, exID) { + return &t, nil + } + } + + // Check if task is in Queued Tasks list + for _, t := range s.st.GetTasks.QueuedTasks { + if s.isMatchingTask(&t, exID) { + return &t, nil + } + } + return nil, fmt.Errorf("unable to find task matching executor id %s", exID) +} + +func (s *state) isMatchingTask(t *mesos.Task, exID string) bool { + // MESOS-9111: For tasks launched through mesos command/default executor, the + // executorID(which is same as the taskID) field is not filled in the TaskInfo object. + // The workaround is compare with taskID field if executorID is empty + if t.ExecutorID != nil { + if t.ExecutorID.Value == exID { + return true + } + } else { + if t.TaskID.Value == exID { + return true + } + } + + return false +} + +func (s *state) fetchLabelsFromTask(exID string, labels map[string]string) error { + t, err := s.GetTask(exID) + if err != nil { + return err + } + + // Identify revocability. Can be removed once we have a proper label + for _, resource := range t.Resources { + if resource.Name == cpus { + if resource.Revocable != nil { + labels[schedulerSLA] = revocable + } else { + labels[schedulerSLA] = nonRevocable + } + break + } + } + + for _, l := range t.Labels.Labels { + labels[l.Key] = *l.Value + } + + return nil +} + +func (s *state) FetchLabels(fwID string, exID string) (map[string]string, error) { + labels := make(map[string]string) + + // Look for the framework which launched the container. + fw, err := s.GetFramework(fwID) + if err != nil { + return labels, fmt.Errorf("framework ID %q not found: %v", fwID, err) + } + labels[framework] = fw.Name + + // Get the executor info of the container which contains all the task info. + exec, err := s.GetExecutor(exID) + if err != nil { + return labels, fmt.Errorf("executor ID %q not found: %v", exID, err) + } + + labels[source] = *exec.Source + + err = s.fetchLabelsFromTask(exID, labels) + if err != nil { + return labels, fmt.Errorf("failed to fetch labels from task with executor ID %s", exID) + } + + return labels, nil +} diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/raw/factory.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/raw/factory.go index 1b8a43a40776..afc852e345bf 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/raw/factory.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/raw/factory.go @@ -17,6 +17,7 @@ package raw import ( "flag" "fmt" + "strings" "github.com/google/cadvisor/container" "github.com/google/cadvisor/container/common" @@ -25,7 +26,7 @@ import ( info "github.com/google/cadvisor/info/v1" watch "github.com/google/cadvisor/manager/watcher" - "github.com/golang/glog" + "k8s.io/klog" ) var dockerOnly = flag.Bool("docker_only", false, "Only report docker containers in addition to root stats") @@ -43,8 +44,11 @@ type rawFactory struct { // Watcher for inotify events. watcher *common.InotifyWatcher - // List of metrics to be ignored. - ignoreMetrics map[container.MetricKind]struct{} + // List of metrics to be included. + includedMetrics map[container.MetricKind]struct{} + + // List of raw container cgroup path prefix whitelist. 
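[Editor's aside: the raw factory change just below makes `CanHandleAndAccept` also accept any container whose cgroup path matches a configured prefix whitelist, on top of the existing `--docker_only` gate. A small sketch of that acceptance rule; the whitelist value is illustrative, not a vendored default:]

```go
package main

import (
	"fmt"
	"strings"
)

// accept reproduces the raw factory's rule: root is always accepted,
// non-docker-only mode accepts everything, and a whitelisted cgroup
// prefix forces acceptance even under --docker_only.
func accept(name string, dockerOnly bool, whitelist []string) bool {
	ok := name == "/" || !dockerOnly
	for _, prefix := range whitelist {
		if strings.HasPrefix(name, prefix) {
			return true
		}
	}
	return ok
}

func main() {
	whitelist := []string{"/kubepods"} // illustrative prefix
	fmt.Println(accept("/kubepods/burstable/pod42", true, whitelist))  // true
	fmt.Println(accept("/system.slice/ssh.service", true, whitelist)) // false
}
```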
+ rawPrefixWhiteList []string } func (self *rawFactory) String() string { @@ -56,12 +60,19 @@ func (self *rawFactory) NewContainerHandler(name string, inHostNamespace bool) ( if !inHostNamespace { rootFs = "/rootfs" } - return newRawContainerHandler(name, self.cgroupSubsystems, self.machineInfoFactory, self.fsInfo, self.watcher, rootFs, self.ignoreMetrics) + return newRawContainerHandler(name, self.cgroupSubsystems, self.machineInfoFactory, self.fsInfo, self.watcher, rootFs, self.includedMetrics) } // The raw factory can handle any container. If --docker_only is set to false, non-docker containers are ignored. func (self *rawFactory) CanHandleAndAccept(name string) (bool, bool, error) { accept := name == "/" || !*dockerOnly + + for _, prefix := range self.rawPrefixWhiteList { + if strings.HasPrefix(name, prefix) { + accept = true + break + } + } return true, accept, nil } @@ -69,7 +80,7 @@ func (self *rawFactory) DebugInfo() map[string][]string { return common.DebugInfo(self.watcher.GetWatches()) } -func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics map[container.MetricKind]struct{}) error { +func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics map[container.MetricKind]struct{}, rawPrefixWhiteList []string) error { cgroupSubsystems, err := libcontainer.GetCgroupSubsystems() if err != nil { return fmt.Errorf("failed to get cgroup subsystems: %v", err) @@ -83,13 +94,14 @@ func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, igno return err } - glog.V(1).Infof("Registering Raw factory") + klog.V(1).Infof("Registering Raw factory") factory := &rawFactory{ machineInfoFactory: machineInfoFactory, fsInfo: fsInfo, cgroupSubsystems: &cgroupSubsystems, watcher: watcher, - ignoreMetrics: ignoreMetrics, + includedMetrics: includedMetrics, + rawPrefixWhiteList: rawPrefixWhiteList, } container.RegisterContainerHandlerFactory(factory, []watch.ContainerWatchSource{watch.Raw}) return nil diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/raw/handler.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/raw/handler.go index a04a7c89e428..7e7504d933b2 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/raw/handler.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/raw/handler.go @@ -25,9 +25,9 @@ import ( info "github.com/google/cadvisor/info/v1" "github.com/google/cadvisor/machine" - "github.com/golang/glog" cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs" "github.com/opencontainers/runc/libcontainer/configs" + "k8s.io/klog" ) type rawContainerHandler struct { @@ -49,7 +49,7 @@ func isRootCgroup(name string) bool { return name == "/" } -func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSubsystems, machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, watcher *common.InotifyWatcher, rootFs string, ignoreMetrics container.MetricSet) (container.ContainerHandler, error) { +func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSubsystems, machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, watcher *common.InotifyWatcher, rootFs string, includedMetrics container.MetricSet) (container.ContainerHandler, error) { cgroupPaths := common.MakeCgroupPaths(cgroupSubsystems.MountPoints, name) cHints, err := common.GetContainerHintsFromFile(*common.ArgContainerHints) @@ -78,7 +78,7 @@ func newRawContainerHandler(name string, cgroupSubsystems 
*libcontainer.CgroupSu pid = 1 } - handler := libcontainer.NewHandler(cgroupManager, rootFs, pid, ignoreMetrics) + handler := libcontainer.NewHandler(cgroupManager, rootFs, pid, includedMetrics) return &rawContainerHandler{ name: name, @@ -134,7 +134,7 @@ func (self *rawContainerHandler) GetSpec() (info.ContainerSpec, error) { // Get memory and swap limits of the running machine memLimit, err := machine.GetMachineMemoryCapacity() if err != nil { - glog.Warningf("failed to obtain memory limit for machine container") + klog.Warningf("failed to obtain memory limit for machine container") spec.HasMemory = false } else { spec.Memory.Limit = uint64(memLimit) @@ -144,7 +144,7 @@ func (self *rawContainerHandler) GetSpec() (info.ContainerSpec, error) { swapLimit, err := machine.GetMachineSwapCapacity() if err != nil { - glog.Warningf("failed to obtain swap limit for machine container") + klog.Warningf("failed to obtain swap limit for machine container") } else { spec.Memory.SwapLimit = uint64(swapLimit) } diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/rkt/factory.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/rkt/factory.go index 3f79d753e0d2..cf8060324230 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/rkt/factory.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/rkt/factory.go @@ -23,7 +23,7 @@ import ( info "github.com/google/cadvisor/info/v1" "github.com/google/cadvisor/manager/watcher" - "github.com/golang/glog" + "k8s.io/klog" ) const RktNamespace = "rkt" @@ -35,7 +35,7 @@ type rktFactory struct { fsInfo fs.FsInfo - ignoreMetrics container.MetricSet + includedMetrics container.MetricSet rktPath string } @@ -54,7 +54,7 @@ func (self *rktFactory) NewContainerHandler(name string, inHostNamespace bool) ( if !inHostNamespace { rootFs = "/rootfs" } - return newRktContainerHandler(name, client, self.rktPath, self.cgroupSubsystems, self.machineInfoFactory, self.fsInfo, rootFs, self.ignoreMetrics) + return newRktContainerHandler(name, client, self.rktPath, self.cgroupSubsystems, self.machineInfoFactory, self.fsInfo, rootFs, self.includedMetrics) } func (self *rktFactory) CanHandleAndAccept(name string) (bool, bool, error) { @@ -67,7 +67,7 @@ func (self *rktFactory) DebugInfo() map[string][]string { return map[string][]string{} } -func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics container.MetricSet) error { +func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics container.MetricSet) error { _, err := Client() if err != nil { return fmt.Errorf("unable to communicate with Rkt api service: %v", err) @@ -86,12 +86,12 @@ func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, igno return fmt.Errorf("failed to find supported cgroup mounts for the raw factory") } - glog.V(1).Infof("Registering Rkt factory") + klog.V(1).Infof("Registering Rkt factory") factory := &rktFactory{ machineInfoFactory: machineInfoFactory, fsInfo: fsInfo, cgroupSubsystems: &cgroupSubsystems, - ignoreMetrics: ignoreMetrics, + includedMetrics: includedMetrics, rktPath: rktPath, } container.RegisterContainerHandlerFactory(factory, []watcher.ContainerWatchSource{watcher.Rkt}) diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/rkt/handler.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/rkt/handler.go index 86564a1ea934..1421e6ceeab1 100644 --- 
a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/rkt/handler.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/rkt/handler.go @@ -27,9 +27,9 @@ import ( info "github.com/google/cadvisor/info/v1" "golang.org/x/net/context" - "github.com/golang/glog" cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs" "github.com/opencontainers/runc/libcontainer/configs" + "k8s.io/klog" ) type rktContainerHandler struct { @@ -48,7 +48,7 @@ type rktContainerHandler struct { // Filesystem handler. fsHandler common.FsHandler - ignoreMetrics container.MetricSet + includedMetrics container.MetricSet apiPod *rktapi.Pod @@ -59,7 +59,7 @@ type rktContainerHandler struct { libcontainerHandler *libcontainer.Handler } -func newRktContainerHandler(name string, rktClient rktapi.PublicAPIClient, rktPath string, cgroupSubsystems *libcontainer.CgroupSubsystems, machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, rootFs string, ignoreMetrics container.MetricSet) (container.ContainerHandler, error) { +func newRktContainerHandler(name string, rktClient rktapi.PublicAPIClient, rktPath string, cgroupSubsystems *libcontainer.CgroupSubsystems, machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, rootFs string, includedMetrics container.MetricSet) (container.ContainerHandler, error) { aliases := make([]string, 1) isPod := false @@ -89,7 +89,7 @@ func newRktContainerHandler(name string, rktClient rktapi.PublicAPIClient, rktPa annotations := resp.Pod.Annotations if parsed.Container != "" { // As not empty string, an App container if contAnnotations, ok := findAnnotations(resp.Pod.Apps, parsed.Container); !ok { - glog.Warningf("couldn't find app %v in pod", parsed.Container) + klog.Warningf("couldn't find app %v in pod", parsed.Container) } else { annotations = append(annotations, contAnnotations...) 
} @@ -109,7 +109,7 @@ func newRktContainerHandler(name string, rktClient rktapi.PublicAPIClient, rktPa Paths: cgroupPaths, } - libcontainerHandler := libcontainer.NewHandler(cgroupManager, rootFs, pid, ignoreMetrics) + libcontainerHandler := libcontainer.NewHandler(cgroupManager, rootFs, pid, includedMetrics) rootfsStorageDir := getRootFs(rktPath, parsed) @@ -125,14 +125,14 @@ func newRktContainerHandler(name string, rktClient rktapi.PublicAPIClient, rktPa fsInfo: fsInfo, isPod: isPod, rootfsStorageDir: rootfsStorageDir, - ignoreMetrics: ignoreMetrics, + includedMetrics: includedMetrics, apiPod: apiPod, labels: labels, reference: containerReference, libcontainerHandler: libcontainerHandler, } - if !ignoreMetrics.Has(container.DiskUsageMetrics) { + if includedMetrics.Has(container.DiskUsageMetrics) { handler.fsHandler = common.NewFsHandler(common.DefaultPeriod, rootfsStorageDir, "", fsInfo) } @@ -170,8 +170,8 @@ func (handler *rktContainerHandler) Cleanup() { } func (handler *rktContainerHandler) GetSpec() (info.ContainerSpec, error) { - hasNetwork := handler.isPod && !handler.ignoreMetrics.Has(container.NetworkUsageMetrics) - hasFilesystem := !handler.ignoreMetrics.Has(container.DiskUsageMetrics) + hasNetwork := handler.isPod && handler.includedMetrics.Has(container.NetworkUsageMetrics) + hasFilesystem := handler.includedMetrics.Has(container.DiskUsageMetrics) spec, err := common.GetSpec(handler.cgroupPaths, handler.machineInfoFactory, hasNetwork, hasFilesystem) @@ -186,11 +186,11 @@ func (handler *rktContainerHandler) getFsStats(stats *info.ContainerStats) error return err } - if !handler.ignoreMetrics.Has(container.DiskIOMetrics) { + if handler.includedMetrics.Has(container.DiskIOMetrics) { common.AssignDeviceNamesToDiskStats((*common.MachineInfoNamer)(mi), &stats.DiskIo) } - if handler.ignoreMetrics.Has(container.DiskUsageMetrics) { + if !handler.includedMetrics.Has(container.DiskUsageMetrics) { return nil } diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/rkt/helpers.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/rkt/helpers.go index a4fb8da829ad..94bbdecc4073 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/rkt/helpers.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/rkt/helpers.go @@ -21,8 +21,8 @@ import ( "strings" rktapi "github.com/coreos/rkt/api/v1alpha" - "github.com/golang/glog" "golang.org/x/net/context" + "k8s.io/klog" ) type parsedName struct { @@ -128,7 +128,7 @@ func getRootFs(root string, parsed *parsedName) string { bytes, err := ioutil.ReadFile(tree) if err != nil { - glog.Errorf("ReadFile failed, couldn't read %v to get upper dir: %v", tree, err) + klog.Errorf("ReadFile failed, couldn't read %v to get upper dir: %v", tree, err) return "" } diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/systemd/factory.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/systemd/factory.go index cb3b7c89cd34..100c79e13901 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/container/systemd/factory.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/container/systemd/factory.go @@ -23,7 +23,7 @@ import ( info "github.com/google/cadvisor/info/v1" "github.com/google/cadvisor/manager/watcher" - "github.com/golang/glog" + "k8s.io/klog" ) type systemdFactory struct{} @@ -50,8 +50,8 @@ func (f *systemdFactory) DebugInfo() map[string][]string { } // Register registers the systemd container factory. 
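[Editor's aside: the other recurring change in this diff is the `github.com/golang/glog` → `k8s.io/klog` swap, landing just below in the systemd factory as well. Call sites keep the same `V`-level API; the practical difference is that klog wires its flags explicitly via `InitFlags`. A minimal usage sketch:]

```go
package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// klog registers -v, -logtostderr, etc. on the given FlagSet
	// (nil means flag.CommandLine); flags are then parsed as usual.
	klog.InitFlags(nil)
	flag.Parse()

	klog.V(1).Infof("Registering systemd factory") // same call shape as glog.V(1).Infof
	klog.Flush()
}
```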
-func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics container.MetricSet) error { - glog.V(1).Infof("Registering systemd factory") +func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, includedMetrics container.MetricSet) error { + klog.V(1).Infof("Registering systemd factory") factory := &systemdFactory{} container.RegisterContainerHandlerFactory(factory, []watcher.ContainerWatchSource{watcher.Raw}) return nil diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/devicemapper/dmsetup_client.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/devicemapper/dmsetup_client.go index c0936da9a758..3a37b5609532 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/devicemapper/dmsetup_client.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/devicemapper/dmsetup_client.go @@ -18,7 +18,7 @@ import ( "strconv" "strings" - "github.com/golang/glog" + "k8s.io/klog" ) // DmsetupClient is a low-level client for interacting with device mapper via @@ -58,6 +58,6 @@ func (c *defaultDmsetupClient) Status(deviceName string) ([]byte, error) { } func (*defaultDmsetupClient) dmsetup(args ...string) ([]byte, error) { - glog.V(5).Infof("running dmsetup %v", strings.Join(args, " ")) + klog.V(5).Infof("running dmsetup %v", strings.Join(args, " ")) return exec.Command("dmsetup", args...).Output() } diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/devicemapper/thin_ls_client.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/devicemapper/thin_ls_client.go index 29737434bfd6..1bbc360ff4d8 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/devicemapper/thin_ls_client.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/devicemapper/thin_ls_client.go @@ -21,7 +21,7 @@ import ( "strconv" "strings" - "github.com/golang/glog" + "k8s.io/klog" ) // thinLsClient knows how to run a thin_ls very specific to CoW usage for @@ -53,7 +53,7 @@ var _ thinLsClient = &defaultThinLsClient{} func (c *defaultThinLsClient) ThinLs(deviceName string) (map[string]uint64, error) { args := []string{"--no-headers", "-m", "-o", "DEV,EXCLUSIVE_BYTES", deviceName} - glog.V(4).Infof("running command: thin_ls %v", strings.Join(args, " ")) + klog.V(4).Infof("running command: thin_ls %v", strings.Join(args, " ")) output, err := exec.Command(c.thinLsPath, args...).Output() if err != nil { @@ -80,7 +80,7 @@ func parseThinLsOutput(output []byte) map[string]uint64 { deviceID := fields[0] usage, err := strconv.ParseUint(fields[1], 10, 64) if err != nil { - glog.Warningf("unexpected error parsing thin_ls output: %v", err) + klog.Warningf("unexpected error parsing thin_ls output: %v", err) continue } diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/devicemapper/thin_pool_watcher.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/devicemapper/thin_pool_watcher.go index 6f5666a02fe3..2eb8e002c85f 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/devicemapper/thin_pool_watcher.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/devicemapper/thin_pool_watcher.go @@ -19,7 +19,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" ) // ThinPoolWatcher maintains a cache of device name -> usage stats for a @@ -58,7 +58,7 @@ func NewThinPoolWatcher(poolName, metadataDevice string) (*ThinPoolWatcher, erro func (w *ThinPoolWatcher) Start() { err := w.Refresh() if err != nil { - glog.Errorf("encountered error refreshing thin pool watcher: %v", err) + 
klog.Errorf("encountered error refreshing thin pool watcher: %v", err) } for { @@ -69,12 +69,12 @@ func (w *ThinPoolWatcher) Start() { start := time.Now() err = w.Refresh() if err != nil { - glog.Errorf("encountered error refreshing thin pool watcher: %v", err) + klog.Errorf("encountered error refreshing thin pool watcher: %v", err) } // print latency for refresh duration := time.Since(start) - glog.V(5).Infof("thin_ls(%d) took %s", start.Unix(), duration) + klog.V(5).Infof("thin_ls(%d) took %s", start.Unix(), duration) } } } @@ -115,7 +115,7 @@ func (w *ThinPoolWatcher) Refresh() error { } if currentlyReserved { - glog.V(5).Infof("metadata for %v is currently reserved; releasing", w.poolName) + klog.V(5).Infof("metadata for %v is currently reserved; releasing", w.poolName) _, err = w.dmsetup.Message(w.poolName, 0, releaseMetadataMessage) if err != nil { err = fmt.Errorf("error releasing metadata snapshot for %v: %v", w.poolName, err) @@ -123,22 +123,22 @@ func (w *ThinPoolWatcher) Refresh() error { } } - glog.V(5).Infof("reserving metadata snapshot for thin-pool %v", w.poolName) + klog.V(5).Infof("reserving metadata snapshot for thin-pool %v", w.poolName) // NOTE: "0" in the call below is for the 'sector' argument to 'dmsetup // message'. It's not needed for thin pools. if output, err := w.dmsetup.Message(w.poolName, 0, reserveMetadataMessage); err != nil { err = fmt.Errorf("error reserving metadata for thin-pool %v: %v output: %v", w.poolName, err, string(output)) return err } else { - glog.V(5).Infof("reserved metadata snapshot for thin-pool %v", w.poolName) + klog.V(5).Infof("reserved metadata snapshot for thin-pool %v", w.poolName) } defer func() { - glog.V(5).Infof("releasing metadata snapshot for thin-pool %v", w.poolName) + klog.V(5).Infof("releasing metadata snapshot for thin-pool %v", w.poolName) w.dmsetup.Message(w.poolName, 0, releaseMetadataMessage) }() - glog.V(5).Infof("running thin_ls on metadata device %v", w.metadataDevice) + klog.V(5).Infof("running thin_ls on metadata device %v", w.metadataDevice) newCache, err := w.thinLsClient.ThinLs(w.metadataDevice) if err != nil { err = fmt.Errorf("error performing thin_ls on metadata device %v: %v", w.metadataDevice, err) @@ -157,7 +157,7 @@ const ( // checkReservation checks to see whether the thin device is currently holding // userspace metadata. 
func (w *ThinPoolWatcher) checkReservation(poolName string) (bool, error) { - glog.V(5).Infof("checking whether the thin-pool is holding a metadata snapshot") + klog.V(5).Infof("checking whether the thin-pool is holding a metadata snapshot") output, err := w.dmsetup.Status(poolName) if err != nil { return false, err diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/events/handler.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/events/handler.go index d920d7d6b7ad..28a67addb00a 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/events/handler.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/events/handler.go @@ -24,7 +24,7 @@ import ( info "github.com/google/cadvisor/info/v1" "github.com/google/cadvisor/utils" - "github.com/golang/glog" + "k8s.io/klog" ) type byTimestamp []*info.Event @@ -322,7 +322,7 @@ func (self *events) AddEvent(e *info.Event) error { for _, watchObject := range watchesToSend { watchObject.eventChannel.GetChannel() <- e } - glog.V(4).Infof("Added event %v", e) + klog.V(4).Infof("Added event %v", e) return nil } @@ -332,7 +332,7 @@ func (self *events) StopWatch(watchId int) { defer self.watcherLock.Unlock() _, ok := self.watchers[watchId] if !ok { - glog.Errorf("Could not find watcher instance %v", watchId) + klog.Errorf("Could not find watcher instance %v", watchId) } close(self.watchers[watchId].eventChannel.GetChannel()) delete(self.watchers, watchId) diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/fs/fs.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/fs/fs.go index 468f23b75550..e628b29ffd32 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/fs/fs.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/fs/fs.go @@ -33,11 +33,11 @@ import ( "time" "github.com/docker/docker/pkg/mount" - "github.com/golang/glog" "github.com/google/cadvisor/devicemapper" "github.com/google/cadvisor/utils" dockerutil "github.com/google/cadvisor/utils/docker" zfs "github.com/mistifyio/go-zfs" + "k8s.io/klog" ) const ( @@ -114,9 +114,9 @@ func NewFsInfo(context Context) (FsInfo, error) { fsUUIDToDeviceName, err := getFsUUIDToDeviceNameMap() if err != nil { - // UUID is not always avaiable across different OS distributions. + // UUID is not always available across different OS distributions. // Do not fail if there is an error. 
- glog.Warningf("Failed to get disk UUID mapping, getting disk info by uuid will not work: %v", err) + klog.Warningf("Failed to get disk UUID mapping, getting disk info by uuid will not work: %v", err) } // Avoid devicemapper container mounts - these are tracked by the ThinPoolWatcher @@ -139,8 +139,8 @@ func NewFsInfo(context Context) (FsInfo, error) { fsInfo.addDockerImagesLabel(context, mounts) fsInfo.addCrioImagesLabel(context, mounts) - glog.V(1).Infof("Filesystem UUIDs: %+v", fsInfo.fsUUIDToDeviceName) - glog.V(1).Infof("Filesystem partitions: %+v", fsInfo.partitions) + klog.V(1).Infof("Filesystem UUIDs: %+v", fsInfo.fsUUIDToDeviceName) + klog.V(1).Infof("Filesystem partitions: %+v", fsInfo.partitions) fsInfo.addSystemRootLabel(mounts) return fsInfo, nil } @@ -165,7 +165,7 @@ func getFsUUIDToDeviceNameMap() (map[string]string, error) { path := filepath.Join(dir, file.Name()) target, err := os.Readlink(path) if err != nil { - glog.Warningf("Failed to resolve symlink for %q", path) + klog.Warningf("Failed to resolve symlink for %q", path) continue } device, err := filepath.Abs(filepath.Join(dir, target)) @@ -213,7 +213,7 @@ func processMounts(mounts []*mount.Info, excludedMountpointPrefixes []string) ma if mount.Fstype == "btrfs" && mount.Major == 0 && strings.HasPrefix(mount.Source, "/dev/") { major, minor, err := getBtrfsMajorMinorIds(mount) if err != nil { - glog.Warningf("%s", err) + klog.Warningf("%s", err) } else { mount.Major = major mount.Minor = minor @@ -278,7 +278,7 @@ func (self *RealFsInfo) addSystemRootLabel(mounts []*mount.Info) { func (self *RealFsInfo) addDockerImagesLabel(context Context, mounts []*mount.Info) { dockerDev, dockerPartition, err := self.getDockerDeviceMapperInfo(context.Docker) if err != nil { - glog.Warningf("Could not get Docker devicemapper device: %v", err) + klog.Warningf("Could not get Docker devicemapper device: %v", err) } if len(dockerDev) > 0 && dockerPartition != nil { self.partitions[dockerDev] = *dockerPartition @@ -405,7 +405,7 @@ func (self *RealFsInfo) GetFsInfoForPath(mountSet map[string]struct{}) ([]Fs, er switch partition.fsType { case DeviceMapper.String(): fs.Capacity, fs.Free, fs.Available, err = getDMStats(device, partition.blockSize) - glog.V(5).Infof("got devicemapper fs capacity stats: capacity: %v free: %v available: %v:", fs.Capacity, fs.Free, fs.Available) + klog.V(5).Infof("got devicemapper fs capacity stats: capacity: %v free: %v available: %v:", fs.Capacity, fs.Free, fs.Available) fs.Type = DeviceMapper case ZFS.String(): fs.Capacity, fs.Free, fs.Available, err = getZfstats(device) @@ -418,11 +418,11 @@ func (self *RealFsInfo) GetFsInfoForPath(mountSet map[string]struct{}) ([]Fs, er fs.InodesFree = &inodesFree fs.Type = VFS } else { - glog.V(4).Infof("unable to determine file system type, partition mountpoint does not exist: %v", partition.mountpoint) + klog.V(4).Infof("unable to determine file system type, partition mountpoint does not exist: %v", partition.mountpoint) } } if err != nil { - glog.Errorf("Stat fs failed. Error: %v", err) + klog.V(4).Infof("Stat fs failed. 
Error: %v", err) } else { deviceSet[device] = struct{}{} fs.DeviceInfo = DeviceInfo{ @@ -445,7 +445,7 @@ func getDiskStatsMap(diskStatsFile string) (map[string]DiskStats, error) { file, err := os.Open(diskStatsFile) if err != nil { if os.IsNotExist(err) { - glog.Warningf("Not collecting filesystem statistics because file %q was not found", diskStatsFile) + klog.Warningf("Not collecting filesystem statistics because file %q was not found", diskStatsFile) return diskStatsMap, nil } return nil, err @@ -533,10 +533,25 @@ func (self *RealFsInfo) GetDirFsDevice(dir string) (*DeviceInfo, error) { } mount, found := self.mounts[dir] + // try the parent dir if not found until we reach the root dir + // this is an issue on btrfs systems where the directory is not + // the subvolume + for !found { + pathdir, _ := filepath.Split(dir) + // break when we reach root + if pathdir == "/" { + break + } + // trim "/" from the new parent path otherwise the next possible + // filepath.Split in the loop will not split the string any further + dir = strings.TrimSuffix(pathdir, "/") + mount, found = self.mounts[dir] + } + if found && mount.Fstype == "btrfs" && mount.Major == 0 && strings.HasPrefix(mount.Source, "/dev/") { major, minor, err := getBtrfsMajorMinorIds(mount) if err != nil { - glog.Warningf("%s", err) + klog.Warningf("%s", err) } else { return &DeviceInfo{mount.Source, uint(major), uint(minor)}, nil } @@ -568,12 +583,12 @@ func GetDirDiskUsage(dir string, timeout time.Duration) (uint64, error) { return 0, fmt.Errorf("failed to exec du - %v", err) } timer := time.AfterFunc(timeout, func() { - glog.Warningf("Killing cmd %v due to timeout(%s)", cmd.Args, timeout.String()) + klog.Warningf("Killing cmd %v due to timeout(%s)", cmd.Args, timeout.String()) cmd.Process.Kill() }) stdoutb, souterr := ioutil.ReadAll(stdoutp) if souterr != nil { - glog.Errorf("Failed to read from stdout for cmd %v - %v", cmd.Args, souterr) + klog.Errorf("Failed to read from stdout for cmd %v - %v", cmd.Args, souterr) } stderrb, _ := ioutil.ReadAll(stderrp) err = cmd.Wait() @@ -607,7 +622,7 @@ func GetDirInodeUsage(dir string, timeout time.Duration) (uint64, error) { return 0, fmt.Errorf("failed to exec cmd %v - %v; stderr: %v", findCmd.Args, err, stderr.String()) } timer := time.AfterFunc(timeout, func() { - glog.Warningf("Killing cmd %v due to timeout(%s)", findCmd.Args, timeout.String()) + klog.Warningf("Killing cmd %v due to timeout(%s)", findCmd.Args, timeout.String()) findCmd.Process.Kill() }) err := findCmd.Wait() @@ -748,7 +763,7 @@ func getBtrfsMajorMinorIds(mount *mount.Info) (int, int, error) { return 0, 0, err } - glog.V(4).Infof("btrfs mount %#v", mount) + klog.V(4).Infof("btrfs mount %#v", mount) if buf.Mode&syscall.S_IFMT == syscall.S_IFBLK { err := syscall.Stat(mount.Mountpoint, buf) if err != nil { @@ -756,8 +771,8 @@ func getBtrfsMajorMinorIds(mount *mount.Info) (int, int, error) { return 0, 0, err } - glog.V(4).Infof("btrfs dev major:minor %d:%d\n", int(major(buf.Dev)), int(minor(buf.Dev))) - glog.V(4).Infof("btrfs rdev major:minor %d:%d\n", int(major(buf.Rdev)), int(minor(buf.Rdev))) + klog.V(4).Infof("btrfs dev major:minor %d:%d\n", int(major(buf.Dev)), int(minor(buf.Dev))) + klog.V(4).Infof("btrfs rdev major:minor %d:%d\n", int(major(buf.Rdev)), int(minor(buf.Rdev))) return int(major(buf.Dev)), int(minor(buf.Dev)), nil } else { diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/info/v1/container.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/info/v1/container.go index 
0d7b895bc014..b54233d535c0 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/info/v1/container.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/info/v1/container.go @@ -102,11 +102,11 @@ type ContainerInfoRequest struct { NumStats int `json:"num_stats,omitempty"` // Start time for which to query information. - // If ommitted, the beginning of time is assumed. + // If omitted, the beginning of time is assumed. Start time.Time `json:"start,omitempty"` // End time for which to query information. - // If ommitted, current time is assumed. + // If omitted, current time is assumed. End time.Time `json:"end,omitempty"` } @@ -358,6 +358,9 @@ type MemoryStats struct { // Units: Bytes. Swap uint64 `json:"swap"` + // The amount of memory used for mapped files (includes tmpfs/shmem) + MappedFile uint64 `json:"mapped_file"` + // The amount of working set memory, this includes recently accessed memory, // dirty memory, and kernel memory. Working set is <= "usage". // Units: Bytes. @@ -554,6 +557,14 @@ type AcceleratorStats struct { DutyCycle uint64 `json:"duty_cycle"` } +type ProcessStats struct { + // Number of processes + ProcessCount uint64 `json:"process_count"` + + // Number of open file descriptors + FdCount uint64 `json:"fd_count"` +} + type ContainerStats struct { // The time of this stat point. Timestamp time.Time `json:"timestamp"` @@ -571,6 +582,9 @@ type ContainerStats struct { // Metrics for Accelerators. Each Accelerator corresponds to one element in the array. Accelerators []AcceleratorStats `json:"accelerators,omitempty"` + // ProcessStats for Containers + Processes ProcessStats `json:"processes,omitempty"` + // Custom metrics from all collectors CustomMetrics map[string][]MetricVal `json:"custom_metrics,omitempty"` } diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/info/v2/container.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/info/v2/container.go index d32f571cb674..4288d003dbf0 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/info/v2/container.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/info/v2/container.go @@ -38,7 +38,7 @@ type CpuSpec struct { Mask string `json:"mask,omitempty"` // CPUQuota Default is disabled Quota uint64 `json:"quota,omitempty"` - // Period is the CPU reference time in ns e.g the quota is compared aginst this. + // Period is the CPU reference time in ns e.g the quota is compared against this. 
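Stepping back to the fs.go hunk above: GetDirFsDevice now walks up the directory tree until it reaches a known mount point, which matters on btrfs where the queried directory is often not itself a subvolume. A standalone sketch of that walk; the mounts map is a stand-in for RealFsInfo's mount table:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// findMount walks dir upward toward "/" until it hits a known mount point,
// mirroring the parent-directory loop added to GetDirFsDevice.
func findMount(mounts map[string]string, dir string) (string, bool) {
	source, found := mounts[dir]
	for !found {
		pathdir, _ := filepath.Split(dir)
		if pathdir == "/" { // reached root without a match
			break
		}
		// Trim the trailing "/" so the next filepath.Split strips another level.
		dir = strings.TrimSuffix(pathdir, "/")
		source, found = mounts[dir]
	}
	return source, found
}

func main() {
	mounts := map[string]string{"/var/lib": "/dev/sda1"}      // stand-in mount table
	fmt.Println(findMount(mounts, "/var/lib/docker/volumes")) // /dev/sda1 true
	fmt.Println(findMount(mounts, "/etc/kubernetes"))         // "" false
}
```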
Period uint64 `json:"period,omitempty"` } @@ -254,6 +254,7 @@ type ProcessInfo struct { RunningTime string `json:"running_time"` CgroupPath string `json:"cgroup_path"` Cmd string `json:"cmd"` + FdCount int `json:"fd_count"` } type TcpStat struct { diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/info/v2/conversion.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/info/v2/conversion.go index 97fb463b9fac..74279a20a138 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/info/v2/conversion.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/info/v2/conversion.go @@ -18,8 +18,8 @@ import ( "fmt" "time" - "github.com/golang/glog" "github.com/google/cadvisor/info/v1" + "k8s.io/klog" ) func machineFsStatsFromV1(fsStats []v1.FsStats) []MachineFsStats { @@ -70,7 +70,7 @@ func MachineStatsFromV1(cont *v1.ContainerInfo) []MachineStats { stat.Cpu = &val.Cpu cpuInst, err := InstCpuStats(last, val) if err != nil { - glog.Warningf("Could not get instant cpu stats: %v", err) + klog.Warningf("Could not get instant cpu stats: %v", err) } else { stat.CpuInst = cpuInst } @@ -107,7 +107,7 @@ func ContainerStatsFromV1(containerName string, spec *v1.ContainerSpec, stats [] stat.Cpu = &val.Cpu cpuInst, err := InstCpuStats(last, val) if err != nil { - glog.Warningf("Could not get instant cpu stats: %v", err) + klog.Warningf("Could not get instant cpu stats: %v", err) } else { stat.CpuInst = cpuInst } @@ -133,7 +133,7 @@ func ContainerStatsFromV1(containerName string, spec *v1.ContainerSpec, stats [] } } else if len(val.Filesystem) > 1 && containerName != "/" { // Cannot handle multiple devices per container. - glog.V(4).Infof("failed to handle multiple devices for container %s. Skipping Filesystem stats", containerName) + klog.V(4).Infof("failed to handle multiple devices for container %s. 
Skipping Filesystem stats", containerName) } } if spec.HasDiskIo { @@ -168,7 +168,7 @@ func DeprecatedStatsFromV1(cont *v1.ContainerInfo) []DeprecatedContainerStats { stat.Cpu = val.Cpu cpuInst, err := InstCpuStats(last, val) if err != nil { - glog.Warningf("Could not get instant cpu stats: %v", err) + klog.Warningf("Could not get instant cpu stats: %v", err) } else { stat.CpuInst = cpuInst } diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/machine/info.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/machine/info.go index bf4f595ab217..03a65792c3e3 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/machine/info.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/machine/info.go @@ -30,7 +30,7 @@ import ( "github.com/google/cadvisor/utils/sysfs" "github.com/google/cadvisor/utils/sysinfo" - "github.com/golang/glog" + "k8s.io/klog" "golang.org/x/sys/unix" ) @@ -50,7 +50,7 @@ func getInfoFromFiles(filePaths string) string { return strings.TrimSpace(string(id)) } } - glog.Warningf("Couldn't collect info from any of the files in %q", filePaths) + klog.Warningf("Couldn't collect info from any of the files in %q", filePaths) return "" } @@ -117,27 +117,27 @@ func Info(sysFs sysfs.SysFs, fsInfo fs.FsInfo, inHostNamespace bool) (*info.Mach filesystems, err := fsInfo.GetGlobalFsInfo() if err != nil { - glog.Errorf("Failed to get global filesystem information: %v", err) + klog.Errorf("Failed to get global filesystem information: %v", err) } diskMap, err := sysinfo.GetBlockDeviceInfo(sysFs) if err != nil { - glog.Errorf("Failed to get disk map: %v", err) + klog.Errorf("Failed to get disk map: %v", err) } netDevices, err := sysinfo.GetNetworkDevices(sysFs) if err != nil { - glog.Errorf("Failed to get network devices: %v", err) + klog.Errorf("Failed to get network devices: %v", err) } topology, numCores, err := GetTopology(sysFs, string(cpuinfo)) if err != nil { - glog.Errorf("Failed to get topology information: %v", err) + klog.Errorf("Failed to get topology information: %v", err) } systemUUID, err := sysinfo.GetSystemUUID(sysFs) if err != nil { - glog.Errorf("Failed to get system UUID: %v", err) + klog.Errorf("Failed to get system UUID: %v", err) } realCloudInfo := cloudinfo.NewRealCloudInfo() diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/machine/machine.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/machine/machine.go index bd07414456ca..3a60e2e5c52d 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/machine/machine.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/machine/machine.go @@ -30,7 +30,7 @@ import ( "github.com/google/cadvisor/utils/sysfs" "github.com/google/cadvisor/utils/sysinfo" - "github.com/golang/glog" + "k8s.io/klog" "golang.org/x/sys/unix" ) @@ -191,7 +191,7 @@ func GetTopology(sysFs sysfs.SysFs, cpuinfo string) ([]info.Node, int, error) { for idx, node := range nodes { caches, err := sysinfo.GetCacheInfo(sysFs, node.Cores[0].Threads[0]) if err != nil { - glog.Errorf("failed to get cache information for node %d: %v", node.Id, err) + klog.Errorf("failed to get cache information for node %d: %v", node.Id, err) continue } numThreadsPerCore := len(node.Cores[0].Threads) diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/manager/container.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/manager/container.go index 295479f092dd..dae4cf8fd5ab 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/manager/container.go +++ 
b/cluster-autoscaler/vendor/github.com/google/cadvisor/manager/container.go @@ -39,7 +39,7 @@ import ( "github.com/google/cadvisor/utils/cpuload" units "github.com/docker/go-units" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/utils/clock" ) @@ -47,7 +47,9 @@ import ( var enableLoadReader = flag.Bool("enable_load_reader", false, "Whether to enable cpu load reader") var HousekeepingInterval = flag.Duration("housekeeping_interval", 1*time.Second, "Interval between container housekeepings") -var cgroupPathRegExp = regexp.MustCompile(`devices[^:]*:(.*?)[,;$]`) +// cgroup type chosen to fetch the cgroup path of a process. +// Memory has been chosen, as it is one of the default cgroups that is enabled for most containers. +var cgroupPathRegExp = regexp.MustCompile(`memory[^:]*:(.*?)[,;$]`) type containerInfo struct { info.ContainerReference @@ -185,8 +187,8 @@ func (c *containerData) getCgroupPath(cgroups string) (string, error) { } matches := cgroupPathRegExp.FindSubmatch([]byte(cgroups)) if len(matches) != 2 { - glog.V(3).Infof("failed to get devices cgroup path from %q", cgroups) - // return root in case of failures - devices hierarchy might not be enabled. + klog.V(3).Infof("failed to get memory cgroup path from %q", cgroups) + // return root in case of failures - memory hierarchy might not be enabled. return "/", nil } return string(matches[1]), nil @@ -206,7 +208,7 @@ func (c *containerData) ReadFile(filepath string, inHostNamespace bool) ([]byte, } for _, pid := range pids { filePath := path.Join(rootfs, "/proc", pid, "/root", filepath) - glog.V(3).Infof("Trying path %q", filePath) + klog.V(3).Infof("Trying path %q", filePath) data, err := ioutil.ReadFile(filePath) if err == nil { return data, err @@ -266,6 +268,10 @@ func (c *containerData) getContainerPids(inHostNamespace bool) ([]string, error) func (c *containerData) GetProcessList(cadvisorContainer string, inHostNamespace bool) ([]v2.ProcessInfo, error) { // report all processes for root. isRoot := c.info.Name == "/" + rootfs := "/" + if !inHostNamespace { + rootfs = "/rootfs" + } format := "user,pid,ppid,stime,pcpu,pmem,rss,vsz,stat,time,comm,cgroup" out, err := c.getPsOutput(inHostNamespace, format) if err != nil { @@ -324,6 +330,15 @@ func (c *containerData) GetProcessList(cadvisorContainer string, inHostNamespace cgroupPath = cgroup } + var fdCount int + dirPath := path.Join(rootfs, "/proc", strconv.Itoa(pid), "fd") + fds, err := ioutil.ReadDir(dirPath) + if err != nil { + klog.V(4).Infof("error while listing directory %q to measure fd count: %v", dirPath, err) + continue + } + fdCount = len(fds) + if isRoot || c.info.Name == cgroup { processes = append(processes, v2.ProcessInfo{ User: fields[0], @@ -338,6 +353,7 @@ func (c *containerData) GetProcessList(cadvisorContainer string, inHostNamespace RunningTime: fields[9], Cmd: fields[10], CgroupPath: cgroupPath, + FdCount: fdCount, }) } } @@ -377,7 +393,7 @@ func newContainerData(containerName string, memoryCache *memory.InMemoryCache, h // Create cpu load reader. 
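The fd_count measurement added to GetProcessList above is a directory listing: the number of open descriptors for a pid is the number of entries under /proc/&lt;pid&gt;/fd (prefixed with /rootfs when cAdvisor runs in a container). A standalone sketch that counts this process's own descriptors; Linux-only:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"strconv"
)

// fdCount returns the number of open file descriptors for pid by listing
// /proc/<pid>/fd, the same approach as the GetProcessList change above.
func fdCount(rootfs string, pid int) (int, error) {
	dirPath := path.Join(rootfs, "/proc", strconv.Itoa(pid), "fd")
	fds, err := ioutil.ReadDir(dirPath)
	if err != nil {
		return 0, err
	}
	return len(fds), nil
}

func main() {
	n, err := fdCount("/", os.Getpid())
	if err != nil {
		fmt.Printf("error while listing fd directory: %v\n", err)
		return
	}
	fmt.Printf("open file descriptors: %d\n", n)
}
```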
loadReader, err := cpuload.New() if err != nil { - glog.Warningf("Could not initialize cpu load reader for %q: %s", ref.Name, err) + klog.Warningf("Could not initialize cpu load reader for %q: %s", ref.Name, err) } else { cont.loadReader = loadReader } @@ -390,7 +406,7 @@ func newContainerData(containerName string, memoryCache *memory.InMemoryCache, h cont.summaryReader, err = summary.New(cont.info.Spec) if err != nil { cont.summaryReader = nil - glog.Warningf("Failed to create summary reader for %q: %v", ref.Name, err) + klog.Warningf("Failed to create summary reader for %q: %v", ref.Name, err) } return cont, nil @@ -403,7 +419,7 @@ func (self *containerData) nextHousekeepingInterval() time.Duration { stats, err := self.memoryCache.RecentStats(self.info.Name, empty, empty, 2) if err != nil { if self.allowErrorLogging() { - glog.Warningf("Failed to get RecentStats(%q) while determining the next housekeeping: %v", self.info.Name, err) + klog.Warningf("Failed to get RecentStats(%q) while determining the next housekeeping: %v", self.info.Name, err) } } else if len(stats) == 2 { // TODO(vishnuk): Use no processes as a signal. @@ -433,7 +449,7 @@ func (c *containerData) housekeeping() { if c.loadReader != nil { err := c.loadReader.Start() if err != nil { - glog.Warningf("Could not start cpu load stat collector for %q: %s", c.info.Name, err) + klog.Warningf("Could not start cpu load stat collector for %q: %s", c.info.Name, err) } defer c.loadReader.Stop() } @@ -445,7 +461,7 @@ func (c *containerData) housekeeping() { } // Housekeep every second. - glog.V(3).Infof("Start housekeeping for container %q\n", c.info.Name) + klog.V(3).Infof("Start housekeeping for container %q\n", c.info.Name) houseKeepingTimer := c.clock.NewTimer(0 * time.Second) defer houseKeepingTimer.Stop() for { @@ -466,7 +482,7 @@ func (c *containerData) housekeeping() { stats, err := c.memoryCache.RecentStats(c.info.Name, empty, empty, numSamples) if err != nil { if c.allowErrorLogging() { - glog.Warningf("[%s] Failed to get recent stats for logging usage: %v", c.info.Name, err) + klog.Warningf("[%s] Failed to get recent stats for logging usage: %v", c.info.Name, err) } } else if len(stats) < numSamples { // Ignore, not enough stats yet. @@ -483,7 +499,7 @@ func (c *containerData) housekeeping() { usageInCores := float64(usageCpuNs) / float64(stats[numSamples-1].Timestamp.Sub(stats[0].Timestamp).Nanoseconds()) usageInHuman := units.HumanSize(float64(usageMemory)) // Don't set verbosity since this is already protected by the logUsage flag. - glog.Infof("[%s] %.3f cores (average: %.3f cores), %s of memory", c.info.Name, instantUsageInCores, usageInCores, usageInHuman) + klog.Infof("[%s] %.3f cores (average: %.3f cores), %s of memory", c.info.Name, instantUsageInCores, usageInCores, usageInHuman) } } houseKeepingTimer.Reset(c.nextHousekeepingInterval()) @@ -504,13 +520,13 @@ func (c *containerData) housekeepingTick(timer <-chan time.Time, longHousekeepin err := c.updateStats() if err != nil { if c.allowErrorLogging() { - glog.Warningf("Failed to update stats for container \"%s\": %s", c.info.Name, err) + klog.Warningf("Failed to update stats for container \"%s\": %s", c.info.Name, err) } } // Log if housekeeping took too long. 
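For the cgroupPathRegExp switch from devices to memory above: a /proc/&lt;pid&gt;/cgroup entry has the form hierarchy:controller:path, and the pattern memory[^:]*:(.*?)[,;$] pulls out the path for the memory controller, chosen per the new comment because memory is enabled for most containers. A quick check of the pattern on inline input (cAdvisor reads the real thing from ps output):

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as the vendored code; matches the memory controller entry.
var cgroupPathRegExp = regexp.MustCompile(`memory[^:]*:(.*?)[,;$]`)

func getCgroupPath(cgroups string) string {
	matches := cgroupPathRegExp.FindSubmatch([]byte(cgroups))
	if len(matches) != 2 {
		// Fall back to root - the memory hierarchy might not be enabled.
		return "/"
	}
	return string(matches[1])
}

func main() {
	fmt.Println(getCgroupPath("4:memory:/docker/abc123;")) // /docker/abc123
	fmt.Println(getCgroupPath("2:cpuset:/;"))              // / (no memory entry)
}
```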
duration := c.clock.Since(start) if duration >= longHousekeeping { - glog.V(3).Infof("[%s] Housekeeping took %s", c.info.Name, duration) + klog.V(3).Infof("[%s] Housekeeping took %s", c.info.Name, duration) } c.notifyOnDemand() c.statsLastUpdatedTime = c.clock.Now() @@ -584,7 +600,7 @@ func (c *containerData) updateStats() error { err := c.summaryReader.AddSample(*stats) if err != nil { // Ignore summary errors for now. - glog.V(2).Infof("Failed to add summary stats for %q: %v", c.info.Name, err) + klog.V(2).Infof("Failed to add summary stats for %q: %v", c.info.Name, err) } } var customStatsErr error diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/manager/manager.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/manager/manager.go index 95054ae8b8b2..5c62bd66ebee 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/manager/manager.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/manager/manager.go @@ -33,6 +33,7 @@ import ( "github.com/google/cadvisor/container/containerd" "github.com/google/cadvisor/container/crio" "github.com/google/cadvisor/container/docker" + "github.com/google/cadvisor/container/mesos" "github.com/google/cadvisor/container/raw" "github.com/google/cadvisor/container/rkt" "github.com/google/cadvisor/container/systemd" @@ -48,9 +49,9 @@ import ( "github.com/google/cadvisor/utils/sysfs" "github.com/google/cadvisor/version" - "github.com/golang/glog" "github.com/opencontainers/runc/libcontainer/cgroups" "golang.org/x/net/context" + "k8s.io/klog" "k8s.io/utils/clock" ) @@ -141,7 +142,7 @@ type Manager interface { } // New takes a memory storage and returns a new manager. -func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingInterval time.Duration, allowDynamicHousekeeping bool, ignoreMetricsSet container.MetricSet, collectorHttpClient *http.Client) (Manager, error) { +func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingInterval time.Duration, allowDynamicHousekeeping bool, includedMetricsSet container.MetricSet, collectorHttpClient *http.Client, rawContainerCgroupPathPrefixWhiteList []string) (Manager, error) { if memoryCache == nil { return nil, fmt.Errorf("manager requires memory storage") } @@ -151,7 +152,7 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn if err != nil { return nil, err } - glog.V(2).Infof("cAdvisor running in container: %q", selfContainer) + klog.V(2).Infof("cAdvisor running in container: %q", selfContainer) var ( dockerStatus info.DockerStatus @@ -162,7 +163,7 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn dockerStatus = retryDockerStatus() if tmpRktPath, err := rkt.RktPath(); err != nil { - glog.V(5).Infof("Rkt not connected: %v", err) + klog.V(5).Infof("Rkt not connected: %v", err) } else { rktPath = tmpRktPath } @@ -173,7 +174,7 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn } crioInfo, err := crioClient.Info() if err != nil { - glog.V(5).Infof("CRI-O not connected: %v", err) + klog.V(5).Infof("CRI-O not connected: %v", err) } context := fs.Context{ @@ -203,20 +204,21 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn eventsChannel := make(chan watcher.ContainerEvent, 16) newManager := &manager{ - containers: make(map[namespacedContainerName]*containerData), - quitChannels: make([]chan error, 0, 2), - memoryCache: memoryCache, - fsInfo: fsInfo, - cadvisorContainer: selfContainer, - inHostNamespace: 
inHostNamespace, - startupTime: time.Now(), - maxHousekeepingInterval: maxHousekeepingInterval, - allowDynamicHousekeeping: allowDynamicHousekeeping, - ignoreMetrics: ignoreMetricsSet, - containerWatchers: []watcher.ContainerWatcher{}, - eventsChannel: eventsChannel, - collectorHttpClient: collectorHttpClient, - nvidiaManager: &accelerators.NvidiaManager{}, + containers: make(map[namespacedContainerName]*containerData), + quitChannels: make([]chan error, 0, 2), + memoryCache: memoryCache, + fsInfo: fsInfo, + cadvisorContainer: selfContainer, + inHostNamespace: inHostNamespace, + startupTime: time.Now(), + maxHousekeepingInterval: maxHousekeepingInterval, + allowDynamicHousekeeping: allowDynamicHousekeeping, + includedMetrics: includedMetricsSet, + containerWatchers: []watcher.ContainerWatcher{}, + eventsChannel: eventsChannel, + collectorHttpClient: collectorHttpClient, + nvidiaManager: &accelerators.NvidiaManager{}, + rawContainerCgroupPathPrefixWhiteList: rawContainerCgroupPathPrefixWhiteList, } machineInfo, err := machine.Info(sysfs, fsInfo, inHostNamespace) @@ -224,13 +226,13 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn return nil, err } newManager.machineInfo = *machineInfo - glog.V(1).Infof("Machine: %+v", newManager.machineInfo) + klog.V(1).Infof("Machine: %+v", newManager.machineInfo) versionInfo, err := getVersionInfo() if err != nil { return nil, err } - glog.V(1).Infof("Version: %+v", *versionInfo) + klog.V(1).Infof("Version: %+v", *versionInfo) newManager.eventHandler = events.NewEventManager(parseEventsStoragePolicy()) return newManager, nil @@ -248,9 +250,9 @@ func retryDockerStatus() info.DockerStatus { switch err { case context.DeadlineExceeded: - glog.Warningf("Timeout trying to communicate with docker during initialization, will retry") + klog.Warningf("Timeout trying to communicate with docker during initialization, will retry") default: - glog.V(5).Infof("Docker not connected: %v", err) + klog.V(5).Infof("Docker not connected: %v", err) return info.DockerStatus{} } @@ -283,23 +285,25 @@ type manager struct { startupTime time.Time maxHousekeepingInterval time.Duration allowDynamicHousekeeping bool - ignoreMetrics container.MetricSet + includedMetrics container.MetricSet containerWatchers []watcher.ContainerWatcher eventsChannel chan watcher.ContainerEvent collectorHttpClient *http.Client nvidiaManager accelerators.AcceleratorManager + // List of raw container cgroup path prefix whitelist. + rawContainerCgroupPathPrefixWhiteList []string } // Start the container manager. 
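The ignoreMetrics → includedMetrics rename threaded through the manager inverts every gating condition from "is this metric ignored?" to "is this metric included?". A stand-in sketch of the set type and the positive check, assuming the map-with-Has shape of cAdvisor's container.MetricSet:

```go
package main

import "fmt"

type MetricKind string

const (
	DiskUsageMetrics    MetricKind = "disk"
	NetworkUsageMetrics MetricKind = "network"
)

// MetricSet mimics the vendored container.MetricSet: a set of metric kinds.
type MetricSet map[MetricKind]struct{}

func (ms MetricSet) Has(mk MetricKind) bool { _, ok := ms[mk]; return ok }
func (ms MetricSet) Add(mk MetricKind)      { ms[mk] = struct{}{} }

func main() {
	included := MetricSet{}
	included.Add(DiskUsageMetrics)

	// Old style asked !ignoreMetrics.Has(kind); new style asks the positive.
	if included.Has(DiskUsageMetrics) {
		fmt.Println("attach fs usage handler")
	}
	if !included.Has(NetworkUsageMetrics) {
		fmt.Println("skip network stats")
	}
}
```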
func (self *manager) Start() error { - err := docker.Register(self, self.fsInfo, self.ignoreMetrics) + err := docker.Register(self, self.fsInfo, self.includedMetrics) if err != nil { - glog.V(5).Infof("Registration of the Docker container factory failed: %v.", err) + klog.V(5).Infof("Registration of the Docker container factory failed: %v.", err) } - err = rkt.Register(self, self.fsInfo, self.ignoreMetrics) + err = rkt.Register(self, self.fsInfo, self.includedMetrics) if err != nil { - glog.V(5).Infof("Registration of the rkt container factory failed: %v", err) + klog.V(5).Infof("Registration of the rkt container factory failed: %v", err) } else { watcher, err := rktwatcher.NewRktContainerWatcher() if err != nil { @@ -308,24 +312,29 @@ func (self *manager) Start() error { self.containerWatchers = append(self.containerWatchers, watcher) } - err = containerd.Register(self, self.fsInfo, self.ignoreMetrics) + err = containerd.Register(self, self.fsInfo, self.includedMetrics) if err != nil { - glog.V(5).Infof("Registration of the containerd container factory failed: %v", err) + klog.V(5).Infof("Registration of the containerd container factory failed: %v", err) } - err = crio.Register(self, self.fsInfo, self.ignoreMetrics) + err = crio.Register(self, self.fsInfo, self.includedMetrics) if err != nil { - glog.V(5).Infof("Registration of the crio container factory failed: %v", err) + klog.V(5).Infof("Registration of the crio container factory failed: %v", err) } - err = systemd.Register(self, self.fsInfo, self.ignoreMetrics) + err = mesos.Register(self, self.fsInfo, self.includedMetrics) if err != nil { - glog.V(5).Infof("Registration of the systemd container factory failed: %v", err) + klog.V(5).Infof("Registration of the mesos container factory failed: %v", err) } - err = raw.Register(self, self.fsInfo, self.ignoreMetrics) + err = systemd.Register(self, self.fsInfo, self.includedMetrics) if err != nil { - glog.Errorf("Registration of the raw container factory failed: %v", err) + klog.V(5).Infof("Registration of the systemd container factory failed: %v", err) + } + + err = raw.Register(self, self.fsInfo, self.includedMetrics, self.rawContainerCgroupPathPrefixWhiteList) + if err != nil { + klog.Errorf("Registration of the raw container factory failed: %v", err) } rawWatcher, err := rawwatcher.NewRawContainerWatcher() @@ -337,7 +346,7 @@ func (self *manager) Start() error { // Watch for OOMs. err = self.watchForNewOoms() if err != nil { - glog.Warningf("Could not configure a source for OOM detection, disabling OOM events: %v", err) + klog.Warningf("Could not configure a source for OOM detection, disabling OOM events: %v", err) } // If there are no factories, don't start any housekeeping and serve the information we do have. @@ -353,12 +362,12 @@ func (self *manager) Start() error { if err != nil { return err } - glog.V(2).Infof("Starting recovery of all containers") + klog.V(2).Infof("Starting recovery of all containers") err = self.detectSubcontainers("/") if err != nil { return err } - glog.V(2).Infof("Recovery completed") + klog.V(2).Infof("Recovery completed") // Watch for new container. quitWatcher := make(chan error) @@ -409,18 +418,18 @@ func (self *manager) globalHousekeeping(quit chan error) { // Check for new containers. err := self.detectSubcontainers("/") if err != nil { - glog.Errorf("Failed to detect containers: %s", err) + klog.Errorf("Failed to detect containers: %s", err) } // Log if housekeeping took too long. 
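Start above registers one factory per supported runtime and tolerates individual failures: docker, rkt, containerd, crio, the newly added mesos, and systemd log at V(5) and the manager keeps going, while a raw-factory failure is logged at error severity since the raw factory backs plain cgroup containers. A compressed sketch of that policy with illustrative names:

```go
package main

import "fmt"

type registerFunc func() error

// startFactories mirrors the tolerance policy in Start: optional runtime
// factories may fail quietly; a raw factory failure is surfaced loudly.
func startFactories(optional map[string]registerFunc, raw registerFunc) {
	for name, register := range optional {
		if err := register(); err != nil {
			fmt.Printf("V(5): registration of the %s container factory failed: %v\n", name, err)
		}
	}
	if err := raw(); err != nil {
		fmt.Printf("ERROR: registration of the raw container factory failed: %v\n", err)
	}
}

func main() {
	startFactories(map[string]registerFunc{
		"docker": func() error { return fmt.Errorf("docker not connected") },
		"mesos":  func() error { return nil },
	}, func() error { return nil })
}
```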
duration := time.Since(start) if duration >= longHousekeeping { - glog.V(3).Infof("Global Housekeeping(%d) took %s", t.Unix(), duration) + klog.V(3).Infof("Global Housekeeping(%d) took %s", t.Unix(), duration) } case <-quit: // Quit if asked to do so. quit <- nil - glog.Infof("Exiting global housekeeping thread") + klog.Infof("Exiting global housekeeping thread") return } } @@ -619,6 +628,11 @@ func (self *manager) AllDockerContainers(query *info.ContainerInfoRequest) (map[ for name, cont := range containers { inf, err := self.containerDataToContainerInfo(cont, query) if err != nil { + // Ignore the error because of race condition and return best-effort result. + if err == memory.ErrDataNotFound { + klog.Warningf("Error getting data for container %s because of race condition", name) + continue + } return nil, err } output[name] = *inf @@ -876,7 +890,7 @@ func (m *manager) registerCollectors(collectorConfigs map[string]string, cont *c if err != nil { return fmt.Errorf("failed to read config file %q for config %q, container %q: %v", k, v, cont.info.Name, err) } - glog.V(4).Infof("Got config from %q: %q", v, configFile) + klog.V(4).Infof("Got config from %q: %q", v, configFile) if strings.HasPrefix(k, "prometheus") || strings.HasPrefix(k, "Prometheus") { newCollector, err := collector.NewPrometheusCollector(k, configFile, *applicationMetricsCountLimit, cont.handler, m.collectorHttpClient) @@ -954,7 +968,7 @@ func (m *manager) createContainerLocked(containerName string, watchSource watche } if !accept { // ignoring this container. - glog.V(4).Infof("ignoring container %q", containerName) + klog.V(4).Infof("ignoring container %q", containerName) return nil } collectorManager, err := collector.NewCollectorManager() @@ -969,11 +983,11 @@ func (m *manager) createContainerLocked(containerName string, watchSource watche } devicesCgroupPath, err := handler.GetCgroupPath("devices") if err != nil { - glog.Warningf("Error getting devices cgroup path: %v", err) + klog.Warningf("Error getting devices cgroup path: %v", err) } else { cont.nvidiaCollector, err = m.nvidiaManager.GetCollector(devicesCgroupPath) if err != nil { - glog.V(4).Infof("GPU metrics may be unavailable/incomplete for container %q: %v", cont.info.Name, err) + klog.V(4).Infof("GPU metrics may be unavailable/incomplete for container %q: %v", cont.info.Name, err) } } @@ -982,7 +996,7 @@ func (m *manager) createContainerLocked(containerName string, watchSource watche collectorConfigs := collector.GetCollectorConfigs(labels) err = m.registerCollectors(collectorConfigs, cont) if err != nil { - glog.Warningf("Failed to register collectors for %q: %v", containerName, err) + klog.Warningf("Failed to register collectors for %q: %v", containerName, err) } // Add the container name and all its aliases. The aliases must be within the namespace of the factory. 
@@ -994,7 +1008,7 @@ func (m *manager) createContainerLocked(containerName string, watchSource watche }] = cont } - glog.V(3).Infof("Added container: %q (aliases: %v, namespace: %q)", containerName, cont.info.Aliases, cont.info.Namespace) + klog.V(3).Infof("Added container: %q (aliases: %v, namespace: %q)", containerName, cont.info.Aliases, cont.info.Namespace) contSpec, err := cont.handler.GetSpec() if err != nil { @@ -1051,7 +1065,7 @@ func (m *manager) destroyContainerLocked(containerName string) error { Name: alias, }) } - glog.V(3).Infof("Destroyed container: %q (aliases: %v, namespace: %q)", containerName, cont.info.Aliases, cont.info.Namespace) + klog.V(3).Infof("Destroyed container: %q (aliases: %v, namespace: %q)", containerName, cont.info.Aliases, cont.info.Namespace) contRef, err := cont.handler.ContainerReference() if err != nil { @@ -1072,22 +1086,25 @@ func (m *manager) destroyContainerLocked(containerName string) error { // Detect all containers that have been added or deleted from the specified container. func (m *manager) getContainersDiff(containerName string) (added []info.ContainerReference, removed []info.ContainerReference, err error) { - m.containersLock.RLock() - defer m.containersLock.RUnlock() - // Get all subcontainers recursively. + m.containersLock.RLock() cont, ok := m.containers[namespacedContainerName{ Name: containerName, }] + m.containersLock.RUnlock() if !ok { return nil, nil, fmt.Errorf("failed to find container %q while checking for new containers", containerName) } allContainers, err := cont.handler.ListContainers(container.ListRecursive) + if err != nil { return nil, nil, err } allContainers = append(allContainers, info.ContainerReference{Name: containerName}) + m.containersLock.RLock() + defer m.containersLock.RUnlock() + // Determine which were added and which were removed. 
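The getContainersDiff rework above narrows the lock scope: the read lock is released around handler.ListContainers, which can block on a runtime API, and re-acquired only for the map comparison. A self-contained sketch of the pattern with sync.RWMutex (registry and diff are illustrative names):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type registry struct {
	mu         sync.RWMutex
	containers map[string]struct{}
}

// diff checks the container exists, lists subcontainers without holding the
// lock, then re-locks to compare against the known set - the same shape as
// the reworked getContainersDiff.
func (r *registry) diff(name string, list func() ([]string, error)) ([]string, error) {
	r.mu.RLock()
	_, ok := r.containers[name]
	r.mu.RUnlock() // do not hold the lock across the external call
	if !ok {
		return nil, fmt.Errorf("failed to find container %q", name)
	}

	all, err := list() // may block on a container runtime API
	if err != nil {
		return nil, err
	}

	r.mu.RLock()
	defer r.mu.RUnlock()
	var added []string
	for _, n := range all {
		if _, known := r.containers[n]; !known {
			added = append(added, n)
		}
	}
	return added, nil
}

func main() {
	r := &registry{containers: map[string]struct{}{"/": {}}}
	added, _ := r.diff("/", func() ([]string, error) {
		time.Sleep(10 * time.Millisecond) // stand-in for a slow runtime call
		return []string{"/", "/docker/abc"}, nil
	})
	fmt.Println(added) // [/docker/abc]
}
```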
allContainersSet := make(map[string]*containerData) for name, d := range m.containers { @@ -1127,7 +1144,7 @@ func (m *manager) detectSubcontainers(containerName string) error { for _, cont := range added { err = m.createContainer(cont.Name, watcher.Raw) if err != nil { - glog.Errorf("Failed to create existing container: %s: %s", cont.Name, err) + klog.Errorf("Failed to create existing container: %s: %s", cont.Name, err) } } @@ -1135,7 +1152,7 @@ func (m *manager) detectSubcontainers(containerName string) error { for _, cont := range removed { err = m.destroyContainer(cont.Name) if err != nil { - glog.Errorf("Failed to destroy existing container: %s: %s", cont.Name, err) + klog.Errorf("Failed to destroy existing container: %s: %s", cont.Name, err) } } @@ -1175,7 +1192,7 @@ func (self *manager) watchForNewContainers(quit chan error) error { err = self.destroyContainer(event.Name) } if err != nil { - glog.Warningf("Failed to process watch event %+v: %v", event, err) + klog.Warningf("Failed to process watch event %+v: %v", event, err) } case <-quit: var errs partialFailure @@ -1192,7 +1209,7 @@ func (self *manager) watchForNewContainers(quit chan error) error { quit <- errs } else { quit <- nil - glog.Infof("Exiting thread watching subcontainers") + klog.Infof("Exiting thread watching subcontainers") return } } @@ -1202,7 +1219,7 @@ func (self *manager) watchForNewContainers(quit chan error) error { } func (self *manager) watchForNewOoms() error { - glog.V(2).Infof("Started watching for new ooms in manager") + klog.V(2).Infof("Started watching for new ooms in manager") outStream := make(chan *oomparser.OomInstance, 10) oomLog, err := oomparser.New() if err != nil { @@ -1220,9 +1237,9 @@ func (self *manager) watchForNewOoms() error { } err := self.eventHandler.AddEvent(newEvent) if err != nil { - glog.Errorf("failed to add OOM event for %q: %v", oomInstance.ContainerName, err) + klog.Errorf("failed to add OOM event for %q: %v", oomInstance.ContainerName, err) } - glog.V(3).Infof("Created an OOM event in container %q at %v", oomInstance.ContainerName, oomInstance.TimeOfDeath) + klog.V(3).Infof("Created an OOM event in container %q at %v", oomInstance.ContainerName, oomInstance.TimeOfDeath) newEvent = &info.Event{ ContainerName: oomInstance.VictimContainerName, @@ -1237,7 +1254,7 @@ func (self *manager) watchForNewOoms() error { } err = self.eventHandler.AddEvent(newEvent) if err != nil { - glog.Errorf("failed to add OOM kill event for %q: %v", oomInstance.ContainerName, err) + klog.Errorf("failed to add OOM kill event for %q: %v", oomInstance.ContainerName, err) } } }() @@ -1268,12 +1285,12 @@ func parseEventsStoragePolicy() events.StoragePolicy { for _, part := range parts { items := strings.Split(part, "=") if len(items) != 2 { - glog.Warningf("Unknown event storage policy %q when parsing max age", part) + klog.Warningf("Unknown event storage policy %q when parsing max age", part) continue } dur, err := time.ParseDuration(items[1]) if err != nil { - glog.Warningf("Unable to parse event max age duration %q: %v", items[1], err) + klog.Warningf("Unable to parse event max age duration %q: %v", items[1], err) continue } if items[0] == "default" { @@ -1288,12 +1305,12 @@ func parseEventsStoragePolicy() events.StoragePolicy { for _, part := range parts { items := strings.Split(part, "=") if len(items) != 2 { - glog.Warningf("Unknown event storage policy %q when parsing max event limit", part) + klog.Warningf("Unknown event storage policy %q when parsing max event limit", part) continue } val, err := 
strconv.Atoi(items[1]) if err != nil { - glog.Warningf("Unable to parse integer from %q: %v", items[1], err) + klog.Warningf("Unable to parse integer from %q: %v", items[1], err) continue } if items[0] == "default" { diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/manager/watcher/raw/raw.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/manager/watcher/raw/raw.go index 5a8b79d39e67..76983cbe00f1 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/manager/watcher/raw/raw.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/manager/watcher/raw/raw.go @@ -26,9 +26,9 @@ import ( "github.com/google/cadvisor/container/common" "github.com/google/cadvisor/container/libcontainer" "github.com/google/cadvisor/manager/watcher" + inotify "github.com/sigma/go-inotify" - "github.com/golang/glog" - "golang.org/x/exp/inotify" + "k8s.io/klog" ) type rawContainerWatcher struct { @@ -84,10 +84,10 @@ func (self *rawContainerWatcher) Start(events chan watcher.ContainerEvent) error case event := <-self.watcher.Event(): err := self.processEvent(event, events) if err != nil { - glog.Warningf("Error while processing event (%+v): %v", event, err) + klog.Warningf("Error while processing event (%+v): %v", event, err) } case err := <-self.watcher.Error(): - glog.Warningf("Error while watching %q:", "/", err) + klog.Warningf("Error while watching %q: %v", "/", err) case <-self.stopWatcher: err := self.watcher.Close() if err == nil { @@ -126,7 +126,7 @@ func (self *rawContainerWatcher) watchDirectory(events chan watcher.ContainerEve if cleanup { _, err := self.watcher.RemoveWatch(containerName, dir) if err != nil { - glog.Warningf("Failed to remove inotify watch for %q: %v", dir, err) + klog.Warningf("Failed to remove inotify watch for %q: %v", dir, err) } } }() @@ -143,7 +143,7 @@ func (self *rawContainerWatcher) watchDirectory(events chan watcher.ContainerEve subcontainerName := path.Join(containerName, entry.Name()) alreadyWatchingSubDir, err := self.watchDirectory(events, entryPath, subcontainerName) if err != nil { - glog.Errorf("Failed to watch directory %q: %v", entryPath, err) + klog.Errorf("Failed to watch directory %q: %v", entryPath, err) if os.IsNotExist(err) { // The directory may have been removed before watching. Try to watch the other // subdirectories. 
(https://github.com/kubernetes/kubernetes/issues/28997) diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/manager/watcher/rkt/rkt.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/manager/watcher/rkt/rkt.go index 4c54d9b94e07..a2d910f89e0d 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/manager/watcher/rkt/rkt.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/manager/watcher/rkt/rkt.go @@ -23,8 +23,8 @@ import ( "github.com/google/cadvisor/manager/watcher" rktapi "github.com/coreos/rkt/api/v1alpha" - "github.com/golang/glog" "golang.org/x/net/context" + "k8s.io/klog" ) type rktContainerWatcher struct { @@ -53,7 +53,7 @@ func (self *rktContainerWatcher) Stop() error { } func (self *rktContainerWatcher) detectRktContainers(events chan watcher.ContainerEvent) { - glog.V(1).Infof("Starting detectRktContainers thread") + klog.V(1).Infof("Starting detectRktContainers thread") ticker := time.Tick(10 * time.Second) curpods := make(map[string]*rktapi.Pod) @@ -62,13 +62,13 @@ func (self *rktContainerWatcher) detectRktContainers(events chan watcher.Contain case <-ticker: pods, err := listRunningPods() if err != nil { - glog.Errorf("detectRktContainers: listRunningPods failed: %v", err) + klog.Errorf("detectRktContainers: listRunningPods failed: %v", err) continue } curpods = self.syncRunningPods(pods, events, curpods) case <-self.stopWatcher: - glog.Infof("Exiting rktContainer Thread") + klog.Infof("Exiting rktContainer Thread") return } } @@ -92,7 +92,7 @@ func (self *rktContainerWatcher) syncRunningPods(pods []*rktapi.Pod, events chan for id, pod := range curpods { if _, ok := newpods[id]; !ok { for _, cgroup := range podToCgroup(pod) { - glog.V(2).Infof("cgroup to delete = %v", cgroup) + klog.V(2).Infof("cgroup to delete = %v", cgroup) self.sendDestroyEvent(cgroup, events) } } diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/metrics/prometheus.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/metrics/prometheus.go index f0aaa0ede51f..38502743edcc 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/metrics/prometheus.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/metrics/prometheus.go @@ -19,10 +19,11 @@ import ( "regexp" "time" + "github.com/google/cadvisor/container" info "github.com/google/cadvisor/info/v1" - "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" + "k8s.io/klog" ) // infoProvider will usually be manager.Manager, but can be swapped out for testing. @@ -108,13 +109,14 @@ type PrometheusCollector struct { errors prometheus.Gauge containerMetrics []containerMetric containerLabelsFunc ContainerLabelsFunc + includedMetrics container.MetricSet } // NewPrometheusCollector returns a new PrometheusCollector. The passed // ContainerLabelsFunc specifies which base labels will be attached to all // exported metrics. If left to nil, the DefaultContainerLabels function // will be used instead. 
-func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCollector { +func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetrics container.MetricSet) *PrometheusCollector { if f == nil { f = DefaultContainerLabels } @@ -134,7 +136,13 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo getValues: func(s *info.ContainerStats) metricValues { return metricValues{{value: float64(time.Now().Unix())}} }, - }, { + }, + }, + includedMetrics: includedMetrics, + } + if includedMetrics.Has(container.CpuUsageMetrics) { + c.containerMetrics = append(c.containerMetrics, []containerMetric{ + { name: "container_cpu_user_seconds_total", help: "Cumulative user cpu time consumed in seconds.", valueType: prometheus.CounterValue, @@ -197,7 +205,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo getValues: func(s *info.ContainerStats) metricValues { return metricValues{{value: float64(s.Cpu.CFS.ThrottledTime) / float64(time.Second)}} }, - }, { + }, + }...) + } + if includedMetrics.Has(container.ProcessSchedulerMetrics) { + c.containerMetrics = append(c.containerMetrics, []containerMetric{ + { name: "container_cpu_schedstat_run_seconds_total", help: "Time duration the processes of the container have run on the CPU.", valueType: prometheus.CounterValue, @@ -218,7 +231,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo getValues: func(s *info.ContainerStats) metricValues { return metricValues{{value: float64(s.Cpu.Schedstat.RunPeriods)}} }, - }, { + }, + }...) + } + if includedMetrics.Has(container.CpuLoadMetrics) { + c.containerMetrics = append(c.containerMetrics, []containerMetric{ + { name: "container_cpu_load_average_10s", help: "Value of container cpu load average over the last 10 seconds.", valueType: prometheus.GaugeValue, @@ -226,6 +244,40 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo return metricValues{{value: float64(s.Cpu.LoadAverage)}} }, }, { + name: "container_tasks_state", + help: "Number of tasks in given state", + extraLabels: []string{"state"}, + valueType: prometheus.GaugeValue, + getValues: func(s *info.ContainerStats) metricValues { + return metricValues{ + { + value: float64(s.TaskStats.NrSleeping), + labels: []string{"sleeping"}, + }, + { + value: float64(s.TaskStats.NrRunning), + labels: []string{"running"}, + }, + { + value: float64(s.TaskStats.NrStopped), + labels: []string{"stopped"}, + }, + { + value: float64(s.TaskStats.NrUninterruptible), + labels: []string{"uninterruptible"}, + }, + { + value: float64(s.TaskStats.NrIoWait), + labels: []string{"iowaiting"}, + }, + } + }, + }, + }...) 
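The prometheus.go rework above stops building one flat containerMetrics list and instead appends each metric family only when its kind is in the included set. A minimal sketch of that gated-append pattern with stand-in types (the real MetricSet and containerMetric live in the vendored packages; prometheus.ValueType is the real client_golang type):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

type metricKind string

type metricSet map[metricKind]struct{}

func (m metricSet) Has(k metricKind) bool { _, ok := m[k]; return ok }

type containerMetric struct {
	name      string
	help      string
	valueType prometheus.ValueType
}

// buildMetrics appends metric families only for included kinds, mirroring
// the includedMetrics.Has guards introduced above.
func buildMetrics(included metricSet) []containerMetric {
	// Unconditional metrics come first, like container_last_seen.
	metrics := []containerMetric{
		{"container_last_seen", "Last time a container was seen by the exporter", prometheus.GaugeValue},
	}
	if included.Has("cpu") {
		metrics = append(metrics, containerMetric{
			"container_cpu_user_seconds_total", "Cumulative user cpu time consumed in seconds.", prometheus.CounterValue,
		})
	}
	if included.Has("memory") {
		metrics = append(metrics, containerMetric{
			"container_memory_mapped_file", "Size of memory mapped files in bytes.", prometheus.GaugeValue,
		})
	}
	return metrics
}

func main() {
	for _, m := range buildMetrics(metricSet{"memory": {}}) {
		fmt.Printf("%s: %s\n", m.name, m.help)
	}
}
```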
+ } + if includedMetrics.Has(container.MemoryUsageMetrics) { + c.containerMetrics = append(c.containerMetrics, []containerMetric{ + { name: "container_memory_cache", help: "Number of bytes of page cache memory.", valueType: prometheus.GaugeValue, @@ -239,6 +291,13 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo getValues: func(s *info.ContainerStats) metricValues { return metricValues{{value: float64(s.Memory.RSS)}} }, + }, { + name: "container_memory_mapped_file", + help: "Size of memory mapped files in bytes.", + valueType: prometheus.GaugeValue, + getValues: func(s *info.ContainerStats) metricValues { + return metricValues{{value: float64(s.Memory.MappedFile)}} + }, }, { name: "container_memory_swap", help: "Container swap usage in bytes.", @@ -279,7 +338,7 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo name: "container_memory_failures_total", help: "Cumulative count of memory allocation failures.", valueType: prometheus.CounterValue, - extraLabels: []string{"type", "scope"}, + extraLabels: []string{"failure_type", "scope"}, getValues: func(s *info.ContainerStats) metricValues { return metricValues{ { @@ -300,7 +359,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo }, } }, - }, { + }, + }...) + } + if includedMetrics.Has(container.AcceleratorUsageMetrics) { + c.containerMetrics = append(c.containerMetrics, []containerMetric{ + { name: "container_accelerator_memory_total_bytes", help: "Total accelerator memory.", valueType: prometheus.GaugeValue, @@ -345,7 +409,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo } return values }, - }, { + }, + }...) + } + if includedMetrics.Has(container.DiskUsageMetrics) { + c.containerMetrics = append(c.containerMetrics, []containerMetric{ + { name: "container_fs_inodes_free", help: "Number of available Inodes", valueType: prometheus.GaugeValue, @@ -385,7 +454,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo return float64(fs.Usage) }) }, - }, { + }, + }...) + } + if includedMetrics.Has(container.DiskIOMetrics) { + c.containerMetrics = append(c.containerMetrics, []containerMetric{ + { name: "container_fs_reads_bytes_total", help: "Cumulative count of bytes read", valueType: prometheus.CounterValue, @@ -547,7 +621,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo return float64(fs.WeightedIoTime) / float64(time.Second) }) }, - }, { + }, + }...) + } + if includedMetrics.Has(container.NetworkUsageMetrics) { + c.containerMetrics = append(c.containerMetrics, []containerMetric{ + { name: "container_network_receive_bytes_total", help: "Cumulative count of bytes received", valueType: prometheus.CounterValue, @@ -667,7 +746,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo } return values }, - }, { + }, + }...) + } + if includedMetrics.Has(container.NetworkTcpUsageMetrics) { + c.containerMetrics = append(c.containerMetrics, []containerMetric{ + { name: "container_network_tcp_usage_total", help: "tcp connection usage statistic for container", valueType: prometheus.GaugeValue, @@ -720,7 +804,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo }, } }, - }, { + }, + }...) 
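One behavioral note on the hunk above: renaming the container_memory_failures_total label from type to failure_type is a breaking change for dashboards and alert rules that select on the old label. A sketch of how the extraLabels fan-out renders, assuming the pgfault/pgmajfault and container/hierarchy values cAdvisor reports for this counter:

```go
package main

import "fmt"

// metricValue pairs a sample with its extra label values, like the
// vendored metricValues type.
type metricValue struct {
	value  float64
	labels []string
}

func main() {
	extraLabels := []string{"failure_type", "scope"} // was {"type", "scope"}
	samples := []metricValue{
		{value: 12, labels: []string{"pgfault", "container"}},
		{value: 3, labels: []string{"pgmajfault", "container"}},
	}
	for _, s := range samples {
		fmt.Printf("container_memory_failures_total{%s=%q,%s=%q} %v\n",
			extraLabels[0], s.labels[0], extraLabels[1], s.labels[1], s.value)
	}
}
```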
+ } + if includedMetrics.Has(container.NetworkUdpUsageMetrics) { + c.containerMetrics = append(c.containerMetrics, []containerMetric{ + { name: "container_network_udp_usage_total", help: "udp connection usage statistic for container", valueType: prometheus.GaugeValue, @@ -745,37 +834,28 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo }, } }, - }, { - name: "container_tasks_state", - help: "Number of tasks in given state", - extraLabels: []string{"state"}, - valueType: prometheus.GaugeValue, + }, + }...) + } + if includedMetrics.Has(container.ProcessMetrics) { + c.containerMetrics = append(c.containerMetrics, []containerMetric{ + { + name: "container_processes", + help: "Number of processes running inside the container.", + valueType: prometheus.GaugeValue, getValues: func(s *info.ContainerStats) metricValues { - return metricValues{ - { - value: float64(s.TaskStats.NrSleeping), - labels: []string{"sleeping"}, - }, - { - value: float64(s.TaskStats.NrRunning), - labels: []string{"running"}, - }, - { - value: float64(s.TaskStats.NrStopped), - labels: []string{"stopped"}, - }, - { - value: float64(s.TaskStats.NrUninterruptible), - labels: []string{"uninterruptible"}, - }, - { - value: float64(s.TaskStats.NrIoWait), - labels: []string{"iowaiting"}, - }, - } + return metricValues{{value: float64(s.Processes.ProcessCount)}} }, }, - }, + { + name: "container_file_descriptors", + help: "Number of open file descriptors for the container.", + valueType: prometheus.GaugeValue, + getValues: func(s *info.ContainerStats) metricValues { + return metricValues{{value: float64(s.Processes.FdCount)}} + }, + }, + }...) } return c @@ -842,11 +922,24 @@ func DefaultContainerLabels(container *info.ContainerInfo) map[string]string { return set } +// BaseContainerLabels implements ContainerLabelsFunc. It only exports the +// container name, first alias, and image name. +func BaseContainerLabels(container *info.ContainerInfo) map[string]string { + set := map[string]string{LabelID: container.Name} + if len(container.Aliases) > 0 { + set[LabelName] = container.Aliases[0] + } + if image := container.Spec.Image; len(image) > 0 { + set[LabelImage] = image + } + return set +} + func (c *PrometheusCollector) collectContainersInfo(ch chan<- prometheus.Metric) { containers, err := c.infoProvider.SubcontainersInfo("/", &info.ContainerInfoRequest{NumStats: 1}) if err != nil { c.errors.Set(1) - glog.Warningf("Couldn't get containers: %s", err) + klog.Warningf("Couldn't get containers: %s", err) return } rawLabels := map[string]struct{}{} @@ -855,10 +948,11 @@ func (c *PrometheusCollector) collectContainersInfo(ch chan<- prometheus.Metric) rawLabels[l] = struct{}{} } } - for _, container := range containers { + + for _, cont := range containers { values := make([]string, 0, len(rawLabels)) labels := make([]string, 0, len(rawLabels)) - containerLabels := c.containerLabelsFunc(container) + containerLabels := c.containerLabelsFunc(cont) for l := range rawLabels { labels = append(labels, sanitizeLabelName(l)) values = append(values, containerLabels[l]) @@ -866,32 +960,35 @@ func (c *PrometheusCollector) collectContainersInfo(ch chan<- prometheus.Metric) // Container spec desc := prometheus.NewDesc("container_start_time_seconds", "Start time of the container since unix epoch in seconds.", labels, nil) - ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(container.Spec.CreationTime.Unix()), values...) 
+ ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(cont.Spec.CreationTime.Unix()), values...) - if container.Spec.HasCpu { + if cont.Spec.HasCpu { desc = prometheus.NewDesc("container_spec_cpu_period", "CPU period of the container.", labels, nil) - ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(container.Spec.Cpu.Period), values...) - if container.Spec.Cpu.Quota != 0 { + ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(cont.Spec.Cpu.Period), values...) + if cont.Spec.Cpu.Quota != 0 { desc = prometheus.NewDesc("container_spec_cpu_quota", "CPU quota of the container.", labels, nil) - ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(container.Spec.Cpu.Quota), values...) + ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(cont.Spec.Cpu.Quota), values...) } desc := prometheus.NewDesc("container_spec_cpu_shares", "CPU share of the container.", labels, nil) - ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(container.Spec.Cpu.Limit), values...) + ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(cont.Spec.Cpu.Limit), values...) } - if container.Spec.HasMemory { + if cont.Spec.HasMemory { desc := prometheus.NewDesc("container_spec_memory_limit_bytes", "Memory limit for the container.", labels, nil) - ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, specMemoryValue(container.Spec.Memory.Limit), values...) + ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, specMemoryValue(cont.Spec.Memory.Limit), values...) desc = prometheus.NewDesc("container_spec_memory_swap_limit_bytes", "Memory swap limit for the container.", labels, nil) - ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, specMemoryValue(container.Spec.Memory.SwapLimit), values...) + ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, specMemoryValue(cont.Spec.Memory.SwapLimit), values...) desc = prometheus.NewDesc("container_spec_memory_reservation_limit_bytes", "Memory reservation limit for the container.", labels, nil) - ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, specMemoryValue(container.Spec.Memory.Reservation), values...) + ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, specMemoryValue(cont.Spec.Memory.Reservation), values...) } // Now for the actual metrics - stats := container.Stats[0] + if len(cont.Stats) == 0 { + continue + } + stats := cont.Stats[0] for _, cm := range c.containerMetrics { - if cm.condition != nil && !cm.condition(container.Spec) { + if cm.condition != nil && !cm.condition(cont.Spec) { continue } desc := cm.desc(labels) @@ -906,7 +1003,7 @@ func (c *PrometheusCollector) collectVersionInfo(ch chan<- prometheus.Metric) { versionInfo, err := c.infoProvider.GetVersionInfo() if err != nil { c.errors.Set(1) - glog.Warningf("Couldn't get version info: %s", err) + klog.Warningf("Couldn't get version info: %s", err) return } ch <- prometheus.MustNewConstMetric(versionInfoDesc, prometheus.GaugeValue, 1, []string{versionInfo.KernelVersion, versionInfo.ContainerOsVersion, versionInfo.DockerVersion, versionInfo.CadvisorVersion, versionInfo.CadvisorRevision}...) 
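The hunks above gate each metric family behind `includedMetrics.Has(...)` and add `BaseContainerLabels` as a lower-cardinality alternative to `DefaultContainerLabels`. As a rough sketch of how a caller might wire the two together — the three-argument constructor and the `manager.Manager` parameter are assumptions inferred from the hunk bodies (the hunk headers still show the older two-argument signature), so verify against the vendored copy:

```go
package metricsetup

import (
	"github.com/google/cadvisor/container"
	"github.com/google/cadvisor/manager"
	"github.com/google/cadvisor/metrics"
	"github.com/prometheus/client_golang/prometheus"
)

// RegisterCollector exposes only the memory and network metric families with
// the reduced id/name/image label set. Any family wrapped above in an
// includedMetrics.Has(...) block is never appended for other metric kinds,
// so the collector skips it entirely at scrape time.
func RegisterCollector(m manager.Manager) {
	included := container.MetricSet{
		container.MemoryUsageMetrics:  struct{}{},
		container.NetworkUsageMetrics: struct{}{},
	}
	prometheus.MustRegister(metrics.NewPrometheusCollector(m, metrics.BaseContainerLabels, included))
}
```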
@@ -916,7 +1013,7 @@ func (c *PrometheusCollector) collectMachineInfo(ch chan<- prometheus.Metric) { machineInfo, err := c.infoProvider.GetMachineInfo() if err != nil { c.errors.Set(1) - glog.Warningf("Couldn't get machine info: %s", err) + klog.Warningf("Couldn't get machine info: %s", err) return } ch <- prometheus.MustNewConstMetric(machineInfoCoresDesc, prometheus.GaugeValue, float64(machineInfo.NumCores)) diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/utils/cloudinfo/gce.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/utils/cloudinfo/gce.go index b525451c05d9..7e4856a47d60 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/utils/cloudinfo/gce.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/utils/cloudinfo/gce.go @@ -21,7 +21,7 @@ import ( info "github.com/google/cadvisor/info/v1" "cloud.google.com/go/compute/metadata" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -32,7 +32,7 @@ const ( func onGCE() bool { data, err := ioutil.ReadFile(gceProductName) if err != nil { - glog.V(2).Infof("Error while reading product_name: %v", err) + klog.V(2).Infof("Error while reading product_name: %v", err) return false } return strings.Contains(string(data), google) diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/utils/cpuload/cpuload.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/utils/cpuload/cpuload.go index f3d29b8dd05e..ca46ea1e6748 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/utils/cpuload/cpuload.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/utils/cpuload/cpuload.go @@ -19,8 +19,8 @@ import ( info "github.com/google/cadvisor/info/v1" - "github.com/golang/glog" "github.com/google/cadvisor/utils/cpuload/netlink" + "k8s.io/klog" ) type CpuLoadReader interface { @@ -41,6 +41,6 @@ func New() (CpuLoadReader, error) { if err != nil { return nil, fmt.Errorf("failed to create a netlink based cpuload reader: %v", err) } - glog.V(4).Info("Using a netlink-based load reader") + klog.V(4).Info("Using a netlink-based load reader") return reader, nil } diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/utils/cpuload/netlink/reader.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/utils/cpuload/netlink/reader.go index 1f43d755e17e..7236955215db 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/utils/cpuload/netlink/reader.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/utils/cpuload/netlink/reader.go @@ -20,7 +20,7 @@ import ( info "github.com/google/cadvisor/info/v1" - "github.com/golang/glog" + "k8s.io/klog" ) type NetlinkReader struct { @@ -38,7 +38,7 @@ func New() (*NetlinkReader, error) { if err != nil { return nil, fmt.Errorf("failed to get netlink family id for task stats: %s", err) } - glog.V(4).Infof("Family id for taskstats: %d", id) + klog.V(4).Infof("Family id for taskstats: %d", id) return &NetlinkReader{ familyId: id, conn: conn, @@ -75,6 +75,6 @@ func (self *NetlinkReader) GetCpuLoad(name string, path string) (info.LoadStats, if err != nil { return info.LoadStats{}, err } - glog.V(4).Infof("Task stats for %q: %+v", path, stats) + klog.V(4).Infof("Task stats for %q: %+v", path, stats) return stats, nil } diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/utils/oomparser/oomparser.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/utils/oomparser/oomparser.go index a73243f2e3e8..039baa30beed 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/utils/oomparser/oomparser.go +++ 
b/cluster-autoscaler/vendor/github.com/google/cadvisor/utils/oomparser/oomparser.go @@ -22,7 +22,7 @@ import ( "github.com/euank/go-kmsg-parser/kmsgparser" - "github.com/golang/glog" + "k8s.io/klog" ) var ( @@ -107,11 +107,11 @@ func (self *OomParser) StreamOoms(outStream chan<- *OomInstance) { for msg := range kmsgEntries { err := getContainerName(msg.Message, oomCurrentInstance) if err != nil { - glog.Errorf("%v", err) + klog.Errorf("%v", err) } finished, err := getProcessNamePid(msg.Message, oomCurrentInstance) if err != nil { - glog.Errorf("%v", err) + klog.Errorf("%v", err) } if finished { oomCurrentInstance.TimeOfDeath = msg.Timestamp @@ -122,7 +122,7 @@ func (self *OomParser) StreamOoms(outStream chan<- *OomInstance) { } } // Should not happen - glog.Errorf("exiting analyzeLines. OOM events will not be reported.") + klog.Errorf("exiting analyzeLines. OOM events will not be reported.") } // initializes an OomParser object. Returns an OomParser object and an error. @@ -140,11 +140,11 @@ type glogAdapter struct{} var _ kmsgparser.Logger = glogAdapter{} func (glogAdapter) Infof(format string, args ...interface{}) { - glog.V(4).Infof(format, args) + klog.V(4).Infof(format, args...) } func (glogAdapter) Warningf(format string, args ...interface{}) { - glog.V(2).Infof(format, args) + klog.V(2).Infof(format, args...) } func (glogAdapter) Errorf(format string, args ...interface{}) { - glog.Warningf(format, args) + klog.Warningf(format, args...) } diff --git a/cluster-autoscaler/vendor/github.com/google/cadvisor/zfs/watcher.go b/cluster-autoscaler/vendor/github.com/google/cadvisor/zfs/watcher.go index 1bc3fb741d39..844f52f2e700 100644 --- a/cluster-autoscaler/vendor/github.com/google/cadvisor/zfs/watcher.go +++ b/cluster-autoscaler/vendor/github.com/google/cadvisor/zfs/watcher.go @@ -18,8 +18,8 @@ import ( "sync" "time" - "github.com/golang/glog" zfs "github.com/mistifyio/go-zfs" + "k8s.io/klog" ) // zfsWatcher maintains a cache of filesystem -> usage stats for a @@ -49,7 +49,7 @@ func NewZfsWatcher(filesystem string) (*ZfsWatcher, error) { func (w *ZfsWatcher) Start() { err := w.Refresh() if err != nil { - glog.Errorf("encountered error refreshing zfs watcher: %v", err) + klog.Errorf("encountered error refreshing zfs watcher: %v", err) } for { @@ -60,12 +60,12 @@ func (w *ZfsWatcher) Start() { start := time.Now() err = w.Refresh() if err != nil { - glog.Errorf("encountered error refreshing zfs watcher: %v", err) + klog.Errorf("encountered error refreshing zfs watcher: %v", err) } // print latency for refresh duration := time.Since(start) - glog.V(5).Infof("zfs(%d) took %s", start.Unix(), duration) + klog.V(5).Infof("zfs(%d) took %s", start.Unix(), duration) } } } @@ -95,12 +95,12 @@ func (w *ZfsWatcher) Refresh() error { newCache := make(map[string]uint64) parent, err := zfs.GetDataset(w.filesystem) if err != nil { - glog.Errorf("encountered error getting zfs filesystem: %s: %v", w.filesystem, err) + klog.Errorf("encountered error getting zfs filesystem: %s: %v", w.filesystem, err) return err } children, err := parent.Children(0) if err != nil { - glog.Errorf("encountered error getting children of zfs filesystem: %s: %v", w.filesystem, err) + klog.Errorf("encountered error getting children of zfs filesystem: %s: %v", w.filesystem, err) return err } diff --git a/cluster-autoscaler/vendor/github.com/gorilla/websocket/.gitignore b/cluster-autoscaler/vendor/github.com/gorilla/websocket/.gitignore index 00268614f045..ac710204fa19 100644 --- 
a/cluster-autoscaler/vendor/github.com/gorilla/websocket/.gitignore +++ b/cluster-autoscaler/vendor/github.com/gorilla/websocket/.gitignore @@ -20,3 +20,6 @@ _cgo_export.* _testmain.go *.exe + +.idea/ +*.iml \ No newline at end of file diff --git a/cluster-autoscaler/vendor/github.com/gorilla/websocket/.travis.yml b/cluster-autoscaler/vendor/github.com/gorilla/websocket/.travis.yml index 8687342e9d40..9f233f983d63 100644 --- a/cluster-autoscaler/vendor/github.com/gorilla/websocket/.travis.yml +++ b/cluster-autoscaler/vendor/github.com/gorilla/websocket/.travis.yml @@ -1,6 +1,20 @@ language: go +sudo: false -go: - 1.1 - 1.2 - tip +matrix: + include: + - go: 1.4 + - go: 1.5 + - go: 1.6 + - go: 1.7 + - go: 1.8 + - go: 1.9 + - go: tip + allow_failures: + - go: tip + +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d .) + - go vet $(go list ./... | grep -v /vendor/) + - go test -v -race ./... diff --git a/cluster-autoscaler/vendor/github.com/gorilla/websocket/README.md b/cluster-autoscaler/vendor/github.com/gorilla/websocket/README.md index 9ad75a0f5e91..33c3d2be3e3d 100644 --- a/cluster-autoscaler/vendor/github.com/gorilla/websocket/README.md +++ b/cluster-autoscaler/vendor/github.com/gorilla/websocket/README.md @@ -3,10 +3,15 @@ Gorilla WebSocket is a [Go](http://golang.org/) implementation of the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. +[![Build Status](https://travis-ci.org/gorilla/websocket.svg?branch=master)](https://travis-ci.org/gorilla/websocket) +[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) + ### Documentation * [API Reference](http://godoc.org/github.com/gorilla/websocket) * [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat) +* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) +* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) * [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) ### Status @@ -41,7 +46,7 @@ subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn <tr><td>Send pings and receive pongs</td><td>Yes</td><td>No</td></tr> <tr><td>Get the type of a received data message</td><td>Yes</td><td>Yes, see note 2</td></tr> <tr><td colspan="3">Other Features</td></tr> -<tr><td>Limit size of received message</td><td>Yes</td><td>No</td></tr> +<tr><td>Compression Extensions</td><td>Experimental</td><td>No</td></tr> <tr><td>Read message using io.Reader</td><td>Yes</td><td>No, see note 3</td></tr> <tr><td>Write message using io.WriteCloser</td><td>Yes</td><td>No, see note 3</td></tr> diff --git a/cluster-autoscaler/vendor/github.com/gorilla/websocket/client.go b/cluster-autoscaler/vendor/github.com/gorilla/websocket/client.go index 93db8ddc320d..43a87c753bfc 100644 --- a/cluster-autoscaler/vendor/github.com/gorilla/websocket/client.go +++ b/cluster-autoscaler/vendor/github.com/gorilla/websocket/client.go @@ -5,8 +5,10 @@ package websocket import ( + "bufio" "bytes" "crypto/tls" + "encoding/base64" "errors" "io" "io/ioutil" @@ -21,6 +23,8 @@ import ( // invalid. var ErrBadHandshake = errors.New("websocket: bad handshake") +var errInvalidCompression = errors.New("websocket: invalid compression negotiation") + // NewClient creates a new client connection using the given net connection. // The URL u specifies the host and request URI.
Use requestHeader to specify // the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies @@ -30,50 +34,17 @@ var ErrBadHandshake = errors.New("websocket: bad handshake") // If the WebSocket handshake fails, ErrBadHandshake is returned along with a // non-nil *http.Response so that callers can handle redirects, authentication, // etc. +// +// Deprecated: Use Dialer instead. func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) { - challengeKey, err := generateChallengeKey() - if err != nil { - return nil, nil, err - } - acceptKey := computeAcceptKey(challengeKey) - - c = newConn(netConn, false, readBufSize, writeBufSize) - p := c.writeBuf[:0] - p = append(p, "GET "...) - p = append(p, u.RequestURI()...) - p = append(p, " HTTP/1.1\r\nHost: "...) - p = append(p, u.Host...) - // "Upgrade" is capitalized for servers that do not use case insensitive - // comparisons on header tokens. - p = append(p, "\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Version: 13\r\nSec-WebSocket-Key: "...) - p = append(p, challengeKey...) - p = append(p, "\r\n"...) - for k, vs := range requestHeader { - for _, v := range vs { - p = append(p, k...) - p = append(p, ": "...) - p = append(p, v...) - p = append(p, "\r\n"...) - } - } - p = append(p, "\r\n"...) - - if _, err := netConn.Write(p); err != nil { - return nil, nil, err - } - - resp, err := http.ReadResponse(c.br, &http.Request{Method: "GET", URL: u}) - if err != nil { - return nil, nil, err - } - if resp.StatusCode != 101 || - !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") || - !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") || - resp.Header.Get("Sec-Websocket-Accept") != acceptKey { - return nil, resp, ErrBadHandshake + d := Dialer{ + ReadBufferSize: readBufSize, + WriteBufferSize: writeBufSize, + NetDial: func(net, addr string) (net.Conn, error) { + return netConn, nil + }, } - c.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") - return c, resp, nil + return d.Dial(u.String(), requestHeader) } // A Dialer contains options for connecting to WebSocket server. @@ -82,6 +53,12 @@ type Dialer struct { // NetDial is nil, net.Dial is used. NetDial func(network, addr string) (net.Conn, error) + // Proxy specifies a function to return a proxy for a given + // Request. If the function returns a non-nil error, the + // request is aborted with the provided error. + // If Proxy is nil or returns a nil *URL, no proxy is used. + Proxy func(*http.Request) (*url.URL, error) + // TLSClientConfig specifies the TLS configuration to use with tls.Client. // If nil, the default configuration is used. TLSClientConfig *tls.Config @@ -89,28 +66,37 @@ type Dialer struct { // HandshakeTimeout specifies the duration for the handshake to complete. HandshakeTimeout time.Duration - // Input and output buffer sizes. If the buffer size is zero, then a - // default value of 4096 is used. + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer + // size is zero, then a useful default size is used. The I/O buffer sizes + // do not limit the size of the messages that can be sent or received. ReadBufferSize, WriteBufferSize int // Subprotocols specifies the client's requested subprotocols. Subprotocols []string + + // EnableCompression specifies if the client should attempt to negotiate + // per message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. 
Currently only "no context + // takeover" modes are supported. + EnableCompression bool + + // Jar specifies the cookie jar. + // If Jar is nil, cookies are not sent in requests and ignored + // in responses. + Jar http.CookieJar } var errMalformedURL = errors.New("malformed ws or wss URL") -// parseURL parses the URL. The url.Parse function is not used here because -// url.Parse mangles the path. +// parseURL parses the URL. +// +// This function is a replacement for the standard library url.Parse function. +// In Go 1.4 and earlier, url.Parse loses information from the path. func parseURL(s string) (*url.URL, error) { // From the RFC: // // ws-URI = "ws:" "//" host [ ":" port ] path [ "?" query ] // wss-URI = "wss:" "//" host [ ":" port ] path [ "?" query ] - // - // We don't use the net/url parser here because the dialer interface does - // not provide a way for applications to work around percent deocding in - // the net/url parser. - var u url.URL switch { case strings.HasPrefix(s, "ws://"): @@ -123,15 +109,23 @@ func parseURL(s string) (*url.URL, error) { return nil, errMalformedURL } - u.Host = s - u.Opaque = "/" + if i := strings.Index(s, "?"); i >= 0 { + u.RawQuery = s[i+1:] + s = s[:i] + } + if i := strings.Index(s, "/"); i >= 0 { - u.Host = s[:i] u.Opaque = s[i:] + s = s[:i] + } else { + u.Opaque = "/" } + u.Host = s + if strings.Contains(u.Host, "@") { - // WebSocket URIs do not contain user information. + // Don't bother parsing user information because user information is + // not allowed in websocket URIs. return nil, errMalformedURL } @@ -144,9 +138,12 @@ func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") { hostNoPort = hostNoPort[:i] } else { - if u.Scheme == "wss" { + switch u.Scheme { + case "wss": + hostPort += ":443" + case "https": hostPort += ":443" - } else { + default: hostPort += ":80" } } @@ -154,7 +151,9 @@ func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { } // DefaultDialer is a dialer with all fields set to the default zero values. -var DefaultDialer *Dialer +var DefaultDialer = &Dialer{ + Proxy: http.ProxyFromEnvironment, +} // Dial creates a new client connection. Use requestHeader to specify the // origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie). @@ -166,15 +165,103 @@ var DefaultDialer *Dialer // etcetera. The response body may not contain the entire response and does not // need to be closed by the application. func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + + if d == nil { + d = &Dialer{ + Proxy: http.ProxyFromEnvironment, + } + } + + challengeKey, err := generateChallengeKey() + if err != nil { + return nil, nil, err + } + u, err := parseURL(urlStr) if err != nil { return nil, nil, err } + switch u.Scheme { + case "ws": + u.Scheme = "http" + case "wss": + u.Scheme = "https" + default: + return nil, nil, errMalformedURL + } + + if u.User != nil { + // User name and password are not allowed in websocket URIs. + return nil, nil, errMalformedURL + } + + req := &http.Request{ + Method: "GET", + URL: u, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: u.Host, + } + + // Set the cookies present in the cookie jar of the dialer + if d.Jar != nil { + for _, cookie := range d.Jar.Cookies(u) { + req.AddCookie(cookie) + } + } + + // Set the request headers using the capitalization for names and values in + // RFC examples. 
Although the capitalization shouldn't matter, there are + // servers that depend on it. The Header.Set method is not used because the + // method canonicalizes the header names. + req.Header["Upgrade"] = []string{"websocket"} + req.Header["Connection"] = []string{"Upgrade"} + req.Header["Sec-WebSocket-Key"] = []string{challengeKey} + req.Header["Sec-WebSocket-Version"] = []string{"13"} + if len(d.Subprotocols) > 0 { + req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")} + } + for k, vs := range requestHeader { + switch { + case k == "Host": + if len(vs) > 0 { + req.Host = vs[0] + } + case k == "Upgrade" || + k == "Connection" || + k == "Sec-Websocket-Key" || + k == "Sec-Websocket-Version" || + k == "Sec-Websocket-Extensions" || + (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): + return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) + default: + req.Header[k] = vs + } + } + + if d.EnableCompression { + req.Header.Set("Sec-Websocket-Extensions", "permessage-deflate; server_no_context_takeover; client_no_context_takeover") + } + hostPort, hostNoPort := hostPortNoPort(u) - if d == nil { - d = &Dialer{} + var proxyURL *url.URL + // Check whether the proxy method has been configured + if d.Proxy != nil { + proxyURL, err = d.Proxy(req) + } + if err != nil { + return nil, nil, err + } + + var targetHostPort string + if proxyURL != nil { + targetHostPort, _ = hostPortNoPort(proxyURL) + } else { + targetHostPort = hostPort } var deadline time.Time @@ -188,7 +275,7 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re netDial = netDialer.Dial } - netConn, err := netDial("tcp", hostPort) + netConn, err := netDial("tcp", targetHostPort) if err != nil { return nil, nil, err } @@ -203,13 +290,41 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re return nil, nil, err } - if u.Scheme == "wss" { - cfg := d.TLSClientConfig - if cfg == nil { - cfg = &tls.Config{ServerName: hostNoPort} - } else if cfg.ServerName == "" { - shallowCopy := *cfg - cfg = &shallowCopy + if proxyURL != nil { + connectHeader := make(http.Header) + if user := proxyURL.User; user != nil { + proxyUser := user.Username() + if proxyPassword, passwordSet := user.Password(); passwordSet { + credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword)) + connectHeader.Set("Proxy-Authorization", "Basic "+credential) + } + } + connectReq := &http.Request{ + Method: "CONNECT", + URL: &url.URL{Opaque: hostPort}, + Host: hostPort, + Header: connectHeader, + } + + connectReq.Write(netConn) + + // Read response. + // Okay to use and discard buffered reader here, because + // TLS server will not speak until spoken to.
+ br := bufio.NewReader(netConn) + resp, err := http.ReadResponse(br, connectReq) + if err != nil { + return nil, nil, err + } + if resp.StatusCode != 200 { + f := strings.SplitN(resp.Status, " ", 2) + return nil, nil, errors.New(f[1]) + } + } + + if u.Scheme == "https" { + cfg := cloneTLSConfig(d.TLSClientConfig) + if cfg.ServerName == "" { cfg.ServerName = hostNoPort } tlsConn := tls.Client(netConn, cfg) @@ -224,45 +339,53 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re } } - if len(d.Subprotocols) > 0 { - h := http.Header{} - for k, v := range requestHeader { - h[k] = v - } - h.Set("Sec-Websocket-Protocol", strings.Join(d.Subprotocols, ", ")) - requestHeader = h + conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize) + + if err := req.Write(netConn); err != nil { + return nil, nil, err } - if len(requestHeader["Host"]) > 0 { - // This can be used to supply a Host: header which is different from - // the dial address. - u.Host = requestHeader.Get("Host") + resp, err := http.ReadResponse(conn.br, req) + if err != nil { + return nil, nil, err + } - // Drop "Host" header - h := http.Header{} - for k, v := range requestHeader { - if k == "Host" { - continue - } - h[k] = v + if d.Jar != nil { + if rc := resp.Cookies(); len(rc) > 0 { + d.Jar.SetCookies(u, rc) } - requestHeader = h } - conn, resp, err := NewClient(netConn, u, requestHeader, d.ReadBufferSize, d.WriteBufferSize) + if resp.StatusCode != 101 || + !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") || + !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") || + resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) { + // Before closing the network connection on return from this + // function, slurp up some of the response to aid application + // debugging. + buf := make([]byte, 1024) + n, _ := io.ReadFull(resp.Body, buf) + resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n])) + return nil, resp, ErrBadHandshake + } - if err != nil { - if err == ErrBadHandshake { - // Before closing the network connection on return from this - // function, slurp up some of the response to aid application - // debugging. - buf := make([]byte, 1024) - n, _ := io.ReadFull(resp.Body, buf) - resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n])) + for _, ext := range parseExtensions(resp.Header) { + if ext[""] != "permessage-deflate" { + continue + } + _, snct := ext["server_no_context_takeover"] + _, cnct := ext["client_no_context_takeover"] + if !snct || !cnct { + return nil, resp, errInvalidCompression } - return nil, resp, err + conn.newCompressionWriter = compressNoContextTakeover + conn.newDecompressionReader = decompressNoContextTakeover + break } + resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) + conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") + netConn.SetDeadline(time.Time{}) netConn = nil // to avoid close in defer. return conn, resp, nil diff --git a/cluster-autoscaler/vendor/github.com/gorilla/websocket/client_clone.go b/cluster-autoscaler/vendor/github.com/gorilla/websocket/client_clone.go new file mode 100644 index 000000000000..4f0d943723a9 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/gorilla/websocket/client_clone.go @@ -0,0 +1,16 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.8 + +package websocket + +import "crypto/tls" + +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return cfg.Clone() +} diff --git a/cluster-autoscaler/vendor/github.com/gorilla/websocket/client_clone_legacy.go b/cluster-autoscaler/vendor/github.com/gorilla/websocket/client_clone_legacy.go new file mode 100644 index 000000000000..babb007fb414 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/gorilla/websocket/client_clone_legacy.go @@ -0,0 +1,38 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.8 + +package websocket + +import "crypto/tls" + +// cloneTLSConfig clones all public fields except the fields +// SessionTicketsDisabled and SessionTicketKey. This avoids copying the +// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a +// config in active use. +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + } +} diff --git a/cluster-autoscaler/vendor/github.com/gorilla/websocket/compression.go b/cluster-autoscaler/vendor/github.com/gorilla/websocket/compression.go new file mode 100644 index 000000000000..813ffb1e8433 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/gorilla/websocket/compression.go @@ -0,0 +1,148 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "compress/flate" + "errors" + "io" + "strings" + "sync" +) + +const ( + minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6 + maxCompressionLevel = flate.BestCompression + defaultCompressionLevel = 1 +) + +var ( + flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool + flateReaderPool = sync.Pool{New: func() interface{} { + return flate.NewReader(nil) + }} +) + +func decompressNoContextTakeover(r io.Reader) io.ReadCloser { + const tail = + // Add four bytes as specified in RFC + "\x00\x00\xff\xff" + + // Add final block to squelch unexpected EOF error from flate reader. 
+ "\x01\x00\x00\xff\xff" + + fr, _ := flateReaderPool.Get().(io.ReadCloser) + fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil) + return &flateReadWrapper{fr} +} + +func isValidCompressionLevel(level int) bool { + return minCompressionLevel <= level && level <= maxCompressionLevel +} + +func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser { + p := &flateWriterPools[level-minCompressionLevel] + tw := &truncWriter{w: w} + fw, _ := p.Get().(*flate.Writer) + if fw == nil { + fw, _ = flate.NewWriter(tw, level) + } else { + fw.Reset(tw) + } + return &flateWriteWrapper{fw: fw, tw: tw, p: p} +} + +// truncWriter is an io.Writer that writes all but the last four bytes of the +// stream to another io.Writer. +type truncWriter struct { + w io.WriteCloser + n int + p [4]byte +} + +func (w *truncWriter) Write(p []byte) (int, error) { + n := 0 + + // fill buffer first for simplicity. + if w.n < len(w.p) { + n = copy(w.p[w.n:], p) + p = p[n:] + w.n += n + if len(p) == 0 { + return n, nil + } + } + + m := len(p) + if m > len(w.p) { + m = len(w.p) + } + + if nn, err := w.w.Write(w.p[:m]); err != nil { + return n + nn, err + } + + copy(w.p[:], w.p[m:]) + copy(w.p[len(w.p)-m:], p[len(p)-m:]) + nn, err := w.w.Write(p[:len(p)-m]) + return n + nn, err +} + +type flateWriteWrapper struct { + fw *flate.Writer + tw *truncWriter + p *sync.Pool +} + +func (w *flateWriteWrapper) Write(p []byte) (int, error) { + if w.fw == nil { + return 0, errWriteClosed + } + return w.fw.Write(p) +} + +func (w *flateWriteWrapper) Close() error { + if w.fw == nil { + return errWriteClosed + } + err1 := w.fw.Flush() + w.p.Put(w.fw) + w.fw = nil + if w.tw.p != [4]byte{0, 0, 0xff, 0xff} { + return errors.New("websocket: internal error, unexpected bytes at end of flate stream") + } + err2 := w.tw.w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +type flateReadWrapper struct { + fr io.ReadCloser +} + +func (r *flateReadWrapper) Read(p []byte) (int, error) { + if r.fr == nil { + return 0, io.ErrClosedPipe + } + n, err := r.fr.Read(p) + if err == io.EOF { + // Preemptively place the reader back in the pool. This helps with + // scenarios where the application does not call NextReader() soon after + // this final read. 
+ r.Close() + } + return n, err +} + +func (r *flateReadWrapper) Close() error { + if r.fr == nil { + return io.ErrClosedPipe + } + err := r.fr.Close() + flateReaderPool.Put(r.fr) + r.fr = nil + return err +} diff --git a/cluster-autoscaler/vendor/github.com/gorilla/websocket/conn.go b/cluster-autoscaler/vendor/github.com/gorilla/websocket/conn.go index e719f1ce63ee..97e1dbacb123 100644 --- a/cluster-autoscaler/vendor/github.com/gorilla/websocket/conn.go +++ b/cluster-autoscaler/vendor/github.com/gorilla/websocket/conn.go @@ -13,15 +13,25 @@ import ( "math/rand" "net" "strconv" + "sync" "time" + "unicode/utf8" ) const ( + // Frame header byte 0 bits from Section 5.2 of RFC 6455 + finalBit = 1 << 7 + rsv1Bit = 1 << 6 + rsv2Bit = 1 << 5 + rsv3Bit = 1 << 4 + + // Frame header byte 1 bits from Section 5.2 of RFC 6455 + maskBit = 1 << 7 + maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask maxControlFramePayloadSize = 125 - finalBit = 1 << 7 - maskBit = 1 << 7 - writeWait = time.Second + + writeWait = time.Second defaultReadBufferSize = 4096 defaultWriteBufferSize = 4096 @@ -43,6 +53,8 @@ const ( CloseMessageTooBig = 1009 CloseMandatoryExtension = 1010 CloseInternalServerErr = 1011 + CloseServiceRestart = 1012 + CloseTryAgainLater = 1013 CloseTLSHandshake = 1015 ) @@ -88,24 +100,92 @@ func (e *netError) Error() string { return e.msg } func (e *netError) Temporary() bool { return e.temporary } func (e *netError) Timeout() bool { return e.timeout } -// closeError represents close frame. -type closeError struct { - code int - text string +// CloseError represents close frame. +type CloseError struct { + + // Code is defined in RFC 6455, section 11.7. + Code int + + // Text is the optional text payload. + Text string +} + +func (e *CloseError) Error() string { + s := []byte("websocket: close ") + s = strconv.AppendInt(s, int64(e.Code), 10) + switch e.Code { + case CloseNormalClosure: + s = append(s, " (normal)"...) + case CloseGoingAway: + s = append(s, " (going away)"...) + case CloseProtocolError: + s = append(s, " (protocol error)"...) + case CloseUnsupportedData: + s = append(s, " (unsupported data)"...) + case CloseNoStatusReceived: + s = append(s, " (no status)"...) + case CloseAbnormalClosure: + s = append(s, " (abnormal closure)"...) + case CloseInvalidFramePayloadData: + s = append(s, " (invalid payload data)"...) + case ClosePolicyViolation: + s = append(s, " (policy violation)"...) + case CloseMessageTooBig: + s = append(s, " (message too big)"...) + case CloseMandatoryExtension: + s = append(s, " (mandatory extension missing)"...) + case CloseInternalServerErr: + s = append(s, " (internal server error)"...) + case CloseTLSHandshake: + s = append(s, " (TLS handshake error)"...) + } + if e.Text != "" { + s = append(s, ": "...) + s = append(s, e.Text...) + } + return string(s) +} + +// IsCloseError returns boolean indicating whether the error is a *CloseError +// with one of the specified codes. +func IsCloseError(err error, codes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range codes { + if e.Code == code { + return true + } + } + } + return false } -func (e *closeError) Error() string { - return "websocket: close " + strconv.Itoa(e.code) + " " + e.text +// IsUnexpectedCloseError returns boolean indicating whether the error is a +// *CloseError with a code not in the list of expected codes. 
+func IsUnexpectedCloseError(err error, expectedCodes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range expectedCodes { + if e.Code == code { + return false + } + } + return true + } + return false } var ( - errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true} - errUnexpectedEOF = &closeError{code: CloseAbnormalClosure, text: io.ErrUnexpectedEOF.Error()} + errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true, temporary: true} + errUnexpectedEOF = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()} errBadWriteOpCode = errors.New("websocket: bad write message type") errWriteClosed = errors.New("websocket: write closed") errInvalidControlFrame = errors.New("websocket: invalid control frame") ) +func newMaskKey() [4]byte { + n := rand.Uint32() + return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} +} + func hideTempErr(err error) error { if e, ok := err.(net.Error); ok && e.Temporary() { err = &netError{msg: e.Error(), timeout: e.Timeout()} @@ -121,72 +201,138 @@ func isData(frameType int) bool { return frameType == TextMessage || frameType == BinaryMessage } -func maskBytes(key [4]byte, pos int, b []byte) int { - for i := range b { - b[i] ^= key[pos&3] - pos++ - } - return pos & 3 +var validReceivedCloseCodes = map[int]bool{ + // see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number + + CloseNormalClosure: true, + CloseGoingAway: true, + CloseProtocolError: true, + CloseUnsupportedData: true, + CloseNoStatusReceived: false, + CloseAbnormalClosure: false, + CloseInvalidFramePayloadData: true, + ClosePolicyViolation: true, + CloseMessageTooBig: true, + CloseMandatoryExtension: true, + CloseInternalServerErr: true, + CloseServiceRestart: true, + CloseTryAgainLater: true, + CloseTLSHandshake: false, } -func newMaskKey() [4]byte { - n := rand.Uint32() - return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} +func isValidReceivedCloseCode(code int) bool { + return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999) } -// Conn represents a WebSocket connection. +// The Conn type represents a WebSocket connection. type Conn struct { conn net.Conn isServer bool subprotocol string // Write fields - mu chan bool // used as mutex to protect write to conn and closeSent - closeSent bool // true if close message was sent + mu chan bool // used as mutex to protect write to conn + writeBuf []byte // frame is constructed in this buffer. + writeDeadline time.Time + writer io.WriteCloser // the current writer returned to the application + isWriting bool // for best-effort concurrent write detection + + writeErrMu sync.Mutex + writeErr error - // Message writer fields. - writeErr error - writeBuf []byte // frame is constructed in this buffer. - writePos int // end of data in writeBuf. - writeFrameType int // type of the current frame. - writeSeq int // incremented to invalidate message writers. - writeDeadline time.Time + enableWriteCompression bool + compressionLevel int + newCompressionWriter func(io.WriteCloser, int) io.WriteCloser // Read fields + reader io.ReadCloser // the current reader returned to the application readErr error br *bufio.Reader readRemaining int64 // bytes remaining in current frame. readFinal bool // true the current message has more frames. - readSeq int // incremented to invalidate message readers. readLength int64 // Message size. readLimit int64 // Maximum message size. 
readMaskPos int readMaskKey [4]byte handlePong func(string) error handlePing func(string) error + handleClose func(int, string) error + readErrCount int + messageReader *messageReader // the current low-level reader + + readDecompress bool // whether last read frame had RSV1 set + newDecompressionReader func(io.Reader) io.ReadCloser } func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int) *Conn { + return newConnBRW(conn, isServer, readBufferSize, writeBufferSize, nil) +} + +type writeHook struct { + p []byte +} + +func (wh *writeHook) Write(p []byte) (int, error) { + wh.p = p + return len(p), nil +} + +func newConnBRW(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, brw *bufio.ReadWriter) *Conn { mu := make(chan bool, 1) mu <- true - if readBufferSize == 0 { - readBufferSize = defaultReadBufferSize + var br *bufio.Reader + if readBufferSize == 0 && brw != nil && brw.Reader != nil { + // Reuse the supplied bufio.Reader if the buffer has a useful size. + // This code assumes that peek on a reader returns + // bufio.Reader.buf[:0]. + brw.Reader.Reset(conn) + if p, err := brw.Reader.Peek(0); err == nil && cap(p) >= 256 { + br = brw.Reader + } + } + if br == nil { + if readBufferSize == 0 { + readBufferSize = defaultReadBufferSize + } + if readBufferSize < maxControlFramePayloadSize { + readBufferSize = maxControlFramePayloadSize + } + br = bufio.NewReaderSize(conn, readBufferSize) + } + + var writeBuf []byte + if writeBufferSize == 0 && brw != nil && brw.Writer != nil { + // Use the bufio.Writer's buffer if the buffer has a useful size. This + // code assumes that bufio.Writer.buf[:1] is passed to the + // bufio.Writer's underlying writer. + var wh writeHook + brw.Writer.Reset(&wh) + brw.Writer.WriteByte(0) + brw.Flush() + if cap(wh.p) >= maxFrameHeaderSize+256 { + writeBuf = wh.p[:cap(wh.p)] + } } - if writeBufferSize == 0 { - writeBufferSize = defaultWriteBufferSize + + if writeBuf == nil { + if writeBufferSize == 0 { + writeBufferSize = defaultWriteBufferSize + } + writeBuf = make([]byte, writeBufferSize+maxFrameHeaderSize) } c := &Conn{ - isServer: isServer, - br: bufio.NewReaderSize(conn, readBufferSize), - conn: conn, - mu: mu, - readFinal: true, - writeBuf: make([]byte, writeBufferSize+maxFrameHeaderSize), - writeFrameType: noFrame, - writePos: maxFrameHeaderSize, + isServer: isServer, + br: br, + conn: conn, + mu: mu, + readFinal: true, + writeBuf: writeBuf, + enableWriteCompression: true, + compressionLevel: defaultCompressionLevel, } + c.SetCloseHandler(nil) c.SetPingHandler(nil) c.SetPongHandler(nil) return c @@ -214,29 +360,40 @@ func (c *Conn) RemoteAddr() net.Addr { // Write methods +func (c *Conn) writeFatal(err error) error { + err = hideTempErr(err) + c.writeErrMu.Lock() + if c.writeErr == nil { + c.writeErr = err + } + c.writeErrMu.Unlock() + return err +} + func (c *Conn) write(frameType int, deadline time.Time, bufs ...[]byte) error { <-c.mu defer func() { c.mu <- true }() - if c.closeSent { - return ErrCloseSent - } else if frameType == CloseMessage { - c.closeSent = true + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err } c.conn.SetWriteDeadline(deadline) for _, buf := range bufs { if len(buf) > 0 { - n, err := c.conn.Write(buf) - if n != len(buf) { - // Close on partial write. 
- c.conn.Close() - } + _, err := c.conn.Write(buf) if err != nil { - return err + return c.writeFatal(err) } } } + + if frameType == CloseMessage { + c.writeFatal(ErrCloseSent) + } return nil } @@ -285,63 +442,104 @@ func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) er } defer func() { c.mu <- true }() - if c.closeSent { - return ErrCloseSent - } else if messageType == CloseMessage { - c.closeSent = true + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err } c.conn.SetWriteDeadline(deadline) - n, err := c.conn.Write(buf) - if n != 0 && n != len(buf) { - c.conn.Close() + _, err = c.conn.Write(buf) + if err != nil { + return c.writeFatal(err) + } + if messageType == CloseMessage { + c.writeFatal(ErrCloseSent) + } + return err +} + +func (c *Conn) prepWrite(messageType int) error { + // Close previous writer if not already closed by the application. It's + // probably better to return an error in this situation, but we cannot + // change this without breaking existing applications. + if c.writer != nil { + c.writer.Close() + c.writer = nil + } + + if !isControl(messageType) && !isData(messageType) { + return errBadWriteOpCode } + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() return err } -// NextWriter returns a writer for the next message to send. The writer's -// Close method flushes the complete message to the network. +// NextWriter returns a writer for the next message to send. The writer's Close +// method flushes the complete message to the network. // // There can be at most one open writer on a connection. NextWriter closes the // previous writer if the application has not already done so. -// -// The NextWriter method and the writers returned from the method cannot be -// accessed by more than one goroutine at a time. func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { - if c.writeErr != nil { - return nil, c.writeErr + if err := c.prepWrite(messageType); err != nil { + return nil, err } - if c.writeFrameType != noFrame { - if err := c.flushFrame(true, nil); err != nil { - return nil, err - } + mw := &messageWriter{ + c: c, + frameType: messageType, + pos: maxFrameHeaderSize, } - - if !isControl(messageType) && !isData(messageType) { - return nil, errBadWriteOpCode + c.writer = mw + if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) { + w := c.newCompressionWriter(c.writer, c.compressionLevel) + mw.compress = true + c.writer = w } + return c.writer, nil +} - c.writeFrameType = messageType - return messageWriter{c, c.writeSeq}, nil +type messageWriter struct { + c *Conn + compress bool // whether next call to flushFrame should set RSV1 + pos int // end of data in writeBuf. + frameType int // type of the current frame. + err error } -func (c *Conn) flushFrame(final bool, extra []byte) error { - length := c.writePos - maxFrameHeaderSize + len(extra) +func (w *messageWriter) fatal(err error) error { + if w.err == nil { + w.err = err + w.c.writer = nil + } + return err +} + +// flushFrame writes buffered data and extra as a frame to the network. The +// final argument indicates that this is the last frame in the message. +func (w *messageWriter) flushFrame(final bool, extra []byte) error { + c := w.c + length := w.pos - maxFrameHeaderSize + len(extra) // Check for invalid control frames.
- if isControl(c.writeFrameType) && + if isControl(w.frameType) && (!final || length > maxControlFramePayloadSize) { - c.writeSeq++ - c.writeFrameType = noFrame - c.writePos = maxFrameHeaderSize - return errInvalidControlFrame + return w.fatal(errInvalidControlFrame) } - b0 := byte(c.writeFrameType) + b0 := byte(w.frameType) if final { b0 |= finalBit } + if w.compress { + b0 |= rsv1Bit + } + w.compress = false + b1 := byte(0) if !c.isServer { b1 |= maskBit @@ -373,49 +571,50 @@ func (c *Conn) flushFrame(final bool, extra []byte) error { if !c.isServer { key := newMaskKey() copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) - maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:c.writePos]) + maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos]) if len(extra) > 0 { - c.writeErr = errors.New("websocket: internal error, extra used in client mode") - return c.writeErr + return c.writeFatal(errors.New("websocket: internal error, extra used in client mode")) } } - // Write the buffers to the connection. - c.writeErr = c.write(c.writeFrameType, c.writeDeadline, c.writeBuf[framePos:c.writePos], extra) + // Write the buffers to the connection with best-effort detection of + // concurrent writes. See the concurrency section in the package + // documentation for more info. - // Setup for next frame. - c.writePos = maxFrameHeaderSize - c.writeFrameType = continuationFrame - if final { - c.writeSeq++ - c.writeFrameType = noFrame + if c.isWriting { + panic("concurrent write to websocket connection") } - return c.writeErr -} + c.isWriting = true -type messageWriter struct { - c *Conn - seq int -} + err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra) -func (w messageWriter) err() error { - c := w.c - if c.writeSeq != w.seq { - return errWriteClosed + if !c.isWriting { + panic("concurrent write to websocket connection") } - if c.writeErr != nil { - return c.writeErr + c.isWriting = false + + if err != nil { + return w.fatal(err) + } + + if final { + c.writer = nil + return nil } + + // Setup for next frame. + w.pos = maxFrameHeaderSize + w.frameType = continuationFrame return nil } -func (w messageWriter) ncopy(max int) (int, error) { - n := len(w.c.writeBuf) - w.c.writePos +func (w *messageWriter) ncopy(max int) (int, error) { + n := len(w.c.writeBuf) - w.pos if n <= 0 { - if err := w.c.flushFrame(false, nil); err != nil { + if err := w.flushFrame(false, nil); err != nil { return 0, err } - n = len(w.c.writeBuf) - w.c.writePos + n = len(w.c.writeBuf) - w.pos } if n > max { n = max @@ -423,14 +622,14 @@ func (w messageWriter) ncopy(max int) (int, error) { return n, nil } -func (w messageWriter) write(final bool, p []byte) (int, error) { - if err := w.err(); err != nil { - return 0, err +func (w *messageWriter) Write(p []byte) (int, error) { + if w.err != nil { + return 0, w.err } if len(p) > 2*len(w.c.writeBuf) && w.c.isServer { // Don't buffer large messages. 
- err := w.c.flushFrame(final, p) + err := w.flushFrame(false, p) if err != nil { return 0, err } @@ -443,20 +642,16 @@ func (w messageWriter) write(final bool, p []byte) (int, error) { if err != nil { return 0, err } - copy(w.c.writeBuf[w.c.writePos:], p[:n]) - w.c.writePos += n + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n p = p[n:] } return nn, nil } -func (w messageWriter) Write(p []byte) (int, error) { - return w.write(false, p) -} - -func (w messageWriter) WriteString(p string) (int, error) { - if err := w.err(); err != nil { - return 0, err +func (w *messageWriter) WriteString(p string) (int, error) { + if w.err != nil { + return 0, w.err } nn := len(p) @@ -465,27 +660,27 @@ func (w messageWriter) WriteString(p string) (int, error) { if err != nil { return 0, err } - copy(w.c.writeBuf[w.c.writePos:], p[:n]) - w.c.writePos += n + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n p = p[n:] } return nn, nil } -func (w messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { - if err := w.err(); err != nil { - return 0, err +func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { + if w.err != nil { + return 0, w.err } for { - if w.c.writePos == len(w.c.writeBuf) { - err = w.c.flushFrame(false, nil) + if w.pos == len(w.c.writeBuf) { + err = w.flushFrame(false, nil) if err != nil { break } } var n int - n, err = r.Read(w.c.writeBuf[w.c.writePos:]) - w.c.writePos += n + n, err = r.Read(w.c.writeBuf[w.pos:]) + w.pos += n nn += int64(n) if err != nil { if err == io.EOF { @@ -497,30 +692,64 @@ func (w messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { return nn, err } -func (w messageWriter) Close() error { - if err := w.err(); err != nil { +func (w *messageWriter) Close() error { + if w.err != nil { + return w.err + } + if err := w.flushFrame(true, nil); err != nil { + return err + } + w.err = errWriteClosed + return nil +} + +// WritePreparedMessage writes prepared message into connection. +func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error { + frameType, frameData, err := pm.frame(prepareKey{ + isServer: c.isServer, + compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType), + compressionLevel: c.compressionLevel, + }) + if err != nil { return err } - return w.c.flushFrame(true, nil) + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + err = c.write(frameType, c.writeDeadline, frameData, nil) + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + return err } // WriteMessage is a helper method for getting a writer using NextWriter, // writing the message and closing the writer. func (c *Conn) WriteMessage(messageType int, data []byte) error { - wr, err := c.NextWriter(messageType) + + if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) { + // Fast path with no allocations and single frame. 
+ + if err := c.prepWrite(messageType); err != nil { + return err + } + mw := messageWriter{c: c, frameType: messageType, pos: maxFrameHeaderSize} + n := copy(c.writeBuf[mw.pos:], data) + mw.pos += n + data = data[n:] + return mw.flushFrame(true, data) + } + + w, err := c.NextWriter(messageType) if err != nil { return err } - w := wr.(messageWriter) - if _, err := w.write(true, data); err != nil { + if _, err = w.Write(data); err != nil { return err } - if c.writeSeq == w.seq { - if err := c.flushFrame(true, nil); err != nil { - return err - } - } - return nil + return w.Close() } // SetWriteDeadline sets the write deadline on the underlying network @@ -534,22 +763,6 @@ func (c *Conn) SetWriteDeadline(t time.Time) error { // Read methods -// readFull is like io.ReadFull except that io.EOF is never returned. -func (c *Conn) readFull(p []byte) (err error) { - var n int - for n < len(p) && err == nil { - var nn int - nn, err = c.br.Read(p[n:]) - n += nn - } - if n == len(p) { - err = nil - } else if err == io.EOF { - err = errUnexpectedEOF - } - return -} - func (c *Conn) advanceFrame() (int, error) { // 1. Skip remainder of previous frame. @@ -562,19 +775,24 @@ func (c *Conn) advanceFrame() (int, error) { // 2. Read and parse first two bytes of frame header. - var b [8]byte - if err := c.readFull(b[:2]); err != nil { + p, err := c.read(2) + if err != nil { return noFrame, err } - final := b[0]&finalBit != 0 - frameType := int(b[0] & 0xf) - reserved := int((b[0] >> 4) & 0x7) - mask := b[1]&maskBit != 0 - c.readRemaining = int64(b[1] & 0x7f) + final := p[0]&finalBit != 0 + frameType := int(p[0] & 0xf) + mask := p[1]&maskBit != 0 + c.readRemaining = int64(p[1] & 0x7f) + + c.readDecompress = false + if c.newDecompressionReader != nil && (p[0]&rsv1Bit) != 0 { + c.readDecompress = true + p[0] &^= rsv1Bit + } - if reserved != 0 { - return noFrame, c.handleProtocolError("unexpected reserved bits " + strconv.Itoa(reserved)) + if rsv := p[0] & (rsv1Bit | rsv2Bit | rsv3Bit); rsv != 0 { + return noFrame, c.handleProtocolError("unexpected reserved bits 0x" + strconv.FormatInt(int64(rsv), 16)) } switch frameType { @@ -603,15 +821,17 @@ func (c *Conn) advanceFrame() (int, error) { switch c.readRemaining { case 126: - if err := c.readFull(b[:2]); err != nil { + p, err := c.read(2) + if err != nil { return noFrame, err } - c.readRemaining = int64(binary.BigEndian.Uint16(b[:2])) + c.readRemaining = int64(binary.BigEndian.Uint16(p)) case 127: - if err := c.readFull(b[:8]); err != nil { + p, err := c.read(8) + if err != nil { return noFrame, err } - c.readRemaining = int64(binary.BigEndian.Uint64(b[:8])) + c.readRemaining = int64(binary.BigEndian.Uint64(p)) } // 4. Handle frame masking. @@ -622,9 +842,11 @@ func (c *Conn) advanceFrame() (int, error) { if mask { c.readMaskPos = 0 - if err := c.readFull(c.readMaskKey[:]); err != nil { + p, err := c.read(len(c.readMaskKey)) + if err != nil { return noFrame, err } + copy(c.readMaskKey[:], p) } // 5. For text and binary messages, enforce read limit and return. 
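The read-path hunks above and below make errors from `NextReader`/`ReadMessage` permanent: close frames now surface as `*CloseError`, and a loop that ignores errors eventually hits the deliberate panic added for repeated reads on a failed connection. A minimal sketch of the read loop these changes expect (the function shape and `log` output are illustrative, not part of the diff):

```go
package wsclient

import (
	"log"

	"github.com/gorilla/websocket"
)

// readLoop drains messages until the connection fails or the peer closes it.
// It exits on the first error, since read errors are permanent in this
// version and repeated reads on a failed connection will panic.
func readLoop(c *websocket.Conn) {
	defer c.Close()
	for {
		_, msg, err := c.ReadMessage()
		if err != nil {
			// Orderly shutdowns arrive as CloseNormalClosure/CloseGoingAway;
			// anything else is worth surfacing as a real failure.
			if websocket.IsUnexpectedCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) {
				log.Printf("websocket read failed: %v", err)
			}
			return
		}
		log.Printf("received: %s", msg)
	}
}
```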
@@ -644,9 +866,9 @@ func (c *Conn) advanceFrame() (int, error) { var payload []byte if c.readRemaining > 0 { - payload = make([]byte, c.readRemaining) + payload, err = c.read(int(c.readRemaining)) c.readRemaining = 0 - if err := c.readFull(payload); err != nil { + if err != nil { return noFrame, err } if c.isServer { @@ -666,19 +888,22 @@ func (c *Conn) advanceFrame() (int, error) { return noFrame, err } case CloseMessage: - c.WriteControl(CloseMessage, []byte{}, time.Now().Add(writeWait)) closeCode := CloseNoStatusReceived closeText := "" if len(payload) >= 2 { closeCode = int(binary.BigEndian.Uint16(payload)) + if !isValidReceivedCloseCode(closeCode) { + return noFrame, c.handleProtocolError("invalid close code") + } closeText = string(payload[2:]) + if !utf8.ValidString(closeText) { + return noFrame, c.handleProtocolError("invalid utf8 payload in close frame") + } } - switch closeCode { - case CloseNormalClosure, CloseGoingAway: - return noFrame, io.EOF - default: - return noFrame, &closeError{code: closeCode, text: closeText} + if err := c.handleClose(closeCode, closeText); err != nil { + return noFrame, err } + return noFrame, &CloseError{Code: closeCode, Text: closeText} } return frameType, nil @@ -695,11 +920,18 @@ func (c *Conn) handleProtocolError(message string) error { // There can be at most one open reader on a connection. NextReader discards // the previous message if the application has not already consumed it. // -// The NextReader method and the readers returned from the method cannot be -// accessed by more than one goroutine at a time. +// Applications must break out of the application's read loop when this method +// returns a non-nil error value. Errors returned from this method are +// permanent. Once this method returns a non-nil error, all subsequent calls to +// this method return the same error. func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { + // Close previous reader, only relevant for decompression. + if c.reader != nil { + c.reader.Close() + c.reader = nil + } - c.readSeq++ + c.messageReader = nil c.readLength = 0 for c.readErr == nil { @@ -709,59 +941,77 @@ func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { break } if frameType == TextMessage || frameType == BinaryMessage { - return frameType, messageReader{c, c.readSeq}, nil + c.messageReader = &messageReader{c} + c.reader = c.messageReader + if c.readDecompress { + c.reader = c.newDecompressionReader(c.reader) + } + return frameType, c.reader, nil } } - return noFrame, nil, c.readErr -} -type messageReader struct { - c *Conn - seq int + // Applications that do not handle the error returned from this method spin + // in a tight loop on connection failure. To help application developers + // detect this error, panic on repeated reads to the failed connection.
+ c.readErrCount++ + if c.readErrCount >= 1000 { + panic("repeated read on failed websocket connection") + } + + return noFrame, nil, c.readErr } -func (r messageReader) Read(b []byte) (int, error) { +type messageReader struct{ c *Conn } - if r.seq != r.c.readSeq { +func (r *messageReader) Read(b []byte) (int, error) { + c := r.c + if c.messageReader != r { return 0, io.EOF } - for r.c.readErr == nil { + for c.readErr == nil { - if r.c.readRemaining > 0 { - if int64(len(b)) > r.c.readRemaining { - b = b[:r.c.readRemaining] + if c.readRemaining > 0 { + if int64(len(b)) > c.readRemaining { + b = b[:c.readRemaining] } - n, err := r.c.br.Read(b) - r.c.readErr = hideTempErr(err) - if r.c.isServer { - r.c.readMaskPos = maskBytes(r.c.readMaskKey, r.c.readMaskPos, b[:n]) + n, err := c.br.Read(b) + c.readErr = hideTempErr(err) + if c.isServer { + c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n]) } - r.c.readRemaining -= int64(n) - return n, r.c.readErr + c.readRemaining -= int64(n) + if c.readRemaining > 0 && c.readErr == io.EOF { + c.readErr = errUnexpectedEOF + } + return n, c.readErr } - if r.c.readFinal { - r.c.readSeq++ + if c.readFinal { + c.messageReader = nil return 0, io.EOF } - frameType, err := r.c.advanceFrame() + frameType, err := c.advanceFrame() switch { case err != nil: - r.c.readErr = hideTempErr(err) + c.readErr = hideTempErr(err) case frameType == TextMessage || frameType == BinaryMessage: - r.c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") + c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") } } - err := r.c.readErr - if err == io.EOF && r.seq == r.c.readSeq { + err := c.readErr + if err == io.EOF && c.messageReader == r { err = errUnexpectedEOF } return 0, err } +func (r *messageReader) Close() error { + return nil +} + // ReadMessage is a helper method for getting a reader using NextReader and // reading from that reader to a buffer. func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { @@ -789,21 +1039,76 @@ func (c *Conn) SetReadLimit(limit int64) { c.readLimit = limit } +// CloseHandler returns the current close handler +func (c *Conn) CloseHandler() func(code int, text string) error { + return c.handleClose +} + +// SetCloseHandler sets the handler for close messages received from the peer. +// The code argument to h is the received close code or CloseNoStatusReceived +// if the close message is empty. The default close handler sends a close frame +// back to the peer. +// +// The application must read the connection to process close messages as +// described in the section on Control Frames above. +// +// The connection read methods return a CloseError when a close frame is +// received. Most applications should handle close messages as part of their +// normal error handling. Applications should only set a close handler when the +// application must perform some action before sending a close frame back to +// the peer. 
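Given the `SetCloseHandler` contract documented above, an application might install a handler like the following sketch, which logs the peer's close code and then sends the same echo the default handler would. The package name and helper are illustrative:

```go
package wsutil

import (
	"log"
	"time"

	"github.com/gorilla/websocket"
)

// InstallCloseLogger registers a close handler that logs the peer's close
// code before replying with the echo the default handler would have sent.
func InstallCloseLogger(conn *websocket.Conn) {
	conn.SetCloseHandler(func(code int, text string) error {
		log.Printf("peer closed: code=%d text=%q", code, text)
		// Do not echo a code when the peer sent an empty close payload,
		// matching the default handler's behavior.
		message := []byte{}
		if code != websocket.CloseNoStatusReceived {
			message = websocket.FormatCloseMessage(code, "")
		}
		return conn.WriteControl(websocket.CloseMessage, message, time.Now().Add(time.Second))
	})
}
```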
+func (c *Conn) SetCloseHandler(h func(code int, text string) error) { + if h == nil { + h = func(code int, text string) error { + message := []byte{} + if code != CloseNoStatusReceived { + message = FormatCloseMessage(code, "") + } + c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)) + return nil + } + } + c.handleClose = h +} + +// PingHandler returns the current ping handler +func (c *Conn) PingHandler() func(appData string) error { + return c.handlePing +} + // SetPingHandler sets the handler for ping messages received from the peer. -// The default ping handler sends a pong to the peer. -func (c *Conn) SetPingHandler(h func(string) error) { +// The appData argument to h is the PING frame application data. The default +// ping handler sends a pong to the peer. +// +// The application must read the connection to process ping messages as +// described in the section on Control Frames above. +func (c *Conn) SetPingHandler(h func(appData string) error) { if h == nil { h = func(message string) error { - c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) - return nil + err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) + if err == ErrCloseSent { + return nil + } else if e, ok := err.(net.Error); ok && e.Temporary() { + return nil + } + return err } } c.handlePing = h } +// PongHandler returns the current pong handler +func (c *Conn) PongHandler() func(appData string) error { + return c.handlePong +} + // SetPongHandler sets the handler for pong messages received from the peer. -// The default pong handler does nothing. -func (c *Conn) SetPongHandler(h func(string) error) { +// The appData argument to h is the PONG frame application data. The default +// pong handler does nothing. +// +// The application must read the connection to process ping messages as +// described in the section on Control Frames above. +func (c *Conn) SetPongHandler(h func(appData string) error) { if h == nil { h = func(string) error { return nil } } @@ -816,6 +1121,25 @@ func (c *Conn) UnderlyingConn() net.Conn { return c.conn } +// EnableWriteCompression enables and disables write compression of +// subsequent text and binary messages. This function is a noop if +// compression was not negotiated with the peer. +func (c *Conn) EnableWriteCompression(enable bool) { + c.enableWriteCompression = enable +} + +// SetCompressionLevel sets the flate compression level for subsequent text and +// binary messages. This function is a noop if compression was not negotiated +// with the peer. See the compress/flate package for a description of +// compression levels. +func (c *Conn) SetCompressionLevel(level int) error { + if !isValidCompressionLevel(level) { + return errors.New("websocket: invalid compression level") + } + c.compressionLevel = level + return nil +} + // FormatCloseMessage formats closeCode and text as a WebSocket close message. func FormatCloseMessage(closeCode int, text string) []byte { buf := make([]byte, 2+len(text)) diff --git a/cluster-autoscaler/vendor/github.com/gorilla/websocket/conn_read.go b/cluster-autoscaler/vendor/github.com/gorilla/websocket/conn_read.go new file mode 100644 index 000000000000..1ea15059ee14 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/gorilla/websocket/conn_read.go @@ -0,0 +1,18 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
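The ping and pong handler setters above enable the usual keepalive pattern. A sketch, assuming some goroutine is reading the connection so the pong handler actually runs (the doc comments above call this requirement out); the package name and interval split are illustrative:

```go
package wsutil

import (
	"time"

	"github.com/gorilla/websocket"
)

// KeepAlive pings the peer periodically and extends the read deadline
// whenever a pong arrives. Pong handlers only run from the read methods,
// so a reader goroutine must be draining conn for this to work.
func KeepAlive(conn *websocket.Conn, timeout time.Duration) {
	conn.SetReadDeadline(time.Now().Add(timeout))
	conn.SetPongHandler(func(string) error {
		return conn.SetReadDeadline(time.Now().Add(timeout))
	})
	go func() {
		ticker := time.NewTicker(timeout / 3)
		defer ticker.Stop()
		for range ticker.C {
			deadline := time.Now().Add(time.Second)
			if err := conn.WriteControl(websocket.PingMessage, nil, deadline); err != nil {
				return // connection closed or write failed; stop pinging
			}
		}
	}()
}
```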
+ +// +build go1.5 + +package websocket + +import "io" + +func (c *Conn) read(n int) ([]byte, error) { + p, err := c.br.Peek(n) + if err == io.EOF { + err = errUnexpectedEOF + } + c.br.Discard(len(p)) + return p, err +} diff --git a/cluster-autoscaler/vendor/github.com/gorilla/websocket/conn_read_legacy.go b/cluster-autoscaler/vendor/github.com/gorilla/websocket/conn_read_legacy.go new file mode 100644 index 000000000000..018541cf6cbb --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/gorilla/websocket/conn_read_legacy.go @@ -0,0 +1,21 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.5 + +package websocket + +import "io" + +func (c *Conn) read(n int) ([]byte, error) { + p, err := c.br.Peek(n) + if err == io.EOF { + err = errUnexpectedEOF + } + if len(p) > 0 { + // advance over the bytes just read + io.ReadFull(c.br, p) + } + return p, err +} diff --git a/cluster-autoscaler/vendor/github.com/gorilla/websocket/doc.go b/cluster-autoscaler/vendor/github.com/gorilla/websocket/doc.go index f52925dd11a5..f5ff0823da77 100644 --- a/cluster-autoscaler/vendor/github.com/gorilla/websocket/doc.go +++ b/cluster-autoscaler/vendor/github.com/gorilla/websocket/doc.go @@ -6,9 +6,8 @@ // // Overview // -// The Conn type represents a WebSocket connection. A server application uses -// the Upgrade function from an Upgrader object with a HTTP request handler -// to get a pointer to a Conn: +// The Conn type represents a WebSocket connection. A server application calls +// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn: // // var upgrader = websocket.Upgrader{ // ReadBufferSize: 1024, @@ -33,7 +32,7 @@ // if err != nil { // return // } -// if err = conn.WriteMessage(messageType, p); err != nil { +// if err := conn.WriteMessage(messageType, p); err != nil { // return err // } // } @@ -46,8 +45,7 @@ // method to get an io.WriteCloser, write the message to the writer and close // the writer when done. To receive a message, call the connection NextReader // method to get an io.Reader and read until io.EOF is returned. This snippet -// snippet shows how to echo messages using the NextWriter and NextReader -// methods: +// shows how to echo messages using the NextWriter and NextReader methods: // // for { // messageType, r, err := conn.NextReader() @@ -86,31 +84,23 @@ // and pong. Call the connection WriteControl, WriteMessage or NextWriter // methods to send a control message to the peer. // -// Connections handle received ping and pong messages by invoking a callback -// function set with SetPingHandler and SetPongHandler methods. These callback -// functions can be invoked from the ReadMessage method, the NextReader method -// or from a call to the data message reader returned from NextReader. +// Connections handle received close messages by sending a close message to the +// peer and returning a *CloseError from the the NextReader, ReadMessage or the +// message Read method. // -// Connections handle received close messages by returning an error from the -// ReadMessage method, the NextReader method or from a call to the data message -// reader returned from NextReader. -// -// Concurrency -// -// Connections do not support concurrent calls to the write methods -// (NextWriter, SetWriteDeadline, WriteMessage) or concurrent calls to the read -// methods methods (NextReader, SetReadDeadline, ReadMessage). 
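Both `conn_read` variants added above implement the same idea: borrow `n` bytes from the `bufio.Reader`'s internal buffer instead of copying them out. `bufio.Reader.Discard` only exists since Go 1.5, hence the build tags; the legacy variant advances by re-reading the peeked bytes with `io.ReadFull`. A standalone sketch of the pattern:

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

// readN returns the next n buffered bytes without copying them out of the
// bufio.Reader, then advances past them - the same Peek/Discard pattern as
// conn_read.go above. The returned slice is only valid until the next read.
func readN(br *bufio.Reader, n int) ([]byte, error) {
	p, err := br.Peek(n)
	br.Discard(len(p))
	return p, err
}

func main() {
	br := bufio.NewReader(strings.NewReader("abcdef"))
	p, _ := readN(br, 2)
	fmt.Printf("%s\n", p) // "ab"
}
```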
Connections do -// support a concurrent reader and writer. -// -// The Close and WriteControl methods can be called concurrently with all other +// Connections handle received ping and pong messages by invoking callback +// functions set with SetPingHandler and SetPongHandler methods. The callback +// functions are called from the NextReader, ReadMessage and the message Read // methods. // -// Read is Required +// The default ping handler sends a pong to the peer. The application's reading +// goroutine can block for a short time while the handler writes the pong data +// to the connection. // -// The application must read the connection to process ping and close messages -// sent from the peer. If the application is not otherwise interested in -// messages from the peer, then the application should start a goroutine to read -// and discard messages from the peer. A simple example is: +// The application must read the connection to process ping, pong and close +// messages sent from the peer. If the application is not otherwise interested +// in messages from the peer, then the application should start a goroutine to +// read and discard messages from the peer. A simple example is: // // func readLoop(c *websocket.Conn) { // for { @@ -121,6 +111,20 @@ // } // } // +// Concurrency +// +// Connections support one concurrent reader and one concurrent writer. +// +// Applications are responsible for ensuring that no more than one goroutine +// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage, +// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and +// that no more than one goroutine calls the read methods (NextReader, +// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler) +// concurrently. +// +// The Close and WriteControl methods can be called concurrently with all other +// methods. +// // Origin Considerations // // Web browsers allow Javascript applications to open a WebSocket connection to @@ -138,11 +142,38 @@ // An application can allow connections from any origin by specifying a // function that always returns true: // -// var upgrader = websocket.Upgrader{ +// var upgrader = websocket.Upgrader{ // CheckOrigin: func(r *http.Request) bool { return true }, -// } +// } +// +// The deprecated package-level Upgrade function does not perform origin +// checking. The application is responsible for checking the Origin header +// before calling the Upgrade function. +// +// Compression EXPERIMENTAL +// +// Per message compression extensions (RFC 7692) are experimentally supported +// by this package in a limited capacity. Setting the EnableCompression option +// to true in Dialer or Upgrader will attempt to negotiate per message deflate +// support. +// +// var upgrader = websocket.Upgrader{ +// EnableCompression: true, +// } +// +// If compression was successfully negotiated with the connection's peer, any +// message received in compressed form will be automatically decompressed. +// All Read methods will return uncompressed bytes. +// +// Per message compression of messages written to a connection can be enabled +// or disabled by calling the corresponding Conn method: +// +// conn.EnableWriteCompression(false) +// +// Currently this package does not support compression with "context takeover". +// This means that messages must be compressed and decompressed in isolation, +// without retaining sliding window or dictionary state across messages. For +// more details refer to RFC 7692. 
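A server-side usage sketch of the compression knobs described above; the handler and listen address are illustrative, and both `Conn` calls are no-ops unless permessage-deflate was actually negotiated:

```go
package main

import (
	"compress/flate"
	"log"
	"net/http"

	"github.com/gorilla/websocket"
)

// EnableCompression only makes negotiation possible; the client must
// offer permessage-deflate for it to take effect.
var upgrader = websocket.Upgrader{EnableCompression: true}

func echo(w http.ResponseWriter, r *http.Request) {
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		return
	}
	// No-ops when compression was not negotiated.
	conn.EnableWriteCompression(true)
	if err := conn.SetCompressionLevel(flate.BestSpeed); err != nil {
		log.Println(err)
	}
	for {
		mt, p, err := conn.ReadMessage()
		if err != nil {
			return
		}
		if err := conn.WriteMessage(mt, p); err != nil {
			return
		}
	}
}

func main() {
	http.HandleFunc("/echo", echo)
	log.Fatal(http.ListenAndServe("localhost:8080", nil))
}
```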
// -// The deprecated Upgrade function does not enforce an origin policy. It's the -// application's responsibility to check the Origin header before calling -// Upgrade. +// Use of compression is experimental and may result in decreased performance. package websocket diff --git a/cluster-autoscaler/vendor/github.com/gorilla/websocket/json.go b/cluster-autoscaler/vendor/github.com/gorilla/websocket/json.go index 18e62f2256cb..dc2c1f6415ff 100644 --- a/cluster-autoscaler/vendor/github.com/gorilla/websocket/json.go +++ b/cluster-autoscaler/vendor/github.com/gorilla/websocket/json.go @@ -9,12 +9,14 @@ import ( "io" ) -// WriteJSON is deprecated, use c.WriteJSON instead. +// WriteJSON writes the JSON encoding of v as a message. +// +// Deprecated: Use c.WriteJSON instead. func WriteJSON(c *Conn, v interface{}) error { return c.WriteJSON(v) } -// WriteJSON writes the JSON encoding of v to the connection. +// WriteJSON writes the JSON encoding of v as a message. // // See the documentation for encoding/json Marshal for details about the // conversion of Go values to JSON. @@ -31,7 +33,10 @@ func (c *Conn) WriteJSON(v interface{}) error { return err2 } -// ReadJSON is deprecated, use c.ReadJSON instead. +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// Deprecated: Use c.ReadJSON instead. func ReadJSON(c *Conn, v interface{}) error { return c.ReadJSON(v) } @@ -48,9 +53,7 @@ func (c *Conn) ReadJSON(v interface{}) error { } err = json.NewDecoder(r).Decode(v) if err == io.EOF { - // Decode returns io.EOF when the message is empty or all whitespace. - // Convert to io.ErrUnexpectedEOF so that application can distinguish - // between an error reading the JSON value and the connection closing. + // One value is expected in the message. err = io.ErrUnexpectedEOF } return err diff --git a/cluster-autoscaler/vendor/github.com/gorilla/websocket/mask.go b/cluster-autoscaler/vendor/github.com/gorilla/websocket/mask.go new file mode 100644 index 000000000000..6a88bbc74342 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/gorilla/websocket/mask.go @@ -0,0 +1,55 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. + +// +build !appengine + +package websocket + +import "unsafe" + +const wordSize = int(unsafe.Sizeof(uintptr(0))) + +func maskBytes(key [4]byte, pos int, b []byte) int { + + // Mask one byte at a time for small buffers. + if len(b) < 2*wordSize { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 + } + + // Mask one byte at a time to word boundary. + if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 { + n = wordSize - n + for i := range b[:n] { + b[i] ^= key[pos&3] + pos++ + } + b = b[n:] + } + + // Create aligned word size key. + var k [wordSize]byte + for i := range k { + k[i] = key[(pos+i)&3] + } + kw := *(*uintptr)(unsafe.Pointer(&k)) + + // Mask one word at a time. + n := (len(b) / wordSize) * wordSize + for i := 0; i < n; i += wordSize { + *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw + } + + // Mask one byte at a time for remaining bytes. 
+ b = b[n:] + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + + return pos & 3 +} diff --git a/cluster-autoscaler/vendor/github.com/gorilla/websocket/mask_safe.go b/cluster-autoscaler/vendor/github.com/gorilla/websocket/mask_safe.go new file mode 100644 index 000000000000..2aac060e52e7 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/gorilla/websocket/mask_safe.go @@ -0,0 +1,15 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. + +// +build appengine + +package websocket + +func maskBytes(key [4]byte, pos int, b []byte) int { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 +} diff --git a/cluster-autoscaler/vendor/github.com/gorilla/websocket/prepared.go b/cluster-autoscaler/vendor/github.com/gorilla/websocket/prepared.go new file mode 100644 index 000000000000..1efffbd1ebe9 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/gorilla/websocket/prepared.go @@ -0,0 +1,103 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "net" + "sync" + "time" +) + +// PreparedMessage caches on the wire representations of a message payload. +// Use PreparedMessage to efficiently send a message payload to multiple +// connections. PreparedMessage is especially useful when compression is used +// because the CPU and memory expensive compression operation can be executed +// once for a given set of compression options. +type PreparedMessage struct { + messageType int + data []byte + err error + mu sync.Mutex + frames map[prepareKey]*preparedFrame +} + +// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage. +type prepareKey struct { + isServer bool + compress bool + compressionLevel int +} + +// preparedFrame contains data in wire representation. +type preparedFrame struct { + once sync.Once + data []byte +} + +// NewPreparedMessage returns an initialized PreparedMessage. You can then send +// it to connection using WritePreparedMessage method. Valid wire +// representation will be calculated lazily only once for a set of current +// connection options. +func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) { + pm := &PreparedMessage{ + messageType: messageType, + frames: make(map[prepareKey]*preparedFrame), + data: data, + } + + // Prepare a plain server frame. + _, frameData, err := pm.frame(prepareKey{isServer: true, compress: false}) + if err != nil { + return nil, err + } + + // To protect against caller modifying the data argument, remember the data + // copied to the plain server frame. + pm.data = frameData[len(frameData)-len(data):] + return pm, nil +} + +func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) { + pm.mu.Lock() + frame, ok := pm.frames[key] + if !ok { + frame = &preparedFrame{} + pm.frames[key] = frame + } + pm.mu.Unlock() + + var err error + frame.once.Do(func() { + // Prepare a frame using a 'fake' connection. + // TODO: Refactor code in conn.go to allow more direct construction of + // the frame. 
+ mu := make(chan bool, 1) + mu <- true + var nc prepareConn + c := &Conn{ + conn: &nc, + mu: mu, + isServer: key.isServer, + compressionLevel: key.compressionLevel, + enableWriteCompression: true, + writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize), + } + if key.compress { + c.newCompressionWriter = compressNoContextTakeover + } + err = c.WriteMessage(pm.messageType, pm.data) + frame.data = nc.buf.Bytes() + }) + return pm.messageType, frame.data, err +} + +type prepareConn struct { + buf bytes.Buffer + net.Conn +} + +func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) } +func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil } diff --git a/cluster-autoscaler/vendor/github.com/gorilla/websocket/server.go b/cluster-autoscaler/vendor/github.com/gorilla/websocket/server.go index e56a004933ad..6ae97c54fec6 100644 --- a/cluster-autoscaler/vendor/github.com/gorilla/websocket/server.go +++ b/cluster-autoscaler/vendor/github.com/gorilla/websocket/server.go @@ -28,8 +28,9 @@ type Upgrader struct { HandshakeTimeout time.Duration // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer - // size is zero, then a default value of 4096 is used. The I/O buffer sizes - // do not limit the size of the messages that can be sent or received. + // size is zero, then buffers allocated by the HTTP server are used. The + // I/O buffer sizes do not limit the size of the messages that can be sent + // or received. ReadBufferSize, WriteBufferSize int // Subprotocols specifies the server's supported protocols in order of @@ -46,6 +47,12 @@ type Upgrader struct { // CheckOrigin is nil, the host in the Origin header must not be set or // must match the host of the request. CheckOrigin func(r *http.Request) bool + + // EnableCompression specify if the server should attempt to negotiate per + // message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool } func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) { @@ -53,6 +60,7 @@ func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status in if u.Error != nil { u.Error(w, r, status, err) } else { + w.Header().Set("Sec-Websocket-Version", "13") http.Error(w, http.StatusText(status), status) } return nil, err @@ -92,17 +100,28 @@ func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header // The responseHeader is included in the response to the client's upgrade // request. Use the responseHeader to specify cookies (Set-Cookie) and the // application negotiated subprotocol (Sec-Websocket-Protocol). +// +// If the upgrade fails, then Upgrade replies to the client with an HTTP error +// response. 
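`PreparedMessage`, added above, is aimed at fan-out: encode a payload once and reuse the cached frame for every connection, which matters most when compression is on. A hedged broadcast sketch; per the `NewPreparedMessage` doc comment, such messages are sent with the connection's `WritePreparedMessage` method:

```go
package main

import (
	"log"

	"github.com/gorilla/websocket"
)

// broadcast encodes payload once; each connection then reuses the frame
// cached for its own options instead of re-encoding (or re-deflating) it.
func broadcast(conns []*websocket.Conn, payload []byte) {
	pm, err := websocket.NewPreparedMessage(websocket.TextMessage, payload)
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range conns {
		if err := c.WritePreparedMessage(pm); err != nil {
			log.Println("write:", err)
		}
	}
}

func main() {
	broadcast(nil, []byte("tick")) // empty fan-out, for illustration only
}
```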
func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) { - if values := r.Header["Sec-Websocket-Version"]; len(values) == 0 || values[0] != "13" { - return u.returnError(w, r, http.StatusBadRequest, "websocket: version != 13") + if r.Method != "GET" { + return u.returnError(w, r, http.StatusMethodNotAllowed, "websocket: not a websocket handshake: request method is not GET") + } + + if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-Websocket-Extensions' headers are unsupported") } if !tokenListContainsValue(r.Header, "Connection", "upgrade") { - return u.returnError(w, r, http.StatusBadRequest, "websocket: could not find connection header with token 'upgrade'") + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'upgrade' token not found in 'Connection' header") } if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { - return u.returnError(w, r, http.StatusBadRequest, "websocket: could not find upgrade header with token 'websocket'") + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'websocket' token not found in 'Upgrade' header") + } + + if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header") } checkOrigin := u.CheckOrigin @@ -110,19 +129,30 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade checkOrigin = checkSameOrigin } if !checkOrigin(r) { - return u.returnError(w, r, http.StatusForbidden, "websocket: origin not allowed") + return u.returnError(w, r, http.StatusForbidden, "websocket: 'Origin' header value not allowed") } challengeKey := r.Header.Get("Sec-Websocket-Key") if challengeKey == "" { - return u.returnError(w, r, http.StatusBadRequest, "websocket: key missing or blank") + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: `Sec-Websocket-Key' header is missing or blank") } subprotocol := u.selectSubprotocol(r, responseHeader) + // Negotiate PMCE + var compress bool + if u.EnableCompression { + for _, ext := range parseExtensions(r.Header) { + if ext[""] != "permessage-deflate" { + continue + } + compress = true + break + } + } + var ( netConn net.Conn - br *bufio.Reader err error ) @@ -130,21 +160,25 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade if !ok { return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker") } - var rw *bufio.ReadWriter - netConn, rw, err = h.Hijack() + var brw *bufio.ReadWriter + netConn, brw, err = h.Hijack() if err != nil { return u.returnError(w, r, http.StatusInternalServerError, err.Error()) } - br = rw.Reader - if br.Buffered() > 0 { + if brw.Reader.Buffered() > 0 { netConn.Close() return nil, errors.New("websocket: client sent data before handshake is complete") } - c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize) + c := newConnBRW(netConn, true, u.ReadBufferSize, u.WriteBufferSize, brw) c.subprotocol = subprotocol + if compress { + c.newCompressionWriter = compressNoContextTakeover + c.newDecompressionReader = decompressNoContextTakeover + } + p := c.writeBuf[:0] p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: 
Upgrade\r\nSec-WebSocket-Accept: "...)
 	p = append(p, computeAcceptKey(challengeKey)...)
@@ -154,6 +188,9 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade
 		p = append(p, c.subprotocol...)
 		p = append(p, "\r\n"...)
 	}
+	if compress {
+		p = append(p, "Sec-Websocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...)
+	}
 	for k, vs := range responseHeader {
 		if k == "Sec-Websocket-Protocol" {
 			continue
@@ -193,10 +230,11 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade

 // Upgrade upgrades the HTTP server connection to the WebSocket protocol.
 //
-// This function is deprecated, use websocket.Upgrader instead.
+// Deprecated: Use websocket.Upgrader instead.
 //
-// The application is responsible for checking the request origin before
-// calling Upgrade. An example implementation of the same origin policy is:
+// Upgrade does not perform origin checking. The application is responsible for
+// checking the Origin header before calling Upgrade. An example implementation
+// of the same origin policy check is:
 //
 //	if req.Header.Get("Origin") != "http://"+req.Host {
 //		http.Error(w, "Origin not allowed", 403)
@@ -245,3 +283,10 @@ func Subprotocols(r *http.Request) []string {
 	}
 	return protocols
 }
+
+// IsWebSocketUpgrade returns true if the client requested upgrade to the
+// WebSocket protocol.
+func IsWebSocketUpgrade(r *http.Request) bool {
+	return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
+		tokenListContainsValue(r.Header, "Upgrade", "websocket")
+}
diff --git a/cluster-autoscaler/vendor/github.com/gorilla/websocket/util.go b/cluster-autoscaler/vendor/github.com/gorilla/websocket/util.go
index ffdc265ed78d..262e647bce22 100644
--- a/cluster-autoscaler/vendor/github.com/gorilla/websocket/util.go
+++ b/cluster-autoscaler/vendor/github.com/gorilla/websocket/util.go
@@ -13,19 +13,6 @@ import (
 	"strings"
 )

-// tokenListContainsValue returns true if the 1#token header with the given
-// name contains token.
-func tokenListContainsValue(header http.Header, name string, value string) bool {
-	for _, v := range header[name] {
-		for _, s := range strings.Split(v, ",") {
-			if strings.EqualFold(value, strings.TrimSpace(s)) {
-				return true
-			}
-		}
-	}
-	return false
-}
-
 var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")

 func computeAcceptKey(challengeKey string) string {
@@ -42,3 +29,186 @@ func generateChallengeKey() (string, error) {
 	}
 	return base64.StdEncoding.EncodeToString(p), nil
 }
+
+// Octet types from RFC 2616.
+var octetTypes [256]byte
+
+const (
+	isTokenOctet = 1 << iota
+	isSpaceOctet
+)
+
+func init() {
+	// From RFC 2616
+	//
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t byte
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
+		if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
+			t |= isSpaceOctet
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isTokenOctet
+		}
+		octetTypes[c] = t
+	}
+}
+
+func skipSpace(s string) (rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isSpaceOctet == 0 {
+			break
+		}
+	}
+	return s[i:]
+}
+
+func nextToken(s string) (token, rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isTokenOctet == 0 {
+			break
+		}
+	}
+	return s[:i], s[i:]
+}
+
+func nextTokenOrQuoted(s string) (value string, rest string) {
+	if !strings.HasPrefix(s, "\"") {
+		return nextToken(s)
+	}
+	s = s[1:]
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '"':
+			return s[:i], s[i+1:]
+		case '\\':
+			p := make([]byte, len(s)-1)
+			j := copy(p, s[:i])
+			escape := true
+			for i = i + 1; i < len(s); i++ {
+				b := s[i]
+				switch {
+				case escape:
+					escape = false
+					p[j] = b
+					j++
+				case b == '\\':
+					escape = true
+				case b == '"':
+					return string(p[:j]), s[i+1:]
+				default:
+					p[j] = b
+					j++
+				}
+			}
+			return "", ""
+		}
+	}
+	return "", ""
+}
+
+// tokenListContainsValue returns true if the 1#token header with the given
+// name contains token.
+func tokenListContainsValue(header http.Header, name string, value string) bool {
+headers:
+	for _, s := range header[name] {
+		for {
+			var t string
+			t, s = nextToken(skipSpace(s))
+			if t == "" {
+				continue headers
+			}
+			s = skipSpace(s)
+			if s != "" && s[0] != ',' {
+				continue headers
+			}
+			if strings.EqualFold(t, value) {
+				return true
+			}
+			if s == "" {
+				continue headers
+			}
+			s = s[1:]
+		}
+	}
+	return false
+}
+
+// parseExtensions parses WebSocket extensions from a header.
+func parseExtensions(header http.Header) []map[string]string {
+
+	// From RFC 6455:
+	//
+	//  Sec-WebSocket-Extensions = extension-list
+	//  extension-list = 1#extension
+	//  extension = extension-token *( ";" extension-param )
+	//  extension-token = registered-token
+	//  registered-token = token
+	//  extension-param = token [ "=" (token | quoted-string) ]
+	//     ;When using the quoted-string syntax variant, the value
+	//     ;after quoted-string unescaping MUST conform to the
+	//     ;'token' ABNF.
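+	//
+	// For example, the header value "permessage-deflate; client_max_window_bits"
+	// parses to a single map {"": "permessage-deflate", "client_max_window_bits": ""}:
+	// the empty key holds the extension name, the remaining keys its parameters.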
+ + var result []map[string]string +headers: + for _, s := range header["Sec-Websocket-Extensions"] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + ext := map[string]string{"": t} + for { + s = skipSpace(s) + if !strings.HasPrefix(s, ";") { + break + } + var k string + k, s = nextToken(skipSpace(s[1:])) + if k == "" { + continue headers + } + s = skipSpace(s) + var v string + if strings.HasPrefix(s, "=") { + v, s = nextTokenOrQuoted(skipSpace(s[1:])) + s = skipSpace(s) + } + if s != "" && s[0] != ',' && s[0] != ';' { + continue headers + } + ext[k] = v + } + if s != "" && s[0] != ',' { + continue headers + } + result = append(result, ext) + if s == "" { + continue headers + } + s = s[1:] + } + } + return result +} diff --git a/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/admin.go b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/admin.go new file mode 100644 index 000000000000..f47cf2361097 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/admin.go @@ -0,0 +1,78 @@ +// +// Copyright (c) 2018 The heketi Authors +// +// This file is licensed to you under your choice of the GNU Lesser +// General Public License, version 3 or any later version (LGPLv3 or +// later), as published by the Free Software Foundation, +// or under the Apache License, Version 2.0 . +// +// You may not use this file except in compliance with those terms. +// + +package client + +import ( + "bytes" + "encoding/json" + "net/http" + + "github.com/heketi/heketi/pkg/glusterfs/api" + "github.com/heketi/heketi/pkg/utils" +) + +func (c *Client) AdminStatusGet() (*api.AdminStatus, error) { + req, err := http.NewRequest("GET", c.host+"/admin", nil) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + + // Set token + err = c.setToken(req) + if err != nil { + return nil, err + } + + // Send request + r, err := c.do(req) + if err != nil { + return nil, err + } + var as api.AdminStatus + err = utils.GetJsonFromResponse(r, &as) + if err != nil { + return nil, err + } + return &as, nil +} + +func (c *Client) AdminStatusSet(request *api.AdminStatus) error { + // Marshal request to JSON + buffer, err := json.Marshal(request) + if err != nil { + return err + } + + req, err := http.NewRequest("POST", c.host+"/admin", bytes.NewBuffer(buffer)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + + // Set token + err = c.setToken(req) + if err != nil { + return err + } + + // Send request + r, err := c.do(req) + if err != nil { + return err + } + if r.StatusCode == http.StatusOK || r.StatusCode == http.StatusNoContent { + return nil + } + return utils.GetErrorFromResponse(r) +} diff --git a/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/backup.go b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/backup.go index 4b04d12de6d2..0690b3f41737 100644 --- a/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/backup.go +++ b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/backup.go @@ -37,12 +37,12 @@ func (c *Client) BackupDb(w io.Writer) error { if err != nil { return err } + defer r.Body.Close() if r.StatusCode != http.StatusOK { return utils.GetErrorFromResponse(r) } // Read data from response - defer r.Body.Close() _, err = io.Copy(w, r.Body) return err diff --git 
a/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/block_volume.go b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/block_volume.go new file mode 100644 index 000000000000..c84187f533ee --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/block_volume.go @@ -0,0 +1,160 @@ +// +// Copyright (c) 2015 The heketi Authors +// +// This file is licensed to you under your choice of the GNU Lesser +// General Public License, version 3 or any later version (LGPLv3 or +// later), as published by the Free Software Foundation, +// or under the Apache License, Version 2.0 . +// +// You may not use this file except in compliance with those terms. +// + +package client + +import ( + "bytes" + "encoding/json" + "net/http" + "time" + + "github.com/heketi/heketi/pkg/glusterfs/api" + "github.com/heketi/heketi/pkg/utils" +) + +func (c *Client) BlockVolumeCreate(request *api.BlockVolumeCreateRequest) ( + *api.BlockVolumeInfoResponse, error) { + + buffer, err := json.Marshal(request) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", + c.host+"/blockvolumes", + bytes.NewBuffer(buffer)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + + err = c.setToken(req) + if err != nil { + return nil, err + } + + r, err := c.do(req) + if err != nil { + return nil, err + } + defer r.Body.Close() + if r.StatusCode != http.StatusAccepted { + return nil, utils.GetErrorFromResponse(r) + } + + r, err = c.waitForResponseWithTimer(r, time.Second) + if err != nil { + return nil, err + } + if r.StatusCode != http.StatusOK { + return nil, utils.GetErrorFromResponse(r) + } + + var blockvolume api.BlockVolumeInfoResponse + err = utils.GetJsonFromResponse(r, &blockvolume) + if err != nil { + return nil, err + } + + return &blockvolume, nil + +} + +func (c *Client) BlockVolumeList() (*api.BlockVolumeListResponse, error) { + req, err := http.NewRequest("GET", c.host+"/blockvolumes", nil) + if err != nil { + return nil, err + } + + err = c.setToken(req) + if err != nil { + return nil, err + } + + r, err := c.do(req) + if err != nil { + return nil, err + } + defer r.Body.Close() + if r.StatusCode != http.StatusOK { + return nil, utils.GetErrorFromResponse(r) + } + + var blockvolumes api.BlockVolumeListResponse + err = utils.GetJsonFromResponse(r, &blockvolumes) + if err != nil { + return nil, err + } + + return &blockvolumes, nil +} + +func (c *Client) BlockVolumeInfo(id string) (*api.BlockVolumeInfoResponse, error) { + req, err := http.NewRequest("GET", c.host+"/blockvolumes/"+id, nil) + if err != nil { + return nil, err + } + + err = c.setToken(req) + if err != nil { + return nil, err + } + + r, err := c.do(req) + if err != nil { + return nil, err + } + defer r.Body.Close() + if r.StatusCode != http.StatusOK { + return nil, utils.GetErrorFromResponse(r) + } + + var blockvolume api.BlockVolumeInfoResponse + err = utils.GetJsonFromResponse(r, &blockvolume) + if err != nil { + return nil, err + } + + return &blockvolume, nil +} + +func (c *Client) BlockVolumeDelete(id string) error { + req, err := http.NewRequest("DELETE", c.host+"/blockvolumes/"+id, nil) + if err != nil { + return err + } + + err = c.setToken(req) + if err != nil { + return err + } + + r, err := c.do(req) + if err != nil { + return err + } + defer r.Body.Close() + if r.StatusCode != http.StatusAccepted { + return utils.GetErrorFromResponse(r) + } + + r, err = c.waitForResponseWithTimer(r, time.Second) + if err != nil 
{ + return err + } + if r.StatusCode != http.StatusNoContent { + return utils.GetErrorFromResponse(r) + } + + return nil +} diff --git a/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/client.go b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/client.go index e29abfbfb889..74d6fe2739c2 100644 --- a/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/client.go +++ b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/client.go @@ -13,9 +13,16 @@ package client import ( + "bytes" "crypto/sha256" + "crypto/tls" + "crypto/x509" "encoding/hex" + "fmt" + "io/ioutil" + "math/rand" "net/http" + "strconv" "time" jwt "github.com/dgrijalva/jwt-go" @@ -24,35 +31,118 @@ import ( const ( MAX_CONCURRENT_REQUESTS = 32 + RETRY_COUNT = 6 + + // default delay values + MIN_DELAY = 10 + MAX_DELAY = 30 ) +type ClientTLSOptions struct { + // directly borrow the field names from crypto/tls + InsecureSkipVerify bool + // one or more cert file paths (best for self-signed certs) + VerifyCerts []string +} + +// Client configuration options +type ClientOptions struct { + RetryEnabled bool + RetryCount int + // control waits between retries + RetryMinDelay, RetryMaxDelay int +} + // Client object type Client struct { host string key string user string throttle chan bool + + // configuration for TLS support + tlsClientConfig *tls.Config + + // general behavioral options + opts ClientOptions + + // allow plugging in custom do wrappers + do func(*http.Request) (*http.Response, error) +} + +var defaultClientOptions = ClientOptions{ + RetryEnabled: true, + RetryCount: RETRY_COUNT, + RetryMinDelay: MIN_DELAY, + RetryMaxDelay: MAX_DELAY, } -// Creates a new client to access a Heketi server +// NewClient creates a new client to access a Heketi server func NewClient(host, user, key string) *Client { + return NewClientWithOptions(host, user, key, defaultClientOptions) +} + +// NewClientWithOptions creates a new client to access a Heketi server +// with a user specified suite of options. +func NewClientWithOptions(host, user, key string, opts ClientOptions) *Client { c := &Client{} c.key = key c.host = host c.user = user - + c.opts = opts // Maximum concurrent requests c.throttle = make(chan bool, MAX_CONCURRENT_REQUESTS) + if opts.RetryEnabled { + c.do = c.retryOperationDo + } else { + c.do = c.doBasic + } return c } +func NewClientTLS(host, user, key string, tlsOpts *ClientTLSOptions) (*Client, error) { + c := NewClient(host, user, key) + if err := c.SetTLSOptions(tlsOpts); err != nil { + return nil, err + } + return c, nil +} + // Create a client to access a Heketi server without authentication enabled func NewClientNoAuth(host string) *Client { return NewClient(host, "", "") } +// SetTLSOptions configures an existing heketi client for +// TLS support based on the ClientTLSOptions. 
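A client-side usage sketch of the TLS support added here; the endpoint, credentials, and certificate path are placeholders:

```go
package main

import (
	"log"

	client "github.com/heketi/heketi/client/api/go-client"
)

func main() {
	// Trust a self-signed server certificate by pinning its PEM file.
	c, err := client.NewClientTLS("https://heketi.example.com:8443", "admin", "secret",
		&client.ClientTLSOptions{VerifyCerts: []string{"/etc/heketi/heketi.crt"}})
	if err != nil {
		log.Fatal(err)
	}
	// Hello is a simple liveness check against the server.
	if err := c.Hello(); err != nil {
		log.Fatal(err)
	}
}
```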
+func (c *Client) SetTLSOptions(o *ClientTLSOptions) error { + if o == nil { + c.tlsClientConfig = nil + return nil + } + + tlsConfig := &tls.Config{} + tlsConfig.InsecureSkipVerify = o.InsecureSkipVerify + if len(o.VerifyCerts) > 0 { + tlsConfig.RootCAs = x509.NewCertPool() + for _, path := range o.VerifyCerts { + pem, err := ioutil.ReadFile(path) + if err != nil { + return fmt.Errorf("failed to read cert file %v: %v", + path, err) + } + if ok := tlsConfig.RootCAs.AppendCertsFromPEM(pem); !ok { + return fmt.Errorf("failed to load PEM encoded cert from %s", + path) + } + } + } + c.tlsClientConfig = tlsConfig + return nil +} + // Simple Hello test to check if the server is up func (c *Client) Hello() error { // Create request @@ -72,6 +162,7 @@ func (c *Client) Hello() error { if err != nil { return err } + defer r.Body.Close() if r.StatusCode != http.StatusOK { return utils.GetErrorFromResponse(r) } @@ -79,14 +170,20 @@ func (c *Client) Hello() error { return nil } +// doBasic performs the core http transaction. // Make sure we do not run out of fds by throttling the requests -func (c *Client) do(req *http.Request) (*http.Response, error) { +func (c *Client) doBasic(req *http.Request) (*http.Response, error) { c.throttle <- true defer func() { <-c.throttle }() httpClient := &http.Client{} + if c.tlsClientConfig != nil { + httpClient.Transport = &http.Transport{ + TLSClientConfig: c.tlsClientConfig, + } + } httpClient.CheckRedirect = c.checkRedirect return httpClient.Do(req) } @@ -122,7 +219,7 @@ func (c *Client) waitForResponseWithTimer(r *http.Response, } // Wait for response - r, err = c.do(req) + r, err = c.doBasic(req) if err != nil { return nil, err } @@ -132,6 +229,11 @@ func (c *Client) waitForResponseWithTimer(r *http.Response, if r.StatusCode != http.StatusOK { return nil, utils.GetErrorFromResponse(r) } + if r != nil { + //Read Response Body + ioutil.ReadAll(r.Body) + r.Body.Close() + } time.Sleep(waitTime) } else { return r, nil @@ -174,3 +276,66 @@ func (c *Client) setToken(r *http.Request) error { return nil } + +// retryOperationDo performs the http request and internally +// handles http 429 codes up to the number of retries specified +// by the Client. +func (c *Client) retryOperationDo(req *http.Request) (*http.Response, error) { + var ( + requestBody []byte + err error + ) + if req.Body != nil { + requestBody, err = ioutil.ReadAll(req.Body) + if err != nil { + return nil, err + } + } + + // Send request + var r *http.Response + for i := 0; i <= c.opts.RetryCount; i++ { + req.Body = ioutil.NopCloser(bytes.NewReader(requestBody)) + r, err = c.doBasic(req) + if err != nil { + return nil, err + } + switch r.StatusCode { + case http.StatusTooManyRequests: + if r != nil { + //Read Response Body + // I don't like discarding error here, but I cant + // think of something better atm + b, _ := ioutil.ReadAll(r.Body) + r.Body.Close() + r.Body = ioutil.NopCloser(bytes.NewReader(b)) + } + //sleep before continue + time.Sleep(c.opts.retryDelay(r)) + continue + + default: + return r, err + + } + } + return r, err +} + +// retryDelay returns a duration for which a retry should wait +// (after failure) before continuing. 
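The `retryDelay` helper that follows can be read in isolation: honor an integer `Retry-After` header padded with up to `min` seconds of jitter, otherwise pick uniformly from `[min, max)` seconds. A standalone sketch with the same semantics; the function name is illustrative:

```go
package main

import (
	"fmt"
	"math/rand"
	"net/http"
	"strconv"
	"time"
)

// retryAfter computes how long to sleep before retrying a throttled (429)
// request. HTTP-date Retry-After values are not handled, matching the TODO
// in the vendored helper.
func retryAfter(resp *http.Response, min, max int) time.Duration {
	if ra := resp.Header.Get("Retry-After"); ra != "" {
		if i, err := strconv.Atoi(ra); err == nil {
			return time.Duration(rand.Intn(min)+i) * time.Second
		}
	}
	return time.Duration(rand.Intn(max-min)+min) * time.Second
}

func main() {
	resp := &http.Response{Header: http.Header{"Retry-After": []string{"5"}}}
	fmt.Println(retryAfter(resp, 10, 30)) // 5s plus up to 10s of jitter
}
```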
+func (c *ClientOptions) retryDelay(r *http.Response) time.Duration { + var ( + min = c.RetryMinDelay + max = c.RetryMaxDelay + ) + if ra := r.Header.Get("Retry-After"); ra != "" { + // TODO: support http date + if i, err := strconv.Atoi(ra); err == nil { + s := rand.Intn(min) + i + return time.Second * time.Duration(s) + } + } + s := rand.Intn(max-min) + min + return time.Second * time.Duration(s) +} diff --git a/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/cluster.go b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/cluster.go index ddab70dac545..b68a9e437292 100644 --- a/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/cluster.go +++ b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/cluster.go @@ -14,16 +14,23 @@ package client import ( "bytes" + "encoding/json" "net/http" "github.com/heketi/heketi/pkg/glusterfs/api" "github.com/heketi/heketi/pkg/utils" ) -func (c *Client) ClusterCreate() (*api.ClusterInfoResponse, error) { +func (c *Client) ClusterCreate(request *api.ClusterCreateRequest) (*api.ClusterInfoResponse, error) { + + buffer, err := json.Marshal(request) + if err != nil { + return nil, err + } // Create a request - req, err := http.NewRequest("POST", c.host+"/clusters", bytes.NewBuffer([]byte(`{}`))) + req, err := http.NewRequest("POST", c.host+"/clusters", + bytes.NewBuffer(buffer)) if err != nil { return nil, err } @@ -40,6 +47,7 @@ func (c *Client) ClusterCreate() (*api.ClusterInfoResponse, error) { if err != nil { return nil, err } + defer r.Body.Close() if r.StatusCode != http.StatusCreated { return nil, utils.GetErrorFromResponse(r) } @@ -47,7 +55,6 @@ func (c *Client) ClusterCreate() (*api.ClusterInfoResponse, error) { // Read JSON response var cluster api.ClusterInfoResponse err = utils.GetJsonFromResponse(r, &cluster) - r.Body.Close() if err != nil { return nil, err } @@ -55,6 +62,40 @@ func (c *Client) ClusterCreate() (*api.ClusterInfoResponse, error) { return &cluster, nil } +func (c *Client) ClusterSetFlags(id string, request *api.ClusterSetFlagsRequest) error { + + buffer, err := json.Marshal(request) + if err != nil { + return err + } + + // Create a request + req, err := http.NewRequest("POST", c.host+"/clusters/"+id+"/flags", + bytes.NewBuffer(buffer)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + + // Set token + err = c.setToken(req) + if err != nil { + return err + } + + // Send request + r, err := c.do(req) + if err != nil { + return err + } + defer r.Body.Close() + if r.StatusCode != http.StatusOK { + return utils.GetErrorFromResponse(r) + } + + return nil +} + func (c *Client) ClusterInfo(id string) (*api.ClusterInfoResponse, error) { // Create request @@ -74,6 +115,7 @@ func (c *Client) ClusterInfo(id string) (*api.ClusterInfoResponse, error) { if err != nil { return nil, err } + defer r.Body.Close() if r.StatusCode != http.StatusOK { return nil, utils.GetErrorFromResponse(r) } @@ -81,7 +123,6 @@ func (c *Client) ClusterInfo(id string) (*api.ClusterInfoResponse, error) { // Read JSON response var cluster api.ClusterInfoResponse err = utils.GetJsonFromResponse(r, &cluster) - r.Body.Close() if err != nil { return nil, err } @@ -108,6 +149,7 @@ func (c *Client) ClusterList() (*api.ClusterListResponse, error) { if err != nil { return nil, err } + defer r.Body.Close() if r.StatusCode != http.StatusOK { return nil, utils.GetErrorFromResponse(r) } @@ -141,6 +183,7 @@ func (c *Client) ClusterDelete(id string) error { if 
err != nil { return err } + defer r.Body.Close() if r.StatusCode != http.StatusOK { return utils.GetErrorFromResponse(r) } diff --git a/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/db.go b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/db.go new file mode 100644 index 000000000000..72ed129b0021 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/db.go @@ -0,0 +1,52 @@ +// +// Copyright (c) 2018 The heketi Authors +// +// This file is licensed to you under your choice of the GNU Lesser +// General Public License, version 3 or any later version (LGPLv3 or +// later), as published by the Free Software Foundation, +// or under the Apache License, Version 2.0 . +// +// You may not use this file except in compliance with those terms. +// + +package client + +import ( + "io/ioutil" + "net/http" + + "github.com/heketi/heketi/pkg/utils" +) + +// DbDump provides a JSON representation of current state of DB +func (c *Client) DbDump() (string, error) { + req, err := http.NewRequest("GET", c.host+"/db/dump", nil) + if err != nil { + return "", err + } + + // Set token + err = c.setToken(req) + if err != nil { + return "", err + } + + // Send request + r, err := c.do(req) + if err != nil { + return "", err + } + defer r.Body.Close() + if r.StatusCode != http.StatusOK { + return "", utils.GetErrorFromResponse(r) + } + + respBytes, err := ioutil.ReadAll(r.Body) + if err != nil { + return "", err + } + + respJSON := string(respBytes) + return respJSON, nil +} diff --git a/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/device.go b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/device.go index 50412521a56c..fd856d8b4765 100644 --- a/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/device.go +++ b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/device.go @@ -15,6 +15,7 @@ package client import ( "bytes" "encoding/json" + "io" "net/http" "time" @@ -47,6 +48,7 @@ func (c *Client) DeviceAdd(request *api.DeviceAddRequest) error { if err != nil { return err } + defer r.Body.Close() if r.StatusCode != http.StatusAccepted { return utils.GetErrorFromResponse(r) } @@ -82,6 +84,7 @@ func (c *Client) DeviceInfo(id string) (*api.DeviceInfoResponse, error) { if err != nil { return nil, err } + defer r.Body.Close() if r.StatusCode != http.StatusOK { return nil, utils.GetErrorFromResponse(r) } @@ -89,7 +92,6 @@ func (c *Client) DeviceInfo(id string) (*api.DeviceInfoResponse, error) { // Read JSON response var device api.DeviceInfoResponse err = utils.GetJsonFromResponse(r, &device) - r.Body.Close() if err != nil { return nil, err } @@ -98,9 +100,23 @@ func (c *Client) DeviceInfo(id string) (*api.DeviceInfoResponse, error) { } func (c *Client) DeviceDelete(id string) error { + return c.DeviceDeleteWithOptions(id, nil) +} + +func (c *Client) DeviceDeleteWithOptions( + id string, request *api.DeviceDeleteOptions) error { + + var buf io.Reader + if request != nil { + b, err := json.Marshal(request) + if err != nil { + return err + } + buf = bytes.NewBuffer(b) + } // Create a request - req, err := http.NewRequest("DELETE", c.host+"/devices/"+id, nil) + req, err := http.NewRequest("DELETE", c.host+"/devices/"+id, buf) if err != nil { return err } @@ -116,6 +132,7 @@ func (c *Client) DeviceDelete(id string) error { if err != nil { return err } + defer r.Body.Close() if r.StatusCode != http.StatusAccepted { return utils.GetErrorFromResponse(r) } 
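`DeviceDeleteWithOptions` above shows the optional-JSON-body pattern for DELETE requests: a nil options value must yield no request body at all, not the JSON literal `null`. A minimal sketch of just that shape; the URL is a placeholder:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

// newDelete builds a DELETE request whose JSON body is optional. A nil
// opts leaves the io.Reader nil, so http.NewRequest attaches no body.
func newDelete(url string, opts interface{}) (*http.Request, error) {
	var body io.Reader
	if opts != nil {
		b, err := json.Marshal(opts)
		if err != nil {
			return nil, err
		}
		body = bytes.NewBuffer(b)
	}
	return http.NewRequest("DELETE", url, body)
}

func main() {
	req, _ := newDelete("http://heketi.example.com:8080/devices/abc123", nil)
	fmt.Println(req.Method, req.URL, req.Body == nil) // DELETE ... true
}
```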
@@ -161,6 +178,7 @@ func (c *Client) DeviceState(id string, if err != nil { return err } + defer r.Body.Close() if r.StatusCode != http.StatusAccepted { return utils.GetErrorFromResponse(r) } @@ -176,3 +194,71 @@ func (c *Client) DeviceState(id string, return nil } + +func (c *Client) DeviceResync(id string) error { + + // Create a request + req, err := http.NewRequest("GET", c.host+"/devices/"+id+"/resync", nil) + if err != nil { + return err + } + + // Set token + err = c.setToken(req) + if err != nil { + return err + } + + // Send request + r, err := c.do(req) + if err != nil { + return err + } + defer r.Body.Close() + if r.StatusCode != http.StatusAccepted { + return utils.GetErrorFromResponse(r) + } + + // Wait for response + r, err = c.waitForResponseWithTimer(r, time.Millisecond*250) + if err != nil { + return err + } + if r.StatusCode != http.StatusNoContent { + return utils.GetErrorFromResponse(r) + } + + return nil +} + +func (c *Client) DeviceSetTags(id string, request *api.TagsChangeRequest) error { + buffer, err := json.Marshal(request) + if err != nil { + return err + } + + req, err := http.NewRequest("POST", + c.host+"/devices/"+id+"/tags", + bytes.NewBuffer(buffer)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + + // Set token + err = c.setToken(req) + if err != nil { + return err + } + + // Get info + r, err := c.do(req) + if err != nil { + return err + } + defer r.Body.Close() + if r.StatusCode != http.StatusOK { + return utils.GetErrorFromResponse(r) + } + return nil +} diff --git a/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/logging.go b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/logging.go new file mode 100644 index 000000000000..96a29d2ec751 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/logging.go @@ -0,0 +1,78 @@ +// +// Copyright (c) 2018 The heketi Authors +// +// This file is licensed to you under your choice of the GNU Lesser +// General Public License, version 3 or any later version (LGPLv3 or +// later), as published by the Free Software Foundation, +// or under the Apache License, Version 2.0 . +// +// You may not use this file except in compliance with those terms. 
+// + +package client + +import ( + "bytes" + "encoding/json" + "net/http" + + "github.com/heketi/heketi/pkg/glusterfs/api" + "github.com/heketi/heketi/pkg/utils" +) + +func (c *Client) LogLevelGet() (*api.LogLevelInfo, error) { + req, err := http.NewRequest("GET", c.host+"/internal/logging", nil) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + + // Set token + err = c.setToken(req) + if err != nil { + return nil, err + } + + // Send request + r, err := c.do(req) + if err != nil { + return nil, err + } + var lli api.LogLevelInfo + err = utils.GetJsonFromResponse(r, &lli) + if err != nil { + return nil, err + } + return &lli, nil +} + +func (c *Client) LogLevelSet(request *api.LogLevelInfo) error { + // Marshal request to JSON + buffer, err := json.Marshal(request) + if err != nil { + return err + } + + req, err := http.NewRequest("POST", c.host+"/internal/logging", bytes.NewBuffer(buffer)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + + // Set token + err = c.setToken(req) + if err != nil { + return err + } + + // Send request + r, err := c.do(req) + if err != nil { + return err + } + if r.StatusCode != http.StatusOK { + return utils.GetErrorFromResponse(r) + } + return nil +} diff --git a/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/node.go b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/node.go index 20039c1ef436..da31379d1996 100644 --- a/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/node.go +++ b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/node.go @@ -48,6 +48,7 @@ func (c *Client) NodeAdd(request *api.NodeAddRequest) (*api.NodeInfoResponse, er if err != nil { return nil, err } + defer r.Body.Close() if r.StatusCode != http.StatusAccepted { return nil, utils.GetErrorFromResponse(r) } @@ -64,7 +65,6 @@ func (c *Client) NodeAdd(request *api.NodeAddRequest) (*api.NodeInfoResponse, er // Read JSON response var node api.NodeInfoResponse err = utils.GetJsonFromResponse(r, &node) - r.Body.Close() if err != nil { return nil, err } @@ -91,6 +91,7 @@ func (c *Client) NodeInfo(id string) (*api.NodeInfoResponse, error) { if err != nil { return nil, err } + defer r.Body.Close() if r.StatusCode != http.StatusOK { return nil, utils.GetErrorFromResponse(r) } @@ -98,7 +99,6 @@ func (c *Client) NodeInfo(id string) (*api.NodeInfoResponse, error) { // Read JSON response var node api.NodeInfoResponse err = utils.GetJsonFromResponse(r, &node) - r.Body.Close() if err != nil { return nil, err } @@ -125,6 +125,7 @@ func (c *Client) NodeDelete(id string) error { if err != nil { return err } + defer r.Body.Close() if r.StatusCode != http.StatusAccepted { return utils.GetErrorFromResponse(r) } @@ -168,6 +169,7 @@ func (c *Client) NodeState(id string, request *api.StateRequest) error { if err != nil { return err } + defer r.Body.Close() if r.StatusCode != http.StatusAccepted { return utils.GetErrorFromResponse(r) } @@ -183,3 +185,35 @@ func (c *Client) NodeState(id string, request *api.StateRequest) error { return nil } + +func (c *Client) NodeSetTags(id string, request *api.TagsChangeRequest) error { + buffer, err := json.Marshal(request) + if err != nil { + return err + } + + req, err := http.NewRequest("POST", + c.host+"/nodes/"+id+"/tags", + bytes.NewBuffer(buffer)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + + // Set token + err = c.setToken(req) + if err != nil { + 
return err + } + + // Get info + r, err := c.do(req) + if err != nil { + return err + } + defer r.Body.Close() + if r.StatusCode != http.StatusOK { + return utils.GetErrorFromResponse(r) + } + return nil +} diff --git a/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/operations.go b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/operations.go new file mode 100644 index 000000000000..d641aea50076 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/operations.go @@ -0,0 +1,46 @@ +// +// Copyright (c) 2018 The heketi Authors +// +// This file is licensed to you under your choice of the GNU Lesser +// General Public License, version 3 or any later version (LGPLv3 or +// later), as published by the Free Software Foundation, +// or under the Apache License, Version 2.0 . +// +// You may not use this file except in compliance with those terms. +// + +package client + +import ( + "net/http" + + "github.com/heketi/heketi/pkg/glusterfs/api" + "github.com/heketi/heketi/pkg/utils" +) + +func (c *Client) OperationsInfo() (*api.OperationsInfo, error) { + req, err := http.NewRequest("GET", c.host+"/operations", nil) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + + // Set token + err = c.setToken(req) + if err != nil { + return nil, err + } + + // Send request + r, err := c.do(req) + if err != nil { + return nil, err + } + var oi api.OperationsInfo + err = utils.GetJsonFromResponse(r, &oi) + if err != nil { + return nil, err + } + return &oi, nil +} diff --git a/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/topology.go b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/topology.go index 6c9ffe28c882..549aa7c75c4e 100644 --- a/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/topology.go +++ b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/topology.go @@ -33,6 +33,10 @@ func (c *Client) TopologyInfo() (*api.TopologyInfoResponse, error) { Id: clusteri.Id, Volumes: make([]api.VolumeInfoResponse, 0), Nodes: make([]api.NodeInfoResponse, 0), + ClusterFlags: api.ClusterFlags{ + Block: clusteri.Block, + File: clusteri.File, + }, } cluster.Id = clusteri.Id diff --git a/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/volume.go b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/volume.go index 821cfa80837b..5b5d919ac5f2 100644 --- a/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/volume.go +++ b/cluster-autoscaler/vendor/github.com/heketi/heketi/client/api/go-client/volume.go @@ -51,6 +51,61 @@ func (c *Client) VolumeCreate(request *api.VolumeCreateRequest) ( if err != nil { return nil, err } + defer r.Body.Close() + if r.StatusCode != http.StatusAccepted { + return nil, utils.GetErrorFromResponse(r) + } + + // Wait for response + r, err = c.waitForResponseWithTimer(r, time.Second) + if err != nil { + return nil, err + } + if r.StatusCode != http.StatusOK { + return nil, utils.GetErrorFromResponse(r) + } + + // Read JSON response + var volume api.VolumeInfoResponse + err = utils.GetJsonFromResponse(r, &volume) + if err != nil { + return nil, err + } + + return &volume, nil + +} + +func (c *Client) VolumeSetBlockRestriction(id string, request *api.VolumeBlockRestrictionRequest) ( + *api.VolumeInfoResponse, error) { + + // Marshal request to JSON + buffer, err := json.Marshal(request) + if err != nil { + return nil, err 
+ } + + // Create a request + req, err := http.NewRequest("POST", + c.host+"/volumes/"+id+"/block-restriction", + bytes.NewBuffer(buffer)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + + // Set token + err = c.setToken(req) + if err != nil { + return nil, err + } + + // Send request + r, err := c.do(req) + if err != nil { + return nil, err + } + defer r.Body.Close() if r.StatusCode != http.StatusAccepted { return nil, utils.GetErrorFromResponse(r) } @@ -67,7 +122,6 @@ func (c *Client) VolumeCreate(request *api.VolumeCreateRequest) ( // Read JSON response var volume api.VolumeInfoResponse err = utils.GetJsonFromResponse(r, &volume) - r.Body.Close() if err != nil { return nil, err } @@ -105,6 +159,7 @@ func (c *Client) VolumeExpand(id string, request *api.VolumeExpandRequest) ( if err != nil { return nil, err } + defer r.Body.Close() if r.StatusCode != http.StatusAccepted { return nil, utils.GetErrorFromResponse(r) } @@ -121,7 +176,6 @@ func (c *Client) VolumeExpand(id string, request *api.VolumeExpandRequest) ( // Read JSON response var volume api.VolumeInfoResponse err = utils.GetJsonFromResponse(r, &volume) - r.Body.Close() if err != nil { return nil, err } @@ -149,6 +203,7 @@ func (c *Client) VolumeList() (*api.VolumeListResponse, error) { if err != nil { return nil, err } + defer r.Body.Close() if r.StatusCode != http.StatusOK { return nil, utils.GetErrorFromResponse(r) } @@ -182,6 +237,7 @@ func (c *Client) VolumeInfo(id string) (*api.VolumeInfoResponse, error) { if err != nil { return nil, err } + defer r.Body.Close() if r.StatusCode != http.StatusOK { return nil, utils.GetErrorFromResponse(r) } @@ -189,7 +245,6 @@ func (c *Client) VolumeInfo(id string) (*api.VolumeInfoResponse, error) { // Read JSON response var volume api.VolumeInfoResponse err = utils.GetJsonFromResponse(r, &volume) - r.Body.Close() if err != nil { return nil, err } @@ -216,6 +271,7 @@ func (c *Client) VolumeDelete(id string) error { if err != nil { return err } + defer r.Body.Close() if r.StatusCode != http.StatusAccepted { return utils.GetErrorFromResponse(r) } @@ -231,3 +287,52 @@ func (c *Client) VolumeDelete(id string) error { return nil } + +func (c *Client) VolumeClone(id string, request *api.VolumeCloneRequest) (*api.VolumeInfoResponse, error) { + // Marshal request to JSON + buffer, err := json.Marshal(request) + if err != nil { + return nil, err + } + + // Create a request + req, err := http.NewRequest("POST", c.host+"/volumes/"+id+"/clone", bytes.NewBuffer(buffer)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + + // Set token + err = c.setToken(req) + if err != nil { + return nil, err + } + + // Send request + r, err := c.do(req) + if err != nil { + return nil, err + } + defer r.Body.Close() + if r.StatusCode != http.StatusAccepted { + return nil, utils.GetErrorFromResponse(r) + } + + // Wait for response + r, err = c.waitForResponseWithTimer(r, time.Second) + if err != nil { + return nil, err + } + if r.StatusCode != http.StatusOK { + return nil, utils.GetErrorFromResponse(r) + } + + // Read JSON response + var volume api.VolumeInfoResponse + err = utils.GetJsonFromResponse(r, &volume) + if err != nil { + return nil, err + } + + return &volume, nil +} diff --git a/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/glusterfs/api/types.go b/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/glusterfs/api/types.go index c244f5af9b92..759fb9bda3a2 100644 --- 
a/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/glusterfs/api/types.go
+++ b/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/glusterfs/api/types.go
@@ -18,9 +18,38 @@ package api
 
 import (
 	"fmt"
+	"regexp"
 	"sort"
+
+	"github.com/go-ozzo/ozzo-validation"
+	"github.com/go-ozzo/ozzo-validation/is"
 )
 
+var (
+	// Restricting the deviceName to a much smaller subset of Unix paths,
+	// as a Unix path accepts almost anything except NULL
+	deviceNameRe = regexp.MustCompile("^/[a-zA-Z0-9_.:/-]+$")
+
+	// Volume name constraints decided by looking at the
+	// "cli_validate_volname" function in cli-cmd-parser.c of the gluster code
+	volumeNameRe = regexp.MustCompile("^[a-zA-Z0-9_-]+$")
+
+	blockVolNameRe = regexp.MustCompile("^[a-zA-Z0-9_-]+$")
+
+	tagNameRe = regexp.MustCompile("^[a-zA-Z0-9_.-]+$")
+)
+
+// ValidateUUID is written this way because a heketi UUID conforms
+// to neither UUID v4 nor v5.
+func ValidateUUID(value interface{}) error {
+	s, _ := value.(string)
+	err := validation.Validate(s, validation.RuneLength(32, 32), is.Hexadecimal)
+	if err != nil {
+		return fmt.Errorf("%v is not a valid UUID", s)
+	}
+	return nil
+}
+
 // State
 type EntryState string
 
@@ -31,6 +60,15 @@ const (
 	EntryStateFailed  EntryState = "failed"
 )
 
+func ValidateEntryState(value interface{}) error {
+	s, _ := value.(EntryState)
+	err := validation.Validate(s, validation.Required, validation.In(EntryStateOnline, EntryStateOffline, EntryStateFailed))
+	if err != nil {
+		return fmt.Errorf("%v is not a valid state", s)
+	}
+	return nil
+}
+
 type DurabilityType string
 
 const (
@@ -39,11 +77,26 @@ const (
 	DurabilityEC             DurabilityType = "disperse"
 )
 
+func ValidateDurabilityType(value interface{}) error {
+	s, _ := value.(DurabilityType)
+	err := validation.Validate(s, validation.Required, validation.In(DurabilityReplicate, DurabilityDistributeOnly, DurabilityEC))
+	if err != nil {
+		return fmt.Errorf("%v is not a valid durability type", s)
+	}
+	return nil
+}
+
 // Common
 type StateRequest struct {
 	State EntryState `json:"state"`
 }
 
+func (statereq StateRequest) Validate() error {
+	return validation.ValidateStruct(&statereq,
+		validation.Field(&statereq.State, validation.Required, validation.By(ValidateEntryState)),
+	)
+}
+
 // Storage values in KB
 type StorageSize struct {
 	Total uint64 `json:"total"`
@@ -56,6 +109,35 @@ type HostAddresses struct {
 	Storage sort.StringSlice `json:"storage"`
 }
 
+func ValidateManagementHostname(value interface{}) error {
+	s, _ := value.(sort.StringSlice)
+	for _, fqdn := range s {
+		err := validation.Validate(fqdn, validation.Required, is.Host)
+		if err != nil {
+			return fmt.Errorf("%v is not a valid manage hostname", s)
+		}
+	}
+	return nil
+}
+
+func ValidateStorageHostname(value interface{}) error {
+	s, _ := value.(sort.StringSlice)
+	for _, ip := range s {
+		err := validation.Validate(ip, validation.Required, is.Host)
+		if err != nil {
+			return fmt.Errorf("%v is not a valid storage hostname", s)
+		}
+	}
+	return nil
+}
+
+func (hostadd HostAddresses) Validate() error {
+	return validation.ValidateStruct(&hostadd,
+		validation.Field(&hostadd.Manage, validation.Required, validation.By(ValidateManagementHostname)),
+		validation.Field(&hostadd.Storage, validation.Required, validation.By(ValidateStorageHostname)),
+	)
+}
+
 // Brick
 type BrickInfo struct {
 	Id       string `json:"id"`
@@ -70,12 +152,29 @@ type BrickInfo struct {
 
 // Device
 type Device struct {
-	Name string `json:"name"`
+	Name string            `json:"name"`
+	Tags map[string]string `json:"tags,omitempty"`
+}
+
+func (dev Device) Validate()
error { + return validation.ValidateStruct(&dev, + validation.Field(&dev.Name, validation.Required, validation.Match(deviceNameRe)), + validation.Field(&dev.Tags, validation.By(ValidateTags)), + ) } type DeviceAddRequest struct { Device - NodeId string `json:"node"` + NodeId string `json:"node"` + DestroyData bool `json:"destroydata,omitempty"` +} + +func (devAddReq DeviceAddRequest) Validate() error { + return validation.ValidateStruct(&devAddReq, + validation.Field(&devAddReq.Device, validation.Required), + validation.Field(&devAddReq.NodeId, validation.Required, validation.By(ValidateUUID)), + validation.Field(&devAddReq.DestroyData, validation.In(true, false)), + ) } type DeviceInfo struct { @@ -92,9 +191,19 @@ type DeviceInfoResponse struct { // Node type NodeAddRequest struct { - Zone int `json:"zone"` - Hostnames HostAddresses `json:"hostnames"` - ClusterId string `json:"cluster"` + Zone int `json:"zone"` + Hostnames HostAddresses `json:"hostnames"` + ClusterId string `json:"cluster"` + Tags map[string]string `json:"tags,omitempty"` +} + +func (req NodeAddRequest) Validate() error { + return validation.ValidateStruct(&req, + validation.Field(&req.Zone, validation.Required, validation.Min(1)), + validation.Field(&req.Hostnames, validation.Required), + validation.Field(&req.ClusterId, validation.Required, validation.By(ValidateUUID)), + validation.Field(&req.Tags, validation.By(ValidateTags)), + ) } type NodeInfo struct { @@ -109,20 +218,37 @@ type NodeInfoResponse struct { } // Cluster + +type ClusterFlags struct { + Block bool `json:"block"` + File bool `json:"file"` +} + type Cluster struct { Volumes []VolumeInfoResponse `json:"volumes"` Nodes []NodeInfoResponse `json:"nodes"` Id string `json:"id"` + ClusterFlags } type TopologyInfoResponse struct { ClusterList []Cluster `json:"clusters"` } +type ClusterCreateRequest struct { + ClusterFlags +} + +type ClusterSetFlagsRequest struct { + ClusterFlags +} + type ClusterInfoResponse struct { Id string `json:"id"` Nodes sort.StringSlice `json:"nodes"` Volumes sort.StringSlice `json:"volumes"` + ClusterFlags + BlockVolumes sort.StringSlice `json:"blockvolumes"` } type ClusterListResponse struct { @@ -147,19 +273,57 @@ type VolumeDurabilityInfo struct { } type VolumeCreateRequest struct { - // Size in GB + // Size in GiB Size int `json:"size"` Clusters []string `json:"clusters,omitempty"` Name string `json:"name"` Durability VolumeDurabilityInfo `json:"durability,omitempty"` Gid int64 `json:"gid,omitempty"` GlusterVolumeOptions []string `json:"glustervolumeoptions,omitempty"` + Block bool `json:"block,omitempty"` Snapshot struct { Enable bool `json:"enable"` Factor float32 `json:"factor"` } `json:"snapshot"` } +func (volCreateRequest VolumeCreateRequest) Validate() error { + return validation.ValidateStruct(&volCreateRequest, + validation.Field(&volCreateRequest.Size, validation.Required, validation.Min(1)), + validation.Field(&volCreateRequest.Clusters, validation.By(ValidateUUID)), + validation.Field(&volCreateRequest.Name, validation.Match(volumeNameRe)), + validation.Field(&volCreateRequest.Durability, validation.Skip), + validation.Field(&volCreateRequest.Gid, validation.Skip), + validation.Field(&volCreateRequest.GlusterVolumeOptions, validation.Skip), + validation.Field(&volCreateRequest.Block, validation.In(true, false)), + // This is possibly a bug in validation lib, ignore next two lines for now + // validation.Field(&volCreateRequest.Snapshot.Enable, validation.In(true, false)), + // 
validation.Field(&volCreateRequest.Snapshot.Factor, validation.Min(1.0)), + ) +} + +type BlockRestriction string + +const ( + Unrestricted BlockRestriction = "" + Locked BlockRestriction = "locked" + LockedByUpdate BlockRestriction = "locked-by-update" +) + +func (br BlockRestriction) String() string { + switch br { + case Unrestricted: + return "(none)" + case Locked: + return "locked" + case LockedByUpdate: + return "locked-by-update" + default: + return "unknown" + + } +} + type VolumeInfo struct { VolumeCreateRequest Id string `json:"id"` @@ -171,6 +335,12 @@ type VolumeInfo struct { Options map[string]string `json:"options"` } `json:"glusterfs"` } `json:"mount"` + BlockInfo struct { + FreeSize int `json:"freesize,omitempty"` + ReservedSize int `json:"reservedsize,omitempty"` + BlockVolumes sort.StringSlice `json:"blockvolume,omitempty"` + Restriction BlockRestriction `json:"restriction,omitempty"` + } `json:"blockinfo,omitempty"` } type VolumeInfoResponse struct { @@ -186,6 +356,132 @@ type VolumeExpandRequest struct { Size int `json:"expand_size"` } +func (volExpandReq VolumeExpandRequest) Validate() error { + return validation.ValidateStruct(&volExpandReq, + validation.Field(&volExpandReq.Size, validation.Required, validation.Min(1)), + ) +} + +type VolumeCloneRequest struct { + Name string `json:"name,omitempty"` +} + +func (vcr VolumeCloneRequest) Validate() error { + return validation.ValidateStruct(&vcr, + validation.Field(&vcr.Name, validation.Match(volumeNameRe)), + ) +} + +type VolumeBlockRestrictionRequest struct { + Restriction BlockRestriction `json:"restriction"` +} + +func (vbrr VolumeBlockRestrictionRequest) Validate() error { + return validation.ValidateStruct(&vbrr, + validation.Field(&vbrr.Restriction, + validation.In(Unrestricted, Locked))) +} + +// BlockVolume + +type BlockVolumeCreateRequest struct { + // Size in GiB + Size int `json:"size"` + Clusters []string `json:"clusters,omitempty"` + Name string `json:"name"` + Hacount int `json:"hacount,omitempty"` + Auth bool `json:"auth,omitempty"` +} + +func (blockVolCreateReq BlockVolumeCreateRequest) Validate() error { + return validation.ValidateStruct(&blockVolCreateReq, + validation.Field(&blockVolCreateReq.Size, validation.Required, validation.Min(1)), + validation.Field(&blockVolCreateReq.Clusters, validation.By(ValidateUUID)), + validation.Field(&blockVolCreateReq.Name, validation.Match(blockVolNameRe)), + validation.Field(&blockVolCreateReq.Hacount, validation.Min(1)), + validation.Field(&blockVolCreateReq.Auth, validation.Skip), + ) +} + +type BlockVolumeInfo struct { + BlockVolumeCreateRequest + Id string `json:"id"` + BlockVolume struct { + Hosts []string `json:"hosts"` + Iqn string `json:"iqn"` + Lun int `json:"lun"` + Username string `json:"username"` + Password string `json:"password"` + /* + Options map[string]string `json:"options"` // needed?... 
+ */ + } `json:"blockvolume"` + Cluster string `json:"cluster,omitempty"` + BlockHostingVolume string `json:"blockhostingvolume,omitempty"` +} + +type BlockVolumeInfoResponse struct { + BlockVolumeInfo +} + +type BlockVolumeListResponse struct { + BlockVolumes []string `json:"blockvolumes"` +} + +type LogLevelInfo struct { + // should contain one or more logger to log-level-name mapping + LogLevel map[string]string `json:"loglevel"` +} + +type TagsChangeType string + +const ( + UnknownTagsChangeType TagsChangeType = "" + SetTags TagsChangeType = "set" + UpdateTags TagsChangeType = "update" + DeleteTags TagsChangeType = "delete" +) + +// Common tag post body +type TagsChangeRequest struct { + Tags map[string]string `json:"tags"` + Change TagsChangeType `json:"change_type"` +} + +func (tcr TagsChangeRequest) Validate() error { + return validation.ValidateStruct(&tcr, + validation.Field(&tcr.Tags, validation.By(ValidateTags)), + validation.Field(&tcr.Change, + validation.Required, + validation.In(SetTags, UpdateTags, DeleteTags))) +} + +func ValidateTags(v interface{}) error { + t, ok := v.(map[string]string) + if !ok { + return fmt.Errorf("tags must be a map of strings to strings") + } + if len(t) > 32 { + return fmt.Errorf("too many tags specified (%v), up to %v supported", + len(t), 32) + } + for k, v := range t { + if len(k) == 0 { + return fmt.Errorf("tag names may not be empty") + } + if err := validation.Validate(k, validation.RuneLength(1, 32)); err != nil { + return fmt.Errorf("tag name %v: %v", k, err) + } + if err := validation.Validate(v, validation.RuneLength(0, 64)); err != nil { + return fmt.Errorf("value of tag %v: %v", k, err) + } + if !tagNameRe.MatchString(k) { + return fmt.Errorf("invalid characters in tag name %+v", k) + } + } + return nil +} + // Constructors func NewVolumeInfoResponse() *VolumeInfoResponse { @@ -205,6 +501,11 @@ func (v *VolumeInfoResponse) String() string { "Cluster Id: %v\n"+ "Mount: %v\n"+ "Mount Options: backup-volfile-servers=%v\n"+ + "Block: %v\n"+ + "Free Size: %v\n"+ + "Reserved Size: %v\n"+ + "Block Hosting Restriction: %v\n"+ + "Block Volumes: %v\n"+ "Durability Type: %v\n", v.Name, v.Size, @@ -212,6 +513,11 @@ func (v *VolumeInfoResponse) String() string { v.Cluster, v.Mount.GlusterFS.MountPoint, v.Mount.GlusterFS.Options["backup-volfile-servers"], + v.Block, + v.BlockInfo.FreeSize, + v.BlockInfo.ReservedSize, + v.BlockInfo.Restriction, + v.BlockInfo.BlockVolumes, v.Durability.Type) switch v.Durability.Type { @@ -248,3 +554,90 @@ func (v *VolumeInfoResponse) String() string { return s } + +func NewBlockVolumeInfoResponse() *BlockVolumeInfoResponse { + + info := &BlockVolumeInfoResponse{} + // Nothing to Construct now maybe for future + + return info +} + +// String functions +func (v *BlockVolumeInfoResponse) String() string { + s := fmt.Sprintf("Name: %v\n"+ + "Size: %v\n"+ + "Volume Id: %v\n"+ + "Cluster Id: %v\n"+ + "Hosts: %v\n"+ + "IQN: %v\n"+ + "LUN: %v\n"+ + "Hacount: %v\n"+ + "Username: %v\n"+ + "Password: %v\n"+ + "Block Hosting Volume: %v\n", + v.Name, + v.Size, + v.Id, + v.Cluster, + v.BlockVolume.Hosts, + v.BlockVolume.Iqn, + v.BlockVolume.Lun, + v.Hacount, + v.BlockVolume.Username, + v.BlockVolume.Password, + v.BlockHostingVolume) + + /* + s += "\nBricks:\n" + for _, b := range v.Bricks { + s += fmt.Sprintf("Id: %v\n"+ + "Path: %v\n"+ + "Size (GiB): %v\n"+ + "Node: %v\n"+ + "Device: %v\n\n", + b.Id, + b.Path, + b.Size/(1024*1024), + b.NodeId, + b.DeviceId) + } + */ + + return s +} + +type OperationsInfo struct { + Total uint64 
`json:"total"` + InFlight uint64 `json:"in_flight"` + // state based counts: + Stale uint64 `json:"stale"` + New uint64 `json:"new"` +} + +type AdminState string + +const ( + AdminStateNormal AdminState = "normal" + AdminStateReadOnly AdminState = "read-only" + AdminStateLocal AdminState = "local-client" +) + +type AdminStatus struct { + State AdminState `json:"state"` +} + +func (as AdminStatus) Validate() error { + return validation.ValidateStruct(&as, + validation.Field(&as.State, + validation.Required, + validation.In(AdminStateNormal, AdminStateReadOnly, AdminStateLocal))) +} + +// DeviceDeleteOptions is used to specify additional behavior for device +// deletes. +type DeviceDeleteOptions struct { + // force heketi to forget about a device, possibly + // orphaning metadata on the node + ForceForget bool `json:"forceforget"` +} diff --git a/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/utils/bodystring.go b/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/utils/bodystring.go index 468db04e4541..8a7b4aca1bc1 100644 --- a/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/utils/bodystring.go +++ b/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/utils/bodystring.go @@ -3,14 +3,18 @@ // // This file is licensed to you under your choice of the GNU Lesser // General Public License, version 3 or any later version (LGPLv3 or -// later), or the GNU General Public License, version 2 (GPLv2), in all -// cases as published by the Free Software Foundation. +// later), as published by the Free Software Foundation, +// or under the Apache License, Version 2.0 . +// +// You may not use this file except in compliance with those terms. // package utils import ( "errors" + "fmt" "io" "io/ioutil" "net/http" @@ -20,10 +24,10 @@ import ( // Return the body from a response as a string func GetStringFromResponse(r *http.Response) (string, error) { body, err := ioutil.ReadAll(io.LimitReader(r.Body, r.ContentLength)) + defer r.Body.Close() if err != nil { return "", err } - r.Body.Close() return string(body), nil } @@ -33,5 +37,10 @@ func GetErrorFromResponse(r *http.Response) error { if err != nil { return err } - return errors.New(strings.TrimSpace(s)) + + s = strings.TrimSpace(s) + if len(s) == 0 { + return fmt.Errorf("server did not provide a message (status %v: %v)", r.StatusCode, http.StatusText(r.StatusCode)) + } + return errors.New(s) } diff --git a/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/utils/jsonutils.go b/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/utils/jsonutils.go index 77781d9c938b..6fb958d8ff47 100644 --- a/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/utils/jsonutils.go +++ b/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/utils/jsonutils.go @@ -3,8 +3,11 @@ // // This file is licensed to you under your choice of the GNU Lesser // General Public License, version 3 or any later version (LGPLv3 or -// later), or the GNU General Public License, version 2 (GPLv2), in all -// cases as published by the Free Software Foundation. +// later), as published by the Free Software Foundation, +// or under the Apache License, Version 2.0 . +// +// You may not use this file except in compliance with those terms. 
// package utils diff --git a/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/utils/log.go b/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/utils/log.go deleted file mode 100644 index a9451ed7e575..000000000000 --- a/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/utils/log.go +++ /dev/null @@ -1,155 +0,0 @@ -// -// Copyright (c) 2015 The heketi Authors -// -// This file is licensed to you under your choice of the GNU Lesser -// General Public License, version 3 or any later version (LGPLv3 or -// later), or the GNU General Public License, version 2 (GPLv2), in all -// cases as published by the Free Software Foundation. -// - -package utils - -import ( - "fmt" - "io" - "log" - "os" - "runtime" - "strings" - - "github.com/lpabon/godbc" -) - -type LogLevel int - -// Log levels -const ( - LEVEL_NOLOG LogLevel = iota - LEVEL_CRITICAL - LEVEL_ERROR - LEVEL_WARNING - LEVEL_INFO - LEVEL_DEBUG -) - -var ( - stderr io.Writer = os.Stderr - stdout io.Writer = os.Stdout -) - -type Logger struct { - critlog, errorlog, infolog *log.Logger - debuglog, warninglog *log.Logger - - level LogLevel -} - -func logWithLongFile(l *log.Logger, format string, v ...interface{}) { - _, file, line, _ := runtime.Caller(2) - - // Shorten the path. - // From - // /builddir/build/BUILD/heketi-3f4a5b1b6edff87232e8b24533c53b4151ebd9c7/src/github.com/heketi/heketi/apps/glusterfs/volume_entry.go - // to - // src/github.com/heketi/heketi/apps/glusterfs/volume_entry.go - i := strings.Index(file, "/src/") - if i == -1 { - i = 0 - } - - l.Print(fmt.Sprintf("%v:%v: ", file[i:], line) + - fmt.Sprintf(format, v...)) -} - -// Create a new logger -func NewLogger(prefix string, level LogLevel) *Logger { - godbc.Require(level >= 0, level) - godbc.Require(level <= LEVEL_DEBUG, level) - - l := &Logger{} - - if level == LEVEL_NOLOG { - l.level = LEVEL_DEBUG - } else { - l.level = level - } - - l.critlog = log.New(stderr, prefix+" CRITICAL ", log.LstdFlags) - l.errorlog = log.New(stderr, prefix+" ERROR ", log.LstdFlags) - l.warninglog = log.New(stdout, prefix+" WARNING ", log.LstdFlags) - l.infolog = log.New(stdout, prefix+" INFO ", log.LstdFlags) - l.debuglog = log.New(stdout, prefix+" DEBUG ", log.LstdFlags) - - godbc.Ensure(l.critlog != nil) - godbc.Ensure(l.errorlog != nil) - godbc.Ensure(l.warninglog != nil) - godbc.Ensure(l.infolog != nil) - godbc.Ensure(l.debuglog != nil) - - return l -} - -// Return current level -func (l *Logger) Level() LogLevel { - return l.level -} - -// Set level -func (l *Logger) SetLevel(level LogLevel) { - l.level = level -} - -// Log critical information -func (l *Logger) Critical(format string, v ...interface{}) { - if l.level >= LEVEL_CRITICAL { - logWithLongFile(l.critlog, format, v...) - } -} - -// Log error string -func (l *Logger) LogError(format string, v ...interface{}) error { - if l.level >= LEVEL_ERROR { - logWithLongFile(l.errorlog, format, v...) - } - - return fmt.Errorf(format, v...) -} - -// Log error variable -func (l *Logger) Err(err error) error { - if l.level >= LEVEL_ERROR { - logWithLongFile(l.errorlog, "%v", err) - } - - return err -} - -// Log warning information -func (l *Logger) Warning(format string, v ...interface{}) { - if l.level >= LEVEL_WARNING { - l.warninglog.Printf(format, v...) 
- } -} - -// Log error variable as a warning -func (l *Logger) WarnErr(err error) error { - if l.level >= LEVEL_WARNING { - logWithLongFile(l.warninglog, "%v", err) - } - - return err -} - -// Log string -func (l *Logger) Info(format string, v ...interface{}) { - if l.level >= LEVEL_INFO { - l.infolog.Printf(format, v...) - } -} - -// Log string as debug -func (l *Logger) Debug(format string, v ...interface{}) { - if l.level >= LEVEL_DEBUG { - logWithLongFile(l.debuglog, format, v...) - } -} diff --git a/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/utils/sortedstrings.go b/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/utils/sortedstrings.go deleted file mode 100644 index 985230d9f329..000000000000 --- a/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/utils/sortedstrings.go +++ /dev/null @@ -1,33 +0,0 @@ -// -// Copyright (c) 2015 The heketi Authors -// -// This file is licensed to you under your choice of the GNU Lesser -// General Public License, version 3 or any later version (LGPLv3 or -// later), or the GNU General Public License, version 2 (GPLv2), in all -// cases as published by the Free Software Foundation. -// - -package utils - -import ( - "sort" -) - -// Check if a sorted string list has a string -func SortedStringHas(s sort.StringSlice, x string) bool { - index := s.Search(x) - if index == len(s) { - return false - } - return s[s.Search(x)] == x -} - -// Delete a string from a sorted string list -func SortedStringsDelete(s sort.StringSlice, x string) sort.StringSlice { - index := s.Search(x) - if len(s) != index && s[index] == x { - s = append(s[:index], s[index+1:]...) - } - - return s -} diff --git a/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/utils/statusgroup.go b/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/utils/statusgroup.go index 7c678a5031bf..c0354c964c4e 100644 --- a/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/utils/statusgroup.go +++ b/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/utils/statusgroup.go @@ -3,8 +3,11 @@ // // This file is licensed to you under your choice of the GNU Lesser // General Public License, version 3 or any later version (LGPLv3 or -// later), or the GNU General Public License, version 2 (GPLv2), in all -// cases as published by the Free Software Foundation. +// later), as published by the Free Software Foundation, +// or under the Apache License, Version 2.0 . +// +// You may not use this file except in compliance with those terms. // package utils diff --git a/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/utils/stringset.go b/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/utils/stringset.go deleted file mode 100644 index 8d7f3ca780d9..000000000000 --- a/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/utils/stringset.go +++ /dev/null @@ -1,44 +0,0 @@ -// -// Copyright (c) 2015 The heketi Authors -// -// This file is licensed to you under your choice of the GNU Lesser -// General Public License, version 3 or any later version (LGPLv3 or -// later), or the GNU General Public License, version 2 (GPLv2), in all -// cases as published by the Free Software Foundation. -// - -package utils - -import ( - "sort" -) - -type StringSet struct { - Set sort.StringSlice -} - -// Create a string set. 
-// -// A string set is a list where each element appears only once -func NewStringSet() *StringSet { - return &StringSet{ - Set: make(sort.StringSlice, 0), - } -} - -// Add a string to the string set -func (s *StringSet) Add(v string) { - if !SortedStringHas(s.Set, v) { - s.Set = append(s.Set, v) - s.Set.Sort() - } -} - -// Return string list -func (s *StringSet) Strings() []string { - return s.Set -} - -func (s *StringSet) Len() int { - return len(s.Set) -} diff --git a/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/utils/stringstack.go b/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/utils/stringstack.go deleted file mode 100644 index c1b48e219aa0..000000000000 --- a/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/utils/stringstack.go +++ /dev/null @@ -1,33 +0,0 @@ -// -// Copyright (c) 2015 The heketi Authors -// -// This file is licensed to you under your choice of the GNU Lesser -// General Public License, version 3 or any later version (LGPLv3 or -// later), or the GNU General Public License, version 2 (GPLv2), in all -// cases as published by the Free Software Foundation. -// - -package utils - -type StringStack struct { - list []string -} - -func NewStringStack() *StringStack { - a := &StringStack{} - a.list = make([]string, 0) - return a -} - -func (a *StringStack) IsEmpty() bool { - return len(a.list) == 0 -} - -func (a *StringStack) Pop() (x string) { - x, a.list = a.list[0], a.list[1:len(a.list)] - return -} - -func (a *StringStack) Push(x string) { - a.list = append(a.list, x) -} diff --git a/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/utils/uuid.go b/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/utils/uuid.go deleted file mode 100644 index 39e743d4dad6..000000000000 --- a/cluster-autoscaler/vendor/github.com/heketi/heketi/pkg/utils/uuid.go +++ /dev/null @@ -1,27 +0,0 @@ -// -// Copyright (c) 2015 The heketi Authors -// -// This file is licensed to you under your choice of the GNU Lesser -// General Public License, version 3 or any later version (LGPLv3 or -// later), or the GNU General Public License, version 2 (GPLv2), in all -// cases as published by the Free Software Foundation. - -package utils - -// From http://www.ashishbanerjee.com/home/go/go-generate-uuid - -import ( - "crypto/rand" - "encoding/hex" - "github.com/lpabon/godbc" -) - -// Return a 16-byte uuid -func GenUUID() string { - uuid := make([]byte, 16) - n, err := rand.Read(uuid) - godbc.Check(n == len(uuid), n, len(uuid)) - godbc.Check(err == nil, err) - - return hex.EncodeToString(uuid) -} diff --git a/cluster-autoscaler/vendor/github.com/json-iterator/go/Gopkg.lock b/cluster-autoscaler/vendor/github.com/json-iterator/go/Gopkg.lock index 3719afe8e0d5..c8a9fbb3871b 100644 --- a/cluster-autoscaler/vendor/github.com/json-iterator/go/Gopkg.lock +++ b/cluster-autoscaler/vendor/github.com/json-iterator/go/Gopkg.lock @@ -1,12 +1,6 @@ # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
-[[projects]] - name = "github.com/json-iterator/go" - packages = ["."] - revision = "ca39e5af3ece67bbcda3d0f4f56a8e24d9f2dad4" - version = "1.1.3" - [[projects]] name = "github.com/modern-go/concurrent" packages = ["."] @@ -16,12 +10,12 @@ [[projects]] name = "github.com/modern-go/reflect2" packages = ["."] - revision = "1df9eeb2bb81f327b96228865c5687bc2194af3f" - version = "1.0.0" + revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" + version = "1.0.1" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "56a0b9e9e61d2bc8af5e1b68537401b7f4d60805eda3d107058f3171aa5cf793" + inputs-digest = "ea54a775e5a354cb015502d2e7aa4b74230fc77e894f34a838b268c25ec8eeb8" solver-name = "gps-cdcl" solver-version = 1 diff --git a/cluster-autoscaler/vendor/github.com/json-iterator/go/Gopkg.toml b/cluster-autoscaler/vendor/github.com/json-iterator/go/Gopkg.toml index 5801ffa1e980..313a0f887b6f 100644 --- a/cluster-autoscaler/vendor/github.com/json-iterator/go/Gopkg.toml +++ b/cluster-autoscaler/vendor/github.com/json-iterator/go/Gopkg.toml @@ -23,4 +23,4 @@ ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com [[constraint]] name = "github.com/modern-go/reflect2" - version = "1.0.0" + version = "1.0.1" diff --git a/cluster-autoscaler/vendor/github.com/karrick/godirwalk/.gitignore b/cluster-autoscaler/vendor/github.com/karrick/godirwalk/.gitignore new file mode 100644 index 000000000000..a1338d68517e --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/karrick/godirwalk/.gitignore @@ -0,0 +1,14 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ diff --git a/cluster-autoscaler/vendor/github.com/karrick/godirwalk/LICENSE b/cluster-autoscaler/vendor/github.com/karrick/godirwalk/LICENSE new file mode 100644 index 000000000000..01ce194c80dd --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/karrick/godirwalk/LICENSE @@ -0,0 +1,25 @@ +BSD 2-Clause License + +Copyright (c) 2017, Karrick McDermott +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
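Ahead of the vendored README below, a short sketch of the directory-listing half of the godirwalk API it documents; `ReadDirnames` and its optional scratch-buffer argument are declared in `readdir.go` further down in this diff, and the directory name here is a placeholder:

```Go
package main

import (
	"fmt"
	"log"
	"sort"

	"github.com/karrick/godirwalk"
)

func main() {
	// A nil scratch buffer lets the library allocate its own.
	names, err := godirwalk.ReadDirnames("/tmp", nil)
	if err != nil {
		log.Fatal(err)
	}
	sort.Strings(names) // ReadDirnames does not sort its results
	for _, name := range names {
		fmt.Println(name)
	}
}
```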
diff --git a/cluster-autoscaler/vendor/github.com/karrick/godirwalk/README.md b/cluster-autoscaler/vendor/github.com/karrick/godirwalk/README.md
new file mode 100644
index 000000000000..4f9922fefb08
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/karrick/godirwalk/README.md
@@ -0,0 +1,208 @@
+# godirwalk
+
+`godirwalk` is a library for traversing a directory tree on a file
+system.
+
+In short, why do I use this library?
+
+1. It's faster than `filepath.Walk`.
+1. It's more correct on Windows than `filepath.Walk`.
+1. It's more easy to use than `filepath.Walk`.
+1. It's more flexible than `filepath.Walk`.
+
+## Usage Example
+
+Additional examples are provided in the `examples/` subdirectory.
+
+This library will normalize the provided top level directory name
+based on the os-specific path separator by calling `filepath.Clean` on
+its first argument. However, it always provides the pathname created by
+using the correct os-specific path separator when invoking the
+provided callback function.
+
+```Go
+    dirname := "some/directory/root"
+    err := godirwalk.Walk(dirname, &godirwalk.Options{
+        Callback: func(osPathname string, de *godirwalk.Dirent) error {
+            fmt.Printf("%s %s\n", de.ModeType(), osPathname)
+            return nil
+        },
+        Unsorted: true, // (optional) set true for faster yet non-deterministic enumeration (see godoc)
+    })
+```
+
+This library not only provides functions for traversing a file system
+directory tree, but also for obtaining a list of immediate descendants
+of a particular directory, typically much more quickly than using
+`os.ReadDir` or `os.ReadDirnames`.
+
+Documentation is available via
+[![GoDoc](https://godoc.org/github.com/karrick/godirwalk?status.svg)](https://godoc.org/github.com/karrick/godirwalk).
+
+## Description
+
+Here's why I use `godirwalk` in preference to `filepath.Walk`,
+`os.ReadDir`, and `os.ReadDirnames`.
+
+### It's faster than `filepath.Walk`
+
+When compared against `filepath.Walk` in benchmarks, it has been
+observed to run between five and ten times the speed on darwin, at
+speeds comparable to that of the unix `find` utility; about twice
+the speed on linux; and about four times the speed on Windows.
+
+How does it obtain this performance boost? It does less work to give
+you nearly the same output. This library calls the same `syscall`
+functions to do the work, but it makes fewer calls, does not throw
+away information that it might need, and creates less memory churn
+along the way by reusing the same scratch buffer rather than
+reallocating a new buffer every time it reads data from the operating
+system.
+
+While traversing a file system directory tree, `filepath.Walk` obtains
+the list of immediate descendants of a directory, and throws away the
+file system node type information provided by the operating system
+that comes with the node's name. Then, immediately prior to invoking
+the callback function, `filepath.Walk` invokes `os.Stat` for each
+node, and passes the returned `os.FileInfo` information to the
+callback.
+
+While the `os.FileInfo` information provided by `os.Stat` is extremely
+helpful--and even includes the `os.FileMode` data--providing it
+requires an additional system call for each node.
+
+Because most callbacks only care about what the node type is, this
+library does not throw the type information away, but rather provides
+that information to the callback function in the form of an
+`os.FileMode` value.
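A sketch of the stat-on-demand pattern this section describes (not part of the vendored README; the directory name is a placeholder): the callback relies on the cheap mode-type information for most nodes and pays for `os.Stat` only where the full `os.FileInfo` is wanted.

```Go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/karrick/godirwalk"
)

func main() {
	err := godirwalk.Walk("some/directory/root", &godirwalk.Options{
		Callback: func(osPathname string, de *godirwalk.Dirent) error {
			// The mode type alone answers most questions without a stat call.
			if !de.IsRegular() {
				return nil
			}
			// Pay for the extra system call only when full FileInfo is needed.
			fi, err := os.Stat(osPathname)
			if err != nil {
				return err
			}
			fmt.Printf("%s is %d bytes\n", osPathname, fi.Size())
			return nil
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```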
+Note that the `os.FileMode` value that this library provides only has
+the node type information, and does not have the permission bits,
+sticky bits, or other information from the file's mode. If the
+callback does care about a particular node's entire `os.FileInfo` data
+structure, the callback can easily invoke `os.Stat` when needed, and
+only when needed.
+
+#### Benchmarks
+
+##### macOS
+
+```Bash
+go test -bench=.
+goos: darwin
+goarch: amd64
+pkg: github.com/karrick/godirwalk
+BenchmarkFilepathWalk-8                    1        3001274570 ns/op
+BenchmarkGoDirWalk-8                       3         465573172 ns/op
+BenchmarkFlameGraphFilepathWalk-8          1        6957916936 ns/op
+BenchmarkFlameGraphGoDirWalk-8             1        4210582571 ns/op
+PASS
+ok      github.com/karrick/godirwalk    16.822s
+```
+
+##### Linux
+
+```Bash
+go test -bench=.
+goos: linux
+goarch: amd64
+pkg: github.com/karrick/godirwalk
+BenchmarkFilepathWalk-12                   1        1609189170 ns/op
+BenchmarkGoDirWalk-12                      5         211336628 ns/op
+BenchmarkFlameGraphFilepathWalk-12         1        3968119932 ns/op
+BenchmarkFlameGraphGoDirWalk-12            1        2139598998 ns/op
+PASS
+ok      github.com/karrick/godirwalk    9.007s
+```
+
+### It's more correct on Windows than `filepath.Walk`
+
+I did not previously care about this either, but humor me. We all love
+how we can write once and run everywhere. It is essential for the
+language's adoption, growth, and success that the software we create
+can run unmodified on all architectures and operating systems
+supported by Go.
+
+When the traversed file system has a logical loop caused by symbolic
+links to directories, on unix `filepath.Walk` ignores symbolic links
+and traverses the entire directory tree without error. On Windows
+however, `filepath.Walk` will continue following directory symbolic
+links, even though it is not supposed to, eventually causing
+`filepath.Walk` to terminate early and return an error when the
+pathname gets too long from concatenating endless loops of symbolic
+links onto the pathname. This error comes from Windows, passes through
+`filepath.Walk`, and on to the upstream client running `filepath.Walk`.
+
+The takeaway is that behavior is different based on which platform
+`filepath.Walk` is running. While this is clearly not intentional,
+until it is fixed in the standard library, it presents a compatibility
+problem.
+
+This library correctly identifies symbolic links that point to
+directories and will only follow them when `FollowSymbolicLinks` is
+set to true. Behavior on Windows and other operating systems is
+identical.
+
+### It's more easy to use than `filepath.Walk`
+
+Since this library does not invoke `os.Stat` on every file system node
+it encounters, there is no possible error event for the callback
+function to filter on. The third argument in the `filepath.WalkFunc`
+function signature to pass the error from `os.Stat` to the callback
+function is no longer necessary, and is thus eliminated from the
+signature of the callback function in this library.
+
+Also, `filepath.Walk` invokes the callback function with a solidus
+delimited pathname regardless of the os-specific path separator. This
+library invokes the callback function with the os-specific pathname
+separator, obviating a call to `filepath.Clean` in the callback
+function for each node prior to actually using the provided pathname.
+
+In other words, even on Windows, `filepath.Walk` will invoke the
+callback with `some/path/to/foo.txt`, requiring well-written clients
+to perform pathname normalization for every file prior to working
+with the specified file.
+In truth, many clients developed on unix and not tested on Windows
+neglect this subtlety, which results in software bugs when running on
+Windows. This library would invoke the callback function with
+`some\path\to\foo.txt` for the same file when running on Windows,
+eliminating the need for the client to normalize the pathname, and
+lessening the likelihood that a client will work on unix but not on
+Windows.
+
+### It's more flexible than `filepath.Walk`
+
+#### Configurable Handling of Symbolic Links
+
+The default behavior of this library is to ignore symbolic links to
+directories when walking a directory tree, just like `filepath.Walk`
+does. However, it does invoke the callback function with each node it
+finds, including symbolic links. If a particular use case exists to
+follow symbolic links when traversing a directory tree, this library
+can be invoked in a manner to do so, by setting the
+`FollowSymbolicLinks` parameter to true.
+
+#### Configurable Sorting of Directory Children
+
+The default behavior of this library is to always sort the immediate
+descendants of a directory prior to visiting each node, just like
+`filepath.Walk` does. This is usually the desired behavior. However,
+sorting the names does come at a performance penalty when a directory
+node has many entries. If a particular use case exists that does not
+require sorting the directory's immediate descendants prior to
+visiting its nodes, this library will skip the sorting step when
+the `Unsorted` parameter is set to true.
+
+#### Configurable Post Children Callback
+
+This library provides upstream code with the ability to specify a
+callback to be invoked for each directory after its children are
+processed. This has been used to recursively delete empty directories
+after traversing the file system in a more efficient manner. See the
+`examples/clean-empties` directory for an example of this usage.
+
+#### Configurable Error Callback
+
+This library provides upstream code with the ability to specify a
+callback to be invoked for errors that the operating system returns,
+allowing the upstream code to determine the next course of action to
+take, whether to halt walking the hierarchy, as it would do were no
+error callback provided, or skip the node that caused the error. See
+the `examples/walk-fast` directory for an example of this usage.
diff --git a/cluster-autoscaler/vendor/github.com/karrick/godirwalk/dirent.go b/cluster-autoscaler/vendor/github.com/karrick/godirwalk/dirent.go
new file mode 100644
index 000000000000..5a277224802a
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/karrick/godirwalk/dirent.go
@@ -0,0 +1,74 @@
+package godirwalk
+
+import (
+	"os"
+	"path/filepath"
+
+	"github.com/pkg/errors"
+)
+
+// Dirent stores the name and file system mode type of discovered file system
+// entries.
+type Dirent struct {
+	name     string
+	modeType os.FileMode
+}
+
+// NewDirent returns a newly initialized Dirent structure, or an error. This
+// function does not follow symbolic links.
+//
+// This function is rarely used, as Dirent structures are provided by other
+// functions in this library that read and walk directories.
+func NewDirent(osPathname string) (*Dirent, error) {
+	fi, err := os.Lstat(osPathname)
+	if err != nil {
+		return nil, errors.Wrap(err, "cannot lstat")
+	}
+	return &Dirent{
+		name:     filepath.Base(osPathname),
+		modeType: fi.Mode() & os.ModeType,
+	}, nil
+}
+
+// Name returns the basename of the file system entry.
+func (de Dirent) Name() string { return de.name } + +// ModeType returns the mode bits that specify the file system node type. We +// could make our own enum-like data type for encoding the file type, but Go's +// runtime already gives us architecture independent file modes, as discussed in +// `os/types.go`: +// +// Go's runtime FileMode type has same definition on all systems, so that +// information about files can be moved from one system to another portably. +func (de Dirent) ModeType() os.FileMode { return de.modeType } + +// IsDir returns true if and only if the Dirent represents a file system +// directory. Note that on some operating systems, more than one file mode bit +// may be set for a node. For instance, on Windows, a symbolic link that points +// to a directory will have both the directory and the symbolic link bits set. +func (de Dirent) IsDir() bool { return de.modeType&os.ModeDir != 0 } + +// IsRegular returns true if and only if the Dirent represents a regular +// file. That is, it ensures that no mode type bits are set. +func (de Dirent) IsRegular() bool { return de.modeType&os.ModeType == 0 } + +// IsSymlink returns true if and only if the Dirent represents a file system +// symbolic link. Note that on some operating systems, more than one file mode +// bit may be set for a node. For instance, on Windows, a symbolic link that +// points to a directory will have both the directory and the symbolic link bits +// set. +func (de Dirent) IsSymlink() bool { return de.modeType&os.ModeSymlink != 0 } + +// Dirents represents a slice of Dirent pointers, which are sortable by +// name. This type satisfies the `sort.Interface` interface. +type Dirents []*Dirent + +// Len returns the count of Dirent structures in the slice. +func (l Dirents) Len() int { return len(l) } + +// Less returns true if and only if the Name of the element specified by the +// first index is lexicographically less than that of the second index. +func (l Dirents) Less(i, j int) bool { return l[i].name < l[j].name } + +// Swap exchanges the two Dirent entries specified by the two provided indexes. +func (l Dirents) Swap(i, j int) { l[i], l[j] = l[j], l[i] } diff --git a/cluster-autoscaler/vendor/github.com/karrick/godirwalk/doc.go b/cluster-autoscaler/vendor/github.com/karrick/godirwalk/doc.go new file mode 100644 index 000000000000..0dfdabd48849 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/karrick/godirwalk/doc.go @@ -0,0 +1,34 @@ +/* +Package godirwalk provides functions to read and traverse directory trees. + +In short, why do I use this library? + +* It's faster than `filepath.Walk`. + +* It's more correct on Windows than `filepath.Walk`. + +* It's more easy to use than `filepath.Walk`. + +* It's more flexible than `filepath.Walk`. + +USAGE + +This library will normalize the provided top level directory name based on the +os-specific path separator by calling `filepath.Clean` on its first +argument. However it always provides the pathname created by using the correct +os-specific path separator when invoking the provided callback function. 
+ + dirname := "some/directory/root" + err := godirwalk.Walk(dirname, &godirwalk.Options{ + Callback: func(osPathname string, de *godirwalk.Dirent) error { + fmt.Printf("%s %s\n", de.ModeType(), osPathname) + return nil + }, + }) + +This library not only provides functions for traversing a file system directory +tree, but also for obtaining a list of immediate descendants of a particular +directory, typically much more quickly than using `os.ReadDir` or +`os.ReadDirnames`. +*/ +package godirwalk diff --git a/cluster-autoscaler/vendor/github.com/karrick/godirwalk/go.mod b/cluster-autoscaler/vendor/github.com/karrick/godirwalk/go.mod new file mode 100644 index 000000000000..6b467a9e153b --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/karrick/godirwalk/go.mod @@ -0,0 +1,3 @@ +module github.com/karrick/godirwalk + +require github.com/pkg/errors v0.8.0 diff --git a/cluster-autoscaler/vendor/github.com/karrick/godirwalk/go.sum b/cluster-autoscaler/vendor/github.com/karrick/godirwalk/go.sum new file mode 100644 index 000000000000..a31014d5f0b4 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/karrick/godirwalk/go.sum @@ -0,0 +1 @@ +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= diff --git a/cluster-autoscaler/vendor/github.com/karrick/godirwalk/readdir.go b/cluster-autoscaler/vendor/github.com/karrick/godirwalk/readdir.go new file mode 100644 index 000000000000..2bba689751f9 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/karrick/godirwalk/readdir.go @@ -0,0 +1,47 @@ +package godirwalk + +// ReadDirents returns a sortable slice of pointers to Dirent structures, each +// representing the file system name and mode type for one of the immediate +// descendant of the specified directory. If the specified directory is a +// symbolic link, it will be resolved. +// +// If an optional scratch buffer is provided that is at least one page of +// memory, it will be used when reading directory entries from the file system. +// +// children, err := godirwalk.ReadDirents(osDirname, nil) +// if err != nil { +// return nil, errors.Wrap(err, "cannot get list of directory children") +// } +// sort.Sort(children) +// for _, child := range children { +// fmt.Printf("%s %s\n", child.ModeType, child.Name) +// } +func ReadDirents(osDirname string, scratchBuffer []byte) (Dirents, error) { + return readdirents(osDirname, scratchBuffer) +} + +// ReadDirnames returns a slice of strings, representing the immediate +// descendants of the specified directory. If the specified directory is a +// symbolic link, it will be resolved. +// +// If an optional scratch buffer is provided that is at least one page of +// memory, it will be used when reading directory entries from the file system. +// +// Note that this function, depending on operating system, may or may not invoke +// the ReadDirents function, in order to prepare the list of immediate +// descendants. Therefore, if your program needs both the names and the file +// system mode types of descendants, it will always be faster to invoke +// ReadDirents directly, rather than calling this function, then looping over +// the results and calling os.Stat for each child. 
+// +// children, err := godirwalk.ReadDirnames(osDirname, nil) +// if err != nil { +// return nil, errors.Wrap(err, "cannot get list of directory children") +// } +// sort.Strings(children) +// for _, child := range children { +// fmt.Printf("%s\n", child) +// } +func ReadDirnames(osDirname string, scratchBuffer []byte) ([]string, error) { + return readdirnames(osDirname, scratchBuffer) +} diff --git a/cluster-autoscaler/vendor/github.com/karrick/godirwalk/readdir_unix.go b/cluster-autoscaler/vendor/github.com/karrick/godirwalk/readdir_unix.go new file mode 100644 index 000000000000..04a628f7fc16 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/karrick/godirwalk/readdir_unix.go @@ -0,0 +1,109 @@ +// +build darwin freebsd linux netbsd openbsd + +package godirwalk + +import ( + "os" + "path/filepath" + "syscall" + "unsafe" + + "github.com/pkg/errors" +) + +func readdirents(osDirname string, scratchBuffer []byte) (Dirents, error) { + dh, err := os.Open(osDirname) + if err != nil { + return nil, errors.Wrap(err, "cannot Open") + } + + var entries Dirents + + fd := int(dh.Fd()) + + if len(scratchBuffer) < MinimumScratchBufferSize { + scratchBuffer = make([]byte, DefaultScratchBufferSize) + } + + var de *syscall.Dirent + + for { + n, err := syscall.ReadDirent(fd, scratchBuffer) + if err != nil { + _ = dh.Close() // ignore potential error returned by Close + return nil, errors.Wrap(err, "cannot ReadDirent") + } + if n <= 0 { + break // end of directory reached + } + // Loop over the bytes returned by reading the directory entries. + buf := scratchBuffer[:n] + for len(buf) > 0 { + de = (*syscall.Dirent)(unsafe.Pointer(&buf[0])) // point entry to first syscall.Dirent in buffer + buf = buf[de.Reclen:] // advance buffer + + if inoFromDirent(de) == 0 { + continue // this item has been deleted, but not yet removed from directory + } + + nameSlice := nameFromDirent(de) + namlen := len(nameSlice) + if (namlen == 0) || (namlen == 1 && nameSlice[0] == '.') || (namlen == 2 && nameSlice[0] == '.' && nameSlice[1] == '.') { + continue // skip unimportant entries + } + osChildname := string(nameSlice) + + // Convert syscall constant, which is in purview of OS, to a + // constant defined by Go, assumed by this project to be stable. + var mode os.FileMode + switch de.Type { + case syscall.DT_REG: + // regular file + case syscall.DT_DIR: + mode = os.ModeDir + case syscall.DT_LNK: + mode = os.ModeSymlink + case syscall.DT_CHR: + mode = os.ModeDevice | os.ModeCharDevice + case syscall.DT_BLK: + mode = os.ModeDevice + case syscall.DT_FIFO: + mode = os.ModeNamedPipe + case syscall.DT_SOCK: + mode = os.ModeSocket + default: + // If syscall returned unknown type (e.g., DT_UNKNOWN, DT_WHT), + // then resolve actual mode by getting stat. + fi, err := os.Lstat(filepath.Join(osDirname, osChildname)) + if err != nil { + _ = dh.Close() // ignore potential error returned by Close + return nil, errors.Wrap(err, "cannot Stat") + } + // We only care about the bits that identify the type of a file + // system node, and can ignore append, exclusive, temporary, + // setuid, setgid, permission bits, and sticky bits, which are + // coincident to the bits that declare type of the file system + // node. 
+ mode = fi.Mode() & os.ModeType + } + + entries = append(entries, &Dirent{name: osChildname, modeType: mode}) + } + } + if err = dh.Close(); err != nil { + return nil, err + } + return entries, nil +} + +func readdirnames(osDirname string, scratchBuffer []byte) ([]string, error) { + des, err := readdirents(osDirname, scratchBuffer) + if err != nil { + return nil, err + } + names := make([]string, len(des)) + for i, v := range des { + names[i] = v.name + } + return names, nil +} diff --git a/cluster-autoscaler/vendor/github.com/karrick/godirwalk/readdir_windows.go b/cluster-autoscaler/vendor/github.com/karrick/godirwalk/readdir_windows.go new file mode 100644 index 000000000000..885a067a4bc5 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/karrick/godirwalk/readdir_windows.go @@ -0,0 +1,54 @@ +package godirwalk + +import ( + "os" + + "github.com/pkg/errors" +) + +// The functions in this file are mere wrappers of what is already provided by +// standard library, in order to provide the same API as this library provides. +// +// The scratch buffer argument is ignored by this architecture. +// +// Please send PR or link to article if you know of a more performant way of +// enumerating directory contents and mode types on Windows. + +func readdirents(osDirname string, _ []byte) (Dirents, error) { + dh, err := os.Open(osDirname) + if err != nil { + return nil, errors.Wrap(err, "cannot Open") + } + + fileinfos, err := dh.Readdir(0) + if er := dh.Close(); err == nil { + err = er + } + if err != nil { + return nil, errors.Wrap(err, "cannot Readdir") + } + + entries := make(Dirents, len(fileinfos)) + for i, info := range fileinfos { + entries[i] = &Dirent{name: info.Name(), modeType: info.Mode() & os.ModeType} + } + + return entries, nil +} + +func readdirnames(osDirname string, _ []byte) ([]string, error) { + dh, err := os.Open(osDirname) + if err != nil { + return nil, errors.Wrap(err, "cannot Open") + } + + entries, err := dh.Readdirnames(0) + if er := dh.Close(); err == nil { + err = er + } + if err != nil { + return nil, errors.Wrap(err, "cannot Readdirnames") + } + + return entries, nil +} diff --git a/cluster-autoscaler/vendor/github.com/karrick/godirwalk/walk.go b/cluster-autoscaler/vendor/github.com/karrick/godirwalk/walk.go new file mode 100644 index 000000000000..4c184ab87e6f --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/karrick/godirwalk/walk.go @@ -0,0 +1,367 @@ +package godirwalk + +import ( + "os" + "path/filepath" + "sort" + + "github.com/pkg/errors" +) + +// DefaultScratchBufferSize specifies the size of the scratch buffer that will +// be allocated by Walk, ReadDirents, or ReadDirnames when a scratch buffer is +// not provided or the scratch buffer that is provided is smaller than +// MinimumScratchBufferSize bytes. This may seem like a large value; however, +// when a program intends to enumerate large directories, having a larger +// scratch buffer results in fewer operating system calls. +const DefaultScratchBufferSize = 64 * 1024 + +// MinimumScratchBufferSize specifies the minimum size of the scratch buffer +// that Walk, ReadDirents, and ReadDirnames will use when reading file entries +// from the operating system. It is initialized to the result from calling +// `os.Getpagesize()` during program startup. +var MinimumScratchBufferSize int + +func init() { + MinimumScratchBufferSize = os.Getpagesize() +} + +// Options provide parameters for how the Walk function operates. 
+type Options struct {
+ // ErrorCallback specifies a function to be invoked in the case of an error
+ // that could potentially be ignored while walking a file system
+ // hierarchy. When set to nil or left as its zero-value, any error condition
+ // causes Walk to immediately return the error describing what took
+ // place. When non-nil, this user-supplied function is invoked with the OS
+ // pathname of the file system object that caused the error along with the
+ // error that took place. The return value of the supplied ErrorCallback
+ // function determines whether the error will cause Walk to halt immediately
+ // as it would were no ErrorCallback value provided, or to skip this file
+ // system node and continue on with the remaining nodes in the file system
+ // hierarchy.
+ //
+ // ErrorCallback is invoked both for errors that are returned by the
+ // runtime, and for errors returned by other user-supplied callback
+ // functions.
+ ErrorCallback func(string, error) ErrorAction
+
+ // FollowSymbolicLinks specifies whether Walk will follow symbolic links
+ // that refer to directories. When set to false or left as its zero-value,
+ // Walk will still invoke the callback function with symbolic link nodes,
+ // but if the symbolic link refers to a directory, it will not recurse on
+ // that directory. When set to true, Walk will recurse on symbolic links
+ // that refer to a directory.
+ FollowSymbolicLinks bool
+
+ // Unsorted controls whether or not Walk will sort the immediate descendants
+ // of a directory by their relative names prior to visiting each of those
+ // entries.
+ //
+ // When set to false or left at its zero-value, Walk will get the list of
+ // immediate descendants of a particular directory, sort that list by
+ // lexical order of their names, and then visit each node in the list in
+ // sorted order. This will cause Walk to always traverse the same directory
+ // tree in the same order; however, it may be inefficient for directories
+ // with many immediate descendants.
+ //
+ // When set to true, Walk skips sorting the list of immediate descendants
+ // for a directory, and simply visits each node in the order the operating
+ // system enumerated them. This will be faster, but with the side effect
+ // that the traversal order may be different from one invocation to the
+ // next.
+ Unsorted bool
+
+ // Callback is a required function that Walk will invoke for every file
+ // system node it encounters.
+ Callback WalkFunc
+
+ // PostChildrenCallback is an optional function that Walk will invoke for
+ // every file system directory it encounters after its children have been
+ // processed.
+ PostChildrenCallback WalkFunc
+
+ // ScratchBuffer is an optional byte slice to use as a scratch buffer for
+ // Walk to use when reading directory entries, to reduce the amount of
+ // garbage generation. Not all architectures take advantage of the scratch
+ // buffer. If omitted or the provided buffer has fewer bytes than
+ // MinimumScratchBufferSize, then a buffer with DefaultScratchBufferSize
+ // bytes will be created and used once per Walk invocation.
+ ScratchBuffer []byte
+}
+
+// ErrorAction defines a set of actions the Walk function could take based on
+// the occurrence of an error while walking the file system. See the
+// documentation for the ErrorCallback field of the Options structure for more
+// information.
+type ErrorAction int
+
+const (
+ // Halt is the ErrorAction return value when the upstream code wants to halt
+ // the walk process after a runtime error takes place. It matches the default
+ // action the Walk function would take were no ErrorCallback provided.
+ Halt ErrorAction = iota
+
+ // SkipNode is the ErrorAction return value when the upstream code wants to
+ // ignore the runtime error for the current file system node, skip
+ // processing of the node that caused the error, and continue walking the
+ // file system hierarchy with the remaining nodes.
+ SkipNode
+)
+
+// WalkFunc is the type of the function called for each file system node visited
+// by Walk. The pathname argument will contain the argument to Walk as a prefix;
+// that is, if Walk is called with "dir", which is a directory containing the
+// file "a", the provided WalkFunc will be invoked with the argument "dir/a",
+// using the correct os.PathSeparator for the Go Operating System architecture,
+// GOOS. The directory entry argument is a pointer to a Dirent for the node,
+// providing access to both the basename and the mode type of the file system
+// node.
+//
+// If an error is returned by the Callback or PostChildrenCallback functions,
+// and no ErrorCallback function is provided, processing stops. If an
+// ErrorCallback function is provided, then it is invoked with the OS pathname
+// of the node that caused the error along with the error. The return
+// value of the ErrorCallback function determines whether to halt processing, or
+// skip this node and continue processing remaining file system nodes.
+//
+// The exception is when the function returns the special value
+// filepath.SkipDir. If the function returns filepath.SkipDir when invoked on a
+// directory, Walk skips the directory's contents entirely. If the function
+// returns filepath.SkipDir when invoked on a non-directory file system node,
+// Walk skips the remaining files in the containing directory. Note that any
+// supplied ErrorCallback function is not invoked with filepath.SkipDir when the
+// Callback or PostChildrenCallback functions return that special value.
+type WalkFunc func(osPathname string, directoryEntry *Dirent) error
+
+// Walk walks the file tree rooted at the specified directory, calling the
+// specified callback function for each file system node in the tree, including
+// root, symbolic links, and other node types. The nodes are walked in lexical
+// order, which makes the output deterministic but means that for very large
+// directories this function can be inefficient.
+//
+// This function is often much faster than filepath.Walk because it does not
+// invoke os.Stat for every node it encounters, but rather obtains the file
+// system node type when it reads the parent directory.
+//
+// If a runtime error occurs, either from the operating system or from the
+// upstream Callback or PostChildrenCallback functions, processing typically
+// halts. However, when an ErrorCallback function is provided in the Options
+// structure, that function is invoked with the error along with the OS
+// pathname of the file system node that caused the error. The ErrorCallback
+// function's return value determines the action that Walk will then take.
+//
+// func main() {
+// dirname := "."
+// if len(os.Args) > 1 { +// dirname = os.Args[1] +// } +// err := godirwalk.Walk(dirname, &godirwalk.Options{ +// Callback: func(osPathname string, de *godirwalk.Dirent) error { +// fmt.Printf("%s %s\n", de.ModeType(), osPathname) +// return nil +// }, +// ErrorCallback: func(osPathname string, err error) godirwalk.ErrorAction { +// // Your program may want to log the error somehow. +// fmt.Fprintf(os.Stderr, "ERROR: %s\n", err) +// +// // For the purposes of this example, a simple SkipNode will suffice, +// // although in reality perhaps additional logic might be called for. +// return godirwalk.SkipNode +// }, +// }) +// if err != nil { +// fmt.Fprintf(os.Stderr, "%s\n", err) +// os.Exit(1) +// } +// } +func Walk(pathname string, options *Options) error { + pathname = filepath.Clean(pathname) + + var fi os.FileInfo + var err error + + if options.FollowSymbolicLinks { + fi, err = os.Stat(pathname) + if err != nil { + return errors.Wrap(err, "cannot Stat") + } + } else { + fi, err = os.Lstat(pathname) + if err != nil { + return errors.Wrap(err, "cannot Lstat") + } + } + + mode := fi.Mode() + if mode&os.ModeDir == 0 { + return errors.Errorf("cannot Walk non-directory: %s", pathname) + } + + dirent := &Dirent{ + name: filepath.Base(pathname), + modeType: mode & os.ModeType, + } + + // If ErrorCallback is nil, set to a default value that halts the walk + // process on all operating system errors. This is done to allow error + // handling to be more succinct in the walk code. + if options.ErrorCallback == nil { + options.ErrorCallback = defaultErrorCallback + } + + if len(options.ScratchBuffer) < MinimumScratchBufferSize { + options.ScratchBuffer = make([]byte, DefaultScratchBufferSize) + } + + err = walk(pathname, dirent, options) + if err == filepath.SkipDir { + return nil // silence SkipDir for top level + } + return err +} + +// defaultErrorCallback always returns Halt because if the upstream code did not +// provide an ErrorCallback function, walking the file system hierarchy ought to +// halt upon any operating system error. +func defaultErrorCallback(_ string, _ error) ErrorAction { return Halt } + +// walk recursively traverses the file system node specified by pathname and the +// Dirent. +func walk(osPathname string, dirent *Dirent, options *Options) error { + err := options.Callback(osPathname, dirent) + if err != nil { + if err == filepath.SkipDir { + return err + } + err = errors.Wrap(err, "Callback") // wrap potential errors returned by callback + if action := options.ErrorCallback(osPathname, err); action == SkipNode { + return nil + } + return err + } + + // On some platforms, an entry can have more than one mode type bit set. + // For instance, it could have both the symlink bit and the directory bit + // set indicating it's a symlink to a directory. + if dirent.IsSymlink() { + if !options.FollowSymbolicLinks { + return nil + } + // Only need to Stat entry if platform did not already have os.ModeDir + // set, such as would be the case for unix like operating systems. (This + // guard eliminates extra os.Stat check on Windows.) 
+ if !dirent.IsDir() {
+ referent, err := os.Readlink(osPathname)
+ if err != nil {
+ err = errors.Wrap(err, "cannot Readlink")
+ if action := options.ErrorCallback(osPathname, err); action == SkipNode {
+ return nil
+ }
+ return err
+ }
+
+ var osp string
+ if filepath.IsAbs(referent) {
+ osp = referent
+ } else {
+ osp = filepath.Join(filepath.Dir(osPathname), referent)
+ }
+
+ fi, err := os.Stat(osp)
+ if err != nil {
+ err = errors.Wrap(err, "cannot Stat")
+ if action := options.ErrorCallback(osp, err); action == SkipNode {
+ return nil
+ }
+ return err
+ }
+ dirent.modeType = fi.Mode() & os.ModeType
+ }
+ }
+
+ if !dirent.IsDir() {
+ return nil
+ }
+
+ // If we get here, the specified pathname refers to a directory.
+ deChildren, err := ReadDirents(osPathname, options.ScratchBuffer)
+ if err != nil {
+ err = errors.Wrap(err, "cannot ReadDirents")
+ if action := options.ErrorCallback(osPathname, err); action == SkipNode {
+ return nil
+ }
+ return err
+ }
+
+ if !options.Unsorted {
+ sort.Sort(deChildren) // sort children entries unless upstream says to leave unsorted
+ }
+
+ for _, deChild := range deChildren {
+ osChildname := filepath.Join(osPathname, deChild.name)
+ err = walk(osChildname, deChild, options)
+ if err != nil {
+ if err != filepath.SkipDir {
+ return err
+ }
+ // If we received filepath.SkipDir on a directory, stop processing
+ // that directory but continue with its siblings. If we received
+ // filepath.SkipDir on a non-directory, stop processing the
+ // remaining siblings.
+ if deChild.IsSymlink() {
+ // Only need to Stat entry if platform did not already have
+ // os.ModeDir set, such as would be the case for unix like
+ // operating systems. (This guard eliminates extra os.Stat check
+ // on Windows.)
+ if !deChild.IsDir() {
+ // Resolve symbolic link referent to determine whether node
+ // is directory or not.
+ referent, err := os.Readlink(osChildname)
+ if err != nil {
+ err = errors.Wrap(err, "cannot Readlink")
+ if action := options.ErrorCallback(osChildname, err); action == SkipNode {
+ continue // with next child
+ }
+ return err
+ }
+
+ var osp string
+ if filepath.IsAbs(referent) {
+ osp = referent
+ } else {
+ osp = filepath.Join(osPathname, referent)
+ }
+
+ fi, err := os.Stat(osp)
+ if err != nil {
+ err = errors.Wrap(err, "cannot Stat")
+ if action := options.ErrorCallback(osp, err); action == SkipNode {
+ continue // with next child
+ }
+ return err
+ }
+ deChild.modeType = fi.Mode() & os.ModeType
+ }
+ }
+ if !deChild.IsDir() {
+ // If not directory, return immediately, thus skipping remainder
+ // of siblings.
+ return nil + } + } + } + + if options.PostChildrenCallback == nil { + return nil + } + + err = options.PostChildrenCallback(osPathname, dirent) + if err == nil || err == filepath.SkipDir { + return err + } + + err = errors.Wrap(err, "PostChildrenCallback") // wrap potential errors returned by callback + if action := options.ErrorCallback(osPathname, err); action == SkipNode { + return nil + } + return err +} diff --git a/cluster-autoscaler/vendor/github.com/karrick/godirwalk/withFileno.go b/cluster-autoscaler/vendor/github.com/karrick/godirwalk/withFileno.go new file mode 100644 index 000000000000..1dc04a717a72 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/karrick/godirwalk/withFileno.go @@ -0,0 +1,9 @@ +// +build dragonfly freebsd openbsd netbsd + +package godirwalk + +import "syscall" + +func inoFromDirent(de *syscall.Dirent) uint64 { + return uint64(de.Fileno) +} diff --git a/cluster-autoscaler/vendor/github.com/karrick/godirwalk/withIno.go b/cluster-autoscaler/vendor/github.com/karrick/godirwalk/withIno.go new file mode 100644 index 000000000000..47fc12540f36 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/karrick/godirwalk/withIno.go @@ -0,0 +1,9 @@ +// +build darwin linux + +package godirwalk + +import "syscall" + +func inoFromDirent(de *syscall.Dirent) uint64 { + return de.Ino +} diff --git a/cluster-autoscaler/vendor/github.com/karrick/godirwalk/withNamlen.go b/cluster-autoscaler/vendor/github.com/karrick/godirwalk/withNamlen.go new file mode 100644 index 000000000000..46a4af50045a --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/karrick/godirwalk/withNamlen.go @@ -0,0 +1,29 @@ +// +build darwin dragonfly freebsd netbsd openbsd + +package godirwalk + +import ( + "reflect" + "syscall" + "unsafe" +) + +func nameFromDirent(de *syscall.Dirent) []byte { + // Because this GOOS' syscall.Dirent provides a Namlen field that says how + // long the name is, this function does not need to search for the NULL + // byte. + ml := int(de.Namlen) + + // Convert syscall.Dirent.Name, which is array of int8, to []byte, by + // overwriting Cap, Len, and Data slice header fields to values from + // syscall.Dirent fields. Setting the Cap, Len, and Data field values for + // the slice header modifies what the slice header points to, and in this + // case, the name buffer. + var name []byte + sh := (*reflect.SliceHeader)(unsafe.Pointer(&name)) + sh.Cap = ml + sh.Len = ml + sh.Data = uintptr(unsafe.Pointer(&de.Name[0])) + + return name +} diff --git a/cluster-autoscaler/vendor/github.com/karrick/godirwalk/withoutNamlen.go b/cluster-autoscaler/vendor/github.com/karrick/godirwalk/withoutNamlen.go new file mode 100644 index 000000000000..dcf9f3a9722e --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/karrick/godirwalk/withoutNamlen.go @@ -0,0 +1,36 @@ +// +build nacl linux solaris + +package godirwalk + +import ( + "bytes" + "reflect" + "syscall" + "unsafe" +) + +func nameFromDirent(de *syscall.Dirent) []byte { + // Because this GOOS' syscall.Dirent does not provide a field that specifies + // the name length, this function must first calculate the max possible name + // length, and then search for the NULL byte. + ml := int(uint64(de.Reclen) - uint64(unsafe.Offsetof(syscall.Dirent{}.Name))) + + // Convert syscall.Dirent.Name, which is array of int8, to []byte, by + // overwriting Cap, Len, and Data slice header fields to values from + // syscall.Dirent fields. 
Setting the Cap, Len, and Data field values for + // the slice header modifies what the slice header points to, and in this + // case, the name buffer. + var name []byte + sh := (*reflect.SliceHeader)(unsafe.Pointer(&name)) + sh.Cap = ml + sh.Len = ml + sh.Data = uintptr(unsafe.Pointer(&de.Name[0])) + + if index := bytes.IndexByte(name, 0); index >= 0 { + // Found NULL byte; set slice's cap and len accordingly. + sh.Cap = index + sh.Len = index + } + + return name +} diff --git a/cluster-autoscaler/vendor/github.com/lpabon/godbc/.gitignore b/cluster-autoscaler/vendor/github.com/lpabon/godbc/.gitignore deleted file mode 100644 index 836562412fe8..000000000000 --- a/cluster-autoscaler/vendor/github.com/lpabon/godbc/.gitignore +++ /dev/null @@ -1,23 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test diff --git a/cluster-autoscaler/vendor/github.com/lpabon/godbc/.travis.yml b/cluster-autoscaler/vendor/github.com/lpabon/godbc/.travis.yml deleted file mode 100644 index 43b7cb86537c..000000000000 --- a/cluster-autoscaler/vendor/github.com/lpabon/godbc/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -go: - - 1.1 - - 1.2 - - tip - -install: - - go get github.com/stretchr/testify - -script: - - go test - - go test -tags 'prod' diff --git a/cluster-autoscaler/vendor/github.com/lpabon/godbc/AUTHORS b/cluster-autoscaler/vendor/github.com/lpabon/godbc/AUTHORS deleted file mode 100644 index d4ddde5dd014..000000000000 --- a/cluster-autoscaler/vendor/github.com/lpabon/godbc/AUTHORS +++ /dev/null @@ -1 +0,0 @@ -lpabon@redhat.com \ No newline at end of file diff --git a/cluster-autoscaler/vendor/github.com/lpabon/godbc/README.md b/cluster-autoscaler/vendor/github.com/lpabon/godbc/README.md deleted file mode 100644 index 2df53d423730..000000000000 --- a/cluster-autoscaler/vendor/github.com/lpabon/godbc/README.md +++ /dev/null @@ -1,21 +0,0 @@ -[![Build Status](https://travis-ci.org/lpabon/godbc.svg?branch=master)](https://travis-ci.org/lpabon/godbc) - -# godbc - -Design by contract for Go - -# Installation - -To install godbc, use `go get`: - - go get github.com/lpabon/godbc - -Import the `godbc` package into your code using this template: - - import ( - "github.com/lpabon/godbc" - ) - -# Documentation - -Documentation is available at https://godoc.org/github.com/lpabon/godbc diff --git a/cluster-autoscaler/vendor/github.com/lpabon/godbc/godbc.go b/cluster-autoscaler/vendor/github.com/lpabon/godbc/godbc.go deleted file mode 100644 index 7c44a9154eac..000000000000 --- a/cluster-autoscaler/vendor/github.com/lpabon/godbc/godbc.go +++ /dev/null @@ -1,146 +0,0 @@ -//+build !prod - -// -// Copyright (c) 2014 The godbc Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Design-by-Contract for Go -// -// Design by Contract is a programming methodology -// which binds the caller and the function called to a -// contract. The contract is represented using Hoare Triple: -// {P} C {Q} -// where {P} is the precondition before executing command C, -// and {Q} is the postcondition. -// -// See Also -// -// * http://en.wikipedia.org/wiki/Design_by_contract -// * http://en.wikipedia.org/wiki/Hoare_logic -// * http://dlang.org/dbc.html -// -// Usage -// -// Godbc is enabled by default, but can be disabled for production -// builds by using the tag 'prod' in builds and tests as follows: -// go build -tags 'prod' -// or -// go test -tags 'prod' -// -package godbc - -import ( - "errors" - "fmt" - "runtime" -) - -// InvariantSimpleTester is an interface which provides a receiver to -// test the object -type InvariantSimpleTester interface { - Invariant() bool -} - -// InvariantTester is an interface which provides not only an Invariant(), -// but also a receiver to print the structure -type InvariantTester interface { - InvariantSimpleTester - String() string -} - -// dbc_panic prints to the screen information of the failure followed -// by a call to panic() -func dbc_panic(dbc_func_name string, b bool, message ...interface{}) { - if !b { - - // Get caller information which is the caller - // of the caller of this function - pc, file, line, _ := runtime.Caller(2) - caller_func_info := runtime.FuncForPC(pc) - - error_string := fmt.Sprintf("%s:\n\r\tfunc (%s) 0x%x\n\r\tFile %s:%d", - dbc_func_name, - caller_func_info.Name(), - pc, - file, - line) - - if len(message) > 0 { - error_string += fmt.Sprintf("\n\r\tInfo: %+v", message) - } - err := errors.New(error_string) - - // Finally panic - panic(err) - } -} - -// Require checks that the preconditions are satisfied before -// executing the function -// -// Example Code -// -// func Divide(a, b int) int { -// godbc.Require(b != 0) -// return a/b -// } -// -func Require(b bool, message ...interface{}) { - dbc_panic("REQUIRE", b, message...) -} - -// Ensure checks the postconditions are satisfied before returning -// to the caller. -// -// Example Code -// -// type Data struct { -// a int -// } -// -// func (*d Data) Set(a int) { -// d.a = a -// godbc.Ensure(d.a == a) -// } -// -func Ensure(b bool, message ...interface{}) { - dbc_panic("ENSURE", b, message...) -} - -// Check provides a simple assert -func Check(b bool, message ...interface{}) { - dbc_panic("CHECK", b, message...) -} - -// InvariantSimple calls the objects Invariant() receiver to test -// the object for correctness. -// -// The caller object must provide an object that supports the -// interface InvariantSimpleTester and does not need to provide -// a String() receiver -func InvariantSimple(obj InvariantSimpleTester, message ...interface{}) { - dbc_panic("INVARIANT", obj.Invariant(), message...) -} - -// Invariant calls the objects Invariant() receiver to test -// the object for correctness. 
-// -// The caller object must provide an object that supports the -// interface InvariantTester -// -// To see an example, please take a look at the godbc_test.go -func Invariant(obj InvariantTester, message ...interface{}) { - m := append(message, obj) - dbc_panic("INVARIANT", obj.Invariant(), m) -} diff --git a/cluster-autoscaler/vendor/github.com/lpabon/godbc/godbc_prod.go b/cluster-autoscaler/vendor/github.com/lpabon/godbc/godbc_prod.go deleted file mode 100644 index 1cb2fffb8073..000000000000 --- a/cluster-autoscaler/vendor/github.com/lpabon/godbc/godbc_prod.go +++ /dev/null @@ -1,42 +0,0 @@ -//+build prod - -// -// Copyright (c) 2014 The godbc Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package godbc - -type InvariantSimpleTester interface { - Invariant() bool -} - -type InvariantTester interface { - InvariantSimpleTester - String() string -} - -func Require(b bool, message ...interface{}) { -} - -func Ensure(b bool, message ...interface{}) { -} - -func Check(b bool, message ...interface{}) { -} - -func InvariantSimple(obj InvariantSimpleTester, message ...interface{}) { -} - -func Invariant(obj InvariantTester, message ...interface{}) { -} diff --git a/cluster-autoscaler/vendor/github.com/lpabon/godbc/LICENSE b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/LICENSE similarity index 100% rename from cluster-autoscaler/vendor/github.com/lpabon/godbc/LICENSE rename to cluster-autoscaler/vendor/github.com/mesos/mesos-go/LICENSE diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/NOTICE b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/NOTICE new file mode 100644 index 000000000000..491bbe14560f --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/NOTICE @@ -0,0 +1,13 @@ +Copyright 2013-2015, Mesosphere, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/agent/agent.pb.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/agent/agent.pb.go new file mode 100644 index 000000000000..cc6a79f99241 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/agent/agent.pb.go @@ -0,0 +1,17324 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: agent/agent.proto + +/* + Package agent is a generated protocol buffer package. 
+ + It is generated from these files: + agent/agent.proto + + It has these top-level messages: + Call + Response + ProcessIO +*/ +package agent + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import mesos "github.com/mesos/mesos-go/api/v1/lib" +import _ "github.com/gogo/protobuf/gogoproto" + +import strconv "strconv" + +import bytes "bytes" + +import strings "strings" +import reflect "reflect" + +import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// If a call of type `Call::FOO` requires additional parameters they can be +// included in the corresponding `Call::Foo` message. Similarly, if a call +// receives a synchronous response it will be returned as a `Response` +// message of type `Response::FOO`; see `Call::LaunchNestedContainerSession` +// and `Call::AttachContainerOutput` for exceptions. +type Call_Type int32 + +const ( + Call_UNKNOWN Call_Type = 0 + Call_GET_HEALTH Call_Type = 1 + Call_GET_FLAGS Call_Type = 2 + Call_GET_VERSION Call_Type = 3 + Call_GET_METRICS Call_Type = 4 + Call_GET_LOGGING_LEVEL Call_Type = 5 + Call_SET_LOGGING_LEVEL Call_Type = 6 + Call_LIST_FILES Call_Type = 7 + Call_READ_FILE Call_Type = 8 + Call_GET_STATE Call_Type = 9 + Call_GET_CONTAINERS Call_Type = 10 + // Retrieves the information about known frameworks. + Call_GET_FRAMEWORKS Call_Type = 11 + // Retrieves the information about known executors. + Call_GET_EXECUTORS Call_Type = 12 + // Retrieves the information about known operations. + Call_GET_OPERATIONS Call_Type = 31 + // Retrieves the information about known tasks. + Call_GET_TASKS Call_Type = 13 + // Retrieves the agent information. + Call_GET_AGENT Call_Type = 20 + // Retrieves the information about known resource providers. + Call_GET_RESOURCE_PROVIDERS Call_Type = 26 + // Calls for managing nested containers underneath an executor's container. + // Some of these calls are deprecated in favor of the calls + // for both standalone or nested containers further below. + Call_LAUNCH_NESTED_CONTAINER Call_Type = 14 + Call_WAIT_NESTED_CONTAINER Call_Type = 15 + Call_KILL_NESTED_CONTAINER Call_Type = 16 + Call_REMOVE_NESTED_CONTAINER Call_Type = 21 + // See 'LaunchNestedContainerSession' below. + Call_LAUNCH_NESTED_CONTAINER_SESSION Call_Type = 17 + Call_ATTACH_CONTAINER_INPUT Call_Type = 18 + Call_ATTACH_CONTAINER_OUTPUT Call_Type = 19 + // Calls for managing standalone containers + // or containers nested underneath another container. + Call_LAUNCH_CONTAINER Call_Type = 22 + Call_WAIT_CONTAINER Call_Type = 23 + Call_KILL_CONTAINER Call_Type = 24 + Call_REMOVE_CONTAINER Call_Type = 25 + Call_ADD_RESOURCE_PROVIDER_CONFIG Call_Type = 27 + Call_UPDATE_RESOURCE_PROVIDER_CONFIG Call_Type = 28 + Call_REMOVE_RESOURCE_PROVIDER_CONFIG Call_Type = 29 + // Prune unused container images. 
+ Call_PRUNE_IMAGES Call_Type = 30 +) + +var Call_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "GET_HEALTH", + 2: "GET_FLAGS", + 3: "GET_VERSION", + 4: "GET_METRICS", + 5: "GET_LOGGING_LEVEL", + 6: "SET_LOGGING_LEVEL", + 7: "LIST_FILES", + 8: "READ_FILE", + 9: "GET_STATE", + 10: "GET_CONTAINERS", + 11: "GET_FRAMEWORKS", + 12: "GET_EXECUTORS", + 31: "GET_OPERATIONS", + 13: "GET_TASKS", + 20: "GET_AGENT", + 26: "GET_RESOURCE_PROVIDERS", + 14: "LAUNCH_NESTED_CONTAINER", + 15: "WAIT_NESTED_CONTAINER", + 16: "KILL_NESTED_CONTAINER", + 21: "REMOVE_NESTED_CONTAINER", + 17: "LAUNCH_NESTED_CONTAINER_SESSION", + 18: "ATTACH_CONTAINER_INPUT", + 19: "ATTACH_CONTAINER_OUTPUT", + 22: "LAUNCH_CONTAINER", + 23: "WAIT_CONTAINER", + 24: "KILL_CONTAINER", + 25: "REMOVE_CONTAINER", + 27: "ADD_RESOURCE_PROVIDER_CONFIG", + 28: "UPDATE_RESOURCE_PROVIDER_CONFIG", + 29: "REMOVE_RESOURCE_PROVIDER_CONFIG", + 30: "PRUNE_IMAGES", +} +var Call_Type_value = map[string]int32{ + "UNKNOWN": 0, + "GET_HEALTH": 1, + "GET_FLAGS": 2, + "GET_VERSION": 3, + "GET_METRICS": 4, + "GET_LOGGING_LEVEL": 5, + "SET_LOGGING_LEVEL": 6, + "LIST_FILES": 7, + "READ_FILE": 8, + "GET_STATE": 9, + "GET_CONTAINERS": 10, + "GET_FRAMEWORKS": 11, + "GET_EXECUTORS": 12, + "GET_OPERATIONS": 31, + "GET_TASKS": 13, + "GET_AGENT": 20, + "GET_RESOURCE_PROVIDERS": 26, + "LAUNCH_NESTED_CONTAINER": 14, + "WAIT_NESTED_CONTAINER": 15, + "KILL_NESTED_CONTAINER": 16, + "REMOVE_NESTED_CONTAINER": 21, + "LAUNCH_NESTED_CONTAINER_SESSION": 17, + "ATTACH_CONTAINER_INPUT": 18, + "ATTACH_CONTAINER_OUTPUT": 19, + "LAUNCH_CONTAINER": 22, + "WAIT_CONTAINER": 23, + "KILL_CONTAINER": 24, + "REMOVE_CONTAINER": 25, + "ADD_RESOURCE_PROVIDER_CONFIG": 27, + "UPDATE_RESOURCE_PROVIDER_CONFIG": 28, + "REMOVE_RESOURCE_PROVIDER_CONFIG": 29, + "PRUNE_IMAGES": 30, +} + +func (x Call_Type) Enum() *Call_Type { + p := new(Call_Type) + *p = x + return p +} +func (x Call_Type) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(Call_Type_name, int32(x)) +} +func (x *Call_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Call_Type_value, data, "Call_Type") + if err != nil { + return err + } + *x = Call_Type(value) + return nil +} +func (Call_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorAgent, []int{0, 0} } + +type Call_AttachContainerInput_Type int32 + +const ( + Call_AttachContainerInput_UNKNOWN Call_AttachContainerInput_Type = 0 + Call_AttachContainerInput_CONTAINER_ID Call_AttachContainerInput_Type = 1 + Call_AttachContainerInput_PROCESS_IO Call_AttachContainerInput_Type = 2 +) + +var Call_AttachContainerInput_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CONTAINER_ID", + 2: "PROCESS_IO", +} +var Call_AttachContainerInput_Type_value = map[string]int32{ + "UNKNOWN": 0, + "CONTAINER_ID": 1, + "PROCESS_IO": 2, +} + +func (x Call_AttachContainerInput_Type) Enum() *Call_AttachContainerInput_Type { + p := new(Call_AttachContainerInput_Type) + *p = x + return p +} +func (x Call_AttachContainerInput_Type) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(Call_AttachContainerInput_Type_name, int32(x)) +} +func (x *Call_AttachContainerInput_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Call_AttachContainerInput_Type_value, data, "Call_AttachContainerInput_Type") + if err != nil { + return err + } + *x = Call_AttachContainerInput_Type(value) + return nil +} +func (Call_AttachContainerInput_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptorAgent, []int{0, 10, 0} +} + 
+// Each of the responses of type `FOO` corresponds to `Foo` message below. +type Response_Type int32 + +const ( + Response_UNKNOWN Response_Type = 0 + Response_GET_HEALTH Response_Type = 1 + Response_GET_FLAGS Response_Type = 2 + Response_GET_VERSION Response_Type = 3 + Response_GET_METRICS Response_Type = 4 + Response_GET_LOGGING_LEVEL Response_Type = 5 + Response_LIST_FILES Response_Type = 6 + Response_READ_FILE Response_Type = 7 + Response_GET_STATE Response_Type = 8 + Response_GET_CONTAINERS Response_Type = 9 + Response_GET_FRAMEWORKS Response_Type = 10 + Response_GET_EXECUTORS Response_Type = 11 + Response_GET_OPERATIONS Response_Type = 17 + Response_GET_TASKS Response_Type = 12 + Response_GET_AGENT Response_Type = 14 + Response_GET_RESOURCE_PROVIDERS Response_Type = 16 + Response_WAIT_NESTED_CONTAINER Response_Type = 13 + Response_WAIT_CONTAINER Response_Type = 15 +) + +var Response_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "GET_HEALTH", + 2: "GET_FLAGS", + 3: "GET_VERSION", + 4: "GET_METRICS", + 5: "GET_LOGGING_LEVEL", + 6: "LIST_FILES", + 7: "READ_FILE", + 8: "GET_STATE", + 9: "GET_CONTAINERS", + 10: "GET_FRAMEWORKS", + 11: "GET_EXECUTORS", + 17: "GET_OPERATIONS", + 12: "GET_TASKS", + 14: "GET_AGENT", + 16: "GET_RESOURCE_PROVIDERS", + 13: "WAIT_NESTED_CONTAINER", + 15: "WAIT_CONTAINER", +} +var Response_Type_value = map[string]int32{ + "UNKNOWN": 0, + "GET_HEALTH": 1, + "GET_FLAGS": 2, + "GET_VERSION": 3, + "GET_METRICS": 4, + "GET_LOGGING_LEVEL": 5, + "LIST_FILES": 6, + "READ_FILE": 7, + "GET_STATE": 8, + "GET_CONTAINERS": 9, + "GET_FRAMEWORKS": 10, + "GET_EXECUTORS": 11, + "GET_OPERATIONS": 17, + "GET_TASKS": 12, + "GET_AGENT": 14, + "GET_RESOURCE_PROVIDERS": 16, + "WAIT_NESTED_CONTAINER": 13, + "WAIT_CONTAINER": 15, +} + +func (x Response_Type) Enum() *Response_Type { + p := new(Response_Type) + *p = x + return p +} +func (x Response_Type) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(Response_Type_name, int32(x)) +} +func (x *Response_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Response_Type_value, data, "Response_Type") + if err != nil { + return err + } + *x = Response_Type(value) + return nil +} +func (Response_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorAgent, []int{1, 0} } + +type ProcessIO_Type int32 + +const ( + ProcessIO_UNKNOWN ProcessIO_Type = 0 + ProcessIO_DATA ProcessIO_Type = 1 + ProcessIO_CONTROL ProcessIO_Type = 2 +) + +var ProcessIO_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "DATA", + 2: "CONTROL", +} +var ProcessIO_Type_value = map[string]int32{ + "UNKNOWN": 0, + "DATA": 1, + "CONTROL": 2, +} + +func (x ProcessIO_Type) Enum() *ProcessIO_Type { + p := new(ProcessIO_Type) + *p = x + return p +} +func (x ProcessIO_Type) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(ProcessIO_Type_name, int32(x)) +} +func (x *ProcessIO_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ProcessIO_Type_value, data, "ProcessIO_Type") + if err != nil { + return err + } + *x = ProcessIO_Type(value) + return nil +} +func (ProcessIO_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorAgent, []int{2, 0} } + +type ProcessIO_Data_Type int32 + +const ( + ProcessIO_Data_UNKNOWN ProcessIO_Data_Type = 0 + ProcessIO_Data_STDIN ProcessIO_Data_Type = 1 + ProcessIO_Data_STDOUT ProcessIO_Data_Type = 2 + ProcessIO_Data_STDERR ProcessIO_Data_Type = 3 +) + +var ProcessIO_Data_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "STDIN", + 2: "STDOUT", + 3: 
"STDERR", +} +var ProcessIO_Data_Type_value = map[string]int32{ + "UNKNOWN": 0, + "STDIN": 1, + "STDOUT": 2, + "STDERR": 3, +} + +func (x ProcessIO_Data_Type) Enum() *ProcessIO_Data_Type { + p := new(ProcessIO_Data_Type) + *p = x + return p +} +func (x ProcessIO_Data_Type) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(ProcessIO_Data_Type_name, int32(x)) +} +func (x *ProcessIO_Data_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ProcessIO_Data_Type_value, data, "ProcessIO_Data_Type") + if err != nil { + return err + } + *x = ProcessIO_Data_Type(value) + return nil +} +func (ProcessIO_Data_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptorAgent, []int{2, 0, 0} +} + +type ProcessIO_Control_Type int32 + +const ( + ProcessIO_Control_UNKNOWN ProcessIO_Control_Type = 0 + ProcessIO_Control_TTY_INFO ProcessIO_Control_Type = 1 + ProcessIO_Control_HEARTBEAT ProcessIO_Control_Type = 2 +) + +var ProcessIO_Control_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "TTY_INFO", + 2: "HEARTBEAT", +} +var ProcessIO_Control_Type_value = map[string]int32{ + "UNKNOWN": 0, + "TTY_INFO": 1, + "HEARTBEAT": 2, +} + +func (x ProcessIO_Control_Type) Enum() *ProcessIO_Control_Type { + p := new(ProcessIO_Control_Type) + *p = x + return p +} +func (x ProcessIO_Control_Type) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(ProcessIO_Control_Type_name, int32(x)) +} +func (x *ProcessIO_Control_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ProcessIO_Control_Type_value, data, "ProcessIO_Control_Type") + if err != nil { + return err + } + *x = ProcessIO_Control_Type(value) + return nil +} +func (ProcessIO_Control_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptorAgent, []int{2, 1, 0} +} + +// * +// Calls that can be sent to the v1 agent API. +// +// A call is described using the standard protocol buffer "union" +// trick, see +// https://developers.google.com/protocol-buffers/docs/techniques#union. 
+type Call struct { + Type Call_Type `protobuf:"varint,1,opt,name=type,enum=mesos.agent.Call_Type" json:"type"` + GetMetrics *Call_GetMetrics `protobuf:"bytes,2,opt,name=get_metrics,json=getMetrics" json:"get_metrics,omitempty"` + SetLoggingLevel *Call_SetLoggingLevel `protobuf:"bytes,3,opt,name=set_logging_level,json=setLoggingLevel" json:"set_logging_level,omitempty"` + ListFiles *Call_ListFiles `protobuf:"bytes,4,opt,name=list_files,json=listFiles" json:"list_files,omitempty"` + ReadFile *Call_ReadFile `protobuf:"bytes,5,opt,name=read_file,json=readFile" json:"read_file,omitempty"` + GetContainers *Call_GetContainers `protobuf:"bytes,20,opt,name=get_containers,json=getContainers" json:"get_containers,omitempty"` + LaunchNestedContainer *Call_LaunchNestedContainer `protobuf:"bytes,6,opt,name=launch_nested_container,json=launchNestedContainer" json:"launch_nested_container,omitempty"` + WaitNestedContainer *Call_WaitNestedContainer `protobuf:"bytes,7,opt,name=wait_nested_container,json=waitNestedContainer" json:"wait_nested_container,omitempty"` + KillNestedContainer *Call_KillNestedContainer `protobuf:"bytes,8,opt,name=kill_nested_container,json=killNestedContainer" json:"kill_nested_container,omitempty"` + RemoveNestedContainer *Call_RemoveNestedContainer `protobuf:"bytes,12,opt,name=remove_nested_container,json=removeNestedContainer" json:"remove_nested_container,omitempty"` + LaunchNestedContainerSession *Call_LaunchNestedContainerSession `protobuf:"bytes,9,opt,name=launch_nested_container_session,json=launchNestedContainerSession" json:"launch_nested_container_session,omitempty"` + AttachContainerInput *Call_AttachContainerInput `protobuf:"bytes,10,opt,name=attach_container_input,json=attachContainerInput" json:"attach_container_input,omitempty"` + AttachContainerOutput *Call_AttachContainerOutput `protobuf:"bytes,11,opt,name=attach_container_output,json=attachContainerOutput" json:"attach_container_output,omitempty"` + LaunchContainer *Call_LaunchContainer `protobuf:"bytes,13,opt,name=launch_container,json=launchContainer" json:"launch_container,omitempty"` + WaitContainer *Call_WaitContainer `protobuf:"bytes,14,opt,name=wait_container,json=waitContainer" json:"wait_container,omitempty"` + KillContainer *Call_KillContainer `protobuf:"bytes,15,opt,name=kill_container,json=killContainer" json:"kill_container,omitempty"` + RemoveContainer *Call_RemoveContainer `protobuf:"bytes,16,opt,name=remove_container,json=removeContainer" json:"remove_container,omitempty"` + AddResourceProviderConfig *Call_AddResourceProviderConfig `protobuf:"bytes,17,opt,name=add_resource_provider_config,json=addResourceProviderConfig" json:"add_resource_provider_config,omitempty"` + UpdateResourceProviderConfig *Call_UpdateResourceProviderConfig `protobuf:"bytes,18,opt,name=update_resource_provider_config,json=updateResourceProviderConfig" json:"update_resource_provider_config,omitempty"` + RemoveResourceProviderConfig *Call_RemoveResourceProviderConfig `protobuf:"bytes,19,opt,name=remove_resource_provider_config,json=removeResourceProviderConfig" json:"remove_resource_provider_config,omitempty"` + PruneImages *Call_PruneImages `protobuf:"bytes,21,opt,name=prune_images,json=pruneImages" json:"prune_images,omitempty"` +} + +func (m *Call) Reset() { *m = Call{} } +func (*Call) ProtoMessage() {} +func (*Call) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{0} } + +func (m *Call) GetType() Call_Type { + if m != nil { + return m.Type + } + return Call_UNKNOWN +} + +func (m *Call) GetGetMetrics() 
*Call_GetMetrics { + if m != nil { + return m.GetMetrics + } + return nil +} + +func (m *Call) GetSetLoggingLevel() *Call_SetLoggingLevel { + if m != nil { + return m.SetLoggingLevel + } + return nil +} + +func (m *Call) GetListFiles() *Call_ListFiles { + if m != nil { + return m.ListFiles + } + return nil +} + +func (m *Call) GetReadFile() *Call_ReadFile { + if m != nil { + return m.ReadFile + } + return nil +} + +func (m *Call) GetGetContainers() *Call_GetContainers { + if m != nil { + return m.GetContainers + } + return nil +} + +func (m *Call) GetLaunchNestedContainer() *Call_LaunchNestedContainer { + if m != nil { + return m.LaunchNestedContainer + } + return nil +} + +func (m *Call) GetWaitNestedContainer() *Call_WaitNestedContainer { + if m != nil { + return m.WaitNestedContainer + } + return nil +} + +func (m *Call) GetKillNestedContainer() *Call_KillNestedContainer { + if m != nil { + return m.KillNestedContainer + } + return nil +} + +func (m *Call) GetRemoveNestedContainer() *Call_RemoveNestedContainer { + if m != nil { + return m.RemoveNestedContainer + } + return nil +} + +func (m *Call) GetLaunchNestedContainerSession() *Call_LaunchNestedContainerSession { + if m != nil { + return m.LaunchNestedContainerSession + } + return nil +} + +func (m *Call) GetAttachContainerInput() *Call_AttachContainerInput { + if m != nil { + return m.AttachContainerInput + } + return nil +} + +func (m *Call) GetAttachContainerOutput() *Call_AttachContainerOutput { + if m != nil { + return m.AttachContainerOutput + } + return nil +} + +func (m *Call) GetLaunchContainer() *Call_LaunchContainer { + if m != nil { + return m.LaunchContainer + } + return nil +} + +func (m *Call) GetWaitContainer() *Call_WaitContainer { + if m != nil { + return m.WaitContainer + } + return nil +} + +func (m *Call) GetKillContainer() *Call_KillContainer { + if m != nil { + return m.KillContainer + } + return nil +} + +func (m *Call) GetRemoveContainer() *Call_RemoveContainer { + if m != nil { + return m.RemoveContainer + } + return nil +} + +func (m *Call) GetAddResourceProviderConfig() *Call_AddResourceProviderConfig { + if m != nil { + return m.AddResourceProviderConfig + } + return nil +} + +func (m *Call) GetUpdateResourceProviderConfig() *Call_UpdateResourceProviderConfig { + if m != nil { + return m.UpdateResourceProviderConfig + } + return nil +} + +func (m *Call) GetRemoveResourceProviderConfig() *Call_RemoveResourceProviderConfig { + if m != nil { + return m.RemoveResourceProviderConfig + } + return nil +} + +func (m *Call) GetPruneImages() *Call_PruneImages { + if m != nil { + return m.PruneImages + } + return nil +} + +// Provides a snapshot of the current metrics tracked by the agent. +type Call_GetMetrics struct { + // If set, `timeout` would be used to determines the maximum amount of time + // the API will take to respond. If the timeout is exceeded, some metrics + // may not be included in the response. + Timeout *mesos.DurationInfo `protobuf:"bytes,1,opt,name=timeout" json:"timeout,omitempty"` +} + +func (m *Call_GetMetrics) Reset() { *m = Call_GetMetrics{} } +func (*Call_GetMetrics) ProtoMessage() {} +func (*Call_GetMetrics) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{0, 0} } + +func (m *Call_GetMetrics) GetTimeout() *mesos.DurationInfo { + if m != nil { + return m.Timeout + } + return nil +} + +// Sets the logging verbosity level for a specified duration. Mesos uses +// [glog](https://github.com/google/glog) for logging. 
The library only uses +// verbose logging which means nothing will be output unless the verbosity +// level is set (by default it's 0, libprocess uses levels 1, 2, and 3). +type Call_SetLoggingLevel struct { + // The verbosity level. + Level uint32 `protobuf:"varint,1,req,name=level" json:"level"` + // The duration to keep verbosity level toggled. After this duration, the + // verbosity level of log would revert to the original level. + Duration mesos.DurationInfo `protobuf:"bytes,2,req,name=duration" json:"duration"` +} + +func (m *Call_SetLoggingLevel) Reset() { *m = Call_SetLoggingLevel{} } +func (*Call_SetLoggingLevel) ProtoMessage() {} +func (*Call_SetLoggingLevel) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{0, 1} } + +func (m *Call_SetLoggingLevel) GetLevel() uint32 { + if m != nil { + return m.Level + } + return 0 +} + +func (m *Call_SetLoggingLevel) GetDuration() mesos.DurationInfo { + if m != nil { + return m.Duration + } + return mesos.DurationInfo{} +} + +// Provides the file listing for a directory. +type Call_ListFiles struct { + Path string `protobuf:"bytes,1,req,name=path" json:"path"` +} + +func (m *Call_ListFiles) Reset() { *m = Call_ListFiles{} } +func (*Call_ListFiles) ProtoMessage() {} +func (*Call_ListFiles) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{0, 2} } + +func (m *Call_ListFiles) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +// Reads data from a file. +type Call_ReadFile struct { + // The path of file. + Path string `protobuf:"bytes,1,req,name=path" json:"path"` + // Initial offset in file to start reading from. + Offset uint64 `protobuf:"varint,2,req,name=offset" json:"offset"` + // The maximum number of bytes to read. The read length is capped at 16 + // memory pages. + Length *uint64 `protobuf:"varint,3,opt,name=length" json:"length,omitempty"` +} + +func (m *Call_ReadFile) Reset() { *m = Call_ReadFile{} } +func (*Call_ReadFile) ProtoMessage() {} +func (*Call_ReadFile) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{0, 3} } + +func (m *Call_ReadFile) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *Call_ReadFile) GetOffset() uint64 { + if m != nil { + return m.Offset + } + return 0 +} + +func (m *Call_ReadFile) GetLength() uint64 { + if m != nil && m.Length != nil { + return *m.Length + } + return 0 +} + +// Lists active containers on the agent. +type Call_GetContainers struct { + ShowNested *bool `protobuf:"varint,1,opt,name=show_nested,json=showNested" json:"show_nested,omitempty"` + ShowStandalone *bool `protobuf:"varint,2,opt,name=show_standalone,json=showStandalone" json:"show_standalone,omitempty"` +} + +func (m *Call_GetContainers) Reset() { *m = Call_GetContainers{} } +func (*Call_GetContainers) ProtoMessage() {} +func (*Call_GetContainers) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{0, 4} } + +func (m *Call_GetContainers) GetShowNested() bool { + if m != nil && m.ShowNested != nil { + return *m.ShowNested + } + return false +} + +func (m *Call_GetContainers) GetShowStandalone() bool { + if m != nil && m.ShowStandalone != nil { + return *m.ShowStandalone + } + return false +} + +// Deprecated in favor of `LaunchContainer`. 
+type Call_LaunchNestedContainer struct { + ContainerID mesos.ContainerID `protobuf:"bytes,1,req,name=container_id,json=containerId" json:"container_id"` + Command *mesos.CommandInfo `protobuf:"bytes,2,opt,name=command" json:"command,omitempty"` + Container *mesos.ContainerInfo `protobuf:"bytes,3,opt,name=container" json:"container,omitempty"` +} + +func (m *Call_LaunchNestedContainer) Reset() { *m = Call_LaunchNestedContainer{} } +func (*Call_LaunchNestedContainer) ProtoMessage() {} +func (*Call_LaunchNestedContainer) Descriptor() ([]byte, []int) { + return fileDescriptorAgent, []int{0, 5} +} + +func (m *Call_LaunchNestedContainer) GetContainerID() mesos.ContainerID { + if m != nil { + return m.ContainerID + } + return mesos.ContainerID{} +} + +func (m *Call_LaunchNestedContainer) GetCommand() *mesos.CommandInfo { + if m != nil { + return m.Command + } + return nil +} + +func (m *Call_LaunchNestedContainer) GetContainer() *mesos.ContainerInfo { + if m != nil { + return m.Container + } + return nil +} + +// Deprecated in favor of `WaitContainer`. +type Call_WaitNestedContainer struct { + ContainerID mesos.ContainerID `protobuf:"bytes,1,req,name=container_id,json=containerId" json:"container_id"` +} + +func (m *Call_WaitNestedContainer) Reset() { *m = Call_WaitNestedContainer{} } +func (*Call_WaitNestedContainer) ProtoMessage() {} +func (*Call_WaitNestedContainer) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{0, 6} } + +func (m *Call_WaitNestedContainer) GetContainerID() mesos.ContainerID { + if m != nil { + return m.ContainerID + } + return mesos.ContainerID{} +} + +// Deprecated in favor of `KillContainer`. +type Call_KillNestedContainer struct { + ContainerID mesos.ContainerID `protobuf:"bytes,1,req,name=container_id,json=containerId" json:"container_id"` + Signal *int32 `protobuf:"varint,2,opt,name=signal" json:"signal,omitempty"` +} + +func (m *Call_KillNestedContainer) Reset() { *m = Call_KillNestedContainer{} } +func (*Call_KillNestedContainer) ProtoMessage() {} +func (*Call_KillNestedContainer) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{0, 7} } + +func (m *Call_KillNestedContainer) GetContainerID() mesos.ContainerID { + if m != nil { + return m.ContainerID + } + return mesos.ContainerID{} +} + +func (m *Call_KillNestedContainer) GetSignal() int32 { + if m != nil && m.Signal != nil { + return *m.Signal + } + return 0 +} + +// Deprecated in favor of `RemoveContainer`. +type Call_RemoveNestedContainer struct { + ContainerID mesos.ContainerID `protobuf:"bytes,1,req,name=container_id,json=containerId" json:"container_id"` +} + +func (m *Call_RemoveNestedContainer) Reset() { *m = Call_RemoveNestedContainer{} } +func (*Call_RemoveNestedContainer) ProtoMessage() {} +func (*Call_RemoveNestedContainer) Descriptor() ([]byte, []int) { + return fileDescriptorAgent, []int{0, 8} +} + +func (m *Call_RemoveNestedContainer) GetContainerID() mesos.ContainerID { + if m != nil { + return m.ContainerID + } + return mesos.ContainerID{} +} + +// Launches a nested container within an executor's tree of containers. +// The differences between this call and `LaunchNestedContainer` are: +// 1) The container's life-cycle is tied to the lifetime of the +// connection used to make this call, i.e., if the connection ever +// breaks, the container will be destroyed. +// 2) The nested container shares the same namespaces and cgroups as +// its parent container. +// 3) Results in a streaming response of type `ProcessIO`. 
So the call +// needs to be made on a persistent connection. +type Call_LaunchNestedContainerSession struct { + ContainerID mesos.ContainerID `protobuf:"bytes,1,req,name=container_id,json=containerId" json:"container_id"` + Command *mesos.CommandInfo `protobuf:"bytes,2,opt,name=command" json:"command,omitempty"` + Container *mesos.ContainerInfo `protobuf:"bytes,3,opt,name=container" json:"container,omitempty"` +} + +func (m *Call_LaunchNestedContainerSession) Reset() { *m = Call_LaunchNestedContainerSession{} } +func (*Call_LaunchNestedContainerSession) ProtoMessage() {} +func (*Call_LaunchNestedContainerSession) Descriptor() ([]byte, []int) { + return fileDescriptorAgent, []int{0, 9} +} + +func (m *Call_LaunchNestedContainerSession) GetContainerID() mesos.ContainerID { + if m != nil { + return m.ContainerID + } + return mesos.ContainerID{} +} + +func (m *Call_LaunchNestedContainerSession) GetCommand() *mesos.CommandInfo { + if m != nil { + return m.Command + } + return nil +} + +func (m *Call_LaunchNestedContainerSession) GetContainer() *mesos.ContainerInfo { + if m != nil { + return m.Container + } + return nil +} + +// Attaches the caller to the STDIN of the entry point of the container. +// Clients can use this to stream input data to a container. +// Note that this call needs to be made on a persistent connection by +// streaming a CONTAINER_ID message followed by one or more PROCESS_IO +// messages. +type Call_AttachContainerInput struct { + Type Call_AttachContainerInput_Type `protobuf:"varint,1,opt,name=type,enum=mesos.agent.Call_AttachContainerInput_Type" json:"type"` + ContainerID *mesos.ContainerID `protobuf:"bytes,2,opt,name=container_id,json=containerId" json:"container_id,omitempty"` + ProcessIO *ProcessIO `protobuf:"bytes,3,opt,name=process_io,json=processIo" json:"process_io,omitempty"` +} + +func (m *Call_AttachContainerInput) Reset() { *m = Call_AttachContainerInput{} } +func (*Call_AttachContainerInput) ProtoMessage() {} +func (*Call_AttachContainerInput) Descriptor() ([]byte, []int) { + return fileDescriptorAgent, []int{0, 10} +} + +func (m *Call_AttachContainerInput) GetType() Call_AttachContainerInput_Type { + if m != nil { + return m.Type + } + return Call_AttachContainerInput_UNKNOWN +} + +func (m *Call_AttachContainerInput) GetContainerID() *mesos.ContainerID { + if m != nil { + return m.ContainerID + } + return nil +} + +func (m *Call_AttachContainerInput) GetProcessIO() *ProcessIO { + if m != nil { + return m.ProcessIO + } + return nil +} + +// Attaches the caller to the STDOUT and STDERR of the entrypoint of +// the container. Clients can use this to stream output/error from the +// container. This call will result in a streaming response of `ProcessIO`; +// so this call needs to be made on a persistent connection. +type Call_AttachContainerOutput struct { + ContainerID mesos.ContainerID `protobuf:"bytes,1,req,name=container_id,json=containerId" json:"container_id"` +} + +func (m *Call_AttachContainerOutput) Reset() { *m = Call_AttachContainerOutput{} } +func (*Call_AttachContainerOutput) ProtoMessage() {} +func (*Call_AttachContainerOutput) Descriptor() ([]byte, []int) { + return fileDescriptorAgent, []int{0, 11} +} + +func (m *Call_AttachContainerOutput) GetContainerID() mesos.ContainerID { + if m != nil { + return m.ContainerID + } + return mesos.ContainerID{} +} + +// Launches a either a "standalone" container on this agent +// or a nested container within another tree of containers. 
+// +// A standalone container is launched by specifying a ContainerID +// with no parent. Standalone containers bypass the normal offer cycle +// between the master and agent. Unlike other containers, a standalone +// container does not have an executor or any tasks. This means the +// standalone container does not report back to Mesos or any framework +// and must be supervised separately. +// +// A nested container is launched by specifying a ContainerID with +// another existing container (including standalone containers) +// as the parent. +// +// Returns 200 OK if the new container launch succeeds. +// Returns 202 Accepted if the requested ContainerID is already in use +// by a standalone or nested container. +// Returns 400 Bad Request if the container launch fails. +type Call_LaunchContainer struct { + // NOTE: Some characters cannot be used in the ID. All characters + // must be valid filesystem path characters. In addition, '/' and '.' + // are reserved. + ContainerID mesos.ContainerID `protobuf:"bytes,1,req,name=container_id,json=containerId" json:"container_id"` + Command *mesos.CommandInfo `protobuf:"bytes,2,opt,name=command" json:"command,omitempty"` + // NOTE: Nested containers may not specify resources and instead + // share resources with its parent container. + // + // TODO(josephw): These resources are purely used for isolation + // and are not accounted for by the Mesos master (if connected). + // It is the caller's responsibility to ensure that resources are + // not overcommitted (e.g. CPU and memory) or conflicting (e.g. ports + // and volumes). Once there is support for preempting tasks and a + // way to update the resources advertised by the agent, these standalone + // container resources should be accounted for by the master. + Resources []mesos.Resource `protobuf:"bytes,3,rep,name=resources" json:"resources"` + Container *mesos.ContainerInfo `protobuf:"bytes,4,opt,name=container" json:"container,omitempty"` +} + +func (m *Call_LaunchContainer) Reset() { *m = Call_LaunchContainer{} } +func (*Call_LaunchContainer) ProtoMessage() {} +func (*Call_LaunchContainer) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{0, 12} } + +func (m *Call_LaunchContainer) GetContainerID() mesos.ContainerID { + if m != nil { + return m.ContainerID + } + return mesos.ContainerID{} +} + +func (m *Call_LaunchContainer) GetCommand() *mesos.CommandInfo { + if m != nil { + return m.Command + } + return nil +} + +func (m *Call_LaunchContainer) GetResources() []mesos.Resource { + if m != nil { + return m.Resources + } + return nil +} + +func (m *Call_LaunchContainer) GetContainer() *mesos.ContainerInfo { + if m != nil { + return m.Container + } + return nil +} + +// Waits for the standalone or nested container to terminate +// and returns the exit status. +// +// Returns 200 OK if and when the container exits. +// Returns 404 Not Found if the container does not exist. +type Call_WaitContainer struct { + ContainerID mesos.ContainerID `protobuf:"bytes,1,req,name=container_id,json=containerId" json:"container_id"` +} + +func (m *Call_WaitContainer) Reset() { *m = Call_WaitContainer{} } +func (*Call_WaitContainer) ProtoMessage() {} +func (*Call_WaitContainer) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{0, 13} } + +func (m *Call_WaitContainer) GetContainerID() mesos.ContainerID { + if m != nil { + return m.ContainerID + } + return mesos.ContainerID{} +} + +// Kills the standalone or nested container. 
The signal to be sent +// to the container can be specified in the 'signal' field. +// +// Returns 200 OK if the signal is sent successfully. +// Returns 404 Not Found if the container does not exist. +type Call_KillContainer struct { + ContainerID mesos.ContainerID `protobuf:"bytes,1,req,name=container_id,json=containerId" json:"container_id"` + // Defaults to SIGKILL. + Signal *int32 `protobuf:"varint,2,opt,name=signal" json:"signal,omitempty"` +} + +func (m *Call_KillContainer) Reset() { *m = Call_KillContainer{} } +func (*Call_KillContainer) ProtoMessage() {} +func (*Call_KillContainer) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{0, 14} } + +func (m *Call_KillContainer) GetContainerID() mesos.ContainerID { + if m != nil { + return m.ContainerID + } + return mesos.ContainerID{} +} + +func (m *Call_KillContainer) GetSignal() int32 { + if m != nil && m.Signal != nil { + return *m.Signal + } + return 0 +} + +// Removes a container's artifacts (runtime and sandbox directories). +// +// For nested containers, it is important to use this call if multiple +// nested containers are launched under the same parent container, because +// garbage collection only takes place at the parent container. Artifacts +// belonging to nested containers will not be garbage collected while +// the parent container is running. +// +// TODO(josephw): A standalone container's runtime directory is currently +// garbage collected as soon as the container exits. To allow the user to +// retrieve the exit status reliably, the runtime directory cannot be +// garbage collected immediately. Instead, the user will eventually be +// required to make this call after the standalone container has exited. +// Also, a standalone container's sandbox directory is currently not +// garbage collected and is only deleted via this call. +// +// Returns 200 OK if the removal is successful or if the parent container +// (for nested containers) does not exist. +// Returns 500 Internal Server Error if anything goes wrong, including +// if the container is still running or does not exist. +// +// TODO(josephw): Consider returning a 400 Bad Request instead of 500 +// Internal Server Error when the user tries to remove a running or +// nonexistent nested container. +type Call_RemoveContainer struct { + ContainerID mesos.ContainerID `protobuf:"bytes,1,req,name=container_id,json=containerId" json:"container_id"` +} + +func (m *Call_RemoveContainer) Reset() { *m = Call_RemoveContainer{} } +func (*Call_RemoveContainer) ProtoMessage() {} +func (*Call_RemoveContainer) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{0, 15} } + +func (m *Call_RemoveContainer) GetContainerID() mesos.ContainerID { + if m != nil { + return m.ContainerID + } + return mesos.ContainerID{} +} + +// Adds a new resource provider config file. +// +// The content of the `info` field will be written into a new config file in +// the resource provider config directory, and a new resource provider will be +// launched asynchronously based on the config. Callers must not set the +// `info.id` field. This call is idempotent, so if a config file identical to +// the content of the `info` field already exists, this call will return +// without launching a resource provider. Note that if a config file is +// placed into the resource provider config directory out-of-band after the +// agent starts up, it will not be checked against this call. +// +// Returns 200 OK if a new config file is created, or an identical config file +// exists. 
+// Returns 400 Bad Request if `info` is not well-formed.
+// Returns 403 Forbidden if the call is not authorized.
+// Returns 409 Conflict if another config file that describes a
+// resource provider of the same type and name exists, but the content is
+// not identical.
+// Returns 500 Internal Server Error if anything goes wrong.
+type Call_AddResourceProviderConfig struct {
+ Info mesos.ResourceProviderInfo `protobuf:"bytes,1,req,name=info" json:"info"`
+}
+
+func (m *Call_AddResourceProviderConfig) Reset() { *m = Call_AddResourceProviderConfig{} }
+func (*Call_AddResourceProviderConfig) ProtoMessage() {}
+func (*Call_AddResourceProviderConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptorAgent, []int{0, 16}
+}
+
+func (m *Call_AddResourceProviderConfig) GetInfo() mesos.ResourceProviderInfo {
+ if m != nil {
+ return m.Info
+ }
+ return mesos.ResourceProviderInfo{}
+}
+
+// Updates an existing resource provider config file.
+//
+// The content of the `info` field will be written into an existing config
+// file that describes a resource provider of the specified type and name in
+// the resource provider config directory, and the corresponding resource
+// provider will be relaunched asynchronously to reflect the changes in the
+// config. Callers must not set the `info.id` field. This call is idempotent,
+// so if there is no change in the config, this call will return without
+// relaunching the resource provider. Note that if a config file is placed
+// into the resource provider config directory out-of-band after the agent
+// starts up, it will not be checked against this call.
+//
+// Returns 200 OK if an existing config file is updated, or there is no change
+// in the config file.
+// Returns 400 Bad Request if `info` is not well-formed.
+// Returns 403 Forbidden if the call is not authorized.
+// Returns 404 Not Found if no config file that describes a resource
+// provider of the specified type and name exists.
+// Returns 500 Internal Server Error if anything goes wrong.
+type Call_UpdateResourceProviderConfig struct {
+ Info mesos.ResourceProviderInfo `protobuf:"bytes,1,req,name=info" json:"info"`
+}
+
+func (m *Call_UpdateResourceProviderConfig) Reset() { *m = Call_UpdateResourceProviderConfig{} }
+func (*Call_UpdateResourceProviderConfig) ProtoMessage() {}
+func (*Call_UpdateResourceProviderConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptorAgent, []int{0, 17}
+}
+
+func (m *Call_UpdateResourceProviderConfig) GetInfo() mesos.ResourceProviderInfo {
+ if m != nil {
+ return m.Info
+ }
+ return mesos.ResourceProviderInfo{}
+}
+
+// Removes a config file from the resource provider config directory.
+//
+// The config file that describes the resource provider of the specified type
+// and name will be removed, and the corresponding resource provider will be
+// terminated asynchronously. This call is idempotent, so if no matching
+// config file exists, this call will return without terminating any resource
+// provider. Note that if a config file is placed into the resource provider
+// config directory out-of-band after the agent starts up, it will not be
+// checked against this call.
+//
+// Returns 200 OK if the config file is removed, or no matching config file
+// exists.
+// Returns 403 Forbidden if the call is not authorized.
+// Returns 500 Internal Server Error if anything goes wrong.
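+//
+// For illustration only (hand-written sketch, not generated code): a removal
+// call for a hypothetical local storage provider could be assembled as
+//
+//	call := &Call{
+//		Type: Call_REMOVE_RESOURCE_PROVIDER_CONFIG,
+//		RemoveResourceProviderConfig: &Call_RemoveResourceProviderConfig{
+//			Type: "org.apache.mesos.rp.local.storage",
+//			Name: "test_slrp",
+//		},
+//	}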
+type Call_RemoveResourceProviderConfig struct { + Type string `protobuf:"bytes,1,req,name=type" json:"type"` + Name string `protobuf:"bytes,2,req,name=name" json:"name"` +} + +func (m *Call_RemoveResourceProviderConfig) Reset() { *m = Call_RemoveResourceProviderConfig{} } +func (*Call_RemoveResourceProviderConfig) ProtoMessage() {} +func (*Call_RemoveResourceProviderConfig) Descriptor() ([]byte, []int) { + return fileDescriptorAgent, []int{0, 18} +} + +func (m *Call_RemoveResourceProviderConfig) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Call_RemoveResourceProviderConfig) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Prune unused container images from image store. +// +// Images and layers referenced by active containers as well as +// image references specified in `excluded_images` will not be pruned. +type Call_PruneImages struct { + ExcludedImages []mesos.Image `protobuf:"bytes,1,rep,name=excluded_images,json=excludedImages" json:"excluded_images"` +} + +func (m *Call_PruneImages) Reset() { *m = Call_PruneImages{} } +func (*Call_PruneImages) ProtoMessage() {} +func (*Call_PruneImages) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{0, 19} } + +func (m *Call_PruneImages) GetExcludedImages() []mesos.Image { + if m != nil { + return m.ExcludedImages + } + return nil +} + +// * +// Synchronous responses for all calls made to the v1 agent API. +type Response struct { + Type Response_Type `protobuf:"varint,1,opt,name=type,enum=mesos.agent.Response_Type" json:"type"` + GetHealth *Response_GetHealth `protobuf:"bytes,2,opt,name=get_health,json=getHealth" json:"get_health,omitempty"` + GetFlags *Response_GetFlags `protobuf:"bytes,3,opt,name=get_flags,json=getFlags" json:"get_flags,omitempty"` + GetVersion *Response_GetVersion `protobuf:"bytes,4,opt,name=get_version,json=getVersion" json:"get_version,omitempty"` + GetMetrics *Response_GetMetrics `protobuf:"bytes,5,opt,name=get_metrics,json=getMetrics" json:"get_metrics,omitempty"` + GetLoggingLevel *Response_GetLoggingLevel `protobuf:"bytes,6,opt,name=get_logging_level,json=getLoggingLevel" json:"get_logging_level,omitempty"` + ListFiles *Response_ListFiles `protobuf:"bytes,7,opt,name=list_files,json=listFiles" json:"list_files,omitempty"` + ReadFile *Response_ReadFile `protobuf:"bytes,8,opt,name=read_file,json=readFile" json:"read_file,omitempty"` + GetState *Response_GetState `protobuf:"bytes,9,opt,name=get_state,json=getState" json:"get_state,omitempty"` + GetContainers *Response_GetContainers `protobuf:"bytes,10,opt,name=get_containers,json=getContainers" json:"get_containers,omitempty"` + GetFrameworks *Response_GetFrameworks `protobuf:"bytes,11,opt,name=get_frameworks,json=getFrameworks" json:"get_frameworks,omitempty"` + GetExecutors *Response_GetExecutors `protobuf:"bytes,12,opt,name=get_executors,json=getExecutors" json:"get_executors,omitempty"` + GetOperations *Response_GetOperations `protobuf:"bytes,18,opt,name=get_operations,json=getOperations" json:"get_operations,omitempty"` + GetTasks *Response_GetTasks `protobuf:"bytes,13,opt,name=get_tasks,json=getTasks" json:"get_tasks,omitempty"` + GetAgent *Response_GetAgent `protobuf:"bytes,15,opt,name=get_agent,json=getAgent" json:"get_agent,omitempty"` + GetResourceProviders *Response_GetResourceProviders `protobuf:"bytes,17,opt,name=get_resource_providers,json=getResourceProviders" json:"get_resource_providers,omitempty"` + WaitNestedContainer *Response_WaitNestedContainer 
`protobuf:"bytes,14,opt,name=wait_nested_container,json=waitNestedContainer" json:"wait_nested_container,omitempty"` + WaitContainer *Response_WaitContainer `protobuf:"bytes,16,opt,name=wait_container,json=waitContainer" json:"wait_container,omitempty"` +} + +func (m *Response) Reset() { *m = Response{} } +func (*Response) ProtoMessage() {} +func (*Response) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{1} } + +func (m *Response) GetType() Response_Type { + if m != nil { + return m.Type + } + return Response_UNKNOWN +} + +func (m *Response) GetGetHealth() *Response_GetHealth { + if m != nil { + return m.GetHealth + } + return nil +} + +func (m *Response) GetGetFlags() *Response_GetFlags { + if m != nil { + return m.GetFlags + } + return nil +} + +func (m *Response) GetGetVersion() *Response_GetVersion { + if m != nil { + return m.GetVersion + } + return nil +} + +func (m *Response) GetGetMetrics() *Response_GetMetrics { + if m != nil { + return m.GetMetrics + } + return nil +} + +func (m *Response) GetGetLoggingLevel() *Response_GetLoggingLevel { + if m != nil { + return m.GetLoggingLevel + } + return nil +} + +func (m *Response) GetListFiles() *Response_ListFiles { + if m != nil { + return m.ListFiles + } + return nil +} + +func (m *Response) GetReadFile() *Response_ReadFile { + if m != nil { + return m.ReadFile + } + return nil +} + +func (m *Response) GetGetState() *Response_GetState { + if m != nil { + return m.GetState + } + return nil +} + +func (m *Response) GetGetContainers() *Response_GetContainers { + if m != nil { + return m.GetContainers + } + return nil +} + +func (m *Response) GetGetFrameworks() *Response_GetFrameworks { + if m != nil { + return m.GetFrameworks + } + return nil +} + +func (m *Response) GetGetExecutors() *Response_GetExecutors { + if m != nil { + return m.GetExecutors + } + return nil +} + +func (m *Response) GetGetOperations() *Response_GetOperations { + if m != nil { + return m.GetOperations + } + return nil +} + +func (m *Response) GetGetTasks() *Response_GetTasks { + if m != nil { + return m.GetTasks + } + return nil +} + +func (m *Response) GetGetAgent() *Response_GetAgent { + if m != nil { + return m.GetAgent + } + return nil +} + +func (m *Response) GetGetResourceProviders() *Response_GetResourceProviders { + if m != nil { + return m.GetResourceProviders + } + return nil +} + +func (m *Response) GetWaitNestedContainer() *Response_WaitNestedContainer { + if m != nil { + return m.WaitNestedContainer + } + return nil +} + +func (m *Response) GetWaitContainer() *Response_WaitContainer { + if m != nil { + return m.WaitContainer + } + return nil +} + +// `healthy` would be true if the agent is healthy. Delayed responses are also +// indicative of the poor health of the agent. +type Response_GetHealth struct { + Healthy bool `protobuf:"varint,1,req,name=healthy" json:"healthy"` +} + +func (m *Response_GetHealth) Reset() { *m = Response_GetHealth{} } +func (*Response_GetHealth) ProtoMessage() {} +func (*Response_GetHealth) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{1, 0} } + +func (m *Response_GetHealth) GetHealthy() bool { + if m != nil { + return m.Healthy + } + return false +} + +// Contains the flag configuration of the agent. 
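+//
+// For illustration only (hand-written sketch, not generated code; `resp`
+// stands for an assumed *Response obtained from a GET_FLAGS call):
+//
+//	for _, f := range resp.GetGetFlags().GetFlags() {
+//		fmt.Printf("%s=%s\n", f.GetName(), f.GetValue())
+//	}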
+type Response_GetFlags struct {
+ Flags []mesos.Flag `protobuf:"bytes,1,rep,name=flags" json:"flags"`
+}
+
+func (m *Response_GetFlags) Reset() { *m = Response_GetFlags{} }
+func (*Response_GetFlags) ProtoMessage() {}
+func (*Response_GetFlags) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{1, 1} }
+
+func (m *Response_GetFlags) GetFlags() []mesos.Flag {
+ if m != nil {
+ return m.Flags
+ }
+ return nil
+}
+
+// Contains the version information of the agent.
+type Response_GetVersion struct {
+ VersionInfo mesos.VersionInfo `protobuf:"bytes,1,req,name=version_info,json=versionInfo" json:"version_info"`
+}
+
+func (m *Response_GetVersion) Reset() { *m = Response_GetVersion{} }
+func (*Response_GetVersion) ProtoMessage() {}
+func (*Response_GetVersion) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{1, 2} }
+
+func (m *Response_GetVersion) GetVersionInfo() mesos.VersionInfo {
+ if m != nil {
+ return m.VersionInfo
+ }
+ return mesos.VersionInfo{}
+}
+
+// Contains a snapshot of the current metrics.
+type Response_GetMetrics struct {
+ Metrics []mesos.Metric `protobuf:"bytes,1,rep,name=metrics" json:"metrics"`
+}
+
+func (m *Response_GetMetrics) Reset() { *m = Response_GetMetrics{} }
+func (*Response_GetMetrics) ProtoMessage() {}
+func (*Response_GetMetrics) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{1, 3} }
+
+func (m *Response_GetMetrics) GetMetrics() []mesos.Metric {
+ if m != nil {
+ return m.Metrics
+ }
+ return nil
+}
+
+// Contains the logging level of the agent.
+type Response_GetLoggingLevel struct {
+ Level uint32 `protobuf:"varint,1,req,name=level" json:"level"`
+}
+
+func (m *Response_GetLoggingLevel) Reset() { *m = Response_GetLoggingLevel{} }
+func (*Response_GetLoggingLevel) ProtoMessage() {}
+func (*Response_GetLoggingLevel) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{1, 4} }
+
+func (m *Response_GetLoggingLevel) GetLevel() uint32 {
+ if m != nil {
+ return m.Level
+ }
+ return 0
+}
+
+// Contains the file listing (similar to `ls -l`) for a directory.
+type Response_ListFiles struct {
+ FileInfos []mesos.FileInfo `protobuf:"bytes,1,rep,name=file_infos,json=fileInfos" json:"file_infos"`
+}
+
+func (m *Response_ListFiles) Reset() { *m = Response_ListFiles{} }
+func (*Response_ListFiles) ProtoMessage() {}
+func (*Response_ListFiles) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{1, 5} }
+
+func (m *Response_ListFiles) GetFileInfos() []mesos.FileInfo {
+ if m != nil {
+ return m.FileInfos
+ }
+ return nil
+}
+
+// Contains the file data.
+type Response_ReadFile struct {
+ // The size of the file (in bytes).
+ Size uint64 `protobuf:"varint,1,req,name=size" json:"size"`
+ Data []byte `protobuf:"bytes,2,req,name=data" json:"data,omitempty"`
+}
+
+func (m *Response_ReadFile) Reset() { *m = Response_ReadFile{} }
+func (*Response_ReadFile) ProtoMessage() {}
+func (*Response_ReadFile) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{1, 6} }
+
+func (m *Response_ReadFile) GetSize() uint64 {
+ if m != nil {
+ return m.Size
+ }
+ return 0
+}
+
+func (m *Response_ReadFile) GetData() []byte {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+// Contains the full state of the agent, i.e. information about the tasks,
+// frameworks and executors running in the cluster.
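+//
+// For illustration only (hand-written sketch, not generated code; `resp`
+// stands for an assumed *Response obtained from a GET_STATE call). The
+// generated getters are nil-safe, so the walk below also works on a
+// partial response:
+//
+//	state := resp.GetGetState()
+//	for _, t := range state.GetGetTasks().GetLaunchedTasks() {
+//		fmt.Println(t.GetName(), t.GetState())
+//	}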
+type Response_GetState struct { + GetTasks *Response_GetTasks `protobuf:"bytes,1,opt,name=get_tasks,json=getTasks" json:"get_tasks,omitempty"` + GetExecutors *Response_GetExecutors `protobuf:"bytes,2,opt,name=get_executors,json=getExecutors" json:"get_executors,omitempty"` + GetFrameworks *Response_GetFrameworks `protobuf:"bytes,3,opt,name=get_frameworks,json=getFrameworks" json:"get_frameworks,omitempty"` +} + +func (m *Response_GetState) Reset() { *m = Response_GetState{} } +func (*Response_GetState) ProtoMessage() {} +func (*Response_GetState) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{1, 7} } + +func (m *Response_GetState) GetGetTasks() *Response_GetTasks { + if m != nil { + return m.GetTasks + } + return nil +} + +func (m *Response_GetState) GetGetExecutors() *Response_GetExecutors { + if m != nil { + return m.GetExecutors + } + return nil +} + +func (m *Response_GetState) GetGetFrameworks() *Response_GetFrameworks { + if m != nil { + return m.GetFrameworks + } + return nil +} + +// Information about containers running on this agent. It contains +// ContainerStatus and ResourceStatistics along with some metadata +// of the containers. +type Response_GetContainers struct { + Containers []Response_GetContainers_Container `protobuf:"bytes,1,rep,name=containers" json:"containers"` +} + +func (m *Response_GetContainers) Reset() { *m = Response_GetContainers{} } +func (*Response_GetContainers) ProtoMessage() {} +func (*Response_GetContainers) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{1, 8} } + +func (m *Response_GetContainers) GetContainers() []Response_GetContainers_Container { + if m != nil { + return m.Containers + } + return nil +} + +type Response_GetContainers_Container struct { + FrameworkID *mesos.FrameworkID `protobuf:"bytes,1,opt,name=framework_id,json=frameworkId" json:"framework_id,omitempty"` + ExecutorID *mesos.ExecutorID `protobuf:"bytes,2,opt,name=executor_id,json=executorId" json:"executor_id,omitempty"` + ExecutorName *string `protobuf:"bytes,3,opt,name=executor_name,json=executorName" json:"executor_name,omitempty"` + ContainerID mesos.ContainerID `protobuf:"bytes,4,req,name=container_id,json=containerId" json:"container_id"` + ContainerStatus *mesos.ContainerStatus `protobuf:"bytes,5,opt,name=container_status,json=containerStatus" json:"container_status,omitempty"` + ResourceStatistics *mesos.ResourceStatistics `protobuf:"bytes,6,opt,name=resource_statistics,json=resourceStatistics" json:"resource_statistics,omitempty"` +} + +func (m *Response_GetContainers_Container) Reset() { *m = Response_GetContainers_Container{} } +func (*Response_GetContainers_Container) ProtoMessage() {} +func (*Response_GetContainers_Container) Descriptor() ([]byte, []int) { + return fileDescriptorAgent, []int{1, 8, 0} +} + +func (m *Response_GetContainers_Container) GetFrameworkID() *mesos.FrameworkID { + if m != nil { + return m.FrameworkID + } + return nil +} + +func (m *Response_GetContainers_Container) GetExecutorID() *mesos.ExecutorID { + if m != nil { + return m.ExecutorID + } + return nil +} + +func (m *Response_GetContainers_Container) GetExecutorName() string { + if m != nil && m.ExecutorName != nil { + return *m.ExecutorName + } + return "" +} + +func (m *Response_GetContainers_Container) GetContainerID() mesos.ContainerID { + if m != nil { + return m.ContainerID + } + return mesos.ContainerID{} +} + +func (m *Response_GetContainers_Container) GetContainerStatus() *mesos.ContainerStatus { + if m != nil { + return m.ContainerStatus + } + 
return nil +} + +func (m *Response_GetContainers_Container) GetResourceStatistics() *mesos.ResourceStatistics { + if m != nil { + return m.ResourceStatistics + } + return nil +} + +// Information about all the frameworks known to the agent at the current +// time. +type Response_GetFrameworks struct { + Frameworks []Response_GetFrameworks_Framework `protobuf:"bytes,1,rep,name=frameworks" json:"frameworks"` + CompletedFrameworks []Response_GetFrameworks_Framework `protobuf:"bytes,2,rep,name=completed_frameworks,json=completedFrameworks" json:"completed_frameworks"` +} + +func (m *Response_GetFrameworks) Reset() { *m = Response_GetFrameworks{} } +func (*Response_GetFrameworks) ProtoMessage() {} +func (*Response_GetFrameworks) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{1, 9} } + +func (m *Response_GetFrameworks) GetFrameworks() []Response_GetFrameworks_Framework { + if m != nil { + return m.Frameworks + } + return nil +} + +func (m *Response_GetFrameworks) GetCompletedFrameworks() []Response_GetFrameworks_Framework { + if m != nil { + return m.CompletedFrameworks + } + return nil +} + +type Response_GetFrameworks_Framework struct { + FrameworkInfo mesos.FrameworkInfo `protobuf:"bytes,1,req,name=framework_info,json=frameworkInfo" json:"framework_info"` +} + +func (m *Response_GetFrameworks_Framework) Reset() { *m = Response_GetFrameworks_Framework{} } +func (*Response_GetFrameworks_Framework) ProtoMessage() {} +func (*Response_GetFrameworks_Framework) Descriptor() ([]byte, []int) { + return fileDescriptorAgent, []int{1, 9, 0} +} + +func (m *Response_GetFrameworks_Framework) GetFrameworkInfo() mesos.FrameworkInfo { + if m != nil { + return m.FrameworkInfo + } + return mesos.FrameworkInfo{} +} + +// Lists information about all the executors known to the agent at the +// current time. +type Response_GetExecutors struct { + Executors []Response_GetExecutors_Executor `protobuf:"bytes,1,rep,name=executors" json:"executors"` + CompletedExecutors []Response_GetExecutors_Executor `protobuf:"bytes,2,rep,name=completed_executors,json=completedExecutors" json:"completed_executors"` +} + +func (m *Response_GetExecutors) Reset() { *m = Response_GetExecutors{} } +func (*Response_GetExecutors) ProtoMessage() {} +func (*Response_GetExecutors) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{1, 10} } + +func (m *Response_GetExecutors) GetExecutors() []Response_GetExecutors_Executor { + if m != nil { + return m.Executors + } + return nil +} + +func (m *Response_GetExecutors) GetCompletedExecutors() []Response_GetExecutors_Executor { + if m != nil { + return m.CompletedExecutors + } + return nil +} + +type Response_GetExecutors_Executor struct { + ExecutorInfo mesos.ExecutorInfo `protobuf:"bytes,1,req,name=executor_info,json=executorInfo" json:"executor_info"` +} + +func (m *Response_GetExecutors_Executor) Reset() { *m = Response_GetExecutors_Executor{} } +func (*Response_GetExecutors_Executor) ProtoMessage() {} +func (*Response_GetExecutors_Executor) Descriptor() ([]byte, []int) { + return fileDescriptorAgent, []int{1, 10, 0} +} + +func (m *Response_GetExecutors_Executor) GetExecutorInfo() mesos.ExecutorInfo { + if m != nil { + return m.ExecutorInfo + } + return mesos.ExecutorInfo{} +} + +// Lists information about all operations known to the agent at the +// current time. 
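+//
+// For illustration only (hand-written sketch, not generated code; `resp`
+// stands for an assumed *Response obtained from a GET_OPERATIONS call):
+//
+//	ops := resp.GetGetOperations().GetOperations()
+//	fmt.Printf("agent knows about %d operations\n", len(ops))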
+type Response_GetOperations struct { + Operations []mesos.Operation `protobuf:"bytes,1,rep,name=operations" json:"operations"` +} + +func (m *Response_GetOperations) Reset() { *m = Response_GetOperations{} } +func (*Response_GetOperations) ProtoMessage() {} +func (*Response_GetOperations) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{1, 11} } + +func (m *Response_GetOperations) GetOperations() []mesos.Operation { + if m != nil { + return m.Operations + } + return nil +} + +// Lists information about all the tasks known to the agent at the current +// time. +type Response_GetTasks struct { + // Tasks that are pending in the agent's queue before an executor is + // launched. + PendingTasks []mesos.Task `protobuf:"bytes,1,rep,name=pending_tasks,json=pendingTasks" json:"pending_tasks"` + // Tasks that are enqueued for a launched executor that has not yet + // registered. + QueuedTasks []mesos.Task `protobuf:"bytes,2,rep,name=queued_tasks,json=queuedTasks" json:"queued_tasks"` + // Tasks that are running. + LaunchedTasks []mesos.Task `protobuf:"bytes,3,rep,name=launched_tasks,json=launchedTasks" json:"launched_tasks"` + // Tasks that are terminated but pending updates. + TerminatedTasks []mesos.Task `protobuf:"bytes,4,rep,name=terminated_tasks,json=terminatedTasks" json:"terminated_tasks"` + // Tasks that are terminated and updates acked. + CompletedTasks []mesos.Task `protobuf:"bytes,5,rep,name=completed_tasks,json=completedTasks" json:"completed_tasks"` +} + +func (m *Response_GetTasks) Reset() { *m = Response_GetTasks{} } +func (*Response_GetTasks) ProtoMessage() {} +func (*Response_GetTasks) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{1, 12} } + +func (m *Response_GetTasks) GetPendingTasks() []mesos.Task { + if m != nil { + return m.PendingTasks + } + return nil +} + +func (m *Response_GetTasks) GetQueuedTasks() []mesos.Task { + if m != nil { + return m.QueuedTasks + } + return nil +} + +func (m *Response_GetTasks) GetLaunchedTasks() []mesos.Task { + if m != nil { + return m.LaunchedTasks + } + return nil +} + +func (m *Response_GetTasks) GetTerminatedTasks() []mesos.Task { + if m != nil { + return m.TerminatedTasks + } + return nil +} + +func (m *Response_GetTasks) GetCompletedTasks() []mesos.Task { + if m != nil { + return m.CompletedTasks + } + return nil +} + +// Contains the agent's information. +type Response_GetAgent struct { + AgentInfo *mesos.AgentInfo `protobuf:"bytes,1,opt,name=agent_info,json=agentInfo" json:"agent_info,omitempty"` +} + +func (m *Response_GetAgent) Reset() { *m = Response_GetAgent{} } +func (*Response_GetAgent) ProtoMessage() {} +func (*Response_GetAgent) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{1, 13} } + +func (m *Response_GetAgent) GetAgentInfo() *mesos.AgentInfo { + if m != nil { + return m.AgentInfo + } + return nil +} + +// Lists information about all resource providers known to the agent +// at the current time. 
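+//
+// For illustration only (hand-written sketch, not generated code; `resp`
+// stands for an assumed *Response obtained from a GET_RESOURCE_PROVIDERS
+// call):
+//
+//	for _, rp := range resp.GetGetResourceProviders().GetResourceProviders() {
+//		info := rp.GetResourceProviderInfo()
+//		fmt.Println(info.GetType(), info.GetName(), len(rp.GetTotalResources()))
+//	}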
+type Response_GetResourceProviders struct { + ResourceProviders []Response_GetResourceProviders_ResourceProvider `protobuf:"bytes,1,rep,name=resource_providers,json=resourceProviders" json:"resource_providers"` +} + +func (m *Response_GetResourceProviders) Reset() { *m = Response_GetResourceProviders{} } +func (*Response_GetResourceProviders) ProtoMessage() {} +func (*Response_GetResourceProviders) Descriptor() ([]byte, []int) { + return fileDescriptorAgent, []int{1, 14} +} + +func (m *Response_GetResourceProviders) GetResourceProviders() []Response_GetResourceProviders_ResourceProvider { + if m != nil { + return m.ResourceProviders + } + return nil +} + +type Response_GetResourceProviders_ResourceProvider struct { + ResourceProviderInfo mesos.ResourceProviderInfo `protobuf:"bytes,1,req,name=resource_provider_info,json=resourceProviderInfo" json:"resource_provider_info"` + TotalResources []mesos.Resource `protobuf:"bytes,2,rep,name=total_resources,json=totalResources" json:"total_resources"` +} + +func (m *Response_GetResourceProviders_ResourceProvider) Reset() { + *m = Response_GetResourceProviders_ResourceProvider{} +} +func (*Response_GetResourceProviders_ResourceProvider) ProtoMessage() {} +func (*Response_GetResourceProviders_ResourceProvider) Descriptor() ([]byte, []int) { + return fileDescriptorAgent, []int{1, 14, 0} +} + +func (m *Response_GetResourceProviders_ResourceProvider) GetResourceProviderInfo() mesos.ResourceProviderInfo { + if m != nil { + return m.ResourceProviderInfo + } + return mesos.ResourceProviderInfo{} +} + +func (m *Response_GetResourceProviders_ResourceProvider) GetTotalResources() []mesos.Resource { + if m != nil { + return m.TotalResources + } + return nil +} + +// Returns termination information about the nested container. +type Response_WaitNestedContainer struct { + // Wait status of the lead process in the container. Note that this + // is the return value of `wait(2)`, so callers must use the `wait(2)` + // family of macros to extract whether the process exited cleanly and + // what the exit code was. + ExitStatus *int32 `protobuf:"varint,1,opt,name=exit_status,json=exitStatus" json:"exit_status,omitempty"` + // The `state` and `reason` fields may be populated if the Mesos agent + // terminates the container. In the absence of any special knowledge, + // executors should propagate this information via the `status` field + // of an `Update` call for the corresponding TaskID. + State *mesos.TaskState `protobuf:"varint,2,opt,name=state,enum=mesos.TaskState" json:"state,omitempty"` + Reason *mesos.TaskStatus_Reason `protobuf:"varint,3,opt,name=reason,enum=mesos.TaskStatus_Reason" json:"reason,omitempty"` + // This field will be populated if the task was terminated due to + // a resource limitation. 
+ Limitation *mesos.TaskResourceLimitation `protobuf:"bytes,4,opt,name=limitation" json:"limitation,omitempty"` + Message *string `protobuf:"bytes,5,opt,name=message" json:"message,omitempty"` +} + +func (m *Response_WaitNestedContainer) Reset() { *m = Response_WaitNestedContainer{} } +func (*Response_WaitNestedContainer) ProtoMessage() {} +func (*Response_WaitNestedContainer) Descriptor() ([]byte, []int) { + return fileDescriptorAgent, []int{1, 15} +} + +func (m *Response_WaitNestedContainer) GetExitStatus() int32 { + if m != nil && m.ExitStatus != nil { + return *m.ExitStatus + } + return 0 +} + +func (m *Response_WaitNestedContainer) GetState() mesos.TaskState { + if m != nil && m.State != nil { + return *m.State + } + return mesos.TASK_STAGING +} + +func (m *Response_WaitNestedContainer) GetReason() mesos.TaskStatus_Reason { + if m != nil && m.Reason != nil { + return *m.Reason + } + return mesos.REASON_COMMAND_EXECUTOR_FAILED +} + +func (m *Response_WaitNestedContainer) GetLimitation() *mesos.TaskResourceLimitation { + if m != nil { + return m.Limitation + } + return nil +} + +func (m *Response_WaitNestedContainer) GetMessage() string { + if m != nil && m.Message != nil { + return *m.Message + } + return "" +} + +// Returns termination information about the standalone or nested container. +type Response_WaitContainer struct { + // Wait status of the lead process in the container. Note that this + // is the return value of `wait(2)`, so callers must use the `wait(2)` + // family of macros to extract whether the process exited cleanly and + // what the exit code was. + ExitStatus *int32 `protobuf:"varint,1,opt,name=exit_status,json=exitStatus" json:"exit_status,omitempty"` + // The `state` and `reason` fields may be populated if the Mesos agent + // terminates the container. In the absence of any special knowledge, + // executors should propagate this information via the `status` field + // of an `Update` call for the corresponding TaskID. + State *mesos.TaskState `protobuf:"varint,2,opt,name=state,enum=mesos.TaskState" json:"state,omitempty"` + Reason *mesos.TaskStatus_Reason `protobuf:"varint,3,opt,name=reason,enum=mesos.TaskStatus_Reason" json:"reason,omitempty"` + // This field will be populated if the task was terminated due to + // a resource limitation. 
+ Limitation *mesos.TaskResourceLimitation `protobuf:"bytes,4,opt,name=limitation" json:"limitation,omitempty"` + Message *string `protobuf:"bytes,5,opt,name=message" json:"message,omitempty"` +} + +func (m *Response_WaitContainer) Reset() { *m = Response_WaitContainer{} } +func (*Response_WaitContainer) ProtoMessage() {} +func (*Response_WaitContainer) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{1, 16} } + +func (m *Response_WaitContainer) GetExitStatus() int32 { + if m != nil && m.ExitStatus != nil { + return *m.ExitStatus + } + return 0 +} + +func (m *Response_WaitContainer) GetState() mesos.TaskState { + if m != nil && m.State != nil { + return *m.State + } + return mesos.TASK_STAGING +} + +func (m *Response_WaitContainer) GetReason() mesos.TaskStatus_Reason { + if m != nil && m.Reason != nil { + return *m.Reason + } + return mesos.REASON_COMMAND_EXECUTOR_FAILED +} + +func (m *Response_WaitContainer) GetLimitation() *mesos.TaskResourceLimitation { + if m != nil { + return m.Limitation + } + return nil +} + +func (m *Response_WaitContainer) GetMessage() string { + if m != nil && m.Message != nil { + return *m.Message + } + return "" +} + +// * +// Streaming response to `Call::LAUNCH_NESTED_CONTAINER_SESSION` and +// `Call::ATTACH_CONTAINER_OUTPUT`. +// +// This message is also used to stream request data for +// `Call::ATTACH_CONTAINER_INPUT`. +type ProcessIO struct { + Type ProcessIO_Type `protobuf:"varint,1,opt,name=type,enum=mesos.agent.ProcessIO_Type" json:"type"` + Data *ProcessIO_Data `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"` + Control *ProcessIO_Control `protobuf:"bytes,3,opt,name=control" json:"control,omitempty"` +} + +func (m *ProcessIO) Reset() { *m = ProcessIO{} } +func (*ProcessIO) ProtoMessage() {} +func (*ProcessIO) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{2} } + +func (m *ProcessIO) GetType() ProcessIO_Type { + if m != nil { + return m.Type + } + return ProcessIO_UNKNOWN +} + +func (m *ProcessIO) GetData() *ProcessIO_Data { + if m != nil { + return m.Data + } + return nil +} + +func (m *ProcessIO) GetControl() *ProcessIO_Control { + if m != nil { + return m.Control + } + return nil +} + +type ProcessIO_Data struct { + Type ProcessIO_Data_Type `protobuf:"varint,1,opt,name=type,enum=mesos.agent.ProcessIO_Data_Type" json:"type"` + Data []byte `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"` +} + +func (m *ProcessIO_Data) Reset() { *m = ProcessIO_Data{} } +func (*ProcessIO_Data) ProtoMessage() {} +func (*ProcessIO_Data) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{2, 0} } + +func (m *ProcessIO_Data) GetType() ProcessIO_Data_Type { + if m != nil { + return m.Type + } + return ProcessIO_Data_UNKNOWN +} + +func (m *ProcessIO_Data) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +type ProcessIO_Control struct { + Type ProcessIO_Control_Type `protobuf:"varint,1,opt,name=type,enum=mesos.agent.ProcessIO_Control_Type" json:"type"` + TTYInfo *mesos.TTYInfo `protobuf:"bytes,2,opt,name=tty_info,json=ttyInfo" json:"tty_info,omitempty"` + Heartbeat *ProcessIO_Control_Heartbeat `protobuf:"bytes,3,opt,name=heartbeat" json:"heartbeat,omitempty"` +} + +func (m *ProcessIO_Control) Reset() { *m = ProcessIO_Control{} } +func (*ProcessIO_Control) ProtoMessage() {} +func (*ProcessIO_Control) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{2, 1} } + +func (m *ProcessIO_Control) GetType() ProcessIO_Control_Type { + if m != nil { + return m.Type + } + return 
ProcessIO_Control_UNKNOWN +} + +func (m *ProcessIO_Control) GetTTYInfo() *mesos.TTYInfo { + if m != nil { + return m.TTYInfo + } + return nil +} + +func (m *ProcessIO_Control) GetHeartbeat() *ProcessIO_Control_Heartbeat { + if m != nil { + return m.Heartbeat + } + return nil +} + +type ProcessIO_Control_Heartbeat struct { + Interval *mesos.DurationInfo `protobuf:"bytes,1,opt,name=interval" json:"interval,omitempty"` +} + +func (m *ProcessIO_Control_Heartbeat) Reset() { *m = ProcessIO_Control_Heartbeat{} } +func (*ProcessIO_Control_Heartbeat) ProtoMessage() {} +func (*ProcessIO_Control_Heartbeat) Descriptor() ([]byte, []int) { + return fileDescriptorAgent, []int{2, 1, 0} +} + +func (m *ProcessIO_Control_Heartbeat) GetInterval() *mesos.DurationInfo { + if m != nil { + return m.Interval + } + return nil +} + +func init() { + proto.RegisterType((*Call)(nil), "mesos.agent.Call") + proto.RegisterType((*Call_GetMetrics)(nil), "mesos.agent.Call.GetMetrics") + proto.RegisterType((*Call_SetLoggingLevel)(nil), "mesos.agent.Call.SetLoggingLevel") + proto.RegisterType((*Call_ListFiles)(nil), "mesos.agent.Call.ListFiles") + proto.RegisterType((*Call_ReadFile)(nil), "mesos.agent.Call.ReadFile") + proto.RegisterType((*Call_GetContainers)(nil), "mesos.agent.Call.GetContainers") + proto.RegisterType((*Call_LaunchNestedContainer)(nil), "mesos.agent.Call.LaunchNestedContainer") + proto.RegisterType((*Call_WaitNestedContainer)(nil), "mesos.agent.Call.WaitNestedContainer") + proto.RegisterType((*Call_KillNestedContainer)(nil), "mesos.agent.Call.KillNestedContainer") + proto.RegisterType((*Call_RemoveNestedContainer)(nil), "mesos.agent.Call.RemoveNestedContainer") + proto.RegisterType((*Call_LaunchNestedContainerSession)(nil), "mesos.agent.Call.LaunchNestedContainerSession") + proto.RegisterType((*Call_AttachContainerInput)(nil), "mesos.agent.Call.AttachContainerInput") + proto.RegisterType((*Call_AttachContainerOutput)(nil), "mesos.agent.Call.AttachContainerOutput") + proto.RegisterType((*Call_LaunchContainer)(nil), "mesos.agent.Call.LaunchContainer") + proto.RegisterType((*Call_WaitContainer)(nil), "mesos.agent.Call.WaitContainer") + proto.RegisterType((*Call_KillContainer)(nil), "mesos.agent.Call.KillContainer") + proto.RegisterType((*Call_RemoveContainer)(nil), "mesos.agent.Call.RemoveContainer") + proto.RegisterType((*Call_AddResourceProviderConfig)(nil), "mesos.agent.Call.AddResourceProviderConfig") + proto.RegisterType((*Call_UpdateResourceProviderConfig)(nil), "mesos.agent.Call.UpdateResourceProviderConfig") + proto.RegisterType((*Call_RemoveResourceProviderConfig)(nil), "mesos.agent.Call.RemoveResourceProviderConfig") + proto.RegisterType((*Call_PruneImages)(nil), "mesos.agent.Call.PruneImages") + proto.RegisterType((*Response)(nil), "mesos.agent.Response") + proto.RegisterType((*Response_GetHealth)(nil), "mesos.agent.Response.GetHealth") + proto.RegisterType((*Response_GetFlags)(nil), "mesos.agent.Response.GetFlags") + proto.RegisterType((*Response_GetVersion)(nil), "mesos.agent.Response.GetVersion") + proto.RegisterType((*Response_GetMetrics)(nil), "mesos.agent.Response.GetMetrics") + proto.RegisterType((*Response_GetLoggingLevel)(nil), "mesos.agent.Response.GetLoggingLevel") + proto.RegisterType((*Response_ListFiles)(nil), "mesos.agent.Response.ListFiles") + proto.RegisterType((*Response_ReadFile)(nil), "mesos.agent.Response.ReadFile") + proto.RegisterType((*Response_GetState)(nil), "mesos.agent.Response.GetState") + proto.RegisterType((*Response_GetContainers)(nil), 
"mesos.agent.Response.GetContainers") + proto.RegisterType((*Response_GetContainers_Container)(nil), "mesos.agent.Response.GetContainers.Container") + proto.RegisterType((*Response_GetFrameworks)(nil), "mesos.agent.Response.GetFrameworks") + proto.RegisterType((*Response_GetFrameworks_Framework)(nil), "mesos.agent.Response.GetFrameworks.Framework") + proto.RegisterType((*Response_GetExecutors)(nil), "mesos.agent.Response.GetExecutors") + proto.RegisterType((*Response_GetExecutors_Executor)(nil), "mesos.agent.Response.GetExecutors.Executor") + proto.RegisterType((*Response_GetOperations)(nil), "mesos.agent.Response.GetOperations") + proto.RegisterType((*Response_GetTasks)(nil), "mesos.agent.Response.GetTasks") + proto.RegisterType((*Response_GetAgent)(nil), "mesos.agent.Response.GetAgent") + proto.RegisterType((*Response_GetResourceProviders)(nil), "mesos.agent.Response.GetResourceProviders") + proto.RegisterType((*Response_GetResourceProviders_ResourceProvider)(nil), "mesos.agent.Response.GetResourceProviders.ResourceProvider") + proto.RegisterType((*Response_WaitNestedContainer)(nil), "mesos.agent.Response.WaitNestedContainer") + proto.RegisterType((*Response_WaitContainer)(nil), "mesos.agent.Response.WaitContainer") + proto.RegisterType((*ProcessIO)(nil), "mesos.agent.ProcessIO") + proto.RegisterType((*ProcessIO_Data)(nil), "mesos.agent.ProcessIO.Data") + proto.RegisterType((*ProcessIO_Control)(nil), "mesos.agent.ProcessIO.Control") + proto.RegisterType((*ProcessIO_Control_Heartbeat)(nil), "mesos.agent.ProcessIO.Control.Heartbeat") + proto.RegisterEnum("mesos.agent.Call_Type", Call_Type_name, Call_Type_value) + proto.RegisterEnum("mesos.agent.Call_AttachContainerInput_Type", Call_AttachContainerInput_Type_name, Call_AttachContainerInput_Type_value) + proto.RegisterEnum("mesos.agent.Response_Type", Response_Type_name, Response_Type_value) + proto.RegisterEnum("mesos.agent.ProcessIO_Type", ProcessIO_Type_name, ProcessIO_Type_value) + proto.RegisterEnum("mesos.agent.ProcessIO_Data_Type", ProcessIO_Data_Type_name, ProcessIO_Data_Type_value) + proto.RegisterEnum("mesos.agent.ProcessIO_Control_Type", ProcessIO_Control_Type_name, ProcessIO_Control_Type_value) +} +func (x Call_Type) String() string { + s, ok := Call_Type_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x Call_AttachContainerInput_Type) String() string { + s, ok := Call_AttachContainerInput_Type_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x Response_Type) String() string { + s, ok := Response_Type_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x ProcessIO_Type) String() string { + s, ok := ProcessIO_Type_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x ProcessIO_Data_Type) String() string { + s, ok := ProcessIO_Data_Type_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x ProcessIO_Control_Type) String() string { + s, ok := ProcessIO_Control_Type_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *Call) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Call) + if !ok { + that2, ok := that.(Call) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Call") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Call but is nil && this != nil") + } 
else if this == nil { + return fmt.Errorf("that is type *Call but is not nil && this == nil") + } + if this.Type != that1.Type { + return fmt.Errorf("Type this(%v) Not Equal that(%v)", this.Type, that1.Type) + } + if !this.GetMetrics.Equal(that1.GetMetrics) { + return fmt.Errorf("GetMetrics this(%v) Not Equal that(%v)", this.GetMetrics, that1.GetMetrics) + } + if !this.SetLoggingLevel.Equal(that1.SetLoggingLevel) { + return fmt.Errorf("SetLoggingLevel this(%v) Not Equal that(%v)", this.SetLoggingLevel, that1.SetLoggingLevel) + } + if !this.ListFiles.Equal(that1.ListFiles) { + return fmt.Errorf("ListFiles this(%v) Not Equal that(%v)", this.ListFiles, that1.ListFiles) + } + if !this.ReadFile.Equal(that1.ReadFile) { + return fmt.Errorf("ReadFile this(%v) Not Equal that(%v)", this.ReadFile, that1.ReadFile) + } + if !this.GetContainers.Equal(that1.GetContainers) { + return fmt.Errorf("GetContainers this(%v) Not Equal that(%v)", this.GetContainers, that1.GetContainers) + } + if !this.LaunchNestedContainer.Equal(that1.LaunchNestedContainer) { + return fmt.Errorf("LaunchNestedContainer this(%v) Not Equal that(%v)", this.LaunchNestedContainer, that1.LaunchNestedContainer) + } + if !this.WaitNestedContainer.Equal(that1.WaitNestedContainer) { + return fmt.Errorf("WaitNestedContainer this(%v) Not Equal that(%v)", this.WaitNestedContainer, that1.WaitNestedContainer) + } + if !this.KillNestedContainer.Equal(that1.KillNestedContainer) { + return fmt.Errorf("KillNestedContainer this(%v) Not Equal that(%v)", this.KillNestedContainer, that1.KillNestedContainer) + } + if !this.RemoveNestedContainer.Equal(that1.RemoveNestedContainer) { + return fmt.Errorf("RemoveNestedContainer this(%v) Not Equal that(%v)", this.RemoveNestedContainer, that1.RemoveNestedContainer) + } + if !this.LaunchNestedContainerSession.Equal(that1.LaunchNestedContainerSession) { + return fmt.Errorf("LaunchNestedContainerSession this(%v) Not Equal that(%v)", this.LaunchNestedContainerSession, that1.LaunchNestedContainerSession) + } + if !this.AttachContainerInput.Equal(that1.AttachContainerInput) { + return fmt.Errorf("AttachContainerInput this(%v) Not Equal that(%v)", this.AttachContainerInput, that1.AttachContainerInput) + } + if !this.AttachContainerOutput.Equal(that1.AttachContainerOutput) { + return fmt.Errorf("AttachContainerOutput this(%v) Not Equal that(%v)", this.AttachContainerOutput, that1.AttachContainerOutput) + } + if !this.LaunchContainer.Equal(that1.LaunchContainer) { + return fmt.Errorf("LaunchContainer this(%v) Not Equal that(%v)", this.LaunchContainer, that1.LaunchContainer) + } + if !this.WaitContainer.Equal(that1.WaitContainer) { + return fmt.Errorf("WaitContainer this(%v) Not Equal that(%v)", this.WaitContainer, that1.WaitContainer) + } + if !this.KillContainer.Equal(that1.KillContainer) { + return fmt.Errorf("KillContainer this(%v) Not Equal that(%v)", this.KillContainer, that1.KillContainer) + } + if !this.RemoveContainer.Equal(that1.RemoveContainer) { + return fmt.Errorf("RemoveContainer this(%v) Not Equal that(%v)", this.RemoveContainer, that1.RemoveContainer) + } + if !this.AddResourceProviderConfig.Equal(that1.AddResourceProviderConfig) { + return fmt.Errorf("AddResourceProviderConfig this(%v) Not Equal that(%v)", this.AddResourceProviderConfig, that1.AddResourceProviderConfig) + } + if !this.UpdateResourceProviderConfig.Equal(that1.UpdateResourceProviderConfig) { + return fmt.Errorf("UpdateResourceProviderConfig this(%v) Not Equal that(%v)", this.UpdateResourceProviderConfig, that1.UpdateResourceProviderConfig) 
+ } + if !this.RemoveResourceProviderConfig.Equal(that1.RemoveResourceProviderConfig) { + return fmt.Errorf("RemoveResourceProviderConfig this(%v) Not Equal that(%v)", this.RemoveResourceProviderConfig, that1.RemoveResourceProviderConfig) + } + if !this.PruneImages.Equal(that1.PruneImages) { + return fmt.Errorf("PruneImages this(%v) Not Equal that(%v)", this.PruneImages, that1.PruneImages) + } + return nil +} +func (this *Call) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Call) + if !ok { + that2, ok := that.(Call) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + if !this.GetMetrics.Equal(that1.GetMetrics) { + return false + } + if !this.SetLoggingLevel.Equal(that1.SetLoggingLevel) { + return false + } + if !this.ListFiles.Equal(that1.ListFiles) { + return false + } + if !this.ReadFile.Equal(that1.ReadFile) { + return false + } + if !this.GetContainers.Equal(that1.GetContainers) { + return false + } + if !this.LaunchNestedContainer.Equal(that1.LaunchNestedContainer) { + return false + } + if !this.WaitNestedContainer.Equal(that1.WaitNestedContainer) { + return false + } + if !this.KillNestedContainer.Equal(that1.KillNestedContainer) { + return false + } + if !this.RemoveNestedContainer.Equal(that1.RemoveNestedContainer) { + return false + } + if !this.LaunchNestedContainerSession.Equal(that1.LaunchNestedContainerSession) { + return false + } + if !this.AttachContainerInput.Equal(that1.AttachContainerInput) { + return false + } + if !this.AttachContainerOutput.Equal(that1.AttachContainerOutput) { + return false + } + if !this.LaunchContainer.Equal(that1.LaunchContainer) { + return false + } + if !this.WaitContainer.Equal(that1.WaitContainer) { + return false + } + if !this.KillContainer.Equal(that1.KillContainer) { + return false + } + if !this.RemoveContainer.Equal(that1.RemoveContainer) { + return false + } + if !this.AddResourceProviderConfig.Equal(that1.AddResourceProviderConfig) { + return false + } + if !this.UpdateResourceProviderConfig.Equal(that1.UpdateResourceProviderConfig) { + return false + } + if !this.RemoveResourceProviderConfig.Equal(that1.RemoveResourceProviderConfig) { + return false + } + if !this.PruneImages.Equal(that1.PruneImages) { + return false + } + return true +} +func (this *Call_GetMetrics) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Call_GetMetrics) + if !ok { + that2, ok := that.(Call_GetMetrics) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Call_GetMetrics") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Call_GetMetrics but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Call_GetMetrics but is not nil && this == nil") + } + if !this.Timeout.Equal(that1.Timeout) { + return fmt.Errorf("Timeout this(%v) Not Equal that(%v)", this.Timeout, that1.Timeout) + } + return nil +} +func (this *Call_GetMetrics) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Call_GetMetrics) + if !ok { + that2, ok := that.(Call_GetMetrics) + if ok { + that1 = &that2 + } else { + return false + } + } 
+ if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if !this.Timeout.Equal(that1.Timeout) { + return false + } + return true +} +func (this *Call_SetLoggingLevel) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Call_SetLoggingLevel) + if !ok { + that2, ok := that.(Call_SetLoggingLevel) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Call_SetLoggingLevel") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Call_SetLoggingLevel but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Call_SetLoggingLevel but is not nil && this == nil") + } + if this.Level != that1.Level { + return fmt.Errorf("Level this(%v) Not Equal that(%v)", this.Level, that1.Level) + } + if !this.Duration.Equal(&that1.Duration) { + return fmt.Errorf("Duration this(%v) Not Equal that(%v)", this.Duration, that1.Duration) + } + return nil +} +func (this *Call_SetLoggingLevel) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Call_SetLoggingLevel) + if !ok { + that2, ok := that.(Call_SetLoggingLevel) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Level != that1.Level { + return false + } + if !this.Duration.Equal(&that1.Duration) { + return false + } + return true +} +func (this *Call_ListFiles) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Call_ListFiles) + if !ok { + that2, ok := that.(Call_ListFiles) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Call_ListFiles") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Call_ListFiles but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Call_ListFiles but is not nil && this == nil") + } + if this.Path != that1.Path { + return fmt.Errorf("Path this(%v) Not Equal that(%v)", this.Path, that1.Path) + } + return nil +} +func (this *Call_ListFiles) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Call_ListFiles) + if !ok { + that2, ok := that.(Call_ListFiles) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Path != that1.Path { + return false + } + return true +} +func (this *Call_ReadFile) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Call_ReadFile) + if !ok { + that2, ok := that.(Call_ReadFile) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Call_ReadFile") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Call_ReadFile but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Call_ReadFile but is not nil && this == nil") + } + if this.Path != that1.Path { + return fmt.Errorf("Path 
this(%v) Not Equal that(%v)", this.Path, that1.Path) + } + if this.Offset != that1.Offset { + return fmt.Errorf("Offset this(%v) Not Equal that(%v)", this.Offset, that1.Offset) + } + if this.Length != nil && that1.Length != nil { + if *this.Length != *that1.Length { + return fmt.Errorf("Length this(%v) Not Equal that(%v)", *this.Length, *that1.Length) + } + } else if this.Length != nil { + return fmt.Errorf("this.Length == nil && that.Length != nil") + } else if that1.Length != nil { + return fmt.Errorf("Length this(%v) Not Equal that(%v)", this.Length, that1.Length) + } + return nil +} +func (this *Call_ReadFile) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Call_ReadFile) + if !ok { + that2, ok := that.(Call_ReadFile) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Path != that1.Path { + return false + } + if this.Offset != that1.Offset { + return false + } + if this.Length != nil && that1.Length != nil { + if *this.Length != *that1.Length { + return false + } + } else if this.Length != nil { + return false + } else if that1.Length != nil { + return false + } + return true +} +func (this *Call_GetContainers) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Call_GetContainers) + if !ok { + that2, ok := that.(Call_GetContainers) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Call_GetContainers") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Call_GetContainers but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Call_GetContainers but is not nil && this == nil") + } + if this.ShowNested != nil && that1.ShowNested != nil { + if *this.ShowNested != *that1.ShowNested { + return fmt.Errorf("ShowNested this(%v) Not Equal that(%v)", *this.ShowNested, *that1.ShowNested) + } + } else if this.ShowNested != nil { + return fmt.Errorf("this.ShowNested == nil && that.ShowNested != nil") + } else if that1.ShowNested != nil { + return fmt.Errorf("ShowNested this(%v) Not Equal that(%v)", this.ShowNested, that1.ShowNested) + } + if this.ShowStandalone != nil && that1.ShowStandalone != nil { + if *this.ShowStandalone != *that1.ShowStandalone { + return fmt.Errorf("ShowStandalone this(%v) Not Equal that(%v)", *this.ShowStandalone, *that1.ShowStandalone) + } + } else if this.ShowStandalone != nil { + return fmt.Errorf("this.ShowStandalone == nil && that.ShowStandalone != nil") + } else if that1.ShowStandalone != nil { + return fmt.Errorf("ShowStandalone this(%v) Not Equal that(%v)", this.ShowStandalone, that1.ShowStandalone) + } + return nil +} +func (this *Call_GetContainers) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Call_GetContainers) + if !ok { + that2, ok := that.(Call_GetContainers) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.ShowNested != nil && that1.ShowNested != nil { + if *this.ShowNested != *that1.ShowNested { + return false + } + } else if this.ShowNested != nil { + return false + } else if that1.ShowNested != 
[Machine-generated comparison code elided; the extraction flattened this stretch of the patch into run-on lines. It adds the gogo/protobuf `VerboseEqual`/`Equal` implementations for the vendored mesos-go v1 agent API messages — Call_LaunchNestedContainer, Call_WaitNestedContainer, Call_KillNestedContainer, Call_RemoveNestedContainer, Call_LaunchNestedContainerSession, Call_AttachContainerInput, Call_AttachContainerOutput, Call_LaunchContainer, Call_WaitContainer, Call_KillContainer, Call_RemoveContainer, Call_AddResourceProviderConfig, Call_UpdateResourceProviderConfig, Call_RemoveResourceProviderConfig, Call_PruneImages, Response, and the nested Response_Get*/Response_Wait* types (GetHealth, GetFlags, GetVersion, GetMetrics, GetLoggingLevel, ListFiles, ReadFile, GetState, GetContainers, GetFrameworks, GetExecutors, GetOperations, GetTasks, GetAgent, GetResourceProviders, WaitNestedContainer, WaitContainer). Every method follows the same nil-safe, field-by-field comparison pattern; the dump continues mid-method on either side of this placeholder.]
+ return fmt.Errorf("Limitation this(%v) Not Equal that(%v)", this.Limitation, that1.Limitation) + } + if this.Message != nil && that1.Message != nil { + if *this.Message != *that1.Message { + return fmt.Errorf("Message this(%v) Not Equal that(%v)", *this.Message, *that1.Message) + } + } else if this.Message != nil { + return fmt.Errorf("this.Message == nil && that.Message != nil") + } else if that1.Message != nil { + return fmt.Errorf("Message this(%v) Not Equal that(%v)", this.Message, that1.Message) + } + return nil +} +func (this *Response_WaitContainer) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Response_WaitContainer) + if !ok { + that2, ok := that.(Response_WaitContainer) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.ExitStatus != nil && that1.ExitStatus != nil { + if *this.ExitStatus != *that1.ExitStatus { + return false + } + } else if this.ExitStatus != nil { + return false + } else if that1.ExitStatus != nil { + return false + } + if this.State != nil && that1.State != nil { + if *this.State != *that1.State { + return false + } + } else if this.State != nil { + return false + } else if that1.State != nil { + return false + } + if this.Reason != nil && that1.Reason != nil { + if *this.Reason != *that1.Reason { + return false + } + } else if this.Reason != nil { + return false + } else if that1.Reason != nil { + return false + } + if !this.Limitation.Equal(that1.Limitation) { + return false + } + if this.Message != nil && that1.Message != nil { + if *this.Message != *that1.Message { + return false + } + } else if this.Message != nil { + return false + } else if that1.Message != nil { + return false + } + return true +} +func (this *ProcessIO) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*ProcessIO) + if !ok { + that2, ok := that.(ProcessIO) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *ProcessIO") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *ProcessIO but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *ProcessIO but is not nil && this == nil") + } + if this.Type != that1.Type { + return fmt.Errorf("Type this(%v) Not Equal that(%v)", this.Type, that1.Type) + } + if !this.Data.Equal(that1.Data) { + return fmt.Errorf("Data this(%v) Not Equal that(%v)", this.Data, that1.Data) + } + if !this.Control.Equal(that1.Control) { + return fmt.Errorf("Control this(%v) Not Equal that(%v)", this.Control, that1.Control) + } + return nil +} +func (this *ProcessIO) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*ProcessIO) + if !ok { + that2, ok := that.(ProcessIO) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + if !this.Data.Equal(that1.Data) { + return false + } + if !this.Control.Equal(that1.Control) { + return false + } + return true +} +func (this *ProcessIO_Data) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return 
fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*ProcessIO_Data) + if !ok { + that2, ok := that.(ProcessIO_Data) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *ProcessIO_Data") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *ProcessIO_Data but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *ProcessIO_Data but is not nil && this == nil") + } + if this.Type != that1.Type { + return fmt.Errorf("Type this(%v) Not Equal that(%v)", this.Type, that1.Type) + } + if !bytes.Equal(this.Data, that1.Data) { + return fmt.Errorf("Data this(%v) Not Equal that(%v)", this.Data, that1.Data) + } + return nil +} +func (this *ProcessIO_Data) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*ProcessIO_Data) + if !ok { + that2, ok := that.(ProcessIO_Data) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + if !bytes.Equal(this.Data, that1.Data) { + return false + } + return true +} +func (this *ProcessIO_Control) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*ProcessIO_Control) + if !ok { + that2, ok := that.(ProcessIO_Control) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *ProcessIO_Control") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *ProcessIO_Control but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *ProcessIO_Control but is not nil && this == nil") + } + if this.Type != that1.Type { + return fmt.Errorf("Type this(%v) Not Equal that(%v)", this.Type, that1.Type) + } + if !this.TTYInfo.Equal(that1.TTYInfo) { + return fmt.Errorf("TTYInfo this(%v) Not Equal that(%v)", this.TTYInfo, that1.TTYInfo) + } + if !this.Heartbeat.Equal(that1.Heartbeat) { + return fmt.Errorf("Heartbeat this(%v) Not Equal that(%v)", this.Heartbeat, that1.Heartbeat) + } + return nil +} +func (this *ProcessIO_Control) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*ProcessIO_Control) + if !ok { + that2, ok := that.(ProcessIO_Control) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + if !this.TTYInfo.Equal(that1.TTYInfo) { + return false + } + if !this.Heartbeat.Equal(that1.Heartbeat) { + return false + } + return true +} +func (this *ProcessIO_Control_Heartbeat) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*ProcessIO_Control_Heartbeat) + if !ok { + that2, ok := that.(ProcessIO_Control_Heartbeat) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *ProcessIO_Control_Heartbeat") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *ProcessIO_Control_Heartbeat but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type 
*ProcessIO_Control_Heartbeat but is not nil && this == nil") + } + if !this.Interval.Equal(that1.Interval) { + return fmt.Errorf("Interval this(%v) Not Equal that(%v)", this.Interval, that1.Interval) + } + return nil +} +func (this *ProcessIO_Control_Heartbeat) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*ProcessIO_Control_Heartbeat) + if !ok { + that2, ok := that.(ProcessIO_Control_Heartbeat) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if !this.Interval.Equal(that1.Interval) { + return false + } + return true +} +func (this *Call) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 25) + s = append(s, "&agent.Call{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + if this.GetMetrics != nil { + s = append(s, "GetMetrics: "+fmt.Sprintf("%#v", this.GetMetrics)+",\n") + } + if this.SetLoggingLevel != nil { + s = append(s, "SetLoggingLevel: "+fmt.Sprintf("%#v", this.SetLoggingLevel)+",\n") + } + if this.ListFiles != nil { + s = append(s, "ListFiles: "+fmt.Sprintf("%#v", this.ListFiles)+",\n") + } + if this.ReadFile != nil { + s = append(s, "ReadFile: "+fmt.Sprintf("%#v", this.ReadFile)+",\n") + } + if this.GetContainers != nil { + s = append(s, "GetContainers: "+fmt.Sprintf("%#v", this.GetContainers)+",\n") + } + if this.LaunchNestedContainer != nil { + s = append(s, "LaunchNestedContainer: "+fmt.Sprintf("%#v", this.LaunchNestedContainer)+",\n") + } + if this.WaitNestedContainer != nil { + s = append(s, "WaitNestedContainer: "+fmt.Sprintf("%#v", this.WaitNestedContainer)+",\n") + } + if this.KillNestedContainer != nil { + s = append(s, "KillNestedContainer: "+fmt.Sprintf("%#v", this.KillNestedContainer)+",\n") + } + if this.RemoveNestedContainer != nil { + s = append(s, "RemoveNestedContainer: "+fmt.Sprintf("%#v", this.RemoveNestedContainer)+",\n") + } + if this.LaunchNestedContainerSession != nil { + s = append(s, "LaunchNestedContainerSession: "+fmt.Sprintf("%#v", this.LaunchNestedContainerSession)+",\n") + } + if this.AttachContainerInput != nil { + s = append(s, "AttachContainerInput: "+fmt.Sprintf("%#v", this.AttachContainerInput)+",\n") + } + if this.AttachContainerOutput != nil { + s = append(s, "AttachContainerOutput: "+fmt.Sprintf("%#v", this.AttachContainerOutput)+",\n") + } + if this.LaunchContainer != nil { + s = append(s, "LaunchContainer: "+fmt.Sprintf("%#v", this.LaunchContainer)+",\n") + } + if this.WaitContainer != nil { + s = append(s, "WaitContainer: "+fmt.Sprintf("%#v", this.WaitContainer)+",\n") + } + if this.KillContainer != nil { + s = append(s, "KillContainer: "+fmt.Sprintf("%#v", this.KillContainer)+",\n") + } + if this.RemoveContainer != nil { + s = append(s, "RemoveContainer: "+fmt.Sprintf("%#v", this.RemoveContainer)+",\n") + } + if this.AddResourceProviderConfig != nil { + s = append(s, "AddResourceProviderConfig: "+fmt.Sprintf("%#v", this.AddResourceProviderConfig)+",\n") + } + if this.UpdateResourceProviderConfig != nil { + s = append(s, "UpdateResourceProviderConfig: "+fmt.Sprintf("%#v", this.UpdateResourceProviderConfig)+",\n") + } + if this.RemoveResourceProviderConfig != nil { + s = append(s, "RemoveResourceProviderConfig: "+fmt.Sprintf("%#v", this.RemoveResourceProviderConfig)+",\n") + } + if this.PruneImages != nil { + s = append(s, "PruneImages: "+fmt.Sprintf("%#v", this.PruneImages)+",\n") + 
} + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Call_GetMetrics) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&agent.Call_GetMetrics{") + if this.Timeout != nil { + s = append(s, "Timeout: "+fmt.Sprintf("%#v", this.Timeout)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Call_SetLoggingLevel) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&agent.Call_SetLoggingLevel{") + s = append(s, "Level: "+fmt.Sprintf("%#v", this.Level)+",\n") + s = append(s, "Duration: "+strings.Replace(this.Duration.GoString(), `&`, ``, 1)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Call_ListFiles) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&agent.Call_ListFiles{") + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Call_ReadFile) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&agent.Call_ReadFile{") + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + s = append(s, "Offset: "+fmt.Sprintf("%#v", this.Offset)+",\n") + if this.Length != nil { + s = append(s, "Length: "+valueToGoStringAgent(this.Length, "uint64")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Call_GetContainers) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&agent.Call_GetContainers{") + if this.ShowNested != nil { + s = append(s, "ShowNested: "+valueToGoStringAgent(this.ShowNested, "bool")+",\n") + } + if this.ShowStandalone != nil { + s = append(s, "ShowStandalone: "+valueToGoStringAgent(this.ShowStandalone, "bool")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Call_LaunchNestedContainer) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&agent.Call_LaunchNestedContainer{") + s = append(s, "ContainerID: "+strings.Replace(this.ContainerID.GoString(), `&`, ``, 1)+",\n") + if this.Command != nil { + s = append(s, "Command: "+fmt.Sprintf("%#v", this.Command)+",\n") + } + if this.Container != nil { + s = append(s, "Container: "+fmt.Sprintf("%#v", this.Container)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Call_WaitNestedContainer) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&agent.Call_WaitNestedContainer{") + s = append(s, "ContainerID: "+strings.Replace(this.ContainerID.GoString(), `&`, ``, 1)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Call_KillNestedContainer) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&agent.Call_KillNestedContainer{") + s = append(s, "ContainerID: "+strings.Replace(this.ContainerID.GoString(), `&`, ``, 1)+",\n") + if this.Signal != nil { + s = append(s, "Signal: "+valueToGoStringAgent(this.Signal, "int32")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Call_RemoveNestedContainer) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&agent.Call_RemoveNestedContainer{") + s = append(s, "ContainerID: "+strings.Replace(this.ContainerID.GoString(), `&`, ``, 1)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func 
(this *Call_LaunchNestedContainerSession) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&agent.Call_LaunchNestedContainerSession{") + s = append(s, "ContainerID: "+strings.Replace(this.ContainerID.GoString(), `&`, ``, 1)+",\n") + if this.Command != nil { + s = append(s, "Command: "+fmt.Sprintf("%#v", this.Command)+",\n") + } + if this.Container != nil { + s = append(s, "Container: "+fmt.Sprintf("%#v", this.Container)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Call_AttachContainerInput) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&agent.Call_AttachContainerInput{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + if this.ContainerID != nil { + s = append(s, "ContainerID: "+fmt.Sprintf("%#v", this.ContainerID)+",\n") + } + if this.ProcessIO != nil { + s = append(s, "ProcessIO: "+fmt.Sprintf("%#v", this.ProcessIO)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Call_AttachContainerOutput) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&agent.Call_AttachContainerOutput{") + s = append(s, "ContainerID: "+strings.Replace(this.ContainerID.GoString(), `&`, ``, 1)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Call_LaunchContainer) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&agent.Call_LaunchContainer{") + s = append(s, "ContainerID: "+strings.Replace(this.ContainerID.GoString(), `&`, ``, 1)+",\n") + if this.Command != nil { + s = append(s, "Command: "+fmt.Sprintf("%#v", this.Command)+",\n") + } + if this.Resources != nil { + s = append(s, "Resources: "+fmt.Sprintf("%#v", this.Resources)+",\n") + } + if this.Container != nil { + s = append(s, "Container: "+fmt.Sprintf("%#v", this.Container)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Call_WaitContainer) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&agent.Call_WaitContainer{") + s = append(s, "ContainerID: "+strings.Replace(this.ContainerID.GoString(), `&`, ``, 1)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Call_KillContainer) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&agent.Call_KillContainer{") + s = append(s, "ContainerID: "+strings.Replace(this.ContainerID.GoString(), `&`, ``, 1)+",\n") + if this.Signal != nil { + s = append(s, "Signal: "+valueToGoStringAgent(this.Signal, "int32")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Call_RemoveContainer) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&agent.Call_RemoveContainer{") + s = append(s, "ContainerID: "+strings.Replace(this.ContainerID.GoString(), `&`, ``, 1)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Call_AddResourceProviderConfig) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&agent.Call_AddResourceProviderConfig{") + s = append(s, "Info: "+strings.Replace(this.Info.GoString(), `&`, ``, 1)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Call_UpdateResourceProviderConfig) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, 
"&agent.Call_UpdateResourceProviderConfig{") + s = append(s, "Info: "+strings.Replace(this.Info.GoString(), `&`, ``, 1)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Call_RemoveResourceProviderConfig) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&agent.Call_RemoveResourceProviderConfig{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Call_PruneImages) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&agent.Call_PruneImages{") + if this.ExcludedImages != nil { + s = append(s, "ExcludedImages: "+fmt.Sprintf("%#v", this.ExcludedImages)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Response) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 22) + s = append(s, "&agent.Response{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + if this.GetHealth != nil { + s = append(s, "GetHealth: "+fmt.Sprintf("%#v", this.GetHealth)+",\n") + } + if this.GetFlags != nil { + s = append(s, "GetFlags: "+fmt.Sprintf("%#v", this.GetFlags)+",\n") + } + if this.GetVersion != nil { + s = append(s, "GetVersion: "+fmt.Sprintf("%#v", this.GetVersion)+",\n") + } + if this.GetMetrics != nil { + s = append(s, "GetMetrics: "+fmt.Sprintf("%#v", this.GetMetrics)+",\n") + } + if this.GetLoggingLevel != nil { + s = append(s, "GetLoggingLevel: "+fmt.Sprintf("%#v", this.GetLoggingLevel)+",\n") + } + if this.ListFiles != nil { + s = append(s, "ListFiles: "+fmt.Sprintf("%#v", this.ListFiles)+",\n") + } + if this.ReadFile != nil { + s = append(s, "ReadFile: "+fmt.Sprintf("%#v", this.ReadFile)+",\n") + } + if this.GetState != nil { + s = append(s, "GetState: "+fmt.Sprintf("%#v", this.GetState)+",\n") + } + if this.GetContainers != nil { + s = append(s, "GetContainers: "+fmt.Sprintf("%#v", this.GetContainers)+",\n") + } + if this.GetFrameworks != nil { + s = append(s, "GetFrameworks: "+fmt.Sprintf("%#v", this.GetFrameworks)+",\n") + } + if this.GetExecutors != nil { + s = append(s, "GetExecutors: "+fmt.Sprintf("%#v", this.GetExecutors)+",\n") + } + if this.GetOperations != nil { + s = append(s, "GetOperations: "+fmt.Sprintf("%#v", this.GetOperations)+",\n") + } + if this.GetTasks != nil { + s = append(s, "GetTasks: "+fmt.Sprintf("%#v", this.GetTasks)+",\n") + } + if this.GetAgent != nil { + s = append(s, "GetAgent: "+fmt.Sprintf("%#v", this.GetAgent)+",\n") + } + if this.GetResourceProviders != nil { + s = append(s, "GetResourceProviders: "+fmt.Sprintf("%#v", this.GetResourceProviders)+",\n") + } + if this.WaitNestedContainer != nil { + s = append(s, "WaitNestedContainer: "+fmt.Sprintf("%#v", this.WaitNestedContainer)+",\n") + } + if this.WaitContainer != nil { + s = append(s, "WaitContainer: "+fmt.Sprintf("%#v", this.WaitContainer)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Response_GetHealth) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&agent.Response_GetHealth{") + s = append(s, "Healthy: "+fmt.Sprintf("%#v", this.Healthy)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Response_GetFlags) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&agent.Response_GetFlags{") + if this.Flags != nil { + s = append(s, 
"Flags: "+fmt.Sprintf("%#v", this.Flags)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Response_GetVersion) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&agent.Response_GetVersion{") + s = append(s, "VersionInfo: "+strings.Replace(this.VersionInfo.GoString(), `&`, ``, 1)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Response_GetMetrics) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&agent.Response_GetMetrics{") + if this.Metrics != nil { + s = append(s, "Metrics: "+fmt.Sprintf("%#v", this.Metrics)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Response_GetLoggingLevel) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&agent.Response_GetLoggingLevel{") + s = append(s, "Level: "+fmt.Sprintf("%#v", this.Level)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Response_ListFiles) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&agent.Response_ListFiles{") + if this.FileInfos != nil { + s = append(s, "FileInfos: "+fmt.Sprintf("%#v", this.FileInfos)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Response_ReadFile) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&agent.Response_ReadFile{") + s = append(s, "Size: "+fmt.Sprintf("%#v", this.Size)+",\n") + if this.Data != nil { + s = append(s, "Data: "+valueToGoStringAgent(this.Data, "byte")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Response_GetState) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&agent.Response_GetState{") + if this.GetTasks != nil { + s = append(s, "GetTasks: "+fmt.Sprintf("%#v", this.GetTasks)+",\n") + } + if this.GetExecutors != nil { + s = append(s, "GetExecutors: "+fmt.Sprintf("%#v", this.GetExecutors)+",\n") + } + if this.GetFrameworks != nil { + s = append(s, "GetFrameworks: "+fmt.Sprintf("%#v", this.GetFrameworks)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Response_GetContainers) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&agent.Response_GetContainers{") + if this.Containers != nil { + s = append(s, "Containers: "+fmt.Sprintf("%#v", this.Containers)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Response_GetContainers_Container) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&agent.Response_GetContainers_Container{") + if this.FrameworkID != nil { + s = append(s, "FrameworkID: "+fmt.Sprintf("%#v", this.FrameworkID)+",\n") + } + if this.ExecutorID != nil { + s = append(s, "ExecutorID: "+fmt.Sprintf("%#v", this.ExecutorID)+",\n") + } + if this.ExecutorName != nil { + s = append(s, "ExecutorName: "+valueToGoStringAgent(this.ExecutorName, "string")+",\n") + } + s = append(s, "ContainerID: "+strings.Replace(this.ContainerID.GoString(), `&`, ``, 1)+",\n") + if this.ContainerStatus != nil { + s = append(s, "ContainerStatus: "+fmt.Sprintf("%#v", this.ContainerStatus)+",\n") + } + if this.ResourceStatistics != nil { + s = append(s, "ResourceStatistics: "+fmt.Sprintf("%#v", this.ResourceStatistics)+",\n") + } + s = append(s, "}") + return strings.Join(s, 
"") +} +func (this *Response_GetFrameworks) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&agent.Response_GetFrameworks{") + if this.Frameworks != nil { + s = append(s, "Frameworks: "+fmt.Sprintf("%#v", this.Frameworks)+",\n") + } + if this.CompletedFrameworks != nil { + s = append(s, "CompletedFrameworks: "+fmt.Sprintf("%#v", this.CompletedFrameworks)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Response_GetFrameworks_Framework) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&agent.Response_GetFrameworks_Framework{") + s = append(s, "FrameworkInfo: "+strings.Replace(this.FrameworkInfo.GoString(), `&`, ``, 1)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Response_GetExecutors) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&agent.Response_GetExecutors{") + if this.Executors != nil { + s = append(s, "Executors: "+fmt.Sprintf("%#v", this.Executors)+",\n") + } + if this.CompletedExecutors != nil { + s = append(s, "CompletedExecutors: "+fmt.Sprintf("%#v", this.CompletedExecutors)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Response_GetExecutors_Executor) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&agent.Response_GetExecutors_Executor{") + s = append(s, "ExecutorInfo: "+strings.Replace(this.ExecutorInfo.GoString(), `&`, ``, 1)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Response_GetOperations) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&agent.Response_GetOperations{") + if this.Operations != nil { + s = append(s, "Operations: "+fmt.Sprintf("%#v", this.Operations)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Response_GetTasks) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&agent.Response_GetTasks{") + if this.PendingTasks != nil { + s = append(s, "PendingTasks: "+fmt.Sprintf("%#v", this.PendingTasks)+",\n") + } + if this.QueuedTasks != nil { + s = append(s, "QueuedTasks: "+fmt.Sprintf("%#v", this.QueuedTasks)+",\n") + } + if this.LaunchedTasks != nil { + s = append(s, "LaunchedTasks: "+fmt.Sprintf("%#v", this.LaunchedTasks)+",\n") + } + if this.TerminatedTasks != nil { + s = append(s, "TerminatedTasks: "+fmt.Sprintf("%#v", this.TerminatedTasks)+",\n") + } + if this.CompletedTasks != nil { + s = append(s, "CompletedTasks: "+fmt.Sprintf("%#v", this.CompletedTasks)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Response_GetAgent) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&agent.Response_GetAgent{") + if this.AgentInfo != nil { + s = append(s, "AgentInfo: "+fmt.Sprintf("%#v", this.AgentInfo)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Response_GetResourceProviders) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&agent.Response_GetResourceProviders{") + if this.ResourceProviders != nil { + s = append(s, "ResourceProviders: "+fmt.Sprintf("%#v", this.ResourceProviders)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Response_GetResourceProviders_ResourceProvider) GoString() string { + if this == nil { + 
return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&agent.Response_GetResourceProviders_ResourceProvider{") + s = append(s, "ResourceProviderInfo: "+strings.Replace(this.ResourceProviderInfo.GoString(), `&`, ``, 1)+",\n") + if this.TotalResources != nil { + s = append(s, "TotalResources: "+fmt.Sprintf("%#v", this.TotalResources)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Response_WaitNestedContainer) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&agent.Response_WaitNestedContainer{") + if this.ExitStatus != nil { + s = append(s, "ExitStatus: "+valueToGoStringAgent(this.ExitStatus, "int32")+",\n") + } + if this.State != nil { + s = append(s, "State: "+valueToGoStringAgent(this.State, "mesos.TaskState")+",\n") + } + if this.Reason != nil { + s = append(s, "Reason: "+valueToGoStringAgent(this.Reason, "mesos.TaskStatus_Reason")+",\n") + } + if this.Limitation != nil { + s = append(s, "Limitation: "+fmt.Sprintf("%#v", this.Limitation)+",\n") + } + if this.Message != nil { + s = append(s, "Message: "+valueToGoStringAgent(this.Message, "string")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Response_WaitContainer) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&agent.Response_WaitContainer{") + if this.ExitStatus != nil { + s = append(s, "ExitStatus: "+valueToGoStringAgent(this.ExitStatus, "int32")+",\n") + } + if this.State != nil { + s = append(s, "State: "+valueToGoStringAgent(this.State, "mesos.TaskState")+",\n") + } + if this.Reason != nil { + s = append(s, "Reason: "+valueToGoStringAgent(this.Reason, "mesos.TaskStatus_Reason")+",\n") + } + if this.Limitation != nil { + s = append(s, "Limitation: "+fmt.Sprintf("%#v", this.Limitation)+",\n") + } + if this.Message != nil { + s = append(s, "Message: "+valueToGoStringAgent(this.Message, "string")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ProcessIO) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&agent.ProcessIO{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + if this.Data != nil { + s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") + } + if this.Control != nil { + s = append(s, "Control: "+fmt.Sprintf("%#v", this.Control)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ProcessIO_Data) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&agent.ProcessIO_Data{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + if this.Data != nil { + s = append(s, "Data: "+valueToGoStringAgent(this.Data, "byte")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ProcessIO_Control) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&agent.ProcessIO_Control{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + if this.TTYInfo != nil { + s = append(s, "TTYInfo: "+fmt.Sprintf("%#v", this.TTYInfo)+",\n") + } + if this.Heartbeat != nil { + s = append(s, "Heartbeat: "+fmt.Sprintf("%#v", this.Heartbeat)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ProcessIO_Control_Heartbeat) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&agent.ProcessIO_Control_Heartbeat{") + if this.Interval != nil { + s = 
append(s, "Interval: "+fmt.Sprintf("%#v", this.Interval)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringAgent(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Call) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Call) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.Type)) + if m.GetMetrics != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.GetMetrics.ProtoSize())) + n1, err := m.GetMetrics.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.SetLoggingLevel != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.SetLoggingLevel.ProtoSize())) + n2, err := m.SetLoggingLevel.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.ListFiles != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.ListFiles.ProtoSize())) + n3, err := m.ListFiles.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.ReadFile != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.ReadFile.ProtoSize())) + n4, err := m.ReadFile.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if m.LaunchNestedContainer != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.LaunchNestedContainer.ProtoSize())) + n5, err := m.LaunchNestedContainer.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + if m.WaitNestedContainer != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.WaitNestedContainer.ProtoSize())) + n6, err := m.WaitNestedContainer.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if m.KillNestedContainer != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.KillNestedContainer.ProtoSize())) + n7, err := m.KillNestedContainer.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.LaunchNestedContainerSession != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.LaunchNestedContainerSession.ProtoSize())) + n8, err := m.LaunchNestedContainerSession.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.AttachContainerInput != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.AttachContainerInput.ProtoSize())) + n9, err := m.AttachContainerInput.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + if m.AttachContainerOutput != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.AttachContainerOutput.ProtoSize())) + n10, err := m.AttachContainerOutput.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + if m.RemoveNestedContainer != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.RemoveNestedContainer.ProtoSize())) + n11, err := m.RemoveNestedContainer.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + if m.LaunchContainer != nil { + dAtA[i] = 0x6a + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.LaunchContainer.ProtoSize())) + n12, err := m.LaunchContainer.MarshalTo(dAtA[i:]) + if err != nil { + return 
0, err + } + i += n12 + } + if m.WaitContainer != nil { + dAtA[i] = 0x72 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.WaitContainer.ProtoSize())) + n13, err := m.WaitContainer.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + if m.KillContainer != nil { + dAtA[i] = 0x7a + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.KillContainer.ProtoSize())) + n14, err := m.KillContainer.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + if m.RemoveContainer != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.RemoveContainer.ProtoSize())) + n15, err := m.RemoveContainer.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + if m.AddResourceProviderConfig != nil { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.AddResourceProviderConfig.ProtoSize())) + n16, err := m.AddResourceProviderConfig.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + if m.UpdateResourceProviderConfig != nil { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.UpdateResourceProviderConfig.ProtoSize())) + n17, err := m.UpdateResourceProviderConfig.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + if m.RemoveResourceProviderConfig != nil { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.RemoveResourceProviderConfig.ProtoSize())) + n18, err := m.RemoveResourceProviderConfig.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + if m.GetContainers != nil { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.GetContainers.ProtoSize())) + n19, err := m.GetContainers.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + if m.PruneImages != nil { + dAtA[i] = 0xaa + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.PruneImages.ProtoSize())) + n20, err := m.PruneImages.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + } + return i, nil +} + +func (m *Call_GetMetrics) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Call_GetMetrics) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Timeout != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.Timeout.ProtoSize())) + n21, err := m.Timeout.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + } + return i, nil +} + +func (m *Call_SetLoggingLevel) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Call_SetLoggingLevel) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.Level)) + dAtA[i] = 0x12 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.Duration.ProtoSize())) + n22, err := m.Duration.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + return i, nil +} + +func (m *Call_ListFiles) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Call_ListFiles) MarshalTo(dAtA []byte) (int, error) { + var i int + 
_ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + return i, nil +} + +func (m *Call_ReadFile) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Call_ReadFile) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + dAtA[i] = 0x10 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.Offset)) + if m.Length != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintAgent(dAtA, i, uint64(*m.Length)) + } + return i, nil +} + +func (m *Call_GetContainers) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Call_GetContainers) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ShowNested != nil { + dAtA[i] = 0x8 + i++ + if *m.ShowNested { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.ShowStandalone != nil { + dAtA[i] = 0x10 + i++ + if *m.ShowStandalone { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *Call_LaunchNestedContainer) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Call_LaunchNestedContainer) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.ContainerID.ProtoSize())) + n23, err := m.ContainerID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + if m.Command != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.Command.ProtoSize())) + n24, err := m.Command.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + } + if m.Container != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.Container.ProtoSize())) + n25, err := m.Container.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + } + return i, nil +} + +func (m *Call_WaitNestedContainer) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Call_WaitNestedContainer) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.ContainerID.ProtoSize())) + n26, err := m.ContainerID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + return i, nil +} + +func (m *Call_KillNestedContainer) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Call_KillNestedContainer) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.ContainerID.ProtoSize())) + n27, err := m.ContainerID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + if m.Signal != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintAgent(dAtA, i, uint64(*m.Signal)) + } + return i, nil +} + +func (m 
*Call_RemoveNestedContainer) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Call_RemoveNestedContainer) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.ContainerID.ProtoSize())) + n28, err := m.ContainerID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + return i, nil +} + +func (m *Call_LaunchNestedContainerSession) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Call_LaunchNestedContainerSession) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.ContainerID.ProtoSize())) + n29, err := m.ContainerID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + if m.Command != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.Command.ProtoSize())) + n30, err := m.Command.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + } + if m.Container != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.Container.ProtoSize())) + n31, err := m.Container.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + } + return i, nil +} + +func (m *Call_AttachContainerInput) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Call_AttachContainerInput) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.Type)) + if m.ContainerID != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.ContainerID.ProtoSize())) + n32, err := m.ContainerID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + } + if m.ProcessIO != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.ProcessIO.ProtoSize())) + n33, err := m.ProcessIO.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + } + return i, nil +} + +func (m *Call_AttachContainerOutput) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Call_AttachContainerOutput) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.ContainerID.ProtoSize())) + n34, err := m.ContainerID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + return i, nil +} + +func (m *Call_LaunchContainer) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Call_LaunchContainer) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.ContainerID.ProtoSize())) + n35, err := m.ContainerID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + if m.Command != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintAgent(dAtA, i, 
uint64(m.Command.ProtoSize())) + n36, err := m.Command.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 + } + if len(m.Resources) > 0 { + for _, msg := range m.Resources { + dAtA[i] = 0x1a + i++ + i = encodeVarintAgent(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Container != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.Container.ProtoSize())) + n37, err := m.Container.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 + } + return i, nil +} + +func (m *Call_WaitContainer) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Call_WaitContainer) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.ContainerID.ProtoSize())) + n38, err := m.ContainerID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + return i, nil +} + +func (m *Call_KillContainer) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Call_KillContainer) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.ContainerID.ProtoSize())) + n39, err := m.ContainerID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + if m.Signal != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintAgent(dAtA, i, uint64(*m.Signal)) + } + return i, nil +} + +func (m *Call_RemoveContainer) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Call_RemoveContainer) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.ContainerID.ProtoSize())) + n40, err := m.ContainerID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 + return i, nil +} + +func (m *Call_AddResourceProviderConfig) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Call_AddResourceProviderConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.Info.ProtoSize())) + n41, err := m.Info.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n41 + return i, nil +} + +func (m *Call_UpdateResourceProviderConfig) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Call_UpdateResourceProviderConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.Info.ProtoSize())) + n42, err := m.Info.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n42 + return i, nil +} + +func (m *Call_RemoveResourceProviderConfig) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := 
m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Call_RemoveResourceProviderConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintAgent(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + return i, nil +} + +func (m *Call_PruneImages) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Call_PruneImages) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ExcludedImages) > 0 { + for _, msg := range m.ExcludedImages { + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Response) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Response) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.Type)) + if m.GetHealth != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.GetHealth.ProtoSize())) + n43, err := m.GetHealth.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n43 + } + if m.GetFlags != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.GetFlags.ProtoSize())) + n44, err := m.GetFlags.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n44 + } + if m.GetVersion != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.GetVersion.ProtoSize())) + n45, err := m.GetVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n45 + } + if m.GetMetrics != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.GetMetrics.ProtoSize())) + n46, err := m.GetMetrics.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n46 + } + if m.GetLoggingLevel != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.GetLoggingLevel.ProtoSize())) + n47, err := m.GetLoggingLevel.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n47 + } + if m.ListFiles != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.ListFiles.ProtoSize())) + n48, err := m.ListFiles.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n48 + } + if m.ReadFile != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.ReadFile.ProtoSize())) + n49, err := m.ReadFile.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n49 + } + if m.GetState != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.GetState.ProtoSize())) + n50, err := m.GetState.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n50 + } + if m.GetContainers != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.GetContainers.ProtoSize())) + n51, err := m.GetContainers.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n51 + } + if m.GetFrameworks != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.GetFrameworks.ProtoSize())) + n52, err := m.GetFrameworks.MarshalTo(dAtA[i:]) + if err != nil { + 
return 0, err + } + i += n52 + } + if m.GetExecutors != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.GetExecutors.ProtoSize())) + n53, err := m.GetExecutors.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n53 + } + if m.GetTasks != nil { + dAtA[i] = 0x6a + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.GetTasks.ProtoSize())) + n54, err := m.GetTasks.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n54 + } + if m.WaitNestedContainer != nil { + dAtA[i] = 0x72 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.WaitNestedContainer.ProtoSize())) + n55, err := m.WaitNestedContainer.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n55 + } + if m.GetAgent != nil { + dAtA[i] = 0x7a + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.GetAgent.ProtoSize())) + n56, err := m.GetAgent.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n56 + } + if m.WaitContainer != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.WaitContainer.ProtoSize())) + n57, err := m.WaitContainer.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n57 + } + if m.GetResourceProviders != nil { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.GetResourceProviders.ProtoSize())) + n58, err := m.GetResourceProviders.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n58 + } + if m.GetOperations != nil { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.GetOperations.ProtoSize())) + n59, err := m.GetOperations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n59 + } + return i, nil +} + +func (m *Response_GetHealth) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Response_GetHealth) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + if m.Healthy { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *Response_GetFlags) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Response_GetFlags) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Flags) > 0 { + for _, msg := range m.Flags { + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Response_GetVersion) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Response_GetVersion) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.VersionInfo.ProtoSize())) + n60, err := m.VersionInfo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n60 + return i, nil +} + +func (m *Response_GetMetrics) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Response_GetMetrics) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i 
+ var l int + _ = l + if len(m.Metrics) > 0 { + for _, msg := range m.Metrics { + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Response_GetLoggingLevel) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Response_GetLoggingLevel) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.Level)) + return i, nil +} + +func (m *Response_ListFiles) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Response_ListFiles) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.FileInfos) > 0 { + for _, msg := range m.FileInfos { + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Response_ReadFile) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Response_ReadFile) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.Size)) + if m.Data == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("data") + } else { + dAtA[i] = 0x12 + i++ + i = encodeVarintAgent(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + return i, nil +} + +func (m *Response_GetState) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Response_GetState) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.GetTasks != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.GetTasks.ProtoSize())) + n61, err := m.GetTasks.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n61 + } + if m.GetExecutors != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.GetExecutors.ProtoSize())) + n62, err := m.GetExecutors.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n62 + } + if m.GetFrameworks != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.GetFrameworks.ProtoSize())) + n63, err := m.GetFrameworks.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n63 + } + return i, nil +} + +func (m *Response_GetContainers) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Response_GetContainers) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Containers) > 0 { + for _, msg := range m.Containers { + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Response_GetContainers_Container) 
Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Response_GetContainers_Container) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.FrameworkID != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.FrameworkID.ProtoSize())) + n64, err := m.FrameworkID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n64 + } + if m.ExecutorID != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.ExecutorID.ProtoSize())) + n65, err := m.ExecutorID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n65 + } + if m.ExecutorName != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintAgent(dAtA, i, uint64(len(*m.ExecutorName))) + i += copy(dAtA[i:], *m.ExecutorName) + } + dAtA[i] = 0x22 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.ContainerID.ProtoSize())) + n66, err := m.ContainerID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n66 + if m.ContainerStatus != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.ContainerStatus.ProtoSize())) + n67, err := m.ContainerStatus.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n67 + } + if m.ResourceStatistics != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.ResourceStatistics.ProtoSize())) + n68, err := m.ResourceStatistics.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n68 + } + return i, nil +} + +func (m *Response_GetFrameworks) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Response_GetFrameworks) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Frameworks) > 0 { + for _, msg := range m.Frameworks { + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.CompletedFrameworks) > 0 { + for _, msg := range m.CompletedFrameworks { + dAtA[i] = 0x12 + i++ + i = encodeVarintAgent(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Response_GetFrameworks_Framework) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Response_GetFrameworks_Framework) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.FrameworkInfo.ProtoSize())) + n69, err := m.FrameworkInfo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n69 + return i, nil +} + +func (m *Response_GetExecutors) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Response_GetExecutors) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Executors) > 0 { + for _, msg := range m.Executors { + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n 
+ } + } + if len(m.CompletedExecutors) > 0 { + for _, msg := range m.CompletedExecutors { + dAtA[i] = 0x12 + i++ + i = encodeVarintAgent(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Response_GetExecutors_Executor) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Response_GetExecutors_Executor) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.ExecutorInfo.ProtoSize())) + n70, err := m.ExecutorInfo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n70 + return i, nil +} + +func (m *Response_GetOperations) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Response_GetOperations) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Operations) > 0 { + for _, msg := range m.Operations { + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Response_GetTasks) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Response_GetTasks) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.PendingTasks) > 0 { + for _, msg := range m.PendingTasks { + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.QueuedTasks) > 0 { + for _, msg := range m.QueuedTasks { + dAtA[i] = 0x12 + i++ + i = encodeVarintAgent(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.LaunchedTasks) > 0 { + for _, msg := range m.LaunchedTasks { + dAtA[i] = 0x1a + i++ + i = encodeVarintAgent(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.TerminatedTasks) > 0 { + for _, msg := range m.TerminatedTasks { + dAtA[i] = 0x22 + i++ + i = encodeVarintAgent(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.CompletedTasks) > 0 { + for _, msg := range m.CompletedTasks { + dAtA[i] = 0x2a + i++ + i = encodeVarintAgent(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Response_GetAgent) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Response_GetAgent) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.AgentInfo != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.AgentInfo.ProtoSize())) + n71, err := m.AgentInfo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n71 + } + return i, nil +} + 
+func (m *Response_GetResourceProviders) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Response_GetResourceProviders) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ResourceProviders) > 0 { + for _, msg := range m.ResourceProviders { + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Response_GetResourceProviders_ResourceProvider) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Response_GetResourceProviders_ResourceProvider) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.ResourceProviderInfo.ProtoSize())) + n72, err := m.ResourceProviderInfo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n72 + if len(m.TotalResources) > 0 { + for _, msg := range m.TotalResources { + dAtA[i] = 0x12 + i++ + i = encodeVarintAgent(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Response_WaitNestedContainer) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Response_WaitNestedContainer) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ExitStatus != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintAgent(dAtA, i, uint64(*m.ExitStatus)) + } + if m.State != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintAgent(dAtA, i, uint64(*m.State)) + } + if m.Reason != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintAgent(dAtA, i, uint64(*m.Reason)) + } + if m.Limitation != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.Limitation.ProtoSize())) + n73, err := m.Limitation.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n73 + } + if m.Message != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintAgent(dAtA, i, uint64(len(*m.Message))) + i += copy(dAtA[i:], *m.Message) + } + return i, nil +} + +func (m *Response_WaitContainer) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Response_WaitContainer) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ExitStatus != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintAgent(dAtA, i, uint64(*m.ExitStatus)) + } + if m.State != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintAgent(dAtA, i, uint64(*m.State)) + } + if m.Reason != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintAgent(dAtA, i, uint64(*m.Reason)) + } + if m.Limitation != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.Limitation.ProtoSize())) + n74, err := m.Limitation.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n74 + } + if m.Message != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintAgent(dAtA, i, uint64(len(*m.Message))) + i += copy(dAtA[i:], *m.Message) + } + return i, nil +} + +func (m *ProcessIO) 
Marshal() (dAtA []byte, err error) {
+	size := m.ProtoSize()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ProcessIO) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0x8
+	i++
+	i = encodeVarintAgent(dAtA, i, uint64(m.Type))
+	if m.Data != nil {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintAgent(dAtA, i, uint64(m.Data.ProtoSize()))
+		n75, err := m.Data.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n75
+	}
+	if m.Control != nil {
+		dAtA[i] = 0x1a
+		i++
+		i = encodeVarintAgent(dAtA, i, uint64(m.Control.ProtoSize()))
+		n76, err := m.Control.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n76
+	}
+	return i, nil
+}
+
+func (m *ProcessIO_Data) Marshal() (dAtA []byte, err error) {
+	size := m.ProtoSize()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ProcessIO_Data) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0x8
+	i++
+	i = encodeVarintAgent(dAtA, i, uint64(m.Type))
+	if m.Data != nil {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintAgent(dAtA, i, uint64(len(m.Data)))
+		i += copy(dAtA[i:], m.Data)
+	}
+	return i, nil
+}
+
+func (m *ProcessIO_Control) Marshal() (dAtA []byte, err error) {
+	size := m.ProtoSize()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ProcessIO_Control) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0x8
+	i++
+	i = encodeVarintAgent(dAtA, i, uint64(m.Type))
+	if m.TTYInfo != nil {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintAgent(dAtA, i, uint64(m.TTYInfo.ProtoSize()))
+		n77, err := m.TTYInfo.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n77
+	}
+	if m.Heartbeat != nil {
+		dAtA[i] = 0x1a
+		i++
+		i = encodeVarintAgent(dAtA, i, uint64(m.Heartbeat.ProtoSize()))
+		n78, err := m.Heartbeat.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n78
+	}
+	return i, nil
+}
+
+func (m *ProcessIO_Control_Heartbeat) Marshal() (dAtA []byte, err error) {
+	size := m.ProtoSize()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ProcessIO_Control_Heartbeat) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Interval != nil {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintAgent(dAtA, i, uint64(m.Interval.ProtoSize()))
+		n79, err := m.Interval.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n79
+	}
+	return i, nil
+}
+
+func encodeFixed64Agent(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
+	return offset + 8
+}
+func encodeFixed32Agent(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	return offset + 4
+}
+func encodeVarintAgent(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
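Aside (not part of the vendored patch): encodeVarintAgent above is the standard protocol-buffer base-128 varint encoder, and the byte literals written by the MarshalTo methods (0x8, 0xa, 0x12, 0x1a, 0x22, ...) are precomputed wire-format keys, field_number<<3 | wire_type; for example 0xa is field 1 with the length-delimited wire type 2, which is why each key is followed by a varint length and then the nested message bytes. A minimal standalone sketch of the varint round trip, assuming nothing beyond the standard library (putUvarint and uvarint are hypothetical names mirroring the generated helper, not functions from this file):

// Hypothetical illustration of base-128 varint encode/decode; not part of agent.pb.go.
package main

import "fmt"

// putUvarint writes v starting at offset, seven bits per byte, least-significant
// group first; the high bit of each byte marks "more bytes follow".
func putUvarint(buf []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		buf[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	buf[offset] = uint8(v)
	return offset + 1
}

// uvarint reads the value back, accumulating seven bits per byte until a byte
// with the high bit clear terminates the sequence.
func uvarint(buf []byte) (v uint64, n int) {
	var shift uint
	for i, b := range buf {
		v |= uint64(b&0x7f) << shift
		if b < 0x80 {
			return v, i + 1
		}
		shift += 7
	}
	return 0, 0 // truncated input
}

func main() {
	buf := make([]byte, 10)
	end := putUvarint(buf, 0, 300) // 300 encodes as 0xac 0x02
	v, n := uvarint(buf[:end])
	fmt.Printf("% x -> %d (%d bytes)\n", buf[:end], v, n)
}

The standard library's encoding/binary package offers equivalent PutUvarint/Uvarint functions; the generated sovAgent helper further below computes the same byte length without writing, which is how the ProtoSize methods pre-size their buffers before MarshalTo fills them.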
+func NewPopulatedCall(r randyAgent, easy bool) *Call {
+	this := &Call{}
+	this.Type = Call_Type([]int32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 31, 13, 20, 26, 14, 15, 16, 21, 17, 18, 19, 22, 23, 24, 25, 27, 28, 29, 30}[r.Intn(32)])
+	if r.Intn(10) != 0 {
+		this.GetMetrics = NewPopulatedCall_GetMetrics(r, easy)
+	}
+	if r.Intn(10) != 0 {
+		this.SetLoggingLevel = NewPopulatedCall_SetLoggingLevel(r, easy)
+	}
+	if r.Intn(10) != 0 {
+		this.ListFiles = NewPopulatedCall_ListFiles(r, easy)
+	}
+	if r.Intn(10) != 0 {
+		this.ReadFile = NewPopulatedCall_ReadFile(r, easy)
+	}
+	if r.Intn(10) == 0 {
+		this.LaunchNestedContainer = NewPopulatedCall_LaunchNestedContainer(r, easy)
+	}
+	if r.Intn(10) == 0 {
+		this.WaitNestedContainer = NewPopulatedCall_WaitNestedContainer(r, easy)
+	}
+	if r.Intn(10) == 0 {
+		this.KillNestedContainer = NewPopulatedCall_KillNestedContainer(r, easy)
+	}
+	if r.Intn(10) == 0 {
+		this.LaunchNestedContainerSession = NewPopulatedCall_LaunchNestedContainerSession(r, easy)
+	}
+	if r.Intn(10) == 0 {
+		this.AttachContainerInput = NewPopulatedCall_AttachContainerInput(r, easy)
+	}
+	if r.Intn(10) == 0 {
+		this.AttachContainerOutput = NewPopulatedCall_AttachContainerOutput(r, easy)
+	}
+	if r.Intn(10) == 0 {
+		this.RemoveNestedContainer = NewPopulatedCall_RemoveNestedContainer(r, easy)
+	}
+	if r.Intn(10) == 0 {
+		this.LaunchContainer = NewPopulatedCall_LaunchContainer(r, easy)
+	}
+	if r.Intn(10) == 0 {
+		this.WaitContainer = NewPopulatedCall_WaitContainer(r, easy)
+	}
+	if r.Intn(10) == 0 {
+		this.KillContainer = NewPopulatedCall_KillContainer(r, easy)
+	}
+	if r.Intn(10) == 0 {
+		this.RemoveContainer = NewPopulatedCall_RemoveContainer(r, easy)
+	}
+	if r.Intn(10) != 0 {
+		this.AddResourceProviderConfig = NewPopulatedCall_AddResourceProviderConfig(r, easy)
+	}
+	if r.Intn(10) != 0 {
+		this.UpdateResourceProviderConfig = NewPopulatedCall_UpdateResourceProviderConfig(r, easy)
+	}
+	if r.Intn(10) != 0 {
+		this.RemoveResourceProviderConfig = NewPopulatedCall_RemoveResourceProviderConfig(r, easy)
+	}
+	if r.Intn(10) != 0 {
+		this.GetContainers = NewPopulatedCall_GetContainers(r, easy)
+	}
+	if r.Intn(10) != 0 {
+		this.PruneImages = NewPopulatedCall_PruneImages(r, easy)
+	}
+	if !easy && r.Intn(10) != 0 {
+	}
+	return this
+}
+
+func NewPopulatedCall_GetMetrics(r randyAgent, easy bool) *Call_GetMetrics {
+	this := &Call_GetMetrics{}
+	if r.Intn(10) != 0 {
+		this.Timeout = mesos.NewPopulatedDurationInfo(r, easy)
+	}
+	if !easy && r.Intn(10) != 0 {
+	}
+	return this
+}
+
+func NewPopulatedCall_SetLoggingLevel(r randyAgent, easy bool) *Call_SetLoggingLevel {
+	this := &Call_SetLoggingLevel{}
+	this.Level = uint32(r.Uint32())
+	v1 := mesos.NewPopulatedDurationInfo(r, easy)
+	this.Duration = *v1
+	if !easy && r.Intn(10) != 0 {
+	}
+	return this
+}
+
+func NewPopulatedCall_ListFiles(r randyAgent, easy bool) *Call_ListFiles {
+	this := &Call_ListFiles{}
+	this.Path = string(randStringAgent(r))
+	if !easy && r.Intn(10) != 0 {
+	}
+	return this
+}
+
+func NewPopulatedCall_ReadFile(r randyAgent, easy bool) *Call_ReadFile {
+	this := &Call_ReadFile{}
+	this.Path = string(randStringAgent(r))
+	this.Offset = uint64(uint64(r.Uint32()))
+	if r.Intn(10) != 0 {
+		v2 := uint64(uint64(r.Uint32()))
+		this.Length = &v2
+	}
+	if !easy && r.Intn(10) != 0 {
+	}
+	return this
+}
+
+func NewPopulatedCall_GetContainers(r randyAgent, easy bool) *Call_GetContainers {
+	this := &Call_GetContainers{}
+	if r.Intn(10) != 0 {
+		v3 := bool(bool(r.Intn(2) == 0))
+		this.ShowNested = &v3
+	}
+	if r.Intn(10) != 0 {
+		v4 := bool(bool(r.Intn(2) == 0))
+		this.ShowStandalone =
&v4 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCall_LaunchNestedContainer(r randyAgent, easy bool) *Call_LaunchNestedContainer { + this := &Call_LaunchNestedContainer{} + v5 := mesos.NewPopulatedContainerID(r, easy) + this.ContainerID = *v5 + if r.Intn(10) != 0 { + this.Command = mesos.NewPopulatedCommandInfo(r, easy) + } + if r.Intn(10) != 0 { + this.Container = mesos.NewPopulatedContainerInfo(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCall_WaitNestedContainer(r randyAgent, easy bool) *Call_WaitNestedContainer { + this := &Call_WaitNestedContainer{} + v6 := mesos.NewPopulatedContainerID(r, easy) + this.ContainerID = *v6 + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCall_KillNestedContainer(r randyAgent, easy bool) *Call_KillNestedContainer { + this := &Call_KillNestedContainer{} + v7 := mesos.NewPopulatedContainerID(r, easy) + this.ContainerID = *v7 + if r.Intn(10) != 0 { + v8 := int32(r.Int31()) + if r.Intn(2) == 0 { + v8 *= -1 + } + this.Signal = &v8 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCall_RemoveNestedContainer(r randyAgent, easy bool) *Call_RemoveNestedContainer { + this := &Call_RemoveNestedContainer{} + v9 := mesos.NewPopulatedContainerID(r, easy) + this.ContainerID = *v9 + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCall_LaunchNestedContainerSession(r randyAgent, easy bool) *Call_LaunchNestedContainerSession { + this := &Call_LaunchNestedContainerSession{} + v10 := mesos.NewPopulatedContainerID(r, easy) + this.ContainerID = *v10 + if r.Intn(10) != 0 { + this.Command = mesos.NewPopulatedCommandInfo(r, easy) + } + if r.Intn(10) != 0 { + this.Container = mesos.NewPopulatedContainerInfo(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCall_AttachContainerInput(r randyAgent, easy bool) *Call_AttachContainerInput { + this := &Call_AttachContainerInput{} + this.Type = Call_AttachContainerInput_Type([]int32{0, 1, 2}[r.Intn(3)]) + if r.Intn(10) == 0 { + this.ContainerID = mesos.NewPopulatedContainerID(r, easy) + } + if r.Intn(10) != 0 { + this.ProcessIO = NewPopulatedProcessIO(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCall_AttachContainerOutput(r randyAgent, easy bool) *Call_AttachContainerOutput { + this := &Call_AttachContainerOutput{} + v11 := mesos.NewPopulatedContainerID(r, easy) + this.ContainerID = *v11 + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCall_LaunchContainer(r randyAgent, easy bool) *Call_LaunchContainer { + this := &Call_LaunchContainer{} + v12 := mesos.NewPopulatedContainerID(r, easy) + this.ContainerID = *v12 + if r.Intn(10) != 0 { + this.Command = mesos.NewPopulatedCommandInfo(r, easy) + } + if r.Intn(10) != 0 { + v13 := r.Intn(5) + this.Resources = make([]mesos.Resource, v13) + for i := 0; i < v13; i++ { + v14 := mesos.NewPopulatedResource(r, easy) + this.Resources[i] = *v14 + } + } + if r.Intn(10) != 0 { + this.Container = mesos.NewPopulatedContainerInfo(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCall_WaitContainer(r randyAgent, easy bool) *Call_WaitContainer { + this := &Call_WaitContainer{} + v15 := mesos.NewPopulatedContainerID(r, easy) + this.ContainerID = *v15 + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCall_KillContainer(r randyAgent, easy bool) *Call_KillContainer { + this := &Call_KillContainer{} + v16 := 
mesos.NewPopulatedContainerID(r, easy) + this.ContainerID = *v16 + if r.Intn(10) != 0 { + v17 := int32(r.Int31()) + if r.Intn(2) == 0 { + v17 *= -1 + } + this.Signal = &v17 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCall_RemoveContainer(r randyAgent, easy bool) *Call_RemoveContainer { + this := &Call_RemoveContainer{} + v18 := mesos.NewPopulatedContainerID(r, easy) + this.ContainerID = *v18 + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCall_AddResourceProviderConfig(r randyAgent, easy bool) *Call_AddResourceProviderConfig { + this := &Call_AddResourceProviderConfig{} + v19 := mesos.NewPopulatedResourceProviderInfo(r, easy) + this.Info = *v19 + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCall_UpdateResourceProviderConfig(r randyAgent, easy bool) *Call_UpdateResourceProviderConfig { + this := &Call_UpdateResourceProviderConfig{} + v20 := mesos.NewPopulatedResourceProviderInfo(r, easy) + this.Info = *v20 + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCall_RemoveResourceProviderConfig(r randyAgent, easy bool) *Call_RemoveResourceProviderConfig { + this := &Call_RemoveResourceProviderConfig{} + this.Type = string(randStringAgent(r)) + this.Name = string(randStringAgent(r)) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCall_PruneImages(r randyAgent, easy bool) *Call_PruneImages { + this := &Call_PruneImages{} + if r.Intn(10) != 0 { + v21 := r.Intn(5) + this.ExcludedImages = make([]mesos.Image, v21) + for i := 0; i < v21; i++ { + v22 := mesos.NewPopulatedImage(r, easy) + this.ExcludedImages[i] = *v22 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponse(r randyAgent, easy bool) *Response { + this := &Response{} + this.Type = Response_Type([]int32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 17, 12, 14, 16, 13, 15}[r.Intn(18)]) + if r.Intn(10) != 0 { + this.GetHealth = NewPopulatedResponse_GetHealth(r, easy) + } + if r.Intn(10) != 0 { + this.GetFlags = NewPopulatedResponse_GetFlags(r, easy) + } + if r.Intn(10) != 0 { + this.GetVersion = NewPopulatedResponse_GetVersion(r, easy) + } + if r.Intn(10) != 0 { + this.GetMetrics = NewPopulatedResponse_GetMetrics(r, easy) + } + if r.Intn(10) != 0 { + this.GetLoggingLevel = NewPopulatedResponse_GetLoggingLevel(r, easy) + } + if r.Intn(10) != 0 { + this.ListFiles = NewPopulatedResponse_ListFiles(r, easy) + } + if r.Intn(10) != 0 { + this.ReadFile = NewPopulatedResponse_ReadFile(r, easy) + } + if r.Intn(10) == 0 { + this.GetState = NewPopulatedResponse_GetState(r, easy) + } + if r.Intn(10) == 0 { + this.GetContainers = NewPopulatedResponse_GetContainers(r, easy) + } + if r.Intn(10) != 0 { + this.GetFrameworks = NewPopulatedResponse_GetFrameworks(r, easy) + } + if r.Intn(10) != 0 { + this.GetExecutors = NewPopulatedResponse_GetExecutors(r, easy) + } + if r.Intn(10) == 0 { + this.GetTasks = NewPopulatedResponse_GetTasks(r, easy) + } + if r.Intn(10) != 0 { + this.WaitNestedContainer = NewPopulatedResponse_WaitNestedContainer(r, easy) + } + if r.Intn(10) != 0 { + this.GetAgent = NewPopulatedResponse_GetAgent(r, easy) + } + if r.Intn(10) != 0 { + this.WaitContainer = NewPopulatedResponse_WaitContainer(r, easy) + } + if r.Intn(10) != 0 { + this.GetResourceProviders = NewPopulatedResponse_GetResourceProviders(r, easy) + } + if r.Intn(10) != 0 { + this.GetOperations = NewPopulatedResponse_GetOperations(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func 
NewPopulatedResponse_GetHealth(r randyAgent, easy bool) *Response_GetHealth { + this := &Response_GetHealth{} + this.Healthy = bool(bool(r.Intn(2) == 0)) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponse_GetFlags(r randyAgent, easy bool) *Response_GetFlags { + this := &Response_GetFlags{} + if r.Intn(10) != 0 { + v23 := r.Intn(5) + this.Flags = make([]mesos.Flag, v23) + for i := 0; i < v23; i++ { + v24 := mesos.NewPopulatedFlag(r, easy) + this.Flags[i] = *v24 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponse_GetVersion(r randyAgent, easy bool) *Response_GetVersion { + this := &Response_GetVersion{} + v25 := mesos.NewPopulatedVersionInfo(r, easy) + this.VersionInfo = *v25 + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponse_GetMetrics(r randyAgent, easy bool) *Response_GetMetrics { + this := &Response_GetMetrics{} + if r.Intn(10) != 0 { + v26 := r.Intn(5) + this.Metrics = make([]mesos.Metric, v26) + for i := 0; i < v26; i++ { + v27 := mesos.NewPopulatedMetric(r, easy) + this.Metrics[i] = *v27 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponse_GetLoggingLevel(r randyAgent, easy bool) *Response_GetLoggingLevel { + this := &Response_GetLoggingLevel{} + this.Level = uint32(r.Uint32()) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponse_ListFiles(r randyAgent, easy bool) *Response_ListFiles { + this := &Response_ListFiles{} + if r.Intn(10) != 0 { + v28 := r.Intn(5) + this.FileInfos = make([]mesos.FileInfo, v28) + for i := 0; i < v28; i++ { + v29 := mesos.NewPopulatedFileInfo(r, easy) + this.FileInfos[i] = *v29 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponse_ReadFile(r randyAgent, easy bool) *Response_ReadFile { + this := &Response_ReadFile{} + this.Size = uint64(uint64(r.Uint32())) + v30 := r.Intn(100) + this.Data = make([]byte, v30) + for i := 0; i < v30; i++ { + this.Data[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponse_GetState(r randyAgent, easy bool) *Response_GetState { + this := &Response_GetState{} + if r.Intn(10) == 0 { + this.GetTasks = NewPopulatedResponse_GetTasks(r, easy) + } + if r.Intn(10) != 0 { + this.GetExecutors = NewPopulatedResponse_GetExecutors(r, easy) + } + if r.Intn(10) != 0 { + this.GetFrameworks = NewPopulatedResponse_GetFrameworks(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponse_GetContainers(r randyAgent, easy bool) *Response_GetContainers { + this := &Response_GetContainers{} + if r.Intn(10) == 0 { + v31 := r.Intn(5) + this.Containers = make([]Response_GetContainers_Container, v31) + for i := 0; i < v31; i++ { + v32 := NewPopulatedResponse_GetContainers_Container(r, easy) + this.Containers[i] = *v32 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponse_GetContainers_Container(r randyAgent, easy bool) *Response_GetContainers_Container { + this := &Response_GetContainers_Container{} + if r.Intn(10) != 0 { + this.FrameworkID = mesos.NewPopulatedFrameworkID(r, easy) + } + if r.Intn(10) != 0 { + this.ExecutorID = mesos.NewPopulatedExecutorID(r, easy) + } + if r.Intn(10) != 0 { + v33 := string(randStringAgent(r)) + this.ExecutorName = &v33 + } + v34 := mesos.NewPopulatedContainerID(r, easy) + this.ContainerID = *v34 + if r.Intn(10) == 0 { + this.ContainerStatus = mesos.NewPopulatedContainerStatus(r, easy) + } + if r.Intn(10) 
!= 0 { + this.ResourceStatistics = mesos.NewPopulatedResourceStatistics(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponse_GetFrameworks(r randyAgent, easy bool) *Response_GetFrameworks { + this := &Response_GetFrameworks{} + if r.Intn(10) != 0 { + v35 := r.Intn(5) + this.Frameworks = make([]Response_GetFrameworks_Framework, v35) + for i := 0; i < v35; i++ { + v36 := NewPopulatedResponse_GetFrameworks_Framework(r, easy) + this.Frameworks[i] = *v36 + } + } + if r.Intn(10) != 0 { + v37 := r.Intn(5) + this.CompletedFrameworks = make([]Response_GetFrameworks_Framework, v37) + for i := 0; i < v37; i++ { + v38 := NewPopulatedResponse_GetFrameworks_Framework(r, easy) + this.CompletedFrameworks[i] = *v38 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponse_GetFrameworks_Framework(r randyAgent, easy bool) *Response_GetFrameworks_Framework { + this := &Response_GetFrameworks_Framework{} + v39 := mesos.NewPopulatedFrameworkInfo(r, easy) + this.FrameworkInfo = *v39 + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponse_GetExecutors(r randyAgent, easy bool) *Response_GetExecutors { + this := &Response_GetExecutors{} + if r.Intn(10) != 0 { + v40 := r.Intn(5) + this.Executors = make([]Response_GetExecutors_Executor, v40) + for i := 0; i < v40; i++ { + v41 := NewPopulatedResponse_GetExecutors_Executor(r, easy) + this.Executors[i] = *v41 + } + } + if r.Intn(10) != 0 { + v42 := r.Intn(5) + this.CompletedExecutors = make([]Response_GetExecutors_Executor, v42) + for i := 0; i < v42; i++ { + v43 := NewPopulatedResponse_GetExecutors_Executor(r, easy) + this.CompletedExecutors[i] = *v43 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponse_GetExecutors_Executor(r randyAgent, easy bool) *Response_GetExecutors_Executor { + this := &Response_GetExecutors_Executor{} + v44 := mesos.NewPopulatedExecutorInfo(r, easy) + this.ExecutorInfo = *v44 + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponse_GetOperations(r randyAgent, easy bool) *Response_GetOperations { + this := &Response_GetOperations{} + if r.Intn(10) != 0 { + v45 := r.Intn(5) + this.Operations = make([]mesos.Operation, v45) + for i := 0; i < v45; i++ { + v46 := mesos.NewPopulatedOperation(r, easy) + this.Operations[i] = *v46 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponse_GetTasks(r randyAgent, easy bool) *Response_GetTasks { + this := &Response_GetTasks{} + if r.Intn(10) == 0 { + v47 := r.Intn(5) + this.PendingTasks = make([]mesos.Task, v47) + for i := 0; i < v47; i++ { + v48 := mesos.NewPopulatedTask(r, easy) + this.PendingTasks[i] = *v48 + } + } + if r.Intn(10) == 0 { + v49 := r.Intn(5) + this.QueuedTasks = make([]mesos.Task, v49) + for i := 0; i < v49; i++ { + v50 := mesos.NewPopulatedTask(r, easy) + this.QueuedTasks[i] = *v50 + } + } + if r.Intn(10) == 0 { + v51 := r.Intn(5) + this.LaunchedTasks = make([]mesos.Task, v51) + for i := 0; i < v51; i++ { + v52 := mesos.NewPopulatedTask(r, easy) + this.LaunchedTasks[i] = *v52 + } + } + if r.Intn(10) == 0 { + v53 := r.Intn(5) + this.TerminatedTasks = make([]mesos.Task, v53) + for i := 0; i < v53; i++ { + v54 := mesos.NewPopulatedTask(r, easy) + this.TerminatedTasks[i] = *v54 + } + } + if r.Intn(10) == 0 { + v55 := r.Intn(5) + this.CompletedTasks = make([]mesos.Task, v55) + for i := 0; i < v55; i++ { + v56 := mesos.NewPopulatedTask(r, easy) + this.CompletedTasks[i] = *v56 + } + } + if !easy && 
r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponse_GetAgent(r randyAgent, easy bool) *Response_GetAgent { + this := &Response_GetAgent{} + if r.Intn(10) != 0 { + this.AgentInfo = mesos.NewPopulatedAgentInfo(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponse_GetResourceProviders(r randyAgent, easy bool) *Response_GetResourceProviders { + this := &Response_GetResourceProviders{} + if r.Intn(10) != 0 { + v57 := r.Intn(5) + this.ResourceProviders = make([]Response_GetResourceProviders_ResourceProvider, v57) + for i := 0; i < v57; i++ { + v58 := NewPopulatedResponse_GetResourceProviders_ResourceProvider(r, easy) + this.ResourceProviders[i] = *v58 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponse_GetResourceProviders_ResourceProvider(r randyAgent, easy bool) *Response_GetResourceProviders_ResourceProvider { + this := &Response_GetResourceProviders_ResourceProvider{} + v59 := mesos.NewPopulatedResourceProviderInfo(r, easy) + this.ResourceProviderInfo = *v59 + if r.Intn(10) != 0 { + v60 := r.Intn(5) + this.TotalResources = make([]mesos.Resource, v60) + for i := 0; i < v60; i++ { + v61 := mesos.NewPopulatedResource(r, easy) + this.TotalResources[i] = *v61 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponse_WaitNestedContainer(r randyAgent, easy bool) *Response_WaitNestedContainer { + this := &Response_WaitNestedContainer{} + if r.Intn(10) != 0 { + v62 := int32(r.Int31()) + if r.Intn(2) == 0 { + v62 *= -1 + } + this.ExitStatus = &v62 + } + if r.Intn(10) != 0 { + v63 := mesos.TaskState([]int32{6, 0, 1, 8, 2, 3, 4, 7, 5, 9, 10, 11, 12, 13}[r.Intn(14)]) + this.State = &v63 + } + if r.Intn(10) != 0 { + v64 := mesos.TaskStatus_Reason([]int32{0, 21, 19, 20, 8, 17, 22, 33, 23, 24, 1, 2, 3, 4, 5, 6, 27, 7, 9, 18, 10, 11, 31, 32, 12, 13, 30, 28, 29, 25, 26, 14, 15, 16}[r.Intn(34)]) + this.Reason = &v64 + } + if r.Intn(10) != 0 { + this.Limitation = mesos.NewPopulatedTaskResourceLimitation(r, easy) + } + if r.Intn(10) != 0 { + v65 := string(randStringAgent(r)) + this.Message = &v65 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResponse_WaitContainer(r randyAgent, easy bool) *Response_WaitContainer { + this := &Response_WaitContainer{} + if r.Intn(10) != 0 { + v66 := int32(r.Int31()) + if r.Intn(2) == 0 { + v66 *= -1 + } + this.ExitStatus = &v66 + } + if r.Intn(10) != 0 { + v67 := mesos.TaskState([]int32{6, 0, 1, 8, 2, 3, 4, 7, 5, 9, 10, 11, 12, 13}[r.Intn(14)]) + this.State = &v67 + } + if r.Intn(10) != 0 { + v68 := mesos.TaskStatus_Reason([]int32{0, 21, 19, 20, 8, 17, 22, 33, 23, 24, 1, 2, 3, 4, 5, 6, 27, 7, 9, 18, 10, 11, 31, 32, 12, 13, 30, 28, 29, 25, 26, 14, 15, 16}[r.Intn(34)]) + this.Reason = &v68 + } + if r.Intn(10) != 0 { + this.Limitation = mesos.NewPopulatedTaskResourceLimitation(r, easy) + } + if r.Intn(10) != 0 { + v69 := string(randStringAgent(r)) + this.Message = &v69 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedProcessIO(r randyAgent, easy bool) *ProcessIO { + this := &ProcessIO{} + this.Type = ProcessIO_Type([]int32{0, 1, 2}[r.Intn(3)]) + if r.Intn(10) != 0 { + this.Data = NewPopulatedProcessIO_Data(r, easy) + } + if r.Intn(10) != 0 { + this.Control = NewPopulatedProcessIO_Control(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedProcessIO_Data(r randyAgent, easy bool) *ProcessIO_Data { + this := &ProcessIO_Data{} + this.Type = ProcessIO_Data_Type([]int32{0, 
1, 2, 3}[r.Intn(4)]) + if r.Intn(10) != 0 { + v70 := r.Intn(100) + this.Data = make([]byte, v70) + for i := 0; i < v70; i++ { + this.Data[i] = byte(r.Intn(256)) + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedProcessIO_Control(r randyAgent, easy bool) *ProcessIO_Control { + this := &ProcessIO_Control{} + this.Type = ProcessIO_Control_Type([]int32{0, 1, 2}[r.Intn(3)]) + if r.Intn(10) != 0 { + this.TTYInfo = mesos.NewPopulatedTTYInfo(r, easy) + } + if r.Intn(10) != 0 { + this.Heartbeat = NewPopulatedProcessIO_Control_Heartbeat(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedProcessIO_Control_Heartbeat(r randyAgent, easy bool) *ProcessIO_Control_Heartbeat { + this := &ProcessIO_Control_Heartbeat{} + if r.Intn(10) != 0 { + this.Interval = mesos.NewPopulatedDurationInfo(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +type randyAgent interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneAgent(r randyAgent) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringAgent(r randyAgent) string { + v71 := r.Intn(100) + tmps := make([]rune, v71) + for i := 0; i < v71; i++ { + tmps[i] = randUTF8RuneAgent(r) + } + return string(tmps) +} +func randUnrecognizedAgent(r randyAgent, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldAgent(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldAgent(dAtA []byte, r randyAgent, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateAgent(dAtA, uint64(key)) + v72 := r.Int63() + if r.Intn(2) == 0 { + v72 *= -1 + } + dAtA = encodeVarintPopulateAgent(dAtA, uint64(v72)) + case 1: + dAtA = encodeVarintPopulateAgent(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateAgent(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateAgent(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateAgent(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateAgent(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Call) ProtoSize() (n int) { + var l int + _ = l + n += 1 + sovAgent(uint64(m.Type)) + if m.GetMetrics != nil { + l = m.GetMetrics.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.SetLoggingLevel != nil { + l = m.SetLoggingLevel.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.ListFiles != nil { + l = m.ListFiles.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.ReadFile != nil { + l = m.ReadFile.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.LaunchNestedContainer != nil { + l = m.LaunchNestedContainer.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.WaitNestedContainer != nil { + l = m.WaitNestedContainer.ProtoSize() + n += 1 + l 
+ sovAgent(uint64(l)) + } + if m.KillNestedContainer != nil { + l = m.KillNestedContainer.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.LaunchNestedContainerSession != nil { + l = m.LaunchNestedContainerSession.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.AttachContainerInput != nil { + l = m.AttachContainerInput.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.AttachContainerOutput != nil { + l = m.AttachContainerOutput.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.RemoveNestedContainer != nil { + l = m.RemoveNestedContainer.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.LaunchContainer != nil { + l = m.LaunchContainer.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.WaitContainer != nil { + l = m.WaitContainer.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.KillContainer != nil { + l = m.KillContainer.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.RemoveContainer != nil { + l = m.RemoveContainer.ProtoSize() + n += 2 + l + sovAgent(uint64(l)) + } + if m.AddResourceProviderConfig != nil { + l = m.AddResourceProviderConfig.ProtoSize() + n += 2 + l + sovAgent(uint64(l)) + } + if m.UpdateResourceProviderConfig != nil { + l = m.UpdateResourceProviderConfig.ProtoSize() + n += 2 + l + sovAgent(uint64(l)) + } + if m.RemoveResourceProviderConfig != nil { + l = m.RemoveResourceProviderConfig.ProtoSize() + n += 2 + l + sovAgent(uint64(l)) + } + if m.GetContainers != nil { + l = m.GetContainers.ProtoSize() + n += 2 + l + sovAgent(uint64(l)) + } + if m.PruneImages != nil { + l = m.PruneImages.ProtoSize() + n += 2 + l + sovAgent(uint64(l)) + } + return n +} + +func (m *Call_GetMetrics) ProtoSize() (n int) { + var l int + _ = l + if m.Timeout != nil { + l = m.Timeout.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + return n +} + +func (m *Call_SetLoggingLevel) ProtoSize() (n int) { + var l int + _ = l + n += 1 + sovAgent(uint64(m.Level)) + l = m.Duration.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + return n +} + +func (m *Call_ListFiles) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovAgent(uint64(l)) + return n +} + +func (m *Call_ReadFile) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovAgent(uint64(l)) + n += 1 + sovAgent(uint64(m.Offset)) + if m.Length != nil { + n += 1 + sovAgent(uint64(*m.Length)) + } + return n +} + +func (m *Call_GetContainers) ProtoSize() (n int) { + var l int + _ = l + if m.ShowNested != nil { + n += 2 + } + if m.ShowStandalone != nil { + n += 2 + } + return n +} + +func (m *Call_LaunchNestedContainer) ProtoSize() (n int) { + var l int + _ = l + l = m.ContainerID.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + if m.Command != nil { + l = m.Command.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.Container != nil { + l = m.Container.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + return n +} + +func (m *Call_WaitNestedContainer) ProtoSize() (n int) { + var l int + _ = l + l = m.ContainerID.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + return n +} + +func (m *Call_KillNestedContainer) ProtoSize() (n int) { + var l int + _ = l + l = m.ContainerID.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + if m.Signal != nil { + n += 1 + sovAgent(uint64(*m.Signal)) + } + return n +} + +func (m *Call_RemoveNestedContainer) ProtoSize() (n int) { + var l int + _ = l + l = m.ContainerID.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + return n +} + +func (m *Call_LaunchNestedContainerSession) ProtoSize() (n int) { + var l int 
+ _ = l + l = m.ContainerID.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + if m.Command != nil { + l = m.Command.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.Container != nil { + l = m.Container.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + return n +} + +func (m *Call_AttachContainerInput) ProtoSize() (n int) { + var l int + _ = l + n += 1 + sovAgent(uint64(m.Type)) + if m.ContainerID != nil { + l = m.ContainerID.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.ProcessIO != nil { + l = m.ProcessIO.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + return n +} + +func (m *Call_AttachContainerOutput) ProtoSize() (n int) { + var l int + _ = l + l = m.ContainerID.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + return n +} + +func (m *Call_LaunchContainer) ProtoSize() (n int) { + var l int + _ = l + l = m.ContainerID.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + if m.Command != nil { + l = m.Command.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + } + if m.Container != nil { + l = m.Container.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + return n +} + +func (m *Call_WaitContainer) ProtoSize() (n int) { + var l int + _ = l + l = m.ContainerID.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + return n +} + +func (m *Call_KillContainer) ProtoSize() (n int) { + var l int + _ = l + l = m.ContainerID.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + if m.Signal != nil { + n += 1 + sovAgent(uint64(*m.Signal)) + } + return n +} + +func (m *Call_RemoveContainer) ProtoSize() (n int) { + var l int + _ = l + l = m.ContainerID.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + return n +} + +func (m *Call_AddResourceProviderConfig) ProtoSize() (n int) { + var l int + _ = l + l = m.Info.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + return n +} + +func (m *Call_UpdateResourceProviderConfig) ProtoSize() (n int) { + var l int + _ = l + l = m.Info.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + return n +} + +func (m *Call_RemoveResourceProviderConfig) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovAgent(uint64(l)) + l = len(m.Name) + n += 1 + l + sovAgent(uint64(l)) + return n +} + +func (m *Call_PruneImages) ProtoSize() (n int) { + var l int + _ = l + if len(m.ExcludedImages) > 0 { + for _, e := range m.ExcludedImages { + l = e.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + } + return n +} + +func (m *Response) ProtoSize() (n int) { + var l int + _ = l + n += 1 + sovAgent(uint64(m.Type)) + if m.GetHealth != nil { + l = m.GetHealth.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.GetFlags != nil { + l = m.GetFlags.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.GetVersion != nil { + l = m.GetVersion.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.GetMetrics != nil { + l = m.GetMetrics.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.GetLoggingLevel != nil { + l = m.GetLoggingLevel.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.ListFiles != nil { + l = m.ListFiles.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.ReadFile != nil { + l = m.ReadFile.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.GetState != nil { + l = m.GetState.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.GetContainers != nil { + l = m.GetContainers.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.GetFrameworks != nil { + l = m.GetFrameworks.ProtoSize() 
+ n += 1 + l + sovAgent(uint64(l)) + } + if m.GetExecutors != nil { + l = m.GetExecutors.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.GetTasks != nil { + l = m.GetTasks.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.WaitNestedContainer != nil { + l = m.WaitNestedContainer.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.GetAgent != nil { + l = m.GetAgent.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.WaitContainer != nil { + l = m.WaitContainer.ProtoSize() + n += 2 + l + sovAgent(uint64(l)) + } + if m.GetResourceProviders != nil { + l = m.GetResourceProviders.ProtoSize() + n += 2 + l + sovAgent(uint64(l)) + } + if m.GetOperations != nil { + l = m.GetOperations.ProtoSize() + n += 2 + l + sovAgent(uint64(l)) + } + return n +} + +func (m *Response_GetHealth) ProtoSize() (n int) { + var l int + _ = l + n += 2 + return n +} + +func (m *Response_GetFlags) ProtoSize() (n int) { + var l int + _ = l + if len(m.Flags) > 0 { + for _, e := range m.Flags { + l = e.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + } + return n +} + +func (m *Response_GetVersion) ProtoSize() (n int) { + var l int + _ = l + l = m.VersionInfo.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + return n +} + +func (m *Response_GetMetrics) ProtoSize() (n int) { + var l int + _ = l + if len(m.Metrics) > 0 { + for _, e := range m.Metrics { + l = e.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + } + return n +} + +func (m *Response_GetLoggingLevel) ProtoSize() (n int) { + var l int + _ = l + n += 1 + sovAgent(uint64(m.Level)) + return n +} + +func (m *Response_ListFiles) ProtoSize() (n int) { + var l int + _ = l + if len(m.FileInfos) > 0 { + for _, e := range m.FileInfos { + l = e.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + } + return n +} + +func (m *Response_ReadFile) ProtoSize() (n int) { + var l int + _ = l + n += 1 + sovAgent(uint64(m.Size)) + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovAgent(uint64(l)) + } + return n +} + +func (m *Response_GetState) ProtoSize() (n int) { + var l int + _ = l + if m.GetTasks != nil { + l = m.GetTasks.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.GetExecutors != nil { + l = m.GetExecutors.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.GetFrameworks != nil { + l = m.GetFrameworks.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + return n +} + +func (m *Response_GetContainers) ProtoSize() (n int) { + var l int + _ = l + if len(m.Containers) > 0 { + for _, e := range m.Containers { + l = e.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + } + return n +} + +func (m *Response_GetContainers_Container) ProtoSize() (n int) { + var l int + _ = l + if m.FrameworkID != nil { + l = m.FrameworkID.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.ExecutorID != nil { + l = m.ExecutorID.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.ExecutorName != nil { + l = len(*m.ExecutorName) + n += 1 + l + sovAgent(uint64(l)) + } + l = m.ContainerID.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + if m.ContainerStatus != nil { + l = m.ContainerStatus.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.ResourceStatistics != nil { + l = m.ResourceStatistics.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + return n +} + +func (m *Response_GetFrameworks) ProtoSize() (n int) { + var l int + _ = l + if len(m.Frameworks) > 0 { + for _, e := range m.Frameworks { + l = e.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + } + if len(m.CompletedFrameworks) > 0 { + for _, e := range 
m.CompletedFrameworks { + l = e.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + } + return n +} + +func (m *Response_GetFrameworks_Framework) ProtoSize() (n int) { + var l int + _ = l + l = m.FrameworkInfo.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + return n +} + +func (m *Response_GetExecutors) ProtoSize() (n int) { + var l int + _ = l + if len(m.Executors) > 0 { + for _, e := range m.Executors { + l = e.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + } + if len(m.CompletedExecutors) > 0 { + for _, e := range m.CompletedExecutors { + l = e.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + } + return n +} + +func (m *Response_GetExecutors_Executor) ProtoSize() (n int) { + var l int + _ = l + l = m.ExecutorInfo.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + return n +} + +func (m *Response_GetOperations) ProtoSize() (n int) { + var l int + _ = l + if len(m.Operations) > 0 { + for _, e := range m.Operations { + l = e.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + } + return n +} + +func (m *Response_GetTasks) ProtoSize() (n int) { + var l int + _ = l + if len(m.PendingTasks) > 0 { + for _, e := range m.PendingTasks { + l = e.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + } + if len(m.QueuedTasks) > 0 { + for _, e := range m.QueuedTasks { + l = e.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + } + if len(m.LaunchedTasks) > 0 { + for _, e := range m.LaunchedTasks { + l = e.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + } + if len(m.TerminatedTasks) > 0 { + for _, e := range m.TerminatedTasks { + l = e.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + } + if len(m.CompletedTasks) > 0 { + for _, e := range m.CompletedTasks { + l = e.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + } + return n +} + +func (m *Response_GetAgent) ProtoSize() (n int) { + var l int + _ = l + if m.AgentInfo != nil { + l = m.AgentInfo.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + return n +} + +func (m *Response_GetResourceProviders) ProtoSize() (n int) { + var l int + _ = l + if len(m.ResourceProviders) > 0 { + for _, e := range m.ResourceProviders { + l = e.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + } + return n +} + +func (m *Response_GetResourceProviders_ResourceProvider) ProtoSize() (n int) { + var l int + _ = l + l = m.ResourceProviderInfo.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + if len(m.TotalResources) > 0 { + for _, e := range m.TotalResources { + l = e.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + } + return n +} + +func (m *Response_WaitNestedContainer) ProtoSize() (n int) { + var l int + _ = l + if m.ExitStatus != nil { + n += 1 + sovAgent(uint64(*m.ExitStatus)) + } + if m.State != nil { + n += 1 + sovAgent(uint64(*m.State)) + } + if m.Reason != nil { + n += 1 + sovAgent(uint64(*m.Reason)) + } + if m.Limitation != nil { + l = m.Limitation.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.Message != nil { + l = len(*m.Message) + n += 1 + l + sovAgent(uint64(l)) + } + return n +} + +func (m *Response_WaitContainer) ProtoSize() (n int) { + var l int + _ = l + if m.ExitStatus != nil { + n += 1 + sovAgent(uint64(*m.ExitStatus)) + } + if m.State != nil { + n += 1 + sovAgent(uint64(*m.State)) + } + if m.Reason != nil { + n += 1 + sovAgent(uint64(*m.Reason)) + } + if m.Limitation != nil { + l = m.Limitation.ProtoSize() + n += 1 + l + sovAgent(uint64(l)) + } + if m.Message != nil { + l = len(*m.Message) + n += 1 + l + sovAgent(uint64(l)) + } + return n +} + +func (m *ProcessIO) ProtoSize() (n int) { + var l int + _ = l + n += 1 
+	if m.Data != nil {
+		l = m.Data.ProtoSize()
+		n += 1 + l + sovAgent(uint64(l))
+	}
+	if m.Control != nil {
+		l = m.Control.ProtoSize()
+		n += 1 + l + sovAgent(uint64(l))
+	}
+	return n
+}
+
+func (m *ProcessIO_Data) ProtoSize() (n int) {
+	var l int
+	_ = l
+	n += 1 + sovAgent(uint64(m.Type))
+	if m.Data != nil {
+		l = len(m.Data)
+		n += 1 + l + sovAgent(uint64(l))
+	}
+	return n
+}
+
+func (m *ProcessIO_Control) ProtoSize() (n int) {
+	var l int
+	_ = l
+	n += 1 + sovAgent(uint64(m.Type))
+	if m.TTYInfo != nil {
+		l = m.TTYInfo.ProtoSize()
+		n += 1 + l + sovAgent(uint64(l))
+	}
+	if m.Heartbeat != nil {
+		l = m.Heartbeat.ProtoSize()
+		n += 1 + l + sovAgent(uint64(l))
+	}
+	return n
+}
+
+func (m *ProcessIO_Control_Heartbeat) ProtoSize() (n int) {
+	var l int
+	_ = l
+	if m.Interval != nil {
+		l = m.Interval.ProtoSize()
+		n += 1 + l + sovAgent(uint64(l))
+	}
+	return n
+}
+
+func sovAgent(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozAgent(x uint64) (n int) {
+	return sovAgent(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *Call) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Call{`,
+		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+		`GetMetrics:` + strings.Replace(fmt.Sprintf("%v", this.GetMetrics), "Call_GetMetrics", "Call_GetMetrics", 1) + `,`,
+		`SetLoggingLevel:` + strings.Replace(fmt.Sprintf("%v", this.SetLoggingLevel), "Call_SetLoggingLevel", "Call_SetLoggingLevel", 1) + `,`,
+		`ListFiles:` + strings.Replace(fmt.Sprintf("%v", this.ListFiles), "Call_ListFiles", "Call_ListFiles", 1) + `,`,
+		`ReadFile:` + strings.Replace(fmt.Sprintf("%v", this.ReadFile), "Call_ReadFile", "Call_ReadFile", 1) + `,`,
+		`LaunchNestedContainer:` + strings.Replace(fmt.Sprintf("%v", this.LaunchNestedContainer), "Call_LaunchNestedContainer", "Call_LaunchNestedContainer", 1) + `,`,
+		`WaitNestedContainer:` + strings.Replace(fmt.Sprintf("%v", this.WaitNestedContainer), "Call_WaitNestedContainer", "Call_WaitNestedContainer", 1) + `,`,
+		`KillNestedContainer:` + strings.Replace(fmt.Sprintf("%v", this.KillNestedContainer), "Call_KillNestedContainer", "Call_KillNestedContainer", 1) + `,`,
+		`LaunchNestedContainerSession:` + strings.Replace(fmt.Sprintf("%v", this.LaunchNestedContainerSession), "Call_LaunchNestedContainerSession", "Call_LaunchNestedContainerSession", 1) + `,`,
+		`AttachContainerInput:` + strings.Replace(fmt.Sprintf("%v", this.AttachContainerInput), "Call_AttachContainerInput", "Call_AttachContainerInput", 1) + `,`,
+		`AttachContainerOutput:` + strings.Replace(fmt.Sprintf("%v", this.AttachContainerOutput), "Call_AttachContainerOutput", "Call_AttachContainerOutput", 1) + `,`,
+		`RemoveNestedContainer:` + strings.Replace(fmt.Sprintf("%v", this.RemoveNestedContainer), "Call_RemoveNestedContainer", "Call_RemoveNestedContainer", 1) + `,`,
+		`LaunchContainer:` + strings.Replace(fmt.Sprintf("%v", this.LaunchContainer), "Call_LaunchContainer", "Call_LaunchContainer", 1) + `,`,
+		`WaitContainer:` + strings.Replace(fmt.Sprintf("%v", this.WaitContainer), "Call_WaitContainer", "Call_WaitContainer", 1) + `,`,
+		`KillContainer:` + strings.Replace(fmt.Sprintf("%v", this.KillContainer), "Call_KillContainer", "Call_KillContainer", 1) + `,`,
+		`RemoveContainer:` + strings.Replace(fmt.Sprintf("%v", this.RemoveContainer), "Call_RemoveContainer", "Call_RemoveContainer", 1) + `,`,
+		`AddResourceProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.AddResourceProviderConfig), "Call_AddResourceProviderConfig", "Call_AddResourceProviderConfig", 1) + `,`,
"Call_AddResourceProviderConfig", "Call_AddResourceProviderConfig", 1) + `,`, + `UpdateResourceProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.UpdateResourceProviderConfig), "Call_UpdateResourceProviderConfig", "Call_UpdateResourceProviderConfig", 1) + `,`, + `RemoveResourceProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.RemoveResourceProviderConfig), "Call_RemoveResourceProviderConfig", "Call_RemoveResourceProviderConfig", 1) + `,`, + `GetContainers:` + strings.Replace(fmt.Sprintf("%v", this.GetContainers), "Call_GetContainers", "Call_GetContainers", 1) + `,`, + `PruneImages:` + strings.Replace(fmt.Sprintf("%v", this.PruneImages), "Call_PruneImages", "Call_PruneImages", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Call_GetMetrics) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Call_GetMetrics{`, + `Timeout:` + strings.Replace(fmt.Sprintf("%v", this.Timeout), "DurationInfo", "mesos.DurationInfo", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Call_SetLoggingLevel) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Call_SetLoggingLevel{`, + `Level:` + fmt.Sprintf("%v", this.Level) + `,`, + `Duration:` + strings.Replace(strings.Replace(this.Duration.String(), "DurationInfo", "mesos.DurationInfo", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Call_ListFiles) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Call_ListFiles{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *Call_ReadFile) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Call_ReadFile{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Offset:` + fmt.Sprintf("%v", this.Offset) + `,`, + `Length:` + valueToStringAgent(this.Length) + `,`, + `}`, + }, "") + return s +} +func (this *Call_GetContainers) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Call_GetContainers{`, + `ShowNested:` + valueToStringAgent(this.ShowNested) + `,`, + `ShowStandalone:` + valueToStringAgent(this.ShowStandalone) + `,`, + `}`, + }, "") + return s +} +func (this *Call_LaunchNestedContainer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Call_LaunchNestedContainer{`, + `ContainerID:` + strings.Replace(strings.Replace(this.ContainerID.String(), "ContainerID", "mesos.ContainerID", 1), `&`, ``, 1) + `,`, + `Command:` + strings.Replace(fmt.Sprintf("%v", this.Command), "CommandInfo", "mesos.CommandInfo", 1) + `,`, + `Container:` + strings.Replace(fmt.Sprintf("%v", this.Container), "ContainerInfo", "mesos.ContainerInfo", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Call_WaitNestedContainer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Call_WaitNestedContainer{`, + `ContainerID:` + strings.Replace(strings.Replace(this.ContainerID.String(), "ContainerID", "mesos.ContainerID", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Call_KillNestedContainer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Call_KillNestedContainer{`, + `ContainerID:` + strings.Replace(strings.Replace(this.ContainerID.String(), "ContainerID", "mesos.ContainerID", 1), `&`, ``, 1) + `,`, + `Signal:` + valueToStringAgent(this.Signal) + `,`, + `}`, + }, "") + return s +} +func (this *Call_RemoveNestedContainer) String() string { + if this == nil { + return "nil" + } + s 
+		`ContainerID:` + strings.Replace(strings.Replace(this.ContainerID.String(), "ContainerID", "mesos.ContainerID", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Call_LaunchNestedContainerSession) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Call_LaunchNestedContainerSession{`,
+		`ContainerID:` + strings.Replace(strings.Replace(this.ContainerID.String(), "ContainerID", "mesos.ContainerID", 1), `&`, ``, 1) + `,`,
+		`Command:` + strings.Replace(fmt.Sprintf("%v", this.Command), "CommandInfo", "mesos.CommandInfo", 1) + `,`,
+		`Container:` + strings.Replace(fmt.Sprintf("%v", this.Container), "ContainerInfo", "mesos.ContainerInfo", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Call_AttachContainerInput) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Call_AttachContainerInput{`,
+		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+		`ContainerID:` + strings.Replace(fmt.Sprintf("%v", this.ContainerID), "ContainerID", "mesos.ContainerID", 1) + `,`,
+		`ProcessIO:` + strings.Replace(fmt.Sprintf("%v", this.ProcessIO), "ProcessIO", "ProcessIO", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Call_AttachContainerOutput) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Call_AttachContainerOutput{`,
+		`ContainerID:` + strings.Replace(strings.Replace(this.ContainerID.String(), "ContainerID", "mesos.ContainerID", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Call_LaunchContainer) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Call_LaunchContainer{`,
+		`ContainerID:` + strings.Replace(strings.Replace(this.ContainerID.String(), "ContainerID", "mesos.ContainerID", 1), `&`, ``, 1) + `,`,
+		`Command:` + strings.Replace(fmt.Sprintf("%v", this.Command), "CommandInfo", "mesos.CommandInfo", 1) + `,`,
+		`Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "Resource", "mesos.Resource", 1), `&`, ``, 1) + `,`,
+		`Container:` + strings.Replace(fmt.Sprintf("%v", this.Container), "ContainerInfo", "mesos.ContainerInfo", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Call_WaitContainer) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Call_WaitContainer{`,
+		`ContainerID:` + strings.Replace(strings.Replace(this.ContainerID.String(), "ContainerID", "mesos.ContainerID", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Call_KillContainer) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Call_KillContainer{`,
+		`ContainerID:` + strings.Replace(strings.Replace(this.ContainerID.String(), "ContainerID", "mesos.ContainerID", 1), `&`, ``, 1) + `,`,
+		`Signal:` + valueToStringAgent(this.Signal) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Call_RemoveContainer) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Call_RemoveContainer{`,
+		`ContainerID:` + strings.Replace(strings.Replace(this.ContainerID.String(), "ContainerID", "mesos.ContainerID", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Call_AddResourceProviderConfig) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Call_AddResourceProviderConfig{`,
+		`Info:` + strings.Replace(strings.Replace(this.Info.String(), "ResourceProviderInfo", "mesos.ResourceProviderInfo", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
"") + return s +} +func (this *Call_UpdateResourceProviderConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Call_UpdateResourceProviderConfig{`, + `Info:` + strings.Replace(strings.Replace(this.Info.String(), "ResourceProviderInfo", "mesos.ResourceProviderInfo", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Call_RemoveResourceProviderConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Call_RemoveResourceProviderConfig{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *Call_PruneImages) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Call_PruneImages{`, + `ExcludedImages:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExcludedImages), "Image", "mesos.Image", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Response) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Response{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `GetHealth:` + strings.Replace(fmt.Sprintf("%v", this.GetHealth), "Response_GetHealth", "Response_GetHealth", 1) + `,`, + `GetFlags:` + strings.Replace(fmt.Sprintf("%v", this.GetFlags), "Response_GetFlags", "Response_GetFlags", 1) + `,`, + `GetVersion:` + strings.Replace(fmt.Sprintf("%v", this.GetVersion), "Response_GetVersion", "Response_GetVersion", 1) + `,`, + `GetMetrics:` + strings.Replace(fmt.Sprintf("%v", this.GetMetrics), "Response_GetMetrics", "Response_GetMetrics", 1) + `,`, + `GetLoggingLevel:` + strings.Replace(fmt.Sprintf("%v", this.GetLoggingLevel), "Response_GetLoggingLevel", "Response_GetLoggingLevel", 1) + `,`, + `ListFiles:` + strings.Replace(fmt.Sprintf("%v", this.ListFiles), "Response_ListFiles", "Response_ListFiles", 1) + `,`, + `ReadFile:` + strings.Replace(fmt.Sprintf("%v", this.ReadFile), "Response_ReadFile", "Response_ReadFile", 1) + `,`, + `GetState:` + strings.Replace(fmt.Sprintf("%v", this.GetState), "Response_GetState", "Response_GetState", 1) + `,`, + `GetContainers:` + strings.Replace(fmt.Sprintf("%v", this.GetContainers), "Response_GetContainers", "Response_GetContainers", 1) + `,`, + `GetFrameworks:` + strings.Replace(fmt.Sprintf("%v", this.GetFrameworks), "Response_GetFrameworks", "Response_GetFrameworks", 1) + `,`, + `GetExecutors:` + strings.Replace(fmt.Sprintf("%v", this.GetExecutors), "Response_GetExecutors", "Response_GetExecutors", 1) + `,`, + `GetTasks:` + strings.Replace(fmt.Sprintf("%v", this.GetTasks), "Response_GetTasks", "Response_GetTasks", 1) + `,`, + `WaitNestedContainer:` + strings.Replace(fmt.Sprintf("%v", this.WaitNestedContainer), "Response_WaitNestedContainer", "Response_WaitNestedContainer", 1) + `,`, + `GetAgent:` + strings.Replace(fmt.Sprintf("%v", this.GetAgent), "Response_GetAgent", "Response_GetAgent", 1) + `,`, + `WaitContainer:` + strings.Replace(fmt.Sprintf("%v", this.WaitContainer), "Response_WaitContainer", "Response_WaitContainer", 1) + `,`, + `GetResourceProviders:` + strings.Replace(fmt.Sprintf("%v", this.GetResourceProviders), "Response_GetResourceProviders", "Response_GetResourceProviders", 1) + `,`, + `GetOperations:` + strings.Replace(fmt.Sprintf("%v", this.GetOperations), "Response_GetOperations", "Response_GetOperations", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Response_GetHealth) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&Response_GetHealth{`, + `Healthy:` + fmt.Sprintf("%v", this.Healthy) + `,`, + `}`, + }, "") + return s +} +func (this *Response_GetFlags) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Response_GetFlags{`, + `Flags:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Flags), "Flag", "mesos.Flag", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Response_GetVersion) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Response_GetVersion{`, + `VersionInfo:` + strings.Replace(strings.Replace(this.VersionInfo.String(), "VersionInfo", "mesos.VersionInfo", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Response_GetMetrics) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Response_GetMetrics{`, + `Metrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Metrics), "Metric", "mesos.Metric", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Response_GetLoggingLevel) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Response_GetLoggingLevel{`, + `Level:` + fmt.Sprintf("%v", this.Level) + `,`, + `}`, + }, "") + return s +} +func (this *Response_ListFiles) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Response_ListFiles{`, + `FileInfos:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.FileInfos), "FileInfo", "mesos.FileInfo", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Response_ReadFile) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Response_ReadFile{`, + `Size:` + fmt.Sprintf("%v", this.Size) + `,`, + `Data:` + valueToStringAgent(this.Data) + `,`, + `}`, + }, "") + return s +} +func (this *Response_GetState) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Response_GetState{`, + `GetTasks:` + strings.Replace(fmt.Sprintf("%v", this.GetTasks), "Response_GetTasks", "Response_GetTasks", 1) + `,`, + `GetExecutors:` + strings.Replace(fmt.Sprintf("%v", this.GetExecutors), "Response_GetExecutors", "Response_GetExecutors", 1) + `,`, + `GetFrameworks:` + strings.Replace(fmt.Sprintf("%v", this.GetFrameworks), "Response_GetFrameworks", "Response_GetFrameworks", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Response_GetContainers) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Response_GetContainers{`, + `Containers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Containers), "Response_GetContainers_Container", "Response_GetContainers_Container", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Response_GetContainers_Container) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Response_GetContainers_Container{`, + `FrameworkID:` + strings.Replace(fmt.Sprintf("%v", this.FrameworkID), "FrameworkID", "mesos.FrameworkID", 1) + `,`, + `ExecutorID:` + strings.Replace(fmt.Sprintf("%v", this.ExecutorID), "ExecutorID", "mesos.ExecutorID", 1) + `,`, + `ExecutorName:` + valueToStringAgent(this.ExecutorName) + `,`, + `ContainerID:` + strings.Replace(strings.Replace(this.ContainerID.String(), "ContainerID", "mesos.ContainerID", 1), `&`, ``, 1) + `,`, + `ContainerStatus:` + strings.Replace(fmt.Sprintf("%v", this.ContainerStatus), "ContainerStatus", "mesos.ContainerStatus", 1) + `,`, + `ResourceStatistics:` + 
+		`}`,
+	}, "")
+	return s
+}
+func (this *Response_GetFrameworks) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Response_GetFrameworks{`,
+		`Frameworks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Frameworks), "Response_GetFrameworks_Framework", "Response_GetFrameworks_Framework", 1), `&`, ``, 1) + `,`,
+		`CompletedFrameworks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CompletedFrameworks), "Response_GetFrameworks_Framework", "Response_GetFrameworks_Framework", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Response_GetFrameworks_Framework) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Response_GetFrameworks_Framework{`,
+		`FrameworkInfo:` + strings.Replace(strings.Replace(this.FrameworkInfo.String(), "FrameworkInfo", "mesos.FrameworkInfo", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Response_GetExecutors) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Response_GetExecutors{`,
+		`Executors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Executors), "Response_GetExecutors_Executor", "Response_GetExecutors_Executor", 1), `&`, ``, 1) + `,`,
+		`CompletedExecutors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CompletedExecutors), "Response_GetExecutors_Executor", "Response_GetExecutors_Executor", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Response_GetExecutors_Executor) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Response_GetExecutors_Executor{`,
+		`ExecutorInfo:` + strings.Replace(strings.Replace(this.ExecutorInfo.String(), "ExecutorInfo", "mesos.ExecutorInfo", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Response_GetOperations) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Response_GetOperations{`,
+		`Operations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Operations), "Operation", "mesos.Operation", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Response_GetTasks) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Response_GetTasks{`,
+		`PendingTasks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PendingTasks), "Task", "mesos.Task", 1), `&`, ``, 1) + `,`,
+		`QueuedTasks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.QueuedTasks), "Task", "mesos.Task", 1), `&`, ``, 1) + `,`,
+		`LaunchedTasks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LaunchedTasks), "Task", "mesos.Task", 1), `&`, ``, 1) + `,`,
+		`TerminatedTasks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TerminatedTasks), "Task", "mesos.Task", 1), `&`, ``, 1) + `,`,
+		`CompletedTasks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CompletedTasks), "Task", "mesos.Task", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Response_GetAgent) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Response_GetAgent{`,
+		`AgentInfo:` + strings.Replace(fmt.Sprintf("%v", this.AgentInfo), "AgentInfo", "mesos.AgentInfo", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Response_GetResourceProviders) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Response_GetResourceProviders{`,
strings.Join([]string{`&Response_GetResourceProviders{`, + `ResourceProviders:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ResourceProviders), "Response_GetResourceProviders_ResourceProvider", "Response_GetResourceProviders_ResourceProvider", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Response_GetResourceProviders_ResourceProvider) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Response_GetResourceProviders_ResourceProvider{`, + `ResourceProviderInfo:` + strings.Replace(strings.Replace(this.ResourceProviderInfo.String(), "ResourceProviderInfo", "mesos.ResourceProviderInfo", 1), `&`, ``, 1) + `,`, + `TotalResources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TotalResources), "Resource", "mesos.Resource", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Response_WaitNestedContainer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Response_WaitNestedContainer{`, + `ExitStatus:` + valueToStringAgent(this.ExitStatus) + `,`, + `State:` + valueToStringAgent(this.State) + `,`, + `Reason:` + valueToStringAgent(this.Reason) + `,`, + `Limitation:` + strings.Replace(fmt.Sprintf("%v", this.Limitation), "TaskResourceLimitation", "mesos.TaskResourceLimitation", 1) + `,`, + `Message:` + valueToStringAgent(this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *Response_WaitContainer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Response_WaitContainer{`, + `ExitStatus:` + valueToStringAgent(this.ExitStatus) + `,`, + `State:` + valueToStringAgent(this.State) + `,`, + `Reason:` + valueToStringAgent(this.Reason) + `,`, + `Limitation:` + strings.Replace(fmt.Sprintf("%v", this.Limitation), "TaskResourceLimitation", "mesos.TaskResourceLimitation", 1) + `,`, + `Message:` + valueToStringAgent(this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ProcessIO) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProcessIO{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Data:` + strings.Replace(fmt.Sprintf("%v", this.Data), "ProcessIO_Data", "ProcessIO_Data", 1) + `,`, + `Control:` + strings.Replace(fmt.Sprintf("%v", this.Control), "ProcessIO_Control", "ProcessIO_Control", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ProcessIO_Data) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProcessIO_Data{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Data:` + valueToStringAgent(this.Data) + `,`, + `}`, + }, "") + return s +} +func (this *ProcessIO_Control) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProcessIO_Control{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `TTYInfo:` + strings.Replace(fmt.Sprintf("%v", this.TTYInfo), "TTYInfo", "mesos.TTYInfo", 1) + `,`, + `Heartbeat:` + strings.Replace(fmt.Sprintf("%v", this.Heartbeat), "ProcessIO_Control_Heartbeat", "ProcessIO_Control_Heartbeat", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ProcessIO_Control_Heartbeat) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProcessIO_Control_Heartbeat{`, + `Interval:` + strings.Replace(fmt.Sprintf("%v", this.Interval), "DurationInfo", "mesos.DurationInfo", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringAgent(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + 
return fmt.Sprintf("*%v", pv) +} +func (m *Call) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Call: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Call: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (Call_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GetMetrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GetMetrics == nil { + m.GetMetrics = &Call_GetMetrics{} + } + if err := m.GetMetrics.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SetLoggingLevel", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SetLoggingLevel == nil { + m.SetLoggingLevel = &Call_SetLoggingLevel{} + } + if err := m.SetLoggingLevel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListFiles", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ListFiles == nil { + m.ListFiles = &Call_ListFiles{} + } + if err := m.ListFiles.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadFile", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ReadFile == nil {
+				m.ReadFile = &Call_ReadFile{}
+			}
+			if err := m.ReadFile.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LaunchNestedContainer", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthAgent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.LaunchNestedContainer == nil {
+				m.LaunchNestedContainer = &Call_LaunchNestedContainer{}
+			}
+			if err := m.LaunchNestedContainer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field WaitNestedContainer", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthAgent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.WaitNestedContainer == nil {
+				m.WaitNestedContainer = &Call_WaitNestedContainer{}
+			}
+			if err := m.WaitNestedContainer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field KillNestedContainer", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthAgent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.KillNestedContainer == nil {
+				m.KillNestedContainer = &Call_KillNestedContainer{}
+			}
+			if err := m.KillNestedContainer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LaunchNestedContainerSession", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthAgent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.LaunchNestedContainerSession == nil {
+				m.LaunchNestedContainerSession = &Call_LaunchNestedContainerSession{}
+			}
+			if err := m.LaunchNestedContainerSession.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AttachContainerInput", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthAgent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AttachContainerInput == nil {
+				m.AttachContainerInput = &Call_AttachContainerInput{}
+			}
+			if err := m.AttachContainerInput.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 11:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AttachContainerOutput", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthAgent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AttachContainerOutput == nil {
+				m.AttachContainerOutput = &Call_AttachContainerOutput{}
+			}
+			if err := m.AttachContainerOutput.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 12:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RemoveNestedContainer", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthAgent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.RemoveNestedContainer == nil {
+				m.RemoveNestedContainer = &Call_RemoveNestedContainer{}
+			}
+			if err := m.RemoveNestedContainer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 13:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LaunchContainer", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthAgent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.LaunchContainer == nil {
+				m.LaunchContainer = &Call_LaunchContainer{}
+			}
+			if err := m.LaunchContainer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 14:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field WaitContainer", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthAgent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.WaitContainer == nil {
+				m.WaitContainer = &Call_WaitContainer{}
+			}
+			if err := m.WaitContainer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 15:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field KillContainer", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthAgent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.KillContainer == nil {
+				m.KillContainer = &Call_KillContainer{}
+			}
+			if err := m.KillContainer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 16:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RemoveContainer", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthAgent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.RemoveContainer == nil {
+				m.RemoveContainer = &Call_RemoveContainer{}
+			}
+			if err := m.RemoveContainer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 17:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AddResourceProviderConfig", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthAgent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AddResourceProviderConfig == nil {
+				m.AddResourceProviderConfig = &Call_AddResourceProviderConfig{}
+			}
+			if err := m.AddResourceProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 18:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UpdateResourceProviderConfig", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthAgent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.UpdateResourceProviderConfig == nil {
+				m.UpdateResourceProviderConfig = &Call_UpdateResourceProviderConfig{}
+			}
+			if err := m.UpdateResourceProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 19:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RemoveResourceProviderConfig", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthAgent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.RemoveResourceProviderConfig == nil {
+				m.RemoveResourceProviderConfig = &Call_RemoveResourceProviderConfig{}
+			}
+			if err := m.RemoveResourceProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 20:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GetContainers", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthAgent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.GetContainers == nil {
+				m.GetContainers = &Call_GetContainers{}
+			}
+			if err := m.GetContainers.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 21:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PruneImages", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthAgent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.PruneImages == nil {
+				m.PruneImages = &Call_PruneImages{}
+			}
+			if err := m.PruneImages.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipAgent(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthAgent
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Call_GetMetrics) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowAgent
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GetMetrics: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GetMetrics: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Timeout", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthAgent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Timeout == nil {
+				m.Timeout = &mesos.DurationInfo{}
+			}
+			if err := m.Timeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipAgent(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthAgent
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Call_SetLoggingLevel) Unmarshal(dAtA []byte) error {
+	var hasFields [1]uint64
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowAgent
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SetLoggingLevel: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SetLoggingLevel: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType)
+			}
+			m.Level = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Level |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			hasFields[0] |= uint64(0x00000001)
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthAgent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Duration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+			hasFields[0] |= uint64(0x00000002)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipAgent(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthAgent
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+	if hasFields[0]&uint64(0x00000001) == 0 {
+		return github_com_gogo_protobuf_proto.NewRequiredNotSetError("level")
+	}
+	if hasFields[0]&uint64(0x00000002) == 0 {
+		return github_com_gogo_protobuf_proto.NewRequiredNotSetError("duration")
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Call_ListFiles) Unmarshal(dAtA []byte) error {
+	var hasFields [1]uint64
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowAgent
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ListFiles: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ListFiles: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthAgent
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Path = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+			hasFields[0] |= uint64(0x00000001)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipAgent(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthAgent
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+	if hasFields[0]&uint64(0x00000001) == 0 {
+		return github_com_gogo_protobuf_proto.NewRequiredNotSetError("path")
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Call_ReadFile) Unmarshal(dAtA []byte) error {
+	var hasFields [1]uint64
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowAgent
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ReadFile: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ReadFile: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthAgent
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Path = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+			hasFields[0] |= uint64(0x00000001)
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType)
+			}
+			m.Offset = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Offset |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			hasFields[0] |= uint64(0x00000002)
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType)
+			}
+			var v uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Length = &v
+		default:
+			iNdEx = preIndex
+			skippy, err := skipAgent(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthAgent
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+	if hasFields[0]&uint64(0x00000001) == 0 {
+		return github_com_gogo_protobuf_proto.NewRequiredNotSetError("path")
+	}
+	if hasFields[0]&uint64(0x00000002) == 0 {
+		return github_com_gogo_protobuf_proto.NewRequiredNotSetError("offset")
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Call_GetContainers) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowAgent
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GetContainers: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GetContainers: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ShowNested", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.ShowNested = &b
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ShowStandalone", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.ShowStandalone = &b
+		default:
+			iNdEx = preIndex
+			skippy, err := skipAgent(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthAgent
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Call_LaunchNestedContainer) Unmarshal(dAtA []byte) error {
+	var hasFields [1]uint64
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowAgent
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LaunchNestedContainer: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LaunchNestedContainer: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthAgent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ContainerID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+			hasFields[0] |= uint64(0x00000001)
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthAgent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Command == nil {
+				m.Command = &mesos.CommandInfo{}
+			}
+			if err := m.Command.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthAgent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Container == nil {
+				m.Container = &mesos.ContainerInfo{}
+			}
+			if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipAgent(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthAgent
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+	if hasFields[0]&uint64(0x00000001) == 0 {
+		return github_com_gogo_protobuf_proto.NewRequiredNotSetError("container_id")
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Call_WaitNestedContainer) Unmarshal(dAtA []byte) error {
+	var hasFields [1]uint64
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowAgent
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: WaitNestedContainer: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: WaitNestedContainer: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthAgent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ContainerID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+			hasFields[0] |= uint64(0x00000001)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipAgent(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthAgent
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+	if hasFields[0]&uint64(0x00000001) == 0 {
+		return github_com_gogo_protobuf_proto.NewRequiredNotSetError("container_id")
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Call_KillNestedContainer) Unmarshal(dAtA []byte) error {
+	var hasFields [1]uint64
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowAgent
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: KillNestedContainer: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: KillNestedContainer: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthAgent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ContainerID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+			hasFields[0] |= uint64(0x00000001)
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Signal", wireType)
+			}
+			var v int32
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Signal = &v
+		default:
+			iNdEx = preIndex
+			skippy, err := skipAgent(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthAgent
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+	if hasFields[0]&uint64(0x00000001) == 0 {
+		return github_com_gogo_protobuf_proto.NewRequiredNotSetError("container_id")
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Call_RemoveNestedContainer) Unmarshal(dAtA []byte) error {
+	var hasFields [1]uint64
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowAgent
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: RemoveNestedContainer: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: RemoveNestedContainer: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthAgent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ContainerID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+			hasFields[0] |= uint64(0x00000001)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipAgent(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthAgent
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+	if hasFields[0]&uint64(0x00000001) == 0 {
+		return github_com_gogo_protobuf_proto.NewRequiredNotSetError("container_id")
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Call_LaunchNestedContainerSession) Unmarshal(dAtA []byte) error {
+	var hasFields [1]uint64
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LaunchNestedContainerSession: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LaunchNestedContainerSession: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ContainerID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Command == nil { + m.Command = &mesos.CommandInfo{} + } + if err := m.Command.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Container == nil { + m.Container = &mesos.ContainerInfo{} + } + if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("container_id") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Call_AttachContainerInput) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AttachContainerInput: wiretype end group for non-group") + 
} + if fieldNum <= 0 { + return fmt.Errorf("proto: AttachContainerInput: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (Call_AttachContainerInput_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ContainerID == nil { + m.ContainerID = &mesos.ContainerID{} + } + if err := m.ContainerID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessIO", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProcessIO == nil { + m.ProcessIO = &ProcessIO{} + } + if err := m.ProcessIO.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Call_AttachContainerOutput) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AttachContainerOutput: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AttachContainerOutput: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ContainerID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + 
} + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("container_id") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Call_LaunchContainer) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LaunchContainer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LaunchContainer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ContainerID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Command == nil { + m.Command = &mesos.CommandInfo{} + } + if err := m.Command.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, mesos.Resource{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Container == nil { + m.Container = &mesos.ContainerInfo{} + } + if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("container_id") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Call_WaitContainer) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WaitContainer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WaitContainer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ContainerID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("container_id") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Call_KillContainer) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KillContainer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KillContainer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ContainerID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Signal", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Signal = &v + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("container_id") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Call_RemoveContainer) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveContainer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveContainer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ContainerID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("container_id") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Call_AddResourceProviderConfig) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + 
wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AddResourceProviderConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AddResourceProviderConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("info") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Call_UpdateResourceProviderConfig) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateResourceProviderConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateResourceProviderConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("info") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Call_RemoveResourceProviderConfig) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveResourceProviderConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveResourceProviderConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000002) + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("type") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("name") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Call_PruneImages) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PruneImages: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PruneImages: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExcludedImages", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExcludedImages = append(m.ExcludedImages, mesos.Image{}) + if err := 
m.ExcludedImages[len(m.ExcludedImages)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Response) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Response: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Response: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (Response_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GetHealth", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GetHealth == nil { + m.GetHealth = &Response_GetHealth{} + } + if err := m.GetHealth.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GetFlags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GetFlags == nil { + m.GetFlags = &Response_GetFlags{} + } + if err := m.GetFlags.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GetVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GetVersion == nil { + m.GetVersion = &Response_GetVersion{} + } + if err := m.GetVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GetMetrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GetMetrics == nil { + m.GetMetrics = &Response_GetMetrics{} + } + if err := m.GetMetrics.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GetLoggingLevel", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GetLoggingLevel == nil { + m.GetLoggingLevel = &Response_GetLoggingLevel{} + } + if err := m.GetLoggingLevel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListFiles", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ListFiles == nil { + m.ListFiles = &Response_ListFiles{} + } + if err := m.ListFiles.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadFile", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ReadFile == nil { + m.ReadFile = &Response_ReadFile{} + } + if err := m.ReadFile.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GetState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GetState == nil { + m.GetState = &Response_GetState{} + } + if err := m.GetState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GetContainers", wireType) + } 
+ var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GetContainers == nil { + m.GetContainers = &Response_GetContainers{} + } + if err := m.GetContainers.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GetFrameworks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GetFrameworks == nil { + m.GetFrameworks = &Response_GetFrameworks{} + } + if err := m.GetFrameworks.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GetExecutors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GetExecutors == nil { + m.GetExecutors = &Response_GetExecutors{} + } + if err := m.GetExecutors.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GetTasks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GetTasks == nil { + m.GetTasks = &Response_GetTasks{} + } + if err := m.GetTasks.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WaitNestedContainer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WaitNestedContainer == nil { + m.WaitNestedContainer = &Response_WaitNestedContainer{} + } + if err := m.WaitNestedContainer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GetAgent", wireType) + } + var msglen int + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GetAgent == nil { + m.GetAgent = &Response_GetAgent{} + } + if err := m.GetAgent.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WaitContainer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WaitContainer == nil { + m.WaitContainer = &Response_WaitContainer{} + } + if err := m.WaitContainer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GetResourceProviders", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GetResourceProviders == nil { + m.GetResourceProviders = &Response_GetResourceProviders{} + } + if err := m.GetResourceProviders.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GetOperations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GetOperations == nil { + m.GetOperations = &Response_GetOperations{} + } + if err := m.GetOperations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Response_GetHealth) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetHealth: wiretype end 
group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetHealth: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Healthy", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Healthy = bool(v != 0) + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("healthy") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Response_GetFlags) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetFlags: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetFlags: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Flags = append(m.Flags, mesos.Flag{}) + if err := m.Flags[len(m.Flags)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Response_GetVersion) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetVersion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetVersion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VersionInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.VersionInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("version_info") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Response_GetMetrics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetMetrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metrics = append(m.Metrics, mesos.Metric{}) + if err := m.Metrics[len(m.Metrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Response_GetLoggingLevel) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetLoggingLevel: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetLoggingLevel: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + } + m.Level = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Level |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("level") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Response_ListFiles) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListFiles: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListFiles: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FileInfos", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FileInfos = append(m.FileInfos, mesos.FileInfo{}) + if err := m.FileInfos[len(m.FileInfos)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Response_ReadFile) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadFile: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadFile: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Size", wireType) + } + m.Size = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Size |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000002) + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("size") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("data") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Response_GetState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GetTasks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GetTasks == nil { + m.GetTasks = &Response_GetTasks{} + } + if err := m.GetTasks.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GetExecutors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GetExecutors == nil { + m.GetExecutors = &Response_GetExecutors{} + } + if err := m.GetExecutors.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GetFrameworks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GetFrameworks == nil { + m.GetFrameworks = &Response_GetFrameworks{} + } + if err := m.GetFrameworks.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Response_GetContainers) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetContainers: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetContainers: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Containers = append(m.Containers, Response_GetContainers_Container{}) + if err := m.Containers[len(m.Containers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Response_GetContainers_Container) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Container: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Container: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FrameworkID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.FrameworkID == nil { + m.FrameworkID = &mesos.FrameworkID{} + } + if err := m.FrameworkID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecutorID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExecutorID == nil { + m.ExecutorID = &mesos.ExecutorID{} + } + if err := m.ExecutorID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecutorName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ExecutorName = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ContainerID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ContainerStatus == nil { + m.ContainerStatus = &mesos.ContainerStatus{} + } + if err := m.ContainerStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceStatistics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceStatistics == nil { + m.ResourceStatistics = &mesos.ResourceStatistics{} + } + if err := 
m.ResourceStatistics.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("container_id") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Response_GetFrameworks) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetFrameworks: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetFrameworks: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Frameworks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Frameworks = append(m.Frameworks, Response_GetFrameworks_Framework{}) + if err := m.Frameworks[len(m.Frameworks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletedFrameworks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CompletedFrameworks = append(m.CompletedFrameworks, Response_GetFrameworks_Framework{}) + if err := m.CompletedFrameworks[len(m.CompletedFrameworks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Response_GetFrameworks_Framework) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: Framework: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Framework: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FrameworkInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.FrameworkInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("framework_info") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Response_GetExecutors) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetExecutors: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetExecutors: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Executors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Executors = append(m.Executors, Response_GetExecutors_Executor{}) + if err := m.Executors[len(m.Executors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletedExecutors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CompletedExecutors = append(m.CompletedExecutors, Response_GetExecutors_Executor{}) + if err := m.CompletedExecutors[len(m.CompletedExecutors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return 
err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Response_GetExecutors_Executor) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Executor: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Executor: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecutorInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ExecutorInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("executor_info") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Response_GetOperations) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetOperations: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetOperations: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operations = append(m.Operations, mesos.Operation{}) + if err := m.Operations[len(m.Operations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + 
return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Response_GetTasks) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetTasks: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetTasks: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PendingTasks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PendingTasks = append(m.PendingTasks, mesos.Task{}) + if err := m.PendingTasks[len(m.PendingTasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field QueuedTasks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.QueuedTasks = append(m.QueuedTasks, mesos.Task{}) + if err := m.QueuedTasks[len(m.QueuedTasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LaunchedTasks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LaunchedTasks = append(m.LaunchedTasks, mesos.Task{}) + if err := m.LaunchedTasks[len(m.LaunchedTasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminatedTasks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TerminatedTasks = append(m.TerminatedTasks, mesos.Task{}) + if 
err := m.TerminatedTasks[len(m.TerminatedTasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletedTasks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CompletedTasks = append(m.CompletedTasks, mesos.Task{}) + if err := m.CompletedTasks[len(m.CompletedTasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Response_GetAgent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetAgent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetAgent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AgentInfo == nil { + m.AgentInfo = &mesos.AgentInfo{} + } + if err := m.AgentInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Response_GetResourceProviders) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetResourceProviders: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetResourceProviders: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 
1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceProviders", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceProviders = append(m.ResourceProviders, Response_GetResourceProviders_ResourceProvider{}) + if err := m.ResourceProviders[len(m.ResourceProviders)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Response_GetResourceProviders_ResourceProvider) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceProvider: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceProvider: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceProviderInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ResourceProviderInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TotalResources = append(m.TotalResources, mesos.Resource{}) + if err := m.TotalResources[len(m.TotalResources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return 
github_com_gogo_protobuf_proto.NewRequiredNotSetError("resource_provider_info") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Response_WaitNestedContainer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WaitNestedContainer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WaitNestedContainer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ExitStatus = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var v mesos.TaskState + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (mesos.TaskState(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.State = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var v mesos.TaskStatus_Reason + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (mesos.TaskStatus_Reason(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Reason = &v + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limitation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Limitation == nil { + m.Limitation = &mesos.TaskResourceLimitation{} + } + if err := m.Limitation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Message = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + 
} + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Response_WaitContainer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WaitContainer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WaitContainer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ExitStatus = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var v mesos.TaskState + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (mesos.TaskState(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.State = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var v mesos.TaskStatus_Reason + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (mesos.TaskStatus_Reason(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Reason = &v + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limitation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Limitation == nil { + m.Limitation = &mesos.TaskResourceLimitation{} + } + if err := m.Limitation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Message = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if 
(iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProcessIO) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProcessIO: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProcessIO: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (ProcessIO_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Data == nil { + m.Data = &ProcessIO_Data{} + } + if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Control", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Control == nil { + m.Control = &ProcessIO_Control{} + } + if err := m.Control.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProcessIO_Data) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Data: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Data: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (ProcessIO_Data_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProcessIO_Control) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Control: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Control: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (ProcessIO_Control_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TTYInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TTYInfo == nil { + m.TTYInfo = &mesos.TTYInfo{} + } + if err := m.TTYInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Heartbeat", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Heartbeat == nil { + 
m.Heartbeat = &ProcessIO_Control_Heartbeat{} + } + if err := m.Heartbeat.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProcessIO_Control_Heartbeat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Heartbeat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Heartbeat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Interval", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Interval == nil { + m.Interval = &mesos.DurationInfo{} + } + if err := m.Interval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipAgent(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAgent + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAgent + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAgent + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthAgent + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAgent + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := 
int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipAgent(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthAgent = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowAgent = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("agent/agent.proto", fileDescriptorAgent) } + +var fileDescriptorAgent = []byte{ + // 3053 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x5a, 0x4d, 0x6c, 0x1b, 0xc7, + 0xf5, 0xf7, 0x52, 0x94, 0x44, 0x3e, 0x7e, 0xad, 0x46, 0x1f, 0xa6, 0xd7, 0x0a, 0xa5, 0xbf, 0x8c, + 0x24, 0xfe, 0xd7, 0xb5, 0xe5, 0x3a, 0x89, 0x1b, 0x44, 0x89, 0x51, 0x4a, 0x5c, 0xc9, 0xb4, 0x69, + 0x52, 0x9d, 0x5d, 0xd9, 0x4d, 0xda, 0x80, 0x5d, 0x8b, 0x2b, 0x6a, 0x61, 0x92, 0xcb, 0xee, 0x2e, + 0xe5, 0xb8, 0xa7, 0x1e, 0x7b, 0x6b, 0x0f, 0x05, 0x7a, 0xe9, 0xa1, 0x40, 0x2f, 0x01, 0xda, 0x73, + 0xdb, 0x53, 0xd1, 0x4b, 0x81, 0x14, 0xe8, 0x21, 0xc7, 0x02, 0x29, 0x8c, 0x5a, 0xb9, 0xb4, 0xa7, + 0xe6, 0xd0, 0x43, 0x4f, 0x45, 0x31, 0x5f, 0xbb, 0x4b, 0xee, 0x2e, 0xed, 0xb8, 0x46, 0x8a, 0xf6, + 0x22, 0xec, 0xbc, 0xf9, 0xbd, 0x37, 0x33, 0x6f, 0x66, 0xde, 0x7b, 0xf3, 0xa3, 0x60, 0xc1, 0xe8, + 0x9a, 0x03, 0x6f, 0x93, 0xfe, 0xbd, 0x32, 0x74, 0x6c, 0xcf, 0x46, 0xb9, 0xbe, 0xe9, 0xda, 0xee, + 0x15, 0x2a, 0x52, 0xae, 0x76, 0x2d, 0xef, 0x78, 0x74, 0xff, 0xca, 0xa1, 0xdd, 0xdf, 0xa4, 0x72, + 0xf6, 0xf7, 0x72, 0xd7, 0xde, 0x34, 0x86, 0xd6, 0xe6, 0xc9, 0x57, 0x36, 0x7b, 0xd6, 0x7d, 0x26, + 0x63, 0xea, 0xca, 0xe5, 0x90, 0x46, 0xd7, 0xee, 0xda, 0x9b, 0x54, 0x7c, 0x7f, 0x74, 0x44, 0x5b, + 0xb4, 0x41, 0xbf, 0x18, 0x7c, 0xe3, 0x93, 0x57, 0x20, 0xbd, 0x63, 0xf4, 0x7a, 0xe8, 0x2a, 0xa4, + 0xbd, 0x47, 0x43, 0xb3, 0x2c, 0xad, 0x4b, 0x17, 0x8b, 0xd7, 0x56, 0xae, 0x84, 0x66, 0x71, 0x85, + 0x00, 0xae, 0xe8, 0x8f, 0x86, 0xe6, 0x76, 0xfa, 0xa3, 0xc7, 0x6b, 0x67, 0x30, 0x45, 0xa2, 0x77, + 0x20, 0xd7, 0x35, 0xbd, 0x76, 0xdf, 0xf4, 0x1c, 0xeb, 0xd0, 0x2d, 0xa7, 0xd6, 0xa5, 0x8b, 0xb9, + 0x6b, 0xab, 0x51, 0xc5, 0x3d, 0xd3, 0xbb, 0xc3, 0x30, 0x18, 0xba, 0xfe, 0x37, 0xba, 0x03, 0x0b, + 0xae, 0xe9, 0xb5, 0x7b, 0x76, 0xb7, 0x6b, 0x0d, 0xba, 0xed, 0x9e, 0x79, 0x62, 0xf6, 0xca, 0x33, + 0xd4, 0xc8, 0xff, 0x45, 0x8d, 0x68, 0xa6, 0xd7, 0x60, 0xc8, 0x06, 0x01, 0xe2, 0x92, 0x3b, 0x2e, + 0x40, 0x6f, 0x01, 0xf4, 0x2c, 0xd7, 0x6b, 0x1f, 0x59, 0x3d, 0xd3, 0x2d, 0xa7, 0xa9, 0x9d, 0xf3, + 0x51, 0x3b, 0x0d, 0xcb, 0xf5, 0x76, 0x09, 0x04, 0x67, 0x7b, 0xe2, 0x13, 0x7d, 0x15, 0xb2, 0x8e, + 0x69, 0x74, 0xa8, 0x6e, 0x79, 0x96, 0xaa, 0x2a, 0x51, 0x55, 0x6c, 0x1a, 0x1d, 0x82, 0xc7, 0x19, + 0x87, 0x7f, 0xa1, 0x43, 0x38, 0xdb, 0x33, 0x46, 0x83, 0xc3, 0xe3, 0xf6, 0xc0, 0x74, 0x3d, 0xb3, + 0xd3, 0x3e, 0xb4, 0x07, 0x9e, 0x61, 0x0d, 0x4c, 0xa7, 0x3c, 0x47, 0xcd, 0xbc, 0x1a, 0x33, 0x03, + 0xaa, 0xd0, 0xa4, 0xf8, 0x1d, 0x01, 0xdf, 0x4e, 0x95, 0x25, 0xbc, 0xdc, 0x8b, 0xeb, 0x42, 0xef, + 0xc3, 0xf2, 0x43, 0xc3, 0xf2, 0xa2, 0x43, 0xcc, 0xd3, 0x21, 0x5e, 0x8e, 0x0e, 0x71, 0xcf, 0xb0, + 0xbc, 0xb8, 0x01, 0x16, 0x1f, 0x46, 0x3b, 0x88, 0xf9, 0x07, 0x56, 0xaf, 0x17, 0x35, 0x9f, 0x49, + 0x32, 0x7f, 0xdb, 0xea, 0xf5, 0x62, 0xcd, 0x3f, 0x88, 0x76, 0xa0, 0x11, 0xac, 0x25, 0xb8, 0xa8, + 0xed, 0x9a, 0xae, 0x6b, 0xd9, 0x83, 0x72, 0x96, 0x0e, 0x74, 0xe5, 0x19, 0x5d, 0xa5, 0x31, 0x2d, + 0xbc, 0xda, 0x9b, 
0xd2, 0x8b, 0xbe, 0x05, 0x2b, 0x86, 0xe7, 0x19, 0x87, 0xc7, 0xa1, 0xf1, 0xac, + 0xc1, 0x70, 0xe4, 0x95, 0x81, 0x8e, 0xf6, 0x4a, 0x74, 0xb4, 0x2a, 0xc5, 0xfb, 0x96, 0xea, 0x04, + 0x8d, 0x97, 0x8c, 0x18, 0x29, 0x6a, 0xc3, 0xd9, 0x88, 0x75, 0x7b, 0xe4, 0x11, 0xf3, 0xb9, 0xa4, + 0x7d, 0x9f, 0x30, 0xdf, 0xa2, 0x70, 0xbc, 0x6c, 0xc4, 0x89, 0xc9, 0xc1, 0x72, 0xcc, 0xbe, 0x7d, + 0x62, 0x46, 0xb7, 0x25, 0x9f, 0x34, 0x00, 0xa6, 0x0a, 0xb1, 0x07, 0xcb, 0x89, 0xeb, 0x42, 0x0d, + 0x90, 0xf9, 0xd6, 0x04, 0xd6, 0x0b, 0x49, 0x17, 0x90, 0xed, 0x85, 0xaf, 0x8c, 0x4b, 0xbd, 0x71, + 0x01, 0xda, 0x85, 0x22, 0x3d, 0xa6, 0x81, 0xad, 0x22, 0xb5, 0xb5, 0x16, 0x7f, 0x3e, 0x03, 0x4b, + 0x85, 0x87, 0xe1, 0x26, 0xb1, 0x43, 0xcf, 0x63, 0x60, 0xa7, 0x94, 0x64, 0x87, 0x1c, 0xc4, 0x90, + 0x9d, 0x07, 0xe1, 0x26, 0x59, 0x1d, 0x77, 0x61, 0x60, 0x49, 0x4e, 0x5a, 0x1d, 0xf3, 0x5d, 0x68, + 0x75, 0xce, 0xb8, 0x00, 0xf5, 0x60, 0xd5, 0xe8, 0x74, 0xda, 0x8e, 0xe9, 0xda, 0x23, 0xe7, 0xd0, + 0x6c, 0x0f, 0x1d, 0xfb, 0xc4, 0xea, 0x98, 0x0e, 0x31, 0x7e, 0x64, 0x75, 0xcb, 0x0b, 0xd4, 0xf2, + 0xa5, 0x98, 0x6d, 0xef, 0x74, 0x30, 0x57, 0xda, 0xe7, 0x3a, 0x3b, 0x54, 0x05, 0x9f, 0x33, 0x92, + 0xba, 0xc8, 0xa5, 0x19, 0x0d, 0x3b, 0x86, 0x67, 0x26, 0x0f, 0x88, 0x92, 0x2e, 0xcd, 0x01, 0x55, + 0x4c, 0x18, 0x73, 0x75, 0x34, 0xa5, 0x97, 0x0c, 0xcb, 0x5d, 0x96, 0x38, 0xec, 0x62, 0xd2, 0xb0, + 0xcc, 0x83, 0x49, 0xc3, 0x3a, 0x53, 0x7a, 0xc9, 0x8e, 0x93, 0x44, 0xe2, 0x6f, 0x93, 0x5b, 0x5e, + 0x4a, 0xda, 0xf1, 0x3d, 0x33, 0x38, 0x29, 0x2e, 0x2e, 0x74, 0xc3, 0x4d, 0xf4, 0x35, 0xc8, 0x0f, + 0x9d, 0xd1, 0xc0, 0x6c, 0x5b, 0x7d, 0xa3, 0x6b, 0xba, 0xe5, 0x65, 0x6a, 0xe5, 0xa5, 0xa8, 0x95, + 0x7d, 0x82, 0xaa, 0x53, 0x10, 0xce, 0x0d, 0x83, 0x86, 0xb2, 0x05, 0x10, 0x64, 0x2b, 0x74, 0x19, + 0xe6, 0x3d, 0xab, 0x6f, 0xda, 0x23, 0x8f, 0x66, 0xc5, 0xdc, 0xb5, 0x45, 0x6e, 0xaa, 0x36, 0x72, + 0x0c, 0xcf, 0xb2, 0x07, 0xf5, 0xc1, 0x91, 0x8d, 0x05, 0x46, 0xe9, 0x40, 0x69, 0x22, 0x4b, 0x21, + 0x05, 0x66, 0x59, 0x5e, 0x93, 0xd6, 0x53, 0x17, 0x0b, 0x3c, 0x7b, 0x32, 0x11, 0x7a, 0x03, 0x32, + 0x1d, 0x6e, 0xa7, 0x9c, 0x5a, 0x4f, 0x25, 0x98, 0xe7, 0x3a, 0x3e, 0x54, 0x79, 0x19, 0xb2, 0x7e, + 0x0e, 0x43, 0x65, 0x48, 0x0f, 0x0d, 0xef, 0x98, 0x9a, 0xcf, 0x8a, 0xe4, 0x4c, 0x24, 0xca, 0x7b, + 0x90, 0x11, 0xf9, 0x2a, 0x19, 0x85, 0x56, 0x61, 0xce, 0x3e, 0x3a, 0x72, 0x4d, 0x8f, 0xce, 0x20, + 0xcd, 0xfb, 0xb8, 0x0c, 0xad, 0xc0, 0x5c, 0xcf, 0x1c, 0x74, 0xbd, 0x63, 0x9a, 0x96, 0xd3, 0x98, + 0xb7, 0x94, 0x77, 0xa1, 0x30, 0xb6, 0x0f, 0x68, 0x0d, 0x72, 0xee, 0xb1, 0xfd, 0x90, 0xc7, 0x2a, + 0xea, 0xac, 0x0c, 0x06, 0x22, 0x62, 0x21, 0x07, 0xbd, 0x0a, 0x25, 0x0a, 0x70, 0x3d, 0x63, 0xd0, + 0x31, 0x7a, 0xf6, 0xc0, 0xa4, 0xe5, 0x42, 0x06, 0x17, 0x89, 0x58, 0xf3, 0xa5, 0xca, 0xef, 0x24, + 0x58, 0x8e, 0x8d, 0xfa, 0xe8, 0x16, 0xe4, 0x43, 0x91, 0xbc, 0x43, 0x17, 0x93, 0xbb, 0x86, 0xb8, + 0xcb, 0x82, 0xf8, 0x5c, 0xdb, 0x5e, 0x24, 0x8b, 0x38, 0x7d, 0xbc, 0x96, 0x0b, 0x09, 0x71, 0xce, + 0x57, 0xae, 0x77, 0xd0, 0x97, 0x61, 0xfe, 0xd0, 0xee, 0xf7, 0x8d, 0x41, 0x87, 0x57, 0x2d, 0x81, + 0x19, 0x2a, 0x65, 0xfb, 0xca, 0x21, 0xe8, 0x1a, 0x64, 0x83, 0x08, 0xc2, 0x0a, 0x94, 0xa5, 0xc8, + 0xb0, 0x44, 0x23, 0x80, 0x29, 0x06, 0x2c, 0xc6, 0x24, 0xe1, 0x17, 0xb9, 0x08, 0xe5, 0x11, 0x2c, + 0xc6, 0x24, 0xe2, 0x17, 0xea, 0xa7, 0x15, 0x98, 0x73, 0xad, 0xee, 0xc0, 0xe8, 0x51, 0x37, 0xcd, + 0x62, 0xde, 0x52, 0x0e, 0x61, 0x39, 0x36, 0xd9, 0xbc, 0xd0, 0xf5, 0xfd, 0x41, 0x82, 0xd5, 0x69, + 0x05, 0xc0, 0x7f, 0xd9, 0x89, 0xf8, 0x30, 0x05, 0x4b, 0x71, 0x15, 0x06, 0x52, 0xc7, 0x0a, 0xef, + 0x4b, 0xcf, 0x56, 0x97, 0x44, 0xab, 0xf1, 
0xdd, 0x09, 0x6f, 0x4c, 0x2e, 0x23, 0xf0, 0x46, 0x69, + 0xaa, 0x27, 0x6a, 0x00, 0x43, 0xc7, 0x3e, 0x34, 0x5d, 0xb7, 0x6d, 0xd9, 0x7c, 0x71, 0xe3, 0xaf, + 0x81, 0x7d, 0xd6, 0x5d, 0x6f, 0x6d, 0x17, 0x4e, 0x1f, 0xaf, 0x65, 0xfd, 0x26, 0xce, 0x72, 0xc5, + 0xba, 0xbd, 0xb1, 0x05, 0x69, 0x32, 0x43, 0x94, 0x83, 0xf9, 0x83, 0xe6, 0xed, 0x66, 0xeb, 0x5e, + 0x53, 0x3e, 0x83, 0x64, 0xc8, 0xef, 0xb4, 0x9a, 0x7a, 0xb5, 0xde, 0x54, 0x71, 0xbb, 0x5e, 0x93, + 0x25, 0x54, 0x04, 0xd8, 0xc7, 0xad, 0x1d, 0x55, 0xd3, 0xda, 0xf5, 0x96, 0x9c, 0x52, 0xd2, 0xdf, + 0xff, 0x59, 0x45, 0x22, 0xc7, 0x2b, 0xb6, 0x58, 0x7a, 0xa1, 0xc7, 0xeb, 0xef, 0x12, 0x94, 0x26, + 0x6a, 0x9a, 0xff, 0xe0, 0x89, 0x7a, 0x8d, 0xbc, 0x40, 0x58, 0x72, 0x74, 0xcb, 0x33, 0xeb, 0x33, + 0x17, 0x73, 0xd7, 0x4a, 0x1c, 0x2f, 0x92, 0x26, 0xdf, 0xed, 0x00, 0x37, 0x7e, 0x0c, 0xd3, 0xcf, + 0x76, 0x0c, 0xbf, 0x09, 0x85, 0xb1, 0xea, 0xeb, 0x85, 0xfa, 0xd4, 0x85, 0xc2, 0x58, 0x49, 0xf6, + 0x85, 0x04, 0xa3, 0xf7, 0xa1, 0x34, 0x51, 0xbd, 0xbd, 0xd0, 0x35, 0x61, 0x38, 0x97, 0x58, 0xc2, + 0xa1, 0x37, 0x20, 0x6d, 0x0d, 0x8e, 0x6c, 0x3e, 0xc0, 0xf9, 0x89, 0x1d, 0x13, 0xe0, 0x50, 0x1e, + 0xa7, 0x70, 0xe5, 0x00, 0x56, 0xa7, 0x55, 0x69, 0xcf, 0x6b, 0x16, 0xc3, 0xea, 0xb4, 0x2a, 0x8c, + 0xd4, 0x01, 0x3c, 0xd2, 0x84, 0xea, 0x00, 0x1a, 0x3c, 0xca, 0x90, 0x1e, 0x18, 0x7d, 0x93, 0x56, + 0x01, 0x7e, 0x0f, 0x91, 0x28, 0xb7, 0x20, 0x17, 0xaa, 0x96, 0xd0, 0x16, 0x94, 0xcc, 0x0f, 0x0e, + 0x7b, 0xa3, 0x8e, 0xd9, 0x11, 0x55, 0x96, 0x44, 0x4f, 0x6b, 0x9e, 0x4f, 0x92, 0xe2, 0xb8, 0x85, + 0xa2, 0x80, 0x32, 0xe5, 0x8d, 0xbf, 0xcd, 0xc6, 0x45, 0x85, 0x22, 0xc0, 0x9e, 0xaa, 0xb7, 0x6f, + 0xaa, 0xd5, 0x86, 0x7e, 0x53, 0x96, 0x50, 0x01, 0xb2, 0xa4, 0xbd, 0xdb, 0xa8, 0xee, 0x69, 0x72, + 0x0a, 0x95, 0x20, 0x47, 0x9a, 0x77, 0x55, 0xac, 0xd5, 0x5b, 0x4d, 0x79, 0x46, 0x08, 0xee, 0xa8, + 0x3a, 0xae, 0xef, 0x68, 0x72, 0x1a, 0x2d, 0xc3, 0x02, 0x11, 0x34, 0x5a, 0x7b, 0x7b, 0xf5, 0xe6, + 0x5e, 0xbb, 0xa1, 0xde, 0x55, 0x1b, 0xf2, 0x2c, 0x11, 0x6b, 0x11, 0xf1, 0x1c, 0x19, 0xae, 0x51, + 0xd7, 0xf4, 0xf6, 0x6e, 0xbd, 0xa1, 0x6a, 0xf2, 0x3c, 0x19, 0x0e, 0xab, 0xd5, 0x1a, 0x6d, 0xcb, + 0x19, 0x31, 0xba, 0xa6, 0x57, 0x75, 0x55, 0xce, 0x22, 0x04, 0x45, 0xd2, 0xf4, 0xc3, 0x96, 0x26, + 0x83, 0x90, 0xed, 0xe2, 0xea, 0x1d, 0xf5, 0x5e, 0x0b, 0xdf, 0xd6, 0xe4, 0x1c, 0x5a, 0x80, 0x02, + 0x91, 0xa9, 0xdf, 0x50, 0x77, 0x0e, 0xf4, 0x16, 0xd6, 0xe4, 0xbc, 0x80, 0xb5, 0xf6, 0x55, 0x5c, + 0xd5, 0xeb, 0xad, 0xa6, 0x26, 0xaf, 0x09, 0xeb, 0x7a, 0x55, 0xbb, 0xad, 0xc9, 0x05, 0xd1, 0xac, + 0xee, 0xa9, 0x4d, 0x5d, 0x5e, 0x42, 0x0a, 0xac, 0x90, 0x26, 0x56, 0xb5, 0xd6, 0x01, 0xde, 0x51, + 0xdb, 0xfb, 0xb8, 0x75, 0xb7, 0x5e, 0x23, 0x83, 0x2a, 0x68, 0x0d, 0xce, 0x36, 0xaa, 0x07, 0xcd, + 0x9d, 0x9b, 0xed, 0xa6, 0xaa, 0xe9, 0x6a, 0x2d, 0x98, 0x92, 0x5c, 0x54, 0x52, 0x19, 0x09, 0xbd, + 0x04, 0xcb, 0xf7, 0xaa, 0x75, 0x3d, 0xda, 0x5d, 0x12, 0xdd, 0xb7, 0xeb, 0x8d, 0x46, 0xb4, 0x5b, + 0xa6, 0xdd, 0x6b, 0x70, 0x16, 0xab, 0x77, 0x5a, 0x77, 0xd5, 0x28, 0x60, 0x99, 0x02, 0x2e, 0xc0, + 0x5a, 0xc2, 0xf8, 0x6d, 0x4d, 0xd5, 0xe8, 0xd6, 0x2c, 0x90, 0x05, 0x54, 0x75, 0xbd, 0xba, 0x73, + 0x33, 0xd4, 0x5b, 0x6f, 0xee, 0x1f, 0xe8, 0x32, 0x42, 0xe7, 0xe1, 0x6c, 0xa4, 0xaf, 0x75, 0xa0, + 0x93, 0xce, 0x45, 0xb4, 0x04, 0x32, 0xb7, 0x1e, 0x8c, 0xbb, 0x42, 0x3c, 0x48, 0x97, 0x14, 0xc8, + 0xce, 0x12, 0x19, 0x5d, 0x47, 0x20, 0x2b, 0x13, 0x6d, 0x3e, 0xf9, 0x40, 0x7a, 0x0e, 0xad, 0xc3, + 0x6a, 0xb5, 0x56, 0x8b, 0x7a, 0x93, 0x80, 0x76, 0xeb, 0x7b, 0xf2, 0x79, 0xb2, 0xa6, 0x83, 0xfd, + 0x5a, 0x55, 0x57, 0x93, 0x41, 0xab, 0x04, 0xc4, 0x8d, 0x27, 0x82, 
0x5e, 0x22, 0x99, 0x6d, 0x1f, + 0x1f, 0x34, 0xd5, 0x76, 0xfd, 0x4e, 0x75, 0x4f, 0xd5, 0xe4, 0x0a, 0xcb, 0x64, 0x1b, 0xbf, 0xbf, + 0x40, 0xca, 0x70, 0x77, 0x68, 0x0f, 0x5c, 0x13, 0xbd, 0x3e, 0x96, 0xe8, 0xc7, 0x09, 0x26, 0x01, + 0x8a, 0xe6, 0xf5, 0x1b, 0x00, 0xe4, 0x71, 0x74, 0x6c, 0x1a, 0x3d, 0xef, 0x98, 0xa7, 0x92, 0xb5, + 0x78, 0xdd, 0x3d, 0xd3, 0xbb, 0x49, 0x61, 0x38, 0xdb, 0x15, 0x9f, 0x68, 0x0b, 0x48, 0xa3, 0x7d, + 0xd4, 0x33, 0xba, 0x2e, 0x4f, 0xe7, 0x95, 0x44, 0xf5, 0x5d, 0x82, 0xc2, 0x99, 0x2e, 0xff, 0x42, + 0x55, 0x46, 0xf1, 0x9d, 0x98, 0x0e, 0x25, 0x6a, 0x58, 0x8e, 0x59, 0x4f, 0x54, 0xbf, 0xcb, 0x70, + 0x94, 0xe6, 0xe3, 0xdf, 0xc2, 0x84, 0x60, 0x09, 0x67, 0x9f, 0x62, 0x22, 0x8e, 0x29, 0xfc, 0x3a, + 0x2c, 0x74, 0x23, 0x4c, 0xe1, 0x5c, 0x0c, 0x3b, 0x15, 0x36, 0x34, 0xce, 0x16, 0x76, 0x27, 0x1e, + 0x66, 0x37, 0xc6, 0xd8, 0xc2, 0xf9, 0x69, 0x5e, 0x8d, 0x65, 0x0c, 0xb7, 0xc2, 0x8c, 0x61, 0x66, + 0x9a, 0x57, 0x63, 0x58, 0x43, 0xbe, 0x25, 0xae, 0x67, 0x78, 0x26, 0x27, 0xbf, 0x92, 0xb7, 0x44, + 0x23, 0x28, 0xba, 0x25, 0xf4, 0x0b, 0xdd, 0x8a, 0x3c, 0x96, 0x19, 0xa1, 0x75, 0x21, 0xd1, 0x42, + 0xf2, 0x83, 0x99, 0xdb, 0x3a, 0x72, 0x8c, 0xbe, 0xf9, 0xd0, 0x76, 0x1e, 0xb8, 0x9c, 0xbd, 0x4a, + 0xb6, 0xb5, 0xeb, 0x43, 0xa9, 0xad, 0xa0, 0x89, 0xf6, 0x80, 0x08, 0xda, 0xe6, 0x07, 0xe6, 0xe1, + 0xc8, 0xb3, 0x1d, 0x97, 0xf3, 0x54, 0x1b, 0x89, 0xa6, 0x54, 0x81, 0xc4, 0xf9, 0x6e, 0xa8, 0x25, + 0xbc, 0xe3, 0x19, 0xee, 0x03, 0x97, 0xd3, 0x51, 0xc9, 0xde, 0xd1, 0x09, 0x8a, 0x7a, 0x87, 0x7e, + 0x25, 0x73, 0xa5, 0x8c, 0x8b, 0xfa, 0xff, 0x78, 0x43, 0x31, 0x4f, 0xb5, 0x78, 0xae, 0x94, 0xcf, + 0x8d, 0xaa, 0x73, 0x5a, 0x2a, 0x79, 0x6e, 0x55, 0x22, 0xa1, 0x73, 0xa3, 0x5f, 0xc4, 0xdb, 0x13, + 0x04, 0x99, 0x3c, 0xcd, 0xdb, 0x53, 0x49, 0xb2, 0x6f, 0xc3, 0x0a, 0x99, 0x48, 0x84, 0xa6, 0x71, + 0x39, 0x11, 0xf5, 0xa5, 0xc4, 0x59, 0x4d, 0xd6, 0x06, 0x2e, 0x5e, 0xea, 0xc6, 0x48, 0xc5, 0xd9, + 0xb0, 0x87, 0x26, 0x23, 0x1e, 0x5c, 0xce, 0x38, 0x25, 0x9f, 0x8d, 0x96, 0x0f, 0xa5, 0x67, 0x23, + 0x68, 0x2a, 0x97, 0x20, 0xeb, 0xc7, 0x26, 0x54, 0x81, 0x79, 0x16, 0xcc, 0x1e, 0xd1, 0x42, 0x24, + 0xc3, 0xa3, 0x9d, 0x10, 0x2a, 0xaf, 0x41, 0x46, 0x44, 0x22, 0xf4, 0x2a, 0xcc, 0xb2, 0xc0, 0xc5, + 0x8a, 0x8c, 0x1c, 0x1f, 0x9b, 0x74, 0x0a, 0x32, 0x85, 0xf6, 0x2b, 0x75, 0x4a, 0xdc, 0x88, 0x98, + 0xb3, 0x05, 0x79, 0x1e, 0xb2, 0xda, 0xa1, 0x3a, 0x4a, 0xd4, 0x7f, 0x1c, 0x15, 0x2a, 0x9f, 0x72, + 0x27, 0x81, 0x28, 0xca, 0x01, 0x89, 0xd0, 0xc5, 0xe6, 0x50, 0xe0, 0x56, 0x18, 0x40, 0x4c, 0x9e, + 0x63, 0x94, 0xcb, 0x50, 0xda, 0x7b, 0x76, 0x0e, 0x48, 0xa9, 0x86, 0xc9, 0x9c, 0xd7, 0x01, 0x48, + 0x38, 0xa1, 0x53, 0x16, 0xa3, 0x89, 0x47, 0x00, 0x41, 0x84, 0x26, 0x9c, 0x3d, 0xe2, 0x6d, 0x57, + 0x79, 0x73, 0x9c, 0xe8, 0x71, 0xad, 0xef, 0xb2, 0x02, 0x4f, 0x90, 0x39, 0x54, 0x82, 0x10, 0xa4, + 0x3b, 0x86, 0x67, 0xd0, 0x02, 0x2f, 0x8f, 0xe9, 0xb7, 0xf2, 0x27, 0x89, 0x7a, 0x9a, 0x85, 0x95, + 0xb1, 0x5b, 0x27, 0x7d, 0xce, 0x5b, 0x17, 0xb9, 0xfb, 0xa9, 0xe7, 0xbc, 0xfb, 0xd1, 0x80, 0x34, + 0xf3, 0xbc, 0x01, 0x49, 0xf9, 0xe7, 0xcc, 0x24, 0x4d, 0xa5, 0x01, 0x84, 0xc2, 0x26, 0x73, 0xf0, + 0xe5, 0x67, 0x08, 0x9b, 0xc1, 0x8b, 0x81, 0xfb, 0x34, 0x64, 0x46, 0xf9, 0xf1, 0x0c, 0x64, 0xc3, + 0xe4, 0x75, 0xde, 0x9f, 0x3c, 0x7b, 0x79, 0x84, 0x9f, 0x7e, 0xfe, 0xec, 0xc4, 0x2b, 0x3c, 0x24, + 0xc0, 0x39, 0x5f, 0xb1, 0xde, 0x41, 0xdb, 0x90, 0x13, 0xde, 0x0c, 0x1e, 0xf3, 0x0b, 0xdc, 0x8c, + 0xf0, 0x57, 0xbd, 0xb6, 0x5d, 0x3c, 0x7d, 0xbc, 0x06, 0x41, 0x1b, 0x83, 0xd0, 0xaa, 0x77, 0xd0, + 0x05, 0x28, 0xf8, 0x36, 0x68, 0x75, 0x4f, 0x7c, 0x99, 0xc5, 0x79, 0x21, 0x6c, 0x1a, 0x7d, 
0x33, + 0xf2, 0x54, 0x4a, 0xff, 0x1b, 0x2f, 0xb4, 0x2a, 0xc8, 0xa1, 0x1f, 0x77, 0x3c, 0xc3, 0x1b, 0x89, + 0x7c, 0xbf, 0x32, 0x69, 0x4f, 0xa3, 0xbd, 0xb8, 0x74, 0x38, 0x2e, 0x40, 0xb7, 0x60, 0xd1, 0x8f, + 0x69, 0xc4, 0x82, 0xe5, 0x7a, 0xe4, 0xea, 0xb1, 0x64, 0x7f, 0x6e, 0xe2, 0x21, 0xa4, 0xf9, 0x00, + 0x8c, 0x9c, 0x88, 0x4c, 0xf9, 0x49, 0x8a, 0x1e, 0x80, 0x50, 0x8e, 0xd2, 0x00, 0x42, 0x47, 0xeb, + 0x69, 0x07, 0x20, 0x50, 0x0c, 0x36, 0x4e, 0x1c, 0x80, 0xc0, 0x0c, 0x3a, 0x82, 0xa5, 0x43, 0xbb, + 0x3f, 0xec, 0x99, 0x24, 0xe1, 0x84, 0xcc, 0xa7, 0x9e, 0xdf, 0xfc, 0xa2, 0x6f, 0x30, 0x74, 0x9e, + 0x9b, 0x90, 0xf5, 0x5b, 0xa8, 0x0a, 0xc5, 0xd0, 0x39, 0x0b, 0x62, 0xdc, 0x52, 0xe4, 0xa4, 0x05, + 0x41, 0xa3, 0x70, 0x14, 0x16, 0x2a, 0x3f, 0x48, 0x41, 0x3e, 0x7c, 0x15, 0x51, 0x0b, 0xb2, 0xc1, + 0x0d, 0x66, 0xce, 0xb9, 0xf4, 0xf4, 0x1b, 0xec, 0x9f, 0x46, 0x11, 0x9a, 0x7c, 0x1b, 0xe8, 0x3e, + 0x04, 0x0b, 0x19, 0x0b, 0x0e, 0xcf, 0x69, 0x1a, 0xf9, 0xd6, 0x7c, 0x88, 0x72, 0x0b, 0x32, 0xa2, + 0x81, 0x6e, 0x84, 0x0e, 0x7c, 0xc8, 0x27, 0x8b, 0x93, 0xd7, 0x26, 0x70, 0x89, 0x7f, 0x17, 0xa8, + 0x47, 0xf6, 0xe8, 0x79, 0x09, 0xf2, 0x16, 0xba, 0x0e, 0x10, 0xca, 0x7f, 0xcc, 0x25, 0x32, 0xb7, + 0xe6, 0xc3, 0xc4, 0x91, 0x08, 0x90, 0xca, 0x2f, 0x52, 0x34, 0xb2, 0xb2, 0xe0, 0x78, 0x1d, 0x0a, + 0x43, 0x73, 0xd0, 0x21, 0x95, 0xab, 0x88, 0xae, 0xe1, 0x5c, 0x46, 0x40, 0x62, 0x36, 0x1c, 0xc7, + 0xf4, 0x5e, 0x87, 0xfc, 0x77, 0x46, 0xe6, 0xc8, 0xec, 0x70, 0xb5, 0x54, 0x92, 0x5a, 0x8e, 0xc1, + 0x98, 0xd6, 0x9b, 0x50, 0x64, 0x3f, 0xcc, 0xf9, 0x7a, 0x33, 0x49, 0x7a, 0x05, 0x01, 0x64, 0x9a, + 0x6f, 0x83, 0xec, 0x99, 0x4e, 0xdf, 0x1a, 0x18, 0x9e, 0xaf, 0x9b, 0x4e, 0xd2, 0x2d, 0x05, 0x50, + 0xa6, 0xfd, 0x16, 0x94, 0x82, 0xbd, 0x66, 0xca, 0xb3, 0x49, 0xca, 0x45, 0x1f, 0x49, 0x75, 0x95, + 0x2d, 0xea, 0x2d, 0x56, 0x24, 0x6d, 0x02, 0xd0, 0x13, 0x21, 0x36, 0x50, 0x0a, 0xb9, 0x9c, 0x22, + 0x18, 0xa1, 0x65, 0x88, 0x4f, 0xe5, 0x97, 0x29, 0x58, 0x8a, 0x2b, 0x6b, 0xd0, 0x10, 0x50, 0x4c, + 0x79, 0xc4, 0x9c, 0xbf, 0xf5, 0xec, 0xe5, 0x51, 0x84, 0x76, 0xe1, 0x8b, 0x58, 0x70, 0x26, 0x91, + 0xca, 0xcf, 0x25, 0x90, 0x27, 0xd1, 0xe8, 0x1e, 0xac, 0x44, 0x7f, 0x4c, 0xfb, 0x7c, 0xec, 0xce, + 0x92, 0x13, 0xd3, 0x87, 0x6e, 0x40, 0xc9, 0xb3, 0x3d, 0xa3, 0xd7, 0x0e, 0x88, 0xc3, 0xd4, 0x34, + 0xe2, 0xb0, 0x48, 0xd1, 0x42, 0xe8, 0x2a, 0x7f, 0x95, 0xe2, 0x7f, 0xa3, 0x58, 0x23, 0xa9, 0xc7, + 0xf2, 0x44, 0x00, 0x97, 0x28, 0xd9, 0x06, 0x44, 0xc4, 0x63, 0xf4, 0x2b, 0x30, 0xcb, 0x9e, 0x2e, + 0x29, 0xfa, 0x90, 0x95, 0x43, 0x1b, 0xcc, 0x1e, 0x2b, 0xac, 0x1b, 0x5d, 0x85, 0x39, 0xc7, 0x34, + 0x5c, 0x7b, 0x40, 0x13, 0x4f, 0xf1, 0x5a, 0x79, 0x02, 0x38, 0x22, 0x53, 0x24, 0xfd, 0x98, 0xe3, + 0xd0, 0x3b, 0xe4, 0x55, 0xd6, 0xb7, 0x3c, 0xf6, 0xa3, 0x58, 0x7a, 0xec, 0xe7, 0x3b, 0xa2, 0x25, + 0x26, 0xdf, 0xf0, 0x41, 0x38, 0xa4, 0x80, 0xca, 0xa4, 0x56, 0x73, 0x5d, 0xa3, 0xcb, 0xfe, 0x89, + 0x23, 0x8b, 0x45, 0x53, 0xf9, 0x54, 0x9a, 0xa4, 0x3d, 0xff, 0x07, 0x57, 0xb9, 0xf1, 0x49, 0xea, + 0x8b, 0xe4, 0xd7, 0xc6, 0x89, 0xb4, 0xb9, 0x71, 0x22, 0x6d, 0x7e, 0x9c, 0x48, 0xcb, 0xc4, 0x10, + 0x69, 0xd9, 0x18, 0x22, 0x0d, 0xa2, 0x44, 0x5a, 0x2e, 0x86, 0x48, 0x5b, 0x18, 0x27, 0xd2, 0xf2, + 0xe3, 0x44, 0x5a, 0x71, 0x0a, 0x91, 0x26, 0x27, 0xf3, 0x64, 0x05, 0xca, 0x73, 0x45, 0x39, 0xa7, + 0x12, 0xe7, 0x72, 0x7e, 0x35, 0x0b, 0xc1, 0x6f, 0x1d, 0xe8, 0x8d, 0x31, 0x32, 0xe7, 0x7c, 0xfc, + 0x0f, 0x24, 0x51, 0x36, 0x67, 0xd3, 0xaf, 0xc3, 0xa3, 0xff, 0x9f, 0x14, 0xa8, 0xd5, 0x0c, 0xcf, + 0x60, 0x45, 0x3a, 0x7a, 0x13, 0xe6, 0x49, 0x8d, 0xe4, 0xd8, 0xbd, 0x58, 0xf2, 0x26, 0xd0, 0xd9, + 0x61, 0x28, 0x2c, 
0xe0, 0xca, 0x8f, 0x24, 0x48, 0x13, 0x43, 0xe8, 0xad, 0xb1, 0xa9, 0xae, 0x4f, + 0x19, 0x33, 0x3a, 0x5f, 0x14, 0x9a, 0x2f, 0x7f, 0x37, 0x6c, 0xbc, 0x1d, 0x77, 0xca, 0xb2, 0x30, + 0xab, 0xe9, 0xb5, 0x7a, 0x53, 0x96, 0x10, 0xc0, 0x9c, 0xa6, 0xd7, 0x5a, 0x07, 0xba, 0x9c, 0xe2, + 0xdf, 0x2a, 0xc6, 0xf2, 0x0c, 0xff, 0x71, 0xe7, 0x37, 0x29, 0x98, 0xe7, 0x73, 0x45, 0xef, 0x8c, + 0xcd, 0xec, 0xc2, 0xf4, 0x95, 0x45, 0x27, 0x77, 0x1d, 0x32, 0x9e, 0xf7, 0x88, 0x05, 0x53, 0xe6, + 0xd0, 0xa2, 0xb8, 0x46, 0xfa, 0xbb, 0x34, 0x7e, 0xe6, 0x4e, 0x1f, 0xaf, 0xcd, 0xf3, 0x06, 0x9e, + 0xf7, 0xbc, 0x47, 0x34, 0x72, 0xee, 0x42, 0xf6, 0xd8, 0x34, 0x1c, 0xef, 0xbe, 0x69, 0x78, 0xdc, + 0xab, 0x17, 0x9f, 0x32, 0xf6, 0x4d, 0x81, 0xc7, 0x81, 0xaa, 0xf2, 0x36, 0x64, 0x7d, 0x39, 0xda, + 0x84, 0x8c, 0x35, 0xf0, 0x4c, 0xe7, 0xc4, 0xe8, 0x4d, 0xfb, 0x6f, 0x01, 0x1f, 0xb4, 0x71, 0x3d, + 0xce, 0x8d, 0x79, 0xc8, 0xe8, 0xfa, 0xbb, 0xed, 0x7a, 0x73, 0xb7, 0xc5, 0xae, 0xea, 0x4d, 0xb5, + 0x8a, 0xf5, 0x6d, 0xb5, 0xaa, 0x8b, 0x5f, 0xc7, 0x36, 0xae, 0xc6, 0xe9, 0x65, 0x20, 0x5d, 0xab, + 0xea, 0x55, 0x59, 0x22, 0x62, 0x72, 0x76, 0x71, 0xab, 0x21, 0x34, 0xb6, 0x77, 0x3e, 0x7e, 0x52, + 0x39, 0xf3, 0xc7, 0x27, 0x95, 0x33, 0x7f, 0x7e, 0x52, 0x91, 0x3e, 0x7b, 0x52, 0x91, 0xfe, 0xf1, + 0xa4, 0x22, 0x7d, 0xef, 0xb4, 0x22, 0x7d, 0x78, 0x5a, 0x91, 0x7e, 0x7d, 0x5a, 0x91, 0x7e, 0x7b, + 0x5a, 0x91, 0x3e, 0x3a, 0xad, 0x48, 0x1f, 0x9f, 0x56, 0xa4, 0xbf, 0x9c, 0x56, 0xce, 0x7c, 0x76, + 0x5a, 0x91, 0x7e, 0xf8, 0x69, 0xe5, 0xcc, 0x4f, 0x3f, 0xad, 0x48, 0xef, 0xcd, 0x52, 0x9f, 0xfc, + 0x2b, 0x00, 0x00, 0xff, 0xff, 0x8a, 0x44, 0x30, 0x4d, 0xaa, 0x28, 0x00, 0x00, +} diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/agent/agent.pb_ffjson.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/agent/agent.pb_ffjson.go new file mode 100644 index 000000000000..680617ae19d8 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/agent/agent.pb_ffjson.go @@ -0,0 +1,15157 @@ +// DO NOT EDIT! +// Code generated by ffjson +// source: agent/agent.pb.go +// DO NOT EDIT! 
+ +package agent + +import ( + "bytes" + "encoding/base64" + "errors" + "fmt" + "github.com/mesos/mesos-go/api/v1/lib" + fflib "github.com/pquerna/ffjson/fflib/v1" + "reflect" +) + +func (mj *Call) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Call) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "type":`) + + { + + obj, err = mj.Type.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + if mj.GetMetrics != nil { + if true { + buf.WriteString(`"get_metrics":`) + + { + + err = mj.GetMetrics.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.SetLoggingLevel != nil { + if true { + buf.WriteString(`"set_logging_level":`) + + { + + err = mj.SetLoggingLevel.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.ListFiles != nil { + if true { + buf.WriteString(`"list_files":`) + + { + + err = mj.ListFiles.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.ReadFile != nil { + if true { + buf.WriteString(`"read_file":`) + + { + + err = mj.ReadFile.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.GetContainers != nil { + if true { + buf.WriteString(`"get_containers":`) + + { + + err = mj.GetContainers.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.LaunchNestedContainer != nil { + if true { + buf.WriteString(`"launch_nested_container":`) + + { + + err = mj.LaunchNestedContainer.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.WaitNestedContainer != nil { + if true { + buf.WriteString(`"wait_nested_container":`) + + { + + err = mj.WaitNestedContainer.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.KillNestedContainer != nil { + if true { + buf.WriteString(`"kill_nested_container":`) + + { + + err = mj.KillNestedContainer.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.RemoveNestedContainer != nil { + if true { + buf.WriteString(`"remove_nested_container":`) + + { + + err = mj.RemoveNestedContainer.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.LaunchNestedContainerSession != nil { + if true { + buf.WriteString(`"launch_nested_container_session":`) + + { + + err = mj.LaunchNestedContainerSession.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.AttachContainerInput != nil { + if true { + buf.WriteString(`"attach_container_input":`) + + { + + err = mj.AttachContainerInput.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.AttachContainerOutput != nil { + if true { + buf.WriteString(`"attach_container_output":`) + + { + + err = mj.AttachContainerOutput.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.LaunchContainer != nil { + if true { + buf.WriteString(`"launch_container":`) + + { + + err = mj.LaunchContainer.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } 
+ buf.WriteByte(',') + } + } + if mj.WaitContainer != nil { + if true { + buf.WriteString(`"wait_container":`) + + { + + err = mj.WaitContainer.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.KillContainer != nil { + if true { + buf.WriteString(`"kill_container":`) + + { + + err = mj.KillContainer.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.RemoveContainer != nil { + if true { + buf.WriteString(`"remove_container":`) + + { + + err = mj.RemoveContainer.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.AddResourceProviderConfig != nil { + if true { + buf.WriteString(`"add_resource_provider_config":`) + + { + + err = mj.AddResourceProviderConfig.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.UpdateResourceProviderConfig != nil { + if true { + buf.WriteString(`"update_resource_provider_config":`) + + { + + err = mj.UpdateResourceProviderConfig.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.RemoveResourceProviderConfig != nil { + if true { + buf.WriteString(`"remove_resource_provider_config":`) + + { + + err = mj.RemoveResourceProviderConfig.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.PruneImages != nil { + if true { + buf.WriteString(`"prune_images":`) + + { + + err = mj.PruneImages.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Callbase = iota + ffj_t_Callno_such_key + + ffj_t_Call_Type + + ffj_t_Call_GetMetrics + + ffj_t_Call_SetLoggingLevel + + ffj_t_Call_ListFiles + + ffj_t_Call_ReadFile + + ffj_t_Call_GetContainers + + ffj_t_Call_LaunchNestedContainer + + ffj_t_Call_WaitNestedContainer + + ffj_t_Call_KillNestedContainer + + ffj_t_Call_RemoveNestedContainer + + ffj_t_Call_LaunchNestedContainerSession + + ffj_t_Call_AttachContainerInput + + ffj_t_Call_AttachContainerOutput + + ffj_t_Call_LaunchContainer + + ffj_t_Call_WaitContainer + + ffj_t_Call_KillContainer + + ffj_t_Call_RemoveContainer + + ffj_t_Call_AddResourceProviderConfig + + ffj_t_Call_UpdateResourceProviderConfig + + ffj_t_Call_RemoveResourceProviderConfig + + ffj_t_Call_PruneImages +) + +var ffj_key_Call_Type = []byte("type") + +var ffj_key_Call_GetMetrics = []byte("get_metrics") + +var ffj_key_Call_SetLoggingLevel = []byte("set_logging_level") + +var ffj_key_Call_ListFiles = []byte("list_files") + +var ffj_key_Call_ReadFile = []byte("read_file") + +var ffj_key_Call_GetContainers = []byte("get_containers") + +var ffj_key_Call_LaunchNestedContainer = []byte("launch_nested_container") + +var ffj_key_Call_WaitNestedContainer = []byte("wait_nested_container") + +var ffj_key_Call_KillNestedContainer = []byte("kill_nested_container") + +var ffj_key_Call_RemoveNestedContainer = []byte("remove_nested_container") + +var ffj_key_Call_LaunchNestedContainerSession = []byte("launch_nested_container_session") + +var ffj_key_Call_AttachContainerInput = []byte("attach_container_input") + +var ffj_key_Call_AttachContainerOutput = []byte("attach_container_output") + +var ffj_key_Call_LaunchContainer = []byte("launch_container") + +var ffj_key_Call_WaitContainer = []byte("wait_container") + +var ffj_key_Call_KillContainer = []byte("kill_container") + +var ffj_key_Call_RemoveContainer = []byte("remove_container") + +var 
ffj_key_Call_AddResourceProviderConfig = []byte("add_resource_provider_config") + +var ffj_key_Call_UpdateResourceProviderConfig = []byte("update_resource_provider_config") + +var ffj_key_Call_RemoveResourceProviderConfig = []byte("remove_resource_provider_config") + +var ffj_key_Call_PruneImages = []byte("prune_images") + +func (uj *Call) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Call) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Callbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Callno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'a': + + if bytes.Equal(ffj_key_Call_AttachContainerInput, kn) { + currentKey = ffj_t_Call_AttachContainerInput + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Call_AttachContainerOutput, kn) { + currentKey = ffj_t_Call_AttachContainerOutput + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Call_AddResourceProviderConfig, kn) { + currentKey = ffj_t_Call_AddResourceProviderConfig + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'g': + + if bytes.Equal(ffj_key_Call_GetMetrics, kn) { + currentKey = ffj_t_Call_GetMetrics + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Call_GetContainers, kn) { + currentKey = ffj_t_Call_GetContainers + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'k': + + if bytes.Equal(ffj_key_Call_KillNestedContainer, kn) { + currentKey = ffj_t_Call_KillNestedContainer + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Call_KillContainer, kn) { + currentKey = ffj_t_Call_KillContainer + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'l': + + if bytes.Equal(ffj_key_Call_ListFiles, kn) { + currentKey = ffj_t_Call_ListFiles + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Call_LaunchNestedContainer, kn) { + currentKey = ffj_t_Call_LaunchNestedContainer + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Call_LaunchNestedContainerSession, kn) { + currentKey = ffj_t_Call_LaunchNestedContainerSession + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Call_LaunchContainer, kn) { + currentKey = ffj_t_Call_LaunchContainer + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if 
bytes.Equal(ffj_key_Call_PruneImages, kn) { + currentKey = ffj_t_Call_PruneImages + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'r': + + if bytes.Equal(ffj_key_Call_ReadFile, kn) { + currentKey = ffj_t_Call_ReadFile + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Call_RemoveNestedContainer, kn) { + currentKey = ffj_t_Call_RemoveNestedContainer + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Call_RemoveContainer, kn) { + currentKey = ffj_t_Call_RemoveContainer + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Call_RemoveResourceProviderConfig, kn) { + currentKey = ffj_t_Call_RemoveResourceProviderConfig + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_Call_SetLoggingLevel, kn) { + currentKey = ffj_t_Call_SetLoggingLevel + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_Call_Type, kn) { + currentKey = ffj_t_Call_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'u': + + if bytes.Equal(ffj_key_Call_UpdateResourceProviderConfig, kn) { + currentKey = ffj_t_Call_UpdateResourceProviderConfig + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'w': + + if bytes.Equal(ffj_key_Call_WaitNestedContainer, kn) { + currentKey = ffj_t_Call_WaitNestedContainer + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Call_WaitContainer, kn) { + currentKey = ffj_t_Call_WaitContainer + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Call_PruneImages, kn) { + currentKey = ffj_t_Call_PruneImages + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Call_RemoveResourceProviderConfig, kn) { + currentKey = ffj_t_Call_RemoveResourceProviderConfig + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Call_UpdateResourceProviderConfig, kn) { + currentKey = ffj_t_Call_UpdateResourceProviderConfig + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Call_AddResourceProviderConfig, kn) { + currentKey = ffj_t_Call_AddResourceProviderConfig + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_Call_RemoveContainer, kn) { + currentKey = ffj_t_Call_RemoveContainer + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Call_KillContainer, kn) { + currentKey = ffj_t_Call_KillContainer + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_Call_WaitContainer, kn) { + currentKey = ffj_t_Call_WaitContainer + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_Call_LaunchContainer, kn) { + currentKey = ffj_t_Call_LaunchContainer + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_Call_AttachContainerOutput, kn) { + currentKey = ffj_t_Call_AttachContainerOutput + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_Call_AttachContainerInput, kn) { + currentKey = ffj_t_Call_AttachContainerInput + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Call_LaunchNestedContainerSession, kn) { + currentKey = ffj_t_Call_LaunchNestedContainerSession + state = fflib.FFParse_want_colon + goto mainparse + } + + if 
fflib.EqualFoldRight(ffj_key_Call_RemoveNestedContainer, kn) { + currentKey = ffj_t_Call_RemoveNestedContainer + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Call_KillNestedContainer, kn) { + currentKey = ffj_t_Call_KillNestedContainer + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Call_WaitNestedContainer, kn) { + currentKey = ffj_t_Call_WaitNestedContainer + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Call_LaunchNestedContainer, kn) { + currentKey = ffj_t_Call_LaunchNestedContainer + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Call_GetContainers, kn) { + currentKey = ffj_t_Call_GetContainers + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_Call_ReadFile, kn) { + currentKey = ffj_t_Call_ReadFile + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Call_ListFiles, kn) { + currentKey = ffj_t_Call_ListFiles + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Call_SetLoggingLevel, kn) { + currentKey = ffj_t_Call_SetLoggingLevel + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Call_GetMetrics, kn) { + currentKey = ffj_t_Call_GetMetrics + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Call_Type, kn) { + currentKey = ffj_t_Call_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Callno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Call_Type: + goto handle_Type + + case ffj_t_Call_GetMetrics: + goto handle_GetMetrics + + case ffj_t_Call_SetLoggingLevel: + goto handle_SetLoggingLevel + + case ffj_t_Call_ListFiles: + goto handle_ListFiles + + case ffj_t_Call_ReadFile: + goto handle_ReadFile + + case ffj_t_Call_GetContainers: + goto handle_GetContainers + + case ffj_t_Call_LaunchNestedContainer: + goto handle_LaunchNestedContainer + + case ffj_t_Call_WaitNestedContainer: + goto handle_WaitNestedContainer + + case ffj_t_Call_KillNestedContainer: + goto handle_KillNestedContainer + + case ffj_t_Call_RemoveNestedContainer: + goto handle_RemoveNestedContainer + + case ffj_t_Call_LaunchNestedContainerSession: + goto handle_LaunchNestedContainerSession + + case ffj_t_Call_AttachContainerInput: + goto handle_AttachContainerInput + + case ffj_t_Call_AttachContainerOutput: + goto handle_AttachContainerOutput + + case ffj_t_Call_LaunchContainer: + goto handle_LaunchContainer + + case ffj_t_Call_WaitContainer: + goto handle_WaitContainer + + case ffj_t_Call_KillContainer: + goto handle_KillContainer + + case ffj_t_Call_RemoveContainer: + goto handle_RemoveContainer + + case ffj_t_Call_AddResourceProviderConfig: + goto handle_AddResourceProviderConfig + + case ffj_t_Call_UpdateResourceProviderConfig: + goto handle_UpdateResourceProviderConfig + + case ffj_t_Call_RemoveResourceProviderConfig: + goto 
handle_RemoveResourceProviderConfig + + case ffj_t_Call_PruneImages: + goto handle_PruneImages + + case ffj_t_Callno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Type: + + /* handler: uj.Type type=agent.Call_Type kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = uj.Type.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_GetMetrics: + + /* handler: uj.GetMetrics type=agent.Call_GetMetrics kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.GetMetrics = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.GetMetrics == nil { + uj.GetMetrics = new(Call_GetMetrics) + } + + err = uj.GetMetrics.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_SetLoggingLevel: + + /* handler: uj.SetLoggingLevel type=agent.Call_SetLoggingLevel kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.SetLoggingLevel = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.SetLoggingLevel == nil { + uj.SetLoggingLevel = new(Call_SetLoggingLevel) + } + + err = uj.SetLoggingLevel.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ListFiles: + + /* handler: uj.ListFiles type=agent.Call_ListFiles kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.ListFiles = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.ListFiles == nil { + uj.ListFiles = new(Call_ListFiles) + } + + err = uj.ListFiles.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ReadFile: + + /* handler: uj.ReadFile type=agent.Call_ReadFile kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.ReadFile = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.ReadFile == nil { + uj.ReadFile = new(Call_ReadFile) + } + + err = uj.ReadFile.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_GetContainers: + + /* handler: uj.GetContainers type=agent.Call_GetContainers kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.GetContainers = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.GetContainers == nil { + uj.GetContainers = new(Call_GetContainers) + } + + err = uj.GetContainers.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_LaunchNestedContainer: + + /* handler: uj.LaunchNestedContainer type=agent.Call_LaunchNestedContainer kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.LaunchNestedContainer = nil + + state = fflib.FFParse_after_value + goto 
mainparse + } + + if uj.LaunchNestedContainer == nil { + uj.LaunchNestedContainer = new(Call_LaunchNestedContainer) + } + + err = uj.LaunchNestedContainer.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_WaitNestedContainer: + + /* handler: uj.WaitNestedContainer type=agent.Call_WaitNestedContainer kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.WaitNestedContainer = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.WaitNestedContainer == nil { + uj.WaitNestedContainer = new(Call_WaitNestedContainer) + } + + err = uj.WaitNestedContainer.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_KillNestedContainer: + + /* handler: uj.KillNestedContainer type=agent.Call_KillNestedContainer kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.KillNestedContainer = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.KillNestedContainer == nil { + uj.KillNestedContainer = new(Call_KillNestedContainer) + } + + err = uj.KillNestedContainer.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_RemoveNestedContainer: + + /* handler: uj.RemoveNestedContainer type=agent.Call_RemoveNestedContainer kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.RemoveNestedContainer = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.RemoveNestedContainer == nil { + uj.RemoveNestedContainer = new(Call_RemoveNestedContainer) + } + + err = uj.RemoveNestedContainer.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_LaunchNestedContainerSession: + + /* handler: uj.LaunchNestedContainerSession type=agent.Call_LaunchNestedContainerSession kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.LaunchNestedContainerSession = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.LaunchNestedContainerSession == nil { + uj.LaunchNestedContainerSession = new(Call_LaunchNestedContainerSession) + } + + err = uj.LaunchNestedContainerSession.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_AttachContainerInput: + + /* handler: uj.AttachContainerInput type=agent.Call_AttachContainerInput kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.AttachContainerInput = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.AttachContainerInput == nil { + uj.AttachContainerInput = new(Call_AttachContainerInput) + } + + err = uj.AttachContainerInput.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_AttachContainerOutput: + + /* handler: uj.AttachContainerOutput type=agent.Call_AttachContainerOutput kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.AttachContainerOutput = nil + + state = fflib.FFParse_after_value + 
goto mainparse + } + + if uj.AttachContainerOutput == nil { + uj.AttachContainerOutput = new(Call_AttachContainerOutput) + } + + err = uj.AttachContainerOutput.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_LaunchContainer: + + /* handler: uj.LaunchContainer type=agent.Call_LaunchContainer kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.LaunchContainer = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.LaunchContainer == nil { + uj.LaunchContainer = new(Call_LaunchContainer) + } + + err = uj.LaunchContainer.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_WaitContainer: + + /* handler: uj.WaitContainer type=agent.Call_WaitContainer kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.WaitContainer = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.WaitContainer == nil { + uj.WaitContainer = new(Call_WaitContainer) + } + + err = uj.WaitContainer.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_KillContainer: + + /* handler: uj.KillContainer type=agent.Call_KillContainer kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.KillContainer = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.KillContainer == nil { + uj.KillContainer = new(Call_KillContainer) + } + + err = uj.KillContainer.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_RemoveContainer: + + /* handler: uj.RemoveContainer type=agent.Call_RemoveContainer kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.RemoveContainer = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.RemoveContainer == nil { + uj.RemoveContainer = new(Call_RemoveContainer) + } + + err = uj.RemoveContainer.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_AddResourceProviderConfig: + + /* handler: uj.AddResourceProviderConfig type=agent.Call_AddResourceProviderConfig kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.AddResourceProviderConfig = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.AddResourceProviderConfig == nil { + uj.AddResourceProviderConfig = new(Call_AddResourceProviderConfig) + } + + err = uj.AddResourceProviderConfig.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_UpdateResourceProviderConfig: + + /* handler: uj.UpdateResourceProviderConfig type=agent.Call_UpdateResourceProviderConfig kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.UpdateResourceProviderConfig = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.UpdateResourceProviderConfig == nil { + uj.UpdateResourceProviderConfig = new(Call_UpdateResourceProviderConfig) + } + + err = 
uj.UpdateResourceProviderConfig.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_RemoveResourceProviderConfig: + + /* handler: uj.RemoveResourceProviderConfig type=agent.Call_RemoveResourceProviderConfig kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.RemoveResourceProviderConfig = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.RemoveResourceProviderConfig == nil { + uj.RemoveResourceProviderConfig = new(Call_RemoveResourceProviderConfig) + } + + err = uj.RemoveResourceProviderConfig.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_PruneImages: + + /* handler: uj.PruneImages type=agent.Call_PruneImages kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.PruneImages = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.PruneImages == nil { + uj.PruneImages = new(Call_PruneImages) + } + + err = uj.PruneImages.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Call_AddResourceProviderConfig) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Call_AddResourceProviderConfig) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"info":`) + + { + + err = mj.Info.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Call_AddResourceProviderConfigbase = iota + ffj_t_Call_AddResourceProviderConfigno_such_key + + ffj_t_Call_AddResourceProviderConfig_Info +) + +var ffj_key_Call_AddResourceProviderConfig_Info = []byte("info") + +func (uj *Call_AddResourceProviderConfig) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Call_AddResourceProviderConfig) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Call_AddResourceProviderConfigbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok 
== fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Call_AddResourceProviderConfigno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'i': + + if bytes.Equal(ffj_key_Call_AddResourceProviderConfig_Info, kn) { + currentKey = ffj_t_Call_AddResourceProviderConfig_Info + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Call_AddResourceProviderConfig_Info, kn) { + currentKey = ffj_t_Call_AddResourceProviderConfig_Info + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Call_AddResourceProviderConfigno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Call_AddResourceProviderConfig_Info: + goto handle_Info + + case ffj_t_Call_AddResourceProviderConfigno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Info: + + /* handler: uj.Info type=mesos.ResourceProviderInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.Info.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Call_AttachContainerInput) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Call_AttachContainerInput) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "type":`) + + { + + obj, err = mj.Type.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + if mj.ContainerID != nil { + if true { + buf.WriteString(`"container_id":`) + + { + + err = mj.ContainerID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + 
buf.WriteByte(',') + } + } + if mj.ProcessIO != nil { + if true { + buf.WriteString(`"process_io":`) + + { + + err = mj.ProcessIO.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Call_AttachContainerInputbase = iota + ffj_t_Call_AttachContainerInputno_such_key + + ffj_t_Call_AttachContainerInput_Type + + ffj_t_Call_AttachContainerInput_ContainerID + + ffj_t_Call_AttachContainerInput_ProcessIO +) + +var ffj_key_Call_AttachContainerInput_Type = []byte("type") + +var ffj_key_Call_AttachContainerInput_ContainerID = []byte("container_id") + +var ffj_key_Call_AttachContainerInput_ProcessIO = []byte("process_io") + +func (uj *Call_AttachContainerInput) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Call_AttachContainerInput) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Call_AttachContainerInputbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Call_AttachContainerInputno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_Call_AttachContainerInput_ContainerID, kn) { + currentKey = ffj_t_Call_AttachContainerInput_ContainerID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffj_key_Call_AttachContainerInput_ProcessIO, kn) { + currentKey = ffj_t_Call_AttachContainerInput_ProcessIO + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_Call_AttachContainerInput_Type, kn) { + currentKey = ffj_t_Call_AttachContainerInput_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Call_AttachContainerInput_ProcessIO, kn) { + currentKey = ffj_t_Call_AttachContainerInput_ProcessIO + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_Call_AttachContainerInput_ContainerID, kn) { + currentKey = ffj_t_Call_AttachContainerInput_ContainerID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Call_AttachContainerInput_Type, kn) { + currentKey = ffj_t_Call_AttachContainerInput_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Call_AttachContainerInputno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Call_AttachContainerInput_Type: + goto handle_Type + + case ffj_t_Call_AttachContainerInput_ContainerID: + goto handle_ContainerID + + case ffj_t_Call_AttachContainerInput_ProcessIO: + goto handle_ProcessIO + + case ffj_t_Call_AttachContainerInputno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Type: + + /* handler: uj.Type type=agent.Call_AttachContainerInput_Type kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = uj.Type.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ContainerID: + + /* handler: uj.ContainerID type=mesos.ContainerID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.ContainerID = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.ContainerID == nil { + uj.ContainerID = new(mesos.ContainerID) + } + + err = uj.ContainerID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ProcessIO: + + /* handler: uj.ProcessIO type=agent.ProcessIO kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.ProcessIO = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.ProcessIO == nil { + uj.ProcessIO = 
new(ProcessIO) + } + + err = uj.ProcessIO.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Call_AttachContainerOutput) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Call_AttachContainerOutput) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"container_id":`) + + { + + err = mj.ContainerID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Call_AttachContainerOutputbase = iota + ffj_t_Call_AttachContainerOutputno_such_key + + ffj_t_Call_AttachContainerOutput_ContainerID +) + +var ffj_key_Call_AttachContainerOutput_ContainerID = []byte("container_id") + +func (uj *Call_AttachContainerOutput) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Call_AttachContainerOutput) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Call_AttachContainerOutputbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Call_AttachContainerOutputno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_Call_AttachContainerOutput_ContainerID, kn) { + currentKey = ffj_t_Call_AttachContainerOutput_ContainerID + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.AsciiEqualFold(ffj_key_Call_AttachContainerOutput_ContainerID, kn) { + currentKey = ffj_t_Call_AttachContainerOutput_ContainerID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Call_AttachContainerOutputno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Call_AttachContainerOutput_ContainerID: + goto handle_ContainerID + + case ffj_t_Call_AttachContainerOutputno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ContainerID: + + /* handler: uj.ContainerID type=mesos.ContainerID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.ContainerID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Call_GetContainers) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Call_GetContainers) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.ShowNested != nil { + if true { + if *mj.ShowNested { + buf.WriteString(`"show_nested":true`) + } else { + buf.WriteString(`"show_nested":false`) + } + buf.WriteByte(',') + } + } + if mj.ShowStandalone != nil { + if true { + if *mj.ShowStandalone { + buf.WriteString(`"show_standalone":true`) + } else { + buf.WriteString(`"show_standalone":false`) + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Call_GetContainersbase = iota + ffj_t_Call_GetContainersno_such_key + + ffj_t_Call_GetContainers_ShowNested + + ffj_t_Call_GetContainers_ShowStandalone +) + +var ffj_key_Call_GetContainers_ShowNested = []byte("show_nested") + +var ffj_key_Call_GetContainers_ShowStandalone = []byte("show_standalone") + +func (uj 
*Call_GetContainers) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Call_GetContainers) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Call_GetContainersbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Call_GetContainersno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 's': + + if bytes.Equal(ffj_key_Call_GetContainers_ShowNested, kn) { + currentKey = ffj_t_Call_GetContainers_ShowNested + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Call_GetContainers_ShowStandalone, kn) { + currentKey = ffj_t_Call_GetContainers_ShowStandalone + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Call_GetContainers_ShowStandalone, kn) { + currentKey = ffj_t_Call_GetContainers_ShowStandalone + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Call_GetContainers_ShowNested, kn) { + currentKey = ffj_t_Call_GetContainers_ShowNested + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Call_GetContainersno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Call_GetContainers_ShowNested: + goto handle_ShowNested + + case ffj_t_Call_GetContainers_ShowStandalone: + goto handle_ShowStandalone + + case ffj_t_Call_GetContainersno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ShowNested: + + /* handler: uj.ShowNested type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + uj.ShowNested = nil + + } else { + tmpb := fs.Output.Bytes() + + var tval bool + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + tval = true + + } else if bytes.Compare([]byte{'f', 'a', 
'l', 's', 'e'}, tmpb) == 0 { + + tval = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + uj.ShowNested = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ShowStandalone: + + /* handler: uj.ShowStandalone type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + uj.ShowStandalone = nil + + } else { + tmpb := fs.Output.Bytes() + + var tval bool + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + tval = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + tval = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + uj.ShowStandalone = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Call_GetMetrics) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Call_GetMetrics) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.Timeout != nil { + if true { + buf.WriteString(`"timeout":`) + + { + + err = mj.Timeout.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Call_GetMetricsbase = iota + ffj_t_Call_GetMetricsno_such_key + + ffj_t_Call_GetMetrics_Timeout +) + +var ffj_key_Call_GetMetrics_Timeout = []byte("timeout") + +func (uj *Call_GetMetrics) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Call_GetMetrics) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Call_GetMetricsbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. 
+ if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Call_GetMetricsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 't': + + if bytes.Equal(ffj_key_Call_GetMetrics_Timeout, kn) { + currentKey = ffj_t_Call_GetMetrics_Timeout + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Call_GetMetrics_Timeout, kn) { + currentKey = ffj_t_Call_GetMetrics_Timeout + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Call_GetMetricsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Call_GetMetrics_Timeout: + goto handle_Timeout + + case ffj_t_Call_GetMetricsno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Timeout: + + /* handler: uj.Timeout type=mesos.DurationInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Timeout = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Timeout == nil { + uj.Timeout = new(mesos.DurationInfo) + } + + err = uj.Timeout.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Call_KillContainer) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Call_KillContainer) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "container_id":`) + + { + + err = mj.ContainerID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + if mj.Signal != nil { + if true { + buf.WriteString(`"signal":`) + fflib.FormatBits2(buf, uint64(*mj.Signal), 10, *mj.Signal < 0) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Call_KillContainerbase = iota + ffj_t_Call_KillContainerno_such_key + + ffj_t_Call_KillContainer_ContainerID + + ffj_t_Call_KillContainer_Signal +) + +var ffj_key_Call_KillContainer_ContainerID = []byte("container_id") + +var 
ffj_key_Call_KillContainer_Signal = []byte("signal") + +func (uj *Call_KillContainer) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Call_KillContainer) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Call_KillContainerbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Call_KillContainerno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_Call_KillContainer_ContainerID, kn) { + currentKey = ffj_t_Call_KillContainer_ContainerID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_Call_KillContainer_Signal, kn) { + currentKey = ffj_t_Call_KillContainer_Signal + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Call_KillContainer_Signal, kn) { + currentKey = ffj_t_Call_KillContainer_Signal + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_Call_KillContainer_ContainerID, kn) { + currentKey = ffj_t_Call_KillContainer_ContainerID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Call_KillContainerno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Call_KillContainer_ContainerID: + goto handle_ContainerID + + case ffj_t_Call_KillContainer_Signal: + goto handle_Signal + + case ffj_t_Call_KillContainerno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ContainerID: + + /* handler: uj.ContainerID type=mesos.ContainerID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.ContainerID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Signal: + + /* handler: uj.Signal 
type=int32 kind=int32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int32", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.Signal = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 32) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int32(tval) + uj.Signal = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Call_KillNestedContainer) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Call_KillNestedContainer) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "container_id":`) + + { + + err = mj.ContainerID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + if mj.Signal != nil { + if true { + buf.WriteString(`"signal":`) + fflib.FormatBits2(buf, uint64(*mj.Signal), 10, *mj.Signal < 0) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Call_KillNestedContainerbase = iota + ffj_t_Call_KillNestedContainerno_such_key + + ffj_t_Call_KillNestedContainer_ContainerID + + ffj_t_Call_KillNestedContainer_Signal +) + +var ffj_key_Call_KillNestedContainer_ContainerID = []byte("container_id") + +var ffj_key_Call_KillNestedContainer_Signal = []byte("signal") + +func (uj *Call_KillNestedContainer) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Call_KillNestedContainer) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Call_KillNestedContainerbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Call_KillNestedContainerno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_Call_KillNestedContainer_ContainerID, kn) { + currentKey = ffj_t_Call_KillNestedContainer_ContainerID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_Call_KillNestedContainer_Signal, kn) { + currentKey = ffj_t_Call_KillNestedContainer_Signal + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Call_KillNestedContainer_Signal, kn) { + currentKey = ffj_t_Call_KillNestedContainer_Signal + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_Call_KillNestedContainer_ContainerID, kn) { + currentKey = ffj_t_Call_KillNestedContainer_ContainerID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Call_KillNestedContainerno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Call_KillNestedContainer_ContainerID: + goto handle_ContainerID + + case ffj_t_Call_KillNestedContainer_Signal: + goto handle_Signal + + case ffj_t_Call_KillNestedContainerno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ContainerID: + + /* handler: uj.ContainerID type=mesos.ContainerID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.ContainerID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Signal: + + /* handler: uj.Signal type=int32 kind=int32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int32", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.Signal = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 32) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int32(tval) + uj.Signal = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Call_LaunchContainer) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Call_LaunchContainer) 
MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "container_id":`) + + { + + err = mj.ContainerID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + if mj.Command != nil { + if true { + buf.WriteString(`"command":`) + + { + + err = mj.Command.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.WriteString(`"resources":`) + if mj.Resources != nil { + buf.WriteString(`[`) + for i, v := range mj.Resources { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + if mj.Container != nil { + if true { + buf.WriteString(`"container":`) + + { + + err = mj.Container.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Call_LaunchContainerbase = iota + ffj_t_Call_LaunchContainerno_such_key + + ffj_t_Call_LaunchContainer_ContainerID + + ffj_t_Call_LaunchContainer_Command + + ffj_t_Call_LaunchContainer_Resources + + ffj_t_Call_LaunchContainer_Container +) + +var ffj_key_Call_LaunchContainer_ContainerID = []byte("container_id") + +var ffj_key_Call_LaunchContainer_Command = []byte("command") + +var ffj_key_Call_LaunchContainer_Resources = []byte("resources") + +var ffj_key_Call_LaunchContainer_Container = []byte("container") + +func (uj *Call_LaunchContainer) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Call_LaunchContainer) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Call_LaunchContainerbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Call_LaunchContainerno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_Call_LaunchContainer_ContainerID, kn) { + currentKey = ffj_t_Call_LaunchContainer_ContainerID + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Call_LaunchContainer_Command, kn) { + currentKey = ffj_t_Call_LaunchContainer_Command + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Call_LaunchContainer_Container, kn) { + currentKey = ffj_t_Call_LaunchContainer_Container + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'r': + + if bytes.Equal(ffj_key_Call_LaunchContainer_Resources, kn) { + currentKey = ffj_t_Call_LaunchContainer_Resources + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Call_LaunchContainer_Container, kn) { + currentKey = ffj_t_Call_LaunchContainer_Container + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Call_LaunchContainer_Resources, kn) { + currentKey = ffj_t_Call_LaunchContainer_Resources + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Call_LaunchContainer_Command, kn) { + currentKey = ffj_t_Call_LaunchContainer_Command + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_Call_LaunchContainer_ContainerID, kn) { + currentKey = ffj_t_Call_LaunchContainer_ContainerID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Call_LaunchContainerno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Call_LaunchContainer_ContainerID: + goto handle_ContainerID + + case ffj_t_Call_LaunchContainer_Command: + goto handle_Command + + case ffj_t_Call_LaunchContainer_Resources: + goto handle_Resources + + case ffj_t_Call_LaunchContainer_Container: + goto handle_Container + + case ffj_t_Call_LaunchContainerno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ContainerID: + + /* handler: uj.ContainerID type=mesos.ContainerID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.ContainerID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Command: + + /* handler: uj.Command type=mesos.CommandInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Command = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Command == nil { + uj.Command = new(mesos.CommandInfo) + } + + err = uj.Command.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + 
+handle_Resources: + + /* handler: uj.Resources type=[]mesos.Resource kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Resources = nil + } else { + + uj.Resources = []mesos.Resource{} + + wantVal := true + + for { + + var tmp_uj__Resources mesos.Resource + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Resources type=mesos.Resource kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Resources.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Resources = append(uj.Resources, tmp_uj__Resources) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Container: + + /* handler: uj.Container type=mesos.ContainerInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Container = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Container == nil { + uj.Container = new(mesos.ContainerInfo) + } + + err = uj.Container.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Call_LaunchNestedContainer) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Call_LaunchNestedContainer) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "container_id":`) + + { + + err = mj.ContainerID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + if mj.Command != nil { + if true { + buf.WriteString(`"command":`) + + { + + err = mj.Command.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Container != nil { + if true { + buf.WriteString(`"container":`) + + { + + err = mj.Container.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Call_LaunchNestedContainerbase = iota + ffj_t_Call_LaunchNestedContainerno_such_key + + ffj_t_Call_LaunchNestedContainer_ContainerID + + ffj_t_Call_LaunchNestedContainer_Command + + 
ffj_t_Call_LaunchNestedContainer_Container +) + +var ffj_key_Call_LaunchNestedContainer_ContainerID = []byte("container_id") + +var ffj_key_Call_LaunchNestedContainer_Command = []byte("command") + +var ffj_key_Call_LaunchNestedContainer_Container = []byte("container") + +func (uj *Call_LaunchNestedContainer) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Call_LaunchNestedContainer) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Call_LaunchNestedContainerbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Call_LaunchNestedContainerno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_Call_LaunchNestedContainer_ContainerID, kn) { + currentKey = ffj_t_Call_LaunchNestedContainer_ContainerID + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Call_LaunchNestedContainer_Command, kn) { + currentKey = ffj_t_Call_LaunchNestedContainer_Command + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Call_LaunchNestedContainer_Container, kn) { + currentKey = ffj_t_Call_LaunchNestedContainer_Container + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Call_LaunchNestedContainer_Container, kn) { + currentKey = ffj_t_Call_LaunchNestedContainer_Container + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Call_LaunchNestedContainer_Command, kn) { + currentKey = ffj_t_Call_LaunchNestedContainer_Command + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_Call_LaunchNestedContainer_ContainerID, kn) { + currentKey = ffj_t_Call_LaunchNestedContainer_ContainerID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Call_LaunchNestedContainerno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Call_LaunchNestedContainer_ContainerID: + goto handle_ContainerID + + case 
ffj_t_Call_LaunchNestedContainer_Command: + goto handle_Command + + case ffj_t_Call_LaunchNestedContainer_Container: + goto handle_Container + + case ffj_t_Call_LaunchNestedContainerno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ContainerID: + + /* handler: uj.ContainerID type=mesos.ContainerID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.ContainerID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Command: + + /* handler: uj.Command type=mesos.CommandInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Command = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Command == nil { + uj.Command = new(mesos.CommandInfo) + } + + err = uj.Command.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Container: + + /* handler: uj.Container type=mesos.ContainerInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Container = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Container == nil { + uj.Container = new(mesos.ContainerInfo) + } + + err = uj.Container.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Call_LaunchNestedContainerSession) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Call_LaunchNestedContainerSession) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "container_id":`) + + { + + err = mj.ContainerID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + if mj.Command != nil { + if true { + buf.WriteString(`"command":`) + + { + + err = mj.Command.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Container != nil { + if true { + buf.WriteString(`"container":`) + + { + + err = mj.Container.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Call_LaunchNestedContainerSessionbase = iota + ffj_t_Call_LaunchNestedContainerSessionno_such_key + + ffj_t_Call_LaunchNestedContainerSession_ContainerID + + ffj_t_Call_LaunchNestedContainerSession_Command + + 
ffj_t_Call_LaunchNestedContainerSession_Container +) + +var ffj_key_Call_LaunchNestedContainerSession_ContainerID = []byte("container_id") + +var ffj_key_Call_LaunchNestedContainerSession_Command = []byte("command") + +var ffj_key_Call_LaunchNestedContainerSession_Container = []byte("container") + +func (uj *Call_LaunchNestedContainerSession) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Call_LaunchNestedContainerSession) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Call_LaunchNestedContainerSessionbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Call_LaunchNestedContainerSessionno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_Call_LaunchNestedContainerSession_ContainerID, kn) { + currentKey = ffj_t_Call_LaunchNestedContainerSession_ContainerID + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Call_LaunchNestedContainerSession_Command, kn) { + currentKey = ffj_t_Call_LaunchNestedContainerSession_Command + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Call_LaunchNestedContainerSession_Container, kn) { + currentKey = ffj_t_Call_LaunchNestedContainerSession_Container + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Call_LaunchNestedContainerSession_Container, kn) { + currentKey = ffj_t_Call_LaunchNestedContainerSession_Container + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Call_LaunchNestedContainerSession_Command, kn) { + currentKey = ffj_t_Call_LaunchNestedContainerSession_Command + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_Call_LaunchNestedContainerSession_ContainerID, kn) { + currentKey = ffj_t_Call_LaunchNestedContainerSession_ContainerID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Call_LaunchNestedContainerSessionno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool 
|| tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Call_LaunchNestedContainerSession_ContainerID: + goto handle_ContainerID + + case ffj_t_Call_LaunchNestedContainerSession_Command: + goto handle_Command + + case ffj_t_Call_LaunchNestedContainerSession_Container: + goto handle_Container + + case ffj_t_Call_LaunchNestedContainerSessionno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ContainerID: + + /* handler: uj.ContainerID type=mesos.ContainerID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.ContainerID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Command: + + /* handler: uj.Command type=mesos.CommandInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Command = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Command == nil { + uj.Command = new(mesos.CommandInfo) + } + + err = uj.Command.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Container: + + /* handler: uj.Container type=mesos.ContainerInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Container = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Container == nil { + uj.Container = new(mesos.ContainerInfo) + } + + err = uj.Container.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Call_ListFiles) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Call_ListFiles) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"path":`) + fflib.WriteJsonString(buf, string(mj.Path)) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Call_ListFilesbase = iota + ffj_t_Call_ListFilesno_such_key + + ffj_t_Call_ListFiles_Path +) + +var ffj_key_Call_ListFiles_Path = []byte("path") + +func (uj *Call_ListFiles) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Call_ListFiles) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Call_ListFilesbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = 
fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Call_ListFilesno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'p': + + if bytes.Equal(ffj_key_Call_ListFiles_Path, kn) { + currentKey = ffj_t_Call_ListFiles_Path + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Call_ListFiles_Path, kn) { + currentKey = ffj_t_Call_ListFiles_Path + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Call_ListFilesno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Call_ListFiles_Path: + goto handle_Path + + case ffj_t_Call_ListFilesno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Path: + + /* handler: uj.Path type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Path = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Call_PruneImages) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Call_PruneImages) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"excluded_images":`) + if mj.ExcludedImages != nil { + 
buf.WriteString(`[`) + for i, v := range mj.ExcludedImages { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Call_PruneImagesbase = iota + ffj_t_Call_PruneImagesno_such_key + + ffj_t_Call_PruneImages_ExcludedImages +) + +var ffj_key_Call_PruneImages_ExcludedImages = []byte("excluded_images") + +func (uj *Call_PruneImages) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Call_PruneImages) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Call_PruneImagesbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Call_PruneImagesno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'e': + + if bytes.Equal(ffj_key_Call_PruneImages_ExcludedImages, kn) { + currentKey = ffj_t_Call_PruneImages_ExcludedImages + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Call_PruneImages_ExcludedImages, kn) { + currentKey = ffj_t_Call_PruneImages_ExcludedImages + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Call_PruneImagesno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Call_PruneImages_ExcludedImages: + goto handle_ExcludedImages + + case ffj_t_Call_PruneImagesno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ExcludedImages: + + /* handler: uj.ExcludedImages type=[]mesos.Image kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.ExcludedImages = nil + } else { + + uj.ExcludedImages = []mesos.Image{} + + wantVal := true + + for { + + var 
tmp_uj__ExcludedImages mesos.Image + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__ExcludedImages type=mesos.Image kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__ExcludedImages.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.ExcludedImages = append(uj.ExcludedImages, tmp_uj__ExcludedImages) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Call_ReadFile) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Call_ReadFile) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "path":`) + fflib.WriteJsonString(buf, string(mj.Path)) + buf.WriteString(`,"offset":`) + fflib.FormatBits2(buf, uint64(mj.Offset), 10, false) + buf.WriteByte(',') + if mj.Length != nil { + if true { + buf.WriteString(`"length":`) + fflib.FormatBits2(buf, uint64(*mj.Length), 10, false) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Call_ReadFilebase = iota + ffj_t_Call_ReadFileno_such_key + + ffj_t_Call_ReadFile_Path + + ffj_t_Call_ReadFile_Offset + + ffj_t_Call_ReadFile_Length +) + +var ffj_key_Call_ReadFile_Path = []byte("path") + +var ffj_key_Call_ReadFile_Offset = []byte("offset") + +var ffj_key_Call_ReadFile_Length = []byte("length") + +func (uj *Call_ReadFile) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Call_ReadFile) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Call_ReadFilebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto 
wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Call_ReadFileno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'l': + + if bytes.Equal(ffj_key_Call_ReadFile_Length, kn) { + currentKey = ffj_t_Call_ReadFile_Length + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'o': + + if bytes.Equal(ffj_key_Call_ReadFile_Offset, kn) { + currentKey = ffj_t_Call_ReadFile_Offset + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffj_key_Call_ReadFile_Path, kn) { + currentKey = ffj_t_Call_ReadFile_Path + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Call_ReadFile_Length, kn) { + currentKey = ffj_t_Call_ReadFile_Length + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Call_ReadFile_Offset, kn) { + currentKey = ffj_t_Call_ReadFile_Offset + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Call_ReadFile_Path, kn) { + currentKey = ffj_t_Call_ReadFile_Path + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Call_ReadFileno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Call_ReadFile_Path: + goto handle_Path + + case ffj_t_Call_ReadFile_Offset: + goto handle_Offset + + case ffj_t_Call_ReadFile_Length: + goto handle_Length + + case ffj_t_Call_ReadFileno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Path: + + /* handler: uj.Path type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Path = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Offset: + + /* handler: uj.Offset type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + uj.Offset = uint64(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Length: + + /* handler: uj.Length type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == 
fflib.FFTok_null { + + uj.Length = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.Length = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Call_RemoveContainer) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Call_RemoveContainer) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"container_id":`) + + { + + err = mj.ContainerID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Call_RemoveContainerbase = iota + ffj_t_Call_RemoveContainerno_such_key + + ffj_t_Call_RemoveContainer_ContainerID +) + +var ffj_key_Call_RemoveContainer_ContainerID = []byte("container_id") + +func (uj *Call_RemoveContainer) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Call_RemoveContainer) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Call_RemoveContainerbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Call_RemoveContainerno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_Call_RemoveContainer_ContainerID, kn) { + currentKey = ffj_t_Call_RemoveContainer_ContainerID + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.AsciiEqualFold(ffj_key_Call_RemoveContainer_ContainerID, kn) { + currentKey = ffj_t_Call_RemoveContainer_ContainerID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Call_RemoveContainerno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Call_RemoveContainer_ContainerID: + goto handle_ContainerID + + case ffj_t_Call_RemoveContainerno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ContainerID: + + /* handler: uj.ContainerID type=mesos.ContainerID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.ContainerID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Call_RemoveNestedContainer) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Call_RemoveNestedContainer) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"container_id":`) + + { + + err = mj.ContainerID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Call_RemoveNestedContainerbase = iota + ffj_t_Call_RemoveNestedContainerno_such_key + + ffj_t_Call_RemoveNestedContainer_ContainerID +) + +var ffj_key_Call_RemoveNestedContainer_ContainerID = []byte("container_id") + +func (uj *Call_RemoveNestedContainer) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Call_RemoveNestedContainer) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Call_RemoveNestedContainerbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + 
+mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Call_RemoveNestedContainerno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_Call_RemoveNestedContainer_ContainerID, kn) { + currentKey = ffj_t_Call_RemoveNestedContainer_ContainerID + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.AsciiEqualFold(ffj_key_Call_RemoveNestedContainer_ContainerID, kn) { + currentKey = ffj_t_Call_RemoveNestedContainer_ContainerID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Call_RemoveNestedContainerno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Call_RemoveNestedContainer_ContainerID: + goto handle_ContainerID + + case ffj_t_Call_RemoveNestedContainerno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ContainerID: + + /* handler: uj.ContainerID type=mesos.ContainerID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.ContainerID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Call_RemoveResourceProviderConfig) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Call_RemoveResourceProviderConfig) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + 
buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"type":`) + fflib.WriteJsonString(buf, string(mj.Type)) + buf.WriteString(`,"name":`) + fflib.WriteJsonString(buf, string(mj.Name)) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Call_RemoveResourceProviderConfigbase = iota + ffj_t_Call_RemoveResourceProviderConfigno_such_key + + ffj_t_Call_RemoveResourceProviderConfig_Type + + ffj_t_Call_RemoveResourceProviderConfig_Name +) + +var ffj_key_Call_RemoveResourceProviderConfig_Type = []byte("type") + +var ffj_key_Call_RemoveResourceProviderConfig_Name = []byte("name") + +func (uj *Call_RemoveResourceProviderConfig) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Call_RemoveResourceProviderConfig) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Call_RemoveResourceProviderConfigbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Call_RemoveResourceProviderConfigno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'n': + + if bytes.Equal(ffj_key_Call_RemoveResourceProviderConfig_Name, kn) { + currentKey = ffj_t_Call_RemoveResourceProviderConfig_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_Call_RemoveResourceProviderConfig_Type, kn) { + currentKey = ffj_t_Call_RemoveResourceProviderConfig_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Call_RemoveResourceProviderConfig_Name, kn) { + currentKey = ffj_t_Call_RemoveResourceProviderConfig_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Call_RemoveResourceProviderConfig_Type, kn) { + currentKey = ffj_t_Call_RemoveResourceProviderConfig_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Call_RemoveResourceProviderConfigno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Call_RemoveResourceProviderConfig_Type: + goto handle_Type + + case ffj_t_Call_RemoveResourceProviderConfig_Name: + goto handle_Name + + case ffj_t_Call_RemoveResourceProviderConfigno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Type: + + /* handler: uj.Type type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Type = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Name: + + /* handler: uj.Name type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Name = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Call_SetLoggingLevel) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Call_SetLoggingLevel) MarshalJSONBuf(buf 
fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"level":`) + fflib.FormatBits2(buf, uint64(mj.Level), 10, false) + buf.WriteString(`,"duration":`) + + { + + err = mj.Duration.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Call_SetLoggingLevelbase = iota + ffj_t_Call_SetLoggingLevelno_such_key + + ffj_t_Call_SetLoggingLevel_Level + + ffj_t_Call_SetLoggingLevel_Duration +) + +var ffj_key_Call_SetLoggingLevel_Level = []byte("level") + +var ffj_key_Call_SetLoggingLevel_Duration = []byte("duration") + +func (uj *Call_SetLoggingLevel) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Call_SetLoggingLevel) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Call_SetLoggingLevelbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Call_SetLoggingLevelno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'd': + + if bytes.Equal(ffj_key_Call_SetLoggingLevel_Duration, kn) { + currentKey = ffj_t_Call_SetLoggingLevel_Duration + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'l': + + if bytes.Equal(ffj_key_Call_SetLoggingLevel_Level, kn) { + currentKey = ffj_t_Call_SetLoggingLevel_Level + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Call_SetLoggingLevel_Duration, kn) { + currentKey = ffj_t_Call_SetLoggingLevel_Duration + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Call_SetLoggingLevel_Level, kn) { + currentKey = ffj_t_Call_SetLoggingLevel_Level + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Call_SetLoggingLevelno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Call_SetLoggingLevel_Level: + goto handle_Level + + case ffj_t_Call_SetLoggingLevel_Duration: + goto handle_Duration + + case ffj_t_Call_SetLoggingLevelno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Level: + + /* handler: uj.Level type=uint32 kind=uint32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint32", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32) + + if err != nil { + return fs.WrapErr(err) + } + + uj.Level = uint32(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Duration: + + /* handler: uj.Duration type=mesos.DurationInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.Duration.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Call_UpdateResourceProviderConfig) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Call_UpdateResourceProviderConfig) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") 
+ return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"info":`) + + { + + err = mj.Info.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Call_UpdateResourceProviderConfigbase = iota + ffj_t_Call_UpdateResourceProviderConfigno_such_key + + ffj_t_Call_UpdateResourceProviderConfig_Info +) + +var ffj_key_Call_UpdateResourceProviderConfig_Info = []byte("info") + +func (uj *Call_UpdateResourceProviderConfig) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Call_UpdateResourceProviderConfig) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Call_UpdateResourceProviderConfigbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Call_UpdateResourceProviderConfigno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'i': + + if bytes.Equal(ffj_key_Call_UpdateResourceProviderConfig_Info, kn) { + currentKey = ffj_t_Call_UpdateResourceProviderConfig_Info + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Call_UpdateResourceProviderConfig_Info, kn) { + currentKey = ffj_t_Call_UpdateResourceProviderConfig_Info + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Call_UpdateResourceProviderConfigno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Call_UpdateResourceProviderConfig_Info: + goto handle_Info + + case ffj_t_Call_UpdateResourceProviderConfigno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Info: + + /* handler: uj.Info type=mesos.ResourceProviderInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.Info.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Call_WaitContainer) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Call_WaitContainer) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"container_id":`) + + { + + err = mj.ContainerID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Call_WaitContainerbase = iota + ffj_t_Call_WaitContainerno_such_key + + ffj_t_Call_WaitContainer_ContainerID +) + +var ffj_key_Call_WaitContainer_ContainerID = []byte("container_id") + +func (uj *Call_WaitContainer) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Call_WaitContainer) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Call_WaitContainerbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + 
for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Call_WaitContainerno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_Call_WaitContainer_ContainerID, kn) { + currentKey = ffj_t_Call_WaitContainer_ContainerID + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.AsciiEqualFold(ffj_key_Call_WaitContainer_ContainerID, kn) { + currentKey = ffj_t_Call_WaitContainer_ContainerID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Call_WaitContainerno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Call_WaitContainer_ContainerID: + goto handle_ContainerID + + case ffj_t_Call_WaitContainerno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ContainerID: + + /* handler: uj.ContainerID type=mesos.ContainerID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.ContainerID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Call_WaitNestedContainer) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Call_WaitNestedContainer) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + 
buf.WriteString(`{"container_id":`) + + { + + err = mj.ContainerID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Call_WaitNestedContainerbase = iota + ffj_t_Call_WaitNestedContainerno_such_key + + ffj_t_Call_WaitNestedContainer_ContainerID +) + +var ffj_key_Call_WaitNestedContainer_ContainerID = []byte("container_id") + +func (uj *Call_WaitNestedContainer) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Call_WaitNestedContainer) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Call_WaitNestedContainerbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Call_WaitNestedContainerno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_Call_WaitNestedContainer_ContainerID, kn) { + currentKey = ffj_t_Call_WaitNestedContainer_ContainerID + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.AsciiEqualFold(ffj_key_Call_WaitNestedContainer_ContainerID, kn) { + currentKey = ffj_t_Call_WaitNestedContainer_ContainerID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Call_WaitNestedContainerno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Call_WaitNestedContainer_ContainerID: + goto handle_ContainerID + + case ffj_t_Call_WaitNestedContainerno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ContainerID: + + /* handler: uj.ContainerID type=mesos.ContainerID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.ContainerID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted 
value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *ProcessIO) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *ProcessIO) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "type":`) + + { + + obj, err = mj.Type.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + if mj.Data != nil { + if true { + buf.WriteString(`"data":`) + + { + + err = mj.Data.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Control != nil { + if true { + buf.WriteString(`"control":`) + + { + + err = mj.Control.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_ProcessIObase = iota + ffj_t_ProcessIOno_such_key + + ffj_t_ProcessIO_Type + + ffj_t_ProcessIO_Data + + ffj_t_ProcessIO_Control +) + +var ffj_key_ProcessIO_Type = []byte("type") + +var ffj_key_ProcessIO_Data = []byte("data") + +var ffj_key_ProcessIO_Control = []byte("control") + +func (uj *ProcessIO) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *ProcessIO) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_ProcessIObase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_ProcessIOno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_ProcessIO_Control, kn) { + currentKey = ffj_t_ProcessIO_Control + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'd': + + if bytes.Equal(ffj_key_ProcessIO_Data, kn) { + currentKey = ffj_t_ProcessIO_Data + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_ProcessIO_Type, kn) { + currentKey = ffj_t_ProcessIO_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_ProcessIO_Control, kn) { + currentKey = ffj_t_ProcessIO_Control + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_ProcessIO_Data, kn) { + currentKey = ffj_t_ProcessIO_Data + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_ProcessIO_Type, kn) { + currentKey = ffj_t_ProcessIO_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_ProcessIOno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_ProcessIO_Type: + goto handle_Type + + case ffj_t_ProcessIO_Data: + goto handle_Data + + case ffj_t_ProcessIO_Control: + goto handle_Control + + case ffj_t_ProcessIOno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Type: + + /* handler: uj.Type type=agent.ProcessIO_Type kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = uj.Type.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Data: + + /* handler: uj.Data type=agent.ProcessIO_Data kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Data = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Data == nil { + uj.Data = new(ProcessIO_Data) + } + + err = uj.Data.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Control: + + /* handler: uj.Control type=agent.ProcessIO_Control kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Control = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Control == nil { + uj.Control = new(ProcessIO_Control) + } + + err = uj.Control.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: 
%v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *ProcessIO_Control) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *ProcessIO_Control) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "type":`) + + { + + obj, err = mj.Type.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + if mj.TTYInfo != nil { + if true { + buf.WriteString(`"tty_info":`) + + { + + err = mj.TTYInfo.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Heartbeat != nil { + if true { + buf.WriteString(`"heartbeat":`) + + { + + err = mj.Heartbeat.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_ProcessIO_Controlbase = iota + ffj_t_ProcessIO_Controlno_such_key + + ffj_t_ProcessIO_Control_Type + + ffj_t_ProcessIO_Control_TTYInfo + + ffj_t_ProcessIO_Control_Heartbeat +) + +var ffj_key_ProcessIO_Control_Type = []byte("type") + +var ffj_key_ProcessIO_Control_TTYInfo = []byte("tty_info") + +var ffj_key_ProcessIO_Control_Heartbeat = []byte("heartbeat") + +func (uj *ProcessIO_Control) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *ProcessIO_Control) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_ProcessIO_Controlbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_ProcessIO_Controlno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'h': + + if bytes.Equal(ffj_key_ProcessIO_Control_Heartbeat, kn) { + currentKey = ffj_t_ProcessIO_Control_Heartbeat + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_ProcessIO_Control_Type, kn) { + currentKey = ffj_t_ProcessIO_Control_Type + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ProcessIO_Control_TTYInfo, kn) { + currentKey = ffj_t_ProcessIO_Control_TTYInfo + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_ProcessIO_Control_Heartbeat, kn) { + currentKey = ffj_t_ProcessIO_Control_Heartbeat + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_ProcessIO_Control_TTYInfo, kn) { + currentKey = ffj_t_ProcessIO_Control_TTYInfo + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_ProcessIO_Control_Type, kn) { + currentKey = ffj_t_ProcessIO_Control_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_ProcessIO_Controlno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_ProcessIO_Control_Type: + goto handle_Type + + case ffj_t_ProcessIO_Control_TTYInfo: + goto handle_TTYInfo + + case ffj_t_ProcessIO_Control_Heartbeat: + goto handle_Heartbeat + + case ffj_t_ProcessIO_Controlno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Type: + + /* handler: uj.Type type=agent.ProcessIO_Control_Type kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = uj.Type.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_TTYInfo: + + /* handler: uj.TTYInfo type=mesos.TTYInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.TTYInfo = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.TTYInfo == nil { + uj.TTYInfo = new(mesos.TTYInfo) + } + + err = uj.TTYInfo.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Heartbeat: + + /* handler: uj.Heartbeat type=agent.ProcessIO_Control_Heartbeat kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Heartbeat = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Heartbeat == nil { + uj.Heartbeat = new(ProcessIO_Control_Heartbeat) + } + + err = uj.Heartbeat.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = 
fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *ProcessIO_Control_Heartbeat) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *ProcessIO_Control_Heartbeat) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.Interval != nil { + if true { + buf.WriteString(`"interval":`) + + { + + err = mj.Interval.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_ProcessIO_Control_Heartbeatbase = iota + ffj_t_ProcessIO_Control_Heartbeatno_such_key + + ffj_t_ProcessIO_Control_Heartbeat_Interval +) + +var ffj_key_ProcessIO_Control_Heartbeat_Interval = []byte("interval") + +func (uj *ProcessIO_Control_Heartbeat) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *ProcessIO_Control_Heartbeat) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_ProcessIO_Control_Heartbeatbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_ProcessIO_Control_Heartbeatno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'i': + + if bytes.Equal(ffj_key_ProcessIO_Control_Heartbeat_Interval, kn) { + currentKey = ffj_t_ProcessIO_Control_Heartbeat_Interval + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_ProcessIO_Control_Heartbeat_Interval, kn) { + currentKey = ffj_t_ProcessIO_Control_Heartbeat_Interval + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_ProcessIO_Control_Heartbeatno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_ProcessIO_Control_Heartbeat_Interval: + goto handle_Interval + + case ffj_t_ProcessIO_Control_Heartbeatno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Interval: + + /* handler: uj.Interval type=mesos.DurationInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Interval = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Interval == nil { + uj.Interval = new(mesos.DurationInfo) + } + + err = uj.Interval.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *ProcessIO_Data) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *ProcessIO_Data) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "type":`) + + { + + obj, err = mj.Type.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + if len(mj.Data) != 0 { + buf.WriteString(`"data":`) + if mj.Data != nil { + buf.WriteString(`"`) + { + enc := base64.NewEncoder(base64.StdEncoding, buf) + enc.Write(reflect.Indirect(reflect.ValueOf(mj.Data)).Bytes()) + enc.Close() + } + buf.WriteString(`"`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_ProcessIO_Database = iota + ffj_t_ProcessIO_Datano_such_key + + ffj_t_ProcessIO_Data_Type + + ffj_t_ProcessIO_Data_Data +) + +var ffj_key_ProcessIO_Data_Type = []byte("type") + +var 
ffj_key_ProcessIO_Data_Data = []byte("data") + +func (uj *ProcessIO_Data) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *ProcessIO_Data) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_ProcessIO_Database + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_ProcessIO_Datano_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'd': + + if bytes.Equal(ffj_key_ProcessIO_Data_Data, kn) { + currentKey = ffj_t_ProcessIO_Data_Data + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_ProcessIO_Data_Type, kn) { + currentKey = ffj_t_ProcessIO_Data_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_ProcessIO_Data_Data, kn) { + currentKey = ffj_t_ProcessIO_Data_Data + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_ProcessIO_Data_Type, kn) { + currentKey = ffj_t_ProcessIO_Data_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_ProcessIO_Datano_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_ProcessIO_Data_Type: + goto handle_Type + + case ffj_t_ProcessIO_Data_Data: + goto handle_Data + + case ffj_t_ProcessIO_Datano_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Type: + + /* handler: uj.Type type=agent.ProcessIO_Data_Type kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = uj.Type.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Data: + + /* handler: uj.Data type=[]uint8 kind=slice quoted=false*/ + + { + + { + if tok != 
fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Data = nil + } else { + b := make([]byte, base64.StdEncoding.DecodedLen(fs.Output.Len())) + n, err := base64.StdEncoding.Decode(b, fs.Output.Bytes()) + if err != nil { + return fs.WrapErr(err) + } + + v := reflect.ValueOf(&uj.Data).Elem() + v.SetBytes(b[0:n]) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Response) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Response) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "type":`) + + { + + obj, err = mj.Type.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + if mj.GetHealth != nil { + if true { + buf.WriteString(`"get_health":`) + + { + + err = mj.GetHealth.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.GetFlags != nil { + if true { + buf.WriteString(`"get_flags":`) + + { + + err = mj.GetFlags.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.GetVersion != nil { + if true { + buf.WriteString(`"get_version":`) + + { + + err = mj.GetVersion.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.GetMetrics != nil { + if true { + buf.WriteString(`"get_metrics":`) + + { + + err = mj.GetMetrics.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.GetLoggingLevel != nil { + if true { + buf.WriteString(`"get_logging_level":`) + + { + + err = mj.GetLoggingLevel.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.ListFiles != nil { + if true { + buf.WriteString(`"list_files":`) + + { + + err = mj.ListFiles.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.ReadFile != nil { + if true { + buf.WriteString(`"read_file":`) + + { + + err = mj.ReadFile.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.GetState != nil { + if true { + buf.WriteString(`"get_state":`) + + { + + err = mj.GetState.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.GetContainers != nil { + if true { + buf.WriteString(`"get_containers":`) + + { + + err = mj.GetContainers.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.GetFrameworks != nil { + if true { + buf.WriteString(`"get_frameworks":`) + + { + + err = mj.GetFrameworks.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.GetExecutors != 
nil { + if true { + buf.WriteString(`"get_executors":`) + + { + + err = mj.GetExecutors.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.GetOperations != nil { + if true { + buf.WriteString(`"get_operations":`) + + { + + err = mj.GetOperations.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.GetTasks != nil { + if true { + buf.WriteString(`"get_tasks":`) + + { + + err = mj.GetTasks.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.GetAgent != nil { + if true { + buf.WriteString(`"get_agent":`) + + { + + err = mj.GetAgent.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.GetResourceProviders != nil { + if true { + buf.WriteString(`"get_resource_providers":`) + + { + + err = mj.GetResourceProviders.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.WaitNestedContainer != nil { + if true { + buf.WriteString(`"wait_nested_container":`) + + { + + err = mj.WaitNestedContainer.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.WaitContainer != nil { + if true { + buf.WriteString(`"wait_container":`) + + { + + err = mj.WaitContainer.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Responsebase = iota + ffj_t_Responseno_such_key + + ffj_t_Response_Type + + ffj_t_Response_GetHealth + + ffj_t_Response_GetFlags + + ffj_t_Response_GetVersion + + ffj_t_Response_GetMetrics + + ffj_t_Response_GetLoggingLevel + + ffj_t_Response_ListFiles + + ffj_t_Response_ReadFile + + ffj_t_Response_GetState + + ffj_t_Response_GetContainers + + ffj_t_Response_GetFrameworks + + ffj_t_Response_GetExecutors + + ffj_t_Response_GetOperations + + ffj_t_Response_GetTasks + + ffj_t_Response_GetAgent + + ffj_t_Response_GetResourceProviders + + ffj_t_Response_WaitNestedContainer + + ffj_t_Response_WaitContainer +) + +var ffj_key_Response_Type = []byte("type") + +var ffj_key_Response_GetHealth = []byte("get_health") + +var ffj_key_Response_GetFlags = []byte("get_flags") + +var ffj_key_Response_GetVersion = []byte("get_version") + +var ffj_key_Response_GetMetrics = []byte("get_metrics") + +var ffj_key_Response_GetLoggingLevel = []byte("get_logging_level") + +var ffj_key_Response_ListFiles = []byte("list_files") + +var ffj_key_Response_ReadFile = []byte("read_file") + +var ffj_key_Response_GetState = []byte("get_state") + +var ffj_key_Response_GetContainers = []byte("get_containers") + +var ffj_key_Response_GetFrameworks = []byte("get_frameworks") + +var ffj_key_Response_GetExecutors = []byte("get_executors") + +var ffj_key_Response_GetOperations = []byte("get_operations") + +var ffj_key_Response_GetTasks = []byte("get_tasks") + +var ffj_key_Response_GetAgent = []byte("get_agent") + +var ffj_key_Response_GetResourceProviders = []byte("get_resource_providers") + +var ffj_key_Response_WaitNestedContainer = []byte("wait_nested_container") + +var ffj_key_Response_WaitContainer = []byte("wait_container") + +func (uj *Response) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Response) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Responsebase + _ = currentKey + tok := 
fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Responseno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'g': + + if bytes.Equal(ffj_key_Response_GetHealth, kn) { + currentKey = ffj_t_Response_GetHealth + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Response_GetFlags, kn) { + currentKey = ffj_t_Response_GetFlags + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Response_GetVersion, kn) { + currentKey = ffj_t_Response_GetVersion + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Response_GetMetrics, kn) { + currentKey = ffj_t_Response_GetMetrics + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Response_GetLoggingLevel, kn) { + currentKey = ffj_t_Response_GetLoggingLevel + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Response_GetState, kn) { + currentKey = ffj_t_Response_GetState + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Response_GetContainers, kn) { + currentKey = ffj_t_Response_GetContainers + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Response_GetFrameworks, kn) { + currentKey = ffj_t_Response_GetFrameworks + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Response_GetExecutors, kn) { + currentKey = ffj_t_Response_GetExecutors + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Response_GetOperations, kn) { + currentKey = ffj_t_Response_GetOperations + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Response_GetTasks, kn) { + currentKey = ffj_t_Response_GetTasks + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Response_GetAgent, kn) { + currentKey = ffj_t_Response_GetAgent + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Response_GetResourceProviders, kn) { + currentKey = ffj_t_Response_GetResourceProviders + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'l': + + if bytes.Equal(ffj_key_Response_ListFiles, kn) { + currentKey = ffj_t_Response_ListFiles + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'r': + + if bytes.Equal(ffj_key_Response_ReadFile, kn) { + currentKey = ffj_t_Response_ReadFile + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_Response_Type, kn) { + currentKey = ffj_t_Response_Type + state = fflib.FFParse_want_colon + goto 
mainparse + } + + case 'w': + + if bytes.Equal(ffj_key_Response_WaitNestedContainer, kn) { + currentKey = ffj_t_Response_WaitNestedContainer + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Response_WaitContainer, kn) { + currentKey = ffj_t_Response_WaitContainer + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.AsciiEqualFold(ffj_key_Response_WaitContainer, kn) { + currentKey = ffj_t_Response_WaitContainer + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Response_WaitNestedContainer, kn) { + currentKey = ffj_t_Response_WaitNestedContainer + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Response_GetResourceProviders, kn) { + currentKey = ffj_t_Response_GetResourceProviders + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_Response_GetAgent, kn) { + currentKey = ffj_t_Response_GetAgent + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Response_GetTasks, kn) { + currentKey = ffj_t_Response_GetTasks + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Response_GetOperations, kn) { + currentKey = ffj_t_Response_GetOperations + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Response_GetExecutors, kn) { + currentKey = ffj_t_Response_GetExecutors + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Response_GetFrameworks, kn) { + currentKey = ffj_t_Response_GetFrameworks + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Response_GetContainers, kn) { + currentKey = ffj_t_Response_GetContainers + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Response_GetState, kn) { + currentKey = ffj_t_Response_GetState + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_Response_ReadFile, kn) { + currentKey = ffj_t_Response_ReadFile + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Response_ListFiles, kn) { + currentKey = ffj_t_Response_ListFiles + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_Response_GetLoggingLevel, kn) { + currentKey = ffj_t_Response_GetLoggingLevel + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Response_GetMetrics, kn) { + currentKey = ffj_t_Response_GetMetrics + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Response_GetVersion, kn) { + currentKey = ffj_t_Response_GetVersion + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Response_GetFlags, kn) { + currentKey = ffj_t_Response_GetFlags + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_Response_GetHealth, kn) { + currentKey = ffj_t_Response_GetHealth + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Response_Type, kn) { + currentKey = ffj_t_Response_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Responseno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + 
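+	// colon consumed; the next token scanned must be this field's value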
continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Response_Type: + goto handle_Type + + case ffj_t_Response_GetHealth: + goto handle_GetHealth + + case ffj_t_Response_GetFlags: + goto handle_GetFlags + + case ffj_t_Response_GetVersion: + goto handle_GetVersion + + case ffj_t_Response_GetMetrics: + goto handle_GetMetrics + + case ffj_t_Response_GetLoggingLevel: + goto handle_GetLoggingLevel + + case ffj_t_Response_ListFiles: + goto handle_ListFiles + + case ffj_t_Response_ReadFile: + goto handle_ReadFile + + case ffj_t_Response_GetState: + goto handle_GetState + + case ffj_t_Response_GetContainers: + goto handle_GetContainers + + case ffj_t_Response_GetFrameworks: + goto handle_GetFrameworks + + case ffj_t_Response_GetExecutors: + goto handle_GetExecutors + + case ffj_t_Response_GetOperations: + goto handle_GetOperations + + case ffj_t_Response_GetTasks: + goto handle_GetTasks + + case ffj_t_Response_GetAgent: + goto handle_GetAgent + + case ffj_t_Response_GetResourceProviders: + goto handle_GetResourceProviders + + case ffj_t_Response_WaitNestedContainer: + goto handle_WaitNestedContainer + + case ffj_t_Response_WaitContainer: + goto handle_WaitContainer + + case ffj_t_Responseno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Type: + + /* handler: uj.Type type=agent.Response_Type kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = uj.Type.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_GetHealth: + + /* handler: uj.GetHealth type=agent.Response_GetHealth kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.GetHealth = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.GetHealth == nil { + uj.GetHealth = new(Response_GetHealth) + } + + err = uj.GetHealth.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_GetFlags: + + /* handler: uj.GetFlags type=agent.Response_GetFlags kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.GetFlags = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.GetFlags == nil { + uj.GetFlags = new(Response_GetFlags) + } + + err = uj.GetFlags.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_GetVersion: + + /* handler: uj.GetVersion type=agent.Response_GetVersion kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.GetVersion = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.GetVersion == nil { + uj.GetVersion = new(Response_GetVersion) + } + + err = uj.GetVersion.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = 
fflib.FFParse_after_value + goto mainparse + +handle_GetMetrics: + + /* handler: uj.GetMetrics type=agent.Response_GetMetrics kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.GetMetrics = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.GetMetrics == nil { + uj.GetMetrics = new(Response_GetMetrics) + } + + err = uj.GetMetrics.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_GetLoggingLevel: + + /* handler: uj.GetLoggingLevel type=agent.Response_GetLoggingLevel kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.GetLoggingLevel = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.GetLoggingLevel == nil { + uj.GetLoggingLevel = new(Response_GetLoggingLevel) + } + + err = uj.GetLoggingLevel.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ListFiles: + + /* handler: uj.ListFiles type=agent.Response_ListFiles kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.ListFiles = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.ListFiles == nil { + uj.ListFiles = new(Response_ListFiles) + } + + err = uj.ListFiles.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ReadFile: + + /* handler: uj.ReadFile type=agent.Response_ReadFile kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.ReadFile = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.ReadFile == nil { + uj.ReadFile = new(Response_ReadFile) + } + + err = uj.ReadFile.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_GetState: + + /* handler: uj.GetState type=agent.Response_GetState kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.GetState = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.GetState == nil { + uj.GetState = new(Response_GetState) + } + + err = uj.GetState.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_GetContainers: + + /* handler: uj.GetContainers type=agent.Response_GetContainers kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.GetContainers = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.GetContainers == nil { + uj.GetContainers = new(Response_GetContainers) + } + + err = uj.GetContainers.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_GetFrameworks: + + /* handler: uj.GetFrameworks type=agent.Response_GetFrameworks kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.GetFrameworks = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.GetFrameworks == nil { + uj.GetFrameworks = new(Response_GetFrameworks) + } + + err = uj.GetFrameworks.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + 
return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_GetExecutors: + + /* handler: uj.GetExecutors type=agent.Response_GetExecutors kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.GetExecutors = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.GetExecutors == nil { + uj.GetExecutors = new(Response_GetExecutors) + } + + err = uj.GetExecutors.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_GetOperations: + + /* handler: uj.GetOperations type=agent.Response_GetOperations kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.GetOperations = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.GetOperations == nil { + uj.GetOperations = new(Response_GetOperations) + } + + err = uj.GetOperations.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_GetTasks: + + /* handler: uj.GetTasks type=agent.Response_GetTasks kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.GetTasks = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.GetTasks == nil { + uj.GetTasks = new(Response_GetTasks) + } + + err = uj.GetTasks.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_GetAgent: + + /* handler: uj.GetAgent type=agent.Response_GetAgent kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.GetAgent = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.GetAgent == nil { + uj.GetAgent = new(Response_GetAgent) + } + + err = uj.GetAgent.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_GetResourceProviders: + + /* handler: uj.GetResourceProviders type=agent.Response_GetResourceProviders kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.GetResourceProviders = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.GetResourceProviders == nil { + uj.GetResourceProviders = new(Response_GetResourceProviders) + } + + err = uj.GetResourceProviders.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_WaitNestedContainer: + + /* handler: uj.WaitNestedContainer type=agent.Response_WaitNestedContainer kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.WaitNestedContainer = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.WaitNestedContainer == nil { + uj.WaitNestedContainer = new(Response_WaitNestedContainer) + } + + err = uj.WaitNestedContainer.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_WaitContainer: + + /* handler: uj.WaitContainer type=agent.Response_WaitContainer kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.WaitContainer = nil + + state = fflib.FFParse_after_value + 
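+		// explicit JSON null clears the pointer field (no allocation); control returns to the main loop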
goto mainparse + } + + if uj.WaitContainer == nil { + uj.WaitContainer = new(Response_WaitContainer) + } + + err = uj.WaitContainer.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Response_GetAgent) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Response_GetAgent) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.AgentInfo != nil { + if true { + buf.WriteString(`"agent_info":`) + + { + + err = mj.AgentInfo.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Response_GetAgentbase = iota + ffj_t_Response_GetAgentno_such_key + + ffj_t_Response_GetAgent_AgentInfo +) + +var ffj_key_Response_GetAgent_AgentInfo = []byte("agent_info") + +func (uj *Response_GetAgent) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Response_GetAgent) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Response_GetAgentbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
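+				// zero-length key: route to the no_such_key sentinel so its value is skipped rather than treated as an error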
+ currentKey = ffj_t_Response_GetAgentno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'a': + + if bytes.Equal(ffj_key_Response_GetAgent_AgentInfo, kn) { + currentKey = ffj_t_Response_GetAgent_AgentInfo + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.AsciiEqualFold(ffj_key_Response_GetAgent_AgentInfo, kn) { + currentKey = ffj_t_Response_GetAgent_AgentInfo + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Response_GetAgentno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Response_GetAgent_AgentInfo: + goto handle_AgentInfo + + case ffj_t_Response_GetAgentno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_AgentInfo: + + /* handler: uj.AgentInfo type=mesos.AgentInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.AgentInfo = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.AgentInfo == nil { + uj.AgentInfo = new(mesos.AgentInfo) + } + + err = uj.AgentInfo.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Response_GetContainers) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Response_GetContainers) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"containers":`) + if mj.Containers != nil { + buf.WriteString(`[`) + for i, v := range mj.Containers { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Response_GetContainersbase = iota + ffj_t_Response_GetContainersno_such_key + + ffj_t_Response_GetContainers_Containers +) + +var ffj_key_Response_GetContainers_Containers = []byte("containers") + +func (uj *Response_GetContainers) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Response_GetContainers) UnmarshalJSONFFLexer(fs 
*fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Response_GetContainersbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Response_GetContainersno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_Response_GetContainers_Containers, kn) { + currentKey = ffj_t_Response_GetContainers_Containers + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Response_GetContainers_Containers, kn) { + currentKey = ffj_t_Response_GetContainers_Containers + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Response_GetContainersno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Response_GetContainers_Containers: + goto handle_Containers + + case ffj_t_Response_GetContainersno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Containers: + + /* handler: uj.Containers type=[]agent.Response_GetContainers_Container kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Containers = nil + } else { + + uj.Containers = []Response_GetContainers_Container{} + + wantVal := true + + for { + + var tmp_uj__Containers Response_GetContainers_Container + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
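+						// a comma arriving while a value is still wanted means an elided element, so fail with the generic value-token error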
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Containers type=agent.Response_GetContainers_Container kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Containers.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Containers = append(uj.Containers, tmp_uj__Containers) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Response_GetContainers_Container) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Response_GetContainers_Container) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.FrameworkID != nil { + if true { + buf.WriteString(`"framework_id":`) + + { + + err = mj.FrameworkID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.ExecutorID != nil { + if true { + buf.WriteString(`"executor_id":`) + + { + + err = mj.ExecutorID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.ExecutorName != nil { + if true { + buf.WriteString(`"executor_name":`) + fflib.WriteJsonString(buf, string(*mj.ExecutorName)) + buf.WriteByte(',') + } + } + buf.WriteString(`"container_id":`) + + { + + err = mj.ContainerID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + if mj.ContainerStatus != nil { + if true { + buf.WriteString(`"container_status":`) + + { + + err = mj.ContainerStatus.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.ResourceStatistics != nil { + if true { + buf.WriteString(`"resource_statistics":`) + + { + + err = mj.ResourceStatistics.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Response_GetContainers_Containerbase = iota + ffj_t_Response_GetContainers_Containerno_such_key + + ffj_t_Response_GetContainers_Container_FrameworkID + + ffj_t_Response_GetContainers_Container_ExecutorID + + ffj_t_Response_GetContainers_Container_ExecutorName + + ffj_t_Response_GetContainers_Container_ContainerID + + ffj_t_Response_GetContainers_Container_ContainerStatus + + ffj_t_Response_GetContainers_Container_ResourceStatistics +) + +var ffj_key_Response_GetContainers_Container_FrameworkID = []byte("framework_id") + +var ffj_key_Response_GetContainers_Container_ExecutorID = []byte("executor_id") + +var ffj_key_Response_GetContainers_Container_ExecutorName = []byte("executor_name") + +var 
ffj_key_Response_GetContainers_Container_ContainerID = []byte("container_id") + +var ffj_key_Response_GetContainers_Container_ContainerStatus = []byte("container_status") + +var ffj_key_Response_GetContainers_Container_ResourceStatistics = []byte("resource_statistics") + +func (uj *Response_GetContainers_Container) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Response_GetContainers_Container) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Response_GetContainers_Containerbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Response_GetContainers_Containerno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_Response_GetContainers_Container_ContainerID, kn) { + currentKey = ffj_t_Response_GetContainers_Container_ContainerID + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Response_GetContainers_Container_ContainerStatus, kn) { + currentKey = ffj_t_Response_GetContainers_Container_ContainerStatus + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'e': + + if bytes.Equal(ffj_key_Response_GetContainers_Container_ExecutorID, kn) { + currentKey = ffj_t_Response_GetContainers_Container_ExecutorID + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Response_GetContainers_Container_ExecutorName, kn) { + currentKey = ffj_t_Response_GetContainers_Container_ExecutorName + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'f': + + if bytes.Equal(ffj_key_Response_GetContainers_Container_FrameworkID, kn) { + currentKey = ffj_t_Response_GetContainers_Container_FrameworkID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'r': + + if bytes.Equal(ffj_key_Response_GetContainers_Container_ResourceStatistics, kn) { + currentKey = ffj_t_Response_GetContainers_Container_ResourceStatistics + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Response_GetContainers_Container_ResourceStatistics, kn) { + currentKey = ffj_t_Response_GetContainers_Container_ResourceStatistics + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Response_GetContainers_Container_ContainerStatus, kn) { + currentKey = ffj_t_Response_GetContainers_Container_ContainerStatus + state = fflib.FFParse_want_colon + goto mainparse + } + + if 
fflib.AsciiEqualFold(ffj_key_Response_GetContainers_Container_ContainerID, kn) { + currentKey = ffj_t_Response_GetContainers_Container_ContainerID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_Response_GetContainers_Container_ExecutorName, kn) { + currentKey = ffj_t_Response_GetContainers_Container_ExecutorName + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_Response_GetContainers_Container_ExecutorID, kn) { + currentKey = ffj_t_Response_GetContainers_Container_ExecutorID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Response_GetContainers_Container_FrameworkID, kn) { + currentKey = ffj_t_Response_GetContainers_Container_FrameworkID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Response_GetContainers_Containerno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Response_GetContainers_Container_FrameworkID: + goto handle_FrameworkID + + case ffj_t_Response_GetContainers_Container_ExecutorID: + goto handle_ExecutorID + + case ffj_t_Response_GetContainers_Container_ExecutorName: + goto handle_ExecutorName + + case ffj_t_Response_GetContainers_Container_ContainerID: + goto handle_ContainerID + + case ffj_t_Response_GetContainers_Container_ContainerStatus: + goto handle_ContainerStatus + + case ffj_t_Response_GetContainers_Container_ResourceStatistics: + goto handle_ResourceStatistics + + case ffj_t_Response_GetContainers_Containerno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_FrameworkID: + + /* handler: uj.FrameworkID type=mesos.FrameworkID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.FrameworkID = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.FrameworkID == nil { + uj.FrameworkID = new(mesos.FrameworkID) + } + + err = uj.FrameworkID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ExecutorID: + + /* handler: uj.ExecutorID type=mesos.ExecutorID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.ExecutorID = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.ExecutorID == nil { + uj.ExecutorID = new(mesos.ExecutorID) + } + + err = uj.ExecutorID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ExecutorName: + + /* handler: uj.ExecutorName type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.ExecutorName = nil + + } else { + + var tval string + outBuf := 
fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.ExecutorName = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ContainerID: + + /* handler: uj.ContainerID type=mesos.ContainerID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.ContainerID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ContainerStatus: + + /* handler: uj.ContainerStatus type=mesos.ContainerStatus kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.ContainerStatus = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.ContainerStatus == nil { + uj.ContainerStatus = new(mesos.ContainerStatus) + } + + err = uj.ContainerStatus.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ResourceStatistics: + + /* handler: uj.ResourceStatistics type=mesos.ResourceStatistics kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.ResourceStatistics = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.ResourceStatistics == nil { + uj.ResourceStatistics = new(mesos.ResourceStatistics) + } + + err = uj.ResourceStatistics.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Response_GetExecutors) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Response_GetExecutors) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"executors":`) + if mj.Executors != nil { + buf.WriteString(`[`) + for i, v := range mj.Executors { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteString(`,"completed_executors":`) + if mj.CompletedExecutors != nil { + buf.WriteString(`[`) + for i, v := range mj.CompletedExecutors { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Response_GetExecutorsbase = iota + ffj_t_Response_GetExecutorsno_such_key + + ffj_t_Response_GetExecutors_Executors + + ffj_t_Response_GetExecutors_CompletedExecutors +) + +var ffj_key_Response_GetExecutors_Executors = []byte("executors") + +var 
ffj_key_Response_GetExecutors_CompletedExecutors = []byte("completed_executors") + +func (uj *Response_GetExecutors) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Response_GetExecutors) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Response_GetExecutorsbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Response_GetExecutorsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_Response_GetExecutors_CompletedExecutors, kn) { + currentKey = ffj_t_Response_GetExecutors_CompletedExecutors + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'e': + + if bytes.Equal(ffj_key_Response_GetExecutors_Executors, kn) { + currentKey = ffj_t_Response_GetExecutors_Executors + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Response_GetExecutors_CompletedExecutors, kn) { + currentKey = ffj_t_Response_GetExecutors_CompletedExecutors + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Response_GetExecutors_Executors, kn) { + currentKey = ffj_t_Response_GetExecutors_Executors + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Response_GetExecutorsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Response_GetExecutors_Executors: + goto handle_Executors + + case ffj_t_Response_GetExecutors_CompletedExecutors: + goto handle_CompletedExecutors + + case ffj_t_Response_GetExecutorsno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Executors: + + /* handler: uj.Executors type=[]agent.Response_GetExecutors_Executor kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null 
{ + uj.Executors = nil + } else { + + uj.Executors = []Response_GetExecutors_Executor{} + + wantVal := true + + for { + + var tmp_uj__Executors Response_GetExecutors_Executor + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Executors type=agent.Response_GetExecutors_Executor kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Executors.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Executors = append(uj.Executors, tmp_uj__Executors) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_CompletedExecutors: + + /* handler: uj.CompletedExecutors type=[]agent.Response_GetExecutors_Executor kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.CompletedExecutors = nil + } else { + + uj.CompletedExecutors = []Response_GetExecutors_Executor{} + + wantVal := true + + for { + + var tmp_uj__CompletedExecutors Response_GetExecutors_Executor + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__CompletedExecutors type=agent.Response_GetExecutors_Executor kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__CompletedExecutors.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.CompletedExecutors = append(uj.CompletedExecutors, tmp_uj__CompletedExecutors) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Response_GetExecutors_Executor) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Response_GetExecutors_Executor) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"executor_info":`) + + { + + err = mj.ExecutorInfo.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Response_GetExecutors_Executorbase = iota + ffj_t_Response_GetExecutors_Executorno_such_key + + ffj_t_Response_GetExecutors_Executor_ExecutorInfo +) + +var ffj_key_Response_GetExecutors_Executor_ExecutorInfo = []byte("executor_info") + +func (uj *Response_GetExecutors_Executor) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Response_GetExecutors_Executor) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Response_GetExecutors_Executorbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
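+				// key dispatch below: exact match via a first-byte switch plus bytes.Equal, then an ASCII case-fold fallback; anything unmatched becomes no_such_key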
+ currentKey = ffj_t_Response_GetExecutors_Executorno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'e': + + if bytes.Equal(ffj_key_Response_GetExecutors_Executor_ExecutorInfo, kn) { + currentKey = ffj_t_Response_GetExecutors_Executor_ExecutorInfo + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.AsciiEqualFold(ffj_key_Response_GetExecutors_Executor_ExecutorInfo, kn) { + currentKey = ffj_t_Response_GetExecutors_Executor_ExecutorInfo + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Response_GetExecutors_Executorno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Response_GetExecutors_Executor_ExecutorInfo: + goto handle_ExecutorInfo + + case ffj_t_Response_GetExecutors_Executorno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ExecutorInfo: + + /* handler: uj.ExecutorInfo type=mesos.ExecutorInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.ExecutorInfo.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Response_GetFlags) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Response_GetFlags) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"flags":`) + if mj.Flags != nil { + buf.WriteString(`[`) + for i, v := range mj.Flags { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Response_GetFlagsbase = iota + ffj_t_Response_GetFlagsno_such_key + + ffj_t_Response_GetFlags_Flags +) + +var ffj_key_Response_GetFlags_Flags = []byte("flags") + +func (uj *Response_GetFlags) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Response_GetFlags) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state 
fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Response_GetFlagsbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Response_GetFlagsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'f': + + if bytes.Equal(ffj_key_Response_GetFlags_Flags, kn) { + currentKey = ffj_t_Response_GetFlags_Flags + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Response_GetFlags_Flags, kn) { + currentKey = ffj_t_Response_GetFlags_Flags + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Response_GetFlagsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Response_GetFlags_Flags: + goto handle_Flags + + case ffj_t_Response_GetFlagsno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Flags: + + /* handler: uj.Flags type=[]mesos.Flag kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Flags = nil + } else { + + uj.Flags = []mesos.Flag{} + + wantVal := true + + for { + + var tmp_uj__Flags mesos.Flag + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
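+						// nb: as used throughout this file, fflib token names are inverted relative to the glyphs: FFTok_left_brace/right_brace are '[' and ']', while FFTok_left_bracket/right_bracket are '{' and '}'; that is why this slice loop opened on left_brace and breaks on right_brace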
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Flags type=mesos.Flag kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Flags.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Flags = append(uj.Flags, tmp_uj__Flags) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Response_GetFrameworks) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Response_GetFrameworks) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"frameworks":`) + if mj.Frameworks != nil { + buf.WriteString(`[`) + for i, v := range mj.Frameworks { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteString(`,"completed_frameworks":`) + if mj.CompletedFrameworks != nil { + buf.WriteString(`[`) + for i, v := range mj.CompletedFrameworks { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Response_GetFrameworksbase = iota + ffj_t_Response_GetFrameworksno_such_key + + ffj_t_Response_GetFrameworks_Frameworks + + ffj_t_Response_GetFrameworks_CompletedFrameworks +) + +var ffj_key_Response_GetFrameworks_Frameworks = []byte("frameworks") + +var ffj_key_Response_GetFrameworks_CompletedFrameworks = []byte("completed_frameworks") + +func (uj *Response_GetFrameworks) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Response_GetFrameworks) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Response_GetFrameworksbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto 
wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Response_GetFrameworksno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_Response_GetFrameworks_CompletedFrameworks, kn) { + currentKey = ffj_t_Response_GetFrameworks_CompletedFrameworks + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'f': + + if bytes.Equal(ffj_key_Response_GetFrameworks_Frameworks, kn) { + currentKey = ffj_t_Response_GetFrameworks_Frameworks + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Response_GetFrameworks_CompletedFrameworks, kn) { + currentKey = ffj_t_Response_GetFrameworks_CompletedFrameworks + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Response_GetFrameworks_Frameworks, kn) { + currentKey = ffj_t_Response_GetFrameworks_Frameworks + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Response_GetFrameworksno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Response_GetFrameworks_Frameworks: + goto handle_Frameworks + + case ffj_t_Response_GetFrameworks_CompletedFrameworks: + goto handle_CompletedFrameworks + + case ffj_t_Response_GetFrameworksno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Frameworks: + + /* handler: uj.Frameworks type=[]agent.Response_GetFrameworks_Framework kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Frameworks = nil + } else { + + uj.Frameworks = []Response_GetFrameworks_Framework{} + + wantVal := true + + for { + + var tmp_uj__Frameworks Response_GetFrameworks_Framework + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Frameworks type=agent.Response_GetFrameworks_Framework kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Frameworks.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Frameworks = append(uj.Frameworks, tmp_uj__Frameworks) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_CompletedFrameworks: + + /* handler: uj.CompletedFrameworks type=[]agent.Response_GetFrameworks_Framework kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.CompletedFrameworks = nil + } else { + + uj.CompletedFrameworks = []Response_GetFrameworks_Framework{} + + wantVal := true + + for { + + var tmp_uj__CompletedFrameworks Response_GetFrameworks_Framework + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__CompletedFrameworks type=agent.Response_GetFrameworks_Framework kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__CompletedFrameworks.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.CompletedFrameworks = append(uj.CompletedFrameworks, tmp_uj__CompletedFrameworks) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Response_GetFrameworks_Framework) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Response_GetFrameworks_Framework) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"framework_info":`) + + { + + err = mj.FrameworkInfo.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Response_GetFrameworks_Frameworkbase = iota + ffj_t_Response_GetFrameworks_Frameworkno_such_key + + ffj_t_Response_GetFrameworks_Framework_FrameworkInfo +) + +var ffj_key_Response_GetFrameworks_Framework_FrameworkInfo = []byte("framework_info") + +func (uj 
*Response_GetFrameworks_Framework) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Response_GetFrameworks_Framework) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Response_GetFrameworks_Frameworkbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Response_GetFrameworks_Frameworkno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'f': + + if bytes.Equal(ffj_key_Response_GetFrameworks_Framework_FrameworkInfo, kn) { + currentKey = ffj_t_Response_GetFrameworks_Framework_FrameworkInfo + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Response_GetFrameworks_Framework_FrameworkInfo, kn) { + currentKey = ffj_t_Response_GetFrameworks_Framework_FrameworkInfo + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Response_GetFrameworks_Frameworkno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Response_GetFrameworks_Framework_FrameworkInfo: + goto handle_FrameworkInfo + + case ffj_t_Response_GetFrameworks_Frameworkno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_FrameworkInfo: + + /* handler: uj.FrameworkInfo type=mesos.FrameworkInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.FrameworkInfo.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != 
nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Response_GetHealth) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Response_GetHealth) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + if mj.Healthy { + buf.WriteString(`{"healthy":true`) + } else { + buf.WriteString(`{"healthy":false`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Response_GetHealthbase = iota + ffj_t_Response_GetHealthno_such_key + + ffj_t_Response_GetHealth_Healthy +) + +var ffj_key_Response_GetHealth_Healthy = []byte("healthy") + +func (uj *Response_GetHealth) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Response_GetHealth) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Response_GetHealthbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
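+				// An empty key cannot match any known field, so it is routed to
+				// the no_such_key sentinel; its value is later consumed and
+				// discarded via fs.SkipField in the want_value state.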
+ currentKey = ffj_t_Response_GetHealthno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'h': + + if bytes.Equal(ffj_key_Response_GetHealth_Healthy, kn) { + currentKey = ffj_t_Response_GetHealth_Healthy + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Response_GetHealth_Healthy, kn) { + currentKey = ffj_t_Response_GetHealth_Healthy + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Response_GetHealthno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Response_GetHealth_Healthy: + goto handle_Healthy + + case ffj_t_Response_GetHealthno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Healthy: + + /* handler: uj.Healthy type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + } else { + tmpb := fs.Output.Bytes() + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + uj.Healthy = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + uj.Healthy = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Response_GetLoggingLevel) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Response_GetLoggingLevel) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"level":`) + fflib.FormatBits2(buf, uint64(mj.Level), 10, false) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Response_GetLoggingLevelbase = iota + ffj_t_Response_GetLoggingLevelno_such_key + + ffj_t_Response_GetLoggingLevel_Level +) + +var ffj_key_Response_GetLoggingLevel_Level = []byte("level") + +func (uj *Response_GetLoggingLevel) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Response_GetLoggingLevel) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var 
err error = nil + currentKey := ffj_t_Response_GetLoggingLevelbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Response_GetLoggingLevelno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'l': + + if bytes.Equal(ffj_key_Response_GetLoggingLevel_Level, kn) { + currentKey = ffj_t_Response_GetLoggingLevel_Level + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Response_GetLoggingLevel_Level, kn) { + currentKey = ffj_t_Response_GetLoggingLevel_Level + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Response_GetLoggingLevelno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Response_GetLoggingLevel_Level: + goto handle_Level + + case ffj_t_Response_GetLoggingLevelno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Level: + + /* handler: uj.Level type=uint32 kind=uint32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint32", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32) + + if err != nil { + return fs.WrapErr(err) + } + + uj.Level = uint32(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Response_GetMetrics) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err 
!= nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Response_GetMetrics) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"metrics":`) + if mj.Metrics != nil { + buf.WriteString(`[`) + for i, v := range mj.Metrics { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Response_GetMetricsbase = iota + ffj_t_Response_GetMetricsno_such_key + + ffj_t_Response_GetMetrics_Metrics +) + +var ffj_key_Response_GetMetrics_Metrics = []byte("metrics") + +func (uj *Response_GetMetrics) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Response_GetMetrics) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Response_GetMetricsbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Response_GetMetricsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'm': + + if bytes.Equal(ffj_key_Response_GetMetrics_Metrics, kn) { + currentKey = ffj_t_Response_GetMetrics_Metrics + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Response_GetMetrics_Metrics, kn) { + currentKey = ffj_t_Response_GetMetrics_Metrics + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Response_GetMetricsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Response_GetMetrics_Metrics: + goto handle_Metrics + + case ffj_t_Response_GetMetricsno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Metrics: + + /* handler: uj.Metrics type=[]mesos.Metric kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Metrics = nil + } else { + + uj.Metrics = []mesos.Metric{} + + wantVal := true + + for { + + var tmp_uj__Metrics mesos.Metric + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Metrics type=mesos.Metric kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Metrics.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Metrics = append(uj.Metrics, tmp_uj__Metrics) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Response_GetOperations) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Response_GetOperations) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"operations":`) + if mj.Operations != nil { + buf.WriteString(`[`) + for i, v := range mj.Operations { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Response_GetOperationsbase = iota + ffj_t_Response_GetOperationsno_such_key + + ffj_t_Response_GetOperations_Operations +) + +var ffj_key_Response_GetOperations_Operations = []byte("operations") + +func (uj *Response_GetOperations) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Response_GetOperations) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Response_GetOperationsbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Response_GetOperationsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'o': + + if bytes.Equal(ffj_key_Response_GetOperations_Operations, kn) { + currentKey = ffj_t_Response_GetOperations_Operations + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Response_GetOperations_Operations, kn) { + currentKey = ffj_t_Response_GetOperations_Operations + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Response_GetOperationsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Response_GetOperations_Operations: + goto handle_Operations + + case ffj_t_Response_GetOperationsno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Operations: + + /* handler: uj.Operations type=[]mesos.Operation kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Operations = nil + } else { + + uj.Operations = []mesos.Operation{} + + wantVal := true + + for { + + var tmp_uj__Operations mesos.Operation + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Operations type=mesos.Operation kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Operations.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Operations = append(uj.Operations, tmp_uj__Operations) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Response_GetResourceProviders) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Response_GetResourceProviders) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"resource_providers":`) + if mj.ResourceProviders != nil { + buf.WriteString(`[`) + for i, v := range mj.ResourceProviders { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Response_GetResourceProvidersbase = iota + ffj_t_Response_GetResourceProvidersno_such_key + + ffj_t_Response_GetResourceProviders_ResourceProviders +) + +var ffj_key_Response_GetResourceProviders_ResourceProviders = []byte("resource_providers") + +func (uj *Response_GetResourceProviders) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Response_GetResourceProviders) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Response_GetResourceProvidersbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Response_GetResourceProvidersno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'r': + + if bytes.Equal(ffj_key_Response_GetResourceProviders_ResourceProviders, kn) { + currentKey = ffj_t_Response_GetResourceProviders_ResourceProviders + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Response_GetResourceProviders_ResourceProviders, kn) { + currentKey = ffj_t_Response_GetResourceProviders_ResourceProviders + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Response_GetResourceProvidersno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Response_GetResourceProviders_ResourceProviders: + goto handle_ResourceProviders + + case ffj_t_Response_GetResourceProvidersno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ResourceProviders: + + /* handler: uj.ResourceProviders type=[]agent.Response_GetResourceProviders_ResourceProvider kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.ResourceProviders = nil + } else { + + uj.ResourceProviders = []Response_GetResourceProviders_ResourceProvider{} + + wantVal := true + + for { + + var tmp_uj__ResourceProviders Response_GetResourceProviders_ResourceProvider + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__ResourceProviders type=agent.Response_GetResourceProviders_ResourceProvider kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__ResourceProviders.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.ResourceProviders = append(uj.ResourceProviders, tmp_uj__ResourceProviders) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Response_GetResourceProviders_ResourceProvider) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Response_GetResourceProviders_ResourceProvider) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"resource_provider_info":`) + + { + + err = mj.ResourceProviderInfo.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteString(`,"total_resources":`) + if mj.TotalResources != nil { + buf.WriteString(`[`) + for i, v := range mj.TotalResources { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Response_GetResourceProviders_ResourceProviderbase = iota + ffj_t_Response_GetResourceProviders_ResourceProviderno_such_key + + ffj_t_Response_GetResourceProviders_ResourceProvider_ResourceProviderInfo + + ffj_t_Response_GetResourceProviders_ResourceProvider_TotalResources +) + +var ffj_key_Response_GetResourceProviders_ResourceProvider_ResourceProviderInfo = []byte("resource_provider_info") + +var ffj_key_Response_GetResourceProviders_ResourceProvider_TotalResources = []byte("total_resources") + +func (uj *Response_GetResourceProviders_ResourceProvider) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Response_GetResourceProviders_ResourceProvider) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Response_GetResourceProviders_ResourceProviderbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + 
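+		// After each decoded value the object parser expects either a comma
+		// (another key follows) or the closing '}'. Note the token naming used
+		// throughout this generated code: FFTok_left_bracket/right_bracket
+		// scan '{' and '}', while FFTok_left_brace/right_brace scan '[' and ']'.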
case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Response_GetResourceProviders_ResourceProviderno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'r': + + if bytes.Equal(ffj_key_Response_GetResourceProviders_ResourceProvider_ResourceProviderInfo, kn) { + currentKey = ffj_t_Response_GetResourceProviders_ResourceProvider_ResourceProviderInfo + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_Response_GetResourceProviders_ResourceProvider_TotalResources, kn) { + currentKey = ffj_t_Response_GetResourceProviders_ResourceProvider_TotalResources + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Response_GetResourceProviders_ResourceProvider_TotalResources, kn) { + currentKey = ffj_t_Response_GetResourceProviders_ResourceProvider_TotalResources + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Response_GetResourceProviders_ResourceProvider_ResourceProviderInfo, kn) { + currentKey = ffj_t_Response_GetResourceProviders_ResourceProvider_ResourceProviderInfo + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Response_GetResourceProviders_ResourceProviderno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Response_GetResourceProviders_ResourceProvider_ResourceProviderInfo: + goto handle_ResourceProviderInfo + + case ffj_t_Response_GetResourceProviders_ResourceProvider_TotalResources: + goto handle_TotalResources + + case ffj_t_Response_GetResourceProviders_ResourceProviderno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ResourceProviderInfo: + + /* handler: uj.ResourceProviderInfo type=mesos.ResourceProviderInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.ResourceProviderInfo.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_TotalResources: + + /* handler: uj.TotalResources type=[]mesos.Resource kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.TotalResources = nil + } else { + + uj.TotalResources = 
[]mesos.Resource{} + + wantVal := true + + for { + + var tmp_uj__TotalResources mesos.Resource + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__TotalResources type=mesos.Resource kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__TotalResources.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.TotalResources = append(uj.TotalResources, tmp_uj__TotalResources) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Response_GetState) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Response_GetState) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.GetTasks != nil { + if true { + buf.WriteString(`"get_tasks":`) + + { + + err = mj.GetTasks.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.GetExecutors != nil { + if true { + buf.WriteString(`"get_executors":`) + + { + + err = mj.GetExecutors.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.GetFrameworks != nil { + if true { + buf.WriteString(`"get_frameworks":`) + + { + + err = mj.GetFrameworks.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Response_GetStatebase = iota + ffj_t_Response_GetStateno_such_key + + ffj_t_Response_GetState_GetTasks + + ffj_t_Response_GetState_GetExecutors + + ffj_t_Response_GetState_GetFrameworks +) + +var ffj_key_Response_GetState_GetTasks = []byte("get_tasks") + +var ffj_key_Response_GetState_GetExecutors = []byte("get_executors") + +var ffj_key_Response_GetState_GetFrameworks = []byte("get_frameworks") + +func (uj *Response_GetState) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Response_GetState) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Response_GetStatebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok 
== fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Response_GetStateno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'g': + + if bytes.Equal(ffj_key_Response_GetState_GetTasks, kn) { + currentKey = ffj_t_Response_GetState_GetTasks + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Response_GetState_GetExecutors, kn) { + currentKey = ffj_t_Response_GetState_GetExecutors + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Response_GetState_GetFrameworks, kn) { + currentKey = ffj_t_Response_GetState_GetFrameworks + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Response_GetState_GetFrameworks, kn) { + currentKey = ffj_t_Response_GetState_GetFrameworks + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Response_GetState_GetExecutors, kn) { + currentKey = ffj_t_Response_GetState_GetExecutors + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Response_GetState_GetTasks, kn) { + currentKey = ffj_t_Response_GetState_GetTasks + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Response_GetStateno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Response_GetState_GetTasks: + goto handle_GetTasks + + case ffj_t_Response_GetState_GetExecutors: + goto handle_GetExecutors + + case ffj_t_Response_GetState_GetFrameworks: + goto handle_GetFrameworks + + case ffj_t_Response_GetStateno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_GetTasks: + + /* handler: uj.GetTasks type=agent.Response_GetTasks kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.GetTasks = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.GetTasks == nil { + uj.GetTasks = new(Response_GetTasks) + } + + err = uj.GetTasks.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_GetExecutors: + + /* handler: uj.GetExecutors type=agent.Response_GetExecutors kind=struct 
quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.GetExecutors = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.GetExecutors == nil { + uj.GetExecutors = new(Response_GetExecutors) + } + + err = uj.GetExecutors.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_GetFrameworks: + + /* handler: uj.GetFrameworks type=agent.Response_GetFrameworks kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.GetFrameworks = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.GetFrameworks == nil { + uj.GetFrameworks = new(Response_GetFrameworks) + } + + err = uj.GetFrameworks.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Response_GetTasks) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Response_GetTasks) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"pending_tasks":`) + if mj.PendingTasks != nil { + buf.WriteString(`[`) + for i, v := range mj.PendingTasks { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteString(`,"queued_tasks":`) + if mj.QueuedTasks != nil { + buf.WriteString(`[`) + for i, v := range mj.QueuedTasks { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteString(`,"launched_tasks":`) + if mj.LaunchedTasks != nil { + buf.WriteString(`[`) + for i, v := range mj.LaunchedTasks { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteString(`,"terminated_tasks":`) + if mj.TerminatedTasks != nil { + buf.WriteString(`[`) + for i, v := range mj.TerminatedTasks { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteString(`,"completed_tasks":`) + if mj.CompletedTasks != nil { + buf.WriteString(`[`) + for i, v := range mj.CompletedTasks { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + 
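+	// One jump-table constant per JSON key of Response_GetTasks, plus a
+	// no_such_key sentinel for unknown or empty keys whose values are skipped.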
ffj_t_Response_GetTasksbase = iota + ffj_t_Response_GetTasksno_such_key + + ffj_t_Response_GetTasks_PendingTasks + + ffj_t_Response_GetTasks_QueuedTasks + + ffj_t_Response_GetTasks_LaunchedTasks + + ffj_t_Response_GetTasks_TerminatedTasks + + ffj_t_Response_GetTasks_CompletedTasks +) + +var ffj_key_Response_GetTasks_PendingTasks = []byte("pending_tasks") + +var ffj_key_Response_GetTasks_QueuedTasks = []byte("queued_tasks") + +var ffj_key_Response_GetTasks_LaunchedTasks = []byte("launched_tasks") + +var ffj_key_Response_GetTasks_TerminatedTasks = []byte("terminated_tasks") + +var ffj_key_Response_GetTasks_CompletedTasks = []byte("completed_tasks") + +func (uj *Response_GetTasks) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Response_GetTasks) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Response_GetTasksbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
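+				// Non-empty keys are matched below in three tiers: a switch on
+				// the first byte, exact bytes.Equal, then case-insensitive
+				// EqualFoldRight; anything unmatched falls through to no_such_key.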
+ currentKey = ffj_t_Response_GetTasksno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_Response_GetTasks_CompletedTasks, kn) { + currentKey = ffj_t_Response_GetTasks_CompletedTasks + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'l': + + if bytes.Equal(ffj_key_Response_GetTasks_LaunchedTasks, kn) { + currentKey = ffj_t_Response_GetTasks_LaunchedTasks + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffj_key_Response_GetTasks_PendingTasks, kn) { + currentKey = ffj_t_Response_GetTasks_PendingTasks + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'q': + + if bytes.Equal(ffj_key_Response_GetTasks_QueuedTasks, kn) { + currentKey = ffj_t_Response_GetTasks_QueuedTasks + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_Response_GetTasks_TerminatedTasks, kn) { + currentKey = ffj_t_Response_GetTasks_TerminatedTasks + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Response_GetTasks_CompletedTasks, kn) { + currentKey = ffj_t_Response_GetTasks_CompletedTasks + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Response_GetTasks_TerminatedTasks, kn) { + currentKey = ffj_t_Response_GetTasks_TerminatedTasks + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Response_GetTasks_LaunchedTasks, kn) { + currentKey = ffj_t_Response_GetTasks_LaunchedTasks + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Response_GetTasks_QueuedTasks, kn) { + currentKey = ffj_t_Response_GetTasks_QueuedTasks + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Response_GetTasks_PendingTasks, kn) { + currentKey = ffj_t_Response_GetTasks_PendingTasks + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Response_GetTasksno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Response_GetTasks_PendingTasks: + goto handle_PendingTasks + + case ffj_t_Response_GetTasks_QueuedTasks: + goto handle_QueuedTasks + + case ffj_t_Response_GetTasks_LaunchedTasks: + goto handle_LaunchedTasks + + case ffj_t_Response_GetTasks_TerminatedTasks: + goto handle_TerminatedTasks + + case ffj_t_Response_GetTasks_CompletedTasks: + goto handle_CompletedTasks + + case ffj_t_Response_GetTasksno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_PendingTasks: + + /* handler: uj.PendingTasks type=[]mesos.Task kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.PendingTasks = nil + } else { + + uj.PendingTasks = []mesos.Task{} + + wantVal := true + + for { + + var 
tmp_uj__PendingTasks mesos.Task + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__PendingTasks type=mesos.Task kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__PendingTasks.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.PendingTasks = append(uj.PendingTasks, tmp_uj__PendingTasks) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_QueuedTasks: + + /* handler: uj.QueuedTasks type=[]mesos.Task kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.QueuedTasks = nil + } else { + + uj.QueuedTasks = []mesos.Task{} + + wantVal := true + + for { + + var tmp_uj__QueuedTasks mesos.Task + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__QueuedTasks type=mesos.Task kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__QueuedTasks.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.QueuedTasks = append(uj.QueuedTasks, tmp_uj__QueuedTasks) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_LaunchedTasks: + + /* handler: uj.LaunchedTasks type=[]mesos.Task kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.LaunchedTasks = nil + } else { + + uj.LaunchedTasks = []mesos.Task{} + + wantVal := true + + for { + + var tmp_uj__LaunchedTasks mesos.Task + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__LaunchedTasks type=mesos.Task kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__LaunchedTasks.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.LaunchedTasks = append(uj.LaunchedTasks, tmp_uj__LaunchedTasks) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_TerminatedTasks: + + /* handler: uj.TerminatedTasks type=[]mesos.Task kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.TerminatedTasks = nil + } else { + + uj.TerminatedTasks = []mesos.Task{} + + wantVal := true + + for { + + var tmp_uj__TerminatedTasks mesos.Task + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__TerminatedTasks type=mesos.Task kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__TerminatedTasks.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.TerminatedTasks = append(uj.TerminatedTasks, tmp_uj__TerminatedTasks) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_CompletedTasks: + + /* handler: uj.CompletedTasks type=[]mesos.Task kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.CompletedTasks = nil + } else { + + uj.CompletedTasks = []mesos.Task{} + + wantVal := true + + for { + + var tmp_uj__CompletedTasks mesos.Task + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__CompletedTasks type=mesos.Task kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__CompletedTasks.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.CompletedTasks = append(uj.CompletedTasks, tmp_uj__CompletedTasks) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Response_GetVersion) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Response_GetVersion) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"version_info":`) + + { + + err = mj.VersionInfo.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Response_GetVersionbase = iota + ffj_t_Response_GetVersionno_such_key + + ffj_t_Response_GetVersion_VersionInfo +) + +var ffj_key_Response_GetVersion_VersionInfo = []byte("version_info") + +func (uj *Response_GetVersion) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Response_GetVersion) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Response_GetVersionbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Response_GetVersionno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'v': + + if bytes.Equal(ffj_key_Response_GetVersion_VersionInfo, kn) { + currentKey = ffj_t_Response_GetVersion_VersionInfo + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Response_GetVersion_VersionInfo, kn) { + currentKey = ffj_t_Response_GetVersion_VersionInfo + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Response_GetVersionno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Response_GetVersion_VersionInfo: + goto handle_VersionInfo + + case ffj_t_Response_GetVersionno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_VersionInfo: + + /* handler: uj.VersionInfo type=mesos.VersionInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.VersionInfo.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Response_ListFiles) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Response_ListFiles) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"file_infos":`) + if mj.FileInfos != nil { + buf.WriteString(`[`) + for i, v := range mj.FileInfos { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Response_ListFilesbase = iota + ffj_t_Response_ListFilesno_such_key + + ffj_t_Response_ListFiles_FileInfos +) + +var ffj_key_Response_ListFiles_FileInfos = []byte("file_infos") + +func (uj *Response_ListFiles) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Response_ListFiles) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := 
ffj_t_Response_ListFilesbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Response_ListFilesno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'f': + + if bytes.Equal(ffj_key_Response_ListFiles_FileInfos, kn) { + currentKey = ffj_t_Response_ListFiles_FileInfos + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Response_ListFiles_FileInfos, kn) { + currentKey = ffj_t_Response_ListFiles_FileInfos + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Response_ListFilesno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Response_ListFiles_FileInfos: + goto handle_FileInfos + + case ffj_t_Response_ListFilesno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_FileInfos: + + /* handler: uj.FileInfos type=[]mesos.FileInfo kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.FileInfos = nil + } else { + + uj.FileInfos = []mesos.FileInfo{} + + wantVal := true + + for { + + var tmp_uj__FileInfos mesos.FileInfo + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__FileInfos type=mesos.FileInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__FileInfos.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.FileInfos = append(uj.FileInfos, tmp_uj__FileInfos) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Response_ReadFile) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Response_ReadFile) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "size":`) + fflib.FormatBits2(buf, uint64(mj.Size), 10, false) + buf.WriteByte(',') + if len(mj.Data) != 0 { + buf.WriteString(`"data":`) + if mj.Data != nil { + buf.WriteString(`"`) + { + enc := base64.NewEncoder(base64.StdEncoding, buf) + enc.Write(reflect.Indirect(reflect.ValueOf(mj.Data)).Bytes()) + enc.Close() + } + buf.WriteString(`"`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Response_ReadFilebase = iota + ffj_t_Response_ReadFileno_such_key + + ffj_t_Response_ReadFile_Size + + ffj_t_Response_ReadFile_Data +) + +var ffj_key_Response_ReadFile_Size = []byte("size") + +var ffj_key_Response_ReadFile_Data = []byte("data") + +func (uj *Response_ReadFile) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Response_ReadFile) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Response_ReadFilebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. 
hrm. + currentKey = ffj_t_Response_ReadFileno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'd': + + if bytes.Equal(ffj_key_Response_ReadFile_Data, kn) { + currentKey = ffj_t_Response_ReadFile_Data + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_Response_ReadFile_Size, kn) { + currentKey = ffj_t_Response_ReadFile_Size + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Response_ReadFile_Data, kn) { + currentKey = ffj_t_Response_ReadFile_Data + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Response_ReadFile_Size, kn) { + currentKey = ffj_t_Response_ReadFile_Size + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Response_ReadFileno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Response_ReadFile_Size: + goto handle_Size + + case ffj_t_Response_ReadFile_Data: + goto handle_Data + + case ffj_t_Response_ReadFileno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Size: + + /* handler: uj.Size type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + uj.Size = uint64(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Data: + + /* handler: uj.Data type=[]uint8 kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Data = nil + } else { + b := make([]byte, base64.StdEncoding.DecodedLen(fs.Output.Len())) + n, err := base64.StdEncoding.Decode(b, fs.Output.Bytes()) + if err != nil { + return fs.WrapErr(err) + } + + v := reflect.ValueOf(&uj.Data).Elem() + v.SetBytes(b[0:n]) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Response_WaitContainer) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj 
*Response_WaitContainer) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.ExitStatus != nil { + if true { + buf.WriteString(`"exit_status":`) + fflib.FormatBits2(buf, uint64(*mj.ExitStatus), 10, *mj.ExitStatus < 0) + buf.WriteByte(',') + } + } + if mj.State != nil { + if true { + buf.WriteString(`"state":`) + + { + + obj, err = mj.State.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + } + } + if mj.Reason != nil { + if true { + buf.WriteString(`"reason":`) + + { + + obj, err = mj.Reason.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + } + } + if mj.Limitation != nil { + if true { + buf.WriteString(`"limitation":`) + + { + + err = mj.Limitation.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Message != nil { + if true { + buf.WriteString(`"message":`) + fflib.WriteJsonString(buf, string(*mj.Message)) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Response_WaitContainerbase = iota + ffj_t_Response_WaitContainerno_such_key + + ffj_t_Response_WaitContainer_ExitStatus + + ffj_t_Response_WaitContainer_State + + ffj_t_Response_WaitContainer_Reason + + ffj_t_Response_WaitContainer_Limitation + + ffj_t_Response_WaitContainer_Message +) + +var ffj_key_Response_WaitContainer_ExitStatus = []byte("exit_status") + +var ffj_key_Response_WaitContainer_State = []byte("state") + +var ffj_key_Response_WaitContainer_Reason = []byte("reason") + +var ffj_key_Response_WaitContainer_Limitation = []byte("limitation") + +var ffj_key_Response_WaitContainer_Message = []byte("message") + +func (uj *Response_WaitContainer) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Response_WaitContainer) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Response_WaitContainerbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Response_WaitContainerno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'e': + + if bytes.Equal(ffj_key_Response_WaitContainer_ExitStatus, kn) { + currentKey = ffj_t_Response_WaitContainer_ExitStatus + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'l': + + if bytes.Equal(ffj_key_Response_WaitContainer_Limitation, kn) { + currentKey = ffj_t_Response_WaitContainer_Limitation + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'm': + + if bytes.Equal(ffj_key_Response_WaitContainer_Message, kn) { + currentKey = ffj_t_Response_WaitContainer_Message + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'r': + + if bytes.Equal(ffj_key_Response_WaitContainer_Reason, kn) { + currentKey = ffj_t_Response_WaitContainer_Reason + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_Response_WaitContainer_State, kn) { + currentKey = ffj_t_Response_WaitContainer_State + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Response_WaitContainer_Message, kn) { + currentKey = ffj_t_Response_WaitContainer_Message + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Response_WaitContainer_Limitation, kn) { + currentKey = ffj_t_Response_WaitContainer_Limitation + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Response_WaitContainer_Reason, kn) { + currentKey = ffj_t_Response_WaitContainer_Reason + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Response_WaitContainer_State, kn) { + currentKey = ffj_t_Response_WaitContainer_State + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Response_WaitContainer_ExitStatus, kn) { + currentKey = ffj_t_Response_WaitContainer_ExitStatus + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Response_WaitContainerno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Response_WaitContainer_ExitStatus: + goto handle_ExitStatus + + case ffj_t_Response_WaitContainer_State: + goto handle_State + + case ffj_t_Response_WaitContainer_Reason: + goto handle_Reason + + case ffj_t_Response_WaitContainer_Limitation: + goto handle_Limitation + + case ffj_t_Response_WaitContainer_Message: + goto handle_Message + + case ffj_t_Response_WaitContainerno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ExitStatus: + + /* handler: uj.ExitStatus type=int32 kind=int32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int32", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.ExitStatus = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 32) + + if err != nil { + return 
fs.WrapErr(err) + } + + ttypval := int32(tval) + uj.ExitStatus = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_State: + + /* handler: uj.State type=mesos.TaskState kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.State = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + if uj.State == nil { + uj.State = new(mesos.TaskState) + } + + err = uj.State.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Reason: + + /* handler: uj.Reason type=mesos.TaskStatus_Reason kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Reason = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + if uj.Reason == nil { + uj.Reason = new(mesos.TaskStatus_Reason) + } + + err = uj.Reason.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Limitation: + + /* handler: uj.Limitation type=mesos.TaskResourceLimitation kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Limitation = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Limitation == nil { + uj.Limitation = new(mesos.TaskResourceLimitation) + } + + err = uj.Limitation.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Message: + + /* handler: uj.Message type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Message = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Message = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Response_WaitNestedContainer) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Response_WaitNestedContainer) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.ExitStatus != nil { + if true { + buf.WriteString(`"exit_status":`) + fflib.FormatBits2(buf, uint64(*mj.ExitStatus), 10, *mj.ExitStatus < 0) + buf.WriteByte(',') + } + } + if mj.State != nil { + if true { + buf.WriteString(`"state":`) + + { + + obj, err = mj.State.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + 
buf.WriteByte(',') + } + } + if mj.Reason != nil { + if true { + buf.WriteString(`"reason":`) + + { + + obj, err = mj.Reason.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + } + } + if mj.Limitation != nil { + if true { + buf.WriteString(`"limitation":`) + + { + + err = mj.Limitation.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Message != nil { + if true { + buf.WriteString(`"message":`) + fflib.WriteJsonString(buf, string(*mj.Message)) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Response_WaitNestedContainerbase = iota + ffj_t_Response_WaitNestedContainerno_such_key + + ffj_t_Response_WaitNestedContainer_ExitStatus + + ffj_t_Response_WaitNestedContainer_State + + ffj_t_Response_WaitNestedContainer_Reason + + ffj_t_Response_WaitNestedContainer_Limitation + + ffj_t_Response_WaitNestedContainer_Message +) + +var ffj_key_Response_WaitNestedContainer_ExitStatus = []byte("exit_status") + +var ffj_key_Response_WaitNestedContainer_State = []byte("state") + +var ffj_key_Response_WaitNestedContainer_Reason = []byte("reason") + +var ffj_key_Response_WaitNestedContainer_Limitation = []byte("limitation") + +var ffj_key_Response_WaitNestedContainer_Message = []byte("message") + +func (uj *Response_WaitNestedContainer) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Response_WaitNestedContainer) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Response_WaitNestedContainerbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Response_WaitNestedContainerno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'e': + + if bytes.Equal(ffj_key_Response_WaitNestedContainer_ExitStatus, kn) { + currentKey = ffj_t_Response_WaitNestedContainer_ExitStatus + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'l': + + if bytes.Equal(ffj_key_Response_WaitNestedContainer_Limitation, kn) { + currentKey = ffj_t_Response_WaitNestedContainer_Limitation + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'm': + + if bytes.Equal(ffj_key_Response_WaitNestedContainer_Message, kn) { + currentKey = ffj_t_Response_WaitNestedContainer_Message + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'r': + + if bytes.Equal(ffj_key_Response_WaitNestedContainer_Reason, kn) { + currentKey = ffj_t_Response_WaitNestedContainer_Reason + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_Response_WaitNestedContainer_State, kn) { + currentKey = ffj_t_Response_WaitNestedContainer_State + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Response_WaitNestedContainer_Message, kn) { + currentKey = ffj_t_Response_WaitNestedContainer_Message + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Response_WaitNestedContainer_Limitation, kn) { + currentKey = ffj_t_Response_WaitNestedContainer_Limitation + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Response_WaitNestedContainer_Reason, kn) { + currentKey = ffj_t_Response_WaitNestedContainer_Reason + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Response_WaitNestedContainer_State, kn) { + currentKey = ffj_t_Response_WaitNestedContainer_State + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Response_WaitNestedContainer_ExitStatus, kn) { + currentKey = ffj_t_Response_WaitNestedContainer_ExitStatus + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Response_WaitNestedContainerno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Response_WaitNestedContainer_ExitStatus: + goto handle_ExitStatus + + case ffj_t_Response_WaitNestedContainer_State: + goto handle_State + + case ffj_t_Response_WaitNestedContainer_Reason: + goto handle_Reason + + case ffj_t_Response_WaitNestedContainer_Limitation: + goto handle_Limitation + + case ffj_t_Response_WaitNestedContainer_Message: + goto handle_Message + + case ffj_t_Response_WaitNestedContainerno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ExitStatus: + + /* handler: uj.ExitStatus type=int32 kind=int32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int32", tok)) + } + } + + 
{ + + if tok == fflib.FFTok_null { + + uj.ExitStatus = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 32) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int32(tval) + uj.ExitStatus = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_State: + + /* handler: uj.State type=mesos.TaskState kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.State = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + if uj.State == nil { + uj.State = new(mesos.TaskState) + } + + err = uj.State.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Reason: + + /* handler: uj.Reason type=mesos.TaskStatus_Reason kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Reason = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + if uj.Reason == nil { + uj.Reason = new(mesos.TaskStatus_Reason) + } + + err = uj.Reason.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Limitation: + + /* handler: uj.Limitation type=mesos.TaskResourceLimitation kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Limitation = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Limitation == nil { + uj.Limitation = new(mesos.TaskResourceLimitation) + } + + err = uj.Limitation.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Message: + + /* handler: uj.Message type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Message = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Message = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/agent/agent.proto b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/agent/agent.proto new file mode 100644 index 000000000000..f1ec61802e62 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/agent/agent.proto @@ -0,0 +1,706 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto2"; + +package mesos.agent; + +import "github.com/mesos/mesos-go/api/v1/lib/mesos.proto"; +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option go_package = "agent"; +option (gogoproto.benchgen_all) = true; +option (gogoproto.enum_stringer_all) = true; +option (gogoproto.equal_all) = true; +option (gogoproto.goproto_enum_prefix_all) = false; +option (gogoproto.goproto_enum_stringer_all) = false; +option (gogoproto.goproto_stringer_all) = false; +option (gogoproto.goproto_unrecognized_all) = false; +option (gogoproto.gostring_all) = true; +option (gogoproto.marshaler_all) = true; +option (gogoproto.populate_all) = true; +option (gogoproto.protosizer_all) = true; +option (gogoproto.stringer_all) = true; +option (gogoproto.testgen_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.verbose_equal_all) = true; + +/** + * Calls that can be sent to the v1 agent API. + * + * A call is described using the standard protocol buffer "union" + * trick, see + * https://developers.google.com/protocol-buffers/docs/techniques#union. + */ +message Call { + // If a call of type `Call::FOO` requires additional parameters, they can be + // included in the corresponding `Call::Foo` message. Similarly, if a call + // receives a synchronous response, it will be returned as a `Response` + // message of type `Response::FOO`; see `Call::LaunchNestedContainerSession` + // and `Call::AttachContainerOutput` for exceptions. + enum Type { + UNKNOWN = 0; + + GET_HEALTH = 1; // Retrieves the agent's health status. + GET_FLAGS = 2; // Retrieves the agent's flag configuration. + GET_VERSION = 3; // Retrieves the agent's version information. + GET_METRICS = 4; // See 'GetMetrics' below. + + GET_LOGGING_LEVEL = 5; // Retrieves the agent's logging level. + SET_LOGGING_LEVEL = 6; // See 'SetLoggingLevel' below. + + LIST_FILES = 7; + READ_FILE = 8; // See 'ReadFile' below. + + GET_STATE = 9; + + GET_CONTAINERS = 10; + + // Retrieves the information about known frameworks. + GET_FRAMEWORKS = 11; + + // Retrieves the information about known executors. + GET_EXECUTORS = 12; + + // Retrieves the information about known operations. + GET_OPERATIONS = 31; + + // Retrieves the information about known tasks. + GET_TASKS = 13; + + // Retrieves the agent information. + GET_AGENT = 20; + + // Retrieves the information about known resource providers. + GET_RESOURCE_PROVIDERS = 26; + + // Calls for managing nested containers underneath an executor's container. + // Some of these calls are deprecated in favor of the calls + // for both standalone and nested containers further below. + LAUNCH_NESTED_CONTAINER = 14 [deprecated = true]; + WAIT_NESTED_CONTAINER = 15 [deprecated = true]; + KILL_NESTED_CONTAINER = 16 [deprecated = true]; + REMOVE_NESTED_CONTAINER = 21 [deprecated = true]; + + // See 'LaunchNestedContainerSession' below.
+ LAUNCH_NESTED_CONTAINER_SESSION = 17; + + ATTACH_CONTAINER_INPUT = 18; // See 'AttachContainerInput' below. + ATTACH_CONTAINER_OUTPUT = 19; // See 'AttachContainerOutput' below. + + // Calls for managing standalone containers + // or containers nested underneath another container. + LAUNCH_CONTAINER = 22; // See 'LaunchContainer' below. + WAIT_CONTAINER = 23; // See 'WaitContainer' below. + KILL_CONTAINER = 24; // See 'KillContainer' below. + REMOVE_CONTAINER = 25; // See 'RemoveContainer' below. + + ADD_RESOURCE_PROVIDER_CONFIG = 27; // See 'AddResourceProviderConfig' below. // NOLINT + UPDATE_RESOURCE_PROVIDER_CONFIG = 28; // See 'UpdateResourceProviderConfig' below. // NOLINT + REMOVE_RESOURCE_PROVIDER_CONFIG = 29; // See 'RemoveResourceProviderConfig' below. // NOLINT + + // Prune unused container images. + PRUNE_IMAGES = 30; + + option (gogoproto.goproto_enum_prefix) = true; + } + + // Provides a snapshot of the current metrics tracked by the agent. + message GetMetrics { + // If set, `timeout` would be used to determine the maximum amount of time + // the API will take to respond. If the timeout is exceeded, some metrics + // may not be included in the response. + optional DurationInfo timeout = 1; + } + + // Sets the logging verbosity level for a specified duration. Mesos uses + // [glog](https://github.com/google/glog) for logging. The library only uses + // verbose logging, which means nothing will be output unless the verbosity + // level is set (by default it's 0, libprocess uses levels 1, 2, and 3). + message SetLoggingLevel { + // The verbosity level. + required uint32 level = 1 [(gogoproto.nullable) = false]; + // The duration to keep the verbosity level toggled. After this duration, the + // verbosity level of the log would revert to the original level. + required DurationInfo duration = 2 [(gogoproto.nullable) = false]; + } + + // Provides the file listing for a directory. + message ListFiles { + required string path = 1 [(gogoproto.nullable) = false]; + } + + // Reads data from a file. + message ReadFile { + // The path of the file. + required string path = 1 [(gogoproto.nullable) = false]; + + // Initial offset in the file to start reading from. + required uint64 offset = 2 [(gogoproto.nullable) = false]; + + // The maximum number of bytes to read. The read length is capped at 16 + // memory pages. + optional uint64 length = 3; + } + + // Lists active containers on the agent. + message GetContainers { + optional bool show_nested = 1; + optional bool show_standalone = 2; + } + + // Deprecated in favor of `LaunchContainer`. + message LaunchNestedContainer { + required ContainerID container_id = 1 [(gogoproto.customname) = "ContainerID", (gogoproto.nullable) = false]; + optional CommandInfo command = 2; + optional ContainerInfo container = 3; + } + + // Deprecated in favor of `WaitContainer`. + message WaitNestedContainer { + required ContainerID container_id = 1 [(gogoproto.customname) = "ContainerID", (gogoproto.nullable) = false]; + } + + // Deprecated in favor of `KillContainer`. + message KillNestedContainer { + required ContainerID container_id = 1 [(gogoproto.customname) = "ContainerID", (gogoproto.nullable) = false]; + optional int32 signal = 2; + } + + // Deprecated in favor of `RemoveContainer`. + message RemoveNestedContainer { + required ContainerID container_id = 1 [(gogoproto.customname) = "ContainerID", (gogoproto.nullable) = false]; + } + + // Launches a nested container within an executor's tree of containers.
+ // The differences between this call and `LaunchNestedContainer` are: + // 1) The container's life-cycle is tied to the lifetime of the + // connection used to make this call, i.e., if the connection ever + // breaks, the container will be destroyed. + // 2) The nested container shares the same namespaces and cgroups as + // its parent container. + // 3) Results in a streaming response of type `ProcessIO`. So the call + // needs to be made on a persistent connection. + message LaunchNestedContainerSession { + required ContainerID container_id = 1 [(gogoproto.customname) = "ContainerID", (gogoproto.nullable) = false]; + optional CommandInfo command = 2; + optional ContainerInfo container = 3; + } + + // Attaches the caller to the STDIN of the entry point of the container. + // Clients can use this to stream input data to a container. + // Note that this call needs to be made on a persistent connection by + // streaming a CONTAINER_ID message followed by one or more PROCESS_IO + // messages. + message AttachContainerInput { + enum Type { + UNKNOWN = 0; + CONTAINER_ID = 1; + PROCESS_IO = 2; + + option (gogoproto.goproto_enum_prefix) = true; + } + + optional Type type = 1 [(gogoproto.nullable) = false]; + optional ContainerID container_id = 2 [(gogoproto.customname) = "ContainerID"]; + optional ProcessIO process_io = 3 [(gogoproto.customname) = "ProcessIO"]; + } + + // Attaches the caller to the STDOUT and STDERR of the entrypoint of + // the container. Clients can use this to stream output/error from the + // container. This call will result in a streaming response of `ProcessIO`; + // so this call needs to be made on a persistent connection. + message AttachContainerOutput { + required ContainerID container_id = 1 [(gogoproto.customname) = "ContainerID", (gogoproto.nullable) = false]; + } + + // Launches either a "standalone" container on this agent + // or a nested container within another tree of containers. + // + // A standalone container is launched by specifying a ContainerID + // with no parent. Standalone containers bypass the normal offer cycle + // between the master and agent. Unlike other containers, a standalone + // container does not have an executor or any tasks. This means the + // standalone container does not report back to Mesos or any framework + // and must be supervised separately. + // + // A nested container is launched by specifying a ContainerID with + // another existing container (including standalone containers) + // as the parent. + // + // Returns 200 OK if the new container launch succeeds. + // Returns 202 Accepted if the requested ContainerID is already in use + // by a standalone or nested container. + // Returns 400 Bad Request if the container launch fails. + message LaunchContainer { + // NOTE: Some characters cannot be used in the ID. All characters + // must be valid filesystem path characters. In addition, '/' and '.' + // are reserved. + required ContainerID container_id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "ContainerID"]; + + optional CommandInfo command = 2; + + // NOTE: Nested containers may not specify resources and instead + // share resources with their parent container. + // + // TODO(josephw): These resources are purely used for isolation + // and are not accounted for by the Mesos master (if connected). + // It is the caller's responsibility to ensure that resources are + // not overcommitted (e.g. CPU and memory) or conflicting (e.g. ports + // and volumes).
Once there is support for preempting tasks and a + // way to update the resources advertised by the agent, these standalone + // container resources should be accounted for by the master. + repeated Resource resources = 3 [(gogoproto.nullable) = false]; + + optional ContainerInfo container = 4; + } + + // Waits for the standalone or nested container to terminate + // and returns the exit status. + // + // Returns 200 OK if and when the container exits. + // Returns 404 Not Found if the container does not exist. + message WaitContainer { + required ContainerID container_id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "ContainerID"]; + } + + // Kills the standalone or nested container. The signal to be sent + // to the container can be specified in the 'signal' field. + // + // Returns 200 OK if the signal is sent successfully. + // Returns 404 Not Found if the container does not exist. + message KillContainer { + required ContainerID container_id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "ContainerID"]; + + // Defaults to SIGKILL. + optional int32 signal = 2; + } + + // Removes a container's artifacts (runtime and sandbox directories). + // + // For nested containers, it is important to use this call if multiple + // nested containers are launched under the same parent container, because + // garbage collection only takes place at the parent container. Artifacts + // belonging to nested containers will not be garbage collected while + // the parent container is running. + // + // TODO(josephw): A standalone container's runtime directory is currently + // garbage collected as soon as the container exits. To allow the user to + // retrieve the exit status reliably, the runtime directory cannot be + // garbage collected immediately. Instead, the user will eventually be + // required to make this call after the standalone container has exited. + // Also, a standalone container's sandbox directory is currently not + // garbage collected and is only deleted via this call. + // + // Returns 200 OK if the removal is successful or if the parent container + // (for nested containers) does not exist. + // Returns 500 Internal Server Error if anything goes wrong, including + // if the container is still running or does not exist. + // + // TODO(josephw): Consider returning a 400 Bad Request instead of 500 + // Internal Server Error when the user tries to remove a running or + // nonexistent nested container. + message RemoveContainer { + required ContainerID container_id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "ContainerID"]; + } + + // Adds a new resource provider config file. + // + // The content of the `info` field will be written into a new config file in + // the resource provider config directory, and a new resource provider will be + // launched asynchronously based on the config. Callers must not set the + // `info.id` field. This call is idempotent, so if a config file identical to + // the content of the `info` field already exists, this call will return + // without launching a resource provider. Note that if a config file is + // placed into the resource provider config directory out-of-band after the + // agent starts up, it will not be checked against this call. + // + // Returns 200 OK if a new config file is created, or an identical config file + // exists. + // Returns 400 Bad Request if `info` is not well-formed. + // Returns 403 Forbidden if the call is not authorized. 
+ // Returns 409 Conflict if another config file that describes a + // resource provider of the same type and name exists, but the content is + // not identical. + // Returns 500 Internal Server Error if anything goes wrong. + message AddResourceProviderConfig { + required ResourceProviderInfo info = 1 [(gogoproto.nullable) = false]; + } + + // Updates an existing resource provider config file. + // + // The content of the `info` field will be written into an existing config + // file that describes a resource provider of the specified type and name in + // the resource provider config directory, and the corresponding resource + // provider will be relaunched asynchronously to reflect the changes in the + // config. Callers must not set the `info.id` field. This call is idempotent, + // so if there is no change in the config, this call will return without + // relaunching the resource provider. Note that if a config file is placed + // into the resource provider config directory out-of-band after the agent + // starts up, it will not be checked against this call. + // + // Returns 200 OK if an existing config file is updated, or there is no change + // in the config file. + // Returns 400 Bad Request if `info` is not well-formed. + // Returns 403 Forbidden if the call is not authorized. + // Returns 404 Not Found if no config file describing a resource + // provider of the specified type and name exists. + // Returns 500 Internal Server Error if anything goes wrong. + message UpdateResourceProviderConfig { + required ResourceProviderInfo info = 1 [(gogoproto.nullable) = false]; + } + + // Removes a config file from the resource provider config directory. + // + // The config file that describes the resource provider of the specified type + // and name will be removed, and the corresponding resource provider will be + // terminated asynchronously. This call is idempotent, so if no matching + // config file exists, this call will return without terminating any resource + // provider. Note that if a config file is placed into the resource provider + // config directory out-of-band after the agent starts up, it will not be + // checked against this call. + // + // Returns 200 OK if the config file is removed, or no matching config file + // exists. + // Returns 403 Forbidden if the call is not authorized. + // Returns 500 Internal Server Error if anything goes wrong. + message RemoveResourceProviderConfig { + required string type = 1 [(gogoproto.nullable) = false]; + required string name = 2 [(gogoproto.nullable) = false]; + } + + // Prunes unused container images from the image store. + // + // Images and layers referenced by active containers as well as + // image references specified in `excluded_images` will not be pruned.
+ message PruneImages { + repeated Image excluded_images = 1 [(gogoproto.nullable) = false]; + } + + optional Type type = 1 [(gogoproto.nullable) = false]; + + optional GetMetrics get_metrics = 2; + optional SetLoggingLevel set_logging_level = 3; + optional ListFiles list_files = 4; + optional ReadFile read_file = 5; + + optional GetContainers get_containers = 20; + + optional LaunchNestedContainer launch_nested_container = 6 + [deprecated = true]; + + optional WaitNestedContainer wait_nested_container = 7 [deprecated = true]; + optional KillNestedContainer kill_nested_container = 8 [deprecated = true]; + optional RemoveNestedContainer remove_nested_container = 12 + [deprecated = true]; + + optional LaunchNestedContainerSession launch_nested_container_session = 9; + optional AttachContainerInput attach_container_input = 10; + optional AttachContainerOutput attach_container_output = 11; + optional LaunchContainer launch_container = 13; + optional WaitContainer wait_container = 14; + optional KillContainer kill_container = 15; + optional RemoveContainer remove_container = 16; + + optional AddResourceProviderConfig add_resource_provider_config = 17; + optional UpdateResourceProviderConfig update_resource_provider_config = 18; + optional RemoveResourceProviderConfig remove_resource_provider_config = 19; + + optional PruneImages prune_images = 21; +} + + +/** + * Synchronous responses for all calls made to the v1 agent API. + */ +message Response { + // Each of the responses of type `FOO` corresponds to `Foo` message below. + enum Type { + UNKNOWN = 0; + + GET_HEALTH = 1; // See 'GetHealth' below. + GET_FLAGS = 2; // See 'GetFlags' below. + GET_VERSION = 3; // See 'GetVersion' below. + GET_METRICS = 4; // See 'GetMetrics' below. + + GET_LOGGING_LEVEL = 5; // See 'GetLoggingLevel' below. + + LIST_FILES = 6; + READ_FILE = 7; // See 'ReadFile' below. + + GET_STATE = 8; + + GET_CONTAINERS = 9; + GET_FRAMEWORKS = 10; // See 'GetFrameworks' below. + GET_EXECUTORS = 11; // See 'GetExecutors' below. + GET_OPERATIONS = 17; // See 'GetOperations' below. + GET_TASKS = 12; // See 'GetTasks' below. + GET_AGENT = 14; // See 'GetAgent' below. + GET_RESOURCE_PROVIDERS = 16; // See 'GetResourceProviders' below. + + WAIT_NESTED_CONTAINER = 13 [deprecated = true]; + WAIT_CONTAINER = 15; // See 'WaitContainer' below. + + option (gogoproto.goproto_enum_prefix) = true; + } + + // `healthy` would be true if the agent is healthy. Delayed responses are also + // indicative of the poor health of the agent. + message GetHealth { + required bool healthy = 1 [(gogoproto.nullable) = false]; + } + + // Contains the flag configuration of the agent. + message GetFlags { + repeated Flag flags = 1 [(gogoproto.nullable) = false]; + } + + // Contains the version information of the agent. + message GetVersion { + required VersionInfo version_info = 1 [(gogoproto.nullable) = false]; + } + + // Contains a snapshot of the current metrics. + message GetMetrics { + repeated Metric metrics = 1 [(gogoproto.nullable) = false]; + } + + // Contains the logging level of the agent. + message GetLoggingLevel { + required uint32 level = 1 [(gogoproto.nullable) = false]; + } + + // Contains the file listing(similar to `ls -l`) for a directory. + message ListFiles { + repeated FileInfo file_infos = 1 [(gogoproto.nullable) = false]; + } + + // Contains the file data. + message ReadFile { + // The size of file (in bytes). 
+ required uint64 size = 1 [(gogoproto.nullable) = false]; + + required bytes data = 2; + } + + // Contains full state of the agent i.e. information about the tasks, + // frameworks and executors running in the cluster. + message GetState { + optional GetTasks get_tasks = 1; + optional GetExecutors get_executors = 2; + optional GetFrameworks get_frameworks = 3; + } + + // Information about containers running on this agent. It contains + // ContainerStatus and ResourceStatistics along with some metadata + // of the containers. + message GetContainers { + message Container { + optional FrameworkID framework_id = 1 [(gogoproto.customname) = "FrameworkID"]; + optional ExecutorID executor_id = 2 [(gogoproto.customname) = "ExecutorID"]; + optional string executor_name = 3; + required ContainerID container_id = 4 [(gogoproto.customname) = "ContainerID", (gogoproto.nullable) = false]; + optional ContainerStatus container_status = 5; + optional ResourceStatistics resource_statistics = 6; + } + + repeated Container containers = 1 [(gogoproto.nullable) = false]; + } + + // Information about all the frameworks known to the agent at the current + // time. + message GetFrameworks { + message Framework { + required FrameworkInfo framework_info = 1 [(gogoproto.nullable) = false]; + } + + repeated Framework frameworks = 1 [(gogoproto.nullable) = false]; + repeated Framework completed_frameworks = 2 [(gogoproto.nullable) = false]; + } + + // Lists information about all the executors known to the agent at the + // current time. + message GetExecutors { + message Executor { + required ExecutorInfo executor_info = 1 [(gogoproto.nullable) = false]; + } + + repeated Executor executors = 1 [(gogoproto.nullable) = false]; + repeated Executor completed_executors = 2 [(gogoproto.nullable) = false]; + } + + // Lists information about all operations known to the agent at the + // current time. + message GetOperations { + repeated Operation operations = 1 [(gogoproto.nullable) = false]; + } + + // Lists information about all the tasks known to the agent at the current + // time. + message GetTasks { + // Tasks that are pending in the agent's queue before an executor is + // launched. + repeated Task pending_tasks = 1 [(gogoproto.nullable) = false]; + + // Tasks that are enqueued for a launched executor that has not yet + // registered. + repeated Task queued_tasks = 2 [(gogoproto.nullable) = false]; + + // Tasks that are running. + repeated Task launched_tasks = 3 [(gogoproto.nullable) = false]; + + // Tasks that are terminated but pending updates. + repeated Task terminated_tasks = 4 [(gogoproto.nullable) = false]; + + // Tasks that are terminated and updates acked. + repeated Task completed_tasks = 5 [(gogoproto.nullable) = false]; + } + + // Contains the agent's information. + message GetAgent { + optional AgentInfo agent_info = 1; + } + + // Lists information about all resource providers known to the agent + // at the current time. + message GetResourceProviders { + message ResourceProvider { + required ResourceProviderInfo resource_provider_info = 1 [(gogoproto.nullable) = false]; + repeated Resource total_resources = 2 [(gogoproto.nullable) = false]; + } + + repeated ResourceProvider resource_providers = 1 [(gogoproto.nullable) = false]; + } + + // Returns termination information about the nested container. + message WaitNestedContainer { + // Wait status of the lead process in the container. 
Note that this + // is the return value of `wait(2)`, so callers must use the `wait(2)` + // family of macros to extract whether the process exited cleanly and + // what the exit code was. + optional int32 exit_status = 1; + + // The `state` and `reason` fields may be populated if the Mesos agent + // terminates the container. In the absence of any special knowledge, + // executors should propagate this information via the `status` field + // of an `Update` call for the corresponding TaskID. + optional TaskState state = 2; + optional TaskStatus.Reason reason = 3; + + // This field will be populated if the task was terminated due to + // a resource limitation. + optional TaskResourceLimitation limitation = 4; + + optional string message = 5; + } + + // Returns termination information about the standalone or nested container. + message WaitContainer { + // Wait status of the lead process in the container. Note that this + // is the return value of `wait(2)`, so callers must use the `wait(2)` + // family of macros to extract whether the process exited cleanly and + // what the exit code was. + optional int32 exit_status = 1; + + // The `state` and `reason` fields may be populated if the Mesos agent + // terminates the container. In the absence of any special knowledge, + // executors should propagate this information via the `status` field + // of an `Update` call for the corresponding TaskID. + optional TaskState state = 2; + optional TaskStatus.Reason reason = 3; + + // This field will be populated if the task was terminated due to + // a resource limitation. + optional TaskResourceLimitation limitation = 4; + + optional string message = 5; + } + + optional Type type = 1 [(gogoproto.nullable) = false]; + + optional GetHealth get_health = 2; + optional GetFlags get_flags = 3; + optional GetVersion get_version = 4; + optional GetMetrics get_metrics = 5; + optional GetLoggingLevel get_logging_level = 6; + optional ListFiles list_files = 7; + optional ReadFile read_file = 8; + optional GetState get_state = 9; + optional GetContainers get_containers = 10; + optional GetFrameworks get_frameworks = 11; + optional GetExecutors get_executors = 12; + optional GetOperations get_operations = 18; + optional GetTasks get_tasks = 13; + optional GetAgent get_agent = 15; + optional GetResourceProviders get_resource_providers = 17; + optional WaitNestedContainer wait_nested_container = 14; + optional WaitContainer wait_container = 16; +} + + +/** + * Streaming response to `Call::LAUNCH_NESTED_CONTAINER_SESSION` and + * `Call::ATTACH_CONTAINER_OUTPUT`. + * + * This message is also used to stream request data for + * `Call::ATTACH_CONTAINER_INPUT`. 
+ */ +message ProcessIO { + enum Type { + UNKNOWN = 0; + DATA = 1; + CONTROL = 2; + + option (gogoproto.goproto_enum_prefix) = true; + } + + message Data { + enum Type { + UNKNOWN = 0; + STDIN = 1; + STDOUT = 2; + STDERR = 3; + + option (gogoproto.goproto_enum_prefix) = true; + } + + optional Type type = 1 [(gogoproto.nullable) = false]; + optional bytes data = 2; + } + + + message Control { + enum Type { + UNKNOWN = 0; + TTY_INFO = 1; + HEARTBEAT = 2; + + option (gogoproto.goproto_enum_prefix) = true; + } + + message Heartbeat { + optional DurationInfo interval = 1; + } + + optional Type type = 1 [(gogoproto.nullable) = false]; + optional TTYInfo tty_info = 2 [(gogoproto.customname) = "TTYInfo"]; + optional Heartbeat heartbeat = 3; + } + + optional Type type = 1 [(gogoproto.nullable) = false]; + optional Data data = 2; + optional Control control = 3; +} diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/agent/calls/calls.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/agent/calls/calls.go new file mode 100644 index 000000000000..c2acacc21868 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/agent/calls/calls.go @@ -0,0 +1,258 @@ +package calls + +import ( + "time" + + "github.com/mesos/mesos-go/api/v1/lib" + "github.com/mesos/mesos-go/api/v1/lib/agent" +) + +func GetHealth() *agent.Call { return &agent.Call{Type: agent.Call_GET_HEALTH} } + +func GetFlags() *agent.Call { return &agent.Call{Type: agent.Call_GET_FLAGS} } + +func GetVersion() *agent.Call { return &agent.Call{Type: agent.Call_GET_VERSION} } + +func GetMetrics(d *time.Duration) (call *agent.Call) { + call = &agent.Call{ + Type: agent.Call_GET_METRICS, + GetMetrics: &agent.Call_GetMetrics{}, + } + if d != nil { + call.GetMetrics.Timeout = &mesos.DurationInfo{ + Nanoseconds: d.Nanoseconds(), + } + } + return +} + +func GetLoggingLevel() *agent.Call { return &agent.Call{Type: agent.Call_GET_LOGGING_LEVEL} } + +func SetLoggingLevel(level uint32, d time.Duration) *agent.Call { + return &agent.Call{ + Type: agent.Call_SET_LOGGING_LEVEL, + SetLoggingLevel: &agent.Call_SetLoggingLevel{ + Duration: mesos.DurationInfo{Nanoseconds: d.Nanoseconds()}, + Level: level, + }, + } +} + +func ListFiles(path string) *agent.Call { + return &agent.Call{ + Type: agent.Call_LIST_FILES, + ListFiles: &agent.Call_ListFiles{ + Path: path, + }, + } +} + +func ReadFile(path string, offset uint64) *agent.Call { + return &agent.Call{ + Type: agent.Call_READ_FILE, + ReadFile: &agent.Call_ReadFile{ + Path: path, + Offset: offset, + }, + } +} + +func ReadFileWithLength(path string, offset, length uint64) (call *agent.Call) { + call = ReadFile(path, offset) + call.ReadFile.Length = &length + return +} + +func GetState() *agent.Call { return &agent.Call{Type: agent.Call_GET_STATE} } + +func GetContainers() *agent.Call { return &agent.Call{Type: agent.Call_GET_CONTAINERS} } + +func GetFrameworks() *agent.Call { return &agent.Call{Type: agent.Call_GET_FRAMEWORKS} } + +func GetExecutors() *agent.Call { return &agent.Call{Type: agent.Call_GET_EXECUTORS} } + +func GetOperations() *agent.Call { return &agent.Call{Type: agent.Call_GET_OPERATIONS} } + +func GetTasks() *agent.Call { return &agent.Call{Type: agent.Call_GET_TASKS} } + +func GetAgent() *agent.Call { return &agent.Call{Type: agent.Call_GET_AGENT} } + +func GetResourceProviders() *agent.Call { return &agent.Call{Type: agent.Call_GET_RESOURCE_PROVIDERS} } + +func LaunchNestedContainer(cid mesos.ContainerID, cmd *mesos.CommandInfo, ci 
*mesos.ContainerInfo) *agent.Call { + return &agent.Call{ + Type: agent.Call_LAUNCH_NESTED_CONTAINER, + LaunchNestedContainer: &agent.Call_LaunchNestedContainer{ + ContainerID: cid, + Command: cmd, + Container: ci, + }, + } +} + +func LaunchContainer(cid mesos.ContainerID, cmd *mesos.CommandInfo, ci *mesos.ContainerInfo, r []mesos.Resource) *agent.Call { + return &agent.Call{ + Type: agent.Call_LAUNCH_CONTAINER, + LaunchContainer: &agent.Call_LaunchContainer{ + ContainerID: cid, + Command: cmd, + Container: ci, + Resources: r, + }, + } +} + +func WaitNestedContainer(cid mesos.ContainerID) *agent.Call { + return &agent.Call{ + Type: agent.Call_WAIT_NESTED_CONTAINER, + WaitNestedContainer: &agent.Call_WaitNestedContainer{ + ContainerID: cid, + }, + } +} + +func WaitContainer(cid mesos.ContainerID) *agent.Call { + return &agent.Call{ + Type: agent.Call_WAIT_CONTAINER, + WaitContainer: &agent.Call_WaitContainer{ + ContainerID: cid, + }, + } +} + +func KillNestedContainer(cid mesos.ContainerID) *agent.Call { + return &agent.Call{ + Type: agent.Call_KILL_NESTED_CONTAINER, + KillNestedContainer: &agent.Call_KillNestedContainer{ + ContainerID: cid, + }, + } +} + +func KillContainer(cid mesos.ContainerID) *agent.Call { + return &agent.Call{ + Type: agent.Call_KILL_CONTAINER, + KillContainer: &agent.Call_KillContainer{ + ContainerID: cid, + }, + } +} + +func RemoveNestedContainer(cid mesos.ContainerID) *agent.Call { + return &agent.Call{ + Type: agent.Call_REMOVE_NESTED_CONTAINER, + RemoveNestedContainer: &agent.Call_RemoveNestedContainer{ + ContainerID: cid, + }, + } +} + +func RemoveContainer(cid mesos.ContainerID) *agent.Call { + return &agent.Call{ + Type: agent.Call_REMOVE_CONTAINER, + RemoveContainer: &agent.Call_RemoveContainer{ + ContainerID: cid, + }, + } +} + +func LaunchNestedContainerSession(cid mesos.ContainerID, cmd *mesos.CommandInfo, ci *mesos.ContainerInfo) *agent.Call { + return &agent.Call{ + Type: agent.Call_LAUNCH_NESTED_CONTAINER_SESSION, + LaunchNestedContainerSession: &agent.Call_LaunchNestedContainerSession{ + ContainerID: cid, + Command: cmd, + Container: ci, + }, + } +} + +func AttachContainerOutput(cid mesos.ContainerID) *agent.Call { + return &agent.Call{ + Type: agent.Call_ATTACH_CONTAINER_OUTPUT, + AttachContainerOutput: &agent.Call_AttachContainerOutput{ + ContainerID: cid, + }, + } +} + +// AttachContainerInput returns a Call that is used to initiate attachment to a container's stdin. +// Callers should first send this Call followed by one or more AttachContainerInputXxx calls. 
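+// Illustrative sketch, not upstream code: the documented attach-input sequence
+// is a single CONTAINER_ID call followed by PROCESS_IO calls. FromChan, Push,
+// Sender and SendNoData are defined in calls_generated.go below; the context
+// import and a streaming-capable Sender are assumed here.
+func exampleAttachInput(ctx context.Context, sender Sender, cid mesos.ContainerID, stdin <-chan []byte) error {
+	ch := make(chan *agent.Call)
+	go func() {
+		defer close(ch) // closing the chan ends the request stream
+		for b := range stdin {
+			ch <- AttachContainerInputData(b)
+		}
+	}()
+	// Push prepends the mandatory CONTAINER_ID message ahead of the data frames.
+	return SendNoData(ctx, sender, FromChan(ch).Push(AttachContainerInput(cid)))
+}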
+func AttachContainerInput(cid mesos.ContainerID) *agent.Call {
+	return &agent.Call{
+		Type: agent.Call_ATTACH_CONTAINER_INPUT,
+		AttachContainerInput: &agent.Call_AttachContainerInput{
+			Type:        agent.Call_AttachContainerInput_CONTAINER_ID,
+			ContainerID: &cid,
+		},
+	}
+}
+
+func AttachContainerInputData(data []byte) *agent.Call {
+	return &agent.Call{
+		Type: agent.Call_ATTACH_CONTAINER_INPUT,
+		AttachContainerInput: &agent.Call_AttachContainerInput{
+			Type: agent.Call_AttachContainerInput_PROCESS_IO,
+			ProcessIO: &agent.ProcessIO{
+				Type: agent.ProcessIO_DATA,
+				Data: &agent.ProcessIO_Data{
+					Type: agent.ProcessIO_Data_STDIN,
+					Data: data,
+				},
+			},
+		},
+	}
+}
+
+func AttachContainerInputTTY(t *mesos.TTYInfo) *agent.Call {
+	return &agent.Call{
+		Type: agent.Call_ATTACH_CONTAINER_INPUT,
+		AttachContainerInput: &agent.Call_AttachContainerInput{
+			Type: agent.Call_AttachContainerInput_PROCESS_IO,
+			ProcessIO: &agent.ProcessIO{
+				Type: agent.ProcessIO_CONTROL,
+				Control: &agent.ProcessIO_Control{
+					Type:    agent.ProcessIO_Control_TTY_INFO,
+					TTYInfo: t,
+				},
+			},
+		},
+	}
+}
+
+func AddResourceProviderConfig(rpi mesos.ResourceProviderInfo) *agent.Call {
+	return &agent.Call{
+		Type: agent.Call_ADD_RESOURCE_PROVIDER_CONFIG,
+		AddResourceProviderConfig: &agent.Call_AddResourceProviderConfig{
+			Info: rpi,
+		},
+	}
+}
+
+func UpdateResourceProviderConfig(rpi mesos.ResourceProviderInfo) *agent.Call {
+	return &agent.Call{
+		Type: agent.Call_UPDATE_RESOURCE_PROVIDER_CONFIG,
+		UpdateResourceProviderConfig: &agent.Call_UpdateResourceProviderConfig{
+			Info: rpi,
+		},
+	}
+}
+
+func RemoveResourceProviderConfig(typ, name string) *agent.Call {
+	return &agent.Call{
+		Type: agent.Call_REMOVE_RESOURCE_PROVIDER_CONFIG,
+		RemoveResourceProviderConfig: &agent.Call_RemoveResourceProviderConfig{
+			Type: typ,
+			Name: name,
+		},
+	}
+}
+
+func PruneImages(excluded []mesos.Image) *agent.Call {
+	return &agent.Call{
+		Type: agent.Call_PRUNE_IMAGES,
+		PruneImages: &agent.Call_PruneImages{
+			ExcludedImages: excluded,
+		},
+	}
+}
diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/agent/calls/calls_generated.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/agent/calls/calls_generated.go
new file mode 100644
index 000000000000..98816f5545c0
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/agent/calls/calls_generated.go
@@ -0,0 +1,129 @@
+package calls
+
+// go generate -import github.com/mesos/mesos-go/api/v1/lib/agent -type C:agent.Call
+// GENERATED CODE FOLLOWS; DO NOT EDIT.
+
+import (
+	"context"
+
+	"github.com/mesos/mesos-go/api/v1/lib"
+	"github.com/mesos/mesos-go/api/v1/lib/encoding"
+
+	"github.com/mesos/mesos-go/api/v1/lib/agent"
+)
+
+type (
+	// Request generates a Call that's sent to a Mesos agent. Subsequent invocations are expected to
+	// yield equivalent calls. Intended for use w/ non-streaming requests to an agent.
+	Request interface {
+		Call() *agent.Call
+	}
+
+	// RequestFunc is the functional adaptation of Request.
+	RequestFunc func() *agent.Call
+
+	// RequestStreaming generates a Call that's sent to a Mesos agent. Subsequent invocations MAY generate
+	// different Call objects. No more Call objects are expected once a nil is returned to signal the end
+	// of the request stream.
+	RequestStreaming interface {
+		Request
+		IsStreaming()
+	}
+
+	// RequestStreamingFunc is the functional adaptation of RequestStreaming.
+	RequestStreamingFunc func() *agent.Call
+
+	// Sender issues a Request to a Mesos agent and properly manages Call-specific mechanics.
+	Sender interface {
+		Send(context.Context, Request) (mesos.Response, error)
+	}
+
+	// SenderFunc is the functional adaptation of the Sender interface.
+	SenderFunc func(context.Context, Request) (mesos.Response, error)
+)
+
+func (f RequestFunc) Call() *agent.Call { return f() }
+
+func (f RequestFunc) Marshaler() encoding.Marshaler {
+	// avoid returning (*agent.Call)(nil) for interface type
+	if call := f(); call != nil {
+		return call
+	}
+	return nil
+}
+
+func (f RequestStreamingFunc) Push(c ...*agent.Call) RequestStreamingFunc { return Push(f, c...) }
+
+func (f RequestStreamingFunc) Marshaler() encoding.Marshaler {
+	// avoid returning (*agent.Call)(nil) for interface type
+	if call := f(); call != nil {
+		return call
+	}
+	return nil
+}
+
+func (f RequestStreamingFunc) IsStreaming() {}
+
+func (f RequestStreamingFunc) Call() *agent.Call { return f() }
+
+// Push prepends one or more calls onto a request stream. If no calls are given then the original stream is returned.
+func Push(r RequestStreaming, c ...*agent.Call) RequestStreamingFunc {
+	return func() *agent.Call {
+		if len(c) == 0 {
+			return r.Call()
+		}
+		head := c[0]
+		c = c[1:]
+		return head
+	}
+}
+
+// Empty generates a stream that always returns nil.
+func Empty() RequestStreamingFunc { return func() *agent.Call { return nil } }
+
+var (
+	_ = Request(RequestFunc(nil))
+	_ = RequestStreaming(RequestStreamingFunc(nil))
+	_ = Sender(SenderFunc(nil))
+)
+
+// NonStreaming returns a RequestFunc that always generates the same Call.
+func NonStreaming(c *agent.Call) RequestFunc { return func() *agent.Call { return c } }
+
+// FromChan returns a streaming request that fetches calls from the given channel until it closes.
+// If a nil chan is specified then the returned func will always generate nil.
+func FromChan(ch <-chan *agent.Call) RequestStreamingFunc {
+	if ch == nil {
+		// avoid blocking forever if we're handed a nil chan
+		return func() *agent.Call { return nil }
+	}
+	return func() *agent.Call {
+		if m, ok := <-ch; ok {
+			return m
+		}
+		return nil
+	}
+}
+
+// Send implements the Sender interface for SenderFunc.
+func (f SenderFunc) Send(ctx context.Context, r Request) (mesos.Response, error) {
+	return f(ctx, r)
+}
+
+// IgnoreResponse generates a sender that closes any non-nil response received from Mesos.
+func IgnoreResponse(s Sender) SenderFunc {
+	return func(ctx context.Context, r Request) (mesos.Response, error) {
+		resp, err := s.Send(ctx, r)
+		if resp != nil {
+			resp.Close()
+		}
+		return nil, err
+	}
+}
+
+// SendNoData is a convenience func that executes the given Call using the provided Sender
+// and always drops the response data.
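+// Illustrative sketch, not upstream code: the standalone-container lifecycle
+// from agent.proto (launch, wait, remove) driven through these helpers and
+// SendNoData below. The Sender, ctx, and CommandInfo field shapes are assumed,
+// and WaitContainer's exit status is deliberately dropped to keep this short.
+func exampleContainerLifecycle(ctx context.Context, sender Sender, cid mesos.ContainerID) error {
+	shell := "echo hello"
+	if err := SendNoData(ctx, sender, NonStreaming(LaunchContainer(cid, &mesos.CommandInfo{Value: &shell}, nil, nil))); err != nil {
+		return err
+	}
+	if err := SendNoData(ctx, sender, NonStreaming(WaitContainer(cid))); err != nil {
+		return err
+	}
+	// Reclaim the runtime and sandbox directories; see RemoveContainer in agent.proto.
+	return SendNoData(ctx, sender, NonStreaming(RemoveContainer(cid)))
+}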
+func SendNoData(ctx context.Context, sender Sender, r Request) (err error) {
+	_, err = IgnoreResponse(sender).Send(ctx, r)
+	return
+}
diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/agent/calls/gen.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/agent/calls/gen.go
new file mode 100644
index 000000000000..53693c06dab2
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/agent/calls/gen.go
@@ -0,0 +1,3 @@
+package calls
+
+//go:generate go run ../../extras/gen/sender.go ../../extras/gen/gen.go -import github.com/mesos/mesos-go/api/v1/lib/agent -type C:agent.Call
diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/client.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/client.go
new file mode 100644
index 000000000000..233fe36231e3
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/client.go
@@ -0,0 +1,71 @@
+package mesos
+
+// DEPRECATED in favor of github.com/mesos/mesos-go/api/v1/lib/client
+
+import (
+	"io"
+
+	"github.com/mesos/mesos-go/api/v1/lib/encoding"
+)
+
+// A Client represents a Mesos API client which can send Calls and return
+// a streaming Decoder from which callers can read Events, an io.Closer to
+// close the event stream on graceful termination, and an error in case of failure.
+type Client interface {
+	Do(encoding.Marshaler) (Response, error)
+}
+
+// ClientFunc is a functional adapter of the Client interface.
+type ClientFunc func(encoding.Marshaler) (Response, error)
+
+// Do implements Client.
+func (cf ClientFunc) Do(m encoding.Marshaler) (Response, error) { return cf(m) }
+
+// Response captures the output of a Mesos API operation. Callers are responsible for invoking
+// Close when they're finished processing the response; otherwise there may be connection leaks.
+type Response interface {
+	io.Closer
+	encoding.Decoder
+}
+
+// ResponseDecorator optionally modifies the behavior of a Response.
+type ResponseDecorator interface {
+	Decorate(Response) Response
+}
+
+// ResponseDecoratorFunc is the functional adapter for ResponseDecorator.
+type ResponseDecoratorFunc func(Response) Response
+
+func (f ResponseDecoratorFunc) Decorate(r Response) Response { return f(r) }
+
+// CloseFunc is the functional adapter for io.Closer.
+type CloseFunc func() error
+
+// Close implements io.Closer.
+func (f CloseFunc) Close() error { return f() }
+
+// ResponseWrapper delegates to optional overrides for invocations of Response methods.
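+// Illustrative sketch, not upstream code: CloseFunc plus the ResponseWrapper
+// below make it cheap to decorate a Response. This hypothetical helper runs a
+// callback once the underlying response has been closed, e.g. to release a
+// connection slot; wrap it in a ResponseDecoratorFunc to compose with others.
+func onClose(r Response, fn func()) Response {
+	return &ResponseWrapper{
+		Response: r,
+		Closer: CloseFunc(func() error {
+			defer fn()
+			return r.Close()
+		}),
+	}
+}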
+type ResponseWrapper struct {
+	Response Response
+	Closer   io.Closer
+	Decoder  encoding.Decoder
+}
+
+func (wrapper *ResponseWrapper) Close() error {
+	if wrapper.Closer != nil {
+		return wrapper.Closer.Close()
+	}
+	if wrapper.Response != nil {
+		return wrapper.Response.Close()
+	}
+	return nil
+}
+
+func (wrapper *ResponseWrapper) Decode(u encoding.Unmarshaler) error {
+	if wrapper.Decoder != nil {
+		return wrapper.Decoder.Decode(u)
+	}
+	return wrapper.Response.Decode(u)
+}
+
+var _ = Response(&ResponseWrapper{})
diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/client/client.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/client/client.go
new file mode 100644
index 000000000000..0f0d0dd8a617
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/client/client.go
@@ -0,0 +1,54 @@
+package client
+
+import (
+	"github.com/mesos/mesos-go/api/v1/lib/encoding"
+)
+
+type (
+	// ResponseClass indicates the kind of response that a caller is expecting from Mesos.
+	ResponseClass int
+
+	// Request is a non-streaming request from the client to the server.
+	// Marshaler always returns the same object; the object is sent once to the server and then
+	// a response is expected.
+	Request interface {
+		Marshaler() encoding.Marshaler
+	}
+
+	// RequestStreaming is a streaming request from the client to the server.
+	// Marshaler returns a new object upon each invocation, nil when there are no more objects to send.
+	// Client implementations are expected to differentiate between Request and RequestStreaming either by
+	// type-switching or by attempting interface conversion.
+	RequestStreaming interface {
+		Request
+		IsStreaming()
+	}
+
+	RequestFunc          func() encoding.Marshaler
+	RequestStreamingFunc func() encoding.Marshaler
+)
+
+var (
+	_ = Request(RequestFunc(nil))
+	_ = RequestStreaming(RequestStreamingFunc(nil))
+)
+
+func (f RequestFunc) Marshaler() encoding.Marshaler          { return f() }
+func (f RequestStreamingFunc) Marshaler() encoding.Marshaler { return f() }
+func (f RequestStreamingFunc) IsStreaming()                  {}
+
+// RequestSingleton generates a non-streaming Request that always returns the same marshaler.
+func RequestSingleton(m encoding.Marshaler) Request {
+	return RequestFunc(func() encoding.Marshaler { return m })
+}
+
+const (
+	ResponseClassSingleton ResponseClass = iota
+	ResponseClassStreaming
+	ResponseClassNoData
+
+	// ResponseClassAuto should be used with versions of Mesos prior to 1.2.x.
+	// Otherwise, this class is deprecated and callers should use ResponseClassSingleton
+	// or ResponseClassStreaming instead.
+	ResponseClassAuto
+)
diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/debug/logger.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/debug/logger.go
new file mode 100644
index 000000000000..571be7d905f6
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/debug/logger.go
@@ -0,0 +1,17 @@
+package debug
+
+import "log"
+
+type Logger bool
+
+func (d Logger) Log(v ...interface{}) {
+	if d {
+		log.Print(v...)
+	}
+}
+
+func (d Logger) Logf(s string, v ...interface{}) {
+	if d {
+		log.Printf(s, v...)
+	}
+}
diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/doc.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/doc.go
new file mode 100644
index 000000000000..9c749fe3b239
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/doc.go
@@ -0,0 +1,3 @@
+// Package mesos presents common v1 HTTP API message types in addition to extension APIs that
+// aim to simplify use of the machine-generated code.
+package mesos
diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/encoding/codecs/codecs.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/encoding/codecs/codecs.go
new file mode 100644
index 000000000000..bff5aa8938c3
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/encoding/codecs/codecs.go
@@ -0,0 +1,33 @@
+package codecs
+
+import (
+	"github.com/mesos/mesos-go/api/v1/lib/encoding"
+	"github.com/mesos/mesos-go/api/v1/lib/encoding/json"
+	"github.com/mesos/mesos-go/api/v1/lib/encoding/proto"
+)
+
+const (
+	// MediaTypeProtobuf is the Protobuf serialization format media type.
+	MediaTypeProtobuf = encoding.MediaType("application/x-protobuf")
+	// MediaTypeJSON is the JSON serialization format media type.
+	MediaTypeJSON = encoding.MediaType("application/json")
+
+	NameProtobuf = "protobuf"
+	NameJSON     = "json"
+)
+
+// ByMediaType are pre-configured default Codecs, ready to use out of the box.
+var ByMediaType = map[encoding.MediaType]encoding.Codec{
+	MediaTypeProtobuf: encoding.Codec{
+		Name:       NameProtobuf,
+		Type:       MediaTypeProtobuf,
+		NewEncoder: proto.NewEncoder,
+		NewDecoder: proto.NewDecoder,
+	},
+	MediaTypeJSON: encoding.Codec{
+		Name:       NameJSON,
+		Type:       MediaTypeJSON,
+		NewEncoder: json.NewEncoder,
+		NewDecoder: json.NewDecoder,
+	},
+}
diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/encoding/framing/decoder.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/encoding/framing/decoder.go
new file mode 100644
index 000000000000..51e159c87e87
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/encoding/framing/decoder.go
@@ -0,0 +1,34 @@
+package framing
+
+type (
+	// UnmarshalFunc translates bytes to objects.
+	UnmarshalFunc func([]byte, interface{}) error
+
+	// Decoder reads framed messages from some source and unmarshals them via a
+	// format-specific UnmarshalFunc (e.g. Protobuf or JSON).
+	Decoder interface {
+		// Decode reads the next encoded message from its input and stores it
+		// in the value pointed to by m. The requirements on m are those of the
+		// underlying UnmarshalFunc (the Protobuf variant panics if m isn't a proto.Message).
+		Decode(interface{}) error
+	}
+
+	// DecoderFunc is the functional adaptation of Decoder.
+	DecoderFunc func(interface{}) error
+)
+
+func (f DecoderFunc) Decode(m interface{}) error { return f(m) }
+
+var _ = Decoder(DecoderFunc(nil))
+
+// NewDecoder returns a new Decoder that reads from the given frame Reader.
+func NewDecoder(r Reader, uf UnmarshalFunc) DecoderFunc {
+	return func(m interface{}) error {
+		// Note: the buf returned by ReadFrame will change over time, it can't be sub-sliced
+		// and then those sub-slices retained. Examination of generated proto code seems to indicate
+		// that byte buffers are copied vs. referenced by sub-slice (gogo protoc).
+ frame, err := r.ReadFrame() + if err != nil { + return err + } + return uf(frame, m) + } +} diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/encoding/framing/framing.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/encoding/framing/framing.go new file mode 100644 index 000000000000..0e05720b0ac6 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/encoding/framing/framing.go @@ -0,0 +1,70 @@ +package framing + +import ( + "io" + "io/ioutil" +) + +type Error string + +func (err Error) Error() string { return string(err) } + +const ( + ErrorUnderrun = Error("frame underrun, unexpected EOF") + ErrorBadSize = Error("bad frame size") + ErrorOversizedFrame = Error("oversized frame, max size exceeded") +) + +type ( + // Reader generates data frames from some source, returning io.EOF when the end of the input stream is + // detected. + Reader interface { + ReadFrame() (frame []byte, err error) + } + + // ReaderFunc is the functional adaptation of Reader. + ReaderFunc func() ([]byte, error) + + // Writer sends whole frames to some endpoint; returns io.ErrShortWrite if the frame is only partially written. + Writer interface { + WriteFrame(frame []byte) error + } + + // WriterFunc is the functional adaptation of Writer. + WriterFunc func([]byte) error +) + +func (f ReaderFunc) ReadFrame() ([]byte, error) { return f() } +func (f WriterFunc) WriteFrame(b []byte) error { return f(b) } + +var _ = Reader(ReaderFunc(nil)) +var _ = Writer(WriterFunc(nil)) + +// EOFReaderFunc always returns nil, io.EOF; it implements the ReaderFunc API. +func EOFReaderFunc() ([]byte, error) { return nil, io.EOF } + +var _ = ReaderFunc(EOFReaderFunc) // sanity check + +// ReadAll returns a reader func that returns the complete contents of `r` in a single frame. +// A zero length frame is treated as an "end of stream" condition, returning io.EOF. +func ReadAll(r io.Reader) ReaderFunc { + return func() (b []byte, err error) { + b, err = ioutil.ReadAll(r) + if len(b) == 0 && err == nil { + err = io.EOF + } + return + } +} + +// WriterFor adapts an io.Writer to the Writer interface. All buffers are written to `w` without decoration or +// modification. +func WriterFor(w io.Writer) WriterFunc { + return func(b []byte) error { + n, err := w.Write(b) + if err == nil && n != len(b) { + return io.ErrShortWrite + } + return err + } +} diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/encoding/json/json.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/encoding/json/json.go new file mode 100644 index 000000000000..d7fc2843d1ac --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/encoding/json/json.go @@ -0,0 +1,28 @@ +package json + +import ( + "encoding/json" + + "github.com/mesos/mesos-go/api/v1/lib/encoding" + "github.com/mesos/mesos-go/api/v1/lib/encoding/framing" +) + +// NewEncoder returns a new Encoder of Calls to JSON messages written to +// the given io.Writer. +func NewEncoder(s encoding.Sink) encoding.Encoder { + w := s() + return encoding.EncoderFunc(func(m encoding.Marshaler) error { + b, err := json.Marshal(m) + if err != nil { + return err + } + return w.WriteFrame(b) + }) +} + +// NewDecoder returns a new Decoder of JSON messages read from the given source. 
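+// Illustrative sketch, not upstream code: a round trip through this codec. The
+// bytes and agent imports are assumed; SinkWriter/SourceReader come from the
+// parent encoding package, and agent.Call is assumed to satisfy the
+// Marshaler/Unmarshaler interfaces via its generated code.
+func exampleRoundTrip(in *agent.Call) (*agent.Call, error) {
+	var buf bytes.Buffer
+	if err := NewEncoder(encoding.SinkWriter(&buf)).Encode(in); err != nil {
+		return nil, err
+	}
+	var out agent.Call
+	if err := NewDecoder(encoding.SourceReader(&buf)).Decode(&out); err != nil {
+		return nil, err
+	}
+	return &out, nil
+}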
+func NewDecoder(s encoding.Source) encoding.Decoder { + r := s() + dec := framing.NewDecoder(r, json.Unmarshal) + return encoding.DecoderFunc(func(u encoding.Unmarshaler) error { return dec.Decode(u) }) +} diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/encoding/proto/doc.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/encoding/proto/doc.go new file mode 100644 index 000000000000..789cdf87fef7 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/encoding/proto/doc.go @@ -0,0 +1,4 @@ +// Package proto implements protobuf utilities such as functional options to +// construct complex structs and encoders and decoders composable with +// io.ReadWriters. +package proto diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/encoding/proto/encoding.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/encoding/proto/encoding.go new file mode 100644 index 000000000000..4300e490d5f9 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/encoding/proto/encoding.go @@ -0,0 +1,30 @@ +package proto + +import ( + "github.com/gogo/protobuf/proto" + "github.com/mesos/mesos-go/api/v1/lib/encoding" + "github.com/mesos/mesos-go/api/v1/lib/encoding/framing" +) + +// NewEncoder returns a new Encoder of Calls to Protobuf messages written to +// the given io.Writer. +func NewEncoder(s encoding.Sink) encoding.Encoder { + w := s() + return encoding.EncoderFunc(func(m encoding.Marshaler) error { + b, err := proto.Marshal(m.(proto.Message)) + if err != nil { + return err + } + return w.WriteFrame(b) + }) +} + +// NewDecoder returns a new Decoder of Protobuf messages read from the given Source. +func NewDecoder(s encoding.Source) encoding.Decoder { + r := s() + var ( + uf = func(b []byte, m interface{}) error { return proto.Unmarshal(b, m.(proto.Message)) } + dec = framing.NewDecoder(r, uf) + ) + return encoding.DecoderFunc(func(u encoding.Unmarshaler) error { return dec.Decode(u) }) +} diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/encoding/types.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/encoding/types.go new file mode 100644 index 000000000000..755308147b21 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/encoding/types.go @@ -0,0 +1,111 @@ +package encoding + +import ( + "encoding/json" + "io" + + pb "github.com/gogo/protobuf/proto" + "github.com/mesos/mesos-go/api/v1/lib/encoding/framing" +) + +type MediaType string + +// ContentType returns the HTTP Content-Type associated with the MediaType +func (m MediaType) ContentType() string { return string(m) } + +type ( + Source func() framing.Reader + Sink func() framing.Writer + + // A Codec composes encoding and decoding of a serialization format. + Codec struct { + Name string + Type MediaType + NewEncoder func(Sink) Encoder + NewDecoder func(Source) Decoder + } + + SourceFactory interface { + NewSource(r io.Reader) Source + } + SourceFactoryFunc func(r io.Reader) Source + + SinkFactory interface { + NewSink(w io.Writer) Sink + } + SinkFactoryFunc func(w io.Writer) Sink +) + +func (f SourceFactoryFunc) NewSource(r io.Reader) Source { return f(r) } + +func (f SinkFactoryFunc) NewSink(w io.Writer) Sink { return f(w) } + +var ( + _ = SourceFactory(SourceFactoryFunc(nil)) + _ = SinkFactory(SinkFactoryFunc(nil)) +) + +// SourceReader returns a Source that buffers all input from the given io.Reader +// and returns the contents in a single frame. 
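+// Illustrative sketch, not upstream code (the strings import is assumed): the
+// Reader produced by SourceReader below yields the whole payload as a single
+// frame and then reports io.EOF, which is the "end of stream" contract.
+func exampleSingleFrame() ([]byte, error) {
+	r := SourceReader(strings.NewReader("hello"))()
+	frame, err := r.ReadFrame() // frame == []byte("hello"), err == nil
+	if err != nil {
+		return nil, err
+	}
+	_, err = r.ReadFrame() // the single frame is spent, so err == io.EOF
+	if err == io.EOF {
+		err = nil
+	}
+	return frame, err
+}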
+func SourceReader(r io.Reader) Source {
+	ch := make(chan framing.ReaderFunc, 1)
+	ch <- framing.ReadAll(r)
+	return func() framing.Reader {
+		select {
+		case f := <-ch:
+			return f
+		default:
+			return framing.ReaderFunc(framing.EOFReaderFunc)
+		}
+	}
+}
+
+// SinkWriter returns a Sink that sends a frame to an io.Writer with no decoration.
+func SinkWriter(w io.Writer) Sink { return func() framing.Writer { return framing.WriterFor(w) } }
+
+// String implements the fmt.Stringer interface.
+func (c *Codec) String() string {
+	if c == nil {
+		return ""
+	}
+	return c.Name
+}
+
+type (
+	// Marshaler composes the supported marshaling formats.
+	Marshaler interface {
+		pb.Marshaler
+		json.Marshaler
+	}
+	// Unmarshaler composes the supported unmarshaling formats.
+	Unmarshaler interface {
+		pb.Unmarshaler
+		json.Unmarshaler
+	}
+	// An Encoder encodes a given Marshaler or returns an error in case of failure.
+	Encoder interface {
+		Encode(Marshaler) error
+	}
+
+	// EncoderFunc is the functional adapter for Encoder.
+	EncoderFunc func(Marshaler) error
+
+	// A Decoder decodes a given Unmarshaler or returns an error in case of failure.
+	Decoder interface {
+		Decode(Unmarshaler) error
+	}
+
+	// DecoderFunc is the functional adapter for Decoder.
+	DecoderFunc func(Unmarshaler) error
+)
+
+// Decode implements the Decoder interface.
+func (f DecoderFunc) Decode(u Unmarshaler) error { return f(u) }
+
+// Encode implements the Encoder interface.
+func (f EncoderFunc) Encode(m Marshaler) error { return f(m) }
+
+var (
+	_ = Encoder(EncoderFunc(nil))
+	_ = Decoder(DecoderFunc(nil))
+)
diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/filters.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/filters.go
new file mode 100644
index 000000000000..73cbf588a212
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/filters.go
@@ -0,0 +1,26 @@
+package mesos
+
+import "time"
+
+type FilterOpt func(*Filters)
+
+func (f *Filters) With(opts ...FilterOpt) *Filters {
+	for _, o := range opts {
+		o(f)
+	}
+	return f
+}
+
+func RefuseSeconds(d time.Duration) FilterOpt {
+	return func(f *Filters) {
+		s := d.Seconds()
+		f.RefuseSeconds = &s
+	}
+}
+
+func OptionalFilters(fo ...FilterOpt) *Filters {
+	if len(fo) == 0 {
+		return nil
+	}
+	return (&Filters{}).With(fo...)
+}
diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/fixedpoint.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/fixedpoint.go
new file mode 100644
index 000000000000..c330309b2876
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/fixedpoint.go
@@ -0,0 +1,35 @@
+package mesos
+
+// fixed point scalar math from mesos:src/common/values.cpp
+// --
+// We manipulate scalar values by converting them from floating point to a
+// fixed point representation, doing a calculation, and then converting
+// the result back to floating point. We deliberately only preserve three
+// decimal digits of precision in the fixed point representation. This
+// ensures that client applications see predictable numerical behavior, at
+// the expense of sacrificing some precision.
+
+import "math"
+
+func convertToFloat64(f int64) float64 {
+	// NOTE: We do the conversion from fixed point via integer division
+	// and then modulus, rather than a single floating point division.
+	// This ensures that we only apply floating point division to inputs
+	// in the range [0,999], which is easier to check for correctness.
+ var ( + quotient = float64(f / 1000) + remainder = float64(f%1000) / 1000.0 + ) + return quotient + remainder +} + +func convertToFixed64(f float64) int64 { + return round64(f * 1000) +} + +func round64(f float64) int64 { + if math.Abs(f) < 0.5 { + return 0 + } + return int64(f + math.Copysign(0.5, f)) +} diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/httpcli/apierrors/apierrors.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/httpcli/apierrors/apierrors.go new file mode 100644 index 000000000000..c5d6335be385 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/httpcli/apierrors/apierrors.go @@ -0,0 +1,161 @@ +package apierrors + +import ( + "io" + "io/ioutil" + "net/http" +) + +// Code is a Mesos HTTP v1 API response status code +type Code int + +const ( + // MsgNotLeader is returned by Do calls that are sent to a non leading Mesos master. + MsgNotLeader = "call sent to a non-leading master" + // MsgAuth is returned by Do calls that are not successfully authenticated. + MsgAuth = "call not authenticated" + // MsgUnsubscribed is returned by Do calls that are sent before a subscription is established. + MsgUnsubscribed = "no subscription established" + // MsgVersion is returned by Do calls that are sent to an incompatible API version. + MsgVersion = "incompatible API version" + // MsgMalformed is returned by Do calls that are malformed. + MsgMalformed = "malformed request" + // MsgMediaType is returned by Do calls that are sent with an unsupported media type. + MsgMediaType = "unsupported media type" + // MsgRateLimit is returned by Do calls that are rate limited. This is a temporary condition + // that should clear. + MsgRateLimit = "rate limited" + // MsgUnavailable is returned by Do calls that are sent to a master or agent that's in recovery, or + // does not yet realize that it's the leader. This is a temporary condition that should clear. + MsgUnavailable = "mesos server unavailable" + // MsgNotFound could happen if the master or agent libprocess has not yet set up http routes. + MsgNotFound = "mesos http endpoint not found" + + CodeNotLeader = Code(http.StatusTemporaryRedirect) + CodeNotAuthenticated = Code(http.StatusUnauthorized) + CodeUnsubscribed = Code(http.StatusForbidden) + CodeIncompatibleVersion = Code(http.StatusConflict) + CodeMalformedRequest = Code(http.StatusBadRequest) + CodeUnsupportedMediaType = Code(http.StatusNotAcceptable) + CodeRateLimitExceeded = Code(http.StatusTooManyRequests) + CodeMesosUnavailable = Code(http.StatusServiceUnavailable) + CodeNotFound = Code(http.StatusNotFound) + + MaxSizeDetails = 4 * 1024 // MaxSizeDetails limits the length of the details message read from a response body +) + +var ( + // ErrorTable maps HTTP response codes to their respective Mesos v1 API error messages. + ErrorTable = map[Code]string{ + CodeNotLeader: MsgNotLeader, + CodeMalformedRequest: MsgMalformed, + CodeIncompatibleVersion: MsgVersion, + CodeUnsubscribed: MsgUnsubscribed, + CodeNotAuthenticated: MsgAuth, + CodeUnsupportedMediaType: MsgMediaType, + CodeNotFound: MsgNotFound, + CodeMesosUnavailable: MsgUnavailable, + CodeRateLimitExceeded: MsgRateLimit, + } +) + +// Error captures HTTP v1 API error codes and messages generated by Mesos. 
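+// Illustrative sketch, not upstream code: how callers typically consume these
+// errors. Temporary (defined below) flags conditions that should eventually
+// clear, such as rate limiting or a recovering master, which makes them
+// reasonable candidates for retry with backoff.
+func shouldRetry(err error) bool {
+	apiErr, ok := err.(*Error)
+	return ok && apiErr.Temporary()
+}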
+type Error struct {
+	code    Code   // code is the HTTP response status code generated by Mesos
+	message string // message briefly summarizes the nature of the error, possibly includes details from Mesos
+}
+
+// IsError returns true for all HTTP status codes that are not considered informational or successful.
+func (code Code) IsError() bool {
+	return code >= 300
+}
+
+// FromResponse returns an `*Error` for a response containing a status code that indicates an error condition.
+// The response body (if any) is appended to the error message.
+// Returns nil for nil responses and responses with non-error status codes.
+// See Code.IsError.
+func FromResponse(res *http.Response) error {
+	if res == nil {
+		return nil
+	}
+
+	code := Code(res.StatusCode)
+	if !code.IsError() {
+		// non-error HTTP response codes don't generate errors
+		return nil
+	}
+
+	var details string
+
+	if res.Body != nil {
+		defer res.Body.Close()
+		buf, _ := ioutil.ReadAll(io.LimitReader(res.Body, MaxSizeDetails))
+		details = string(buf)
+	}
+
+	return code.Error(details)
+}
+
+// Error generates an error from the given status code and detail string.
+func (code Code) Error(details string) error {
+	if !code.IsError() {
+		return nil
+	}
+	err := &Error{
+		code:    code,
+		message: ErrorTable[code],
+	}
+	if details != "" {
+		err.message = err.message + ": " + details
+	}
+	return err
+}
+
+// Error implements the error interface.
+func (e *Error) Error() string { return e.message }
+
+// Temporary returns true if the error is a temporary condition that should eventually clear.
+func (e *Error) Temporary() bool {
+	switch e.code {
+	// TODO(jdef): NotFound **could** be a temporary error because there's a race at mesos startup in which the
+	// HTTP server responds before the internal listeners have been initialized. But it could also be reported
+	// because the client is accessing an invalid endpoint; as of right now, a client cannot distinguish between
+	// these cases.
+	// https://issues.apache.org/jira/browse/MESOS-7697
+	case CodeRateLimitExceeded, CodeMesosUnavailable:
+		return true
+	default:
+		return false
+	}
+}
+
+// CodesIndicatingSubscriptionLoss is a set of apierrors.Code entries which each indicate that
+// the event subscription stream has been severed between the scheduler and mesos. It's represented
+// as a public map variable so that clients can program additional error codes (if such are discovered)
+// without hacking the code of the mesos-go library directly.
+var CodesIndicatingSubscriptionLoss = func(codes ...Code) map[Code]struct{} {
+	result := make(map[Code]struct{}, len(codes))
+	for _, code := range codes {
+		result[code] = struct{}{}
+	}
+	return result
+}(
+	// expand this list as we discover other errors that guarantee we've lost our event subscription.
+	CodeUnsubscribed,
+)
+
+// SubscriptionLoss returns true if the error indicates that the event subscription stream has been severed
+// between mesos and a mesos client.
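+// Illustrative sketch, not upstream code: SubscriptionLoss (below) is meant to
+// drive re-subscription. `subscribe` is an assumed caller-supplied func that
+// blocks while the event stream is healthy and returns the terminating error.
+func resubscribeLoop(subscribe func() error) error {
+	for {
+		err := subscribe()
+		if apiErr, ok := err.(*Error); ok && apiErr.SubscriptionLoss() {
+			continue // the stream was severed; establish a new subscription
+		}
+		return err
+	}
+}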
+func (e *Error) SubscriptionLoss() (result bool) { + _, result = CodesIndicatingSubscriptionLoss[e.code] + return +} + +// Matches returns true if the given error is an API error with a matching error code +func (code Code) Matches(err error) bool { + if err == nil { + return !code.IsError() + } + apiErr, ok := err.(*Error) + return ok && apiErr.code == code +} diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/httpcli/auth_basic.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/httpcli/auth_basic.go new file mode 100644 index 000000000000..5cad3e9ddf47 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/httpcli/auth_basic.go @@ -0,0 +1,33 @@ +package httpcli + +import ( + "net/http" +) + +// roundTripperFunc is the functional adaptation of http.RoundTripper +type roundTripperFunc func(*http.Request) (*http.Response, error) + +// RoundTrip implements RoundTripper for roundTripperFunc +func (f roundTripperFunc) RoundTrip(req *http.Request) (*http.Response, error) { return f(req) } + +// BasicAuth generates a functional config option that sets HTTP Basic authentication for a Client +func BasicAuth(username, passwd string) ConfigOpt { + // TODO(jdef) this could be more efficient. according to the stdlib we're not supposed to + // mutate the original Request, so we copy here (including headers). another approach would + // be to generate a functional RequestOpt that adds the right header. + return WrapRoundTripper(func(rt http.RoundTripper) http.RoundTripper { + return roundTripperFunc(func(req *http.Request) (*http.Response, error) { + var h http.Header + if req.Header != nil { + h = make(http.Header, len(req.Header)) + for k, v := range req.Header { + h[k] = append(make([]string, 0, len(v)), v...) + } + } + clonedReq := *req + clonedReq.Header = h + clonedReq.SetBasicAuth(username, passwd) + return rt.RoundTrip(&clonedReq) + }) + }) +} diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/httpcli/http.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/httpcli/http.go new file mode 100644 index 000000000000..55bf9649326a --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/httpcli/http.go @@ -0,0 +1,614 @@ +package httpcli + +import ( + "bytes" + "context" + "crypto/tls" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "sync" + "time" + + "github.com/mesos/mesos-go/api/v1/lib" + "github.com/mesos/mesos-go/api/v1/lib/client" + logger "github.com/mesos/mesos-go/api/v1/lib/debug" + "github.com/mesos/mesos-go/api/v1/lib/encoding" + "github.com/mesos/mesos-go/api/v1/lib/encoding/codecs" + "github.com/mesos/mesos-go/api/v1/lib/encoding/framing" + "github.com/mesos/mesos-go/api/v1/lib/httpcli/apierrors" + "github.com/mesos/mesos-go/api/v1/lib/recordio" +) + +func noRedirect(req *http.Request, via []*http.Request) error { return http.ErrUseLastResponse } + +// ProtocolError is returned when we receive a response from Mesos that is outside of the HTTP API specification. +// Receipt of the following will yield protocol errors: +// - any unexpected non-error HTTP response codes (e.g. 199) +// - any unexpected Content-Type +type ProtocolError string + +// Error implements error interface +func (pe ProtocolError) Error() string { return string(pe) } + +const ( + debug = logger.Logger(false) + mediaTypeRecordIO = encoding.MediaType("application/recordio") +) + +// DoFunc sends an HTTP request and returns an HTTP response. 
+//
+// An error is returned if caused by client policy (such as
+// http.Client.CheckRedirect), or if there was an HTTP protocol error. A
+// non-2xx response doesn't cause an error.
+//
+// When err is nil, resp always contains a non-nil resp.Body.
+//
+// Callers should close resp.Body when done reading from it. If resp.Body is
+// not closed, an underlying RoundTripper (typically Transport) may not be able
+// to re-use a persistent TCP connection to the server for a subsequent
+// "keep-alive" request.
+//
+// The request Body, if non-nil, will be closed by an underlying Transport,
+// even on errors.
+type DoFunc func(*http.Request) (*http.Response, error)
+
+// Response captures the output of a Mesos HTTP API operation. Callers are responsible for invoking
+// Close when they're finished processing the response; otherwise there may be connection leaks.
+type Response struct {
+	io.Closer
+	encoding.Decoder
+	Header http.Header
+}
+
+// ErrorMapperFunc generates an error for the given response.
+type ErrorMapperFunc func(*http.Response) error
+
+// ResponseHandler is invoked to process an HTTP response. Callers SHALL invoke Close for
+// a non-nil Response, even when errors are returned.
+type ResponseHandler func(*http.Response, client.ResponseClass, error) (mesos.Response, error)
+
+// A Client is a Mesos HTTP API client.
+type Client struct {
+	url              string
+	do               DoFunc
+	header           http.Header
+	codec            encoding.Codec
+	errorMapper      ErrorMapperFunc
+	requestOpts      []RequestOpt
+	buildRequestFunc func(client.Request, client.ResponseClass, ...RequestOpt) (*http.Request, error)
+	handleResponse   ResponseHandler
+}
+
+var (
+	DefaultCodec   = codecs.ByMediaType[codecs.MediaTypeProtobuf]
+	DefaultHeaders = http.Header{}
+
+	// DefaultConfigOpt represents the default client config options.
+	DefaultConfigOpt = []ConfigOpt{
+		Transport(func(t *http.Transport) {
+			// all calls should be ack'd by the server within this interval.
+			t.ResponseHeaderTimeout = 15 * time.Second
+			t.MaxIdleConnsPerHost = 2 // don't depend on go's default
+		}),
+	}
+
+	DefaultErrorMapper = ErrorMapperFunc(apierrors.FromResponse)
+)
+
+// New returns a new Client with the given Opts applied.
+// Callers are expected to configure the URL, Do, and Codec options prior to
+// invoking Do.
+func New(opts ...Opt) *Client {
+	c := &Client{
+		codec:       DefaultCodec,
+		do:          With(DefaultConfigOpt...),
+		header:      cloneHeaders(DefaultHeaders),
+		errorMapper: DefaultErrorMapper,
+	}
+	c.buildRequestFunc = c.buildRequest
+	c.handleResponse = c.HandleResponse
+	c.With(opts...)
+	return c
+}
+
+func cloneHeaders(hs http.Header) http.Header {
+	result := make(http.Header)
+	for k, v := range hs {
+		cloned := make([]string, len(v))
+		copy(cloned, v)
+		result[k] = cloned
+	}
+	return result
+}
+
+// Endpoint returns the current Mesos API endpoint URL that the caller is set to invoke.
+func (c *Client) Endpoint() string {
+	return c.url
+}
+
+// RequestOpt defines a functional option for an http.Request.
+type RequestOpt func(*http.Request)
+
+// RequestOpts is a convenience type.
+type RequestOpts []RequestOpt
+
+// Apply applies this set of request options to the given HTTP request.
+func (opts RequestOpts) Apply(req *http.Request) {
+	// apply per-request options
+	for _, o := range opts {
+		if o != nil {
+			o(req)
+		}
+	}
+}
+
+// With applies the given Opts to a Client and returns an Opt that restores the
+// previous configuration (see WithTemporary below, which relies on this to
+// revert changes).
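+// Illustrative sketch, not upstream code: because With returns an "undo" Opt,
+// a client can be reconfigured for the duration of a single operation.
+// BasicAuth comes from auth_basic.go; the Do Opt is implied by New's doc
+// comment above and assumed here, and the credentials are made up.
+func exampleScopedAuth(c *Client, fetch func() error) error {
+	return c.WithTemporary(Do(With(BasicAuth("user", "secret"))), fetch)
+}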
+func (c *Client) With(opts ...Opt) Opt { + return Opts(opts).Merged().Apply(c) +} + +// WithTemporary configures the Client with the temporary option and returns the results of +// invoking f(). Changes made to the Client by the temporary option are reverted before this +// func returns. +func (c *Client) WithTemporary(opt Opt, f func() error) error { + if opt != nil { + undo := c.With(opt) + defer c.With(undo) + } + return f() +} + +// Mesos returns a mesos.Client variant backed by this implementation. +// Deprecated. +func (c *Client) Mesos(opts ...RequestOpt) mesos.Client { + return mesos.ClientFunc(func(m encoding.Marshaler) (mesos.Response, error) { + return c.Do(m, opts...) + }) +} + +func prepareForResponse(rc client.ResponseClass, codec encoding.Codec) (RequestOpts, error) { + // We need to tell Mesos both the content-type and message-content-type that we're expecting, otherwise + // the server may give us validation problems, or else send back a vague content-type (w/o a + // message-content-type). In order to communicate these things we need to understand the desired response + // type from the perspective of the caller --> client.ResponseClass. + var accept RequestOpts + switch rc { + case client.ResponseClassSingleton, client.ResponseClassAuto, client.ResponseClassNoData: + accept = append(accept, Header("Accept", codec.Type.ContentType())) + case client.ResponseClassStreaming: + accept = append(accept, Header("Accept", mediaTypeRecordIO.ContentType())) + accept = append(accept, Header("Message-Accept", codec.Type.ContentType())) + default: + return nil, ProtocolError(fmt.Sprintf("illegal response class requested: %v", rc)) + } + return accept, nil +} + +// buildRequest is a factory func that generates and returns an http.Request for the +// given marshaler and request options. +func (c *Client) buildRequest(cr client.Request, rc client.ResponseClass, opt ...RequestOpt) (*http.Request, error) { + if crs, ok := cr.(client.RequestStreaming); ok { + return c.buildRequestStream(crs.Marshaler, rc, opt...) + } + accept, err := prepareForResponse(rc, c.codec) + if err != nil { + return nil, err + } + + //TODO(jdef): use a pool to allocate these (and reduce garbage)? + // .. or else, use a pipe (like streaming does) to avoid the intermediate buffer? + var body bytes.Buffer + if err := c.codec.NewEncoder(encoding.SinkWriter(&body)).Encode(cr.Marshaler()); err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", c.url, &body) + if err != nil { + return nil, err + } + + helper := HTTPRequestHelper{req} + return helper. + withOptions(c.requestOpts, opt). + withHeaders(c.header). + withHeader("Content-Type", c.codec.Type.ContentType()). + withHeader("Accept", c.codec.Type.ContentType()). + withOptions(accept). 
+ Request, nil +} + +func (c *Client) buildRequestStream(f func() encoding.Marshaler, rc client.ResponseClass, opt ...RequestOpt) (*http.Request, error) { + accept, err := prepareForResponse(rc, c.codec) + if err != nil { + return nil, err + } + + var ( + pr, pw = io.Pipe() + enc = c.codec.NewEncoder(func() framing.Writer { return recordio.NewWriter(pw) }) + ) + req, err := http.NewRequest("POST", c.url, pr) + if err != nil { + pw.Close() // ignore error + return nil, err + } + + go func() { + var closeOnce sync.Once + defer closeOnce.Do(func() { + pw.Close() + }) + for { + m := f() + if m == nil { + // no more messages to send; end of the stream + break + } + err := enc.Encode(m) + if err != nil { + closeOnce.Do(func() { + pw.CloseWithError(err) + }) + break + } + } + }() + + helper := HTTPRequestHelper{req} + return helper. + withOptions(c.requestOpts, opt). + withHeaders(c.header). + withHeader("Content-Type", mediaTypeRecordIO.ContentType()). + withHeader("Message-Content-Type", c.codec.Type.ContentType()). + withOptions(accept). + Request, nil +} + +func validateSuccessfulResponse(codec encoding.Codec, res *http.Response, rc client.ResponseClass) error { + switch res.StatusCode { + case http.StatusOK: + ct := res.Header.Get("Content-Type") + switch rc { + case client.ResponseClassNoData: + if ct != "" { + return ProtocolError(fmt.Sprintf("unexpected content type: %q", ct)) + } + case client.ResponseClassSingleton, client.ResponseClassAuto: + if ct != codec.Type.ContentType() { + return ProtocolError(fmt.Sprintf("unexpected content type: %q", ct)) + } + case client.ResponseClassStreaming: + if ct != mediaTypeRecordIO.ContentType() { + return ProtocolError(fmt.Sprintf("unexpected content type: %q", ct)) + } + ct = res.Header.Get("Message-Content-Type") + if ct != codec.Type.ContentType() { + return ProtocolError(fmt.Sprintf("unexpected message content type: %q", ct)) + } + default: + return ProtocolError(fmt.Sprintf("unsupported response-class: %q", rc)) + } + + case http.StatusAccepted: + // nothing to validate, we're not expecting any response entity in this case. + // TODO(jdef) perhaps check Content-Length == 0 here? + } + return nil +} + +func newSourceFactory(rc client.ResponseClass) encoding.SourceFactoryFunc { + switch rc { + case client.ResponseClassNoData: + return nil + case client.ResponseClassSingleton: + return encoding.SourceReader + case client.ResponseClassStreaming, client.ResponseClassAuto: + return recordIOSourceFactory + default: + panic(fmt.Sprintf("unsupported response-class: %q", rc)) + } +} + +func recordIOSourceFactory(r io.Reader) encoding.Source { + return func() framing.Reader { return recordio.NewReader(r) } +} + +// HandleResponse parses an HTTP response from a Mesos service endpoint, transforming the +// raw HTTP response into a mesos.Response. 
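+//
+// Illustrative sketch (a hypothetical wrapper, not part of this file): the
+// HandleResponse Opt defined below can decorate this default handler, e.g.
+// for logging, while delegating the real work back to it:
+//
+//	cli.With(HandleResponse(func(res *http.Response, rc client.ResponseClass, err error) (mesos.Response, error) {
+//		// inspect res/err here before delegating to the default handler
+//		return cli.HandleResponse(res, rc, err)
+//	}))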
+func (c *Client) HandleResponse(res *http.Response, rc client.ResponseClass, err error) (mesos.Response, error) {
+	if err != nil {
+		if res != nil && res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, err
+	}
+
+	result := &Response{
+		Closer: res.Body,
+		Header: res.Header,
+	}
+	if err = c.errorMapper(res); err != nil {
+		return result, err
+	}
+
+	err = validateSuccessfulResponse(c.codec, res, rc)
+	if err != nil {
+		res.Body.Close()
+		return nil, err
+	}
+
+	switch res.StatusCode {
+	case http.StatusOK:
+		debug.Log("request OK, decoding response")
+
+		sf := newSourceFactory(rc)
+		if sf == nil {
+			if rc != client.ResponseClassNoData {
+				panic("nil Source for response that expected data")
+			}
+			// we don't expect any data; drain the response body and close it (compliant with
+			// golang's expectations for http/1.1 keep-alive support).
+			defer res.Body.Close()
+			_, err = io.Copy(ioutil.Discard, res.Body)
+			return nil, err
+		}
+
+		result.Decoder = c.codec.NewDecoder(sf.NewSource(res.Body))
+
+	case http.StatusAccepted:
+		debug.Log("request Accepted")
+
+		// noop; no decoder for these types of calls
+		defer res.Body.Close()
+		_, err = io.Copy(ioutil.Discard, res.Body)
+		return nil, err
+
+	default:
+		debug.Log("unexpected HTTP status", res.StatusCode)
+
+		defer res.Body.Close()
+		io.Copy(ioutil.Discard, res.Body) // intentionally discard any error here
+		return nil, ProtocolError(fmt.Sprintf("unexpected mesos HTTP response code: %d", res.StatusCode))
+	}
+
+	return result, nil
+}
+
+// Do is deprecated in favor of Send.
+func (c *Client) Do(m encoding.Marshaler, opt ...RequestOpt) (res mesos.Response, err error) {
+	return c.Send(client.RequestSingleton(m), client.ResponseClassAuto, opt...)
+}
+
+// Send sends a Call and returns (a) a Response (which should be closed when finished) that
+// contains either a streaming or non-streaming Decoder from which callers can read
+// objects, and (b) an error in case of failure. Callers are expected to *always*
+// close a non-nil Response if one is returned. For operations which are successful but
+// also for which there are no expected result objects, the embedded Decoder will be nil.
+// The provided ResponseClass determines whether the client implementation will attempt
+// to decode a result as a single object or as an object stream. When working with
+// versions of Mesos prior to v1.2.x callers MUST use ResponseClassAuto.
+func (c *Client) Send(cr client.Request, rc client.ResponseClass, opt ...RequestOpt) (res mesos.Response, err error) {
+	var (
+		hreq *http.Request
+		hres *http.Response
+	)
+	hreq, err = c.buildRequestFunc(cr, rc, opt...)
+	if err == nil {
+		hres, err = c.do(hreq)
+		res, err = c.handleResponse(hres, rc, err)
+	}
+	return
+}
+
+// ErrorMapper returns an Opt that overrides the existing error mapping behavior of the client.
+func ErrorMapper(em ErrorMapperFunc) Opt {
+	return func(c *Client) Opt {
+		old := c.errorMapper
+		c.errorMapper = em
+		return ErrorMapper(old)
+	}
+}
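+
+// Illustrative sketch of Send (assumptions: `call` is an encoding.Marshaler
+// built by the caller and `event` is a caller-supplied decode target; both
+// names are hypothetical):
+//
+//	resp, err := cli.Send(client.RequestSingleton(call), client.ResponseClassSingleton)
+//	if resp != nil {
+//		defer resp.Close()
+//	}
+//	if err == nil {
+//		err = resp.Decode(event)
+//	}
+//
+// Endpoint returns an Opt that sets a Client's URL.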
+func Endpoint(rawurl string) Opt {
+	return func(c *Client) Opt {
+		old := c.url
+		c.url = rawurl
+		return Endpoint(old)
+	}
+}
+
+// WrapDoer returns an Opt that decorates a Client's DoFunc.
+func WrapDoer(f func(DoFunc) DoFunc) Opt {
+	return func(c *Client) Opt {
+		old := c.do
+		c.do = f(c.do)
+		return Do(old)
+	}
+}
+
+// Do returns an Opt that sets a Client's DoFunc.
+func Do(do DoFunc) Opt {
+	return func(c *Client) Opt {
+		old := c.do
+		c.do = do
+		return Do(old)
+	}
+}
+
+// Codec returns an Opt that sets a Client's Codec.
+func Codec(codec encoding.Codec) Opt {
+	return func(c *Client) Opt {
+		old := c.codec
+		c.codec = codec
+		return Codec(old)
+	}
+}
+
+// DefaultHeader returns an Opt that adds a header to a Client's headers.
+func DefaultHeader(k, v string) Opt {
+	return func(c *Client) Opt {
+		old, found := c.header[k]
+		old = append([]string{}, old...) // clone
+		c.header.Add(k, v)
+		return func(c *Client) Opt {
+			if found {
+				c.header[k] = old
+			} else {
+				c.header.Del(k)
+			}
+			return DefaultHeader(k, v)
+		}
+	}
+}
+
+// HandleResponse returns a functional config option to set the HTTP response handler of the client.
+func HandleResponse(f ResponseHandler) Opt {
+	return func(c *Client) Opt {
+		old := c.handleResponse
+		c.handleResponse = f
+		return HandleResponse(old)
+	}
+}
+
+// RequestOptions returns an Opt that applies the given set of options to every Client request.
+func RequestOptions(opts ...RequestOpt) Opt {
+	if len(opts) == 0 {
+		return nil
+	}
+	return func(c *Client) Opt {
+		old := append([]RequestOpt{}, c.requestOpts...)
+		c.requestOpts = opts
+		return RequestOptions(old...)
+	}
+}
+
+// Header returns a RequestOpt that adds a header value to an HTTP request's header.
+func Header(k, v string) RequestOpt { return func(r *http.Request) { r.Header.Add(k, v) } }
+
+// Close returns a RequestOpt that determines whether to close the underlying connection after sending the request.
+func Close(b bool) RequestOpt { return func(r *http.Request) { r.Close = b } }
+
+// Context returns a RequestOpt that sets the request's Context (ctx must be non-nil).
+func Context(ctx context.Context) RequestOpt {
+	return func(r *http.Request) {
+		r2 := r.WithContext(ctx)
+		*r = *r2
+	}
+}
+
+type Config struct {
+	client    *http.Client
+	dialer    *net.Dialer
+	transport *http.Transport
+}
+
+type ConfigOpt func(*Config)
+
+// With returns a DoFunc that executes HTTP round-trips.
+// The default implementation provides reasonable defaults for timeouts:
+// keep-alive, connection, request/response read/write, and TLS handshake.
+// Callers can customize configuration by specifying one or more ConfigOpts.
+func With(opt ...ConfigOpt) DoFunc {
+	var (
+		dialer = &net.Dialer{
+			LocalAddr: &net.TCPAddr{IP: net.IPv4zero},
+			KeepAlive: 30 * time.Second,
+			Timeout:   5 * time.Second,
+		}
+		transport = &http.Transport{
+			Proxy:                 http.ProxyFromEnvironment,
+			Dial:                  dialer.Dial,
+			ResponseHeaderTimeout: 5 * time.Second,
+			TLSClientConfig:       &tls.Config{InsecureSkipVerify: false},
+			TLSHandshakeTimeout:   5 * time.Second,
+		}
+		config = &Config{
+			dialer:    dialer,
+			transport: transport,
+			client: &http.Client{
+				Transport:     transport,
+				CheckRedirect: noRedirect, // so we can actually see the 307 redirects
+			},
+		}
+	)
+	for _, o := range opt {
+		if o != nil {
+			o(config)
+		}
+	}
+	return config.client.Do
+}
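+
+// Illustrative sketch (not a default of this package): composing ConfigOpts
+// into a custom DoFunc and installing it on a client. The timeout value and
+// TLS settings shown are example choices.
+//
+//	doer := With(
+//		Timeout(30*time.Second),
+//		TLSConfig(&tls.Config{MinVersion: tls.VersionTLS12}),
+//	)
+//	cli.With(Do(doer))
+//
+// Timeout returns a ConfigOpt that sets a Config's response header timeout, TLS handshake timeout,
+// and dialer timeout.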
+func Timeout(d time.Duration) ConfigOpt {
+	return func(c *Config) {
+		c.transport.ResponseHeaderTimeout = d
+		c.transport.TLSHandshakeTimeout = d
+		c.dialer.Timeout = d
+	}
+}
+
+// RoundTripper returns a ConfigOpt that sets a Config's round-tripper.
+func RoundTripper(rt http.RoundTripper) ConfigOpt {
+	return func(c *Config) {
+		c.client.Transport = rt
+	}
+}
+
+// TLSConfig returns a ConfigOpt that sets a Config's TLS configuration.
+func TLSConfig(tc *tls.Config) ConfigOpt {
+	return func(c *Config) {
+		c.transport.TLSClientConfig = tc
+	}
+}
+
+// Transport returns a ConfigOpt that allows tweaks of the default Config's http.Transport.
+func Transport(modifyTransport func(*http.Transport)) ConfigOpt {
+	return func(c *Config) {
+		if modifyTransport != nil {
+			modifyTransport(c.transport)
+		}
+	}
+}
+
+// WrapRoundTripper allows a caller to customize a configuration's HTTP exchanger. Useful
+// for authentication protocols that operate over stock HTTP.
+func WrapRoundTripper(f func(http.RoundTripper) http.RoundTripper) ConfigOpt {
+	return func(c *Config) {
+		if f != nil {
+			if rt := f(c.client.Transport); rt != nil {
+				c.client.Transport = rt
+			}
+		}
+	}
+}
+
+// HTTPRequestHelper wraps an http.Request and provides utility funcs to simplify code elsewhere.
+type HTTPRequestHelper struct {
+	*http.Request
+}
+
+func (r *HTTPRequestHelper) withOptions(optsets ...RequestOpts) *HTTPRequestHelper {
+	for _, opts := range optsets {
+		opts.Apply(r.Request)
+	}
+	return r
+}
+
+func (r *HTTPRequestHelper) withHeaders(hh http.Header) *HTTPRequestHelper {
+	for k, v := range hh {
+		r.Header[k] = v
+		debug.Log("request header " + k + ": " + v[0])
+	}
+	return r
+}
+
+func (r *HTTPRequestHelper) withHeader(key, value string) *HTTPRequestHelper {
+	r.Header.Set(key, value)
+	return r
+}
diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/httpcli/opts.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/httpcli/opts.go
new file mode 100644
index 000000000000..c0f3ef2b180d
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/httpcli/opts.go
@@ -0,0 +1,53 @@
+package httpcli
+
+type (
+	// Opt defines a functional option for the HTTP client type. A functional option
+	// must return an Opt that acts as an "undo" if applied to the same Client.
+	Opt func(*Client) Opt
+	// Opts represents a series of functional options.
+	Opts []Opt
+)
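+
+// Illustrative sketch of the undo contract (the standby URL is a placeholder
+// assumption): applying an Opt yields its inverse, so a caller can flip a
+// setting and later restore it. WithTemporary wraps exactly this pattern
+// around a callback.
+//
+//	undo := cli.With(Endpoint("http://standby.example:5050/api/v1"))
+//	// ... issue calls against the standby ...
+//	cli.With(undo) // restores the previous endpoint
+//
+// Apply is a nil-safe application of an Opt: if the receiver is nil then this func
+// simply returns nil, otherwise it returns the result of invoking the receiving Opt
+// with the given Client.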
+func (o Opt) Apply(c *Client) (result Opt) { + if o != nil { + result = o(c) + } + return +} + +// Merged generates a single Opt that applies all the functional options, in-order +func (opts Opts) Merged() Opt { + if len(opts) == 0 { + return nil + } + return func(c *Client) Opt { + var ( + size = len(opts) + undo = make(Opts, size) + ) + size-- // make this a zero-based offset + for i, opt := range opts { + if opt != nil { + undo[size-i] = opt(c) + } + } + return undo.Merged() + } +} + +// And combines two functional options into a single Opt +func (o Opt) And(other Opt) Opt { + if o == nil { + if other == nil { + return nil + } + return other + } + if other == nil { + return o + } + return Opts{o, other}.Merged() +} diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/labels.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/labels.go new file mode 100644 index 000000000000..ecf50553640b --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/labels.go @@ -0,0 +1,95 @@ +package mesos + +import ( + "bytes" + "io" +) + +type labelList []Label // convenience type, for working with unwrapped Label slices + +// Equivalent returns true if left and right have the same labels. Order is not important. +func (left *Labels) Equivalent(right *Labels) bool { + return labelList(left.GetLabels()).Equivalent(labelList(right.GetLabels())) +} + +// Equivalent returns true if left and right have the same labels. Order is not important. +func (left labelList) Equivalent(right labelList) bool { + if len(left) != len(right) { + return false + } else { + for i := range left { + found := false + for j := range right { + if left[i].Equivalent(right[j]) { + found = true + break + } + } + if !found { + return false + } + } + return true + } +} + +// Equivalent returns true if left and right represent the same Label. +func (left Label) Equivalent(right Label) bool { + if left.Key != right.Key { + return false + } + if left.Value == nil { + return right.Value == nil + } else { + return right.Value != nil && *left.Value == *right.Value + } +} + +func (left Label) writeTo(w io.Writer) (n int64, err error) { + write := func(s string) { + if err != nil { + return + } + var n2 int + n2, err = io.WriteString(w, s) + n += int64(n2) + } + write(left.Key) + if s := left.GetValue(); s != "" { + write("=") + write(s) + } + return +} + +func (left *Labels) writeTo(w io.Writer) (n int64, err error) { + var ( + lab = left.GetLabels() + n2 int + n3 int64 + ) + for i := range lab { + if i > 0 { + n2, err = io.WriteString(w, ",") + n += int64(n2) + if err != nil { + break + } + } + n3, err = lab[i].writeTo(w) + n += n3 + if err != nil { + break + } + } + return +} + +func (left *Labels) Format() string { + if left == nil { + return "" + } + var b bytes.Buffer + left.writeTo(&b) + return b.String() +} diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/mesos.pb.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/mesos.pb.go new file mode 100644 index 000000000000..45ff81991835 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/mesos.pb.go @@ -0,0 +1,72498 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: mesos.proto + +/* + Package mesos is a generated protocol buffer package. 
+ + It is generated from these files: + mesos.proto + + It has these top-level messages: + FrameworkID + OfferID + AgentID + TaskID + ExecutorID + ContainerID + ResourceProviderID + OperationID + TimeInfo + DurationInfo + Address + URL + Unavailability + MachineID + MachineInfo + FrameworkInfo + CheckInfo + HealthCheck + KillPolicy + CommandInfo + ExecutorInfo + DomainInfo + MasterInfo + AgentInfo + CSIPluginContainerInfo + CSIPluginInfo + ResourceProviderInfo + Value + Attribute + Resource + TrafficControlStatistics + IpStatistics + IcmpStatistics + TcpStatistics + UdpStatistics + SNMPStatistics + DiskStatistics + ResourceStatistics + ResourceUsage + PerfStatistics + Request + Offer + InverseOffer + TaskInfo + TaskGroupInfo + Task + TaskResourceLimitation + UUID + Operation + OperationStatus + CheckStatusInfo + TaskStatus + Filters + Environment + Parameter + Parameters + Credential + Credentials + Secret + RateLimit + RateLimits + Image + MountPropagation + Volume + NetworkInfo + CapabilityInfo + LinuxInfo + RLimitInfo + TTYInfo + ContainerInfo + ContainerStatus + CgroupInfo + Labels + Label + Port + Ports + DiscoveryInfo + WeightInfo + VersionInfo + Flag + Role + Metric + FileInfo + Device + DeviceAccess + DeviceWhitelist +*/ +package mesos + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import strconv "strconv" + +import bytes "bytes" + +import strings "strings" +import reflect "reflect" + +import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// * +// Status is used to indicate the state of the scheduler and executor +// driver after function calls. +type Status int32 + +const ( + DRIVER_NOT_STARTED Status = 1 + DRIVER_RUNNING Status = 2 + DRIVER_ABORTED Status = 3 + DRIVER_STOPPED Status = 4 +) + +var Status_name = map[int32]string{ + 1: "DRIVER_NOT_STARTED", + 2: "DRIVER_RUNNING", + 3: "DRIVER_ABORTED", + 4: "DRIVER_STOPPED", +} +var Status_value = map[string]int32{ + "DRIVER_NOT_STARTED": 1, + "DRIVER_RUNNING": 2, + "DRIVER_ABORTED": 3, + "DRIVER_STOPPED": 4, +} + +func (x Status) Enum() *Status { + p := new(Status) + *p = x + return p +} +func (x Status) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(Status_name, int32(x)) +} +func (x *Status) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Status_value, data, "Status") + if err != nil { + return err + } + *x = Status(value) + return nil +} +func (Status) EnumDescriptor() ([]byte, []int) { return fileDescriptorMesos, []int{0} } + +// * +// Describes possible task states. IMPORTANT: Mesos assumes tasks that +// enter terminal states (see below) imply the task is no longer +// running and thus clean up any thing associated with the task +// (ultimately offering any resources being consumed by that task to +// another task). 
+type TaskState int32 + +const ( + TASK_STAGING TaskState = 6 + TASK_STARTING TaskState = 0 + TASK_RUNNING TaskState = 1 + // NOTE: This should only be sent when the framework has + // the TASK_KILLING_STATE capability. + TASK_KILLING TaskState = 8 + // The task finished successfully on its own without external interference. + TASK_FINISHED TaskState = 2 + TASK_FAILED TaskState = 3 + TASK_KILLED TaskState = 4 + TASK_ERROR TaskState = 7 + // In Mesos 1.3, this will only be sent when the framework does NOT + // opt-in to the PARTITION_AWARE capability. + // + // NOTE: This state is not always terminal. For example, tasks might + // transition from TASK_LOST to TASK_RUNNING or other states when a + // partitioned agent reregisters. + TASK_LOST TaskState = 5 + // The task failed to launch because of a transient error. The + // task's executor never started running. Unlike TASK_ERROR, the + // task description is valid -- attempting to launch the task again + // may be successful. + TASK_DROPPED TaskState = 9 + // The task was running on an agent that has lost contact with the + // master, typically due to a network failure or partition. The task + // may or may not still be running. + TASK_UNREACHABLE TaskState = 10 + // The task is no longer running. This can occur if the agent has + // been terminated along with all of its tasks (e.g., the host that + // was running the agent was rebooted). It might also occur if the + // task was terminated due to an agent or containerizer error, or if + // the task was preempted by the QoS controller in an + // oversubscription scenario. + TASK_GONE TaskState = 11 + // The task was running on an agent that the master cannot contact; + // the operator has asserted that the agent has been shutdown, but + // this has not been directly confirmed by the master. If the + // operator is correct, the task is not running and this is a + // terminal state; if the operator is mistaken, the task may still + // be running and might return to RUNNING in the future. + TASK_GONE_BY_OPERATOR TaskState = 12 + // The master has no knowledge of the task. This is typically + // because either (a) the master never had knowledge of the task, or + // (b) the master forgot about the task because it garbage collected + // its metadata about the task. The task may or may not still be + // running. 
+ TASK_UNKNOWN TaskState = 13 +) + +var TaskState_name = map[int32]string{ + 6: "TASK_STAGING", + 0: "TASK_STARTING", + 1: "TASK_RUNNING", + 8: "TASK_KILLING", + 2: "TASK_FINISHED", + 3: "TASK_FAILED", + 4: "TASK_KILLED", + 7: "TASK_ERROR", + 5: "TASK_LOST", + 9: "TASK_DROPPED", + 10: "TASK_UNREACHABLE", + 11: "TASK_GONE", + 12: "TASK_GONE_BY_OPERATOR", + 13: "TASK_UNKNOWN", +} +var TaskState_value = map[string]int32{ + "TASK_STAGING": 6, + "TASK_STARTING": 0, + "TASK_RUNNING": 1, + "TASK_KILLING": 8, + "TASK_FINISHED": 2, + "TASK_FAILED": 3, + "TASK_KILLED": 4, + "TASK_ERROR": 7, + "TASK_LOST": 5, + "TASK_DROPPED": 9, + "TASK_UNREACHABLE": 10, + "TASK_GONE": 11, + "TASK_GONE_BY_OPERATOR": 12, + "TASK_UNKNOWN": 13, +} + +func (x TaskState) Enum() *TaskState { + p := new(TaskState) + *p = x + return p +} +func (x TaskState) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(TaskState_name, int32(x)) +} +func (x *TaskState) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(TaskState_value, data, "TaskState") + if err != nil { + return err + } + *x = TaskState(value) + return nil +} +func (TaskState) EnumDescriptor() ([]byte, []int) { return fileDescriptorMesos, []int{1} } + +// * +// Describes possible operation states. +type OperationState int32 + +const ( + // Default value if the enum is not set. See MESOS-4997. + OPERATION_UNSUPPORTED OperationState = 0 + // Initial state. + OPERATION_PENDING OperationState = 1 + // TERMINAL: The operation was successfully applied. + OPERATION_FINISHED OperationState = 2 + // TERMINAL: The operation failed to apply. + OPERATION_FAILED OperationState = 3 + // TERMINAL: The operation description contains an error. + OPERATION_ERROR OperationState = 4 + // TERMINAL: The operation was dropped due to a transient error. + OPERATION_DROPPED OperationState = 5 + // The operation affects an agent that has lost contact with the master, + // typically due to a network failure or partition. The operation may or may + // not still be pending. + OPERATION_UNREACHABLE OperationState = 6 + // The operation affected an agent that the master cannot contact; + // the operator has asserted that the agent has been shutdown, but this has + // not been directly confirmed by the master. + // + // If the operator is correct, the operation is not pending and this is a + // terminal state; if the operator is mistaken, the operation may still be + // pending and might return to a different state in the future. + OPERATION_GONE_BY_OPERATOR OperationState = 7 + // The operation affects an agent that the master recovered from its + // state, but that agent has not yet re-registered. + // + // The operation can transition to `OPERATION_UNREACHABLE` if the + // corresponding agent is marked as unreachable, and will transition to + // another status if the agent re-registers. + OPERATION_RECOVERING OperationState = 8 + // The master has no knowledge of the operation. This is typically + // because either (a) the master never had knowledge of the operation, or + // (b) the master forgot about the operation because it garbage collected + // its metadata about the operation. The operation may or may not still be + // pending. 
+ OPERATION_UNKNOWN OperationState = 9 +) + +var OperationState_name = map[int32]string{ + 0: "OPERATION_UNSUPPORTED", + 1: "OPERATION_PENDING", + 2: "OPERATION_FINISHED", + 3: "OPERATION_FAILED", + 4: "OPERATION_ERROR", + 5: "OPERATION_DROPPED", + 6: "OPERATION_UNREACHABLE", + 7: "OPERATION_GONE_BY_OPERATOR", + 8: "OPERATION_RECOVERING", + 9: "OPERATION_UNKNOWN", +} +var OperationState_value = map[string]int32{ + "OPERATION_UNSUPPORTED": 0, + "OPERATION_PENDING": 1, + "OPERATION_FINISHED": 2, + "OPERATION_FAILED": 3, + "OPERATION_ERROR": 4, + "OPERATION_DROPPED": 5, + "OPERATION_UNREACHABLE": 6, + "OPERATION_GONE_BY_OPERATOR": 7, + "OPERATION_RECOVERING": 8, + "OPERATION_UNKNOWN": 9, +} + +func (x OperationState) Enum() *OperationState { + p := new(OperationState) + *p = x + return p +} +func (x OperationState) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(OperationState_name, int32(x)) +} +func (x *OperationState) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(OperationState_value, data, "OperationState") + if err != nil { + return err + } + *x = OperationState(value) + return nil +} +func (OperationState) EnumDescriptor() ([]byte, []int) { return fileDescriptorMesos, []int{2} } + +// Describes the several states that a machine can be in. A `Mode` +// applies to a machine and to all associated agents on the machine. +type MachineInfo_Mode int32 + +const ( + // In this mode, a machine is behaving normally; + // offering resources, executing tasks, etc. + UP MachineInfo_Mode = 1 + // In this mode, all agents on the machine are expected to cooperate with + // frameworks to drain resources. In general, draining is done ahead of + // a pending `unavailability`. The resources should be drained so as to + // maximize utilization prior to the maintenance but without knowingly + // violating the frameworks' requirements. + DRAINING MachineInfo_Mode = 2 + // In this mode, a machine is not running any tasks and will not offer + // any of its resources. Agents on the machine will not be allowed to + // register with the master. + DOWN MachineInfo_Mode = 3 +) + +var MachineInfo_Mode_name = map[int32]string{ + 1: "UP", + 2: "DRAINING", + 3: "DOWN", +} +var MachineInfo_Mode_value = map[string]int32{ + "UP": 1, + "DRAINING": 2, + "DOWN": 3, +} + +func (x MachineInfo_Mode) Enum() *MachineInfo_Mode { + p := new(MachineInfo_Mode) + *p = x + return p +} +func (x MachineInfo_Mode) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(MachineInfo_Mode_name, int32(x)) +} +func (x *MachineInfo_Mode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MachineInfo_Mode_value, data, "MachineInfo_Mode") + if err != nil { + return err + } + *x = MachineInfo_Mode(value) + return nil +} +func (MachineInfo_Mode) EnumDescriptor() ([]byte, []int) { return fileDescriptorMesos, []int{14, 0} } + +type FrameworkInfo_Capability_Type int32 + +const ( + // This must be the first enum value in this list, to + // ensure that if 'type' is not set, the default value + // is UNKNOWN. This enables enum values to be added + // in a backwards-compatible way. See: MESOS-4997. + FrameworkInfo_Capability_UNKNOWN FrameworkInfo_Capability_Type = 0 + // Receive offers with revocable resources. See 'Resource' + // message for details. + FrameworkInfo_Capability_REVOCABLE_RESOURCES FrameworkInfo_Capability_Type = 1 + // Receive the TASK_KILLING TaskState when a task is being + // killed by an executor. 
The executor will examine this + // capability to determine whether it can send TASK_KILLING. + FrameworkInfo_Capability_TASK_KILLING_STATE FrameworkInfo_Capability_Type = 2 + // Indicates whether the framework is aware of GPU resources. + // Frameworks that are aware of GPU resources are expected to + // avoid placing non-GPU workloads on GPU agents, in order + // to avoid occupying a GPU agent and preventing GPU workloads + // from running! Currently, if a framework is unaware of GPU + // resources, it will not be offered *any* of the resources on + // an agent with GPUs. This restriction is in place because we + // do not have a revocation mechanism that ensures GPU workloads + // can evict GPU agent occupants if necessary. + // + // TODO(bmahler): As we add revocation we can relax the + // restriction here. See MESOS-5634 for more information. + FrameworkInfo_Capability_GPU_RESOURCES FrameworkInfo_Capability_Type = 3 + // Receive offers with resources that are shared. + FrameworkInfo_Capability_SHARED_RESOURCES FrameworkInfo_Capability_Type = 4 + // Indicates that (1) the framework is prepared to handle the + // following TaskStates: TASK_UNREACHABLE, TASK_DROPPED, + // TASK_GONE, TASK_GONE_BY_OPERATOR, and TASK_UNKNOWN, and (2) + // the framework will assume responsibility for managing + // partitioned tasks that reregister with the master. + // + // Frameworks that enable this capability can define how they + // would like to handle partitioned tasks. Frameworks will + // receive TASK_UNREACHABLE for tasks on agents that are + // partitioned from the master. + // + // Without this capability, frameworks will receive TASK_LOST + // for tasks on partitioned agents. + // NOTE: Prior to Mesos 1.5, such tasks will be killed by Mesos + // when the agent reregisters (unless the master has failed over). + // However due to the lack of benefit in maintaining different + // behaviors depending on whether the master has failed over + // (see MESOS-7215), as of 1.5, Mesos will not kill these + // tasks in either case. + FrameworkInfo_Capability_PARTITION_AWARE FrameworkInfo_Capability_Type = 5 + // This expresses the ability for the framework to be + // "multi-tenant" via using the newly introduced `roles` + // field, and examining `Offer.allocation_info` to determine + // which role the offers are being made to. We also + // expect that "single-tenant" schedulers eventually + // provide this and move away from the deprecated + // `role` field. + FrameworkInfo_Capability_MULTI_ROLE FrameworkInfo_Capability_Type = 6 + // This capability has two effects for a framework. + // + // (1) The framework is offered resources in a new format. + // + // The offered resources have the `Resource.reservations` field set + // rather than `Resource.role` and `Resource.reservation`. In short, + // an empty `reservations` field denotes unreserved resources, and + // each `ReservationInfo` in the `reservations` field denotes a + // reservation that refines the previous one. + // + // See the 'Resource Format' section for more details. + // + // (2) The framework can create refined reservations. + // + // A framework can refine an existing reservation via the + // `Resource.reservations` field. For example, a reservation for role + // `eng` can be refined to `eng/front_end`. + // + // See `ReservationInfo.reservations` for more details. + // + // NOTE: Without this capability, a framework is not offered resources + // that have refined reservations. 
A resource is said to have refined + // reservations if it uses the `Resource.reservations` field, and + // `Resource.reservations_size() > 1`. + FrameworkInfo_Capability_RESERVATION_REFINEMENT FrameworkInfo_Capability_Type = 7 + // Indicates that the framework is prepared to receive offers + // for agents whose region is different from the master's + // region. Network links between hosts in different regions + // typically have higher latency and lower bandwidth than + // network links within a region, so frameworks should be + // careful to only place suitable workloads in remote regions. + // Frameworks that are not region-aware will never receive + // offers for remote agents; region-aware frameworks are assumed + // to implement their own logic to decide which workloads (if + // any) are suitable for placement on remote agents. + FrameworkInfo_Capability_REGION_AWARE FrameworkInfo_Capability_Type = 8 +) + +var FrameworkInfo_Capability_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "REVOCABLE_RESOURCES", + 2: "TASK_KILLING_STATE", + 3: "GPU_RESOURCES", + 4: "SHARED_RESOURCES", + 5: "PARTITION_AWARE", + 6: "MULTI_ROLE", + 7: "RESERVATION_REFINEMENT", + 8: "REGION_AWARE", +} +var FrameworkInfo_Capability_Type_value = map[string]int32{ + "UNKNOWN": 0, + "REVOCABLE_RESOURCES": 1, + "TASK_KILLING_STATE": 2, + "GPU_RESOURCES": 3, + "SHARED_RESOURCES": 4, + "PARTITION_AWARE": 5, + "MULTI_ROLE": 6, + "RESERVATION_REFINEMENT": 7, + "REGION_AWARE": 8, +} + +func (x FrameworkInfo_Capability_Type) Enum() *FrameworkInfo_Capability_Type { + p := new(FrameworkInfo_Capability_Type) + *p = x + return p +} +func (x FrameworkInfo_Capability_Type) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(FrameworkInfo_Capability_Type_name, int32(x)) +} +func (x *FrameworkInfo_Capability_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FrameworkInfo_Capability_Type_value, data, "FrameworkInfo_Capability_Type") + if err != nil { + return err + } + *x = FrameworkInfo_Capability_Type(value) + return nil +} +func (FrameworkInfo_Capability_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{15, 0, 0} +} + +type CheckInfo_Type int32 + +const ( + CheckInfo_UNKNOWN CheckInfo_Type = 0 + CheckInfo_COMMAND CheckInfo_Type = 1 + CheckInfo_HTTP CheckInfo_Type = 2 + CheckInfo_TCP CheckInfo_Type = 3 +) + +var CheckInfo_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "COMMAND", + 2: "HTTP", + 3: "TCP", +} +var CheckInfo_Type_value = map[string]int32{ + "UNKNOWN": 0, + "COMMAND": 1, + "HTTP": 2, + "TCP": 3, +} + +func (x CheckInfo_Type) Enum() *CheckInfo_Type { + p := new(CheckInfo_Type) + *p = x + return p +} +func (x CheckInfo_Type) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(CheckInfo_Type_name, int32(x)) +} +func (x *CheckInfo_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CheckInfo_Type_value, data, "CheckInfo_Type") + if err != nil { + return err + } + *x = CheckInfo_Type(value) + return nil +} +func (CheckInfo_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorMesos, []int{16, 0} } + +type HealthCheck_Type int32 + +const ( + HealthCheck_UNKNOWN HealthCheck_Type = 0 + HealthCheck_COMMAND HealthCheck_Type = 1 + HealthCheck_HTTP HealthCheck_Type = 2 + HealthCheck_TCP HealthCheck_Type = 3 +) + +var HealthCheck_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "COMMAND", + 2: "HTTP", + 3: "TCP", +} +var HealthCheck_Type_value = map[string]int32{ + "UNKNOWN": 0, + "COMMAND": 1, + "HTTP": 2, 
+ "TCP": 3, +} + +func (x HealthCheck_Type) Enum() *HealthCheck_Type { + p := new(HealthCheck_Type) + *p = x + return p +} +func (x HealthCheck_Type) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(HealthCheck_Type_name, int32(x)) +} +func (x *HealthCheck_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(HealthCheck_Type_value, data, "HealthCheck_Type") + if err != nil { + return err + } + *x = HealthCheck_Type(value) + return nil +} +func (HealthCheck_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorMesos, []int{17, 0} } + +type ExecutorInfo_Type int32 + +const ( + ExecutorInfo_UNKNOWN ExecutorInfo_Type = 0 + // Mesos provides a simple built-in default executor that frameworks can + // leverage to run shell commands and containers. + // + // NOTES: + // + // 1) `command` must not be set when using a default executor. + // + // 2) Default executor only accepts a *single* `LAUNCH` or `LAUNCH_GROUP` + // operation. + // + // 3) If `container` is set, `container.type` must be `MESOS` + // and `container.mesos.image` must not be set. + ExecutorInfo_DEFAULT ExecutorInfo_Type = 1 + // For frameworks that need custom functionality to run tasks, a `CUSTOM` + // executor can be used. Note that `command` must be set when using a + // `CUSTOM` executor. + ExecutorInfo_CUSTOM ExecutorInfo_Type = 2 +) + +var ExecutorInfo_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "DEFAULT", + 2: "CUSTOM", +} +var ExecutorInfo_Type_value = map[string]int32{ + "UNKNOWN": 0, + "DEFAULT": 1, + "CUSTOM": 2, +} + +func (x ExecutorInfo_Type) Enum() *ExecutorInfo_Type { + p := new(ExecutorInfo_Type) + *p = x + return p +} +func (x ExecutorInfo_Type) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(ExecutorInfo_Type_name, int32(x)) +} +func (x *ExecutorInfo_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ExecutorInfo_Type_value, data, "ExecutorInfo_Type") + if err != nil { + return err + } + *x = ExecutorInfo_Type(value) + return nil +} +func (ExecutorInfo_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorMesos, []int{20, 0} } + +type MasterInfo_Capability_Type int32 + +const ( + MasterInfo_Capability_UNKNOWN MasterInfo_Capability_Type = 0 + // The master can handle slaves whose state + // changes after reregistering. + MasterInfo_Capability_AGENT_UPDATE MasterInfo_Capability_Type = 1 +) + +var MasterInfo_Capability_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "AGENT_UPDATE", +} +var MasterInfo_Capability_Type_value = map[string]int32{ + "UNKNOWN": 0, + "AGENT_UPDATE": 1, +} + +func (x MasterInfo_Capability_Type) Enum() *MasterInfo_Capability_Type { + p := new(MasterInfo_Capability_Type) + *p = x + return p +} +func (x MasterInfo_Capability_Type) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(MasterInfo_Capability_Type_name, int32(x)) +} +func (x *MasterInfo_Capability_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MasterInfo_Capability_Type_value, data, "MasterInfo_Capability_Type") + if err != nil { + return err + } + *x = MasterInfo_Capability_Type(value) + return nil +} +func (MasterInfo_Capability_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{22, 0, 0} +} + +type AgentInfo_Capability_Type int32 + +const ( + // This must be the first enum value in this list, to + // ensure that if 'type' is not set, the default value + // is UNKNOWN. This enables enum values to be added + // in a backwards-compatible way. 
See: MESOS-4997. + AgentInfo_Capability_UNKNOWN AgentInfo_Capability_Type = 0 + // This expresses the ability for the agent to be able + // to launch tasks of a 'multi-role' framework. + AgentInfo_Capability_MULTI_ROLE AgentInfo_Capability_Type = 1 + // This expresses the ability for the agent to be able to launch + // tasks, reserve resources, and create volumes using resources + // allocated to a 'hierarchical-role'. + // NOTE: This capability is required specifically for creating + // volumes because a hierchical role includes '/' (slashes) in them. + // Agents with this capability know to transform the '/' (slashes) + // into ' ' (spaces). + AgentInfo_Capability_HIERARCHICAL_ROLE AgentInfo_Capability_Type = 2 + // This capability has three effects for an agent. + // + // (1) The format of the checkpointed resources, and + // the resources reported to master. + // + // These resources are reported in the "pre-reservation-refinement" + // format if none of the resources have refined reservations. If any + // of the resources have refined reservations, they are reported in + // the "post-reservation-refinement" format. The purpose is to allow + // downgrading of an agent as well as communication with a pre-1.4.0 + // master until the reservation refinement feature is actually used. + // + // See the 'Resource Format' section for more details. + // + // (2) The format of the resources reported by the HTTP endpoints. + // + // For resources reported by agent endpoints, the + // "pre-reservation-refinement" format is "injected" if possible. + // That is, resources without refined reservations will have the + // `Resource.role` and `Resource.reservation` set, whereas + // resources with refined reservations will not. + // + // See the 'Resource Format' section for more details. + // + // (3) The ability for the agent to launch tasks, reserve resources, and + // create volumes using resources that have refined reservations. + // + // See `ReservationInfo.reservations` section for more details. + // + // NOTE: Resources are said to have refined reservations if it uses the + // `Resource.reservations` field, and `Resource.reservations_size() > 1`. + AgentInfo_Capability_RESERVATION_REFINEMENT AgentInfo_Capability_Type = 3 + // This expresses the ability for the agent to handle resource + // provider related operations. This includes the following: + // + // (1) The ability to report resources that are provided by some + // local resource providers through the resource provider API. + // + // (2) The ability to provide operation feedback. + AgentInfo_Capability_RESOURCE_PROVIDER AgentInfo_Capability_Type = 4 + // This expresses the capability for the agent to handle persistent volume + // resize operations safely. This capability is turned on by default. 
+ AgentInfo_Capability_RESIZE_VOLUME AgentInfo_Capability_Type = 5 +) + +var AgentInfo_Capability_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "MULTI_ROLE", + 2: "HIERARCHICAL_ROLE", + 3: "RESERVATION_REFINEMENT", + 4: "RESOURCE_PROVIDER", + 5: "RESIZE_VOLUME", +} +var AgentInfo_Capability_Type_value = map[string]int32{ + "UNKNOWN": 0, + "MULTI_ROLE": 1, + "HIERARCHICAL_ROLE": 2, + "RESERVATION_REFINEMENT": 3, + "RESOURCE_PROVIDER": 4, + "RESIZE_VOLUME": 5, +} + +func (x AgentInfo_Capability_Type) Enum() *AgentInfo_Capability_Type { + p := new(AgentInfo_Capability_Type) + *p = x + return p +} +func (x AgentInfo_Capability_Type) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(AgentInfo_Capability_Type_name, int32(x)) +} +func (x *AgentInfo_Capability_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(AgentInfo_Capability_Type_value, data, "AgentInfo_Capability_Type") + if err != nil { + return err + } + *x = AgentInfo_Capability_Type(value) + return nil +} +func (AgentInfo_Capability_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{23, 0, 0} +} + +type CSIPluginContainerInfo_Service int32 + +const ( + CSIPluginContainerInfo_UNKNOWN CSIPluginContainerInfo_Service = 0 + CSIPluginContainerInfo_CONTROLLER_SERVICE CSIPluginContainerInfo_Service = 1 + CSIPluginContainerInfo_NODE_SERVICE CSIPluginContainerInfo_Service = 2 +) + +var CSIPluginContainerInfo_Service_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CONTROLLER_SERVICE", + 2: "NODE_SERVICE", +} +var CSIPluginContainerInfo_Service_value = map[string]int32{ + "UNKNOWN": 0, + "CONTROLLER_SERVICE": 1, + "NODE_SERVICE": 2, +} + +func (x CSIPluginContainerInfo_Service) Enum() *CSIPluginContainerInfo_Service { + p := new(CSIPluginContainerInfo_Service) + *p = x + return p +} +func (x CSIPluginContainerInfo_Service) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(CSIPluginContainerInfo_Service_name, int32(x)) +} +func (x *CSIPluginContainerInfo_Service) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CSIPluginContainerInfo_Service_value, data, "CSIPluginContainerInfo_Service") + if err != nil { + return err + } + *x = CSIPluginContainerInfo_Service(value) + return nil +} +func (CSIPluginContainerInfo_Service) EnumDescriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{24, 0} +} + +type Value_Type int32 + +const ( + SCALAR Value_Type = 0 + RANGES Value_Type = 1 + SET Value_Type = 2 + TEXT Value_Type = 3 +) + +var Value_Type_name = map[int32]string{ + 0: "SCALAR", + 1: "RANGES", + 2: "SET", + 3: "TEXT", +} +var Value_Type_value = map[string]int32{ + "SCALAR": 0, + "RANGES": 1, + "SET": 2, + "TEXT": 3, +} + +func (x Value_Type) Enum() *Value_Type { + p := new(Value_Type) + *p = x + return p +} +func (x Value_Type) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(Value_Type_name, int32(x)) +} +func (x *Value_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Value_Type_value, data, "Value_Type") + if err != nil { + return err + } + *x = Value_Type(value) + return nil +} +func (Value_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorMesos, []int{27, 0} } + +type Resource_ReservationInfo_Type int32 + +const ( + Resource_ReservationInfo_UNKNOWN Resource_ReservationInfo_Type = 0 + Resource_ReservationInfo_STATIC Resource_ReservationInfo_Type = 1 + Resource_ReservationInfo_DYNAMIC Resource_ReservationInfo_Type = 2 +) + +var Resource_ReservationInfo_Type_name = 
map[int32]string{ + 0: "UNKNOWN", + 1: "STATIC", + 2: "DYNAMIC", +} +var Resource_ReservationInfo_Type_value = map[string]int32{ + "UNKNOWN": 0, + "STATIC": 1, + "DYNAMIC": 2, +} + +func (x Resource_ReservationInfo_Type) Enum() *Resource_ReservationInfo_Type { + p := new(Resource_ReservationInfo_Type) + *p = x + return p +} +func (x Resource_ReservationInfo_Type) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(Resource_ReservationInfo_Type_name, int32(x)) +} +func (x *Resource_ReservationInfo_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Resource_ReservationInfo_Type_value, data, "Resource_ReservationInfo_Type") + if err != nil { + return err + } + *x = Resource_ReservationInfo_Type(value) + return nil +} +func (Resource_ReservationInfo_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{29, 1, 0} +} + +type Resource_DiskInfo_Source_Type int32 + +const ( + Resource_DiskInfo_Source_UNKNOWN Resource_DiskInfo_Source_Type = 0 + Resource_DiskInfo_Source_PATH Resource_DiskInfo_Source_Type = 1 + Resource_DiskInfo_Source_MOUNT Resource_DiskInfo_Source_Type = 2 + Resource_DiskInfo_Source_BLOCK Resource_DiskInfo_Source_Type = 3 + Resource_DiskInfo_Source_RAW Resource_DiskInfo_Source_Type = 4 +) + +var Resource_DiskInfo_Source_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "PATH", + 2: "MOUNT", + 3: "BLOCK", + 4: "RAW", +} +var Resource_DiskInfo_Source_Type_value = map[string]int32{ + "UNKNOWN": 0, + "PATH": 1, + "MOUNT": 2, + "BLOCK": 3, + "RAW": 4, +} + +func (x Resource_DiskInfo_Source_Type) Enum() *Resource_DiskInfo_Source_Type { + p := new(Resource_DiskInfo_Source_Type) + *p = x + return p +} +func (x Resource_DiskInfo_Source_Type) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(Resource_DiskInfo_Source_Type_name, int32(x)) +} +func (x *Resource_DiskInfo_Source_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Resource_DiskInfo_Source_Type_value, data, "Resource_DiskInfo_Source_Type") + if err != nil { + return err + } + *x = Resource_DiskInfo_Source_Type(value) + return nil +} +func (Resource_DiskInfo_Source_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{29, 2, 1, 0} +} + +type Offer_Operation_Type int32 + +const ( + Offer_Operation_UNKNOWN Offer_Operation_Type = 0 + Offer_Operation_LAUNCH Offer_Operation_Type = 1 + Offer_Operation_LAUNCH_GROUP Offer_Operation_Type = 6 + Offer_Operation_RESERVE Offer_Operation_Type = 2 + Offer_Operation_UNRESERVE Offer_Operation_Type = 3 + Offer_Operation_CREATE Offer_Operation_Type = 4 + Offer_Operation_DESTROY Offer_Operation_Type = 5 + Offer_Operation_GROW_VOLUME Offer_Operation_Type = 11 + Offer_Operation_SHRINK_VOLUME Offer_Operation_Type = 12 + Offer_Operation_CREATE_DISK Offer_Operation_Type = 13 + Offer_Operation_DESTROY_DISK Offer_Operation_Type = 14 +) + +var Offer_Operation_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "LAUNCH", + 6: "LAUNCH_GROUP", + 2: "RESERVE", + 3: "UNRESERVE", + 4: "CREATE", + 5: "DESTROY", + 11: "GROW_VOLUME", + 12: "SHRINK_VOLUME", + 13: "CREATE_DISK", + 14: "DESTROY_DISK", +} +var Offer_Operation_Type_value = map[string]int32{ + "UNKNOWN": 0, + "LAUNCH": 1, + "LAUNCH_GROUP": 6, + "RESERVE": 2, + "UNRESERVE": 3, + "CREATE": 4, + "DESTROY": 5, + "GROW_VOLUME": 11, + "SHRINK_VOLUME": 12, + "CREATE_DISK": 13, + "DESTROY_DISK": 14, +} + +func (x Offer_Operation_Type) Enum() *Offer_Operation_Type { + p := new(Offer_Operation_Type) + *p = x + return p +} +func (x 
Offer_Operation_Type) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(Offer_Operation_Type_name, int32(x)) +} +func (x *Offer_Operation_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Offer_Operation_Type_value, data, "Offer_Operation_Type") + if err != nil { + return err + } + *x = Offer_Operation_Type(value) + return nil +} +func (Offer_Operation_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{41, 0, 0} +} + +// Describes the source of the task status update. +type TaskStatus_Source int32 + +const ( + SOURCE_MASTER TaskStatus_Source = 0 + SOURCE_AGENT TaskStatus_Source = 1 + SOURCE_EXECUTOR TaskStatus_Source = 2 +) + +var TaskStatus_Source_name = map[int32]string{ + 0: "SOURCE_MASTER", + 1: "SOURCE_AGENT", + 2: "SOURCE_EXECUTOR", +} +var TaskStatus_Source_value = map[string]int32{ + "SOURCE_MASTER": 0, + "SOURCE_AGENT": 1, + "SOURCE_EXECUTOR": 2, +} + +func (x TaskStatus_Source) Enum() *TaskStatus_Source { + p := new(TaskStatus_Source) + *p = x + return p +} +func (x TaskStatus_Source) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(TaskStatus_Source_name, int32(x)) +} +func (x *TaskStatus_Source) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(TaskStatus_Source_value, data, "TaskStatus_Source") + if err != nil { + return err + } + *x = TaskStatus_Source(value) + return nil +} +func (TaskStatus_Source) EnumDescriptor() ([]byte, []int) { return fileDescriptorMesos, []int{51, 0} } + +// Detailed reason for the task status update. +// Refer to docs/task-state-reasons.md for additional explanation. +type TaskStatus_Reason int32 + +const ( + // TODO(jieyu): The default value when a caller doesn't check for + // presence is 0 and so ideally the 0 reason is not a valid one. + // Since this is not used anywhere, consider removing this reason. 
+ REASON_COMMAND_EXECUTOR_FAILED TaskStatus_Reason = 0 + REASON_CONTAINER_LAUNCH_FAILED TaskStatus_Reason = 21 + REASON_CONTAINER_LIMITATION TaskStatus_Reason = 19 + REASON_CONTAINER_LIMITATION_DISK TaskStatus_Reason = 20 + REASON_CONTAINER_LIMITATION_MEMORY TaskStatus_Reason = 8 + REASON_CONTAINER_PREEMPTED TaskStatus_Reason = 17 + REASON_CONTAINER_UPDATE_FAILED TaskStatus_Reason = 22 + REASON_MAX_COMPLETION_TIME_REACHED TaskStatus_Reason = 33 + REASON_EXECUTOR_REGISTRATION_TIMEOUT TaskStatus_Reason = 23 + REASON_EXECUTOR_REREGISTRATION_TIMEOUT TaskStatus_Reason = 24 + REASON_EXECUTOR_TERMINATED TaskStatus_Reason = 1 + REASON_EXECUTOR_UNREGISTERED TaskStatus_Reason = 2 + REASON_FRAMEWORK_REMOVED TaskStatus_Reason = 3 + REASON_GC_ERROR TaskStatus_Reason = 4 + REASON_INVALID_FRAMEWORKID TaskStatus_Reason = 5 + REASON_INVALID_OFFERS TaskStatus_Reason = 6 + REASON_IO_SWITCHBOARD_EXITED TaskStatus_Reason = 27 + REASON_MASTER_DISCONNECTED TaskStatus_Reason = 7 + REASON_RECONCILIATION TaskStatus_Reason = 9 + REASON_RESOURCES_UNKNOWN TaskStatus_Reason = 18 + REASON_AGENT_DISCONNECTED TaskStatus_Reason = 10 + REASON_AGENT_REMOVED TaskStatus_Reason = 11 + REASON_AGENT_REMOVED_BY_OPERATOR TaskStatus_Reason = 31 + REASON_AGENT_REREGISTERED TaskStatus_Reason = 32 + REASON_AGENT_RESTARTED TaskStatus_Reason = 12 + REASON_AGENT_UNKNOWN TaskStatus_Reason = 13 + REASON_TASK_KILLED_DURING_LAUNCH TaskStatus_Reason = 30 + REASON_TASK_CHECK_STATUS_UPDATED TaskStatus_Reason = 28 + REASON_TASK_HEALTH_CHECK_STATUS_UPDATED TaskStatus_Reason = 29 + REASON_TASK_GROUP_INVALID TaskStatus_Reason = 25 + REASON_TASK_GROUP_UNAUTHORIZED TaskStatus_Reason = 26 + REASON_TASK_INVALID TaskStatus_Reason = 14 + REASON_TASK_UNAUTHORIZED TaskStatus_Reason = 15 + REASON_TASK_UNKNOWN TaskStatus_Reason = 16 +) + +var TaskStatus_Reason_name = map[int32]string{ + 0: "REASON_COMMAND_EXECUTOR_FAILED", + 21: "REASON_CONTAINER_LAUNCH_FAILED", + 19: "REASON_CONTAINER_LIMITATION", + 20: "REASON_CONTAINER_LIMITATION_DISK", + 8: "REASON_CONTAINER_LIMITATION_MEMORY", + 17: "REASON_CONTAINER_PREEMPTED", + 22: "REASON_CONTAINER_UPDATE_FAILED", + 33: "REASON_MAX_COMPLETION_TIME_REACHED", + 23: "REASON_EXECUTOR_REGISTRATION_TIMEOUT", + 24: "REASON_EXECUTOR_REREGISTRATION_TIMEOUT", + 1: "REASON_EXECUTOR_TERMINATED", + 2: "REASON_EXECUTOR_UNREGISTERED", + 3: "REASON_FRAMEWORK_REMOVED", + 4: "REASON_GC_ERROR", + 5: "REASON_INVALID_FRAMEWORKID", + 6: "REASON_INVALID_OFFERS", + 27: "REASON_IO_SWITCHBOARD_EXITED", + 7: "REASON_MASTER_DISCONNECTED", + 9: "REASON_RECONCILIATION", + 18: "REASON_RESOURCES_UNKNOWN", + 10: "REASON_AGENT_DISCONNECTED", + 11: "REASON_AGENT_REMOVED", + 31: "REASON_AGENT_REMOVED_BY_OPERATOR", + 32: "REASON_AGENT_REREGISTERED", + 12: "REASON_AGENT_RESTARTED", + 13: "REASON_AGENT_UNKNOWN", + 30: "REASON_TASK_KILLED_DURING_LAUNCH", + 28: "REASON_TASK_CHECK_STATUS_UPDATED", + 29: "REASON_TASK_HEALTH_CHECK_STATUS_UPDATED", + 25: "REASON_TASK_GROUP_INVALID", + 26: "REASON_TASK_GROUP_UNAUTHORIZED", + 14: "REASON_TASK_INVALID", + 15: "REASON_TASK_UNAUTHORIZED", + 16: "REASON_TASK_UNKNOWN", +} +var TaskStatus_Reason_value = map[string]int32{ + "REASON_COMMAND_EXECUTOR_FAILED": 0, + "REASON_CONTAINER_LAUNCH_FAILED": 21, + "REASON_CONTAINER_LIMITATION": 19, + "REASON_CONTAINER_LIMITATION_DISK": 20, + "REASON_CONTAINER_LIMITATION_MEMORY": 8, + "REASON_CONTAINER_PREEMPTED": 17, + "REASON_CONTAINER_UPDATE_FAILED": 22, + "REASON_MAX_COMPLETION_TIME_REACHED": 33, + "REASON_EXECUTOR_REGISTRATION_TIMEOUT": 23, + 
"REASON_EXECUTOR_REREGISTRATION_TIMEOUT": 24, + "REASON_EXECUTOR_TERMINATED": 1, + "REASON_EXECUTOR_UNREGISTERED": 2, + "REASON_FRAMEWORK_REMOVED": 3, + "REASON_GC_ERROR": 4, + "REASON_INVALID_FRAMEWORKID": 5, + "REASON_INVALID_OFFERS": 6, + "REASON_IO_SWITCHBOARD_EXITED": 27, + "REASON_MASTER_DISCONNECTED": 7, + "REASON_RECONCILIATION": 9, + "REASON_RESOURCES_UNKNOWN": 18, + "REASON_AGENT_DISCONNECTED": 10, + "REASON_AGENT_REMOVED": 11, + "REASON_AGENT_REMOVED_BY_OPERATOR": 31, + "REASON_AGENT_REREGISTERED": 32, + "REASON_AGENT_RESTARTED": 12, + "REASON_AGENT_UNKNOWN": 13, + "REASON_TASK_KILLED_DURING_LAUNCH": 30, + "REASON_TASK_CHECK_STATUS_UPDATED": 28, + "REASON_TASK_HEALTH_CHECK_STATUS_UPDATED": 29, + "REASON_TASK_GROUP_INVALID": 25, + "REASON_TASK_GROUP_UNAUTHORIZED": 26, + "REASON_TASK_INVALID": 14, + "REASON_TASK_UNAUTHORIZED": 15, + "REASON_TASK_UNKNOWN": 16, +} + +func (x TaskStatus_Reason) Enum() *TaskStatus_Reason { + p := new(TaskStatus_Reason) + *p = x + return p +} +func (x TaskStatus_Reason) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(TaskStatus_Reason_name, int32(x)) +} +func (x *TaskStatus_Reason) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(TaskStatus_Reason_value, data, "TaskStatus_Reason") + if err != nil { + return err + } + *x = TaskStatus_Reason(value) + return nil +} +func (TaskStatus_Reason) EnumDescriptor() ([]byte, []int) { return fileDescriptorMesos, []int{51, 1} } + +type Environment_Variable_Type int32 + +const ( + Environment_Variable_UNKNOWN Environment_Variable_Type = 0 + Environment_Variable_VALUE Environment_Variable_Type = 1 + Environment_Variable_SECRET Environment_Variable_Type = 2 +) + +var Environment_Variable_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "VALUE", + 2: "SECRET", +} +var Environment_Variable_Type_value = map[string]int32{ + "UNKNOWN": 0, + "VALUE": 1, + "SECRET": 2, +} + +func (x Environment_Variable_Type) Enum() *Environment_Variable_Type { + p := new(Environment_Variable_Type) + *p = x + return p +} +func (x Environment_Variable_Type) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(Environment_Variable_Type_name, int32(x)) +} +func (x *Environment_Variable_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Environment_Variable_Type_value, data, "Environment_Variable_Type") + if err != nil { + return err + } + *x = Environment_Variable_Type(value) + return nil +} +func (Environment_Variable_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{53, 0, 0} +} + +type Secret_Type int32 + +const ( + Secret_UNKNOWN Secret_Type = 0 + Secret_REFERENCE Secret_Type = 1 + Secret_VALUE Secret_Type = 2 +) + +var Secret_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "REFERENCE", + 2: "VALUE", +} +var Secret_Type_value = map[string]int32{ + "UNKNOWN": 0, + "REFERENCE": 1, + "VALUE": 2, +} + +func (x Secret_Type) Enum() *Secret_Type { + p := new(Secret_Type) + *p = x + return p +} +func (x Secret_Type) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(Secret_Type_name, int32(x)) +} +func (x *Secret_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Secret_Type_value, data, "Secret_Type") + if err != nil { + return err + } + *x = Secret_Type(value) + return nil +} +func (Secret_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorMesos, []int{58, 0} } + +type Image_Type int32 + +const ( + Image_APPC Image_Type = 1 + Image_DOCKER Image_Type = 2 +) + +var Image_Type_name = 
map[int32]string{ + 1: "APPC", + 2: "DOCKER", +} +var Image_Type_value = map[string]int32{ + "APPC": 1, + "DOCKER": 2, +} + +func (x Image_Type) Enum() *Image_Type { + p := new(Image_Type) + *p = x + return p +} +func (x Image_Type) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(Image_Type_name, int32(x)) +} +func (x *Image_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Image_Type_value, data, "Image_Type") + if err != nil { + return err + } + *x = Image_Type(value) + return nil +} +func (Image_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorMesos, []int{61, 0} } + +type MountPropagation_Mode int32 + +const ( + MountPropagation_UNKNOWN MountPropagation_Mode = 0 + // The volume in a container will receive new mounts from the host + // or other containers, but filesystems mounted inside the + // container won't be propagated to the host or other containers. + // This is currently the default behavior for all volumes. + MountPropagation_HOST_TO_CONTAINER MountPropagation_Mode = 1 + // The volume in a container will receive new mounts from the host + // or other containers, and its own mounts will be propagated from + // the container to the host or other containers. + MountPropagation_BIDIRECTIONAL MountPropagation_Mode = 2 +) + +var MountPropagation_Mode_name = map[int32]string{ + 0: "UNKNOWN", + 1: "HOST_TO_CONTAINER", + 2: "BIDIRECTIONAL", +} +var MountPropagation_Mode_value = map[string]int32{ + "UNKNOWN": 0, + "HOST_TO_CONTAINER": 1, + "BIDIRECTIONAL": 2, +} + +func (x MountPropagation_Mode) Enum() *MountPropagation_Mode { + p := new(MountPropagation_Mode) + *p = x + return p +} +func (x MountPropagation_Mode) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(MountPropagation_Mode_name, int32(x)) +} +func (x *MountPropagation_Mode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MountPropagation_Mode_value, data, "MountPropagation_Mode") + if err != nil { + return err + } + *x = MountPropagation_Mode(value) + return nil +} +func (MountPropagation_Mode) EnumDescriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{62, 0} +} + +type Volume_Mode int32 + +const ( + RW Volume_Mode = 1 + RO Volume_Mode = 2 +) + +var Volume_Mode_name = map[int32]string{ + 1: "RW", + 2: "RO", +} +var Volume_Mode_value = map[string]int32{ + "RW": 1, + "RO": 2, +} + +func (x Volume_Mode) Enum() *Volume_Mode { + p := new(Volume_Mode) + *p = x + return p +} +func (x Volume_Mode) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(Volume_Mode_name, int32(x)) +} +func (x *Volume_Mode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Volume_Mode_value, data, "Volume_Mode") + if err != nil { + return err + } + *x = Volume_Mode(value) + return nil +} +func (Volume_Mode) EnumDescriptor() ([]byte, []int) { return fileDescriptorMesos, []int{63, 0} } + +type Volume_Source_Type int32 + +const ( + // This must be the first enum value in this list, to + // ensure that if 'type' is not set, the default value + // is UNKNOWN. This enables enum values to be added + // in a backwards-compatible way. See: MESOS-4997. + Volume_Source_UNKNOWN Volume_Source_Type = 0 + // TODO(gyliu513): Add IMAGE as volume source type. 
+ Volume_Source_DOCKER_VOLUME Volume_Source_Type = 1 + Volume_Source_HOST_PATH Volume_Source_Type = 4 + Volume_Source_SANDBOX_PATH Volume_Source_Type = 2 + Volume_Source_SECRET Volume_Source_Type = 3 +) + +var Volume_Source_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "DOCKER_VOLUME", + 4: "HOST_PATH", + 2: "SANDBOX_PATH", + 3: "SECRET", +} +var Volume_Source_Type_value = map[string]int32{ + "UNKNOWN": 0, + "DOCKER_VOLUME": 1, + "HOST_PATH": 4, + "SANDBOX_PATH": 2, + "SECRET": 3, +} + +func (x Volume_Source_Type) Enum() *Volume_Source_Type { + p := new(Volume_Source_Type) + *p = x + return p +} +func (x Volume_Source_Type) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(Volume_Source_Type_name, int32(x)) +} +func (x *Volume_Source_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Volume_Source_Type_value, data, "Volume_Source_Type") + if err != nil { + return err + } + *x = Volume_Source_Type(value) + return nil +} +func (Volume_Source_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{63, 0, 0} +} + +type Volume_Source_SandboxPath_Type int32 + +const ( + Volume_Source_SandboxPath_UNKNOWN Volume_Source_SandboxPath_Type = 0 + Volume_Source_SandboxPath_SELF Volume_Source_SandboxPath_Type = 1 + Volume_Source_SandboxPath_PARENT Volume_Source_SandboxPath_Type = 2 +) + +var Volume_Source_SandboxPath_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SELF", + 2: "PARENT", +} +var Volume_Source_SandboxPath_Type_value = map[string]int32{ + "UNKNOWN": 0, + "SELF": 1, + "PARENT": 2, +} + +func (x Volume_Source_SandboxPath_Type) Enum() *Volume_Source_SandboxPath_Type { + p := new(Volume_Source_SandboxPath_Type) + *p = x + return p +} +func (x Volume_Source_SandboxPath_Type) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(Volume_Source_SandboxPath_Type_name, int32(x)) +} +func (x *Volume_Source_SandboxPath_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Volume_Source_SandboxPath_Type_value, data, "Volume_Source_SandboxPath_Type") + if err != nil { + return err + } + *x = Volume_Source_SandboxPath_Type(value) + return nil +} +func (Volume_Source_SandboxPath_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{63, 0, 2, 0} +} + +type NetworkInfo_Protocol int32 + +const ( + IPv4 NetworkInfo_Protocol = 1 + IPv6 NetworkInfo_Protocol = 2 +) + +var NetworkInfo_Protocol_name = map[int32]string{ + 1: "IPv4", + 2: "IPv6", +} +var NetworkInfo_Protocol_value = map[string]int32{ + "IPv4": 1, + "IPv6": 2, +} + +func (x NetworkInfo_Protocol) Enum() *NetworkInfo_Protocol { + p := new(NetworkInfo_Protocol) + *p = x + return p +} +func (x NetworkInfo_Protocol) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(NetworkInfo_Protocol_name, int32(x)) +} +func (x *NetworkInfo_Protocol) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(NetworkInfo_Protocol_value, data, "NetworkInfo_Protocol") + if err != nil { + return err + } + *x = NetworkInfo_Protocol(value) + return nil +} +func (NetworkInfo_Protocol) EnumDescriptor() ([]byte, []int) { return fileDescriptorMesos, []int{64, 0} } + +// We start the actual values at an offset(1000) because Protobuf 2 +// uses the first value as the default one. Separating the default +// value from the real first value helps to disambiguate them. This +// is especially valuable for backward compatibility. +// See: MESOS-4997. 
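// Illustrative sketch, not part of the generated file: every enum in this
// package round-trips through JSON by symbolic name, using the *_name and
// *_value maps above via proto.MarshalJSONEnum/UnmarshalJSONEnum. The
// import path below is an assumption about where this package is vendored.
package main

import (
	"fmt"

	mesos "github.com/mesos/mesos-go/api/v1/lib" // assumed vendor path
)

func main() {
	// MarshalJSON emits the name from Volume_Source_Type_name.
	b, err := mesos.Volume_Source_DOCKER_VOLUME.MarshalJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // "DOCKER_VOLUME"

	// UnmarshalJSON resolves the name through Volume_Source_Type_value.
	var t mesos.Volume_Source_Type
	if err := t.UnmarshalJSON([]byte(`"HOST_PATH"`)); err != nil {
		panic(err)
	}
	fmt.Println(t == mesos.Volume_Source_HOST_PATH) // true
}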
+type CapabilityInfo_Capability int32 + +const ( + CapabilityInfo_UNKNOWN CapabilityInfo_Capability = 0 + CapabilityInfo_CHOWN CapabilityInfo_Capability = 1000 + CapabilityInfo_DAC_OVERRIDE CapabilityInfo_Capability = 1001 + CapabilityInfo_DAC_READ_SEARCH CapabilityInfo_Capability = 1002 + CapabilityInfo_FOWNER CapabilityInfo_Capability = 1003 + CapabilityInfo_FSETID CapabilityInfo_Capability = 1004 + CapabilityInfo_KILL CapabilityInfo_Capability = 1005 + CapabilityInfo_SETGID CapabilityInfo_Capability = 1006 + CapabilityInfo_SETUID CapabilityInfo_Capability = 1007 + CapabilityInfo_SETPCAP CapabilityInfo_Capability = 1008 + CapabilityInfo_LINUX_IMMUTABLE CapabilityInfo_Capability = 1009 + CapabilityInfo_NET_BIND_SERVICE CapabilityInfo_Capability = 1010 + CapabilityInfo_NET_BROADCAST CapabilityInfo_Capability = 1011 + CapabilityInfo_NET_ADMIN CapabilityInfo_Capability = 1012 + CapabilityInfo_NET_RAW CapabilityInfo_Capability = 1013 + CapabilityInfo_IPC_LOCK CapabilityInfo_Capability = 1014 + CapabilityInfo_IPC_OWNER CapabilityInfo_Capability = 1015 + CapabilityInfo_SYS_MODULE CapabilityInfo_Capability = 1016 + CapabilityInfo_SYS_RAWIO CapabilityInfo_Capability = 1017 + CapabilityInfo_SYS_CHROOT CapabilityInfo_Capability = 1018 + CapabilityInfo_SYS_PTRACE CapabilityInfo_Capability = 1019 + CapabilityInfo_SYS_PACCT CapabilityInfo_Capability = 1020 + CapabilityInfo_SYS_ADMIN CapabilityInfo_Capability = 1021 + CapabilityInfo_SYS_BOOT CapabilityInfo_Capability = 1022 + CapabilityInfo_SYS_NICE CapabilityInfo_Capability = 1023 + CapabilityInfo_SYS_RESOURCE CapabilityInfo_Capability = 1024 + CapabilityInfo_SYS_TIME CapabilityInfo_Capability = 1025 + CapabilityInfo_SYS_TTY_CONFIG CapabilityInfo_Capability = 1026 + CapabilityInfo_MKNOD CapabilityInfo_Capability = 1027 + CapabilityInfo_LEASE CapabilityInfo_Capability = 1028 + CapabilityInfo_AUDIT_WRITE CapabilityInfo_Capability = 1029 + CapabilityInfo_AUDIT_CONTROL CapabilityInfo_Capability = 1030 + CapabilityInfo_SETFCAP CapabilityInfo_Capability = 1031 + CapabilityInfo_MAC_OVERRIDE CapabilityInfo_Capability = 1032 + CapabilityInfo_MAC_ADMIN CapabilityInfo_Capability = 1033 + CapabilityInfo_SYSLOG CapabilityInfo_Capability = 1034 + CapabilityInfo_WAKE_ALARM CapabilityInfo_Capability = 1035 + CapabilityInfo_BLOCK_SUSPEND CapabilityInfo_Capability = 1036 + CapabilityInfo_AUDIT_READ CapabilityInfo_Capability = 1037 +) + +var CapabilityInfo_Capability_name = map[int32]string{ + 0: "UNKNOWN", + 1000: "CHOWN", + 1001: "DAC_OVERRIDE", + 1002: "DAC_READ_SEARCH", + 1003: "FOWNER", + 1004: "FSETID", + 1005: "KILL", + 1006: "SETGID", + 1007: "SETUID", + 1008: "SETPCAP", + 1009: "LINUX_IMMUTABLE", + 1010: "NET_BIND_SERVICE", + 1011: "NET_BROADCAST", + 1012: "NET_ADMIN", + 1013: "NET_RAW", + 1014: "IPC_LOCK", + 1015: "IPC_OWNER", + 1016: "SYS_MODULE", + 1017: "SYS_RAWIO", + 1018: "SYS_CHROOT", + 1019: "SYS_PTRACE", + 1020: "SYS_PACCT", + 1021: "SYS_ADMIN", + 1022: "SYS_BOOT", + 1023: "SYS_NICE", + 1024: "SYS_RESOURCE", + 1025: "SYS_TIME", + 1026: "SYS_TTY_CONFIG", + 1027: "MKNOD", + 1028: "LEASE", + 1029: "AUDIT_WRITE", + 1030: "AUDIT_CONTROL", + 1031: "SETFCAP", + 1032: "MAC_OVERRIDE", + 1033: "MAC_ADMIN", + 1034: "SYSLOG", + 1035: "WAKE_ALARM", + 1036: "BLOCK_SUSPEND", + 1037: "AUDIT_READ", +} +var CapabilityInfo_Capability_value = map[string]int32{ + "UNKNOWN": 0, + "CHOWN": 1000, + "DAC_OVERRIDE": 1001, + "DAC_READ_SEARCH": 1002, + "FOWNER": 1003, + "FSETID": 1004, + "KILL": 1005, + "SETGID": 1006, + "SETUID": 1007, + "SETPCAP": 1008, + "LINUX_IMMUTABLE": 
1009, + "NET_BIND_SERVICE": 1010, + "NET_BROADCAST": 1011, + "NET_ADMIN": 1012, + "NET_RAW": 1013, + "IPC_LOCK": 1014, + "IPC_OWNER": 1015, + "SYS_MODULE": 1016, + "SYS_RAWIO": 1017, + "SYS_CHROOT": 1018, + "SYS_PTRACE": 1019, + "SYS_PACCT": 1020, + "SYS_ADMIN": 1021, + "SYS_BOOT": 1022, + "SYS_NICE": 1023, + "SYS_RESOURCE": 1024, + "SYS_TIME": 1025, + "SYS_TTY_CONFIG": 1026, + "MKNOD": 1027, + "LEASE": 1028, + "AUDIT_WRITE": 1029, + "AUDIT_CONTROL": 1030, + "SETFCAP": 1031, + "MAC_OVERRIDE": 1032, + "MAC_ADMIN": 1033, + "SYSLOG": 1034, + "WAKE_ALARM": 1035, + "BLOCK_SUSPEND": 1036, + "AUDIT_READ": 1037, +} + +func (x CapabilityInfo_Capability) Enum() *CapabilityInfo_Capability { + p := new(CapabilityInfo_Capability) + *p = x + return p +} +func (x CapabilityInfo_Capability) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(CapabilityInfo_Capability_name, int32(x)) +} +func (x *CapabilityInfo_Capability) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CapabilityInfo_Capability_value, data, "CapabilityInfo_Capability") + if err != nil { + return err + } + *x = CapabilityInfo_Capability(value) + return nil +} +func (CapabilityInfo_Capability) EnumDescriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{65, 0} +} + +type RLimitInfo_RLimit_Type int32 + +const ( + RLimitInfo_RLimit_UNKNOWN RLimitInfo_RLimit_Type = 0 + RLimitInfo_RLimit_RLMT_AS RLimitInfo_RLimit_Type = 1 + RLimitInfo_RLimit_RLMT_CORE RLimitInfo_RLimit_Type = 2 + RLimitInfo_RLimit_RLMT_CPU RLimitInfo_RLimit_Type = 3 + RLimitInfo_RLimit_RLMT_DATA RLimitInfo_RLimit_Type = 4 + RLimitInfo_RLimit_RLMT_FSIZE RLimitInfo_RLimit_Type = 5 + RLimitInfo_RLimit_RLMT_LOCKS RLimitInfo_RLimit_Type = 6 + RLimitInfo_RLimit_RLMT_MEMLOCK RLimitInfo_RLimit_Type = 7 + RLimitInfo_RLimit_RLMT_MSGQUEUE RLimitInfo_RLimit_Type = 8 + RLimitInfo_RLimit_RLMT_NICE RLimitInfo_RLimit_Type = 9 + RLimitInfo_RLimit_RLMT_NOFILE RLimitInfo_RLimit_Type = 10 + RLimitInfo_RLimit_RLMT_NPROC RLimitInfo_RLimit_Type = 11 + RLimitInfo_RLimit_RLMT_RSS RLimitInfo_RLimit_Type = 12 + RLimitInfo_RLimit_RLMT_RTPRIO RLimitInfo_RLimit_Type = 13 + RLimitInfo_RLimit_RLMT_RTTIME RLimitInfo_RLimit_Type = 14 + RLimitInfo_RLimit_RLMT_SIGPENDING RLimitInfo_RLimit_Type = 15 + RLimitInfo_RLimit_RLMT_STACK RLimitInfo_RLimit_Type = 16 +) + +var RLimitInfo_RLimit_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "RLMT_AS", + 2: "RLMT_CORE", + 3: "RLMT_CPU", + 4: "RLMT_DATA", + 5: "RLMT_FSIZE", + 6: "RLMT_LOCKS", + 7: "RLMT_MEMLOCK", + 8: "RLMT_MSGQUEUE", + 9: "RLMT_NICE", + 10: "RLMT_NOFILE", + 11: "RLMT_NPROC", + 12: "RLMT_RSS", + 13: "RLMT_RTPRIO", + 14: "RLMT_RTTIME", + 15: "RLMT_SIGPENDING", + 16: "RLMT_STACK", +} +var RLimitInfo_RLimit_Type_value = map[string]int32{ + "UNKNOWN": 0, + "RLMT_AS": 1, + "RLMT_CORE": 2, + "RLMT_CPU": 3, + "RLMT_DATA": 4, + "RLMT_FSIZE": 5, + "RLMT_LOCKS": 6, + "RLMT_MEMLOCK": 7, + "RLMT_MSGQUEUE": 8, + "RLMT_NICE": 9, + "RLMT_NOFILE": 10, + "RLMT_NPROC": 11, + "RLMT_RSS": 12, + "RLMT_RTPRIO": 13, + "RLMT_RTTIME": 14, + "RLMT_SIGPENDING": 15, + "RLMT_STACK": 16, +} + +func (x RLimitInfo_RLimit_Type) Enum() *RLimitInfo_RLimit_Type { + p := new(RLimitInfo_RLimit_Type) + *p = x + return p +} +func (x RLimitInfo_RLimit_Type) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(RLimitInfo_RLimit_Type_name, int32(x)) +} +func (x *RLimitInfo_RLimit_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RLimitInfo_RLimit_Type_value, data, "RLimitInfo_RLimit_Type") + if err != nil { + 
return err + } + *x = RLimitInfo_RLimit_Type(value) + return nil +} +func (RLimitInfo_RLimit_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{67, 0, 0} +} + +// All container implementation types. +type ContainerInfo_Type int32 + +const ( + ContainerInfo_DOCKER ContainerInfo_Type = 1 + ContainerInfo_MESOS ContainerInfo_Type = 2 +) + +var ContainerInfo_Type_name = map[int32]string{ + 1: "DOCKER", + 2: "MESOS", +} +var ContainerInfo_Type_value = map[string]int32{ + "DOCKER": 1, + "MESOS": 2, +} + +func (x ContainerInfo_Type) Enum() *ContainerInfo_Type { + p := new(ContainerInfo_Type) + *p = x + return p +} +func (x ContainerInfo_Type) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(ContainerInfo_Type_name, int32(x)) +} +func (x *ContainerInfo_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ContainerInfo_Type_value, data, "ContainerInfo_Type") + if err != nil { + return err + } + *x = ContainerInfo_Type(value) + return nil +} +func (ContainerInfo_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorMesos, []int{69, 0} } + +// Network options. +type ContainerInfo_DockerInfo_Network int32 + +const ( + ContainerInfo_DockerInfo_HOST ContainerInfo_DockerInfo_Network = 1 + ContainerInfo_DockerInfo_BRIDGE ContainerInfo_DockerInfo_Network = 2 + ContainerInfo_DockerInfo_NONE ContainerInfo_DockerInfo_Network = 3 + ContainerInfo_DockerInfo_USER ContainerInfo_DockerInfo_Network = 4 +) + +var ContainerInfo_DockerInfo_Network_name = map[int32]string{ + 1: "HOST", + 2: "BRIDGE", + 3: "NONE", + 4: "USER", +} +var ContainerInfo_DockerInfo_Network_value = map[string]int32{ + "HOST": 1, + "BRIDGE": 2, + "NONE": 3, + "USER": 4, +} + +func (x ContainerInfo_DockerInfo_Network) Enum() *ContainerInfo_DockerInfo_Network { + p := new(ContainerInfo_DockerInfo_Network) + *p = x + return p +} +func (x ContainerInfo_DockerInfo_Network) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(ContainerInfo_DockerInfo_Network_name, int32(x)) +} +func (x *ContainerInfo_DockerInfo_Network) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ContainerInfo_DockerInfo_Network_value, data, "ContainerInfo_DockerInfo_Network") + if err != nil { + return err + } + *x = ContainerInfo_DockerInfo_Network(value) + return nil +} +func (ContainerInfo_DockerInfo_Network) EnumDescriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{69, 0, 0} +} + +type CgroupInfo_Blkio_Operation int32 + +const ( + CgroupInfo_Blkio_UNKNOWN CgroupInfo_Blkio_Operation = 0 + CgroupInfo_Blkio_TOTAL CgroupInfo_Blkio_Operation = 1 + CgroupInfo_Blkio_READ CgroupInfo_Blkio_Operation = 2 + CgroupInfo_Blkio_WRITE CgroupInfo_Blkio_Operation = 3 + CgroupInfo_Blkio_SYNC CgroupInfo_Blkio_Operation = 4 + CgroupInfo_Blkio_ASYNC CgroupInfo_Blkio_Operation = 5 +) + +var CgroupInfo_Blkio_Operation_name = map[int32]string{ + 0: "UNKNOWN", + 1: "TOTAL", + 2: "READ", + 3: "WRITE", + 4: "SYNC", + 5: "ASYNC", +} +var CgroupInfo_Blkio_Operation_value = map[string]int32{ + "UNKNOWN": 0, + "TOTAL": 1, + "READ": 2, + "WRITE": 3, + "SYNC": 4, + "ASYNC": 5, +} + +func (x CgroupInfo_Blkio_Operation) Enum() *CgroupInfo_Blkio_Operation { + p := new(CgroupInfo_Blkio_Operation) + *p = x + return p +} +func (x CgroupInfo_Blkio_Operation) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(CgroupInfo_Blkio_Operation_name, int32(x)) +} +func (x *CgroupInfo_Blkio_Operation) UnmarshalJSON(data []byte) error { + value, err := 
proto.UnmarshalJSONEnum(CgroupInfo_Blkio_Operation_value, data, "CgroupInfo_Blkio_Operation") + if err != nil { + return err + } + *x = CgroupInfo_Blkio_Operation(value) + return nil +} +func (CgroupInfo_Blkio_Operation) EnumDescriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{71, 0, 0} +} + +type DiscoveryInfo_Visibility int32 + +const ( + FRAMEWORK DiscoveryInfo_Visibility = 0 + CLUSTER DiscoveryInfo_Visibility = 1 + EXTERNAL DiscoveryInfo_Visibility = 2 +) + +var DiscoveryInfo_Visibility_name = map[int32]string{ + 0: "FRAMEWORK", + 1: "CLUSTER", + 2: "EXTERNAL", +} +var DiscoveryInfo_Visibility_value = map[string]int32{ + "FRAMEWORK": 0, + "CLUSTER": 1, + "EXTERNAL": 2, +} + +func (x DiscoveryInfo_Visibility) Enum() *DiscoveryInfo_Visibility { + p := new(DiscoveryInfo_Visibility) + *p = x + return p +} +func (x DiscoveryInfo_Visibility) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(DiscoveryInfo_Visibility_name, int32(x)) +} +func (x *DiscoveryInfo_Visibility) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(DiscoveryInfo_Visibility_value, data, "DiscoveryInfo_Visibility") + if err != nil { + return err + } + *x = DiscoveryInfo_Visibility(value) + return nil +} +func (DiscoveryInfo_Visibility) EnumDescriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{76, 0} +} + +// * +// A unique ID assigned to a framework. A framework can reuse this ID +// in order to do failover (see MesosSchedulerDriver). +type FrameworkID struct { + Value string `protobuf:"bytes,1,req,name=value" json:"value"` +} + +func (m *FrameworkID) Reset() { *m = FrameworkID{} } +func (*FrameworkID) ProtoMessage() {} +func (*FrameworkID) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{0} } + +func (m *FrameworkID) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// * +// A unique ID assigned to an offer. +type OfferID struct { + Value string `protobuf:"bytes,1,req,name=value" json:"value"` +} + +func (m *OfferID) Reset() { *m = OfferID{} } +func (*OfferID) ProtoMessage() {} +func (*OfferID) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{1} } + +func (m *OfferID) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// * +// A unique ID assigned to an agent. Currently, an agent gets a new ID +// whenever it (re)registers with Mesos. Framework writers shouldn't +// assume any binding between an agent ID and a hostname. +type AgentID struct { + Value string `protobuf:"bytes,1,req,name=value" json:"value"` +} + +func (m *AgentID) Reset() { *m = AgentID{} } +func (*AgentID) ProtoMessage() {} +func (*AgentID) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{2} } + +func (m *AgentID) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// * +// A framework-generated ID to distinguish a task. The ID must remain +// unique while the task is active. A framework can reuse an ID _only_ +// if the previous task with the same ID has reached a terminal state +// (e.g., TASK_FINISHED, TASK_KILLED, etc.). However, reusing task IDs +// is strongly discouraged (MESOS-2198). 
+type TaskID struct { + Value string `protobuf:"bytes,1,req,name=value" json:"value"` +} + +func (m *TaskID) Reset() { *m = TaskID{} } +func (*TaskID) ProtoMessage() {} +func (*TaskID) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{3} } + +func (m *TaskID) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// * +// A framework-generated ID to distinguish an executor. Only one +// executor with the same ID can be active on the same agent at a +// time. However, reusing executor IDs is discouraged. +type ExecutorID struct { + Value string `protobuf:"bytes,1,req,name=value" json:"value"` +} + +func (m *ExecutorID) Reset() { *m = ExecutorID{} } +func (*ExecutorID) ProtoMessage() {} +func (*ExecutorID) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{4} } + +func (m *ExecutorID) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// * +// ID used to uniquely identify a container. If the `parent` is not +// specified, the ID is a UUID generated by the agent to uniquely +// identify the container of an executor run. If the `parent` field is +// specified, it represents a nested container. +type ContainerID struct { + Value string `protobuf:"bytes,1,req,name=value" json:"value"` + Parent *ContainerID `protobuf:"bytes,2,opt,name=parent" json:"parent,omitempty"` +} + +func (m *ContainerID) Reset() { *m = ContainerID{} } +func (*ContainerID) ProtoMessage() {} +func (*ContainerID) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{5} } + +func (m *ContainerID) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func (m *ContainerID) GetParent() *ContainerID { + if m != nil { + return m.Parent + } + return nil +} + +// * +// A unique ID assigned to a resource provider. Currently, a resource +// provider gets a new ID whenever it (re)registers with Mesos. +type ResourceProviderID struct { + Value string `protobuf:"bytes,1,req,name=value" json:"value"` +} + +func (m *ResourceProviderID) Reset() { *m = ResourceProviderID{} } +func (*ResourceProviderID) ProtoMessage() {} +func (*ResourceProviderID) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{6} } + +func (m *ResourceProviderID) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// * +// A framework-generated ID to distinguish an operation. The ID +// must be unique within the framework. +type OperationID struct { + Value string `protobuf:"bytes,1,req,name=value" json:"value"` +} + +func (m *OperationID) Reset() { *m = OperationID{} } +func (*OperationID) ProtoMessage() {} +func (*OperationID) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{7} } + +func (m *OperationID) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// * +// Represents time since the epoch, in nanoseconds. +type TimeInfo struct { + Nanoseconds int64 `protobuf:"varint,1,req,name=nanoseconds" json:"nanoseconds"` +} + +func (m *TimeInfo) Reset() { *m = TimeInfo{} } +func (*TimeInfo) ProtoMessage() {} +func (*TimeInfo) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{8} } + +func (m *TimeInfo) GetNanoseconds() int64 { + if m != nil { + return m.Nanoseconds + } + return 0 +} + +// * +// Represents duration in nanoseconds. 
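// Illustrative sketch, not generated code: the nested ContainerID described
// above carries a pointer chain back to the executor's root container, which
// the nil-safe GetParent accessor lets callers walk. IDs are hypothetical;
// the import path is an assumption about where this package is vendored.
package main

import (
	"fmt"

	mesos "github.com/mesos/mesos-go/api/v1/lib" // assumed vendor path
)

func main() {
	root := mesos.ContainerID{Value: "root-uuid"} // hypothetical agent-generated UUID
	nested := mesos.ContainerID{Value: "child-uuid", Parent: &root}

	// Walk from the nested container up to the root; GetParent returns nil
	// at the top, terminating the loop.
	for c := &nested; c != nil; c = c.GetParent() {
		fmt.Println(c.GetValue()) // "child-uuid", then "root-uuid"
	}
}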
+type DurationInfo struct { + Nanoseconds int64 `protobuf:"varint,1,req,name=nanoseconds" json:"nanoseconds"` +} + +func (m *DurationInfo) Reset() { *m = DurationInfo{} } +func (*DurationInfo) ProtoMessage() {} +func (*DurationInfo) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{9} } + +func (m *DurationInfo) GetNanoseconds() int64 { + if m != nil { + return m.Nanoseconds + } + return 0 +} + +// * +// A network address. +// +// TODO(bmahler): Use this more widely. +type Address struct { + // May contain a hostname, IP address, or both. + Hostname *string `protobuf:"bytes,1,opt,name=hostname" json:"hostname,omitempty"` + IP *string `protobuf:"bytes,2,opt,name=ip" json:"ip,omitempty"` + Port int32 `protobuf:"varint,3,req,name=port" json:"port"` +} + +func (m *Address) Reset() { *m = Address{} } +func (*Address) ProtoMessage() {} +func (*Address) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{10} } + +func (m *Address) GetHostname() string { + if m != nil && m.Hostname != nil { + return *m.Hostname + } + return "" +} + +func (m *Address) GetIP() string { + if m != nil && m.IP != nil { + return *m.IP + } + return "" +} + +func (m *Address) GetPort() int32 { + if m != nil { + return m.Port + } + return 0 +} + +// * +// Represents a URL. +type URL struct { + Scheme string `protobuf:"bytes,1,req,name=scheme" json:"scheme"` + Address Address `protobuf:"bytes,2,req,name=address" json:"address"` + Path *string `protobuf:"bytes,3,opt,name=path" json:"path,omitempty"` + Query []Parameter `protobuf:"bytes,4,rep,name=query" json:"query"` + Fragment *string `protobuf:"bytes,5,opt,name=fragment" json:"fragment,omitempty"` +} + +func (m *URL) Reset() { *m = URL{} } +func (*URL) ProtoMessage() {} +func (*URL) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{11} } + +func (m *URL) GetScheme() string { + if m != nil { + return m.Scheme + } + return "" +} + +func (m *URL) GetAddress() Address { + if m != nil { + return m.Address + } + return Address{} +} + +func (m *URL) GetPath() string { + if m != nil && m.Path != nil { + return *m.Path + } + return "" +} + +func (m *URL) GetQuery() []Parameter { + if m != nil { + return m.Query + } + return nil +} + +func (m *URL) GetFragment() string { + if m != nil && m.Fragment != nil { + return *m.Fragment + } + return "" +} + +// * +// Represents an interval, from a given start time over a given duration. +// This interval pertains to an unavailability event, such as maintenance, +// and is not a generic interval. +type Unavailability struct { + Start TimeInfo `protobuf:"bytes,1,req,name=start" json:"start"` + // When added to `start`, this represents the end of the interval. + // If unspecified, the duration is assumed to be infinite. + Duration *DurationInfo `protobuf:"bytes,2,opt,name=duration" json:"duration,omitempty"` +} + +func (m *Unavailability) Reset() { *m = Unavailability{} } +func (*Unavailability) ProtoMessage() {} +func (*Unavailability) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{12} } + +func (m *Unavailability) GetStart() TimeInfo { + if m != nil { + return m.Start + } + return TimeInfo{} +} + +func (m *Unavailability) GetDuration() *DurationInfo { + if m != nil { + return m.Duration + } + return nil +} + +// * +// Represents a single machine, which may hold one or more agents. +// +// NOTE: In order to match an agent to a machine, both the `hostname` and +// `ip` must match the values advertised by the agent to the master. +// Hostname is not case-sensitive. 
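// A minimal sketch of why every getter above guards with `m != nil`: proto2
// optional fields are pointers, and the generated getters are safe to call
// even on a nil message, returning the zero value. Vendor path assumed.
package main

import (
	"fmt"

	mesos "github.com/mesos/mesos-go/api/v1/lib" // assumed vendor path
)

func main() {
	var u *mesos.URL                // nil optional message
	fmt.Printf("%q\n", u.GetPath()) // "" -- no panic on a nil receiver

	addr := mesos.Address{Port: 5050}
	fmt.Println(addr.GetHostname() == "") // true: optional field left unset
	fmt.Println(addr.GetPort())           // 5050: required field, value type
}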
+type MachineID struct { + Hostname *string `protobuf:"bytes,1,opt,name=hostname" json:"hostname,omitempty"` + IP *string `protobuf:"bytes,2,opt,name=ip" json:"ip,omitempty"` +} + +func (m *MachineID) Reset() { *m = MachineID{} } +func (*MachineID) ProtoMessage() {} +func (*MachineID) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{13} } + +func (m *MachineID) GetHostname() string { + if m != nil && m.Hostname != nil { + return *m.Hostname + } + return "" +} + +func (m *MachineID) GetIP() string { + if m != nil && m.IP != nil { + return *m.IP + } + return "" +} + +// * +// Holds information about a single machine, its `mode`, and any other +// relevant information which may affect the behavior of the machine. +type MachineInfo struct { + ID MachineID `protobuf:"bytes,1,req,name=id" json:"id"` + Mode *MachineInfo_Mode `protobuf:"varint,2,opt,name=mode,enum=mesos.MachineInfo_Mode" json:"mode,omitempty"` + // Signifies that the machine may be unavailable during the given interval. + // See comments in `Unavailability` and for the `unavailability` fields + // in `Offer` and `InverseOffer` for more information. + Unavailability *Unavailability `protobuf:"bytes,3,opt,name=unavailability" json:"unavailability,omitempty"` +} + +func (m *MachineInfo) Reset() { *m = MachineInfo{} } +func (*MachineInfo) ProtoMessage() {} +func (*MachineInfo) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{14} } + +func (m *MachineInfo) GetID() MachineID { + if m != nil { + return m.ID + } + return MachineID{} +} + +func (m *MachineInfo) GetMode() MachineInfo_Mode { + if m != nil && m.Mode != nil { + return *m.Mode + } + return UP +} + +func (m *MachineInfo) GetUnavailability() *Unavailability { + if m != nil { + return m.Unavailability + } + return nil +} + +// * +// Describes a framework. +type FrameworkInfo struct { + // Used to determine the Unix user that an executor or task should be + // launched as. + // + // When using the MesosSchedulerDriver, if the field is set to an + // empty string, it will automagically set it to the current user. + // + // When using the HTTP Scheduler API, the user has to be set + // explicitly. + User string `protobuf:"bytes,1,req,name=user" json:"user"` + // Name of the framework that shows up in the Mesos Web UI. + Name string `protobuf:"bytes,2,req,name=name" json:"name"` + // Note that 'id' is only available after a framework has + // registered, however, it is included here in order to facilitate + // scheduler failover (i.e., if it is set then the + // MesosSchedulerDriver expects the scheduler is performing + // failover). + ID *FrameworkID `protobuf:"bytes,3,opt,name=id" json:"id,omitempty"` + // The amount of time (in seconds) that the master will wait for the + // scheduler to failover before it tears down the framework by + // killing all its tasks/executors. This should be non-zero if a + // framework expects to reconnect after a failure and not lose its + // tasks/executors. + // + // NOTE: To avoid accidental destruction of tasks, production + // frameworks typically set this to a large value (e.g., 1 week). + FailoverTimeout *float64 `protobuf:"fixed64,4,opt,name=failover_timeout,json=failoverTimeout,def=0" json:"failover_timeout,omitempty"` + // If set, agents running tasks started by this framework will write + // the framework pid, executor pids and status updates to disk. 
If + // the agent exits (e.g., due to a crash or as part of upgrading + // Mesos), this checkpointed data allows the restarted agent to + // reconnect to executors that were started by the old instance of + // the agent. Enabling checkpointing improves fault tolerance, at + // the cost of a (usually small) increase in disk I/O. + Checkpoint *bool `protobuf:"varint,5,opt,name=checkpoint,def=0" json:"checkpoint,omitempty"` + // Roles are the entities to which allocations are made. + // The framework must have at least one role in order to + // be offered resources. Note that `role` is deprecated + // in favor of `roles` and only one of these fields must + // be used. Since we cannot distinguish between empty + // `roles` and the default unset `role`, we require that + // frameworks set the `MULTI_ROLE` capability if + // setting the `roles` field. + Role *string `protobuf:"bytes,6,opt,name=role,def=*" json:"role,omitempty"` + Roles []string `protobuf:"bytes,12,rep,name=roles" json:"roles,omitempty"` + // Used to indicate the current host from which the scheduler is + // registered in the Mesos Web UI. If set to an empty string Mesos + // will automagically set it to the current hostname if one is + // available. + Hostname *string `protobuf:"bytes,7,opt,name=hostname" json:"hostname,omitempty"` + // This field should match the credential's principal the framework + // uses for authentication. This field is used for framework API + // rate limiting and dynamic reservations. It should be set even + // if authentication is not enabled if these features are desired. + Principal *string `protobuf:"bytes,8,opt,name=principal" json:"principal,omitempty"` + // This field allows a framework to advertise its web UI, so that + // the Mesos web UI can link to it. It is expected to be a full URL, + // for example http://my-scheduler.example.com:8080/. + WebUiURL *string `protobuf:"bytes,9,opt,name=webui_url,json=webuiUrl" json:"webui_url,omitempty"` + // This field allows a framework to advertise its set of + // capabilities (e.g., ability to receive offers for revocable + // resources). + Capabilities []FrameworkInfo_Capability `protobuf:"bytes,10,rep,name=capabilities" json:"capabilities"` + // Labels are free-form key value pairs supplied by the framework + // scheduler (e.g., to describe additional functionality offered by + // the framework). These labels are not interpreted by Mesos itself. + // Labels should not contain duplicate key-value pairs. 
+ Labels *Labels `protobuf:"bytes,11,opt,name=labels" json:"labels,omitempty"` +} + +func (m *FrameworkInfo) Reset() { *m = FrameworkInfo{} } +func (*FrameworkInfo) ProtoMessage() {} +func (*FrameworkInfo) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{15} } + +const Default_FrameworkInfo_FailoverTimeout float64 = 0 +const Default_FrameworkInfo_Checkpoint bool = false +const Default_FrameworkInfo_Role string = "*" + +func (m *FrameworkInfo) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *FrameworkInfo) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *FrameworkInfo) GetID() *FrameworkID { + if m != nil { + return m.ID + } + return nil +} + +func (m *FrameworkInfo) GetFailoverTimeout() float64 { + if m != nil && m.FailoverTimeout != nil { + return *m.FailoverTimeout + } + return Default_FrameworkInfo_FailoverTimeout +} + +func (m *FrameworkInfo) GetCheckpoint() bool { + if m != nil && m.Checkpoint != nil { + return *m.Checkpoint + } + return Default_FrameworkInfo_Checkpoint +} + +func (m *FrameworkInfo) GetRole() string { + if m != nil && m.Role != nil { + return *m.Role + } + return Default_FrameworkInfo_Role +} + +func (m *FrameworkInfo) GetRoles() []string { + if m != nil { + return m.Roles + } + return nil +} + +func (m *FrameworkInfo) GetHostname() string { + if m != nil && m.Hostname != nil { + return *m.Hostname + } + return "" +} + +func (m *FrameworkInfo) GetPrincipal() string { + if m != nil && m.Principal != nil { + return *m.Principal + } + return "" +} + +func (m *FrameworkInfo) GetWebUiURL() string { + if m != nil && m.WebUiURL != nil { + return *m.WebUiURL + } + return "" +} + +func (m *FrameworkInfo) GetCapabilities() []FrameworkInfo_Capability { + if m != nil { + return m.Capabilities + } + return nil +} + +func (m *FrameworkInfo) GetLabels() *Labels { + if m != nil { + return m.Labels + } + return nil +} + +type FrameworkInfo_Capability struct { + // Enum fields should be optional, see: MESOS-4997. + Type FrameworkInfo_Capability_Type `protobuf:"varint,1,opt,name=type,enum=mesos.FrameworkInfo_Capability_Type" json:"type"` +} + +func (m *FrameworkInfo_Capability) Reset() { *m = FrameworkInfo_Capability{} } +func (*FrameworkInfo_Capability) ProtoMessage() {} +func (*FrameworkInfo_Capability) Descriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{15, 0} +} + +func (m *FrameworkInfo_Capability) GetType() FrameworkInfo_Capability_Type { + if m != nil { + return m.Type + } + return FrameworkInfo_Capability_UNKNOWN +} + +// * +// Describes a general non-interpreting non-killing check for a task or +// executor (or any arbitrary process/command). A type is picked by +// specifying one of the optional fields. Specifying more than one type +// is an error. +// +// NOTE: This API is subject to change and the related feature is experimental. +type CheckInfo struct { + // The type of the check. + Type CheckInfo_Type `protobuf:"varint,1,opt,name=type,enum=mesos.CheckInfo_Type" json:"type"` + // Command check. + Command *CheckInfo_Command `protobuf:"bytes,2,opt,name=command" json:"command,omitempty"` + // HTTP check. + HTTP *CheckInfo_Http `protobuf:"bytes,3,opt,name=http" json:"http,omitempty"` + // TCP check. + TCP *CheckInfo_Tcp `protobuf:"bytes,7,opt,name=tcp" json:"tcp,omitempty"` + // Amount of time to wait to start checking the task after it + // transitions to `TASK_RUNNING` or `TASK_STARTING` if the latter + // is used by the executor. 
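// Hedged sketch of the proto2 defaults wired through the FrameworkInfo
// getters above: with `role`, `checkpoint`, and `failover_timeout` left
// unset, the getters fall back to the Default_FrameworkInfo_* constants,
// so callers never distinguish unset from default. Vendor path assumed.
package main

import (
	"fmt"

	mesos "github.com/mesos/mesos-go/api/v1/lib" // assumed vendor path
)

func main() {
	fw := mesos.FrameworkInfo{User: "root", Name: "example-framework"} // hypothetical
	fmt.Println(fw.GetRole())            // "*" (Default_FrameworkInfo_Role)
	fmt.Println(fw.GetCheckpoint())      // false
	fmt.Println(fw.GetFailoverTimeout()) // 0
}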
+ DelaySeconds *float64 `protobuf:"fixed64,4,opt,name=delay_seconds,json=delaySeconds,def=15" json:"delay_seconds,omitempty"` + // Interval between check attempts, i.e., amount of time to wait after + // the previous check finished or timed out to start the next check. + IntervalSeconds *float64 `protobuf:"fixed64,5,opt,name=interval_seconds,json=intervalSeconds,def=10" json:"interval_seconds,omitempty"` + // Amount of time to wait for the check to complete. Zero means infinite + // timeout. + // + // After this timeout, the check attempt is aborted and no result is + // reported. Note that this may be considered a state change and hence + // may trigger a check status change delivery to the corresponding + // scheduler. See `CheckStatusInfo` for more details. + TimeoutSeconds *float64 `protobuf:"fixed64,6,opt,name=timeout_seconds,json=timeoutSeconds,def=20" json:"timeout_seconds,omitempty"` +} + +func (m *CheckInfo) Reset() { *m = CheckInfo{} } +func (*CheckInfo) ProtoMessage() {} +func (*CheckInfo) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{16} } + +const Default_CheckInfo_DelaySeconds float64 = 15 +const Default_CheckInfo_IntervalSeconds float64 = 10 +const Default_CheckInfo_TimeoutSeconds float64 = 20 + +func (m *CheckInfo) GetType() CheckInfo_Type { + if m != nil { + return m.Type + } + return CheckInfo_UNKNOWN +} + +func (m *CheckInfo) GetCommand() *CheckInfo_Command { + if m != nil { + return m.Command + } + return nil +} + +func (m *CheckInfo) GetHTTP() *CheckInfo_Http { + if m != nil { + return m.HTTP + } + return nil +} + +func (m *CheckInfo) GetTCP() *CheckInfo_Tcp { + if m != nil { + return m.TCP + } + return nil +} + +func (m *CheckInfo) GetDelaySeconds() float64 { + if m != nil && m.DelaySeconds != nil { + return *m.DelaySeconds + } + return Default_CheckInfo_DelaySeconds +} + +func (m *CheckInfo) GetIntervalSeconds() float64 { + if m != nil && m.IntervalSeconds != nil { + return *m.IntervalSeconds + } + return Default_CheckInfo_IntervalSeconds +} + +func (m *CheckInfo) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_CheckInfo_TimeoutSeconds +} + +// Describes a command check. If applicable, enters mount and/or network +// namespaces of the task. +type CheckInfo_Command struct { + Command CommandInfo `protobuf:"bytes,1,req,name=command" json:"command"` +} + +func (m *CheckInfo_Command) Reset() { *m = CheckInfo_Command{} } +func (*CheckInfo_Command) ProtoMessage() {} +func (*CheckInfo_Command) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{16, 0} } + +func (m *CheckInfo_Command) GetCommand() CommandInfo { + if m != nil { + return m.Command + } + return CommandInfo{} +} + +// Describes an HTTP check. Sends a GET request to +// http://<host>:port/path. Note that <host> is not configurable and is +// resolved automatically to 127.0.0.1. +type CheckInfo_Http struct { + // Port to send the HTTP request. + Port uint32 `protobuf:"varint,1,req,name=port" json:"port"` + // HTTP request path. 
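// Illustrative only: building the HTTP variant of the experimental CheckInfo
// above. CheckInfo_HTTP is assumed to come from the CheckInfo_Type enum
// declared earlier in this file; timing knobs left nil fall back to the
// generated defaults (delay 15s, interval 10s, timeout 20s). Vendor path
// assumed.
package main

import (
	"fmt"

	mesos "github.com/mesos/mesos-go/api/v1/lib" // assumed vendor path
)

func main() {
	check := mesos.CheckInfo{
		Type: mesos.CheckInfo_HTTP,
		HTTP: &mesos.CheckInfo_Http{Port: 8080}, // hypothetical port
	}
	fmt.Println(check.GetDelaySeconds())    // 15 (default)
	fmt.Println(check.GetIntervalSeconds()) // 10 (default)
	fmt.Println(check.GetTimeoutSeconds())  // 20 (default)
}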
+ Path *string `protobuf:"bytes,2,opt,name=path" json:"path,omitempty"` +} + +func (m *CheckInfo_Http) Reset() { *m = CheckInfo_Http{} } +func (*CheckInfo_Http) ProtoMessage() {} +func (*CheckInfo_Http) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{16, 1} } + +func (m *CheckInfo_Http) GetPort() uint32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *CheckInfo_Http) GetPath() string { + if m != nil && m.Path != nil { + return *m.Path + } + return "" +} + +// Describes a TCP check, i.e. based on establishing a TCP connection to +// the specified port. Note that <host> is not configurable and is resolved +// automatically to 127.0.0.1. +type CheckInfo_Tcp struct { + Port uint32 `protobuf:"varint,1,req,name=port" json:"port"` +} + +func (m *CheckInfo_Tcp) Reset() { *m = CheckInfo_Tcp{} } +func (*CheckInfo_Tcp) ProtoMessage() {} +func (*CheckInfo_Tcp) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{16, 2} } + +func (m *CheckInfo_Tcp) GetPort() uint32 { + if m != nil { + return m.Port + } + return 0 +} + +// * +// Describes a health check for a task or executor (or any arbitrary +// process/command). A type is picked by specifying one of the +// optional fields. Specifying more than one type is an error. +type HealthCheck struct { + // Amount of time to wait to start health checking the task after it + // transitions to `TASK_RUNNING` or `TASK_STARTING` if the latter is + // used by the executor. + DelaySeconds *float64 `protobuf:"fixed64,2,opt,name=delay_seconds,json=delaySeconds,def=15" json:"delay_seconds,omitempty"` + // Interval between health checks, i.e., amount of time to wait after + // the previous health check finished or timed out to start the next + // health check. + IntervalSeconds *float64 `protobuf:"fixed64,3,opt,name=interval_seconds,json=intervalSeconds,def=10" json:"interval_seconds,omitempty"` + // Amount of time to wait for the health check to complete. After this + // timeout, the health check is aborted and treated as a failure. Zero + // means infinite timeout. + TimeoutSeconds *float64 `protobuf:"fixed64,4,opt,name=timeout_seconds,json=timeoutSeconds,def=20" json:"timeout_seconds,omitempty"` + // Number of consecutive failures until the task is killed by the executor. + ConsecutiveFailures *uint32 `protobuf:"varint,5,opt,name=consecutive_failures,json=consecutiveFailures,def=3" json:"consecutive_failures,omitempty"` + // Amount of time after the task is launched during which health check + // failures are ignored. Once a check succeeds for the first time, + // the grace period does not apply anymore. Note that it includes + // `delay_seconds`, i.e., setting `grace_period_seconds` < `delay_seconds` + // has no effect. + GracePeriodSeconds *float64 `protobuf:"fixed64,6,opt,name=grace_period_seconds,json=gracePeriodSeconds,def=10" json:"grace_period_seconds,omitempty"` + // The type of health check. + Type HealthCheck_Type `protobuf:"varint,8,opt,name=type,enum=mesos.HealthCheck_Type" json:"type"` + // Command health check. + Command *CommandInfo `protobuf:"bytes,7,opt,name=command" json:"command,omitempty"` + // HTTP health check. + HTTP *HealthCheck_HTTPCheckInfo `protobuf:"bytes,1,opt,name=http" json:"http,omitempty"` + // TCP health check. 
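// A hedged sketch of the HealthCheck above as a shell-command probe. The
// HealthCheck_COMMAND constant is assumed from the HealthCheck_Type enum
// declared earlier in the file; unset knobs fall back to the generated
// defaults, e.g. three consecutive failures before the executor kills the
// task. The probe command and vendor path are assumptions.
package main

import (
	"fmt"

	mesos "github.com/mesos/mesos-go/api/v1/lib" // assumed vendor path
)

func main() {
	cmd := "curl -f http://localhost:8080/healthz" // hypothetical probe
	hc := mesos.HealthCheck{
		Type:    mesos.HealthCheck_COMMAND,
		Command: &mesos.CommandInfo{Value: &cmd},
	}
	fmt.Println(hc.GetConsecutiveFailures()) // 3 (default)
	fmt.Println(hc.GetGracePeriodSeconds())  // 10 (default)
}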
+ TCP *HealthCheck_TCPCheckInfo `protobuf:"bytes,9,opt,name=tcp" json:"tcp,omitempty"` +} + +func (m *HealthCheck) Reset() { *m = HealthCheck{} } +func (*HealthCheck) ProtoMessage() {} +func (*HealthCheck) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{17} } + +const Default_HealthCheck_DelaySeconds float64 = 15 +const Default_HealthCheck_IntervalSeconds float64 = 10 +const Default_HealthCheck_TimeoutSeconds float64 = 20 +const Default_HealthCheck_ConsecutiveFailures uint32 = 3 +const Default_HealthCheck_GracePeriodSeconds float64 = 10 + +func (m *HealthCheck) GetDelaySeconds() float64 { + if m != nil && m.DelaySeconds != nil { + return *m.DelaySeconds + } + return Default_HealthCheck_DelaySeconds +} + +func (m *HealthCheck) GetIntervalSeconds() float64 { + if m != nil && m.IntervalSeconds != nil { + return *m.IntervalSeconds + } + return Default_HealthCheck_IntervalSeconds +} + +func (m *HealthCheck) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_HealthCheck_TimeoutSeconds +} + +func (m *HealthCheck) GetConsecutiveFailures() uint32 { + if m != nil && m.ConsecutiveFailures != nil { + return *m.ConsecutiveFailures + } + return Default_HealthCheck_ConsecutiveFailures +} + +func (m *HealthCheck) GetGracePeriodSeconds() float64 { + if m != nil && m.GracePeriodSeconds != nil { + return *m.GracePeriodSeconds + } + return Default_HealthCheck_GracePeriodSeconds +} + +func (m *HealthCheck) GetType() HealthCheck_Type { + if m != nil { + return m.Type + } + return HealthCheck_UNKNOWN +} + +func (m *HealthCheck) GetCommand() *CommandInfo { + if m != nil { + return m.Command + } + return nil +} + +func (m *HealthCheck) GetHTTP() *HealthCheck_HTTPCheckInfo { + if m != nil { + return m.HTTP + } + return nil +} + +func (m *HealthCheck) GetTCP() *HealthCheck_TCPCheckInfo { + if m != nil { + return m.TCP + } + return nil +} + +// Describes an HTTP health check. Sends a GET request to +// scheme://<host>:port/path. Note that <host> is not configurable and is +// resolved automatically, in most cases to 127.0.0.1. Default executors +// treat return codes between 200 and 399 as success; custom executors +// may employ a different strategy, e.g. leveraging the `statuses` field. +type HealthCheck_HTTPCheckInfo struct { + Protocol *NetworkInfo_Protocol `protobuf:"varint,5,opt,name=protocol,enum=mesos.NetworkInfo_Protocol,def=1" json:"protocol,omitempty"` + // Currently "http" and "https" are supported. + Scheme *string `protobuf:"bytes,3,opt,name=scheme" json:"scheme,omitempty"` + // Port to send the HTTP request. + Port uint32 `protobuf:"varint,1,req,name=port" json:"port"` + // HTTP request path. + Path *string `protobuf:"bytes,2,opt,name=path" json:"path,omitempty"` + // NOTE: It is up to the custom executor to interpret and act on this + // field. Setting this field has no effect on the default executors. + // + // TODO(haosdent): Deprecate this field when we add better support for + // success and possibly failure statuses, e.g. ranges of success and + // failure statuses. 
+ Statuses []uint32 `protobuf:"varint,4,rep,name=statuses" json:"statuses,omitempty"` +} + +func (m *HealthCheck_HTTPCheckInfo) Reset() { *m = HealthCheck_HTTPCheckInfo{} } +func (*HealthCheck_HTTPCheckInfo) ProtoMessage() {} +func (*HealthCheck_HTTPCheckInfo) Descriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{17, 0} +} + +const Default_HealthCheck_HTTPCheckInfo_Protocol NetworkInfo_Protocol = IPv4 + +func (m *HealthCheck_HTTPCheckInfo) GetProtocol() NetworkInfo_Protocol { + if m != nil && m.Protocol != nil { + return *m.Protocol + } + return Default_HealthCheck_HTTPCheckInfo_Protocol +} + +func (m *HealthCheck_HTTPCheckInfo) GetScheme() string { + if m != nil && m.Scheme != nil { + return *m.Scheme + } + return "" +} + +func (m *HealthCheck_HTTPCheckInfo) GetPort() uint32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *HealthCheck_HTTPCheckInfo) GetPath() string { + if m != nil && m.Path != nil { + return *m.Path + } + return "" +} + +func (m *HealthCheck_HTTPCheckInfo) GetStatuses() []uint32 { + if m != nil { + return m.Statuses + } + return nil +} + +// Describes a TCP health check, i.e. based on establishing +// a TCP connection to the specified port. +type HealthCheck_TCPCheckInfo struct { + Protocol *NetworkInfo_Protocol `protobuf:"varint,2,opt,name=protocol,enum=mesos.NetworkInfo_Protocol,def=1" json:"protocol,omitempty"` + // Port expected to be open. + Port uint32 `protobuf:"varint,1,req,name=port" json:"port"` +} + +func (m *HealthCheck_TCPCheckInfo) Reset() { *m = HealthCheck_TCPCheckInfo{} } +func (*HealthCheck_TCPCheckInfo) ProtoMessage() {} +func (*HealthCheck_TCPCheckInfo) Descriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{17, 1} +} + +const Default_HealthCheck_TCPCheckInfo_Protocol NetworkInfo_Protocol = IPv4 + +func (m *HealthCheck_TCPCheckInfo) GetProtocol() NetworkInfo_Protocol { + if m != nil && m.Protocol != nil { + return *m.Protocol + } + return Default_HealthCheck_TCPCheckInfo_Protocol +} + +func (m *HealthCheck_TCPCheckInfo) GetPort() uint32 { + if m != nil { + return m.Port + } + return 0 +} + +// * +// Describes a kill policy for a task. Currently does not express +// different policies (e.g. hitting HTTP endpoints), only controls +// how long to wait between graceful and forcible task kill: +// +// graceful kill --------------> forcible kill +// grace_period +// +// Kill policies are best-effort, because machine failures / forcible +// terminations may occur. +// +// NOTE: For executor-less command-based tasks, the kill is performed +// via sending a signal to the task process: SIGTERM for the graceful +// kill and SIGKILL for the forcible kill. For the docker executor-less +// tasks the grace period is passed to 'docker stop --time'. +type KillPolicy struct { + // The grace period specifies how long to wait before forcibly + // killing the task. It is recommended to attempt to gracefully + // kill the task (and send TASK_KILLING) to indicate that the + // graceful kill is in progress. Once the grace period elapses, + // if the task has not terminated, a forcible kill should occur. + // The task should not assume that it will always be allotted + // the full grace period. For example, the executor may be + // shutdown more quickly by the agent, or failures / forcible + // terminations may occur. 
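// Minimal sketch of the kill policy described above: the window between the
// graceful and the forcible kill is a DurationInfo in nanoseconds. Vendor
// path assumed.
package main

import (
	"fmt"
	"time"

	mesos "github.com/mesos/mesos-go/api/v1/lib" // assumed vendor path
)

func main() {
	kp := mesos.KillPolicy{
		GracePeriod: &mesos.DurationInfo{Nanoseconds: (30 * time.Second).Nanoseconds()},
	}
	fmt.Println(kp.GetGracePeriod().GetNanoseconds()) // 30000000000
}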
+ GracePeriod *DurationInfo `protobuf:"bytes,1,opt,name=grace_period,json=gracePeriod" json:"grace_period,omitempty"` +} + +func (m *KillPolicy) Reset() { *m = KillPolicy{} } +func (*KillPolicy) ProtoMessage() {} +func (*KillPolicy) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{18} } + +func (m *KillPolicy) GetGracePeriod() *DurationInfo { + if m != nil { + return m.GracePeriod + } + return nil +} + +// * +// Describes a command, executed via: '/bin/sh -c value'. Any URIs specified +// are fetched before executing the command. If the executable field for an +// uri is set, executable file permission is set on the downloaded file. +// Otherwise, if the downloaded file has a recognized archive extension +// (currently [compressed] tar and zip) it is extracted into the executor's +// working directory. This extraction can be disabled by setting `extract` to +// false. In addition, any environment variables are set before executing +// the command (so they can be used to "parameterize" your command). +type CommandInfo struct { + URIs []CommandInfo_URI `protobuf:"bytes,1,rep,name=uris" json:"uris"` + Environment *Environment `protobuf:"bytes,2,opt,name=environment" json:"environment,omitempty"` + // There are two ways to specify the command: + // 1) If 'shell == true', the command will be launched via shell + // (i.e., /bin/sh -c 'value'). The 'value' specified will be + // treated as the shell command. The 'arguments' will be ignored. + // 2) If 'shell == false', the command will be launched by passing + // arguments to an executable. The 'value' specified will be + // treated as the filename of the executable. The 'arguments' + // will be treated as the arguments to the executable. This is + // similar to how POSIX exec families launch processes (i.e., + // execlp(value, arguments(0), arguments(1), ...)). + // NOTE: The field 'value' is changed from 'required' to 'optional' + // in 0.20.0. It will only cause issues if a new framework is + // connecting to an old master. + Shell *bool `protobuf:"varint,6,opt,name=shell,def=1" json:"shell,omitempty"` + Value *string `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"` + Arguments []string `protobuf:"bytes,7,rep,name=arguments" json:"arguments,omitempty"` + // Enables executor and tasks to run as a specific user. If the user + // field is present both in FrameworkInfo and here, the CommandInfo + // user value takes precedence. 
+ User *string `protobuf:"bytes,5,opt,name=user" json:"user,omitempty"` +} + +func (m *CommandInfo) Reset() { *m = CommandInfo{} } +func (*CommandInfo) ProtoMessage() {} +func (*CommandInfo) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{19} } + +const Default_CommandInfo_Shell bool = true + +func (m *CommandInfo) GetURIs() []CommandInfo_URI { + if m != nil { + return m.URIs + } + return nil +} + +func (m *CommandInfo) GetEnvironment() *Environment { + if m != nil { + return m.Environment + } + return nil +} + +func (m *CommandInfo) GetShell() bool { + if m != nil && m.Shell != nil { + return *m.Shell + } + return Default_CommandInfo_Shell +} + +func (m *CommandInfo) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +func (m *CommandInfo) GetArguments() []string { + if m != nil { + return m.Arguments + } + return nil +} + +func (m *CommandInfo) GetUser() string { + if m != nil && m.User != nil { + return *m.User + } + return "" +} + +type CommandInfo_URI struct { + Value string `protobuf:"bytes,1,req,name=value" json:"value"` + Executable *bool `protobuf:"varint,2,opt,name=executable" json:"executable,omitempty"` + // In case the fetched file is recognized as an archive, extract + // its contents into the sandbox. Note that a cached archive is + // not copied from the cache to the sandbox in case extraction + // originates from an archive in the cache. + Extract *bool `protobuf:"varint,3,opt,name=extract,def=1" json:"extract,omitempty"` + // If this field is "true", the fetcher cache will be used. If not, + // fetching bypasses the cache and downloads directly into the + // sandbox directory, no matter whether a suitable cache file is + // available or not. The former directs the fetcher to download to + // the file cache, then copy from there to the sandbox. Subsequent + // fetch attempts with the same URI will omit downloading and copy + // from the cache as long as the file is resident there. Cache files + // may get evicted at any time, which then leads to renewed + // downloading. See also "docs/fetcher.md" and + // "docs/fetcher-cache-internals.md". + Cache *bool `protobuf:"varint,4,opt,name=cache" json:"cache,omitempty"` + // The fetcher's default behavior is to use the URI string's basename to + // name the local copy. If this field is provided, the local copy will be + // named with its value instead. If there is a directory component (which + // must be a relative path), the local copy will be stored in that + // subdirectory inside the sandbox. 
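// Sketch of the two launch modes documented above: with shell=true the value
// is run via `/bin/sh -c value`; with shell=false the value names an
// executable and `arguments` is the argv passed to it, argv[0] included.
// The executable, jar URL, and vendor path are all hypothetical.
package main

import (
	"fmt"

	mesos "github.com/mesos/mesos-go/api/v1/lib" // assumed vendor path
)

func main() {
	shell := false
	value := "/usr/bin/java" // hypothetical executable
	cmd := mesos.CommandInfo{
		Shell:     &shell,
		Value:     &value,
		Arguments: []string{"java", "-jar", "app.jar"}, // argv[0] first
		URIs:      []mesos.CommandInfo_URI{{Value: "https://example.com/app.jar"}},
	}
	fmt.Println(cmd.GetShell(), cmd.GetValue(), cmd.GetArguments())
}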
+ OutputFile *string `protobuf:"bytes,5,opt,name=output_file,json=outputFile" json:"output_file,omitempty"` +} + +func (m *CommandInfo_URI) Reset() { *m = CommandInfo_URI{} } +func (*CommandInfo_URI) ProtoMessage() {} +func (*CommandInfo_URI) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{19, 0} } + +const Default_CommandInfo_URI_Extract bool = true + +func (m *CommandInfo_URI) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func (m *CommandInfo_URI) GetExecutable() bool { + if m != nil && m.Executable != nil { + return *m.Executable + } + return false +} + +func (m *CommandInfo_URI) GetExtract() bool { + if m != nil && m.Extract != nil { + return *m.Extract + } + return Default_CommandInfo_URI_Extract +} + +func (m *CommandInfo_URI) GetCache() bool { + if m != nil && m.Cache != nil { + return *m.Cache + } + return false +} + +func (m *CommandInfo_URI) GetOutputFile() string { + if m != nil && m.OutputFile != nil { + return *m.OutputFile + } + return "" +} + +// * +// Describes information about an executor. +type ExecutorInfo struct { + // For backwards compatibility, if this field is not set when using `LAUNCH` + // operation, Mesos will infer the type by checking if `command` is set + // (`CUSTOM`) or unset (`DEFAULT`). `type` must be set when using + // `LAUNCH_GROUP` operation. + // + // TODO(vinod): Add support for explicitly setting `type` to `DEFAULT` in + // `LAUNCH` operation. + Type ExecutorInfo_Type `protobuf:"varint,15,opt,name=type,enum=mesos.ExecutorInfo_Type" json:"type"` + ExecutorID ExecutorID `protobuf:"bytes,1,req,name=executor_id,json=executorId" json:"executor_id"` + FrameworkID *FrameworkID `protobuf:"bytes,8,opt,name=framework_id,json=frameworkId" json:"framework_id,omitempty"` + Command *CommandInfo `protobuf:"bytes,7,opt,name=command" json:"command,omitempty"` + // Executor provided with a container will launch the container + // with the executor's CommandInfo and we expect the container to + // act as a Mesos executor. + Container *ContainerInfo `protobuf:"bytes,11,opt,name=container" json:"container,omitempty"` + Resources []Resource `protobuf:"bytes,5,rep,name=resources" json:"resources"` + Name *string `protobuf:"bytes,9,opt,name=name" json:"name,omitempty"` + // 'source' is an identifier style string used by frameworks to + // track the source of an executor. This is useful when it's + // possible for different executor ids to be related semantically. + // + // NOTE: 'source' is exposed alongside the resource usage of the + // executor via JSON on the agent. This allows users to import usage + // information into a time series database for monitoring. + // + // This field is deprecated since 1.0. Please use labels for + // free-form metadata instead. + Source *string `protobuf:"bytes,10,opt,name=source" json:"source,omitempty"` + // This field can be used to pass arbitrary bytes to an executor. + Data []byte `protobuf:"bytes,4,opt,name=data" json:"data,omitempty"` + // Service discovery information for the executor. It is not + // interpreted or acted upon by Mesos. It is up to a service + // discovery system to use this information as needed and to handle + // executors without service discovery information. + Discovery *DiscoveryInfo `protobuf:"bytes,12,opt,name=discovery" json:"discovery,omitempty"` + // When shutting down an executor the agent will wait in a + // best-effort manner for the grace period specified here + // before forcibly destroying the container. 
The executor + // must not assume that it will always be allotted the full + // grace period, as the agent may decide to allot a shorter + // period and failures / forcible terminations may occur. + ShutdownGracePeriod *DurationInfo `protobuf:"bytes,13,opt,name=shutdown_grace_period,json=shutdownGracePeriod" json:"shutdown_grace_period,omitempty"` + // Labels are free-form key value pairs which are exposed through + // master and agent endpoints. Labels will not be interpreted or + // acted upon by Mesos itself. As opposed to the data field, labels + // will be kept in memory on master and agent processes. Therefore, + // labels should be used to tag executors with lightweight metadata. + // Labels should not contain duplicate key-value pairs. + Labels *Labels `protobuf:"bytes,14,opt,name=labels" json:"labels,omitempty"` +} + +func (m *ExecutorInfo) Reset() { *m = ExecutorInfo{} } +func (*ExecutorInfo) ProtoMessage() {} +func (*ExecutorInfo) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{20} } + +func (m *ExecutorInfo) GetType() ExecutorInfo_Type { + if m != nil { + return m.Type + } + return ExecutorInfo_UNKNOWN +} + +func (m *ExecutorInfo) GetExecutorID() ExecutorID { + if m != nil { + return m.ExecutorID + } + return ExecutorID{} +} + +func (m *ExecutorInfo) GetFrameworkID() *FrameworkID { + if m != nil { + return m.FrameworkID + } + return nil +} + +func (m *ExecutorInfo) GetCommand() *CommandInfo { + if m != nil { + return m.Command + } + return nil +} + +func (m *ExecutorInfo) GetContainer() *ContainerInfo { + if m != nil { + return m.Container + } + return nil +} + +func (m *ExecutorInfo) GetResources() []Resource { + if m != nil { + return m.Resources + } + return nil +} + +func (m *ExecutorInfo) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ExecutorInfo) GetSource() string { + if m != nil && m.Source != nil { + return *m.Source + } + return "" +} + +func (m *ExecutorInfo) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *ExecutorInfo) GetDiscovery() *DiscoveryInfo { + if m != nil { + return m.Discovery + } + return nil +} + +func (m *ExecutorInfo) GetShutdownGracePeriod() *DurationInfo { + if m != nil { + return m.ShutdownGracePeriod + } + return nil +} + +func (m *ExecutorInfo) GetLabels() *Labels { + if m != nil { + return m.Labels + } + return nil +} + +// * +// Describes a domain. A domain is a collection of hosts that have +// similar characteristics. Mesos currently only supports "fault +// domains", which identify groups of hosts with similar failure +// characteristics. +// +// Frameworks can generally assume that network links between hosts in +// the same fault domain have lower latency, higher bandwidth, and better +// availability than network links between hosts in different domains. +// Schedulers may prefer to place network-intensive workloads in the +// same domain, as this may improve performance. Conversely, a single +// failure that affects a host in a domain may be more likely to +// affect other hosts in the same domain; hence, schedulers may prefer +// to place workloads that require high availability in multiple +// domains. (For example, all the hosts in a single rack might lose +// power or network connectivity simultaneously.) +// +// There are two kinds of fault domains: regions and zones. Regions +// offer the highest degree of fault isolation, but network latency +// between regions is typically high (typically >50 ms). 
Zones offer a +// modest degree of fault isolation along with reasonably low network +// latency (typically <10 ms). +// +// The mapping from fault domains to physical infrastructure is up to +// the operator to configure. In cloud environments, regions and zones +// can be mapped to the "region" and "availability zone" concepts +// exposed by most cloud providers, respectively. In on-premise +// deployments, regions and zones can be mapped to data centers and +// racks, respectively. +// +// Both masters and agents can be configured with domains. Frameworks +// can compare the domains of two hosts to determine if the hosts are +// in the same zone, in different zones in the same region, or in +// different regions. Note that all masters in a given Mesos cluster +// must be in the same region. +type DomainInfo struct { + FaultDomain *DomainInfo_FaultDomain `protobuf:"bytes,1,opt,name=fault_domain,json=faultDomain" json:"fault_domain,omitempty"` +} + +func (m *DomainInfo) Reset() { *m = DomainInfo{} } +func (*DomainInfo) ProtoMessage() {} +func (*DomainInfo) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{21} } + +func (m *DomainInfo) GetFaultDomain() *DomainInfo_FaultDomain { + if m != nil { + return m.FaultDomain + } + return nil +} + +type DomainInfo_FaultDomain struct { + Region DomainInfo_FaultDomain_RegionInfo `protobuf:"bytes,1,req,name=region" json:"region"` + Zone DomainInfo_FaultDomain_ZoneInfo `protobuf:"bytes,2,req,name=zone" json:"zone"` +} + +func (m *DomainInfo_FaultDomain) Reset() { *m = DomainInfo_FaultDomain{} } +func (*DomainInfo_FaultDomain) ProtoMessage() {} +func (*DomainInfo_FaultDomain) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{21, 0} } + +func (m *DomainInfo_FaultDomain) GetRegion() DomainInfo_FaultDomain_RegionInfo { + if m != nil { + return m.Region + } + return DomainInfo_FaultDomain_RegionInfo{} +} + +func (m *DomainInfo_FaultDomain) GetZone() DomainInfo_FaultDomain_ZoneInfo { + if m != nil { + return m.Zone + } + return DomainInfo_FaultDomain_ZoneInfo{} +} + +type DomainInfo_FaultDomain_RegionInfo struct { + Name string `protobuf:"bytes,1,req,name=name" json:"name"` +} + +func (m *DomainInfo_FaultDomain_RegionInfo) Reset() { *m = DomainInfo_FaultDomain_RegionInfo{} } +func (*DomainInfo_FaultDomain_RegionInfo) ProtoMessage() {} +func (*DomainInfo_FaultDomain_RegionInfo) Descriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{21, 0, 0} +} + +func (m *DomainInfo_FaultDomain_RegionInfo) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type DomainInfo_FaultDomain_ZoneInfo struct { + Name string `protobuf:"bytes,1,req,name=name" json:"name"` +} + +func (m *DomainInfo_FaultDomain_ZoneInfo) Reset() { *m = DomainInfo_FaultDomain_ZoneInfo{} } +func (*DomainInfo_FaultDomain_ZoneInfo) ProtoMessage() {} +func (*DomainInfo_FaultDomain_ZoneInfo) Descriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{21, 0, 1} +} + +func (m *DomainInfo_FaultDomain_ZoneInfo) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// * +// Describes a master. This will probably have more fields in the +// future which might be used, for example, to link a framework webui +// to a master webui. +type MasterInfo struct { + ID string `protobuf:"bytes,1,req,name=id" json:"id"` + // The IP address (only IPv4) as a packed 4-bytes integer, + // stored in network order. Deprecated, use `address.ip` instead. 
+ IP uint32 `protobuf:"varint,2,req,name=ip" json:"ip"` + // The TCP port the Master is listening on for incoming + // HTTP requests; deprecated, use `address.port` instead. + Port *uint32 `protobuf:"varint,3,req,name=port,def=5050" json:"port,omitempty"` + // In the default implementation, this will contain information + // about both the IP address, port and Master name; it should really + // not be relied upon by external tooling/frameworks and be + // considered an "internal" implementation field. + PID *string `protobuf:"bytes,4,opt,name=pid" json:"pid,omitempty"` + // The server's hostname, if available; it may be unreliable + // in environments where the DNS configuration does not resolve + // internal hostnames (eg, some public cloud providers). + // Deprecated, use `address.hostname` instead. + Hostname *string `protobuf:"bytes,5,opt,name=hostname" json:"hostname,omitempty"` + // The running Master version, as a string; taken from the + // generated "master/version.hpp". + Version *string `protobuf:"bytes,6,opt,name=version" json:"version,omitempty"` + // The full IP address (supports both IPv4 and IPv6 formats) + // and supersedes the use of `ip`, `port` and `hostname`. + // Since Mesos 0.24. + Address *Address `protobuf:"bytes,7,opt,name=address" json:"address,omitempty"` + // The domain that this master belongs to. All masters in a Mesos + // cluster should belong to the same region. + Domain *DomainInfo `protobuf:"bytes,8,opt,name=domain" json:"domain,omitempty"` + Capabilities []MasterInfo_Capability `protobuf:"bytes,9,rep,name=capabilities" json:"capabilities"` +} + +func (m *MasterInfo) Reset() { *m = MasterInfo{} } +func (*MasterInfo) ProtoMessage() {} +func (*MasterInfo) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{22} } + +const Default_MasterInfo_Port uint32 = 5050 + +func (m *MasterInfo) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *MasterInfo) GetIP() uint32 { + if m != nil { + return m.IP + } + return 0 +} + +func (m *MasterInfo) GetPort() uint32 { + if m != nil && m.Port != nil { + return *m.Port + } + return Default_MasterInfo_Port +} + +func (m *MasterInfo) GetPID() string { + if m != nil && m.PID != nil { + return *m.PID + } + return "" +} + +func (m *MasterInfo) GetHostname() string { + if m != nil && m.Hostname != nil { + return *m.Hostname + } + return "" +} + +func (m *MasterInfo) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +func (m *MasterInfo) GetAddress() *Address { + if m != nil { + return m.Address + } + return nil +} + +func (m *MasterInfo) GetDomain() *DomainInfo { + if m != nil { + return m.Domain + } + return nil +} + +func (m *MasterInfo) GetCapabilities() []MasterInfo_Capability { + if m != nil { + return m.Capabilities + } + return nil +} + +type MasterInfo_Capability struct { + Type MasterInfo_Capability_Type `protobuf:"varint,1,opt,name=type,enum=mesos.MasterInfo_Capability_Type" json:"type"` +} + +func (m *MasterInfo_Capability) Reset() { *m = MasterInfo_Capability{} } +func (*MasterInfo_Capability) ProtoMessage() {} +func (*MasterInfo_Capability) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{22, 0} } + +func (m *MasterInfo_Capability) GetType() MasterInfo_Capability_Type { + if m != nil { + return m.Type + } + return MasterInfo_Capability_UNKNOWN +} + +// * +// Describes an agent. 
Note that the 'id' field is only available +// after an agent is registered with the master, and is made available +// here to facilitate re-registration. +type AgentInfo struct { + Hostname string `protobuf:"bytes,1,req,name=hostname" json:"hostname"` + Port *int32 `protobuf:"varint,8,opt,name=port,def=5051" json:"port,omitempty"` + // The configured resources at the agent. This does not include any + // dynamic reservations or persistent volumes that may currently + // exist at the agent. + Resources []Resource `protobuf:"bytes,3,rep,name=resources" json:"resources"` + Attributes []Attribute `protobuf:"bytes,5,rep,name=attributes" json:"attributes"` + ID *AgentID `protobuf:"bytes,6,opt,name=id" json:"id,omitempty"` + // The domain that this agent belongs to. If the agent's region + // differs from the master's region, it will not appear in resource + // offers to frameworks that have not enabled the REGION_AWARE + // capability. + Domain *DomainInfo `protobuf:"bytes,10,opt,name=domain" json:"domain,omitempty"` +} + +func (m *AgentInfo) Reset() { *m = AgentInfo{} } +func (*AgentInfo) ProtoMessage() {} +func (*AgentInfo) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{23} } + +const Default_AgentInfo_Port int32 = 5051 + +func (m *AgentInfo) GetHostname() string { + if m != nil { + return m.Hostname + } + return "" +} + +func (m *AgentInfo) GetPort() int32 { + if m != nil && m.Port != nil { + return *m.Port + } + return Default_AgentInfo_Port +} + +func (m *AgentInfo) GetResources() []Resource { + if m != nil { + return m.Resources + } + return nil +} + +func (m *AgentInfo) GetAttributes() []Attribute { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *AgentInfo) GetID() *AgentID { + if m != nil { + return m.ID + } + return nil +} + +func (m *AgentInfo) GetDomain() *DomainInfo { + if m != nil { + return m.Domain + } + return nil +} + +type AgentInfo_Capability struct { + // Enum fields should be optional, see: MESOS-4997. + Type AgentInfo_Capability_Type `protobuf:"varint,1,opt,name=type,enum=mesos.AgentInfo_Capability_Type" json:"type"` +} + +func (m *AgentInfo_Capability) Reset() { *m = AgentInfo_Capability{} } +func (*AgentInfo_Capability) ProtoMessage() {} +func (*AgentInfo_Capability) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{23, 0} } + +func (m *AgentInfo_Capability) GetType() AgentInfo_Capability_Type { + if m != nil { + return m.Type + } + return AgentInfo_Capability_UNKNOWN +} + +// * +// Describes the container configuration to run a CSI plugin component. 
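+//
+// A minimal illustrative sketch (not part of the generated file) of wiring a
+// node service; the enum constant name CSIPluginContainerInfo_NODE_SERVICE and
+// the gogo/protobuf proto.String helper are assumptions here:
+//
+//    node := CSIPluginContainerInfo{
+//        Services: []CSIPluginContainerInfo_Service{CSIPluginContainerInfo_NODE_SERVICE},
+//        Command:  &CommandInfo{Value: proto.String("/usr/local/bin/csi-node-plugin")},
+//    }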
+type CSIPluginContainerInfo struct { + Services []CSIPluginContainerInfo_Service `protobuf:"varint,1,rep,name=services,enum=mesos.CSIPluginContainerInfo_Service" json:"services,omitempty"` + Command *CommandInfo `protobuf:"bytes,2,opt,name=command" json:"command,omitempty"` + Resources []Resource `protobuf:"bytes,3,rep,name=resources" json:"resources"` + Container *ContainerInfo `protobuf:"bytes,4,opt,name=container" json:"container,omitempty"` +} + +func (m *CSIPluginContainerInfo) Reset() { *m = CSIPluginContainerInfo{} } +func (*CSIPluginContainerInfo) ProtoMessage() {} +func (*CSIPluginContainerInfo) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{24} } + +func (m *CSIPluginContainerInfo) GetServices() []CSIPluginContainerInfo_Service { + if m != nil { + return m.Services + } + return nil +} + +func (m *CSIPluginContainerInfo) GetCommand() *CommandInfo { + if m != nil { + return m.Command + } + return nil +} + +func (m *CSIPluginContainerInfo) GetResources() []Resource { + if m != nil { + return m.Resources + } + return nil +} + +func (m *CSIPluginContainerInfo) GetContainer() *ContainerInfo { + if m != nil { + return m.Container + } + return nil +} + +// * +// Describes a CSI plugin. +type CSIPluginInfo struct { + // The type of the CSI service. This uniquely identifies a CSI + // implementation. For instance: + // org.apache.mesos.csi.test + // + // Please follow the Java package naming convention + // (https://en.wikipedia.org/wiki/Java_package#Package_naming_conventions) + // to avoid conflicts on type names. + Type string `protobuf:"bytes,1,req,name=type" json:"type"` + // The name of the CSI service. There could be multiple instances of a + // type of CSI service. The name field is used to distinguish these + // instances. It should be a legal Java identifier + // (https://docs.oracle.com/javase/tutorial/java/nutsandbolts/variables.html) + // to avoid conflicts on concatenation of type and name. + Name string `protobuf:"bytes,2,req,name=name" json:"name"` + // A list of container configurations to run CSI plugin components. + // The controller service will be served by the first configuration + // that contains `CONTROLLER_SERVICE`, and the node service will be + // served by the first configuration that contains `NODE_SERVICE`. + Containers []CSIPluginContainerInfo `protobuf:"bytes,3,rep,name=containers" json:"containers"` +} + +func (m *CSIPluginInfo) Reset() { *m = CSIPluginInfo{} } +func (*CSIPluginInfo) ProtoMessage() {} +func (*CSIPluginInfo) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{25} } + +func (m *CSIPluginInfo) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *CSIPluginInfo) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CSIPluginInfo) GetContainers() []CSIPluginContainerInfo { + if m != nil { + return m.Containers + } + return nil +} + +// * +// Describes a resource provider. Note that the 'id' field is only available +// after a resource provider is registered with the master, and is made +// available here to facilitate re-registration. +type ResourceProviderInfo struct { + ID *ResourceProviderID `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Attributes []Attribute `protobuf:"bytes,2,rep,name=attributes" json:"attributes"` + // The type of the resource provider. This uniquely identifies a + // resource provider implementation.
For instance: + // org.apache.mesos.rp.local.storage + // + // Please follow the Java package naming convention + // (https://en.wikipedia.org/wiki/Java_package#Package_naming_conventions) + // to avoid conflicts on type names. + Type string `protobuf:"bytes,3,req,name=type" json:"type"` + // The name of the resource provider. There could be multiple + // instances of a type of resource provider. The name field is used + // to distinguish these instances. It should be a legal Java identifier + // (https://docs.oracle.com/javase/tutorial/java/nutsandbolts/variables.html) + // to avoid conflicts on concatenation of type and name. + Name string `protobuf:"bytes,4,req,name=name" json:"name"` + // The stack of default reservations. If this field is not empty, it + // indicates that resources from this resource provider are reserved + // by default, except for the resources that have been reserved or + // unreserved through operations. The first `ReservationInfo` + // may have type `STATIC` or `DYNAMIC`, but the rest must have + // `DYNAMIC`. One can create a new reservation on top of an existing + // one by pushing a new `ReservationInfo` to the back. The last + // `ReservationInfo` in this stack is the "current" reservation. The + // new reservation's role must be a child of the current one. + DefaultReservations []Resource_ReservationInfo `protobuf:"bytes,5,rep,name=default_reservations,json=defaultReservations" json:"default_reservations"` + Storage *ResourceProviderInfo_Storage `protobuf:"bytes,6,opt,name=storage" json:"storage,omitempty"` +} + +func (m *ResourceProviderInfo) Reset() { *m = ResourceProviderInfo{} } +func (*ResourceProviderInfo) ProtoMessage() {} +func (*ResourceProviderInfo) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{26} } + +func (m *ResourceProviderInfo) GetID() *ResourceProviderID { + if m != nil { + return m.ID + } + return nil +} + +func (m *ResourceProviderInfo) GetAttributes() []Attribute { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *ResourceProviderInfo) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *ResourceProviderInfo) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ResourceProviderInfo) GetDefaultReservations() []Resource_ReservationInfo { + if m != nil { + return m.DefaultReservations + } + return nil +} + +func (m *ResourceProviderInfo) GetStorage() *ResourceProviderInfo_Storage { + if m != nil { + return m.Storage + } + return nil +} + +// Storage resource provider related information. +type ResourceProviderInfo_Storage struct { + Plugin CSIPluginInfo `protobuf:"bytes,1,req,name=plugin" json:"plugin"` +} + +func (m *ResourceProviderInfo_Storage) Reset() { *m = ResourceProviderInfo_Storage{} } +func (*ResourceProviderInfo_Storage) ProtoMessage() {} +func (*ResourceProviderInfo_Storage) Descriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{26, 0} +} + +func (m *ResourceProviderInfo_Storage) GetPlugin() CSIPluginInfo { + if m != nil { + return m.Plugin + } + return CSIPluginInfo{} +} + +// * +// Describes an Attribute or Resource "value". A value is described +// using the standard protocol buffer "union" trick.
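+//
+// A minimal illustrative sketch (not part of the generated file): only the
+// member selected by Type is populated, e.g. a scalar of 0.5 CPUs:
+//
+//    half := Value{
+//        Type:   SCALAR, // package-level Value_Type constant; see GetType below
+//        Scalar: &Value_Scalar{Value: 0.5},
+//    }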
+type Value struct { + Type Value_Type `protobuf:"varint,1,req,name=type,enum=mesos.Value_Type" json:"type"` + Scalar *Value_Scalar `protobuf:"bytes,2,opt,name=scalar" json:"scalar,omitempty"` + Ranges *Value_Ranges `protobuf:"bytes,3,opt,name=ranges" json:"ranges,omitempty"` + Set *Value_Set `protobuf:"bytes,4,opt,name=set" json:"set,omitempty"` + Text *Value_Text `protobuf:"bytes,5,opt,name=text" json:"text,omitempty"` +} + +func (m *Value) Reset() { *m = Value{} } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{27} } + +func (m *Value) GetType() Value_Type { + if m != nil { + return m.Type + } + return SCALAR +} + +func (m *Value) GetScalar() *Value_Scalar { + if m != nil { + return m.Scalar + } + return nil +} + +func (m *Value) GetRanges() *Value_Ranges { + if m != nil { + return m.Ranges + } + return nil +} + +func (m *Value) GetSet() *Value_Set { + if m != nil { + return m.Set + } + return nil +} + +func (m *Value) GetText() *Value_Text { + if m != nil { + return m.Text + } + return nil +} + +type Value_Scalar struct { + // Scalar values are represented using floating point. To reduce + // the chance of unpredictable floating point behavior due to + // roundoff error, Mesos only supports three decimal digits of + // precision for scalar resource values. That is, floating point + // values are converted to a fixed point format that supports + // three decimal digits of precision, and then converted back to + // floating point on output. Any additional precision in scalar + // resource values is discarded (via rounding). + Value float64 `protobuf:"fixed64,1,req,name=value" json:"value"` +} + +func (m *Value_Scalar) Reset() { *m = Value_Scalar{} } +func (*Value_Scalar) ProtoMessage() {} +func (*Value_Scalar) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{27, 0} } + +func (m *Value_Scalar) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +type Value_Range struct { + Begin uint64 `protobuf:"varint,1,req,name=begin" json:"begin"` + End uint64 `protobuf:"varint,2,req,name=end" json:"end"` +} + +func (m *Value_Range) Reset() { *m = Value_Range{} } +func (*Value_Range) ProtoMessage() {} +func (*Value_Range) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{27, 1} } + +func (m *Value_Range) GetBegin() uint64 { + if m != nil { + return m.Begin + } + return 0 +} + +func (m *Value_Range) GetEnd() uint64 { + if m != nil { + return m.End + } + return 0 +} + +type Value_Ranges struct { + Range []Value_Range `protobuf:"bytes,1,rep,name=range" json:"range"` +} + +func (m *Value_Ranges) Reset() { *m = Value_Ranges{} } +func (*Value_Ranges) ProtoMessage() {} +func (*Value_Ranges) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{27, 2} } + +func (m *Value_Ranges) GetRange() []Value_Range { + if m != nil { + return m.Range + } + return nil +} + +type Value_Set struct { + Item []string `protobuf:"bytes,1,rep,name=item" json:"item,omitempty"` +} + +func (m *Value_Set) Reset() { *m = Value_Set{} } +func (*Value_Set) ProtoMessage() {} +func (*Value_Set) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{27, 3} } + +func (m *Value_Set) GetItem() []string { + if m != nil { + return m.Item + } + return nil +} + +type Value_Text struct { + Value string `protobuf:"bytes,1,req,name=value" json:"value"` +} + +func (m *Value_Text) Reset() { *m = Value_Text{} } +func (*Value_Text) ProtoMessage() {} +func (*Value_Text) Descriptor() ([]byte, []int) { return 
fileDescriptorMesos, []int{27, 4} } + +func (m *Value_Text) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// * +// Describes an attribute that can be set on a machine. For now, +// attributes and resources share the same "value" type, but this may +// change in the future and attributes may only be string based. +type Attribute struct { + Name string `protobuf:"bytes,1,req,name=name" json:"name"` + Type Value_Type `protobuf:"varint,2,req,name=type,enum=mesos.Value_Type" json:"type"` + Scalar *Value_Scalar `protobuf:"bytes,3,opt,name=scalar" json:"scalar,omitempty"` + Ranges *Value_Ranges `protobuf:"bytes,4,opt,name=ranges" json:"ranges,omitempty"` + Set *Value_Set `protobuf:"bytes,6,opt,name=set" json:"set,omitempty"` + Text *Value_Text `protobuf:"bytes,5,opt,name=text" json:"text,omitempty"` +} + +func (m *Attribute) Reset() { *m = Attribute{} } +func (*Attribute) ProtoMessage() {} +func (*Attribute) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{28} } + +func (m *Attribute) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Attribute) GetType() Value_Type { + if m != nil { + return m.Type + } + return SCALAR +} + +func (m *Attribute) GetScalar() *Value_Scalar { + if m != nil { + return m.Scalar + } + return nil +} + +func (m *Attribute) GetRanges() *Value_Ranges { + if m != nil { + return m.Ranges + } + return nil +} + +func (m *Attribute) GetSet() *Value_Set { + if m != nil { + return m.Set + } + return nil +} + +func (m *Attribute) GetText() *Value_Text { + if m != nil { + return m.Text + } + return nil +} + +// * +// Describes a resource from a resource provider. The `name` field is +// a string like "cpus" or "mem" that indicates which kind of resource +// this is; the rest of the fields describe the properties of the +// resource. A resource can take on one of three types: scalar +// (double), a list of finite and discrete ranges (e.g., [1-10, +// 20-30]), or a set of items. A resource is described using the +// standard protocol buffer "union" trick. +// +// Note that "disk" and "mem" resources are scalar values expressed in +// megabytes. Fractional "cpus" values are allowed (e.g., "0.5"), +// which correspond to partial shares of a CPU. +type Resource struct { + ProviderID *ResourceProviderID `protobuf:"bytes,12,opt,name=provider_id,json=providerId" json:"provider_id,omitempty"` + Name string `protobuf:"bytes,1,req,name=name" json:"name"` + Type *Value_Type `protobuf:"varint,2,req,name=type,enum=mesos.Value_Type" json:"type,omitempty"` + Scalar *Value_Scalar `protobuf:"bytes,3,opt,name=scalar" json:"scalar,omitempty"` + Ranges *Value_Ranges `protobuf:"bytes,4,opt,name=ranges" json:"ranges,omitempty"` + Set *Value_Set `protobuf:"bytes,5,opt,name=set" json:"set,omitempty"` + // The role that this resource is reserved for. If "*", this indicates + // that the resource is unreserved. Otherwise, the resource will only + // be offered to frameworks that belong to this role. + // + // NOTE: Frameworks must not set this field if `reservations` is set. + // See the 'Resource Format' section for more details. + // + // TODO(mpark): Deprecate once `reservations` is no longer experimental. + Role *string `protobuf:"bytes,6,opt,name=role,def=*" json:"role,omitempty"` + AllocationInfo *Resource_AllocationInfo `protobuf:"bytes,11,opt,name=allocation_info,json=allocationInfo" json:"allocation_info,omitempty"` + // If this is set, this resource was dynamically reserved by an + // operator or a framework. 
Otherwise, this resource is either unreserved + // or statically reserved by an operator via the --resources flag. + // + // NOTE: Frameworks must not set this field if `reservations` is set. + // See the 'Resource Format' section for more details. + // + // TODO(mpark): Deprecate once `reservations` is no longer experimental. + Reservation *Resource_ReservationInfo `protobuf:"bytes,8,opt,name=reservation" json:"reservation,omitempty"` + // The stack of reservations. If this field is empty, it indicates that this + // resource is unreserved. Otherwise, the resource is reserved. The first + // `ReservationInfo` may have type `STATIC` or `DYNAMIC`, but the rest must + // have `DYNAMIC`. One can create a new reservation on top of an existing + // one by pushing a new `ReservationInfo` to the back. The last + // `ReservationInfo` in this stack is the "current" reservation. The new + // reservation's role must be a child of the current reservation's role. + // + // NOTE: Frameworks must not set this field if `reservation` is set. + // See the 'Resource Format' section for more details. + // + // TODO(mpark): Deprecate `role` and `reservation` once this is stable. + Reservations []Resource_ReservationInfo `protobuf:"bytes,13,rep,name=reservations" json:"reservations"` + Disk *Resource_DiskInfo `protobuf:"bytes,7,opt,name=disk" json:"disk,omitempty"` + // If this is set, the resources are revocable, i.e., any tasks or + // executors launched using these resources could get preempted or + // throttled at any time. This could be used by frameworks to run + // best effort tasks that do not need strict uptime or performance + // guarantees. Note that if this is set, 'disk' or 'reservation' + // cannot be set. + Revocable *Resource_RevocableInfo `protobuf:"bytes,9,opt,name=revocable" json:"revocable,omitempty"` + // If this is set, the resources are shared, i.e. multiple tasks + // can be launched using this resource and all of them shall refer + // to the same physical resource on the cluster. Note that only + // persistent volumes can be shared currently. 
+ Shared *Resource_SharedInfo `protobuf:"bytes,10,opt,name=shared" json:"shared,omitempty"` +} + +func (m *Resource) Reset() { *m = Resource{} } +func (*Resource) ProtoMessage() {} +func (*Resource) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{29} } + +const Default_Resource_Role string = "*" + +func (m *Resource) GetProviderID() *ResourceProviderID { + if m != nil { + return m.ProviderID + } + return nil +} + +func (m *Resource) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Resource) GetType() Value_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return SCALAR +} + +func (m *Resource) GetScalar() *Value_Scalar { + if m != nil { + return m.Scalar + } + return nil +} + +func (m *Resource) GetRanges() *Value_Ranges { + if m != nil { + return m.Ranges + } + return nil +} + +func (m *Resource) GetSet() *Value_Set { + if m != nil { + return m.Set + } + return nil +} + +func (m *Resource) GetRole() string { + if m != nil && m.Role != nil { + return *m.Role + } + return Default_Resource_Role +} + +func (m *Resource) GetAllocationInfo() *Resource_AllocationInfo { + if m != nil { + return m.AllocationInfo + } + return nil +} + +func (m *Resource) GetReservation() *Resource_ReservationInfo { + if m != nil { + return m.Reservation + } + return nil +} + +func (m *Resource) GetReservations() []Resource_ReservationInfo { + if m != nil { + return m.Reservations + } + return nil +} + +func (m *Resource) GetDisk() *Resource_DiskInfo { + if m != nil { + return m.Disk + } + return nil +} + +func (m *Resource) GetRevocable() *Resource_RevocableInfo { + if m != nil { + return m.Revocable + } + return nil +} + +func (m *Resource) GetShared() *Resource_SharedInfo { + if m != nil { + return m.Shared + } + return nil +} + +// This was initially introduced to support MULTI_ROLE capable +// frameworks. Frameworks that are not MULTI_ROLE capable can +// continue to assume that the offered resources are allocated +// to their role. +type Resource_AllocationInfo struct { + // If set, this resource is allocated to a role. Note that in the + // future, this may be unset and the scheduler may be responsible + // for allocating to one of its roles. + Role *string `protobuf:"bytes,1,opt,name=role" json:"role,omitempty"` +} + +func (m *Resource_AllocationInfo) Reset() { *m = Resource_AllocationInfo{} } +func (*Resource_AllocationInfo) ProtoMessage() {} +func (*Resource_AllocationInfo) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{29, 0} } + +func (m *Resource_AllocationInfo) GetRole() string { + if m != nil && m.Role != nil { + return *m.Role + } + return "" +} + +type Resource_ReservationInfo struct { + // The type of this reservation. + // + // NOTE: This field must not be set for `Resource.reservation`. + // See the 'Resource Format' section for more details. + Type *Resource_ReservationInfo_Type `protobuf:"varint,4,opt,name=type,enum=mesos.Resource_ReservationInfo_Type" json:"type,omitempty"` + // The role for which this reservation is made. + // + // NOTE: This field must not be set for `Resource.reservation`. + // See the 'Resource Format' section for more details. + Role *string `protobuf:"bytes,3,opt,name=role" json:"role,omitempty"` + // Indicates the principal, if any, of the framework or operator + // that reserved this resource. If reserved by a framework, the + // field should match the `FrameworkInfo.principal`.
It is used in + // conjunction with the `UnreserveResources` ACL to determine + // whether the entity attempting to unreserve this resource is + // permitted to do so. + Principal *string `protobuf:"bytes,1,opt,name=principal" json:"principal,omitempty"` + // Labels are free-form key value pairs that can be used to + // associate arbitrary metadata with a reserved resource. For + // example, frameworks can use labels to identify the intended + // purpose for a portion of the resources the framework has + // reserved at a given agent. Labels should not contain duplicate + // key-value pairs. + Labels *Labels `protobuf:"bytes,2,opt,name=labels" json:"labels,omitempty"` +} + +func (m *Resource_ReservationInfo) Reset() { *m = Resource_ReservationInfo{} } +func (*Resource_ReservationInfo) ProtoMessage() {} +func (*Resource_ReservationInfo) Descriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{29, 1} +} + +func (m *Resource_ReservationInfo) GetType() Resource_ReservationInfo_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return Resource_ReservationInfo_UNKNOWN +} + +func (m *Resource_ReservationInfo) GetRole() string { + if m != nil && m.Role != nil { + return *m.Role + } + return "" +} + +func (m *Resource_ReservationInfo) GetPrincipal() string { + if m != nil && m.Principal != nil { + return *m.Principal + } + return "" +} + +func (m *Resource_ReservationInfo) GetLabels() *Labels { + if m != nil { + return m.Labels + } + return nil +} + +type Resource_DiskInfo struct { + Persistence *Resource_DiskInfo_Persistence `protobuf:"bytes,1,opt,name=persistence" json:"persistence,omitempty"` + // Describes how this disk resource will be mounted in the + // container. If not set, the disk resource will be used as the + // sandbox. Otherwise, it will be mounted according to the + // 'container_path' inside 'volume'. The 'host_path' inside + // 'volume' is ignored. + // NOTE: If 'volume' is set but 'persistence' is not set, the + // volume will be automatically garbage collected after + // task/executor terminates. Currently, if 'persistence' is set, + // 'volume' must be set. + Volume *Volume `protobuf:"bytes,2,opt,name=volume" json:"volume,omitempty"` + Source *Resource_DiskInfo_Source `protobuf:"bytes,3,opt,name=source" json:"source,omitempty"` +} + +func (m *Resource_DiskInfo) Reset() { *m = Resource_DiskInfo{} } +func (*Resource_DiskInfo) ProtoMessage() {} +func (*Resource_DiskInfo) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{29, 2} } + +func (m *Resource_DiskInfo) GetPersistence() *Resource_DiskInfo_Persistence { + if m != nil { + return m.Persistence + } + return nil +} + +func (m *Resource_DiskInfo) GetVolume() *Volume { + if m != nil { + return m.Volume + } + return nil +} + +func (m *Resource_DiskInfo) GetSource() *Resource_DiskInfo_Source { + if m != nil { + return m.Source + } + return nil +} + +// Describes a persistent disk volume. +// +// A persistent disk volume will not be automatically garbage +// collected if the task/executor/agent terminates, but will be +// re-offered to the framework(s) belonging to the 'role'. +// +// NOTE: Currently, we do not allow persistent disk volumes +// without a reservation (i.e., 'role' cannot be '*'). +type Resource_DiskInfo_Persistence struct { + // A unique ID for the persistent disk volume. This ID must be + // unique per role on each agent. 
Although it is possible to use + // the same ID on different agents in the cluster and to reuse + // IDs after a volume with that ID has been destroyed, both + // practices are discouraged. + ID string `protobuf:"bytes,1,req,name=id" json:"id"` + // This field indicates the principal of the operator or + // framework that created this volume. It is used in conjunction + // with the "destroy" ACL to determine whether an entity + // attempting to destroy the volume is permitted to do so. + // + // NOTE: This field should match the FrameworkInfo.principal of + // the framework that created the volume. + Principal *string `protobuf:"bytes,2,opt,name=principal" json:"principal,omitempty"` +} + +func (m *Resource_DiskInfo_Persistence) Reset() { *m = Resource_DiskInfo_Persistence{} } +func (*Resource_DiskInfo_Persistence) ProtoMessage() {} +func (*Resource_DiskInfo_Persistence) Descriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{29, 2, 0} +} + +func (m *Resource_DiskInfo_Persistence) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *Resource_DiskInfo_Persistence) GetPrincipal() string { + if m != nil && m.Principal != nil { + return *m.Principal + } + return "" +} + +// Describes where a disk originates from. +type Resource_DiskInfo_Source struct { + Type Resource_DiskInfo_Source_Type `protobuf:"varint,1,req,name=type,enum=mesos.Resource_DiskInfo_Source_Type" json:"type"` + Path *Resource_DiskInfo_Source_Path `protobuf:"bytes,2,opt,name=path" json:"path,omitempty"` + Mount *Resource_DiskInfo_Source_Mount `protobuf:"bytes,3,opt,name=mount" json:"mount,omitempty"` + // An identifier for this source. This field maps onto CSI + // volume IDs and is not expected to be set by frameworks. + ID *string `protobuf:"bytes,4,opt,name=id" json:"id,omitempty"` + // Additional metadata for this source. This field maps onto CSI + // volume metadata and is not expected to be set by frameworks. + Metadata *Labels `protobuf:"bytes,5,opt,name=metadata" json:"metadata,omitempty"` + // This field serves as an indirection to a set of storage + // vendor specific disk parameters which describe the properties + // of the disk. The operator will set up mappings from a + // profile name to a set of vendor specific disk parameters, and + // the framework will do disk selection based on profile names, + // instead of vendor specific disk parameters. + // + // Also see the DiskProfileAdaptor module.
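+ //
+ // For example (illustrative only), a profile named "fast-ssd" would select
+ // whatever vendor-specific parameters the operator has mapped to that name.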
+ Profile *string `protobuf:"bytes,6,opt,name=profile" json:"profile,omitempty"` +} + +func (m *Resource_DiskInfo_Source) Reset() { *m = Resource_DiskInfo_Source{} } +func (*Resource_DiskInfo_Source) ProtoMessage() {} +func (*Resource_DiskInfo_Source) Descriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{29, 2, 1} +} + +func (m *Resource_DiskInfo_Source) GetType() Resource_DiskInfo_Source_Type { + if m != nil { + return m.Type + } + return Resource_DiskInfo_Source_UNKNOWN +} + +func (m *Resource_DiskInfo_Source) GetPath() *Resource_DiskInfo_Source_Path { + if m != nil { + return m.Path + } + return nil +} + +func (m *Resource_DiskInfo_Source) GetMount() *Resource_DiskInfo_Source_Mount { + if m != nil { + return m.Mount + } + return nil +} + +func (m *Resource_DiskInfo_Source) GetID() string { + if m != nil && m.ID != nil { + return *m.ID + } + return "" +} + +func (m *Resource_DiskInfo_Source) GetMetadata() *Labels { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *Resource_DiskInfo_Source) GetProfile() string { + if m != nil && m.Profile != nil { + return *m.Profile + } + return "" +} + +// A folder that can be located on a separate disk device. This +// can be shared and carved up as necessary between frameworks. +type Resource_DiskInfo_Source_Path struct { + // Path to the folder (e.g., /mnt/raid/disk0). If the path is a + // relative path, it is relative to the agent work directory. + Root *string `protobuf:"bytes,1,opt,name=root" json:"root,omitempty"` +} + +func (m *Resource_DiskInfo_Source_Path) Reset() { *m = Resource_DiskInfo_Source_Path{} } +func (*Resource_DiskInfo_Source_Path) ProtoMessage() {} +func (*Resource_DiskInfo_Source_Path) Descriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{29, 2, 1, 0} +} + +func (m *Resource_DiskInfo_Source_Path) GetRoot() string { + if m != nil && m.Root != nil { + return *m.Root + } + return "" +} + +// A mounted file-system set up by the Agent administrator. This +// can only be used exclusively: a framework cannot accept a +// partial amount of this disk. +type Resource_DiskInfo_Source_Mount struct { + // Path to mount point (e.g., /mnt/raid/disk0). If the path is a + // relative path, it is relative to the agent work directory. + Root *string `protobuf:"bytes,1,opt,name=root" json:"root,omitempty"` +} + +func (m *Resource_DiskInfo_Source_Mount) Reset() { *m = Resource_DiskInfo_Source_Mount{} } +func (*Resource_DiskInfo_Source_Mount) ProtoMessage() {} +func (*Resource_DiskInfo_Source_Mount) Descriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{29, 2, 1, 1} +} + +func (m *Resource_DiskInfo_Source_Mount) GetRoot() string { + if m != nil && m.Root != nil { + return *m.Root + } + return "" +} + +type Resource_RevocableInfo struct { +} + +func (m *Resource_RevocableInfo) Reset() { *m = Resource_RevocableInfo{} } +func (*Resource_RevocableInfo) ProtoMessage() {} +func (*Resource_RevocableInfo) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{29, 3} } + +// Allow the resource to be shared across tasks. 
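+//
+// A minimal illustrative sketch (not part of the generated file): a framework
+// marks a persistent volume as shareable by setting this field on a Resource
+// (here res is assumed to be a Resource that already carries DiskInfo with
+// Persistence set):
+//
+//    res.Shared = &Resource_SharedInfo{}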
+type Resource_SharedInfo struct { +} + +func (m *Resource_SharedInfo) Reset() { *m = Resource_SharedInfo{} } +func (*Resource_SharedInfo) ProtoMessage() {} +func (*Resource_SharedInfo) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{29, 4} } + +// * +// When the network bandwidth caps are enabled and the container +// is over its limit, outbound packets may be either delayed or +// dropped completely either because it exceeds the maximum bandwidth +// allocation for a single container (the cap) or because the combined +// network traffic of multiple containers on the host exceeds the +// transmit capacity of the host (the share). We can report the +// following statistics for each of these conditions exported directly +// from the Linux Traffic Control Queueing Discipline. +// +// id : name of the limiter, e.g. 'tx_bw_cap' +// backlog : number of packets currently delayed +// bytes : total bytes seen +// drops : number of packets dropped in total +// overlimits : number of packets which exceeded allocation +// packets : total packets seen +// qlen : number of packets currently queued +// rate_bps : throughput in bytes/sec +// rate_pps : throughput in packets/sec +// requeues : number of times a packet has been delayed due to +// locking or device contention issues +// +// More information on the operation of Linux Traffic Control can be +// found at http://www.lartc.org/lartc.html. +type TrafficControlStatistics struct { + ID string `protobuf:"bytes,1,req,name=id" json:"id"` + Backlog *uint64 `protobuf:"varint,2,opt,name=backlog" json:"backlog,omitempty"` + Bytes *uint64 `protobuf:"varint,3,opt,name=bytes" json:"bytes,omitempty"` + Drops *uint64 `protobuf:"varint,4,opt,name=drops" json:"drops,omitempty"` + Overlimits *uint64 `protobuf:"varint,5,opt,name=overlimits" json:"overlimits,omitempty"` + Packets *uint64 `protobuf:"varint,6,opt,name=packets" json:"packets,omitempty"` + Qlen *uint64 `protobuf:"varint,7,opt,name=qlen" json:"qlen,omitempty"` + RateBPS *uint64 `protobuf:"varint,8,opt,name=ratebps" json:"ratebps,omitempty"` + RatePPS *uint64 `protobuf:"varint,9,opt,name=ratepps" json:"ratepps,omitempty"` + Requeues *uint64 `protobuf:"varint,10,opt,name=requeues" json:"requeues,omitempty"` +} + +func (m *TrafficControlStatistics) Reset() { *m = TrafficControlStatistics{} } +func (*TrafficControlStatistics) ProtoMessage() {} +func (*TrafficControlStatistics) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{30} } + +func (m *TrafficControlStatistics) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *TrafficControlStatistics) GetBacklog() uint64 { + if m != nil && m.Backlog != nil { + return *m.Backlog + } + return 0 +} + +func (m *TrafficControlStatistics) GetBytes() uint64 { + if m != nil && m.Bytes != nil { + return *m.Bytes + } + return 0 +} + +func (m *TrafficControlStatistics) GetDrops() uint64 { + if m != nil && m.Drops != nil { + return *m.Drops + } + return 0 +} + +func (m *TrafficControlStatistics) GetOverlimits() uint64 { + if m != nil && m.Overlimits != nil { + return *m.Overlimits + } + return 0 +} + +func (m *TrafficControlStatistics) GetPackets() uint64 { + if m != nil && m.Packets != nil { + return *m.Packets + } + return 0 +} + +func (m *TrafficControlStatistics) GetQlen() uint64 { + if m != nil && m.Qlen != nil { + return *m.Qlen + } + return 0 +} + +func (m *TrafficControlStatistics) GetRateBPS() uint64 { + if m != nil && m.RateBPS != nil { + return *m.RateBPS + } + return 0 +} + +func (m 
*TrafficControlStatistics) GetRatePPS() uint64 { + if m != nil && m.RatePPS != nil { + return *m.RatePPS + } + return 0 +} + +func (m *TrafficControlStatistics) GetRequeues() uint64 { + if m != nil && m.Requeues != nil { + return *m.Requeues + } + return 0 +} + +type IpStatistics struct { + Forwarding *int64 `protobuf:"varint,1,opt,name=Forwarding" json:"Forwarding,omitempty"` + DefaultTTL *int64 `protobuf:"varint,2,opt,name=DefaultTTL" json:"DefaultTTL,omitempty"` + InReceives *int64 `protobuf:"varint,3,opt,name=InReceives" json:"InReceives,omitempty"` + InHdrErrors *int64 `protobuf:"varint,4,opt,name=InHdrErrors" json:"InHdrErrors,omitempty"` + InAddrErrors *int64 `protobuf:"varint,5,opt,name=InAddrErrors" json:"InAddrErrors,omitempty"` + ForwDatagrams *int64 `protobuf:"varint,6,opt,name=ForwDatagrams" json:"ForwDatagrams,omitempty"` + InUnknownProtos *int64 `protobuf:"varint,7,opt,name=InUnknownProtos" json:"InUnknownProtos,omitempty"` + InDiscards *int64 `protobuf:"varint,8,opt,name=InDiscards" json:"InDiscards,omitempty"` + InDelivers *int64 `protobuf:"varint,9,opt,name=InDelivers" json:"InDelivers,omitempty"` + OutRequests *int64 `protobuf:"varint,10,opt,name=OutRequests" json:"OutRequests,omitempty"` + OutDiscards *int64 `protobuf:"varint,11,opt,name=OutDiscards" json:"OutDiscards,omitempty"` + OutNoRoutes *int64 `protobuf:"varint,12,opt,name=OutNoRoutes" json:"OutNoRoutes,omitempty"` + ReasmTimeout *int64 `protobuf:"varint,13,opt,name=ReasmTimeout" json:"ReasmTimeout,omitempty"` + ReasmReqds *int64 `protobuf:"varint,14,opt,name=ReasmReqds" json:"ReasmReqds,omitempty"` + ReasmOKs *int64 `protobuf:"varint,15,opt,name=ReasmOKs" json:"ReasmOKs,omitempty"` + ReasmFails *int64 `protobuf:"varint,16,opt,name=ReasmFails" json:"ReasmFails,omitempty"` + FragOKs *int64 `protobuf:"varint,17,opt,name=FragOKs" json:"FragOKs,omitempty"` + FragFails *int64 `protobuf:"varint,18,opt,name=FragFails" json:"FragFails,omitempty"` + FragCreates *int64 `protobuf:"varint,19,opt,name=FragCreates" json:"FragCreates,omitempty"` +} + +func (m *IpStatistics) Reset() { *m = IpStatistics{} } +func (*IpStatistics) ProtoMessage() {} +func (*IpStatistics) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{31} } + +func (m *IpStatistics) GetForwarding() int64 { + if m != nil && m.Forwarding != nil { + return *m.Forwarding + } + return 0 +} + +func (m *IpStatistics) GetDefaultTTL() int64 { + if m != nil && m.DefaultTTL != nil { + return *m.DefaultTTL + } + return 0 +} + +func (m *IpStatistics) GetInReceives() int64 { + if m != nil && m.InReceives != nil { + return *m.InReceives + } + return 0 +} + +func (m *IpStatistics) GetInHdrErrors() int64 { + if m != nil && m.InHdrErrors != nil { + return *m.InHdrErrors + } + return 0 +} + +func (m *IpStatistics) GetInAddrErrors() int64 { + if m != nil && m.InAddrErrors != nil { + return *m.InAddrErrors + } + return 0 +} + +func (m *IpStatistics) GetForwDatagrams() int64 { + if m != nil && m.ForwDatagrams != nil { + return *m.ForwDatagrams + } + return 0 +} + +func (m *IpStatistics) GetInUnknownProtos() int64 { + if m != nil && m.InUnknownProtos != nil { + return *m.InUnknownProtos + } + return 0 +} + +func (m *IpStatistics) GetInDiscards() int64 { + if m != nil && m.InDiscards != nil { + return *m.InDiscards + } + return 0 +} + +func (m *IpStatistics) GetInDelivers() int64 { + if m != nil && m.InDelivers != nil { + return *m.InDelivers + } + return 0 +} + +func (m *IpStatistics) GetOutRequests() int64 { + if m != nil && m.OutRequests != nil { + return *m.OutRequests 
+ } + return 0 +} + +func (m *IpStatistics) GetOutDiscards() int64 { + if m != nil && m.OutDiscards != nil { + return *m.OutDiscards + } + return 0 +} + +func (m *IpStatistics) GetOutNoRoutes() int64 { + if m != nil && m.OutNoRoutes != nil { + return *m.OutNoRoutes + } + return 0 +} + +func (m *IpStatistics) GetReasmTimeout() int64 { + if m != nil && m.ReasmTimeout != nil { + return *m.ReasmTimeout + } + return 0 +} + +func (m *IpStatistics) GetReasmReqds() int64 { + if m != nil && m.ReasmReqds != nil { + return *m.ReasmReqds + } + return 0 +} + +func (m *IpStatistics) GetReasmOKs() int64 { + if m != nil && m.ReasmOKs != nil { + return *m.ReasmOKs + } + return 0 +} + +func (m *IpStatistics) GetReasmFails() int64 { + if m != nil && m.ReasmFails != nil { + return *m.ReasmFails + } + return 0 +} + +func (m *IpStatistics) GetFragOKs() int64 { + if m != nil && m.FragOKs != nil { + return *m.FragOKs + } + return 0 +} + +func (m *IpStatistics) GetFragFails() int64 { + if m != nil && m.FragFails != nil { + return *m.FragFails + } + return 0 +} + +func (m *IpStatistics) GetFragCreates() int64 { + if m != nil && m.FragCreates != nil { + return *m.FragCreates + } + return 0 +} + +type IcmpStatistics struct { + InMsgs *int64 `protobuf:"varint,1,opt,name=InMsgs" json:"InMsgs,omitempty"` + InErrors *int64 `protobuf:"varint,2,opt,name=InErrors" json:"InErrors,omitempty"` + InCsumErrors *int64 `protobuf:"varint,3,opt,name=InCsumErrors" json:"InCsumErrors,omitempty"` + InDestUnreachs *int64 `protobuf:"varint,4,opt,name=InDestUnreachs" json:"InDestUnreachs,omitempty"` + InTimeExcds *int64 `protobuf:"varint,5,opt,name=InTimeExcds" json:"InTimeExcds,omitempty"` + InParmProbs *int64 `protobuf:"varint,6,opt,name=InParmProbs" json:"InParmProbs,omitempty"` + InSrcQuenchs *int64 `protobuf:"varint,7,opt,name=InSrcQuenchs" json:"InSrcQuenchs,omitempty"` + InRedirects *int64 `protobuf:"varint,8,opt,name=InRedirects" json:"InRedirects,omitempty"` + InEchos *int64 `protobuf:"varint,9,opt,name=InEchos" json:"InEchos,omitempty"` + InEchoReps *int64 `protobuf:"varint,10,opt,name=InEchoReps" json:"InEchoReps,omitempty"` + InTimestamps *int64 `protobuf:"varint,11,opt,name=InTimestamps" json:"InTimestamps,omitempty"` + InTimestampReps *int64 `protobuf:"varint,12,opt,name=InTimestampReps" json:"InTimestampReps,omitempty"` + InAddrMasks *int64 `protobuf:"varint,13,opt,name=InAddrMasks" json:"InAddrMasks,omitempty"` + InAddrMaskReps *int64 `protobuf:"varint,14,opt,name=InAddrMaskReps" json:"InAddrMaskReps,omitempty"` + OutMsgs *int64 `protobuf:"varint,15,opt,name=OutMsgs" json:"OutMsgs,omitempty"` + OutErrors *int64 `protobuf:"varint,16,opt,name=OutErrors" json:"OutErrors,omitempty"` + OutDestUnreachs *int64 `protobuf:"varint,17,opt,name=OutDestUnreachs" json:"OutDestUnreachs,omitempty"` + OutTimeExcds *int64 `protobuf:"varint,18,opt,name=OutTimeExcds" json:"OutTimeExcds,omitempty"` + OutParmProbs *int64 `protobuf:"varint,19,opt,name=OutParmProbs" json:"OutParmProbs,omitempty"` + OutSrcQuenchs *int64 `protobuf:"varint,20,opt,name=OutSrcQuenchs" json:"OutSrcQuenchs,omitempty"` + OutRedirects *int64 `protobuf:"varint,21,opt,name=OutRedirects" json:"OutRedirects,omitempty"` + OutEchos *int64 `protobuf:"varint,22,opt,name=OutEchos" json:"OutEchos,omitempty"` + OutEchoReps *int64 `protobuf:"varint,23,opt,name=OutEchoReps" json:"OutEchoReps,omitempty"` + OutTimestamps *int64 `protobuf:"varint,24,opt,name=OutTimestamps" json:"OutTimestamps,omitempty"` + OutTimestampReps *int64 `protobuf:"varint,25,opt,name=OutTimestampReps" 
json:"OutTimestampReps,omitempty"` + OutAddrMasks *int64 `protobuf:"varint,26,opt,name=OutAddrMasks" json:"OutAddrMasks,omitempty"` + OutAddrMaskReps *int64 `protobuf:"varint,27,opt,name=OutAddrMaskReps" json:"OutAddrMaskReps,omitempty"` +} + +func (m *IcmpStatistics) Reset() { *m = IcmpStatistics{} } +func (*IcmpStatistics) ProtoMessage() {} +func (*IcmpStatistics) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{32} } + +func (m *IcmpStatistics) GetInMsgs() int64 { + if m != nil && m.InMsgs != nil { + return *m.InMsgs + } + return 0 +} + +func (m *IcmpStatistics) GetInErrors() int64 { + if m != nil && m.InErrors != nil { + return *m.InErrors + } + return 0 +} + +func (m *IcmpStatistics) GetInCsumErrors() int64 { + if m != nil && m.InCsumErrors != nil { + return *m.InCsumErrors + } + return 0 +} + +func (m *IcmpStatistics) GetInDestUnreachs() int64 { + if m != nil && m.InDestUnreachs != nil { + return *m.InDestUnreachs + } + return 0 +} + +func (m *IcmpStatistics) GetInTimeExcds() int64 { + if m != nil && m.InTimeExcds != nil { + return *m.InTimeExcds + } + return 0 +} + +func (m *IcmpStatistics) GetInParmProbs() int64 { + if m != nil && m.InParmProbs != nil { + return *m.InParmProbs + } + return 0 +} + +func (m *IcmpStatistics) GetInSrcQuenchs() int64 { + if m != nil && m.InSrcQuenchs != nil { + return *m.InSrcQuenchs + } + return 0 +} + +func (m *IcmpStatistics) GetInRedirects() int64 { + if m != nil && m.InRedirects != nil { + return *m.InRedirects + } + return 0 +} + +func (m *IcmpStatistics) GetInEchos() int64 { + if m != nil && m.InEchos != nil { + return *m.InEchos + } + return 0 +} + +func (m *IcmpStatistics) GetInEchoReps() int64 { + if m != nil && m.InEchoReps != nil { + return *m.InEchoReps + } + return 0 +} + +func (m *IcmpStatistics) GetInTimestamps() int64 { + if m != nil && m.InTimestamps != nil { + return *m.InTimestamps + } + return 0 +} + +func (m *IcmpStatistics) GetInTimestampReps() int64 { + if m != nil && m.InTimestampReps != nil { + return *m.InTimestampReps + } + return 0 +} + +func (m *IcmpStatistics) GetInAddrMasks() int64 { + if m != nil && m.InAddrMasks != nil { + return *m.InAddrMasks + } + return 0 +} + +func (m *IcmpStatistics) GetInAddrMaskReps() int64 { + if m != nil && m.InAddrMaskReps != nil { + return *m.InAddrMaskReps + } + return 0 +} + +func (m *IcmpStatistics) GetOutMsgs() int64 { + if m != nil && m.OutMsgs != nil { + return *m.OutMsgs + } + return 0 +} + +func (m *IcmpStatistics) GetOutErrors() int64 { + if m != nil && m.OutErrors != nil { + return *m.OutErrors + } + return 0 +} + +func (m *IcmpStatistics) GetOutDestUnreachs() int64 { + if m != nil && m.OutDestUnreachs != nil { + return *m.OutDestUnreachs + } + return 0 +} + +func (m *IcmpStatistics) GetOutTimeExcds() int64 { + if m != nil && m.OutTimeExcds != nil { + return *m.OutTimeExcds + } + return 0 +} + +func (m *IcmpStatistics) GetOutParmProbs() int64 { + if m != nil && m.OutParmProbs != nil { + return *m.OutParmProbs + } + return 0 +} + +func (m *IcmpStatistics) GetOutSrcQuenchs() int64 { + if m != nil && m.OutSrcQuenchs != nil { + return *m.OutSrcQuenchs + } + return 0 +} + +func (m *IcmpStatistics) GetOutRedirects() int64 { + if m != nil && m.OutRedirects != nil { + return *m.OutRedirects + } + return 0 +} + +func (m *IcmpStatistics) GetOutEchos() int64 { + if m != nil && m.OutEchos != nil { + return *m.OutEchos + } + return 0 +} + +func (m *IcmpStatistics) GetOutEchoReps() int64 { + if m != nil && m.OutEchoReps != nil { + return *m.OutEchoReps + } + return 0 +} + +func 
(m *IcmpStatistics) GetOutTimestamps() int64 { + if m != nil && m.OutTimestamps != nil { + return *m.OutTimestamps + } + return 0 +} + +func (m *IcmpStatistics) GetOutTimestampReps() int64 { + if m != nil && m.OutTimestampReps != nil { + return *m.OutTimestampReps + } + return 0 +} + +func (m *IcmpStatistics) GetOutAddrMasks() int64 { + if m != nil && m.OutAddrMasks != nil { + return *m.OutAddrMasks + } + return 0 +} + +func (m *IcmpStatistics) GetOutAddrMaskReps() int64 { + if m != nil && m.OutAddrMaskReps != nil { + return *m.OutAddrMaskReps + } + return 0 +} + +type TcpStatistics struct { + RtoAlgorithm *int64 `protobuf:"varint,1,opt,name=RtoAlgorithm" json:"RtoAlgorithm,omitempty"` + RtoMin *int64 `protobuf:"varint,2,opt,name=RtoMin" json:"RtoMin,omitempty"` + RtoMax *int64 `protobuf:"varint,3,opt,name=RtoMax" json:"RtoMax,omitempty"` + MaxConn *int64 `protobuf:"varint,4,opt,name=MaxConn" json:"MaxConn,omitempty"` + ActiveOpens *int64 `protobuf:"varint,5,opt,name=ActiveOpens" json:"ActiveOpens,omitempty"` + PassiveOpens *int64 `protobuf:"varint,6,opt,name=PassiveOpens" json:"PassiveOpens,omitempty"` + AttemptFails *int64 `protobuf:"varint,7,opt,name=AttemptFails" json:"AttemptFails,omitempty"` + EstabResets *int64 `protobuf:"varint,8,opt,name=EstabResets" json:"EstabResets,omitempty"` + CurrEstab *int64 `protobuf:"varint,9,opt,name=CurrEstab" json:"CurrEstab,omitempty"` + InSegs *int64 `protobuf:"varint,10,opt,name=InSegs" json:"InSegs,omitempty"` + OutSegs *int64 `protobuf:"varint,11,opt,name=OutSegs" json:"OutSegs,omitempty"` + RetransSegs *int64 `protobuf:"varint,12,opt,name=RetransSegs" json:"RetransSegs,omitempty"` + InErrs *int64 `protobuf:"varint,13,opt,name=InErrs" json:"InErrs,omitempty"` + OutRsts *int64 `protobuf:"varint,14,opt,name=OutRsts" json:"OutRsts,omitempty"` + InCsumErrors *int64 `protobuf:"varint,15,opt,name=InCsumErrors" json:"InCsumErrors,omitempty"` +} + +func (m *TcpStatistics) Reset() { *m = TcpStatistics{} } +func (*TcpStatistics) ProtoMessage() {} +func (*TcpStatistics) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{33} } + +func (m *TcpStatistics) GetRtoAlgorithm() int64 { + if m != nil && m.RtoAlgorithm != nil { + return *m.RtoAlgorithm + } + return 0 +} + +func (m *TcpStatistics) GetRtoMin() int64 { + if m != nil && m.RtoMin != nil { + return *m.RtoMin + } + return 0 +} + +func (m *TcpStatistics) GetRtoMax() int64 { + if m != nil && m.RtoMax != nil { + return *m.RtoMax + } + return 0 +} + +func (m *TcpStatistics) GetMaxConn() int64 { + if m != nil && m.MaxConn != nil { + return *m.MaxConn + } + return 0 +} + +func (m *TcpStatistics) GetActiveOpens() int64 { + if m != nil && m.ActiveOpens != nil { + return *m.ActiveOpens + } + return 0 +} + +func (m *TcpStatistics) GetPassiveOpens() int64 { + if m != nil && m.PassiveOpens != nil { + return *m.PassiveOpens + } + return 0 +} + +func (m *TcpStatistics) GetAttemptFails() int64 { + if m != nil && m.AttemptFails != nil { + return *m.AttemptFails + } + return 0 +} + +func (m *TcpStatistics) GetEstabResets() int64 { + if m != nil && m.EstabResets != nil { + return *m.EstabResets + } + return 0 +} + +func (m *TcpStatistics) GetCurrEstab() int64 { + if m != nil && m.CurrEstab != nil { + return *m.CurrEstab + } + return 0 +} + +func (m *TcpStatistics) GetInSegs() int64 { + if m != nil && m.InSegs != nil { + return *m.InSegs + } + return 0 +} + +func (m *TcpStatistics) GetOutSegs() int64 { + if m != nil && m.OutSegs != nil { + return *m.OutSegs + } + return 0 +} + +func (m *TcpStatistics) 
GetRetransSegs() int64 { + if m != nil && m.RetransSegs != nil { + return *m.RetransSegs + } + return 0 +} + +func (m *TcpStatistics) GetInErrs() int64 { + if m != nil && m.InErrs != nil { + return *m.InErrs + } + return 0 +} + +func (m *TcpStatistics) GetOutRsts() int64 { + if m != nil && m.OutRsts != nil { + return *m.OutRsts + } + return 0 +} + +func (m *TcpStatistics) GetInCsumErrors() int64 { + if m != nil && m.InCsumErrors != nil { + return *m.InCsumErrors + } + return 0 +} + +type UdpStatistics struct { + InDatagrams *int64 `protobuf:"varint,1,opt,name=InDatagrams" json:"InDatagrams,omitempty"` + NoPorts *int64 `protobuf:"varint,2,opt,name=NoPorts" json:"NoPorts,omitempty"` + InErrors *int64 `protobuf:"varint,3,opt,name=InErrors" json:"InErrors,omitempty"` + OutDatagrams *int64 `protobuf:"varint,4,opt,name=OutDatagrams" json:"OutDatagrams,omitempty"` + RcvbufErrors *int64 `protobuf:"varint,5,opt,name=RcvbufErrors" json:"RcvbufErrors,omitempty"` + SndbufErrors *int64 `protobuf:"varint,6,opt,name=SndbufErrors" json:"SndbufErrors,omitempty"` + InCsumErrors *int64 `protobuf:"varint,7,opt,name=InCsumErrors" json:"InCsumErrors,omitempty"` + IgnoredMulti *int64 `protobuf:"varint,8,opt,name=IgnoredMulti" json:"IgnoredMulti,omitempty"` +} + +func (m *UdpStatistics) Reset() { *m = UdpStatistics{} } +func (*UdpStatistics) ProtoMessage() {} +func (*UdpStatistics) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{34} } + +func (m *UdpStatistics) GetInDatagrams() int64 { + if m != nil && m.InDatagrams != nil { + return *m.InDatagrams + } + return 0 +} + +func (m *UdpStatistics) GetNoPorts() int64 { + if m != nil && m.NoPorts != nil { + return *m.NoPorts + } + return 0 +} + +func (m *UdpStatistics) GetInErrors() int64 { + if m != nil && m.InErrors != nil { + return *m.InErrors + } + return 0 +} + +func (m *UdpStatistics) GetOutDatagrams() int64 { + if m != nil && m.OutDatagrams != nil { + return *m.OutDatagrams + } + return 0 +} + +func (m *UdpStatistics) GetRcvbufErrors() int64 { + if m != nil && m.RcvbufErrors != nil { + return *m.RcvbufErrors + } + return 0 +} + +func (m *UdpStatistics) GetSndbufErrors() int64 { + if m != nil && m.SndbufErrors != nil { + return *m.SndbufErrors + } + return 0 +} + +func (m *UdpStatistics) GetInCsumErrors() int64 { + if m != nil && m.InCsumErrors != nil { + return *m.InCsumErrors + } + return 0 +} + +func (m *UdpStatistics) GetIgnoredMulti() int64 { + if m != nil && m.IgnoredMulti != nil { + return *m.IgnoredMulti + } + return 0 +} + +type SNMPStatistics struct { + IPStats *IpStatistics `protobuf:"bytes,1,opt,name=ip_stats,json=ipStats" json:"ip_stats,omitempty"` + ICMPStats *IcmpStatistics `protobuf:"bytes,2,opt,name=icmp_stats,json=icmpStats" json:"icmp_stats,omitempty"` + TCPStats *TcpStatistics `protobuf:"bytes,3,opt,name=tcp_stats,json=tcpStats" json:"tcp_stats,omitempty"` + UDPStats *UdpStatistics `protobuf:"bytes,4,opt,name=udp_stats,json=udpStats" json:"udp_stats,omitempty"` +} + +func (m *SNMPStatistics) Reset() { *m = SNMPStatistics{} } +func (*SNMPStatistics) ProtoMessage() {} +func (*SNMPStatistics) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{35} } + +func (m *SNMPStatistics) GetIPStats() *IpStatistics { + if m != nil { + return m.IPStats + } + return nil +} + +func (m *SNMPStatistics) GetICMPStats() *IcmpStatistics { + if m != nil { + return m.ICMPStats + } + return nil +} + +func (m *SNMPStatistics) GetTCPStats() *TcpStatistics { + if m != nil { + return m.TCPStats + } + return nil +} + +func (m *SNMPStatistics) 
GetUDPStats() *UdpStatistics { + if m != nil { + return m.UDPStats + } + return nil +} + +type DiskStatistics struct { + Source *Resource_DiskInfo_Source `protobuf:"bytes,1,opt,name=source" json:"source,omitempty"` + Persistence *Resource_DiskInfo_Persistence `protobuf:"bytes,2,opt,name=persistence" json:"persistence,omitempty"` + LimitBytes *uint64 `protobuf:"varint,3,opt,name=limit_bytes,json=limitBytes" json:"limit_bytes,omitempty"` + UsedBytes *uint64 `protobuf:"varint,4,opt,name=used_bytes,json=usedBytes" json:"used_bytes,omitempty"` +} + +func (m *DiskStatistics) Reset() { *m = DiskStatistics{} } +func (*DiskStatistics) ProtoMessage() {} +func (*DiskStatistics) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{36} } + +func (m *DiskStatistics) GetSource() *Resource_DiskInfo_Source { + if m != nil { + return m.Source + } + return nil +} + +func (m *DiskStatistics) GetPersistence() *Resource_DiskInfo_Persistence { + if m != nil { + return m.Persistence + } + return nil +} + +func (m *DiskStatistics) GetLimitBytes() uint64 { + if m != nil && m.LimitBytes != nil { + return *m.LimitBytes + } + return 0 +} + +func (m *DiskStatistics) GetUsedBytes() uint64 { + if m != nil && m.UsedBytes != nil { + return *m.UsedBytes + } + return 0 +} + +// * +// A snapshot of resource usage statistics. +type ResourceStatistics struct { + Timestamp float64 `protobuf:"fixed64,1,req,name=timestamp" json:"timestamp"` + Processes *uint32 `protobuf:"varint,30,opt,name=processes" json:"processes,omitempty"` + Threads *uint32 `protobuf:"varint,31,opt,name=threads" json:"threads,omitempty"` + // CPU Usage Information: + // Total CPU time spent in user mode, and kernel mode. + CPUsUserTimeSecs *float64 `protobuf:"fixed64,2,opt,name=cpus_user_time_secs,json=cpusUserTimeSecs" json:"cpus_user_time_secs,omitempty"` + CPUsSystemTimeSecs *float64 `protobuf:"fixed64,3,opt,name=cpus_system_time_secs,json=cpusSystemTimeSecs" json:"cpus_system_time_secs,omitempty"` + // Number of CPUs allocated. + CPUsLimit *float64 `protobuf:"fixed64,4,opt,name=cpus_limit,json=cpusLimit" json:"cpus_limit,omitempty"` + // cpu.stat on process throttling (for contention issues). + CPUsNrPeriods *uint32 `protobuf:"varint,7,opt,name=cpus_nr_periods,json=cpusNrPeriods" json:"cpus_nr_periods,omitempty"` + CPUsNrThrottled *uint32 `protobuf:"varint,8,opt,name=cpus_nr_throttled,json=cpusNrThrottled" json:"cpus_nr_throttled,omitempty"` + CPUsThrottledTimeSecs *float64 `protobuf:"fixed64,9,opt,name=cpus_throttled_time_secs,json=cpusThrottledTimeSecs" json:"cpus_throttled_time_secs,omitempty"` + // mem_total_bytes was added in 0.23.0 to represent the total memory + // of a process in RAM (as opposed to in Swap). This was previously + // reported as mem_rss_bytes, which was also changed in 0.23.0 to + // represent only the anonymous memory usage, to keep in sync with + // Linux kernel's (arguably erroneous) use of terminology. + MemTotalBytes *uint64 `protobuf:"varint,36,opt,name=mem_total_bytes,json=memTotalBytes" json:"mem_total_bytes,omitempty"` + // Total memory + swap usage. This is set if swap is enabled. + MemTotalMemswBytes *uint64 `protobuf:"varint,37,opt,name=mem_total_memsw_bytes,json=memTotalMemswBytes" json:"mem_total_memsw_bytes,omitempty"` + // Hard memory limit for a container. + MemLimitBytes *uint64 `protobuf:"varint,6,opt,name=mem_limit_bytes,json=memLimitBytes" json:"mem_limit_bytes,omitempty"` + // Soft memory limit for a container. 
+ MemSoftLimitBytes *uint64 `protobuf:"varint,38,opt,name=mem_soft_limit_bytes,json=memSoftLimitBytes" json:"mem_soft_limit_bytes,omitempty"` + // TODO(chzhcn) mem_file_bytes and mem_anon_bytes are deprecated in + // 0.23.0 and will be removed in 0.24.0. + MemFileBytes *uint64 `protobuf:"varint,10,opt,name=mem_file_bytes,json=memFileBytes" json:"mem_file_bytes,omitempty"` + MemAnonBytes *uint64 `protobuf:"varint,11,opt,name=mem_anon_bytes,json=memAnonBytes" json:"mem_anon_bytes,omitempty"` + // mem_cache_bytes is added in 0.23.0 to represent page cache usage. + MemCacheBytes *uint64 `protobuf:"varint,39,opt,name=mem_cache_bytes,json=memCacheBytes" json:"mem_cache_bytes,omitempty"` + // Since 0.23.0, mem_rss_bytes is changed to represent only + // anonymous memory usage. Note that neither its requiredness, type, + // name nor numeric tag has been changed. + MemRSSBytes *uint64 `protobuf:"varint,5,opt,name=mem_rss_bytes,json=memRssBytes" json:"mem_rss_bytes,omitempty"` + MemMappedFileBytes *uint64 `protobuf:"varint,12,opt,name=mem_mapped_file_bytes,json=memMappedFileBytes" json:"mem_mapped_file_bytes,omitempty"` + // This is only set if swap is enabled. + MemSwapBytes *uint64 `protobuf:"varint,40,opt,name=mem_swap_bytes,json=memSwapBytes" json:"mem_swap_bytes,omitempty"` + MemUnevictableBytes *uint64 `protobuf:"varint,41,opt,name=mem_unevictable_bytes,json=memUnevictableBytes" json:"mem_unevictable_bytes,omitempty"` + // Number of occurrences of different levels of memory pressure + // events reported by memory cgroup. Pressure listening (re)starts + // with these values set to 0 when agent (re)starts. See + // https://www.kernel.org/doc/Documentation/cgroups/memory.txt for + // more details. + MemLowPressureCounter *uint64 `protobuf:"varint,32,opt,name=mem_low_pressure_counter,json=memLowPressureCounter" json:"mem_low_pressure_counter,omitempty"` + MemMediumPressureCounter *uint64 `protobuf:"varint,33,opt,name=mem_medium_pressure_counter,json=memMediumPressureCounter" json:"mem_medium_pressure_counter,omitempty"` + MemCriticalPressureCounter *uint64 `protobuf:"varint,34,opt,name=mem_critical_pressure_counter,json=memCriticalPressureCounter" json:"mem_critical_pressure_counter,omitempty"` + // Disk Usage Information for executor working directory. + DiskLimitBytes *uint64 `protobuf:"varint,26,opt,name=disk_limit_bytes,json=diskLimitBytes" json:"disk_limit_bytes,omitempty"` + DiskUsedBytes *uint64 `protobuf:"varint,27,opt,name=disk_used_bytes,json=diskUsedBytes" json:"disk_used_bytes,omitempty"` + // Per disk (resource) statistics. + DiskStatistics []DiskStatistics `protobuf:"bytes,43,rep,name=disk_statistics,json=diskStatistics" json:"disk_statistics"` + // Cgroups blkio statistics. + BlkioStatistics *CgroupInfo_Blkio_Statistics `protobuf:"bytes,44,opt,name=blkio_statistics,json=blkioStatistics" json:"blkio_statistics,omitempty"` + // Perf statistics. 
+ Perf *PerfStatistics `protobuf:"bytes,13,opt,name=perf" json:"perf,omitempty"` + // Network Usage Information: + NetRxPackets *uint64 `protobuf:"varint,14,opt,name=net_rx_packets,json=netRxPackets" json:"net_rx_packets,omitempty"` + NetRxBytes *uint64 `protobuf:"varint,15,opt,name=net_rx_bytes,json=netRxBytes" json:"net_rx_bytes,omitempty"` + NetRxErrors *uint64 `protobuf:"varint,16,opt,name=net_rx_errors,json=netRxErrors" json:"net_rx_errors,omitempty"` + NetRxDropped *uint64 `protobuf:"varint,17,opt,name=net_rx_dropped,json=netRxDropped" json:"net_rx_dropped,omitempty"` + NetTxPackets *uint64 `protobuf:"varint,18,opt,name=net_tx_packets,json=netTxPackets" json:"net_tx_packets,omitempty"` + NetTxBytes *uint64 `protobuf:"varint,19,opt,name=net_tx_bytes,json=netTxBytes" json:"net_tx_bytes,omitempty"` + NetTxErrors *uint64 `protobuf:"varint,20,opt,name=net_tx_errors,json=netTxErrors" json:"net_tx_errors,omitempty"` + NetTxDropped *uint64 `protobuf:"varint,21,opt,name=net_tx_dropped,json=netTxDropped" json:"net_tx_dropped,omitempty"` + // The kernel keeps track of RTT (round-trip time) for its TCP + // sockets. RTT is a way to tell the latency of a container. + NetTCPRttMicrosecsP50 *float64 `protobuf:"fixed64,22,opt,name=net_tcp_rtt_microsecs_p50,json=netTcpRttMicrosecsP50" json:"net_tcp_rtt_microsecs_p50,omitempty"` + NetTCPRttMicrosecsP90 *float64 `protobuf:"fixed64,23,opt,name=net_tcp_rtt_microsecs_p90,json=netTcpRttMicrosecsP90" json:"net_tcp_rtt_microsecs_p90,omitempty"` + NetTCPRttMicrosecsP95 *float64 `protobuf:"fixed64,24,opt,name=net_tcp_rtt_microsecs_p95,json=netTcpRttMicrosecsP95" json:"net_tcp_rtt_microsecs_p95,omitempty"` + NetTCPRttMicrosecsP99 *float64 `protobuf:"fixed64,25,opt,name=net_tcp_rtt_microsecs_p99,json=netTcpRttMicrosecsP99" json:"net_tcp_rtt_microsecs_p99,omitempty"` + NetTCPActiveConnections *float64 `protobuf:"fixed64,28,opt,name=net_tcp_active_connections,json=netTcpActiveConnections" json:"net_tcp_active_connections,omitempty"` + NetTCPTimeWaitConnections *float64 `protobuf:"fixed64,29,opt,name=net_tcp_time_wait_connections,json=netTcpTimeWaitConnections" json:"net_tcp_time_wait_connections,omitempty"` + // Network traffic flowing into or out of a container can be delayed + // or dropped due to congestion or policy inside and outside the + // container. + NetTrafficControlStatistics []TrafficControlStatistics `protobuf:"bytes,35,rep,name=net_traffic_control_statistics,json=netTrafficControlStatistics" json:"net_traffic_control_statistics"` + // Network SNMP statistics for each container. 
+ NetSNMPStatistics *SNMPStatistics `protobuf:"bytes,42,opt,name=net_snmp_statistics,json=netSnmpStatistics" json:"net_snmp_statistics,omitempty"` +} + +func (m *ResourceStatistics) Reset() { *m = ResourceStatistics{} } +func (*ResourceStatistics) ProtoMessage() {} +func (*ResourceStatistics) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{37} } + +func (m *ResourceStatistics) GetTimestamp() float64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +func (m *ResourceStatistics) GetProcesses() uint32 { + if m != nil && m.Processes != nil { + return *m.Processes + } + return 0 +} + +func (m *ResourceStatistics) GetThreads() uint32 { + if m != nil && m.Threads != nil { + return *m.Threads + } + return 0 +} + +func (m *ResourceStatistics) GetCPUsUserTimeSecs() float64 { + if m != nil && m.CPUsUserTimeSecs != nil { + return *m.CPUsUserTimeSecs + } + return 0 +} + +func (m *ResourceStatistics) GetCPUsSystemTimeSecs() float64 { + if m != nil && m.CPUsSystemTimeSecs != nil { + return *m.CPUsSystemTimeSecs + } + return 0 +} + +func (m *ResourceStatistics) GetCPUsLimit() float64 { + if m != nil && m.CPUsLimit != nil { + return *m.CPUsLimit + } + return 0 +} + +func (m *ResourceStatistics) GetCPUsNrPeriods() uint32 { + if m != nil && m.CPUsNrPeriods != nil { + return *m.CPUsNrPeriods + } + return 0 +} + +func (m *ResourceStatistics) GetCPUsNrThrottled() uint32 { + if m != nil && m.CPUsNrThrottled != nil { + return *m.CPUsNrThrottled + } + return 0 +} + +func (m *ResourceStatistics) GetCPUsThrottledTimeSecs() float64 { + if m != nil && m.CPUsThrottledTimeSecs != nil { + return *m.CPUsThrottledTimeSecs + } + return 0 +} + +func (m *ResourceStatistics) GetMemTotalBytes() uint64 { + if m != nil && m.MemTotalBytes != nil { + return *m.MemTotalBytes + } + return 0 +} + +func (m *ResourceStatistics) GetMemTotalMemswBytes() uint64 { + if m != nil && m.MemTotalMemswBytes != nil { + return *m.MemTotalMemswBytes + } + return 0 +} + +func (m *ResourceStatistics) GetMemLimitBytes() uint64 { + if m != nil && m.MemLimitBytes != nil { + return *m.MemLimitBytes + } + return 0 +} + +func (m *ResourceStatistics) GetMemSoftLimitBytes() uint64 { + if m != nil && m.MemSoftLimitBytes != nil { + return *m.MemSoftLimitBytes + } + return 0 +} + +func (m *ResourceStatistics) GetMemFileBytes() uint64 { + if m != nil && m.MemFileBytes != nil { + return *m.MemFileBytes + } + return 0 +} + +func (m *ResourceStatistics) GetMemAnonBytes() uint64 { + if m != nil && m.MemAnonBytes != nil { + return *m.MemAnonBytes + } + return 0 +} + +func (m *ResourceStatistics) GetMemCacheBytes() uint64 { + if m != nil && m.MemCacheBytes != nil { + return *m.MemCacheBytes + } + return 0 +} + +func (m *ResourceStatistics) GetMemRSSBytes() uint64 { + if m != nil && m.MemRSSBytes != nil { + return *m.MemRSSBytes + } + return 0 +} + +func (m *ResourceStatistics) GetMemMappedFileBytes() uint64 { + if m != nil && m.MemMappedFileBytes != nil { + return *m.MemMappedFileBytes + } + return 0 +} + +func (m *ResourceStatistics) GetMemSwapBytes() uint64 { + if m != nil && m.MemSwapBytes != nil { + return *m.MemSwapBytes + } + return 0 +} + +func (m *ResourceStatistics) GetMemUnevictableBytes() uint64 { + if m != nil && m.MemUnevictableBytes != nil { + return *m.MemUnevictableBytes + } + return 0 +} + +func (m *ResourceStatistics) GetMemLowPressureCounter() uint64 { + if m != nil && m.MemLowPressureCounter != nil { + return *m.MemLowPressureCounter + } + return 0 +} + +func (m *ResourceStatistics) GetMemMediumPressureCounter() uint64 { + 
if m != nil && m.MemMediumPressureCounter != nil { + return *m.MemMediumPressureCounter + } + return 0 +} + +func (m *ResourceStatistics) GetMemCriticalPressureCounter() uint64 { + if m != nil && m.MemCriticalPressureCounter != nil { + return *m.MemCriticalPressureCounter + } + return 0 +} + +func (m *ResourceStatistics) GetDiskLimitBytes() uint64 { + if m != nil && m.DiskLimitBytes != nil { + return *m.DiskLimitBytes + } + return 0 +} + +func (m *ResourceStatistics) GetDiskUsedBytes() uint64 { + if m != nil && m.DiskUsedBytes != nil { + return *m.DiskUsedBytes + } + return 0 +} + +func (m *ResourceStatistics) GetDiskStatistics() []DiskStatistics { + if m != nil { + return m.DiskStatistics + } + return nil +} + +func (m *ResourceStatistics) GetBlkioStatistics() *CgroupInfo_Blkio_Statistics { + if m != nil { + return m.BlkioStatistics + } + return nil +} + +func (m *ResourceStatistics) GetPerf() *PerfStatistics { + if m != nil { + return m.Perf + } + return nil +} + +func (m *ResourceStatistics) GetNetRxPackets() uint64 { + if m != nil && m.NetRxPackets != nil { + return *m.NetRxPackets + } + return 0 +} + +func (m *ResourceStatistics) GetNetRxBytes() uint64 { + if m != nil && m.NetRxBytes != nil { + return *m.NetRxBytes + } + return 0 +} + +func (m *ResourceStatistics) GetNetRxErrors() uint64 { + if m != nil && m.NetRxErrors != nil { + return *m.NetRxErrors + } + return 0 +} + +func (m *ResourceStatistics) GetNetRxDropped() uint64 { + if m != nil && m.NetRxDropped != nil { + return *m.NetRxDropped + } + return 0 +} + +func (m *ResourceStatistics) GetNetTxPackets() uint64 { + if m != nil && m.NetTxPackets != nil { + return *m.NetTxPackets + } + return 0 +} + +func (m *ResourceStatistics) GetNetTxBytes() uint64 { + if m != nil && m.NetTxBytes != nil { + return *m.NetTxBytes + } + return 0 +} + +func (m *ResourceStatistics) GetNetTxErrors() uint64 { + if m != nil && m.NetTxErrors != nil { + return *m.NetTxErrors + } + return 0 +} + +func (m *ResourceStatistics) GetNetTxDropped() uint64 { + if m != nil && m.NetTxDropped != nil { + return *m.NetTxDropped + } + return 0 +} + +func (m *ResourceStatistics) GetNetTCPRttMicrosecsP50() float64 { + if m != nil && m.NetTCPRttMicrosecsP50 != nil { + return *m.NetTCPRttMicrosecsP50 + } + return 0 +} + +func (m *ResourceStatistics) GetNetTCPRttMicrosecsP90() float64 { + if m != nil && m.NetTCPRttMicrosecsP90 != nil { + return *m.NetTCPRttMicrosecsP90 + } + return 0 +} + +func (m *ResourceStatistics) GetNetTCPRttMicrosecsP95() float64 { + if m != nil && m.NetTCPRttMicrosecsP95 != nil { + return *m.NetTCPRttMicrosecsP95 + } + return 0 +} + +func (m *ResourceStatistics) GetNetTCPRttMicrosecsP99() float64 { + if m != nil && m.NetTCPRttMicrosecsP99 != nil { + return *m.NetTCPRttMicrosecsP99 + } + return 0 +} + +func (m *ResourceStatistics) GetNetTCPActiveConnections() float64 { + if m != nil && m.NetTCPActiveConnections != nil { + return *m.NetTCPActiveConnections + } + return 0 +} + +func (m *ResourceStatistics) GetNetTCPTimeWaitConnections() float64 { + if m != nil && m.NetTCPTimeWaitConnections != nil { + return *m.NetTCPTimeWaitConnections + } + return 0 +} + +func (m *ResourceStatistics) GetNetTrafficControlStatistics() []TrafficControlStatistics { + if m != nil { + return m.NetTrafficControlStatistics + } + return nil +} + +func (m *ResourceStatistics) GetNetSNMPStatistics() *SNMPStatistics { + if m != nil { + return m.NetSNMPStatistics + } + return nil +} + +// * +// Describes a snapshot of the resource usage for executors. 
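+// Example (editorial sketch, not part of the generated bindings): every
+// generated getter above is nil-safe, so deeply nested optional messages can
+// be read without explicit nil checks. For instance, summing anonymous-memory
+// usage across executors:
+//
+//	func totalRSSBytes(u *ResourceUsage) uint64 {
+//		var sum uint64
+//		for _, e := range u.GetExecutors() { // a nil u yields an empty slice
+//			// GetStatistics may return nil; GetMemRSSBytes tolerates that.
+//			sum += e.GetStatistics().GetMemRSSBytes()
+//		}
+//		return sum
+//	}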
+type ResourceUsage struct { + Executors []ResourceUsage_Executor `protobuf:"bytes,1,rep,name=executors" json:"executors"` + // Agent's total resources including checkpointed dynamic + // reservations and persistent volumes. + Total []Resource `protobuf:"bytes,2,rep,name=total" json:"total"` +} + +func (m *ResourceUsage) Reset() { *m = ResourceUsage{} } +func (*ResourceUsage) ProtoMessage() {} +func (*ResourceUsage) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{38} } + +func (m *ResourceUsage) GetExecutors() []ResourceUsage_Executor { + if m != nil { + return m.Executors + } + return nil +} + +func (m *ResourceUsage) GetTotal() []Resource { + if m != nil { + return m.Total + } + return nil +} + +type ResourceUsage_Executor struct { + ExecutorInfo ExecutorInfo `protobuf:"bytes,1,req,name=executor_info,json=executorInfo" json:"executor_info"` + // This includes resources used by the executor itself + // as well as its active tasks. + Allocated []Resource `protobuf:"bytes,2,rep,name=allocated" json:"allocated"` + // Current resource usage. If absent, the containerizer + // cannot provide resource usage. + Statistics *ResourceStatistics `protobuf:"bytes,3,opt,name=statistics" json:"statistics,omitempty"` + // The container id for the executor specified in the executor_info field. + ContainerID ContainerID `protobuf:"bytes,4,req,name=container_id,json=containerId" json:"container_id"` + // Non-terminal tasks. + Tasks []ResourceUsage_Executor_Task `protobuf:"bytes,5,rep,name=tasks" json:"tasks"` +} + +func (m *ResourceUsage_Executor) Reset() { *m = ResourceUsage_Executor{} } +func (*ResourceUsage_Executor) ProtoMessage() {} +func (*ResourceUsage_Executor) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{38, 0} } + +func (m *ResourceUsage_Executor) GetExecutorInfo() ExecutorInfo { + if m != nil { + return m.ExecutorInfo + } + return ExecutorInfo{} +} + +func (m *ResourceUsage_Executor) GetAllocated() []Resource { + if m != nil { + return m.Allocated + } + return nil +} + +func (m *ResourceUsage_Executor) GetStatistics() *ResourceStatistics { + if m != nil { + return m.Statistics + } + return nil +} + +func (m *ResourceUsage_Executor) GetContainerID() ContainerID { + if m != nil { + return m.ContainerID + } + return ContainerID{} +} + +func (m *ResourceUsage_Executor) GetTasks() []ResourceUsage_Executor_Task { + if m != nil { + return m.Tasks + } + return nil +} + +type ResourceUsage_Executor_Task struct { + Name string `protobuf:"bytes,1,req,name=name" json:"name"` + ID TaskID `protobuf:"bytes,2,req,name=id" json:"id"` + Resources []Resource `protobuf:"bytes,3,rep,name=resources" json:"resources"` + Labels *Labels `protobuf:"bytes,4,opt,name=labels" json:"labels,omitempty"` +} + +func (m *ResourceUsage_Executor_Task) Reset() { *m = ResourceUsage_Executor_Task{} } +func (*ResourceUsage_Executor_Task) ProtoMessage() {} +func (*ResourceUsage_Executor_Task) Descriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{38, 0, 0} +} + +func (m *ResourceUsage_Executor_Task) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ResourceUsage_Executor_Task) GetID() TaskID { + if m != nil { + return m.ID + } + return TaskID{} +} + +func (m *ResourceUsage_Executor_Task) GetResources() []Resource { + if m != nil { + return m.Resources + } + return nil +} + +func (m *ResourceUsage_Executor_Task) GetLabels() *Labels { + if m != nil { + return m.Labels + } + return nil +} + +// * +// Describes a sample of events from "perf stat". 
Only available on +// Linux. +// +// NOTE: Each optional field matches the name of a perf event (see +// "perf list") with the following changes: +// 1. Names are downcased. +// 2. Hyphens ('-') are replaced with underscores ('_'). +// 3. Events with alternate names use the name "perf stat" returns, +// e.g., for the event "cycles OR cpu-cycles" perf always returns +// cycles. +type PerfStatistics struct { + Timestamp float64 `protobuf:"fixed64,1,req,name=timestamp" json:"timestamp"` + Duration float64 `protobuf:"fixed64,2,req,name=duration" json:"duration"` + // Hardware event. + Cycles *uint64 `protobuf:"varint,3,opt,name=cycles" json:"cycles,omitempty"` + StalledCyclesFrontend *uint64 `protobuf:"varint,4,opt,name=stalled_cycles_frontend,json=stalledCyclesFrontend" json:"stalled_cycles_frontend,omitempty"` + StalledCyclesBackend *uint64 `protobuf:"varint,5,opt,name=stalled_cycles_backend,json=stalledCyclesBackend" json:"stalled_cycles_backend,omitempty"` + Instructions *uint64 `protobuf:"varint,6,opt,name=instructions" json:"instructions,omitempty"` + CacheReferences *uint64 `protobuf:"varint,7,opt,name=cache_references,json=cacheReferences" json:"cache_references,omitempty"` + CacheMisses *uint64 `protobuf:"varint,8,opt,name=cache_misses,json=cacheMisses" json:"cache_misses,omitempty"` + Branches *uint64 `protobuf:"varint,9,opt,name=branches" json:"branches,omitempty"` + BranchMisses *uint64 `protobuf:"varint,10,opt,name=branch_misses,json=branchMisses" json:"branch_misses,omitempty"` + BusCycles *uint64 `protobuf:"varint,11,opt,name=bus_cycles,json=busCycles" json:"bus_cycles,omitempty"` + RefCycles *uint64 `protobuf:"varint,12,opt,name=ref_cycles,json=refCycles" json:"ref_cycles,omitempty"` + // Software event. + CPUClock *float64 `protobuf:"fixed64,13,opt,name=cpu_clock,json=cpuClock" json:"cpu_clock,omitempty"` + TaskClock *float64 `protobuf:"fixed64,14,opt,name=task_clock,json=taskClock" json:"task_clock,omitempty"` + PageFaults *uint64 `protobuf:"varint,15,opt,name=page_faults,json=pageFaults" json:"page_faults,omitempty"` + MinorFaults *uint64 `protobuf:"varint,16,opt,name=minor_faults,json=minorFaults" json:"minor_faults,omitempty"` + MajorFaults *uint64 `protobuf:"varint,17,opt,name=major_faults,json=majorFaults" json:"major_faults,omitempty"` + ContextSwitches *uint64 `protobuf:"varint,18,opt,name=context_switches,json=contextSwitches" json:"context_switches,omitempty"` + CPUMigrations *uint64 `protobuf:"varint,19,opt,name=cpu_migrations,json=cpuMigrations" json:"cpu_migrations,omitempty"` + AlignmentFaults *uint64 `protobuf:"varint,20,opt,name=alignment_faults,json=alignmentFaults" json:"alignment_faults,omitempty"` + EmulationFaults *uint64 `protobuf:"varint,21,opt,name=emulation_faults,json=emulationFaults" json:"emulation_faults,omitempty"` + // Hardware cache event. 
+ L1DcacheLoads *uint64 `protobuf:"varint,22,opt,name=l1_dcache_loads,json=l1DcacheLoads" json:"l1_dcache_loads,omitempty"` + L1DcacheLoadMisses *uint64 `protobuf:"varint,23,opt,name=l1_dcache_load_misses,json=l1DcacheLoadMisses" json:"l1_dcache_load_misses,omitempty"` + L1DcacheStores *uint64 `protobuf:"varint,24,opt,name=l1_dcache_stores,json=l1DcacheStores" json:"l1_dcache_stores,omitempty"` + L1DcacheStoreMisses *uint64 `protobuf:"varint,25,opt,name=l1_dcache_store_misses,json=l1DcacheStoreMisses" json:"l1_dcache_store_misses,omitempty"` + L1DcachePrefetches *uint64 `protobuf:"varint,26,opt,name=l1_dcache_prefetches,json=l1DcachePrefetches" json:"l1_dcache_prefetches,omitempty"` + L1DcachePrefetchMisses *uint64 `protobuf:"varint,27,opt,name=l1_dcache_prefetch_misses,json=l1DcachePrefetchMisses" json:"l1_dcache_prefetch_misses,omitempty"` + L1IcacheLoads *uint64 `protobuf:"varint,28,opt,name=l1_icache_loads,json=l1IcacheLoads" json:"l1_icache_loads,omitempty"` + L1IcacheLoadMisses *uint64 `protobuf:"varint,29,opt,name=l1_icache_load_misses,json=l1IcacheLoadMisses" json:"l1_icache_load_misses,omitempty"` + L1IcachePrefetches *uint64 `protobuf:"varint,30,opt,name=l1_icache_prefetches,json=l1IcachePrefetches" json:"l1_icache_prefetches,omitempty"` + L1IcachePrefetchMisses *uint64 `protobuf:"varint,31,opt,name=l1_icache_prefetch_misses,json=l1IcachePrefetchMisses" json:"l1_icache_prefetch_misses,omitempty"` + LLCLoads *uint64 `protobuf:"varint,32,opt,name=llc_loads,json=llcLoads" json:"llc_loads,omitempty"` + LLCLoadMisses *uint64 `protobuf:"varint,33,opt,name=llc_load_misses,json=llcLoadMisses" json:"llc_load_misses,omitempty"` + LLCStores *uint64 `protobuf:"varint,34,opt,name=llc_stores,json=llcStores" json:"llc_stores,omitempty"` + LLCStoreMisses *uint64 `protobuf:"varint,35,opt,name=llc_store_misses,json=llcStoreMisses" json:"llc_store_misses,omitempty"` + LLCPrefetches *uint64 `protobuf:"varint,36,opt,name=llc_prefetches,json=llcPrefetches" json:"llc_prefetches,omitempty"` + LLCPrefetchMisses *uint64 `protobuf:"varint,37,opt,name=llc_prefetch_misses,json=llcPrefetchMisses" json:"llc_prefetch_misses,omitempty"` + DTLBLoads *uint64 `protobuf:"varint,38,opt,name=dtlb_loads,json=dtlbLoads" json:"dtlb_loads,omitempty"` + DTLBLoadMisses *uint64 `protobuf:"varint,39,opt,name=dtlb_load_misses,json=dtlbLoadMisses" json:"dtlb_load_misses,omitempty"` + DTLBStores *uint64 `protobuf:"varint,40,opt,name=dtlb_stores,json=dtlbStores" json:"dtlb_stores,omitempty"` + DTLBStoreMisses *uint64 `protobuf:"varint,41,opt,name=dtlb_store_misses,json=dtlbStoreMisses" json:"dtlb_store_misses,omitempty"` + DTLBPrefetches *uint64 `protobuf:"varint,42,opt,name=dtlb_prefetches,json=dtlbPrefetches" json:"dtlb_prefetches,omitempty"` + DTLBPrefetchMisses *uint64 `protobuf:"varint,43,opt,name=dtlb_prefetch_misses,json=dtlbPrefetchMisses" json:"dtlb_prefetch_misses,omitempty"` + ITLBLoads *uint64 `protobuf:"varint,44,opt,name=itlb_loads,json=itlbLoads" json:"itlb_loads,omitempty"` + ITLBLoadMisses *uint64 `protobuf:"varint,45,opt,name=itlb_load_misses,json=itlbLoadMisses" json:"itlb_load_misses,omitempty"` + BranchLoads *uint64 `protobuf:"varint,46,opt,name=branch_loads,json=branchLoads" json:"branch_loads,omitempty"` + BranchLoadMisses *uint64 `protobuf:"varint,47,opt,name=branch_load_misses,json=branchLoadMisses" json:"branch_load_misses,omitempty"` + NodeLoads *uint64 `protobuf:"varint,48,opt,name=node_loads,json=nodeLoads" json:"node_loads,omitempty"` + NodeLoadMisses *uint64 
`protobuf:"varint,49,opt,name=node_load_misses,json=nodeLoadMisses" json:"node_load_misses,omitempty"` + NodeStores *uint64 `protobuf:"varint,50,opt,name=node_stores,json=nodeStores" json:"node_stores,omitempty"` + NodeStoreMisses *uint64 `protobuf:"varint,51,opt,name=node_store_misses,json=nodeStoreMisses" json:"node_store_misses,omitempty"` + NodePrefetches *uint64 `protobuf:"varint,52,opt,name=node_prefetches,json=nodePrefetches" json:"node_prefetches,omitempty"` + NodePrefetchMisses *uint64 `protobuf:"varint,53,opt,name=node_prefetch_misses,json=nodePrefetchMisses" json:"node_prefetch_misses,omitempty"` +} + +func (m *PerfStatistics) Reset() { *m = PerfStatistics{} } +func (*PerfStatistics) ProtoMessage() {} +func (*PerfStatistics) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{39} } + +func (m *PerfStatistics) GetTimestamp() float64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +func (m *PerfStatistics) GetDuration() float64 { + if m != nil { + return m.Duration + } + return 0 +} + +func (m *PerfStatistics) GetCycles() uint64 { + if m != nil && m.Cycles != nil { + return *m.Cycles + } + return 0 +} + +func (m *PerfStatistics) GetStalledCyclesFrontend() uint64 { + if m != nil && m.StalledCyclesFrontend != nil { + return *m.StalledCyclesFrontend + } + return 0 +} + +func (m *PerfStatistics) GetStalledCyclesBackend() uint64 { + if m != nil && m.StalledCyclesBackend != nil { + return *m.StalledCyclesBackend + } + return 0 +} + +func (m *PerfStatistics) GetInstructions() uint64 { + if m != nil && m.Instructions != nil { + return *m.Instructions + } + return 0 +} + +func (m *PerfStatistics) GetCacheReferences() uint64 { + if m != nil && m.CacheReferences != nil { + return *m.CacheReferences + } + return 0 +} + +func (m *PerfStatistics) GetCacheMisses() uint64 { + if m != nil && m.CacheMisses != nil { + return *m.CacheMisses + } + return 0 +} + +func (m *PerfStatistics) GetBranches() uint64 { + if m != nil && m.Branches != nil { + return *m.Branches + } + return 0 +} + +func (m *PerfStatistics) GetBranchMisses() uint64 { + if m != nil && m.BranchMisses != nil { + return *m.BranchMisses + } + return 0 +} + +func (m *PerfStatistics) GetBusCycles() uint64 { + if m != nil && m.BusCycles != nil { + return *m.BusCycles + } + return 0 +} + +func (m *PerfStatistics) GetRefCycles() uint64 { + if m != nil && m.RefCycles != nil { + return *m.RefCycles + } + return 0 +} + +func (m *PerfStatistics) GetCPUClock() float64 { + if m != nil && m.CPUClock != nil { + return *m.CPUClock + } + return 0 +} + +func (m *PerfStatistics) GetTaskClock() float64 { + if m != nil && m.TaskClock != nil { + return *m.TaskClock + } + return 0 +} + +func (m *PerfStatistics) GetPageFaults() uint64 { + if m != nil && m.PageFaults != nil { + return *m.PageFaults + } + return 0 +} + +func (m *PerfStatistics) GetMinorFaults() uint64 { + if m != nil && m.MinorFaults != nil { + return *m.MinorFaults + } + return 0 +} + +func (m *PerfStatistics) GetMajorFaults() uint64 { + if m != nil && m.MajorFaults != nil { + return *m.MajorFaults + } + return 0 +} + +func (m *PerfStatistics) GetContextSwitches() uint64 { + if m != nil && m.ContextSwitches != nil { + return *m.ContextSwitches + } + return 0 +} + +func (m *PerfStatistics) GetCPUMigrations() uint64 { + if m != nil && m.CPUMigrations != nil { + return *m.CPUMigrations + } + return 0 +} + +func (m *PerfStatistics) GetAlignmentFaults() uint64 { + if m != nil && m.AlignmentFaults != nil { + return *m.AlignmentFaults + } + return 0 +} + +func (m 
*PerfStatistics) GetEmulationFaults() uint64 { + if m != nil && m.EmulationFaults != nil { + return *m.EmulationFaults + } + return 0 +} + +func (m *PerfStatistics) GetL1DcacheLoads() uint64 { + if m != nil && m.L1DcacheLoads != nil { + return *m.L1DcacheLoads + } + return 0 +} + +func (m *PerfStatistics) GetL1DcacheLoadMisses() uint64 { + if m != nil && m.L1DcacheLoadMisses != nil { + return *m.L1DcacheLoadMisses + } + return 0 +} + +func (m *PerfStatistics) GetL1DcacheStores() uint64 { + if m != nil && m.L1DcacheStores != nil { + return *m.L1DcacheStores + } + return 0 +} + +func (m *PerfStatistics) GetL1DcacheStoreMisses() uint64 { + if m != nil && m.L1DcacheStoreMisses != nil { + return *m.L1DcacheStoreMisses + } + return 0 +} + +func (m *PerfStatistics) GetL1DcachePrefetches() uint64 { + if m != nil && m.L1DcachePrefetches != nil { + return *m.L1DcachePrefetches + } + return 0 +} + +func (m *PerfStatistics) GetL1DcachePrefetchMisses() uint64 { + if m != nil && m.L1DcachePrefetchMisses != nil { + return *m.L1DcachePrefetchMisses + } + return 0 +} + +func (m *PerfStatistics) GetL1IcacheLoads() uint64 { + if m != nil && m.L1IcacheLoads != nil { + return *m.L1IcacheLoads + } + return 0 +} + +func (m *PerfStatistics) GetL1IcacheLoadMisses() uint64 { + if m != nil && m.L1IcacheLoadMisses != nil { + return *m.L1IcacheLoadMisses + } + return 0 +} + +func (m *PerfStatistics) GetL1IcachePrefetches() uint64 { + if m != nil && m.L1IcachePrefetches != nil { + return *m.L1IcachePrefetches + } + return 0 +} + +func (m *PerfStatistics) GetL1IcachePrefetchMisses() uint64 { + if m != nil && m.L1IcachePrefetchMisses != nil { + return *m.L1IcachePrefetchMisses + } + return 0 +} + +func (m *PerfStatistics) GetLLCLoads() uint64 { + if m != nil && m.LLCLoads != nil { + return *m.LLCLoads + } + return 0 +} + +func (m *PerfStatistics) GetLLCLoadMisses() uint64 { + if m != nil && m.LLCLoadMisses != nil { + return *m.LLCLoadMisses + } + return 0 +} + +func (m *PerfStatistics) GetLLCStores() uint64 { + if m != nil && m.LLCStores != nil { + return *m.LLCStores + } + return 0 +} + +func (m *PerfStatistics) GetLLCStoreMisses() uint64 { + if m != nil && m.LLCStoreMisses != nil { + return *m.LLCStoreMisses + } + return 0 +} + +func (m *PerfStatistics) GetLLCPrefetches() uint64 { + if m != nil && m.LLCPrefetches != nil { + return *m.LLCPrefetches + } + return 0 +} + +func (m *PerfStatistics) GetLLCPrefetchMisses() uint64 { + if m != nil && m.LLCPrefetchMisses != nil { + return *m.LLCPrefetchMisses + } + return 0 +} + +func (m *PerfStatistics) GetDTLBLoads() uint64 { + if m != nil && m.DTLBLoads != nil { + return *m.DTLBLoads + } + return 0 +} + +func (m *PerfStatistics) GetDTLBLoadMisses() uint64 { + if m != nil && m.DTLBLoadMisses != nil { + return *m.DTLBLoadMisses + } + return 0 +} + +func (m *PerfStatistics) GetDTLBStores() uint64 { + if m != nil && m.DTLBStores != nil { + return *m.DTLBStores + } + return 0 +} + +func (m *PerfStatistics) GetDTLBStoreMisses() uint64 { + if m != nil && m.DTLBStoreMisses != nil { + return *m.DTLBStoreMisses + } + return 0 +} + +func (m *PerfStatistics) GetDTLBPrefetches() uint64 { + if m != nil && m.DTLBPrefetches != nil { + return *m.DTLBPrefetches + } + return 0 +} + +func (m *PerfStatistics) GetDTLBPrefetchMisses() uint64 { + if m != nil && m.DTLBPrefetchMisses != nil { + return *m.DTLBPrefetchMisses + } + return 0 +} + +func (m *PerfStatistics) GetITLBLoads() uint64 { + if m != nil && m.ITLBLoads != nil { + return *m.ITLBLoads + } + return 0 +} + +func (m *PerfStatistics) 
GetITLBLoadMisses() uint64 { + if m != nil && m.ITLBLoadMisses != nil { + return *m.ITLBLoadMisses + } + return 0 +} + +func (m *PerfStatistics) GetBranchLoads() uint64 { + if m != nil && m.BranchLoads != nil { + return *m.BranchLoads + } + return 0 +} + +func (m *PerfStatistics) GetBranchLoadMisses() uint64 { + if m != nil && m.BranchLoadMisses != nil { + return *m.BranchLoadMisses + } + return 0 +} + +func (m *PerfStatistics) GetNodeLoads() uint64 { + if m != nil && m.NodeLoads != nil { + return *m.NodeLoads + } + return 0 +} + +func (m *PerfStatistics) GetNodeLoadMisses() uint64 { + if m != nil && m.NodeLoadMisses != nil { + return *m.NodeLoadMisses + } + return 0 +} + +func (m *PerfStatistics) GetNodeStores() uint64 { + if m != nil && m.NodeStores != nil { + return *m.NodeStores + } + return 0 +} + +func (m *PerfStatistics) GetNodeStoreMisses() uint64 { + if m != nil && m.NodeStoreMisses != nil { + return *m.NodeStoreMisses + } + return 0 +} + +func (m *PerfStatistics) GetNodePrefetches() uint64 { + if m != nil && m.NodePrefetches != nil { + return *m.NodePrefetches + } + return 0 +} + +func (m *PerfStatistics) GetNodePrefetchMisses() uint64 { + if m != nil && m.NodePrefetchMisses != nil { + return *m.NodePrefetchMisses + } + return 0 +} + +// * +// Describes a request for resources that can be used by a framework +// to proactively influence the allocator. If 'agent_id' is provided +// then this request is assumed to only apply to resources on that +// agent. +type Request struct { + AgentID *AgentID `protobuf:"bytes,1,opt,name=agent_id,json=agentId" json:"agent_id,omitempty"` + Resources []Resource `protobuf:"bytes,2,rep,name=resources" json:"resources"` +} + +func (m *Request) Reset() { *m = Request{} } +func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{40} } + +func (m *Request) GetAgentID() *AgentID { + if m != nil { + return m.AgentID + } + return nil +} + +func (m *Request) GetResources() []Resource { + if m != nil { + return m.Resources + } + return nil +} + +// * +// Describes some resources available on an agent. An offer only +// contains resources from a single agent. +type Offer struct { + ID OfferID `protobuf:"bytes,1,req,name=id" json:"id"` + FrameworkID FrameworkID `protobuf:"bytes,2,req,name=framework_id,json=frameworkId" json:"framework_id"` + AgentID AgentID `protobuf:"bytes,3,req,name=agent_id,json=agentId" json:"agent_id"` + Hostname string `protobuf:"bytes,4,req,name=hostname" json:"hostname"` + // URL for reaching the agent running on the host. + URL *URL `protobuf:"bytes,8,opt,name=url" json:"url,omitempty"` + // The domain of the agent. + Domain *DomainInfo `protobuf:"bytes,11,opt,name=domain" json:"domain,omitempty"` + Resources []Resource `protobuf:"bytes,5,rep,name=resources" json:"resources"` + Attributes []Attribute `protobuf:"bytes,7,rep,name=attributes" json:"attributes"` + // Executors of the same framework running on this agent. + ExecutorIDs []ExecutorID `protobuf:"bytes,6,rep,name=executor_ids,json=executorIds" json:"executor_ids"` + // Signifies that the resources in this Offer may be unavailable during + // the given interval. Any tasks launched using these resources may be + // killed when the interval arrives. For example, these resources may be + // part of a planned maintenance schedule. + // + // This field only provides information about a planned unavailability. 
+ // The unavailability interval may not necessarily start at exactly this + // interval, nor last for exactly the duration of this interval. + // The unavailability may also be forever! See comments in + // `Unavailability` for more details. + Unavailability *Unavailability `protobuf:"bytes,9,opt,name=unavailability" json:"unavailability,omitempty"` + // An offer represents resources allocated to *one* of the + // roles managed by the scheduler. (Therefore, each + // `Offer.resources[i].allocation_info` will match the + // top level `Offer.allocation_info`). + AllocationInfo *Resource_AllocationInfo `protobuf:"bytes,10,opt,name=allocation_info,json=allocationInfo" json:"allocation_info,omitempty"` +} + +func (m *Offer) Reset() { *m = Offer{} } +func (*Offer) ProtoMessage() {} +func (*Offer) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{41} } + +func (m *Offer) GetID() OfferID { + if m != nil { + return m.ID + } + return OfferID{} +} + +func (m *Offer) GetFrameworkID() FrameworkID { + if m != nil { + return m.FrameworkID + } + return FrameworkID{} +} + +func (m *Offer) GetAgentID() AgentID { + if m != nil { + return m.AgentID + } + return AgentID{} +} + +func (m *Offer) GetHostname() string { + if m != nil { + return m.Hostname + } + return "" +} + +func (m *Offer) GetURL() *URL { + if m != nil { + return m.URL + } + return nil +} + +func (m *Offer) GetDomain() *DomainInfo { + if m != nil { + return m.Domain + } + return nil +} + +func (m *Offer) GetResources() []Resource { + if m != nil { + return m.Resources + } + return nil +} + +func (m *Offer) GetAttributes() []Attribute { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Offer) GetExecutorIDs() []ExecutorID { + if m != nil { + return m.ExecutorIDs + } + return nil +} + +func (m *Offer) GetUnavailability() *Unavailability { + if m != nil { + return m.Unavailability + } + return nil +} + +func (m *Offer) GetAllocationInfo() *Resource_AllocationInfo { + if m != nil { + return m.AllocationInfo + } + return nil +} + +// Defines an operation that can be performed against offers. +type Offer_Operation struct { + Type Offer_Operation_Type `protobuf:"varint,1,opt,name=type,enum=mesos.Offer_Operation_Type" json:"type"` + // NOTE: The `id` field will allow frameworks to indicate that they wish to + // receive feedback about an operation. Since this feature is not yet + // implemented, the `id` field should NOT be set at present. See MESOS-8054. 
+ ID *OperationID `protobuf:"bytes,12,opt,name=id" json:"id,omitempty"` + Launch *Offer_Operation_Launch `protobuf:"bytes,2,opt,name=launch" json:"launch,omitempty"` + LaunchGroup *Offer_Operation_LaunchGroup `protobuf:"bytes,7,opt,name=launch_group,json=launchGroup" json:"launch_group,omitempty"` + Reserve *Offer_Operation_Reserve `protobuf:"bytes,3,opt,name=reserve" json:"reserve,omitempty"` + Unreserve *Offer_Operation_Unreserve `protobuf:"bytes,4,opt,name=unreserve" json:"unreserve,omitempty"` + Create *Offer_Operation_Create `protobuf:"bytes,5,opt,name=create" json:"create,omitempty"` + Destroy *Offer_Operation_Destroy `protobuf:"bytes,6,opt,name=destroy" json:"destroy,omitempty"` + GrowVolume *Offer_Operation_GrowVolume `protobuf:"bytes,13,opt,name=grow_volume,json=growVolume" json:"grow_volume,omitempty"` + ShrinkVolume *Offer_Operation_ShrinkVolume `protobuf:"bytes,14,opt,name=shrink_volume,json=shrinkVolume" json:"shrink_volume,omitempty"` + CreateDisk *Offer_Operation_CreateDisk `protobuf:"bytes,15,opt,name=create_disk,json=createDisk" json:"create_disk,omitempty"` + DestroyDisk *Offer_Operation_DestroyDisk `protobuf:"bytes,16,opt,name=destroy_disk,json=destroyDisk" json:"destroy_disk,omitempty"` +} + +func (m *Offer_Operation) Reset() { *m = Offer_Operation{} } +func (*Offer_Operation) ProtoMessage() {} +func (*Offer_Operation) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{41, 0} } + +func (m *Offer_Operation) GetType() Offer_Operation_Type { + if m != nil { + return m.Type + } + return Offer_Operation_UNKNOWN +} + +func (m *Offer_Operation) GetID() *OperationID { + if m != nil { + return m.ID + } + return nil +} + +func (m *Offer_Operation) GetLaunch() *Offer_Operation_Launch { + if m != nil { + return m.Launch + } + return nil +} + +func (m *Offer_Operation) GetLaunchGroup() *Offer_Operation_LaunchGroup { + if m != nil { + return m.LaunchGroup + } + return nil +} + +func (m *Offer_Operation) GetReserve() *Offer_Operation_Reserve { + if m != nil { + return m.Reserve + } + return nil +} + +func (m *Offer_Operation) GetUnreserve() *Offer_Operation_Unreserve { + if m != nil { + return m.Unreserve + } + return nil +} + +func (m *Offer_Operation) GetCreate() *Offer_Operation_Create { + if m != nil { + return m.Create + } + return nil +} + +func (m *Offer_Operation) GetDestroy() *Offer_Operation_Destroy { + if m != nil { + return m.Destroy + } + return nil +} + +func (m *Offer_Operation) GetGrowVolume() *Offer_Operation_GrowVolume { + if m != nil { + return m.GrowVolume + } + return nil +} + +func (m *Offer_Operation) GetShrinkVolume() *Offer_Operation_ShrinkVolume { + if m != nil { + return m.ShrinkVolume + } + return nil +} + +func (m *Offer_Operation) GetCreateDisk() *Offer_Operation_CreateDisk { + if m != nil { + return m.CreateDisk + } + return nil +} + +func (m *Offer_Operation) GetDestroyDisk() *Offer_Operation_DestroyDisk { + if m != nil { + return m.DestroyDisk + } + return nil +} + +// TODO(vinod): Deprecate this in favor of `LaunchGroup` below. 
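+// Example (editorial sketch, not part of the generated bindings): a LAUNCH
+// operation pairs the discriminating Type with the matching sub-message.
+// Offer_Operation_LAUNCH is assumed here to be the generated enum constant
+// for this operation:
+//
+//	func launchOp(tasks []TaskInfo) Offer_Operation {
+//		return Offer_Operation{
+//			Type:   Offer_Operation_LAUNCH,
+//			Launch: &Offer_Operation_Launch{TaskInfos: tasks},
+//		}
+//	}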
+type Offer_Operation_Launch struct { + TaskInfos []TaskInfo `protobuf:"bytes,1,rep,name=task_infos,json=taskInfos" json:"task_infos"` +} + +func (m *Offer_Operation_Launch) Reset() { *m = Offer_Operation_Launch{} } +func (*Offer_Operation_Launch) ProtoMessage() {} +func (*Offer_Operation_Launch) Descriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{41, 0, 0} +} + +func (m *Offer_Operation_Launch) GetTaskInfos() []TaskInfo { + if m != nil { + return m.TaskInfos + } + return nil +} + +// Unlike `Launch` above, all the tasks in a `task_group` are +// atomically delivered to an executor. +// +// `NetworkInfo` set on executor will be shared by all tasks in +// the task group. +// +// TODO(vinod): Any volumes set on executor could be used by a +// task by explicitly setting `Volume.source` in its resources. +type Offer_Operation_LaunchGroup struct { + Executor ExecutorInfo `protobuf:"bytes,1,req,name=executor" json:"executor"` + TaskGroup TaskGroupInfo `protobuf:"bytes,2,req,name=task_group,json=taskGroup" json:"task_group"` +} + +func (m *Offer_Operation_LaunchGroup) Reset() { *m = Offer_Operation_LaunchGroup{} } +func (*Offer_Operation_LaunchGroup) ProtoMessage() {} +func (*Offer_Operation_LaunchGroup) Descriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{41, 0, 1} +} + +func (m *Offer_Operation_LaunchGroup) GetExecutor() ExecutorInfo { + if m != nil { + return m.Executor + } + return ExecutorInfo{} +} + +func (m *Offer_Operation_LaunchGroup) GetTaskGroup() TaskGroupInfo { + if m != nil { + return m.TaskGroup + } + return TaskGroupInfo{} +} + +type Offer_Operation_Reserve struct { + Resources []Resource `protobuf:"bytes,1,rep,name=resources" json:"resources"` +} + +func (m *Offer_Operation_Reserve) Reset() { *m = Offer_Operation_Reserve{} } +func (*Offer_Operation_Reserve) ProtoMessage() {} +func (*Offer_Operation_Reserve) Descriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{41, 0, 2} +} + +func (m *Offer_Operation_Reserve) GetResources() []Resource { + if m != nil { + return m.Resources + } + return nil +} + +type Offer_Operation_Unreserve struct { + Resources []Resource `protobuf:"bytes,1,rep,name=resources" json:"resources"` +} + +func (m *Offer_Operation_Unreserve) Reset() { *m = Offer_Operation_Unreserve{} } +func (*Offer_Operation_Unreserve) ProtoMessage() {} +func (*Offer_Operation_Unreserve) Descriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{41, 0, 3} +} + +func (m *Offer_Operation_Unreserve) GetResources() []Resource { + if m != nil { + return m.Resources + } + return nil +} + +type Offer_Operation_Create struct { + Volumes []Resource `protobuf:"bytes,1,rep,name=volumes" json:"volumes"` +} + +func (m *Offer_Operation_Create) Reset() { *m = Offer_Operation_Create{} } +func (*Offer_Operation_Create) ProtoMessage() {} +func (*Offer_Operation_Create) Descriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{41, 0, 4} +} + +func (m *Offer_Operation_Create) GetVolumes() []Resource { + if m != nil { + return m.Volumes + } + return nil +} + +type Offer_Operation_Destroy struct { + Volumes []Resource `protobuf:"bytes,1,rep,name=volumes" json:"volumes"` +} + +func (m *Offer_Operation_Destroy) Reset() { *m = Offer_Operation_Destroy{} } +func (*Offer_Operation_Destroy) ProtoMessage() {} +func (*Offer_Operation_Destroy) Descriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{41, 0, 5} +} + +func (m *Offer_Operation_Destroy) GetVolumes() []Resource { + if m != nil { + return m.Volumes + } + return nil +} + +// Grow a 
volume by an additional disk resource. +// NOTE: This is currently experimental and only for persistent volumes +// created on ROOT/PATH disk. +type Offer_Operation_GrowVolume struct { + Volume Resource `protobuf:"bytes,1,req,name=volume" json:"volume"` + Addition Resource `protobuf:"bytes,2,req,name=addition" json:"addition"` +} + +func (m *Offer_Operation_GrowVolume) Reset() { *m = Offer_Operation_GrowVolume{} } +func (*Offer_Operation_GrowVolume) ProtoMessage() {} +func (*Offer_Operation_GrowVolume) Descriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{41, 0, 6} +} + +func (m *Offer_Operation_GrowVolume) GetVolume() Resource { + if m != nil { + return m.Volume + } + return Resource{} +} + +func (m *Offer_Operation_GrowVolume) GetAddition() Resource { + if m != nil { + return m.Addition + } + return Resource{} +} + +// Shrink a volume by the size specified in the `subtract` field. +// NOTE: This is currently experimental and only for persistent volumes +// created on ROOT/PATH disk. +type Offer_Operation_ShrinkVolume struct { + Volume Resource `protobuf:"bytes,1,req,name=volume" json:"volume"` + // See comments in `Value.Scalar` for maximum precision supported. + Subtract Value_Scalar `protobuf:"bytes,2,req,name=subtract" json:"subtract"` +} + +func (m *Offer_Operation_ShrinkVolume) Reset() { *m = Offer_Operation_ShrinkVolume{} } +func (*Offer_Operation_ShrinkVolume) ProtoMessage() {} +func (*Offer_Operation_ShrinkVolume) Descriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{41, 0, 7} +} + +func (m *Offer_Operation_ShrinkVolume) GetVolume() Resource { + if m != nil { + return m.Volume + } + return Resource{} +} + +func (m *Offer_Operation_ShrinkVolume) GetSubtract() Value_Scalar { + if m != nil { + return m.Subtract + } + return Value_Scalar{} +} + +// Create a `MOUNT` or `BLOCK` disk resource from a `RAW` disk resource. +// NOTE: For the time being, this API is subject to change and the related +// feature is experimental. +type Offer_Operation_CreateDisk struct { + Source Resource `protobuf:"bytes,1,req,name=source" json:"source"` + // NOTE: Only `MOUNT` or `BLOCK` is allowed in the `target_type` field. + TargetType Resource_DiskInfo_Source_Type `protobuf:"varint,2,req,name=target_type,json=targetType,enum=mesos.Resource_DiskInfo_Source_Type" json:"target_type"` +} + +func (m *Offer_Operation_CreateDisk) Reset() { *m = Offer_Operation_CreateDisk{} } +func (*Offer_Operation_CreateDisk) ProtoMessage() {} +func (*Offer_Operation_CreateDisk) Descriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{41, 0, 8} +} + +func (m *Offer_Operation_CreateDisk) GetSource() Resource { + if m != nil { + return m.Source + } + return Resource{} +} + +func (m *Offer_Operation_CreateDisk) GetTargetType() Resource_DiskInfo_Source_Type { + if m != nil { + return m.TargetType + } + return Resource_DiskInfo_Source_UNKNOWN +} + +// Destroy a `MOUNT` or `BLOCK` disk resource. This will result in a `RAW` +// disk resource. +// NOTE: For the time being, this API is subject to change and the related +// feature is experimental. +type Offer_Operation_DestroyDisk struct { + // NOTE: Only a `MOUNT` or `BLOCK` disk is allowed in the `source` field. 
+ Source Resource `protobuf:"bytes,1,req,name=source" json:"source"` +} + +func (m *Offer_Operation_DestroyDisk) Reset() { *m = Offer_Operation_DestroyDisk{} } +func (*Offer_Operation_DestroyDisk) ProtoMessage() {} +func (*Offer_Operation_DestroyDisk) Descriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{41, 0, 9} +} + +func (m *Offer_Operation_DestroyDisk) GetSource() Resource { + if m != nil { + return m.Source + } + return Resource{} +} + +// * +// A request to return some resources occupied by a framework. +type InverseOffer struct { + // This is the same OfferID as found in normal offers, which allows + // re-use of some of the OfferID-only messages. + OfferID OfferID `protobuf:"bytes,1,req,name=id" json:"id"` + // URL for reaching the agent running on the host. This enables some + // optimizations as described in MESOS-3012, such as allowing the + // scheduler driver to bypass the master and talk directly with an agent. + URL *URL `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` + // The framework that should release its resources. + // If no specifics are provided (i.e. which agent), all the framework's + // resources are requested back. + FrameworkID FrameworkID `protobuf:"bytes,3,req,name=framework_id,json=frameworkId" json:"framework_id"` + // Specified if the resources need to be released from a particular agent. + // All the framework's resources on this agent are requested back, + // unless further qualified by the `resources` field. + AgentID *AgentID `protobuf:"bytes,4,opt,name=agent_id,json=agentId" json:"agent_id,omitempty"` + // This InverseOffer represents a planned unavailability event in the + // specified interval. Any tasks running on the given framework or agent + // may be killed when the interval arrives. Therefore, frameworks should + // aim to gracefully terminate tasks prior to the arrival of the interval. + // + // For reserved resources, the resources are expected to be returned to the + // framework after the unavailability interval. This is an expectation, + // not a guarantee. For example, if the unavailability duration is not set, + // the resources may be removed permanently. + // + // For other resources, there is no guarantee that requested resources will + // be returned after the unavailability interval. The allocator has no + // obligation to re-offer these resources to the prior framework after + // the unavailability. + Unavailability Unavailability `protobuf:"bytes,5,req,name=unavailability" json:"unavailability"` + // A list of resources being requested back from the framework, + // on the agent identified by `agent_id`. If no resources are specified + // then all resources are being requested back. For the purpose of + // maintenance, this field is always empty (maintenance always requests + // all resources back). 
+ Resources []Resource `protobuf:"bytes,6,rep,name=resources" json:"resources"` +} + +func (m *InverseOffer) Reset() { *m = InverseOffer{} } +func (*InverseOffer) ProtoMessage() {} +func (*InverseOffer) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{42} } + +func (m *InverseOffer) GetOfferID() OfferID { + if m != nil { + return m.OfferID + } + return OfferID{} +} + +func (m *InverseOffer) GetURL() *URL { + if m != nil { + return m.URL + } + return nil +} + +func (m *InverseOffer) GetFrameworkID() FrameworkID { + if m != nil { + return m.FrameworkID + } + return FrameworkID{} +} + +func (m *InverseOffer) GetAgentID() *AgentID { + if m != nil { + return m.AgentID + } + return nil +} + +func (m *InverseOffer) GetUnavailability() Unavailability { + if m != nil { + return m.Unavailability + } + return Unavailability{} +} + +func (m *InverseOffer) GetResources() []Resource { + if m != nil { + return m.Resources + } + return nil +} + +// * +// Describes a task. Passed from the scheduler all the way to an +// executor (see SchedulerDriver::launchTasks and +// Executor::launchTask). Either ExecutorInfo or CommandInfo should be set. +// A different executor can be used to launch this task, and subsequent tasks +// meant for the same executor can reuse the same ExecutorInfo struct. +type TaskInfo struct { + Name string `protobuf:"bytes,1,req,name=name" json:"name"` + TaskID TaskID `protobuf:"bytes,2,req,name=task_id,json=taskId" json:"task_id"` + AgentID AgentID `protobuf:"bytes,3,req,name=agent_id,json=agentId" json:"agent_id"` + Resources []Resource `protobuf:"bytes,4,rep,name=resources" json:"resources"` + Executor *ExecutorInfo `protobuf:"bytes,5,opt,name=executor" json:"executor,omitempty"` + Command *CommandInfo `protobuf:"bytes,7,opt,name=command" json:"command,omitempty"` + // Task provided with a container will launch the container as part + // of this task paired with the task's CommandInfo. + Container *ContainerInfo `protobuf:"bytes,9,opt,name=container" json:"container,omitempty"` + // A health check for the task. Implemented for executor-less + // command-based tasks. For tasks that specify an executor, it is + // the executor's responsibility to implement the health checking. + HealthCheck *HealthCheck `protobuf:"bytes,8,opt,name=health_check,json=healthCheck" json:"health_check,omitempty"` + // A general check for the task. Implemented for all built-in executors. + // For tasks that specify an executor, it is the executor's responsibility + // to implement checking support. Executors should (all built-in executors + // will) neither interpret nor act on the check's result. + // + // NOTE: Check support in built-in executors is experimental. + // + // TODO(alexr): Consider supporting multiple checks per task. + Check *CheckInfo `protobuf:"bytes,13,opt,name=check" json:"check,omitempty"` + // A kill policy for the task. Implemented for executor-less + // command-based and docker tasks. For tasks that specify an + // executor, it is the executor's responsibility to implement + // the kill policy. + KillPolicy *KillPolicy `protobuf:"bytes,12,opt,name=kill_policy,json=killPolicy" json:"kill_policy,omitempty"` + Data []byte `protobuf:"bytes,6,opt,name=data" json:"data,omitempty"` + // Labels are free-form key value pairs which are exposed through + // master and agent endpoints. Labels will not be interpreted or + // acted upon by Mesos itself. As opposed to the data field, labels + // will be kept in memory on master and agent processes. 
Therefore, + // labels should be used to tag tasks with light-weight meta-data. + // Labels should not contain duplicate key-value pairs. + Labels *Labels `protobuf:"bytes,10,opt,name=labels" json:"labels,omitempty"` + // Service discovery information for the task. It is not interpreted + // or acted upon by Mesos. It is up to a service discovery system + // to use this information as needed and to handle tasks without + // service discovery information. + Discovery *DiscoveryInfo `protobuf:"bytes,11,opt,name=discovery" json:"discovery,omitempty"` + // Maximum duration for task completion. If the task is non-terminal at the + // end of this duration, it will fail with the reason + // `REASON_MAX_COMPLETION_TIME_REACHED`. Mesos supports this field for + // executor-less tasks, and tasks that use Docker or default executors. + // It is the executor's responsibility to implement this, so it might not be + // supported by all custom executors. + MaxCompletionTime *DurationInfo `protobuf:"bytes,14,opt,name=max_completion_time,json=maxCompletionTime" json:"max_completion_time,omitempty"` +} + +func (m *TaskInfo) Reset() { *m = TaskInfo{} } +func (*TaskInfo) ProtoMessage() {} +func (*TaskInfo) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{43} } + +func (m *TaskInfo) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *TaskInfo) GetTaskID() TaskID { + if m != nil { + return m.TaskID + } + return TaskID{} +} + +func (m *TaskInfo) GetAgentID() AgentID { + if m != nil { + return m.AgentID + } + return AgentID{} +} + +func (m *TaskInfo) GetResources() []Resource { + if m != nil { + return m.Resources + } + return nil +} + +func (m *TaskInfo) GetExecutor() *ExecutorInfo { + if m != nil { + return m.Executor + } + return nil +} + +func (m *TaskInfo) GetCommand() *CommandInfo { + if m != nil { + return m.Command + } + return nil +} + +func (m *TaskInfo) GetContainer() *ContainerInfo { + if m != nil { + return m.Container + } + return nil +} + +func (m *TaskInfo) GetHealthCheck() *HealthCheck { + if m != nil { + return m.HealthCheck + } + return nil +} + +func (m *TaskInfo) GetCheck() *CheckInfo { + if m != nil { + return m.Check + } + return nil +} + +func (m *TaskInfo) GetKillPolicy() *KillPolicy { + if m != nil { + return m.KillPolicy + } + return nil +} + +func (m *TaskInfo) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *TaskInfo) GetLabels() *Labels { + if m != nil { + return m.Labels + } + return nil +} + +func (m *TaskInfo) GetDiscovery() *DiscoveryInfo { + if m != nil { + return m.Discovery + } + return nil +} + +func (m *TaskInfo) GetMaxCompletionTime() *DurationInfo { + if m != nil { + return m.MaxCompletionTime + } + return nil +} + +// * +// Describes a group of tasks that belong to an executor. The +// executor will receive the task group in a single message to +// allow the group to be launched "atomically". +// +// NOTES: +// 1) `NetworkInfo` must not be set inside task's `ContainerInfo`. +// 2) `TaskInfo.executor` doesn't need to be set. If set, it should match +// `LaunchGroup.executor`.
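+
+// Usage sketch: a minimal executor-less, command-based task, with the same
+// `mesos` alias as above. The `Value` fields on `TaskID` and `CommandInfo`
+// and the `AgentID`/`Resources` fields on `Offer` are assumed from their
+// definitions elsewhere in this file.
+//
+//	shell := "echo hello"
+//	task := mesos.TaskInfo{
+//		Name:      "hello",
+//		TaskID:    mesos.TaskID{Value: "hello-1"},
+//		AgentID:   offer.AgentID,   // from the offer being accepted
+//		Resources: offer.Resources, // or a subset of them
+//		// Executor is left unset, so Mesos runs the command directly.
+//		Command: &mesos.CommandInfo{Value: &shell},
+//	}
+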
+type TaskGroupInfo struct { + Tasks []TaskInfo `protobuf:"bytes,1,rep,name=tasks" json:"tasks"` +} + +func (m *TaskGroupInfo) Reset() { *m = TaskGroupInfo{} } +func (*TaskGroupInfo) ProtoMessage() {} +func (*TaskGroupInfo) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{44} } + +func (m *TaskGroupInfo) GetTasks() []TaskInfo { + if m != nil { + return m.Tasks + } + return nil +} + +// * +// Describes a task, similar to `TaskInfo`. +// +// `Task` is used in some of the Mesos messages found below. +// `Task` is used instead of `TaskInfo` if: +// 1) we need additional IDs, such as a specific +// framework, executor, or agent; or +// 2) we do not need the additional data, such as the command run by the +// task or the health checks. These additional fields may be large and +// unnecessary for some Mesos messages. +// +// `Task` is generally constructed from a `TaskInfo`. See protobuf::createTask. +type Task struct { + Name string `protobuf:"bytes,1,req,name=name" json:"name"` + TaskID TaskID `protobuf:"bytes,2,req,name=task_id,json=taskId" json:"task_id"` + FrameworkID FrameworkID `protobuf:"bytes,3,req,name=framework_id,json=frameworkId" json:"framework_id"` + ExecutorID *ExecutorID `protobuf:"bytes,4,opt,name=executor_id,json=executorId" json:"executor_id,omitempty"` + AgentID AgentID `protobuf:"bytes,5,req,name=agent_id,json=agentId" json:"agent_id"` + State *TaskState `protobuf:"varint,6,req,name=state,enum=mesos.TaskState" json:"state,omitempty"` + Resources []Resource `protobuf:"bytes,7,rep,name=resources" json:"resources"` + Statuses []TaskStatus `protobuf:"bytes,8,rep,name=statuses" json:"statuses"` + // These fields correspond to the state and uuid of the latest + // status update forwarded to the master. + // NOTE: Either both the fields must be set or both must be unset. + StatusUpdateState *TaskState `protobuf:"varint,9,opt,name=status_update_state,json=statusUpdateState,enum=mesos.TaskState" json:"status_update_state,omitempty"` + StatusUpdateUUID []byte `protobuf:"bytes,10,opt,name=status_update_uuid,json=statusUpdateUuid" json:"status_update_uuid,omitempty"` + Labels *Labels `protobuf:"bytes,11,opt,name=labels" json:"labels,omitempty"` + // Service discovery information for the task. It is not interpreted + // or acted upon by Mesos. It is up to a service discovery system + // to use this information as needed and to handle tasks without + // service discovery information. + Discovery *DiscoveryInfo `protobuf:"bytes,12,opt,name=discovery" json:"discovery,omitempty"` + // Container information for the task. + Container *ContainerInfo `protobuf:"bytes,13,opt,name=container" json:"container,omitempty"` + // Specific user under which task is running. 
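+
+// Usage sketch: wrapping tasks in a group so they are launched atomically,
+// per the NOTES above (no per-task NetworkInfo; any TaskInfo.Executor must
+// match the group's executor). Same assumptions as the sketches above.
+//
+//	group := mesos.TaskGroupInfo{
+//		Tasks: []mesos.TaskInfo{taskA, taskB}, // built like `task` above
+//	}
+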
+ User *string `protobuf:"bytes,14,opt,name=user" json:"user,omitempty"` +} + +func (m *Task) Reset() { *m = Task{} } +func (*Task) ProtoMessage() {} +func (*Task) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{45} } + +func (m *Task) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Task) GetTaskID() TaskID { + if m != nil { + return m.TaskID + } + return TaskID{} +} + +func (m *Task) GetFrameworkID() FrameworkID { + if m != nil { + return m.FrameworkID + } + return FrameworkID{} +} + +func (m *Task) GetExecutorID() *ExecutorID { + if m != nil { + return m.ExecutorID + } + return nil +} + +func (m *Task) GetAgentID() AgentID { + if m != nil { + return m.AgentID + } + return AgentID{} +} + +func (m *Task) GetState() TaskState { + if m != nil && m.State != nil { + return *m.State + } + return TASK_STAGING +} + +func (m *Task) GetResources() []Resource { + if m != nil { + return m.Resources + } + return nil +} + +func (m *Task) GetStatuses() []TaskStatus { + if m != nil { + return m.Statuses + } + return nil +} + +func (m *Task) GetStatusUpdateState() TaskState { + if m != nil && m.StatusUpdateState != nil { + return *m.StatusUpdateState + } + return TASK_STAGING +} + +func (m *Task) GetStatusUpdateUUID() []byte { + if m != nil { + return m.StatusUpdateUUID + } + return nil +} + +func (m *Task) GetLabels() *Labels { + if m != nil { + return m.Labels + } + return nil +} + +func (m *Task) GetDiscovery() *DiscoveryInfo { + if m != nil { + return m.Discovery + } + return nil +} + +func (m *Task) GetContainer() *ContainerInfo { + if m != nil { + return m.Container + } + return nil +} + +func (m *Task) GetUser() string { + if m != nil && m.User != nil { + return *m.User + } + return "" +} + +// * +// Describes a resource limitation that caused a task failure. +type TaskResourceLimitation struct { + // This field contains the resource whose limits were violated. + // + // NOTE: 'Resources' is used here because the resource may span + // multiple roles (e.g. `"mem(*):1;mem(role):2"`). + Resources []Resource `protobuf:"bytes,1,rep,name=resources" json:"resources"` +} + +func (m *TaskResourceLimitation) Reset() { *m = TaskResourceLimitation{} } +func (*TaskResourceLimitation) ProtoMessage() {} +func (*TaskResourceLimitation) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{46} } + +func (m *TaskResourceLimitation) GetResources() []Resource { + if m != nil { + return m.Resources + } + return nil +} + +// * +// Describes a UUID. +type UUID struct { + Value []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"` +} + +func (m *UUID) Reset() { *m = UUID{} } +func (*UUID) ProtoMessage() {} +func (*UUID) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{47} } + +func (m *UUID) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +// * +// Describes an operation, similar to `Offer.Operation`, with +// some additional information. +type Operation struct { + FrameworkID *FrameworkID `protobuf:"bytes,1,opt,name=framework_id,json=frameworkId" json:"framework_id,omitempty"` + AgentID *AgentID `protobuf:"bytes,2,opt,name=agent_id,json=agentId" json:"agent_id,omitempty"` + Info Offer_Operation `protobuf:"bytes,3,req,name=info" json:"info"` + LatestStatus OperationStatus `protobuf:"bytes,4,req,name=latest_status,json=latestStatus" json:"latest_status"` + // All the statuses known to this operation. Some of the statuses in this + // list might not have been acknowledged yet. The statuses are ordered. 
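+
+// Note on the generated getters, visible in the bodies above: they are
+// nil-safe, so they can be chained on possibly-nil messages without panics.
+// A short sketch with the `mesos` alias from above:
+//
+//	var t *mesos.Task        // deliberately nil
+//	_ = t.GetState()         // TASK_STAGING, no panic
+//	_ = t.GetStatuses()      // nil slice
+//	_ = t.GetName()          // ""
+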
+ Statuses []OperationStatus `protobuf:"bytes,5,rep,name=statuses" json:"statuses"` + // This is the internal UUID for the operation, which is kept independently + // from the framework-specified operation ID, which is optional. + UUID UUID `protobuf:"bytes,6,req,name=uuid" json:"uuid"` +} + +func (m *Operation) Reset() { *m = Operation{} } +func (*Operation) ProtoMessage() {} +func (*Operation) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{48} } + +func (m *Operation) GetFrameworkID() *FrameworkID { + if m != nil { + return m.FrameworkID + } + return nil +} + +func (m *Operation) GetAgentID() *AgentID { + if m != nil { + return m.AgentID + } + return nil +} + +func (m *Operation) GetInfo() Offer_Operation { + if m != nil { + return m.Info + } + return Offer_Operation{} +} + +func (m *Operation) GetLatestStatus() OperationStatus { + if m != nil { + return m.LatestStatus + } + return OperationStatus{} +} + +func (m *Operation) GetStatuses() []OperationStatus { + if m != nil { + return m.Statuses + } + return nil +} + +func (m *Operation) GetUUID() UUID { + if m != nil { + return m.UUID + } + return UUID{} +} + +// * +// Describes the current status of an operation. +type OperationStatus struct { + // While frameworks will only receive status updates for operations on which + // they have set an ID, this field is optional because this message is also + // used internally by Mesos components when the operation's ID has not been + // set. + OperationID *OperationID `protobuf:"bytes,1,opt,name=operation_id,json=operationId" json:"operation_id,omitempty"` + State OperationState `protobuf:"varint,2,req,name=state,enum=mesos.OperationState" json:"state"` + Message *string `protobuf:"bytes,3,opt,name=message" json:"message,omitempty"` + // Converted resources after applying the operation. This only + // applies if the `state` is `OPERATION_FINISHED`. + ConvertedResources []Resource `protobuf:"bytes,4,rep,name=converted_resources,json=convertedResources" json:"converted_resources"` + // Statuses that are delivered reliably to the scheduler will + // include a `uuid`. The status is considered delivered once + // it is acknowledged by the scheduler. + UUID *UUID `protobuf:"bytes,5,opt,name=uuid" json:"uuid,omitempty"` +} + +func (m *OperationStatus) Reset() { *m = OperationStatus{} } +func (*OperationStatus) ProtoMessage() {} +func (*OperationStatus) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{49} } + +func (m *OperationStatus) GetOperationID() *OperationID { + if m != nil { + return m.OperationID + } + return nil +} + +func (m *OperationStatus) GetState() OperationState { + if m != nil { + return m.State + } + return OPERATION_UNSUPPORTED +} + +func (m *OperationStatus) GetMessage() string { + if m != nil && m.Message != nil { + return *m.Message + } + return "" +} + +func (m *OperationStatus) GetConvertedResources() []Resource { + if m != nil { + return m.ConvertedResources + } + return nil +} + +func (m *OperationStatus) GetUUID() *UUID { + if m != nil { + return m.UUID + } + return nil +} + +// * +// Describes the status of a check. Type and the corresponding field, i.e., +// `command` or `http` must be set. If the result of the check is not available +// (e.g., the check timed out), these fields must contain empty messages, i.e., +// `exit_code` or `status_code` will be unset. +// +// NOTE: This API is subject to change and the related feature is experimental. +type CheckStatusInfo struct { + // The type of the check this status corresponds to. 
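+
+// Usage sketch: consuming an operation status update. `OPERATION_FINISHED`
+// (named in the comment above) is assumed to be an unprefixed constant like
+// the other enum values in this package; `apply` and `ack` are placeholder
+// helpers.
+//
+//	func onOperationUpdate(s *mesos.OperationStatus) {
+//		if s.GetState() == mesos.OPERATION_FINISHED {
+//			apply(s.GetConvertedResources()) // only set on success
+//		}
+//		if s.GetUUID() != nil {
+//			ack(s) // a UUID marks a reliably delivered update
+//		}
+//	}
+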
+ Type *CheckInfo_Type `protobuf:"varint,1,opt,name=type,enum=mesos.CheckInfo_Type" json:"type,omitempty"` + // Status of a command check. + Command *CheckStatusInfo_Command `protobuf:"bytes,2,opt,name=command" json:"command,omitempty"` + // Status of an HTTP check. + HTTP *CheckStatusInfo_Http `protobuf:"bytes,3,opt,name=http" json:"http,omitempty"` + // Status of a TCP check. + TCP *CheckStatusInfo_Tcp `protobuf:"bytes,4,opt,name=tcp" json:"tcp,omitempty"` +} + +func (m *CheckStatusInfo) Reset() { *m = CheckStatusInfo{} } +func (*CheckStatusInfo) ProtoMessage() {} +func (*CheckStatusInfo) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{50} } + +func (m *CheckStatusInfo) GetType() CheckInfo_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return CheckInfo_UNKNOWN +} + +func (m *CheckStatusInfo) GetCommand() *CheckStatusInfo_Command { + if m != nil { + return m.Command + } + return nil +} + +func (m *CheckStatusInfo) GetHTTP() *CheckStatusInfo_Http { + if m != nil { + return m.HTTP + } + return nil +} + +func (m *CheckStatusInfo) GetTCP() *CheckStatusInfo_Tcp { + if m != nil { + return m.TCP + } + return nil +} + +type CheckStatusInfo_Command struct { + // Exit code of a command check. It is the result of calling + // `WEXITSTATUS()` on `waitpid()` termination information on + // Posix and calling `GetExitCodeProcess()` on Windows. + ExitCode *int32 `protobuf:"varint,1,opt,name=exit_code,json=exitCode" json:"exit_code,omitempty"` +} + +func (m *CheckStatusInfo_Command) Reset() { *m = CheckStatusInfo_Command{} } +func (*CheckStatusInfo_Command) ProtoMessage() {} +func (*CheckStatusInfo_Command) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{50, 0} } + +func (m *CheckStatusInfo_Command) GetExitCode() int32 { + if m != nil && m.ExitCode != nil { + return *m.ExitCode + } + return 0 +} + +type CheckStatusInfo_Http struct { + // HTTP status code of an HTTP check. + StatusCode *uint32 `protobuf:"varint,1,opt,name=status_code,json=statusCode" json:"status_code,omitempty"` +} + +func (m *CheckStatusInfo_Http) Reset() { *m = CheckStatusInfo_Http{} } +func (*CheckStatusInfo_Http) ProtoMessage() {} +func (*CheckStatusInfo_Http) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{50, 1} } + +func (m *CheckStatusInfo_Http) GetStatusCode() uint32 { + if m != nil && m.StatusCode != nil { + return *m.StatusCode + } + return 0 +} + +type CheckStatusInfo_Tcp struct { + // Whether a TCP connection succeeded. + Succeeded *bool `protobuf:"varint,1,opt,name=succeeded" json:"succeeded,omitempty"` +} + +func (m *CheckStatusInfo_Tcp) Reset() { *m = CheckStatusInfo_Tcp{} } +func (*CheckStatusInfo_Tcp) ProtoMessage() {} +func (*CheckStatusInfo_Tcp) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{50, 2} } + +func (m *CheckStatusInfo_Tcp) GetSucceeded() bool { + if m != nil && m.Succeeded != nil { + return *m.Succeeded + } + return false +} + +// * +// Describes the current status of a task. 
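+
+// Usage sketch: interpreting a check status, using only the types and
+// getters above. Note that a timed-out check leaves the inner result unset,
+// so the presence of the pointer fields must be tested explicitly.
+//
+//	func checkPassed(cs *mesos.CheckStatusInfo) bool {
+//		switch {
+//		case cs.GetCommand() != nil:
+//			c := cs.GetCommand()
+//			return c.ExitCode != nil && c.GetExitCode() == 0
+//		case cs.GetHTTP() != nil:
+//			code := cs.GetHTTP().GetStatusCode()
+//			return code >= 200 && code < 300
+//		case cs.GetTCP() != nil:
+//			return cs.GetTCP().GetSucceeded()
+//		}
+//		return false
+//	}
+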
+type TaskStatus struct { + TaskID TaskID `protobuf:"bytes,1,req,name=task_id,json=taskId" json:"task_id"` + State *TaskState `protobuf:"varint,2,req,name=state,enum=mesos.TaskState" json:"state,omitempty"` + Message *string `protobuf:"bytes,4,opt,name=message" json:"message,omitempty"` + Source *TaskStatus_Source `protobuf:"varint,9,opt,name=source,enum=mesos.TaskStatus_Source" json:"source,omitempty"` + Reason *TaskStatus_Reason `protobuf:"varint,10,opt,name=reason,enum=mesos.TaskStatus_Reason" json:"reason,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"` + AgentID *AgentID `protobuf:"bytes,5,opt,name=agent_id,json=agentId" json:"agent_id,omitempty"` + ExecutorID *ExecutorID `protobuf:"bytes,7,opt,name=executor_id,json=executorId" json:"executor_id,omitempty"` + Timestamp *float64 `protobuf:"fixed64,6,opt,name=timestamp" json:"timestamp,omitempty"` + // Statuses that are delivered reliably to the scheduler will + // include a 'uuid'. The status is considered delivered once + // it is acknowledged by the scheduler. Schedulers can choose + // to either explicitly acknowledge statuses or let the scheduler + // driver implicitly acknowledge (default). + // + // TODO(bmahler): This is currently overwritten in the scheduler + // driver and executor driver, but executors will need to set this + // to a valid RFC-4122 UUID if using the HTTP API. + UUID []byte `protobuf:"bytes,11,opt,name=uuid" json:"uuid,omitempty"` + // Describes whether the task has been determined to be healthy (true) or + // unhealthy (false) according to the `health_check` field in `TaskInfo`. + Healthy *bool `protobuf:"varint,8,opt,name=healthy" json:"healthy,omitempty"` + // Contains check status for the check specified in the corresponding + // `TaskInfo`. If no check has been specified, this field must be + // absent, otherwise it must be present even if the check status is + // not available yet. If the status update is triggered for a different + // reason than `REASON_TASK_CHECK_STATUS_UPDATED`, this field will contain + // the last known value. + // + // NOTE: A check-related task status update is triggered if and only if + // the value or presence of any field in `CheckStatusInfo` changes. + // + // NOTE: Check support in built-in executors is experimental. + CheckStatus *CheckStatusInfo `protobuf:"bytes,15,opt,name=check_status,json=checkStatus" json:"check_status,omitempty"` + // Labels are free-form key value pairs which are exposed through + // master and agent endpoints. Labels will not be interpreted or + // acted upon by Mesos itself. As opposed to the data field, labels + // will be kept in memory on master and agent processes. Therefore, + // labels should be used to tag TaskStatus message with light-weight + // meta-data. Labels should not contain duplicate key-value pairs. + Labels *Labels `protobuf:"bytes,12,opt,name=labels" json:"labels,omitempty"` + // Container related information that is resolved dynamically such as + // network address. + ContainerStatus *ContainerStatus `protobuf:"bytes,13,opt,name=container_status,json=containerStatus" json:"container_status,omitempty"` + // The time (according to the master's clock) when the agent where + // this task was running became unreachable. This is only set on + // status updates for tasks running on agents that are unreachable + // (e.g., partitioned away from the master). 
+ UnreachableTime *TimeInfo `protobuf:"bytes,14,opt,name=unreachable_time,json=unreachableTime" json:"unreachable_time,omitempty"` + // If the reason field indicates a container resource limitation, + // this field optionally contains additional information. + Limitation *TaskResourceLimitation `protobuf:"bytes,16,opt,name=limitation" json:"limitation,omitempty"` +} + +func (m *TaskStatus) Reset() { *m = TaskStatus{} } +func (*TaskStatus) ProtoMessage() {} +func (*TaskStatus) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{51} } + +func (m *TaskStatus) GetTaskID() TaskID { + if m != nil { + return m.TaskID + } + return TaskID{} +} + +func (m *TaskStatus) GetState() TaskState { + if m != nil && m.State != nil { + return *m.State + } + return TASK_STAGING +} + +func (m *TaskStatus) GetMessage() string { + if m != nil && m.Message != nil { + return *m.Message + } + return "" +} + +func (m *TaskStatus) GetSource() TaskStatus_Source { + if m != nil && m.Source != nil { + return *m.Source + } + return SOURCE_MASTER +} + +func (m *TaskStatus) GetReason() TaskStatus_Reason { + if m != nil && m.Reason != nil { + return *m.Reason + } + return REASON_COMMAND_EXECUTOR_FAILED +} + +func (m *TaskStatus) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *TaskStatus) GetAgentID() *AgentID { + if m != nil { + return m.AgentID + } + return nil +} + +func (m *TaskStatus) GetExecutorID() *ExecutorID { + if m != nil { + return m.ExecutorID + } + return nil +} + +func (m *TaskStatus) GetTimestamp() float64 { + if m != nil && m.Timestamp != nil { + return *m.Timestamp + } + return 0 +} + +func (m *TaskStatus) GetUUID() []byte { + if m != nil { + return m.UUID + } + return nil +} + +func (m *TaskStatus) GetHealthy() bool { + if m != nil && m.Healthy != nil { + return *m.Healthy + } + return false +} + +func (m *TaskStatus) GetCheckStatus() *CheckStatusInfo { + if m != nil { + return m.CheckStatus + } + return nil +} + +func (m *TaskStatus) GetLabels() *Labels { + if m != nil { + return m.Labels + } + return nil +} + +func (m *TaskStatus) GetContainerStatus() *ContainerStatus { + if m != nil { + return m.ContainerStatus + } + return nil +} + +func (m *TaskStatus) GetUnreachableTime() *TimeInfo { + if m != nil { + return m.UnreachableTime + } + return nil +} + +func (m *TaskStatus) GetLimitation() *TaskResourceLimitation { + if m != nil { + return m.Limitation + } + return nil +} + +// * +// Describes possible filters that can be applied to unused resources +// (see SchedulerDriver::launchTasks) to influence the allocator. +type Filters struct { + // Time to consider unused resources refused. Note that all unused + // resources will be considered refused and use the default value + // (below) regardless of whether Filters was passed to + // SchedulerDriver::launchTasks. You MUST pass Filters with this + // field set to change this behavior (i.e., get another offer which + // includes unused resources sooner or later than the default). + // + // If this field is set to a number of seconds greater than 31536000 + // (365 days), then the resources will be considered refused for 365 + // days. If it is set to a negative number, then the default value + // will be used. 
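+
+// Usage sketch: the typical shape of a status-update handler; `ack` again
+// stands in for whatever acknowledgement call the scheduler uses.
+//
+//	func onStatusUpdate(s *mesos.TaskStatus) {
+//		log.Printf("task %v -> %v: %s",
+//			s.GetTaskID(), s.GetState(), s.GetMessage())
+//		// Reliable updates carry a UUID and must be acknowledged.
+//		if len(s.GetUUID()) > 0 {
+//			ack(s)
+//		}
+//	}
+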
+ RefuseSeconds *float64 `protobuf:"fixed64,1,opt,name=refuse_seconds,json=refuseSeconds,def=5" json:"refuse_seconds,omitempty"` +} + +func (m *Filters) Reset() { *m = Filters{} } +func (*Filters) ProtoMessage() {} +func (*Filters) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{52} } + +const Default_Filters_RefuseSeconds float64 = 5 + +func (m *Filters) GetRefuseSeconds() float64 { + if m != nil && m.RefuseSeconds != nil { + return *m.RefuseSeconds + } + return Default_Filters_RefuseSeconds +} + +// * +// Describes a collection of environment variables. This is used with +// CommandInfo in order to set environment variables before running a +// command. The contents of each variable may be specified as a string +// or a Secret; only one of `value` and `secret` must be set. +type Environment struct { + Variables []Environment_Variable `protobuf:"bytes,1,rep,name=variables" json:"variables"` +} + +func (m *Environment) Reset() { *m = Environment{} } +func (*Environment) ProtoMessage() {} +func (*Environment) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{53} } + +func (m *Environment) GetVariables() []Environment_Variable { + if m != nil { + return m.Variables + } + return nil +} + +type Environment_Variable struct { + Name string `protobuf:"bytes,1,req,name=name" json:"name"` + // In Mesos 1.2, the `Environment.variables.value` message was made + // optional. The default type for `Environment.variables.type` is now VALUE, + // which requires `value` to be set, maintaining backward compatibility. + // + // TODO(greggomann): The default can be removed in Mesos 2.1 (MESOS-7134). + Type *Environment_Variable_Type `protobuf:"varint,3,opt,name=type,enum=mesos.Environment_Variable_Type,def=1" json:"type,omitempty"` + // Only one of `value` and `secret` must be set. + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + Secret *Secret `protobuf:"bytes,4,opt,name=secret" json:"secret,omitempty"` +} + +func (m *Environment_Variable) Reset() { *m = Environment_Variable{} } +func (*Environment_Variable) ProtoMessage() {} +func (*Environment_Variable) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{53, 0} } + +const Default_Environment_Variable_Type Environment_Variable_Type = Environment_Variable_VALUE + +func (m *Environment_Variable) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Environment_Variable) GetType() Environment_Variable_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Environment_Variable_Type +} + +func (m *Environment_Variable) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +func (m *Environment_Variable) GetSecret() *Secret { + if m != nil { + return m.Secret + } + return nil +} + +// * +// A generic (key, value) pair used in various places for parameters. +type Parameter struct { + Key string `protobuf:"bytes,1,req,name=key" json:"key"` + Value string `protobuf:"bytes,2,req,name=value" json:"value"` +} + +func (m *Parameter) Reset() { *m = Parameter{} } +func (*Parameter) ProtoMessage() {} +func (*Parameter) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{54} } + +func (m *Parameter) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *Parameter) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// * +// Collection of Parameter. 
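+
+// Usage sketch: as the default annotation and constant above show, leaving
+// refuse_seconds unset reads back as 5 seconds; setting it changes how long
+// declined resources are withheld. Assumes the standard library `time`.
+//
+//	var f mesos.Filters
+//	_ = f.GetRefuseSeconds() // 5 (Default_Filters_RefuseSeconds)
+//
+//	week := (7 * 24 * time.Hour).Seconds()
+//	f.RefuseSeconds = &week // decline these resources for a week
+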
+type Parameters struct { + Parameter []Parameter `protobuf:"bytes,1,rep,name=parameter" json:"parameter"` +} + +func (m *Parameters) Reset() { *m = Parameters{} } +func (*Parameters) ProtoMessage() {} +func (*Parameters) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{55} } + +func (m *Parameters) GetParameter() []Parameter { + if m != nil { + return m.Parameter + } + return nil +} + +// * +// Credential used in various places for authentication and +// authorization. +// +// NOTE: A 'principal' is different from 'FrameworkInfo.user'. The +// former is used for authentication and authorization while the +// latter is used to determine the default user under which the +// framework's executors/tasks are run. +type Credential struct { + Principal string `protobuf:"bytes,1,req,name=principal" json:"principal"` + Secret *string `protobuf:"bytes,2,opt,name=secret" json:"secret,omitempty"` +} + +func (m *Credential) Reset() { *m = Credential{} } +func (*Credential) ProtoMessage() {} +func (*Credential) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{56} } + +func (m *Credential) GetPrincipal() string { + if m != nil { + return m.Principal + } + return "" +} + +func (m *Credential) GetSecret() string { + if m != nil && m.Secret != nil { + return *m.Secret + } + return "" +} + +// * +// Credentials used for framework authentication, HTTP authentication +// (where the common 'username' and 'password' are captured as +// 'principal' and 'secret' respectively), etc. +type Credentials struct { + Credentials []Credential `protobuf:"bytes,1,rep,name=credentials" json:"credentials"` +} + +func (m *Credentials) Reset() { *m = Credentials{} } +func (*Credentials) ProtoMessage() {} +func (*Credentials) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{57} } + +func (m *Credentials) GetCredentials() []Credential { + if m != nil { + return m.Credentials + } + return nil +} + +// * +// Secret used to pass privileged information. It is designed to provide +// pass-by-value or pass-by-reference semantics, where the REFERENCE type can be +// used by custom modules which interact with a secure back-end. +type Secret struct { + Type Secret_Type `protobuf:"varint,1,opt,name=type,enum=mesos.Secret_Type" json:"type"` + // Only one of `reference` and `value` must be set. + Reference *Secret_Reference `protobuf:"bytes,2,opt,name=reference" json:"reference,omitempty"` + Value *Secret_Value `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"` +} + +func (m *Secret) Reset() { *m = Secret{} } +func (*Secret) ProtoMessage() {} +func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{58} } + +func (m *Secret) GetType() Secret_Type { + if m != nil { + return m.Type + } + return Secret_UNKNOWN +} + +func (m *Secret) GetReference() *Secret_Reference { + if m != nil { + return m.Reference + } + return nil +} + +func (m *Secret) GetValue() *Secret_Value { + if m != nil { + return m.Value + } + return nil +} + +// Can be used by modules to refer to a secret stored in a secure back-end. +// The `key` field is provided to permit reference to a single value within a +// secret containing arbitrary key-value pairs. +// +// For example, given a back-end secret store with a secret named +// "my-secret" containing the following key-value pairs: +// +// { +// "username": "my-user", +// "password": "my-password" +// } +// +// the username could be referred to in a `Secret` by specifying +// "my-secret" for the `name` and "username" for the `key`.
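+
+// Usage sketch for the example above: referencing the "username" entry of
+// the back-end secret "my-secret". The constant `Secret_REFERENCE` is
+// assumed by analogy with `Secret_UNKNOWN` in the getter above.
+//
+//	key := "username"
+//	s := mesos.Secret{
+//		Type: mesos.Secret_REFERENCE,
+//		Reference: &mesos.Secret_Reference{
+//			Name: "my-secret",
+//			Key:  &key,
+//		},
+//	}
+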
+type Secret_Reference struct { + Name string `protobuf:"bytes,1,req,name=name" json:"name"` + Key *string `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"` +} + +func (m *Secret_Reference) Reset() { *m = Secret_Reference{} } +func (*Secret_Reference) ProtoMessage() {} +func (*Secret_Reference) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{58, 0} } + +func (m *Secret_Reference) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Secret_Reference) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +// Used to pass the value of a secret. +type Secret_Value struct { + Data []byte `protobuf:"bytes,1,req,name=data" json:"data,omitempty"` +} + +func (m *Secret_Value) Reset() { *m = Secret_Value{} } +func (*Secret_Value) ProtoMessage() {} +func (*Secret_Value) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{58, 1} } + +func (m *Secret_Value) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +// * +// Rate (queries per second, QPS) limit for messages from a framework to master. +// Strictly speaking they are the combined rate from all frameworks of the same +// principal. +type RateLimit struct { + // Leaving QPS unset gives it unlimited rate (i.e., not throttled), + // which also implies unlimited capacity. + QPS *float64 `protobuf:"fixed64,1,opt,name=qps" json:"qps,omitempty"` + // Principal of framework(s) to be throttled. Should match + // FrameworkInfo.principal and Credential.principal (if using authentication). + Principal string `protobuf:"bytes,2,req,name=principal" json:"principal"` + // Max number of outstanding messages from frameworks of this principal + // allowed by master before the next message is dropped and an error is sent + // back to the sender. Messages received before the capacity is reached are + // still going to be processed after the error is sent. + // If unspecified, this principal is assigned unlimited capacity. + // NOTE: This value is ignored if 'qps' is not set. + Capacity *uint64 `protobuf:"varint,3,opt,name=capacity" json:"capacity,omitempty"` +} + +func (m *RateLimit) Reset() { *m = RateLimit{} } +func (*RateLimit) ProtoMessage() {} +func (*RateLimit) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{59} } + +func (m *RateLimit) GetQPS() float64 { + if m != nil && m.QPS != nil { + return *m.QPS + } + return 0 +} + +func (m *RateLimit) GetPrincipal() string { + if m != nil { + return m.Principal + } + return "" +} + +func (m *RateLimit) GetCapacity() uint64 { + if m != nil && m.Capacity != nil { + return *m.Capacity + } + return 0 +} + +// * +// Collection of RateLimit. +// Frameworks without rate limits defined here are not throttled unless +// 'aggregate_default_qps' is specified. +type RateLimits struct { + // Items should have unique principals. + Limits []RateLimit `protobuf:"bytes,1,rep,name=limits" json:"limits"` + // All the frameworks not specified in 'limits' get this default rate. + // This rate is an aggregate rate for all of them, i.e., their combined + // traffic is throttled together at this rate. + AggregateDefaultQPS *float64 `protobuf:"fixed64,2,opt,name=aggregate_default_qps,json=aggregateDefaultQps" json:"aggregate_default_qps,omitempty"` + // All the frameworks not specified in 'limits' get this default capacity. + // This is an aggregate value similar to 'aggregate_default_qps'. 
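+
+// Usage sketch: throttling one principal to 10 QPS with at most 100
+// outstanding messages, while all unlisted principals share a 1 QPS
+// aggregate. Per the comment above, `capacity` is ignored if `qps` is unset.
+//
+//	qps, agg := 10.0, 1.0
+//	capacity := uint64(100)
+//	limits := mesos.RateLimits{
+//		Limits: []mesos.RateLimit{
+//			{Principal: "analytics", QPS: &qps, Capacity: &capacity},
+//		},
+//		AggregateDefaultQPS: &agg,
+//	}
+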
+ AggregateDefaultCapacity *uint64 `protobuf:"varint,3,opt,name=aggregate_default_capacity,json=aggregateDefaultCapacity" json:"aggregate_default_capacity,omitempty"` +} + +func (m *RateLimits) Reset() { *m = RateLimits{} } +func (*RateLimits) ProtoMessage() {} +func (*RateLimits) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{60} } + +func (m *RateLimits) GetLimits() []RateLimit { + if m != nil { + return m.Limits + } + return nil +} + +func (m *RateLimits) GetAggregateDefaultQPS() float64 { + if m != nil && m.AggregateDefaultQPS != nil { + return *m.AggregateDefaultQPS + } + return 0 +} + +func (m *RateLimits) GetAggregateDefaultCapacity() uint64 { + if m != nil && m.AggregateDefaultCapacity != nil { + return *m.AggregateDefaultCapacity + } + return 0 +} + +// * +// Describe an image used by tasks or executors. Note that it's only +// for tasks or executors launched by MesosContainerizer currently. +type Image struct { + Type *Image_Type `protobuf:"varint,1,req,name=type,enum=mesos.Image_Type" json:"type,omitempty"` + // Only one of the following image messages should be set to match + // the type. + Appc *Image_Appc `protobuf:"bytes,2,opt,name=appc" json:"appc,omitempty"` + Docker *Image_Docker `protobuf:"bytes,3,opt,name=docker" json:"docker,omitempty"` + // With this flag set to false, the mesos containerizer will pull + // the docker/appc image from the registry even if the image is + // already downloaded on the agent. + Cached *bool `protobuf:"varint,4,opt,name=cached,def=1" json:"cached,omitempty"` +} + +func (m *Image) Reset() { *m = Image{} } +func (*Image) ProtoMessage() {} +func (*Image) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{61} } + +const Default_Image_Cached bool = true + +func (m *Image) GetType() Image_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return Image_APPC +} + +func (m *Image) GetAppc() *Image_Appc { + if m != nil { + return m.Appc + } + return nil +} + +func (m *Image) GetDocker() *Image_Docker { + if m != nil { + return m.Docker + } + return nil +} + +func (m *Image) GetCached() bool { + if m != nil && m.Cached != nil { + return *m.Cached + } + return Default_Image_Cached +} + +// Protobuf for specifying an Appc container image. See: +// https://github.com/appc/spec/blob/master/spec/aci.md +type Image_Appc struct { + // The name of the image. + Name string `protobuf:"bytes,1,req,name=name" json:"name"` + // An image ID is a string of the format "hash-value", where + // "hash" is the hash algorithm used and "value" is the hex + // encoded string of the digest. Currently the only permitted + // hash algorithm is sha512. + ID *string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` + // Optional labels. Suggested labels: "version", "os", and "arch". + Labels *Labels `protobuf:"bytes,3,opt,name=labels" json:"labels,omitempty"` +} + +func (m *Image_Appc) Reset() { *m = Image_Appc{} } +func (*Image_Appc) ProtoMessage() {} +func (*Image_Appc) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{61, 0} } + +func (m *Image_Appc) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Image_Appc) GetID() string { + if m != nil && m.ID != nil { + return *m.ID + } + return "" +} + +func (m *Image_Appc) GetLabels() *Labels { + if m != nil { + return m.Labels + } + return nil +} + +type Image_Docker struct { + // The name of the image. 
Expected format: + // [REGISTRY_HOST[:REGISTRY_PORT]/]REPOSITORY[:TAG|@TYPE:DIGEST] + // + // See: https://docs.docker.com/reference/commandline/pull/ + Name string `protobuf:"bytes,1,req,name=name" json:"name"` + // Credential to authenticate with docker registry. + // NOTE: This is not encrypted; therefore, frameworks and operators + // should enable SSL when passing this information. + // + // This field has never been used in Mesos before and is + // deprecated since Mesos 1.3. Please use `config` below + // (see MESOS-7088 for details). + Credential *Credential `protobuf:"bytes,2,opt,name=credential" json:"credential,omitempty"` + // Docker config containing credentials to authenticate with + // docker registry. The secret is expected to be a docker + // config file in JSON format with UTF-8 character encoding. + Config *Secret `protobuf:"bytes,3,opt,name=config" json:"config,omitempty"` +} + +func (m *Image_Docker) Reset() { *m = Image_Docker{} } +func (*Image_Docker) ProtoMessage() {} +func (*Image_Docker) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{61, 1} } + +func (m *Image_Docker) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Image_Docker) GetCredential() *Credential { + if m != nil { + return m.Credential + } + return nil +} + +func (m *Image_Docker) GetConfig() *Secret { + if m != nil { + return m.Config + } + return nil +} + +// * +// Describes how the mount will be propagated for a volume. See the +// following doc for more details about mount propagation: +// https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt +type MountPropagation struct { + Mode *MountPropagation_Mode `protobuf:"varint,1,opt,name=mode,enum=mesos.MountPropagation_Mode" json:"mode,omitempty"` +} + +func (m *MountPropagation) Reset() { *m = MountPropagation{} } +func (*MountPropagation) ProtoMessage() {} +func (*MountPropagation) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{62} } + +func (m *MountPropagation) GetMode() MountPropagation_Mode { + if m != nil && m.Mode != nil { + return *m.Mode + } + return MountPropagation_UNKNOWN +} + +// * +// Describes a volume mapping either from host to container or vice +// versa. Both paths can either refer to a directory or a file. +type Volume struct { + // TODO(gyliu513): Make this as `optional` after deprecation cycle of 1.0. + Mode *Volume_Mode `protobuf:"varint,3,req,name=mode,enum=mesos.Volume_Mode" json:"mode,omitempty"` + // Path pointing to a directory or file in the container. If the + // path is a relative path, it is relative to the container work + // directory. If the path is an absolute path, that path must + // already exist. + ContainerPath string `protobuf:"bytes,1,req,name=container_path,json=containerPath" json:"container_path"` + // Absolute path pointing to a directory or file on the host or a + // path relative to the container work directory. + HostPath *string `protobuf:"bytes,2,opt,name=host_path,json=hostPath" json:"host_path,omitempty"` + // The source of the volume is an Image which describes a root + // filesystem which will be provisioned by Mesos.
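+
+// Usage sketch: a Docker image for the Mesos containerizer, forced to be
+// re-pulled by disabling the cache. `Image_DOCKER` is assumed by analogy
+// with `Image_APPC` in the getter above; the registry name is illustrative.
+//
+//	typ := mesos.Image_DOCKER
+//	cached := false
+//	img := mesos.Image{
+//		Type:   &typ,
+//		Docker: &mesos.Image_Docker{Name: "registry.example.com:5000/app:1.2"},
+//		Cached: &cached, // defaults to true when unset
+//	}
+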
+ Image *Image `protobuf:"bytes,4,opt,name=image" json:"image,omitempty"` + Source *Volume_Source `protobuf:"bytes,5,opt,name=source" json:"source,omitempty"` +} + +func (m *Volume) Reset() { *m = Volume{} } +func (*Volume) ProtoMessage() {} +func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{63} } + +func (m *Volume) GetMode() Volume_Mode { + if m != nil && m.Mode != nil { + return *m.Mode + } + return RW +} + +func (m *Volume) GetContainerPath() string { + if m != nil { + return m.ContainerPath + } + return "" +} + +func (m *Volume) GetHostPath() string { + if m != nil && m.HostPath != nil { + return *m.HostPath + } + return "" +} + +func (m *Volume) GetImage() *Image { + if m != nil { + return m.Image + } + return nil +} + +func (m *Volume) GetSource() *Volume_Source { + if m != nil { + return m.Source + } + return nil +} + +// Describes where a volume originates from. +type Volume_Source struct { + // Enum fields should be optional, see: MESOS-4997. + Type Volume_Source_Type `protobuf:"varint,1,opt,name=type,enum=mesos.Volume_Source_Type" json:"type"` + // The source of the volume created by docker volume driver. + DockerVolume *Volume_Source_DockerVolume `protobuf:"bytes,2,opt,name=docker_volume,json=dockerVolume" json:"docker_volume,omitempty"` + HostPath *Volume_Source_HostPath `protobuf:"bytes,5,opt,name=host_path,json=hostPath" json:"host_path,omitempty"` + SandboxPath *Volume_Source_SandboxPath `protobuf:"bytes,3,opt,name=sandbox_path,json=sandboxPath" json:"sandbox_path,omitempty"` + // The volume/secret isolator uses the secret-fetcher module (third-party or + // internal) to download the secret and make it available at container_path. + Secret *Secret `protobuf:"bytes,4,opt,name=secret" json:"secret,omitempty"` +} + +func (m *Volume_Source) Reset() { *m = Volume_Source{} } +func (*Volume_Source) ProtoMessage() {} +func (*Volume_Source) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{63, 0} } + +func (m *Volume_Source) GetType() Volume_Source_Type { + if m != nil { + return m.Type + } + return Volume_Source_UNKNOWN +} + +func (m *Volume_Source) GetDockerVolume() *Volume_Source_DockerVolume { + if m != nil { + return m.DockerVolume + } + return nil +} + +func (m *Volume_Source) GetHostPath() *Volume_Source_HostPath { + if m != nil { + return m.HostPath + } + return nil +} + +func (m *Volume_Source) GetSandboxPath() *Volume_Source_SandboxPath { + if m != nil { + return m.SandboxPath + } + return nil +} + +func (m *Volume_Source) GetSecret() *Secret { + if m != nil { + return m.Secret + } + return nil +} + +type Volume_Source_DockerVolume struct { + // Driver of the volume; it can be flocker, convoy, rexray, etc. + Driver *string `protobuf:"bytes,1,opt,name=driver" json:"driver,omitempty"` + // Name of the volume. + Name string `protobuf:"bytes,2,req,name=name" json:"name"` + // Volume driver specific options.
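+
+// Usage sketch: mounting a host directory read-write into the container;
+// `RW` is the Volume_Mode constant used as the getter default above.
+//
+//	mode := mesos.RW
+//	host := "/var/data"
+//	vol := mesos.Volume{
+//		Mode:          &mode,
+//		ContainerPath: "data", // relative, so under the container work dir
+//		HostPath:      &host,
+//	}
+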
+ DriverOptions *Parameters `protobuf:"bytes,3,opt,name=driver_options,json=driverOptions" json:"driver_options,omitempty"` +} + +func (m *Volume_Source_DockerVolume) Reset() { *m = Volume_Source_DockerVolume{} } +func (*Volume_Source_DockerVolume) ProtoMessage() {} +func (*Volume_Source_DockerVolume) Descriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{63, 0, 0} +} + +func (m *Volume_Source_DockerVolume) GetDriver() string { + if m != nil && m.Driver != nil { + return *m.Driver + } + return "" +} + +func (m *Volume_Source_DockerVolume) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Volume_Source_DockerVolume) GetDriverOptions() *Parameters { + if m != nil { + return m.DriverOptions + } + return nil +} + +// Absolute path pointing to a directory or file on the host. +type Volume_Source_HostPath struct { + Path string `protobuf:"bytes,1,req,name=path" json:"path"` + MountPropagation *MountPropagation `protobuf:"bytes,2,opt,name=mount_propagation,json=mountPropagation" json:"mount_propagation,omitempty"` +} + +func (m *Volume_Source_HostPath) Reset() { *m = Volume_Source_HostPath{} } +func (*Volume_Source_HostPath) ProtoMessage() {} +func (*Volume_Source_HostPath) Descriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{63, 0, 1} +} + +func (m *Volume_Source_HostPath) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *Volume_Source_HostPath) GetMountPropagation() *MountPropagation { + if m != nil { + return m.MountPropagation + } + return nil +} + +// Describe a path from a container's sandbox. The container can +// be the current container (SELF), or its parent container +// (PARENT). PARENT allows all child containers to share a volume +// from their parent container's sandbox. It'll be an error if +// the current container is a top level container. +type Volume_Source_SandboxPath struct { + Type Volume_Source_SandboxPath_Type `protobuf:"varint,1,opt,name=type,enum=mesos.Volume_Source_SandboxPath_Type" json:"type"` + // A path relative to the corresponding container's sandbox. + // Note that upwards traversal (i.e. ../../abc) is not allowed. + Path string `protobuf:"bytes,2,req,name=path" json:"path"` +} + +func (m *Volume_Source_SandboxPath) Reset() { *m = Volume_Source_SandboxPath{} } +func (*Volume_Source_SandboxPath) ProtoMessage() {} +func (*Volume_Source_SandboxPath) Descriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{63, 0, 2} +} + +func (m *Volume_Source_SandboxPath) GetType() Volume_Source_SandboxPath_Type { + if m != nil { + return m.Type + } + return Volume_Source_SandboxPath_UNKNOWN +} + +func (m *Volume_Source_SandboxPath) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +// * +// Describes a network request from a framework as well as network resolution +// provided by Mesos. +// +// A framework may request the network isolator on the Agent to isolate the +// container in a network namespace and create a virtual network interface. +// The `NetworkInfo` message describes the properties of that virtual +// interface, including the IP addresses and network isolation policy +// (network group membership). +// +// The NetworkInfo message is not interpreted by the Master or Agent and is +// intended to be used by Agent and Master modules implementing network +// isolation. If the modules are missing, the message is simply ignored. In +// future, the task launch will fail if there is no module providing the +// network isolation capabilities (MESOS-3390). 
+// +// An executor, Agent, or an Agent module may append NetworkInfos inside +// TaskStatus::container_status to provide information such as the container IP +// address and isolation groups. +type NetworkInfo struct { + // When included in a ContainerInfo, each of these represent a + // request for an IP address. Each request can specify an explicit address + // or the IP protocol to use. + // + // When included in a TaskStatus message, these inform the framework + // scheduler about the IP addresses that are bound to the container + // interface. When there are no custom network isolator modules installed, + // this field is filled in automatically with the Agent IP address. + IPAddresses []NetworkInfo_IPAddress `protobuf:"bytes,5,rep,name=ip_addresses,json=ipAddresses" json:"ip_addresses"` + // Name of the network which will be used by network isolator to determine + // the network that the container joins. It's up to the network isolator + // to decide how to interpret this field. + Name *string `protobuf:"bytes,6,opt,name=name" json:"name,omitempty"` + // A group is the name given to a set of logically-related interfaces that + // are allowed to communicate among themselves. Network traffic is allowed + // between two container interfaces that share at least one network group. + // For example, one might want to create separate groups for isolating dev, + // testing, qa and prod deployment environments. + Groups []string `protobuf:"bytes,3,rep,name=groups" json:"groups,omitempty"` + // To tag certain metadata to be used by Isolator/IPAM, e.g., rack, etc. + Labels *Labels `protobuf:"bytes,4,opt,name=labels" json:"labels,omitempty"` + PortMappings []NetworkInfo_PortMapping `protobuf:"bytes,7,rep,name=port_mappings,json=portMappings" json:"port_mappings"` +} + +func (m *NetworkInfo) Reset() { *m = NetworkInfo{} } +func (*NetworkInfo) ProtoMessage() {} +func (*NetworkInfo) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{64} } + +func (m *NetworkInfo) GetIPAddresses() []NetworkInfo_IPAddress { + if m != nil { + return m.IPAddresses + } + return nil +} + +func (m *NetworkInfo) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *NetworkInfo) GetGroups() []string { + if m != nil { + return m.Groups + } + return nil +} + +func (m *NetworkInfo) GetLabels() *Labels { + if m != nil { + return m.Labels + } + return nil +} + +func (m *NetworkInfo) GetPortMappings() []NetworkInfo_PortMapping { + if m != nil { + return m.PortMappings + } + return nil +} + +// Specifies a request for an IP address, or reports the assigned container +// IP address. +// +// Users can request an automatically assigned IP (for example, via an +// IPAM service) or a specific IP by adding a NetworkInfo to the +// ContainerInfo for a task. On a request, specifying neither `protocol` +// nor `ip_address` means that any available address may be assigned. +type NetworkInfo_IPAddress struct { + // Specify IP address requirement. Set protocol to the desired value to + // request the network isolator on the Agent to assign an IP address to the + // container being launched. If a specific IP address is specified in + // ip_address, this field should not be set. + Protocol *NetworkInfo_Protocol `protobuf:"varint,1,opt,name=protocol,enum=mesos.NetworkInfo_Protocol,def=1" json:"protocol,omitempty"` + // Statically assigned IP provided by the Framework. This IP will be + // assigned to the container by the network isolator module on the Agent. 
+ // This field should not be used with the protocol field above. + // + // If an explicit address is requested but is unavailable, the network + // isolator should fail the task. + IPAddress *string `protobuf:"bytes,2,opt,name=ip_address,json=ipAddress" json:"ip_address,omitempty"` +} + +func (m *NetworkInfo_IPAddress) Reset() { *m = NetworkInfo_IPAddress{} } +func (*NetworkInfo_IPAddress) ProtoMessage() {} +func (*NetworkInfo_IPAddress) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{64, 0} } + +const Default_NetworkInfo_IPAddress_Protocol NetworkInfo_Protocol = IPv4 + +func (m *NetworkInfo_IPAddress) GetProtocol() NetworkInfo_Protocol { + if m != nil && m.Protocol != nil { + return *m.Protocol + } + return Default_NetworkInfo_IPAddress_Protocol +} + +func (m *NetworkInfo_IPAddress) GetIPAddress() string { + if m != nil && m.IPAddress != nil { + return *m.IPAddress + } + return "" +} + +// Specifies a port mapping request for the task on this network. +type NetworkInfo_PortMapping struct { + HostPort uint32 `protobuf:"varint,1,req,name=host_port,json=hostPort" json:"host_port"` + ContainerPort uint32 `protobuf:"varint,2,req,name=container_port,json=containerPort" json:"container_port"` + // Protocol to expose as (ie: tcp, udp). + Protocol *string `protobuf:"bytes,3,opt,name=protocol" json:"protocol,omitempty"` +} + +func (m *NetworkInfo_PortMapping) Reset() { *m = NetworkInfo_PortMapping{} } +func (*NetworkInfo_PortMapping) ProtoMessage() {} +func (*NetworkInfo_PortMapping) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{64, 1} } + +func (m *NetworkInfo_PortMapping) GetHostPort() uint32 { + if m != nil { + return m.HostPort + } + return 0 +} + +func (m *NetworkInfo_PortMapping) GetContainerPort() uint32 { + if m != nil { + return m.ContainerPort + } + return 0 +} + +func (m *NetworkInfo_PortMapping) GetProtocol() string { + if m != nil && m.Protocol != nil { + return *m.Protocol + } + return "" +} + +// * +// Encapsulation of `Capabilities` supported by Linux. +// Reference: http://linux.die.net/man/7/capabilities. +type CapabilityInfo struct { + Capabilities []CapabilityInfo_Capability `protobuf:"varint,1,rep,name=capabilities,enum=mesos.CapabilityInfo_Capability" json:"capabilities,omitempty"` +} + +func (m *CapabilityInfo) Reset() { *m = CapabilityInfo{} } +func (*CapabilityInfo) ProtoMessage() {} +func (*CapabilityInfo) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{65} } + +func (m *CapabilityInfo) GetCapabilities() []CapabilityInfo_Capability { + if m != nil { + return m.Capabilities + } + return nil +} + +// * +// Encapsulation for Linux specific configuration. +// E.g, capabilities, limits etc. +type LinuxInfo struct { + // Since 1.4.0, deprecated in favor of `effective_capabilities`. + CapabilityInfo *CapabilityInfo `protobuf:"bytes,1,opt,name=capability_info,json=capabilityInfo" json:"capability_info,omitempty"` + // The set of capabilities that are allowed but not initially + // granted to tasks. + BoundingCapabilities *CapabilityInfo `protobuf:"bytes,2,opt,name=bounding_capabilities,json=boundingCapabilities" json:"bounding_capabilities,omitempty"` + // Represents the set of capabilities that the task will + // be executed with. + EffectiveCapabilities *CapabilityInfo `protobuf:"bytes,3,opt,name=effective_capabilities,json=effectiveCapabilities" json:"effective_capabilities,omitempty"` + // If set as 'true', the container shares the pid namespace with + // its parent. 
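+
+// Usage sketch: requesting one automatically assigned IP on a named network
+// plus a port mapping. An empty IPAddress (neither protocol nor ip_address
+// set) means any available address may be assigned, as described above.
+//
+//	name := "public"
+//	ni := mesos.NetworkInfo{
+//		Name:        &name,
+//		IPAddresses: []mesos.NetworkInfo_IPAddress{{}},
+//		PortMappings: []mesos.NetworkInfo_PortMapping{
+//			{HostPort: 31000, ContainerPort: 8080},
+//		},
+//	}
+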
If the container is a top level container, it will + // share the pid namespace with the agent. If the container is a + // nested container, it will share the pid namespace with its + // parent container. This field will be ignored if 'namespaces/pid' + // isolator is not enabled. + SharePIDNamespace *bool `protobuf:"varint,4,opt,name=share_pid_namespace,json=sharePidNamespace" json:"share_pid_namespace,omitempty"` +} + +func (m *LinuxInfo) Reset() { *m = LinuxInfo{} } +func (*LinuxInfo) ProtoMessage() {} +func (*LinuxInfo) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{66} } + +func (m *LinuxInfo) GetCapabilityInfo() *CapabilityInfo { + if m != nil { + return m.CapabilityInfo + } + return nil +} + +func (m *LinuxInfo) GetBoundingCapabilities() *CapabilityInfo { + if m != nil { + return m.BoundingCapabilities + } + return nil +} + +func (m *LinuxInfo) GetEffectiveCapabilities() *CapabilityInfo { + if m != nil { + return m.EffectiveCapabilities + } + return nil +} + +func (m *LinuxInfo) GetSharePIDNamespace() bool { + if m != nil && m.SharePIDNamespace != nil { + return *m.SharePIDNamespace + } + return false +} + +// * +// Encapsulation for POSIX rlimits, see +// http://pubs.opengroup.org/onlinepubs/009695399/functions/getrlimit.html. +// Note that some types might only be defined for Linux. +// We use a custom prefix to avoid conflict with existing system macros +// (e.g., `RLIMIT_CPU` or `NOFILE`). +type RLimitInfo struct { + Rlimits []RLimitInfo_RLimit `protobuf:"bytes,1,rep,name=rlimits" json:"rlimits"` +} + +func (m *RLimitInfo) Reset() { *m = RLimitInfo{} } +func (*RLimitInfo) ProtoMessage() {} +func (*RLimitInfo) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{67} } + +func (m *RLimitInfo) GetRlimits() []RLimitInfo_RLimit { + if m != nil { + return m.Rlimits + } + return nil +} + +type RLimitInfo_RLimit struct { + Type RLimitInfo_RLimit_Type `protobuf:"varint,1,opt,name=type,enum=mesos.RLimitInfo_RLimit_Type" json:"type"` + // Either both are set or both are not set. + // If both are not set, it represents unlimited. + // If both are set, we require `soft` <= `hard`. + Hard *uint64 `protobuf:"varint,2,opt,name=hard" json:"hard,omitempty"` + Soft *uint64 `protobuf:"varint,3,opt,name=soft" json:"soft,omitempty"` +} + +func (m *RLimitInfo_RLimit) Reset() { *m = RLimitInfo_RLimit{} } +func (*RLimitInfo_RLimit) ProtoMessage() {} +func (*RLimitInfo_RLimit) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{67, 0} } + +func (m *RLimitInfo_RLimit) GetType() RLimitInfo_RLimit_Type { + if m != nil { + return m.Type + } + return RLimitInfo_RLimit_UNKNOWN +} + +func (m *RLimitInfo_RLimit) GetHard() uint64 { + if m != nil && m.Hard != nil { + return *m.Hard + } + return 0 +} + +func (m *RLimitInfo_RLimit) GetSoft() uint64 { + if m != nil && m.Soft != nil { + return *m.Soft + } + return 0 +} + +// * +// Describes the information about (pseudo) TTY that can +// be attached to a process running in a container. 
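+
+// Usage sketch: capping open files for a container. The constant name
+// `RLimitInfo_RLimit_RLMT_NOFILE` is assumed from the custom `RLMT_` prefix
+// convention described above; soft must not exceed hard.
+//
+//	soft, hard := uint64(1024), uint64(4096)
+//	rl := mesos.RLimitInfo{
+//		Rlimits: []mesos.RLimitInfo_RLimit{{
+//			Type: mesos.RLimitInfo_RLimit_RLMT_NOFILE,
+//			Soft: &soft,
+//			Hard: &hard,
+//		}},
+//	}
+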
+type TTYInfo struct { + WindowSize *TTYInfo_WindowSize `protobuf:"bytes,1,opt,name=window_size,json=windowSize" json:"window_size,omitempty"` +} + +func (m *TTYInfo) Reset() { *m = TTYInfo{} } +func (*TTYInfo) ProtoMessage() {} +func (*TTYInfo) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{68} } + +func (m *TTYInfo) GetWindowSize() *TTYInfo_WindowSize { + if m != nil { + return m.WindowSize + } + return nil +} + +type TTYInfo_WindowSize struct { + Rows uint32 `protobuf:"varint,1,req,name=rows" json:"rows"` + Columns uint32 `protobuf:"varint,2,req,name=columns" json:"columns"` +} + +func (m *TTYInfo_WindowSize) Reset() { *m = TTYInfo_WindowSize{} } +func (*TTYInfo_WindowSize) ProtoMessage() {} +func (*TTYInfo_WindowSize) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{68, 0} } + +func (m *TTYInfo_WindowSize) GetRows() uint32 { + if m != nil { + return m.Rows + } + return 0 +} + +func (m *TTYInfo_WindowSize) GetColumns() uint32 { + if m != nil { + return m.Columns + } + return 0 +} + +// * +// Describes a container configuration and allows extensible +// configurations for different container implementations. +// +// NOTE: `ContainerInfo` may be specified, e.g., by a task, even if no +// container image is provided. In this case, neither `MesosInfo` nor +// `DockerInfo` is set and the required `type` must be `MESOS`. This is to +// address the case where a task without an image, e.g., a shell script +// with URIs, wants to use features originally designed for containers, +// for example custom network isolation via `NetworkInfo`. +type ContainerInfo struct { + Type *ContainerInfo_Type `protobuf:"varint,1,req,name=type,enum=mesos.ContainerInfo_Type" json:"type,omitempty"` + Volumes []Volume `protobuf:"bytes,2,rep,name=volumes" json:"volumes"` + Hostname *string `protobuf:"bytes,4,opt,name=hostname" json:"hostname,omitempty"` + // Only one of the following *Info messages should be set to match + // the type. + Docker *ContainerInfo_DockerInfo `protobuf:"bytes,3,opt,name=docker" json:"docker,omitempty"` + Mesos *ContainerInfo_MesosInfo `protobuf:"bytes,5,opt,name=mesos" json:"mesos,omitempty"` + // A list of network requests. A framework can request multiple IP addresses + // for the container. + NetworkInfos []NetworkInfo `protobuf:"bytes,7,rep,name=network_infos,json=networkInfos" json:"network_infos"` + // Linux specific information for the container. + LinuxInfo *LinuxInfo `protobuf:"bytes,8,opt,name=linux_info,json=linuxInfo" json:"linux_info,omitempty"` + // (POSIX only) rlimits of the container. + RlimitInfo *RLimitInfo `protobuf:"bytes,9,opt,name=rlimit_info,json=rlimitInfo" json:"rlimit_info,omitempty"` + // If specified, a tty will be attached to the container entrypoint.
+ TTYInfo *TTYInfo `protobuf:"bytes,10,opt,name=tty_info,json=ttyInfo" json:"tty_info,omitempty"` +} + +func (m *ContainerInfo) Reset() { *m = ContainerInfo{} } +func (*ContainerInfo) ProtoMessage() {} +func (*ContainerInfo) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{69} } + +func (m *ContainerInfo) GetType() ContainerInfo_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return ContainerInfo_DOCKER +} + +func (m *ContainerInfo) GetVolumes() []Volume { + if m != nil { + return m.Volumes + } + return nil +} + +func (m *ContainerInfo) GetHostname() string { + if m != nil && m.Hostname != nil { + return *m.Hostname + } + return "" +} + +func (m *ContainerInfo) GetDocker() *ContainerInfo_DockerInfo { + if m != nil { + return m.Docker + } + return nil +} + +func (m *ContainerInfo) GetMesos() *ContainerInfo_MesosInfo { + if m != nil { + return m.Mesos + } + return nil +} + +func (m *ContainerInfo) GetNetworkInfos() []NetworkInfo { + if m != nil { + return m.NetworkInfos + } + return nil +} + +func (m *ContainerInfo) GetLinuxInfo() *LinuxInfo { + if m != nil { + return m.LinuxInfo + } + return nil +} + +func (m *ContainerInfo) GetRlimitInfo() *RLimitInfo { + if m != nil { + return m.RlimitInfo + } + return nil +} + +func (m *ContainerInfo) GetTTYInfo() *TTYInfo { + if m != nil { + return m.TTYInfo + } + return nil +} + +type ContainerInfo_DockerInfo struct { + // The docker image that is going to be passed to the registry. + Image string `protobuf:"bytes,1,req,name=image" json:"image"` + Network *ContainerInfo_DockerInfo_Network `protobuf:"varint,2,opt,name=network,enum=mesos.ContainerInfo_DockerInfo_Network,def=1" json:"network,omitempty"` + PortMappings []ContainerInfo_DockerInfo_PortMapping `protobuf:"bytes,3,rep,name=port_mappings,json=portMappings" json:"port_mappings"` + Privileged *bool `protobuf:"varint,4,opt,name=privileged,def=0" json:"privileged,omitempty"` + // Allowing arbitrary parameters to be passed to docker CLI. + // Note that anything passed to this field is not guaranteed + // to be supported moving forward, as we might move away from + // the docker CLI. + Parameters []Parameter `protobuf:"bytes,5,rep,name=parameters" json:"parameters"` + // With this flag set to true, the docker containerizer will + // pull the docker image from the registry even if the image + // is already downloaded on the agent. + ForcePullImage *bool `protobuf:"varint,6,opt,name=force_pull_image,json=forcePullImage" json:"force_pull_image,omitempty"` + // The name of volume driver plugin. 
+ VolumeDriver *string `protobuf:"bytes,7,opt,name=volume_driver,json=volumeDriver" json:"volume_driver,omitempty"` +} + +func (m *ContainerInfo_DockerInfo) Reset() { *m = ContainerInfo_DockerInfo{} } +func (*ContainerInfo_DockerInfo) ProtoMessage() {} +func (*ContainerInfo_DockerInfo) Descriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{69, 0} +} + +const Default_ContainerInfo_DockerInfo_Network ContainerInfo_DockerInfo_Network = ContainerInfo_DockerInfo_HOST +const Default_ContainerInfo_DockerInfo_Privileged bool = false + +func (m *ContainerInfo_DockerInfo) GetImage() string { + if m != nil { + return m.Image + } + return "" +} + +func (m *ContainerInfo_DockerInfo) GetNetwork() ContainerInfo_DockerInfo_Network { + if m != nil && m.Network != nil { + return *m.Network + } + return Default_ContainerInfo_DockerInfo_Network +} + +func (m *ContainerInfo_DockerInfo) GetPortMappings() []ContainerInfo_DockerInfo_PortMapping { + if m != nil { + return m.PortMappings + } + return nil +} + +func (m *ContainerInfo_DockerInfo) GetPrivileged() bool { + if m != nil && m.Privileged != nil { + return *m.Privileged + } + return Default_ContainerInfo_DockerInfo_Privileged +} + +func (m *ContainerInfo_DockerInfo) GetParameters() []Parameter { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *ContainerInfo_DockerInfo) GetForcePullImage() bool { + if m != nil && m.ForcePullImage != nil { + return *m.ForcePullImage + } + return false +} + +func (m *ContainerInfo_DockerInfo) GetVolumeDriver() string { + if m != nil && m.VolumeDriver != nil { + return *m.VolumeDriver + } + return "" +} + +type ContainerInfo_DockerInfo_PortMapping struct { + HostPort uint32 `protobuf:"varint,1,req,name=host_port,json=hostPort" json:"host_port"` + ContainerPort uint32 `protobuf:"varint,2,req,name=container_port,json=containerPort" json:"container_port"` + // Protocol to expose as (ie: tcp, udp). + Protocol *string `protobuf:"bytes,3,opt,name=protocol" json:"protocol,omitempty"` +} + +func (m *ContainerInfo_DockerInfo_PortMapping) Reset() { *m = ContainerInfo_DockerInfo_PortMapping{} } +func (*ContainerInfo_DockerInfo_PortMapping) ProtoMessage() {} +func (*ContainerInfo_DockerInfo_PortMapping) Descriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{69, 0, 0} +} + +func (m *ContainerInfo_DockerInfo_PortMapping) GetHostPort() uint32 { + if m != nil { + return m.HostPort + } + return 0 +} + +func (m *ContainerInfo_DockerInfo_PortMapping) GetContainerPort() uint32 { + if m != nil { + return m.ContainerPort + } + return 0 +} + +func (m *ContainerInfo_DockerInfo_PortMapping) GetProtocol() string { + if m != nil && m.Protocol != nil { + return *m.Protocol + } + return "" +} + +type ContainerInfo_MesosInfo struct { + Image *Image `protobuf:"bytes,1,opt,name=image" json:"image,omitempty"` +} + +func (m *ContainerInfo_MesosInfo) Reset() { *m = ContainerInfo_MesosInfo{} } +func (*ContainerInfo_MesosInfo) ProtoMessage() {} +func (*ContainerInfo_MesosInfo) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{69, 1} } + +func (m *ContainerInfo_MesosInfo) GetImage() *Image { + if m != nil { + return m.Image + } + return nil +} + +// * +// Container related information that is resolved during container +// setup. The information is sent back to the framework as part of the +// TaskStatus message. 
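Worth noting before the `ContainerStatus` message below: the `GetNetwork` accessor above shows how proto2 declared defaults surface in the generated Go code. The `def=1` in the struct tag and the `Default_ContainerInfo_DockerInfo_Network` constant mean an unset field reads back as `HOST`, not as the zero value. A small self-contained sketch of the same pattern (local stand-in types, not the generated ones):

```go
package main

import "fmt"

// Network stands in for ContainerInfo_DockerInfo_Network.
type Network int32

const (
	HOST   Network = 1 // declared default (def=1 in the struct tag)
	BRIDGE Network = 2
)

// DockerInfo stands in for the generated message: the field is a
// pointer so "unset" is distinguishable from any real value.
type DockerInfo struct {
	Network *Network
}

// GetNetwork falls back to the declared default when the field is
// unset, matching the generated accessor above.
func (m *DockerInfo) GetNetwork() Network {
	if m != nil && m.Network != nil {
		return *m.Network
	}
	return HOST
}

func main() {
	var d DockerInfo
	fmt.Println(d.GetNetwork()) // 1: unset reads back as the HOST default

	b := BRIDGE
	d.Network = &b
	fmt.Println(d.GetNetwork()) // 2: an explicit value wins
}
```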
+type ContainerStatus struct { + ContainerID *ContainerID `protobuf:"bytes,4,opt,name=container_id,json=containerId" json:"container_id,omitempty"` + // This field can be reliably used to identify the container IP address. + NetworkInfos []NetworkInfo `protobuf:"bytes,1,rep,name=network_infos,json=networkInfos" json:"network_infos"` + // Information about Linux control group (cgroup). + CgroupInfo *CgroupInfo `protobuf:"bytes,2,opt,name=cgroup_info,json=cgroupInfo" json:"cgroup_info,omitempty"` + // Information about Executor PID. + ExecutorPID *uint32 `protobuf:"varint,3,opt,name=executor_pid,json=executorPid" json:"executor_pid,omitempty"` +} + +func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } +func (*ContainerStatus) ProtoMessage() {} +func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{70} } + +func (m *ContainerStatus) GetContainerID() *ContainerID { + if m != nil { + return m.ContainerID + } + return nil +} + +func (m *ContainerStatus) GetNetworkInfos() []NetworkInfo { + if m != nil { + return m.NetworkInfos + } + return nil +} + +func (m *ContainerStatus) GetCgroupInfo() *CgroupInfo { + if m != nil { + return m.CgroupInfo + } + return nil +} + +func (m *ContainerStatus) GetExecutorPID() uint32 { + if m != nil && m.ExecutorPID != nil { + return *m.ExecutorPID + } + return 0 +} + +// * +// Linux control group (cgroup) information. +type CgroupInfo struct { + NetCLS *CgroupInfo_NetCls `protobuf:"bytes,1,opt,name=net_cls,json=netCls" json:"net_cls,omitempty"` +} + +func (m *CgroupInfo) Reset() { *m = CgroupInfo{} } +func (*CgroupInfo) ProtoMessage() {} +func (*CgroupInfo) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{71} } + +func (m *CgroupInfo) GetNetCLS() *CgroupInfo_NetCls { + if m != nil { + return m.NetCLS + } + return nil +} + +// Configuration of a blkio cgroup subsystem. +type CgroupInfo_Blkio struct { +} + +func (m *CgroupInfo_Blkio) Reset() { *m = CgroupInfo_Blkio{} } +func (*CgroupInfo_Blkio) ProtoMessage() {} +func (*CgroupInfo_Blkio) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{71, 0} } + +// Describes a stat value without the device descriptor part. +type CgroupInfo_Blkio_Value struct { + Op *CgroupInfo_Blkio_Operation `protobuf:"varint,1,opt,name=op,enum=mesos.CgroupInfo_Blkio_Operation" json:"op,omitempty"` + Value *uint64 `protobuf:"varint,2,opt,name=value" json:"value,omitempty"` +} + +func (m *CgroupInfo_Blkio_Value) Reset() { *m = CgroupInfo_Blkio_Value{} } +func (*CgroupInfo_Blkio_Value) ProtoMessage() {} +func (*CgroupInfo_Blkio_Value) Descriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{71, 0, 0} +} + +func (m *CgroupInfo_Blkio_Value) GetOp() CgroupInfo_Blkio_Operation { + if m != nil && m.Op != nil { + return *m.Op + } + return CgroupInfo_Blkio_UNKNOWN +} + +func (m *CgroupInfo_Blkio_Value) GetValue() uint64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type CgroupInfo_Blkio_CFQ struct { +} + +func (m *CgroupInfo_Blkio_CFQ) Reset() { *m = CgroupInfo_Blkio_CFQ{} } +func (*CgroupInfo_Blkio_CFQ) ProtoMessage() {} +func (*CgroupInfo_Blkio_CFQ) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{71, 0, 1} } + +type CgroupInfo_Blkio_CFQ_Statistics struct { + // Stats are grouped by block devices. If `device` is not + // set, it represents `Total`. 
+ Device *Device_Number `protobuf:"bytes,1,opt,name=device" json:"device,omitempty"` + // blkio.sectors + Sectors *uint64 `protobuf:"varint,2,opt,name=sectors" json:"sectors,omitempty"` + // blkio.time + Time *uint64 `protobuf:"varint,3,opt,name=time" json:"time,omitempty"` + // blkio.io_serviced + IOServiced []CgroupInfo_Blkio_Value `protobuf:"bytes,4,rep,name=io_serviced,json=ioServiced" json:"io_serviced"` + // blkio.io_service_bytes + IOServiceBytes []CgroupInfo_Blkio_Value `protobuf:"bytes,5,rep,name=io_service_bytes,json=ioServiceBytes" json:"io_service_bytes"` + // blkio.io_service_time + IOServiceTime []CgroupInfo_Blkio_Value `protobuf:"bytes,6,rep,name=io_service_time,json=ioServiceTime" json:"io_service_time"` + // blkio.io_wait_time + IOWaitTime []CgroupInfo_Blkio_Value `protobuf:"bytes,7,rep,name=io_wait_time,json=ioWaitTime" json:"io_wait_time"` + // blkio.io_merged + IOMerged []CgroupInfo_Blkio_Value `protobuf:"bytes,8,rep,name=io_merged,json=ioMerged" json:"io_merged"` + // blkio.io_queued + IOQueued []CgroupInfo_Blkio_Value `protobuf:"bytes,9,rep,name=io_queued,json=ioQueued" json:"io_queued"` +} + +func (m *CgroupInfo_Blkio_CFQ_Statistics) Reset() { *m = CgroupInfo_Blkio_CFQ_Statistics{} } +func (*CgroupInfo_Blkio_CFQ_Statistics) ProtoMessage() {} +func (*CgroupInfo_Blkio_CFQ_Statistics) Descriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{71, 0, 1, 0} +} + +func (m *CgroupInfo_Blkio_CFQ_Statistics) GetDevice() *Device_Number { + if m != nil { + return m.Device + } + return nil +} + +func (m *CgroupInfo_Blkio_CFQ_Statistics) GetSectors() uint64 { + if m != nil && m.Sectors != nil { + return *m.Sectors + } + return 0 +} + +func (m *CgroupInfo_Blkio_CFQ_Statistics) GetTime() uint64 { + if m != nil && m.Time != nil { + return *m.Time + } + return 0 +} + +func (m *CgroupInfo_Blkio_CFQ_Statistics) GetIOServiced() []CgroupInfo_Blkio_Value { + if m != nil { + return m.IOServiced + } + return nil +} + +func (m *CgroupInfo_Blkio_CFQ_Statistics) GetIOServiceBytes() []CgroupInfo_Blkio_Value { + if m != nil { + return m.IOServiceBytes + } + return nil +} + +func (m *CgroupInfo_Blkio_CFQ_Statistics) GetIOServiceTime() []CgroupInfo_Blkio_Value { + if m != nil { + return m.IOServiceTime + } + return nil +} + +func (m *CgroupInfo_Blkio_CFQ_Statistics) GetIOWaitTime() []CgroupInfo_Blkio_Value { + if m != nil { + return m.IOWaitTime + } + return nil +} + +func (m *CgroupInfo_Blkio_CFQ_Statistics) GetIOMerged() []CgroupInfo_Blkio_Value { + if m != nil { + return m.IOMerged + } + return nil +} + +func (m *CgroupInfo_Blkio_CFQ_Statistics) GetIOQueued() []CgroupInfo_Blkio_Value { + if m != nil { + return m.IOQueued + } + return nil +} + +type CgroupInfo_Blkio_Throttling struct { +} + +func (m *CgroupInfo_Blkio_Throttling) Reset() { *m = CgroupInfo_Blkio_Throttling{} } +func (*CgroupInfo_Blkio_Throttling) ProtoMessage() {} +func (*CgroupInfo_Blkio_Throttling) Descriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{71, 0, 2} +} + +type CgroupInfo_Blkio_Throttling_Statistics struct { + // Stats are grouped by block devices. If `device` is not + // set, it represents `Total`. 
+ Device *Device_Number `protobuf:"bytes,1,opt,name=device" json:"device,omitempty"` + // blkio.throttle.io_serviced + IOServiced []CgroupInfo_Blkio_Value `protobuf:"bytes,2,rep,name=io_serviced,json=ioServiced" json:"io_serviced"` + // blkio.throttle.io_service_bytes + IOServiceBytes []CgroupInfo_Blkio_Value `protobuf:"bytes,3,rep,name=io_service_bytes,json=ioServiceBytes" json:"io_service_bytes"` +} + +func (m *CgroupInfo_Blkio_Throttling_Statistics) Reset() { + *m = CgroupInfo_Blkio_Throttling_Statistics{} +} +func (*CgroupInfo_Blkio_Throttling_Statistics) ProtoMessage() {} +func (*CgroupInfo_Blkio_Throttling_Statistics) Descriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{71, 0, 2, 0} +} + +func (m *CgroupInfo_Blkio_Throttling_Statistics) GetDevice() *Device_Number { + if m != nil { + return m.Device + } + return nil +} + +func (m *CgroupInfo_Blkio_Throttling_Statistics) GetIOServiced() []CgroupInfo_Blkio_Value { + if m != nil { + return m.IOServiced + } + return nil +} + +func (m *CgroupInfo_Blkio_Throttling_Statistics) GetIOServiceBytes() []CgroupInfo_Blkio_Value { + if m != nil { + return m.IOServiceBytes + } + return nil +} + +type CgroupInfo_Blkio_Statistics struct { + CFQ []CgroupInfo_Blkio_CFQ_Statistics `protobuf:"bytes,1,rep,name=cfq" json:"cfq"` + CFQRecursive []CgroupInfo_Blkio_CFQ_Statistics `protobuf:"bytes,2,rep,name=cfq_recursive,json=cfqRecursive" json:"cfq_recursive"` + Throttling []*CgroupInfo_Blkio_Throttling_Statistics `protobuf:"bytes,3,rep,name=throttling" json:"throttling,omitempty"` +} + +func (m *CgroupInfo_Blkio_Statistics) Reset() { *m = CgroupInfo_Blkio_Statistics{} } +func (*CgroupInfo_Blkio_Statistics) ProtoMessage() {} +func (*CgroupInfo_Blkio_Statistics) Descriptor() ([]byte, []int) { + return fileDescriptorMesos, []int{71, 0, 3} +} + +func (m *CgroupInfo_Blkio_Statistics) GetCFQ() []CgroupInfo_Blkio_CFQ_Statistics { + if m != nil { + return m.CFQ + } + return nil +} + +func (m *CgroupInfo_Blkio_Statistics) GetCFQRecursive() []CgroupInfo_Blkio_CFQ_Statistics { + if m != nil { + return m.CFQRecursive + } + return nil +} + +func (m *CgroupInfo_Blkio_Statistics) GetThrottling() []*CgroupInfo_Blkio_Throttling_Statistics { + if m != nil { + return m.Throttling + } + return nil +} + +// Configuration of a net_cls cgroup subsystem. +type CgroupInfo_NetCls struct { + // The 32-bit classid consists of two parts, a 16 bit major handle + // and a 16-bit minor handle. The major and minor handle are + // represented using the format 0xAAAABBBB, where 0xAAAA is the + // 16-bit major handle and 0xBBBB is the 16-bit minor handle. + ClassID *uint32 `protobuf:"varint,1,opt,name=classid" json:"classid,omitempty"` +} + +func (m *CgroupInfo_NetCls) Reset() { *m = CgroupInfo_NetCls{} } +func (*CgroupInfo_NetCls) ProtoMessage() {} +func (*CgroupInfo_NetCls) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{71, 1} } + +func (m *CgroupInfo_NetCls) GetClassID() uint32 { + if m != nil && m.ClassID != nil { + return *m.ClassID + } + return 0 +} + +// * +// Collection of labels. Labels should not contain duplicate key-value +// pairs. +type Labels struct { + Labels []Label `protobuf:"bytes,1,rep,name=labels" json:"labels"` +} + +func (m *Labels) Reset() { *m = Labels{} } +func (*Labels) ProtoMessage() {} +func (*Labels) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{72} } + +func (m *Labels) GetLabels() []Label { + if m != nil { + return m.Labels + } + return nil +} + +// * +// Key, value pair used to store free form user-data. 
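One concrete note on the `CgroupInfo_NetCls.ClassID` encoding described above (0xAAAABBBB: a 16-bit major handle and a 16-bit minor handle), before the `Label` message below. Packing and unpacking the handles is plain bit arithmetic, as this small sketch shows:

```go
package main

import "fmt"

func main() {
	// Compose a net_cls classid from a 16-bit major and minor handle.
	major, minor := uint32(0x0001), uint32(0x0002)
	classid := major<<16 | minor

	fmt.Printf("classid=0x%08X\n", classid)      // 0x00010002
	fmt.Printf("major=0x%04X\n", classid>>16)    // 0x0001
	fmt.Printf("minor=0x%04X\n", classid&0xFFFF) // 0x0002
}
```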
+type Label struct { + Key string `protobuf:"bytes,1,req,name=key" json:"key"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *Label) Reset() { *m = Label{} } +func (*Label) ProtoMessage() {} +func (*Label) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{73} } + +func (m *Label) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *Label) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +// * +// Named port used for service discovery. +type Port struct { + // Port number on which the framework exposes a service. + Number uint32 `protobuf:"varint,1,req,name=number" json:"number"` + // Name of the service hosted on this port. + Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + // Layer 4-7 protocol on which the framework exposes its services. + Protocol *string `protobuf:"bytes,3,opt,name=protocol" json:"protocol,omitempty"` + // This field restricts discovery within a framework (FRAMEWORK), + // within a Mesos cluster (CLUSTER), or places no restrictions (EXTERNAL). + // The visibility setting for a Port overrides the general visibility setting + // in the DiscoveryInfo. + Visibility *DiscoveryInfo_Visibility `protobuf:"varint,4,opt,name=visibility,enum=mesos.DiscoveryInfo_Visibility" json:"visibility,omitempty"` + // This can be used to decorate the message with metadata to be + // interpreted by external applications such as firewalls. + Labels *Labels `protobuf:"bytes,5,opt,name=labels" json:"labels,omitempty"` +} + +func (m *Port) Reset() { *m = Port{} } +func (*Port) ProtoMessage() {} +func (*Port) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{74} } + +func (m *Port) GetNumber() uint32 { + if m != nil { + return m.Number + } + return 0 +} + +func (m *Port) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Port) GetProtocol() string { + if m != nil && m.Protocol != nil { + return *m.Protocol + } + return "" +} + +func (m *Port) GetVisibility() DiscoveryInfo_Visibility { + if m != nil && m.Visibility != nil { + return *m.Visibility + } + return FRAMEWORK +} + +func (m *Port) GetLabels() *Labels { + if m != nil { + return m.Labels + } + return nil +} + +// * +// Collection of ports. +type Ports struct { + Ports []Port `protobuf:"bytes,1,rep,name=ports" json:"ports"` +} + +func (m *Ports) Reset() { *m = Ports{} } +func (*Ports) ProtoMessage() {} +func (*Ports) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{75} } + +func (m *Ports) GetPorts() []Port { + if m != nil { + return m.Ports + } + return nil +} + +// * +// Service discovery information. +// The visibility field restricts discovery within a framework (FRAMEWORK), +// within a Mesos cluster (CLUSTER), or places no restrictions (EXTERNAL). +// Each port in the ports field also has an optional visibility field. +// If visibility is specified for a port, it overrides the default service-wide +// DiscoveryInfo.visibility for that port. +// The environment, location, and version fields provide first class support for +// common attributes used to differentiate between similar services. The +// environment may receive values such as PROD/QA/DEV, the location field may +// receive values like EAST-US/WEST-US/EUROPE/AMEA, and the version field may +// receive values like v2.0/v0.9. The exact use of these fields is up to each +// service discovery system. 
+type DiscoveryInfo struct { + Visibility DiscoveryInfo_Visibility `protobuf:"varint,1,req,name=visibility,enum=mesos.DiscoveryInfo_Visibility" json:"visibility"` + Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + Environment *string `protobuf:"bytes,3,opt,name=environment" json:"environment,omitempty"` + Location *string `protobuf:"bytes,4,opt,name=location" json:"location,omitempty"` + Version *string `protobuf:"bytes,5,opt,name=version" json:"version,omitempty"` + Ports *Ports `protobuf:"bytes,6,opt,name=ports" json:"ports,omitempty"` + Labels *Labels `protobuf:"bytes,7,opt,name=labels" json:"labels,omitempty"` +} + +func (m *DiscoveryInfo) Reset() { *m = DiscoveryInfo{} } +func (*DiscoveryInfo) ProtoMessage() {} +func (*DiscoveryInfo) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{76} } + +func (m *DiscoveryInfo) GetVisibility() DiscoveryInfo_Visibility { + if m != nil { + return m.Visibility + } + return FRAMEWORK +} + +func (m *DiscoveryInfo) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DiscoveryInfo) GetEnvironment() string { + if m != nil && m.Environment != nil { + return *m.Environment + } + return "" +} + +func (m *DiscoveryInfo) GetLocation() string { + if m != nil && m.Location != nil { + return *m.Location + } + return "" +} + +func (m *DiscoveryInfo) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +func (m *DiscoveryInfo) GetPorts() *Ports { + if m != nil { + return m.Ports + } + return nil +} + +func (m *DiscoveryInfo) GetLabels() *Labels { + if m != nil { + return m.Labels + } + return nil +} + +// * +// Named WeightInfo to indicate resource allocation +// priority between the different roles. +type WeightInfo struct { + Weight float64 `protobuf:"fixed64,1,req,name=weight" json:"weight"` + // Related role name. + Role *string `protobuf:"bytes,2,opt,name=role" json:"role,omitempty"` +} + +func (m *WeightInfo) Reset() { *m = WeightInfo{} } +func (*WeightInfo) ProtoMessage() {} +func (*WeightInfo) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{77} } + +func (m *WeightInfo) GetWeight() float64 { + if m != nil { + return m.Weight + } + return 0 +} + +func (m *WeightInfo) GetRole() string { + if m != nil && m.Role != nil { + return *m.Role + } + return "" +} + +// * +// Version information of a component. 
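Before the `VersionInfo` message below, a sketch of the visibility-resolution rule spelled out in the `DiscoveryInfo` comment above: a port-level `visibility`, when present, overrides the service-wide default. Local stand-in types for illustration, not the generated ones:

```go
package main

import "fmt"

// Visibility stands in for DiscoveryInfo_Visibility.
type Visibility int32

const (
	FRAMEWORK Visibility = 0
	CLUSTER   Visibility = 1
	EXTERNAL  Visibility = 2
)

// Port keeps visibility optional, as in the generated message.
type Port struct {
	Visibility *Visibility
}

// effectiveVisibility applies the documented override: the port's own
// setting wins; otherwise the service-wide default applies.
func effectiveVisibility(serviceDefault Visibility, p Port) Visibility {
	if p.Visibility != nil {
		return *p.Visibility
	}
	return serviceDefault
}

func main() {
	ext := EXTERNAL
	fmt.Println(effectiveVisibility(CLUSTER, Port{}))                 // 1: inherits the service default
	fmt.Println(effectiveVisibility(CLUSTER, Port{Visibility: &ext})) // 2: the port override wins
}
```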
+type VersionInfo struct { + Version string `protobuf:"bytes,1,req,name=version" json:"version"` + BuildDate *string `protobuf:"bytes,2,opt,name=build_date,json=buildDate" json:"build_date,omitempty"` + BuildTime *float64 `protobuf:"fixed64,3,opt,name=build_time,json=buildTime" json:"build_time,omitempty"` + BuildUser *string `protobuf:"bytes,4,opt,name=build_user,json=buildUser" json:"build_user,omitempty"` + GitSHA *string `protobuf:"bytes,5,opt,name=git_sha,json=gitSha" json:"git_sha,omitempty"` + GitBranch *string `protobuf:"bytes,6,opt,name=git_branch,json=gitBranch" json:"git_branch,omitempty"` + GitTag *string `protobuf:"bytes,7,opt,name=git_tag,json=gitTag" json:"git_tag,omitempty"` +} + +func (m *VersionInfo) Reset() { *m = VersionInfo{} } +func (*VersionInfo) ProtoMessage() {} +func (*VersionInfo) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{78} } + +func (m *VersionInfo) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *VersionInfo) GetBuildDate() string { + if m != nil && m.BuildDate != nil { + return *m.BuildDate + } + return "" +} + +func (m *VersionInfo) GetBuildTime() float64 { + if m != nil && m.BuildTime != nil { + return *m.BuildTime + } + return 0 +} + +func (m *VersionInfo) GetBuildUser() string { + if m != nil && m.BuildUser != nil { + return *m.BuildUser + } + return "" +} + +func (m *VersionInfo) GetGitSHA() string { + if m != nil && m.GitSHA != nil { + return *m.GitSHA + } + return "" +} + +func (m *VersionInfo) GetGitBranch() string { + if m != nil && m.GitBranch != nil { + return *m.GitBranch + } + return "" +} + +func (m *VersionInfo) GetGitTag() string { + if m != nil && m.GitTag != nil { + return *m.GitTag + } + return "" +} + +// * +// Flag consists of a name and optionally its value. +type Flag struct { + Name string `protobuf:"bytes,1,req,name=name" json:"name"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *Flag) Reset() { *m = Flag{} } +func (*Flag) ProtoMessage() {} +func (*Flag) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{79} } + +func (m *Flag) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Flag) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +// * +// Describes a Role. Roles can be used to specify that certain resources are +// reserved for the use of one or more frameworks. +type Role struct { + Name string `protobuf:"bytes,1,req,name=name" json:"name"` + Weight float64 `protobuf:"fixed64,2,req,name=weight" json:"weight"` + Frameworks []FrameworkID `protobuf:"bytes,3,rep,name=frameworks" json:"frameworks"` + Resources []Resource `protobuf:"bytes,4,rep,name=resources" json:"resources"` +} + +func (m *Role) Reset() { *m = Role{} } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{80} } + +func (m *Role) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Role) GetWeight() float64 { + if m != nil { + return m.Weight + } + return 0 +} + +func (m *Role) GetFrameworks() []FrameworkID { + if m != nil { + return m.Frameworks + } + return nil +} + +func (m *Role) GetResources() []Resource { + if m != nil { + return m.Resources + } + return nil +} + +// * +// Metric consists of a name and optionally its value. 
+type Metric struct {
+	Name  string   `protobuf:"bytes,1,req,name=name" json:"name"`
+	Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *Metric) Reset()                    { *m = Metric{} }
+func (*Metric) ProtoMessage()               {}
+func (*Metric) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{81} }
+
+func (m *Metric) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *Metric) GetValue() float64 {
+	if m != nil && m.Value != nil {
+		return *m.Value
+	}
+	return 0
+}
+
+// *
+// Describes a File.
+type FileInfo struct {
+	// Absolute path to the file.
+	Path string `protobuf:"bytes,1,req,name=path" json:"path"`
+	// Number of hard links.
+	Nlink *int32 `protobuf:"varint,2,opt,name=nlink" json:"nlink,omitempty"`
+	// Total size in bytes.
+	Size *uint64 `protobuf:"varint,3,opt,name=size" json:"size,omitempty"`
+	// Last modification time.
+	Mtime *TimeInfo `protobuf:"bytes,4,opt,name=mtime" json:"mtime,omitempty"`
+	// Represents a file's mode and permission bits. The bits have the same
+	// definition on all systems and are portable.
+	Mode *uint32 `protobuf:"varint,5,opt,name=mode" json:"mode,omitempty"`
+	// User ID of owner.
+	UID *string `protobuf:"bytes,6,opt,name=uid" json:"uid,omitempty"`
+	// Group ID of owner.
+	GID *string `protobuf:"bytes,7,opt,name=gid" json:"gid,omitempty"`
+}
+
+func (m *FileInfo) Reset()                    { *m = FileInfo{} }
+func (*FileInfo) ProtoMessage()               {}
+func (*FileInfo) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{82} }
+
+func (m *FileInfo) GetPath() string {
+	if m != nil {
+		return m.Path
+	}
+	return ""
+}
+
+func (m *FileInfo) GetNlink() int32 {
+	if m != nil && m.Nlink != nil {
+		return *m.Nlink
+	}
+	return 0
+}
+
+func (m *FileInfo) GetSize() uint64 {
+	if m != nil && m.Size != nil {
+		return *m.Size
+	}
+	return 0
+}
+
+func (m *FileInfo) GetMtime() *TimeInfo {
+	if m != nil {
+		return m.Mtime
+	}
+	return nil
+}
+
+func (m *FileInfo) GetMode() uint32 {
+	if m != nil && m.Mode != nil {
+		return *m.Mode
+	}
+	return 0
+}
+
+func (m *FileInfo) GetUID() string {
+	if m != nil && m.UID != nil {
+		return *m.UID
+	}
+	return ""
+}
+
+func (m *FileInfo) GetGID() string {
+	if m != nil && m.GID != nil {
+		return *m.GID
+	}
+	return ""
+}
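A short aside on `FileInfo.Mode` above: since the bits follow the portable POSIX layout, octal masks recover the familiar permission view. A minimal sketch; the `0o100644` value is a hypothetical example, not taken from the diff:

```go
package main

import (
	"fmt"
	"io/fs"
)

func main() {
	// Hypothetical value as it might appear in FileInfo.Mode:
	// a regular file with rw-r--r-- permissions.
	mode := uint32(0o100644)

	perm := mode & 0o777 // keep only the rwx permission bits
	fmt.Printf("perm=%04o (%v)\n", perm, fs.FileMode(perm))
	// Output: perm=0644 (-rw-r--r--)
}
```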
+// *
+// Describes information about a device.
+type Device struct {
+	Path   *string        `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"`
+	Number *Device_Number `protobuf:"bytes,2,opt,name=number" json:"number,omitempty"`
+}
+
+func (m *Device) Reset()                    { *m = Device{} }
+func (*Device) ProtoMessage()               {}
+func (*Device) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{83} }
+
+func (m *Device) GetPath() string {
+	if m != nil && m.Path != nil {
+		return *m.Path
+	}
+	return ""
+}
+
+func (m *Device) GetNumber() *Device_Number {
+	if m != nil {
+		return m.Number
+	}
+	return nil
+}
+
+type Device_Number struct {
+	MajorNumber *uint64 `protobuf:"varint,1,req,name=major_number,json=majorNumber" json:"major_number,omitempty"`
+	MinorNumber *uint64 `protobuf:"varint,2,req,name=minor_number,json=minorNumber" json:"minor_number,omitempty"`
+}
+
+func (m *Device_Number) Reset()                    { *m = Device_Number{} }
+func (*Device_Number) ProtoMessage()               {}
+func (*Device_Number) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{83, 0} }
+
+func (m *Device_Number) GetMajorNumber() uint64 {
+	if m != nil && m.MajorNumber != nil {
+		return *m.MajorNumber
+	}
+	return 0
+}
+
+func (m *Device_Number) GetMinorNumber() uint64 {
+	if m != nil && m.MinorNumber != nil {
+		return *m.MinorNumber
+	}
+	return 0
+}
+
+// *
+// Describes a device whitelist entry that is exposed from the host to the
+// container.
+type DeviceAccess struct {
+	Device Device              `protobuf:"bytes,1,req,name=device" json:"device"`
+	Access DeviceAccess_Access `protobuf:"bytes,2,req,name=access" json:"access"`
+}
+
+func (m *DeviceAccess) Reset()                    { *m = DeviceAccess{} }
+func (*DeviceAccess) ProtoMessage()               {}
+func (*DeviceAccess) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{84} }
+
+func (m *DeviceAccess) GetDevice() Device {
+	if m != nil {
+		return m.Device
+	}
+	return Device{}
+}
+
+func (m *DeviceAccess) GetAccess() DeviceAccess_Access {
+	if m != nil {
+		return m.Access
+	}
+	return DeviceAccess_Access{}
+}
+
+type DeviceAccess_Access struct {
+	Read  *bool `protobuf:"varint,1,opt,name=read" json:"read,omitempty"`
+	Write *bool `protobuf:"varint,2,opt,name=write" json:"write,omitempty"`
+	Mknod *bool `protobuf:"varint,3,opt,name=mknod" json:"mknod,omitempty"`
+}
+
+func (m *DeviceAccess_Access) Reset()                    { *m = DeviceAccess_Access{} }
+func (*DeviceAccess_Access) ProtoMessage()               {}
+func (*DeviceAccess_Access) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{84, 0} }
+
+func (m *DeviceAccess_Access) GetRead() bool {
+	if m != nil && m.Read != nil {
+		return *m.Read
+	}
+	return false
+}
+
+func (m *DeviceAccess_Access) GetWrite() bool {
+	if m != nil && m.Write != nil {
+		return *m.Write
+	}
+	return false
+}
+
+func (m *DeviceAccess_Access) GetMknod() bool {
+	if m != nil && m.Mknod != nil {
+		return *m.Mknod
+	}
+	return false
+}
+
+type DeviceWhitelist struct {
+	AllowedDevices []DeviceAccess `protobuf:"bytes,1,rep,name=allowed_devices,json=allowedDevices" json:"allowed_devices"`
+}
+
+func (m *DeviceWhitelist) Reset()                    { *m = DeviceWhitelist{} }
+func (*DeviceWhitelist) ProtoMessage()               {}
+func (*DeviceWhitelist) Descriptor() ([]byte, []int) { return fileDescriptorMesos, []int{85} }
+
+func (m *DeviceWhitelist) GetAllowedDevices() []DeviceAccess {
+	if m != nil {
+		return m.AllowedDevices
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*FrameworkID)(nil), "mesos.FrameworkID")
+	proto.RegisterType((*OfferID)(nil), "mesos.OfferID")
+	proto.RegisterType((*AgentID)(nil), "mesos.AgentID")
+	proto.RegisterType((*TaskID)(nil),
"mesos.TaskID") + proto.RegisterType((*ExecutorID)(nil), "mesos.ExecutorID") + proto.RegisterType((*ContainerID)(nil), "mesos.ContainerID") + proto.RegisterType((*ResourceProviderID)(nil), "mesos.ResourceProviderID") + proto.RegisterType((*OperationID)(nil), "mesos.OperationID") + proto.RegisterType((*TimeInfo)(nil), "mesos.TimeInfo") + proto.RegisterType((*DurationInfo)(nil), "mesos.DurationInfo") + proto.RegisterType((*Address)(nil), "mesos.Address") + proto.RegisterType((*URL)(nil), "mesos.URL") + proto.RegisterType((*Unavailability)(nil), "mesos.Unavailability") + proto.RegisterType((*MachineID)(nil), "mesos.MachineID") + proto.RegisterType((*MachineInfo)(nil), "mesos.MachineInfo") + proto.RegisterType((*FrameworkInfo)(nil), "mesos.FrameworkInfo") + proto.RegisterType((*FrameworkInfo_Capability)(nil), "mesos.FrameworkInfo.Capability") + proto.RegisterType((*CheckInfo)(nil), "mesos.CheckInfo") + proto.RegisterType((*CheckInfo_Command)(nil), "mesos.CheckInfo.Command") + proto.RegisterType((*CheckInfo_Http)(nil), "mesos.CheckInfo.Http") + proto.RegisterType((*CheckInfo_Tcp)(nil), "mesos.CheckInfo.Tcp") + proto.RegisterType((*HealthCheck)(nil), "mesos.HealthCheck") + proto.RegisterType((*HealthCheck_HTTPCheckInfo)(nil), "mesos.HealthCheck.HTTPCheckInfo") + proto.RegisterType((*HealthCheck_TCPCheckInfo)(nil), "mesos.HealthCheck.TCPCheckInfo") + proto.RegisterType((*KillPolicy)(nil), "mesos.KillPolicy") + proto.RegisterType((*CommandInfo)(nil), "mesos.CommandInfo") + proto.RegisterType((*CommandInfo_URI)(nil), "mesos.CommandInfo.URI") + proto.RegisterType((*ExecutorInfo)(nil), "mesos.ExecutorInfo") + proto.RegisterType((*DomainInfo)(nil), "mesos.DomainInfo") + proto.RegisterType((*DomainInfo_FaultDomain)(nil), "mesos.DomainInfo.FaultDomain") + proto.RegisterType((*DomainInfo_FaultDomain_RegionInfo)(nil), "mesos.DomainInfo.FaultDomain.RegionInfo") + proto.RegisterType((*DomainInfo_FaultDomain_ZoneInfo)(nil), "mesos.DomainInfo.FaultDomain.ZoneInfo") + proto.RegisterType((*MasterInfo)(nil), "mesos.MasterInfo") + proto.RegisterType((*MasterInfo_Capability)(nil), "mesos.MasterInfo.Capability") + proto.RegisterType((*AgentInfo)(nil), "mesos.AgentInfo") + proto.RegisterType((*AgentInfo_Capability)(nil), "mesos.AgentInfo.Capability") + proto.RegisterType((*CSIPluginContainerInfo)(nil), "mesos.CSIPluginContainerInfo") + proto.RegisterType((*CSIPluginInfo)(nil), "mesos.CSIPluginInfo") + proto.RegisterType((*ResourceProviderInfo)(nil), "mesos.ResourceProviderInfo") + proto.RegisterType((*ResourceProviderInfo_Storage)(nil), "mesos.ResourceProviderInfo.Storage") + proto.RegisterType((*Value)(nil), "mesos.Value") + proto.RegisterType((*Value_Scalar)(nil), "mesos.Value.Scalar") + proto.RegisterType((*Value_Range)(nil), "mesos.Value.Range") + proto.RegisterType((*Value_Ranges)(nil), "mesos.Value.Ranges") + proto.RegisterType((*Value_Set)(nil), "mesos.Value.Set") + proto.RegisterType((*Value_Text)(nil), "mesos.Value.Text") + proto.RegisterType((*Attribute)(nil), "mesos.Attribute") + proto.RegisterType((*Resource)(nil), "mesos.Resource") + proto.RegisterType((*Resource_AllocationInfo)(nil), "mesos.Resource.AllocationInfo") + proto.RegisterType((*Resource_ReservationInfo)(nil), "mesos.Resource.ReservationInfo") + proto.RegisterType((*Resource_DiskInfo)(nil), "mesos.Resource.DiskInfo") + proto.RegisterType((*Resource_DiskInfo_Persistence)(nil), "mesos.Resource.DiskInfo.Persistence") + proto.RegisterType((*Resource_DiskInfo_Source)(nil), "mesos.Resource.DiskInfo.Source") + 
proto.RegisterType((*Resource_DiskInfo_Source_Path)(nil), "mesos.Resource.DiskInfo.Source.Path") + proto.RegisterType((*Resource_DiskInfo_Source_Mount)(nil), "mesos.Resource.DiskInfo.Source.Mount") + proto.RegisterType((*Resource_RevocableInfo)(nil), "mesos.Resource.RevocableInfo") + proto.RegisterType((*Resource_SharedInfo)(nil), "mesos.Resource.SharedInfo") + proto.RegisterType((*TrafficControlStatistics)(nil), "mesos.TrafficControlStatistics") + proto.RegisterType((*IpStatistics)(nil), "mesos.IpStatistics") + proto.RegisterType((*IcmpStatistics)(nil), "mesos.IcmpStatistics") + proto.RegisterType((*TcpStatistics)(nil), "mesos.TcpStatistics") + proto.RegisterType((*UdpStatistics)(nil), "mesos.UdpStatistics") + proto.RegisterType((*SNMPStatistics)(nil), "mesos.SNMPStatistics") + proto.RegisterType((*DiskStatistics)(nil), "mesos.DiskStatistics") + proto.RegisterType((*ResourceStatistics)(nil), "mesos.ResourceStatistics") + proto.RegisterType((*ResourceUsage)(nil), "mesos.ResourceUsage") + proto.RegisterType((*ResourceUsage_Executor)(nil), "mesos.ResourceUsage.Executor") + proto.RegisterType((*ResourceUsage_Executor_Task)(nil), "mesos.ResourceUsage.Executor.Task") + proto.RegisterType((*PerfStatistics)(nil), "mesos.PerfStatistics") + proto.RegisterType((*Request)(nil), "mesos.Request") + proto.RegisterType((*Offer)(nil), "mesos.Offer") + proto.RegisterType((*Offer_Operation)(nil), "mesos.Offer.Operation") + proto.RegisterType((*Offer_Operation_Launch)(nil), "mesos.Offer.Operation.Launch") + proto.RegisterType((*Offer_Operation_LaunchGroup)(nil), "mesos.Offer.Operation.LaunchGroup") + proto.RegisterType((*Offer_Operation_Reserve)(nil), "mesos.Offer.Operation.Reserve") + proto.RegisterType((*Offer_Operation_Unreserve)(nil), "mesos.Offer.Operation.Unreserve") + proto.RegisterType((*Offer_Operation_Create)(nil), "mesos.Offer.Operation.Create") + proto.RegisterType((*Offer_Operation_Destroy)(nil), "mesos.Offer.Operation.Destroy") + proto.RegisterType((*Offer_Operation_GrowVolume)(nil), "mesos.Offer.Operation.GrowVolume") + proto.RegisterType((*Offer_Operation_ShrinkVolume)(nil), "mesos.Offer.Operation.ShrinkVolume") + proto.RegisterType((*Offer_Operation_CreateDisk)(nil), "mesos.Offer.Operation.CreateDisk") + proto.RegisterType((*Offer_Operation_DestroyDisk)(nil), "mesos.Offer.Operation.DestroyDisk") + proto.RegisterType((*InverseOffer)(nil), "mesos.InverseOffer") + proto.RegisterType((*TaskInfo)(nil), "mesos.TaskInfo") + proto.RegisterType((*TaskGroupInfo)(nil), "mesos.TaskGroupInfo") + proto.RegisterType((*Task)(nil), "mesos.Task") + proto.RegisterType((*TaskResourceLimitation)(nil), "mesos.TaskResourceLimitation") + proto.RegisterType((*UUID)(nil), "mesos.UUID") + proto.RegisterType((*Operation)(nil), "mesos.Operation") + proto.RegisterType((*OperationStatus)(nil), "mesos.OperationStatus") + proto.RegisterType((*CheckStatusInfo)(nil), "mesos.CheckStatusInfo") + proto.RegisterType((*CheckStatusInfo_Command)(nil), "mesos.CheckStatusInfo.Command") + proto.RegisterType((*CheckStatusInfo_Http)(nil), "mesos.CheckStatusInfo.Http") + proto.RegisterType((*CheckStatusInfo_Tcp)(nil), "mesos.CheckStatusInfo.Tcp") + proto.RegisterType((*TaskStatus)(nil), "mesos.TaskStatus") + proto.RegisterType((*Filters)(nil), "mesos.Filters") + proto.RegisterType((*Environment)(nil), "mesos.Environment") + proto.RegisterType((*Environment_Variable)(nil), "mesos.Environment.Variable") + proto.RegisterType((*Parameter)(nil), "mesos.Parameter") + proto.RegisterType((*Parameters)(nil), "mesos.Parameters") + 
proto.RegisterType((*Credential)(nil), "mesos.Credential") + proto.RegisterType((*Credentials)(nil), "mesos.Credentials") + proto.RegisterType((*Secret)(nil), "mesos.Secret") + proto.RegisterType((*Secret_Reference)(nil), "mesos.Secret.Reference") + proto.RegisterType((*Secret_Value)(nil), "mesos.Secret.Value") + proto.RegisterType((*RateLimit)(nil), "mesos.RateLimit") + proto.RegisterType((*RateLimits)(nil), "mesos.RateLimits") + proto.RegisterType((*Image)(nil), "mesos.Image") + proto.RegisterType((*Image_Appc)(nil), "mesos.Image.Appc") + proto.RegisterType((*Image_Docker)(nil), "mesos.Image.Docker") + proto.RegisterType((*MountPropagation)(nil), "mesos.MountPropagation") + proto.RegisterType((*Volume)(nil), "mesos.Volume") + proto.RegisterType((*Volume_Source)(nil), "mesos.Volume.Source") + proto.RegisterType((*Volume_Source_DockerVolume)(nil), "mesos.Volume.Source.DockerVolume") + proto.RegisterType((*Volume_Source_HostPath)(nil), "mesos.Volume.Source.HostPath") + proto.RegisterType((*Volume_Source_SandboxPath)(nil), "mesos.Volume.Source.SandboxPath") + proto.RegisterType((*NetworkInfo)(nil), "mesos.NetworkInfo") + proto.RegisterType((*NetworkInfo_IPAddress)(nil), "mesos.NetworkInfo.IPAddress") + proto.RegisterType((*NetworkInfo_PortMapping)(nil), "mesos.NetworkInfo.PortMapping") + proto.RegisterType((*CapabilityInfo)(nil), "mesos.CapabilityInfo") + proto.RegisterType((*LinuxInfo)(nil), "mesos.LinuxInfo") + proto.RegisterType((*RLimitInfo)(nil), "mesos.RLimitInfo") + proto.RegisterType((*RLimitInfo_RLimit)(nil), "mesos.RLimitInfo.RLimit") + proto.RegisterType((*TTYInfo)(nil), "mesos.TTYInfo") + proto.RegisterType((*TTYInfo_WindowSize)(nil), "mesos.TTYInfo.WindowSize") + proto.RegisterType((*ContainerInfo)(nil), "mesos.ContainerInfo") + proto.RegisterType((*ContainerInfo_DockerInfo)(nil), "mesos.ContainerInfo.DockerInfo") + proto.RegisterType((*ContainerInfo_DockerInfo_PortMapping)(nil), "mesos.ContainerInfo.DockerInfo.PortMapping") + proto.RegisterType((*ContainerInfo_MesosInfo)(nil), "mesos.ContainerInfo.MesosInfo") + proto.RegisterType((*ContainerStatus)(nil), "mesos.ContainerStatus") + proto.RegisterType((*CgroupInfo)(nil), "mesos.CgroupInfo") + proto.RegisterType((*CgroupInfo_Blkio)(nil), "mesos.CgroupInfo.Blkio") + proto.RegisterType((*CgroupInfo_Blkio_Value)(nil), "mesos.CgroupInfo.Blkio.Value") + proto.RegisterType((*CgroupInfo_Blkio_CFQ)(nil), "mesos.CgroupInfo.Blkio.CFQ") + proto.RegisterType((*CgroupInfo_Blkio_CFQ_Statistics)(nil), "mesos.CgroupInfo.Blkio.CFQ.Statistics") + proto.RegisterType((*CgroupInfo_Blkio_Throttling)(nil), "mesos.CgroupInfo.Blkio.Throttling") + proto.RegisterType((*CgroupInfo_Blkio_Throttling_Statistics)(nil), "mesos.CgroupInfo.Blkio.Throttling.Statistics") + proto.RegisterType((*CgroupInfo_Blkio_Statistics)(nil), "mesos.CgroupInfo.Blkio.Statistics") + proto.RegisterType((*CgroupInfo_NetCls)(nil), "mesos.CgroupInfo.NetCls") + proto.RegisterType((*Labels)(nil), "mesos.Labels") + proto.RegisterType((*Label)(nil), "mesos.Label") + proto.RegisterType((*Port)(nil), "mesos.Port") + proto.RegisterType((*Ports)(nil), "mesos.Ports") + proto.RegisterType((*DiscoveryInfo)(nil), "mesos.DiscoveryInfo") + proto.RegisterType((*WeightInfo)(nil), "mesos.WeightInfo") + proto.RegisterType((*VersionInfo)(nil), "mesos.VersionInfo") + proto.RegisterType((*Flag)(nil), "mesos.Flag") + proto.RegisterType((*Role)(nil), "mesos.Role") + proto.RegisterType((*Metric)(nil), "mesos.Metric") + proto.RegisterType((*FileInfo)(nil), "mesos.FileInfo") + proto.RegisterType((*Device)(nil), 
"mesos.Device") + proto.RegisterType((*Device_Number)(nil), "mesos.Device.Number") + proto.RegisterType((*DeviceAccess)(nil), "mesos.DeviceAccess") + proto.RegisterType((*DeviceAccess_Access)(nil), "mesos.DeviceAccess.Access") + proto.RegisterType((*DeviceWhitelist)(nil), "mesos.DeviceWhitelist") + proto.RegisterEnum("mesos.Status", Status_name, Status_value) + proto.RegisterEnum("mesos.TaskState", TaskState_name, TaskState_value) + proto.RegisterEnum("mesos.OperationState", OperationState_name, OperationState_value) + proto.RegisterEnum("mesos.MachineInfo_Mode", MachineInfo_Mode_name, MachineInfo_Mode_value) + proto.RegisterEnum("mesos.FrameworkInfo_Capability_Type", FrameworkInfo_Capability_Type_name, FrameworkInfo_Capability_Type_value) + proto.RegisterEnum("mesos.CheckInfo_Type", CheckInfo_Type_name, CheckInfo_Type_value) + proto.RegisterEnum("mesos.HealthCheck_Type", HealthCheck_Type_name, HealthCheck_Type_value) + proto.RegisterEnum("mesos.ExecutorInfo_Type", ExecutorInfo_Type_name, ExecutorInfo_Type_value) + proto.RegisterEnum("mesos.MasterInfo_Capability_Type", MasterInfo_Capability_Type_name, MasterInfo_Capability_Type_value) + proto.RegisterEnum("mesos.AgentInfo_Capability_Type", AgentInfo_Capability_Type_name, AgentInfo_Capability_Type_value) + proto.RegisterEnum("mesos.CSIPluginContainerInfo_Service", CSIPluginContainerInfo_Service_name, CSIPluginContainerInfo_Service_value) + proto.RegisterEnum("mesos.Value_Type", Value_Type_name, Value_Type_value) + proto.RegisterEnum("mesos.Resource_ReservationInfo_Type", Resource_ReservationInfo_Type_name, Resource_ReservationInfo_Type_value) + proto.RegisterEnum("mesos.Resource_DiskInfo_Source_Type", Resource_DiskInfo_Source_Type_name, Resource_DiskInfo_Source_Type_value) + proto.RegisterEnum("mesos.Offer_Operation_Type", Offer_Operation_Type_name, Offer_Operation_Type_value) + proto.RegisterEnum("mesos.TaskStatus_Source", TaskStatus_Source_name, TaskStatus_Source_value) + proto.RegisterEnum("mesos.TaskStatus_Reason", TaskStatus_Reason_name, TaskStatus_Reason_value) + proto.RegisterEnum("mesos.Environment_Variable_Type", Environment_Variable_Type_name, Environment_Variable_Type_value) + proto.RegisterEnum("mesos.Secret_Type", Secret_Type_name, Secret_Type_value) + proto.RegisterEnum("mesos.Image_Type", Image_Type_name, Image_Type_value) + proto.RegisterEnum("mesos.MountPropagation_Mode", MountPropagation_Mode_name, MountPropagation_Mode_value) + proto.RegisterEnum("mesos.Volume_Mode", Volume_Mode_name, Volume_Mode_value) + proto.RegisterEnum("mesos.Volume_Source_Type", Volume_Source_Type_name, Volume_Source_Type_value) + proto.RegisterEnum("mesos.Volume_Source_SandboxPath_Type", Volume_Source_SandboxPath_Type_name, Volume_Source_SandboxPath_Type_value) + proto.RegisterEnum("mesos.NetworkInfo_Protocol", NetworkInfo_Protocol_name, NetworkInfo_Protocol_value) + proto.RegisterEnum("mesos.CapabilityInfo_Capability", CapabilityInfo_Capability_name, CapabilityInfo_Capability_value) + proto.RegisterEnum("mesos.RLimitInfo_RLimit_Type", RLimitInfo_RLimit_Type_name, RLimitInfo_RLimit_Type_value) + proto.RegisterEnum("mesos.ContainerInfo_Type", ContainerInfo_Type_name, ContainerInfo_Type_value) + proto.RegisterEnum("mesos.ContainerInfo_DockerInfo_Network", ContainerInfo_DockerInfo_Network_name, ContainerInfo_DockerInfo_Network_value) + proto.RegisterEnum("mesos.CgroupInfo_Blkio_Operation", CgroupInfo_Blkio_Operation_name, CgroupInfo_Blkio_Operation_value) + proto.RegisterEnum("mesos.DiscoveryInfo_Visibility", DiscoveryInfo_Visibility_name, 
DiscoveryInfo_Visibility_value) +} +func (x Status) String() string { + s, ok := Status_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x TaskState) String() string { + s, ok := TaskState_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x OperationState) String() string { + s, ok := OperationState_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x MachineInfo_Mode) String() string { + s, ok := MachineInfo_Mode_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x FrameworkInfo_Capability_Type) String() string { + s, ok := FrameworkInfo_Capability_Type_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x CheckInfo_Type) String() string { + s, ok := CheckInfo_Type_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x HealthCheck_Type) String() string { + s, ok := HealthCheck_Type_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x ExecutorInfo_Type) String() string { + s, ok := ExecutorInfo_Type_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x MasterInfo_Capability_Type) String() string { + s, ok := MasterInfo_Capability_Type_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x AgentInfo_Capability_Type) String() string { + s, ok := AgentInfo_Capability_Type_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x CSIPluginContainerInfo_Service) String() string { + s, ok := CSIPluginContainerInfo_Service_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x Value_Type) String() string { + s, ok := Value_Type_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x Resource_ReservationInfo_Type) String() string { + s, ok := Resource_ReservationInfo_Type_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x Resource_DiskInfo_Source_Type) String() string { + s, ok := Resource_DiskInfo_Source_Type_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x Offer_Operation_Type) String() string { + s, ok := Offer_Operation_Type_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x TaskStatus_Source) String() string { + s, ok := TaskStatus_Source_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x TaskStatus_Reason) String() string { + s, ok := TaskStatus_Reason_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x Environment_Variable_Type) String() string { + s, ok := Environment_Variable_Type_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x Secret_Type) String() string { + s, ok := Secret_Type_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x Image_Type) String() string { + s, ok := Image_Type_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x MountPropagation_Mode) String() string { + s, ok := MountPropagation_Mode_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x Volume_Mode) String() string { + s, ok := Volume_Mode_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x Volume_Source_Type) String() string { + s, ok := Volume_Source_Type_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x Volume_Source_SandboxPath_Type) String() string 
{ + s, ok := Volume_Source_SandboxPath_Type_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x NetworkInfo_Protocol) String() string { + s, ok := NetworkInfo_Protocol_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x CapabilityInfo_Capability) String() string { + s, ok := CapabilityInfo_Capability_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x RLimitInfo_RLimit_Type) String() string { + s, ok := RLimitInfo_RLimit_Type_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x ContainerInfo_Type) String() string { + s, ok := ContainerInfo_Type_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x ContainerInfo_DockerInfo_Network) String() string { + s, ok := ContainerInfo_DockerInfo_Network_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x CgroupInfo_Blkio_Operation) String() string { + s, ok := CgroupInfo_Blkio_Operation_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x DiscoveryInfo_Visibility) String() string { + s, ok := DiscoveryInfo_Visibility_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *FrameworkID) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*FrameworkID) + if !ok { + that2, ok := that.(FrameworkID) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *FrameworkID") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *FrameworkID but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *FrameworkID but is not nil && this == nil") + } + if this.Value != that1.Value { + return fmt.Errorf("Value this(%v) Not Equal that(%v)", this.Value, that1.Value) + } + return nil +} +func (this *FrameworkID) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*FrameworkID) + if !ok { + that2, ok := that.(FrameworkID) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *OfferID) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*OfferID) + if !ok { + that2, ok := that.(OfferID) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *OfferID") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *OfferID but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *OfferID but is not nil && this == nil") + } + if this.Value != that1.Value { + return fmt.Errorf("Value this(%v) Not Equal that(%v)", this.Value, that1.Value) + } + return nil +} +func (this *OfferID) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*OfferID) + if !ok { + that2, ok := that.(OfferID) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Value != 
that1.Value { + return false + } + return true +} +func (this *AgentID) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*AgentID) + if !ok { + that2, ok := that.(AgentID) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *AgentID") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *AgentID but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *AgentID but is not nil && this == nil") + } + if this.Value != that1.Value { + return fmt.Errorf("Value this(%v) Not Equal that(%v)", this.Value, that1.Value) + } + return nil +} +func (this *AgentID) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*AgentID) + if !ok { + that2, ok := that.(AgentID) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *TaskID) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*TaskID) + if !ok { + that2, ok := that.(TaskID) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *TaskID") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *TaskID but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *TaskID but is not nil && this == nil") + } + if this.Value != that1.Value { + return fmt.Errorf("Value this(%v) Not Equal that(%v)", this.Value, that1.Value) + } + return nil +} +func (this *TaskID) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*TaskID) + if !ok { + that2, ok := that.(TaskID) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *ExecutorID) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*ExecutorID) + if !ok { + that2, ok := that.(ExecutorID) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *ExecutorID") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *ExecutorID but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *ExecutorID but is not nil && this == nil") + } + if this.Value != that1.Value { + return fmt.Errorf("Value this(%v) Not Equal that(%v)", this.Value, that1.Value) + } + return nil +} +func (this *ExecutorID) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*ExecutorID) + if !ok { + that2, ok := that.(ExecutorID) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this 
*ContainerID) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*ContainerID) + if !ok { + that2, ok := that.(ContainerID) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *ContainerID") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *ContainerID but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *ContainerID but is not nil && this == nil") + } + if this.Value != that1.Value { + return fmt.Errorf("Value this(%v) Not Equal that(%v)", this.Value, that1.Value) + } + if !this.Parent.Equal(that1.Parent) { + return fmt.Errorf("Parent this(%v) Not Equal that(%v)", this.Parent, that1.Parent) + } + return nil +} +func (this *ContainerID) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*ContainerID) + if !ok { + that2, ok := that.(ContainerID) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !this.Parent.Equal(that1.Parent) { + return false + } + return true +} +func (this *ResourceProviderID) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*ResourceProviderID) + if !ok { + that2, ok := that.(ResourceProviderID) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *ResourceProviderID") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *ResourceProviderID but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *ResourceProviderID but is not nil && this == nil") + } + if this.Value != that1.Value { + return fmt.Errorf("Value this(%v) Not Equal that(%v)", this.Value, that1.Value) + } + return nil +} +func (this *ResourceProviderID) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*ResourceProviderID) + if !ok { + that2, ok := that.(ResourceProviderID) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *OperationID) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*OperationID) + if !ok { + that2, ok := that.(OperationID) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *OperationID") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *OperationID but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *OperationID but is not nil && this == nil") + } + if this.Value != that1.Value { + return fmt.Errorf("Value this(%v) Not Equal that(%v)", this.Value, that1.Value) + } + return nil +} +func (this *OperationID) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*OperationID) + if !ok { + that2, ok := 
that.(OperationID) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *TimeInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*TimeInfo) + if !ok { + that2, ok := that.(TimeInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *TimeInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *TimeInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *TimeInfo but is not nil && this == nil") + } + if this.Nanoseconds != that1.Nanoseconds { + return fmt.Errorf("Nanoseconds this(%v) Not Equal that(%v)", this.Nanoseconds, that1.Nanoseconds) + } + return nil +} +func (this *TimeInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*TimeInfo) + if !ok { + that2, ok := that.(TimeInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Nanoseconds != that1.Nanoseconds { + return false + } + return true +} +func (this *DurationInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*DurationInfo) + if !ok { + that2, ok := that.(DurationInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *DurationInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *DurationInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *DurationInfo but is not nil && this == nil") + } + if this.Nanoseconds != that1.Nanoseconds { + return fmt.Errorf("Nanoseconds this(%v) Not Equal that(%v)", this.Nanoseconds, that1.Nanoseconds) + } + return nil +} +func (this *DurationInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*DurationInfo) + if !ok { + that2, ok := that.(DurationInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Nanoseconds != that1.Nanoseconds { + return false + } + return true +} +func (this *Address) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Address) + if !ok { + that2, ok := that.(Address) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Address") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Address but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Address but is not nil && this == nil") + } + if this.Hostname != nil && that1.Hostname != nil { + if *this.Hostname != *that1.Hostname { + return fmt.Errorf("Hostname this(%v) Not Equal that(%v)", *this.Hostname, *that1.Hostname) + } + } else if this.Hostname != nil { + return fmt.Errorf("this.Hostname 
== nil && that.Hostname != nil") + } else if that1.Hostname != nil { + return fmt.Errorf("Hostname this(%v) Not Equal that(%v)", this.Hostname, that1.Hostname) + } + if this.IP != nil && that1.IP != nil { + if *this.IP != *that1.IP { + return fmt.Errorf("IP this(%v) Not Equal that(%v)", *this.IP, *that1.IP) + } + } else if this.IP != nil { + return fmt.Errorf("this.IP == nil && that.IP != nil") + } else if that1.IP != nil { + return fmt.Errorf("IP this(%v) Not Equal that(%v)", this.IP, that1.IP) + } + if this.Port != that1.Port { + return fmt.Errorf("Port this(%v) Not Equal that(%v)", this.Port, that1.Port) + } + return nil +} +func (this *Address) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Address) + if !ok { + that2, ok := that.(Address) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Hostname != nil && that1.Hostname != nil { + if *this.Hostname != *that1.Hostname { + return false + } + } else if this.Hostname != nil { + return false + } else if that1.Hostname != nil { + return false + } + if this.IP != nil && that1.IP != nil { + if *this.IP != *that1.IP { + return false + } + } else if this.IP != nil { + return false + } else if that1.IP != nil { + return false + } + if this.Port != that1.Port { + return false + } + return true +} +func (this *URL) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*URL) + if !ok { + that2, ok := that.(URL) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *URL") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *URL but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *URL but is not nil && this == nil") + } + if this.Scheme != that1.Scheme { + return fmt.Errorf("Scheme this(%v) Not Equal that(%v)", this.Scheme, that1.Scheme) + } + if !this.Address.Equal(&that1.Address) { + return fmt.Errorf("Address this(%v) Not Equal that(%v)", this.Address, that1.Address) + } + if this.Path != nil && that1.Path != nil { + if *this.Path != *that1.Path { + return fmt.Errorf("Path this(%v) Not Equal that(%v)", *this.Path, *that1.Path) + } + } else if this.Path != nil { + return fmt.Errorf("this.Path == nil && that.Path != nil") + } else if that1.Path != nil { + return fmt.Errorf("Path this(%v) Not Equal that(%v)", this.Path, that1.Path) + } + if len(this.Query) != len(that1.Query) { + return fmt.Errorf("Query this(%v) Not Equal that(%v)", len(this.Query), len(that1.Query)) + } + for i := range this.Query { + if !this.Query[i].Equal(&that1.Query[i]) { + return fmt.Errorf("Query this[%v](%v) Not Equal that[%v](%v)", i, this.Query[i], i, that1.Query[i]) + } + } + if this.Fragment != nil && that1.Fragment != nil { + if *this.Fragment != *that1.Fragment { + return fmt.Errorf("Fragment this(%v) Not Equal that(%v)", *this.Fragment, *that1.Fragment) + } + } else if this.Fragment != nil { + return fmt.Errorf("this.Fragment == nil && that.Fragment != nil") + } else if that1.Fragment != nil { + return fmt.Errorf("Fragment this(%v) Not Equal that(%v)", this.Fragment, that1.Fragment) + } + return nil +} +func (this *URL) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + 
that1, ok := that.(*URL) + if !ok { + that2, ok := that.(URL) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Scheme != that1.Scheme { + return false + } + if !this.Address.Equal(&that1.Address) { + return false + } + if this.Path != nil && that1.Path != nil { + if *this.Path != *that1.Path { + return false + } + } else if this.Path != nil { + return false + } else if that1.Path != nil { + return false + } + if len(this.Query) != len(that1.Query) { + return false + } + for i := range this.Query { + if !this.Query[i].Equal(&that1.Query[i]) { + return false + } + } + if this.Fragment != nil && that1.Fragment != nil { + if *this.Fragment != *that1.Fragment { + return false + } + } else if this.Fragment != nil { + return false + } else if that1.Fragment != nil { + return false + } + return true +} +func (this *Unavailability) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Unavailability) + if !ok { + that2, ok := that.(Unavailability) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Unavailability") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Unavailability but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Unavailability but is not nil && this == nil") + } + if !this.Start.Equal(&that1.Start) { + return fmt.Errorf("Start this(%v) Not Equal that(%v)", this.Start, that1.Start) + } + if !this.Duration.Equal(that1.Duration) { + return fmt.Errorf("Duration this(%v) Not Equal that(%v)", this.Duration, that1.Duration) + } + return nil +} +func (this *Unavailability) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Unavailability) + if !ok { + that2, ok := that.(Unavailability) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if !this.Start.Equal(&that1.Start) { + return false + } + if !this.Duration.Equal(that1.Duration) { + return false + } + return true +} +func (this *MachineID) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*MachineID) + if !ok { + that2, ok := that.(MachineID) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *MachineID") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *MachineID but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *MachineID but is not nil && this == nil") + } + if this.Hostname != nil && that1.Hostname != nil { + if *this.Hostname != *that1.Hostname { + return fmt.Errorf("Hostname this(%v) Not Equal that(%v)", *this.Hostname, *that1.Hostname) + } + } else if this.Hostname != nil { + return fmt.Errorf("this.Hostname == nil && that.Hostname != nil") + } else if that1.Hostname != nil { + return fmt.Errorf("Hostname this(%v) Not Equal that(%v)", this.Hostname, that1.Hostname) + } + if this.IP != nil && that1.IP != nil { + if *this.IP != *that1.IP { + return fmt.Errorf("IP this(%v) Not Equal that(%v)", *this.IP, *that1.IP) + } + } else if 
this.IP != nil { + return fmt.Errorf("this.IP == nil && that.IP != nil") + } else if that1.IP != nil { + return fmt.Errorf("IP this(%v) Not Equal that(%v)", this.IP, that1.IP) + } + return nil +} +func (this *MachineID) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*MachineID) + if !ok { + that2, ok := that.(MachineID) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Hostname != nil && that1.Hostname != nil { + if *this.Hostname != *that1.Hostname { + return false + } + } else if this.Hostname != nil { + return false + } else if that1.Hostname != nil { + return false + } + if this.IP != nil && that1.IP != nil { + if *this.IP != *that1.IP { + return false + } + } else if this.IP != nil { + return false + } else if that1.IP != nil { + return false + } + return true +} +func (this *MachineInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*MachineInfo) + if !ok { + that2, ok := that.(MachineInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *MachineInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *MachineInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *MachineInfo but is not nil && this == nil") + } + if !this.ID.Equal(&that1.ID) { + return fmt.Errorf("ID this(%v) Not Equal that(%v)", this.ID, that1.ID) + } + if this.Mode != nil && that1.Mode != nil { + if *this.Mode != *that1.Mode { + return fmt.Errorf("Mode this(%v) Not Equal that(%v)", *this.Mode, *that1.Mode) + } + } else if this.Mode != nil { + return fmt.Errorf("this.Mode == nil && that.Mode != nil") + } else if that1.Mode != nil { + return fmt.Errorf("Mode this(%v) Not Equal that(%v)", this.Mode, that1.Mode) + } + if !this.Unavailability.Equal(that1.Unavailability) { + return fmt.Errorf("Unavailability this(%v) Not Equal that(%v)", this.Unavailability, that1.Unavailability) + } + return nil +} +func (this *MachineInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*MachineInfo) + if !ok { + that2, ok := that.(MachineInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if !this.ID.Equal(&that1.ID) { + return false + } + if this.Mode != nil && that1.Mode != nil { + if *this.Mode != *that1.Mode { + return false + } + } else if this.Mode != nil { + return false + } else if that1.Mode != nil { + return false + } + if !this.Unavailability.Equal(that1.Unavailability) { + return false + } + return true +} +func (this *FrameworkInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*FrameworkInfo) + if !ok { + that2, ok := that.(FrameworkInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *FrameworkInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *FrameworkInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type 
*FrameworkInfo but is not nil && this == nil") + } + if this.User != that1.User { + return fmt.Errorf("User this(%v) Not Equal that(%v)", this.User, that1.User) + } + if this.Name != that1.Name { + return fmt.Errorf("Name this(%v) Not Equal that(%v)", this.Name, that1.Name) + } + if !this.ID.Equal(that1.ID) { + return fmt.Errorf("ID this(%v) Not Equal that(%v)", this.ID, that1.ID) + } + if this.FailoverTimeout != nil && that1.FailoverTimeout != nil { + if *this.FailoverTimeout != *that1.FailoverTimeout { + return fmt.Errorf("FailoverTimeout this(%v) Not Equal that(%v)", *this.FailoverTimeout, *that1.FailoverTimeout) + } + } else if this.FailoverTimeout != nil { + return fmt.Errorf("this.FailoverTimeout == nil && that.FailoverTimeout != nil") + } else if that1.FailoverTimeout != nil { + return fmt.Errorf("FailoverTimeout this(%v) Not Equal that(%v)", this.FailoverTimeout, that1.FailoverTimeout) + } + if this.Checkpoint != nil && that1.Checkpoint != nil { + if *this.Checkpoint != *that1.Checkpoint { + return fmt.Errorf("Checkpoint this(%v) Not Equal that(%v)", *this.Checkpoint, *that1.Checkpoint) + } + } else if this.Checkpoint != nil { + return fmt.Errorf("this.Checkpoint == nil && that.Checkpoint != nil") + } else if that1.Checkpoint != nil { + return fmt.Errorf("Checkpoint this(%v) Not Equal that(%v)", this.Checkpoint, that1.Checkpoint) + } + if this.Role != nil && that1.Role != nil { + if *this.Role != *that1.Role { + return fmt.Errorf("Role this(%v) Not Equal that(%v)", *this.Role, *that1.Role) + } + } else if this.Role != nil { + return fmt.Errorf("this.Role == nil && that.Role != nil") + } else if that1.Role != nil { + return fmt.Errorf("Role this(%v) Not Equal that(%v)", this.Role, that1.Role) + } + if len(this.Roles) != len(that1.Roles) { + return fmt.Errorf("Roles this(%v) Not Equal that(%v)", len(this.Roles), len(that1.Roles)) + } + for i := range this.Roles { + if this.Roles[i] != that1.Roles[i] { + return fmt.Errorf("Roles this[%v](%v) Not Equal that[%v](%v)", i, this.Roles[i], i, that1.Roles[i]) + } + } + if this.Hostname != nil && that1.Hostname != nil { + if *this.Hostname != *that1.Hostname { + return fmt.Errorf("Hostname this(%v) Not Equal that(%v)", *this.Hostname, *that1.Hostname) + } + } else if this.Hostname != nil { + return fmt.Errorf("this.Hostname == nil && that.Hostname != nil") + } else if that1.Hostname != nil { + return fmt.Errorf("Hostname this(%v) Not Equal that(%v)", this.Hostname, that1.Hostname) + } + if this.Principal != nil && that1.Principal != nil { + if *this.Principal != *that1.Principal { + return fmt.Errorf("Principal this(%v) Not Equal that(%v)", *this.Principal, *that1.Principal) + } + } else if this.Principal != nil { + return fmt.Errorf("this.Principal == nil && that.Principal != nil") + } else if that1.Principal != nil { + return fmt.Errorf("Principal this(%v) Not Equal that(%v)", this.Principal, that1.Principal) + } + if this.WebUiURL != nil && that1.WebUiURL != nil { + if *this.WebUiURL != *that1.WebUiURL { + return fmt.Errorf("WebUiURL this(%v) Not Equal that(%v)", *this.WebUiURL, *that1.WebUiURL) + } + } else if this.WebUiURL != nil { + return fmt.Errorf("this.WebUiURL == nil && that.WebUiURL != nil") + } else if that1.WebUiURL != nil { + return fmt.Errorf("WebUiURL this(%v) Not Equal that(%v)", this.WebUiURL, that1.WebUiURL) + } + if len(this.Capabilities) != len(that1.Capabilities) { + return fmt.Errorf("Capabilities this(%v) Not Equal that(%v)", len(this.Capabilities), len(that1.Capabilities)) + } + for i := range this.Capabilities { + 
if !this.Capabilities[i].Equal(&that1.Capabilities[i]) { + return fmt.Errorf("Capabilities this[%v](%v) Not Equal that[%v](%v)", i, this.Capabilities[i], i, that1.Capabilities[i]) + } + } + if !this.Labels.Equal(that1.Labels) { + return fmt.Errorf("Labels this(%v) Not Equal that(%v)", this.Labels, that1.Labels) + } + return nil +} +func (this *FrameworkInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*FrameworkInfo) + if !ok { + that2, ok := that.(FrameworkInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.User != that1.User { + return false + } + if this.Name != that1.Name { + return false + } + if !this.ID.Equal(that1.ID) { + return false + } + if this.FailoverTimeout != nil && that1.FailoverTimeout != nil { + if *this.FailoverTimeout != *that1.FailoverTimeout { + return false + } + } else if this.FailoverTimeout != nil { + return false + } else if that1.FailoverTimeout != nil { + return false + } + if this.Checkpoint != nil && that1.Checkpoint != nil { + if *this.Checkpoint != *that1.Checkpoint { + return false + } + } else if this.Checkpoint != nil { + return false + } else if that1.Checkpoint != nil { + return false + } + if this.Role != nil && that1.Role != nil { + if *this.Role != *that1.Role { + return false + } + } else if this.Role != nil { + return false + } else if that1.Role != nil { + return false + } + if len(this.Roles) != len(that1.Roles) { + return false + } + for i := range this.Roles { + if this.Roles[i] != that1.Roles[i] { + return false + } + } + if this.Hostname != nil && that1.Hostname != nil { + if *this.Hostname != *that1.Hostname { + return false + } + } else if this.Hostname != nil { + return false + } else if that1.Hostname != nil { + return false + } + if this.Principal != nil && that1.Principal != nil { + if *this.Principal != *that1.Principal { + return false + } + } else if this.Principal != nil { + return false + } else if that1.Principal != nil { + return false + } + if this.WebUiURL != nil && that1.WebUiURL != nil { + if *this.WebUiURL != *that1.WebUiURL { + return false + } + } else if this.WebUiURL != nil { + return false + } else if that1.WebUiURL != nil { + return false + } + if len(this.Capabilities) != len(that1.Capabilities) { + return false + } + for i := range this.Capabilities { + if !this.Capabilities[i].Equal(&that1.Capabilities[i]) { + return false + } + } + if !this.Labels.Equal(that1.Labels) { + return false + } + return true +} +func (this *FrameworkInfo_Capability) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*FrameworkInfo_Capability) + if !ok { + that2, ok := that.(FrameworkInfo_Capability) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *FrameworkInfo_Capability") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *FrameworkInfo_Capability but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *FrameworkInfo_Capability but is not nil && this == nil") + } + if this.Type != that1.Type { + return fmt.Errorf("Type this(%v) Not Equal that(%v)", this.Type, that1.Type) + } + return nil +} +func (this *FrameworkInfo_Capability) Equal(that interface{}) bool { + if that == nil { + if 
this == nil { + return true + } + return false + } + + that1, ok := that.(*FrameworkInfo_Capability) + if !ok { + that2, ok := that.(FrameworkInfo_Capability) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + return true +} +func (this *CheckInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*CheckInfo) + if !ok { + that2, ok := that.(CheckInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *CheckInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *CheckInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *CheckInfo but is not nil && this == nil") + } + if this.Type != that1.Type { + return fmt.Errorf("Type this(%v) Not Equal that(%v)", this.Type, that1.Type) + } + if !this.Command.Equal(that1.Command) { + return fmt.Errorf("Command this(%v) Not Equal that(%v)", this.Command, that1.Command) + } + if !this.HTTP.Equal(that1.HTTP) { + return fmt.Errorf("HTTP this(%v) Not Equal that(%v)", this.HTTP, that1.HTTP) + } + if !this.TCP.Equal(that1.TCP) { + return fmt.Errorf("TCP this(%v) Not Equal that(%v)", this.TCP, that1.TCP) + } + if this.DelaySeconds != nil && that1.DelaySeconds != nil { + if *this.DelaySeconds != *that1.DelaySeconds { + return fmt.Errorf("DelaySeconds this(%v) Not Equal that(%v)", *this.DelaySeconds, *that1.DelaySeconds) + } + } else if this.DelaySeconds != nil { + return fmt.Errorf("this.DelaySeconds == nil && that.DelaySeconds != nil") + } else if that1.DelaySeconds != nil { + return fmt.Errorf("DelaySeconds this(%v) Not Equal that(%v)", this.DelaySeconds, that1.DelaySeconds) + } + if this.IntervalSeconds != nil && that1.IntervalSeconds != nil { + if *this.IntervalSeconds != *that1.IntervalSeconds { + return fmt.Errorf("IntervalSeconds this(%v) Not Equal that(%v)", *this.IntervalSeconds, *that1.IntervalSeconds) + } + } else if this.IntervalSeconds != nil { + return fmt.Errorf("this.IntervalSeconds == nil && that.IntervalSeconds != nil") + } else if that1.IntervalSeconds != nil { + return fmt.Errorf("IntervalSeconds this(%v) Not Equal that(%v)", this.IntervalSeconds, that1.IntervalSeconds) + } + if this.TimeoutSeconds != nil && that1.TimeoutSeconds != nil { + if *this.TimeoutSeconds != *that1.TimeoutSeconds { + return fmt.Errorf("TimeoutSeconds this(%v) Not Equal that(%v)", *this.TimeoutSeconds, *that1.TimeoutSeconds) + } + } else if this.TimeoutSeconds != nil { + return fmt.Errorf("this.TimeoutSeconds == nil && that.TimeoutSeconds != nil") + } else if that1.TimeoutSeconds != nil { + return fmt.Errorf("TimeoutSeconds this(%v) Not Equal that(%v)", this.TimeoutSeconds, that1.TimeoutSeconds) + } + return nil +} +func (this *CheckInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*CheckInfo) + if !ok { + that2, ok := that.(CheckInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + if !this.Command.Equal(that1.Command) { + return false + } + if !this.HTTP.Equal(that1.HTTP) { + return false + } + 
if !this.TCP.Equal(that1.TCP) { + return false + } + if this.DelaySeconds != nil && that1.DelaySeconds != nil { + if *this.DelaySeconds != *that1.DelaySeconds { + return false + } + } else if this.DelaySeconds != nil { + return false + } else if that1.DelaySeconds != nil { + return false + } + if this.IntervalSeconds != nil && that1.IntervalSeconds != nil { + if *this.IntervalSeconds != *that1.IntervalSeconds { + return false + } + } else if this.IntervalSeconds != nil { + return false + } else if that1.IntervalSeconds != nil { + return false + } + if this.TimeoutSeconds != nil && that1.TimeoutSeconds != nil { + if *this.TimeoutSeconds != *that1.TimeoutSeconds { + return false + } + } else if this.TimeoutSeconds != nil { + return false + } else if that1.TimeoutSeconds != nil { + return false + } + return true +} +func (this *CheckInfo_Command) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*CheckInfo_Command) + if !ok { + that2, ok := that.(CheckInfo_Command) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *CheckInfo_Command") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *CheckInfo_Command but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *CheckInfo_Command but is not nil && this == nil") + } + if !this.Command.Equal(&that1.Command) { + return fmt.Errorf("Command this(%v) Not Equal that(%v)", this.Command, that1.Command) + } + return nil +} +func (this *CheckInfo_Command) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*CheckInfo_Command) + if !ok { + that2, ok := that.(CheckInfo_Command) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if !this.Command.Equal(&that1.Command) { + return false + } + return true +} +func (this *CheckInfo_Http) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*CheckInfo_Http) + if !ok { + that2, ok := that.(CheckInfo_Http) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *CheckInfo_Http") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *CheckInfo_Http but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *CheckInfo_Http but is not nil && this == nil") + } + if this.Port != that1.Port { + return fmt.Errorf("Port this(%v) Not Equal that(%v)", this.Port, that1.Port) + } + if this.Path != nil && that1.Path != nil { + if *this.Path != *that1.Path { + return fmt.Errorf("Path this(%v) Not Equal that(%v)", *this.Path, *that1.Path) + } + } else if this.Path != nil { + return fmt.Errorf("this.Path == nil && that.Path != nil") + } else if that1.Path != nil { + return fmt.Errorf("Path this(%v) Not Equal that(%v)", this.Path, that1.Path) + } + return nil +} +func (this *CheckInfo_Http) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*CheckInfo_Http) + if !ok { + that2, ok := that.(CheckInfo_Http) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } 
+ return false + } else if this == nil { + return false + } + if this.Port != that1.Port { + return false + } + if this.Path != nil && that1.Path != nil { + if *this.Path != *that1.Path { + return false + } + } else if this.Path != nil { + return false + } else if that1.Path != nil { + return false + } + return true +} +func (this *CheckInfo_Tcp) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*CheckInfo_Tcp) + if !ok { + that2, ok := that.(CheckInfo_Tcp) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *CheckInfo_Tcp") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *CheckInfo_Tcp but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *CheckInfo_Tcp but is not nil && this == nil") + } + if this.Port != that1.Port { + return fmt.Errorf("Port this(%v) Not Equal that(%v)", this.Port, that1.Port) + } + return nil +} +func (this *CheckInfo_Tcp) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*CheckInfo_Tcp) + if !ok { + that2, ok := that.(CheckInfo_Tcp) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Port != that1.Port { + return false + } + return true +} +func (this *HealthCheck) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*HealthCheck) + if !ok { + that2, ok := that.(HealthCheck) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *HealthCheck") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *HealthCheck but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *HealthCheck but is not nil && this == nil") + } + if this.DelaySeconds != nil && that1.DelaySeconds != nil { + if *this.DelaySeconds != *that1.DelaySeconds { + return fmt.Errorf("DelaySeconds this(%v) Not Equal that(%v)", *this.DelaySeconds, *that1.DelaySeconds) + } + } else if this.DelaySeconds != nil { + return fmt.Errorf("this.DelaySeconds == nil && that.DelaySeconds != nil") + } else if that1.DelaySeconds != nil { + return fmt.Errorf("DelaySeconds this(%v) Not Equal that(%v)", this.DelaySeconds, that1.DelaySeconds) + } + if this.IntervalSeconds != nil && that1.IntervalSeconds != nil { + if *this.IntervalSeconds != *that1.IntervalSeconds { + return fmt.Errorf("IntervalSeconds this(%v) Not Equal that(%v)", *this.IntervalSeconds, *that1.IntervalSeconds) + } + } else if this.IntervalSeconds != nil { + return fmt.Errorf("this.IntervalSeconds == nil && that.IntervalSeconds != nil") + } else if that1.IntervalSeconds != nil { + return fmt.Errorf("IntervalSeconds this(%v) Not Equal that(%v)", this.IntervalSeconds, that1.IntervalSeconds) + } + if this.TimeoutSeconds != nil && that1.TimeoutSeconds != nil { + if *this.TimeoutSeconds != *that1.TimeoutSeconds { + return fmt.Errorf("TimeoutSeconds this(%v) Not Equal that(%v)", *this.TimeoutSeconds, *that1.TimeoutSeconds) + } + } else if this.TimeoutSeconds != nil { + return fmt.Errorf("this.TimeoutSeconds == nil && that.TimeoutSeconds != nil") + } else if that1.TimeoutSeconds != nil { + return 
fmt.Errorf("TimeoutSeconds this(%v) Not Equal that(%v)", this.TimeoutSeconds, that1.TimeoutSeconds) + } + if this.ConsecutiveFailures != nil && that1.ConsecutiveFailures != nil { + if *this.ConsecutiveFailures != *that1.ConsecutiveFailures { + return fmt.Errorf("ConsecutiveFailures this(%v) Not Equal that(%v)", *this.ConsecutiveFailures, *that1.ConsecutiveFailures) + } + } else if this.ConsecutiveFailures != nil { + return fmt.Errorf("this.ConsecutiveFailures == nil && that.ConsecutiveFailures != nil") + } else if that1.ConsecutiveFailures != nil { + return fmt.Errorf("ConsecutiveFailures this(%v) Not Equal that(%v)", this.ConsecutiveFailures, that1.ConsecutiveFailures) + } + if this.GracePeriodSeconds != nil && that1.GracePeriodSeconds != nil { + if *this.GracePeriodSeconds != *that1.GracePeriodSeconds { + return fmt.Errorf("GracePeriodSeconds this(%v) Not Equal that(%v)", *this.GracePeriodSeconds, *that1.GracePeriodSeconds) + } + } else if this.GracePeriodSeconds != nil { + return fmt.Errorf("this.GracePeriodSeconds == nil && that.GracePeriodSeconds != nil") + } else if that1.GracePeriodSeconds != nil { + return fmt.Errorf("GracePeriodSeconds this(%v) Not Equal that(%v)", this.GracePeriodSeconds, that1.GracePeriodSeconds) + } + if this.Type != that1.Type { + return fmt.Errorf("Type this(%v) Not Equal that(%v)", this.Type, that1.Type) + } + if !this.Command.Equal(that1.Command) { + return fmt.Errorf("Command this(%v) Not Equal that(%v)", this.Command, that1.Command) + } + if !this.HTTP.Equal(that1.HTTP) { + return fmt.Errorf("HTTP this(%v) Not Equal that(%v)", this.HTTP, that1.HTTP) + } + if !this.TCP.Equal(that1.TCP) { + return fmt.Errorf("TCP this(%v) Not Equal that(%v)", this.TCP, that1.TCP) + } + return nil +} +func (this *HealthCheck) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*HealthCheck) + if !ok { + that2, ok := that.(HealthCheck) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.DelaySeconds != nil && that1.DelaySeconds != nil { + if *this.DelaySeconds != *that1.DelaySeconds { + return false + } + } else if this.DelaySeconds != nil { + return false + } else if that1.DelaySeconds != nil { + return false + } + if this.IntervalSeconds != nil && that1.IntervalSeconds != nil { + if *this.IntervalSeconds != *that1.IntervalSeconds { + return false + } + } else if this.IntervalSeconds != nil { + return false + } else if that1.IntervalSeconds != nil { + return false + } + if this.TimeoutSeconds != nil && that1.TimeoutSeconds != nil { + if *this.TimeoutSeconds != *that1.TimeoutSeconds { + return false + } + } else if this.TimeoutSeconds != nil { + return false + } else if that1.TimeoutSeconds != nil { + return false + } + if this.ConsecutiveFailures != nil && that1.ConsecutiveFailures != nil { + if *this.ConsecutiveFailures != *that1.ConsecutiveFailures { + return false + } + } else if this.ConsecutiveFailures != nil { + return false + } else if that1.ConsecutiveFailures != nil { + return false + } + if this.GracePeriodSeconds != nil && that1.GracePeriodSeconds != nil { + if *this.GracePeriodSeconds != *that1.GracePeriodSeconds { + return false + } + } else if this.GracePeriodSeconds != nil { + return false + } else if that1.GracePeriodSeconds != nil { + return false + } + if this.Type != that1.Type { + return false + } + if !this.Command.Equal(that1.Command) { + 
return false + } + if !this.HTTP.Equal(that1.HTTP) { + return false + } + if !this.TCP.Equal(that1.TCP) { + return false + } + return true +} +func (this *HealthCheck_HTTPCheckInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*HealthCheck_HTTPCheckInfo) + if !ok { + that2, ok := that.(HealthCheck_HTTPCheckInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *HealthCheck_HTTPCheckInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *HealthCheck_HTTPCheckInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *HealthCheck_HTTPCheckInfo but is not nil && this == nil") + } + if this.Protocol != nil && that1.Protocol != nil { + if *this.Protocol != *that1.Protocol { + return fmt.Errorf("Protocol this(%v) Not Equal that(%v)", *this.Protocol, *that1.Protocol) + } + } else if this.Protocol != nil { + return fmt.Errorf("this.Protocol == nil && that.Protocol != nil") + } else if that1.Protocol != nil { + return fmt.Errorf("Protocol this(%v) Not Equal that(%v)", this.Protocol, that1.Protocol) + } + if this.Scheme != nil && that1.Scheme != nil { + if *this.Scheme != *that1.Scheme { + return fmt.Errorf("Scheme this(%v) Not Equal that(%v)", *this.Scheme, *that1.Scheme) + } + } else if this.Scheme != nil { + return fmt.Errorf("this.Scheme == nil && that.Scheme != nil") + } else if that1.Scheme != nil { + return fmt.Errorf("Scheme this(%v) Not Equal that(%v)", this.Scheme, that1.Scheme) + } + if this.Port != that1.Port { + return fmt.Errorf("Port this(%v) Not Equal that(%v)", this.Port, that1.Port) + } + if this.Path != nil && that1.Path != nil { + if *this.Path != *that1.Path { + return fmt.Errorf("Path this(%v) Not Equal that(%v)", *this.Path, *that1.Path) + } + } else if this.Path != nil { + return fmt.Errorf("this.Path == nil && that.Path != nil") + } else if that1.Path != nil { + return fmt.Errorf("Path this(%v) Not Equal that(%v)", this.Path, that1.Path) + } + if len(this.Statuses) != len(that1.Statuses) { + return fmt.Errorf("Statuses this(%v) Not Equal that(%v)", len(this.Statuses), len(that1.Statuses)) + } + for i := range this.Statuses { + if this.Statuses[i] != that1.Statuses[i] { + return fmt.Errorf("Statuses this[%v](%v) Not Equal that[%v](%v)", i, this.Statuses[i], i, that1.Statuses[i]) + } + } + return nil +} +func (this *HealthCheck_HTTPCheckInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*HealthCheck_HTTPCheckInfo) + if !ok { + that2, ok := that.(HealthCheck_HTTPCheckInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Protocol != nil && that1.Protocol != nil { + if *this.Protocol != *that1.Protocol { + return false + } + } else if this.Protocol != nil { + return false + } else if that1.Protocol != nil { + return false + } + if this.Scheme != nil && that1.Scheme != nil { + if *this.Scheme != *that1.Scheme { + return false + } + } else if this.Scheme != nil { + return false + } else if that1.Scheme != nil { + return false + } + if this.Port != that1.Port { + return false + } + if this.Path != nil && that1.Path != nil { + if *this.Path != *that1.Path { + return false + } + } else if this.Path != nil { + return false + } 
else if that1.Path != nil { + return false + } + if len(this.Statuses) != len(that1.Statuses) { + return false + } + for i := range this.Statuses { + if this.Statuses[i] != that1.Statuses[i] { + return false + } + } + return true +} +func (this *HealthCheck_TCPCheckInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*HealthCheck_TCPCheckInfo) + if !ok { + that2, ok := that.(HealthCheck_TCPCheckInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *HealthCheck_TCPCheckInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *HealthCheck_TCPCheckInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *HealthCheck_TCPCheckInfo but is not nil && this == nil") + } + if this.Protocol != nil && that1.Protocol != nil { + if *this.Protocol != *that1.Protocol { + return fmt.Errorf("Protocol this(%v) Not Equal that(%v)", *this.Protocol, *that1.Protocol) + } + } else if this.Protocol != nil { + return fmt.Errorf("this.Protocol == nil && that.Protocol != nil") + } else if that1.Protocol != nil { + return fmt.Errorf("Protocol this(%v) Not Equal that(%v)", this.Protocol, that1.Protocol) + } + if this.Port != that1.Port { + return fmt.Errorf("Port this(%v) Not Equal that(%v)", this.Port, that1.Port) + } + return nil +} +func (this *HealthCheck_TCPCheckInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*HealthCheck_TCPCheckInfo) + if !ok { + that2, ok := that.(HealthCheck_TCPCheckInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Protocol != nil && that1.Protocol != nil { + if *this.Protocol != *that1.Protocol { + return false + } + } else if this.Protocol != nil { + return false + } else if that1.Protocol != nil { + return false + } + if this.Port != that1.Port { + return false + } + return true +} +func (this *KillPolicy) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*KillPolicy) + if !ok { + that2, ok := that.(KillPolicy) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *KillPolicy") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *KillPolicy but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *KillPolicy but is not nil && this == nil") + } + if !this.GracePeriod.Equal(that1.GracePeriod) { + return fmt.Errorf("GracePeriod this(%v) Not Equal that(%v)", this.GracePeriod, that1.GracePeriod) + } + return nil +} +func (this *KillPolicy) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*KillPolicy) + if !ok { + that2, ok := that.(KillPolicy) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if !this.GracePeriod.Equal(that1.GracePeriod) { + return false + } + return true +} +func (this *CommandInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return 
fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*CommandInfo) + if !ok { + that2, ok := that.(CommandInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *CommandInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *CommandInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *CommandInfo but is not nil && this == nil") + } + if len(this.URIs) != len(that1.URIs) { + return fmt.Errorf("URIs this(%v) Not Equal that(%v)", len(this.URIs), len(that1.URIs)) + } + for i := range this.URIs { + if !this.URIs[i].Equal(&that1.URIs[i]) { + return fmt.Errorf("URIs this[%v](%v) Not Equal that[%v](%v)", i, this.URIs[i], i, that1.URIs[i]) + } + } + if !this.Environment.Equal(that1.Environment) { + return fmt.Errorf("Environment this(%v) Not Equal that(%v)", this.Environment, that1.Environment) + } + if this.Shell != nil && that1.Shell != nil { + if *this.Shell != *that1.Shell { + return fmt.Errorf("Shell this(%v) Not Equal that(%v)", *this.Shell, *that1.Shell) + } + } else if this.Shell != nil { + return fmt.Errorf("this.Shell == nil && that.Shell != nil") + } else if that1.Shell != nil { + return fmt.Errorf("Shell this(%v) Not Equal that(%v)", this.Shell, that1.Shell) + } + if this.Value != nil && that1.Value != nil { + if *this.Value != *that1.Value { + return fmt.Errorf("Value this(%v) Not Equal that(%v)", *this.Value, *that1.Value) + } + } else if this.Value != nil { + return fmt.Errorf("this.Value == nil && that.Value != nil") + } else if that1.Value != nil { + return fmt.Errorf("Value this(%v) Not Equal that(%v)", this.Value, that1.Value) + } + if len(this.Arguments) != len(that1.Arguments) { + return fmt.Errorf("Arguments this(%v) Not Equal that(%v)", len(this.Arguments), len(that1.Arguments)) + } + for i := range this.Arguments { + if this.Arguments[i] != that1.Arguments[i] { + return fmt.Errorf("Arguments this[%v](%v) Not Equal that[%v](%v)", i, this.Arguments[i], i, that1.Arguments[i]) + } + } + if this.User != nil && that1.User != nil { + if *this.User != *that1.User { + return fmt.Errorf("User this(%v) Not Equal that(%v)", *this.User, *that1.User) + } + } else if this.User != nil { + return fmt.Errorf("this.User == nil && that.User != nil") + } else if that1.User != nil { + return fmt.Errorf("User this(%v) Not Equal that(%v)", this.User, that1.User) + } + return nil +} +func (this *CommandInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*CommandInfo) + if !ok { + that2, ok := that.(CommandInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if len(this.URIs) != len(that1.URIs) { + return false + } + for i := range this.URIs { + if !this.URIs[i].Equal(&that1.URIs[i]) { + return false + } + } + if !this.Environment.Equal(that1.Environment) { + return false + } + if this.Shell != nil && that1.Shell != nil { + if *this.Shell != *that1.Shell { + return false + } + } else if this.Shell != nil { + return false + } else if that1.Shell != nil { + return false + } + if this.Value != nil && that1.Value != nil { + if *this.Value != *that1.Value { + return false + } + } else if this.Value != nil { + return false + } else if that1.Value != nil { + return false + } + if len(this.Arguments) != len(that1.Arguments) { + return false + } + for i := range 
this.Arguments { + if this.Arguments[i] != that1.Arguments[i] { + return false + } + } + if this.User != nil && that1.User != nil { + if *this.User != *that1.User { + return false + } + } else if this.User != nil { + return false + } else if that1.User != nil { + return false + } + return true +} +func (this *CommandInfo_URI) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*CommandInfo_URI) + if !ok { + that2, ok := that.(CommandInfo_URI) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *CommandInfo_URI") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *CommandInfo_URI but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *CommandInfo_URI but is not nil && this == nil") + } + if this.Value != that1.Value { + return fmt.Errorf("Value this(%v) Not Equal that(%v)", this.Value, that1.Value) + } + if this.Executable != nil && that1.Executable != nil { + if *this.Executable != *that1.Executable { + return fmt.Errorf("Executable this(%v) Not Equal that(%v)", *this.Executable, *that1.Executable) + } + } else if this.Executable != nil { + return fmt.Errorf("this.Executable == nil && that.Executable != nil") + } else if that1.Executable != nil { + return fmt.Errorf("Executable this(%v) Not Equal that(%v)", this.Executable, that1.Executable) + } + if this.Extract != nil && that1.Extract != nil { + if *this.Extract != *that1.Extract { + return fmt.Errorf("Extract this(%v) Not Equal that(%v)", *this.Extract, *that1.Extract) + } + } else if this.Extract != nil { + return fmt.Errorf("this.Extract == nil && that.Extract != nil") + } else if that1.Extract != nil { + return fmt.Errorf("Extract this(%v) Not Equal that(%v)", this.Extract, that1.Extract) + } + if this.Cache != nil && that1.Cache != nil { + if *this.Cache != *that1.Cache { + return fmt.Errorf("Cache this(%v) Not Equal that(%v)", *this.Cache, *that1.Cache) + } + } else if this.Cache != nil { + return fmt.Errorf("this.Cache == nil && that.Cache != nil") + } else if that1.Cache != nil { + return fmt.Errorf("Cache this(%v) Not Equal that(%v)", this.Cache, that1.Cache) + } + if this.OutputFile != nil && that1.OutputFile != nil { + if *this.OutputFile != *that1.OutputFile { + return fmt.Errorf("OutputFile this(%v) Not Equal that(%v)", *this.OutputFile, *that1.OutputFile) + } + } else if this.OutputFile != nil { + return fmt.Errorf("this.OutputFile == nil && that.OutputFile != nil") + } else if that1.OutputFile != nil { + return fmt.Errorf("OutputFile this(%v) Not Equal that(%v)", this.OutputFile, that1.OutputFile) + } + return nil +} +func (this *CommandInfo_URI) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*CommandInfo_URI) + if !ok { + that2, ok := that.(CommandInfo_URI) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if this.Executable != nil && that1.Executable != nil { + if *this.Executable != *that1.Executable { + return false + } + } else if this.Executable != nil { + return false + } else if that1.Executable != nil { + return false + } + if this.Extract != nil && that1.Extract != nil { + if *this.Extract != *that1.Extract { + return false + } + } 
else if this.Extract != nil { + return false + } else if that1.Extract != nil { + return false + } + if this.Cache != nil && that1.Cache != nil { + if *this.Cache != *that1.Cache { + return false + } + } else if this.Cache != nil { + return false + } else if that1.Cache != nil { + return false + } + if this.OutputFile != nil && that1.OutputFile != nil { + if *this.OutputFile != *that1.OutputFile { + return false + } + } else if this.OutputFile != nil { + return false + } else if that1.OutputFile != nil { + return false + } + return true +} +func (this *ExecutorInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*ExecutorInfo) + if !ok { + that2, ok := that.(ExecutorInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *ExecutorInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *ExecutorInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *ExecutorInfo but is not nil && this == nil") + } + if this.Type != that1.Type { + return fmt.Errorf("Type this(%v) Not Equal that(%v)", this.Type, that1.Type) + } + if !this.ExecutorID.Equal(&that1.ExecutorID) { + return fmt.Errorf("ExecutorID this(%v) Not Equal that(%v)", this.ExecutorID, that1.ExecutorID) + } + if !this.FrameworkID.Equal(that1.FrameworkID) { + return fmt.Errorf("FrameworkID this(%v) Not Equal that(%v)", this.FrameworkID, that1.FrameworkID) + } + if !this.Command.Equal(that1.Command) { + return fmt.Errorf("Command this(%v) Not Equal that(%v)", this.Command, that1.Command) + } + if !this.Container.Equal(that1.Container) { + return fmt.Errorf("Container this(%v) Not Equal that(%v)", this.Container, that1.Container) + } + if len(this.Resources) != len(that1.Resources) { + return fmt.Errorf("Resources this(%v) Not Equal that(%v)", len(this.Resources), len(that1.Resources)) + } + for i := range this.Resources { + if !this.Resources[i].Equal(&that1.Resources[i]) { + return fmt.Errorf("Resources this[%v](%v) Not Equal that[%v](%v)", i, this.Resources[i], i, that1.Resources[i]) + } + } + if this.Name != nil && that1.Name != nil { + if *this.Name != *that1.Name { + return fmt.Errorf("Name this(%v) Not Equal that(%v)", *this.Name, *that1.Name) + } + } else if this.Name != nil { + return fmt.Errorf("this.Name == nil && that.Name != nil") + } else if that1.Name != nil { + return fmt.Errorf("Name this(%v) Not Equal that(%v)", this.Name, that1.Name) + } + if this.Source != nil && that1.Source != nil { + if *this.Source != *that1.Source { + return fmt.Errorf("Source this(%v) Not Equal that(%v)", *this.Source, *that1.Source) + } + } else if this.Source != nil { + return fmt.Errorf("this.Source == nil && that.Source != nil") + } else if that1.Source != nil { + return fmt.Errorf("Source this(%v) Not Equal that(%v)", this.Source, that1.Source) + } + if !bytes.Equal(this.Data, that1.Data) { + return fmt.Errorf("Data this(%v) Not Equal that(%v)", this.Data, that1.Data) + } + if !this.Discovery.Equal(that1.Discovery) { + return fmt.Errorf("Discovery this(%v) Not Equal that(%v)", this.Discovery, that1.Discovery) + } + if !this.ShutdownGracePeriod.Equal(that1.ShutdownGracePeriod) { + return fmt.Errorf("ShutdownGracePeriod this(%v) Not Equal that(%v)", this.ShutdownGracePeriod, that1.ShutdownGracePeriod) + } + if !this.Labels.Equal(that1.Labels) { + return fmt.Errorf("Labels this(%v) Not Equal that(%v)", this.Labels, 
that1.Labels) + } + return nil +} +func (this *ExecutorInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*ExecutorInfo) + if !ok { + that2, ok := that.(ExecutorInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + if !this.ExecutorID.Equal(&that1.ExecutorID) { + return false + } + if !this.FrameworkID.Equal(that1.FrameworkID) { + return false + } + if !this.Command.Equal(that1.Command) { + return false + } + if !this.Container.Equal(that1.Container) { + return false + } + if len(this.Resources) != len(that1.Resources) { + return false + } + for i := range this.Resources { + if !this.Resources[i].Equal(&that1.Resources[i]) { + return false + } + } + if this.Name != nil && that1.Name != nil { + if *this.Name != *that1.Name { + return false + } + } else if this.Name != nil { + return false + } else if that1.Name != nil { + return false + } + if this.Source != nil && that1.Source != nil { + if *this.Source != *that1.Source { + return false + } + } else if this.Source != nil { + return false + } else if that1.Source != nil { + return false + } + if !bytes.Equal(this.Data, that1.Data) { + return false + } + if !this.Discovery.Equal(that1.Discovery) { + return false + } + if !this.ShutdownGracePeriod.Equal(that1.ShutdownGracePeriod) { + return false + } + if !this.Labels.Equal(that1.Labels) { + return false + } + return true +} +func (this *DomainInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*DomainInfo) + if !ok { + that2, ok := that.(DomainInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *DomainInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *DomainInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *DomainInfo but is not nil && this == nil") + } + if !this.FaultDomain.Equal(that1.FaultDomain) { + return fmt.Errorf("FaultDomain this(%v) Not Equal that(%v)", this.FaultDomain, that1.FaultDomain) + } + return nil +} +func (this *DomainInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*DomainInfo) + if !ok { + that2, ok := that.(DomainInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if !this.FaultDomain.Equal(that1.FaultDomain) { + return false + } + return true +} +func (this *DomainInfo_FaultDomain) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*DomainInfo_FaultDomain) + if !ok { + that2, ok := that.(DomainInfo_FaultDomain) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *DomainInfo_FaultDomain") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *DomainInfo_FaultDomain but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *DomainInfo_FaultDomain but is not nil && this == nil") + } + if !this.Region.Equal(&that1.Region) { + 
return fmt.Errorf("Region this(%v) Not Equal that(%v)", this.Region, that1.Region) + } + if !this.Zone.Equal(&that1.Zone) { + return fmt.Errorf("Zone this(%v) Not Equal that(%v)", this.Zone, that1.Zone) + } + return nil +} +func (this *DomainInfo_FaultDomain) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*DomainInfo_FaultDomain) + if !ok { + that2, ok := that.(DomainInfo_FaultDomain) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if !this.Region.Equal(&that1.Region) { + return false + } + if !this.Zone.Equal(&that1.Zone) { + return false + } + return true +} +func (this *DomainInfo_FaultDomain_RegionInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*DomainInfo_FaultDomain_RegionInfo) + if !ok { + that2, ok := that.(DomainInfo_FaultDomain_RegionInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *DomainInfo_FaultDomain_RegionInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *DomainInfo_FaultDomain_RegionInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *DomainInfo_FaultDomain_RegionInfo but is not nil && this == nil") + } + if this.Name != that1.Name { + return fmt.Errorf("Name this(%v) Not Equal that(%v)", this.Name, that1.Name) + } + return nil +} +func (this *DomainInfo_FaultDomain_RegionInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*DomainInfo_FaultDomain_RegionInfo) + if !ok { + that2, ok := that.(DomainInfo_FaultDomain_RegionInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + return true +} +func (this *DomainInfo_FaultDomain_ZoneInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*DomainInfo_FaultDomain_ZoneInfo) + if !ok { + that2, ok := that.(DomainInfo_FaultDomain_ZoneInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *DomainInfo_FaultDomain_ZoneInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *DomainInfo_FaultDomain_ZoneInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *DomainInfo_FaultDomain_ZoneInfo but is not nil && this == nil") + } + if this.Name != that1.Name { + return fmt.Errorf("Name this(%v) Not Equal that(%v)", this.Name, that1.Name) + } + return nil +} +func (this *DomainInfo_FaultDomain_ZoneInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*DomainInfo_FaultDomain_ZoneInfo) + if !ok { + that2, ok := that.(DomainInfo_FaultDomain_ZoneInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + return true +} +func (this 
*MasterInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*MasterInfo) + if !ok { + that2, ok := that.(MasterInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *MasterInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *MasterInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *MasterInfo but is not nil && this == nil") + } + if this.ID != that1.ID { + return fmt.Errorf("ID this(%v) Not Equal that(%v)", this.ID, that1.ID) + } + if this.IP != that1.IP { + return fmt.Errorf("IP this(%v) Not Equal that(%v)", this.IP, that1.IP) + } + if this.Port != nil && that1.Port != nil { + if *this.Port != *that1.Port { + return fmt.Errorf("Port this(%v) Not Equal that(%v)", *this.Port, *that1.Port) + } + } else if this.Port != nil { + return fmt.Errorf("this.Port == nil && that.Port != nil") + } else if that1.Port != nil { + return fmt.Errorf("Port this(%v) Not Equal that(%v)", this.Port, that1.Port) + } + if this.PID != nil && that1.PID != nil { + if *this.PID != *that1.PID { + return fmt.Errorf("PID this(%v) Not Equal that(%v)", *this.PID, *that1.PID) + } + } else if this.PID != nil { + return fmt.Errorf("this.PID == nil && that.PID != nil") + } else if that1.PID != nil { + return fmt.Errorf("PID this(%v) Not Equal that(%v)", this.PID, that1.PID) + } + if this.Hostname != nil && that1.Hostname != nil { + if *this.Hostname != *that1.Hostname { + return fmt.Errorf("Hostname this(%v) Not Equal that(%v)", *this.Hostname, *that1.Hostname) + } + } else if this.Hostname != nil { + return fmt.Errorf("this.Hostname == nil && that.Hostname != nil") + } else if that1.Hostname != nil { + return fmt.Errorf("Hostname this(%v) Not Equal that(%v)", this.Hostname, that1.Hostname) + } + if this.Version != nil && that1.Version != nil { + if *this.Version != *that1.Version { + return fmt.Errorf("Version this(%v) Not Equal that(%v)", *this.Version, *that1.Version) + } + } else if this.Version != nil { + return fmt.Errorf("this.Version == nil && that.Version != nil") + } else if that1.Version != nil { + return fmt.Errorf("Version this(%v) Not Equal that(%v)", this.Version, that1.Version) + } + if !this.Address.Equal(that1.Address) { + return fmt.Errorf("Address this(%v) Not Equal that(%v)", this.Address, that1.Address) + } + if !this.Domain.Equal(that1.Domain) { + return fmt.Errorf("Domain this(%v) Not Equal that(%v)", this.Domain, that1.Domain) + } + if len(this.Capabilities) != len(that1.Capabilities) { + return fmt.Errorf("Capabilities this(%v) Not Equal that(%v)", len(this.Capabilities), len(that1.Capabilities)) + } + for i := range this.Capabilities { + if !this.Capabilities[i].Equal(&that1.Capabilities[i]) { + return fmt.Errorf("Capabilities this[%v](%v) Not Equal that[%v](%v)", i, this.Capabilities[i], i, that1.Capabilities[i]) + } + } + return nil +} +func (this *MasterInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*MasterInfo) + if !ok { + that2, ok := that.(MasterInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.ID != that1.ID { + return false + } + if this.IP != that1.IP { + return false + } + if this.Port != nil && that1.Port != nil { 
+ if *this.Port != *that1.Port { + return false + } + } else if this.Port != nil { + return false + } else if that1.Port != nil { + return false + } + if this.PID != nil && that1.PID != nil { + if *this.PID != *that1.PID { + return false + } + } else if this.PID != nil { + return false + } else if that1.PID != nil { + return false + } + if this.Hostname != nil && that1.Hostname != nil { + if *this.Hostname != *that1.Hostname { + return false + } + } else if this.Hostname != nil { + return false + } else if that1.Hostname != nil { + return false + } + if this.Version != nil && that1.Version != nil { + if *this.Version != *that1.Version { + return false + } + } else if this.Version != nil { + return false + } else if that1.Version != nil { + return false + } + if !this.Address.Equal(that1.Address) { + return false + } + if !this.Domain.Equal(that1.Domain) { + return false + } + if len(this.Capabilities) != len(that1.Capabilities) { + return false + } + for i := range this.Capabilities { + if !this.Capabilities[i].Equal(&that1.Capabilities[i]) { + return false + } + } + return true +} +func (this *MasterInfo_Capability) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*MasterInfo_Capability) + if !ok { + that2, ok := that.(MasterInfo_Capability) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *MasterInfo_Capability") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *MasterInfo_Capability but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *MasterInfo_Capability but is not nil && this == nil") + } + if this.Type != that1.Type { + return fmt.Errorf("Type this(%v) Not Equal that(%v)", this.Type, that1.Type) + } + return nil +} +func (this *MasterInfo_Capability) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*MasterInfo_Capability) + if !ok { + that2, ok := that.(MasterInfo_Capability) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + return true +} +func (this *AgentInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*AgentInfo) + if !ok { + that2, ok := that.(AgentInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *AgentInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *AgentInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *AgentInfo but is not nil && this == nil") + } + if this.Hostname != that1.Hostname { + return fmt.Errorf("Hostname this(%v) Not Equal that(%v)", this.Hostname, that1.Hostname) + } + if this.Port != nil && that1.Port != nil { + if *this.Port != *that1.Port { + return fmt.Errorf("Port this(%v) Not Equal that(%v)", *this.Port, *that1.Port) + } + } else if this.Port != nil { + return fmt.Errorf("this.Port == nil && that.Port != nil") + } else if that1.Port != nil { + return fmt.Errorf("Port this(%v) Not Equal that(%v)", this.Port, that1.Port) + } + if len(this.Resources) != len(that1.Resources) { + return fmt.Errorf("Resources 
this(%v) Not Equal that(%v)", len(this.Resources), len(that1.Resources)) + } + for i := range this.Resources { + if !this.Resources[i].Equal(&that1.Resources[i]) { + return fmt.Errorf("Resources this[%v](%v) Not Equal that[%v](%v)", i, this.Resources[i], i, that1.Resources[i]) + } + } + if len(this.Attributes) != len(that1.Attributes) { + return fmt.Errorf("Attributes this(%v) Not Equal that(%v)", len(this.Attributes), len(that1.Attributes)) + } + for i := range this.Attributes { + if !this.Attributes[i].Equal(&that1.Attributes[i]) { + return fmt.Errorf("Attributes this[%v](%v) Not Equal that[%v](%v)", i, this.Attributes[i], i, that1.Attributes[i]) + } + } + if !this.ID.Equal(that1.ID) { + return fmt.Errorf("ID this(%v) Not Equal that(%v)", this.ID, that1.ID) + } + if !this.Domain.Equal(that1.Domain) { + return fmt.Errorf("Domain this(%v) Not Equal that(%v)", this.Domain, that1.Domain) + } + return nil +} +func (this *AgentInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*AgentInfo) + if !ok { + that2, ok := that.(AgentInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Hostname != that1.Hostname { + return false + } + if this.Port != nil && that1.Port != nil { + if *this.Port != *that1.Port { + return false + } + } else if this.Port != nil { + return false + } else if that1.Port != nil { + return false + } + if len(this.Resources) != len(that1.Resources) { + return false + } + for i := range this.Resources { + if !this.Resources[i].Equal(&that1.Resources[i]) { + return false + } + } + if len(this.Attributes) != len(that1.Attributes) { + return false + } + for i := range this.Attributes { + if !this.Attributes[i].Equal(&that1.Attributes[i]) { + return false + } + } + if !this.ID.Equal(that1.ID) { + return false + } + if !this.Domain.Equal(that1.Domain) { + return false + } + return true +} +func (this *AgentInfo_Capability) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*AgentInfo_Capability) + if !ok { + that2, ok := that.(AgentInfo_Capability) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *AgentInfo_Capability") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *AgentInfo_Capability but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *AgentInfo_Capability but is not nil && this == nil") + } + if this.Type != that1.Type { + return fmt.Errorf("Type this(%v) Not Equal that(%v)", this.Type, that1.Type) + } + return nil +} +func (this *AgentInfo_Capability) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*AgentInfo_Capability) + if !ok { + that2, ok := that.(AgentInfo_Capability) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + return true +} +func (this *CSIPluginContainerInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*CSIPluginContainerInfo) + if 
!ok { + that2, ok := that.(CSIPluginContainerInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *CSIPluginContainerInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *CSIPluginContainerInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *CSIPluginContainerInfo but is not nil && this == nil") + } + if len(this.Services) != len(that1.Services) { + return fmt.Errorf("Services this(%v) Not Equal that(%v)", len(this.Services), len(that1.Services)) + } + for i := range this.Services { + if this.Services[i] != that1.Services[i] { + return fmt.Errorf("Services this[%v](%v) Not Equal that[%v](%v)", i, this.Services[i], i, that1.Services[i]) + } + } + if !this.Command.Equal(that1.Command) { + return fmt.Errorf("Command this(%v) Not Equal that(%v)", this.Command, that1.Command) + } + if len(this.Resources) != len(that1.Resources) { + return fmt.Errorf("Resources this(%v) Not Equal that(%v)", len(this.Resources), len(that1.Resources)) + } + for i := range this.Resources { + if !this.Resources[i].Equal(&that1.Resources[i]) { + return fmt.Errorf("Resources this[%v](%v) Not Equal that[%v](%v)", i, this.Resources[i], i, that1.Resources[i]) + } + } + if !this.Container.Equal(that1.Container) { + return fmt.Errorf("Container this(%v) Not Equal that(%v)", this.Container, that1.Container) + } + return nil +} +func (this *CSIPluginContainerInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*CSIPluginContainerInfo) + if !ok { + that2, ok := that.(CSIPluginContainerInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if len(this.Services) != len(that1.Services) { + return false + } + for i := range this.Services { + if this.Services[i] != that1.Services[i] { + return false + } + } + if !this.Command.Equal(that1.Command) { + return false + } + if len(this.Resources) != len(that1.Resources) { + return false + } + for i := range this.Resources { + if !this.Resources[i].Equal(&that1.Resources[i]) { + return false + } + } + if !this.Container.Equal(that1.Container) { + return false + } + return true +} +func (this *CSIPluginInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*CSIPluginInfo) + if !ok { + that2, ok := that.(CSIPluginInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *CSIPluginInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *CSIPluginInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *CSIPluginInfo but is not nil && this == nil") + } + if this.Type != that1.Type { + return fmt.Errorf("Type this(%v) Not Equal that(%v)", this.Type, that1.Type) + } + if this.Name != that1.Name { + return fmt.Errorf("Name this(%v) Not Equal that(%v)", this.Name, that1.Name) + } + if len(this.Containers) != len(that1.Containers) { + return fmt.Errorf("Containers this(%v) Not Equal that(%v)", len(this.Containers), len(that1.Containers)) + } + for i := range this.Containers { + if !this.Containers[i].Equal(&that1.Containers[i]) { + return fmt.Errorf("Containers this[%v](%v) Not Equal that[%v](%v)", i, this.Containers[i], i, 
that1.Containers[i]) + } + } + return nil +} +func (this *CSIPluginInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*CSIPluginInfo) + if !ok { + that2, ok := that.(CSIPluginInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + if this.Name != that1.Name { + return false + } + if len(this.Containers) != len(that1.Containers) { + return false + } + for i := range this.Containers { + if !this.Containers[i].Equal(&that1.Containers[i]) { + return false + } + } + return true +} +func (this *ResourceProviderInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*ResourceProviderInfo) + if !ok { + that2, ok := that.(ResourceProviderInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *ResourceProviderInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *ResourceProviderInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *ResourceProviderInfo but is not nil && this == nil") + } + if !this.ID.Equal(that1.ID) { + return fmt.Errorf("ID this(%v) Not Equal that(%v)", this.ID, that1.ID) + } + if len(this.Attributes) != len(that1.Attributes) { + return fmt.Errorf("Attributes this(%v) Not Equal that(%v)", len(this.Attributes), len(that1.Attributes)) + } + for i := range this.Attributes { + if !this.Attributes[i].Equal(&that1.Attributes[i]) { + return fmt.Errorf("Attributes this[%v](%v) Not Equal that[%v](%v)", i, this.Attributes[i], i, that1.Attributes[i]) + } + } + if this.Type != that1.Type { + return fmt.Errorf("Type this(%v) Not Equal that(%v)", this.Type, that1.Type) + } + if this.Name != that1.Name { + return fmt.Errorf("Name this(%v) Not Equal that(%v)", this.Name, that1.Name) + } + if len(this.DefaultReservations) != len(that1.DefaultReservations) { + return fmt.Errorf("DefaultReservations this(%v) Not Equal that(%v)", len(this.DefaultReservations), len(that1.DefaultReservations)) + } + for i := range this.DefaultReservations { + if !this.DefaultReservations[i].Equal(&that1.DefaultReservations[i]) { + return fmt.Errorf("DefaultReservations this[%v](%v) Not Equal that[%v](%v)", i, this.DefaultReservations[i], i, that1.DefaultReservations[i]) + } + } + if !this.Storage.Equal(that1.Storage) { + return fmt.Errorf("Storage this(%v) Not Equal that(%v)", this.Storage, that1.Storage) + } + return nil +} +func (this *ResourceProviderInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*ResourceProviderInfo) + if !ok { + that2, ok := that.(ResourceProviderInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if !this.ID.Equal(that1.ID) { + return false + } + if len(this.Attributes) != len(that1.Attributes) { + return false + } + for i := range this.Attributes { + if !this.Attributes[i].Equal(&that1.Attributes[i]) { + return false + } + } + if this.Type != that1.Type { + return false + } + if this.Name != that1.Name { + return false + } + if len(this.DefaultReservations) != 
len(that1.DefaultReservations) { + return false + } + for i := range this.DefaultReservations { + if !this.DefaultReservations[i].Equal(&that1.DefaultReservations[i]) { + return false + } + } + if !this.Storage.Equal(that1.Storage) { + return false + } + return true +} +func (this *ResourceProviderInfo_Storage) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*ResourceProviderInfo_Storage) + if !ok { + that2, ok := that.(ResourceProviderInfo_Storage) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *ResourceProviderInfo_Storage") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *ResourceProviderInfo_Storage but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *ResourceProviderInfo_Storage but is not nil && this == nil") + } + if !this.Plugin.Equal(&that1.Plugin) { + return fmt.Errorf("Plugin this(%v) Not Equal that(%v)", this.Plugin, that1.Plugin) + } + return nil +} +func (this *ResourceProviderInfo_Storage) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*ResourceProviderInfo_Storage) + if !ok { + that2, ok := that.(ResourceProviderInfo_Storage) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if !this.Plugin.Equal(&that1.Plugin) { + return false + } + return true +} +func (this *Value) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Value) + if !ok { + that2, ok := that.(Value) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Value") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Value but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Value but is not nil && this == nil") + } + if this.Type != that1.Type { + return fmt.Errorf("Type this(%v) Not Equal that(%v)", this.Type, that1.Type) + } + if !this.Scalar.Equal(that1.Scalar) { + return fmt.Errorf("Scalar this(%v) Not Equal that(%v)", this.Scalar, that1.Scalar) + } + if !this.Ranges.Equal(that1.Ranges) { + return fmt.Errorf("Ranges this(%v) Not Equal that(%v)", this.Ranges, that1.Ranges) + } + if !this.Set.Equal(that1.Set) { + return fmt.Errorf("Set this(%v) Not Equal that(%v)", this.Set, that1.Set) + } + if !this.Text.Equal(that1.Text) { + return fmt.Errorf("Text this(%v) Not Equal that(%v)", this.Text, that1.Text) + } + return nil +} +func (this *Value) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Value) + if !ok { + that2, ok := that.(Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + if !this.Scalar.Equal(that1.Scalar) { + return false + } + if !this.Ranges.Equal(that1.Ranges) { + return false + } + if !this.Set.Equal(that1.Set) { + return false + } + if !this.Text.Equal(that1.Text) { + return false + } + return true +} +func (this *Value_Scalar) VerboseEqual(that interface{}) error { + 
if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Value_Scalar) + if !ok { + that2, ok := that.(Value_Scalar) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Value_Scalar") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Value_Scalar but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Value_Scalar but is not nil && this == nil") + } + if this.Value != that1.Value { + return fmt.Errorf("Value this(%v) Not Equal that(%v)", this.Value, that1.Value) + } + return nil +} +func (this *Value_Scalar) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Value_Scalar) + if !ok { + that2, ok := that.(Value_Scalar) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *Value_Range) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Value_Range) + if !ok { + that2, ok := that.(Value_Range) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Value_Range") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Value_Range but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Value_Range but is not nil && this == nil") + } + if this.Begin != that1.Begin { + return fmt.Errorf("Begin this(%v) Not Equal that(%v)", this.Begin, that1.Begin) + } + if this.End != that1.End { + return fmt.Errorf("End this(%v) Not Equal that(%v)", this.End, that1.End) + } + return nil +} +func (this *Value_Range) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Value_Range) + if !ok { + that2, ok := that.(Value_Range) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Begin != that1.Begin { + return false + } + if this.End != that1.End { + return false + } + return true +} +func (this *Value_Ranges) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Value_Ranges) + if !ok { + that2, ok := that.(Value_Ranges) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Value_Ranges") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Value_Ranges but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Value_Ranges but is not nil && this == nil") + } + if len(this.Range) != len(that1.Range) { + return fmt.Errorf("Range this(%v) Not Equal that(%v)", len(this.Range), len(that1.Range)) + } + for i := range this.Range { + if !this.Range[i].Equal(&that1.Range[i]) { + return fmt.Errorf("Range this[%v](%v) Not Equal that[%v](%v)", i, this.Range[i], i, that1.Range[i]) + } + } + return nil +} +func (this *Value_Ranges) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return 
false + } + + that1, ok := that.(*Value_Ranges) + if !ok { + that2, ok := that.(Value_Ranges) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if len(this.Range) != len(that1.Range) { + return false + } + for i := range this.Range { + if !this.Range[i].Equal(&that1.Range[i]) { + return false + } + } + return true +} +func (this *Value_Set) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Value_Set) + if !ok { + that2, ok := that.(Value_Set) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Value_Set") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Value_Set but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Value_Set but is not nil && this == nil") + } + if len(this.Item) != len(that1.Item) { + return fmt.Errorf("Item this(%v) Not Equal that(%v)", len(this.Item), len(that1.Item)) + } + for i := range this.Item { + if this.Item[i] != that1.Item[i] { + return fmt.Errorf("Item this[%v](%v) Not Equal that[%v](%v)", i, this.Item[i], i, that1.Item[i]) + } + } + return nil +} +func (this *Value_Set) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Value_Set) + if !ok { + that2, ok := that.(Value_Set) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if len(this.Item) != len(that1.Item) { + return false + } + for i := range this.Item { + if this.Item[i] != that1.Item[i] { + return false + } + } + return true +} +func (this *Value_Text) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Value_Text) + if !ok { + that2, ok := that.(Value_Text) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Value_Text") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Value_Text but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Value_Text but is not nil && this == nil") + } + if this.Value != that1.Value { + return fmt.Errorf("Value this(%v) Not Equal that(%v)", this.Value, that1.Value) + } + return nil +} +func (this *Value_Text) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Value_Text) + if !ok { + that2, ok := that.(Value_Text) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *Attribute) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Attribute) + if !ok { + that2, ok := that.(Attribute) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Attribute") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Attribute but is 
nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Attribute but is not nil && this == nil") + } + if this.Name != that1.Name { + return fmt.Errorf("Name this(%v) Not Equal that(%v)", this.Name, that1.Name) + } + if this.Type != that1.Type { + return fmt.Errorf("Type this(%v) Not Equal that(%v)", this.Type, that1.Type) + } + if !this.Scalar.Equal(that1.Scalar) { + return fmt.Errorf("Scalar this(%v) Not Equal that(%v)", this.Scalar, that1.Scalar) + } + if !this.Ranges.Equal(that1.Ranges) { + return fmt.Errorf("Ranges this(%v) Not Equal that(%v)", this.Ranges, that1.Ranges) + } + if !this.Set.Equal(that1.Set) { + return fmt.Errorf("Set this(%v) Not Equal that(%v)", this.Set, that1.Set) + } + if !this.Text.Equal(that1.Text) { + return fmt.Errorf("Text this(%v) Not Equal that(%v)", this.Text, that1.Text) + } + return nil +} +func (this *Attribute) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Attribute) + if !ok { + that2, ok := that.(Attribute) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Type != that1.Type { + return false + } + if !this.Scalar.Equal(that1.Scalar) { + return false + } + if !this.Ranges.Equal(that1.Ranges) { + return false + } + if !this.Set.Equal(that1.Set) { + return false + } + if !this.Text.Equal(that1.Text) { + return false + } + return true +} +func (this *Resource) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Resource) + if !ok { + that2, ok := that.(Resource) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Resource") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Resource but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Resource but is not nil && this == nil") + } + if !this.ProviderID.Equal(that1.ProviderID) { + return fmt.Errorf("ProviderID this(%v) Not Equal that(%v)", this.ProviderID, that1.ProviderID) + } + if this.Name != that1.Name { + return fmt.Errorf("Name this(%v) Not Equal that(%v)", this.Name, that1.Name) + } + if this.Type != nil && that1.Type != nil { + if *this.Type != *that1.Type { + return fmt.Errorf("Type this(%v) Not Equal that(%v)", *this.Type, *that1.Type) + } + } else if this.Type != nil { + return fmt.Errorf("this.Type == nil && that.Type != nil") + } else if that1.Type != nil { + return fmt.Errorf("Type this(%v) Not Equal that(%v)", this.Type, that1.Type) + } + if !this.Scalar.Equal(that1.Scalar) { + return fmt.Errorf("Scalar this(%v) Not Equal that(%v)", this.Scalar, that1.Scalar) + } + if !this.Ranges.Equal(that1.Ranges) { + return fmt.Errorf("Ranges this(%v) Not Equal that(%v)", this.Ranges, that1.Ranges) + } + if !this.Set.Equal(that1.Set) { + return fmt.Errorf("Set this(%v) Not Equal that(%v)", this.Set, that1.Set) + } + if this.Role != nil && that1.Role != nil { + if *this.Role != *that1.Role { + return fmt.Errorf("Role this(%v) Not Equal that(%v)", *this.Role, *that1.Role) + } + } else if this.Role != nil { + return fmt.Errorf("this.Role == nil && that.Role != nil") + } else if that1.Role != nil { + return fmt.Errorf("Role this(%v) Not Equal that(%v)", this.Role, that1.Role) + } + if 
!this.AllocationInfo.Equal(that1.AllocationInfo) { + return fmt.Errorf("AllocationInfo this(%v) Not Equal that(%v)", this.AllocationInfo, that1.AllocationInfo) + } + if !this.Reservation.Equal(that1.Reservation) { + return fmt.Errorf("Reservation this(%v) Not Equal that(%v)", this.Reservation, that1.Reservation) + } + if len(this.Reservations) != len(that1.Reservations) { + return fmt.Errorf("Reservations this(%v) Not Equal that(%v)", len(this.Reservations), len(that1.Reservations)) + } + for i := range this.Reservations { + if !this.Reservations[i].Equal(&that1.Reservations[i]) { + return fmt.Errorf("Reservations this[%v](%v) Not Equal that[%v](%v)", i, this.Reservations[i], i, that1.Reservations[i]) + } + } + if !this.Disk.Equal(that1.Disk) { + return fmt.Errorf("Disk this(%v) Not Equal that(%v)", this.Disk, that1.Disk) + } + if !this.Revocable.Equal(that1.Revocable) { + return fmt.Errorf("Revocable this(%v) Not Equal that(%v)", this.Revocable, that1.Revocable) + } + if !this.Shared.Equal(that1.Shared) { + return fmt.Errorf("Shared this(%v) Not Equal that(%v)", this.Shared, that1.Shared) + } + return nil +} +func (this *Resource) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Resource) + if !ok { + that2, ok := that.(Resource) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if !this.ProviderID.Equal(that1.ProviderID) { + return false + } + if this.Name != that1.Name { + return false + } + if this.Type != nil && that1.Type != nil { + if *this.Type != *that1.Type { + return false + } + } else if this.Type != nil { + return false + } else if that1.Type != nil { + return false + } + if !this.Scalar.Equal(that1.Scalar) { + return false + } + if !this.Ranges.Equal(that1.Ranges) { + return false + } + if !this.Set.Equal(that1.Set) { + return false + } + if this.Role != nil && that1.Role != nil { + if *this.Role != *that1.Role { + return false + } + } else if this.Role != nil { + return false + } else if that1.Role != nil { + return false + } + if !this.AllocationInfo.Equal(that1.AllocationInfo) { + return false + } + if !this.Reservation.Equal(that1.Reservation) { + return false + } + if len(this.Reservations) != len(that1.Reservations) { + return false + } + for i := range this.Reservations { + if !this.Reservations[i].Equal(&that1.Reservations[i]) { + return false + } + } + if !this.Disk.Equal(that1.Disk) { + return false + } + if !this.Revocable.Equal(that1.Revocable) { + return false + } + if !this.Shared.Equal(that1.Shared) { + return false + } + return true +} +func (this *Resource_AllocationInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Resource_AllocationInfo) + if !ok { + that2, ok := that.(Resource_AllocationInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Resource_AllocationInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Resource_AllocationInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Resource_AllocationInfo but is not nil && this == nil") + } + if this.Role != nil && that1.Role != nil { + if *this.Role != *that1.Role { + return fmt.Errorf("Role this(%v) Not Equal that(%v)", *this.Role, *that1.Role) + } 
+ } else if this.Role != nil { + return fmt.Errorf("this.Role == nil && that.Role != nil") + } else if that1.Role != nil { + return fmt.Errorf("Role this(%v) Not Equal that(%v)", this.Role, that1.Role) + } + return nil +} +func (this *Resource_AllocationInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Resource_AllocationInfo) + if !ok { + that2, ok := that.(Resource_AllocationInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Role != nil && that1.Role != nil { + if *this.Role != *that1.Role { + return false + } + } else if this.Role != nil { + return false + } else if that1.Role != nil { + return false + } + return true +} +func (this *Resource_ReservationInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Resource_ReservationInfo) + if !ok { + that2, ok := that.(Resource_ReservationInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Resource_ReservationInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Resource_ReservationInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Resource_ReservationInfo but is not nil && this == nil") + } + if this.Type != nil && that1.Type != nil { + if *this.Type != *that1.Type { + return fmt.Errorf("Type this(%v) Not Equal that(%v)", *this.Type, *that1.Type) + } + } else if this.Type != nil { + return fmt.Errorf("this.Type == nil && that.Type != nil") + } else if that1.Type != nil { + return fmt.Errorf("Type this(%v) Not Equal that(%v)", this.Type, that1.Type) + } + if this.Role != nil && that1.Role != nil { + if *this.Role != *that1.Role { + return fmt.Errorf("Role this(%v) Not Equal that(%v)", *this.Role, *that1.Role) + } + } else if this.Role != nil { + return fmt.Errorf("this.Role == nil && that.Role != nil") + } else if that1.Role != nil { + return fmt.Errorf("Role this(%v) Not Equal that(%v)", this.Role, that1.Role) + } + if this.Principal != nil && that1.Principal != nil { + if *this.Principal != *that1.Principal { + return fmt.Errorf("Principal this(%v) Not Equal that(%v)", *this.Principal, *that1.Principal) + } + } else if this.Principal != nil { + return fmt.Errorf("this.Principal == nil && that.Principal != nil") + } else if that1.Principal != nil { + return fmt.Errorf("Principal this(%v) Not Equal that(%v)", this.Principal, that1.Principal) + } + if !this.Labels.Equal(that1.Labels) { + return fmt.Errorf("Labels this(%v) Not Equal that(%v)", this.Labels, that1.Labels) + } + return nil +} +func (this *Resource_ReservationInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Resource_ReservationInfo) + if !ok { + that2, ok := that.(Resource_ReservationInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Type != nil && that1.Type != nil { + if *this.Type != *that1.Type { + return false + } + } else if this.Type != nil { + return false + } else if that1.Type != nil { + return false + } + if this.Role != nil && that1.Role != nil { + if *this.Role != 
*that1.Role { + return false + } + } else if this.Role != nil { + return false + } else if that1.Role != nil { + return false + } + if this.Principal != nil && that1.Principal != nil { + if *this.Principal != *that1.Principal { + return false + } + } else if this.Principal != nil { + return false + } else if that1.Principal != nil { + return false + } + if !this.Labels.Equal(that1.Labels) { + return false + } + return true +} +func (this *Resource_DiskInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Resource_DiskInfo) + if !ok { + that2, ok := that.(Resource_DiskInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Resource_DiskInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Resource_DiskInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Resource_DiskInfo but is not nil && this == nil") + } + if !this.Persistence.Equal(that1.Persistence) { + return fmt.Errorf("Persistence this(%v) Not Equal that(%v)", this.Persistence, that1.Persistence) + } + if !this.Volume.Equal(that1.Volume) { + return fmt.Errorf("Volume this(%v) Not Equal that(%v)", this.Volume, that1.Volume) + } + if !this.Source.Equal(that1.Source) { + return fmt.Errorf("Source this(%v) Not Equal that(%v)", this.Source, that1.Source) + } + return nil +} +func (this *Resource_DiskInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Resource_DiskInfo) + if !ok { + that2, ok := that.(Resource_DiskInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if !this.Persistence.Equal(that1.Persistence) { + return false + } + if !this.Volume.Equal(that1.Volume) { + return false + } + if !this.Source.Equal(that1.Source) { + return false + } + return true +} +func (this *Resource_DiskInfo_Persistence) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Resource_DiskInfo_Persistence) + if !ok { + that2, ok := that.(Resource_DiskInfo_Persistence) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Resource_DiskInfo_Persistence") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Resource_DiskInfo_Persistence but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Resource_DiskInfo_Persistence but is not nil && this == nil") + } + if this.ID != that1.ID { + return fmt.Errorf("ID this(%v) Not Equal that(%v)", this.ID, that1.ID) + } + if this.Principal != nil && that1.Principal != nil { + if *this.Principal != *that1.Principal { + return fmt.Errorf("Principal this(%v) Not Equal that(%v)", *this.Principal, *that1.Principal) + } + } else if this.Principal != nil { + return fmt.Errorf("this.Principal == nil && that.Principal != nil") + } else if that1.Principal != nil { + return fmt.Errorf("Principal this(%v) Not Equal that(%v)", this.Principal, that1.Principal) + } + return nil +} +func (this *Resource_DiskInfo_Persistence) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := 
that.(*Resource_DiskInfo_Persistence) + if !ok { + that2, ok := that.(Resource_DiskInfo_Persistence) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.ID != that1.ID { + return false + } + if this.Principal != nil && that1.Principal != nil { + if *this.Principal != *that1.Principal { + return false + } + } else if this.Principal != nil { + return false + } else if that1.Principal != nil { + return false + } + return true +} +func (this *Resource_DiskInfo_Source) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Resource_DiskInfo_Source) + if !ok { + that2, ok := that.(Resource_DiskInfo_Source) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Resource_DiskInfo_Source") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Resource_DiskInfo_Source but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Resource_DiskInfo_Source but is not nil && this == nil") + } + if this.Type != that1.Type { + return fmt.Errorf("Type this(%v) Not Equal that(%v)", this.Type, that1.Type) + } + if !this.Path.Equal(that1.Path) { + return fmt.Errorf("Path this(%v) Not Equal that(%v)", this.Path, that1.Path) + } + if !this.Mount.Equal(that1.Mount) { + return fmt.Errorf("Mount this(%v) Not Equal that(%v)", this.Mount, that1.Mount) + } + if this.ID != nil && that1.ID != nil { + if *this.ID != *that1.ID { + return fmt.Errorf("ID this(%v) Not Equal that(%v)", *this.ID, *that1.ID) + } + } else if this.ID != nil { + return fmt.Errorf("this.ID == nil && that.ID != nil") + } else if that1.ID != nil { + return fmt.Errorf("ID this(%v) Not Equal that(%v)", this.ID, that1.ID) + } + if !this.Metadata.Equal(that1.Metadata) { + return fmt.Errorf("Metadata this(%v) Not Equal that(%v)", this.Metadata, that1.Metadata) + } + if this.Profile != nil && that1.Profile != nil { + if *this.Profile != *that1.Profile { + return fmt.Errorf("Profile this(%v) Not Equal that(%v)", *this.Profile, *that1.Profile) + } + } else if this.Profile != nil { + return fmt.Errorf("this.Profile == nil && that.Profile != nil") + } else if that1.Profile != nil { + return fmt.Errorf("Profile this(%v) Not Equal that(%v)", this.Profile, that1.Profile) + } + return nil +} +func (this *Resource_DiskInfo_Source) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Resource_DiskInfo_Source) + if !ok { + that2, ok := that.(Resource_DiskInfo_Source) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + if !this.Path.Equal(that1.Path) { + return false + } + if !this.Mount.Equal(that1.Mount) { + return false + } + if this.ID != nil && that1.ID != nil { + if *this.ID != *that1.ID { + return false + } + } else if this.ID != nil { + return false + } else if that1.ID != nil { + return false + } + if !this.Metadata.Equal(that1.Metadata) { + return false + } + if this.Profile != nil && that1.Profile != nil { + if *this.Profile != *that1.Profile { + return false + } + } else if this.Profile != nil { + return false + } else if that1.Profile != nil { + return false + 
} + return true +} +func (this *Resource_DiskInfo_Source_Path) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Resource_DiskInfo_Source_Path) + if !ok { + that2, ok := that.(Resource_DiskInfo_Source_Path) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Resource_DiskInfo_Source_Path") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Resource_DiskInfo_Source_Path but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Resource_DiskInfo_Source_Path but is not nil && this == nil") + } + if this.Root != nil && that1.Root != nil { + if *this.Root != *that1.Root { + return fmt.Errorf("Root this(%v) Not Equal that(%v)", *this.Root, *that1.Root) + } + } else if this.Root != nil { + return fmt.Errorf("this.Root == nil && that.Root != nil") + } else if that1.Root != nil { + return fmt.Errorf("Root this(%v) Not Equal that(%v)", this.Root, that1.Root) + } + return nil +} +func (this *Resource_DiskInfo_Source_Path) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Resource_DiskInfo_Source_Path) + if !ok { + that2, ok := that.(Resource_DiskInfo_Source_Path) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Root != nil && that1.Root != nil { + if *this.Root != *that1.Root { + return false + } + } else if this.Root != nil { + return false + } else if that1.Root != nil { + return false + } + return true +} +func (this *Resource_DiskInfo_Source_Mount) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Resource_DiskInfo_Source_Mount) + if !ok { + that2, ok := that.(Resource_DiskInfo_Source_Mount) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Resource_DiskInfo_Source_Mount") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Resource_DiskInfo_Source_Mount but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Resource_DiskInfo_Source_Mount but is not nil && this == nil") + } + if this.Root != nil && that1.Root != nil { + if *this.Root != *that1.Root { + return fmt.Errorf("Root this(%v) Not Equal that(%v)", *this.Root, *that1.Root) + } + } else if this.Root != nil { + return fmt.Errorf("this.Root == nil && that.Root != nil") + } else if that1.Root != nil { + return fmt.Errorf("Root this(%v) Not Equal that(%v)", this.Root, that1.Root) + } + return nil +} +func (this *Resource_DiskInfo_Source_Mount) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Resource_DiskInfo_Source_Mount) + if !ok { + that2, ok := that.(Resource_DiskInfo_Source_Mount) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Root != nil && that1.Root != nil { + if *this.Root != *that1.Root { + return false + } + } else if this.Root != nil { + return false + } else if that1.Root != nil { + return false + } + return true +} +func (this 
*Resource_RevocableInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Resource_RevocableInfo) + if !ok { + that2, ok := that.(Resource_RevocableInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Resource_RevocableInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Resource_RevocableInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Resource_RevocableInfo but is not nil && this == nil") + } + return nil +} +func (this *Resource_RevocableInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Resource_RevocableInfo) + if !ok { + that2, ok := that.(Resource_RevocableInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + return true +} +func (this *Resource_SharedInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Resource_SharedInfo) + if !ok { + that2, ok := that.(Resource_SharedInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Resource_SharedInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Resource_SharedInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Resource_SharedInfo but is not nil && this == nil") + } + return nil +} +func (this *Resource_SharedInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Resource_SharedInfo) + if !ok { + that2, ok := that.(Resource_SharedInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + return true +} +func (this *TrafficControlStatistics) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*TrafficControlStatistics) + if !ok { + that2, ok := that.(TrafficControlStatistics) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *TrafficControlStatistics") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *TrafficControlStatistics but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *TrafficControlStatistics but is not nil && this == nil") + } + if this.ID != that1.ID { + return fmt.Errorf("ID this(%v) Not Equal that(%v)", this.ID, that1.ID) + } + if this.Backlog != nil && that1.Backlog != nil { + if *this.Backlog != *that1.Backlog { + return fmt.Errorf("Backlog this(%v) Not Equal that(%v)", *this.Backlog, *that1.Backlog) + } + } else if this.Backlog != nil { + return fmt.Errorf("this.Backlog == nil && that.Backlog != nil") + } else if that1.Backlog != nil { + return fmt.Errorf("Backlog this(%v) Not Equal that(%v)", this.Backlog, that1.Backlog) + } + if this.Bytes != nil && that1.Bytes != nil { + if *this.Bytes != *that1.Bytes { + return fmt.Errorf("Bytes this(%v) Not Equal that(%v)", *this.Bytes, 
*that1.Bytes) + } + } else if this.Bytes != nil { + return fmt.Errorf("this.Bytes == nil && that.Bytes != nil") + } else if that1.Bytes != nil { + return fmt.Errorf("Bytes this(%v) Not Equal that(%v)", this.Bytes, that1.Bytes) + } + if this.Drops != nil && that1.Drops != nil { + if *this.Drops != *that1.Drops { + return fmt.Errorf("Drops this(%v) Not Equal that(%v)", *this.Drops, *that1.Drops) + } + } else if this.Drops != nil { + return fmt.Errorf("this.Drops == nil && that.Drops != nil") + } else if that1.Drops != nil { + return fmt.Errorf("Drops this(%v) Not Equal that(%v)", this.Drops, that1.Drops) + } + if this.Overlimits != nil && that1.Overlimits != nil { + if *this.Overlimits != *that1.Overlimits { + return fmt.Errorf("Overlimits this(%v) Not Equal that(%v)", *this.Overlimits, *that1.Overlimits) + } + } else if this.Overlimits != nil { + return fmt.Errorf("this.Overlimits == nil && that.Overlimits != nil") + } else if that1.Overlimits != nil { + return fmt.Errorf("Overlimits this(%v) Not Equal that(%v)", this.Overlimits, that1.Overlimits) + } + if this.Packets != nil && that1.Packets != nil { + if *this.Packets != *that1.Packets { + return fmt.Errorf("Packets this(%v) Not Equal that(%v)", *this.Packets, *that1.Packets) + } + } else if this.Packets != nil { + return fmt.Errorf("this.Packets == nil && that.Packets != nil") + } else if that1.Packets != nil { + return fmt.Errorf("Packets this(%v) Not Equal that(%v)", this.Packets, that1.Packets) + } + if this.Qlen != nil && that1.Qlen != nil { + if *this.Qlen != *that1.Qlen { + return fmt.Errorf("Qlen this(%v) Not Equal that(%v)", *this.Qlen, *that1.Qlen) + } + } else if this.Qlen != nil { + return fmt.Errorf("this.Qlen == nil && that.Qlen != nil") + } else if that1.Qlen != nil { + return fmt.Errorf("Qlen this(%v) Not Equal that(%v)", this.Qlen, that1.Qlen) + } + if this.RateBPS != nil && that1.RateBPS != nil { + if *this.RateBPS != *that1.RateBPS { + return fmt.Errorf("RateBPS this(%v) Not Equal that(%v)", *this.RateBPS, *that1.RateBPS) + } + } else if this.RateBPS != nil { + return fmt.Errorf("this.RateBPS == nil && that.RateBPS != nil") + } else if that1.RateBPS != nil { + return fmt.Errorf("RateBPS this(%v) Not Equal that(%v)", this.RateBPS, that1.RateBPS) + } + if this.RatePPS != nil && that1.RatePPS != nil { + if *this.RatePPS != *that1.RatePPS { + return fmt.Errorf("RatePPS this(%v) Not Equal that(%v)", *this.RatePPS, *that1.RatePPS) + } + } else if this.RatePPS != nil { + return fmt.Errorf("this.RatePPS == nil && that.RatePPS != nil") + } else if that1.RatePPS != nil { + return fmt.Errorf("RatePPS this(%v) Not Equal that(%v)", this.RatePPS, that1.RatePPS) + } + if this.Requeues != nil && that1.Requeues != nil { + if *this.Requeues != *that1.Requeues { + return fmt.Errorf("Requeues this(%v) Not Equal that(%v)", *this.Requeues, *that1.Requeues) + } + } else if this.Requeues != nil { + return fmt.Errorf("this.Requeues == nil && that.Requeues != nil") + } else if that1.Requeues != nil { + return fmt.Errorf("Requeues this(%v) Not Equal that(%v)", this.Requeues, that1.Requeues) + } + return nil +} +func (this *TrafficControlStatistics) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*TrafficControlStatistics) + if !ok { + that2, ok := that.(TrafficControlStatistics) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.ID 
!= that1.ID { + return false + } + if this.Backlog != nil && that1.Backlog != nil { + if *this.Backlog != *that1.Backlog { + return false + } + } else if this.Backlog != nil { + return false + } else if that1.Backlog != nil { + return false + } + if this.Bytes != nil && that1.Bytes != nil { + if *this.Bytes != *that1.Bytes { + return false + } + } else if this.Bytes != nil { + return false + } else if that1.Bytes != nil { + return false + } + if this.Drops != nil && that1.Drops != nil { + if *this.Drops != *that1.Drops { + return false + } + } else if this.Drops != nil { + return false + } else if that1.Drops != nil { + return false + } + if this.Overlimits != nil && that1.Overlimits != nil { + if *this.Overlimits != *that1.Overlimits { + return false + } + } else if this.Overlimits != nil { + return false + } else if that1.Overlimits != nil { + return false + } + if this.Packets != nil && that1.Packets != nil { + if *this.Packets != *that1.Packets { + return false + } + } else if this.Packets != nil { + return false + } else if that1.Packets != nil { + return false + } + if this.Qlen != nil && that1.Qlen != nil { + if *this.Qlen != *that1.Qlen { + return false + } + } else if this.Qlen != nil { + return false + } else if that1.Qlen != nil { + return false + } + if this.RateBPS != nil && that1.RateBPS != nil { + if *this.RateBPS != *that1.RateBPS { + return false + } + } else if this.RateBPS != nil { + return false + } else if that1.RateBPS != nil { + return false + } + if this.RatePPS != nil && that1.RatePPS != nil { + if *this.RatePPS != *that1.RatePPS { + return false + } + } else if this.RatePPS != nil { + return false + } else if that1.RatePPS != nil { + return false + } + if this.Requeues != nil && that1.Requeues != nil { + if *this.Requeues != *that1.Requeues { + return false + } + } else if this.Requeues != nil { + return false + } else if that1.Requeues != nil { + return false + } + return true +} +func (this *IpStatistics) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*IpStatistics) + if !ok { + that2, ok := that.(IpStatistics) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *IpStatistics") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *IpStatistics but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *IpStatistics but is not nil && this == nil") + } + if this.Forwarding != nil && that1.Forwarding != nil { + if *this.Forwarding != *that1.Forwarding { + return fmt.Errorf("Forwarding this(%v) Not Equal that(%v)", *this.Forwarding, *that1.Forwarding) + } + } else if this.Forwarding != nil { + return fmt.Errorf("this.Forwarding == nil && that.Forwarding != nil") + } else if that1.Forwarding != nil { + return fmt.Errorf("Forwarding this(%v) Not Equal that(%v)", this.Forwarding, that1.Forwarding) + } + if this.DefaultTTL != nil && that1.DefaultTTL != nil { + if *this.DefaultTTL != *that1.DefaultTTL { + return fmt.Errorf("DefaultTTL this(%v) Not Equal that(%v)", *this.DefaultTTL, *that1.DefaultTTL) + } + } else if this.DefaultTTL != nil { + return fmt.Errorf("this.DefaultTTL == nil && that.DefaultTTL != nil") + } else if that1.DefaultTTL != nil { + return fmt.Errorf("DefaultTTL this(%v) Not Equal that(%v)", this.DefaultTTL, that1.DefaultTTL) + } + if this.InReceives != nil && that1.InReceives != nil { + if *this.InReceives != *that1.InReceives { + 
return fmt.Errorf("InReceives this(%v) Not Equal that(%v)", *this.InReceives, *that1.InReceives) + } + } else if this.InReceives != nil { + return fmt.Errorf("this.InReceives == nil && that.InReceives != nil") + } else if that1.InReceives != nil { + return fmt.Errorf("InReceives this(%v) Not Equal that(%v)", this.InReceives, that1.InReceives) + } + if this.InHdrErrors != nil && that1.InHdrErrors != nil { + if *this.InHdrErrors != *that1.InHdrErrors { + return fmt.Errorf("InHdrErrors this(%v) Not Equal that(%v)", *this.InHdrErrors, *that1.InHdrErrors) + } + } else if this.InHdrErrors != nil { + return fmt.Errorf("this.InHdrErrors == nil && that.InHdrErrors != nil") + } else if that1.InHdrErrors != nil { + return fmt.Errorf("InHdrErrors this(%v) Not Equal that(%v)", this.InHdrErrors, that1.InHdrErrors) + } + if this.InAddrErrors != nil && that1.InAddrErrors != nil { + if *this.InAddrErrors != *that1.InAddrErrors { + return fmt.Errorf("InAddrErrors this(%v) Not Equal that(%v)", *this.InAddrErrors, *that1.InAddrErrors) + } + } else if this.InAddrErrors != nil { + return fmt.Errorf("this.InAddrErrors == nil && that.InAddrErrors != nil") + } else if that1.InAddrErrors != nil { + return fmt.Errorf("InAddrErrors this(%v) Not Equal that(%v)", this.InAddrErrors, that1.InAddrErrors) + } + if this.ForwDatagrams != nil && that1.ForwDatagrams != nil { + if *this.ForwDatagrams != *that1.ForwDatagrams { + return fmt.Errorf("ForwDatagrams this(%v) Not Equal that(%v)", *this.ForwDatagrams, *that1.ForwDatagrams) + } + } else if this.ForwDatagrams != nil { + return fmt.Errorf("this.ForwDatagrams == nil && that.ForwDatagrams != nil") + } else if that1.ForwDatagrams != nil { + return fmt.Errorf("ForwDatagrams this(%v) Not Equal that(%v)", this.ForwDatagrams, that1.ForwDatagrams) + } + if this.InUnknownProtos != nil && that1.InUnknownProtos != nil { + if *this.InUnknownProtos != *that1.InUnknownProtos { + return fmt.Errorf("InUnknownProtos this(%v) Not Equal that(%v)", *this.InUnknownProtos, *that1.InUnknownProtos) + } + } else if this.InUnknownProtos != nil { + return fmt.Errorf("this.InUnknownProtos == nil && that.InUnknownProtos != nil") + } else if that1.InUnknownProtos != nil { + return fmt.Errorf("InUnknownProtos this(%v) Not Equal that(%v)", this.InUnknownProtos, that1.InUnknownProtos) + } + if this.InDiscards != nil && that1.InDiscards != nil { + if *this.InDiscards != *that1.InDiscards { + return fmt.Errorf("InDiscards this(%v) Not Equal that(%v)", *this.InDiscards, *that1.InDiscards) + } + } else if this.InDiscards != nil { + return fmt.Errorf("this.InDiscards == nil && that.InDiscards != nil") + } else if that1.InDiscards != nil { + return fmt.Errorf("InDiscards this(%v) Not Equal that(%v)", this.InDiscards, that1.InDiscards) + } + if this.InDelivers != nil && that1.InDelivers != nil { + if *this.InDelivers != *that1.InDelivers { + return fmt.Errorf("InDelivers this(%v) Not Equal that(%v)", *this.InDelivers, *that1.InDelivers) + } + } else if this.InDelivers != nil { + return fmt.Errorf("this.InDelivers == nil && that.InDelivers != nil") + } else if that1.InDelivers != nil { + return fmt.Errorf("InDelivers this(%v) Not Equal that(%v)", this.InDelivers, that1.InDelivers) + } + if this.OutRequests != nil && that1.OutRequests != nil { + if *this.OutRequests != *that1.OutRequests { + return fmt.Errorf("OutRequests this(%v) Not Equal that(%v)", *this.OutRequests, *that1.OutRequests) + } + } else if this.OutRequests != nil { + return fmt.Errorf("this.OutRequests == nil && that.OutRequests != nil") + } else 
if that1.OutRequests != nil { + return fmt.Errorf("OutRequests this(%v) Not Equal that(%v)", this.OutRequests, that1.OutRequests) + } + if this.OutDiscards != nil && that1.OutDiscards != nil { + if *this.OutDiscards != *that1.OutDiscards { + return fmt.Errorf("OutDiscards this(%v) Not Equal that(%v)", *this.OutDiscards, *that1.OutDiscards) + } + } else if this.OutDiscards != nil { + return fmt.Errorf("this.OutDiscards == nil && that.OutDiscards != nil") + } else if that1.OutDiscards != nil { + return fmt.Errorf("OutDiscards this(%v) Not Equal that(%v)", this.OutDiscards, that1.OutDiscards) + } + if this.OutNoRoutes != nil && that1.OutNoRoutes != nil { + if *this.OutNoRoutes != *that1.OutNoRoutes { + return fmt.Errorf("OutNoRoutes this(%v) Not Equal that(%v)", *this.OutNoRoutes, *that1.OutNoRoutes) + } + } else if this.OutNoRoutes != nil { + return fmt.Errorf("this.OutNoRoutes == nil && that.OutNoRoutes != nil") + } else if that1.OutNoRoutes != nil { + return fmt.Errorf("OutNoRoutes this(%v) Not Equal that(%v)", this.OutNoRoutes, that1.OutNoRoutes) + } + if this.ReasmTimeout != nil && that1.ReasmTimeout != nil { + if *this.ReasmTimeout != *that1.ReasmTimeout { + return fmt.Errorf("ReasmTimeout this(%v) Not Equal that(%v)", *this.ReasmTimeout, *that1.ReasmTimeout) + } + } else if this.ReasmTimeout != nil { + return fmt.Errorf("this.ReasmTimeout == nil && that.ReasmTimeout != nil") + } else if that1.ReasmTimeout != nil { + return fmt.Errorf("ReasmTimeout this(%v) Not Equal that(%v)", this.ReasmTimeout, that1.ReasmTimeout) + } + if this.ReasmReqds != nil && that1.ReasmReqds != nil { + if *this.ReasmReqds != *that1.ReasmReqds { + return fmt.Errorf("ReasmReqds this(%v) Not Equal that(%v)", *this.ReasmReqds, *that1.ReasmReqds) + } + } else if this.ReasmReqds != nil { + return fmt.Errorf("this.ReasmReqds == nil && that.ReasmReqds != nil") + } else if that1.ReasmReqds != nil { + return fmt.Errorf("ReasmReqds this(%v) Not Equal that(%v)", this.ReasmReqds, that1.ReasmReqds) + } + if this.ReasmOKs != nil && that1.ReasmOKs != nil { + if *this.ReasmOKs != *that1.ReasmOKs { + return fmt.Errorf("ReasmOKs this(%v) Not Equal that(%v)", *this.ReasmOKs, *that1.ReasmOKs) + } + } else if this.ReasmOKs != nil { + return fmt.Errorf("this.ReasmOKs == nil && that.ReasmOKs != nil") + } else if that1.ReasmOKs != nil { + return fmt.Errorf("ReasmOKs this(%v) Not Equal that(%v)", this.ReasmOKs, that1.ReasmOKs) + } + if this.ReasmFails != nil && that1.ReasmFails != nil { + if *this.ReasmFails != *that1.ReasmFails { + return fmt.Errorf("ReasmFails this(%v) Not Equal that(%v)", *this.ReasmFails, *that1.ReasmFails) + } + } else if this.ReasmFails != nil { + return fmt.Errorf("this.ReasmFails == nil && that.ReasmFails != nil") + } else if that1.ReasmFails != nil { + return fmt.Errorf("ReasmFails this(%v) Not Equal that(%v)", this.ReasmFails, that1.ReasmFails) + } + if this.FragOKs != nil && that1.FragOKs != nil { + if *this.FragOKs != *that1.FragOKs { + return fmt.Errorf("FragOKs this(%v) Not Equal that(%v)", *this.FragOKs, *that1.FragOKs) + } + } else if this.FragOKs != nil { + return fmt.Errorf("this.FragOKs == nil && that.FragOKs != nil") + } else if that1.FragOKs != nil { + return fmt.Errorf("FragOKs this(%v) Not Equal that(%v)", this.FragOKs, that1.FragOKs) + } + if this.FragFails != nil && that1.FragFails != nil { + if *this.FragFails != *that1.FragFails { + return fmt.Errorf("FragFails this(%v) Not Equal that(%v)", *this.FragFails, *that1.FragFails) + } + } else if this.FragFails != nil { + return 
fmt.Errorf("this.FragFails == nil && that.FragFails != nil") + } else if that1.FragFails != nil { + return fmt.Errorf("FragFails this(%v) Not Equal that(%v)", this.FragFails, that1.FragFails) + } + if this.FragCreates != nil && that1.FragCreates != nil { + if *this.FragCreates != *that1.FragCreates { + return fmt.Errorf("FragCreates this(%v) Not Equal that(%v)", *this.FragCreates, *that1.FragCreates) + } + } else if this.FragCreates != nil { + return fmt.Errorf("this.FragCreates == nil && that.FragCreates != nil") + } else if that1.FragCreates != nil { + return fmt.Errorf("FragCreates this(%v) Not Equal that(%v)", this.FragCreates, that1.FragCreates) + } + return nil +} +func (this *IpStatistics) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*IpStatistics) + if !ok { + that2, ok := that.(IpStatistics) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Forwarding != nil && that1.Forwarding != nil { + if *this.Forwarding != *that1.Forwarding { + return false + } + } else if this.Forwarding != nil { + return false + } else if that1.Forwarding != nil { + return false + } + if this.DefaultTTL != nil && that1.DefaultTTL != nil { + if *this.DefaultTTL != *that1.DefaultTTL { + return false + } + } else if this.DefaultTTL != nil { + return false + } else if that1.DefaultTTL != nil { + return false + } + if this.InReceives != nil && that1.InReceives != nil { + if *this.InReceives != *that1.InReceives { + return false + } + } else if this.InReceives != nil { + return false + } else if that1.InReceives != nil { + return false + } + if this.InHdrErrors != nil && that1.InHdrErrors != nil { + if *this.InHdrErrors != *that1.InHdrErrors { + return false + } + } else if this.InHdrErrors != nil { + return false + } else if that1.InHdrErrors != nil { + return false + } + if this.InAddrErrors != nil && that1.InAddrErrors != nil { + if *this.InAddrErrors != *that1.InAddrErrors { + return false + } + } else if this.InAddrErrors != nil { + return false + } else if that1.InAddrErrors != nil { + return false + } + if this.ForwDatagrams != nil && that1.ForwDatagrams != nil { + if *this.ForwDatagrams != *that1.ForwDatagrams { + return false + } + } else if this.ForwDatagrams != nil { + return false + } else if that1.ForwDatagrams != nil { + return false + } + if this.InUnknownProtos != nil && that1.InUnknownProtos != nil { + if *this.InUnknownProtos != *that1.InUnknownProtos { + return false + } + } else if this.InUnknownProtos != nil { + return false + } else if that1.InUnknownProtos != nil { + return false + } + if this.InDiscards != nil && that1.InDiscards != nil { + if *this.InDiscards != *that1.InDiscards { + return false + } + } else if this.InDiscards != nil { + return false + } else if that1.InDiscards != nil { + return false + } + if this.InDelivers != nil && that1.InDelivers != nil { + if *this.InDelivers != *that1.InDelivers { + return false + } + } else if this.InDelivers != nil { + return false + } else if that1.InDelivers != nil { + return false + } + if this.OutRequests != nil && that1.OutRequests != nil { + if *this.OutRequests != *that1.OutRequests { + return false + } + } else if this.OutRequests != nil { + return false + } else if that1.OutRequests != nil { + return false + } + if this.OutDiscards != nil && that1.OutDiscards != nil { + if *this.OutDiscards != *that1.OutDiscards { + 
return false + } + } else if this.OutDiscards != nil { + return false + } else if that1.OutDiscards != nil { + return false + } + if this.OutNoRoutes != nil && that1.OutNoRoutes != nil { + if *this.OutNoRoutes != *that1.OutNoRoutes { + return false + } + } else if this.OutNoRoutes != nil { + return false + } else if that1.OutNoRoutes != nil { + return false + } + if this.ReasmTimeout != nil && that1.ReasmTimeout != nil { + if *this.ReasmTimeout != *that1.ReasmTimeout { + return false + } + } else if this.ReasmTimeout != nil { + return false + } else if that1.ReasmTimeout != nil { + return false + } + if this.ReasmReqds != nil && that1.ReasmReqds != nil { + if *this.ReasmReqds != *that1.ReasmReqds { + return false + } + } else if this.ReasmReqds != nil { + return false + } else if that1.ReasmReqds != nil { + return false + } + if this.ReasmOKs != nil && that1.ReasmOKs != nil { + if *this.ReasmOKs != *that1.ReasmOKs { + return false + } + } else if this.ReasmOKs != nil { + return false + } else if that1.ReasmOKs != nil { + return false + } + if this.ReasmFails != nil && that1.ReasmFails != nil { + if *this.ReasmFails != *that1.ReasmFails { + return false + } + } else if this.ReasmFails != nil { + return false + } else if that1.ReasmFails != nil { + return false + } + if this.FragOKs != nil && that1.FragOKs != nil { + if *this.FragOKs != *that1.FragOKs { + return false + } + } else if this.FragOKs != nil { + return false + } else if that1.FragOKs != nil { + return false + } + if this.FragFails != nil && that1.FragFails != nil { + if *this.FragFails != *that1.FragFails { + return false + } + } else if this.FragFails != nil { + return false + } else if that1.FragFails != nil { + return false + } + if this.FragCreates != nil && that1.FragCreates != nil { + if *this.FragCreates != *that1.FragCreates { + return false + } + } else if this.FragCreates != nil { + return false + } else if that1.FragCreates != nil { + return false + } + return true +} +func (this *IcmpStatistics) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*IcmpStatistics) + if !ok { + that2, ok := that.(IcmpStatistics) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *IcmpStatistics") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *IcmpStatistics but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *IcmpStatistics but is not nil && this == nil") + } + if this.InMsgs != nil && that1.InMsgs != nil { + if *this.InMsgs != *that1.InMsgs { + return fmt.Errorf("InMsgs this(%v) Not Equal that(%v)", *this.InMsgs, *that1.InMsgs) + } + } else if this.InMsgs != nil { + return fmt.Errorf("this.InMsgs == nil && that.InMsgs != nil") + } else if that1.InMsgs != nil { + return fmt.Errorf("InMsgs this(%v) Not Equal that(%v)", this.InMsgs, that1.InMsgs) + } + if this.InErrors != nil && that1.InErrors != nil { + if *this.InErrors != *that1.InErrors { + return fmt.Errorf("InErrors this(%v) Not Equal that(%v)", *this.InErrors, *that1.InErrors) + } + } else if this.InErrors != nil { + return fmt.Errorf("this.InErrors == nil && that.InErrors != nil") + } else if that1.InErrors != nil { + return fmt.Errorf("InErrors this(%v) Not Equal that(%v)", this.InErrors, that1.InErrors) + } + if this.InCsumErrors != nil && that1.InCsumErrors != nil { + if *this.InCsumErrors != *that1.InCsumErrors { + return fmt.Errorf("InCsumErrors 
this(%v) Not Equal that(%v)", *this.InCsumErrors, *that1.InCsumErrors) + } + } else if this.InCsumErrors != nil { + return fmt.Errorf("this.InCsumErrors == nil && that.InCsumErrors != nil") + } else if that1.InCsumErrors != nil { + return fmt.Errorf("InCsumErrors this(%v) Not Equal that(%v)", this.InCsumErrors, that1.InCsumErrors) + } + if this.InDestUnreachs != nil && that1.InDestUnreachs != nil { + if *this.InDestUnreachs != *that1.InDestUnreachs { + return fmt.Errorf("InDestUnreachs this(%v) Not Equal that(%v)", *this.InDestUnreachs, *that1.InDestUnreachs) + } + } else if this.InDestUnreachs != nil { + return fmt.Errorf("this.InDestUnreachs == nil && that.InDestUnreachs != nil") + } else if that1.InDestUnreachs != nil { + return fmt.Errorf("InDestUnreachs this(%v) Not Equal that(%v)", this.InDestUnreachs, that1.InDestUnreachs) + } + if this.InTimeExcds != nil && that1.InTimeExcds != nil { + if *this.InTimeExcds != *that1.InTimeExcds { + return fmt.Errorf("InTimeExcds this(%v) Not Equal that(%v)", *this.InTimeExcds, *that1.InTimeExcds) + } + } else if this.InTimeExcds != nil { + return fmt.Errorf("this.InTimeExcds == nil && that.InTimeExcds != nil") + } else if that1.InTimeExcds != nil { + return fmt.Errorf("InTimeExcds this(%v) Not Equal that(%v)", this.InTimeExcds, that1.InTimeExcds) + } + if this.InParmProbs != nil && that1.InParmProbs != nil { + if *this.InParmProbs != *that1.InParmProbs { + return fmt.Errorf("InParmProbs this(%v) Not Equal that(%v)", *this.InParmProbs, *that1.InParmProbs) + } + } else if this.InParmProbs != nil { + return fmt.Errorf("this.InParmProbs == nil && that.InParmProbs != nil") + } else if that1.InParmProbs != nil { + return fmt.Errorf("InParmProbs this(%v) Not Equal that(%v)", this.InParmProbs, that1.InParmProbs) + } + if this.InSrcQuenchs != nil && that1.InSrcQuenchs != nil { + if *this.InSrcQuenchs != *that1.InSrcQuenchs { + return fmt.Errorf("InSrcQuenchs this(%v) Not Equal that(%v)", *this.InSrcQuenchs, *that1.InSrcQuenchs) + } + } else if this.InSrcQuenchs != nil { + return fmt.Errorf("this.InSrcQuenchs == nil && that.InSrcQuenchs != nil") + } else if that1.InSrcQuenchs != nil { + return fmt.Errorf("InSrcQuenchs this(%v) Not Equal that(%v)", this.InSrcQuenchs, that1.InSrcQuenchs) + } + if this.InRedirects != nil && that1.InRedirects != nil { + if *this.InRedirects != *that1.InRedirects { + return fmt.Errorf("InRedirects this(%v) Not Equal that(%v)", *this.InRedirects, *that1.InRedirects) + } + } else if this.InRedirects != nil { + return fmt.Errorf("this.InRedirects == nil && that.InRedirects != nil") + } else if that1.InRedirects != nil { + return fmt.Errorf("InRedirects this(%v) Not Equal that(%v)", this.InRedirects, that1.InRedirects) + } + if this.InEchos != nil && that1.InEchos != nil { + if *this.InEchos != *that1.InEchos { + return fmt.Errorf("InEchos this(%v) Not Equal that(%v)", *this.InEchos, *that1.InEchos) + } + } else if this.InEchos != nil { + return fmt.Errorf("this.InEchos == nil && that.InEchos != nil") + } else if that1.InEchos != nil { + return fmt.Errorf("InEchos this(%v) Not Equal that(%v)", this.InEchos, that1.InEchos) + } + if this.InEchoReps != nil && that1.InEchoReps != nil { + if *this.InEchoReps != *that1.InEchoReps { + return fmt.Errorf("InEchoReps this(%v) Not Equal that(%v)", *this.InEchoReps, *that1.InEchoReps) + } + } else if this.InEchoReps != nil { + return fmt.Errorf("this.InEchoReps == nil && that.InEchoReps != nil") + } else if that1.InEchoReps != nil { + return fmt.Errorf("InEchoReps this(%v) Not Equal that(%v)", 
this.InEchoReps, that1.InEchoReps) + } + if this.InTimestamps != nil && that1.InTimestamps != nil { + if *this.InTimestamps != *that1.InTimestamps { + return fmt.Errorf("InTimestamps this(%v) Not Equal that(%v)", *this.InTimestamps, *that1.InTimestamps) + } + } else if this.InTimestamps != nil { + return fmt.Errorf("this.InTimestamps == nil && that.InTimestamps != nil") + } else if that1.InTimestamps != nil { + return fmt.Errorf("InTimestamps this(%v) Not Equal that(%v)", this.InTimestamps, that1.InTimestamps) + } + if this.InTimestampReps != nil && that1.InTimestampReps != nil { + if *this.InTimestampReps != *that1.InTimestampReps { + return fmt.Errorf("InTimestampReps this(%v) Not Equal that(%v)", *this.InTimestampReps, *that1.InTimestampReps) + } + } else if this.InTimestampReps != nil { + return fmt.Errorf("this.InTimestampReps == nil && that.InTimestampReps != nil") + } else if that1.InTimestampReps != nil { + return fmt.Errorf("InTimestampReps this(%v) Not Equal that(%v)", this.InTimestampReps, that1.InTimestampReps) + } + if this.InAddrMasks != nil && that1.InAddrMasks != nil { + if *this.InAddrMasks != *that1.InAddrMasks { + return fmt.Errorf("InAddrMasks this(%v) Not Equal that(%v)", *this.InAddrMasks, *that1.InAddrMasks) + } + } else if this.InAddrMasks != nil { + return fmt.Errorf("this.InAddrMasks == nil && that.InAddrMasks != nil") + } else if that1.InAddrMasks != nil { + return fmt.Errorf("InAddrMasks this(%v) Not Equal that(%v)", this.InAddrMasks, that1.InAddrMasks) + } + if this.InAddrMaskReps != nil && that1.InAddrMaskReps != nil { + if *this.InAddrMaskReps != *that1.InAddrMaskReps { + return fmt.Errorf("InAddrMaskReps this(%v) Not Equal that(%v)", *this.InAddrMaskReps, *that1.InAddrMaskReps) + } + } else if this.InAddrMaskReps != nil { + return fmt.Errorf("this.InAddrMaskReps == nil && that.InAddrMaskReps != nil") + } else if that1.InAddrMaskReps != nil { + return fmt.Errorf("InAddrMaskReps this(%v) Not Equal that(%v)", this.InAddrMaskReps, that1.InAddrMaskReps) + } + if this.OutMsgs != nil && that1.OutMsgs != nil { + if *this.OutMsgs != *that1.OutMsgs { + return fmt.Errorf("OutMsgs this(%v) Not Equal that(%v)", *this.OutMsgs, *that1.OutMsgs) + } + } else if this.OutMsgs != nil { + return fmt.Errorf("this.OutMsgs == nil && that.OutMsgs != nil") + } else if that1.OutMsgs != nil { + return fmt.Errorf("OutMsgs this(%v) Not Equal that(%v)", this.OutMsgs, that1.OutMsgs) + } + if this.OutErrors != nil && that1.OutErrors != nil { + if *this.OutErrors != *that1.OutErrors { + return fmt.Errorf("OutErrors this(%v) Not Equal that(%v)", *this.OutErrors, *that1.OutErrors) + } + } else if this.OutErrors != nil { + return fmt.Errorf("this.OutErrors == nil && that.OutErrors != nil") + } else if that1.OutErrors != nil { + return fmt.Errorf("OutErrors this(%v) Not Equal that(%v)", this.OutErrors, that1.OutErrors) + } + if this.OutDestUnreachs != nil && that1.OutDestUnreachs != nil { + if *this.OutDestUnreachs != *that1.OutDestUnreachs { + return fmt.Errorf("OutDestUnreachs this(%v) Not Equal that(%v)", *this.OutDestUnreachs, *that1.OutDestUnreachs) + } + } else if this.OutDestUnreachs != nil { + return fmt.Errorf("this.OutDestUnreachs == nil && that.OutDestUnreachs != nil") + } else if that1.OutDestUnreachs != nil { + return fmt.Errorf("OutDestUnreachs this(%v) Not Equal that(%v)", this.OutDestUnreachs, that1.OutDestUnreachs) + } + if this.OutTimeExcds != nil && that1.OutTimeExcds != nil { + if *this.OutTimeExcds != *that1.OutTimeExcds { + return fmt.Errorf("OutTimeExcds this(%v) Not Equal 
that(%v)", *this.OutTimeExcds, *that1.OutTimeExcds) + } + } else if this.OutTimeExcds != nil { + return fmt.Errorf("this.OutTimeExcds == nil && that.OutTimeExcds != nil") + } else if that1.OutTimeExcds != nil { + return fmt.Errorf("OutTimeExcds this(%v) Not Equal that(%v)", this.OutTimeExcds, that1.OutTimeExcds) + } + if this.OutParmProbs != nil && that1.OutParmProbs != nil { + if *this.OutParmProbs != *that1.OutParmProbs { + return fmt.Errorf("OutParmProbs this(%v) Not Equal that(%v)", *this.OutParmProbs, *that1.OutParmProbs) + } + } else if this.OutParmProbs != nil { + return fmt.Errorf("this.OutParmProbs == nil && that.OutParmProbs != nil") + } else if that1.OutParmProbs != nil { + return fmt.Errorf("OutParmProbs this(%v) Not Equal that(%v)", this.OutParmProbs, that1.OutParmProbs) + } + if this.OutSrcQuenchs != nil && that1.OutSrcQuenchs != nil { + if *this.OutSrcQuenchs != *that1.OutSrcQuenchs { + return fmt.Errorf("OutSrcQuenchs this(%v) Not Equal that(%v)", *this.OutSrcQuenchs, *that1.OutSrcQuenchs) + } + } else if this.OutSrcQuenchs != nil { + return fmt.Errorf("this.OutSrcQuenchs == nil && that.OutSrcQuenchs != nil") + } else if that1.OutSrcQuenchs != nil { + return fmt.Errorf("OutSrcQuenchs this(%v) Not Equal that(%v)", this.OutSrcQuenchs, that1.OutSrcQuenchs) + } + if this.OutRedirects != nil && that1.OutRedirects != nil { + if *this.OutRedirects != *that1.OutRedirects { + return fmt.Errorf("OutRedirects this(%v) Not Equal that(%v)", *this.OutRedirects, *that1.OutRedirects) + } + } else if this.OutRedirects != nil { + return fmt.Errorf("this.OutRedirects == nil && that.OutRedirects != nil") + } else if that1.OutRedirects != nil { + return fmt.Errorf("OutRedirects this(%v) Not Equal that(%v)", this.OutRedirects, that1.OutRedirects) + } + if this.OutEchos != nil && that1.OutEchos != nil { + if *this.OutEchos != *that1.OutEchos { + return fmt.Errorf("OutEchos this(%v) Not Equal that(%v)", *this.OutEchos, *that1.OutEchos) + } + } else if this.OutEchos != nil { + return fmt.Errorf("this.OutEchos == nil && that.OutEchos != nil") + } else if that1.OutEchos != nil { + return fmt.Errorf("OutEchos this(%v) Not Equal that(%v)", this.OutEchos, that1.OutEchos) + } + if this.OutEchoReps != nil && that1.OutEchoReps != nil { + if *this.OutEchoReps != *that1.OutEchoReps { + return fmt.Errorf("OutEchoReps this(%v) Not Equal that(%v)", *this.OutEchoReps, *that1.OutEchoReps) + } + } else if this.OutEchoReps != nil { + return fmt.Errorf("this.OutEchoReps == nil && that.OutEchoReps != nil") + } else if that1.OutEchoReps != nil { + return fmt.Errorf("OutEchoReps this(%v) Not Equal that(%v)", this.OutEchoReps, that1.OutEchoReps) + } + if this.OutTimestamps != nil && that1.OutTimestamps != nil { + if *this.OutTimestamps != *that1.OutTimestamps { + return fmt.Errorf("OutTimestamps this(%v) Not Equal that(%v)", *this.OutTimestamps, *that1.OutTimestamps) + } + } else if this.OutTimestamps != nil { + return fmt.Errorf("this.OutTimestamps == nil && that.OutTimestamps != nil") + } else if that1.OutTimestamps != nil { + return fmt.Errorf("OutTimestamps this(%v) Not Equal that(%v)", this.OutTimestamps, that1.OutTimestamps) + } + if this.OutTimestampReps != nil && that1.OutTimestampReps != nil { + if *this.OutTimestampReps != *that1.OutTimestampReps { + return fmt.Errorf("OutTimestampReps this(%v) Not Equal that(%v)", *this.OutTimestampReps, *that1.OutTimestampReps) + } + } else if this.OutTimestampReps != nil { + return fmt.Errorf("this.OutTimestampReps == nil && that.OutTimestampReps != nil") + } else if 
that1.OutTimestampReps != nil { + return fmt.Errorf("OutTimestampReps this(%v) Not Equal that(%v)", this.OutTimestampReps, that1.OutTimestampReps) + } + if this.OutAddrMasks != nil && that1.OutAddrMasks != nil { + if *this.OutAddrMasks != *that1.OutAddrMasks { + return fmt.Errorf("OutAddrMasks this(%v) Not Equal that(%v)", *this.OutAddrMasks, *that1.OutAddrMasks) + } + } else if this.OutAddrMasks != nil { + return fmt.Errorf("this.OutAddrMasks == nil && that.OutAddrMasks != nil") + } else if that1.OutAddrMasks != nil { + return fmt.Errorf("OutAddrMasks this(%v) Not Equal that(%v)", this.OutAddrMasks, that1.OutAddrMasks) + } + if this.OutAddrMaskReps != nil && that1.OutAddrMaskReps != nil { + if *this.OutAddrMaskReps != *that1.OutAddrMaskReps { + return fmt.Errorf("OutAddrMaskReps this(%v) Not Equal that(%v)", *this.OutAddrMaskReps, *that1.OutAddrMaskReps) + } + } else if this.OutAddrMaskReps != nil { + return fmt.Errorf("this.OutAddrMaskReps == nil && that.OutAddrMaskReps != nil") + } else if that1.OutAddrMaskReps != nil { + return fmt.Errorf("OutAddrMaskReps this(%v) Not Equal that(%v)", this.OutAddrMaskReps, that1.OutAddrMaskReps) + } + return nil +} +func (this *IcmpStatistics) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*IcmpStatistics) + if !ok { + that2, ok := that.(IcmpStatistics) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.InMsgs != nil && that1.InMsgs != nil { + if *this.InMsgs != *that1.InMsgs { + return false + } + } else if this.InMsgs != nil { + return false + } else if that1.InMsgs != nil { + return false + } + if this.InErrors != nil && that1.InErrors != nil { + if *this.InErrors != *that1.InErrors { + return false + } + } else if this.InErrors != nil { + return false + } else if that1.InErrors != nil { + return false + } + if this.InCsumErrors != nil && that1.InCsumErrors != nil { + if *this.InCsumErrors != *that1.InCsumErrors { + return false + } + } else if this.InCsumErrors != nil { + return false + } else if that1.InCsumErrors != nil { + return false + } + if this.InDestUnreachs != nil && that1.InDestUnreachs != nil { + if *this.InDestUnreachs != *that1.InDestUnreachs { + return false + } + } else if this.InDestUnreachs != nil { + return false + } else if that1.InDestUnreachs != nil { + return false + } + if this.InTimeExcds != nil && that1.InTimeExcds != nil { + if *this.InTimeExcds != *that1.InTimeExcds { + return false + } + } else if this.InTimeExcds != nil { + return false + } else if that1.InTimeExcds != nil { + return false + } + if this.InParmProbs != nil && that1.InParmProbs != nil { + if *this.InParmProbs != *that1.InParmProbs { + return false + } + } else if this.InParmProbs != nil { + return false + } else if that1.InParmProbs != nil { + return false + } + if this.InSrcQuenchs != nil && that1.InSrcQuenchs != nil { + if *this.InSrcQuenchs != *that1.InSrcQuenchs { + return false + } + } else if this.InSrcQuenchs != nil { + return false + } else if that1.InSrcQuenchs != nil { + return false + } + if this.InRedirects != nil && that1.InRedirects != nil { + if *this.InRedirects != *that1.InRedirects { + return false + } + } else if this.InRedirects != nil { + return false + } else if that1.InRedirects != nil { + return false + } + if this.InEchos != nil && that1.InEchos != nil { + if *this.InEchos != *that1.InEchos { + return false + } + } 
else if this.InEchos != nil { + return false + } else if that1.InEchos != nil { + return false + } + if this.InEchoReps != nil && that1.InEchoReps != nil { + if *this.InEchoReps != *that1.InEchoReps { + return false + } + } else if this.InEchoReps != nil { + return false + } else if that1.InEchoReps != nil { + return false + } + if this.InTimestamps != nil && that1.InTimestamps != nil { + if *this.InTimestamps != *that1.InTimestamps { + return false + } + } else if this.InTimestamps != nil { + return false + } else if that1.InTimestamps != nil { + return false + } + if this.InTimestampReps != nil && that1.InTimestampReps != nil { + if *this.InTimestampReps != *that1.InTimestampReps { + return false + } + } else if this.InTimestampReps != nil { + return false + } else if that1.InTimestampReps != nil { + return false + } + if this.InAddrMasks != nil && that1.InAddrMasks != nil { + if *this.InAddrMasks != *that1.InAddrMasks { + return false + } + } else if this.InAddrMasks != nil { + return false + } else if that1.InAddrMasks != nil { + return false + } + if this.InAddrMaskReps != nil && that1.InAddrMaskReps != nil { + if *this.InAddrMaskReps != *that1.InAddrMaskReps { + return false + } + } else if this.InAddrMaskReps != nil { + return false + } else if that1.InAddrMaskReps != nil { + return false + } + if this.OutMsgs != nil && that1.OutMsgs != nil { + if *this.OutMsgs != *that1.OutMsgs { + return false + } + } else if this.OutMsgs != nil { + return false + } else if that1.OutMsgs != nil { + return false + } + if this.OutErrors != nil && that1.OutErrors != nil { + if *this.OutErrors != *that1.OutErrors { + return false + } + } else if this.OutErrors != nil { + return false + } else if that1.OutErrors != nil { + return false + } + if this.OutDestUnreachs != nil && that1.OutDestUnreachs != nil { + if *this.OutDestUnreachs != *that1.OutDestUnreachs { + return false + } + } else if this.OutDestUnreachs != nil { + return false + } else if that1.OutDestUnreachs != nil { + return false + } + if this.OutTimeExcds != nil && that1.OutTimeExcds != nil { + if *this.OutTimeExcds != *that1.OutTimeExcds { + return false + } + } else if this.OutTimeExcds != nil { + return false + } else if that1.OutTimeExcds != nil { + return false + } + if this.OutParmProbs != nil && that1.OutParmProbs != nil { + if *this.OutParmProbs != *that1.OutParmProbs { + return false + } + } else if this.OutParmProbs != nil { + return false + } else if that1.OutParmProbs != nil { + return false + } + if this.OutSrcQuenchs != nil && that1.OutSrcQuenchs != nil { + if *this.OutSrcQuenchs != *that1.OutSrcQuenchs { + return false + } + } else if this.OutSrcQuenchs != nil { + return false + } else if that1.OutSrcQuenchs != nil { + return false + } + if this.OutRedirects != nil && that1.OutRedirects != nil { + if *this.OutRedirects != *that1.OutRedirects { + return false + } + } else if this.OutRedirects != nil { + return false + } else if that1.OutRedirects != nil { + return false + } + if this.OutEchos != nil && that1.OutEchos != nil { + if *this.OutEchos != *that1.OutEchos { + return false + } + } else if this.OutEchos != nil { + return false + } else if that1.OutEchos != nil { + return false + } + if this.OutEchoReps != nil && that1.OutEchoReps != nil { + if *this.OutEchoReps != *that1.OutEchoReps { + return false + } + } else if this.OutEchoReps != nil { + return false + } else if that1.OutEchoReps != nil { + return false + } + if this.OutTimestamps != nil && that1.OutTimestamps != nil { + if *this.OutTimestamps != 
*that1.OutTimestamps { + return false + } + } else if this.OutTimestamps != nil { + return false + } else if that1.OutTimestamps != nil { + return false + } + if this.OutTimestampReps != nil && that1.OutTimestampReps != nil { + if *this.OutTimestampReps != *that1.OutTimestampReps { + return false + } + } else if this.OutTimestampReps != nil { + return false + } else if that1.OutTimestampReps != nil { + return false + } + if this.OutAddrMasks != nil && that1.OutAddrMasks != nil { + if *this.OutAddrMasks != *that1.OutAddrMasks { + return false + } + } else if this.OutAddrMasks != nil { + return false + } else if that1.OutAddrMasks != nil { + return false + } + if this.OutAddrMaskReps != nil && that1.OutAddrMaskReps != nil { + if *this.OutAddrMaskReps != *that1.OutAddrMaskReps { + return false + } + } else if this.OutAddrMaskReps != nil { + return false + } else if that1.OutAddrMaskReps != nil { + return false + } + return true +} +func (this *TcpStatistics) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*TcpStatistics) + if !ok { + that2, ok := that.(TcpStatistics) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *TcpStatistics") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *TcpStatistics but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *TcpStatistics but is not nil && this == nil") + } + if this.RtoAlgorithm != nil && that1.RtoAlgorithm != nil { + if *this.RtoAlgorithm != *that1.RtoAlgorithm { + return fmt.Errorf("RtoAlgorithm this(%v) Not Equal that(%v)", *this.RtoAlgorithm, *that1.RtoAlgorithm) + } + } else if this.RtoAlgorithm != nil { + return fmt.Errorf("this.RtoAlgorithm == nil && that.RtoAlgorithm != nil") + } else if that1.RtoAlgorithm != nil { + return fmt.Errorf("RtoAlgorithm this(%v) Not Equal that(%v)", this.RtoAlgorithm, that1.RtoAlgorithm) + } + if this.RtoMin != nil && that1.RtoMin != nil { + if *this.RtoMin != *that1.RtoMin { + return fmt.Errorf("RtoMin this(%v) Not Equal that(%v)", *this.RtoMin, *that1.RtoMin) + } + } else if this.RtoMin != nil { + return fmt.Errorf("this.RtoMin == nil && that.RtoMin != nil") + } else if that1.RtoMin != nil { + return fmt.Errorf("RtoMin this(%v) Not Equal that(%v)", this.RtoMin, that1.RtoMin) + } + if this.RtoMax != nil && that1.RtoMax != nil { + if *this.RtoMax != *that1.RtoMax { + return fmt.Errorf("RtoMax this(%v) Not Equal that(%v)", *this.RtoMax, *that1.RtoMax) + } + } else if this.RtoMax != nil { + return fmt.Errorf("this.RtoMax == nil && that.RtoMax != nil") + } else if that1.RtoMax != nil { + return fmt.Errorf("RtoMax this(%v) Not Equal that(%v)", this.RtoMax, that1.RtoMax) + } + if this.MaxConn != nil && that1.MaxConn != nil { + if *this.MaxConn != *that1.MaxConn { + return fmt.Errorf("MaxConn this(%v) Not Equal that(%v)", *this.MaxConn, *that1.MaxConn) + } + } else if this.MaxConn != nil { + return fmt.Errorf("this.MaxConn == nil && that.MaxConn != nil") + } else if that1.MaxConn != nil { + return fmt.Errorf("MaxConn this(%v) Not Equal that(%v)", this.MaxConn, that1.MaxConn) + } + if this.ActiveOpens != nil && that1.ActiveOpens != nil { + if *this.ActiveOpens != *that1.ActiveOpens { + return fmt.Errorf("ActiveOpens this(%v) Not Equal that(%v)", *this.ActiveOpens, *that1.ActiveOpens) + } + } else if this.ActiveOpens != nil { + return fmt.Errorf("this.ActiveOpens == nil && that.ActiveOpens 
!= nil") + } else if that1.ActiveOpens != nil { + return fmt.Errorf("ActiveOpens this(%v) Not Equal that(%v)", this.ActiveOpens, that1.ActiveOpens) + } + if this.PassiveOpens != nil && that1.PassiveOpens != nil { + if *this.PassiveOpens != *that1.PassiveOpens { + return fmt.Errorf("PassiveOpens this(%v) Not Equal that(%v)", *this.PassiveOpens, *that1.PassiveOpens) + } + } else if this.PassiveOpens != nil { + return fmt.Errorf("this.PassiveOpens == nil && that.PassiveOpens != nil") + } else if that1.PassiveOpens != nil { + return fmt.Errorf("PassiveOpens this(%v) Not Equal that(%v)", this.PassiveOpens, that1.PassiveOpens) + } + if this.AttemptFails != nil && that1.AttemptFails != nil { + if *this.AttemptFails != *that1.AttemptFails { + return fmt.Errorf("AttemptFails this(%v) Not Equal that(%v)", *this.AttemptFails, *that1.AttemptFails) + } + } else if this.AttemptFails != nil { + return fmt.Errorf("this.AttemptFails == nil && that.AttemptFails != nil") + } else if that1.AttemptFails != nil { + return fmt.Errorf("AttemptFails this(%v) Not Equal that(%v)", this.AttemptFails, that1.AttemptFails) + } + if this.EstabResets != nil && that1.EstabResets != nil { + if *this.EstabResets != *that1.EstabResets { + return fmt.Errorf("EstabResets this(%v) Not Equal that(%v)", *this.EstabResets, *that1.EstabResets) + } + } else if this.EstabResets != nil { + return fmt.Errorf("this.EstabResets == nil && that.EstabResets != nil") + } else if that1.EstabResets != nil { + return fmt.Errorf("EstabResets this(%v) Not Equal that(%v)", this.EstabResets, that1.EstabResets) + } + if this.CurrEstab != nil && that1.CurrEstab != nil { + if *this.CurrEstab != *that1.CurrEstab { + return fmt.Errorf("CurrEstab this(%v) Not Equal that(%v)", *this.CurrEstab, *that1.CurrEstab) + } + } else if this.CurrEstab != nil { + return fmt.Errorf("this.CurrEstab == nil && that.CurrEstab != nil") + } else if that1.CurrEstab != nil { + return fmt.Errorf("CurrEstab this(%v) Not Equal that(%v)", this.CurrEstab, that1.CurrEstab) + } + if this.InSegs != nil && that1.InSegs != nil { + if *this.InSegs != *that1.InSegs { + return fmt.Errorf("InSegs this(%v) Not Equal that(%v)", *this.InSegs, *that1.InSegs) + } + } else if this.InSegs != nil { + return fmt.Errorf("this.InSegs == nil && that.InSegs != nil") + } else if that1.InSegs != nil { + return fmt.Errorf("InSegs this(%v) Not Equal that(%v)", this.InSegs, that1.InSegs) + } + if this.OutSegs != nil && that1.OutSegs != nil { + if *this.OutSegs != *that1.OutSegs { + return fmt.Errorf("OutSegs this(%v) Not Equal that(%v)", *this.OutSegs, *that1.OutSegs) + } + } else if this.OutSegs != nil { + return fmt.Errorf("this.OutSegs == nil && that.OutSegs != nil") + } else if that1.OutSegs != nil { + return fmt.Errorf("OutSegs this(%v) Not Equal that(%v)", this.OutSegs, that1.OutSegs) + } + if this.RetransSegs != nil && that1.RetransSegs != nil { + if *this.RetransSegs != *that1.RetransSegs { + return fmt.Errorf("RetransSegs this(%v) Not Equal that(%v)", *this.RetransSegs, *that1.RetransSegs) + } + } else if this.RetransSegs != nil { + return fmt.Errorf("this.RetransSegs == nil && that.RetransSegs != nil") + } else if that1.RetransSegs != nil { + return fmt.Errorf("RetransSegs this(%v) Not Equal that(%v)", this.RetransSegs, that1.RetransSegs) + } + if this.InErrs != nil && that1.InErrs != nil { + if *this.InErrs != *that1.InErrs { + return fmt.Errorf("InErrs this(%v) Not Equal that(%v)", *this.InErrs, *that1.InErrs) + } + } else if this.InErrs != nil { + return fmt.Errorf("this.InErrs == nil && 
that.InErrs != nil") + } else if that1.InErrs != nil { + return fmt.Errorf("InErrs this(%v) Not Equal that(%v)", this.InErrs, that1.InErrs) + } + if this.OutRsts != nil && that1.OutRsts != nil { + if *this.OutRsts != *that1.OutRsts { + return fmt.Errorf("OutRsts this(%v) Not Equal that(%v)", *this.OutRsts, *that1.OutRsts) + } + } else if this.OutRsts != nil { + return fmt.Errorf("this.OutRsts == nil && that.OutRsts != nil") + } else if that1.OutRsts != nil { + return fmt.Errorf("OutRsts this(%v) Not Equal that(%v)", this.OutRsts, that1.OutRsts) + } + if this.InCsumErrors != nil && that1.InCsumErrors != nil { + if *this.InCsumErrors != *that1.InCsumErrors { + return fmt.Errorf("InCsumErrors this(%v) Not Equal that(%v)", *this.InCsumErrors, *that1.InCsumErrors) + } + } else if this.InCsumErrors != nil { + return fmt.Errorf("this.InCsumErrors == nil && that.InCsumErrors != nil") + } else if that1.InCsumErrors != nil { + return fmt.Errorf("InCsumErrors this(%v) Not Equal that(%v)", this.InCsumErrors, that1.InCsumErrors) + } + return nil +} +func (this *TcpStatistics) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*TcpStatistics) + if !ok { + that2, ok := that.(TcpStatistics) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.RtoAlgorithm != nil && that1.RtoAlgorithm != nil { + if *this.RtoAlgorithm != *that1.RtoAlgorithm { + return false + } + } else if this.RtoAlgorithm != nil { + return false + } else if that1.RtoAlgorithm != nil { + return false + } + if this.RtoMin != nil && that1.RtoMin != nil { + if *this.RtoMin != *that1.RtoMin { + return false + } + } else if this.RtoMin != nil { + return false + } else if that1.RtoMin != nil { + return false + } + if this.RtoMax != nil && that1.RtoMax != nil { + if *this.RtoMax != *that1.RtoMax { + return false + } + } else if this.RtoMax != nil { + return false + } else if that1.RtoMax != nil { + return false + } + if this.MaxConn != nil && that1.MaxConn != nil { + if *this.MaxConn != *that1.MaxConn { + return false + } + } else if this.MaxConn != nil { + return false + } else if that1.MaxConn != nil { + return false + } + if this.ActiveOpens != nil && that1.ActiveOpens != nil { + if *this.ActiveOpens != *that1.ActiveOpens { + return false + } + } else if this.ActiveOpens != nil { + return false + } else if that1.ActiveOpens != nil { + return false + } + if this.PassiveOpens != nil && that1.PassiveOpens != nil { + if *this.PassiveOpens != *that1.PassiveOpens { + return false + } + } else if this.PassiveOpens != nil { + return false + } else if that1.PassiveOpens != nil { + return false + } + if this.AttemptFails != nil && that1.AttemptFails != nil { + if *this.AttemptFails != *that1.AttemptFails { + return false + } + } else if this.AttemptFails != nil { + return false + } else if that1.AttemptFails != nil { + return false + } + if this.EstabResets != nil && that1.EstabResets != nil { + if *this.EstabResets != *that1.EstabResets { + return false + } + } else if this.EstabResets != nil { + return false + } else if that1.EstabResets != nil { + return false + } + if this.CurrEstab != nil && that1.CurrEstab != nil { + if *this.CurrEstab != *that1.CurrEstab { + return false + } + } else if this.CurrEstab != nil { + return false + } else if that1.CurrEstab != nil { + return false + } + if this.InSegs != nil && that1.InSegs != nil { + if 
*this.InSegs != *that1.InSegs { + return false + } + } else if this.InSegs != nil { + return false + } else if that1.InSegs != nil { + return false + } + if this.OutSegs != nil && that1.OutSegs != nil { + if *this.OutSegs != *that1.OutSegs { + return false + } + } else if this.OutSegs != nil { + return false + } else if that1.OutSegs != nil { + return false + } + if this.RetransSegs != nil && that1.RetransSegs != nil { + if *this.RetransSegs != *that1.RetransSegs { + return false + } + } else if this.RetransSegs != nil { + return false + } else if that1.RetransSegs != nil { + return false + } + if this.InErrs != nil && that1.InErrs != nil { + if *this.InErrs != *that1.InErrs { + return false + } + } else if this.InErrs != nil { + return false + } else if that1.InErrs != nil { + return false + } + if this.OutRsts != nil && that1.OutRsts != nil { + if *this.OutRsts != *that1.OutRsts { + return false + } + } else if this.OutRsts != nil { + return false + } else if that1.OutRsts != nil { + return false + } + if this.InCsumErrors != nil && that1.InCsumErrors != nil { + if *this.InCsumErrors != *that1.InCsumErrors { + return false + } + } else if this.InCsumErrors != nil { + return false + } else if that1.InCsumErrors != nil { + return false + } + return true +} +func (this *UdpStatistics) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*UdpStatistics) + if !ok { + that2, ok := that.(UdpStatistics) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *UdpStatistics") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *UdpStatistics but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *UdpStatistics but is not nil && this == nil") + } + if this.InDatagrams != nil && that1.InDatagrams != nil { + if *this.InDatagrams != *that1.InDatagrams { + return fmt.Errorf("InDatagrams this(%v) Not Equal that(%v)", *this.InDatagrams, *that1.InDatagrams) + } + } else if this.InDatagrams != nil { + return fmt.Errorf("this.InDatagrams == nil && that.InDatagrams != nil") + } else if that1.InDatagrams != nil { + return fmt.Errorf("InDatagrams this(%v) Not Equal that(%v)", this.InDatagrams, that1.InDatagrams) + } + if this.NoPorts != nil && that1.NoPorts != nil { + if *this.NoPorts != *that1.NoPorts { + return fmt.Errorf("NoPorts this(%v) Not Equal that(%v)", *this.NoPorts, *that1.NoPorts) + } + } else if this.NoPorts != nil { + return fmt.Errorf("this.NoPorts == nil && that.NoPorts != nil") + } else if that1.NoPorts != nil { + return fmt.Errorf("NoPorts this(%v) Not Equal that(%v)", this.NoPorts, that1.NoPorts) + } + if this.InErrors != nil && that1.InErrors != nil { + if *this.InErrors != *that1.InErrors { + return fmt.Errorf("InErrors this(%v) Not Equal that(%v)", *this.InErrors, *that1.InErrors) + } + } else if this.InErrors != nil { + return fmt.Errorf("this.InErrors == nil && that.InErrors != nil") + } else if that1.InErrors != nil { + return fmt.Errorf("InErrors this(%v) Not Equal that(%v)", this.InErrors, that1.InErrors) + } + if this.OutDatagrams != nil && that1.OutDatagrams != nil { + if *this.OutDatagrams != *that1.OutDatagrams { + return fmt.Errorf("OutDatagrams this(%v) Not Equal that(%v)", *this.OutDatagrams, *that1.OutDatagrams) + } + } else if this.OutDatagrams != nil { + return fmt.Errorf("this.OutDatagrams == nil && that.OutDatagrams != nil") + } else if that1.OutDatagrams 
!= nil { + return fmt.Errorf("OutDatagrams this(%v) Not Equal that(%v)", this.OutDatagrams, that1.OutDatagrams) + } + if this.RcvbufErrors != nil && that1.RcvbufErrors != nil { + if *this.RcvbufErrors != *that1.RcvbufErrors { + return fmt.Errorf("RcvbufErrors this(%v) Not Equal that(%v)", *this.RcvbufErrors, *that1.RcvbufErrors) + } + } else if this.RcvbufErrors != nil { + return fmt.Errorf("this.RcvbufErrors == nil && that.RcvbufErrors != nil") + } else if that1.RcvbufErrors != nil { + return fmt.Errorf("RcvbufErrors this(%v) Not Equal that(%v)", this.RcvbufErrors, that1.RcvbufErrors) + } + if this.SndbufErrors != nil && that1.SndbufErrors != nil { + if *this.SndbufErrors != *that1.SndbufErrors { + return fmt.Errorf("SndbufErrors this(%v) Not Equal that(%v)", *this.SndbufErrors, *that1.SndbufErrors) + } + } else if this.SndbufErrors != nil { + return fmt.Errorf("this.SndbufErrors == nil && that.SndbufErrors != nil") + } else if that1.SndbufErrors != nil { + return fmt.Errorf("SndbufErrors this(%v) Not Equal that(%v)", this.SndbufErrors, that1.SndbufErrors) + } + if this.InCsumErrors != nil && that1.InCsumErrors != nil { + if *this.InCsumErrors != *that1.InCsumErrors { + return fmt.Errorf("InCsumErrors this(%v) Not Equal that(%v)", *this.InCsumErrors, *that1.InCsumErrors) + } + } else if this.InCsumErrors != nil { + return fmt.Errorf("this.InCsumErrors == nil && that.InCsumErrors != nil") + } else if that1.InCsumErrors != nil { + return fmt.Errorf("InCsumErrors this(%v) Not Equal that(%v)", this.InCsumErrors, that1.InCsumErrors) + } + if this.IgnoredMulti != nil && that1.IgnoredMulti != nil { + if *this.IgnoredMulti != *that1.IgnoredMulti { + return fmt.Errorf("IgnoredMulti this(%v) Not Equal that(%v)", *this.IgnoredMulti, *that1.IgnoredMulti) + } + } else if this.IgnoredMulti != nil { + return fmt.Errorf("this.IgnoredMulti == nil && that.IgnoredMulti != nil") + } else if that1.IgnoredMulti != nil { + return fmt.Errorf("IgnoredMulti this(%v) Not Equal that(%v)", this.IgnoredMulti, that1.IgnoredMulti) + } + return nil +} +func (this *UdpStatistics) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*UdpStatistics) + if !ok { + that2, ok := that.(UdpStatistics) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.InDatagrams != nil && that1.InDatagrams != nil { + if *this.InDatagrams != *that1.InDatagrams { + return false + } + } else if this.InDatagrams != nil { + return false + } else if that1.InDatagrams != nil { + return false + } + if this.NoPorts != nil && that1.NoPorts != nil { + if *this.NoPorts != *that1.NoPorts { + return false + } + } else if this.NoPorts != nil { + return false + } else if that1.NoPorts != nil { + return false + } + if this.InErrors != nil && that1.InErrors != nil { + if *this.InErrors != *that1.InErrors { + return false + } + } else if this.InErrors != nil { + return false + } else if that1.InErrors != nil { + return false + } + if this.OutDatagrams != nil && that1.OutDatagrams != nil { + if *this.OutDatagrams != *that1.OutDatagrams { + return false + } + } else if this.OutDatagrams != nil { + return false + } else if that1.OutDatagrams != nil { + return false + } + if this.RcvbufErrors != nil && that1.RcvbufErrors != nil { + if *this.RcvbufErrors != *that1.RcvbufErrors { + return false + } + } else if this.RcvbufErrors != nil { + return false + } else 
if that1.RcvbufErrors != nil { + return false + } + if this.SndbufErrors != nil && that1.SndbufErrors != nil { + if *this.SndbufErrors != *that1.SndbufErrors { + return false + } + } else if this.SndbufErrors != nil { + return false + } else if that1.SndbufErrors != nil { + return false + } + if this.InCsumErrors != nil && that1.InCsumErrors != nil { + if *this.InCsumErrors != *that1.InCsumErrors { + return false + } + } else if this.InCsumErrors != nil { + return false + } else if that1.InCsumErrors != nil { + return false + } + if this.IgnoredMulti != nil && that1.IgnoredMulti != nil { + if *this.IgnoredMulti != *that1.IgnoredMulti { + return false + } + } else if this.IgnoredMulti != nil { + return false + } else if that1.IgnoredMulti != nil { + return false + } + return true +} +func (this *SNMPStatistics) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*SNMPStatistics) + if !ok { + that2, ok := that.(SNMPStatistics) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *SNMPStatistics") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *SNMPStatistics but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *SNMPStatistics but is not nil && this == nil") + } + if !this.IPStats.Equal(that1.IPStats) { + return fmt.Errorf("IPStats this(%v) Not Equal that(%v)", this.IPStats, that1.IPStats) + } + if !this.ICMPStats.Equal(that1.ICMPStats) { + return fmt.Errorf("ICMPStats this(%v) Not Equal that(%v)", this.ICMPStats, that1.ICMPStats) + } + if !this.TCPStats.Equal(that1.TCPStats) { + return fmt.Errorf("TCPStats this(%v) Not Equal that(%v)", this.TCPStats, that1.TCPStats) + } + if !this.UDPStats.Equal(that1.UDPStats) { + return fmt.Errorf("UDPStats this(%v) Not Equal that(%v)", this.UDPStats, that1.UDPStats) + } + return nil +} +func (this *SNMPStatistics) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*SNMPStatistics) + if !ok { + that2, ok := that.(SNMPStatistics) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if !this.IPStats.Equal(that1.IPStats) { + return false + } + if !this.ICMPStats.Equal(that1.ICMPStats) { + return false + } + if !this.TCPStats.Equal(that1.TCPStats) { + return false + } + if !this.UDPStats.Equal(that1.UDPStats) { + return false + } + return true +} +func (this *DiskStatistics) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*DiskStatistics) + if !ok { + that2, ok := that.(DiskStatistics) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *DiskStatistics") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *DiskStatistics but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *DiskStatistics but is not nil && this == nil") + } + if !this.Source.Equal(that1.Source) { + return fmt.Errorf("Source this(%v) Not Equal that(%v)", this.Source, that1.Source) + } + if !this.Persistence.Equal(that1.Persistence) { + return fmt.Errorf("Persistence this(%v) Not Equal that(%v)", this.Persistence, that1.Persistence) + } + if 
this.LimitBytes != nil && that1.LimitBytes != nil { + if *this.LimitBytes != *that1.LimitBytes { + return fmt.Errorf("LimitBytes this(%v) Not Equal that(%v)", *this.LimitBytes, *that1.LimitBytes) + } + } else if this.LimitBytes != nil { + return fmt.Errorf("this.LimitBytes == nil && that.LimitBytes != nil") + } else if that1.LimitBytes != nil { + return fmt.Errorf("LimitBytes this(%v) Not Equal that(%v)", this.LimitBytes, that1.LimitBytes) + } + if this.UsedBytes != nil && that1.UsedBytes != nil { + if *this.UsedBytes != *that1.UsedBytes { + return fmt.Errorf("UsedBytes this(%v) Not Equal that(%v)", *this.UsedBytes, *that1.UsedBytes) + } + } else if this.UsedBytes != nil { + return fmt.Errorf("this.UsedBytes == nil && that.UsedBytes != nil") + } else if that1.UsedBytes != nil { + return fmt.Errorf("UsedBytes this(%v) Not Equal that(%v)", this.UsedBytes, that1.UsedBytes) + } + return nil +} +func (this *DiskStatistics) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*DiskStatistics) + if !ok { + that2, ok := that.(DiskStatistics) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if !this.Source.Equal(that1.Source) { + return false + } + if !this.Persistence.Equal(that1.Persistence) { + return false + } + if this.LimitBytes != nil && that1.LimitBytes != nil { + if *this.LimitBytes != *that1.LimitBytes { + return false + } + } else if this.LimitBytes != nil { + return false + } else if that1.LimitBytes != nil { + return false + } + if this.UsedBytes != nil && that1.UsedBytes != nil { + if *this.UsedBytes != *that1.UsedBytes { + return false + } + } else if this.UsedBytes != nil { + return false + } else if that1.UsedBytes != nil { + return false + } + return true +} +func (this *ResourceStatistics) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*ResourceStatistics) + if !ok { + that2, ok := that.(ResourceStatistics) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *ResourceStatistics") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *ResourceStatistics but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *ResourceStatistics but is not nil && this == nil") + } + if this.Timestamp != that1.Timestamp { + return fmt.Errorf("Timestamp this(%v) Not Equal that(%v)", this.Timestamp, that1.Timestamp) + } + if this.Processes != nil && that1.Processes != nil { + if *this.Processes != *that1.Processes { + return fmt.Errorf("Processes this(%v) Not Equal that(%v)", *this.Processes, *that1.Processes) + } + } else if this.Processes != nil { + return fmt.Errorf("this.Processes == nil && that.Processes != nil") + } else if that1.Processes != nil { + return fmt.Errorf("Processes this(%v) Not Equal that(%v)", this.Processes, that1.Processes) + } + if this.Threads != nil && that1.Threads != nil { + if *this.Threads != *that1.Threads { + return fmt.Errorf("Threads this(%v) Not Equal that(%v)", *this.Threads, *that1.Threads) + } + } else if this.Threads != nil { + return fmt.Errorf("this.Threads == nil && that.Threads != nil") + } else if that1.Threads != nil { + return fmt.Errorf("Threads this(%v) Not Equal that(%v)", this.Threads, that1.Threads) + } + if 
this.CPUsUserTimeSecs != nil && that1.CPUsUserTimeSecs != nil {
+		if *this.CPUsUserTimeSecs != *that1.CPUsUserTimeSecs {
+			return fmt.Errorf("CPUsUserTimeSecs this(%v) Not Equal that(%v)", *this.CPUsUserTimeSecs, *that1.CPUsUserTimeSecs)
+		}
+	} else if this.CPUsUserTimeSecs != nil {
+		return fmt.Errorf("this.CPUsUserTimeSecs == nil && that.CPUsUserTimeSecs != nil")
+	} else if that1.CPUsUserTimeSecs != nil {
+		return fmt.Errorf("CPUsUserTimeSecs this(%v) Not Equal that(%v)", this.CPUsUserTimeSecs, that1.CPUsUserTimeSecs)
+	}
+	if this.CPUsSystemTimeSecs != nil && that1.CPUsSystemTimeSecs != nil {
+		if *this.CPUsSystemTimeSecs != *that1.CPUsSystemTimeSecs {
+			return fmt.Errorf("CPUsSystemTimeSecs this(%v) Not Equal that(%v)", *this.CPUsSystemTimeSecs, *that1.CPUsSystemTimeSecs)
+		}
+	} else if this.CPUsSystemTimeSecs != nil {
+		return fmt.Errorf("this.CPUsSystemTimeSecs == nil && that.CPUsSystemTimeSecs != nil")
+	} else if that1.CPUsSystemTimeSecs != nil {
+		return fmt.Errorf("CPUsSystemTimeSecs this(%v) Not Equal that(%v)", this.CPUsSystemTimeSecs, that1.CPUsSystemTimeSecs)
+	}
+	if this.CPUsLimit != nil && that1.CPUsLimit != nil {
+		if *this.CPUsLimit != *that1.CPUsLimit {
+			return fmt.Errorf("CPUsLimit this(%v) Not Equal that(%v)", *this.CPUsLimit, *that1.CPUsLimit)
+		}
+	} else if this.CPUsLimit != nil {
+		return fmt.Errorf("this.CPUsLimit == nil && that.CPUsLimit != nil")
+	} else if that1.CPUsLimit != nil {
+		return fmt.Errorf("CPUsLimit this(%v) Not Equal that(%v)", this.CPUsLimit, that1.CPUsLimit)
+	}
+	if this.CPUsNrPeriods != nil && that1.CPUsNrPeriods != nil {
+		if *this.CPUsNrPeriods != *that1.CPUsNrPeriods {
+			return fmt.Errorf("CPUsNrPeriods this(%v) Not Equal that(%v)", *this.CPUsNrPeriods, *that1.CPUsNrPeriods)
+		}
+	} else if this.CPUsNrPeriods != nil {
+		return fmt.Errorf("this.CPUsNrPeriods == nil && that.CPUsNrPeriods != nil")
+	} else if that1.CPUsNrPeriods != nil {
+		return fmt.Errorf("CPUsNrPeriods this(%v) Not Equal that(%v)", this.CPUsNrPeriods, that1.CPUsNrPeriods)
+	}
+	if this.CPUsNrThrottled != nil && that1.CPUsNrThrottled != nil {
+		if *this.CPUsNrThrottled != *that1.CPUsNrThrottled {
+			return fmt.Errorf("CPUsNrThrottled this(%v) Not Equal that(%v)", *this.CPUsNrThrottled, *that1.CPUsNrThrottled)
+		}
+	} else if this.CPUsNrThrottled != nil {
+		return fmt.Errorf("this.CPUsNrThrottled == nil && that.CPUsNrThrottled != nil")
+	} else if that1.CPUsNrThrottled != nil {
+		return fmt.Errorf("CPUsNrThrottled this(%v) Not Equal that(%v)", this.CPUsNrThrottled, that1.CPUsNrThrottled)
+	}
+	if this.CPUsThrottledTimeSecs != nil && that1.CPUsThrottledTimeSecs != nil {
+		if *this.CPUsThrottledTimeSecs != *that1.CPUsThrottledTimeSecs {
+			return fmt.Errorf("CPUsThrottledTimeSecs this(%v) Not Equal that(%v)", *this.CPUsThrottledTimeSecs, *that1.CPUsThrottledTimeSecs)
+		}
+	} else if this.CPUsThrottledTimeSecs != nil {
+		return fmt.Errorf("this.CPUsThrottledTimeSecs == nil && that.CPUsThrottledTimeSecs != nil")
+	} else if that1.CPUsThrottledTimeSecs != nil {
+		return fmt.Errorf("CPUsThrottledTimeSecs this(%v) Not Equal that(%v)", this.CPUsThrottledTimeSecs, that1.CPUsThrottledTimeSecs)
+	}
+	if this.MemTotalBytes != nil && that1.MemTotalBytes != nil {
+		if *this.MemTotalBytes != *that1.MemTotalBytes {
+			return fmt.Errorf("MemTotalBytes this(%v) Not Equal that(%v)", *this.MemTotalBytes, *that1.MemTotalBytes)
+		}
+	} else if this.MemTotalBytes != nil {
+		return fmt.Errorf("this.MemTotalBytes == nil && that.MemTotalBytes != nil")
+	} else if that1.MemTotalBytes != nil {
+		return fmt.Errorf("MemTotalBytes this(%v) Not Equal that(%v)", this.MemTotalBytes, that1.MemTotalBytes)
+	}
+	if this.MemTotalMemswBytes != nil && that1.MemTotalMemswBytes != nil {
+		if *this.MemTotalMemswBytes != *that1.MemTotalMemswBytes {
+			return fmt.Errorf("MemTotalMemswBytes this(%v) Not Equal that(%v)", *this.MemTotalMemswBytes, *that1.MemTotalMemswBytes)
+		}
+	} else if this.MemTotalMemswBytes != nil {
+		return fmt.Errorf("this.MemTotalMemswBytes == nil && that.MemTotalMemswBytes != nil")
+	} else if that1.MemTotalMemswBytes != nil {
+		return fmt.Errorf("MemTotalMemswBytes this(%v) Not Equal that(%v)", this.MemTotalMemswBytes, that1.MemTotalMemswBytes)
+	}
+	if this.MemLimitBytes != nil && that1.MemLimitBytes != nil {
+		if *this.MemLimitBytes != *that1.MemLimitBytes {
+			return fmt.Errorf("MemLimitBytes this(%v) Not Equal that(%v)", *this.MemLimitBytes, *that1.MemLimitBytes)
+		}
+	} else if this.MemLimitBytes != nil {
+		return fmt.Errorf("this.MemLimitBytes == nil && that.MemLimitBytes != nil")
+	} else if that1.MemLimitBytes != nil {
+		return fmt.Errorf("MemLimitBytes this(%v) Not Equal that(%v)", this.MemLimitBytes, that1.MemLimitBytes)
+	}
+	if this.MemSoftLimitBytes != nil && that1.MemSoftLimitBytes != nil {
+		if *this.MemSoftLimitBytes != *that1.MemSoftLimitBytes {
+			return fmt.Errorf("MemSoftLimitBytes this(%v) Not Equal that(%v)", *this.MemSoftLimitBytes, *that1.MemSoftLimitBytes)
+		}
+	} else if this.MemSoftLimitBytes != nil {
+		return fmt.Errorf("this.MemSoftLimitBytes == nil && that.MemSoftLimitBytes != nil")
+	} else if that1.MemSoftLimitBytes != nil {
+		return fmt.Errorf("MemSoftLimitBytes this(%v) Not Equal that(%v)", this.MemSoftLimitBytes, that1.MemSoftLimitBytes)
+	}
+	if this.MemFileBytes != nil && that1.MemFileBytes != nil {
+		if *this.MemFileBytes != *that1.MemFileBytes {
+			return fmt.Errorf("MemFileBytes this(%v) Not Equal that(%v)", *this.MemFileBytes, *that1.MemFileBytes)
+		}
+	} else if this.MemFileBytes != nil {
+		return fmt.Errorf("this.MemFileBytes == nil && that.MemFileBytes != nil")
+	} else if that1.MemFileBytes != nil {
+		return fmt.Errorf("MemFileBytes this(%v) Not Equal that(%v)", this.MemFileBytes, that1.MemFileBytes)
+	}
+	if this.MemAnonBytes != nil && that1.MemAnonBytes != nil {
+		if *this.MemAnonBytes != *that1.MemAnonBytes {
+			return fmt.Errorf("MemAnonBytes this(%v) Not Equal that(%v)", *this.MemAnonBytes, *that1.MemAnonBytes)
+		}
+	} else if this.MemAnonBytes != nil {
+		return fmt.Errorf("this.MemAnonBytes == nil && that.MemAnonBytes != nil")
+	} else if that1.MemAnonBytes != nil {
+		return fmt.Errorf("MemAnonBytes this(%v) Not Equal that(%v)", this.MemAnonBytes, that1.MemAnonBytes)
+	}
+	if this.MemCacheBytes != nil && that1.MemCacheBytes != nil {
+		if *this.MemCacheBytes != *that1.MemCacheBytes {
+			return fmt.Errorf("MemCacheBytes this(%v) Not Equal that(%v)", *this.MemCacheBytes, *that1.MemCacheBytes)
+		}
+	} else if this.MemCacheBytes != nil {
+		return fmt.Errorf("this.MemCacheBytes == nil && that.MemCacheBytes != nil")
+	} else if that1.MemCacheBytes != nil {
+		return fmt.Errorf("MemCacheBytes this(%v) Not Equal that(%v)", this.MemCacheBytes, that1.MemCacheBytes)
+	}
+	if this.MemRSSBytes != nil && that1.MemRSSBytes != nil {
+		if *this.MemRSSBytes != *that1.MemRSSBytes {
+			return fmt.Errorf("MemRSSBytes this(%v) Not Equal that(%v)", *this.MemRSSBytes, *that1.MemRSSBytes)
+		}
+	} else if this.MemRSSBytes != nil {
+		return fmt.Errorf("this.MemRSSBytes == nil && that.MemRSSBytes != nil")
+	} else if that1.MemRSSBytes != nil {
+		return fmt.Errorf("MemRSSBytes this(%v) Not Equal that(%v)", this.MemRSSBytes, that1.MemRSSBytes)
+	}
+	if this.MemMappedFileBytes != nil && that1.MemMappedFileBytes != nil {
+		if *this.MemMappedFileBytes != *that1.MemMappedFileBytes {
+			return fmt.Errorf("MemMappedFileBytes this(%v) Not Equal that(%v)", *this.MemMappedFileBytes, *that1.MemMappedFileBytes)
+		}
+	} else if this.MemMappedFileBytes != nil {
+		return fmt.Errorf("this.MemMappedFileBytes == nil && that.MemMappedFileBytes != nil")
+	} else if that1.MemMappedFileBytes != nil {
+		return fmt.Errorf("MemMappedFileBytes this(%v) Not Equal that(%v)", this.MemMappedFileBytes, that1.MemMappedFileBytes)
+	}
+	if this.MemSwapBytes != nil && that1.MemSwapBytes != nil {
+		if *this.MemSwapBytes != *that1.MemSwapBytes {
+			return fmt.Errorf("MemSwapBytes this(%v) Not Equal that(%v)", *this.MemSwapBytes, *that1.MemSwapBytes)
+		}
+	} else if this.MemSwapBytes != nil {
+		return fmt.Errorf("this.MemSwapBytes == nil && that.MemSwapBytes != nil")
+	} else if that1.MemSwapBytes != nil {
+		return fmt.Errorf("MemSwapBytes this(%v) Not Equal that(%v)", this.MemSwapBytes, that1.MemSwapBytes)
+	}
+	if this.MemUnevictableBytes != nil && that1.MemUnevictableBytes != nil {
+		if *this.MemUnevictableBytes != *that1.MemUnevictableBytes {
+			return fmt.Errorf("MemUnevictableBytes this(%v) Not Equal that(%v)", *this.MemUnevictableBytes, *that1.MemUnevictableBytes)
+		}
+	} else if this.MemUnevictableBytes != nil {
+		return fmt.Errorf("this.MemUnevictableBytes == nil && that.MemUnevictableBytes != nil")
+	} else if that1.MemUnevictableBytes != nil {
+		return fmt.Errorf("MemUnevictableBytes this(%v) Not Equal that(%v)", this.MemUnevictableBytes, that1.MemUnevictableBytes)
+	}
+	if this.MemLowPressureCounter != nil && that1.MemLowPressureCounter != nil {
+		if *this.MemLowPressureCounter != *that1.MemLowPressureCounter {
+			return fmt.Errorf("MemLowPressureCounter this(%v) Not Equal that(%v)", *this.MemLowPressureCounter, *that1.MemLowPressureCounter)
+		}
+	} else if this.MemLowPressureCounter != nil {
+		return fmt.Errorf("this.MemLowPressureCounter == nil && that.MemLowPressureCounter != nil")
+	} else if that1.MemLowPressureCounter != nil {
+		return fmt.Errorf("MemLowPressureCounter this(%v) Not Equal that(%v)", this.MemLowPressureCounter, that1.MemLowPressureCounter)
+	}
+	if this.MemMediumPressureCounter != nil && that1.MemMediumPressureCounter != nil {
+		if *this.MemMediumPressureCounter != *that1.MemMediumPressureCounter {
+			return fmt.Errorf("MemMediumPressureCounter this(%v) Not Equal that(%v)", *this.MemMediumPressureCounter, *that1.MemMediumPressureCounter)
+		}
+	} else if this.MemMediumPressureCounter != nil {
+		return fmt.Errorf("this.MemMediumPressureCounter == nil && that.MemMediumPressureCounter != nil")
+	} else if that1.MemMediumPressureCounter != nil {
+		return fmt.Errorf("MemMediumPressureCounter this(%v) Not Equal that(%v)", this.MemMediumPressureCounter, that1.MemMediumPressureCounter)
+	}
+	if this.MemCriticalPressureCounter != nil && that1.MemCriticalPressureCounter != nil {
+		if *this.MemCriticalPressureCounter != *that1.MemCriticalPressureCounter {
+			return fmt.Errorf("MemCriticalPressureCounter this(%v) Not Equal that(%v)", *this.MemCriticalPressureCounter, *that1.MemCriticalPressureCounter)
+		}
+	} else if this.MemCriticalPressureCounter != nil {
+		return fmt.Errorf("this.MemCriticalPressureCounter == nil && that.MemCriticalPressureCounter != nil")
+	} else if that1.MemCriticalPressureCounter != nil {
+		return fmt.Errorf("MemCriticalPressureCounter this(%v) Not Equal that(%v)", this.MemCriticalPressureCounter, that1.MemCriticalPressureCounter)
+	}
+	if this.DiskLimitBytes != nil && that1.DiskLimitBytes != nil {
+		if *this.DiskLimitBytes != *that1.DiskLimitBytes {
+			return fmt.Errorf("DiskLimitBytes this(%v) Not Equal that(%v)", *this.DiskLimitBytes, *that1.DiskLimitBytes)
+		}
+	} else if this.DiskLimitBytes != nil {
+		return fmt.Errorf("this.DiskLimitBytes == nil && that.DiskLimitBytes != nil")
+	} else if that1.DiskLimitBytes != nil {
+		return fmt.Errorf("DiskLimitBytes this(%v) Not Equal that(%v)", this.DiskLimitBytes, that1.DiskLimitBytes)
+	}
+	if this.DiskUsedBytes != nil && that1.DiskUsedBytes != nil {
+		if *this.DiskUsedBytes != *that1.DiskUsedBytes {
+			return fmt.Errorf("DiskUsedBytes this(%v) Not Equal that(%v)", *this.DiskUsedBytes, *that1.DiskUsedBytes)
+		}
+	} else if this.DiskUsedBytes != nil {
+		return fmt.Errorf("this.DiskUsedBytes == nil && that.DiskUsedBytes != nil")
+	} else if that1.DiskUsedBytes != nil {
+		return fmt.Errorf("DiskUsedBytes this(%v) Not Equal that(%v)", this.DiskUsedBytes, that1.DiskUsedBytes)
+	}
+	if len(this.DiskStatistics) != len(that1.DiskStatistics) {
+		return fmt.Errorf("DiskStatistics this(%v) Not Equal that(%v)", len(this.DiskStatistics), len(that1.DiskStatistics))
+	}
+	for i := range this.DiskStatistics {
+		if !this.DiskStatistics[i].Equal(&that1.DiskStatistics[i]) {
+			return fmt.Errorf("DiskStatistics this[%v](%v) Not Equal that[%v](%v)", i, this.DiskStatistics[i], i, that1.DiskStatistics[i])
+		}
+	}
+	if !this.BlkioStatistics.Equal(that1.BlkioStatistics) {
+		return fmt.Errorf("BlkioStatistics this(%v) Not Equal that(%v)", this.BlkioStatistics, that1.BlkioStatistics)
+	}
+	if !this.Perf.Equal(that1.Perf) {
+		return fmt.Errorf("Perf this(%v) Not Equal that(%v)", this.Perf, that1.Perf)
+	}
+	if this.NetRxPackets != nil && that1.NetRxPackets != nil {
+		if *this.NetRxPackets != *that1.NetRxPackets {
+			return fmt.Errorf("NetRxPackets this(%v) Not Equal that(%v)", *this.NetRxPackets, *that1.NetRxPackets)
+		}
+	} else if this.NetRxPackets != nil {
+		return fmt.Errorf("this.NetRxPackets == nil && that.NetRxPackets != nil")
+	} else if that1.NetRxPackets != nil {
+		return fmt.Errorf("NetRxPackets this(%v) Not Equal that(%v)", this.NetRxPackets, that1.NetRxPackets)
+	}
+	if this.NetRxBytes != nil && that1.NetRxBytes != nil {
+		if *this.NetRxBytes != *that1.NetRxBytes {
+			return fmt.Errorf("NetRxBytes this(%v) Not Equal that(%v)", *this.NetRxBytes, *that1.NetRxBytes)
+		}
+	} else if this.NetRxBytes != nil {
+		return fmt.Errorf("this.NetRxBytes == nil && that.NetRxBytes != nil")
+	} else if that1.NetRxBytes != nil {
+		return fmt.Errorf("NetRxBytes this(%v) Not Equal that(%v)", this.NetRxBytes, that1.NetRxBytes)
+	}
+	if this.NetRxErrors != nil && that1.NetRxErrors != nil {
+		if *this.NetRxErrors != *that1.NetRxErrors {
+			return fmt.Errorf("NetRxErrors this(%v) Not Equal that(%v)", *this.NetRxErrors, *that1.NetRxErrors)
+		}
+	} else if this.NetRxErrors != nil {
+		return fmt.Errorf("this.NetRxErrors == nil && that.NetRxErrors != nil")
+	} else if that1.NetRxErrors != nil {
+		return fmt.Errorf("NetRxErrors this(%v) Not Equal that(%v)", this.NetRxErrors, that1.NetRxErrors)
+	}
+	if this.NetRxDropped != nil && that1.NetRxDropped != nil {
+		if *this.NetRxDropped != *that1.NetRxDropped {
+			return fmt.Errorf("NetRxDropped this(%v) Not Equal that(%v)", *this.NetRxDropped, *that1.NetRxDropped)
+		}
+	} else if this.NetRxDropped != nil {
+		return fmt.Errorf("this.NetRxDropped == nil && that.NetRxDropped != nil")
+	} else if that1.NetRxDropped != nil {
+		return fmt.Errorf("NetRxDropped this(%v) Not Equal that(%v)", this.NetRxDropped, that1.NetRxDropped)
+	}
+	if this.NetTxPackets != nil && that1.NetTxPackets != nil {
+		if *this.NetTxPackets != *that1.NetTxPackets {
+			return fmt.Errorf("NetTxPackets this(%v) Not Equal that(%v)", *this.NetTxPackets, *that1.NetTxPackets)
+		}
+	} else if this.NetTxPackets != nil {
+		return fmt.Errorf("this.NetTxPackets == nil && that.NetTxPackets != nil")
+	} else if that1.NetTxPackets != nil {
+		return fmt.Errorf("NetTxPackets this(%v) Not Equal that(%v)", this.NetTxPackets, that1.NetTxPackets)
+	}
+	if this.NetTxBytes != nil && that1.NetTxBytes != nil {
+		if *this.NetTxBytes != *that1.NetTxBytes {
+			return fmt.Errorf("NetTxBytes this(%v) Not Equal that(%v)", *this.NetTxBytes, *that1.NetTxBytes)
+		}
+	} else if this.NetTxBytes != nil {
+		return fmt.Errorf("this.NetTxBytes == nil && that.NetTxBytes != nil")
+	} else if that1.NetTxBytes != nil {
+		return fmt.Errorf("NetTxBytes this(%v) Not Equal that(%v)", this.NetTxBytes, that1.NetTxBytes)
+	}
+	if this.NetTxErrors != nil && that1.NetTxErrors != nil {
+		if *this.NetTxErrors != *that1.NetTxErrors {
+			return fmt.Errorf("NetTxErrors this(%v) Not Equal that(%v)", *this.NetTxErrors, *that1.NetTxErrors)
+		}
+	} else if this.NetTxErrors != nil {
+		return fmt.Errorf("this.NetTxErrors == nil && that.NetTxErrors != nil")
+	} else if that1.NetTxErrors != nil {
+		return fmt.Errorf("NetTxErrors this(%v) Not Equal that(%v)", this.NetTxErrors, that1.NetTxErrors)
+	}
+	if this.NetTxDropped != nil && that1.NetTxDropped != nil {
+		if *this.NetTxDropped != *that1.NetTxDropped {
+			return fmt.Errorf("NetTxDropped this(%v) Not Equal that(%v)", *this.NetTxDropped, *that1.NetTxDropped)
+		}
+	} else if this.NetTxDropped != nil {
+		return fmt.Errorf("this.NetTxDropped == nil && that.NetTxDropped != nil")
+	} else if that1.NetTxDropped != nil {
+		return fmt.Errorf("NetTxDropped this(%v) Not Equal that(%v)", this.NetTxDropped, that1.NetTxDropped)
+	}
+	if this.NetTCPRttMicrosecsP50 != nil && that1.NetTCPRttMicrosecsP50 != nil {
+		if *this.NetTCPRttMicrosecsP50 != *that1.NetTCPRttMicrosecsP50 {
+			return fmt.Errorf("NetTCPRttMicrosecsP50 this(%v) Not Equal that(%v)", *this.NetTCPRttMicrosecsP50, *that1.NetTCPRttMicrosecsP50)
+		}
+	} else if this.NetTCPRttMicrosecsP50 != nil {
+		return fmt.Errorf("this.NetTCPRttMicrosecsP50 == nil && that.NetTCPRttMicrosecsP50 != nil")
+	} else if that1.NetTCPRttMicrosecsP50 != nil {
+		return fmt.Errorf("NetTCPRttMicrosecsP50 this(%v) Not Equal that(%v)", this.NetTCPRttMicrosecsP50, that1.NetTCPRttMicrosecsP50)
+	}
+	if this.NetTCPRttMicrosecsP90 != nil && that1.NetTCPRttMicrosecsP90 != nil {
+		if *this.NetTCPRttMicrosecsP90 != *that1.NetTCPRttMicrosecsP90 {
+			return fmt.Errorf("NetTCPRttMicrosecsP90 this(%v) Not Equal that(%v)", *this.NetTCPRttMicrosecsP90, *that1.NetTCPRttMicrosecsP90)
+		}
+	} else if this.NetTCPRttMicrosecsP90 != nil {
+		return fmt.Errorf("this.NetTCPRttMicrosecsP90 == nil && that.NetTCPRttMicrosecsP90 != nil")
+	} else if that1.NetTCPRttMicrosecsP90 != nil {
+		return fmt.Errorf("NetTCPRttMicrosecsP90 this(%v) Not Equal that(%v)", this.NetTCPRttMicrosecsP90, that1.NetTCPRttMicrosecsP90)
+	}
+	if this.NetTCPRttMicrosecsP95 != nil && that1.NetTCPRttMicrosecsP95 != nil {
+		if *this.NetTCPRttMicrosecsP95 != *that1.NetTCPRttMicrosecsP95 {
+			return fmt.Errorf("NetTCPRttMicrosecsP95 this(%v) Not Equal that(%v)", *this.NetTCPRttMicrosecsP95, *that1.NetTCPRttMicrosecsP95)
+		}
+	} else if this.NetTCPRttMicrosecsP95 != nil {
+		return fmt.Errorf("this.NetTCPRttMicrosecsP95 == nil && that.NetTCPRttMicrosecsP95 != nil")
+	} else if that1.NetTCPRttMicrosecsP95 != nil {
+		return fmt.Errorf("NetTCPRttMicrosecsP95 this(%v) Not Equal that(%v)", this.NetTCPRttMicrosecsP95, that1.NetTCPRttMicrosecsP95)
+	}
+	if this.NetTCPRttMicrosecsP99 != nil && that1.NetTCPRttMicrosecsP99 != nil {
+		if *this.NetTCPRttMicrosecsP99 != *that1.NetTCPRttMicrosecsP99 {
+			return fmt.Errorf("NetTCPRttMicrosecsP99 this(%v) Not Equal that(%v)", *this.NetTCPRttMicrosecsP99, *that1.NetTCPRttMicrosecsP99)
+		}
+	} else if this.NetTCPRttMicrosecsP99 != nil {
+		return fmt.Errorf("this.NetTCPRttMicrosecsP99 == nil && that.NetTCPRttMicrosecsP99 != nil")
+	} else if that1.NetTCPRttMicrosecsP99 != nil {
+		return fmt.Errorf("NetTCPRttMicrosecsP99 this(%v) Not Equal that(%v)", this.NetTCPRttMicrosecsP99, that1.NetTCPRttMicrosecsP99)
+	}
+	if this.NetTCPActiveConnections != nil && that1.NetTCPActiveConnections != nil {
+		if *this.NetTCPActiveConnections != *that1.NetTCPActiveConnections {
+			return fmt.Errorf("NetTCPActiveConnections this(%v) Not Equal that(%v)", *this.NetTCPActiveConnections, *that1.NetTCPActiveConnections)
+		}
+	} else if this.NetTCPActiveConnections != nil {
+		return fmt.Errorf("this.NetTCPActiveConnections == nil && that.NetTCPActiveConnections != nil")
+	} else if that1.NetTCPActiveConnections != nil {
+		return fmt.Errorf("NetTCPActiveConnections this(%v) Not Equal that(%v)", this.NetTCPActiveConnections, that1.NetTCPActiveConnections)
+	}
+	if this.NetTCPTimeWaitConnections != nil && that1.NetTCPTimeWaitConnections != nil {
+		if *this.NetTCPTimeWaitConnections != *that1.NetTCPTimeWaitConnections {
+			return fmt.Errorf("NetTCPTimeWaitConnections this(%v) Not Equal that(%v)", *this.NetTCPTimeWaitConnections, *that1.NetTCPTimeWaitConnections)
+		}
+	} else if this.NetTCPTimeWaitConnections != nil {
+		return fmt.Errorf("this.NetTCPTimeWaitConnections == nil && that.NetTCPTimeWaitConnections != nil")
+	} else if that1.NetTCPTimeWaitConnections != nil {
+		return fmt.Errorf("NetTCPTimeWaitConnections this(%v) Not Equal that(%v)", this.NetTCPTimeWaitConnections, that1.NetTCPTimeWaitConnections)
+	}
+	if len(this.NetTrafficControlStatistics) != len(that1.NetTrafficControlStatistics) {
+		return fmt.Errorf("NetTrafficControlStatistics this(%v) Not Equal that(%v)", len(this.NetTrafficControlStatistics), len(that1.NetTrafficControlStatistics))
+	}
+	for i := range this.NetTrafficControlStatistics {
+		if !this.NetTrafficControlStatistics[i].Equal(&that1.NetTrafficControlStatistics[i]) {
+			return fmt.Errorf("NetTrafficControlStatistics this[%v](%v) Not Equal that[%v](%v)", i, this.NetTrafficControlStatistics[i], i, that1.NetTrafficControlStatistics[i])
+		}
+	}
+	if !this.NetSNMPStatistics.Equal(that1.NetSNMPStatistics) {
+		return fmt.Errorf("NetSNMPStatistics this(%v) Not Equal that(%v)", this.NetSNMPStatistics, that1.NetSNMPStatistics)
+	}
+	return nil
+}
+func (this *ResourceStatistics) Equal(that interface{}) bool {
+	if that == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	}
+
+	that1, ok := that.(*ResourceStatistics)
+	if !ok {
+		that2, ok := that.(ResourceStatistics)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	} else if this == nil {
+		return false
+	}
+	if this.Timestamp != that1.Timestamp {
+		return false
+	}
+	if this.Processes != nil && that1.Processes != nil {
+		if *this.Processes != *that1.Processes {
+			return false
+		}
+	} else if this.Processes != nil {
+		return false
+	} else if that1.Processes != nil {
+		return false
+	}
+	if this.Threads != nil && that1.Threads != nil {
+		if *this.Threads != *that1.Threads {
+			return false
+		}
+	} else if this.Threads != nil {
+		return false
+	} else if that1.Threads != nil {
+		return false
+	}
+	if this.CPUsUserTimeSecs != nil && that1.CPUsUserTimeSecs != nil {
+		if *this.CPUsUserTimeSecs != *that1.CPUsUserTimeSecs {
+			return false
+		}
+	} else if this.CPUsUserTimeSecs != nil {
+		return false
+	} else if that1.CPUsUserTimeSecs != nil {
+		return false
+	}
+	if this.CPUsSystemTimeSecs != nil && that1.CPUsSystemTimeSecs != nil {
+		if *this.CPUsSystemTimeSecs != *that1.CPUsSystemTimeSecs {
+			return false
+		}
+	} else if this.CPUsSystemTimeSecs != nil {
+		return false
+	} else if that1.CPUsSystemTimeSecs != nil {
+		return false
+	}
+	if this.CPUsLimit != nil && that1.CPUsLimit != nil {
+		if *this.CPUsLimit != *that1.CPUsLimit {
+			return false
+		}
+	} else if this.CPUsLimit != nil {
+		return false
+	} else if that1.CPUsLimit != nil {
+		return false
+	}
+	if this.CPUsNrPeriods != nil && that1.CPUsNrPeriods != nil {
+		if *this.CPUsNrPeriods != *that1.CPUsNrPeriods {
+			return false
+		}
+	} else if this.CPUsNrPeriods != nil {
+		return false
+	} else if that1.CPUsNrPeriods != nil {
+		return false
+	}
+	if this.CPUsNrThrottled != nil && that1.CPUsNrThrottled != nil {
+		if *this.CPUsNrThrottled != *that1.CPUsNrThrottled {
+			return false
+		}
+	} else if this.CPUsNrThrottled != nil {
+		return false
+	} else if that1.CPUsNrThrottled != nil {
+		return false
+	}
+	if this.CPUsThrottledTimeSecs != nil && that1.CPUsThrottledTimeSecs != nil {
+		if *this.CPUsThrottledTimeSecs != *that1.CPUsThrottledTimeSecs {
+			return false
+		}
+	} else if this.CPUsThrottledTimeSecs != nil {
+		return false
+	} else if that1.CPUsThrottledTimeSecs != nil {
+		return false
+	}
+	if this.MemTotalBytes != nil && that1.MemTotalBytes != nil {
+		if *this.MemTotalBytes != *that1.MemTotalBytes {
+			return false
+		}
+	} else if this.MemTotalBytes != nil {
+		return false
+	} else if that1.MemTotalBytes != nil {
+		return false
+	}
+	if this.MemTotalMemswBytes != nil && that1.MemTotalMemswBytes != nil {
+		if *this.MemTotalMemswBytes != *that1.MemTotalMemswBytes {
+			return false
+		}
+	} else if this.MemTotalMemswBytes != nil {
+		return false
+	} else if that1.MemTotalMemswBytes != nil {
+		return false
+	}
+	if this.MemLimitBytes != nil && that1.MemLimitBytes != nil {
+		if *this.MemLimitBytes != *that1.MemLimitBytes {
+			return false
+		}
+	} else if this.MemLimitBytes != nil {
+		return false
+	} else if that1.MemLimitBytes != nil {
+		return false
+	}
+	if this.MemSoftLimitBytes != nil && that1.MemSoftLimitBytes != nil {
+		if *this.MemSoftLimitBytes != *that1.MemSoftLimitBytes {
+			return false
+		}
+	} else if this.MemSoftLimitBytes != nil {
+		return false
+	} else if that1.MemSoftLimitBytes != nil {
+		return false
+	}
+	if this.MemFileBytes != nil && that1.MemFileBytes != nil {
+		if *this.MemFileBytes != *that1.MemFileBytes {
+			return false
+		}
+	} else if this.MemFileBytes != nil {
+		return false
+	} else if that1.MemFileBytes != nil {
+		return false
+	}
+	if this.MemAnonBytes != nil && that1.MemAnonBytes != nil {
+		if *this.MemAnonBytes != *that1.MemAnonBytes {
+			return false
+		}
+	} else if this.MemAnonBytes != nil {
+		return false
+	} else if that1.MemAnonBytes != nil {
+		return false
+	}
+	if this.MemCacheBytes != nil && that1.MemCacheBytes != nil {
+		if *this.MemCacheBytes != *that1.MemCacheBytes {
+			return false
+		}
+	} else if this.MemCacheBytes != nil {
+		return false
+	} else if that1.MemCacheBytes != nil {
+		return false
+	}
+	if this.MemRSSBytes != nil && that1.MemRSSBytes != nil {
+		if *this.MemRSSBytes != *that1.MemRSSBytes {
+			return false
+		}
+	} else if this.MemRSSBytes != nil {
+		return false
+	} else if that1.MemRSSBytes != nil {
+		return false
+	}
+	if this.MemMappedFileBytes != nil && that1.MemMappedFileBytes != nil {
+		if *this.MemMappedFileBytes != *that1.MemMappedFileBytes {
+			return false
+		}
+	} else if this.MemMappedFileBytes != nil {
+		return false
+	} else if that1.MemMappedFileBytes != nil {
+		return false
+	}
+	if this.MemSwapBytes != nil && that1.MemSwapBytes != nil {
+		if *this.MemSwapBytes != *that1.MemSwapBytes {
+			return false
+		}
+	} else if this.MemSwapBytes != nil {
+		return false
+	} else if that1.MemSwapBytes != nil {
+		return false
+	}
+	if this.MemUnevictableBytes != nil && that1.MemUnevictableBytes != nil {
+		if *this.MemUnevictableBytes != *that1.MemUnevictableBytes {
+			return false
+		}
+	} else if this.MemUnevictableBytes != nil {
+		return false
+	} else if that1.MemUnevictableBytes != nil {
+		return false
+	}
+	if this.MemLowPressureCounter != nil && that1.MemLowPressureCounter != nil {
+		if *this.MemLowPressureCounter != *that1.MemLowPressureCounter {
+			return false
+		}
+	} else if this.MemLowPressureCounter != nil {
+		return false
+	} else if that1.MemLowPressureCounter != nil {
+		return false
+	}
+	if this.MemMediumPressureCounter != nil && that1.MemMediumPressureCounter != nil {
+		if *this.MemMediumPressureCounter != *that1.MemMediumPressureCounter {
+			return false
+		}
+	} else if this.MemMediumPressureCounter != nil {
+		return false
+	} else if that1.MemMediumPressureCounter != nil {
+		return false
+	}
+	if this.MemCriticalPressureCounter != nil && that1.MemCriticalPressureCounter != nil {
+		if *this.MemCriticalPressureCounter != *that1.MemCriticalPressureCounter {
+			return false
+		}
+	} else if this.MemCriticalPressureCounter != nil {
+		return false
+	} else if that1.MemCriticalPressureCounter != nil {
+		return false
+	}
+	if this.DiskLimitBytes != nil && that1.DiskLimitBytes != nil {
+		if *this.DiskLimitBytes != *that1.DiskLimitBytes {
+			return false
+		}
+	} else if this.DiskLimitBytes != nil {
+		return false
+	} else if that1.DiskLimitBytes != nil {
+		return false
+	}
+	if this.DiskUsedBytes != nil && that1.DiskUsedBytes != nil {
+		if *this.DiskUsedBytes != *that1.DiskUsedBytes {
+			return false
+		}
+	} else if this.DiskUsedBytes != nil {
+		return false
+	} else if that1.DiskUsedBytes != nil {
+		return false
+	}
+	if len(this.DiskStatistics) != len(that1.DiskStatistics) {
+		return false
+	}
+	for i := range this.DiskStatistics {
+		if !this.DiskStatistics[i].Equal(&that1.DiskStatistics[i]) {
+			return false
+		}
+	}
+	if !this.BlkioStatistics.Equal(that1.BlkioStatistics) {
+		return false
+	}
+	if !this.Perf.Equal(that1.Perf) {
+		return false
+	}
+	if this.NetRxPackets != nil && that1.NetRxPackets != nil {
+		if *this.NetRxPackets != *that1.NetRxPackets {
+			return false
+		}
+	} else if this.NetRxPackets != nil {
+		return false
+	} else if that1.NetRxPackets != nil {
+		return false
+	}
+	if this.NetRxBytes != nil && that1.NetRxBytes != nil {
+		if *this.NetRxBytes != *that1.NetRxBytes {
+			return false
+		}
+	} else if this.NetRxBytes != nil {
+		return false
+	} else if that1.NetRxBytes != nil {
+		return false
+	}
+	if this.NetRxErrors != nil && that1.NetRxErrors != nil {
+		if *this.NetRxErrors != *that1.NetRxErrors {
+			return false
+		}
+	} else if this.NetRxErrors != nil {
+		return false
+	} else if that1.NetRxErrors != nil {
+		return false
+	}
+	if this.NetRxDropped != nil && that1.NetRxDropped != nil {
+		if *this.NetRxDropped != *that1.NetRxDropped {
+			return false
+		}
+	} else if this.NetRxDropped != nil {
+		return false
+	} else if that1.NetRxDropped != nil {
+		return false
+	}
+	if this.NetTxPackets != nil && that1.NetTxPackets != nil {
+		if *this.NetTxPackets != *that1.NetTxPackets {
+			return false
+		}
+	} else if this.NetTxPackets != nil {
+		return false
+	} else if that1.NetTxPackets != nil {
+		return false
+	}
+	if this.NetTxBytes != nil && that1.NetTxBytes != nil {
+		if *this.NetTxBytes != *that1.NetTxBytes {
+			return false
+		}
+	} else if this.NetTxBytes != nil {
+		return false
+	} else if that1.NetTxBytes != nil {
+		return false
+	}
+	if this.NetTxErrors != nil && that1.NetTxErrors != nil {
+		if *this.NetTxErrors != *that1.NetTxErrors {
+			return false
+		}
+	} else if this.NetTxErrors != nil {
+		return false
+	} else if that1.NetTxErrors != nil {
+		return false
+	}
+	if this.NetTxDropped != nil && that1.NetTxDropped != nil {
+		if *this.NetTxDropped != *that1.NetTxDropped {
+			return false
+		}
+	} else if this.NetTxDropped != nil {
+		return false
+	} else if that1.NetTxDropped != nil {
+		return false
+	}
+	if this.NetTCPRttMicrosecsP50 != nil && that1.NetTCPRttMicrosecsP50 != nil {
+		if *this.NetTCPRttMicrosecsP50 != *that1.NetTCPRttMicrosecsP50 {
+			return false
+		}
+	} else if this.NetTCPRttMicrosecsP50 != nil {
+		return false
+	} else if that1.NetTCPRttMicrosecsP50 != nil {
+		return false
+	}
+	if this.NetTCPRttMicrosecsP90 != nil && that1.NetTCPRttMicrosecsP90 != nil {
+		if *this.NetTCPRttMicrosecsP90 != *that1.NetTCPRttMicrosecsP90 {
+			return false
+		}
+	} else if this.NetTCPRttMicrosecsP90 != nil {
+		return false
+	} else if that1.NetTCPRttMicrosecsP90 != nil {
+		return false
+	}
+	if this.NetTCPRttMicrosecsP95 != nil && that1.NetTCPRttMicrosecsP95 != nil {
+		if *this.NetTCPRttMicrosecsP95 != *that1.NetTCPRttMicrosecsP95 {
+			return false
+		}
+	} else if this.NetTCPRttMicrosecsP95 != nil {
+		return false
+	} else if that1.NetTCPRttMicrosecsP95 != nil {
+		return false
+	}
+	if this.NetTCPRttMicrosecsP99 != nil && that1.NetTCPRttMicrosecsP99 != nil {
+		if *this.NetTCPRttMicrosecsP99 != *that1.NetTCPRttMicrosecsP99 {
+			return false
+		}
+	} else if this.NetTCPRttMicrosecsP99 != nil {
+		return false
+	} else if that1.NetTCPRttMicrosecsP99 != nil {
+		return false
+	}
+	if this.NetTCPActiveConnections != nil && that1.NetTCPActiveConnections != nil {
+		if *this.NetTCPActiveConnections != *that1.NetTCPActiveConnections {
+			return false
+		}
+	} else if this.NetTCPActiveConnections != nil {
+		return false
+	} else if that1.NetTCPActiveConnections != nil {
+		return false
+	}
+	if this.NetTCPTimeWaitConnections != nil && that1.NetTCPTimeWaitConnections != nil {
+		if *this.NetTCPTimeWaitConnections != *that1.NetTCPTimeWaitConnections {
+			return false
+		}
+	} else if this.NetTCPTimeWaitConnections != nil {
+		return false
+	} else if that1.NetTCPTimeWaitConnections != nil {
+		return false
+	}
+	if len(this.NetTrafficControlStatistics) != len(that1.NetTrafficControlStatistics) {
+		return false
+	}
+	for i := range this.NetTrafficControlStatistics {
+		if !this.NetTrafficControlStatistics[i].Equal(&that1.NetTrafficControlStatistics[i]) {
+			return false
+		}
+	}
+	if !this.NetSNMPStatistics.Equal(that1.NetSNMPStatistics) {
+		return false
+	}
+	return true
+}
+func (this *ResourceUsage) VerboseEqual(that interface{}) error {
+	if that == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that == nil && this != nil")
+	}
+
+	that1, ok := that.(*ResourceUsage)
+	if !ok {
+		that2, ok := that.(ResourceUsage)
+		if ok {
+			that1 = &that2
+		} else {
+			return fmt.Errorf("that is not of type *ResourceUsage")
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that is type *ResourceUsage but is nil && this != nil")
+	} else if this == nil {
+		return fmt.Errorf("that is type *ResourceUsage but is not nil && this == nil")
+	}
+	if len(this.Executors) != len(that1.Executors) {
+		return fmt.Errorf("Executors this(%v) Not Equal that(%v)", len(this.Executors), len(that1.Executors))
+	}
+	for i := range this.Executors {
+		if !this.Executors[i].Equal(&that1.Executors[i]) {
+			return fmt.Errorf("Executors this[%v](%v) Not Equal that[%v](%v)", i, this.Executors[i], i, that1.Executors[i])
+		}
+	}
+	if len(this.Total) != len(that1.Total) {
+		return fmt.Errorf("Total this(%v) Not Equal that(%v)", len(this.Total), len(that1.Total))
+	}
+	for i := range this.Total {
+		if !this.Total[i].Equal(&that1.Total[i]) {
+			return fmt.Errorf("Total this[%v](%v) Not Equal that[%v](%v)", i, this.Total[i], i, that1.Total[i])
+		}
+	}
+	return nil
+}
+func (this *ResourceUsage) Equal(that interface{}) bool {
+	if that == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	}
+
+	that1, ok := that.(*ResourceUsage)
+	if !ok {
+		that2, ok := that.(ResourceUsage)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	} else if this == nil {
+		return false
+	}
+	if len(this.Executors) != len(that1.Executors) {
+		return false
+	}
+	for i := range this.Executors {
+		if !this.Executors[i].Equal(&that1.Executors[i]) {
+			return false
+		}
+	}
+	if len(this.Total) != len(that1.Total) {
+		return false
+	}
+	for i := range this.Total {
+		if !this.Total[i].Equal(&that1.Total[i]) {
+			return false
+		}
+	}
+	return true
+}
+func (this *ResourceUsage_Executor) VerboseEqual(that interface{}) error {
+	if that == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that == nil && this != nil")
+	}
+
+	that1, ok := that.(*ResourceUsage_Executor)
+	if !ok {
+		that2, ok := that.(ResourceUsage_Executor)
+		if ok {
+			that1 = &that2
+		} else {
+			return fmt.Errorf("that is not of type *ResourceUsage_Executor")
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that is type *ResourceUsage_Executor but is nil && this != nil")
+	} else if this == nil {
+		return fmt.Errorf("that is type *ResourceUsage_Executor but is not nil && this == nil")
+	}
+	if !this.ExecutorInfo.Equal(&that1.ExecutorInfo) {
+		return fmt.Errorf("ExecutorInfo this(%v) Not Equal that(%v)", this.ExecutorInfo, that1.ExecutorInfo)
+	}
+	if len(this.Allocated) != len(that1.Allocated) {
+		return fmt.Errorf("Allocated this(%v) Not Equal that(%v)", len(this.Allocated), len(that1.Allocated))
+	}
+	for i := range this.Allocated {
+		if !this.Allocated[i].Equal(&that1.Allocated[i]) {
+			return fmt.Errorf("Allocated this[%v](%v) Not Equal that[%v](%v)", i, this.Allocated[i], i, that1.Allocated[i])
+		}
+	}
+	if !this.Statistics.Equal(that1.Statistics) {
+		return fmt.Errorf("Statistics this(%v) Not Equal that(%v)", this.Statistics, that1.Statistics)
+	}
+	if !this.ContainerID.Equal(&that1.ContainerID) {
+		return fmt.Errorf("ContainerID this(%v) Not Equal that(%v)", this.ContainerID, that1.ContainerID)
+	}
+	if len(this.Tasks) != len(that1.Tasks) {
+		return fmt.Errorf("Tasks this(%v) Not Equal that(%v)", len(this.Tasks), len(that1.Tasks))
+	}
+	for i := range this.Tasks {
+		if !this.Tasks[i].Equal(&that1.Tasks[i]) {
+			return fmt.Errorf("Tasks this[%v](%v) Not Equal that[%v](%v)", i, this.Tasks[i], i, that1.Tasks[i])
+		}
+	}
+	return nil
+}
+func (this *ResourceUsage_Executor) Equal(that interface{}) bool {
+	if that == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	}
+
+	that1, ok := that.(*ResourceUsage_Executor)
+	if !ok {
+		that2, ok := that.(ResourceUsage_Executor)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	} else if this == nil {
+		return false
+	}
+	if !this.ExecutorInfo.Equal(&that1.ExecutorInfo) {
+		return false
+	}
+	if len(this.Allocated) != len(that1.Allocated) {
+		return false
+	}
+	for i := range this.Allocated {
+		if !this.Allocated[i].Equal(&that1.Allocated[i]) {
+			return false
+		}
+	}
+	if !this.Statistics.Equal(that1.Statistics) {
+		return false
+	}
+	if !this.ContainerID.Equal(&that1.ContainerID) {
+		return false
+	}
+	if len(this.Tasks) != len(that1.Tasks) {
+		return false
+	}
+	for i := range this.Tasks {
+		if !this.Tasks[i].Equal(&that1.Tasks[i]) {
+			return false
+		}
+	}
+	return true
+}
+func (this *ResourceUsage_Executor_Task) VerboseEqual(that interface{}) error {
+	if that == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that == nil && this != nil")
+	}
+
+	that1, ok := that.(*ResourceUsage_Executor_Task)
+	if !ok {
+		that2, ok := that.(ResourceUsage_Executor_Task)
+		if ok {
+			that1 = &that2
+		} else {
+			return fmt.Errorf("that is not of type *ResourceUsage_Executor_Task")
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that is type *ResourceUsage_Executor_Task but is nil && this != nil")
+	} else if this == nil {
+		return fmt.Errorf("that is type *ResourceUsage_Executor_Task but is not nil && this == nil")
+	}
+	if this.Name != that1.Name {
+		return fmt.Errorf("Name this(%v) Not Equal that(%v)", this.Name, that1.Name)
+	}
+	if !this.ID.Equal(&that1.ID) {
+		return fmt.Errorf("ID this(%v) Not Equal that(%v)", this.ID, that1.ID)
+	}
+	if len(this.Resources) != len(that1.Resources) {
+		return fmt.Errorf("Resources this(%v) Not Equal that(%v)", len(this.Resources), len(that1.Resources))
+	}
+	for i := range this.Resources {
+		if !this.Resources[i].Equal(&that1.Resources[i]) {
+			return fmt.Errorf("Resources this[%v](%v) Not Equal that[%v](%v)", i, this.Resources[i], i, that1.Resources[i])
+		}
+	}
+	if !this.Labels.Equal(that1.Labels) {
+		return fmt.Errorf("Labels this(%v) Not Equal that(%v)", this.Labels, that1.Labels)
+	}
+	return nil
+}
+func (this *ResourceUsage_Executor_Task) Equal(that interface{}) bool {
+	if that == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	}
+
+	that1, ok := that.(*ResourceUsage_Executor_Task)
+	if !ok {
+		that2, ok := that.(ResourceUsage_Executor_Task)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	} else if this == nil {
+		return false
+	}
+	if this.Name != that1.Name {
+		return false
+	}
+	if !this.ID.Equal(&that1.ID) {
+		return false
+	}
+	if len(this.Resources) != len(that1.Resources) {
+		return false
+	}
+	for i := range this.Resources {
+		if !this.Resources[i].Equal(&that1.Resources[i]) {
+			return false
+		}
+	}
+	if !this.Labels.Equal(that1.Labels) {
+		return false
+	}
+	return true
+}
+func (this *PerfStatistics) VerboseEqual(that interface{}) error {
+	if that == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that == nil && this != nil")
+	}
+
+	that1, ok := that.(*PerfStatistics)
+	if !ok {
+		that2, ok := that.(PerfStatistics)
+		if ok {
+			that1 = &that2
+		} else {
+			return fmt.Errorf("that is not of type *PerfStatistics")
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that is type *PerfStatistics but is nil && this != nil")
+	} else if this == nil {
+		return fmt.Errorf("that is type *PerfStatistics but is not nil && this == nil")
+	}
+	if this.Timestamp != that1.Timestamp {
+		return fmt.Errorf("Timestamp this(%v) Not Equal that(%v)", this.Timestamp, that1.Timestamp)
+	}
+	if this.Duration != that1.Duration {
+		return fmt.Errorf("Duration this(%v) Not Equal that(%v)", this.Duration, that1.Duration)
+	}
+	if this.Cycles != nil && that1.Cycles != nil {
+		if *this.Cycles != *that1.Cycles {
+			return fmt.Errorf("Cycles this(%v) Not Equal that(%v)", *this.Cycles, *that1.Cycles)
+		}
+	} else if this.Cycles != nil {
+		return fmt.Errorf("this.Cycles == nil && that.Cycles != nil")
+	} else if that1.Cycles != nil {
+		return fmt.Errorf("Cycles this(%v) Not Equal that(%v)", this.Cycles, that1.Cycles)
+	}
+	if this.StalledCyclesFrontend != nil && that1.StalledCyclesFrontend != nil {
+		if *this.StalledCyclesFrontend != *that1.StalledCyclesFrontend {
+			return fmt.Errorf("StalledCyclesFrontend this(%v) Not Equal that(%v)", *this.StalledCyclesFrontend, *that1.StalledCyclesFrontend)
+		}
+	} else if this.StalledCyclesFrontend != nil {
+		return fmt.Errorf("this.StalledCyclesFrontend == nil && that.StalledCyclesFrontend != nil")
+	} else if that1.StalledCyclesFrontend != nil {
+		return fmt.Errorf("StalledCyclesFrontend this(%v) Not Equal that(%v)", this.StalledCyclesFrontend, that1.StalledCyclesFrontend)
+	}
+	if this.StalledCyclesBackend != nil && that1.StalledCyclesBackend != nil {
+		if *this.StalledCyclesBackend != *that1.StalledCyclesBackend {
+			return fmt.Errorf("StalledCyclesBackend this(%v) Not Equal that(%v)", *this.StalledCyclesBackend, *that1.StalledCyclesBackend)
+		}
+	} else if this.StalledCyclesBackend != nil {
+		return fmt.Errorf("this.StalledCyclesBackend == nil && that.StalledCyclesBackend != nil")
+	} else if that1.StalledCyclesBackend != nil {
+		return fmt.Errorf("StalledCyclesBackend this(%v) Not Equal that(%v)", this.StalledCyclesBackend, that1.StalledCyclesBackend)
+	}
+	if this.Instructions != nil && that1.Instructions != nil {
+		if *this.Instructions != *that1.Instructions {
+			return fmt.Errorf("Instructions this(%v) Not Equal that(%v)", *this.Instructions, *that1.Instructions)
+		}
+	} else if this.Instructions != nil {
+		return fmt.Errorf("this.Instructions == nil && that.Instructions != nil")
+	} else if that1.Instructions != nil {
+		return fmt.Errorf("Instructions this(%v) Not Equal that(%v)", this.Instructions, that1.Instructions)
+	}
+	if this.CacheReferences != nil && that1.CacheReferences != nil {
+		if *this.CacheReferences != *that1.CacheReferences {
+			return fmt.Errorf("CacheReferences this(%v) Not Equal that(%v)", *this.CacheReferences, *that1.CacheReferences)
+		}
+	} else if this.CacheReferences != nil {
+		return fmt.Errorf("this.CacheReferences == nil && that.CacheReferences != nil")
+	} else if that1.CacheReferences != nil {
+		return fmt.Errorf("CacheReferences this(%v) Not Equal that(%v)", this.CacheReferences, that1.CacheReferences)
+	}
+	if this.CacheMisses != nil && that1.CacheMisses != nil {
+		if *this.CacheMisses != *that1.CacheMisses {
+			return fmt.Errorf("CacheMisses this(%v) Not Equal that(%v)", *this.CacheMisses, *that1.CacheMisses)
+		}
+	} else if this.CacheMisses != nil {
+		return fmt.Errorf("this.CacheMisses == nil && that.CacheMisses != nil")
+	} else if that1.CacheMisses != nil {
+		return fmt.Errorf("CacheMisses this(%v) Not Equal that(%v)", this.CacheMisses, that1.CacheMisses)
+	}
+	if this.Branches != nil && that1.Branches != nil {
+		if *this.Branches != *that1.Branches {
+			return fmt.Errorf("Branches this(%v) Not Equal that(%v)", *this.Branches, *that1.Branches)
+		}
+	} else if this.Branches != nil {
+		return fmt.Errorf("this.Branches == nil && that.Branches != nil")
+	} else if that1.Branches != nil {
+		return fmt.Errorf("Branches this(%v) Not Equal that(%v)", this.Branches, that1.Branches)
+	}
+	if this.BranchMisses != nil && that1.BranchMisses != nil {
+		if *this.BranchMisses != *that1.BranchMisses {
+			return fmt.Errorf("BranchMisses this(%v) Not Equal that(%v)", *this.BranchMisses, *that1.BranchMisses)
+		}
+	} else if this.BranchMisses != nil {
+		return fmt.Errorf("this.BranchMisses == nil && that.BranchMisses != nil")
+	} else if that1.BranchMisses != nil {
+		return fmt.Errorf("BranchMisses this(%v) Not Equal that(%v)", this.BranchMisses, that1.BranchMisses)
+	}
+	if this.BusCycles != nil && that1.BusCycles != nil {
+		if *this.BusCycles != *that1.BusCycles {
+			return fmt.Errorf("BusCycles this(%v) Not Equal that(%v)", *this.BusCycles, *that1.BusCycles)
+		}
+	} else if this.BusCycles != nil {
+		return fmt.Errorf("this.BusCycles == nil && that.BusCycles != nil")
+	} else if that1.BusCycles != nil {
+		return fmt.Errorf("BusCycles this(%v) Not Equal that(%v)", this.BusCycles, that1.BusCycles)
+	}
+	if this.RefCycles != nil && that1.RefCycles != nil {
+		if *this.RefCycles != *that1.RefCycles {
+			return fmt.Errorf("RefCycles this(%v) Not Equal that(%v)", *this.RefCycles, *that1.RefCycles)
+		}
+	} else if this.RefCycles != nil {
+		return fmt.Errorf("this.RefCycles == nil && that.RefCycles != nil")
+	} else if that1.RefCycles != nil {
+		return fmt.Errorf("RefCycles this(%v) Not Equal that(%v)", this.RefCycles, that1.RefCycles)
+	}
+	if this.CPUClock != nil && that1.CPUClock != nil {
+		if *this.CPUClock != *that1.CPUClock {
+			return fmt.Errorf("CPUClock this(%v) Not Equal that(%v)", *this.CPUClock, *that1.CPUClock)
+		}
+	} else if this.CPUClock != nil {
+		return fmt.Errorf("this.CPUClock == nil && that.CPUClock != nil")
+	} else if that1.CPUClock != nil {
+		return fmt.Errorf("CPUClock this(%v) Not Equal that(%v)", this.CPUClock, that1.CPUClock)
+	}
+	if this.TaskClock != nil && that1.TaskClock != nil {
+		if *this.TaskClock != *that1.TaskClock {
+			return fmt.Errorf("TaskClock this(%v) Not Equal that(%v)", *this.TaskClock, *that1.TaskClock)
+		}
+	} else if this.TaskClock != nil {
+		return fmt.Errorf("this.TaskClock == nil && that.TaskClock != nil")
+	} else if that1.TaskClock != nil {
+		return fmt.Errorf("TaskClock this(%v) Not Equal that(%v)", this.TaskClock, that1.TaskClock)
+	}
+	if this.PageFaults != nil && that1.PageFaults != nil {
+		if *this.PageFaults != *that1.PageFaults {
+			return fmt.Errorf("PageFaults this(%v) Not Equal that(%v)", *this.PageFaults, *that1.PageFaults)
+		}
+	} else if this.PageFaults != nil {
+		return fmt.Errorf("this.PageFaults == nil && that.PageFaults != nil")
+	} else if that1.PageFaults != nil {
+		return fmt.Errorf("PageFaults this(%v) Not Equal that(%v)", this.PageFaults, that1.PageFaults)
+	}
+	if this.MinorFaults != nil && that1.MinorFaults != nil {
+		if *this.MinorFaults != *that1.MinorFaults {
+			return fmt.Errorf("MinorFaults this(%v) Not Equal that(%v)", *this.MinorFaults, *that1.MinorFaults)
+		}
+	} else if this.MinorFaults != nil {
+		return fmt.Errorf("this.MinorFaults == nil && that.MinorFaults != nil")
+	} else if that1.MinorFaults != nil {
+		return fmt.Errorf("MinorFaults this(%v) Not Equal that(%v)", this.MinorFaults, that1.MinorFaults)
+	}
+	if this.MajorFaults != nil && that1.MajorFaults != nil {
+		if *this.MajorFaults != *that1.MajorFaults {
+			return fmt.Errorf("MajorFaults this(%v) Not Equal that(%v)", *this.MajorFaults, *that1.MajorFaults)
+		}
+	} else if this.MajorFaults != nil {
+		return fmt.Errorf("this.MajorFaults == nil && that.MajorFaults != nil")
+	} else if that1.MajorFaults != nil {
+		return fmt.Errorf("MajorFaults this(%v) Not Equal that(%v)", this.MajorFaults, that1.MajorFaults)
+	}
+	if this.ContextSwitches != nil && that1.ContextSwitches != nil {
+		if *this.ContextSwitches != *that1.ContextSwitches {
+			return fmt.Errorf("ContextSwitches this(%v) Not Equal that(%v)", *this.ContextSwitches, *that1.ContextSwitches)
+		}
+	} else if this.ContextSwitches != nil {
+		return fmt.Errorf("this.ContextSwitches == nil && that.ContextSwitches != nil")
+	} else if that1.ContextSwitches != nil {
+		return fmt.Errorf("ContextSwitches this(%v) Not Equal that(%v)", this.ContextSwitches, that1.ContextSwitches)
+	}
+	if this.CPUMigrations != nil && that1.CPUMigrations != nil {
+		if *this.CPUMigrations != *that1.CPUMigrations {
+			return fmt.Errorf("CPUMigrations this(%v) Not Equal that(%v)", *this.CPUMigrations, *that1.CPUMigrations)
+		}
+	} else if this.CPUMigrations != nil {
+		return fmt.Errorf("this.CPUMigrations == nil && that.CPUMigrations != nil")
+	} else if that1.CPUMigrations != nil {
+		return fmt.Errorf("CPUMigrations this(%v) Not Equal that(%v)", this.CPUMigrations, that1.CPUMigrations)
+	}
+	if this.AlignmentFaults != nil && that1.AlignmentFaults != nil {
+		if *this.AlignmentFaults != *that1.AlignmentFaults {
+			return fmt.Errorf("AlignmentFaults this(%v) Not Equal that(%v)", *this.AlignmentFaults, *that1.AlignmentFaults)
+		}
+	} else if this.AlignmentFaults != nil {
+		return fmt.Errorf("this.AlignmentFaults == nil && that.AlignmentFaults != nil")
+	} else if that1.AlignmentFaults != nil {
+		return fmt.Errorf("AlignmentFaults this(%v) Not Equal that(%v)", this.AlignmentFaults, that1.AlignmentFaults)
+	}
+	if this.EmulationFaults != nil && that1.EmulationFaults != nil {
+		if *this.EmulationFaults != *that1.EmulationFaults {
+			return fmt.Errorf("EmulationFaults this(%v) Not Equal that(%v)", *this.EmulationFaults, *that1.EmulationFaults)
+		}
+	} else if this.EmulationFaults != nil {
+		return fmt.Errorf("this.EmulationFaults == nil && that.EmulationFaults != nil")
+	} else if that1.EmulationFaults != nil {
+		return fmt.Errorf("EmulationFaults this(%v) Not Equal that(%v)", this.EmulationFaults, that1.EmulationFaults)
+	}
+	if this.L1DcacheLoads != nil && that1.L1DcacheLoads != nil {
+		if *this.L1DcacheLoads != *that1.L1DcacheLoads {
+			return fmt.Errorf("L1DcacheLoads this(%v) Not Equal that(%v)", *this.L1DcacheLoads, *that1.L1DcacheLoads)
+		}
+	} else if this.L1DcacheLoads != nil {
+		return fmt.Errorf("this.L1DcacheLoads == nil && that.L1DcacheLoads != nil")
+	} else if that1.L1DcacheLoads != nil {
+		return fmt.Errorf("L1DcacheLoads this(%v) Not Equal that(%v)", this.L1DcacheLoads, that1.L1DcacheLoads)
+	}
+	if this.L1DcacheLoadMisses != nil && that1.L1DcacheLoadMisses != nil {
+		if *this.L1DcacheLoadMisses != *that1.L1DcacheLoadMisses {
+			return fmt.Errorf("L1DcacheLoadMisses this(%v) Not Equal that(%v)", *this.L1DcacheLoadMisses, *that1.L1DcacheLoadMisses)
+		}
+	} else if this.L1DcacheLoadMisses != nil {
+		return fmt.Errorf("this.L1DcacheLoadMisses == nil && that.L1DcacheLoadMisses != nil")
+	} else if that1.L1DcacheLoadMisses != nil {
+		return fmt.Errorf("L1DcacheLoadMisses this(%v) Not Equal that(%v)", this.L1DcacheLoadMisses, that1.L1DcacheLoadMisses)
+	}
+	if this.L1DcacheStores != nil && that1.L1DcacheStores != nil {
+		if *this.L1DcacheStores != *that1.L1DcacheStores {
+			return fmt.Errorf("L1DcacheStores this(%v) Not Equal that(%v)", *this.L1DcacheStores, *that1.L1DcacheStores)
+		}
+	} else if this.L1DcacheStores != nil {
+		return fmt.Errorf("this.L1DcacheStores == nil && that.L1DcacheStores != nil")
+	} else if that1.L1DcacheStores != nil {
+		return fmt.Errorf("L1DcacheStores this(%v) Not Equal that(%v)", this.L1DcacheStores, that1.L1DcacheStores)
+	}
+	if this.L1DcacheStoreMisses != nil && that1.L1DcacheStoreMisses != nil {
+		if *this.L1DcacheStoreMisses != *that1.L1DcacheStoreMisses {
+			return fmt.Errorf("L1DcacheStoreMisses this(%v) Not Equal that(%v)", *this.L1DcacheStoreMisses, *that1.L1DcacheStoreMisses)
+		}
+	} else if this.L1DcacheStoreMisses != nil {
+		return fmt.Errorf("this.L1DcacheStoreMisses == nil && that.L1DcacheStoreMisses != nil")
+	} else if that1.L1DcacheStoreMisses != nil {
+		return fmt.Errorf("L1DcacheStoreMisses this(%v) Not Equal that(%v)", this.L1DcacheStoreMisses, that1.L1DcacheStoreMisses)
+	}
+	if this.L1DcachePrefetches != nil && that1.L1DcachePrefetches != nil {
+		if *this.L1DcachePrefetches != *that1.L1DcachePrefetches {
+			return fmt.Errorf("L1DcachePrefetches this(%v) Not Equal that(%v)", *this.L1DcachePrefetches, *that1.L1DcachePrefetches)
+		}
+	} else if this.L1DcachePrefetches != nil {
+		return fmt.Errorf("this.L1DcachePrefetches == nil && that.L1DcachePrefetches != nil")
+	} else if that1.L1DcachePrefetches != nil {
+		return fmt.Errorf("L1DcachePrefetches this(%v) Not Equal that(%v)", this.L1DcachePrefetches, that1.L1DcachePrefetches)
+	}
+	if this.L1DcachePrefetchMisses != nil && that1.L1DcachePrefetchMisses != nil {
+		if *this.L1DcachePrefetchMisses != *that1.L1DcachePrefetchMisses {
+			return fmt.Errorf("L1DcachePrefetchMisses this(%v) Not Equal that(%v)", *this.L1DcachePrefetchMisses, *that1.L1DcachePrefetchMisses)
+		}
+	} else if this.L1DcachePrefetchMisses != nil {
+		return fmt.Errorf("this.L1DcachePrefetchMisses == nil && that.L1DcachePrefetchMisses != nil")
+	} else if that1.L1DcachePrefetchMisses != nil {
+		return fmt.Errorf("L1DcachePrefetchMisses this(%v) Not Equal that(%v)", this.L1DcachePrefetchMisses, that1.L1DcachePrefetchMisses)
+	}
+	if this.L1IcacheLoads != nil && that1.L1IcacheLoads != nil {
+		if *this.L1IcacheLoads != *that1.L1IcacheLoads {
+			return fmt.Errorf("L1IcacheLoads this(%v) Not Equal that(%v)", *this.L1IcacheLoads, *that1.L1IcacheLoads)
+		}
+	} else if this.L1IcacheLoads != nil {
+		return fmt.Errorf("this.L1IcacheLoads == nil && that.L1IcacheLoads != nil")
+	} else if that1.L1IcacheLoads != nil {
+		return fmt.Errorf("L1IcacheLoads this(%v) Not Equal that(%v)", this.L1IcacheLoads, that1.L1IcacheLoads)
+	}
+	if this.L1IcacheLoadMisses != nil && that1.L1IcacheLoadMisses != nil {
+		if *this.L1IcacheLoadMisses != *that1.L1IcacheLoadMisses {
+			return fmt.Errorf("L1IcacheLoadMisses this(%v) Not Equal that(%v)", *this.L1IcacheLoadMisses, *that1.L1IcacheLoadMisses)
+		}
+	} else if this.L1IcacheLoadMisses != nil {
+		return fmt.Errorf("this.L1IcacheLoadMisses == nil && that.L1IcacheLoadMisses != nil")
+	} else if that1.L1IcacheLoadMisses != nil {
+		return fmt.Errorf("L1IcacheLoadMisses this(%v) Not Equal that(%v)", this.L1IcacheLoadMisses, that1.L1IcacheLoadMisses)
+	}
+	if this.L1IcachePrefetches != nil && that1.L1IcachePrefetches != nil {
+		if *this.L1IcachePrefetches != *that1.L1IcachePrefetches {
+			return fmt.Errorf("L1IcachePrefetches this(%v) Not Equal that(%v)", *this.L1IcachePrefetches, *that1.L1IcachePrefetches)
+		}
+	} else if this.L1IcachePrefetches != nil {
+		return fmt.Errorf("this.L1IcachePrefetches == nil && that.L1IcachePrefetches != nil")
+	} else if that1.L1IcachePrefetches != nil {
+		return fmt.Errorf("L1IcachePrefetches this(%v) Not Equal that(%v)", this.L1IcachePrefetches, that1.L1IcachePrefetches)
+	}
+	if this.L1IcachePrefetchMisses != nil && that1.L1IcachePrefetchMisses != nil {
+		if *this.L1IcachePrefetchMisses != *that1.L1IcachePrefetchMisses {
+			return fmt.Errorf("L1IcachePrefetchMisses this(%v) Not Equal that(%v)", *this.L1IcachePrefetchMisses, *that1.L1IcachePrefetchMisses)
+		}
+	} else if this.L1IcachePrefetchMisses != nil {
+		return fmt.Errorf("this.L1IcachePrefetchMisses == nil && that.L1IcachePrefetchMisses != nil")
+	} else if that1.L1IcachePrefetchMisses != nil {
+		return fmt.Errorf("L1IcachePrefetchMisses this(%v) Not Equal that(%v)", this.L1IcachePrefetchMisses, that1.L1IcachePrefetchMisses)
+	}
+	if this.LLCLoads != nil && that1.LLCLoads != nil {
+		if *this.LLCLoads != *that1.LLCLoads {
+			return fmt.Errorf("LLCLoads this(%v) Not Equal that(%v)", *this.LLCLoads, *that1.LLCLoads)
+		}
+	} else if this.LLCLoads != nil {
+		return fmt.Errorf("this.LLCLoads == nil && that.LLCLoads != nil")
+	} else if that1.LLCLoads != nil {
+		return fmt.Errorf("LLCLoads this(%v) Not Equal that(%v)", this.LLCLoads, that1.LLCLoads)
+	}
+	if this.LLCLoadMisses != nil && that1.LLCLoadMisses != nil {
+		if *this.LLCLoadMisses != *that1.LLCLoadMisses {
+			return fmt.Errorf("LLCLoadMisses this(%v) Not Equal that(%v)", *this.LLCLoadMisses, *that1.LLCLoadMisses)
+		}
+	} else if this.LLCLoadMisses != nil {
+		return fmt.Errorf("this.LLCLoadMisses == nil && that.LLCLoadMisses != nil")
+	} else if that1.LLCLoadMisses != nil {
+		return fmt.Errorf("LLCLoadMisses this(%v) Not Equal that(%v)", this.LLCLoadMisses, that1.LLCLoadMisses)
+	}
+	if this.LLCStores != nil && that1.LLCStores != nil {
+		if *this.LLCStores != *that1.LLCStores {
+			return fmt.Errorf("LLCStores this(%v) Not Equal that(%v)", *this.LLCStores, *that1.LLCStores)
+		}
+	} else if this.LLCStores != nil {
+		return fmt.Errorf("this.LLCStores == nil && that.LLCStores != nil")
+	} else if that1.LLCStores != nil {
+		return fmt.Errorf("LLCStores this(%v) Not Equal that(%v)", this.LLCStores, that1.LLCStores)
+	}
+	if this.LLCStoreMisses != nil && that1.LLCStoreMisses != nil {
+		if *this.LLCStoreMisses != *that1.LLCStoreMisses {
+			return fmt.Errorf("LLCStoreMisses this(%v) Not Equal that(%v)", *this.LLCStoreMisses, *that1.LLCStoreMisses)
+		}
+	} else if this.LLCStoreMisses != nil {
+		return fmt.Errorf("this.LLCStoreMisses == nil && that.LLCStoreMisses != nil")
+	} else if that1.LLCStoreMisses != nil {
+		return fmt.Errorf("LLCStoreMisses this(%v) Not Equal that(%v)", this.LLCStoreMisses, that1.LLCStoreMisses)
+	}
+	if this.LLCPrefetches != nil && that1.LLCPrefetches != nil {
+		if *this.LLCPrefetches != *that1.LLCPrefetches {
+			return fmt.Errorf("LLCPrefetches this(%v) Not Equal that(%v)", *this.LLCPrefetches, *that1.LLCPrefetches)
+		}
+	} else if this.LLCPrefetches != nil {
+		return fmt.Errorf("this.LLCPrefetches == nil && that.LLCPrefetches != nil")
+	} else if that1.LLCPrefetches != nil {
+		return fmt.Errorf("LLCPrefetches this(%v) Not Equal that(%v)", this.LLCPrefetches, that1.LLCPrefetches)
+	}
+	if this.LLCPrefetchMisses != nil && that1.LLCPrefetchMisses != nil {
+		if *this.LLCPrefetchMisses != *that1.LLCPrefetchMisses {
+			return fmt.Errorf("LLCPrefetchMisses this(%v) Not Equal that(%v)", *this.LLCPrefetchMisses, *that1.LLCPrefetchMisses)
+		}
+	} else if this.LLCPrefetchMisses != nil {
+		return fmt.Errorf("this.LLCPrefetchMisses == nil && that.LLCPrefetchMisses != nil")
+	} else if that1.LLCPrefetchMisses != nil {
+		return fmt.Errorf("LLCPrefetchMisses this(%v) Not Equal that(%v)", this.LLCPrefetchMisses, that1.LLCPrefetchMisses)
+	}
+	if this.DTLBLoads != nil && that1.DTLBLoads != nil {
+		if *this.DTLBLoads != *that1.DTLBLoads {
+			return fmt.Errorf("DTLBLoads this(%v) Not Equal that(%v)", *this.DTLBLoads, *that1.DTLBLoads)
+		}
+	} else if this.DTLBLoads != nil {
+		return fmt.Errorf("this.DTLBLoads == nil && that.DTLBLoads != nil")
+	} else if that1.DTLBLoads != nil {
+		return fmt.Errorf("DTLBLoads this(%v) Not Equal that(%v)", this.DTLBLoads, that1.DTLBLoads)
+	}
+	if this.DTLBLoadMisses != nil && that1.DTLBLoadMisses != nil {
+		if *this.DTLBLoadMisses != *that1.DTLBLoadMisses {
+			return fmt.Errorf("DTLBLoadMisses this(%v) Not Equal that(%v)", *this.DTLBLoadMisses, *that1.DTLBLoadMisses)
+		}
+	} else if this.DTLBLoadMisses != nil {
+		return fmt.Errorf("this.DTLBLoadMisses == nil && that.DTLBLoadMisses != nil")
+	} else if that1.DTLBLoadMisses != nil {
+		return fmt.Errorf("DTLBLoadMisses this(%v) Not Equal that(%v)", this.DTLBLoadMisses, that1.DTLBLoadMisses)
+	}
+	if this.DTLBStores != nil && that1.DTLBStores != nil {
+		if *this.DTLBStores != *that1.DTLBStores {
+			return fmt.Errorf("DTLBStores this(%v) Not Equal that(%v)", *this.DTLBStores, *that1.DTLBStores)
+		}
+	} else if this.DTLBStores != nil {
+		return fmt.Errorf("this.DTLBStores == nil && that.DTLBStores != nil")
+	} else if that1.DTLBStores != nil {
+		return fmt.Errorf("DTLBStores this(%v) Not Equal that(%v)", this.DTLBStores, that1.DTLBStores)
+	}
+	if this.DTLBStoreMisses != nil && that1.DTLBStoreMisses != nil {
+		if *this.DTLBStoreMisses != *that1.DTLBStoreMisses {
+			return fmt.Errorf("DTLBStoreMisses this(%v) Not Equal that(%v)", *this.DTLBStoreMisses, *that1.DTLBStoreMisses)
+		}
+	} else if this.DTLBStoreMisses != nil {
+		return fmt.Errorf("this.DTLBStoreMisses == nil && that.DTLBStoreMisses != nil")
+	} else if that1.DTLBStoreMisses != nil {
+		return fmt.Errorf("DTLBStoreMisses this(%v) Not Equal that(%v)", this.DTLBStoreMisses, that1.DTLBStoreMisses)
+	}
+	if this.DTLBPrefetches != nil && that1.DTLBPrefetches != nil {
+		if *this.DTLBPrefetches != *that1.DTLBPrefetches {
+			return fmt.Errorf("DTLBPrefetches this(%v) Not Equal that(%v)", *this.DTLBPrefetches, *that1.DTLBPrefetches)
+		}
+	} else if this.DTLBPrefetches != nil {
+		return fmt.Errorf("this.DTLBPrefetches == nil && that.DTLBPrefetches != nil")
+	} else if that1.DTLBPrefetches != nil {
+		return fmt.Errorf("DTLBPrefetches this(%v) Not Equal that(%v)", this.DTLBPrefetches, that1.DTLBPrefetches)
+	}
+	if this.DTLBPrefetchMisses != nil && that1.DTLBPrefetchMisses != nil {
+		if *this.DTLBPrefetchMisses != *that1.DTLBPrefetchMisses {
+			return fmt.Errorf("DTLBPrefetchMisses this(%v) Not Equal that(%v)", *this.DTLBPrefetchMisses, *that1.DTLBPrefetchMisses)
+		}
+	} else if this.DTLBPrefetchMisses != nil {
+		return fmt.Errorf("this.DTLBPrefetchMisses == nil && that.DTLBPrefetchMisses != nil")
+	} else if that1.DTLBPrefetchMisses != nil {
+		return fmt.Errorf("DTLBPrefetchMisses this(%v) Not Equal that(%v)", this.DTLBPrefetchMisses, that1.DTLBPrefetchMisses)
+	}
+	if this.ITLBLoads != nil && that1.ITLBLoads != nil {
+		if *this.ITLBLoads != *that1.ITLBLoads {
+			return fmt.Errorf("ITLBLoads this(%v) Not Equal that(%v)", *this.ITLBLoads, *that1.ITLBLoads)
+		}
+	} else if this.ITLBLoads != nil {
+		return fmt.Errorf("this.ITLBLoads == nil && that.ITLBLoads != nil")
+	} else if that1.ITLBLoads != nil {
+		return fmt.Errorf("ITLBLoads this(%v) Not Equal that(%v)", this.ITLBLoads, that1.ITLBLoads)
+	}
+	if this.ITLBLoadMisses != nil && that1.ITLBLoadMisses != nil {
+		if *this.ITLBLoadMisses != *that1.ITLBLoadMisses {
+			return fmt.Errorf("ITLBLoadMisses this(%v) Not Equal that(%v)", *this.ITLBLoadMisses, *that1.ITLBLoadMisses)
+		}
+	} else if this.ITLBLoadMisses != nil {
+		return fmt.Errorf("this.ITLBLoadMisses == nil && that.ITLBLoadMisses != nil")
+	} else if that1.ITLBLoadMisses != nil {
+		return fmt.Errorf("ITLBLoadMisses this(%v) Not Equal that(%v)", this.ITLBLoadMisses, that1.ITLBLoadMisses)
+	}
+	if this.BranchLoads != nil && that1.BranchLoads != nil {
+		if *this.BranchLoads != *that1.BranchLoads {
+			return fmt.Errorf("BranchLoads this(%v) Not Equal that(%v)", *this.BranchLoads, *that1.BranchLoads)
+		}
+	} else if this.BranchLoads != nil {
+		return fmt.Errorf("this.BranchLoads == nil && that.BranchLoads != nil")
+	} else if that1.BranchLoads != nil {
+		return fmt.Errorf("BranchLoads this(%v) Not Equal that(%v)", this.BranchLoads, that1.BranchLoads)
+	}
+	if this.BranchLoadMisses != nil && that1.BranchLoadMisses != nil {
+		if *this.BranchLoadMisses != *that1.BranchLoadMisses {
+			return fmt.Errorf("BranchLoadMisses this(%v) Not Equal that(%v)", *this.BranchLoadMisses, *that1.BranchLoadMisses)
+		}
+	} else if this.BranchLoadMisses != nil {
+		return fmt.Errorf("this.BranchLoadMisses == nil && that.BranchLoadMisses != nil")
+	} else if that1.BranchLoadMisses != nil {
+		return fmt.Errorf("BranchLoadMisses this(%v) Not Equal that(%v)", this.BranchLoadMisses, that1.BranchLoadMisses)
+	}
+	if this.NodeLoads != nil && that1.NodeLoads != nil {
+		if *this.NodeLoads != *that1.NodeLoads {
+			return fmt.Errorf("NodeLoads this(%v) Not Equal that(%v)", *this.NodeLoads, *that1.NodeLoads)
+		}
+	} else if this.NodeLoads != nil {
+		return fmt.Errorf("this.NodeLoads == nil && that.NodeLoads != nil")
+	} else if that1.NodeLoads != nil {
+		return fmt.Errorf("NodeLoads this(%v) Not Equal that(%v)", this.NodeLoads, that1.NodeLoads)
+	}
+	if this.NodeLoadMisses != nil && that1.NodeLoadMisses != nil {
+		if *this.NodeLoadMisses != *that1.NodeLoadMisses {
+			return fmt.Errorf("NodeLoadMisses this(%v) Not Equal that(%v)", *this.NodeLoadMisses, *that1.NodeLoadMisses)
+		}
+	} else if this.NodeLoadMisses != nil {
+		return fmt.Errorf("this.NodeLoadMisses == nil && that.NodeLoadMisses != nil")
+	} else if that1.NodeLoadMisses != nil {
+		return fmt.Errorf("NodeLoadMisses this(%v) Not Equal that(%v)", this.NodeLoadMisses, that1.NodeLoadMisses)
+	}
+	if this.NodeStores != nil && that1.NodeStores != nil {
+		if *this.NodeStores != *that1.NodeStores {
+			return fmt.Errorf("NodeStores this(%v) Not Equal that(%v)", *this.NodeStores, *that1.NodeStores)
+		}
+	} else if this.NodeStores != nil {
+		return fmt.Errorf("this.NodeStores == nil && that.NodeStores != nil")
+	} else if that1.NodeStores != nil {
+		return fmt.Errorf("NodeStores this(%v) Not Equal that(%v)", this.NodeStores, that1.NodeStores)
+	}
+	if this.NodeStoreMisses != nil && that1.NodeStoreMisses != nil {
+		if *this.NodeStoreMisses != *that1.NodeStoreMisses {
+			return fmt.Errorf("NodeStoreMisses this(%v) Not Equal that(%v)", *this.NodeStoreMisses, *that1.NodeStoreMisses)
+		}
+	} else if this.NodeStoreMisses != nil {
+		return fmt.Errorf("this.NodeStoreMisses == nil && that.NodeStoreMisses != nil")
+	} else if that1.NodeStoreMisses != nil {
+		return fmt.Errorf("NodeStoreMisses this(%v) Not Equal that(%v)", this.NodeStoreMisses, that1.NodeStoreMisses)
+	}
+	if this.NodePrefetches != nil && that1.NodePrefetches != nil {
+		if *this.NodePrefetches != *that1.NodePrefetches {
+			return fmt.Errorf("NodePrefetches this(%v) Not Equal that(%v)", *this.NodePrefetches, *that1.NodePrefetches)
+		}
+	} else if this.NodePrefetches != nil {
+		return fmt.Errorf("this.NodePrefetches == nil && that.NodePrefetches != nil")
+	} else if that1.NodePrefetches != nil {
+		return fmt.Errorf("NodePrefetches this(%v) Not Equal that(%v)", this.NodePrefetches, that1.NodePrefetches)
+	}
+	if this.NodePrefetchMisses != nil && that1.NodePrefetchMisses != nil {
+		if *this.NodePrefetchMisses != *that1.NodePrefetchMisses {
+			return fmt.Errorf("NodePrefetchMisses this(%v) Not Equal that(%v)", *this.NodePrefetchMisses, *that1.NodePrefetchMisses)
+		}
+	} else if this.NodePrefetchMisses != nil {
+		return fmt.Errorf("this.NodePrefetchMisses == nil && that.NodePrefetchMisses != nil")
+	} else if that1.NodePrefetchMisses != nil {
+		return fmt.Errorf("NodePrefetchMisses this(%v) Not Equal that(%v)", this.NodePrefetchMisses, that1.NodePrefetchMisses)
+	}
+	return nil
+}
+func (this *PerfStatistics) Equal(that interface{}) bool {
+	if that == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	}
+
+	that1, ok := that.(*PerfStatistics)
+	if !ok {
+		that2, ok := that.(PerfStatistics)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	} else if this == nil {
+		return false
+	}
+	if this.Timestamp != that1.Timestamp {
+		return false
+	}
+	if this.Duration != that1.Duration {
+		return false
+	}
+	if this.Cycles != nil && that1.Cycles != nil {
+		if *this.Cycles != *that1.Cycles {
+			return false
+		}
+	} else if this.Cycles != nil {
+		return false
+	} else if that1.Cycles != nil {
+		return false
+	}
+	if this.StalledCyclesFrontend != nil && that1.StalledCyclesFrontend != nil {
+		if *this.StalledCyclesFrontend != *that1.StalledCyclesFrontend {
+			return false
+		}
+	} else if this.StalledCyclesFrontend != nil {
+		return false
+	} else if that1.StalledCyclesFrontend != nil {
+		return false
+	}
+	if this.StalledCyclesBackend != nil && that1.StalledCyclesBackend != nil {
+		if *this.StalledCyclesBackend != *that1.StalledCyclesBackend {
+			return false
+		}
+	} else if this.StalledCyclesBackend != nil {
+		return false
+	} else if that1.StalledCyclesBackend != nil {
+		return false
+	}
+	if this.Instructions != nil && that1.Instructions != nil {
+		if *this.Instructions != *that1.Instructions {
+			return false
+		}
+	} else if this.Instructions != nil {
+		return false
+	} else if that1.Instructions != nil {
+		return false
+	}
+	if this.CacheReferences != nil && that1.CacheReferences != nil {
+		if *this.CacheReferences != *that1.CacheReferences {
+			return false
+		}
+	} else if this.CacheReferences != nil {
+		return false
+	} else if that1.CacheReferences != nil {
+		return false
+	}
+	if this.CacheMisses != nil && that1.CacheMisses != nil {
+		if *this.CacheMisses != *that1.CacheMisses {
+			return false
+		}
+	} else if this.CacheMisses != nil {
+		return false
+	} else if that1.CacheMisses != nil {
+		return false
+	}
+	if this.Branches != nil && that1.Branches != nil {
+		if *this.Branches != *that1.Branches {
+			return false
+		}
+	} else if this.Branches != nil {
+		return false
+	} else if that1.Branches != nil {
+		return false
+	}
+	if this.BranchMisses != nil && that1.BranchMisses != nil {
+		if *this.BranchMisses != *that1.BranchMisses {
+			return false
+		}
+	} else if this.BranchMisses != nil {
+		return false
+	} else if that1.BranchMisses != nil {
+		return false
+	}
+	if this.BusCycles != nil && that1.BusCycles != nil {
+		if *this.BusCycles != *that1.BusCycles {
+			return false
+		}
+	} else if this.BusCycles != nil {
+		return false
+	} else if that1.BusCycles != nil {
+		return false
+	}
+	if this.RefCycles != nil && that1.RefCycles != nil {
+		if *this.RefCycles != *that1.RefCycles {
+			return false
+		}
+	} else if this.RefCycles != nil {
+		return false
+	} else if that1.RefCycles != nil {
+		return false
+	}
+	if this.CPUClock != nil && that1.CPUClock != nil {
+		if *this.CPUClock != *that1.CPUClock {
+			return false
+		}
+	} else if this.CPUClock != nil {
+		return false
+	} else if that1.CPUClock != nil {
+		return false
+	}
+	if this.TaskClock != nil && that1.TaskClock != nil {
+		if *this.TaskClock != *that1.TaskClock {
+			return false
+		}
+	} else if this.TaskClock != nil {
+		return false
+	} else if that1.TaskClock != nil {
+		return false
+	}
+	if this.PageFaults != nil && that1.PageFaults != nil {
+		if *this.PageFaults != *that1.PageFaults {
+			return false
+		}
+	} else if this.PageFaults != nil {
+		return false
+	} else if that1.PageFaults != nil {
+		return false
+	}
+	if this.MinorFaults != nil && that1.MinorFaults != nil {
+		if *this.MinorFaults != *that1.MinorFaults {
+			return false
+		}
+	} else if this.MinorFaults != nil {
+		return false
+	} else if that1.MinorFaults != nil {
+		return false
+	}
+	if this.MajorFaults != nil && that1.MajorFaults != nil {
+		if *this.MajorFaults != *that1.MajorFaults {
+			return false
+		}
+	} else if this.MajorFaults != nil {
+		return false
+	} else if that1.MajorFaults != nil {
+		return false
+	}
+	if this.ContextSwitches != nil && that1.ContextSwitches != nil {
+		if *this.ContextSwitches != *that1.ContextSwitches {
+			return false
+		}
+	} else if this.ContextSwitches != nil {
+		return false
+	} else if that1.ContextSwitches != nil {
+		return false
+	}
+	if this.CPUMigrations != nil && that1.CPUMigrations != nil {
+		if *this.CPUMigrations != *that1.CPUMigrations {
+			return false
+		}
+	} else if this.CPUMigrations != nil {
+		return false
+	} else if that1.CPUMigrations != nil {
+		return false
+	}
+	if this.AlignmentFaults != nil && that1.AlignmentFaults != nil {
+		if *this.AlignmentFaults != *that1.AlignmentFaults {
+			return false
+		}
+	} else if this.AlignmentFaults != nil {
+		return false
+	} else if that1.AlignmentFaults != nil {
+		return false
+	}
+	if this.EmulationFaults != nil && that1.EmulationFaults != nil {
+		if *this.EmulationFaults != *that1.EmulationFaults {
+			return false
+		}
+	} else if this.EmulationFaults != nil {
+		return false
+	} else if that1.EmulationFaults != nil {
+		return false
+	}
+	if this.L1DcacheLoads != nil && that1.L1DcacheLoads != nil {
+		if *this.L1DcacheLoads != *that1.L1DcacheLoads {
+			return false
+		}
+	} else if this.L1DcacheLoads != nil {
+		return false
+	} else if that1.L1DcacheLoads != nil {
+		return false
+	}
+	if this.L1DcacheLoadMisses != nil && that1.L1DcacheLoadMisses != nil {
+		if *this.L1DcacheLoadMisses != *that1.L1DcacheLoadMisses {
+			return false
+		}
+	} else if this.L1DcacheLoadMisses != nil {
+		return false
+	} else if that1.L1DcacheLoadMisses != nil {
+		return false
+	}
+	if this.L1DcacheStores != nil && that1.L1DcacheStores != nil {
+		if *this.L1DcacheStores != *that1.L1DcacheStores {
+			return false
+		}
+	} else if this.L1DcacheStores != nil {
+		return false
+	} else if that1.L1DcacheStores != nil {
+		return false
+	}
+	if this.L1DcacheStoreMisses != nil && that1.L1DcacheStoreMisses != nil {
+		if *this.L1DcacheStoreMisses != *that1.L1DcacheStoreMisses {
+			return false
+		}
+	} else if this.L1DcacheStoreMisses != nil {
+		return false
+	} else if that1.L1DcacheStoreMisses != nil {
+		return false
+	}
+	if this.L1DcachePrefetches != nil && that1.L1DcachePrefetches != nil {
+		if *this.L1DcachePrefetches != *that1.L1DcachePrefetches {
+			return false
+		}
+	} else if this.L1DcachePrefetches != nil {
+		return false
+	} else if that1.L1DcachePrefetches != nil {
+		return false
+	}
+	if this.L1DcachePrefetchMisses != nil && that1.L1DcachePrefetchMisses != nil {
+		if *this.L1DcachePrefetchMisses != *that1.L1DcachePrefetchMisses {
+			return false
+		}
+	} else if this.L1DcachePrefetchMisses != nil {
+		return false
+	} else if that1.L1DcachePrefetchMisses != nil {
+		return false
+	}
+	if this.L1IcacheLoads != nil && that1.L1IcacheLoads != nil {
+		if *this.L1IcacheLoads != *that1.L1IcacheLoads {
+			return false
+		}
+	} else if this.L1IcacheLoads != nil {
+		return false
+	} else if that1.L1IcacheLoads != nil {
+		return false
+	}
+	if this.L1IcacheLoadMisses != nil && that1.L1IcacheLoadMisses != nil {
+		if *this.L1IcacheLoadMisses != *that1.L1IcacheLoadMisses {
+			return false
+		}
+	} else if this.L1IcacheLoadMisses != nil {
+		return false
+	} else if that1.L1IcacheLoadMisses != nil {
+		return false
+	}
+	if this.L1IcachePrefetches != nil && that1.L1IcachePrefetches != nil {
+		if *this.L1IcachePrefetches != *that1.L1IcachePrefetches {
+			return false
+		}
+	} else if this.L1IcachePrefetches != nil {
+		return false
+	} else if that1.L1IcachePrefetches != nil {
+		return false
+	}
+	if this.L1IcachePrefetchMisses != nil && that1.L1IcachePrefetchMisses != nil {
+		if *this.L1IcachePrefetchMisses != *that1.L1IcachePrefetchMisses {
+			return false
+		}
+	} else if this.L1IcachePrefetchMisses != nil {
+		return false
+	} else if that1.L1IcachePrefetchMisses != nil {
+		return false
+	}
+	if this.LLCLoads != nil && that1.LLCLoads != nil {
+		if *this.LLCLoads != *that1.LLCLoads {
+			return false
+		}
+	} else if this.LLCLoads != nil {
+		return false
+	} else if that1.LLCLoads != nil {
+		return false
+	}
+	if this.LLCLoadMisses != nil && that1.LLCLoadMisses != nil {
+		if *this.LLCLoadMisses != *that1.LLCLoadMisses {
+			return false
+		}
+	} else if this.LLCLoadMisses != nil {
+		return false
+	} else if that1.LLCLoadMisses != nil {
+		return false
+	}
+	if this.LLCStores != nil && that1.LLCStores != nil {
+		if *this.LLCStores != *that1.LLCStores {
+			return false
+		}
+	} else if this.LLCStores != nil {
+		return false
+	} else if that1.LLCStores != nil {
+		return false
+	}
+	if this.LLCStoreMisses != nil && that1.LLCStoreMisses != nil {
+		if *this.LLCStoreMisses != *that1.LLCStoreMisses {
+			return false
+		}
+	} else if this.LLCStoreMisses != nil {
+		return false
+	} else if that1.LLCStoreMisses != nil {
+		return false
+	}
+	if this.LLCPrefetches != nil && that1.LLCPrefetches != nil {
+		if *this.LLCPrefetches != *that1.LLCPrefetches {
+			return false
+		}
+	} else if this.LLCPrefetches != nil {
+		return false
+	} else if that1.LLCPrefetches != nil {
+		return false
+	}
+	if this.LLCPrefetchMisses != nil && that1.LLCPrefetchMisses != nil {
+		if *this.LLCPrefetchMisses != *that1.LLCPrefetchMisses {
+			return false
+		}
+	} else if this.LLCPrefetchMisses != nil {
+		return false
+	} else if that1.LLCPrefetchMisses != nil {
+		return false
+	}
+	if this.DTLBLoads != nil && that1.DTLBLoads != nil {
+		if *this.DTLBLoads != *that1.DTLBLoads {
+			return false
+		}
+	} else if this.DTLBLoads != nil {
+		return false
+	} else if that1.DTLBLoads != nil {
+		return false
+	}
+	if this.DTLBLoadMisses != nil && that1.DTLBLoadMisses != nil {
+		if *this.DTLBLoadMisses != *that1.DTLBLoadMisses {
+			return false
+		}
+	} else if this.DTLBLoadMisses != nil {
+		return false
+	} else if that1.DTLBLoadMisses != nil {
+		return false
+	}
+	if this.DTLBStores != nil && that1.DTLBStores != nil {
+		if *this.DTLBStores != *that1.DTLBStores {
+			return false
+		}
+	} else if this.DTLBStores != nil {
+		return false
+	} else if that1.DTLBStores != nil {
+		return false
+	}
+	if this.DTLBStoreMisses != nil && that1.DTLBStoreMisses != nil {
+		if *this.DTLBStoreMisses != *that1.DTLBStoreMisses {
+			return false
+		}
+	} else if this.DTLBStoreMisses != nil {
+		return false
+	} else if that1.DTLBStoreMisses != nil {
+		return false
+	}
+	if this.DTLBPrefetches != nil && that1.DTLBPrefetches != nil {
+		if *this.DTLBPrefetches != *that1.DTLBPrefetches {
+			return false
+		}
+	} else if this.DTLBPrefetches != nil {
+		return false
+	} else if that1.DTLBPrefetches != nil {
+		return false
+	}
+	if this.DTLBPrefetchMisses != nil && that1.DTLBPrefetchMisses != nil {
+		if *this.DTLBPrefetchMisses != *that1.DTLBPrefetchMisses {
+			return false
+		}
+	} else if this.DTLBPrefetchMisses != nil {
+		return false
+	} else if that1.DTLBPrefetchMisses != nil {
+		return false
+	}
+	if this.ITLBLoads != nil && that1.ITLBLoads != nil {
+		if *this.ITLBLoads != *that1.ITLBLoads {
+			return false
+		}
+	} else if this.ITLBLoads != nil {
+		return false
+	} else if that1.ITLBLoads != nil {
+		return false
+	}
+	if this.ITLBLoadMisses != nil && that1.ITLBLoadMisses != nil {
+		if *this.ITLBLoadMisses != *that1.ITLBLoadMisses {
+			return false
+		}
+	} else if this.ITLBLoadMisses != nil {
+		return false
+	} else if that1.ITLBLoadMisses != nil {
+		return false
+	}
+	if this.BranchLoads != nil && that1.BranchLoads != nil {
+		if *this.BranchLoads != *that1.BranchLoads {
+			return false
+		}
+	} else if this.BranchLoads != nil {
+		return false
+	} else if that1.BranchLoads != nil {
+		return false
+	}
+	if this.BranchLoadMisses != nil && that1.BranchLoadMisses != nil {
+		if *this.BranchLoadMisses != *that1.BranchLoadMisses {
+			return false
+		}
+	} else if this.BranchLoadMisses != nil {
+		return false
+	} else if that1.BranchLoadMisses != nil {
+		return false
+	}
+	if this.NodeLoads != nil && that1.NodeLoads != nil {
+		if *this.NodeLoads != *that1.NodeLoads {
+			return false
+		}
+	} else if this.NodeLoads != nil {
+		return false
+	} else if that1.NodeLoads != nil {
+		return false
+	}
+	if this.NodeLoadMisses != nil && that1.NodeLoadMisses != nil {
+		if *this.NodeLoadMisses != *that1.NodeLoadMisses {
+			return false
+		}
+	} else if this.NodeLoadMisses != nil {
+		return false
+	} else if that1.NodeLoadMisses != nil {
+		return false
+	}
+	if this.NodeStores != nil && that1.NodeStores != nil {
+		if *this.NodeStores != *that1.NodeStores {
+			return false
+		}
+	} else if this.NodeStores != nil {
+		return false
+	} else if that1.NodeStores != nil {
+		return false
+	}
+	if this.NodeStoreMisses != nil && that1.NodeStoreMisses != nil {
+		if *this.NodeStoreMisses != *that1.NodeStoreMisses {
+			return false
+		}
+	} else if this.NodeStoreMisses != nil {
+		return false
+	} else if that1.NodeStoreMisses != nil {
+		return false
+	}
+	if this.NodePrefetches != nil && that1.NodePrefetches != nil {
+		if *this.NodePrefetches != *that1.NodePrefetches {
+			return false
+		}
+	} else if this.NodePrefetches != nil {
+		return false
+	} else if that1.NodePrefetches != nil {
+		return false
+	}
+	if this.NodePrefetchMisses != nil && that1.NodePrefetchMisses != nil {
+		if *this.NodePrefetchMisses != *that1.NodePrefetchMisses {
+			return false
+		}
+	} else if this.NodePrefetchMisses != nil {
+		return false
+	} else if that1.NodePrefetchMisses != nil {
+		return false
+	}
+	return true
+}
+func (this *Request) VerboseEqual(that interface{}) error {
+	if that == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that == nil && this != nil")
+	}
+
+	that1, ok := that.(*Request)
+	if !ok {
+		that2, ok := that.(Request)
+		if ok {
+			that1 = &that2
+		} else {
+			return fmt.Errorf("that is not of type *Request")
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that is type *Request but is nil && this != nil")
+	} else if this == nil {
+		return fmt.Errorf("that is type *Request but is not nil && this == nil")
+	}
+	if !this.AgentID.Equal(that1.AgentID) {
+		return fmt.Errorf("AgentID this(%v) Not Equal that(%v)", this.AgentID, that1.AgentID)
+	}
+	if len(this.Resources) != len(that1.Resources) {
+		return fmt.Errorf("Resources this(%v) Not Equal that(%v)", len(this.Resources), len(that1.Resources))
+	}
+	for i := range this.Resources {
+		if !this.Resources[i].Equal(&that1.Resources[i]) {
+			return fmt.Errorf("Resources this[%v](%v) Not Equal that[%v](%v)", i, this.Resources[i], i, that1.Resources[i])
+		}
+	}
+	return nil
+}
+func (this *Request) Equal(that interface{}) bool {
+	if that == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	}
+
+	that1, ok := that.(*Request)
+	if !ok {
+		that2, ok := that.(Request)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	} else if this == nil {
+		return false
+	}
+	if !this.AgentID.Equal(that1.AgentID) {
+		return false
+	}
+	if len(this.Resources) != len(that1.Resources) {
+		return false
+	}
+	for i := range this.Resources {
+		if !this.Resources[i].Equal(&that1.Resources[i]) {
+			return false
+		}
+	}
+	return true
+}
+func (this *Offer) VerboseEqual(that interface{}) error {
+	if that == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that == nil && this != nil")
+	}
+
+	that1, ok := that.(*Offer)
+	if !ok {
+		that2, ok := that.(Offer)
+		if ok {
+			that1 = &that2
+		} else {
+			return fmt.Errorf("that is not of type *Offer")
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that is type *Offer but is nil && this != nil")
+	} else if this == nil {
+		return fmt.Errorf("that is type *Offer but is not nil && this == nil")
+	}
+	if !this.ID.Equal(&that1.ID) {
+		return fmt.Errorf("ID this(%v) Not Equal that(%v)", this.ID, that1.ID)
+	}
+	if !this.FrameworkID.Equal(&that1.FrameworkID) {
+		return fmt.Errorf("FrameworkID this(%v) Not Equal that(%v)", this.FrameworkID, that1.FrameworkID)
+	}
+	if !this.AgentID.Equal(&that1.AgentID) {
+		return fmt.Errorf("AgentID this(%v) Not Equal that(%v)", this.AgentID, that1.AgentID)
+	}
+	if this.Hostname != that1.Hostname {
+		return fmt.Errorf("Hostname this(%v) Not Equal that(%v)", this.Hostname, that1.Hostname)
+	}
+	if !this.URL.Equal(that1.URL) {
+		return fmt.Errorf("URL this(%v) Not Equal that(%v)", this.URL, that1.URL)
+	}
+	if !this.Domain.Equal(that1.Domain) {
+		return fmt.Errorf("Domain this(%v) Not Equal that(%v)", this.Domain, that1.Domain)
+	}
+	if len(this.Resources) != len(that1.Resources) {
+		return fmt.Errorf("Resources this(%v) Not Equal that(%v)", len(this.Resources), len(that1.Resources))
+	}
+	for i := range this.Resources {
+		if !this.Resources[i].Equal(&that1.Resources[i]) {
+			return fmt.Errorf("Resources this[%v](%v) Not Equal that[%v](%v)", i, this.Resources[i], i, that1.Resources[i])
+		}
+	}
+	if len(this.Attributes) != len(that1.Attributes) {
+		return fmt.Errorf("Attributes this(%v) Not Equal that(%v)", len(this.Attributes), len(that1.Attributes))
+	}
+	for i := range this.Attributes {
+		if !this.Attributes[i].Equal(&that1.Attributes[i]) {
+			return fmt.Errorf("Attributes this[%v](%v) Not Equal that[%v](%v)", i, this.Attributes[i], i, that1.Attributes[i])
+		}
+	}
+	if len(this.ExecutorIDs) != len(that1.ExecutorIDs) {
+		return fmt.Errorf("ExecutorIDs this(%v) Not Equal that(%v)", len(this.ExecutorIDs), len(that1.ExecutorIDs))
+	}
+	for i := range this.ExecutorIDs {
+		if !this.ExecutorIDs[i].Equal(&that1.ExecutorIDs[i]) {
+			return fmt.Errorf("ExecutorIDs this[%v](%v) Not Equal that[%v](%v)", i, this.ExecutorIDs[i], i, that1.ExecutorIDs[i])
+		}
+	}
+	if !this.Unavailability.Equal(that1.Unavailability) {
+		return fmt.Errorf("Unavailability this(%v) Not Equal that(%v)", this.Unavailability, that1.Unavailability)
+	}
+	if !this.AllocationInfo.Equal(that1.AllocationInfo) {
+		return fmt.Errorf("AllocationInfo this(%v) Not Equal that(%v)", this.AllocationInfo, that1.AllocationInfo)
+	}
+	return nil
+}
+func (this *Offer) Equal(that interface{}) bool {
+	if that == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	}
+
+	that1, ok := that.(*Offer)
+	if !ok {
+		that2, ok := that.(Offer)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	} else if this == nil {
+		return false
+	}
+	if !this.ID.Equal(&that1.ID) {
+		return false
+	}
+	if !this.FrameworkID.Equal(&that1.FrameworkID) {
+		return false
+	}
+	if !this.AgentID.Equal(&that1.AgentID) {
+		return false
+	}
+	if this.Hostname != that1.Hostname {
+		return false
+	}
+	if !this.URL.Equal(that1.URL) {
+		return false
+	}
+	if !this.Domain.Equal(that1.Domain) {
+		return false
+	}
+	if len(this.Resources) != len(that1.Resources) {
+		return false
+	}
+	for i := range this.Resources {
+		if !this.Resources[i].Equal(&that1.Resources[i]) {
+			return false
+		}
+	}
+	if len(this.Attributes) != len(that1.Attributes) {
+		return false
+	}
+	for i := range this.Attributes {
+		if !this.Attributes[i].Equal(&that1.Attributes[i]) {
+			return false
+		}
+	}
+	if len(this.ExecutorIDs) != len(that1.ExecutorIDs) {
+		return false
+	}
+	for i := range this.ExecutorIDs {
+		if !this.ExecutorIDs[i].Equal(&that1.ExecutorIDs[i]) {
+			return false
+		}
+	}
+	if !this.Unavailability.Equal(that1.Unavailability) {
+		return false
+	}
+	if !this.AllocationInfo.Equal(that1.AllocationInfo) {
+		return false
+	}
+	return true
+}
+func (this *Offer_Operation) VerboseEqual(that interface{}) error {
+	if that == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that == nil && this != nil")
+	}
+
+	that1, ok := that.(*Offer_Operation)
+	if !ok {
+		that2, ok := that.(Offer_Operation)
+		if ok {
+			that1 = &that2
+		} else {
+			return fmt.Errorf("that is not of type *Offer_Operation")
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that is type *Offer_Operation but is nil && this != nil")
+	} else if this == nil {
+		return fmt.Errorf("that is type *Offer_Operation but is not nil && this == nil")
+	}
+	if this.Type != that1.Type {
+		return fmt.Errorf("Type this(%v) Not Equal that(%v)", this.Type, that1.Type)
+	}
+	if !this.ID.Equal(that1.ID) {
+		return fmt.Errorf("ID this(%v) Not Equal that(%v)", this.ID, that1.ID)
+	}
+	if !this.Launch.Equal(that1.Launch) {
+		return fmt.Errorf("Launch this(%v) Not Equal that(%v)", this.Launch, that1.Launch)
+	}
+	if !this.LaunchGroup.Equal(that1.LaunchGroup) {
+		return fmt.Errorf("LaunchGroup this(%v) Not Equal that(%v)", this.LaunchGroup, that1.LaunchGroup)
+	}
+	if !this.Reserve.Equal(that1.Reserve) {
+		return fmt.Errorf("Reserve this(%v) Not Equal that(%v)", this.Reserve, that1.Reserve)
+	}
+	if !this.Unreserve.Equal(that1.Unreserve) {
+		return fmt.Errorf("Unreserve this(%v) Not Equal that(%v)", this.Unreserve, that1.Unreserve)
+	}
+	if !this.Create.Equal(that1.Create) {
+		return fmt.Errorf("Create this(%v) Not Equal that(%v)", this.Create, that1.Create)
+	}
+	if !this.Destroy.Equal(that1.Destroy) {
+		return fmt.Errorf("Destroy this(%v) Not Equal that(%v)", this.Destroy, that1.Destroy)
+	}
+	if !this.GrowVolume.Equal(that1.GrowVolume) {
+		return fmt.Errorf("GrowVolume this(%v) Not Equal that(%v)", this.GrowVolume, that1.GrowVolume)
+	}
+	if !this.ShrinkVolume.Equal(that1.ShrinkVolume) {
+		return fmt.Errorf("ShrinkVolume this(%v) Not Equal that(%v)", this.ShrinkVolume, that1.ShrinkVolume)
+	}
+	if !this.CreateDisk.Equal(that1.CreateDisk) {
+		return fmt.Errorf("CreateDisk this(%v) Not Equal that(%v)", this.CreateDisk, that1.CreateDisk)
+	}
+	if !this.DestroyDisk.Equal(that1.DestroyDisk) {
+		return fmt.Errorf("DestroyDisk this(%v) Not Equal that(%v)", this.DestroyDisk, that1.DestroyDisk)
+	}
+	return nil
+}
+func (this *Offer_Operation) Equal(that interface{}) bool {
+	if that == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	}
+
+	that1, ok := that.(*Offer_Operation)
+	if !ok {
+		that2, ok := that.(Offer_Operation)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	} else if this == nil {
+		return false
+	}
+	if this.Type != that1.Type {
+		return false
+	}
+	if !this.ID.Equal(that1.ID) {
+		return false
+	}
+	if !this.Launch.Equal(that1.Launch) {
+		return false
+	}
+	if !this.LaunchGroup.Equal(that1.LaunchGroup) {
+		return false
+	}
+	if !this.Reserve.Equal(that1.Reserve) {
+		return false
+	}
+	if !this.Unreserve.Equal(that1.Unreserve) {
+		return false
+	}
+	if !this.Create.Equal(that1.Create) {
+		return false
+	}
+	if !this.Destroy.Equal(that1.Destroy) {
+		return false
+	}
+	if !this.GrowVolume.Equal(that1.GrowVolume) {
+		return false
+	}
+	if !this.ShrinkVolume.Equal(that1.ShrinkVolume) {
+		return false
+	}
+	if !this.CreateDisk.Equal(that1.CreateDisk) {
+		return false
+	}
+	if !this.DestroyDisk.Equal(that1.DestroyDisk) {
+		return false
+	}
+	return true
+}
+func (this *Offer_Operation_Launch) VerboseEqual(that interface{}) error {
+	if that == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that == nil && this != nil")
+	}
+
+	that1, ok := that.(*Offer_Operation_Launch)
+	if !ok {
+		that2, ok := that.(Offer_Operation_Launch)
+		if ok {
+			that1 = &that2
+		} else {
+			return fmt.Errorf("that is not of type *Offer_Operation_Launch")
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that is type *Offer_Operation_Launch but is nil && this != nil")
+	} else if this == nil {
+		return fmt.Errorf("that is type *Offer_Operation_Launch but is not nil && this == nil")
+	}
+	if len(this.TaskInfos) != len(that1.TaskInfos) {
+		return fmt.Errorf("TaskInfos this(%v) Not Equal that(%v)", len(this.TaskInfos), len(that1.TaskInfos))
+	}
+	for i := range this.TaskInfos {
+		if !this.TaskInfos[i].Equal(&that1.TaskInfos[i]) {
+			return fmt.Errorf("TaskInfos this[%v](%v) Not Equal that[%v](%v)", i, this.TaskInfos[i], i, that1.TaskInfos[i])
+		}
+	}
+	return nil
+}
+func (this *Offer_Operation_Launch) Equal(that interface{}) bool {
+	if that == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	}
+
+	that1, ok := that.(*Offer_Operation_Launch)
+	if !ok {
+		that2, ok := that.(Offer_Operation_Launch)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	} else if this == nil {
+		return false
+	}
+	if len(this.TaskInfos) != len(that1.TaskInfos) {
+		return false
+	}
+	for i := range this.TaskInfos {
+		if !this.TaskInfos[i].Equal(&that1.TaskInfos[i]) {
+			return false
+		}
+	}
+	return true
+}
+func (this *Offer_Operation_LaunchGroup) VerboseEqual(that interface{}) error {
+	if that == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that == nil && this != nil")
+	}
+
+	that1, ok := that.(*Offer_Operation_LaunchGroup)
+	if !ok {
+		that2, ok := that.(Offer_Operation_LaunchGroup)
+		if ok {
+			that1 = &that2
+		} else {
+			return fmt.Errorf("that is not of type *Offer_Operation_LaunchGroup")
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that is type *Offer_Operation_LaunchGroup but is nil && this != nil")
+	} else if this == nil {
+		return fmt.Errorf("that is type *Offer_Operation_LaunchGroup but is not nil && this == nil")
+	}
+	if !this.Executor.Equal(&that1.Executor) {
+		return fmt.Errorf("Executor this(%v) Not Equal that(%v)", this.Executor, that1.Executor)
+	}
+	if !this.TaskGroup.Equal(&that1.TaskGroup) {
+		return fmt.Errorf("TaskGroup this(%v) Not Equal that(%v)", this.TaskGroup, that1.TaskGroup)
+	}
+	return nil
+}
+func (this *Offer_Operation_LaunchGroup) Equal(that interface{}) bool {
+	if that == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	}
+
+	that1, ok := that.(*Offer_Operation_LaunchGroup)
+	if !ok {
+		that2, ok := that.(Offer_Operation_LaunchGroup)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	} else if this == nil {
+		return false
+	}
+	if !this.Executor.Equal(&that1.Executor) {
+		return false
+	}
+	if !this.TaskGroup.Equal(&that1.TaskGroup) {
+		return false
+	}
+	return true
+}
+func (this *Offer_Operation_Reserve) VerboseEqual(that interface{}) error {
+	if that == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that == nil && this != nil")
+	}
+
+	that1, ok := that.(*Offer_Operation_Reserve)
+	if !ok {
+		that2, ok := that.(Offer_Operation_Reserve)
+		if ok {
+			that1 = &that2
+		} else {
+			return fmt.Errorf("that is not of type *Offer_Operation_Reserve")
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that is type *Offer_Operation_Reserve but is nil && this != nil")
+	} else if this == nil {
+		return fmt.Errorf("that is type *Offer_Operation_Reserve but is not nil && this == nil")
+	}
+	if len(this.Resources) != len(that1.Resources) {
+		return fmt.Errorf("Resources this(%v) Not Equal that(%v)", len(this.Resources), len(that1.Resources))
+	}
+	for i := range this.Resources {
+		if !this.Resources[i].Equal(&that1.Resources[i]) {
+			return fmt.Errorf("Resources this[%v](%v) Not Equal that[%v](%v)", i, this.Resources[i], i, that1.Resources[i])
+		}
+	}
+	return nil
+}
+func (this *Offer_Operation_Reserve) Equal(that interface{}) bool {
+	if that == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	}
+
+	that1, ok := that.(*Offer_Operation_Reserve)
+	if !ok {
+		that2, ok := that.(Offer_Operation_Reserve)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	} else if this == nil {
+		return false
+	}
+	if len(this.Resources) != len(that1.Resources) {
+		return false
+	}
+	for i := range this.Resources {
+		if !this.Resources[i].Equal(&that1.Resources[i]) {
+			return false
+		}
+	}
+	return true
+}
+func (this *Offer_Operation_Unreserve) VerboseEqual(that interface{}) error {
+	if that == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that == nil && this != nil")
+	}
+
+	that1, ok := that.(*Offer_Operation_Unreserve)
+	if !ok {
+		that2, ok := that.(Offer_Operation_Unreserve)
+		if ok {
+			that1 = &that2
+		} else {
+			return fmt.Errorf("that is not of type *Offer_Operation_Unreserve")
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that is type *Offer_Operation_Unreserve but is nil && this != nil")
+	} else if this == nil {
+		return fmt.Errorf("that is type *Offer_Operation_Unreserve but is not nil && this == nil")
+	}
+	if len(this.Resources) != len(that1.Resources) {
+		return fmt.Errorf("Resources this(%v) Not Equal that(%v)", len(this.Resources), len(that1.Resources))
+	}
+	for i := range this.Resources {
+		if !this.Resources[i].Equal(&that1.Resources[i]) {
+			return fmt.Errorf("Resources this[%v](%v) Not Equal that[%v](%v)", i, this.Resources[i], i, that1.Resources[i])
+		}
+	}
+	return nil
+}
+func (this *Offer_Operation_Unreserve) Equal(that interface{}) bool {
+	if that == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	}
+
+	that1, ok := that.(*Offer_Operation_Unreserve)
+	if !ok {
+		that2, ok := that.(Offer_Operation_Unreserve)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	} else if this == nil {
+		return false
+	}
+	if len(this.Resources) != len(that1.Resources) {
+		return false
+	}
+	for i := range this.Resources {
+		if !this.Resources[i].Equal(&that1.Resources[i]) {
+			return false
+		}
+	}
+	return true
+}
+func (this *Offer_Operation_Create) VerboseEqual(that interface{}) error {
+	if that == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that == nil && this != nil")
+	}
+
+	that1, ok := that.(*Offer_Operation_Create)
+	if !ok {
+		that2, ok := that.(Offer_Operation_Create)
+		if ok {
+			that1 = &that2
+		} else {
+			return fmt.Errorf("that is not of type *Offer_Operation_Create")
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that is type *Offer_Operation_Create but is nil && this != nil")
+	} else if this == nil {
+		return fmt.Errorf("that is type *Offer_Operation_Create but is not nil && this == nil")
+	}
+	if len(this.Volumes) != len(that1.Volumes) {
+		return fmt.Errorf("Volumes this(%v) Not Equal that(%v)", len(this.Volumes), len(that1.Volumes))
+	}
+	for i := range this.Volumes {
+		if !this.Volumes[i].Equal(&that1.Volumes[i]) {
+			return fmt.Errorf("Volumes this[%v](%v) Not Equal that[%v](%v)", i, this.Volumes[i], i, that1.Volumes[i])
+		}
+	}
+	return nil
+}
+func (this *Offer_Operation_Create) Equal(that interface{}) bool {
+	if that == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	}
+
+	that1, ok := that.(*Offer_Operation_Create)
+	if !ok {
+		that2, ok := that.(Offer_Operation_Create)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	} else if this == nil {
+		return false
+	}
+	if len(this.Volumes) != len(that1.Volumes) {
+		return false
+	}
+	for i := range this.Volumes {
+		if !this.Volumes[i].Equal(&that1.Volumes[i]) {
+			return false
+		}
+	}
+	return true
+}
+func (this *Offer_Operation_Destroy) VerboseEqual(that interface{}) error {
+	if that == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that == nil && this != nil")
+	}
+
+	that1, ok := that.(*Offer_Operation_Destroy)
+	if !ok {
+		that2, ok := that.(Offer_Operation_Destroy)
+		if ok {
+			that1 = &that2
+		} else {
+			return fmt.Errorf("that is not of type *Offer_Operation_Destroy")
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that is type *Offer_Operation_Destroy but is nil && this != nil")
+	} else if this == nil {
+		return fmt.Errorf("that is type *Offer_Operation_Destroy but is not nil && this == nil")
+	}
+	if len(this.Volumes) != len(that1.Volumes) {
+		return fmt.Errorf("Volumes this(%v) Not Equal that(%v)", len(this.Volumes), len(that1.Volumes))
+	}
+	for i := range this.Volumes {
+		if !this.Volumes[i].Equal(&that1.Volumes[i]) {
+			return fmt.Errorf("Volumes this[%v](%v) Not Equal that[%v](%v)", i, this.Volumes[i], i, that1.Volumes[i])
+		}
+	}
+	return nil
+}
+func (this *Offer_Operation_Destroy) Equal(that interface{}) bool {
+	if that == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	}
+
+	that1, ok := that.(*Offer_Operation_Destroy)
+	if !ok {
+		that2, ok := that.(Offer_Operation_Destroy)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	} else if this == nil {
+		return false
+	}
+	if len(this.Volumes) != len(that1.Volumes) {
+		return false
+	}
+	for i := range this.Volumes {
+		if !this.Volumes[i].Equal(&that1.Volumes[i]) {
+			return false
+		}
+	}
+	return true
+}
+func (this *Offer_Operation_GrowVolume) VerboseEqual(that interface{}) error {
+	if that == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that == nil && this != nil")
+	}
+
+	that1, ok := that.(*Offer_Operation_GrowVolume)
+	if !ok {
+		that2, ok := that.(Offer_Operation_GrowVolume)
+		if ok {
+			that1 = &that2
+		} else {
+			return fmt.Errorf("that is not of type *Offer_Operation_GrowVolume")
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that is type *Offer_Operation_GrowVolume but is nil && this != nil")
+	} else if this == nil {
+		return fmt.Errorf("that is type *Offer_Operation_GrowVolume but is not nil && this == nil")
+	}
+	if !this.Volume.Equal(&that1.Volume) {
+		return fmt.Errorf("Volume this(%v) Not Equal that(%v)", this.Volume, that1.Volume)
+	}
+	if !this.Addition.Equal(&that1.Addition) {
+		return fmt.Errorf("Addition this(%v) Not Equal that(%v)", this.Addition, that1.Addition)
+	}
+	return nil
+}
+func (this *Offer_Operation_GrowVolume) Equal(that interface{}) bool {
+	if that == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	}
+
+	that1, ok := that.(*Offer_Operation_GrowVolume)
+	if !ok {
+		that2, ok := that.(Offer_Operation_GrowVolume)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	} else if this == nil {
+		return false
+	}
+	if !this.Volume.Equal(&that1.Volume) {
+		return false
+	}
+	if !this.Addition.Equal(&that1.Addition) {
+		return false
+	}
+	return true
+}
+func (this *Offer_Operation_ShrinkVolume) VerboseEqual(that interface{}) error {
+	if that == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that == nil && this != nil")
+	}
+
+	that1, ok := that.(*Offer_Operation_ShrinkVolume)
+	if !ok {
+		that2, ok := that.(Offer_Operation_ShrinkVolume)
+		if ok {
+			that1 = &that2
+		} else {
+			return fmt.Errorf("that is not of type *Offer_Operation_ShrinkVolume")
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that is type *Offer_Operation_ShrinkVolume but is nil && this != nil")
+	} else if this == nil {
+		return fmt.Errorf("that is type *Offer_Operation_ShrinkVolume but is not nil && this == nil")
+	}
+	if !this.Volume.Equal(&that1.Volume) {
+		return fmt.Errorf("Volume this(%v) Not Equal that(%v)", this.Volume, that1.Volume)
+	}
+	if !this.Subtract.Equal(&that1.Subtract) {
+		return fmt.Errorf("Subtract this(%v) Not Equal that(%v)", this.Subtract, that1.Subtract)
+	}
+	return nil
+}
+func (this *Offer_Operation_ShrinkVolume) Equal(that interface{}) bool {
+	if that == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	}
+
+	that1, ok := that.(*Offer_Operation_ShrinkVolume)
+	if !ok {
+		that2, ok := that.(Offer_Operation_ShrinkVolume)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	} else if this == nil {
+		return false
+	}
+	if !this.Volume.Equal(&that1.Volume) {
+		return false
+	}
+	if !this.Subtract.Equal(&that1.Subtract) {
+		return false
+	}
+	return true
+}
+func (this *Offer_Operation_CreateDisk) VerboseEqual(that interface{}) error {
+	if that == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that == nil && this != nil")
+	}
+
+	that1, ok := that.(*Offer_Operation_CreateDisk)
+	if !ok {
+		that2, ok := that.(Offer_Operation_CreateDisk)
+		if ok {
+			that1 = &that2
+		} else {
+			return fmt.Errorf("that is not of type *Offer_Operation_CreateDisk")
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that is type *Offer_Operation_CreateDisk but is nil && this != nil")
+	} else if this == nil {
+		return fmt.Errorf("that is type *Offer_Operation_CreateDisk but is not nil && this == nil")
+	}
+	if !this.Source.Equal(&that1.Source) {
+		return fmt.Errorf("Source this(%v) Not Equal that(%v)", this.Source, that1.Source)
+	}
+	if this.TargetType != that1.TargetType {
+		return fmt.Errorf("TargetType this(%v) Not Equal that(%v)", this.TargetType, that1.TargetType)
+	}
+	return nil
+}
+func (this *Offer_Operation_CreateDisk) Equal(that interface{}) bool {
+	if that == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	}
+
+	that1, ok := that.(*Offer_Operation_CreateDisk)
+	if !ok {
+		that2, ok := that.(Offer_Operation_CreateDisk)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	} else if this == nil {
+		return false
+	}
+	if !this.Source.Equal(&that1.Source) {
+		return false
+	}
+	if this.TargetType != that1.TargetType {
+		return false
+	}
+	return true
+}
+func (this *Offer_Operation_DestroyDisk) VerboseEqual(that interface{}) error {
+	if that == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that == nil && this != nil")
+	}
+
+	that1, ok := that.(*Offer_Operation_DestroyDisk)
+	if !ok {
+		that2, ok := that.(Offer_Operation_DestroyDisk)
+		if ok {
+			that1 = &that2
+		} else {
+			return fmt.Errorf("that is not of type *Offer_Operation_DestroyDisk")
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that is type *Offer_Operation_DestroyDisk but is nil && this != nil")
+	} else if this == nil {
+		return fmt.Errorf("that is type *Offer_Operation_DestroyDisk but is not nil && this == nil")
+	}
+	if !this.Source.Equal(&that1.Source) {
+		return fmt.Errorf("Source this(%v) Not Equal that(%v)", this.Source, that1.Source)
+	}
+	return nil
+}
+func (this *Offer_Operation_DestroyDisk) Equal(that interface{}) bool {
+	if that == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	}
+
+	that1, ok := that.(*Offer_Operation_DestroyDisk)
+	if !ok {
+		that2, ok := that.(Offer_Operation_DestroyDisk)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	} else if this == nil {
+		return false
+	}
+	if !this.Source.Equal(&that1.Source) {
+		return false
+	}
+	return true
+}
+func (this *InverseOffer) VerboseEqual(that interface{}) error {
+	if that == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that == nil && this != nil")
+	}
+
+	that1, ok := that.(*InverseOffer)
+	if !ok {
+		that2, ok := that.(InverseOffer)
+		if ok {
+			that1 = &that2
+		} else {
+			return fmt.Errorf("that is not of type *InverseOffer")
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that is type *InverseOffer but is nil && this != nil")
+	} else if this == nil {
+		return fmt.Errorf("that is type *InverseOffer but is not nil && this == nil")
+	}
+	if !this.OfferID.Equal(&that1.OfferID) {
+		return fmt.Errorf("OfferID this(%v) Not Equal that(%v)", this.OfferID, that1.OfferID)
+	}
+	if !this.URL.Equal(that1.URL) {
+		return fmt.Errorf("URL this(%v) Not Equal that(%v)", this.URL, that1.URL)
+	}
+	if !this.FrameworkID.Equal(&that1.FrameworkID) {
+		return fmt.Errorf("FrameworkID this(%v) Not Equal that(%v)", this.FrameworkID, that1.FrameworkID)
+	}
+	if !this.AgentID.Equal(that1.AgentID) {
+		return fmt.Errorf("AgentID this(%v) Not Equal that(%v)", this.AgentID, that1.AgentID)
+	}
+	if !this.Unavailability.Equal(&that1.Unavailability) {
+		return fmt.Errorf("Unavailability this(%v) Not Equal that(%v)", this.Unavailability, that1.Unavailability)
+	}
+	if len(this.Resources) != len(that1.Resources) {
+		return fmt.Errorf("Resources this(%v) Not Equal that(%v)", len(this.Resources), len(that1.Resources))
+	}
+	for i := range this.Resources {
+		if !this.Resources[i].Equal(&that1.Resources[i]) {
+			return fmt.Errorf("Resources this[%v](%v) Not Equal that[%v](%v)", i, this.Resources[i], i, that1.Resources[i])
+		}
+	}
+	return nil
+}
+func (this *InverseOffer) Equal(that interface{}) bool {
+	if that == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	}
+
+	that1, ok := that.(*InverseOffer)
+	if !ok {
+		that2, ok := that.(InverseOffer)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	} else if this == nil {
+		return false
+	}
+	if !this.OfferID.Equal(&that1.OfferID) {
+		return false
+	}
+	if !this.URL.Equal(that1.URL) {
+		return false
+	}
+	if !this.FrameworkID.Equal(&that1.FrameworkID) {
+		return false
+	}
+	if !this.AgentID.Equal(that1.AgentID) {
+		return false
+	}
+	if !this.Unavailability.Equal(&that1.Unavailability) {
+		return false
+	}
+	if len(this.Resources) != len(that1.Resources) {
+		return false
+	}
+	for i := range this.Resources {
+		if !this.Resources[i].Equal(&that1.Resources[i]) {
+			return false
+		}
+	}
+	return true
+}
+func (this *TaskInfo) VerboseEqual(that interface{}) error {
+	if that == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that == nil && this != nil")
+	}
+
+	that1, ok := that.(*TaskInfo)
+	if !ok {
+		that2, ok := that.(TaskInfo)
+		if ok {
+			that1 = &that2
+		} else {
+			return fmt.Errorf("that is not of type *TaskInfo")
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that is type *TaskInfo but is nil && this != nil")
+	} else if this == nil {
+		return fmt.Errorf("that is type *TaskInfo but is not nil && this == nil")
+	}
+	if this.Name != that1.Name {
+		return fmt.Errorf("Name this(%v) Not Equal that(%v)", this.Name, that1.Name)
+	}
+	if !this.TaskID.Equal(&that1.TaskID) {
+		return fmt.Errorf("TaskID this(%v) Not Equal that(%v)", this.TaskID, that1.TaskID)
+	}
+	if !this.AgentID.Equal(&that1.AgentID) {
+		return fmt.Errorf("AgentID this(%v) Not Equal that(%v)", this.AgentID, that1.AgentID)
+	}
+	if len(this.Resources) != len(that1.Resources) {
+		return fmt.Errorf("Resources this(%v) Not Equal that(%v)", len(this.Resources), len(that1.Resources))
+	}
+	for i := range this.Resources {
+		if !this.Resources[i].Equal(&that1.Resources[i]) {
+			return fmt.Errorf("Resources this[%v](%v) Not Equal that[%v](%v)", i, this.Resources[i], i, that1.Resources[i])
+		}
+	}
+	if !this.Executor.Equal(that1.Executor) {
+		return fmt.Errorf("Executor this(%v) Not Equal that(%v)", this.Executor, that1.Executor)
+	}
+	if !this.Command.Equal(that1.Command) {
+		return fmt.Errorf("Command this(%v) Not Equal that(%v)", this.Command, that1.Command)
+	}
+	if !this.Container.Equal(that1.Container) {
+		return fmt.Errorf("Container this(%v) Not Equal that(%v)", this.Container, that1.Container)
+	}
+	if !this.HealthCheck.Equal(that1.HealthCheck) {
+		return fmt.Errorf("HealthCheck this(%v) Not Equal that(%v)", this.HealthCheck, that1.HealthCheck)
+	}
+	if !this.Check.Equal(that1.Check) {
+		return fmt.Errorf("Check this(%v) Not Equal that(%v)", this.Check, that1.Check)
+	}
+	if !this.KillPolicy.Equal(that1.KillPolicy) {
+		return fmt.Errorf("KillPolicy this(%v) Not Equal that(%v)", this.KillPolicy, that1.KillPolicy)
+	}
+	if !bytes.Equal(this.Data, that1.Data) {
+		return fmt.Errorf("Data this(%v) Not Equal that(%v)", this.Data, that1.Data)
+	}
+	if !this.Labels.Equal(that1.Labels) {
+		return fmt.Errorf("Labels this(%v) Not Equal that(%v)", this.Labels, that1.Labels)
+	}
+	if !this.Discovery.Equal(that1.Discovery) {
+		return fmt.Errorf("Discovery this(%v) Not Equal that(%v)", this.Discovery, that1.Discovery)
+	}
+	if !this.MaxCompletionTime.Equal(that1.MaxCompletionTime) {
+		return fmt.Errorf("MaxCompletionTime this(%v) Not Equal that(%v)", this.MaxCompletionTime, that1.MaxCompletionTime)
+	}
+	return nil
+}
+func (this *TaskInfo) Equal(that interface{}) bool {
+	if that == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	}
+
+	that1, ok := that.(*TaskInfo)
+	if !ok {
+		that2, ok := that.(TaskInfo)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	} else if this == nil {
+		return false
+	}
+	if this.Name != that1.Name {
+		return false
+	}
+	if !this.TaskID.Equal(&that1.TaskID) {
+		return false
+	}
+	if !this.AgentID.Equal(&that1.AgentID) {
+		return false
+	}
+	if len(this.Resources) != len(that1.Resources) {
+		return false
+	}
+	for i := range this.Resources {
+		if !this.Resources[i].Equal(&that1.Resources[i]) {
+			return false
+		}
+	}
+	if !this.Executor.Equal(that1.Executor) {
+		return false
+	}
+	if !this.Command.Equal(that1.Command) {
+		return false
+	}
+	if !this.Container.Equal(that1.Container) {
+		return false
+	}
+	if !this.HealthCheck.Equal(that1.HealthCheck) {
+		return false
+	}
+	if !this.Check.Equal(that1.Check) {
+		return false
+	}
+	if !this.KillPolicy.Equal(that1.KillPolicy) {
+		return false
+	}
+	if !bytes.Equal(this.Data, that1.Data) {
+		return false
+	}
+	if !this.Labels.Equal(that1.Labels) {
+		return false
+	}
+	if !this.Discovery.Equal(that1.Discovery) {
+		return false
+	}
+	if !this.MaxCompletionTime.Equal(that1.MaxCompletionTime) {
+		return false
+	}
+	return true
+}
+func (this *TaskGroupInfo) VerboseEqual(that interface{}) error {
+	if that == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that == nil && this != nil")
+	}
+
+	that1, ok := that.(*TaskGroupInfo)
+	if !ok {
+		that2, ok := that.(TaskGroupInfo)
+		if ok {
+			that1 = &that2
+		} else {
+			return fmt.Errorf("that is not of type *TaskGroupInfo")
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that is type *TaskGroupInfo but is nil && this != nil")
+	} else if this == nil {
+		return fmt.Errorf("that is type *TaskGroupInfo but is not nil && this == nil")
+	}
+	if len(this.Tasks) != len(that1.Tasks) {
+		return fmt.Errorf("Tasks this(%v) Not Equal that(%v)", len(this.Tasks), len(that1.Tasks))
+	}
+	for i := range this.Tasks {
+		if !this.Tasks[i].Equal(&that1.Tasks[i]) {
+			return fmt.Errorf("Tasks this[%v](%v) Not Equal that[%v](%v)", i, this.Tasks[i], i, that1.Tasks[i])
+		}
+	}
+	return nil
+}
+func (this *TaskGroupInfo) Equal(that interface{}) bool {
+	if that == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	}
+
+	that1, ok := that.(*TaskGroupInfo)
+	if !ok {
+		that2, ok := that.(TaskGroupInfo)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	} else if this == nil {
+		return false
+	}
+	if len(this.Tasks) != len(that1.Tasks) {
+		return false
+	}
+	for i := range this.Tasks {
+		if !this.Tasks[i].Equal(&that1.Tasks[i]) {
+			return false
+		}
+	}
+	return true
+}
+func (this *Task) VerboseEqual(that interface{}) error {
+	if that == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that == nil && this != nil")
+	}
+
+	that1, ok := that.(*Task)
+	if !ok {
+		that2, ok := that.(Task)
+		if ok {
+			that1 = &that2
+		} else {
+			return fmt.Errorf("that is not of type *Task")
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that is type *Task but is nil && this != nil")
+	} else if this == nil {
+		return fmt.Errorf("that is type *Task but is not nil && this == nil")
+	}
+	if this.Name != that1.Name {
+		return fmt.Errorf("Name this(%v) Not Equal that(%v)", this.Name, that1.Name)
+	}
+	if !this.TaskID.Equal(&that1.TaskID) {
+		return fmt.Errorf("TaskID this(%v) Not Equal that(%v)", this.TaskID, that1.TaskID)
+	}
+	if !this.FrameworkID.Equal(&that1.FrameworkID) {
+		return fmt.Errorf("FrameworkID this(%v) Not Equal that(%v)", this.FrameworkID, that1.FrameworkID)
+	}
+	if !this.ExecutorID.Equal(that1.ExecutorID) {
+		return fmt.Errorf("ExecutorID this(%v) Not Equal that(%v)", this.ExecutorID, that1.ExecutorID)
+	}
+	if !this.AgentID.Equal(&that1.AgentID) {
+		return fmt.Errorf("AgentID this(%v) Not Equal that(%v)", this.AgentID, that1.AgentID)
+	}
+	if this.State != nil && that1.State != nil {
+		if *this.State != *that1.State {
+			return fmt.Errorf("State this(%v) Not Equal that(%v)", *this.State, *that1.State)
+		}
+	} else if this.State != nil {
+		return fmt.Errorf("this.State == nil && that.State != nil")
+	} else if that1.State != nil {
+		return fmt.Errorf("State this(%v) Not Equal that(%v)", this.State, that1.State)
+	}
+	if len(this.Resources) != len(that1.Resources) {
+		return fmt.Errorf("Resources this(%v) Not Equal that(%v)", len(this.Resources), len(that1.Resources))
+	}
+	for i := range this.Resources {
+		if !this.Resources[i].Equal(&that1.Resources[i]) {
+			return fmt.Errorf("Resources this[%v](%v) Not Equal that[%v](%v)", i, this.Resources[i], i, that1.Resources[i])
+		}
+	}
+	if len(this.Statuses) != len(that1.Statuses) {
+		return fmt.Errorf("Statuses this(%v) Not Equal that(%v)", len(this.Statuses), len(that1.Statuses))
+	}
+	for i := range this.Statuses {
+		if !this.Statuses[i].Equal(&that1.Statuses[i]) {
+			return fmt.Errorf("Statuses this[%v](%v) Not Equal that[%v](%v)", i, this.Statuses[i], i, that1.Statuses[i])
+		}
+	}
+	if this.StatusUpdateState != nil && that1.StatusUpdateState != nil {
+		if *this.StatusUpdateState != *that1.StatusUpdateState {
+			return fmt.Errorf("StatusUpdateState this(%v) Not Equal that(%v)", *this.StatusUpdateState, *that1.StatusUpdateState)
+		}
+	} else if this.StatusUpdateState != nil {
+		return fmt.Errorf("this.StatusUpdateState == nil && that.StatusUpdateState != nil")
+	} else if that1.StatusUpdateState != nil {
+		return fmt.Errorf("StatusUpdateState this(%v) Not Equal that(%v)", this.StatusUpdateState, that1.StatusUpdateState)
+	}
+	if !bytes.Equal(this.StatusUpdateUUID, that1.StatusUpdateUUID) {
+		return fmt.Errorf("StatusUpdateUUID this(%v) Not Equal that(%v)", this.StatusUpdateUUID, that1.StatusUpdateUUID)
+	}
+	if !this.Labels.Equal(that1.Labels) {
+		return fmt.Errorf("Labels this(%v) Not Equal that(%v)", this.Labels, that1.Labels)
+	}
+	if !this.Discovery.Equal(that1.Discovery) {
+		return fmt.Errorf("Discovery this(%v) Not Equal that(%v)", this.Discovery, that1.Discovery)
+	}
+	if !this.Container.Equal(that1.Container) {
+		return fmt.Errorf("Container this(%v) Not Equal that(%v)", this.Container, that1.Container)
+	}
+	if this.User != nil && that1.User != nil {
+		if *this.User != *that1.User {
+			return fmt.Errorf("User this(%v) Not Equal that(%v)", *this.User, *that1.User)
+		}
+	} else if this.User != nil {
+		return fmt.Errorf("this.User == nil && that.User != nil")
+	} else if that1.User != nil {
+		return fmt.Errorf("User this(%v) Not Equal that(%v)", this.User, that1.User)
+	}
+	return nil
+}
+func (this *Task) Equal(that interface{}) bool {
+	if that == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	}
+
+	that1, ok := that.(*Task)
+	if !ok {
+		that2, ok := that.(Task)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	} else if this == nil {
+		return false
+	}
+	if this.Name != that1.Name {
+		return false
+	}
+	if !this.TaskID.Equal(&that1.TaskID) {
+		return false
+	}
+	if !this.FrameworkID.Equal(&that1.FrameworkID) {
+		return false
+	}
+	if !this.ExecutorID.Equal(that1.ExecutorID) {
+		return false
+	}
+	if !this.AgentID.Equal(&that1.AgentID) {
+		return false
+	}
+	if this.State != nil && that1.State != nil {
+		if *this.State != *that1.State {
+			return false
+		}
+	} else if this.State != nil {
+		return false
+	} else if that1.State != nil {
+		return false
+	}
+	if len(this.Resources) != len(that1.Resources) {
+		return false
+	}
+	for i := range this.Resources {
+		if !this.Resources[i].Equal(&that1.Resources[i]) {
+			return false
+		}
+	}
+	if len(this.Statuses) != len(that1.Statuses) {
+		return false
+	}
+	for i := range this.Statuses {
+		if !this.Statuses[i].Equal(&that1.Statuses[i]) {
+			return false
+		}
+	}
+	if this.StatusUpdateState != nil && that1.StatusUpdateState != nil {
+		if *this.StatusUpdateState != *that1.StatusUpdateState {
+			return false
+		}
+	} else if this.StatusUpdateState != nil {
+		return false
+	} else if that1.StatusUpdateState != nil {
+		return false
+	}
+	if !bytes.Equal(this.StatusUpdateUUID, that1.StatusUpdateUUID) {
+		return false
+	}
+	if !this.Labels.Equal(that1.Labels) {
+		return false
+	}
+	if !this.Discovery.Equal(that1.Discovery) {
+		return false
+	}
+	if !this.Container.Equal(that1.Container) {
+		return false
+	}
+	if this.User != nil && that1.User != nil {
+		if *this.User != *that1.User {
+			return false
+		}
+	} else if this.User != nil {
+		return false
+	} else if that1.User != nil {
+		return false
+	}
+	return true
+}
+func (this *TaskResourceLimitation) VerboseEqual(that interface{}) error {
+	if that == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that == nil && this != nil")
+	}
+
+	that1, ok := that.(*TaskResourceLimitation)
+	if !ok {
+		that2, ok := that.(TaskResourceLimitation)
+		if ok {
+			that1 = &that2
+		} else {
+			return fmt.Errorf("that is not of type *TaskResourceLimitation")
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that is type *TaskResourceLimitation but is nil && this != nil")
+	} else if this == nil {
+		return fmt.Errorf("that is type *TaskResourceLimitation but is not nil && this == nil")
+	}
+	if len(this.Resources) != len(that1.Resources) {
+		return fmt.Errorf("Resources this(%v) Not Equal that(%v)", len(this.Resources), len(that1.Resources))
+	}
+	for i := range this.Resources {
+		if !this.Resources[i].Equal(&that1.Resources[i]) {
+			return fmt.Errorf("Resources this[%v](%v) Not Equal that[%v](%v)", i, this.Resources[i], i, that1.Resources[i])
+		}
+	}
+	return nil
+}
+func (this *TaskResourceLimitation) Equal(that interface{}) bool {
+	if that == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	}
+
+	that1, ok := that.(*TaskResourceLimitation)
+	if !ok {
+		that2, ok := that.(TaskResourceLimitation)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	} else if this == nil {
+		return false
+	}
+	if len(this.Resources) != len(that1.Resources) {
+		return false
+	}
+	for i := range this.Resources {
+		if !this.Resources[i].Equal(&that1.Resources[i]) {
+			return false
+		}
+	}
+	return true
+}
+func (this *UUID) VerboseEqual(that interface{}) error {
+	if that == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that == nil && this != nil")
+	}
+
+	that1, ok := that.(*UUID)
+	if !ok {
+		that2, ok := that.(UUID)
+		if ok {
+			that1 = &that2
+		} else {
+			return fmt.Errorf("that is not of type *UUID")
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that is type *UUID but is nil && this != nil")
+	} else if this == nil {
+		return fmt.Errorf("that is type *UUID but is not nil && this == nil")
+	}
+	if !bytes.Equal(this.Value, that1.Value) {
+		return fmt.Errorf("Value this(%v) Not Equal that(%v)", this.Value, that1.Value)
+	}
+	return nil
+}
+func (this *UUID) Equal(that interface{}) bool {
+	if that == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	}
+
+	that1, ok := that.(*UUID)
+	if !ok {
+		that2, ok := that.(UUID)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	} else if this == nil {
+		return false
+	}
+	if !bytes.Equal(this.Value, that1.Value) {
+		return false
+	}
+	return true
+}
+func (this *Operation) VerboseEqual(that interface{}) error {
+	if that == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that == nil && this != nil")
+	}
+
+	that1, ok := that.(*Operation)
+	if !ok {
+		that2, ok := that.(Operation)
+		if ok {
+			that1 = &that2
+		} else {
+			return fmt.Errorf("that is not of type *Operation")
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that is type *Operation but is nil && this != nil")
+	} else if this == nil {
+		return fmt.Errorf("that is type *Operation but is not nil && this == nil")
+	}
+	if !this.FrameworkID.Equal(that1.FrameworkID) {
+		return fmt.Errorf("FrameworkID this(%v) Not Equal that(%v)", this.FrameworkID, that1.FrameworkID)
+	}
+	if !this.AgentID.Equal(that1.AgentID) {
+		return fmt.Errorf("AgentID this(%v) Not Equal that(%v)", this.AgentID, that1.AgentID)
+	}
+	if !this.Info.Equal(&that1.Info) {
+		return fmt.Errorf("Info this(%v) Not Equal that(%v)", this.Info, that1.Info)
+	}
+	if !this.LatestStatus.Equal(&that1.LatestStatus) {
+		return fmt.Errorf("LatestStatus this(%v) Not Equal that(%v)", this.LatestStatus, that1.LatestStatus)
+	}
+	if len(this.Statuses) != len(that1.Statuses) {
+		return fmt.Errorf("Statuses this(%v) Not Equal that(%v)", len(this.Statuses), len(that1.Statuses))
+	}
+	for i := range this.Statuses {
+		if !this.Statuses[i].Equal(&that1.Statuses[i]) {
+			return fmt.Errorf("Statuses this[%v](%v) Not Equal that[%v](%v)", i, this.Statuses[i], i, that1.Statuses[i])
+		}
+	}
+	if !this.UUID.Equal(&that1.UUID) {
+		return fmt.Errorf("UUID this(%v) Not Equal that(%v)", this.UUID, that1.UUID)
+	}
+	return nil
+}
+func (this *Operation) Equal(that interface{}) bool {
+	if that == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	}
+
+	that1, ok := that.(*Operation)
+	if !ok {
+		that2, ok := that.(Operation)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return true
+		}
+		return false
+	} else if this == nil {
+		return false
+	}
+	if !this.FrameworkID.Equal(that1.FrameworkID) {
+		return false
+	}
+	if !this.AgentID.Equal(that1.AgentID) {
+		return false
+	}
+	if !this.Info.Equal(&that1.Info) {
+		return false
+	}
+	if !this.LatestStatus.Equal(&that1.LatestStatus) {
+		return false
+	}
+	if len(this.Statuses) != len(that1.Statuses) {
+		return false
+	}
+	for i := range this.Statuses {
+		if !this.Statuses[i].Equal(&that1.Statuses[i]) {
+			return false
+		}
+	}
+	if !this.UUID.Equal(&that1.UUID) {
+		return false
+	}
+	return true
+}
+func (this *OperationStatus) VerboseEqual(that interface{}) error {
+	if that == nil {
+		if this == nil {
+			return nil
+		}
+		return fmt.Errorf("that == nil && this != nil")
+	}
+
+	that1, ok := that.(*OperationStatus)
+	if !ok {
+		that2, ok := that.(OperationStatus)
+		if ok {
+			that1 = &that2
+		} else {
+			return fmt.Errorf("that is not of type *OperationStatus")
+		}
+	}
+ if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *OperationStatus but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *OperationStatus but is not nil && this == nil") + } + if !this.OperationID.Equal(that1.OperationID) { + return fmt.Errorf("OperationID this(%v) Not Equal that(%v)", this.OperationID, that1.OperationID) + } + if this.State != that1.State { + return fmt.Errorf("State this(%v) Not Equal that(%v)", this.State, that1.State) + } + if this.Message != nil && that1.Message != nil { + if *this.Message != *that1.Message { + return fmt.Errorf("Message this(%v) Not Equal that(%v)", *this.Message, *that1.Message) + } + } else if this.Message != nil { + return fmt.Errorf("this.Message == nil && that.Message != nil") + } else if that1.Message != nil { + return fmt.Errorf("Message this(%v) Not Equal that(%v)", this.Message, that1.Message) + } + if len(this.ConvertedResources) != len(that1.ConvertedResources) { + return fmt.Errorf("ConvertedResources this(%v) Not Equal that(%v)", len(this.ConvertedResources), len(that1.ConvertedResources)) + } + for i := range this.ConvertedResources { + if !this.ConvertedResources[i].Equal(&that1.ConvertedResources[i]) { + return fmt.Errorf("ConvertedResources this[%v](%v) Not Equal that[%v](%v)", i, this.ConvertedResources[i], i, that1.ConvertedResources[i]) + } + } + if !this.UUID.Equal(that1.UUID) { + return fmt.Errorf("UUID this(%v) Not Equal that(%v)", this.UUID, that1.UUID) + } + return nil +} +func (this *OperationStatus) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*OperationStatus) + if !ok { + that2, ok := that.(OperationStatus) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if !this.OperationID.Equal(that1.OperationID) { + return false + } + if this.State != that1.State { + return false + } + if this.Message != nil && that1.Message != nil { + if *this.Message != *that1.Message { + return false + } + } else if this.Message != nil { + return false + } else if that1.Message != nil { + return false + } + if len(this.ConvertedResources) != len(that1.ConvertedResources) { + return false + } + for i := range this.ConvertedResources { + if !this.ConvertedResources[i].Equal(&that1.ConvertedResources[i]) { + return false + } + } + if !this.UUID.Equal(that1.UUID) { + return false + } + return true +} +func (this *CheckStatusInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*CheckStatusInfo) + if !ok { + that2, ok := that.(CheckStatusInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *CheckStatusInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *CheckStatusInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *CheckStatusInfo but is not nil && this == nil") + } + if this.Type != nil && that1.Type != nil { + if *this.Type != *that1.Type { + return fmt.Errorf("Type this(%v) Not Equal that(%v)", *this.Type, *that1.Type) + } + } else if this.Type != nil { + return fmt.Errorf("this.Type == nil && that.Type != nil") + } else if that1.Type != nil { + return fmt.Errorf("Type this(%v) Not Equal that(%v)", this.Type, that1.Type) 
+ } + if !this.Command.Equal(that1.Command) { + return fmt.Errorf("Command this(%v) Not Equal that(%v)", this.Command, that1.Command) + } + if !this.HTTP.Equal(that1.HTTP) { + return fmt.Errorf("HTTP this(%v) Not Equal that(%v)", this.HTTP, that1.HTTP) + } + if !this.TCP.Equal(that1.TCP) { + return fmt.Errorf("TCP this(%v) Not Equal that(%v)", this.TCP, that1.TCP) + } + return nil +} +func (this *CheckStatusInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*CheckStatusInfo) + if !ok { + that2, ok := that.(CheckStatusInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Type != nil && that1.Type != nil { + if *this.Type != *that1.Type { + return false + } + } else if this.Type != nil { + return false + } else if that1.Type != nil { + return false + } + if !this.Command.Equal(that1.Command) { + return false + } + if !this.HTTP.Equal(that1.HTTP) { + return false + } + if !this.TCP.Equal(that1.TCP) { + return false + } + return true +} +func (this *CheckStatusInfo_Command) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*CheckStatusInfo_Command) + if !ok { + that2, ok := that.(CheckStatusInfo_Command) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *CheckStatusInfo_Command") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *CheckStatusInfo_Command but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *CheckStatusInfo_Command but is not nil && this == nil") + } + if this.ExitCode != nil && that1.ExitCode != nil { + if *this.ExitCode != *that1.ExitCode { + return fmt.Errorf("ExitCode this(%v) Not Equal that(%v)", *this.ExitCode, *that1.ExitCode) + } + } else if this.ExitCode != nil { + return fmt.Errorf("this.ExitCode == nil && that.ExitCode != nil") + } else if that1.ExitCode != nil { + return fmt.Errorf("ExitCode this(%v) Not Equal that(%v)", this.ExitCode, that1.ExitCode) + } + return nil +} +func (this *CheckStatusInfo_Command) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*CheckStatusInfo_Command) + if !ok { + that2, ok := that.(CheckStatusInfo_Command) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.ExitCode != nil && that1.ExitCode != nil { + if *this.ExitCode != *that1.ExitCode { + return false + } + } else if this.ExitCode != nil { + return false + } else if that1.ExitCode != nil { + return false + } + return true +} +func (this *CheckStatusInfo_Http) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*CheckStatusInfo_Http) + if !ok { + that2, ok := that.(CheckStatusInfo_Http) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *CheckStatusInfo_Http") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *CheckStatusInfo_Http but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type 
*CheckStatusInfo_Http but is not nil && this == nil") + } + if this.StatusCode != nil && that1.StatusCode != nil { + if *this.StatusCode != *that1.StatusCode { + return fmt.Errorf("StatusCode this(%v) Not Equal that(%v)", *this.StatusCode, *that1.StatusCode) + } + } else if this.StatusCode != nil { + return fmt.Errorf("this.StatusCode == nil && that.StatusCode != nil") + } else if that1.StatusCode != nil { + return fmt.Errorf("StatusCode this(%v) Not Equal that(%v)", this.StatusCode, that1.StatusCode) + } + return nil +} +func (this *CheckStatusInfo_Http) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*CheckStatusInfo_Http) + if !ok { + that2, ok := that.(CheckStatusInfo_Http) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.StatusCode != nil && that1.StatusCode != nil { + if *this.StatusCode != *that1.StatusCode { + return false + } + } else if this.StatusCode != nil { + return false + } else if that1.StatusCode != nil { + return false + } + return true +} +func (this *CheckStatusInfo_Tcp) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*CheckStatusInfo_Tcp) + if !ok { + that2, ok := that.(CheckStatusInfo_Tcp) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *CheckStatusInfo_Tcp") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *CheckStatusInfo_Tcp but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *CheckStatusInfo_Tcp but is not nil && this == nil") + } + if this.Succeeded != nil && that1.Succeeded != nil { + if *this.Succeeded != *that1.Succeeded { + return fmt.Errorf("Succeeded this(%v) Not Equal that(%v)", *this.Succeeded, *that1.Succeeded) + } + } else if this.Succeeded != nil { + return fmt.Errorf("this.Succeeded == nil && that.Succeeded != nil") + } else if that1.Succeeded != nil { + return fmt.Errorf("Succeeded this(%v) Not Equal that(%v)", this.Succeeded, that1.Succeeded) + } + return nil +} +func (this *CheckStatusInfo_Tcp) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*CheckStatusInfo_Tcp) + if !ok { + that2, ok := that.(CheckStatusInfo_Tcp) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Succeeded != nil && that1.Succeeded != nil { + if *this.Succeeded != *that1.Succeeded { + return false + } + } else if this.Succeeded != nil { + return false + } else if that1.Succeeded != nil { + return false + } + return true +} +func (this *TaskStatus) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*TaskStatus) + if !ok { + that2, ok := that.(TaskStatus) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *TaskStatus") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *TaskStatus but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *TaskStatus but is not nil && this == nil") + } + if 
!this.TaskID.Equal(&that1.TaskID) { + return fmt.Errorf("TaskID this(%v) Not Equal that(%v)", this.TaskID, that1.TaskID) + } + if this.State != nil && that1.State != nil { + if *this.State != *that1.State { + return fmt.Errorf("State this(%v) Not Equal that(%v)", *this.State, *that1.State) + } + } else if this.State != nil { + return fmt.Errorf("this.State == nil && that.State != nil") + } else if that1.State != nil { + return fmt.Errorf("State this(%v) Not Equal that(%v)", this.State, that1.State) + } + if this.Message != nil && that1.Message != nil { + if *this.Message != *that1.Message { + return fmt.Errorf("Message this(%v) Not Equal that(%v)", *this.Message, *that1.Message) + } + } else if this.Message != nil { + return fmt.Errorf("this.Message == nil && that.Message != nil") + } else if that1.Message != nil { + return fmt.Errorf("Message this(%v) Not Equal that(%v)", this.Message, that1.Message) + } + if this.Source != nil && that1.Source != nil { + if *this.Source != *that1.Source { + return fmt.Errorf("Source this(%v) Not Equal that(%v)", *this.Source, *that1.Source) + } + } else if this.Source != nil { + return fmt.Errorf("this.Source == nil && that.Source != nil") + } else if that1.Source != nil { + return fmt.Errorf("Source this(%v) Not Equal that(%v)", this.Source, that1.Source) + } + if this.Reason != nil && that1.Reason != nil { + if *this.Reason != *that1.Reason { + return fmt.Errorf("Reason this(%v) Not Equal that(%v)", *this.Reason, *that1.Reason) + } + } else if this.Reason != nil { + return fmt.Errorf("this.Reason == nil && that.Reason != nil") + } else if that1.Reason != nil { + return fmt.Errorf("Reason this(%v) Not Equal that(%v)", this.Reason, that1.Reason) + } + if !bytes.Equal(this.Data, that1.Data) { + return fmt.Errorf("Data this(%v) Not Equal that(%v)", this.Data, that1.Data) + } + if !this.AgentID.Equal(that1.AgentID) { + return fmt.Errorf("AgentID this(%v) Not Equal that(%v)", this.AgentID, that1.AgentID) + } + if !this.ExecutorID.Equal(that1.ExecutorID) { + return fmt.Errorf("ExecutorID this(%v) Not Equal that(%v)", this.ExecutorID, that1.ExecutorID) + } + if this.Timestamp != nil && that1.Timestamp != nil { + if *this.Timestamp != *that1.Timestamp { + return fmt.Errorf("Timestamp this(%v) Not Equal that(%v)", *this.Timestamp, *that1.Timestamp) + } + } else if this.Timestamp != nil { + return fmt.Errorf("this.Timestamp == nil && that.Timestamp != nil") + } else if that1.Timestamp != nil { + return fmt.Errorf("Timestamp this(%v) Not Equal that(%v)", this.Timestamp, that1.Timestamp) + } + if !bytes.Equal(this.UUID, that1.UUID) { + return fmt.Errorf("UUID this(%v) Not Equal that(%v)", this.UUID, that1.UUID) + } + if this.Healthy != nil && that1.Healthy != nil { + if *this.Healthy != *that1.Healthy { + return fmt.Errorf("Healthy this(%v) Not Equal that(%v)", *this.Healthy, *that1.Healthy) + } + } else if this.Healthy != nil { + return fmt.Errorf("this.Healthy == nil && that.Healthy != nil") + } else if that1.Healthy != nil { + return fmt.Errorf("Healthy this(%v) Not Equal that(%v)", this.Healthy, that1.Healthy) + } + if !this.CheckStatus.Equal(that1.CheckStatus) { + return fmt.Errorf("CheckStatus this(%v) Not Equal that(%v)", this.CheckStatus, that1.CheckStatus) + } + if !this.Labels.Equal(that1.Labels) { + return fmt.Errorf("Labels this(%v) Not Equal that(%v)", this.Labels, that1.Labels) + } + if !this.ContainerStatus.Equal(that1.ContainerStatus) { + return fmt.Errorf("ContainerStatus this(%v) Not Equal that(%v)", this.ContainerStatus, that1.ContainerStatus) + } 
+ if !this.UnreachableTime.Equal(that1.UnreachableTime) { + return fmt.Errorf("UnreachableTime this(%v) Not Equal that(%v)", this.UnreachableTime, that1.UnreachableTime) + } + if !this.Limitation.Equal(that1.Limitation) { + return fmt.Errorf("Limitation this(%v) Not Equal that(%v)", this.Limitation, that1.Limitation) + } + return nil +} +func (this *TaskStatus) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*TaskStatus) + if !ok { + that2, ok := that.(TaskStatus) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if !this.TaskID.Equal(&that1.TaskID) { + return false + } + if this.State != nil && that1.State != nil { + if *this.State != *that1.State { + return false + } + } else if this.State != nil { + return false + } else if that1.State != nil { + return false + } + if this.Message != nil && that1.Message != nil { + if *this.Message != *that1.Message { + return false + } + } else if this.Message != nil { + return false + } else if that1.Message != nil { + return false + } + if this.Source != nil && that1.Source != nil { + if *this.Source != *that1.Source { + return false + } + } else if this.Source != nil { + return false + } else if that1.Source != nil { + return false + } + if this.Reason != nil && that1.Reason != nil { + if *this.Reason != *that1.Reason { + return false + } + } else if this.Reason != nil { + return false + } else if that1.Reason != nil { + return false + } + if !bytes.Equal(this.Data, that1.Data) { + return false + } + if !this.AgentID.Equal(that1.AgentID) { + return false + } + if !this.ExecutorID.Equal(that1.ExecutorID) { + return false + } + if this.Timestamp != nil && that1.Timestamp != nil { + if *this.Timestamp != *that1.Timestamp { + return false + } + } else if this.Timestamp != nil { + return false + } else if that1.Timestamp != nil { + return false + } + if !bytes.Equal(this.UUID, that1.UUID) { + return false + } + if this.Healthy != nil && that1.Healthy != nil { + if *this.Healthy != *that1.Healthy { + return false + } + } else if this.Healthy != nil { + return false + } else if that1.Healthy != nil { + return false + } + if !this.CheckStatus.Equal(that1.CheckStatus) { + return false + } + if !this.Labels.Equal(that1.Labels) { + return false + } + if !this.ContainerStatus.Equal(that1.ContainerStatus) { + return false + } + if !this.UnreachableTime.Equal(that1.UnreachableTime) { + return false + } + if !this.Limitation.Equal(that1.Limitation) { + return false + } + return true +} +func (this *Filters) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Filters) + if !ok { + that2, ok := that.(Filters) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Filters") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Filters but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Filters but is not nil && this == nil") + } + if this.RefuseSeconds != nil && that1.RefuseSeconds != nil { + if *this.RefuseSeconds != *that1.RefuseSeconds { + return fmt.Errorf("RefuseSeconds this(%v) Not Equal that(%v)", *this.RefuseSeconds, *that1.RefuseSeconds) + } + } else if this.RefuseSeconds != nil { + return fmt.Errorf("this.RefuseSeconds == nil 
&& that.RefuseSeconds != nil") + } else if that1.RefuseSeconds != nil { + return fmt.Errorf("RefuseSeconds this(%v) Not Equal that(%v)", this.RefuseSeconds, that1.RefuseSeconds) + } + return nil +} +func (this *Filters) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Filters) + if !ok { + that2, ok := that.(Filters) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.RefuseSeconds != nil && that1.RefuseSeconds != nil { + if *this.RefuseSeconds != *that1.RefuseSeconds { + return false + } + } else if this.RefuseSeconds != nil { + return false + } else if that1.RefuseSeconds != nil { + return false + } + return true +} +func (this *Environment) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Environment) + if !ok { + that2, ok := that.(Environment) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Environment") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Environment but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Environment but is not nil && this == nil") + } + if len(this.Variables) != len(that1.Variables) { + return fmt.Errorf("Variables this(%v) Not Equal that(%v)", len(this.Variables), len(that1.Variables)) + } + for i := range this.Variables { + if !this.Variables[i].Equal(&that1.Variables[i]) { + return fmt.Errorf("Variables this[%v](%v) Not Equal that[%v](%v)", i, this.Variables[i], i, that1.Variables[i]) + } + } + return nil +} +func (this *Environment) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Environment) + if !ok { + that2, ok := that.(Environment) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if len(this.Variables) != len(that1.Variables) { + return false + } + for i := range this.Variables { + if !this.Variables[i].Equal(&that1.Variables[i]) { + return false + } + } + return true +} +func (this *Environment_Variable) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Environment_Variable) + if !ok { + that2, ok := that.(Environment_Variable) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Environment_Variable") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Environment_Variable but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Environment_Variable but is not nil && this == nil") + } + if this.Name != that1.Name { + return fmt.Errorf("Name this(%v) Not Equal that(%v)", this.Name, that1.Name) + } + if this.Type != nil && that1.Type != nil { + if *this.Type != *that1.Type { + return fmt.Errorf("Type this(%v) Not Equal that(%v)", *this.Type, *that1.Type) + } + } else if this.Type != nil { + return fmt.Errorf("this.Type == nil && that.Type != nil") + } else if that1.Type != nil { + return fmt.Errorf("Type this(%v) Not Equal that(%v)", this.Type, that1.Type) + } + if 
this.Value != nil && that1.Value != nil { + if *this.Value != *that1.Value { + return fmt.Errorf("Value this(%v) Not Equal that(%v)", *this.Value, *that1.Value) + } + } else if this.Value != nil { + return fmt.Errorf("this.Value == nil && that.Value != nil") + } else if that1.Value != nil { + return fmt.Errorf("Value this(%v) Not Equal that(%v)", this.Value, that1.Value) + } + if !this.Secret.Equal(that1.Secret) { + return fmt.Errorf("Secret this(%v) Not Equal that(%v)", this.Secret, that1.Secret) + } + return nil +} +func (this *Environment_Variable) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Environment_Variable) + if !ok { + that2, ok := that.(Environment_Variable) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Type != nil && that1.Type != nil { + if *this.Type != *that1.Type { + return false + } + } else if this.Type != nil { + return false + } else if that1.Type != nil { + return false + } + if this.Value != nil && that1.Value != nil { + if *this.Value != *that1.Value { + return false + } + } else if this.Value != nil { + return false + } else if that1.Value != nil { + return false + } + if !this.Secret.Equal(that1.Secret) { + return false + } + return true +} +func (this *Parameter) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Parameter) + if !ok { + that2, ok := that.(Parameter) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Parameter") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Parameter but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Parameter but is not nil && this == nil") + } + if this.Key != that1.Key { + return fmt.Errorf("Key this(%v) Not Equal that(%v)", this.Key, that1.Key) + } + if this.Value != that1.Value { + return fmt.Errorf("Value this(%v) Not Equal that(%v)", this.Value, that1.Value) + } + return nil +} +func (this *Parameter) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Parameter) + if !ok { + that2, ok := that.(Parameter) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Key != that1.Key { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *Parameters) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Parameters) + if !ok { + that2, ok := that.(Parameters) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Parameters") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Parameters but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Parameters but is not nil && this == nil") + } + if len(this.Parameter) != len(that1.Parameter) { + return fmt.Errorf("Parameter this(%v) Not Equal that(%v)", len(this.Parameter), len(that1.Parameter)) + } + for i 
:= range this.Parameter { + if !this.Parameter[i].Equal(&that1.Parameter[i]) { + return fmt.Errorf("Parameter this[%v](%v) Not Equal that[%v](%v)", i, this.Parameter[i], i, that1.Parameter[i]) + } + } + return nil +} +func (this *Parameters) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Parameters) + if !ok { + that2, ok := that.(Parameters) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if len(this.Parameter) != len(that1.Parameter) { + return false + } + for i := range this.Parameter { + if !this.Parameter[i].Equal(&that1.Parameter[i]) { + return false + } + } + return true +} +func (this *Credential) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Credential) + if !ok { + that2, ok := that.(Credential) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Credential") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Credential but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Credential but is not nil && this == nil") + } + if this.Principal != that1.Principal { + return fmt.Errorf("Principal this(%v) Not Equal that(%v)", this.Principal, that1.Principal) + } + if this.Secret != nil && that1.Secret != nil { + if *this.Secret != *that1.Secret { + return fmt.Errorf("Secret this(%v) Not Equal that(%v)", *this.Secret, *that1.Secret) + } + } else if this.Secret != nil { + return fmt.Errorf("this.Secret == nil && that.Secret != nil") + } else if that1.Secret != nil { + return fmt.Errorf("Secret this(%v) Not Equal that(%v)", this.Secret, that1.Secret) + } + return nil +} +func (this *Credential) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Credential) + if !ok { + that2, ok := that.(Credential) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Principal != that1.Principal { + return false + } + if this.Secret != nil && that1.Secret != nil { + if *this.Secret != *that1.Secret { + return false + } + } else if this.Secret != nil { + return false + } else if that1.Secret != nil { + return false + } + return true +} +func (this *Credentials) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Credentials) + if !ok { + that2, ok := that.(Credentials) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Credentials") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Credentials but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Credentials but is not nil && this == nil") + } + if len(this.Credentials) != len(that1.Credentials) { + return fmt.Errorf("Credentials this(%v) Not Equal that(%v)", len(this.Credentials), len(that1.Credentials)) + } + for i := range this.Credentials { + if !this.Credentials[i].Equal(&that1.Credentials[i]) { + return fmt.Errorf("Credentials this[%v](%v) Not Equal that[%v](%v)", i, 
this.Credentials[i], i, that1.Credentials[i]) + } + } + return nil +} +func (this *Credentials) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Credentials) + if !ok { + that2, ok := that.(Credentials) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if len(this.Credentials) != len(that1.Credentials) { + return false + } + for i := range this.Credentials { + if !this.Credentials[i].Equal(&that1.Credentials[i]) { + return false + } + } + return true +} +func (this *Secret) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Secret) + if !ok { + that2, ok := that.(Secret) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Secret") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Secret but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Secret but is not nil && this == nil") + } + if this.Type != that1.Type { + return fmt.Errorf("Type this(%v) Not Equal that(%v)", this.Type, that1.Type) + } + if !this.Reference.Equal(that1.Reference) { + return fmt.Errorf("Reference this(%v) Not Equal that(%v)", this.Reference, that1.Reference) + } + if !this.Value.Equal(that1.Value) { + return fmt.Errorf("Value this(%v) Not Equal that(%v)", this.Value, that1.Value) + } + return nil +} +func (this *Secret) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Secret) + if !ok { + that2, ok := that.(Secret) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + if !this.Reference.Equal(that1.Reference) { + return false + } + if !this.Value.Equal(that1.Value) { + return false + } + return true +} +func (this *Secret_Reference) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Secret_Reference) + if !ok { + that2, ok := that.(Secret_Reference) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Secret_Reference") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Secret_Reference but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Secret_Reference but is not nil && this == nil") + } + if this.Name != that1.Name { + return fmt.Errorf("Name this(%v) Not Equal that(%v)", this.Name, that1.Name) + } + if this.Key != nil && that1.Key != nil { + if *this.Key != *that1.Key { + return fmt.Errorf("Key this(%v) Not Equal that(%v)", *this.Key, *that1.Key) + } + } else if this.Key != nil { + return fmt.Errorf("this.Key == nil && that.Key != nil") + } else if that1.Key != nil { + return fmt.Errorf("Key this(%v) Not Equal that(%v)", this.Key, that1.Key) + } + return nil +} +func (this *Secret_Reference) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Secret_Reference) + if !ok { + that2, ok := that.(Secret_Reference) + if ok { + 
that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Key != nil && that1.Key != nil { + if *this.Key != *that1.Key { + return false + } + } else if this.Key != nil { + return false + } else if that1.Key != nil { + return false + } + return true +} +func (this *Secret_Value) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Secret_Value) + if !ok { + that2, ok := that.(Secret_Value) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Secret_Value") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Secret_Value but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Secret_Value but is not nil && this == nil") + } + if !bytes.Equal(this.Data, that1.Data) { + return fmt.Errorf("Data this(%v) Not Equal that(%v)", this.Data, that1.Data) + } + return nil +} +func (this *Secret_Value) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Secret_Value) + if !ok { + that2, ok := that.(Secret_Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if !bytes.Equal(this.Data, that1.Data) { + return false + } + return true +} +func (this *RateLimit) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*RateLimit) + if !ok { + that2, ok := that.(RateLimit) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *RateLimit") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *RateLimit but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *RateLimit but is not nil && this == nil") + } + if this.QPS != nil && that1.QPS != nil { + if *this.QPS != *that1.QPS { + return fmt.Errorf("QPS this(%v) Not Equal that(%v)", *this.QPS, *that1.QPS) + } + } else if this.QPS != nil { + return fmt.Errorf("this.QPS == nil && that.QPS != nil") + } else if that1.QPS != nil { + return fmt.Errorf("QPS this(%v) Not Equal that(%v)", this.QPS, that1.QPS) + } + if this.Principal != that1.Principal { + return fmt.Errorf("Principal this(%v) Not Equal that(%v)", this.Principal, that1.Principal) + } + if this.Capacity != nil && that1.Capacity != nil { + if *this.Capacity != *that1.Capacity { + return fmt.Errorf("Capacity this(%v) Not Equal that(%v)", *this.Capacity, *that1.Capacity) + } + } else if this.Capacity != nil { + return fmt.Errorf("this.Capacity == nil && that.Capacity != nil") + } else if that1.Capacity != nil { + return fmt.Errorf("Capacity this(%v) Not Equal that(%v)", this.Capacity, that1.Capacity) + } + return nil +} +func (this *RateLimit) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*RateLimit) + if !ok { + that2, ok := that.(RateLimit) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if 
this.QPS != nil && that1.QPS != nil { + if *this.QPS != *that1.QPS { + return false + } + } else if this.QPS != nil { + return false + } else if that1.QPS != nil { + return false + } + if this.Principal != that1.Principal { + return false + } + if this.Capacity != nil && that1.Capacity != nil { + if *this.Capacity != *that1.Capacity { + return false + } + } else if this.Capacity != nil { + return false + } else if that1.Capacity != nil { + return false + } + return true +} +func (this *RateLimits) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*RateLimits) + if !ok { + that2, ok := that.(RateLimits) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *RateLimits") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *RateLimits but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *RateLimits but is not nil && this == nil") + } + if len(this.Limits) != len(that1.Limits) { + return fmt.Errorf("Limits this(%v) Not Equal that(%v)", len(this.Limits), len(that1.Limits)) + } + for i := range this.Limits { + if !this.Limits[i].Equal(&that1.Limits[i]) { + return fmt.Errorf("Limits this[%v](%v) Not Equal that[%v](%v)", i, this.Limits[i], i, that1.Limits[i]) + } + } + if this.AggregateDefaultQPS != nil && that1.AggregateDefaultQPS != nil { + if *this.AggregateDefaultQPS != *that1.AggregateDefaultQPS { + return fmt.Errorf("AggregateDefaultQPS this(%v) Not Equal that(%v)", *this.AggregateDefaultQPS, *that1.AggregateDefaultQPS) + } + } else if this.AggregateDefaultQPS != nil { + return fmt.Errorf("this.AggregateDefaultQPS == nil && that.AggregateDefaultQPS != nil") + } else if that1.AggregateDefaultQPS != nil { + return fmt.Errorf("AggregateDefaultQPS this(%v) Not Equal that(%v)", this.AggregateDefaultQPS, that1.AggregateDefaultQPS) + } + if this.AggregateDefaultCapacity != nil && that1.AggregateDefaultCapacity != nil { + if *this.AggregateDefaultCapacity != *that1.AggregateDefaultCapacity { + return fmt.Errorf("AggregateDefaultCapacity this(%v) Not Equal that(%v)", *this.AggregateDefaultCapacity, *that1.AggregateDefaultCapacity) + } + } else if this.AggregateDefaultCapacity != nil { + return fmt.Errorf("this.AggregateDefaultCapacity == nil && that.AggregateDefaultCapacity != nil") + } else if that1.AggregateDefaultCapacity != nil { + return fmt.Errorf("AggregateDefaultCapacity this(%v) Not Equal that(%v)", this.AggregateDefaultCapacity, that1.AggregateDefaultCapacity) + } + return nil +} +func (this *RateLimits) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*RateLimits) + if !ok { + that2, ok := that.(RateLimits) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if len(this.Limits) != len(that1.Limits) { + return false + } + for i := range this.Limits { + if !this.Limits[i].Equal(&that1.Limits[i]) { + return false + } + } + if this.AggregateDefaultQPS != nil && that1.AggregateDefaultQPS != nil { + if *this.AggregateDefaultQPS != *that1.AggregateDefaultQPS { + return false + } + } else if this.AggregateDefaultQPS != nil { + return false + } else if that1.AggregateDefaultQPS != nil { + return false + } + if this.AggregateDefaultCapacity != nil && 
that1.AggregateDefaultCapacity != nil { + if *this.AggregateDefaultCapacity != *that1.AggregateDefaultCapacity { + return false + } + } else if this.AggregateDefaultCapacity != nil { + return false + } else if that1.AggregateDefaultCapacity != nil { + return false + } + return true +} +func (this *Image) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Image) + if !ok { + that2, ok := that.(Image) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Image") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Image but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Image but is not nil && this == nil") + } + if this.Type != nil && that1.Type != nil { + if *this.Type != *that1.Type { + return fmt.Errorf("Type this(%v) Not Equal that(%v)", *this.Type, *that1.Type) + } + } else if this.Type != nil { + return fmt.Errorf("this.Type == nil && that.Type != nil") + } else if that1.Type != nil { + return fmt.Errorf("Type this(%v) Not Equal that(%v)", this.Type, that1.Type) + } + if !this.Appc.Equal(that1.Appc) { + return fmt.Errorf("Appc this(%v) Not Equal that(%v)", this.Appc, that1.Appc) + } + if !this.Docker.Equal(that1.Docker) { + return fmt.Errorf("Docker this(%v) Not Equal that(%v)", this.Docker, that1.Docker) + } + if this.Cached != nil && that1.Cached != nil { + if *this.Cached != *that1.Cached { + return fmt.Errorf("Cached this(%v) Not Equal that(%v)", *this.Cached, *that1.Cached) + } + } else if this.Cached != nil { + return fmt.Errorf("this.Cached == nil && that.Cached != nil") + } else if that1.Cached != nil { + return fmt.Errorf("Cached this(%v) Not Equal that(%v)", this.Cached, that1.Cached) + } + return nil +} +func (this *Image) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Image) + if !ok { + that2, ok := that.(Image) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Type != nil && that1.Type != nil { + if *this.Type != *that1.Type { + return false + } + } else if this.Type != nil { + return false + } else if that1.Type != nil { + return false + } + if !this.Appc.Equal(that1.Appc) { + return false + } + if !this.Docker.Equal(that1.Docker) { + return false + } + if this.Cached != nil && that1.Cached != nil { + if *this.Cached != *that1.Cached { + return false + } + } else if this.Cached != nil { + return false + } else if that1.Cached != nil { + return false + } + return true +} +func (this *Image_Appc) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Image_Appc) + if !ok { + that2, ok := that.(Image_Appc) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Image_Appc") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Image_Appc but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Image_Appc but is not nil && this == nil") + } + if this.Name != that1.Name { + return fmt.Errorf("Name this(%v) Not Equal that(%v)", this.Name, that1.Name) + } + if this.ID != nil && that1.ID != nil { + if *this.ID != 
*that1.ID { + return fmt.Errorf("ID this(%v) Not Equal that(%v)", *this.ID, *that1.ID) + } + } else if this.ID != nil { + return fmt.Errorf("this.ID == nil && that.ID != nil") + } else if that1.ID != nil { + return fmt.Errorf("ID this(%v) Not Equal that(%v)", this.ID, that1.ID) + } + if !this.Labels.Equal(that1.Labels) { + return fmt.Errorf("Labels this(%v) Not Equal that(%v)", this.Labels, that1.Labels) + } + return nil +} +func (this *Image_Appc) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Image_Appc) + if !ok { + that2, ok := that.(Image_Appc) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.ID != nil && that1.ID != nil { + if *this.ID != *that1.ID { + return false + } + } else if this.ID != nil { + return false + } else if that1.ID != nil { + return false + } + if !this.Labels.Equal(that1.Labels) { + return false + } + return true +} +func (this *Image_Docker) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Image_Docker) + if !ok { + that2, ok := that.(Image_Docker) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Image_Docker") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Image_Docker but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Image_Docker but is not nil && this == nil") + } + if this.Name != that1.Name { + return fmt.Errorf("Name this(%v) Not Equal that(%v)", this.Name, that1.Name) + } + if !this.Credential.Equal(that1.Credential) { + return fmt.Errorf("Credential this(%v) Not Equal that(%v)", this.Credential, that1.Credential) + } + if !this.Config.Equal(that1.Config) { + return fmt.Errorf("Config this(%v) Not Equal that(%v)", this.Config, that1.Config) + } + return nil +} +func (this *Image_Docker) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Image_Docker) + if !ok { + that2, ok := that.(Image_Docker) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if !this.Credential.Equal(that1.Credential) { + return false + } + if !this.Config.Equal(that1.Config) { + return false + } + return true +} +func (this *MountPropagation) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*MountPropagation) + if !ok { + that2, ok := that.(MountPropagation) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *MountPropagation") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *MountPropagation but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *MountPropagation but is not nil && this == nil") + } + if this.Mode != nil && that1.Mode != nil { + if *this.Mode != *that1.Mode { + return fmt.Errorf("Mode this(%v) Not Equal that(%v)", *this.Mode, *that1.Mode) + } + } else if this.Mode != nil { + 
return fmt.Errorf("this.Mode == nil && that.Mode != nil") + } else if that1.Mode != nil { + return fmt.Errorf("Mode this(%v) Not Equal that(%v)", this.Mode, that1.Mode) + } + return nil +} +func (this *MountPropagation) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*MountPropagation) + if !ok { + that2, ok := that.(MountPropagation) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Mode != nil && that1.Mode != nil { + if *this.Mode != *that1.Mode { + return false + } + } else if this.Mode != nil { + return false + } else if that1.Mode != nil { + return false + } + return true +} +func (this *Volume) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Volume) + if !ok { + that2, ok := that.(Volume) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Volume") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Volume but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Volume but is not nil && this == nil") + } + if this.Mode != nil && that1.Mode != nil { + if *this.Mode != *that1.Mode { + return fmt.Errorf("Mode this(%v) Not Equal that(%v)", *this.Mode, *that1.Mode) + } + } else if this.Mode != nil { + return fmt.Errorf("this.Mode == nil && that.Mode != nil") + } else if that1.Mode != nil { + return fmt.Errorf("Mode this(%v) Not Equal that(%v)", this.Mode, that1.Mode) + } + if this.ContainerPath != that1.ContainerPath { + return fmt.Errorf("ContainerPath this(%v) Not Equal that(%v)", this.ContainerPath, that1.ContainerPath) + } + if this.HostPath != nil && that1.HostPath != nil { + if *this.HostPath != *that1.HostPath { + return fmt.Errorf("HostPath this(%v) Not Equal that(%v)", *this.HostPath, *that1.HostPath) + } + } else if this.HostPath != nil { + return fmt.Errorf("this.HostPath == nil && that.HostPath != nil") + } else if that1.HostPath != nil { + return fmt.Errorf("HostPath this(%v) Not Equal that(%v)", this.HostPath, that1.HostPath) + } + if !this.Image.Equal(that1.Image) { + return fmt.Errorf("Image this(%v) Not Equal that(%v)", this.Image, that1.Image) + } + if !this.Source.Equal(that1.Source) { + return fmt.Errorf("Source this(%v) Not Equal that(%v)", this.Source, that1.Source) + } + return nil +} +func (this *Volume) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Volume) + if !ok { + that2, ok := that.(Volume) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Mode != nil && that1.Mode != nil { + if *this.Mode != *that1.Mode { + return false + } + } else if this.Mode != nil { + return false + } else if that1.Mode != nil { + return false + } + if this.ContainerPath != that1.ContainerPath { + return false + } + if this.HostPath != nil && that1.HostPath != nil { + if *this.HostPath != *that1.HostPath { + return false + } + } else if this.HostPath != nil { + return false + } else if that1.HostPath != nil { + return false + } + if !this.Image.Equal(that1.Image) { + return false + } + if !this.Source.Equal(that1.Source) { + 
return false + } + return true +} +func (this *Volume_Source) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Volume_Source) + if !ok { + that2, ok := that.(Volume_Source) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Volume_Source") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Volume_Source but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Volume_Source but is not nil && this == nil") + } + if this.Type != that1.Type { + return fmt.Errorf("Type this(%v) Not Equal that(%v)", this.Type, that1.Type) + } + if !this.DockerVolume.Equal(that1.DockerVolume) { + return fmt.Errorf("DockerVolume this(%v) Not Equal that(%v)", this.DockerVolume, that1.DockerVolume) + } + if !this.HostPath.Equal(that1.HostPath) { + return fmt.Errorf("HostPath this(%v) Not Equal that(%v)", this.HostPath, that1.HostPath) + } + if !this.SandboxPath.Equal(that1.SandboxPath) { + return fmt.Errorf("SandboxPath this(%v) Not Equal that(%v)", this.SandboxPath, that1.SandboxPath) + } + if !this.Secret.Equal(that1.Secret) { + return fmt.Errorf("Secret this(%v) Not Equal that(%v)", this.Secret, that1.Secret) + } + return nil +} +func (this *Volume_Source) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Volume_Source) + if !ok { + that2, ok := that.(Volume_Source) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + if !this.DockerVolume.Equal(that1.DockerVolume) { + return false + } + if !this.HostPath.Equal(that1.HostPath) { + return false + } + if !this.SandboxPath.Equal(that1.SandboxPath) { + return false + } + if !this.Secret.Equal(that1.Secret) { + return false + } + return true +} +func (this *Volume_Source_DockerVolume) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Volume_Source_DockerVolume) + if !ok { + that2, ok := that.(Volume_Source_DockerVolume) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Volume_Source_DockerVolume") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Volume_Source_DockerVolume but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Volume_Source_DockerVolume but is not nil && this == nil") + } + if this.Driver != nil && that1.Driver != nil { + if *this.Driver != *that1.Driver { + return fmt.Errorf("Driver this(%v) Not Equal that(%v)", *this.Driver, *that1.Driver) + } + } else if this.Driver != nil { + return fmt.Errorf("this.Driver == nil && that.Driver != nil") + } else if that1.Driver != nil { + return fmt.Errorf("Driver this(%v) Not Equal that(%v)", this.Driver, that1.Driver) + } + if this.Name != that1.Name { + return fmt.Errorf("Name this(%v) Not Equal that(%v)", this.Name, that1.Name) + } + if !this.DriverOptions.Equal(that1.DriverOptions) { + return fmt.Errorf("DriverOptions this(%v) Not Equal that(%v)", this.DriverOptions, that1.DriverOptions) + } + return nil +} +func (this *Volume_Source_DockerVolume) Equal(that interface{}) bool { + if that == nil { + 
if this == nil { + return true + } + return false + } + + that1, ok := that.(*Volume_Source_DockerVolume) + if !ok { + that2, ok := that.(Volume_Source_DockerVolume) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Driver != nil && that1.Driver != nil { + if *this.Driver != *that1.Driver { + return false + } + } else if this.Driver != nil { + return false + } else if that1.Driver != nil { + return false + } + if this.Name != that1.Name { + return false + } + if !this.DriverOptions.Equal(that1.DriverOptions) { + return false + } + return true +} +func (this *Volume_Source_HostPath) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Volume_Source_HostPath) + if !ok { + that2, ok := that.(Volume_Source_HostPath) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Volume_Source_HostPath") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Volume_Source_HostPath but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Volume_Source_HostPath but is not nil && this == nil") + } + if this.Path != that1.Path { + return fmt.Errorf("Path this(%v) Not Equal that(%v)", this.Path, that1.Path) + } + if !this.MountPropagation.Equal(that1.MountPropagation) { + return fmt.Errorf("MountPropagation this(%v) Not Equal that(%v)", this.MountPropagation, that1.MountPropagation) + } + return nil +} +func (this *Volume_Source_HostPath) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Volume_Source_HostPath) + if !ok { + that2, ok := that.(Volume_Source_HostPath) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Path != that1.Path { + return false + } + if !this.MountPropagation.Equal(that1.MountPropagation) { + return false + } + return true +} +func (this *Volume_Source_SandboxPath) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Volume_Source_SandboxPath) + if !ok { + that2, ok := that.(Volume_Source_SandboxPath) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Volume_Source_SandboxPath") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Volume_Source_SandboxPath but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Volume_Source_SandboxPath but is not nil && this == nil") + } + if this.Type != that1.Type { + return fmt.Errorf("Type this(%v) Not Equal that(%v)", this.Type, that1.Type) + } + if this.Path != that1.Path { + return fmt.Errorf("Path this(%v) Not Equal that(%v)", this.Path, that1.Path) + } + return nil +} +func (this *Volume_Source_SandboxPath) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Volume_Source_SandboxPath) + if !ok { + that2, ok := that.(Volume_Source_SandboxPath) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } 
else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + if this.Path != that1.Path { + return false + } + return true +} +func (this *NetworkInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*NetworkInfo) + if !ok { + that2, ok := that.(NetworkInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *NetworkInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *NetworkInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *NetworkInfo but is not nil && this == nil") + } + if len(this.IPAddresses) != len(that1.IPAddresses) { + return fmt.Errorf("IPAddresses this(%v) Not Equal that(%v)", len(this.IPAddresses), len(that1.IPAddresses)) + } + for i := range this.IPAddresses { + if !this.IPAddresses[i].Equal(&that1.IPAddresses[i]) { + return fmt.Errorf("IPAddresses this[%v](%v) Not Equal that[%v](%v)", i, this.IPAddresses[i], i, that1.IPAddresses[i]) + } + } + if this.Name != nil && that1.Name != nil { + if *this.Name != *that1.Name { + return fmt.Errorf("Name this(%v) Not Equal that(%v)", *this.Name, *that1.Name) + } + } else if this.Name != nil { + return fmt.Errorf("this.Name == nil && that.Name != nil") + } else if that1.Name != nil { + return fmt.Errorf("Name this(%v) Not Equal that(%v)", this.Name, that1.Name) + } + if len(this.Groups) != len(that1.Groups) { + return fmt.Errorf("Groups this(%v) Not Equal that(%v)", len(this.Groups), len(that1.Groups)) + } + for i := range this.Groups { + if this.Groups[i] != that1.Groups[i] { + return fmt.Errorf("Groups this[%v](%v) Not Equal that[%v](%v)", i, this.Groups[i], i, that1.Groups[i]) + } + } + if !this.Labels.Equal(that1.Labels) { + return fmt.Errorf("Labels this(%v) Not Equal that(%v)", this.Labels, that1.Labels) + } + if len(this.PortMappings) != len(that1.PortMappings) { + return fmt.Errorf("PortMappings this(%v) Not Equal that(%v)", len(this.PortMappings), len(that1.PortMappings)) + } + for i := range this.PortMappings { + if !this.PortMappings[i].Equal(&that1.PortMappings[i]) { + return fmt.Errorf("PortMappings this[%v](%v) Not Equal that[%v](%v)", i, this.PortMappings[i], i, that1.PortMappings[i]) + } + } + return nil +} +func (this *NetworkInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*NetworkInfo) + if !ok { + that2, ok := that.(NetworkInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if len(this.IPAddresses) != len(that1.IPAddresses) { + return false + } + for i := range this.IPAddresses { + if !this.IPAddresses[i].Equal(&that1.IPAddresses[i]) { + return false + } + } + if this.Name != nil && that1.Name != nil { + if *this.Name != *that1.Name { + return false + } + } else if this.Name != nil { + return false + } else if that1.Name != nil { + return false + } + if len(this.Groups) != len(that1.Groups) { + return false + } + for i := range this.Groups { + if this.Groups[i] != that1.Groups[i] { + return false + } + } + if !this.Labels.Equal(that1.Labels) { + return false + } + if len(this.PortMappings) != len(that1.PortMappings) { + return false + } + for i := range this.PortMappings { + if !this.PortMappings[i].Equal(&that1.PortMappings[i]) 
{ + return false + } + } + return true +} +func (this *NetworkInfo_IPAddress) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*NetworkInfo_IPAddress) + if !ok { + that2, ok := that.(NetworkInfo_IPAddress) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *NetworkInfo_IPAddress") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *NetworkInfo_IPAddress but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *NetworkInfo_IPAddress but is not nil && this == nil") + } + if this.Protocol != nil && that1.Protocol != nil { + if *this.Protocol != *that1.Protocol { + return fmt.Errorf("Protocol this(%v) Not Equal that(%v)", *this.Protocol, *that1.Protocol) + } + } else if this.Protocol != nil { + return fmt.Errorf("this.Protocol == nil && that.Protocol != nil") + } else if that1.Protocol != nil { + return fmt.Errorf("Protocol this(%v) Not Equal that(%v)", this.Protocol, that1.Protocol) + } + if this.IPAddress != nil && that1.IPAddress != nil { + if *this.IPAddress != *that1.IPAddress { + return fmt.Errorf("IPAddress this(%v) Not Equal that(%v)", *this.IPAddress, *that1.IPAddress) + } + } else if this.IPAddress != nil { + return fmt.Errorf("this.IPAddress == nil && that.IPAddress != nil") + } else if that1.IPAddress != nil { + return fmt.Errorf("IPAddress this(%v) Not Equal that(%v)", this.IPAddress, that1.IPAddress) + } + return nil +} +func (this *NetworkInfo_IPAddress) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*NetworkInfo_IPAddress) + if !ok { + that2, ok := that.(NetworkInfo_IPAddress) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Protocol != nil && that1.Protocol != nil { + if *this.Protocol != *that1.Protocol { + return false + } + } else if this.Protocol != nil { + return false + } else if that1.Protocol != nil { + return false + } + if this.IPAddress != nil && that1.IPAddress != nil { + if *this.IPAddress != *that1.IPAddress { + return false + } + } else if this.IPAddress != nil { + return false + } else if that1.IPAddress != nil { + return false + } + return true +} +func (this *NetworkInfo_PortMapping) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*NetworkInfo_PortMapping) + if !ok { + that2, ok := that.(NetworkInfo_PortMapping) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *NetworkInfo_PortMapping") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *NetworkInfo_PortMapping but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *NetworkInfo_PortMapping but is not nil && this == nil") + } + if this.HostPort != that1.HostPort { + return fmt.Errorf("HostPort this(%v) Not Equal that(%v)", this.HostPort, that1.HostPort) + } + if this.ContainerPort != that1.ContainerPort { + return fmt.Errorf("ContainerPort this(%v) Not Equal that(%v)", this.ContainerPort, that1.ContainerPort) + } + if this.Protocol != nil && that1.Protocol != nil { + if *this.Protocol != *that1.Protocol { + return fmt.Errorf("Protocol 
this(%v) Not Equal that(%v)", *this.Protocol, *that1.Protocol) + } + } else if this.Protocol != nil { + return fmt.Errorf("this.Protocol == nil && that.Protocol != nil") + } else if that1.Protocol != nil { + return fmt.Errorf("Protocol this(%v) Not Equal that(%v)", this.Protocol, that1.Protocol) + } + return nil +} +func (this *NetworkInfo_PortMapping) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*NetworkInfo_PortMapping) + if !ok { + that2, ok := that.(NetworkInfo_PortMapping) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.HostPort != that1.HostPort { + return false + } + if this.ContainerPort != that1.ContainerPort { + return false + } + if this.Protocol != nil && that1.Protocol != nil { + if *this.Protocol != *that1.Protocol { + return false + } + } else if this.Protocol != nil { + return false + } else if that1.Protocol != nil { + return false + } + return true +} +func (this *CapabilityInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*CapabilityInfo) + if !ok { + that2, ok := that.(CapabilityInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *CapabilityInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *CapabilityInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *CapabilityInfo but is not nil && this == nil") + } + if len(this.Capabilities) != len(that1.Capabilities) { + return fmt.Errorf("Capabilities this(%v) Not Equal that(%v)", len(this.Capabilities), len(that1.Capabilities)) + } + for i := range this.Capabilities { + if this.Capabilities[i] != that1.Capabilities[i] { + return fmt.Errorf("Capabilities this[%v](%v) Not Equal that[%v](%v)", i, this.Capabilities[i], i, that1.Capabilities[i]) + } + } + return nil +} +func (this *CapabilityInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*CapabilityInfo) + if !ok { + that2, ok := that.(CapabilityInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if len(this.Capabilities) != len(that1.Capabilities) { + return false + } + for i := range this.Capabilities { + if this.Capabilities[i] != that1.Capabilities[i] { + return false + } + } + return true +} +func (this *LinuxInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*LinuxInfo) + if !ok { + that2, ok := that.(LinuxInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *LinuxInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *LinuxInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *LinuxInfo but is not nil && this == nil") + } + if !this.CapabilityInfo.Equal(that1.CapabilityInfo) { + return fmt.Errorf("CapabilityInfo this(%v) Not Equal that(%v)", this.CapabilityInfo, that1.CapabilityInfo) + } + if 
!this.BoundingCapabilities.Equal(that1.BoundingCapabilities) { + return fmt.Errorf("BoundingCapabilities this(%v) Not Equal that(%v)", this.BoundingCapabilities, that1.BoundingCapabilities) + } + if !this.EffectiveCapabilities.Equal(that1.EffectiveCapabilities) { + return fmt.Errorf("EffectiveCapabilities this(%v) Not Equal that(%v)", this.EffectiveCapabilities, that1.EffectiveCapabilities) + } + if this.SharePIDNamespace != nil && that1.SharePIDNamespace != nil { + if *this.SharePIDNamespace != *that1.SharePIDNamespace { + return fmt.Errorf("SharePIDNamespace this(%v) Not Equal that(%v)", *this.SharePIDNamespace, *that1.SharePIDNamespace) + } + } else if this.SharePIDNamespace != nil { + return fmt.Errorf("this.SharePIDNamespace == nil && that.SharePIDNamespace != nil") + } else if that1.SharePIDNamespace != nil { + return fmt.Errorf("SharePIDNamespace this(%v) Not Equal that(%v)", this.SharePIDNamespace, that1.SharePIDNamespace) + } + return nil +} +func (this *LinuxInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*LinuxInfo) + if !ok { + that2, ok := that.(LinuxInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if !this.CapabilityInfo.Equal(that1.CapabilityInfo) { + return false + } + if !this.BoundingCapabilities.Equal(that1.BoundingCapabilities) { + return false + } + if !this.EffectiveCapabilities.Equal(that1.EffectiveCapabilities) { + return false + } + if this.SharePIDNamespace != nil && that1.SharePIDNamespace != nil { + if *this.SharePIDNamespace != *that1.SharePIDNamespace { + return false + } + } else if this.SharePIDNamespace != nil { + return false + } else if that1.SharePIDNamespace != nil { + return false + } + return true +} +func (this *RLimitInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*RLimitInfo) + if !ok { + that2, ok := that.(RLimitInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *RLimitInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *RLimitInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *RLimitInfo but is not nil && this == nil") + } + if len(this.Rlimits) != len(that1.Rlimits) { + return fmt.Errorf("Rlimits this(%v) Not Equal that(%v)", len(this.Rlimits), len(that1.Rlimits)) + } + for i := range this.Rlimits { + if !this.Rlimits[i].Equal(&that1.Rlimits[i]) { + return fmt.Errorf("Rlimits this[%v](%v) Not Equal that[%v](%v)", i, this.Rlimits[i], i, that1.Rlimits[i]) + } + } + return nil +} +func (this *RLimitInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*RLimitInfo) + if !ok { + that2, ok := that.(RLimitInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if len(this.Rlimits) != len(that1.Rlimits) { + return false + } + for i := range this.Rlimits { + if !this.Rlimits[i].Equal(&that1.Rlimits[i]) { + return false + } + } + return true +} +func (this *RLimitInfo_RLimit) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + 
return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*RLimitInfo_RLimit) + if !ok { + that2, ok := that.(RLimitInfo_RLimit) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *RLimitInfo_RLimit") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *RLimitInfo_RLimit but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *RLimitInfo_RLimit but is not nil && this == nil") + } + if this.Type != that1.Type { + return fmt.Errorf("Type this(%v) Not Equal that(%v)", this.Type, that1.Type) + } + if this.Hard != nil && that1.Hard != nil { + if *this.Hard != *that1.Hard { + return fmt.Errorf("Hard this(%v) Not Equal that(%v)", *this.Hard, *that1.Hard) + } + } else if this.Hard != nil { + return fmt.Errorf("this.Hard == nil && that.Hard != nil") + } else if that1.Hard != nil { + return fmt.Errorf("Hard this(%v) Not Equal that(%v)", this.Hard, that1.Hard) + } + if this.Soft != nil && that1.Soft != nil { + if *this.Soft != *that1.Soft { + return fmt.Errorf("Soft this(%v) Not Equal that(%v)", *this.Soft, *that1.Soft) + } + } else if this.Soft != nil { + return fmt.Errorf("this.Soft == nil && that.Soft != nil") + } else if that1.Soft != nil { + return fmt.Errorf("Soft this(%v) Not Equal that(%v)", this.Soft, that1.Soft) + } + return nil +} +func (this *RLimitInfo_RLimit) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*RLimitInfo_RLimit) + if !ok { + that2, ok := that.(RLimitInfo_RLimit) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + if this.Hard != nil && that1.Hard != nil { + if *this.Hard != *that1.Hard { + return false + } + } else if this.Hard != nil { + return false + } else if that1.Hard != nil { + return false + } + if this.Soft != nil && that1.Soft != nil { + if *this.Soft != *that1.Soft { + return false + } + } else if this.Soft != nil { + return false + } else if that1.Soft != nil { + return false + } + return true +} +func (this *TTYInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*TTYInfo) + if !ok { + that2, ok := that.(TTYInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *TTYInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *TTYInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *TTYInfo but is not nil && this == nil") + } + if !this.WindowSize.Equal(that1.WindowSize) { + return fmt.Errorf("WindowSize this(%v) Not Equal that(%v)", this.WindowSize, that1.WindowSize) + } + return nil +} +func (this *TTYInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*TTYInfo) + if !ok { + that2, ok := that.(TTYInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if !this.WindowSize.Equal(that1.WindowSize) { + return false + } + return true +} +func (this *TTYInfo_WindowSize) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + 
return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*TTYInfo_WindowSize) + if !ok { + that2, ok := that.(TTYInfo_WindowSize) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *TTYInfo_WindowSize") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *TTYInfo_WindowSize but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *TTYInfo_WindowSize but is not nil && this == nil") + } + if this.Rows != that1.Rows { + return fmt.Errorf("Rows this(%v) Not Equal that(%v)", this.Rows, that1.Rows) + } + if this.Columns != that1.Columns { + return fmt.Errorf("Columns this(%v) Not Equal that(%v)", this.Columns, that1.Columns) + } + return nil +} +func (this *TTYInfo_WindowSize) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*TTYInfo_WindowSize) + if !ok { + that2, ok := that.(TTYInfo_WindowSize) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Rows != that1.Rows { + return false + } + if this.Columns != that1.Columns { + return false + } + return true +} +func (this *ContainerInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*ContainerInfo) + if !ok { + that2, ok := that.(ContainerInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *ContainerInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *ContainerInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *ContainerInfo but is not nil && this == nil") + } + if this.Type != nil && that1.Type != nil { + if *this.Type != *that1.Type { + return fmt.Errorf("Type this(%v) Not Equal that(%v)", *this.Type, *that1.Type) + } + } else if this.Type != nil { + return fmt.Errorf("this.Type == nil && that.Type != nil") + } else if that1.Type != nil { + return fmt.Errorf("Type this(%v) Not Equal that(%v)", this.Type, that1.Type) + } + if len(this.Volumes) != len(that1.Volumes) { + return fmt.Errorf("Volumes this(%v) Not Equal that(%v)", len(this.Volumes), len(that1.Volumes)) + } + for i := range this.Volumes { + if !this.Volumes[i].Equal(&that1.Volumes[i]) { + return fmt.Errorf("Volumes this[%v](%v) Not Equal that[%v](%v)", i, this.Volumes[i], i, that1.Volumes[i]) + } + } + if this.Hostname != nil && that1.Hostname != nil { + if *this.Hostname != *that1.Hostname { + return fmt.Errorf("Hostname this(%v) Not Equal that(%v)", *this.Hostname, *that1.Hostname) + } + } else if this.Hostname != nil { + return fmt.Errorf("this.Hostname == nil && that.Hostname != nil") + } else if that1.Hostname != nil { + return fmt.Errorf("Hostname this(%v) Not Equal that(%v)", this.Hostname, that1.Hostname) + } + if !this.Docker.Equal(that1.Docker) { + return fmt.Errorf("Docker this(%v) Not Equal that(%v)", this.Docker, that1.Docker) + } + if !this.Mesos.Equal(that1.Mesos) { + return fmt.Errorf("Mesos this(%v) Not Equal that(%v)", this.Mesos, that1.Mesos) + } + if len(this.NetworkInfos) != len(that1.NetworkInfos) { + return fmt.Errorf("NetworkInfos this(%v) Not Equal that(%v)", len(this.NetworkInfos), len(that1.NetworkInfos)) + } + for i := range this.NetworkInfos { + if 
!this.NetworkInfos[i].Equal(&that1.NetworkInfos[i]) { + return fmt.Errorf("NetworkInfos this[%v](%v) Not Equal that[%v](%v)", i, this.NetworkInfos[i], i, that1.NetworkInfos[i]) + } + } + if !this.LinuxInfo.Equal(that1.LinuxInfo) { + return fmt.Errorf("LinuxInfo this(%v) Not Equal that(%v)", this.LinuxInfo, that1.LinuxInfo) + } + if !this.RlimitInfo.Equal(that1.RlimitInfo) { + return fmt.Errorf("RlimitInfo this(%v) Not Equal that(%v)", this.RlimitInfo, that1.RlimitInfo) + } + if !this.TTYInfo.Equal(that1.TTYInfo) { + return fmt.Errorf("TTYInfo this(%v) Not Equal that(%v)", this.TTYInfo, that1.TTYInfo) + } + return nil +} +func (this *ContainerInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*ContainerInfo) + if !ok { + that2, ok := that.(ContainerInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Type != nil && that1.Type != nil { + if *this.Type != *that1.Type { + return false + } + } else if this.Type != nil { + return false + } else if that1.Type != nil { + return false + } + if len(this.Volumes) != len(that1.Volumes) { + return false + } + for i := range this.Volumes { + if !this.Volumes[i].Equal(&that1.Volumes[i]) { + return false + } + } + if this.Hostname != nil && that1.Hostname != nil { + if *this.Hostname != *that1.Hostname { + return false + } + } else if this.Hostname != nil { + return false + } else if that1.Hostname != nil { + return false + } + if !this.Docker.Equal(that1.Docker) { + return false + } + if !this.Mesos.Equal(that1.Mesos) { + return false + } + if len(this.NetworkInfos) != len(that1.NetworkInfos) { + return false + } + for i := range this.NetworkInfos { + if !this.NetworkInfos[i].Equal(&that1.NetworkInfos[i]) { + return false + } + } + if !this.LinuxInfo.Equal(that1.LinuxInfo) { + return false + } + if !this.RlimitInfo.Equal(that1.RlimitInfo) { + return false + } + if !this.TTYInfo.Equal(that1.TTYInfo) { + return false + } + return true +} +func (this *ContainerInfo_DockerInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*ContainerInfo_DockerInfo) + if !ok { + that2, ok := that.(ContainerInfo_DockerInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *ContainerInfo_DockerInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *ContainerInfo_DockerInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *ContainerInfo_DockerInfo but is not nil && this == nil") + } + if this.Image != that1.Image { + return fmt.Errorf("Image this(%v) Not Equal that(%v)", this.Image, that1.Image) + } + if this.Network != nil && that1.Network != nil { + if *this.Network != *that1.Network { + return fmt.Errorf("Network this(%v) Not Equal that(%v)", *this.Network, *that1.Network) + } + } else if this.Network != nil { + return fmt.Errorf("this.Network == nil && that.Network != nil") + } else if that1.Network != nil { + return fmt.Errorf("Network this(%v) Not Equal that(%v)", this.Network, that1.Network) + } + if len(this.PortMappings) != len(that1.PortMappings) { + return fmt.Errorf("PortMappings this(%v) Not Equal that(%v)", len(this.PortMappings), len(that1.PortMappings)) + } + for i := range this.PortMappings { + if 
!this.PortMappings[i].Equal(&that1.PortMappings[i]) { + return fmt.Errorf("PortMappings this[%v](%v) Not Equal that[%v](%v)", i, this.PortMappings[i], i, that1.PortMappings[i]) + } + } + if this.Privileged != nil && that1.Privileged != nil { + if *this.Privileged != *that1.Privileged { + return fmt.Errorf("Privileged this(%v) Not Equal that(%v)", *this.Privileged, *that1.Privileged) + } + } else if this.Privileged != nil { + return fmt.Errorf("this.Privileged == nil && that.Privileged != nil") + } else if that1.Privileged != nil { + return fmt.Errorf("Privileged this(%v) Not Equal that(%v)", this.Privileged, that1.Privileged) + } + if len(this.Parameters) != len(that1.Parameters) { + return fmt.Errorf("Parameters this(%v) Not Equal that(%v)", len(this.Parameters), len(that1.Parameters)) + } + for i := range this.Parameters { + if !this.Parameters[i].Equal(&that1.Parameters[i]) { + return fmt.Errorf("Parameters this[%v](%v) Not Equal that[%v](%v)", i, this.Parameters[i], i, that1.Parameters[i]) + } + } + if this.ForcePullImage != nil && that1.ForcePullImage != nil { + if *this.ForcePullImage != *that1.ForcePullImage { + return fmt.Errorf("ForcePullImage this(%v) Not Equal that(%v)", *this.ForcePullImage, *that1.ForcePullImage) + } + } else if this.ForcePullImage != nil { + return fmt.Errorf("this.ForcePullImage == nil && that.ForcePullImage != nil") + } else if that1.ForcePullImage != nil { + return fmt.Errorf("ForcePullImage this(%v) Not Equal that(%v)", this.ForcePullImage, that1.ForcePullImage) + } + if this.VolumeDriver != nil && that1.VolumeDriver != nil { + if *this.VolumeDriver != *that1.VolumeDriver { + return fmt.Errorf("VolumeDriver this(%v) Not Equal that(%v)", *this.VolumeDriver, *that1.VolumeDriver) + } + } else if this.VolumeDriver != nil { + return fmt.Errorf("this.VolumeDriver == nil && that.VolumeDriver != nil") + } else if that1.VolumeDriver != nil { + return fmt.Errorf("VolumeDriver this(%v) Not Equal that(%v)", this.VolumeDriver, that1.VolumeDriver) + } + return nil +} +func (this *ContainerInfo_DockerInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*ContainerInfo_DockerInfo) + if !ok { + that2, ok := that.(ContainerInfo_DockerInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Image != that1.Image { + return false + } + if this.Network != nil && that1.Network != nil { + if *this.Network != *that1.Network { + return false + } + } else if this.Network != nil { + return false + } else if that1.Network != nil { + return false + } + if len(this.PortMappings) != len(that1.PortMappings) { + return false + } + for i := range this.PortMappings { + if !this.PortMappings[i].Equal(&that1.PortMappings[i]) { + return false + } + } + if this.Privileged != nil && that1.Privileged != nil { + if *this.Privileged != *that1.Privileged { + return false + } + } else if this.Privileged != nil { + return false + } else if that1.Privileged != nil { + return false + } + if len(this.Parameters) != len(that1.Parameters) { + return false + } + for i := range this.Parameters { + if !this.Parameters[i].Equal(&that1.Parameters[i]) { + return false + } + } + if this.ForcePullImage != nil && that1.ForcePullImage != nil { + if *this.ForcePullImage != *that1.ForcePullImage { + return false + } + } else if this.ForcePullImage != nil { + return false + } else if that1.ForcePullImage 
!= nil { + return false + } + if this.VolumeDriver != nil && that1.VolumeDriver != nil { + if *this.VolumeDriver != *that1.VolumeDriver { + return false + } + } else if this.VolumeDriver != nil { + return false + } else if that1.VolumeDriver != nil { + return false + } + return true +} +func (this *ContainerInfo_DockerInfo_PortMapping) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*ContainerInfo_DockerInfo_PortMapping) + if !ok { + that2, ok := that.(ContainerInfo_DockerInfo_PortMapping) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *ContainerInfo_DockerInfo_PortMapping") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *ContainerInfo_DockerInfo_PortMapping but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *ContainerInfo_DockerInfo_PortMapping but is not nil && this == nil") + } + if this.HostPort != that1.HostPort { + return fmt.Errorf("HostPort this(%v) Not Equal that(%v)", this.HostPort, that1.HostPort) + } + if this.ContainerPort != that1.ContainerPort { + return fmt.Errorf("ContainerPort this(%v) Not Equal that(%v)", this.ContainerPort, that1.ContainerPort) + } + if this.Protocol != nil && that1.Protocol != nil { + if *this.Protocol != *that1.Protocol { + return fmt.Errorf("Protocol this(%v) Not Equal that(%v)", *this.Protocol, *that1.Protocol) + } + } else if this.Protocol != nil { + return fmt.Errorf("this.Protocol == nil && that.Protocol != nil") + } else if that1.Protocol != nil { + return fmt.Errorf("Protocol this(%v) Not Equal that(%v)", this.Protocol, that1.Protocol) + } + return nil +} +func (this *ContainerInfo_DockerInfo_PortMapping) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*ContainerInfo_DockerInfo_PortMapping) + if !ok { + that2, ok := that.(ContainerInfo_DockerInfo_PortMapping) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.HostPort != that1.HostPort { + return false + } + if this.ContainerPort != that1.ContainerPort { + return false + } + if this.Protocol != nil && that1.Protocol != nil { + if *this.Protocol != *that1.Protocol { + return false + } + } else if this.Protocol != nil { + return false + } else if that1.Protocol != nil { + return false + } + return true +} +func (this *ContainerInfo_MesosInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*ContainerInfo_MesosInfo) + if !ok { + that2, ok := that.(ContainerInfo_MesosInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *ContainerInfo_MesosInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *ContainerInfo_MesosInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *ContainerInfo_MesosInfo but is not nil && this == nil") + } + if !this.Image.Equal(that1.Image) { + return fmt.Errorf("Image this(%v) Not Equal that(%v)", this.Image, that1.Image) + } + return nil +} +func (this *ContainerInfo_MesosInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + 
that1, ok := that.(*ContainerInfo_MesosInfo) + if !ok { + that2, ok := that.(ContainerInfo_MesosInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if !this.Image.Equal(that1.Image) { + return false + } + return true +} +func (this *ContainerStatus) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*ContainerStatus) + if !ok { + that2, ok := that.(ContainerStatus) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *ContainerStatus") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *ContainerStatus but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *ContainerStatus but is not nil && this == nil") + } + if !this.ContainerID.Equal(that1.ContainerID) { + return fmt.Errorf("ContainerID this(%v) Not Equal that(%v)", this.ContainerID, that1.ContainerID) + } + if len(this.NetworkInfos) != len(that1.NetworkInfos) { + return fmt.Errorf("NetworkInfos this(%v) Not Equal that(%v)", len(this.NetworkInfos), len(that1.NetworkInfos)) + } + for i := range this.NetworkInfos { + if !this.NetworkInfos[i].Equal(&that1.NetworkInfos[i]) { + return fmt.Errorf("NetworkInfos this[%v](%v) Not Equal that[%v](%v)", i, this.NetworkInfos[i], i, that1.NetworkInfos[i]) + } + } + if !this.CgroupInfo.Equal(that1.CgroupInfo) { + return fmt.Errorf("CgroupInfo this(%v) Not Equal that(%v)", this.CgroupInfo, that1.CgroupInfo) + } + if this.ExecutorPID != nil && that1.ExecutorPID != nil { + if *this.ExecutorPID != *that1.ExecutorPID { + return fmt.Errorf("ExecutorPID this(%v) Not Equal that(%v)", *this.ExecutorPID, *that1.ExecutorPID) + } + } else if this.ExecutorPID != nil { + return fmt.Errorf("this.ExecutorPID == nil && that.ExecutorPID != nil") + } else if that1.ExecutorPID != nil { + return fmt.Errorf("ExecutorPID this(%v) Not Equal that(%v)", this.ExecutorPID, that1.ExecutorPID) + } + return nil +} +func (this *ContainerStatus) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*ContainerStatus) + if !ok { + that2, ok := that.(ContainerStatus) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if !this.ContainerID.Equal(that1.ContainerID) { + return false + } + if len(this.NetworkInfos) != len(that1.NetworkInfos) { + return false + } + for i := range this.NetworkInfos { + if !this.NetworkInfos[i].Equal(&that1.NetworkInfos[i]) { + return false + } + } + if !this.CgroupInfo.Equal(that1.CgroupInfo) { + return false + } + if this.ExecutorPID != nil && that1.ExecutorPID != nil { + if *this.ExecutorPID != *that1.ExecutorPID { + return false + } + } else if this.ExecutorPID != nil { + return false + } else if that1.ExecutorPID != nil { + return false + } + return true +} +func (this *CgroupInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*CgroupInfo) + if !ok { + that2, ok := that.(CgroupInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *CgroupInfo") + } + } + if that1 == nil { + if this == nil { + return 
nil + } + return fmt.Errorf("that is type *CgroupInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *CgroupInfo but is not nil && this == nil") + } + if !this.NetCLS.Equal(that1.NetCLS) { + return fmt.Errorf("NetCLS this(%v) Not Equal that(%v)", this.NetCLS, that1.NetCLS) + } + return nil +} +func (this *CgroupInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*CgroupInfo) + if !ok { + that2, ok := that.(CgroupInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if !this.NetCLS.Equal(that1.NetCLS) { + return false + } + return true +} +func (this *CgroupInfo_Blkio) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*CgroupInfo_Blkio) + if !ok { + that2, ok := that.(CgroupInfo_Blkio) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *CgroupInfo_Blkio") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *CgroupInfo_Blkio but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *CgroupInfo_Blkio but is not nil && this == nil") + } + return nil +} +func (this *CgroupInfo_Blkio) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*CgroupInfo_Blkio) + if !ok { + that2, ok := that.(CgroupInfo_Blkio) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + return true +} +func (this *CgroupInfo_Blkio_Value) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*CgroupInfo_Blkio_Value) + if !ok { + that2, ok := that.(CgroupInfo_Blkio_Value) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *CgroupInfo_Blkio_Value") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *CgroupInfo_Blkio_Value but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *CgroupInfo_Blkio_Value but is not nil && this == nil") + } + if this.Op != nil && that1.Op != nil { + if *this.Op != *that1.Op { + return fmt.Errorf("Op this(%v) Not Equal that(%v)", *this.Op, *that1.Op) + } + } else if this.Op != nil { + return fmt.Errorf("this.Op == nil && that.Op != nil") + } else if that1.Op != nil { + return fmt.Errorf("Op this(%v) Not Equal that(%v)", this.Op, that1.Op) + } + if this.Value != nil && that1.Value != nil { + if *this.Value != *that1.Value { + return fmt.Errorf("Value this(%v) Not Equal that(%v)", *this.Value, *that1.Value) + } + } else if this.Value != nil { + return fmt.Errorf("this.Value == nil && that.Value != nil") + } else if that1.Value != nil { + return fmt.Errorf("Value this(%v) Not Equal that(%v)", this.Value, that1.Value) + } + return nil +} +func (this *CgroupInfo_Blkio_Value) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*CgroupInfo_Blkio_Value) + if !ok { + that2, ok := that.(CgroupInfo_Blkio_Value) + if ok { + that1 = &that2 + } else 
{ + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Op != nil && that1.Op != nil { + if *this.Op != *that1.Op { + return false + } + } else if this.Op != nil { + return false + } else if that1.Op != nil { + return false + } + if this.Value != nil && that1.Value != nil { + if *this.Value != *that1.Value { + return false + } + } else if this.Value != nil { + return false + } else if that1.Value != nil { + return false + } + return true +} +func (this *CgroupInfo_Blkio_CFQ) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*CgroupInfo_Blkio_CFQ) + if !ok { + that2, ok := that.(CgroupInfo_Blkio_CFQ) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *CgroupInfo_Blkio_CFQ") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *CgroupInfo_Blkio_CFQ but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *CgroupInfo_Blkio_CFQ but is not nil && this == nil") + } + return nil +} +func (this *CgroupInfo_Blkio_CFQ) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*CgroupInfo_Blkio_CFQ) + if !ok { + that2, ok := that.(CgroupInfo_Blkio_CFQ) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + return true +} +func (this *CgroupInfo_Blkio_CFQ_Statistics) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*CgroupInfo_Blkio_CFQ_Statistics) + if !ok { + that2, ok := that.(CgroupInfo_Blkio_CFQ_Statistics) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *CgroupInfo_Blkio_CFQ_Statistics") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *CgroupInfo_Blkio_CFQ_Statistics but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *CgroupInfo_Blkio_CFQ_Statistics but is not nil && this == nil") + } + if !this.Device.Equal(that1.Device) { + return fmt.Errorf("Device this(%v) Not Equal that(%v)", this.Device, that1.Device) + } + if this.Sectors != nil && that1.Sectors != nil { + if *this.Sectors != *that1.Sectors { + return fmt.Errorf("Sectors this(%v) Not Equal that(%v)", *this.Sectors, *that1.Sectors) + } + } else if this.Sectors != nil { + return fmt.Errorf("this.Sectors == nil && that.Sectors != nil") + } else if that1.Sectors != nil { + return fmt.Errorf("Sectors this(%v) Not Equal that(%v)", this.Sectors, that1.Sectors) + } + if this.Time != nil && that1.Time != nil { + if *this.Time != *that1.Time { + return fmt.Errorf("Time this(%v) Not Equal that(%v)", *this.Time, *that1.Time) + } + } else if this.Time != nil { + return fmt.Errorf("this.Time == nil && that.Time != nil") + } else if that1.Time != nil { + return fmt.Errorf("Time this(%v) Not Equal that(%v)", this.Time, that1.Time) + } + if len(this.IOServiced) != len(that1.IOServiced) { + return fmt.Errorf("IOServiced this(%v) Not Equal that(%v)", len(this.IOServiced), len(that1.IOServiced)) + } + for i := range this.IOServiced { + if !this.IOServiced[i].Equal(&that1.IOServiced[i]) { + return 
fmt.Errorf("IOServiced this[%v](%v) Not Equal that[%v](%v)", i, this.IOServiced[i], i, that1.IOServiced[i]) + } + } + if len(this.IOServiceBytes) != len(that1.IOServiceBytes) { + return fmt.Errorf("IOServiceBytes this(%v) Not Equal that(%v)", len(this.IOServiceBytes), len(that1.IOServiceBytes)) + } + for i := range this.IOServiceBytes { + if !this.IOServiceBytes[i].Equal(&that1.IOServiceBytes[i]) { + return fmt.Errorf("IOServiceBytes this[%v](%v) Not Equal that[%v](%v)", i, this.IOServiceBytes[i], i, that1.IOServiceBytes[i]) + } + } + if len(this.IOServiceTime) != len(that1.IOServiceTime) { + return fmt.Errorf("IOServiceTime this(%v) Not Equal that(%v)", len(this.IOServiceTime), len(that1.IOServiceTime)) + } + for i := range this.IOServiceTime { + if !this.IOServiceTime[i].Equal(&that1.IOServiceTime[i]) { + return fmt.Errorf("IOServiceTime this[%v](%v) Not Equal that[%v](%v)", i, this.IOServiceTime[i], i, that1.IOServiceTime[i]) + } + } + if len(this.IOWaitTime) != len(that1.IOWaitTime) { + return fmt.Errorf("IOWaitTime this(%v) Not Equal that(%v)", len(this.IOWaitTime), len(that1.IOWaitTime)) + } + for i := range this.IOWaitTime { + if !this.IOWaitTime[i].Equal(&that1.IOWaitTime[i]) { + return fmt.Errorf("IOWaitTime this[%v](%v) Not Equal that[%v](%v)", i, this.IOWaitTime[i], i, that1.IOWaitTime[i]) + } + } + if len(this.IOMerged) != len(that1.IOMerged) { + return fmt.Errorf("IOMerged this(%v) Not Equal that(%v)", len(this.IOMerged), len(that1.IOMerged)) + } + for i := range this.IOMerged { + if !this.IOMerged[i].Equal(&that1.IOMerged[i]) { + return fmt.Errorf("IOMerged this[%v](%v) Not Equal that[%v](%v)", i, this.IOMerged[i], i, that1.IOMerged[i]) + } + } + if len(this.IOQueued) != len(that1.IOQueued) { + return fmt.Errorf("IOQueued this(%v) Not Equal that(%v)", len(this.IOQueued), len(that1.IOQueued)) + } + for i := range this.IOQueued { + if !this.IOQueued[i].Equal(&that1.IOQueued[i]) { + return fmt.Errorf("IOQueued this[%v](%v) Not Equal that[%v](%v)", i, this.IOQueued[i], i, that1.IOQueued[i]) + } + } + return nil +} +func (this *CgroupInfo_Blkio_CFQ_Statistics) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*CgroupInfo_Blkio_CFQ_Statistics) + if !ok { + that2, ok := that.(CgroupInfo_Blkio_CFQ_Statistics) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if !this.Device.Equal(that1.Device) { + return false + } + if this.Sectors != nil && that1.Sectors != nil { + if *this.Sectors != *that1.Sectors { + return false + } + } else if this.Sectors != nil { + return false + } else if that1.Sectors != nil { + return false + } + if this.Time != nil && that1.Time != nil { + if *this.Time != *that1.Time { + return false + } + } else if this.Time != nil { + return false + } else if that1.Time != nil { + return false + } + if len(this.IOServiced) != len(that1.IOServiced) { + return false + } + for i := range this.IOServiced { + if !this.IOServiced[i].Equal(&that1.IOServiced[i]) { + return false + } + } + if len(this.IOServiceBytes) != len(that1.IOServiceBytes) { + return false + } + for i := range this.IOServiceBytes { + if !this.IOServiceBytes[i].Equal(&that1.IOServiceBytes[i]) { + return false + } + } + if len(this.IOServiceTime) != len(that1.IOServiceTime) { + return false + } + for i := range this.IOServiceTime { + if !this.IOServiceTime[i].Equal(&that1.IOServiceTime[i]) { + return 
false + } + } + if len(this.IOWaitTime) != len(that1.IOWaitTime) { + return false + } + for i := range this.IOWaitTime { + if !this.IOWaitTime[i].Equal(&that1.IOWaitTime[i]) { + return false + } + } + if len(this.IOMerged) != len(that1.IOMerged) { + return false + } + for i := range this.IOMerged { + if !this.IOMerged[i].Equal(&that1.IOMerged[i]) { + return false + } + } + if len(this.IOQueued) != len(that1.IOQueued) { + return false + } + for i := range this.IOQueued { + if !this.IOQueued[i].Equal(&that1.IOQueued[i]) { + return false + } + } + return true +} +func (this *CgroupInfo_Blkio_Throttling) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*CgroupInfo_Blkio_Throttling) + if !ok { + that2, ok := that.(CgroupInfo_Blkio_Throttling) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *CgroupInfo_Blkio_Throttling") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *CgroupInfo_Blkio_Throttling but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *CgroupInfo_Blkio_Throttling but is not nil && this == nil") + } + return nil +} +func (this *CgroupInfo_Blkio_Throttling) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*CgroupInfo_Blkio_Throttling) + if !ok { + that2, ok := that.(CgroupInfo_Blkio_Throttling) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + return true +} +func (this *CgroupInfo_Blkio_Throttling_Statistics) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*CgroupInfo_Blkio_Throttling_Statistics) + if !ok { + that2, ok := that.(CgroupInfo_Blkio_Throttling_Statistics) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *CgroupInfo_Blkio_Throttling_Statistics") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *CgroupInfo_Blkio_Throttling_Statistics but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *CgroupInfo_Blkio_Throttling_Statistics but is not nil && this == nil") + } + if !this.Device.Equal(that1.Device) { + return fmt.Errorf("Device this(%v) Not Equal that(%v)", this.Device, that1.Device) + } + if len(this.IOServiced) != len(that1.IOServiced) { + return fmt.Errorf("IOServiced this(%v) Not Equal that(%v)", len(this.IOServiced), len(that1.IOServiced)) + } + for i := range this.IOServiced { + if !this.IOServiced[i].Equal(&that1.IOServiced[i]) { + return fmt.Errorf("IOServiced this[%v](%v) Not Equal that[%v](%v)", i, this.IOServiced[i], i, that1.IOServiced[i]) + } + } + if len(this.IOServiceBytes) != len(that1.IOServiceBytes) { + return fmt.Errorf("IOServiceBytes this(%v) Not Equal that(%v)", len(this.IOServiceBytes), len(that1.IOServiceBytes)) + } + for i := range this.IOServiceBytes { + if !this.IOServiceBytes[i].Equal(&that1.IOServiceBytes[i]) { + return fmt.Errorf("IOServiceBytes this[%v](%v) Not Equal that[%v](%v)", i, this.IOServiceBytes[i], i, that1.IOServiceBytes[i]) + } + } + return nil +} +func (this *CgroupInfo_Blkio_Throttling_Statistics) Equal(that interface{}) bool { + if that == nil { + if this 
== nil { + return true + } + return false + } + + that1, ok := that.(*CgroupInfo_Blkio_Throttling_Statistics) + if !ok { + that2, ok := that.(CgroupInfo_Blkio_Throttling_Statistics) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if !this.Device.Equal(that1.Device) { + return false + } + if len(this.IOServiced) != len(that1.IOServiced) { + return false + } + for i := range this.IOServiced { + if !this.IOServiced[i].Equal(&that1.IOServiced[i]) { + return false + } + } + if len(this.IOServiceBytes) != len(that1.IOServiceBytes) { + return false + } + for i := range this.IOServiceBytes { + if !this.IOServiceBytes[i].Equal(&that1.IOServiceBytes[i]) { + return false + } + } + return true +} +func (this *CgroupInfo_Blkio_Statistics) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*CgroupInfo_Blkio_Statistics) + if !ok { + that2, ok := that.(CgroupInfo_Blkio_Statistics) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *CgroupInfo_Blkio_Statistics") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *CgroupInfo_Blkio_Statistics but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *CgroupInfo_Blkio_Statistics but is not nil && this == nil") + } + if len(this.CFQ) != len(that1.CFQ) { + return fmt.Errorf("CFQ this(%v) Not Equal that(%v)", len(this.CFQ), len(that1.CFQ)) + } + for i := range this.CFQ { + if !this.CFQ[i].Equal(&that1.CFQ[i]) { + return fmt.Errorf("CFQ this[%v](%v) Not Equal that[%v](%v)", i, this.CFQ[i], i, that1.CFQ[i]) + } + } + if len(this.CFQRecursive) != len(that1.CFQRecursive) { + return fmt.Errorf("CFQRecursive this(%v) Not Equal that(%v)", len(this.CFQRecursive), len(that1.CFQRecursive)) + } + for i := range this.CFQRecursive { + if !this.CFQRecursive[i].Equal(&that1.CFQRecursive[i]) { + return fmt.Errorf("CFQRecursive this[%v](%v) Not Equal that[%v](%v)", i, this.CFQRecursive[i], i, that1.CFQRecursive[i]) + } + } + if len(this.Throttling) != len(that1.Throttling) { + return fmt.Errorf("Throttling this(%v) Not Equal that(%v)", len(this.Throttling), len(that1.Throttling)) + } + for i := range this.Throttling { + if !this.Throttling[i].Equal(that1.Throttling[i]) { + return fmt.Errorf("Throttling this[%v](%v) Not Equal that[%v](%v)", i, this.Throttling[i], i, that1.Throttling[i]) + } + } + return nil +} +func (this *CgroupInfo_Blkio_Statistics) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*CgroupInfo_Blkio_Statistics) + if !ok { + that2, ok := that.(CgroupInfo_Blkio_Statistics) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if len(this.CFQ) != len(that1.CFQ) { + return false + } + for i := range this.CFQ { + if !this.CFQ[i].Equal(&that1.CFQ[i]) { + return false + } + } + if len(this.CFQRecursive) != len(that1.CFQRecursive) { + return false + } + for i := range this.CFQRecursive { + if !this.CFQRecursive[i].Equal(&that1.CFQRecursive[i]) { + return false + } + } + if len(this.Throttling) != len(that1.Throttling) { + return false + } + for i := range this.Throttling { + if 
!this.Throttling[i].Equal(that1.Throttling[i]) { + return false + } + } + return true +} +func (this *CgroupInfo_NetCls) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*CgroupInfo_NetCls) + if !ok { + that2, ok := that.(CgroupInfo_NetCls) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *CgroupInfo_NetCls") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *CgroupInfo_NetCls but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *CgroupInfo_NetCls but is not nil && this == nil") + } + if this.ClassID != nil && that1.ClassID != nil { + if *this.ClassID != *that1.ClassID { + return fmt.Errorf("ClassID this(%v) Not Equal that(%v)", *this.ClassID, *that1.ClassID) + } + } else if this.ClassID != nil { + return fmt.Errorf("this.ClassID == nil && that.ClassID != nil") + } else if that1.ClassID != nil { + return fmt.Errorf("ClassID this(%v) Not Equal that(%v)", this.ClassID, that1.ClassID) + } + return nil +} +func (this *CgroupInfo_NetCls) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*CgroupInfo_NetCls) + if !ok { + that2, ok := that.(CgroupInfo_NetCls) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.ClassID != nil && that1.ClassID != nil { + if *this.ClassID != *that1.ClassID { + return false + } + } else if this.ClassID != nil { + return false + } else if that1.ClassID != nil { + return false + } + return true +} +func (this *Labels) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Labels) + if !ok { + that2, ok := that.(Labels) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Labels") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Labels but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Labels but is not nil && this == nil") + } + if len(this.Labels) != len(that1.Labels) { + return fmt.Errorf("Labels this(%v) Not Equal that(%v)", len(this.Labels), len(that1.Labels)) + } + for i := range this.Labels { + if !this.Labels[i].Equal(&that1.Labels[i]) { + return fmt.Errorf("Labels this[%v](%v) Not Equal that[%v](%v)", i, this.Labels[i], i, that1.Labels[i]) + } + } + return nil +} +func (this *Labels) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Labels) + if !ok { + that2, ok := that.(Labels) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if len(this.Labels) != len(that1.Labels) { + return false + } + for i := range this.Labels { + if !this.Labels[i].Equal(&that1.Labels[i]) { + return false + } + } + return true +} +func (this *Label) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Label) + if !ok { + that2, ok := that.(Label) + if ok { + that1 = &that2 + } else { + return 
fmt.Errorf("that is not of type *Label") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Label but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Label but is not nil && this == nil") + } + if this.Key != that1.Key { + return fmt.Errorf("Key this(%v) Not Equal that(%v)", this.Key, that1.Key) + } + if this.Value != nil && that1.Value != nil { + if *this.Value != *that1.Value { + return fmt.Errorf("Value this(%v) Not Equal that(%v)", *this.Value, *that1.Value) + } + } else if this.Value != nil { + return fmt.Errorf("this.Value == nil && that.Value != nil") + } else if that1.Value != nil { + return fmt.Errorf("Value this(%v) Not Equal that(%v)", this.Value, that1.Value) + } + return nil +} +func (this *Label) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Label) + if !ok { + that2, ok := that.(Label) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Key != that1.Key { + return false + } + if this.Value != nil && that1.Value != nil { + if *this.Value != *that1.Value { + return false + } + } else if this.Value != nil { + return false + } else if that1.Value != nil { + return false + } + return true +} +func (this *Port) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Port) + if !ok { + that2, ok := that.(Port) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Port") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Port but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Port but is not nil && this == nil") + } + if this.Number != that1.Number { + return fmt.Errorf("Number this(%v) Not Equal that(%v)", this.Number, that1.Number) + } + if this.Name != nil && that1.Name != nil { + if *this.Name != *that1.Name { + return fmt.Errorf("Name this(%v) Not Equal that(%v)", *this.Name, *that1.Name) + } + } else if this.Name != nil { + return fmt.Errorf("this.Name == nil && that.Name != nil") + } else if that1.Name != nil { + return fmt.Errorf("Name this(%v) Not Equal that(%v)", this.Name, that1.Name) + } + if this.Protocol != nil && that1.Protocol != nil { + if *this.Protocol != *that1.Protocol { + return fmt.Errorf("Protocol this(%v) Not Equal that(%v)", *this.Protocol, *that1.Protocol) + } + } else if this.Protocol != nil { + return fmt.Errorf("this.Protocol == nil && that.Protocol != nil") + } else if that1.Protocol != nil { + return fmt.Errorf("Protocol this(%v) Not Equal that(%v)", this.Protocol, that1.Protocol) + } + if this.Visibility != nil && that1.Visibility != nil { + if *this.Visibility != *that1.Visibility { + return fmt.Errorf("Visibility this(%v) Not Equal that(%v)", *this.Visibility, *that1.Visibility) + } + } else if this.Visibility != nil { + return fmt.Errorf("this.Visibility == nil && that.Visibility != nil") + } else if that1.Visibility != nil { + return fmt.Errorf("Visibility this(%v) Not Equal that(%v)", this.Visibility, that1.Visibility) + } + if !this.Labels.Equal(that1.Labels) { + return fmt.Errorf("Labels this(%v) Not Equal that(%v)", this.Labels, that1.Labels) + } + return nil +} +func (this *Port) Equal(that interface{}) bool { + if 
that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Port) + if !ok { + that2, ok := that.(Port) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Number != that1.Number { + return false + } + if this.Name != nil && that1.Name != nil { + if *this.Name != *that1.Name { + return false + } + } else if this.Name != nil { + return false + } else if that1.Name != nil { + return false + } + if this.Protocol != nil && that1.Protocol != nil { + if *this.Protocol != *that1.Protocol { + return false + } + } else if this.Protocol != nil { + return false + } else if that1.Protocol != nil { + return false + } + if this.Visibility != nil && that1.Visibility != nil { + if *this.Visibility != *that1.Visibility { + return false + } + } else if this.Visibility != nil { + return false + } else if that1.Visibility != nil { + return false + } + if !this.Labels.Equal(that1.Labels) { + return false + } + return true +} +func (this *Ports) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Ports) + if !ok { + that2, ok := that.(Ports) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Ports") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Ports but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Ports but is not nil && this == nil") + } + if len(this.Ports) != len(that1.Ports) { + return fmt.Errorf("Ports this(%v) Not Equal that(%v)", len(this.Ports), len(that1.Ports)) + } + for i := range this.Ports { + if !this.Ports[i].Equal(&that1.Ports[i]) { + return fmt.Errorf("Ports this[%v](%v) Not Equal that[%v](%v)", i, this.Ports[i], i, that1.Ports[i]) + } + } + return nil +} +func (this *Ports) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Ports) + if !ok { + that2, ok := that.(Ports) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if len(this.Ports) != len(that1.Ports) { + return false + } + for i := range this.Ports { + if !this.Ports[i].Equal(&that1.Ports[i]) { + return false + } + } + return true +} +func (this *DiscoveryInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*DiscoveryInfo) + if !ok { + that2, ok := that.(DiscoveryInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *DiscoveryInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *DiscoveryInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *DiscoveryInfo but is not nil && this == nil") + } + if this.Visibility != that1.Visibility { + return fmt.Errorf("Visibility this(%v) Not Equal that(%v)", this.Visibility, that1.Visibility) + } + if this.Name != nil && that1.Name != nil { + if *this.Name != *that1.Name { + return fmt.Errorf("Name this(%v) Not Equal that(%v)", *this.Name, *that1.Name) + } + } else if this.Name != nil { + return fmt.Errorf("this.Name == nil && that.Name != nil") + } 
else if that1.Name != nil { + return fmt.Errorf("Name this(%v) Not Equal that(%v)", this.Name, that1.Name) + } + if this.Environment != nil && that1.Environment != nil { + if *this.Environment != *that1.Environment { + return fmt.Errorf("Environment this(%v) Not Equal that(%v)", *this.Environment, *that1.Environment) + } + } else if this.Environment != nil { + return fmt.Errorf("this.Environment == nil && that.Environment != nil") + } else if that1.Environment != nil { + return fmt.Errorf("Environment this(%v) Not Equal that(%v)", this.Environment, that1.Environment) + } + if this.Location != nil && that1.Location != nil { + if *this.Location != *that1.Location { + return fmt.Errorf("Location this(%v) Not Equal that(%v)", *this.Location, *that1.Location) + } + } else if this.Location != nil { + return fmt.Errorf("this.Location == nil && that.Location != nil") + } else if that1.Location != nil { + return fmt.Errorf("Location this(%v) Not Equal that(%v)", this.Location, that1.Location) + } + if this.Version != nil && that1.Version != nil { + if *this.Version != *that1.Version { + return fmt.Errorf("Version this(%v) Not Equal that(%v)", *this.Version, *that1.Version) + } + } else if this.Version != nil { + return fmt.Errorf("this.Version == nil && that.Version != nil") + } else if that1.Version != nil { + return fmt.Errorf("Version this(%v) Not Equal that(%v)", this.Version, that1.Version) + } + if !this.Ports.Equal(that1.Ports) { + return fmt.Errorf("Ports this(%v) Not Equal that(%v)", this.Ports, that1.Ports) + } + if !this.Labels.Equal(that1.Labels) { + return fmt.Errorf("Labels this(%v) Not Equal that(%v)", this.Labels, that1.Labels) + } + return nil +} +func (this *DiscoveryInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*DiscoveryInfo) + if !ok { + that2, ok := that.(DiscoveryInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Visibility != that1.Visibility { + return false + } + if this.Name != nil && that1.Name != nil { + if *this.Name != *that1.Name { + return false + } + } else if this.Name != nil { + return false + } else if that1.Name != nil { + return false + } + if this.Environment != nil && that1.Environment != nil { + if *this.Environment != *that1.Environment { + return false + } + } else if this.Environment != nil { + return false + } else if that1.Environment != nil { + return false + } + if this.Location != nil && that1.Location != nil { + if *this.Location != *that1.Location { + return false + } + } else if this.Location != nil { + return false + } else if that1.Location != nil { + return false + } + if this.Version != nil && that1.Version != nil { + if *this.Version != *that1.Version { + return false + } + } else if this.Version != nil { + return false + } else if that1.Version != nil { + return false + } + if !this.Ports.Equal(that1.Ports) { + return false + } + if !this.Labels.Equal(that1.Labels) { + return false + } + return true +} +func (this *WeightInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*WeightInfo) + if !ok { + that2, ok := that.(WeightInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *WeightInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return 
fmt.Errorf("that is type *WeightInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *WeightInfo but is not nil && this == nil") + } + if this.Weight != that1.Weight { + return fmt.Errorf("Weight this(%v) Not Equal that(%v)", this.Weight, that1.Weight) + } + if this.Role != nil && that1.Role != nil { + if *this.Role != *that1.Role { + return fmt.Errorf("Role this(%v) Not Equal that(%v)", *this.Role, *that1.Role) + } + } else if this.Role != nil { + return fmt.Errorf("this.Role == nil && that.Role != nil") + } else if that1.Role != nil { + return fmt.Errorf("Role this(%v) Not Equal that(%v)", this.Role, that1.Role) + } + return nil +} +func (this *WeightInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*WeightInfo) + if !ok { + that2, ok := that.(WeightInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Weight != that1.Weight { + return false + } + if this.Role != nil && that1.Role != nil { + if *this.Role != *that1.Role { + return false + } + } else if this.Role != nil { + return false + } else if that1.Role != nil { + return false + } + return true +} +func (this *VersionInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*VersionInfo) + if !ok { + that2, ok := that.(VersionInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *VersionInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *VersionInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *VersionInfo but is not nil && this == nil") + } + if this.Version != that1.Version { + return fmt.Errorf("Version this(%v) Not Equal that(%v)", this.Version, that1.Version) + } + if this.BuildDate != nil && that1.BuildDate != nil { + if *this.BuildDate != *that1.BuildDate { + return fmt.Errorf("BuildDate this(%v) Not Equal that(%v)", *this.BuildDate, *that1.BuildDate) + } + } else if this.BuildDate != nil { + return fmt.Errorf("this.BuildDate == nil && that.BuildDate != nil") + } else if that1.BuildDate != nil { + return fmt.Errorf("BuildDate this(%v) Not Equal that(%v)", this.BuildDate, that1.BuildDate) + } + if this.BuildTime != nil && that1.BuildTime != nil { + if *this.BuildTime != *that1.BuildTime { + return fmt.Errorf("BuildTime this(%v) Not Equal that(%v)", *this.BuildTime, *that1.BuildTime) + } + } else if this.BuildTime != nil { + return fmt.Errorf("this.BuildTime == nil && that.BuildTime != nil") + } else if that1.BuildTime != nil { + return fmt.Errorf("BuildTime this(%v) Not Equal that(%v)", this.BuildTime, that1.BuildTime) + } + if this.BuildUser != nil && that1.BuildUser != nil { + if *this.BuildUser != *that1.BuildUser { + return fmt.Errorf("BuildUser this(%v) Not Equal that(%v)", *this.BuildUser, *that1.BuildUser) + } + } else if this.BuildUser != nil { + return fmt.Errorf("this.BuildUser == nil && that.BuildUser != nil") + } else if that1.BuildUser != nil { + return fmt.Errorf("BuildUser this(%v) Not Equal that(%v)", this.BuildUser, that1.BuildUser) + } + if this.GitSHA != nil && that1.GitSHA != nil { + if *this.GitSHA != *that1.GitSHA { + return fmt.Errorf("GitSHA this(%v) Not Equal that(%v)", *this.GitSHA, *that1.GitSHA) + 
} + } else if this.GitSHA != nil { + return fmt.Errorf("this.GitSHA == nil && that.GitSHA != nil") + } else if that1.GitSHA != nil { + return fmt.Errorf("GitSHA this(%v) Not Equal that(%v)", this.GitSHA, that1.GitSHA) + } + if this.GitBranch != nil && that1.GitBranch != nil { + if *this.GitBranch != *that1.GitBranch { + return fmt.Errorf("GitBranch this(%v) Not Equal that(%v)", *this.GitBranch, *that1.GitBranch) + } + } else if this.GitBranch != nil { + return fmt.Errorf("this.GitBranch == nil && that.GitBranch != nil") + } else if that1.GitBranch != nil { + return fmt.Errorf("GitBranch this(%v) Not Equal that(%v)", this.GitBranch, that1.GitBranch) + } + if this.GitTag != nil && that1.GitTag != nil { + if *this.GitTag != *that1.GitTag { + return fmt.Errorf("GitTag this(%v) Not Equal that(%v)", *this.GitTag, *that1.GitTag) + } + } else if this.GitTag != nil { + return fmt.Errorf("this.GitTag == nil && that.GitTag != nil") + } else if that1.GitTag != nil { + return fmt.Errorf("GitTag this(%v) Not Equal that(%v)", this.GitTag, that1.GitTag) + } + return nil +} +func (this *VersionInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*VersionInfo) + if !ok { + that2, ok := that.(VersionInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Version != that1.Version { + return false + } + if this.BuildDate != nil && that1.BuildDate != nil { + if *this.BuildDate != *that1.BuildDate { + return false + } + } else if this.BuildDate != nil { + return false + } else if that1.BuildDate != nil { + return false + } + if this.BuildTime != nil && that1.BuildTime != nil { + if *this.BuildTime != *that1.BuildTime { + return false + } + } else if this.BuildTime != nil { + return false + } else if that1.BuildTime != nil { + return false + } + if this.BuildUser != nil && that1.BuildUser != nil { + if *this.BuildUser != *that1.BuildUser { + return false + } + } else if this.BuildUser != nil { + return false + } else if that1.BuildUser != nil { + return false + } + if this.GitSHA != nil && that1.GitSHA != nil { + if *this.GitSHA != *that1.GitSHA { + return false + } + } else if this.GitSHA != nil { + return false + } else if that1.GitSHA != nil { + return false + } + if this.GitBranch != nil && that1.GitBranch != nil { + if *this.GitBranch != *that1.GitBranch { + return false + } + } else if this.GitBranch != nil { + return false + } else if that1.GitBranch != nil { + return false + } + if this.GitTag != nil && that1.GitTag != nil { + if *this.GitTag != *that1.GitTag { + return false + } + } else if this.GitTag != nil { + return false + } else if that1.GitTag != nil { + return false + } + return true +} +func (this *Flag) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Flag) + if !ok { + that2, ok := that.(Flag) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Flag") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Flag but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Flag but is not nil && this == nil") + } + if this.Name != that1.Name { + return fmt.Errorf("Name this(%v) Not Equal that(%v)", this.Name, that1.Name) + } + if this.Value != nil && that1.Value 
!= nil { + if *this.Value != *that1.Value { + return fmt.Errorf("Value this(%v) Not Equal that(%v)", *this.Value, *that1.Value) + } + } else if this.Value != nil { + return fmt.Errorf("this.Value == nil && that.Value != nil") + } else if that1.Value != nil { + return fmt.Errorf("Value this(%v) Not Equal that(%v)", this.Value, that1.Value) + } + return nil +} +func (this *Flag) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Flag) + if !ok { + that2, ok := that.(Flag) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Value != nil && that1.Value != nil { + if *this.Value != *that1.Value { + return false + } + } else if this.Value != nil { + return false + } else if that1.Value != nil { + return false + } + return true +} +func (this *Role) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Role) + if !ok { + that2, ok := that.(Role) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Role") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Role but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Role but is not nil && this == nil") + } + if this.Name != that1.Name { + return fmt.Errorf("Name this(%v) Not Equal that(%v)", this.Name, that1.Name) + } + if this.Weight != that1.Weight { + return fmt.Errorf("Weight this(%v) Not Equal that(%v)", this.Weight, that1.Weight) + } + if len(this.Frameworks) != len(that1.Frameworks) { + return fmt.Errorf("Frameworks this(%v) Not Equal that(%v)", len(this.Frameworks), len(that1.Frameworks)) + } + for i := range this.Frameworks { + if !this.Frameworks[i].Equal(&that1.Frameworks[i]) { + return fmt.Errorf("Frameworks this[%v](%v) Not Equal that[%v](%v)", i, this.Frameworks[i], i, that1.Frameworks[i]) + } + } + if len(this.Resources) != len(that1.Resources) { + return fmt.Errorf("Resources this(%v) Not Equal that(%v)", len(this.Resources), len(that1.Resources)) + } + for i := range this.Resources { + if !this.Resources[i].Equal(&that1.Resources[i]) { + return fmt.Errorf("Resources this[%v](%v) Not Equal that[%v](%v)", i, this.Resources[i], i, that1.Resources[i]) + } + } + return nil +} +func (this *Role) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Role) + if !ok { + that2, ok := that.(Role) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Weight != that1.Weight { + return false + } + if len(this.Frameworks) != len(that1.Frameworks) { + return false + } + for i := range this.Frameworks { + if !this.Frameworks[i].Equal(&that1.Frameworks[i]) { + return false + } + } + if len(this.Resources) != len(that1.Resources) { + return false + } + for i := range this.Resources { + if !this.Resources[i].Equal(&that1.Resources[i]) { + return false + } + } + return true +} +func (this *Metric) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that 
== nil && this != nil") + } + + that1, ok := that.(*Metric) + if !ok { + that2, ok := that.(Metric) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Metric") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Metric but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Metric but is not nil && this == nil") + } + if this.Name != that1.Name { + return fmt.Errorf("Name this(%v) Not Equal that(%v)", this.Name, that1.Name) + } + if this.Value != nil && that1.Value != nil { + if *this.Value != *that1.Value { + return fmt.Errorf("Value this(%v) Not Equal that(%v)", *this.Value, *that1.Value) + } + } else if this.Value != nil { + return fmt.Errorf("this.Value == nil && that.Value != nil") + } else if that1.Value != nil { + return fmt.Errorf("Value this(%v) Not Equal that(%v)", this.Value, that1.Value) + } + return nil +} +func (this *Metric) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Metric) + if !ok { + that2, ok := that.(Metric) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Value != nil && that1.Value != nil { + if *this.Value != *that1.Value { + return false + } + } else if this.Value != nil { + return false + } else if that1.Value != nil { + return false + } + return true +} +func (this *FileInfo) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*FileInfo) + if !ok { + that2, ok := that.(FileInfo) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *FileInfo") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *FileInfo but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *FileInfo but is not nil && this == nil") + } + if this.Path != that1.Path { + return fmt.Errorf("Path this(%v) Not Equal that(%v)", this.Path, that1.Path) + } + if this.Nlink != nil && that1.Nlink != nil { + if *this.Nlink != *that1.Nlink { + return fmt.Errorf("Nlink this(%v) Not Equal that(%v)", *this.Nlink, *that1.Nlink) + } + } else if this.Nlink != nil { + return fmt.Errorf("this.Nlink == nil && that.Nlink != nil") + } else if that1.Nlink != nil { + return fmt.Errorf("Nlink this(%v) Not Equal that(%v)", this.Nlink, that1.Nlink) + } + if this.Size != nil && that1.Size != nil { + if *this.Size != *that1.Size { + return fmt.Errorf("Size this(%v) Not Equal that(%v)", *this.Size, *that1.Size) + } + } else if this.Size != nil { + return fmt.Errorf("this.Size == nil && that.Size != nil") + } else if that1.Size != nil { + return fmt.Errorf("Size this(%v) Not Equal that(%v)", this.Size, that1.Size) + } + if !this.Mtime.Equal(that1.Mtime) { + return fmt.Errorf("Mtime this(%v) Not Equal that(%v)", this.Mtime, that1.Mtime) + } + if this.Mode != nil && that1.Mode != nil { + if *this.Mode != *that1.Mode { + return fmt.Errorf("Mode this(%v) Not Equal that(%v)", *this.Mode, *that1.Mode) + } + } else if this.Mode != nil { + return fmt.Errorf("this.Mode == nil && that.Mode != nil") + } else if that1.Mode != nil { + return fmt.Errorf("Mode this(%v) Not Equal that(%v)", this.Mode, that1.Mode) + } + if this.UID != nil && 
that1.UID != nil { + if *this.UID != *that1.UID { + return fmt.Errorf("UID this(%v) Not Equal that(%v)", *this.UID, *that1.UID) + } + } else if this.UID != nil { + return fmt.Errorf("this.UID == nil && that.UID != nil") + } else if that1.UID != nil { + return fmt.Errorf("UID this(%v) Not Equal that(%v)", this.UID, that1.UID) + } + if this.GID != nil && that1.GID != nil { + if *this.GID != *that1.GID { + return fmt.Errorf("GID this(%v) Not Equal that(%v)", *this.GID, *that1.GID) + } + } else if this.GID != nil { + return fmt.Errorf("this.GID == nil && that.GID != nil") + } else if that1.GID != nil { + return fmt.Errorf("GID this(%v) Not Equal that(%v)", this.GID, that1.GID) + } + return nil +} +func (this *FileInfo) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*FileInfo) + if !ok { + that2, ok := that.(FileInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Path != that1.Path { + return false + } + if this.Nlink != nil && that1.Nlink != nil { + if *this.Nlink != *that1.Nlink { + return false + } + } else if this.Nlink != nil { + return false + } else if that1.Nlink != nil { + return false + } + if this.Size != nil && that1.Size != nil { + if *this.Size != *that1.Size { + return false + } + } else if this.Size != nil { + return false + } else if that1.Size != nil { + return false + } + if !this.Mtime.Equal(that1.Mtime) { + return false + } + if this.Mode != nil && that1.Mode != nil { + if *this.Mode != *that1.Mode { + return false + } + } else if this.Mode != nil { + return false + } else if that1.Mode != nil { + return false + } + if this.UID != nil && that1.UID != nil { + if *this.UID != *that1.UID { + return false + } + } else if this.UID != nil { + return false + } else if that1.UID != nil { + return false + } + if this.GID != nil && that1.GID != nil { + if *this.GID != *that1.GID { + return false + } + } else if this.GID != nil { + return false + } else if that1.GID != nil { + return false + } + return true +} +func (this *Device) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Device) + if !ok { + that2, ok := that.(Device) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Device") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Device but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Device but is not nil && this == nil") + } + if this.Path != nil && that1.Path != nil { + if *this.Path != *that1.Path { + return fmt.Errorf("Path this(%v) Not Equal that(%v)", *this.Path, *that1.Path) + } + } else if this.Path != nil { + return fmt.Errorf("this.Path == nil && that.Path != nil") + } else if that1.Path != nil { + return fmt.Errorf("Path this(%v) Not Equal that(%v)", this.Path, that1.Path) + } + if !this.Number.Equal(that1.Number) { + return fmt.Errorf("Number this(%v) Not Equal that(%v)", this.Number, that1.Number) + } + return nil +} +func (this *Device) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Device) + if !ok { + that2, ok := that.(Device) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { 
+ return true + } + return false + } else if this == nil { + return false + } + if this.Path != nil && that1.Path != nil { + if *this.Path != *that1.Path { + return false + } + } else if this.Path != nil { + return false + } else if that1.Path != nil { + return false + } + if !this.Number.Equal(that1.Number) { + return false + } + return true +} +func (this *Device_Number) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*Device_Number) + if !ok { + that2, ok := that.(Device_Number) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *Device_Number") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *Device_Number but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *Device_Number but is not nil && this == nil") + } + if this.MajorNumber != nil && that1.MajorNumber != nil { + if *this.MajorNumber != *that1.MajorNumber { + return fmt.Errorf("MajorNumber this(%v) Not Equal that(%v)", *this.MajorNumber, *that1.MajorNumber) + } + } else if this.MajorNumber != nil { + return fmt.Errorf("this.MajorNumber == nil && that.MajorNumber != nil") + } else if that1.MajorNumber != nil { + return fmt.Errorf("MajorNumber this(%v) Not Equal that(%v)", this.MajorNumber, that1.MajorNumber) + } + if this.MinorNumber != nil && that1.MinorNumber != nil { + if *this.MinorNumber != *that1.MinorNumber { + return fmt.Errorf("MinorNumber this(%v) Not Equal that(%v)", *this.MinorNumber, *that1.MinorNumber) + } + } else if this.MinorNumber != nil { + return fmt.Errorf("this.MinorNumber == nil && that.MinorNumber != nil") + } else if that1.MinorNumber != nil { + return fmt.Errorf("MinorNumber this(%v) Not Equal that(%v)", this.MinorNumber, that1.MinorNumber) + } + return nil +} +func (this *Device_Number) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Device_Number) + if !ok { + that2, ok := that.(Device_Number) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.MajorNumber != nil && that1.MajorNumber != nil { + if *this.MajorNumber != *that1.MajorNumber { + return false + } + } else if this.MajorNumber != nil { + return false + } else if that1.MajorNumber != nil { + return false + } + if this.MinorNumber != nil && that1.MinorNumber != nil { + if *this.MinorNumber != *that1.MinorNumber { + return false + } + } else if this.MinorNumber != nil { + return false + } else if that1.MinorNumber != nil { + return false + } + return true +} +func (this *DeviceAccess) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*DeviceAccess) + if !ok { + that2, ok := that.(DeviceAccess) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *DeviceAccess") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *DeviceAccess but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *DeviceAccess but is not nil && this == nil") + } + if !this.Device.Equal(&that1.Device) { + return fmt.Errorf("Device this(%v) Not Equal that(%v)", this.Device, that1.Device) + } + if 
!this.Access.Equal(&that1.Access) { + return fmt.Errorf("Access this(%v) Not Equal that(%v)", this.Access, that1.Access) + } + return nil +} +func (this *DeviceAccess) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*DeviceAccess) + if !ok { + that2, ok := that.(DeviceAccess) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if !this.Device.Equal(&that1.Device) { + return false + } + if !this.Access.Equal(&that1.Access) { + return false + } + return true +} +func (this *DeviceAccess_Access) VerboseEqual(that interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*DeviceAccess_Access) + if !ok { + that2, ok := that.(DeviceAccess_Access) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *DeviceAccess_Access") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *DeviceAccess_Access but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *DeviceAccess_Access but is not nil && this == nil") + } + if this.Read != nil && that1.Read != nil { + if *this.Read != *that1.Read { + return fmt.Errorf("Read this(%v) Not Equal that(%v)", *this.Read, *that1.Read) + } + } else if this.Read != nil { + return fmt.Errorf("this.Read == nil && that.Read != nil") + } else if that1.Read != nil { + return fmt.Errorf("Read this(%v) Not Equal that(%v)", this.Read, that1.Read) + } + if this.Write != nil && that1.Write != nil { + if *this.Write != *that1.Write { + return fmt.Errorf("Write this(%v) Not Equal that(%v)", *this.Write, *that1.Write) + } + } else if this.Write != nil { + return fmt.Errorf("this.Write == nil && that.Write != nil") + } else if that1.Write != nil { + return fmt.Errorf("Write this(%v) Not Equal that(%v)", this.Write, that1.Write) + } + if this.Mknod != nil && that1.Mknod != nil { + if *this.Mknod != *that1.Mknod { + return fmt.Errorf("Mknod this(%v) Not Equal that(%v)", *this.Mknod, *that1.Mknod) + } + } else if this.Mknod != nil { + return fmt.Errorf("this.Mknod == nil && that.Mknod != nil") + } else if that1.Mknod != nil { + return fmt.Errorf("Mknod this(%v) Not Equal that(%v)", this.Mknod, that1.Mknod) + } + return nil +} +func (this *DeviceAccess_Access) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*DeviceAccess_Access) + if !ok { + that2, ok := that.(DeviceAccess_Access) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Read != nil && that1.Read != nil { + if *this.Read != *that1.Read { + return false + } + } else if this.Read != nil { + return false + } else if that1.Read != nil { + return false + } + if this.Write != nil && that1.Write != nil { + if *this.Write != *that1.Write { + return false + } + } else if this.Write != nil { + return false + } else if that1.Write != nil { + return false + } + if this.Mknod != nil && that1.Mknod != nil { + if *this.Mknod != *that1.Mknod { + return false + } + } else if this.Mknod != nil { + return false + } else if that1.Mknod != nil { + return false + } + return true +} +func (this *DeviceWhitelist) VerboseEqual(that 
interface{}) error { + if that == nil { + if this == nil { + return nil + } + return fmt.Errorf("that == nil && this != nil") + } + + that1, ok := that.(*DeviceWhitelist) + if !ok { + that2, ok := that.(DeviceWhitelist) + if ok { + that1 = &that2 + } else { + return fmt.Errorf("that is not of type *DeviceWhitelist") + } + } + if that1 == nil { + if this == nil { + return nil + } + return fmt.Errorf("that is type *DeviceWhitelist but is nil && this != nil") + } else if this == nil { + return fmt.Errorf("that is type *DeviceWhitelist but is not nil && this == nil") + } + if len(this.AllowedDevices) != len(that1.AllowedDevices) { + return fmt.Errorf("AllowedDevices this(%v) Not Equal that(%v)", len(this.AllowedDevices), len(that1.AllowedDevices)) + } + for i := range this.AllowedDevices { + if !this.AllowedDevices[i].Equal(&that1.AllowedDevices[i]) { + return fmt.Errorf("AllowedDevices this[%v](%v) Not Equal that[%v](%v)", i, this.AllowedDevices[i], i, that1.AllowedDevices[i]) + } + } + return nil +} +func (this *DeviceWhitelist) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*DeviceWhitelist) + if !ok { + that2, ok := that.(DeviceWhitelist) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if len(this.AllowedDevices) != len(that1.AllowedDevices) { + return false + } + for i := range this.AllowedDevices { + if !this.AllowedDevices[i].Equal(&that1.AllowedDevices[i]) { + return false + } + } + return true +} +func (this *FrameworkID) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.FrameworkID{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OfferID) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.OfferID{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *AgentID) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.AgentID{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TaskID) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.TaskID{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ExecutorID) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.ExecutorID{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ContainerID) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&mesos.ContainerID{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.Parent != nil { + s = append(s, "Parent: "+fmt.Sprintf("%#v", this.Parent)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ResourceProviderID) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.ResourceProviderID{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = 
append(s, "}") + return strings.Join(s, "") +} +func (this *OperationID) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.OperationID{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TimeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.TimeInfo{") + s = append(s, "Nanoseconds: "+fmt.Sprintf("%#v", this.Nanoseconds)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DurationInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.DurationInfo{") + s = append(s, "Nanoseconds: "+fmt.Sprintf("%#v", this.Nanoseconds)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Address) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&mesos.Address{") + if this.Hostname != nil { + s = append(s, "Hostname: "+valueToGoStringMesos(this.Hostname, "string")+",\n") + } + if this.IP != nil { + s = append(s, "IP: "+valueToGoStringMesos(this.IP, "string")+",\n") + } + s = append(s, "Port: "+fmt.Sprintf("%#v", this.Port)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *URL) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&mesos.URL{") + s = append(s, "Scheme: "+fmt.Sprintf("%#v", this.Scheme)+",\n") + s = append(s, "Address: "+strings.Replace(this.Address.GoString(), `&`, ``, 1)+",\n") + if this.Path != nil { + s = append(s, "Path: "+valueToGoStringMesos(this.Path, "string")+",\n") + } + if this.Query != nil { + s = append(s, "Query: "+fmt.Sprintf("%#v", this.Query)+",\n") + } + if this.Fragment != nil { + s = append(s, "Fragment: "+valueToGoStringMesos(this.Fragment, "string")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Unavailability) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&mesos.Unavailability{") + s = append(s, "Start: "+strings.Replace(this.Start.GoString(), `&`, ``, 1)+",\n") + if this.Duration != nil { + s = append(s, "Duration: "+fmt.Sprintf("%#v", this.Duration)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MachineID) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&mesos.MachineID{") + if this.Hostname != nil { + s = append(s, "Hostname: "+valueToGoStringMesos(this.Hostname, "string")+",\n") + } + if this.IP != nil { + s = append(s, "IP: "+valueToGoStringMesos(this.IP, "string")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MachineInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&mesos.MachineInfo{") + s = append(s, "ID: "+strings.Replace(this.ID.GoString(), `&`, ``, 1)+",\n") + if this.Mode != nil { + s = append(s, "Mode: "+valueToGoStringMesos(this.Mode, "MachineInfo_Mode")+",\n") + } + if this.Unavailability != nil { + s = append(s, "Unavailability: "+fmt.Sprintf("%#v", this.Unavailability)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FrameworkInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 16) + s = append(s, "&mesos.FrameworkInfo{") + s = append(s, "User: "+fmt.Sprintf("%#v", this.User)+",\n") + s = append(s, "Name: 
"+fmt.Sprintf("%#v", this.Name)+",\n") + if this.ID != nil { + s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") + } + if this.FailoverTimeout != nil { + s = append(s, "FailoverTimeout: "+valueToGoStringMesos(this.FailoverTimeout, "float64")+",\n") + } + if this.Checkpoint != nil { + s = append(s, "Checkpoint: "+valueToGoStringMesos(this.Checkpoint, "bool")+",\n") + } + if this.Role != nil { + s = append(s, "Role: "+valueToGoStringMesos(this.Role, "string")+",\n") + } + if this.Roles != nil { + s = append(s, "Roles: "+fmt.Sprintf("%#v", this.Roles)+",\n") + } + if this.Hostname != nil { + s = append(s, "Hostname: "+valueToGoStringMesos(this.Hostname, "string")+",\n") + } + if this.Principal != nil { + s = append(s, "Principal: "+valueToGoStringMesos(this.Principal, "string")+",\n") + } + if this.WebUiURL != nil { + s = append(s, "WebUiURL: "+valueToGoStringMesos(this.WebUiURL, "string")+",\n") + } + if this.Capabilities != nil { + s = append(s, "Capabilities: "+fmt.Sprintf("%#v", this.Capabilities)+",\n") + } + if this.Labels != nil { + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FrameworkInfo_Capability) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.FrameworkInfo_Capability{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CheckInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&mesos.CheckInfo{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + if this.Command != nil { + s = append(s, "Command: "+fmt.Sprintf("%#v", this.Command)+",\n") + } + if this.HTTP != nil { + s = append(s, "HTTP: "+fmt.Sprintf("%#v", this.HTTP)+",\n") + } + if this.TCP != nil { + s = append(s, "TCP: "+fmt.Sprintf("%#v", this.TCP)+",\n") + } + if this.DelaySeconds != nil { + s = append(s, "DelaySeconds: "+valueToGoStringMesos(this.DelaySeconds, "float64")+",\n") + } + if this.IntervalSeconds != nil { + s = append(s, "IntervalSeconds: "+valueToGoStringMesos(this.IntervalSeconds, "float64")+",\n") + } + if this.TimeoutSeconds != nil { + s = append(s, "TimeoutSeconds: "+valueToGoStringMesos(this.TimeoutSeconds, "float64")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CheckInfo_Command) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.CheckInfo_Command{") + s = append(s, "Command: "+strings.Replace(this.Command.GoString(), `&`, ``, 1)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CheckInfo_Http) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&mesos.CheckInfo_Http{") + s = append(s, "Port: "+fmt.Sprintf("%#v", this.Port)+",\n") + if this.Path != nil { + s = append(s, "Path: "+valueToGoStringMesos(this.Path, "string")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CheckInfo_Tcp) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.CheckInfo_Tcp{") + s = append(s, "Port: "+fmt.Sprintf("%#v", this.Port)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *HealthCheck) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 13) + s = append(s, "&mesos.HealthCheck{") + if this.DelaySeconds != nil { + s = append(s, 
"DelaySeconds: "+valueToGoStringMesos(this.DelaySeconds, "float64")+",\n") + } + if this.IntervalSeconds != nil { + s = append(s, "IntervalSeconds: "+valueToGoStringMesos(this.IntervalSeconds, "float64")+",\n") + } + if this.TimeoutSeconds != nil { + s = append(s, "TimeoutSeconds: "+valueToGoStringMesos(this.TimeoutSeconds, "float64")+",\n") + } + if this.ConsecutiveFailures != nil { + s = append(s, "ConsecutiveFailures: "+valueToGoStringMesos(this.ConsecutiveFailures, "uint32")+",\n") + } + if this.GracePeriodSeconds != nil { + s = append(s, "GracePeriodSeconds: "+valueToGoStringMesos(this.GracePeriodSeconds, "float64")+",\n") + } + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + if this.Command != nil { + s = append(s, "Command: "+fmt.Sprintf("%#v", this.Command)+",\n") + } + if this.HTTP != nil { + s = append(s, "HTTP: "+fmt.Sprintf("%#v", this.HTTP)+",\n") + } + if this.TCP != nil { + s = append(s, "TCP: "+fmt.Sprintf("%#v", this.TCP)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *HealthCheck_HTTPCheckInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&mesos.HealthCheck_HTTPCheckInfo{") + if this.Protocol != nil { + s = append(s, "Protocol: "+valueToGoStringMesos(this.Protocol, "NetworkInfo_Protocol")+",\n") + } + if this.Scheme != nil { + s = append(s, "Scheme: "+valueToGoStringMesos(this.Scheme, "string")+",\n") + } + s = append(s, "Port: "+fmt.Sprintf("%#v", this.Port)+",\n") + if this.Path != nil { + s = append(s, "Path: "+valueToGoStringMesos(this.Path, "string")+",\n") + } + if this.Statuses != nil { + s = append(s, "Statuses: "+fmt.Sprintf("%#v", this.Statuses)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *HealthCheck_TCPCheckInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&mesos.HealthCheck_TCPCheckInfo{") + if this.Protocol != nil { + s = append(s, "Protocol: "+valueToGoStringMesos(this.Protocol, "NetworkInfo_Protocol")+",\n") + } + s = append(s, "Port: "+fmt.Sprintf("%#v", this.Port)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *KillPolicy) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.KillPolicy{") + if this.GracePeriod != nil { + s = append(s, "GracePeriod: "+fmt.Sprintf("%#v", this.GracePeriod)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CommandInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&mesos.CommandInfo{") + if this.URIs != nil { + s = append(s, "URIs: "+fmt.Sprintf("%#v", this.URIs)+",\n") + } + if this.Environment != nil { + s = append(s, "Environment: "+fmt.Sprintf("%#v", this.Environment)+",\n") + } + if this.Shell != nil { + s = append(s, "Shell: "+valueToGoStringMesos(this.Shell, "bool")+",\n") + } + if this.Value != nil { + s = append(s, "Value: "+valueToGoStringMesos(this.Value, "string")+",\n") + } + if this.Arguments != nil { + s = append(s, "Arguments: "+fmt.Sprintf("%#v", this.Arguments)+",\n") + } + if this.User != nil { + s = append(s, "User: "+valueToGoStringMesos(this.User, "string")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CommandInfo_URI) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&mesos.CommandInfo_URI{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if 
this.Executable != nil { + s = append(s, "Executable: "+valueToGoStringMesos(this.Executable, "bool")+",\n") + } + if this.Extract != nil { + s = append(s, "Extract: "+valueToGoStringMesos(this.Extract, "bool")+",\n") + } + if this.Cache != nil { + s = append(s, "Cache: "+valueToGoStringMesos(this.Cache, "bool")+",\n") + } + if this.OutputFile != nil { + s = append(s, "OutputFile: "+valueToGoStringMesos(this.OutputFile, "string")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ExecutorInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 16) + s = append(s, "&mesos.ExecutorInfo{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + s = append(s, "ExecutorID: "+strings.Replace(this.ExecutorID.GoString(), `&`, ``, 1)+",\n") + if this.FrameworkID != nil { + s = append(s, "FrameworkID: "+fmt.Sprintf("%#v", this.FrameworkID)+",\n") + } + if this.Command != nil { + s = append(s, "Command: "+fmt.Sprintf("%#v", this.Command)+",\n") + } + if this.Container != nil { + s = append(s, "Container: "+fmt.Sprintf("%#v", this.Container)+",\n") + } + if this.Resources != nil { + s = append(s, "Resources: "+fmt.Sprintf("%#v", this.Resources)+",\n") + } + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringMesos(this.Name, "string")+",\n") + } + if this.Source != nil { + s = append(s, "Source: "+valueToGoStringMesos(this.Source, "string")+",\n") + } + if this.Data != nil { + s = append(s, "Data: "+valueToGoStringMesos(this.Data, "byte")+",\n") + } + if this.Discovery != nil { + s = append(s, "Discovery: "+fmt.Sprintf("%#v", this.Discovery)+",\n") + } + if this.ShutdownGracePeriod != nil { + s = append(s, "ShutdownGracePeriod: "+fmt.Sprintf("%#v", this.ShutdownGracePeriod)+",\n") + } + if this.Labels != nil { + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DomainInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.DomainInfo{") + if this.FaultDomain != nil { + s = append(s, "FaultDomain: "+fmt.Sprintf("%#v", this.FaultDomain)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DomainInfo_FaultDomain) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&mesos.DomainInfo_FaultDomain{") + s = append(s, "Region: "+strings.Replace(this.Region.GoString(), `&`, ``, 1)+",\n") + s = append(s, "Zone: "+strings.Replace(this.Zone.GoString(), `&`, ``, 1)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DomainInfo_FaultDomain_RegionInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.DomainInfo_FaultDomain_RegionInfo{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DomainInfo_FaultDomain_ZoneInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.DomainInfo_FaultDomain_ZoneInfo{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MasterInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 13) + s = append(s, "&mesos.MasterInfo{") + s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") + s = append(s, "IP: "+fmt.Sprintf("%#v", this.IP)+",\n") + if this.Port != nil { + s = 
append(s, "Port: "+valueToGoStringMesos(this.Port, "uint32")+",\n") + } + if this.PID != nil { + s = append(s, "PID: "+valueToGoStringMesos(this.PID, "string")+",\n") + } + if this.Hostname != nil { + s = append(s, "Hostname: "+valueToGoStringMesos(this.Hostname, "string")+",\n") + } + if this.Version != nil { + s = append(s, "Version: "+valueToGoStringMesos(this.Version, "string")+",\n") + } + if this.Address != nil { + s = append(s, "Address: "+fmt.Sprintf("%#v", this.Address)+",\n") + } + if this.Domain != nil { + s = append(s, "Domain: "+fmt.Sprintf("%#v", this.Domain)+",\n") + } + if this.Capabilities != nil { + s = append(s, "Capabilities: "+fmt.Sprintf("%#v", this.Capabilities)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MasterInfo_Capability) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.MasterInfo_Capability{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *AgentInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&mesos.AgentInfo{") + s = append(s, "Hostname: "+fmt.Sprintf("%#v", this.Hostname)+",\n") + if this.Port != nil { + s = append(s, "Port: "+valueToGoStringMesos(this.Port, "int32")+",\n") + } + if this.Resources != nil { + s = append(s, "Resources: "+fmt.Sprintf("%#v", this.Resources)+",\n") + } + if this.Attributes != nil { + s = append(s, "Attributes: "+fmt.Sprintf("%#v", this.Attributes)+",\n") + } + if this.ID != nil { + s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") + } + if this.Domain != nil { + s = append(s, "Domain: "+fmt.Sprintf("%#v", this.Domain)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *AgentInfo_Capability) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.AgentInfo_Capability{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CSIPluginContainerInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&mesos.CSIPluginContainerInfo{") + if this.Services != nil { + s = append(s, "Services: "+fmt.Sprintf("%#v", this.Services)+",\n") + } + if this.Command != nil { + s = append(s, "Command: "+fmt.Sprintf("%#v", this.Command)+",\n") + } + if this.Resources != nil { + s = append(s, "Resources: "+fmt.Sprintf("%#v", this.Resources)+",\n") + } + if this.Container != nil { + s = append(s, "Container: "+fmt.Sprintf("%#v", this.Container)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CSIPluginInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&mesos.CSIPluginInfo{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Containers != nil { + s = append(s, "Containers: "+fmt.Sprintf("%#v", this.Containers)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ResourceProviderInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&mesos.ResourceProviderInfo{") + if this.ID != nil { + s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") + } + if this.Attributes != nil { + s = append(s, "Attributes: "+fmt.Sprintf("%#v", this.Attributes)+",\n") + } + s = append(s, "Type: 
"+fmt.Sprintf("%#v", this.Type)+",\n") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.DefaultReservations != nil { + s = append(s, "DefaultReservations: "+fmt.Sprintf("%#v", this.DefaultReservations)+",\n") + } + if this.Storage != nil { + s = append(s, "Storage: "+fmt.Sprintf("%#v", this.Storage)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ResourceProviderInfo_Storage) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.ResourceProviderInfo_Storage{") + s = append(s, "Plugin: "+strings.Replace(this.Plugin.GoString(), `&`, ``, 1)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&mesos.Value{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + if this.Scalar != nil { + s = append(s, "Scalar: "+fmt.Sprintf("%#v", this.Scalar)+",\n") + } + if this.Ranges != nil { + s = append(s, "Ranges: "+fmt.Sprintf("%#v", this.Ranges)+",\n") + } + if this.Set != nil { + s = append(s, "Set: "+fmt.Sprintf("%#v", this.Set)+",\n") + } + if this.Text != nil { + s = append(s, "Text: "+fmt.Sprintf("%#v", this.Text)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Value_Scalar) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.Value_Scalar{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Value_Range) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&mesos.Value_Range{") + s = append(s, "Begin: "+fmt.Sprintf("%#v", this.Begin)+",\n") + s = append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Value_Ranges) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.Value_Ranges{") + if this.Range != nil { + s = append(s, "Range: "+fmt.Sprintf("%#v", this.Range)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Value_Set) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.Value_Set{") + if this.Item != nil { + s = append(s, "Item: "+fmt.Sprintf("%#v", this.Item)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Value_Text) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.Value_Text{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Attribute) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&mesos.Attribute{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + if this.Scalar != nil { + s = append(s, "Scalar: "+fmt.Sprintf("%#v", this.Scalar)+",\n") + } + if this.Ranges != nil { + s = append(s, "Ranges: "+fmt.Sprintf("%#v", this.Ranges)+",\n") + } + if this.Set != nil { + s = append(s, "Set: "+fmt.Sprintf("%#v", this.Set)+",\n") + } + if this.Text != nil { + s = append(s, "Text: "+fmt.Sprintf("%#v", this.Text)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Resource) GoString() string { + if this == nil { + return 
"nil" + } + s := make([]string, 0, 17) + s = append(s, "&mesos.Resource{") + if this.ProviderID != nil { + s = append(s, "ProviderID: "+fmt.Sprintf("%#v", this.ProviderID)+",\n") + } + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Type != nil { + s = append(s, "Type: "+valueToGoStringMesos(this.Type, "Value_Type")+",\n") + } + if this.Scalar != nil { + s = append(s, "Scalar: "+fmt.Sprintf("%#v", this.Scalar)+",\n") + } + if this.Ranges != nil { + s = append(s, "Ranges: "+fmt.Sprintf("%#v", this.Ranges)+",\n") + } + if this.Set != nil { + s = append(s, "Set: "+fmt.Sprintf("%#v", this.Set)+",\n") + } + if this.Role != nil { + s = append(s, "Role: "+valueToGoStringMesos(this.Role, "string")+",\n") + } + if this.AllocationInfo != nil { + s = append(s, "AllocationInfo: "+fmt.Sprintf("%#v", this.AllocationInfo)+",\n") + } + if this.Reservation != nil { + s = append(s, "Reservation: "+fmt.Sprintf("%#v", this.Reservation)+",\n") + } + if this.Reservations != nil { + s = append(s, "Reservations: "+fmt.Sprintf("%#v", this.Reservations)+",\n") + } + if this.Disk != nil { + s = append(s, "Disk: "+fmt.Sprintf("%#v", this.Disk)+",\n") + } + if this.Revocable != nil { + s = append(s, "Revocable: "+fmt.Sprintf("%#v", this.Revocable)+",\n") + } + if this.Shared != nil { + s = append(s, "Shared: "+fmt.Sprintf("%#v", this.Shared)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Resource_AllocationInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.Resource_AllocationInfo{") + if this.Role != nil { + s = append(s, "Role: "+valueToGoStringMesos(this.Role, "string")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Resource_ReservationInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&mesos.Resource_ReservationInfo{") + if this.Type != nil { + s = append(s, "Type: "+valueToGoStringMesos(this.Type, "Resource_ReservationInfo_Type")+",\n") + } + if this.Role != nil { + s = append(s, "Role: "+valueToGoStringMesos(this.Role, "string")+",\n") + } + if this.Principal != nil { + s = append(s, "Principal: "+valueToGoStringMesos(this.Principal, "string")+",\n") + } + if this.Labels != nil { + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Resource_DiskInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&mesos.Resource_DiskInfo{") + if this.Persistence != nil { + s = append(s, "Persistence: "+fmt.Sprintf("%#v", this.Persistence)+",\n") + } + if this.Volume != nil { + s = append(s, "Volume: "+fmt.Sprintf("%#v", this.Volume)+",\n") + } + if this.Source != nil { + s = append(s, "Source: "+fmt.Sprintf("%#v", this.Source)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Resource_DiskInfo_Persistence) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&mesos.Resource_DiskInfo_Persistence{") + s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") + if this.Principal != nil { + s = append(s, "Principal: "+valueToGoStringMesos(this.Principal, "string")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Resource_DiskInfo_Source) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&mesos.Resource_DiskInfo_Source{") + s = append(s, 
"Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.Mount != nil { + s = append(s, "Mount: "+fmt.Sprintf("%#v", this.Mount)+",\n") + } + if this.ID != nil { + s = append(s, "ID: "+valueToGoStringMesos(this.ID, "string")+",\n") + } + if this.Metadata != nil { + s = append(s, "Metadata: "+fmt.Sprintf("%#v", this.Metadata)+",\n") + } + if this.Profile != nil { + s = append(s, "Profile: "+valueToGoStringMesos(this.Profile, "string")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Resource_DiskInfo_Source_Path) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.Resource_DiskInfo_Source_Path{") + if this.Root != nil { + s = append(s, "Root: "+valueToGoStringMesos(this.Root, "string")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Resource_DiskInfo_Source_Mount) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.Resource_DiskInfo_Source_Mount{") + if this.Root != nil { + s = append(s, "Root: "+valueToGoStringMesos(this.Root, "string")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Resource_RevocableInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&mesos.Resource_RevocableInfo{") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Resource_SharedInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&mesos.Resource_SharedInfo{") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TrafficControlStatistics) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&mesos.TrafficControlStatistics{") + s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") + if this.Backlog != nil { + s = append(s, "Backlog: "+valueToGoStringMesos(this.Backlog, "uint64")+",\n") + } + if this.Bytes != nil { + s = append(s, "Bytes: "+valueToGoStringMesos(this.Bytes, "uint64")+",\n") + } + if this.Drops != nil { + s = append(s, "Drops: "+valueToGoStringMesos(this.Drops, "uint64")+",\n") + } + if this.Overlimits != nil { + s = append(s, "Overlimits: "+valueToGoStringMesos(this.Overlimits, "uint64")+",\n") + } + if this.Packets != nil { + s = append(s, "Packets: "+valueToGoStringMesos(this.Packets, "uint64")+",\n") + } + if this.Qlen != nil { + s = append(s, "Qlen: "+valueToGoStringMesos(this.Qlen, "uint64")+",\n") + } + if this.RateBPS != nil { + s = append(s, "RateBPS: "+valueToGoStringMesos(this.RateBPS, "uint64")+",\n") + } + if this.RatePPS != nil { + s = append(s, "RatePPS: "+valueToGoStringMesos(this.RatePPS, "uint64")+",\n") + } + if this.Requeues != nil { + s = append(s, "Requeues: "+valueToGoStringMesos(this.Requeues, "uint64")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *IpStatistics) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 23) + s = append(s, "&mesos.IpStatistics{") + if this.Forwarding != nil { + s = append(s, "Forwarding: "+valueToGoStringMesos(this.Forwarding, "int64")+",\n") + } + if this.DefaultTTL != nil { + s = append(s, "DefaultTTL: "+valueToGoStringMesos(this.DefaultTTL, "int64")+",\n") + } + if this.InReceives != nil { + s = append(s, "InReceives: "+valueToGoStringMesos(this.InReceives, "int64")+",\n") + } + if this.InHdrErrors != nil { + s = 
append(s, "InHdrErrors: "+valueToGoStringMesos(this.InHdrErrors, "int64")+",\n") + } + if this.InAddrErrors != nil { + s = append(s, "InAddrErrors: "+valueToGoStringMesos(this.InAddrErrors, "int64")+",\n") + } + if this.ForwDatagrams != nil { + s = append(s, "ForwDatagrams: "+valueToGoStringMesos(this.ForwDatagrams, "int64")+",\n") + } + if this.InUnknownProtos != nil { + s = append(s, "InUnknownProtos: "+valueToGoStringMesos(this.InUnknownProtos, "int64")+",\n") + } + if this.InDiscards != nil { + s = append(s, "InDiscards: "+valueToGoStringMesos(this.InDiscards, "int64")+",\n") + } + if this.InDelivers != nil { + s = append(s, "InDelivers: "+valueToGoStringMesos(this.InDelivers, "int64")+",\n") + } + if this.OutRequests != nil { + s = append(s, "OutRequests: "+valueToGoStringMesos(this.OutRequests, "int64")+",\n") + } + if this.OutDiscards != nil { + s = append(s, "OutDiscards: "+valueToGoStringMesos(this.OutDiscards, "int64")+",\n") + } + if this.OutNoRoutes != nil { + s = append(s, "OutNoRoutes: "+valueToGoStringMesos(this.OutNoRoutes, "int64")+",\n") + } + if this.ReasmTimeout != nil { + s = append(s, "ReasmTimeout: "+valueToGoStringMesos(this.ReasmTimeout, "int64")+",\n") + } + if this.ReasmReqds != nil { + s = append(s, "ReasmReqds: "+valueToGoStringMesos(this.ReasmReqds, "int64")+",\n") + } + if this.ReasmOKs != nil { + s = append(s, "ReasmOKs: "+valueToGoStringMesos(this.ReasmOKs, "int64")+",\n") + } + if this.ReasmFails != nil { + s = append(s, "ReasmFails: "+valueToGoStringMesos(this.ReasmFails, "int64")+",\n") + } + if this.FragOKs != nil { + s = append(s, "FragOKs: "+valueToGoStringMesos(this.FragOKs, "int64")+",\n") + } + if this.FragFails != nil { + s = append(s, "FragFails: "+valueToGoStringMesos(this.FragFails, "int64")+",\n") + } + if this.FragCreates != nil { + s = append(s, "FragCreates: "+valueToGoStringMesos(this.FragCreates, "int64")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *IcmpStatistics) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 31) + s = append(s, "&mesos.IcmpStatistics{") + if this.InMsgs != nil { + s = append(s, "InMsgs: "+valueToGoStringMesos(this.InMsgs, "int64")+",\n") + } + if this.InErrors != nil { + s = append(s, "InErrors: "+valueToGoStringMesos(this.InErrors, "int64")+",\n") + } + if this.InCsumErrors != nil { + s = append(s, "InCsumErrors: "+valueToGoStringMesos(this.InCsumErrors, "int64")+",\n") + } + if this.InDestUnreachs != nil { + s = append(s, "InDestUnreachs: "+valueToGoStringMesos(this.InDestUnreachs, "int64")+",\n") + } + if this.InTimeExcds != nil { + s = append(s, "InTimeExcds: "+valueToGoStringMesos(this.InTimeExcds, "int64")+",\n") + } + if this.InParmProbs != nil { + s = append(s, "InParmProbs: "+valueToGoStringMesos(this.InParmProbs, "int64")+",\n") + } + if this.InSrcQuenchs != nil { + s = append(s, "InSrcQuenchs: "+valueToGoStringMesos(this.InSrcQuenchs, "int64")+",\n") + } + if this.InRedirects != nil { + s = append(s, "InRedirects: "+valueToGoStringMesos(this.InRedirects, "int64")+",\n") + } + if this.InEchos != nil { + s = append(s, "InEchos: "+valueToGoStringMesos(this.InEchos, "int64")+",\n") + } + if this.InEchoReps != nil { + s = append(s, "InEchoReps: "+valueToGoStringMesos(this.InEchoReps, "int64")+",\n") + } + if this.InTimestamps != nil { + s = append(s, "InTimestamps: "+valueToGoStringMesos(this.InTimestamps, "int64")+",\n") + } + if this.InTimestampReps != nil { + s = append(s, "InTimestampReps: "+valueToGoStringMesos(this.InTimestampReps, 
"int64")+",\n") + } + if this.InAddrMasks != nil { + s = append(s, "InAddrMasks: "+valueToGoStringMesos(this.InAddrMasks, "int64")+",\n") + } + if this.InAddrMaskReps != nil { + s = append(s, "InAddrMaskReps: "+valueToGoStringMesos(this.InAddrMaskReps, "int64")+",\n") + } + if this.OutMsgs != nil { + s = append(s, "OutMsgs: "+valueToGoStringMesos(this.OutMsgs, "int64")+",\n") + } + if this.OutErrors != nil { + s = append(s, "OutErrors: "+valueToGoStringMesos(this.OutErrors, "int64")+",\n") + } + if this.OutDestUnreachs != nil { + s = append(s, "OutDestUnreachs: "+valueToGoStringMesos(this.OutDestUnreachs, "int64")+",\n") + } + if this.OutTimeExcds != nil { + s = append(s, "OutTimeExcds: "+valueToGoStringMesos(this.OutTimeExcds, "int64")+",\n") + } + if this.OutParmProbs != nil { + s = append(s, "OutParmProbs: "+valueToGoStringMesos(this.OutParmProbs, "int64")+",\n") + } + if this.OutSrcQuenchs != nil { + s = append(s, "OutSrcQuenchs: "+valueToGoStringMesos(this.OutSrcQuenchs, "int64")+",\n") + } + if this.OutRedirects != nil { + s = append(s, "OutRedirects: "+valueToGoStringMesos(this.OutRedirects, "int64")+",\n") + } + if this.OutEchos != nil { + s = append(s, "OutEchos: "+valueToGoStringMesos(this.OutEchos, "int64")+",\n") + } + if this.OutEchoReps != nil { + s = append(s, "OutEchoReps: "+valueToGoStringMesos(this.OutEchoReps, "int64")+",\n") + } + if this.OutTimestamps != nil { + s = append(s, "OutTimestamps: "+valueToGoStringMesos(this.OutTimestamps, "int64")+",\n") + } + if this.OutTimestampReps != nil { + s = append(s, "OutTimestampReps: "+valueToGoStringMesos(this.OutTimestampReps, "int64")+",\n") + } + if this.OutAddrMasks != nil { + s = append(s, "OutAddrMasks: "+valueToGoStringMesos(this.OutAddrMasks, "int64")+",\n") + } + if this.OutAddrMaskReps != nil { + s = append(s, "OutAddrMaskReps: "+valueToGoStringMesos(this.OutAddrMaskReps, "int64")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TcpStatistics) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 19) + s = append(s, "&mesos.TcpStatistics{") + if this.RtoAlgorithm != nil { + s = append(s, "RtoAlgorithm: "+valueToGoStringMesos(this.RtoAlgorithm, "int64")+",\n") + } + if this.RtoMin != nil { + s = append(s, "RtoMin: "+valueToGoStringMesos(this.RtoMin, "int64")+",\n") + } + if this.RtoMax != nil { + s = append(s, "RtoMax: "+valueToGoStringMesos(this.RtoMax, "int64")+",\n") + } + if this.MaxConn != nil { + s = append(s, "MaxConn: "+valueToGoStringMesos(this.MaxConn, "int64")+",\n") + } + if this.ActiveOpens != nil { + s = append(s, "ActiveOpens: "+valueToGoStringMesos(this.ActiveOpens, "int64")+",\n") + } + if this.PassiveOpens != nil { + s = append(s, "PassiveOpens: "+valueToGoStringMesos(this.PassiveOpens, "int64")+",\n") + } + if this.AttemptFails != nil { + s = append(s, "AttemptFails: "+valueToGoStringMesos(this.AttemptFails, "int64")+",\n") + } + if this.EstabResets != nil { + s = append(s, "EstabResets: "+valueToGoStringMesos(this.EstabResets, "int64")+",\n") + } + if this.CurrEstab != nil { + s = append(s, "CurrEstab: "+valueToGoStringMesos(this.CurrEstab, "int64")+",\n") + } + if this.InSegs != nil { + s = append(s, "InSegs: "+valueToGoStringMesos(this.InSegs, "int64")+",\n") + } + if this.OutSegs != nil { + s = append(s, "OutSegs: "+valueToGoStringMesos(this.OutSegs, "int64")+",\n") + } + if this.RetransSegs != nil { + s = append(s, "RetransSegs: "+valueToGoStringMesos(this.RetransSegs, "int64")+",\n") + } + if this.InErrs != nil { + s = append(s, "InErrs: 
"+valueToGoStringMesos(this.InErrs, "int64")+",\n") + } + if this.OutRsts != nil { + s = append(s, "OutRsts: "+valueToGoStringMesos(this.OutRsts, "int64")+",\n") + } + if this.InCsumErrors != nil { + s = append(s, "InCsumErrors: "+valueToGoStringMesos(this.InCsumErrors, "int64")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UdpStatistics) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 12) + s = append(s, "&mesos.UdpStatistics{") + if this.InDatagrams != nil { + s = append(s, "InDatagrams: "+valueToGoStringMesos(this.InDatagrams, "int64")+",\n") + } + if this.NoPorts != nil { + s = append(s, "NoPorts: "+valueToGoStringMesos(this.NoPorts, "int64")+",\n") + } + if this.InErrors != nil { + s = append(s, "InErrors: "+valueToGoStringMesos(this.InErrors, "int64")+",\n") + } + if this.OutDatagrams != nil { + s = append(s, "OutDatagrams: "+valueToGoStringMesos(this.OutDatagrams, "int64")+",\n") + } + if this.RcvbufErrors != nil { + s = append(s, "RcvbufErrors: "+valueToGoStringMesos(this.RcvbufErrors, "int64")+",\n") + } + if this.SndbufErrors != nil { + s = append(s, "SndbufErrors: "+valueToGoStringMesos(this.SndbufErrors, "int64")+",\n") + } + if this.InCsumErrors != nil { + s = append(s, "InCsumErrors: "+valueToGoStringMesos(this.InCsumErrors, "int64")+",\n") + } + if this.IgnoredMulti != nil { + s = append(s, "IgnoredMulti: "+valueToGoStringMesos(this.IgnoredMulti, "int64")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SNMPStatistics) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&mesos.SNMPStatistics{") + if this.IPStats != nil { + s = append(s, "IPStats: "+fmt.Sprintf("%#v", this.IPStats)+",\n") + } + if this.ICMPStats != nil { + s = append(s, "ICMPStats: "+fmt.Sprintf("%#v", this.ICMPStats)+",\n") + } + if this.TCPStats != nil { + s = append(s, "TCPStats: "+fmt.Sprintf("%#v", this.TCPStats)+",\n") + } + if this.UDPStats != nil { + s = append(s, "UDPStats: "+fmt.Sprintf("%#v", this.UDPStats)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DiskStatistics) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&mesos.DiskStatistics{") + if this.Source != nil { + s = append(s, "Source: "+fmt.Sprintf("%#v", this.Source)+",\n") + } + if this.Persistence != nil { + s = append(s, "Persistence: "+fmt.Sprintf("%#v", this.Persistence)+",\n") + } + if this.LimitBytes != nil { + s = append(s, "LimitBytes: "+valueToGoStringMesos(this.LimitBytes, "uint64")+",\n") + } + if this.UsedBytes != nil { + s = append(s, "UsedBytes: "+valueToGoStringMesos(this.UsedBytes, "uint64")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ResourceStatistics) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 48) + s = append(s, "&mesos.ResourceStatistics{") + s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n") + if this.Processes != nil { + s = append(s, "Processes: "+valueToGoStringMesos(this.Processes, "uint32")+",\n") + } + if this.Threads != nil { + s = append(s, "Threads: "+valueToGoStringMesos(this.Threads, "uint32")+",\n") + } + if this.CPUsUserTimeSecs != nil { + s = append(s, "CPUsUserTimeSecs: "+valueToGoStringMesos(this.CPUsUserTimeSecs, "float64")+",\n") + } + if this.CPUsSystemTimeSecs != nil { + s = append(s, "CPUsSystemTimeSecs: "+valueToGoStringMesos(this.CPUsSystemTimeSecs, "float64")+",\n") + } + if 
this.CPUsLimit != nil { + s = append(s, "CPUsLimit: "+valueToGoStringMesos(this.CPUsLimit, "float64")+",\n") + } + if this.CPUsNrPeriods != nil { + s = append(s, "CPUsNrPeriods: "+valueToGoStringMesos(this.CPUsNrPeriods, "uint32")+",\n") + } + if this.CPUsNrThrottled != nil { + s = append(s, "CPUsNrThrottled: "+valueToGoStringMesos(this.CPUsNrThrottled, "uint32")+",\n") + } + if this.CPUsThrottledTimeSecs != nil { + s = append(s, "CPUsThrottledTimeSecs: "+valueToGoStringMesos(this.CPUsThrottledTimeSecs, "float64")+",\n") + } + if this.MemTotalBytes != nil { + s = append(s, "MemTotalBytes: "+valueToGoStringMesos(this.MemTotalBytes, "uint64")+",\n") + } + if this.MemTotalMemswBytes != nil { + s = append(s, "MemTotalMemswBytes: "+valueToGoStringMesos(this.MemTotalMemswBytes, "uint64")+",\n") + } + if this.MemLimitBytes != nil { + s = append(s, "MemLimitBytes: "+valueToGoStringMesos(this.MemLimitBytes, "uint64")+",\n") + } + if this.MemSoftLimitBytes != nil { + s = append(s, "MemSoftLimitBytes: "+valueToGoStringMesos(this.MemSoftLimitBytes, "uint64")+",\n") + } + if this.MemFileBytes != nil { + s = append(s, "MemFileBytes: "+valueToGoStringMesos(this.MemFileBytes, "uint64")+",\n") + } + if this.MemAnonBytes != nil { + s = append(s, "MemAnonBytes: "+valueToGoStringMesos(this.MemAnonBytes, "uint64")+",\n") + } + if this.MemCacheBytes != nil { + s = append(s, "MemCacheBytes: "+valueToGoStringMesos(this.MemCacheBytes, "uint64")+",\n") + } + if this.MemRSSBytes != nil { + s = append(s, "MemRSSBytes: "+valueToGoStringMesos(this.MemRSSBytes, "uint64")+",\n") + } + if this.MemMappedFileBytes != nil { + s = append(s, "MemMappedFileBytes: "+valueToGoStringMesos(this.MemMappedFileBytes, "uint64")+",\n") + } + if this.MemSwapBytes != nil { + s = append(s, "MemSwapBytes: "+valueToGoStringMesos(this.MemSwapBytes, "uint64")+",\n") + } + if this.MemUnevictableBytes != nil { + s = append(s, "MemUnevictableBytes: "+valueToGoStringMesos(this.MemUnevictableBytes, "uint64")+",\n") + } + if this.MemLowPressureCounter != nil { + s = append(s, "MemLowPressureCounter: "+valueToGoStringMesos(this.MemLowPressureCounter, "uint64")+",\n") + } + if this.MemMediumPressureCounter != nil { + s = append(s, "MemMediumPressureCounter: "+valueToGoStringMesos(this.MemMediumPressureCounter, "uint64")+",\n") + } + if this.MemCriticalPressureCounter != nil { + s = append(s, "MemCriticalPressureCounter: "+valueToGoStringMesos(this.MemCriticalPressureCounter, "uint64")+",\n") + } + if this.DiskLimitBytes != nil { + s = append(s, "DiskLimitBytes: "+valueToGoStringMesos(this.DiskLimitBytes, "uint64")+",\n") + } + if this.DiskUsedBytes != nil { + s = append(s, "DiskUsedBytes: "+valueToGoStringMesos(this.DiskUsedBytes, "uint64")+",\n") + } + if this.DiskStatistics != nil { + s = append(s, "DiskStatistics: "+fmt.Sprintf("%#v", this.DiskStatistics)+",\n") + } + if this.BlkioStatistics != nil { + s = append(s, "BlkioStatistics: "+fmt.Sprintf("%#v", this.BlkioStatistics)+",\n") + } + if this.Perf != nil { + s = append(s, "Perf: "+fmt.Sprintf("%#v", this.Perf)+",\n") + } + if this.NetRxPackets != nil { + s = append(s, "NetRxPackets: "+valueToGoStringMesos(this.NetRxPackets, "uint64")+",\n") + } + if this.NetRxBytes != nil { + s = append(s, "NetRxBytes: "+valueToGoStringMesos(this.NetRxBytes, "uint64")+",\n") + } + if this.NetRxErrors != nil { + s = append(s, "NetRxErrors: "+valueToGoStringMesos(this.NetRxErrors, "uint64")+",\n") + } + if this.NetRxDropped != nil { + s = append(s, "NetRxDropped: "+valueToGoStringMesos(this.NetRxDropped, 
"uint64")+",\n") + } + if this.NetTxPackets != nil { + s = append(s, "NetTxPackets: "+valueToGoStringMesos(this.NetTxPackets, "uint64")+",\n") + } + if this.NetTxBytes != nil { + s = append(s, "NetTxBytes: "+valueToGoStringMesos(this.NetTxBytes, "uint64")+",\n") + } + if this.NetTxErrors != nil { + s = append(s, "NetTxErrors: "+valueToGoStringMesos(this.NetTxErrors, "uint64")+",\n") + } + if this.NetTxDropped != nil { + s = append(s, "NetTxDropped: "+valueToGoStringMesos(this.NetTxDropped, "uint64")+",\n") + } + if this.NetTCPRttMicrosecsP50 != nil { + s = append(s, "NetTCPRttMicrosecsP50: "+valueToGoStringMesos(this.NetTCPRttMicrosecsP50, "float64")+",\n") + } + if this.NetTCPRttMicrosecsP90 != nil { + s = append(s, "NetTCPRttMicrosecsP90: "+valueToGoStringMesos(this.NetTCPRttMicrosecsP90, "float64")+",\n") + } + if this.NetTCPRttMicrosecsP95 != nil { + s = append(s, "NetTCPRttMicrosecsP95: "+valueToGoStringMesos(this.NetTCPRttMicrosecsP95, "float64")+",\n") + } + if this.NetTCPRttMicrosecsP99 != nil { + s = append(s, "NetTCPRttMicrosecsP99: "+valueToGoStringMesos(this.NetTCPRttMicrosecsP99, "float64")+",\n") + } + if this.NetTCPActiveConnections != nil { + s = append(s, "NetTCPActiveConnections: "+valueToGoStringMesos(this.NetTCPActiveConnections, "float64")+",\n") + } + if this.NetTCPTimeWaitConnections != nil { + s = append(s, "NetTCPTimeWaitConnections: "+valueToGoStringMesos(this.NetTCPTimeWaitConnections, "float64")+",\n") + } + if this.NetTrafficControlStatistics != nil { + s = append(s, "NetTrafficControlStatistics: "+fmt.Sprintf("%#v", this.NetTrafficControlStatistics)+",\n") + } + if this.NetSNMPStatistics != nil { + s = append(s, "NetSNMPStatistics: "+fmt.Sprintf("%#v", this.NetSNMPStatistics)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ResourceUsage) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&mesos.ResourceUsage{") + if this.Executors != nil { + s = append(s, "Executors: "+fmt.Sprintf("%#v", this.Executors)+",\n") + } + if this.Total != nil { + s = append(s, "Total: "+fmt.Sprintf("%#v", this.Total)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ResourceUsage_Executor) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&mesos.ResourceUsage_Executor{") + s = append(s, "ExecutorInfo: "+strings.Replace(this.ExecutorInfo.GoString(), `&`, ``, 1)+",\n") + if this.Allocated != nil { + s = append(s, "Allocated: "+fmt.Sprintf("%#v", this.Allocated)+",\n") + } + if this.Statistics != nil { + s = append(s, "Statistics: "+fmt.Sprintf("%#v", this.Statistics)+",\n") + } + s = append(s, "ContainerID: "+strings.Replace(this.ContainerID.GoString(), `&`, ``, 1)+",\n") + if this.Tasks != nil { + s = append(s, "Tasks: "+fmt.Sprintf("%#v", this.Tasks)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ResourceUsage_Executor_Task) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&mesos.ResourceUsage_Executor_Task{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "ID: "+strings.Replace(this.ID.GoString(), `&`, ``, 1)+",\n") + if this.Resources != nil { + s = append(s, "Resources: "+fmt.Sprintf("%#v", this.Resources)+",\n") + } + if this.Labels != nil { + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *PerfStatistics) GoString() string 
{ + if this == nil { + return "nil" + } + s := make([]string, 0, 57) + s = append(s, "&mesos.PerfStatistics{") + s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n") + s = append(s, "Duration: "+fmt.Sprintf("%#v", this.Duration)+",\n") + if this.Cycles != nil { + s = append(s, "Cycles: "+valueToGoStringMesos(this.Cycles, "uint64")+",\n") + } + if this.StalledCyclesFrontend != nil { + s = append(s, "StalledCyclesFrontend: "+valueToGoStringMesos(this.StalledCyclesFrontend, "uint64")+",\n") + } + if this.StalledCyclesBackend != nil { + s = append(s, "StalledCyclesBackend: "+valueToGoStringMesos(this.StalledCyclesBackend, "uint64")+",\n") + } + if this.Instructions != nil { + s = append(s, "Instructions: "+valueToGoStringMesos(this.Instructions, "uint64")+",\n") + } + if this.CacheReferences != nil { + s = append(s, "CacheReferences: "+valueToGoStringMesos(this.CacheReferences, "uint64")+",\n") + } + if this.CacheMisses != nil { + s = append(s, "CacheMisses: "+valueToGoStringMesos(this.CacheMisses, "uint64")+",\n") + } + if this.Branches != nil { + s = append(s, "Branches: "+valueToGoStringMesos(this.Branches, "uint64")+",\n") + } + if this.BranchMisses != nil { + s = append(s, "BranchMisses: "+valueToGoStringMesos(this.BranchMisses, "uint64")+",\n") + } + if this.BusCycles != nil { + s = append(s, "BusCycles: "+valueToGoStringMesos(this.BusCycles, "uint64")+",\n") + } + if this.RefCycles != nil { + s = append(s, "RefCycles: "+valueToGoStringMesos(this.RefCycles, "uint64")+",\n") + } + if this.CPUClock != nil { + s = append(s, "CPUClock: "+valueToGoStringMesos(this.CPUClock, "float64")+",\n") + } + if this.TaskClock != nil { + s = append(s, "TaskClock: "+valueToGoStringMesos(this.TaskClock, "float64")+",\n") + } + if this.PageFaults != nil { + s = append(s, "PageFaults: "+valueToGoStringMesos(this.PageFaults, "uint64")+",\n") + } + if this.MinorFaults != nil { + s = append(s, "MinorFaults: "+valueToGoStringMesos(this.MinorFaults, "uint64")+",\n") + } + if this.MajorFaults != nil { + s = append(s, "MajorFaults: "+valueToGoStringMesos(this.MajorFaults, "uint64")+",\n") + } + if this.ContextSwitches != nil { + s = append(s, "ContextSwitches: "+valueToGoStringMesos(this.ContextSwitches, "uint64")+",\n") + } + if this.CPUMigrations != nil { + s = append(s, "CPUMigrations: "+valueToGoStringMesos(this.CPUMigrations, "uint64")+",\n") + } + if this.AlignmentFaults != nil { + s = append(s, "AlignmentFaults: "+valueToGoStringMesos(this.AlignmentFaults, "uint64")+",\n") + } + if this.EmulationFaults != nil { + s = append(s, "EmulationFaults: "+valueToGoStringMesos(this.EmulationFaults, "uint64")+",\n") + } + if this.L1DcacheLoads != nil { + s = append(s, "L1DcacheLoads: "+valueToGoStringMesos(this.L1DcacheLoads, "uint64")+",\n") + } + if this.L1DcacheLoadMisses != nil { + s = append(s, "L1DcacheLoadMisses: "+valueToGoStringMesos(this.L1DcacheLoadMisses, "uint64")+",\n") + } + if this.L1DcacheStores != nil { + s = append(s, "L1DcacheStores: "+valueToGoStringMesos(this.L1DcacheStores, "uint64")+",\n") + } + if this.L1DcacheStoreMisses != nil { + s = append(s, "L1DcacheStoreMisses: "+valueToGoStringMesos(this.L1DcacheStoreMisses, "uint64")+",\n") + } + if this.L1DcachePrefetches != nil { + s = append(s, "L1DcachePrefetches: "+valueToGoStringMesos(this.L1DcachePrefetches, "uint64")+",\n") + } + if this.L1DcachePrefetchMisses != nil { + s = append(s, "L1DcachePrefetchMisses: "+valueToGoStringMesos(this.L1DcachePrefetchMisses, "uint64")+",\n") + } + if this.L1IcacheLoads != nil { + s = 
append(s, "L1IcacheLoads: "+valueToGoStringMesos(this.L1IcacheLoads, "uint64")+",\n") + } + if this.L1IcacheLoadMisses != nil { + s = append(s, "L1IcacheLoadMisses: "+valueToGoStringMesos(this.L1IcacheLoadMisses, "uint64")+",\n") + } + if this.L1IcachePrefetches != nil { + s = append(s, "L1IcachePrefetches: "+valueToGoStringMesos(this.L1IcachePrefetches, "uint64")+",\n") + } + if this.L1IcachePrefetchMisses != nil { + s = append(s, "L1IcachePrefetchMisses: "+valueToGoStringMesos(this.L1IcachePrefetchMisses, "uint64")+",\n") + } + if this.LLCLoads != nil { + s = append(s, "LLCLoads: "+valueToGoStringMesos(this.LLCLoads, "uint64")+",\n") + } + if this.LLCLoadMisses != nil { + s = append(s, "LLCLoadMisses: "+valueToGoStringMesos(this.LLCLoadMisses, "uint64")+",\n") + } + if this.LLCStores != nil { + s = append(s, "LLCStores: "+valueToGoStringMesos(this.LLCStores, "uint64")+",\n") + } + if this.LLCStoreMisses != nil { + s = append(s, "LLCStoreMisses: "+valueToGoStringMesos(this.LLCStoreMisses, "uint64")+",\n") + } + if this.LLCPrefetches != nil { + s = append(s, "LLCPrefetches: "+valueToGoStringMesos(this.LLCPrefetches, "uint64")+",\n") + } + if this.LLCPrefetchMisses != nil { + s = append(s, "LLCPrefetchMisses: "+valueToGoStringMesos(this.LLCPrefetchMisses, "uint64")+",\n") + } + if this.DTLBLoads != nil { + s = append(s, "DTLBLoads: "+valueToGoStringMesos(this.DTLBLoads, "uint64")+",\n") + } + if this.DTLBLoadMisses != nil { + s = append(s, "DTLBLoadMisses: "+valueToGoStringMesos(this.DTLBLoadMisses, "uint64")+",\n") + } + if this.DTLBStores != nil { + s = append(s, "DTLBStores: "+valueToGoStringMesos(this.DTLBStores, "uint64")+",\n") + } + if this.DTLBStoreMisses != nil { + s = append(s, "DTLBStoreMisses: "+valueToGoStringMesos(this.DTLBStoreMisses, "uint64")+",\n") + } + if this.DTLBPrefetches != nil { + s = append(s, "DTLBPrefetches: "+valueToGoStringMesos(this.DTLBPrefetches, "uint64")+",\n") + } + if this.DTLBPrefetchMisses != nil { + s = append(s, "DTLBPrefetchMisses: "+valueToGoStringMesos(this.DTLBPrefetchMisses, "uint64")+",\n") + } + if this.ITLBLoads != nil { + s = append(s, "ITLBLoads: "+valueToGoStringMesos(this.ITLBLoads, "uint64")+",\n") + } + if this.ITLBLoadMisses != nil { + s = append(s, "ITLBLoadMisses: "+valueToGoStringMesos(this.ITLBLoadMisses, "uint64")+",\n") + } + if this.BranchLoads != nil { + s = append(s, "BranchLoads: "+valueToGoStringMesos(this.BranchLoads, "uint64")+",\n") + } + if this.BranchLoadMisses != nil { + s = append(s, "BranchLoadMisses: "+valueToGoStringMesos(this.BranchLoadMisses, "uint64")+",\n") + } + if this.NodeLoads != nil { + s = append(s, "NodeLoads: "+valueToGoStringMesos(this.NodeLoads, "uint64")+",\n") + } + if this.NodeLoadMisses != nil { + s = append(s, "NodeLoadMisses: "+valueToGoStringMesos(this.NodeLoadMisses, "uint64")+",\n") + } + if this.NodeStores != nil { + s = append(s, "NodeStores: "+valueToGoStringMesos(this.NodeStores, "uint64")+",\n") + } + if this.NodeStoreMisses != nil { + s = append(s, "NodeStoreMisses: "+valueToGoStringMesos(this.NodeStoreMisses, "uint64")+",\n") + } + if this.NodePrefetches != nil { + s = append(s, "NodePrefetches: "+valueToGoStringMesos(this.NodePrefetches, "uint64")+",\n") + } + if this.NodePrefetchMisses != nil { + s = append(s, "NodePrefetchMisses: "+valueToGoStringMesos(this.NodePrefetchMisses, "uint64")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Request) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, 
"&mesos.Request{") + if this.AgentID != nil { + s = append(s, "AgentID: "+fmt.Sprintf("%#v", this.AgentID)+",\n") + } + if this.Resources != nil { + s = append(s, "Resources: "+fmt.Sprintf("%#v", this.Resources)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Offer) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 15) + s = append(s, "&mesos.Offer{") + s = append(s, "ID: "+strings.Replace(this.ID.GoString(), `&`, ``, 1)+",\n") + s = append(s, "FrameworkID: "+strings.Replace(this.FrameworkID.GoString(), `&`, ``, 1)+",\n") + s = append(s, "AgentID: "+strings.Replace(this.AgentID.GoString(), `&`, ``, 1)+",\n") + s = append(s, "Hostname: "+fmt.Sprintf("%#v", this.Hostname)+",\n") + if this.URL != nil { + s = append(s, "URL: "+fmt.Sprintf("%#v", this.URL)+",\n") + } + if this.Domain != nil { + s = append(s, "Domain: "+fmt.Sprintf("%#v", this.Domain)+",\n") + } + if this.Resources != nil { + s = append(s, "Resources: "+fmt.Sprintf("%#v", this.Resources)+",\n") + } + if this.Attributes != nil { + s = append(s, "Attributes: "+fmt.Sprintf("%#v", this.Attributes)+",\n") + } + if this.ExecutorIDs != nil { + s = append(s, "ExecutorIDs: "+fmt.Sprintf("%#v", this.ExecutorIDs)+",\n") + } + if this.Unavailability != nil { + s = append(s, "Unavailability: "+fmt.Sprintf("%#v", this.Unavailability)+",\n") + } + if this.AllocationInfo != nil { + s = append(s, "AllocationInfo: "+fmt.Sprintf("%#v", this.AllocationInfo)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Offer_Operation) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 16) + s = append(s, "&mesos.Offer_Operation{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + if this.ID != nil { + s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") + } + if this.Launch != nil { + s = append(s, "Launch: "+fmt.Sprintf("%#v", this.Launch)+",\n") + } + if this.LaunchGroup != nil { + s = append(s, "LaunchGroup: "+fmt.Sprintf("%#v", this.LaunchGroup)+",\n") + } + if this.Reserve != nil { + s = append(s, "Reserve: "+fmt.Sprintf("%#v", this.Reserve)+",\n") + } + if this.Unreserve != nil { + s = append(s, "Unreserve: "+fmt.Sprintf("%#v", this.Unreserve)+",\n") + } + if this.Create != nil { + s = append(s, "Create: "+fmt.Sprintf("%#v", this.Create)+",\n") + } + if this.Destroy != nil { + s = append(s, "Destroy: "+fmt.Sprintf("%#v", this.Destroy)+",\n") + } + if this.GrowVolume != nil { + s = append(s, "GrowVolume: "+fmt.Sprintf("%#v", this.GrowVolume)+",\n") + } + if this.ShrinkVolume != nil { + s = append(s, "ShrinkVolume: "+fmt.Sprintf("%#v", this.ShrinkVolume)+",\n") + } + if this.CreateDisk != nil { + s = append(s, "CreateDisk: "+fmt.Sprintf("%#v", this.CreateDisk)+",\n") + } + if this.DestroyDisk != nil { + s = append(s, "DestroyDisk: "+fmt.Sprintf("%#v", this.DestroyDisk)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Offer_Operation_Launch) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.Offer_Operation_Launch{") + if this.TaskInfos != nil { + s = append(s, "TaskInfos: "+fmt.Sprintf("%#v", this.TaskInfos)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Offer_Operation_LaunchGroup) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&mesos.Offer_Operation_LaunchGroup{") + s = append(s, "Executor: "+strings.Replace(this.Executor.GoString(), `&`, ``, 
1)+",\n") + s = append(s, "TaskGroup: "+strings.Replace(this.TaskGroup.GoString(), `&`, ``, 1)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Offer_Operation_Reserve) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.Offer_Operation_Reserve{") + if this.Resources != nil { + s = append(s, "Resources: "+fmt.Sprintf("%#v", this.Resources)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Offer_Operation_Unreserve) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.Offer_Operation_Unreserve{") + if this.Resources != nil { + s = append(s, "Resources: "+fmt.Sprintf("%#v", this.Resources)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Offer_Operation_Create) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.Offer_Operation_Create{") + if this.Volumes != nil { + s = append(s, "Volumes: "+fmt.Sprintf("%#v", this.Volumes)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Offer_Operation_Destroy) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.Offer_Operation_Destroy{") + if this.Volumes != nil { + s = append(s, "Volumes: "+fmt.Sprintf("%#v", this.Volumes)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Offer_Operation_GrowVolume) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&mesos.Offer_Operation_GrowVolume{") + s = append(s, "Volume: "+strings.Replace(this.Volume.GoString(), `&`, ``, 1)+",\n") + s = append(s, "Addition: "+strings.Replace(this.Addition.GoString(), `&`, ``, 1)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Offer_Operation_ShrinkVolume) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&mesos.Offer_Operation_ShrinkVolume{") + s = append(s, "Volume: "+strings.Replace(this.Volume.GoString(), `&`, ``, 1)+",\n") + s = append(s, "Subtract: "+strings.Replace(this.Subtract.GoString(), `&`, ``, 1)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Offer_Operation_CreateDisk) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&mesos.Offer_Operation_CreateDisk{") + s = append(s, "Source: "+strings.Replace(this.Source.GoString(), `&`, ``, 1)+",\n") + s = append(s, "TargetType: "+fmt.Sprintf("%#v", this.TargetType)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Offer_Operation_DestroyDisk) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.Offer_Operation_DestroyDisk{") + s = append(s, "Source: "+strings.Replace(this.Source.GoString(), `&`, ``, 1)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *InverseOffer) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&mesos.InverseOffer{") + s = append(s, "OfferID: "+strings.Replace(this.OfferID.GoString(), `&`, ``, 1)+",\n") + if this.URL != nil { + s = append(s, "URL: "+fmt.Sprintf("%#v", this.URL)+",\n") + } + s = append(s, "FrameworkID: "+strings.Replace(this.FrameworkID.GoString(), `&`, ``, 1)+",\n") + if this.AgentID != nil { + s = append(s, "AgentID: "+fmt.Sprintf("%#v", this.AgentID)+",\n") + } + s = 
append(s, "Unavailability: "+strings.Replace(this.Unavailability.GoString(), `&`, ``, 1)+",\n") + if this.Resources != nil { + s = append(s, "Resources: "+fmt.Sprintf("%#v", this.Resources)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TaskInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 18) + s = append(s, "&mesos.TaskInfo{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "TaskID: "+strings.Replace(this.TaskID.GoString(), `&`, ``, 1)+",\n") + s = append(s, "AgentID: "+strings.Replace(this.AgentID.GoString(), `&`, ``, 1)+",\n") + if this.Resources != nil { + s = append(s, "Resources: "+fmt.Sprintf("%#v", this.Resources)+",\n") + } + if this.Executor != nil { + s = append(s, "Executor: "+fmt.Sprintf("%#v", this.Executor)+",\n") + } + if this.Command != nil { + s = append(s, "Command: "+fmt.Sprintf("%#v", this.Command)+",\n") + } + if this.Container != nil { + s = append(s, "Container: "+fmt.Sprintf("%#v", this.Container)+",\n") + } + if this.HealthCheck != nil { + s = append(s, "HealthCheck: "+fmt.Sprintf("%#v", this.HealthCheck)+",\n") + } + if this.Check != nil { + s = append(s, "Check: "+fmt.Sprintf("%#v", this.Check)+",\n") + } + if this.KillPolicy != nil { + s = append(s, "KillPolicy: "+fmt.Sprintf("%#v", this.KillPolicy)+",\n") + } + if this.Data != nil { + s = append(s, "Data: "+valueToGoStringMesos(this.Data, "byte")+",\n") + } + if this.Labels != nil { + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + } + if this.Discovery != nil { + s = append(s, "Discovery: "+fmt.Sprintf("%#v", this.Discovery)+",\n") + } + if this.MaxCompletionTime != nil { + s = append(s, "MaxCompletionTime: "+fmt.Sprintf("%#v", this.MaxCompletionTime)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TaskGroupInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.TaskGroupInfo{") + if this.Tasks != nil { + s = append(s, "Tasks: "+fmt.Sprintf("%#v", this.Tasks)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Task) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 18) + s = append(s, "&mesos.Task{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "TaskID: "+strings.Replace(this.TaskID.GoString(), `&`, ``, 1)+",\n") + s = append(s, "FrameworkID: "+strings.Replace(this.FrameworkID.GoString(), `&`, ``, 1)+",\n") + if this.ExecutorID != nil { + s = append(s, "ExecutorID: "+fmt.Sprintf("%#v", this.ExecutorID)+",\n") + } + s = append(s, "AgentID: "+strings.Replace(this.AgentID.GoString(), `&`, ``, 1)+",\n") + if this.State != nil { + s = append(s, "State: "+valueToGoStringMesos(this.State, "TaskState")+",\n") + } + if this.Resources != nil { + s = append(s, "Resources: "+fmt.Sprintf("%#v", this.Resources)+",\n") + } + if this.Statuses != nil { + s = append(s, "Statuses: "+fmt.Sprintf("%#v", this.Statuses)+",\n") + } + if this.StatusUpdateState != nil { + s = append(s, "StatusUpdateState: "+valueToGoStringMesos(this.StatusUpdateState, "TaskState")+",\n") + } + if this.StatusUpdateUUID != nil { + s = append(s, "StatusUpdateUUID: "+valueToGoStringMesos(this.StatusUpdateUUID, "byte")+",\n") + } + if this.Labels != nil { + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + } + if this.Discovery != nil { + s = append(s, "Discovery: "+fmt.Sprintf("%#v", this.Discovery)+",\n") + } + if this.Container != nil { + s = 
append(s, "Container: "+fmt.Sprintf("%#v", this.Container)+",\n") + } + if this.User != nil { + s = append(s, "User: "+valueToGoStringMesos(this.User, "string")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TaskResourceLimitation) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.TaskResourceLimitation{") + if this.Resources != nil { + s = append(s, "Resources: "+fmt.Sprintf("%#v", this.Resources)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UUID) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.UUID{") + if this.Value != nil { + s = append(s, "Value: "+valueToGoStringMesos(this.Value, "byte")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Operation) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&mesos.Operation{") + if this.FrameworkID != nil { + s = append(s, "FrameworkID: "+fmt.Sprintf("%#v", this.FrameworkID)+",\n") + } + if this.AgentID != nil { + s = append(s, "AgentID: "+fmt.Sprintf("%#v", this.AgentID)+",\n") + } + s = append(s, "Info: "+strings.Replace(this.Info.GoString(), `&`, ``, 1)+",\n") + s = append(s, "LatestStatus: "+strings.Replace(this.LatestStatus.GoString(), `&`, ``, 1)+",\n") + if this.Statuses != nil { + s = append(s, "Statuses: "+fmt.Sprintf("%#v", this.Statuses)+",\n") + } + s = append(s, "UUID: "+strings.Replace(this.UUID.GoString(), `&`, ``, 1)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OperationStatus) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&mesos.OperationStatus{") + if this.OperationID != nil { + s = append(s, "OperationID: "+fmt.Sprintf("%#v", this.OperationID)+",\n") + } + s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n") + if this.Message != nil { + s = append(s, "Message: "+valueToGoStringMesos(this.Message, "string")+",\n") + } + if this.ConvertedResources != nil { + s = append(s, "ConvertedResources: "+fmt.Sprintf("%#v", this.ConvertedResources)+",\n") + } + if this.UUID != nil { + s = append(s, "UUID: "+fmt.Sprintf("%#v", this.UUID)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CheckStatusInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&mesos.CheckStatusInfo{") + if this.Type != nil { + s = append(s, "Type: "+valueToGoStringMesos(this.Type, "CheckInfo_Type")+",\n") + } + if this.Command != nil { + s = append(s, "Command: "+fmt.Sprintf("%#v", this.Command)+",\n") + } + if this.HTTP != nil { + s = append(s, "HTTP: "+fmt.Sprintf("%#v", this.HTTP)+",\n") + } + if this.TCP != nil { + s = append(s, "TCP: "+fmt.Sprintf("%#v", this.TCP)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CheckStatusInfo_Command) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.CheckStatusInfo_Command{") + if this.ExitCode != nil { + s = append(s, "ExitCode: "+valueToGoStringMesos(this.ExitCode, "int32")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CheckStatusInfo_Http) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.CheckStatusInfo_Http{") + if this.StatusCode != nil { + s = append(s, "StatusCode: "+valueToGoStringMesos(this.StatusCode, 
"uint32")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CheckStatusInfo_Tcp) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.CheckStatusInfo_Tcp{") + if this.Succeeded != nil { + s = append(s, "Succeeded: "+valueToGoStringMesos(this.Succeeded, "bool")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TaskStatus) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 20) + s = append(s, "&mesos.TaskStatus{") + s = append(s, "TaskID: "+strings.Replace(this.TaskID.GoString(), `&`, ``, 1)+",\n") + if this.State != nil { + s = append(s, "State: "+valueToGoStringMesos(this.State, "TaskState")+",\n") + } + if this.Message != nil { + s = append(s, "Message: "+valueToGoStringMesos(this.Message, "string")+",\n") + } + if this.Source != nil { + s = append(s, "Source: "+valueToGoStringMesos(this.Source, "TaskStatus_Source")+",\n") + } + if this.Reason != nil { + s = append(s, "Reason: "+valueToGoStringMesos(this.Reason, "TaskStatus_Reason")+",\n") + } + if this.Data != nil { + s = append(s, "Data: "+valueToGoStringMesos(this.Data, "byte")+",\n") + } + if this.AgentID != nil { + s = append(s, "AgentID: "+fmt.Sprintf("%#v", this.AgentID)+",\n") + } + if this.ExecutorID != nil { + s = append(s, "ExecutorID: "+fmt.Sprintf("%#v", this.ExecutorID)+",\n") + } + if this.Timestamp != nil { + s = append(s, "Timestamp: "+valueToGoStringMesos(this.Timestamp, "float64")+",\n") + } + if this.UUID != nil { + s = append(s, "UUID: "+valueToGoStringMesos(this.UUID, "byte")+",\n") + } + if this.Healthy != nil { + s = append(s, "Healthy: "+valueToGoStringMesos(this.Healthy, "bool")+",\n") + } + if this.CheckStatus != nil { + s = append(s, "CheckStatus: "+fmt.Sprintf("%#v", this.CheckStatus)+",\n") + } + if this.Labels != nil { + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + } + if this.ContainerStatus != nil { + s = append(s, "ContainerStatus: "+fmt.Sprintf("%#v", this.ContainerStatus)+",\n") + } + if this.UnreachableTime != nil { + s = append(s, "UnreachableTime: "+fmt.Sprintf("%#v", this.UnreachableTime)+",\n") + } + if this.Limitation != nil { + s = append(s, "Limitation: "+fmt.Sprintf("%#v", this.Limitation)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Filters) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.Filters{") + if this.RefuseSeconds != nil { + s = append(s, "RefuseSeconds: "+valueToGoStringMesos(this.RefuseSeconds, "float64")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Environment) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.Environment{") + if this.Variables != nil { + s = append(s, "Variables: "+fmt.Sprintf("%#v", this.Variables)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Environment_Variable) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&mesos.Environment_Variable{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Type != nil { + s = append(s, "Type: "+valueToGoStringMesos(this.Type, "Environment_Variable_Type")+",\n") + } + if this.Value != nil { + s = append(s, "Value: "+valueToGoStringMesos(this.Value, "string")+",\n") + } + if this.Secret != nil { + s = append(s, "Secret: "+fmt.Sprintf("%#v", this.Secret)+",\n") + } + s = append(s, 
"}") + return strings.Join(s, "") +} +func (this *Parameter) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&mesos.Parameter{") + s = append(s, "Key: "+fmt.Sprintf("%#v", this.Key)+",\n") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Parameters) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.Parameters{") + if this.Parameter != nil { + s = append(s, "Parameter: "+fmt.Sprintf("%#v", this.Parameter)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Credential) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&mesos.Credential{") + s = append(s, "Principal: "+fmt.Sprintf("%#v", this.Principal)+",\n") + if this.Secret != nil { + s = append(s, "Secret: "+valueToGoStringMesos(this.Secret, "string")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Credentials) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.Credentials{") + if this.Credentials != nil { + s = append(s, "Credentials: "+fmt.Sprintf("%#v", this.Credentials)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Secret) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&mesos.Secret{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + if this.Reference != nil { + s = append(s, "Reference: "+fmt.Sprintf("%#v", this.Reference)+",\n") + } + if this.Value != nil { + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Secret_Reference) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&mesos.Secret_Reference{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Key != nil { + s = append(s, "Key: "+valueToGoStringMesos(this.Key, "string")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Secret_Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.Secret_Value{") + if this.Data != nil { + s = append(s, "Data: "+valueToGoStringMesos(this.Data, "byte")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *RateLimit) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&mesos.RateLimit{") + if this.QPS != nil { + s = append(s, "QPS: "+valueToGoStringMesos(this.QPS, "float64")+",\n") + } + s = append(s, "Principal: "+fmt.Sprintf("%#v", this.Principal)+",\n") + if this.Capacity != nil { + s = append(s, "Capacity: "+valueToGoStringMesos(this.Capacity, "uint64")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *RateLimits) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&mesos.RateLimits{") + if this.Limits != nil { + s = append(s, "Limits: "+fmt.Sprintf("%#v", this.Limits)+",\n") + } + if this.AggregateDefaultQPS != nil { + s = append(s, "AggregateDefaultQPS: "+valueToGoStringMesos(this.AggregateDefaultQPS, "float64")+",\n") + } + if this.AggregateDefaultCapacity != nil { + s = append(s, "AggregateDefaultCapacity: "+valueToGoStringMesos(this.AggregateDefaultCapacity, "uint64")+",\n") + } + s = 
append(s, "}") + return strings.Join(s, "") +} +func (this *Image) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&mesos.Image{") + if this.Type != nil { + s = append(s, "Type: "+valueToGoStringMesos(this.Type, "Image_Type")+",\n") + } + if this.Appc != nil { + s = append(s, "Appc: "+fmt.Sprintf("%#v", this.Appc)+",\n") + } + if this.Docker != nil { + s = append(s, "Docker: "+fmt.Sprintf("%#v", this.Docker)+",\n") + } + if this.Cached != nil { + s = append(s, "Cached: "+valueToGoStringMesos(this.Cached, "bool")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Image_Appc) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&mesos.Image_Appc{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.ID != nil { + s = append(s, "ID: "+valueToGoStringMesos(this.ID, "string")+",\n") + } + if this.Labels != nil { + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Image_Docker) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&mesos.Image_Docker{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Credential != nil { + s = append(s, "Credential: "+fmt.Sprintf("%#v", this.Credential)+",\n") + } + if this.Config != nil { + s = append(s, "Config: "+fmt.Sprintf("%#v", this.Config)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MountPropagation) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.MountPropagation{") + if this.Mode != nil { + s = append(s, "Mode: "+valueToGoStringMesos(this.Mode, "MountPropagation_Mode")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Volume) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&mesos.Volume{") + if this.Mode != nil { + s = append(s, "Mode: "+valueToGoStringMesos(this.Mode, "Volume_Mode")+",\n") + } + s = append(s, "ContainerPath: "+fmt.Sprintf("%#v", this.ContainerPath)+",\n") + if this.HostPath != nil { + s = append(s, "HostPath: "+valueToGoStringMesos(this.HostPath, "string")+",\n") + } + if this.Image != nil { + s = append(s, "Image: "+fmt.Sprintf("%#v", this.Image)+",\n") + } + if this.Source != nil { + s = append(s, "Source: "+fmt.Sprintf("%#v", this.Source)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Volume_Source) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&mesos.Volume_Source{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + if this.DockerVolume != nil { + s = append(s, "DockerVolume: "+fmt.Sprintf("%#v", this.DockerVolume)+",\n") + } + if this.HostPath != nil { + s = append(s, "HostPath: "+fmt.Sprintf("%#v", this.HostPath)+",\n") + } + if this.SandboxPath != nil { + s = append(s, "SandboxPath: "+fmt.Sprintf("%#v", this.SandboxPath)+",\n") + } + if this.Secret != nil { + s = append(s, "Secret: "+fmt.Sprintf("%#v", this.Secret)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Volume_Source_DockerVolume) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&mesos.Volume_Source_DockerVolume{") + if this.Driver != nil { + s = append(s, "Driver: "+valueToGoStringMesos(this.Driver, 
"string")+",\n") + } + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.DriverOptions != nil { + s = append(s, "DriverOptions: "+fmt.Sprintf("%#v", this.DriverOptions)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Volume_Source_HostPath) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&mesos.Volume_Source_HostPath{") + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + if this.MountPropagation != nil { + s = append(s, "MountPropagation: "+fmt.Sprintf("%#v", this.MountPropagation)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Volume_Source_SandboxPath) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&mesos.Volume_Source_SandboxPath{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *NetworkInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&mesos.NetworkInfo{") + if this.IPAddresses != nil { + s = append(s, "IPAddresses: "+fmt.Sprintf("%#v", this.IPAddresses)+",\n") + } + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringMesos(this.Name, "string")+",\n") + } + if this.Groups != nil { + s = append(s, "Groups: "+fmt.Sprintf("%#v", this.Groups)+",\n") + } + if this.Labels != nil { + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + } + if this.PortMappings != nil { + s = append(s, "PortMappings: "+fmt.Sprintf("%#v", this.PortMappings)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *NetworkInfo_IPAddress) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&mesos.NetworkInfo_IPAddress{") + if this.Protocol != nil { + s = append(s, "Protocol: "+valueToGoStringMesos(this.Protocol, "NetworkInfo_Protocol")+",\n") + } + if this.IPAddress != nil { + s = append(s, "IPAddress: "+valueToGoStringMesos(this.IPAddress, "string")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *NetworkInfo_PortMapping) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&mesos.NetworkInfo_PortMapping{") + s = append(s, "HostPort: "+fmt.Sprintf("%#v", this.HostPort)+",\n") + s = append(s, "ContainerPort: "+fmt.Sprintf("%#v", this.ContainerPort)+",\n") + if this.Protocol != nil { + s = append(s, "Protocol: "+valueToGoStringMesos(this.Protocol, "string")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CapabilityInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.CapabilityInfo{") + if this.Capabilities != nil { + s = append(s, "Capabilities: "+fmt.Sprintf("%#v", this.Capabilities)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *LinuxInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&mesos.LinuxInfo{") + if this.CapabilityInfo != nil { + s = append(s, "CapabilityInfo: "+fmt.Sprintf("%#v", this.CapabilityInfo)+",\n") + } + if this.BoundingCapabilities != nil { + s = append(s, "BoundingCapabilities: "+fmt.Sprintf("%#v", this.BoundingCapabilities)+",\n") + } + if this.EffectiveCapabilities != nil { + s = append(s, "EffectiveCapabilities: "+fmt.Sprintf("%#v", 
this.EffectiveCapabilities)+",\n") + } + if this.SharePIDNamespace != nil { + s = append(s, "SharePIDNamespace: "+valueToGoStringMesos(this.SharePIDNamespace, "bool")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *RLimitInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.RLimitInfo{") + if this.Rlimits != nil { + s = append(s, "Rlimits: "+fmt.Sprintf("%#v", this.Rlimits)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *RLimitInfo_RLimit) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&mesos.RLimitInfo_RLimit{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + if this.Hard != nil { + s = append(s, "Hard: "+valueToGoStringMesos(this.Hard, "uint64")+",\n") + } + if this.Soft != nil { + s = append(s, "Soft: "+valueToGoStringMesos(this.Soft, "uint64")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TTYInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.TTYInfo{") + if this.WindowSize != nil { + s = append(s, "WindowSize: "+fmt.Sprintf("%#v", this.WindowSize)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TTYInfo_WindowSize) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&mesos.TTYInfo_WindowSize{") + s = append(s, "Rows: "+fmt.Sprintf("%#v", this.Rows)+",\n") + s = append(s, "Columns: "+fmt.Sprintf("%#v", this.Columns)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ContainerInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 13) + s = append(s, "&mesos.ContainerInfo{") + if this.Type != nil { + s = append(s, "Type: "+valueToGoStringMesos(this.Type, "ContainerInfo_Type")+",\n") + } + if this.Volumes != nil { + s = append(s, "Volumes: "+fmt.Sprintf("%#v", this.Volumes)+",\n") + } + if this.Hostname != nil { + s = append(s, "Hostname: "+valueToGoStringMesos(this.Hostname, "string")+",\n") + } + if this.Docker != nil { + s = append(s, "Docker: "+fmt.Sprintf("%#v", this.Docker)+",\n") + } + if this.Mesos != nil { + s = append(s, "Mesos: "+fmt.Sprintf("%#v", this.Mesos)+",\n") + } + if this.NetworkInfos != nil { + s = append(s, "NetworkInfos: "+fmt.Sprintf("%#v", this.NetworkInfos)+",\n") + } + if this.LinuxInfo != nil { + s = append(s, "LinuxInfo: "+fmt.Sprintf("%#v", this.LinuxInfo)+",\n") + } + if this.RlimitInfo != nil { + s = append(s, "RlimitInfo: "+fmt.Sprintf("%#v", this.RlimitInfo)+",\n") + } + if this.TTYInfo != nil { + s = append(s, "TTYInfo: "+fmt.Sprintf("%#v", this.TTYInfo)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ContainerInfo_DockerInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&mesos.ContainerInfo_DockerInfo{") + s = append(s, "Image: "+fmt.Sprintf("%#v", this.Image)+",\n") + if this.Network != nil { + s = append(s, "Network: "+valueToGoStringMesos(this.Network, "ContainerInfo_DockerInfo_Network")+",\n") + } + if this.PortMappings != nil { + s = append(s, "PortMappings: "+fmt.Sprintf("%#v", this.PortMappings)+",\n") + } + if this.Privileged != nil { + s = append(s, "Privileged: "+valueToGoStringMesos(this.Privileged, "bool")+",\n") + } + if this.Parameters != nil { + s = append(s, "Parameters: "+fmt.Sprintf("%#v", this.Parameters)+",\n") + } + if 
this.ForcePullImage != nil { + s = append(s, "ForcePullImage: "+valueToGoStringMesos(this.ForcePullImage, "bool")+",\n") + } + if this.VolumeDriver != nil { + s = append(s, "VolumeDriver: "+valueToGoStringMesos(this.VolumeDriver, "string")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ContainerInfo_DockerInfo_PortMapping) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&mesos.ContainerInfo_DockerInfo_PortMapping{") + s = append(s, "HostPort: "+fmt.Sprintf("%#v", this.HostPort)+",\n") + s = append(s, "ContainerPort: "+fmt.Sprintf("%#v", this.ContainerPort)+",\n") + if this.Protocol != nil { + s = append(s, "Protocol: "+valueToGoStringMesos(this.Protocol, "string")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ContainerInfo_MesosInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.ContainerInfo_MesosInfo{") + if this.Image != nil { + s = append(s, "Image: "+fmt.Sprintf("%#v", this.Image)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ContainerStatus) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&mesos.ContainerStatus{") + if this.ContainerID != nil { + s = append(s, "ContainerID: "+fmt.Sprintf("%#v", this.ContainerID)+",\n") + } + if this.NetworkInfos != nil { + s = append(s, "NetworkInfos: "+fmt.Sprintf("%#v", this.NetworkInfos)+",\n") + } + if this.CgroupInfo != nil { + s = append(s, "CgroupInfo: "+fmt.Sprintf("%#v", this.CgroupInfo)+",\n") + } + if this.ExecutorPID != nil { + s = append(s, "ExecutorPID: "+valueToGoStringMesos(this.ExecutorPID, "uint32")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CgroupInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.CgroupInfo{") + if this.NetCLS != nil { + s = append(s, "NetCLS: "+fmt.Sprintf("%#v", this.NetCLS)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CgroupInfo_Blkio) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&mesos.CgroupInfo_Blkio{") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CgroupInfo_Blkio_Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&mesos.CgroupInfo_Blkio_Value{") + if this.Op != nil { + s = append(s, "Op: "+valueToGoStringMesos(this.Op, "CgroupInfo_Blkio_Operation")+",\n") + } + if this.Value != nil { + s = append(s, "Value: "+valueToGoStringMesos(this.Value, "uint64")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CgroupInfo_Blkio_CFQ) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&mesos.CgroupInfo_Blkio_CFQ{") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CgroupInfo_Blkio_CFQ_Statistics) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 13) + s = append(s, "&mesos.CgroupInfo_Blkio_CFQ_Statistics{") + if this.Device != nil { + s = append(s, "Device: "+fmt.Sprintf("%#v", this.Device)+",\n") + } + if this.Sectors != nil { + s = append(s, "Sectors: "+valueToGoStringMesos(this.Sectors, "uint64")+",\n") + } + if this.Time != nil { + s = append(s, "Time: "+valueToGoStringMesos(this.Time, "uint64")+",\n") + } + if this.IOServiced != nil { + s = append(s, "IOServiced: 
"+fmt.Sprintf("%#v", this.IOServiced)+",\n") + } + if this.IOServiceBytes != nil { + s = append(s, "IOServiceBytes: "+fmt.Sprintf("%#v", this.IOServiceBytes)+",\n") + } + if this.IOServiceTime != nil { + s = append(s, "IOServiceTime: "+fmt.Sprintf("%#v", this.IOServiceTime)+",\n") + } + if this.IOWaitTime != nil { + s = append(s, "IOWaitTime: "+fmt.Sprintf("%#v", this.IOWaitTime)+",\n") + } + if this.IOMerged != nil { + s = append(s, "IOMerged: "+fmt.Sprintf("%#v", this.IOMerged)+",\n") + } + if this.IOQueued != nil { + s = append(s, "IOQueued: "+fmt.Sprintf("%#v", this.IOQueued)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CgroupInfo_Blkio_Throttling) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&mesos.CgroupInfo_Blkio_Throttling{") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CgroupInfo_Blkio_Throttling_Statistics) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&mesos.CgroupInfo_Blkio_Throttling_Statistics{") + if this.Device != nil { + s = append(s, "Device: "+fmt.Sprintf("%#v", this.Device)+",\n") + } + if this.IOServiced != nil { + s = append(s, "IOServiced: "+fmt.Sprintf("%#v", this.IOServiced)+",\n") + } + if this.IOServiceBytes != nil { + s = append(s, "IOServiceBytes: "+fmt.Sprintf("%#v", this.IOServiceBytes)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CgroupInfo_Blkio_Statistics) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&mesos.CgroupInfo_Blkio_Statistics{") + if this.CFQ != nil { + s = append(s, "CFQ: "+fmt.Sprintf("%#v", this.CFQ)+",\n") + } + if this.CFQRecursive != nil { + s = append(s, "CFQRecursive: "+fmt.Sprintf("%#v", this.CFQRecursive)+",\n") + } + if this.Throttling != nil { + s = append(s, "Throttling: "+fmt.Sprintf("%#v", this.Throttling)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CgroupInfo_NetCls) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.CgroupInfo_NetCls{") + if this.ClassID != nil { + s = append(s, "ClassID: "+valueToGoStringMesos(this.ClassID, "uint32")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Labels) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.Labels{") + if this.Labels != nil { + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Label) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&mesos.Label{") + s = append(s, "Key: "+fmt.Sprintf("%#v", this.Key)+",\n") + if this.Value != nil { + s = append(s, "Value: "+valueToGoStringMesos(this.Value, "string")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Port) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&mesos.Port{") + s = append(s, "Number: "+fmt.Sprintf("%#v", this.Number)+",\n") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringMesos(this.Name, "string")+",\n") + } + if this.Protocol != nil { + s = append(s, "Protocol: "+valueToGoStringMesos(this.Protocol, "string")+",\n") + } + if this.Visibility != nil { + s = append(s, "Visibility: "+valueToGoStringMesos(this.Visibility, "DiscoveryInfo_Visibility")+",\n") + } + if 
this.Labels != nil { + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Ports) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.Ports{") + if this.Ports != nil { + s = append(s, "Ports: "+fmt.Sprintf("%#v", this.Ports)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DiscoveryInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&mesos.DiscoveryInfo{") + s = append(s, "Visibility: "+fmt.Sprintf("%#v", this.Visibility)+",\n") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringMesos(this.Name, "string")+",\n") + } + if this.Environment != nil { + s = append(s, "Environment: "+valueToGoStringMesos(this.Environment, "string")+",\n") + } + if this.Location != nil { + s = append(s, "Location: "+valueToGoStringMesos(this.Location, "string")+",\n") + } + if this.Version != nil { + s = append(s, "Version: "+valueToGoStringMesos(this.Version, "string")+",\n") + } + if this.Ports != nil { + s = append(s, "Ports: "+fmt.Sprintf("%#v", this.Ports)+",\n") + } + if this.Labels != nil { + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *WeightInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&mesos.WeightInfo{") + s = append(s, "Weight: "+fmt.Sprintf("%#v", this.Weight)+",\n") + if this.Role != nil { + s = append(s, "Role: "+valueToGoStringMesos(this.Role, "string")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *VersionInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&mesos.VersionInfo{") + s = append(s, "Version: "+fmt.Sprintf("%#v", this.Version)+",\n") + if this.BuildDate != nil { + s = append(s, "BuildDate: "+valueToGoStringMesos(this.BuildDate, "string")+",\n") + } + if this.BuildTime != nil { + s = append(s, "BuildTime: "+valueToGoStringMesos(this.BuildTime, "float64")+",\n") + } + if this.BuildUser != nil { + s = append(s, "BuildUser: "+valueToGoStringMesos(this.BuildUser, "string")+",\n") + } + if this.GitSHA != nil { + s = append(s, "GitSHA: "+valueToGoStringMesos(this.GitSHA, "string")+",\n") + } + if this.GitBranch != nil { + s = append(s, "GitBranch: "+valueToGoStringMesos(this.GitBranch, "string")+",\n") + } + if this.GitTag != nil { + s = append(s, "GitTag: "+valueToGoStringMesos(this.GitTag, "string")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Flag) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&mesos.Flag{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Value != nil { + s = append(s, "Value: "+valueToGoStringMesos(this.Value, "string")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Role) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&mesos.Role{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Weight: "+fmt.Sprintf("%#v", this.Weight)+",\n") + if this.Frameworks != nil { + s = append(s, "Frameworks: "+fmt.Sprintf("%#v", this.Frameworks)+",\n") + } + if this.Resources != nil { + s = append(s, "Resources: "+fmt.Sprintf("%#v", this.Resources)+",\n") + } + s = append(s, "}") + return 
strings.Join(s, "") +} +func (this *Metric) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&mesos.Metric{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Value != nil { + s = append(s, "Value: "+valueToGoStringMesos(this.Value, "float64")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&mesos.FileInfo{") + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + if this.Nlink != nil { + s = append(s, "Nlink: "+valueToGoStringMesos(this.Nlink, "int32")+",\n") + } + if this.Size != nil { + s = append(s, "Size: "+valueToGoStringMesos(this.Size, "uint64")+",\n") + } + if this.Mtime != nil { + s = append(s, "Mtime: "+fmt.Sprintf("%#v", this.Mtime)+",\n") + } + if this.Mode != nil { + s = append(s, "Mode: "+valueToGoStringMesos(this.Mode, "uint32")+",\n") + } + if this.UID != nil { + s = append(s, "UID: "+valueToGoStringMesos(this.UID, "string")+",\n") + } + if this.GID != nil { + s = append(s, "GID: "+valueToGoStringMesos(this.GID, "string")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Device) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&mesos.Device{") + if this.Path != nil { + s = append(s, "Path: "+valueToGoStringMesos(this.Path, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+fmt.Sprintf("%#v", this.Number)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Device_Number) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&mesos.Device_Number{") + if this.MajorNumber != nil { + s = append(s, "MajorNumber: "+valueToGoStringMesos(this.MajorNumber, "uint64")+",\n") + } + if this.MinorNumber != nil { + s = append(s, "MinorNumber: "+valueToGoStringMesos(this.MinorNumber, "uint64")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DeviceAccess) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&mesos.DeviceAccess{") + s = append(s, "Device: "+strings.Replace(this.Device.GoString(), `&`, ``, 1)+",\n") + s = append(s, "Access: "+strings.Replace(this.Access.GoString(), `&`, ``, 1)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DeviceAccess_Access) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&mesos.DeviceAccess_Access{") + if this.Read != nil { + s = append(s, "Read: "+valueToGoStringMesos(this.Read, "bool")+",\n") + } + if this.Write != nil { + s = append(s, "Write: "+valueToGoStringMesos(this.Write, "bool")+",\n") + } + if this.Mknod != nil { + s = append(s, "Mknod: "+valueToGoStringMesos(this.Mknod, "bool")+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DeviceWhitelist) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&mesos.DeviceWhitelist{") + if this.AllowedDevices != nil { + s = append(s, "AllowedDevices: "+fmt.Sprintf("%#v", this.AllowedDevices)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringMesos(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v 
)", typ, typ, pv) +} +func (m *FrameworkID) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FrameworkID) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + return i, nil +} + +func (m *OfferID) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OfferID) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + return i, nil +} + +func (m *AgentID) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AgentID) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + return i, nil +} + +func (m *TaskID) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskID) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + return i, nil +} + +func (m *ExecutorID) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecutorID) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + return i, nil +} + +func (m *ContainerID) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerID) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + if m.Parent != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Parent.ProtoSize())) + n1, err := m.Parent.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + return i, nil +} + +func (m *ResourceProviderID) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceProviderID) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + return i, nil +} + +func (m *OperationID) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OperationID) MarshalTo(dAtA []byte) (int, 
error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + return i, nil +} + +func (m *TimeInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimeInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Nanoseconds)) + return i, nil +} + +func (m *DurationInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DurationInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Nanoseconds)) + return i, nil +} + +func (m *Address) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Address) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Hostname != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Hostname))) + i += copy(dAtA[i:], *m.Hostname) + } + if m.IP != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.IP))) + i += copy(dAtA[i:], *m.IP) + } + dAtA[i] = 0x18 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Port)) + return i, nil +} + +func (m *URL) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *URL) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Scheme))) + i += copy(dAtA[i:], m.Scheme) + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Address.ProtoSize())) + n2, err := m.Address.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if m.Path != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Path))) + i += copy(dAtA[i:], *m.Path) + } + if len(m.Query) > 0 { + for _, msg := range m.Query { + dAtA[i] = 0x22 + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Fragment != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Fragment))) + i += copy(dAtA[i:], *m.Fragment) + } + return i, nil +} + +func (m *Unavailability) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Unavailability) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Start.ProtoSize())) + n3, err := m.Start.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + if m.Duration != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Duration.ProtoSize())) + n4, err := m.Duration.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + return i, nil +} + +func (m *MachineID) Marshal() (dAtA []byte, err error) { + 
size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MachineID) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Hostname != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Hostname))) + i += copy(dAtA[i:], *m.Hostname) + } + if m.IP != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.IP))) + i += copy(dAtA[i:], *m.IP) + } + return i, nil +} + +func (m *MachineInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MachineInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.ID.ProtoSize())) + n5, err := m.ID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + if m.Mode != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Mode)) + } + if m.Unavailability != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Unavailability.ProtoSize())) + n6, err := m.Unavailability.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} + +func (m *FrameworkInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FrameworkInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if m.ID != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.ID.ProtoSize())) + n7, err := m.ID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.FailoverTimeout != nil { + dAtA[i] = 0x21 + i++ + i = encodeFixed64Mesos(dAtA, i, uint64(math.Float64bits(float64(*m.FailoverTimeout)))) + } + if m.Checkpoint != nil { + dAtA[i] = 0x28 + i++ + if *m.Checkpoint { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Role != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Role))) + i += copy(dAtA[i:], *m.Role) + } + if m.Hostname != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Hostname))) + i += copy(dAtA[i:], *m.Hostname) + } + if m.Principal != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Principal))) + i += copy(dAtA[i:], *m.Principal) + } + if m.WebUiURL != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.WebUiURL))) + i += copy(dAtA[i:], *m.WebUiURL) + } + if len(m.Capabilities) > 0 { + for _, msg := range m.Capabilities { + dAtA[i] = 0x52 + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Labels != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Labels.ProtoSize())) + n8, err := m.Labels.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if len(m.Roles) > 0 { + for _, s := range m.Roles { + dAtA[i] = 0x62 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] 
= uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *FrameworkInfo_Capability) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FrameworkInfo_Capability) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Type)) + return i, nil +} + +func (m *CheckInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CheckInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Type)) + if m.Command != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Command.ProtoSize())) + n9, err := m.Command.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + if m.HTTP != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.HTTP.ProtoSize())) + n10, err := m.HTTP.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + if m.DelaySeconds != nil { + dAtA[i] = 0x21 + i++ + i = encodeFixed64Mesos(dAtA, i, uint64(math.Float64bits(float64(*m.DelaySeconds)))) + } + if m.IntervalSeconds != nil { + dAtA[i] = 0x29 + i++ + i = encodeFixed64Mesos(dAtA, i, uint64(math.Float64bits(float64(*m.IntervalSeconds)))) + } + if m.TimeoutSeconds != nil { + dAtA[i] = 0x31 + i++ + i = encodeFixed64Mesos(dAtA, i, uint64(math.Float64bits(float64(*m.TimeoutSeconds)))) + } + if m.TCP != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.TCP.ProtoSize())) + n11, err := m.TCP.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + return i, nil +} + +func (m *CheckInfo_Command) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CheckInfo_Command) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Command.ProtoSize())) + n12, err := m.Command.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + return i, nil +} + +func (m *CheckInfo_Http) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CheckInfo_Http) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Port)) + if m.Path != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Path))) + i += copy(dAtA[i:], *m.Path) + } + return i, nil +} + +func (m *CheckInfo_Tcp) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CheckInfo_Tcp) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Port)) + return i, nil +} + +func (m *HealthCheck) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + 
if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheck) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.HTTP != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.HTTP.ProtoSize())) + n13, err := m.HTTP.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + if m.DelaySeconds != nil { + dAtA[i] = 0x11 + i++ + i = encodeFixed64Mesos(dAtA, i, uint64(math.Float64bits(float64(*m.DelaySeconds)))) + } + if m.IntervalSeconds != nil { + dAtA[i] = 0x19 + i++ + i = encodeFixed64Mesos(dAtA, i, uint64(math.Float64bits(float64(*m.IntervalSeconds)))) + } + if m.TimeoutSeconds != nil { + dAtA[i] = 0x21 + i++ + i = encodeFixed64Mesos(dAtA, i, uint64(math.Float64bits(float64(*m.TimeoutSeconds)))) + } + if m.ConsecutiveFailures != nil { + dAtA[i] = 0x28 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.ConsecutiveFailures)) + } + if m.GracePeriodSeconds != nil { + dAtA[i] = 0x31 + i++ + i = encodeFixed64Mesos(dAtA, i, uint64(math.Float64bits(float64(*m.GracePeriodSeconds)))) + } + if m.Command != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Command.ProtoSize())) + n14, err := m.Command.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + dAtA[i] = 0x40 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Type)) + if m.TCP != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.TCP.ProtoSize())) + n15, err := m.TCP.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + return i, nil +} + +func (m *HealthCheck_HTTPCheckInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheck_HTTPCheckInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Port)) + if m.Path != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Path))) + i += copy(dAtA[i:], *m.Path) + } + if m.Scheme != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Scheme))) + i += copy(dAtA[i:], *m.Scheme) + } + if len(m.Statuses) > 0 { + for _, num := range m.Statuses { + dAtA[i] = 0x20 + i++ + i = encodeVarintMesos(dAtA, i, uint64(num)) + } + } + if m.Protocol != nil { + dAtA[i] = 0x28 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Protocol)) + } + return i, nil +} + +func (m *HealthCheck_TCPCheckInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheck_TCPCheckInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Port)) + if m.Protocol != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Protocol)) + } + return i, nil +} + +func (m *KillPolicy) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KillPolicy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.GracePeriod != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.GracePeriod.ProtoSize())) + n16, err := m.GracePeriod.MarshalTo(dAtA[i:]) + if err != 
nil { + return 0, err + } + i += n16 + } + return i, nil +} + +func (m *CommandInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommandInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.URIs) > 0 { + for _, msg := range m.URIs { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Environment != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Environment.ProtoSize())) + n17, err := m.Environment.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + if m.Value != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Value))) + i += copy(dAtA[i:], *m.Value) + } + if m.User != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.User))) + i += copy(dAtA[i:], *m.User) + } + if m.Shell != nil { + dAtA[i] = 0x30 + i++ + if *m.Shell { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Arguments) > 0 { + for _, s := range m.Arguments { + dAtA[i] = 0x3a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *CommandInfo_URI) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommandInfo_URI) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + if m.Executable != nil { + dAtA[i] = 0x10 + i++ + if *m.Executable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Extract != nil { + dAtA[i] = 0x18 + i++ + if *m.Extract { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Cache != nil { + dAtA[i] = 0x20 + i++ + if *m.Cache { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.OutputFile != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.OutputFile))) + i += copy(dAtA[i:], *m.OutputFile) + } + return i, nil +} + +func (m *ExecutorInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecutorInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.ExecutorID.ProtoSize())) + n18, err := m.ExecutorID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + if m.Data != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if len(m.Resources) > 0 { + for _, msg := range m.Resources { + dAtA[i] = 0x2a + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Command != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Command.ProtoSize())) + n19, err := m.Command.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + if m.FrameworkID != nil { + dAtA[i] = 0x42 + i++ + i = 
encodeVarintMesos(dAtA, i, uint64(m.FrameworkID.ProtoSize())) + n20, err := m.FrameworkID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + } + if m.Name != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Name))) + i += copy(dAtA[i:], *m.Name) + } + if m.Source != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Source))) + i += copy(dAtA[i:], *m.Source) + } + if m.Container != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Container.ProtoSize())) + n21, err := m.Container.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + } + if m.Discovery != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Discovery.ProtoSize())) + n22, err := m.Discovery.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + } + if m.ShutdownGracePeriod != nil { + dAtA[i] = 0x6a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.ShutdownGracePeriod.ProtoSize())) + n23, err := m.ShutdownGracePeriod.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + } + if m.Labels != nil { + dAtA[i] = 0x72 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Labels.ProtoSize())) + n24, err := m.Labels.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + } + dAtA[i] = 0x78 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Type)) + return i, nil +} + +func (m *DomainInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DomainInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.FaultDomain != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.FaultDomain.ProtoSize())) + n25, err := m.FaultDomain.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + } + return i, nil +} + +func (m *DomainInfo_FaultDomain) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DomainInfo_FaultDomain) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Region.ProtoSize())) + n26, err := m.Region.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Zone.ProtoSize())) + n27, err := m.Zone.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + return i, nil +} + +func (m *DomainInfo_FaultDomain_RegionInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DomainInfo_FaultDomain_RegionInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + return i, nil +} + +func (m *DomainInfo_FaultDomain_ZoneInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DomainInfo_FaultDomain_ZoneInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, 
uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + return i, nil +} + +func (m *MasterInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MasterInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + dAtA[i] = 0x10 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.IP)) + if m.Port == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("port") + } else { + dAtA[i] = 0x18 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Port)) + } + if m.PID != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.PID))) + i += copy(dAtA[i:], *m.PID) + } + if m.Hostname != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Hostname))) + i += copy(dAtA[i:], *m.Hostname) + } + if m.Version != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Version))) + i += copy(dAtA[i:], *m.Version) + } + if m.Address != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Address.ProtoSize())) + n28, err := m.Address.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + } + if m.Domain != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Domain.ProtoSize())) + n29, err := m.Domain.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + } + if len(m.Capabilities) > 0 { + for _, msg := range m.Capabilities { + dAtA[i] = 0x4a + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *MasterInfo_Capability) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MasterInfo_Capability) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Type)) + return i, nil +} + +func (m *AgentInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AgentInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Hostname))) + i += copy(dAtA[i:], m.Hostname) + if len(m.Resources) > 0 { + for _, msg := range m.Resources { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Attributes) > 0 { + for _, msg := range m.Attributes { + dAtA[i] = 0x2a + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.ID != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.ID.ProtoSize())) + n30, err := m.ID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + } + if m.Port != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Port)) + } + if m.Domain != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Domain.ProtoSize())) + 
n31, err := m.Domain.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + } + return i, nil +} + +func (m *AgentInfo_Capability) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AgentInfo_Capability) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Type)) + return i, nil +} + +func (m *CSIPluginContainerInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSIPluginContainerInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Services) > 0 { + for _, num := range m.Services { + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(num)) + } + } + if m.Command != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Command.ProtoSize())) + n32, err := m.Command.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + } + if len(m.Resources) > 0 { + for _, msg := range m.Resources { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Container != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Container.ProtoSize())) + n33, err := m.Container.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + } + return i, nil +} + +func (m *CSIPluginInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSIPluginInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if len(m.Containers) > 0 { + for _, msg := range m.Containers { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ResourceProviderInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceProviderInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.ID.ProtoSize())) + n34, err := m.ID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + } + if len(m.Attributes) > 0 { + for _, msg := range m.Attributes { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x22 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if len(m.DefaultReservations) > 0 { + for _, msg := range m.DefaultReservations { + dAtA[i] = 0x2a + i++ + 
i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Storage != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Storage.ProtoSize())) + n35, err := m.Storage.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + } + return i, nil +} + +func (m *ResourceProviderInfo_Storage) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceProviderInfo_Storage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Plugin.ProtoSize())) + n36, err := m.Plugin.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 + return i, nil +} + +func (m *Value) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Value) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Type)) + if m.Scalar != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Scalar.ProtoSize())) + n37, err := m.Scalar.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 + } + if m.Ranges != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Ranges.ProtoSize())) + n38, err := m.Ranges.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + } + if m.Set != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Set.ProtoSize())) + n39, err := m.Set.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + } + if m.Text != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Text.ProtoSize())) + n40, err := m.Text.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 + } + return i, nil +} + +func (m *Value_Scalar) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Value_Scalar) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x9 + i++ + i = encodeFixed64Mesos(dAtA, i, uint64(math.Float64bits(float64(m.Value)))) + return i, nil +} + +func (m *Value_Range) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Value_Range) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Begin)) + dAtA[i] = 0x10 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.End)) + return i, nil +} + +func (m *Value_Ranges) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Value_Ranges) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Range) > 0 { + for _, msg := range m.Range { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += 
n + } + } + return i, nil +} + +func (m *Value_Set) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Value_Set) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Item) > 0 { + for _, s := range m.Item { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *Value_Text) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Value_Text) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + return i, nil +} + +func (m *Attribute) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Attribute) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x10 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Type)) + if m.Scalar != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Scalar.ProtoSize())) + n41, err := m.Scalar.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n41 + } + if m.Ranges != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Ranges.ProtoSize())) + n42, err := m.Ranges.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n42 + } + if m.Text != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Text.ProtoSize())) + n43, err := m.Text.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n43 + } + if m.Set != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Set.ProtoSize())) + n44, err := m.Set.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n44 + } + return i, nil +} + +func (m *Resource) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Resource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if m.Type == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("type") + } else { + dAtA[i] = 0x10 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Type)) + } + if m.Scalar != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Scalar.ProtoSize())) + n45, err := m.Scalar.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n45 + } + if m.Ranges != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Ranges.ProtoSize())) + n46, err := m.Ranges.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n46 + } + if m.Set != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Set.ProtoSize())) + n47, err := m.Set.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n47 + } + if m.Role != nil { 
+ dAtA[i] = 0x32 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Role))) + i += copy(dAtA[i:], *m.Role) + } + if m.Disk != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Disk.ProtoSize())) + n48, err := m.Disk.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n48 + } + if m.Reservation != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Reservation.ProtoSize())) + n49, err := m.Reservation.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n49 + } + if m.Revocable != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Revocable.ProtoSize())) + n50, err := m.Revocable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n50 + } + if m.Shared != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Shared.ProtoSize())) + n51, err := m.Shared.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n51 + } + if m.AllocationInfo != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.AllocationInfo.ProtoSize())) + n52, err := m.AllocationInfo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n52 + } + if m.ProviderID != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.ProviderID.ProtoSize())) + n53, err := m.ProviderID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n53 + } + if len(m.Reservations) > 0 { + for _, msg := range m.Reservations { + dAtA[i] = 0x6a + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Resource_AllocationInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Resource_AllocationInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Role != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Role))) + i += copy(dAtA[i:], *m.Role) + } + return i, nil +} + +func (m *Resource_ReservationInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Resource_ReservationInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Principal != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Principal))) + i += copy(dAtA[i:], *m.Principal) + } + if m.Labels != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Labels.ProtoSize())) + n54, err := m.Labels.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n54 + } + if m.Role != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Role))) + i += copy(dAtA[i:], *m.Role) + } + if m.Type != nil { + dAtA[i] = 0x20 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Type)) + } + return i, nil +} + +func (m *Resource_DiskInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Resource_DiskInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Persistence != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Persistence.ProtoSize())) + n55, err := 
m.Persistence.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n55 + } + if m.Volume != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Volume.ProtoSize())) + n56, err := m.Volume.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n56 + } + if m.Source != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Source.ProtoSize())) + n57, err := m.Source.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n57 + } + return i, nil +} + +func (m *Resource_DiskInfo_Persistence) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Resource_DiskInfo_Persistence) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + if m.Principal != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Principal))) + i += copy(dAtA[i:], *m.Principal) + } + return i, nil +} + +func (m *Resource_DiskInfo_Source) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Resource_DiskInfo_Source) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Type)) + if m.Path != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Path.ProtoSize())) + n58, err := m.Path.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n58 + } + if m.Mount != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Mount.ProtoSize())) + n59, err := m.Mount.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n59 + } + if m.ID != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.ID))) + i += copy(dAtA[i:], *m.ID) + } + if m.Metadata != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Metadata.ProtoSize())) + n60, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n60 + } + if m.Profile != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Profile))) + i += copy(dAtA[i:], *m.Profile) + } + return i, nil +} + +func (m *Resource_DiskInfo_Source_Path) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Resource_DiskInfo_Source_Path) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Root != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Root))) + i += copy(dAtA[i:], *m.Root) + } + return i, nil +} + +func (m *Resource_DiskInfo_Source_Mount) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Resource_DiskInfo_Source_Mount) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Root != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Root))) + i += copy(dAtA[i:], *m.Root) + } + return i, nil +} + +func (m *Resource_RevocableInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA 
= make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Resource_RevocableInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *Resource_SharedInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Resource_SharedInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *TrafficControlStatistics) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TrafficControlStatistics) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + if m.Backlog != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Backlog)) + } + if m.Bytes != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Bytes)) + } + if m.Drops != nil { + dAtA[i] = 0x20 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Drops)) + } + if m.Overlimits != nil { + dAtA[i] = 0x28 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Overlimits)) + } + if m.Packets != nil { + dAtA[i] = 0x30 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Packets)) + } + if m.Qlen != nil { + dAtA[i] = 0x38 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Qlen)) + } + if m.RateBPS != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.RateBPS)) + } + if m.RatePPS != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.RatePPS)) + } + if m.Requeues != nil { + dAtA[i] = 0x50 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Requeues)) + } + return i, nil +} + +func (m *IpStatistics) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IpStatistics) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Forwarding != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Forwarding)) + } + if m.DefaultTTL != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.DefaultTTL)) + } + if m.InReceives != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.InReceives)) + } + if m.InHdrErrors != nil { + dAtA[i] = 0x20 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.InHdrErrors)) + } + if m.InAddrErrors != nil { + dAtA[i] = 0x28 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.InAddrErrors)) + } + if m.ForwDatagrams != nil { + dAtA[i] = 0x30 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.ForwDatagrams)) + } + if m.InUnknownProtos != nil { + dAtA[i] = 0x38 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.InUnknownProtos)) + } + if m.InDiscards != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.InDiscards)) + } + if m.InDelivers != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.InDelivers)) + } + if m.OutRequests != nil { + dAtA[i] = 0x50 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.OutRequests)) + } + if m.OutDiscards != nil { + dAtA[i] = 0x58 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.OutDiscards)) + } + if 
m.OutNoRoutes != nil { + dAtA[i] = 0x60 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.OutNoRoutes)) + } + if m.ReasmTimeout != nil { + dAtA[i] = 0x68 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.ReasmTimeout)) + } + if m.ReasmReqds != nil { + dAtA[i] = 0x70 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.ReasmReqds)) + } + if m.ReasmOKs != nil { + dAtA[i] = 0x78 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.ReasmOKs)) + } + if m.ReasmFails != nil { + dAtA[i] = 0x80 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.ReasmFails)) + } + if m.FragOKs != nil { + dAtA[i] = 0x88 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.FragOKs)) + } + if m.FragFails != nil { + dAtA[i] = 0x90 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.FragFails)) + } + if m.FragCreates != nil { + dAtA[i] = 0x98 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.FragCreates)) + } + return i, nil +} + +func (m *IcmpStatistics) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IcmpStatistics) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.InMsgs != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.InMsgs)) + } + if m.InErrors != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.InErrors)) + } + if m.InCsumErrors != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.InCsumErrors)) + } + if m.InDestUnreachs != nil { + dAtA[i] = 0x20 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.InDestUnreachs)) + } + if m.InTimeExcds != nil { + dAtA[i] = 0x28 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.InTimeExcds)) + } + if m.InParmProbs != nil { + dAtA[i] = 0x30 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.InParmProbs)) + } + if m.InSrcQuenchs != nil { + dAtA[i] = 0x38 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.InSrcQuenchs)) + } + if m.InRedirects != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.InRedirects)) + } + if m.InEchos != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.InEchos)) + } + if m.InEchoReps != nil { + dAtA[i] = 0x50 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.InEchoReps)) + } + if m.InTimestamps != nil { + dAtA[i] = 0x58 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.InTimestamps)) + } + if m.InTimestampReps != nil { + dAtA[i] = 0x60 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.InTimestampReps)) + } + if m.InAddrMasks != nil { + dAtA[i] = 0x68 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.InAddrMasks)) + } + if m.InAddrMaskReps != nil { + dAtA[i] = 0x70 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.InAddrMaskReps)) + } + if m.OutMsgs != nil { + dAtA[i] = 0x78 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.OutMsgs)) + } + if m.OutErrors != nil { + dAtA[i] = 0x80 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.OutErrors)) + } + if m.OutDestUnreachs != nil { + dAtA[i] = 0x88 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.OutDestUnreachs)) + } + if m.OutTimeExcds != nil { + dAtA[i] = 0x90 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.OutTimeExcds)) + } + if m.OutParmProbs != nil { + dAtA[i] = 0x98 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.OutParmProbs)) + } + if m.OutSrcQuenchs != nil { + dAtA[i] = 
0xa0 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.OutSrcQuenchs)) + } + if m.OutRedirects != nil { + dAtA[i] = 0xa8 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.OutRedirects)) + } + if m.OutEchos != nil { + dAtA[i] = 0xb0 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.OutEchos)) + } + if m.OutEchoReps != nil { + dAtA[i] = 0xb8 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.OutEchoReps)) + } + if m.OutTimestamps != nil { + dAtA[i] = 0xc0 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.OutTimestamps)) + } + if m.OutTimestampReps != nil { + dAtA[i] = 0xc8 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.OutTimestampReps)) + } + if m.OutAddrMasks != nil { + dAtA[i] = 0xd0 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.OutAddrMasks)) + } + if m.OutAddrMaskReps != nil { + dAtA[i] = 0xd8 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.OutAddrMaskReps)) + } + return i, nil +} + +func (m *TcpStatistics) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TcpStatistics) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RtoAlgorithm != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.RtoAlgorithm)) + } + if m.RtoMin != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.RtoMin)) + } + if m.RtoMax != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.RtoMax)) + } + if m.MaxConn != nil { + dAtA[i] = 0x20 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.MaxConn)) + } + if m.ActiveOpens != nil { + dAtA[i] = 0x28 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.ActiveOpens)) + } + if m.PassiveOpens != nil { + dAtA[i] = 0x30 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.PassiveOpens)) + } + if m.AttemptFails != nil { + dAtA[i] = 0x38 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.AttemptFails)) + } + if m.EstabResets != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.EstabResets)) + } + if m.CurrEstab != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.CurrEstab)) + } + if m.InSegs != nil { + dAtA[i] = 0x50 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.InSegs)) + } + if m.OutSegs != nil { + dAtA[i] = 0x58 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.OutSegs)) + } + if m.RetransSegs != nil { + dAtA[i] = 0x60 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.RetransSegs)) + } + if m.InErrs != nil { + dAtA[i] = 0x68 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.InErrs)) + } + if m.OutRsts != nil { + dAtA[i] = 0x70 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.OutRsts)) + } + if m.InCsumErrors != nil { + dAtA[i] = 0x78 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.InCsumErrors)) + } + return i, nil +} + +func (m *UdpStatistics) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UdpStatistics) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.InDatagrams != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.InDatagrams)) + } + if m.NoPorts != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintMesos(dAtA, i, 
uint64(*m.NoPorts)) + } + if m.InErrors != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.InErrors)) + } + if m.OutDatagrams != nil { + dAtA[i] = 0x20 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.OutDatagrams)) + } + if m.RcvbufErrors != nil { + dAtA[i] = 0x28 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.RcvbufErrors)) + } + if m.SndbufErrors != nil { + dAtA[i] = 0x30 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.SndbufErrors)) + } + if m.InCsumErrors != nil { + dAtA[i] = 0x38 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.InCsumErrors)) + } + if m.IgnoredMulti != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.IgnoredMulti)) + } + return i, nil +} + +func (m *SNMPStatistics) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SNMPStatistics) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.IPStats != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.IPStats.ProtoSize())) + n61, err := m.IPStats.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n61 + } + if m.ICMPStats != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.ICMPStats.ProtoSize())) + n62, err := m.ICMPStats.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n62 + } + if m.TCPStats != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.TCPStats.ProtoSize())) + n63, err := m.TCPStats.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n63 + } + if m.UDPStats != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.UDPStats.ProtoSize())) + n64, err := m.UDPStats.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n64 + } + return i, nil +} + +func (m *DiskStatistics) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DiskStatistics) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Source != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Source.ProtoSize())) + n65, err := m.Source.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n65 + } + if m.Persistence != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Persistence.ProtoSize())) + n66, err := m.Persistence.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n66 + } + if m.LimitBytes != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.LimitBytes)) + } + if m.UsedBytes != nil { + dAtA[i] = 0x20 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.UsedBytes)) + } + return i, nil +} + +func (m *ResourceStatistics) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceStatistics) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x9 + i++ + i = encodeFixed64Mesos(dAtA, i, uint64(math.Float64bits(float64(m.Timestamp)))) + if m.CPUsUserTimeSecs != nil { + dAtA[i] = 0x11 + i++ + i = encodeFixed64Mesos(dAtA, i, uint64(math.Float64bits(float64(*m.CPUsUserTimeSecs)))) + } + if m.CPUsSystemTimeSecs != nil { + dAtA[i] = 0x19 + i++ + i = encodeFixed64Mesos(dAtA, i, 
uint64(math.Float64bits(float64(*m.CPUsSystemTimeSecs)))) + } + if m.CPUsLimit != nil { + dAtA[i] = 0x21 + i++ + i = encodeFixed64Mesos(dAtA, i, uint64(math.Float64bits(float64(*m.CPUsLimit)))) + } + if m.MemRSSBytes != nil { + dAtA[i] = 0x28 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.MemRSSBytes)) + } + if m.MemLimitBytes != nil { + dAtA[i] = 0x30 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.MemLimitBytes)) + } + if m.CPUsNrPeriods != nil { + dAtA[i] = 0x38 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.CPUsNrPeriods)) + } + if m.CPUsNrThrottled != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.CPUsNrThrottled)) + } + if m.CPUsThrottledTimeSecs != nil { + dAtA[i] = 0x49 + i++ + i = encodeFixed64Mesos(dAtA, i, uint64(math.Float64bits(float64(*m.CPUsThrottledTimeSecs)))) + } + if m.MemFileBytes != nil { + dAtA[i] = 0x50 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.MemFileBytes)) + } + if m.MemAnonBytes != nil { + dAtA[i] = 0x58 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.MemAnonBytes)) + } + if m.MemMappedFileBytes != nil { + dAtA[i] = 0x60 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.MemMappedFileBytes)) + } + if m.Perf != nil { + dAtA[i] = 0x6a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Perf.ProtoSize())) + n67, err := m.Perf.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n67 + } + if m.NetRxPackets != nil { + dAtA[i] = 0x70 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.NetRxPackets)) + } + if m.NetRxBytes != nil { + dAtA[i] = 0x78 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.NetRxBytes)) + } + if m.NetRxErrors != nil { + dAtA[i] = 0x80 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.NetRxErrors)) + } + if m.NetRxDropped != nil { + dAtA[i] = 0x88 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.NetRxDropped)) + } + if m.NetTxPackets != nil { + dAtA[i] = 0x90 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.NetTxPackets)) + } + if m.NetTxBytes != nil { + dAtA[i] = 0x98 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.NetTxBytes)) + } + if m.NetTxErrors != nil { + dAtA[i] = 0xa0 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.NetTxErrors)) + } + if m.NetTxDropped != nil { + dAtA[i] = 0xa8 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.NetTxDropped)) + } + if m.NetTCPRttMicrosecsP50 != nil { + dAtA[i] = 0xb1 + i++ + dAtA[i] = 0x1 + i++ + i = encodeFixed64Mesos(dAtA, i, uint64(math.Float64bits(float64(*m.NetTCPRttMicrosecsP50)))) + } + if m.NetTCPRttMicrosecsP90 != nil { + dAtA[i] = 0xb9 + i++ + dAtA[i] = 0x1 + i++ + i = encodeFixed64Mesos(dAtA, i, uint64(math.Float64bits(float64(*m.NetTCPRttMicrosecsP90)))) + } + if m.NetTCPRttMicrosecsP95 != nil { + dAtA[i] = 0xc1 + i++ + dAtA[i] = 0x1 + i++ + i = encodeFixed64Mesos(dAtA, i, uint64(math.Float64bits(float64(*m.NetTCPRttMicrosecsP95)))) + } + if m.NetTCPRttMicrosecsP99 != nil { + dAtA[i] = 0xc9 + i++ + dAtA[i] = 0x1 + i++ + i = encodeFixed64Mesos(dAtA, i, uint64(math.Float64bits(float64(*m.NetTCPRttMicrosecsP99)))) + } + if m.DiskLimitBytes != nil { + dAtA[i] = 0xd0 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.DiskLimitBytes)) + } + if m.DiskUsedBytes != nil { + dAtA[i] = 0xd8 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.DiskUsedBytes)) + } + if m.NetTCPActiveConnections != nil { + dAtA[i] = 0xe1 + i++ + dAtA[i] = 0x1 + i++ + i = encodeFixed64Mesos(dAtA, i, 
uint64(math.Float64bits(float64(*m.NetTCPActiveConnections)))) + } + if m.NetTCPTimeWaitConnections != nil { + dAtA[i] = 0xe9 + i++ + dAtA[i] = 0x1 + i++ + i = encodeFixed64Mesos(dAtA, i, uint64(math.Float64bits(float64(*m.NetTCPTimeWaitConnections)))) + } + if m.Processes != nil { + dAtA[i] = 0xf0 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Processes)) + } + if m.Threads != nil { + dAtA[i] = 0xf8 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Threads)) + } + if m.MemLowPressureCounter != nil { + dAtA[i] = 0x80 + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.MemLowPressureCounter)) + } + if m.MemMediumPressureCounter != nil { + dAtA[i] = 0x88 + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.MemMediumPressureCounter)) + } + if m.MemCriticalPressureCounter != nil { + dAtA[i] = 0x90 + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.MemCriticalPressureCounter)) + } + if len(m.NetTrafficControlStatistics) > 0 { + for _, msg := range m.NetTrafficControlStatistics { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.MemTotalBytes != nil { + dAtA[i] = 0xa0 + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.MemTotalBytes)) + } + if m.MemTotalMemswBytes != nil { + dAtA[i] = 0xa8 + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.MemTotalMemswBytes)) + } + if m.MemSoftLimitBytes != nil { + dAtA[i] = 0xb0 + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.MemSoftLimitBytes)) + } + if m.MemCacheBytes != nil { + dAtA[i] = 0xb8 + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.MemCacheBytes)) + } + if m.MemSwapBytes != nil { + dAtA[i] = 0xc0 + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.MemSwapBytes)) + } + if m.MemUnevictableBytes != nil { + dAtA[i] = 0xc8 + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.MemUnevictableBytes)) + } + if m.NetSNMPStatistics != nil { + dAtA[i] = 0xd2 + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.NetSNMPStatistics.ProtoSize())) + n68, err := m.NetSNMPStatistics.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n68 + } + if len(m.DiskStatistics) > 0 { + for _, msg := range m.DiskStatistics { + dAtA[i] = 0xda + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.BlkioStatistics != nil { + dAtA[i] = 0xe2 + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.BlkioStatistics.ProtoSize())) + n69, err := m.BlkioStatistics.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n69 + } + return i, nil +} + +func (m *ResourceUsage) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceUsage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Executors) > 0 { + for _, msg := range m.Executors { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Total) > 0 { + for _, msg := range m.Total { + 
dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ResourceUsage_Executor) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceUsage_Executor) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.ExecutorInfo.ProtoSize())) + n70, err := m.ExecutorInfo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n70 + if len(m.Allocated) > 0 { + for _, msg := range m.Allocated { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Statistics != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Statistics.ProtoSize())) + n71, err := m.Statistics.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n71 + } + dAtA[i] = 0x22 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.ContainerID.ProtoSize())) + n72, err := m.ContainerID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n72 + if len(m.Tasks) > 0 { + for _, msg := range m.Tasks { + dAtA[i] = 0x2a + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ResourceUsage_Executor_Task) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceUsage_Executor_Task) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.ID.ProtoSize())) + n73, err := m.ID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n73 + if len(m.Resources) > 0 { + for _, msg := range m.Resources { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Labels != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Labels.ProtoSize())) + n74, err := m.Labels.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n74 + } + return i, nil +} + +func (m *PerfStatistics) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PerfStatistics) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x9 + i++ + i = encodeFixed64Mesos(dAtA, i, uint64(math.Float64bits(float64(m.Timestamp)))) + dAtA[i] = 0x11 + i++ + i = encodeFixed64Mesos(dAtA, i, uint64(math.Float64bits(float64(m.Duration)))) + if m.Cycles != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Cycles)) + } + if m.StalledCyclesFrontend != nil { + dAtA[i] = 0x20 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.StalledCyclesFrontend)) + } + if m.StalledCyclesBackend != nil { + dAtA[i] = 0x28 + i++ + i = encodeVarintMesos(dAtA, i, 
uint64(*m.StalledCyclesBackend)) + } + if m.Instructions != nil { + dAtA[i] = 0x30 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Instructions)) + } + if m.CacheReferences != nil { + dAtA[i] = 0x38 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.CacheReferences)) + } + if m.CacheMisses != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.CacheMisses)) + } + if m.Branches != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Branches)) + } + if m.BranchMisses != nil { + dAtA[i] = 0x50 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.BranchMisses)) + } + if m.BusCycles != nil { + dAtA[i] = 0x58 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.BusCycles)) + } + if m.RefCycles != nil { + dAtA[i] = 0x60 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.RefCycles)) + } + if m.CPUClock != nil { + dAtA[i] = 0x69 + i++ + i = encodeFixed64Mesos(dAtA, i, uint64(math.Float64bits(float64(*m.CPUClock)))) + } + if m.TaskClock != nil { + dAtA[i] = 0x71 + i++ + i = encodeFixed64Mesos(dAtA, i, uint64(math.Float64bits(float64(*m.TaskClock)))) + } + if m.PageFaults != nil { + dAtA[i] = 0x78 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.PageFaults)) + } + if m.MinorFaults != nil { + dAtA[i] = 0x80 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.MinorFaults)) + } + if m.MajorFaults != nil { + dAtA[i] = 0x88 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.MajorFaults)) + } + if m.ContextSwitches != nil { + dAtA[i] = 0x90 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.ContextSwitches)) + } + if m.CPUMigrations != nil { + dAtA[i] = 0x98 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.CPUMigrations)) + } + if m.AlignmentFaults != nil { + dAtA[i] = 0xa0 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.AlignmentFaults)) + } + if m.EmulationFaults != nil { + dAtA[i] = 0xa8 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.EmulationFaults)) + } + if m.L1DcacheLoads != nil { + dAtA[i] = 0xb0 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.L1DcacheLoads)) + } + if m.L1DcacheLoadMisses != nil { + dAtA[i] = 0xb8 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.L1DcacheLoadMisses)) + } + if m.L1DcacheStores != nil { + dAtA[i] = 0xc0 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.L1DcacheStores)) + } + if m.L1DcacheStoreMisses != nil { + dAtA[i] = 0xc8 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.L1DcacheStoreMisses)) + } + if m.L1DcachePrefetches != nil { + dAtA[i] = 0xd0 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.L1DcachePrefetches)) + } + if m.L1DcachePrefetchMisses != nil { + dAtA[i] = 0xd8 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.L1DcachePrefetchMisses)) + } + if m.L1IcacheLoads != nil { + dAtA[i] = 0xe0 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.L1IcacheLoads)) + } + if m.L1IcacheLoadMisses != nil { + dAtA[i] = 0xe8 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.L1IcacheLoadMisses)) + } + if m.L1IcachePrefetches != nil { + dAtA[i] = 0xf0 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.L1IcachePrefetches)) + } + if m.L1IcachePrefetchMisses != nil { + dAtA[i] = 0xf8 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.L1IcachePrefetchMisses)) + } + if m.LLCLoads != nil { + dAtA[i] = 0x80 + i++ + 
dAtA[i] = 0x2 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.LLCLoads)) + } + if m.LLCLoadMisses != nil { + dAtA[i] = 0x88 + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.LLCLoadMisses)) + } + if m.LLCStores != nil { + dAtA[i] = 0x90 + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.LLCStores)) + } + if m.LLCStoreMisses != nil { + dAtA[i] = 0x98 + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.LLCStoreMisses)) + } + if m.LLCPrefetches != nil { + dAtA[i] = 0xa0 + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.LLCPrefetches)) + } + if m.LLCPrefetchMisses != nil { + dAtA[i] = 0xa8 + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.LLCPrefetchMisses)) + } + if m.DTLBLoads != nil { + dAtA[i] = 0xb0 + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.DTLBLoads)) + } + if m.DTLBLoadMisses != nil { + dAtA[i] = 0xb8 + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.DTLBLoadMisses)) + } + if m.DTLBStores != nil { + dAtA[i] = 0xc0 + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.DTLBStores)) + } + if m.DTLBStoreMisses != nil { + dAtA[i] = 0xc8 + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.DTLBStoreMisses)) + } + if m.DTLBPrefetches != nil { + dAtA[i] = 0xd0 + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.DTLBPrefetches)) + } + if m.DTLBPrefetchMisses != nil { + dAtA[i] = 0xd8 + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.DTLBPrefetchMisses)) + } + if m.ITLBLoads != nil { + dAtA[i] = 0xe0 + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.ITLBLoads)) + } + if m.ITLBLoadMisses != nil { + dAtA[i] = 0xe8 + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.ITLBLoadMisses)) + } + if m.BranchLoads != nil { + dAtA[i] = 0xf0 + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.BranchLoads)) + } + if m.BranchLoadMisses != nil { + dAtA[i] = 0xf8 + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.BranchLoadMisses)) + } + if m.NodeLoads != nil { + dAtA[i] = 0x80 + i++ + dAtA[i] = 0x3 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.NodeLoads)) + } + if m.NodeLoadMisses != nil { + dAtA[i] = 0x88 + i++ + dAtA[i] = 0x3 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.NodeLoadMisses)) + } + if m.NodeStores != nil { + dAtA[i] = 0x90 + i++ + dAtA[i] = 0x3 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.NodeStores)) + } + if m.NodeStoreMisses != nil { + dAtA[i] = 0x98 + i++ + dAtA[i] = 0x3 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.NodeStoreMisses)) + } + if m.NodePrefetches != nil { + dAtA[i] = 0xa0 + i++ + dAtA[i] = 0x3 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.NodePrefetches)) + } + if m.NodePrefetchMisses != nil { + dAtA[i] = 0xa8 + i++ + dAtA[i] = 0x3 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.NodePrefetchMisses)) + } + return i, nil +} + +func (m *Request) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Request) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.AgentID != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.AgentID.ProtoSize())) + n75, err := m.AgentID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n75 + } + if len(m.Resources) > 0 { + for _, msg 
:= range m.Resources { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Offer) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Offer) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.ID.ProtoSize())) + n76, err := m.ID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n76 + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.FrameworkID.ProtoSize())) + n77, err := m.FrameworkID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n77 + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.AgentID.ProtoSize())) + n78, err := m.AgentID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n78 + dAtA[i] = 0x22 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Hostname))) + i += copy(dAtA[i:], m.Hostname) + if len(m.Resources) > 0 { + for _, msg := range m.Resources { + dAtA[i] = 0x2a + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.ExecutorIDs) > 0 { + for _, msg := range m.ExecutorIDs { + dAtA[i] = 0x32 + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Attributes) > 0 { + for _, msg := range m.Attributes { + dAtA[i] = 0x3a + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.URL != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.URL.ProtoSize())) + n79, err := m.URL.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n79 + } + if m.Unavailability != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Unavailability.ProtoSize())) + n80, err := m.Unavailability.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n80 + } + if m.AllocationInfo != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.AllocationInfo.ProtoSize())) + n81, err := m.AllocationInfo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n81 + } + if m.Domain != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Domain.ProtoSize())) + n82, err := m.Domain.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n82 + } + return i, nil +} + +func (m *Offer_Operation) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Offer_Operation) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Type)) + if m.Launch != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Launch.ProtoSize())) + n83, err := m.Launch.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n83 + } + if m.Reserve != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Reserve.ProtoSize())) + n84, err := m.Reserve.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i 
+= n84 + } + if m.Unreserve != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Unreserve.ProtoSize())) + n85, err := m.Unreserve.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n85 + } + if m.Create != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Create.ProtoSize())) + n86, err := m.Create.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n86 + } + if m.Destroy != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Destroy.ProtoSize())) + n87, err := m.Destroy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n87 + } + if m.LaunchGroup != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.LaunchGroup.ProtoSize())) + n88, err := m.LaunchGroup.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n88 + } + if m.ID != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.ID.ProtoSize())) + n89, err := m.ID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n89 + } + if m.GrowVolume != nil { + dAtA[i] = 0x6a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.GrowVolume.ProtoSize())) + n90, err := m.GrowVolume.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n90 + } + if m.ShrinkVolume != nil { + dAtA[i] = 0x72 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.ShrinkVolume.ProtoSize())) + n91, err := m.ShrinkVolume.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n91 + } + if m.CreateDisk != nil { + dAtA[i] = 0x7a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.CreateDisk.ProtoSize())) + n92, err := m.CreateDisk.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n92 + } + if m.DestroyDisk != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.DestroyDisk.ProtoSize())) + n93, err := m.DestroyDisk.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n93 + } + return i, nil +} + +func (m *Offer_Operation_Launch) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Offer_Operation_Launch) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.TaskInfos) > 0 { + for _, msg := range m.TaskInfos { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Offer_Operation_LaunchGroup) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Offer_Operation_LaunchGroup) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Executor.ProtoSize())) + n94, err := m.Executor.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n94 + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.TaskGroup.ProtoSize())) + n95, err := m.TaskGroup.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n95 + return i, nil +} + +func (m *Offer_Operation_Reserve) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Offer_Operation_Reserve) 
MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Resources) > 0 { + for _, msg := range m.Resources { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Offer_Operation_Unreserve) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Offer_Operation_Unreserve) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Resources) > 0 { + for _, msg := range m.Resources { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Offer_Operation_Create) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Offer_Operation_Create) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Volumes) > 0 { + for _, msg := range m.Volumes { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Offer_Operation_Destroy) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Offer_Operation_Destroy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Volumes) > 0 { + for _, msg := range m.Volumes { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Offer_Operation_GrowVolume) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Offer_Operation_GrowVolume) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Volume.ProtoSize())) + n96, err := m.Volume.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n96 + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Addition.ProtoSize())) + n97, err := m.Addition.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n97 + return i, nil +} + +func (m *Offer_Operation_ShrinkVolume) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Offer_Operation_ShrinkVolume) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Volume.ProtoSize())) + n98, err := m.Volume.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n98 + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Subtract.ProtoSize())) + n99, err := m.Subtract.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n99 + return i, nil +} + +func 
(m *Offer_Operation_CreateDisk) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Offer_Operation_CreateDisk) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Source.ProtoSize())) + n100, err := m.Source.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n100 + dAtA[i] = 0x10 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.TargetType)) + return i, nil +} + +func (m *Offer_Operation_DestroyDisk) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Offer_Operation_DestroyDisk) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Source.ProtoSize())) + n101, err := m.Source.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n101 + return i, nil +} + +func (m *InverseOffer) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InverseOffer) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.OfferID.ProtoSize())) + n102, err := m.OfferID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n102 + if m.URL != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.URL.ProtoSize())) + n103, err := m.URL.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n103 + } + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.FrameworkID.ProtoSize())) + n104, err := m.FrameworkID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n104 + if m.AgentID != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.AgentID.ProtoSize())) + n105, err := m.AgentID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n105 + } + dAtA[i] = 0x2a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Unavailability.ProtoSize())) + n106, err := m.Unavailability.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n106 + if len(m.Resources) > 0 { + for _, msg := range m.Resources { + dAtA[i] = 0x32 + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *TaskInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.TaskID.ProtoSize())) + n107, err := m.TaskID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n107 + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.AgentID.ProtoSize())) + n108, err := m.AgentID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n108 + if len(m.Resources) > 0 { + for _, msg := range m.Resources { + dAtA[i] = 0x22 
+ i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Executor != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Executor.ProtoSize())) + n109, err := m.Executor.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n109 + } + if m.Data != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if m.Command != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Command.ProtoSize())) + n110, err := m.Command.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n110 + } + if m.HealthCheck != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.HealthCheck.ProtoSize())) + n111, err := m.HealthCheck.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n111 + } + if m.Container != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Container.ProtoSize())) + n112, err := m.Container.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n112 + } + if m.Labels != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Labels.ProtoSize())) + n113, err := m.Labels.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n113 + } + if m.Discovery != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Discovery.ProtoSize())) + n114, err := m.Discovery.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n114 + } + if m.KillPolicy != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.KillPolicy.ProtoSize())) + n115, err := m.KillPolicy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n115 + } + if m.Check != nil { + dAtA[i] = 0x6a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Check.ProtoSize())) + n116, err := m.Check.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n116 + } + if m.MaxCompletionTime != nil { + dAtA[i] = 0x72 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.MaxCompletionTime.ProtoSize())) + n117, err := m.MaxCompletionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n117 + } + return i, nil +} + +func (m *TaskGroupInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskGroupInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Tasks) > 0 { + for _, msg := range m.Tasks { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Task) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Task) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.TaskID.ProtoSize())) + n118, err := m.TaskID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n118 + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.FrameworkID.ProtoSize())) + n119, err := m.FrameworkID.MarshalTo(dAtA[i:]) + if err != 
nil { + return 0, err + } + i += n119 + if m.ExecutorID != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.ExecutorID.ProtoSize())) + n120, err := m.ExecutorID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n120 + } + dAtA[i] = 0x2a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.AgentID.ProtoSize())) + n121, err := m.AgentID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n121 + if m.State == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("state") + } else { + dAtA[i] = 0x30 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.State)) + } + if len(m.Resources) > 0 { + for _, msg := range m.Resources { + dAtA[i] = 0x3a + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Statuses) > 0 { + for _, msg := range m.Statuses { + dAtA[i] = 0x42 + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.StatusUpdateState != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.StatusUpdateState)) + } + if m.StatusUpdateUUID != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.StatusUpdateUUID))) + i += copy(dAtA[i:], m.StatusUpdateUUID) + } + if m.Labels != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Labels.ProtoSize())) + n122, err := m.Labels.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n122 + } + if m.Discovery != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Discovery.ProtoSize())) + n123, err := m.Discovery.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n123 + } + if m.Container != nil { + dAtA[i] = 0x6a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Container.ProtoSize())) + n124, err := m.Container.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n124 + } + if m.User != nil { + dAtA[i] = 0x72 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.User))) + i += copy(dAtA[i:], *m.User) + } + return i, nil +} + +func (m *TaskResourceLimitation) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskResourceLimitation) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Resources) > 0 { + for _, msg := range m.Resources { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *UUID) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UUID) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Value == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("value") + } else { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func (m *Operation) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Operation) MarshalTo(dAtA 
[]byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.FrameworkID != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.FrameworkID.ProtoSize())) + n125, err := m.FrameworkID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n125 + } + if m.AgentID != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.AgentID.ProtoSize())) + n126, err := m.AgentID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n126 + } + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Info.ProtoSize())) + n127, err := m.Info.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n127 + dAtA[i] = 0x22 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.LatestStatus.ProtoSize())) + n128, err := m.LatestStatus.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n128 + if len(m.Statuses) > 0 { + for _, msg := range m.Statuses { + dAtA[i] = 0x2a + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x32 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.UUID.ProtoSize())) + n129, err := m.UUID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n129 + return i, nil +} + +func (m *OperationStatus) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OperationStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.OperationID != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.OperationID.ProtoSize())) + n130, err := m.OperationID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n130 + } + dAtA[i] = 0x10 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.State)) + if m.Message != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Message))) + i += copy(dAtA[i:], *m.Message) + } + if len(m.ConvertedResources) > 0 { + for _, msg := range m.ConvertedResources { + dAtA[i] = 0x22 + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.UUID != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.UUID.ProtoSize())) + n131, err := m.UUID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n131 + } + return i, nil +} + +func (m *CheckStatusInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CheckStatusInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Type != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Type)) + } + if m.Command != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Command.ProtoSize())) + n132, err := m.Command.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n132 + } + if m.HTTP != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.HTTP.ProtoSize())) + n133, err := m.HTTP.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n133 + } + if m.TCP != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.TCP.ProtoSize())) + n134, err := m.TCP.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n134 + } + 
return i, nil +} + +func (m *CheckStatusInfo_Command) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CheckStatusInfo_Command) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ExitCode != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.ExitCode)) + } + return i, nil +} + +func (m *CheckStatusInfo_Http) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CheckStatusInfo_Http) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.StatusCode != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.StatusCode)) + } + return i, nil +} + +func (m *CheckStatusInfo_Tcp) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CheckStatusInfo_Tcp) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Succeeded != nil { + dAtA[i] = 0x8 + i++ + if *m.Succeeded { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *TaskStatus) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.TaskID.ProtoSize())) + n135, err := m.TaskID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n135 + if m.State == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("state") + } else { + dAtA[i] = 0x10 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.State)) + } + if m.Data != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if m.Message != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Message))) + i += copy(dAtA[i:], *m.Message) + } + if m.AgentID != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.AgentID.ProtoSize())) + n136, err := m.AgentID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n136 + } + if m.Timestamp != nil { + dAtA[i] = 0x31 + i++ + i = encodeFixed64Mesos(dAtA, i, uint64(math.Float64bits(float64(*m.Timestamp)))) + } + if m.ExecutorID != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.ExecutorID.ProtoSize())) + n137, err := m.ExecutorID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n137 + } + if m.Healthy != nil { + dAtA[i] = 0x40 + i++ + if *m.Healthy { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Source != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Source)) + } + if m.Reason != nil { + dAtA[i] = 0x50 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Reason)) + } + if m.UUID != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.UUID))) + i += copy(dAtA[i:], m.UUID) + } + if m.Labels != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Labels.ProtoSize())) + n138, err := m.Labels.MarshalTo(dAtA[i:]) + if err != nil { + 
return 0, err + } + i += n138 + } + if m.ContainerStatus != nil { + dAtA[i] = 0x6a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.ContainerStatus.ProtoSize())) + n139, err := m.ContainerStatus.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n139 + } + if m.UnreachableTime != nil { + dAtA[i] = 0x72 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.UnreachableTime.ProtoSize())) + n140, err := m.UnreachableTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n140 + } + if m.CheckStatus != nil { + dAtA[i] = 0x7a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.CheckStatus.ProtoSize())) + n141, err := m.CheckStatus.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n141 + } + if m.Limitation != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Limitation.ProtoSize())) + n142, err := m.Limitation.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n142 + } + return i, nil +} + +func (m *Filters) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Filters) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RefuseSeconds != nil { + dAtA[i] = 0x9 + i++ + i = encodeFixed64Mesos(dAtA, i, uint64(math.Float64bits(float64(*m.RefuseSeconds)))) + } + return i, nil +} + +func (m *Environment) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Environment) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Variables) > 0 { + for _, msg := range m.Variables { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Environment_Variable) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Environment_Variable) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if m.Value != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Value))) + i += copy(dAtA[i:], *m.Value) + } + if m.Type != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Type)) + } + if m.Secret != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Secret.ProtoSize())) + n143, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n143 + } + return i, nil +} + +func (m *Parameter) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Parameter) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + return i, nil +} + +func (m *Parameters) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = 
make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Parameters) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Parameter) > 0 { + for _, msg := range m.Parameter { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Credential) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Credential) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Principal))) + i += copy(dAtA[i:], m.Principal) + if m.Secret != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Secret))) + i += copy(dAtA[i:], *m.Secret) + } + return i, nil +} + +func (m *Credentials) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Credentials) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Credentials) > 0 { + for _, msg := range m.Credentials { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Secret) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Secret) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Type)) + if m.Reference != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Reference.ProtoSize())) + n144, err := m.Reference.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n144 + } + if m.Value != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Value.ProtoSize())) + n145, err := m.Value.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n145 + } + return i, nil +} + +func (m *Secret_Reference) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Secret_Reference) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if m.Key != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Key))) + i += copy(dAtA[i:], *m.Key) + } + return i, nil +} + +func (m *Secret_Value) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Secret_Value) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Data == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("data") + } else { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + 
return i, nil +} + +func (m *RateLimit) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.QPS != nil { + dAtA[i] = 0x9 + i++ + i = encodeFixed64Mesos(dAtA, i, uint64(math.Float64bits(float64(*m.QPS)))) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Principal))) + i += copy(dAtA[i:], m.Principal) + if m.Capacity != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Capacity)) + } + return i, nil +} + +func (m *RateLimits) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimits) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Limits) > 0 { + for _, msg := range m.Limits { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.AggregateDefaultQPS != nil { + dAtA[i] = 0x11 + i++ + i = encodeFixed64Mesos(dAtA, i, uint64(math.Float64bits(float64(*m.AggregateDefaultQPS)))) + } + if m.AggregateDefaultCapacity != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.AggregateDefaultCapacity)) + } + return i, nil +} + +func (m *Image) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Image) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Type == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("type") + } else { + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Type)) + } + if m.Appc != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Appc.ProtoSize())) + n146, err := m.Appc.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n146 + } + if m.Docker != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Docker.ProtoSize())) + n147, err := m.Docker.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n147 + } + if m.Cached != nil { + dAtA[i] = 0x20 + i++ + if *m.Cached { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *Image_Appc) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Image_Appc) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if m.ID != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.ID))) + i += copy(dAtA[i:], *m.ID) + } + if m.Labels != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Labels.ProtoSize())) + n148, err := m.Labels.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n148 + } + return i, nil +} + +func (m *Image_Docker) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + 
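The byte constants threaded through these generated MarshalTo bodies (0xa, 0x12, 0x31, 0x8, ...) are protobuf field keys: key = (field_number << 3) | wire_type, followed by a base-128 varint for lengths and integer values, which is exactly the loop in encodeVarintMesos further down in this file. A minimal standalone sketch of that scheme (helper names here are illustrative, not part of the vendored file):

```go
package main

import "fmt"

// putUvarint appends v using protobuf base-128 varints: low 7 bits
// first, MSB set on every byte except the last. This mirrors the
// generated encodeVarintMesos helper defined later in this file.
func putUvarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

// tag packs a field number and wire type into a single key byte
// (valid for field numbers 1..15): key = fieldNumber<<3 | wireType.
func tag(fieldNumber, wireType uint64) byte {
	return byte(fieldNumber<<3 | wireType)
}

func main() {
	// Field 1, wire type 2 (length-delimited) -> 0x0a, as in `dAtA[i] = 0xa`.
	fmt.Printf("%#x\n", tag(1, 2)) // 0xa
	// Field 2, wire type 0 (varint) -> 0x10, as in `dAtA[i] = 0x10`.
	fmt.Printf("%#x\n", tag(2, 0)) // 0x10
	// Field 6, wire type 1 (64-bit fixed) -> 0x31, used for double fields
	// such as TaskStatus.Timestamp via encodeFixed64Mesos.
	fmt.Printf("%#x\n", tag(6, 1)) // 0x31
	// 300 encodes as two varint bytes: ac 02.
	fmt.Printf("% x\n", putUvarint(nil, 300))
}
```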
+func (m *Image_Docker) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if m.Credential != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Credential.ProtoSize())) + n149, err := m.Credential.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n149 + } + if m.Config != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Config.ProtoSize())) + n150, err := m.Config.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n150 + } + return i, nil +} + +func (m *MountPropagation) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MountPropagation) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Mode != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Mode)) + } + return i, nil +} + +func (m *Volume) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Volume) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.ContainerPath))) + i += copy(dAtA[i:], m.ContainerPath) + if m.HostPath != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.HostPath))) + i += copy(dAtA[i:], *m.HostPath) + } + if m.Mode == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("mode") + } else { + dAtA[i] = 0x18 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Mode)) + } + if m.Image != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Image.ProtoSize())) + n151, err := m.Image.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n151 + } + if m.Source != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Source.ProtoSize())) + n152, err := m.Source.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n152 + } + return i, nil +} + +func (m *Volume_Source) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Volume_Source) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Type)) + if m.DockerVolume != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.DockerVolume.ProtoSize())) + n153, err := m.DockerVolume.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n153 + } + if m.SandboxPath != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.SandboxPath.ProtoSize())) + n154, err := m.SandboxPath.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n154 + } + if m.Secret != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Secret.ProtoSize())) + n155, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n155 + } + if m.HostPath != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.HostPath.ProtoSize())) + n156, err := m.HostPath.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n156 + } + return i, nil +} + 
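The nil checks wrapped around every optional field above reflect the proto2 convention used throughout this file: optional scalars are generated as pointers and emitted only when set, bools collapse to a one-byte varint (0 or 1), and unset required fields surface as NewRequiredNotSetError rather than producing an invalid message. A hedged miniature of that pattern, assuming single-byte lengths for brevity (the type, field numbers, and names below are illustrative, not part of the vendored file):

```go
package main

import (
	"errors"
	"fmt"
)

// miniVolume imitates the generated shape: optional proto2 fields
// become pointers and are written only when non-nil.
type miniVolume struct {
	ContainerPath string  // required: always written
	HostPath      *string // optional: written only if set
	ReadOnly      *bool   // optional bool: one varint byte, 0 or 1
}

func (m *miniVolume) marshal() ([]byte, error) {
	if m.ContainerPath == "" {
		// Generated code reports unset required fields instead of
		// silently emitting an invalid message.
		return nil, errors.New("required field container_path not set")
	}
	b := []byte{0x0a, byte(len(m.ContainerPath))} // field 1, length-delimited
	b = append(b, m.ContainerPath...)
	if m.HostPath != nil {
		b = append(b, 0x12, byte(len(*m.HostPath))) // field 2, length-delimited
		b = append(b, *m.HostPath...)
	}
	if m.ReadOnly != nil {
		v := byte(0)
		if *m.ReadOnly {
			v = 1
		}
		b = append(b, 0x18, v) // field 3, varint
	}
	return b, nil
}

func main() {
	ro := true
	m := &miniVolume{ContainerPath: "/data", ReadOnly: &ro}
	out, err := m.marshal()
	fmt.Printf("%x %v\n", out, err) // 0a052f6461746118 01 <nil>
}
```

Nested messages follow the same length-delimited shape: the generated code writes the key, then ProtoSize() as a varint, then recurses with MarshalTo into the remaining slice, as in the n149/n150-style calls above.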
+func (m *Volume_Source_DockerVolume) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Volume_Source_DockerVolume) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Driver != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Driver))) + i += copy(dAtA[i:], *m.Driver) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if m.DriverOptions != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.DriverOptions.ProtoSize())) + n157, err := m.DriverOptions.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n157 + } + return i, nil +} + +func (m *Volume_Source_HostPath) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Volume_Source_HostPath) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + if m.MountPropagation != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.MountPropagation.ProtoSize())) + n158, err := m.MountPropagation.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n158 + } + return i, nil +} + +func (m *Volume_Source_SandboxPath) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Volume_Source_SandboxPath) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + return i, nil +} + +func (m *NetworkInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Groups) > 0 { + for _, s := range m.Groups { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.Labels != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Labels.ProtoSize())) + n159, err := m.Labels.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n159 + } + if len(m.IPAddresses) > 0 { + for _, msg := range m.IPAddresses { + dAtA[i] = 0x2a + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Name != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Name))) + i += copy(dAtA[i:], *m.Name) + } + if len(m.PortMappings) > 0 { + for _, msg := range m.PortMappings { + dAtA[i] = 0x3a + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NetworkInfo_IPAddress) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + 
dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkInfo_IPAddress) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Protocol != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Protocol)) + } + if m.IPAddress != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.IPAddress))) + i += copy(dAtA[i:], *m.IPAddress) + } + return i, nil +} + +func (m *NetworkInfo_PortMapping) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkInfo_PortMapping) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.HostPort)) + dAtA[i] = 0x10 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.ContainerPort)) + if m.Protocol != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Protocol))) + i += copy(dAtA[i:], *m.Protocol) + } + return i, nil +} + +func (m *CapabilityInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CapabilityInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Capabilities) > 0 { + for _, num := range m.Capabilities { + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(num)) + } + } + return i, nil +} + +func (m *LinuxInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LinuxInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.CapabilityInfo != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.CapabilityInfo.ProtoSize())) + n160, err := m.CapabilityInfo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n160 + } + if m.BoundingCapabilities != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.BoundingCapabilities.ProtoSize())) + n161, err := m.BoundingCapabilities.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n161 + } + if m.EffectiveCapabilities != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.EffectiveCapabilities.ProtoSize())) + n162, err := m.EffectiveCapabilities.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n162 + } + if m.SharePIDNamespace != nil { + dAtA[i] = 0x20 + i++ + if *m.SharePIDNamespace { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *RLimitInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RLimitInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Rlimits) > 0 { + for _, msg := range m.Rlimits { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *RLimitInfo_RLimit) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := 
m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RLimitInfo_RLimit) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Type)) + if m.Hard != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Hard)) + } + if m.Soft != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Soft)) + } + return i, nil +} + +func (m *TTYInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TTYInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.WindowSize != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.WindowSize.ProtoSize())) + n163, err := m.WindowSize.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n163 + } + return i, nil +} + +func (m *TTYInfo_WindowSize) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TTYInfo_WindowSize) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Rows)) + dAtA[i] = 0x10 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Columns)) + return i, nil +} + +func (m *ContainerInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Type == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("type") + } else { + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Type)) + } + if len(m.Volumes) > 0 { + for _, msg := range m.Volumes { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Docker != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Docker.ProtoSize())) + n164, err := m.Docker.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n164 + } + if m.Hostname != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Hostname))) + i += copy(dAtA[i:], *m.Hostname) + } + if m.Mesos != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Mesos.ProtoSize())) + n165, err := m.Mesos.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n165 + } + if len(m.NetworkInfos) > 0 { + for _, msg := range m.NetworkInfos { + dAtA[i] = 0x3a + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.LinuxInfo != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.LinuxInfo.ProtoSize())) + n166, err := m.LinuxInfo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n166 + } + if m.RlimitInfo != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.RlimitInfo.ProtoSize())) + n167, err := m.RlimitInfo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n167 + } + if m.TTYInfo != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintMesos(dAtA, 
i, uint64(m.TTYInfo.ProtoSize())) + n168, err := m.TTYInfo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n168 + } + return i, nil +} + +func (m *ContainerInfo_DockerInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerInfo_DockerInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Image))) + i += copy(dAtA[i:], m.Image) + if m.Network != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Network)) + } + if len(m.PortMappings) > 0 { + for _, msg := range m.PortMappings { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Privileged != nil { + dAtA[i] = 0x20 + i++ + if *m.Privileged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Parameters) > 0 { + for _, msg := range m.Parameters { + dAtA[i] = 0x2a + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.ForcePullImage != nil { + dAtA[i] = 0x30 + i++ + if *m.ForcePullImage { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.VolumeDriver != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.VolumeDriver))) + i += copy(dAtA[i:], *m.VolumeDriver) + } + return i, nil +} + +func (m *ContainerInfo_DockerInfo_PortMapping) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerInfo_DockerInfo_PortMapping) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.HostPort)) + dAtA[i] = 0x10 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.ContainerPort)) + if m.Protocol != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Protocol))) + i += copy(dAtA[i:], *m.Protocol) + } + return i, nil +} + +func (m *ContainerInfo_MesosInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerInfo_MesosInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Image != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Image.ProtoSize())) + n169, err := m.Image.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n169 + } + return i, nil +} + +func (m *ContainerStatus) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NetworkInfos) > 0 { + for _, msg := range m.NetworkInfos { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.CgroupInfo != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.CgroupInfo.ProtoSize())) + n170, err := 
m.CgroupInfo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n170 + } + if m.ExecutorPID != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.ExecutorPID)) + } + if m.ContainerID != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.ContainerID.ProtoSize())) + n171, err := m.ContainerID.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n171 + } + return i, nil +} + +func (m *CgroupInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CgroupInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.NetCLS != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.NetCLS.ProtoSize())) + n172, err := m.NetCLS.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n172 + } + return i, nil +} + +func (m *CgroupInfo_Blkio) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CgroupInfo_Blkio) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *CgroupInfo_Blkio_Value) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CgroupInfo_Blkio_Value) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Op != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Op)) + } + if m.Value != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Value)) + } + return i, nil +} + +func (m *CgroupInfo_Blkio_CFQ) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CgroupInfo_Blkio_CFQ) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *CgroupInfo_Blkio_CFQ_Statistics) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CgroupInfo_Blkio_CFQ_Statistics) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Device != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Device.ProtoSize())) + n173, err := m.Device.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n173 + } + if m.Sectors != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Sectors)) + } + if m.Time != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Time)) + } + if len(m.IOServiced) > 0 { + for _, msg := range m.IOServiced { + dAtA[i] = 0x22 + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.IOServiceBytes) > 0 { + for _, msg := range m.IOServiceBytes { + dAtA[i] = 0x2a + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.IOServiceTime) > 0 { + for _, msg := range m.IOServiceTime { + dAtA[i] = 0x32 + i++ 
+ i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.IOWaitTime) > 0 { + for _, msg := range m.IOWaitTime { + dAtA[i] = 0x3a + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.IOMerged) > 0 { + for _, msg := range m.IOMerged { + dAtA[i] = 0x42 + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.IOQueued) > 0 { + for _, msg := range m.IOQueued { + dAtA[i] = 0x4a + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CgroupInfo_Blkio_Throttling) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CgroupInfo_Blkio_Throttling) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *CgroupInfo_Blkio_Throttling_Statistics) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CgroupInfo_Blkio_Throttling_Statistics) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Device != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Device.ProtoSize())) + n174, err := m.Device.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n174 + } + if len(m.IOServiced) > 0 { + for _, msg := range m.IOServiced { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.IOServiceBytes) > 0 { + for _, msg := range m.IOServiceBytes { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CgroupInfo_Blkio_Statistics) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CgroupInfo_Blkio_Statistics) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.CFQ) > 0 { + for _, msg := range m.CFQ { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.CFQRecursive) > 0 { + for _, msg := range m.CFQRecursive { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Throttling) > 0 { + for _, msg := range m.Throttling { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CgroupInfo_NetCls) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + 
return nil, err + } + return dAtA[:n], nil +} + +func (m *CgroupInfo_NetCls) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ClassID != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.ClassID)) + } + return i, nil +} + +func (m *Labels) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Labels) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Labels) > 0 { + for _, msg := range m.Labels { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Label) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Label) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + if m.Value != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Value))) + i += copy(dAtA[i:], *m.Value) + } + return i, nil +} + +func (m *Port) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Port) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Number)) + if m.Name != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Name))) + i += copy(dAtA[i:], *m.Name) + } + if m.Protocol != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Protocol))) + i += copy(dAtA[i:], *m.Protocol) + } + if m.Visibility != nil { + dAtA[i] = 0x20 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Visibility)) + } + if m.Labels != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Labels.ProtoSize())) + n175, err := m.Labels.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n175 + } + return i, nil +} + +func (m *Ports) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Ports) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DiscoveryInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DiscoveryInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Visibility)) + if m.Name != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Name))) + i += copy(dAtA[i:], *m.Name) + } + if m.Environment != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, 
uint64(len(*m.Environment))) + i += copy(dAtA[i:], *m.Environment) + } + if m.Location != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Location))) + i += copy(dAtA[i:], *m.Location) + } + if m.Version != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Version))) + i += copy(dAtA[i:], *m.Version) + } + if m.Ports != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Ports.ProtoSize())) + n176, err := m.Ports.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n176 + } + if m.Labels != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Labels.ProtoSize())) + n177, err := m.Labels.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n177 + } + return i, nil +} + +func (m *WeightInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WeightInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x9 + i++ + i = encodeFixed64Mesos(dAtA, i, uint64(math.Float64bits(float64(m.Weight)))) + if m.Role != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Role))) + i += copy(dAtA[i:], *m.Role) + } + return i, nil +} + +func (m *VersionInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VersionInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) + if m.BuildDate != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.BuildDate))) + i += copy(dAtA[i:], *m.BuildDate) + } + if m.BuildTime != nil { + dAtA[i] = 0x19 + i++ + i = encodeFixed64Mesos(dAtA, i, uint64(math.Float64bits(float64(*m.BuildTime)))) + } + if m.BuildUser != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.BuildUser))) + i += copy(dAtA[i:], *m.BuildUser) + } + if m.GitSHA != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.GitSHA))) + i += copy(dAtA[i:], *m.GitSHA) + } + if m.GitBranch != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.GitBranch))) + i += copy(dAtA[i:], *m.GitBranch) + } + if m.GitTag != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.GitTag))) + i += copy(dAtA[i:], *m.GitTag) + } + return i, nil +} + +func (m *Flag) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Flag) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if m.Value != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Value))) + i += copy(dAtA[i:], *m.Value) + } + return i, nil +} + +func (m *Role) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Role) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = 
encodeVarintMesos(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x11 + i++ + i = encodeFixed64Mesos(dAtA, i, uint64(math.Float64bits(float64(m.Weight)))) + if len(m.Frameworks) > 0 { + for _, msg := range m.Frameworks { + dAtA[i] = 0x1a + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Resources) > 0 { + for _, msg := range m.Resources { + dAtA[i] = 0x22 + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Metric) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metric) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if m.Value != nil { + dAtA[i] = 0x11 + i++ + i = encodeFixed64Mesos(dAtA, i, uint64(math.Float64bits(float64(*m.Value)))) + } + return i, nil +} + +func (m *FileInfo) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FileInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + if m.Nlink != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Nlink)) + } + if m.Size != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Size)) + } + if m.Mtime != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Mtime.ProtoSize())) + n178, err := m.Mtime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n178 + } + if m.Mode != nil { + dAtA[i] = 0x28 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.Mode)) + } + if m.UID != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.UID))) + i += copy(dAtA[i:], *m.UID) + } + if m.GID != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.GID))) + i += copy(dAtA[i:], *m.GID) + } + return i, nil +} + +func (m *Device) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Device) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Path != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(len(*m.Path))) + i += copy(dAtA[i:], *m.Path) + } + if m.Number != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Number.ProtoSize())) + n179, err := m.Number.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n179 + } + return i, nil +} + +func (m *Device_Number) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Device_Number) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.MajorNumber == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("major_number") + } else { + dAtA[i] = 0x8 + i++ + i = 
encodeVarintMesos(dAtA, i, uint64(*m.MajorNumber)) + } + if m.MinorNumber == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("minor_number") + } else { + dAtA[i] = 0x10 + i++ + i = encodeVarintMesos(dAtA, i, uint64(*m.MinorNumber)) + } + return i, nil +} + +func (m *DeviceAccess) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceAccess) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Device.ProtoSize())) + n180, err := m.Device.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n180 + dAtA[i] = 0x12 + i++ + i = encodeVarintMesos(dAtA, i, uint64(m.Access.ProtoSize())) + n181, err := m.Access.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n181 + return i, nil +} + +func (m *DeviceAccess_Access) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceAccess_Access) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Read != nil { + dAtA[i] = 0x8 + i++ + if *m.Read { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Write != nil { + dAtA[i] = 0x10 + i++ + if *m.Write { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Mknod != nil { + dAtA[i] = 0x18 + i++ + if *m.Mknod { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *DeviceWhitelist) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceWhitelist) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.AllowedDevices) > 0 { + for _, msg := range m.AllowedDevices { + dAtA[i] = 0xa + i++ + i = encodeVarintMesos(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeFixed64Mesos(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Mesos(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintMesos(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func NewPopulatedFrameworkID(r randyMesos, easy bool) *FrameworkID { + this := &FrameworkID{} + this.Value = string(randStringMesos(r)) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedOfferID(r randyMesos, easy bool) *OfferID { + this := &OfferID{} + this.Value = string(randStringMesos(r)) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedAgentID(r randyMesos, easy bool) *AgentID { + this := &AgentID{} + this.Value = string(randStringMesos(r)) + if !easy && r.Intn(10) != 0 { + } + return 
this +} + +func NewPopulatedTaskID(r randyMesos, easy bool) *TaskID { + this := &TaskID{} + this.Value = string(randStringMesos(r)) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedExecutorID(r randyMesos, easy bool) *ExecutorID { + this := &ExecutorID{} + this.Value = string(randStringMesos(r)) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedContainerID(r randyMesos, easy bool) *ContainerID { + this := &ContainerID{} + this.Value = string(randStringMesos(r)) + if r.Intn(10) == 0 { + this.Parent = NewPopulatedContainerID(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResourceProviderID(r randyMesos, easy bool) *ResourceProviderID { + this := &ResourceProviderID{} + this.Value = string(randStringMesos(r)) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedOperationID(r randyMesos, easy bool) *OperationID { + this := &OperationID{} + this.Value = string(randStringMesos(r)) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedTimeInfo(r randyMesos, easy bool) *TimeInfo { + this := &TimeInfo{} + this.Nanoseconds = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Nanoseconds *= -1 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedDurationInfo(r randyMesos, easy bool) *DurationInfo { + this := &DurationInfo{} + this.Nanoseconds = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Nanoseconds *= -1 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedAddress(r randyMesos, easy bool) *Address { + this := &Address{} + if r.Intn(10) != 0 { + v1 := string(randStringMesos(r)) + this.Hostname = &v1 + } + if r.Intn(10) != 0 { + v2 := string(randStringMesos(r)) + this.IP = &v2 + } + this.Port = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Port *= -1 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedURL(r randyMesos, easy bool) *URL { + this := &URL{} + this.Scheme = string(randStringMesos(r)) + v3 := NewPopulatedAddress(r, easy) + this.Address = *v3 + if r.Intn(10) != 0 { + v4 := string(randStringMesos(r)) + this.Path = &v4 + } + if r.Intn(10) != 0 { + v5 := r.Intn(5) + this.Query = make([]Parameter, v5) + for i := 0; i < v5; i++ { + v6 := NewPopulatedParameter(r, easy) + this.Query[i] = *v6 + } + } + if r.Intn(10) != 0 { + v7 := string(randStringMesos(r)) + this.Fragment = &v7 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedUnavailability(r randyMesos, easy bool) *Unavailability { + this := &Unavailability{} + v8 := NewPopulatedTimeInfo(r, easy) + this.Start = *v8 + if r.Intn(10) != 0 { + this.Duration = NewPopulatedDurationInfo(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedMachineID(r randyMesos, easy bool) *MachineID { + this := &MachineID{} + if r.Intn(10) != 0 { + v9 := string(randStringMesos(r)) + this.Hostname = &v9 + } + if r.Intn(10) != 0 { + v10 := string(randStringMesos(r)) + this.IP = &v10 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedMachineInfo(r randyMesos, easy bool) *MachineInfo { + this := &MachineInfo{} + v11 := NewPopulatedMachineID(r, easy) + this.ID = *v11 + if r.Intn(10) != 0 { + v12 := MachineInfo_Mode([]int32{1, 2, 3}[r.Intn(3)]) + this.Mode = &v12 + } + if r.Intn(10) != 0 { + this.Unavailability = NewPopulatedUnavailability(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedFrameworkInfo(r randyMesos, easy bool) *FrameworkInfo { + this := 
&FrameworkInfo{} + this.User = string(randStringMesos(r)) + this.Name = string(randStringMesos(r)) + if r.Intn(10) != 0 { + this.ID = NewPopulatedFrameworkID(r, easy) + } + if r.Intn(10) != 0 { + v13 := float64(r.Float64()) + if r.Intn(2) == 0 { + v13 *= -1 + } + this.FailoverTimeout = &v13 + } + if r.Intn(10) != 0 { + v14 := bool(bool(r.Intn(2) == 0)) + this.Checkpoint = &v14 + } + if r.Intn(10) != 0 { + v15 := string(randStringMesos(r)) + this.Role = &v15 + } + if r.Intn(10) != 0 { + v16 := string(randStringMesos(r)) + this.Hostname = &v16 + } + if r.Intn(10) != 0 { + v17 := string(randStringMesos(r)) + this.Principal = &v17 + } + if r.Intn(10) != 0 { + v18 := string(randStringMesos(r)) + this.WebUiURL = &v18 + } + if r.Intn(10) != 0 { + v19 := r.Intn(5) + this.Capabilities = make([]FrameworkInfo_Capability, v19) + for i := 0; i < v19; i++ { + v20 := NewPopulatedFrameworkInfo_Capability(r, easy) + this.Capabilities[i] = *v20 + } + } + if r.Intn(10) != 0 { + this.Labels = NewPopulatedLabels(r, easy) + } + if r.Intn(10) != 0 { + v21 := r.Intn(10) + this.Roles = make([]string, v21) + for i := 0; i < v21; i++ { + this.Roles[i] = string(randStringMesos(r)) + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedFrameworkInfo_Capability(r randyMesos, easy bool) *FrameworkInfo_Capability { + this := &FrameworkInfo_Capability{} + this.Type = FrameworkInfo_Capability_Type([]int32{0, 1, 2, 3, 4, 5, 6, 7, 8}[r.Intn(9)]) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCheckInfo(r randyMesos, easy bool) *CheckInfo { + this := &CheckInfo{} + this.Type = CheckInfo_Type([]int32{0, 1, 2, 3}[r.Intn(4)]) + if r.Intn(10) != 0 { + this.Command = NewPopulatedCheckInfo_Command(r, easy) + } + if r.Intn(10) != 0 { + this.HTTP = NewPopulatedCheckInfo_Http(r, easy) + } + if r.Intn(10) != 0 { + v22 := float64(r.Float64()) + if r.Intn(2) == 0 { + v22 *= -1 + } + this.DelaySeconds = &v22 + } + if r.Intn(10) != 0 { + v23 := float64(r.Float64()) + if r.Intn(2) == 0 { + v23 *= -1 + } + this.IntervalSeconds = &v23 + } + if r.Intn(10) != 0 { + v24 := float64(r.Float64()) + if r.Intn(2) == 0 { + v24 *= -1 + } + this.TimeoutSeconds = &v24 + } + if r.Intn(10) != 0 { + this.TCP = NewPopulatedCheckInfo_Tcp(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCheckInfo_Command(r randyMesos, easy bool) *CheckInfo_Command { + this := &CheckInfo_Command{} + v25 := NewPopulatedCommandInfo(r, easy) + this.Command = *v25 + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCheckInfo_Http(r randyMesos, easy bool) *CheckInfo_Http { + this := &CheckInfo_Http{} + this.Port = uint32(r.Uint32()) + if r.Intn(10) != 0 { + v26 := string(randStringMesos(r)) + this.Path = &v26 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCheckInfo_Tcp(r randyMesos, easy bool) *CheckInfo_Tcp { + this := &CheckInfo_Tcp{} + this.Port = uint32(r.Uint32()) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedHealthCheck(r randyMesos, easy bool) *HealthCheck { + this := &HealthCheck{} + if r.Intn(10) != 0 { + this.HTTP = NewPopulatedHealthCheck_HTTPCheckInfo(r, easy) + } + if r.Intn(10) != 0 { + v27 := float64(r.Float64()) + if r.Intn(2) == 0 { + v27 *= -1 + } + this.DelaySeconds = &v27 + } + if r.Intn(10) != 0 { + v28 := float64(r.Float64()) + if r.Intn(2) == 0 { + v28 *= -1 + } + this.IntervalSeconds = &v28 + } + if r.Intn(10) != 0 { + v29 := float64(r.Float64()) + if r.Intn(2) == 0 { + v29 *= -1 + } + 
this.TimeoutSeconds = &v29 + } + if r.Intn(10) != 0 { + v30 := uint32(r.Uint32()) + this.ConsecutiveFailures = &v30 + } + if r.Intn(10) != 0 { + v31 := float64(r.Float64()) + if r.Intn(2) == 0 { + v31 *= -1 + } + this.GracePeriodSeconds = &v31 + } + if r.Intn(10) != 0 { + this.Command = NewPopulatedCommandInfo(r, easy) + } + this.Type = HealthCheck_Type([]int32{0, 1, 2, 3}[r.Intn(4)]) + if r.Intn(10) != 0 { + this.TCP = NewPopulatedHealthCheck_TCPCheckInfo(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedHealthCheck_HTTPCheckInfo(r randyMesos, easy bool) *HealthCheck_HTTPCheckInfo { + this := &HealthCheck_HTTPCheckInfo{} + this.Port = uint32(r.Uint32()) + if r.Intn(10) != 0 { + v32 := string(randStringMesos(r)) + this.Path = &v32 + } + if r.Intn(10) != 0 { + v33 := string(randStringMesos(r)) + this.Scheme = &v33 + } + if r.Intn(10) != 0 { + v34 := r.Intn(10) + this.Statuses = make([]uint32, v34) + for i := 0; i < v34; i++ { + this.Statuses[i] = uint32(r.Uint32()) + } + } + if r.Intn(10) != 0 { + v35 := NetworkInfo_Protocol([]int32{1, 2}[r.Intn(2)]) + this.Protocol = &v35 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedHealthCheck_TCPCheckInfo(r randyMesos, easy bool) *HealthCheck_TCPCheckInfo { + this := &HealthCheck_TCPCheckInfo{} + this.Port = uint32(r.Uint32()) + if r.Intn(10) != 0 { + v36 := NetworkInfo_Protocol([]int32{1, 2}[r.Intn(2)]) + this.Protocol = &v36 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedKillPolicy(r randyMesos, easy bool) *KillPolicy { + this := &KillPolicy{} + if r.Intn(10) != 0 { + this.GracePeriod = NewPopulatedDurationInfo(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCommandInfo(r randyMesos, easy bool) *CommandInfo { + this := &CommandInfo{} + if r.Intn(10) != 0 { + v37 := r.Intn(5) + this.URIs = make([]CommandInfo_URI, v37) + for i := 0; i < v37; i++ { + v38 := NewPopulatedCommandInfo_URI(r, easy) + this.URIs[i] = *v38 + } + } + if r.Intn(10) != 0 { + this.Environment = NewPopulatedEnvironment(r, easy) + } + if r.Intn(10) != 0 { + v39 := string(randStringMesos(r)) + this.Value = &v39 + } + if r.Intn(10) != 0 { + v40 := string(randStringMesos(r)) + this.User = &v40 + } + if r.Intn(10) != 0 { + v41 := bool(bool(r.Intn(2) == 0)) + this.Shell = &v41 + } + if r.Intn(10) != 0 { + v42 := r.Intn(10) + this.Arguments = make([]string, v42) + for i := 0; i < v42; i++ { + this.Arguments[i] = string(randStringMesos(r)) + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCommandInfo_URI(r randyMesos, easy bool) *CommandInfo_URI { + this := &CommandInfo_URI{} + this.Value = string(randStringMesos(r)) + if r.Intn(10) != 0 { + v43 := bool(bool(r.Intn(2) == 0)) + this.Executable = &v43 + } + if r.Intn(10) != 0 { + v44 := bool(bool(r.Intn(2) == 0)) + this.Extract = &v44 + } + if r.Intn(10) != 0 { + v45 := bool(bool(r.Intn(2) == 0)) + this.Cache = &v45 + } + if r.Intn(10) != 0 { + v46 := string(randStringMesos(r)) + this.OutputFile = &v46 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedExecutorInfo(r randyMesos, easy bool) *ExecutorInfo { + this := &ExecutorInfo{} + v47 := NewPopulatedExecutorID(r, easy) + this.ExecutorID = *v47 + if r.Intn(10) != 0 { + v48 := r.Intn(100) + this.Data = make([]byte, v48) + for i := 0; i < v48; i++ { + this.Data[i] = byte(r.Intn(256)) + } + } + if r.Intn(10) != 0 { + v49 := r.Intn(5) + this.Resources = make([]Resource, v49) + for i := 0; i < v49; i++ { + 
v50 := NewPopulatedResource(r, easy) + this.Resources[i] = *v50 + } + } + if r.Intn(10) != 0 { + this.Command = NewPopulatedCommandInfo(r, easy) + } + if r.Intn(10) != 0 { + this.FrameworkID = NewPopulatedFrameworkID(r, easy) + } + if r.Intn(10) != 0 { + v51 := string(randStringMesos(r)) + this.Name = &v51 + } + if r.Intn(10) != 0 { + v52 := string(randStringMesos(r)) + this.Source = &v52 + } + if r.Intn(10) != 0 { + this.Container = NewPopulatedContainerInfo(r, easy) + } + if r.Intn(10) != 0 { + this.Discovery = NewPopulatedDiscoveryInfo(r, easy) + } + if r.Intn(10) != 0 { + this.ShutdownGracePeriod = NewPopulatedDurationInfo(r, easy) + } + if r.Intn(10) != 0 { + this.Labels = NewPopulatedLabels(r, easy) + } + this.Type = ExecutorInfo_Type([]int32{0, 1, 2}[r.Intn(3)]) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedDomainInfo(r randyMesos, easy bool) *DomainInfo { + this := &DomainInfo{} + if r.Intn(10) != 0 { + this.FaultDomain = NewPopulatedDomainInfo_FaultDomain(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedDomainInfo_FaultDomain(r randyMesos, easy bool) *DomainInfo_FaultDomain { + this := &DomainInfo_FaultDomain{} + v53 := NewPopulatedDomainInfo_FaultDomain_RegionInfo(r, easy) + this.Region = *v53 + v54 := NewPopulatedDomainInfo_FaultDomain_ZoneInfo(r, easy) + this.Zone = *v54 + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedDomainInfo_FaultDomain_RegionInfo(r randyMesos, easy bool) *DomainInfo_FaultDomain_RegionInfo { + this := &DomainInfo_FaultDomain_RegionInfo{} + this.Name = string(randStringMesos(r)) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedDomainInfo_FaultDomain_ZoneInfo(r randyMesos, easy bool) *DomainInfo_FaultDomain_ZoneInfo { + this := &DomainInfo_FaultDomain_ZoneInfo{} + this.Name = string(randStringMesos(r)) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedMasterInfo(r randyMesos, easy bool) *MasterInfo { + this := &MasterInfo{} + this.ID = string(randStringMesos(r)) + this.IP = uint32(r.Uint32()) + v55 := uint32(r.Uint32()) + this.Port = &v55 + if r.Intn(10) != 0 { + v56 := string(randStringMesos(r)) + this.PID = &v56 + } + if r.Intn(10) != 0 { + v57 := string(randStringMesos(r)) + this.Hostname = &v57 + } + if r.Intn(10) != 0 { + v58 := string(randStringMesos(r)) + this.Version = &v58 + } + if r.Intn(10) != 0 { + this.Address = NewPopulatedAddress(r, easy) + } + if r.Intn(10) != 0 { + this.Domain = NewPopulatedDomainInfo(r, easy) + } + if r.Intn(10) != 0 { + v59 := r.Intn(5) + this.Capabilities = make([]MasterInfo_Capability, v59) + for i := 0; i < v59; i++ { + v60 := NewPopulatedMasterInfo_Capability(r, easy) + this.Capabilities[i] = *v60 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedMasterInfo_Capability(r randyMesos, easy bool) *MasterInfo_Capability { + this := &MasterInfo_Capability{} + this.Type = MasterInfo_Capability_Type([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedAgentInfo(r randyMesos, easy bool) *AgentInfo { + this := &AgentInfo{} + this.Hostname = string(randStringMesos(r)) + if r.Intn(10) != 0 { + v61 := r.Intn(5) + this.Resources = make([]Resource, v61) + for i := 0; i < v61; i++ { + v62 := NewPopulatedResource(r, easy) + this.Resources[i] = *v62 + } + } + if r.Intn(10) != 0 { + v63 := r.Intn(5) + this.Attributes = make([]Attribute, v63) + for i := 0; i < v63; i++ { + v64 := NewPopulatedAttribute(r, easy) + this.Attributes[i] 
= *v64 + } + } + if r.Intn(10) != 0 { + this.ID = NewPopulatedAgentID(r, easy) + } + if r.Intn(10) != 0 { + v65 := int32(r.Int31()) + if r.Intn(2) == 0 { + v65 *= -1 + } + this.Port = &v65 + } + if r.Intn(10) != 0 { + this.Domain = NewPopulatedDomainInfo(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedAgentInfo_Capability(r randyMesos, easy bool) *AgentInfo_Capability { + this := &AgentInfo_Capability{} + this.Type = AgentInfo_Capability_Type([]int32{0, 1, 2, 3, 4, 5}[r.Intn(6)]) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCSIPluginContainerInfo(r randyMesos, easy bool) *CSIPluginContainerInfo { + this := &CSIPluginContainerInfo{} + if r.Intn(10) != 0 { + v66 := r.Intn(10) + this.Services = make([]CSIPluginContainerInfo_Service, v66) + for i := 0; i < v66; i++ { + this.Services[i] = CSIPluginContainerInfo_Service([]int32{0, 1, 2}[r.Intn(3)]) + } + } + if r.Intn(10) != 0 { + this.Command = NewPopulatedCommandInfo(r, easy) + } + if r.Intn(10) != 0 { + v67 := r.Intn(5) + this.Resources = make([]Resource, v67) + for i := 0; i < v67; i++ { + v68 := NewPopulatedResource(r, easy) + this.Resources[i] = *v68 + } + } + if r.Intn(10) != 0 { + this.Container = NewPopulatedContainerInfo(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCSIPluginInfo(r randyMesos, easy bool) *CSIPluginInfo { + this := &CSIPluginInfo{} + this.Type = string(randStringMesos(r)) + this.Name = string(randStringMesos(r)) + if r.Intn(10) != 0 { + v69 := r.Intn(5) + this.Containers = make([]CSIPluginContainerInfo, v69) + for i := 0; i < v69; i++ { + v70 := NewPopulatedCSIPluginContainerInfo(r, easy) + this.Containers[i] = *v70 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResourceProviderInfo(r randyMesos, easy bool) *ResourceProviderInfo { + this := &ResourceProviderInfo{} + if r.Intn(10) != 0 { + this.ID = NewPopulatedResourceProviderID(r, easy) + } + if r.Intn(10) != 0 { + v71 := r.Intn(5) + this.Attributes = make([]Attribute, v71) + for i := 0; i < v71; i++ { + v72 := NewPopulatedAttribute(r, easy) + this.Attributes[i] = *v72 + } + } + this.Type = string(randStringMesos(r)) + this.Name = string(randStringMesos(r)) + if r.Intn(10) != 0 { + v73 := r.Intn(5) + this.DefaultReservations = make([]Resource_ReservationInfo, v73) + for i := 0; i < v73; i++ { + v74 := NewPopulatedResource_ReservationInfo(r, easy) + this.DefaultReservations[i] = *v74 + } + } + if r.Intn(10) != 0 { + this.Storage = NewPopulatedResourceProviderInfo_Storage(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResourceProviderInfo_Storage(r randyMesos, easy bool) *ResourceProviderInfo_Storage { + this := &ResourceProviderInfo_Storage{} + v75 := NewPopulatedCSIPluginInfo(r, easy) + this.Plugin = *v75 + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedValue(r randyMesos, easy bool) *Value { + this := &Value{} + this.Type = Value_Type([]int32{0, 1, 2, 3}[r.Intn(4)]) + if r.Intn(10) != 0 { + this.Scalar = NewPopulatedValue_Scalar(r, easy) + } + if r.Intn(10) != 0 { + this.Ranges = NewPopulatedValue_Ranges(r, easy) + } + if r.Intn(10) != 0 { + this.Set = NewPopulatedValue_Set(r, easy) + } + if r.Intn(10) != 0 { + this.Text = NewPopulatedValue_Text(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedValue_Scalar(r randyMesos, easy bool) *Value_Scalar { + this := &Value_Scalar{} + this.Value = float64(r.Float64()) + if r.Intn(2) == 0 { + 
this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedValue_Range(r randyMesos, easy bool) *Value_Range { + this := &Value_Range{} + this.Begin = uint64(uint64(r.Uint32())) + this.End = uint64(uint64(r.Uint32())) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedValue_Ranges(r randyMesos, easy bool) *Value_Ranges { + this := &Value_Ranges{} + if r.Intn(10) != 0 { + v76 := r.Intn(5) + this.Range = make([]Value_Range, v76) + for i := 0; i < v76; i++ { + v77 := NewPopulatedValue_Range(r, easy) + this.Range[i] = *v77 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedValue_Set(r randyMesos, easy bool) *Value_Set { + this := &Value_Set{} + if r.Intn(10) != 0 { + v78 := r.Intn(10) + this.Item = make([]string, v78) + for i := 0; i < v78; i++ { + this.Item[i] = string(randStringMesos(r)) + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedValue_Text(r randyMesos, easy bool) *Value_Text { + this := &Value_Text{} + this.Value = string(randStringMesos(r)) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedAttribute(r randyMesos, easy bool) *Attribute { + this := &Attribute{} + this.Name = string(randStringMesos(r)) + this.Type = Value_Type([]int32{0, 1, 2, 3}[r.Intn(4)]) + if r.Intn(10) != 0 { + this.Scalar = NewPopulatedValue_Scalar(r, easy) + } + if r.Intn(10) != 0 { + this.Ranges = NewPopulatedValue_Ranges(r, easy) + } + if r.Intn(10) != 0 { + this.Text = NewPopulatedValue_Text(r, easy) + } + if r.Intn(10) != 0 { + this.Set = NewPopulatedValue_Set(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResource(r randyMesos, easy bool) *Resource { + this := &Resource{} + this.Name = string(randStringMesos(r)) + v79 := Value_Type([]int32{0, 1, 2, 3}[r.Intn(4)]) + this.Type = &v79 + if r.Intn(10) != 0 { + this.Scalar = NewPopulatedValue_Scalar(r, easy) + } + if r.Intn(10) != 0 { + this.Ranges = NewPopulatedValue_Ranges(r, easy) + } + if r.Intn(10) != 0 { + this.Set = NewPopulatedValue_Set(r, easy) + } + if r.Intn(10) != 0 { + v80 := string(randStringMesos(r)) + this.Role = &v80 + } + if r.Intn(10) != 0 { + this.Disk = NewPopulatedResource_DiskInfo(r, easy) + } + if r.Intn(10) != 0 { + this.Reservation = NewPopulatedResource_ReservationInfo(r, easy) + } + if r.Intn(10) != 0 { + this.Revocable = NewPopulatedResource_RevocableInfo(r, easy) + } + if r.Intn(10) != 0 { + this.Shared = NewPopulatedResource_SharedInfo(r, easy) + } + if r.Intn(10) != 0 { + this.AllocationInfo = NewPopulatedResource_AllocationInfo(r, easy) + } + if r.Intn(10) != 0 { + this.ProviderID = NewPopulatedResourceProviderID(r, easy) + } + if r.Intn(10) != 0 { + v81 := r.Intn(5) + this.Reservations = make([]Resource_ReservationInfo, v81) + for i := 0; i < v81; i++ { + v82 := NewPopulatedResource_ReservationInfo(r, easy) + this.Reservations[i] = *v82 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResource_AllocationInfo(r randyMesos, easy bool) *Resource_AllocationInfo { + this := &Resource_AllocationInfo{} + if r.Intn(10) != 0 { + v83 := string(randStringMesos(r)) + this.Role = &v83 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResource_ReservationInfo(r randyMesos, easy bool) *Resource_ReservationInfo { + this := &Resource_ReservationInfo{} + if r.Intn(10) != 0 { + v84 := string(randStringMesos(r)) + this.Principal = &v84 + } + if r.Intn(10) != 0 { + this.Labels = NewPopulatedLabels(r, easy) + } + 
if r.Intn(10) != 0 { + v85 := string(randStringMesos(r)) + this.Role = &v85 + } + if r.Intn(10) != 0 { + v86 := Resource_ReservationInfo_Type([]int32{0, 1, 2}[r.Intn(3)]) + this.Type = &v86 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResource_DiskInfo(r randyMesos, easy bool) *Resource_DiskInfo { + this := &Resource_DiskInfo{} + if r.Intn(10) != 0 { + this.Persistence = NewPopulatedResource_DiskInfo_Persistence(r, easy) + } + if r.Intn(10) != 0 { + this.Volume = NewPopulatedVolume(r, easy) + } + if r.Intn(10) != 0 { + this.Source = NewPopulatedResource_DiskInfo_Source(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResource_DiskInfo_Persistence(r randyMesos, easy bool) *Resource_DiskInfo_Persistence { + this := &Resource_DiskInfo_Persistence{} + this.ID = string(randStringMesos(r)) + if r.Intn(10) != 0 { + v87 := string(randStringMesos(r)) + this.Principal = &v87 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResource_DiskInfo_Source(r randyMesos, easy bool) *Resource_DiskInfo_Source { + this := &Resource_DiskInfo_Source{} + this.Type = Resource_DiskInfo_Source_Type([]int32{0, 1, 2, 3, 4}[r.Intn(5)]) + if r.Intn(10) != 0 { + this.Path = NewPopulatedResource_DiskInfo_Source_Path(r, easy) + } + if r.Intn(10) != 0 { + this.Mount = NewPopulatedResource_DiskInfo_Source_Mount(r, easy) + } + if r.Intn(10) != 0 { + v88 := string(randStringMesos(r)) + this.ID = &v88 + } + if r.Intn(10) != 0 { + this.Metadata = NewPopulatedLabels(r, easy) + } + if r.Intn(10) != 0 { + v89 := string(randStringMesos(r)) + this.Profile = &v89 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResource_DiskInfo_Source_Path(r randyMesos, easy bool) *Resource_DiskInfo_Source_Path { + this := &Resource_DiskInfo_Source_Path{} + if r.Intn(10) != 0 { + v90 := string(randStringMesos(r)) + this.Root = &v90 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResource_DiskInfo_Source_Mount(r randyMesos, easy bool) *Resource_DiskInfo_Source_Mount { + this := &Resource_DiskInfo_Source_Mount{} + if r.Intn(10) != 0 { + v91 := string(randStringMesos(r)) + this.Root = &v91 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResource_RevocableInfo(r randyMesos, easy bool) *Resource_RevocableInfo { + this := &Resource_RevocableInfo{} + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResource_SharedInfo(r randyMesos, easy bool) *Resource_SharedInfo { + this := &Resource_SharedInfo{} + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedTrafficControlStatistics(r randyMesos, easy bool) *TrafficControlStatistics { + this := &TrafficControlStatistics{} + this.ID = string(randStringMesos(r)) + if r.Intn(10) != 0 { + v92 := uint64(uint64(r.Uint32())) + this.Backlog = &v92 + } + if r.Intn(10) != 0 { + v93 := uint64(uint64(r.Uint32())) + this.Bytes = &v93 + } + if r.Intn(10) != 0 { + v94 := uint64(uint64(r.Uint32())) + this.Drops = &v94 + } + if r.Intn(10) != 0 { + v95 := uint64(uint64(r.Uint32())) + this.Overlimits = &v95 + } + if r.Intn(10) != 0 { + v96 := uint64(uint64(r.Uint32())) + this.Packets = &v96 + } + if r.Intn(10) != 0 { + v97 := uint64(uint64(r.Uint32())) + this.Qlen = &v97 + } + if r.Intn(10) != 0 { + v98 := uint64(uint64(r.Uint32())) + this.RateBPS = &v98 + } + if r.Intn(10) != 0 { + v99 := uint64(uint64(r.Uint32())) + this.RatePPS = &v99 + } + if r.Intn(10) != 0 { + v100 := uint64(uint64(r.Uint32())) + this.Requeues 
= &v100 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedIpStatistics(r randyMesos, easy bool) *IpStatistics { + this := &IpStatistics{} + if r.Intn(10) != 0 { + v101 := int64(r.Int63()) + if r.Intn(2) == 0 { + v101 *= -1 + } + this.Forwarding = &v101 + } + if r.Intn(10) != 0 { + v102 := int64(r.Int63()) + if r.Intn(2) == 0 { + v102 *= -1 + } + this.DefaultTTL = &v102 + } + if r.Intn(10) != 0 { + v103 := int64(r.Int63()) + if r.Intn(2) == 0 { + v103 *= -1 + } + this.InReceives = &v103 + } + if r.Intn(10) != 0 { + v104 := int64(r.Int63()) + if r.Intn(2) == 0 { + v104 *= -1 + } + this.InHdrErrors = &v104 + } + if r.Intn(10) != 0 { + v105 := int64(r.Int63()) + if r.Intn(2) == 0 { + v105 *= -1 + } + this.InAddrErrors = &v105 + } + if r.Intn(10) != 0 { + v106 := int64(r.Int63()) + if r.Intn(2) == 0 { + v106 *= -1 + } + this.ForwDatagrams = &v106 + } + if r.Intn(10) != 0 { + v107 := int64(r.Int63()) + if r.Intn(2) == 0 { + v107 *= -1 + } + this.InUnknownProtos = &v107 + } + if r.Intn(10) != 0 { + v108 := int64(r.Int63()) + if r.Intn(2) == 0 { + v108 *= -1 + } + this.InDiscards = &v108 + } + if r.Intn(10) != 0 { + v109 := int64(r.Int63()) + if r.Intn(2) == 0 { + v109 *= -1 + } + this.InDelivers = &v109 + } + if r.Intn(10) != 0 { + v110 := int64(r.Int63()) + if r.Intn(2) == 0 { + v110 *= -1 + } + this.OutRequests = &v110 + } + if r.Intn(10) != 0 { + v111 := int64(r.Int63()) + if r.Intn(2) == 0 { + v111 *= -1 + } + this.OutDiscards = &v111 + } + if r.Intn(10) != 0 { + v112 := int64(r.Int63()) + if r.Intn(2) == 0 { + v112 *= -1 + } + this.OutNoRoutes = &v112 + } + if r.Intn(10) != 0 { + v113 := int64(r.Int63()) + if r.Intn(2) == 0 { + v113 *= -1 + } + this.ReasmTimeout = &v113 + } + if r.Intn(10) != 0 { + v114 := int64(r.Int63()) + if r.Intn(2) == 0 { + v114 *= -1 + } + this.ReasmReqds = &v114 + } + if r.Intn(10) != 0 { + v115 := int64(r.Int63()) + if r.Intn(2) == 0 { + v115 *= -1 + } + this.ReasmOKs = &v115 + } + if r.Intn(10) != 0 { + v116 := int64(r.Int63()) + if r.Intn(2) == 0 { + v116 *= -1 + } + this.ReasmFails = &v116 + } + if r.Intn(10) != 0 { + v117 := int64(r.Int63()) + if r.Intn(2) == 0 { + v117 *= -1 + } + this.FragOKs = &v117 + } + if r.Intn(10) != 0 { + v118 := int64(r.Int63()) + if r.Intn(2) == 0 { + v118 *= -1 + } + this.FragFails = &v118 + } + if r.Intn(10) != 0 { + v119 := int64(r.Int63()) + if r.Intn(2) == 0 { + v119 *= -1 + } + this.FragCreates = &v119 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedIcmpStatistics(r randyMesos, easy bool) *IcmpStatistics { + this := &IcmpStatistics{} + if r.Intn(10) != 0 { + v120 := int64(r.Int63()) + if r.Intn(2) == 0 { + v120 *= -1 + } + this.InMsgs = &v120 + } + if r.Intn(10) != 0 { + v121 := int64(r.Int63()) + if r.Intn(2) == 0 { + v121 *= -1 + } + this.InErrors = &v121 + } + if r.Intn(10) != 0 { + v122 := int64(r.Int63()) + if r.Intn(2) == 0 { + v122 *= -1 + } + this.InCsumErrors = &v122 + } + if r.Intn(10) != 0 { + v123 := int64(r.Int63()) + if r.Intn(2) == 0 { + v123 *= -1 + } + this.InDestUnreachs = &v123 + } + if r.Intn(10) != 0 { + v124 := int64(r.Int63()) + if r.Intn(2) == 0 { + v124 *= -1 + } + this.InTimeExcds = &v124 + } + if r.Intn(10) != 0 { + v125 := int64(r.Int63()) + if r.Intn(2) == 0 { + v125 *= -1 + } + this.InParmProbs = &v125 + } + if r.Intn(10) != 0 { + v126 := int64(r.Int63()) + if r.Intn(2) == 0 { + v126 *= -1 + } + this.InSrcQuenchs = &v126 + } + if r.Intn(10) != 0 { + v127 := int64(r.Int63()) + if r.Intn(2) == 0 { + v127 *= -1 + } + this.InRedirects = &v127 + } + 
if r.Intn(10) != 0 { + v128 := int64(r.Int63()) + if r.Intn(2) == 0 { + v128 *= -1 + } + this.InEchos = &v128 + } + if r.Intn(10) != 0 { + v129 := int64(r.Int63()) + if r.Intn(2) == 0 { + v129 *= -1 + } + this.InEchoReps = &v129 + } + if r.Intn(10) != 0 { + v130 := int64(r.Int63()) + if r.Intn(2) == 0 { + v130 *= -1 + } + this.InTimestamps = &v130 + } + if r.Intn(10) != 0 { + v131 := int64(r.Int63()) + if r.Intn(2) == 0 { + v131 *= -1 + } + this.InTimestampReps = &v131 + } + if r.Intn(10) != 0 { + v132 := int64(r.Int63()) + if r.Intn(2) == 0 { + v132 *= -1 + } + this.InAddrMasks = &v132 + } + if r.Intn(10) != 0 { + v133 := int64(r.Int63()) + if r.Intn(2) == 0 { + v133 *= -1 + } + this.InAddrMaskReps = &v133 + } + if r.Intn(10) != 0 { + v134 := int64(r.Int63()) + if r.Intn(2) == 0 { + v134 *= -1 + } + this.OutMsgs = &v134 + } + if r.Intn(10) != 0 { + v135 := int64(r.Int63()) + if r.Intn(2) == 0 { + v135 *= -1 + } + this.OutErrors = &v135 + } + if r.Intn(10) != 0 { + v136 := int64(r.Int63()) + if r.Intn(2) == 0 { + v136 *= -1 + } + this.OutDestUnreachs = &v136 + } + if r.Intn(10) != 0 { + v137 := int64(r.Int63()) + if r.Intn(2) == 0 { + v137 *= -1 + } + this.OutTimeExcds = &v137 + } + if r.Intn(10) != 0 { + v138 := int64(r.Int63()) + if r.Intn(2) == 0 { + v138 *= -1 + } + this.OutParmProbs = &v138 + } + if r.Intn(10) != 0 { + v139 := int64(r.Int63()) + if r.Intn(2) == 0 { + v139 *= -1 + } + this.OutSrcQuenchs = &v139 + } + if r.Intn(10) != 0 { + v140 := int64(r.Int63()) + if r.Intn(2) == 0 { + v140 *= -1 + } + this.OutRedirects = &v140 + } + if r.Intn(10) != 0 { + v141 := int64(r.Int63()) + if r.Intn(2) == 0 { + v141 *= -1 + } + this.OutEchos = &v141 + } + if r.Intn(10) != 0 { + v142 := int64(r.Int63()) + if r.Intn(2) == 0 { + v142 *= -1 + } + this.OutEchoReps = &v142 + } + if r.Intn(10) != 0 { + v143 := int64(r.Int63()) + if r.Intn(2) == 0 { + v143 *= -1 + } + this.OutTimestamps = &v143 + } + if r.Intn(10) != 0 { + v144 := int64(r.Int63()) + if r.Intn(2) == 0 { + v144 *= -1 + } + this.OutTimestampReps = &v144 + } + if r.Intn(10) != 0 { + v145 := int64(r.Int63()) + if r.Intn(2) == 0 { + v145 *= -1 + } + this.OutAddrMasks = &v145 + } + if r.Intn(10) != 0 { + v146 := int64(r.Int63()) + if r.Intn(2) == 0 { + v146 *= -1 + } + this.OutAddrMaskReps = &v146 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedTcpStatistics(r randyMesos, easy bool) *TcpStatistics { + this := &TcpStatistics{} + if r.Intn(10) != 0 { + v147 := int64(r.Int63()) + if r.Intn(2) == 0 { + v147 *= -1 + } + this.RtoAlgorithm = &v147 + } + if r.Intn(10) != 0 { + v148 := int64(r.Int63()) + if r.Intn(2) == 0 { + v148 *= -1 + } + this.RtoMin = &v148 + } + if r.Intn(10) != 0 { + v149 := int64(r.Int63()) + if r.Intn(2) == 0 { + v149 *= -1 + } + this.RtoMax = &v149 + } + if r.Intn(10) != 0 { + v150 := int64(r.Int63()) + if r.Intn(2) == 0 { + v150 *= -1 + } + this.MaxConn = &v150 + } + if r.Intn(10) != 0 { + v151 := int64(r.Int63()) + if r.Intn(2) == 0 { + v151 *= -1 + } + this.ActiveOpens = &v151 + } + if r.Intn(10) != 0 { + v152 := int64(r.Int63()) + if r.Intn(2) == 0 { + v152 *= -1 + } + this.PassiveOpens = &v152 + } + if r.Intn(10) != 0 { + v153 := int64(r.Int63()) + if r.Intn(2) == 0 { + v153 *= -1 + } + this.AttemptFails = &v153 + } + if r.Intn(10) != 0 { + v154 := int64(r.Int63()) + if r.Intn(2) == 0 { + v154 *= -1 + } + this.EstabResets = &v154 + } + if r.Intn(10) != 0 { + v155 := int64(r.Int63()) + if r.Intn(2) == 0 { + v155 *= -1 + } + this.CurrEstab = &v155 + } + if r.Intn(10) != 0 { + v156 := 
int64(r.Int63()) + if r.Intn(2) == 0 { + v156 *= -1 + } + this.InSegs = &v156 + } + if r.Intn(10) != 0 { + v157 := int64(r.Int63()) + if r.Intn(2) == 0 { + v157 *= -1 + } + this.OutSegs = &v157 + } + if r.Intn(10) != 0 { + v158 := int64(r.Int63()) + if r.Intn(2) == 0 { + v158 *= -1 + } + this.RetransSegs = &v158 + } + if r.Intn(10) != 0 { + v159 := int64(r.Int63()) + if r.Intn(2) == 0 { + v159 *= -1 + } + this.InErrs = &v159 + } + if r.Intn(10) != 0 { + v160 := int64(r.Int63()) + if r.Intn(2) == 0 { + v160 *= -1 + } + this.OutRsts = &v160 + } + if r.Intn(10) != 0 { + v161 := int64(r.Int63()) + if r.Intn(2) == 0 { + v161 *= -1 + } + this.InCsumErrors = &v161 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedUdpStatistics(r randyMesos, easy bool) *UdpStatistics { + this := &UdpStatistics{} + if r.Intn(10) != 0 { + v162 := int64(r.Int63()) + if r.Intn(2) == 0 { + v162 *= -1 + } + this.InDatagrams = &v162 + } + if r.Intn(10) != 0 { + v163 := int64(r.Int63()) + if r.Intn(2) == 0 { + v163 *= -1 + } + this.NoPorts = &v163 + } + if r.Intn(10) != 0 { + v164 := int64(r.Int63()) + if r.Intn(2) == 0 { + v164 *= -1 + } + this.InErrors = &v164 + } + if r.Intn(10) != 0 { + v165 := int64(r.Int63()) + if r.Intn(2) == 0 { + v165 *= -1 + } + this.OutDatagrams = &v165 + } + if r.Intn(10) != 0 { + v166 := int64(r.Int63()) + if r.Intn(2) == 0 { + v166 *= -1 + } + this.RcvbufErrors = &v166 + } + if r.Intn(10) != 0 { + v167 := int64(r.Int63()) + if r.Intn(2) == 0 { + v167 *= -1 + } + this.SndbufErrors = &v167 + } + if r.Intn(10) != 0 { + v168 := int64(r.Int63()) + if r.Intn(2) == 0 { + v168 *= -1 + } + this.InCsumErrors = &v168 + } + if r.Intn(10) != 0 { + v169 := int64(r.Int63()) + if r.Intn(2) == 0 { + v169 *= -1 + } + this.IgnoredMulti = &v169 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedSNMPStatistics(r randyMesos, easy bool) *SNMPStatistics { + this := &SNMPStatistics{} + if r.Intn(10) != 0 { + this.IPStats = NewPopulatedIpStatistics(r, easy) + } + if r.Intn(10) != 0 { + this.ICMPStats = NewPopulatedIcmpStatistics(r, easy) + } + if r.Intn(10) != 0 { + this.TCPStats = NewPopulatedTcpStatistics(r, easy) + } + if r.Intn(10) != 0 { + this.UDPStats = NewPopulatedUdpStatistics(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedDiskStatistics(r randyMesos, easy bool) *DiskStatistics { + this := &DiskStatistics{} + if r.Intn(10) != 0 { + this.Source = NewPopulatedResource_DiskInfo_Source(r, easy) + } + if r.Intn(10) != 0 { + this.Persistence = NewPopulatedResource_DiskInfo_Persistence(r, easy) + } + if r.Intn(10) != 0 { + v170 := uint64(uint64(r.Uint32())) + this.LimitBytes = &v170 + } + if r.Intn(10) != 0 { + v171 := uint64(uint64(r.Uint32())) + this.UsedBytes = &v171 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResourceStatistics(r randyMesos, easy bool) *ResourceStatistics { + this := &ResourceStatistics{} + this.Timestamp = float64(r.Float64()) + if r.Intn(2) == 0 { + this.Timestamp *= -1 + } + if r.Intn(10) != 0 { + v172 := float64(r.Float64()) + if r.Intn(2) == 0 { + v172 *= -1 + } + this.CPUsUserTimeSecs = &v172 + } + if r.Intn(10) != 0 { + v173 := float64(r.Float64()) + if r.Intn(2) == 0 { + v173 *= -1 + } + this.CPUsSystemTimeSecs = &v173 + } + if r.Intn(10) != 0 { + v174 := float64(r.Float64()) + if r.Intn(2) == 0 { + v174 *= -1 + } + this.CPUsLimit = &v174 + } + if r.Intn(10) != 0 { + v175 := uint64(uint64(r.Uint32())) + this.MemRSSBytes = &v175 + } + if r.Intn(10) != 0 { + v176 := 
uint64(uint64(r.Uint32())) + this.MemLimitBytes = &v176 + } + if r.Intn(10) != 0 { + v177 := uint32(r.Uint32()) + this.CPUsNrPeriods = &v177 + } + if r.Intn(10) != 0 { + v178 := uint32(r.Uint32()) + this.CPUsNrThrottled = &v178 + } + if r.Intn(10) != 0 { + v179 := float64(r.Float64()) + if r.Intn(2) == 0 { + v179 *= -1 + } + this.CPUsThrottledTimeSecs = &v179 + } + if r.Intn(10) != 0 { + v180 := uint64(uint64(r.Uint32())) + this.MemFileBytes = &v180 + } + if r.Intn(10) != 0 { + v181 := uint64(uint64(r.Uint32())) + this.MemAnonBytes = &v181 + } + if r.Intn(10) != 0 { + v182 := uint64(uint64(r.Uint32())) + this.MemMappedFileBytes = &v182 + } + if r.Intn(10) != 0 { + this.Perf = NewPopulatedPerfStatistics(r, easy) + } + if r.Intn(10) != 0 { + v183 := uint64(uint64(r.Uint32())) + this.NetRxPackets = &v183 + } + if r.Intn(10) != 0 { + v184 := uint64(uint64(r.Uint32())) + this.NetRxBytes = &v184 + } + if r.Intn(10) != 0 { + v185 := uint64(uint64(r.Uint32())) + this.NetRxErrors = &v185 + } + if r.Intn(10) != 0 { + v186 := uint64(uint64(r.Uint32())) + this.NetRxDropped = &v186 + } + if r.Intn(10) != 0 { + v187 := uint64(uint64(r.Uint32())) + this.NetTxPackets = &v187 + } + if r.Intn(10) != 0 { + v188 := uint64(uint64(r.Uint32())) + this.NetTxBytes = &v188 + } + if r.Intn(10) != 0 { + v189 := uint64(uint64(r.Uint32())) + this.NetTxErrors = &v189 + } + if r.Intn(10) != 0 { + v190 := uint64(uint64(r.Uint32())) + this.NetTxDropped = &v190 + } + if r.Intn(10) != 0 { + v191 := float64(r.Float64()) + if r.Intn(2) == 0 { + v191 *= -1 + } + this.NetTCPRttMicrosecsP50 = &v191 + } + if r.Intn(10) != 0 { + v192 := float64(r.Float64()) + if r.Intn(2) == 0 { + v192 *= -1 + } + this.NetTCPRttMicrosecsP90 = &v192 + } + if r.Intn(10) != 0 { + v193 := float64(r.Float64()) + if r.Intn(2) == 0 { + v193 *= -1 + } + this.NetTCPRttMicrosecsP95 = &v193 + } + if r.Intn(10) != 0 { + v194 := float64(r.Float64()) + if r.Intn(2) == 0 { + v194 *= -1 + } + this.NetTCPRttMicrosecsP99 = &v194 + } + if r.Intn(10) != 0 { + v195 := uint64(uint64(r.Uint32())) + this.DiskLimitBytes = &v195 + } + if r.Intn(10) != 0 { + v196 := uint64(uint64(r.Uint32())) + this.DiskUsedBytes = &v196 + } + if r.Intn(10) != 0 { + v197 := float64(r.Float64()) + if r.Intn(2) == 0 { + v197 *= -1 + } + this.NetTCPActiveConnections = &v197 + } + if r.Intn(10) != 0 { + v198 := float64(r.Float64()) + if r.Intn(2) == 0 { + v198 *= -1 + } + this.NetTCPTimeWaitConnections = &v198 + } + if r.Intn(10) != 0 { + v199 := uint32(r.Uint32()) + this.Processes = &v199 + } + if r.Intn(10) != 0 { + v200 := uint32(r.Uint32()) + this.Threads = &v200 + } + if r.Intn(10) != 0 { + v201 := uint64(uint64(r.Uint32())) + this.MemLowPressureCounter = &v201 + } + if r.Intn(10) != 0 { + v202 := uint64(uint64(r.Uint32())) + this.MemMediumPressureCounter = &v202 + } + if r.Intn(10) != 0 { + v203 := uint64(uint64(r.Uint32())) + this.MemCriticalPressureCounter = &v203 + } + if r.Intn(10) != 0 { + v204 := r.Intn(5) + this.NetTrafficControlStatistics = make([]TrafficControlStatistics, v204) + for i := 0; i < v204; i++ { + v205 := NewPopulatedTrafficControlStatistics(r, easy) + this.NetTrafficControlStatistics[i] = *v205 + } + } + if r.Intn(10) != 0 { + v206 := uint64(uint64(r.Uint32())) + this.MemTotalBytes = &v206 + } + if r.Intn(10) != 0 { + v207 := uint64(uint64(r.Uint32())) + this.MemTotalMemswBytes = &v207 + } + if r.Intn(10) != 0 { + v208 := uint64(uint64(r.Uint32())) + this.MemSoftLimitBytes = &v208 + } + if r.Intn(10) != 0 { + v209 := uint64(uint64(r.Uint32())) + this.MemCacheBytes = 
&v209 + } + if r.Intn(10) != 0 { + v210 := uint64(uint64(r.Uint32())) + this.MemSwapBytes = &v210 + } + if r.Intn(10) != 0 { + v211 := uint64(uint64(r.Uint32())) + this.MemUnevictableBytes = &v211 + } + if r.Intn(10) != 0 { + this.NetSNMPStatistics = NewPopulatedSNMPStatistics(r, easy) + } + if r.Intn(10) != 0 { + v212 := r.Intn(5) + this.DiskStatistics = make([]DiskStatistics, v212) + for i := 0; i < v212; i++ { + v213 := NewPopulatedDiskStatistics(r, easy) + this.DiskStatistics[i] = *v213 + } + } + if r.Intn(10) != 0 { + this.BlkioStatistics = NewPopulatedCgroupInfo_Blkio_Statistics(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResourceUsage(r randyMesos, easy bool) *ResourceUsage { + this := &ResourceUsage{} + if r.Intn(10) == 0 { + v214 := r.Intn(5) + this.Executors = make([]ResourceUsage_Executor, v214) + for i := 0; i < v214; i++ { + v215 := NewPopulatedResourceUsage_Executor(r, easy) + this.Executors[i] = *v215 + } + } + if r.Intn(10) != 0 { + v216 := r.Intn(5) + this.Total = make([]Resource, v216) + for i := 0; i < v216; i++ { + v217 := NewPopulatedResource(r, easy) + this.Total[i] = *v217 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResourceUsage_Executor(r randyMesos, easy bool) *ResourceUsage_Executor { + this := &ResourceUsage_Executor{} + v218 := NewPopulatedExecutorInfo(r, easy) + this.ExecutorInfo = *v218 + if r.Intn(10) != 0 { + v219 := r.Intn(5) + this.Allocated = make([]Resource, v219) + for i := 0; i < v219; i++ { + v220 := NewPopulatedResource(r, easy) + this.Allocated[i] = *v220 + } + } + if r.Intn(10) != 0 { + this.Statistics = NewPopulatedResourceStatistics(r, easy) + } + v221 := NewPopulatedContainerID(r, easy) + this.ContainerID = *v221 + if r.Intn(10) != 0 { + v222 := r.Intn(5) + this.Tasks = make([]ResourceUsage_Executor_Task, v222) + for i := 0; i < v222; i++ { + v223 := NewPopulatedResourceUsage_Executor_Task(r, easy) + this.Tasks[i] = *v223 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedResourceUsage_Executor_Task(r randyMesos, easy bool) *ResourceUsage_Executor_Task { + this := &ResourceUsage_Executor_Task{} + this.Name = string(randStringMesos(r)) + v224 := NewPopulatedTaskID(r, easy) + this.ID = *v224 + if r.Intn(10) != 0 { + v225 := r.Intn(5) + this.Resources = make([]Resource, v225) + for i := 0; i < v225; i++ { + v226 := NewPopulatedResource(r, easy) + this.Resources[i] = *v226 + } + } + if r.Intn(10) != 0 { + this.Labels = NewPopulatedLabels(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedPerfStatistics(r randyMesos, easy bool) *PerfStatistics { + this := &PerfStatistics{} + this.Timestamp = float64(r.Float64()) + if r.Intn(2) == 0 { + this.Timestamp *= -1 + } + this.Duration = float64(r.Float64()) + if r.Intn(2) == 0 { + this.Duration *= -1 + } + if r.Intn(10) != 0 { + v227 := uint64(uint64(r.Uint32())) + this.Cycles = &v227 + } + if r.Intn(10) != 0 { + v228 := uint64(uint64(r.Uint32())) + this.StalledCyclesFrontend = &v228 + } + if r.Intn(10) != 0 { + v229 := uint64(uint64(r.Uint32())) + this.StalledCyclesBackend = &v229 + } + if r.Intn(10) != 0 { + v230 := uint64(uint64(r.Uint32())) + this.Instructions = &v230 + } + if r.Intn(10) != 0 { + v231 := uint64(uint64(r.Uint32())) + this.CacheReferences = &v231 + } + if r.Intn(10) != 0 { + v232 := uint64(uint64(r.Uint32())) + this.CacheMisses = &v232 + } + if r.Intn(10) != 0 { + v233 := uint64(uint64(r.Uint32())) + this.Branches = &v233 + } + if r.Intn(10) != 0 { 
+ v234 := uint64(uint64(r.Uint32())) + this.BranchMisses = &v234 + } + if r.Intn(10) != 0 { + v235 := uint64(uint64(r.Uint32())) + this.BusCycles = &v235 + } + if r.Intn(10) != 0 { + v236 := uint64(uint64(r.Uint32())) + this.RefCycles = &v236 + } + if r.Intn(10) != 0 { + v237 := float64(r.Float64()) + if r.Intn(2) == 0 { + v237 *= -1 + } + this.CPUClock = &v237 + } + if r.Intn(10) != 0 { + v238 := float64(r.Float64()) + if r.Intn(2) == 0 { + v238 *= -1 + } + this.TaskClock = &v238 + } + if r.Intn(10) != 0 { + v239 := uint64(uint64(r.Uint32())) + this.PageFaults = &v239 + } + if r.Intn(10) != 0 { + v240 := uint64(uint64(r.Uint32())) + this.MinorFaults = &v240 + } + if r.Intn(10) != 0 { + v241 := uint64(uint64(r.Uint32())) + this.MajorFaults = &v241 + } + if r.Intn(10) != 0 { + v242 := uint64(uint64(r.Uint32())) + this.ContextSwitches = &v242 + } + if r.Intn(10) != 0 { + v243 := uint64(uint64(r.Uint32())) + this.CPUMigrations = &v243 + } + if r.Intn(10) != 0 { + v244 := uint64(uint64(r.Uint32())) + this.AlignmentFaults = &v244 + } + if r.Intn(10) != 0 { + v245 := uint64(uint64(r.Uint32())) + this.EmulationFaults = &v245 + } + if r.Intn(10) != 0 { + v246 := uint64(uint64(r.Uint32())) + this.L1DcacheLoads = &v246 + } + if r.Intn(10) != 0 { + v247 := uint64(uint64(r.Uint32())) + this.L1DcacheLoadMisses = &v247 + } + if r.Intn(10) != 0 { + v248 := uint64(uint64(r.Uint32())) + this.L1DcacheStores = &v248 + } + if r.Intn(10) != 0 { + v249 := uint64(uint64(r.Uint32())) + this.L1DcacheStoreMisses = &v249 + } + if r.Intn(10) != 0 { + v250 := uint64(uint64(r.Uint32())) + this.L1DcachePrefetches = &v250 + } + if r.Intn(10) != 0 { + v251 := uint64(uint64(r.Uint32())) + this.L1DcachePrefetchMisses = &v251 + } + if r.Intn(10) != 0 { + v252 := uint64(uint64(r.Uint32())) + this.L1IcacheLoads = &v252 + } + if r.Intn(10) != 0 { + v253 := uint64(uint64(r.Uint32())) + this.L1IcacheLoadMisses = &v253 + } + if r.Intn(10) != 0 { + v254 := uint64(uint64(r.Uint32())) + this.L1IcachePrefetches = &v254 + } + if r.Intn(10) != 0 { + v255 := uint64(uint64(r.Uint32())) + this.L1IcachePrefetchMisses = &v255 + } + if r.Intn(10) != 0 { + v256 := uint64(uint64(r.Uint32())) + this.LLCLoads = &v256 + } + if r.Intn(10) != 0 { + v257 := uint64(uint64(r.Uint32())) + this.LLCLoadMisses = &v257 + } + if r.Intn(10) != 0 { + v258 := uint64(uint64(r.Uint32())) + this.LLCStores = &v258 + } + if r.Intn(10) != 0 { + v259 := uint64(uint64(r.Uint32())) + this.LLCStoreMisses = &v259 + } + if r.Intn(10) != 0 { + v260 := uint64(uint64(r.Uint32())) + this.LLCPrefetches = &v260 + } + if r.Intn(10) != 0 { + v261 := uint64(uint64(r.Uint32())) + this.LLCPrefetchMisses = &v261 + } + if r.Intn(10) != 0 { + v262 := uint64(uint64(r.Uint32())) + this.DTLBLoads = &v262 + } + if r.Intn(10) != 0 { + v263 := uint64(uint64(r.Uint32())) + this.DTLBLoadMisses = &v263 + } + if r.Intn(10) != 0 { + v264 := uint64(uint64(r.Uint32())) + this.DTLBStores = &v264 + } + if r.Intn(10) != 0 { + v265 := uint64(uint64(r.Uint32())) + this.DTLBStoreMisses = &v265 + } + if r.Intn(10) != 0 { + v266 := uint64(uint64(r.Uint32())) + this.DTLBPrefetches = &v266 + } + if r.Intn(10) != 0 { + v267 := uint64(uint64(r.Uint32())) + this.DTLBPrefetchMisses = &v267 + } + if r.Intn(10) != 0 { + v268 := uint64(uint64(r.Uint32())) + this.ITLBLoads = &v268 + } + if r.Intn(10) != 0 { + v269 := uint64(uint64(r.Uint32())) + this.ITLBLoadMisses = &v269 + } + if r.Intn(10) != 0 { + v270 := uint64(uint64(r.Uint32())) + this.BranchLoads = &v270 + } + if r.Intn(10) != 0 { + v271 := 
uint64(uint64(r.Uint32())) + this.BranchLoadMisses = &v271 + } + if r.Intn(10) != 0 { + v272 := uint64(uint64(r.Uint32())) + this.NodeLoads = &v272 + } + if r.Intn(10) != 0 { + v273 := uint64(uint64(r.Uint32())) + this.NodeLoadMisses = &v273 + } + if r.Intn(10) != 0 { + v274 := uint64(uint64(r.Uint32())) + this.NodeStores = &v274 + } + if r.Intn(10) != 0 { + v275 := uint64(uint64(r.Uint32())) + this.NodeStoreMisses = &v275 + } + if r.Intn(10) != 0 { + v276 := uint64(uint64(r.Uint32())) + this.NodePrefetches = &v276 + } + if r.Intn(10) != 0 { + v277 := uint64(uint64(r.Uint32())) + this.NodePrefetchMisses = &v277 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedRequest(r randyMesos, easy bool) *Request { + this := &Request{} + if r.Intn(10) != 0 { + this.AgentID = NewPopulatedAgentID(r, easy) + } + if r.Intn(10) != 0 { + v278 := r.Intn(5) + this.Resources = make([]Resource, v278) + for i := 0; i < v278; i++ { + v279 := NewPopulatedResource(r, easy) + this.Resources[i] = *v279 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedOffer(r randyMesos, easy bool) *Offer { + this := &Offer{} + v280 := NewPopulatedOfferID(r, easy) + this.ID = *v280 + v281 := NewPopulatedFrameworkID(r, easy) + this.FrameworkID = *v281 + v282 := NewPopulatedAgentID(r, easy) + this.AgentID = *v282 + this.Hostname = string(randStringMesos(r)) + if r.Intn(10) != 0 { + v283 := r.Intn(5) + this.Resources = make([]Resource, v283) + for i := 0; i < v283; i++ { + v284 := NewPopulatedResource(r, easy) + this.Resources[i] = *v284 + } + } + if r.Intn(10) != 0 { + v285 := r.Intn(5) + this.ExecutorIDs = make([]ExecutorID, v285) + for i := 0; i < v285; i++ { + v286 := NewPopulatedExecutorID(r, easy) + this.ExecutorIDs[i] = *v286 + } + } + if r.Intn(10) != 0 { + v287 := r.Intn(5) + this.Attributes = make([]Attribute, v287) + for i := 0; i < v287; i++ { + v288 := NewPopulatedAttribute(r, easy) + this.Attributes[i] = *v288 + } + } + if r.Intn(10) != 0 { + this.URL = NewPopulatedURL(r, easy) + } + if r.Intn(10) != 0 { + this.Unavailability = NewPopulatedUnavailability(r, easy) + } + if r.Intn(10) != 0 { + this.AllocationInfo = NewPopulatedResource_AllocationInfo(r, easy) + } + if r.Intn(10) != 0 { + this.Domain = NewPopulatedDomainInfo(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedOffer_Operation(r randyMesos, easy bool) *Offer_Operation { + this := &Offer_Operation{} + this.Type = Offer_Operation_Type([]int32{0, 1, 6, 2, 3, 4, 5, 11, 12, 13, 14}[r.Intn(11)]) + if r.Intn(10) != 0 { + this.Launch = NewPopulatedOffer_Operation_Launch(r, easy) + } + if r.Intn(10) != 0 { + this.Reserve = NewPopulatedOffer_Operation_Reserve(r, easy) + } + if r.Intn(10) != 0 { + this.Unreserve = NewPopulatedOffer_Operation_Unreserve(r, easy) + } + if r.Intn(10) != 0 { + this.Create = NewPopulatedOffer_Operation_Create(r, easy) + } + if r.Intn(10) != 0 { + this.Destroy = NewPopulatedOffer_Operation_Destroy(r, easy) + } + if r.Intn(10) != 0 { + this.LaunchGroup = NewPopulatedOffer_Operation_LaunchGroup(r, easy) + } + if r.Intn(10) != 0 { + this.ID = NewPopulatedOperationID(r, easy) + } + if r.Intn(10) != 0 { + this.GrowVolume = NewPopulatedOffer_Operation_GrowVolume(r, easy) + } + if r.Intn(10) != 0 { + this.ShrinkVolume = NewPopulatedOffer_Operation_ShrinkVolume(r, easy) + } + if r.Intn(10) != 0 { + this.CreateDisk = NewPopulatedOffer_Operation_CreateDisk(r, easy) + } + if r.Intn(10) != 0 { + this.DestroyDisk = NewPopulatedOffer_Operation_DestroyDisk(r, easy) + } 
+ if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedOffer_Operation_Launch(r randyMesos, easy bool) *Offer_Operation_Launch { + this := &Offer_Operation_Launch{} + if r.Intn(10) != 0 { + v289 := r.Intn(5) + this.TaskInfos = make([]TaskInfo, v289) + for i := 0; i < v289; i++ { + v290 := NewPopulatedTaskInfo(r, easy) + this.TaskInfos[i] = *v290 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedOffer_Operation_LaunchGroup(r randyMesos, easy bool) *Offer_Operation_LaunchGroup { + this := &Offer_Operation_LaunchGroup{} + v291 := NewPopulatedExecutorInfo(r, easy) + this.Executor = *v291 + v292 := NewPopulatedTaskGroupInfo(r, easy) + this.TaskGroup = *v292 + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedOffer_Operation_Reserve(r randyMesos, easy bool) *Offer_Operation_Reserve { + this := &Offer_Operation_Reserve{} + if r.Intn(10) != 0 { + v293 := r.Intn(5) + this.Resources = make([]Resource, v293) + for i := 0; i < v293; i++ { + v294 := NewPopulatedResource(r, easy) + this.Resources[i] = *v294 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedOffer_Operation_Unreserve(r randyMesos, easy bool) *Offer_Operation_Unreserve { + this := &Offer_Operation_Unreserve{} + if r.Intn(10) != 0 { + v295 := r.Intn(5) + this.Resources = make([]Resource, v295) + for i := 0; i < v295; i++ { + v296 := NewPopulatedResource(r, easy) + this.Resources[i] = *v296 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedOffer_Operation_Create(r randyMesos, easy bool) *Offer_Operation_Create { + this := &Offer_Operation_Create{} + if r.Intn(10) != 0 { + v297 := r.Intn(5) + this.Volumes = make([]Resource, v297) + for i := 0; i < v297; i++ { + v298 := NewPopulatedResource(r, easy) + this.Volumes[i] = *v298 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedOffer_Operation_Destroy(r randyMesos, easy bool) *Offer_Operation_Destroy { + this := &Offer_Operation_Destroy{} + if r.Intn(10) != 0 { + v299 := r.Intn(5) + this.Volumes = make([]Resource, v299) + for i := 0; i < v299; i++ { + v300 := NewPopulatedResource(r, easy) + this.Volumes[i] = *v300 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedOffer_Operation_GrowVolume(r randyMesos, easy bool) *Offer_Operation_GrowVolume { + this := &Offer_Operation_GrowVolume{} + v301 := NewPopulatedResource(r, easy) + this.Volume = *v301 + v302 := NewPopulatedResource(r, easy) + this.Addition = *v302 + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedOffer_Operation_ShrinkVolume(r randyMesos, easy bool) *Offer_Operation_ShrinkVolume { + this := &Offer_Operation_ShrinkVolume{} + v303 := NewPopulatedResource(r, easy) + this.Volume = *v303 + v304 := NewPopulatedValue_Scalar(r, easy) + this.Subtract = *v304 + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedOffer_Operation_CreateDisk(r randyMesos, easy bool) *Offer_Operation_CreateDisk { + this := &Offer_Operation_CreateDisk{} + v305 := NewPopulatedResource(r, easy) + this.Source = *v305 + this.TargetType = Resource_DiskInfo_Source_Type([]int32{0, 1, 2, 3, 4}[r.Intn(5)]) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedOffer_Operation_DestroyDisk(r randyMesos, easy bool) *Offer_Operation_DestroyDisk { + this := &Offer_Operation_DestroyDisk{} + v306 := NewPopulatedResource(r, easy) + this.Source = *v306 + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedInverseOffer(r 
randyMesos, easy bool) *InverseOffer { + this := &InverseOffer{} + v307 := NewPopulatedOfferID(r, easy) + this.OfferID = *v307 + if r.Intn(10) != 0 { + this.URL = NewPopulatedURL(r, easy) + } + v308 := NewPopulatedFrameworkID(r, easy) + this.FrameworkID = *v308 + if r.Intn(10) != 0 { + this.AgentID = NewPopulatedAgentID(r, easy) + } + v309 := NewPopulatedUnavailability(r, easy) + this.Unavailability = *v309 + if r.Intn(10) != 0 { + v310 := r.Intn(5) + this.Resources = make([]Resource, v310) + for i := 0; i < v310; i++ { + v311 := NewPopulatedResource(r, easy) + this.Resources[i] = *v311 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedTaskInfo(r randyMesos, easy bool) *TaskInfo { + this := &TaskInfo{} + this.Name = string(randStringMesos(r)) + v312 := NewPopulatedTaskID(r, easy) + this.TaskID = *v312 + v313 := NewPopulatedAgentID(r, easy) + this.AgentID = *v313 + if r.Intn(10) != 0 { + v314 := r.Intn(5) + this.Resources = make([]Resource, v314) + for i := 0; i < v314; i++ { + v315 := NewPopulatedResource(r, easy) + this.Resources[i] = *v315 + } + } + if r.Intn(10) != 0 { + this.Executor = NewPopulatedExecutorInfo(r, easy) + } + if r.Intn(10) != 0 { + v316 := r.Intn(100) + this.Data = make([]byte, v316) + for i := 0; i < v316; i++ { + this.Data[i] = byte(r.Intn(256)) + } + } + if r.Intn(10) != 0 { + this.Command = NewPopulatedCommandInfo(r, easy) + } + if r.Intn(10) != 0 { + this.HealthCheck = NewPopulatedHealthCheck(r, easy) + } + if r.Intn(10) != 0 { + this.Container = NewPopulatedContainerInfo(r, easy) + } + if r.Intn(10) != 0 { + this.Labels = NewPopulatedLabels(r, easy) + } + if r.Intn(10) != 0 { + this.Discovery = NewPopulatedDiscoveryInfo(r, easy) + } + if r.Intn(10) != 0 { + this.KillPolicy = NewPopulatedKillPolicy(r, easy) + } + if r.Intn(10) != 0 { + this.Check = NewPopulatedCheckInfo(r, easy) + } + if r.Intn(10) != 0 { + this.MaxCompletionTime = NewPopulatedDurationInfo(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedTaskGroupInfo(r randyMesos, easy bool) *TaskGroupInfo { + this := &TaskGroupInfo{} + if r.Intn(10) != 0 { + v317 := r.Intn(5) + this.Tasks = make([]TaskInfo, v317) + for i := 0; i < v317; i++ { + v318 := NewPopulatedTaskInfo(r, easy) + this.Tasks[i] = *v318 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedTask(r randyMesos, easy bool) *Task { + this := &Task{} + this.Name = string(randStringMesos(r)) + v319 := NewPopulatedTaskID(r, easy) + this.TaskID = *v319 + v320 := NewPopulatedFrameworkID(r, easy) + this.FrameworkID = *v320 + if r.Intn(10) != 0 { + this.ExecutorID = NewPopulatedExecutorID(r, easy) + } + v321 := NewPopulatedAgentID(r, easy) + this.AgentID = *v321 + v322 := TaskState([]int32{6, 0, 1, 8, 2, 3, 4, 7, 5, 9, 10, 11, 12, 13}[r.Intn(14)]) + this.State = &v322 + if r.Intn(10) != 0 { + v323 := r.Intn(5) + this.Resources = make([]Resource, v323) + for i := 0; i < v323; i++ { + v324 := NewPopulatedResource(r, easy) + this.Resources[i] = *v324 + } + } + if r.Intn(10) == 0 { + v325 := r.Intn(5) + this.Statuses = make([]TaskStatus, v325) + for i := 0; i < v325; i++ { + v326 := NewPopulatedTaskStatus(r, easy) + this.Statuses[i] = *v326 + } + } + if r.Intn(10) != 0 { + v327 := TaskState([]int32{6, 0, 1, 8, 2, 3, 4, 7, 5, 9, 10, 11, 12, 13}[r.Intn(14)]) + this.StatusUpdateState = &v327 + } + if r.Intn(10) != 0 { + v328 := r.Intn(100) + this.StatusUpdateUUID = make([]byte, v328) + for i := 0; i < v328; i++ { + this.StatusUpdateUUID[i] = byte(r.Intn(256)) + } + } + 
if r.Intn(10) != 0 { + this.Labels = NewPopulatedLabels(r, easy) + } + if r.Intn(10) != 0 { + this.Discovery = NewPopulatedDiscoveryInfo(r, easy) + } + if r.Intn(10) != 0 { + this.Container = NewPopulatedContainerInfo(r, easy) + } + if r.Intn(10) != 0 { + v329 := string(randStringMesos(r)) + this.User = &v329 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedTaskResourceLimitation(r randyMesos, easy bool) *TaskResourceLimitation { + this := &TaskResourceLimitation{} + if r.Intn(10) != 0 { + v330 := r.Intn(5) + this.Resources = make([]Resource, v330) + for i := 0; i < v330; i++ { + v331 := NewPopulatedResource(r, easy) + this.Resources[i] = *v331 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedUUID(r randyMesos, easy bool) *UUID { + this := &UUID{} + v332 := r.Intn(100) + this.Value = make([]byte, v332) + for i := 0; i < v332; i++ { + this.Value[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedOperation(r randyMesos, easy bool) *Operation { + this := &Operation{} + if r.Intn(10) != 0 { + this.FrameworkID = NewPopulatedFrameworkID(r, easy) + } + if r.Intn(10) != 0 { + this.AgentID = NewPopulatedAgentID(r, easy) + } + v333 := NewPopulatedOffer_Operation(r, easy) + this.Info = *v333 + v334 := NewPopulatedOperationStatus(r, easy) + this.LatestStatus = *v334 + if r.Intn(10) != 0 { + v335 := r.Intn(5) + this.Statuses = make([]OperationStatus, v335) + for i := 0; i < v335; i++ { + v336 := NewPopulatedOperationStatus(r, easy) + this.Statuses[i] = *v336 + } + } + v337 := NewPopulatedUUID(r, easy) + this.UUID = *v337 + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedOperationStatus(r randyMesos, easy bool) *OperationStatus { + this := &OperationStatus{} + if r.Intn(10) != 0 { + this.OperationID = NewPopulatedOperationID(r, easy) + } + this.State = OperationState([]int32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}[r.Intn(10)]) + if r.Intn(10) != 0 { + v338 := string(randStringMesos(r)) + this.Message = &v338 + } + if r.Intn(10) != 0 { + v339 := r.Intn(5) + this.ConvertedResources = make([]Resource, v339) + for i := 0; i < v339; i++ { + v340 := NewPopulatedResource(r, easy) + this.ConvertedResources[i] = *v340 + } + } + if r.Intn(10) != 0 { + this.UUID = NewPopulatedUUID(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCheckStatusInfo(r randyMesos, easy bool) *CheckStatusInfo { + this := &CheckStatusInfo{} + if r.Intn(10) != 0 { + v341 := CheckInfo_Type([]int32{0, 1, 2, 3}[r.Intn(4)]) + this.Type = &v341 + } + if r.Intn(10) != 0 { + this.Command = NewPopulatedCheckStatusInfo_Command(r, easy) + } + if r.Intn(10) != 0 { + this.HTTP = NewPopulatedCheckStatusInfo_Http(r, easy) + } + if r.Intn(10) != 0 { + this.TCP = NewPopulatedCheckStatusInfo_Tcp(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCheckStatusInfo_Command(r randyMesos, easy bool) *CheckStatusInfo_Command { + this := &CheckStatusInfo_Command{} + if r.Intn(10) != 0 { + v342 := int32(r.Int31()) + if r.Intn(2) == 0 { + v342 *= -1 + } + this.ExitCode = &v342 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCheckStatusInfo_Http(r randyMesos, easy bool) *CheckStatusInfo_Http { + this := &CheckStatusInfo_Http{} + if r.Intn(10) != 0 { + v343 := uint32(r.Uint32()) + this.StatusCode = &v343 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCheckStatusInfo_Tcp(r randyMesos, easy bool) *CheckStatusInfo_Tcp { + 
this := &CheckStatusInfo_Tcp{} + if r.Intn(10) != 0 { + v344 := bool(bool(r.Intn(2) == 0)) + this.Succeeded = &v344 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedTaskStatus(r randyMesos, easy bool) *TaskStatus { + this := &TaskStatus{} + v345 := NewPopulatedTaskID(r, easy) + this.TaskID = *v345 + v346 := TaskState([]int32{6, 0, 1, 8, 2, 3, 4, 7, 5, 9, 10, 11, 12, 13}[r.Intn(14)]) + this.State = &v346 + if r.Intn(10) != 0 { + v347 := r.Intn(100) + this.Data = make([]byte, v347) + for i := 0; i < v347; i++ { + this.Data[i] = byte(r.Intn(256)) + } + } + if r.Intn(10) != 0 { + v348 := string(randStringMesos(r)) + this.Message = &v348 + } + if r.Intn(10) != 0 { + this.AgentID = NewPopulatedAgentID(r, easy) + } + if r.Intn(10) != 0 { + v349 := float64(r.Float64()) + if r.Intn(2) == 0 { + v349 *= -1 + } + this.Timestamp = &v349 + } + if r.Intn(10) != 0 { + this.ExecutorID = NewPopulatedExecutorID(r, easy) + } + if r.Intn(10) != 0 { + v350 := bool(bool(r.Intn(2) == 0)) + this.Healthy = &v350 + } + if r.Intn(10) != 0 { + v351 := TaskStatus_Source([]int32{0, 1, 2}[r.Intn(3)]) + this.Source = &v351 + } + if r.Intn(10) != 0 { + v352 := TaskStatus_Reason([]int32{0, 21, 19, 20, 8, 17, 22, 33, 23, 24, 1, 2, 3, 4, 5, 6, 27, 7, 9, 18, 10, 11, 31, 32, 12, 13, 30, 28, 29, 25, 26, 14, 15, 16}[r.Intn(34)]) + this.Reason = &v352 + } + if r.Intn(10) != 0 { + v353 := r.Intn(100) + this.UUID = make([]byte, v353) + for i := 0; i < v353; i++ { + this.UUID[i] = byte(r.Intn(256)) + } + } + if r.Intn(10) != 0 { + this.Labels = NewPopulatedLabels(r, easy) + } + if r.Intn(10) == 0 { + this.ContainerStatus = NewPopulatedContainerStatus(r, easy) + } + if r.Intn(10) != 0 { + this.UnreachableTime = NewPopulatedTimeInfo(r, easy) + } + if r.Intn(10) != 0 { + this.CheckStatus = NewPopulatedCheckStatusInfo(r, easy) + } + if r.Intn(10) != 0 { + this.Limitation = NewPopulatedTaskResourceLimitation(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedFilters(r randyMesos, easy bool) *Filters { + this := &Filters{} + if r.Intn(10) != 0 { + v354 := float64(r.Float64()) + if r.Intn(2) == 0 { + v354 *= -1 + } + this.RefuseSeconds = &v354 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedEnvironment(r randyMesos, easy bool) *Environment { + this := &Environment{} + if r.Intn(10) != 0 { + v355 := r.Intn(5) + this.Variables = make([]Environment_Variable, v355) + for i := 0; i < v355; i++ { + v356 := NewPopulatedEnvironment_Variable(r, easy) + this.Variables[i] = *v356 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedEnvironment_Variable(r randyMesos, easy bool) *Environment_Variable { + this := &Environment_Variable{} + this.Name = string(randStringMesos(r)) + if r.Intn(10) != 0 { + v357 := string(randStringMesos(r)) + this.Value = &v357 + } + if r.Intn(10) != 0 { + v358 := Environment_Variable_Type([]int32{0, 1, 2}[r.Intn(3)]) + this.Type = &v358 + } + if r.Intn(10) != 0 { + this.Secret = NewPopulatedSecret(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedParameter(r randyMesos, easy bool) *Parameter { + this := &Parameter{} + this.Key = string(randStringMesos(r)) + this.Value = string(randStringMesos(r)) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedParameters(r randyMesos, easy bool) *Parameters { + this := &Parameters{} + if r.Intn(10) != 0 { + v359 := r.Intn(5) + this.Parameter = make([]Parameter, v359) + for i := 0; i < v359; i++ { + v360 := 
NewPopulatedParameter(r, easy) + this.Parameter[i] = *v360 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCredential(r randyMesos, easy bool) *Credential { + this := &Credential{} + this.Principal = string(randStringMesos(r)) + if r.Intn(10) != 0 { + v361 := string(randStringMesos(r)) + this.Secret = &v361 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCredentials(r randyMesos, easy bool) *Credentials { + this := &Credentials{} + if r.Intn(10) != 0 { + v362 := r.Intn(5) + this.Credentials = make([]Credential, v362) + for i := 0; i < v362; i++ { + v363 := NewPopulatedCredential(r, easy) + this.Credentials[i] = *v363 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedSecret(r randyMesos, easy bool) *Secret { + this := &Secret{} + this.Type = Secret_Type([]int32{0, 1, 2}[r.Intn(3)]) + if r.Intn(10) != 0 { + this.Reference = NewPopulatedSecret_Reference(r, easy) + } + if r.Intn(10) != 0 { + this.Value = NewPopulatedSecret_Value(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedSecret_Reference(r randyMesos, easy bool) *Secret_Reference { + this := &Secret_Reference{} + this.Name = string(randStringMesos(r)) + if r.Intn(10) != 0 { + v364 := string(randStringMesos(r)) + this.Key = &v364 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedSecret_Value(r randyMesos, easy bool) *Secret_Value { + this := &Secret_Value{} + v365 := r.Intn(100) + this.Data = make([]byte, v365) + for i := 0; i < v365; i++ { + this.Data[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedRateLimit(r randyMesos, easy bool) *RateLimit { + this := &RateLimit{} + if r.Intn(10) != 0 { + v366 := float64(r.Float64()) + if r.Intn(2) == 0 { + v366 *= -1 + } + this.QPS = &v366 + } + this.Principal = string(randStringMesos(r)) + if r.Intn(10) != 0 { + v367 := uint64(uint64(r.Uint32())) + this.Capacity = &v367 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedRateLimits(r randyMesos, easy bool) *RateLimits { + this := &RateLimits{} + if r.Intn(10) != 0 { + v368 := r.Intn(5) + this.Limits = make([]RateLimit, v368) + for i := 0; i < v368; i++ { + v369 := NewPopulatedRateLimit(r, easy) + this.Limits[i] = *v369 + } + } + if r.Intn(10) != 0 { + v370 := float64(r.Float64()) + if r.Intn(2) == 0 { + v370 *= -1 + } + this.AggregateDefaultQPS = &v370 + } + if r.Intn(10) != 0 { + v371 := uint64(uint64(r.Uint32())) + this.AggregateDefaultCapacity = &v371 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedImage(r randyMesos, easy bool) *Image { + this := &Image{} + v372 := Image_Type([]int32{1, 2}[r.Intn(2)]) + this.Type = &v372 + if r.Intn(10) != 0 { + this.Appc = NewPopulatedImage_Appc(r, easy) + } + if r.Intn(10) != 0 { + this.Docker = NewPopulatedImage_Docker(r, easy) + } + if r.Intn(10) != 0 { + v373 := bool(bool(r.Intn(2) == 0)) + this.Cached = &v373 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedImage_Appc(r randyMesos, easy bool) *Image_Appc { + this := &Image_Appc{} + this.Name = string(randStringMesos(r)) + if r.Intn(10) != 0 { + v374 := string(randStringMesos(r)) + this.ID = &v374 + } + if r.Intn(10) != 0 { + this.Labels = NewPopulatedLabels(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedImage_Docker(r randyMesos, easy bool) *Image_Docker { + this := &Image_Docker{} + this.Name = string(randStringMesos(r)) + if 
r.Intn(10) != 0 { + this.Credential = NewPopulatedCredential(r, easy) + } + if r.Intn(10) != 0 { + this.Config = NewPopulatedSecret(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedMountPropagation(r randyMesos, easy bool) *MountPropagation { + this := &MountPropagation{} + if r.Intn(10) != 0 { + v375 := MountPropagation_Mode([]int32{0, 1, 2}[r.Intn(3)]) + this.Mode = &v375 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedVolume(r randyMesos, easy bool) *Volume { + this := &Volume{} + this.ContainerPath = string(randStringMesos(r)) + if r.Intn(10) != 0 { + v376 := string(randStringMesos(r)) + this.HostPath = &v376 + } + v377 := Volume_Mode([]int32{1, 2}[r.Intn(2)]) + this.Mode = &v377 + if r.Intn(10) != 0 { + this.Image = NewPopulatedImage(r, easy) + } + if r.Intn(10) != 0 { + this.Source = NewPopulatedVolume_Source(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedVolume_Source(r randyMesos, easy bool) *Volume_Source { + this := &Volume_Source{} + this.Type = Volume_Source_Type([]int32{0, 1, 4, 2, 3}[r.Intn(5)]) + if r.Intn(10) != 0 { + this.DockerVolume = NewPopulatedVolume_Source_DockerVolume(r, easy) + } + if r.Intn(10) != 0 { + this.SandboxPath = NewPopulatedVolume_Source_SandboxPath(r, easy) + } + if r.Intn(10) != 0 { + this.Secret = NewPopulatedSecret(r, easy) + } + if r.Intn(10) != 0 { + this.HostPath = NewPopulatedVolume_Source_HostPath(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedVolume_Source_DockerVolume(r randyMesos, easy bool) *Volume_Source_DockerVolume { + this := &Volume_Source_DockerVolume{} + if r.Intn(10) != 0 { + v378 := string(randStringMesos(r)) + this.Driver = &v378 + } + this.Name = string(randStringMesos(r)) + if r.Intn(10) != 0 { + this.DriverOptions = NewPopulatedParameters(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedVolume_Source_HostPath(r randyMesos, easy bool) *Volume_Source_HostPath { + this := &Volume_Source_HostPath{} + this.Path = string(randStringMesos(r)) + if r.Intn(10) != 0 { + this.MountPropagation = NewPopulatedMountPropagation(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedVolume_Source_SandboxPath(r randyMesos, easy bool) *Volume_Source_SandboxPath { + this := &Volume_Source_SandboxPath{} + this.Type = Volume_Source_SandboxPath_Type([]int32{0, 1, 2}[r.Intn(3)]) + this.Path = string(randStringMesos(r)) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedNetworkInfo(r randyMesos, easy bool) *NetworkInfo { + this := &NetworkInfo{} + if r.Intn(10) != 0 { + v379 := r.Intn(10) + this.Groups = make([]string, v379) + for i := 0; i < v379; i++ { + this.Groups[i] = string(randStringMesos(r)) + } + } + if r.Intn(10) != 0 { + this.Labels = NewPopulatedLabels(r, easy) + } + if r.Intn(10) != 0 { + v380 := r.Intn(5) + this.IPAddresses = make([]NetworkInfo_IPAddress, v380) + for i := 0; i < v380; i++ { + v381 := NewPopulatedNetworkInfo_IPAddress(r, easy) + this.IPAddresses[i] = *v381 + } + } + if r.Intn(10) != 0 { + v382 := string(randStringMesos(r)) + this.Name = &v382 + } + if r.Intn(10) != 0 { + v383 := r.Intn(5) + this.PortMappings = make([]NetworkInfo_PortMapping, v383) + for i := 0; i < v383; i++ { + v384 := NewPopulatedNetworkInfo_PortMapping(r, easy) + this.PortMappings[i] = *v384 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedNetworkInfo_IPAddress(r randyMesos, easy bool) 
*NetworkInfo_IPAddress { + this := &NetworkInfo_IPAddress{} + if r.Intn(10) != 0 { + v385 := NetworkInfo_Protocol([]int32{1, 2}[r.Intn(2)]) + this.Protocol = &v385 + } + if r.Intn(10) != 0 { + v386 := string(randStringMesos(r)) + this.IPAddress = &v386 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedNetworkInfo_PortMapping(r randyMesos, easy bool) *NetworkInfo_PortMapping { + this := &NetworkInfo_PortMapping{} + this.HostPort = uint32(r.Uint32()) + this.ContainerPort = uint32(r.Uint32()) + if r.Intn(10) != 0 { + v387 := string(randStringMesos(r)) + this.Protocol = &v387 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCapabilityInfo(r randyMesos, easy bool) *CapabilityInfo { + this := &CapabilityInfo{} + if r.Intn(10) != 0 { + v388 := r.Intn(10) + this.Capabilities = make([]CapabilityInfo_Capability, v388) + for i := 0; i < v388; i++ { + this.Capabilities[i] = CapabilityInfo_Capability([]int32{0, 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023, 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1032, 1033, 1034, 1035, 1036, 1037}[r.Intn(39)]) + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedLinuxInfo(r randyMesos, easy bool) *LinuxInfo { + this := &LinuxInfo{} + if r.Intn(10) != 0 { + this.CapabilityInfo = NewPopulatedCapabilityInfo(r, easy) + } + if r.Intn(10) != 0 { + this.BoundingCapabilities = NewPopulatedCapabilityInfo(r, easy) + } + if r.Intn(10) != 0 { + this.EffectiveCapabilities = NewPopulatedCapabilityInfo(r, easy) + } + if r.Intn(10) != 0 { + v389 := bool(bool(r.Intn(2) == 0)) + this.SharePIDNamespace = &v389 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedRLimitInfo(r randyMesos, easy bool) *RLimitInfo { + this := &RLimitInfo{} + if r.Intn(10) != 0 { + v390 := r.Intn(5) + this.Rlimits = make([]RLimitInfo_RLimit, v390) + for i := 0; i < v390; i++ { + v391 := NewPopulatedRLimitInfo_RLimit(r, easy) + this.Rlimits[i] = *v391 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedRLimitInfo_RLimit(r randyMesos, easy bool) *RLimitInfo_RLimit { + this := &RLimitInfo_RLimit{} + this.Type = RLimitInfo_RLimit_Type([]int32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}[r.Intn(17)]) + if r.Intn(10) != 0 { + v392 := uint64(uint64(r.Uint32())) + this.Hard = &v392 + } + if r.Intn(10) != 0 { + v393 := uint64(uint64(r.Uint32())) + this.Soft = &v393 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedTTYInfo(r randyMesos, easy bool) *TTYInfo { + this := &TTYInfo{} + if r.Intn(10) != 0 { + this.WindowSize = NewPopulatedTTYInfo_WindowSize(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedTTYInfo_WindowSize(r randyMesos, easy bool) *TTYInfo_WindowSize { + this := &TTYInfo_WindowSize{} + this.Rows = uint32(r.Uint32()) + this.Columns = uint32(r.Uint32()) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedContainerInfo(r randyMesos, easy bool) *ContainerInfo { + this := &ContainerInfo{} + v394 := ContainerInfo_Type([]int32{1, 2}[r.Intn(2)]) + this.Type = &v394 + if r.Intn(10) != 0 { + v395 := r.Intn(5) + this.Volumes = make([]Volume, v395) + for i := 0; i < v395; i++ { + v396 := NewPopulatedVolume(r, easy) + this.Volumes[i] = *v396 + } + } + if r.Intn(10) != 0 { + this.Docker = NewPopulatedContainerInfo_DockerInfo(r, easy) + } + if r.Intn(10) != 0 { + v397 := 
string(randStringMesos(r)) + this.Hostname = &v397 + } + if r.Intn(10) != 0 { + this.Mesos = NewPopulatedContainerInfo_MesosInfo(r, easy) + } + if r.Intn(10) != 0 { + v398 := r.Intn(5) + this.NetworkInfos = make([]NetworkInfo, v398) + for i := 0; i < v398; i++ { + v399 := NewPopulatedNetworkInfo(r, easy) + this.NetworkInfos[i] = *v399 + } + } + if r.Intn(10) != 0 { + this.LinuxInfo = NewPopulatedLinuxInfo(r, easy) + } + if r.Intn(10) != 0 { + this.RlimitInfo = NewPopulatedRLimitInfo(r, easy) + } + if r.Intn(10) != 0 { + this.TTYInfo = NewPopulatedTTYInfo(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedContainerInfo_DockerInfo(r randyMesos, easy bool) *ContainerInfo_DockerInfo { + this := &ContainerInfo_DockerInfo{} + this.Image = string(randStringMesos(r)) + if r.Intn(10) != 0 { + v400 := ContainerInfo_DockerInfo_Network([]int32{1, 2, 3, 4}[r.Intn(4)]) + this.Network = &v400 + } + if r.Intn(10) != 0 { + v401 := r.Intn(5) + this.PortMappings = make([]ContainerInfo_DockerInfo_PortMapping, v401) + for i := 0; i < v401; i++ { + v402 := NewPopulatedContainerInfo_DockerInfo_PortMapping(r, easy) + this.PortMappings[i] = *v402 + } + } + if r.Intn(10) != 0 { + v403 := bool(bool(r.Intn(2) == 0)) + this.Privileged = &v403 + } + if r.Intn(10) != 0 { + v404 := r.Intn(5) + this.Parameters = make([]Parameter, v404) + for i := 0; i < v404; i++ { + v405 := NewPopulatedParameter(r, easy) + this.Parameters[i] = *v405 + } + } + if r.Intn(10) != 0 { + v406 := bool(bool(r.Intn(2) == 0)) + this.ForcePullImage = &v406 + } + if r.Intn(10) != 0 { + v407 := string(randStringMesos(r)) + this.VolumeDriver = &v407 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedContainerInfo_DockerInfo_PortMapping(r randyMesos, easy bool) *ContainerInfo_DockerInfo_PortMapping { + this := &ContainerInfo_DockerInfo_PortMapping{} + this.HostPort = uint32(r.Uint32()) + this.ContainerPort = uint32(r.Uint32()) + if r.Intn(10) != 0 { + v408 := string(randStringMesos(r)) + this.Protocol = &v408 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedContainerInfo_MesosInfo(r randyMesos, easy bool) *ContainerInfo_MesosInfo { + this := &ContainerInfo_MesosInfo{} + if r.Intn(10) != 0 { + this.Image = NewPopulatedImage(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedContainerStatus(r randyMesos, easy bool) *ContainerStatus { + this := &ContainerStatus{} + if r.Intn(10) != 0 { + v409 := r.Intn(5) + this.NetworkInfos = make([]NetworkInfo, v409) + for i := 0; i < v409; i++ { + v410 := NewPopulatedNetworkInfo(r, easy) + this.NetworkInfos[i] = *v410 + } + } + if r.Intn(10) != 0 { + this.CgroupInfo = NewPopulatedCgroupInfo(r, easy) + } + if r.Intn(10) != 0 { + v411 := uint32(r.Uint32()) + this.ExecutorPID = &v411 + } + if r.Intn(10) == 0 { + this.ContainerID = NewPopulatedContainerID(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCgroupInfo(r randyMesos, easy bool) *CgroupInfo { + this := &CgroupInfo{} + if r.Intn(10) != 0 { + this.NetCLS = NewPopulatedCgroupInfo_NetCls(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCgroupInfo_Blkio(r randyMesos, easy bool) *CgroupInfo_Blkio { + this := &CgroupInfo_Blkio{} + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCgroupInfo_Blkio_Value(r randyMesos, easy bool) *CgroupInfo_Blkio_Value { + this := &CgroupInfo_Blkio_Value{} + if r.Intn(10) != 0 { + v412 := 
CgroupInfo_Blkio_Operation([]int32{0, 1, 2, 3, 4, 5}[r.Intn(6)]) + this.Op = &v412 + } + if r.Intn(10) != 0 { + v413 := uint64(uint64(r.Uint32())) + this.Value = &v413 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCgroupInfo_Blkio_CFQ(r randyMesos, easy bool) *CgroupInfo_Blkio_CFQ { + this := &CgroupInfo_Blkio_CFQ{} + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCgroupInfo_Blkio_CFQ_Statistics(r randyMesos, easy bool) *CgroupInfo_Blkio_CFQ_Statistics { + this := &CgroupInfo_Blkio_CFQ_Statistics{} + if r.Intn(10) != 0 { + this.Device = NewPopulatedDevice_Number(r, easy) + } + if r.Intn(10) != 0 { + v414 := uint64(uint64(r.Uint32())) + this.Sectors = &v414 + } + if r.Intn(10) != 0 { + v415 := uint64(uint64(r.Uint32())) + this.Time = &v415 + } + if r.Intn(10) != 0 { + v416 := r.Intn(5) + this.IOServiced = make([]CgroupInfo_Blkio_Value, v416) + for i := 0; i < v416; i++ { + v417 := NewPopulatedCgroupInfo_Blkio_Value(r, easy) + this.IOServiced[i] = *v417 + } + } + if r.Intn(10) != 0 { + v418 := r.Intn(5) + this.IOServiceBytes = make([]CgroupInfo_Blkio_Value, v418) + for i := 0; i < v418; i++ { + v419 := NewPopulatedCgroupInfo_Blkio_Value(r, easy) + this.IOServiceBytes[i] = *v419 + } + } + if r.Intn(10) != 0 { + v420 := r.Intn(5) + this.IOServiceTime = make([]CgroupInfo_Blkio_Value, v420) + for i := 0; i < v420; i++ { + v421 := NewPopulatedCgroupInfo_Blkio_Value(r, easy) + this.IOServiceTime[i] = *v421 + } + } + if r.Intn(10) != 0 { + v422 := r.Intn(5) + this.IOWaitTime = make([]CgroupInfo_Blkio_Value, v422) + for i := 0; i < v422; i++ { + v423 := NewPopulatedCgroupInfo_Blkio_Value(r, easy) + this.IOWaitTime[i] = *v423 + } + } + if r.Intn(10) != 0 { + v424 := r.Intn(5) + this.IOMerged = make([]CgroupInfo_Blkio_Value, v424) + for i := 0; i < v424; i++ { + v425 := NewPopulatedCgroupInfo_Blkio_Value(r, easy) + this.IOMerged[i] = *v425 + } + } + if r.Intn(10) != 0 { + v426 := r.Intn(5) + this.IOQueued = make([]CgroupInfo_Blkio_Value, v426) + for i := 0; i < v426; i++ { + v427 := NewPopulatedCgroupInfo_Blkio_Value(r, easy) + this.IOQueued[i] = *v427 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCgroupInfo_Blkio_Throttling(r randyMesos, easy bool) *CgroupInfo_Blkio_Throttling { + this := &CgroupInfo_Blkio_Throttling{} + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCgroupInfo_Blkio_Throttling_Statistics(r randyMesos, easy bool) *CgroupInfo_Blkio_Throttling_Statistics { + this := &CgroupInfo_Blkio_Throttling_Statistics{} + if r.Intn(10) != 0 { + this.Device = NewPopulatedDevice_Number(r, easy) + } + if r.Intn(10) != 0 { + v428 := r.Intn(5) + this.IOServiced = make([]CgroupInfo_Blkio_Value, v428) + for i := 0; i < v428; i++ { + v429 := NewPopulatedCgroupInfo_Blkio_Value(r, easy) + this.IOServiced[i] = *v429 + } + } + if r.Intn(10) != 0 { + v430 := r.Intn(5) + this.IOServiceBytes = make([]CgroupInfo_Blkio_Value, v430) + for i := 0; i < v430; i++ { + v431 := NewPopulatedCgroupInfo_Blkio_Value(r, easy) + this.IOServiceBytes[i] = *v431 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCgroupInfo_Blkio_Statistics(r randyMesos, easy bool) *CgroupInfo_Blkio_Statistics { + this := &CgroupInfo_Blkio_Statistics{} + if r.Intn(10) != 0 { + v432 := r.Intn(5) + this.CFQ = make([]CgroupInfo_Blkio_CFQ_Statistics, v432) + for i := 0; i < v432; i++ { + v433 := NewPopulatedCgroupInfo_Blkio_CFQ_Statistics(r, easy) + this.CFQ[i] = *v433 + } + } + if r.Intn(10) != 0 { + v434 := 
r.Intn(5) + this.CFQRecursive = make([]CgroupInfo_Blkio_CFQ_Statistics, v434) + for i := 0; i < v434; i++ { + v435 := NewPopulatedCgroupInfo_Blkio_CFQ_Statistics(r, easy) + this.CFQRecursive[i] = *v435 + } + } + if r.Intn(10) != 0 { + v436 := r.Intn(5) + this.Throttling = make([]*CgroupInfo_Blkio_Throttling_Statistics, v436) + for i := 0; i < v436; i++ { + this.Throttling[i] = NewPopulatedCgroupInfo_Blkio_Throttling_Statistics(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedCgroupInfo_NetCls(r randyMesos, easy bool) *CgroupInfo_NetCls { + this := &CgroupInfo_NetCls{} + if r.Intn(10) != 0 { + v437 := uint32(r.Uint32()) + this.ClassID = &v437 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedLabels(r randyMesos, easy bool) *Labels { + this := &Labels{} + if r.Intn(10) != 0 { + v438 := r.Intn(5) + this.Labels = make([]Label, v438) + for i := 0; i < v438; i++ { + v439 := NewPopulatedLabel(r, easy) + this.Labels[i] = *v439 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedLabel(r randyMesos, easy bool) *Label { + this := &Label{} + this.Key = string(randStringMesos(r)) + if r.Intn(10) != 0 { + v440 := string(randStringMesos(r)) + this.Value = &v440 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedPort(r randyMesos, easy bool) *Port { + this := &Port{} + this.Number = uint32(r.Uint32()) + if r.Intn(10) != 0 { + v441 := string(randStringMesos(r)) + this.Name = &v441 + } + if r.Intn(10) != 0 { + v442 := string(randStringMesos(r)) + this.Protocol = &v442 + } + if r.Intn(10) != 0 { + v443 := DiscoveryInfo_Visibility([]int32{0, 1, 2}[r.Intn(3)]) + this.Visibility = &v443 + } + if r.Intn(10) != 0 { + this.Labels = NewPopulatedLabels(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedPorts(r randyMesos, easy bool) *Ports { + this := &Ports{} + if r.Intn(10) != 0 { + v444 := r.Intn(5) + this.Ports = make([]Port, v444) + for i := 0; i < v444; i++ { + v445 := NewPopulatedPort(r, easy) + this.Ports[i] = *v445 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedDiscoveryInfo(r randyMesos, easy bool) *DiscoveryInfo { + this := &DiscoveryInfo{} + this.Visibility = DiscoveryInfo_Visibility([]int32{0, 1, 2}[r.Intn(3)]) + if r.Intn(10) != 0 { + v446 := string(randStringMesos(r)) + this.Name = &v446 + } + if r.Intn(10) != 0 { + v447 := string(randStringMesos(r)) + this.Environment = &v447 + } + if r.Intn(10) != 0 { + v448 := string(randStringMesos(r)) + this.Location = &v448 + } + if r.Intn(10) != 0 { + v449 := string(randStringMesos(r)) + this.Version = &v449 + } + if r.Intn(10) != 0 { + this.Ports = NewPopulatedPorts(r, easy) + } + if r.Intn(10) != 0 { + this.Labels = NewPopulatedLabels(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedWeightInfo(r randyMesos, easy bool) *WeightInfo { + this := &WeightInfo{} + this.Weight = float64(r.Float64()) + if r.Intn(2) == 0 { + this.Weight *= -1 + } + if r.Intn(10) != 0 { + v450 := string(randStringMesos(r)) + this.Role = &v450 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedVersionInfo(r randyMesos, easy bool) *VersionInfo { + this := &VersionInfo{} + this.Version = string(randStringMesos(r)) + if r.Intn(10) != 0 { + v451 := string(randStringMesos(r)) + this.BuildDate = &v451 + } + if r.Intn(10) != 0 { + v452 := float64(r.Float64()) + if r.Intn(2) == 0 { + v452 *= -1 + } + this.BuildTime = &v452 + } + if r.Intn(10) != 
0 { + v453 := string(randStringMesos(r)) + this.BuildUser = &v453 + } + if r.Intn(10) != 0 { + v454 := string(randStringMesos(r)) + this.GitSHA = &v454 + } + if r.Intn(10) != 0 { + v455 := string(randStringMesos(r)) + this.GitBranch = &v455 + } + if r.Intn(10) != 0 { + v456 := string(randStringMesos(r)) + this.GitTag = &v456 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedFlag(r randyMesos, easy bool) *Flag { + this := &Flag{} + this.Name = string(randStringMesos(r)) + if r.Intn(10) != 0 { + v457 := string(randStringMesos(r)) + this.Value = &v457 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedRole(r randyMesos, easy bool) *Role { + this := &Role{} + this.Name = string(randStringMesos(r)) + this.Weight = float64(r.Float64()) + if r.Intn(2) == 0 { + this.Weight *= -1 + } + if r.Intn(10) != 0 { + v458 := r.Intn(5) + this.Frameworks = make([]FrameworkID, v458) + for i := 0; i < v458; i++ { + v459 := NewPopulatedFrameworkID(r, easy) + this.Frameworks[i] = *v459 + } + } + if r.Intn(10) != 0 { + v460 := r.Intn(5) + this.Resources = make([]Resource, v460) + for i := 0; i < v460; i++ { + v461 := NewPopulatedResource(r, easy) + this.Resources[i] = *v461 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedMetric(r randyMesos, easy bool) *Metric { + this := &Metric{} + this.Name = string(randStringMesos(r)) + if r.Intn(10) != 0 { + v462 := float64(r.Float64()) + if r.Intn(2) == 0 { + v462 *= -1 + } + this.Value = &v462 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedFileInfo(r randyMesos, easy bool) *FileInfo { + this := &FileInfo{} + this.Path = string(randStringMesos(r)) + if r.Intn(10) != 0 { + v463 := int32(r.Int31()) + if r.Intn(2) == 0 { + v463 *= -1 + } + this.Nlink = &v463 + } + if r.Intn(10) != 0 { + v464 := uint64(uint64(r.Uint32())) + this.Size = &v464 + } + if r.Intn(10) != 0 { + this.Mtime = NewPopulatedTimeInfo(r, easy) + } + if r.Intn(10) != 0 { + v465 := uint32(r.Uint32()) + this.Mode = &v465 + } + if r.Intn(10) != 0 { + v466 := string(randStringMesos(r)) + this.UID = &v466 + } + if r.Intn(10) != 0 { + v467 := string(randStringMesos(r)) + this.GID = &v467 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedDevice(r randyMesos, easy bool) *Device { + this := &Device{} + if r.Intn(10) != 0 { + v468 := string(randStringMesos(r)) + this.Path = &v468 + } + if r.Intn(10) != 0 { + this.Number = NewPopulatedDevice_Number(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedDevice_Number(r randyMesos, easy bool) *Device_Number { + this := &Device_Number{} + v469 := uint64(uint64(r.Uint32())) + this.MajorNumber = &v469 + v470 := uint64(uint64(r.Uint32())) + this.MinorNumber = &v470 + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedDeviceAccess(r randyMesos, easy bool) *DeviceAccess { + this := &DeviceAccess{} + v471 := NewPopulatedDevice(r, easy) + this.Device = *v471 + v472 := NewPopulatedDeviceAccess_Access(r, easy) + this.Access = *v472 + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedDeviceAccess_Access(r randyMesos, easy bool) *DeviceAccess_Access { + this := &DeviceAccess_Access{} + if r.Intn(10) != 0 { + v473 := bool(bool(r.Intn(2) == 0)) + this.Read = &v473 + } + if r.Intn(10) != 0 { + v474 := bool(bool(r.Intn(2) == 0)) + this.Write = &v474 + } + if r.Intn(10) != 0 { + v475 := bool(bool(r.Intn(2) == 0)) + this.Mknod = &v475 + } + if !easy && r.Intn(10) != 0 { + } + 
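+ // Fuzz pattern (gogoprotobuf-generated): each optional field is populated with probability 9/10 via r.Intn(10) != 0; the empty "if !easy && r.Intn(10) != 0 {}" branch above is where unrecognized-field bytes would be injected (assumption: it stays a no-op here because these messages do not retain unknown fields).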
return this +} + +func NewPopulatedDeviceWhitelist(r randyMesos, easy bool) *DeviceWhitelist { + this := &DeviceWhitelist{} + if r.Intn(10) != 0 { + v476 := r.Intn(5) + this.AllowedDevices = make([]DeviceAccess, v476) + for i := 0; i < v476; i++ { + v477 := NewPopulatedDeviceAccess(r, easy) + this.AllowedDevices[i] = *v477 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +type randyMesos interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneMesos(r randyMesos) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringMesos(r randyMesos) string { + v478 := r.Intn(100) + tmps := make([]rune, v478) + for i := 0; i < v478; i++ { + tmps[i] = randUTF8RuneMesos(r) + } + return string(tmps) +} +func randUnrecognizedMesos(r randyMesos, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldMesos(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldMesos(dAtA []byte, r randyMesos, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateMesos(dAtA, uint64(key)) + v479 := r.Int63() + if r.Intn(2) == 0 { + v479 *= -1 + } + dAtA = encodeVarintPopulateMesos(dAtA, uint64(v479)) + case 1: + dAtA = encodeVarintPopulateMesos(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateMesos(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateMesos(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateMesos(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateMesos(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *FrameworkID) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Value) + n += 1 + l + sovMesos(uint64(l)) + return n +} + +func (m *OfferID) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Value) + n += 1 + l + sovMesos(uint64(l)) + return n +} + +func (m *AgentID) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Value) + n += 1 + l + sovMesos(uint64(l)) + return n +} + +func (m *TaskID) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Value) + n += 1 + l + sovMesos(uint64(l)) + return n +} + +func (m *ExecutorID) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Value) + n += 1 + l + sovMesos(uint64(l)) + return n +} + +func (m *ContainerID) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Value) + n += 1 + l + sovMesos(uint64(l)) + if m.Parent != nil { + l = m.Parent.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *ResourceProviderID) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Value) + n += 1 + l + sovMesos(uint64(l)) + return n +} + +func (m *OperationID) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Value) + n += 1 + l + sovMesos(uint64(l)) + return n +} + +func (m *TimeInfo) ProtoSize() (n int) { + var l 
int + _ = l + n += 1 + sovMesos(uint64(m.Nanoseconds)) + return n +} + +func (m *DurationInfo) ProtoSize() (n int) { + var l int + _ = l + n += 1 + sovMesos(uint64(m.Nanoseconds)) + return n +} + +func (m *Address) ProtoSize() (n int) { + var l int + _ = l + if m.Hostname != nil { + l = len(*m.Hostname) + n += 1 + l + sovMesos(uint64(l)) + } + if m.IP != nil { + l = len(*m.IP) + n += 1 + l + sovMesos(uint64(l)) + } + n += 1 + sovMesos(uint64(m.Port)) + return n +} + +func (m *URL) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Scheme) + n += 1 + l + sovMesos(uint64(l)) + l = m.Address.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + if m.Path != nil { + l = len(*m.Path) + n += 1 + l + sovMesos(uint64(l)) + } + if len(m.Query) > 0 { + for _, e := range m.Query { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if m.Fragment != nil { + l = len(*m.Fragment) + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *Unavailability) ProtoSize() (n int) { + var l int + _ = l + l = m.Start.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + if m.Duration != nil { + l = m.Duration.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *MachineID) ProtoSize() (n int) { + var l int + _ = l + if m.Hostname != nil { + l = len(*m.Hostname) + n += 1 + l + sovMesos(uint64(l)) + } + if m.IP != nil { + l = len(*m.IP) + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *MachineInfo) ProtoSize() (n int) { + var l int + _ = l + l = m.ID.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + if m.Mode != nil { + n += 1 + sovMesos(uint64(*m.Mode)) + } + if m.Unavailability != nil { + l = m.Unavailability.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *FrameworkInfo) ProtoSize() (n int) { + var l int + _ = l + l = len(m.User) + n += 1 + l + sovMesos(uint64(l)) + l = len(m.Name) + n += 1 + l + sovMesos(uint64(l)) + if m.ID != nil { + l = m.ID.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.FailoverTimeout != nil { + n += 9 + } + if m.Checkpoint != nil { + n += 2 + } + if m.Role != nil { + l = len(*m.Role) + n += 1 + l + sovMesos(uint64(l)) + } + if m.Hostname != nil { + l = len(*m.Hostname) + n += 1 + l + sovMesos(uint64(l)) + } + if m.Principal != nil { + l = len(*m.Principal) + n += 1 + l + sovMesos(uint64(l)) + } + if m.WebUiURL != nil { + l = len(*m.WebUiURL) + n += 1 + l + sovMesos(uint64(l)) + } + if len(m.Capabilities) > 0 { + for _, e := range m.Capabilities { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if m.Labels != nil { + l = m.Labels.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if len(m.Roles) > 0 { + for _, s := range m.Roles { + l = len(s) + n += 1 + l + sovMesos(uint64(l)) + } + } + return n +} + +func (m *FrameworkInfo_Capability) ProtoSize() (n int) { + var l int + _ = l + n += 1 + sovMesos(uint64(m.Type)) + return n +} + +func (m *CheckInfo) ProtoSize() (n int) { + var l int + _ = l + n += 1 + sovMesos(uint64(m.Type)) + if m.Command != nil { + l = m.Command.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.HTTP != nil { + l = m.HTTP.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.DelaySeconds != nil { + n += 9 + } + if m.IntervalSeconds != nil { + n += 9 + } + if m.TimeoutSeconds != nil { + n += 9 + } + if m.TCP != nil { + l = m.TCP.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *CheckInfo_Command) ProtoSize() (n int) { + var l int + _ = l + l = m.Command.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + return n +} + +func (m 
*CheckInfo_Http) ProtoSize() (n int) { + var l int + _ = l + n += 1 + sovMesos(uint64(m.Port)) + if m.Path != nil { + l = len(*m.Path) + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *CheckInfo_Tcp) ProtoSize() (n int) { + var l int + _ = l + n += 1 + sovMesos(uint64(m.Port)) + return n +} + +func (m *HealthCheck) ProtoSize() (n int) { + var l int + _ = l + if m.HTTP != nil { + l = m.HTTP.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.DelaySeconds != nil { + n += 9 + } + if m.IntervalSeconds != nil { + n += 9 + } + if m.TimeoutSeconds != nil { + n += 9 + } + if m.ConsecutiveFailures != nil { + n += 1 + sovMesos(uint64(*m.ConsecutiveFailures)) + } + if m.GracePeriodSeconds != nil { + n += 9 + } + if m.Command != nil { + l = m.Command.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + n += 1 + sovMesos(uint64(m.Type)) + if m.TCP != nil { + l = m.TCP.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *HealthCheck_HTTPCheckInfo) ProtoSize() (n int) { + var l int + _ = l + n += 1 + sovMesos(uint64(m.Port)) + if m.Path != nil { + l = len(*m.Path) + n += 1 + l + sovMesos(uint64(l)) + } + if m.Scheme != nil { + l = len(*m.Scheme) + n += 1 + l + sovMesos(uint64(l)) + } + if len(m.Statuses) > 0 { + for _, e := range m.Statuses { + n += 1 + sovMesos(uint64(e)) + } + } + if m.Protocol != nil { + n += 1 + sovMesos(uint64(*m.Protocol)) + } + return n +} + +func (m *HealthCheck_TCPCheckInfo) ProtoSize() (n int) { + var l int + _ = l + n += 1 + sovMesos(uint64(m.Port)) + if m.Protocol != nil { + n += 1 + sovMesos(uint64(*m.Protocol)) + } + return n +} + +func (m *KillPolicy) ProtoSize() (n int) { + var l int + _ = l + if m.GracePeriod != nil { + l = m.GracePeriod.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *CommandInfo) ProtoSize() (n int) { + var l int + _ = l + if len(m.URIs) > 0 { + for _, e := range m.URIs { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if m.Environment != nil { + l = m.Environment.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Value != nil { + l = len(*m.Value) + n += 1 + l + sovMesos(uint64(l)) + } + if m.User != nil { + l = len(*m.User) + n += 1 + l + sovMesos(uint64(l)) + } + if m.Shell != nil { + n += 2 + } + if len(m.Arguments) > 0 { + for _, s := range m.Arguments { + l = len(s) + n += 1 + l + sovMesos(uint64(l)) + } + } + return n +} + +func (m *CommandInfo_URI) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Value) + n += 1 + l + sovMesos(uint64(l)) + if m.Executable != nil { + n += 2 + } + if m.Extract != nil { + n += 2 + } + if m.Cache != nil { + n += 2 + } + if m.OutputFile != nil { + l = len(*m.OutputFile) + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *ExecutorInfo) ProtoSize() (n int) { + var l int + _ = l + l = m.ExecutorID.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovMesos(uint64(l)) + } + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if m.Command != nil { + l = m.Command.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.FrameworkID != nil { + l = m.FrameworkID.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Name != nil { + l = len(*m.Name) + n += 1 + l + sovMesos(uint64(l)) + } + if m.Source != nil { + l = len(*m.Source) + n += 1 + l + sovMesos(uint64(l)) + } + if m.Container != nil { + l = m.Container.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Discovery != nil { 
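+ // Length-delimited (embedded message) field: size is one tag byte, plus a varint length prefix of sovMesos(l) bytes, plus the l payload bytes.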
+ l = m.Discovery.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.ShutdownGracePeriod != nil { + l = m.ShutdownGracePeriod.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Labels != nil { + l = m.Labels.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + n += 1 + sovMesos(uint64(m.Type)) + return n +} + +func (m *DomainInfo) ProtoSize() (n int) { + var l int + _ = l + if m.FaultDomain != nil { + l = m.FaultDomain.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *DomainInfo_FaultDomain) ProtoSize() (n int) { + var l int + _ = l + l = m.Region.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + l = m.Zone.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + return n +} + +func (m *DomainInfo_FaultDomain_RegionInfo) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovMesos(uint64(l)) + return n +} + +func (m *DomainInfo_FaultDomain_ZoneInfo) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovMesos(uint64(l)) + return n +} + +func (m *MasterInfo) ProtoSize() (n int) { + var l int + _ = l + l = len(m.ID) + n += 1 + l + sovMesos(uint64(l)) + n += 1 + sovMesos(uint64(m.IP)) + if m.Port != nil { + n += 1 + sovMesos(uint64(*m.Port)) + } + if m.PID != nil { + l = len(*m.PID) + n += 1 + l + sovMesos(uint64(l)) + } + if m.Hostname != nil { + l = len(*m.Hostname) + n += 1 + l + sovMesos(uint64(l)) + } + if m.Version != nil { + l = len(*m.Version) + n += 1 + l + sovMesos(uint64(l)) + } + if m.Address != nil { + l = m.Address.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Domain != nil { + l = m.Domain.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if len(m.Capabilities) > 0 { + for _, e := range m.Capabilities { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + return n +} + +func (m *MasterInfo_Capability) ProtoSize() (n int) { + var l int + _ = l + n += 1 + sovMesos(uint64(m.Type)) + return n +} + +func (m *AgentInfo) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Hostname) + n += 1 + l + sovMesos(uint64(l)) + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if m.ID != nil { + l = m.ID.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Port != nil { + n += 1 + sovMesos(uint64(*m.Port)) + } + if m.Domain != nil { + l = m.Domain.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *AgentInfo_Capability) ProtoSize() (n int) { + var l int + _ = l + n += 1 + sovMesos(uint64(m.Type)) + return n +} + +func (m *CSIPluginContainerInfo) ProtoSize() (n int) { + var l int + _ = l + if len(m.Services) > 0 { + for _, e := range m.Services { + n += 1 + sovMesos(uint64(e)) + } + } + if m.Command != nil { + l = m.Command.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if m.Container != nil { + l = m.Container.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *CSIPluginInfo) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovMesos(uint64(l)) + l = len(m.Name) + n += 1 + l + sovMesos(uint64(l)) + if len(m.Containers) > 0 { + for _, e := range m.Containers { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + return n +} + +func (m *ResourceProviderInfo) ProtoSize() (n 
int) { + var l int + _ = l + if m.ID != nil { + l = m.ID.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + l = len(m.Type) + n += 1 + l + sovMesos(uint64(l)) + l = len(m.Name) + n += 1 + l + sovMesos(uint64(l)) + if len(m.DefaultReservations) > 0 { + for _, e := range m.DefaultReservations { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if m.Storage != nil { + l = m.Storage.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *ResourceProviderInfo_Storage) ProtoSize() (n int) { + var l int + _ = l + l = m.Plugin.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + return n +} + +func (m *Value) ProtoSize() (n int) { + var l int + _ = l + n += 1 + sovMesos(uint64(m.Type)) + if m.Scalar != nil { + l = m.Scalar.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Ranges != nil { + l = m.Ranges.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Set != nil { + l = m.Set.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Text != nil { + l = m.Text.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *Value_Scalar) ProtoSize() (n int) { + var l int + _ = l + n += 9 + return n +} + +func (m *Value_Range) ProtoSize() (n int) { + var l int + _ = l + n += 1 + sovMesos(uint64(m.Begin)) + n += 1 + sovMesos(uint64(m.End)) + return n +} + +func (m *Value_Ranges) ProtoSize() (n int) { + var l int + _ = l + if len(m.Range) > 0 { + for _, e := range m.Range { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + return n +} + +func (m *Value_Set) ProtoSize() (n int) { + var l int + _ = l + if len(m.Item) > 0 { + for _, s := range m.Item { + l = len(s) + n += 1 + l + sovMesos(uint64(l)) + } + } + return n +} + +func (m *Value_Text) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Value) + n += 1 + l + sovMesos(uint64(l)) + return n +} + +func (m *Attribute) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovMesos(uint64(l)) + n += 1 + sovMesos(uint64(m.Type)) + if m.Scalar != nil { + l = m.Scalar.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Ranges != nil { + l = m.Ranges.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Text != nil { + l = m.Text.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Set != nil { + l = m.Set.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *Resource) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovMesos(uint64(l)) + if m.Type != nil { + n += 1 + sovMesos(uint64(*m.Type)) + } + if m.Scalar != nil { + l = m.Scalar.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Ranges != nil { + l = m.Ranges.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Set != nil { + l = m.Set.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Role != nil { + l = len(*m.Role) + n += 1 + l + sovMesos(uint64(l)) + } + if m.Disk != nil { + l = m.Disk.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Reservation != nil { + l = m.Reservation.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Revocable != nil { + l = m.Revocable.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Shared != nil { + l = m.Shared.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.AllocationInfo != nil { + l = m.AllocationInfo.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.ProviderID != nil { + l = m.ProviderID.ProtoSize() + n += 1 + l + 
sovMesos(uint64(l)) + } + if len(m.Reservations) > 0 { + for _, e := range m.Reservations { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + return n +} + +func (m *Resource_AllocationInfo) ProtoSize() (n int) { + var l int + _ = l + if m.Role != nil { + l = len(*m.Role) + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *Resource_ReservationInfo) ProtoSize() (n int) { + var l int + _ = l + if m.Principal != nil { + l = len(*m.Principal) + n += 1 + l + sovMesos(uint64(l)) + } + if m.Labels != nil { + l = m.Labels.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Role != nil { + l = len(*m.Role) + n += 1 + l + sovMesos(uint64(l)) + } + if m.Type != nil { + n += 1 + sovMesos(uint64(*m.Type)) + } + return n +} + +func (m *Resource_DiskInfo) ProtoSize() (n int) { + var l int + _ = l + if m.Persistence != nil { + l = m.Persistence.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Volume != nil { + l = m.Volume.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Source != nil { + l = m.Source.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *Resource_DiskInfo_Persistence) ProtoSize() (n int) { + var l int + _ = l + l = len(m.ID) + n += 1 + l + sovMesos(uint64(l)) + if m.Principal != nil { + l = len(*m.Principal) + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *Resource_DiskInfo_Source) ProtoSize() (n int) { + var l int + _ = l + n += 1 + sovMesos(uint64(m.Type)) + if m.Path != nil { + l = m.Path.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Mount != nil { + l = m.Mount.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.ID != nil { + l = len(*m.ID) + n += 1 + l + sovMesos(uint64(l)) + } + if m.Metadata != nil { + l = m.Metadata.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Profile != nil { + l = len(*m.Profile) + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *Resource_DiskInfo_Source_Path) ProtoSize() (n int) { + var l int + _ = l + if m.Root != nil { + l = len(*m.Root) + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *Resource_DiskInfo_Source_Mount) ProtoSize() (n int) { + var l int + _ = l + if m.Root != nil { + l = len(*m.Root) + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *Resource_RevocableInfo) ProtoSize() (n int) { + var l int + _ = l + return n +} + +func (m *Resource_SharedInfo) ProtoSize() (n int) { + var l int + _ = l + return n +} + +func (m *TrafficControlStatistics) ProtoSize() (n int) { + var l int + _ = l + l = len(m.ID) + n += 1 + l + sovMesos(uint64(l)) + if m.Backlog != nil { + n += 1 + sovMesos(uint64(*m.Backlog)) + } + if m.Bytes != nil { + n += 1 + sovMesos(uint64(*m.Bytes)) + } + if m.Drops != nil { + n += 1 + sovMesos(uint64(*m.Drops)) + } + if m.Overlimits != nil { + n += 1 + sovMesos(uint64(*m.Overlimits)) + } + if m.Packets != nil { + n += 1 + sovMesos(uint64(*m.Packets)) + } + if m.Qlen != nil { + n += 1 + sovMesos(uint64(*m.Qlen)) + } + if m.RateBPS != nil { + n += 1 + sovMesos(uint64(*m.RateBPS)) + } + if m.RatePPS != nil { + n += 1 + sovMesos(uint64(*m.RatePPS)) + } + if m.Requeues != nil { + n += 1 + sovMesos(uint64(*m.Requeues)) + } + return n +} + +func (m *IpStatistics) ProtoSize() (n int) { + var l int + _ = l + if m.Forwarding != nil { + n += 1 + sovMesos(uint64(*m.Forwarding)) + } + if m.DefaultTTL != nil { + n += 1 + sovMesos(uint64(*m.DefaultTTL)) + } + if m.InReceives != nil { + n += 1 + sovMesos(uint64(*m.InReceives)) + } + if m.InHdrErrors != nil { + n += 1 + 
sovMesos(uint64(*m.InHdrErrors)) + } + if m.InAddrErrors != nil { + n += 1 + sovMesos(uint64(*m.InAddrErrors)) + } + if m.ForwDatagrams != nil { + n += 1 + sovMesos(uint64(*m.ForwDatagrams)) + } + if m.InUnknownProtos != nil { + n += 1 + sovMesos(uint64(*m.InUnknownProtos)) + } + if m.InDiscards != nil { + n += 1 + sovMesos(uint64(*m.InDiscards)) + } + if m.InDelivers != nil { + n += 1 + sovMesos(uint64(*m.InDelivers)) + } + if m.OutRequests != nil { + n += 1 + sovMesos(uint64(*m.OutRequests)) + } + if m.OutDiscards != nil { + n += 1 + sovMesos(uint64(*m.OutDiscards)) + } + if m.OutNoRoutes != nil { + n += 1 + sovMesos(uint64(*m.OutNoRoutes)) + } + if m.ReasmTimeout != nil { + n += 1 + sovMesos(uint64(*m.ReasmTimeout)) + } + if m.ReasmReqds != nil { + n += 1 + sovMesos(uint64(*m.ReasmReqds)) + } + if m.ReasmOKs != nil { + n += 1 + sovMesos(uint64(*m.ReasmOKs)) + } + if m.ReasmFails != nil { + n += 2 + sovMesos(uint64(*m.ReasmFails)) + } + if m.FragOKs != nil { + n += 2 + sovMesos(uint64(*m.FragOKs)) + } + if m.FragFails != nil { + n += 2 + sovMesos(uint64(*m.FragFails)) + } + if m.FragCreates != nil { + n += 2 + sovMesos(uint64(*m.FragCreates)) + } + return n +} + +func (m *IcmpStatistics) ProtoSize() (n int) { + var l int + _ = l + if m.InMsgs != nil { + n += 1 + sovMesos(uint64(*m.InMsgs)) + } + if m.InErrors != nil { + n += 1 + sovMesos(uint64(*m.InErrors)) + } + if m.InCsumErrors != nil { + n += 1 + sovMesos(uint64(*m.InCsumErrors)) + } + if m.InDestUnreachs != nil { + n += 1 + sovMesos(uint64(*m.InDestUnreachs)) + } + if m.InTimeExcds != nil { + n += 1 + sovMesos(uint64(*m.InTimeExcds)) + } + if m.InParmProbs != nil { + n += 1 + sovMesos(uint64(*m.InParmProbs)) + } + if m.InSrcQuenchs != nil { + n += 1 + sovMesos(uint64(*m.InSrcQuenchs)) + } + if m.InRedirects != nil { + n += 1 + sovMesos(uint64(*m.InRedirects)) + } + if m.InEchos != nil { + n += 1 + sovMesos(uint64(*m.InEchos)) + } + if m.InEchoReps != nil { + n += 1 + sovMesos(uint64(*m.InEchoReps)) + } + if m.InTimestamps != nil { + n += 1 + sovMesos(uint64(*m.InTimestamps)) + } + if m.InTimestampReps != nil { + n += 1 + sovMesos(uint64(*m.InTimestampReps)) + } + if m.InAddrMasks != nil { + n += 1 + sovMesos(uint64(*m.InAddrMasks)) + } + if m.InAddrMaskReps != nil { + n += 1 + sovMesos(uint64(*m.InAddrMaskReps)) + } + if m.OutMsgs != nil { + n += 1 + sovMesos(uint64(*m.OutMsgs)) + } + if m.OutErrors != nil { + n += 2 + sovMesos(uint64(*m.OutErrors)) + } + if m.OutDestUnreachs != nil { + n += 2 + sovMesos(uint64(*m.OutDestUnreachs)) + } + if m.OutTimeExcds != nil { + n += 2 + sovMesos(uint64(*m.OutTimeExcds)) + } + if m.OutParmProbs != nil { + n += 2 + sovMesos(uint64(*m.OutParmProbs)) + } + if m.OutSrcQuenchs != nil { + n += 2 + sovMesos(uint64(*m.OutSrcQuenchs)) + } + if m.OutRedirects != nil { + n += 2 + sovMesos(uint64(*m.OutRedirects)) + } + if m.OutEchos != nil { + n += 2 + sovMesos(uint64(*m.OutEchos)) + } + if m.OutEchoReps != nil { + n += 2 + sovMesos(uint64(*m.OutEchoReps)) + } + if m.OutTimestamps != nil { + n += 2 + sovMesos(uint64(*m.OutTimestamps)) + } + if m.OutTimestampReps != nil { + n += 2 + sovMesos(uint64(*m.OutTimestampReps)) + } + if m.OutAddrMasks != nil { + n += 2 + sovMesos(uint64(*m.OutAddrMasks)) + } + if m.OutAddrMaskReps != nil { + n += 2 + sovMesos(uint64(*m.OutAddrMaskReps)) + } + return n +} + +func (m *TcpStatistics) ProtoSize() (n int) { + var l int + _ = l + if m.RtoAlgorithm != nil { + n += 1 + sovMesos(uint64(*m.RtoAlgorithm)) + } + if m.RtoMin != nil { + n += 1 + sovMesos(uint64(*m.RtoMin)) + } 
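+ // Optional varint scalars cost one tag byte plus sovMesos(value) bytes, where sovMesos returns the varint-encoded width of its argument.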
+ if m.RtoMax != nil { + n += 1 + sovMesos(uint64(*m.RtoMax)) + } + if m.MaxConn != nil { + n += 1 + sovMesos(uint64(*m.MaxConn)) + } + if m.ActiveOpens != nil { + n += 1 + sovMesos(uint64(*m.ActiveOpens)) + } + if m.PassiveOpens != nil { + n += 1 + sovMesos(uint64(*m.PassiveOpens)) + } + if m.AttemptFails != nil { + n += 1 + sovMesos(uint64(*m.AttemptFails)) + } + if m.EstabResets != nil { + n += 1 + sovMesos(uint64(*m.EstabResets)) + } + if m.CurrEstab != nil { + n += 1 + sovMesos(uint64(*m.CurrEstab)) + } + if m.InSegs != nil { + n += 1 + sovMesos(uint64(*m.InSegs)) + } + if m.OutSegs != nil { + n += 1 + sovMesos(uint64(*m.OutSegs)) + } + if m.RetransSegs != nil { + n += 1 + sovMesos(uint64(*m.RetransSegs)) + } + if m.InErrs != nil { + n += 1 + sovMesos(uint64(*m.InErrs)) + } + if m.OutRsts != nil { + n += 1 + sovMesos(uint64(*m.OutRsts)) + } + if m.InCsumErrors != nil { + n += 1 + sovMesos(uint64(*m.InCsumErrors)) + } + return n +} + +func (m *UdpStatistics) ProtoSize() (n int) { + var l int + _ = l + if m.InDatagrams != nil { + n += 1 + sovMesos(uint64(*m.InDatagrams)) + } + if m.NoPorts != nil { + n += 1 + sovMesos(uint64(*m.NoPorts)) + } + if m.InErrors != nil { + n += 1 + sovMesos(uint64(*m.InErrors)) + } + if m.OutDatagrams != nil { + n += 1 + sovMesos(uint64(*m.OutDatagrams)) + } + if m.RcvbufErrors != nil { + n += 1 + sovMesos(uint64(*m.RcvbufErrors)) + } + if m.SndbufErrors != nil { + n += 1 + sovMesos(uint64(*m.SndbufErrors)) + } + if m.InCsumErrors != nil { + n += 1 + sovMesos(uint64(*m.InCsumErrors)) + } + if m.IgnoredMulti != nil { + n += 1 + sovMesos(uint64(*m.IgnoredMulti)) + } + return n +} + +func (m *SNMPStatistics) ProtoSize() (n int) { + var l int + _ = l + if m.IPStats != nil { + l = m.IPStats.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.ICMPStats != nil { + l = m.ICMPStats.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.TCPStats != nil { + l = m.TCPStats.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.UDPStats != nil { + l = m.UDPStats.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *DiskStatistics) ProtoSize() (n int) { + var l int + _ = l + if m.Source != nil { + l = m.Source.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Persistence != nil { + l = m.Persistence.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.LimitBytes != nil { + n += 1 + sovMesos(uint64(*m.LimitBytes)) + } + if m.UsedBytes != nil { + n += 1 + sovMesos(uint64(*m.UsedBytes)) + } + return n +} + +func (m *ResourceStatistics) ProtoSize() (n int) { + var l int + _ = l + n += 9 + if m.CPUsUserTimeSecs != nil { + n += 9 + } + if m.CPUsSystemTimeSecs != nil { + n += 9 + } + if m.CPUsLimit != nil { + n += 9 + } + if m.MemRSSBytes != nil { + n += 1 + sovMesos(uint64(*m.MemRSSBytes)) + } + if m.MemLimitBytes != nil { + n += 1 + sovMesos(uint64(*m.MemLimitBytes)) + } + if m.CPUsNrPeriods != nil { + n += 1 + sovMesos(uint64(*m.CPUsNrPeriods)) + } + if m.CPUsNrThrottled != nil { + n += 1 + sovMesos(uint64(*m.CPUsNrThrottled)) + } + if m.CPUsThrottledTimeSecs != nil { + n += 9 + } + if m.MemFileBytes != nil { + n += 1 + sovMesos(uint64(*m.MemFileBytes)) + } + if m.MemAnonBytes != nil { + n += 1 + sovMesos(uint64(*m.MemAnonBytes)) + } + if m.MemMappedFileBytes != nil { + n += 1 + sovMesos(uint64(*m.MemMappedFileBytes)) + } + if m.Perf != nil { + l = m.Perf.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.NetRxPackets != nil { + n += 1 + sovMesos(uint64(*m.NetRxPackets)) + } + if m.NetRxBytes != nil { + n += 1 + 
sovMesos(uint64(*m.NetRxBytes)) + } + if m.NetRxErrors != nil { + n += 2 + sovMesos(uint64(*m.NetRxErrors)) + } + if m.NetRxDropped != nil { + n += 2 + sovMesos(uint64(*m.NetRxDropped)) + } + if m.NetTxPackets != nil { + n += 2 + sovMesos(uint64(*m.NetTxPackets)) + } + if m.NetTxBytes != nil { + n += 2 + sovMesos(uint64(*m.NetTxBytes)) + } + if m.NetTxErrors != nil { + n += 2 + sovMesos(uint64(*m.NetTxErrors)) + } + if m.NetTxDropped != nil { + n += 2 + sovMesos(uint64(*m.NetTxDropped)) + } + if m.NetTCPRttMicrosecsP50 != nil { + n += 10 + } + if m.NetTCPRttMicrosecsP90 != nil { + n += 10 + } + if m.NetTCPRttMicrosecsP95 != nil { + n += 10 + } + if m.NetTCPRttMicrosecsP99 != nil { + n += 10 + } + if m.DiskLimitBytes != nil { + n += 2 + sovMesos(uint64(*m.DiskLimitBytes)) + } + if m.DiskUsedBytes != nil { + n += 2 + sovMesos(uint64(*m.DiskUsedBytes)) + } + if m.NetTCPActiveConnections != nil { + n += 10 + } + if m.NetTCPTimeWaitConnections != nil { + n += 10 + } + if m.Processes != nil { + n += 2 + sovMesos(uint64(*m.Processes)) + } + if m.Threads != nil { + n += 2 + sovMesos(uint64(*m.Threads)) + } + if m.MemLowPressureCounter != nil { + n += 2 + sovMesos(uint64(*m.MemLowPressureCounter)) + } + if m.MemMediumPressureCounter != nil { + n += 2 + sovMesos(uint64(*m.MemMediumPressureCounter)) + } + if m.MemCriticalPressureCounter != nil { + n += 2 + sovMesos(uint64(*m.MemCriticalPressureCounter)) + } + if len(m.NetTrafficControlStatistics) > 0 { + for _, e := range m.NetTrafficControlStatistics { + l = e.ProtoSize() + n += 2 + l + sovMesos(uint64(l)) + } + } + if m.MemTotalBytes != nil { + n += 2 + sovMesos(uint64(*m.MemTotalBytes)) + } + if m.MemTotalMemswBytes != nil { + n += 2 + sovMesos(uint64(*m.MemTotalMemswBytes)) + } + if m.MemSoftLimitBytes != nil { + n += 2 + sovMesos(uint64(*m.MemSoftLimitBytes)) + } + if m.MemCacheBytes != nil { + n += 2 + sovMesos(uint64(*m.MemCacheBytes)) + } + if m.MemSwapBytes != nil { + n += 2 + sovMesos(uint64(*m.MemSwapBytes)) + } + if m.MemUnevictableBytes != nil { + n += 2 + sovMesos(uint64(*m.MemUnevictableBytes)) + } + if m.NetSNMPStatistics != nil { + l = m.NetSNMPStatistics.ProtoSize() + n += 2 + l + sovMesos(uint64(l)) + } + if len(m.DiskStatistics) > 0 { + for _, e := range m.DiskStatistics { + l = e.ProtoSize() + n += 2 + l + sovMesos(uint64(l)) + } + } + if m.BlkioStatistics != nil { + l = m.BlkioStatistics.ProtoSize() + n += 2 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *ResourceUsage) ProtoSize() (n int) { + var l int + _ = l + if len(m.Executors) > 0 { + for _, e := range m.Executors { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if len(m.Total) > 0 { + for _, e := range m.Total { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + return n +} + +func (m *ResourceUsage_Executor) ProtoSize() (n int) { + var l int + _ = l + l = m.ExecutorInfo.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + if len(m.Allocated) > 0 { + for _, e := range m.Allocated { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if m.Statistics != nil { + l = m.Statistics.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + l = m.ContainerID.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + if len(m.Tasks) > 0 { + for _, e := range m.Tasks { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + return n +} + +func (m *ResourceUsage_Executor_Task) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovMesos(uint64(l)) + l = m.ID.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + if 
len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if m.Labels != nil { + l = m.Labels.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *PerfStatistics) ProtoSize() (n int) { + var l int + _ = l + n += 9 + n += 9 + if m.Cycles != nil { + n += 1 + sovMesos(uint64(*m.Cycles)) + } + if m.StalledCyclesFrontend != nil { + n += 1 + sovMesos(uint64(*m.StalledCyclesFrontend)) + } + if m.StalledCyclesBackend != nil { + n += 1 + sovMesos(uint64(*m.StalledCyclesBackend)) + } + if m.Instructions != nil { + n += 1 + sovMesos(uint64(*m.Instructions)) + } + if m.CacheReferences != nil { + n += 1 + sovMesos(uint64(*m.CacheReferences)) + } + if m.CacheMisses != nil { + n += 1 + sovMesos(uint64(*m.CacheMisses)) + } + if m.Branches != nil { + n += 1 + sovMesos(uint64(*m.Branches)) + } + if m.BranchMisses != nil { + n += 1 + sovMesos(uint64(*m.BranchMisses)) + } + if m.BusCycles != nil { + n += 1 + sovMesos(uint64(*m.BusCycles)) + } + if m.RefCycles != nil { + n += 1 + sovMesos(uint64(*m.RefCycles)) + } + if m.CPUClock != nil { + n += 9 + } + if m.TaskClock != nil { + n += 9 + } + if m.PageFaults != nil { + n += 1 + sovMesos(uint64(*m.PageFaults)) + } + if m.MinorFaults != nil { + n += 2 + sovMesos(uint64(*m.MinorFaults)) + } + if m.MajorFaults != nil { + n += 2 + sovMesos(uint64(*m.MajorFaults)) + } + if m.ContextSwitches != nil { + n += 2 + sovMesos(uint64(*m.ContextSwitches)) + } + if m.CPUMigrations != nil { + n += 2 + sovMesos(uint64(*m.CPUMigrations)) + } + if m.AlignmentFaults != nil { + n += 2 + sovMesos(uint64(*m.AlignmentFaults)) + } + if m.EmulationFaults != nil { + n += 2 + sovMesos(uint64(*m.EmulationFaults)) + } + if m.L1DcacheLoads != nil { + n += 2 + sovMesos(uint64(*m.L1DcacheLoads)) + } + if m.L1DcacheLoadMisses != nil { + n += 2 + sovMesos(uint64(*m.L1DcacheLoadMisses)) + } + if m.L1DcacheStores != nil { + n += 2 + sovMesos(uint64(*m.L1DcacheStores)) + } + if m.L1DcacheStoreMisses != nil { + n += 2 + sovMesos(uint64(*m.L1DcacheStoreMisses)) + } + if m.L1DcachePrefetches != nil { + n += 2 + sovMesos(uint64(*m.L1DcachePrefetches)) + } + if m.L1DcachePrefetchMisses != nil { + n += 2 + sovMesos(uint64(*m.L1DcachePrefetchMisses)) + } + if m.L1IcacheLoads != nil { + n += 2 + sovMesos(uint64(*m.L1IcacheLoads)) + } + if m.L1IcacheLoadMisses != nil { + n += 2 + sovMesos(uint64(*m.L1IcacheLoadMisses)) + } + if m.L1IcachePrefetches != nil { + n += 2 + sovMesos(uint64(*m.L1IcachePrefetches)) + } + if m.L1IcachePrefetchMisses != nil { + n += 2 + sovMesos(uint64(*m.L1IcachePrefetchMisses)) + } + if m.LLCLoads != nil { + n += 2 + sovMesos(uint64(*m.LLCLoads)) + } + if m.LLCLoadMisses != nil { + n += 2 + sovMesos(uint64(*m.LLCLoadMisses)) + } + if m.LLCStores != nil { + n += 2 + sovMesos(uint64(*m.LLCStores)) + } + if m.LLCStoreMisses != nil { + n += 2 + sovMesos(uint64(*m.LLCStoreMisses)) + } + if m.LLCPrefetches != nil { + n += 2 + sovMesos(uint64(*m.LLCPrefetches)) + } + if m.LLCPrefetchMisses != nil { + n += 2 + sovMesos(uint64(*m.LLCPrefetchMisses)) + } + if m.DTLBLoads != nil { + n += 2 + sovMesos(uint64(*m.DTLBLoads)) + } + if m.DTLBLoadMisses != nil { + n += 2 + sovMesos(uint64(*m.DTLBLoadMisses)) + } + if m.DTLBStores != nil { + n += 2 + sovMesos(uint64(*m.DTLBStores)) + } + if m.DTLBStoreMisses != nil { + n += 2 + sovMesos(uint64(*m.DTLBStoreMisses)) + } + if m.DTLBPrefetches != nil { + n += 2 + sovMesos(uint64(*m.DTLBPrefetches)) + } + if m.DTLBPrefetchMisses != nil { + n += 2 + 
sovMesos(uint64(*m.DTLBPrefetchMisses)) + } + if m.ITLBLoads != nil { + n += 2 + sovMesos(uint64(*m.ITLBLoads)) + } + if m.ITLBLoadMisses != nil { + n += 2 + sovMesos(uint64(*m.ITLBLoadMisses)) + } + if m.BranchLoads != nil { + n += 2 + sovMesos(uint64(*m.BranchLoads)) + } + if m.BranchLoadMisses != nil { + n += 2 + sovMesos(uint64(*m.BranchLoadMisses)) + } + if m.NodeLoads != nil { + n += 2 + sovMesos(uint64(*m.NodeLoads)) + } + if m.NodeLoadMisses != nil { + n += 2 + sovMesos(uint64(*m.NodeLoadMisses)) + } + if m.NodeStores != nil { + n += 2 + sovMesos(uint64(*m.NodeStores)) + } + if m.NodeStoreMisses != nil { + n += 2 + sovMesos(uint64(*m.NodeStoreMisses)) + } + if m.NodePrefetches != nil { + n += 2 + sovMesos(uint64(*m.NodePrefetches)) + } + if m.NodePrefetchMisses != nil { + n += 2 + sovMesos(uint64(*m.NodePrefetchMisses)) + } + return n +} + +func (m *Request) ProtoSize() (n int) { + var l int + _ = l + if m.AgentID != nil { + l = m.AgentID.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + return n +} + +func (m *Offer) ProtoSize() (n int) { + var l int + _ = l + l = m.ID.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + l = m.FrameworkID.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + l = m.AgentID.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + l = len(m.Hostname) + n += 1 + l + sovMesos(uint64(l)) + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if len(m.ExecutorIDs) > 0 { + for _, e := range m.ExecutorIDs { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if m.URL != nil { + l = m.URL.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Unavailability != nil { + l = m.Unavailability.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.AllocationInfo != nil { + l = m.AllocationInfo.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Domain != nil { + l = m.Domain.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *Offer_Operation) ProtoSize() (n int) { + var l int + _ = l + n += 1 + sovMesos(uint64(m.Type)) + if m.Launch != nil { + l = m.Launch.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Reserve != nil { + l = m.Reserve.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Unreserve != nil { + l = m.Unreserve.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Create != nil { + l = m.Create.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Destroy != nil { + l = m.Destroy.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.LaunchGroup != nil { + l = m.LaunchGroup.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.ID != nil { + l = m.ID.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.GrowVolume != nil { + l = m.GrowVolume.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.ShrinkVolume != nil { + l = m.ShrinkVolume.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.CreateDisk != nil { + l = m.CreateDisk.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.DestroyDisk != nil { + l = m.DestroyDisk.ProtoSize() + n += 2 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *Offer_Operation_Launch) ProtoSize() (n int) { + var l int + _ = l + if len(m.TaskInfos) > 0 { + for _, e := range m.TaskInfos { + l = e.ProtoSize() + n += 1 + 
l + sovMesos(uint64(l)) + } + } + return n +} + +func (m *Offer_Operation_LaunchGroup) ProtoSize() (n int) { + var l int + _ = l + l = m.Executor.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + l = m.TaskGroup.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + return n +} + +func (m *Offer_Operation_Reserve) ProtoSize() (n int) { + var l int + _ = l + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + return n +} + +func (m *Offer_Operation_Unreserve) ProtoSize() (n int) { + var l int + _ = l + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + return n +} + +func (m *Offer_Operation_Create) ProtoSize() (n int) { + var l int + _ = l + if len(m.Volumes) > 0 { + for _, e := range m.Volumes { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + return n +} + +func (m *Offer_Operation_Destroy) ProtoSize() (n int) { + var l int + _ = l + if len(m.Volumes) > 0 { + for _, e := range m.Volumes { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + return n +} + +func (m *Offer_Operation_GrowVolume) ProtoSize() (n int) { + var l int + _ = l + l = m.Volume.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + l = m.Addition.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + return n +} + +func (m *Offer_Operation_ShrinkVolume) ProtoSize() (n int) { + var l int + _ = l + l = m.Volume.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + l = m.Subtract.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + return n +} + +func (m *Offer_Operation_CreateDisk) ProtoSize() (n int) { + var l int + _ = l + l = m.Source.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + n += 1 + sovMesos(uint64(m.TargetType)) + return n +} + +func (m *Offer_Operation_DestroyDisk) ProtoSize() (n int) { + var l int + _ = l + l = m.Source.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + return n +} + +func (m *InverseOffer) ProtoSize() (n int) { + var l int + _ = l + l = m.OfferID.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + if m.URL != nil { + l = m.URL.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + l = m.FrameworkID.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + if m.AgentID != nil { + l = m.AgentID.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + l = m.Unavailability.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + return n +} + +func (m *TaskInfo) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovMesos(uint64(l)) + l = m.TaskID.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + l = m.AgentID.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if m.Executor != nil { + l = m.Executor.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovMesos(uint64(l)) + } + if m.Command != nil { + l = m.Command.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.HealthCheck != nil { + l = m.HealthCheck.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Container != nil { + l = m.Container.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Labels != nil { + l = m.Labels.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Discovery != nil { + l = m.Discovery.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.KillPolicy != nil 
{ + l = m.KillPolicy.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Check != nil { + l = m.Check.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.MaxCompletionTime != nil { + l = m.MaxCompletionTime.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *TaskGroupInfo) ProtoSize() (n int) { + var l int + _ = l + if len(m.Tasks) > 0 { + for _, e := range m.Tasks { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + return n +} + +func (m *Task) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovMesos(uint64(l)) + l = m.TaskID.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + l = m.FrameworkID.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + if m.ExecutorID != nil { + l = m.ExecutorID.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + l = m.AgentID.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + if m.State != nil { + n += 1 + sovMesos(uint64(*m.State)) + } + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if len(m.Statuses) > 0 { + for _, e := range m.Statuses { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if m.StatusUpdateState != nil { + n += 1 + sovMesos(uint64(*m.StatusUpdateState)) + } + if m.StatusUpdateUUID != nil { + l = len(m.StatusUpdateUUID) + n += 1 + l + sovMesos(uint64(l)) + } + if m.Labels != nil { + l = m.Labels.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Discovery != nil { + l = m.Discovery.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Container != nil { + l = m.Container.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.User != nil { + l = len(*m.User) + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *TaskResourceLimitation) ProtoSize() (n int) { + var l int + _ = l + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + return n +} + +func (m *UUID) ProtoSize() (n int) { + var l int + _ = l + if m.Value != nil { + l = len(m.Value) + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *Operation) ProtoSize() (n int) { + var l int + _ = l + if m.FrameworkID != nil { + l = m.FrameworkID.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.AgentID != nil { + l = m.AgentID.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + l = m.Info.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + l = m.LatestStatus.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + if len(m.Statuses) > 0 { + for _, e := range m.Statuses { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + l = m.UUID.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + return n +} + +func (m *OperationStatus) ProtoSize() (n int) { + var l int + _ = l + if m.OperationID != nil { + l = m.OperationID.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + n += 1 + sovMesos(uint64(m.State)) + if m.Message != nil { + l = len(*m.Message) + n += 1 + l + sovMesos(uint64(l)) + } + if len(m.ConvertedResources) > 0 { + for _, e := range m.ConvertedResources { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if m.UUID != nil { + l = m.UUID.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *CheckStatusInfo) ProtoSize() (n int) { + var l int + _ = l + if m.Type != nil { + n += 1 + sovMesos(uint64(*m.Type)) + } + if m.Command != nil { + l = m.Command.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.HTTP != nil { + l = m.HTTP.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } 
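+ // Nil optional pointers mean "field not set" and contribute zero bytes to the serialized size.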
+ if m.TCP != nil { + l = m.TCP.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *CheckStatusInfo_Command) ProtoSize() (n int) { + var l int + _ = l + if m.ExitCode != nil { + n += 1 + sovMesos(uint64(*m.ExitCode)) + } + return n +} + +func (m *CheckStatusInfo_Http) ProtoSize() (n int) { + var l int + _ = l + if m.StatusCode != nil { + n += 1 + sovMesos(uint64(*m.StatusCode)) + } + return n +} + +func (m *CheckStatusInfo_Tcp) ProtoSize() (n int) { + var l int + _ = l + if m.Succeeded != nil { + n += 2 + } + return n +} + +func (m *TaskStatus) ProtoSize() (n int) { + var l int + _ = l + l = m.TaskID.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + if m.State != nil { + n += 1 + sovMesos(uint64(*m.State)) + } + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovMesos(uint64(l)) + } + if m.Message != nil { + l = len(*m.Message) + n += 1 + l + sovMesos(uint64(l)) + } + if m.AgentID != nil { + l = m.AgentID.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Timestamp != nil { + n += 9 + } + if m.ExecutorID != nil { + l = m.ExecutorID.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Healthy != nil { + n += 2 + } + if m.Source != nil { + n += 1 + sovMesos(uint64(*m.Source)) + } + if m.Reason != nil { + n += 1 + sovMesos(uint64(*m.Reason)) + } + if m.UUID != nil { + l = len(m.UUID) + n += 1 + l + sovMesos(uint64(l)) + } + if m.Labels != nil { + l = m.Labels.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.ContainerStatus != nil { + l = m.ContainerStatus.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.UnreachableTime != nil { + l = m.UnreachableTime.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.CheckStatus != nil { + l = m.CheckStatus.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Limitation != nil { + l = m.Limitation.ProtoSize() + n += 2 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *Filters) ProtoSize() (n int) { + var l int + _ = l + if m.RefuseSeconds != nil { + n += 9 + } + return n +} + +func (m *Environment) ProtoSize() (n int) { + var l int + _ = l + if len(m.Variables) > 0 { + for _, e := range m.Variables { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + return n +} + +func (m *Environment_Variable) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovMesos(uint64(l)) + if m.Value != nil { + l = len(*m.Value) + n += 1 + l + sovMesos(uint64(l)) + } + if m.Type != nil { + n += 1 + sovMesos(uint64(*m.Type)) + } + if m.Secret != nil { + l = m.Secret.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *Parameter) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovMesos(uint64(l)) + l = len(m.Value) + n += 1 + l + sovMesos(uint64(l)) + return n +} + +func (m *Parameters) ProtoSize() (n int) { + var l int + _ = l + if len(m.Parameter) > 0 { + for _, e := range m.Parameter { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + return n +} + +func (m *Credential) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Principal) + n += 1 + l + sovMesos(uint64(l)) + if m.Secret != nil { + l = len(*m.Secret) + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *Credentials) ProtoSize() (n int) { + var l int + _ = l + if len(m.Credentials) > 0 { + for _, e := range m.Credentials { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + return n +} + +func (m *Secret) ProtoSize() (n int) { + var l int + _ = l + n += 1 + sovMesos(uint64(m.Type)) + if m.Reference != nil { + l = 
m.Reference.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Value != nil { + l = m.Value.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *Secret_Reference) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovMesos(uint64(l)) + if m.Key != nil { + l = len(*m.Key) + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *Secret_Value) ProtoSize() (n int) { + var l int + _ = l + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *RateLimit) ProtoSize() (n int) { + var l int + _ = l + if m.QPS != nil { + n += 9 + } + l = len(m.Principal) + n += 1 + l + sovMesos(uint64(l)) + if m.Capacity != nil { + n += 1 + sovMesos(uint64(*m.Capacity)) + } + return n +} + +func (m *RateLimits) ProtoSize() (n int) { + var l int + _ = l + if len(m.Limits) > 0 { + for _, e := range m.Limits { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if m.AggregateDefaultQPS != nil { + n += 9 + } + if m.AggregateDefaultCapacity != nil { + n += 1 + sovMesos(uint64(*m.AggregateDefaultCapacity)) + } + return n +} + +func (m *Image) ProtoSize() (n int) { + var l int + _ = l + if m.Type != nil { + n += 1 + sovMesos(uint64(*m.Type)) + } + if m.Appc != nil { + l = m.Appc.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Docker != nil { + l = m.Docker.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Cached != nil { + n += 2 + } + return n +} + +func (m *Image_Appc) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovMesos(uint64(l)) + if m.ID != nil { + l = len(*m.ID) + n += 1 + l + sovMesos(uint64(l)) + } + if m.Labels != nil { + l = m.Labels.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *Image_Docker) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovMesos(uint64(l)) + if m.Credential != nil { + l = m.Credential.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Config != nil { + l = m.Config.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *MountPropagation) ProtoSize() (n int) { + var l int + _ = l + if m.Mode != nil { + n += 1 + sovMesos(uint64(*m.Mode)) + } + return n +} + +func (m *Volume) ProtoSize() (n int) { + var l int + _ = l + l = len(m.ContainerPath) + n += 1 + l + sovMesos(uint64(l)) + if m.HostPath != nil { + l = len(*m.HostPath) + n += 1 + l + sovMesos(uint64(l)) + } + if m.Mode != nil { + n += 1 + sovMesos(uint64(*m.Mode)) + } + if m.Image != nil { + l = m.Image.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Source != nil { + l = m.Source.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *Volume_Source) ProtoSize() (n int) { + var l int + _ = l + n += 1 + sovMesos(uint64(m.Type)) + if m.DockerVolume != nil { + l = m.DockerVolume.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.SandboxPath != nil { + l = m.SandboxPath.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Secret != nil { + l = m.Secret.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.HostPath != nil { + l = m.HostPath.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *Volume_Source_DockerVolume) ProtoSize() (n int) { + var l int + _ = l + if m.Driver != nil { + l = len(*m.Driver) + n += 1 + l + sovMesos(uint64(l)) + } + l = len(m.Name) + n += 1 + l + sovMesos(uint64(l)) + if m.DriverOptions != nil { + l = m.DriverOptions.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m 
*Volume_Source_HostPath) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovMesos(uint64(l)) + if m.MountPropagation != nil { + l = m.MountPropagation.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *Volume_Source_SandboxPath) ProtoSize() (n int) { + var l int + _ = l + n += 1 + sovMesos(uint64(m.Type)) + l = len(m.Path) + n += 1 + l + sovMesos(uint64(l)) + return n +} + +func (m *NetworkInfo) ProtoSize() (n int) { + var l int + _ = l + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovMesos(uint64(l)) + } + } + if m.Labels != nil { + l = m.Labels.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if len(m.IPAddresses) > 0 { + for _, e := range m.IPAddresses { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if m.Name != nil { + l = len(*m.Name) + n += 1 + l + sovMesos(uint64(l)) + } + if len(m.PortMappings) > 0 { + for _, e := range m.PortMappings { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + return n +} + +func (m *NetworkInfo_IPAddress) ProtoSize() (n int) { + var l int + _ = l + if m.Protocol != nil { + n += 1 + sovMesos(uint64(*m.Protocol)) + } + if m.IPAddress != nil { + l = len(*m.IPAddress) + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *NetworkInfo_PortMapping) ProtoSize() (n int) { + var l int + _ = l + n += 1 + sovMesos(uint64(m.HostPort)) + n += 1 + sovMesos(uint64(m.ContainerPort)) + if m.Protocol != nil { + l = len(*m.Protocol) + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *CapabilityInfo) ProtoSize() (n int) { + var l int + _ = l + if len(m.Capabilities) > 0 { + for _, e := range m.Capabilities { + n += 1 + sovMesos(uint64(e)) + } + } + return n +} + +func (m *LinuxInfo) ProtoSize() (n int) { + var l int + _ = l + if m.CapabilityInfo != nil { + l = m.CapabilityInfo.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.BoundingCapabilities != nil { + l = m.BoundingCapabilities.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.EffectiveCapabilities != nil { + l = m.EffectiveCapabilities.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.SharePIDNamespace != nil { + n += 2 + } + return n +} + +func (m *RLimitInfo) ProtoSize() (n int) { + var l int + _ = l + if len(m.Rlimits) > 0 { + for _, e := range m.Rlimits { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + return n +} + +func (m *RLimitInfo_RLimit) ProtoSize() (n int) { + var l int + _ = l + n += 1 + sovMesos(uint64(m.Type)) + if m.Hard != nil { + n += 1 + sovMesos(uint64(*m.Hard)) + } + if m.Soft != nil { + n += 1 + sovMesos(uint64(*m.Soft)) + } + return n +} + +func (m *TTYInfo) ProtoSize() (n int) { + var l int + _ = l + if m.WindowSize != nil { + l = m.WindowSize.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *TTYInfo_WindowSize) ProtoSize() (n int) { + var l int + _ = l + n += 1 + sovMesos(uint64(m.Rows)) + n += 1 + sovMesos(uint64(m.Columns)) + return n +} + +func (m *ContainerInfo) ProtoSize() (n int) { + var l int + _ = l + if m.Type != nil { + n += 1 + sovMesos(uint64(*m.Type)) + } + if len(m.Volumes) > 0 { + for _, e := range m.Volumes { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if m.Docker != nil { + l = m.Docker.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Hostname != nil { + l = len(*m.Hostname) + n += 1 + l + sovMesos(uint64(l)) + } + if m.Mesos != nil { + l = m.Mesos.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if len(m.NetworkInfos) > 0 { + 
for _, e := range m.NetworkInfos { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if m.LinuxInfo != nil { + l = m.LinuxInfo.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.RlimitInfo != nil { + l = m.RlimitInfo.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.TTYInfo != nil { + l = m.TTYInfo.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *ContainerInfo_DockerInfo) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Image) + n += 1 + l + sovMesos(uint64(l)) + if m.Network != nil { + n += 1 + sovMesos(uint64(*m.Network)) + } + if len(m.PortMappings) > 0 { + for _, e := range m.PortMappings { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if m.Privileged != nil { + n += 2 + } + if len(m.Parameters) > 0 { + for _, e := range m.Parameters { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if m.ForcePullImage != nil { + n += 2 + } + if m.VolumeDriver != nil { + l = len(*m.VolumeDriver) + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *ContainerInfo_DockerInfo_PortMapping) ProtoSize() (n int) { + var l int + _ = l + n += 1 + sovMesos(uint64(m.HostPort)) + n += 1 + sovMesos(uint64(m.ContainerPort)) + if m.Protocol != nil { + l = len(*m.Protocol) + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *ContainerInfo_MesosInfo) ProtoSize() (n int) { + var l int + _ = l + if m.Image != nil { + l = m.Image.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *ContainerStatus) ProtoSize() (n int) { + var l int + _ = l + if len(m.NetworkInfos) > 0 { + for _, e := range m.NetworkInfos { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if m.CgroupInfo != nil { + l = m.CgroupInfo.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.ExecutorPID != nil { + n += 1 + sovMesos(uint64(*m.ExecutorPID)) + } + if m.ContainerID != nil { + l = m.ContainerID.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *CgroupInfo) ProtoSize() (n int) { + var l int + _ = l + if m.NetCLS != nil { + l = m.NetCLS.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *CgroupInfo_Blkio) ProtoSize() (n int) { + var l int + _ = l + return n +} + +func (m *CgroupInfo_Blkio_Value) ProtoSize() (n int) { + var l int + _ = l + if m.Op != nil { + n += 1 + sovMesos(uint64(*m.Op)) + } + if m.Value != nil { + n += 1 + sovMesos(uint64(*m.Value)) + } + return n +} + +func (m *CgroupInfo_Blkio_CFQ) ProtoSize() (n int) { + var l int + _ = l + return n +} + +func (m *CgroupInfo_Blkio_CFQ_Statistics) ProtoSize() (n int) { + var l int + _ = l + if m.Device != nil { + l = m.Device.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Sectors != nil { + n += 1 + sovMesos(uint64(*m.Sectors)) + } + if m.Time != nil { + n += 1 + sovMesos(uint64(*m.Time)) + } + if len(m.IOServiced) > 0 { + for _, e := range m.IOServiced { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if len(m.IOServiceBytes) > 0 { + for _, e := range m.IOServiceBytes { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if len(m.IOServiceTime) > 0 { + for _, e := range m.IOServiceTime { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if len(m.IOWaitTime) > 0 { + for _, e := range m.IOWaitTime { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if len(m.IOMerged) > 0 { + for _, e := range m.IOMerged { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if len(m.IOQueued) > 0 { + for _, e := range 
m.IOQueued { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + return n +} + +func (m *CgroupInfo_Blkio_Throttling) ProtoSize() (n int) { + var l int + _ = l + return n +} + +func (m *CgroupInfo_Blkio_Throttling_Statistics) ProtoSize() (n int) { + var l int + _ = l + if m.Device != nil { + l = m.Device.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if len(m.IOServiced) > 0 { + for _, e := range m.IOServiced { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if len(m.IOServiceBytes) > 0 { + for _, e := range m.IOServiceBytes { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + return n +} + +func (m *CgroupInfo_Blkio_Statistics) ProtoSize() (n int) { + var l int + _ = l + if len(m.CFQ) > 0 { + for _, e := range m.CFQ { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if len(m.CFQRecursive) > 0 { + for _, e := range m.CFQRecursive { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if len(m.Throttling) > 0 { + for _, e := range m.Throttling { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + return n +} + +func (m *CgroupInfo_NetCls) ProtoSize() (n int) { + var l int + _ = l + if m.ClassID != nil { + n += 1 + sovMesos(uint64(*m.ClassID)) + } + return n +} + +func (m *Labels) ProtoSize() (n int) { + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + return n +} + +func (m *Label) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovMesos(uint64(l)) + if m.Value != nil { + l = len(*m.Value) + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *Port) ProtoSize() (n int) { + var l int + _ = l + n += 1 + sovMesos(uint64(m.Number)) + if m.Name != nil { + l = len(*m.Name) + n += 1 + l + sovMesos(uint64(l)) + } + if m.Protocol != nil { + l = len(*m.Protocol) + n += 1 + l + sovMesos(uint64(l)) + } + if m.Visibility != nil { + n += 1 + sovMesos(uint64(*m.Visibility)) + } + if m.Labels != nil { + l = m.Labels.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *Ports) ProtoSize() (n int) { + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + return n +} + +func (m *DiscoveryInfo) ProtoSize() (n int) { + var l int + _ = l + n += 1 + sovMesos(uint64(m.Visibility)) + if m.Name != nil { + l = len(*m.Name) + n += 1 + l + sovMesos(uint64(l)) + } + if m.Environment != nil { + l = len(*m.Environment) + n += 1 + l + sovMesos(uint64(l)) + } + if m.Location != nil { + l = len(*m.Location) + n += 1 + l + sovMesos(uint64(l)) + } + if m.Version != nil { + l = len(*m.Version) + n += 1 + l + sovMesos(uint64(l)) + } + if m.Ports != nil { + l = m.Ports.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Labels != nil { + l = m.Labels.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *WeightInfo) ProtoSize() (n int) { + var l int + _ = l + n += 9 + if m.Role != nil { + l = len(*m.Role) + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *VersionInfo) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Version) + n += 1 + l + sovMesos(uint64(l)) + if m.BuildDate != nil { + l = len(*m.BuildDate) + n += 1 + l + sovMesos(uint64(l)) + } + if m.BuildTime != nil { + n += 9 + } + if m.BuildUser != nil { + l = len(*m.BuildUser) + n += 1 + l + sovMesos(uint64(l)) + } + if m.GitSHA != nil { + l = len(*m.GitSHA) + n += 1 + l + sovMesos(uint64(l)) + } + if 
m.GitBranch != nil { + l = len(*m.GitBranch) + n += 1 + l + sovMesos(uint64(l)) + } + if m.GitTag != nil { + l = len(*m.GitTag) + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *Flag) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovMesos(uint64(l)) + if m.Value != nil { + l = len(*m.Value) + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *Role) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovMesos(uint64(l)) + n += 9 + if len(m.Frameworks) > 0 { + for _, e := range m.Frameworks { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + return n +} + +func (m *Metric) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovMesos(uint64(l)) + if m.Value != nil { + n += 9 + } + return n +} + +func (m *FileInfo) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovMesos(uint64(l)) + if m.Nlink != nil { + n += 1 + sovMesos(uint64(*m.Nlink)) + } + if m.Size != nil { + n += 1 + sovMesos(uint64(*m.Size)) + } + if m.Mtime != nil { + l = m.Mtime.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + if m.Mode != nil { + n += 1 + sovMesos(uint64(*m.Mode)) + } + if m.UID != nil { + l = len(*m.UID) + n += 1 + l + sovMesos(uint64(l)) + } + if m.GID != nil { + l = len(*m.GID) + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *Device) ProtoSize() (n int) { + var l int + _ = l + if m.Path != nil { + l = len(*m.Path) + n += 1 + l + sovMesos(uint64(l)) + } + if m.Number != nil { + l = m.Number.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + return n +} + +func (m *Device_Number) ProtoSize() (n int) { + var l int + _ = l + if m.MajorNumber != nil { + n += 1 + sovMesos(uint64(*m.MajorNumber)) + } + if m.MinorNumber != nil { + n += 1 + sovMesos(uint64(*m.MinorNumber)) + } + return n +} + +func (m *DeviceAccess) ProtoSize() (n int) { + var l int + _ = l + l = m.Device.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + l = m.Access.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + return n +} + +func (m *DeviceAccess_Access) ProtoSize() (n int) { + var l int + _ = l + if m.Read != nil { + n += 2 + } + if m.Write != nil { + n += 2 + } + if m.Mknod != nil { + n += 2 + } + return n +} + +func (m *DeviceWhitelist) ProtoSize() (n int) { + var l int + _ = l + if len(m.AllowedDevices) > 0 { + for _, e := range m.AllowedDevices { + l = e.ProtoSize() + n += 1 + l + sovMesos(uint64(l)) + } + } + return n +} + +func sovMesos(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozMesos(x uint64) (n int) { + return sovMesos(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *FrameworkID) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FrameworkID{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *OfferID) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OfferID{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *AgentID) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AgentID{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *TaskID) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskID{`, + `Value:` + 
fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *ExecutorID) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExecutorID{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerID) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerID{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `Parent:` + strings.Replace(fmt.Sprintf("%v", this.Parent), "ContainerID", "ContainerID", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceProviderID) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceProviderID{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *OperationID) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OperationID{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *TimeInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TimeInfo{`, + `Nanoseconds:` + fmt.Sprintf("%v", this.Nanoseconds) + `,`, + `}`, + }, "") + return s +} +func (this *DurationInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DurationInfo{`, + `Nanoseconds:` + fmt.Sprintf("%v", this.Nanoseconds) + `,`, + `}`, + }, "") + return s +} +func (this *Address) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Address{`, + `Hostname:` + valueToStringMesos(this.Hostname) + `,`, + `IP:` + valueToStringMesos(this.IP) + `,`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *URL) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&URL{`, + `Scheme:` + fmt.Sprintf("%v", this.Scheme) + `,`, + `Address:` + strings.Replace(strings.Replace(this.Address.String(), "Address", "Address", 1), `&`, ``, 1) + `,`, + `Path:` + valueToStringMesos(this.Path) + `,`, + `Query:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Query), "Parameter", "Parameter", 1), `&`, ``, 1) + `,`, + `Fragment:` + valueToStringMesos(this.Fragment) + `,`, + `}`, + }, "") + return s +} +func (this *Unavailability) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Unavailability{`, + `Start:` + strings.Replace(strings.Replace(this.Start.String(), "TimeInfo", "TimeInfo", 1), `&`, ``, 1) + `,`, + `Duration:` + strings.Replace(fmt.Sprintf("%v", this.Duration), "DurationInfo", "DurationInfo", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MachineID) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MachineID{`, + `Hostname:` + valueToStringMesos(this.Hostname) + `,`, + `IP:` + valueToStringMesos(this.IP) + `,`, + `}`, + }, "") + return s +} +func (this *MachineInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MachineInfo{`, + `ID:` + strings.Replace(strings.Replace(this.ID.String(), "MachineID", "MachineID", 1), `&`, ``, 1) + `,`, + `Mode:` + valueToStringMesos(this.Mode) + `,`, + `Unavailability:` + strings.Replace(fmt.Sprintf("%v", this.Unavailability), "Unavailability", "Unavailability", 1) + `,`, + `}`, + }, "") + return s +} +func (this *FrameworkInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FrameworkInfo{`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + 
`Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ID:` + strings.Replace(fmt.Sprintf("%v", this.ID), "FrameworkID", "FrameworkID", 1) + `,`, + `FailoverTimeout:` + valueToStringMesos(this.FailoverTimeout) + `,`, + `Checkpoint:` + valueToStringMesos(this.Checkpoint) + `,`, + `Role:` + valueToStringMesos(this.Role) + `,`, + `Hostname:` + valueToStringMesos(this.Hostname) + `,`, + `Principal:` + valueToStringMesos(this.Principal) + `,`, + `WebUiURL:` + valueToStringMesos(this.WebUiURL) + `,`, + `Capabilities:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Capabilities), "FrameworkInfo_Capability", "FrameworkInfo_Capability", 1), `&`, ``, 1) + `,`, + `Labels:` + strings.Replace(fmt.Sprintf("%v", this.Labels), "Labels", "Labels", 1) + `,`, + `Roles:` + fmt.Sprintf("%v", this.Roles) + `,`, + `}`, + }, "") + return s +} +func (this *FrameworkInfo_Capability) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FrameworkInfo_Capability{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `}`, + }, "") + return s +} +func (this *CheckInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CheckInfo{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Command:` + strings.Replace(fmt.Sprintf("%v", this.Command), "CheckInfo_Command", "CheckInfo_Command", 1) + `,`, + `HTTP:` + strings.Replace(fmt.Sprintf("%v", this.HTTP), "CheckInfo_Http", "CheckInfo_Http", 1) + `,`, + `DelaySeconds:` + valueToStringMesos(this.DelaySeconds) + `,`, + `IntervalSeconds:` + valueToStringMesos(this.IntervalSeconds) + `,`, + `TimeoutSeconds:` + valueToStringMesos(this.TimeoutSeconds) + `,`, + `TCP:` + strings.Replace(fmt.Sprintf("%v", this.TCP), "CheckInfo_Tcp", "CheckInfo_Tcp", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CheckInfo_Command) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CheckInfo_Command{`, + `Command:` + strings.Replace(strings.Replace(this.Command.String(), "CommandInfo", "CommandInfo", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CheckInfo_Http) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CheckInfo_Http{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `Path:` + valueToStringMesos(this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *CheckInfo_Tcp) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CheckInfo_Tcp{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *HealthCheck) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HealthCheck{`, + `HTTP:` + strings.Replace(fmt.Sprintf("%v", this.HTTP), "HealthCheck_HTTPCheckInfo", "HealthCheck_HTTPCheckInfo", 1) + `,`, + `DelaySeconds:` + valueToStringMesos(this.DelaySeconds) + `,`, + `IntervalSeconds:` + valueToStringMesos(this.IntervalSeconds) + `,`, + `TimeoutSeconds:` + valueToStringMesos(this.TimeoutSeconds) + `,`, + `ConsecutiveFailures:` + valueToStringMesos(this.ConsecutiveFailures) + `,`, + `GracePeriodSeconds:` + valueToStringMesos(this.GracePeriodSeconds) + `,`, + `Command:` + strings.Replace(fmt.Sprintf("%v", this.Command), "CommandInfo", "CommandInfo", 1) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `TCP:` + strings.Replace(fmt.Sprintf("%v", this.TCP), "HealthCheck_TCPCheckInfo", "HealthCheck_TCPCheckInfo", 1) + `,`, + `}`, + }, "") + return s +} +func (this *HealthCheck_HTTPCheckInfo) String() string { + if this == nil { 
+ return "nil" + } + s := strings.Join([]string{`&HealthCheck_HTTPCheckInfo{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `Path:` + valueToStringMesos(this.Path) + `,`, + `Scheme:` + valueToStringMesos(this.Scheme) + `,`, + `Statuses:` + fmt.Sprintf("%v", this.Statuses) + `,`, + `Protocol:` + valueToStringMesos(this.Protocol) + `,`, + `}`, + }, "") + return s +} +func (this *HealthCheck_TCPCheckInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HealthCheck_TCPCheckInfo{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `Protocol:` + valueToStringMesos(this.Protocol) + `,`, + `}`, + }, "") + return s +} +func (this *KillPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KillPolicy{`, + `GracePeriod:` + strings.Replace(fmt.Sprintf("%v", this.GracePeriod), "DurationInfo", "DurationInfo", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CommandInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CommandInfo{`, + `URIs:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.URIs), "CommandInfo_URI", "CommandInfo_URI", 1), `&`, ``, 1) + `,`, + `Environment:` + strings.Replace(fmt.Sprintf("%v", this.Environment), "Environment", "Environment", 1) + `,`, + `Value:` + valueToStringMesos(this.Value) + `,`, + `User:` + valueToStringMesos(this.User) + `,`, + `Shell:` + valueToStringMesos(this.Shell) + `,`, + `Arguments:` + fmt.Sprintf("%v", this.Arguments) + `,`, + `}`, + }, "") + return s +} +func (this *CommandInfo_URI) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CommandInfo_URI{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `Executable:` + valueToStringMesos(this.Executable) + `,`, + `Extract:` + valueToStringMesos(this.Extract) + `,`, + `Cache:` + valueToStringMesos(this.Cache) + `,`, + `OutputFile:` + valueToStringMesos(this.OutputFile) + `,`, + `}`, + }, "") + return s +} +func (this *ExecutorInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExecutorInfo{`, + `ExecutorID:` + strings.Replace(strings.Replace(this.ExecutorID.String(), "ExecutorID", "ExecutorID", 1), `&`, ``, 1) + `,`, + `Data:` + valueToStringMesos(this.Data) + `,`, + `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "Resource", "Resource", 1), `&`, ``, 1) + `,`, + `Command:` + strings.Replace(fmt.Sprintf("%v", this.Command), "CommandInfo", "CommandInfo", 1) + `,`, + `FrameworkID:` + strings.Replace(fmt.Sprintf("%v", this.FrameworkID), "FrameworkID", "FrameworkID", 1) + `,`, + `Name:` + valueToStringMesos(this.Name) + `,`, + `Source:` + valueToStringMesos(this.Source) + `,`, + `Container:` + strings.Replace(fmt.Sprintf("%v", this.Container), "ContainerInfo", "ContainerInfo", 1) + `,`, + `Discovery:` + strings.Replace(fmt.Sprintf("%v", this.Discovery), "DiscoveryInfo", "DiscoveryInfo", 1) + `,`, + `ShutdownGracePeriod:` + strings.Replace(fmt.Sprintf("%v", this.ShutdownGracePeriod), "DurationInfo", "DurationInfo", 1) + `,`, + `Labels:` + strings.Replace(fmt.Sprintf("%v", this.Labels), "Labels", "Labels", 1) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `}`, + }, "") + return s +} +func (this *DomainInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DomainInfo{`, + `FaultDomain:` + strings.Replace(fmt.Sprintf("%v", this.FaultDomain), "DomainInfo_FaultDomain", "DomainInfo_FaultDomain", 1) + `,`, + `}`, + }, "") + return s 
+} +func (this *DomainInfo_FaultDomain) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DomainInfo_FaultDomain{`, + `Region:` + strings.Replace(strings.Replace(this.Region.String(), "DomainInfo_FaultDomain_RegionInfo", "DomainInfo_FaultDomain_RegionInfo", 1), `&`, ``, 1) + `,`, + `Zone:` + strings.Replace(strings.Replace(this.Zone.String(), "DomainInfo_FaultDomain_ZoneInfo", "DomainInfo_FaultDomain_ZoneInfo", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DomainInfo_FaultDomain_RegionInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DomainInfo_FaultDomain_RegionInfo{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *DomainInfo_FaultDomain_ZoneInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DomainInfo_FaultDomain_ZoneInfo{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *MasterInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MasterInfo{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `Port:` + valueToStringMesos(this.Port) + `,`, + `PID:` + valueToStringMesos(this.PID) + `,`, + `Hostname:` + valueToStringMesos(this.Hostname) + `,`, + `Version:` + valueToStringMesos(this.Version) + `,`, + `Address:` + strings.Replace(fmt.Sprintf("%v", this.Address), "Address", "Address", 1) + `,`, + `Domain:` + strings.Replace(fmt.Sprintf("%v", this.Domain), "DomainInfo", "DomainInfo", 1) + `,`, + `Capabilities:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Capabilities), "MasterInfo_Capability", "MasterInfo_Capability", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *MasterInfo_Capability) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MasterInfo_Capability{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `}`, + }, "") + return s +} +func (this *AgentInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AgentInfo{`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "Resource", "Resource", 1), `&`, ``, 1) + `,`, + `Attributes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Attributes), "Attribute", "Attribute", 1), `&`, ``, 1) + `,`, + `ID:` + strings.Replace(fmt.Sprintf("%v", this.ID), "AgentID", "AgentID", 1) + `,`, + `Port:` + valueToStringMesos(this.Port) + `,`, + `Domain:` + strings.Replace(fmt.Sprintf("%v", this.Domain), "DomainInfo", "DomainInfo", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AgentInfo_Capability) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AgentInfo_Capability{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `}`, + }, "") + return s +} +func (this *CSIPluginContainerInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CSIPluginContainerInfo{`, + `Services:` + fmt.Sprintf("%v", this.Services) + `,`, + `Command:` + strings.Replace(fmt.Sprintf("%v", this.Command), "CommandInfo", "CommandInfo", 1) + `,`, + `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "Resource", "Resource", 1), `&`, ``, 1) + `,`, + `Container:` + strings.Replace(fmt.Sprintf("%v", this.Container), "ContainerInfo", "ContainerInfo", 1) + `,`, + `}`, + }, "") + return s 
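+	// The strings.Replace(x, "T", "T", 1) calls emitted throughout these
+	// String() methods substitute a type name for itself and so are no-ops;
+	// they appear to exist so the generator can qualify type names when an
+	// embedded message comes from another package.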
+} +func (this *CSIPluginInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CSIPluginInfo{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Containers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Containers), "CSIPluginContainerInfo", "CSIPluginContainerInfo", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceProviderInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceProviderInfo{`, + `ID:` + strings.Replace(fmt.Sprintf("%v", this.ID), "ResourceProviderID", "ResourceProviderID", 1) + `,`, + `Attributes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Attributes), "Attribute", "Attribute", 1), `&`, ``, 1) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DefaultReservations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DefaultReservations), "Resource_ReservationInfo", "Resource_ReservationInfo", 1), `&`, ``, 1) + `,`, + `Storage:` + strings.Replace(fmt.Sprintf("%v", this.Storage), "ResourceProviderInfo_Storage", "ResourceProviderInfo_Storage", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceProviderInfo_Storage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceProviderInfo_Storage{`, + `Plugin:` + strings.Replace(strings.Replace(this.Plugin.String(), "CSIPluginInfo", "CSIPluginInfo", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Scalar:` + strings.Replace(fmt.Sprintf("%v", this.Scalar), "Value_Scalar", "Value_Scalar", 1) + `,`, + `Ranges:` + strings.Replace(fmt.Sprintf("%v", this.Ranges), "Value_Ranges", "Value_Ranges", 1) + `,`, + `Set:` + strings.Replace(fmt.Sprintf("%v", this.Set), "Value_Set", "Value_Set", 1) + `,`, + `Text:` + strings.Replace(fmt.Sprintf("%v", this.Text), "Value_Text", "Value_Text", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Value_Scalar) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_Scalar{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *Value_Range) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_Range{`, + `Begin:` + fmt.Sprintf("%v", this.Begin) + `,`, + `End:` + fmt.Sprintf("%v", this.End) + `,`, + `}`, + }, "") + return s +} +func (this *Value_Ranges) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_Ranges{`, + `Range:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Range), "Value_Range", "Value_Range", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Value_Set) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_Set{`, + `Item:` + fmt.Sprintf("%v", this.Item) + `,`, + `}`, + }, "") + return s +} +func (this *Value_Text) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_Text{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *Attribute) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Attribute{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + 
`Scalar:` + strings.Replace(fmt.Sprintf("%v", this.Scalar), "Value_Scalar", "Value_Scalar", 1) + `,`, + `Ranges:` + strings.Replace(fmt.Sprintf("%v", this.Ranges), "Value_Ranges", "Value_Ranges", 1) + `,`, + `Text:` + strings.Replace(fmt.Sprintf("%v", this.Text), "Value_Text", "Value_Text", 1) + `,`, + `Set:` + strings.Replace(fmt.Sprintf("%v", this.Set), "Value_Set", "Value_Set", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Resource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Resource{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Type:` + valueToStringMesos(this.Type) + `,`, + `Scalar:` + strings.Replace(fmt.Sprintf("%v", this.Scalar), "Value_Scalar", "Value_Scalar", 1) + `,`, + `Ranges:` + strings.Replace(fmt.Sprintf("%v", this.Ranges), "Value_Ranges", "Value_Ranges", 1) + `,`, + `Set:` + strings.Replace(fmt.Sprintf("%v", this.Set), "Value_Set", "Value_Set", 1) + `,`, + `Role:` + valueToStringMesos(this.Role) + `,`, + `Disk:` + strings.Replace(fmt.Sprintf("%v", this.Disk), "Resource_DiskInfo", "Resource_DiskInfo", 1) + `,`, + `Reservation:` + strings.Replace(fmt.Sprintf("%v", this.Reservation), "Resource_ReservationInfo", "Resource_ReservationInfo", 1) + `,`, + `Revocable:` + strings.Replace(fmt.Sprintf("%v", this.Revocable), "Resource_RevocableInfo", "Resource_RevocableInfo", 1) + `,`, + `Shared:` + strings.Replace(fmt.Sprintf("%v", this.Shared), "Resource_SharedInfo", "Resource_SharedInfo", 1) + `,`, + `AllocationInfo:` + strings.Replace(fmt.Sprintf("%v", this.AllocationInfo), "Resource_AllocationInfo", "Resource_AllocationInfo", 1) + `,`, + `ProviderID:` + strings.Replace(fmt.Sprintf("%v", this.ProviderID), "ResourceProviderID", "ResourceProviderID", 1) + `,`, + `Reservations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Reservations), "Resource_ReservationInfo", "Resource_ReservationInfo", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Resource_AllocationInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Resource_AllocationInfo{`, + `Role:` + valueToStringMesos(this.Role) + `,`, + `}`, + }, "") + return s +} +func (this *Resource_ReservationInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Resource_ReservationInfo{`, + `Principal:` + valueToStringMesos(this.Principal) + `,`, + `Labels:` + strings.Replace(fmt.Sprintf("%v", this.Labels), "Labels", "Labels", 1) + `,`, + `Role:` + valueToStringMesos(this.Role) + `,`, + `Type:` + valueToStringMesos(this.Type) + `,`, + `}`, + }, "") + return s +} +func (this *Resource_DiskInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Resource_DiskInfo{`, + `Persistence:` + strings.Replace(fmt.Sprintf("%v", this.Persistence), "Resource_DiskInfo_Persistence", "Resource_DiskInfo_Persistence", 1) + `,`, + `Volume:` + strings.Replace(fmt.Sprintf("%v", this.Volume), "Volume", "Volume", 1) + `,`, + `Source:` + strings.Replace(fmt.Sprintf("%v", this.Source), "Resource_DiskInfo_Source", "Resource_DiskInfo_Source", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Resource_DiskInfo_Persistence) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Resource_DiskInfo_Persistence{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Principal:` + valueToStringMesos(this.Principal) + `,`, + `}`, + }, "") + return s +} +func (this *Resource_DiskInfo_Source) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&Resource_DiskInfo_Source{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Path:` + strings.Replace(fmt.Sprintf("%v", this.Path), "Resource_DiskInfo_Source_Path", "Resource_DiskInfo_Source_Path", 1) + `,`, + `Mount:` + strings.Replace(fmt.Sprintf("%v", this.Mount), "Resource_DiskInfo_Source_Mount", "Resource_DiskInfo_Source_Mount", 1) + `,`, + `ID:` + valueToStringMesos(this.ID) + `,`, + `Metadata:` + strings.Replace(fmt.Sprintf("%v", this.Metadata), "Labels", "Labels", 1) + `,`, + `Profile:` + valueToStringMesos(this.Profile) + `,`, + `}`, + }, "") + return s +} +func (this *Resource_DiskInfo_Source_Path) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Resource_DiskInfo_Source_Path{`, + `Root:` + valueToStringMesos(this.Root) + `,`, + `}`, + }, "") + return s +} +func (this *Resource_DiskInfo_Source_Mount) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Resource_DiskInfo_Source_Mount{`, + `Root:` + valueToStringMesos(this.Root) + `,`, + `}`, + }, "") + return s +} +func (this *Resource_RevocableInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Resource_RevocableInfo{`, + `}`, + }, "") + return s +} +func (this *Resource_SharedInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Resource_SharedInfo{`, + `}`, + }, "") + return s +} +func (this *TrafficControlStatistics) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TrafficControlStatistics{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Backlog:` + valueToStringMesos(this.Backlog) + `,`, + `Bytes:` + valueToStringMesos(this.Bytes) + `,`, + `Drops:` + valueToStringMesos(this.Drops) + `,`, + `Overlimits:` + valueToStringMesos(this.Overlimits) + `,`, + `Packets:` + valueToStringMesos(this.Packets) + `,`, + `Qlen:` + valueToStringMesos(this.Qlen) + `,`, + `RateBPS:` + valueToStringMesos(this.RateBPS) + `,`, + `RatePPS:` + valueToStringMesos(this.RatePPS) + `,`, + `Requeues:` + valueToStringMesos(this.Requeues) + `,`, + `}`, + }, "") + return s +} +func (this *IpStatistics) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IpStatistics{`, + `Forwarding:` + valueToStringMesos(this.Forwarding) + `,`, + `DefaultTTL:` + valueToStringMesos(this.DefaultTTL) + `,`, + `InReceives:` + valueToStringMesos(this.InReceives) + `,`, + `InHdrErrors:` + valueToStringMesos(this.InHdrErrors) + `,`, + `InAddrErrors:` + valueToStringMesos(this.InAddrErrors) + `,`, + `ForwDatagrams:` + valueToStringMesos(this.ForwDatagrams) + `,`, + `InUnknownProtos:` + valueToStringMesos(this.InUnknownProtos) + `,`, + `InDiscards:` + valueToStringMesos(this.InDiscards) + `,`, + `InDelivers:` + valueToStringMesos(this.InDelivers) + `,`, + `OutRequests:` + valueToStringMesos(this.OutRequests) + `,`, + `OutDiscards:` + valueToStringMesos(this.OutDiscards) + `,`, + `OutNoRoutes:` + valueToStringMesos(this.OutNoRoutes) + `,`, + `ReasmTimeout:` + valueToStringMesos(this.ReasmTimeout) + `,`, + `ReasmReqds:` + valueToStringMesos(this.ReasmReqds) + `,`, + `ReasmOKs:` + valueToStringMesos(this.ReasmOKs) + `,`, + `ReasmFails:` + valueToStringMesos(this.ReasmFails) + `,`, + `FragOKs:` + valueToStringMesos(this.FragOKs) + `,`, + `FragFails:` + valueToStringMesos(this.FragFails) + `,`, + `FragCreates:` + valueToStringMesos(this.FragCreates) + `,`, + `}`, + }, "") + return s +} +func (this *IcmpStatistics) String() string { + if this 
== nil { + return "nil" + } + s := strings.Join([]string{`&IcmpStatistics{`, + `InMsgs:` + valueToStringMesos(this.InMsgs) + `,`, + `InErrors:` + valueToStringMesos(this.InErrors) + `,`, + `InCsumErrors:` + valueToStringMesos(this.InCsumErrors) + `,`, + `InDestUnreachs:` + valueToStringMesos(this.InDestUnreachs) + `,`, + `InTimeExcds:` + valueToStringMesos(this.InTimeExcds) + `,`, + `InParmProbs:` + valueToStringMesos(this.InParmProbs) + `,`, + `InSrcQuenchs:` + valueToStringMesos(this.InSrcQuenchs) + `,`, + `InRedirects:` + valueToStringMesos(this.InRedirects) + `,`, + `InEchos:` + valueToStringMesos(this.InEchos) + `,`, + `InEchoReps:` + valueToStringMesos(this.InEchoReps) + `,`, + `InTimestamps:` + valueToStringMesos(this.InTimestamps) + `,`, + `InTimestampReps:` + valueToStringMesos(this.InTimestampReps) + `,`, + `InAddrMasks:` + valueToStringMesos(this.InAddrMasks) + `,`, + `InAddrMaskReps:` + valueToStringMesos(this.InAddrMaskReps) + `,`, + `OutMsgs:` + valueToStringMesos(this.OutMsgs) + `,`, + `OutErrors:` + valueToStringMesos(this.OutErrors) + `,`, + `OutDestUnreachs:` + valueToStringMesos(this.OutDestUnreachs) + `,`, + `OutTimeExcds:` + valueToStringMesos(this.OutTimeExcds) + `,`, + `OutParmProbs:` + valueToStringMesos(this.OutParmProbs) + `,`, + `OutSrcQuenchs:` + valueToStringMesos(this.OutSrcQuenchs) + `,`, + `OutRedirects:` + valueToStringMesos(this.OutRedirects) + `,`, + `OutEchos:` + valueToStringMesos(this.OutEchos) + `,`, + `OutEchoReps:` + valueToStringMesos(this.OutEchoReps) + `,`, + `OutTimestamps:` + valueToStringMesos(this.OutTimestamps) + `,`, + `OutTimestampReps:` + valueToStringMesos(this.OutTimestampReps) + `,`, + `OutAddrMasks:` + valueToStringMesos(this.OutAddrMasks) + `,`, + `OutAddrMaskReps:` + valueToStringMesos(this.OutAddrMaskReps) + `,`, + `}`, + }, "") + return s +} +func (this *TcpStatistics) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TcpStatistics{`, + `RtoAlgorithm:` + valueToStringMesos(this.RtoAlgorithm) + `,`, + `RtoMin:` + valueToStringMesos(this.RtoMin) + `,`, + `RtoMax:` + valueToStringMesos(this.RtoMax) + `,`, + `MaxConn:` + valueToStringMesos(this.MaxConn) + `,`, + `ActiveOpens:` + valueToStringMesos(this.ActiveOpens) + `,`, + `PassiveOpens:` + valueToStringMesos(this.PassiveOpens) + `,`, + `AttemptFails:` + valueToStringMesos(this.AttemptFails) + `,`, + `EstabResets:` + valueToStringMesos(this.EstabResets) + `,`, + `CurrEstab:` + valueToStringMesos(this.CurrEstab) + `,`, + `InSegs:` + valueToStringMesos(this.InSegs) + `,`, + `OutSegs:` + valueToStringMesos(this.OutSegs) + `,`, + `RetransSegs:` + valueToStringMesos(this.RetransSegs) + `,`, + `InErrs:` + valueToStringMesos(this.InErrs) + `,`, + `OutRsts:` + valueToStringMesos(this.OutRsts) + `,`, + `InCsumErrors:` + valueToStringMesos(this.InCsumErrors) + `,`, + `}`, + }, "") + return s +} +func (this *UdpStatistics) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UdpStatistics{`, + `InDatagrams:` + valueToStringMesos(this.InDatagrams) + `,`, + `NoPorts:` + valueToStringMesos(this.NoPorts) + `,`, + `InErrors:` + valueToStringMesos(this.InErrors) + `,`, + `OutDatagrams:` + valueToStringMesos(this.OutDatagrams) + `,`, + `RcvbufErrors:` + valueToStringMesos(this.RcvbufErrors) + `,`, + `SndbufErrors:` + valueToStringMesos(this.SndbufErrors) + `,`, + `InCsumErrors:` + valueToStringMesos(this.InCsumErrors) + `,`, + `IgnoredMulti:` + valueToStringMesos(this.IgnoredMulti) + `,`, + `}`, + }, "") + return s +} +func (this 
*SNMPStatistics) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SNMPStatistics{`, + `IPStats:` + strings.Replace(fmt.Sprintf("%v", this.IPStats), "IpStatistics", "IpStatistics", 1) + `,`, + `ICMPStats:` + strings.Replace(fmt.Sprintf("%v", this.ICMPStats), "IcmpStatistics", "IcmpStatistics", 1) + `,`, + `TCPStats:` + strings.Replace(fmt.Sprintf("%v", this.TCPStats), "TcpStatistics", "TcpStatistics", 1) + `,`, + `UDPStats:` + strings.Replace(fmt.Sprintf("%v", this.UDPStats), "UdpStatistics", "UdpStatistics", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DiskStatistics) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DiskStatistics{`, + `Source:` + strings.Replace(fmt.Sprintf("%v", this.Source), "Resource_DiskInfo_Source", "Resource_DiskInfo_Source", 1) + `,`, + `Persistence:` + strings.Replace(fmt.Sprintf("%v", this.Persistence), "Resource_DiskInfo_Persistence", "Resource_DiskInfo_Persistence", 1) + `,`, + `LimitBytes:` + valueToStringMesos(this.LimitBytes) + `,`, + `UsedBytes:` + valueToStringMesos(this.UsedBytes) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceStatistics) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceStatistics{`, + `Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`, + `CPUsUserTimeSecs:` + valueToStringMesos(this.CPUsUserTimeSecs) + `,`, + `CPUsSystemTimeSecs:` + valueToStringMesos(this.CPUsSystemTimeSecs) + `,`, + `CPUsLimit:` + valueToStringMesos(this.CPUsLimit) + `,`, + `MemRSSBytes:` + valueToStringMesos(this.MemRSSBytes) + `,`, + `MemLimitBytes:` + valueToStringMesos(this.MemLimitBytes) + `,`, + `CPUsNrPeriods:` + valueToStringMesos(this.CPUsNrPeriods) + `,`, + `CPUsNrThrottled:` + valueToStringMesos(this.CPUsNrThrottled) + `,`, + `CPUsThrottledTimeSecs:` + valueToStringMesos(this.CPUsThrottledTimeSecs) + `,`, + `MemFileBytes:` + valueToStringMesos(this.MemFileBytes) + `,`, + `MemAnonBytes:` + valueToStringMesos(this.MemAnonBytes) + `,`, + `MemMappedFileBytes:` + valueToStringMesos(this.MemMappedFileBytes) + `,`, + `Perf:` + strings.Replace(fmt.Sprintf("%v", this.Perf), "PerfStatistics", "PerfStatistics", 1) + `,`, + `NetRxPackets:` + valueToStringMesos(this.NetRxPackets) + `,`, + `NetRxBytes:` + valueToStringMesos(this.NetRxBytes) + `,`, + `NetRxErrors:` + valueToStringMesos(this.NetRxErrors) + `,`, + `NetRxDropped:` + valueToStringMesos(this.NetRxDropped) + `,`, + `NetTxPackets:` + valueToStringMesos(this.NetTxPackets) + `,`, + `NetTxBytes:` + valueToStringMesos(this.NetTxBytes) + `,`, + `NetTxErrors:` + valueToStringMesos(this.NetTxErrors) + `,`, + `NetTxDropped:` + valueToStringMesos(this.NetTxDropped) + `,`, + `NetTCPRttMicrosecsP50:` + valueToStringMesos(this.NetTCPRttMicrosecsP50) + `,`, + `NetTCPRttMicrosecsP90:` + valueToStringMesos(this.NetTCPRttMicrosecsP90) + `,`, + `NetTCPRttMicrosecsP95:` + valueToStringMesos(this.NetTCPRttMicrosecsP95) + `,`, + `NetTCPRttMicrosecsP99:` + valueToStringMesos(this.NetTCPRttMicrosecsP99) + `,`, + `DiskLimitBytes:` + valueToStringMesos(this.DiskLimitBytes) + `,`, + `DiskUsedBytes:` + valueToStringMesos(this.DiskUsedBytes) + `,`, + `NetTCPActiveConnections:` + valueToStringMesos(this.NetTCPActiveConnections) + `,`, + `NetTCPTimeWaitConnections:` + valueToStringMesos(this.NetTCPTimeWaitConnections) + `,`, + `Processes:` + valueToStringMesos(this.Processes) + `,`, + `Threads:` + valueToStringMesos(this.Threads) + `,`, + `MemLowPressureCounter:` + 
valueToStringMesos(this.MemLowPressureCounter) + `,`, + `MemMediumPressureCounter:` + valueToStringMesos(this.MemMediumPressureCounter) + `,`, + `MemCriticalPressureCounter:` + valueToStringMesos(this.MemCriticalPressureCounter) + `,`, + `NetTrafficControlStatistics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NetTrafficControlStatistics), "TrafficControlStatistics", "TrafficControlStatistics", 1), `&`, ``, 1) + `,`, + `MemTotalBytes:` + valueToStringMesos(this.MemTotalBytes) + `,`, + `MemTotalMemswBytes:` + valueToStringMesos(this.MemTotalMemswBytes) + `,`, + `MemSoftLimitBytes:` + valueToStringMesos(this.MemSoftLimitBytes) + `,`, + `MemCacheBytes:` + valueToStringMesos(this.MemCacheBytes) + `,`, + `MemSwapBytes:` + valueToStringMesos(this.MemSwapBytes) + `,`, + `MemUnevictableBytes:` + valueToStringMesos(this.MemUnevictableBytes) + `,`, + `NetSNMPStatistics:` + strings.Replace(fmt.Sprintf("%v", this.NetSNMPStatistics), "SNMPStatistics", "SNMPStatistics", 1) + `,`, + `DiskStatistics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DiskStatistics), "DiskStatistics", "DiskStatistics", 1), `&`, ``, 1) + `,`, + `BlkioStatistics:` + strings.Replace(fmt.Sprintf("%v", this.BlkioStatistics), "CgroupInfo_Blkio_Statistics", "CgroupInfo_Blkio_Statistics", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceUsage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceUsage{`, + `Executors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Executors), "ResourceUsage_Executor", "ResourceUsage_Executor", 1), `&`, ``, 1) + `,`, + `Total:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Total), "Resource", "Resource", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceUsage_Executor) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceUsage_Executor{`, + `ExecutorInfo:` + strings.Replace(strings.Replace(this.ExecutorInfo.String(), "ExecutorInfo", "ExecutorInfo", 1), `&`, ``, 1) + `,`, + `Allocated:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Allocated), "Resource", "Resource", 1), `&`, ``, 1) + `,`, + `Statistics:` + strings.Replace(fmt.Sprintf("%v", this.Statistics), "ResourceStatistics", "ResourceStatistics", 1) + `,`, + `ContainerID:` + strings.Replace(strings.Replace(this.ContainerID.String(), "ContainerID", "ContainerID", 1), `&`, ``, 1) + `,`, + `Tasks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Tasks), "ResourceUsage_Executor_Task", "ResourceUsage_Executor_Task", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceUsage_Executor_Task) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceUsage_Executor_Task{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ID:` + strings.Replace(strings.Replace(this.ID.String(), "TaskID", "TaskID", 1), `&`, ``, 1) + `,`, + `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "Resource", "Resource", 1), `&`, ``, 1) + `,`, + `Labels:` + strings.Replace(fmt.Sprintf("%v", this.Labels), "Labels", "Labels", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PerfStatistics) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PerfStatistics{`, + `Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`, + `Duration:` + fmt.Sprintf("%v", this.Duration) + `,`, + `Cycles:` + valueToStringMesos(this.Cycles) + `,`, + `StalledCyclesFrontend:` + 
valueToStringMesos(this.StalledCyclesFrontend) + `,`, + `StalledCyclesBackend:` + valueToStringMesos(this.StalledCyclesBackend) + `,`, + `Instructions:` + valueToStringMesos(this.Instructions) + `,`, + `CacheReferences:` + valueToStringMesos(this.CacheReferences) + `,`, + `CacheMisses:` + valueToStringMesos(this.CacheMisses) + `,`, + `Branches:` + valueToStringMesos(this.Branches) + `,`, + `BranchMisses:` + valueToStringMesos(this.BranchMisses) + `,`, + `BusCycles:` + valueToStringMesos(this.BusCycles) + `,`, + `RefCycles:` + valueToStringMesos(this.RefCycles) + `,`, + `CPUClock:` + valueToStringMesos(this.CPUClock) + `,`, + `TaskClock:` + valueToStringMesos(this.TaskClock) + `,`, + `PageFaults:` + valueToStringMesos(this.PageFaults) + `,`, + `MinorFaults:` + valueToStringMesos(this.MinorFaults) + `,`, + `MajorFaults:` + valueToStringMesos(this.MajorFaults) + `,`, + `ContextSwitches:` + valueToStringMesos(this.ContextSwitches) + `,`, + `CPUMigrations:` + valueToStringMesos(this.CPUMigrations) + `,`, + `AlignmentFaults:` + valueToStringMesos(this.AlignmentFaults) + `,`, + `EmulationFaults:` + valueToStringMesos(this.EmulationFaults) + `,`, + `L1DcacheLoads:` + valueToStringMesos(this.L1DcacheLoads) + `,`, + `L1DcacheLoadMisses:` + valueToStringMesos(this.L1DcacheLoadMisses) + `,`, + `L1DcacheStores:` + valueToStringMesos(this.L1DcacheStores) + `,`, + `L1DcacheStoreMisses:` + valueToStringMesos(this.L1DcacheStoreMisses) + `,`, + `L1DcachePrefetches:` + valueToStringMesos(this.L1DcachePrefetches) + `,`, + `L1DcachePrefetchMisses:` + valueToStringMesos(this.L1DcachePrefetchMisses) + `,`, + `L1IcacheLoads:` + valueToStringMesos(this.L1IcacheLoads) + `,`, + `L1IcacheLoadMisses:` + valueToStringMesos(this.L1IcacheLoadMisses) + `,`, + `L1IcachePrefetches:` + valueToStringMesos(this.L1IcachePrefetches) + `,`, + `L1IcachePrefetchMisses:` + valueToStringMesos(this.L1IcachePrefetchMisses) + `,`, + `LLCLoads:` + valueToStringMesos(this.LLCLoads) + `,`, + `LLCLoadMisses:` + valueToStringMesos(this.LLCLoadMisses) + `,`, + `LLCStores:` + valueToStringMesos(this.LLCStores) + `,`, + `LLCStoreMisses:` + valueToStringMesos(this.LLCStoreMisses) + `,`, + `LLCPrefetches:` + valueToStringMesos(this.LLCPrefetches) + `,`, + `LLCPrefetchMisses:` + valueToStringMesos(this.LLCPrefetchMisses) + `,`, + `DTLBLoads:` + valueToStringMesos(this.DTLBLoads) + `,`, + `DTLBLoadMisses:` + valueToStringMesos(this.DTLBLoadMisses) + `,`, + `DTLBStores:` + valueToStringMesos(this.DTLBStores) + `,`, + `DTLBStoreMisses:` + valueToStringMesos(this.DTLBStoreMisses) + `,`, + `DTLBPrefetches:` + valueToStringMesos(this.DTLBPrefetches) + `,`, + `DTLBPrefetchMisses:` + valueToStringMesos(this.DTLBPrefetchMisses) + `,`, + `ITLBLoads:` + valueToStringMesos(this.ITLBLoads) + `,`, + `ITLBLoadMisses:` + valueToStringMesos(this.ITLBLoadMisses) + `,`, + `BranchLoads:` + valueToStringMesos(this.BranchLoads) + `,`, + `BranchLoadMisses:` + valueToStringMesos(this.BranchLoadMisses) + `,`, + `NodeLoads:` + valueToStringMesos(this.NodeLoads) + `,`, + `NodeLoadMisses:` + valueToStringMesos(this.NodeLoadMisses) + `,`, + `NodeStores:` + valueToStringMesos(this.NodeStores) + `,`, + `NodeStoreMisses:` + valueToStringMesos(this.NodeStoreMisses) + `,`, + `NodePrefetches:` + valueToStringMesos(this.NodePrefetches) + `,`, + `NodePrefetchMisses:` + valueToStringMesos(this.NodePrefetchMisses) + `,`, + `}`, + }, "") + return s +} +func (this *Request) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Request{`, + `AgentID:` + 
strings.Replace(fmt.Sprintf("%v", this.AgentID), "AgentID", "AgentID", 1) + `,`, + `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "Resource", "Resource", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Offer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Offer{`, + `ID:` + strings.Replace(strings.Replace(this.ID.String(), "OfferID", "OfferID", 1), `&`, ``, 1) + `,`, + `FrameworkID:` + strings.Replace(strings.Replace(this.FrameworkID.String(), "FrameworkID", "FrameworkID", 1), `&`, ``, 1) + `,`, + `AgentID:` + strings.Replace(strings.Replace(this.AgentID.String(), "AgentID", "AgentID", 1), `&`, ``, 1) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "Resource", "Resource", 1), `&`, ``, 1) + `,`, + `ExecutorIDs:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExecutorIDs), "ExecutorID", "ExecutorID", 1), `&`, ``, 1) + `,`, + `Attributes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Attributes), "Attribute", "Attribute", 1), `&`, ``, 1) + `,`, + `URL:` + strings.Replace(fmt.Sprintf("%v", this.URL), "URL", "URL", 1) + `,`, + `Unavailability:` + strings.Replace(fmt.Sprintf("%v", this.Unavailability), "Unavailability", "Unavailability", 1) + `,`, + `AllocationInfo:` + strings.Replace(fmt.Sprintf("%v", this.AllocationInfo), "Resource_AllocationInfo", "Resource_AllocationInfo", 1) + `,`, + `Domain:` + strings.Replace(fmt.Sprintf("%v", this.Domain), "DomainInfo", "DomainInfo", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Offer_Operation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Offer_Operation{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Launch:` + strings.Replace(fmt.Sprintf("%v", this.Launch), "Offer_Operation_Launch", "Offer_Operation_Launch", 1) + `,`, + `Reserve:` + strings.Replace(fmt.Sprintf("%v", this.Reserve), "Offer_Operation_Reserve", "Offer_Operation_Reserve", 1) + `,`, + `Unreserve:` + strings.Replace(fmt.Sprintf("%v", this.Unreserve), "Offer_Operation_Unreserve", "Offer_Operation_Unreserve", 1) + `,`, + `Create:` + strings.Replace(fmt.Sprintf("%v", this.Create), "Offer_Operation_Create", "Offer_Operation_Create", 1) + `,`, + `Destroy:` + strings.Replace(fmt.Sprintf("%v", this.Destroy), "Offer_Operation_Destroy", "Offer_Operation_Destroy", 1) + `,`, + `LaunchGroup:` + strings.Replace(fmt.Sprintf("%v", this.LaunchGroup), "Offer_Operation_LaunchGroup", "Offer_Operation_LaunchGroup", 1) + `,`, + `ID:` + strings.Replace(fmt.Sprintf("%v", this.ID), "OperationID", "OperationID", 1) + `,`, + `GrowVolume:` + strings.Replace(fmt.Sprintf("%v", this.GrowVolume), "Offer_Operation_GrowVolume", "Offer_Operation_GrowVolume", 1) + `,`, + `ShrinkVolume:` + strings.Replace(fmt.Sprintf("%v", this.ShrinkVolume), "Offer_Operation_ShrinkVolume", "Offer_Operation_ShrinkVolume", 1) + `,`, + `CreateDisk:` + strings.Replace(fmt.Sprintf("%v", this.CreateDisk), "Offer_Operation_CreateDisk", "Offer_Operation_CreateDisk", 1) + `,`, + `DestroyDisk:` + strings.Replace(fmt.Sprintf("%v", this.DestroyDisk), "Offer_Operation_DestroyDisk", "Offer_Operation_DestroyDisk", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Offer_Operation_Launch) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Offer_Operation_Launch{`, + `TaskInfos:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TaskInfos), 
"TaskInfo", "TaskInfo", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Offer_Operation_LaunchGroup) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Offer_Operation_LaunchGroup{`, + `Executor:` + strings.Replace(strings.Replace(this.Executor.String(), "ExecutorInfo", "ExecutorInfo", 1), `&`, ``, 1) + `,`, + `TaskGroup:` + strings.Replace(strings.Replace(this.TaskGroup.String(), "TaskGroupInfo", "TaskGroupInfo", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Offer_Operation_Reserve) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Offer_Operation_Reserve{`, + `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "Resource", "Resource", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Offer_Operation_Unreserve) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Offer_Operation_Unreserve{`, + `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "Resource", "Resource", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Offer_Operation_Create) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Offer_Operation_Create{`, + `Volumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Volumes), "Resource", "Resource", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Offer_Operation_Destroy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Offer_Operation_Destroy{`, + `Volumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Volumes), "Resource", "Resource", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Offer_Operation_GrowVolume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Offer_Operation_GrowVolume{`, + `Volume:` + strings.Replace(strings.Replace(this.Volume.String(), "Resource", "Resource", 1), `&`, ``, 1) + `,`, + `Addition:` + strings.Replace(strings.Replace(this.Addition.String(), "Resource", "Resource", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Offer_Operation_ShrinkVolume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Offer_Operation_ShrinkVolume{`, + `Volume:` + strings.Replace(strings.Replace(this.Volume.String(), "Resource", "Resource", 1), `&`, ``, 1) + `,`, + `Subtract:` + strings.Replace(strings.Replace(this.Subtract.String(), "Value_Scalar", "Value_Scalar", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Offer_Operation_CreateDisk) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Offer_Operation_CreateDisk{`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "Resource", "Resource", 1), `&`, ``, 1) + `,`, + `TargetType:` + fmt.Sprintf("%v", this.TargetType) + `,`, + `}`, + }, "") + return s +} +func (this *Offer_Operation_DestroyDisk) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Offer_Operation_DestroyDisk{`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "Resource", "Resource", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *InverseOffer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&InverseOffer{`, + `OfferID:` + strings.Replace(strings.Replace(this.OfferID.String(), "OfferID", "OfferID", 1), `&`, ``, 1) + `,`, + `URL:` + 
strings.Replace(fmt.Sprintf("%v", this.URL), "URL", "URL", 1) + `,`, + `FrameworkID:` + strings.Replace(strings.Replace(this.FrameworkID.String(), "FrameworkID", "FrameworkID", 1), `&`, ``, 1) + `,`, + `AgentID:` + strings.Replace(fmt.Sprintf("%v", this.AgentID), "AgentID", "AgentID", 1) + `,`, + `Unavailability:` + strings.Replace(strings.Replace(this.Unavailability.String(), "Unavailability", "Unavailability", 1), `&`, ``, 1) + `,`, + `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "Resource", "Resource", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *TaskInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskInfo{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `TaskID:` + strings.Replace(strings.Replace(this.TaskID.String(), "TaskID", "TaskID", 1), `&`, ``, 1) + `,`, + `AgentID:` + strings.Replace(strings.Replace(this.AgentID.String(), "AgentID", "AgentID", 1), `&`, ``, 1) + `,`, + `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "Resource", "Resource", 1), `&`, ``, 1) + `,`, + `Executor:` + strings.Replace(fmt.Sprintf("%v", this.Executor), "ExecutorInfo", "ExecutorInfo", 1) + `,`, + `Data:` + valueToStringMesos(this.Data) + `,`, + `Command:` + strings.Replace(fmt.Sprintf("%v", this.Command), "CommandInfo", "CommandInfo", 1) + `,`, + `HealthCheck:` + strings.Replace(fmt.Sprintf("%v", this.HealthCheck), "HealthCheck", "HealthCheck", 1) + `,`, + `Container:` + strings.Replace(fmt.Sprintf("%v", this.Container), "ContainerInfo", "ContainerInfo", 1) + `,`, + `Labels:` + strings.Replace(fmt.Sprintf("%v", this.Labels), "Labels", "Labels", 1) + `,`, + `Discovery:` + strings.Replace(fmt.Sprintf("%v", this.Discovery), "DiscoveryInfo", "DiscoveryInfo", 1) + `,`, + `KillPolicy:` + strings.Replace(fmt.Sprintf("%v", this.KillPolicy), "KillPolicy", "KillPolicy", 1) + `,`, + `Check:` + strings.Replace(fmt.Sprintf("%v", this.Check), "CheckInfo", "CheckInfo", 1) + `,`, + `MaxCompletionTime:` + strings.Replace(fmt.Sprintf("%v", this.MaxCompletionTime), "DurationInfo", "DurationInfo", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TaskGroupInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskGroupInfo{`, + `Tasks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Tasks), "TaskInfo", "TaskInfo", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Task) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Task{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `TaskID:` + strings.Replace(strings.Replace(this.TaskID.String(), "TaskID", "TaskID", 1), `&`, ``, 1) + `,`, + `FrameworkID:` + strings.Replace(strings.Replace(this.FrameworkID.String(), "FrameworkID", "FrameworkID", 1), `&`, ``, 1) + `,`, + `ExecutorID:` + strings.Replace(fmt.Sprintf("%v", this.ExecutorID), "ExecutorID", "ExecutorID", 1) + `,`, + `AgentID:` + strings.Replace(strings.Replace(this.AgentID.String(), "AgentID", "AgentID", 1), `&`, ``, 1) + `,`, + `State:` + valueToStringMesos(this.State) + `,`, + `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "Resource", "Resource", 1), `&`, ``, 1) + `,`, + `Statuses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Statuses), "TaskStatus", "TaskStatus", 1), `&`, ``, 1) + `,`, + `StatusUpdateState:` + valueToStringMesos(this.StatusUpdateState) + `,`, + `StatusUpdateUUID:` + valueToStringMesos(this.StatusUpdateUUID) + `,`, + 
`Labels:` + strings.Replace(fmt.Sprintf("%v", this.Labels), "Labels", "Labels", 1) + `,`, + `Discovery:` + strings.Replace(fmt.Sprintf("%v", this.Discovery), "DiscoveryInfo", "DiscoveryInfo", 1) + `,`, + `Container:` + strings.Replace(fmt.Sprintf("%v", this.Container), "ContainerInfo", "ContainerInfo", 1) + `,`, + `User:` + valueToStringMesos(this.User) + `,`, + `}`, + }, "") + return s +} +func (this *TaskResourceLimitation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskResourceLimitation{`, + `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "Resource", "Resource", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *UUID) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UUID{`, + `Value:` + valueToStringMesos(this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *Operation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Operation{`, + `FrameworkID:` + strings.Replace(fmt.Sprintf("%v", this.FrameworkID), "FrameworkID", "FrameworkID", 1) + `,`, + `AgentID:` + strings.Replace(fmt.Sprintf("%v", this.AgentID), "AgentID", "AgentID", 1) + `,`, + `Info:` + strings.Replace(strings.Replace(this.Info.String(), "Offer_Operation", "Offer_Operation", 1), `&`, ``, 1) + `,`, + `LatestStatus:` + strings.Replace(strings.Replace(this.LatestStatus.String(), "OperationStatus", "OperationStatus", 1), `&`, ``, 1) + `,`, + `Statuses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Statuses), "OperationStatus", "OperationStatus", 1), `&`, ``, 1) + `,`, + `UUID:` + strings.Replace(strings.Replace(this.UUID.String(), "UUID", "UUID", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *OperationStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OperationStatus{`, + `OperationID:` + strings.Replace(fmt.Sprintf("%v", this.OperationID), "OperationID", "OperationID", 1) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `Message:` + valueToStringMesos(this.Message) + `,`, + `ConvertedResources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ConvertedResources), "Resource", "Resource", 1), `&`, ``, 1) + `,`, + `UUID:` + strings.Replace(fmt.Sprintf("%v", this.UUID), "UUID", "UUID", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CheckStatusInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CheckStatusInfo{`, + `Type:` + valueToStringMesos(this.Type) + `,`, + `Command:` + strings.Replace(fmt.Sprintf("%v", this.Command), "CheckStatusInfo_Command", "CheckStatusInfo_Command", 1) + `,`, + `HTTP:` + strings.Replace(fmt.Sprintf("%v", this.HTTP), "CheckStatusInfo_Http", "CheckStatusInfo_Http", 1) + `,`, + `TCP:` + strings.Replace(fmt.Sprintf("%v", this.TCP), "CheckStatusInfo_Tcp", "CheckStatusInfo_Tcp", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CheckStatusInfo_Command) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CheckStatusInfo_Command{`, + `ExitCode:` + valueToStringMesos(this.ExitCode) + `,`, + `}`, + }, "") + return s +} +func (this *CheckStatusInfo_Http) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CheckStatusInfo_Http{`, + `StatusCode:` + valueToStringMesos(this.StatusCode) + `,`, + `}`, + }, "") + return s +} +func (this *CheckStatusInfo_Tcp) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&CheckStatusInfo_Tcp{`, + `Succeeded:` + valueToStringMesos(this.Succeeded) + `,`, + `}`, + }, "") + return s +} +func (this *TaskStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskStatus{`, + `TaskID:` + strings.Replace(strings.Replace(this.TaskID.String(), "TaskID", "TaskID", 1), `&`, ``, 1) + `,`, + `State:` + valueToStringMesos(this.State) + `,`, + `Data:` + valueToStringMesos(this.Data) + `,`, + `Message:` + valueToStringMesos(this.Message) + `,`, + `AgentID:` + strings.Replace(fmt.Sprintf("%v", this.AgentID), "AgentID", "AgentID", 1) + `,`, + `Timestamp:` + valueToStringMesos(this.Timestamp) + `,`, + `ExecutorID:` + strings.Replace(fmt.Sprintf("%v", this.ExecutorID), "ExecutorID", "ExecutorID", 1) + `,`, + `Healthy:` + valueToStringMesos(this.Healthy) + `,`, + `Source:` + valueToStringMesos(this.Source) + `,`, + `Reason:` + valueToStringMesos(this.Reason) + `,`, + `UUID:` + valueToStringMesos(this.UUID) + `,`, + `Labels:` + strings.Replace(fmt.Sprintf("%v", this.Labels), "Labels", "Labels", 1) + `,`, + `ContainerStatus:` + strings.Replace(fmt.Sprintf("%v", this.ContainerStatus), "ContainerStatus", "ContainerStatus", 1) + `,`, + `UnreachableTime:` + strings.Replace(fmt.Sprintf("%v", this.UnreachableTime), "TimeInfo", "TimeInfo", 1) + `,`, + `CheckStatus:` + strings.Replace(fmt.Sprintf("%v", this.CheckStatus), "CheckStatusInfo", "CheckStatusInfo", 1) + `,`, + `Limitation:` + strings.Replace(fmt.Sprintf("%v", this.Limitation), "TaskResourceLimitation", "TaskResourceLimitation", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Filters) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Filters{`, + `RefuseSeconds:` + valueToStringMesos(this.RefuseSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *Environment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Environment{`, + `Variables:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Variables), "Environment_Variable", "Environment_Variable", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Environment_Variable) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Environment_Variable{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + valueToStringMesos(this.Value) + `,`, + `Type:` + valueToStringMesos(this.Type) + `,`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "Secret", "Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Parameter) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Parameter{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *Parameters) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Parameters{`, + `Parameter:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Parameter), "Parameter", "Parameter", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Credential) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Credential{`, + `Principal:` + fmt.Sprintf("%v", this.Principal) + `,`, + `Secret:` + valueToStringMesos(this.Secret) + `,`, + `}`, + }, "") + return s +} +func (this *Credentials) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Credentials{`, + `Credentials:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Credentials), "Credential", "Credential", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Secret) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Secret{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Reference:` + strings.Replace(fmt.Sprintf("%v", this.Reference), "Secret_Reference", "Secret_Reference", 1) + `,`, + `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Secret_Value", "Secret_Value", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Secret_Reference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Secret_Reference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Key:` + valueToStringMesos(this.Key) + `,`, + `}`, + }, "") + return s +} +func (this *Secret_Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Secret_Value{`, + `Data:` + valueToStringMesos(this.Data) + `,`, + `}`, + }, "") + return s +} +func (this *RateLimit) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RateLimit{`, + `QPS:` + valueToStringMesos(this.QPS) + `,`, + `Principal:` + fmt.Sprintf("%v", this.Principal) + `,`, + `Capacity:` + valueToStringMesos(this.Capacity) + `,`, + `}`, + }, "") + return s +} +func (this *RateLimits) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RateLimits{`, + `Limits:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Limits), "RateLimit", "RateLimit", 1), `&`, ``, 1) + `,`, + `AggregateDefaultQPS:` + valueToStringMesos(this.AggregateDefaultQPS) + `,`, + `AggregateDefaultCapacity:` + valueToStringMesos(this.AggregateDefaultCapacity) + `,`, + `}`, + }, "") + return s +} +func (this *Image) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Image{`, + `Type:` + valueToStringMesos(this.Type) + `,`, + `Appc:` + strings.Replace(fmt.Sprintf("%v", this.Appc), "Image_Appc", "Image_Appc", 1) + `,`, + `Docker:` + strings.Replace(fmt.Sprintf("%v", this.Docker), "Image_Docker", "Image_Docker", 1) + `,`, + `Cached:` + valueToStringMesos(this.Cached) + `,`, + `}`, + }, "") + return s +} +func (this *Image_Appc) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Image_Appc{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ID:` + valueToStringMesos(this.ID) + `,`, + `Labels:` + strings.Replace(fmt.Sprintf("%v", this.Labels), "Labels", "Labels", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Image_Docker) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Image_Docker{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Credential:` + strings.Replace(fmt.Sprintf("%v", this.Credential), "Credential", "Credential", 1) + `,`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "Secret", "Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MountPropagation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MountPropagation{`, + `Mode:` + valueToStringMesos(this.Mode) + `,`, + `}`, + }, "") + return s +} +func (this *Volume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Volume{`, + `ContainerPath:` + fmt.Sprintf("%v", this.ContainerPath) + `,`, + `HostPath:` + valueToStringMesos(this.HostPath) + `,`, + `Mode:` + valueToStringMesos(this.Mode) + `,`, + `Image:` + 
strings.Replace(fmt.Sprintf("%v", this.Image), "Image", "Image", 1) + `,`, + `Source:` + strings.Replace(fmt.Sprintf("%v", this.Source), "Volume_Source", "Volume_Source", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Volume_Source) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Volume_Source{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `DockerVolume:` + strings.Replace(fmt.Sprintf("%v", this.DockerVolume), "Volume_Source_DockerVolume", "Volume_Source_DockerVolume", 1) + `,`, + `SandboxPath:` + strings.Replace(fmt.Sprintf("%v", this.SandboxPath), "Volume_Source_SandboxPath", "Volume_Source_SandboxPath", 1) + `,`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "Secret", "Secret", 1) + `,`, + `HostPath:` + strings.Replace(fmt.Sprintf("%v", this.HostPath), "Volume_Source_HostPath", "Volume_Source_HostPath", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Volume_Source_DockerVolume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Volume_Source_DockerVolume{`, + `Driver:` + valueToStringMesos(this.Driver) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DriverOptions:` + strings.Replace(fmt.Sprintf("%v", this.DriverOptions), "Parameters", "Parameters", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Volume_Source_HostPath) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Volume_Source_HostPath{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `MountPropagation:` + strings.Replace(fmt.Sprintf("%v", this.MountPropagation), "MountPropagation", "MountPropagation", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Volume_Source_SandboxPath) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Volume_Source_SandboxPath{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkInfo{`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Labels:` + strings.Replace(fmt.Sprintf("%v", this.Labels), "Labels", "Labels", 1) + `,`, + `IPAddresses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.IPAddresses), "NetworkInfo_IPAddress", "NetworkInfo_IPAddress", 1), `&`, ``, 1) + `,`, + `Name:` + valueToStringMesos(this.Name) + `,`, + `PortMappings:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PortMappings), "NetworkInfo_PortMapping", "NetworkInfo_PortMapping", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkInfo_IPAddress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkInfo_IPAddress{`, + `Protocol:` + valueToStringMesos(this.Protocol) + `,`, + `IPAddress:` + valueToStringMesos(this.IPAddress) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkInfo_PortMapping) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkInfo_PortMapping{`, + `HostPort:` + fmt.Sprintf("%v", this.HostPort) + `,`, + `ContainerPort:` + fmt.Sprintf("%v", this.ContainerPort) + `,`, + `Protocol:` + valueToStringMesos(this.Protocol) + `,`, + `}`, + }, "") + return s +} +func (this *CapabilityInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CapabilityInfo{`, + `Capabilities:` + fmt.Sprintf("%v", this.Capabilities) + `,`, + `}`, + }, "") + return s +} +func (this 
*LinuxInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LinuxInfo{`, + `CapabilityInfo:` + strings.Replace(fmt.Sprintf("%v", this.CapabilityInfo), "CapabilityInfo", "CapabilityInfo", 1) + `,`, + `BoundingCapabilities:` + strings.Replace(fmt.Sprintf("%v", this.BoundingCapabilities), "CapabilityInfo", "CapabilityInfo", 1) + `,`, + `EffectiveCapabilities:` + strings.Replace(fmt.Sprintf("%v", this.EffectiveCapabilities), "CapabilityInfo", "CapabilityInfo", 1) + `,`, + `SharePIDNamespace:` + valueToStringMesos(this.SharePIDNamespace) + `,`, + `}`, + }, "") + return s +} +func (this *RLimitInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RLimitInfo{`, + `Rlimits:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rlimits), "RLimitInfo_RLimit", "RLimitInfo_RLimit", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RLimitInfo_RLimit) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RLimitInfo_RLimit{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Hard:` + valueToStringMesos(this.Hard) + `,`, + `Soft:` + valueToStringMesos(this.Soft) + `,`, + `}`, + }, "") + return s +} +func (this *TTYInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TTYInfo{`, + `WindowSize:` + strings.Replace(fmt.Sprintf("%v", this.WindowSize), "TTYInfo_WindowSize", "TTYInfo_WindowSize", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TTYInfo_WindowSize) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TTYInfo_WindowSize{`, + `Rows:` + fmt.Sprintf("%v", this.Rows) + `,`, + `Columns:` + fmt.Sprintf("%v", this.Columns) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerInfo{`, + `Type:` + valueToStringMesos(this.Type) + `,`, + `Volumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Volumes), "Volume", "Volume", 1), `&`, ``, 1) + `,`, + `Docker:` + strings.Replace(fmt.Sprintf("%v", this.Docker), "ContainerInfo_DockerInfo", "ContainerInfo_DockerInfo", 1) + `,`, + `Hostname:` + valueToStringMesos(this.Hostname) + `,`, + `Mesos:` + strings.Replace(fmt.Sprintf("%v", this.Mesos), "ContainerInfo_MesosInfo", "ContainerInfo_MesosInfo", 1) + `,`, + `NetworkInfos:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NetworkInfos), "NetworkInfo", "NetworkInfo", 1), `&`, ``, 1) + `,`, + `LinuxInfo:` + strings.Replace(fmt.Sprintf("%v", this.LinuxInfo), "LinuxInfo", "LinuxInfo", 1) + `,`, + `RlimitInfo:` + strings.Replace(fmt.Sprintf("%v", this.RlimitInfo), "RLimitInfo", "RLimitInfo", 1) + `,`, + `TTYInfo:` + strings.Replace(fmt.Sprintf("%v", this.TTYInfo), "TTYInfo", "TTYInfo", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerInfo_DockerInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerInfo_DockerInfo{`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `Network:` + valueToStringMesos(this.Network) + `,`, + `PortMappings:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PortMappings), "ContainerInfo_DockerInfo_PortMapping", "ContainerInfo_DockerInfo_PortMapping", 1), `&`, ``, 1) + `,`, + `Privileged:` + valueToStringMesos(this.Privileged) + `,`, + `Parameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Parameters), "Parameter", "Parameter", 1), `&`, ``, 1) + `,`, + `ForcePullImage:` + 
valueToStringMesos(this.ForcePullImage) + `,`, + `VolumeDriver:` + valueToStringMesos(this.VolumeDriver) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerInfo_DockerInfo_PortMapping) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerInfo_DockerInfo_PortMapping{`, + `HostPort:` + fmt.Sprintf("%v", this.HostPort) + `,`, + `ContainerPort:` + fmt.Sprintf("%v", this.ContainerPort) + `,`, + `Protocol:` + valueToStringMesos(this.Protocol) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerInfo_MesosInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerInfo_MesosInfo{`, + `Image:` + strings.Replace(fmt.Sprintf("%v", this.Image), "Image", "Image", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStatus{`, + `NetworkInfos:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NetworkInfos), "NetworkInfo", "NetworkInfo", 1), `&`, ``, 1) + `,`, + `CgroupInfo:` + strings.Replace(fmt.Sprintf("%v", this.CgroupInfo), "CgroupInfo", "CgroupInfo", 1) + `,`, + `ExecutorPID:` + valueToStringMesos(this.ExecutorPID) + `,`, + `ContainerID:` + strings.Replace(fmt.Sprintf("%v", this.ContainerID), "ContainerID", "ContainerID", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CgroupInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CgroupInfo{`, + `NetCLS:` + strings.Replace(fmt.Sprintf("%v", this.NetCLS), "CgroupInfo_NetCls", "CgroupInfo_NetCls", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CgroupInfo_Blkio) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CgroupInfo_Blkio{`, + `}`, + }, "") + return s +} +func (this *CgroupInfo_Blkio_Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CgroupInfo_Blkio_Value{`, + `Op:` + valueToStringMesos(this.Op) + `,`, + `Value:` + valueToStringMesos(this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *CgroupInfo_Blkio_CFQ) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CgroupInfo_Blkio_CFQ{`, + `}`, + }, "") + return s +} +func (this *CgroupInfo_Blkio_CFQ_Statistics) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CgroupInfo_Blkio_CFQ_Statistics{`, + `Device:` + strings.Replace(fmt.Sprintf("%v", this.Device), "Device_Number", "Device_Number", 1) + `,`, + `Sectors:` + valueToStringMesos(this.Sectors) + `,`, + `Time:` + valueToStringMesos(this.Time) + `,`, + `IOServiced:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.IOServiced), "CgroupInfo_Blkio_Value", "CgroupInfo_Blkio_Value", 1), `&`, ``, 1) + `,`, + `IOServiceBytes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.IOServiceBytes), "CgroupInfo_Blkio_Value", "CgroupInfo_Blkio_Value", 1), `&`, ``, 1) + `,`, + `IOServiceTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.IOServiceTime), "CgroupInfo_Blkio_Value", "CgroupInfo_Blkio_Value", 1), `&`, ``, 1) + `,`, + `IOWaitTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.IOWaitTime), "CgroupInfo_Blkio_Value", "CgroupInfo_Blkio_Value", 1), `&`, ``, 1) + `,`, + `IOMerged:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.IOMerged), "CgroupInfo_Blkio_Value", "CgroupInfo_Blkio_Value", 1), `&`, ``, 1) + `,`, + `IOQueued:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", 
this.IOQueued), "CgroupInfo_Blkio_Value", "CgroupInfo_Blkio_Value", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CgroupInfo_Blkio_Throttling) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CgroupInfo_Blkio_Throttling{`, + `}`, + }, "") + return s +} +func (this *CgroupInfo_Blkio_Throttling_Statistics) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CgroupInfo_Blkio_Throttling_Statistics{`, + `Device:` + strings.Replace(fmt.Sprintf("%v", this.Device), "Device_Number", "Device_Number", 1) + `,`, + `IOServiced:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.IOServiced), "CgroupInfo_Blkio_Value", "CgroupInfo_Blkio_Value", 1), `&`, ``, 1) + `,`, + `IOServiceBytes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.IOServiceBytes), "CgroupInfo_Blkio_Value", "CgroupInfo_Blkio_Value", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CgroupInfo_Blkio_Statistics) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CgroupInfo_Blkio_Statistics{`, + `CFQ:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CFQ), "CgroupInfo_Blkio_CFQ_Statistics", "CgroupInfo_Blkio_CFQ_Statistics", 1), `&`, ``, 1) + `,`, + `CFQRecursive:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CFQRecursive), "CgroupInfo_Blkio_CFQ_Statistics", "CgroupInfo_Blkio_CFQ_Statistics", 1), `&`, ``, 1) + `,`, + `Throttling:` + strings.Replace(fmt.Sprintf("%v", this.Throttling), "CgroupInfo_Blkio_Throttling_Statistics", "CgroupInfo_Blkio_Throttling_Statistics", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CgroupInfo_NetCls) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CgroupInfo_NetCls{`, + `ClassID:` + valueToStringMesos(this.ClassID) + `,`, + `}`, + }, "") + return s +} +func (this *Labels) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Labels{`, + `Labels:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Labels), "Label", "Label", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Label) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Label{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Value:` + valueToStringMesos(this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *Port) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Port{`, + `Number:` + fmt.Sprintf("%v", this.Number) + `,`, + `Name:` + valueToStringMesos(this.Name) + `,`, + `Protocol:` + valueToStringMesos(this.Protocol) + `,`, + `Visibility:` + valueToStringMesos(this.Visibility) + `,`, + `Labels:` + strings.Replace(fmt.Sprintf("%v", this.Labels), "Labels", "Labels", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Ports) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Ports{`, + `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "Port", "Port", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DiscoveryInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DiscoveryInfo{`, + `Visibility:` + fmt.Sprintf("%v", this.Visibility) + `,`, + `Name:` + valueToStringMesos(this.Name) + `,`, + `Environment:` + valueToStringMesos(this.Environment) + `,`, + `Location:` + valueToStringMesos(this.Location) + `,`, + `Version:` + valueToStringMesos(this.Version) + `,`, + `Ports:` + 
strings.Replace(fmt.Sprintf("%v", this.Ports), "Ports", "Ports", 1) + `,`, + `Labels:` + strings.Replace(fmt.Sprintf("%v", this.Labels), "Labels", "Labels", 1) + `,`, + `}`, + }, "") + return s +} +func (this *WeightInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WeightInfo{`, + `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, + `Role:` + valueToStringMesos(this.Role) + `,`, + `}`, + }, "") + return s +} +func (this *VersionInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VersionInfo{`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `BuildDate:` + valueToStringMesos(this.BuildDate) + `,`, + `BuildTime:` + valueToStringMesos(this.BuildTime) + `,`, + `BuildUser:` + valueToStringMesos(this.BuildUser) + `,`, + `GitSHA:` + valueToStringMesos(this.GitSHA) + `,`, + `GitBranch:` + valueToStringMesos(this.GitBranch) + `,`, + `GitTag:` + valueToStringMesos(this.GitTag) + `,`, + `}`, + }, "") + return s +} +func (this *Flag) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Flag{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + valueToStringMesos(this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *Role) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Role{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, + `Frameworks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Frameworks), "FrameworkID", "FrameworkID", 1), `&`, ``, 1) + `,`, + `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "Resource", "Resource", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Metric) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Metric{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + valueToStringMesos(this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *FileInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FileInfo{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Nlink:` + valueToStringMesos(this.Nlink) + `,`, + `Size:` + valueToStringMesos(this.Size) + `,`, + `Mtime:` + strings.Replace(fmt.Sprintf("%v", this.Mtime), "TimeInfo", "TimeInfo", 1) + `,`, + `Mode:` + valueToStringMesos(this.Mode) + `,`, + `UID:` + valueToStringMesos(this.UID) + `,`, + `GID:` + valueToStringMesos(this.GID) + `,`, + `}`, + }, "") + return s +} +func (this *Device) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Device{`, + `Path:` + valueToStringMesos(this.Path) + `,`, + `Number:` + strings.Replace(fmt.Sprintf("%v", this.Number), "Device_Number", "Device_Number", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Device_Number) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Device_Number{`, + `MajorNumber:` + valueToStringMesos(this.MajorNumber) + `,`, + `MinorNumber:` + valueToStringMesos(this.MinorNumber) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceAccess) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceAccess{`, + `Device:` + strings.Replace(strings.Replace(this.Device.String(), "Device", "Device", 1), `&`, ``, 1) + `,`, + `Access:` + strings.Replace(strings.Replace(this.Access.String(), "DeviceAccess_Access", "DeviceAccess_Access", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func 
(this *DeviceAccess_Access) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceAccess_Access{`, + `Read:` + valueToStringMesos(this.Read) + `,`, + `Write:` + valueToStringMesos(this.Write) + `,`, + `Mknod:` + valueToStringMesos(this.Mknod) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceWhitelist) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceWhitelist{`, + `AllowedDevices:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedDevices), "DeviceAccess", "DeviceAccess", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringMesos(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *FrameworkID) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FrameworkID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FrameworkID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("value") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OfferID) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OfferID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OfferID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { 
+ return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("value") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AgentID) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AgentID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AgentID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("value") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskID) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + 
if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("value") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecutorID) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecutorID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecutorID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("value") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerID) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Parent == nil { + m.Parent = &ContainerID{} + } + if err := m.Parent.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("value") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceProviderID) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceProviderID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceProviderID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("value") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OperationID) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OperationID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OperationID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("value") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimeInfo) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimeInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimeInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanoseconds", wireType) + } + m.Nanoseconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanoseconds |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("nanoseconds") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DurationInfo) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) 
& 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DurationInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DurationInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanoseconds", wireType) + } + m.Nanoseconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanoseconds |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("nanoseconds") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Address) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Address: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Address: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Hostname = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.IP = &s + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + hasFields[0] |= 
uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("port") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *URL) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: URL: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: URL: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scheme", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scheme = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Address.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000002) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + 
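The `for shift := uint(0); ; shift += 7` loops that dominate this generated code are all inline copies of one primitive: base-128 varint decoding, seven payload bits per byte, least-significant group first, high bit set on every byte except the last. A minimal standalone sketch of that primitive (the helper and error strings here are illustrative, not from the generated file):

```go
package main

import (
	"errors"
	"fmt"
)

// decodeVarint reads one base-128 varint, exactly what the generated
// shift loops above do inline for every key, length, and integer field.
func decodeVarint(b []byte) (v uint64, n int, err error) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(b) {
			return 0, 0, errors.New("unexpected EOF") // io.ErrUnexpectedEOF in the generated code
		}
		c := b[n]
		n++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 { // high bit clear: last byte of this varint
			return v, n, nil
		}
	}
	return 0, 0, errors.New("varint overflows 64 bits") // ErrIntOverflowMesos above
}

func main() {
	v, n, err := decodeVarint([]byte{0xAC, 0x02}) // 0xAC 0x02 encodes 300
	fmt.Println(v, n, err)                        // 300 2 <nil>
}
```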
return io.ErrUnexpectedEOF + } + m.Query = append(m.Query, Parameter{}) + if err := m.Query[len(m.Query)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fragment", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Fragment = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("scheme") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("address") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Unavailability) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Unavailability: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Unavailability: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Start.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Duration == nil { + m.Duration = &DurationInfo{} + } + if err := m.Duration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("start") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MachineID) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MachineID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MachineID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Hostname = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.IP = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MachineInfo) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MachineInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MachineInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
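Each field on the wire starts with a single varint key that packs the tag number and wire type together; the generated `fieldNum := int32(wire >> 3)` / `wireType := int(wire & 0x7)` pair simply splits that key. A small sketch of the split, with the wire-type constants written out for reference:

```go
package main

import "fmt"

// Proto2 wire types, as dispatched on in the switch statements above.
const (
	wireVarint  = 0 // bool, enum, int32/64 — Nanoseconds, Port, Checkpoint, ...
	wireFixed64 = 1 // double — FailoverTimeout, DelaySeconds, ...
	wireBytes   = 2 // string, bytes, embedded message, packed repeated
	wireFixed32 = 5 // float, fixed32 (not used by the fields shown here)
)

// splitKey mirrors the generated `wire >> 3` / `wire & 0x7` pair: every
// field is prefixed by one varint key combining tag number and wire type.
func splitKey(key uint64) (fieldNum int32, wireType int) {
	return int32(key >> 3), int(key & 0x7)
}

func main() {
	f, wt := splitKey(0x0A) // key for field 1, wire type 2 (e.g. FrameworkInfo.user)
	fmt.Println(f, wt)      // 1 2
}
```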
msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + var v MachineInfo_Mode + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (MachineInfo_Mode(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Mode = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Unavailability", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Unavailability == nil { + m.Unavailability = &Unavailability{} + } + if err := m.Unavailability.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("id") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FrameworkInfo) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FrameworkInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FrameworkInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen 
|= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000002) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ID == nil { + m.ID = &FrameworkID{} + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field FailoverTimeout", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + v2 := float64(math.Float64frombits(v)) + m.FailoverTimeout = &v2 + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Checkpoint", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Checkpoint = &b + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Role = &s + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Hostname = &s + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Principal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) 
<< shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Principal = &s + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WebUiURL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.WebUiURL = &s + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capabilities", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Capabilities = append(m.Capabilities, FrameworkInfo_Capability{}) + if err := m.Capabilities[len(m.Capabilities)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = &Labels{} + } + if err := m.Labels.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("user") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("name") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FrameworkInfo_Capability) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Capability: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Capability: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (FrameworkInfo_Capability_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CheckInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CheckInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CheckInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (CheckInfo_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Command == nil { + m.Command = &CheckInfo_Command{} + } + if err := m.Command.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + 
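Every string, bytes, and embedded-message case above follows the same wire-type-2 shape: decode a varint length, bounds-check `postIndex := iNdEx + length`, then hand the sub-slice to `string(...)` or a nested `Unmarshal`. A self-contained sketch of that pattern (the error values stand in for `ErrInvalidLengthMesos` and `io.ErrUnexpectedEOF`):

```go
package main

import (
	"errors"
	"fmt"
)

// readLengthDelimited reads one wire-type-2 payload starting at iNdEx:
// a varint byte count, then that many bytes.
func readLengthDelimited(b []byte, iNdEx int) (payload []byte, next int, err error) {
	var length int
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return nil, 0, errors.New("int overflow")
		}
		if iNdEx >= len(b) {
			return nil, 0, errors.New("unexpected EOF")
		}
		c := b[iNdEx]
		iNdEx++
		length |= int(c&0x7F) << shift
		if c < 0x80 {
			break
		}
	}
	if length < 0 {
		return nil, 0, errors.New("invalid length") // ErrInvalidLengthMesos above
	}
	postIndex := iNdEx + length
	if postIndex > len(b) {
		return nil, 0, errors.New("unexpected EOF")
	}
	return b[iNdEx:postIndex], postIndex, nil
}

func main() {
	// 0x02 = length 2, followed by the bytes "hi".
	s, next, err := readLengthDelimited([]byte{0x02, 'h', 'i'}, 0)
	fmt.Println(string(s), next, err) // hi 3 <nil>
}
```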
return io.ErrUnexpectedEOF + } + if m.HTTP == nil { + m.HTTP = &CheckInfo_Http{} + } + if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field DelaySeconds", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + v2 := float64(math.Float64frombits(v)) + m.DelaySeconds = &v2 + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field IntervalSeconds", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + v2 := float64(math.Float64frombits(v)) + m.IntervalSeconds = &v2 + case 6: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + v2 := float64(math.Float64frombits(v)) + m.TimeoutSeconds = &v2 + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TCP", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TCP == nil { + m.TCP = &CheckInfo_Tcp{} + } + if err := m.TCP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CheckInfo_Command) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Command: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Command: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Command", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Command.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("command") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CheckInfo_Http) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Http: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Http: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("port") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CheckInfo_Tcp) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << 
shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Tcp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Tcp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("port") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HealthCheck) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HealthCheck: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HealthCheck: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HTTP == nil { + m.HTTP = &HealthCheck_HTTPCheckInfo{} + } + if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field DelaySeconds", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + v2 := float64(math.Float64frombits(v)) + m.DelaySeconds = &v2 + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field IntervalSeconds", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + v2 := 
float64(math.Float64frombits(v)) + m.IntervalSeconds = &v2 + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + v2 := float64(math.Float64frombits(v)) + m.TimeoutSeconds = &v2 + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsecutiveFailures", wireType) + } + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ConsecutiveFailures = &v + case 6: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field GracePeriodSeconds", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + v2 := float64(math.Float64frombits(v)) + m.GracePeriodSeconds = &v2 + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Command == nil { + m.Command = &CommandInfo{} + } + if err := m.Command.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (HealthCheck_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TCP", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TCP == nil { + m.TCP = &HealthCheck_TCPCheckInfo{} + } + if err := m.TCP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *HealthCheck_HTTPCheckInfo) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPCheckInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPCheckInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scheme", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Scheme = &s + iNdEx = postIndex + case 4: + if wireType == 0 { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Statuses = append(m.Statuses, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Statuses = append(m.Statuses, v) + } + } else { + return fmt.Errorf("proto: wrong 
wireType = %d for field Statuses", wireType) + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var v NetworkInfo_Protocol + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (NetworkInfo_Protocol(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Protocol = &v + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("port") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HealthCheck_TCPCheckInfo) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TCPCheckInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TCPCheckInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var v NetworkInfo_Protocol + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (NetworkInfo_Protocol(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Protocol = &v + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("port") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KillPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KillPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KillPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GracePeriod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GracePeriod == nil { + m.GracePeriod = &DurationInfo{} + } + if err := m.GracePeriod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CommandInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CommandInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CommandInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URIs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.URIs = append(m.URIs, CommandInfo_URI{}) + if err := m.URIs[len(m.URIs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Environment", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Environment == nil { + m.Environment = &Environment{} + } + if err := m.Environment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Value = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.User = &s + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Shell", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Shell = &b + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Arguments", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Arguments = append(m.Arguments, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CommandInfo_URI) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: URI: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: URI: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Executable", wireType) + 
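The `double` fields in these messages (`FailoverTimeout`, `DelaySeconds`, `IntervalSeconds`, `TimeoutSeconds`, `GracePeriodSeconds`) use wire type 1: eight little-endian bytes reinterpreted with `math.Float64frombits`. The generated code unrolls the little-endian load into eight shifts; the sketch below is the equivalent using `encoding/binary`:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math"
)

// readDouble decodes a wire-type-1 (fixed64) protobuf double the way the
// FailoverTimeout / DelaySeconds cases above do, with encoding/binary in
// place of the hand-unrolled shift chain.
func readDouble(b []byte, iNdEx int) (float64, int, error) {
	if iNdEx+8 > len(b) {
		return 0, 0, fmt.Errorf("need 8 bytes at offset %d", iNdEx)
	}
	bits := binary.LittleEndian.Uint64(b[iNdEx : iNdEx+8])
	return math.Float64frombits(bits), iNdEx + 8, nil
}

func main() {
	var buf [8]byte
	binary.LittleEndian.PutUint64(buf[:], math.Float64bits(30.0)) // e.g. a 30s timeout
	v, next, err := readDouble(buf[:], 0)
	fmt.Println(v, next, err) // 30 8 <nil>
}
```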
} + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Executable = &b + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Extract", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Extract = &b + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Cache", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Cache = &b + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OutputFile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.OutputFile = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("value") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecutorInfo) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecutorInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecutorInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecutorID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ExecutorID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 4: + if wireType != 2 { + 
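Proto2 optional scalars are stored through pointers so that "unset" stays distinguishable from a zero value, which is why cases like `Executable`, `Extract`, and `Cache` above decode a varint into a local, convert with `bool(v != 0)`, and assign the local's address. A reduced illustration (the `uri` struct here is a stand-in, not the generated type):

```go
package main

import "fmt"

// uri models the pointer-field shape of the generated CommandInfo_URI.
type uri struct {
	Executable *bool
}

// setExecutable mirrors the generated bool case: any nonzero varint is
// true, and the field is set by taking the address of a local.
func setExecutable(m *uri, v int) {
	b := v != 0
	m.Executable = &b
}

func main() {
	var m uri
	fmt.Println(m.Executable == nil) // true: field absent on the wire
	setExecutable(&m, 1)
	fmt.Println(*m.Executable) // true: field present and set
}
```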
return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, Resource{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Command == nil { + m.Command = &CommandInfo{} + } + if err := m.Command.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FrameworkID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FrameworkID == nil { + m.FrameworkID = &FrameworkID{} + } + if err := m.FrameworkID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Name = &s + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Source = &s + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Container == nil { + m.Container = &ContainerInfo{} + } + if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Discovery", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Discovery == nil { + m.Discovery = &DiscoveryInfo{} + } + if err := m.Discovery.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShutdownGracePeriod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ShutdownGracePeriod == nil { + m.ShutdownGracePeriod = &DurationInfo{} + } + if err := m.ShutdownGracePeriod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = &Labels{} + } + if err := m.Labels.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (ExecutorInfo_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err 
!= nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("executor_id") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DomainInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DomainInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DomainInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FaultDomain", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FaultDomain == nil { + m.FaultDomain = &DomainInfo_FaultDomain{} + } + if err := m.FaultDomain.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DomainInfo_FaultDomain) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FaultDomain: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FaultDomain: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Region.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Zone", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Zone.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000002) + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("region") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("zone") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DomainInfo_FaultDomain_RegionInfo) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RegionInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RegionInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("name") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DomainInfo_FaultDomain_ZoneInfo) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ZoneInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ZoneInfo: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("name") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MasterInfo) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MasterInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MasterInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + m.IP = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.IP |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + hasFields[0] |= uint64(0x00000002) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v + hasFields[0] |= uint64(0x00000004) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.PID = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Hostname = &s + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Version = &s + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Address == nil { + m.Address = &Address{} + } + if err := m.Address.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Domain", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Domain == nil { + m.Domain = &DomainInfo{} + } + if err := m.Domain.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capabilities", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Capabilities = append(m.Capabilities, 
MasterInfo_Capability{}) + if err := m.Capabilities[len(m.Capabilities)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("id") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("ip") + } + if hasFields[0]&uint64(0x00000004) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("port") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MasterInfo_Capability) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Capability: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Capability: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (MasterInfo_Capability_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AgentInfo) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AgentInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AgentInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] 
|= uint64(0x00000001) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, Resource{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, Attribute{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ID == nil { + m.ID = &AgentID{} + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Domain", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Domain == nil { + m.Domain = &DomainInfo{} + } + if err := m.Domain.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("hostname") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AgentInfo_Capability) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Capability: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Capability: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (AgentInfo_Capability_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSIPluginContainerInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSIPluginContainerInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSIPluginContainerInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v CSIPluginContainerInfo_Service + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (CSIPluginContainerInfo_Service(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Services = append(m.Services, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v CSIPluginContainerInfo_Service + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (CSIPluginContainerInfo_Service(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Services = append(m.Services, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Services", wireType) + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Command == nil { + m.Command = &CommandInfo{} + } + if err := m.Command.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, Resource{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Container == nil { + m.Container = &ContainerInfo{} + } + if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSIPluginInfo) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSIPluginInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSIPluginInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000002) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Containers = append(m.Containers, CSIPluginContainerInfo{}) + if err := m.Containers[len(m.Containers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("type") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("name") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceProviderInfo) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceProviderInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceProviderInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ID == nil { + m.ID = &ResourceProviderID{} + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, Attribute{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000002) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultReservations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DefaultReservations = append(m.DefaultReservations, Resource_ReservationInfo{}) + if err := m.DefaultReservations[len(m.DefaultReservations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Storage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Storage == nil { + m.Storage = &ResourceProviderInfo_Storage{} + } + if err := m.Storage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("type") + } + if hasFields[0]&uint64(0x00000002) == 0 { + 
return github_com_gogo_protobuf_proto.NewRequiredNotSetError("name") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceProviderInfo_Storage) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Storage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Storage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Plugin", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Plugin.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("plugin") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Value) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (Value_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scalar", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scalar == nil 
{ + m.Scalar = &Value_Scalar{} + } + if err := m.Scalar.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Ranges == nil { + m.Ranges = &Value_Ranges{} + } + if err := m.Ranges.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Set", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Set == nil { + m.Set = &Value_Set{} + } + if err := m.Set.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Text", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Text == nil { + m.Text = &Value_Text{} + } + if err := m.Text.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("type") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Value_Scalar) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Scalar: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Scalar: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= 
uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + m.Value = float64(math.Float64frombits(v)) + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("value") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Value_Range) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Range: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Range: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Begin", wireType) + } + m.Begin = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Begin |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + } + m.End = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.End |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + hasFields[0] |= uint64(0x00000002) + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("begin") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("end") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Value_Ranges) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Ranges: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Ranges: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Range = append(m.Range, Value_Range{}) + if err := m.Range[len(m.Range)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Value_Set) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Set: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Set: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Item", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Item = append(m.Item, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Value_Text) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Text: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Text: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("value") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Attribute) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Attribute: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Attribute: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (Value_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + hasFields[0] |= uint64(0x00000002) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scalar", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scalar == nil { + m.Scalar = &Value_Scalar{} + } + if err := m.Scalar.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + 
msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Ranges == nil { + m.Ranges = &Value_Ranges{} + } + if err := m.Ranges.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Text", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Text == nil { + m.Text = &Value_Text{} + } + if err := m.Text.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Set", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Set == nil { + m.Set = &Value_Set{} + } + if err := m.Set.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("name") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("type") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Resource) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Resource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Resource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var v 
Value_Type + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (Value_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Type = &v + hasFields[0] |= uint64(0x00000002) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scalar", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scalar == nil { + m.Scalar = &Value_Scalar{} + } + if err := m.Scalar.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Ranges == nil { + m.Ranges = &Value_Ranges{} + } + if err := m.Ranges.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Set", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Set == nil { + m.Set = &Value_Set{} + } + if err := m.Set.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Role = &s + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Disk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Disk == nil { + m.Disk = &Resource_DiskInfo{} + } + if err := m.Disk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + 
return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reservation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Reservation == nil { + m.Reservation = &Resource_ReservationInfo{} + } + if err := m.Reservation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Revocable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Revocable == nil { + m.Revocable = &Resource_RevocableInfo{} + } + if err := m.Revocable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shared", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Shared == nil { + m.Shared = &Resource_SharedInfo{} + } + if err := m.Shared.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllocationInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AllocationInfo == nil { + m.AllocationInfo = &Resource_AllocationInfo{} + } + if err := m.AllocationInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProviderID == nil { + m.ProviderID = &ResourceProviderID{} + } + if err := m.ProviderID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Reservations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reservations = append(m.Reservations, Resource_ReservationInfo{}) + if err := m.Reservations[len(m.Reservations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("name") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("type") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Resource_AllocationInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllocationInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllocationInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Role = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Resource_ReservationInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReservationInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReservationInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + 
switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Principal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Principal = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = &Labels{} + } + if err := m.Labels.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Role = &s + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var v Resource_ReservationInfo_Type + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (Resource_ReservationInfo_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Type = &v + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Resource_DiskInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DiskInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DiskInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Persistence", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Persistence == nil { + m.Persistence = &Resource_DiskInfo_Persistence{} + } + if err := m.Persistence.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Volume == nil { + m.Volume = &Volume{} + } + if err := m.Volume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Source == nil { + m.Source = &Resource_DiskInfo_Source{} + } + if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Resource_DiskInfo_Persistence) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Persistence: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Persistence: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 
2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Principal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Principal = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("id") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Resource_DiskInfo_Source) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Source: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Source: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (Resource_DiskInfo_Source_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Path == nil { + m.Path = &Resource_DiskInfo_Source_Path{} + } + if err := m.Path.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mount", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Mount == nil { + m.Mount = &Resource_DiskInfo_Source_Mount{} + } + if err := m.Mount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex 
+ case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ID = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = &Labels{} + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Profile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Profile = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("type") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Resource_DiskInfo_Source_Path) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Path: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Path: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Root", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + 
intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Root = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Resource_DiskInfo_Source_Mount) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Mount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Mount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Root", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Root = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Resource_RevocableInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RevocableInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RevocableInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Resource_SharedInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 
4 { + return fmt.Errorf("proto: SharedInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SharedInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TrafficControlStatistics) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TrafficControlStatistics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TrafficControlStatistics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Backlog", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Backlog = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Bytes", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Bytes = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Drops", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Drops = &v + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Overlimits", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Overlimits = &v + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Packets", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Packets = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Qlen", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Qlen = &v + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RateBPS", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RateBPS = &v + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RatePPS", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RatePPS = &v + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Requeues", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Requeues = &v + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("id") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IpStatistics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IpStatistics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IpStatistics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Forwarding", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Forwarding = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultTTL", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DefaultTTL = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InReceives", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InReceives = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InHdrErrors", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InHdrErrors = &v + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InAddrErrors", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InAddrErrors = &v + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ForwDatagrams", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ForwDatagrams = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InUnknownProtos", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InUnknownProtos = &v + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InDiscards", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InDiscards = &v + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InDelivers", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InDelivers = &v + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OutRequests", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.OutRequests = &v + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OutDiscards", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.OutDiscards = &v + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OutNoRoutes", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.OutNoRoutes = &v + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReasmTimeout", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReasmTimeout = &v + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReasmReqds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReasmReqds = &v + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReasmOKs", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReasmOKs = &v + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReasmFails", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReasmFails = &v + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FragOKs", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.FragOKs = &v + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FragFails", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.FragFails = &v + case 19: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FragCreates", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.FragCreates = &v + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IcmpStatistics) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IcmpStatistics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IcmpStatistics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InMsgs", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InMsgs = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InErrors", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InErrors = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InCsumErrors", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InCsumErrors = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InDestUnreachs", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InDestUnreachs = &v + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InTimeExcds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InTimeExcds = &v + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InParmProbs", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InParmProbs = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InSrcQuenchs", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InSrcQuenchs = &v + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InRedirects", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InRedirects = &v + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InEchos", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InEchos = &v + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InEchoReps", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InEchoReps = &v + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InTimestamps", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InTimestamps = &v + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InTimestampReps", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InTimestampReps = &v + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InAddrMasks", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InAddrMasks = &v + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InAddrMaskReps", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InAddrMaskReps = &v + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OutMsgs", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.OutMsgs = &v + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OutErrors", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.OutErrors = &v + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OutDestUnreachs", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } 
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.OutDestUnreachs = &v + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OutTimeExcds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.OutTimeExcds = &v + case 19: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OutParmProbs", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.OutParmProbs = &v + case 20: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OutSrcQuenchs", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.OutSrcQuenchs = &v + case 21: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OutRedirects", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.OutRedirects = &v + case 22: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OutEchos", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.OutEchos = &v + case 23: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OutEchoReps", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.OutEchoReps = &v + case 24: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OutTimestamps", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.OutTimestamps = &v + case 25: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OutTimestampReps", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.OutTimestampReps = &v + case 26: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OutAddrMasks", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.OutAddrMasks = &v + case 27: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OutAddrMaskReps", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.OutAddrMaskReps = &v + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TcpStatistics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TcpStatistics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TcpStatistics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RtoAlgorithm", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RtoAlgorithm = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RtoMin", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RtoMin = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RtoMax", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RtoMax = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxConn", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MaxConn = &v + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveOpens", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ActiveOpens = &v + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
PassiveOpens", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.PassiveOpens = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AttemptFails", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.AttemptFails = &v + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EstabResets", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.EstabResets = &v + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrEstab", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CurrEstab = &v + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InSegs", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InSegs = &v + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OutSegs", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.OutSegs = &v + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RetransSegs", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RetransSegs = &v + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InErrs", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InErrs = &v + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OutRsts", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.OutRsts = &v + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InCsumErrors", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InCsumErrors = &v + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UdpStatistics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UdpStatistics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UdpStatistics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InDatagrams", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InDatagrams = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoPorts", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NoPorts = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InErrors", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InErrors = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OutDatagrams", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.OutDatagrams = &v + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RcvbufErrors", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RcvbufErrors = &v + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SndbufErrors", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.SndbufErrors = &v + case 7: + 
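// Editor's note (illustrative sketch, not part of the vendored file): every
// scalar field above is decoded with the same inlined base-128 varint loop --
// each byte contributes its low 7 bits, the 0x80 bit marks continuation, and
// a shift of 64 or more signals an over-long encoding (ErrIntOverflowMesos
// above). A standalone restatement of that pattern, with made-up names:

package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the loop the generated code inlines per field.
func decodeVarint(data []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows 64 bits")
		}
		if n >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[n]
		n++
		v |= (uint64(b) & 0x7F) << shift
		if b < 0x80 { // high bit clear: final byte
			return v, n, nil
		}
	}
}

func main() {
	v, n, err := decodeVarint([]byte{0xAC, 0x02}) // wire encoding of 300
	fmt.Println(v, n, err)                        // 300 2 <nil>
}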
if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InCsumErrors", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InCsumErrors = &v + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IgnoredMulti", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IgnoredMulti = &v + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SNMPStatistics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SNMPStatistics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SNMPStatistics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPStats", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.IPStats == nil { + m.IPStats = &IpStatistics{} + } + if err := m.IPStats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ICMPStats", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ICMPStats == nil { + m.ICMPStats = &IcmpStatistics{} + } + if err := m.ICMPStats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TCPStats", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + 
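// Editor's note (illustrative sketch, not part of the vendored file): the
// default cases above call skipMesos to measure and skip unknown fields, so
// an old decoder tolerates messages carrying fields it does not know. The
// sketch below only handles wire types 0 (varint) and 2 (length-delimited,
// with a single-byte length for brevity); the real helper also covers
// fixed32/fixed64 and groups.

package main

import (
	"errors"
	"fmt"
)

// skipField returns the index just past one unknown field's payload.
func skipField(data []byte, i int, wireType int) (int, error) {
	switch wireType {
	case 0: // varint: advance past continuation bytes
		for {
			if i >= len(data) {
				return 0, errors.New("unexpected EOF")
			}
			b := data[i]
			i++
			if b < 0x80 {
				return i, nil
			}
		}
	case 2: // length-delimited: length prefix, then payload
		if i >= len(data) {
			return 0, errors.New("unexpected EOF")
		}
		n := int(data[i])
		i++
		if i+n > len(data) {
			return 0, errors.New("unexpected EOF")
		}
		return i + n, nil
	default:
		return 0, fmt.Errorf("wire type %d not handled in this sketch", wireType)
	}
}

func main() {
	i, err := skipField([]byte{0xAC, 0x02, 0x00}, 0, 0) // skip varint 300
	fmt.Println(i, err)                                 // 2 <nil>
}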
postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TCPStats == nil { + m.TCPStats = &TcpStatistics{} + } + if err := m.TCPStats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UDPStats", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UDPStats == nil { + m.UDPStats = &UdpStatistics{} + } + if err := m.UDPStats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DiskStatistics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DiskStatistics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DiskStatistics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Source == nil { + m.Source = &Resource_DiskInfo_Source{} + } + if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Persistence", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Persistence == nil { + m.Persistence = &Resource_DiskInfo_Persistence{} + } + if err := m.Persistence.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LimitBytes", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + 
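// Editor's note (illustrative sketch, not part of the vendored file): the
// wireType 2 cases above (IPStats, ICMPStats, TCPStats, UDPStats) share one
// shape -- read a varint length, bounds-check iNdEx+msglen against the buffer,
// lazily allocate the nested message, and recurse into its Unmarshal on the
// sub-slice. A compact version under assumed types (inner, outer):

package main

import (
	"errors"
	"fmt"
)

type inner struct{ raw []byte }

// unmarshal stands in for the generated nested Unmarshal.
func (m *inner) unmarshal(b []byte) error { m.raw = b; return nil }

type outer struct{ child *inner }

// decodeNested shows the length-delimited (wireType 2) field pattern.
// The real code reads msglen with the full varint loop; one byte is
// enough for this demo.
func decodeNested(m *outer, data []byte, i int) (int, error) {
	msglen := int(data[i])
	i++
	post := i + msglen
	if post > len(data) {
		return 0, errors.New("unexpected EOF")
	}
	if m.child == nil { // lazy allocation, as in `if m.TCPStats == nil { ... }`
		m.child = &inner{}
	}
	if err := m.child.unmarshal(data[i:post]); err != nil {
		return 0, err
	}
	return post, nil // caller sets iNdEx = postIndex
}

func main() {
	var o outer
	buf := []byte{3, 'a', 'b', 'c'} // length prefix 3, then payload
	i, err := decodeNested(&o, buf, 0)
	fmt.Println(i, err, string(o.child.raw)) // 4 <nil> abc
}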
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.LimitBytes = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UsedBytes", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.UsedBytes = &v + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceStatistics) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceStatistics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceStatistics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + m.Timestamp = float64(math.Float64frombits(v)) + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field CPUsUserTimeSecs", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + v2 := float64(math.Float64frombits(v)) + m.CPUsUserTimeSecs = &v2 + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field CPUsSystemTimeSecs", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + v2 := float64(math.Float64frombits(v)) + m.CPUsSystemTimeSecs = &v2 + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field CPUsLimit", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 
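// Editor's note (illustrative sketch, not part of the vendored file): each
// loop iteration above first varint-decodes a tag, then splits it exactly as
// `fieldNum := int32(wire >> 3); wireType := int(wire & 0x7)` -- a protobuf
// tag packs key = (field_number << 3) | wire_type into one varint.

package main

import "fmt"

// splitTag separates a decoded tag into field number and wire type.
func splitTag(wire uint64) (fieldNum int32, wireType int) {
	return int32(wire >> 3), int(wire & 0x7)
}

func main() {
	// Field 1 with wire type 1 (fixed64, e.g. the Timestamp double above)
	// encodes as tag byte 0x09.
	f, w := splitTag(0x09)
	fmt.Println(f, w) // 1 1
}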
24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + v2 := float64(math.Float64frombits(v)) + m.CPUsLimit = &v2 + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemRSSBytes", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MemRSSBytes = &v + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemLimitBytes", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MemLimitBytes = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CPUsNrPeriods", wireType) + } + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CPUsNrPeriods = &v + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CPUsNrThrottled", wireType) + } + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CPUsNrThrottled = &v + case 9: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field CPUsThrottledTimeSecs", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + v2 := float64(math.Float64frombits(v)) + m.CPUsThrottledTimeSecs = &v2 + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemFileBytes", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MemFileBytes = &v + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemAnonBytes", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MemAnonBytes = &v + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemMappedFileBytes", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MemMappedFileBytes = &v + case 13: + if 
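// Editor's note (illustrative sketch, not part of the vendored file): the
// wireType 1 cases above (Timestamp, CPUsUserTimeSecs, the RTT percentiles,
// ...) assemble eight little-endian bytes into a uint64 with unrolled shifts
// and reinterpret the bits as an IEEE 754 double via math.Float64frombits.
// The same pattern with the shifts rolled into a loop:

package main

import (
	"fmt"
	"math"
)

// decodeFixed64Double decodes one fixed64 double starting at index i.
func decodeFixed64Double(data []byte, i int) (float64, int, error) {
	if i+8 > len(data) {
		return 0, 0, fmt.Errorf("unexpected EOF")
	}
	var v uint64
	for k := 0; k < 8; k++ {
		v |= uint64(data[i+k]) << (8 * uint(k)) // little-endian byte order
	}
	return math.Float64frombits(v), i + 8, nil
}

func main() {
	// Round-trip 1.5 through its little-endian wire bytes.
	var buf [8]byte
	bits := math.Float64bits(1.5)
	for k := 0; k < 8; k++ {
		buf[k] = byte(bits >> (8 * uint(k)))
	}
	f, n, err := decodeFixed64Double(buf[:], 0)
	fmt.Println(f, n, err) // 1.5 8 <nil>
}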
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Perf", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Perf == nil { + m.Perf = &PerfStatistics{} + } + if err := m.Perf.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NetRxPackets", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NetRxPackets = &v + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NetRxBytes", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NetRxBytes = &v + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NetRxErrors", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NetRxErrors = &v + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NetRxDropped", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NetRxDropped = &v + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NetTxPackets", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NetTxPackets = &v + case 19: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NetTxBytes", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NetTxBytes = &v + case 20: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NetTxErrors", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NetTxErrors = &v + case 21: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NetTxDropped", wireType) + } + var v uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NetTxDropped = &v + case 22: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field NetTCPRttMicrosecsP50", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + v2 := float64(math.Float64frombits(v)) + m.NetTCPRttMicrosecsP50 = &v2 + case 23: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field NetTCPRttMicrosecsP90", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + v2 := float64(math.Float64frombits(v)) + m.NetTCPRttMicrosecsP90 = &v2 + case 24: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field NetTCPRttMicrosecsP95", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + v2 := float64(math.Float64frombits(v)) + m.NetTCPRttMicrosecsP95 = &v2 + case 25: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field NetTCPRttMicrosecsP99", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + v2 := float64(math.Float64frombits(v)) + m.NetTCPRttMicrosecsP99 = &v2 + case 26: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DiskLimitBytes", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DiskLimitBytes = &v + case 27: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DiskUsedBytes", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DiskUsedBytes = &v + case 28: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field NetTCPActiveConnections", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= 
uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + v2 := float64(math.Float64frombits(v)) + m.NetTCPActiveConnections = &v2 + case 29: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field NetTCPTimeWaitConnections", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + v2 := float64(math.Float64frombits(v)) + m.NetTCPTimeWaitConnections = &v2 + case 30: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Processes", wireType) + } + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Processes = &v + case 31: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Threads", wireType) + } + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Threads = &v + case 32: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemLowPressureCounter", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MemLowPressureCounter = &v + case 33: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemMediumPressureCounter", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MemMediumPressureCounter = &v + case 34: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemCriticalPressureCounter", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MemCriticalPressureCounter = &v + case 35: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetTrafficControlStatistics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NetTrafficControlStatistics = append(m.NetTrafficControlStatistics, TrafficControlStatistics{}) + if err := 
m.NetTrafficControlStatistics[len(m.NetTrafficControlStatistics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 36: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemTotalBytes", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MemTotalBytes = &v + case 37: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemTotalMemswBytes", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MemTotalMemswBytes = &v + case 38: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemSoftLimitBytes", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MemSoftLimitBytes = &v + case 39: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemCacheBytes", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MemCacheBytes = &v + case 40: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemSwapBytes", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MemSwapBytes = &v + case 41: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemUnevictableBytes", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MemUnevictableBytes = &v + case 42: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetSNMPStatistics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NetSNMPStatistics == nil { + m.NetSNMPStatistics = &SNMPStatistics{} + } + if err := m.NetSNMPStatistics.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 43: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DiskStatistics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DiskStatistics = append(m.DiskStatistics, DiskStatistics{}) + if err := m.DiskStatistics[len(m.DiskStatistics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 44: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlkioStatistics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BlkioStatistics == nil { + m.BlkioStatistics = &CgroupInfo_Blkio_Statistics{} + } + if err := m.BlkioStatistics.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("timestamp") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceUsage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceUsage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceUsage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Executors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Executors = append(m.Executors, ResourceUsage_Executor{}) + if err := m.Executors[len(m.Executors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Total = append(m.Total, Resource{}) + if err := 
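// Editor's note (illustrative sketch, not part of the vendored file):
// repeated message fields above (NetTrafficControlStatistics, DiskStatistics,
// Executors, Total) append a zero value and then unmarshal into the final
// slice element, so each entry is decoded in place without a temporary copy:

package main

import "fmt"

type stat struct{ payload string }

// unmarshal stands in for the generated element Unmarshal.
func (s *stat) unmarshal(b []byte) error { s.payload = string(b); return nil }

// appendDecoded mirrors `m.DiskStatistics = append(m.DiskStatistics,
// DiskStatistics{})` followed by Unmarshal on the last element.
func appendDecoded(dst []stat, raw []byte) ([]stat, error) {
	dst = append(dst, stat{})
	if err := dst[len(dst)-1].unmarshal(raw); err != nil {
		return nil, err
	}
	return dst, nil
}

func main() {
	var stats []stat
	for _, raw := range [][]byte{[]byte("a"), []byte("b")} {
		var err error
		if stats, err = appendDecoded(stats, raw); err != nil {
			panic(err)
		}
	}
	fmt.Println(len(stats), stats[0].payload, stats[1].payload) // 2 a b
}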
m.Total[len(m.Total)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceUsage_Executor) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Executor: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Executor: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecutorInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ExecutorInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Allocated", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Allocated = append(m.Allocated, Resource{}) + if err := m.Allocated[len(m.Allocated)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Statistics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Statistics == nil { + m.Statistics = &ResourceStatistics{} + } + if err := m.Statistics.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 
{ + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ContainerID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000002) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tasks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tasks = append(m.Tasks, ResourceUsage_Executor_Task{}) + if err := m.Tasks[len(m.Tasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("executor_info") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("container_id") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceUsage_Executor_Task) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Task: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Task: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000002) + case 3: + if wireType != 2 
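// Editor's note (illustrative sketch, not part of the vendored file):
// required proto2 fields (executor_info and container_id above) are tracked
// with a hasFields bitmask -- decoding a field sets its bit, and after the
// main loop any clear bit produces a "required field not set" error
// (NewRequiredNotSetError in the generated code). A dependency-free version:

package main

import (
	"errors"
	"fmt"
)

// One bit per required field, matching hasFields[0] |= uint64(0x0000000N).
const (
	hasName = 1 << iota
	hasID
)

// checkRequired reports the first required field whose bit is clear.
func checkRequired(hasFields uint64) error {
	if hasFields&hasName == 0 {
		return errors.New(`required field "name" not set`)
	}
	if hasFields&hasID == 0 {
		return errors.New(`required field "id" not set`)
	}
	return nil
}

func main() {
	fmt.Println(checkRequired(hasName | hasID)) // <nil>
	fmt.Println(checkRequired(hasName))         // required field "id" not set
}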
{ + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, Resource{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = &Labels{} + } + if err := m.Labels.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("name") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("id") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PerfStatistics) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PerfStatistics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PerfStatistics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + m.Timestamp = float64(math.Float64frombits(v)) + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= 
uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + m.Duration = float64(math.Float64frombits(v)) + hasFields[0] |= uint64(0x00000002) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Cycles", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Cycles = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StalledCyclesFrontend", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.StalledCyclesFrontend = &v + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StalledCyclesBackend", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.StalledCyclesBackend = &v + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Instructions", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Instructions = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CacheReferences", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CacheReferences = &v + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CacheMisses", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CacheMisses = &v + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Branches", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Branches = &v + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BranchMisses", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.BranchMisses = &v + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BusCycles", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.BusCycles = &v + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RefCycles", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RefCycles = &v + case 13: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field CPUClock", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + v2 := float64(math.Float64frombits(v)) + m.CPUClock = &v2 + case 14: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskClock", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + v2 := float64(math.Float64frombits(v)) + m.TaskClock = &v2 + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PageFaults", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.PageFaults = &v + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinorFaults", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MinorFaults = &v + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MajorFaults", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MajorFaults = &v + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ContextSwitches", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ContextSwitches = &v + case 19: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CPUMigrations", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
m.CPUMigrations = &v + case 20: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AlignmentFaults", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.AlignmentFaults = &v + case 21: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EmulationFaults", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.EmulationFaults = &v + case 22: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field L1DcacheLoads", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.L1DcacheLoads = &v + case 23: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field L1DcacheLoadMisses", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.L1DcacheLoadMisses = &v + case 24: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field L1DcacheStores", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.L1DcacheStores = &v + case 25: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field L1DcacheStoreMisses", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.L1DcacheStoreMisses = &v + case 26: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field L1DcachePrefetches", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.L1DcachePrefetches = &v + case 27: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field L1DcachePrefetchMisses", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.L1DcachePrefetchMisses = &v + case 28: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field L1IcacheLoads", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.L1IcacheLoads = &v + case 29: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field L1IcacheLoadMisses", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.L1IcacheLoadMisses = &v + case 30: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field L1IcachePrefetches", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.L1IcachePrefetches = &v + case 31: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field L1IcachePrefetchMisses", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.L1IcachePrefetchMisses = &v + case 32: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LLCLoads", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.LLCLoads = &v + case 33: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LLCLoadMisses", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.LLCLoadMisses = &v + case 34: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LLCStores", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.LLCStores = &v + case 35: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LLCStoreMisses", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.LLCStoreMisses = &v + case 36: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LLCPrefetches", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.LLCPrefetches = &v + case 37: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LLCPrefetchMisses", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.LLCPrefetchMisses = &v + case 38: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DTLBLoads", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DTLBLoads = &v + case 39: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DTLBLoadMisses", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DTLBLoadMisses = &v + case 40: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DTLBStores", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DTLBStores = &v + case 41: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DTLBStoreMisses", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DTLBStoreMisses = &v + case 42: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DTLBPrefetches", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DTLBPrefetches = &v + case 43: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DTLBPrefetchMisses", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DTLBPrefetchMisses = &v + case 44: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ITLBLoads", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ITLBLoads = &v + case 45: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ITLBLoadMisses", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ITLBLoadMisses = &v + case 46: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BranchLoads", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { 
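Each of these cases is selected by a key varint that packs the field number and wire type into a single value; any field number the switch does not recognize falls through to the `default:` branch just below, where `skipMesos` advances past the unknown field. A tiny sketch of the key split, with made-up example values:

```go
package main

import "fmt"

func main() {
	// A protobuf key is (fieldNum << 3) | wireType. 0x22 selects
	// field 4 with wire type 2 (length-delimited), the layout used
	// for strings and embedded messages throughout this file.
	wire := uint64(0x22)
	fieldNum := int32(wire >> 3)
	wireType := int(wire & 0x7)
	fmt.Println(fieldNum, wireType) // 4 2
}
```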
+ return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.BranchLoads = &v + case 47: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BranchLoadMisses", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.BranchLoadMisses = &v + case 48: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeLoads", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NodeLoads = &v + case 49: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeLoadMisses", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NodeLoadMisses = &v + case 50: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeStores", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NodeStores = &v + case 51: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeStoreMisses", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NodeStoreMisses = &v + case 52: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodePrefetches", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NodePrefetches = &v + case 53: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodePrefetchMisses", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NodePrefetchMisses = &v + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("timestamp") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("duration") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Request) Unmarshal(dAtA []byte) error { + l := 
len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Request: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AgentID == nil { + m.AgentID = &AgentID{} + } + if err := m.AgentID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, Resource{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Offer) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Offer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Offer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FrameworkID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.FrameworkID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000002) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.AgentID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000004) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000008) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, Resource{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecutorIDs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExecutorIDs = append(m.ExecutorIDs, ExecutorID{}) + if err := m.ExecutorIDs[len(m.ExecutorIDs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, Attribute{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.URL == nil { + m.URL = &URL{} + } + if err := m.URL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Unavailability", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Unavailability == nil { + m.Unavailability = &Unavailability{} + } + if err := m.Unavailability.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllocationInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AllocationInfo == nil { + m.AllocationInfo = &Resource_AllocationInfo{} + } + if err := m.AllocationInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Domain", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Domain == nil { + m.Domain = &DomainInfo{} + } + if err := m.Domain.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 
{ + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("id") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("framework_id") + } + if hasFields[0]&uint64(0x00000004) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("agent_id") + } + if hasFields[0]&uint64(0x00000008) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("hostname") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Offer_Operation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Operation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Operation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (Offer_Operation_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Launch", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Launch == nil { + m.Launch = &Offer_Operation_Launch{} + } + if err := m.Launch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reserve", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Reserve == nil { + m.Reserve = &Offer_Operation_Reserve{} + } + if err := m.Reserve.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Unreserve", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Unreserve == nil { + m.Unreserve = &Offer_Operation_Unreserve{} + } + if err := 
m.Unreserve.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Create", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Create == nil { + m.Create = &Offer_Operation_Create{} + } + if err := m.Create.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Destroy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Destroy == nil { + m.Destroy = &Offer_Operation_Destroy{} + } + if err := m.Destroy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LaunchGroup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LaunchGroup == nil { + m.LaunchGroup = &Offer_Operation_LaunchGroup{} + } + if err := m.LaunchGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ID == nil { + m.ID = &OperationID{} + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GrowVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GrowVolume == nil { + m.GrowVolume = &Offer_Operation_GrowVolume{} + } + if err := m.GrowVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ShrinkVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ShrinkVolume == nil { + m.ShrinkVolume = &Offer_Operation_ShrinkVolume{} + } + if err := m.ShrinkVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateDisk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CreateDisk == nil { + m.CreateDisk = &Offer_Operation_CreateDisk{} + } + if err := m.CreateDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DestroyDisk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DestroyDisk == nil { + m.DestroyDisk = &Offer_Operation_DestroyDisk{} + } + if err := m.DestroyDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Offer_Operation_Launch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Launch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Launch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskInfos", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF
+			}
+			m.TaskInfos = append(m.TaskInfos, TaskInfo{})
+			if err := m.TaskInfos[len(m.TaskInfos)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipMesos(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthMesos
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Offer_Operation_LaunchGroup) Unmarshal(dAtA []byte) error {
+	var hasFields [1]uint64
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowMesos
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LaunchGroup: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LaunchGroup: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Executor", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMesos
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthMesos
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Executor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+			hasFields[0] |= uint64(0x00000001)
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TaskGroup", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMesos
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthMesos
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.TaskGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+			hasFields[0] |= uint64(0x00000002)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipMesos(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthMesos
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+	if hasFields[0]&uint64(0x00000001) == 0 {
+		return github_com_gogo_protobuf_proto.NewRequiredNotSetError("executor")
+	}
+	if hasFields[0]&uint64(0x00000002) == 0 {
+		return github_com_gogo_protobuf_proto.NewRequiredNotSetError("task_group")
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Offer_Operation_Reserve) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowMesos
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
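The LaunchGroup decoder above also shows how proto2 `required` fields are enforced: decoding `executor` or `task_group` flips a bit in the `hasFields` mask, and any bit still clear once the input is exhausted becomes a `NewRequiredNotSetError`. A minimal stand-in for that bookkeeping, using hypothetical constant names:

```go
package main

import (
	"errors"
	"fmt"
)

// Bit positions mirror hasFields[0] in the generated LaunchGroup decoder.
const (
	hasExecutor  = uint64(1) << 0
	hasTaskGroup = uint64(1) << 1
)

// checkRequired stands in for the trailing hasFields checks.
func checkRequired(hasFields uint64) error {
	if hasFields&hasExecutor == 0 {
		return errors.New("required field not set: executor")
	}
	if hasFields&hasTaskGroup == 0 {
		return errors.New("required field not set: task_group")
	}
	return nil
}

func main() {
	fmt.Println(checkRequired(hasExecutor))                // task_group never decoded
	fmt.Println(checkRequired(hasExecutor | hasTaskGroup)) // <nil>
}
```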
+ if wireType == 4 { + return fmt.Errorf("proto: Reserve: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Reserve: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, Resource{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Offer_Operation_Unreserve) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Unreserve: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Unreserve: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, Resource{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Offer_Operation_Create) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Create: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Create: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, Resource{}) + if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Offer_Operation_Destroy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Destroy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Destroy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, Resource{}) + if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Offer_Operation_GrowVolume) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GrowVolume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GrowVolume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volume", wireType) + } + var msglen int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Volume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addition", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Addition.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000002) + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("volume") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("addition") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Offer_Operation_ShrinkVolume) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ShrinkVolume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ShrinkVolume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Volume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subtract", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Subtract.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000002) + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("volume") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("subtract") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Offer_Operation_CreateDisk) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateDisk: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateDisk: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetType", wireType) + } + m.TargetType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TargetType |= (Resource_DiskInfo_Source_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + hasFields[0] |= uint64(0x00000002) + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("source") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("target_type") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Offer_Operation_DestroyDisk) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum 
:= int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DestroyDisk: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DestroyDisk: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("source") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InverseOffer) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InverseOffer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InverseOffer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OfferID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.OfferID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.URL == nil { + m.URL = &URL{} + } + if err := m.URL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FrameworkID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.FrameworkID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000002) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AgentID == nil { + m.AgentID = &AgentID{} + } + if err := m.AgentID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Unavailability", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Unavailability.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000004) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, Resource{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("id") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("framework_id") + } + if hasFields[0]&uint64(0x00000004) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("unavailability") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskInfo) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b 
< 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TaskID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000002) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.AgentID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000004) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, Resource{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Executor", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Executor == nil { + m.Executor = &ExecutorInfo{} + } + if err := 
m.Executor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Command == nil { + m.Command = &CommandInfo{} + } + if err := m.Command.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HealthCheck", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HealthCheck == nil { + m.HealthCheck = &HealthCheck{} + } + if err := m.HealthCheck.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Container == nil { + m.Container = &ContainerInfo{} + } + if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = &Labels{} + } + if err := m.Labels.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Discovery", wireType) + } + var msglen int 
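Every wire type 2 field in the TaskInfo block above (Data, Command, HealthCheck, Container, Labels, Discovery) follows one shape: read a varint length, bounds-check it, then hand the sub-slice `dAtA[iNdEx:postIndex]` to the field's own decoder. A compact sketch of that shape, using the stdlib varint reader in place of the inlined loop; `readLengthDelimited` is a hypothetical helper, not part of the generated file:

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// readLengthDelimited returns the payload of one length-delimited
// field and the total bytes consumed, mirroring the msglen/postIndex
// bounds checks in the generated code.
func readLengthDelimited(data []byte) (payload []byte, n int, err error) {
	msglen, k := binary.Uvarint(data)
	if k <= 0 {
		return nil, 0, errors.New("bad length varint")
	}
	post := k + int(msglen)
	if int(msglen) < 0 || post > len(data) {
		return nil, 0, errors.New("unexpected EOF") // io.ErrUnexpectedEOF above
	}
	return data[k:post], post, nil
}

func main() {
	p, n, err := readLengthDelimited([]byte{0x03, 'f', 'o', 'o'}) // length 3, then "foo"
	fmt.Printf("%q %d %v\n", p, n, err)                           // "foo" 4 <nil>
}
```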
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Discovery == nil { + m.Discovery = &DiscoveryInfo{} + } + if err := m.Discovery.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KillPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.KillPolicy == nil { + m.KillPolicy = &KillPolicy{} + } + if err := m.KillPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Check", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Check == nil { + m.Check = &CheckInfo{} + } + if err := m.Check.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxCompletionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxCompletionTime == nil { + m.MaxCompletionTime = &DurationInfo{} + } + if err := m.MaxCompletionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("name") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("task_id") + } + if hasFields[0]&uint64(0x00000004) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("agent_id") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskGroupInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskGroupInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskGroupInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tasks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tasks = append(m.Tasks, TaskInfo{}) + if err := m.Tasks[len(m.Tasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Task) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Task: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Task: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TaskID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000002) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FrameworkID", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.FrameworkID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000004) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecutorID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExecutorID == nil { + m.ExecutorID = &ExecutorID{} + } + if err := m.ExecutorID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.AgentID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000008) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var v TaskState + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (TaskState(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.State = &v + hasFields[0] |= uint64(0x00000010) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, Resource{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Statuses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Statuses = append(m.Statuses, TaskStatus{}) + if err := 
m.Statuses[len(m.Statuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StatusUpdateState", wireType) + } + var v TaskState + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (TaskState(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.StatusUpdateState = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StatusUpdateUUID", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StatusUpdateUUID = append(m.StatusUpdateUUID[:0], dAtA[iNdEx:postIndex]...) + if m.StatusUpdateUUID == nil { + m.StatusUpdateUUID = []byte{} + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = &Labels{} + } + if err := m.Labels.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Discovery", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Discovery == nil { + m.Discovery = &DiscoveryInfo{} + } + if err := m.Discovery.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Container == nil { + m.Container = &ContainerInfo{} + } + if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen 
|= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.User = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("name") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("task_id") + } + if hasFields[0]&uint64(0x00000004) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("framework_id") + } + if hasFields[0]&uint64(0x00000008) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("agent_id") + } + if hasFields[0]&uint64(0x00000010) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("state") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskResourceLimitation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskResourceLimitation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskResourceLimitation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, Resource{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UUID) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UUID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
UUID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("value") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Operation) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Operation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Operation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FrameworkID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FrameworkID == nil { + m.FrameworkID = &FrameworkID{} + } + if err := m.FrameworkID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AgentID == nil { + m.AgentID = &AgentID{} + } + if err := m.AgentID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LatestStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LatestStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000002) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Statuses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Statuses = append(m.Statuses, OperationStatus{}) + if err := m.Statuses[len(m.Statuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UUID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UUID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000004) + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("info") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("latest_status") + } + if hasFields[0]&uint64(0x00000004) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("uuid") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OperationStatus) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OperationStatus: wiretype end group 
for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OperationStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OperationID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.OperationID == nil { + m.OperationID = &OperationID{} + } + if err := m.OperationID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= (OperationState(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + hasFields[0] |= uint64(0x00000001) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Message = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConvertedResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConvertedResources = append(m.ConvertedResources, Resource{}) + if err := m.ConvertedResources[len(m.ConvertedResources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UUID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UUID == nil { + m.UUID = &UUID{} + } + if err := m.UUID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if 
hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("state") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CheckStatusInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CheckStatusInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CheckStatusInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var v CheckInfo_Type + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (CheckInfo_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Type = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Command == nil { + m.Command = &CheckStatusInfo_Command{} + } + if err := m.Command.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HTTP == nil { + m.HTTP = &CheckStatusInfo_Http{} + } + if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TCP", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TCP == nil { + m.TCP = &CheckStatusInfo_Tcp{} + } + if err := m.TCP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *CheckStatusInfo_Command) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Command: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Command: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitCode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ExitCode = &v + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CheckStatusInfo_Http) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Http: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Http: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StatusCode", wireType) + } + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.StatusCode = &v + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CheckStatusInfo_Tcp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Tcp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Tcp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Succeeded", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Succeeded = &b + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskStatus) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TaskID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var v TaskState + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (TaskState(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.State = &v + hasFields[0] |= uint64(0x00000002) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Message = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AgentID == nil { + m.AgentID = &AgentID{} + } + if err := m.AgentID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + v2 := float64(math.Float64frombits(v)) + m.Timestamp = &v2 + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecutorID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExecutorID == nil { + m.ExecutorID = &ExecutorID{} + } + if err := m.ExecutorID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Healthy", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Healthy = &b + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var v TaskStatus_Source + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (TaskStatus_Source(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Source = &v + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var v TaskStatus_Reason + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (TaskStatus_Reason(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Reason = &v + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UUID", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UUID = append(m.UUID[:0], dAtA[iNdEx:postIndex]...) + if m.UUID == nil { + m.UUID = []byte{} + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = &Labels{} + } + if err := m.Labels.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ContainerStatus == nil { + m.ContainerStatus = &ContainerStatus{} + } + if err := m.ContainerStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UnreachableTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UnreachableTime == nil { + m.UnreachableTime = &TimeInfo{} + } + if err := m.UnreachableTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CheckStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CheckStatus == nil { + m.CheckStatus = &CheckStatusInfo{} + } + if err := 
m.CheckStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limitation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Limitation == nil { + m.Limitation = &TaskResourceLimitation{} + } + if err := m.Limitation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("task_id") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("state") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Filters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Filters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Filters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field RefuseSeconds", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + v2 := float64(math.Float64frombits(v)) + m.RefuseSeconds = &v2 + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Environment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Environment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Environment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: 
+ if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Variables", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Variables = append(m.Variables, Environment_Variable{}) + if err := m.Variables[len(m.Variables)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Environment_Variable) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Variable: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Variable: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Value = &s + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var v Environment_Variable_Type + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (Environment_Variable_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Type = &v + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &Secret{} + } + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("name") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Parameter) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Parameter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Parameter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000002) + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("key") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("value") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Parameters) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Parameters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Parameters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Parameter = append(m.Parameter, Parameter{}) + if err := m.Parameter[len(m.Parameter)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Credential) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Credential: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Credential: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Principal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Principal = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Secret = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("principal") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Credentials) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Credentials: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Credentials: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Credentials", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Credentials = append(m.Credentials, Credential{}) + if err := m.Credentials[len(m.Credentials)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Secret) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Secret: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Secret: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (Secret_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Reference == nil { + m.Reference = &Secret_Reference{} + } + if err := m.Reference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Value == nil { + m.Value = &Secret_Value{} + } + if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Secret_Reference) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Reference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Reference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Key = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > 
l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("name") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Secret_Value) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("data") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RateLimit) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RateLimit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RateLimit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field QPS", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + v2 := float64(math.Float64frombits(v)) + m.QPS = &v2 + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Principal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Principal = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Capacity = &v + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("principal") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RateLimits) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RateLimits: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RateLimits: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Limits = append(m.Limits, RateLimit{}) + if err := m.Limits[len(m.Limits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregateDefaultQPS", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + v2 := float64(math.Float64frombits(v)) + m.AggregateDefaultQPS = &v2 + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregateDefaultCapacity", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.AggregateDefaultCapacity = &v + default: + iNdEx = preIndex + 
skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Image) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Image: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Image: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var v Image_Type + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (Image_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Type = &v + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Appc", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Appc == nil { + m.Appc = &Image_Appc{} + } + if err := m.Appc.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Docker", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Docker == nil { + m.Docker = &Image_Docker{} + } + if err := m.Docker.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Cached", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Cached = &b + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("type") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*Image_Appc) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Appc: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Appc: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ID = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = &Labels{} + } + if err := m.Labels.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("name") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Image_Docker) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + 
return fmt.Errorf("proto: Docker: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Docker: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Credential", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Credential == nil { + m.Credential = &Credential{} + } + if err := m.Credential.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &Secret{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("name") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MountPropagation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MountPropagation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MountPropagation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + var v MountPropagation_Mode + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos 
+ } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (MountPropagation_Mode(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Mode = &v + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Volume) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Volume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Volume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.HostPath = &s + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + var v Volume_Mode + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (Volume_Mode(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Mode = &v + hasFields[0] |= uint64(0x00000002) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Image == nil { + m.Image = &Image{} + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err 
+ } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Source == nil { + m.Source = &Volume_Source{} + } + if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("container_path") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("mode") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Volume_Source) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Source: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Source: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (Volume_Source_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DockerVolume == nil { + m.DockerVolume = &Volume_Source_DockerVolume{} + } + if err := m.DockerVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SandboxPath", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SandboxPath == nil { + 
m.SandboxPath = &Volume_Source_SandboxPath{} + } + if err := m.SandboxPath.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &Secret{} + } + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPath", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HostPath == nil { + m.HostPath = &Volume_Source_HostPath{} + } + if err := m.HostPath.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Volume_Source_DockerVolume) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DockerVolume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DockerVolume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Driver = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } 
+ intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DriverOptions == nil { + m.DriverOptions = &Parameters{} + } + if err := m.DriverOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("name") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Volume_Source_HostPath) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HostPath: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HostPath: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MountPropagation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MountPropagation == nil { + m.MountPropagation = &MountPropagation{} + } + if err := m.MountPropagation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) 
+ if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("path") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Volume_Source_SandboxPath) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SandboxPath: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SandboxPath: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (Volume_Source_SandboxPath_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("path") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = &Labels{} + } + if err := m.Labels.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPAddresses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IPAddresses = append(m.IPAddresses, NetworkInfo_IPAddress{}) + if err := m.IPAddresses[len(m.IPAddresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Name = &s + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortMappings", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PortMappings = append(m.PortMappings, NetworkInfo_PortMapping{}) + if err := m.PortMappings[len(m.PortMappings)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkInfo_IPAddress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPAddress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPAddress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var v NetworkInfo_Protocol + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (NetworkInfo_Protocol(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Protocol = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.IPAddress = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkInfo_PortMapping) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PortMapping: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PortMapping: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPort", wireType) + } + m.HostPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HostPort |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerPort", wireType) + } + m.ContainerPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ContainerPort |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + hasFields[0] |= uint64(0x00000002) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Protocol = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("host_port") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("container_port") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CapabilityInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CapabilityInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CapabilityInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v CapabilityInfo_Capability + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (CapabilityInfo_Capability(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Capabilities = append(m.Capabilities, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v CapabilityInfo_Capability + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (CapabilityInfo_Capability(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Capabilities = append(m.Capabilities, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Capabilities", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LinuxInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LinuxInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LinuxInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CapabilityInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CapabilityInfo == nil { + m.CapabilityInfo = &CapabilityInfo{} + } + if err := m.CapabilityInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BoundingCapabilities", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BoundingCapabilities == nil { + m.BoundingCapabilities = &CapabilityInfo{} + } + if err := m.BoundingCapabilities.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EffectiveCapabilities", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EffectiveCapabilities == nil { + m.EffectiveCapabilities = &CapabilityInfo{} + } + if err := m.EffectiveCapabilities.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SharePIDNamespace", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.SharePIDNamespace = &b + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RLimitInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RLimitInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RLimitInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rlimits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rlimits = append(m.Rlimits, RLimitInfo_RLimit{}) + if err := m.Rlimits[len(m.Rlimits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RLimitInfo_RLimit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RLimit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RLimit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (RLimitInfo_RLimit_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Hard", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Hard = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Soft", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Soft = &v + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + 
return io.ErrUnexpectedEOF + } + return nil +} +func (m *TTYInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TTYInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TTYInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WindowSize", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WindowSize == nil { + m.WindowSize = &TTYInfo_WindowSize{} + } + if err := m.WindowSize.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TTYInfo_WindowSize) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WindowSize: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WindowSize: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Rows", wireType) + } + m.Rows = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Rows |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Columns", wireType) + } + m.Columns = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Columns |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + hasFields[0] |= uint64(0x00000002) + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return 
github_com_gogo_protobuf_proto.NewRequiredNotSetError("rows") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("columns") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerInfo) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var v ContainerInfo_Type + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (ContainerInfo_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Type = &v + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, Volume{}) + if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Docker", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Docker == nil { + m.Docker = &ContainerInfo_DockerInfo{} + } + if err := m.Docker.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Hostname = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mesos", wireType) + } + var msglen int + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Mesos == nil { + m.Mesos = &ContainerInfo_MesosInfo{} + } + if err := m.Mesos.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetworkInfos", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NetworkInfos = append(m.NetworkInfos, NetworkInfo{}) + if err := m.NetworkInfos[len(m.NetworkInfos)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LinuxInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LinuxInfo == nil { + m.LinuxInfo = &LinuxInfo{} + } + if err := m.LinuxInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RlimitInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RlimitInfo == nil { + m.RlimitInfo = &RLimitInfo{} + } + if err := m.RlimitInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TTYInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TTYInfo == nil { + m.TTYInfo = &TTYInfo{} + } + if err := m.TTYInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return 
github_com_gogo_protobuf_proto.NewRequiredNotSetError("type") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerInfo_DockerInfo) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DockerInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DockerInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var v ContainerInfo_DockerInfo_Network + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (ContainerInfo_DockerInfo_Network(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Network = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortMappings", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PortMappings = append(m.PortMappings, ContainerInfo_DockerInfo_PortMapping{}) + if err := m.PortMappings[len(m.PortMappings)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Privileged = &b + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + m.Parameters = append(m.Parameters, Parameter{}) + if err := m.Parameters[len(m.Parameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ForcePullImage", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ForcePullImage = &b + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeDriver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.VolumeDriver = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("image") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerInfo_DockerInfo_PortMapping) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PortMapping: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PortMapping: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPort", wireType) + } + m.HostPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HostPort |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerPort", wireType) + } + m.ContainerPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ContainerPort |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + hasFields[0] |= uint64(0x00000002) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Protocol = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("host_port") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("container_port") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerInfo_MesosInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MesosInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MesosInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Image == nil { + m.Image = &Image{} + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetworkInfos", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NetworkInfos = append(m.NetworkInfos, NetworkInfo{}) + if err := m.NetworkInfos[len(m.NetworkInfos)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CgroupInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CgroupInfo == nil { + m.CgroupInfo = &CgroupInfo{} + } + if err := m.CgroupInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecutorPID", wireType) + } + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ExecutorPID = &v + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ContainerID == nil { + m.ContainerID = &ContainerID{} + } + if err := m.ContainerID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CgroupInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CgroupInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CgroupInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetCLS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { 
+ return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NetCLS == nil { + m.NetCLS = &CgroupInfo_NetCls{} + } + if err := m.NetCLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CgroupInfo_Blkio) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Blkio: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Blkio: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CgroupInfo_Blkio_Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType) + } + var v CgroupInfo_Blkio_Operation + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (CgroupInfo_Blkio_Operation(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Op = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = &v + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CgroupInfo_Blkio_CFQ) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CFQ: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CFQ: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CgroupInfo_Blkio_CFQ_Statistics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Statistics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Statistics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Device == nil { + m.Device = &Device_Number{} + } + if err := m.Device.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sectors", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Sectors = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Time = &v + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IOServiced", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IOServiced = append(m.IOServiced, CgroupInfo_Blkio_Value{}) + if err := 
m.IOServiced[len(m.IOServiced)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IOServiceBytes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IOServiceBytes = append(m.IOServiceBytes, CgroupInfo_Blkio_Value{}) + if err := m.IOServiceBytes[len(m.IOServiceBytes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IOServiceTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IOServiceTime = append(m.IOServiceTime, CgroupInfo_Blkio_Value{}) + if err := m.IOServiceTime[len(m.IOServiceTime)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IOWaitTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IOWaitTime = append(m.IOWaitTime, CgroupInfo_Blkio_Value{}) + if err := m.IOWaitTime[len(m.IOWaitTime)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IOMerged", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IOMerged = append(m.IOMerged, CgroupInfo_Blkio_Value{}) + if err := m.IOMerged[len(m.IOMerged)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IOQueued", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IOQueued = append(m.IOQueued, CgroupInfo_Blkio_Value{}) + if err := 
m.IOQueued[len(m.IOQueued)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CgroupInfo_Blkio_Throttling) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Throttling: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Throttling: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CgroupInfo_Blkio_Throttling_Statistics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Statistics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Statistics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Device == nil { + m.Device = &Device_Number{} + } + if err := m.Device.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IOServiced", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IOServiced = append(m.IOServiced, CgroupInfo_Blkio_Value{}) + if err := m.IOServiced[len(m.IOServiced)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field IOServiceBytes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IOServiceBytes = append(m.IOServiceBytes, CgroupInfo_Blkio_Value{}) + if err := m.IOServiceBytes[len(m.IOServiceBytes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CgroupInfo_Blkio_Statistics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Statistics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Statistics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CFQ", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CFQ = append(m.CFQ, CgroupInfo_Blkio_CFQ_Statistics{}) + if err := m.CFQ[len(m.CFQ)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CFQRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CFQRecursive = append(m.CFQRecursive, CgroupInfo_Blkio_CFQ_Statistics{}) + if err := m.CFQRecursive[len(m.CFQRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Throttling", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.Throttling = append(m.Throttling, &CgroupInfo_Blkio_Throttling_Statistics{}) + if err := m.Throttling[len(m.Throttling)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CgroupInfo_NetCls) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetCls: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetCls: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClassID", wireType) + } + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ClassID = &v + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Labels) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Labels: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Labels: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, Label{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *Label) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Label: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Label: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Value = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("key") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Port) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Port: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Port: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Number", wireType) + } + m.Number = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Number |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + hasFields[0] |= uint64(0x00000001) + case 2: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Name = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Protocol = &s + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Visibility", wireType) + } + var v DiscoveryInfo_Visibility + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (DiscoveryInfo_Visibility(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Visibility = &v + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = &Labels{} + } + if err := m.Labels.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("number") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Ports) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Ports: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Ports: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var 
msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, Port{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMesos(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMesos + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DiscoveryInfo) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DiscoveryInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DiscoveryInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Visibility", wireType) + } + m.Visibility = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Visibility |= (DiscoveryInfo_Visibility(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Name = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Environment", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMesos + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Environment = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Location", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMesos + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthMesos
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.Location = &s
+			iNdEx = postIndex
+		[... generated decoding for Version (field 5, string), Ports (field 6, message), Labels (field 7, message), and the default skipMesos arm ...]
+		}
+	}
+	if hasFields[0]&uint64(0x00000001) == 0 {
+		return github_com_gogo_protobuf_proto.NewRequiredNotSetError("visibility")
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *WeightInfo) Unmarshal(dAtA []byte) error {
+	var hasFields [1]uint64
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowMesos
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: WeightInfo: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: WeightInfo: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType)
+			}
+			var v uint64
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += 8
+			v = uint64(dAtA[iNdEx-8])
+			v |= uint64(dAtA[iNdEx-7]) << 8
+			v |= uint64(dAtA[iNdEx-6]) << 16
+			v |= uint64(dAtA[iNdEx-5]) << 24
+			v |= uint64(dAtA[iNdEx-4]) << 32
+			v |= uint64(dAtA[iNdEx-3]) << 40
+			v |= uint64(dAtA[iNdEx-2]) << 48
+			v |= uint64(dAtA[iNdEx-1]) << 56
+			m.Weight = float64(math.Float64frombits(v))
+			hasFields[0] |= uint64(0x00000001)
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMesos
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthMesos
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.Role = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipMesos(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthMesos
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+	if hasFields[0]&uint64(0x00000001) == 0 {
+		return github_com_gogo_protobuf_proto.NewRequiredNotSetError("weight")
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
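The generated decoders above inline the same two primitives over and over: a base-128 varint loop for tags, lengths, and integer fields, and an eight-byte little-endian load for `double` fields such as `WeightInfo.Weight`. A minimal standalone sketch of both patterns; the helper names `readUvarint` and `readDouble` are mine, not part of the generated file:

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
	"math"
)

// readUvarint decodes a base-128 varint, the same loop the generated
// code inlines: seven payload bits per byte, high bit set on every
// byte except the last.
func readUvarint(data []byte, i int) (v uint64, next int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows uint64")
		}
		if i >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[i]
		i++
		v |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			return v, i, nil
		}
	}
}

// readDouble decodes a fixed64 field (wire type 1) as a float64,
// the same result as the eight shifted loads in the Weight case.
func readDouble(data []byte, i int) (float64, int, error) {
	if i+8 > len(data) {
		return 0, 0, errors.New("unexpected EOF")
	}
	bits := binary.LittleEndian.Uint64(data[i : i+8])
	return math.Float64frombits(bits), i + 8, nil
}

func main() {
	buf := []byte{0x96, 0x01} // varint encoding of 150
	v, next, _ := readUvarint(buf, 0)
	fmt.Println(v, next) // 150 2

	var w [8]byte
	binary.LittleEndian.PutUint64(w[:], math.Float64bits(0.5))
	f, _, _ := readDouble(w[:], 0)
	fmt.Println(f) // 0.5
}
```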
+func (m *VersionInfo) Unmarshal(dAtA []byte) error {
+	[... generated decoding: Version (1, string, required, tracked in hasFields), BuildDate (2, string), BuildTime (3, double), BuildUser (4, string), GitSHA (5, string), GitBranch (6, string), GitTag (7, string); unknown fields skipped via skipMesos ...]
+}
+func (m *Flag) Unmarshal(dAtA []byte) error {
+	[... generated decoding: Name (1, string, required), Value (2, string); missing "name" reported via NewRequiredNotSetError ...]
+}
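Because mesos.proto is a proto2 file, messages such as `Flag`, `WeightInfo`, and `Role` carry `required` fields, which the generated code tracks in a `hasFields` bitmask: each decoded required field sets one bit, and the checks after the decode loop turn any unset bit into a "required not set" error. A toy version of that bookkeeping; `toyFlag` and `checkRequired` are illustrative names only:

```go
package main

import "fmt"

// One bit per required field, checked after the decode loop.
const (
	hasName  = 1 << 0 // field 1
	hasValue = 1 << 1 // field 2 (required in this toy message)
)

type toyFlag struct {
	Name  string
	Value string
}

func checkRequired(hasFields uint64) error {
	if hasFields&hasName == 0 {
		return fmt.Errorf("proto: required field %q not set", "name")
	}
	if hasFields&hasValue == 0 {
		return fmt.Errorf("proto: required field %q not set", "value")
	}
	return nil
}

func main() {
	var hasFields uint64
	f := toyFlag{Name: "verbose"}
	if f.Name != "" {
		hasFields |= hasName // set when field 1 is decoded
	}
	fmt.Println(checkRequired(hasFields)) // reports that "value" is missing
}
```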
+func (m *Role) Unmarshal(dAtA []byte) error {
+	[... generated decoding: Name (1, string, required), Weight (2, double, required), Frameworks (3, repeated FrameworkID), Resources (4, repeated Resource); both required bits checked after the loop ...]
+}
+func (m *Metric) Unmarshal(dAtA []byte) error {
+	var hasFields [1]uint64
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowMesos
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Metric: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMesos
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthMesos
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+			hasFields[0] |= uint64(0x00000001)
+		case 2:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var v uint64
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += 8
+			v = uint64(dAtA[iNdEx-8])
+			v |= uint64(dAtA[iNdEx-7]) << 8
+			v |= uint64(dAtA[iNdEx-6]) << 16
+			v |= uint64(dAtA[iNdEx-5]) << 24
+			v |= uint64(dAtA[iNdEx-4]) << 32
+			v |= uint64(dAtA[iNdEx-3]) << 40
+			v |= uint64(dAtA[iNdEx-2]) << 48
+			v |= uint64(dAtA[iNdEx-1]) << 56
+			v2 := float64(math.Float64frombits(v))
+			m.Value = &v2
+		default:
+			iNdEx = preIndex
+			skippy, err := skipMesos(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthMesos
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+	if hasFields[0]&uint64(0x00000001) == 0 {
+		return github_com_gogo_protobuf_proto.NewRequiredNotSetError("name")
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *FileInfo) Unmarshal(dAtA []byte) error {
+	[... generated decoding: Path (1, string, required), Nlink (2, int32), Size (3, uint64), Mtime (4, TimeInfo message), Mode (5, uint32), UID (6, string), GID (7, string) ...]
+}
+func (m *Device) Unmarshal(dAtA []byte) error {
+	[... generated decoding: Path (1, string), Number (2, Device_Number message, allocated lazily before its Unmarshal is called) ...]
+}
+func (m *Device_Number) Unmarshal(dAtA []byte) error {
+	[... generated decoding: MajorNumber (1, uint64, required), MinorNumber (2, uint64, required); both tracked in hasFields ...]
+}
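Note how optional scalar fields decode through pointers (`m.Value = &v2`, `m.Nlink = &v`): proto2 has to distinguish "unset" from "zero value", so the generated code copies the decoded value into a fresh local and stores its address. A small sketch of the idea; the `versionInfo` struct below is a stand-in, not the generated type:

```go
package main

import "fmt"

// Optional proto2 scalars live behind pointers so that nil means
// "absent" while a pointer to the zero value means "present and zero".
type versionInfo struct {
	Version   string   // required: plain value
	BuildTime *float64 // optional: nil when absent
}

func setBuildTime(m *versionInfo, decoded float64) {
	v2 := decoded     // fresh local, as in the generated `v2 := ...`
	m.BuildTime = &v2 // the field points at the copy
}

func main() {
	m := versionInfo{Version: "1.7.0"}
	fmt.Println(m.BuildTime == nil) // true: field absent
	setBuildTime(&m, 0)             // present, but zero
	fmt.Println(m.BuildTime == nil, *m.BuildTime) // false 0
}
```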
+func (m *DeviceAccess) Unmarshal(dAtA []byte) error {
+	[... generated decoding: Device (1, message, required), Access (2, message, required); each decoded with a nested Unmarshal on the length-delimited slice, both bits checked after the loop ...]
+}
+func (m *DeviceAccess_Access) Unmarshal(dAtA []byte) error {
+	[... generated decoding: Read (1, bool), Write (2, bool), Mknod (3, bool); each decoded as a varint, converted with bool(v != 0), and stored through a bool pointer ...]
+}
+func (m *DeviceWhitelist) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowMesos
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeviceWhitelist: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeviceWhitelist: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AllowedDevices", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMesos
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthMesos
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.AllowedDevices = append(m.AllowedDevices, DeviceAccess{})
+			if err := m.AllowedDevices[len(m.AllowedDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipMesos(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthMesos
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
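`DeviceWhitelist.AllowedDevices` shows the repeated-message pattern: each element arrives as its own length-delimited record, and the decoder appends a zero value and unmarshals into it in place, avoiding an extra copy. A simplified sketch under hypothetical names (`record`, `appendRecord`):

```go
package main

import (
	"errors"
	"fmt"
)

// record stands in for a generated message type with an Unmarshal method.
type record struct{ payload []byte }

func (r *record) unmarshal(b []byte) error {
	r.payload = append([]byte(nil), b...)
	return nil
}

// appendRecord appends a zero value and decodes the next msglen bytes
// into it, the same shape as the generated AllowedDevices case.
func appendRecord(list []record, data []byte, i, msglen int) ([]record, int, error) {
	post := i + msglen
	if post > len(data) {
		return nil, 0, errors.New("unexpected EOF")
	}
	list = append(list, record{})
	if err := list[len(list)-1].unmarshal(data[i:post]); err != nil {
		return nil, 0, err
	}
	return list, post, nil
}

func main() {
	data := []byte("abcdef")
	var list []record
	list, next, _ := appendRecord(list, data, 0, 3)
	list, _, _ = appendRecord(list, data, next, 3)
	fmt.Printf("%q %q\n", list[0].payload, list[1].payload) // "abc" "def"
}
```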
+func skipMesos(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowMesos
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowMesos
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowMesos
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthMesos
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowMesos
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipMesos(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthMesos = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowMesos   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("mesos.proto", fileDescriptorMesos) }
+
+var fileDescriptorMesos = []byte{
+	// 12024 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0xbd, 0x5d, 0x90, 0x1b, 0x49,
+	0x72, 0x18, 0xbc, 0xf8, 0x07, 0x12, 0xc0, 0x4c, 0xb3, 0x66, 0x38, 0x3b, 0x04, 0xc9, 0x19, 0x2e,
+	0x76, 0xc9, 0x25, 0x97, 0x5c, 0xfe, 0xed, 0x72, 0x6f, 0xc9, 0xbd, 0xdd, 0x3d, 0x0c, 0xd0, 0x33,
+	0xd3, 0x47, 0xfc, 0x5d, 0x01, 0x20, 0x77, 0x2f, 0x14, 0x81, 0x68, 0x02, 0x3d, 0x33, 0x2d, 0x02,
+	0x68, 0x6c, 0x77, 0x63, 0x48, 0xde, 0xcb, 0x77, 0xfa, 0x2c, 0xc9, 0x27, 0x5b, 0xb2, 0xfd, 0xe0,
+	0x3f, 0xd9, 0x7e, 0xb0, 0x43, 0x0f, 0xd2, 0x83, 0xe5, 0x90, 0x64, 0x87, 0xc3, 0xe1, 0x70, 0xd8,
+	0x7e, 0xf0, 0x8f, 0xde, 0xac, 0x7b, 0x53, 0x84, 0x23, 0x68, 0xdd, 0x48, 0x0e, 0x4b, 0xb6, 0x6c,
+	0x9d, 0x25, 0xd9, 0x92, 0x6d, 0x59, 0x76, 0xd4, 0x6f, 0x57, 0x03, 0x18, 0xcc, 0x70, 0x4f, 0x0a,
+	0x3f, 0x01, 0x95, 0x95, 0x99, 0x55, 0x95, 0x95, 0x55, 0x95, 0x59, 0x55, 0x9d, 0x05, 0xd9, 0xa1,
+	0xe5, 0x39, 0xde, 0xcd, 0xb1, 0xeb, 0xf8, 0x0e, 0x4a, 0xd0, 0x44, 0xe1, 0xdd, 0x7d, 0xdb, 0x3f,
+	0x98, 0x3c, 0xb9, 0xd9, 0x73, 0x86, 0xb7, 0xf6, 0x9d, 0x7d, 0xe7, 0x16, 0xcd, 0x7d, 0x32, 0xd9,
+	0xa3, 0x29, 0x9a, 0xa0, 0xff, 0x18, 0x55, 0xf1, 0x1a, 0x64, 0xb7, 0x5d, 0x73, 0x68, 0x3d, 0x73,
+	0xdc, 0xa7, 0x46, 0x05, 0x15, 0x20, 0x71, 0x68, 0x0e, 0x26, 0xd6, 0x7a, 0xe4, 0x52, 0xf4, 0x6a,
+	0x66, 0x2b, 0xfe, 0xcb, 0x2f, 0x37, 0x5f, 0xc3, 0x0c, 0x54, 0xbc, 0x0c, 0xa9, 0xc6, 0xde, 0x9e,
+	0xe5, 0x9e, 0x8c, 0x56, 0xda, 0xb7, 0x46, 0xfe, 0x09, 0x68, 0x6f, 0x41, 0xb2, 0x6d, 0x7a, 0x27,
+	0x95, 0x79, 0x15, 0x40, 0x7f, 0x6e, 0xf5, 0x26, 0xbe, 0x73, 0x52, 0xb1, 0x1d, 0xc8, 0x96, 0x9d,
+	0x91, 0x6f, 0xda, 0xa3, 0x93, 0x6a, 0x88, 0xde, 0x81, 0xe4, 0xd8, 0x74, 0xad, 0x91, 0xbf, 0x1e,
+	0xbd, 0x14, 0xb9, 0x9a, 0xbd, 0x8b, 0x6e, 0x32, 0x39, 0x2a, 0xf4, 0x98, 0x63, 0x14, 0x6f, 0x03,
+	0xc2, 0x96, 0xe7, 0x4c, 0xdc, 0x9e, 0xd5, 0x74, 0x9d, 0x43, 0xbb, 0x7f, 0x62, 0xfb, 0xaf, 0x41,
+	0xb6, 0x31, 0xb6, 0x5c, 0xd3, 0xb7, 0x9d, 0xd1, 0x09, 0xa8, 0x77, 0x21, 0xdd, 0xb6, 0x87, 0x96,
+	0x31, 0xda, 0x73, 0xd0, 0x15, 0xc8, 0x8e, 0xcc, 0x91, 0xe3, 0x59, 0x3d, 0x67, 0xd4, 0xf7, 0x28,
+	0x76, 0x8c, 0x63, 0xab, 0x19, 0xc5, 0x0f, 0x20, 0x57, 0x99, 0x70, 0xee, 0xaf, 0x42, 0xf7,
0x18, + 0x52, 0xa5, 0x7e, 0xdf, 0xb5, 0x3c, 0x0f, 0x15, 0x20, 0x7d, 0xe0, 0x78, 0xfe, 0xc8, 0x1c, 0x92, + 0x5a, 0x45, 0xae, 0x66, 0xb0, 0x4c, 0xa3, 0x35, 0x88, 0xda, 0x63, 0x2a, 0x97, 0xcc, 0x56, 0xf2, + 0xe8, 0xe5, 0x66, 0xd4, 0x68, 0xe2, 0xa8, 0x3d, 0x46, 0xeb, 0x10, 0x1f, 0x3b, 0xae, 0xbf, 0x1e, + 0xbb, 0x14, 0xbd, 0x9a, 0xe0, 0xfc, 0x29, 0xa4, 0xf8, 0x8b, 0x11, 0x88, 0x75, 0x70, 0x15, 0x5d, + 0x80, 0xa4, 0xd7, 0x3b, 0xb0, 0x86, 0xe1, 0x96, 0x72, 0x18, 0xba, 0x09, 0x29, 0x93, 0x15, 0xbf, + 0x1e, 0xbd, 0x14, 0xbd, 0x9a, 0xbd, 0xbb, 0xc4, 0x85, 0xce, 0x2b, 0xc5, 0xd1, 0x05, 0x12, 0x42, + 0x10, 0x1f, 0x9b, 0xfe, 0xc1, 0x7a, 0x8c, 0xd6, 0x8f, 0xfe, 0x47, 0x37, 0x20, 0xf1, 0xc5, 0xc4, + 0x72, 0x5f, 0xac, 0xc7, 0x2f, 0xc5, 0xae, 0x66, 0xef, 0x6a, 0x9c, 0x43, 0xd3, 0x24, 0x0a, 0xec, + 0x5b, 0xae, 0x10, 0x2e, 0x45, 0x22, 0xad, 0xdc, 0x73, 0xcd, 0xfd, 0x21, 0xe9, 0xe7, 0x04, 0x6b, + 0xa5, 0x48, 0x17, 0x47, 0xb0, 0xd4, 0x19, 0x99, 0x87, 0xa6, 0x3d, 0x30, 0x9f, 0xd8, 0x03, 0xdb, + 0x7f, 0x81, 0xae, 0x43, 0xc2, 0xf3, 0x4d, 0xd7, 0xa7, 0x95, 0xcf, 0xde, 0x5d, 0xe6, 0xbc, 0x45, + 0xf7, 0x08, 0xd6, 0x14, 0x07, 0xdd, 0x82, 0x74, 0x9f, 0xf7, 0x01, 0x57, 0xa1, 0x15, 0x8e, 0xaf, + 0x76, 0x0d, 0x96, 0x48, 0xc5, 0x4f, 0x21, 0x53, 0x33, 0x7b, 0x07, 0xf6, 0xc8, 0xa2, 0x1a, 0xf1, + 0xca, 0xe2, 0x2f, 0xfe, 0x9b, 0x08, 0x64, 0x05, 0x07, 0xd2, 0xeb, 0x37, 0x20, 0x6a, 0xf7, 0x79, + 0x5d, 0x85, 0x1c, 0x64, 0x09, 0x5b, 0x40, 0x2a, 0x4b, 0xa9, 0x2b, 0x38, 0x6a, 0xf7, 0xd1, 0x75, + 0x88, 0x0f, 0x9d, 0xbe, 0x45, 0xf9, 0x2e, 0xdd, 0x7d, 0x7d, 0x0a, 0x7f, 0xb4, 0xe7, 0xdc, 0xac, + 0x39, 0x7d, 0x0b, 0x53, 0x24, 0xf4, 0x31, 0x2c, 0x4d, 0x42, 0xb2, 0xa1, 0x7d, 0x90, 0xbd, 0x7b, + 0x96, 0x93, 0x85, 0x05, 0x87, 0xa7, 0x90, 0x8b, 0x57, 0x20, 0x4e, 0x98, 0xa1, 0x24, 0x44, 0x3b, + 0x4d, 0x2d, 0x82, 0x72, 0x90, 0xae, 0xe0, 0x92, 0x51, 0x37, 0xea, 0x3b, 0x5a, 0x14, 0xa5, 0x21, + 0x5e, 0x69, 0x3c, 0xae, 0x6b, 0xb1, 0xe2, 0x6f, 0x24, 0x20, 0x1f, 0xcc, 0x3c, 0xa4, 0x4d, 0xeb, + 0x10, 0x9f, 0x78, 0x96, 0x1b, 0x52, 0x1f, 0x0a, 0x21, 0x39, 0x54, 0x5a, 0x51, 0x35, 0x87, 0xca, + 0xeb, 0x1d, 0x2a, 0x87, 0x58, 0x68, 0x18, 0x2b, 0xf3, 0x19, 0x97, 0x21, 0x93, 0xc2, 0x0d, 0xd0, + 0xf6, 0x4c, 0x7b, 0xe0, 0x1c, 0x5a, 0x6e, 0xd7, 0xb7, 0x87, 0x96, 0x33, 0xf1, 0xd7, 0xe3, 0x97, + 0x22, 0x57, 0x23, 0x0f, 0x22, 0xb7, 0xf1, 0xb2, 0xc8, 0x6a, 0xb3, 0x1c, 0x74, 0x19, 0xa0, 0x77, + 0x60, 0xf5, 0x9e, 0x8e, 0x1d, 0x9b, 0x2b, 0x50, 0xfa, 0x41, 0x62, 0xcf, 0x1c, 0x78, 0x16, 0x56, + 0x32, 0xd0, 0x39, 0x88, 0xbb, 0xce, 0xc0, 0x5a, 0x4f, 0x92, 0x2e, 0x7b, 0x10, 0x79, 0x67, 0x2b, + 0xba, 0x1e, 0xc1, 0x14, 0x14, 0xea, 0xe7, 0xd4, 0x54, 0x3f, 0x5f, 0x80, 0xcc, 0xd8, 0xb5, 0x47, + 0x3d, 0x7b, 0x6c, 0x0e, 0xd6, 0xd3, 0x34, 0x33, 0x00, 0xa0, 0x6b, 0x90, 0x79, 0x66, 0x3d, 0x99, + 0xd8, 0xdd, 0x89, 0x3b, 0x58, 0xcf, 0x50, 0x65, 0xc8, 0x1d, 0xbd, 0xdc, 0x4c, 0x3f, 0xb6, 0x9e, + 0x74, 0xec, 0x0e, 0xae, 0xe2, 0x34, 0xcd, 0xee, 0xb8, 0x03, 0x64, 0x40, 0xae, 0x67, 0x8e, 0x99, + 0xf0, 0x6d, 0xcb, 0x5b, 0x07, 0x3a, 0x34, 0x36, 0x67, 0x44, 0x41, 0x3a, 0xb9, 0x2c, 0x10, 0x5f, + 0x70, 0x19, 0x86, 0x48, 0xd1, 0x65, 0x48, 0x0e, 0xcc, 0x27, 0xd6, 0xc0, 0x5b, 0xcf, 0x52, 0x79, + 0xe6, 0x39, 0x93, 0x2a, 0x05, 0x62, 0x9e, 0x89, 0x56, 0x21, 0x41, 0x9a, 0xe7, 0xad, 0xe7, 0x2e, + 0xc5, 0xae, 0x66, 0x30, 0x4b, 0x14, 0xfe, 0x4a, 0x14, 0x20, 0xe0, 0x8f, 0x3e, 0x81, 0xb8, 0xff, + 0x62, 0xcc, 0xf4, 0x7b, 0xe9, 0xee, 0x5b, 0x27, 0x54, 0xe7, 0x66, 0xfb, 0xc5, 0xd8, 0x12, 0xfd, + 0x4a, 0xe8, 0x8a, 0xff, 0x2a, 0x02, 0x71, 0x02, 0x44, 0x59, 0x48, 0x75, 0xea, 0x0f, 0xeb, 0x44, + 0x67, 0x5e, 0x43, 
0xaf, 0xc3, 0x0a, 0xd6, 0x1f, 0x35, 0xca, 0xa5, 0xad, 0xaa, 0xde, 0xc5, 0x7a, + 0xab, 0xd1, 0xc1, 0x65, 0xbd, 0xa5, 0x45, 0xd0, 0x1a, 0xa0, 0x76, 0xa9, 0xf5, 0xb0, 0xfb, 0xd0, + 0xa8, 0x56, 0x8d, 0xfa, 0x4e, 0xb7, 0xd5, 0x2e, 0xb5, 0x75, 0x2d, 0x8a, 0xce, 0x40, 0x7e, 0xa7, + 0xd9, 0x51, 0x50, 0x63, 0x68, 0x15, 0xb4, 0xd6, 0x6e, 0x09, 0xeb, 0x15, 0x05, 0x1a, 0x47, 0x2b, + 0xb0, 0xdc, 0x2c, 0xe1, 0xb6, 0xd1, 0x36, 0x1a, 0xf5, 0x6e, 0xe9, 0x71, 0x09, 0xeb, 0x5a, 0x02, + 0x2d, 0x01, 0xd4, 0x3a, 0xd5, 0xb6, 0xd1, 0xc5, 0x8d, 0xaa, 0xae, 0x25, 0x51, 0x01, 0xd6, 0xb0, + 0xde, 0xd2, 0xf1, 0xa3, 0x12, 0x45, 0xc3, 0xfa, 0xb6, 0x51, 0xd7, 0x6b, 0x7a, 0xbd, 0xad, 0xa5, + 0x90, 0x06, 0x39, 0xac, 0xef, 0x04, 0xd4, 0xe9, 0x42, 0xfc, 0x3b, 0x3f, 0xb3, 0x11, 0x29, 0xfe, + 0xdd, 0x38, 0x64, 0xca, 0x44, 0x5d, 0xa8, 0x8a, 0xdf, 0x0a, 0x89, 0x45, 0x8c, 0x28, 0x99, 0x3f, + 0x23, 0x07, 0x74, 0x17, 0x52, 0x3d, 0x67, 0x38, 0x34, 0x47, 0x7d, 0x3e, 0xd1, 0xac, 0xcf, 0xd0, + 0x94, 0x59, 0x3e, 0x16, 0x88, 0xe8, 0x3d, 0x88, 0x1f, 0xf8, 0xfe, 0x78, 0x6a, 0xd8, 0x06, 0x04, + 0xbb, 0xbe, 0x3f, 0xde, 0x4a, 0x1f, 0xbd, 0xdc, 0x8c, 0xef, 0xb6, 0xdb, 0x4d, 0x4c, 0x91, 0xd1, + 0xdb, 0x90, 0xef, 0x5b, 0x03, 0xf3, 0x45, 0x57, 0x2c, 0x24, 0x6c, 0x64, 0x44, 0xef, 0xdc, 0xc3, + 0x39, 0x9a, 0xd1, 0x62, 0x70, 0xf4, 0x2e, 0x68, 0xf6, 0xc8, 0xb7, 0xdc, 0x43, 0x73, 0x20, 0x71, + 0x13, 0x1c, 0xf7, 0x36, 0x5e, 0x16, 0x79, 0x02, 0xfd, 0x3a, 0x2c, 0xf3, 0xb1, 0x26, 0xb1, 0x93, + 0x0c, 0xfb, 0xee, 0x6d, 0xbc, 0xc4, 0xb3, 0x04, 0xf2, 0x2d, 0x88, 0xf9, 0xbd, 0x31, 0x1d, 0x2c, + 0xd9, 0xbb, 0xab, 0xb3, 0xd2, 0xe9, 0x8d, 0xb7, 0x52, 0x47, 0x2f, 0x37, 0x63, 0xed, 0x72, 0x13, + 0x13, 0xcc, 0xc2, 0xc7, 0x90, 0xe2, 0xcd, 0x57, 0x25, 0xc5, 0xa6, 0xc5, 0x60, 0x55, 0xa7, 0x50, + 0x65, 0x16, 0x17, 0x88, 0x85, 0xf7, 0x21, 0x4e, 0x84, 0x21, 0x17, 0x37, 0x42, 0x98, 0x57, 0x17, + 0x37, 0xb9, 0x0c, 0x45, 0x83, 0x65, 0xa8, 0xb0, 0x09, 0xb1, 0x76, 0x6f, 0x01, 0x51, 0xf1, 0xfe, + 0x3c, 0xdd, 0xcd, 0x42, 0xaa, 0xdc, 0xa8, 0xd5, 0x4a, 0xf5, 0x8a, 0x16, 0x21, 0xd3, 0x20, 0x91, + 0xbd, 0x16, 0x45, 0x29, 0x20, 0xad, 0xd1, 0x62, 0x5c, 0x5d, 0x7e, 0x29, 0x09, 0xd9, 0x5d, 0xcb, + 0x1c, 0xf8, 0x07, 0xb4, 0xd9, 0x64, 0x1c, 0xd1, 0xbe, 0x8c, 0x50, 0x91, 0x5c, 0xe2, 0x4d, 0x52, + 0x30, 0x6e, 0x12, 0x3e, 0x52, 0x44, 0x27, 0x77, 0x6b, 0xf4, 0x15, 0xba, 0x35, 0xf6, 0x4a, 0xdd, + 0x1a, 0x3f, 0xb6, 0x5b, 0xdf, 0x87, 0xd5, 0x9e, 0x33, 0xf2, 0x88, 0x19, 0x67, 0x1f, 0x5a, 0x5d, + 0x32, 0xd3, 0x4e, 0x5c, 0x8b, 0xa9, 0x4d, 0xfe, 0x41, 0xe4, 0x3d, 0xbc, 0xa2, 0x64, 0x6f, 0xf3, + 0x5c, 0x42, 0xb5, 0xef, 0x9a, 0x3d, 0xab, 0x3b, 0xb6, 0x5c, 0xdb, 0xe9, 0x4f, 0xab, 0xcf, 0x9d, + 0xdb, 0x18, 0xd1, 0xfc, 0x26, 0xcd, 0x16, 0x65, 0xdd, 0x08, 0xd4, 0x20, 0x35, 0x65, 0xdc, 0x49, + 0x35, 0x08, 0x86, 0xca, 0x1d, 0x3e, 0x1e, 0xd3, 0xa1, 0x85, 0x51, 0x15, 0xef, 0xcc, 0x88, 0x7c, + 0xc0, 0x74, 0x34, 0x43, 0x99, 0x6f, 0xce, 0xa3, 0x28, 0x2b, 0xfd, 0x11, 0x56, 0xd7, 0x5f, 0x88, + 0x40, 0x3e, 0xd4, 0x5f, 0xaf, 0xa6, 0x79, 0x68, 0x4d, 0x9a, 0x58, 0xcc, 0x2c, 0x12, 0xc6, 0x55, + 0x01, 0xd2, 0x9e, 0x6f, 0xfa, 0x13, 0xcf, 0xf2, 0xa8, 0x6d, 0x94, 0xc7, 0x32, 0x8d, 0x3e, 0x86, + 0x34, 0xb5, 0xf4, 0x7b, 0xce, 0x80, 0x0a, 0x7c, 0xe9, 0xee, 0x79, 0x5e, 0xe9, 0xba, 0xe5, 0xcb, + 0xb9, 0xb8, 0xc9, 0x51, 0x1e, 0xc4, 0x8d, 0xe6, 0xe1, 0xfb, 0x58, 0x92, 0x14, 0xf6, 0x21, 0xa7, + 0x36, 0x68, 0x41, 0x85, 0xd5, 0x82, 0xa2, 0xaf, 0x5c, 0xd0, 0x0f, 0x32, 0x68, 0x2a, 0x00, 0x0f, + 0xed, 0xc1, 0xa0, 0xe9, 0x0c, 0xec, 0xde, 0x0b, 0xf4, 0x01, 0xe4, 0x54, 0xbd, 0xe1, 0x43, 0x67, + 0xae, 0x81, 0x96, 0x55, 0x14, 0xa8, 0xf8, 
0x47, 0x51, 0xe2, 0x41, 0x48, 0x25, 0x41, 0x1f, 0x42, + 0x7c, 0xe2, 0xda, 0xc4, 0xa2, 0x26, 0x2b, 0xea, 0xda, 0xac, 0x1a, 0xdd, 0xec, 0x60, 0x63, 0x2b, + 0xc7, 0x4d, 0xad, 0x78, 0x07, 0x1b, 0x1e, 0xa6, 0x14, 0xe8, 0x7d, 0xc8, 0x5a, 0xa3, 0x43, 0xdb, + 0x75, 0x46, 0xc3, 0x59, 0x27, 0x43, 0x0f, 0x72, 0xb0, 0x8a, 0x46, 0xd6, 0x55, 0xe6, 0x28, 0xb0, + 0xbe, 0xe5, 0xbe, 0x0a, 0xe2, 0x46, 0x11, 0xb3, 0x60, 0x99, 0x39, 0x54, 0x80, 0x84, 0x77, 0x60, + 0x0d, 0x06, 0x74, 0x28, 0xa4, 0x1f, 0xc4, 0x7d, 0x77, 0x62, 0x61, 0x06, 0x22, 0x86, 0x85, 0xe9, + 0xee, 0x4f, 0x08, 0x47, 0x6f, 0x3d, 0x45, 0x57, 0xe8, 0x00, 0x50, 0xf8, 0xeb, 0xd4, 0x56, 0x37, + 0x16, 0x7a, 0x47, 0x1b, 0x00, 0x16, 0x75, 0xb9, 0xcc, 0x27, 0x03, 0x66, 0x32, 0xa6, 0xb1, 0x02, + 0x41, 0x1b, 0x90, 0xb2, 0x9e, 0xfb, 0xae, 0xd9, 0xf3, 0x69, 0x4d, 0x45, 0xf9, 0x02, 0x48, 0xda, + 0xd1, 0x33, 0x7b, 0x07, 0x16, 0x9d, 0x10, 0xd2, 0x98, 0x25, 0xd0, 0x26, 0x64, 0x9d, 0x89, 0x3f, + 0x9e, 0xf8, 0xdd, 0x3d, 0x7b, 0x60, 0xf1, 0xe6, 0x00, 0x03, 0x6d, 0xdb, 0x03, 0xab, 0xf8, 0x63, + 0x09, 0xc8, 0x49, 0x57, 0x8f, 0xc8, 0x7f, 0x1b, 0xb2, 0x16, 0x4f, 0x77, 0xa5, 0xad, 0x7b, 0x46, + 0x48, 0x51, 0x3a, 0x85, 0x5b, 0x88, 0xf7, 0x80, 0xe2, 0x28, 0x8a, 0xfa, 0x3a, 0xae, 0xd1, 0x27, + 0x12, 0xec, 0x9b, 0xbe, 0x49, 0xab, 0x93, 0xc3, 0xf4, 0x3f, 0x7a, 0x0f, 0x32, 0x2e, 0xf7, 0xea, + 0xc8, 0x34, 0x14, 0x53, 0x2c, 0x7e, 0xe1, 0xed, 0x71, 0xa1, 0x04, 0x78, 0xaf, 0x38, 0xb5, 0x6c, + 0x43, 0x6e, 0x4f, 0x98, 0x3b, 0xa4, 0xfe, 0xe9, 0x63, 0x6d, 0xd4, 0xe5, 0xa3, 0x97, 0x9b, 0xaa, + 0x13, 0x8e, 0xb3, 0x92, 0x90, 0x55, 0x9f, 0x5a, 0x90, 0x19, 0xa6, 0x00, 0xd4, 0x7a, 0x2c, 0x40, + 0x92, 0x55, 0x6a, 0x1d, 0xa8, 0x71, 0x48, 0x6c, 0x4e, 0x0e, 0x41, 0x77, 0x21, 0xd3, 0x13, 0x7e, + 0x2c, 0x37, 0xe4, 0x56, 0x67, 0xfc, 0x5b, 0x52, 0xd3, 0x00, 0x8d, 0xd0, 0xf4, 0x6d, 0xaf, 0x47, + 0xcc, 0xdf, 0x17, 0xeb, 0xb9, 0x10, 0x4d, 0x45, 0xc0, 0x19, 0x8d, 0x44, 0x43, 0x3b, 0x70, 0xd6, + 0x3b, 0x98, 0xf8, 0x7d, 0xe7, 0xd9, 0xa8, 0x1b, 0x1a, 0x6f, 0xf9, 0xe3, 0xc7, 0xdb, 0x8a, 0xa0, + 0xd8, 0x09, 0xc6, 0x9d, 0x62, 0x76, 0x2e, 0x2d, 0x32, 0x3b, 0xef, 0xf2, 0xa9, 0x7a, 0x99, 0x4e, + 0x2d, 0xeb, 0xd3, 0x7a, 0x30, 0xcf, 0x7a, 0x2a, 0xde, 0x3d, 0x66, 0x4e, 0xa9, 0xe8, 0xdb, 0xa5, + 0x4e, 0xb5, 0xad, 0x45, 0x10, 0x40, 0xb2, 0xdc, 0x69, 0xb5, 0x1b, 0x35, 0x2d, 0xca, 0x27, 0x93, + 0x7f, 0x18, 0x05, 0xa8, 0x38, 0x43, 0xd3, 0x66, 0xee, 0xf5, 0xd7, 0x20, 0xb7, 0x67, 0x4e, 0x06, + 0x7e, 0xb7, 0x4f, 0x61, 0x7c, 0x36, 0xb9, 0x28, 0x5a, 0x27, 0x11, 0x6f, 0x6e, 0x13, 0x2c, 0x96, + 0xc6, 0xd9, 0xbd, 0x20, 0x51, 0xf8, 0xf7, 0x11, 0xc8, 0x2a, 0x99, 0x68, 0x1b, 0x92, 0xae, 0xb5, + 0x4f, 0x5c, 0x47, 0xa6, 0xd2, 0x57, 0x17, 0xf2, 0xba, 0x89, 0x29, 0xae, 0x62, 0xbd, 0x70, 0x6a, + 0xf4, 0x35, 0x88, 0x7f, 0xcb, 0x19, 0x59, 0xdc, 0x9d, 0xbe, 0xb2, 0x98, 0xcb, 0x37, 0x9d, 0x91, + 0xea, 0xc7, 0x52, 0xca, 0xc2, 0x15, 0x80, 0x80, 0xbb, 0x74, 0xb2, 0x22, 0xd3, 0x4e, 0x56, 0xe1, + 0x2d, 0x48, 0x0b, 0xfa, 0xe3, 0xb1, 0x8a, 0xff, 0x22, 0x06, 0x50, 0x33, 0x3d, 0x9f, 0xa9, 0x17, + 0x2a, 0x48, 0x0f, 0x35, 0x33, 0xe3, 0x8f, 0x16, 0xb8, 0x97, 0x4b, 0x96, 0x90, 0x20, 0x6f, 0x76, + 0xa3, 0x21, 0xff, 0x20, 0x7e, 0xef, 0xf6, 0xbd, 0xdb, 0x7c, 0x81, 0x39, 0x07, 0xb1, 0xb1, 0xdd, + 0xa7, 0xe3, 0x38, 0xc3, 0x16, 0xd6, 0xa6, 0x51, 0xc1, 0x04, 0x16, 0x72, 0xb5, 0x12, 0x53, 0xae, + 0xd6, 0x3a, 0xa4, 0x0e, 0x2d, 0xd7, 0x23, 0x02, 0xa7, 0x4e, 0x1a, 0x16, 0x49, 0x74, 0x35, 0xd8, + 0x93, 0x60, 0x03, 0x7a, 0x6a, 0x4f, 0x22, 0xd8, 0x8d, 0xb8, 0x06, 0x49, 0xde, 0xff, 0x6c, 0x18, + 0x9f, 0x99, 0x91, 0x36, 0xe6, 0x08, 0x64, 0xdc, 0x87, 0x1c, 0xb2, 
0x0c, 0x9d, 0x5d, 0x2e, 0x48, + 0x9f, 0x5b, 0x08, 0xe8, 0x04, 0x6f, 0xac, 0x70, 0x18, 0xf2, 0xa7, 0x3e, 0x0a, 0x39, 0x0e, 0x6f, + 0x2c, 0xe2, 0x36, 0x3b, 0x0c, 0xae, 0xcf, 0x1b, 0x06, 0x1a, 0xe4, 0x4a, 0x3b, 0x7a, 0xbd, 0xdd, + 0xed, 0x34, 0x2b, 0xc4, 0x59, 0x8a, 0x70, 0xfd, 0xff, 0xb7, 0x31, 0xc8, 0xb0, 0xfd, 0x3b, 0xd2, + 0x8b, 0x97, 0x42, 0x7b, 0x15, 0x41, 0x97, 0x07, 0xe2, 0x0d, 0x4d, 0xa5, 0xb1, 0x53, 0x4e, 0xa5, + 0x1f, 0x00, 0x98, 0xbe, 0xef, 0xda, 0x4f, 0x26, 0xbe, 0x9c, 0x80, 0xc5, 0x36, 0x46, 0x49, 0x64, + 0x70, 0x32, 0x05, 0x13, 0x5d, 0xa1, 0x4a, 0x95, 0x0c, 0x77, 0x16, 0xdb, 0x6c, 0x0c, 0xb9, 0xfa, + 0x42, 0x89, 0x48, 0x6f, 0x25, 0xa8, 0x12, 0xdd, 0xe1, 0x4a, 0x14, 0xf4, 0x24, 0x9c, 0xd0, 0x93, + 0x85, 0xef, 0x46, 0x42, 0x5d, 0xf0, 0x20, 0xd4, 0x05, 0x97, 0x42, 0xa5, 0x9f, 0xd4, 0x03, 0x3f, + 0x3e, 0xd7, 0x9d, 0x0d, 0xfb, 0x97, 0x11, 0x74, 0x16, 0xce, 0xec, 0x1a, 0x3a, 0x2e, 0xe1, 0xf2, + 0xae, 0x51, 0x2e, 0x55, 0x19, 0x38, 0xba, 0xc0, 0xed, 0x8c, 0x11, 0x12, 0xe1, 0xc6, 0x76, 0x9b, + 0xb8, 0xf1, 0xc8, 0xa8, 0xe8, 0x58, 0x8b, 0x13, 0xbf, 0x17, 0xeb, 0x2d, 0xe3, 0x9b, 0x7a, 0xf7, + 0x51, 0xa3, 0xda, 0xa9, 0xe9, 0x5a, 0x82, 0xf7, 0xee, 0x3f, 0x8e, 0xc2, 0x5a, 0xb9, 0x65, 0x34, + 0x07, 0x93, 0x7d, 0x7b, 0x14, 0x5a, 0x0f, 0x50, 0x09, 0xd2, 0x9e, 0xe5, 0x1e, 0xda, 0xa4, 0x1f, + 0x89, 0xcd, 0xb3, 0x74, 0xf7, 0xb2, 0x58, 0x37, 0xe6, 0x12, 0xdc, 0x6c, 0x31, 0x6c, 0x2c, 0xc9, + 0xd4, 0x15, 0x32, 0x7a, 0xf2, 0x0a, 0xf9, 0xa5, 0x34, 0x27, 0xb4, 0xbc, 0xc5, 0x4f, 0xb5, 0xbc, + 0x15, 0xb7, 0x21, 0xc5, 0xeb, 0x1a, 0x96, 0xff, 0x1a, 0xa0, 0x72, 0xa3, 0xde, 0xc6, 0x8d, 0x6a, + 0x55, 0xc7, 0x5d, 0x22, 0x61, 0xa3, 0x4c, 0xfa, 0x41, 0x83, 0x5c, 0xbd, 0x51, 0xd1, 0x25, 0x44, + 0x2c, 0x0d, 0xdf, 0x89, 0x40, 0x5e, 0xca, 0x42, 0xcc, 0x86, 0x5c, 0x27, 0x94, 0xd9, 0x90, 0xba, + 0x09, 0xc7, 0x6f, 0x59, 0x95, 0x01, 0x64, 0xd5, 0x44, 0xbb, 0x2f, 0x2e, 0x94, 0xb4, 0x18, 0x08, + 0x01, 0x59, 0xf1, 0xa7, 0x62, 0xb0, 0x3a, 0xb3, 0x2f, 0x4d, 0x6a, 0x74, 0x87, 0x4f, 0xbb, 0x44, + 0x30, 0xe7, 0xa6, 0xa4, 0x19, 0x6c, 0x60, 0x87, 0x06, 0x4b, 0x78, 0x30, 0x46, 0x4f, 0x3d, 0x18, + 0x45, 0xe3, 0x63, 0xc7, 0x36, 0x3e, 0x3e, 0xd3, 0xf8, 0xcf, 0x60, 0xb5, 0x6f, 0xb1, 0x05, 0xd5, + 0xb5, 0x88, 0xde, 0x50, 0xe3, 0x40, 0x4c, 0x01, 0x9b, 0x53, 0x15, 0x26, 0x7f, 0x04, 0x8e, 0x22, + 0x88, 0x15, 0xce, 0x42, 0xc9, 0x25, 0x7e, 0x4e, 0xca, 0xf3, 0x1d, 0xd7, 0xdc, 0xb7, 0xf8, 0xfc, + 0xf0, 0xe6, 0x71, 0xad, 0xa7, 0xba, 0xcb, 0x50, 0xb1, 0xa0, 0x29, 0x7c, 0x0c, 0x29, 0x0e, 0x43, + 0x77, 0x21, 0x39, 0xa6, 0x9d, 0xc0, 0x17, 0xe8, 0xd5, 0xe9, 0xce, 0x51, 0x17, 0x63, 0x86, 0x59, + 0xfc, 0xcd, 0x18, 0x24, 0x1e, 0x51, 0xf3, 0xf9, 0xba, 0xa2, 0x12, 0x4b, 0x72, 0x7a, 0xa1, 0x79, + 0xb3, 0xce, 0xe4, 0x75, 0xe2, 0xd0, 0x99, 0x03, 0xd3, 0x9d, 0xda, 0x46, 0x66, 0xe8, 0x2d, 0x9a, + 0x85, 0x39, 0x0a, 0x41, 0x76, 0xcd, 0xd1, 0xbe, 0xe5, 0xf1, 0x9d, 0x9d, 0x30, 0x32, 0xa6, 0x59, + 0x98, 0xa3, 0xa0, 0x22, 0xc4, 0x3c, 0xcb, 0xe7, 0x23, 0x44, 0x0b, 0xb3, 0xb5, 0x7c, 0x4c, 0x32, + 0xd1, 0x65, 0x88, 0xfb, 0xd6, 0x73, 0xb6, 0xb9, 0x99, 0x9d, 0xae, 0xaa, 0xf5, 0xdc, 0xc7, 0x34, + 0xbb, 0xf0, 0x16, 0x24, 0x59, 0x4d, 0xc2, 0x6e, 0x43, 0x24, 0xe4, 0x36, 0x14, 0x3e, 0x82, 0x04, + 0xad, 0x02, 0x41, 0x7a, 0x62, 0x09, 0xe9, 0xc5, 0x05, 0x12, 0x05, 0xa1, 0x35, 0x88, 0x59, 0x74, + 0x72, 0x08, 0x72, 0x08, 0xa0, 0xf0, 0x21, 0x24, 0x59, 0xfd, 0xd1, 0x4d, 0x48, 0xd0, 0x16, 0x70, + 0xb7, 0x0b, 0xcd, 0xb6, 0x51, 0x70, 0xa4, 0x68, 0x85, 0x73, 0x10, 0x6b, 0x59, 0xd4, 0x5b, 0xb6, + 0x7d, 0x6b, 0x48, 0xa9, 0x32, 0x98, 0xfe, 0x2f, 0x14, 0x21, 0x4e, 0x5a, 0xb1, 0xf0, 0x04, 
0xe6, + 0x0e, 0x9f, 0x97, 0x01, 0x92, 0xad, 0x72, 0xa9, 0x5a, 0xc2, 0xda, 0x6b, 0xe4, 0x3f, 0x2e, 0xd5, + 0x77, 0xe8, 0xc6, 0x62, 0x0a, 0x62, 0x2d, 0xbd, 0xcd, 0x36, 0xae, 0xdb, 0xfa, 0x67, 0x6d, 0x2d, + 0x56, 0xfc, 0x83, 0x08, 0x64, 0xe4, 0xb0, 0x38, 0xde, 0x1e, 0x92, 0x8a, 0x10, 0x7d, 0x35, 0x45, + 0x88, 0xbd, 0x8a, 0x22, 0xc4, 0x4f, 0x56, 0x84, 0xd3, 0x75, 0xb2, 0xd0, 0x97, 0xe4, 0x02, 0x7d, + 0x29, 0xfe, 0x5a, 0x0e, 0xd2, 0x62, 0x34, 0x2d, 0x68, 0xf8, 0xe5, 0x13, 0x1a, 0xfe, 0xa7, 0xde, + 0x64, 0xde, 0x96, 0xc4, 0x22, 0xdd, 0x5f, 0xb0, 0x6f, 0x7f, 0x03, 0xe2, 0x7d, 0xdb, 0x7b, 0xca, + 0x6d, 0xc2, 0xf5, 0xe9, 0x39, 0xa9, 0x62, 0x7b, 0x74, 0x2b, 0x03, 0x53, 0x2c, 0x54, 0x82, 0xac, + 0x32, 0x93, 0x71, 0xfb, 0xf0, 0xa4, 0x89, 0x0c, 0xab, 0x34, 0xe8, 0x23, 0xb2, 0x10, 0x1e, 0x3a, + 0x3d, 0xea, 0x70, 0x67, 0x42, 0x0e, 0x86, 0xc2, 0x80, 0x23, 0xb0, 0xc5, 0x4d, 0xe2, 0x93, 0xd9, + 0xca, 0x3b, 0x30, 0x5d, 0xab, 0xcf, 0x0d, 0x9a, 0xc2, 0x34, 0x65, 0x8b, 0xe6, 0x32, 0xcb, 0x86, + 0x61, 0xa2, 0x1d, 0x58, 0x36, 0x07, 0x03, 0xa7, 0x47, 0x8b, 0xef, 0xda, 0xa3, 0x3d, 0x87, 0x7b, + 0x8a, 0x1b, 0xd3, 0xc4, 0x25, 0x89, 0x46, 0x19, 0x2c, 0x99, 0xa1, 0x34, 0xfa, 0x3a, 0x64, 0xc7, + 0x7c, 0x5a, 0x25, 0x3e, 0x6e, 0xee, 0xa4, 0x65, 0x67, 0x89, 0xf8, 0xe9, 0x41, 0x1a, 0x83, 0xa0, + 0x36, 0xfa, 0xc8, 0x80, 0x5c, 0x68, 0x49, 0xc8, 0xbf, 0xca, 0x92, 0x10, 0x22, 0x2d, 0xbc, 0x05, + 0x4b, 0xe1, 0x8a, 0x93, 0xf9, 0x81, 0x76, 0x37, 0x3b, 0x6f, 0xa3, 0xff, 0x0b, 0xbf, 0x1a, 0x81, + 0xe5, 0x29, 0x6e, 0xe1, 0x73, 0x99, 0xc8, 0xf4, 0xb9, 0x4c, 0xe0, 0xaa, 0x46, 0x17, 0xb9, 0xaa, + 0xa2, 0xb0, 0x58, 0x50, 0x18, 0xfa, 0x90, 0x0f, 0x8a, 0x78, 0xe8, 0x40, 0xe4, 0xb8, 0x56, 0x29, + 0xe3, 0x64, 0xbe, 0x13, 0x4b, 0xe6, 0xab, 0x76, 0xa9, 0x6d, 0x94, 0xb5, 0x08, 0x75, 0x68, 0x3f, + 0xaf, 0x97, 0x6a, 0x46, 0x59, 0x58, 0x2a, 0x85, 0xbf, 0x96, 0x80, 0xb4, 0xd0, 0x53, 0xb4, 0x0d, + 0xd9, 0x31, 0xf1, 0x78, 0x3c, 0xdf, 0x1a, 0xf5, 0x2c, 0x6e, 0x1b, 0xbc, 0x75, 0x9c, 0x5a, 0xdf, + 0x6c, 0x06, 0xb8, 0x58, 0x25, 0x24, 0xad, 0x3f, 0x74, 0x06, 0x93, 0xa1, 0x35, 0xd5, 0xfa, 0x47, + 0x14, 0x88, 0x79, 0x26, 0xfa, 0x8a, 0xdc, 0x9c, 0x88, 0xcd, 0x1f, 0x0b, 0xb2, 0xa4, 0x16, 0x4d, + 0x8b, 0x9d, 0x8b, 0xc2, 0x0e, 0x64, 0x95, 0xb2, 0x17, 0x3a, 0x90, 0xa1, 0x6e, 0x8a, 0x4e, 0x75, + 0x53, 0xe1, 0xa7, 0x63, 0x90, 0x64, 0xbc, 0x95, 0x73, 0xa8, 0xe8, 0x3c, 0xb1, 0x4f, 0x55, 0x65, + 0x76, 0x5e, 0xfe, 0x50, 0xd9, 0x85, 0xcd, 0x9e, 0x4c, 0xdf, 0x34, 0xfd, 0x03, 0xbe, 0x57, 0xfb, + 0x11, 0x24, 0x86, 0xce, 0x64, 0xe4, 0x73, 0x29, 0x5c, 0x3e, 0x89, 0xb4, 0x46, 0x90, 0x31, 0xa3, + 0xa1, 0xc7, 0xc0, 0xc2, 0xd3, 0x55, 0x4d, 0xb5, 0x6b, 0x90, 0x1e, 0x5a, 0xbe, 0x49, 0xf7, 0xb3, + 0x12, 0xf3, 0x54, 0x50, 0x66, 0x13, 0xb7, 0x77, 0xec, 0x3a, 0x74, 0xb3, 0x8d, 0xbb, 0xbd, 0x3c, + 0x59, 0x28, 0x40, 0x9c, 0xd4, 0x93, 0xa9, 0xa9, 0xe3, 0x07, 0x63, 0xc2, 0xf1, 0x0b, 0xe7, 0x21, + 0x41, 0x2b, 0x32, 0x2f, 0xb3, 0xb8, 0x35, 0x4f, 0x13, 0xd3, 0x10, 0x6f, 0x96, 0xda, 0xbb, 0x5a, + 0x04, 0x65, 0x20, 0x51, 0x6b, 0x74, 0xea, 0x64, 0xb5, 0xcc, 0x40, 0x62, 0xab, 0xda, 0x28, 0x3f, + 0xd4, 0x62, 0x64, 0x05, 0xc5, 0xa5, 0xc7, 0x5a, 0x9c, 0x6b, 0xe6, 0x32, 0xe4, 0x43, 0x53, 0x59, + 0x21, 0x07, 0x10, 0xcc, 0x50, 0xc5, 0x7f, 0x10, 0x85, 0xf5, 0xb6, 0x6b, 0xee, 0xed, 0xd9, 0x3d, + 0x62, 0x02, 0xbb, 0xce, 0xa0, 0xe5, 0x9b, 0xbe, 0xed, 0xf9, 0x76, 0xcf, 0x5b, 0xa8, 0x11, 0xeb, + 0x90, 0x7a, 0x62, 0xf6, 0x9e, 0x0e, 0x9c, 0x7d, 0xda, 0x57, 0x71, 0x2c, 0x92, 0x68, 0x15, 0x12, + 0x4f, 0x5e, 0xf8, 0xdc, 0x6a, 0x8a, 0x63, 0x96, 0x20, 0xd0, 0xbe, 0xeb, 0x8c, 0xd9, 0x7a, 0x12, + 0xc7, 0x2c, 0x81, 
0x36, 0x00, 0x9c, 0x43, 0xcb, 0x1d, 0xd8, 0x43, 0xdb, 0x67, 0xe7, 0x13, 0x71, + 0xac, 0x40, 0xa8, 0x50, 0xcd, 0xde, 0x53, 0xcb, 0x67, 0xc7, 0x10, 0x71, 0x2c, 0x92, 0x44, 0x5e, + 0x5f, 0x0c, 0xac, 0x11, 0x5d, 0x34, 0xe2, 0x98, 0xfe, 0x47, 0x97, 0x21, 0xe5, 0x9a, 0xbe, 0xf5, + 0x64, 0xec, 0xd1, 0x65, 0x21, 0xbe, 0x95, 0x3d, 0x7a, 0xb9, 0x99, 0xc2, 0xa6, 0x6f, 0x6d, 0x35, + 0x5b, 0x58, 0xe4, 0x09, 0xb4, 0xf1, 0xd8, 0xa3, 0x93, 0xbf, 0x82, 0xd6, 0x14, 0x68, 0xe3, 0x31, + 0xbd, 0xb5, 0xe1, 0x5a, 0x5f, 0x4c, 0xac, 0x09, 0x3d, 0xe5, 0x25, 0xa5, 0xc8, 0x74, 0xf1, 0x47, + 0x13, 0x90, 0x33, 0xc6, 0x8a, 0xa8, 0x36, 0x00, 0xb6, 0x1d, 0xf7, 0x99, 0xe9, 0xf6, 0xed, 0xd1, + 0x3e, 0xed, 0xc4, 0x18, 0x56, 0x20, 0x24, 0xbf, 0xc2, 0x8c, 0xe8, 0x76, 0xbb, 0x4a, 0x25, 0x16, + 0xc3, 0x0a, 0x84, 0xe4, 0x1b, 0x23, 0x6c, 0xf5, 0x2c, 0xfb, 0x90, 0x4b, 0x2e, 0x86, 0x15, 0x08, + 0xba, 0x04, 0x59, 0x63, 0xb4, 0xdb, 0x77, 0x75, 0xd7, 0x75, 0x5c, 0x26, 0xc4, 0x18, 0x56, 0x41, + 0xa8, 0x08, 0x39, 0x63, 0x54, 0xea, 0x4b, 0x94, 0x04, 0x45, 0x09, 0xc1, 0xd0, 0x5b, 0x90, 0x27, + 0x75, 0xaa, 0x98, 0xbe, 0xb9, 0xef, 0x9a, 0x43, 0x26, 0xd4, 0x18, 0x0e, 0x03, 0xd1, 0x55, 0x58, + 0x36, 0x46, 0x9d, 0xd1, 0xd3, 0x91, 0xf3, 0x6c, 0x44, 0x8f, 0x0f, 0xd8, 0x76, 0x4d, 0x0c, 0x4f, + 0x83, 0x59, 0xad, 0x2b, 0xb6, 0xd7, 0x33, 0xdd, 0x3e, 0x93, 0x39, 0xad, 0xb5, 0x80, 0xf0, 0x7c, + 0x6b, 0x60, 0x1f, 0x12, 0xd7, 0x2b, 0x23, 0xf3, 0x39, 0x84, 0xb4, 0xaa, 0x31, 0xf1, 0x31, 0x91, + 0xaa, 0xe7, 0x33, 0x29, 0xc7, 0xb0, 0x0a, 0xe2, 0x18, 0xb2, 0x88, 0xac, 0xc4, 0x90, 0x65, 0x30, + 0x8c, 0xba, 0x83, 0x1d, 0xea, 0x4e, 0xe5, 0x24, 0x86, 0x00, 0x11, 0xc9, 0x60, 0xcb, 0xf4, 0x86, + 0xfc, 0xa6, 0x01, 0xdd, 0x30, 0x8d, 0xe1, 0x10, 0x8c, 0xd4, 0x94, 0xa6, 0xb1, 0xf5, 0x45, 0x9f, + 0x6d, 0x8c, 0xc6, 0xb0, 0x02, 0x21, 0xca, 0x40, 0x53, 0x8d, 0x87, 0x1e, 0xdd, 0x11, 0x8d, 0x61, + 0x99, 0x96, 0xb4, 0xdb, 0xa6, 0x3d, 0xf0, 0xd6, 0x35, 0x85, 0x96, 0x42, 0x88, 0x12, 0x6f, 0xbb, + 0xe6, 0x3e, 0x21, 0x3d, 0x43, 0x33, 0x45, 0x92, 0x4c, 0xab, 0xe4, 0x2f, 0x23, 0x44, 0x34, 0x2f, + 0x00, 0x90, 0x96, 0x91, 0x44, 0xd9, 0xb5, 0x4c, 0xd2, 0xb2, 0x15, 0xd6, 0x32, 0x05, 0x54, 0xfc, + 0xa7, 0x29, 0x58, 0x32, 0x7a, 0x43, 0x55, 0x11, 0xd7, 0x20, 0x69, 0x8c, 0x6a, 0xde, 0xbe, 0xc7, + 0x95, 0x90, 0xa7, 0x48, 0x03, 0x8c, 0x11, 0x57, 0x0d, 0xa6, 0x7e, 0x32, 0xcd, 0x54, 0xa7, 0xec, + 0x4d, 0x86, 0x3c, 0x3f, 0x26, 0x54, 0x27, 0x80, 0xa1, 0x2b, 0xb0, 0x44, 0x3a, 0xce, 0xf3, 0x3b, + 0x23, 0xd7, 0x32, 0x7b, 0x07, 0x42, 0x07, 0xa7, 0xa0, 0x4c, 0x51, 0x89, 0x54, 0xf5, 0xe7, 0xbd, + 0xbe, 0xd0, 0x42, 0x15, 0xc4, 0x30, 0x9a, 0xa6, 0x3b, 0x6c, 0xba, 0xce, 0x13, 0xa1, 0x82, 0x2a, + 0x88, 0xd5, 0xa7, 0xe5, 0xf6, 0xbe, 0x31, 0xb1, 0x46, 0xa4, 0xa4, 0x94, 0xa8, 0x4f, 0x00, 0x63, + 0x5c, 0xb0, 0xd5, 0xb7, 0x5d, 0xab, 0xe7, 0x0b, 0xdd, 0x53, 0x41, 0x44, 0xec, 0xc6, 0x48, 0xef, + 0x1d, 0x38, 0x42, 0xf3, 0x44, 0x92, 0xa9, 0x25, 0xf9, 0x8b, 0xad, 0xb1, 0xd0, 0x3a, 0x05, 0xc2, + 0xca, 0x27, 0x15, 0xf6, 0x7c, 0x73, 0x38, 0x16, 0x5a, 0x17, 0x82, 0xb1, 0x41, 0x22, 0xd3, 0x94, + 0x51, 0x4e, 0x0c, 0x92, 0x10, 0x98, 0xd5, 0x94, 0x0c, 0xc2, 0x9a, 0xe9, 0x3d, 0xf5, 0xb8, 0xf6, + 0xa9, 0x20, 0x26, 0x5b, 0x91, 0xa4, 0xac, 0x96, 0x84, 0x6c, 0x55, 0x28, 0x69, 0x51, 0x63, 0xe2, + 0xd3, 0xce, 0x65, 0x3a, 0x28, 0x92, 0x44, 0x91, 0x1a, 0x13, 0x9f, 0x77, 0x1f, 0xd3, 0xc0, 0x00, + 0x40, 0xea, 0x4a, 0x46, 0x8c, 0xda, 0x79, 0x4c, 0x11, 0xa7, 0xc1, 0xa4, 0xe5, 0x8d, 0x89, 0x1f, + 0x74, 0x1f, 0xd3, 0xc9, 0x10, 0x8c, 0xe3, 0x04, 0x1d, 0xb8, 0x22, 0x71, 0x82, 0x1e, 0x7c, 0x0b, + 0xf2, 0x8d, 0x89, 0xaf, 0x74, 0xe1, 0x2a, 
0x9b, 0x68, 0x42, 0x40, 0xce, 0x29, 0xe8, 0xc4, 0xb3, + 0x92, 0x53, 0xd0, 0x8b, 0x05, 0x48, 0x93, 0x86, 0xd0, 0x6e, 0x5c, 0x63, 0x7a, 0x2b, 0xd2, 0x7c, + 0xe8, 0xcb, 0x8e, 0x7c, 0x5d, 0x0e, 0x7d, 0xd9, 0x93, 0xac, 0x1e, 0x4a, 0x57, 0xae, 0xcb, 0x7a, + 0x28, 0x7d, 0xf9, 0x0e, 0x68, 0x2a, 0x80, 0x32, 0x3b, 0x47, 0x11, 0x67, 0xe0, 0xbc, 0xce, 0x41, + 0x77, 0x16, 0x64, 0x9d, 0x83, 0xfe, 0x64, 0xf2, 0x0e, 0x75, 0xe8, 0x79, 0x29, 0x6f, 0x15, 0x5c, + 0xfc, 0x6e, 0x0c, 0xf2, 0xed, 0x9e, 0x3a, 0x7e, 0xc9, 0x64, 0xe5, 0x3b, 0xa5, 0xc1, 0xbe, 0xe3, + 0xda, 0xfe, 0xc1, 0x90, 0x8f, 0xe2, 0x10, 0x8c, 0x8c, 0x71, 0xec, 0x3b, 0x35, 0x7b, 0xc4, 0x47, + 0x32, 0x4f, 0x09, 0xb8, 0xf9, 0x9c, 0x8f, 0x60, 0x9e, 0x22, 0x7a, 0x53, 0x33, 0x9f, 0x97, 0x9d, + 0xd1, 0x88, 0x0f, 0x5a, 0x91, 0x24, 0x12, 0x2c, 0xf5, 0x7c, 0xfb, 0xd0, 0x6a, 0x8c, 0xad, 0x91, + 0x1c, 0xad, 0x0a, 0x88, 0xd4, 0xa7, 0x69, 0x7a, 0x9e, 0x44, 0x61, 0xc3, 0x35, 0x04, 0x23, 0x38, + 0x25, 0xdf, 0xb7, 0x86, 0x63, 0x9f, 0xcd, 0x64, 0x7c, 0xbc, 0xaa, 0x30, 0x52, 0x92, 0xee, 0xf9, + 0xe6, 0x13, 0x62, 0x78, 0x07, 0xe3, 0x55, 0x01, 0x11, 0x1d, 0x2e, 0x4f, 0x5c, 0x97, 0x82, 0xf8, + 0x88, 0x0d, 0x00, 0x6c, 0x5e, 0x6b, 0x59, 0xfb, 0x62, 0xbc, 0xf2, 0x14, 0x1f, 0x13, 0x34, 0x23, + 0x2b, 0xc7, 0x04, 0xcd, 0xb9, 0x04, 0x59, 0x6c, 0xf9, 0xae, 0x39, 0xf2, 0x68, 0x2e, 0x5f, 0x18, + 0x14, 0x10, 0xe3, 0xa9, 0xbb, 0xae, 0x18, 0x94, 0x3c, 0xc5, 0x79, 0x62, 0xb2, 0x24, 0x2d, 0x49, + 0x9e, 0x24, 0x39, 0x33, 0x53, 0x2e, 0xcf, 0xce, 0x94, 0xc5, 0xbf, 0x19, 0x85, 0x7c, 0xa7, 0xaf, + 0xf6, 0x29, 0x9d, 0x01, 0x82, 0x45, 0x37, 0x22, 0x66, 0x80, 0x60, 0xc9, 0x5d, 0x87, 0x54, 0xdd, + 0x69, 0x3a, 0xae, 0x2f, 0x26, 0x67, 0x91, 0x0c, 0xcd, 0xdb, 0xb1, 0xd9, 0x79, 0x9b, 0x0c, 0x60, + 0xc9, 0x38, 0x2e, 0x75, 0x31, 0xe0, 0x4c, 0xf4, 0xa9, 0x77, 0xf8, 0x64, 0xb2, 0x17, 0x36, 0x0b, + 0x54, 0x18, 0xc1, 0x69, 0x8d, 0xfa, 0x01, 0x0e, 0xef, 0x63, 0x15, 0x36, 0xd3, 0xf2, 0xd4, 0x9c, + 0x35, 0x82, 0xe0, 0xec, 0x8f, 0x1c, 0xd7, 0xea, 0xd7, 0x26, 0x03, 0xdf, 0xe6, 0x9d, 0x1c, 0x82, + 0x15, 0xff, 0x72, 0x14, 0x96, 0x5a, 0xf5, 0x5a, 0x53, 0x11, 0xcf, 0x47, 0x90, 0xb6, 0xc7, 0x5d, + 0xcf, 0x37, 0x7d, 0x6f, 0xea, 0xf2, 0x80, 0x6a, 0x62, 0x31, 0x2b, 0xcd, 0xa0, 0x84, 0x1e, 0x4e, + 0xd9, 0x34, 0xcb, 0x43, 0x65, 0x00, 0xbb, 0x37, 0x14, 0xe4, 0xd1, 0xd0, 0x15, 0xac, 0xf0, 0xd2, + 0xb8, 0x95, 0x3f, 0x7a, 0xb9, 0x99, 0x31, 0xca, 0x35, 0xce, 0x22, 0x63, 0xf3, 0x6c, 0x0f, 0x7d, + 0x0a, 0x19, 0xbf, 0x27, 0x78, 0xc4, 0x42, 0x9b, 0xdc, 0xa1, 0xd1, 0xc9, 0x6e, 0x05, 0xb6, 0xcb, + 0x9c, 0x43, 0xda, 0xef, 0x05, 0x0c, 0x26, 0x7d, 0xc1, 0x20, 0xbc, 0x4b, 0x1e, 0x52, 0x05, 0xc6, + 0xa0, 0x53, 0x11, 0x0c, 0x26, 0x2c, 0xd3, 0x2b, 0x7e, 0x37, 0x02, 0x4b, 0xc4, 0x43, 0x51, 0xc4, + 0x12, 0xf8, 0x75, 0x91, 0x57, 0xf2, 0xeb, 0xa6, 0xfd, 0xcf, 0xe8, 0x97, 0xf5, 0x3f, 0x37, 0x21, + 0x4b, 0xcd, 0xf0, 0xae, 0x6a, 0xce, 0x03, 0x05, 0x6d, 0x51, 0x9b, 0xfe, 0x22, 0xc0, 0xc4, 0xb3, + 0xfa, 0x3c, 0x9f, 0x19, 0xf6, 0x19, 0x02, 0xa1, 0xd9, 0xc5, 0x5f, 0x5a, 0x09, 0xee, 0x72, 0x87, + 0x66, 0xb8, 0x8c, 0x2f, 0xa6, 0xd4, 0xd0, 0xc6, 0x66, 0x00, 0x46, 0x65, 0x58, 0xe9, 0x8d, 0x27, + 0x5e, 0x77, 0xe2, 0xf1, 0xbb, 0xa3, 0x5d, 0xcf, 0xea, 0xf1, 0xcb, 0x54, 0x5b, 0xab, 0x47, 0x2f, + 0x37, 0xb5, 0x72, 0xb3, 0xe3, 0x75, 0x3c, 0x76, 0x7d, 0xb4, 0x65, 0xf5, 0x3c, 0xac, 0x11, 0x02, + 0x15, 0x82, 0x0c, 0x38, 0x4b, 0x99, 0x78, 0x2f, 0x3c, 0xdf, 0x1a, 0x2a, 0x6c, 0xe8, 0x3d, 0xab, + 0xad, 0xb5, 0xa3, 0x97, 0x9b, 0x88, 0xb0, 0x69, 0xd1, 0x7c, 0xc9, 0x08, 0x11, 0xa2, 0x30, 0x0c, + 0xdd, 0x00, 0xa0, 0xac, 0x68, 0xe3, 0xd9, 0xcd, 0x2b, 0xa6, 0x4e, 
0x84, 0xbe, 0x4a, 0x80, 0x38, + 0x43, 0x10, 0xe8, 0x5f, 0xf4, 0x1e, 0xe4, 0x87, 0xd6, 0xb0, 0xeb, 0x7a, 0x1e, 0x17, 0x0d, 0x75, + 0x6c, 0xd8, 0xbd, 0x83, 0x9a, 0x35, 0xc4, 0xad, 0x16, 0x15, 0x10, 0xce, 0x0e, 0xad, 0x21, 0xf6, + 0x3c, 0x26, 0xcc, 0x2b, 0xb0, 0x4c, 0x88, 0x54, 0x89, 0x33, 0x97, 0x87, 0xf0, 0xaa, 0x06, 0x42, + 0xbf, 0x0f, 0xcb, 0xb4, 0x2a, 0x23, 0x97, 0x5f, 0x00, 0x60, 0x63, 0x31, 0xbf, 0x75, 0xe6, 0xe8, + 0xe5, 0x66, 0x9e, 0xd4, 0xa7, 0xee, 0xb2, 0x93, 0x7e, 0x0f, 0xe7, 0x09, 0xa6, 0x4c, 0xa2, 0x4f, + 0xe1, 0x8c, 0x20, 0xf5, 0x0f, 0x5c, 0xc7, 0xf7, 0x07, 0x16, 0xbb, 0x27, 0x91, 0xdf, 0x5a, 0x39, + 0x7a, 0xb9, 0xb9, 0xcc, 0x88, 0xdb, 0x22, 0x0b, 0x2f, 0x33, 0x72, 0x09, 0x40, 0x18, 0xd6, 0x29, + 0x03, 0x49, 0xad, 0x08, 0x35, 0x43, 0x85, 0x72, 0xee, 0xe8, 0xe5, 0xe6, 0x59, 0xc2, 0x47, 0x12, + 0x49, 0xb9, 0xd2, 0xce, 0x98, 0x01, 0xa3, 0xb7, 0x60, 0x89, 0xb4, 0x9b, 0x78, 0xca, 0xbc, 0xd9, + 0xcc, 0xd9, 0xca, 0x0d, 0xad, 0xe1, 0xb6, 0x3d, 0xb0, 0x58, 0xab, 0x39, 0x96, 0x39, 0x72, 0x46, + 0x1c, 0x2b, 0x2b, 0xb1, 0x4a, 0x23, 0x67, 0xc4, 0xb0, 0xee, 0xc0, 0x59, 0x82, 0x35, 0x34, 0xc7, + 0x63, 0xab, 0xaf, 0xb2, 0xcc, 0x51, 0x64, 0x34, 0xb4, 0x86, 0x35, 0x9a, 0x17, 0x30, 0xbe, 0x06, + 0xf1, 0xb1, 0xe5, 0xee, 0xf1, 0x5b, 0x14, 0x62, 0xe6, 0x68, 0x5a, 0xee, 0x5e, 0xa0, 0xb2, 0x98, + 0xa2, 0x90, 0x3a, 0x8c, 0x2c, 0xbf, 0xeb, 0x3e, 0xef, 0x0a, 0x9f, 0x74, 0x89, 0xd5, 0x61, 0x64, + 0xf9, 0xf8, 0x79, 0x93, 0x3b, 0xa6, 0x97, 0x20, 0xc7, 0xb1, 0x58, 0xd1, 0xcb, 0x6c, 0xd8, 0x50, + 0x1c, 0x56, 0x64, 0x11, 0xf2, 0x1c, 0xc3, 0x0a, 0x0c, 0xb6, 0x38, 0xce, 0x52, 0x14, 0xe9, 0xa9, + 0x89, 0xb2, 0x88, 0xa3, 0x3c, 0xb6, 0xfa, 0xd4, 0x62, 0x13, 0x65, 0x55, 0x18, 0x4c, 0x60, 0xf9, + 0x41, 0x8d, 0x90, 0xc4, 0x6a, 0x4f, 0xd7, 0xc8, 0x17, 0x35, 0x5a, 0x91, 0x35, 0x6a, 0x87, 0x6b, + 0xe4, 0xcb, 0x1a, 0xad, 0xca, 0x1a, 0xb5, 0xa7, 0x6a, 0xe4, 0x07, 0x35, 0x3a, 0xab, 0x94, 0x25, + 0x6a, 0xd4, 0x82, 0x73, 0x14, 0xab, 0x37, 0xee, 0xba, 0xbe, 0xdf, 0x1d, 0xda, 0x3d, 0xd7, 0x21, + 0xea, 0xd1, 0x1d, 0xdf, 0xbb, 0x4d, 0xed, 0x37, 0xae, 0x22, 0x75, 0xcb, 0x6f, 0x97, 0x9b, 0xd8, + 0xf7, 0x6b, 0x02, 0xa3, 0x79, 0xef, 0x36, 0x3e, 0x4b, 0x78, 0xf5, 0xc6, 0x53, 0xe0, 0x05, 0x4c, + 0xef, 0xdf, 0xa6, 0x56, 0xdf, 0xf1, 0x4c, 0xef, 0xcf, 0x67, 0x7a, 0x7f, 0x21, 0xd3, 0x7b, 0xd4, + 0x4c, 0x5c, 0xc0, 0xf4, 0xde, 0x7c, 0xa6, 0xf7, 0x16, 0x31, 0xbd, 0x4f, 0x4d, 0xca, 0x05, 0x4c, + 0xef, 0xcf, 0x67, 0x7a, 0x1f, 0x5d, 0x05, 0xad, 0x6f, 0x7b, 0x4f, 0x43, 0x53, 0x43, 0x81, 0xca, + 0x7e, 0x89, 0xc0, 0x95, 0xb9, 0xe1, 0x0a, 0x2c, 0x53, 0x4c, 0x65, 0x56, 0x3e, 0xcf, 0xe6, 0x10, + 0x02, 0xee, 0x88, 0x99, 0x19, 0x7d, 0x06, 0x05, 0x51, 0x4d, 0x93, 0xda, 0x7a, 0xdd, 0x9e, 0x33, + 0x1a, 0x59, 0x3d, 0xb6, 0x11, 0x7c, 0x81, 0xd6, 0xf3, 0xfc, 0xd1, 0xcb, 0xcd, 0xd7, 0x59, 0x3d, + 0x99, 0x3d, 0x58, 0x0e, 0x50, 0xf0, 0xeb, 0xac, 0xa6, 0x33, 0x19, 0xa8, 0x0b, 0x17, 0x05, 0x67, + 0x3a, 0x35, 0x3c, 0x33, 0x6d, 0x3f, 0xc4, 0xfc, 0x22, 0x65, 0x7e, 0xf1, 0xe8, 0xe5, 0xe6, 0x39, + 0xc6, 0x9c, 0x4c, 0x04, 0x8f, 0x4d, 0xdb, 0x57, 0xd9, 0x9f, 0x63, 0xec, 0xe7, 0x64, 0xb1, 0x9d, + 0x48, 0xa7, 0x67, 0x79, 0x9e, 0xe5, 0xad, 0x6f, 0x90, 0xb9, 0x0b, 0x07, 0x00, 0x62, 0x47, 0xf9, + 0x07, 0xae, 0x65, 0xf6, 0xbd, 0xf5, 0x4d, 0x9a, 0x27, 0x92, 0xe8, 0x2b, 0xb0, 0x4e, 0xa7, 0x57, + 0xe7, 0x59, 0x77, 0xec, 0x5a, 0x9e, 0x37, 0x71, 0x49, 0xa3, 0x27, 0x23, 0xdf, 0x72, 0xd7, 0x2f, + 0x51, 0x19, 0x91, 0xa9, 0xa3, 0xea, 0x3c, 0x6b, 0xf2, 0xdc, 0x32, 0xcb, 0x44, 0x1f, 0xc3, 0x79, + 0x3a, 0xa7, 0x58, 0x7d, 0x7b, 0x32, 0x9c, 0xa5, 0x7d, 0x83, 0xd2, 0x12, 0xde, 0x35, 0x8a, 
0x31, + 0x4d, 0x5e, 0x82, 0x8b, 0x84, 0xbc, 0xe7, 0xda, 0xbe, 0xdd, 0x33, 0x07, 0xb3, 0x0c, 0x8a, 0x94, + 0x41, 0x61, 0x68, 0x0d, 0xcb, 0x1c, 0x67, 0x9a, 0xc5, 0x0f, 0xc3, 0x06, 0x95, 0x29, 0xdb, 0xa6, + 0x23, 0xd2, 0xf4, 0x5d, 0x67, 0x40, 0x8d, 0x0d, 0x36, 0x3f, 0xad, 0xbf, 0x19, 0xda, 0xba, 0x3f, + 0x6e, 0x3f, 0x8f, 0x2f, 0xb3, 0xe7, 0x89, 0x70, 0x8f, 0xdb, 0xf2, 0xe3, 0xab, 0x90, 0xef, 0xf8, + 0xe6, 0x80, 0x6b, 0xd0, 0x5b, 0x72, 0x15, 0x6a, 0x13, 0x68, 0x68, 0xa6, 0x65, 0x78, 0x43, 0x6b, + 0xe8, 0x3d, 0xe3, 0xd8, 0x97, 0xe5, 0x4c, 0x4b, 0xb1, 0x6b, 0x24, 0x8b, 0x91, 0xdc, 0x82, 0x55, + 0x42, 0xe2, 0x39, 0x7b, 0x7e, 0x48, 0x95, 0xaf, 0x50, 0x8a, 0x33, 0x43, 0x6b, 0xd8, 0x72, 0xf6, + 0xfc, 0xb0, 0x36, 0x53, 0xd1, 0x99, 0xbd, 0x03, 0x31, 0x8f, 0xbf, 0x2d, 0xeb, 0x52, 0x26, 0xd0, + 0xd0, 0xda, 0xe0, 0x3d, 0x33, 0xc7, 0x1c, 0xed, 0xaa, 0x5c, 0x1b, 0x5a, 0xcf, 0xcc, 0x31, 0xc3, + 0xba, 0xcb, 0x6a, 0x3c, 0x19, 0x59, 0x87, 0x76, 0x8f, 0xde, 0xac, 0xe4, 0xc8, 0xd7, 0x28, 0xf2, + 0xca, 0xd0, 0x1a, 0x76, 0x82, 0x3c, 0x46, 0xf3, 0x43, 0xb0, 0x42, 0x24, 0xef, 0x8d, 0xb8, 0x81, + 0xc9, 0xc5, 0xfd, 0x4e, 0x68, 0xad, 0x08, 0x5b, 0xb3, 0x5b, 0x67, 0x8f, 0x5e, 0x6e, 0x9e, 0xa9, + 0x5b, 0x7e, 0x18, 0x8c, 0xcf, 0x8c, 0x2c, 0xbf, 0x35, 0x0a, 0x6d, 0xd5, 0x54, 0xf8, 0x68, 0x55, + 0x38, 0x5f, 0xa7, 0x1d, 0x79, 0x36, 0xb8, 0x0b, 0xf8, 0x74, 0xa6, 0xfb, 0xe8, 0x98, 0x57, 0xb8, + 0xd4, 0x40, 0x7b, 0x32, 0x78, 0x6a, 0x3b, 0x2a, 0x9b, 0x1b, 0xb4, 0x82, 0x45, 0x71, 0x8e, 0xbe, + 0xef, 0x3a, 0x93, 0x31, 0xb5, 0xf5, 0xb6, 0x08, 0xe6, 0x4d, 0xa5, 0x5a, 0xcb, 0x94, 0x36, 0x00, + 0x14, 0x7f, 0x37, 0x0e, 0x79, 0x61, 0xb4, 0x75, 0x3c, 0x73, 0xdf, 0x42, 0x25, 0xc8, 0x88, 0xdb, + 0x9d, 0xe2, 0x72, 0xee, 0xf4, 0x69, 0x19, 0x45, 0x94, 0x77, 0x03, 0x85, 0x39, 0x27, 0xa9, 0xd0, + 0x75, 0x48, 0x50, 0x4d, 0xe1, 0x97, 0x1d, 0x8e, 0xb9, 0x75, 0xc2, 0x70, 0x0a, 0xdf, 0x8f, 0x41, + 0x5a, 0xb0, 0x42, 0x9f, 0x40, 0x3e, 0xb8, 0x94, 0x3a, 0xda, 0x73, 0xf8, 0x15, 0x81, 0x95, 0x39, + 0xd7, 0x11, 0xc5, 0xc9, 0x94, 0xa5, 0x5e, 0x6a, 0x7d, 0x0f, 0x32, 0xfc, 0x08, 0xcd, 0xea, 0x2f, + 0x2e, 0x3d, 0xc0, 0x43, 0xf7, 0x01, 0x14, 0x61, 0xc6, 0xe6, 0x1e, 0xb2, 0x29, 0x32, 0x54, 0x90, + 0xd1, 0xd7, 0x21, 0x27, 0x6f, 0x8d, 0x74, 0xe9, 0x91, 0x42, 0x74, 0xfe, 0x07, 0x8f, 0x5b, 0x2b, + 0x7c, 0x43, 0x5d, 0xfd, 0x8a, 0x12, 0x67, 0x25, 0xb1, 0xd1, 0x47, 0x9f, 0x40, 0xc2, 0xa7, 0x7b, + 0x0c, 0xec, 0xb2, 0x46, 0x71, 0xa1, 0xd0, 0x6f, 0xb6, 0x4d, 0xef, 0xa9, 0x14, 0x24, 0x21, 0x2b, + 0xfc, 0x6c, 0x04, 0xe2, 0x04, 0xba, 0xe0, 0xe8, 0xf8, 0x1a, 0xdd, 0xe1, 0x67, 0x37, 0x1a, 0xc5, + 0xc9, 0x06, 0xfb, 0x4a, 0x74, 0x66, 0xc3, 0xff, 0x4b, 0xdd, 0x1e, 0x0a, 0x0e, 0xf0, 0xe2, 0x0b, + 0x0e, 0xf0, 0x8a, 0x7f, 0x61, 0x05, 0x96, 0xc2, 0x26, 0xd7, 0xa9, 0xbc, 0x84, 0x4b, 0xa1, 0xcf, + 0x02, 0x03, 0x14, 0x09, 0x25, 0x1e, 0x7e, 0xef, 0x45, 0x6f, 0x20, 0xbd, 0x17, 0x9e, 0x42, 0x1f, + 0xc0, 0xeb, 0x9e, 0x6f, 0x0e, 0x88, 0x05, 0xcb, 0x20, 0xdd, 0x3d, 0xd7, 0x19, 0xf9, 0xd6, 0xa8, + 0xcf, 0xdd, 0x98, 0xb3, 0x3c, 0xbb, 0x4c, 0x73, 0xb7, 0x79, 0x26, 0x7a, 0x1f, 0xd6, 0xa6, 0xe8, + 0x9e, 0x10, 0x23, 0x6b, 0xd4, 0xe7, 0x67, 0x17, 0xab, 0x21, 0xb2, 0x2d, 0x96, 0x47, 0xfc, 0x62, + 0x7b, 0xe4, 0xf9, 0xee, 0x84, 0xaf, 0x81, 0xcc, 0xae, 0x0f, 0xc1, 0xd0, 0x35, 0xd0, 0xd8, 0x44, + 0xe7, 0x5a, 0x7b, 0x96, 0x4b, 0xfc, 0x2f, 0x8f, 0x9f, 0x6d, 0x2c, 0x53, 0x38, 0x96, 0x60, 0xf4, + 0x06, 0xe4, 0x18, 0xea, 0xd0, 0xa6, 0xab, 0x60, 0x9a, 0x19, 0x6b, 0x14, 0x56, 0xa3, 0x20, 0x54, + 0x80, 0xf4, 0x13, 0xd7, 0x1c, 0xf5, 0x0e, 0x2c, 0x7e, 0xc6, 0x81, 0x65, 0x1a, 0xbd, 0x09, 0x79, + 0xf6, 0x5f, 0xd0, 
0x73, 0x7b, 0x9b, 0x01, 0x39, 0x83, 0x8b, 0x00, 0x4f, 0x26, 0x1e, 0x6f, 0x24, + 0xb7, 0xb5, 0x33, 0x4f, 0x26, 0x1e, 0x6b, 0x18, 0xc9, 0x76, 0xad, 0x3d, 0x91, 0xcd, 0xac, 0xeb, + 0x8c, 0x6b, 0xed, 0xf1, 0xec, 0x6b, 0x40, 0xbc, 0xa1, 0x6e, 0x6f, 0xe0, 0xf4, 0x9e, 0x52, 0xcb, + 0x3a, 0xc2, 0x1c, 0xdf, 0x72, 0xb3, 0x53, 0x26, 0x30, 0x9c, 0xee, 0x8d, 0x27, 0xf4, 0x1f, 0xe1, + 0x44, 0xb4, 0x95, 0xe3, 0x12, 0x83, 0x3a, 0x82, 0x33, 0x04, 0xc2, 0xb2, 0x37, 0x21, 0x3b, 0x36, + 0xf7, 0xad, 0x2e, 0x3d, 0x28, 0x91, 0xc6, 0x34, 0x01, 0xd1, 0xdb, 0xb6, 0x54, 0x18, 0x43, 0x7b, + 0xe4, 0xb8, 0x02, 0x83, 0xdb, 0xd2, 0x14, 0xa6, 0xa0, 0x98, 0x3f, 0x1c, 0xa0, 0x9c, 0xe1, 0x28, + 0x04, 0xc6, 0x51, 0x88, 0xf4, 0x49, 0x17, 0x3f, 0xf7, 0xbb, 0xde, 0x33, 0xdb, 0xa7, 0x72, 0x43, + 0x5c, 0xfa, 0x0c, 0xde, 0xe2, 0x60, 0xf4, 0x21, 0x2c, 0x91, 0xb6, 0x0d, 0xed, 0x7d, 0x97, 0x1f, + 0x9c, 0x53, 0x7b, 0x5a, 0xba, 0x5f, 0x35, 0x99, 0x41, 0xdd, 0xaf, 0x20, 0x49, 0x0a, 0x31, 0x07, + 0xf6, 0x3e, 0xfd, 0xfa, 0x40, 0xd4, 0x85, 0x19, 0xda, 0xcb, 0x12, 0x1e, 0xd4, 0xc7, 0x1a, 0x4e, + 0x06, 0xec, 0xbe, 0x00, 0x47, 0x65, 0xe6, 0xf6, 0xb2, 0x84, 0x73, 0xd4, 0x2b, 0xb0, 0x3c, 0xb8, + 0xd3, 0xed, 0x33, 0x8d, 0x18, 0x38, 0xc4, 0xf4, 0x59, 0x63, 0xab, 0xe4, 0xe0, 0x4e, 0x85, 0x42, + 0xab, 0x04, 0x48, 0x56, 0xec, 0x30, 0x9e, 0xe8, 0xfe, 0xd7, 0xd9, 0x8a, 0xad, 0x62, 0x73, 0x25, + 0xb8, 0x0a, 0x5a, 0x40, 0xe2, 0xf9, 0x8e, 0x6b, 0xb1, 0x0d, 0xd4, 0x38, 0x5e, 0x12, 0xd8, 0x2d, + 0x0a, 0x45, 0xef, 0xc1, 0xda, 0x14, 0xa6, 0xe0, 0x7e, 0x8e, 0xad, 0xae, 0x21, 0x7c, 0xce, 0xfe, + 0x36, 0xac, 0x06, 0x44, 0x63, 0xa2, 0xf7, 0x4c, 0xf0, 0x85, 0x70, 0x85, 0x9a, 0x32, 0x07, 0xdd, + 0x87, 0x73, 0xb3, 0x14, 0xa2, 0x24, 0x66, 0xe9, 0xae, 0x4d, 0x93, 0xf1, 0xc2, 0x98, 0x98, 0x6c, + 0x55, 0x4c, 0x17, 0x84, 0x98, 0x8c, 0x19, 0x31, 0xd9, 0xb3, 0x62, 0xba, 0x28, 0x6a, 0x65, 0x4c, + 0x8b, 0x89, 0xb5, 0xc3, 0x9e, 0x69, 0xc7, 0x46, 0x98, 0x62, 0xa6, 0x1d, 0xf6, 0xfc, 0x76, 0x6c, + 0x8a, 0x76, 0x18, 0xf3, 0xda, 0x71, 0x0d, 0x32, 0x83, 0x41, 0x8f, 0xb7, 0x80, 0x1a, 0xae, 0x6c, + 0x68, 0x55, 0xab, 0x65, 0xda, 0x00, 0x9c, 0x1e, 0x0c, 0x7a, 0xac, 0x29, 0xf7, 0x61, 0x59, 0xa0, + 0x0a, 0xde, 0x6f, 0x04, 0xaa, 0xca, 0x09, 0x18, 0x5b, 0x9c, 0xe7, 0x54, 0xbc, 0x94, 0x1b, 0x00, + 0x84, 0x94, 0xf7, 0x39, 0x35, 0x51, 0xd9, 0x7e, 0x47, 0xb5, 0x5a, 0x66, 0x5d, 0x8e, 0x49, 0x35, + 0x78, 0xef, 0x7f, 0x15, 0x34, 0x89, 0x2d, 0x4a, 0x7a, 0x93, 0xd2, 0xa0, 0xa3, 0x97, 0x9b, 0x4b, + 0x82, 0x86, 0x17, 0xb5, 0x24, 0x08, 0x79, 0x59, 0x1f, 0x02, 0x81, 0xa8, 0x82, 0x7b, 0x2b, 0x54, + 0xcb, 0x40, 0x6e, 0xb4, 0x96, 0x8a, 0x18, 0x75, 0x58, 0x51, 0x29, 0x45, 0xd1, 0xd4, 0x04, 0x65, + 0x76, 0x98, 0x42, 0xce, 0x4b, 0x3f, 0xa3, 0xb0, 0x08, 0x1a, 0xdb, 0xf7, 0x07, 0x4f, 0xb8, 0x4c, + 0xaf, 0x04, 0x8d, 0xad, 0xb4, 0xab, 0x5b, 0x4c, 0xa8, 0x19, 0x82, 0xc0, 0xa4, 0xfa, 0x55, 0xd0, + 0x24, 0xb6, 0x28, 0xf1, 0xed, 0xa0, 0xb1, 0x82, 0x46, 0x34, 0x56, 0x10, 0xf2, 0xb2, 0x6e, 0x41, + 0x96, 0x52, 0x73, 0xc9, 0x52, 0x43, 0x95, 0xdd, 0xd2, 0x21, 0x84, 0x5c, 0xb4, 0xb4, 0x3a, 0x5c, + 0xb6, 0x9f, 0xc2, 0x99, 0x80, 0x40, 0x94, 0x47, 0x4d, 0x56, 0xb6, 0x67, 0x23, 0xc9, 0x78, 0x81, + 0xcb, 0x92, 0x96, 0x97, 0xf8, 0x11, 0x50, 0x90, 0x2a, 0xdf, 0x77, 0xc2, 0xd5, 0x55, 0x04, 0x4c, + 0xab, 0xab, 0x48, 0x78, 0x17, 0x56, 0x43, 0xc4, 0xa2, 0x02, 0xd7, 0x29, 0x07, 0xba, 0x83, 0xa6, + 0x72, 0xe0, 0x75, 0x40, 0x2a, 0x97, 0x40, 0xc8, 0x76, 0x20, 0xe4, 0x1b, 0x81, 0x90, 0x8d, 0x40, + 0xc8, 0xb6, 0x2a, 0x64, 0x7b, 0x5a, 0xc8, 0xef, 0x06, 0xb5, 0x36, 0xa6, 0x84, 0x6c, 0x87, 0x85, + 0xfc, 0x06, 0xf0, 0xc5, 0x8c, 0x97, 0x76, 
0x93, 0x4d, 0xf8, 0x0c, 0xc6, 0x0a, 0xb8, 0x01, 0x48, + 0x41, 0x11, 0x45, 0xdc, 0xa2, 0x88, 0x5a, 0x80, 0x18, 0xac, 0x86, 0x23, 0xa7, 0x2f, 0xe6, 0x8d, + 0xdb, 0x6c, 0xb9, 0x23, 0x10, 0xc6, 0xec, 0x2a, 0x68, 0x32, 0x5b, 0xb0, 0xba, 0xc3, 0xe6, 0x49, + 0x81, 0xc4, 0x19, 0x6d, 0x42, 0x96, 0x62, 0xf2, 0xee, 0xbf, 0xcb, 0x77, 0x62, 0x9c, 0xbe, 0x98, + 0x48, 0xdf, 0x81, 0x33, 0x01, 0x82, 0xe0, 0xf5, 0x1e, 0x9b, 0xf9, 0x25, 0x1a, 0x67, 0xf6, 0x36, + 0x50, 0x90, 0xda, 0xb3, 0xef, 0x07, 0xa5, 0x2a, 0xbd, 0x78, 0x1b, 0x56, 0x43, 0x88, 0x82, 0xef, + 0x3d, 0x36, 0x41, 0xa9, 0xd8, 0x8c, 0x75, 0xf1, 0x10, 0x52, 0xfc, 0x08, 0x1e, 0x7d, 0x00, 0x69, + 0x73, 0x9f, 0xac, 0x58, 0xf2, 0x9a, 0xf3, 0xf4, 0x87, 0x00, 0x74, 0x63, 0x9e, 0x27, 0x70, 0x8a, + 0x22, 0x1b, 0x53, 0xf6, 0x62, 0xf4, 0x74, 0xf6, 0x62, 0xf1, 0x47, 0x56, 0x20, 0x41, 0x63, 0x9e, + 0xf0, 0x40, 0x03, 0x91, 0x50, 0xe8, 0x0a, 0x1e, 0x0d, 0x65, 0xc6, 0x34, 0xfd, 0xfa, 0xd4, 0xa7, + 0x5f, 0xd1, 0x90, 0xd1, 0xad, 0x7e, 0xfa, 0x25, 0x8d, 0xee, 0x63, 0x3f, 0xff, 0x7a, 0xa0, 0x34, + 0x37, 0x16, 0x0e, 0x9c, 0xc1, 0x9b, 0xbb, 0xcc, 0x79, 0xcc, 0x36, 0x59, 0xfd, 0x78, 0x23, 0x7e, + 0xf2, 0xc7, 0x1b, 0xa7, 0xfd, 0x0e, 0xce, 0x80, 0x9c, 0xf2, 0x61, 0x1e, 0x31, 0x1f, 0x63, 0xf3, + 0xbf, 0xcc, 0x93, 0xad, 0x0b, 0x60, 0x1e, 0xce, 0x06, 0x9f, 0xe6, 0x4d, 0x7f, 0x07, 0x92, 0x3a, + 0xf5, 0xd5, 0xf3, 0xcb, 0x10, 0x9b, 0xb8, 0x03, 0x7e, 0xd9, 0x12, 0xc4, 0xc9, 0x06, 0xae, 0xb2, + 0xcf, 0x82, 0x3a, 0xb8, 0x8a, 0x49, 0xfe, 0x9c, 0x50, 0x16, 0x99, 0x57, 0x08, 0x65, 0x31, 0xef, + 0x9a, 0x24, 0x7c, 0xa9, 0x6b, 0x92, 0xc1, 0x47, 0x27, 0xd9, 0x93, 0x3e, 0x3a, 0xf9, 0x8b, 0x4b, + 0x90, 0x91, 0xe1, 0x63, 0xd0, 0xbd, 0xd0, 0x37, 0x27, 0xe7, 0x55, 0xbd, 0xbb, 0x29, 0xb1, 0x66, + 0x6f, 0xad, 0xdd, 0x23, 0x6e, 0xce, 0x64, 0xd4, 0x13, 0xf7, 0xd6, 0x2e, 0x1e, 0x43, 0x58, 0xa5, + 0x48, 0x98, 0x23, 0xa3, 0x0f, 0x21, 0xc5, 0xae, 0x51, 0x8a, 0xab, 0x7b, 0x1b, 0xc7, 0xd0, 0xb1, + 0xdb, 0x8a, 0x16, 0x16, 0xe8, 0xe8, 0x13, 0xc8, 0x4c, 0x46, 0x82, 0x36, 0x1e, 0xfa, 0x56, 0x7d, + 0x9a, 0xb6, 0x23, 0xf0, 0x70, 0x40, 0x42, 0x2a, 0xdc, 0xa3, 0x77, 0x48, 0xf8, 0xad, 0xb6, 0xe3, + 0x2a, 0xcc, 0x2e, 0x9a, 0x60, 0x8e, 0x4c, 0x2a, 0xdc, 0xb7, 0x3c, 0xdf, 0x75, 0x5e, 0xf0, 0x8b, + 0xcb, 0xc7, 0x55, 0xb8, 0xc2, 0xb0, 0xb0, 0x40, 0x47, 0x3a, 0xe4, 0x58, 0xa3, 0xbb, 0x74, 0x33, + 0x82, 0xdf, 0xf5, 0x2d, 0x2e, 0x94, 0xd3, 0x0e, 0xc1, 0xc4, 0xd9, 0x41, 0x90, 0xe0, 0xb3, 0x42, + 0x2e, 0xf4, 0xad, 0x8b, 0x12, 0xfc, 0x27, 0x74, 0x77, 0x6f, 0x0b, 0xb2, 0xfb, 0xae, 0xf3, 0xac, + 0xcb, 0xef, 0x50, 0xb2, 0x0d, 0xfe, 0x37, 0x8e, 0x29, 0x71, 0xc7, 0x75, 0x9e, 0xf1, 0x7b, 0x95, + 0xb0, 0x2f, 0xff, 0xa3, 0x5d, 0xc8, 0x7b, 0x07, 0xae, 0x3d, 0x7a, 0x2a, 0xb8, 0x2c, 0x85, 0x3e, + 0x75, 0x98, 0xe6, 0xd2, 0xa2, 0xb8, 0x9c, 0x4f, 0xce, 0x53, 0x52, 0xa4, 0x36, 0x4c, 0x88, 0x5d, + 0x7a, 0xd7, 0x79, 0x79, 0x61, 0x6d, 0x98, 0xd8, 0x2b, 0xb6, 0xf7, 0x14, 0x43, 0x4f, 0xfe, 0x27, + 0x42, 0xe4, 0xf2, 0x64, 0x4c, 0xb4, 0x85, 0x42, 0xe4, 0x7d, 0x40, 0xb9, 0x64, 0xfb, 0x41, 0xa2, + 0xf0, 0x09, 0x24, 0x99, 0x80, 0xd1, 0xfb, 0xdc, 0xf9, 0x22, 0x43, 0x4d, 0xec, 0xed, 0x2c, 0xab, + 0xdb, 0x00, 0xc1, 0xb6, 0x0a, 0xf5, 0xc9, 0x48, 0xda, 0x2b, 0xfc, 0x7f, 0x90, 0x55, 0x3a, 0x08, + 0xdd, 0x83, 0xb4, 0x98, 0x62, 0x4e, 0xde, 0x9d, 0x91, 0xa8, 0xe8, 0x3e, 0x2f, 0x9b, 0xe9, 0x43, + 0x34, 0xf4, 0xe5, 0x07, 0x29, 0x7b, 0x47, 0x6c, 0x5a, 0xa9, 0x15, 0xa0, 0xc0, 0xc2, 0x27, 0x64, + 0x75, 0x62, 0x8a, 0x1c, 0x9a, 0x50, 0x23, 0xa7, 0x9b, 0x50, 0x0b, 0x5f, 0x83, 0x8c, 0x1c, 0x15, + 0x5f, 0x8e, 0xc3, 0x7d, 0x48, 0xb2, 0x3e, 0x42, 0xb7, 0x20, 0xc5, 
0x54, 0xe3, 0x04, 0x62, 0x81, + 0x55, 0x78, 0x00, 0x29, 0xde, 0x33, 0xaf, 0x4e, 0x3b, 0x02, 0x08, 0x14, 0x15, 0xbd, 0x2b, 0xef, + 0x07, 0x87, 0x63, 0x28, 0x4d, 0x51, 0x8b, 0x7b, 0xc2, 0x77, 0x20, 0x6d, 0xf6, 0xfb, 0xb6, 0xdc, + 0x2d, 0x39, 0x96, 0x40, 0xa2, 0x15, 0x7c, 0xc8, 0xa9, 0x2a, 0xfd, 0xaa, 0x25, 0xde, 0x83, 0xb4, + 0x37, 0x79, 0xc2, 0x3e, 0x5d, 0x8f, 0x86, 0x34, 0x43, 0xfd, 0xe6, 0x40, 0x94, 0x2a, 0x50, 0x0b, + 0xdf, 0x89, 0x00, 0x04, 0x23, 0x80, 0x14, 0x2a, 0xcf, 0xc1, 0x17, 0x15, 0xca, 0x4f, 0xbf, 0x1f, + 0x42, 0xd6, 0x37, 0xdd, 0x7d, 0xcb, 0xef, 0x2a, 0x1f, 0x45, 0xbc, 0xca, 0x45, 0x64, 0x60, 0xe4, + 0x04, 0x52, 0xf8, 0x2a, 0x64, 0x95, 0x61, 0xf4, 0x8a, 0x55, 0x29, 0xfe, 0xfd, 0xc8, 0x31, 0x57, + 0xc9, 0xab, 0xa5, 0x4e, 0xbd, 0xbc, 0xcb, 0xbe, 0x7c, 0x63, 0xff, 0xbb, 0x3b, 0xb8, 0xd1, 0x69, + 0x6a, 0x49, 0x82, 0xca, 0x3e, 0x3e, 0xd4, 0xb5, 0x28, 0xca, 0x43, 0xa6, 0x53, 0x17, 0xc9, 0x18, + 0xfd, 0x78, 0x1a, 0xeb, 0xa5, 0xb6, 0xae, 0xc5, 0xd9, 0x57, 0xd5, 0xad, 0x36, 0x6e, 0x7c, 0xae, + 0x25, 0xd0, 0x32, 0x64, 0x77, 0x70, 0xe3, 0xb1, 0xf8, 0xf8, 0x30, 0x8b, 0xce, 0x40, 0xbe, 0xb5, + 0x8b, 0x8d, 0xfa, 0x43, 0x01, 0xca, 0x11, 0x1c, 0x46, 0xdc, 0xad, 0x18, 0xad, 0x87, 0x5a, 0x9e, + 0x94, 0xcd, 0x39, 0x30, 0xc8, 0x12, 0xff, 0xea, 0xee, 0x37, 0xa2, 0x90, 0x33, 0x46, 0x87, 0x96, + 0xeb, 0x59, 0xcc, 0x14, 0xbb, 0xb5, 0xc0, 0x14, 0x93, 0xc6, 0x10, 0x07, 0xd0, 0x99, 0x97, 0x5b, + 0x0b, 0xd1, 0x13, 0xac, 0x85, 0x69, 0xb3, 0x2d, 0xf6, 0x03, 0x98, 0x6d, 0xaa, 0x95, 0x1a, 0x7f, + 0x05, 0x2b, 0xb5, 0x3c, 0x63, 0xb1, 0x24, 0x68, 0x2d, 0xe6, 0x5b, 0x2c, 0x62, 0x0b, 0x7e, 0xca, + 0x6e, 0x09, 0x4d, 0x21, 0xc9, 0x53, 0x9a, 0xba, 0x3f, 0x9b, 0x80, 0xb4, 0x98, 0x63, 0x17, 0xec, + 0xd0, 0x7e, 0x00, 0x29, 0x36, 0x45, 0x1f, 0xb3, 0x4d, 0xbb, 0xc4, 0x45, 0xc3, 0x83, 0xfb, 0xe1, + 0x24, 0x9d, 0xa7, 0x7f, 0x30, 0x3b, 0x36, 0xd4, 0x9e, 0xf8, 0x29, 0xad, 0xd4, 0x5b, 0xca, 0x32, + 0x90, 0x08, 0xdd, 0xe2, 0x51, 0x97, 0x01, 0x65, 0x01, 0x10, 0x71, 0x22, 0x92, 0x4a, 0x9c, 0x88, + 0x57, 0x0b, 0xf9, 0x70, 0x0f, 0x72, 0x07, 0x34, 0x10, 0x4c, 0x97, 0x06, 0x08, 0x9b, 0x0a, 0xf9, + 0xa0, 0xc4, 0x88, 0xc1, 0xd9, 0x03, 0x25, 0xc6, 0x4f, 0xe8, 0x93, 0xd6, 0xcc, 0xe9, 0x22, 0x36, + 0x04, 0x1b, 0xd9, 0xb0, 0x38, 0x68, 0x82, 0x12, 0xd8, 0x21, 0x7b, 0xba, 0xc0, 0x0e, 0x77, 0x21, + 0xfb, 0xd4, 0x1e, 0x0c, 0xba, 0x63, 0x1a, 0x4e, 0x85, 0x1b, 0x37, 0xc2, 0x62, 0x0d, 0xe2, 0xac, + 0x60, 0x78, 0x1a, 0xc4, 0x5c, 0xb9, 0x02, 0x09, 0xd6, 0xe4, 0x7c, 0xe8, 0x9b, 0x2b, 0x19, 0x36, + 0x06, 0xb3, 0x6c, 0x54, 0x86, 0x95, 0xa1, 0xf9, 0xbc, 0xdb, 0x73, 0x86, 0xe3, 0x81, 0x45, 0xad, + 0x6a, 0xdf, 0x96, 0x56, 0xcc, 0xdc, 0x90, 0x11, 0x67, 0x86, 0xe6, 0xf3, 0xb2, 0x44, 0x6f, 0xdb, + 0x43, 0xab, 0xf8, 0x55, 0xc8, 0x87, 0x16, 0x64, 0x7a, 0x9c, 0x43, 0x0f, 0x26, 0x16, 0x5a, 0x0c, + 0x0c, 0xa7, 0xf8, 0xef, 0x12, 0x27, 0x9e, 0x42, 0x7c, 0x59, 0x1d, 0xff, 0x93, 0x9c, 0x40, 0xb6, + 0xc2, 0xd1, 0x4f, 0xe2, 0xa1, 0x5e, 0x50, 0x7c, 0xac, 0xa5, 0x05, 0x91, 0x4f, 0xd4, 0x31, 0x97, + 0x78, 0xc5, 0x31, 0x77, 0x85, 0xc6, 0x43, 0xf4, 0xad, 0xf5, 0x24, 0x5d, 0xb0, 0x34, 0x45, 0x02, + 0x2d, 0x02, 0xc7, 0x2c, 0x3b, 0x3c, 0x36, 0x53, 0xa7, 0x1c, 0x9b, 0xef, 0x29, 0xf1, 0x8a, 0xd2, + 0x21, 0xef, 0x51, 0xf0, 0x9f, 0x78, 0x72, 0x19, 0x16, 0x81, 0x8c, 0xbe, 0x06, 0x2b, 0xec, 0x7f, + 0x77, 0x32, 0xee, 0x13, 0xc3, 0x95, 0xd5, 0x2f, 0x43, 0x5d, 0xa3, 0xd9, 0xfa, 0x9d, 0x61, 0xc8, + 0x1d, 0x8a, 0x4b, 0x41, 0x68, 0x0b, 0x50, 0x98, 0xc3, 0x64, 0x62, 0xb3, 0xcf, 0xe6, 0x72, 0xec, + 0x12, 0x57, 0x4b, 0x21, 0xe9, 0x74, 0x8c, 0x0a, 0xd6, 0x54, 0x26, 0x9d, 0x89, 0xdd, 0x3f, 
0x6d, + 0x90, 0xbc, 0x2f, 0x13, 0x51, 0x25, 0x34, 0x0f, 0xe4, 0x4f, 0x37, 0x0f, 0x88, 0xf0, 0x40, 0x4b, + 0x41, 0x78, 0xa0, 0x62, 0x0d, 0xd6, 0xda, 0xf4, 0x42, 0x2f, 0x13, 0x37, 0x3d, 0xc1, 0x66, 0xee, + 0xe4, 0x97, 0xb1, 0x2d, 0x8b, 0x17, 0x20, 0x4e, 0x64, 0x11, 0xc4, 0x27, 0x22, 0x03, 0x26, 0x27, + 0x3e, 0xa0, 0xfd, 0xad, 0xa8, 0xea, 0xaf, 0x4e, 0x07, 0xbd, 0x89, 0x7c, 0xc9, 0xa0, 0x37, 0xea, + 0xf2, 0x19, 0x7d, 0x85, 0xe5, 0xf3, 0x36, 0xc4, 0xa9, 0x9b, 0xce, 0x46, 0xde, 0xda, 0x7c, 0x4f, + 0x44, 0x8c, 0x75, 0x9b, 0x45, 0x3d, 0xc8, 0x0f, 0x4c, 0xdf, 0xf2, 0xfc, 0x2e, 0xeb, 0x6a, 0x7e, + 0x42, 0xba, 0x36, 0xed, 0xcc, 0x85, 0x94, 0x32, 0xc7, 0x48, 0x18, 0x0c, 0x7d, 0xa8, 0x68, 0x73, + 0x22, 0x14, 0x2c, 0x6a, 0x3e, 0x75, 0xa0, 0xd2, 0xef, 0x42, 0x9c, 0xaa, 0x60, 0x92, 0x96, 0x99, + 0x15, 0x6b, 0x7c, 0xc7, 0xa8, 0x28, 0x71, 0xa5, 0x88, 0x1e, 0x52, 0xb4, 0xe2, 0x4f, 0x47, 0x61, + 0x79, 0x8a, 0x25, 0x91, 0xb8, 0x23, 0x40, 0xb3, 0x12, 0x57, 0x7d, 0x51, 0x2a, 0x71, 0x05, 0x80, + 0xb3, 0x92, 0xd0, 0xe8, 0xa3, 0x3b, 0x62, 0xbc, 0x33, 0x03, 0xf5, 0xec, 0xbc, 0x16, 0x58, 0x4a, + 0x14, 0x54, 0xfa, 0xe9, 0x73, 0x6a, 0x68, 0x79, 0x9e, 0xb9, 0x2f, 0xbe, 0x74, 0x14, 0x49, 0xb4, + 0x0d, 0x2b, 0x3d, 0x87, 0x98, 0x6c, 0xbe, 0xd5, 0xef, 0x9e, 0x72, 0xe9, 0x46, 0x92, 0x02, 0xcb, + 0x79, 0xe2, 0x1a, 0x97, 0x0f, 0x5b, 0xbf, 0x43, 0xf2, 0x49, 0x4f, 0xc9, 0xe6, 0x65, 0x14, 0x96, + 0xe9, 0x72, 0xc3, 0xe4, 0xc2, 0xb7, 0x5d, 0x4e, 0x8e, 0xb6, 0x28, 0xbf, 0xf3, 0x9b, 0x8a, 0x5c, + 0xb1, 0xa1, 0x62, 0x07, 0x3c, 0x67, 0xa3, 0x2d, 0xde, 0x0f, 0x45, 0x5b, 0x3c, 0x7f, 0x0c, 0xd9, + 0xdc, 0x98, 0x8b, 0xf7, 0x58, 0x28, 0xb9, 0x78, 0xe8, 0xbb, 0xdd, 0x69, 0xca, 0xd9, 0xa0, 0x87, + 0x57, 0x82, 0xa0, 0x87, 0xe7, 0x21, 0x63, 0x3d, 0xa7, 0x37, 0x9a, 0xfa, 0xac, 0x99, 0x09, 0x62, + 0xd0, 0xd8, 0x7e, 0xd9, 0xe9, 0x5b, 0x85, 0xb7, 0x79, 0x74, 0xc3, 0x4d, 0xc8, 0xf2, 0x69, 0x4f, + 0xa2, 0xe5, 0xd9, 0x25, 0x81, 0x89, 0x47, 0x11, 0xdf, 0x64, 0x01, 0x0d, 0x2f, 0x40, 0xc6, 0x9b, + 0xf4, 0x7a, 0x96, 0xd5, 0xb7, 0x98, 0x1e, 0xa5, 0x71, 0x00, 0x28, 0xfe, 0xcc, 0x32, 0x40, 0x30, + 0x3b, 0xab, 0x6b, 0x64, 0xe4, 0x55, 0xd6, 0xc8, 0x2b, 0x61, 0x3d, 0x3b, 0x76, 0x5d, 0x11, 0xd6, + 0x58, 0x4c, 0xb1, 0xc6, 0x14, 0x85, 0x8b, 0x87, 0x15, 0xee, 0x83, 0xd0, 0x4a, 0x77, 0xfa, 0xf9, + 0xe2, 0x82, 0x7a, 0xaa, 0x9f, 0xe4, 0x87, 0xbd, 0xf2, 0x3c, 0x7f, 0x6a, 0x0d, 0x4e, 0x7d, 0x99, + 0x35, 0x78, 0x1d, 0x52, 0xcc, 0xd6, 0x7b, 0x41, 0xcd, 0xc1, 0x34, 0x16, 0x49, 0x74, 0x5b, 0x3a, + 0x6f, 0x99, 0x50, 0x48, 0xab, 0x40, 0xc8, 0xd3, 0x17, 0xa9, 0x6f, 0x43, 0xd2, 0xb5, 0x4c, 0xcf, + 0x61, 0xb1, 0x6b, 0xe6, 0x52, 0x60, 0x9a, 0x8f, 0x39, 0x1e, 0xba, 0xc0, 0x07, 0x50, 0x96, 0xae, + 0x71, 0x53, 0x63, 0x46, 0x59, 0xcb, 0x72, 0x8b, 0xd6, 0xb2, 0x12, 0x3b, 0x8c, 0x66, 0x77, 0x48, + 0xf8, 0x2c, 0xc9, 0x96, 0xa7, 0xb5, 0xe9, 0xe5, 0x89, 0xd5, 0x82, 0x1d, 0x52, 0x2b, 0x00, 0xf4, + 0x00, 0xb4, 0x09, 0xfb, 0xa6, 0x87, 0x5e, 0x74, 0x52, 0x8c, 0xbe, 0xe9, 0x40, 0xcb, 0x78, 0x59, + 0x41, 0x24, 0x40, 0x74, 0x1f, 0x72, 0xd4, 0x78, 0x14, 0x45, 0x2f, 0x87, 0x8b, 0x0e, 0x0f, 0x17, + 0x9c, 0xed, 0x05, 0x00, 0xf4, 0x31, 0xb0, 0xeb, 0xe1, 0xec, 0x4a, 0x86, 0x16, 0xda, 0x5a, 0x9c, + 0xbf, 0x44, 0x62, 0x85, 0xa0, 0xb8, 0x25, 0x3f, 0x23, 0x26, 0xfe, 0x2b, 0x0b, 0xb2, 0x53, 0x2b, + 0xb5, 0xda, 0x3a, 0x66, 0xf1, 0x93, 0x38, 0x88, 0x86, 0x51, 0xd2, 0x22, 0x68, 0x05, 0x96, 0x39, + 0x44, 0xff, 0x4c, 0x2f, 0x77, 0xda, 0x0d, 0xac, 0x45, 0x8b, 0xbf, 0x9d, 0x86, 0x24, 0xeb, 0x14, + 0x54, 0x84, 0x0d, 0xac, 0x97, 0x5a, 0x8d, 0x7a, 0x97, 0xc7, 0x34, 0x94, 0x78, 0xdd, 0xed, 0x92, + 0x51, 0xd5, 0x2b, 
0xda, 0x6b, 0x21, 0x9c, 0x7a, 0xbb, 0x64, 0xd4, 0x75, 0xdc, 0xe5, 0x1e, 0x39, + 0xc7, 0x39, 0x8b, 0x36, 0xe1, 0xfc, 0x2c, 0x8e, 0x51, 0x33, 0xda, 0x34, 0x3e, 0x90, 0xb6, 0x82, + 0xde, 0x82, 0x4b, 0x0b, 0x10, 0x98, 0x77, 0xbd, 0x8a, 0xae, 0x40, 0x71, 0x11, 0x56, 0x4d, 0xaf, + 0x35, 0xf0, 0xe7, 0x5a, 0x1a, 0x6d, 0x40, 0x61, 0x06, 0xaf, 0x89, 0x75, 0xbd, 0xd6, 0x6c, 0xeb, + 0x15, 0xed, 0xcc, 0xdc, 0x2a, 0xb3, 0x98, 0x52, 0xa2, 0xca, 0x6b, 0x4a, 0x59, 0xb5, 0xd2, 0x67, + 0xa4, 0xf9, 0xcd, 0xaa, 0x4e, 0x4b, 0x69, 0x1b, 0x35, 0xbd, 0x8b, 0xf5, 0x52, 0x79, 0x57, 0xaf, + 0x68, 0x6f, 0xa0, 0xab, 0xf0, 0x16, 0xc7, 0x93, 0xa2, 0xc1, 0xfa, 0x8e, 0xd1, 0x6a, 0xe3, 0x92, + 0x44, 0x6f, 0x74, 0xda, 0xda, 0xeb, 0xe8, 0x1d, 0xb8, 0x32, 0x8b, 0x39, 0x17, 0x77, 0x5d, 0x69, + 0x81, 0xc4, 0x6d, 0xeb, 0xb8, 0x66, 0xd4, 0x4b, 0xa4, 0x05, 0x11, 0x74, 0x09, 0x2e, 0x4c, 0xe7, + 0x77, 0xea, 0x8c, 0x97, 0x8e, 0xf5, 0x8a, 0x16, 0x45, 0x17, 0x60, 0x9d, 0x63, 0x6c, 0xe3, 0x52, + 0x4d, 0x7f, 0xdc, 0xc0, 0x0f, 0xbb, 0x58, 0xaf, 0x35, 0x1e, 0xe9, 0x15, 0x2d, 0x46, 0x3a, 0x9e, + 0xe7, 0xee, 0x94, 0xbb, 0x3a, 0xc6, 0x0d, 0xac, 0xc5, 0x95, 0x42, 0x8d, 0xfa, 0xa3, 0x52, 0xd5, + 0xa8, 0x04, 0xa4, 0x46, 0x45, 0x4b, 0xa0, 0x73, 0x70, 0x76, 0x2a, 0xbf, 0xb1, 0xbd, 0xad, 0xe3, + 0x96, 0x96, 0x54, 0xea, 0x63, 0x34, 0xba, 0xad, 0xc7, 0x46, 0xbb, 0xbc, 0xbb, 0xd5, 0x28, 0x61, + 0xa2, 0x2f, 0x06, 0xa9, 0xf1, 0x79, 0x85, 0x39, 0xd3, 0x47, 0xd2, 0xa7, 0xe5, 0x46, 0xbd, 0xae, + 0x97, 0x49, 0x7e, 0x4a, 0x61, 0x8e, 0xf5, 0x72, 0xa3, 0x5e, 0x36, 0xaa, 0x06, 0x53, 0x8e, 0x8c, + 0xd2, 0x14, 0x19, 0xff, 0xb8, 0x2b, 0x36, 0x83, 0x10, 0xba, 0x08, 0xe7, 0x78, 0x2e, 0x0b, 0x0e, + 0x16, 0xe2, 0x0b, 0x68, 0x1d, 0x56, 0x43, 0xd9, 0x42, 0x06, 0x59, 0x45, 0xe7, 0x42, 0x39, 0xdd, + 0xad, 0xcf, 0xbb, 0x8d, 0xa6, 0x8e, 0x4b, 0x64, 0x34, 0x6c, 0xce, 0xb0, 0x17, 0x5d, 0x46, 0xc5, + 0x7c, 0x89, 0x45, 0xba, 0x0a, 0x65, 0xb7, 0xda, 0x25, 0x4c, 0x8a, 0xce, 0xcd, 0x14, 0x2d, 0xea, + 0x9c, 0x57, 0x8a, 0x96, 0x31, 0xa0, 0xf5, 0x4a, 0xb7, 0xd2, 0xc1, 0x46, 0x7d, 0x87, 0x0f, 0x1e, + 0x6d, 0x63, 0x1a, 0xab, 0xbc, 0xab, 0x97, 0x1f, 0xd2, 0x38, 0xd1, 0x9d, 0x16, 0x57, 0xd7, 0x8a, + 0x76, 0x01, 0x5d, 0x87, 0xb7, 0x55, 0xac, 0x5d, 0xbd, 0x54, 0x6d, 0xef, 0xce, 0x47, 0xbe, 0xa8, + 0xb4, 0x86, 0x22, 0xd3, 0x2d, 0x33, 0xd1, 0x9b, 0xda, 0x39, 0x65, 0x60, 0x28, 0xd9, 0x9d, 0x7a, + 0xa9, 0xd3, 0xde, 0x6d, 0x60, 0xe3, 0x9b, 0x7a, 0x45, 0x2b, 0xb0, 0x88, 0xd6, 0x01, 0x8e, 0x20, + 0x5e, 0x52, 0xba, 0x89, 0x66, 0x84, 0xc8, 0x96, 0xa7, 0xc9, 0x84, 0x2c, 0xb4, 0xe2, 0x7b, 0x90, + 0xda, 0xb6, 0x07, 0xbe, 0x45, 0xbf, 0xb2, 0x5c, 0x72, 0xad, 0xbd, 0x89, 0x67, 0x75, 0x83, 0xb7, + 0x01, 0x68, 0xb0, 0xf3, 0x7b, 0x38, 0xcf, 0x32, 0x78, 0xcc, 0xdc, 0xe2, 0xdf, 0x89, 0x42, 0x56, + 0x09, 0x4b, 0x8a, 0x3e, 0x85, 0xcc, 0xa1, 0xe9, 0xda, 0x64, 0x06, 0x16, 0x6e, 0xc2, 0xf9, 0xd9, + 0xe8, 0xa5, 0x37, 0x1f, 0x71, 0x1c, 0xe1, 0x32, 0x48, 0x9a, 0xc2, 0xaf, 0x44, 0x20, 0x2d, 0x72, + 0x17, 0xf8, 0xd9, 0xd2, 0xa3, 0x88, 0xaa, 0x11, 0x4f, 0x3f, 0x92, 0x61, 0xa5, 0xd4, 0x38, 0x6b, + 0xf3, 0x0a, 0xa6, 0x06, 0xdc, 0x83, 0xc4, 0xa3, 0x52, 0xb5, 0xa3, 0x73, 0x3b, 0xee, 0x32, 0x24, + 0x3d, 0xab, 0xe7, 0xca, 0xc8, 0x47, 0x62, 0x49, 0x6b, 0x51, 0x20, 0xe6, 0x99, 0xc5, 0xdb, 0xf3, + 0x36, 0x42, 0x33, 0xc0, 0x58, 0xb1, 0xb0, 0x90, 0x2d, 0xbd, 0x8c, 0xf5, 0xb6, 0x8c, 0xfd, 0xf5, + 0x29, 0x64, 0xe4, 0x3b, 0x03, 0x68, 0x0d, 0x62, 0x4f, 0xad, 0x17, 0xa1, 0x16, 0x11, 0x40, 0x10, + 0x69, 0x28, 0x3a, 0x1b, 0x69, 0x68, 0x0b, 0x40, 0x32, 0xf0, 0xd0, 0xfb, 0x90, 0x19, 0x8b, 0x14, + 0x17, 0xf1, 0x71, 0xcf, 0x19, 0x04, 0x88, 
0xc5, 0x5d, 0xba, 0x11, 0xdd, 0xb7, 0x46, 0xbe, 0x6d, + 0x0e, 0x50, 0x31, 0x1c, 0xab, 0x24, 0x28, 0x51, 0x89, 0x58, 0xb2, 0x26, 0xe5, 0x11, 0xe5, 0x11, + 0x83, 0x99, 0x00, 0x76, 0x21, 0x1b, 0x70, 0xf2, 0xd0, 0x7d, 0x7a, 0x1a, 0x24, 0x92, 0xbc, 0x42, + 0xc2, 0xd2, 0x09, 0x10, 0xc5, 0xbb, 0x12, 0x0a, 0x6e, 0xf1, 0x67, 0xa2, 0x90, 0x64, 0xd2, 0x45, + 0x37, 0x42, 0xf6, 0x36, 0x0a, 0x89, 0x7e, 0xde, 0x21, 0x65, 0x46, 0xde, 0x2d, 0xe4, 0x46, 0xf7, + 0xeb, 0x61, 0x12, 0x79, 0xc7, 0x10, 0x07, 0x98, 0xe8, 0x9a, 0x1a, 0x26, 0x37, 0xd8, 0x34, 0xe2, + 0x24, 0x74, 0x23, 0x5f, 0x84, 0xa4, 0xfa, 0x0a, 0x64, 0x24, 0x8b, 0x05, 0x6a, 0xa8, 0xb1, 0xde, + 0x64, 0x02, 0x22, 0x7f, 0x0b, 0xe7, 0x45, 0x30, 0x2f, 0x61, 0x85, 0x32, 0x97, 0x97, 0xfe, 0x2f, + 0xbe, 0x37, 0x4f, 0x77, 0xf2, 0x90, 0xc1, 0xfa, 0xb6, 0x8e, 0xf5, 0x3a, 0x8d, 0x20, 0x27, 0x55, + 0x49, 0xa8, 0xcf, 0x1e, 0x64, 0xb0, 0xe9, 0x33, 0x43, 0x03, 0x9d, 0x83, 0xd8, 0x17, 0x63, 0x3e, + 0x1c, 0x99, 0x6d, 0xff, 0x8d, 0x66, 0x0b, 0x13, 0x58, 0xb8, 0x4f, 0xa3, 0xf3, 0xfb, 0xb4, 0x00, + 0xe9, 0x9e, 0x39, 0x36, 0x7b, 0xe2, 0x69, 0x86, 0x38, 0x96, 0xe9, 0xe2, 0xbf, 0x8e, 0x00, 0xc8, + 0x82, 0x3c, 0x74, 0x13, 0x92, 0x3c, 0x96, 0x45, 0x58, 0xc7, 0x24, 0x8a, 0x38, 0x21, 0xe0, 0xf1, + 0x2d, 0x1e, 0xc2, 0x59, 0x73, 0x7f, 0xdf, 0xb5, 0xf6, 0xe9, 0xc1, 0x20, 0x0f, 0xd4, 0x46, 0xea, + 0xca, 0xbe, 0x74, 0x7b, 0xfd, 0xe8, 0xe5, 0xe6, 0x4a, 0x49, 0x20, 0xf0, 0x70, 0x11, 0xa4, 0xee, + 0x2b, 0xe6, 0x34, 0x70, 0xec, 0xa1, 0xaf, 0x42, 0x61, 0x96, 0xd9, 0x54, 0xcd, 0xd7, 0xa7, 0x09, + 0xcb, 0xa2, 0x25, 0x3f, 0x17, 0x83, 0x84, 0x31, 0x24, 0xc6, 0xfd, 0xe5, 0xb9, 0x11, 0xd5, 0x68, + 0x9e, 0xea, 0xc2, 0x5d, 0x86, 0xb8, 0x39, 0x1e, 0xf7, 0xb8, 0x2a, 0x85, 0xd1, 0x4a, 0xe3, 0x71, + 0x0f, 0xd3, 0x6c, 0x74, 0x1d, 0x92, 0x7d, 0xa7, 0xf7, 0xd4, 0x9a, 0x0e, 0x3b, 0xc5, 0x10, 0x2b, + 0x34, 0x0b, 0x73, 0x14, 0x74, 0x01, 0x92, 0xf4, 0xd2, 0x1b, 0xdb, 0x80, 0x13, 0xa1, 0x8e, 0x39, + 0xac, 0xd0, 0x85, 0x38, 0x61, 0xbc, 0x40, 0xb5, 0xd6, 0xf8, 0x7d, 0xe6, 0xe9, 0x38, 0x2e, 0x81, + 0xe5, 0x1d, 0x5b, 0x60, 0x79, 0x17, 0xbe, 0x1d, 0x81, 0x24, 0xab, 0xd1, 0x82, 0x32, 0xee, 0x01, + 0x04, 0xe3, 0x71, 0xaa, 0xf5, 0xca, 0xd0, 0x8d, 0xae, 0x47, 0xb0, 0x82, 0x48, 0xaa, 0xd0, 0x73, + 0x46, 0x7b, 0xf6, 0xfe, 0x54, 0x15, 0xc4, 0x4c, 0xc9, 0x32, 0x8b, 0x45, 0xae, 0xed, 0x69, 0x88, + 0x97, 0x9a, 0xcd, 0x32, 0x9b, 0x1b, 0x2b, 0x8d, 0xf2, 0x43, 0x1d, 0x4b, 0xe5, 0xfe, 0x89, 0x08, + 0x68, 0x34, 0x6a, 0x4c, 0xd3, 0x75, 0xc6, 0xe6, 0x3e, 0xdb, 0x0a, 0xba, 0xcd, 0xdf, 0x1c, 0x61, + 0x93, 0x81, 0x8c, 0x7f, 0x3a, 0x85, 0xa6, 0x3c, 0x3c, 0x52, 0x2c, 0xf3, 0x97, 0x43, 0x42, 0x03, + 0xeb, 0x2c, 0x9c, 0xd9, 0x6d, 0xb4, 0xda, 0xdd, 0x76, 0x23, 0x30, 0x2f, 0xb5, 0x08, 0x31, 0xc8, + 0xb7, 0x8c, 0x8a, 0x81, 0xf5, 0x32, 0x31, 0x6b, 0x4a, 0x55, 0x59, 0x97, 0xdf, 0x4b, 0x41, 0x92, + 0x9f, 0x0e, 0x5e, 0x87, 0xa5, 0xc0, 0x6f, 0xa1, 0x51, 0x7c, 0x54, 0xe1, 0xe5, 0x65, 0x1e, 0x0d, + 0x86, 0x73, 0x1e, 0x32, 0x07, 0x8e, 0xe7, 0x77, 0x95, 0x98, 0xeb, 0xf4, 0x9a, 0x0c, 0xcd, 0xbc, + 0xc2, 0xdb, 0x12, 0xa3, 0x1a, 0x88, 0x42, 0x71, 0x8f, 0xd4, 0xa7, 0x53, 0x8a, 0x90, 0xb0, 0x87, + 0xc2, 0x3d, 0xcd, 0xde, 0xcd, 0xa9, 0xaa, 0x85, 0x59, 0x16, 0xba, 0x21, 0xdd, 0xbe, 0x44, 0x68, + 0x8b, 0x8f, 0x73, 0x9b, 0x8a, 0x89, 0xf4, 0x93, 0x49, 0xe9, 0x83, 0xbc, 0x17, 0x9a, 0x5d, 0xcf, + 0xcd, 0x23, 0x9b, 0x9d, 0x64, 0xb7, 0x21, 0xcf, 0x54, 0xb9, 0x1b, 0x0a, 0xdd, 0xf4, 0xc6, 0x5c, + 0x6a, 0xa6, 0x6a, 0xe2, 0xba, 0x40, 0x5f, 0x49, 0xa1, 0x32, 0xe4, 0x3c, 0x73, 0xd4, 0x7f, 0xe2, + 0x3c, 0xef, 0xca, 0x67, 0x79, 0x82, 0x3b, 0x1e, 0x61, 0x36, 0x2d, 
0x86, 0x48, 0x63, 0x21, 0x65, + 0xbd, 0x20, 0x71, 0xca, 0xc5, 0x19, 0x3d, 0x50, 0xbb, 0x22, 0x7c, 0x1f, 0x24, 0x5c, 0xd0, 0x2e, + 0xef, 0x9f, 0xa0, 0xa7, 0x0a, 0xdf, 0x82, 0x9c, 0xda, 0x0a, 0xb2, 0xfe, 0xf5, 0x5d, 0xfb, 0x90, + 0x2e, 0xb2, 0x74, 0xfd, 0x63, 0xa9, 0x05, 0xe1, 0x39, 0x3f, 0x84, 0x25, 0x86, 0xd3, 0x75, 0xc6, + 0xec, 0x3e, 0x75, 0x2c, 0x34, 0xa4, 0x82, 0x45, 0x1c, 0xe7, 0x19, 0x62, 0x83, 0xe1, 0x15, 0x7e, + 0x18, 0xd2, 0xa2, 0x46, 0x34, 0x00, 0xed, 0xb4, 0xc6, 0xb1, 0xb8, 0x50, 0x15, 0x38, 0x43, 0x63, + 0x3c, 0x75, 0xc7, 0xc1, 0x28, 0x98, 0x5a, 0xfe, 0xa6, 0x07, 0x09, 0xd6, 0x86, 0x53, 0x90, 0xc2, + 0x4f, 0x47, 0x20, 0xab, 0xc8, 0x19, 0x7d, 0x1a, 0x52, 0x8e, 0xcb, 0x27, 0xf5, 0xcb, 0xac, 0xa2, + 0xac, 0xcb, 0x40, 0x57, 0x53, 0x15, 0x2e, 0xde, 0x3a, 0x26, 0xea, 0x53, 0x4b, 0xaf, 0x6e, 0xb3, + 0xe9, 0xa0, 0x59, 0xc2, 0xc4, 0x03, 0x16, 0x43, 0xf0, 0xf3, 0x79, 0x04, 0x67, 0x20, 0xcf, 0x66, + 0x0d, 0x71, 0x02, 0x1c, 0x21, 0x6b, 0x26, 0x1d, 0xda, 0x34, 0x7c, 0x54, 0x9c, 0x3a, 0xd4, 0xa5, + 0x7a, 0x65, 0xab, 0xf1, 0x19, 0x83, 0x44, 0x15, 0x2b, 0x4c, 0x44, 0xfa, 0x5f, 0x0b, 0x1e, 0x17, + 0xc2, 0x8f, 0xb5, 0x08, 0xfd, 0x6d, 0x68, 0xd1, 0xe2, 0x5f, 0x8d, 0x43, 0x56, 0x79, 0x65, 0x80, + 0x74, 0x3b, 0xbd, 0xc7, 0xc1, 0xbe, 0x0c, 0xc9, 0x60, 0x9e, 0x3a, 0xe5, 0xf7, 0x1f, 0xa8, 0x0d, + 0x39, 0x7b, 0xdc, 0xe5, 0xc1, 0x9f, 0xe5, 0xae, 0xee, 0x85, 0x39, 0xcf, 0x19, 0x18, 0x4d, 0xf1, + 0x7e, 0x95, 0x3c, 0xd2, 0x91, 0x20, 0xcb, 0xc3, 0x59, 0x7b, 0x2c, 0x13, 0x32, 0x92, 0x7b, 0x52, + 0x89, 0xe4, 0x6e, 0x40, 0x7e, 0xec, 0xb8, 0x3e, 0xfd, 0x44, 0xd8, 0x1e, 0xed, 0x8b, 0x23, 0x94, + 0x8d, 0x79, 0x2f, 0x27, 0x38, 0xae, 0x5f, 0x63, 0x68, 0x62, 0x1b, 0x7a, 0x1c, 0x80, 0xbc, 0xc2, + 0x73, 0xc8, 0xc8, 0xa2, 0x43, 0x8f, 0x31, 0x44, 0x5e, 0xf9, 0x31, 0x06, 0x7a, 0x3b, 0x56, 0x0a, + 0x80, 0xaf, 0x5f, 0xec, 0x76, 0xac, 0x28, 0x01, 0x67, 0x64, 0xd3, 0x0a, 0x2f, 0x20, 0xab, 0x54, + 0x0e, 0xbd, 0x21, 0xc6, 0xef, 0xf4, 0x3b, 0x11, 0x6c, 0x98, 0x3a, 0xae, 0x3f, 0x35, 0x35, 0x13, + 0xbc, 0xa8, 0x82, 0xa7, 0x4c, 0xcd, 0x04, 0xb9, 0xa0, 0xb4, 0x85, 0x6d, 0x34, 0x07, 0xaf, 0x46, + 0x6c, 0x40, 0x5a, 0x34, 0x82, 0xe8, 0x24, 0x69, 0x06, 0x7b, 0x29, 0xc2, 0x68, 0x1e, 0x7e, 0xa0, + 0x45, 0x8b, 0x47, 0x09, 0x58, 0x0a, 0x02, 0x33, 0x53, 0xdd, 0xa8, 0x4c, 0x05, 0xe8, 0x66, 0xb1, + 0x8e, 0xc5, 0x54, 0x16, 0x46, 0x56, 0x92, 0xe1, 0xf0, 0xdc, 0xc5, 0x9f, 0x48, 0x84, 0x82, 0x43, + 0x4f, 0xdd, 0xa8, 0x48, 0x94, 0x77, 0xc9, 0xdf, 0xdf, 0x4c, 0xa1, 0x33, 0x90, 0xab, 0x94, 0xca, + 0xdd, 0xc6, 0x23, 0x1d, 0x63, 0xa3, 0xa2, 0x6b, 0xbf, 0x95, 0x42, 0xab, 0xb0, 0x4c, 0x40, 0x58, + 0x2f, 0x55, 0xba, 0x2d, 0xbd, 0x84, 0xcb, 0xbb, 0xda, 0x7f, 0x4c, 0xa1, 0x2c, 0x24, 0xb7, 0x1b, + 0x8f, 0xc9, 0xea, 0xf6, 0x9f, 0x58, 0xa2, 0xa5, 0xb7, 0x8d, 0x8a, 0xf6, 0xdb, 0x29, 0x94, 0x81, + 0x38, 0xf1, 0x69, 0xb5, 0xff, 0x4c, 0xe1, 0x2d, 0xbd, 0xbd, 0x63, 0x54, 0xb4, 0xff, 0x22, 0x12, + 0x1d, 0xa3, 0xa2, 0xfd, 0x4e, 0x0a, 0xe5, 0x20, 0xd5, 0xd2, 0xdb, 0xcd, 0x72, 0xa9, 0xa9, 0x7d, + 0x9f, 0x16, 0x51, 0x35, 0xea, 0x9d, 0xcf, 0xba, 0x46, 0xad, 0xd6, 0x69, 0x97, 0xb6, 0xaa, 0xba, + 0xf6, 0x5f, 0x53, 0xe8, 0x2c, 0x68, 0x75, 0xbd, 0xdd, 0xdd, 0x32, 0xea, 0x15, 0x19, 0xdb, 0xf8, + 0x77, 0x53, 0x08, 0x41, 0x9e, 0x82, 0x71, 0xa3, 0x54, 0x29, 0x97, 0x5a, 0x6d, 0xed, 0xf7, 0x52, + 0x68, 0x09, 0x32, 0x04, 0x56, 0xaa, 0xd4, 0x8c, 0xba, 0xf6, 0xfb, 0x94, 0x3d, 0x49, 0xe3, 0xd2, + 0x63, 0xed, 0xbf, 0xa5, 0x50, 0x1e, 0xd2, 0x46, 0xb3, 0xdc, 0xa5, 0x11, 0xde, 0xfe, 0x3b, 0x45, + 0x26, 0x49, 0x56, 0xfb, 0x3f, 0x48, 0xa1, 0x65, 0x80, 0xd6, 0xe7, 0xad, 0x6e, 0xad, 0x51, 
0xe9, + 0x54, 0x75, 0xed, 0x0f, 0x29, 0x02, 0x01, 0xe0, 0xd2, 0x63, 0xa3, 0xa1, 0xfd, 0x0f, 0x89, 0x50, + 0xde, 0xc5, 0x8d, 0x46, 0x5b, 0xfb, 0x9f, 0x12, 0xd0, 0x6c, 0xe3, 0x52, 0x59, 0xd7, 0xfe, 0x97, + 0xa4, 0x68, 0x96, 0xca, 0xe5, 0xb6, 0xf6, 0x47, 0x32, 0xcd, 0xea, 0xf3, 0xbf, 0x69, 0x0d, 0x48, + 0x7a, 0x8b, 0xd0, 0xff, 0xb1, 0x4c, 0xd6, 0x49, 0x8b, 0xfe, 0x0f, 0x15, 0x3a, 0x2d, 0x8f, 0x6f, + 0x70, 0x68, 0xdf, 0x4e, 0x0b, 0x8c, 0xb6, 0x51, 0xd3, 0xb5, 0x1f, 0x49, 0xa3, 0x15, 0x58, 0xa2, + 0xc9, 0xf6, 0xe7, 0xc4, 0xac, 0xd8, 0x36, 0x76, 0xb4, 0xff, 0x3f, 0x4d, 0xfa, 0xad, 0xf6, 0xb0, + 0xde, 0xa8, 0x68, 0x7f, 0x86, 0xfe, 0xaf, 0xea, 0xa5, 0x96, 0xae, 0xfd, 0x68, 0x1a, 0x69, 0x90, + 0x2d, 0x75, 0x2a, 0x46, 0xbb, 0xfb, 0x18, 0x1b, 0x6d, 0x5d, 0xfb, 0xb1, 0x34, 0x11, 0x19, 0x83, + 0xf0, 0xf8, 0xd1, 0xda, 0x8f, 0xa7, 0x79, 0x0f, 0x6c, 0x93, 0x1e, 0xf8, 0xb3, 0x69, 0x52, 0x85, + 0x9a, 0xda, 0xef, 0xdf, 0x49, 0x93, 0x36, 0x10, 0x10, 0x6b, 0xc3, 0x4f, 0xa4, 0x69, 0xff, 0x7d, + 0xde, 0xaa, 0x36, 0x76, 0xb4, 0x3f, 0x97, 0x26, 0x12, 0x78, 0x5c, 0x7a, 0xa8, 0x77, 0x4b, 0xd5, + 0x12, 0xae, 0x69, 0x7f, 0x9e, 0x16, 0x41, 0x43, 0xe8, 0x75, 0x5b, 0x9d, 0x56, 0x53, 0xaf, 0x57, + 0xb4, 0x9f, 0xa4, 0x48, 0xac, 0x58, 0xa2, 0x3b, 0xda, 0x4f, 0x89, 0x37, 0xa6, 0x7e, 0x21, 0x0a, + 0x99, 0xaa, 0x3d, 0x9a, 0x3c, 0xa7, 0xfa, 0xbd, 0x05, 0xcb, 0x52, 0x53, 0x5f, 0x88, 0x8f, 0x14, + 0x43, 0x2f, 0x41, 0x85, 0x54, 0x9c, 0x1a, 0x86, 0x4b, 0xbd, 0xf0, 0x18, 0xf9, 0x3a, 0x9c, 0x7d, + 0xe2, 0x4c, 0x46, 0x7d, 0x7b, 0xb4, 0xdf, 0x0d, 0x0d, 0x96, 0xe8, 0x02, 0x4e, 0x78, 0x55, 0xd0, + 0x94, 0xd5, 0x67, 0xc5, 0xaa, 0xb0, 0x66, 0xed, 0xed, 0x59, 0xfc, 0xbb, 0x6e, 0x95, 0x59, 0x6c, + 0x11, 0xb3, 0xb3, 0x92, 0x28, 0xc4, 0x4d, 0x87, 0x15, 0x1a, 0xc4, 0xb4, 0x3b, 0xb6, 0xfb, 0x5d, + 0x32, 0x85, 0x7a, 0x63, 0xb3, 0xc7, 0xdf, 0x1a, 0x61, 0xdf, 0x58, 0xd0, 0x68, 0x82, 0x4d, 0xa3, + 0x52, 0x17, 0x99, 0xf8, 0x0c, 0xa5, 0x68, 0xda, 0x7d, 0x09, 0x2a, 0x7e, 0x37, 0x06, 0x80, 0xa9, + 0x07, 0xc4, 0xdf, 0x7a, 0x49, 0xb9, 0x21, 0x47, 0x49, 0x46, 0x7d, 0x95, 0x38, 0xfc, 0xaf, 0xb8, + 0x02, 0xc7, 0xd1, 0x0b, 0xdf, 0x89, 0x41, 0x92, 0xe5, 0xa0, 0xaf, 0x84, 0xd6, 0xe0, 0x8b, 0xc7, + 0x71, 0x98, 0x5d, 0x7b, 0x11, 0xc4, 0x0f, 0x4c, 0xb7, 0xcf, 0x03, 0x17, 0xd2, 0xff, 0x04, 0xe6, + 0x39, 0x7b, 0x3e, 0x77, 0x93, 0xe8, 0xff, 0xe2, 0xdf, 0x8a, 0x1e, 0xf3, 0x9e, 0x05, 0xae, 0xd6, + 0xda, 0xdd, 0x52, 0x8b, 0xad, 0xa9, 0x34, 0x51, 0x6e, 0x60, 0x5d, 0x8b, 0xa2, 0x1c, 0xa4, 0x59, + 0xb2, 0xd9, 0xd1, 0x62, 0x32, 0xb3, 0x52, 0x6a, 0x97, 0xb4, 0x38, 0x5a, 0x22, 0x22, 0xa8, 0xb5, + 0xbb, 0xdb, 0x2d, 0xe3, 0x9b, 0xfc, 0xb9, 0x33, 0x9a, 0x26, 0x5a, 0xd7, 0xd2, 0x92, 0xf4, 0x49, + 0x33, 0x92, 0xae, 0xe9, 0x35, 0x3a, 0xd2, 0x53, 0x34, 0xac, 0x3c, 0x85, 0xb4, 0x76, 0xbe, 0xd1, + 0xd1, 0x3b, 0xba, 0x96, 0x96, 0x3c, 0xe9, 0xd0, 0xcb, 0xa0, 0x65, 0xc8, 0xb2, 0x64, 0x63, 0xdb, + 0xa8, 0xea, 0x1a, 0x48, 0xa6, 0xf5, 0x26, 0x6e, 0x94, 0xb5, 0xac, 0xac, 0x11, 0x6e, 0xb5, 0xd8, + 0x25, 0x30, 0x96, 0x6a, 0x37, 0xb1, 0xd1, 0xd0, 0xf2, 0x0a, 0x80, 0x8e, 0xd4, 0x25, 0xba, 0xb7, + 0x4a, 0x00, 0x2d, 0x63, 0x87, 0x8c, 0x02, 0xa3, 0xbe, 0xa3, 0x2d, 0x4b, 0xa6, 0xad, 0x76, 0xa9, + 0xfc, 0x50, 0xd3, 0xf8, 0x30, 0xf8, 0xa9, 0x08, 0xa4, 0xda, 0xed, 0xcf, 0x69, 0x87, 0x3e, 0x80, + 0xec, 0x33, 0x7b, 0xd4, 0x77, 0x9e, 0x75, 0x3d, 0xfb, 0x5b, 0xd6, 0x54, 0x3c, 0x74, 0x8e, 0x74, + 0xf3, 0x31, 0xc5, 0x68, 0xd9, 0xdf, 0xb2, 0x30, 0x3c, 0x93, 0xff, 0x0b, 0xdb, 0x00, 0x41, 0x0e, + 0x31, 0x8c, 0x5c, 0xe7, 0x99, 0x17, 0x7e, 0xf0, 0x88, 0x40, 0xd0, 0x06, 0xa4, 0x7a, 0xc4, 0xc0, + 0x1a, 0x79, 0xa1, 
0xd5, 0x4b, 0x00, 0x8b, 0x2f, 0xd3, 0x90, 0x0f, 0x87, 0xd8, 0x7f, 0x37, 0xe4, + 0xc9, 0x9e, 0x9b, 0x77, 0xb8, 0xaf, 0x7a, 0xb4, 0xef, 0x06, 0xf7, 0x31, 0xd9, 0x07, 0x0b, 0xe1, + 0x88, 0xab, 0x53, 0xb7, 0x31, 0xd1, 0x57, 0xa6, 0x3c, 0xdb, 0xcd, 0xb9, 0xfc, 0x99, 0x79, 0x2c, + 0xee, 0x9c, 0x53, 0xdf, 0xb2, 0x10, 0xfa, 0x4e, 0x20, 0xfc, 0x7a, 0xc6, 0xfb, 0xc0, 0xde, 0x95, + 0xe5, 0x86, 0xf8, 0xc6, 0x5c, 0x9e, 0x35, 0x02, 0x63, 0xf7, 0x7c, 0x68, 0x36, 0xfa, 0x98, 0x06, + 0xf0, 0x60, 0xb7, 0x00, 0xe8, 0x5d, 0xde, 0x54, 0x28, 0x9a, 0xb7, 0x62, 0x83, 0x08, 0x53, 0x66, + 0x14, 0x80, 0x3c, 0x74, 0x0b, 0x60, 0x40, 0xe6, 0x33, 0x36, 0x7b, 0xa5, 0x43, 0x77, 0x8a, 0xe4, + 0x44, 0x87, 0x33, 0x03, 0x39, 0xe7, 0xdd, 0x85, 0x2c, 0x1b, 0x90, 0x8c, 0x22, 0x13, 0xb2, 0xd8, + 0x83, 0x11, 0x88, 0x81, 0x61, 0x51, 0x9a, 0x0f, 0x20, 0xed, 0x8b, 0x09, 0x12, 0x42, 0x67, 0x86, + 0x5c, 0x3f, 0xd8, 0x99, 0x21, 0x4f, 0xe0, 0x94, 0xcf, 0x26, 0xa6, 0xc2, 0x2f, 0xc6, 0x01, 0x02, + 0x21, 0xa2, 0x82, 0xf0, 0xf9, 0x42, 0xd1, 0xc5, 0x99, 0xaf, 0xb7, 0x03, 0x29, 0xde, 0x2e, 0xfe, + 0x22, 0xd6, 0xdb, 0x27, 0x74, 0x89, 0x90, 0xcc, 0x83, 0x38, 0xb1, 0x8c, 0xb1, 0xa0, 0x46, 0x8f, + 0xa6, 0xcd, 0x44, 0xf6, 0xc1, 0xf3, 0xf5, 0x93, 0xd8, 0x9d, 0x60, 0x33, 0xa2, 0xcb, 0x00, 0x63, + 0xd7, 0x3e, 0xb4, 0x07, 0xd6, 0xbe, 0xdc, 0xe3, 0x10, 0x8f, 0x5c, 0x06, 0x19, 0xe8, 0x03, 0x00, + 0xb9, 0x09, 0x39, 0xfd, 0x5c, 0xc7, 0xf4, 0x76, 0xa5, 0x82, 0x89, 0xae, 0x82, 0xb6, 0xe7, 0xb8, + 0x3d, 0xab, 0x3b, 0x9e, 0x0c, 0x06, 0x5d, 0x26, 0x26, 0xfa, 0x66, 0x15, 0x5e, 0xa2, 0xf0, 0xe6, + 0x64, 0x30, 0x60, 0x7b, 0x3c, 0x6f, 0x43, 0x9e, 0xa9, 0x71, 0x97, 0xbb, 0x6b, 0x29, 0xf9, 0xb0, + 0x51, 0x8e, 0x65, 0x54, 0x28, 0xfc, 0xff, 0xa5, 0xad, 0x79, 0x1f, 0x52, 0xbc, 0x7b, 0xe8, 0x53, + 0x64, 0x8d, 0x16, 0x7f, 0x40, 0x68, 0x0b, 0x1b, 0x95, 0x1d, 0x9d, 0xc5, 0x88, 0xaf, 0x37, 0xea, + 0xba, 0x16, 0x23, 0xff, 0x3a, 0x2d, 0x1d, 0xcb, 0xa8, 0xb7, 0xb7, 0x20, 0x23, 0xc7, 0x48, 0xb0, + 0x4b, 0x10, 0x39, 0x76, 0x97, 0xa0, 0xf8, 0x66, 0x10, 0x97, 0x9e, 0x6f, 0xb6, 0xb0, 0xf8, 0xba, + 0x7a, 0xab, 0xd1, 0x92, 0x8e, 0xd6, 0xb7, 0xa3, 0xb0, 0x3c, 0x75, 0xf4, 0x3a, 0x3b, 0xf2, 0x22, + 0xaf, 0x34, 0xf2, 0xee, 0x42, 0xb6, 0x47, 0x7d, 0x25, 0x36, 0x2e, 0xa6, 0x76, 0x93, 0x64, 0xe0, + 0x06, 0x0c, 0x3d, 0xf9, 0x1f, 0xdd, 0x55, 0xbe, 0x07, 0x1a, 0xf3, 0xd7, 0x58, 0xf3, 0xec, 0xba, + 0x89, 0x38, 0x14, 0x6f, 0x1a, 0x95, 0xe0, 0xc3, 0x9f, 0xa6, 0x4d, 0x5f, 0xc7, 0x9a, 0x8a, 0x4b, + 0x70, 0xcc, 0x43, 0xcc, 0x8c, 0xcf, 0x71, 0x31, 0x09, 0x8a, 0x3f, 0x9f, 0x05, 0x08, 0xaa, 0x85, + 0x3e, 0xa6, 0x03, 0xae, 0xdb, 0x1b, 0x88, 0xc8, 0x6d, 0xeb, 0xb3, 0x31, 0x27, 0xea, 0x96, 0x5f, + 0x1e, 0x78, 0x5b, 0x70, 0xf4, 0x72, 0x33, 0x49, 0xfe, 0x57, 0x5b, 0x38, 0x39, 0xa2, 0xb0, 0xc2, + 0xdf, 0x03, 0x48, 0xd0, 0x90, 0x14, 0x85, 0xa6, 0xd8, 0x01, 0xbe, 0x03, 0x51, 0x67, 0x3c, 0xf5, + 0xec, 0xce, 0x4c, 0x00, 0x0b, 0x79, 0x4b, 0x06, 0x47, 0x9d, 0x71, 0xf8, 0x58, 0x23, 0x2e, 0x36, + 0xa3, 0x7f, 0x3f, 0x0e, 0xb1, 0xf2, 0xf6, 0x37, 0x0a, 0xbf, 0x1d, 0x07, 0x50, 0xe2, 0x0a, 0xdc, + 0x80, 0x64, 0xdf, 0x3a, 0xb4, 0x65, 0x54, 0x35, 0x79, 0x4b, 0x8c, 0x02, 0x6f, 0xd6, 0x27, 0xc3, + 0x27, 0x74, 0x3f, 0x92, 0x26, 0xd1, 0x3a, 0xa4, 0x3c, 0xab, 0xe7, 0x8b, 0xa0, 0xa9, 0x71, 0x2c, + 0x92, 0xc4, 0x5e, 0xa0, 0xa7, 0xea, 0xdc, 0x5e, 0x20, 0xff, 0x11, 0x86, 0xac, 0xed, 0x74, 0xf9, + 0xeb, 0x2c, 0x7d, 0x7e, 0xfd, 0xe6, 0xe2, 0x71, 0x8d, 0xa0, 0xed, 0x0d, 0x5e, 0x53, 0x33, 0x1a, + 0xfc, 0xcd, 0x94, 0x3e, 0x06, 0xdb, 0x11, 0xff, 0x51, 0x17, 0xb4, 0x80, 0xa7, 0x0c, 0x27, 0x76, + 0x0a, 0xc6, 0x6b, 0x9c, 0xf1, 0x92, 0x64, 
0xcc, 0x82, 0x8e, 0x2d, 0x49, 0xe6, 0x22, 0xc6, 0xc9, + 0xb2, 0x52, 0x00, 0x6d, 0x53, 0xf2, 0x34, 0xfc, 0xcf, 0x72, 0xfe, 0x79, 0xc9, 0xbf, 0x6d, 0x0f, + 0x2d, 0x9c, 0x97, 0xec, 0xe9, 0x65, 0x82, 0x16, 0xe4, 0x6c, 0x87, 0x45, 0x01, 0xa2, 0xac, 0x53, + 0xaf, 0x28, 0x93, 0xc7, 0xa6, 0x4d, 0x03, 0x70, 0x12, 0x99, 0x88, 0xff, 0xa8, 0x0a, 0x19, 0xdb, + 0xe9, 0x0e, 0x2d, 0x77, 0x9f, 0xc6, 0x2f, 0x3b, 0x05, 0x47, 0x8d, 0x73, 0x4c, 0x1b, 0x8d, 0x1a, + 0x25, 0xc3, 0x69, 0xdb, 0x61, 0xff, 0x38, 0x37, 0x1a, 0xd8, 0xb9, 0xcf, 0x5f, 0x8f, 0x3a, 0x3d, + 0xb7, 0x6f, 0x50, 0x32, 0xc2, 0x8d, 0xfd, 0x2b, 0xfc, 0x71, 0x04, 0x80, 0x07, 0x39, 0xb3, 0x47, + 0xfb, 0x85, 0xdf, 0x89, 0xfc, 0x00, 0xda, 0x37, 0xa5, 0x4f, 0xd1, 0x3f, 0x2d, 0x7d, 0x8a, 0xfd, + 0x09, 0xea, 0x53, 0xe1, 0xc7, 0xa2, 0xa1, 0x16, 0xeb, 0x10, 0xeb, 0xed, 0x7d, 0xc1, 0x67, 0xc5, + 0x2b, 0xc7, 0x15, 0x51, 0xde, 0xfe, 0xc6, 0x4d, 0x35, 0xd4, 0x23, 0x2f, 0x8b, 0x0c, 0x60, 0x4c, + 0xe8, 0x91, 0x09, 0xf9, 0xde, 0xde, 0x17, 0x5d, 0xd7, 0xea, 0x4d, 0x5c, 0xcf, 0x3e, 0xb4, 0xb8, + 0x30, 0x4e, 0xcb, 0x70, 0x95, 0x33, 0xcc, 0x11, 0x86, 0x82, 0x07, 0xce, 0xf5, 0xf6, 0xbe, 0x90, + 0x29, 0x54, 0x03, 0xf0, 0x65, 0xc7, 0x71, 0x99, 0xbc, 0x7b, 0x1c, 0xff, 0xa0, 0x8b, 0xd5, 0x68, + 0x3a, 0x0a, 0x83, 0x62, 0x4b, 0xbd, 0xa7, 0x39, 0x7d, 0xee, 0xd9, 0x6e, 0xb4, 0x4b, 0x55, 0xb6, + 0x71, 0x42, 0x5d, 0x4b, 0x1a, 0xc1, 0x9d, 0x79, 0xbb, 0x74, 0x31, 0x6b, 0x7d, 0x5e, 0x2f, 0x6b, + 0x71, 0x02, 0x2c, 0xd1, 0xbf, 0x09, 0xb9, 0xae, 0x25, 0xd9, 0x74, 0x8a, 0x2e, 0x43, 0xaa, 0x37, + 0x30, 0x3d, 0x8f, 0x5f, 0x41, 0xcc, 0x33, 0xe3, 0xa9, 0x4c, 0x40, 0x46, 0x05, 0x8b, 0xbc, 0xe2, + 0xfb, 0x90, 0x64, 0x7b, 0x6d, 0xe8, 0x1d, 0xb9, 0x15, 0xc7, 0xfa, 0x22, 0xa7, 0x6e, 0xc5, 0xc9, + 0x63, 0x29, 0x16, 0x8f, 0xe5, 0x1e, 0x24, 0x28, 0xf8, 0xd8, 0x83, 0xd7, 0xb9, 0x27, 0xc9, 0xc5, + 0x7f, 0x12, 0x81, 0x38, 0x5d, 0xd5, 0x2f, 0x40, 0x72, 0x44, 0x55, 0x39, 0x64, 0x22, 0x70, 0x98, + 0xdc, 0x97, 0x8b, 0x86, 0x5e, 0x58, 0x3c, 0xd6, 0x0e, 0x40, 0x9f, 0x02, 0x1c, 0xda, 0x9e, 0xcd, + 0xbf, 0xcf, 0x60, 0x0f, 0x3a, 0x6c, 0xce, 0xbb, 0xdc, 0x7b, 0xf3, 0x91, 0x44, 0xc3, 0x0a, 0x89, + 0xb2, 0x0b, 0x99, 0x58, 0x14, 0x85, 0xe6, 0x36, 0x24, 0x58, 0xcc, 0xd5, 0xb7, 0x21, 0x31, 0xa6, + 0xb1, 0x58, 0x99, 0xa4, 0xc4, 0x3d, 0x48, 0x92, 0x29, 0xec, 0x4d, 0x9a, 0x5f, 0xfc, 0x97, 0x51, + 0xc8, 0x87, 0x6a, 0x80, 0xf4, 0x50, 0x5d, 0x99, 0xdf, 0x71, 0x52, 0x5d, 0x85, 0x21, 0xa7, 0xd4, + 0x78, 0x9e, 0x88, 0x2e, 0x85, 0x5f, 0x39, 0x65, 0x52, 0x0a, 0xbd, 0x68, 0x5a, 0x80, 0xb4, 0xf8, + 0x0c, 0x56, 0xf8, 0x15, 0x22, 0xad, 0xbe, 0xca, 0x97, 0x08, 0xbf, 0xca, 0x57, 0x14, 0xad, 0x4d, + 0x86, 0xcc, 0x23, 0x2a, 0x0a, 0xde, 0x50, 0x45, 0x82, 0xa9, 0x45, 0x12, 0xfc, 0x00, 0x20, 0x68, + 0x16, 0x71, 0x55, 0xe5, 0x15, 0x1c, 0xfe, 0xb6, 0x6c, 0xb5, 0x43, 0x6f, 0x73, 0xd1, 0x57, 0xea, + 0xf5, 0xcf, 0xda, 0x3a, 0xa6, 0x47, 0x49, 0xc5, 0x4f, 0x00, 0x1e, 0x5b, 0xf6, 0xfe, 0x81, 0xcf, + 0xdf, 0x04, 0x49, 0x3e, 0xa3, 0xa9, 0x50, 0xdc, 0x1f, 0x0e, 0x93, 0x8f, 0x7d, 0x44, 0x83, 0xc7, + 0x3e, 0x8a, 0xff, 0x21, 0x02, 0xd9, 0x47, 0xac, 0x39, 0x94, 0xc3, 0x46, 0xd0, 0x58, 0x55, 0x75, + 0x65, 0x93, 0x69, 0x74, 0x1b, 0x7b, 0xd0, 0xef, 0xf6, 0xd9, 0xcd, 0x48, 0xfa, 0x9e, 0x05, 0x85, + 0x54, 0x4c, 0xdf, 0x0a, 0xb2, 0xe5, 0x0a, 0x1f, 0xe1, 0xd9, 0x74, 0xf9, 0x91, 0xd9, 0xf4, 0x26, + 0x78, 0x5c, 0xa1, 0xee, 0x78, 0x96, 0x8b, 0xde, 0x84, 0xd4, 0xbe, 0xed, 0x77, 0xbd, 0x03, 0xf6, + 0x64, 0x44, 0x86, 0x59, 0x3e, 0x3b, 0xb6, 0xdf, 0xda, 0x2d, 0xe1, 0xe4, 0xbe, 0xed, 0xb7, 0x0e, + 0x4c, 0xc2, 0x83, 0x20, 0xb1, 0x48, 0x03, 0x7c, 0x87, 0x3a, 0xb3, 
0x6f, 0xfb, 0x5b, 0x14, 0x80, + 0x5e, 0x67, 0x3c, 0x7c, 0x73, 0x9f, 0xbf, 0x64, 0x4f, 0xe8, 0xda, 0xe6, 0x7e, 0xf1, 0x03, 0x88, + 0x6f, 0x0f, 0xcc, 0xfd, 0x57, 0xbd, 0xe4, 0x51, 0xfc, 0xf9, 0x08, 0xc4, 0xb1, 0xb3, 0xf0, 0x76, + 0x48, 0x20, 0xf6, 0xe8, 0x1c, 0xb1, 0x7f, 0x08, 0x20, 0x2f, 0x8c, 0x8b, 0x15, 0x63, 0xde, 0x3d, + 0x73, 0xae, 0xcb, 0x01, 0xee, 0x97, 0xfa, 0x9a, 0xa8, 0xf8, 0x21, 0x24, 0x6b, 0x96, 0xef, 0xda, + 0xbd, 0xd3, 0xb6, 0x34, 0x22, 0x5a, 0xfa, 0xcf, 0x22, 0x90, 0xde, 0xb6, 0x07, 0xf2, 0xf5, 0xcc, + 0x63, 0x8e, 0x85, 0x56, 0x21, 0x31, 0x1a, 0xd8, 0x23, 0xe6, 0x28, 0x26, 0x30, 0x4b, 0xd0, 0x5d, + 0x20, 0xfb, 0x5b, 0xd2, 0xaa, 0x23, 0xff, 0xd1, 0x65, 0x48, 0x0c, 0xa9, 0x22, 0xc4, 0xe7, 0x5f, + 0xa0, 0x64, 0xb9, 0x84, 0x94, 0x9e, 0x59, 0xd2, 0x47, 0xb6, 0xf9, 0xf9, 0xe4, 0x39, 0x88, 0x4d, + 0xf8, 0xfb, 0x89, 0xfc, 0x05, 0xcd, 0x8e, 0x51, 0xc1, 0x04, 0x46, 0xb2, 0xf6, 0xf9, 0x1d, 0x57, + 0x9e, 0xb5, 0x43, 0xb2, 0xf6, 0xed, 0x7e, 0xf1, 0x6f, 0x44, 0x20, 0xc9, 0x0c, 0x02, 0xf9, 0x28, + 0x75, 0x44, 0x79, 0x94, 0xfa, 0x86, 0x9c, 0x5c, 0xa3, 0x8b, 0x6c, 0x08, 0x86, 0x53, 0xa8, 0x43, + 0x92, 0x41, 0x82, 0x30, 0x48, 0xca, 0xd4, 0x2c, 0xc2, 0x20, 0x29, 0x28, 0x34, 0x98, 0x92, 0x2c, + 0x20, 0x2a, 0x83, 0x29, 0x31, 0x14, 0x22, 0xde, 0x1c, 0x2b, 0xa9, 0xd4, 0xeb, 0x59, 0x9e, 0x47, + 0xcf, 0xf7, 0x85, 0x49, 0xa3, 0xde, 0x4b, 0x66, 0x48, 0x42, 0x8b, 0xb8, 0x45, 0xf3, 0x21, 0x24, + 0x4d, 0x4a, 0xc6, 0x3f, 0xf4, 0x29, 0x84, 0x90, 0x19, 0xc7, 0x9b, 0xec, 0x47, 0x50, 0x32, 0xfc, + 0xc2, 0x2e, 0x24, 0x79, 0x81, 0x64, 0x02, 0xb0, 0x4c, 0x71, 0x63, 0x9a, 0xfe, 0x27, 0xbd, 0xf9, + 0xcc, 0xb5, 0x7d, 0xf1, 0x7c, 0x32, 0x4b, 0x10, 0xe8, 0xf0, 0xe9, 0xc8, 0x61, 0x1e, 0x52, 0x1a, + 0xb3, 0x44, 0xb1, 0x03, 0xcb, 0xac, 0xb8, 0xc7, 0x07, 0xb6, 0x6f, 0x0d, 0x6c, 0xcf, 0x47, 0x5b, + 0x2c, 0xf0, 0xc0, 0x33, 0xab, 0xdf, 0x65, 0x15, 0x15, 0x53, 0xff, 0xca, 0x9c, 0xfa, 0x89, 0x8f, + 0x00, 0x39, 0x05, 0xcb, 0xf2, 0xde, 0xf9, 0x21, 0x48, 0x72, 0x97, 0x70, 0x0d, 0x50, 0x05, 0x1b, + 0x8f, 0x74, 0xdc, 0xad, 0x37, 0xe8, 0x9e, 0x19, 0x66, 0xd7, 0x1e, 0x11, 0x2c, 0x71, 0x38, 0xee, + 0xd4, 0xeb, 0x46, 0x7d, 0x47, 0x8b, 0x2a, 0xb0, 0xd2, 0x56, 0x83, 0xe2, 0xc5, 0x14, 0x58, 0xab, + 0xdd, 0x68, 0x36, 0xf5, 0x8a, 0x16, 0x7f, 0xe7, 0x27, 0xa3, 0x90, 0x91, 0x77, 0xb6, 0x91, 0x06, + 0x39, 0x7a, 0x0f, 0xad, 0xd5, 0x2e, 0xed, 0x10, 0x3e, 0x49, 0x74, 0x06, 0xf2, 0x02, 0x82, 0xdb, + 0x04, 0xf4, 0x9a, 0x44, 0x12, 0x85, 0x45, 0x24, 0xe4, 0xa1, 0x51, 0xad, 0x12, 0x48, 0x5a, 0x92, + 0x6d, 0x1b, 0x75, 0xa3, 0xb5, 0x4b, 0xaf, 0x5e, 0x2e, 0x43, 0x96, 0x81, 0xd8, 0x5d, 0xd2, 0x98, + 0x04, 0xb0, 0x7b, 0x7e, 0x6c, 0x1f, 0x93, 0x02, 0xd8, 0xcd, 0xcb, 0x14, 0x99, 0xe7, 0x69, 0xba, + 0x4a, 0x3c, 0xf4, 0x84, 0x2c, 0xa5, 0x82, 0x59, 0xe5, 0x33, 0x68, 0x15, 0x34, 0x7e, 0x6d, 0x8e, + 0xde, 0x3c, 0xa5, 0xc7, 0x23, 0x20, 0xc9, 0x76, 0x88, 0x0b, 0x9f, 0x45, 0xe7, 0xe0, 0xac, 0x4c, + 0x86, 0x6e, 0x31, 0xe6, 0x24, 0x47, 0x79, 0x05, 0xf1, 0x9d, 0xef, 0x44, 0x61, 0x29, 0xfc, 0xa9, + 0x04, 0xa1, 0x67, 0x24, 0x46, 0xa3, 0xde, 0xed, 0xd4, 0x5b, 0x9d, 0x66, 0x93, 0x09, 0x94, 0xde, + 0x69, 0x08, 0xb2, 0xc4, 0xae, 0x66, 0x84, 0xf4, 0x53, 0x00, 0x56, 0x24, 0xb0, 0x0a, 0x9a, 0x02, + 0x17, 0x62, 0x58, 0x81, 0xe5, 0x00, 0x2a, 0x2e, 0x9d, 0x86, 0x38, 0x8b, 0x06, 0x27, 0xa6, 0xeb, + 0x12, 0xb4, 0x3a, 0x89, 0x36, 0xa0, 0x10, 0x64, 0xcd, 0xb4, 0x35, 0x85, 0xd6, 0x61, 0x35, 0xc8, + 0xc7, 0x7a, 0xb9, 0xf1, 0x48, 0xc7, 0xac, 0xaf, 0x42, 0x65, 0x09, 0x51, 0x64, 0xb6, 0xca, 0xbf, + 0xf2, 0xbd, 0x8d, 0xd7, 0x7e, 0xf5, 0x7b, 0x1b, 0xaf, 0xfd, 0xda, 0xf7, 0x36, 0x22, 0xdf, 
0xff, + 0xde, 0x46, 0xe4, 0x0f, 0xbf, 0xb7, 0x11, 0xf9, 0xf6, 0xd1, 0x46, 0xe4, 0xe7, 0x8e, 0x36, 0x22, + 0xff, 0xe8, 0x68, 0x23, 0xf2, 0xcf, 0x8f, 0x36, 0x22, 0xbf, 0x7c, 0xb4, 0x11, 0xf9, 0x95, 0xa3, + 0x8d, 0xc8, 0x6f, 0x1e, 0x6d, 0xbc, 0xf6, 0xfd, 0xa3, 0x8d, 0xc8, 0x5f, 0xfa, 0xf5, 0x8d, 0xd7, + 0xfe, 0xf6, 0xaf, 0x6f, 0x44, 0xbe, 0xc9, 0xf6, 0x0f, 0xff, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, + 0x8b, 0xed, 0x3e, 0xc5, 0xd9, 0x8a, 0x00, 0x00, +} diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/mesos.pb_ffjson.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/mesos.pb_ffjson.go new file mode 100644 index 000000000000..c274c45d7257 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/mesos.pb_ffjson.go @@ -0,0 +1,63680 @@ +// DO NOT EDIT! +// Code generated by ffjson +// source: mesos.pb.go +// DO NOT EDIT! + +package mesos + +import ( + "bytes" + "encoding/base64" + "errors" + "fmt" + fflib "github.com/pquerna/ffjson/fflib/v1" + "reflect" +) + +func (mj *Address) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Address) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteByte('{') + if mj.Hostname != nil { + if true { + buf.WriteString(`"hostname":`) + fflib.WriteJsonString(buf, string(*mj.Hostname)) + buf.WriteByte(',') + } + } + if mj.IP != nil { + if true { + buf.WriteString(`"ip":`) + fflib.WriteJsonString(buf, string(*mj.IP)) + buf.WriteByte(',') + } + } + buf.WriteString(`"port":`) + fflib.FormatBits2(buf, uint64(mj.Port), 10, mj.Port < 0) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Addressbase = iota + ffj_t_Addressno_such_key + + ffj_t_Address_Hostname + + ffj_t_Address_IP + + ffj_t_Address_Port +) + +var ffj_key_Address_Hostname = []byte("hostname") + +var ffj_key_Address_IP = []byte("ip") + +var ffj_key_Address_Port = []byte("port") + +func (uj *Address) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Address) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Addressbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
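+ // (Descriptive annotation; not emitted by ffjson.) The generated decoder is a
+ // goto-driven state machine over fflib's lexer tokens. In the want_key state a
+ // key is matched first by an exact bytes.Equal against its precomputed
+ // ffj_key_* byte slice, dispatched on the key's first byte, and then by the
+ // case-insensitive EqualFoldRight/SimpleLetterEqualFold fallbacks below;
+ // anything unmatched is routed to ffj_t_Addressno_such_key and skipped.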
+ currentKey = ffj_t_Addressno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'h': + + if bytes.Equal(ffj_key_Address_Hostname, kn) { + currentKey = ffj_t_Address_Hostname + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffj_key_Address_IP, kn) { + currentKey = ffj_t_Address_IP + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffj_key_Address_Port, kn) { + currentKey = ffj_t_Address_Port + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Address_Port, kn) { + currentKey = ffj_t_Address_Port + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Address_IP, kn) { + currentKey = ffj_t_Address_IP + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Address_Hostname, kn) { + currentKey = ffj_t_Address_Hostname + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Addressno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Address_Hostname: + goto handle_Hostname + + case ffj_t_Address_IP: + goto handle_IP + + case ffj_t_Address_Port: + goto handle_Port + + case ffj_t_Addressno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Hostname: + + /* handler: uj.Hostname type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Hostname = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Hostname = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_IP: + + /* handler: uj.IP type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.IP = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.IP = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Port: + + /* handler: uj.Port type=int32 kind=int32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int32", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 32) + + if err != nil { + return fs.WrapErr(err) + } + + uj.Port = int32(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but 
got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *AgentID) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *AgentID) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"value":`) + fflib.WriteJsonString(buf, string(mj.Value)) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_AgentIDbase = iota + ffj_t_AgentIDno_such_key + + ffj_t_AgentID_Value +) + +var ffj_key_AgentID_Value = []byte("value") + +func (uj *AgentID) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *AgentID) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_AgentIDbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
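+ // (Descriptive annotation; not emitted by ffjson.) Note the asymmetry in null
+ // handling across the generated handlers: for pointer fields such as
+ // Address.Hostname and Address.IP a JSON null stores nil, while for
+ // non-pointer fields such as AgentID.Value and Address.Port the null branch
+ // is empty, leaving the Go zero value in place.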
+ currentKey = ffj_t_AgentIDno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'v': + + if bytes.Equal(ffj_key_AgentID_Value, kn) { + currentKey = ffj_t_AgentID_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_AgentID_Value, kn) { + currentKey = ffj_t_AgentID_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_AgentIDno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_AgentID_Value: + goto handle_Value + + case ffj_t_AgentIDno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Value: + + /* handler: uj.Value type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Value = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *AgentInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *AgentInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "hostname":`) + fflib.WriteJsonString(buf, string(mj.Hostname)) + buf.WriteByte(',') + if mj.Port != nil { + if true { + buf.WriteString(`"port":`) + fflib.FormatBits2(buf, uint64(*mj.Port), 10, *mj.Port < 0) + buf.WriteByte(',') + } + } + buf.WriteString(`"resources":`) + if mj.Resources != nil { + buf.WriteString(`[`) + for i, v := range mj.Resources { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteString(`,"attributes":`) + if mj.Attributes != nil { + buf.WriteString(`[`) + for i, v := range mj.Attributes { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + if mj.ID != nil { + if true { + buf.WriteString(`"id":`) + + { + + err = 
mj.ID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Domain != nil { + if true { + buf.WriteString(`"domain":`) + + { + + err = mj.Domain.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_AgentInfobase = iota + ffj_t_AgentInfono_such_key + + ffj_t_AgentInfo_Hostname + + ffj_t_AgentInfo_Port + + ffj_t_AgentInfo_Resources + + ffj_t_AgentInfo_Attributes + + ffj_t_AgentInfo_ID + + ffj_t_AgentInfo_Domain +) + +var ffj_key_AgentInfo_Hostname = []byte("hostname") + +var ffj_key_AgentInfo_Port = []byte("port") + +var ffj_key_AgentInfo_Resources = []byte("resources") + +var ffj_key_AgentInfo_Attributes = []byte("attributes") + +var ffj_key_AgentInfo_ID = []byte("id") + +var ffj_key_AgentInfo_Domain = []byte("domain") + +func (uj *AgentInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *AgentInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_AgentInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
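+	// Editorial note: the key dispatch that follows buckets candidate
+	// field names by their first byte and confirms exact matches with
+	// bytes.Equal; only then does it fall back to the case-insensitive
+	// helpers (SimpleLetterEqualFold / EqualFoldRight), so JSON keys are
+	// effectively matched case-insensitively.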
+ currentKey = ffj_t_AgentInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'a': + + if bytes.Equal(ffj_key_AgentInfo_Attributes, kn) { + currentKey = ffj_t_AgentInfo_Attributes + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'd': + + if bytes.Equal(ffj_key_AgentInfo_Domain, kn) { + currentKey = ffj_t_AgentInfo_Domain + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'h': + + if bytes.Equal(ffj_key_AgentInfo_Hostname, kn) { + currentKey = ffj_t_AgentInfo_Hostname + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffj_key_AgentInfo_ID, kn) { + currentKey = ffj_t_AgentInfo_ID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffj_key_AgentInfo_Port, kn) { + currentKey = ffj_t_AgentInfo_Port + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'r': + + if bytes.Equal(ffj_key_AgentInfo_Resources, kn) { + currentKey = ffj_t_AgentInfo_Resources + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_AgentInfo_Domain, kn) { + currentKey = ffj_t_AgentInfo_Domain + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_AgentInfo_ID, kn) { + currentKey = ffj_t_AgentInfo_ID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_AgentInfo_Attributes, kn) { + currentKey = ffj_t_AgentInfo_Attributes + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_AgentInfo_Resources, kn) { + currentKey = ffj_t_AgentInfo_Resources + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_AgentInfo_Port, kn) { + currentKey = ffj_t_AgentInfo_Port + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_AgentInfo_Hostname, kn) { + currentKey = ffj_t_AgentInfo_Hostname + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_AgentInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_AgentInfo_Hostname: + goto handle_Hostname + + case ffj_t_AgentInfo_Port: + goto handle_Port + + case ffj_t_AgentInfo_Resources: + goto handle_Resources + + case ffj_t_AgentInfo_Attributes: + goto handle_Attributes + + case ffj_t_AgentInfo_ID: + goto handle_ID + + case ffj_t_AgentInfo_Domain: + goto handle_Domain + + case ffj_t_AgentInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Hostname: + + /* handler: uj.Hostname type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Hostname = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto 
mainparse + +handle_Port: + + /* handler: uj.Port type=int32 kind=int32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int32", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.Port = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 32) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int32(tval) + uj.Port = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Resources: + + /* handler: uj.Resources type=[]mesos.Resource kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Resources = nil + } else { + + uj.Resources = []Resource{} + + wantVal := true + + for { + + var tmp_uj__Resources Resource + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Resources type=mesos.Resource kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Resources.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Resources = append(uj.Resources, tmp_uj__Resources) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Attributes: + + /* handler: uj.Attributes type=[]mesos.Attribute kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Attributes = nil + } else { + + uj.Attributes = []Attribute{} + + wantVal := true + + for { + + var tmp_uj__Attributes Attribute + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
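+	// Editorial note (hedged): in this file's fflib token naming,
+	// FFTok_left_bracket is the object opener '{' and FFTok_left_brace is
+	// the array opener '[' -- inverted from the usual terminology. The
+	// enclosing loop therefore scans array elements until
+	// FFTok_right_brace (']'), using wantVal to enforce the value/comma
+	// alternation and reject inputs such as [,,,].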
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Attributes type=mesos.Attribute kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Attributes.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Attributes = append(uj.Attributes, tmp_uj__Attributes) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ID: + + /* handler: uj.ID type=mesos.AgentID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.ID = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.ID == nil { + uj.ID = new(AgentID) + } + + err = uj.ID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Domain: + + /* handler: uj.Domain type=mesos.DomainInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Domain = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Domain == nil { + uj.Domain = new(DomainInfo) + } + + err = uj.Domain.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *AgentInfo_Capability) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *AgentInfo_Capability) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"type":`) + + { + + obj, err = mj.Type.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_AgentInfo_Capabilitybase = iota + ffj_t_AgentInfo_Capabilityno_such_key + + ffj_t_AgentInfo_Capability_Type +) + +var ffj_key_AgentInfo_Capability_Type = []byte("type") + +func (uj *AgentInfo_Capability) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *AgentInfo_Capability) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_AgentInfo_Capabilitybase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = 
fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_AgentInfo_Capabilityno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 't': + + if bytes.Equal(ffj_key_AgentInfo_Capability_Type, kn) { + currentKey = ffj_t_AgentInfo_Capability_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_AgentInfo_Capability_Type, kn) { + currentKey = ffj_t_AgentInfo_Capability_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_AgentInfo_Capabilityno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_AgentInfo_Capability_Type: + goto handle_Type + + case ffj_t_AgentInfo_Capabilityno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Type: + + /* handler: uj.Type type=mesos.AgentInfo_Capability_Type kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = uj.Type.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Attribute) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Attribute) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "name":`) + fflib.WriteJsonString(buf, string(mj.Name)) + buf.WriteString(`,"type":`) + + { + + obj, err = mj.Type.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + if mj.Scalar != nil { + if true { + buf.WriteString(`"scalar":`) + + { 
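+	// Editorial note: Attribute carries its typed value fields (scalar,
+	// ranges, set, text) as pointers, typically with exactly one set; the
+	// marshaler emits only the non-nil ones, each followed by a comma
+	// that the trailing buf.Rewind(1) below strips.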
+ + err = mj.Scalar.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Ranges != nil { + if true { + buf.WriteString(`"ranges":`) + + { + + err = mj.Ranges.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Set != nil { + if true { + buf.WriteString(`"set":`) + + { + + err = mj.Set.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Text != nil { + if true { + buf.WriteString(`"text":`) + + { + + err = mj.Text.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Attributebase = iota + ffj_t_Attributeno_such_key + + ffj_t_Attribute_Name + + ffj_t_Attribute_Type + + ffj_t_Attribute_Scalar + + ffj_t_Attribute_Ranges + + ffj_t_Attribute_Set + + ffj_t_Attribute_Text +) + +var ffj_key_Attribute_Name = []byte("name") + +var ffj_key_Attribute_Type = []byte("type") + +var ffj_key_Attribute_Scalar = []byte("scalar") + +var ffj_key_Attribute_Ranges = []byte("ranges") + +var ffj_key_Attribute_Set = []byte("set") + +var ffj_key_Attribute_Text = []byte("text") + +func (uj *Attribute) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Attribute) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Attributebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
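+	// Editorial note (hedged): enum-valued fields such as "type" are not
+	// decoded inline; handle_Type below captures the raw token bytes via
+	// fs.CaptureField and delegates to Value_Type.UnmarshalJSON, so both
+	// quoted enum names and bare numbers are accepted if that method
+	// supports them.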
+ currentKey = ffj_t_Attributeno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'n': + + if bytes.Equal(ffj_key_Attribute_Name, kn) { + currentKey = ffj_t_Attribute_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'r': + + if bytes.Equal(ffj_key_Attribute_Ranges, kn) { + currentKey = ffj_t_Attribute_Ranges + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_Attribute_Scalar, kn) { + currentKey = ffj_t_Attribute_Scalar + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Attribute_Set, kn) { + currentKey = ffj_t_Attribute_Set + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_Attribute_Type, kn) { + currentKey = ffj_t_Attribute_Type + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Attribute_Text, kn) { + currentKey = ffj_t_Attribute_Text + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Attribute_Text, kn) { + currentKey = ffj_t_Attribute_Text + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Attribute_Set, kn) { + currentKey = ffj_t_Attribute_Set + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Attribute_Ranges, kn) { + currentKey = ffj_t_Attribute_Ranges + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Attribute_Scalar, kn) { + currentKey = ffj_t_Attribute_Scalar + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Attribute_Type, kn) { + currentKey = ffj_t_Attribute_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Attribute_Name, kn) { + currentKey = ffj_t_Attribute_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Attributeno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Attribute_Name: + goto handle_Name + + case ffj_t_Attribute_Type: + goto handle_Type + + case ffj_t_Attribute_Scalar: + goto handle_Scalar + + case ffj_t_Attribute_Ranges: + goto handle_Ranges + + case ffj_t_Attribute_Set: + goto handle_Set + + case ffj_t_Attribute_Text: + goto handle_Text + + case ffj_t_Attributeno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Name: + + /* handler: uj.Name type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Name = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Type: + + /* handler: uj.Type type=mesos.Value_Type kind=int32 quoted=false*/ + + { + if tok 
== fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = uj.Type.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Scalar: + + /* handler: uj.Scalar type=mesos.Value_Scalar kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Scalar = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Scalar == nil { + uj.Scalar = new(Value_Scalar) + } + + err = uj.Scalar.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Ranges: + + /* handler: uj.Ranges type=mesos.Value_Ranges kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Ranges = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Ranges == nil { + uj.Ranges = new(Value_Ranges) + } + + err = uj.Ranges.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Set: + + /* handler: uj.Set type=mesos.Value_Set kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Set = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Set == nil { + uj.Set = new(Value_Set) + } + + err = uj.Set.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Text: + + /* handler: uj.Text type=mesos.Value_Text kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Text = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Text == nil { + uj.Text = new(Value_Text) + } + + err = uj.Text.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *CSIPluginContainerInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *CSIPluginContainerInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if len(mj.Services) != 0 { + buf.WriteString(`"services":`) + if mj.Services != nil { + buf.WriteString(`[`) + for i, v := range mj.Services { + if i != 0 { + buf.WriteString(`,`) + } + + { + + obj, err = v.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + if mj.Command != nil { + 
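+	// Editorial note: the object opener was written as "{ " (with a
+	// padding space) because the leading fields are conditional; every
+	// emitted field appends a trailing comma, and the final buf.Rewind(1)
+	// removes the last byte -- the dangling comma, or the padding space
+	// when no field was written (all-optional messages then marshal
+	// as "{}").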
if true { + buf.WriteString(`"command":`) + + { + + err = mj.Command.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.WriteString(`"resources":`) + if mj.Resources != nil { + buf.WriteString(`[`) + for i, v := range mj.Resources { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + if mj.Container != nil { + if true { + buf.WriteString(`"container":`) + + { + + err = mj.Container.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_CSIPluginContainerInfobase = iota + ffj_t_CSIPluginContainerInfono_such_key + + ffj_t_CSIPluginContainerInfo_Services + + ffj_t_CSIPluginContainerInfo_Command + + ffj_t_CSIPluginContainerInfo_Resources + + ffj_t_CSIPluginContainerInfo_Container +) + +var ffj_key_CSIPluginContainerInfo_Services = []byte("services") + +var ffj_key_CSIPluginContainerInfo_Command = []byte("command") + +var ffj_key_CSIPluginContainerInfo_Resources = []byte("resources") + +var ffj_key_CSIPluginContainerInfo_Container = []byte("container") + +func (uj *CSIPluginContainerInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *CSIPluginContainerInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_CSIPluginContainerInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_CSIPluginContainerInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_CSIPluginContainerInfo_Command, kn) { + currentKey = ffj_t_CSIPluginContainerInfo_Command + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_CSIPluginContainerInfo_Container, kn) { + currentKey = ffj_t_CSIPluginContainerInfo_Container + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'r': + + if bytes.Equal(ffj_key_CSIPluginContainerInfo_Resources, kn) { + currentKey = ffj_t_CSIPluginContainerInfo_Resources + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_CSIPluginContainerInfo_Services, kn) { + currentKey = ffj_t_CSIPluginContainerInfo_Services + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_CSIPluginContainerInfo_Container, kn) { + currentKey = ffj_t_CSIPluginContainerInfo_Container + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_CSIPluginContainerInfo_Resources, kn) { + currentKey = ffj_t_CSIPluginContainerInfo_Resources + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_CSIPluginContainerInfo_Command, kn) { + currentKey = ffj_t_CSIPluginContainerInfo_Command + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_CSIPluginContainerInfo_Services, kn) { + currentKey = ffj_t_CSIPluginContainerInfo_Services + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_CSIPluginContainerInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_CSIPluginContainerInfo_Services: + goto handle_Services + + case ffj_t_CSIPluginContainerInfo_Command: + goto handle_Command + + case ffj_t_CSIPluginContainerInfo_Resources: + goto handle_Resources + + case ffj_t_CSIPluginContainerInfo_Container: + goto handle_Container + + case ffj_t_CSIPluginContainerInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Services: + + /* handler: uj.Services type=[]mesos.CSIPluginContainerInfo_Service kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Services = nil + } else { + + uj.Services = []CSIPluginContainerInfo_Service{} + + wantVal := true + + for { + + var tmp_uj__Services CSIPluginContainerInfo_Service + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
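+	// Editorial note: the enclosing loop decodes a slice of int32-backed
+	// enum values; each element's token is captured with fs.CaptureField
+	// and handed to CSIPluginContainerInfo_Service.UnmarshalJSON rather
+	// than being parsed inline.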
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Services type=mesos.CSIPluginContainerInfo_Service kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = tmp_uj__Services.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + uj.Services = append(uj.Services, tmp_uj__Services) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Command: + + /* handler: uj.Command type=mesos.CommandInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Command = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Command == nil { + uj.Command = new(CommandInfo) + } + + err = uj.Command.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Resources: + + /* handler: uj.Resources type=[]mesos.Resource kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Resources = nil + } else { + + uj.Resources = []Resource{} + + wantVal := true + + for { + + var tmp_uj__Resources Resource + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
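+	// Editorial note: struct-typed elements, by contrast, re-enter the
+	// generated machinery directly -- Resource.UnmarshalJSONFFLexer is
+	// invoked on the same lexer, starting in the FFParse_want_key state
+	// because the element's '{' has already been consumed.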
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Resources type=mesos.Resource kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Resources.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Resources = append(uj.Resources, tmp_uj__Resources) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Container: + + /* handler: uj.Container type=mesos.ContainerInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Container = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Container == nil { + uj.Container = new(ContainerInfo) + } + + err = uj.Container.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *CSIPluginInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *CSIPluginInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"type":`) + fflib.WriteJsonString(buf, string(mj.Type)) + buf.WriteString(`,"name":`) + fflib.WriteJsonString(buf, string(mj.Name)) + buf.WriteString(`,"containers":`) + if mj.Containers != nil { + buf.WriteString(`[`) + for i, v := range mj.Containers { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_CSIPluginInfobase = iota + ffj_t_CSIPluginInfono_such_key + + ffj_t_CSIPluginInfo_Type + + ffj_t_CSIPluginInfo_Name + + ffj_t_CSIPluginInfo_Containers +) + +var ffj_key_CSIPluginInfo_Type = []byte("type") + +var ffj_key_CSIPluginInfo_Name = []byte("name") + +var ffj_key_CSIPluginInfo_Containers = []byte("containers") + +func (uj *CSIPluginInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *CSIPluginInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_CSIPluginInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto 
wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_CSIPluginInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_CSIPluginInfo_Containers, kn) { + currentKey = ffj_t_CSIPluginInfo_Containers + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffj_key_CSIPluginInfo_Name, kn) { + currentKey = ffj_t_CSIPluginInfo_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_CSIPluginInfo_Type, kn) { + currentKey = ffj_t_CSIPluginInfo_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_CSIPluginInfo_Containers, kn) { + currentKey = ffj_t_CSIPluginInfo_Containers + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_CSIPluginInfo_Name, kn) { + currentKey = ffj_t_CSIPluginInfo_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_CSIPluginInfo_Type, kn) { + currentKey = ffj_t_CSIPluginInfo_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_CSIPluginInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_CSIPluginInfo_Type: + goto handle_Type + + case ffj_t_CSIPluginInfo_Name: + goto handle_Name + + case ffj_t_CSIPluginInfo_Containers: + goto handle_Containers + + case ffj_t_CSIPluginInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Type: + + /* handler: uj.Type type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Type = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Name: + + /* handler: uj.Name type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Name = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Containers: + + /* handler: uj.Containers 
type=[]mesos.CSIPluginContainerInfo kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Containers = nil + } else { + + uj.Containers = []CSIPluginContainerInfo{} + + wantVal := true + + for { + + var tmp_uj__Containers CSIPluginContainerInfo + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Containers type=mesos.CSIPluginContainerInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Containers.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Containers = append(uj.Containers, tmp_uj__Containers) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *CapabilityInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *CapabilityInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if len(mj.Capabilities) != 0 { + buf.WriteString(`"capabilities":`) + if mj.Capabilities != nil { + buf.WriteString(`[`) + for i, v := range mj.Capabilities { + if i != 0 { + buf.WriteString(`,`) + } + + { + + obj, err = v.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_CapabilityInfobase = iota + ffj_t_CapabilityInfono_such_key + + ffj_t_CapabilityInfo_Capabilities +) + +var ffj_key_CapabilityInfo_Capabilities = []byte("capabilities") + +func (uj *CapabilityInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *CapabilityInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_CapabilityInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + 
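+	// Editorial note: every generated unmarshaler is the same five-state
+	// machine -- map_start -> want_key -> want_colon -> want_value ->
+	// after_value -- driven by the mainparse loop; per-field handlers
+	// jump back via goto mainparse, and failures funnel into the shared
+	// wantedvalue/wrongtokenerror/tokerror labels.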
wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_CapabilityInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_CapabilityInfo_Capabilities, kn) { + currentKey = ffj_t_CapabilityInfo_Capabilities + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_CapabilityInfo_Capabilities, kn) { + currentKey = ffj_t_CapabilityInfo_Capabilities + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_CapabilityInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_CapabilityInfo_Capabilities: + goto handle_Capabilities + + case ffj_t_CapabilityInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Capabilities: + + /* handler: uj.Capabilities type=[]mesos.CapabilityInfo_Capability kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Capabilities = nil + } else { + + uj.Capabilities = []CapabilityInfo_Capability{} + + wantVal := true + + for { + + var tmp_uj__Capabilities CapabilityInfo_Capability + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Capabilities type=mesos.CapabilityInfo_Capability kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = tmp_uj__Capabilities.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + uj.Capabilities = append(uj.Capabilities, tmp_uj__Capabilities) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *CgroupInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *CgroupInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.NetCLS != nil { + if true { + buf.WriteString(`"net_cls":`) + + { + + err = mj.NetCLS.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_CgroupInfobase = iota + ffj_t_CgroupInfono_such_key + + ffj_t_CgroupInfo_NetCLS +) + +var ffj_key_CgroupInfo_NetCLS = []byte("net_cls") + +func (uj *CgroupInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *CgroupInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_CgroupInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_CgroupInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'n': + + if bytes.Equal(ffj_key_CgroupInfo_NetCLS, kn) { + currentKey = ffj_t_CgroupInfo_NetCLS + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_CgroupInfo_NetCLS, kn) { + currentKey = ffj_t_CgroupInfo_NetCLS + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_CgroupInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_CgroupInfo_NetCLS: + goto handle_NetCLS + + case ffj_t_CgroupInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_NetCLS: + + /* handler: uj.NetCLS type=mesos.CgroupInfo_NetCls kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.NetCLS = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.NetCLS == nil { + uj.NetCLS = new(CgroupInfo_NetCls) + } + + err = uj.NetCLS.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *CgroupInfo_Blkio) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *CgroupInfo_Blkio) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{}`) + return nil +} + +const ( + ffj_t_CgroupInfo_Blkiobase = iota + ffj_t_CgroupInfo_Blkiono_such_key +) + +func (uj *CgroupInfo_Blkio) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *CgroupInfo_Blkio) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_CgroupInfo_Blkiobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue 
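+	// Editorial note: CgroupInfo_Blkio has no JSON-visible fields, so its
+	// marshaler emits a literal "{}" and this unmarshaler's key switch is
+	// empty -- any key encountered is routed to no_such_key and skipped.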
+ + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_CgroupInfo_Blkiono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + } + + currentKey = ffj_t_CgroupInfo_Blkiono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_CgroupInfo_Blkiono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *CgroupInfo_Blkio_CFQ) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *CgroupInfo_Blkio_CFQ) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{}`) + return nil +} + +const ( + ffj_t_CgroupInfo_Blkio_CFQbase = iota + ffj_t_CgroupInfo_Blkio_CFQno_such_key +) + +func (uj *CgroupInfo_Blkio_CFQ) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *CgroupInfo_Blkio_CFQ) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_CgroupInfo_Blkio_CFQbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. 
goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_CgroupInfo_Blkio_CFQno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + } + + currentKey = ffj_t_CgroupInfo_Blkio_CFQno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_CgroupInfo_Blkio_CFQno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *CgroupInfo_Blkio_CFQ_Statistics) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *CgroupInfo_Blkio_CFQ_Statistics) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteByte('{') + if mj.Device != nil { + if true { + buf.WriteString(`"device":`) + + { + + err = mj.Device.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Sectors != nil { + if true { + buf.WriteString(`"sectors":`) + fflib.FormatBits2(buf, uint64(*mj.Sectors), 10, false) + buf.WriteByte(',') + } + } + if mj.Time != nil { + if true { + buf.WriteString(`"time":`) + fflib.FormatBits2(buf, uint64(*mj.Time), 10, false) + buf.WriteByte(',') + } + } + buf.WriteString(`"io_serviced":`) + if mj.IOServiced != nil { + buf.WriteString(`[`) + for i, v := range mj.IOServiced { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteString(`,"io_service_bytes":`) + if mj.IOServiceBytes != nil { + buf.WriteString(`[`) + for i, v := range mj.IOServiceBytes { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteString(`,"io_service_time":`) + if mj.IOServiceTime != nil { + buf.WriteString(`[`) + for i, v := range mj.IOServiceTime { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + 
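+	// Editorial note: nil slices are deliberately marshaled as JSON null
+	// rather than [], and the unmarshalers map null back to nil while []
+	// yields an empty slice, preserving the nil/empty distinction across
+	// a round trip.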
buf.WriteString(`null`) + } + buf.WriteString(`,"io_wait_time":`) + if mj.IOWaitTime != nil { + buf.WriteString(`[`) + for i, v := range mj.IOWaitTime { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteString(`,"io_merged":`) + if mj.IOMerged != nil { + buf.WriteString(`[`) + for i, v := range mj.IOMerged { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteString(`,"io_queued":`) + if mj.IOQueued != nil { + buf.WriteString(`[`) + for i, v := range mj.IOQueued { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_CgroupInfo_Blkio_CFQ_Statisticsbase = iota + ffj_t_CgroupInfo_Blkio_CFQ_Statisticsno_such_key + + ffj_t_CgroupInfo_Blkio_CFQ_Statistics_Device + + ffj_t_CgroupInfo_Blkio_CFQ_Statistics_Sectors + + ffj_t_CgroupInfo_Blkio_CFQ_Statistics_Time + + ffj_t_CgroupInfo_Blkio_CFQ_Statistics_IOServiced + + ffj_t_CgroupInfo_Blkio_CFQ_Statistics_IOServiceBytes + + ffj_t_CgroupInfo_Blkio_CFQ_Statistics_IOServiceTime + + ffj_t_CgroupInfo_Blkio_CFQ_Statistics_IOWaitTime + + ffj_t_CgroupInfo_Blkio_CFQ_Statistics_IOMerged + + ffj_t_CgroupInfo_Blkio_CFQ_Statistics_IOQueued +) + +var ffj_key_CgroupInfo_Blkio_CFQ_Statistics_Device = []byte("device") + +var ffj_key_CgroupInfo_Blkio_CFQ_Statistics_Sectors = []byte("sectors") + +var ffj_key_CgroupInfo_Blkio_CFQ_Statistics_Time = []byte("time") + +var ffj_key_CgroupInfo_Blkio_CFQ_Statistics_IOServiced = []byte("io_serviced") + +var ffj_key_CgroupInfo_Blkio_CFQ_Statistics_IOServiceBytes = []byte("io_service_bytes") + +var ffj_key_CgroupInfo_Blkio_CFQ_Statistics_IOServiceTime = []byte("io_service_time") + +var ffj_key_CgroupInfo_Blkio_CFQ_Statistics_IOWaitTime = []byte("io_wait_time") + +var ffj_key_CgroupInfo_Blkio_CFQ_Statistics_IOMerged = []byte("io_merged") + +var ffj_key_CgroupInfo_Blkio_CFQ_Statistics_IOQueued = []byte("io_queued") + +func (uj *CgroupInfo_Blkio_CFQ_Statistics) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *CgroupInfo_Blkio_CFQ_Statistics) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_CgroupInfo_Blkio_CFQ_Statisticsbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. 
+ if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_CgroupInfo_Blkio_CFQ_Statisticsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'd': + + if bytes.Equal(ffj_key_CgroupInfo_Blkio_CFQ_Statistics_Device, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_CFQ_Statistics_Device + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffj_key_CgroupInfo_Blkio_CFQ_Statistics_IOServiced, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_CFQ_Statistics_IOServiced + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_CgroupInfo_Blkio_CFQ_Statistics_IOServiceBytes, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_CFQ_Statistics_IOServiceBytes + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_CgroupInfo_Blkio_CFQ_Statistics_IOServiceTime, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_CFQ_Statistics_IOServiceTime + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_CgroupInfo_Blkio_CFQ_Statistics_IOWaitTime, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_CFQ_Statistics_IOWaitTime + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_CgroupInfo_Blkio_CFQ_Statistics_IOMerged, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_CFQ_Statistics_IOMerged + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_CgroupInfo_Blkio_CFQ_Statistics_IOQueued, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_CFQ_Statistics_IOQueued + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_CgroupInfo_Blkio_CFQ_Statistics_Sectors, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_CFQ_Statistics_Sectors + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_CgroupInfo_Blkio_CFQ_Statistics_Time, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_CFQ_Statistics_Time + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.AsciiEqualFold(ffj_key_CgroupInfo_Blkio_CFQ_Statistics_IOQueued, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_CFQ_Statistics_IOQueued + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_CgroupInfo_Blkio_CFQ_Statistics_IOMerged, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_CFQ_Statistics_IOMerged + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_CgroupInfo_Blkio_CFQ_Statistics_IOWaitTime, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_CFQ_Statistics_IOWaitTime + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_CgroupInfo_Blkio_CFQ_Statistics_IOServiceTime, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_CFQ_Statistics_IOServiceTime + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_CgroupInfo_Blkio_CFQ_Statistics_IOServiceBytes, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_CFQ_Statistics_IOServiceBytes + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_CgroupInfo_Blkio_CFQ_Statistics_IOServiced, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_CFQ_Statistics_IOServiced + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_CgroupInfo_Blkio_CFQ_Statistics_Time, kn) { + currentKey = 
ffj_t_CgroupInfo_Blkio_CFQ_Statistics_Time + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_CgroupInfo_Blkio_CFQ_Statistics_Sectors, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_CFQ_Statistics_Sectors + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_CgroupInfo_Blkio_CFQ_Statistics_Device, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_CFQ_Statistics_Device + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_CgroupInfo_Blkio_CFQ_Statisticsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_CgroupInfo_Blkio_CFQ_Statistics_Device: + goto handle_Device + + case ffj_t_CgroupInfo_Blkio_CFQ_Statistics_Sectors: + goto handle_Sectors + + case ffj_t_CgroupInfo_Blkio_CFQ_Statistics_Time: + goto handle_Time + + case ffj_t_CgroupInfo_Blkio_CFQ_Statistics_IOServiced: + goto handle_IOServiced + + case ffj_t_CgroupInfo_Blkio_CFQ_Statistics_IOServiceBytes: + goto handle_IOServiceBytes + + case ffj_t_CgroupInfo_Blkio_CFQ_Statistics_IOServiceTime: + goto handle_IOServiceTime + + case ffj_t_CgroupInfo_Blkio_CFQ_Statistics_IOWaitTime: + goto handle_IOWaitTime + + case ffj_t_CgroupInfo_Blkio_CFQ_Statistics_IOMerged: + goto handle_IOMerged + + case ffj_t_CgroupInfo_Blkio_CFQ_Statistics_IOQueued: + goto handle_IOQueued + + case ffj_t_CgroupInfo_Blkio_CFQ_Statisticsno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Device: + + /* handler: uj.Device type=mesos.Device_Number kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Device = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Device == nil { + uj.Device = new(Device_Number) + } + + err = uj.Device.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Sectors: + + /* handler: uj.Sectors type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.Sectors = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.Sectors = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Time: + + /* handler: uj.Time type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.Time = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.Time = &ttypval + + } + } + + state = 
fflib.FFParse_after_value + goto mainparse + +handle_IOServiced: + + /* handler: uj.IOServiced type=[]mesos.CgroupInfo_Blkio_Value kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.IOServiced = nil + } else { + + uj.IOServiced = []CgroupInfo_Blkio_Value{} + + wantVal := true + + for { + + var tmp_uj__IOServiced CgroupInfo_Blkio_Value + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__IOServiced type=mesos.CgroupInfo_Blkio_Value kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__IOServiced.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.IOServiced = append(uj.IOServiced, tmp_uj__IOServiced) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_IOServiceBytes: + + /* handler: uj.IOServiceBytes type=[]mesos.CgroupInfo_Blkio_Value kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.IOServiceBytes = nil + } else { + + uj.IOServiceBytes = []CgroupInfo_Blkio_Value{} + + wantVal := true + + for { + + var tmp_uj__IOServiceBytes CgroupInfo_Blkio_Value + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__IOServiceBytes type=mesos.CgroupInfo_Blkio_Value kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__IOServiceBytes.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.IOServiceBytes = append(uj.IOServiceBytes, tmp_uj__IOServiceBytes) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_IOServiceTime: + + /* handler: uj.IOServiceTime type=[]mesos.CgroupInfo_Blkio_Value kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.IOServiceTime = nil + } else { + + uj.IOServiceTime = []CgroupInfo_Blkio_Value{} + + wantVal := true + + for { + + var tmp_uj__IOServiceTime CgroupInfo_Blkio_Value + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__IOServiceTime type=mesos.CgroupInfo_Blkio_Value kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__IOServiceTime.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.IOServiceTime = append(uj.IOServiceTime, tmp_uj__IOServiceTime) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_IOWaitTime: + + /* handler: uj.IOWaitTime type=[]mesos.CgroupInfo_Blkio_Value kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.IOWaitTime = nil + } else { + + uj.IOWaitTime = []CgroupInfo_Blkio_Value{} + + wantVal := true + + for { + + var tmp_uj__IOWaitTime CgroupInfo_Blkio_Value + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__IOWaitTime type=mesos.CgroupInfo_Blkio_Value kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__IOWaitTime.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.IOWaitTime = append(uj.IOWaitTime, tmp_uj__IOWaitTime) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_IOMerged: + + /* handler: uj.IOMerged type=[]mesos.CgroupInfo_Blkio_Value kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.IOMerged = nil + } else { + + uj.IOMerged = []CgroupInfo_Blkio_Value{} + + wantVal := true + + for { + + var tmp_uj__IOMerged CgroupInfo_Blkio_Value + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__IOMerged type=mesos.CgroupInfo_Blkio_Value kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__IOMerged.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.IOMerged = append(uj.IOMerged, tmp_uj__IOMerged) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_IOQueued: + + /* handler: uj.IOQueued type=[]mesos.CgroupInfo_Blkio_Value kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.IOQueued = nil + } else { + + uj.IOQueued = []CgroupInfo_Blkio_Value{} + + wantVal := true + + for { + + var tmp_uj__IOQueued CgroupInfo_Blkio_Value + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__IOQueued type=mesos.CgroupInfo_Blkio_Value kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__IOQueued.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.IOQueued = append(uj.IOQueued, tmp_uj__IOQueued) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *CgroupInfo_Blkio_Statistics) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *CgroupInfo_Blkio_Statistics) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "cfq":`) + if mj.CFQ != nil { + buf.WriteString(`[`) + for i, v := range mj.CFQ { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteString(`,"cfq_recursive":`) + if mj.CFQRecursive != nil { + buf.WriteString(`[`) + for i, v := range mj.CFQRecursive { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + if len(mj.Throttling) != 0 { + buf.WriteString(`"throttling":`) + if mj.Throttling != nil { + buf.WriteString(`[`) + for i, v := range mj.Throttling { + if i != 0 { + buf.WriteString(`,`) + } + + { + + if v == nil { + buf.WriteString("null") + return nil + } + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_CgroupInfo_Blkio_Statisticsbase = iota + ffj_t_CgroupInfo_Blkio_Statisticsno_such_key + + ffj_t_CgroupInfo_Blkio_Statistics_CFQ + + ffj_t_CgroupInfo_Blkio_Statistics_CFQRecursive + + ffj_t_CgroupInfo_Blkio_Statistics_Throttling +) + +var ffj_key_CgroupInfo_Blkio_Statistics_CFQ = []byte("cfq") + +var ffj_key_CgroupInfo_Blkio_Statistics_CFQRecursive = []byte("cfq_recursive") + +var ffj_key_CgroupInfo_Blkio_Statistics_Throttling = []byte("throttling") + +func (uj *CgroupInfo_Blkio_Statistics) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *CgroupInfo_Blkio_Statistics) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_CgroupInfo_Blkio_Statisticsbase + _ = currentKey + tok := fflib.FFTok_init + 
wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_CgroupInfo_Blkio_Statisticsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_CgroupInfo_Blkio_Statistics_CFQ, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_Statistics_CFQ + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_CgroupInfo_Blkio_Statistics_CFQRecursive, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_Statistics_CFQRecursive + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_CgroupInfo_Blkio_Statistics_Throttling, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_Statistics_Throttling + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_CgroupInfo_Blkio_Statistics_Throttling, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_Statistics_Throttling + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_CgroupInfo_Blkio_Statistics_CFQRecursive, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_Statistics_CFQRecursive + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_CgroupInfo_Blkio_Statistics_CFQ, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_Statistics_CFQ + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_CgroupInfo_Blkio_Statisticsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_CgroupInfo_Blkio_Statistics_CFQ: + goto handle_CFQ + + case ffj_t_CgroupInfo_Blkio_Statistics_CFQRecursive: + goto handle_CFQRecursive + + case ffj_t_CgroupInfo_Blkio_Statistics_Throttling: + goto handle_Throttling + + case ffj_t_CgroupInfo_Blkio_Statisticsno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_CFQ: + + /* handler: uj.CFQ type=[]mesos.CgroupInfo_Blkio_CFQ_Statistics kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == 
fflib.FFTok_null { + uj.CFQ = nil + } else { + + uj.CFQ = []CgroupInfo_Blkio_CFQ_Statistics{} + + wantVal := true + + for { + + var tmp_uj__CFQ CgroupInfo_Blkio_CFQ_Statistics + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__CFQ type=mesos.CgroupInfo_Blkio_CFQ_Statistics kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__CFQ.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.CFQ = append(uj.CFQ, tmp_uj__CFQ) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_CFQRecursive: + + /* handler: uj.CFQRecursive type=[]mesos.CgroupInfo_Blkio_CFQ_Statistics kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.CFQRecursive = nil + } else { + + uj.CFQRecursive = []CgroupInfo_Blkio_CFQ_Statistics{} + + wantVal := true + + for { + + var tmp_uj__CFQRecursive CgroupInfo_Blkio_CFQ_Statistics + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__CFQRecursive type=mesos.CgroupInfo_Blkio_CFQ_Statistics kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__CFQRecursive.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.CFQRecursive = append(uj.CFQRecursive, tmp_uj__CFQRecursive) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Throttling: + + /* handler: uj.Throttling type=[]*mesos.CgroupInfo_Blkio_Throttling_Statistics kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Throttling = nil + } else { + + uj.Throttling = []*CgroupInfo_Blkio_Throttling_Statistics{} + + wantVal := true + + for { + + var tmp_uj__Throttling *CgroupInfo_Blkio_Throttling_Statistics + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Throttling type=*mesos.CgroupInfo_Blkio_Throttling_Statistics kind=ptr quoted=false*/ + + { + if tok == fflib.FFTok_null { + + tmp_uj__Throttling = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if tmp_uj__Throttling == nil { + tmp_uj__Throttling = new(CgroupInfo_Blkio_Throttling_Statistics) + } + + err = tmp_uj__Throttling.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Throttling = append(uj.Throttling, tmp_uj__Throttling) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *CgroupInfo_Blkio_Throttling) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *CgroupInfo_Blkio_Throttling) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{}`) + return nil +} + +const ( + ffj_t_CgroupInfo_Blkio_Throttlingbase = iota + ffj_t_CgroupInfo_Blkio_Throttlingno_such_key +) + +func (uj *CgroupInfo_Blkio_Throttling) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *CgroupInfo_Blkio_Throttling) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_CgroupInfo_Blkio_Throttlingbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_CgroupInfo_Blkio_Throttlingno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + } + + currentKey = ffj_t_CgroupInfo_Blkio_Throttlingno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_CgroupInfo_Blkio_Throttlingno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *CgroupInfo_Blkio_Throttling_Statistics) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *CgroupInfo_Blkio_Throttling_Statistics) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteByte('{') + if mj.Device != nil { + if true { + buf.WriteString(`"device":`) + + { + + err = mj.Device.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.WriteString(`"io_serviced":`) + if mj.IOServiced != nil { + buf.WriteString(`[`) + for i, v := range mj.IOServiced { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteString(`,"io_service_bytes":`) + if mj.IOServiceBytes != nil { + buf.WriteString(`[`) + for i, v := range mj.IOServiceBytes { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_CgroupInfo_Blkio_Throttling_Statisticsbase = iota + ffj_t_CgroupInfo_Blkio_Throttling_Statisticsno_such_key + + ffj_t_CgroupInfo_Blkio_Throttling_Statistics_Device + + ffj_t_CgroupInfo_Blkio_Throttling_Statistics_IOServiced + + ffj_t_CgroupInfo_Blkio_Throttling_Statistics_IOServiceBytes +) + +var ffj_key_CgroupInfo_Blkio_Throttling_Statistics_Device = []byte("device") + +var ffj_key_CgroupInfo_Blkio_Throttling_Statistics_IOServiced = []byte("io_serviced") + +var ffj_key_CgroupInfo_Blkio_Throttling_Statistics_IOServiceBytes = []byte("io_service_bytes") + +func (uj *CgroupInfo_Blkio_Throttling_Statistics) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj 
*CgroupInfo_Blkio_Throttling_Statistics) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_CgroupInfo_Blkio_Throttling_Statisticsbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_CgroupInfo_Blkio_Throttling_Statisticsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'd': + + if bytes.Equal(ffj_key_CgroupInfo_Blkio_Throttling_Statistics_Device, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_Throttling_Statistics_Device + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffj_key_CgroupInfo_Blkio_Throttling_Statistics_IOServiced, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_Throttling_Statistics_IOServiced + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_CgroupInfo_Blkio_Throttling_Statistics_IOServiceBytes, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_Throttling_Statistics_IOServiceBytes + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_CgroupInfo_Blkio_Throttling_Statistics_IOServiceBytes, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_Throttling_Statistics_IOServiceBytes + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_CgroupInfo_Blkio_Throttling_Statistics_IOServiced, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_Throttling_Statistics_IOServiced + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_CgroupInfo_Blkio_Throttling_Statistics_Device, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_Throttling_Statistics_Device + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_CgroupInfo_Blkio_Throttling_Statisticsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_CgroupInfo_Blkio_Throttling_Statistics_Device: + goto handle_Device + + case ffj_t_CgroupInfo_Blkio_Throttling_Statistics_IOServiced: + goto handle_IOServiced + + case ffj_t_CgroupInfo_Blkio_Throttling_Statistics_IOServiceBytes: + goto handle_IOServiceBytes + + case ffj_t_CgroupInfo_Blkio_Throttling_Statisticsno_such_key: + 
err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Device: + + /* handler: uj.Device type=mesos.Device_Number kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Device = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Device == nil { + uj.Device = new(Device_Number) + } + + err = uj.Device.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_IOServiced: + + /* handler: uj.IOServiced type=[]mesos.CgroupInfo_Blkio_Value kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.IOServiced = nil + } else { + + uj.IOServiced = []CgroupInfo_Blkio_Value{} + + wantVal := true + + for { + + var tmp_uj__IOServiced CgroupInfo_Blkio_Value + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__IOServiced type=mesos.CgroupInfo_Blkio_Value kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__IOServiced.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.IOServiced = append(uj.IOServiced, tmp_uj__IOServiced) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_IOServiceBytes: + + /* handler: uj.IOServiceBytes type=[]mesos.CgroupInfo_Blkio_Value kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.IOServiceBytes = nil + } else { + + uj.IOServiceBytes = []CgroupInfo_Blkio_Value{} + + wantVal := true + + for { + + var tmp_uj__IOServiceBytes CgroupInfo_Blkio_Value + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__IOServiceBytes type=mesos.CgroupInfo_Blkio_Value kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__IOServiceBytes.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.IOServiceBytes = append(uj.IOServiceBytes, tmp_uj__IOServiceBytes) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *CgroupInfo_Blkio_Value) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *CgroupInfo_Blkio_Value) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.Op != nil { + if true { + buf.WriteString(`"op":`) + + { + + obj, err = mj.Op.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + } + } + if mj.Value != nil { + if true { + buf.WriteString(`"value":`) + fflib.FormatBits2(buf, uint64(*mj.Value), 10, false) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_CgroupInfo_Blkio_Valuebase = iota + ffj_t_CgroupInfo_Blkio_Valueno_such_key + + ffj_t_CgroupInfo_Blkio_Value_Op + + ffj_t_CgroupInfo_Blkio_Value_Value +) + +var ffj_key_CgroupInfo_Blkio_Value_Op = []byte("op") + +var ffj_key_CgroupInfo_Blkio_Value_Value = []byte("value") + +func (uj *CgroupInfo_Blkio_Value) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *CgroupInfo_Blkio_Value) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_CgroupInfo_Blkio_Valuebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. 
+ if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_CgroupInfo_Blkio_Valueno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'o': + + if bytes.Equal(ffj_key_CgroupInfo_Blkio_Value_Op, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_Value_Op + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'v': + + if bytes.Equal(ffj_key_CgroupInfo_Blkio_Value_Value, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_Value_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_CgroupInfo_Blkio_Value_Value, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_Value_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_CgroupInfo_Blkio_Value_Op, kn) { + currentKey = ffj_t_CgroupInfo_Blkio_Value_Op + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_CgroupInfo_Blkio_Valueno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_CgroupInfo_Blkio_Value_Op: + goto handle_Op + + case ffj_t_CgroupInfo_Blkio_Value_Value: + goto handle_Value + + case ffj_t_CgroupInfo_Blkio_Valueno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Op: + + /* handler: uj.Op type=mesos.CgroupInfo_Blkio_Operation kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Op = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + if uj.Op == nil { + uj.Op = new(CgroupInfo_Blkio_Operation) + } + + err = uj.Op.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Value: + + /* handler: uj.Value type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.Value = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.Value = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj 
*CgroupInfo_NetCls) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *CgroupInfo_NetCls) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.ClassID != nil { + if true { + buf.WriteString(`"classid":`) + fflib.FormatBits2(buf, uint64(*mj.ClassID), 10, false) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_CgroupInfo_NetClsbase = iota + ffj_t_CgroupInfo_NetClsno_such_key + + ffj_t_CgroupInfo_NetCls_ClassID +) + +var ffj_key_CgroupInfo_NetCls_ClassID = []byte("classid") + +func (uj *CgroupInfo_NetCls) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *CgroupInfo_NetCls) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_CgroupInfo_NetClsbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_CgroupInfo_NetClsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_CgroupInfo_NetCls_ClassID, kn) { + currentKey = ffj_t_CgroupInfo_NetCls_ClassID + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_CgroupInfo_NetCls_ClassID, kn) { + currentKey = ffj_t_CgroupInfo_NetCls_ClassID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_CgroupInfo_NetClsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_CgroupInfo_NetCls_ClassID: + goto handle_ClassID + + case ffj_t_CgroupInfo_NetClsno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ClassID: + + /* handler: uj.ClassID type=uint32 kind=uint32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint32", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.ClassID = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint32(tval) + uj.ClassID = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *CheckInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *CheckInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "type":`) + + { + + obj, err = mj.Type.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + if mj.Command != nil { + if true { + buf.WriteString(`"command":`) + + { + + err = mj.Command.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.HTTP != nil { + if true { + buf.WriteString(`"http":`) + + { + + err = mj.HTTP.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.TCP != nil { + if true { + buf.WriteString(`"tcp":`) + + { + + err = mj.TCP.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.DelaySeconds != nil { + if true { + buf.WriteString(`"delay_seconds":`) + 
fflib.AppendFloat(buf, float64(*mj.DelaySeconds), 'g', -1, 64) + buf.WriteByte(',') + } + } + if mj.IntervalSeconds != nil { + if true { + buf.WriteString(`"interval_seconds":`) + fflib.AppendFloat(buf, float64(*mj.IntervalSeconds), 'g', -1, 64) + buf.WriteByte(',') + } + } + if mj.TimeoutSeconds != nil { + if true { + buf.WriteString(`"timeout_seconds":`) + fflib.AppendFloat(buf, float64(*mj.TimeoutSeconds), 'g', -1, 64) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_CheckInfobase = iota + ffj_t_CheckInfono_such_key + + ffj_t_CheckInfo_Type + + ffj_t_CheckInfo_Command + + ffj_t_CheckInfo_HTTP + + ffj_t_CheckInfo_TCP + + ffj_t_CheckInfo_DelaySeconds + + ffj_t_CheckInfo_IntervalSeconds + + ffj_t_CheckInfo_TimeoutSeconds +) + +var ffj_key_CheckInfo_Type = []byte("type") + +var ffj_key_CheckInfo_Command = []byte("command") + +var ffj_key_CheckInfo_HTTP = []byte("http") + +var ffj_key_CheckInfo_TCP = []byte("tcp") + +var ffj_key_CheckInfo_DelaySeconds = []byte("delay_seconds") + +var ffj_key_CheckInfo_IntervalSeconds = []byte("interval_seconds") + +var ffj_key_CheckInfo_TimeoutSeconds = []byte("timeout_seconds") + +func (uj *CheckInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *CheckInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_CheckInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_CheckInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_CheckInfo_Command, kn) { + currentKey = ffj_t_CheckInfo_Command + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'd': + + if bytes.Equal(ffj_key_CheckInfo_DelaySeconds, kn) { + currentKey = ffj_t_CheckInfo_DelaySeconds + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'h': + + if bytes.Equal(ffj_key_CheckInfo_HTTP, kn) { + currentKey = ffj_t_CheckInfo_HTTP + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffj_key_CheckInfo_IntervalSeconds, kn) { + currentKey = ffj_t_CheckInfo_IntervalSeconds + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_CheckInfo_Type, kn) { + currentKey = ffj_t_CheckInfo_Type + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_CheckInfo_TCP, kn) { + currentKey = ffj_t_CheckInfo_TCP + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_CheckInfo_TimeoutSeconds, kn) { + currentKey = ffj_t_CheckInfo_TimeoutSeconds + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_CheckInfo_TimeoutSeconds, kn) { + currentKey = ffj_t_CheckInfo_TimeoutSeconds + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_CheckInfo_IntervalSeconds, kn) { + currentKey = ffj_t_CheckInfo_IntervalSeconds + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_CheckInfo_DelaySeconds, kn) { + currentKey = ffj_t_CheckInfo_DelaySeconds + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_CheckInfo_TCP, kn) { + currentKey = ffj_t_CheckInfo_TCP + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_CheckInfo_HTTP, kn) { + currentKey = ffj_t_CheckInfo_HTTP + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_CheckInfo_Command, kn) { + currentKey = ffj_t_CheckInfo_Command + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_CheckInfo_Type, kn) { + currentKey = ffj_t_CheckInfo_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_CheckInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_CheckInfo_Type: + goto handle_Type + + case ffj_t_CheckInfo_Command: + goto handle_Command + + case ffj_t_CheckInfo_HTTP: + goto handle_HTTP + + case ffj_t_CheckInfo_TCP: + goto handle_TCP + + case ffj_t_CheckInfo_DelaySeconds: + goto handle_DelaySeconds + + case ffj_t_CheckInfo_IntervalSeconds: + goto handle_IntervalSeconds + + case ffj_t_CheckInfo_TimeoutSeconds: + goto handle_TimeoutSeconds + + case ffj_t_CheckInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + 
} + +handle_Type: + + /* handler: uj.Type type=mesos.CheckInfo_Type kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = uj.Type.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Command: + + /* handler: uj.Command type=mesos.CheckInfo_Command kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Command = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Command == nil { + uj.Command = new(CheckInfo_Command) + } + + err = uj.Command.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_HTTP: + + /* handler: uj.HTTP type=mesos.CheckInfo_Http kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.HTTP = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.HTTP == nil { + uj.HTTP = new(CheckInfo_Http) + } + + err = uj.HTTP.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_TCP: + + /* handler: uj.TCP type=mesos.CheckInfo_Tcp kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.TCP = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.TCP == nil { + uj.TCP = new(CheckInfo_Tcp) + } + + err = uj.TCP.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_DelaySeconds: + + /* handler: uj.DelaySeconds type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.DelaySeconds = nil + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := float64(tval) + uj.DelaySeconds = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_IntervalSeconds: + + /* handler: uj.IntervalSeconds type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.IntervalSeconds = nil + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := float64(tval) + uj.IntervalSeconds = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_TimeoutSeconds: + + /* handler: uj.TimeoutSeconds type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.TimeoutSeconds = nil + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := 
float64(tval) + uj.TimeoutSeconds = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *CheckInfo_Command) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *CheckInfo_Command) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"command":`) + + { + + err = mj.Command.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_CheckInfo_Commandbase = iota + ffj_t_CheckInfo_Commandno_such_key + + ffj_t_CheckInfo_Command_Command +) + +var ffj_key_CheckInfo_Command_Command = []byte("command") + +func (uj *CheckInfo_Command) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *CheckInfo_Command) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_CheckInfo_Commandbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
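+ // An empty key name maps to the no_such_key sentinel; the matching
+ // value token is later discarded by fs.SkipField in the want_value state.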
+ currentKey = ffj_t_CheckInfo_Commandno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_CheckInfo_Command_Command, kn) { + currentKey = ffj_t_CheckInfo_Command_Command + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_CheckInfo_Command_Command, kn) { + currentKey = ffj_t_CheckInfo_Command_Command + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_CheckInfo_Commandno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_CheckInfo_Command_Command: + goto handle_Command + + case ffj_t_CheckInfo_Commandno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Command: + + /* handler: uj.Command type=mesos.CommandInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.Command.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *CheckInfo_Http) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *CheckInfo_Http) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "port":`) + fflib.FormatBits2(buf, uint64(mj.Port), 10, false) + buf.WriteByte(',') + if mj.Path != nil { + if true { + buf.WriteString(`"path":`) + fflib.WriteJsonString(buf, string(*mj.Path)) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_CheckInfo_Httpbase = iota + ffj_t_CheckInfo_Httpno_such_key + + ffj_t_CheckInfo_Http_Port + + ffj_t_CheckInfo_Http_Path +) + +var ffj_key_CheckInfo_Http_Port = []byte("port") + +var ffj_key_CheckInfo_Http_Path = []byte("path") + +func (uj *CheckInfo_Http) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *CheckInfo_Http) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_CheckInfo_Httpbase + _ = currentKey + tok := fflib.FFTok_init + 
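+ // tok holds the token just scanned; wantedTok records the token the
+ // state machine expected, for use in the wrongtokenerror message.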
wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_CheckInfo_Httpno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'p': + + if bytes.Equal(ffj_key_CheckInfo_Http_Port, kn) { + currentKey = ffj_t_CheckInfo_Http_Port + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_CheckInfo_Http_Path, kn) { + currentKey = ffj_t_CheckInfo_Http_Path + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_CheckInfo_Http_Path, kn) { + currentKey = ffj_t_CheckInfo_Http_Path + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_CheckInfo_Http_Port, kn) { + currentKey = ffj_t_CheckInfo_Http_Port + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_CheckInfo_Httpno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_CheckInfo_Http_Port: + goto handle_Port + + case ffj_t_CheckInfo_Http_Path: + goto handle_Path + + case ffj_t_CheckInfo_Httpno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Port: + + /* handler: uj.Port type=uint32 kind=uint32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint32", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32) + + if err != nil { + return fs.WrapErr(err) + } + + uj.Port = uint32(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Path: + + /* handler: uj.Path type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Path = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Path = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return 
fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *CheckInfo_Tcp) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *CheckInfo_Tcp) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"port":`) + fflib.FormatBits2(buf, uint64(mj.Port), 10, false) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_CheckInfo_Tcpbase = iota + ffj_t_CheckInfo_Tcpno_such_key + + ffj_t_CheckInfo_Tcp_Port +) + +var ffj_key_CheckInfo_Tcp_Port = []byte("port") + +func (uj *CheckInfo_Tcp) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *CheckInfo_Tcp) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_CheckInfo_Tcpbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_CheckInfo_Tcpno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'p': + + if bytes.Equal(ffj_key_CheckInfo_Tcp_Port, kn) { + currentKey = ffj_t_CheckInfo_Tcp_Port + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_CheckInfo_Tcp_Port, kn) { + currentKey = ffj_t_CheckInfo_Tcp_Port + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_CheckInfo_Tcpno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_CheckInfo_Tcp_Port: + goto handle_Port + + case ffj_t_CheckInfo_Tcpno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Port: + + /* handler: uj.Port type=uint32 kind=uint32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint32", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32) + + if err != nil { + return fs.WrapErr(err) + } + + uj.Port = uint32(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *CheckStatusInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *CheckStatusInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.Type != nil { + if true { + buf.WriteString(`"type":`) + + { + + obj, err = mj.Type.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + } + } + if mj.Command != nil { + if true { + buf.WriteString(`"command":`) + + { + + err = mj.Command.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.HTTP != nil { + if true { + buf.WriteString(`"http":`) + + { + + err = mj.HTTP.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.TCP != nil { + if true { + buf.WriteString(`"tcp":`) + + { + + err = mj.TCP.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_CheckStatusInfobase = iota + 
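+ // One constant per JSON key, plus the base and no_such_key sentinels;
+ // these are the currentKey values dispatched on in the want_value state.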
ffj_t_CheckStatusInfono_such_key + + ffj_t_CheckStatusInfo_Type + + ffj_t_CheckStatusInfo_Command + + ffj_t_CheckStatusInfo_HTTP + + ffj_t_CheckStatusInfo_TCP +) + +var ffj_key_CheckStatusInfo_Type = []byte("type") + +var ffj_key_CheckStatusInfo_Command = []byte("command") + +var ffj_key_CheckStatusInfo_HTTP = []byte("http") + +var ffj_key_CheckStatusInfo_TCP = []byte("tcp") + +func (uj *CheckStatusInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *CheckStatusInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_CheckStatusInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_CheckStatusInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_CheckStatusInfo_Command, kn) { + currentKey = ffj_t_CheckStatusInfo_Command + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'h': + + if bytes.Equal(ffj_key_CheckStatusInfo_HTTP, kn) { + currentKey = ffj_t_CheckStatusInfo_HTTP + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_CheckStatusInfo_Type, kn) { + currentKey = ffj_t_CheckStatusInfo_Type + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_CheckStatusInfo_TCP, kn) { + currentKey = ffj_t_CheckStatusInfo_TCP + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_CheckStatusInfo_TCP, kn) { + currentKey = ffj_t_CheckStatusInfo_TCP + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_CheckStatusInfo_HTTP, kn) { + currentKey = ffj_t_CheckStatusInfo_HTTP + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_CheckStatusInfo_Command, kn) { + currentKey = ffj_t_CheckStatusInfo_Command + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_CheckStatusInfo_Type, kn) { + currentKey = ffj_t_CheckStatusInfo_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_CheckStatusInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok 
== fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_CheckStatusInfo_Type: + goto handle_Type + + case ffj_t_CheckStatusInfo_Command: + goto handle_Command + + case ffj_t_CheckStatusInfo_HTTP: + goto handle_HTTP + + case ffj_t_CheckStatusInfo_TCP: + goto handle_TCP + + case ffj_t_CheckStatusInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Type: + + /* handler: uj.Type type=mesos.CheckInfo_Type kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Type = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + if uj.Type == nil { + uj.Type = new(CheckInfo_Type) + } + + err = uj.Type.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Command: + + /* handler: uj.Command type=mesos.CheckStatusInfo_Command kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Command = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Command == nil { + uj.Command = new(CheckStatusInfo_Command) + } + + err = uj.Command.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_HTTP: + + /* handler: uj.HTTP type=mesos.CheckStatusInfo_Http kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.HTTP = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.HTTP == nil { + uj.HTTP = new(CheckStatusInfo_Http) + } + + err = uj.HTTP.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_TCP: + + /* handler: uj.TCP type=mesos.CheckStatusInfo_Tcp kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.TCP = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.TCP == nil { + uj.TCP = new(CheckStatusInfo_Tcp) + } + + err = uj.TCP.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *CheckStatusInfo_Command) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *CheckStatusInfo_Command) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.ExitCode != nil { + if true { + 
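+ // Every emitted field is followed by a comma; buf.Rewind(1) below trims
+ // the final comma (or the padding space after '{' when no optional field
+ // was written).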
buf.WriteString(`"exit_code":`) + fflib.FormatBits2(buf, uint64(*mj.ExitCode), 10, *mj.ExitCode < 0) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_CheckStatusInfo_Commandbase = iota + ffj_t_CheckStatusInfo_Commandno_such_key + + ffj_t_CheckStatusInfo_Command_ExitCode +) + +var ffj_key_CheckStatusInfo_Command_ExitCode = []byte("exit_code") + +func (uj *CheckStatusInfo_Command) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *CheckStatusInfo_Command) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_CheckStatusInfo_Commandbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_CheckStatusInfo_Commandno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'e': + + if bytes.Equal(ffj_key_CheckStatusInfo_Command_ExitCode, kn) { + currentKey = ffj_t_CheckStatusInfo_Command_ExitCode + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.AsciiEqualFold(ffj_key_CheckStatusInfo_Command_ExitCode, kn) { + currentKey = ffj_t_CheckStatusInfo_Command_ExitCode + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_CheckStatusInfo_Commandno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_CheckStatusInfo_Command_ExitCode: + goto handle_ExitCode + + case ffj_t_CheckStatusInfo_Commandno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ExitCode: + + /* handler: uj.ExitCode type=int32 kind=int32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int32", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.ExitCode = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 32) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int32(tval) + uj.ExitCode = &ttypval + + 
} + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *CheckStatusInfo_Http) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *CheckStatusInfo_Http) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.StatusCode != nil { + if true { + buf.WriteString(`"status_code":`) + fflib.FormatBits2(buf, uint64(*mj.StatusCode), 10, false) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_CheckStatusInfo_Httpbase = iota + ffj_t_CheckStatusInfo_Httpno_such_key + + ffj_t_CheckStatusInfo_Http_StatusCode +) + +var ffj_key_CheckStatusInfo_Http_StatusCode = []byte("status_code") + +func (uj *CheckStatusInfo_Http) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *CheckStatusInfo_Http) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_CheckStatusInfo_Httpbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_CheckStatusInfo_Httpno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 's': + + if bytes.Equal(ffj_key_CheckStatusInfo_Http_StatusCode, kn) { + currentKey = ffj_t_CheckStatusInfo_Http_StatusCode + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_CheckStatusInfo_Http_StatusCode, kn) { + currentKey = ffj_t_CheckStatusInfo_Http_StatusCode + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_CheckStatusInfo_Httpno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_CheckStatusInfo_Http_StatusCode: + goto handle_StatusCode + + case ffj_t_CheckStatusInfo_Httpno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_StatusCode: + + /* handler: uj.StatusCode type=uint32 kind=uint32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint32", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.StatusCode = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint32(tval) + uj.StatusCode = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *CheckStatusInfo_Tcp) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *CheckStatusInfo_Tcp) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.Succeeded != nil { + if true { + if *mj.Succeeded { + buf.WriteString(`"succeeded":true`) + } else { + buf.WriteString(`"succeeded":false`) + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_CheckStatusInfo_Tcpbase = iota + ffj_t_CheckStatusInfo_Tcpno_such_key + + ffj_t_CheckStatusInfo_Tcp_Succeeded +) + +var ffj_key_CheckStatusInfo_Tcp_Succeeded = []byte("succeeded") + +func (uj *CheckStatusInfo_Tcp) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *CheckStatusInfo_Tcp) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state 
fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_CheckStatusInfo_Tcpbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_CheckStatusInfo_Tcpno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 's': + + if bytes.Equal(ffj_key_CheckStatusInfo_Tcp_Succeeded, kn) { + currentKey = ffj_t_CheckStatusInfo_Tcp_Succeeded + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_CheckStatusInfo_Tcp_Succeeded, kn) { + currentKey = ffj_t_CheckStatusInfo_Tcp_Succeeded + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_CheckStatusInfo_Tcpno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_CheckStatusInfo_Tcp_Succeeded: + goto handle_Succeeded + + case ffj_t_CheckStatusInfo_Tcpno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Succeeded: + + /* handler: uj.Succeeded type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + uj.Succeeded = nil + + } else { + tmpb := fs.Output.Bytes() + + var tval bool + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + tval = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + tval = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + uj.Succeeded = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report 
bug.") +done: + + return nil +} + +func (mj *CommandInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *CommandInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "uris":`) + if mj.URIs != nil { + buf.WriteString(`[`) + for i, v := range mj.URIs { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + if mj.Environment != nil { + if true { + buf.WriteString(`"environment":`) + + { + + err = mj.Environment.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Shell != nil { + if true { + if *mj.Shell { + buf.WriteString(`"shell":true`) + } else { + buf.WriteString(`"shell":false`) + } + buf.WriteByte(',') + } + } + if mj.Value != nil { + if true { + buf.WriteString(`"value":`) + fflib.WriteJsonString(buf, string(*mj.Value)) + buf.WriteByte(',') + } + } + if len(mj.Arguments) != 0 { + buf.WriteString(`"arguments":`) + if mj.Arguments != nil { + buf.WriteString(`[`) + for i, v := range mj.Arguments { + if i != 0 { + buf.WriteString(`,`) + } + fflib.WriteJsonString(buf, string(v)) + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + if mj.User != nil { + if true { + buf.WriteString(`"user":`) + fflib.WriteJsonString(buf, string(*mj.User)) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_CommandInfobase = iota + ffj_t_CommandInfono_such_key + + ffj_t_CommandInfo_URIs + + ffj_t_CommandInfo_Environment + + ffj_t_CommandInfo_Shell + + ffj_t_CommandInfo_Value + + ffj_t_CommandInfo_Arguments + + ffj_t_CommandInfo_User +) + +var ffj_key_CommandInfo_URIs = []byte("uris") + +var ffj_key_CommandInfo_Environment = []byte("environment") + +var ffj_key_CommandInfo_Shell = []byte("shell") + +var ffj_key_CommandInfo_Value = []byte("value") + +var ffj_key_CommandInfo_Arguments = []byte("arguments") + +var ffj_key_CommandInfo_User = []byte("user") + +func (uj *CommandInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *CommandInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_CommandInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. 
+ if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_CommandInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'a': + + if bytes.Equal(ffj_key_CommandInfo_Arguments, kn) { + currentKey = ffj_t_CommandInfo_Arguments + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'e': + + if bytes.Equal(ffj_key_CommandInfo_Environment, kn) { + currentKey = ffj_t_CommandInfo_Environment + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_CommandInfo_Shell, kn) { + currentKey = ffj_t_CommandInfo_Shell + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'u': + + if bytes.Equal(ffj_key_CommandInfo_URIs, kn) { + currentKey = ffj_t_CommandInfo_URIs + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_CommandInfo_User, kn) { + currentKey = ffj_t_CommandInfo_User + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'v': + + if bytes.Equal(ffj_key_CommandInfo_Value, kn) { + currentKey = ffj_t_CommandInfo_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_CommandInfo_User, kn) { + currentKey = ffj_t_CommandInfo_User + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_CommandInfo_Arguments, kn) { + currentKey = ffj_t_CommandInfo_Arguments + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_CommandInfo_Value, kn) { + currentKey = ffj_t_CommandInfo_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_CommandInfo_Shell, kn) { + currentKey = ffj_t_CommandInfo_Shell + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_CommandInfo_Environment, kn) { + currentKey = ffj_t_CommandInfo_Environment + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_CommandInfo_URIs, kn) { + currentKey = ffj_t_CommandInfo_URIs + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_CommandInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_CommandInfo_URIs: + goto handle_URIs + + case ffj_t_CommandInfo_Environment: + goto handle_Environment + + case ffj_t_CommandInfo_Shell: + goto handle_Shell + + case ffj_t_CommandInfo_Value: + goto handle_Value + + case ffj_t_CommandInfo_Arguments: + goto handle_Arguments + + case ffj_t_CommandInfo_User: + goto handle_User + + case ffj_t_CommandInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_URIs: + + /* handler: uj.URIs type=[]mesos.CommandInfo_URI kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + 
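+ // In fflib's token naming as used in this generated code, FFTok_left_brace
+ // marks the '[' opening a JSON array, while FFTok_left_bracket marks the
+ // '{' opening an object (compare the FFParse_map_start case above).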
return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.URIs = nil + } else { + + uj.URIs = []CommandInfo_URI{} + + wantVal := true + + for { + + var tmp_uj__URIs CommandInfo_URI + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__URIs type=mesos.CommandInfo_URI kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__URIs.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.URIs = append(uj.URIs, tmp_uj__URIs) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Environment: + + /* handler: uj.Environment type=mesos.Environment kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Environment = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Environment == nil { + uj.Environment = new(Environment) + } + + err = uj.Environment.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Shell: + + /* handler: uj.Shell type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + uj.Shell = nil + + } else { + tmpb := fs.Output.Bytes() + + var tval bool + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + tval = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + tval = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + uj.Shell = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Value: + + /* handler: uj.Value type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Value = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Value = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Arguments: + + /* handler: uj.Arguments type=[]string kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Arguments = nil + } else { + + uj.Arguments = []string{} + + wantVal := true + + for { + + var tmp_uj__Arguments string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Arguments type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmp_uj__Arguments = string(string(outBuf)) + + } + } + + uj.Arguments = append(uj.Arguments, tmp_uj__Arguments) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_User: + + /* handler: uj.User type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.User = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.User = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *CommandInfo_URI) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *CommandInfo_URI) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "value":`) + fflib.WriteJsonString(buf, string(mj.Value)) + buf.WriteByte(',') + if mj.Executable != nil { + if true { + if *mj.Executable { + buf.WriteString(`"executable":true`) + } else { + buf.WriteString(`"executable":false`) + } + buf.WriteByte(',') + } + } + if mj.Extract != nil { + if true { + if *mj.Extract { + buf.WriteString(`"extract":true`) + } else { + buf.WriteString(`"extract":false`) + } + buf.WriteByte(',') + } + } + if mj.Cache != nil { + if true { + if *mj.Cache { + buf.WriteString(`"cache":true`) + } else { + buf.WriteString(`"cache":false`) + } + buf.WriteByte(',') + } + } + if mj.OutputFile != nil { + if true { + buf.WriteString(`"output_file":`) + fflib.WriteJsonString(buf, string(*mj.OutputFile)) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_CommandInfo_URIbase = iota + ffj_t_CommandInfo_URIno_such_key + + ffj_t_CommandInfo_URI_Value + + ffj_t_CommandInfo_URI_Executable + + ffj_t_CommandInfo_URI_Extract + + ffj_t_CommandInfo_URI_Cache + + ffj_t_CommandInfo_URI_OutputFile +) + +var ffj_key_CommandInfo_URI_Value = []byte("value") + +var ffj_key_CommandInfo_URI_Executable = []byte("executable") + +var ffj_key_CommandInfo_URI_Extract = []byte("extract") + +var ffj_key_CommandInfo_URI_Cache = []byte("cache") + +var ffj_key_CommandInfo_URI_OutputFile = []byte("output_file") + +func (uj *CommandInfo_URI) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return 
uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *CommandInfo_URI) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_CommandInfo_URIbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_CommandInfo_URIno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_CommandInfo_URI_Cache, kn) { + currentKey = ffj_t_CommandInfo_URI_Cache + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'e': + + if bytes.Equal(ffj_key_CommandInfo_URI_Executable, kn) { + currentKey = ffj_t_CommandInfo_URI_Executable + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_CommandInfo_URI_Extract, kn) { + currentKey = ffj_t_CommandInfo_URI_Extract + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'o': + + if bytes.Equal(ffj_key_CommandInfo_URI_OutputFile, kn) { + currentKey = ffj_t_CommandInfo_URI_OutputFile + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'v': + + if bytes.Equal(ffj_key_CommandInfo_URI_Value, kn) { + currentKey = ffj_t_CommandInfo_URI_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.AsciiEqualFold(ffj_key_CommandInfo_URI_OutputFile, kn) { + currentKey = ffj_t_CommandInfo_URI_OutputFile + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_CommandInfo_URI_Cache, kn) { + currentKey = ffj_t_CommandInfo_URI_Cache + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_CommandInfo_URI_Extract, kn) { + currentKey = ffj_t_CommandInfo_URI_Extract + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_CommandInfo_URI_Executable, kn) { + currentKey = ffj_t_CommandInfo_URI_Executable + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_CommandInfo_URI_Value, kn) { + currentKey = ffj_t_CommandInfo_URI_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_CommandInfo_URIno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == 
fflib.FFTok_null { + switch currentKey { + + case ffj_t_CommandInfo_URI_Value: + goto handle_Value + + case ffj_t_CommandInfo_URI_Executable: + goto handle_Executable + + case ffj_t_CommandInfo_URI_Extract: + goto handle_Extract + + case ffj_t_CommandInfo_URI_Cache: + goto handle_Cache + + case ffj_t_CommandInfo_URI_OutputFile: + goto handle_OutputFile + + case ffj_t_CommandInfo_URIno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Value: + + /* handler: uj.Value type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Value = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Executable: + + /* handler: uj.Executable type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + uj.Executable = nil + + } else { + tmpb := fs.Output.Bytes() + + var tval bool + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + tval = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + tval = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + uj.Executable = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Extract: + + /* handler: uj.Extract type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + uj.Extract = nil + + } else { + tmpb := fs.Output.Bytes() + + var tval bool + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + tval = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + tval = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + uj.Extract = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Cache: + + /* handler: uj.Cache type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + uj.Cache = nil + + } else { + tmpb := fs.Output.Bytes() + + var tval bool + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + tval = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + tval = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + uj.Cache = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_OutputFile: + + /* handler: uj.OutputFile type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.OutputFile = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + 
uj.OutputFile = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *ContainerID) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *ContainerID) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "value":`) + fflib.WriteJsonString(buf, string(mj.Value)) + buf.WriteByte(',') + if mj.Parent != nil { + if true { + buf.WriteString(`"parent":`) + + { + + err = mj.Parent.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_ContainerIDbase = iota + ffj_t_ContainerIDno_such_key + + ffj_t_ContainerID_Value + + ffj_t_ContainerID_Parent +) + +var ffj_key_ContainerID_Value = []byte("value") + +var ffj_key_ContainerID_Parent = []byte("parent") + +func (uj *ContainerID) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *ContainerID) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_ContainerIDbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_ContainerIDno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'p': + + if bytes.Equal(ffj_key_ContainerID_Parent, kn) { + currentKey = ffj_t_ContainerID_Parent + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'v': + + if bytes.Equal(ffj_key_ContainerID_Value, kn) { + currentKey = ffj_t_ContainerID_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_ContainerID_Parent, kn) { + currentKey = ffj_t_ContainerID_Parent + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_ContainerID_Value, kn) { + currentKey = ffj_t_ContainerID_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_ContainerIDno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_ContainerID_Value: + goto handle_Value + + case ffj_t_ContainerID_Parent: + goto handle_Parent + + case ffj_t_ContainerIDno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Value: + + /* handler: uj.Value type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Value = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Parent: + + /* handler: uj.Parent type=mesos.ContainerID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Parent = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Parent == nil { + uj.Parent = new(ContainerID) + } + + err = uj.Parent.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *ContainerInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *ContainerInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.Type != nil { + if true { + buf.WriteString(`"type":`) + + { 
+ + obj, err = mj.Type.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + } + } + buf.WriteString(`"volumes":`) + if mj.Volumes != nil { + buf.WriteString(`[`) + for i, v := range mj.Volumes { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + if mj.Hostname != nil { + if true { + buf.WriteString(`"hostname":`) + fflib.WriteJsonString(buf, string(*mj.Hostname)) + buf.WriteByte(',') + } + } + if mj.Docker != nil { + if true { + buf.WriteString(`"docker":`) + + { + + err = mj.Docker.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Mesos != nil { + if true { + buf.WriteString(`"mesos":`) + + { + + err = mj.Mesos.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.WriteString(`"network_infos":`) + if mj.NetworkInfos != nil { + buf.WriteString(`[`) + for i, v := range mj.NetworkInfos { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + if mj.LinuxInfo != nil { + if true { + buf.WriteString(`"linux_info":`) + + { + + err = mj.LinuxInfo.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.RlimitInfo != nil { + if true { + buf.WriteString(`"rlimit_info":`) + + { + + err = mj.RlimitInfo.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.TTYInfo != nil { + if true { + buf.WriteString(`"tty_info":`) + + { + + err = mj.TTYInfo.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_ContainerInfobase = iota + ffj_t_ContainerInfono_such_key + + ffj_t_ContainerInfo_Type + + ffj_t_ContainerInfo_Volumes + + ffj_t_ContainerInfo_Hostname + + ffj_t_ContainerInfo_Docker + + ffj_t_ContainerInfo_Mesos + + ffj_t_ContainerInfo_NetworkInfos + + ffj_t_ContainerInfo_LinuxInfo + + ffj_t_ContainerInfo_RlimitInfo + + ffj_t_ContainerInfo_TTYInfo +) + +var ffj_key_ContainerInfo_Type = []byte("type") + +var ffj_key_ContainerInfo_Volumes = []byte("volumes") + +var ffj_key_ContainerInfo_Hostname = []byte("hostname") + +var ffj_key_ContainerInfo_Docker = []byte("docker") + +var ffj_key_ContainerInfo_Mesos = []byte("mesos") + +var ffj_key_ContainerInfo_NetworkInfos = []byte("network_infos") + +var ffj_key_ContainerInfo_LinuxInfo = []byte("linux_info") + +var ffj_key_ContainerInfo_RlimitInfo = []byte("rlimit_info") + +var ffj_key_ContainerInfo_TTYInfo = []byte("tty_info") + +func (uj *ContainerInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *ContainerInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_ContainerInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = 
fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_ContainerInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'd': + + if bytes.Equal(ffj_key_ContainerInfo_Docker, kn) { + currentKey = ffj_t_ContainerInfo_Docker + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'h': + + if bytes.Equal(ffj_key_ContainerInfo_Hostname, kn) { + currentKey = ffj_t_ContainerInfo_Hostname + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'l': + + if bytes.Equal(ffj_key_ContainerInfo_LinuxInfo, kn) { + currentKey = ffj_t_ContainerInfo_LinuxInfo + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'm': + + if bytes.Equal(ffj_key_ContainerInfo_Mesos, kn) { + currentKey = ffj_t_ContainerInfo_Mesos + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffj_key_ContainerInfo_NetworkInfos, kn) { + currentKey = ffj_t_ContainerInfo_NetworkInfos + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'r': + + if bytes.Equal(ffj_key_ContainerInfo_RlimitInfo, kn) { + currentKey = ffj_t_ContainerInfo_RlimitInfo + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_ContainerInfo_Type, kn) { + currentKey = ffj_t_ContainerInfo_Type + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ContainerInfo_TTYInfo, kn) { + currentKey = ffj_t_ContainerInfo_TTYInfo + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'v': + + if bytes.Equal(ffj_key_ContainerInfo_Volumes, kn) { + currentKey = ffj_t_ContainerInfo_Volumes + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.AsciiEqualFold(ffj_key_ContainerInfo_TTYInfo, kn) { + currentKey = ffj_t_ContainerInfo_TTYInfo + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_ContainerInfo_RlimitInfo, kn) { + currentKey = ffj_t_ContainerInfo_RlimitInfo + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_ContainerInfo_LinuxInfo, kn) { + currentKey = ffj_t_ContainerInfo_LinuxInfo + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ContainerInfo_NetworkInfos, kn) { + currentKey = ffj_t_ContainerInfo_NetworkInfos + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ContainerInfo_Mesos, kn) { + currentKey = ffj_t_ContainerInfo_Mesos + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ContainerInfo_Docker, kn) { + currentKey = ffj_t_ContainerInfo_Docker + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ContainerInfo_Hostname, kn) { + currentKey = ffj_t_ContainerInfo_Hostname + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ContainerInfo_Volumes, kn) { + currentKey = ffj_t_ContainerInfo_Volumes + state = fflib.FFParse_want_colon + goto mainparse + } + + if 
fflib.SimpleLetterEqualFold(ffj_key_ContainerInfo_Type, kn) { + currentKey = ffj_t_ContainerInfo_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_ContainerInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_ContainerInfo_Type: + goto handle_Type + + case ffj_t_ContainerInfo_Volumes: + goto handle_Volumes + + case ffj_t_ContainerInfo_Hostname: + goto handle_Hostname + + case ffj_t_ContainerInfo_Docker: + goto handle_Docker + + case ffj_t_ContainerInfo_Mesos: + goto handle_Mesos + + case ffj_t_ContainerInfo_NetworkInfos: + goto handle_NetworkInfos + + case ffj_t_ContainerInfo_LinuxInfo: + goto handle_LinuxInfo + + case ffj_t_ContainerInfo_RlimitInfo: + goto handle_RlimitInfo + + case ffj_t_ContainerInfo_TTYInfo: + goto handle_TTYInfo + + case ffj_t_ContainerInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Type: + + /* handler: uj.Type type=mesos.ContainerInfo_Type kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Type = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + if uj.Type == nil { + uj.Type = new(ContainerInfo_Type) + } + + err = uj.Type.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Volumes: + + /* handler: uj.Volumes type=[]mesos.Volume kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Volumes = nil + } else { + + uj.Volumes = []Volume{} + + wantVal := true + + for { + + var tmp_uj__Volumes Volume + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Volumes type=mesos.Volume kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Volumes.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Volumes = append(uj.Volumes, tmp_uj__Volumes) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Hostname: + + /* handler: uj.Hostname type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Hostname = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Hostname = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Docker: + + /* handler: uj.Docker type=mesos.ContainerInfo_DockerInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Docker = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Docker == nil { + uj.Docker = new(ContainerInfo_DockerInfo) + } + + err = uj.Docker.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Mesos: + + /* handler: uj.Mesos type=mesos.ContainerInfo_MesosInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Mesos = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Mesos == nil { + uj.Mesos = new(ContainerInfo_MesosInfo) + } + + err = uj.Mesos.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_NetworkInfos: + + /* handler: uj.NetworkInfos type=[]mesos.NetworkInfo kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.NetworkInfos = nil + } else { + + uj.NetworkInfos = []NetworkInfo{} + + wantVal := true + + for { + + var tmp_uj__NetworkInfos NetworkInfo + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__NetworkInfos type=mesos.NetworkInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__NetworkInfos.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.NetworkInfos = append(uj.NetworkInfos, tmp_uj__NetworkInfos) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_LinuxInfo: + + /* handler: uj.LinuxInfo type=mesos.LinuxInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.LinuxInfo = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.LinuxInfo == nil { + uj.LinuxInfo = new(LinuxInfo) + } + + err = uj.LinuxInfo.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_RlimitInfo: + + /* handler: uj.RlimitInfo type=mesos.RLimitInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.RlimitInfo = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.RlimitInfo == nil { + uj.RlimitInfo = new(RLimitInfo) + } + + err = uj.RlimitInfo.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_TTYInfo: + + /* handler: uj.TTYInfo type=mesos.TTYInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.TTYInfo = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.TTYInfo == nil { + uj.TTYInfo = new(TTYInfo) + } + + err = uj.TTYInfo.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *ContainerInfo_DockerInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *ContainerInfo_DockerInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "image":`) + fflib.WriteJsonString(buf, string(mj.Image)) + buf.WriteByte(',') + if mj.Network != nil { + if true { + buf.WriteString(`"network":`) + + { + + obj, err = mj.Network.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + } + } + buf.WriteString(`"port_mappings":`) + if mj.PortMappings != nil { + buf.WriteString(`[`) + for i, v := range mj.PortMappings { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + 
} + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + if mj.Privileged != nil { + if true { + if *mj.Privileged { + buf.WriteString(`"privileged":true`) + } else { + buf.WriteString(`"privileged":false`) + } + buf.WriteByte(',') + } + } + buf.WriteString(`"parameters":`) + if mj.Parameters != nil { + buf.WriteString(`[`) + for i, v := range mj.Parameters { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + if mj.ForcePullImage != nil { + if true { + if *mj.ForcePullImage { + buf.WriteString(`"force_pull_image":true`) + } else { + buf.WriteString(`"force_pull_image":false`) + } + buf.WriteByte(',') + } + } + if mj.VolumeDriver != nil { + if true { + buf.WriteString(`"volume_driver":`) + fflib.WriteJsonString(buf, string(*mj.VolumeDriver)) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_ContainerInfo_DockerInfobase = iota + ffj_t_ContainerInfo_DockerInfono_such_key + + ffj_t_ContainerInfo_DockerInfo_Image + + ffj_t_ContainerInfo_DockerInfo_Network + + ffj_t_ContainerInfo_DockerInfo_PortMappings + + ffj_t_ContainerInfo_DockerInfo_Privileged + + ffj_t_ContainerInfo_DockerInfo_Parameters + + ffj_t_ContainerInfo_DockerInfo_ForcePullImage + + ffj_t_ContainerInfo_DockerInfo_VolumeDriver +) + +var ffj_key_ContainerInfo_DockerInfo_Image = []byte("image") + +var ffj_key_ContainerInfo_DockerInfo_Network = []byte("network") + +var ffj_key_ContainerInfo_DockerInfo_PortMappings = []byte("port_mappings") + +var ffj_key_ContainerInfo_DockerInfo_Privileged = []byte("privileged") + +var ffj_key_ContainerInfo_DockerInfo_Parameters = []byte("parameters") + +var ffj_key_ContainerInfo_DockerInfo_ForcePullImage = []byte("force_pull_image") + +var ffj_key_ContainerInfo_DockerInfo_VolumeDriver = []byte("volume_driver") + +func (uj *ContainerInfo_DockerInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *ContainerInfo_DockerInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_ContainerInfo_DockerInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_ContainerInfo_DockerInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'f': + + if bytes.Equal(ffj_key_ContainerInfo_DockerInfo_ForcePullImage, kn) { + currentKey = ffj_t_ContainerInfo_DockerInfo_ForcePullImage + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffj_key_ContainerInfo_DockerInfo_Image, kn) { + currentKey = ffj_t_ContainerInfo_DockerInfo_Image + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffj_key_ContainerInfo_DockerInfo_Network, kn) { + currentKey = ffj_t_ContainerInfo_DockerInfo_Network + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffj_key_ContainerInfo_DockerInfo_PortMappings, kn) { + currentKey = ffj_t_ContainerInfo_DockerInfo_PortMappings + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ContainerInfo_DockerInfo_Privileged, kn) { + currentKey = ffj_t_ContainerInfo_DockerInfo_Privileged + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ContainerInfo_DockerInfo_Parameters, kn) { + currentKey = ffj_t_ContainerInfo_DockerInfo_Parameters + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'v': + + if bytes.Equal(ffj_key_ContainerInfo_DockerInfo_VolumeDriver, kn) { + currentKey = ffj_t_ContainerInfo_DockerInfo_VolumeDriver + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.AsciiEqualFold(ffj_key_ContainerInfo_DockerInfo_VolumeDriver, kn) { + currentKey = ffj_t_ContainerInfo_DockerInfo_VolumeDriver + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_ContainerInfo_DockerInfo_ForcePullImage, kn) { + currentKey = ffj_t_ContainerInfo_DockerInfo_ForcePullImage + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ContainerInfo_DockerInfo_Parameters, kn) { + currentKey = ffj_t_ContainerInfo_DockerInfo_Parameters + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_ContainerInfo_DockerInfo_Privileged, kn) { + currentKey = ffj_t_ContainerInfo_DockerInfo_Privileged + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ContainerInfo_DockerInfo_PortMappings, kn) { + currentKey = ffj_t_ContainerInfo_DockerInfo_PortMappings + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ContainerInfo_DockerInfo_Network, kn) { + currentKey = ffj_t_ContainerInfo_DockerInfo_Network + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_ContainerInfo_DockerInfo_Image, kn) { + currentKey = ffj_t_ContainerInfo_DockerInfo_Image + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_ContainerInfo_DockerInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_ContainerInfo_DockerInfo_Image: + goto handle_Image + + case ffj_t_ContainerInfo_DockerInfo_Network: + goto handle_Network 
+ + case ffj_t_ContainerInfo_DockerInfo_PortMappings: + goto handle_PortMappings + + case ffj_t_ContainerInfo_DockerInfo_Privileged: + goto handle_Privileged + + case ffj_t_ContainerInfo_DockerInfo_Parameters: + goto handle_Parameters + + case ffj_t_ContainerInfo_DockerInfo_ForcePullImage: + goto handle_ForcePullImage + + case ffj_t_ContainerInfo_DockerInfo_VolumeDriver: + goto handle_VolumeDriver + + case ffj_t_ContainerInfo_DockerInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Image: + + /* handler: uj.Image type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Image = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Network: + + /* handler: uj.Network type=mesos.ContainerInfo_DockerInfo_Network kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Network = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + if uj.Network == nil { + uj.Network = new(ContainerInfo_DockerInfo_Network) + } + + err = uj.Network.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_PortMappings: + + /* handler: uj.PortMappings type=[]mesos.ContainerInfo_DockerInfo_PortMapping kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.PortMappings = nil + } else { + + uj.PortMappings = []ContainerInfo_DockerInfo_PortMapping{} + + wantVal := true + + for { + + var tmp_uj__PortMappings ContainerInfo_DockerInfo_PortMapping + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__PortMappings type=mesos.ContainerInfo_DockerInfo_PortMapping kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__PortMappings.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.PortMappings = append(uj.PortMappings, tmp_uj__PortMappings) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Privileged: + + /* handler: uj.Privileged type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + uj.Privileged = nil + + } else { + tmpb := fs.Output.Bytes() + + var tval bool + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + tval = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + tval = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + uj.Privileged = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Parameters: + + /* handler: uj.Parameters type=[]mesos.Parameter kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Parameters = nil + } else { + + uj.Parameters = []Parameter{} + + wantVal := true + + for { + + var tmp_uj__Parameters Parameter + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Parameters type=mesos.Parameter kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Parameters.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Parameters = append(uj.Parameters, tmp_uj__Parameters) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ForcePullImage: + + /* handler: uj.ForcePullImage type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + uj.ForcePullImage = nil + + } else { + tmpb := fs.Output.Bytes() + + var tval bool + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + tval = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + tval = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + uj.ForcePullImage = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_VolumeDriver: + + /* handler: uj.VolumeDriver type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.VolumeDriver = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.VolumeDriver = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *ContainerInfo_DockerInfo_PortMapping) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *ContainerInfo_DockerInfo_PortMapping) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "host_port":`) + fflib.FormatBits2(buf, uint64(mj.HostPort), 10, false) + buf.WriteString(`,"container_port":`) + fflib.FormatBits2(buf, uint64(mj.ContainerPort), 10, false) + buf.WriteByte(',') + if mj.Protocol != nil { + if true { + buf.WriteString(`"protocol":`) + fflib.WriteJsonString(buf, string(*mj.Protocol)) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_ContainerInfo_DockerInfo_PortMappingbase = iota + ffj_t_ContainerInfo_DockerInfo_PortMappingno_such_key + + ffj_t_ContainerInfo_DockerInfo_PortMapping_HostPort + + ffj_t_ContainerInfo_DockerInfo_PortMapping_ContainerPort + + ffj_t_ContainerInfo_DockerInfo_PortMapping_Protocol +) + +var 
ffj_key_ContainerInfo_DockerInfo_PortMapping_HostPort = []byte("host_port") + +var ffj_key_ContainerInfo_DockerInfo_PortMapping_ContainerPort = []byte("container_port") + +var ffj_key_ContainerInfo_DockerInfo_PortMapping_Protocol = []byte("protocol") + +func (uj *ContainerInfo_DockerInfo_PortMapping) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *ContainerInfo_DockerInfo_PortMapping) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_ContainerInfo_DockerInfo_PortMappingbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_ContainerInfo_DockerInfo_PortMappingno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_ContainerInfo_DockerInfo_PortMapping_ContainerPort, kn) { + currentKey = ffj_t_ContainerInfo_DockerInfo_PortMapping_ContainerPort + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'h': + + if bytes.Equal(ffj_key_ContainerInfo_DockerInfo_PortMapping_HostPort, kn) { + currentKey = ffj_t_ContainerInfo_DockerInfo_PortMapping_HostPort + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffj_key_ContainerInfo_DockerInfo_PortMapping_Protocol, kn) { + currentKey = ffj_t_ContainerInfo_DockerInfo_PortMapping_Protocol + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_ContainerInfo_DockerInfo_PortMapping_Protocol, kn) { + currentKey = ffj_t_ContainerInfo_DockerInfo_PortMapping_Protocol + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_ContainerInfo_DockerInfo_PortMapping_ContainerPort, kn) { + currentKey = ffj_t_ContainerInfo_DockerInfo_PortMapping_ContainerPort + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ContainerInfo_DockerInfo_PortMapping_HostPort, kn) { + currentKey = ffj_t_ContainerInfo_DockerInfo_PortMapping_HostPort + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_ContainerInfo_DockerInfo_PortMappingno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == 
fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_ContainerInfo_DockerInfo_PortMapping_HostPort: + goto handle_HostPort + + case ffj_t_ContainerInfo_DockerInfo_PortMapping_ContainerPort: + goto handle_ContainerPort + + case ffj_t_ContainerInfo_DockerInfo_PortMapping_Protocol: + goto handle_Protocol + + case ffj_t_ContainerInfo_DockerInfo_PortMappingno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_HostPort: + + /* handler: uj.HostPort type=uint32 kind=uint32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint32", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32) + + if err != nil { + return fs.WrapErr(err) + } + + uj.HostPort = uint32(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ContainerPort: + + /* handler: uj.ContainerPort type=uint32 kind=uint32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint32", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32) + + if err != nil { + return fs.WrapErr(err) + } + + uj.ContainerPort = uint32(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Protocol: + + /* handler: uj.Protocol type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Protocol = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Protocol = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *ContainerInfo_MesosInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *ContainerInfo_MesosInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.Image != nil { + if true { + buf.WriteString(`"image":`) + + { + + err = mj.Image.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_ContainerInfo_MesosInfobase = iota + ffj_t_ContainerInfo_MesosInfono_such_key + + ffj_t_ContainerInfo_MesosInfo_Image +) + +var ffj_key_ContainerInfo_MesosInfo_Image = []byte("image") + +func (uj *ContainerInfo_MesosInfo) 
UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *ContainerInfo_MesosInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_ContainerInfo_MesosInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_ContainerInfo_MesosInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'i': + + if bytes.Equal(ffj_key_ContainerInfo_MesosInfo_Image, kn) { + currentKey = ffj_t_ContainerInfo_MesosInfo_Image + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_ContainerInfo_MesosInfo_Image, kn) { + currentKey = ffj_t_ContainerInfo_MesosInfo_Image + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_ContainerInfo_MesosInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_ContainerInfo_MesosInfo_Image: + goto handle_Image + + case ffj_t_ContainerInfo_MesosInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Image: + + /* handler: uj.Image type=mesos.Image kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Image = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Image == nil { + uj.Image = new(Image) + } + + err = uj.Image.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + 
+func (mj *ContainerStatus) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *ContainerStatus) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.ContainerID != nil { + if true { + buf.WriteString(`"container_id":`) + + { + + err = mj.ContainerID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.WriteString(`"network_infos":`) + if mj.NetworkInfos != nil { + buf.WriteString(`[`) + for i, v := range mj.NetworkInfos { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + if mj.CgroupInfo != nil { + if true { + buf.WriteString(`"cgroup_info":`) + + { + + err = mj.CgroupInfo.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.ExecutorPID != nil { + if true { + buf.WriteString(`"executor_pid":`) + fflib.FormatBits2(buf, uint64(*mj.ExecutorPID), 10, false) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_ContainerStatusbase = iota + ffj_t_ContainerStatusno_such_key + + ffj_t_ContainerStatus_ContainerID + + ffj_t_ContainerStatus_NetworkInfos + + ffj_t_ContainerStatus_CgroupInfo + + ffj_t_ContainerStatus_ExecutorPID +) + +var ffj_key_ContainerStatus_ContainerID = []byte("container_id") + +var ffj_key_ContainerStatus_NetworkInfos = []byte("network_infos") + +var ffj_key_ContainerStatus_CgroupInfo = []byte("cgroup_info") + +var ffj_key_ContainerStatus_ExecutorPID = []byte("executor_pid") + +func (uj *ContainerStatus) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *ContainerStatus) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_ContainerStatusbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_ContainerStatusno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_ContainerStatus_ContainerID, kn) { + currentKey = ffj_t_ContainerStatus_ContainerID + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ContainerStatus_CgroupInfo, kn) { + currentKey = ffj_t_ContainerStatus_CgroupInfo + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'e': + + if bytes.Equal(ffj_key_ContainerStatus_ExecutorPID, kn) { + currentKey = ffj_t_ContainerStatus_ExecutorPID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffj_key_ContainerStatus_NetworkInfos, kn) { + currentKey = ffj_t_ContainerStatus_NetworkInfos + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.AsciiEqualFold(ffj_key_ContainerStatus_ExecutorPID, kn) { + currentKey = ffj_t_ContainerStatus_ExecutorPID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_ContainerStatus_CgroupInfo, kn) { + currentKey = ffj_t_ContainerStatus_CgroupInfo + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ContainerStatus_NetworkInfos, kn) { + currentKey = ffj_t_ContainerStatus_NetworkInfos + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_ContainerStatus_ContainerID, kn) { + currentKey = ffj_t_ContainerStatus_ContainerID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_ContainerStatusno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_ContainerStatus_ContainerID: + goto handle_ContainerID + + case ffj_t_ContainerStatus_NetworkInfos: + goto handle_NetworkInfos + + case ffj_t_ContainerStatus_CgroupInfo: + goto handle_CgroupInfo + + case ffj_t_ContainerStatus_ExecutorPID: + goto handle_ExecutorPID + + case ffj_t_ContainerStatusno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ContainerID: + + /* handler: uj.ContainerID type=mesos.ContainerID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.ContainerID = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.ContainerID == nil { + uj.ContainerID = new(ContainerID) + } + + err = uj.ContainerID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_NetworkInfos: + + /* handler: uj.NetworkInfos type=[]mesos.NetworkInfo kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.NetworkInfos = nil + } else { + + uj.NetworkInfos = []NetworkInfo{} + + wantVal := true + + for { + + var tmp_uj__NetworkInfos NetworkInfo + + tok = 
fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__NetworkInfos type=mesos.NetworkInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__NetworkInfos.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.NetworkInfos = append(uj.NetworkInfos, tmp_uj__NetworkInfos) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_CgroupInfo: + + /* handler: uj.CgroupInfo type=mesos.CgroupInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.CgroupInfo = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.CgroupInfo == nil { + uj.CgroupInfo = new(CgroupInfo) + } + + err = uj.CgroupInfo.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ExecutorPID: + + /* handler: uj.ExecutorPID type=uint32 kind=uint32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint32", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.ExecutorPID = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint32(tval) + uj.ExecutorPID = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Credential) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Credential) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "principal":`) + fflib.WriteJsonString(buf, string(mj.Principal)) + buf.WriteByte(',') + if mj.Secret != nil { + if true { + buf.WriteString(`"secret":`) + fflib.WriteJsonString(buf, string(*mj.Secret)) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Credentialbase = iota + ffj_t_Credentialno_such_key + + ffj_t_Credential_Principal + + ffj_t_Credential_Secret +) + +var ffj_key_Credential_Principal = []byte("principal") + +var ffj_key_Credential_Secret = []byte("secret") + +func (uj *Credential) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, 
fflib.FFParse_map_start) +} + +func (uj *Credential) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Credentialbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Credentialno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'p': + + if bytes.Equal(ffj_key_Credential_Principal, kn) { + currentKey = ffj_t_Credential_Principal + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_Credential_Secret, kn) { + currentKey = ffj_t_Credential_Secret + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Credential_Secret, kn) { + currentKey = ffj_t_Credential_Secret + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Credential_Principal, kn) { + currentKey = ffj_t_Credential_Principal + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Credentialno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Credential_Principal: + goto handle_Principal + + case ffj_t_Credential_Secret: + goto handle_Secret + + case ffj_t_Credentialno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Principal: + + /* handler: uj.Principal type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Principal = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Secret: + + /* handler: uj.Secret type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Secret = nil + + } else { + + var tval 
string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Secret = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Credentials) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Credentials) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"credentials":`) + if mj.Credentials != nil { + buf.WriteString(`[`) + for i, v := range mj.Credentials { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Credentialsbase = iota + ffj_t_Credentialsno_such_key + + ffj_t_Credentials_Credentials +) + +var ffj_key_Credentials_Credentials = []byte("credentials") + +func (uj *Credentials) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Credentials) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Credentialsbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Credentialsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_Credentials_Credentials, kn) { + currentKey = ffj_t_Credentials_Credentials + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Credentials_Credentials, kn) { + currentKey = ffj_t_Credentials_Credentials + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Credentialsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Credentials_Credentials: + goto handle_Credentials + + case ffj_t_Credentialsno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Credentials: + + /* handler: uj.Credentials type=[]mesos.Credential kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Credentials = nil + } else { + + uj.Credentials = []Credential{} + + wantVal := true + + for { + + var tmp_uj__Credentials Credential + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Credentials type=mesos.Credential kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Credentials.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Credentials = append(uj.Credentials, tmp_uj__Credentials) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Device) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Device) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.Path != nil { + if true { + buf.WriteString(`"path":`) + fflib.WriteJsonString(buf, string(*mj.Path)) + buf.WriteByte(',') + } + } + if mj.Number != nil { + if true { + buf.WriteString(`"number":`) + + { + + err = mj.Number.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Devicebase = iota + ffj_t_Deviceno_such_key + + ffj_t_Device_Path + + ffj_t_Device_Number +) + +var ffj_key_Device_Path = []byte("path") + +var ffj_key_Device_Number = []byte("number") + +func (uj *Device) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Device) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Devicebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Deviceno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'n': + + if bytes.Equal(ffj_key_Device_Number, kn) { + currentKey = ffj_t_Device_Number + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffj_key_Device_Path, kn) { + currentKey = ffj_t_Device_Path + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Device_Number, kn) { + currentKey = ffj_t_Device_Number + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Device_Path, kn) { + currentKey = ffj_t_Device_Path + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Deviceno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Device_Path: + goto handle_Path + + case ffj_t_Device_Number: + goto handle_Number + + case ffj_t_Deviceno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Path: + + /* handler: uj.Path type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Path = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Path = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Number: + + /* handler: uj.Number type=mesos.Device_Number kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Number = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Number == nil { + uj.Number = new(Device_Number) + } + + err = uj.Number.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *DeviceAccess) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *DeviceAccess) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"device":`) + + { + + err = mj.Device.MarshalJSONBuf(buf) + if err != nil { + return err + } 
+ + } + buf.WriteString(`,"access":`) + + { + + err = mj.Access.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_DeviceAccessbase = iota + ffj_t_DeviceAccessno_such_key + + ffj_t_DeviceAccess_Device + + ffj_t_DeviceAccess_Access +) + +var ffj_key_DeviceAccess_Device = []byte("device") + +var ffj_key_DeviceAccess_Access = []byte("access") + +func (uj *DeviceAccess) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *DeviceAccess) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_DeviceAccessbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_DeviceAccessno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'a': + + if bytes.Equal(ffj_key_DeviceAccess_Access, kn) { + currentKey = ffj_t_DeviceAccess_Access + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'd': + + if bytes.Equal(ffj_key_DeviceAccess_Device, kn) { + currentKey = ffj_t_DeviceAccess_Device + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_DeviceAccess_Access, kn) { + currentKey = ffj_t_DeviceAccess_Access + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_DeviceAccess_Device, kn) { + currentKey = ffj_t_DeviceAccess_Device + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_DeviceAccessno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_DeviceAccess_Device: + goto handle_Device + + case ffj_t_DeviceAccess_Access: + goto handle_Access + + case ffj_t_DeviceAccessno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Device: + + /* handler: uj.Device type=mesos.Device kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = 
uj.Device.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Access: + + /* handler: uj.Access type=mesos.DeviceAccess_Access kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.Access.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *DeviceAccess_Access) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *DeviceAccess_Access) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.Read != nil { + if true { + if *mj.Read { + buf.WriteString(`"read":true`) + } else { + buf.WriteString(`"read":false`) + } + buf.WriteByte(',') + } + } + if mj.Write != nil { + if true { + if *mj.Write { + buf.WriteString(`"write":true`) + } else { + buf.WriteString(`"write":false`) + } + buf.WriteByte(',') + } + } + if mj.Mknod != nil { + if true { + if *mj.Mknod { + buf.WriteString(`"mknod":true`) + } else { + buf.WriteString(`"mknod":false`) + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_DeviceAccess_Accessbase = iota + ffj_t_DeviceAccess_Accessno_such_key + + ffj_t_DeviceAccess_Access_Read + + ffj_t_DeviceAccess_Access_Write + + ffj_t_DeviceAccess_Access_Mknod +) + +var ffj_key_DeviceAccess_Access_Read = []byte("read") + +var ffj_key_DeviceAccess_Access_Write = []byte("write") + +var ffj_key_DeviceAccess_Access_Mknod = []byte("mknod") + +func (uj *DeviceAccess_Access) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *DeviceAccess_Access) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_DeviceAccess_Accessbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. 
+ if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_DeviceAccess_Accessno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'm': + + if bytes.Equal(ffj_key_DeviceAccess_Access_Mknod, kn) { + currentKey = ffj_t_DeviceAccess_Access_Mknod + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'r': + + if bytes.Equal(ffj_key_DeviceAccess_Access_Read, kn) { + currentKey = ffj_t_DeviceAccess_Access_Read + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'w': + + if bytes.Equal(ffj_key_DeviceAccess_Access_Write, kn) { + currentKey = ffj_t_DeviceAccess_Access_Write + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_DeviceAccess_Access_Mknod, kn) { + currentKey = ffj_t_DeviceAccess_Access_Mknod + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_DeviceAccess_Access_Write, kn) { + currentKey = ffj_t_DeviceAccess_Access_Write + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_DeviceAccess_Access_Read, kn) { + currentKey = ffj_t_DeviceAccess_Access_Read + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_DeviceAccess_Accessno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_DeviceAccess_Access_Read: + goto handle_Read + + case ffj_t_DeviceAccess_Access_Write: + goto handle_Write + + case ffj_t_DeviceAccess_Access_Mknod: + goto handle_Mknod + + case ffj_t_DeviceAccess_Accessno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Read: + + /* handler: uj.Read type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + uj.Read = nil + + } else { + tmpb := fs.Output.Bytes() + + var tval bool + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + tval = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + tval = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + uj.Read = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Write: + + /* handler: uj.Write type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + uj.Write = nil + + } else { + tmpb := fs.Output.Bytes() + + var tval bool + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + tval = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, 
tmpb) == 0 { + + tval = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + uj.Write = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Mknod: + + /* handler: uj.Mknod type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + uj.Mknod = nil + + } else { + tmpb := fs.Output.Bytes() + + var tval bool + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + tval = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + tval = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + uj.Mknod = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *DeviceWhitelist) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *DeviceWhitelist) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"allowed_devices":`) + if mj.AllowedDevices != nil { + buf.WriteString(`[`) + for i, v := range mj.AllowedDevices { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_DeviceWhitelistbase = iota + ffj_t_DeviceWhitelistno_such_key + + ffj_t_DeviceWhitelist_AllowedDevices +) + +var ffj_key_DeviceWhitelist_AllowedDevices = []byte("allowed_devices") + +func (uj *DeviceWhitelist) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *DeviceWhitelist) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_DeviceWhitelistbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. 
+ if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_DeviceWhitelistno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'a': + + if bytes.Equal(ffj_key_DeviceWhitelist_AllowedDevices, kn) { + currentKey = ffj_t_DeviceWhitelist_AllowedDevices + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_DeviceWhitelist_AllowedDevices, kn) { + currentKey = ffj_t_DeviceWhitelist_AllowedDevices + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_DeviceWhitelistno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_DeviceWhitelist_AllowedDevices: + goto handle_AllowedDevices + + case ffj_t_DeviceWhitelistno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_AllowedDevices: + + /* handler: uj.AllowedDevices type=[]mesos.DeviceAccess kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.AllowedDevices = nil + } else { + + uj.AllowedDevices = []DeviceAccess{} + + wantVal := true + + for { + + var tmp_uj__AllowedDevices DeviceAccess + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__AllowedDevices type=mesos.DeviceAccess kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__AllowedDevices.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.AllowedDevices = append(uj.AllowedDevices, tmp_uj__AllowedDevices) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Device_Number) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Device_Number) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.MajorNumber != nil { + if true { + buf.WriteString(`"major_number":`) + fflib.FormatBits2(buf, uint64(*mj.MajorNumber), 10, false) + buf.WriteByte(',') + } + } + if mj.MinorNumber != nil { + if true { + buf.WriteString(`"minor_number":`) + fflib.FormatBits2(buf, uint64(*mj.MinorNumber), 10, false) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Device_Numberbase = iota + ffj_t_Device_Numberno_such_key + + ffj_t_Device_Number_MajorNumber + + ffj_t_Device_Number_MinorNumber +) + +var ffj_key_Device_Number_MajorNumber = []byte("major_number") + +var ffj_key_Device_Number_MinorNumber = []byte("minor_number") + +func (uj *Device_Number) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Device_Number) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Device_Numberbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Device_Numberno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'm': + + if bytes.Equal(ffj_key_Device_Number_MajorNumber, kn) { + currentKey = ffj_t_Device_Number_MajorNumber + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Device_Number_MinorNumber, kn) { + currentKey = ffj_t_Device_Number_MinorNumber + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.AsciiEqualFold(ffj_key_Device_Number_MinorNumber, kn) { + currentKey = ffj_t_Device_Number_MinorNumber + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_Device_Number_MajorNumber, kn) { + currentKey = ffj_t_Device_Number_MajorNumber + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Device_Numberno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Device_Number_MajorNumber: + goto handle_MajorNumber + + case ffj_t_Device_Number_MinorNumber: + goto handle_MinorNumber + + case ffj_t_Device_Numberno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_MajorNumber: + + /* handler: uj.MajorNumber type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.MajorNumber = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.MajorNumber = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_MinorNumber: + + /* handler: uj.MinorNumber type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.MinorNumber = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.MinorNumber = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *DiscoveryInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return 
buf.Bytes(), nil +} +func (mj *DiscoveryInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "visibility":`) + + { + + obj, err = mj.Visibility.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + if mj.Name != nil { + if true { + buf.WriteString(`"name":`) + fflib.WriteJsonString(buf, string(*mj.Name)) + buf.WriteByte(',') + } + } + if mj.Environment != nil { + if true { + buf.WriteString(`"environment":`) + fflib.WriteJsonString(buf, string(*mj.Environment)) + buf.WriteByte(',') + } + } + if mj.Location != nil { + if true { + buf.WriteString(`"location":`) + fflib.WriteJsonString(buf, string(*mj.Location)) + buf.WriteByte(',') + } + } + if mj.Version != nil { + if true { + buf.WriteString(`"version":`) + fflib.WriteJsonString(buf, string(*mj.Version)) + buf.WriteByte(',') + } + } + if mj.Ports != nil { + if true { + buf.WriteString(`"ports":`) + + { + + err = mj.Ports.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Labels != nil { + if true { + buf.WriteString(`"labels":`) + + { + + err = mj.Labels.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_DiscoveryInfobase = iota + ffj_t_DiscoveryInfono_such_key + + ffj_t_DiscoveryInfo_Visibility + + ffj_t_DiscoveryInfo_Name + + ffj_t_DiscoveryInfo_Environment + + ffj_t_DiscoveryInfo_Location + + ffj_t_DiscoveryInfo_Version + + ffj_t_DiscoveryInfo_Ports + + ffj_t_DiscoveryInfo_Labels +) + +var ffj_key_DiscoveryInfo_Visibility = []byte("visibility") + +var ffj_key_DiscoveryInfo_Name = []byte("name") + +var ffj_key_DiscoveryInfo_Environment = []byte("environment") + +var ffj_key_DiscoveryInfo_Location = []byte("location") + +var ffj_key_DiscoveryInfo_Version = []byte("version") + +var ffj_key_DiscoveryInfo_Ports = []byte("ports") + +var ffj_key_DiscoveryInfo_Labels = []byte("labels") + +func (uj *DiscoveryInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *DiscoveryInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_DiscoveryInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_DiscoveryInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'e': + + if bytes.Equal(ffj_key_DiscoveryInfo_Environment, kn) { + currentKey = ffj_t_DiscoveryInfo_Environment + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'l': + + if bytes.Equal(ffj_key_DiscoveryInfo_Location, kn) { + currentKey = ffj_t_DiscoveryInfo_Location + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_DiscoveryInfo_Labels, kn) { + currentKey = ffj_t_DiscoveryInfo_Labels + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffj_key_DiscoveryInfo_Name, kn) { + currentKey = ffj_t_DiscoveryInfo_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffj_key_DiscoveryInfo_Ports, kn) { + currentKey = ffj_t_DiscoveryInfo_Ports + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'v': + + if bytes.Equal(ffj_key_DiscoveryInfo_Visibility, kn) { + currentKey = ffj_t_DiscoveryInfo_Visibility + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_DiscoveryInfo_Version, kn) { + currentKey = ffj_t_DiscoveryInfo_Version + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_DiscoveryInfo_Labels, kn) { + currentKey = ffj_t_DiscoveryInfo_Labels + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_DiscoveryInfo_Ports, kn) { + currentKey = ffj_t_DiscoveryInfo_Ports + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_DiscoveryInfo_Version, kn) { + currentKey = ffj_t_DiscoveryInfo_Version + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_DiscoveryInfo_Location, kn) { + currentKey = ffj_t_DiscoveryInfo_Location + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_DiscoveryInfo_Environment, kn) { + currentKey = ffj_t_DiscoveryInfo_Environment + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_DiscoveryInfo_Name, kn) { + currentKey = ffj_t_DiscoveryInfo_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_DiscoveryInfo_Visibility, kn) { + currentKey = ffj_t_DiscoveryInfo_Visibility + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_DiscoveryInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_DiscoveryInfo_Visibility: + goto handle_Visibility + + case ffj_t_DiscoveryInfo_Name: + goto handle_Name + + case ffj_t_DiscoveryInfo_Environment: + goto handle_Environment + + case ffj_t_DiscoveryInfo_Location: + goto handle_Location + + case ffj_t_DiscoveryInfo_Version: + goto handle_Version + + case ffj_t_DiscoveryInfo_Ports: + goto handle_Ports + + case ffj_t_DiscoveryInfo_Labels: + goto handle_Labels + + case ffj_t_DiscoveryInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } 
+ state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Visibility: + + /* handler: uj.Visibility type=mesos.DiscoveryInfo_Visibility kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = uj.Visibility.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Name: + + /* handler: uj.Name type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Name = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Name = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Environment: + + /* handler: uj.Environment type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Environment = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Environment = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Location: + + /* handler: uj.Location type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Location = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Location = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Version: + + /* handler: uj.Version type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Version = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Version = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Ports: + + /* handler: uj.Ports type=mesos.Ports kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Ports = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Ports == nil { + uj.Ports = new(Ports) + } + + err = uj.Ports.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Labels: + + /* handler: uj.Labels type=mesos.Labels kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Labels = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Labels == nil { + uj.Labels = new(Labels) + } + + err = uj.Labels.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) 
+wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *DiskStatistics) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *DiskStatistics) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.Source != nil { + if true { + buf.WriteString(`"source":`) + + { + + err = mj.Source.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Persistence != nil { + if true { + buf.WriteString(`"persistence":`) + + { + + err = mj.Persistence.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.LimitBytes != nil { + if true { + buf.WriteString(`"limit_bytes":`) + fflib.FormatBits2(buf, uint64(*mj.LimitBytes), 10, false) + buf.WriteByte(',') + } + } + if mj.UsedBytes != nil { + if true { + buf.WriteString(`"used_bytes":`) + fflib.FormatBits2(buf, uint64(*mj.UsedBytes), 10, false) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_DiskStatisticsbase = iota + ffj_t_DiskStatisticsno_such_key + + ffj_t_DiskStatistics_Source + + ffj_t_DiskStatistics_Persistence + + ffj_t_DiskStatistics_LimitBytes + + ffj_t_DiskStatistics_UsedBytes +) + +var ffj_key_DiskStatistics_Source = []byte("source") + +var ffj_key_DiskStatistics_Persistence = []byte("persistence") + +var ffj_key_DiskStatistics_LimitBytes = []byte("limit_bytes") + +var ffj_key_DiskStatistics_UsedBytes = []byte("used_bytes") + +func (uj *DiskStatistics) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *DiskStatistics) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_DiskStatisticsbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_DiskStatisticsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'l': + + if bytes.Equal(ffj_key_DiskStatistics_LimitBytes, kn) { + currentKey = ffj_t_DiskStatistics_LimitBytes + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffj_key_DiskStatistics_Persistence, kn) { + currentKey = ffj_t_DiskStatistics_Persistence + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_DiskStatistics_Source, kn) { + currentKey = ffj_t_DiskStatistics_Source + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'u': + + if bytes.Equal(ffj_key_DiskStatistics_UsedBytes, kn) { + currentKey = ffj_t_DiskStatistics_UsedBytes + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_DiskStatistics_UsedBytes, kn) { + currentKey = ffj_t_DiskStatistics_UsedBytes + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_DiskStatistics_LimitBytes, kn) { + currentKey = ffj_t_DiskStatistics_LimitBytes + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_DiskStatistics_Persistence, kn) { + currentKey = ffj_t_DiskStatistics_Persistence + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_DiskStatistics_Source, kn) { + currentKey = ffj_t_DiskStatistics_Source + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_DiskStatisticsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_DiskStatistics_Source: + goto handle_Source + + case ffj_t_DiskStatistics_Persistence: + goto handle_Persistence + + case ffj_t_DiskStatistics_LimitBytes: + goto handle_LimitBytes + + case ffj_t_DiskStatistics_UsedBytes: + goto handle_UsedBytes + + case ffj_t_DiskStatisticsno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Source: + + /* handler: uj.Source type=mesos.Resource_DiskInfo_Source kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Source = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Source == nil { + uj.Source = new(Resource_DiskInfo_Source) + } + + err = uj.Source.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Persistence: + + /* handler: uj.Persistence type=mesos.Resource_DiskInfo_Persistence kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Persistence = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Persistence == nil { + uj.Persistence = new(Resource_DiskInfo_Persistence) + } + + err = uj.Persistence.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + 
goto mainparse + +handle_LimitBytes: + + /* handler: uj.LimitBytes type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.LimitBytes = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.LimitBytes = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_UsedBytes: + + /* handler: uj.UsedBytes type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.UsedBytes = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.UsedBytes = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *DomainInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *DomainInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.FaultDomain != nil { + if true { + buf.WriteString(`"fault_domain":`) + + { + + err = mj.FaultDomain.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_DomainInfobase = iota + ffj_t_DomainInfono_such_key + + ffj_t_DomainInfo_FaultDomain +) + +var ffj_key_DomainInfo_FaultDomain = []byte("fault_domain") + +func (uj *DomainInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *DomainInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_DomainInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. 
+ if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_DomainInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'f': + + if bytes.Equal(ffj_key_DomainInfo_FaultDomain, kn) { + currentKey = ffj_t_DomainInfo_FaultDomain + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.AsciiEqualFold(ffj_key_DomainInfo_FaultDomain, kn) { + currentKey = ffj_t_DomainInfo_FaultDomain + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_DomainInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_DomainInfo_FaultDomain: + goto handle_FaultDomain + + case ffj_t_DomainInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_FaultDomain: + + /* handler: uj.FaultDomain type=mesos.DomainInfo_FaultDomain kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.FaultDomain = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.FaultDomain == nil { + uj.FaultDomain = new(DomainInfo_FaultDomain) + } + + err = uj.FaultDomain.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *DomainInfo_FaultDomain) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *DomainInfo_FaultDomain) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"region":`) + + { + + err = mj.Region.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteString(`,"zone":`) + + { + + err = mj.Zone.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_DomainInfo_FaultDomainbase = iota + ffj_t_DomainInfo_FaultDomainno_such_key + + ffj_t_DomainInfo_FaultDomain_Region + + ffj_t_DomainInfo_FaultDomain_Zone +) + +var ffj_key_DomainInfo_FaultDomain_Region = []byte("region") + +var ffj_key_DomainInfo_FaultDomain_Zone = []byte("zone") + +func (uj 
*DomainInfo_FaultDomain) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *DomainInfo_FaultDomain) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_DomainInfo_FaultDomainbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_DomainInfo_FaultDomainno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'r': + + if bytes.Equal(ffj_key_DomainInfo_FaultDomain_Region, kn) { + currentKey = ffj_t_DomainInfo_FaultDomain_Region + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'z': + + if bytes.Equal(ffj_key_DomainInfo_FaultDomain_Zone, kn) { + currentKey = ffj_t_DomainInfo_FaultDomain_Zone + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_DomainInfo_FaultDomain_Zone, kn) { + currentKey = ffj_t_DomainInfo_FaultDomain_Zone + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_DomainInfo_FaultDomain_Region, kn) { + currentKey = ffj_t_DomainInfo_FaultDomain_Region + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_DomainInfo_FaultDomainno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_DomainInfo_FaultDomain_Region: + goto handle_Region + + case ffj_t_DomainInfo_FaultDomain_Zone: + goto handle_Zone + + case ffj_t_DomainInfo_FaultDomainno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Region: + + /* handler: uj.Region type=mesos.DomainInfo_FaultDomain_RegionInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.Region.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Zone: + + /* handler: uj.Zone 
type=mesos.DomainInfo_FaultDomain_ZoneInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.Zone.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *DomainInfo_FaultDomain_RegionInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *DomainInfo_FaultDomain_RegionInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"name":`) + fflib.WriteJsonString(buf, string(mj.Name)) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_DomainInfo_FaultDomain_RegionInfobase = iota + ffj_t_DomainInfo_FaultDomain_RegionInfono_such_key + + ffj_t_DomainInfo_FaultDomain_RegionInfo_Name +) + +var ffj_key_DomainInfo_FaultDomain_RegionInfo_Name = []byte("name") + +func (uj *DomainInfo_FaultDomain_RegionInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *DomainInfo_FaultDomain_RegionInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_DomainInfo_FaultDomain_RegionInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_DomainInfo_FaultDomain_RegionInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'n': + + if bytes.Equal(ffj_key_DomainInfo_FaultDomain_RegionInfo_Name, kn) { + currentKey = ffj_t_DomainInfo_FaultDomain_RegionInfo_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_DomainInfo_FaultDomain_RegionInfo_Name, kn) { + currentKey = ffj_t_DomainInfo_FaultDomain_RegionInfo_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_DomainInfo_FaultDomain_RegionInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_DomainInfo_FaultDomain_RegionInfo_Name: + goto handle_Name + + case ffj_t_DomainInfo_FaultDomain_RegionInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Name: + + /* handler: uj.Name type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Name = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *DomainInfo_FaultDomain_ZoneInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *DomainInfo_FaultDomain_ZoneInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"name":`) + fflib.WriteJsonString(buf, string(mj.Name)) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_DomainInfo_FaultDomain_ZoneInfobase = iota + ffj_t_DomainInfo_FaultDomain_ZoneInfono_such_key + + ffj_t_DomainInfo_FaultDomain_ZoneInfo_Name +) + +var ffj_key_DomainInfo_FaultDomain_ZoneInfo_Name = []byte("name") + +func (uj *DomainInfo_FaultDomain_ZoneInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *DomainInfo_FaultDomain_ZoneInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_DomainInfo_FaultDomain_ZoneInfobase + _ = currentKey + 
tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_DomainInfo_FaultDomain_ZoneInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'n': + + if bytes.Equal(ffj_key_DomainInfo_FaultDomain_ZoneInfo_Name, kn) { + currentKey = ffj_t_DomainInfo_FaultDomain_ZoneInfo_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_DomainInfo_FaultDomain_ZoneInfo_Name, kn) { + currentKey = ffj_t_DomainInfo_FaultDomain_ZoneInfo_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_DomainInfo_FaultDomain_ZoneInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_DomainInfo_FaultDomain_ZoneInfo_Name: + goto handle_Name + + case ffj_t_DomainInfo_FaultDomain_ZoneInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Name: + + /* handler: uj.Name type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Name = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *DurationInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *DurationInfo) MarshalJSONBuf(buf 
fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"nanoseconds":`) + fflib.FormatBits2(buf, uint64(mj.Nanoseconds), 10, mj.Nanoseconds < 0) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_DurationInfobase = iota + ffj_t_DurationInfono_such_key + + ffj_t_DurationInfo_Nanoseconds +) + +var ffj_key_DurationInfo_Nanoseconds = []byte("nanoseconds") + +func (uj *DurationInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *DurationInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_DurationInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_DurationInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'n': + + if bytes.Equal(ffj_key_DurationInfo_Nanoseconds, kn) { + currentKey = ffj_t_DurationInfo_Nanoseconds + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_DurationInfo_Nanoseconds, kn) { + currentKey = ffj_t_DurationInfo_Nanoseconds + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_DurationInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_DurationInfo_Nanoseconds: + goto handle_Nanoseconds + + case ffj_t_DurationInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Nanoseconds: + + /* handler: uj.Nanoseconds type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + uj.Nanoseconds = int64(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + 
+wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Environment) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Environment) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"variables":`) + if mj.Variables != nil { + buf.WriteString(`[`) + for i, v := range mj.Variables { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Environmentbase = iota + ffj_t_Environmentno_such_key + + ffj_t_Environment_Variables +) + +var ffj_key_Environment_Variables = []byte("variables") + +func (uj *Environment) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Environment) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Environmentbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Environmentno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'v': + + if bytes.Equal(ffj_key_Environment_Variables, kn) { + currentKey = ffj_t_Environment_Variables + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Environment_Variables, kn) { + currentKey = ffj_t_Environment_Variables + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Environmentno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Environment_Variables: + goto handle_Variables + + case ffj_t_Environmentno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Variables: + + /* handler: uj.Variables type=[]mesos.Environment_Variable kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Variables = nil + } else { + + uj.Variables = []Environment_Variable{} + + wantVal := true + + for { + + var tmp_uj__Variables Environment_Variable + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Variables type=mesos.Environment_Variable kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Variables.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Variables = append(uj.Variables, tmp_uj__Variables) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Environment_Variable) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Environment_Variable) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "name":`) + fflib.WriteJsonString(buf, string(mj.Name)) + buf.WriteByte(',') + if mj.Type != nil { + if true { + buf.WriteString(`"type":`) + + { + + obj, err = mj.Type.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + } + } + if mj.Value != nil { + if true { + buf.WriteString(`"value":`) + fflib.WriteJsonString(buf, string(*mj.Value)) + buf.WriteByte(',') + } + } + if mj.Secret != nil { + if true { + buf.WriteString(`"secret":`) + + { + + err = mj.Secret.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Environment_Variablebase = iota + ffj_t_Environment_Variableno_such_key + + ffj_t_Environment_Variable_Name + + ffj_t_Environment_Variable_Type + + ffj_t_Environment_Variable_Value + + ffj_t_Environment_Variable_Secret +) + +var ffj_key_Environment_Variable_Name = []byte("name") + +var ffj_key_Environment_Variable_Type = []byte("type") + +var ffj_key_Environment_Variable_Value = []byte("value") + +var ffj_key_Environment_Variable_Secret = []byte("secret") + +func (uj *Environment_Variable) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Environment_Variable) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Environment_Variablebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = 
fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Environment_Variableno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'n': + + if bytes.Equal(ffj_key_Environment_Variable_Name, kn) { + currentKey = ffj_t_Environment_Variable_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_Environment_Variable_Secret, kn) { + currentKey = ffj_t_Environment_Variable_Secret + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_Environment_Variable_Type, kn) { + currentKey = ffj_t_Environment_Variable_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'v': + + if bytes.Equal(ffj_key_Environment_Variable_Value, kn) { + currentKey = ffj_t_Environment_Variable_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Environment_Variable_Secret, kn) { + currentKey = ffj_t_Environment_Variable_Secret + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Environment_Variable_Value, kn) { + currentKey = ffj_t_Environment_Variable_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Environment_Variable_Type, kn) { + currentKey = ffj_t_Environment_Variable_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Environment_Variable_Name, kn) { + currentKey = ffj_t_Environment_Variable_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Environment_Variableno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Environment_Variable_Name: + goto handle_Name + + case ffj_t_Environment_Variable_Type: + goto handle_Type + + case ffj_t_Environment_Variable_Value: + goto handle_Value + + case ffj_t_Environment_Variable_Secret: + goto handle_Secret + + case ffj_t_Environment_Variableno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Name: + + /* handler: uj.Name type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Name = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Type: + + /* handler: uj.Type type=mesos.Environment_Variable_Type kind=int32 
quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Type = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + if uj.Type == nil { + uj.Type = new(Environment_Variable_Type) + } + + err = uj.Type.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Value: + + /* handler: uj.Value type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Value = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Value = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Secret: + + /* handler: uj.Secret type=mesos.Secret kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Secret = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Secret == nil { + uj.Secret = new(Secret) + } + + err = uj.Secret.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *ExecutorID) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *ExecutorID) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"value":`) + fflib.WriteJsonString(buf, string(mj.Value)) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_ExecutorIDbase = iota + ffj_t_ExecutorIDno_such_key + + ffj_t_ExecutorID_Value +) + +var ffj_key_ExecutorID_Value = []byte("value") + +func (uj *ExecutorID) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *ExecutorID) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_ExecutorIDbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto 
wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_ExecutorIDno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'v': + + if bytes.Equal(ffj_key_ExecutorID_Value, kn) { + currentKey = ffj_t_ExecutorID_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_ExecutorID_Value, kn) { + currentKey = ffj_t_ExecutorID_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_ExecutorIDno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_ExecutorID_Value: + goto handle_Value + + case ffj_t_ExecutorIDno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Value: + + /* handler: uj.Value type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Value = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *ExecutorInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *ExecutorInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "type":`) + + { + + obj, err = mj.Type.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteString(`,"executor_id":`) + + { + + err = mj.ExecutorID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + if mj.FrameworkID != nil { + if true { + buf.WriteString(`"framework_id":`) + + { + + err = mj.FrameworkID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Command != nil { + if true { + buf.WriteString(`"command":`) + + { + + err = mj.Command.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + 
buf.WriteByte(',') + } + } + if mj.Container != nil { + if true { + buf.WriteString(`"container":`) + + { + + err = mj.Container.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.WriteString(`"resources":`) + if mj.Resources != nil { + buf.WriteString(`[`) + for i, v := range mj.Resources { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + if mj.Name != nil { + if true { + buf.WriteString(`"name":`) + fflib.WriteJsonString(buf, string(*mj.Name)) + buf.WriteByte(',') + } + } + if mj.Source != nil { + if true { + buf.WriteString(`"source":`) + fflib.WriteJsonString(buf, string(*mj.Source)) + buf.WriteByte(',') + } + } + if len(mj.Data) != 0 { + buf.WriteString(`"data":`) + if mj.Data != nil { + buf.WriteString(`"`) + { + enc := base64.NewEncoder(base64.StdEncoding, buf) + enc.Write(reflect.Indirect(reflect.ValueOf(mj.Data)).Bytes()) + enc.Close() + } + buf.WriteString(`"`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + if mj.Discovery != nil { + if true { + buf.WriteString(`"discovery":`) + + { + + err = mj.Discovery.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.ShutdownGracePeriod != nil { + if true { + buf.WriteString(`"shutdown_grace_period":`) + + { + + err = mj.ShutdownGracePeriod.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Labels != nil { + if true { + buf.WriteString(`"labels":`) + + { + + err = mj.Labels.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_ExecutorInfobase = iota + ffj_t_ExecutorInfono_such_key + + ffj_t_ExecutorInfo_Type + + ffj_t_ExecutorInfo_ExecutorID + + ffj_t_ExecutorInfo_FrameworkID + + ffj_t_ExecutorInfo_Command + + ffj_t_ExecutorInfo_Container + + ffj_t_ExecutorInfo_Resources + + ffj_t_ExecutorInfo_Name + + ffj_t_ExecutorInfo_Source + + ffj_t_ExecutorInfo_Data + + ffj_t_ExecutorInfo_Discovery + + ffj_t_ExecutorInfo_ShutdownGracePeriod + + ffj_t_ExecutorInfo_Labels +) + +var ffj_key_ExecutorInfo_Type = []byte("type") + +var ffj_key_ExecutorInfo_ExecutorID = []byte("executor_id") + +var ffj_key_ExecutorInfo_FrameworkID = []byte("framework_id") + +var ffj_key_ExecutorInfo_Command = []byte("command") + +var ffj_key_ExecutorInfo_Container = []byte("container") + +var ffj_key_ExecutorInfo_Resources = []byte("resources") + +var ffj_key_ExecutorInfo_Name = []byte("name") + +var ffj_key_ExecutorInfo_Source = []byte("source") + +var ffj_key_ExecutorInfo_Data = []byte("data") + +var ffj_key_ExecutorInfo_Discovery = []byte("discovery") + +var ffj_key_ExecutorInfo_ShutdownGracePeriod = []byte("shutdown_grace_period") + +var ffj_key_ExecutorInfo_Labels = []byte("labels") + +func (uj *ExecutorInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *ExecutorInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_ExecutorInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + 
case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_ExecutorInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_ExecutorInfo_Command, kn) { + currentKey = ffj_t_ExecutorInfo_Command + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ExecutorInfo_Container, kn) { + currentKey = ffj_t_ExecutorInfo_Container + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'd': + + if bytes.Equal(ffj_key_ExecutorInfo_Data, kn) { + currentKey = ffj_t_ExecutorInfo_Data + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ExecutorInfo_Discovery, kn) { + currentKey = ffj_t_ExecutorInfo_Discovery + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'e': + + if bytes.Equal(ffj_key_ExecutorInfo_ExecutorID, kn) { + currentKey = ffj_t_ExecutorInfo_ExecutorID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'f': + + if bytes.Equal(ffj_key_ExecutorInfo_FrameworkID, kn) { + currentKey = ffj_t_ExecutorInfo_FrameworkID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'l': + + if bytes.Equal(ffj_key_ExecutorInfo_Labels, kn) { + currentKey = ffj_t_ExecutorInfo_Labels + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffj_key_ExecutorInfo_Name, kn) { + currentKey = ffj_t_ExecutorInfo_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'r': + + if bytes.Equal(ffj_key_ExecutorInfo_Resources, kn) { + currentKey = ffj_t_ExecutorInfo_Resources + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_ExecutorInfo_Source, kn) { + currentKey = ffj_t_ExecutorInfo_Source + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ExecutorInfo_ShutdownGracePeriod, kn) { + currentKey = ffj_t_ExecutorInfo_ShutdownGracePeriod + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_ExecutorInfo_Type, kn) { + currentKey = ffj_t_ExecutorInfo_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_ExecutorInfo_Labels, kn) { + currentKey = ffj_t_ExecutorInfo_Labels + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ExecutorInfo_ShutdownGracePeriod, kn) { + currentKey = ffj_t_ExecutorInfo_ShutdownGracePeriod + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ExecutorInfo_Discovery, kn) { + currentKey = ffj_t_ExecutorInfo_Discovery + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_ExecutorInfo_Data, kn) { + currentKey = ffj_t_ExecutorInfo_Data + state = fflib.FFParse_want_colon + goto mainparse + } + + if 
fflib.EqualFoldRight(ffj_key_ExecutorInfo_Source, kn) { + currentKey = ffj_t_ExecutorInfo_Source + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_ExecutorInfo_Name, kn) { + currentKey = ffj_t_ExecutorInfo_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ExecutorInfo_Resources, kn) { + currentKey = ffj_t_ExecutorInfo_Resources + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_ExecutorInfo_Container, kn) { + currentKey = ffj_t_ExecutorInfo_Container + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_ExecutorInfo_Command, kn) { + currentKey = ffj_t_ExecutorInfo_Command + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ExecutorInfo_FrameworkID, kn) { + currentKey = ffj_t_ExecutorInfo_FrameworkID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_ExecutorInfo_ExecutorID, kn) { + currentKey = ffj_t_ExecutorInfo_ExecutorID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_ExecutorInfo_Type, kn) { + currentKey = ffj_t_ExecutorInfo_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_ExecutorInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_ExecutorInfo_Type: + goto handle_Type + + case ffj_t_ExecutorInfo_ExecutorID: + goto handle_ExecutorID + + case ffj_t_ExecutorInfo_FrameworkID: + goto handle_FrameworkID + + case ffj_t_ExecutorInfo_Command: + goto handle_Command + + case ffj_t_ExecutorInfo_Container: + goto handle_Container + + case ffj_t_ExecutorInfo_Resources: + goto handle_Resources + + case ffj_t_ExecutorInfo_Name: + goto handle_Name + + case ffj_t_ExecutorInfo_Source: + goto handle_Source + + case ffj_t_ExecutorInfo_Data: + goto handle_Data + + case ffj_t_ExecutorInfo_Discovery: + goto handle_Discovery + + case ffj_t_ExecutorInfo_ShutdownGracePeriod: + goto handle_ShutdownGracePeriod + + case ffj_t_ExecutorInfo_Labels: + goto handle_Labels + + case ffj_t_ExecutorInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Type: + + /* handler: uj.Type type=mesos.ExecutorInfo_Type kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = uj.Type.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ExecutorID: + + /* handler: uj.ExecutorID type=mesos.ExecutorID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.ExecutorID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err 
!= nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_FrameworkID: + + /* handler: uj.FrameworkID type=mesos.FrameworkID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.FrameworkID = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.FrameworkID == nil { + uj.FrameworkID = new(FrameworkID) + } + + err = uj.FrameworkID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Command: + + /* handler: uj.Command type=mesos.CommandInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Command = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Command == nil { + uj.Command = new(CommandInfo) + } + + err = uj.Command.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Container: + + /* handler: uj.Container type=mesos.ContainerInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Container = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Container == nil { + uj.Container = new(ContainerInfo) + } + + err = uj.Container.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Resources: + + /* handler: uj.Resources type=[]mesos.Resource kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Resources = nil + } else { + + uj.Resources = []Resource{} + + wantVal := true + + for { + + var tmp_uj__Resources Resource + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Resources type=mesos.Resource kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Resources.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Resources = append(uj.Resources, tmp_uj__Resources) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Name: + + /* handler: uj.Name type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Name = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Name = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Source: + + /* handler: uj.Source type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Source = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Source = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Data: + + /* handler: uj.Data type=[]uint8 kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Data = nil + } else { + b := make([]byte, base64.StdEncoding.DecodedLen(fs.Output.Len())) + n, err := base64.StdEncoding.Decode(b, fs.Output.Bytes()) + if err != nil { + return fs.WrapErr(err) + } + + v := reflect.ValueOf(&uj.Data).Elem() + v.SetBytes(b[0:n]) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Discovery: + + /* handler: uj.Discovery type=mesos.DiscoveryInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Discovery = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Discovery == nil { + uj.Discovery = new(DiscoveryInfo) + } + + err = uj.Discovery.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ShutdownGracePeriod: + + /* handler: uj.ShutdownGracePeriod type=mesos.DurationInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.ShutdownGracePeriod = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.ShutdownGracePeriod == nil { + uj.ShutdownGracePeriod = new(DurationInfo) + } + + err = uj.ShutdownGracePeriod.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Labels: + + /* handler: uj.Labels type=mesos.Labels kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Labels = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Labels == nil { + uj.Labels = new(Labels) + } + + err = uj.Labels.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + 
return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *FileInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *FileInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "path":`) + fflib.WriteJsonString(buf, string(mj.Path)) + buf.WriteByte(',') + if mj.Nlink != nil { + if true { + buf.WriteString(`"nlink":`) + fflib.FormatBits2(buf, uint64(*mj.Nlink), 10, *mj.Nlink < 0) + buf.WriteByte(',') + } + } + if mj.Size != nil { + if true { + buf.WriteString(`"size":`) + fflib.FormatBits2(buf, uint64(*mj.Size), 10, false) + buf.WriteByte(',') + } + } + if mj.Mtime != nil { + if true { + buf.WriteString(`"mtime":`) + + { + + err = mj.Mtime.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Mode != nil { + if true { + buf.WriteString(`"mode":`) + fflib.FormatBits2(buf, uint64(*mj.Mode), 10, false) + buf.WriteByte(',') + } + } + if mj.UID != nil { + if true { + buf.WriteString(`"uid":`) + fflib.WriteJsonString(buf, string(*mj.UID)) + buf.WriteByte(',') + } + } + if mj.GID != nil { + if true { + buf.WriteString(`"gid":`) + fflib.WriteJsonString(buf, string(*mj.GID)) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_FileInfobase = iota + ffj_t_FileInfono_such_key + + ffj_t_FileInfo_Path + + ffj_t_FileInfo_Nlink + + ffj_t_FileInfo_Size + + ffj_t_FileInfo_Mtime + + ffj_t_FileInfo_Mode + + ffj_t_FileInfo_UID + + ffj_t_FileInfo_GID +) + +var ffj_key_FileInfo_Path = []byte("path") + +var ffj_key_FileInfo_Nlink = []byte("nlink") + +var ffj_key_FileInfo_Size = []byte("size") + +var ffj_key_FileInfo_Mtime = []byte("mtime") + +var ffj_key_FileInfo_Mode = []byte("mode") + +var ffj_key_FileInfo_UID = []byte("uid") + +var ffj_key_FileInfo_GID = []byte("gid") + +func (uj *FileInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *FileInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_FileInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + 
wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_FileInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'g': + + if bytes.Equal(ffj_key_FileInfo_GID, kn) { + currentKey = ffj_t_FileInfo_GID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'm': + + if bytes.Equal(ffj_key_FileInfo_Mtime, kn) { + currentKey = ffj_t_FileInfo_Mtime + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_FileInfo_Mode, kn) { + currentKey = ffj_t_FileInfo_Mode + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffj_key_FileInfo_Nlink, kn) { + currentKey = ffj_t_FileInfo_Nlink + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffj_key_FileInfo_Path, kn) { + currentKey = ffj_t_FileInfo_Path + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_FileInfo_Size, kn) { + currentKey = ffj_t_FileInfo_Size + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'u': + + if bytes.Equal(ffj_key_FileInfo_UID, kn) { + currentKey = ffj_t_FileInfo_UID + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_FileInfo_GID, kn) { + currentKey = ffj_t_FileInfo_GID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_FileInfo_UID, kn) { + currentKey = ffj_t_FileInfo_UID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_FileInfo_Mode, kn) { + currentKey = ffj_t_FileInfo_Mode + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_FileInfo_Mtime, kn) { + currentKey = ffj_t_FileInfo_Mtime + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_FileInfo_Size, kn) { + currentKey = ffj_t_FileInfo_Size + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_FileInfo_Nlink, kn) { + currentKey = ffj_t_FileInfo_Nlink + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_FileInfo_Path, kn) { + currentKey = ffj_t_FileInfo_Path + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_FileInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_FileInfo_Path: + goto handle_Path + + case ffj_t_FileInfo_Nlink: + goto handle_Nlink + + case ffj_t_FileInfo_Size: + goto handle_Size + + case ffj_t_FileInfo_Mtime: + goto handle_Mtime + + case ffj_t_FileInfo_Mode: + goto handle_Mode + + case ffj_t_FileInfo_UID: + goto handle_UID + + case ffj_t_FileInfo_GID: + goto handle_GID + + case ffj_t_FileInfono_such_key: + err = fs.SkipField(tok) + if 
err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Path: + + /* handler: uj.Path type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Path = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Nlink: + + /* handler: uj.Nlink type=int32 kind=int32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int32", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.Nlink = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 32) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int32(tval) + uj.Nlink = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Size: + + /* handler: uj.Size type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.Size = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.Size = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Mtime: + + /* handler: uj.Mtime type=mesos.TimeInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Mtime = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Mtime == nil { + uj.Mtime = new(TimeInfo) + } + + err = uj.Mtime.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Mode: + + /* handler: uj.Mode type=uint32 kind=uint32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint32", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.Mode = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint32(tval) + uj.Mode = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_UID: + + /* handler: uj.UID type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.UID = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.UID = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_GID: + + /* handler: uj.GID type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.GID = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.GID = &tval + + } + } + + state = fflib.FFParse_after_value + goto 
mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Filters) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Filters) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.RefuseSeconds != nil { + if true { + buf.WriteString(`"refuse_seconds":`) + fflib.AppendFloat(buf, float64(*mj.RefuseSeconds), 'g', -1, 64) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Filtersbase = iota + ffj_t_Filtersno_such_key + + ffj_t_Filters_RefuseSeconds +) + +var ffj_key_Filters_RefuseSeconds = []byte("refuse_seconds") + +func (uj *Filters) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Filters) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Filtersbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Filtersno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'r': + + if bytes.Equal(ffj_key_Filters_RefuseSeconds, kn) { + currentKey = ffj_t_Filters_RefuseSeconds + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Filters_RefuseSeconds, kn) { + currentKey = ffj_t_Filters_RefuseSeconds + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Filtersno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Filters_RefuseSeconds: + goto handle_RefuseSeconds + + case ffj_t_Filtersno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_RefuseSeconds: + + /* handler: uj.RefuseSeconds type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.RefuseSeconds = nil + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := float64(tval) + uj.RefuseSeconds = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Flag) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Flag) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "name":`) + fflib.WriteJsonString(buf, string(mj.Name)) + buf.WriteByte(',') + if mj.Value != nil { + if true { + buf.WriteString(`"value":`) + fflib.WriteJsonString(buf, string(*mj.Value)) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Flagbase = iota + ffj_t_Flagno_such_key + + ffj_t_Flag_Name + + ffj_t_Flag_Value +) + +var ffj_key_Flag_Name = []byte("name") + +var ffj_key_Flag_Value = []byte("value") + +func (uj *Flag) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Flag) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Flagbase + _ 
= currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Flagno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'n': + + if bytes.Equal(ffj_key_Flag_Name, kn) { + currentKey = ffj_t_Flag_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'v': + + if bytes.Equal(ffj_key_Flag_Value, kn) { + currentKey = ffj_t_Flag_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Flag_Value, kn) { + currentKey = ffj_t_Flag_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Flag_Name, kn) { + currentKey = ffj_t_Flag_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Flagno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Flag_Name: + goto handle_Name + + case ffj_t_Flag_Value: + goto handle_Value + + case ffj_t_Flagno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Name: + + /* handler: uj.Name type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Name = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Value: + + /* handler: uj.Value type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Value = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Value = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted 
token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *FrameworkID) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *FrameworkID) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"value":`) + fflib.WriteJsonString(buf, string(mj.Value)) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_FrameworkIDbase = iota + ffj_t_FrameworkIDno_such_key + + ffj_t_FrameworkID_Value +) + +var ffj_key_FrameworkID_Value = []byte("value") + +func (uj *FrameworkID) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *FrameworkID) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_FrameworkIDbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_FrameworkIDno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'v': + + if bytes.Equal(ffj_key_FrameworkID_Value, kn) { + currentKey = ffj_t_FrameworkID_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_FrameworkID_Value, kn) { + currentKey = ffj_t_FrameworkID_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_FrameworkIDno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_FrameworkID_Value: + goto handle_Value + + case ffj_t_FrameworkIDno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Value: + + /* handler: uj.Value type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Value = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *FrameworkInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *FrameworkInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "user":`) + fflib.WriteJsonString(buf, string(mj.User)) + buf.WriteString(`,"name":`) + fflib.WriteJsonString(buf, string(mj.Name)) + buf.WriteByte(',') + if mj.ID != nil { + if true { + buf.WriteString(`"id":`) + + { + + err = mj.ID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.FailoverTimeout != nil { + if true { + buf.WriteString(`"failover_timeout":`) + fflib.AppendFloat(buf, float64(*mj.FailoverTimeout), 'g', -1, 64) + buf.WriteByte(',') + } + } + if mj.Checkpoint != nil { + if true { + if *mj.Checkpoint { + buf.WriteString(`"checkpoint":true`) + } else { + buf.WriteString(`"checkpoint":false`) + } + buf.WriteByte(',') + } + } + if mj.Role != nil { + if true { + buf.WriteString(`"role":`) + fflib.WriteJsonString(buf, string(*mj.Role)) + buf.WriteByte(',') + } + } + if len(mj.Roles) != 0 { + buf.WriteString(`"roles":`) + if mj.Roles != nil { + 
buf.WriteString(`[`) + for i, v := range mj.Roles { + if i != 0 { + buf.WriteString(`,`) + } + fflib.WriteJsonString(buf, string(v)) + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + if mj.Hostname != nil { + if true { + buf.WriteString(`"hostname":`) + fflib.WriteJsonString(buf, string(*mj.Hostname)) + buf.WriteByte(',') + } + } + if mj.Principal != nil { + if true { + buf.WriteString(`"principal":`) + fflib.WriteJsonString(buf, string(*mj.Principal)) + buf.WriteByte(',') + } + } + if mj.WebUiURL != nil { + if true { + buf.WriteString(`"webui_url":`) + fflib.WriteJsonString(buf, string(*mj.WebUiURL)) + buf.WriteByte(',') + } + } + buf.WriteString(`"capabilities":`) + if mj.Capabilities != nil { + buf.WriteString(`[`) + for i, v := range mj.Capabilities { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + if mj.Labels != nil { + if true { + buf.WriteString(`"labels":`) + + { + + err = mj.Labels.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_FrameworkInfobase = iota + ffj_t_FrameworkInfono_such_key + + ffj_t_FrameworkInfo_User + + ffj_t_FrameworkInfo_Name + + ffj_t_FrameworkInfo_ID + + ffj_t_FrameworkInfo_FailoverTimeout + + ffj_t_FrameworkInfo_Checkpoint + + ffj_t_FrameworkInfo_Role + + ffj_t_FrameworkInfo_Roles + + ffj_t_FrameworkInfo_Hostname + + ffj_t_FrameworkInfo_Principal + + ffj_t_FrameworkInfo_WebUiURL + + ffj_t_FrameworkInfo_Capabilities + + ffj_t_FrameworkInfo_Labels +) + +var ffj_key_FrameworkInfo_User = []byte("user") + +var ffj_key_FrameworkInfo_Name = []byte("name") + +var ffj_key_FrameworkInfo_ID = []byte("id") + +var ffj_key_FrameworkInfo_FailoverTimeout = []byte("failover_timeout") + +var ffj_key_FrameworkInfo_Checkpoint = []byte("checkpoint") + +var ffj_key_FrameworkInfo_Role = []byte("role") + +var ffj_key_FrameworkInfo_Roles = []byte("roles") + +var ffj_key_FrameworkInfo_Hostname = []byte("hostname") + +var ffj_key_FrameworkInfo_Principal = []byte("principal") + +var ffj_key_FrameworkInfo_WebUiURL = []byte("webui_url") + +var ffj_key_FrameworkInfo_Capabilities = []byte("capabilities") + +var ffj_key_FrameworkInfo_Labels = []byte("labels") + +func (uj *FrameworkInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *FrameworkInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_FrameworkInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. 
+ if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_FrameworkInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_FrameworkInfo_Checkpoint, kn) { + currentKey = ffj_t_FrameworkInfo_Checkpoint + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_FrameworkInfo_Capabilities, kn) { + currentKey = ffj_t_FrameworkInfo_Capabilities + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'f': + + if bytes.Equal(ffj_key_FrameworkInfo_FailoverTimeout, kn) { + currentKey = ffj_t_FrameworkInfo_FailoverTimeout + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'h': + + if bytes.Equal(ffj_key_FrameworkInfo_Hostname, kn) { + currentKey = ffj_t_FrameworkInfo_Hostname + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffj_key_FrameworkInfo_ID, kn) { + currentKey = ffj_t_FrameworkInfo_ID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'l': + + if bytes.Equal(ffj_key_FrameworkInfo_Labels, kn) { + currentKey = ffj_t_FrameworkInfo_Labels + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffj_key_FrameworkInfo_Name, kn) { + currentKey = ffj_t_FrameworkInfo_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffj_key_FrameworkInfo_Principal, kn) { + currentKey = ffj_t_FrameworkInfo_Principal + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'r': + + if bytes.Equal(ffj_key_FrameworkInfo_Role, kn) { + currentKey = ffj_t_FrameworkInfo_Role + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_FrameworkInfo_Roles, kn) { + currentKey = ffj_t_FrameworkInfo_Roles + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'u': + + if bytes.Equal(ffj_key_FrameworkInfo_User, kn) { + currentKey = ffj_t_FrameworkInfo_User + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'w': + + if bytes.Equal(ffj_key_FrameworkInfo_WebUiURL, kn) { + currentKey = ffj_t_FrameworkInfo_WebUiURL + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_FrameworkInfo_Labels, kn) { + currentKey = ffj_t_FrameworkInfo_Labels + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_FrameworkInfo_Capabilities, kn) { + currentKey = ffj_t_FrameworkInfo_Capabilities + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_FrameworkInfo_WebUiURL, kn) { + currentKey = ffj_t_FrameworkInfo_WebUiURL + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_FrameworkInfo_Principal, kn) { + currentKey = ffj_t_FrameworkInfo_Principal + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_FrameworkInfo_Hostname, kn) { + currentKey = ffj_t_FrameworkInfo_Hostname + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_FrameworkInfo_Roles, kn) { + currentKey = ffj_t_FrameworkInfo_Roles + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_FrameworkInfo_Role, kn) { + currentKey = ffj_t_FrameworkInfo_Role + state = fflib.FFParse_want_colon + goto mainparse + } + + if 
fflib.EqualFoldRight(ffj_key_FrameworkInfo_Checkpoint, kn) { + currentKey = ffj_t_FrameworkInfo_Checkpoint + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_FrameworkInfo_FailoverTimeout, kn) { + currentKey = ffj_t_FrameworkInfo_FailoverTimeout + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_FrameworkInfo_ID, kn) { + currentKey = ffj_t_FrameworkInfo_ID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_FrameworkInfo_Name, kn) { + currentKey = ffj_t_FrameworkInfo_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_FrameworkInfo_User, kn) { + currentKey = ffj_t_FrameworkInfo_User + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_FrameworkInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_FrameworkInfo_User: + goto handle_User + + case ffj_t_FrameworkInfo_Name: + goto handle_Name + + case ffj_t_FrameworkInfo_ID: + goto handle_ID + + case ffj_t_FrameworkInfo_FailoverTimeout: + goto handle_FailoverTimeout + + case ffj_t_FrameworkInfo_Checkpoint: + goto handle_Checkpoint + + case ffj_t_FrameworkInfo_Role: + goto handle_Role + + case ffj_t_FrameworkInfo_Roles: + goto handle_Roles + + case ffj_t_FrameworkInfo_Hostname: + goto handle_Hostname + + case ffj_t_FrameworkInfo_Principal: + goto handle_Principal + + case ffj_t_FrameworkInfo_WebUiURL: + goto handle_WebUiURL + + case ffj_t_FrameworkInfo_Capabilities: + goto handle_Capabilities + + case ffj_t_FrameworkInfo_Labels: + goto handle_Labels + + case ffj_t_FrameworkInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_User: + + /* handler: uj.User type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.User = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Name: + + /* handler: uj.Name type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Name = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ID: + + /* handler: uj.ID type=mesos.FrameworkID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.ID = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.ID == nil { + uj.ID = new(FrameworkID) + } + + err = uj.ID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + 
+ state = fflib.FFParse_after_value + goto mainparse + +handle_FailoverTimeout: + + /* handler: uj.FailoverTimeout type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.FailoverTimeout = nil + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := float64(tval) + uj.FailoverTimeout = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Checkpoint: + + /* handler: uj.Checkpoint type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + uj.Checkpoint = nil + + } else { + tmpb := fs.Output.Bytes() + + var tval bool + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + tval = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + tval = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + uj.Checkpoint = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Role: + + /* handler: uj.Role type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Role = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Role = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Roles: + + /* handler: uj.Roles type=[]string kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Roles = nil + } else { + + uj.Roles = []string{} + + wantVal := true + + for { + + var tmp_uj__Roles string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Roles type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmp_uj__Roles = string(string(outBuf)) + + } + } + + uj.Roles = append(uj.Roles, tmp_uj__Roles) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Hostname: + + /* handler: uj.Hostname type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Hostname = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Hostname = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Principal: + + /* handler: uj.Principal type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Principal = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Principal = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_WebUiURL: + + /* handler: uj.WebUiURL type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.WebUiURL = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.WebUiURL = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Capabilities: + + /* handler: uj.Capabilities type=[]mesos.FrameworkInfo_Capability kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Capabilities = nil + } else { + + uj.Capabilities = []FrameworkInfo_Capability{} + + wantVal := true + + for { + + var tmp_uj__Capabilities FrameworkInfo_Capability + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Capabilities type=mesos.FrameworkInfo_Capability kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Capabilities.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Capabilities = append(uj.Capabilities, tmp_uj__Capabilities) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Labels: + + /* handler: uj.Labels type=mesos.Labels kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Labels = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Labels == nil { + uj.Labels = new(Labels) + } + + err = uj.Labels.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *FrameworkInfo_Capability) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *FrameworkInfo_Capability) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"type":`) + + { + + obj, err = mj.Type.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_FrameworkInfo_Capabilitybase = iota + ffj_t_FrameworkInfo_Capabilityno_such_key + + ffj_t_FrameworkInfo_Capability_Type +) + +var ffj_key_FrameworkInfo_Capability_Type = []byte("type") + +func (uj *FrameworkInfo_Capability) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *FrameworkInfo_Capability) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_FrameworkInfo_Capabilitybase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. 
+ if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_FrameworkInfo_Capabilityno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 't': + + if bytes.Equal(ffj_key_FrameworkInfo_Capability_Type, kn) { + currentKey = ffj_t_FrameworkInfo_Capability_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_FrameworkInfo_Capability_Type, kn) { + currentKey = ffj_t_FrameworkInfo_Capability_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_FrameworkInfo_Capabilityno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_FrameworkInfo_Capability_Type: + goto handle_Type + + case ffj_t_FrameworkInfo_Capabilityno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Type: + + /* handler: uj.Type type=mesos.FrameworkInfo_Capability_Type kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = uj.Type.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *HealthCheck) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *HealthCheck) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.DelaySeconds != nil { + if true { + buf.WriteString(`"delay_seconds":`) + fflib.AppendFloat(buf, float64(*mj.DelaySeconds), 'g', -1, 64) + buf.WriteByte(',') + } + } + if mj.IntervalSeconds != nil { + if true { + buf.WriteString(`"interval_seconds":`) + fflib.AppendFloat(buf, float64(*mj.IntervalSeconds), 'g', -1, 64) + buf.WriteByte(',') + } + } + if mj.TimeoutSeconds != nil { + if true { + buf.WriteString(`"timeout_seconds":`) + fflib.AppendFloat(buf, float64(*mj.TimeoutSeconds), 'g', -1, 64) + buf.WriteByte(',') + } + } + if mj.ConsecutiveFailures != nil 
{ + if true { + buf.WriteString(`"consecutive_failures":`) + fflib.FormatBits2(buf, uint64(*mj.ConsecutiveFailures), 10, false) + buf.WriteByte(',') + } + } + if mj.GracePeriodSeconds != nil { + if true { + buf.WriteString(`"grace_period_seconds":`) + fflib.AppendFloat(buf, float64(*mj.GracePeriodSeconds), 'g', -1, 64) + buf.WriteByte(',') + } + } + buf.WriteString(`"type":`) + + { + + obj, err = mj.Type.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + if mj.Command != nil { + if true { + buf.WriteString(`"command":`) + + { + + err = mj.Command.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.HTTP != nil { + if true { + buf.WriteString(`"http":`) + + { + + err = mj.HTTP.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.TCP != nil { + if true { + buf.WriteString(`"tcp":`) + + { + + err = mj.TCP.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_HealthCheckbase = iota + ffj_t_HealthCheckno_such_key + + ffj_t_HealthCheck_DelaySeconds + + ffj_t_HealthCheck_IntervalSeconds + + ffj_t_HealthCheck_TimeoutSeconds + + ffj_t_HealthCheck_ConsecutiveFailures + + ffj_t_HealthCheck_GracePeriodSeconds + + ffj_t_HealthCheck_Type + + ffj_t_HealthCheck_Command + + ffj_t_HealthCheck_HTTP + + ffj_t_HealthCheck_TCP +) + +var ffj_key_HealthCheck_DelaySeconds = []byte("delay_seconds") + +var ffj_key_HealthCheck_IntervalSeconds = []byte("interval_seconds") + +var ffj_key_HealthCheck_TimeoutSeconds = []byte("timeout_seconds") + +var ffj_key_HealthCheck_ConsecutiveFailures = []byte("consecutive_failures") + +var ffj_key_HealthCheck_GracePeriodSeconds = []byte("grace_period_seconds") + +var ffj_key_HealthCheck_Type = []byte("type") + +var ffj_key_HealthCheck_Command = []byte("command") + +var ffj_key_HealthCheck_HTTP = []byte("http") + +var ffj_key_HealthCheck_TCP = []byte("tcp") + +func (uj *HealthCheck) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *HealthCheck) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_HealthCheckbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_HealthCheckno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_HealthCheck_ConsecutiveFailures, kn) { + currentKey = ffj_t_HealthCheck_ConsecutiveFailures + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_HealthCheck_Command, kn) { + currentKey = ffj_t_HealthCheck_Command + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'd': + + if bytes.Equal(ffj_key_HealthCheck_DelaySeconds, kn) { + currentKey = ffj_t_HealthCheck_DelaySeconds + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'g': + + if bytes.Equal(ffj_key_HealthCheck_GracePeriodSeconds, kn) { + currentKey = ffj_t_HealthCheck_GracePeriodSeconds + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'h': + + if bytes.Equal(ffj_key_HealthCheck_HTTP, kn) { + currentKey = ffj_t_HealthCheck_HTTP + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffj_key_HealthCheck_IntervalSeconds, kn) { + currentKey = ffj_t_HealthCheck_IntervalSeconds + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_HealthCheck_TimeoutSeconds, kn) { + currentKey = ffj_t_HealthCheck_TimeoutSeconds + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_HealthCheck_Type, kn) { + currentKey = ffj_t_HealthCheck_Type + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_HealthCheck_TCP, kn) { + currentKey = ffj_t_HealthCheck_TCP + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_HealthCheck_TCP, kn) { + currentKey = ffj_t_HealthCheck_TCP + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_HealthCheck_HTTP, kn) { + currentKey = ffj_t_HealthCheck_HTTP + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_HealthCheck_Command, kn) { + currentKey = ffj_t_HealthCheck_Command + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_HealthCheck_Type, kn) { + currentKey = ffj_t_HealthCheck_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_HealthCheck_GracePeriodSeconds, kn) { + currentKey = ffj_t_HealthCheck_GracePeriodSeconds + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_HealthCheck_ConsecutiveFailures, kn) { + currentKey = ffj_t_HealthCheck_ConsecutiveFailures + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_HealthCheck_TimeoutSeconds, kn) { + currentKey = ffj_t_HealthCheck_TimeoutSeconds + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_HealthCheck_IntervalSeconds, kn) { + currentKey = ffj_t_HealthCheck_IntervalSeconds + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_HealthCheck_DelaySeconds, kn) { + currentKey = ffj_t_HealthCheck_DelaySeconds + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_HealthCheckno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == 
fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_HealthCheck_DelaySeconds: + goto handle_DelaySeconds + + case ffj_t_HealthCheck_IntervalSeconds: + goto handle_IntervalSeconds + + case ffj_t_HealthCheck_TimeoutSeconds: + goto handle_TimeoutSeconds + + case ffj_t_HealthCheck_ConsecutiveFailures: + goto handle_ConsecutiveFailures + + case ffj_t_HealthCheck_GracePeriodSeconds: + goto handle_GracePeriodSeconds + + case ffj_t_HealthCheck_Type: + goto handle_Type + + case ffj_t_HealthCheck_Command: + goto handle_Command + + case ffj_t_HealthCheck_HTTP: + goto handle_HTTP + + case ffj_t_HealthCheck_TCP: + goto handle_TCP + + case ffj_t_HealthCheckno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_DelaySeconds: + + /* handler: uj.DelaySeconds type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.DelaySeconds = nil + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := float64(tval) + uj.DelaySeconds = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_IntervalSeconds: + + /* handler: uj.IntervalSeconds type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.IntervalSeconds = nil + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := float64(tval) + uj.IntervalSeconds = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_TimeoutSeconds: + + /* handler: uj.TimeoutSeconds type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.TimeoutSeconds = nil + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := float64(tval) + uj.TimeoutSeconds = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ConsecutiveFailures: + + /* handler: uj.ConsecutiveFailures type=uint32 kind=uint32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint32", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.ConsecutiveFailures = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint32(tval) + uj.ConsecutiveFailures = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_GracePeriodSeconds: + + /* handler: uj.GracePeriodSeconds type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != 
fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.GracePeriodSeconds = nil + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := float64(tval) + uj.GracePeriodSeconds = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Type: + + /* handler: uj.Type type=mesos.HealthCheck_Type kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = uj.Type.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Command: + + /* handler: uj.Command type=mesos.CommandInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Command = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Command == nil { + uj.Command = new(CommandInfo) + } + + err = uj.Command.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_HTTP: + + /* handler: uj.HTTP type=mesos.HealthCheck_HTTPCheckInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.HTTP = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.HTTP == nil { + uj.HTTP = new(HealthCheck_HTTPCheckInfo) + } + + err = uj.HTTP.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_TCP: + + /* handler: uj.TCP type=mesos.HealthCheck_TCPCheckInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.TCP = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.TCP == nil { + uj.TCP = new(HealthCheck_TCPCheckInfo) + } + + err = uj.TCP.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *HealthCheck_HTTPCheckInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *HealthCheck_HTTPCheckInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.Protocol != nil { + if true { + buf.WriteString(`"protocol":`) + + { + + obj, err = mj.Protocol.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + } + } + if mj.Scheme != nil { + 
if true { + buf.WriteString(`"scheme":`) + fflib.WriteJsonString(buf, string(*mj.Scheme)) + buf.WriteByte(',') + } + } + buf.WriteString(`"port":`) + fflib.FormatBits2(buf, uint64(mj.Port), 10, false) + buf.WriteByte(',') + if mj.Path != nil { + if true { + buf.WriteString(`"path":`) + fflib.WriteJsonString(buf, string(*mj.Path)) + buf.WriteByte(',') + } + } + if len(mj.Statuses) != 0 { + buf.WriteString(`"statuses":`) + if mj.Statuses != nil { + buf.WriteString(`[`) + for i, v := range mj.Statuses { + if i != 0 { + buf.WriteString(`,`) + } + fflib.FormatBits2(buf, uint64(v), 10, false) + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_HealthCheck_HTTPCheckInfobase = iota + ffj_t_HealthCheck_HTTPCheckInfono_such_key + + ffj_t_HealthCheck_HTTPCheckInfo_Protocol + + ffj_t_HealthCheck_HTTPCheckInfo_Scheme + + ffj_t_HealthCheck_HTTPCheckInfo_Port + + ffj_t_HealthCheck_HTTPCheckInfo_Path + + ffj_t_HealthCheck_HTTPCheckInfo_Statuses +) + +var ffj_key_HealthCheck_HTTPCheckInfo_Protocol = []byte("protocol") + +var ffj_key_HealthCheck_HTTPCheckInfo_Scheme = []byte("scheme") + +var ffj_key_HealthCheck_HTTPCheckInfo_Port = []byte("port") + +var ffj_key_HealthCheck_HTTPCheckInfo_Path = []byte("path") + +var ffj_key_HealthCheck_HTTPCheckInfo_Statuses = []byte("statuses") + +func (uj *HealthCheck_HTTPCheckInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *HealthCheck_HTTPCheckInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_HealthCheck_HTTPCheckInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_HealthCheck_HTTPCheckInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'p': + + if bytes.Equal(ffj_key_HealthCheck_HTTPCheckInfo_Protocol, kn) { + currentKey = ffj_t_HealthCheck_HTTPCheckInfo_Protocol + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_HealthCheck_HTTPCheckInfo_Port, kn) { + currentKey = ffj_t_HealthCheck_HTTPCheckInfo_Port + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_HealthCheck_HTTPCheckInfo_Path, kn) { + currentKey = ffj_t_HealthCheck_HTTPCheckInfo_Path + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_HealthCheck_HTTPCheckInfo_Scheme, kn) { + currentKey = ffj_t_HealthCheck_HTTPCheckInfo_Scheme + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_HealthCheck_HTTPCheckInfo_Statuses, kn) { + currentKey = ffj_t_HealthCheck_HTTPCheckInfo_Statuses + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_HealthCheck_HTTPCheckInfo_Statuses, kn) { + currentKey = ffj_t_HealthCheck_HTTPCheckInfo_Statuses + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_HealthCheck_HTTPCheckInfo_Path, kn) { + currentKey = ffj_t_HealthCheck_HTTPCheckInfo_Path + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_HealthCheck_HTTPCheckInfo_Port, kn) { + currentKey = ffj_t_HealthCheck_HTTPCheckInfo_Port + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_HealthCheck_HTTPCheckInfo_Scheme, kn) { + currentKey = ffj_t_HealthCheck_HTTPCheckInfo_Scheme + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_HealthCheck_HTTPCheckInfo_Protocol, kn) { + currentKey = ffj_t_HealthCheck_HTTPCheckInfo_Protocol + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_HealthCheck_HTTPCheckInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_HealthCheck_HTTPCheckInfo_Protocol: + goto handle_Protocol + + case ffj_t_HealthCheck_HTTPCheckInfo_Scheme: + goto handle_Scheme + + case ffj_t_HealthCheck_HTTPCheckInfo_Port: + goto handle_Port + + case ffj_t_HealthCheck_HTTPCheckInfo_Path: + goto handle_Path + + case ffj_t_HealthCheck_HTTPCheckInfo_Statuses: + goto handle_Statuses + + case ffj_t_HealthCheck_HTTPCheckInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Protocol: + + /* handler: uj.Protocol type=mesos.NetworkInfo_Protocol kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Protocol = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + if uj.Protocol == nil { + uj.Protocol = new(NetworkInfo_Protocol) + } + + err = 
uj.Protocol.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Scheme: + + /* handler: uj.Scheme type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Scheme = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Scheme = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Port: + + /* handler: uj.Port type=uint32 kind=uint32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint32", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32) + + if err != nil { + return fs.WrapErr(err) + } + + uj.Port = uint32(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Path: + + /* handler: uj.Path type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Path = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Path = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Statuses: + + /* handler: uj.Statuses type=[]uint32 kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Statuses = nil + } else { + + uj.Statuses = []uint32{} + + wantVal := true + + for { + + var tmp_uj__Statuses uint32 + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Statuses type=uint32 kind=uint32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint32", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32) + + if err != nil { + return fs.WrapErr(err) + } + + tmp_uj__Statuses = uint32(tval) + + } + } + + uj.Statuses = append(uj.Statuses, tmp_uj__Statuses) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *HealthCheck_TCPCheckInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *HealthCheck_TCPCheckInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteByte('{') + if mj.Protocol != nil { + if true { + buf.WriteString(`"protocol":`) + + { + + obj, err = mj.Protocol.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + } + } + buf.WriteString(`"port":`) + fflib.FormatBits2(buf, uint64(mj.Port), 10, false) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_HealthCheck_TCPCheckInfobase = iota + ffj_t_HealthCheck_TCPCheckInfono_such_key + + ffj_t_HealthCheck_TCPCheckInfo_Protocol + + ffj_t_HealthCheck_TCPCheckInfo_Port +) + +var ffj_key_HealthCheck_TCPCheckInfo_Protocol = []byte("protocol") + +var ffj_key_HealthCheck_TCPCheckInfo_Port = []byte("port") + +func (uj *HealthCheck_TCPCheckInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *HealthCheck_TCPCheckInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_HealthCheck_TCPCheckInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. 
+ if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_HealthCheck_TCPCheckInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'p': + + if bytes.Equal(ffj_key_HealthCheck_TCPCheckInfo_Protocol, kn) { + currentKey = ffj_t_HealthCheck_TCPCheckInfo_Protocol + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_HealthCheck_TCPCheckInfo_Port, kn) { + currentKey = ffj_t_HealthCheck_TCPCheckInfo_Port + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_HealthCheck_TCPCheckInfo_Port, kn) { + currentKey = ffj_t_HealthCheck_TCPCheckInfo_Port + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_HealthCheck_TCPCheckInfo_Protocol, kn) { + currentKey = ffj_t_HealthCheck_TCPCheckInfo_Protocol + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_HealthCheck_TCPCheckInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_HealthCheck_TCPCheckInfo_Protocol: + goto handle_Protocol + + case ffj_t_HealthCheck_TCPCheckInfo_Port: + goto handle_Port + + case ffj_t_HealthCheck_TCPCheckInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Protocol: + + /* handler: uj.Protocol type=mesos.NetworkInfo_Protocol kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Protocol = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + if uj.Protocol == nil { + uj.Protocol = new(NetworkInfo_Protocol) + } + + err = uj.Protocol.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Port: + + /* handler: uj.Port type=uint32 kind=uint32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint32", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32) + + if err != nil { + return fs.WrapErr(err) + } + + uj.Port = uint32(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil 
+} + +func (mj *IcmpStatistics) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *IcmpStatistics) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.InMsgs != nil { + if true { + buf.WriteString(`"InMsgs":`) + fflib.FormatBits2(buf, uint64(*mj.InMsgs), 10, *mj.InMsgs < 0) + buf.WriteByte(',') + } + } + if mj.InErrors != nil { + if true { + buf.WriteString(`"InErrors":`) + fflib.FormatBits2(buf, uint64(*mj.InErrors), 10, *mj.InErrors < 0) + buf.WriteByte(',') + } + } + if mj.InCsumErrors != nil { + if true { + buf.WriteString(`"InCsumErrors":`) + fflib.FormatBits2(buf, uint64(*mj.InCsumErrors), 10, *mj.InCsumErrors < 0) + buf.WriteByte(',') + } + } + if mj.InDestUnreachs != nil { + if true { + buf.WriteString(`"InDestUnreachs":`) + fflib.FormatBits2(buf, uint64(*mj.InDestUnreachs), 10, *mj.InDestUnreachs < 0) + buf.WriteByte(',') + } + } + if mj.InTimeExcds != nil { + if true { + buf.WriteString(`"InTimeExcds":`) + fflib.FormatBits2(buf, uint64(*mj.InTimeExcds), 10, *mj.InTimeExcds < 0) + buf.WriteByte(',') + } + } + if mj.InParmProbs != nil { + if true { + buf.WriteString(`"InParmProbs":`) + fflib.FormatBits2(buf, uint64(*mj.InParmProbs), 10, *mj.InParmProbs < 0) + buf.WriteByte(',') + } + } + if mj.InSrcQuenchs != nil { + if true { + buf.WriteString(`"InSrcQuenchs":`) + fflib.FormatBits2(buf, uint64(*mj.InSrcQuenchs), 10, *mj.InSrcQuenchs < 0) + buf.WriteByte(',') + } + } + if mj.InRedirects != nil { + if true { + buf.WriteString(`"InRedirects":`) + fflib.FormatBits2(buf, uint64(*mj.InRedirects), 10, *mj.InRedirects < 0) + buf.WriteByte(',') + } + } + if mj.InEchos != nil { + if true { + buf.WriteString(`"InEchos":`) + fflib.FormatBits2(buf, uint64(*mj.InEchos), 10, *mj.InEchos < 0) + buf.WriteByte(',') + } + } + if mj.InEchoReps != nil { + if true { + buf.WriteString(`"InEchoReps":`) + fflib.FormatBits2(buf, uint64(*mj.InEchoReps), 10, *mj.InEchoReps < 0) + buf.WriteByte(',') + } + } + if mj.InTimestamps != nil { + if true { + buf.WriteString(`"InTimestamps":`) + fflib.FormatBits2(buf, uint64(*mj.InTimestamps), 10, *mj.InTimestamps < 0) + buf.WriteByte(',') + } + } + if mj.InTimestampReps != nil { + if true { + buf.WriteString(`"InTimestampReps":`) + fflib.FormatBits2(buf, uint64(*mj.InTimestampReps), 10, *mj.InTimestampReps < 0) + buf.WriteByte(',') + } + } + if mj.InAddrMasks != nil { + if true { + buf.WriteString(`"InAddrMasks":`) + fflib.FormatBits2(buf, uint64(*mj.InAddrMasks), 10, *mj.InAddrMasks < 0) + buf.WriteByte(',') + } + } + if mj.InAddrMaskReps != nil { + if true { + buf.WriteString(`"InAddrMaskReps":`) + fflib.FormatBits2(buf, uint64(*mj.InAddrMaskReps), 10, *mj.InAddrMaskReps < 0) + buf.WriteByte(',') + } + } + if mj.OutMsgs != nil { + if true { + buf.WriteString(`"OutMsgs":`) + fflib.FormatBits2(buf, uint64(*mj.OutMsgs), 10, *mj.OutMsgs < 0) + buf.WriteByte(',') + } + } + if mj.OutErrors != nil { + if true { + buf.WriteString(`"OutErrors":`) + fflib.FormatBits2(buf, uint64(*mj.OutErrors), 10, *mj.OutErrors < 0) + buf.WriteByte(',') + } + } + if mj.OutDestUnreachs != nil { + if true { + buf.WriteString(`"OutDestUnreachs":`) + fflib.FormatBits2(buf, uint64(*mj.OutDestUnreachs), 10, *mj.OutDestUnreachs < 0) + buf.WriteByte(',') + } + } + if 
mj.OutTimeExcds != nil { + if true { + buf.WriteString(`"OutTimeExcds":`) + fflib.FormatBits2(buf, uint64(*mj.OutTimeExcds), 10, *mj.OutTimeExcds < 0) + buf.WriteByte(',') + } + } + if mj.OutParmProbs != nil { + if true { + buf.WriteString(`"OutParmProbs":`) + fflib.FormatBits2(buf, uint64(*mj.OutParmProbs), 10, *mj.OutParmProbs < 0) + buf.WriteByte(',') + } + } + if mj.OutSrcQuenchs != nil { + if true { + buf.WriteString(`"OutSrcQuenchs":`) + fflib.FormatBits2(buf, uint64(*mj.OutSrcQuenchs), 10, *mj.OutSrcQuenchs < 0) + buf.WriteByte(',') + } + } + if mj.OutRedirects != nil { + if true { + buf.WriteString(`"OutRedirects":`) + fflib.FormatBits2(buf, uint64(*mj.OutRedirects), 10, *mj.OutRedirects < 0) + buf.WriteByte(',') + } + } + if mj.OutEchos != nil { + if true { + buf.WriteString(`"OutEchos":`) + fflib.FormatBits2(buf, uint64(*mj.OutEchos), 10, *mj.OutEchos < 0) + buf.WriteByte(',') + } + } + if mj.OutEchoReps != nil { + if true { + buf.WriteString(`"OutEchoReps":`) + fflib.FormatBits2(buf, uint64(*mj.OutEchoReps), 10, *mj.OutEchoReps < 0) + buf.WriteByte(',') + } + } + if mj.OutTimestamps != nil { + if true { + buf.WriteString(`"OutTimestamps":`) + fflib.FormatBits2(buf, uint64(*mj.OutTimestamps), 10, *mj.OutTimestamps < 0) + buf.WriteByte(',') + } + } + if mj.OutTimestampReps != nil { + if true { + buf.WriteString(`"OutTimestampReps":`) + fflib.FormatBits2(buf, uint64(*mj.OutTimestampReps), 10, *mj.OutTimestampReps < 0) + buf.WriteByte(',') + } + } + if mj.OutAddrMasks != nil { + if true { + buf.WriteString(`"OutAddrMasks":`) + fflib.FormatBits2(buf, uint64(*mj.OutAddrMasks), 10, *mj.OutAddrMasks < 0) + buf.WriteByte(',') + } + } + if mj.OutAddrMaskReps != nil { + if true { + buf.WriteString(`"OutAddrMaskReps":`) + fflib.FormatBits2(buf, uint64(*mj.OutAddrMaskReps), 10, *mj.OutAddrMaskReps < 0) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_IcmpStatisticsbase = iota + ffj_t_IcmpStatisticsno_such_key + + ffj_t_IcmpStatistics_InMsgs + + ffj_t_IcmpStatistics_InErrors + + ffj_t_IcmpStatistics_InCsumErrors + + ffj_t_IcmpStatistics_InDestUnreachs + + ffj_t_IcmpStatistics_InTimeExcds + + ffj_t_IcmpStatistics_InParmProbs + + ffj_t_IcmpStatistics_InSrcQuenchs + + ffj_t_IcmpStatistics_InRedirects + + ffj_t_IcmpStatistics_InEchos + + ffj_t_IcmpStatistics_InEchoReps + + ffj_t_IcmpStatistics_InTimestamps + + ffj_t_IcmpStatistics_InTimestampReps + + ffj_t_IcmpStatistics_InAddrMasks + + ffj_t_IcmpStatistics_InAddrMaskReps + + ffj_t_IcmpStatistics_OutMsgs + + ffj_t_IcmpStatistics_OutErrors + + ffj_t_IcmpStatistics_OutDestUnreachs + + ffj_t_IcmpStatistics_OutTimeExcds + + ffj_t_IcmpStatistics_OutParmProbs + + ffj_t_IcmpStatistics_OutSrcQuenchs + + ffj_t_IcmpStatistics_OutRedirects + + ffj_t_IcmpStatistics_OutEchos + + ffj_t_IcmpStatistics_OutEchoReps + + ffj_t_IcmpStatistics_OutTimestamps + + ffj_t_IcmpStatistics_OutTimestampReps + + ffj_t_IcmpStatistics_OutAddrMasks + + ffj_t_IcmpStatistics_OutAddrMaskReps +) + +var ffj_key_IcmpStatistics_InMsgs = []byte("InMsgs") + +var ffj_key_IcmpStatistics_InErrors = []byte("InErrors") + +var ffj_key_IcmpStatistics_InCsumErrors = []byte("InCsumErrors") + +var ffj_key_IcmpStatistics_InDestUnreachs = []byte("InDestUnreachs") + +var ffj_key_IcmpStatistics_InTimeExcds = []byte("InTimeExcds") + +var ffj_key_IcmpStatistics_InParmProbs = []byte("InParmProbs") + +var ffj_key_IcmpStatistics_InSrcQuenchs = []byte("InSrcQuenchs") + +var ffj_key_IcmpStatistics_InRedirects = []byte("InRedirects") + +var 
ffj_key_IcmpStatistics_InEchos = []byte("InEchos") + +var ffj_key_IcmpStatistics_InEchoReps = []byte("InEchoReps") + +var ffj_key_IcmpStatistics_InTimestamps = []byte("InTimestamps") + +var ffj_key_IcmpStatistics_InTimestampReps = []byte("InTimestampReps") + +var ffj_key_IcmpStatistics_InAddrMasks = []byte("InAddrMasks") + +var ffj_key_IcmpStatistics_InAddrMaskReps = []byte("InAddrMaskReps") + +var ffj_key_IcmpStatistics_OutMsgs = []byte("OutMsgs") + +var ffj_key_IcmpStatistics_OutErrors = []byte("OutErrors") + +var ffj_key_IcmpStatistics_OutDestUnreachs = []byte("OutDestUnreachs") + +var ffj_key_IcmpStatistics_OutTimeExcds = []byte("OutTimeExcds") + +var ffj_key_IcmpStatistics_OutParmProbs = []byte("OutParmProbs") + +var ffj_key_IcmpStatistics_OutSrcQuenchs = []byte("OutSrcQuenchs") + +var ffj_key_IcmpStatistics_OutRedirects = []byte("OutRedirects") + +var ffj_key_IcmpStatistics_OutEchos = []byte("OutEchos") + +var ffj_key_IcmpStatistics_OutEchoReps = []byte("OutEchoReps") + +var ffj_key_IcmpStatistics_OutTimestamps = []byte("OutTimestamps") + +var ffj_key_IcmpStatistics_OutTimestampReps = []byte("OutTimestampReps") + +var ffj_key_IcmpStatistics_OutAddrMasks = []byte("OutAddrMasks") + +var ffj_key_IcmpStatistics_OutAddrMaskReps = []byte("OutAddrMaskReps") + +func (uj *IcmpStatistics) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *IcmpStatistics) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_IcmpStatisticsbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_IcmpStatisticsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'I': + + if bytes.Equal(ffj_key_IcmpStatistics_InMsgs, kn) { + currentKey = ffj_t_IcmpStatistics_InMsgs + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IcmpStatistics_InErrors, kn) { + currentKey = ffj_t_IcmpStatistics_InErrors + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IcmpStatistics_InCsumErrors, kn) { + currentKey = ffj_t_IcmpStatistics_InCsumErrors + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IcmpStatistics_InDestUnreachs, kn) { + currentKey = ffj_t_IcmpStatistics_InDestUnreachs + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IcmpStatistics_InTimeExcds, kn) { + currentKey = ffj_t_IcmpStatistics_InTimeExcds + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IcmpStatistics_InParmProbs, kn) { + currentKey = ffj_t_IcmpStatistics_InParmProbs + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IcmpStatistics_InSrcQuenchs, kn) { + currentKey = ffj_t_IcmpStatistics_InSrcQuenchs + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IcmpStatistics_InRedirects, kn) { + currentKey = ffj_t_IcmpStatistics_InRedirects + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IcmpStatistics_InEchos, kn) { + currentKey = ffj_t_IcmpStatistics_InEchos + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IcmpStatistics_InEchoReps, kn) { + currentKey = ffj_t_IcmpStatistics_InEchoReps + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IcmpStatistics_InTimestamps, kn) { + currentKey = ffj_t_IcmpStatistics_InTimestamps + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IcmpStatistics_InTimestampReps, kn) { + currentKey = ffj_t_IcmpStatistics_InTimestampReps + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IcmpStatistics_InAddrMasks, kn) { + currentKey = ffj_t_IcmpStatistics_InAddrMasks + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IcmpStatistics_InAddrMaskReps, kn) { + currentKey = ffj_t_IcmpStatistics_InAddrMaskReps + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'O': + + if bytes.Equal(ffj_key_IcmpStatistics_OutMsgs, kn) { + currentKey = ffj_t_IcmpStatistics_OutMsgs + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IcmpStatistics_OutErrors, kn) { + currentKey = ffj_t_IcmpStatistics_OutErrors + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IcmpStatistics_OutDestUnreachs, kn) { + currentKey = ffj_t_IcmpStatistics_OutDestUnreachs + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IcmpStatistics_OutTimeExcds, kn) { + currentKey = ffj_t_IcmpStatistics_OutTimeExcds + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IcmpStatistics_OutParmProbs, kn) { + currentKey = ffj_t_IcmpStatistics_OutParmProbs + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IcmpStatistics_OutSrcQuenchs, kn) { + currentKey = ffj_t_IcmpStatistics_OutSrcQuenchs + state = fflib.FFParse_want_colon + goto mainparse + + } else if 
bytes.Equal(ffj_key_IcmpStatistics_OutRedirects, kn) { + currentKey = ffj_t_IcmpStatistics_OutRedirects + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IcmpStatistics_OutEchos, kn) { + currentKey = ffj_t_IcmpStatistics_OutEchos + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IcmpStatistics_OutEchoReps, kn) { + currentKey = ffj_t_IcmpStatistics_OutEchoReps + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IcmpStatistics_OutTimestamps, kn) { + currentKey = ffj_t_IcmpStatistics_OutTimestamps + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IcmpStatistics_OutTimestampReps, kn) { + currentKey = ffj_t_IcmpStatistics_OutTimestampReps + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IcmpStatistics_OutAddrMasks, kn) { + currentKey = ffj_t_IcmpStatistics_OutAddrMasks + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IcmpStatistics_OutAddrMaskReps, kn) { + currentKey = ffj_t_IcmpStatistics_OutAddrMaskReps + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_IcmpStatistics_OutAddrMaskReps, kn) { + currentKey = ffj_t_IcmpStatistics_OutAddrMaskReps + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IcmpStatistics_OutAddrMasks, kn) { + currentKey = ffj_t_IcmpStatistics_OutAddrMasks + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IcmpStatistics_OutTimestampReps, kn) { + currentKey = ffj_t_IcmpStatistics_OutTimestampReps + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IcmpStatistics_OutTimestamps, kn) { + currentKey = ffj_t_IcmpStatistics_OutTimestamps + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IcmpStatistics_OutEchoReps, kn) { + currentKey = ffj_t_IcmpStatistics_OutEchoReps + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IcmpStatistics_OutEchos, kn) { + currentKey = ffj_t_IcmpStatistics_OutEchos + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IcmpStatistics_OutRedirects, kn) { + currentKey = ffj_t_IcmpStatistics_OutRedirects + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IcmpStatistics_OutSrcQuenchs, kn) { + currentKey = ffj_t_IcmpStatistics_OutSrcQuenchs + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IcmpStatistics_OutParmProbs, kn) { + currentKey = ffj_t_IcmpStatistics_OutParmProbs + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IcmpStatistics_OutTimeExcds, kn) { + currentKey = ffj_t_IcmpStatistics_OutTimeExcds + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IcmpStatistics_OutDestUnreachs, kn) { + currentKey = ffj_t_IcmpStatistics_OutDestUnreachs + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IcmpStatistics_OutErrors, kn) { + currentKey = ffj_t_IcmpStatistics_OutErrors + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IcmpStatistics_OutMsgs, kn) { + currentKey = ffj_t_IcmpStatistics_OutMsgs + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IcmpStatistics_InAddrMaskReps, kn) { + 
currentKey = ffj_t_IcmpStatistics_InAddrMaskReps + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IcmpStatistics_InAddrMasks, kn) { + currentKey = ffj_t_IcmpStatistics_InAddrMasks + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IcmpStatistics_InTimestampReps, kn) { + currentKey = ffj_t_IcmpStatistics_InTimestampReps + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IcmpStatistics_InTimestamps, kn) { + currentKey = ffj_t_IcmpStatistics_InTimestamps + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IcmpStatistics_InEchoReps, kn) { + currentKey = ffj_t_IcmpStatistics_InEchoReps + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IcmpStatistics_InEchos, kn) { + currentKey = ffj_t_IcmpStatistics_InEchos + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IcmpStatistics_InRedirects, kn) { + currentKey = ffj_t_IcmpStatistics_InRedirects + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IcmpStatistics_InSrcQuenchs, kn) { + currentKey = ffj_t_IcmpStatistics_InSrcQuenchs + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IcmpStatistics_InParmProbs, kn) { + currentKey = ffj_t_IcmpStatistics_InParmProbs + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IcmpStatistics_InTimeExcds, kn) { + currentKey = ffj_t_IcmpStatistics_InTimeExcds + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IcmpStatistics_InDestUnreachs, kn) { + currentKey = ffj_t_IcmpStatistics_InDestUnreachs + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IcmpStatistics_InCsumErrors, kn) { + currentKey = ffj_t_IcmpStatistics_InCsumErrors + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IcmpStatistics_InErrors, kn) { + currentKey = ffj_t_IcmpStatistics_InErrors + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IcmpStatistics_InMsgs, kn) { + currentKey = ffj_t_IcmpStatistics_InMsgs + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_IcmpStatisticsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_IcmpStatistics_InMsgs: + goto handle_InMsgs + + case ffj_t_IcmpStatistics_InErrors: + goto handle_InErrors + + case ffj_t_IcmpStatistics_InCsumErrors: + goto handle_InCsumErrors + + case ffj_t_IcmpStatistics_InDestUnreachs: + goto handle_InDestUnreachs + + case ffj_t_IcmpStatistics_InTimeExcds: + goto handle_InTimeExcds + + case ffj_t_IcmpStatistics_InParmProbs: + goto handle_InParmProbs + + case ffj_t_IcmpStatistics_InSrcQuenchs: + goto handle_InSrcQuenchs + + case ffj_t_IcmpStatistics_InRedirects: + goto handle_InRedirects + + case ffj_t_IcmpStatistics_InEchos: + goto handle_InEchos + + case 
ffj_t_IcmpStatistics_InEchoReps: + goto handle_InEchoReps + + case ffj_t_IcmpStatistics_InTimestamps: + goto handle_InTimestamps + + case ffj_t_IcmpStatistics_InTimestampReps: + goto handle_InTimestampReps + + case ffj_t_IcmpStatistics_InAddrMasks: + goto handle_InAddrMasks + + case ffj_t_IcmpStatistics_InAddrMaskReps: + goto handle_InAddrMaskReps + + case ffj_t_IcmpStatistics_OutMsgs: + goto handle_OutMsgs + + case ffj_t_IcmpStatistics_OutErrors: + goto handle_OutErrors + + case ffj_t_IcmpStatistics_OutDestUnreachs: + goto handle_OutDestUnreachs + + case ffj_t_IcmpStatistics_OutTimeExcds: + goto handle_OutTimeExcds + + case ffj_t_IcmpStatistics_OutParmProbs: + goto handle_OutParmProbs + + case ffj_t_IcmpStatistics_OutSrcQuenchs: + goto handle_OutSrcQuenchs + + case ffj_t_IcmpStatistics_OutRedirects: + goto handle_OutRedirects + + case ffj_t_IcmpStatistics_OutEchos: + goto handle_OutEchos + + case ffj_t_IcmpStatistics_OutEchoReps: + goto handle_OutEchoReps + + case ffj_t_IcmpStatistics_OutTimestamps: + goto handle_OutTimestamps + + case ffj_t_IcmpStatistics_OutTimestampReps: + goto handle_OutTimestampReps + + case ffj_t_IcmpStatistics_OutAddrMasks: + goto handle_OutAddrMasks + + case ffj_t_IcmpStatistics_OutAddrMaskReps: + goto handle_OutAddrMaskReps + + case ffj_t_IcmpStatisticsno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_InMsgs: + + /* handler: uj.InMsgs type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.InMsgs = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.InMsgs = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_InErrors: + + /* handler: uj.InErrors type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.InErrors = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.InErrors = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_InCsumErrors: + + /* handler: uj.InCsumErrors type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.InCsumErrors = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.InCsumErrors = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_InDestUnreachs: + + /* handler: uj.InDestUnreachs type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.InDestUnreachs = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return 
fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.InDestUnreachs = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_InTimeExcds: + + /* handler: uj.InTimeExcds type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.InTimeExcds = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.InTimeExcds = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_InParmProbs: + + /* handler: uj.InParmProbs type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.InParmProbs = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.InParmProbs = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_InSrcQuenchs: + + /* handler: uj.InSrcQuenchs type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.InSrcQuenchs = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.InSrcQuenchs = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_InRedirects: + + /* handler: uj.InRedirects type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.InRedirects = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.InRedirects = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_InEchos: + + /* handler: uj.InEchos type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.InEchos = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.InEchos = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_InEchoReps: + + /* handler: uj.InEchoReps type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.InEchoReps = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.InEchoReps = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_InTimestamps: + + /* handler: uj.InTimestamps type=int64 kind=int64 quoted=false*/ + + 
{ + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.InTimestamps = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.InTimestamps = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_InTimestampReps: + + /* handler: uj.InTimestampReps type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.InTimestampReps = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.InTimestampReps = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_InAddrMasks: + + /* handler: uj.InAddrMasks type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.InAddrMasks = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.InAddrMasks = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_InAddrMaskReps: + + /* handler: uj.InAddrMaskReps type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.InAddrMaskReps = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.InAddrMaskReps = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_OutMsgs: + + /* handler: uj.OutMsgs type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.OutMsgs = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.OutMsgs = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_OutErrors: + + /* handler: uj.OutErrors type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.OutErrors = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.OutErrors = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_OutDestUnreachs: + + /* handler: uj.OutDestUnreachs type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + 
uj.OutDestUnreachs = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.OutDestUnreachs = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_OutTimeExcds: + + /* handler: uj.OutTimeExcds type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.OutTimeExcds = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.OutTimeExcds = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_OutParmProbs: + + /* handler: uj.OutParmProbs type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.OutParmProbs = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.OutParmProbs = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_OutSrcQuenchs: + + /* handler: uj.OutSrcQuenchs type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.OutSrcQuenchs = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.OutSrcQuenchs = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_OutRedirects: + + /* handler: uj.OutRedirects type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.OutRedirects = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.OutRedirects = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_OutEchos: + + /* handler: uj.OutEchos type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.OutEchos = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.OutEchos = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_OutEchoReps: + + /* handler: uj.OutEchoReps type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.OutEchoReps = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.OutEchoReps = &ttypval + + } + } + 
+ state = fflib.FFParse_after_value + goto mainparse + +handle_OutTimestamps: + + /* handler: uj.OutTimestamps type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.OutTimestamps = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.OutTimestamps = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_OutTimestampReps: + + /* handler: uj.OutTimestampReps type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.OutTimestampReps = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.OutTimestampReps = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_OutAddrMasks: + + /* handler: uj.OutAddrMasks type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.OutAddrMasks = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.OutAddrMasks = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_OutAddrMaskReps: + + /* handler: uj.OutAddrMaskReps type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.OutAddrMaskReps = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.OutAddrMaskReps = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Image) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Image) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.Type != nil { + if true { + buf.WriteString(`"type":`) + + { + + obj, err = mj.Type.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + } + } + if mj.Appc != nil { + if true { + buf.WriteString(`"appc":`) + + { + + err = mj.Appc.MarshalJSONBuf(buf) + if err 
!= nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Docker != nil { + if true { + buf.WriteString(`"docker":`) + + { + + err = mj.Docker.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Cached != nil { + if true { + if *mj.Cached { + buf.WriteString(`"cached":true`) + } else { + buf.WriteString(`"cached":false`) + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Imagebase = iota + ffj_t_Imageno_such_key + + ffj_t_Image_Type + + ffj_t_Image_Appc + + ffj_t_Image_Docker + + ffj_t_Image_Cached +) + +var ffj_key_Image_Type = []byte("type") + +var ffj_key_Image_Appc = []byte("appc") + +var ffj_key_Image_Docker = []byte("docker") + +var ffj_key_Image_Cached = []byte("cached") + +func (uj *Image) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Image) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Imagebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Imageno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'a': + + if bytes.Equal(ffj_key_Image_Appc, kn) { + currentKey = ffj_t_Image_Appc + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'c': + + if bytes.Equal(ffj_key_Image_Cached, kn) { + currentKey = ffj_t_Image_Cached + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'd': + + if bytes.Equal(ffj_key_Image_Docker, kn) { + currentKey = ffj_t_Image_Docker + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_Image_Type, kn) { + currentKey = ffj_t_Image_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Image_Cached, kn) { + currentKey = ffj_t_Image_Cached + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Image_Docker, kn) { + currentKey = ffj_t_Image_Docker + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Image_Appc, kn) { + currentKey = ffj_t_Image_Appc + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Image_Type, kn) { + currentKey = ffj_t_Image_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Imageno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Image_Type: + goto handle_Type + + case ffj_t_Image_Appc: + goto handle_Appc + + case ffj_t_Image_Docker: + goto handle_Docker + + case ffj_t_Image_Cached: + goto handle_Cached + + case ffj_t_Imageno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Type: + + /* handler: uj.Type type=mesos.Image_Type kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Type = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + if uj.Type == nil { + uj.Type = new(Image_Type) + } + + err = uj.Type.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Appc: + + /* handler: uj.Appc type=mesos.Image_Appc kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Appc = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Appc == nil { + uj.Appc = new(Image_Appc) + } + + err = uj.Appc.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Docker: + + /* handler: uj.Docker type=mesos.Image_Docker kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Docker = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Docker == nil { + uj.Docker = new(Image_Docker) + } + + err = uj.Docker.UnmarshalJSONFFLexer(fs, 
fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Cached: + + /* handler: uj.Cached type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + uj.Cached = nil + + } else { + tmpb := fs.Output.Bytes() + + var tval bool + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + tval = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + tval = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + uj.Cached = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Image_Appc) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Image_Appc) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "name":`) + fflib.WriteJsonString(buf, string(mj.Name)) + buf.WriteByte(',') + if mj.ID != nil { + if true { + buf.WriteString(`"id":`) + fflib.WriteJsonString(buf, string(*mj.ID)) + buf.WriteByte(',') + } + } + if mj.Labels != nil { + if true { + buf.WriteString(`"labels":`) + + { + + err = mj.Labels.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Image_Appcbase = iota + ffj_t_Image_Appcno_such_key + + ffj_t_Image_Appc_Name + + ffj_t_Image_Appc_ID + + ffj_t_Image_Appc_Labels +) + +var ffj_key_Image_Appc_Name = []byte("name") + +var ffj_key_Image_Appc_ID = []byte("id") + +var ffj_key_Image_Appc_Labels = []byte("labels") + +func (uj *Image_Appc) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Image_Appc) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Image_Appcbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // 
json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Image_Appcno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'i': + + if bytes.Equal(ffj_key_Image_Appc_ID, kn) { + currentKey = ffj_t_Image_Appc_ID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'l': + + if bytes.Equal(ffj_key_Image_Appc_Labels, kn) { + currentKey = ffj_t_Image_Appc_Labels + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffj_key_Image_Appc_Name, kn) { + currentKey = ffj_t_Image_Appc_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Image_Appc_Labels, kn) { + currentKey = ffj_t_Image_Appc_Labels + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Image_Appc_ID, kn) { + currentKey = ffj_t_Image_Appc_ID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Image_Appc_Name, kn) { + currentKey = ffj_t_Image_Appc_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Image_Appcno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Image_Appc_Name: + goto handle_Name + + case ffj_t_Image_Appc_ID: + goto handle_ID + + case ffj_t_Image_Appc_Labels: + goto handle_Labels + + case ffj_t_Image_Appcno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Name: + + /* handler: uj.Name type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Name = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ID: + + /* handler: uj.ID type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.ID = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.ID = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Labels: + + /* handler: uj.Labels type=mesos.Labels kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Labels = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Labels == nil { + uj.Labels = new(Labels) + } + + err = uj.Labels.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = 
fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Image_Docker) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Image_Docker) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "name":`) + fflib.WriteJsonString(buf, string(mj.Name)) + buf.WriteByte(',') + if mj.Credential != nil { + if true { + buf.WriteString(`"credential":`) + + { + + err = mj.Credential.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Config != nil { + if true { + buf.WriteString(`"config":`) + + { + + err = mj.Config.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Image_Dockerbase = iota + ffj_t_Image_Dockerno_such_key + + ffj_t_Image_Docker_Name + + ffj_t_Image_Docker_Credential + + ffj_t_Image_Docker_Config +) + +var ffj_key_Image_Docker_Name = []byte("name") + +var ffj_key_Image_Docker_Credential = []byte("credential") + +var ffj_key_Image_Docker_Config = []byte("config") + +func (uj *Image_Docker) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Image_Docker) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Image_Dockerbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Image_Dockerno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_Image_Docker_Credential, kn) { + currentKey = ffj_t_Image_Docker_Credential + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Image_Docker_Config, kn) { + currentKey = ffj_t_Image_Docker_Config + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffj_key_Image_Docker_Name, kn) { + currentKey = ffj_t_Image_Docker_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Image_Docker_Config, kn) { + currentKey = ffj_t_Image_Docker_Config + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Image_Docker_Credential, kn) { + currentKey = ffj_t_Image_Docker_Credential + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Image_Docker_Name, kn) { + currentKey = ffj_t_Image_Docker_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Image_Dockerno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Image_Docker_Name: + goto handle_Name + + case ffj_t_Image_Docker_Credential: + goto handle_Credential + + case ffj_t_Image_Docker_Config: + goto handle_Config + + case ffj_t_Image_Dockerno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Name: + + /* handler: uj.Name type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Name = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Credential: + + /* handler: uj.Credential type=mesos.Credential kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Credential = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Credential == nil { + uj.Credential = new(Credential) + } + + err = uj.Credential.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Config: + + /* handler: uj.Config type=mesos.Secret kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Config = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Config == nil { + uj.Config = new(Secret) + } + + err = uj.Config.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return 
fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *InverseOffer) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *InverseOffer) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"id":`) + + { + + err = mj.OfferID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + if mj.URL != nil { + if true { + buf.WriteString(`"url":`) + + { + + err = mj.URL.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.WriteString(`"framework_id":`) + + { + + err = mj.FrameworkID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + if mj.AgentID != nil { + if true { + buf.WriteString(`"agent_id":`) + + { + + err = mj.AgentID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.WriteString(`"unavailability":`) + + { + + err = mj.Unavailability.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteString(`,"resources":`) + if mj.Resources != nil { + buf.WriteString(`[`) + for i, v := range mj.Resources { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_InverseOfferbase = iota + ffj_t_InverseOfferno_such_key + + ffj_t_InverseOffer_OfferID + + ffj_t_InverseOffer_URL + + ffj_t_InverseOffer_FrameworkID + + ffj_t_InverseOffer_AgentID + + ffj_t_InverseOffer_Unavailability + + ffj_t_InverseOffer_Resources +) + +var ffj_key_InverseOffer_OfferID = []byte("id") + +var ffj_key_InverseOffer_URL = []byte("url") + +var ffj_key_InverseOffer_FrameworkID = []byte("framework_id") + +var ffj_key_InverseOffer_AgentID = []byte("agent_id") + +var ffj_key_InverseOffer_Unavailability = []byte("unavailability") + +var ffj_key_InverseOffer_Resources = []byte("resources") + +func (uj *InverseOffer) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *InverseOffer) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_InverseOfferbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case 
fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_InverseOfferno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'a': + + if bytes.Equal(ffj_key_InverseOffer_AgentID, kn) { + currentKey = ffj_t_InverseOffer_AgentID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'f': + + if bytes.Equal(ffj_key_InverseOffer_FrameworkID, kn) { + currentKey = ffj_t_InverseOffer_FrameworkID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffj_key_InverseOffer_OfferID, kn) { + currentKey = ffj_t_InverseOffer_OfferID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'r': + + if bytes.Equal(ffj_key_InverseOffer_Resources, kn) { + currentKey = ffj_t_InverseOffer_Resources + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'u': + + if bytes.Equal(ffj_key_InverseOffer_URL, kn) { + currentKey = ffj_t_InverseOffer_URL + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_InverseOffer_Unavailability, kn) { + currentKey = ffj_t_InverseOffer_Unavailability + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_InverseOffer_Resources, kn) { + currentKey = ffj_t_InverseOffer_Resources + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_InverseOffer_Unavailability, kn) { + currentKey = ffj_t_InverseOffer_Unavailability + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_InverseOffer_AgentID, kn) { + currentKey = ffj_t_InverseOffer_AgentID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_InverseOffer_FrameworkID, kn) { + currentKey = ffj_t_InverseOffer_FrameworkID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_InverseOffer_URL, kn) { + currentKey = ffj_t_InverseOffer_URL + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_InverseOffer_OfferID, kn) { + currentKey = ffj_t_InverseOffer_OfferID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_InverseOfferno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_InverseOffer_OfferID: + goto handle_OfferID + + case ffj_t_InverseOffer_URL: + goto handle_URL + + case ffj_t_InverseOffer_FrameworkID: + goto handle_FrameworkID + + case ffj_t_InverseOffer_AgentID: + goto handle_AgentID + + case ffj_t_InverseOffer_Unavailability: + goto handle_Unavailability + + case ffj_t_InverseOffer_Resources: + goto handle_Resources + + case ffj_t_InverseOfferno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + 
} + +handle_OfferID: + + /* handler: uj.OfferID type=mesos.OfferID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.OfferID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_URL: + + /* handler: uj.URL type=mesos.URL kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.URL = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.URL == nil { + uj.URL = new(URL) + } + + err = uj.URL.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_FrameworkID: + + /* handler: uj.FrameworkID type=mesos.FrameworkID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.FrameworkID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_AgentID: + + /* handler: uj.AgentID type=mesos.AgentID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.AgentID = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.AgentID == nil { + uj.AgentID = new(AgentID) + } + + err = uj.AgentID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Unavailability: + + /* handler: uj.Unavailability type=mesos.Unavailability kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.Unavailability.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Resources: + + /* handler: uj.Resources type=[]mesos.Resource kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Resources = nil + } else { + + uj.Resources = []Resource{} + + wantVal := true + + for { + + var tmp_uj__Resources Resource + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Resources type=mesos.Resource kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Resources.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Resources = append(uj.Resources, tmp_uj__Resources) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *IpStatistics) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *IpStatistics) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.Forwarding != nil { + if true { + buf.WriteString(`"Forwarding":`) + fflib.FormatBits2(buf, uint64(*mj.Forwarding), 10, *mj.Forwarding < 0) + buf.WriteByte(',') + } + } + if mj.DefaultTTL != nil { + if true { + buf.WriteString(`"DefaultTTL":`) + fflib.FormatBits2(buf, uint64(*mj.DefaultTTL), 10, *mj.DefaultTTL < 0) + buf.WriteByte(',') + } + } + if mj.InReceives != nil { + if true { + buf.WriteString(`"InReceives":`) + fflib.FormatBits2(buf, uint64(*mj.InReceives), 10, *mj.InReceives < 0) + buf.WriteByte(',') + } + } + if mj.InHdrErrors != nil { + if true { + buf.WriteString(`"InHdrErrors":`) + fflib.FormatBits2(buf, uint64(*mj.InHdrErrors), 10, *mj.InHdrErrors < 0) + buf.WriteByte(',') + } + } + if mj.InAddrErrors != nil { + if true { + buf.WriteString(`"InAddrErrors":`) + fflib.FormatBits2(buf, uint64(*mj.InAddrErrors), 10, *mj.InAddrErrors < 0) + buf.WriteByte(',') + } + } + if mj.ForwDatagrams != nil { + if true { + buf.WriteString(`"ForwDatagrams":`) + fflib.FormatBits2(buf, uint64(*mj.ForwDatagrams), 10, *mj.ForwDatagrams < 0) + buf.WriteByte(',') + } + } + if mj.InUnknownProtos != nil { + if true { + buf.WriteString(`"InUnknownProtos":`) + fflib.FormatBits2(buf, uint64(*mj.InUnknownProtos), 10, *mj.InUnknownProtos < 0) + buf.WriteByte(',') + } + } + if mj.InDiscards != nil { + if true { + buf.WriteString(`"InDiscards":`) + fflib.FormatBits2(buf, uint64(*mj.InDiscards), 10, *mj.InDiscards < 0) + buf.WriteByte(',') + } + } + if mj.InDelivers != nil { + if true { + buf.WriteString(`"InDelivers":`) + fflib.FormatBits2(buf, uint64(*mj.InDelivers), 10, *mj.InDelivers < 0) + buf.WriteByte(',') + } + } + if mj.OutRequests != nil { + if true { + buf.WriteString(`"OutRequests":`) + fflib.FormatBits2(buf, uint64(*mj.OutRequests), 10, *mj.OutRequests < 0) + buf.WriteByte(',') + } + } + if mj.OutDiscards != nil { + if true { + buf.WriteString(`"OutDiscards":`) + fflib.FormatBits2(buf, uint64(*mj.OutDiscards), 10, *mj.OutDiscards < 0) + buf.WriteByte(',') + } + } + if 
mj.OutNoRoutes != nil { + if true { + buf.WriteString(`"OutNoRoutes":`) + fflib.FormatBits2(buf, uint64(*mj.OutNoRoutes), 10, *mj.OutNoRoutes < 0) + buf.WriteByte(',') + } + } + if mj.ReasmTimeout != nil { + if true { + buf.WriteString(`"ReasmTimeout":`) + fflib.FormatBits2(buf, uint64(*mj.ReasmTimeout), 10, *mj.ReasmTimeout < 0) + buf.WriteByte(',') + } + } + if mj.ReasmReqds != nil { + if true { + buf.WriteString(`"ReasmReqds":`) + fflib.FormatBits2(buf, uint64(*mj.ReasmReqds), 10, *mj.ReasmReqds < 0) + buf.WriteByte(',') + } + } + if mj.ReasmOKs != nil { + if true { + buf.WriteString(`"ReasmOKs":`) + fflib.FormatBits2(buf, uint64(*mj.ReasmOKs), 10, *mj.ReasmOKs < 0) + buf.WriteByte(',') + } + } + if mj.ReasmFails != nil { + if true { + buf.WriteString(`"ReasmFails":`) + fflib.FormatBits2(buf, uint64(*mj.ReasmFails), 10, *mj.ReasmFails < 0) + buf.WriteByte(',') + } + } + if mj.FragOKs != nil { + if true { + buf.WriteString(`"FragOKs":`) + fflib.FormatBits2(buf, uint64(*mj.FragOKs), 10, *mj.FragOKs < 0) + buf.WriteByte(',') + } + } + if mj.FragFails != nil { + if true { + buf.WriteString(`"FragFails":`) + fflib.FormatBits2(buf, uint64(*mj.FragFails), 10, *mj.FragFails < 0) + buf.WriteByte(',') + } + } + if mj.FragCreates != nil { + if true { + buf.WriteString(`"FragCreates":`) + fflib.FormatBits2(buf, uint64(*mj.FragCreates), 10, *mj.FragCreates < 0) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_IpStatisticsbase = iota + ffj_t_IpStatisticsno_such_key + + ffj_t_IpStatistics_Forwarding + + ffj_t_IpStatistics_DefaultTTL + + ffj_t_IpStatistics_InReceives + + ffj_t_IpStatistics_InHdrErrors + + ffj_t_IpStatistics_InAddrErrors + + ffj_t_IpStatistics_ForwDatagrams + + ffj_t_IpStatistics_InUnknownProtos + + ffj_t_IpStatistics_InDiscards + + ffj_t_IpStatistics_InDelivers + + ffj_t_IpStatistics_OutRequests + + ffj_t_IpStatistics_OutDiscards + + ffj_t_IpStatistics_OutNoRoutes + + ffj_t_IpStatistics_ReasmTimeout + + ffj_t_IpStatistics_ReasmReqds + + ffj_t_IpStatistics_ReasmOKs + + ffj_t_IpStatistics_ReasmFails + + ffj_t_IpStatistics_FragOKs + + ffj_t_IpStatistics_FragFails + + ffj_t_IpStatistics_FragCreates +) + +var ffj_key_IpStatistics_Forwarding = []byte("Forwarding") + +var ffj_key_IpStatistics_DefaultTTL = []byte("DefaultTTL") + +var ffj_key_IpStatistics_InReceives = []byte("InReceives") + +var ffj_key_IpStatistics_InHdrErrors = []byte("InHdrErrors") + +var ffj_key_IpStatistics_InAddrErrors = []byte("InAddrErrors") + +var ffj_key_IpStatistics_ForwDatagrams = []byte("ForwDatagrams") + +var ffj_key_IpStatistics_InUnknownProtos = []byte("InUnknownProtos") + +var ffj_key_IpStatistics_InDiscards = []byte("InDiscards") + +var ffj_key_IpStatistics_InDelivers = []byte("InDelivers") + +var ffj_key_IpStatistics_OutRequests = []byte("OutRequests") + +var ffj_key_IpStatistics_OutDiscards = []byte("OutDiscards") + +var ffj_key_IpStatistics_OutNoRoutes = []byte("OutNoRoutes") + +var ffj_key_IpStatistics_ReasmTimeout = []byte("ReasmTimeout") + +var ffj_key_IpStatistics_ReasmReqds = []byte("ReasmReqds") + +var ffj_key_IpStatistics_ReasmOKs = []byte("ReasmOKs") + +var ffj_key_IpStatistics_ReasmFails = []byte("ReasmFails") + +var ffj_key_IpStatistics_FragOKs = []byte("FragOKs") + +var ffj_key_IpStatistics_FragFails = []byte("FragFails") + +var ffj_key_IpStatistics_FragCreates = []byte("FragCreates") + +func (uj *IpStatistics) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + 
+func (uj *IpStatistics) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_IpStatisticsbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_IpStatisticsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'D': + + if bytes.Equal(ffj_key_IpStatistics_DefaultTTL, kn) { + currentKey = ffj_t_IpStatistics_DefaultTTL + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'F': + + if bytes.Equal(ffj_key_IpStatistics_Forwarding, kn) { + currentKey = ffj_t_IpStatistics_Forwarding + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IpStatistics_ForwDatagrams, kn) { + currentKey = ffj_t_IpStatistics_ForwDatagrams + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IpStatistics_FragOKs, kn) { + currentKey = ffj_t_IpStatistics_FragOKs + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IpStatistics_FragFails, kn) { + currentKey = ffj_t_IpStatistics_FragFails + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IpStatistics_FragCreates, kn) { + currentKey = ffj_t_IpStatistics_FragCreates + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'I': + + if bytes.Equal(ffj_key_IpStatistics_InReceives, kn) { + currentKey = ffj_t_IpStatistics_InReceives + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IpStatistics_InHdrErrors, kn) { + currentKey = ffj_t_IpStatistics_InHdrErrors + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IpStatistics_InAddrErrors, kn) { + currentKey = ffj_t_IpStatistics_InAddrErrors + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IpStatistics_InUnknownProtos, kn) { + currentKey = ffj_t_IpStatistics_InUnknownProtos + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IpStatistics_InDiscards, kn) { + currentKey = ffj_t_IpStatistics_InDiscards + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IpStatistics_InDelivers, kn) { + currentKey = ffj_t_IpStatistics_InDelivers + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'O': + + if bytes.Equal(ffj_key_IpStatistics_OutRequests, kn) { + currentKey = ffj_t_IpStatistics_OutRequests + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IpStatistics_OutDiscards, kn) { + currentKey = ffj_t_IpStatistics_OutDiscards + state = fflib.FFParse_want_colon + goto 
mainparse + + } else if bytes.Equal(ffj_key_IpStatistics_OutNoRoutes, kn) { + currentKey = ffj_t_IpStatistics_OutNoRoutes + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'R': + + if bytes.Equal(ffj_key_IpStatistics_ReasmTimeout, kn) { + currentKey = ffj_t_IpStatistics_ReasmTimeout + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IpStatistics_ReasmReqds, kn) { + currentKey = ffj_t_IpStatistics_ReasmReqds + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IpStatistics_ReasmOKs, kn) { + currentKey = ffj_t_IpStatistics_ReasmOKs + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_IpStatistics_ReasmFails, kn) { + currentKey = ffj_t_IpStatistics_ReasmFails + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_IpStatistics_FragCreates, kn) { + currentKey = ffj_t_IpStatistics_FragCreates + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IpStatistics_FragFails, kn) { + currentKey = ffj_t_IpStatistics_FragFails + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IpStatistics_FragOKs, kn) { + currentKey = ffj_t_IpStatistics_FragOKs + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IpStatistics_ReasmFails, kn) { + currentKey = ffj_t_IpStatistics_ReasmFails + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IpStatistics_ReasmOKs, kn) { + currentKey = ffj_t_IpStatistics_ReasmOKs + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IpStatistics_ReasmReqds, kn) { + currentKey = ffj_t_IpStatistics_ReasmReqds + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IpStatistics_ReasmTimeout, kn) { + currentKey = ffj_t_IpStatistics_ReasmTimeout + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IpStatistics_OutNoRoutes, kn) { + currentKey = ffj_t_IpStatistics_OutNoRoutes + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IpStatistics_OutDiscards, kn) { + currentKey = ffj_t_IpStatistics_OutDiscards + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IpStatistics_OutRequests, kn) { + currentKey = ffj_t_IpStatistics_OutRequests + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IpStatistics_InDelivers, kn) { + currentKey = ffj_t_IpStatistics_InDelivers + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IpStatistics_InDiscards, kn) { + currentKey = ffj_t_IpStatistics_InDiscards + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IpStatistics_InUnknownProtos, kn) { + currentKey = ffj_t_IpStatistics_InUnknownProtos + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IpStatistics_ForwDatagrams, kn) { + currentKey = ffj_t_IpStatistics_ForwDatagrams + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IpStatistics_InAddrErrors, kn) { + currentKey = ffj_t_IpStatistics_InAddrErrors + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_IpStatistics_InHdrErrors, kn) { + currentKey = ffj_t_IpStatistics_InHdrErrors + state = fflib.FFParse_want_colon + goto mainparse + } + 
+ if fflib.EqualFoldRight(ffj_key_IpStatistics_InReceives, kn) { + currentKey = ffj_t_IpStatistics_InReceives + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_IpStatistics_DefaultTTL, kn) { + currentKey = ffj_t_IpStatistics_DefaultTTL + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_IpStatistics_Forwarding, kn) { + currentKey = ffj_t_IpStatistics_Forwarding + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_IpStatisticsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_IpStatistics_Forwarding: + goto handle_Forwarding + + case ffj_t_IpStatistics_DefaultTTL: + goto handle_DefaultTTL + + case ffj_t_IpStatistics_InReceives: + goto handle_InReceives + + case ffj_t_IpStatistics_InHdrErrors: + goto handle_InHdrErrors + + case ffj_t_IpStatistics_InAddrErrors: + goto handle_InAddrErrors + + case ffj_t_IpStatistics_ForwDatagrams: + goto handle_ForwDatagrams + + case ffj_t_IpStatistics_InUnknownProtos: + goto handle_InUnknownProtos + + case ffj_t_IpStatistics_InDiscards: + goto handle_InDiscards + + case ffj_t_IpStatistics_InDelivers: + goto handle_InDelivers + + case ffj_t_IpStatistics_OutRequests: + goto handle_OutRequests + + case ffj_t_IpStatistics_OutDiscards: + goto handle_OutDiscards + + case ffj_t_IpStatistics_OutNoRoutes: + goto handle_OutNoRoutes + + case ffj_t_IpStatistics_ReasmTimeout: + goto handle_ReasmTimeout + + case ffj_t_IpStatistics_ReasmReqds: + goto handle_ReasmReqds + + case ffj_t_IpStatistics_ReasmOKs: + goto handle_ReasmOKs + + case ffj_t_IpStatistics_ReasmFails: + goto handle_ReasmFails + + case ffj_t_IpStatistics_FragOKs: + goto handle_FragOKs + + case ffj_t_IpStatistics_FragFails: + goto handle_FragFails + + case ffj_t_IpStatistics_FragCreates: + goto handle_FragCreates + + case ffj_t_IpStatisticsno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Forwarding: + + /* handler: uj.Forwarding type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.Forwarding = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.Forwarding = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_DefaultTTL: + + /* handler: uj.DefaultTTL type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.DefaultTTL = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + 
uj.DefaultTTL = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_InReceives: + + /* handler: uj.InReceives type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.InReceives = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.InReceives = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_InHdrErrors: + + /* handler: uj.InHdrErrors type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.InHdrErrors = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.InHdrErrors = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_InAddrErrors: + + /* handler: uj.InAddrErrors type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.InAddrErrors = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.InAddrErrors = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ForwDatagrams: + + /* handler: uj.ForwDatagrams type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.ForwDatagrams = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.ForwDatagrams = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_InUnknownProtos: + + /* handler: uj.InUnknownProtos type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.InUnknownProtos = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.InUnknownProtos = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_InDiscards: + + /* handler: uj.InDiscards type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.InDiscards = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.InDiscards = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_InDelivers: + + /* handler: uj.InDelivers type=int64 kind=int64 quoted=false*/ + + { + if tok != 
fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.InDelivers = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.InDelivers = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_OutRequests: + + /* handler: uj.OutRequests type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.OutRequests = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.OutRequests = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_OutDiscards: + + /* handler: uj.OutDiscards type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.OutDiscards = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.OutDiscards = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_OutNoRoutes: + + /* handler: uj.OutNoRoutes type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.OutNoRoutes = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.OutNoRoutes = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ReasmTimeout: + + /* handler: uj.ReasmTimeout type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.ReasmTimeout = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.ReasmTimeout = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ReasmReqds: + + /* handler: uj.ReasmReqds type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.ReasmReqds = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.ReasmReqds = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ReasmOKs: + + /* handler: uj.ReasmOKs type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.ReasmOKs = nil + + } else { + + tval, err := 
fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.ReasmOKs = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ReasmFails: + + /* handler: uj.ReasmFails type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.ReasmFails = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.ReasmFails = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_FragOKs: + + /* handler: uj.FragOKs type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.FragOKs = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.FragOKs = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_FragFails: + + /* handler: uj.FragFails type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.FragFails = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.FragFails = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_FragCreates: + + /* handler: uj.FragCreates type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.FragCreates = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.FragCreates = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *KillPolicy) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *KillPolicy) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.GracePeriod != nil { + if true { + buf.WriteString(`"grace_period":`) + + { + + err = mj.GracePeriod.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + 
buf.WriteByte('}') + return nil +} + +const ( + ffj_t_KillPolicybase = iota + ffj_t_KillPolicyno_such_key + + ffj_t_KillPolicy_GracePeriod +) + +var ffj_key_KillPolicy_GracePeriod = []byte("grace_period") + +func (uj *KillPolicy) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *KillPolicy) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_KillPolicybase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_KillPolicyno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'g': + + if bytes.Equal(ffj_key_KillPolicy_GracePeriod, kn) { + currentKey = ffj_t_KillPolicy_GracePeriod + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.AsciiEqualFold(ffj_key_KillPolicy_GracePeriod, kn) { + currentKey = ffj_t_KillPolicy_GracePeriod + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_KillPolicyno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_KillPolicy_GracePeriod: + goto handle_GracePeriod + + case ffj_t_KillPolicyno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_GracePeriod: + + /* handler: uj.GracePeriod type=mesos.DurationInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.GracePeriod = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.GracePeriod == nil { + uj.GracePeriod = new(DurationInfo) + } + + err = uj.GracePeriod.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return 
fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Label) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Label) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "key":`) + fflib.WriteJsonString(buf, string(mj.Key)) + buf.WriteByte(',') + if mj.Value != nil { + if true { + buf.WriteString(`"value":`) + fflib.WriteJsonString(buf, string(*mj.Value)) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Labelbase = iota + ffj_t_Labelno_such_key + + ffj_t_Label_Key + + ffj_t_Label_Value +) + +var ffj_key_Label_Key = []byte("key") + +var ffj_key_Label_Value = []byte("value") + +func (uj *Label) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Label) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Labelbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Labelno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'k': + + if bytes.Equal(ffj_key_Label_Key, kn) { + currentKey = ffj_t_Label_Key + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'v': + + if bytes.Equal(ffj_key_Label_Value, kn) { + currentKey = ffj_t_Label_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Label_Value, kn) { + currentKey = ffj_t_Label_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Label_Key, kn) { + currentKey = ffj_t_Label_Key + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Labelno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Label_Key: + goto handle_Key + + case ffj_t_Label_Value: + goto handle_Value + + case ffj_t_Labelno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Key: + + /* handler: uj.Key type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Key = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Value: + + /* handler: uj.Value type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Value = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Value = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Labels) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Labels) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"labels":`) + if mj.Labels != nil { + buf.WriteString(`[`) + for i, v := range mj.Labels { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil 
{ + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Labelsbase = iota + ffj_t_Labelsno_such_key + + ffj_t_Labels_Labels +) + +var ffj_key_Labels_Labels = []byte("labels") + +func (uj *Labels) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Labels) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Labelsbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Labelsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'l': + + if bytes.Equal(ffj_key_Labels_Labels, kn) { + currentKey = ffj_t_Labels_Labels + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Labels_Labels, kn) { + currentKey = ffj_t_Labels_Labels + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Labelsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Labels_Labels: + goto handle_Labels + + case ffj_t_Labelsno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Labels: + + /* handler: uj.Labels type=[]mesos.Label kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Labels = nil + } else { + + uj.Labels = []Label{} + + wantVal := true + + for { + + var tmp_uj__Labels Label + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Labels type=mesos.Label kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Labels.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Labels = append(uj.Labels, tmp_uj__Labels) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *LinuxInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *LinuxInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.CapabilityInfo != nil { + if true { + buf.WriteString(`"capability_info":`) + + { + + err = mj.CapabilityInfo.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.BoundingCapabilities != nil { + if true { + buf.WriteString(`"bounding_capabilities":`) + + { + + err = mj.BoundingCapabilities.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.EffectiveCapabilities != nil { + if true { + buf.WriteString(`"effective_capabilities":`) + + { + + err = mj.EffectiveCapabilities.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.SharePIDNamespace != nil { + if true { + if *mj.SharePIDNamespace { + buf.WriteString(`"share_pid_namespace":true`) + } else { + buf.WriteString(`"share_pid_namespace":false`) + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_LinuxInfobase = iota + ffj_t_LinuxInfono_such_key + + ffj_t_LinuxInfo_CapabilityInfo + + ffj_t_LinuxInfo_BoundingCapabilities + + ffj_t_LinuxInfo_EffectiveCapabilities + + ffj_t_LinuxInfo_SharePIDNamespace +) + +var ffj_key_LinuxInfo_CapabilityInfo = []byte("capability_info") + +var ffj_key_LinuxInfo_BoundingCapabilities = []byte("bounding_capabilities") + +var ffj_key_LinuxInfo_EffectiveCapabilities = []byte("effective_capabilities") + +var ffj_key_LinuxInfo_SharePIDNamespace = []byte("share_pid_namespace") + +func (uj *LinuxInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *LinuxInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_LinuxInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case 
fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_LinuxInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'b': + + if bytes.Equal(ffj_key_LinuxInfo_BoundingCapabilities, kn) { + currentKey = ffj_t_LinuxInfo_BoundingCapabilities + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'c': + + if bytes.Equal(ffj_key_LinuxInfo_CapabilityInfo, kn) { + currentKey = ffj_t_LinuxInfo_CapabilityInfo + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'e': + + if bytes.Equal(ffj_key_LinuxInfo_EffectiveCapabilities, kn) { + currentKey = ffj_t_LinuxInfo_EffectiveCapabilities + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_LinuxInfo_SharePIDNamespace, kn) { + currentKey = ffj_t_LinuxInfo_SharePIDNamespace + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_LinuxInfo_SharePIDNamespace, kn) { + currentKey = ffj_t_LinuxInfo_SharePIDNamespace + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_LinuxInfo_EffectiveCapabilities, kn) { + currentKey = ffj_t_LinuxInfo_EffectiveCapabilities + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_LinuxInfo_BoundingCapabilities, kn) { + currentKey = ffj_t_LinuxInfo_BoundingCapabilities + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_LinuxInfo_CapabilityInfo, kn) { + currentKey = ffj_t_LinuxInfo_CapabilityInfo + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_LinuxInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_LinuxInfo_CapabilityInfo: + goto handle_CapabilityInfo + + case ffj_t_LinuxInfo_BoundingCapabilities: + goto handle_BoundingCapabilities + + case ffj_t_LinuxInfo_EffectiveCapabilities: + goto handle_EffectiveCapabilities + + case ffj_t_LinuxInfo_SharePIDNamespace: + goto handle_SharePIDNamespace + + case ffj_t_LinuxInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_CapabilityInfo: + + /* handler: uj.CapabilityInfo type=mesos.CapabilityInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.CapabilityInfo = nil + + state = fflib.FFParse_after_value + goto 
mainparse + } + + if uj.CapabilityInfo == nil { + uj.CapabilityInfo = new(CapabilityInfo) + } + + err = uj.CapabilityInfo.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_BoundingCapabilities: + + /* handler: uj.BoundingCapabilities type=mesos.CapabilityInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.BoundingCapabilities = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.BoundingCapabilities == nil { + uj.BoundingCapabilities = new(CapabilityInfo) + } + + err = uj.BoundingCapabilities.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_EffectiveCapabilities: + + /* handler: uj.EffectiveCapabilities type=mesos.CapabilityInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.EffectiveCapabilities = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.EffectiveCapabilities == nil { + uj.EffectiveCapabilities = new(CapabilityInfo) + } + + err = uj.EffectiveCapabilities.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_SharePIDNamespace: + + /* handler: uj.SharePIDNamespace type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + uj.SharePIDNamespace = nil + + } else { + tmpb := fs.Output.Bytes() + + var tval bool + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + tval = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + tval = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + uj.SharePIDNamespace = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *MachineID) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *MachineID) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.Hostname != nil { + if true { + buf.WriteString(`"hostname":`) + fflib.WriteJsonString(buf, string(*mj.Hostname)) + buf.WriteByte(',') + } + } + if mj.IP != nil { + if true { + buf.WriteString(`"ip":`) + fflib.WriteJsonString(buf, string(*mj.IP)) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_MachineIDbase = iota + ffj_t_MachineIDno_such_key + + ffj_t_MachineID_Hostname + + 
ffj_t_MachineID_IP +) + +var ffj_key_MachineID_Hostname = []byte("hostname") + +var ffj_key_MachineID_IP = []byte("ip") + +func (uj *MachineID) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *MachineID) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_MachineIDbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_MachineIDno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'h': + + if bytes.Equal(ffj_key_MachineID_Hostname, kn) { + currentKey = ffj_t_MachineID_Hostname + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffj_key_MachineID_IP, kn) { + currentKey = ffj_t_MachineID_IP + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_MachineID_IP, kn) { + currentKey = ffj_t_MachineID_IP + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_MachineID_Hostname, kn) { + currentKey = ffj_t_MachineID_Hostname + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_MachineIDno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_MachineID_Hostname: + goto handle_Hostname + + case ffj_t_MachineID_IP: + goto handle_IP + + case ffj_t_MachineIDno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Hostname: + + /* handler: uj.Hostname type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Hostname = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Hostname = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_IP: + + /* handler: uj.IP type=string kind=string 
quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.IP = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.IP = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *MachineInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *MachineInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "id":`) + + { + + err = mj.ID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + if mj.Mode != nil { + if true { + buf.WriteString(`"mode":`) + + { + + obj, err = mj.Mode.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + } + } + if mj.Unavailability != nil { + if true { + buf.WriteString(`"unavailability":`) + + { + + err = mj.Unavailability.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_MachineInfobase = iota + ffj_t_MachineInfono_such_key + + ffj_t_MachineInfo_ID + + ffj_t_MachineInfo_Mode + + ffj_t_MachineInfo_Unavailability +) + +var ffj_key_MachineInfo_ID = []byte("id") + +var ffj_key_MachineInfo_Mode = []byte("mode") + +var ffj_key_MachineInfo_Unavailability = []byte("unavailability") + +func (uj *MachineInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *MachineInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_MachineInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_MachineInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'i': + + if bytes.Equal(ffj_key_MachineInfo_ID, kn) { + currentKey = ffj_t_MachineInfo_ID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'm': + + if bytes.Equal(ffj_key_MachineInfo_Mode, kn) { + currentKey = ffj_t_MachineInfo_Mode + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'u': + + if bytes.Equal(ffj_key_MachineInfo_Unavailability, kn) { + currentKey = ffj_t_MachineInfo_Unavailability + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_MachineInfo_Unavailability, kn) { + currentKey = ffj_t_MachineInfo_Unavailability + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_MachineInfo_Mode, kn) { + currentKey = ffj_t_MachineInfo_Mode + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_MachineInfo_ID, kn) { + currentKey = ffj_t_MachineInfo_ID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_MachineInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_MachineInfo_ID: + goto handle_ID + + case ffj_t_MachineInfo_Mode: + goto handle_Mode + + case ffj_t_MachineInfo_Unavailability: + goto handle_Unavailability + + case ffj_t_MachineInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ID: + + /* handler: uj.ID type=mesos.MachineID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.ID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Mode: + + /* handler: uj.Mode type=mesos.MachineInfo_Mode kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Mode = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + if uj.Mode == nil { + uj.Mode = new(MachineInfo_Mode) + } + + err = uj.Mode.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Unavailability: + + /* handler: uj.Unavailability type=mesos.Unavailability kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Unavailability = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Unavailability == nil { + uj.Unavailability = new(Unavailability) + } + + err = uj.Unavailability.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, 
but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *MasterInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *MasterInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"id":`) + fflib.WriteJsonString(buf, string(mj.ID)) + buf.WriteString(`,"ip":`) + fflib.FormatBits2(buf, uint64(mj.IP), 10, false) + buf.WriteByte(',') + if mj.Port != nil { + if true { + buf.WriteString(`"port":`) + fflib.FormatBits2(buf, uint64(*mj.Port), 10, false) + buf.WriteByte(',') + } + } + if mj.PID != nil { + if true { + buf.WriteString(`"pid":`) + fflib.WriteJsonString(buf, string(*mj.PID)) + buf.WriteByte(',') + } + } + if mj.Hostname != nil { + if true { + buf.WriteString(`"hostname":`) + fflib.WriteJsonString(buf, string(*mj.Hostname)) + buf.WriteByte(',') + } + } + if mj.Version != nil { + if true { + buf.WriteString(`"version":`) + fflib.WriteJsonString(buf, string(*mj.Version)) + buf.WriteByte(',') + } + } + if mj.Address != nil { + if true { + buf.WriteString(`"address":`) + + { + + err = mj.Address.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Domain != nil { + if true { + buf.WriteString(`"domain":`) + + { + + err = mj.Domain.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.WriteString(`"capabilities":`) + if mj.Capabilities != nil { + buf.WriteString(`[`) + for i, v := range mj.Capabilities { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_MasterInfobase = iota + ffj_t_MasterInfono_such_key + + ffj_t_MasterInfo_ID + + ffj_t_MasterInfo_IP + + ffj_t_MasterInfo_Port + + ffj_t_MasterInfo_PID + + ffj_t_MasterInfo_Hostname + + ffj_t_MasterInfo_Version + + ffj_t_MasterInfo_Address + + ffj_t_MasterInfo_Domain + + ffj_t_MasterInfo_Capabilities +) + +var ffj_key_MasterInfo_ID = []byte("id") + +var ffj_key_MasterInfo_IP = []byte("ip") + +var ffj_key_MasterInfo_Port = []byte("port") + +var ffj_key_MasterInfo_PID = []byte("pid") + +var ffj_key_MasterInfo_Hostname = []byte("hostname") + +var ffj_key_MasterInfo_Version = []byte("version") + +var ffj_key_MasterInfo_Address = []byte("address") + +var ffj_key_MasterInfo_Domain = []byte("domain") + +var ffj_key_MasterInfo_Capabilities = []byte("capabilities") + +func (uj *MasterInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *MasterInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_MasterInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // 
println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_MasterInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'a': + + if bytes.Equal(ffj_key_MasterInfo_Address, kn) { + currentKey = ffj_t_MasterInfo_Address + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'c': + + if bytes.Equal(ffj_key_MasterInfo_Capabilities, kn) { + currentKey = ffj_t_MasterInfo_Capabilities + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'd': + + if bytes.Equal(ffj_key_MasterInfo_Domain, kn) { + currentKey = ffj_t_MasterInfo_Domain + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'h': + + if bytes.Equal(ffj_key_MasterInfo_Hostname, kn) { + currentKey = ffj_t_MasterInfo_Hostname + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffj_key_MasterInfo_ID, kn) { + currentKey = ffj_t_MasterInfo_ID + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_MasterInfo_IP, kn) { + currentKey = ffj_t_MasterInfo_IP + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffj_key_MasterInfo_Port, kn) { + currentKey = ffj_t_MasterInfo_Port + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_MasterInfo_PID, kn) { + currentKey = ffj_t_MasterInfo_PID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'v': + + if bytes.Equal(ffj_key_MasterInfo_Version, kn) { + currentKey = ffj_t_MasterInfo_Version + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_MasterInfo_Capabilities, kn) { + currentKey = ffj_t_MasterInfo_Capabilities + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_MasterInfo_Domain, kn) { + currentKey = ffj_t_MasterInfo_Domain + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_MasterInfo_Address, kn) { + currentKey = ffj_t_MasterInfo_Address + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_MasterInfo_Version, kn) { + currentKey = ffj_t_MasterInfo_Version + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_MasterInfo_Hostname, kn) { + currentKey = ffj_t_MasterInfo_Hostname + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_MasterInfo_PID, kn) { + currentKey = ffj_t_MasterInfo_PID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_MasterInfo_Port, kn) { + currentKey = ffj_t_MasterInfo_Port + state = fflib.FFParse_want_colon + goto mainparse + } + + if 
fflib.SimpleLetterEqualFold(ffj_key_MasterInfo_IP, kn) { + currentKey = ffj_t_MasterInfo_IP + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_MasterInfo_ID, kn) { + currentKey = ffj_t_MasterInfo_ID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_MasterInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_MasterInfo_ID: + goto handle_ID + + case ffj_t_MasterInfo_IP: + goto handle_IP + + case ffj_t_MasterInfo_Port: + goto handle_Port + + case ffj_t_MasterInfo_PID: + goto handle_PID + + case ffj_t_MasterInfo_Hostname: + goto handle_Hostname + + case ffj_t_MasterInfo_Version: + goto handle_Version + + case ffj_t_MasterInfo_Address: + goto handle_Address + + case ffj_t_MasterInfo_Domain: + goto handle_Domain + + case ffj_t_MasterInfo_Capabilities: + goto handle_Capabilities + + case ffj_t_MasterInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ID: + + /* handler: uj.ID type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.ID = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_IP: + + /* handler: uj.IP type=uint32 kind=uint32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint32", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32) + + if err != nil { + return fs.WrapErr(err) + } + + uj.IP = uint32(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Port: + + /* handler: uj.Port type=uint32 kind=uint32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint32", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.Port = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint32(tval) + uj.Port = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_PID: + + /* handler: uj.PID type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.PID = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.PID = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Hostname: + + /* handler: uj.Hostname type=string kind=string quoted=false*/ + + { + + { + 
if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Hostname = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Hostname = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Version: + + /* handler: uj.Version type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Version = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Version = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Address: + + /* handler: uj.Address type=mesos.Address kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Address = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Address == nil { + uj.Address = new(Address) + } + + err = uj.Address.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Domain: + + /* handler: uj.Domain type=mesos.DomainInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Domain = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Domain == nil { + uj.Domain = new(DomainInfo) + } + + err = uj.Domain.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Capabilities: + + /* handler: uj.Capabilities type=[]mesos.MasterInfo_Capability kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Capabilities = nil + } else { + + uj.Capabilities = []MasterInfo_Capability{} + + wantVal := true + + for { + + var tmp_uj__Capabilities MasterInfo_Capability + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Capabilities type=mesos.MasterInfo_Capability kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Capabilities.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Capabilities = append(uj.Capabilities, tmp_uj__Capabilities) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *MasterInfo_Capability) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *MasterInfo_Capability) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"type":`) + + { + + obj, err = mj.Type.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_MasterInfo_Capabilitybase = iota + ffj_t_MasterInfo_Capabilityno_such_key + + ffj_t_MasterInfo_Capability_Type +) + +var ffj_key_MasterInfo_Capability_Type = []byte("type") + +func (uj *MasterInfo_Capability) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *MasterInfo_Capability) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_MasterInfo_Capabilitybase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_MasterInfo_Capabilityno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 't': + + if bytes.Equal(ffj_key_MasterInfo_Capability_Type, kn) { + currentKey = ffj_t_MasterInfo_Capability_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_MasterInfo_Capability_Type, kn) { + currentKey = ffj_t_MasterInfo_Capability_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_MasterInfo_Capabilityno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_MasterInfo_Capability_Type: + goto handle_Type + + case ffj_t_MasterInfo_Capabilityno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Type: + + /* handler: uj.Type type=mesos.MasterInfo_Capability_Type kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = uj.Type.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Metric) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Metric) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "name":`) + fflib.WriteJsonString(buf, string(mj.Name)) + buf.WriteByte(',') + if mj.Value != nil { + if true { + buf.WriteString(`"value":`) + fflib.AppendFloat(buf, float64(*mj.Value), 'g', -1, 64) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Metricbase = iota + ffj_t_Metricno_such_key + + ffj_t_Metric_Name + + ffj_t_Metric_Value +) + +var ffj_key_Metric_Name = []byte("name") + +var ffj_key_Metric_Value = []byte("value") + +func (uj *Metric) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Metric) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Metricbase + _ = currentKey + tok := 
fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Metricno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'n': + + if bytes.Equal(ffj_key_Metric_Name, kn) { + currentKey = ffj_t_Metric_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'v': + + if bytes.Equal(ffj_key_Metric_Value, kn) { + currentKey = ffj_t_Metric_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Metric_Value, kn) { + currentKey = ffj_t_Metric_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Metric_Name, kn) { + currentKey = ffj_t_Metric_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Metricno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Metric_Name: + goto handle_Name + + case ffj_t_Metric_Value: + goto handle_Value + + case ffj_t_Metricno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Name: + + /* handler: uj.Name type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Name = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Value: + + /* handler: uj.Value type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.Value = nil + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := float64(tval) + uj.Value = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, 
but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *MountPropagation) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *MountPropagation) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.Mode != nil { + if true { + buf.WriteString(`"mode":`) + + { + + obj, err = mj.Mode.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_MountPropagationbase = iota + ffj_t_MountPropagationno_such_key + + ffj_t_MountPropagation_Mode +) + +var ffj_key_MountPropagation_Mode = []byte("mode") + +func (uj *MountPropagation) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *MountPropagation) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_MountPropagationbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_MountPropagationno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'm': + + if bytes.Equal(ffj_key_MountPropagation_Mode, kn) { + currentKey = ffj_t_MountPropagation_Mode + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_MountPropagation_Mode, kn) { + currentKey = ffj_t_MountPropagation_Mode + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_MountPropagationno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_MountPropagation_Mode: + goto handle_Mode + + case ffj_t_MountPropagationno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Mode: + + /* handler: uj.Mode type=mesos.MountPropagation_Mode kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Mode = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + if uj.Mode == nil { + uj.Mode = new(MountPropagation_Mode) + } + + err = uj.Mode.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *NetworkInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *NetworkInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"ip_addresses":`) + if mj.IPAddresses != nil { + buf.WriteString(`[`) + for i, v := range mj.IPAddresses { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + if mj.Name != nil { + if true { + buf.WriteString(`"name":`) + fflib.WriteJsonString(buf, string(*mj.Name)) + buf.WriteByte(',') + } + } + if len(mj.Groups) != 0 { + buf.WriteString(`"groups":`) + if mj.Groups != nil { + buf.WriteString(`[`) + for i, v := range mj.Groups { + if i != 0 { + buf.WriteString(`,`) + } + fflib.WriteJsonString(buf, string(v)) + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + if 
mj.Labels != nil { + if true { + buf.WriteString(`"labels":`) + + { + + err = mj.Labels.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.WriteString(`"port_mappings":`) + if mj.PortMappings != nil { + buf.WriteString(`[`) + for i, v := range mj.PortMappings { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_NetworkInfobase = iota + ffj_t_NetworkInfono_such_key + + ffj_t_NetworkInfo_IPAddresses + + ffj_t_NetworkInfo_Name + + ffj_t_NetworkInfo_Groups + + ffj_t_NetworkInfo_Labels + + ffj_t_NetworkInfo_PortMappings +) + +var ffj_key_NetworkInfo_IPAddresses = []byte("ip_addresses") + +var ffj_key_NetworkInfo_Name = []byte("name") + +var ffj_key_NetworkInfo_Groups = []byte("groups") + +var ffj_key_NetworkInfo_Labels = []byte("labels") + +var ffj_key_NetworkInfo_PortMappings = []byte("port_mappings") + +func (uj *NetworkInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *NetworkInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_NetworkInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_NetworkInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'g': + + if bytes.Equal(ffj_key_NetworkInfo_Groups, kn) { + currentKey = ffj_t_NetworkInfo_Groups + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffj_key_NetworkInfo_IPAddresses, kn) { + currentKey = ffj_t_NetworkInfo_IPAddresses + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'l': + + if bytes.Equal(ffj_key_NetworkInfo_Labels, kn) { + currentKey = ffj_t_NetworkInfo_Labels + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffj_key_NetworkInfo_Name, kn) { + currentKey = ffj_t_NetworkInfo_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffj_key_NetworkInfo_PortMappings, kn) { + currentKey = ffj_t_NetworkInfo_PortMappings + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_NetworkInfo_PortMappings, kn) { + currentKey = ffj_t_NetworkInfo_PortMappings + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_NetworkInfo_Labels, kn) { + currentKey = ffj_t_NetworkInfo_Labels + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_NetworkInfo_Groups, kn) { + currentKey = ffj_t_NetworkInfo_Groups + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_NetworkInfo_Name, kn) { + currentKey = ffj_t_NetworkInfo_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_NetworkInfo_IPAddresses, kn) { + currentKey = ffj_t_NetworkInfo_IPAddresses + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_NetworkInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_NetworkInfo_IPAddresses: + goto handle_IPAddresses + + case ffj_t_NetworkInfo_Name: + goto handle_Name + + case ffj_t_NetworkInfo_Groups: + goto handle_Groups + + case ffj_t_NetworkInfo_Labels: + goto handle_Labels + + case ffj_t_NetworkInfo_PortMappings: + goto handle_PortMappings + + case ffj_t_NetworkInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_IPAddresses: + + /* handler: uj.IPAddresses type=[]mesos.NetworkInfo_IPAddress kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.IPAddresses = nil + } else { + + uj.IPAddresses = []NetworkInfo_IPAddress{} + + wantVal := true + + for { + + var tmp_uj__IPAddresses NetworkInfo_IPAddress + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + 
// things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__IPAddresses type=mesos.NetworkInfo_IPAddress kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__IPAddresses.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.IPAddresses = append(uj.IPAddresses, tmp_uj__IPAddresses) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Name: + + /* handler: uj.Name type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Name = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Name = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Groups: + + /* handler: uj.Groups type=[]string kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Groups = nil + } else { + + uj.Groups = []string{} + + wantVal := true + + for { + + var tmp_uj__Groups string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Groups type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmp_uj__Groups = string(string(outBuf)) + + } + } + + uj.Groups = append(uj.Groups, tmp_uj__Groups) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Labels: + + /* handler: uj.Labels type=mesos.Labels kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Labels = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Labels == nil { + uj.Labels = new(Labels) + } + + err = uj.Labels.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_PortMappings: + + /* handler: uj.PortMappings type=[]mesos.NetworkInfo_PortMapping kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.PortMappings = nil + } else { + + uj.PortMappings = []NetworkInfo_PortMapping{} + + wantVal := true + + for { + + var tmp_uj__PortMappings NetworkInfo_PortMapping + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__PortMappings type=mesos.NetworkInfo_PortMapping kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__PortMappings.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.PortMappings = append(uj.PortMappings, tmp_uj__PortMappings) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *NetworkInfo_IPAddress) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *NetworkInfo_IPAddress) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.Protocol != nil { + if true { + buf.WriteString(`"protocol":`) + + { + + obj, err = mj.Protocol.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + } + } + if mj.IPAddress != nil { + if true { + buf.WriteString(`"ip_address":`) + fflib.WriteJsonString(buf, string(*mj.IPAddress)) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_NetworkInfo_IPAddressbase = iota + ffj_t_NetworkInfo_IPAddressno_such_key + + ffj_t_NetworkInfo_IPAddress_Protocol + + ffj_t_NetworkInfo_IPAddress_IPAddress +) + +var ffj_key_NetworkInfo_IPAddress_Protocol = []byte("protocol") + +var ffj_key_NetworkInfo_IPAddress_IPAddress = []byte("ip_address") + +func (uj *NetworkInfo_IPAddress) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *NetworkInfo_IPAddress) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_NetworkInfo_IPAddressbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. 
+ if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_NetworkInfo_IPAddressno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'i': + + if bytes.Equal(ffj_key_NetworkInfo_IPAddress_IPAddress, kn) { + currentKey = ffj_t_NetworkInfo_IPAddress_IPAddress + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffj_key_NetworkInfo_IPAddress_Protocol, kn) { + currentKey = ffj_t_NetworkInfo_IPAddress_Protocol + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_NetworkInfo_IPAddress_IPAddress, kn) { + currentKey = ffj_t_NetworkInfo_IPAddress_IPAddress + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_NetworkInfo_IPAddress_Protocol, kn) { + currentKey = ffj_t_NetworkInfo_IPAddress_Protocol + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_NetworkInfo_IPAddressno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_NetworkInfo_IPAddress_Protocol: + goto handle_Protocol + + case ffj_t_NetworkInfo_IPAddress_IPAddress: + goto handle_IPAddress + + case ffj_t_NetworkInfo_IPAddressno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Protocol: + + /* handler: uj.Protocol type=mesos.NetworkInfo_Protocol kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Protocol = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + if uj.Protocol == nil { + uj.Protocol = new(NetworkInfo_Protocol) + } + + err = uj.Protocol.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_IPAddress: + + /* handler: uj.IPAddress type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.IPAddress = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.IPAddress = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil 
+} + +func (mj *NetworkInfo_PortMapping) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *NetworkInfo_PortMapping) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "host_port":`) + fflib.FormatBits2(buf, uint64(mj.HostPort), 10, false) + buf.WriteString(`,"container_port":`) + fflib.FormatBits2(buf, uint64(mj.ContainerPort), 10, false) + buf.WriteByte(',') + if mj.Protocol != nil { + if true { + buf.WriteString(`"protocol":`) + fflib.WriteJsonString(buf, string(*mj.Protocol)) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_NetworkInfo_PortMappingbase = iota + ffj_t_NetworkInfo_PortMappingno_such_key + + ffj_t_NetworkInfo_PortMapping_HostPort + + ffj_t_NetworkInfo_PortMapping_ContainerPort + + ffj_t_NetworkInfo_PortMapping_Protocol +) + +var ffj_key_NetworkInfo_PortMapping_HostPort = []byte("host_port") + +var ffj_key_NetworkInfo_PortMapping_ContainerPort = []byte("container_port") + +var ffj_key_NetworkInfo_PortMapping_Protocol = []byte("protocol") + +func (uj *NetworkInfo_PortMapping) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *NetworkInfo_PortMapping) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_NetworkInfo_PortMappingbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_NetworkInfo_PortMappingno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_NetworkInfo_PortMapping_ContainerPort, kn) { + currentKey = ffj_t_NetworkInfo_PortMapping_ContainerPort + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'h': + + if bytes.Equal(ffj_key_NetworkInfo_PortMapping_HostPort, kn) { + currentKey = ffj_t_NetworkInfo_PortMapping_HostPort + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffj_key_NetworkInfo_PortMapping_Protocol, kn) { + currentKey = ffj_t_NetworkInfo_PortMapping_Protocol + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_NetworkInfo_PortMapping_Protocol, kn) { + currentKey = ffj_t_NetworkInfo_PortMapping_Protocol + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_NetworkInfo_PortMapping_ContainerPort, kn) { + currentKey = ffj_t_NetworkInfo_PortMapping_ContainerPort + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_NetworkInfo_PortMapping_HostPort, kn) { + currentKey = ffj_t_NetworkInfo_PortMapping_HostPort + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_NetworkInfo_PortMappingno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_NetworkInfo_PortMapping_HostPort: + goto handle_HostPort + + case ffj_t_NetworkInfo_PortMapping_ContainerPort: + goto handle_ContainerPort + + case ffj_t_NetworkInfo_PortMapping_Protocol: + goto handle_Protocol + + case ffj_t_NetworkInfo_PortMappingno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_HostPort: + + /* handler: uj.HostPort type=uint32 kind=uint32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint32", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32) + + if err != nil { + return fs.WrapErr(err) + } + + uj.HostPort = uint32(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ContainerPort: + + /* handler: uj.ContainerPort type=uint32 kind=uint32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint32", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32) + + if err != nil { + return fs.WrapErr(err) + } + + uj.ContainerPort = uint32(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Protocol: + + /* handler: uj.Protocol type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value 
for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Protocol = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Protocol = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Offer) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Offer) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "id":`) + + { + + err = mj.ID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteString(`,"framework_id":`) + + { + + err = mj.FrameworkID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteString(`,"agent_id":`) + + { + + err = mj.AgentID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteString(`,"hostname":`) + fflib.WriteJsonString(buf, string(mj.Hostname)) + buf.WriteByte(',') + if mj.URL != nil { + if true { + buf.WriteString(`"url":`) + + { + + err = mj.URL.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Domain != nil { + if true { + buf.WriteString(`"domain":`) + + { + + err = mj.Domain.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.WriteString(`"resources":`) + if mj.Resources != nil { + buf.WriteString(`[`) + for i, v := range mj.Resources { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteString(`,"attributes":`) + if mj.Attributes != nil { + buf.WriteString(`[`) + for i, v := range mj.Attributes { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteString(`,"executor_ids":`) + if mj.ExecutorIDs != nil { + buf.WriteString(`[`) + for i, v := range mj.ExecutorIDs { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + if mj.Unavailability != nil { + if true { + buf.WriteString(`"unavailability":`) + + { + + err = mj.Unavailability.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.AllocationInfo != nil { + if true { + buf.WriteString(`"allocation_info":`) + + { + + err = mj.AllocationInfo.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Offerbase = iota + ffj_t_Offerno_such_key + + ffj_t_Offer_ID + + ffj_t_Offer_FrameworkID + + ffj_t_Offer_AgentID + + 
ffj_t_Offer_Hostname + + ffj_t_Offer_URL + + ffj_t_Offer_Domain + + ffj_t_Offer_Resources + + ffj_t_Offer_Attributes + + ffj_t_Offer_ExecutorIDs + + ffj_t_Offer_Unavailability + + ffj_t_Offer_AllocationInfo +) + +var ffj_key_Offer_ID = []byte("id") + +var ffj_key_Offer_FrameworkID = []byte("framework_id") + +var ffj_key_Offer_AgentID = []byte("agent_id") + +var ffj_key_Offer_Hostname = []byte("hostname") + +var ffj_key_Offer_URL = []byte("url") + +var ffj_key_Offer_Domain = []byte("domain") + +var ffj_key_Offer_Resources = []byte("resources") + +var ffj_key_Offer_Attributes = []byte("attributes") + +var ffj_key_Offer_ExecutorIDs = []byte("executor_ids") + +var ffj_key_Offer_Unavailability = []byte("unavailability") + +var ffj_key_Offer_AllocationInfo = []byte("allocation_info") + +func (uj *Offer) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Offer) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Offerbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Offerno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'a': + + if bytes.Equal(ffj_key_Offer_AgentID, kn) { + currentKey = ffj_t_Offer_AgentID + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Offer_Attributes, kn) { + currentKey = ffj_t_Offer_Attributes + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Offer_AllocationInfo, kn) { + currentKey = ffj_t_Offer_AllocationInfo + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'd': + + if bytes.Equal(ffj_key_Offer_Domain, kn) { + currentKey = ffj_t_Offer_Domain + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'e': + + if bytes.Equal(ffj_key_Offer_ExecutorIDs, kn) { + currentKey = ffj_t_Offer_ExecutorIDs + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'f': + + if bytes.Equal(ffj_key_Offer_FrameworkID, kn) { + currentKey = ffj_t_Offer_FrameworkID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'h': + + if bytes.Equal(ffj_key_Offer_Hostname, kn) { + currentKey = ffj_t_Offer_Hostname + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffj_key_Offer_ID, kn) { + currentKey = ffj_t_Offer_ID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'r': + + if bytes.Equal(ffj_key_Offer_Resources, kn) { + currentKey = ffj_t_Offer_Resources + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'u': + + if bytes.Equal(ffj_key_Offer_URL, kn) { + currentKey = ffj_t_Offer_URL + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Offer_Unavailability, kn) { + currentKey = ffj_t_Offer_Unavailability + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.AsciiEqualFold(ffj_key_Offer_AllocationInfo, kn) { + currentKey = ffj_t_Offer_AllocationInfo + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Offer_Unavailability, kn) { + currentKey = ffj_t_Offer_Unavailability + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Offer_ExecutorIDs, kn) { + currentKey = ffj_t_Offer_ExecutorIDs + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Offer_Attributes, kn) { + currentKey = ffj_t_Offer_Attributes + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Offer_Resources, kn) { + currentKey = ffj_t_Offer_Resources + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Offer_Domain, kn) { + currentKey = ffj_t_Offer_Domain + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Offer_URL, kn) { + currentKey = ffj_t_Offer_URL + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Offer_Hostname, kn) { + currentKey = ffj_t_Offer_Hostname + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_Offer_AgentID, kn) { + currentKey = ffj_t_Offer_AgentID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Offer_FrameworkID, kn) { + currentKey = ffj_t_Offer_FrameworkID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Offer_ID, kn) { + currentKey = ffj_t_Offer_ID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Offerno_such_key + state = 
fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Offer_ID: + goto handle_ID + + case ffj_t_Offer_FrameworkID: + goto handle_FrameworkID + + case ffj_t_Offer_AgentID: + goto handle_AgentID + + case ffj_t_Offer_Hostname: + goto handle_Hostname + + case ffj_t_Offer_URL: + goto handle_URL + + case ffj_t_Offer_Domain: + goto handle_Domain + + case ffj_t_Offer_Resources: + goto handle_Resources + + case ffj_t_Offer_Attributes: + goto handle_Attributes + + case ffj_t_Offer_ExecutorIDs: + goto handle_ExecutorIDs + + case ffj_t_Offer_Unavailability: + goto handle_Unavailability + + case ffj_t_Offer_AllocationInfo: + goto handle_AllocationInfo + + case ffj_t_Offerno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ID: + + /* handler: uj.ID type=mesos.OfferID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.ID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_FrameworkID: + + /* handler: uj.FrameworkID type=mesos.FrameworkID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.FrameworkID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_AgentID: + + /* handler: uj.AgentID type=mesos.AgentID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.AgentID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Hostname: + + /* handler: uj.Hostname type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Hostname = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_URL: + + /* handler: uj.URL type=mesos.URL kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.URL = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.URL == nil { + uj.URL = new(URL) + } + + err = uj.URL.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Domain: + + /* handler: uj.Domain type=mesos.DomainInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Domain = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if 
uj.Domain == nil { + uj.Domain = new(DomainInfo) + } + + err = uj.Domain.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Resources: + + /* handler: uj.Resources type=[]mesos.Resource kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Resources = nil + } else { + + uj.Resources = []Resource{} + + wantVal := true + + for { + + var tmp_uj__Resources Resource + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Resources type=mesos.Resource kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Resources.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Resources = append(uj.Resources, tmp_uj__Resources) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Attributes: + + /* handler: uj.Attributes type=[]mesos.Attribute kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Attributes = nil + } else { + + uj.Attributes = []Attribute{} + + wantVal := true + + for { + + var tmp_uj__Attributes Attribute + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Attributes type=mesos.Attribute kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Attributes.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Attributes = append(uj.Attributes, tmp_uj__Attributes) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ExecutorIDs: + + /* handler: uj.ExecutorIDs type=[]mesos.ExecutorID kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.ExecutorIDs = nil + } else { + + uj.ExecutorIDs = []ExecutorID{} + + wantVal := true + + for { + + var tmp_uj__ExecutorIDs ExecutorID + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__ExecutorIDs type=mesos.ExecutorID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__ExecutorIDs.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.ExecutorIDs = append(uj.ExecutorIDs, tmp_uj__ExecutorIDs) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Unavailability: + + /* handler: uj.Unavailability type=mesos.Unavailability kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Unavailability = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Unavailability == nil { + uj.Unavailability = new(Unavailability) + } + + err = uj.Unavailability.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_AllocationInfo: + + /* handler: uj.AllocationInfo type=mesos.Resource_AllocationInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.AllocationInfo = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.AllocationInfo == nil { + uj.AllocationInfo = new(Resource_AllocationInfo) + } + + err = uj.AllocationInfo.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *OfferID) MarshalJSON() ([]byte, error) { + var buf 
fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *OfferID) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"value":`) + fflib.WriteJsonString(buf, string(mj.Value)) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_OfferIDbase = iota + ffj_t_OfferIDno_such_key + + ffj_t_OfferID_Value +) + +var ffj_key_OfferID_Value = []byte("value") + +func (uj *OfferID) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *OfferID) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_OfferIDbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_OfferIDno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'v': + + if bytes.Equal(ffj_key_OfferID_Value, kn) { + currentKey = ffj_t_OfferID_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_OfferID_Value, kn) { + currentKey = ffj_t_OfferID_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_OfferIDno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_OfferID_Value: + goto handle_Value + + case ffj_t_OfferIDno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Value: + + /* handler: uj.Value type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Value = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Offer_Operation) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Offer_Operation) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "type":`) + + { + + obj, err = mj.Type.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + if mj.ID != nil { + if true { + buf.WriteString(`"id":`) + + { + + err = mj.ID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Launch != nil { + if true { + buf.WriteString(`"launch":`) + + { + + err = mj.Launch.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.LaunchGroup != nil { + if true { + buf.WriteString(`"launch_group":`) + + { + + err = mj.LaunchGroup.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Reserve != nil { + if true { + buf.WriteString(`"reserve":`) + + { + + err = mj.Reserve.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Unreserve != nil { + if true { + buf.WriteString(`"unreserve":`) + + { + + 
err = mj.Unreserve.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Create != nil { + if true { + buf.WriteString(`"create":`) + + { + + err = mj.Create.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Destroy != nil { + if true { + buf.WriteString(`"destroy":`) + + { + + err = mj.Destroy.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.GrowVolume != nil { + if true { + buf.WriteString(`"grow_volume":`) + + { + + err = mj.GrowVolume.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.ShrinkVolume != nil { + if true { + buf.WriteString(`"shrink_volume":`) + + { + + err = mj.ShrinkVolume.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.CreateDisk != nil { + if true { + buf.WriteString(`"create_disk":`) + + { + + err = mj.CreateDisk.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.DestroyDisk != nil { + if true { + buf.WriteString(`"destroy_disk":`) + + { + + err = mj.DestroyDisk.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Offer_Operationbase = iota + ffj_t_Offer_Operationno_such_key + + ffj_t_Offer_Operation_Type + + ffj_t_Offer_Operation_ID + + ffj_t_Offer_Operation_Launch + + ffj_t_Offer_Operation_LaunchGroup + + ffj_t_Offer_Operation_Reserve + + ffj_t_Offer_Operation_Unreserve + + ffj_t_Offer_Operation_Create + + ffj_t_Offer_Operation_Destroy + + ffj_t_Offer_Operation_GrowVolume + + ffj_t_Offer_Operation_ShrinkVolume + + ffj_t_Offer_Operation_CreateDisk + + ffj_t_Offer_Operation_DestroyDisk +) + +var ffj_key_Offer_Operation_Type = []byte("type") + +var ffj_key_Offer_Operation_ID = []byte("id") + +var ffj_key_Offer_Operation_Launch = []byte("launch") + +var ffj_key_Offer_Operation_LaunchGroup = []byte("launch_group") + +var ffj_key_Offer_Operation_Reserve = []byte("reserve") + +var ffj_key_Offer_Operation_Unreserve = []byte("unreserve") + +var ffj_key_Offer_Operation_Create = []byte("create") + +var ffj_key_Offer_Operation_Destroy = []byte("destroy") + +var ffj_key_Offer_Operation_GrowVolume = []byte("grow_volume") + +var ffj_key_Offer_Operation_ShrinkVolume = []byte("shrink_volume") + +var ffj_key_Offer_Operation_CreateDisk = []byte("create_disk") + +var ffj_key_Offer_Operation_DestroyDisk = []byte("destroy_disk") + +func (uj *Offer_Operation) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Offer_Operation) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Offer_Operationbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror 
+ } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Offer_Operationno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_Offer_Operation_Create, kn) { + currentKey = ffj_t_Offer_Operation_Create + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Offer_Operation_CreateDisk, kn) { + currentKey = ffj_t_Offer_Operation_CreateDisk + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'd': + + if bytes.Equal(ffj_key_Offer_Operation_Destroy, kn) { + currentKey = ffj_t_Offer_Operation_Destroy + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Offer_Operation_DestroyDisk, kn) { + currentKey = ffj_t_Offer_Operation_DestroyDisk + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'g': + + if bytes.Equal(ffj_key_Offer_Operation_GrowVolume, kn) { + currentKey = ffj_t_Offer_Operation_GrowVolume + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffj_key_Offer_Operation_ID, kn) { + currentKey = ffj_t_Offer_Operation_ID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'l': + + if bytes.Equal(ffj_key_Offer_Operation_Launch, kn) { + currentKey = ffj_t_Offer_Operation_Launch + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Offer_Operation_LaunchGroup, kn) { + currentKey = ffj_t_Offer_Operation_LaunchGroup + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'r': + + if bytes.Equal(ffj_key_Offer_Operation_Reserve, kn) { + currentKey = ffj_t_Offer_Operation_Reserve + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_Offer_Operation_ShrinkVolume, kn) { + currentKey = ffj_t_Offer_Operation_ShrinkVolume + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_Offer_Operation_Type, kn) { + currentKey = ffj_t_Offer_Operation_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'u': + + if bytes.Equal(ffj_key_Offer_Operation_Unreserve, kn) { + currentKey = ffj_t_Offer_Operation_Unreserve + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Offer_Operation_DestroyDisk, kn) { + currentKey = ffj_t_Offer_Operation_DestroyDisk + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Offer_Operation_CreateDisk, kn) { + currentKey = ffj_t_Offer_Operation_CreateDisk + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Offer_Operation_ShrinkVolume, kn) { + currentKey = ffj_t_Offer_Operation_ShrinkVolume + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_Offer_Operation_GrowVolume, kn) { + currentKey = ffj_t_Offer_Operation_GrowVolume + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Offer_Operation_Destroy, kn) { + currentKey = ffj_t_Offer_Operation_Destroy + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Offer_Operation_Create, kn) { + currentKey = ffj_t_Offer_Operation_Create + state = fflib.FFParse_want_colon + goto mainparse + } + + if 
fflib.EqualFoldRight(ffj_key_Offer_Operation_Unreserve, kn) { + currentKey = ffj_t_Offer_Operation_Unreserve + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Offer_Operation_Reserve, kn) { + currentKey = ffj_t_Offer_Operation_Reserve + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_Offer_Operation_LaunchGroup, kn) { + currentKey = ffj_t_Offer_Operation_LaunchGroup + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Offer_Operation_Launch, kn) { + currentKey = ffj_t_Offer_Operation_Launch + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Offer_Operation_ID, kn) { + currentKey = ffj_t_Offer_Operation_ID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Offer_Operation_Type, kn) { + currentKey = ffj_t_Offer_Operation_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Offer_Operationno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Offer_Operation_Type: + goto handle_Type + + case ffj_t_Offer_Operation_ID: + goto handle_ID + + case ffj_t_Offer_Operation_Launch: + goto handle_Launch + + case ffj_t_Offer_Operation_LaunchGroup: + goto handle_LaunchGroup + + case ffj_t_Offer_Operation_Reserve: + goto handle_Reserve + + case ffj_t_Offer_Operation_Unreserve: + goto handle_Unreserve + + case ffj_t_Offer_Operation_Create: + goto handle_Create + + case ffj_t_Offer_Operation_Destroy: + goto handle_Destroy + + case ffj_t_Offer_Operation_GrowVolume: + goto handle_GrowVolume + + case ffj_t_Offer_Operation_ShrinkVolume: + goto handle_ShrinkVolume + + case ffj_t_Offer_Operation_CreateDisk: + goto handle_CreateDisk + + case ffj_t_Offer_Operation_DestroyDisk: + goto handle_DestroyDisk + + case ffj_t_Offer_Operationno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Type: + + /* handler: uj.Type type=mesos.Offer_Operation_Type kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = uj.Type.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ID: + + /* handler: uj.ID type=mesos.OperationID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.ID = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.ID == nil { + uj.ID = new(OperationID) + } + + err = uj.ID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Launch: + + /* handler: uj.Launch type=mesos.Offer_Operation_Launch kind=struct 
quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Launch = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Launch == nil { + uj.Launch = new(Offer_Operation_Launch) + } + + err = uj.Launch.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_LaunchGroup: + + /* handler: uj.LaunchGroup type=mesos.Offer_Operation_LaunchGroup kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.LaunchGroup = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.LaunchGroup == nil { + uj.LaunchGroup = new(Offer_Operation_LaunchGroup) + } + + err = uj.LaunchGroup.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Reserve: + + /* handler: uj.Reserve type=mesos.Offer_Operation_Reserve kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Reserve = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Reserve == nil { + uj.Reserve = new(Offer_Operation_Reserve) + } + + err = uj.Reserve.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Unreserve: + + /* handler: uj.Unreserve type=mesos.Offer_Operation_Unreserve kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Unreserve = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Unreserve == nil { + uj.Unreserve = new(Offer_Operation_Unreserve) + } + + err = uj.Unreserve.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Create: + + /* handler: uj.Create type=mesos.Offer_Operation_Create kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Create = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Create == nil { + uj.Create = new(Offer_Operation_Create) + } + + err = uj.Create.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Destroy: + + /* handler: uj.Destroy type=mesos.Offer_Operation_Destroy kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Destroy = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Destroy == nil { + uj.Destroy = new(Offer_Operation_Destroy) + } + + err = uj.Destroy.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_GrowVolume: + + /* handler: uj.GrowVolume type=mesos.Offer_Operation_GrowVolume kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.GrowVolume = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.GrowVolume == nil { + uj.GrowVolume = new(Offer_Operation_GrowVolume) + } + + err = uj.GrowVolume.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ShrinkVolume: + + /* handler: uj.ShrinkVolume 
type=mesos.Offer_Operation_ShrinkVolume kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.ShrinkVolume = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.ShrinkVolume == nil { + uj.ShrinkVolume = new(Offer_Operation_ShrinkVolume) + } + + err = uj.ShrinkVolume.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_CreateDisk: + + /* handler: uj.CreateDisk type=mesos.Offer_Operation_CreateDisk kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.CreateDisk = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.CreateDisk == nil { + uj.CreateDisk = new(Offer_Operation_CreateDisk) + } + + err = uj.CreateDisk.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_DestroyDisk: + + /* handler: uj.DestroyDisk type=mesos.Offer_Operation_DestroyDisk kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.DestroyDisk = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.DestroyDisk == nil { + uj.DestroyDisk = new(Offer_Operation_DestroyDisk) + } + + err = uj.DestroyDisk.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Offer_Operation_Create) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Offer_Operation_Create) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"volumes":`) + if mj.Volumes != nil { + buf.WriteString(`[`) + for i, v := range mj.Volumes { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Offer_Operation_Createbase = iota + ffj_t_Offer_Operation_Createno_such_key + + ffj_t_Offer_Operation_Create_Volumes +) + +var ffj_key_Offer_Operation_Create_Volumes = []byte("volumes") + +func (uj *Offer_Operation_Create) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Offer_Operation_Create) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Offer_Operation_Createbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + 
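+ // [Editorial note, not part of the generated file.] fs.Scan()
+ // produces one lexer token per call; an FFTok_error token jumps to
+ // the shared tokerror label at the bottom of each function, which
+ // returns fs.BigError if set, otherwise converts the lexer's error
+ // state via fs.Error.ToError(), wrapping either with fs.WrapErr.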
if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Offer_Operation_Createno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'v': + + if bytes.Equal(ffj_key_Offer_Operation_Create_Volumes, kn) { + currentKey = ffj_t_Offer_Operation_Create_Volumes + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Offer_Operation_Create_Volumes, kn) { + currentKey = ffj_t_Offer_Operation_Create_Volumes + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Offer_Operation_Createno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Offer_Operation_Create_Volumes: + goto handle_Volumes + + case ffj_t_Offer_Operation_Createno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Volumes: + + /* handler: uj.Volumes type=[]mesos.Resource kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Volumes = nil + } else { + + uj.Volumes = []Resource{} + + wantVal := true + + for { + + var tmp_uj__Volumes Resource + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Volumes type=mesos.Resource kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Volumes.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Volumes = append(uj.Volumes, tmp_uj__Volumes) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Offer_Operation_CreateDisk) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Offer_Operation_CreateDisk) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"source":`) + + { + + err = mj.Source.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteString(`,"target_type":`) + + { + + obj, err = mj.TargetType.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Offer_Operation_CreateDiskbase = iota + ffj_t_Offer_Operation_CreateDiskno_such_key + + ffj_t_Offer_Operation_CreateDisk_Source + + ffj_t_Offer_Operation_CreateDisk_TargetType +) + +var ffj_key_Offer_Operation_CreateDisk_Source = []byte("source") + +var ffj_key_Offer_Operation_CreateDisk_TargetType = []byte("target_type") + +func (uj *Offer_Operation_CreateDisk) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Offer_Operation_CreateDisk) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Offer_Operation_CreateDiskbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
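+ // [Editorial note, not part of the generated file.] Key dispatch is
+ // two-tier: first an exact, case-sensitive match switched on the
+ // key's first byte (bytes.Equal against the ffj_key_* constants),
+ // then a case-insensitive pass via EqualFoldRight / AsciiEqualFold /
+ // SimpleLetterEqualFold. Keys matching neither tier fall through to
+ // the *no_such_key state, whose value is later discarded with
+ // fs.SkipField, so unknown JSON fields are ignored, not rejected.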
+ currentKey = ffj_t_Offer_Operation_CreateDiskno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 's': + + if bytes.Equal(ffj_key_Offer_Operation_CreateDisk_Source, kn) { + currentKey = ffj_t_Offer_Operation_CreateDisk_Source + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_Offer_Operation_CreateDisk_TargetType, kn) { + currentKey = ffj_t_Offer_Operation_CreateDisk_TargetType + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.AsciiEqualFold(ffj_key_Offer_Operation_CreateDisk_TargetType, kn) { + currentKey = ffj_t_Offer_Operation_CreateDisk_TargetType + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Offer_Operation_CreateDisk_Source, kn) { + currentKey = ffj_t_Offer_Operation_CreateDisk_Source + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Offer_Operation_CreateDiskno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Offer_Operation_CreateDisk_Source: + goto handle_Source + + case ffj_t_Offer_Operation_CreateDisk_TargetType: + goto handle_TargetType + + case ffj_t_Offer_Operation_CreateDiskno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Source: + + /* handler: uj.Source type=mesos.Resource kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.Source.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_TargetType: + + /* handler: uj.TargetType type=mesos.Resource_DiskInfo_Source_Type kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = uj.TargetType.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Offer_Operation_Destroy) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Offer_Operation_Destroy) MarshalJSONBuf(buf fflib.EncodingBuffer) error { 
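+ // [Editorial note, not part of the generated file.] Every generated
+ // type follows the same encoding split: MarshalJSON allocates an
+ // fflib.Buffer and delegates to MarshalJSONBuf, which appends
+ // directly to the caller's buffer so nested messages marshal without
+ // intermediate allocations; a nil receiver encodes as JSON null.
+ // A minimal usage sketch (hypothetical field values):
+ //
+ //	op := &Offer_Operation_Destroy{Volumes: []Resource{}}
+ //	data, err := op.MarshalJSON() // data == []byte(`{"volumes":[]}`)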
+ if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"volumes":`) + if mj.Volumes != nil { + buf.WriteString(`[`) + for i, v := range mj.Volumes { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Offer_Operation_Destroybase = iota + ffj_t_Offer_Operation_Destroyno_such_key + + ffj_t_Offer_Operation_Destroy_Volumes +) + +var ffj_key_Offer_Operation_Destroy_Volumes = []byte("volumes") + +func (uj *Offer_Operation_Destroy) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Offer_Operation_Destroy) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Offer_Operation_Destroybase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Offer_Operation_Destroyno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'v': + + if bytes.Equal(ffj_key_Offer_Operation_Destroy_Volumes, kn) { + currentKey = ffj_t_Offer_Operation_Destroy_Volumes + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Offer_Operation_Destroy_Volumes, kn) { + currentKey = ffj_t_Offer_Operation_Destroy_Volumes + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Offer_Operation_Destroyno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Offer_Operation_Destroy_Volumes: + goto handle_Volumes + + case ffj_t_Offer_Operation_Destroyno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Volumes: + + /* handler: uj.Volumes type=[]mesos.Resource kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Volumes = nil + } else { + + uj.Volumes = []Resource{} + + wantVal := true + + for { + + var tmp_uj__Volumes Resource + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Volumes type=mesos.Resource kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Volumes.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Volumes = append(uj.Volumes, tmp_uj__Volumes) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Offer_Operation_DestroyDisk) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Offer_Operation_DestroyDisk) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"source":`) + + { + + err = mj.Source.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Offer_Operation_DestroyDiskbase = iota + ffj_t_Offer_Operation_DestroyDiskno_such_key + + ffj_t_Offer_Operation_DestroyDisk_Source +) + +var ffj_key_Offer_Operation_DestroyDisk_Source = []byte("source") + +func (uj *Offer_Operation_DestroyDisk) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Offer_Operation_DestroyDisk) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Offer_Operation_DestroyDiskbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
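+ // [Editorial note, not part of the generated file.] JSON null is
+ // handled asymmetrically by the field handlers: optional fields
+ // declared as pointers (uj.CreateDisk, uj.Launch, ...) are reset to
+ // nil on null and lazily allocated with new(...) before decoding,
+ // whereas required value fields such as uj.Source below simply skip
+ // the null token and keep their zero value.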
+ currentKey = ffj_t_Offer_Operation_DestroyDiskno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 's': + + if bytes.Equal(ffj_key_Offer_Operation_DestroyDisk_Source, kn) { + currentKey = ffj_t_Offer_Operation_DestroyDisk_Source + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Offer_Operation_DestroyDisk_Source, kn) { + currentKey = ffj_t_Offer_Operation_DestroyDisk_Source + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Offer_Operation_DestroyDiskno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Offer_Operation_DestroyDisk_Source: + goto handle_Source + + case ffj_t_Offer_Operation_DestroyDiskno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Source: + + /* handler: uj.Source type=mesos.Resource kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.Source.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Offer_Operation_GrowVolume) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Offer_Operation_GrowVolume) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"volume":`) + + { + + err = mj.Volume.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteString(`,"addition":`) + + { + + err = mj.Addition.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Offer_Operation_GrowVolumebase = iota + ffj_t_Offer_Operation_GrowVolumeno_such_key + + ffj_t_Offer_Operation_GrowVolume_Volume + + ffj_t_Offer_Operation_GrowVolume_Addition +) + +var ffj_key_Offer_Operation_GrowVolume_Volume = []byte("volume") + +var ffj_key_Offer_Operation_GrowVolume_Addition = []byte("addition") + +func (uj *Offer_Operation_GrowVolume) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Offer_Operation_GrowVolume) 
UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Offer_Operation_GrowVolumebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Offer_Operation_GrowVolumeno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'a': + + if bytes.Equal(ffj_key_Offer_Operation_GrowVolume_Addition, kn) { + currentKey = ffj_t_Offer_Operation_GrowVolume_Addition + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'v': + + if bytes.Equal(ffj_key_Offer_Operation_GrowVolume_Volume, kn) { + currentKey = ffj_t_Offer_Operation_GrowVolume_Volume + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Offer_Operation_GrowVolume_Addition, kn) { + currentKey = ffj_t_Offer_Operation_GrowVolume_Addition + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Offer_Operation_GrowVolume_Volume, kn) { + currentKey = ffj_t_Offer_Operation_GrowVolume_Volume + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Offer_Operation_GrowVolumeno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Offer_Operation_GrowVolume_Volume: + goto handle_Volume + + case ffj_t_Offer_Operation_GrowVolume_Addition: + goto handle_Addition + + case ffj_t_Offer_Operation_GrowVolumeno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Volume: + + /* handler: uj.Volume type=mesos.Resource kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.Volume.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Addition: + + /* handler: uj.Addition type=mesos.Resource kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = 
uj.Addition.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Offer_Operation_Launch) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Offer_Operation_Launch) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"task_infos":`) + if mj.TaskInfos != nil { + buf.WriteString(`[`) + for i, v := range mj.TaskInfos { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Offer_Operation_Launchbase = iota + ffj_t_Offer_Operation_Launchno_such_key + + ffj_t_Offer_Operation_Launch_TaskInfos +) + +var ffj_key_Offer_Operation_Launch_TaskInfos = []byte("task_infos") + +func (uj *Offer_Operation_Launch) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Offer_Operation_Launch) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Offer_Operation_Launchbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Offer_Operation_Launchno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 't': + + if bytes.Equal(ffj_key_Offer_Operation_Launch_TaskInfos, kn) { + currentKey = ffj_t_Offer_Operation_Launch_TaskInfos + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Offer_Operation_Launch_TaskInfos, kn) { + currentKey = ffj_t_Offer_Operation_Launch_TaskInfos + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Offer_Operation_Launchno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Offer_Operation_Launch_TaskInfos: + goto handle_TaskInfos + + case ffj_t_Offer_Operation_Launchno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_TaskInfos: + + /* handler: uj.TaskInfos type=[]mesos.TaskInfo kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.TaskInfos = nil + } else { + + uj.TaskInfos = []TaskInfo{} + + wantVal := true + + for { + + var tmp_uj__TaskInfos TaskInfo + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__TaskInfos type=mesos.TaskInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__TaskInfos.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.TaskInfos = append(uj.TaskInfos, tmp_uj__TaskInfos) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Offer_Operation_LaunchGroup) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Offer_Operation_LaunchGroup) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"executor":`) + + { + + err = mj.Executor.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteString(`,"task_group":`) + + { + + err = mj.TaskGroup.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Offer_Operation_LaunchGroupbase = iota + ffj_t_Offer_Operation_LaunchGroupno_such_key + + ffj_t_Offer_Operation_LaunchGroup_Executor + + ffj_t_Offer_Operation_LaunchGroup_TaskGroup +) + +var ffj_key_Offer_Operation_LaunchGroup_Executor = []byte("executor") + +var ffj_key_Offer_Operation_LaunchGroup_TaskGroup = []byte("task_group") + +func (uj *Offer_Operation_LaunchGroup) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Offer_Operation_LaunchGroup) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Offer_Operation_LaunchGroupbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Offer_Operation_LaunchGroupno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'e': + + if bytes.Equal(ffj_key_Offer_Operation_LaunchGroup_Executor, kn) { + currentKey = ffj_t_Offer_Operation_LaunchGroup_Executor + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_Offer_Operation_LaunchGroup_TaskGroup, kn) { + currentKey = ffj_t_Offer_Operation_LaunchGroup_TaskGroup + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Offer_Operation_LaunchGroup_TaskGroup, kn) { + currentKey = ffj_t_Offer_Operation_LaunchGroup_TaskGroup + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Offer_Operation_LaunchGroup_Executor, kn) { + currentKey = ffj_t_Offer_Operation_LaunchGroup_Executor + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Offer_Operation_LaunchGroupno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Offer_Operation_LaunchGroup_Executor: + goto handle_Executor + + case ffj_t_Offer_Operation_LaunchGroup_TaskGroup: + goto handle_TaskGroup + + case ffj_t_Offer_Operation_LaunchGroupno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Executor: + + /* handler: uj.Executor type=mesos.ExecutorInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.Executor.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_TaskGroup: + + /* handler: uj.TaskGroup type=mesos.TaskGroupInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.TaskGroup.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Offer_Operation_Reserve) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Offer_Operation_Reserve) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + 
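+ // [Editorial note, not part of the generated file.] The same
+ // optional/required split shows up on the encoding side: in
+ // Offer_Operation each pointer sub-message is guarded with
+ // `if mj.X != nil` before its key is emitted, while value fields
+ // such as LaunchGroup's executor and task_group above are always
+ // written.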
return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"resources":`) + if mj.Resources != nil { + buf.WriteString(`[`) + for i, v := range mj.Resources { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Offer_Operation_Reservebase = iota + ffj_t_Offer_Operation_Reserveno_such_key + + ffj_t_Offer_Operation_Reserve_Resources +) + +var ffj_key_Offer_Operation_Reserve_Resources = []byte("resources") + +func (uj *Offer_Operation_Reserve) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Offer_Operation_Reserve) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Offer_Operation_Reservebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Offer_Operation_Reserveno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'r': + + if bytes.Equal(ffj_key_Offer_Operation_Reserve_Resources, kn) { + currentKey = ffj_t_Offer_Operation_Reserve_Resources + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Offer_Operation_Reserve_Resources, kn) { + currentKey = ffj_t_Offer_Operation_Reserve_Resources + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Offer_Operation_Reserveno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Offer_Operation_Reserve_Resources: + goto handle_Resources + + case ffj_t_Offer_Operation_Reserveno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Resources: + + /* handler: uj.Resources type=[]mesos.Resource kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Resources = nil + } else { + + uj.Resources = []Resource{} + + wantVal := true + + for { + + var tmp_uj__Resources Resource + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
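+ // [Editorial note, not part of the generated file.] The slice
+ // handlers use a wantVal sentinel: it is true whenever the next
+ // token must be an element, so a comma seen while wantVal is still
+ // true (a leading comma, or input like [,,,]) is rejected here;
+ // otherwise each element is decoded into a temporary and appended,
+ // and a ']' (FFTok_right_brace in fflib's naming) ends the loop.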
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Resources type=mesos.Resource kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Resources.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Resources = append(uj.Resources, tmp_uj__Resources) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Offer_Operation_ShrinkVolume) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Offer_Operation_ShrinkVolume) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"volume":`) + + { + + err = mj.Volume.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteString(`,"subtract":`) + + { + + err = mj.Subtract.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Offer_Operation_ShrinkVolumebase = iota + ffj_t_Offer_Operation_ShrinkVolumeno_such_key + + ffj_t_Offer_Operation_ShrinkVolume_Volume + + ffj_t_Offer_Operation_ShrinkVolume_Subtract +) + +var ffj_key_Offer_Operation_ShrinkVolume_Volume = []byte("volume") + +var ffj_key_Offer_Operation_ShrinkVolume_Subtract = []byte("subtract") + +func (uj *Offer_Operation_ShrinkVolume) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Offer_Operation_ShrinkVolume) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Offer_Operation_ShrinkVolumebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Offer_Operation_ShrinkVolumeno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 's': + + if bytes.Equal(ffj_key_Offer_Operation_ShrinkVolume_Subtract, kn) { + currentKey = ffj_t_Offer_Operation_ShrinkVolume_Subtract + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'v': + + if bytes.Equal(ffj_key_Offer_Operation_ShrinkVolume_Volume, kn) { + currentKey = ffj_t_Offer_Operation_ShrinkVolume_Volume + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Offer_Operation_ShrinkVolume_Subtract, kn) { + currentKey = ffj_t_Offer_Operation_ShrinkVolume_Subtract + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Offer_Operation_ShrinkVolume_Volume, kn) { + currentKey = ffj_t_Offer_Operation_ShrinkVolume_Volume + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Offer_Operation_ShrinkVolumeno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Offer_Operation_ShrinkVolume_Volume: + goto handle_Volume + + case ffj_t_Offer_Operation_ShrinkVolume_Subtract: + goto handle_Subtract + + case ffj_t_Offer_Operation_ShrinkVolumeno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Volume: + + /* handler: uj.Volume type=mesos.Resource kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.Volume.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Subtract: + + /* handler: uj.Subtract type=mesos.Value_Scalar kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.Subtract.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Offer_Operation_Unreserve) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Offer_Operation_Unreserve) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + 
var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"resources":`) + if mj.Resources != nil { + buf.WriteString(`[`) + for i, v := range mj.Resources { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Offer_Operation_Unreservebase = iota + ffj_t_Offer_Operation_Unreserveno_such_key + + ffj_t_Offer_Operation_Unreserve_Resources +) + +var ffj_key_Offer_Operation_Unreserve_Resources = []byte("resources") + +func (uj *Offer_Operation_Unreserve) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Offer_Operation_Unreserve) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Offer_Operation_Unreservebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
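+ // Key dispatch below: switch on the first byte, try an exact bytes.Equal,
+ // then fall back to the case-insensitive EqualFold* helpers before
+ // declaring the key unknown.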
+ currentKey = ffj_t_Offer_Operation_Unreserveno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'r': + + if bytes.Equal(ffj_key_Offer_Operation_Unreserve_Resources, kn) { + currentKey = ffj_t_Offer_Operation_Unreserve_Resources + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Offer_Operation_Unreserve_Resources, kn) { + currentKey = ffj_t_Offer_Operation_Unreserve_Resources + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Offer_Operation_Unreserveno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Offer_Operation_Unreserve_Resources: + goto handle_Resources + + case ffj_t_Offer_Operation_Unreserveno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Resources: + + /* handler: uj.Resources type=[]mesos.Resource kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Resources = nil + } else { + + uj.Resources = []Resource{} + + wantVal := true + + for { + + var tmp_uj__Resources Resource + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
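+ // A comma while wantVal is still true means an empty array slot, so
+ // malformed inputs such as [,,,] are rejected here.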
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Resources type=mesos.Resource kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Resources.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Resources = append(uj.Resources, tmp_uj__Resources) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Operation) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Operation) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteByte('{') + if mj.FrameworkID != nil { + if true { + buf.WriteString(`"framework_id":`) + + { + + err = mj.FrameworkID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.AgentID != nil { + if true { + buf.WriteString(`"agent_id":`) + + { + + err = mj.AgentID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.WriteString(`"info":`) + + { + + err = mj.Info.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteString(`,"latest_status":`) + + { + + err = mj.LatestStatus.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteString(`,"statuses":`) + if mj.Statuses != nil { + buf.WriteString(`[`) + for i, v := range mj.Statuses { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteString(`,"uuid":`) + + { + + err = mj.UUID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Operationbase = iota + ffj_t_Operationno_such_key + + ffj_t_Operation_FrameworkID + + ffj_t_Operation_AgentID + + ffj_t_Operation_Info + + ffj_t_Operation_LatestStatus + + ffj_t_Operation_Statuses + + ffj_t_Operation_UUID +) + +var ffj_key_Operation_FrameworkID = []byte("framework_id") + +var ffj_key_Operation_AgentID = []byte("agent_id") + +var ffj_key_Operation_Info = []byte("info") + +var ffj_key_Operation_LatestStatus = []byte("latest_status") + +var ffj_key_Operation_Statuses = []byte("statuses") + +var ffj_key_Operation_UUID = []byte("uuid") + +func (uj *Operation) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Operation) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Operationbase + _ = currentKey + tok := fflib.FFTok_init + 
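+ // Each generated parser is a goto-driven state machine: the mainparse
+ // loop advances the lexer through the FFParse_* states and jumps to a
+ // handle_<Field> label once a key has been matched.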
wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Operationno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'a': + + if bytes.Equal(ffj_key_Operation_AgentID, kn) { + currentKey = ffj_t_Operation_AgentID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'f': + + if bytes.Equal(ffj_key_Operation_FrameworkID, kn) { + currentKey = ffj_t_Operation_FrameworkID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffj_key_Operation_Info, kn) { + currentKey = ffj_t_Operation_Info + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'l': + + if bytes.Equal(ffj_key_Operation_LatestStatus, kn) { + currentKey = ffj_t_Operation_LatestStatus + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_Operation_Statuses, kn) { + currentKey = ffj_t_Operation_Statuses + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'u': + + if bytes.Equal(ffj_key_Operation_UUID, kn) { + currentKey = ffj_t_Operation_UUID + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Operation_UUID, kn) { + currentKey = ffj_t_Operation_UUID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Operation_Statuses, kn) { + currentKey = ffj_t_Operation_Statuses + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Operation_LatestStatus, kn) { + currentKey = ffj_t_Operation_LatestStatus + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Operation_Info, kn) { + currentKey = ffj_t_Operation_Info + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_Operation_AgentID, kn) { + currentKey = ffj_t_Operation_AgentID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Operation_FrameworkID, kn) { + currentKey = ffj_t_Operation_FrameworkID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Operationno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Operation_FrameworkID: + goto 
handle_FrameworkID + + case ffj_t_Operation_AgentID: + goto handle_AgentID + + case ffj_t_Operation_Info: + goto handle_Info + + case ffj_t_Operation_LatestStatus: + goto handle_LatestStatus + + case ffj_t_Operation_Statuses: + goto handle_Statuses + + case ffj_t_Operation_UUID: + goto handle_UUID + + case ffj_t_Operationno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_FrameworkID: + + /* handler: uj.FrameworkID type=mesos.FrameworkID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.FrameworkID = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.FrameworkID == nil { + uj.FrameworkID = new(FrameworkID) + } + + err = uj.FrameworkID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_AgentID: + + /* handler: uj.AgentID type=mesos.AgentID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.AgentID = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.AgentID == nil { + uj.AgentID = new(AgentID) + } + + err = uj.AgentID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Info: + + /* handler: uj.Info type=mesos.Offer_Operation kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.Info.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_LatestStatus: + + /* handler: uj.LatestStatus type=mesos.OperationStatus kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.LatestStatus.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Statuses: + + /* handler: uj.Statuses type=[]mesos.OperationStatus kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Statuses = nil + } else { + + uj.Statuses = []OperationStatus{} + + wantVal := true + + for { + + var tmp_uj__Statuses OperationStatus + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
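+ // On the success path below, every element is decoded by
+ // OperationStatus.UnmarshalJSONFFLexer entered in FFParse_want_key,
+ // since the element's opening '{' was already consumed by Scan.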
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Statuses type=mesos.OperationStatus kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Statuses.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Statuses = append(uj.Statuses, tmp_uj__Statuses) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_UUID: + + /* handler: uj.UUID type=mesos.UUID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.UUID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *OperationID) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *OperationID) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"value":`) + fflib.WriteJsonString(buf, string(mj.Value)) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_OperationIDbase = iota + ffj_t_OperationIDno_such_key + + ffj_t_OperationID_Value +) + +var ffj_key_OperationID_Value = []byte("value") + +func (uj *OperationID) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *OperationID) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_OperationIDbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
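+ // fs.Output holds the scanned string token with JSON escapes already
+ // decoded, so kn can be compared byte-for-byte against the ffj_key_*
+ // tables.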
+ currentKey = ffj_t_OperationIDno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'v': + + if bytes.Equal(ffj_key_OperationID_Value, kn) { + currentKey = ffj_t_OperationID_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_OperationID_Value, kn) { + currentKey = ffj_t_OperationID_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_OperationIDno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_OperationID_Value: + goto handle_Value + + case ffj_t_OperationIDno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Value: + + /* handler: uj.Value type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Value = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *OperationStatus) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *OperationStatus) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.OperationID != nil { + if true { + buf.WriteString(`"operation_id":`) + + { + + err = mj.OperationID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.WriteString(`"state":`) + + { + + obj, err = mj.State.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + if mj.Message != nil { + if true { + buf.WriteString(`"message":`) + fflib.WriteJsonString(buf, string(*mj.Message)) + buf.WriteByte(',') + } + } + buf.WriteString(`"converted_resources":`) + if mj.ConvertedResources != nil { + buf.WriteString(`[`) + for i, v := range mj.ConvertedResources { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + if mj.UUID != nil { + if true { + buf.WriteString(`"uuid":`) 
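+ // Optional pointer fields are written only when non-nil (the inner
+ // "if true" looks like a codegen placeholder); every field appends a
+ // trailing comma that buf.Rewind(1) strips before the closing brace.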
+ + { + + err = mj.UUID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_OperationStatusbase = iota + ffj_t_OperationStatusno_such_key + + ffj_t_OperationStatus_OperationID + + ffj_t_OperationStatus_State + + ffj_t_OperationStatus_Message + + ffj_t_OperationStatus_ConvertedResources + + ffj_t_OperationStatus_UUID +) + +var ffj_key_OperationStatus_OperationID = []byte("operation_id") + +var ffj_key_OperationStatus_State = []byte("state") + +var ffj_key_OperationStatus_Message = []byte("message") + +var ffj_key_OperationStatus_ConvertedResources = []byte("converted_resources") + +var ffj_key_OperationStatus_UUID = []byte("uuid") + +func (uj *OperationStatus) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *OperationStatus) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_OperationStatusbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
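+ // The ffj_t_OperationStatus_* constants above enumerate this struct's
+ // keys; currentKey carries the match result from the key scan to the
+ // value handlers.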
+ currentKey = ffj_t_OperationStatusno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_OperationStatus_ConvertedResources, kn) { + currentKey = ffj_t_OperationStatus_ConvertedResources + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'm': + + if bytes.Equal(ffj_key_OperationStatus_Message, kn) { + currentKey = ffj_t_OperationStatus_Message + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'o': + + if bytes.Equal(ffj_key_OperationStatus_OperationID, kn) { + currentKey = ffj_t_OperationStatus_OperationID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_OperationStatus_State, kn) { + currentKey = ffj_t_OperationStatus_State + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'u': + + if bytes.Equal(ffj_key_OperationStatus_UUID, kn) { + currentKey = ffj_t_OperationStatus_UUID + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_OperationStatus_UUID, kn) { + currentKey = ffj_t_OperationStatus_UUID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_OperationStatus_ConvertedResources, kn) { + currentKey = ffj_t_OperationStatus_ConvertedResources + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_OperationStatus_Message, kn) { + currentKey = ffj_t_OperationStatus_Message + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_OperationStatus_State, kn) { + currentKey = ffj_t_OperationStatus_State + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_OperationStatus_OperationID, kn) { + currentKey = ffj_t_OperationStatus_OperationID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_OperationStatusno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_OperationStatus_OperationID: + goto handle_OperationID + + case ffj_t_OperationStatus_State: + goto handle_State + + case ffj_t_OperationStatus_Message: + goto handle_Message + + case ffj_t_OperationStatus_ConvertedResources: + goto handle_ConvertedResources + + case ffj_t_OperationStatus_UUID: + goto handle_UUID + + case ffj_t_OperationStatusno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_OperationID: + + /* handler: uj.OperationID type=mesos.OperationID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.OperationID = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.OperationID == nil { + uj.OperationID = new(OperationID) + } + + err = uj.OperationID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_State: + + /* handler: uj.State type=mesos.OperationState 
kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = uj.State.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Message: + + /* handler: uj.Message type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Message = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Message = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ConvertedResources: + + /* handler: uj.ConvertedResources type=[]mesos.Resource kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.ConvertedResources = nil + } else { + + uj.ConvertedResources = []Resource{} + + wantVal := true + + for { + + var tmp_uj__ConvertedResources Resource + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__ConvertedResources type=mesos.Resource kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__ConvertedResources.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.ConvertedResources = append(uj.ConvertedResources, tmp_uj__ConvertedResources) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_UUID: + + /* handler: uj.UUID type=mesos.UUID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.UUID = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.UUID == nil { + uj.UUID = new(UUID) + } + + err = uj.UUID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Parameter) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Parameter) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + 
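+ // A nil *Parameter encodes as JSON null; otherwise the key/value pair is
+ // written via fflib.WriteJsonString, which quotes and escapes the strings.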
buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"key":`) + fflib.WriteJsonString(buf, string(mj.Key)) + buf.WriteString(`,"value":`) + fflib.WriteJsonString(buf, string(mj.Value)) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Parameterbase = iota + ffj_t_Parameterno_such_key + + ffj_t_Parameter_Key + + ffj_t_Parameter_Value +) + +var ffj_key_Parameter_Key = []byte("key") + +var ffj_key_Parameter_Value = []byte("value") + +func (uj *Parameter) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Parameter) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Parameterbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Parameterno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'k': + + if bytes.Equal(ffj_key_Parameter_Key, kn) { + currentKey = ffj_t_Parameter_Key + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'v': + + if bytes.Equal(ffj_key_Parameter_Value, kn) { + currentKey = ffj_t_Parameter_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Parameter_Value, kn) { + currentKey = ffj_t_Parameter_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Parameter_Key, kn) { + currentKey = ffj_t_Parameter_Key + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Parameterno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Parameter_Key: + goto handle_Key + + case ffj_t_Parameter_Value: + goto handle_Value + + case ffj_t_Parameterno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Key: + + /* handler: uj.Key type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot 
unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Key = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Value: + + /* handler: uj.Value type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Value = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Parameters) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Parameters) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"parameter":`) + if mj.Parameter != nil { + buf.WriteString(`[`) + for i, v := range mj.Parameter { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Parametersbase = iota + ffj_t_Parametersno_such_key + + ffj_t_Parameters_Parameter +) + +var ffj_key_Parameters_Parameter = []byte("parameter") + +func (uj *Parameters) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Parameters) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Parametersbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
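+ // Note the fflib token names used above: FFTok_left_bracket is the object
+ // opener '{' while FFTok_left_brace is the array opener '[', the reverse
+ // of the usual reading.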
+ currentKey = ffj_t_Parametersno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'p': + + if bytes.Equal(ffj_key_Parameters_Parameter, kn) { + currentKey = ffj_t_Parameters_Parameter + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Parameters_Parameter, kn) { + currentKey = ffj_t_Parameters_Parameter + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Parametersno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Parameters_Parameter: + goto handle_Parameter + + case ffj_t_Parametersno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Parameter: + + /* handler: uj.Parameter type=[]mesos.Parameter kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Parameter = nil + } else { + + uj.Parameter = []Parameter{} + + wantVal := true + + for { + + var tmp_uj__Parameter Parameter + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Parameter type=mesos.Parameter kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Parameter.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Parameter = append(uj.Parameter, tmp_uj__Parameter) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *PerfStatistics) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *PerfStatistics) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "timestamp":`) + fflib.AppendFloat(buf, float64(mj.Timestamp), 'g', -1, 64) + buf.WriteString(`,"duration":`) + fflib.AppendFloat(buf, float64(mj.Duration), 'g', -1, 64) + buf.WriteByte(',') + if mj.Cycles != nil { + if true { + buf.WriteString(`"cycles":`) + fflib.FormatBits2(buf, uint64(*mj.Cycles), 10, false) + buf.WriteByte(',') + } + } + if mj.StalledCyclesFrontend != nil { + if true { + buf.WriteString(`"stalled_cycles_frontend":`) + fflib.FormatBits2(buf, uint64(*mj.StalledCyclesFrontend), 10, false) + buf.WriteByte(',') + } + } + if mj.StalledCyclesBackend != nil { + if true { + buf.WriteString(`"stalled_cycles_backend":`) + fflib.FormatBits2(buf, uint64(*mj.StalledCyclesBackend), 10, false) + buf.WriteByte(',') + } + } + if mj.Instructions != nil { + if true { + buf.WriteString(`"instructions":`) + fflib.FormatBits2(buf, uint64(*mj.Instructions), 10, false) + buf.WriteByte(',') + } + } + if mj.CacheReferences != nil { + if true { + buf.WriteString(`"cache_references":`) + fflib.FormatBits2(buf, uint64(*mj.CacheReferences), 10, false) + buf.WriteByte(',') + } + } + if mj.CacheMisses != nil { + if true { + buf.WriteString(`"cache_misses":`) + fflib.FormatBits2(buf, uint64(*mj.CacheMisses), 10, false) + buf.WriteByte(',') + } + } + if mj.Branches != nil { + if true { + buf.WriteString(`"branches":`) + fflib.FormatBits2(buf, uint64(*mj.Branches), 10, false) + buf.WriteByte(',') + } + } + if mj.BranchMisses != nil { + if true { + buf.WriteString(`"branch_misses":`) + fflib.FormatBits2(buf, uint64(*mj.BranchMisses), 10, false) + buf.WriteByte(',') + } + } + if mj.BusCycles != nil { + if true { + buf.WriteString(`"bus_cycles":`) + fflib.FormatBits2(buf, uint64(*mj.BusCycles), 10, false) + buf.WriteByte(',') + } + } + if mj.RefCycles != nil { + if true { + buf.WriteString(`"ref_cycles":`) + fflib.FormatBits2(buf, uint64(*mj.RefCycles), 10, false) + buf.WriteByte(',') + } + } + if mj.CPUClock != nil { + if true { + buf.WriteString(`"cpu_clock":`) + fflib.AppendFloat(buf, 
float64(*mj.CPUClock), 'g', -1, 64) + buf.WriteByte(',') + } + } + if mj.TaskClock != nil { + if true { + buf.WriteString(`"task_clock":`) + fflib.AppendFloat(buf, float64(*mj.TaskClock), 'g', -1, 64) + buf.WriteByte(',') + } + } + if mj.PageFaults != nil { + if true { + buf.WriteString(`"page_faults":`) + fflib.FormatBits2(buf, uint64(*mj.PageFaults), 10, false) + buf.WriteByte(',') + } + } + if mj.MinorFaults != nil { + if true { + buf.WriteString(`"minor_faults":`) + fflib.FormatBits2(buf, uint64(*mj.MinorFaults), 10, false) + buf.WriteByte(',') + } + } + if mj.MajorFaults != nil { + if true { + buf.WriteString(`"major_faults":`) + fflib.FormatBits2(buf, uint64(*mj.MajorFaults), 10, false) + buf.WriteByte(',') + } + } + if mj.ContextSwitches != nil { + if true { + buf.WriteString(`"context_switches":`) + fflib.FormatBits2(buf, uint64(*mj.ContextSwitches), 10, false) + buf.WriteByte(',') + } + } + if mj.CPUMigrations != nil { + if true { + buf.WriteString(`"cpu_migrations":`) + fflib.FormatBits2(buf, uint64(*mj.CPUMigrations), 10, false) + buf.WriteByte(',') + } + } + if mj.AlignmentFaults != nil { + if true { + buf.WriteString(`"alignment_faults":`) + fflib.FormatBits2(buf, uint64(*mj.AlignmentFaults), 10, false) + buf.WriteByte(',') + } + } + if mj.EmulationFaults != nil { + if true { + buf.WriteString(`"emulation_faults":`) + fflib.FormatBits2(buf, uint64(*mj.EmulationFaults), 10, false) + buf.WriteByte(',') + } + } + if mj.L1DcacheLoads != nil { + if true { + buf.WriteString(`"l1_dcache_loads":`) + fflib.FormatBits2(buf, uint64(*mj.L1DcacheLoads), 10, false) + buf.WriteByte(',') + } + } + if mj.L1DcacheLoadMisses != nil { + if true { + buf.WriteString(`"l1_dcache_load_misses":`) + fflib.FormatBits2(buf, uint64(*mj.L1DcacheLoadMisses), 10, false) + buf.WriteByte(',') + } + } + if mj.L1DcacheStores != nil { + if true { + buf.WriteString(`"l1_dcache_stores":`) + fflib.FormatBits2(buf, uint64(*mj.L1DcacheStores), 10, false) + buf.WriteByte(',') + } + } + if mj.L1DcacheStoreMisses != nil { + if true { + buf.WriteString(`"l1_dcache_store_misses":`) + fflib.FormatBits2(buf, uint64(*mj.L1DcacheStoreMisses), 10, false) + buf.WriteByte(',') + } + } + if mj.L1DcachePrefetches != nil { + if true { + buf.WriteString(`"l1_dcache_prefetches":`) + fflib.FormatBits2(buf, uint64(*mj.L1DcachePrefetches), 10, false) + buf.WriteByte(',') + } + } + if mj.L1DcachePrefetchMisses != nil { + if true { + buf.WriteString(`"l1_dcache_prefetch_misses":`) + fflib.FormatBits2(buf, uint64(*mj.L1DcachePrefetchMisses), 10, false) + buf.WriteByte(',') + } + } + if mj.L1IcacheLoads != nil { + if true { + buf.WriteString(`"l1_icache_loads":`) + fflib.FormatBits2(buf, uint64(*mj.L1IcacheLoads), 10, false) + buf.WriteByte(',') + } + } + if mj.L1IcacheLoadMisses != nil { + if true { + buf.WriteString(`"l1_icache_load_misses":`) + fflib.FormatBits2(buf, uint64(*mj.L1IcacheLoadMisses), 10, false) + buf.WriteByte(',') + } + } + if mj.L1IcachePrefetches != nil { + if true { + buf.WriteString(`"l1_icache_prefetches":`) + fflib.FormatBits2(buf, uint64(*mj.L1IcachePrefetches), 10, false) + buf.WriteByte(',') + } + } + if mj.L1IcachePrefetchMisses != nil { + if true { + buf.WriteString(`"l1_icache_prefetch_misses":`) + fflib.FormatBits2(buf, uint64(*mj.L1IcachePrefetchMisses), 10, false) + buf.WriteByte(',') + } + } + if mj.LLCLoads != nil { + if true { + buf.WriteString(`"llc_loads":`) + fflib.FormatBits2(buf, uint64(*mj.LLCLoads), 10, false) + buf.WriteByte(',') + } + } + if mj.LLCLoadMisses != nil { + if true { + 
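+ // The *uint64 counters are rendered with fflib.FormatBits2 in base 10;
+ // the clock fields above use AppendFloat with 'g' and precision -1 for
+ // the shortest float form.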
buf.WriteString(`"llc_load_misses":`) + fflib.FormatBits2(buf, uint64(*mj.LLCLoadMisses), 10, false) + buf.WriteByte(',') + } + } + if mj.LLCStores != nil { + if true { + buf.WriteString(`"llc_stores":`) + fflib.FormatBits2(buf, uint64(*mj.LLCStores), 10, false) + buf.WriteByte(',') + } + } + if mj.LLCStoreMisses != nil { + if true { + buf.WriteString(`"llc_store_misses":`) + fflib.FormatBits2(buf, uint64(*mj.LLCStoreMisses), 10, false) + buf.WriteByte(',') + } + } + if mj.LLCPrefetches != nil { + if true { + buf.WriteString(`"llc_prefetches":`) + fflib.FormatBits2(buf, uint64(*mj.LLCPrefetches), 10, false) + buf.WriteByte(',') + } + } + if mj.LLCPrefetchMisses != nil { + if true { + buf.WriteString(`"llc_prefetch_misses":`) + fflib.FormatBits2(buf, uint64(*mj.LLCPrefetchMisses), 10, false) + buf.WriteByte(',') + } + } + if mj.DTLBLoads != nil { + if true { + buf.WriteString(`"dtlb_loads":`) + fflib.FormatBits2(buf, uint64(*mj.DTLBLoads), 10, false) + buf.WriteByte(',') + } + } + if mj.DTLBLoadMisses != nil { + if true { + buf.WriteString(`"dtlb_load_misses":`) + fflib.FormatBits2(buf, uint64(*mj.DTLBLoadMisses), 10, false) + buf.WriteByte(',') + } + } + if mj.DTLBStores != nil { + if true { + buf.WriteString(`"dtlb_stores":`) + fflib.FormatBits2(buf, uint64(*mj.DTLBStores), 10, false) + buf.WriteByte(',') + } + } + if mj.DTLBStoreMisses != nil { + if true { + buf.WriteString(`"dtlb_store_misses":`) + fflib.FormatBits2(buf, uint64(*mj.DTLBStoreMisses), 10, false) + buf.WriteByte(',') + } + } + if mj.DTLBPrefetches != nil { + if true { + buf.WriteString(`"dtlb_prefetches":`) + fflib.FormatBits2(buf, uint64(*mj.DTLBPrefetches), 10, false) + buf.WriteByte(',') + } + } + if mj.DTLBPrefetchMisses != nil { + if true { + buf.WriteString(`"dtlb_prefetch_misses":`) + fflib.FormatBits2(buf, uint64(*mj.DTLBPrefetchMisses), 10, false) + buf.WriteByte(',') + } + } + if mj.ITLBLoads != nil { + if true { + buf.WriteString(`"itlb_loads":`) + fflib.FormatBits2(buf, uint64(*mj.ITLBLoads), 10, false) + buf.WriteByte(',') + } + } + if mj.ITLBLoadMisses != nil { + if true { + buf.WriteString(`"itlb_load_misses":`) + fflib.FormatBits2(buf, uint64(*mj.ITLBLoadMisses), 10, false) + buf.WriteByte(',') + } + } + if mj.BranchLoads != nil { + if true { + buf.WriteString(`"branch_loads":`) + fflib.FormatBits2(buf, uint64(*mj.BranchLoads), 10, false) + buf.WriteByte(',') + } + } + if mj.BranchLoadMisses != nil { + if true { + buf.WriteString(`"branch_load_misses":`) + fflib.FormatBits2(buf, uint64(*mj.BranchLoadMisses), 10, false) + buf.WriteByte(',') + } + } + if mj.NodeLoads != nil { + if true { + buf.WriteString(`"node_loads":`) + fflib.FormatBits2(buf, uint64(*mj.NodeLoads), 10, false) + buf.WriteByte(',') + } + } + if mj.NodeLoadMisses != nil { + if true { + buf.WriteString(`"node_load_misses":`) + fflib.FormatBits2(buf, uint64(*mj.NodeLoadMisses), 10, false) + buf.WriteByte(',') + } + } + if mj.NodeStores != nil { + if true { + buf.WriteString(`"node_stores":`) + fflib.FormatBits2(buf, uint64(*mj.NodeStores), 10, false) + buf.WriteByte(',') + } + } + if mj.NodeStoreMisses != nil { + if true { + buf.WriteString(`"node_store_misses":`) + fflib.FormatBits2(buf, uint64(*mj.NodeStoreMisses), 10, false) + buf.WriteByte(',') + } + } + if mj.NodePrefetches != nil { + if true { + buf.WriteString(`"node_prefetches":`) + fflib.FormatBits2(buf, uint64(*mj.NodePrefetches), 10, false) + buf.WriteByte(',') + } + } + if mj.NodePrefetchMisses != nil { + if true { + buf.WriteString(`"node_prefetch_misses":`) + 
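+ // These snake_case keys track Linux `perf stat` event names, which is
+ // presumably where Mesos samples PerfStatistics from.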
fflib.FormatBits2(buf, uint64(*mj.NodePrefetchMisses), 10, false) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_PerfStatisticsbase = iota + ffj_t_PerfStatisticsno_such_key + + ffj_t_PerfStatistics_Timestamp + + ffj_t_PerfStatistics_Duration + + ffj_t_PerfStatistics_Cycles + + ffj_t_PerfStatistics_StalledCyclesFrontend + + ffj_t_PerfStatistics_StalledCyclesBackend + + ffj_t_PerfStatistics_Instructions + + ffj_t_PerfStatistics_CacheReferences + + ffj_t_PerfStatistics_CacheMisses + + ffj_t_PerfStatistics_Branches + + ffj_t_PerfStatistics_BranchMisses + + ffj_t_PerfStatistics_BusCycles + + ffj_t_PerfStatistics_RefCycles + + ffj_t_PerfStatistics_CPUClock + + ffj_t_PerfStatistics_TaskClock + + ffj_t_PerfStatistics_PageFaults + + ffj_t_PerfStatistics_MinorFaults + + ffj_t_PerfStatistics_MajorFaults + + ffj_t_PerfStatistics_ContextSwitches + + ffj_t_PerfStatistics_CPUMigrations + + ffj_t_PerfStatistics_AlignmentFaults + + ffj_t_PerfStatistics_EmulationFaults + + ffj_t_PerfStatistics_L1DcacheLoads + + ffj_t_PerfStatistics_L1DcacheLoadMisses + + ffj_t_PerfStatistics_L1DcacheStores + + ffj_t_PerfStatistics_L1DcacheStoreMisses + + ffj_t_PerfStatistics_L1DcachePrefetches + + ffj_t_PerfStatistics_L1DcachePrefetchMisses + + ffj_t_PerfStatistics_L1IcacheLoads + + ffj_t_PerfStatistics_L1IcacheLoadMisses + + ffj_t_PerfStatistics_L1IcachePrefetches + + ffj_t_PerfStatistics_L1IcachePrefetchMisses + + ffj_t_PerfStatistics_LLCLoads + + ffj_t_PerfStatistics_LLCLoadMisses + + ffj_t_PerfStatistics_LLCStores + + ffj_t_PerfStatistics_LLCStoreMisses + + ffj_t_PerfStatistics_LLCPrefetches + + ffj_t_PerfStatistics_LLCPrefetchMisses + + ffj_t_PerfStatistics_DTLBLoads + + ffj_t_PerfStatistics_DTLBLoadMisses + + ffj_t_PerfStatistics_DTLBStores + + ffj_t_PerfStatistics_DTLBStoreMisses + + ffj_t_PerfStatistics_DTLBPrefetches + + ffj_t_PerfStatistics_DTLBPrefetchMisses + + ffj_t_PerfStatistics_ITLBLoads + + ffj_t_PerfStatistics_ITLBLoadMisses + + ffj_t_PerfStatistics_BranchLoads + + ffj_t_PerfStatistics_BranchLoadMisses + + ffj_t_PerfStatistics_NodeLoads + + ffj_t_PerfStatistics_NodeLoadMisses + + ffj_t_PerfStatistics_NodeStores + + ffj_t_PerfStatistics_NodeStoreMisses + + ffj_t_PerfStatistics_NodePrefetches + + ffj_t_PerfStatistics_NodePrefetchMisses +) + +var ffj_key_PerfStatistics_Timestamp = []byte("timestamp") + +var ffj_key_PerfStatistics_Duration = []byte("duration") + +var ffj_key_PerfStatistics_Cycles = []byte("cycles") + +var ffj_key_PerfStatistics_StalledCyclesFrontend = []byte("stalled_cycles_frontend") + +var ffj_key_PerfStatistics_StalledCyclesBackend = []byte("stalled_cycles_backend") + +var ffj_key_PerfStatistics_Instructions = []byte("instructions") + +var ffj_key_PerfStatistics_CacheReferences = []byte("cache_references") + +var ffj_key_PerfStatistics_CacheMisses = []byte("cache_misses") + +var ffj_key_PerfStatistics_Branches = []byte("branches") + +var ffj_key_PerfStatistics_BranchMisses = []byte("branch_misses") + +var ffj_key_PerfStatistics_BusCycles = []byte("bus_cycles") + +var ffj_key_PerfStatistics_RefCycles = []byte("ref_cycles") + +var ffj_key_PerfStatistics_CPUClock = []byte("cpu_clock") + +var ffj_key_PerfStatistics_TaskClock = []byte("task_clock") + +var ffj_key_PerfStatistics_PageFaults = []byte("page_faults") + +var ffj_key_PerfStatistics_MinorFaults = []byte("minor_faults") + +var ffj_key_PerfStatistics_MajorFaults = []byte("major_faults") + +var ffj_key_PerfStatistics_ContextSwitches = []byte("context_switches") + +var 
ffj_key_PerfStatistics_CPUMigrations = []byte("cpu_migrations") + +var ffj_key_PerfStatistics_AlignmentFaults = []byte("alignment_faults") + +var ffj_key_PerfStatistics_EmulationFaults = []byte("emulation_faults") + +var ffj_key_PerfStatistics_L1DcacheLoads = []byte("l1_dcache_loads") + +var ffj_key_PerfStatistics_L1DcacheLoadMisses = []byte("l1_dcache_load_misses") + +var ffj_key_PerfStatistics_L1DcacheStores = []byte("l1_dcache_stores") + +var ffj_key_PerfStatistics_L1DcacheStoreMisses = []byte("l1_dcache_store_misses") + +var ffj_key_PerfStatistics_L1DcachePrefetches = []byte("l1_dcache_prefetches") + +var ffj_key_PerfStatistics_L1DcachePrefetchMisses = []byte("l1_dcache_prefetch_misses") + +var ffj_key_PerfStatistics_L1IcacheLoads = []byte("l1_icache_loads") + +var ffj_key_PerfStatistics_L1IcacheLoadMisses = []byte("l1_icache_load_misses") + +var ffj_key_PerfStatistics_L1IcachePrefetches = []byte("l1_icache_prefetches") + +var ffj_key_PerfStatistics_L1IcachePrefetchMisses = []byte("l1_icache_prefetch_misses") + +var ffj_key_PerfStatistics_LLCLoads = []byte("llc_loads") + +var ffj_key_PerfStatistics_LLCLoadMisses = []byte("llc_load_misses") + +var ffj_key_PerfStatistics_LLCStores = []byte("llc_stores") + +var ffj_key_PerfStatistics_LLCStoreMisses = []byte("llc_store_misses") + +var ffj_key_PerfStatistics_LLCPrefetches = []byte("llc_prefetches") + +var ffj_key_PerfStatistics_LLCPrefetchMisses = []byte("llc_prefetch_misses") + +var ffj_key_PerfStatistics_DTLBLoads = []byte("dtlb_loads") + +var ffj_key_PerfStatistics_DTLBLoadMisses = []byte("dtlb_load_misses") + +var ffj_key_PerfStatistics_DTLBStores = []byte("dtlb_stores") + +var ffj_key_PerfStatistics_DTLBStoreMisses = []byte("dtlb_store_misses") + +var ffj_key_PerfStatistics_DTLBPrefetches = []byte("dtlb_prefetches") + +var ffj_key_PerfStatistics_DTLBPrefetchMisses = []byte("dtlb_prefetch_misses") + +var ffj_key_PerfStatistics_ITLBLoads = []byte("itlb_loads") + +var ffj_key_PerfStatistics_ITLBLoadMisses = []byte("itlb_load_misses") + +var ffj_key_PerfStatistics_BranchLoads = []byte("branch_loads") + +var ffj_key_PerfStatistics_BranchLoadMisses = []byte("branch_load_misses") + +var ffj_key_PerfStatistics_NodeLoads = []byte("node_loads") + +var ffj_key_PerfStatistics_NodeLoadMisses = []byte("node_load_misses") + +var ffj_key_PerfStatistics_NodeStores = []byte("node_stores") + +var ffj_key_PerfStatistics_NodeStoreMisses = []byte("node_store_misses") + +var ffj_key_PerfStatistics_NodePrefetches = []byte("node_prefetches") + +var ffj_key_PerfStatistics_NodePrefetchMisses = []byte("node_prefetch_misses") + +func (uj *PerfStatistics) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *PerfStatistics) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_PerfStatisticsbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = 
fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_PerfStatisticsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'a': + + if bytes.Equal(ffj_key_PerfStatistics_AlignmentFaults, kn) { + currentKey = ffj_t_PerfStatistics_AlignmentFaults + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'b': + + if bytes.Equal(ffj_key_PerfStatistics_Branches, kn) { + currentKey = ffj_t_PerfStatistics_Branches + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_BranchMisses, kn) { + currentKey = ffj_t_PerfStatistics_BranchMisses + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_BusCycles, kn) { + currentKey = ffj_t_PerfStatistics_BusCycles + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_BranchLoads, kn) { + currentKey = ffj_t_PerfStatistics_BranchLoads + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_BranchLoadMisses, kn) { + currentKey = ffj_t_PerfStatistics_BranchLoadMisses + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'c': + + if bytes.Equal(ffj_key_PerfStatistics_Cycles, kn) { + currentKey = ffj_t_PerfStatistics_Cycles + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_CacheReferences, kn) { + currentKey = ffj_t_PerfStatistics_CacheReferences + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_CacheMisses, kn) { + currentKey = ffj_t_PerfStatistics_CacheMisses + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_CPUClock, kn) { + currentKey = ffj_t_PerfStatistics_CPUClock + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_ContextSwitches, kn) { + currentKey = ffj_t_PerfStatistics_ContextSwitches + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_CPUMigrations, kn) { + currentKey = ffj_t_PerfStatistics_CPUMigrations + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'd': + + if bytes.Equal(ffj_key_PerfStatistics_Duration, kn) { + currentKey = ffj_t_PerfStatistics_Duration + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_DTLBLoads, kn) { + currentKey = ffj_t_PerfStatistics_DTLBLoads + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_DTLBLoadMisses, kn) { + currentKey = ffj_t_PerfStatistics_DTLBLoadMisses + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_DTLBStores, kn) { + currentKey = ffj_t_PerfStatistics_DTLBStores + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_DTLBStoreMisses, kn) { + currentKey = ffj_t_PerfStatistics_DTLBStoreMisses + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_DTLBPrefetches, kn) { + currentKey = ffj_t_PerfStatistics_DTLBPrefetches + state = fflib.FFParse_want_colon + goto mainparse + + } else 
if bytes.Equal(ffj_key_PerfStatistics_DTLBPrefetchMisses, kn) { + currentKey = ffj_t_PerfStatistics_DTLBPrefetchMisses + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'e': + + if bytes.Equal(ffj_key_PerfStatistics_EmulationFaults, kn) { + currentKey = ffj_t_PerfStatistics_EmulationFaults + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffj_key_PerfStatistics_Instructions, kn) { + currentKey = ffj_t_PerfStatistics_Instructions + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_ITLBLoads, kn) { + currentKey = ffj_t_PerfStatistics_ITLBLoads + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_ITLBLoadMisses, kn) { + currentKey = ffj_t_PerfStatistics_ITLBLoadMisses + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'l': + + if bytes.Equal(ffj_key_PerfStatistics_L1DcacheLoads, kn) { + currentKey = ffj_t_PerfStatistics_L1DcacheLoads + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_L1DcacheLoadMisses, kn) { + currentKey = ffj_t_PerfStatistics_L1DcacheLoadMisses + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_L1DcacheStores, kn) { + currentKey = ffj_t_PerfStatistics_L1DcacheStores + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_L1DcacheStoreMisses, kn) { + currentKey = ffj_t_PerfStatistics_L1DcacheStoreMisses + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_L1DcachePrefetches, kn) { + currentKey = ffj_t_PerfStatistics_L1DcachePrefetches + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_L1DcachePrefetchMisses, kn) { + currentKey = ffj_t_PerfStatistics_L1DcachePrefetchMisses + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_L1IcacheLoads, kn) { + currentKey = ffj_t_PerfStatistics_L1IcacheLoads + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_L1IcacheLoadMisses, kn) { + currentKey = ffj_t_PerfStatistics_L1IcacheLoadMisses + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_L1IcachePrefetches, kn) { + currentKey = ffj_t_PerfStatistics_L1IcachePrefetches + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_L1IcachePrefetchMisses, kn) { + currentKey = ffj_t_PerfStatistics_L1IcachePrefetchMisses + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_LLCLoads, kn) { + currentKey = ffj_t_PerfStatistics_LLCLoads + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_LLCLoadMisses, kn) { + currentKey = ffj_t_PerfStatistics_LLCLoadMisses + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_LLCStores, kn) { + currentKey = ffj_t_PerfStatistics_LLCStores + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_LLCStoreMisses, kn) { + currentKey = ffj_t_PerfStatistics_LLCStoreMisses + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_LLCPrefetches, kn) { + currentKey = ffj_t_PerfStatistics_LLCPrefetches + state = fflib.FFParse_want_colon + goto mainparse + + } else 
if bytes.Equal(ffj_key_PerfStatistics_LLCPrefetchMisses, kn) { + currentKey = ffj_t_PerfStatistics_LLCPrefetchMisses + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'm': + + if bytes.Equal(ffj_key_PerfStatistics_MinorFaults, kn) { + currentKey = ffj_t_PerfStatistics_MinorFaults + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_MajorFaults, kn) { + currentKey = ffj_t_PerfStatistics_MajorFaults + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffj_key_PerfStatistics_NodeLoads, kn) { + currentKey = ffj_t_PerfStatistics_NodeLoads + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_NodeLoadMisses, kn) { + currentKey = ffj_t_PerfStatistics_NodeLoadMisses + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_NodeStores, kn) { + currentKey = ffj_t_PerfStatistics_NodeStores + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_NodeStoreMisses, kn) { + currentKey = ffj_t_PerfStatistics_NodeStoreMisses + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_NodePrefetches, kn) { + currentKey = ffj_t_PerfStatistics_NodePrefetches + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_NodePrefetchMisses, kn) { + currentKey = ffj_t_PerfStatistics_NodePrefetchMisses + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffj_key_PerfStatistics_PageFaults, kn) { + currentKey = ffj_t_PerfStatistics_PageFaults + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'r': + + if bytes.Equal(ffj_key_PerfStatistics_RefCycles, kn) { + currentKey = ffj_t_PerfStatistics_RefCycles + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_PerfStatistics_StalledCyclesFrontend, kn) { + currentKey = ffj_t_PerfStatistics_StalledCyclesFrontend + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_StalledCyclesBackend, kn) { + currentKey = ffj_t_PerfStatistics_StalledCyclesBackend + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_PerfStatistics_Timestamp, kn) { + currentKey = ffj_t_PerfStatistics_Timestamp + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_PerfStatistics_TaskClock, kn) { + currentKey = ffj_t_PerfStatistics_TaskClock + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_NodePrefetchMisses, kn) { + currentKey = ffj_t_PerfStatistics_NodePrefetchMisses + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_NodePrefetches, kn) { + currentKey = ffj_t_PerfStatistics_NodePrefetches + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_NodeStoreMisses, kn) { + currentKey = ffj_t_PerfStatistics_NodeStoreMisses + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_NodeStores, kn) { + currentKey = ffj_t_PerfStatistics_NodeStores + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_NodeLoadMisses, kn) { + currentKey = ffj_t_PerfStatistics_NodeLoadMisses + state = fflib.FFParse_want_colon + goto mainparse + } + + 
if fflib.EqualFoldRight(ffj_key_PerfStatistics_NodeLoads, kn) { + currentKey = ffj_t_PerfStatistics_NodeLoads + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_BranchLoadMisses, kn) { + currentKey = ffj_t_PerfStatistics_BranchLoadMisses + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_BranchLoads, kn) { + currentKey = ffj_t_PerfStatistics_BranchLoads + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_ITLBLoadMisses, kn) { + currentKey = ffj_t_PerfStatistics_ITLBLoadMisses + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_ITLBLoads, kn) { + currentKey = ffj_t_PerfStatistics_ITLBLoads + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_DTLBPrefetchMisses, kn) { + currentKey = ffj_t_PerfStatistics_DTLBPrefetchMisses + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_DTLBPrefetches, kn) { + currentKey = ffj_t_PerfStatistics_DTLBPrefetches + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_DTLBStoreMisses, kn) { + currentKey = ffj_t_PerfStatistics_DTLBStoreMisses + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_DTLBStores, kn) { + currentKey = ffj_t_PerfStatistics_DTLBStores + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_DTLBLoadMisses, kn) { + currentKey = ffj_t_PerfStatistics_DTLBLoadMisses + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_DTLBLoads, kn) { + currentKey = ffj_t_PerfStatistics_DTLBLoads + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_LLCPrefetchMisses, kn) { + currentKey = ffj_t_PerfStatistics_LLCPrefetchMisses + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_LLCPrefetches, kn) { + currentKey = ffj_t_PerfStatistics_LLCPrefetches + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_LLCStoreMisses, kn) { + currentKey = ffj_t_PerfStatistics_LLCStoreMisses + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_LLCStores, kn) { + currentKey = ffj_t_PerfStatistics_LLCStores + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_LLCLoadMisses, kn) { + currentKey = ffj_t_PerfStatistics_LLCLoadMisses + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_LLCLoads, kn) { + currentKey = ffj_t_PerfStatistics_LLCLoads + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_L1IcachePrefetchMisses, kn) { + currentKey = ffj_t_PerfStatistics_L1IcachePrefetchMisses + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_L1IcachePrefetches, kn) { + currentKey = ffj_t_PerfStatistics_L1IcachePrefetches + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_L1IcacheLoadMisses, kn) { + currentKey = ffj_t_PerfStatistics_L1IcacheLoadMisses + state = 
fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_L1IcacheLoads, kn) { + currentKey = ffj_t_PerfStatistics_L1IcacheLoads + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_L1DcachePrefetchMisses, kn) { + currentKey = ffj_t_PerfStatistics_L1DcachePrefetchMisses + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_L1DcachePrefetches, kn) { + currentKey = ffj_t_PerfStatistics_L1DcachePrefetches + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_L1DcacheStoreMisses, kn) { + currentKey = ffj_t_PerfStatistics_L1DcacheStoreMisses + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_L1DcacheStores, kn) { + currentKey = ffj_t_PerfStatistics_L1DcacheStores + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_L1DcacheLoadMisses, kn) { + currentKey = ffj_t_PerfStatistics_L1DcacheLoadMisses + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_L1DcacheLoads, kn) { + currentKey = ffj_t_PerfStatistics_L1DcacheLoads + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_EmulationFaults, kn) { + currentKey = ffj_t_PerfStatistics_EmulationFaults + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_AlignmentFaults, kn) { + currentKey = ffj_t_PerfStatistics_AlignmentFaults + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_CPUMigrations, kn) { + currentKey = ffj_t_PerfStatistics_CPUMigrations + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_ContextSwitches, kn) { + currentKey = ffj_t_PerfStatistics_ContextSwitches + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_MajorFaults, kn) { + currentKey = ffj_t_PerfStatistics_MajorFaults + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_MinorFaults, kn) { + currentKey = ffj_t_PerfStatistics_MinorFaults + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_PageFaults, kn) { + currentKey = ffj_t_PerfStatistics_PageFaults + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_TaskClock, kn) { + currentKey = ffj_t_PerfStatistics_TaskClock + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_CPUClock, kn) { + currentKey = ffj_t_PerfStatistics_CPUClock + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_RefCycles, kn) { + currentKey = ffj_t_PerfStatistics_RefCycles + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_BusCycles, kn) { + currentKey = ffj_t_PerfStatistics_BusCycles + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_BranchMisses, kn) { + currentKey = ffj_t_PerfStatistics_BranchMisses + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_Branches, kn) { + currentKey = ffj_t_PerfStatistics_Branches 
+ state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_CacheMisses, kn) { + currentKey = ffj_t_PerfStatistics_CacheMisses + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_CacheReferences, kn) { + currentKey = ffj_t_PerfStatistics_CacheReferences + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_Instructions, kn) { + currentKey = ffj_t_PerfStatistics_Instructions + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_StalledCyclesBackend, kn) { + currentKey = ffj_t_PerfStatistics_StalledCyclesBackend + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_StalledCyclesFrontend, kn) { + currentKey = ffj_t_PerfStatistics_StalledCyclesFrontend + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_Cycles, kn) { + currentKey = ffj_t_PerfStatistics_Cycles + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_PerfStatistics_Duration, kn) { + currentKey = ffj_t_PerfStatistics_Duration + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_PerfStatistics_Timestamp, kn) { + currentKey = ffj_t_PerfStatistics_Timestamp + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_PerfStatisticsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_PerfStatistics_Timestamp: + goto handle_Timestamp + + case ffj_t_PerfStatistics_Duration: + goto handle_Duration + + case ffj_t_PerfStatistics_Cycles: + goto handle_Cycles + + case ffj_t_PerfStatistics_StalledCyclesFrontend: + goto handle_StalledCyclesFrontend + + case ffj_t_PerfStatistics_StalledCyclesBackend: + goto handle_StalledCyclesBackend + + case ffj_t_PerfStatistics_Instructions: + goto handle_Instructions + + case ffj_t_PerfStatistics_CacheReferences: + goto handle_CacheReferences + + case ffj_t_PerfStatistics_CacheMisses: + goto handle_CacheMisses + + case ffj_t_PerfStatistics_Branches: + goto handle_Branches + + case ffj_t_PerfStatistics_BranchMisses: + goto handle_BranchMisses + + case ffj_t_PerfStatistics_BusCycles: + goto handle_BusCycles + + case ffj_t_PerfStatistics_RefCycles: + goto handle_RefCycles + + case ffj_t_PerfStatistics_CPUClock: + goto handle_CPUClock + + case ffj_t_PerfStatistics_TaskClock: + goto handle_TaskClock + + case ffj_t_PerfStatistics_PageFaults: + goto handle_PageFaults + + case ffj_t_PerfStatistics_MinorFaults: + goto handle_MinorFaults + + case ffj_t_PerfStatistics_MajorFaults: + goto handle_MajorFaults + + case ffj_t_PerfStatistics_ContextSwitches: + goto handle_ContextSwitches + + case ffj_t_PerfStatistics_CPUMigrations: + goto handle_CPUMigrations + + case ffj_t_PerfStatistics_AlignmentFaults: + goto handle_AlignmentFaults + + case ffj_t_PerfStatistics_EmulationFaults: + goto handle_EmulationFaults + + case 
ffj_t_PerfStatistics_L1DcacheLoads: + goto handle_L1DcacheLoads + + case ffj_t_PerfStatistics_L1DcacheLoadMisses: + goto handle_L1DcacheLoadMisses + + case ffj_t_PerfStatistics_L1DcacheStores: + goto handle_L1DcacheStores + + case ffj_t_PerfStatistics_L1DcacheStoreMisses: + goto handle_L1DcacheStoreMisses + + case ffj_t_PerfStatistics_L1DcachePrefetches: + goto handle_L1DcachePrefetches + + case ffj_t_PerfStatistics_L1DcachePrefetchMisses: + goto handle_L1DcachePrefetchMisses + + case ffj_t_PerfStatistics_L1IcacheLoads: + goto handle_L1IcacheLoads + + case ffj_t_PerfStatistics_L1IcacheLoadMisses: + goto handle_L1IcacheLoadMisses + + case ffj_t_PerfStatistics_L1IcachePrefetches: + goto handle_L1IcachePrefetches + + case ffj_t_PerfStatistics_L1IcachePrefetchMisses: + goto handle_L1IcachePrefetchMisses + + case ffj_t_PerfStatistics_LLCLoads: + goto handle_LLCLoads + + case ffj_t_PerfStatistics_LLCLoadMisses: + goto handle_LLCLoadMisses + + case ffj_t_PerfStatistics_LLCStores: + goto handle_LLCStores + + case ffj_t_PerfStatistics_LLCStoreMisses: + goto handle_LLCStoreMisses + + case ffj_t_PerfStatistics_LLCPrefetches: + goto handle_LLCPrefetches + + case ffj_t_PerfStatistics_LLCPrefetchMisses: + goto handle_LLCPrefetchMisses + + case ffj_t_PerfStatistics_DTLBLoads: + goto handle_DTLBLoads + + case ffj_t_PerfStatistics_DTLBLoadMisses: + goto handle_DTLBLoadMisses + + case ffj_t_PerfStatistics_DTLBStores: + goto handle_DTLBStores + + case ffj_t_PerfStatistics_DTLBStoreMisses: + goto handle_DTLBStoreMisses + + case ffj_t_PerfStatistics_DTLBPrefetches: + goto handle_DTLBPrefetches + + case ffj_t_PerfStatistics_DTLBPrefetchMisses: + goto handle_DTLBPrefetchMisses + + case ffj_t_PerfStatistics_ITLBLoads: + goto handle_ITLBLoads + + case ffj_t_PerfStatistics_ITLBLoadMisses: + goto handle_ITLBLoadMisses + + case ffj_t_PerfStatistics_BranchLoads: + goto handle_BranchLoads + + case ffj_t_PerfStatistics_BranchLoadMisses: + goto handle_BranchLoadMisses + + case ffj_t_PerfStatistics_NodeLoads: + goto handle_NodeLoads + + case ffj_t_PerfStatistics_NodeLoadMisses: + goto handle_NodeLoadMisses + + case ffj_t_PerfStatistics_NodeStores: + goto handle_NodeStores + + case ffj_t_PerfStatistics_NodeStoreMisses: + goto handle_NodeStoreMisses + + case ffj_t_PerfStatistics_NodePrefetches: + goto handle_NodePrefetches + + case ffj_t_PerfStatistics_NodePrefetchMisses: + goto handle_NodePrefetchMisses + + case ffj_t_PerfStatisticsno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Timestamp: + + /* handler: uj.Timestamp type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + + if err != nil { + return fs.WrapErr(err) + } + + uj.Timestamp = float64(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Duration: + + /* handler: uj.Duration type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + 
+ if err != nil { + return fs.WrapErr(err) + } + + uj.Duration = float64(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Cycles: + + /* handler: uj.Cycles type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.Cycles = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.Cycles = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_StalledCyclesFrontend: + + /* handler: uj.StalledCyclesFrontend type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.StalledCyclesFrontend = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.StalledCyclesFrontend = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_StalledCyclesBackend: + + /* handler: uj.StalledCyclesBackend type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.StalledCyclesBackend = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.StalledCyclesBackend = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Instructions: + + /* handler: uj.Instructions type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.Instructions = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.Instructions = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_CacheReferences: + + /* handler: uj.CacheReferences type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.CacheReferences = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.CacheReferences = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_CacheMisses: + + /* handler: uj.CacheMisses type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.CacheMisses = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.CacheMisses = &ttypval + + } + } + + state = 
fflib.FFParse_after_value + goto mainparse + +handle_Branches: + + /* handler: uj.Branches type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.Branches = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.Branches = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_BranchMisses: + + /* handler: uj.BranchMisses type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.BranchMisses = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.BranchMisses = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_BusCycles: + + /* handler: uj.BusCycles type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.BusCycles = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.BusCycles = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_RefCycles: + + /* handler: uj.RefCycles type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.RefCycles = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.RefCycles = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_CPUClock: + + /* handler: uj.CPUClock type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.CPUClock = nil + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := float64(tval) + uj.CPUClock = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_TaskClock: + + /* handler: uj.TaskClock type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.TaskClock = nil + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := float64(tval) + uj.TaskClock = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_PageFaults: + + /* handler: uj.PageFaults type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok 
!= fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.PageFaults = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.PageFaults = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_MinorFaults: + + /* handler: uj.MinorFaults type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.MinorFaults = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.MinorFaults = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_MajorFaults: + + /* handler: uj.MajorFaults type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.MajorFaults = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.MajorFaults = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ContextSwitches: + + /* handler: uj.ContextSwitches type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.ContextSwitches = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.ContextSwitches = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_CPUMigrations: + + /* handler: uj.CPUMigrations type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.CPUMigrations = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.CPUMigrations = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_AlignmentFaults: + + /* handler: uj.AlignmentFaults type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.AlignmentFaults = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.AlignmentFaults = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_EmulationFaults: + + /* handler: uj.EmulationFaults type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == 
fflib.FFTok_null { + + uj.EmulationFaults = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.EmulationFaults = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_L1DcacheLoads: + + /* handler: uj.L1DcacheLoads type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.L1DcacheLoads = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.L1DcacheLoads = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_L1DcacheLoadMisses: + + /* handler: uj.L1DcacheLoadMisses type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.L1DcacheLoadMisses = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.L1DcacheLoadMisses = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_L1DcacheStores: + + /* handler: uj.L1DcacheStores type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.L1DcacheStores = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.L1DcacheStores = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_L1DcacheStoreMisses: + + /* handler: uj.L1DcacheStoreMisses type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.L1DcacheStoreMisses = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.L1DcacheStoreMisses = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_L1DcachePrefetches: + + /* handler: uj.L1DcachePrefetches type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.L1DcachePrefetches = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.L1DcachePrefetches = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_L1DcachePrefetchMisses: + + /* handler: uj.L1DcachePrefetchMisses type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.L1DcachePrefetchMisses = nil + 
+ } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.L1DcachePrefetchMisses = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_L1IcacheLoads: + + /* handler: uj.L1IcacheLoads type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.L1IcacheLoads = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.L1IcacheLoads = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_L1IcacheLoadMisses: + + /* handler: uj.L1IcacheLoadMisses type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.L1IcacheLoadMisses = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.L1IcacheLoadMisses = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_L1IcachePrefetches: + + /* handler: uj.L1IcachePrefetches type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.L1IcachePrefetches = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.L1IcachePrefetches = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_L1IcachePrefetchMisses: + + /* handler: uj.L1IcachePrefetchMisses type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.L1IcachePrefetchMisses = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.L1IcachePrefetchMisses = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_LLCLoads: + + /* handler: uj.LLCLoads type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.LLCLoads = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.LLCLoads = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_LLCLoadMisses: + + /* handler: uj.LLCLoadMisses type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.LLCLoadMisses = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err 
!= nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.LLCLoadMisses = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_LLCStores: + + /* handler: uj.LLCStores type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.LLCStores = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.LLCStores = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_LLCStoreMisses: + + /* handler: uj.LLCStoreMisses type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.LLCStoreMisses = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.LLCStoreMisses = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_LLCPrefetches: + + /* handler: uj.LLCPrefetches type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.LLCPrefetches = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.LLCPrefetches = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_LLCPrefetchMisses: + + /* handler: uj.LLCPrefetchMisses type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.LLCPrefetchMisses = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.LLCPrefetchMisses = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_DTLBLoads: + + /* handler: uj.DTLBLoads type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.DTLBLoads = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.DTLBLoads = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_DTLBLoadMisses: + + /* handler: uj.DTLBLoadMisses type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.DTLBLoadMisses = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.DTLBLoadMisses = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto 
mainparse + +handle_DTLBStores: + + /* handler: uj.DTLBStores type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.DTLBStores = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.DTLBStores = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_DTLBStoreMisses: + + /* handler: uj.DTLBStoreMisses type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.DTLBStoreMisses = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.DTLBStoreMisses = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_DTLBPrefetches: + + /* handler: uj.DTLBPrefetches type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.DTLBPrefetches = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.DTLBPrefetches = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_DTLBPrefetchMisses: + + /* handler: uj.DTLBPrefetchMisses type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.DTLBPrefetchMisses = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.DTLBPrefetchMisses = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ITLBLoads: + + /* handler: uj.ITLBLoads type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.ITLBLoads = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.ITLBLoads = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ITLBLoadMisses: + + /* handler: uj.ITLBLoadMisses type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.ITLBLoadMisses = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.ITLBLoadMisses = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_BranchLoads: + + /* handler: uj.BranchLoads type=uint64 kind=uint64 quoted=false*/ + + { + if tok != 
fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.BranchLoads = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.BranchLoads = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_BranchLoadMisses: + + /* handler: uj.BranchLoadMisses type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.BranchLoadMisses = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.BranchLoadMisses = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_NodeLoads: + + /* handler: uj.NodeLoads type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.NodeLoads = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.NodeLoads = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_NodeLoadMisses: + + /* handler: uj.NodeLoadMisses type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.NodeLoadMisses = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.NodeLoadMisses = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_NodeStores: + + /* handler: uj.NodeStores type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.NodeStores = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.NodeStores = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_NodeStoreMisses: + + /* handler: uj.NodeStoreMisses type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.NodeStoreMisses = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.NodeStoreMisses = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_NodePrefetches: + + /* handler: uj.NodePrefetches type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + 
if tok == fflib.FFTok_null { + + uj.NodePrefetches = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.NodePrefetches = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_NodePrefetchMisses: + + /* handler: uj.NodePrefetchMisses type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.NodePrefetchMisses = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.NodePrefetchMisses = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Port) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Port) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "number":`) + fflib.FormatBits2(buf, uint64(mj.Number), 10, false) + buf.WriteByte(',') + if mj.Name != nil { + if true { + buf.WriteString(`"name":`) + fflib.WriteJsonString(buf, string(*mj.Name)) + buf.WriteByte(',') + } + } + if mj.Protocol != nil { + if true { + buf.WriteString(`"protocol":`) + fflib.WriteJsonString(buf, string(*mj.Protocol)) + buf.WriteByte(',') + } + } + if mj.Visibility != nil { + if true { + buf.WriteString(`"visibility":`) + + { + + obj, err = mj.Visibility.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + } + } + if mj.Labels != nil { + if true { + buf.WriteString(`"labels":`) + + { + + err = mj.Labels.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Portbase = iota + ffj_t_Portno_such_key + + ffj_t_Port_Number + + ffj_t_Port_Name + + ffj_t_Port_Protocol + + ffj_t_Port_Visibility + + ffj_t_Port_Labels +) + +var ffj_key_Port_Number = []byte("number") + +var ffj_key_Port_Name = []byte("name") + +var ffj_key_Port_Protocol = []byte("protocol") + +var ffj_key_Port_Visibility = []byte("visibility") + +var ffj_key_Port_Labels = []byte("labels") + +func (uj *Port) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Port) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Portbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == 
fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Portno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'l': + + if bytes.Equal(ffj_key_Port_Labels, kn) { + currentKey = ffj_t_Port_Labels + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffj_key_Port_Number, kn) { + currentKey = ffj_t_Port_Number + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Port_Name, kn) { + currentKey = ffj_t_Port_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffj_key_Port_Protocol, kn) { + currentKey = ffj_t_Port_Protocol + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'v': + + if bytes.Equal(ffj_key_Port_Visibility, kn) { + currentKey = ffj_t_Port_Visibility + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Port_Labels, kn) { + currentKey = ffj_t_Port_Labels + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Port_Visibility, kn) { + currentKey = ffj_t_Port_Visibility + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Port_Protocol, kn) { + currentKey = ffj_t_Port_Protocol + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Port_Name, kn) { + currentKey = ffj_t_Port_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Port_Number, kn) { + currentKey = ffj_t_Port_Number + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Portno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Port_Number: + goto handle_Number + + case ffj_t_Port_Name: + goto handle_Name + + case ffj_t_Port_Protocol: + goto handle_Protocol + + case ffj_t_Port_Visibility: + goto handle_Visibility + + case ffj_t_Port_Labels: + goto handle_Labels + + case ffj_t_Portno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Number: + + /* handler: uj.Number type=uint32 kind=uint32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot 
unmarshal %s into Go value for uint32", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32) + + if err != nil { + return fs.WrapErr(err) + } + + uj.Number = uint32(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Name: + + /* handler: uj.Name type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Name = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Name = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Protocol: + + /* handler: uj.Protocol type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Protocol = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Protocol = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Visibility: + + /* handler: uj.Visibility type=mesos.DiscoveryInfo_Visibility kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Visibility = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + if uj.Visibility == nil { + uj.Visibility = new(DiscoveryInfo_Visibility) + } + + err = uj.Visibility.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Labels: + + /* handler: uj.Labels type=mesos.Labels kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Labels = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Labels == nil { + uj.Labels = new(Labels) + } + + err = uj.Labels.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Ports) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Ports) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"ports":`) + if mj.Ports != nil { + buf.WriteString(`[`) + for i, v := range mj.Ports { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil 
+} + +const ( + ffj_t_Portsbase = iota + ffj_t_Portsno_such_key + + ffj_t_Ports_Ports +) + +var ffj_key_Ports_Ports = []byte("ports") + +func (uj *Ports) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Ports) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Portsbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Portsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'p': + + if bytes.Equal(ffj_key_Ports_Ports, kn) { + currentKey = ffj_t_Ports_Ports + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Ports_Ports, kn) { + currentKey = ffj_t_Ports_Ports + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Portsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Ports_Ports: + goto handle_Ports + + case ffj_t_Portsno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Ports: + + /* handler: uj.Ports type=[]mesos.Port kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Ports = nil + } else { + + uj.Ports = []Port{} + + wantVal := true + + for { + + var tmp_uj__Ports Port + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Ports type=mesos.Port kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Ports.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Ports = append(uj.Ports, tmp_uj__Ports) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *RLimitInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *RLimitInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"rlimits":`) + if mj.Rlimits != nil { + buf.WriteString(`[`) + for i, v := range mj.Rlimits { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_RLimitInfobase = iota + ffj_t_RLimitInfono_such_key + + ffj_t_RLimitInfo_Rlimits +) + +var ffj_key_RLimitInfo_Rlimits = []byte("rlimits") + +func (uj *RLimitInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *RLimitInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_RLimitInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_RLimitInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'r': + + if bytes.Equal(ffj_key_RLimitInfo_Rlimits, kn) { + currentKey = ffj_t_RLimitInfo_Rlimits + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_RLimitInfo_Rlimits, kn) { + currentKey = ffj_t_RLimitInfo_Rlimits + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_RLimitInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_RLimitInfo_Rlimits: + goto handle_Rlimits + + case ffj_t_RLimitInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Rlimits: + + /* handler: uj.Rlimits type=[]mesos.RLimitInfo_RLimit kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Rlimits = nil + } else { + + uj.Rlimits = []RLimitInfo_RLimit{} + + wantVal := true + + for { + + var tmp_uj__Rlimits RLimitInfo_RLimit + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Rlimits type=mesos.RLimitInfo_RLimit kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Rlimits.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Rlimits = append(uj.Rlimits, tmp_uj__Rlimits) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *RLimitInfo_RLimit) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *RLimitInfo_RLimit) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "type":`) + + { + + obj, err = mj.Type.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + if mj.Hard != nil { + if true { + buf.WriteString(`"hard":`) + fflib.FormatBits2(buf, uint64(*mj.Hard), 10, false) + buf.WriteByte(',') + } + } + if mj.Soft != nil { + if true { + buf.WriteString(`"soft":`) + fflib.FormatBits2(buf, uint64(*mj.Soft), 10, false) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_RLimitInfo_RLimitbase = iota + ffj_t_RLimitInfo_RLimitno_such_key + + ffj_t_RLimitInfo_RLimit_Type + + ffj_t_RLimitInfo_RLimit_Hard + + ffj_t_RLimitInfo_RLimit_Soft +) + +var ffj_key_RLimitInfo_RLimit_Type = []byte("type") + +var ffj_key_RLimitInfo_RLimit_Hard = []byte("hard") + +var ffj_key_RLimitInfo_RLimit_Soft = []byte("soft") + +func (uj *RLimitInfo_RLimit) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *RLimitInfo_RLimit) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_RLimitInfo_RLimitbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. 
+ if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_RLimitInfo_RLimitno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'h': + + if bytes.Equal(ffj_key_RLimitInfo_RLimit_Hard, kn) { + currentKey = ffj_t_RLimitInfo_RLimit_Hard + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_RLimitInfo_RLimit_Soft, kn) { + currentKey = ffj_t_RLimitInfo_RLimit_Soft + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_RLimitInfo_RLimit_Type, kn) { + currentKey = ffj_t_RLimitInfo_RLimit_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_RLimitInfo_RLimit_Soft, kn) { + currentKey = ffj_t_RLimitInfo_RLimit_Soft + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_RLimitInfo_RLimit_Hard, kn) { + currentKey = ffj_t_RLimitInfo_RLimit_Hard + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_RLimitInfo_RLimit_Type, kn) { + currentKey = ffj_t_RLimitInfo_RLimit_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_RLimitInfo_RLimitno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_RLimitInfo_RLimit_Type: + goto handle_Type + + case ffj_t_RLimitInfo_RLimit_Hard: + goto handle_Hard + + case ffj_t_RLimitInfo_RLimit_Soft: + goto handle_Soft + + case ffj_t_RLimitInfo_RLimitno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Type: + + /* handler: uj.Type type=mesos.RLimitInfo_RLimit_Type kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = uj.Type.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Hard: + + /* handler: uj.Hard type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.Hard = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.Hard = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Soft: + + /* handler: uj.Soft type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + 
{ + + if tok == fflib.FFTok_null { + + uj.Soft = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.Soft = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *RateLimit) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *RateLimit) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.QPS != nil { + if true { + buf.WriteString(`"qps":`) + fflib.AppendFloat(buf, float64(*mj.QPS), 'g', -1, 64) + buf.WriteByte(',') + } + } + buf.WriteString(`"principal":`) + fflib.WriteJsonString(buf, string(mj.Principal)) + buf.WriteByte(',') + if mj.Capacity != nil { + if true { + buf.WriteString(`"capacity":`) + fflib.FormatBits2(buf, uint64(*mj.Capacity), 10, false) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_RateLimitbase = iota + ffj_t_RateLimitno_such_key + + ffj_t_RateLimit_QPS + + ffj_t_RateLimit_Principal + + ffj_t_RateLimit_Capacity +) + +var ffj_key_RateLimit_QPS = []byte("qps") + +var ffj_key_RateLimit_Principal = []byte("principal") + +var ffj_key_RateLimit_Capacity = []byte("capacity") + +func (uj *RateLimit) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *RateLimit) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_RateLimitbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_RateLimitno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_RateLimit_Capacity, kn) { + currentKey = ffj_t_RateLimit_Capacity + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffj_key_RateLimit_Principal, kn) { + currentKey = ffj_t_RateLimit_Principal + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'q': + + if bytes.Equal(ffj_key_RateLimit_QPS, kn) { + currentKey = ffj_t_RateLimit_QPS + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_RateLimit_Capacity, kn) { + currentKey = ffj_t_RateLimit_Capacity + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_RateLimit_Principal, kn) { + currentKey = ffj_t_RateLimit_Principal + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_RateLimit_QPS, kn) { + currentKey = ffj_t_RateLimit_QPS + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_RateLimitno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_RateLimit_QPS: + goto handle_QPS + + case ffj_t_RateLimit_Principal: + goto handle_Principal + + case ffj_t_RateLimit_Capacity: + goto handle_Capacity + + case ffj_t_RateLimitno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_QPS: + + /* handler: uj.QPS type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.QPS = nil + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := float64(tval) + uj.QPS = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Principal: + + /* handler: uj.Principal type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Principal = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Capacity: + + /* handler: uj.Capacity type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.Capacity = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.Capacity = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + 
+wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *RateLimits) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *RateLimits) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "limits":`) + if mj.Limits != nil { + buf.WriteString(`[`) + for i, v := range mj.Limits { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + if mj.AggregateDefaultQPS != nil { + if true { + buf.WriteString(`"aggregate_default_qps":`) + fflib.AppendFloat(buf, float64(*mj.AggregateDefaultQPS), 'g', -1, 64) + buf.WriteByte(',') + } + } + if mj.AggregateDefaultCapacity != nil { + if true { + buf.WriteString(`"aggregate_default_capacity":`) + fflib.FormatBits2(buf, uint64(*mj.AggregateDefaultCapacity), 10, false) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_RateLimitsbase = iota + ffj_t_RateLimitsno_such_key + + ffj_t_RateLimits_Limits + + ffj_t_RateLimits_AggregateDefaultQPS + + ffj_t_RateLimits_AggregateDefaultCapacity +) + +var ffj_key_RateLimits_Limits = []byte("limits") + +var ffj_key_RateLimits_AggregateDefaultQPS = []byte("aggregate_default_qps") + +var ffj_key_RateLimits_AggregateDefaultCapacity = []byte("aggregate_default_capacity") + +func (uj *RateLimits) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *RateLimits) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_RateLimitsbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_RateLimitsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'a': + + if bytes.Equal(ffj_key_RateLimits_AggregateDefaultQPS, kn) { + currentKey = ffj_t_RateLimits_AggregateDefaultQPS + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_RateLimits_AggregateDefaultCapacity, kn) { + currentKey = ffj_t_RateLimits_AggregateDefaultCapacity + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'l': + + if bytes.Equal(ffj_key_RateLimits_Limits, kn) { + currentKey = ffj_t_RateLimits_Limits + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.AsciiEqualFold(ffj_key_RateLimits_AggregateDefaultCapacity, kn) { + currentKey = ffj_t_RateLimits_AggregateDefaultCapacity + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_RateLimits_AggregateDefaultQPS, kn) { + currentKey = ffj_t_RateLimits_AggregateDefaultQPS + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_RateLimits_Limits, kn) { + currentKey = ffj_t_RateLimits_Limits + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_RateLimitsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_RateLimits_Limits: + goto handle_Limits + + case ffj_t_RateLimits_AggregateDefaultQPS: + goto handle_AggregateDefaultQPS + + case ffj_t_RateLimits_AggregateDefaultCapacity: + goto handle_AggregateDefaultCapacity + + case ffj_t_RateLimitsno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Limits: + + /* handler: uj.Limits type=[]mesos.RateLimit kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Limits = nil + } else { + + uj.Limits = []RateLimit{} + + wantVal := true + + for { + + var tmp_uj__Limits RateLimit + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Limits type=mesos.RateLimit kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Limits.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Limits = append(uj.Limits, tmp_uj__Limits) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_AggregateDefaultQPS: + + /* handler: uj.AggregateDefaultQPS type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.AggregateDefaultQPS = nil + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := float64(tval) + uj.AggregateDefaultQPS = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_AggregateDefaultCapacity: + + /* handler: uj.AggregateDefaultCapacity type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.AggregateDefaultCapacity = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.AggregateDefaultCapacity = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Request) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Request) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteByte('{') + if mj.AgentID != nil { + if true { + buf.WriteString(`"agent_id":`) + + { + + err = mj.AgentID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.WriteString(`"resources":`) + if mj.Resources != nil { + buf.WriteString(`[`) + for i, v := range mj.Resources { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Requestbase = iota + ffj_t_Requestno_such_key + + ffj_t_Request_AgentID + + ffj_t_Request_Resources +) + +var ffj_key_Request_AgentID = []byte("agent_id") + +var ffj_key_Request_Resources = []byte("resources") + +func (uj *Request) UnmarshalJSON(input 
[]byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Request) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Requestbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Requestno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'a': + + if bytes.Equal(ffj_key_Request_AgentID, kn) { + currentKey = ffj_t_Request_AgentID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'r': + + if bytes.Equal(ffj_key_Request_Resources, kn) { + currentKey = ffj_t_Request_Resources + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Request_Resources, kn) { + currentKey = ffj_t_Request_Resources + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_Request_AgentID, kn) { + currentKey = ffj_t_Request_AgentID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Requestno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Request_AgentID: + goto handle_AgentID + + case ffj_t_Request_Resources: + goto handle_Resources + + case ffj_t_Requestno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_AgentID: + + /* handler: uj.AgentID type=mesos.AgentID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.AgentID = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.AgentID == nil { + uj.AgentID = new(AgentID) + } + + err = uj.AgentID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Resources: + + /* handler: uj.Resources type=[]mesos.Resource kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + 
} + + if tok == fflib.FFTok_null { + uj.Resources = nil + } else { + + uj.Resources = []Resource{} + + wantVal := true + + for { + + var tmp_uj__Resources Resource + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Resources type=mesos.Resource kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Resources.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Resources = append(uj.Resources, tmp_uj__Resources) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Resource) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Resource) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.ProviderID != nil { + if true { + buf.WriteString(`"provider_id":`) + + { + + err = mj.ProviderID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.WriteString(`"name":`) + fflib.WriteJsonString(buf, string(mj.Name)) + buf.WriteByte(',') + if mj.Type != nil { + if true { + buf.WriteString(`"type":`) + + { + + obj, err = mj.Type.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + } + } + if mj.Scalar != nil { + if true { + buf.WriteString(`"scalar":`) + + { + + err = mj.Scalar.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Ranges != nil { + if true { + buf.WriteString(`"ranges":`) + + { + + err = mj.Ranges.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Set != nil { + if true { + buf.WriteString(`"set":`) + + { + + err = mj.Set.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Role != nil { + if true { + buf.WriteString(`"role":`) + fflib.WriteJsonString(buf, string(*mj.Role)) + buf.WriteByte(',') + } + } + if mj.AllocationInfo != nil { + if true { + buf.WriteString(`"allocation_info":`) + + { + + err = mj.AllocationInfo.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Reservation != nil { + if true { + buf.WriteString(`"reservation":`) + + { + + err = mj.Reservation.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } 
+ buf.WriteString(`"reservations":`) + if mj.Reservations != nil { + buf.WriteString(`[`) + for i, v := range mj.Reservations { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + if mj.Disk != nil { + if true { + buf.WriteString(`"disk":`) + + { + + err = mj.Disk.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Revocable != nil { + if true { + buf.WriteString(`"revocable":`) + + { + + err = mj.Revocable.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Shared != nil { + if true { + buf.WriteString(`"shared":`) + + { + + err = mj.Shared.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Resourcebase = iota + ffj_t_Resourceno_such_key + + ffj_t_Resource_ProviderID + + ffj_t_Resource_Name + + ffj_t_Resource_Type + + ffj_t_Resource_Scalar + + ffj_t_Resource_Ranges + + ffj_t_Resource_Set + + ffj_t_Resource_Role + + ffj_t_Resource_AllocationInfo + + ffj_t_Resource_Reservation + + ffj_t_Resource_Reservations + + ffj_t_Resource_Disk + + ffj_t_Resource_Revocable + + ffj_t_Resource_Shared +) + +var ffj_key_Resource_ProviderID = []byte("provider_id") + +var ffj_key_Resource_Name = []byte("name") + +var ffj_key_Resource_Type = []byte("type") + +var ffj_key_Resource_Scalar = []byte("scalar") + +var ffj_key_Resource_Ranges = []byte("ranges") + +var ffj_key_Resource_Set = []byte("set") + +var ffj_key_Resource_Role = []byte("role") + +var ffj_key_Resource_AllocationInfo = []byte("allocation_info") + +var ffj_key_Resource_Reservation = []byte("reservation") + +var ffj_key_Resource_Reservations = []byte("reservations") + +var ffj_key_Resource_Disk = []byte("disk") + +var ffj_key_Resource_Revocable = []byte("revocable") + +var ffj_key_Resource_Shared = []byte("shared") + +func (uj *Resource) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Resource) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Resourcebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Resourceno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'a': + + if bytes.Equal(ffj_key_Resource_AllocationInfo, kn) { + currentKey = ffj_t_Resource_AllocationInfo + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'd': + + if bytes.Equal(ffj_key_Resource_Disk, kn) { + currentKey = ffj_t_Resource_Disk + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffj_key_Resource_Name, kn) { + currentKey = ffj_t_Resource_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffj_key_Resource_ProviderID, kn) { + currentKey = ffj_t_Resource_ProviderID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'r': + + if bytes.Equal(ffj_key_Resource_Ranges, kn) { + currentKey = ffj_t_Resource_Ranges + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Resource_Role, kn) { + currentKey = ffj_t_Resource_Role + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Resource_Reservation, kn) { + currentKey = ffj_t_Resource_Reservation + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Resource_Reservations, kn) { + currentKey = ffj_t_Resource_Reservations + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Resource_Revocable, kn) { + currentKey = ffj_t_Resource_Revocable + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_Resource_Scalar, kn) { + currentKey = ffj_t_Resource_Scalar + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Resource_Set, kn) { + currentKey = ffj_t_Resource_Set + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Resource_Shared, kn) { + currentKey = ffj_t_Resource_Shared + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_Resource_Type, kn) { + currentKey = ffj_t_Resource_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Resource_Shared, kn) { + currentKey = ffj_t_Resource_Shared + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Resource_Revocable, kn) { + currentKey = ffj_t_Resource_Revocable + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Resource_Disk, kn) { + currentKey = ffj_t_Resource_Disk + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Resource_Reservations, kn) { + currentKey = ffj_t_Resource_Reservations + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Resource_Reservation, kn) { + currentKey = ffj_t_Resource_Reservation + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_Resource_AllocationInfo, kn) { + currentKey = ffj_t_Resource_AllocationInfo + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Resource_Role, kn) { + currentKey = ffj_t_Resource_Role + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Resource_Set, kn) { + currentKey = ffj_t_Resource_Set + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Resource_Ranges, kn) { + currentKey = ffj_t_Resource_Ranges + state = fflib.FFParse_want_colon + goto mainparse + } + + if 
fflib.EqualFoldRight(ffj_key_Resource_Scalar, kn) { + currentKey = ffj_t_Resource_Scalar + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Resource_Type, kn) { + currentKey = ffj_t_Resource_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Resource_Name, kn) { + currentKey = ffj_t_Resource_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_Resource_ProviderID, kn) { + currentKey = ffj_t_Resource_ProviderID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Resourceno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Resource_ProviderID: + goto handle_ProviderID + + case ffj_t_Resource_Name: + goto handle_Name + + case ffj_t_Resource_Type: + goto handle_Type + + case ffj_t_Resource_Scalar: + goto handle_Scalar + + case ffj_t_Resource_Ranges: + goto handle_Ranges + + case ffj_t_Resource_Set: + goto handle_Set + + case ffj_t_Resource_Role: + goto handle_Role + + case ffj_t_Resource_AllocationInfo: + goto handle_AllocationInfo + + case ffj_t_Resource_Reservation: + goto handle_Reservation + + case ffj_t_Resource_Reservations: + goto handle_Reservations + + case ffj_t_Resource_Disk: + goto handle_Disk + + case ffj_t_Resource_Revocable: + goto handle_Revocable + + case ffj_t_Resource_Shared: + goto handle_Shared + + case ffj_t_Resourceno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ProviderID: + + /* handler: uj.ProviderID type=mesos.ResourceProviderID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.ProviderID = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.ProviderID == nil { + uj.ProviderID = new(ResourceProviderID) + } + + err = uj.ProviderID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Name: + + /* handler: uj.Name type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Name = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Type: + + /* handler: uj.Type type=mesos.Value_Type kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Type = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + if uj.Type == nil { + uj.Type = new(Value_Type) + } + + err = uj.Type.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + 
+handle_Scalar: + + /* handler: uj.Scalar type=mesos.Value_Scalar kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Scalar = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Scalar == nil { + uj.Scalar = new(Value_Scalar) + } + + err = uj.Scalar.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Ranges: + + /* handler: uj.Ranges type=mesos.Value_Ranges kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Ranges = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Ranges == nil { + uj.Ranges = new(Value_Ranges) + } + + err = uj.Ranges.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Set: + + /* handler: uj.Set type=mesos.Value_Set kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Set = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Set == nil { + uj.Set = new(Value_Set) + } + + err = uj.Set.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Role: + + /* handler: uj.Role type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Role = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Role = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_AllocationInfo: + + /* handler: uj.AllocationInfo type=mesos.Resource_AllocationInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.AllocationInfo = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.AllocationInfo == nil { + uj.AllocationInfo = new(Resource_AllocationInfo) + } + + err = uj.AllocationInfo.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Reservation: + + /* handler: uj.Reservation type=mesos.Resource_ReservationInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Reservation = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Reservation == nil { + uj.Reservation = new(Resource_ReservationInfo) + } + + err = uj.Reservation.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Reservations: + + /* handler: uj.Reservations type=[]mesos.Resource_ReservationInfo kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Reservations = nil + } else { + + uj.Reservations = []Resource_ReservationInfo{} + + wantVal := true + + for { + + var tmp_uj__Reservations Resource_ReservationInfo + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == 
fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Reservations type=mesos.Resource_ReservationInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Reservations.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Reservations = append(uj.Reservations, tmp_uj__Reservations) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Disk: + + /* handler: uj.Disk type=mesos.Resource_DiskInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Disk = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Disk == nil { + uj.Disk = new(Resource_DiskInfo) + } + + err = uj.Disk.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Revocable: + + /* handler: uj.Revocable type=mesos.Resource_RevocableInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Revocable = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Revocable == nil { + uj.Revocable = new(Resource_RevocableInfo) + } + + err = uj.Revocable.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Shared: + + /* handler: uj.Shared type=mesos.Resource_SharedInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Shared = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Shared == nil { + uj.Shared = new(Resource_SharedInfo) + } + + err = uj.Shared.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *ResourceProviderID) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *ResourceProviderID) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"value":`) + fflib.WriteJsonString(buf, string(mj.Value)) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_ResourceProviderIDbase = iota + ffj_t_ResourceProviderIDno_such_key + + ffj_t_ResourceProviderID_Value +) + +var ffj_key_ResourceProviderID_Value = []byte("value") + +func (uj *ResourceProviderID) UnmarshalJSON(input []byte) error { + fs := 
fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *ResourceProviderID) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_ResourceProviderIDbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_ResourceProviderIDno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'v': + + if bytes.Equal(ffj_key_ResourceProviderID_Value, kn) { + currentKey = ffj_t_ResourceProviderID_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_ResourceProviderID_Value, kn) { + currentKey = ffj_t_ResourceProviderID_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_ResourceProviderIDno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_ResourceProviderID_Value: + goto handle_Value + + case ffj_t_ResourceProviderIDno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Value: + + /* handler: uj.Value type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Value = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *ResourceProviderInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + 
buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *ResourceProviderInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.ID != nil { + if true { + buf.WriteString(`"id":`) + + { + + err = mj.ID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.WriteString(`"attributes":`) + if mj.Attributes != nil { + buf.WriteString(`[`) + for i, v := range mj.Attributes { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteString(`,"type":`) + fflib.WriteJsonString(buf, string(mj.Type)) + buf.WriteString(`,"name":`) + fflib.WriteJsonString(buf, string(mj.Name)) + buf.WriteString(`,"default_reservations":`) + if mj.DefaultReservations != nil { + buf.WriteString(`[`) + for i, v := range mj.DefaultReservations { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + if mj.Storage != nil { + if true { + buf.WriteString(`"storage":`) + + { + + err = mj.Storage.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_ResourceProviderInfobase = iota + ffj_t_ResourceProviderInfono_such_key + + ffj_t_ResourceProviderInfo_ID + + ffj_t_ResourceProviderInfo_Attributes + + ffj_t_ResourceProviderInfo_Type + + ffj_t_ResourceProviderInfo_Name + + ffj_t_ResourceProviderInfo_DefaultReservations + + ffj_t_ResourceProviderInfo_Storage +) + +var ffj_key_ResourceProviderInfo_ID = []byte("id") + +var ffj_key_ResourceProviderInfo_Attributes = []byte("attributes") + +var ffj_key_ResourceProviderInfo_Type = []byte("type") + +var ffj_key_ResourceProviderInfo_Name = []byte("name") + +var ffj_key_ResourceProviderInfo_DefaultReservations = []byte("default_reservations") + +var ffj_key_ResourceProviderInfo_Storage = []byte("storage") + +func (uj *ResourceProviderInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *ResourceProviderInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_ResourceProviderInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. 
+ if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_ResourceProviderInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'a': + + if bytes.Equal(ffj_key_ResourceProviderInfo_Attributes, kn) { + currentKey = ffj_t_ResourceProviderInfo_Attributes + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'd': + + if bytes.Equal(ffj_key_ResourceProviderInfo_DefaultReservations, kn) { + currentKey = ffj_t_ResourceProviderInfo_DefaultReservations + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffj_key_ResourceProviderInfo_ID, kn) { + currentKey = ffj_t_ResourceProviderInfo_ID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffj_key_ResourceProviderInfo_Name, kn) { + currentKey = ffj_t_ResourceProviderInfo_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_ResourceProviderInfo_Storage, kn) { + currentKey = ffj_t_ResourceProviderInfo_Storage + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_ResourceProviderInfo_Type, kn) { + currentKey = ffj_t_ResourceProviderInfo_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_ResourceProviderInfo_Storage, kn) { + currentKey = ffj_t_ResourceProviderInfo_Storage + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceProviderInfo_DefaultReservations, kn) { + currentKey = ffj_t_ResourceProviderInfo_DefaultReservations + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_ResourceProviderInfo_Name, kn) { + currentKey = ffj_t_ResourceProviderInfo_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_ResourceProviderInfo_Type, kn) { + currentKey = ffj_t_ResourceProviderInfo_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceProviderInfo_Attributes, kn) { + currentKey = ffj_t_ResourceProviderInfo_Attributes + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_ResourceProviderInfo_ID, kn) { + currentKey = ffj_t_ResourceProviderInfo_ID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_ResourceProviderInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_ResourceProviderInfo_ID: + goto handle_ID + + case ffj_t_ResourceProviderInfo_Attributes: + goto handle_Attributes + + case ffj_t_ResourceProviderInfo_Type: + goto handle_Type + + case ffj_t_ResourceProviderInfo_Name: + goto handle_Name + + case ffj_t_ResourceProviderInfo_DefaultReservations: + goto handle_DefaultReservations + + case ffj_t_ResourceProviderInfo_Storage: + goto handle_Storage + + case 
ffj_t_ResourceProviderInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ID: + + /* handler: uj.ID type=mesos.ResourceProviderID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.ID = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.ID == nil { + uj.ID = new(ResourceProviderID) + } + + err = uj.ID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Attributes: + + /* handler: uj.Attributes type=[]mesos.Attribute kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Attributes = nil + } else { + + uj.Attributes = []Attribute{} + + wantVal := true + + for { + + var tmp_uj__Attributes Attribute + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Attributes type=mesos.Attribute kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Attributes.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Attributes = append(uj.Attributes, tmp_uj__Attributes) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Type: + + /* handler: uj.Type type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Type = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Name: + + /* handler: uj.Name type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Name = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_DefaultReservations: + + /* handler: uj.DefaultReservations type=[]mesos.Resource_ReservationInfo kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.DefaultReservations = nil + } else { + + uj.DefaultReservations = []Resource_ReservationInfo{} + + wantVal := true + + for { + + var tmp_uj__DefaultReservations Resource_ReservationInfo + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // 
TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__DefaultReservations type=mesos.Resource_ReservationInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__DefaultReservations.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.DefaultReservations = append(uj.DefaultReservations, tmp_uj__DefaultReservations) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Storage: + + /* handler: uj.Storage type=mesos.ResourceProviderInfo_Storage kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Storage = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Storage == nil { + uj.Storage = new(ResourceProviderInfo_Storage) + } + + err = uj.Storage.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *ResourceProviderInfo_Storage) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *ResourceProviderInfo_Storage) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"plugin":`) + + { + + err = mj.Plugin.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_ResourceProviderInfo_Storagebase = iota + ffj_t_ResourceProviderInfo_Storageno_such_key + + ffj_t_ResourceProviderInfo_Storage_Plugin +) + +var ffj_key_ResourceProviderInfo_Storage_Plugin = []byte("plugin") + +func (uj *ResourceProviderInfo_Storage) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *ResourceProviderInfo_Storage) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_ResourceProviderInfo_Storagebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == 
fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_ResourceProviderInfo_Storageno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'p': + + if bytes.Equal(ffj_key_ResourceProviderInfo_Storage_Plugin, kn) { + currentKey = ffj_t_ResourceProviderInfo_Storage_Plugin + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_ResourceProviderInfo_Storage_Plugin, kn) { + currentKey = ffj_t_ResourceProviderInfo_Storage_Plugin + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_ResourceProviderInfo_Storageno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_ResourceProviderInfo_Storage_Plugin: + goto handle_Plugin + + case ffj_t_ResourceProviderInfo_Storageno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Plugin: + + /* handler: uj.Plugin type=mesos.CSIPluginInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.Plugin.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *ResourceStatistics) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *ResourceStatistics) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "timestamp":`) + fflib.AppendFloat(buf, float64(mj.Timestamp), 'g', -1, 64) + buf.WriteByte(',') + if mj.Processes != nil { + if true { + buf.WriteString(`"processes":`) + fflib.FormatBits2(buf, uint64(*mj.Processes), 10, false) + buf.WriteByte(',') + } + } + if mj.Threads != nil { + if true { + buf.WriteString(`"threads":`) + fflib.FormatBits2(buf, uint64(*mj.Threads), 10, false) + buf.WriteByte(',') + } + } + 
if mj.CPUsUserTimeSecs != nil { + if true { + buf.WriteString(`"cpus_user_time_secs":`) + fflib.AppendFloat(buf, float64(*mj.CPUsUserTimeSecs), 'g', -1, 64) + buf.WriteByte(',') + } + } + if mj.CPUsSystemTimeSecs != nil { + if true { + buf.WriteString(`"cpus_system_time_secs":`) + fflib.AppendFloat(buf, float64(*mj.CPUsSystemTimeSecs), 'g', -1, 64) + buf.WriteByte(',') + } + } + if mj.CPUsLimit != nil { + if true { + buf.WriteString(`"cpus_limit":`) + fflib.AppendFloat(buf, float64(*mj.CPUsLimit), 'g', -1, 64) + buf.WriteByte(',') + } + } + if mj.CPUsNrPeriods != nil { + if true { + buf.WriteString(`"cpus_nr_periods":`) + fflib.FormatBits2(buf, uint64(*mj.CPUsNrPeriods), 10, false) + buf.WriteByte(',') + } + } + if mj.CPUsNrThrottled != nil { + if true { + buf.WriteString(`"cpus_nr_throttled":`) + fflib.FormatBits2(buf, uint64(*mj.CPUsNrThrottled), 10, false) + buf.WriteByte(',') + } + } + if mj.CPUsThrottledTimeSecs != nil { + if true { + buf.WriteString(`"cpus_throttled_time_secs":`) + fflib.AppendFloat(buf, float64(*mj.CPUsThrottledTimeSecs), 'g', -1, 64) + buf.WriteByte(',') + } + } + if mj.MemTotalBytes != nil { + if true { + buf.WriteString(`"mem_total_bytes":`) + fflib.FormatBits2(buf, uint64(*mj.MemTotalBytes), 10, false) + buf.WriteByte(',') + } + } + if mj.MemTotalMemswBytes != nil { + if true { + buf.WriteString(`"mem_total_memsw_bytes":`) + fflib.FormatBits2(buf, uint64(*mj.MemTotalMemswBytes), 10, false) + buf.WriteByte(',') + } + } + if mj.MemLimitBytes != nil { + if true { + buf.WriteString(`"mem_limit_bytes":`) + fflib.FormatBits2(buf, uint64(*mj.MemLimitBytes), 10, false) + buf.WriteByte(',') + } + } + if mj.MemSoftLimitBytes != nil { + if true { + buf.WriteString(`"mem_soft_limit_bytes":`) + fflib.FormatBits2(buf, uint64(*mj.MemSoftLimitBytes), 10, false) + buf.WriteByte(',') + } + } + if mj.MemFileBytes != nil { + if true { + buf.WriteString(`"mem_file_bytes":`) + fflib.FormatBits2(buf, uint64(*mj.MemFileBytes), 10, false) + buf.WriteByte(',') + } + } + if mj.MemAnonBytes != nil { + if true { + buf.WriteString(`"mem_anon_bytes":`) + fflib.FormatBits2(buf, uint64(*mj.MemAnonBytes), 10, false) + buf.WriteByte(',') + } + } + if mj.MemCacheBytes != nil { + if true { + buf.WriteString(`"mem_cache_bytes":`) + fflib.FormatBits2(buf, uint64(*mj.MemCacheBytes), 10, false) + buf.WriteByte(',') + } + } + if mj.MemRSSBytes != nil { + if true { + buf.WriteString(`"mem_rss_bytes":`) + fflib.FormatBits2(buf, uint64(*mj.MemRSSBytes), 10, false) + buf.WriteByte(',') + } + } + if mj.MemMappedFileBytes != nil { + if true { + buf.WriteString(`"mem_mapped_file_bytes":`) + fflib.FormatBits2(buf, uint64(*mj.MemMappedFileBytes), 10, false) + buf.WriteByte(',') + } + } + if mj.MemSwapBytes != nil { + if true { + buf.WriteString(`"mem_swap_bytes":`) + fflib.FormatBits2(buf, uint64(*mj.MemSwapBytes), 10, false) + buf.WriteByte(',') + } + } + if mj.MemUnevictableBytes != nil { + if true { + buf.WriteString(`"mem_unevictable_bytes":`) + fflib.FormatBits2(buf, uint64(*mj.MemUnevictableBytes), 10, false) + buf.WriteByte(',') + } + } + if mj.MemLowPressureCounter != nil { + if true { + buf.WriteString(`"mem_low_pressure_counter":`) + fflib.FormatBits2(buf, uint64(*mj.MemLowPressureCounter), 10, false) + buf.WriteByte(',') + } + } + if mj.MemMediumPressureCounter != nil { + if true { + buf.WriteString(`"mem_medium_pressure_counter":`) + fflib.FormatBits2(buf, uint64(*mj.MemMediumPressureCounter), 10, false) + buf.WriteByte(',') + } + } + if mj.MemCriticalPressureCounter != nil { + if true { + 
buf.WriteString(`"mem_critical_pressure_counter":`) + fflib.FormatBits2(buf, uint64(*mj.MemCriticalPressureCounter), 10, false) + buf.WriteByte(',') + } + } + if mj.DiskLimitBytes != nil { + if true { + buf.WriteString(`"disk_limit_bytes":`) + fflib.FormatBits2(buf, uint64(*mj.DiskLimitBytes), 10, false) + buf.WriteByte(',') + } + } + if mj.DiskUsedBytes != nil { + if true { + buf.WriteString(`"disk_used_bytes":`) + fflib.FormatBits2(buf, uint64(*mj.DiskUsedBytes), 10, false) + buf.WriteByte(',') + } + } + buf.WriteString(`"disk_statistics":`) + if mj.DiskStatistics != nil { + buf.WriteString(`[`) + for i, v := range mj.DiskStatistics { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + if mj.BlkioStatistics != nil { + if true { + buf.WriteString(`"blkio_statistics":`) + + { + + err = mj.BlkioStatistics.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Perf != nil { + if true { + buf.WriteString(`"perf":`) + + { + + err = mj.Perf.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.NetRxPackets != nil { + if true { + buf.WriteString(`"net_rx_packets":`) + fflib.FormatBits2(buf, uint64(*mj.NetRxPackets), 10, false) + buf.WriteByte(',') + } + } + if mj.NetRxBytes != nil { + if true { + buf.WriteString(`"net_rx_bytes":`) + fflib.FormatBits2(buf, uint64(*mj.NetRxBytes), 10, false) + buf.WriteByte(',') + } + } + if mj.NetRxErrors != nil { + if true { + buf.WriteString(`"net_rx_errors":`) + fflib.FormatBits2(buf, uint64(*mj.NetRxErrors), 10, false) + buf.WriteByte(',') + } + } + if mj.NetRxDropped != nil { + if true { + buf.WriteString(`"net_rx_dropped":`) + fflib.FormatBits2(buf, uint64(*mj.NetRxDropped), 10, false) + buf.WriteByte(',') + } + } + if mj.NetTxPackets != nil { + if true { + buf.WriteString(`"net_tx_packets":`) + fflib.FormatBits2(buf, uint64(*mj.NetTxPackets), 10, false) + buf.WriteByte(',') + } + } + if mj.NetTxBytes != nil { + if true { + buf.WriteString(`"net_tx_bytes":`) + fflib.FormatBits2(buf, uint64(*mj.NetTxBytes), 10, false) + buf.WriteByte(',') + } + } + if mj.NetTxErrors != nil { + if true { + buf.WriteString(`"net_tx_errors":`) + fflib.FormatBits2(buf, uint64(*mj.NetTxErrors), 10, false) + buf.WriteByte(',') + } + } + if mj.NetTxDropped != nil { + if true { + buf.WriteString(`"net_tx_dropped":`) + fflib.FormatBits2(buf, uint64(*mj.NetTxDropped), 10, false) + buf.WriteByte(',') + } + } + if mj.NetTCPRttMicrosecsP50 != nil { + if true { + buf.WriteString(`"net_tcp_rtt_microsecs_p50":`) + fflib.AppendFloat(buf, float64(*mj.NetTCPRttMicrosecsP50), 'g', -1, 64) + buf.WriteByte(',') + } + } + if mj.NetTCPRttMicrosecsP90 != nil { + if true { + buf.WriteString(`"net_tcp_rtt_microsecs_p90":`) + fflib.AppendFloat(buf, float64(*mj.NetTCPRttMicrosecsP90), 'g', -1, 64) + buf.WriteByte(',') + } + } + if mj.NetTCPRttMicrosecsP95 != nil { + if true { + buf.WriteString(`"net_tcp_rtt_microsecs_p95":`) + fflib.AppendFloat(buf, float64(*mj.NetTCPRttMicrosecsP95), 'g', -1, 64) + buf.WriteByte(',') + } + } + if mj.NetTCPRttMicrosecsP99 != nil { + if true { + buf.WriteString(`"net_tcp_rtt_microsecs_p99":`) + fflib.AppendFloat(buf, float64(*mj.NetTCPRttMicrosecsP99), 'g', -1, 64) + buf.WriteByte(',') + } + } + if mj.NetTCPActiveConnections != nil { + if true { + buf.WriteString(`"net_tcp_active_connections":`) + fflib.AppendFloat(buf, 
float64(*mj.NetTCPActiveConnections), 'g', -1, 64) + buf.WriteByte(',') + } + } + if mj.NetTCPTimeWaitConnections != nil { + if true { + buf.WriteString(`"net_tcp_time_wait_connections":`) + fflib.AppendFloat(buf, float64(*mj.NetTCPTimeWaitConnections), 'g', -1, 64) + buf.WriteByte(',') + } + } + buf.WriteString(`"net_traffic_control_statistics":`) + if mj.NetTrafficControlStatistics != nil { + buf.WriteString(`[`) + for i, v := range mj.NetTrafficControlStatistics { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + if mj.NetSNMPStatistics != nil { + if true { + buf.WriteString(`"net_snmp_statistics":`) + + { + + err = mj.NetSNMPStatistics.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_ResourceStatisticsbase = iota + ffj_t_ResourceStatisticsno_such_key + + ffj_t_ResourceStatistics_Timestamp + + ffj_t_ResourceStatistics_Processes + + ffj_t_ResourceStatistics_Threads + + ffj_t_ResourceStatistics_CPUsUserTimeSecs + + ffj_t_ResourceStatistics_CPUsSystemTimeSecs + + ffj_t_ResourceStatistics_CPUsLimit + + ffj_t_ResourceStatistics_CPUsNrPeriods + + ffj_t_ResourceStatistics_CPUsNrThrottled + + ffj_t_ResourceStatistics_CPUsThrottledTimeSecs + + ffj_t_ResourceStatistics_MemTotalBytes + + ffj_t_ResourceStatistics_MemTotalMemswBytes + + ffj_t_ResourceStatistics_MemLimitBytes + + ffj_t_ResourceStatistics_MemSoftLimitBytes + + ffj_t_ResourceStatistics_MemFileBytes + + ffj_t_ResourceStatistics_MemAnonBytes + + ffj_t_ResourceStatistics_MemCacheBytes + + ffj_t_ResourceStatistics_MemRSSBytes + + ffj_t_ResourceStatistics_MemMappedFileBytes + + ffj_t_ResourceStatistics_MemSwapBytes + + ffj_t_ResourceStatistics_MemUnevictableBytes + + ffj_t_ResourceStatistics_MemLowPressureCounter + + ffj_t_ResourceStatistics_MemMediumPressureCounter + + ffj_t_ResourceStatistics_MemCriticalPressureCounter + + ffj_t_ResourceStatistics_DiskLimitBytes + + ffj_t_ResourceStatistics_DiskUsedBytes + + ffj_t_ResourceStatistics_DiskStatistics + + ffj_t_ResourceStatistics_BlkioStatistics + + ffj_t_ResourceStatistics_Perf + + ffj_t_ResourceStatistics_NetRxPackets + + ffj_t_ResourceStatistics_NetRxBytes + + ffj_t_ResourceStatistics_NetRxErrors + + ffj_t_ResourceStatistics_NetRxDropped + + ffj_t_ResourceStatistics_NetTxPackets + + ffj_t_ResourceStatistics_NetTxBytes + + ffj_t_ResourceStatistics_NetTxErrors + + ffj_t_ResourceStatistics_NetTxDropped + + ffj_t_ResourceStatistics_NetTCPRttMicrosecsP50 + + ffj_t_ResourceStatistics_NetTCPRttMicrosecsP90 + + ffj_t_ResourceStatistics_NetTCPRttMicrosecsP95 + + ffj_t_ResourceStatistics_NetTCPRttMicrosecsP99 + + ffj_t_ResourceStatistics_NetTCPActiveConnections + + ffj_t_ResourceStatistics_NetTCPTimeWaitConnections + + ffj_t_ResourceStatistics_NetTrafficControlStatistics + + ffj_t_ResourceStatistics_NetSNMPStatistics +) + +var ffj_key_ResourceStatistics_Timestamp = []byte("timestamp") + +var ffj_key_ResourceStatistics_Processes = []byte("processes") + +var ffj_key_ResourceStatistics_Threads = []byte("threads") + +var ffj_key_ResourceStatistics_CPUsUserTimeSecs = []byte("cpus_user_time_secs") + +var ffj_key_ResourceStatistics_CPUsSystemTimeSecs = []byte("cpus_system_time_secs") + +var ffj_key_ResourceStatistics_CPUsLimit = []byte("cpus_limit") + +var ffj_key_ResourceStatistics_CPUsNrPeriods = []byte("cpus_nr_periods") + +var 
ffj_key_ResourceStatistics_CPUsNrThrottled = []byte("cpus_nr_throttled") + +var ffj_key_ResourceStatistics_CPUsThrottledTimeSecs = []byte("cpus_throttled_time_secs") + +var ffj_key_ResourceStatistics_MemTotalBytes = []byte("mem_total_bytes") + +var ffj_key_ResourceStatistics_MemTotalMemswBytes = []byte("mem_total_memsw_bytes") + +var ffj_key_ResourceStatistics_MemLimitBytes = []byte("mem_limit_bytes") + +var ffj_key_ResourceStatistics_MemSoftLimitBytes = []byte("mem_soft_limit_bytes") + +var ffj_key_ResourceStatistics_MemFileBytes = []byte("mem_file_bytes") + +var ffj_key_ResourceStatistics_MemAnonBytes = []byte("mem_anon_bytes") + +var ffj_key_ResourceStatistics_MemCacheBytes = []byte("mem_cache_bytes") + +var ffj_key_ResourceStatistics_MemRSSBytes = []byte("mem_rss_bytes") + +var ffj_key_ResourceStatistics_MemMappedFileBytes = []byte("mem_mapped_file_bytes") + +var ffj_key_ResourceStatistics_MemSwapBytes = []byte("mem_swap_bytes") + +var ffj_key_ResourceStatistics_MemUnevictableBytes = []byte("mem_unevictable_bytes") + +var ffj_key_ResourceStatistics_MemLowPressureCounter = []byte("mem_low_pressure_counter") + +var ffj_key_ResourceStatistics_MemMediumPressureCounter = []byte("mem_medium_pressure_counter") + +var ffj_key_ResourceStatistics_MemCriticalPressureCounter = []byte("mem_critical_pressure_counter") + +var ffj_key_ResourceStatistics_DiskLimitBytes = []byte("disk_limit_bytes") + +var ffj_key_ResourceStatistics_DiskUsedBytes = []byte("disk_used_bytes") + +var ffj_key_ResourceStatistics_DiskStatistics = []byte("disk_statistics") + +var ffj_key_ResourceStatistics_BlkioStatistics = []byte("blkio_statistics") + +var ffj_key_ResourceStatistics_Perf = []byte("perf") + +var ffj_key_ResourceStatistics_NetRxPackets = []byte("net_rx_packets") + +var ffj_key_ResourceStatistics_NetRxBytes = []byte("net_rx_bytes") + +var ffj_key_ResourceStatistics_NetRxErrors = []byte("net_rx_errors") + +var ffj_key_ResourceStatistics_NetRxDropped = []byte("net_rx_dropped") + +var ffj_key_ResourceStatistics_NetTxPackets = []byte("net_tx_packets") + +var ffj_key_ResourceStatistics_NetTxBytes = []byte("net_tx_bytes") + +var ffj_key_ResourceStatistics_NetTxErrors = []byte("net_tx_errors") + +var ffj_key_ResourceStatistics_NetTxDropped = []byte("net_tx_dropped") + +var ffj_key_ResourceStatistics_NetTCPRttMicrosecsP50 = []byte("net_tcp_rtt_microsecs_p50") + +var ffj_key_ResourceStatistics_NetTCPRttMicrosecsP90 = []byte("net_tcp_rtt_microsecs_p90") + +var ffj_key_ResourceStatistics_NetTCPRttMicrosecsP95 = []byte("net_tcp_rtt_microsecs_p95") + +var ffj_key_ResourceStatistics_NetTCPRttMicrosecsP99 = []byte("net_tcp_rtt_microsecs_p99") + +var ffj_key_ResourceStatistics_NetTCPActiveConnections = []byte("net_tcp_active_connections") + +var ffj_key_ResourceStatistics_NetTCPTimeWaitConnections = []byte("net_tcp_time_wait_connections") + +var ffj_key_ResourceStatistics_NetTrafficControlStatistics = []byte("net_traffic_control_statistics") + +var ffj_key_ResourceStatistics_NetSNMPStatistics = []byte("net_snmp_statistics") + +func (uj *ResourceStatistics) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *ResourceStatistics) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_ResourceStatisticsbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, 
state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_ResourceStatisticsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'b': + + if bytes.Equal(ffj_key_ResourceStatistics_BlkioStatistics, kn) { + currentKey = ffj_t_ResourceStatistics_BlkioStatistics + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'c': + + if bytes.Equal(ffj_key_ResourceStatistics_CPUsUserTimeSecs, kn) { + currentKey = ffj_t_ResourceStatistics_CPUsUserTimeSecs + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_CPUsSystemTimeSecs, kn) { + currentKey = ffj_t_ResourceStatistics_CPUsSystemTimeSecs + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_CPUsLimit, kn) { + currentKey = ffj_t_ResourceStatistics_CPUsLimit + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_CPUsNrPeriods, kn) { + currentKey = ffj_t_ResourceStatistics_CPUsNrPeriods + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_CPUsNrThrottled, kn) { + currentKey = ffj_t_ResourceStatistics_CPUsNrThrottled + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_CPUsThrottledTimeSecs, kn) { + currentKey = ffj_t_ResourceStatistics_CPUsThrottledTimeSecs + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'd': + + if bytes.Equal(ffj_key_ResourceStatistics_DiskLimitBytes, kn) { + currentKey = ffj_t_ResourceStatistics_DiskLimitBytes + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_DiskUsedBytes, kn) { + currentKey = ffj_t_ResourceStatistics_DiskUsedBytes + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_DiskStatistics, kn) { + currentKey = ffj_t_ResourceStatistics_DiskStatistics + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'm': + + if bytes.Equal(ffj_key_ResourceStatistics_MemTotalBytes, kn) { + currentKey = ffj_t_ResourceStatistics_MemTotalBytes + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_MemTotalMemswBytes, kn) { + currentKey = ffj_t_ResourceStatistics_MemTotalMemswBytes + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_MemLimitBytes, kn) { + currentKey = ffj_t_ResourceStatistics_MemLimitBytes + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_MemSoftLimitBytes, kn) { + currentKey = ffj_t_ResourceStatistics_MemSoftLimitBytes + state = fflib.FFParse_want_colon + goto mainparse + + } else if 
bytes.Equal(ffj_key_ResourceStatistics_MemFileBytes, kn) { + currentKey = ffj_t_ResourceStatistics_MemFileBytes + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_MemAnonBytes, kn) { + currentKey = ffj_t_ResourceStatistics_MemAnonBytes + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_MemCacheBytes, kn) { + currentKey = ffj_t_ResourceStatistics_MemCacheBytes + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_MemRSSBytes, kn) { + currentKey = ffj_t_ResourceStatistics_MemRSSBytes + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_MemMappedFileBytes, kn) { + currentKey = ffj_t_ResourceStatistics_MemMappedFileBytes + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_MemSwapBytes, kn) { + currentKey = ffj_t_ResourceStatistics_MemSwapBytes + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_MemUnevictableBytes, kn) { + currentKey = ffj_t_ResourceStatistics_MemUnevictableBytes + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_MemLowPressureCounter, kn) { + currentKey = ffj_t_ResourceStatistics_MemLowPressureCounter + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_MemMediumPressureCounter, kn) { + currentKey = ffj_t_ResourceStatistics_MemMediumPressureCounter + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_MemCriticalPressureCounter, kn) { + currentKey = ffj_t_ResourceStatistics_MemCriticalPressureCounter + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffj_key_ResourceStatistics_NetRxPackets, kn) { + currentKey = ffj_t_ResourceStatistics_NetRxPackets + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_NetRxBytes, kn) { + currentKey = ffj_t_ResourceStatistics_NetRxBytes + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_NetRxErrors, kn) { + currentKey = ffj_t_ResourceStatistics_NetRxErrors + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_NetRxDropped, kn) { + currentKey = ffj_t_ResourceStatistics_NetRxDropped + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_NetTxPackets, kn) { + currentKey = ffj_t_ResourceStatistics_NetTxPackets + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_NetTxBytes, kn) { + currentKey = ffj_t_ResourceStatistics_NetTxBytes + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_NetTxErrors, kn) { + currentKey = ffj_t_ResourceStatistics_NetTxErrors + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_NetTxDropped, kn) { + currentKey = ffj_t_ResourceStatistics_NetTxDropped + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_NetTCPRttMicrosecsP50, kn) { + currentKey = ffj_t_ResourceStatistics_NetTCPRttMicrosecsP50 + state = fflib.FFParse_want_colon + goto mainparse + + } else if 
bytes.Equal(ffj_key_ResourceStatistics_NetTCPRttMicrosecsP90, kn) { + currentKey = ffj_t_ResourceStatistics_NetTCPRttMicrosecsP90 + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_NetTCPRttMicrosecsP95, kn) { + currentKey = ffj_t_ResourceStatistics_NetTCPRttMicrosecsP95 + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_NetTCPRttMicrosecsP99, kn) { + currentKey = ffj_t_ResourceStatistics_NetTCPRttMicrosecsP99 + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_NetTCPActiveConnections, kn) { + currentKey = ffj_t_ResourceStatistics_NetTCPActiveConnections + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_NetTCPTimeWaitConnections, kn) { + currentKey = ffj_t_ResourceStatistics_NetTCPTimeWaitConnections + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_NetTrafficControlStatistics, kn) { + currentKey = ffj_t_ResourceStatistics_NetTrafficControlStatistics + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_NetSNMPStatistics, kn) { + currentKey = ffj_t_ResourceStatistics_NetSNMPStatistics + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffj_key_ResourceStatistics_Processes, kn) { + currentKey = ffj_t_ResourceStatistics_Processes + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_Perf, kn) { + currentKey = ffj_t_ResourceStatistics_Perf + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_ResourceStatistics_Timestamp, kn) { + currentKey = ffj_t_ResourceStatistics_Timestamp + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_ResourceStatistics_Threads, kn) { + currentKey = ffj_t_ResourceStatistics_Threads + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_NetSNMPStatistics, kn) { + currentKey = ffj_t_ResourceStatistics_NetSNMPStatistics + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_NetTrafficControlStatistics, kn) { + currentKey = ffj_t_ResourceStatistics_NetTrafficControlStatistics + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_NetTCPTimeWaitConnections, kn) { + currentKey = ffj_t_ResourceStatistics_NetTCPTimeWaitConnections + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_NetTCPActiveConnections, kn) { + currentKey = ffj_t_ResourceStatistics_NetTCPActiveConnections + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_NetTCPRttMicrosecsP99, kn) { + currentKey = ffj_t_ResourceStatistics_NetTCPRttMicrosecsP99 + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_NetTCPRttMicrosecsP95, kn) { + currentKey = ffj_t_ResourceStatistics_NetTCPRttMicrosecsP95 + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_NetTCPRttMicrosecsP90, kn) { + currentKey = ffj_t_ResourceStatistics_NetTCPRttMicrosecsP90 + state = fflib.FFParse_want_colon + goto mainparse + } + + if 
fflib.EqualFoldRight(ffj_key_ResourceStatistics_NetTCPRttMicrosecsP50, kn) { + currentKey = ffj_t_ResourceStatistics_NetTCPRttMicrosecsP50 + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_ResourceStatistics_NetTxDropped, kn) { + currentKey = ffj_t_ResourceStatistics_NetTxDropped + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_NetTxErrors, kn) { + currentKey = ffj_t_ResourceStatistics_NetTxErrors + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_NetTxBytes, kn) { + currentKey = ffj_t_ResourceStatistics_NetTxBytes + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_NetTxPackets, kn) { + currentKey = ffj_t_ResourceStatistics_NetTxPackets + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_ResourceStatistics_NetRxDropped, kn) { + currentKey = ffj_t_ResourceStatistics_NetRxDropped + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_NetRxErrors, kn) { + currentKey = ffj_t_ResourceStatistics_NetRxErrors + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_NetRxBytes, kn) { + currentKey = ffj_t_ResourceStatistics_NetRxBytes + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_NetRxPackets, kn) { + currentKey = ffj_t_ResourceStatistics_NetRxPackets + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_ResourceStatistics_Perf, kn) { + currentKey = ffj_t_ResourceStatistics_Perf + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_BlkioStatistics, kn) { + currentKey = ffj_t_ResourceStatistics_BlkioStatistics + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_DiskStatistics, kn) { + currentKey = ffj_t_ResourceStatistics_DiskStatistics + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_DiskUsedBytes, kn) { + currentKey = ffj_t_ResourceStatistics_DiskUsedBytes + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_DiskLimitBytes, kn) { + currentKey = ffj_t_ResourceStatistics_DiskLimitBytes + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_MemCriticalPressureCounter, kn) { + currentKey = ffj_t_ResourceStatistics_MemCriticalPressureCounter + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_MemMediumPressureCounter, kn) { + currentKey = ffj_t_ResourceStatistics_MemMediumPressureCounter + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_MemLowPressureCounter, kn) { + currentKey = ffj_t_ResourceStatistics_MemLowPressureCounter + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_MemUnevictableBytes, kn) { + currentKey = ffj_t_ResourceStatistics_MemUnevictableBytes + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_MemSwapBytes, kn) { + currentKey = ffj_t_ResourceStatistics_MemSwapBytes + state = 
fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_MemMappedFileBytes, kn) { + currentKey = ffj_t_ResourceStatistics_MemMappedFileBytes + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_MemRSSBytes, kn) { + currentKey = ffj_t_ResourceStatistics_MemRSSBytes + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_MemCacheBytes, kn) { + currentKey = ffj_t_ResourceStatistics_MemCacheBytes + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_MemAnonBytes, kn) { + currentKey = ffj_t_ResourceStatistics_MemAnonBytes + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_MemFileBytes, kn) { + currentKey = ffj_t_ResourceStatistics_MemFileBytes + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_MemSoftLimitBytes, kn) { + currentKey = ffj_t_ResourceStatistics_MemSoftLimitBytes + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_MemLimitBytes, kn) { + currentKey = ffj_t_ResourceStatistics_MemLimitBytes + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_MemTotalMemswBytes, kn) { + currentKey = ffj_t_ResourceStatistics_MemTotalMemswBytes + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_MemTotalBytes, kn) { + currentKey = ffj_t_ResourceStatistics_MemTotalBytes + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_CPUsThrottledTimeSecs, kn) { + currentKey = ffj_t_ResourceStatistics_CPUsThrottledTimeSecs + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_CPUsNrThrottled, kn) { + currentKey = ffj_t_ResourceStatistics_CPUsNrThrottled + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_CPUsNrPeriods, kn) { + currentKey = ffj_t_ResourceStatistics_CPUsNrPeriods + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_CPUsLimit, kn) { + currentKey = ffj_t_ResourceStatistics_CPUsLimit + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_CPUsSystemTimeSecs, kn) { + currentKey = ffj_t_ResourceStatistics_CPUsSystemTimeSecs + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_CPUsUserTimeSecs, kn) { + currentKey = ffj_t_ResourceStatistics_CPUsUserTimeSecs + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_Threads, kn) { + currentKey = ffj_t_ResourceStatistics_Threads + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_Processes, kn) { + currentKey = ffj_t_ResourceStatistics_Processes + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceStatistics_Timestamp, kn) { + currentKey = ffj_t_ResourceStatistics_Timestamp + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_ResourceStatisticsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case 
fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_ResourceStatistics_Timestamp: + goto handle_Timestamp + + case ffj_t_ResourceStatistics_Processes: + goto handle_Processes + + case ffj_t_ResourceStatistics_Threads: + goto handle_Threads + + case ffj_t_ResourceStatistics_CPUsUserTimeSecs: + goto handle_CPUsUserTimeSecs + + case ffj_t_ResourceStatistics_CPUsSystemTimeSecs: + goto handle_CPUsSystemTimeSecs + + case ffj_t_ResourceStatistics_CPUsLimit: + goto handle_CPUsLimit + + case ffj_t_ResourceStatistics_CPUsNrPeriods: + goto handle_CPUsNrPeriods + + case ffj_t_ResourceStatistics_CPUsNrThrottled: + goto handle_CPUsNrThrottled + + case ffj_t_ResourceStatistics_CPUsThrottledTimeSecs: + goto handle_CPUsThrottledTimeSecs + + case ffj_t_ResourceStatistics_MemTotalBytes: + goto handle_MemTotalBytes + + case ffj_t_ResourceStatistics_MemTotalMemswBytes: + goto handle_MemTotalMemswBytes + + case ffj_t_ResourceStatistics_MemLimitBytes: + goto handle_MemLimitBytes + + case ffj_t_ResourceStatistics_MemSoftLimitBytes: + goto handle_MemSoftLimitBytes + + case ffj_t_ResourceStatistics_MemFileBytes: + goto handle_MemFileBytes + + case ffj_t_ResourceStatistics_MemAnonBytes: + goto handle_MemAnonBytes + + case ffj_t_ResourceStatistics_MemCacheBytes: + goto handle_MemCacheBytes + + case ffj_t_ResourceStatistics_MemRSSBytes: + goto handle_MemRSSBytes + + case ffj_t_ResourceStatistics_MemMappedFileBytes: + goto handle_MemMappedFileBytes + + case ffj_t_ResourceStatistics_MemSwapBytes: + goto handle_MemSwapBytes + + case ffj_t_ResourceStatistics_MemUnevictableBytes: + goto handle_MemUnevictableBytes + + case ffj_t_ResourceStatistics_MemLowPressureCounter: + goto handle_MemLowPressureCounter + + case ffj_t_ResourceStatistics_MemMediumPressureCounter: + goto handle_MemMediumPressureCounter + + case ffj_t_ResourceStatistics_MemCriticalPressureCounter: + goto handle_MemCriticalPressureCounter + + case ffj_t_ResourceStatistics_DiskLimitBytes: + goto handle_DiskLimitBytes + + case ffj_t_ResourceStatistics_DiskUsedBytes: + goto handle_DiskUsedBytes + + case ffj_t_ResourceStatistics_DiskStatistics: + goto handle_DiskStatistics + + case ffj_t_ResourceStatistics_BlkioStatistics: + goto handle_BlkioStatistics + + case ffj_t_ResourceStatistics_Perf: + goto handle_Perf + + case ffj_t_ResourceStatistics_NetRxPackets: + goto handle_NetRxPackets + + case ffj_t_ResourceStatistics_NetRxBytes: + goto handle_NetRxBytes + + case ffj_t_ResourceStatistics_NetRxErrors: + goto handle_NetRxErrors + + case ffj_t_ResourceStatistics_NetRxDropped: + goto handle_NetRxDropped + + case ffj_t_ResourceStatistics_NetTxPackets: + goto handle_NetTxPackets + + case ffj_t_ResourceStatistics_NetTxBytes: + goto handle_NetTxBytes + + case ffj_t_ResourceStatistics_NetTxErrors: + goto handle_NetTxErrors + + case ffj_t_ResourceStatistics_NetTxDropped: + goto handle_NetTxDropped + + case ffj_t_ResourceStatistics_NetTCPRttMicrosecsP50: + goto handle_NetTCPRttMicrosecsP50 + + case ffj_t_ResourceStatistics_NetTCPRttMicrosecsP90: + goto handle_NetTCPRttMicrosecsP90 + + case ffj_t_ResourceStatistics_NetTCPRttMicrosecsP95: + goto handle_NetTCPRttMicrosecsP95 + + case 
ffj_t_ResourceStatistics_NetTCPRttMicrosecsP99: + goto handle_NetTCPRttMicrosecsP99 + + case ffj_t_ResourceStatistics_NetTCPActiveConnections: + goto handle_NetTCPActiveConnections + + case ffj_t_ResourceStatistics_NetTCPTimeWaitConnections: + goto handle_NetTCPTimeWaitConnections + + case ffj_t_ResourceStatistics_NetTrafficControlStatistics: + goto handle_NetTrafficControlStatistics + + case ffj_t_ResourceStatistics_NetSNMPStatistics: + goto handle_NetSNMPStatistics + + case ffj_t_ResourceStatisticsno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Timestamp: + + /* handler: uj.Timestamp type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + + if err != nil { + return fs.WrapErr(err) + } + + uj.Timestamp = float64(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Processes: + + /* handler: uj.Processes type=uint32 kind=uint32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint32", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.Processes = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint32(tval) + uj.Processes = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Threads: + + /* handler: uj.Threads type=uint32 kind=uint32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint32", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.Threads = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint32(tval) + uj.Threads = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_CPUsUserTimeSecs: + + /* handler: uj.CPUsUserTimeSecs type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.CPUsUserTimeSecs = nil + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := float64(tval) + uj.CPUsUserTimeSecs = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_CPUsSystemTimeSecs: + + /* handler: uj.CPUsSystemTimeSecs type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.CPUsSystemTimeSecs = nil + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := float64(tval) + uj.CPUsSystemTimeSecs = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + 
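+// NOTE: the handle_* blocks in this decoder share one shape for optional
+// scalar fields: reject tokens of the wrong JSON type, map a null token to a
+// nil pointer, and otherwise parse fs.Output.Bytes() into a temporary
+// (ttypval) whose address is stored, matching the *uint32/*uint64/*float64
+// struct fields.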
+handle_CPUsLimit: + + /* handler: uj.CPUsLimit type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.CPUsLimit = nil + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := float64(tval) + uj.CPUsLimit = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_CPUsNrPeriods: + + /* handler: uj.CPUsNrPeriods type=uint32 kind=uint32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint32", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.CPUsNrPeriods = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint32(tval) + uj.CPUsNrPeriods = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_CPUsNrThrottled: + + /* handler: uj.CPUsNrThrottled type=uint32 kind=uint32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint32", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.CPUsNrThrottled = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint32(tval) + uj.CPUsNrThrottled = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_CPUsThrottledTimeSecs: + + /* handler: uj.CPUsThrottledTimeSecs type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.CPUsThrottledTimeSecs = nil + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := float64(tval) + uj.CPUsThrottledTimeSecs = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_MemTotalBytes: + + /* handler: uj.MemTotalBytes type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.MemTotalBytes = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.MemTotalBytes = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_MemTotalMemswBytes: + + /* handler: uj.MemTotalMemswBytes type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.MemTotalMemswBytes = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.MemTotalMemswBytes = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_MemLimitBytes: + + /* handler: 
uj.MemLimitBytes type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.MemLimitBytes = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.MemLimitBytes = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_MemSoftLimitBytes: + + /* handler: uj.MemSoftLimitBytes type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.MemSoftLimitBytes = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.MemSoftLimitBytes = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_MemFileBytes: + + /* handler: uj.MemFileBytes type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.MemFileBytes = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.MemFileBytes = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_MemAnonBytes: + + /* handler: uj.MemAnonBytes type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.MemAnonBytes = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.MemAnonBytes = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_MemCacheBytes: + + /* handler: uj.MemCacheBytes type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.MemCacheBytes = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.MemCacheBytes = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_MemRSSBytes: + + /* handler: uj.MemRSSBytes type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.MemRSSBytes = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.MemRSSBytes = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_MemMappedFileBytes: + + /* handler: uj.MemMappedFileBytes type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return 
fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.MemMappedFileBytes = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.MemMappedFileBytes = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_MemSwapBytes: + + /* handler: uj.MemSwapBytes type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.MemSwapBytes = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.MemSwapBytes = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_MemUnevictableBytes: + + /* handler: uj.MemUnevictableBytes type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.MemUnevictableBytes = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.MemUnevictableBytes = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_MemLowPressureCounter: + + /* handler: uj.MemLowPressureCounter type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.MemLowPressureCounter = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.MemLowPressureCounter = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_MemMediumPressureCounter: + + /* handler: uj.MemMediumPressureCounter type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.MemMediumPressureCounter = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.MemMediumPressureCounter = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_MemCriticalPressureCounter: + + /* handler: uj.MemCriticalPressureCounter type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.MemCriticalPressureCounter = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.MemCriticalPressureCounter = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_DiskLimitBytes: + + /* handler: uj.DiskLimitBytes type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != 
fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.DiskLimitBytes = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.DiskLimitBytes = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_DiskUsedBytes: + + /* handler: uj.DiskUsedBytes type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.DiskUsedBytes = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.DiskUsedBytes = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_DiskStatistics: + + /* handler: uj.DiskStatistics type=[]mesos.DiskStatistics kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.DiskStatistics = nil + } else { + + uj.DiskStatistics = []DiskStatistics{} + + wantVal := true + + for { + + var tmp_uj__DiskStatistics DiskStatistics + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__DiskStatistics type=mesos.DiskStatistics kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__DiskStatistics.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.DiskStatistics = append(uj.DiskStatistics, tmp_uj__DiskStatistics) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_BlkioStatistics: + + /* handler: uj.BlkioStatistics type=mesos.CgroupInfo_Blkio_Statistics kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.BlkioStatistics = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.BlkioStatistics == nil { + uj.BlkioStatistics = new(CgroupInfo_Blkio_Statistics) + } + + err = uj.BlkioStatistics.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Perf: + + /* handler: uj.Perf type=mesos.PerfStatistics kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Perf = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Perf == nil { + uj.Perf = new(PerfStatistics) + } + + err = uj.Perf.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_NetRxPackets: + + /* handler: uj.NetRxPackets type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != 
fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.NetRxPackets = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.NetRxPackets = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_NetRxBytes: + + /* handler: uj.NetRxBytes type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.NetRxBytes = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.NetRxBytes = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_NetRxErrors: + + /* handler: uj.NetRxErrors type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.NetRxErrors = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.NetRxErrors = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_NetRxDropped: + + /* handler: uj.NetRxDropped type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.NetRxDropped = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.NetRxDropped = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_NetTxPackets: + + /* handler: uj.NetTxPackets type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.NetTxPackets = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.NetTxPackets = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_NetTxBytes: + + /* handler: uj.NetTxBytes type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.NetTxBytes = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.NetTxBytes = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_NetTxErrors: + + /* handler: uj.NetTxErrors type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.NetTxErrors = nil + + } else { + + 
tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.NetTxErrors = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_NetTxDropped: + + /* handler: uj.NetTxDropped type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.NetTxDropped = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.NetTxDropped = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_NetTCPRttMicrosecsP50: + + /* handler: uj.NetTCPRttMicrosecsP50 type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.NetTCPRttMicrosecsP50 = nil + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := float64(tval) + uj.NetTCPRttMicrosecsP50 = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_NetTCPRttMicrosecsP90: + + /* handler: uj.NetTCPRttMicrosecsP90 type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.NetTCPRttMicrosecsP90 = nil + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := float64(tval) + uj.NetTCPRttMicrosecsP90 = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_NetTCPRttMicrosecsP95: + + /* handler: uj.NetTCPRttMicrosecsP95 type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.NetTCPRttMicrosecsP95 = nil + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := float64(tval) + uj.NetTCPRttMicrosecsP95 = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_NetTCPRttMicrosecsP99: + + /* handler: uj.NetTCPRttMicrosecsP99 type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.NetTCPRttMicrosecsP99 = nil + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := float64(tval) + uj.NetTCPRttMicrosecsP99 = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_NetTCPActiveConnections: + + /* handler: uj.NetTCPActiveConnections type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return 
fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.NetTCPActiveConnections = nil + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := float64(tval) + uj.NetTCPActiveConnections = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_NetTCPTimeWaitConnections: + + /* handler: uj.NetTCPTimeWaitConnections type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.NetTCPTimeWaitConnections = nil + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := float64(tval) + uj.NetTCPTimeWaitConnections = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_NetTrafficControlStatistics: + + /* handler: uj.NetTrafficControlStatistics type=[]mesos.TrafficControlStatistics kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.NetTrafficControlStatistics = nil + } else { + + uj.NetTrafficControlStatistics = []TrafficControlStatistics{} + + wantVal := true + + for { + + var tmp_uj__NetTrafficControlStatistics TrafficControlStatistics + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__NetTrafficControlStatistics type=mesos.TrafficControlStatistics kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__NetTrafficControlStatistics.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.NetTrafficControlStatistics = append(uj.NetTrafficControlStatistics, tmp_uj__NetTrafficControlStatistics) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_NetSNMPStatistics: + + /* handler: uj.NetSNMPStatistics type=mesos.SNMPStatistics kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.NetSNMPStatistics = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.NetSNMPStatistics == nil { + uj.NetSNMPStatistics = new(SNMPStatistics) + } + + err = uj.NetSNMPStatistics.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *ResourceUsage) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *ResourceUsage) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"executors":`) + if mj.Executors != nil { + buf.WriteString(`[`) + for i, v := range mj.Executors { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteString(`,"total":`) + if mj.Total != nil { + buf.WriteString(`[`) + for i, v := range mj.Total { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_ResourceUsagebase = iota + ffj_t_ResourceUsageno_such_key + + ffj_t_ResourceUsage_Executors + + ffj_t_ResourceUsage_Total +) + +var ffj_key_ResourceUsage_Executors = []byte("executors") + +var ffj_key_ResourceUsage_Total = []byte("total") + +func (uj *ResourceUsage) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *ResourceUsage) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_ResourceUsagebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: 
%v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_ResourceUsageno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'e': + + if bytes.Equal(ffj_key_ResourceUsage_Executors, kn) { + currentKey = ffj_t_ResourceUsage_Executors + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_ResourceUsage_Total, kn) { + currentKey = ffj_t_ResourceUsage_Total + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_ResourceUsage_Total, kn) { + currentKey = ffj_t_ResourceUsage_Total + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceUsage_Executors, kn) { + currentKey = ffj_t_ResourceUsage_Executors + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_ResourceUsageno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_ResourceUsage_Executors: + goto handle_Executors + + case ffj_t_ResourceUsage_Total: + goto handle_Total + + case ffj_t_ResourceUsageno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Executors: + + /* handler: uj.Executors type=[]mesos.ResourceUsage_Executor kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Executors = nil + } else { + + uj.Executors = []ResourceUsage_Executor{} + + wantVal := true + + for { + + var tmp_uj__Executors ResourceUsage_Executor + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Executors type=mesos.ResourceUsage_Executor kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Executors.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Executors = append(uj.Executors, tmp_uj__Executors) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Total: + + /* handler: uj.Total type=[]mesos.Resource kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Total = nil + } else { + + uj.Total = []Resource{} + + wantVal := true + + for { + + var tmp_uj__Total Resource + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Total type=mesos.Resource kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Total.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Total = append(uj.Total, tmp_uj__Total) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *ResourceUsage_Executor) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *ResourceUsage_Executor) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"executor_info":`) + + { + + err = mj.ExecutorInfo.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteString(`,"allocated":`) + if mj.Allocated != nil { + buf.WriteString(`[`) + for i, v := range mj.Allocated { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + if mj.Statistics != nil { + if true { + buf.WriteString(`"statistics":`) + + { + + err = mj.Statistics.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.WriteString(`"container_id":`) + + { + + err = 
mj.ContainerID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteString(`,"tasks":`) + if mj.Tasks != nil { + buf.WriteString(`[`) + for i, v := range mj.Tasks { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_ResourceUsage_Executorbase = iota + ffj_t_ResourceUsage_Executorno_such_key + + ffj_t_ResourceUsage_Executor_ExecutorInfo + + ffj_t_ResourceUsage_Executor_Allocated + + ffj_t_ResourceUsage_Executor_Statistics + + ffj_t_ResourceUsage_Executor_ContainerID + + ffj_t_ResourceUsage_Executor_Tasks +) + +var ffj_key_ResourceUsage_Executor_ExecutorInfo = []byte("executor_info") + +var ffj_key_ResourceUsage_Executor_Allocated = []byte("allocated") + +var ffj_key_ResourceUsage_Executor_Statistics = []byte("statistics") + +var ffj_key_ResourceUsage_Executor_ContainerID = []byte("container_id") + +var ffj_key_ResourceUsage_Executor_Tasks = []byte("tasks") + +func (uj *ResourceUsage_Executor) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *ResourceUsage_Executor) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_ResourceUsage_Executorbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_ResourceUsage_Executorno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'a': + + if bytes.Equal(ffj_key_ResourceUsage_Executor_Allocated, kn) { + currentKey = ffj_t_ResourceUsage_Executor_Allocated + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'c': + + if bytes.Equal(ffj_key_ResourceUsage_Executor_ContainerID, kn) { + currentKey = ffj_t_ResourceUsage_Executor_ContainerID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'e': + + if bytes.Equal(ffj_key_ResourceUsage_Executor_ExecutorInfo, kn) { + currentKey = ffj_t_ResourceUsage_Executor_ExecutorInfo + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_ResourceUsage_Executor_Statistics, kn) { + currentKey = ffj_t_ResourceUsage_Executor_Statistics + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_ResourceUsage_Executor_Tasks, kn) { + currentKey = ffj_t_ResourceUsage_Executor_Tasks + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_ResourceUsage_Executor_Tasks, kn) { + currentKey = ffj_t_ResourceUsage_Executor_Tasks + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_ResourceUsage_Executor_ContainerID, kn) { + currentKey = ffj_t_ResourceUsage_Executor_ContainerID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceUsage_Executor_Statistics, kn) { + currentKey = ffj_t_ResourceUsage_Executor_Statistics + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_ResourceUsage_Executor_Allocated, kn) { + currentKey = ffj_t_ResourceUsage_Executor_Allocated + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_ResourceUsage_Executor_ExecutorInfo, kn) { + currentKey = ffj_t_ResourceUsage_Executor_ExecutorInfo + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_ResourceUsage_Executorno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_ResourceUsage_Executor_ExecutorInfo: + goto handle_ExecutorInfo + + case ffj_t_ResourceUsage_Executor_Allocated: + goto handle_Allocated + + case ffj_t_ResourceUsage_Executor_Statistics: + goto handle_Statistics + + case ffj_t_ResourceUsage_Executor_ContainerID: + goto handle_ContainerID + + case ffj_t_ResourceUsage_Executor_Tasks: + goto handle_Tasks + + case ffj_t_ResourceUsage_Executorno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ExecutorInfo: + + /* handler: uj.ExecutorInfo type=mesos.ExecutorInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.ExecutorInfo.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = 
fflib.FFParse_after_value + goto mainparse + +handle_Allocated: + + /* handler: uj.Allocated type=[]mesos.Resource kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Allocated = nil + } else { + + uj.Allocated = []Resource{} + + wantVal := true + + for { + + var tmp_uj__Allocated Resource + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Allocated type=mesos.Resource kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Allocated.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Allocated = append(uj.Allocated, tmp_uj__Allocated) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Statistics: + + /* handler: uj.Statistics type=mesos.ResourceStatistics kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Statistics = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Statistics == nil { + uj.Statistics = new(ResourceStatistics) + } + + err = uj.Statistics.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ContainerID: + + /* handler: uj.ContainerID type=mesos.ContainerID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.ContainerID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Tasks: + + /* handler: uj.Tasks type=[]mesos.ResourceUsage_Executor_Task kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Tasks = nil + } else { + + uj.Tasks = []ResourceUsage_Executor_Task{} + + wantVal := true + + for { + + var tmp_uj__Tasks ResourceUsage_Executor_Task + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Tasks type=mesos.ResourceUsage_Executor_Task kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Tasks.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Tasks = append(uj.Tasks, tmp_uj__Tasks) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *ResourceUsage_Executor_Task) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *ResourceUsage_Executor_Task) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "name":`) + fflib.WriteJsonString(buf, string(mj.Name)) + buf.WriteString(`,"id":`) + + { + + err = mj.ID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteString(`,"resources":`) + if mj.Resources != nil { + buf.WriteString(`[`) + for i, v := range mj.Resources { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + if mj.Labels != nil { + if true { + buf.WriteString(`"labels":`) + + { + + err = mj.Labels.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_ResourceUsage_Executor_Taskbase = iota + ffj_t_ResourceUsage_Executor_Taskno_such_key + + ffj_t_ResourceUsage_Executor_Task_Name + + ffj_t_ResourceUsage_Executor_Task_ID + + ffj_t_ResourceUsage_Executor_Task_Resources + + ffj_t_ResourceUsage_Executor_Task_Labels +) + +var ffj_key_ResourceUsage_Executor_Task_Name = []byte("name") + +var ffj_key_ResourceUsage_Executor_Task_ID = []byte("id") + +var ffj_key_ResourceUsage_Executor_Task_Resources = []byte("resources") + +var ffj_key_ResourceUsage_Executor_Task_Labels = []byte("labels") + +func (uj *ResourceUsage_Executor_Task) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *ResourceUsage_Executor_Task) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_ResourceUsage_Executor_Taskbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + 
wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_ResourceUsage_Executor_Taskno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'i': + + if bytes.Equal(ffj_key_ResourceUsage_Executor_Task_ID, kn) { + currentKey = ffj_t_ResourceUsage_Executor_Task_ID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'l': + + if bytes.Equal(ffj_key_ResourceUsage_Executor_Task_Labels, kn) { + currentKey = ffj_t_ResourceUsage_Executor_Task_Labels + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffj_key_ResourceUsage_Executor_Task_Name, kn) { + currentKey = ffj_t_ResourceUsage_Executor_Task_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'r': + + if bytes.Equal(ffj_key_ResourceUsage_Executor_Task_Resources, kn) { + currentKey = ffj_t_ResourceUsage_Executor_Task_Resources + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_ResourceUsage_Executor_Task_Labels, kn) { + currentKey = ffj_t_ResourceUsage_Executor_Task_Labels + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_ResourceUsage_Executor_Task_Resources, kn) { + currentKey = ffj_t_ResourceUsage_Executor_Task_Resources + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_ResourceUsage_Executor_Task_ID, kn) { + currentKey = ffj_t_ResourceUsage_Executor_Task_ID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_ResourceUsage_Executor_Task_Name, kn) { + currentKey = ffj_t_ResourceUsage_Executor_Task_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_ResourceUsage_Executor_Taskno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_ResourceUsage_Executor_Task_Name: + goto handle_Name + + case ffj_t_ResourceUsage_Executor_Task_ID: + goto handle_ID + + case ffj_t_ResourceUsage_Executor_Task_Resources: + goto handle_Resources + + case ffj_t_ResourceUsage_Executor_Task_Labels: + goto handle_Labels + + case ffj_t_ResourceUsage_Executor_Taskno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Name: + + /* handler: uj.Name type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return 
fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Name = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ID: + + /* handler: uj.ID type=mesos.TaskID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.ID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Resources: + + /* handler: uj.Resources type=[]mesos.Resource kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Resources = nil + } else { + + uj.Resources = []Resource{} + + wantVal := true + + for { + + var tmp_uj__Resources Resource + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Resources type=mesos.Resource kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Resources.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Resources = append(uj.Resources, tmp_uj__Resources) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Labels: + + /* handler: uj.Labels type=mesos.Labels kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Labels = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Labels == nil { + uj.Labels = new(Labels) + } + + err = uj.Labels.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Resource_AllocationInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Resource_AllocationInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.Role != nil { + if true { + buf.WriteString(`"role":`) + fflib.WriteJsonString(buf, string(*mj.Role)) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return 
nil +} + +const ( + ffj_t_Resource_AllocationInfobase = iota + ffj_t_Resource_AllocationInfono_such_key + + ffj_t_Resource_AllocationInfo_Role +) + +var ffj_key_Resource_AllocationInfo_Role = []byte("role") + +func (uj *Resource_AllocationInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Resource_AllocationInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Resource_AllocationInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Resource_AllocationInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'r': + + if bytes.Equal(ffj_key_Resource_AllocationInfo_Role, kn) { + currentKey = ffj_t_Resource_AllocationInfo_Role + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Resource_AllocationInfo_Role, kn) { + currentKey = ffj_t_Resource_AllocationInfo_Role + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Resource_AllocationInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Resource_AllocationInfo_Role: + goto handle_Role + + case ffj_t_Resource_AllocationInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Role: + + /* handler: uj.Role type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Role = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Role = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, 
tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Resource_DiskInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Resource_DiskInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.Persistence != nil { + if true { + buf.WriteString(`"persistence":`) + + { + + err = mj.Persistence.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Volume != nil { + if true { + buf.WriteString(`"volume":`) + + { + + err = mj.Volume.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Source != nil { + if true { + buf.WriteString(`"source":`) + + { + + err = mj.Source.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Resource_DiskInfobase = iota + ffj_t_Resource_DiskInfono_such_key + + ffj_t_Resource_DiskInfo_Persistence + + ffj_t_Resource_DiskInfo_Volume + + ffj_t_Resource_DiskInfo_Source +) + +var ffj_key_Resource_DiskInfo_Persistence = []byte("persistence") + +var ffj_key_Resource_DiskInfo_Volume = []byte("volume") + +var ffj_key_Resource_DiskInfo_Source = []byte("source") + +func (uj *Resource_DiskInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Resource_DiskInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Resource_DiskInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Resource_DiskInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'p': + + if bytes.Equal(ffj_key_Resource_DiskInfo_Persistence, kn) { + currentKey = ffj_t_Resource_DiskInfo_Persistence + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_Resource_DiskInfo_Source, kn) { + currentKey = ffj_t_Resource_DiskInfo_Source + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'v': + + if bytes.Equal(ffj_key_Resource_DiskInfo_Volume, kn) { + currentKey = ffj_t_Resource_DiskInfo_Volume + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Resource_DiskInfo_Source, kn) { + currentKey = ffj_t_Resource_DiskInfo_Source + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Resource_DiskInfo_Volume, kn) { + currentKey = ffj_t_Resource_DiskInfo_Volume + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Resource_DiskInfo_Persistence, kn) { + currentKey = ffj_t_Resource_DiskInfo_Persistence + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Resource_DiskInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Resource_DiskInfo_Persistence: + goto handle_Persistence + + case ffj_t_Resource_DiskInfo_Volume: + goto handle_Volume + + case ffj_t_Resource_DiskInfo_Source: + goto handle_Source + + case ffj_t_Resource_DiskInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Persistence: + + /* handler: uj.Persistence type=mesos.Resource_DiskInfo_Persistence kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Persistence = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Persistence == nil { + uj.Persistence = new(Resource_DiskInfo_Persistence) + } + + err = uj.Persistence.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Volume: + + /* handler: uj.Volume type=mesos.Volume kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Volume = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Volume == nil { + uj.Volume = new(Volume) + } + + err = uj.Volume.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Source: + + /* handler: uj.Source type=mesos.Resource_DiskInfo_Source kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Source = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Source == nil { + uj.Source = new(Resource_DiskInfo_Source) + } + + err = uj.Source.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err 
+ } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Resource_DiskInfo_Persistence) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Resource_DiskInfo_Persistence) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "id":`) + fflib.WriteJsonString(buf, string(mj.ID)) + buf.WriteByte(',') + if mj.Principal != nil { + if true { + buf.WriteString(`"principal":`) + fflib.WriteJsonString(buf, string(*mj.Principal)) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Resource_DiskInfo_Persistencebase = iota + ffj_t_Resource_DiskInfo_Persistenceno_such_key + + ffj_t_Resource_DiskInfo_Persistence_ID + + ffj_t_Resource_DiskInfo_Persistence_Principal +) + +var ffj_key_Resource_DiskInfo_Persistence_ID = []byte("id") + +var ffj_key_Resource_DiskInfo_Persistence_Principal = []byte("principal") + +func (uj *Resource_DiskInfo_Persistence) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Resource_DiskInfo_Persistence) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Resource_DiskInfo_Persistencebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
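+			// An empty key matches no field; treat it as unknown so its
+			// value is skipped rather than failing the parse.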
+ currentKey = ffj_t_Resource_DiskInfo_Persistenceno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'i': + + if bytes.Equal(ffj_key_Resource_DiskInfo_Persistence_ID, kn) { + currentKey = ffj_t_Resource_DiskInfo_Persistence_ID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffj_key_Resource_DiskInfo_Persistence_Principal, kn) { + currentKey = ffj_t_Resource_DiskInfo_Persistence_Principal + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Resource_DiskInfo_Persistence_Principal, kn) { + currentKey = ffj_t_Resource_DiskInfo_Persistence_Principal + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Resource_DiskInfo_Persistence_ID, kn) { + currentKey = ffj_t_Resource_DiskInfo_Persistence_ID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Resource_DiskInfo_Persistenceno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Resource_DiskInfo_Persistence_ID: + goto handle_ID + + case ffj_t_Resource_DiskInfo_Persistence_Principal: + goto handle_Principal + + case ffj_t_Resource_DiskInfo_Persistenceno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ID: + + /* handler: uj.ID type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.ID = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Principal: + + /* handler: uj.Principal type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Principal = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Principal = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Resource_DiskInfo_Source) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj 
*Resource_DiskInfo_Source) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "type":`) + + { + + obj, err = mj.Type.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + if mj.Path != nil { + if true { + buf.WriteString(`"path":`) + + { + + err = mj.Path.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Mount != nil { + if true { + buf.WriteString(`"mount":`) + + { + + err = mj.Mount.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.ID != nil { + if true { + buf.WriteString(`"id":`) + fflib.WriteJsonString(buf, string(*mj.ID)) + buf.WriteByte(',') + } + } + if mj.Metadata != nil { + if true { + buf.WriteString(`"metadata":`) + + { + + err = mj.Metadata.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Profile != nil { + if true { + buf.WriteString(`"profile":`) + fflib.WriteJsonString(buf, string(*mj.Profile)) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Resource_DiskInfo_Sourcebase = iota + ffj_t_Resource_DiskInfo_Sourceno_such_key + + ffj_t_Resource_DiskInfo_Source_Type + + ffj_t_Resource_DiskInfo_Source_Path + + ffj_t_Resource_DiskInfo_Source_Mount + + ffj_t_Resource_DiskInfo_Source_ID + + ffj_t_Resource_DiskInfo_Source_Metadata + + ffj_t_Resource_DiskInfo_Source_Profile +) + +var ffj_key_Resource_DiskInfo_Source_Type = []byte("type") + +var ffj_key_Resource_DiskInfo_Source_Path = []byte("path") + +var ffj_key_Resource_DiskInfo_Source_Mount = []byte("mount") + +var ffj_key_Resource_DiskInfo_Source_ID = []byte("id") + +var ffj_key_Resource_DiskInfo_Source_Metadata = []byte("metadata") + +var ffj_key_Resource_DiskInfo_Source_Profile = []byte("profile") + +func (uj *Resource_DiskInfo_Source) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Resource_DiskInfo_Source) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Resource_DiskInfo_Sourcebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Resource_DiskInfo_Sourceno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'i': + + if bytes.Equal(ffj_key_Resource_DiskInfo_Source_ID, kn) { + currentKey = ffj_t_Resource_DiskInfo_Source_ID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'm': + + if bytes.Equal(ffj_key_Resource_DiskInfo_Source_Mount, kn) { + currentKey = ffj_t_Resource_DiskInfo_Source_Mount + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Resource_DiskInfo_Source_Metadata, kn) { + currentKey = ffj_t_Resource_DiskInfo_Source_Metadata + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffj_key_Resource_DiskInfo_Source_Path, kn) { + currentKey = ffj_t_Resource_DiskInfo_Source_Path + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Resource_DiskInfo_Source_Profile, kn) { + currentKey = ffj_t_Resource_DiskInfo_Source_Profile + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_Resource_DiskInfo_Source_Type, kn) { + currentKey = ffj_t_Resource_DiskInfo_Source_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Resource_DiskInfo_Source_Profile, kn) { + currentKey = ffj_t_Resource_DiskInfo_Source_Profile + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Resource_DiskInfo_Source_Metadata, kn) { + currentKey = ffj_t_Resource_DiskInfo_Source_Metadata + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Resource_DiskInfo_Source_ID, kn) { + currentKey = ffj_t_Resource_DiskInfo_Source_ID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Resource_DiskInfo_Source_Mount, kn) { + currentKey = ffj_t_Resource_DiskInfo_Source_Mount + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Resource_DiskInfo_Source_Path, kn) { + currentKey = ffj_t_Resource_DiskInfo_Source_Path + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Resource_DiskInfo_Source_Type, kn) { + currentKey = ffj_t_Resource_DiskInfo_Source_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Resource_DiskInfo_Sourceno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Resource_DiskInfo_Source_Type: + goto handle_Type + + case ffj_t_Resource_DiskInfo_Source_Path: + goto handle_Path + + case ffj_t_Resource_DiskInfo_Source_Mount: + goto handle_Mount + + case ffj_t_Resource_DiskInfo_Source_ID: + goto handle_ID + + case ffj_t_Resource_DiskInfo_Source_Metadata: + goto handle_Metadata + + case ffj_t_Resource_DiskInfo_Source_Profile: + goto handle_Profile + + case ffj_t_Resource_DiskInfo_Sourceno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + 
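+		// Each handle_* label below decodes exactly one field, then jumps
+		// back to mainparse with state set to FFParse_after_value.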
} + } + +handle_Type: + + /* handler: uj.Type type=mesos.Resource_DiskInfo_Source_Type kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = uj.Type.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Path: + + /* handler: uj.Path type=mesos.Resource_DiskInfo_Source_Path kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Path = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Path == nil { + uj.Path = new(Resource_DiskInfo_Source_Path) + } + + err = uj.Path.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Mount: + + /* handler: uj.Mount type=mesos.Resource_DiskInfo_Source_Mount kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Mount = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Mount == nil { + uj.Mount = new(Resource_DiskInfo_Source_Mount) + } + + err = uj.Mount.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ID: + + /* handler: uj.ID type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.ID = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.ID = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Metadata: + + /* handler: uj.Metadata type=mesos.Labels kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Metadata = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Metadata == nil { + uj.Metadata = new(Labels) + } + + err = uj.Metadata.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Profile: + + /* handler: uj.Profile type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Profile = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Profile = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Resource_DiskInfo_Source_Mount) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := 
mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Resource_DiskInfo_Source_Mount) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.Root != nil { + if true { + buf.WriteString(`"root":`) + fflib.WriteJsonString(buf, string(*mj.Root)) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Resource_DiskInfo_Source_Mountbase = iota + ffj_t_Resource_DiskInfo_Source_Mountno_such_key + + ffj_t_Resource_DiskInfo_Source_Mount_Root +) + +var ffj_key_Resource_DiskInfo_Source_Mount_Root = []byte("root") + +func (uj *Resource_DiskInfo_Source_Mount) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Resource_DiskInfo_Source_Mount) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Resource_DiskInfo_Source_Mountbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Resource_DiskInfo_Source_Mountno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'r': + + if bytes.Equal(ffj_key_Resource_DiskInfo_Source_Mount_Root, kn) { + currentKey = ffj_t_Resource_DiskInfo_Source_Mount_Root + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Resource_DiskInfo_Source_Mount_Root, kn) { + currentKey = ffj_t_Resource_DiskInfo_Source_Mount_Root + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Resource_DiskInfo_Source_Mountno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Resource_DiskInfo_Source_Mount_Root: + goto handle_Root + + case ffj_t_Resource_DiskInfo_Source_Mountno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Root: + + /* handler: uj.Root type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Root = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Root = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Resource_DiskInfo_Source_Path) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Resource_DiskInfo_Source_Path) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.Root != nil { + if true { + buf.WriteString(`"root":`) + fflib.WriteJsonString(buf, string(*mj.Root)) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Resource_DiskInfo_Source_Pathbase = iota + ffj_t_Resource_DiskInfo_Source_Pathno_such_key + + ffj_t_Resource_DiskInfo_Source_Path_Root +) + +var ffj_key_Resource_DiskInfo_Source_Path_Root = []byte("root") + +func (uj *Resource_DiskInfo_Source_Path) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Resource_DiskInfo_Source_Path) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state 
fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Resource_DiskInfo_Source_Pathbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Resource_DiskInfo_Source_Pathno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'r': + + if bytes.Equal(ffj_key_Resource_DiskInfo_Source_Path_Root, kn) { + currentKey = ffj_t_Resource_DiskInfo_Source_Path_Root + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Resource_DiskInfo_Source_Path_Root, kn) { + currentKey = ffj_t_Resource_DiskInfo_Source_Path_Root + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Resource_DiskInfo_Source_Pathno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Resource_DiskInfo_Source_Path_Root: + goto handle_Root + + case ffj_t_Resource_DiskInfo_Source_Pathno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Root: + + /* handler: uj.Root type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Root = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Root = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Resource_ReservationInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return 
buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Resource_ReservationInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.Type != nil { + if true { + buf.WriteString(`"type":`) + + { + + obj, err = mj.Type.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + } + } + if mj.Role != nil { + if true { + buf.WriteString(`"role":`) + fflib.WriteJsonString(buf, string(*mj.Role)) + buf.WriteByte(',') + } + } + if mj.Principal != nil { + if true { + buf.WriteString(`"principal":`) + fflib.WriteJsonString(buf, string(*mj.Principal)) + buf.WriteByte(',') + } + } + if mj.Labels != nil { + if true { + buf.WriteString(`"labels":`) + + { + + err = mj.Labels.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Resource_ReservationInfobase = iota + ffj_t_Resource_ReservationInfono_such_key + + ffj_t_Resource_ReservationInfo_Type + + ffj_t_Resource_ReservationInfo_Role + + ffj_t_Resource_ReservationInfo_Principal + + ffj_t_Resource_ReservationInfo_Labels +) + +var ffj_key_Resource_ReservationInfo_Type = []byte("type") + +var ffj_key_Resource_ReservationInfo_Role = []byte("role") + +var ffj_key_Resource_ReservationInfo_Principal = []byte("principal") + +var ffj_key_Resource_ReservationInfo_Labels = []byte("labels") + +func (uj *Resource_ReservationInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Resource_ReservationInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Resource_ReservationInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Resource_ReservationInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'l': + + if bytes.Equal(ffj_key_Resource_ReservationInfo_Labels, kn) { + currentKey = ffj_t_Resource_ReservationInfo_Labels + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffj_key_Resource_ReservationInfo_Principal, kn) { + currentKey = ffj_t_Resource_ReservationInfo_Principal + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'r': + + if bytes.Equal(ffj_key_Resource_ReservationInfo_Role, kn) { + currentKey = ffj_t_Resource_ReservationInfo_Role + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_Resource_ReservationInfo_Type, kn) { + currentKey = ffj_t_Resource_ReservationInfo_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Resource_ReservationInfo_Labels, kn) { + currentKey = ffj_t_Resource_ReservationInfo_Labels + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Resource_ReservationInfo_Principal, kn) { + currentKey = ffj_t_Resource_ReservationInfo_Principal + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Resource_ReservationInfo_Role, kn) { + currentKey = ffj_t_Resource_ReservationInfo_Role + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Resource_ReservationInfo_Type, kn) { + currentKey = ffj_t_Resource_ReservationInfo_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Resource_ReservationInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Resource_ReservationInfo_Type: + goto handle_Type + + case ffj_t_Resource_ReservationInfo_Role: + goto handle_Role + + case ffj_t_Resource_ReservationInfo_Principal: + goto handle_Principal + + case ffj_t_Resource_ReservationInfo_Labels: + goto handle_Labels + + case ffj_t_Resource_ReservationInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Type: + + /* handler: uj.Type type=mesos.Resource_ReservationInfo_Type kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Type = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + if uj.Type == nil { + uj.Type = new(Resource_ReservationInfo_Type) + } + + err = uj.Type.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Role: + + /* handler: uj.Role type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + 
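+		// A JSON null clears the optional *string field.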
uj.Role = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Role = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Principal: + + /* handler: uj.Principal type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Principal = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Principal = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Labels: + + /* handler: uj.Labels type=mesos.Labels kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Labels = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Labels == nil { + uj.Labels = new(Labels) + } + + err = uj.Labels.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Resource_RevocableInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Resource_RevocableInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{}`) + return nil +} + +const ( + ffj_t_Resource_RevocableInfobase = iota + ffj_t_Resource_RevocableInfono_such_key +) + +func (uj *Resource_RevocableInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Resource_RevocableInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Resource_RevocableInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
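+			// Resource_RevocableInfo declares no JSON fields, so every key,
+			// empty or not, resolves to no_such_key and its value is skipped.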
+ currentKey = ffj_t_Resource_RevocableInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + } + + currentKey = ffj_t_Resource_RevocableInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Resource_RevocableInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Resource_SharedInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Resource_SharedInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{}`) + return nil +} + +const ( + ffj_t_Resource_SharedInfobase = iota + ffj_t_Resource_SharedInfono_such_key +) + +func (uj *Resource_SharedInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Resource_SharedInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Resource_SharedInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
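+			// Resource_SharedInfo is likewise field-free; all keys are skipped.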
+ currentKey = ffj_t_Resource_SharedInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + } + + currentKey = ffj_t_Resource_SharedInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Resource_SharedInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Role) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Role) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"name":`) + fflib.WriteJsonString(buf, string(mj.Name)) + buf.WriteString(`,"weight":`) + fflib.AppendFloat(buf, float64(mj.Weight), 'g', -1, 64) + buf.WriteString(`,"frameworks":`) + if mj.Frameworks != nil { + buf.WriteString(`[`) + for i, v := range mj.Frameworks { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteString(`,"resources":`) + if mj.Resources != nil { + buf.WriteString(`[`) + for i, v := range mj.Resources { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Rolebase = iota + ffj_t_Roleno_such_key + + ffj_t_Role_Name + + ffj_t_Role_Weight + + ffj_t_Role_Frameworks + + ffj_t_Role_Resources +) + +var ffj_key_Role_Name = []byte("name") + +var ffj_key_Role_Weight = []byte("weight") + +var ffj_key_Role_Frameworks = []byte("frameworks") + +var ffj_key_Role_Resources = []byte("resources") + +func (uj *Role) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Role) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Rolebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case 
fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Roleno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'f': + + if bytes.Equal(ffj_key_Role_Frameworks, kn) { + currentKey = ffj_t_Role_Frameworks + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffj_key_Role_Name, kn) { + currentKey = ffj_t_Role_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'r': + + if bytes.Equal(ffj_key_Role_Resources, kn) { + currentKey = ffj_t_Role_Resources + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'w': + + if bytes.Equal(ffj_key_Role_Weight, kn) { + currentKey = ffj_t_Role_Weight + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Role_Resources, kn) { + currentKey = ffj_t_Role_Resources + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Role_Frameworks, kn) { + currentKey = ffj_t_Role_Frameworks + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Role_Weight, kn) { + currentKey = ffj_t_Role_Weight + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Role_Name, kn) { + currentKey = ffj_t_Role_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Roleno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Role_Name: + goto handle_Name + + case ffj_t_Role_Weight: + goto handle_Weight + + case ffj_t_Role_Frameworks: + goto handle_Frameworks + + case ffj_t_Role_Resources: + goto handle_Resources + + case ffj_t_Roleno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Name: + + /* handler: uj.Name type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Name = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Weight: + + /* handler: uj.Weight type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != 
fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + + if err != nil { + return fs.WrapErr(err) + } + + uj.Weight = float64(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Frameworks: + + /* handler: uj.Frameworks type=[]mesos.FrameworkID kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Frameworks = nil + } else { + + uj.Frameworks = []FrameworkID{} + + wantVal := true + + for { + + var tmp_uj__Frameworks FrameworkID + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Frameworks type=mesos.FrameworkID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Frameworks.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Frameworks = append(uj.Frameworks, tmp_uj__Frameworks) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Resources: + + /* handler: uj.Resources type=[]mesos.Resource kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Resources = nil + } else { + + uj.Resources = []Resource{} + + wantVal := true + + for { + + var tmp_uj__Resources Resource + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
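+					// wantVal is still true here, meaning this comma was not
+					// preceded by a value, so the array literal is malformed.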
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Resources type=mesos.Resource kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Resources.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Resources = append(uj.Resources, tmp_uj__Resources) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *SNMPStatistics) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *SNMPStatistics) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.IPStats != nil { + if true { + buf.WriteString(`"ip_stats":`) + + { + + err = mj.IPStats.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.ICMPStats != nil { + if true { + buf.WriteString(`"icmp_stats":`) + + { + + err = mj.ICMPStats.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.TCPStats != nil { + if true { + buf.WriteString(`"tcp_stats":`) + + { + + err = mj.TCPStats.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.UDPStats != nil { + if true { + buf.WriteString(`"udp_stats":`) + + { + + err = mj.UDPStats.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_SNMPStatisticsbase = iota + ffj_t_SNMPStatisticsno_such_key + + ffj_t_SNMPStatistics_IPStats + + ffj_t_SNMPStatistics_ICMPStats + + ffj_t_SNMPStatistics_TCPStats + + ffj_t_SNMPStatistics_UDPStats +) + +var ffj_key_SNMPStatistics_IPStats = []byte("ip_stats") + +var ffj_key_SNMPStatistics_ICMPStats = []byte("icmp_stats") + +var ffj_key_SNMPStatistics_TCPStats = []byte("tcp_stats") + +var ffj_key_SNMPStatistics_UDPStats = []byte("udp_stats") + +func (uj *SNMPStatistics) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *SNMPStatistics) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_SNMPStatisticsbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = 
fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_SNMPStatisticsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'i': + + if bytes.Equal(ffj_key_SNMPStatistics_IPStats, kn) { + currentKey = ffj_t_SNMPStatistics_IPStats + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_SNMPStatistics_ICMPStats, kn) { + currentKey = ffj_t_SNMPStatistics_ICMPStats + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_SNMPStatistics_TCPStats, kn) { + currentKey = ffj_t_SNMPStatistics_TCPStats + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'u': + + if bytes.Equal(ffj_key_SNMPStatistics_UDPStats, kn) { + currentKey = ffj_t_SNMPStatistics_UDPStats + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_SNMPStatistics_UDPStats, kn) { + currentKey = ffj_t_SNMPStatistics_UDPStats + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_SNMPStatistics_TCPStats, kn) { + currentKey = ffj_t_SNMPStatistics_TCPStats + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_SNMPStatistics_ICMPStats, kn) { + currentKey = ffj_t_SNMPStatistics_ICMPStats + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_SNMPStatistics_IPStats, kn) { + currentKey = ffj_t_SNMPStatistics_IPStats + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_SNMPStatisticsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_SNMPStatistics_IPStats: + goto handle_IPStats + + case ffj_t_SNMPStatistics_ICMPStats: + goto handle_ICMPStats + + case ffj_t_SNMPStatistics_TCPStats: + goto handle_TCPStats + + case ffj_t_SNMPStatistics_UDPStats: + goto handle_UDPStats + + case ffj_t_SNMPStatisticsno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_IPStats: + + /* handler: uj.IPStats type=mesos.IpStatistics kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.IPStats = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.IPStats == nil { + uj.IPStats = new(IpStatistics) + } + + err = uj.IPStats.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ICMPStats: + + /* 
handler: uj.ICMPStats type=mesos.IcmpStatistics kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.ICMPStats = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.ICMPStats == nil { + uj.ICMPStats = new(IcmpStatistics) + } + + err = uj.ICMPStats.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_TCPStats: + + /* handler: uj.TCPStats type=mesos.TcpStatistics kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.TCPStats = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.TCPStats == nil { + uj.TCPStats = new(TcpStatistics) + } + + err = uj.TCPStats.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_UDPStats: + + /* handler: uj.UDPStats type=mesos.UdpStatistics kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.UDPStats = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.UDPStats == nil { + uj.UDPStats = new(UdpStatistics) + } + + err = uj.UDPStats.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Secret) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Secret) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "type":`) + + { + + obj, err = mj.Type.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + if mj.Reference != nil { + if true { + buf.WriteString(`"reference":`) + + { + + err = mj.Reference.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Value != nil { + if true { + buf.WriteString(`"value":`) + + { + + err = mj.Value.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Secretbase = iota + ffj_t_Secretno_such_key + + ffj_t_Secret_Type + + ffj_t_Secret_Reference + + ffj_t_Secret_Value +) + +var ffj_key_Secret_Type = []byte("type") + +var ffj_key_Secret_Reference = []byte("reference") + +var ffj_key_Secret_Value = []byte("value") + +func (uj *Secret) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Secret) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Secretbase + _ = currentKey + tok := fflib.FFTok_init + 
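+	// wantedTok records the token the state machine expected, purely for
+	// the wrongtokenerror diagnostic below.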
wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Secretno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'r': + + if bytes.Equal(ffj_key_Secret_Reference, kn) { + currentKey = ffj_t_Secret_Reference + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_Secret_Type, kn) { + currentKey = ffj_t_Secret_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'v': + + if bytes.Equal(ffj_key_Secret_Value, kn) { + currentKey = ffj_t_Secret_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Secret_Value, kn) { + currentKey = ffj_t_Secret_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Secret_Reference, kn) { + currentKey = ffj_t_Secret_Reference + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Secret_Type, kn) { + currentKey = ffj_t_Secret_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Secretno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Secret_Type: + goto handle_Type + + case ffj_t_Secret_Reference: + goto handle_Reference + + case ffj_t_Secret_Value: + goto handle_Value + + case ffj_t_Secretno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Type: + + /* handler: uj.Type type=mesos.Secret_Type kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = uj.Type.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Reference: + + /* handler: uj.Reference type=mesos.Secret_Reference kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Reference = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Reference == nil 
{ + uj.Reference = new(Secret_Reference) + } + + err = uj.Reference.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Value: + + /* handler: uj.Value type=mesos.Secret_Value kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Value = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Value == nil { + uj.Value = new(Secret_Value) + } + + err = uj.Value.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Secret_Reference) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Secret_Reference) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "name":`) + fflib.WriteJsonString(buf, string(mj.Name)) + buf.WriteByte(',') + if mj.Key != nil { + if true { + buf.WriteString(`"key":`) + fflib.WriteJsonString(buf, string(*mj.Key)) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Secret_Referencebase = iota + ffj_t_Secret_Referenceno_such_key + + ffj_t_Secret_Reference_Name + + ffj_t_Secret_Reference_Key +) + +var ffj_key_Secret_Reference_Name = []byte("name") + +var ffj_key_Secret_Reference_Key = []byte("key") + +func (uj *Secret_Reference) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Secret_Reference) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Secret_Referencebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Secret_Referenceno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'k': + + if bytes.Equal(ffj_key_Secret_Reference_Key, kn) { + currentKey = ffj_t_Secret_Reference_Key + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffj_key_Secret_Reference_Name, kn) { + currentKey = ffj_t_Secret_Reference_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Secret_Reference_Key, kn) { + currentKey = ffj_t_Secret_Reference_Key + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Secret_Reference_Name, kn) { + currentKey = ffj_t_Secret_Reference_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Secret_Referenceno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Secret_Reference_Name: + goto handle_Name + + case ffj_t_Secret_Reference_Key: + goto handle_Key + + case ffj_t_Secret_Referenceno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Name: + + /* handler: uj.Name type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Name = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Key: + + /* handler: uj.Key type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Key = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Key = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Secret_Value) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Secret_Value) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if len(mj.Data) != 0 { + 
buf.WriteString(`"data":`) + if mj.Data != nil { + buf.WriteString(`"`) + { + enc := base64.NewEncoder(base64.StdEncoding, buf) + enc.Write(reflect.Indirect(reflect.ValueOf(mj.Data)).Bytes()) + enc.Close() + } + buf.WriteString(`"`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Secret_Valuebase = iota + ffj_t_Secret_Valueno_such_key + + ffj_t_Secret_Value_Data +) + +var ffj_key_Secret_Value_Data = []byte("data") + +func (uj *Secret_Value) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Secret_Value) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Secret_Valuebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Secret_Valueno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'd': + + if bytes.Equal(ffj_key_Secret_Value_Data, kn) { + currentKey = ffj_t_Secret_Value_Data + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Secret_Value_Data, kn) { + currentKey = ffj_t_Secret_Value_Data + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Secret_Valueno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Secret_Value_Data: + goto handle_Data + + case ffj_t_Secret_Valueno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Data: + + /* handler: uj.Data type=[]uint8 kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Data = nil + } else { + b := make([]byte, base64.StdEncoding.DecodedLen(fs.Output.Len())) + n, err := base64.StdEncoding.Decode(b, fs.Output.Bytes()) + if err != nil { + return fs.WrapErr(err) + } + + v := reflect.ValueOf(&uj.Data).Elem() + v.SetBytes(b[0:n]) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *TTYInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *TTYInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.WindowSize != nil { + if true { + buf.WriteString(`"window_size":`) + + { + + err = mj.WindowSize.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_TTYInfobase = iota + ffj_t_TTYInfono_such_key + + ffj_t_TTYInfo_WindowSize +) + +var ffj_key_TTYInfo_WindowSize = []byte("window_size") + +func (uj *TTYInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *TTYInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_TTYInfobase + _ = currentKey + tok := fflib.FFTok_init + 
wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_TTYInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'w': + + if bytes.Equal(ffj_key_TTYInfo_WindowSize, kn) { + currentKey = ffj_t_TTYInfo_WindowSize + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_TTYInfo_WindowSize, kn) { + currentKey = ffj_t_TTYInfo_WindowSize + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_TTYInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_TTYInfo_WindowSize: + goto handle_WindowSize + + case ffj_t_TTYInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_WindowSize: + + /* handler: uj.WindowSize type=mesos.TTYInfo_WindowSize kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.WindowSize = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.WindowSize == nil { + uj.WindowSize = new(TTYInfo_WindowSize) + } + + err = uj.WindowSize.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *TTYInfo_WindowSize) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *TTYInfo_WindowSize) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err 
error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"rows":`) + fflib.FormatBits2(buf, uint64(mj.Rows), 10, false) + buf.WriteString(`,"columns":`) + fflib.FormatBits2(buf, uint64(mj.Columns), 10, false) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_TTYInfo_WindowSizebase = iota + ffj_t_TTYInfo_WindowSizeno_such_key + + ffj_t_TTYInfo_WindowSize_Rows + + ffj_t_TTYInfo_WindowSize_Columns +) + +var ffj_key_TTYInfo_WindowSize_Rows = []byte("rows") + +var ffj_key_TTYInfo_WindowSize_Columns = []byte("columns") + +func (uj *TTYInfo_WindowSize) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *TTYInfo_WindowSize) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_TTYInfo_WindowSizebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_TTYInfo_WindowSizeno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_TTYInfo_WindowSize_Columns, kn) { + currentKey = ffj_t_TTYInfo_WindowSize_Columns + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'r': + + if bytes.Equal(ffj_key_TTYInfo_WindowSize_Rows, kn) { + currentKey = ffj_t_TTYInfo_WindowSize_Rows + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_TTYInfo_WindowSize_Columns, kn) { + currentKey = ffj_t_TTYInfo_WindowSize_Columns + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_TTYInfo_WindowSize_Rows, kn) { + currentKey = ffj_t_TTYInfo_WindowSize_Rows + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_TTYInfo_WindowSizeno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_TTYInfo_WindowSize_Rows: + goto handle_Rows + + case ffj_t_TTYInfo_WindowSize_Columns: + goto handle_Columns + + case ffj_t_TTYInfo_WindowSizeno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Rows: + + /* handler: uj.Rows type=uint32 kind=uint32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint32", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32) + + if err != nil { + return fs.WrapErr(err) + } + + uj.Rows = uint32(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Columns: + + /* handler: uj.Columns type=uint32 kind=uint32 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint32", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32) + + if err != nil { + return fs.WrapErr(err) + } + + uj.Columns = uint32(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Task) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Task) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + 
return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "name":`) + fflib.WriteJsonString(buf, string(mj.Name)) + buf.WriteString(`,"task_id":`) + + { + + err = mj.TaskID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteString(`,"framework_id":`) + + { + + err = mj.FrameworkID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + if mj.ExecutorID != nil { + if true { + buf.WriteString(`"executor_id":`) + + { + + err = mj.ExecutorID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.WriteString(`"agent_id":`) + + { + + err = mj.AgentID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + if mj.State != nil { + if true { + buf.WriteString(`"state":`) + + { + + obj, err = mj.State.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + } + } + buf.WriteString(`"resources":`) + if mj.Resources != nil { + buf.WriteString(`[`) + for i, v := range mj.Resources { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteString(`,"statuses":`) + if mj.Statuses != nil { + buf.WriteString(`[`) + for i, v := range mj.Statuses { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + if mj.StatusUpdateState != nil { + if true { + buf.WriteString(`"status_update_state":`) + + { + + obj, err = mj.StatusUpdateState.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + } + } + if len(mj.StatusUpdateUUID) != 0 { + buf.WriteString(`"status_update_uuid":`) + if mj.StatusUpdateUUID != nil { + buf.WriteString(`"`) + { + enc := base64.NewEncoder(base64.StdEncoding, buf) + enc.Write(reflect.Indirect(reflect.ValueOf(mj.StatusUpdateUUID)).Bytes()) + enc.Close() + } + buf.WriteString(`"`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + if mj.Labels != nil { + if true { + buf.WriteString(`"labels":`) + + { + + err = mj.Labels.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Discovery != nil { + if true { + buf.WriteString(`"discovery":`) + + { + + err = mj.Discovery.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Container != nil { + if true { + buf.WriteString(`"container":`) + + { + + err = mj.Container.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.User != nil { + if true { + buf.WriteString(`"user":`) + fflib.WriteJsonString(buf, string(*mj.User)) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Taskbase = iota + ffj_t_Taskno_such_key + + ffj_t_Task_Name + + ffj_t_Task_TaskID + + ffj_t_Task_FrameworkID + + ffj_t_Task_ExecutorID + + ffj_t_Task_AgentID + + ffj_t_Task_State + + ffj_t_Task_Resources + + ffj_t_Task_Statuses + + ffj_t_Task_StatusUpdateState + + ffj_t_Task_StatusUpdateUUID + + ffj_t_Task_Labels + + ffj_t_Task_Discovery + + ffj_t_Task_Container + + ffj_t_Task_User +) + +var ffj_key_Task_Name = []byte("name") + +var ffj_key_Task_TaskID = []byte("task_id") + +var ffj_key_Task_FrameworkID = []byte("framework_id") + +var ffj_key_Task_ExecutorID = 
[]byte("executor_id") + +var ffj_key_Task_AgentID = []byte("agent_id") + +var ffj_key_Task_State = []byte("state") + +var ffj_key_Task_Resources = []byte("resources") + +var ffj_key_Task_Statuses = []byte("statuses") + +var ffj_key_Task_StatusUpdateState = []byte("status_update_state") + +var ffj_key_Task_StatusUpdateUUID = []byte("status_update_uuid") + +var ffj_key_Task_Labels = []byte("labels") + +var ffj_key_Task_Discovery = []byte("discovery") + +var ffj_key_Task_Container = []byte("container") + +var ffj_key_Task_User = []byte("user") + +func (uj *Task) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Task) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Taskbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Taskno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'a': + + if bytes.Equal(ffj_key_Task_AgentID, kn) { + currentKey = ffj_t_Task_AgentID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'c': + + if bytes.Equal(ffj_key_Task_Container, kn) { + currentKey = ffj_t_Task_Container + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'd': + + if bytes.Equal(ffj_key_Task_Discovery, kn) { + currentKey = ffj_t_Task_Discovery + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'e': + + if bytes.Equal(ffj_key_Task_ExecutorID, kn) { + currentKey = ffj_t_Task_ExecutorID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'f': + + if bytes.Equal(ffj_key_Task_FrameworkID, kn) { + currentKey = ffj_t_Task_FrameworkID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'l': + + if bytes.Equal(ffj_key_Task_Labels, kn) { + currentKey = ffj_t_Task_Labels + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffj_key_Task_Name, kn) { + currentKey = ffj_t_Task_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'r': + + if bytes.Equal(ffj_key_Task_Resources, kn) { + currentKey = ffj_t_Task_Resources + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_Task_State, kn) { + currentKey = ffj_t_Task_State + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Task_Statuses, kn) { + currentKey = ffj_t_Task_Statuses + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Task_StatusUpdateState, kn) { + currentKey = ffj_t_Task_StatusUpdateState + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Task_StatusUpdateUUID, kn) { + currentKey = ffj_t_Task_StatusUpdateUUID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_Task_TaskID, kn) { + currentKey = ffj_t_Task_TaskID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'u': + + if bytes.Equal(ffj_key_Task_User, kn) { + currentKey = ffj_t_Task_User + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Task_User, kn) { + currentKey = ffj_t_Task_User + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Task_Container, kn) { + currentKey = ffj_t_Task_Container + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Task_Discovery, kn) { + currentKey = ffj_t_Task_Discovery + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Task_Labels, kn) { + currentKey = ffj_t_Task_Labels + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Task_StatusUpdateUUID, kn) { + currentKey = ffj_t_Task_StatusUpdateUUID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Task_StatusUpdateState, kn) { + currentKey = ffj_t_Task_StatusUpdateState + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Task_Statuses, kn) { + currentKey = ffj_t_Task_Statuses + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Task_Resources, kn) { + currentKey = ffj_t_Task_Resources + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Task_State, kn) { + currentKey = ffj_t_Task_State 
+ state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_Task_AgentID, kn) { + currentKey = ffj_t_Task_AgentID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_Task_ExecutorID, kn) { + currentKey = ffj_t_Task_ExecutorID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Task_FrameworkID, kn) { + currentKey = ffj_t_Task_FrameworkID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Task_TaskID, kn) { + currentKey = ffj_t_Task_TaskID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Task_Name, kn) { + currentKey = ffj_t_Task_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Taskno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Task_Name: + goto handle_Name + + case ffj_t_Task_TaskID: + goto handle_TaskID + + case ffj_t_Task_FrameworkID: + goto handle_FrameworkID + + case ffj_t_Task_ExecutorID: + goto handle_ExecutorID + + case ffj_t_Task_AgentID: + goto handle_AgentID + + case ffj_t_Task_State: + goto handle_State + + case ffj_t_Task_Resources: + goto handle_Resources + + case ffj_t_Task_Statuses: + goto handle_Statuses + + case ffj_t_Task_StatusUpdateState: + goto handle_StatusUpdateState + + case ffj_t_Task_StatusUpdateUUID: + goto handle_StatusUpdateUUID + + case ffj_t_Task_Labels: + goto handle_Labels + + case ffj_t_Task_Discovery: + goto handle_Discovery + + case ffj_t_Task_Container: + goto handle_Container + + case ffj_t_Task_User: + goto handle_User + + case ffj_t_Taskno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Name: + + /* handler: uj.Name type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Name = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_TaskID: + + /* handler: uj.TaskID type=mesos.TaskID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.TaskID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_FrameworkID: + + /* handler: uj.FrameworkID type=mesos.FrameworkID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.FrameworkID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ExecutorID: + + /* handler: 
uj.ExecutorID type=mesos.ExecutorID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.ExecutorID = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.ExecutorID == nil { + uj.ExecutorID = new(ExecutorID) + } + + err = uj.ExecutorID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_AgentID: + + /* handler: uj.AgentID type=mesos.AgentID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.AgentID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_State: + + /* handler: uj.State type=mesos.TaskState kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.State = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + if uj.State == nil { + uj.State = new(TaskState) + } + + err = uj.State.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Resources: + + /* handler: uj.Resources type=[]mesos.Resource kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Resources = nil + } else { + + uj.Resources = []Resource{} + + wantVal := true + + for { + + var tmp_uj__Resources Resource + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Resources type=mesos.Resource kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Resources.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Resources = append(uj.Resources, tmp_uj__Resources) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Statuses: + + /* handler: uj.Statuses type=[]mesos.TaskStatus kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Statuses = nil + } else { + + uj.Statuses = []TaskStatus{} + + wantVal := true + + for { + + var tmp_uj__Statuses TaskStatus + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Statuses type=mesos.TaskStatus kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Statuses.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Statuses = append(uj.Statuses, tmp_uj__Statuses) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_StatusUpdateState: + + /* handler: uj.StatusUpdateState type=mesos.TaskState kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.StatusUpdateState = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + if uj.StatusUpdateState == nil { + uj.StatusUpdateState = new(TaskState) + } + + err = uj.StatusUpdateState.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_StatusUpdateUUID: + + /* handler: uj.StatusUpdateUUID type=[]uint8 kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.StatusUpdateUUID = nil + } else { + b := make([]byte, base64.StdEncoding.DecodedLen(fs.Output.Len())) + n, err := base64.StdEncoding.Decode(b, fs.Output.Bytes()) + if err != nil { + return fs.WrapErr(err) + } + + v := reflect.ValueOf(&uj.StatusUpdateUUID).Elem() + v.SetBytes(b[0:n]) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Labels: + + /* handler: uj.Labels type=mesos.Labels kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Labels = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Labels == nil { + uj.Labels = new(Labels) + } + + err = uj.Labels.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Discovery: + + /* handler: uj.Discovery type=mesos.DiscoveryInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Discovery = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Discovery == nil { + uj.Discovery = new(DiscoveryInfo) + } + + err = uj.Discovery.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Container: + + /* handler: uj.Container type=mesos.ContainerInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Container = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Container == nil { + uj.Container = new(ContainerInfo) + } + + err = uj.Container.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_User: + + /* handler: uj.User type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == 
fflib.FFTok_null { + + uj.User = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.User = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *TaskGroupInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *TaskGroupInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"tasks":`) + if mj.Tasks != nil { + buf.WriteString(`[`) + for i, v := range mj.Tasks { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_TaskGroupInfobase = iota + ffj_t_TaskGroupInfono_such_key + + ffj_t_TaskGroupInfo_Tasks +) + +var ffj_key_TaskGroupInfo_Tasks = []byte("tasks") + +func (uj *TaskGroupInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *TaskGroupInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_TaskGroupInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_TaskGroupInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 't': + + if bytes.Equal(ffj_key_TaskGroupInfo_Tasks, kn) { + currentKey = ffj_t_TaskGroupInfo_Tasks + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_TaskGroupInfo_Tasks, kn) { + currentKey = ffj_t_TaskGroupInfo_Tasks + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_TaskGroupInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_TaskGroupInfo_Tasks: + goto handle_Tasks + + case ffj_t_TaskGroupInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Tasks: + + /* handler: uj.Tasks type=[]mesos.TaskInfo kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Tasks = nil + } else { + + uj.Tasks = []TaskInfo{} + + wantVal := true + + for { + + var tmp_uj__Tasks TaskInfo + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Tasks type=mesos.TaskInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Tasks.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Tasks = append(uj.Tasks, tmp_uj__Tasks) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *TaskID) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *TaskID) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"value":`) + fflib.WriteJsonString(buf, string(mj.Value)) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_TaskIDbase = iota + ffj_t_TaskIDno_such_key + + ffj_t_TaskID_Value +) + +var ffj_key_TaskID_Value = []byte("value") + +func (uj *TaskID) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *TaskID) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_TaskIDbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_TaskIDno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'v': + + if bytes.Equal(ffj_key_TaskID_Value, kn) { + currentKey = ffj_t_TaskID_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_TaskID_Value, kn) { + currentKey = ffj_t_TaskID_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_TaskIDno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_TaskID_Value: + goto handle_Value + + case ffj_t_TaskIDno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Value: + + /* handler: uj.Value type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Value = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *TaskInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *TaskInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "name":`) + fflib.WriteJsonString(buf, string(mj.Name)) + buf.WriteString(`,"task_id":`) + + { + + err = mj.TaskID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteString(`,"agent_id":`) + + { + + err = mj.AgentID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteString(`,"resources":`) + if mj.Resources != nil { + buf.WriteString(`[`) + for i, v := range mj.Resources { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + if mj.Executor != nil { + if true { + buf.WriteString(`"executor":`) + + { + + err = mj.Executor.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Command != nil { + if true { + buf.WriteString(`"command":`) + + { + + err = mj.Command.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + 
buf.WriteByte(',') + } + } + if mj.Container != nil { + if true { + buf.WriteString(`"container":`) + + { + + err = mj.Container.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.HealthCheck != nil { + if true { + buf.WriteString(`"health_check":`) + + { + + err = mj.HealthCheck.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Check != nil { + if true { + buf.WriteString(`"check":`) + + { + + err = mj.Check.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.KillPolicy != nil { + if true { + buf.WriteString(`"kill_policy":`) + + { + + err = mj.KillPolicy.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if len(mj.Data) != 0 { + buf.WriteString(`"data":`) + if mj.Data != nil { + buf.WriteString(`"`) + { + enc := base64.NewEncoder(base64.StdEncoding, buf) + enc.Write(reflect.Indirect(reflect.ValueOf(mj.Data)).Bytes()) + enc.Close() + } + buf.WriteString(`"`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + if mj.Labels != nil { + if true { + buf.WriteString(`"labels":`) + + { + + err = mj.Labels.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Discovery != nil { + if true { + buf.WriteString(`"discovery":`) + + { + + err = mj.Discovery.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.MaxCompletionTime != nil { + if true { + buf.WriteString(`"max_completion_time":`) + + { + + err = mj.MaxCompletionTime.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_TaskInfobase = iota + ffj_t_TaskInfono_such_key + + ffj_t_TaskInfo_Name + + ffj_t_TaskInfo_TaskID + + ffj_t_TaskInfo_AgentID + + ffj_t_TaskInfo_Resources + + ffj_t_TaskInfo_Executor + + ffj_t_TaskInfo_Command + + ffj_t_TaskInfo_Container + + ffj_t_TaskInfo_HealthCheck + + ffj_t_TaskInfo_Check + + ffj_t_TaskInfo_KillPolicy + + ffj_t_TaskInfo_Data + + ffj_t_TaskInfo_Labels + + ffj_t_TaskInfo_Discovery + + ffj_t_TaskInfo_MaxCompletionTime +) + +var ffj_key_TaskInfo_Name = []byte("name") + +var ffj_key_TaskInfo_TaskID = []byte("task_id") + +var ffj_key_TaskInfo_AgentID = []byte("agent_id") + +var ffj_key_TaskInfo_Resources = []byte("resources") + +var ffj_key_TaskInfo_Executor = []byte("executor") + +var ffj_key_TaskInfo_Command = []byte("command") + +var ffj_key_TaskInfo_Container = []byte("container") + +var ffj_key_TaskInfo_HealthCheck = []byte("health_check") + +var ffj_key_TaskInfo_Check = []byte("check") + +var ffj_key_TaskInfo_KillPolicy = []byte("kill_policy") + +var ffj_key_TaskInfo_Data = []byte("data") + +var ffj_key_TaskInfo_Labels = []byte("labels") + +var ffj_key_TaskInfo_Discovery = []byte("discovery") + +var ffj_key_TaskInfo_MaxCompletionTime = []byte("max_completion_time") + +func (uj *TaskInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *TaskInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_TaskInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + 
+ case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_TaskInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'a': + + if bytes.Equal(ffj_key_TaskInfo_AgentID, kn) { + currentKey = ffj_t_TaskInfo_AgentID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'c': + + if bytes.Equal(ffj_key_TaskInfo_Command, kn) { + currentKey = ffj_t_TaskInfo_Command + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_TaskInfo_Container, kn) { + currentKey = ffj_t_TaskInfo_Container + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_TaskInfo_Check, kn) { + currentKey = ffj_t_TaskInfo_Check + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'd': + + if bytes.Equal(ffj_key_TaskInfo_Data, kn) { + currentKey = ffj_t_TaskInfo_Data + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_TaskInfo_Discovery, kn) { + currentKey = ffj_t_TaskInfo_Discovery + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'e': + + if bytes.Equal(ffj_key_TaskInfo_Executor, kn) { + currentKey = ffj_t_TaskInfo_Executor + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'h': + + if bytes.Equal(ffj_key_TaskInfo_HealthCheck, kn) { + currentKey = ffj_t_TaskInfo_HealthCheck + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'k': + + if bytes.Equal(ffj_key_TaskInfo_KillPolicy, kn) { + currentKey = ffj_t_TaskInfo_KillPolicy + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'l': + + if bytes.Equal(ffj_key_TaskInfo_Labels, kn) { + currentKey = ffj_t_TaskInfo_Labels + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'm': + + if bytes.Equal(ffj_key_TaskInfo_MaxCompletionTime, kn) { + currentKey = ffj_t_TaskInfo_MaxCompletionTime + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffj_key_TaskInfo_Name, kn) { + currentKey = ffj_t_TaskInfo_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'r': + + if bytes.Equal(ffj_key_TaskInfo_Resources, kn) { + currentKey = ffj_t_TaskInfo_Resources + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_TaskInfo_TaskID, kn) { + currentKey = ffj_t_TaskInfo_TaskID + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.AsciiEqualFold(ffj_key_TaskInfo_MaxCompletionTime, kn) { + currentKey = ffj_t_TaskInfo_MaxCompletionTime + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_TaskInfo_Discovery, kn) { + currentKey = ffj_t_TaskInfo_Discovery + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_TaskInfo_Labels, kn) { + currentKey = ffj_t_TaskInfo_Labels + state = fflib.FFParse_want_colon + goto mainparse + } + + if 
fflib.SimpleLetterEqualFold(ffj_key_TaskInfo_Data, kn) { + currentKey = ffj_t_TaskInfo_Data + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_TaskInfo_KillPolicy, kn) { + currentKey = ffj_t_TaskInfo_KillPolicy + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_TaskInfo_Check, kn) { + currentKey = ffj_t_TaskInfo_Check + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_TaskInfo_HealthCheck, kn) { + currentKey = ffj_t_TaskInfo_HealthCheck + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_TaskInfo_Container, kn) { + currentKey = ffj_t_TaskInfo_Container + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_TaskInfo_Command, kn) { + currentKey = ffj_t_TaskInfo_Command + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_TaskInfo_Executor, kn) { + currentKey = ffj_t_TaskInfo_Executor + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_TaskInfo_Resources, kn) { + currentKey = ffj_t_TaskInfo_Resources + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_TaskInfo_AgentID, kn) { + currentKey = ffj_t_TaskInfo_AgentID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_TaskInfo_TaskID, kn) { + currentKey = ffj_t_TaskInfo_TaskID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_TaskInfo_Name, kn) { + currentKey = ffj_t_TaskInfo_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_TaskInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_TaskInfo_Name: + goto handle_Name + + case ffj_t_TaskInfo_TaskID: + goto handle_TaskID + + case ffj_t_TaskInfo_AgentID: + goto handle_AgentID + + case ffj_t_TaskInfo_Resources: + goto handle_Resources + + case ffj_t_TaskInfo_Executor: + goto handle_Executor + + case ffj_t_TaskInfo_Command: + goto handle_Command + + case ffj_t_TaskInfo_Container: + goto handle_Container + + case ffj_t_TaskInfo_HealthCheck: + goto handle_HealthCheck + + case ffj_t_TaskInfo_Check: + goto handle_Check + + case ffj_t_TaskInfo_KillPolicy: + goto handle_KillPolicy + + case ffj_t_TaskInfo_Data: + goto handle_Data + + case ffj_t_TaskInfo_Labels: + goto handle_Labels + + case ffj_t_TaskInfo_Discovery: + goto handle_Discovery + + case ffj_t_TaskInfo_MaxCompletionTime: + goto handle_MaxCompletionTime + + case ffj_t_TaskInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Name: + + /* handler: uj.Name type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + 
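+			// JSON null leaves uj.Name at its zero value ("").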
+ } else { + + outBuf := fs.Output.Bytes() + + uj.Name = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_TaskID: + + /* handler: uj.TaskID type=mesos.TaskID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.TaskID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_AgentID: + + /* handler: uj.AgentID type=mesos.AgentID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.AgentID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Resources: + + /* handler: uj.Resources type=[]mesos.Resource kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Resources = nil + } else { + + uj.Resources = []Resource{} + + wantVal := true + + for { + + var tmp_uj__Resources Resource + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Resources type=mesos.Resource kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Resources.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Resources = append(uj.Resources, tmp_uj__Resources) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Executor: + + /* handler: uj.Executor type=mesos.ExecutorInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Executor = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Executor == nil { + uj.Executor = new(ExecutorInfo) + } + + err = uj.Executor.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Command: + + /* handler: uj.Command type=mesos.CommandInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Command = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Command == nil { + uj.Command = new(CommandInfo) + } + + err = uj.Command.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Container: + + /* handler: uj.Container type=mesos.ContainerInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Container = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Container == nil { + uj.Container = new(ContainerInfo) + } + + err = uj.Container.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + 
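+		// Propagate decode errors from the nested ContainerInfo message.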
if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_HealthCheck: + + /* handler: uj.HealthCheck type=mesos.HealthCheck kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.HealthCheck = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.HealthCheck == nil { + uj.HealthCheck = new(HealthCheck) + } + + err = uj.HealthCheck.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Check: + + /* handler: uj.Check type=mesos.CheckInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Check = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Check == nil { + uj.Check = new(CheckInfo) + } + + err = uj.Check.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_KillPolicy: + + /* handler: uj.KillPolicy type=mesos.KillPolicy kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.KillPolicy = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.KillPolicy == nil { + uj.KillPolicy = new(KillPolicy) + } + + err = uj.KillPolicy.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Data: + + /* handler: uj.Data type=[]uint8 kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Data = nil + } else { + b := make([]byte, base64.StdEncoding.DecodedLen(fs.Output.Len())) + n, err := base64.StdEncoding.Decode(b, fs.Output.Bytes()) + if err != nil { + return fs.WrapErr(err) + } + + v := reflect.ValueOf(&uj.Data).Elem() + v.SetBytes(b[0:n]) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Labels: + + /* handler: uj.Labels type=mesos.Labels kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Labels = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Labels == nil { + uj.Labels = new(Labels) + } + + err = uj.Labels.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Discovery: + + /* handler: uj.Discovery type=mesos.DiscoveryInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Discovery = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Discovery == nil { + uj.Discovery = new(DiscoveryInfo) + } + + err = uj.Discovery.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_MaxCompletionTime: + + /* handler: uj.MaxCompletionTime type=mesos.DurationInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.MaxCompletionTime = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.MaxCompletionTime == nil { + uj.MaxCompletionTime = new(DurationInfo) + } + + err = uj.MaxCompletionTime.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != 
nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *TaskResourceLimitation) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *TaskResourceLimitation) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"resources":`) + if mj.Resources != nil { + buf.WriteString(`[`) + for i, v := range mj.Resources { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_TaskResourceLimitationbase = iota + ffj_t_TaskResourceLimitationno_such_key + + ffj_t_TaskResourceLimitation_Resources +) + +var ffj_key_TaskResourceLimitation_Resources = []byte("resources") + +func (uj *TaskResourceLimitation) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *TaskResourceLimitation) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_TaskResourceLimitationbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
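+				// Empty key: route to the no-such-key sink so the value is skipped.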
+ currentKey = ffj_t_TaskResourceLimitationno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'r': + + if bytes.Equal(ffj_key_TaskResourceLimitation_Resources, kn) { + currentKey = ffj_t_TaskResourceLimitation_Resources + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_TaskResourceLimitation_Resources, kn) { + currentKey = ffj_t_TaskResourceLimitation_Resources + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_TaskResourceLimitationno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_TaskResourceLimitation_Resources: + goto handle_Resources + + case ffj_t_TaskResourceLimitationno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Resources: + + /* handler: uj.Resources type=[]mesos.Resource kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Resources = nil + } else { + + uj.Resources = []Resource{} + + wantVal := true + + for { + + var tmp_uj__Resources Resource + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
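+					// wantVal guards against consecutive commas inside the array.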
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Resources type=mesos.Resource kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Resources.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Resources = append(uj.Resources, tmp_uj__Resources) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *TaskStatus) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *TaskStatus) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "task_id":`) + + { + + err = mj.TaskID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + if mj.State != nil { + if true { + buf.WriteString(`"state":`) + + { + + obj, err = mj.State.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + } + } + if mj.Message != nil { + if true { + buf.WriteString(`"message":`) + fflib.WriteJsonString(buf, string(*mj.Message)) + buf.WriteByte(',') + } + } + if mj.Source != nil { + if true { + buf.WriteString(`"source":`) + + { + + obj, err = mj.Source.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + } + } + if mj.Reason != nil { + if true { + buf.WriteString(`"reason":`) + + { + + obj, err = mj.Reason.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + } + } + if len(mj.Data) != 0 { + buf.WriteString(`"data":`) + if mj.Data != nil { + buf.WriteString(`"`) + { + enc := base64.NewEncoder(base64.StdEncoding, buf) + enc.Write(reflect.Indirect(reflect.ValueOf(mj.Data)).Bytes()) + enc.Close() + } + buf.WriteString(`"`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + if mj.AgentID != nil { + if true { + buf.WriteString(`"agent_id":`) + + { + + err = mj.AgentID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.ExecutorID != nil { + if true { + buf.WriteString(`"executor_id":`) + + { + + err = mj.ExecutorID.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Timestamp != nil { + if true { + buf.WriteString(`"timestamp":`) + fflib.AppendFloat(buf, float64(*mj.Timestamp), 'g', -1, 64) + buf.WriteByte(',') + } + } + if len(mj.UUID) != 0 { + buf.WriteString(`"uuid":`) + if mj.UUID != nil { + buf.WriteString(`"`) + { + enc := base64.NewEncoder(base64.StdEncoding, buf) + enc.Write(reflect.Indirect(reflect.ValueOf(mj.UUID)).Bytes()) + enc.Close() + } + buf.WriteString(`"`) + } else { + 
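+			// Unreachable in practice: len(mj.UUID) != 0 implies a non-nil
+			// slice; the generator emits the nil branch defensively.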
buf.WriteString(`null`) + } + buf.WriteByte(',') + } + if mj.Healthy != nil { + if true { + if *mj.Healthy { + buf.WriteString(`"healthy":true`) + } else { + buf.WriteString(`"healthy":false`) + } + buf.WriteByte(',') + } + } + if mj.CheckStatus != nil { + if true { + buf.WriteString(`"check_status":`) + + { + + err = mj.CheckStatus.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Labels != nil { + if true { + buf.WriteString(`"labels":`) + + { + + err = mj.Labels.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.ContainerStatus != nil { + if true { + buf.WriteString(`"container_status":`) + + { + + err = mj.ContainerStatus.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.UnreachableTime != nil { + if true { + buf.WriteString(`"unreachable_time":`) + + { + + err = mj.UnreachableTime.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Limitation != nil { + if true { + buf.WriteString(`"limitation":`) + + { + + err = mj.Limitation.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_TaskStatusbase = iota + ffj_t_TaskStatusno_such_key + + ffj_t_TaskStatus_TaskID + + ffj_t_TaskStatus_State + + ffj_t_TaskStatus_Message + + ffj_t_TaskStatus_Source + + ffj_t_TaskStatus_Reason + + ffj_t_TaskStatus_Data + + ffj_t_TaskStatus_AgentID + + ffj_t_TaskStatus_ExecutorID + + ffj_t_TaskStatus_Timestamp + + ffj_t_TaskStatus_UUID + + ffj_t_TaskStatus_Healthy + + ffj_t_TaskStatus_CheckStatus + + ffj_t_TaskStatus_Labels + + ffj_t_TaskStatus_ContainerStatus + + ffj_t_TaskStatus_UnreachableTime + + ffj_t_TaskStatus_Limitation +) + +var ffj_key_TaskStatus_TaskID = []byte("task_id") + +var ffj_key_TaskStatus_State = []byte("state") + +var ffj_key_TaskStatus_Message = []byte("message") + +var ffj_key_TaskStatus_Source = []byte("source") + +var ffj_key_TaskStatus_Reason = []byte("reason") + +var ffj_key_TaskStatus_Data = []byte("data") + +var ffj_key_TaskStatus_AgentID = []byte("agent_id") + +var ffj_key_TaskStatus_ExecutorID = []byte("executor_id") + +var ffj_key_TaskStatus_Timestamp = []byte("timestamp") + +var ffj_key_TaskStatus_UUID = []byte("uuid") + +var ffj_key_TaskStatus_Healthy = []byte("healthy") + +var ffj_key_TaskStatus_CheckStatus = []byte("check_status") + +var ffj_key_TaskStatus_Labels = []byte("labels") + +var ffj_key_TaskStatus_ContainerStatus = []byte("container_status") + +var ffj_key_TaskStatus_UnreachableTime = []byte("unreachable_time") + +var ffj_key_TaskStatus_Limitation = []byte("limitation") + +func (uj *TaskStatus) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *TaskStatus) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_TaskStatusbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + 
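+				// Comma after a value: loop back to expect the next key.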
state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_TaskStatusno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'a': + + if bytes.Equal(ffj_key_TaskStatus_AgentID, kn) { + currentKey = ffj_t_TaskStatus_AgentID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'c': + + if bytes.Equal(ffj_key_TaskStatus_CheckStatus, kn) { + currentKey = ffj_t_TaskStatus_CheckStatus + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_TaskStatus_ContainerStatus, kn) { + currentKey = ffj_t_TaskStatus_ContainerStatus + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'd': + + if bytes.Equal(ffj_key_TaskStatus_Data, kn) { + currentKey = ffj_t_TaskStatus_Data + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'e': + + if bytes.Equal(ffj_key_TaskStatus_ExecutorID, kn) { + currentKey = ffj_t_TaskStatus_ExecutorID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'h': + + if bytes.Equal(ffj_key_TaskStatus_Healthy, kn) { + currentKey = ffj_t_TaskStatus_Healthy + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'l': + + if bytes.Equal(ffj_key_TaskStatus_Labels, kn) { + currentKey = ffj_t_TaskStatus_Labels + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_TaskStatus_Limitation, kn) { + currentKey = ffj_t_TaskStatus_Limitation + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'm': + + if bytes.Equal(ffj_key_TaskStatus_Message, kn) { + currentKey = ffj_t_TaskStatus_Message + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'r': + + if bytes.Equal(ffj_key_TaskStatus_Reason, kn) { + currentKey = ffj_t_TaskStatus_Reason + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_TaskStatus_State, kn) { + currentKey = ffj_t_TaskStatus_State + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_TaskStatus_Source, kn) { + currentKey = ffj_t_TaskStatus_Source + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_TaskStatus_TaskID, kn) { + currentKey = ffj_t_TaskStatus_TaskID + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_TaskStatus_Timestamp, kn) { + currentKey = ffj_t_TaskStatus_Timestamp + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'u': + + if bytes.Equal(ffj_key_TaskStatus_UUID, kn) { + currentKey = ffj_t_TaskStatus_UUID + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_TaskStatus_UnreachableTime, kn) { + currentKey = ffj_t_TaskStatus_UnreachableTime + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_TaskStatus_Limitation, kn) { + currentKey = ffj_t_TaskStatus_Limitation + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_TaskStatus_UnreachableTime, kn) { + currentKey = ffj_t_TaskStatus_UnreachableTime + state = fflib.FFParse_want_colon + goto mainparse + } + + if 
fflib.EqualFoldRight(ffj_key_TaskStatus_ContainerStatus, kn) { + currentKey = ffj_t_TaskStatus_ContainerStatus + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_TaskStatus_Labels, kn) { + currentKey = ffj_t_TaskStatus_Labels + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_TaskStatus_CheckStatus, kn) { + currentKey = ffj_t_TaskStatus_CheckStatus + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_TaskStatus_Healthy, kn) { + currentKey = ffj_t_TaskStatus_Healthy + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_TaskStatus_UUID, kn) { + currentKey = ffj_t_TaskStatus_UUID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_TaskStatus_Timestamp, kn) { + currentKey = ffj_t_TaskStatus_Timestamp + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_TaskStatus_ExecutorID, kn) { + currentKey = ffj_t_TaskStatus_ExecutorID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_TaskStatus_AgentID, kn) { + currentKey = ffj_t_TaskStatus_AgentID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_TaskStatus_Data, kn) { + currentKey = ffj_t_TaskStatus_Data + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_TaskStatus_Reason, kn) { + currentKey = ffj_t_TaskStatus_Reason + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_TaskStatus_Source, kn) { + currentKey = ffj_t_TaskStatus_Source + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_TaskStatus_Message, kn) { + currentKey = ffj_t_TaskStatus_Message + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_TaskStatus_State, kn) { + currentKey = ffj_t_TaskStatus_State + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_TaskStatus_TaskID, kn) { + currentKey = ffj_t_TaskStatus_TaskID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_TaskStatusno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_TaskStatus_TaskID: + goto handle_TaskID + + case ffj_t_TaskStatus_State: + goto handle_State + + case ffj_t_TaskStatus_Message: + goto handle_Message + + case ffj_t_TaskStatus_Source: + goto handle_Source + + case ffj_t_TaskStatus_Reason: + goto handle_Reason + + case ffj_t_TaskStatus_Data: + goto handle_Data + + case ffj_t_TaskStatus_AgentID: + goto handle_AgentID + + case ffj_t_TaskStatus_ExecutorID: + goto handle_ExecutorID + + case ffj_t_TaskStatus_Timestamp: + goto handle_Timestamp + + case ffj_t_TaskStatus_UUID: + goto handle_UUID + + case ffj_t_TaskStatus_Healthy: + goto handle_Healthy + + case ffj_t_TaskStatus_CheckStatus: + goto handle_CheckStatus + + case ffj_t_TaskStatus_Labels: + goto handle_Labels + + case 
ffj_t_TaskStatus_ContainerStatus: + goto handle_ContainerStatus + + case ffj_t_TaskStatus_UnreachableTime: + goto handle_UnreachableTime + + case ffj_t_TaskStatus_Limitation: + goto handle_Limitation + + case ffj_t_TaskStatusno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_TaskID: + + /* handler: uj.TaskID type=mesos.TaskID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.TaskID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_State: + + /* handler: uj.State type=mesos.TaskState kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.State = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + if uj.State == nil { + uj.State = new(TaskState) + } + + err = uj.State.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Message: + + /* handler: uj.Message type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Message = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Message = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Source: + + /* handler: uj.Source type=mesos.TaskStatus_Source kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Source = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + if uj.Source == nil { + uj.Source = new(TaskStatus_Source) + } + + err = uj.Source.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Reason: + + /* handler: uj.Reason type=mesos.TaskStatus_Reason kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Reason = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + if uj.Reason == nil { + uj.Reason = new(TaskStatus_Reason) + } + + err = uj.Reason.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Data: + + /* handler: uj.Data type=[]uint8 kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Data = nil + } else { + b := make([]byte, base64.StdEncoding.DecodedLen(fs.Output.Len())) + n, err := base64.StdEncoding.Decode(b, fs.Output.Bytes()) + if err != nil { + return fs.WrapErr(err) + } + + v := reflect.ValueOf(&uj.Data).Elem() + v.SetBytes(b[0:n]) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_AgentID: + + /* handler: uj.AgentID 
type=mesos.AgentID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.AgentID = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.AgentID == nil { + uj.AgentID = new(AgentID) + } + + err = uj.AgentID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ExecutorID: + + /* handler: uj.ExecutorID type=mesos.ExecutorID kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.ExecutorID = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.ExecutorID == nil { + uj.ExecutorID = new(ExecutorID) + } + + err = uj.ExecutorID.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Timestamp: + + /* handler: uj.Timestamp type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.Timestamp = nil + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := float64(tval) + uj.Timestamp = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_UUID: + + /* handler: uj.UUID type=[]uint8 kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.UUID = nil + } else { + b := make([]byte, base64.StdEncoding.DecodedLen(fs.Output.Len())) + n, err := base64.StdEncoding.Decode(b, fs.Output.Bytes()) + if err != nil { + return fs.WrapErr(err) + } + + v := reflect.ValueOf(&uj.UUID).Elem() + v.SetBytes(b[0:n]) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Healthy: + + /* handler: uj.Healthy type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + uj.Healthy = nil + + } else { + tmpb := fs.Output.Bytes() + + var tval bool + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + tval = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + tval = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + uj.Healthy = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_CheckStatus: + + /* handler: uj.CheckStatus type=mesos.CheckStatusInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.CheckStatus = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.CheckStatus == nil { + uj.CheckStatus = new(CheckStatusInfo) + } + + err = uj.CheckStatus.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Labels: + + /* handler: uj.Labels type=mesos.Labels kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Labels = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Labels == nil { + 
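+			// Lazily allocate the nested message before decoding into it.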
uj.Labels = new(Labels) + } + + err = uj.Labels.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ContainerStatus: + + /* handler: uj.ContainerStatus type=mesos.ContainerStatus kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.ContainerStatus = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.ContainerStatus == nil { + uj.ContainerStatus = new(ContainerStatus) + } + + err = uj.ContainerStatus.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_UnreachableTime: + + /* handler: uj.UnreachableTime type=mesos.TimeInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.UnreachableTime = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.UnreachableTime == nil { + uj.UnreachableTime = new(TimeInfo) + } + + err = uj.UnreachableTime.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Limitation: + + /* handler: uj.Limitation type=mesos.TaskResourceLimitation kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Limitation = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Limitation == nil { + uj.Limitation = new(TaskResourceLimitation) + } + + err = uj.Limitation.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *TcpStatistics) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *TcpStatistics) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.RtoAlgorithm != nil { + if true { + buf.WriteString(`"RtoAlgorithm":`) + fflib.FormatBits2(buf, uint64(*mj.RtoAlgorithm), 10, *mj.RtoAlgorithm < 0) + buf.WriteByte(',') + } + } + if mj.RtoMin != nil { + if true { + buf.WriteString(`"RtoMin":`) + fflib.FormatBits2(buf, uint64(*mj.RtoMin), 10, *mj.RtoMin < 0) + buf.WriteByte(',') + } + } + if mj.RtoMax != nil { + if true { + buf.WriteString(`"RtoMax":`) + fflib.FormatBits2(buf, uint64(*mj.RtoMax), 10, *mj.RtoMax < 0) + buf.WriteByte(',') + } + } + if mj.MaxConn != nil { + if true { + buf.WriteString(`"MaxConn":`) + fflib.FormatBits2(buf, uint64(*mj.MaxConn), 10, *mj.MaxConn < 0) + buf.WriteByte(',') + } + } + if mj.ActiveOpens != nil { + if true { + buf.WriteString(`"ActiveOpens":`) + fflib.FormatBits2(buf, uint64(*mj.ActiveOpens), 10, *mj.ActiveOpens < 
0) + buf.WriteByte(',') + } + } + if mj.PassiveOpens != nil { + if true { + buf.WriteString(`"PassiveOpens":`) + fflib.FormatBits2(buf, uint64(*mj.PassiveOpens), 10, *mj.PassiveOpens < 0) + buf.WriteByte(',') + } + } + if mj.AttemptFails != nil { + if true { + buf.WriteString(`"AttemptFails":`) + fflib.FormatBits2(buf, uint64(*mj.AttemptFails), 10, *mj.AttemptFails < 0) + buf.WriteByte(',') + } + } + if mj.EstabResets != nil { + if true { + buf.WriteString(`"EstabResets":`) + fflib.FormatBits2(buf, uint64(*mj.EstabResets), 10, *mj.EstabResets < 0) + buf.WriteByte(',') + } + } + if mj.CurrEstab != nil { + if true { + buf.WriteString(`"CurrEstab":`) + fflib.FormatBits2(buf, uint64(*mj.CurrEstab), 10, *mj.CurrEstab < 0) + buf.WriteByte(',') + } + } + if mj.InSegs != nil { + if true { + buf.WriteString(`"InSegs":`) + fflib.FormatBits2(buf, uint64(*mj.InSegs), 10, *mj.InSegs < 0) + buf.WriteByte(',') + } + } + if mj.OutSegs != nil { + if true { + buf.WriteString(`"OutSegs":`) + fflib.FormatBits2(buf, uint64(*mj.OutSegs), 10, *mj.OutSegs < 0) + buf.WriteByte(',') + } + } + if mj.RetransSegs != nil { + if true { + buf.WriteString(`"RetransSegs":`) + fflib.FormatBits2(buf, uint64(*mj.RetransSegs), 10, *mj.RetransSegs < 0) + buf.WriteByte(',') + } + } + if mj.InErrs != nil { + if true { + buf.WriteString(`"InErrs":`) + fflib.FormatBits2(buf, uint64(*mj.InErrs), 10, *mj.InErrs < 0) + buf.WriteByte(',') + } + } + if mj.OutRsts != nil { + if true { + buf.WriteString(`"OutRsts":`) + fflib.FormatBits2(buf, uint64(*mj.OutRsts), 10, *mj.OutRsts < 0) + buf.WriteByte(',') + } + } + if mj.InCsumErrors != nil { + if true { + buf.WriteString(`"InCsumErrors":`) + fflib.FormatBits2(buf, uint64(*mj.InCsumErrors), 10, *mj.InCsumErrors < 0) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_TcpStatisticsbase = iota + ffj_t_TcpStatisticsno_such_key + + ffj_t_TcpStatistics_RtoAlgorithm + + ffj_t_TcpStatistics_RtoMin + + ffj_t_TcpStatistics_RtoMax + + ffj_t_TcpStatistics_MaxConn + + ffj_t_TcpStatistics_ActiveOpens + + ffj_t_TcpStatistics_PassiveOpens + + ffj_t_TcpStatistics_AttemptFails + + ffj_t_TcpStatistics_EstabResets + + ffj_t_TcpStatistics_CurrEstab + + ffj_t_TcpStatistics_InSegs + + ffj_t_TcpStatistics_OutSegs + + ffj_t_TcpStatistics_RetransSegs + + ffj_t_TcpStatistics_InErrs + + ffj_t_TcpStatistics_OutRsts + + ffj_t_TcpStatistics_InCsumErrors +) + +var ffj_key_TcpStatistics_RtoAlgorithm = []byte("RtoAlgorithm") + +var ffj_key_TcpStatistics_RtoMin = []byte("RtoMin") + +var ffj_key_TcpStatistics_RtoMax = []byte("RtoMax") + +var ffj_key_TcpStatistics_MaxConn = []byte("MaxConn") + +var ffj_key_TcpStatistics_ActiveOpens = []byte("ActiveOpens") + +var ffj_key_TcpStatistics_PassiveOpens = []byte("PassiveOpens") + +var ffj_key_TcpStatistics_AttemptFails = []byte("AttemptFails") + +var ffj_key_TcpStatistics_EstabResets = []byte("EstabResets") + +var ffj_key_TcpStatistics_CurrEstab = []byte("CurrEstab") + +var ffj_key_TcpStatistics_InSegs = []byte("InSegs") + +var ffj_key_TcpStatistics_OutSegs = []byte("OutSegs") + +var ffj_key_TcpStatistics_RetransSegs = []byte("RetransSegs") + +var ffj_key_TcpStatistics_InErrs = []byte("InErrs") + +var ffj_key_TcpStatistics_OutRsts = []byte("OutRsts") + +var ffj_key_TcpStatistics_InCsumErrors = []byte("InCsumErrors") + +func (uj *TcpStatistics) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *TcpStatistics) UnmarshalJSONFFLexer(fs 
*fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_TcpStatisticsbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_TcpStatisticsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'A': + + if bytes.Equal(ffj_key_TcpStatistics_ActiveOpens, kn) { + currentKey = ffj_t_TcpStatistics_ActiveOpens + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_TcpStatistics_AttemptFails, kn) { + currentKey = ffj_t_TcpStatistics_AttemptFails + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'C': + + if bytes.Equal(ffj_key_TcpStatistics_CurrEstab, kn) { + currentKey = ffj_t_TcpStatistics_CurrEstab + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'E': + + if bytes.Equal(ffj_key_TcpStatistics_EstabResets, kn) { + currentKey = ffj_t_TcpStatistics_EstabResets + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'I': + + if bytes.Equal(ffj_key_TcpStatistics_InSegs, kn) { + currentKey = ffj_t_TcpStatistics_InSegs + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_TcpStatistics_InErrs, kn) { + currentKey = ffj_t_TcpStatistics_InErrs + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_TcpStatistics_InCsumErrors, kn) { + currentKey = ffj_t_TcpStatistics_InCsumErrors + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'M': + + if bytes.Equal(ffj_key_TcpStatistics_MaxConn, kn) { + currentKey = ffj_t_TcpStatistics_MaxConn + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'O': + + if bytes.Equal(ffj_key_TcpStatistics_OutSegs, kn) { + currentKey = ffj_t_TcpStatistics_OutSegs + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_TcpStatistics_OutRsts, kn) { + currentKey = ffj_t_TcpStatistics_OutRsts + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'P': + + if bytes.Equal(ffj_key_TcpStatistics_PassiveOpens, kn) { + currentKey = ffj_t_TcpStatistics_PassiveOpens + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'R': + + if bytes.Equal(ffj_key_TcpStatistics_RtoAlgorithm, kn) { + currentKey = ffj_t_TcpStatistics_RtoAlgorithm + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_TcpStatistics_RtoMin, kn) { + currentKey = ffj_t_TcpStatistics_RtoMin + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_TcpStatistics_RtoMax, kn) { + currentKey = ffj_t_TcpStatistics_RtoMax + state = fflib.FFParse_want_colon + goto mainparse + + } else if 
bytes.Equal(ffj_key_TcpStatistics_RetransSegs, kn) { + currentKey = ffj_t_TcpStatistics_RetransSegs + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_TcpStatistics_InCsumErrors, kn) { + currentKey = ffj_t_TcpStatistics_InCsumErrors + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_TcpStatistics_OutRsts, kn) { + currentKey = ffj_t_TcpStatistics_OutRsts + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_TcpStatistics_InErrs, kn) { + currentKey = ffj_t_TcpStatistics_InErrs + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_TcpStatistics_RetransSegs, kn) { + currentKey = ffj_t_TcpStatistics_RetransSegs + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_TcpStatistics_OutSegs, kn) { + currentKey = ffj_t_TcpStatistics_OutSegs + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_TcpStatistics_InSegs, kn) { + currentKey = ffj_t_TcpStatistics_InSegs + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_TcpStatistics_CurrEstab, kn) { + currentKey = ffj_t_TcpStatistics_CurrEstab + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_TcpStatistics_EstabResets, kn) { + currentKey = ffj_t_TcpStatistics_EstabResets + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_TcpStatistics_AttemptFails, kn) { + currentKey = ffj_t_TcpStatistics_AttemptFails + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_TcpStatistics_PassiveOpens, kn) { + currentKey = ffj_t_TcpStatistics_PassiveOpens + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_TcpStatistics_ActiveOpens, kn) { + currentKey = ffj_t_TcpStatistics_ActiveOpens + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_TcpStatistics_MaxConn, kn) { + currentKey = ffj_t_TcpStatistics_MaxConn + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_TcpStatistics_RtoMax, kn) { + currentKey = ffj_t_TcpStatistics_RtoMax + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_TcpStatistics_RtoMin, kn) { + currentKey = ffj_t_TcpStatistics_RtoMin + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_TcpStatistics_RtoAlgorithm, kn) { + currentKey = ffj_t_TcpStatistics_RtoAlgorithm + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_TcpStatisticsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_TcpStatistics_RtoAlgorithm: + goto handle_RtoAlgorithm + + case ffj_t_TcpStatistics_RtoMin: + goto handle_RtoMin + + case ffj_t_TcpStatistics_RtoMax: + goto handle_RtoMax + + case ffj_t_TcpStatistics_MaxConn: + goto handle_MaxConn + + case ffj_t_TcpStatistics_ActiveOpens: + goto 
handle_ActiveOpens + + case ffj_t_TcpStatistics_PassiveOpens: + goto handle_PassiveOpens + + case ffj_t_TcpStatistics_AttemptFails: + goto handle_AttemptFails + + case ffj_t_TcpStatistics_EstabResets: + goto handle_EstabResets + + case ffj_t_TcpStatistics_CurrEstab: + goto handle_CurrEstab + + case ffj_t_TcpStatistics_InSegs: + goto handle_InSegs + + case ffj_t_TcpStatistics_OutSegs: + goto handle_OutSegs + + case ffj_t_TcpStatistics_RetransSegs: + goto handle_RetransSegs + + case ffj_t_TcpStatistics_InErrs: + goto handle_InErrs + + case ffj_t_TcpStatistics_OutRsts: + goto handle_OutRsts + + case ffj_t_TcpStatistics_InCsumErrors: + goto handle_InCsumErrors + + case ffj_t_TcpStatisticsno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_RtoAlgorithm: + + /* handler: uj.RtoAlgorithm type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.RtoAlgorithm = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.RtoAlgorithm = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_RtoMin: + + /* handler: uj.RtoMin type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.RtoMin = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.RtoMin = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_RtoMax: + + /* handler: uj.RtoMax type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.RtoMax = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.RtoMax = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_MaxConn: + + /* handler: uj.MaxConn type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.MaxConn = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.MaxConn = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ActiveOpens: + + /* handler: uj.ActiveOpens type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.ActiveOpens = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.ActiveOpens = &ttypval + + } + } + + 
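+	// All int64 counter handlers share this shape: reject non-integer tokens,
+	// treat null as nil, otherwise parse base-10 into a fresh pointer.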
state = fflib.FFParse_after_value + goto mainparse + +handle_PassiveOpens: + + /* handler: uj.PassiveOpens type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.PassiveOpens = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.PassiveOpens = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_AttemptFails: + + /* handler: uj.AttemptFails type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.AttemptFails = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.AttemptFails = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_EstabResets: + + /* handler: uj.EstabResets type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.EstabResets = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.EstabResets = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_CurrEstab: + + /* handler: uj.CurrEstab type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.CurrEstab = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.CurrEstab = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_InSegs: + + /* handler: uj.InSegs type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.InSegs = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.InSegs = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_OutSegs: + + /* handler: uj.OutSegs type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.OutSegs = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.OutSegs = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_RetransSegs: + + /* handler: uj.RetransSegs type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s 
into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.RetransSegs = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.RetransSegs = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_InErrs: + + /* handler: uj.InErrs type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.InErrs = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.InErrs = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_OutRsts: + + /* handler: uj.OutRsts type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.OutRsts = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.OutRsts = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_InCsumErrors: + + /* handler: uj.InCsumErrors type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.InCsumErrors = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.InCsumErrors = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *TimeInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *TimeInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"nanoseconds":`) + fflib.FormatBits2(buf, uint64(mj.Nanoseconds), 10, mj.Nanoseconds < 0) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_TimeInfobase = iota + ffj_t_TimeInfono_such_key + + ffj_t_TimeInfo_Nanoseconds +) + +var ffj_key_TimeInfo_Nanoseconds = []byte("nanoseconds") + +func (uj *TimeInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *TimeInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_TimeInfobase + _ = currentKey + tok := fflib.FFTok_init + 
wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_TimeInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'n': + + if bytes.Equal(ffj_key_TimeInfo_Nanoseconds, kn) { + currentKey = ffj_t_TimeInfo_Nanoseconds + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_TimeInfo_Nanoseconds, kn) { + currentKey = ffj_t_TimeInfo_Nanoseconds + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_TimeInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_TimeInfo_Nanoseconds: + goto handle_Nanoseconds + + case ffj_t_TimeInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Nanoseconds: + + /* handler: uj.Nanoseconds type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + uj.Nanoseconds = int64(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *TrafficControlStatistics) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *TrafficControlStatistics) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + 
return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "id":`) + fflib.WriteJsonString(buf, string(mj.ID)) + buf.WriteByte(',') + if mj.Backlog != nil { + if true { + buf.WriteString(`"backlog":`) + fflib.FormatBits2(buf, uint64(*mj.Backlog), 10, false) + buf.WriteByte(',') + } + } + if mj.Bytes != nil { + if true { + buf.WriteString(`"bytes":`) + fflib.FormatBits2(buf, uint64(*mj.Bytes), 10, false) + buf.WriteByte(',') + } + } + if mj.Drops != nil { + if true { + buf.WriteString(`"drops":`) + fflib.FormatBits2(buf, uint64(*mj.Drops), 10, false) + buf.WriteByte(',') + } + } + if mj.Overlimits != nil { + if true { + buf.WriteString(`"overlimits":`) + fflib.FormatBits2(buf, uint64(*mj.Overlimits), 10, false) + buf.WriteByte(',') + } + } + if mj.Packets != nil { + if true { + buf.WriteString(`"packets":`) + fflib.FormatBits2(buf, uint64(*mj.Packets), 10, false) + buf.WriteByte(',') + } + } + if mj.Qlen != nil { + if true { + buf.WriteString(`"qlen":`) + fflib.FormatBits2(buf, uint64(*mj.Qlen), 10, false) + buf.WriteByte(',') + } + } + if mj.RateBPS != nil { + if true { + buf.WriteString(`"ratebps":`) + fflib.FormatBits2(buf, uint64(*mj.RateBPS), 10, false) + buf.WriteByte(',') + } + } + if mj.RatePPS != nil { + if true { + buf.WriteString(`"ratepps":`) + fflib.FormatBits2(buf, uint64(*mj.RatePPS), 10, false) + buf.WriteByte(',') + } + } + if mj.Requeues != nil { + if true { + buf.WriteString(`"requeues":`) + fflib.FormatBits2(buf, uint64(*mj.Requeues), 10, false) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_TrafficControlStatisticsbase = iota + ffj_t_TrafficControlStatisticsno_such_key + + ffj_t_TrafficControlStatistics_ID + + ffj_t_TrafficControlStatistics_Backlog + + ffj_t_TrafficControlStatistics_Bytes + + ffj_t_TrafficControlStatistics_Drops + + ffj_t_TrafficControlStatistics_Overlimits + + ffj_t_TrafficControlStatistics_Packets + + ffj_t_TrafficControlStatistics_Qlen + + ffj_t_TrafficControlStatistics_RateBPS + + ffj_t_TrafficControlStatistics_RatePPS + + ffj_t_TrafficControlStatistics_Requeues +) + +var ffj_key_TrafficControlStatistics_ID = []byte("id") + +var ffj_key_TrafficControlStatistics_Backlog = []byte("backlog") + +var ffj_key_TrafficControlStatistics_Bytes = []byte("bytes") + +var ffj_key_TrafficControlStatistics_Drops = []byte("drops") + +var ffj_key_TrafficControlStatistics_Overlimits = []byte("overlimits") + +var ffj_key_TrafficControlStatistics_Packets = []byte("packets") + +var ffj_key_TrafficControlStatistics_Qlen = []byte("qlen") + +var ffj_key_TrafficControlStatistics_RateBPS = []byte("ratebps") + +var ffj_key_TrafficControlStatistics_RatePPS = []byte("ratepps") + +var ffj_key_TrafficControlStatistics_Requeues = []byte("requeues") + +func (uj *TrafficControlStatistics) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *TrafficControlStatistics) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_TrafficControlStatisticsbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = 
fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_TrafficControlStatisticsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'b': + + if bytes.Equal(ffj_key_TrafficControlStatistics_Backlog, kn) { + currentKey = ffj_t_TrafficControlStatistics_Backlog + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_TrafficControlStatistics_Bytes, kn) { + currentKey = ffj_t_TrafficControlStatistics_Bytes + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'd': + + if bytes.Equal(ffj_key_TrafficControlStatistics_Drops, kn) { + currentKey = ffj_t_TrafficControlStatistics_Drops + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffj_key_TrafficControlStatistics_ID, kn) { + currentKey = ffj_t_TrafficControlStatistics_ID + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'o': + + if bytes.Equal(ffj_key_TrafficControlStatistics_Overlimits, kn) { + currentKey = ffj_t_TrafficControlStatistics_Overlimits + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffj_key_TrafficControlStatistics_Packets, kn) { + currentKey = ffj_t_TrafficControlStatistics_Packets + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'q': + + if bytes.Equal(ffj_key_TrafficControlStatistics_Qlen, kn) { + currentKey = ffj_t_TrafficControlStatistics_Qlen + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'r': + + if bytes.Equal(ffj_key_TrafficControlStatistics_RateBPS, kn) { + currentKey = ffj_t_TrafficControlStatistics_RateBPS + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_TrafficControlStatistics_RatePPS, kn) { + currentKey = ffj_t_TrafficControlStatistics_RatePPS + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_TrafficControlStatistics_Requeues, kn) { + currentKey = ffj_t_TrafficControlStatistics_Requeues + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_TrafficControlStatistics_Requeues, kn) { + currentKey = ffj_t_TrafficControlStatistics_Requeues + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_TrafficControlStatistics_RatePPS, kn) { + currentKey = ffj_t_TrafficControlStatistics_RatePPS + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_TrafficControlStatistics_RateBPS, kn) { + currentKey = ffj_t_TrafficControlStatistics_RateBPS + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_TrafficControlStatistics_Qlen, kn) { + currentKey = ffj_t_TrafficControlStatistics_Qlen + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_TrafficControlStatistics_Packets, kn) { + currentKey = ffj_t_TrafficControlStatistics_Packets + state = fflib.FFParse_want_colon + goto mainparse + } + + if 
fflib.EqualFoldRight(ffj_key_TrafficControlStatistics_Overlimits, kn) { + currentKey = ffj_t_TrafficControlStatistics_Overlimits + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_TrafficControlStatistics_Drops, kn) { + currentKey = ffj_t_TrafficControlStatistics_Drops + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_TrafficControlStatistics_Bytes, kn) { + currentKey = ffj_t_TrafficControlStatistics_Bytes + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_TrafficControlStatistics_Backlog, kn) { + currentKey = ffj_t_TrafficControlStatistics_Backlog + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_TrafficControlStatistics_ID, kn) { + currentKey = ffj_t_TrafficControlStatistics_ID + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_TrafficControlStatisticsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_TrafficControlStatistics_ID: + goto handle_ID + + case ffj_t_TrafficControlStatistics_Backlog: + goto handle_Backlog + + case ffj_t_TrafficControlStatistics_Bytes: + goto handle_Bytes + + case ffj_t_TrafficControlStatistics_Drops: + goto handle_Drops + + case ffj_t_TrafficControlStatistics_Overlimits: + goto handle_Overlimits + + case ffj_t_TrafficControlStatistics_Packets: + goto handle_Packets + + case ffj_t_TrafficControlStatistics_Qlen: + goto handle_Qlen + + case ffj_t_TrafficControlStatistics_RateBPS: + goto handle_RateBPS + + case ffj_t_TrafficControlStatistics_RatePPS: + goto handle_RatePPS + + case ffj_t_TrafficControlStatistics_Requeues: + goto handle_Requeues + + case ffj_t_TrafficControlStatisticsno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ID: + + /* handler: uj.ID type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.ID = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Backlog: + + /* handler: uj.Backlog type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.Backlog = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.Backlog = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Bytes: + + /* handler: uj.Bytes type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal 
%s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.Bytes = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.Bytes = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Drops: + + /* handler: uj.Drops type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.Drops = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.Drops = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Overlimits: + + /* handler: uj.Overlimits type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.Overlimits = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.Overlimits = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Packets: + + /* handler: uj.Packets type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.Packets = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.Packets = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Qlen: + + /* handler: uj.Qlen type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.Qlen = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.Qlen = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_RateBPS: + + /* handler: uj.RateBPS type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.RateBPS = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.RateBPS = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_RatePPS: + + /* handler: uj.RatePPS type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.RatePPS = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.RatePPS = &ttypval + + } + } + + state = 
fflib.FFParse_after_value + goto mainparse + +handle_Requeues: + + /* handler: uj.Requeues type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.Requeues = nil + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := uint64(tval) + uj.Requeues = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *URL) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *URL) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "scheme":`) + fflib.WriteJsonString(buf, string(mj.Scheme)) + buf.WriteString(`,"address":`) + + { + + err = mj.Address.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + if mj.Path != nil { + if true { + buf.WriteString(`"path":`) + fflib.WriteJsonString(buf, string(*mj.Path)) + buf.WriteByte(',') + } + } + buf.WriteString(`"query":`) + if mj.Query != nil { + buf.WriteString(`[`) + for i, v := range mj.Query { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + if mj.Fragment != nil { + if true { + buf.WriteString(`"fragment":`) + fflib.WriteJsonString(buf, string(*mj.Fragment)) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_URLbase = iota + ffj_t_URLno_such_key + + ffj_t_URL_Scheme + + ffj_t_URL_Address + + ffj_t_URL_Path + + ffj_t_URL_Query + + ffj_t_URL_Fragment +) + +var ffj_key_URL_Scheme = []byte("scheme") + +var ffj_key_URL_Address = []byte("address") + +var ffj_key_URL_Path = []byte("path") + +var ffj_key_URL_Query = []byte("query") + +var ffj_key_URL_Fragment = []byte("fragment") + +func (uj *URL) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *URL) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_URLbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == 
fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_URLno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'a': + + if bytes.Equal(ffj_key_URL_Address, kn) { + currentKey = ffj_t_URL_Address + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'f': + + if bytes.Equal(ffj_key_URL_Fragment, kn) { + currentKey = ffj_t_URL_Fragment + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffj_key_URL_Path, kn) { + currentKey = ffj_t_URL_Path + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'q': + + if bytes.Equal(ffj_key_URL_Query, kn) { + currentKey = ffj_t_URL_Query + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_URL_Scheme, kn) { + currentKey = ffj_t_URL_Scheme + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_URL_Fragment, kn) { + currentKey = ffj_t_URL_Fragment + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_URL_Query, kn) { + currentKey = ffj_t_URL_Query + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_URL_Path, kn) { + currentKey = ffj_t_URL_Path + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_URL_Address, kn) { + currentKey = ffj_t_URL_Address + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_URL_Scheme, kn) { + currentKey = ffj_t_URL_Scheme + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_URLno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_URL_Scheme: + goto handle_Scheme + + case ffj_t_URL_Address: + goto handle_Address + + case ffj_t_URL_Path: + goto handle_Path + + case ffj_t_URL_Query: + goto handle_Query + + case ffj_t_URL_Fragment: + goto handle_Fragment + + case ffj_t_URLno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Scheme: + + /* handler: uj.Scheme type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Scheme = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Address: + + /* handler: uj.Address type=mesos.Address kind=struct 
quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.Address.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Path: + + /* handler: uj.Path type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Path = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Path = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Query: + + /* handler: uj.Query type=[]mesos.Parameter kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Query = nil + } else { + + uj.Query = []Parameter{} + + wantVal := true + + for { + + var tmp_uj__Query Parameter + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Query type=mesos.Parameter kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Query.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Query = append(uj.Query, tmp_uj__Query) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Fragment: + + /* handler: uj.Fragment type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Fragment = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Fragment = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *UUID) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *UUID) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if len(mj.Value) != 0 { + buf.WriteString(`"value":`) + if mj.Value != nil { 
+ buf.WriteString(`"`) + { + enc := base64.NewEncoder(base64.StdEncoding, buf) + enc.Write(reflect.Indirect(reflect.ValueOf(mj.Value)).Bytes()) + enc.Close() + } + buf.WriteString(`"`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_UUIDbase = iota + ffj_t_UUIDno_such_key + + ffj_t_UUID_Value +) + +var ffj_key_UUID_Value = []byte("value") + +func (uj *UUID) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *UUID) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_UUIDbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_UUIDno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'v': + + if bytes.Equal(ffj_key_UUID_Value, kn) { + currentKey = ffj_t_UUID_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_UUID_Value, kn) { + currentKey = ffj_t_UUID_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_UUIDno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_UUID_Value: + goto handle_Value + + case ffj_t_UUIDno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Value: + + /* handler: uj.Value type=[]uint8 kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Value = nil + } else { + b := make([]byte, base64.StdEncoding.DecodedLen(fs.Output.Len())) + n, err := base64.StdEncoding.Decode(b, fs.Output.Bytes()) + if err != nil { + return fs.WrapErr(err) + } + + v := reflect.ValueOf(&uj.Value).Elem() + v.SetBytes(b[0:n]) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value 
token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *UdpStatistics) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *UdpStatistics) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.InDatagrams != nil { + if true { + buf.WriteString(`"InDatagrams":`) + fflib.FormatBits2(buf, uint64(*mj.InDatagrams), 10, *mj.InDatagrams < 0) + buf.WriteByte(',') + } + } + if mj.NoPorts != nil { + if true { + buf.WriteString(`"NoPorts":`) + fflib.FormatBits2(buf, uint64(*mj.NoPorts), 10, *mj.NoPorts < 0) + buf.WriteByte(',') + } + } + if mj.InErrors != nil { + if true { + buf.WriteString(`"InErrors":`) + fflib.FormatBits2(buf, uint64(*mj.InErrors), 10, *mj.InErrors < 0) + buf.WriteByte(',') + } + } + if mj.OutDatagrams != nil { + if true { + buf.WriteString(`"OutDatagrams":`) + fflib.FormatBits2(buf, uint64(*mj.OutDatagrams), 10, *mj.OutDatagrams < 0) + buf.WriteByte(',') + } + } + if mj.RcvbufErrors != nil { + if true { + buf.WriteString(`"RcvbufErrors":`) + fflib.FormatBits2(buf, uint64(*mj.RcvbufErrors), 10, *mj.RcvbufErrors < 0) + buf.WriteByte(',') + } + } + if mj.SndbufErrors != nil { + if true { + buf.WriteString(`"SndbufErrors":`) + fflib.FormatBits2(buf, uint64(*mj.SndbufErrors), 10, *mj.SndbufErrors < 0) + buf.WriteByte(',') + } + } + if mj.InCsumErrors != nil { + if true { + buf.WriteString(`"InCsumErrors":`) + fflib.FormatBits2(buf, uint64(*mj.InCsumErrors), 10, *mj.InCsumErrors < 0) + buf.WriteByte(',') + } + } + if mj.IgnoredMulti != nil { + if true { + buf.WriteString(`"IgnoredMulti":`) + fflib.FormatBits2(buf, uint64(*mj.IgnoredMulti), 10, *mj.IgnoredMulti < 0) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_UdpStatisticsbase = iota + ffj_t_UdpStatisticsno_such_key + + ffj_t_UdpStatistics_InDatagrams + + ffj_t_UdpStatistics_NoPorts + + ffj_t_UdpStatistics_InErrors + + ffj_t_UdpStatistics_OutDatagrams + + ffj_t_UdpStatistics_RcvbufErrors + + ffj_t_UdpStatistics_SndbufErrors + + ffj_t_UdpStatistics_InCsumErrors + + ffj_t_UdpStatistics_IgnoredMulti +) + +var ffj_key_UdpStatistics_InDatagrams = []byte("InDatagrams") + +var ffj_key_UdpStatistics_NoPorts = []byte("NoPorts") + +var ffj_key_UdpStatistics_InErrors = []byte("InErrors") + +var ffj_key_UdpStatistics_OutDatagrams = []byte("OutDatagrams") + +var ffj_key_UdpStatistics_RcvbufErrors = []byte("RcvbufErrors") + +var ffj_key_UdpStatistics_SndbufErrors = []byte("SndbufErrors") + +var ffj_key_UdpStatistics_InCsumErrors = []byte("InCsumErrors") + +var ffj_key_UdpStatistics_IgnoredMulti = []byte("IgnoredMulti") + +func (uj *UdpStatistics) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *UdpStatistics) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey 
:= ffj_t_UdpStatisticsbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_UdpStatisticsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'I': + + if bytes.Equal(ffj_key_UdpStatistics_InDatagrams, kn) { + currentKey = ffj_t_UdpStatistics_InDatagrams + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_UdpStatistics_InErrors, kn) { + currentKey = ffj_t_UdpStatistics_InErrors + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_UdpStatistics_InCsumErrors, kn) { + currentKey = ffj_t_UdpStatistics_InCsumErrors + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_UdpStatistics_IgnoredMulti, kn) { + currentKey = ffj_t_UdpStatistics_IgnoredMulti + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'N': + + if bytes.Equal(ffj_key_UdpStatistics_NoPorts, kn) { + currentKey = ffj_t_UdpStatistics_NoPorts + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'O': + + if bytes.Equal(ffj_key_UdpStatistics_OutDatagrams, kn) { + currentKey = ffj_t_UdpStatistics_OutDatagrams + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'R': + + if bytes.Equal(ffj_key_UdpStatistics_RcvbufErrors, kn) { + currentKey = ffj_t_UdpStatistics_RcvbufErrors + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'S': + + if bytes.Equal(ffj_key_UdpStatistics_SndbufErrors, kn) { + currentKey = ffj_t_UdpStatistics_SndbufErrors + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_UdpStatistics_IgnoredMulti, kn) { + currentKey = ffj_t_UdpStatistics_IgnoredMulti + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_UdpStatistics_InCsumErrors, kn) { + currentKey = ffj_t_UdpStatistics_InCsumErrors + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_UdpStatistics_SndbufErrors, kn) { + currentKey = ffj_t_UdpStatistics_SndbufErrors + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_UdpStatistics_RcvbufErrors, kn) { + currentKey = ffj_t_UdpStatistics_RcvbufErrors + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_UdpStatistics_OutDatagrams, kn) { + currentKey = ffj_t_UdpStatistics_OutDatagrams + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_UdpStatistics_InErrors, kn) { + currentKey = ffj_t_UdpStatistics_InErrors + state = fflib.FFParse_want_colon + goto mainparse + } + + if 
fflib.EqualFoldRight(ffj_key_UdpStatistics_NoPorts, kn) { + currentKey = ffj_t_UdpStatistics_NoPorts + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_UdpStatistics_InDatagrams, kn) { + currentKey = ffj_t_UdpStatistics_InDatagrams + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_UdpStatisticsno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_UdpStatistics_InDatagrams: + goto handle_InDatagrams + + case ffj_t_UdpStatistics_NoPorts: + goto handle_NoPorts + + case ffj_t_UdpStatistics_InErrors: + goto handle_InErrors + + case ffj_t_UdpStatistics_OutDatagrams: + goto handle_OutDatagrams + + case ffj_t_UdpStatistics_RcvbufErrors: + goto handle_RcvbufErrors + + case ffj_t_UdpStatistics_SndbufErrors: + goto handle_SndbufErrors + + case ffj_t_UdpStatistics_InCsumErrors: + goto handle_InCsumErrors + + case ffj_t_UdpStatistics_IgnoredMulti: + goto handle_IgnoredMulti + + case ffj_t_UdpStatisticsno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_InDatagrams: + + /* handler: uj.InDatagrams type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.InDatagrams = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.InDatagrams = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_NoPorts: + + /* handler: uj.NoPorts type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.NoPorts = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.NoPorts = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_InErrors: + + /* handler: uj.InErrors type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.InErrors = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.InErrors = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_OutDatagrams: + + /* handler: uj.OutDatagrams type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null 
{ + + uj.OutDatagrams = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.OutDatagrams = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_RcvbufErrors: + + /* handler: uj.RcvbufErrors type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.RcvbufErrors = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.RcvbufErrors = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_SndbufErrors: + + /* handler: uj.SndbufErrors type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.SndbufErrors = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.SndbufErrors = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_InCsumErrors: + + /* handler: uj.InCsumErrors type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.InCsumErrors = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.InCsumErrors = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_IgnoredMulti: + + /* handler: uj.IgnoredMulti type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.IgnoredMulti = nil + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := int64(tval) + uj.IgnoredMulti = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Unavailability) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Unavailability) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "start":`) + + { + + err = mj.Start.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + 
buf.WriteByte(',') + if mj.Duration != nil { + if true { + buf.WriteString(`"duration":`) + + { + + err = mj.Duration.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Unavailabilitybase = iota + ffj_t_Unavailabilityno_such_key + + ffj_t_Unavailability_Start + + ffj_t_Unavailability_Duration +) + +var ffj_key_Unavailability_Start = []byte("start") + +var ffj_key_Unavailability_Duration = []byte("duration") + +func (uj *Unavailability) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Unavailability) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Unavailabilitybase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Unavailabilityno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'd': + + if bytes.Equal(ffj_key_Unavailability_Duration, kn) { + currentKey = ffj_t_Unavailability_Duration + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_Unavailability_Start, kn) { + currentKey = ffj_t_Unavailability_Start + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Unavailability_Duration, kn) { + currentKey = ffj_t_Unavailability_Duration + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Unavailability_Start, kn) { + currentKey = ffj_t_Unavailability_Start + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Unavailabilityno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Unavailability_Start: + goto handle_Start + + case ffj_t_Unavailability_Duration: + goto handle_Duration + + case ffj_t_Unavailabilityno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Start: + + /* handler: uj.Start type=mesos.TimeInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.Start.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Duration: + + /* handler: uj.Duration type=mesos.DurationInfo kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Duration = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Duration == nil { + uj.Duration = new(DurationInfo) + } + + err = uj.Duration.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Value) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Value) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "type":`) + + { + + obj, err = mj.Type.MarshalJSON() + if err != nil { + 
return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + if mj.Scalar != nil { + if true { + buf.WriteString(`"scalar":`) + + { + + err = mj.Scalar.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Ranges != nil { + if true { + buf.WriteString(`"ranges":`) + + { + + err = mj.Ranges.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Set != nil { + if true { + buf.WriteString(`"set":`) + + { + + err = mj.Set.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Text != nil { + if true { + buf.WriteString(`"text":`) + + { + + err = mj.Text.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Valuebase = iota + ffj_t_Valueno_such_key + + ffj_t_Value_Type + + ffj_t_Value_Scalar + + ffj_t_Value_Ranges + + ffj_t_Value_Set + + ffj_t_Value_Text +) + +var ffj_key_Value_Type = []byte("type") + +var ffj_key_Value_Scalar = []byte("scalar") + +var ffj_key_Value_Ranges = []byte("ranges") + +var ffj_key_Value_Set = []byte("set") + +var ffj_key_Value_Text = []byte("text") + +func (uj *Value) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Value) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Valuebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
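+	// Empty ("") and unrecognized keys both map to the no_such_key sentinel;
+	// their values are later skipped via fs.SkipField instead of failing the parse.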
+ currentKey = ffj_t_Valueno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'r': + + if bytes.Equal(ffj_key_Value_Ranges, kn) { + currentKey = ffj_t_Value_Ranges + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_Value_Scalar, kn) { + currentKey = ffj_t_Value_Scalar + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Value_Set, kn) { + currentKey = ffj_t_Value_Set + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_Value_Type, kn) { + currentKey = ffj_t_Value_Type + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Value_Text, kn) { + currentKey = ffj_t_Value_Text + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Value_Text, kn) { + currentKey = ffj_t_Value_Text + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Value_Set, kn) { + currentKey = ffj_t_Value_Set + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Value_Ranges, kn) { + currentKey = ffj_t_Value_Ranges + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Value_Scalar, kn) { + currentKey = ffj_t_Value_Scalar + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Value_Type, kn) { + currentKey = ffj_t_Value_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Valueno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Value_Type: + goto handle_Type + + case ffj_t_Value_Scalar: + goto handle_Scalar + + case ffj_t_Value_Ranges: + goto handle_Ranges + + case ffj_t_Value_Set: + goto handle_Set + + case ffj_t_Value_Text: + goto handle_Text + + case ffj_t_Valueno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Type: + + /* handler: uj.Type type=mesos.Value_Type kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = uj.Type.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Scalar: + + /* handler: uj.Scalar type=mesos.Value_Scalar kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Scalar = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Scalar == nil { + uj.Scalar = new(Value_Scalar) + } + + err = uj.Scalar.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Ranges: + + /* handler: uj.Ranges type=mesos.Value_Ranges kind=struct 
quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Ranges = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Ranges == nil { + uj.Ranges = new(Value_Ranges) + } + + err = uj.Ranges.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Set: + + /* handler: uj.Set type=mesos.Value_Set kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Set = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Set == nil { + uj.Set = new(Value_Set) + } + + err = uj.Set.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Text: + + /* handler: uj.Text type=mesos.Value_Text kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Text = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Text == nil { + uj.Text = new(Value_Text) + } + + err = uj.Text.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Value_Range) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Value_Range) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"begin":`) + fflib.FormatBits2(buf, uint64(mj.Begin), 10, false) + buf.WriteString(`,"end":`) + fflib.FormatBits2(buf, uint64(mj.End), 10, false) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Value_Rangebase = iota + ffj_t_Value_Rangeno_such_key + + ffj_t_Value_Range_Begin + + ffj_t_Value_Range_End +) + +var ffj_key_Value_Range_Begin = []byte("begin") + +var ffj_key_Value_Range_End = []byte("end") + +func (uj *Value_Range) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Value_Range) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Value_Rangebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done 
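+	// In fflib's token naming, FFTok_right_bracket is the object terminator '}'
+	// (and FFTok_left_bracket the opener '{'), matching the map_start check above.
+	// Illustrative wire form handled by this codec: {"begin":1,"end":5}.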
+ } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Value_Rangeno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'b': + + if bytes.Equal(ffj_key_Value_Range_Begin, kn) { + currentKey = ffj_t_Value_Range_Begin + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'e': + + if bytes.Equal(ffj_key_Value_Range_End, kn) { + currentKey = ffj_t_Value_Range_End + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Value_Range_End, kn) { + currentKey = ffj_t_Value_Range_End + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Value_Range_Begin, kn) { + currentKey = ffj_t_Value_Range_Begin + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Value_Rangeno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Value_Range_Begin: + goto handle_Begin + + case ffj_t_Value_Range_End: + goto handle_End + + case ffj_t_Value_Rangeno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Begin: + + /* handler: uj.Begin type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + uj.Begin = uint64(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_End: + + /* handler: uj.End type=uint64 kind=uint64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for uint64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + uj.End = uint64(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Value_Ranges) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + 
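+	// A nil receiver marshals to JSON null, mirroring the null handling on decode.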
buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Value_Ranges) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"range":`) + if mj.Range != nil { + buf.WriteString(`[`) + for i, v := range mj.Range { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Value_Rangesbase = iota + ffj_t_Value_Rangesno_such_key + + ffj_t_Value_Ranges_Range +) + +var ffj_key_Value_Ranges_Range = []byte("range") + +func (uj *Value_Ranges) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Value_Ranges) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Value_Rangesbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Value_Rangesno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'r': + + if bytes.Equal(ffj_key_Value_Ranges_Range, kn) { + currentKey = ffj_t_Value_Ranges_Range + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Value_Ranges_Range, kn) { + currentKey = ffj_t_Value_Ranges_Range + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Value_Rangesno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Value_Ranges_Range: + goto handle_Range + + case ffj_t_Value_Rangesno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Range: + + /* handler: uj.Range type=[]mesos.Value_Range kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Range = nil + } else { + + uj.Range = []Value_Range{} + + wantVal := true + + for { + + var tmp_uj__Range Value_Range + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
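+	// A comma while an element is still expected (e.g. [,,,]) is rejected here;
+	// otherwise each element is decoded into tmp_uj__Range and appended below.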
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Range type=mesos.Value_Range kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Range.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Range = append(uj.Range, tmp_uj__Range) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Value_Scalar) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Value_Scalar) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"value":`) + fflib.AppendFloat(buf, float64(mj.Value), 'g', -1, 64) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Value_Scalarbase = iota + ffj_t_Value_Scalarno_such_key + + ffj_t_Value_Scalar_Value +) + +var ffj_key_Value_Scalar_Value = []byte("value") + +func (uj *Value_Scalar) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Value_Scalar) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Value_Scalarbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
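+	// Scalar values are emitted with AppendFloat in 'g' format (see
+	// MarshalJSONBuf above); on decode, handle_Value below accepts both
+	// integer and double tokens.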
+ currentKey = ffj_t_Value_Scalarno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'v': + + if bytes.Equal(ffj_key_Value_Scalar_Value, kn) { + currentKey = ffj_t_Value_Scalar_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Value_Scalar_Value, kn) { + currentKey = ffj_t_Value_Scalar_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Value_Scalarno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Value_Scalar_Value: + goto handle_Value + + case ffj_t_Value_Scalarno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Value: + + /* handler: uj.Value type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + + if err != nil { + return fs.WrapErr(err) + } + + uj.Value = float64(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Value_Set) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Value_Set) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if len(mj.Item) != 0 { + buf.WriteString(`"item":`) + if mj.Item != nil { + buf.WriteString(`[`) + for i, v := range mj.Item { + if i != 0 { + buf.WriteString(`,`) + } + fflib.WriteJsonString(buf, string(v)) + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Value_Setbase = iota + ffj_t_Value_Setno_such_key + + ffj_t_Value_Set_Item +) + +var ffj_key_Value_Set_Item = []byte("item") + +func (uj *Value_Set) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Value_Set) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Value_Setbase + _ = 
currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Value_Setno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'i': + + if bytes.Equal(ffj_key_Value_Set_Item, kn) { + currentKey = ffj_t_Value_Set_Item + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Value_Set_Item, kn) { + currentKey = ffj_t_Value_Set_Item + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Value_Setno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Value_Set_Item: + goto handle_Item + + case ffj_t_Value_Setno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Item: + + /* handler: uj.Item type=[]string kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Item = nil + } else { + + uj.Item = []string{} + + wantVal := true + + for { + + var tmp_uj__Item string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Item type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmp_uj__Item = string(string(outBuf)) + + } + } + + uj.Item = append(uj.Item, tmp_uj__Item) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Value_Text) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Value_Text) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"value":`) + fflib.WriteJsonString(buf, string(mj.Value)) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Value_Textbase = iota + ffj_t_Value_Textno_such_key + + ffj_t_Value_Text_Value +) + +var ffj_key_Value_Text_Value = []byte("value") + +func (uj *Value_Text) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Value_Text) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Value_Textbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
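+	// For the non-pointer Value field, a JSON null is a no-op: the empty
+	// branch in handle_Value below simply leaves the zero value "" in place.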
+ currentKey = ffj_t_Value_Textno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'v': + + if bytes.Equal(ffj_key_Value_Text_Value, kn) { + currentKey = ffj_t_Value_Text_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Value_Text_Value, kn) { + currentKey = ffj_t_Value_Text_Value + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Value_Textno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Value_Text_Value: + goto handle_Value + + case ffj_t_Value_Textno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Value: + + /* handler: uj.Value type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Value = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *VersionInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *VersionInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "version":`) + fflib.WriteJsonString(buf, string(mj.Version)) + buf.WriteByte(',') + if mj.BuildDate != nil { + if true { + buf.WriteString(`"build_date":`) + fflib.WriteJsonString(buf, string(*mj.BuildDate)) + buf.WriteByte(',') + } + } + if mj.BuildTime != nil { + if true { + buf.WriteString(`"build_time":`) + fflib.AppendFloat(buf, float64(*mj.BuildTime), 'g', -1, 64) + buf.WriteByte(',') + } + } + if mj.BuildUser != nil { + if true { + buf.WriteString(`"build_user":`) + fflib.WriteJsonString(buf, string(*mj.BuildUser)) + buf.WriteByte(',') + } + } + if mj.GitSHA != nil { + if true { + buf.WriteString(`"git_sha":`) + fflib.WriteJsonString(buf, string(*mj.GitSHA)) + buf.WriteByte(',') + } + } + if mj.GitBranch != nil { + if true { + buf.WriteString(`"git_branch":`) + fflib.WriteJsonString(buf, string(*mj.GitBranch)) + buf.WriteByte(',') + } + } + if mj.GitTag != nil { + if true { + buf.WriteString(`"git_tag":`) + 
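+	// Every field write here ends with ','; buf.Rewind(1) below drops the last
+	// byte (the trailing comma, or the pad space after '{' when no field was
+	// written) before the closing '}'.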
fflib.WriteJsonString(buf, string(*mj.GitTag)) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_VersionInfobase = iota + ffj_t_VersionInfono_such_key + + ffj_t_VersionInfo_Version + + ffj_t_VersionInfo_BuildDate + + ffj_t_VersionInfo_BuildTime + + ffj_t_VersionInfo_BuildUser + + ffj_t_VersionInfo_GitSHA + + ffj_t_VersionInfo_GitBranch + + ffj_t_VersionInfo_GitTag +) + +var ffj_key_VersionInfo_Version = []byte("version") + +var ffj_key_VersionInfo_BuildDate = []byte("build_date") + +var ffj_key_VersionInfo_BuildTime = []byte("build_time") + +var ffj_key_VersionInfo_BuildUser = []byte("build_user") + +var ffj_key_VersionInfo_GitSHA = []byte("git_sha") + +var ffj_key_VersionInfo_GitBranch = []byte("git_branch") + +var ffj_key_VersionInfo_GitTag = []byte("git_tag") + +func (uj *VersionInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *VersionInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_VersionInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
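+	// Key dispatch below is two-phase: a switch on the first byte with
+	// bytes.Equal for exact matches, then fold-based fallbacks
+	// (AsciiEqualFold/EqualFoldRight/SimpleLetterEqualFold) for
+	// case-insensitive matches, tried in reverse declaration order.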
+ currentKey = ffj_t_VersionInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'b': + + if bytes.Equal(ffj_key_VersionInfo_BuildDate, kn) { + currentKey = ffj_t_VersionInfo_BuildDate + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_VersionInfo_BuildTime, kn) { + currentKey = ffj_t_VersionInfo_BuildTime + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_VersionInfo_BuildUser, kn) { + currentKey = ffj_t_VersionInfo_BuildUser + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'g': + + if bytes.Equal(ffj_key_VersionInfo_GitSHA, kn) { + currentKey = ffj_t_VersionInfo_GitSHA + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_VersionInfo_GitBranch, kn) { + currentKey = ffj_t_VersionInfo_GitBranch + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_VersionInfo_GitTag, kn) { + currentKey = ffj_t_VersionInfo_GitTag + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'v': + + if bytes.Equal(ffj_key_VersionInfo_Version, kn) { + currentKey = ffj_t_VersionInfo_Version + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.AsciiEqualFold(ffj_key_VersionInfo_GitTag, kn) { + currentKey = ffj_t_VersionInfo_GitTag + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_VersionInfo_GitBranch, kn) { + currentKey = ffj_t_VersionInfo_GitBranch + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_VersionInfo_GitSHA, kn) { + currentKey = ffj_t_VersionInfo_GitSHA + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_VersionInfo_BuildUser, kn) { + currentKey = ffj_t_VersionInfo_BuildUser + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_VersionInfo_BuildTime, kn) { + currentKey = ffj_t_VersionInfo_BuildTime + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_VersionInfo_BuildDate, kn) { + currentKey = ffj_t_VersionInfo_BuildDate + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_VersionInfo_Version, kn) { + currentKey = ffj_t_VersionInfo_Version + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_VersionInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_VersionInfo_Version: + goto handle_Version + + case ffj_t_VersionInfo_BuildDate: + goto handle_BuildDate + + case ffj_t_VersionInfo_BuildTime: + goto handle_BuildTime + + case ffj_t_VersionInfo_BuildUser: + goto handle_BuildUser + + case ffj_t_VersionInfo_GitSHA: + goto handle_GitSHA + + case ffj_t_VersionInfo_GitBranch: + goto handle_GitBranch + + case ffj_t_VersionInfo_GitTag: + goto handle_GitTag + + case ffj_t_VersionInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } 
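+	// Field handlers below: the required Version assigns directly, while
+	// optional pointer fields (BuildDate, BuildTime, ...) decode into a
+	// temporary and store its address, or are reset to nil on JSON null.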
+ } + +handle_Version: + + /* handler: uj.Version type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Version = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_BuildDate: + + /* handler: uj.BuildDate type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.BuildDate = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.BuildDate = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_BuildTime: + + /* handler: uj.BuildTime type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + uj.BuildTime = nil + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + + if err != nil { + return fs.WrapErr(err) + } + + ttypval := float64(tval) + uj.BuildTime = &ttypval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_BuildUser: + + /* handler: uj.BuildUser type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.BuildUser = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.BuildUser = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_GitSHA: + + /* handler: uj.GitSHA type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.GitSHA = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.GitSHA = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_GitBranch: + + /* handler: uj.GitBranch type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.GitBranch = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.GitBranch = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_GitTag: + + /* handler: uj.GitTag type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.GitTag = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.GitTag = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: 
%v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Volume) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Volume) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.Mode != nil { + if true { + buf.WriteString(`"mode":`) + + { + + obj, err = mj.Mode.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + } + } + buf.WriteString(`"container_path":`) + fflib.WriteJsonString(buf, string(mj.ContainerPath)) + buf.WriteByte(',') + if mj.HostPath != nil { + if true { + buf.WriteString(`"host_path":`) + fflib.WriteJsonString(buf, string(*mj.HostPath)) + buf.WriteByte(',') + } + } + if mj.Image != nil { + if true { + buf.WriteString(`"image":`) + + { + + err = mj.Image.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Source != nil { + if true { + buf.WriteString(`"source":`) + + { + + err = mj.Source.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Volumebase = iota + ffj_t_Volumeno_such_key + + ffj_t_Volume_Mode + + ffj_t_Volume_ContainerPath + + ffj_t_Volume_HostPath + + ffj_t_Volume_Image + + ffj_t_Volume_Source +) + +var ffj_key_Volume_Mode = []byte("mode") + +var ffj_key_Volume_ContainerPath = []byte("container_path") + +var ffj_key_Volume_HostPath = []byte("host_path") + +var ffj_key_Volume_Image = []byte("image") + +var ffj_key_Volume_Source = []byte("source") + +func (uj *Volume) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Volume) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Volumebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Volumeno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_Volume_ContainerPath, kn) { + currentKey = ffj_t_Volume_ContainerPath + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'h': + + if bytes.Equal(ffj_key_Volume_HostPath, kn) { + currentKey = ffj_t_Volume_HostPath + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffj_key_Volume_Image, kn) { + currentKey = ffj_t_Volume_Image + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'm': + + if bytes.Equal(ffj_key_Volume_Mode, kn) { + currentKey = ffj_t_Volume_Mode + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_Volume_Source, kn) { + currentKey = ffj_t_Volume_Source + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Volume_Source, kn) { + currentKey = ffj_t_Volume_Source + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Volume_Image, kn) { + currentKey = ffj_t_Volume_Image + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Volume_HostPath, kn) { + currentKey = ffj_t_Volume_HostPath + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_Volume_ContainerPath, kn) { + currentKey = ffj_t_Volume_ContainerPath + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Volume_Mode, kn) { + currentKey = ffj_t_Volume_Mode + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Volumeno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Volume_Mode: + goto handle_Mode + + case ffj_t_Volume_ContainerPath: + goto handle_ContainerPath + + case ffj_t_Volume_HostPath: + goto handle_HostPath + + case ffj_t_Volume_Image: + goto handle_Image + + case ffj_t_Volume_Source: + goto handle_Source + + case ffj_t_Volumeno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Mode: + + /* handler: uj.Mode type=mesos.Volume_Mode kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Mode = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + if uj.Mode == nil { + uj.Mode = new(Volume_Mode) + } + + err = uj.Mode.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ContainerPath: + + /* handler: uj.ContainerPath type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + 
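+	// fs.Output.Bytes() exposes the lexer's reusable scratch buffer, so the
+	// string conversion below makes the required copy; the doubled string()
+	// is redundant but harmless generator output.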
uj.ContainerPath = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_HostPath: + + /* handler: uj.HostPath type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.HostPath = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.HostPath = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Image: + + /* handler: uj.Image type=mesos.Image kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Image = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Image == nil { + uj.Image = new(Image) + } + + err = uj.Image.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Source: + + /* handler: uj.Source type=mesos.Volume_Source kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Source = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Source == nil { + uj.Source = new(Volume_Source) + } + + err = uj.Source.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Volume_Source) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Volume_Source) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "type":`) + + { + + obj, err = mj.Type.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteByte(',') + if mj.DockerVolume != nil { + if true { + buf.WriteString(`"docker_volume":`) + + { + + err = mj.DockerVolume.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.HostPath != nil { + if true { + buf.WriteString(`"host_path":`) + + { + + err = mj.HostPath.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.SandboxPath != nil { + if true { + buf.WriteString(`"sandbox_path":`) + + { + + err = mj.SandboxPath.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + if mj.Secret != nil { + if true { + buf.WriteString(`"secret":`) + + { + + err = mj.Secret.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Volume_Sourcebase = iota + ffj_t_Volume_Sourceno_such_key + + ffj_t_Volume_Source_Type + + 
ffj_t_Volume_Source_DockerVolume + + ffj_t_Volume_Source_HostPath + + ffj_t_Volume_Source_SandboxPath + + ffj_t_Volume_Source_Secret +) + +var ffj_key_Volume_Source_Type = []byte("type") + +var ffj_key_Volume_Source_DockerVolume = []byte("docker_volume") + +var ffj_key_Volume_Source_HostPath = []byte("host_path") + +var ffj_key_Volume_Source_SandboxPath = []byte("sandbox_path") + +var ffj_key_Volume_Source_Secret = []byte("secret") + +func (uj *Volume_Source) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Volume_Source) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Volume_Sourcebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Volume_Sourceno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'd': + + if bytes.Equal(ffj_key_Volume_Source_DockerVolume, kn) { + currentKey = ffj_t_Volume_Source_DockerVolume + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'h': + + if bytes.Equal(ffj_key_Volume_Source_HostPath, kn) { + currentKey = ffj_t_Volume_Source_HostPath + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_Volume_Source_SandboxPath, kn) { + currentKey = ffj_t_Volume_Source_SandboxPath + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Volume_Source_Secret, kn) { + currentKey = ffj_t_Volume_Source_Secret + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_Volume_Source_Type, kn) { + currentKey = ffj_t_Volume_Source_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Volume_Source_Secret, kn) { + currentKey = ffj_t_Volume_Source_Secret + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Volume_Source_SandboxPath, kn) { + currentKey = ffj_t_Volume_Source_SandboxPath + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Volume_Source_HostPath, kn) { + currentKey = ffj_t_Volume_Source_HostPath + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Volume_Source_DockerVolume, kn) { + currentKey = ffj_t_Volume_Source_DockerVolume + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Volume_Source_Type, kn) { + currentKey = ffj_t_Volume_Source_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = 
ffj_t_Volume_Sourceno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Volume_Source_Type: + goto handle_Type + + case ffj_t_Volume_Source_DockerVolume: + goto handle_DockerVolume + + case ffj_t_Volume_Source_HostPath: + goto handle_HostPath + + case ffj_t_Volume_Source_SandboxPath: + goto handle_SandboxPath + + case ffj_t_Volume_Source_Secret: + goto handle_Secret + + case ffj_t_Volume_Sourceno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Type: + + /* handler: uj.Type type=mesos.Volume_Source_Type kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = uj.Type.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_DockerVolume: + + /* handler: uj.DockerVolume type=mesos.Volume_Source_DockerVolume kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.DockerVolume = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.DockerVolume == nil { + uj.DockerVolume = new(Volume_Source_DockerVolume) + } + + err = uj.DockerVolume.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_HostPath: + + /* handler: uj.HostPath type=mesos.Volume_Source_HostPath kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.HostPath = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.HostPath == nil { + uj.HostPath = new(Volume_Source_HostPath) + } + + err = uj.HostPath.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_SandboxPath: + + /* handler: uj.SandboxPath type=mesos.Volume_Source_SandboxPath kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.SandboxPath = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.SandboxPath == nil { + uj.SandboxPath = new(Volume_Source_SandboxPath) + } + + err = uj.SandboxPath.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Secret: + + /* handler: uj.Secret type=mesos.Secret kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.Secret = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.Secret == nil { + uj.Secret = new(Secret) + } + + err = uj.Secret.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + 
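+	// Shared exit labels: wantedvalue/wrongtokenerror wrap the offending token
+	// (and lexer output) into the returned error; tokerror prefers fs.BigError,
+	// then fs.Error, and panics only if neither recorded a cause.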
+wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Volume_Source_DockerVolume) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Volume_Source_DockerVolume) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ `) + if mj.Driver != nil { + if true { + buf.WriteString(`"driver":`) + fflib.WriteJsonString(buf, string(*mj.Driver)) + buf.WriteByte(',') + } + } + buf.WriteString(`"name":`) + fflib.WriteJsonString(buf, string(mj.Name)) + buf.WriteByte(',') + if mj.DriverOptions != nil { + if true { + buf.WriteString(`"driver_options":`) + + { + + err = mj.DriverOptions.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Volume_Source_DockerVolumebase = iota + ffj_t_Volume_Source_DockerVolumeno_such_key + + ffj_t_Volume_Source_DockerVolume_Driver + + ffj_t_Volume_Source_DockerVolume_Name + + ffj_t_Volume_Source_DockerVolume_DriverOptions +) + +var ffj_key_Volume_Source_DockerVolume_Driver = []byte("driver") + +var ffj_key_Volume_Source_DockerVolume_Name = []byte("name") + +var ffj_key_Volume_Source_DockerVolume_DriverOptions = []byte("driver_options") + +func (uj *Volume_Source_DockerVolume) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Volume_Source_DockerVolume) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Volume_Source_DockerVolumebase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
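+ // An empty key cannot match any field; it is routed to no_such_key so the
+ // decoder skips the associated value instead of failing.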
+ currentKey = ffj_t_Volume_Source_DockerVolumeno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'd': + + if bytes.Equal(ffj_key_Volume_Source_DockerVolume_Driver, kn) { + currentKey = ffj_t_Volume_Source_DockerVolume_Driver + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Volume_Source_DockerVolume_DriverOptions, kn) { + currentKey = ffj_t_Volume_Source_DockerVolume_DriverOptions + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffj_key_Volume_Source_DockerVolume_Name, kn) { + currentKey = ffj_t_Volume_Source_DockerVolume_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Volume_Source_DockerVolume_DriverOptions, kn) { + currentKey = ffj_t_Volume_Source_DockerVolume_DriverOptions + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Volume_Source_DockerVolume_Name, kn) { + currentKey = ffj_t_Volume_Source_DockerVolume_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Volume_Source_DockerVolume_Driver, kn) { + currentKey = ffj_t_Volume_Source_DockerVolume_Driver + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Volume_Source_DockerVolumeno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Volume_Source_DockerVolume_Driver: + goto handle_Driver + + case ffj_t_Volume_Source_DockerVolume_Name: + goto handle_Name + + case ffj_t_Volume_Source_DockerVolume_DriverOptions: + goto handle_DriverOptions + + case ffj_t_Volume_Source_DockerVolumeno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Driver: + + /* handler: uj.Driver type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Driver = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Driver = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Name: + + /* handler: uj.Name type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Name = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_DriverOptions: + + /* handler: uj.DriverOptions type=mesos.Parameters kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.DriverOptions = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.DriverOptions == nil { + uj.DriverOptions = new(Parameters) + } + + err = 
uj.DriverOptions.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Volume_Source_HostPath) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Volume_Source_HostPath) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "path":`) + fflib.WriteJsonString(buf, string(mj.Path)) + buf.WriteByte(',') + if mj.MountPropagation != nil { + if true { + buf.WriteString(`"mount_propagation":`) + + { + + err = mj.MountPropagation.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Volume_Source_HostPathbase = iota + ffj_t_Volume_Source_HostPathno_such_key + + ffj_t_Volume_Source_HostPath_Path + + ffj_t_Volume_Source_HostPath_MountPropagation +) + +var ffj_key_Volume_Source_HostPath_Path = []byte("path") + +var ffj_key_Volume_Source_HostPath_MountPropagation = []byte("mount_propagation") + +func (uj *Volume_Source_HostPath) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Volume_Source_HostPath) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Volume_Source_HostPathbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
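+ // As in the other decoders here: an empty key maps to no_such_key, while a
+ // non-empty key is matched in the else branch below, first by an exact byte
+ // comparison switched on its first character, then by a case-insensitive
+ // fold fallback.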
+ currentKey = ffj_t_Volume_Source_HostPathno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'm': + + if bytes.Equal(ffj_key_Volume_Source_HostPath_MountPropagation, kn) { + currentKey = ffj_t_Volume_Source_HostPath_MountPropagation + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffj_key_Volume_Source_HostPath_Path, kn) { + currentKey = ffj_t_Volume_Source_HostPath_Path + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.AsciiEqualFold(ffj_key_Volume_Source_HostPath_MountPropagation, kn) { + currentKey = ffj_t_Volume_Source_HostPath_MountPropagation + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Volume_Source_HostPath_Path, kn) { + currentKey = ffj_t_Volume_Source_HostPath_Path + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Volume_Source_HostPathno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Volume_Source_HostPath_Path: + goto handle_Path + + case ffj_t_Volume_Source_HostPath_MountPropagation: + goto handle_MountPropagation + + case ffj_t_Volume_Source_HostPathno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Path: + + /* handler: uj.Path type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Path = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_MountPropagation: + + /* handler: uj.MountPropagation type=mesos.MountPropagation kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + uj.MountPropagation = nil + + state = fflib.FFParse_after_value + goto mainparse + } + + if uj.MountPropagation == nil { + uj.MountPropagation = new(MountPropagation) + } + + err = uj.MountPropagation.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *Volume_Source_SandboxPath) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj 
*Volume_Source_SandboxPath) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"type":`) + + { + + obj, err = mj.Type.MarshalJSON() + if err != nil { + return err + } + buf.Write(obj) + + } + buf.WriteString(`,"path":`) + fflib.WriteJsonString(buf, string(mj.Path)) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Volume_Source_SandboxPathbase = iota + ffj_t_Volume_Source_SandboxPathno_such_key + + ffj_t_Volume_Source_SandboxPath_Type + + ffj_t_Volume_Source_SandboxPath_Path +) + +var ffj_key_Volume_Source_SandboxPath_Type = []byte("type") + +var ffj_key_Volume_Source_SandboxPath_Path = []byte("path") + +func (uj *Volume_Source_SandboxPath) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Volume_Source_SandboxPath) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Volume_Source_SandboxPathbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Volume_Source_SandboxPathno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'p': + + if bytes.Equal(ffj_key_Volume_Source_SandboxPath_Path, kn) { + currentKey = ffj_t_Volume_Source_SandboxPath_Path + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_Volume_Source_SandboxPath_Type, kn) { + currentKey = ffj_t_Volume_Source_SandboxPath_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Volume_Source_SandboxPath_Path, kn) { + currentKey = ffj_t_Volume_Source_SandboxPath_Path + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Volume_Source_SandboxPath_Type, kn) { + currentKey = ffj_t_Volume_Source_SandboxPath_Type + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Volume_Source_SandboxPathno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Volume_Source_SandboxPath_Type: + goto handle_Type + + case ffj_t_Volume_Source_SandboxPath_Path: + goto handle_Path + + case ffj_t_Volume_Source_SandboxPathno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Type: + + /* handler: uj.Type type=mesos.Volume_Source_SandboxPath_Type kind=int32 quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + tbuf, err := fs.CaptureField(tok) + if err != nil { + return fs.WrapErr(err) + } + + err = uj.Type.UnmarshalJSON(tbuf) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Path: + + /* handler: uj.Path type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Path = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} + +func (mj *WeightInfo) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *WeightInfo) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return 
nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{ "weight":`) + fflib.AppendFloat(buf, float64(mj.Weight), 'g', -1, 64) + buf.WriteByte(',') + if mj.Role != nil { + if true { + buf.WriteString(`"role":`) + fflib.WriteJsonString(buf, string(*mj.Role)) + buf.WriteByte(',') + } + } + buf.Rewind(1) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_WeightInfobase = iota + ffj_t_WeightInfono_such_key + + ffj_t_WeightInfo_Weight + + ffj_t_WeightInfo_Role +) + +var ffj_key_WeightInfo_Weight = []byte("weight") + +var ffj_key_WeightInfo_Role = []byte("role") + +func (uj *WeightInfo) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *WeightInfo) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_WeightInfobase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_WeightInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'r': + + if bytes.Equal(ffj_key_WeightInfo_Role, kn) { + currentKey = ffj_t_WeightInfo_Role + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'w': + + if bytes.Equal(ffj_key_WeightInfo_Weight, kn) { + currentKey = ffj_t_WeightInfo_Weight + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_WeightInfo_Role, kn) { + currentKey = ffj_t_WeightInfo_Role + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_WeightInfo_Weight, kn) { + currentKey = ffj_t_WeightInfo_Weight + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_WeightInfono_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_WeightInfo_Weight: + goto handle_Weight + + case ffj_t_WeightInfo_Role: + goto handle_Role + + case ffj_t_WeightInfono_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Weight: + + /* handler: uj.Weight type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + + if err != nil { + return fs.WrapErr(err) + } + + uj.Weight = float64(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Role: + + /* handler: uj.Role type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Role = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Role = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + + return nil +} diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/mesos.proto b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/mesos.proto new file mode 100644 index 000000000000..7bb7f3978c65 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/mesos.proto @@ -0,0 +1,3600 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto2"; + +package mesos; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option go_package = "mesos"; +option (gogoproto.benchgen_all) = true; +option (gogoproto.enum_stringer_all) = true; +option (gogoproto.equal_all) = true; +option (gogoproto.goproto_enum_prefix_all) = false; +option (gogoproto.goproto_enum_stringer_all) = false; +option (gogoproto.goproto_stringer_all) = false; +option (gogoproto.goproto_unrecognized_all) = false; +option (gogoproto.gostring_all) = true; +option (gogoproto.marshaler_all) = true; +option (gogoproto.populate_all) = true; +option (gogoproto.protosizer_all) = true; +option (gogoproto.stringer_all) = true; +option (gogoproto.testgen_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.verbose_equal_all) = true; + +// NOTES: (gogo protobuf for idiomatic Golang) +// - enum fields may be nullable if the default enum value is 0, otherwise the Golang zero-value of the enum isn't valid. +// - enums that declare UNKNOWN or other commonly used tokens should specify the goproto_enum_prefix option. +// - required fields are generally marked nullable +// - arrays of non-native types are generally marked nullable +// - fields w/ acronyms are generally renamed (via customname) to be idiomatic w/ respect to Golang +// - fields w/ glued acronyms are also renamed at the author's discretion w/ respect to legibility + +/** + * Status is used to indicate the state of the scheduler and executor + * driver after function calls. + */ +enum Status { + DRIVER_NOT_STARTED = 1; + DRIVER_RUNNING = 2; + DRIVER_ABORTED = 3; + DRIVER_STOPPED = 4; +} + + +/** + * A unique ID assigned to a framework. A framework can reuse this ID + * in order to do failover (see MesosSchedulerDriver). + */ +message FrameworkID { + required string value = 1 [(gogoproto.nullable) = false]; +} + + +/** + * A unique ID assigned to an offer. + */ +message OfferID { + required string value = 1 [(gogoproto.nullable) = false]; +} + + +/** + * A unique ID assigned to an agent. Currently, an agent gets a new ID + * whenever it (re)registers with Mesos. Framework writers shouldn't + * assume any binding between an agent ID and a hostname. + */ +message AgentID { + required string value = 1 [(gogoproto.nullable) = false]; +} + + +/** + * A framework-generated ID to distinguish a task. The ID must remain + * unique while the task is active. A framework can reuse an ID _only_ + * if the previous task with the same ID has reached a terminal state + * (e.g., TASK_FINISHED, TASK_KILLED, etc.). However, reusing task IDs + * is strongly discouraged (MESOS-2198). + */ +message TaskID { + required string value = 1 [(gogoproto.nullable) = false]; +} + + +/** + * A framework-generated ID to distinguish an executor. 
Only one + * executor with the same ID can be active on the same agent at a + * time. However, reusing executor IDs is discouraged. + */ +message ExecutorID { + required string value = 1 [(gogoproto.nullable) = false]; +} + + +/** + * ID used to uniquely identify a container. If the `parent` is not + * specified, the ID is a UUID generated by the agent to uniquely + * identify the container of an executor run. If the `parent` field is + * specified, it represents a nested container. + */ +message ContainerID { + required string value = 1 [(gogoproto.nullable) = false]; + optional ContainerID parent = 2; +} + + +/** + * A unique ID assigned to a resource provider. Currently, a resource + * provider gets a new ID whenever it (re)registers with Mesos. + */ +message ResourceProviderID { + required string value = 1 [(gogoproto.nullable) = false]; +} + + +/** + * A framework-generated ID to distinguish an operation. The ID + * must be unique within the framework. + */ +message OperationID { + required string value = 1 [(gogoproto.nullable) = false]; +} + + +/** + * Represents time since the epoch, in nanoseconds. + */ +message TimeInfo { + required int64 nanoseconds = 1 [(gogoproto.nullable) = false]; +} + + +/** + * Represents duration in nanoseconds. + */ +message DurationInfo { + required int64 nanoseconds = 1 [(gogoproto.nullable) = false]; +} + + +/** + * A network address. + * + * TODO(bmahler): Use this more widely. + */ +message Address { + // May contain a hostname, IP address, or both. + optional string hostname = 1; + optional string ip = 2 [(gogoproto.customname) = "IP"]; + + required int32 port = 3 [(gogoproto.nullable) = false]; +} + + +/** + * Represents a URL. + */ +message URL { + required string scheme = 1 [(gogoproto.nullable) = false]; + required Address address = 2 [(gogoproto.nullable) = false]; + optional string path = 3; + repeated Parameter query = 4 [(gogoproto.nullable) = false]; + optional string fragment = 5; +} + + +/** + * Represents an interval, from a given start time over a given duration. + * This interval pertains to an unavailability event, such as maintenance, + * and is not a generic interval. + */ +message Unavailability { + required TimeInfo start = 1 [(gogoproto.nullable) = false]; + + // When added to `start`, this represents the end of the interval. + // If unspecified, the duration is assumed to be infinite. + optional DurationInfo duration = 2; + + // TODO(josephw): Add additional fields for expressing the purpose and + // urgency of the unavailability event. +} + + +/** + * Represents a single machine, which may hold one or more agents. + * + * NOTE: In order to match an agent to a machine, both the `hostname` and + * `ip` must match the values advertised by the agent to the master. + * Hostname is not case-sensitive. + */ +message MachineID { + optional string hostname = 1; + optional string ip = 2 [(gogoproto.customname) = "IP"]; +} + + +/** + * Holds information about a single machine, its `mode`, and any other + * relevant information which may affect the behavior of the machine. + */ +message MachineInfo { + // Describes the several states that a machine can be in. A `Mode` + // applies to a machine and to all associated agents on the machine. + enum Mode { + // In this mode, a machine is behaving normally; + // offering resources, executing tasks, etc. + UP = 1; + + // In this mode, all agents on the machine are expected to cooperate with + // frameworks to drain resources. In general, draining is done ahead of + // a pending `unavailability`. 
The resources should be drained so as to + // maximize utilization prior to the maintenance but without knowingly + // violating the frameworks' requirements. + DRAINING = 2; + + // In this mode, a machine is not running any tasks and will not offer + // any of its resources. Agents on the machine will not be allowed to + // register with the master. + DOWN = 3; + } + + required MachineID id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "ID"]; + optional Mode mode = 2; + + // Signifies that the machine may be unavailable during the given interval. + // See comments in `Unavailability` and for the `unavailability` fields + // in `Offer` and `InverseOffer` for more information. + optional Unavailability unavailability = 3; +} + + +/** + * Describes a framework. + */ +message FrameworkInfo { + // Used to determine the Unix user that an executor or task should be + // launched as. + // + // When using the MesosSchedulerDriver, if the field is set to an + // empty string, it will automagically set it to the current user. + // + // When using the HTTP Scheduler API, the user has to be set + // explicitly. + required string user = 1 [(gogoproto.nullable) = false]; + + // Name of the framework that shows up in the Mesos Web UI. + required string name = 2 [(gogoproto.nullable) = false]; + + // Note that 'id' is only available after a framework has + // registered, however, it is included here in order to facilitate + // scheduler failover (i.e., if it is set then the + // MesosSchedulerDriver expects the scheduler is performing + // failover). + optional FrameworkID id = 3 [(gogoproto.customname) = "ID"]; + + // The amount of time (in seconds) that the master will wait for the + // scheduler to failover before it tears down the framework by + // killing all its tasks/executors. This should be non-zero if a + // framework expects to reconnect after a failure and not lose its + // tasks/executors. + // + // NOTE: To avoid accidental destruction of tasks, production + // frameworks typically set this to a large value (e.g., 1 week). + optional double failover_timeout = 4 [default = 0.0]; + + // If set, agents running tasks started by this framework will write + // the framework pid, executor pids and status updates to disk. If + // the agent exits (e.g., due to a crash or as part of upgrading + // Mesos), this checkpointed data allows the restarted agent to + // reconnect to executors that were started by the old instance of + // the agent. Enabling checkpointing improves fault tolerance, at + // the cost of a (usually small) increase in disk I/O. + optional bool checkpoint = 5 [default = false]; + + // Roles are the entities to which allocations are made. + // The framework must have at least one role in order to + // be offered resources. Note that `role` is deprecated + // in favor of `roles` and only one of these fields must + // be used. Since we cannot distinguish between empty + // `roles` and the default unset `role`, we require that + // frameworks set the `MULTI_ROLE` capability if + // setting the `roles` field. + optional string role = 6 [default = "*", deprecated=true]; + repeated string roles = 12; + + // Used to indicate the current host from which the scheduler is + // registered in the Mesos Web UI. If set to an empty string Mesos + // will automagically set it to the current hostname if one is + // available. + optional string hostname = 7; + + // This field should match the credential's principal the framework + // uses for authentication. 
This field is used for framework API + // rate limiting and dynamic reservations. It should be set even + // if authentication is not enabled if these features are desired. + optional string principal = 8; + + // This field allows a framework to advertise its web UI, so that + // the Mesos web UI can link to it. It is expected to be a full URL, + // for example http://my-scheduler.example.com:8080/. + optional string webui_url = 9 [(gogoproto.customname) = "WebUiURL"]; + + message Capability { + enum Type { + // This must be the first enum value in this list, to + // ensure that if 'type' is not set, the default value + // is UNKNOWN. This enables enum values to be added + // in a backwards-compatible way. See: MESOS-4997. + UNKNOWN = 0; + + // Receive offers with revocable resources. See 'Resource' + // message for details. + REVOCABLE_RESOURCES = 1; + + // Receive the TASK_KILLING TaskState when a task is being + // killed by an executor. The executor will examine this + // capability to determine whether it can send TASK_KILLING. + TASK_KILLING_STATE = 2; + + // Indicates whether the framework is aware of GPU resources. + // Frameworks that are aware of GPU resources are expected to + // avoid placing non-GPU workloads on GPU agents, in order + // to avoid occupying a GPU agent and preventing GPU workloads + // from running! Currently, if a framework is unaware of GPU + // resources, it will not be offered *any* of the resources on + // an agent with GPUs. This restriction is in place because we + // do not have a revocation mechanism that ensures GPU workloads + // can evict GPU agent occupants if necessary. + // + // TODO(bmahler): As we add revocation we can relax the + // restriction here. See MESOS-5634 for more information. + GPU_RESOURCES = 3; + + // Receive offers with resources that are shared. + SHARED_RESOURCES = 4; + + // Indicates that (1) the framework is prepared to handle the + // following TaskStates: TASK_UNREACHABLE, TASK_DROPPED, + // TASK_GONE, TASK_GONE_BY_OPERATOR, and TASK_UNKNOWN, and (2) + // the framework will assume responsibility for managing + // partitioned tasks that reregister with the master. + // + // Frameworks that enable this capability can define how they + // would like to handle partitioned tasks. Frameworks will + // receive TASK_UNREACHABLE for tasks on agents that are + // partitioned from the master. + // + // Without this capability, frameworks will receive TASK_LOST + // for tasks on partitioned agents. + // NOTE: Prior to Mesos 1.5, such tasks will be killed by Mesos + // when the agent reregisters (unless the master has failed over). + // However due to the lack of benefit in maintaining different + // behaviors depending on whether the master has failed over + // (see MESOS-7215), as of 1.5, Mesos will not kill these + // tasks in either case. + PARTITION_AWARE = 5; + + // This expresses the ability for the framework to be + // "multi-tenant" via using the newly introduced `roles` + // field, and examining `Offer.allocation_info` to determine + // which role the offers are being made to. We also + // expect that "single-tenant" schedulers eventually + // provide this and move away from the deprecated + // `role` field. + MULTI_ROLE = 6; + + // This capability has two effects for a framework. + // + // (1) The framework is offered resources in a new format. + // + // The offered resources have the `Resource.reservations` field set + // rather than `Resource.role` and `Resource.reservation`. 
In short, + // an empty `reservations` field denotes unreserved resources, and + // each `ReservationInfo` in the `reservations` field denotes a + // reservation that refines the previous one. + // + // See the 'Resource Format' section for more details. + // + // (2) The framework can create refined reservations. + // + // A framework can refine an existing reservation via the + // `Resource.reservations` field. For example, a reservation for role + // `eng` can be refined to `eng/front_end`. + // + // See `ReservationInfo.reservations` for more details. + // + // NOTE: Without this capability, a framework is not offered resources + // that have refined reservations. A resource is said to have refined + // reservations if it uses the `Resource.reservations` field, and + // `Resource.reservations_size() > 1`. + RESERVATION_REFINEMENT = 7; // EXPERIMENTAL. + + // Indicates that the framework is prepared to receive offers + // for agents whose region is different from the master's + // region. Network links between hosts in different regions + // typically have higher latency and lower bandwidth than + // network links within a region, so frameworks should be + // careful to only place suitable workloads in remote regions. + // Frameworks that are not region-aware will never receive + // offers for remote agents; region-aware frameworks are assumed + // to implement their own logic to decide which workloads (if + // any) are suitable for placement on remote agents. + REGION_AWARE = 8; + + option (gogoproto.goproto_enum_prefix) = true; + } + + // Enum fields should be optional, see: MESOS-4997. + optional Type type = 1 [(gogoproto.nullable) = false]; + } + + // This field allows a framework to advertise its set of + // capabilities (e.g., ability to receive offers for revocable + // resources). + repeated Capability capabilities = 10 [(gogoproto.nullable) = false]; + + // Labels are free-form key value pairs supplied by the framework + // scheduler (e.g., to describe additional functionality offered by + // the framework). These labels are not interpreted by Mesos itself. + // Labels should not contain duplicate key-value pairs. + optional Labels labels = 11; +} + + +/** + * Describes a general non-interpreting non-killing check for a task or + * executor (or any arbitrary process/command). A type is picked by + * specifying one of the optional fields. Specifying more than one type + * is an error. + * + * NOTE: This API is subject to change and the related feature is experimental. + */ +message CheckInfo { + enum Type { + UNKNOWN = 0; + COMMAND = 1; + HTTP = 2; + TCP = 3; + + // TODO(alexr): Consider supporting custom user checks. They should + // probably be paired with a `data` field and complemented by a + // `data` response in `CheckStatusInfo`. + + option (gogoproto.goproto_enum_prefix) = true; + } + + // Describes a command check. If applicable, enters mount and/or network + // namespaces of the task. + message Command { + required CommandInfo command = 1 [(gogoproto.nullable) = false]; + } + + // Describes an HTTP check. Sends a GET request to + // http://<host>:port/path. Note that <host> is not configurable and is + // resolved automatically to 127.0.0.1. + message Http { + // Port to send the HTTP request. + required uint32 port = 1 [(gogoproto.nullable) = false]; + + // HTTP request path. + optional string path = 2; + + // TODO(alexr): Add support for HTTP method. While adding POST + // and PUT is simple, supporting payload is more involved. 
+ + // TODO(alexr): Add support for custom HTTP headers. + + // TODO(alexr): Consider adding an optional message to describe TLS + // options and thus enabling https. Such message might contain certificate + // validation, TLS version. + } + + // Describes a TCP check, i.e. based on establishing a TCP connection to + // the specified port. Note that <host> is not configurable and is resolved + // automatically to 127.0.0.1. + message Tcp { + required uint32 port = 1 [(gogoproto.nullable) = false]; + } + + // The type of the check. + optional Type type = 1 [(gogoproto.nullable) = false]; + + // Command check. + optional Command command = 2; + + // HTTP check. + optional Http http = 3 [(gogoproto.customname) = "HTTP"]; + + // TCP check. + optional Tcp tcp = 7 [(gogoproto.customname) = "TCP"]; + + // Amount of time to wait to start checking the task after it + // transitions to `TASK_RUNNING` or `TASK_STARTING` if the latter + // is used by the executor. + optional double delay_seconds = 4 [default = 15.0]; + + // Interval between check attempts, i.e., amount of time to wait after + // the previous check finished or timed out to start the next check. + optional double interval_seconds = 5 [default = 10.0]; + + // Amount of time to wait for the check to complete. Zero means infinite + // timeout. + // + // After this timeout, the check attempt is aborted and no result is + // reported. Note that this may be considered a state change and hence + // may trigger a check status change delivery to the corresponding + // scheduler. See `CheckStatusInfo` for more details. + optional double timeout_seconds = 6 [default = 20.0]; +} + + +/** + * Describes a health check for a task or executor (or any arbitrary + * process/command). A type is picked by specifying one of the + * optional fields. Specifying more than one type is an error. + */ +message HealthCheck { + enum Type { + UNKNOWN = 0; + COMMAND = 1; + HTTP = 2; + TCP = 3; + + option (gogoproto.goproto_enum_prefix) = true; + } + + // Describes an HTTP health check. Sends a GET request to + // scheme://<host>:port/path. Note that <host> is not configurable and is + // resolved automatically, in most cases to 127.0.0.1. Default executors + // treat return codes between 200 and 399 as success; custom executors + // may employ a different strategy, e.g. leveraging the `statuses` field. + message HTTPCheckInfo { + optional NetworkInfo.Protocol protocol = 5 [default = IPv4]; + + // Currently "http" and "https" are supported. + optional string scheme = 3; + + // Port to send the HTTP request. + required uint32 port = 1 [(gogoproto.nullable) = false]; + + // HTTP request path. + optional string path = 2; + + // TODO(alexr): Add support for HTTP method. While adding POST + // and PUT is simple, supporting payload is more involved. + + // TODO(alexr): Add support for custom HTTP headers. + + // TODO(alexr): Add support for success and possibly failure + // statuses. + + // NOTE: It is up to the custom executor to interpret and act on this + // field. Setting this field has no effect on the default executors. + // + // TODO(haosdent): Deprecate this field when we add better support for + // success and possibly failure statuses, e.g. ranges of success and + // failure statuses. + repeated uint32 statuses = 4; + + // TODO(haosdent): Consider adding a flag to enable task's certificate + // validation for HTTPS health checks, see MESOS-5997. + + // TODO(benh): Include an 'optional bytes data' field for checking + // for specific data in the response. 
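+
+    // Illustrative example: scheme "http", port 8080 and path "/health"
+    // make the executor probe http://<host>:8080/health; the default
+    // executors then treat any status code between 200 and 399 as healthy.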
+ } + + // Describes a TCP health check, i.e. based on establishing + // a TCP connection to the specified port. + message TCPCheckInfo { + optional NetworkInfo.Protocol protocol = 2 [default = IPv4]; + + // Port expected to be open. + required uint32 port = 1 [(gogoproto.nullable) = false]; + } + + // TODO(benh): Consider adding a URL health check strategy which + // allows doing something similar to the HTTP strategy but + // encapsulates all the details in a single string field. + + // Amount of time to wait to start health checking the task after it + // transitions to `TASK_RUNNING` or `TASK_STARTING` if the latter is + // used by the executor. + optional double delay_seconds = 2 [default = 15.0]; + + // Interval between health checks, i.e., amount of time to wait after + // the previous health check finished or timed out to start the next + // health check. + optional double interval_seconds = 3 [default = 10.0]; + + // Amount of time to wait for the health check to complete. After this + // timeout, the health check is aborted and treated as a failure. Zero + // means infinite timeout. + optional double timeout_seconds = 4 [default = 20.0]; + + // Number of consecutive failures until the task is killed by the executor. + optional uint32 consecutive_failures = 5 [default = 3]; + + // Amount of time after the task is launched during which health check + // failures are ignored. Once a check succeeds for the first time, + // the grace period does not apply anymore. Note that it includes + // `delay_seconds`, i.e., setting `grace_period_seconds` < `delay_seconds` + // has no effect. + optional double grace_period_seconds = 6 [default = 10.0]; + + // TODO(alexr): Add an optional `KillPolicy` that should be used + // if the task is killed because of a health check failure. + + // The type of health check. + optional Type type = 8 [(gogoproto.nullable) = false]; + + // Command health check. + optional CommandInfo command = 7; + + // HTTP health check. + optional HTTPCheckInfo http = 1 [(gogoproto.customname) = "HTTP"]; + + // TCP health check. + optional TCPCheckInfo tcp = 9 [(gogoproto.customname) = "TCP"]; +} + + +/** + * Describes a kill policy for a task. Currently does not express + * different policies (e.g. hitting HTTP endpoints), only controls + * how long to wait between graceful and forcible task kill: + * + * graceful kill --------------> forcible kill + * grace_period + * + * Kill policies are best-effort, because machine failures / forcible + * terminations may occur. + * + * NOTE: For executor-less command-based tasks, the kill is performed + * via sending a signal to the task process: SIGTERM for the graceful + * kill and SIGKILL for the forcible kill. For the docker executor-less + * tasks the grace period is passed to 'docker stop --time'. + */ +message KillPolicy { + // The grace period specifies how long to wait before forcibly + // killing the task. It is recommended to attempt to gracefully + // kill the task (and send TASK_KILLING) to indicate that the + // graceful kill is in progress. Once the grace period elapses, + // if the task has not terminated, a forcible kill should occur. + // The task should not assume that it will always be allotted + // the full grace period. For example, the executor may be + // shutdown more quickly by the agent, or failures / forcible + // terminations may occur. + optional DurationInfo grace_period = 1; +} + + +/** + * Describes a command, executed via: '/bin/sh -c value'. 
Any URIs specified + * are fetched before executing the command. If the executable field for a + * URI is set, executable file permission is set on the downloaded file. + * Otherwise, if the downloaded file has a recognized archive extension + * (currently [compressed] tar and zip) it is extracted into the executor's + * working directory. This extraction can be disabled by setting `extract` to + * false. In addition, any environment variables are set before executing + * the command (so they can be used to "parameterize" your command). + */ +message CommandInfo { + message URI { + required string value = 1 [(gogoproto.nullable) = false]; + optional bool executable = 2; + + // In case the fetched file is recognized as an archive, extract + // its contents into the sandbox. Note that a cached archive is + // not copied from the cache to the sandbox in case extraction + // originates from an archive in the cache. + optional bool extract = 3 [default = true]; + + // If this field is "true", the fetcher cache will be used. If not, + // fetching bypasses the cache and downloads directly into the + // sandbox directory, no matter whether a suitable cache file is + // available or not. The former directs the fetcher to download to + // the file cache, then copy from there to the sandbox. Subsequent + // fetch attempts with the same URI will omit downloading and copy + // from the cache as long as the file is resident there. Cache files + // may get evicted at any time, which then leads to renewed + // downloading. See also "docs/fetcher.md" and + // "docs/fetcher-cache-internals.md". + optional bool cache = 4; + + // The fetcher's default behavior is to use the URI string's basename to + // name the local copy. If this field is provided, the local copy will be + // named with its value instead. If there is a directory component (which + // must be a relative path), the local copy will be stored in that + // subdirectory inside the sandbox. + optional string output_file = 5; + } + + repeated URI uris = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "URIs"]; + + optional Environment environment = 2; + + // There are two ways to specify the command: + // 1) If 'shell == true', the command will be launched via shell + // (i.e., /bin/sh -c 'value'). The 'value' specified will be + // treated as the shell command. The 'arguments' will be ignored. + // 2) If 'shell == false', the command will be launched by passing + // arguments to an executable. The 'value' specified will be + // treated as the filename of the executable. The 'arguments' + // will be treated as the arguments to the executable. This is + // similar to how POSIX exec families launch processes (i.e., + // execlp(value, arguments(0), arguments(1), ...)). + // NOTE: The field 'value' is changed from 'required' to 'optional' + // in 0.20.0. It will only cause issues if a new framework is + // connecting to an old master. + optional bool shell = 6 [default = true]; + optional string value = 3; + repeated string arguments = 7; + + // Enables executor and tasks to run as a specific user. If the user + // field is present both in FrameworkInfo and here, the CommandInfo + // user value takes precedence. + optional string user = 5; +} + + +/** + * Describes information about an executor. + */ +message ExecutorInfo { + enum Type { + UNKNOWN = 0; + + // Mesos provides a simple built-in default executor that frameworks can + // leverage to run shell commands and containers. 
+ // + // NOTES: + // + // 1) `command` must not be set when using a default executor. + // + // 2) Default executor only accepts a *single* `LAUNCH` or `LAUNCH_GROUP` + // operation. + // + // 3) If `container` is set, `container.type` must be `MESOS` + // and `container.mesos.image` must not be set. + DEFAULT = 1; + + // For frameworks that need custom functionality to run tasks, a `CUSTOM` + // executor can be used. Note that `command` must be set when using a + // `CUSTOM` executor. + CUSTOM = 2; + + option (gogoproto.goproto_enum_prefix) = true; + } + + // For backwards compatibility, if this field is not set when using `LAUNCH` + // operation, Mesos will infer the type by checking if `command` is set + // (`CUSTOM`) or unset (`DEFAULT`). `type` must be set when using + // `LAUNCH_GROUP` operation. + // + // TODO(vinod): Add support for explicitly setting `type` to `DEFAULT` in + // `LAUNCH` operation. + optional Type type = 15 [(gogoproto.nullable) = false]; + + required ExecutorID executor_id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "ExecutorID"]; + optional FrameworkID framework_id = 8 [(gogoproto.customname) = "FrameworkID"]; // TODO(benh): Make this required. + optional CommandInfo command = 7; + + // Executor provided with a container will launch the container + // with the executor's CommandInfo and we expect the container to + // act as a Mesos executor. + optional ContainerInfo container = 11; + + repeated Resource resources = 5 [(gogoproto.nullable) = false]; + optional string name = 9; + + // 'source' is an identifier style string used by frameworks to + // track the source of an executor. This is useful when it's + // possible for different executor ids to be related semantically. + // + // NOTE: 'source' is exposed alongside the resource usage of the + // executor via JSON on the agent. This allows users to import usage + // information into a time series database for monitoring. + // + // This field is deprecated since 1.0. Please use labels for + // free-form metadata instead. + optional string source = 10 [deprecated = true]; // Since 1.0. + + // This field can be used to pass arbitrary bytes to an executor. + optional bytes data = 4; + + // Service discovery information for the executor. It is not + // interpreted or acted upon by Mesos. It is up to a service + // discovery system to use this information as needed and to handle + // executors without service discovery information. + optional DiscoveryInfo discovery = 12; + + // When shutting down an executor the agent will wait in a + // best-effort manner for the grace period specified here + // before forcibly destroying the container. The executor + // must not assume that it will always be allotted the full + // grace period, as the agent may decide to allot a shorter + // period and failures / forcible terminations may occur. + optional DurationInfo shutdown_grace_period = 13; + + // Labels are free-form key value pairs which are exposed through + // master and agent endpoints. Labels will not be interpreted or + // acted upon by Mesos itself. As opposed to the data field, labels + // will be kept in memory on master and agent processes. Therefore, + // labels should be used to tag executors with lightweight metadata. + // Labels should not contain duplicate key-value pairs. + optional Labels labels = 14; +} + + +/** + * Describes a domain. A domain is a collection of hosts that have + * similar characteristics. 
Mesos currently only supports "fault + * domains", which identify groups of hosts with similar failure + * characteristics. + * + * Frameworks can generally assume that network links between hosts in + * the same fault domain have lower latency, higher bandwidth, and better + * availability than network links between hosts in different domains. + * Schedulers may prefer to place network-intensive workloads in the + * same domain, as this may improve performance. Conversely, a single + * failure that affects a host in a domain may be more likely to + * affect other hosts in the same domain; hence, schedulers may prefer + * to place workloads that require high availability in multiple + * domains. (For example, all the hosts in a single rack might lose + * power or network connectivity simultaneously.) + * + * There are two kinds of fault domains: regions and zones. Regions + * offer the highest degree of fault isolation, but network latency + * between regions is typically high (typically >50 ms). Zones offer a + * modest degree of fault isolation along with reasonably low network + * latency (typically <10 ms). + * + * The mapping from fault domains to physical infrastructure is up to + * the operator to configure. In cloud environments, regions and zones + * can be mapped to the "region" and "availability zone" concepts + * exposed by most cloud providers, respectively. In on-premise + * deployments, regions and zones can be mapped to data centers and + * racks, respectively. + * + * Both masters and agents can be configured with domains. Frameworks + * can compare the domains of two hosts to determine if the hosts are + * in the same zone, in different zones in the same region, or in + * different regions. Note that all masters in a given Mesos cluster + * must be in the same region. + */ +message DomainInfo { + message FaultDomain { + message RegionInfo { + required string name = 1 [(gogoproto.nullable) = false]; + } + + message ZoneInfo { + required string name = 1 [(gogoproto.nullable) = false]; + } + + required RegionInfo region = 1 [(gogoproto.nullable) = false]; + required ZoneInfo zone = 2 [(gogoproto.nullable) = false]; + } + + optional FaultDomain fault_domain = 1; +} + + +/** + * Describes a master. This will probably have more fields in the + * future which might be used, for example, to link a framework webui + * to a master webui. + */ +message MasterInfo { + required string id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "ID"]; + + // The IP address (only IPv4) as a packed 4-bytes integer, + // stored in network order. Deprecated, use `address.ip` instead. + required uint32 ip = 2 [(gogoproto.nullable) = false, (gogoproto.customname) = "IP"]; + + // The TCP port the Master is listening on for incoming + // HTTP requests; deprecated, use `address.port` instead. + required uint32 port = 3 [default = 5050]; + + // In the default implementation, this will contain information + // about both the IP address, port and Master name; it should really + // not be relied upon by external tooling/frameworks and be + // considered an "internal" implementation field. + optional string pid = 4 [(gogoproto.customname) = "PID"]; + + // The server's hostname, if available; it may be unreliable + // in environments where the DNS configuration does not resolve + // internal hostnames (eg, some public cloud providers). + // Deprecated, use `address.hostname` instead. 
+  optional string hostname = 5;
+
+  // The running Master version, as a string; taken from the
+  // generated "master/version.hpp".
+  optional string version = 6;
+
+  // The full IP address (supports both IPv4 and IPv6 formats)
+  // and supersedes the use of `ip`, `port` and `hostname`.
+  // Since Mesos 0.24.
+  optional Address address = 7;
+
+  // The domain that this master belongs to. All masters in a Mesos
+  // cluster should belong to the same region.
+  optional DomainInfo domain = 8;
+
+  message Capability {
+    enum Type {
+      UNKNOWN = 0;
+
+      // The master can handle slaves whose state
+      // changes after reregistering.
+      AGENT_UPDATE = 1;
+
+      option (gogoproto.goproto_enum_prefix) = true;
+    }
+    optional Type type = 1 [(gogoproto.nullable) = false];
+  }
+
+  repeated Capability capabilities = 9 [(gogoproto.nullable) = false];
+}
+
+
+/**
+ * Describes an agent. Note that the 'id' field is only available
+ * after an agent is registered with the master, and is made available
+ * here to facilitate re-registration.
+ */
+message AgentInfo {
+  required string hostname = 1 [(gogoproto.nullable) = false];
+  optional int32 port = 8 [default = 5051];
+
+  // The configured resources at the agent. This does not include any
+  // dynamic reservations or persistent volumes that may currently
+  // exist at the agent.
+  repeated Resource resources = 3 [(gogoproto.nullable) = false];
+
+  repeated Attribute attributes = 5 [(gogoproto.nullable) = false];
+  optional AgentID id = 6 [(gogoproto.customname) = "ID"];
+
+  // The domain that this agent belongs to. If the agent's region
+  // differs from the master's region, it will not appear in resource
+  // offers to frameworks that have not enabled the REGION_AWARE
+  // capability.
+  optional DomainInfo domain = 10;
+
+  message Capability {
+    enum Type {
+      // This must be the first enum value in this list, to
+      // ensure that if 'type' is not set, the default value
+      // is UNKNOWN. This enables enum values to be added
+      // in a backwards-compatible way. See: MESOS-4997.
+      UNKNOWN = 0;
+
+      // This expresses the ability for the agent to be able
+      // to launch tasks of a 'multi-role' framework.
+      MULTI_ROLE = 1;
+
+      // This expresses the ability for the agent to be able to launch
+      // tasks, reserve resources, and create volumes using resources
+      // allocated to a 'hierarchical-role'.
+      // NOTE: This capability is required specifically for creating
+      // volumes because a hierarchical role includes '/' (slashes).
+      // Agents with this capability know to transform the '/' (slashes)
+      // into ' ' (spaces).
+      HIERARCHICAL_ROLE = 2;
+
+      // This capability has three effects for an agent.
+      //
+      // (1) The format of the checkpointed resources, and
+      //     the resources reported to master.
+      //
+      //     These resources are reported in the "pre-reservation-refinement"
+      //     format if none of the resources have refined reservations. If any
+      //     of the resources have refined reservations, they are reported in
+      //     the "post-reservation-refinement" format. The purpose is to allow
+      //     downgrading of an agent as well as communication with a pre-1.4.0
+      //     master until the reservation refinement feature is actually used.
+      //
+      //     See the 'Resource Format' section for more details.
+      //
+      // (2) The format of the resources reported by the HTTP endpoints.
+      //
+      //     For resources reported by agent endpoints, the
+      //     "pre-reservation-refinement" format is "injected" if possible.
+      //     That is, resources without refined reservations will have the
+      //     `Resource.role` and `Resource.reservation` set, whereas
+      //     resources with refined reservations will not.
+      //
+      //     See the 'Resource Format' section for more details.
+      //
+      // (3) The ability for the agent to launch tasks, reserve resources, and
+      //     create volumes using resources that have refined reservations.
+      //
+      //     See `ReservationInfo.reservations` section for more details.
+      //
+      // NOTE: Resources are said to have refined reservations if they use the
+      // `Resource.reservations` field, and `Resource.reservations_size() > 1`.
+      RESERVATION_REFINEMENT = 3;
+
+      // This expresses the ability for the agent to handle resource
+      // provider related operations. This includes the following:
+      //
+      // (1) The ability to report resources that are provided by some
+      //     local resource providers through the resource provider API.
+      //
+      // (2) The ability to provide operation feedback.
+      RESOURCE_PROVIDER = 4;
+
+      // This expresses the capability for the agent to handle persistent
+      // volume resize operations safely. This capability is turned on by
+      // default.
+      RESIZE_VOLUME = 5;
+
+      option (gogoproto.goproto_enum_prefix) = true;
+    }
+
+    // Enum fields should be optional, see: MESOS-4997.
+    optional Type type = 1 [(gogoproto.nullable) = false];
+  }
+}
+
+
+/**
+ * Describes the container configuration to run a CSI plugin component.
+ */
+message CSIPluginContainerInfo {
+  enum Service {
+    UNKNOWN = 0;
+    CONTROLLER_SERVICE = 1;
+    NODE_SERVICE = 2;
+
+    option (gogoproto.goproto_enum_prefix) = true;
+  }
+
+  repeated Service services = 1;
+  optional CommandInfo command = 2;
+  repeated Resource resources = 3 [(gogoproto.nullable) = false];
+  optional ContainerInfo container = 4;
+}
+
+
+/**
+ * Describes a CSI plugin.
+ */
+message CSIPluginInfo {
+  // The type of the CSI service. This uniquely identifies a CSI
+  // implementation. For instance:
+  //     org.apache.mesos.csi.test
+  //
+  // Please follow the Java package naming convention
+  // (https://en.wikipedia.org/wiki/Java_package#Package_naming_conventions)
+  // to avoid conflicts on type names.
+  required string type = 1 [(gogoproto.nullable) = false];
+
+  // The name of the CSI service. There could be multiple instances of a
+  // type of CSI service. The name field is used to distinguish these
+  // instances. It should be a legal Java identifier
+  // (https://docs.oracle.com/javase/tutorial/java/nutsandbolts/variables.html)
+  // to avoid conflicts on concatenation of type and name.
+  required string name = 2 [(gogoproto.nullable) = false];
+
+  // A list of container configurations to run CSI plugin components.
+  // The controller service will be served by the first configuration
+  // that contains `CONTROLLER_SERVICE`, and the node service will be
+  // served by the first configuration that contains `NODE_SERVICE`.
+  repeated CSIPluginContainerInfo containers = 3 [(gogoproto.nullable) = false];
+}
+
+
+/**
+ * Describes a resource provider. Note that the 'id' field is only available
+ * after a resource provider is registered with the master, and is made
+ * available here to facilitate re-registration.
+ */
+message ResourceProviderInfo {
+  optional ResourceProviderID id = 1 [(gogoproto.customname) = "ID"];
+  repeated Attribute attributes = 2 [(gogoproto.nullable) = false];
+
+  // The type of the resource provider. This uniquely identifies a
+  // resource provider implementation. For instance:
+  //     org.apache.mesos.rp.local.storage
+  //
+  // Please follow the Java package naming convention
+  // (https://en.wikipedia.org/wiki/Java_package#Package_naming_conventions)
+  // to avoid conflicts on type names.
+  required string type = 3 [(gogoproto.nullable) = false];
+
+  // The name of the resource provider. There could be multiple
+  // instances of a type of resource provider. The name field is used
+  // to distinguish these instances. It should be a legal Java identifier
+  // (https://docs.oracle.com/javase/tutorial/java/nutsandbolts/variables.html)
+  // to avoid conflicts on concatenation of type and name.
+  required string name = 4 [(gogoproto.nullable) = false];
+
+  // The stack of default reservations. If this field is not empty, it
+  // indicates that resources from this resource provider are reserved
+  // by default, except for the resources that have been reserved or
+  // unreserved through operations. The first `ReservationInfo`
+  // may have type `STATIC` or `DYNAMIC`, but the rest must have
+  // `DYNAMIC`. One can create a new reservation on top of an existing
+  // one by pushing a new `ReservationInfo` to the back. The last
+  // `ReservationInfo` in this stack is the "current" reservation. The
+  // new reservation's role must be a child of the current one.
+  repeated Resource.ReservationInfo default_reservations = 5 [(gogoproto.nullable) = false]; // EXPERIMENTAL.
+
+  // Storage resource provider related information.
+  message Storage {
+    required CSIPluginInfo plugin = 1 [(gogoproto.nullable) = false];
+  }
+
+  optional Storage storage = 6; // EXPERIMENTAL.
+}
+
+
+/**
+ * Describes an Attribute or Resource "value". A value is described
+ * using the standard protocol buffer "union" trick.
+ */
+message Value {
+  enum Type {
+    SCALAR = 0;
+    RANGES = 1;
+    SET = 2;
+    TEXT = 3;
+  }
+
+  message Scalar {
+    // Scalar values are represented using floating point. To reduce
+    // the chance of unpredictable floating point behavior due to
+    // roundoff error, Mesos only supports three decimal digits of
+    // precision for scalar resource values. That is, floating point
+    // values are converted to a fixed point format that supports
+    // three decimal digits of precision, and then converted back to
+    // floating point on output. Any additional precision in scalar
+    // resource values is discarded (via rounding).
+    required double value = 1 [(gogoproto.nullable) = false];
+  }
+
+  message Range {
+    required uint64 begin = 1 [(gogoproto.nullable) = false];
+    required uint64 end = 2 [(gogoproto.nullable) = false];
+  }
+
+  message Ranges {
+    repeated Range range = 1 [(gogoproto.nullable) = false];
+  }
+
+  message Set {
+    repeated string item = 1;
+  }
+
+  message Text {
+    required string value = 1 [(gogoproto.nullable) = false];
+  }
+
+  required Type type = 1 [(gogoproto.nullable) = false];
+  optional Scalar scalar = 2;
+  optional Ranges ranges = 3;
+  optional Set set = 4;
+  optional Text text = 5;
+}
+
+
+/**
+ * Describes an attribute that can be set on a machine. For now,
+ * attributes and resources share the same "value" type, but this may
+ * change in the future and attributes may only be string based.
+ */
+message Attribute {
+  required string name = 1 [(gogoproto.nullable) = false];
+  required Value.Type type = 2 [(gogoproto.nullable) = false];
+  optional Value.Scalar scalar = 3;
+  optional Value.Ranges ranges = 4;
+  optional Value.Set set = 6;
+  optional Value.Text text = 5;
+}
+
+
+/**
+ * Describes a resource from a resource provider.
+ * The `name` field is a string like "cpus" or "mem" that indicates
+ * which kind of resource this is; the rest of the fields describe the
+ * properties of the resource. A resource can take on one of three
+ * types: scalar (double), a list of finite and discrete ranges (e.g.,
+ * [1-10, 20-30]), or a set of items. A resource is described using the
+ * standard protocol buffer "union" trick.
+ *
+ * Note that "disk" and "mem" resources are scalar values expressed in
+ * megabytes. Fractional "cpus" values are allowed (e.g., "0.5"),
+ * which correspond to partial shares of a CPU.
+ */
+message Resource {
+  optional ResourceProviderID provider_id = 12 [(gogoproto.customname) = "ProviderID"];
+
+  required string name = 1 [(gogoproto.nullable) = false];
+  required Value.Type type = 2;
+  optional Value.Scalar scalar = 3;
+  optional Value.Ranges ranges = 4;
+  optional Value.Set set = 5;
+
+  // The role that this resource is reserved for. If "*", this indicates
+  // that the resource is unreserved. Otherwise, the resource will only
+  // be offered to frameworks that belong to this role.
+  //
+  // NOTE: Frameworks must not set this field if `reservations` is set.
+  // See the 'Resource Format' section for more details.
+  //
+  // TODO(mpark): Deprecate once `reservations` is no longer experimental.
+  optional string role = 6 [default = "*", deprecated=true];
+
+  // This was initially introduced to support MULTI_ROLE capable
+  // frameworks. Frameworks that are not MULTI_ROLE capable can
+  // continue to assume that the offered resources are allocated
+  // to their role.
+  message AllocationInfo {
+    // If set, this resource is allocated to a role. Note that in the
+    // future, this may be unset and the scheduler may be responsible
+    // for allocating to one of its roles.
+    optional string role = 1;
+
+    // In the future, we may add additional fields here, e.g. priority
+    // tier, type of allocation (quota / fair share).
+  }
+
+  optional AllocationInfo allocation_info = 11;
+
+  // Resource Format:
+  //
+  // Frameworks receive resource offers in one of two formats, depending on
+  // whether the RESERVATION_REFINEMENT capability is enabled.
+  //
+  // __WITHOUT__ the RESERVATION_REFINEMENT capability, the framework is offered
+  // resources in the "pre-reservation-refinement" format. In this format, the
+  // `Resource.role` and `Resource.reservation` fields are used in conjunction
+  // to describe the reservation state of a `Resource` message.
+  //
+  // The following is an overview of the possible reservation states:
+  //
+  //   +------------+------------------------------------------------------------+
+  //   | unreserved | {                                                          |
+  //   |            |   role: "*",                                               |
+  //   |            |   reservation: <not set>,                                  |
+  //   |            |   reservations: <not set>                                  |
+  //   |            | }                                                          |
+  //   +------------+------------------------------------------------------------+
+  //   | static     | {                                                          |
+  //   |            |   role: "eng",                                             |
+  //   |            |   reservation: <not set>,                                  |
+  //   |            |   reservations: <not set>                                  |
+  //   |            | }                                                          |
+  //   +------------+------------------------------------------------------------+
+  //   | dynamic    | {                                                          |
+  //   |            |   role: "eng",                                             |
+  //   |            |   reservation: {                                           |
+  //   |            |     type: <not set>,                                       |
+  //   |            |     role: <not set>,                                       |
+  //   |            |     principal: <optional>,                                 |
+  //   |            |     labels: <optional>                                     |
+  //   |            |   },                                                       |
+  //   |            |   reservations: <not set>                                  |
+  //   |            | }                                                          |
+  //   +------------+------------------------------------------------------------+
+  //
+  // __WITH__ the RESERVATION_REFINEMENT capability, the framework is offered
+  // resources in the "post-reservation-refinement" format.
+  // In this format, the reservation state of a `Resource` message is
+  // expressed solely in the `Resource.reservations` field.
+  //
+  // The following is an overview of the possible reservation states:
+  //
+  //   +------------+------------------------------------------------------------+
+  //   | unreserved | {                                                          |
+  //   |            |   role: <not set>,                                         |
+  //   |            |   reservation: <not set>,                                  |
+  //   |            |   reservations: []                                         |
+  //   |            | }                                                          |
+  //   +------------+------------------------------------------------------------+
+  //   | static     | {                                                          |
+  //   |            |   role: <not set>,                                         |
+  //   |            |   reservation: <not set>,                                  |
+  //   |            |   reservations: [                                          |
+  //   |            |     {                                                      |
+  //   |            |       type: STATIC,                                        |
+  //   |            |       role: "eng",                                         |
+  //   |            |       principal: <optional>,                               |
+  //   |            |       labels: <optional>                                   |
+  //   |            |     }                                                      |
+  //   |            |   ]                                                        |
+  //   |            | }                                                          |
+  //   +------------+------------------------------------------------------------+
+  //   | dynamic    | {                                                          |
+  //   |            |   role: <not set>,                                         |
+  //   |            |   reservation: <not set>,                                  |
+  //   |            |   reservations: [                                          |
+  //   |            |     {                                                      |
+  //   |            |       type: DYNAMIC,                                       |
+  //   |            |       role: "eng",                                         |
+  //   |            |       principal: <optional>,                               |
+  //   |            |       labels: <optional>                                   |
+  //   |            |     }                                                      |
+  //   |            |   ]                                                        |
+  //   |            | }                                                          |
+  //   +------------+------------------------------------------------------------+
+  //
+  // We can also __refine__ reservations with this capability like so:
+  //
+  //   +------------+------------------------------------------------------------+
+  //   | refined    | {                                                          |
+  //   |            |   role: <not set>,                                         |
+  //   |            |   reservation: <not set>,                                  |
+  //   |            |   reservations: [                                          |
+  //   |            |     {                                                      |
+  //   |            |       type: STATIC or DYNAMIC,                             |
+  //   |            |       role: "eng",                                         |
+  //   |            |       principal: <optional>,                               |
+  //   |            |       labels: <optional>                                   |
+  //   |            |     },                                                     |
+  //   |            |     {                                                      |
+  //   |            |       type: DYNAMIC,                                       |
+  //   |            |       role: "eng/front_end",                               |
+  //   |            |       principal: <optional>,                               |
+  //   |            |       labels: <optional>                                   |
+  //   |            |     }                                                      |
+  //   |            |   ]                                                        |
+  //   |            | }                                                          |
+  //   +------------+------------------------------------------------------------+
+  //
+  // NOTE: Each `ReservationInfo` in the `reservations` field denotes
+  // a reservation that refines the previous `ReservationInfo`.
+
+  message ReservationInfo {
+    // Describes a reservation. A static reservation is set by the operator
+    // on the command-line and is immutable without an agent restart. A
+    // dynamic reservation is made by an operator via the '/reserve' HTTP
+    // endpoint or by a framework via the offer cycle by sending back an
+    // 'Offer::Operation::Reserve' message.
+    //
+    // NOTE: We currently do not allow frameworks with role "*" to make
+    // dynamic reservations.
+
+    enum Type {
+      UNKNOWN = 0;
+      STATIC = 1;
+      DYNAMIC = 2;
+
+      option (gogoproto.goproto_enum_prefix) = true;
+    }
+
+    // The type of this reservation.
+    //
+    // NOTE: This field must not be set for `Resource.reservation`.
+    // See the 'Resource Format' section for more details.
+    optional Type type = 4;
+
+    // The role for which this reservation is made.
+    //
+    // NOTE: This field must not be set for `Resource.reservation`.
+    // See the 'Resource Format' section for more details.
+    optional string role = 3;
+
+    // Indicates the principal, if any, of the framework or operator
+    // that reserved this resource. If reserved by a framework, the
+    // field should match the `FrameworkInfo.principal`. It is used in
+    // conjunction with the `UnreserveResources` ACL to determine
+    // whether the entity attempting to unreserve this resource is
+    // permitted to do so.
+    optional string principal = 1;
+
+    // Labels are free-form key value pairs that can be used to
+    // associate arbitrary metadata with a reserved resource. For
+    // example, frameworks can use labels to identify the intended
+    // purpose for a portion of the resources the framework has
+    // reserved at a given agent.
Labels should not contain duplicate + // key-value pairs. + optional Labels labels = 2; + } + + // If this is set, this resource was dynamically reserved by an + // operator or a framework. Otherwise, this resource is either unreserved + // or statically reserved by an operator via the --resources flag. + // + // NOTE: Frameworks must not set this field if `reservations` is set. + // See the 'Resource Format' section for more details. + // + // TODO(mpark): Deprecate once `reservations` is no longer experimental. + optional ReservationInfo reservation = 8; + + // The stack of reservations. If this field is empty, it indicates that this + // resource is unreserved. Otherwise, the resource is reserved. The first + // `ReservationInfo` may have type `STATIC` or `DYNAMIC`, but the rest must + // have `DYNAMIC`. One can create a new reservation on top of an existing + // one by pushing a new `ReservationInfo` to the back. The last + // `ReservationInfo` in this stack is the "current" reservation. The new + // reservation's role must be a child of the current reservation's role. + // + // NOTE: Frameworks must not set this field if `reservation` is set. + // See the 'Resource Format' section for more details. + // + // TODO(mpark): Deprecate `role` and `reservation` once this is stable. + repeated ReservationInfo reservations = 13 [(gogoproto.nullable) = false]; // EXPERIMENTAL. + + message DiskInfo { + // Describes a persistent disk volume. + // + // A persistent disk volume will not be automatically garbage + // collected if the task/executor/agent terminates, but will be + // re-offered to the framework(s) belonging to the 'role'. + // + // NOTE: Currently, we do not allow persistent disk volumes + // without a reservation (i.e., 'role' cannot be '*'). + message Persistence { + // A unique ID for the persistent disk volume. This ID must be + // unique per role on each agent. Although it is possible to use + // the same ID on different agents in the cluster and to reuse + // IDs after a volume with that ID has been destroyed, both + // practices are discouraged. + required string id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "ID"]; + + // This field indicates the principal of the operator or + // framework that created this volume. It is used in conjunction + // with the "destroy" ACL to determine whether an entity + // attempting to destroy the volume is permitted to do so. + // + // NOTE: This field should match the FrameworkInfo.principal of + // the framework that created the volume. + optional string principal = 2; + } + + optional Persistence persistence = 1; + + // Describes how this disk resource will be mounted in the + // container. If not set, the disk resource will be used as the + // sandbox. Otherwise, it will be mounted according to the + // 'container_path' inside 'volume'. The 'host_path' inside + // 'volume' is ignored. + // NOTE: If 'volume' is set but 'persistence' is not set, the + // volume will be automatically garbage collected after + // task/executor terminates. Currently, if 'persistence' is set, + // 'volume' must be set. + optional Volume volume = 2; + + // Describes where a disk originates from. + message Source { + enum Type { + UNKNOWN = 0; + PATH = 1; + MOUNT = 2; + BLOCK = 3; + RAW = 4; + + option (gogoproto.goproto_enum_prefix) = true; + } + + // A folder that can be located on a separate disk device. This + // can be shared and carved up as necessary between frameworks. + message Path { + // Path to the folder (e.g., /mnt/raid/disk0). 
If the path is a
+        // relative path, it is relative to the agent work directory.
+        optional string root = 1;
+      }
+
+      // A mounted file-system set up by the Agent administrator. This
+      // can only be used exclusively: a framework cannot accept a
+      // partial amount of this disk.
+      message Mount {
+        // Path to mount point (e.g., /mnt/raid/disk0). If the path is a
+        // relative path, it is relative to the agent work directory.
+        optional string root = 1;
+      }
+
+      required Type type = 1 [(gogoproto.nullable) = false];
+      optional Path path = 2;
+      optional Mount mount = 3;
+
+      // An identifier for this source. This field maps onto CSI
+      // volume IDs and is not expected to be set by frameworks.
+      optional string id = 4 [(gogoproto.customname) = "ID"]; // EXPERIMENTAL.
+
+      // Additional metadata for this source. This field maps onto CSI
+      // volume metadata and is not expected to be set by frameworks.
+      optional Labels metadata = 5; // EXPERIMENTAL.
+
+      // This field serves as an indirection to a set of storage
+      // vendor specific disk parameters which describe the properties
+      // of the disk. The operator will set up mappings from a profile
+      // name to a set of vendor-specific disk parameters, and the
+      // framework will then select disks based on profile names
+      // instead of vendor-specific disk parameters.
+      //
+      // Also see the DiskProfileAdaptor module.
+      optional string profile = 6; // EXPERIMENTAL.
+    }
+
+    optional Source source = 3;
+  }
+
+  optional DiskInfo disk = 7;
+
+  message RevocableInfo {}
+
+  // If this is set, the resources are revocable, i.e., any tasks or
+  // executors launched using these resources could get preempted or
+  // throttled at any time. This could be used by frameworks to run
+  // best effort tasks that do not need strict uptime or performance
+  // guarantees. Note that if this is set, 'disk' or 'reservation'
+  // cannot be set.
+  optional RevocableInfo revocable = 9;
+
+  // Allow the resource to be shared across tasks.
+  message SharedInfo {}
+
+  // If this is set, the resources are shared, i.e. multiple tasks
+  // can be launched using this resource and all of them shall refer
+  // to the same physical resource on the cluster. Note that only
+  // persistent volumes can be shared currently.
+  optional SharedInfo shared = 10;
+}
+
+
+/**
+ * When the network bandwidth caps are enabled and the container
+ * is over its limit, outbound packets may be either delayed or
+ * dropped completely either because it exceeds the maximum bandwidth
+ * allocation for a single container (the cap) or because the combined
+ * network traffic of multiple containers on the host exceeds the
+ * transmit capacity of the host (the share). We can report the
+ * following statistics for each of these conditions exported directly
+ * from the Linux Traffic Control Queueing Discipline.
+ *
+ * id         : name of the limiter, e.g. 'tx_bw_cap'
+ * backlog    : number of packets currently delayed
+ * bytes      : total bytes seen
+ * drops      : number of packets dropped in total
+ * overlimits : number of packets which exceeded allocation
+ * packets    : total packets seen
+ * qlen       : number of packets currently queued
+ * rate_bps   : throughput in bytes/sec
+ * rate_pps   : throughput in packets/sec
+ * requeues   : number of times a packet has been delayed due to
+ *              locking or device contention issues
+ *
+ * More information on the operation of Linux Traffic Control can be
+ * found at http://www.lartc.org/lartc.html.
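+ *
+ * A minimal illustrative sample in protobuf text format (all values
+ * are hypothetical), using the fields of the message below:
+ *
+ *   id: "tx_bw_cap"
+ *   backlog: 0
+ *   bytes: 163206809
+ *   drops: 12
+ *   overlimits: 12
+ *   packets: 2046664
+ *   qlen: 0
+ *   ratebps: 65536
+ *   ratepps: 30
+ *   requeues: 0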
+ */ +message TrafficControlStatistics { + required string id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "ID"]; + optional uint64 backlog = 2; + optional uint64 bytes = 3; + optional uint64 drops = 4; + optional uint64 overlimits = 5; + optional uint64 packets = 6; + optional uint64 qlen = 7; + optional uint64 ratebps = 8 [(gogoproto.customname) = "RateBPS"]; + optional uint64 ratepps = 9 [(gogoproto.customname) = "RatePPS"]; + optional uint64 requeues = 10; +} + + +message IpStatistics { + optional int64 Forwarding = 1; + optional int64 DefaultTTL = 2; + optional int64 InReceives = 3; + optional int64 InHdrErrors = 4; + optional int64 InAddrErrors = 5; + optional int64 ForwDatagrams = 6; + optional int64 InUnknownProtos = 7; + optional int64 InDiscards = 8; + optional int64 InDelivers = 9; + optional int64 OutRequests = 10; + optional int64 OutDiscards = 11; + optional int64 OutNoRoutes = 12; + optional int64 ReasmTimeout = 13; + optional int64 ReasmReqds = 14; + optional int64 ReasmOKs = 15; + optional int64 ReasmFails = 16; + optional int64 FragOKs = 17; + optional int64 FragFails = 18; + optional int64 FragCreates = 19; +} + + +message IcmpStatistics { + optional int64 InMsgs = 1; + optional int64 InErrors = 2; + optional int64 InCsumErrors = 3; + optional int64 InDestUnreachs = 4; + optional int64 InTimeExcds = 5; + optional int64 InParmProbs = 6; + optional int64 InSrcQuenchs = 7; + optional int64 InRedirects = 8; + optional int64 InEchos = 9; + optional int64 InEchoReps = 10; + optional int64 InTimestamps = 11; + optional int64 InTimestampReps = 12; + optional int64 InAddrMasks = 13; + optional int64 InAddrMaskReps = 14; + optional int64 OutMsgs = 15; + optional int64 OutErrors = 16; + optional int64 OutDestUnreachs = 17; + optional int64 OutTimeExcds = 18; + optional int64 OutParmProbs = 19; + optional int64 OutSrcQuenchs = 20; + optional int64 OutRedirects = 21; + optional int64 OutEchos = 22; + optional int64 OutEchoReps = 23; + optional int64 OutTimestamps = 24; + optional int64 OutTimestampReps = 25; + optional int64 OutAddrMasks = 26; + optional int64 OutAddrMaskReps = 27; +} + + +message TcpStatistics { + optional int64 RtoAlgorithm = 1; + optional int64 RtoMin = 2; + optional int64 RtoMax = 3; + optional int64 MaxConn = 4; + optional int64 ActiveOpens = 5; + optional int64 PassiveOpens = 6; + optional int64 AttemptFails = 7; + optional int64 EstabResets = 8; + optional int64 CurrEstab = 9; + optional int64 InSegs = 10; + optional int64 OutSegs = 11; + optional int64 RetransSegs = 12; + optional int64 InErrs = 13; + optional int64 OutRsts = 14; + optional int64 InCsumErrors = 15; +} + + +message UdpStatistics { + optional int64 InDatagrams = 1; + optional int64 NoPorts = 2; + optional int64 InErrors = 3; + optional int64 OutDatagrams = 4; + optional int64 RcvbufErrors = 5; + optional int64 SndbufErrors = 6; + optional int64 InCsumErrors = 7; + optional int64 IgnoredMulti = 8; +} + + +message SNMPStatistics { + optional IpStatistics ip_stats = 1 [(gogoproto.customname) = "IPStats"]; + optional IcmpStatistics icmp_stats = 2 [(gogoproto.customname) = "ICMPStats"]; + optional TcpStatistics tcp_stats = 3 [(gogoproto.customname) = "TCPStats"]; + optional UdpStatistics udp_stats = 4 [(gogoproto.customname) = "UDPStats"]; +} + + +message DiskStatistics { + optional Resource.DiskInfo.Source source = 1; + optional Resource.DiskInfo.Persistence persistence = 2; + optional uint64 limit_bytes = 3; + optional uint64 used_bytes = 4; +} + + +/** + * A snapshot of resource 
usage statistics.
+ */
+message ResourceStatistics {
+  required double timestamp = 1 [(gogoproto.nullable) = false]; // Snapshot time, in seconds since the Epoch.
+
+  optional uint32 processes = 30;
+  optional uint32 threads = 31;
+
+  // CPU Usage Information:
+  // Total CPU time spent in user mode, and kernel mode.
+  optional double cpus_user_time_secs = 2 [(gogoproto.customname) = "CPUsUserTimeSecs"];
+  optional double cpus_system_time_secs = 3 [(gogoproto.customname) = "CPUsSystemTimeSecs"];
+
+  // Number of CPUs allocated.
+  optional double cpus_limit = 4 [(gogoproto.customname) = "CPUsLimit"];
+
+  // cpu.stat on process throttling (for contention issues).
+  optional uint32 cpus_nr_periods = 7 [(gogoproto.customname) = "CPUsNrPeriods"];
+  optional uint32 cpus_nr_throttled = 8 [(gogoproto.customname) = "CPUsNrThrottled"];
+  optional double cpus_throttled_time_secs = 9 [(gogoproto.customname) = "CPUsThrottledTimeSecs"];
+
+  // Memory Usage Information:
+
+  // mem_total_bytes was added in 0.23.0 to represent the total memory
+  // of a process in RAM (as opposed to in Swap). This was previously
+  // reported as mem_rss_bytes, which was also changed in 0.23.0 to
+  // represent only the anonymous memory usage, to keep in sync with
+  // the Linux kernel's (arguably erroneous) use of terminology.
+  optional uint64 mem_total_bytes = 36;
+
+  // Total memory + swap usage. This is set if swap is enabled.
+  optional uint64 mem_total_memsw_bytes = 37;
+
+  // Hard memory limit for a container.
+  optional uint64 mem_limit_bytes = 6;
+
+  // Soft memory limit for a container.
+  optional uint64 mem_soft_limit_bytes = 38;
+
+  // Broken out memory usage information: pagecache, rss (anonymous),
+  // mmaped files and swap.
+
+  // TODO(chzhcn) mem_file_bytes and mem_anon_bytes are deprecated in
+  // 0.23.0 and will be removed in 0.24.0.
+  optional uint64 mem_file_bytes = 10;
+  optional uint64 mem_anon_bytes = 11;
+
+  // mem_cache_bytes was added in 0.23.0 to represent page cache usage.
+  optional uint64 mem_cache_bytes = 39;
+
+  // Since 0.23.0, mem_rss_bytes has been changed to represent only
+  // anonymous memory usage. Note that neither its requiredness, type,
+  // name nor numeric tag has been changed.
+  optional uint64 mem_rss_bytes = 5 [(gogoproto.customname) = "MemRSSBytes"];
+
+  optional uint64 mem_mapped_file_bytes = 12;
+  // This is only set if swap is enabled.
+  optional uint64 mem_swap_bytes = 40;
+  optional uint64 mem_unevictable_bytes = 41;
+
+  // Number of occurrences of different levels of memory pressure
+  // events reported by the memory cgroup. Pressure listening
+  // (re)starts with these values set to 0 when the agent (re)starts.
+  // See https://www.kernel.org/doc/Documentation/cgroups/memory.txt
+  // for more details.
+  optional uint64 mem_low_pressure_counter = 32;
+  optional uint64 mem_medium_pressure_counter = 33;
+  optional uint64 mem_critical_pressure_counter = 34;
+
+  // Disk Usage Information for executor working directory.
+  optional uint64 disk_limit_bytes = 26;
+  optional uint64 disk_used_bytes = 27;
+
+  // Per disk (resource) statistics.
+  repeated DiskStatistics disk_statistics = 43 [(gogoproto.nullable) = false];
+
+  // Cgroups blkio statistics.
+  optional CgroupInfo.Blkio.Statistics blkio_statistics = 44;
+
+  // Perf statistics.
+ optional PerfStatistics perf = 13; + + // Network Usage Information: + optional uint64 net_rx_packets = 14; + optional uint64 net_rx_bytes = 15; + optional uint64 net_rx_errors = 16; + optional uint64 net_rx_dropped = 17; + optional uint64 net_tx_packets = 18; + optional uint64 net_tx_bytes = 19; + optional uint64 net_tx_errors = 20; + optional uint64 net_tx_dropped = 21; + + // The kernel keeps track of RTT (round-trip time) for its TCP + // sockets. RTT is a way to tell the latency of a container. + optional double net_tcp_rtt_microsecs_p50 = 22 [(gogoproto.customname) = "NetTCPRttMicrosecsP50"]; + optional double net_tcp_rtt_microsecs_p90 = 23 [(gogoproto.customname) = "NetTCPRttMicrosecsP90"]; + optional double net_tcp_rtt_microsecs_p95 = 24 [(gogoproto.customname) = "NetTCPRttMicrosecsP95"]; + optional double net_tcp_rtt_microsecs_p99 = 25 [(gogoproto.customname) = "NetTCPRttMicrosecsP99"]; + + optional double net_tcp_active_connections = 28 [(gogoproto.customname) = "NetTCPActiveConnections"]; + optional double net_tcp_time_wait_connections = 29 [(gogoproto.customname) = "NetTCPTimeWaitConnections"]; + + // Network traffic flowing into or out of a container can be delayed + // or dropped due to congestion or policy inside and outside the + // container. + repeated TrafficControlStatistics net_traffic_control_statistics = 35 [(gogoproto.nullable) = false]; + + // Network SNMP statistics for each container. + optional SNMPStatistics net_snmp_statistics = 42 [(gogoproto.customname) = "NetSNMPStatistics"]; +} + + +/** + * Describes a snapshot of the resource usage for executors. + */ +message ResourceUsage { + message Executor { + required ExecutorInfo executor_info = 1 [(gogoproto.nullable) = false]; + + // This includes resources used by the executor itself + // as well as its active tasks. + repeated Resource allocated = 2 [(gogoproto.nullable) = false]; + + // Current resource usage. If absent, the containerizer + // cannot provide resource usage. + optional ResourceStatistics statistics = 3; + + // The container id for the executor specified in the executor_info field. + required ContainerID container_id = 4 [(gogoproto.nullable) = false, (gogoproto.customname) = "ContainerID"]; + + message Task { + required string name = 1 [(gogoproto.nullable) = false]; + required TaskID id = 2 [(gogoproto.nullable) = false, (gogoproto.customname) = "ID"]; + repeated Resource resources = 3 [(gogoproto.nullable) = false]; + optional Labels labels = 4; + } + + // Non-terminal tasks. + repeated Task tasks = 5 [(gogoproto.nullable) = false]; + } + + repeated Executor executors = 1 [(gogoproto.nullable) = false]; + + // Agent's total resources including checkpointed dynamic + // reservations and persistent volumes. + repeated Resource total = 2 [(gogoproto.nullable) = false]; +} + + +/** + * Describes a sample of events from "perf stat". Only available on + * Linux. + * + * NOTE: Each optional field matches the name of a perf event (see + * "perf list") with the following changes: + * 1. Names are downcased. + * 2. Hyphens ('-') are replaced with underscores ('_'). + * 3. Events with alternate names use the name "perf stat" returns, + * e.g., for the event "cycles OR cpu-cycles" perf always returns + * cycles. + */ +message PerfStatistics { + required double timestamp = 1 [(gogoproto.nullable) = false]; // Start of sample interval, in seconds since the Epoch. + required double duration = 2 [(gogoproto.nullable) = false]; // Duration of sample interval, in seconds. + + // Hardware event. 
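+  // For example, under the naming rules described above (an
+  // illustration, not an exhaustive mapping): the perf event
+  // "cache-misses" maps to the field `cache_misses`, and the event
+  // "cycles OR cpu-cycles" is reported by perf as "cycles" and so
+  // maps to `cycles`.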
+ optional uint64 cycles = 3; + optional uint64 stalled_cycles_frontend = 4; + optional uint64 stalled_cycles_backend = 5; + optional uint64 instructions = 6; + optional uint64 cache_references = 7; + optional uint64 cache_misses = 8; + optional uint64 branches = 9; + optional uint64 branch_misses = 10; + optional uint64 bus_cycles = 11; + optional uint64 ref_cycles = 12; + + // Software event. + optional double cpu_clock = 13 [(gogoproto.customname) = "CPUClock"]; + optional double task_clock = 14; + optional uint64 page_faults = 15; + optional uint64 minor_faults = 16; + optional uint64 major_faults = 17; + optional uint64 context_switches = 18; + optional uint64 cpu_migrations = 19 [(gogoproto.customname) = "CPUMigrations"]; + optional uint64 alignment_faults = 20; + optional uint64 emulation_faults = 21; + + // Hardware cache event. + optional uint64 l1_dcache_loads = 22; + optional uint64 l1_dcache_load_misses = 23; + optional uint64 l1_dcache_stores = 24; + optional uint64 l1_dcache_store_misses = 25; + optional uint64 l1_dcache_prefetches = 26; + optional uint64 l1_dcache_prefetch_misses = 27; + optional uint64 l1_icache_loads = 28; + optional uint64 l1_icache_load_misses = 29; + optional uint64 l1_icache_prefetches = 30; + optional uint64 l1_icache_prefetch_misses = 31; + optional uint64 llc_loads = 32 [(gogoproto.customname) = "LLCLoads"]; + optional uint64 llc_load_misses = 33 [(gogoproto.customname) = "LLCLoadMisses"]; + optional uint64 llc_stores = 34 [(gogoproto.customname) = "LLCStores"]; + optional uint64 llc_store_misses = 35 [(gogoproto.customname) = "LLCStoreMisses"]; + optional uint64 llc_prefetches = 36 [(gogoproto.customname) = "LLCPrefetches"]; + optional uint64 llc_prefetch_misses = 37 [(gogoproto.customname) = "LLCPrefetchMisses"]; + optional uint64 dtlb_loads = 38 [(gogoproto.customname) = "DTLBLoads"]; + optional uint64 dtlb_load_misses = 39 [(gogoproto.customname) = "DTLBLoadMisses"]; + optional uint64 dtlb_stores = 40 [(gogoproto.customname) = "DTLBStores"]; + optional uint64 dtlb_store_misses = 41 [(gogoproto.customname) = "DTLBStoreMisses"]; + optional uint64 dtlb_prefetches = 42 [(gogoproto.customname) = "DTLBPrefetches"]; + optional uint64 dtlb_prefetch_misses = 43 [(gogoproto.customname) = "DTLBPrefetchMisses"]; + optional uint64 itlb_loads = 44 [(gogoproto.customname) = "ITLBLoads"]; + optional uint64 itlb_load_misses = 45 [(gogoproto.customname) = "ITLBLoadMisses"]; + optional uint64 branch_loads = 46; + optional uint64 branch_load_misses = 47; + optional uint64 node_loads = 48; + optional uint64 node_load_misses = 49; + optional uint64 node_stores = 50; + optional uint64 node_store_misses = 51; + optional uint64 node_prefetches = 52; + optional uint64 node_prefetch_misses = 53; +} + + +/** + * Describes a request for resources that can be used by a framework + * to proactively influence the allocator. If 'agent_id' is provided + * then this request is assumed to only apply to resources on that + * agent. + */ +message Request { + optional AgentID agent_id = 1 [(gogoproto.customname) = "AgentID"]; + repeated Resource resources = 2 [(gogoproto.nullable) = false]; +} + + +/** + * Describes some resources available on an agent. An offer only + * contains resources from a single agent. 
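+ *
+ * A minimal illustrative offer in protobuf text format (all values
+ * are hypothetical; the `OfferID`/`FrameworkID`/`AgentID` messages
+ * are assumed to carry their usual `value` field):
+ *
+ *   id { value: "9c296a0e-0001" }
+ *   framework_id { value: "framework-0001" }
+ *   agent_id { value: "agent-0001" }
+ *   hostname: "agent1.example.org"
+ *   resources { name: "cpus" type: SCALAR scalar { value: 4.0 } }
+ *   resources { name: "mem" type: SCALAR scalar { value: 8192.0 } }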
+ */
+message Offer {
+  required OfferID id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "ID"];
+  required FrameworkID framework_id = 2 [(gogoproto.nullable) = false, (gogoproto.customname) = "FrameworkID"];
+  required AgentID agent_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "AgentID"];
+  required string hostname = 4 [(gogoproto.nullable) = false];
+
+  // URL for reaching the agent running on the host.
+  optional URL url = 8 [(gogoproto.customname) = "URL"];
+
+  // The domain of the agent.
+  optional DomainInfo domain = 11;
+
+  repeated Resource resources = 5 [(gogoproto.nullable) = false];
+  repeated Attribute attributes = 7 [(gogoproto.nullable) = false];
+
+  // Executors of the same framework running on this agent.
+  repeated ExecutorID executor_ids = 6 [(gogoproto.nullable) = false, (gogoproto.customname) = "ExecutorIDs"];
+
+  // Signifies that the resources in this Offer may be unavailable during
+  // the given interval. Any tasks launched using these resources may be
+  // killed when the interval arrives. For example, these resources may be
+  // part of a planned maintenance schedule.
+  //
+  // This field only provides information about a planned unavailability.
+  // The actual unavailability may not start at exactly the beginning of
+  // this interval, nor last for exactly its duration. The unavailability
+  // may also be forever! See comments in `Unavailability` for more
+  // details.
+  optional Unavailability unavailability = 9;
+
+  // An offer represents resources allocated to *one* of the
+  // roles managed by the scheduler. (Therefore, each
+  // `Offer.resources[i].allocation_info` will match the
+  // top level `Offer.allocation_info`).
+  optional Resource.AllocationInfo allocation_info = 10;
+
+  // Defines an operation that can be performed against offers.
+  message Operation {
+    enum Type {
+      UNKNOWN = 0;
+      LAUNCH = 1;
+      LAUNCH_GROUP = 6;
+      RESERVE = 2;
+      UNRESERVE = 3;
+      CREATE = 4;
+      DESTROY = 5;
+      GROW_VOLUME = 11;   // EXPERIMENTAL.
+      SHRINK_VOLUME = 12; // EXPERIMENTAL.
+      CREATE_DISK = 13;   // EXPERIMENTAL.
+      DESTROY_DISK = 14;  // EXPERIMENTAL.
+
+      option (gogoproto.goproto_enum_prefix) = true;
+    }
+
+    // TODO(vinod): Deprecate this in favor of `LaunchGroup` below.
+    message Launch {
+      repeated TaskInfo task_infos = 1 [(gogoproto.nullable) = false];
+    }
+
+    // Unlike `Launch` above, all the tasks in a `task_group` are
+    // atomically delivered to an executor.
+    //
+    // `NetworkInfo` set on executor will be shared by all tasks in
+    // the task group.
+    //
+    // TODO(vinod): Any volumes set on executor could be used by a
+    // task by explicitly setting `Volume.source` in its resources.
+    message LaunchGroup {
+      required ExecutorInfo executor = 1 [(gogoproto.nullable) = false];
+      required TaskGroupInfo task_group = 2 [(gogoproto.nullable) = false];
+    }
+
+    message Reserve {
+      repeated Resource resources = 1 [(gogoproto.nullable) = false];
+    }
+
+    message Unreserve {
+      repeated Resource resources = 1 [(gogoproto.nullable) = false];
+    }
+
+    message Create {
+      repeated Resource volumes = 1 [(gogoproto.nullable) = false];
+    }
+
+    message Destroy {
+      repeated Resource volumes = 1 [(gogoproto.nullable) = false];
+    }
+
+    // Grow a volume by an additional disk resource.
+    // NOTE: This is currently experimental and only for persistent volumes
+    // created on ROOT/PATH disk.
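+    //
+    // An illustrative sketch in protobuf text format (hypothetical
+    // values; the persistence and volume details of `volume` are
+    // omitted for brevity): growing a 1024 MB persistent volume by an
+    // additional 512 MB of disk from the same offer:
+    //
+    //   grow_volume {
+    //     volume { name: "disk" type: SCALAR scalar { value: 1024.0 } }
+    //     addition { name: "disk" type: SCALAR scalar { value: 512.0 } }
+    //   }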
+ message GrowVolume { + required Resource volume = 1 [(gogoproto.nullable) = false]; + required Resource addition = 2 [(gogoproto.nullable) = false]; + } + + // Shrink a volume by the size specified in the `subtract` field. + // NOTE: This is currently experimental and only for persistent volumes + // created on ROOT/PATH disk. + message ShrinkVolume { + required Resource volume = 1 [(gogoproto.nullable) = false]; + + // See comments in `Value.Scalar` for maximum precision supported. + required Value.Scalar subtract = 2 [(gogoproto.nullable) = false]; + } + + // Create a `MOUNT` or `BLOCK` disk resource from a `RAW` disk resource. + // NOTE: For the time being, this API is subject to change and the related + // feature is experimental. + message CreateDisk { + required Resource source = 1 [(gogoproto.nullable) = false]; + + // NOTE: Only `MOUNT` or `BLOCK` is allowed in the `target_type` field. + required Resource.DiskInfo.Source.Type target_type = 2 [(gogoproto.nullable) = false]; + } + + // Destroy a `MOUNT` or `BLOCK` disk resource. This will result in a `RAW` + // disk resource. + // NOTE: For the time being, this API is subject to change and the related + // feature is experimental. + message DestroyDisk { + // NOTE: Only a `MOUNT` or `BLOCK` disk is allowed in the `source` field. + required Resource source = 1 [(gogoproto.nullable) = false]; + } + + + optional Type type = 1 [(gogoproto.nullable) = false]; + + // NOTE: The `id` field will allow frameworks to indicate that they wish to + // receive feedback about an operation. Since this feature is not yet + // implemented, the `id` field should NOT be set at present. See MESOS-8054. + optional OperationID id = 12 [(gogoproto.customname) = "ID"]; // EXPERIMENTAL. + + optional Launch launch = 2; + optional LaunchGroup launch_group = 7; + optional Reserve reserve = 3; + optional Unreserve unreserve = 4; + optional Create create = 5; + optional Destroy destroy = 6; + optional GrowVolume grow_volume = 13; // EXPERIMENTAL. + optional ShrinkVolume shrink_volume = 14; // EXPERIMENTAL. + optional CreateDisk create_disk = 15; // EXPERIMENTAL. + optional DestroyDisk destroy_disk = 16; // EXPERIMENTAL. + } +} + + +/** + * A request to return some resources occupied by a framework. + */ +message InverseOffer { + // This is the same OfferID as found in normal offers, which allows + // re-use of some of the OfferID-only messages. + required OfferID id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "OfferID"]; + + // URL for reaching the agent running on the host. This enables some + // optimizations as described in MESOS-3012, such as allowing the + // scheduler driver to bypass the master and talk directly with an agent. + optional URL url = 2 [(gogoproto.customname) = "URL"]; + + // The framework that should release its resources. + // If no specifics are provided (i.e. which agent), all the framework's + // resources are requested back. + required FrameworkID framework_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "FrameworkID"]; + + // Specified if the resources need to be released from a particular agent. + // All the framework's resources on this agent are requested back, + // unless further qualified by the `resources` field. + optional AgentID agent_id = 4 [(gogoproto.customname) = "AgentID"]; + + // This InverseOffer represents a planned unavailability event in the + // specified interval. Any tasks running on the given framework or agent + // may be killed when the interval arrives. 
Therefore, frameworks should + // aim to gracefully terminate tasks prior to the arrival of the interval. + // + // For reserved resources, the resources are expected to be returned to the + // framework after the unavailability interval. This is an expectation, + // not a guarantee. For example, if the unavailability duration is not set, + // the resources may be removed permanently. + // + // For other resources, there is no guarantee that requested resources will + // be returned after the unavailability interval. The allocator has no + // obligation to re-offer these resources to the prior framework after + // the unavailability. + required Unavailability unavailability = 5 [(gogoproto.nullable) = false]; + + // A list of resources being requested back from the framework, + // on the agent identified by `agent_id`. If no resources are specified + // then all resources are being requested back. For the purpose of + // maintenance, this field is always empty (maintenance always requests + // all resources back). + repeated Resource resources = 6 [(gogoproto.nullable) = false]; + + // TODO(josephw): Add additional options for narrowing down the resources + // being requested back. Such as specific executors, tasks, etc. +} + + +/** + * Describes a task. Passed from the scheduler all the way to an + * executor (see SchedulerDriver::launchTasks and + * Executor::launchTask). Either ExecutorInfo or CommandInfo should be set. + * A different executor can be used to launch this task, and subsequent tasks + * meant for the same executor can reuse the same ExecutorInfo struct. + */ +message TaskInfo { + required string name = 1 [(gogoproto.nullable) = false]; + required TaskID task_id = 2 [(gogoproto.nullable) = false, (gogoproto.customname) = "TaskID"]; + required AgentID agent_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "AgentID"]; + repeated Resource resources = 4 [(gogoproto.nullable) = false]; + optional ExecutorInfo executor = 5; + optional CommandInfo command = 7; + + // Task provided with a container will launch the container as part + // of this task paired with the task's CommandInfo. + optional ContainerInfo container = 9; + + // A health check for the task. Implemented for executor-less + // command-based tasks. For tasks that specify an executor, it is + // the executor's responsibility to implement the health checking. + optional HealthCheck health_check = 8; + + // A general check for the task. Implemented for all built-in executors. + // For tasks that specify an executor, it is the executor's responsibility + // to implement checking support. Executors should (all built-in executors + // will) neither interpret nor act on the check's result. + // + // NOTE: Check support in built-in executors is experimental. + // + // TODO(alexr): Consider supporting multiple checks per task. + optional CheckInfo check = 13; + + // A kill policy for the task. Implemented for executor-less + // command-based and docker tasks. For tasks that specify an + // executor, it is the executor's responsibility to implement + // the kill policy. + optional KillPolicy kill_policy = 12; + + optional bytes data = 6; + + // Labels are free-form key value pairs which are exposed through + // master and agent endpoints. Labels will not be interpreted or + // acted upon by Mesos itself. As opposed to the data field, labels + // will be kept in memory on master and agent processes. Therefore, + // labels should be used to tag tasks with light-weight meta-data. 
+  // Labels should not contain duplicate key-value pairs.
+  optional Labels labels = 10;
+
+  // Service discovery information for the task. It is not interpreted
+  // or acted upon by Mesos. It is up to a service discovery system
+  // to use this information as needed and to handle tasks without
+  // service discovery information.
+  optional DiscoveryInfo discovery = 11;
+
+  // Maximum duration for task completion. If the task is non-terminal at the
+  // end of this duration, it will fail with the reason
+  // `REASON_MAX_COMPLETION_TIME_REACHED`. Mesos supports this field for
+  // executor-less tasks, and tasks that use Docker or default executors.
+  // It is the executor's responsibility to implement this, so it might not be
+  // supported by all custom executors.
+  optional DurationInfo max_completion_time = 14;
+}
+
+
+/**
+ * Describes a group of tasks that belong to an executor. The
+ * executor will receive the task group in a single message to
+ * allow the group to be launched "atomically".
+ *
+ * NOTES:
+ * 1) `NetworkInfo` must not be set inside a task's `ContainerInfo`.
+ * 2) `TaskInfo.executor` doesn't need to be set. If set, it should match
+ *    `LaunchGroup.executor`.
+ */
+message TaskGroupInfo {
+  repeated TaskInfo tasks = 1 [(gogoproto.nullable) = false];
+}
+
+
+// TODO(bmahler): Add executor_uuid here, and send it to the master. This will
+// allow us to expose executor work directories for tasks in the webui when
+// looking from the master level. Currently only the agent knows which run the
+// task belongs to.
+/**
+ * Describes a task, similar to `TaskInfo`.
+ *
+ * `Task` is used in some of the Mesos messages found below.
+ * `Task` is used instead of `TaskInfo` if:
+ * 1) we need additional IDs, such as a specific
+ *    framework, executor, or agent; or
+ * 2) we do not need the additional data, such as the command run by the
+ *    task or the health checks. These additional fields may be large and
+ *    unnecessary for some Mesos messages.
+ *
+ * `Task` is generally constructed from a `TaskInfo`. See protobuf::createTask.
+ */
+message Task {
+  required string name = 1 [(gogoproto.nullable) = false];
+  required TaskID task_id = 2 [(gogoproto.nullable) = false, (gogoproto.customname) = "TaskID"];
+  required FrameworkID framework_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "FrameworkID"];
+  optional ExecutorID executor_id = 4 [(gogoproto.customname) = "ExecutorID"];
+  required AgentID agent_id = 5 [(gogoproto.nullable) = false, (gogoproto.customname) = "AgentID"];
+  required TaskState state = 6; // Latest state of the task.
+  repeated Resource resources = 7 [(gogoproto.nullable) = false];
+  repeated TaskStatus statuses = 8 [(gogoproto.nullable) = false];
+
+  // These fields correspond to the state and uuid of the latest
+  // status update forwarded to the master.
+  // NOTE: Either both the fields must be set or both must be unset.
+  optional TaskState status_update_state = 9;
+  optional bytes status_update_uuid = 10 [(gogoproto.customname) = "StatusUpdateUUID"];
+
+  optional Labels labels = 11;
+
+  // Service discovery information for the task. It is not interpreted
+  // or acted upon by Mesos. It is up to a service discovery system
+  // to use this information as needed and to handle tasks without
+  // service discovery information.
+  optional DiscoveryInfo discovery = 12;
+
+  // Container information for the task.
+  optional ContainerInfo container = 13;
+
+  // Specific user under which the task is running.
+  optional string user = 14;
+}
+
+
+/**
+ * Describes possible task states. IMPORTANT: Mesos assumes that tasks
+ * entering terminal states (see below) are no longer running, and thus
+ * cleans up anything associated with the task (ultimately offering any
+ * resources being consumed by that task to another task).
+ */
+enum TaskState {
+  TASK_STAGING = 6;  // Initial state. Framework status updates should not use this state.
+  TASK_STARTING = 0; // The task is being launched by the executor.
+  TASK_RUNNING = 1;
+
+  // NOTE: This should only be sent when the framework has
+  // the TASK_KILLING_STATE capability.
+  TASK_KILLING = 8; // The task is being killed by the executor.
+
+  // The task finished successfully on its own without external interference.
+  TASK_FINISHED = 2; // TERMINAL.
+
+  TASK_FAILED = 3; // TERMINAL: The task failed to finish successfully.
+  TASK_KILLED = 4; // TERMINAL: The task was killed by the executor.
+  TASK_ERROR = 7;  // TERMINAL: The task description contains an error.
+
+  // In Mesos 1.3, this will only be sent when the framework does NOT
+  // opt-in to the PARTITION_AWARE capability.
+  //
+  // NOTE: This state is not always terminal. For example, tasks might
+  // transition from TASK_LOST to TASK_RUNNING or other states when a
+  // partitioned agent reregisters.
+  TASK_LOST = 5; // The task failed but can be rescheduled.
+
+  // The following task states are only sent when the framework
+  // opts-in to the PARTITION_AWARE capability.
+
+  // The task failed to launch because of a transient error. The
+  // task's executor never started running. Unlike TASK_ERROR, the
+  // task description is valid -- attempting to launch the task again
+  // may be successful.
+  TASK_DROPPED = 9; // TERMINAL.
+
+  // The task was running on an agent that has lost contact with the
+  // master, typically due to a network failure or partition. The task
+  // may or may not still be running.
+  TASK_UNREACHABLE = 10;
+
+  // The task is no longer running. This can occur if the agent has
+  // been terminated along with all of its tasks (e.g., the host that
+  // was running the agent was rebooted). It might also occur if the
+  // task was terminated due to an agent or containerizer error, or if
+  // the task was preempted by the QoS controller in an
+  // oversubscription scenario.
+  TASK_GONE = 11; // TERMINAL.
+
+  // The task was running on an agent that the master cannot contact;
+  // the operator has asserted that the agent has been shutdown, but
+  // this has not been directly confirmed by the master. If the
+  // operator is correct, the task is not running and this is a
+  // terminal state; if the operator is mistaken, the task may still
+  // be running and might return to RUNNING in the future.
+  TASK_GONE_BY_OPERATOR = 12;
+
+  // The master has no knowledge of the task. This is typically
+  // because either (a) the master never had knowledge of the task, or
+  // (b) the master forgot about the task because it garbage collected
+  // its metadata about the task. The task may or may not still be
+  // running.
+  TASK_UNKNOWN = 13;
+}
+
+
+/**
+ * Describes a resource limitation that caused a task failure.
+ */
+message TaskResourceLimitation {
+  // This field contains the resource whose limits were violated.
+  //
+  // NOTE: 'Resources' is used here because the resource may span
+  // multiple roles (e.g. `"mem(*):1;mem(role):2"`).
+  repeated Resource resources = 1 [(gogoproto.nullable) = false];
+}
+
+
+/**
+ * Describes a UUID.
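+ *
+ * A hedged illustration in protobuf text format (the value is
+ * hypothetical): `value` carries raw bytes, e.g. the 16 bytes of an
+ * RFC 4122 UUID:
+ *
+ *   value: "\x9c\x29\x6a\x0e\x79\x2d\x4f\x29\xa0\x4e\x9c\xbd\x45\x0f\x2a\x6e"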
+ */ +message UUID { + required bytes value = 1; +} + + +/** + * Describes an operation, similar to `Offer.Operation`, with + * some additional information. + */ +message Operation { + optional FrameworkID framework_id = 1 [(gogoproto.customname) = "FrameworkID"]; + optional AgentID agent_id = 2 [(gogoproto.customname) = "AgentID"]; + required Offer.Operation info = 3 [(gogoproto.nullable) = false]; + required OperationStatus latest_status = 4 [(gogoproto.nullable) = false]; + + // All the statuses known to this operation. Some of the statuses in this + // list might not have been acknowledged yet. The statuses are ordered. + repeated OperationStatus statuses = 5 [(gogoproto.nullable) = false]; + + // This is the internal UUID for the operation, which is kept independently + // from the framework-specified operation ID, which is optional. + required UUID uuid = 6 [(gogoproto.nullable) = false, (gogoproto.customname) = "UUID"]; +} + + +/** + * Describes possible operation states. + */ +enum OperationState { + // Default value if the enum is not set. See MESOS-4997. + OPERATION_UNSUPPORTED = 0; + + // Initial state. + OPERATION_PENDING = 1; + + // TERMINAL: The operation was successfully applied. + OPERATION_FINISHED = 2; + + // TERMINAL: The operation failed to apply. + OPERATION_FAILED = 3; + + // TERMINAL: The operation description contains an error. + OPERATION_ERROR = 4; + + // TERMINAL: The operation was dropped due to a transient error. + OPERATION_DROPPED = 5; + + // The operation affects an agent that has lost contact with the master, + // typically due to a network failure or partition. The operation may or may + // not still be pending. + OPERATION_UNREACHABLE = 6; + + // The operation affected an agent that the master cannot contact; + // the operator has asserted that the agent has been shutdown, but this has + // not been directly confirmed by the master. + // + // If the operator is correct, the operation is not pending and this is a + // terminal state; if the operator is mistaken, the operation may still be + // pending and might return to a different state in the future. + OPERATION_GONE_BY_OPERATOR = 7; + + // The operation affects an agent that the master recovered from its + // state, but that agent has not yet re-registered. + // + // The operation can transition to `OPERATION_UNREACHABLE` if the + // corresponding agent is marked as unreachable, and will transition to + // another status if the agent re-registers. + OPERATION_RECOVERING = 8; + + // The master has no knowledge of the operation. This is typically + // because either (a) the master never had knowledge of the operation, or + // (b) the master forgot about the operation because it garbage collected + // its metadata about the operation. The operation may or may not still be + // pending. + OPERATION_UNKNOWN = 9; +} + + +/** + * Describes the current status of an operation. + */ +message OperationStatus { + // While frameworks will only receive status updates for operations on which + // they have set an ID, this field is optional because this message is also + // used internally by Mesos components when the operation's ID has not been + // set. + optional OperationID operation_id = 1 [(gogoproto.customname) = "OperationID"]; + + required OperationState state = 2 [(gogoproto.nullable) = false]; + optional string message = 3; + + // Converted resources after applying the operation. This only + // applies if the `state` is `OPERATION_FINISHED`. 
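+  // For example (an illustrative sketch with hypothetical values), a
+  // finished RESERVE operation on two unreserved cpus could report
+  // the now-reserved resources as:
+  //
+  //   converted_resources {
+  //     name: "cpus"
+  //     type: SCALAR
+  //     scalar { value: 2.0 }
+  //     reservations { type: DYNAMIC role: "eng" }
+  //   }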
+ repeated Resource converted_resources = 4 [(gogoproto.nullable) = false]; + + // Statuses that are delivered reliably to the scheduler will + // include a `uuid`. The status is considered delivered once + // it is acknowledged by the scheduler. + optional UUID uuid = 5 [(gogoproto.customname) = "UUID"]; +} + + +/** +* Describes the status of a check. Type and the corresponding field, i.e., +* `command` or `http` must be set. If the result of the check is not available +* (e.g., the check timed out), these fields must contain empty messages, i.e., +* `exit_code` or `status_code` will be unset. +* +* NOTE: This API is subject to change and the related feature is experimental. +*/ +message CheckStatusInfo { + message Command { + // Exit code of a command check. It is the result of calling + // `WEXITSTATUS()` on `waitpid()` termination information on + // Posix and calling `GetExitCodeProcess()` on Windows. + optional int32 exit_code = 1; + } + + message Http { + // HTTP status code of an HTTP check. + optional uint32 status_code = 1; + } + + message Tcp { + // Whether a TCP connection succeeded. + optional bool succeeded = 1; + } + + // TODO(alexr): Consider adding a `data` field, which can contain, e.g., + // truncated stdout/stderr output for command checks or HTTP response body + // for HTTP checks. Alternatively, it can be an even shorter `message` field + // containing the last line of stdout or Reason-Phrase of the status line of + // the HTTP response. + + // The type of the check this status corresponds to. + optional CheckInfo.Type type = 1; + + // Status of a command check. + optional Command command = 2; + + // Status of an HTTP check. + optional Http http = 3 [(gogoproto.customname) = "HTTP"]; + + // Status of a TCP check. + optional Tcp tcp = 4 [(gogoproto.customname) = "TCP"]; + + // TODO(alexr): Consider introducing a "last changed at" timestamp, since + // task status update's timestamp may not correspond to the last check's + // state, e.g., for reconciliation. + + // TODO(alexr): Consider introducing a `reason` enum here to explicitly + // distinguish between completed, delayed, and timed out checks. +} + + +/** + * Describes the current status of a task. + */ +message TaskStatus { + // Describes the source of the task status update. + enum Source { + SOURCE_MASTER = 0; + SOURCE_AGENT = 1; + SOURCE_EXECUTOR = 2; + } + + // Detailed reason for the task status update. + // Refer to docs/task-state-reasons.md for additional explanation. + enum Reason { + // TODO(jieyu): The default value when a caller doesn't check for + // presence is 0 and so ideally the 0 reason is not a valid one. + // Since this is not used anywhere, consider removing this reason. + REASON_COMMAND_EXECUTOR_FAILED = 0; + + REASON_CONTAINER_LAUNCH_FAILED = 21; + REASON_CONTAINER_LIMITATION = 19; + REASON_CONTAINER_LIMITATION_DISK = 20; + REASON_CONTAINER_LIMITATION_MEMORY = 8; + REASON_CONTAINER_PREEMPTED = 17; + REASON_CONTAINER_UPDATE_FAILED = 22; + REASON_MAX_COMPLETION_TIME_REACHED = 33; + REASON_EXECUTOR_REGISTRATION_TIMEOUT = 23; + REASON_EXECUTOR_REREGISTRATION_TIMEOUT = 24; + REASON_EXECUTOR_TERMINATED = 1; + REASON_EXECUTOR_UNREGISTERED = 2; // No longer used. 
+ REASON_FRAMEWORK_REMOVED = 3; + REASON_GC_ERROR = 4; + REASON_INVALID_FRAMEWORKID = 5; + REASON_INVALID_OFFERS = 6; + REASON_IO_SWITCHBOARD_EXITED = 27; + REASON_MASTER_DISCONNECTED = 7; + REASON_RECONCILIATION = 9; + REASON_RESOURCES_UNKNOWN = 18; + REASON_AGENT_DISCONNECTED = 10; + REASON_AGENT_REMOVED = 11; + REASON_AGENT_REMOVED_BY_OPERATOR = 31; + REASON_AGENT_REREGISTERED = 32; + REASON_AGENT_RESTARTED = 12; + REASON_AGENT_UNKNOWN = 13; + REASON_TASK_KILLED_DURING_LAUNCH = 30; + REASON_TASK_CHECK_STATUS_UPDATED = 28; + REASON_TASK_HEALTH_CHECK_STATUS_UPDATED = 29; + REASON_TASK_GROUP_INVALID = 25; + REASON_TASK_GROUP_UNAUTHORIZED = 26; + REASON_TASK_INVALID = 14; + REASON_TASK_UNAUTHORIZED = 15; + REASON_TASK_UNKNOWN = 16; + } + + required TaskID task_id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "TaskID"]; + required TaskState state = 2; + optional string message = 4; // Possible message explaining state. + optional Source source = 9; + optional Reason reason = 10; + optional bytes data = 3; + optional AgentID agent_id = 5 [(gogoproto.customname) = "AgentID"]; + optional ExecutorID executor_id = 7 [(gogoproto.customname) = "ExecutorID"]; // TODO(benh): Use in master/agent. + optional double timestamp = 6; + + // Statuses that are delivered reliably to the scheduler will + // include a 'uuid'. The status is considered delivered once + // it is acknowledged by the scheduler. Schedulers can choose + // to either explicitly acknowledge statuses or let the scheduler + // driver implicitly acknowledge (default). + // + // TODO(bmahler): This is currently overwritten in the scheduler + // driver and executor driver, but executors will need to set this + // to a valid RFC-4122 UUID if using the HTTP API. + optional bytes uuid = 11 [(gogoproto.customname) = "UUID"]; + + // Describes whether the task has been determined to be healthy (true) or + // unhealthy (false) according to the `health_check` field in `TaskInfo`. + optional bool healthy = 8; + + // Contains check status for the check specified in the corresponding + // `TaskInfo`. If no check has been specified, this field must be + // absent, otherwise it must be present even if the check status is + // not available yet. If the status update is triggered for a different + // reason than `REASON_TASK_CHECK_STATUS_UPDATED`, this field will contain + // the last known value. + // + // NOTE: A check-related task status update is triggered if and only if + // the value or presence of any field in `CheckStatusInfo` changes. + // + // NOTE: Check support in built-in executors is experimental. + optional CheckStatusInfo check_status = 15; + + // Labels are free-form key value pairs which are exposed through + // master and agent endpoints. Labels will not be interpreted or + // acted upon by Mesos itself. As opposed to the data field, labels + // will be kept in memory on master and agent processes. Therefore, + // labels should be used to tag TaskStatus message with light-weight + // meta-data. Labels should not contain duplicate key-value pairs. + optional Labels labels = 12; + + // Container related information that is resolved dynamically such as + // network address. + optional ContainerStatus container_status = 13; + + // The time (according to the master's clock) when the agent where + // this task was running became unreachable. This is only set on + // status updates for tasks running on agents that are unreachable + // (e.g., partitioned away from the master). 
+  optional TimeInfo unreachable_time = 14;
+
+  // If the reason field indicates a container resource limitation,
+  // this field optionally contains additional information.
+  optional TaskResourceLimitation limitation = 16;
+}
+
+
+/**
+ * Describes possible filters that can be applied to unused resources
+ * (see SchedulerDriver::launchTasks) to influence the allocator.
+ */
+message Filters {
+  // Time to consider unused resources refused. Note that all unused
+  // resources will be considered refused and use the default value
+  // (below) regardless of whether Filters was passed to
+  // SchedulerDriver::launchTasks. You MUST pass Filters with this
+  // field set to change this behavior (i.e., get another offer which
+  // includes unused resources sooner or later than the default).
+  //
+  // If this field is set to a number of seconds greater than 31536000
+  // (365 days), then the resources will be considered refused for 365
+  // days. If it is set to a negative number, then the default value
+  // will be used.
+  optional double refuse_seconds = 1 [default = 5.0];
+}
+
+
+/**
+* Describes a collection of environment variables. This is used with
+* CommandInfo in order to set environment variables before running a
+* command. The contents of each variable may be specified as a string
+* or a Secret; only one of `value` and `secret` must be set.
+*/
+message Environment {
+  message Variable {
+    required string name = 1 [(gogoproto.nullable) = false];
+
+    enum Type {
+      UNKNOWN = 0;
+      VALUE = 1;
+      SECRET = 2;
+
+      option (gogoproto.goproto_enum_prefix) = true;
+    }
+
+    // In Mesos 1.2, the `Environment.variables.value` message was made
+    // optional. The default type for `Environment.variables.type` is now VALUE,
+    // which requires `value` to be set, maintaining backward compatibility.
+    //
+    // TODO(greggomann): The default can be removed in Mesos 2.1 (MESOS-7134).
+    optional Type type = 3 [default = VALUE];
+
+    // Only one of `value` and `secret` must be set.
+    optional string value = 2;
+    optional Secret secret = 4;
+  }
+
+  repeated Variable variables = 1 [(gogoproto.nullable) = false];
+}
+
+
+/**
+ * A generic (key, value) pair used in various places for parameters.
+ */
+message Parameter {
+  required string key = 1 [(gogoproto.nullable) = false];
+  required string value = 2 [(gogoproto.nullable) = false];
+}
+
+
+/**
+ * Collection of Parameter.
+ */
+message Parameters {
+  repeated Parameter parameter = 1 [(gogoproto.nullable) = false];
+}
+
+
+/**
+ * Credential used in various places for authentication and
+ * authorization.
+ *
+ * NOTE: A 'principal' is different from 'FrameworkInfo.user'. The
+ * former is used for authentication and authorization while the
+ * latter is used to determine the default user under which the
+ * framework's executors/tasks are run.
+ */
+message Credential {
+  required string principal = 1 [(gogoproto.nullable) = false];
+  optional string secret = 2;
+}
+
+
+/**
+ * Credentials used for framework authentication, HTTP authentication
+ * (where the common 'username' and 'password' are captured as
+ * 'principal' and 'secret' respectively), etc.
+ */
+message Credentials {
+  repeated Credential credentials = 1 [(gogoproto.nullable) = false];
+}
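
The `refuse_seconds` semantics documented in `Filters` above trip up many framework authors: declining an offer with no `Filters` message at all still applies the 5-second default refusal. An illustrative snippet, assuming the gogo-generated `Filters` struct and getter in the vendored `mesos` package:

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	mesos "github.com/mesos/mesos-go/api/v1/lib"
)

func main() {
	// Refuse re-offers of declined resources for five minutes instead
	// of the 5-second default that applies when no Filters are passed.
	f := mesos.Filters{RefuseSeconds: proto.Float64(300)}
	fmt.Println(f.GetRefuseSeconds()) // 300
}
```

+
+
+/**
+ * Secret used to pass privileged information. It is designed to provide
+ * pass-by-value or pass-by-reference semantics, where the REFERENCE type can be
+ * used by custom modules which interact with a secure back-end.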
+ */
+message Secret
+{
+  enum Type {
+    UNKNOWN = 0;
+    REFERENCE = 1;
+    VALUE = 2;
+
+    option (gogoproto.goproto_enum_prefix) = true;
+  }
+
+  // Can be used by modules to refer to a secret stored in a secure back-end.
+  // The `key` field is provided to permit reference to a single value within a
+  // secret containing arbitrary key-value pairs.
+  //
+  // For example, given a back-end secret store with a secret named
+  // "my-secret" containing the following key-value pairs:
+  //
+  //   {
+  //     "username": "my-user",
+  //     "password": "my-password"
+  //   }
+  //
+  // the username could be referred to in a `Secret` by specifying
+  // "my-secret" for the `name` and "username" for the `key`.
+  message Reference
+  {
+    required string name = 1 [(gogoproto.nullable) = false];
+    optional string key = 2;
+  }
+
+  // Used to pass the value of a secret.
+  message Value
+  {
+    required bytes data = 1;
+  }
+
+  optional Type type = 1 [(gogoproto.nullable) = false];
+
+  // Only one of `reference` and `value` must be set.
+  optional Reference reference = 2;
+  optional Value value = 3;
+}
+
+
+/**
+ * Rate (queries per second, QPS) limit for messages from a framework to master.
+ * Strictly speaking they are the combined rate from all frameworks of the same
+ * principal.
+ */
+message RateLimit {
+  // Leaving QPS unset gives it unlimited rate (i.e., not throttled),
+  // which also implies unlimited capacity.
+  optional double qps = 1 [(gogoproto.customname) = "QPS"];
+
+  // Principal of framework(s) to be throttled. Should match
+  // FrameworkInfo.principal and Credential.principal (if using authentication).
+  required string principal = 2 [(gogoproto.nullable) = false];
+
+  // Max number of outstanding messages from frameworks of this principal
+  // allowed by master before the next message is dropped and an error is sent
+  // back to the sender. Messages received before the capacity is reached are
+  // still going to be processed after the error is sent.
+  // If unspecified, this principal is assigned unlimited capacity.
+  // NOTE: This value is ignored if 'qps' is not set.
+  optional uint64 capacity = 3;
+}
+
+
+/**
+ * Collection of RateLimit.
+ * Frameworks without rate limits defined here are not throttled unless
+ * 'aggregate_default_qps' is specified.
+ */
+message RateLimits {
+  // Items should have unique principals.
+  repeated RateLimit limits = 1 [(gogoproto.nullable) = false];
+
+  // All the frameworks not specified in 'limits' get this default rate.
+  // This rate is an aggregate rate for all of them, i.e., their combined
+  // traffic is throttled together at this rate.
+  optional double aggregate_default_qps = 2 [(gogoproto.customname) = "AggregateDefaultQPS"];
+
+  // All the frameworks not specified in 'limits' get this default capacity.
+  // This is an aggregate value similar to 'aggregate_default_qps'.
+  optional uint64 aggregate_default_capacity = 3;
+}
+
+
+/**
+ * Describes an image used by tasks or executors. Note that it's only
+ * for tasks or executors launched by MesosContainerizer currently.
+ */
+message Image {
+  enum Type {
+    APPC = 1;
+    DOCKER = 2;
+
+    option (gogoproto.goproto_enum_prefix) = true;
+  }
+
+  // Protobuf for specifying an Appc container image. See:
+  // https://github.com/appc/spec/blob/master/spec/aci.md
+  message Appc {
+    // The name of the image.
+    required string name = 1 [(gogoproto.nullable) = false];
+
+    // An image ID is a string of the format "hash-value", where
+    // "hash" is the hash algorithm used and "value" is the hex
+    // encoded string of the digest. Currently the only permitted
+    // hash algorithm is sha512.
+    optional string id = 2 [(gogoproto.customname) = "ID"];
+
+    // Optional labels. Suggested labels: "version", "os", and "arch".
+    optional Labels labels = 3;
+  }
+
+  message Docker {
+    // The name of the image. Expected format:
+    //   [REGISTRY_HOST[:REGISTRY_PORT]/]REPOSITORY[:TAG|@TYPE:DIGEST]
+    //
+    // See: https://docs.docker.com/reference/commandline/pull/
+    required string name = 1 [(gogoproto.nullable) = false];
+
+    // Credential to authenticate with docker registry.
+    // NOTE: This is not encrypted, therefore frameworks and operators
+    // should enable SSL when passing this information.
+    //
+    // This field has never been used in Mesos before and is
+    // deprecated since Mesos 1.3. Please use `config` below
+    // (see MESOS-7088 for details).
+    optional Credential credential = 2 [deprecated = true]; // Since 1.3.
+
+    // Docker config containing credentials to authenticate with
+    // docker registry. The secret is expected to be a docker
+    // config file in JSON format with UTF-8 character encoding.
+    optional Secret config = 3;
+  }
+
+  required Type type = 1;
+
+  // Only one of the following image messages should be set to match
+  // the type.
+  optional Appc appc = 2;
+  optional Docker docker = 3;
+
+  // With this flag set to false, the mesos containerizer will pull
+  // the docker/appc image from the registry even if the image is
+  // already downloaded on the agent.
+  optional bool cached = 4 [default = true];
+}
+
+
+/**
+ * Describes how the mount will be propagated for a volume. See the
+ * following doc for more details about mount propagation:
+ * https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt
+ */
+message MountPropagation {
+  enum Mode {
+    UNKNOWN = 0;
+
+    // The volume in a container will receive new mounts from the host
+    // or other containers, but filesystems mounted inside the
+    // container won't be propagated to the host or other containers.
+    // This is currently the default behavior for all volumes.
+    HOST_TO_CONTAINER = 1;
+
+    // The volume in a container will receive new mounts from the host
+    // or other containers, and its own mounts will be propagated from
+    // the container to the host or other containers.
+    BIDIRECTIONAL = 2;
+
+    option (gogoproto.goproto_enum_prefix) = true;
+  }
+
+  optional Mode mode = 1;
+}
+
+
+/**
+ * Describes a volume mapping either from host to container or vice
+ * versa. Both paths can either refer to a directory or a file.
+ */
+message Volume {
+  enum Mode {
+    RW = 1; // read-write.
+    RO = 2; // read-only.
+  }
+
+  // TODO(gyliu513): Make this as `optional` after deprecation cycle of 1.0.
+  required Mode mode = 3;
+
+  // Path pointing to a directory or file in the container. If the
+  // path is a relative path, it is relative to the container work
+  // directory. If the path is an absolute path, that path must
+  // already exist.
+  required string container_path = 1 [(gogoproto.nullable) = false];
+
+  // The following specifies the source of this volume. At most one of
+  // the following should be set.
+
+  // Absolute path pointing to a directory or file on the host or a
+  // path relative to the container work directory.
+  optional string host_path = 2;
+
+  // The source of the volume is an Image which describes a root
+  // filesystem which will be provisioned by Mesos.
+  optional Image image = 4;
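
With the `Volume` fields above in hand (see also the `Source` message that follows), the classic host-path mount needs only `mode`, `container_path`, and `host_path`. A hedged sketch, assuming the gogo-generated bindings from the vendored `mesos` package; the paths are placeholders:

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	mesos "github.com/mesos/mesos-go/api/v1/lib"
)

func main() {
	// Mount the host directory /var/data read-write at "data" inside
	// the container's work directory (a relative container_path).
	v := mesos.Volume{
		Mode:          mesos.RW.Enum(),
		ContainerPath: "data",
		HostPath:      proto.String("/var/data"),
	}
	fmt.Println(v.GetMode(), v.GetContainerPath(), v.GetHostPath())
}
```

+
+  // Describes where a volume originates from.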
+  message Source {
+    enum Type {
+      // This must be the first enum value in this list, to
+      // ensure that if 'type' is not set, the default value
+      // is UNKNOWN. This enables enum values to be added
+      // in a backwards-compatible way. See: MESOS-4997.
+      UNKNOWN = 0;
+
+      // TODO(gyliu513): Add IMAGE as volume source type.
+      DOCKER_VOLUME = 1;
+      HOST_PATH = 4;
+      SANDBOX_PATH = 2;
+      SECRET = 3;
+
+      option (gogoproto.goproto_enum_prefix) = true;
+    }
+
+    message DockerVolume {
+      // Driver of the volume, it can be flocker, convoy, raxrey etc.
+      optional string driver = 1;
+
+      // Name of the volume.
+      required string name = 2 [(gogoproto.nullable) = false];
+
+      // Volume driver specific options.
+      optional Parameters driver_options = 3;
+    }
+
+    // Absolute path pointing to a directory or file on the host.
+    message HostPath {
+      required string path = 1 [(gogoproto.nullable) = false];
+      optional MountPropagation mount_propagation = 2;
+    }
+
+    // Describes a path from a container's sandbox. The container can
+    // be the current container (SELF), or its parent container
+    // (PARENT). PARENT allows all child containers to share a volume
+    // from their parent container's sandbox. It'll be an error if
+    // the current container is a top level container.
+    message SandboxPath {
+      enum Type {
+        UNKNOWN = 0;
+        SELF = 1;
+        PARENT = 2;
+
+        option (gogoproto.goproto_enum_prefix) = true;
+      }
+
+      optional Type type = 1 [(gogoproto.nullable) = false];
+
+      // A path relative to the corresponding container's sandbox.
+      // Note that upwards traversal (i.e. ../../abc) is not allowed.
+      required string path = 2 [(gogoproto.nullable) = false];
+    }
+
+    // Enum fields should be optional, see: MESOS-4997.
+    optional Type type = 1 [(gogoproto.nullable) = false];
+
+    // The following specifies the source of this volume. At most one of
+    // the following should be set.
+
+    // The source of the volume created by docker volume driver.
+    optional DockerVolume docker_volume = 2;
+
+    optional HostPath host_path = 5;
+    optional SandboxPath sandbox_path = 3;
+
+    // The volume/secret isolator uses the secret-fetcher module (third-party
+    // or internal) to download the secret and make it available at
+    // container_path.
+    optional Secret secret = 4;
+  }
+
+  optional Source source = 5;
+}
+
+
+/**
+ * Describes a network request from a framework as well as network resolution
+ * provided by Mesos.
+ *
+ * A framework may request the network isolator on the Agent to isolate the
+ * container in a network namespace and create a virtual network interface.
+ * The `NetworkInfo` message describes the properties of that virtual
+ * interface, including the IP addresses and network isolation policy
+ * (network group membership).
+ *
+ * The NetworkInfo message is not interpreted by the Master or Agent and is
+ * intended to be used by Agent and Master modules implementing network
+ * isolation. If the modules are missing, the message is simply ignored. In
+ * future, the task launch will fail if there is no module providing the
+ * network isolation capabilities (MESOS-3390).
+ *
+ * An executor, Agent, or an Agent module may append NetworkInfos inside
+ * TaskStatus::container_status to provide information such as the container IP
+ * address and isolation groups.
+ */
+message NetworkInfo {
+  enum Protocol {
+    IPv4 = 1;
+    IPv6 = 2;
+  }
+
+  // Specifies a request for an IP address, or reports the assigned container
+  // IP address.
+ // + // Users can request an automatically assigned IP (for example, via an + // IPAM service) or a specific IP by adding a NetworkInfo to the + // ContainerInfo for a task. On a request, specifying neither `protocol` + // nor `ip_address` means that any available address may be assigned. + message IPAddress { + // Specify IP address requirement. Set protocol to the desired value to + // request the network isolator on the Agent to assign an IP address to the + // container being launched. If a specific IP address is specified in + // ip_address, this field should not be set. + optional Protocol protocol = 1 [default = IPv4]; + + // Statically assigned IP provided by the Framework. This IP will be + // assigned to the container by the network isolator module on the Agent. + // This field should not be used with the protocol field above. + // + // If an explicit address is requested but is unavailable, the network + // isolator should fail the task. + optional string ip_address = 2 [(gogoproto.customname) = "IPAddress"]; + } + + // When included in a ContainerInfo, each of these represent a + // request for an IP address. Each request can specify an explicit address + // or the IP protocol to use. + // + // When included in a TaskStatus message, these inform the framework + // scheduler about the IP addresses that are bound to the container + // interface. When there are no custom network isolator modules installed, + // this field is filled in automatically with the Agent IP address. + repeated IPAddress ip_addresses = 5 [(gogoproto.nullable) = false, (gogoproto.customname) = "IPAddresses"]; + + // Name of the network which will be used by network isolator to determine + // the network that the container joins. It's up to the network isolator + // to decide how to interpret this field. + optional string name = 6; + + // A group is the name given to a set of logically-related interfaces that + // are allowed to communicate among themselves. Network traffic is allowed + // between two container interfaces that share at least one network group. + // For example, one might want to create separate groups for isolating dev, + // testing, qa and prod deployment environments. + repeated string groups = 3; + + // To tag certain metadata to be used by Isolator/IPAM, e.g., rack, etc. + optional Labels labels = 4; + + // Specifies a port mapping request for the task on this network. + message PortMapping { + required uint32 host_port = 1 [(gogoproto.nullable) = false]; + required uint32 container_port = 2 [(gogoproto.nullable) = false]; + // Protocol to expose as (ie: tcp, udp). + optional string protocol = 3; + } + + repeated PortMapping port_mappings = 7 [(gogoproto.nullable) = false]; +}; + + +/** + * Encapsulation of `Capabilities` supported by Linux. + * Reference: http://linux.die.net/man/7/capabilities. + */ +message CapabilityInfo { + // We start the actual values at an offset(1000) because Protobuf 2 + // uses the first value as the default one. Separating the default + // value from the real first value helps to disambiguate them. This + // is especially valuable for backward compatibility. + // See: MESOS-4997. 
+ enum Capability { + UNKNOWN = 0; + CHOWN = 1000; + DAC_OVERRIDE = 1001; + DAC_READ_SEARCH = 1002; + FOWNER = 1003; + FSETID = 1004; + KILL = 1005; + SETGID = 1006; + SETUID = 1007; + SETPCAP = 1008; + LINUX_IMMUTABLE = 1009; + NET_BIND_SERVICE = 1010; + NET_BROADCAST = 1011; + NET_ADMIN = 1012; + NET_RAW = 1013; + IPC_LOCK = 1014; + IPC_OWNER = 1015; + SYS_MODULE = 1016; + SYS_RAWIO = 1017; + SYS_CHROOT = 1018; + SYS_PTRACE = 1019; + SYS_PACCT = 1020; + SYS_ADMIN = 1021; + SYS_BOOT = 1022; + SYS_NICE = 1023; + SYS_RESOURCE = 1024; + SYS_TIME = 1025; + SYS_TTY_CONFIG = 1026; + MKNOD = 1027; + LEASE = 1028; + AUDIT_WRITE = 1029; + AUDIT_CONTROL = 1030; + SETFCAP = 1031; + MAC_OVERRIDE = 1032; + MAC_ADMIN = 1033; + SYSLOG = 1034; + WAKE_ALARM = 1035; + BLOCK_SUSPEND = 1036; + AUDIT_READ = 1037; + + option (gogoproto.goproto_enum_prefix) = true; + } + + repeated Capability capabilities = 1; +} + + +/** + * Encapsulation for Linux specific configuration. + * E.g, capabilities, limits etc. + */ +message LinuxInfo { + // Since 1.4.0, deprecated in favor of `effective_capabilities`. + optional CapabilityInfo capability_info = 1 [deprecated = true]; + + // The set of capabilities that are allowed but not initially + // granted to tasks. + optional CapabilityInfo bounding_capabilities = 2; + + // Represents the set of capabilities that the task will + // be executed with. + optional CapabilityInfo effective_capabilities = 3; + + // If set as 'true', the container shares the pid namespace with + // its parent. If the container is a top level container, it will + // share the pid namespace with the agent. If the container is a + // nested container, it will share the pid namespace with its + // parent container. This field will be ignored if 'namespaces/pid' + // isolator is not enabled. + optional bool share_pid_namespace = 4 [(gogoproto.customname) = "SharePIDNamespace"]; +} + + +/** +* Encapsulation for POSIX rlimits, see +* http://pubs.opengroup.org/onlinepubs/009695399/functions/getrlimit.html. +* Note that some types might only be defined for Linux. +* We use a custom prefix to avoid conflict with existing system macros +* (e.g., `RLIMIT_CPU` or `NOFILE`). +*/ +message RLimitInfo { + message RLimit { + enum Type { + UNKNOWN = 0; + RLMT_AS = 1; + RLMT_CORE = 2; + RLMT_CPU = 3; + RLMT_DATA = 4; + RLMT_FSIZE = 5; + RLMT_LOCKS = 6; + RLMT_MEMLOCK = 7; + RLMT_MSGQUEUE = 8; + RLMT_NICE = 9; + RLMT_NOFILE = 10; + RLMT_NPROC = 11; + RLMT_RSS = 12; + RLMT_RTPRIO = 13; + RLMT_RTTIME = 14; + RLMT_SIGPENDING = 15; + RLMT_STACK = 16; + + option (gogoproto.goproto_enum_prefix) = true; + } + optional Type type = 1 [(gogoproto.nullable) = false]; + + // Either both are set or both are not set. + // If both are not set, it represents unlimited. + // If both are set, we require `soft` <= `hard`. + optional uint64 hard = 2; + optional uint64 soft = 3; + } + + repeated RLimit rlimits = 1 [(gogoproto.nullable) = false]; +} + + +/** + * Describes the information about (pseudo) TTY that can + * be attached to a process running in a container. + */ +message TTYInfo { + message WindowSize { + required uint32 rows = 1 [(gogoproto.nullable) = false]; + required uint32 columns = 2 [(gogoproto.nullable) = false]; + } + + optional WindowSize window_size = 1; +} + + +/** + * Describes a container configuration and allows extensible + * configurations for different container implementations. + * + * NOTE: `ContainerInfo` may be specified, e.g., by a task, even if no + * container image is provided. 
In this case neither `MesosInfo` nor
+ * `DockerInfo` is set, and the required `type` must be `MESOS`. This is to
+ * address a case when a task without an image, e.g., a shell script
+ * with URIs, wants to use features originally designed for containers,
+ * for example custom network isolation via `NetworkInfo`.
+ */
+message ContainerInfo {
+  // All container implementation types.
+  enum Type {
+    DOCKER = 1;
+    MESOS = 2;
+
+    option (gogoproto.goproto_enum_prefix) = true;
+  }
+
+  message DockerInfo {
+    // The docker image that is going to be passed to the registry.
+    required string image = 1 [(gogoproto.nullable) = false];
+
+    // Network options.
+    enum Network {
+      HOST = 1;
+      BRIDGE = 2;
+      NONE = 3;
+      USER = 4;
+
+      option (gogoproto.goproto_enum_prefix) = true;
+    }
+
+    optional Network network = 2 [default = HOST];
+
+    message PortMapping {
+      required uint32 host_port = 1 [(gogoproto.nullable) = false];
+      required uint32 container_port = 2 [(gogoproto.nullable) = false];
+      // Protocol to expose as (ie: tcp, udp).
+      optional string protocol = 3;
+    }
+
+    repeated PortMapping port_mappings = 3 [(gogoproto.nullable) = false];
+
+    optional bool privileged = 4 [default = false];
+
+    // Allowing arbitrary parameters to be passed to docker CLI.
+    // Note that anything passed to this field is not guaranteed
+    // to be supported moving forward, as we might move away from
+    // the docker CLI.
+    repeated Parameter parameters = 5 [(gogoproto.nullable) = false];
+
+    // With this flag set to true, the docker containerizer will
+    // pull the docker image from the registry even if the image
+    // is already downloaded on the agent.
+    optional bool force_pull_image = 6;
+
+    // The name of volume driver plugin.
+    optional string volume_driver = 7 [deprecated = true]; // Since 1.0
+  }
+
+  message MesosInfo {
+    optional Image image = 1;
+  }
+
+  required Type type = 1;
+  repeated Volume volumes = 2 [(gogoproto.nullable) = false];
+  optional string hostname = 4;
+
+  // Only one of the following *Info messages should be set to match
+  // the type.
+  optional DockerInfo docker = 3;
+  optional MesosInfo mesos = 5;
+
+  // A list of network requests. A framework can request multiple IP addresses
+  // for the container.
+  repeated NetworkInfo network_infos = 7 [(gogoproto.nullable) = false];
+
+  // Linux specific information for the container.
+  optional LinuxInfo linux_info = 8;
+
+  // (POSIX only) rlimits of the container.
+  optional RLimitInfo rlimit_info = 9;
+
+  // If specified a tty will be attached to the container entrypoint.
+  optional TTYInfo tty_info = 10 [(gogoproto.customname) = "TTYInfo"];
+}
+
+
+/**
+ * Container related information that is resolved during container
+ * setup. The information is sent back to the framework as part of the
+ * TaskStatus message.
+ */
+message ContainerStatus {
+  optional ContainerID container_id = 4 [(gogoproto.customname) = "ContainerID"];
+
+  // This field can be reliably used to identify the container IP address.
+  repeated NetworkInfo network_infos = 1 [(gogoproto.nullable) = false];
+
+  // Information about Linux control group (cgroup).
+  optional CgroupInfo cgroup_info = 2;
+
+  // Information about Executor PID.
+  optional uint32 executor_pid = 3 [(gogoproto.customname) = "ExecutorPID"];
+}
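
Putting `ContainerInfo` together: the sketch below builds a Docker container on the bridge network that publishes container port 80 on host port 8080. Field and constant names assume the gogo-generated bindings for this file; the image tag is a placeholder:

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	mesos "github.com/mesos/mesos-go/api/v1/lib"
)

func main() {
	ci := mesos.ContainerInfo{
		Type: mesos.ContainerInfo_DOCKER.Enum(),
		Docker: &mesos.ContainerInfo_DockerInfo{
			Image:   "nginx:1.15",
			Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(),
			PortMappings: []mesos.ContainerInfo_DockerInfo_PortMapping{
				{HostPort: 8080, ContainerPort: 80, Protocol: proto.String("tcp")},
			},
		},
	}
	fmt.Println(ci.GetType(), ci.GetDocker().GetImage())
}
```

+
+
+/**
+ * Linux control group (cgroup) information.
+ */
+message CgroupInfo {
+  // Configuration of a blkio cgroup subsystem.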
+ message Blkio { + enum Operation { + UNKNOWN = 0; + TOTAL = 1; + READ = 2; + WRITE = 3; + SYNC = 4; + ASYNC = 5; + + option (gogoproto.goproto_enum_prefix) = true; + } + + // Describes a stat value without the device descriptor part. + message Value { + optional Operation op = 1; // Required. + optional uint64 value = 2; // Required. + } + + message CFQ { + message Statistics { + // Stats are grouped by block devices. If `device` is not + // set, it represents `Total`. + optional Device.Number device = 1; + // blkio.sectors + optional uint64 sectors = 2; + // blkio.time + optional uint64 time = 3; + // blkio.io_serviced + repeated Value io_serviced = 4 [(gogoproto.nullable) = false, (gogoproto.customname) = "IOServiced"]; + // blkio.io_service_bytes + repeated Value io_service_bytes = 5 [(gogoproto.nullable) = false, (gogoproto.customname) = "IOServiceBytes"]; + // blkio.io_service_time + repeated Value io_service_time = 6 [(gogoproto.nullable) = false, (gogoproto.customname) = "IOServiceTime"]; + // blkio.io_wait_time + repeated Value io_wait_time = 7 [(gogoproto.nullable) = false, (gogoproto.customname) = "IOWaitTime"]; + // blkio.io_merged + repeated Value io_merged = 8 [(gogoproto.nullable) = false, (gogoproto.customname) = "IOMerged"]; + // blkio.io_queued + repeated Value io_queued = 9 [(gogoproto.nullable) = false, (gogoproto.customname) = "IOQueued"]; + } + + // TODO(jasonlai): Add fields for blkio weight and weight + // device. + } + + message Throttling { + message Statistics { + // Stats are grouped by block devices. If `device` is not + // set, it represents `Total`. + optional Device.Number device = 1; + // blkio.throttle.io_serviced + repeated Value io_serviced = 2 [(gogoproto.nullable) = false, (gogoproto.customname) = "IOServiced"]; + // blkio.throttle.io_service_bytes + repeated Value io_service_bytes = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "IOServiceBytes"]; + } + + // TODO(jasonlai): Add fields for blkio.throttle.*_device. + } + + message Statistics { + repeated CFQ.Statistics cfq = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "CFQ"]; + repeated CFQ.Statistics cfq_recursive = 2 [(gogoproto.nullable) = false, (gogoproto.customname) = "CFQRecursive"]; + repeated Throttling.Statistics throttling = 3; + } + } + + // Configuration of a net_cls cgroup subsystem. + message NetCls { + // The 32-bit classid consists of two parts, a 16 bit major handle + // and a 16-bit minor handle. The major and minor handle are + // represented using the format 0xAAAABBBB, where 0xAAAA is the + // 16-bit major handle and 0xBBBB is the 16-bit minor handle. + optional uint32 classid = 1 [(gogoproto.customname) = "ClassID"]; + } + + optional NetCls net_cls = 1 [(gogoproto.customname) = "NetCLS"]; +} + + +/** + * Collection of labels. Labels should not contain duplicate key-value + * pairs. + */ +message Labels { + repeated Label labels = 1 [(gogoproto.nullable) = false]; +} + + +/** + * Key, value pair used to store free form user-data. + */ +message Label { + required string key = 1 [(gogoproto.nullable) = false]; + optional string value = 2; +} + + +/** + * Named port used for service discovery. + */ +message Port { + // Port number on which the framework exposes a service. + required uint32 number = 1 [(gogoproto.nullable) = false]; + + // Name of the service hosted on this port. + optional string name = 2; + + // Layer 4-7 protocol on which the framework exposes its services. 
+ optional string protocol = 3; + + // This field restricts discovery within a framework (FRAMEWORK), + // within a Mesos cluster (CLUSTER), or places no restrictions (EXTERNAL). + // The visibility setting for a Port overrides the general visibility setting + // in the DiscoveryInfo. + optional DiscoveryInfo.Visibility visibility = 4; + + // This can be used to decorate the message with metadata to be + // interpreted by external applications such as firewalls. + optional Labels labels = 5; +} + + +/** + * Collection of ports. + */ +message Ports { + repeated Port ports = 1 [(gogoproto.nullable) = false]; +} + + +/** +* Service discovery information. +* The visibility field restricts discovery within a framework (FRAMEWORK), +* within a Mesos cluster (CLUSTER), or places no restrictions (EXTERNAL). +* Each port in the ports field also has an optional visibility field. +* If visibility is specified for a port, it overrides the default service-wide +* DiscoveryInfo.visibility for that port. +* The environment, location, and version fields provide first class support for +* common attributes used to differentiate between similar services. The +* environment may receive values such as PROD/QA/DEV, the location field may +* receive values like EAST-US/WEST-US/EUROPE/AMEA, and the version field may +* receive values like v2.0/v0.9. The exact use of these fields is up to each +* service discovery system. +*/ +message DiscoveryInfo { + enum Visibility { + FRAMEWORK = 0; + CLUSTER = 1; + EXTERNAL = 2; + } + + required Visibility visibility = 1 [(gogoproto.nullable) = false]; + optional string name = 2; + optional string environment = 3; + optional string location = 4; + optional string version = 5; + optional Ports ports = 6; + optional Labels labels = 7; +} + + +/** + * Named WeightInfo to indicate resource allocation + * priority between the different roles. + */ +message WeightInfo { + required double weight = 1 [(gogoproto.nullable) = false]; + + // Related role name. + optional string role = 2; +} + + +/** + * Version information of a component. + */ +message VersionInfo { + required string version = 1 [(gogoproto.nullable) = false]; + optional string build_date = 2; + optional double build_time = 3; + optional string build_user = 4; + optional string git_sha = 5 [(gogoproto.customname) = "GitSHA"]; + optional string git_branch = 6; + optional string git_tag = 7; +} + + +/** + * Flag consists of a name and optionally its value. + */ +message Flag { + required string name = 1 [(gogoproto.nullable) = false]; + optional string value = 2; +} + + +/** + * Describes a Role. Roles can be used to specify that certain resources are + * reserved for the use of one or more frameworks. + */ +message Role { + required string name = 1 [(gogoproto.nullable) = false]; + required double weight = 2 [(gogoproto.nullable) = false]; + repeated FrameworkID frameworks = 3 [(gogoproto.nullable) = false]; + repeated Resource resources = 4 [(gogoproto.nullable) = false]; +} + + +/** + * Metric consists of a name and optionally its value. + */ +message Metric { + required string name = 1 [(gogoproto.nullable) = false]; + optional double value = 2; +} + + +/** + * Describes a File. + */ +message FileInfo { + // Absolute path to the file. + required string path = 1 [(gogoproto.nullable) = false]; + + // Number of hard links. + optional int32 nlink = 2; + + // Total size in bytes. + optional uint64 size = 3; + + // Last modification time. + optional TimeInfo mtime = 4; + + // Represents a file's mode and permission bits. 
The bits have the same
+  // definition on all systems and are portable.
+  optional uint32 mode = 5;
+
+  // User ID of owner.
+  optional string uid = 6 [(gogoproto.customname) = "UID"];
+
+  // Group ID of owner.
+  optional string gid = 7 [(gogoproto.customname) = "GID"];
+}
+
+
+/**
+ * Describes information about a device.
+ */
+message Device {
+  message Number {
+    required uint64 major_number = 1;
+    required uint64 minor_number = 2;
+  }
+
+  optional string path = 1;
+  optional Number number = 2;
+}
+
+
+/**
+ * Describes a device whitelist entry that is exposed from the host to the
+ * container.
+ */
+message DeviceAccess {
+  message Access {
+    optional bool read = 1;
+    optional bool write = 2;
+    optional bool mknod = 3;
+  }
+  required Device device = 1 [(gogoproto.nullable) = false];
+  required Access access = 2 [(gogoproto.nullable) = false];
+}
+
+
+message DeviceWhitelist {
+  repeated DeviceAccess allowed_devices = 1 [(gogoproto.nullable) = false];
+}
diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/ranges.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/ranges.go
new file mode 100644
index 000000000000..d391e1573dbd
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/ranges.go
@@ -0,0 +1,253 @@
+package mesos
+
+import (
+	"sort"
+)
+
+// Ranges represents a list of Ranges.
+type Ranges []Value_Range
+
+// NewRanges returns squashed Ranges from the given numbers.
+func NewRanges(ns ...uint64) Ranges {
+	xs := append(uint64s{}, ns...)
+	sort.Sort(xs)
+	rs := make(Ranges, len(xs))
+	for i := range xs {
+		rs[i].Begin, rs[i].End = xs[i], xs[i]
+	}
+	return rs.Squash()
+}
+
+// NewPortRanges returns Ranges from the "ports" resource in the
+// given *Offer. If that resource isn't provided, an empty Ranges
+// will be returned.
+//
+// The returned Ranges are sorted and have all overlapping ranges merged from
+// left to right. e.g. [[0, 5], [4, 3], [10, 7]] -> [[0, 5], [7, 10]]
+func NewPortRanges(o *Offer) Ranges {
+	if o == nil {
+		return Ranges{}
+	}
+
+	var (
+		r     Resource
+		found bool
+	)
+	for i := range o.Resources {
+		if o.Resources[i].GetName() == "ports" {
+			r = o.Resources[i]
+			found = true
+			break
+		}
+	}
+
+	if !found {
+		return Ranges{}
+	}
+
+	offered := r.GetRanges().GetRange()
+	rs := make(Ranges, len(offered))
+	for i, r := range offered {
+		if lo, hi := r.GetBegin(), r.GetEnd(); lo <= hi {
+			rs[i].Begin, rs[i].End = lo, hi
+		} else {
+			rs[i].Begin, rs[i].End = hi, lo
+		}
+	}
+	return rs.Sort().Squash()
+}
+
+// These three methods implement sort.Interface
+func (rs Ranges) Len() int      { return len(rs) }
+func (rs Ranges) Swap(i, j int) { rs[i], rs[j] = rs[j], rs[i] }
+func (rs Ranges) Less(i, j int) bool {
+	return rs[i].Begin < rs[j].Begin || (rs[i].Begin == rs[j].Begin && rs[i].End < rs[j].End)
+}
+
+// Size returns the sum of the Size of all Ranges.
+func (rs Ranges) Size() uint64 {
+	var sz uint64
+	for i := range rs {
+		sz += 1 + (rs[i].End - rs[i].Begin)
+	}
+	return sz
+}
+
+// Sort sorts the receiving Ranges and returns the result; convenience
+func (rs Ranges) Sort() Ranges {
+	sort.Sort(rs)
+	return rs
+}
+
+// Squash merges overlapping and continuous Ranges. It assumes they're pre-sorted.
+func (rs Ranges) Squash() Ranges {
+	if len(rs) < 2 {
+		return rs
+	}
+	squashed := Ranges{rs[0]}
+	for i := 1; i < len(rs); i++ {
+		switch max := squashed[len(squashed)-1].End; {
+		case 1+max < rs[i].Begin: // no overlap nor continuity: push
+			squashed = append(squashed, rs[i])
+		case max <= rs[i].End: // overlap or continuity: squash
+			squashed[len(squashed)-1].End = rs[i].End
+		}
+	}
+	return squashed
+}
+
+// Search performs a binary search for n returning the index of the Range it was
+// found at or -1 if not found.
+func (rs Ranges) Search(n uint64) int {
+	for lo, hi := 0, len(rs)-1; lo <= hi; {
+		switch m := lo + (hi-lo)/2; {
+		case n < rs[m].Begin:
+			hi = m - 1
+		case n > rs[m].End:
+			lo = m + 1
+		default:
+			return m
+		}
+	}
+	return -1
+}
+
+// Partition partitions Ranges around n. It returns the partitioned Ranges
+// and a boolean indicating if n was found.
+func (rs Ranges) Partition(n uint64) (Ranges, bool) {
+	i := rs.Search(n)
+	if i < 0 {
+		return rs, false
+	}
+
+	pn := make(Ranges, 0, len(rs)+1)
+	switch pn = append(pn, rs[:i]...); {
+	case rs[i].Begin == rs[i].End: // delete
+	case rs[i].Begin == n: // increment lower bound
+		pn = append(pn, Value_Range{rs[i].Begin + 1, rs[i].End})
+	case rs[i].End == n: // decrement upper bound
+		pn = append(pn, Value_Range{rs[i].Begin, rs[i].End - 1})
+	default: // split
+		pn = append(pn, Value_Range{rs[i].Begin, n - 1}, Value_Range{n + 1, rs[i].End})
+	}
+	return append(pn, rs[i+1:]...), true
+}
+
+// Remove removes a range from already coalesced ranges.
+// The algorithm constructs a new vector of ranges which is then
+// Squash'ed into a Ranges instance.
+func (rs Ranges) Remove(removal Value_Range) Ranges {
+	ranges := make([]Value_Range, 0, len(rs))
+	for _, r := range rs {
+		// skip if the entire range is subsumed by removal
+		if r.Begin >= removal.Begin && r.End <= removal.End {
+			continue
+		}
+		// divide if the range subsumes the removal
+		if r.Begin < removal.Begin && r.End > removal.End {
+			ranges = append(ranges,
+				Value_Range{r.Begin, removal.Begin - 1},
+				Value_Range{removal.End + 1, r.End},
+			)
+			continue
+		}
+		// add the full range if there's no intersection
+		if r.End < removal.Begin || r.Begin > removal.End {
+			ranges = append(ranges, r)
+			continue
+		}
+		// trim if the range does intersect
+		if r.End > removal.End {
+			ranges = append(ranges, Value_Range{removal.End + 1, r.End})
+		} else {
+			if r.Begin >= removal.Begin {
+				// should never happen
+				panic("r.Begin >= removal.Begin")
+			}
+			ranges = append(ranges, Value_Range{r.Begin, removal.Begin - 1})
+		}
+	}
+	return Ranges(ranges).Squash()
+}
+
+// Compare assumes that both Ranges are already in sort-order.
+// Returns 0 if rs and right are equivalent, -1 if rs is a subset of right, or else 1
+func (rs Ranges) Compare(right Ranges) int {
+	x, y, result := rs.equiv(right)
+	if result {
+		return 0
+	}
+	for _, a := range x {
+		// make sure that this range is a subset of a range in y
+		matched := false
+		for _, b := range y {
+			if a.Begin >= b.Begin && a.End <= b.End {
+				matched = true
+				break
+			}
+		}
+		if !matched {
+			return 1
+		}
+	}
+	return -1
+}
+
+// Equivalent assumes that both Ranges are already in sort-order.
+func (rs Ranges) Equivalent(right Ranges) (result bool) {
+	_, _, result = rs.equiv(right)
+	return
+}
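
The operations above compose because every constructor and mutator re-sorts and squashes. A small worked example, checkable against the definitions above:

```go
package main

import (
	"fmt"

	mesos "github.com/mesos/mesos-go/api/v1/lib"
)

func main() {
	// NewRanges squashes continuous values: 1..5 collapse into one range.
	rs := mesos.NewRanges(1, 2, 3, 4, 5, 10)
	fmt.Println(rs.Size()) // 6

	// Partition splits [1,5] around 3, leaving [1,2], [4,5] and [10,10].
	rs, found := rs.Partition(3)
	fmt.Println(found, rs.Size()) // true 5

	// Remove trims every range that intersects [4,10].
	rs = rs.Remove(mesos.Value_Range{Begin: 4, End: 10})
	fmt.Println(rs.Size()) // 2 (only [1,2] remains)
}
```

+
+// equiv assumes that both Ranges are already in sort-order.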
+func (rs Ranges) equiv(right Ranges) (_, _ Ranges, _ bool) {
+	// we need to squash rs and right but don't want to change the originals
+	switch len(rs) {
+	case 0:
+	case 1:
+		rs = Ranges{rs[0]}
+	default:
+		rs = Ranges(append([]Value_Range{rs[0], rs[1]}, rs[2:]...)).Sort().Squash()
+	}
+	switch len(right) {
+	case 0:
+	case 1:
+		right = Ranges{right[0]}
+	default:
+		right = Ranges(append([]Value_Range{right[0], right[1]}, right[2:]...)).Sort().Squash()
+	}
+	return rs, right, (&Value_Ranges{Range: rs}).Equal(&Value_Ranges{Range: right})
+}
+
+func (rs Ranges) Clone() Ranges {
+	if len(rs) == 0 {
+		return nil
+	}
+	x := make(Ranges, len(rs))
+	copy(x, rs)
+	return x
+}
+
+// Min returns the minimum number in Ranges. It will panic on empty Ranges.
+func (rs Ranges) Min() uint64 { return rs[0].Begin }
+
+// Max returns the maximum number in Ranges. It will panic on empty Ranges.
+func (rs Ranges) Max() uint64 { return rs[len(rs)-1].End }
+
+// resource returns a *Resource with the given name and Ranges.
+func (rs Ranges) resource(name string) Resource {
+	vr := make([]Value_Range, len(rs))
+	copy(vr, rs)
+	return Resource{
+		Name:   name,
+		Type:   RANGES.Enum(),
+		Ranges: &Value_Ranges{Range: vr},
+	}
+}
+
+// uint64s is a utility used to sort a slice of uint64s
+type uint64s []uint64
+
+// These three methods implement sort.Interface
+func (ns uint64s) Len() int           { return len(ns) }
+func (ns uint64s) Less(i, j int) bool { return ns[i] < ns[j] }
+func (ns uint64s) Swap(i, j int)      { ns[i], ns[j] = ns[j], ns[i] }
diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/recordio/doc.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/recordio/doc.go
new file mode 100644
index 000000000000..bc5b095c846f
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/recordio/doc.go
@@ -0,0 +1,5 @@
+// Package recordio implements the Mesos variant of RecordIO framing, whereby
+// each record is prefixed by a line that indicates the length of the record in
+// decimal ASCII. The bytes of the record immediately follow the length-line.
+// Zero-length records are allowed.
+package recordio
diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/recordio/reader.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/recordio/reader.go
new file mode 100644
index 000000000000..b8d034e82665
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/recordio/reader.go
@@ -0,0 +1,145 @@
+package recordio
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+
+	logger "github.com/mesos/mesos-go/api/v1/lib/debug"
+	"github.com/mesos/mesos-go/api/v1/lib/encoding/framing"
+)
+
+const debug = logger.Logger(false)
+
+type (
+	Opt func(*reader)
+
+	reader struct {
+		*bufio.Scanner
+		pend   int
+		splitf func(data []byte, atEOF bool) (int, []byte, error)
+		maxf   int // max frame size
+	}
+)
+
+// NewReader returns a reader that parses frames from a recordio stream.
+func NewReader(read io.Reader, opt ...Opt) framing.Reader {
+	debug.Log("new frame reader")
+	r := &reader{Scanner: bufio.NewScanner(read)}
+	r.Split(func(data []byte, atEOF bool) (int, []byte, error) {
+		// Scanner panics if we invoke Split after scanning has started,
+		// use this proxy func as a work-around.
+		return r.splitf(data, atEOF)
+	})
+	buf := make([]byte, 16*1024)
+	r.Buffer(buf, 1<<22) // 1<<22 == max protobuf size
+	r.splitf = r.splitSize
+	// apply options
+	for _, f := range opt {
+		if f != nil {
+			f(r)
+		}
+	}
+	return r
+}
+
+// MaxMessageSize returns a functional option that configures the internal Scanner's buffer and max token (message)
+// length, in bytes.
+func MaxMessageSize(max int) Opt {
+	return func(r *reader) {
+		buf := make([]byte, max>>1)
+		r.Buffer(buf, max)
+		r.maxf = max
+	}
+}
+
+func (r *reader) splitSize(data []byte, atEOF bool) (int, []byte, error) {
+	const maxTokenLength = 20 // textual length of largest uint64 number
+	if atEOF {
+		x := len(data)
+		switch {
+		case x == 0:
+			debug.Log("EOF and empty frame, returning io.EOF")
+			return 0, nil, io.EOF
+		case x < 2: // min frame size
+			debug.Log("remaining data less than min total frame length")
+			return 0, nil, framing.ErrorUnderrun
+		}
+		// otherwise, we may have a valid frame...
+	}
+	debug.Log("len(data)=", len(data))
+	adv := 0
+	for {
+		i := 0
+		for ; i < maxTokenLength && i < len(data) && data[i] != '\n'; i++ {
+		}
+		debug.Log("i=", i)
+		if i == len(data) {
+			debug.Log("need more input")
+			return 0, nil, nil // need more input
+		}
+		if i == maxTokenLength && data[i] != '\n' {
+			debug.Log("frame size: max token length exceeded")
+			return 0, nil, framing.ErrorBadSize
+		}
+		n, err := ParseUintBytes(bytes.TrimSpace(data[:i]), 10, 64)
+		if err != nil {
+			debug.Log("failed to parse frame size field:", err)
+			return 0, nil, framing.ErrorBadSize
+		}
+		if r.maxf != 0 && int(n) > r.maxf {
+			debug.Log("frame size max length exceeded:", n)
+			return 0, nil, framing.ErrorOversizedFrame
+		}
+		if n == 0 {
+			// special case... don't invoke splitFrame, just parse the next size header
+			adv += i + 1
+			data = data[i+1:]
+			continue
+		}
+		r.pend = int(n)
+		r.splitf = r.splitFrame
+		debug.Logf("split next frame: %d, %d", n, adv+i+1)
+		return adv + i + 1, data[:0], nil // returning a nil token screws up the Scanner, so return empty
+	}
+}
+
+func (r *reader) splitFrame(data []byte, atEOF bool) (advance int, token []byte, err error) {
+	x := len(data)
+	debug.Log("splitFrame:x=", x, ",eof=", atEOF)
+	if atEOF {
+		if x < r.pend {
+			return 0, nil, framing.ErrorUnderrun
+		}
+	}
+	if r.pend == 0 {
+		panic("asked to read frame data, but no data left in frame")
+	}
+	if x < int(r.pend) {
+		// need more data
+		return 0, nil, nil
+	}
+	r.splitf = r.splitSize
+	adv := int(r.pend)
+	r.pend = 0
+	return adv, data[:adv], nil
+}
+
+// ReadFrame implements framing.Reader
+func (r *reader) ReadFrame() (tok []byte, err error) {
+	for r.Scan() {
+		b := r.Bytes()
+		if len(b) == 0 {
+			continue
+		}
+		tok = b
+		debug.Log("len(tok)", len(tok))
+		break
+	}
+	// either scan failed, or it succeeded and we have a token...
+	err = r.Err()
+	if err == nil && len(tok) == 0 {
+		err = io.EOF
+	}
+	return
+}
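
Round-tripping a frame through this reader and the `Writer` defined later in this patch shows the framing in action: `WriteFrame([]byte("hello"))` emits the decimal length line followed by the payload, i.e. the bytes `"5\nhello"`. A minimal sketch using only APIs from this vendored package:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/mesos/mesos-go/api/v1/lib/recordio"
)

func main() {
	var buf bytes.Buffer

	// WriteFrame emits the decimal length line, then the payload bytes.
	w := recordio.NewWriter(&buf)
	if err := w.WriteFrame([]byte("hello")); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", buf.String()) // "5\nhello"

	// NewReader re-parses the stream one frame at a time.
	r := recordio.NewReader(&buf)
	frame, err := r.ReadFrame()
	fmt.Printf("%s %v\n", frame, err) // hello <nil>
}
```

diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/recordio/strconv.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/recordio/strconv.go
new file mode 100644
index 000000000000..6e2b26812d5f
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/recordio/strconv.go
@@ -0,0 +1,117 @@
+/*
+Copyright 2013 The Camlistore Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.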
+You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package recordio
+
+import (
+	"errors"
+	"strconv"
+)
+
+// ParseUintBytes is like strconv.ParseUint, but using a []byte.
+func ParseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
+	var cutoff, maxVal uint64
+
+	if bitSize == 0 {
+		bitSize = int(strconv.IntSize)
+	}
+
+	s0 := s
+	switch {
+	case len(s) < 1:
+		err = strconv.ErrSyntax
+		goto Error
+
+	case 2 <= base && base <= 36:
+		// valid base; nothing to do
+
+	case base == 0:
+		// Look for octal, hex prefix.
+		switch {
+		case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
+			base = 16
+			s = s[2:]
+			if len(s) < 1 {
+				err = strconv.ErrSyntax
+				goto Error
+			}
+		case s[0] == '0':
+			base = 8
+		default:
+			base = 10
+		}
+
+	default:
+		err = errors.New("invalid base " + strconv.Itoa(base))
+		goto Error
+	}
+
+	n = 0
+	cutoff = cutoff64(base)
+	maxVal = 1<<uint(bitSize) - 1
+
+	for i := 0; i < len(s); i++ {
+		var v byte
+		d := s[i]
+		switch {
+		case '0' <= d && d <= '9':
+			v = d - '0'
+		case 'a' <= d && d <= 'z':
+			v = d - 'a' + 10
+		case 'A' <= d && d <= 'Z':
+			v = d - 'A' + 10
+		default:
+			n = 0
+			err = strconv.ErrSyntax
+			goto Error
+		}
+		if int(v) >= base {
+			n = 0
+			err = strconv.ErrSyntax
+			goto Error
+		}
+
+		if n >= cutoff {
+			// n*base overflows
+			n = 1<<64 - 1
+			err = strconv.ErrRange
+			goto Error
+		}
+		n *= uint64(base)
+
+		n1 := n + uint64(v)
+		if n1 < n || n1 > maxVal {
+			// n+v overflows
+			n = 1<<64 - 1
+			err = strconv.ErrRange
+			goto Error
+		}
+		n = n1
+	}
+
+	return n, nil
+
+Error:
+	return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
+}
+
+// Return the first number n such that n*base >= 1<<64.
+func cutoff64(base int) uint64 {
+	if base < 2 {
+		return 0
+	}
+	return (1<<64-1)/uint64(base) + 1
+}
diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/recordio/writer.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/recordio/writer.go
new file mode 100644
index 000000000000..085f967d97b1
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/recordio/writer.go
@@ -0,0 +1,34 @@
+package recordio
+
+import (
+	"io"
+	"strconv"
+)
+
+var lf = []byte{'\n'}
+
+type Writer struct {
+	out io.Writer
+}
+
+func NewWriter(out io.Writer) *Writer {
+	return &Writer{out}
+}
+
+func (w *Writer) writeBuffer(b []byte, err error) error {
+	if err != nil {
+		return err
+	}
+	n, err := w.out.Write(b)
+	if err == nil && n != len(b) {
+		return io.ErrShortWrite
+	}
+	return err
+}
+
+func (w *Writer) WriteFrame(b []byte) (err error) {
+	err = w.writeBuffer(([]byte)(strconv.Itoa(len(b))), err)
+	err = w.writeBuffer(lf, err)
+	err = w.writeBuffer(b, err)
+	return
+}
diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/resources.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/resources.go
new file mode 100644
index 000000000000..2c348030c4ce
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/resources.go
@@ -0,0 +1,1145 @@
+package mesos
+
+import (
+	"bytes"
+	"fmt"
+	"strconv"
+
+	"github.com/gogo/protobuf/proto"
+	"github.com/mesos/mesos-go/api/v1/lib/roles"
+)
+
+const DefaultRole = "*"
+
+type (
+	Resources         []Resource
+	resourceErrorType int
+
+	resourceError struct {
+		errorType resourceErrorType
+		reason    string
+		spec      Resource
+	}
+)
+
+const (
+	resourceErrorTypeIllegalName resourceErrorType = iota
+	resourceErrorTypeIllegalType
+	resourceErrorTypeUnsupportedType
+	resourceErrorTypeIllegalScalar
+	resourceErrorTypeIllegalRanges
+	resourceErrorTypeIllegalSet
+	resourceErrorTypeIllegalDisk
+	resourceErrorTypeIllegalReservation
+	resourceErrorTypeIllegalShare
+
+	noReason = "" // make error generation code more readable
+)
+
+var (
+	resourceErrorMessages = map[resourceErrorType]string{
+		resourceErrorTypeIllegalName:        "missing or illegal resource name",
+		resourceErrorTypeIllegalType:        "missing or illegal resource type",
+		resourceErrorTypeUnsupportedType:    "unsupported resource type",
+		resourceErrorTypeIllegalScalar:      "illegal scalar resource",
+		resourceErrorTypeIllegalRanges:      "illegal ranges resource",
+		resourceErrorTypeIllegalSet:         "illegal set resource",
+		resourceErrorTypeIllegalDisk:        "illegal disk resource",
+		resourceErrorTypeIllegalReservation: "illegal resource reservation",
+		resourceErrorTypeIllegalShare:       "illegal shared resource",
+	}
+)
+
+func (t resourceErrorType) Generate(reason string) error {
+	msg := resourceErrorMessages[t]
+	if reason != noReason {
+		if msg != "" {
+			msg += ": " + reason
+		} else {
+			msg = reason
+		}
+	}
+	return &resourceError{errorType: t, reason: msg}
+}
+
+func (err *resourceError) Reason() string          { return err.reason }
+func (err *resourceError) Resource() Resource      { return err.spec }
+func (err *resourceError) WithResource(r Resource) { err.spec = r }
+
+func (err *resourceError) Error() string {
+	// TODO(jdef) include additional context here? (type, resource)
+	if err.reason != "" {
+		return "resource error: " + err.reason
+	}
+	return "resource error"
+}
+
+func IsResourceError(err error) (ok bool) {
+	_, ok = err.(*resourceError)
+	return
+}
+
+func (r *Resource_ReservationInfo) Assign() func(interface{}) {
+	return func(v interface{}) {
+		type reserver interface {
+			WithReservation(*Resource_ReservationInfo)
+		}
+		if ri, ok := v.(reserver); ok {
+			ri.WithReservation(r)
+		}
+	}
+}
+
+func (resources Resources) Clone() Resources {
+	if resources == nil {
+		return nil
+	}
+	clone := make(Resources, 0, len(resources))
+	for i := range resources {
+		rr := proto.Clone(&resources[i]).(*Resource)
+		clone = append(clone, *rr)
+	}
+	return clone
+}
+
+// Minus calculates and returns the result of `resources - that` without modifying either
+// the receiving `resources` or `that`.
+func (resources Resources) Minus(that ...Resource) Resources {
+	x := resources.Clone()
+	return x.Subtract(that...)
+}
+
+// Subtract subtracts `that` from the receiving `resources` and returns the result (the modified
+// `resources` receiver).
+func (resources *Resources) Subtract(that ...Resource) (rs Resources) {
+	if resources != nil {
+		if len(that) > 0 {
+			x := make(Resources, len(that))
+			copy(x, that)
+			that = x
+
+			for i := range that {
+				resources.Subtract1(that[i])
+			}
+		}
+		rs = *resources
+	}
+	return
+}
+
+// Plus calculates and returns the result of `resources + that` without modifying either
+// the receiving `resources` or `that`.
+func (resources Resources) Plus(that ...Resource) Resources {
+	x := resources.Clone()
+	return x.Add(that...)
+}
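
`Minus`/`Plus` are the non-destructive counterparts of `Subtract`/`Add`: they clone first, so the receiver is untouched. A short usage sketch; the `scalar` helper is hypothetical, built from the `Resource` fields used elsewhere in this file:

```go
package main

import (
	"fmt"

	mesos "github.com/mesos/mesos-go/api/v1/lib"
)

// scalar builds an unreserved scalar resource (a helper for this
// sketch only; it is not part of the vendored package).
func scalar(name string, v float64) mesos.Resource {
	return mesos.Resource{
		Name:   name,
		Type:   mesos.SCALAR.Enum(),
		Scalar: &mesos.Value_Scalar{Value: v},
	}
}

func main() {
	total := mesos.Resources{scalar("cpus", 4), scalar("mem", 1024)}
	used := total.Minus(scalar("cpus", 1.5)) // non-destructive
	fmt.Println(used)  // cpus:2.5;mem:1024
	fmt.Println(total) // cpus:4;mem:1024 (unchanged)
}
```

+
+// Add adds `that` to the receiving `resources` and returns the result (the modified
+// `resources` receiver).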
+func (resources *Resources) Add(that ...Resource) (rs Resources) {
+	if resources != nil {
+		rs = *resources
+	}
+	for i := range that {
+		rs = rs._add(that[i])
+	}
+	if resources != nil {
+		*resources = rs
+	}
+	return
+}
+
+// Add1 adds `that` to the receiving `resources` and returns the result (the modified
+// `resources` receiver).
+func (resources *Resources) Add1(that Resource) (rs Resources) {
+	if resources != nil {
+		rs = *resources
+	}
+	rs = rs._add(that)
+	if resources != nil {
+		*resources = rs
+	}
+	return
+}
+
+func (resources Resources) _add(that Resource) Resources {
+	if that.Validate() != nil || that.IsEmpty() {
+		return resources
+	}
+	for i := range resources {
+		r := &resources[i]
+		if r.Addable(that) {
+			r.Add(that)
+			return resources
+		}
+	}
+	// cannot be combined with an existing resource
+	r := proto.Clone(&that).(*Resource)
+	return append(resources, *r)
+}
+
+// Minus1 calculates and returns the result of `resources - that` without modifying either
+// the receiving `resources` or `that`.
+func (resources *Resources) Minus1(that Resource) Resources {
+	x := resources.Clone()
+	return x.Subtract1(that)
+}
+
+// Subtract1 subtracts `that` from the receiving `resources` and returns the result (the modified
+// `resources` receiver).
+func (resources *Resources) Subtract1(that Resource) Resources {
+	if resources == nil {
+		return nil
+	}
+	if that.Validate() == nil && !that.IsEmpty() {
+		for i := range *resources {
+			r := &(*resources)[i]
+			if r.Subtractable(that) {
+				r.Subtract(that)
+				// remove the resource if it becomes invalid or zero.
+				// need to do validation in order to strip negative scalar
+				// resource objects.
+				if r.Validate() != nil || r.IsEmpty() {
+					// delete resource at i, without leaking an uncollectable Resource
+					// a, a[len(a)-1] = append(a[:i], a[i+1:]...), nil
+					(*resources), (*resources)[len((*resources))-1] = append((*resources)[:i], (*resources)[i+1:]...), Resource{}
+				}
+				break
+			}
+		}
+	}
+	return *resources
+}
+
+// String returns a human-friendly representation of the resource collection using default formatting
+// options (e.g. allocation-info is not rendered). For additional control over resource formatting see
+// the Format func.
+func (resources Resources) String() string {
+	return resources.Format()
+}
+
+type ResourcesFormatOptions struct {
+	ShowAllocated bool // ShowAllocated when true will display resource allocation info
+}
+
+func (resources Resources) Format(options ...func(*ResourcesFormatOptions)) string {
+	if len(resources) == 0 {
+		return ""
+	}
+	var f ResourcesFormatOptions
+	for _, o := range options {
+		if o != nil {
+			o(&f)
+		}
+	}
+	// TODO(jdef) use a strings.Builder once we can rely on a more modern golang version
+	buf := bytes.Buffer{}
+	for i := range resources {
+		if i > 0 {
+			buf.WriteString(";")
+		}
+		r := &resources[i]
+		buf.WriteString(r.Name)
+		if r.AllocationInfo != nil && f.ShowAllocated {
+			buf.WriteString("(allocated: ")
+			buf.WriteString(r.AllocationInfo.GetRole())
+			buf.WriteString(")")
+		}
+		if res := r.Reservations; len(res) > 0 || (r.Role != nil && *r.Role != "*") {
+			if len(res) == 0 {
+				res = make([]Resource_ReservationInfo, 0, 1)
+				if r.Reservation == nil {
+					res = append(res, Resource_ReservationInfo{
+						Type: Resource_ReservationInfo_STATIC.Enum(),
+						Role: r.Role,
+					})
+				} else {
+					res = append(res, *r.Reservation) // copy!
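+					// the legacy Resource.reservation field is rendered as a DYNAMIC entry: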
+					res[0].Type = Resource_ReservationInfo_DYNAMIC.Enum()
+					res[0].Role = r.Role
+				}
+			}
+			buf.WriteString("(reservations: [")
+			for j := range res {
+				if j > 0 {
+					buf.WriteString(",")
+				}
+				rr := &res[j]
+				buf.WriteString("(")
+				buf.WriteString(rr.GetType().String())
+				buf.WriteString(",")
+				buf.WriteString(rr.GetRole())
+				if rr.Principal != nil {
+					buf.WriteString(",")
+					buf.WriteString(*rr.Principal)
+				}
+				if rr.Labels != nil {
+					buf.WriteString(",{")
+					rr.GetLabels().writeTo(&buf)
+					buf.WriteString("}")
+				}
+				buf.WriteString(")")
+			}
+			buf.WriteString("])")
+		}
+		if d := r.GetDisk(); d != nil {
+			buf.WriteString("[")
+			if s := d.GetSource(); s != nil {
+				switch s.GetType() {
+				case Resource_DiskInfo_Source_BLOCK:
+					buf.WriteString("BLOCK")
+					if id, profile := s.GetID(), s.GetProfile(); id != "" || profile != "" {
+						buf.WriteByte('(')
+						buf.WriteString(id)
+						buf.WriteByte(',')
+						buf.WriteString(profile)
+						buf.WriteByte(')')
+					}
+				case Resource_DiskInfo_Source_RAW:
+					buf.WriteString("RAW")
+					if id, profile := s.GetID(), s.GetProfile(); id != "" || profile != "" {
+						buf.WriteByte('(')
+						buf.WriteString(id)
+						buf.WriteByte(',')
+						buf.WriteString(profile)
+						buf.WriteByte(')')
+					}
+				case Resource_DiskInfo_Source_PATH:
+					buf.WriteString("PATH")
+					if id, profile := s.GetID(), s.GetProfile(); id != "" || profile != "" {
+						buf.WriteByte('(')
+						buf.WriteString(id)
+						buf.WriteByte(',')
+						buf.WriteString(profile)
+						buf.WriteByte(')')
+					} else if root := s.GetPath().GetRoot(); root != "" {
+						buf.WriteByte(':')
+						buf.WriteString(root)
+					}
+				case Resource_DiskInfo_Source_MOUNT:
+					buf.WriteString("MOUNT")
+					if id, profile := s.GetID(), s.GetProfile(); id != "" || profile != "" {
+						buf.WriteByte('(')
+						buf.WriteString(id)
+						buf.WriteByte(',')
+						buf.WriteString(profile)
+						buf.WriteByte(')')
+					} else if root := s.GetMount().GetRoot(); root != "" {
+						buf.WriteByte(':')
+						buf.WriteString(root)
+					}
+				}
+			}
+			if p := d.GetPersistence(); p != nil {
+				if d.GetSource() != nil {
+					buf.WriteString(",")
+				}
+				buf.WriteString(p.GetID())
+			}
+			if v := d.GetVolume(); v != nil {
+				buf.WriteString(":")
+				vconfig := v.GetContainerPath()
+				if h := v.GetHostPath(); h != "" {
+					vconfig = h + ":" + vconfig
+				}
+				if m := v.Mode; m != nil {
+					switch *m {
+					case RO:
+						vconfig += ":ro"
+					case RW:
+						vconfig += ":rw"
+					default:
+						panic("unrecognized volume mode: " + m.String())
+					}
+				}
+				buf.WriteString(vconfig)
+			}
+			buf.WriteString("]")
+		}
+		if r.Revocable != nil {
+			buf.WriteString("{REV}")
+		}
+		if r.Shared != nil {
+			buf.WriteString("<SHARED>")
+		}
+		buf.WriteString(":")
+		switch r.GetType() {
+		case SCALAR:
+			buf.WriteString(strconv.FormatFloat(r.GetScalar().GetValue(), 'f', -1, 64))
+		case RANGES:
+			buf.WriteString("[")
+			ranges := Ranges(r.GetRanges().GetRange())
+			for j := range ranges {
+				if j > 0 {
+					buf.WriteString(",")
+				}
+				if b, e := ranges[j].Begin, ranges[j].End; b == e {
+					buf.WriteString(strconv.FormatUint(b, 10))
+				} else {
+					buf.WriteString(strconv.FormatUint(b, 10))
+					buf.WriteString("-")
+					buf.WriteString(strconv.FormatUint(e, 10))
+				}
+			}
+			buf.WriteString("]")
+		case SET:
+			buf.WriteString("{")
+			items := r.GetSet().GetItem()
+			for j := range items {
+				if j > 0 {
+					buf.WriteString(",")
+				}
+				buf.WriteString(items[j])
+			}
+			buf.WriteString("}")
+		}
+	}
+	return buf.String()
+}
+
+func (left *Resource) Validate() error {
+	if left.GetName() == "" {
+		return resourceErrorTypeIllegalName.Generate(noReason)
+	}
+	if _, ok := Value_Type_name[int32(left.GetType())]; !ok {
+		return 
resourceErrorTypeIllegalType.Generate(noReason) + } + switch left.GetType() { + case SCALAR: + if s := left.GetScalar(); s == nil || left.GetRanges() != nil || left.GetSet() != nil { + return resourceErrorTypeIllegalScalar.Generate(noReason) + } else if s.GetValue() < 0 { + return resourceErrorTypeIllegalScalar.Generate("value < 0") + } + case RANGES: + r := left.GetRanges() + if left.GetScalar() != nil || r == nil || left.GetSet() != nil { + return resourceErrorTypeIllegalRanges.Generate(noReason) + } + for i, rr := range r.GetRange() { + // ensure that ranges are not inverted + if rr.Begin > rr.End { + return resourceErrorTypeIllegalRanges.Generate("begin > end") + } + // ensure that ranges don't overlap (but not necessarily squashed) + for j := i + 1; j < len(r.GetRange()); j++ { + r2 := r.GetRange()[j] + if rr.Begin <= r2.Begin && r2.Begin <= rr.End { + return resourceErrorTypeIllegalRanges.Generate("overlapping ranges") + } + } + } + case SET: + s := left.GetSet() + if left.GetScalar() != nil || left.GetRanges() != nil || s == nil { + return resourceErrorTypeIllegalSet.Generate(noReason) + } + unique := make(map[string]struct{}, len(s.GetItem())) + for _, x := range s.GetItem() { + if _, found := unique[x]; found { + return resourceErrorTypeIllegalSet.Generate("duplicated elements") + } + unique[x] = struct{}{} + } + default: + return resourceErrorTypeUnsupportedType.Generate(noReason) + } + + // check for disk resource + if disk := left.GetDisk(); disk != nil { + if left.GetName() != "disk" { + return resourceErrorTypeIllegalDisk.Generate("DiskInfo should not be set for \"" + left.GetName() + "\" resource") + } + if s := disk.GetSource(); s != nil { + switch s.GetType() { + case Resource_DiskInfo_Source_PATH, + Resource_DiskInfo_Source_MOUNT: + // these only contain optional members + case Resource_DiskInfo_Source_BLOCK, + Resource_DiskInfo_Source_RAW: + // TODO(jdef): update w/ validation once the format of BLOCK and RAW + // disks is known. 
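+			// an UNKNOWN source type always fails validation: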
+ case Resource_DiskInfo_Source_UNKNOWN: + return resourceErrorTypeIllegalDisk.Generate(fmt.Sprintf("unsupported DiskInfo.Source.Type in %q", s)) + } + } + } + + if rs := left.GetReservations(); len(rs) == 0 { + // check for "pre-reservation-refinement" format + if _, err := roles.Parse(left.GetRole()); err != nil { + return resourceErrorTypeIllegalReservation.Generate(err.Error()) + } + + if r := left.GetReservation(); r != nil { + if r.Type != nil { + return resourceErrorTypeIllegalReservation.Generate( + "Resource.ReservationInfo.type must not be set for the Resource.reservation field") + } + if r.Role != nil { + return resourceErrorTypeIllegalReservation.Generate( + "Resource.ReservationInfo.role must not be set for the Resource.reservation field") + } + // check for invalid state of (role,reservation) pair + if left.GetRole() == "*" { + return resourceErrorTypeIllegalReservation.Generate("default role cannot be dynamically reserved") + } + } + } else { + // check for "post-reservation-refinement" format + for i := range rs { + r := &rs[i] + if r.Type == nil { + return resourceErrorTypeIllegalReservation.Generate( + "Resource.ReservationInfo.type must be set") + } + if r.Role == nil { + return resourceErrorTypeIllegalReservation.Generate( + "Resource.ReservationInfo.role must be set") + } + if _, err := roles.Parse(r.GetRole()); err != nil { + return resourceErrorTypeIllegalReservation.Generate(err.Error()) + } + if r.GetRole() == "*" { + return resourceErrorTypeIllegalReservation.Generate( + "role '*' cannot be reserved") + } + } + // check that reservations are correctly refined + ancestor := rs[0].GetRole() + for i := 1; i < len(rs); i++ { + r := &rs[i] + if r.GetType() == Resource_ReservationInfo_STATIC { + return resourceErrorTypeIllegalReservation.Generate( + "a refined reservation cannot be STATIC") + } + child := r.GetRole() + if !roles.IsStrictSubroleOf(child, ancestor) { + return resourceErrorTypeIllegalReservation.Generate(fmt.Sprintf( + "role %q is not a refinement of %q", child, ancestor)) + } + } + + // Additionally, we allow the "pre-reservation-refinement" format to be set + // as long as there is only one reservation, and the `Resource.role` and + // `Resource.reservation` fields are consistent with the reservation. 
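+		// The checks below enforce that consistency for both STATIC and DYNAMIC reservations.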
+ if len(rs) == 1 { + if r := left.Role; r != nil && *r != rs[0].GetRole() { + return resourceErrorTypeIllegalReservation.Generate(fmt.Sprintf( + "'Resource.role' field with %q does not match the role %q in 'Resource.reservations'", + *r, rs[0].GetRole())) + } + + switch rs[0].GetType() { + case Resource_ReservationInfo_STATIC: + if left.Reservation != nil { + return resourceErrorTypeIllegalReservation.Generate( + "'Resource.reservation' must not be set if the single reservation in 'Resource.reservations' is STATIC") + } + case Resource_ReservationInfo_DYNAMIC: + if (left.Role == nil) != (left.GetReservation() == nil) { + return resourceErrorTypeIllegalReservation.Generate( + "'Resource.role' and 'Resource.reservation' must both be set or both not be set if the single reservation in 'Resource.reservations' is DYNAMIC") + } + if r := left.GetReservation(); r != nil && r.GetPrincipal() != rs[0].GetPrincipal() { + return resourceErrorTypeIllegalReservation.Generate(fmt.Sprintf( + "'Resource.reservation.principal' with %q does not match the principal %q in 'Resource.reservations'", + r.GetPrincipal(), rs[0].GetPrincipal())) + } + if r := left.GetReservation(); r != nil && !r.GetLabels().Equivalent(rs[0].GetLabels()) { + return resourceErrorTypeIllegalReservation.Generate(fmt.Sprintf( + "'Resource.reservation.labels' with %q does not match the labels %q in 'Resource.reservations'", + r.GetLabels(), rs[0].GetLabels())) + } + case Resource_ReservationInfo_UNKNOWN: + return resourceErrorTypeIllegalReservation.Generate("Unsupported 'Resource.ReservationInfo.type'") + } + } else { + if r := left.Role; r != nil { + return resourceErrorTypeIllegalReservation.Generate( + "'Resource.role' must not be set if there is more than one reservation in 'Resource.reservations'") + } + if r := left.GetReservation(); r != nil { + return resourceErrorTypeIllegalReservation.Generate( + "'Resource.reservation' must not be set if there is more than one reservation in 'Resource.reservations'") + } + } + } + + // Check that shareability is enabled for supported resource types. + // For now, it is for persistent volumes only. + // NOTE: We need to modify this once we extend shareability to other + // resource types. + if s := left.GetShared(); s != nil { + if left.GetName() != "disk" { + return resourceErrorTypeIllegalShare.Generate(fmt.Sprintf( + "Resource %q cannot be shared", left.GetName())) + } + if p := left.GetDisk().GetPersistence(); p == nil { + return resourceErrorTypeIllegalShare.Generate("only persistent volumes can be shared") + } + } + + return nil +} + +func (left *Resource_AllocationInfo) Equivalent(right *Resource_AllocationInfo) bool { + if (left == nil) != (right == nil) { + return false + } else if left == nil { + return true + } + if (left.Role == nil) != (right.Role == nil) { + return false + } + if left.Role != nil && *left.Role != *right.Role { + return false + } + return true +} + +func (r *Resource_ReservationInfo) Equivalent(right *Resource_ReservationInfo) bool { + // TODO(jdef) should we consider equivalency of both pre- and post-refinement formats, + // such that a pre-refinement format could be the equivalent of a post-refinement format + // if defined just the right way? 
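+	// For now, equivalence is a strict field-by-field comparison.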
+	if (r == nil) != (right == nil) {
+		return false
+	} else if r == nil {
+		return true
+	}
+	if (r.Type == nil) != (right.Type == nil) {
+		return false
+	}
+	if r.Type != nil && *r.Type != *right.Type {
+		return false
+	}
+	if (r.Role == nil) != (right.Role == nil) {
+		return false
+	}
+	if r.Role != nil && *r.Role != *right.Role {
+		return false
+	}
+	if (r.Principal == nil) != (right.Principal == nil) {
+		return false
+	}
+	if r.Principal != nil && *r.Principal != *right.Principal {
+		return false
+	}
+	return r.Labels.Equivalent(right.Labels)
+}
+
+func (left *Resource_DiskInfo) Equivalent(right *Resource_DiskInfo) bool {
+	// NOTE: We ignore 'volume' inside DiskInfo when doing comparison
+	// because it describes how this resource will be used which has
+	// nothing to do with the Resource object itself. A framework can
+	// use this resource and specify different 'volume' every time it
+	// uses it.
+	// see https://github.com/apache/mesos/blob/0.25.0/src/common/resources.cpp#L67
+	if (left == nil) != (right == nil) {
+		return false
+	}
+
+	if a, b := left.GetSource(), right.GetSource(); (a == nil) != (b == nil) {
+		return false
+	} else if a != nil {
+		if a.GetType() != b.GetType() {
+			return false
+		}
+		if aa, bb := a.GetMount(), b.GetMount(); (aa == nil) != (bb == nil) {
+			return false
+		} else if aa.GetRoot() != bb.GetRoot() {
+			return false
+		}
+		if aa, bb := a.GetPath(), b.GetPath(); (aa == nil) != (bb == nil) {
+			return false
+		} else if aa.GetRoot() != bb.GetRoot() {
+			return false
+		}
+		if aa, bb := a.GetID(), b.GetID(); aa != bb {
+			return false
+		}
+		if aa, bb := a.GetProfile(), b.GetProfile(); aa != bb {
+			return false
+		}
+		if aa, bb := a.GetMetadata(), b.GetMetadata(); (aa == nil) != (bb == nil) {
+			return false
+		} else if !labelList(aa.GetLabels()).Equivalent(labelList(bb.GetLabels())) {
+			return false
+		}
+	}
+
+	if a, b := left.GetPersistence(), right.GetPersistence(); (a == nil) != (b == nil) {
+		return false
+	} else if a != nil {
+		return a.GetID() == b.GetID()
+	}
+
+	return true
+}
+
+// Equivalent returns true if right is equivalent to left (differs from Equal in that
+// deeply nested values are tested for equivalence, not equality).
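+// A nil receiver is equivalent to an empty resource.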
+func (left *Resource) Equivalent(right Resource) bool {
+	if left == nil {
+		return right.IsEmpty()
+	}
+	if left.GetName() != right.GetName() ||
+		left.GetType() != right.GetType() ||
+		left.GetRole() != right.GetRole() {
+		return false
+	}
+	if a, b := left.GetAllocationInfo(), right.GetAllocationInfo(); !a.Equivalent(b) {
+		return false
+	}
+	if a, b := left.GetReservations(), right.GetReservations(); len(a) != len(b) {
+		return false
+	} else {
+		for i := range a {
+			ri := &a[i]
+			if !ri.Equivalent(&b[i]) {
+				return false
+			}
+		}
+	}
+	if !left.GetReservation().Equivalent(right.GetReservation()) {
+		return false
+	}
+	if !left.GetDisk().Equivalent(right.GetDisk()) {
+		return false
+	}
+	if (left.Revocable == nil) != (right.Revocable == nil) {
+		return false
+	}
+	if a, b := left.ProviderID, right.ProviderID; (a == nil) != (b == nil) {
+		return false
+	} else if a != nil && a.Value != b.Value {
+		return false
+	}
+	if a, b := left.Shared, right.Shared; (a == nil) != (b == nil) {
+		return false
+	}
+
+	switch left.GetType() {
+	case SCALAR:
+		return left.GetScalar().Compare(right.GetScalar()) == 0
+	case RANGES:
+		return Ranges(left.GetRanges().GetRange()).Equivalent(right.GetRanges().GetRange())
+	case SET:
+		return left.GetSet().Compare(right.GetSet()) == 0
+	default:
+		return false
+	}
+}
+
+// Addable tests if we can add two Resource objects together resulting in one
+// valid Resource object. For example, two Resource objects with
+// different name, type or role are not addable.
+func (left *Resource) Addable(right Resource) bool {
+	if left == nil {
+		return true
+	}
+	if left.GetName() != right.GetName() ||
+		left.GetType() != right.GetType() ||
+		left.GetRole() != right.GetRole() {
+		return false
+	}
+
+	if a, b := left.GetShared(), right.GetShared(); (a == nil) != (b == nil) {
+		// shared has no fields
+		return false
+	}
+
+	if a, b := left.GetAllocationInfo(), right.GetAllocationInfo(); !a.Equivalent(b) {
+		return false
+	}
+
+	if !left.GetReservation().Equivalent(right.GetReservation()) {
+		return false
+	}
+
+	if a, b := left.Reservations, right.Reservations; len(a) != len(b) {
+		return false
+	} else {
+		for i := range a {
+			aa := &a[i]
+			if !aa.Equivalent(&b[i]) {
+				return false
+			}
+		}
+	}
+
+	if !left.GetDisk().Equivalent(right.GetDisk()) {
+		return false
+	}
+
+	if ls := left.GetDisk().GetSource(); ls != nil {
+		switch ls.GetType() {
+		case Resource_DiskInfo_Source_PATH:
+			// Two PATH resources can be added if their disks are identical
+		case Resource_DiskInfo_Source_BLOCK,
+			Resource_DiskInfo_Source_MOUNT:
+			// Two resources that represent exclusive 'BLOCK' or 'MOUNT' disks
+			// cannot be added together; this would defeat the exclusivity.
+			return false
+		case Resource_DiskInfo_Source_RAW:
+			// We can only add resources representing 'RAW' disks if
+			// they have no identity or are identical.
+			if ls.GetID() != "" {
+				return false
+			}
+		case Resource_DiskInfo_Source_UNKNOWN:
+			panic("unreachable")
+		}
+	}
+
+	// from apache/mesos: src/common/resources.cpp
+	// TODO(jieyu): Even if two Resource objects with DiskInfo have the
+	// same persistence ID, they cannot be added together. In fact, this
+	// shouldn't happen if we do not add resources from different
+	// namespaces (e.g., across slave). Consider adding a warning.
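+	// Until then, any resource with a persistence ID is treated as non-addable.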
+	if left.GetDisk().GetPersistence() != nil {
+		return false
+	}
+	if (left.GetRevocable() == nil) != (right.GetRevocable() == nil) {
+		return false
+	}
+	if a, b := left.GetProviderID(), right.GetProviderID(); (a == nil) != (b == nil) {
+		return false
+	} else if a != nil && a.Value != b.Value {
+		return false
+	}
+	return true
+}
+
+// Subtractable tests if we can subtract "right" from "left" resulting in one
+// valid Resource object. For example, two Resource objects with different
+// name, type or role are not subtractable.
+// NOTE: Set subtraction is always well defined, it does not require
+// 'right' to be contained within 'left'. For example, assuming that
+// "left = {1, 2}" and "right = {2, 3}", "left" and "right" are
+// subtractable because "left - right = {1}". However, "left" does not
+// contain "right".
+func (left *Resource) Subtractable(right Resource) bool {
+	if left.GetName() != right.GetName() ||
+		left.GetType() != right.GetType() ||
+		left.GetRole() != right.GetRole() {
+		return false
+	}
+	if a, b := left.GetShared(), right.GetShared(); (a == nil) != (b == nil) {
+		// shared has no fields
+		return false
+	}
+
+	if a, b := left.GetAllocationInfo(), right.GetAllocationInfo(); !a.Equivalent(b) {
+		return false
+	}
+
+	if !left.GetReservation().Equivalent(right.GetReservation()) {
+		return false
+	}
+	if a, b := left.Reservations, right.Reservations; len(a) != len(b) {
+		return false
+	} else {
+		for i := range a {
+			aa := &a[i]
+			if !aa.Equivalent(&b[i]) {
+				return false
+			}
+		}
+	}
+
+	if !left.GetDisk().Equivalent(right.GetDisk()) {
+		return false
+	}
+
+	if ls := left.GetDisk().GetSource(); ls != nil {
+		switch ls.GetType() {
+		case Resource_DiskInfo_Source_PATH:
+			// Two PATH resources can be subtracted if their disks are identical
+		case Resource_DiskInfo_Source_BLOCK,
+			Resource_DiskInfo_Source_MOUNT:
+			// Two resources that represent exclusive 'BLOCK' or 'MOUNT' disks
+			// cannot be subtracted from each other if they are not the same;
+			// this would defeat the exclusivity.
+			if !left.Equivalent(right) {
+				return false
+			}
+		case Resource_DiskInfo_Source_RAW:
+			// We can only subtract resources representing 'RAW' disks if
+			// they have no identity or refer to the same disk.
+			if ls.GetID() != "" && !left.Equivalent(right) {
+				return false
+			}
+		case Resource_DiskInfo_Source_UNKNOWN:
+			panic("unreachable")
+		}
+	}
+
+	// NOTE: For Resource objects that have DiskInfo, we can only do
+	// subtraction if they are **equal**.
+	if left.GetDisk().GetPersistence() != nil && !left.Equivalent(right) {
+		return false
+	}
+	if (left.GetRevocable() == nil) != (right.GetRevocable() == nil) {
+		return false
+	}
+	if a, b := left.GetProviderID(), right.GetProviderID(); (a == nil) != (b == nil) {
+		return false
+	} else if a != nil && a.Value != b.Value {
+		return false
+	}
+	return true
+}
+
+// Contains tests if "right" is contained in "left".
+func (left Resource) Contains(right Resource) bool {
+	if !left.Subtractable(right) {
+		return false
+	}
+	switch left.GetType() {
+	case SCALAR:
+		return right.GetScalar().Compare(left.GetScalar()) <= 0
+	case RANGES:
+		return right.GetRanges().Compare(left.GetRanges()) <= 0
+	case SET:
+		return right.GetSet().Compare(left.GetSet()) <= 0
+	default:
+		return false
+	}
+}
+
+// Subtract removes right from left.
+// This func panics if the resource types don't match.
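+// Only the scalar, ranges, or set value is modified; other fields are untouched.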
+func (left *Resource) Subtract(right Resource) {
+	switch right.checkType(left.GetType()) {
+	case SCALAR:
+		left.Scalar = left.GetScalar().Subtract(right.GetScalar())
+	case RANGES:
+		left.Ranges = left.GetRanges().Subtract(right.GetRanges())
+	case SET:
+		left.Set = left.GetSet().Subtract(right.GetSet())
+	}
+}
+
+// Add adds right to left.
+// This func panics if the resource types don't match.
+func (left *Resource) Add(right Resource) {
+	switch right.checkType(left.GetType()) {
+	case SCALAR:
+		left.Scalar = left.GetScalar().Add(right.GetScalar())
+	case RANGES:
+		left.Ranges = left.GetRanges().Add(right.GetRanges())
+	case SET:
+		left.Set = left.GetSet().Add(right.GetSet())
+	}
+}
+
+// checkType panics if the type of this resources != t
+func (left *Resource) checkType(t Value_Type) Value_Type {
+	if left != nil && left.GetType() != t {
+		panic(fmt.Sprintf("expected type %v instead of %v", t, left.GetType()))
+	}
+	return t
+}
+
+// IsEmpty returns true if the value of this resource is equivalent to the zero-value,
+// where a zero-length slice or map is equivalent to a nil reference to such.
+func (left *Resource) IsEmpty() bool {
+	if left == nil {
+		return true
+	}
+	switch left.GetType() {
+	case SCALAR:
+		return left.GetScalar().GetValue() == 0
+	case RANGES:
+		return len(left.GetRanges().GetRange()) == 0
+	case SET:
+		return len(left.GetSet().GetItem()) == 0
+	}
+	return false
+}
+
+// IsUnreserved returns true if this resource is neither statically nor dynamically reserved.
+// A resource is considered statically reserved if it has a non-default role.
+func (left *Resource) IsUnreserved() bool {
+	// role != RoleDefault -> static reservation
+	// GetReservation() != nil -> dynamic reservation
+	// return {no-static-reservation} && {no-dynamic-reservation}
+	return (left.Role == nil || left.GetRole() == "*") && left.GetReservation() == nil && len(left.GetReservations()) == 0
+}
+
+// IsReserved returns true if this resource has been reserved for the given role.
+// If role=="" then return true if there are any static or dynamic reservations for this resource.
+// It's expected that this Resource has already been validated (see Validate).
+func (left *Resource) IsReserved(role string) bool {
+	return !left.IsUnreserved() && (role == "" || role == left.ReservationRole())
+}
+
+// ReservationRole returns the role for which the resource is reserved. Callers should check the
+// reservation status of the resource via IsReserved prior to invoking this func.
+func (r *Resource) ReservationRole() string {
+	// if using reservation refinement, return the role of the last refinement
+	rs := r.GetReservations()
+	if x := len(rs); x > 0 {
+		return rs[x-1].GetRole()
+	}
+	// if using the old reservation API, role is a first class field of Resource
+	// (and it's never stored in Resource.Reservation).
+	return r.GetRole()
+}
+
+// IsAllocatableTo returns true if the resource may be allocated to the given role.
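+// A reserved resource is allocatable to its reservation role and to any strict subrole of it.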
+func (left *Resource) IsAllocatableTo(role string) bool {
+	if left.IsUnreserved() {
+		return true
+	}
+	r := left.ReservationRole()
+	return role == r || roles.IsStrictSubroleOf(role, r)
+}
+
+// IsDynamicallyReserved returns true if this resource has a non-nil reservation descriptor
+func (left *Resource) IsDynamicallyReserved() bool {
+	if left.IsReserved("") {
+		if left.GetReservation() != nil {
+			return true
+		}
+		rs := left.GetReservations()
+		return rs[len(rs)-1].GetType() == Resource_ReservationInfo_DYNAMIC
+	}
+	return false
+}
+
+// IsRevocable returns true if this resource has a non-nil revocable descriptor
+func (left *Resource) IsRevocable() bool {
+	return left.GetRevocable() != nil
+}
+
+// IsPersistentVolume returns true if this is a disk resource with a non-nil Persistence descriptor
+func (left *Resource) IsPersistentVolume() bool {
+	return left.GetDisk().GetPersistence() != nil
+}
+
+// IsDisk returns true if this is a disk resource of the specified type.
+func (left *Resource) IsDisk(t Resource_DiskInfo_Source_Type) bool {
+	if s := left.GetDisk().GetSource(); s != nil {
+		return s.GetType() == t
+	}
+	return false
+}
+
+// HasResourceProvider returns true if the given Resource object is provided by a resource provider.
+func (left *Resource) HasResourceProvider() bool {
+	return left.GetProviderID() != nil
+}
+
+// ToUnreserved returns a (cloned) view of the Resources w/o any reservation data. It does not modify
+// the receiver.
+func (rs Resources) ToUnreserved() (result Resources) {
+	if rs == nil {
+		return nil
+	}
+	for i := range rs {
+		r := rs[i] // intentionally shallow-copy
+		r.Reservations = nil
+		r.Reservation = nil
+		r.Role = nil
+		result.Add1(r)
+	}
+	return
+}
+
+// PushReservation returns a cloned set of Resources w/ the given resource refinement.
+// Panics if resources become invalid as a result of pushing the reservation (e.g. pre- and post-
+// refinement modes are mixed).
+func (rs Resources) PushReservation(ri Resource_ReservationInfo) (result Resources) {
+push_next:
+	for i := range rs {
+		if rs[i].IsEmpty() {
+			continue
+		}
+		r := proto.Clone(&rs[i]).(*Resource) // we don't want to impact rs
+		r.Reservations = append(r.Reservations, *(proto.Clone(&ri).(*Resource_ReservationInfo)))
+
+		if err := r.Validate(); err != nil {
+			panic(err)
+		}
+
+		// unroll Add1 to avoid additional calls to Clone
+		rr := *r
+		for j := range result {
+			r2 := &result[j]
+			if r2.Addable(rr) {
+				r2.Add(rr)
+				continue push_next
+			}
+		}
+		// cannot be combined with an existing resource
+		result = append(result, rr)
+	}
+	return
+}
+
+// PopReservation returns a cloned set of Resources wherein the most recent reservation refinement has been
+// removed. Panics if for any resource in the collection there is no "last refinement" to remove.
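+// PopReservation is the inverse of PushReservation.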
+func (rs Resources) PopReservation() (result Resources) { +pop_next: + for i := range rs { + r := &rs[i] + ls := len(r.Reservations) + if ls == 0 { + panic(fmt.Sprintf("no reservations exist for resource %q", r)) + } + + r = proto.Clone(r).(*Resource) // avoid modifying rs + r.Reservations[ls-1] = Resource_ReservationInfo{} // don't leak nested pointers + r.Reservations = r.Reservations[:ls-1] // shrink the slice + + // unroll Add1 to avoid additional calls to Clone + rr := *r + for j := range result { + r2 := &result[j] + if r2.Addable(rr) { + r2.Add(rr) + continue pop_next + } + } + + // cannot be combined with an existing resource + result = append(result, rr) + } + return +} + +// Allocate sets the AllocationInfo for the resource, panics if role is "". +func (r *Resource) Allocate(role string) { + if role == "" { + panic(fmt.Sprintf("cannot allocate resource to an empty-string role: %q", r)) + } + r.AllocationInfo = &Resource_AllocationInfo{Role: &role} +} + +// Unallocate clears the AllocationInfo for the resource. +func (r *Resource) Unallocate() { + r.AllocationInfo = nil +} + +// Allocate sets the AllocationInfo for all the resources. +// Returns a reference to the receiver to allow for chaining. +func (rs Resources) Allocate(role string) Resources { + if role == "" { + panic(fmt.Sprintf("cannot allocate resources to an empty-string role: %q", rs)) + } + for i := range rs { + rs[i].AllocationInfo = &Resource_AllocationInfo{Role: &role} + } + return rs +} + +// Unallocate clears the AllocationInfo for all the resources. +// Returns a reference to the receiver to allow for chaining. +func (rs Resources) Unallocate() Resources { + for i := range rs { + rs[i].AllocationInfo = nil + } + return rs +} diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/roles/role.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/roles/role.go new file mode 100644 index 000000000000..b6075f8d62e5 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/roles/role.go @@ -0,0 +1,82 @@ +package roles + +import ( + "fmt" + "strings" + "unicode" +) + +// Role is a deprecated type. +type Role string + +const defaultRole = Role("*") + +func (r Role) IsDefault() bool { + return r == defaultRole +} + +func (r Role) Assign() func(interface{}) { + return func(v interface{}) { + type roler interface { + WithRole(string) + } + if ri, ok := v.(roler); ok { + ri.WithRole(string(r)) + } + } +} + +func (r Role) Proto() *string { + s := string(r) + return &s +} + +// IsStrictSubroleOf returns true if left is a strict subrole of right. 
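+// For example, "eng/dev" is a strict subrole of "eng"; a role is never a strict subrole of itself.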
+func IsStrictSubroleOf(left, right string) bool {
+	return len(left) > len(right) && left[len(right)] == '/' && strings.HasPrefix(left, right)
+}
+
+var illegalComponents = map[string]struct{}{
+	".":  struct{}{},
+	"..": struct{}{},
+	"*":  struct{}{},
+}
+
+func Parse(s string) (string, error) {
+	if s == string(defaultRole) {
+		return s, nil
+	}
+	if strings.HasPrefix(s, "/") {
+		return "", fmt.Errorf("role %q cannot start with a slash", s)
+	}
+	if strings.HasSuffix(s, "/") {
+		return "", fmt.Errorf("role %q cannot end with a slash", s)
+	}
+
+	// validate each component in the role path
+	for _, part := range strings.Split(s, "/") {
+		if part == "" {
+			return "", fmt.Errorf("role %q cannot contain two adjacent slashes", s)
+		}
+		if _, found := illegalComponents[part]; found {
+			return "", fmt.Errorf("role %q cannot contain %q as a component", s, part)
+		}
+		if strings.HasPrefix(part, "-") {
+			return "", fmt.Errorf("role component %q is invalid because it begins with a dash", part)
+		}
+		if strings.IndexFunc(part, func(r rune) bool { return unicode.IsSpace(r) || unicode.IsControl(r) }) > -1 {
+			return "", fmt.Errorf("role component %q is invalid because it contains whitespace or control characters", part)
+		}
+	}
+	return s, nil
+}
+
+func Validate(roles ...string) error {
+	for i := range roles {
+		_, err := Parse(roles[i])
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/values.go b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/values.go
new file mode 100644
index 000000000000..ae46905e9e18
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/mesos/mesos-go/api/v1/lib/values.go
@@ -0,0 +1,142 @@
+package mesos
+
+func (left *Value_Scalar) Compare(right *Value_Scalar) int {
+	var (
+		a = convertToFixed64(left.GetValue())
+		b = convertToFixed64(right.GetValue())
+	)
+	if a < b {
+		return -1
+	}
+	if a > b {
+		return 1
+	}
+	return 0
+}
+
+func (left *Value_Ranges) Compare(right *Value_Ranges) int {
+	return Ranges(left.GetRange()).Compare(right.GetRange())
+}
+
+func (left *Value_Set) Compare(right *Value_Set) int {
+	i, j := left.GetItem(), right.GetItem()
+	if len(i) <= len(j) {
+		b := make(map[string]struct{}, len(j))
+		for _, x := range j {
+			b[x] = struct{}{}
+		}
+		// make sure that each item on the left exists on the right,
+		// otherwise left is not a subset of right.
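+		// left items are recorded so the reverse containment check below can run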
+ a := make(map[string]struct{}, len(i)) + for _, x := range i { + if _, ok := b[x]; !ok { + return 1 + } + a[x] = struct{}{} + } + // if every item on the right also exists on the left, then + // the sets are equal, otherwise left < right + for x := range b { + if _, ok := a[x]; !ok { + return -1 + } + } + return 0 + } + return 1 +} + +func (left *Value_Set) Add(right *Value_Set) *Value_Set { + lefty := left.GetItem() + righty := right.GetItem() + c := len(lefty) + len(righty) + if c == 0 { + return nil + } + m := make(map[string]struct{}, c) + for _, v := range lefty { + m[v] = struct{}{} + } + for _, v := range righty { + m[v] = struct{}{} + } + x := make([]string, 0, len(m)) + for v := range m { + x = append(x, v) + } + return &Value_Set{Item: x} +} + +func (left *Value_Set) Subtract(right *Value_Set) *Value_Set { + // for each item in right, remove it from left + lefty := left.GetItem() + righty := right.GetItem() + if c := len(lefty); c == 0 { + return nil + } else if len(righty) == 0 { + x := make([]string, c) + copy(x, lefty) + return &Value_Set{Item: x} + } + + a := make(map[string]struct{}, len(lefty)) + for _, x := range lefty { + a[x] = struct{}{} + } + for _, x := range righty { + delete(a, x) + } + if len(a) == 0 { + return nil + } + i := 0 + for k := range a { + lefty[i] = k + i++ + } + return &Value_Set{Item: lefty[:len(a)]} +} + +func (left *Value_Ranges) Add(right *Value_Ranges) *Value_Ranges { + a, b := Ranges(left.GetRange()), Ranges(right.GetRange()) + c := len(a) + len(b) + if c == 0 { + return nil + } + x := make(Ranges, c) + if len(a) > 0 { + copy(x, a) + } + if len(b) > 0 { + copy(x[len(a):], b) + } + return &Value_Ranges{ + Range: x.Sort().Squash(), + } +} + +func (left *Value_Ranges) Subtract(right *Value_Ranges) *Value_Ranges { + a, b := Ranges(left.GetRange()), Ranges(right.GetRange()) + if len(a) > 1 { + x := make(Ranges, len(a)) + copy(x, a) + a = x.Sort().Squash() + } + for _, r := range b { + a = a.Remove(r) + } + if len(a) == 0 { + return nil + } + return &Value_Ranges{Range: a} +} + +func (left *Value_Scalar) Add(right *Value_Scalar) *Value_Scalar { + sum := convertToFixed64(left.GetValue()) + convertToFixed64(right.GetValue()) + return &Value_Scalar{Value: convertToFloat64(sum)} +} + +func (left *Value_Scalar) Subtract(right *Value_Scalar) *Value_Scalar { + diff := convertToFixed64(left.GetValue()) - convertToFixed64(right.GetValue()) + return &Value_Scalar{Value: convertToFloat64(diff)} +} diff --git a/cluster-autoscaler/vendor/github.com/modern-go/reflect2/.travis.yml b/cluster-autoscaler/vendor/github.com/modern-go/reflect2/.travis.yml index 449e67cd01ac..fbb43744d94b 100644 --- a/cluster-autoscaler/vendor/github.com/modern-go/reflect2/.travis.yml +++ b/cluster-autoscaler/vendor/github.com/modern-go/reflect2/.travis.yml @@ -6,6 +6,7 @@ go: before_install: - go get -t -v ./... + - go get -t -v github.com/modern-go/reflect2-tests/... 
script: - ./test.sh diff --git a/cluster-autoscaler/vendor/github.com/modern-go/reflect2/Gopkg.toml b/cluster-autoscaler/vendor/github.com/modern-go/reflect2/Gopkg.toml index 3593fd0417fd..2f4f4dbdcc5e 100644 --- a/cluster-autoscaler/vendor/github.com/modern-go/reflect2/Gopkg.toml +++ b/cluster-autoscaler/vendor/github.com/modern-go/reflect2/Gopkg.toml @@ -24,7 +24,7 @@ # go-tests = true # unused-packages = true -ignored = ["github.com/modern-go/test","github.com/modern-go/test/must","github.com/modern-go/test/should"] +ignored = [] [[constraint]] name = "github.com/modern-go/concurrent" diff --git a/cluster-autoscaler/vendor/github.com/modern-go/reflect2/reflect2.go b/cluster-autoscaler/vendor/github.com/modern-go/reflect2/reflect2.go index 0632b71fb039..63b49c799194 100644 --- a/cluster-autoscaler/vendor/github.com/modern-go/reflect2/reflect2.go +++ b/cluster-autoscaler/vendor/github.com/modern-go/reflect2/reflect2.go @@ -150,6 +150,9 @@ func (cfg *frozenConfig) TypeOf(obj interface{}) Type { } func (cfg *frozenConfig) Type2(type1 reflect.Type) Type { + if type1 == nil { + return nil + } cacheKey := uintptr(unpackEFace(type1).data) typeObj, found := cfg.cache.Load(cacheKey) if found { diff --git a/cluster-autoscaler/vendor/github.com/modern-go/reflect2/test.sh b/cluster-autoscaler/vendor/github.com/modern-go/reflect2/test.sh index fbcef73098be..3d2b9768ce61 100755 --- a/cluster-autoscaler/vendor/github.com/modern-go/reflect2/test.sh +++ b/cluster-autoscaler/vendor/github.com/modern-go/reflect2/test.sh @@ -3,7 +3,7 @@ set -e echo "" > coverage.txt -for d in $(go list ./... | grep -v vendor); do +for d in $(go list github.com/modern-go/reflect2-tests/... | grep -v vendor); do go test -coverprofile=profile.out -coverpkg=github.com/modern-go/reflect2 $d if [ -f profile.out ]; then cat profile.out >> coverage.txt diff --git a/cluster-autoscaler/vendor/github.com/modern-go/reflect2/type_map.go b/cluster-autoscaler/vendor/github.com/modern-go/reflect2/type_map.go index 6d489112ffdc..3acfb55803a8 100644 --- a/cluster-autoscaler/vendor/github.com/modern-go/reflect2/type_map.go +++ b/cluster-autoscaler/vendor/github.com/modern-go/reflect2/type_map.go @@ -4,6 +4,7 @@ import ( "reflect" "runtime" "strings" + "sync" "unsafe" ) @@ -15,10 +16,17 @@ func typelinks1() [][]unsafe.Pointer //go:linkname typelinks2 reflect.typelinks func typelinks2() (sections []unsafe.Pointer, offset [][]int32) -var types = map[string]reflect.Type{} -var packages = map[string]map[string]reflect.Type{} +// initOnce guards initialization of types and packages +var initOnce sync.Once + +var types map[string]reflect.Type +var packages map[string]map[string]reflect.Type + +// discoverTypes initializes types and packages +func discoverTypes() { + types = make(map[string]reflect.Type) + packages = make(map[string]map[string]reflect.Type) -func init() { ver := runtime.Version() if ver == "go1.5" || strings.HasPrefix(ver, "go1.5.") { loadGo15Types() @@ -90,11 +98,13 @@ type emptyInterface struct { // TypeByName return the type by its name, just like Class.forName in java func TypeByName(typeName string) Type { + initOnce.Do(discoverTypes) return Type2(types[typeName]) } // TypeByPackageName return the type by its package and name func TypeByPackageName(pkgPath string, name string) Type { + initOnce.Do(discoverTypes) pkgTypes := packages[pkgPath] if pkgTypes == nil { return nil diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/LICENSE b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/LICENSE deleted 
file mode 100644 index 261eeb9e9f8b..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/common/BUILD.bazel b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/common/BUILD.bazel deleted file mode 100644 index ac9e600f3f4b..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/common/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "consts.go", - "plugins.go", - ], - importpath = "github.com/openshift/cluster-api/pkg/apis/cluster/common", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/pkg/errors:go_default_library", - "//vendor/k8s.io/klog:go_default_library", - ], -) diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/common/consts.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/common/consts.go deleted file mode 100644 index 287480250d57..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/common/consts.go +++ /dev/null @@ -1,114 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package common - -// Constants aren't automatically generated for unversioned packages. -// Instead share the same constant for all versioned packages -type MachineStatusError string - -const ( - // Represents that the combination of configuration in the MachineSpec - // is not supported by this cluster. This is not a transient error, but - // indicates a state that must be fixed before progress can be made. - // - // Example: the ProviderSpec specifies an instance type that doesn't exist, - InvalidConfigurationMachineError MachineStatusError = "InvalidConfiguration" - - // This indicates that the MachineSpec has been updated in a way that - // is not supported for reconciliation on this cluster. The spec may be - // completely valid from a configuration standpoint, but the controller - // does not support changing the real world state to match the new - // spec. - // - // Example: the responsible controller is not capable of changing the - // container runtime from docker to rkt. - UnsupportedChangeMachineError MachineStatusError = "UnsupportedChange" - - // This generally refers to exceeding one's quota in a cloud provider, - // or running out of physical machines in an on-premise environment. - InsufficientResourcesMachineError MachineStatusError = "InsufficientResources" - - // There was an error while trying to create a Node to match this - // Machine. 
This may indicate a transient problem that will be fixed - // automatically with time, such as a service outage, or a terminal - // error during creation that doesn't match a more specific - // MachineStatusError value. - // - // Example: timeout trying to connect to GCE. - CreateMachineError MachineStatusError = "CreateError" - - // An error was encountered while trying to delete the Node that this - // Machine represents. This could be a transient or terminal error, but - // will only be observable if the provider's Machine controller has - // added a finalizer to the object to more gracefully handle deletions. - // - // Example: cannot resolve EC2 IP address. - DeleteMachineError MachineStatusError = "DeleteError" - - // This error indicates that the machine did not join the cluster - // as a new node within the expected timeframe after instance - // creation at the provider succeeded - // - // Example use case: A controller that deletes Machines which do - // not result in a Node joining the cluster within a given timeout - // and that are managed by a MachineSet - JoinClusterTimeoutMachineError = "JoinClusterTimeoutError" -) - -type ClusterStatusError string - -const ( - // InvalidConfigurationClusterError indicates that the cluster - // configuration is invalid. - InvalidConfigurationClusterError ClusterStatusError = "InvalidConfiguration" - - // UnsupportedChangeClusterError indicates that the cluster - // spec has been updated in an unsupported way. That cannot be - // reconciled. - UnsupportedChangeClusterError ClusterStatusError = "UnsupportedChange" - - // CreateClusterError indicates that an error was encountered - // when trying to create the cluster. - CreateClusterError ClusterStatusError = "CreateError" - - // UpdateClusterError indicates that an error was encountered - // when trying to update the cluster. - UpdateClusterError ClusterStatusError = "UpdateError" - - // DeleteClusterError indicates that an error was encountered - // when trying to delete the cluster. - DeleteClusterError ClusterStatusError = "DeleteError" -) - -type MachineSetStatusError string - -const ( - // Represents that the combination of configuration in the MachineTemplateSpec - // is not supported by this cluster. This is not a transient error, but - // indicates a state that must be fixed before progress can be made. - // - // Example: the ProviderSpec specifies an instance type that doesn't exist. - InvalidConfigurationMachineSetError MachineSetStatusError = "InvalidConfiguration" -) - -type MachineDeploymentStrategyType string - -const ( - // Replace the old MachineSet by new one using rolling update - // i.e. gradually scale down the old MachineSet and scale up the new one. - RollingUpdateMachineDeploymentStrategyType MachineDeploymentStrategyType = "RollingUpdate" -) diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/common/plugins.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/common/plugins.go deleted file mode 100644 index 8c2083d88c08..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/common/plugins.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package common - -import ( - "sync" - - "github.com/pkg/errors" - "k8s.io/klog" -) - -var ( - providersMutex sync.Mutex - providers = make(map[string]interface{}) -) - -// RegisterClusterProvisioner registers a ClusterProvisioner by name. This -// is expected to happen during app startup. -func RegisterClusterProvisioner(name string, provisioner interface{}) { - providersMutex.Lock() - defer providersMutex.Unlock() - if _, found := providers[name]; found { - klog.Fatalf("Cluster provisioner %q was registered twice", name) - } - klog.V(1).Infof("Registered cluster provisioner %q", name) - providers[name] = provisioner -} - -func ClusterProvisioner(name string) (interface{}, error) { - providersMutex.Lock() - defer providersMutex.Unlock() - provisioner, found := providers[name] - if !found { - return nil, errors.Errorf("unable to find provisioner for %s", name) - } - return provisioner, nil -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/BUILD.bazel b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/BUILD.bazel deleted file mode 100644 index 1e5c0f98329a..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,52 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "cluster_types.go", - "common_types.go", - "defaults.go", - "doc.go", - "machine_types.go", - "machineclass_types.go", - "machinedeployment_types.go", - "machineset_types.go", - "register.go", - "zz_generated.deepcopy.go", - ], - importpath = "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//pkg/apis/cluster/common:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - "//vendor/sigs.k8s.io/controller-runtime/pkg/runtime/scheme:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "cluster_types_test.go", - "machine_types_test.go", - "machinedeployment_types_test.go", - "machineset_types_test.go", - "v1alpha1_suite_test.go", - ], - embed = [":go_default_library"], - deps = [ - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library", - 
"//vendor/sigs.k8s.io/controller-runtime/pkg/envtest:go_default_library", - ], -) diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/cluster_types.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/cluster_types.go deleted file mode 100644 index dae8b49cebe5..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/cluster_types.go +++ /dev/null @@ -1,165 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - - "github.com/openshift/cluster-api/pkg/apis/cluster/common" - "k8s.io/apimachinery/pkg/util/validation/field" -) - -const ClusterFinalizer = "cluster.cluster.k8s.io" - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -/// [Cluster] -// Cluster is the Schema for the clusters API -// +k8s:openapi-gen=true -// +kubebuilder:subresource:status -type Cluster struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec ClusterSpec `json:"spec,omitempty"` - Status ClusterStatus `json:"status,omitempty"` -} - -/// [Cluster] - -/// [ClusterSpec] -// ClusterSpec defines the desired state of Cluster -type ClusterSpec struct { - // Cluster network configuration - ClusterNetwork ClusterNetworkingConfig `json:"clusterNetwork"` - - // Provider-specific serialized configuration to use during - // cluster creation. It is recommended that providers maintain - // their own versioned API types that should be - // serialized/deserialized from this field. - // +optional - ProviderSpec ProviderSpec `json:"providerSpec,omitempty"` -} - -/// [ClusterSpec] - -/// [ClusterNetworkingConfig] -// ClusterNetworkingConfig specifies the different networking -// parameters for a cluster. -type ClusterNetworkingConfig struct { - // The network ranges from which service VIPs are allocated. - Services NetworkRanges `json:"services"` - - // The network ranges from which Pod networks are allocated. - Pods NetworkRanges `json:"pods"` - - // Domain name for services. - ServiceDomain string `json:"serviceDomain"` -} - -/// [ClusterNetworkingConfig] - -/// [NetworkRanges] -// NetworkRanges represents ranges of network addresses. -type NetworkRanges struct { - CIDRBlocks []string `json:"cidrBlocks"` -} - -/// [NetworkRanges] - -/// [ClusterStatus] -// ClusterStatus defines the observed state of Cluster -type ClusterStatus struct { - // APIEndpoint represents the endpoint to communicate with the IP. - // +optional - APIEndpoints []APIEndpoint `json:"apiEndpoints,omitempty"` - - // NB: Eventually we will redefine ErrorReason as ClusterStatusError once the - // following issue is fixed. 
- // https://github.com/kubernetes-incubator/apiserver-builder/issues/176 - - // If set, indicates that there is a problem reconciling the - // state, and will be set to a token value suitable for - // programmatic interpretation. - // +optional - ErrorReason common.ClusterStatusError `json:"errorReason,omitempty"` - - // If set, indicates that there is a problem reconciling the - // state, and will be set to a descriptive error message. - // +optional - ErrorMessage string `json:"errorMessage,omitempty"` - - // Provider-specific status. - // It is recommended that providers maintain their - // own versioned API types that should be - // serialized/deserialized from this field. - // +optional - ProviderStatus *runtime.RawExtension `json:"providerStatus,omitempty"` -} - -/// [ClusterStatus] - -/// [APIEndpoint] -// APIEndpoint represents a reachable Kubernetes API endpoint. -type APIEndpoint struct { - // The hostname on which the API server is serving. - Host string `json:"host"` - - // The port on which the API server is serving. - Port int `json:"port"` -} - -/// [APIEndpoint] - -func (o *Cluster) Validate() field.ErrorList { - errors := field.ErrorList{} - // perform validation here and add to errors using field.Invalid - if o.Spec.ClusterNetwork.ServiceDomain == "" { - errors = append(errors, field.Invalid( - field.NewPath("Spec", "ClusterNetwork", "ServiceDomain"), - o.Spec.ClusterNetwork.ServiceDomain, - "invalid cluster configuration: missing Cluster.Spec.ClusterNetwork.ServiceDomain")) - } - if len(o.Spec.ClusterNetwork.Pods.CIDRBlocks) == 0 { - errors = append(errors, field.Invalid( - field.NewPath("Spec", "ClusterNetwork", "Pods"), - o.Spec.ClusterNetwork.Pods, - "invalid cluster configuration: missing Cluster.Spec.ClusterNetwork.Pods")) - } - if len(o.Spec.ClusterNetwork.Services.CIDRBlocks) == 0 { - errors = append(errors, field.Invalid( - field.NewPath("Spec", "ClusterNetwork", "Services"), - o.Spec.ClusterNetwork.Services, - "invalid cluster configuration: missing Cluster.Spec.ClusterNetwork.Services")) - } - return errors -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ClusterList contains a list of Cluster -type ClusterList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []Cluster `json:"items"` -} - -func init() { - SchemeBuilder.Register(&Cluster{}, &ClusterList{}) -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/common_types.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/common_types.go deleted file mode 100644 index 7a9c647fe296..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/common_types.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
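The `Validate` method deleted above rejects any Cluster whose networking block is incomplete: it checks `ServiceDomain`, the Pods CIDR list, and the Services CIDR list independently. A small usage sketch against the vendored types as they stood before this removal (the CIDR values are illustrative):

```go
package main

import (
	"fmt"

	clusterv1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1"
)

func main() {
	c := &clusterv1.Cluster{}
	// An empty ClusterNetwork fails all three checks.
	fmt.Println(len(c.Validate())) // 3

	c.Spec.ClusterNetwork = clusterv1.ClusterNetworkingConfig{
		Services:      clusterv1.NetworkRanges{CIDRBlocks: []string{"10.96.0.0/12"}},
		Pods:          clusterv1.NetworkRanges{CIDRBlocks: []string{"192.168.0.0/16"}},
		ServiceDomain: "cluster.local",
	}
	fmt.Println(len(c.Validate())) // 0
}
```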
-*/ - -package v1alpha1 - -import ( - corev1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// ProviderSpec defines the configuration to use during node creation. -type ProviderSpec struct { - - // No more than one of the following may be specified. - - // Value is an inlined, serialized representation of the resource - // configuration. It is recommended that providers maintain their own - // versioned API types that should be serialized/deserialized from this - // field, akin to component config. - // +optional - Value *runtime.RawExtension `json:"value,omitempty"` - - // Source for the provider configuration. Cannot be used if value is - // not empty. - // +optional - ValueFrom *ProviderSpecSource `json:"valueFrom,omitempty"` -} - -// ProviderSpecSource represents a source for the provider-specific -// resource configuration. -type ProviderSpecSource struct { - // The machine class from which the provider config should be sourced. - // +optional - MachineClass *MachineClassRef `json:"machineClass,omitempty"` -} - -// MachineClassRef is a reference to the MachineClass object. Controllers should find the right MachineClass using this reference. -type MachineClassRef struct { - // +optional - *corev1.ObjectReference `json:",inline"` - - // Provider is the name of the cloud-provider which MachineClass is intended for. - // +optional - Provider string `json:"provider,omitempty"` -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/defaults.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/defaults.go deleted file mode 100644 index a64e64ab375b..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/defaults.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
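`ProviderSpec`, deleted above, deliberately carries opaque provider-owned configuration: either inline in `Value` or by reference via `ValueFrom`, never both. A sketch of the inline form; `exampleSpec` is a stand-in for a provider's own versioned config type, not anything from the vendored package:

```go
package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"

	clusterv1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1"
)

// exampleSpec stands in for a provider's own versioned config type.
type exampleSpec struct {
	InstanceType string `json:"instanceType"`
}

func main() {
	raw, err := json.Marshal(exampleSpec{InstanceType: "m4.large"})
	if err != nil {
		panic(err)
	}
	// Inline the serialized provider config; ValueFrom would reference a
	// MachineClass instead.
	spec := clusterv1.ProviderSpec{Value: &runtime.RawExtension{Raw: raw}}
	fmt.Println(string(spec.Value.Raw)) // {"instanceType":"m4.large"}
}
```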
-*/ - -package v1alpha1 - -import ( - "github.com/openshift/cluster-api/pkg/apis/cluster/common" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" -) - -// PopulateDefaultsMachineDeployment fills in default field values -// Currently it is called after reading objects, but it could be called in an admission webhook also -func PopulateDefaultsMachineDeployment(d *MachineDeployment) { - if d.Spec.Replicas == nil { - d.Spec.Replicas = new(int32) - *d.Spec.Replicas = 1 - } - - if d.Spec.MinReadySeconds == nil { - d.Spec.MinReadySeconds = new(int32) - *d.Spec.MinReadySeconds = 0 - } - - if d.Spec.RevisionHistoryLimit == nil { - d.Spec.RevisionHistoryLimit = new(int32) - *d.Spec.RevisionHistoryLimit = 1 - } - - if d.Spec.ProgressDeadlineSeconds == nil { - d.Spec.ProgressDeadlineSeconds = new(int32) - *d.Spec.ProgressDeadlineSeconds = 600 - } - - if d.Spec.Strategy == nil { - d.Spec.Strategy = &MachineDeploymentStrategy{} - } - - if d.Spec.Strategy.Type == "" { - d.Spec.Strategy.Type = common.RollingUpdateMachineDeploymentStrategyType - } - - // Default RollingUpdate strategy only if strategy type is RollingUpdate. - if d.Spec.Strategy.Type == common.RollingUpdateMachineDeploymentStrategyType { - if d.Spec.Strategy.RollingUpdate == nil { - d.Spec.Strategy.RollingUpdate = &MachineRollingUpdateDeployment{} - } - if d.Spec.Strategy.RollingUpdate.MaxSurge == nil { - ios1 := intstr.FromInt(1) - d.Spec.Strategy.RollingUpdate.MaxSurge = &ios1 - } - if d.Spec.Strategy.RollingUpdate.MaxUnavailable == nil { - ios0 := intstr.FromInt(0) - d.Spec.Strategy.RollingUpdate.MaxUnavailable = &ios0 - } - } - - if len(d.Namespace) == 0 { - d.Namespace = metav1.NamespaceDefault - } -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/doc.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/doc.go deleted file mode 100644 index 269c574e93fe..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package v1alpha1 contains API Schema definitions for the cluster v1alpha1 API group -// +k8s:openapi-gen=true -// +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=github.com/openshift/cluster-api/pkg/apis/cluster -// +k8s:defaulter-gen=TypeMeta -// +groupName=cluster.k8s.io -package v1alpha1 diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/machine_types.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/machine_types.go deleted file mode 100644 index 6c6cc4a3c25c..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/machine_types.go +++ /dev/null @@ -1,227 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. 
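`PopulateDefaultsMachineDeployment`, deleted just above, fills every unset field; running it on an empty object makes the resulting defaults concrete. A minimal sketch using the vendored API as removed here:

```go
package main

import (
	"fmt"

	clusterv1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1"
)

func main() {
	d := &clusterv1.MachineDeployment{}
	clusterv1.PopulateDefaultsMachineDeployment(d)

	// Per the deleted defaults.go: replicas=1, RollingUpdate strategy with
	// maxSurge=1 and maxUnavailable=0, progressDeadlineSeconds=600, and the
	// "default" namespace.
	fmt.Println(*d.Spec.Replicas,
		d.Spec.Strategy.Type,
		d.Spec.Strategy.RollingUpdate.MaxSurge.IntValue(),
		d.Spec.Strategy.RollingUpdate.MaxUnavailable.IntValue(),
		*d.Spec.ProgressDeadlineSeconds,
		d.Namespace)
}
```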
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - - "github.com/openshift/cluster-api/pkg/apis/cluster/common" -) - -// Finalizer is set on PrepareForCreate callback -const MachineFinalizer = "machine.cluster.k8s.io" - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -/// [Machine] -// Machine is the Schema for the machines API -// +k8s:openapi-gen=true -// +kubebuilder:subresource:status -type Machine struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec MachineSpec `json:"spec,omitempty"` - Status MachineStatus `json:"status,omitempty"` -} - -/// [Machine] - -/// [MachineSpec] -// MachineSpec defines the desired state of Machine -type MachineSpec struct { - // ObjectMeta will autopopulate the Node created. Use this to - // indicate what labels, annotations, name prefix, etc., should be used - // when creating the Node. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Taints is the full, authoritative list of taints to apply to the corresponding - // Node. This list will overwrite any modifications made to the Node on - // an ongoing basis. - // +optional - Taints []corev1.Taint `json:"taints,omitempty"` - - // ProviderSpec details Provider-specific configuration to use during node creation. - // +optional - ProviderSpec ProviderSpec `json:"providerSpec"` - - // Versions of key software to use. This field is optional at cluster - // creation time, and omitting the field indicates that the cluster - // installation tool should select defaults for the user. These - // defaults may differ based on the cluster installer, but the tool - // should populate the values it uses when persisting Machine objects. - // A Machine spec missing this field at runtime is invalid. - // +optional - Versions MachineVersionInfo `json:"versions,omitempty"` - - // ConfigSource is used to populate in the associated Node for dynamic kubelet config. This - // field already exists in Node, so any updates to it in the Machine - // spec will be automatically copied to the linked NodeRef from the - // status. The rest of dynamic kubelet config support should then work - // as-is. - // +optional - ConfigSource *corev1.NodeConfigSource `json:"configSource,omitempty"` -} - -/// [MachineSpec] - -/// [MachineStatus] -// MachineStatus defines the observed state of Machine -type MachineStatus struct { - // NodeRef will point to the corresponding Node if it exists. - // +optional - NodeRef *corev1.ObjectReference `json:"nodeRef,omitempty"` - - // LastUpdated identifies when this status was last observed. - // +optional - LastUpdated *metav1.Time `json:"lastUpdated,omitempty"` - - // Versions specifies the current versions of software on the corresponding Node (if it - // exists). 
This is provided for a few reasons: - // - // 1) It is more convenient than checking the NodeRef, traversing it to - // the Node, and finding the appropriate field in Node.Status.NodeInfo - // (which uses different field names and formatting). - // 2) It removes some of the dependency on the structure of the Node, - // so that if the structure of Node.Status.NodeInfo changes, only - // machine controllers need to be updated, rather than every client - // of the Machines API. - // 3) There is no other simple way to check the control plane - // version. A client would have to connect directly to the apiserver - // running on the target node in order to find out its version. - // +optional - Versions *MachineVersionInfo `json:"versions,omitempty"` - - // ErrorReason will be set in the event that there is a terminal problem - // reconciling the Machine and will contain a succinct value suitable - // for machine interpretation. - // - // This field should not be set for transitive errors that a controller - // faces that are expected to be fixed automatically over - // time (like service outages), but instead indicate that something is - // fundamentally wrong with the Machine's spec or the configuration of - // the controller, and that manual intervention is required. Examples - // of terminal errors would be invalid combinations of settings in the - // spec, values that are unsupported by the controller, or the - // responsible controller itself being critically misconfigured. - // - // Any transient errors that occur during the reconciliation of Machines - // can be added as events to the Machine object and/or logged in the - // controller's output. - // +optional - ErrorReason *common.MachineStatusError `json:"errorReason,omitempty"` - - // ErrorMessage will be set in the event that there is a terminal problem - // reconciling the Machine and will contain a more verbose string suitable - // for logging and human consumption. - // - // This field should not be set for transitive errors that a controller - // faces that are expected to be fixed automatically over - // time (like service outages), but instead indicate that something is - // fundamentally wrong with the Machine's spec or the configuration of - // the controller, and that manual intervention is required. Examples - // of terminal errors would be invalid combinations of settings in the - // spec, values that are unsupported by the controller, or the - // responsible controller itself being critically misconfigured. - // - // Any transient errors that occur during the reconciliation of Machines - // can be added as events to the Machine object and/or logged in the - // controller's output. - // +optional - ErrorMessage *string `json:"errorMessage,omitempty"` - - // ProviderStatus details a Provider-specific status. - // It is recommended that providers maintain their - // own versioned API types that should be - // serialized/deserialized from this field. - // +optional - ProviderStatus *runtime.RawExtension `json:"providerStatus,omitempty"` - - // Addresses is a list of addresses assigned to the machine. Queried from cloud provider, if available. - // +optional - Addresses []corev1.NodeAddress `json:"addresses,omitempty"` - - // Conditions lists the conditions synced from the node conditions of the corresponding node-object. - // Machine-controller is responsible for keeping conditions up-to-date. - // MachineSet controller will be taking these conditions as a signal to decide if - // machine is healthy or needs to be replaced. 
- // Refer: https://kubernetes.io/docs/concepts/architecture/nodes/#condition - // +optional - Conditions []corev1.NodeCondition `json:"conditions,omitempty"` - - // LastOperation describes the last-operation performed by the machine-controller. - // This API should be useful as a history in terms of the latest operation performed on the - // specific machine. It should also convey the state of the latest-operation for example if - // it is still on-going, failed or completed successfully. - // +optional - LastOperation *LastOperation `json:"lastOperation,omitempty"` - - // Phase represents the current phase of machine actuation. - // E.g. Pending, Running, Terminating, Failed etc. - // +optional - Phase *string `json:"phase,omitempty"` -} - -// LastOperation represents the detail of the last performed operation on the MachineObject. -type LastOperation struct { - // Description is the human-readable description of the last operation. - Description *string `json:"description,omitempty"` - - // LastUpdated is the timestamp at which LastOperation API was last-updated. - LastUpdated *metav1.Time `json:"lastUpdated,omitempty"` - - // State is the current status of the last performed operation. - // E.g. Processing, Failed, Successful etc - State *string `json:"state,omitempty"` - - // Type is the type of operation which was last performed. - // E.g. Create, Delete, Update etc - Type *string `json:"type,omitempty"` -} - -/// [MachineStatus] - -/// [MachineVersionInfo] -type MachineVersionInfo struct { - // Kubelet is the semantic version of kubelet to run - Kubelet string `json:"kubelet"` - - // ControlPlane is the semantic version of the Kubernetes control plane to - // run. This should only be populated when the machine is a - // control plane. - // +optional - ControlPlane string `json:"controlPlane,omitempty"` -} - -/// [MachineVersionInfo] - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// MachineList contains a list of Machine -type MachineList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []Machine `json:"items"` -} - -func init() { - SchemeBuilder.Register(&Machine{}, &MachineList{}) -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/machineclass_types.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/machineclass_types.go deleted file mode 100644 index 7844f6cfa463..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/machineclass_types.go +++ /dev/null @@ -1,78 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
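`Status.NodeRef` above is the link from a Machine back to the Node it produced; a controller enforcing the `JoinClusterTimeoutError` semantics from the deleted `consts.go` would poll exactly this field. A minimal sketch (`hasJoined` and the node name are illustrative):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	clusterv1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1"
)

// hasJoined reports whether the Machine's backing Node has registered.
func hasJoined(m *clusterv1.Machine) bool {
	return m.Status.NodeRef != nil
}

func main() {
	m := &clusterv1.Machine{}
	fmt.Println(hasJoined(m)) // false

	m.Status.NodeRef = &corev1.ObjectReference{Kind: "Node", Name: "worker-0"}
	fmt.Println(hasJoined(m)) // true
}
```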
-*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -// +genclient -// +genclient:noStatus -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -/// [MachineClass] -// MachineClass can be used to templatize and re-use provider configuration -// across multiple Machines / MachineSets / MachineDeployments. -// +k8s:openapi-gen=true -// +resource:path=machineclasses -type MachineClass struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - - // The total capacity available on this machine type (cpu/memory/disk). - // - // WARNING: It is up to the creator of the MachineClass to ensure that - // this field is consistent with the underlying machine that will - // be provisioned when this class is used, to inform higher level - // automation (e.g. the cluster autoscaler). - // TODO(hardikdr) Add allocatable field once requirements are clear from autoscaler-clusterapi // integration topic. - // Capacity corev1.ResourceList `json:"capacity"` - - // How much capacity is actually allocatable on this machine. - // Must be equal to or less than the capacity, and when less - // indicates the resources reserved for system overhead. - // - // WARNING: It is up to the creator of the MachineClass to ensure that - // this field is consistent with the underlying machine that will - // be provisioned when this class is used, to inform higher level - // automation (e.g. the cluster autoscaler). - // TODO(hardikdr) Add allocatable field once requirements are clear from autoscaler-clusterapi // integration topic. - // Allocatable corev1.ResourceList `json:"allocatable"` - - // Provider-specific configuration to use during node creation. - ProviderSpec runtime.RawExtension `json:"providerSpec"` - - // TODO: should this use an api.ObjectReference to a 'MachineTemplate' instead? - // A link to the MachineTemplate that will be used to create provider - // specific configuration for Machines of this class. - // MachineTemplate corev1.ObjectReference `json:machineTemplate` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// MachineClassList contains a list of MachineClasses -type MachineClassList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []MachineClass `json:"items"` -} - -func init() { - SchemeBuilder.Register(&MachineClass{}, &MachineClassList{}) -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/machinedeployment_types.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/machinedeployment_types.go deleted file mode 100644 index be0cc25ff6d7..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/machinedeployment_types.go +++ /dev/null @@ -1,194 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
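The `MachineClass` type deleted above lets several Machines share one provider configuration; a Machine opts in through `ProviderSpec.ValueFrom` instead of an inline `Value`. A sketch against the vendored types (the class and provider names are illustrative):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	clusterv1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1"
)

func main() {
	m := &clusterv1.Machine{}
	m.Spec.ProviderSpec = clusterv1.ProviderSpec{
		ValueFrom: &clusterv1.ProviderSpecSource{
			MachineClass: &clusterv1.MachineClassRef{
				ObjectReference: &corev1.ObjectReference{
					Namespace: "default",
					Name:      "small-worker",
				},
				Provider: "example-provider",
			},
		},
	}
	// The embedded ObjectReference promotes its fields onto the ref.
	fmt.Println(m.Spec.ProviderSpec.ValueFrom.MachineClass.Name) // small-worker
}
```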
-*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - - "github.com/openshift/cluster-api/pkg/apis/cluster/common" -) - -/// [MachineDeploymentSpec] -// MachineDeploymentSpec defines the desired state of MachineDeployment -type MachineDeploymentSpec struct { - // Number of desired machines. Defaults to 1. - // This is a pointer to distinguish between explicit zero and not specified. - Replicas *int32 `json:"replicas,omitempty"` - - // Label selector for machines. Existing MachineSets whose machines are - // selected by this will be the ones affected by this deployment. - // It must match the machine template's labels. - Selector metav1.LabelSelector `json:"selector"` - - // Template describes the machines that will be created. - Template MachineTemplateSpec `json:"template"` - - // The deployment strategy to use to replace existing machines with - // new ones. - // +optional - Strategy *MachineDeploymentStrategy `json:"strategy,omitempty"` - - // Minimum number of seconds for which a newly created machine should - // be ready. - // Defaults to 0 (machine will be considered available as soon as it - // is ready) - // +optional - MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` - - // The number of old MachineSets to retain to allow rollback. - // This is a pointer to distinguish between explicit zero and not specified. - // Defaults to 1. - // +optional - RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` - - // Indicates that the deployment is paused. - // +optional - Paused bool `json:"paused,omitempty"` - - // The maximum time in seconds for a deployment to make progress before it - // is considered to be failed. The deployment controller will continue to - // process failed deployments and a condition with a ProgressDeadlineExceeded - // reason will be surfaced in the deployment status. Note that progress will - // not be estimated during the time a deployment is paused. Defaults to 600s. - ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"` -} - -/// [MachineDeploymentSpec] - -/// [MachineDeploymentStrategy] -// MachineDeploymentStrategy describes how to replace existing machines -// with new ones. -type MachineDeploymentStrategy struct { - // Type of deployment. Currently the only supported strategy is - // "RollingUpdate". - // Default is RollingUpdate. - // +optional - Type common.MachineDeploymentStrategyType `json:"type,omitempty"` - - // Rolling update config params. Present only if - // MachineDeploymentStrategyType = RollingUpdate. - // +optional - RollingUpdate *MachineRollingUpdateDeployment `json:"rollingUpdate,omitempty"` -} - -/// [MachineDeploymentStrategy] - -/// [MachineRollingUpdateDeployment] -// Spec to control the desired behavior of rolling update. -type MachineRollingUpdateDeployment struct { - // The maximum number of machines that can be unavailable during the update. - // Value can be an absolute number (ex: 5) or a percentage of desired - // machines (ex: 10%). - // Absolute number is calculated from percentage by rounding down. - // This can not be 0 if MaxSurge is 0. - // Defaults to 0. - // Example: when this is set to 30%, the old MachineSet can be scaled - // down to 70% of desired machines immediately when the rolling update - // starts. 
Once new machines are ready, old MachineSet can be scaled - // down further, followed by scaling up the new MachineSet, ensuring - // that the total number of machines available at all times - // during the update is at least 70% of desired machines. - // +optional - MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` - - // The maximum number of machines that can be scheduled above the - // desired number of machines. - // Value can be an absolute number (ex: 5) or a percentage of - // desired machines (ex: 10%). - // This can not be 0 if MaxUnavailable is 0. - // Absolute number is calculated from percentage by rounding up. - // Defaults to 1. - // Example: when this is set to 30%, the new MachineSet can be scaled - // up immediately when the rolling update starts, such that the total - // number of old and new machines do not exceed 130% of desired - // machines. Once old machines have been killed, new MachineSet can - // be scaled up further, ensuring that total number of machines running - // at any time during the update is at most 130% of desired machines. - // +optional - MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` -} - -/// [MachineRollingUpdateDeployment] - -/// [MachineDeploymentStatus] -// MachineDeploymentStatus defines the observed state of MachineDeployment -type MachineDeploymentStatus struct { - // The generation observed by the deployment controller. - // +optional - ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - - // Total number of non-terminated machines targeted by this deployment - // (their labels match the selector). - // +optional - Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` - - // Total number of non-terminated machines targeted by this deployment - // that have the desired template spec. - // +optional - UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` - - // Total number of ready machines targeted by this deployment. - // +optional - ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"` - - // Total number of available machines (ready for at least minReadySeconds) - // targeted by this deployment. - // +optional - AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` - - // Total number of unavailable machines targeted by this deployment. - // This is the total number of machines that are still required for - // the deployment to have 100% available capacity. They may either - // be machines that are running but not yet available or machines - // that still have not been created. 
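The 30% examples in the `MaxUnavailable`/`MaxSurge` comments above can be made concrete with the upstream `intstr` helper: surge rounds up, unavailability rounds down. A sketch for 10 desired machines (this mirrors the documented rounding rules; it is not the controller's own code):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	maxSurge := intstr.FromString("30%")
	maxUnavailable := intstr.FromString("30%")
	desired := 10

	// maxSurge rounds up, maxUnavailable rounds down, per the field comments.
	up, _ := intstr.GetValueFromIntOrPercent(&maxSurge, desired, true)
	down, _ := intstr.GetValueFromIntOrPercent(&maxUnavailable, desired, false)

	fmt.Printf("total machines <= %d, available machines >= %d\n",
		desired+up, desired-down) // total <= 13, available >= 7
}
```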
- // +optional - UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"` -} - -/// [MachineDeploymentStatus] - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -/// [MachineDeployment] -// MachineDeployment is the Schema for the machinedeployments API -// +k8s:openapi-gen=true -// +kubebuilder:subresource:status -// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.labelSelector -type MachineDeployment struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec MachineDeploymentSpec `json:"spec,omitempty"` - Status MachineDeploymentStatus `json:"status,omitempty"` -} - -/// [MachineDeployment] - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// MachineDeploymentList contains a list of MachineDeployment -type MachineDeploymentList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []MachineDeployment `json:"items"` -} - -func init() { - SchemeBuilder.Register(&MachineDeployment{}, &MachineDeploymentList{}) -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/machineset_types.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/machineset_types.go deleted file mode 100644 index 95644395acee..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/machineset_types.go +++ /dev/null @@ -1,187 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "log" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" - - "github.com/openshift/cluster-api/pkg/apis/cluster/common" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/util/validation/field" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -/// [MachineSet] -// MachineSet ensures that a specified number of machines replicas are running at any given time. -// +k8s:openapi-gen=true -// +kubebuilder:subresource:status -// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.labelSelector -type MachineSet struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec MachineSetSpec `json:"spec,omitempty"` - Status MachineSetStatus `json:"status,omitempty"` -} - -/// [MachineSet] - -/// [MachineSetSpec] -// MachineSetSpec defines the desired state of MachineSet -type MachineSetSpec struct { - // Replicas is the number of desired replicas. - // This is a pointer to distinguish between explicit zero and unspecified. - // Defaults to 1. 
- // +optional
- Replicas *int32 `json:"replicas,omitempty"`
-
- // MinReadySeconds is the minimum number of seconds for which a newly created machine should be ready.
- // Defaults to 0 (machine will be considered available as soon as it is ready)
- // +optional
- MinReadySeconds int32 `json:"minReadySeconds,omitempty"`
-
- // Selector is a label query over machines that should match the replica count.
- // Label keys and values that must match in order to be controlled by this MachineSet.
- // It must match the machine template's labels.
- // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
- Selector metav1.LabelSelector `json:"selector"`
-
- // Template is the object that describes the machine that will be created if
- // insufficient replicas are detected.
- // +optional
- Template MachineTemplateSpec `json:"template,omitempty"`
-}
-
-/// [MachineSetSpec]
-
-/// [MachineTemplateSpec]
-// MachineTemplateSpec describes the data needed to create a Machine from a template
-type MachineTemplateSpec struct {
- // Standard object's metadata.
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
- // +optional
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // Specification of the desired behavior of the machine.
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
- // +optional
- Spec MachineSpec `json:"spec,omitempty"`
-}
-
-/// [MachineTemplateSpec]
-
-/// [MachineSetStatus]
-// MachineSetStatus defines the observed state of MachineSet
-type MachineSetStatus struct {
- // Replicas is the most recently observed number of replicas.
- Replicas int32 `json:"replicas"`
-
- // The number of replicas that have labels matching the labels of the machine template of the MachineSet.
- // +optional
- FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty"`
-
- // The number of ready replicas for this MachineSet. A machine is considered ready when the node has been created and is "Ready".
- // +optional
- ReadyReplicas int32 `json:"readyReplicas,omitempty"`
-
- // The number of available replicas (ready for at least minReadySeconds) for this MachineSet.
- // +optional
- AvailableReplicas int32 `json:"availableReplicas,omitempty"`
-
- // ObservedGeneration reflects the generation of the most recently observed MachineSet.
- // +optional
- ObservedGeneration int64 `json:"observedGeneration,omitempty"`
-
- // In the event that there is a terminal problem reconciling the
- // replicas, both ErrorReason and ErrorMessage will be set. ErrorReason
- // will be populated with a succinct value suitable for machine
- // interpretation, while ErrorMessage will contain a more verbose
- // string suitable for logging and human consumption.
- //
- // These fields should not be set for transitive errors that a
- // controller faces that are expected to be fixed automatically over
- // time (like service outages), but instead indicate that something is
- // fundamentally wrong with the MachineTemplate's spec or the configuration of
- // the machine controller, and that manual intervention is required. Examples
- // of terminal errors would be invalid combinations of settings in the
- // spec, values that are unsupported by the machine controller, or the
- // responsible machine controller itself being critically misconfigured.
- // - // Any transient errors that occur during the reconciliation of Machines - // can be added as events to the MachineSet object and/or logged in the - // controller's output. - // +optional - ErrorReason *common.MachineSetStatusError `json:"errorReason,omitempty"` - // +optional - ErrorMessage *string `json:"errorMessage,omitempty"` -} - -/// [MachineSetStatus] - -func (machineSet *MachineSet) Validate() field.ErrorList { - errors := field.ErrorList{} - - // validate spec.selector and spec.template.labels - fldPath := field.NewPath("spec") - errors = append(errors, metav1validation.ValidateLabelSelector(&machineSet.Spec.Selector, fldPath.Child("selector"))...) - if len(machineSet.Spec.Selector.MatchLabels)+len(machineSet.Spec.Selector.MatchExpressions) == 0 { - errors = append(errors, field.Invalid(fldPath.Child("selector"), machineSet.Spec.Selector, "empty selector is not valid for MachineSet.")) - } - selector, err := metav1.LabelSelectorAsSelector(&machineSet.Spec.Selector) - if err != nil { - errors = append(errors, field.Invalid(fldPath.Child("selector"), machineSet.Spec.Selector, "invalid label selector.")) - } else { - labels := labels.Set(machineSet.Spec.Template.Labels) - if !selector.Matches(labels) { - errors = append(errors, field.Invalid(fldPath.Child("template", "metadata", "labels"), machineSet.Spec.Template.Labels, "`selector` does not match template `labels`")) - } - } - - return errors -} - -// DefaultingFunction sets default MachineSet field values -func (obj *MachineSet) Default() { - log.Printf("Defaulting fields for MachineSet %s\n", obj.Name) - - if obj.Spec.Replicas == nil { - obj.Spec.Replicas = new(int32) - *obj.Spec.Replicas = 1 - } - - if len(obj.Namespace) == 0 { - obj.Namespace = metav1.NamespaceDefault - } -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// MachineSetList contains a list of MachineSet -type MachineSetList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []MachineSet `json:"items"` -} - -func init() { - SchemeBuilder.Register(&MachineSet{}, &MachineSetList{}) -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/register.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/register.go deleted file mode 100644 index 0b2c132c58b3..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/register.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// NOTE: Boilerplate only. Ignore this file. 
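The selector/template coupling enforced by the `Validate` method above is easy to get wrong: the selector must be non-empty, parseable, and must match the template's labels. A quick sketch of the failing and passing cases against the vendored types:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	clusterv1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1"
)

func main() {
	ms := &clusterv1.MachineSet{}
	ms.Spec.Selector = metav1.LabelSelector{
		MatchLabels: map[string]string{"set": "workers"},
	}

	// Template labels do not match the selector: Validate reports an error.
	ms.Spec.Template.Labels = map[string]string{"set": "infra"}
	fmt.Println(len(ms.Validate())) // 1

	// Matching labels: no errors.
	ms.Spec.Template.Labels = map[string]string{"set": "workers"}
	fmt.Println(len(ms.Validate())) // 0
}
```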
- -// Package v1alpha1 contains API Schema definitions for the cluster v1alpha1 API group -// +k8s:openapi-gen=true -// +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=github.com/openshift/cluster-api/pkg/apis/cluster -// +k8s:defaulter-gen=TypeMeta -// +groupName=cluster.k8s.io -package v1alpha1 - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/runtime/scheme" -) - -var ( - // SchemeGroupVersion is group version used to register these objects - SchemeGroupVersion = schema.GroupVersion{Group: "cluster.k8s.io", Version: "v1alpha1"} - - // SchemeBuilder is used to add go types to the GroupVersionKind scheme - SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} - - // Required by pkg/client/... - // TODO(pwittrock): Remove this after removing pkg/client/... - AddToScheme = SchemeBuilder.AddToScheme -) - -// Required by pkg/client/listers/... -// TODO(pwittrock): Remove this after removing pkg/client/... -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index fd8e1f63a9c9..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,821 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by main. DO NOT EDIT. - -package v1alpha1 - -import ( - common "github.com/openshift/cluster-api/pkg/apis/cluster/common" - v1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - intstr "k8s.io/apimachinery/pkg/util/intstr" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *APIEndpoint) DeepCopyInto(out *APIEndpoint) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIEndpoint. -func (in *APIEndpoint) DeepCopy() *APIEndpoint { - if in == nil { - return nil - } - out := new(APIEndpoint) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Cluster) DeepCopyInto(out *Cluster) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. 
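The `SchemeBuilder` deleted above is how downstream clients taught a `runtime.Scheme` about the `cluster.k8s.io/v1alpha1` kinds before this removal. Roughly (a sketch, assuming the vendored packages are still importable):

```go
package main

import (
	"fmt"

	"k8s.io/client-go/kubernetes/scheme"

	clusterv1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1"
)

func main() {
	// Register the cluster-api kinds with client-go's global scheme so a
	// generic client can map Go types to GroupVersionKinds.
	if err := clusterv1.AddToScheme(scheme.Scheme); err != nil {
		panic(err)
	}

	gvks, _, err := scheme.Scheme.ObjectKinds(&clusterv1.Machine{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks[0]) // cluster.k8s.io/v1alpha1, Kind=Machine
}
```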
-func (in *Cluster) DeepCopy() *Cluster { - if in == nil { - return nil - } - out := new(Cluster) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Cluster) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterList) DeepCopyInto(out *ClusterList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Cluster, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList. -func (in *ClusterList) DeepCopy() *ClusterList { - if in == nil { - return nil - } - out := new(ClusterList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterNetworkingConfig) DeepCopyInto(out *ClusterNetworkingConfig) { - *out = *in - in.Services.DeepCopyInto(&out.Services) - in.Pods.DeepCopyInto(&out.Pods) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkingConfig. -func (in *ClusterNetworkingConfig) DeepCopy() *ClusterNetworkingConfig { - if in == nil { - return nil - } - out := new(ClusterNetworkingConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { - *out = *in - in.ClusterNetwork.DeepCopyInto(&out.ClusterNetwork) - in.ProviderSpec.DeepCopyInto(&out.ProviderSpec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. -func (in *ClusterSpec) DeepCopy() *ClusterSpec { - if in == nil { - return nil - } - out := new(ClusterSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { - *out = *in - if in.APIEndpoints != nil { - in, out := &in.APIEndpoints, &out.APIEndpoints - *out = make([]APIEndpoint, len(*in)) - copy(*out, *in) - } - if in.ProviderStatus != nil { - in, out := &in.ProviderStatus, &out.ProviderStatus - *out = new(runtime.RawExtension) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. -func (in *ClusterStatus) DeepCopy() *ClusterStatus { - if in == nil { - return nil - } - out := new(ClusterStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
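These generated helpers exist so the API types satisfy `runtime.Object` and so copies never share maps or pointers with the original. A small illustration of that guarantee (label values are illustrative):

```go
package main

import (
	"fmt"

	clusterv1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1"
)

func main() {
	orig := &clusterv1.Cluster{}
	orig.Labels = map[string]string{"env": "prod"}

	cp := orig.DeepCopy()
	cp.Labels["env"] = "staging"

	// The copy owns its own label map; mutating it leaves the original alone.
	fmt.Println(orig.Labels["env"], cp.Labels["env"]) // prod staging
}
```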
-func (in *LastOperation) DeepCopyInto(out *LastOperation) { - *out = *in - if in.Description != nil { - in, out := &in.Description, &out.Description - *out = new(string) - **out = **in - } - if in.LastUpdated != nil { - in, out := &in.LastUpdated, &out.LastUpdated - *out = (*in).DeepCopy() - } - if in.State != nil { - in, out := &in.State, &out.State - *out = new(string) - **out = **in - } - if in.Type != nil { - in, out := &in.Type, &out.Type - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastOperation. -func (in *LastOperation) DeepCopy() *LastOperation { - if in == nil { - return nil - } - out := new(LastOperation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Machine) DeepCopyInto(out *Machine) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Machine. -func (in *Machine) DeepCopy() *Machine { - if in == nil { - return nil - } - out := new(Machine) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Machine) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineClass) DeepCopyInto(out *MachineClass) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.ProviderSpec.DeepCopyInto(&out.ProviderSpec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineClass. -func (in *MachineClass) DeepCopy() *MachineClass { - if in == nil { - return nil - } - out := new(MachineClass) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *MachineClass) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineClassList) DeepCopyInto(out *MachineClassList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]MachineClass, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineClassList. -func (in *MachineClassList) DeepCopy() *MachineClassList { - if in == nil { - return nil - } - out := new(MachineClassList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *MachineClassList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MachineClassRef) DeepCopyInto(out *MachineClassRef) { - *out = *in - if in.ObjectReference != nil { - in, out := &in.ObjectReference, &out.ObjectReference - *out = new(v1.ObjectReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineClassRef. -func (in *MachineClassRef) DeepCopy() *MachineClassRef { - if in == nil { - return nil - } - out := new(MachineClassRef) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineDeployment) DeepCopyInto(out *MachineDeployment) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeployment. -func (in *MachineDeployment) DeepCopy() *MachineDeployment { - if in == nil { - return nil - } - out := new(MachineDeployment) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *MachineDeployment) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineDeploymentList) DeepCopyInto(out *MachineDeploymentList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]MachineDeployment, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentList. -func (in *MachineDeploymentList) DeepCopy() *MachineDeploymentList { - if in == nil { - return nil - } - out := new(MachineDeploymentList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *MachineDeploymentList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineDeploymentSpec) DeepCopyInto(out *MachineDeploymentSpec) { - *out = *in - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas - *out = new(int32) - **out = **in - } - in.Selector.DeepCopyInto(&out.Selector) - in.Template.DeepCopyInto(&out.Template) - if in.Strategy != nil { - in, out := &in.Strategy, &out.Strategy - *out = new(MachineDeploymentStrategy) - (*in).DeepCopyInto(*out) - } - if in.MinReadySeconds != nil { - in, out := &in.MinReadySeconds, &out.MinReadySeconds - *out = new(int32) - **out = **in - } - if in.RevisionHistoryLimit != nil { - in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit - *out = new(int32) - **out = **in - } - if in.ProgressDeadlineSeconds != nil { - in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentSpec. 
-func (in *MachineDeploymentSpec) DeepCopy() *MachineDeploymentSpec { - if in == nil { - return nil - } - out := new(MachineDeploymentSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineDeploymentStatus) DeepCopyInto(out *MachineDeploymentStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentStatus. -func (in *MachineDeploymentStatus) DeepCopy() *MachineDeploymentStatus { - if in == nil { - return nil - } - out := new(MachineDeploymentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineDeploymentStrategy) DeepCopyInto(out *MachineDeploymentStrategy) { - *out = *in - if in.RollingUpdate != nil { - in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(MachineRollingUpdateDeployment) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentStrategy. -func (in *MachineDeploymentStrategy) DeepCopy() *MachineDeploymentStrategy { - if in == nil { - return nil - } - out := new(MachineDeploymentStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineList) DeepCopyInto(out *MachineList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Machine, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineList. -func (in *MachineList) DeepCopy() *MachineList { - if in == nil { - return nil - } - out := new(MachineList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *MachineList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineRollingUpdateDeployment) DeepCopyInto(out *MachineRollingUpdateDeployment) { - *out = *in - if in.MaxUnavailable != nil { - in, out := &in.MaxUnavailable, &out.MaxUnavailable - *out = new(intstr.IntOrString) - **out = **in - } - if in.MaxSurge != nil { - in, out := &in.MaxSurge, &out.MaxSurge - *out = new(intstr.IntOrString) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineRollingUpdateDeployment. -func (in *MachineRollingUpdateDeployment) DeepCopy() *MachineRollingUpdateDeployment { - if in == nil { - return nil - } - out := new(MachineRollingUpdateDeployment) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MachineSet) DeepCopyInto(out *MachineSet) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSet. -func (in *MachineSet) DeepCopy() *MachineSet { - if in == nil { - return nil - } - out := new(MachineSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *MachineSet) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineSetList) DeepCopyInto(out *MachineSetList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]MachineSet, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetList. -func (in *MachineSetList) DeepCopy() *MachineSetList { - if in == nil { - return nil - } - out := new(MachineSetList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *MachineSetList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineSetSpec) DeepCopyInto(out *MachineSetSpec) { - *out = *in - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas - *out = new(int32) - **out = **in - } - in.Selector.DeepCopyInto(&out.Selector) - in.Template.DeepCopyInto(&out.Template) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetSpec. -func (in *MachineSetSpec) DeepCopy() *MachineSetSpec { - if in == nil { - return nil - } - out := new(MachineSetSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineSetStatus) DeepCopyInto(out *MachineSetStatus) { - *out = *in - if in.ErrorReason != nil { - in, out := &in.ErrorReason, &out.ErrorReason - *out = new(common.MachineSetStatusError) - **out = **in - } - if in.ErrorMessage != nil { - in, out := &in.ErrorMessage, &out.ErrorMessage - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetStatus. -func (in *MachineSetStatus) DeepCopy() *MachineSetStatus { - if in == nil { - return nil - } - out := new(MachineSetStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MachineSpec) DeepCopyInto(out *MachineSpec) { - *out = *in - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Taints != nil { - in, out := &in.Taints, &out.Taints - *out = make([]v1.Taint, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.ProviderSpec.DeepCopyInto(&out.ProviderSpec) - out.Versions = in.Versions - if in.ConfigSource != nil { - in, out := &in.ConfigSource, &out.ConfigSource - *out = new(v1.NodeConfigSource) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSpec. -func (in *MachineSpec) DeepCopy() *MachineSpec { - if in == nil { - return nil - } - out := new(MachineSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineStatus) DeepCopyInto(out *MachineStatus) { - *out = *in - if in.NodeRef != nil { - in, out := &in.NodeRef, &out.NodeRef - *out = new(v1.ObjectReference) - **out = **in - } - if in.LastUpdated != nil { - in, out := &in.LastUpdated, &out.LastUpdated - *out = (*in).DeepCopy() - } - if in.Versions != nil { - in, out := &in.Versions, &out.Versions - *out = new(MachineVersionInfo) - **out = **in - } - if in.ErrorReason != nil { - in, out := &in.ErrorReason, &out.ErrorReason - *out = new(common.MachineStatusError) - **out = **in - } - if in.ErrorMessage != nil { - in, out := &in.ErrorMessage, &out.ErrorMessage - *out = new(string) - **out = **in - } - if in.ProviderStatus != nil { - in, out := &in.ProviderStatus, &out.ProviderStatus - *out = new(runtime.RawExtension) - (*in).DeepCopyInto(*out) - } - if in.Addresses != nil { - in, out := &in.Addresses, &out.Addresses - *out = make([]v1.NodeAddress, len(*in)) - copy(*out, *in) - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]v1.NodeCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.LastOperation != nil { - in, out := &in.LastOperation, &out.LastOperation - *out = new(LastOperation) - (*in).DeepCopyInto(*out) - } - if in.Phase != nil { - in, out := &in.Phase, &out.Phase - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineStatus. -func (in *MachineStatus) DeepCopy() *MachineStatus { - if in == nil { - return nil - } - out := new(MachineStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineTemplateSpec) DeepCopyInto(out *MachineTemplateSpec) { - *out = *in - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineTemplateSpec. -func (in *MachineTemplateSpec) DeepCopy() *MachineTemplateSpec { - if in == nil { - return nil - } - out := new(MachineTemplateSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineVersionInfo) DeepCopyInto(out *MachineVersionInfo) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineVersionInfo. 
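In practice these copies matter because controllers read objects out of shared informer caches, and mutating a cached object corrupts it for every other reader. A hedged usage sketch of the copy-before-mutate rule (the `updatePhase` helper is hypothetical; the `Machine` type and its `DeepCopy` are the ones defined above):

```go
package sketch

import (
	machinev1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1"
)

// updatePhase never writes to the cached object; it deep-copies
// first, mutates the copy, and returns it for an API update.
func updatePhase(cached *machinev1.Machine, phase string) *machinev1.Machine {
	m := cached.DeepCopy() // independent copy, safe to mutate
	m.Status.Phase = &phase
	return m
}
```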
-func (in *MachineVersionInfo) DeepCopy() *MachineVersionInfo { - if in == nil { - return nil - } - out := new(MachineVersionInfo) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NetworkRanges) DeepCopyInto(out *NetworkRanges) { - *out = *in - if in.CIDRBlocks != nil { - in, out := &in.CIDRBlocks, &out.CIDRBlocks - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkRanges. -func (in *NetworkRanges) DeepCopy() *NetworkRanges { - if in == nil { - return nil - } - out := new(NetworkRanges) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProviderSpec) DeepCopyInto(out *ProviderSpec) { - *out = *in - if in.Value != nil { - in, out := &in.Value, &out.Value - *out = new(runtime.RawExtension) - (*in).DeepCopyInto(*out) - } - if in.ValueFrom != nil { - in, out := &in.ValueFrom, &out.ValueFrom - *out = new(ProviderSpecSource) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderSpec. -func (in *ProviderSpec) DeepCopy() *ProviderSpec { - if in == nil { - return nil - } - out := new(ProviderSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProviderSpecSource) DeepCopyInto(out *ProviderSpecSource) { - *out = *in - if in.MachineClass != nil { - in, out := &in.MachineClass, &out.MachineClass - *out = new(MachineClassRef) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderSpecSource. -func (in *ProviderSpecSource) DeepCopy() *ProviderSpecSource { - if in == nil { - return nil - } - out := new(ProviderSpecSource) - in.DeepCopyInto(out) - return out -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/common/BUILD.bazel b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/common/BUILD.bazel deleted file mode 100644 index 311c852d40f7..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/common/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "consts.go", - "plugins.go", - ], - importpath = "github.com/openshift/cluster-api/pkg/apis/machine/common", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/pkg/errors:go_default_library", - "//vendor/k8s.io/klog:go_default_library", - ], -) diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/common/consts.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/common/consts.go deleted file mode 100644 index 287480250d57..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/common/consts.go +++ /dev/null @@ -1,114 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package common - -// Constants aren't automatically generated for unversioned packages. -// Instead share the same constant for all versioned packages -type MachineStatusError string - -const ( - // Represents that the combination of configuration in the MachineSpec - // is not supported by this cluster. This is not a transient error, but - // indicates a state that must be fixed before progress can be made. - // - // Example: the ProviderSpec specifies an instance type that doesn't exist, - InvalidConfigurationMachineError MachineStatusError = "InvalidConfiguration" - - // This indicates that the MachineSpec has been updated in a way that - // is not supported for reconciliation on this cluster. The spec may be - // completely valid from a configuration standpoint, but the controller - // does not support changing the real world state to match the new - // spec. - // - // Example: the responsible controller is not capable of changing the - // container runtime from docker to rkt. - UnsupportedChangeMachineError MachineStatusError = "UnsupportedChange" - - // This generally refers to exceeding one's quota in a cloud provider, - // or running out of physical machines in an on-premise environment. - InsufficientResourcesMachineError MachineStatusError = "InsufficientResources" - - // There was an error while trying to create a Node to match this - // Machine. This may indicate a transient problem that will be fixed - // automatically with time, such as a service outage, or a terminal - // error during creation that doesn't match a more specific - // MachineStatusError value. - // - // Example: timeout trying to connect to GCE. - CreateMachineError MachineStatusError = "CreateError" - - // An error was encountered while trying to delete the Node that this - // Machine represents. This could be a transient or terminal error, but - // will only be observable if the provider's Machine controller has - // added a finalizer to the object to more gracefully handle deletions. - // - // Example: cannot resolve EC2 IP address. - DeleteMachineError MachineStatusError = "DeleteError" - - // This error indicates that the machine did not join the cluster - // as a new node within the expected timeframe after instance - // creation at the provider succeeded - // - // Example use case: A controller that deletes Machines which do - // not result in a Node joining the cluster within a given timeout - // and that are managed by a MachineSet - JoinClusterTimeoutMachineError = "JoinClusterTimeoutError" -) - -type ClusterStatusError string - -const ( - // InvalidConfigurationClusterError indicates that the cluster - // configuration is invalid. - InvalidConfigurationClusterError ClusterStatusError = "InvalidConfiguration" - - // UnsupportedChangeClusterError indicates that the cluster - // spec has been updated in an unsupported way. That cannot be - // reconciled. - UnsupportedChangeClusterError ClusterStatusError = "UnsupportedChange" - - // CreateClusterError indicates that an error was encountered - // when trying to create the cluster. 
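The `MachineStatusError` constants above split machine failures into terminal configuration problems and potentially transient provisioning errors, which is exactly the distinction a remediation loop needs. A sketch of how a hypothetical controller might branch on them (the `shouldRetry` helper is illustrative, not part of the package):

```go
package main

import (
	"fmt"

	"github.com/openshift/cluster-api/pkg/apis/machine/common"
)

// shouldRetry is a hypothetical helper: terminal configuration
// errors need human intervention, while create and quota errors
// may resolve on their own (e.g. a passing cloud outage).
func shouldRetry(reason common.MachineStatusError) bool {
	switch reason {
	case common.InvalidConfigurationMachineError,
		common.UnsupportedChangeMachineError:
		return false // fix the spec; retrying will not help
	case common.CreateMachineError,
		common.InsufficientResourcesMachineError:
		return true // may clear up over time
	default:
		return true
	}
}

func main() {
	fmt.Println(shouldRetry(common.CreateMachineError)) // true
}
```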
- CreateClusterError ClusterStatusError = "CreateError" - - // UpdateClusterError indicates that an error was encountered - // when trying to update the cluster. - UpdateClusterError ClusterStatusError = "UpdateError" - - // DeleteClusterError indicates that an error was encountered - // when trying to delete the cluster. - DeleteClusterError ClusterStatusError = "DeleteError" -) - -type MachineSetStatusError string - -const ( - // Represents that the combination of configuration in the MachineTemplateSpec - // is not supported by this cluster. This is not a transient error, but - // indicates a state that must be fixed before progress can be made. - // - // Example: the ProviderSpec specifies an instance type that doesn't exist. - InvalidConfigurationMachineSetError MachineSetStatusError = "InvalidConfiguration" -) - -type MachineDeploymentStrategyType string - -const ( - // Replace the old MachineSet by new one using rolling update - // i.e. gradually scale down the old MachineSet and scale up the new one. - RollingUpdateMachineDeploymentStrategyType MachineDeploymentStrategyType = "RollingUpdate" -) diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/common/plugins.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/common/plugins.go deleted file mode 100644 index 8c2083d88c08..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/common/plugins.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package common - -import ( - "sync" - - "github.com/pkg/errors" - "k8s.io/klog" -) - -var ( - providersMutex sync.Mutex - providers = make(map[string]interface{}) -) - -// RegisterClusterProvisioner registers a ClusterProvisioner by name. This -// is expected to happen during app startup. 
-func RegisterClusterProvisioner(name string, provisioner interface{}) { - providersMutex.Lock() - defer providersMutex.Unlock() - if _, found := providers[name]; found { - klog.Fatalf("Cluster provisioner %q was registered twice", name) - } - klog.V(1).Infof("Registered cluster provisioner %q", name) - providers[name] = provisioner -} - -func ClusterProvisioner(name string) (interface{}, error) { - providersMutex.Lock() - defer providersMutex.Unlock() - provisioner, found := providers[name] - if !found { - return nil, errors.Errorf("unable to find provisioner for %s", name) - } - return provisioner, nil -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/BUILD.bazel b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/BUILD.bazel deleted file mode 100644 index 3fc6c32d0d0e..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/BUILD.bazel +++ /dev/null @@ -1,52 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "cluster_types.go", - "common_types.go", - "defaults.go", - "doc.go", - "machine_types.go", - "machineclass_types.go", - "machinedeployment_types.go", - "machineset_types.go", - "register.go", - "zz_generated.deepcopy.go", - ], - importpath = "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//pkg/apis/machine/common:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - "//vendor/sigs.k8s.io/controller-runtime/pkg/runtime/scheme:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "cluster_types_test.go", - "machine_types_test.go", - "machinedeployment_types_test.go", - "machineset_types_test.go", - "v1alpha1_suite_test.go", - ], - embed = [":go_default_library"], - deps = [ - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library", - "//vendor/sigs.k8s.io/controller-runtime/pkg/envtest:go_default_library", - ], -) diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/cluster_types.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/cluster_types.go deleted file mode 100644 index 134a63abb614..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/cluster_types.go +++ /dev/null @@ -1,165 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
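plugins.go above is a conventional mutex-guarded registry keyed by provider name: registration is expected from an init path and fatals on duplicates, while lookup returns an error rather than panicking. A usage sketch (the `fakeProvisioner` type is hypothetical; the registry stores plain `interface{}` values, so any type can be registered):

```go
package main

import (
	"fmt"

	"github.com/openshift/cluster-api/pkg/apis/machine/common"
)

// fakeProvisioner is a hypothetical stand-in for a real
// provider implementation.
type fakeProvisioner struct{}

func init() {
	// Typically done from a provider package's init().
	common.RegisterClusterProvisioner("example", &fakeProvisioner{})
}

func main() {
	p, err := common.ClusterProvisioner("example")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", p) // *main.fakeProvisioner
}
```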
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - - "github.com/openshift/cluster-api/pkg/apis/machine/common" - "k8s.io/apimachinery/pkg/util/validation/field" -) - -const ClusterFinalizer = "cluster.machine.openshift.io" - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -/// [Cluster] -// Cluster is the Schema for the clusters API -// +k8s:openapi-gen=true -// +kubebuilder:subresource:status -type Cluster struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec ClusterSpec `json:"spec,omitempty"` - Status ClusterStatus `json:"status,omitempty"` -} - -/// [Cluster] - -/// [ClusterSpec] -// ClusterSpec defines the desired state of Cluster -type ClusterSpec struct { - // Cluster network configuration - ClusterNetwork ClusterNetworkingConfig `json:"clusterNetwork"` - - // Provider-specific serialized configuration to use during - // cluster creation. It is recommended that providers maintain - // their own versioned API types that should be - // serialized/deserialized from this field. - // +optional - ProviderSpec ProviderSpec `json:"providerSpec,omitempty"` -} - -/// [ClusterSpec] - -/// [ClusterNetworkingConfig] -// ClusterNetworkingConfig specifies the different networking -// parameters for a cluster. -type ClusterNetworkingConfig struct { - // The network ranges from which service VIPs are allocated. - Services NetworkRanges `json:"services"` - - // The network ranges from which Pod networks are allocated. - Pods NetworkRanges `json:"pods"` - - // Domain name for services. - ServiceDomain string `json:"serviceDomain"` -} - -/// [ClusterNetworkingConfig] - -/// [NetworkRanges] -// NetworkRanges represents ranges of network addresses. -type NetworkRanges struct { - CIDRBlocks []string `json:"cidrBlocks"` -} - -/// [NetworkRanges] - -/// [ClusterStatus] -// ClusterStatus defines the observed state of Cluster -type ClusterStatus struct { - // APIEndpoint represents the endpoint to communicate with the IP. - // +optional - APIEndpoints []APIEndpoint `json:"apiEndpoints,omitempty"` - - // NB: Eventually we will redefine ErrorReason as ClusterStatusError once the - // following issue is fixed. - // https://github.com/kubernetes-incubator/apiserver-builder/issues/176 - - // If set, indicates that there is a problem reconciling the - // state, and will be set to a token value suitable for - // programmatic interpretation. - // +optional - ErrorReason common.ClusterStatusError `json:"errorReason,omitempty"` - - // If set, indicates that there is a problem reconciling the - // state, and will be set to a descriptive error message. - // +optional - ErrorMessage string `json:"errorMessage,omitempty"` - - // Provider-specific status. - // It is recommended that providers maintain their - // own versioned API types that should be - // serialized/deserialized from this field. 
- // +optional - ProviderStatus *runtime.RawExtension `json:"providerStatus,omitempty"` -} - -/// [ClusterStatus] - -/// [APIEndpoint] -// APIEndpoint represents a reachable Kubernetes API endpoint. -type APIEndpoint struct { - // The hostname on which the API server is serving. - Host string `json:"host"` - - // The port on which the API server is serving. - Port int `json:"port"` -} - -/// [APIEndpoint] - -func (o *Cluster) Validate() field.ErrorList { - errors := field.ErrorList{} - // perform validation here and add to errors using field.Invalid - if o.Spec.ClusterNetwork.ServiceDomain == "" { - errors = append(errors, field.Invalid( - field.NewPath("Spec", "ClusterNetwork", "ServiceDomain"), - o.Spec.ClusterNetwork.ServiceDomain, - "invalid cluster configuration: missing Cluster.Spec.ClusterNetwork.ServiceDomain")) - } - if len(o.Spec.ClusterNetwork.Pods.CIDRBlocks) == 0 { - errors = append(errors, field.Invalid( - field.NewPath("Spec", "ClusterNetwork", "Pods"), - o.Spec.ClusterNetwork.Pods, - "invalid cluster configuration: missing Cluster.Spec.ClusterNetwork.Pods")) - } - if len(o.Spec.ClusterNetwork.Services.CIDRBlocks) == 0 { - errors = append(errors, field.Invalid( - field.NewPath("Spec", "ClusterNetwork", "Services"), - o.Spec.ClusterNetwork.Services, - "invalid cluster configuration: missing Cluster.Spec.ClusterNetwork.Services")) - } - return errors -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ClusterList contains a list of Cluster -type ClusterList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []Cluster `json:"items"` -} - -func init() { - SchemeBuilder.Register(&Cluster{}, &ClusterList{}) -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/common_types.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/common_types.go deleted file mode 100644 index 7884754101dd..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/common_types.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - corev1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// ProviderSpec defines the configuration to use during node creation. -type ProviderSpec struct { - - // No more than one of the following may be specified. - - // Value is an inlined, serialized representation of the resource - // configuration. It is recommended that providers maintain their own - // versioned API types that should be serialized/deserialized from this - // field, akin to component config. - // +optional - Value *runtime.RawExtension `json:"value,omitempty"` - - // Source for the provider configuration. Cannot be used if value is - // not empty. 
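`Cluster.Validate` above accumulates `field.Invalid` entries rather than failing fast, so a caller gets every configuration problem in one pass. A sketch of invoking it on a deliberately incomplete object:

```go
package main

import (
	"fmt"

	machinev1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1"
)

func main() {
	// Empty spec: ServiceDomain and the Pods and Services CIDR
	// blocks are all missing.
	c := &machinev1.Cluster{}
	for _, e := range c.Validate() {
		fmt.Println(e.Error()) // three errors, one per missing field
	}
}
```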
- // +optional - ValueFrom *ProviderSpecSource `json:"valueFrom,omitempty"` -} - -// ProviderSpecSource represents a source for the provider-specific -// resource configuration. -type ProviderSpecSource struct { - // The machine class from which the provider config should be sourced. - // +optional - MachineClass *MachineClassRef `json:"machineClass,omitempty"` -} - -// MachineClassRef is a reference to the MachineClass object. Controllers should find the right MachineClass using this reference. -type MachineClassRef struct { - // +optional - *corev1.ObjectReference `json:",inline"` - - // Provider is the name of the cloud-provider which MachineClass is intended for. - // +optional - Provider string `json:"provider,omitempty"` -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/defaults.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/defaults.go deleted file mode 100644 index 354cfa3f6ea0..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/defaults.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "github.com/openshift/cluster-api/pkg/apis/machine/common" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" -) - -// PopulateDefaultsMachineDeployment fills in default field values -// Currently it is called after reading objects, but it could be called in an admission webhook also -func PopulateDefaultsMachineDeployment(d *MachineDeployment) { - if d.Spec.Replicas == nil { - d.Spec.Replicas = new(int32) - *d.Spec.Replicas = 1 - } - - if d.Spec.MinReadySeconds == nil { - d.Spec.MinReadySeconds = new(int32) - *d.Spec.MinReadySeconds = 0 - } - - if d.Spec.RevisionHistoryLimit == nil { - d.Spec.RevisionHistoryLimit = new(int32) - *d.Spec.RevisionHistoryLimit = 1 - } - - if d.Spec.ProgressDeadlineSeconds == nil { - d.Spec.ProgressDeadlineSeconds = new(int32) - *d.Spec.ProgressDeadlineSeconds = 600 - } - - if d.Spec.Strategy == nil { - d.Spec.Strategy = &MachineDeploymentStrategy{} - } - - if d.Spec.Strategy.Type == "" { - d.Spec.Strategy.Type = common.RollingUpdateMachineDeploymentStrategyType - } - - // Default RollingUpdate strategy only if strategy type is RollingUpdate. 
- if d.Spec.Strategy.Type == common.RollingUpdateMachineDeploymentStrategyType { - if d.Spec.Strategy.RollingUpdate == nil { - d.Spec.Strategy.RollingUpdate = &MachineRollingUpdateDeployment{} - } - if d.Spec.Strategy.RollingUpdate.MaxSurge == nil { - ios1 := intstr.FromInt(1) - d.Spec.Strategy.RollingUpdate.MaxSurge = &ios1 - } - if d.Spec.Strategy.RollingUpdate.MaxUnavailable == nil { - ios0 := intstr.FromInt(0) - d.Spec.Strategy.RollingUpdate.MaxUnavailable = &ios0 - } - } - - if len(d.Namespace) == 0 { - d.Namespace = metav1.NamespaceDefault - } -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/doc.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/doc.go deleted file mode 100644 index 349faed081fc..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package v1beta1 contains API Schema definitions for the machine v1beta1 API group -// +k8s:openapi-gen=true -// +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=github.com/openshift/cluster-api/pkg/apis/machine -// +k8s:defaulter-gen=TypeMeta -// +groupName=machine.openshift.io -package v1beta1 diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machine_types.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machine_types.go deleted file mode 100644 index 657d7aeeaa3c..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machine_types.go +++ /dev/null @@ -1,227 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
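`PopulateDefaultsMachineDeployment` above only fills fields that are still nil, which makes it safe to call repeatedly; after one pass an otherwise-empty deployment ends up with one replica and a 1-up/0-down rolling update in the default namespace. A quick sketch:

```go
package main

import (
	"fmt"

	machinev1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1"
)

func main() {
	d := &machinev1.MachineDeployment{}
	machinev1.PopulateDefaultsMachineDeployment(d)

	fmt.Println(*d.Spec.Replicas)                                      // 1
	fmt.Println(d.Spec.Strategy.Type)                                  // RollingUpdate
	fmt.Println(d.Spec.Strategy.RollingUpdate.MaxSurge.String())       // 1
	fmt.Println(d.Spec.Strategy.RollingUpdate.MaxUnavailable.String()) // 0
	fmt.Println(d.Namespace)                                           // default
}
```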
-*/ - -package v1beta1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - - "github.com/openshift/cluster-api/pkg/apis/machine/common" -) - -// Finalizer is set on PrepareForCreate callback -const MachineFinalizer = "machine.machine.openshift.io" - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -/// [Machine] -// Machine is the Schema for the machines API -// +k8s:openapi-gen=true -// +kubebuilder:subresource:status -type Machine struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec MachineSpec `json:"spec,omitempty"` - Status MachineStatus `json:"status,omitempty"` -} - -/// [Machine] - -/// [MachineSpec] -// MachineSpec defines the desired state of Machine -type MachineSpec struct { - // ObjectMeta will autopopulate the Node created. Use this to - // indicate what labels, annotations, name prefix, etc., should be used - // when creating the Node. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Taints is the full, authoritative list of taints to apply to the corresponding - // Node. This list will overwrite any modifications made to the Node on - // an ongoing basis. - // +optional - Taints []corev1.Taint `json:"taints,omitempty"` - - // ProviderSpec details Provider-specific configuration to use during node creation. - // +optional - ProviderSpec ProviderSpec `json:"providerSpec"` - - // Versions of key software to use. This field is optional at cluster - // creation time, and omitting the field indicates that the cluster - // installation tool should select defaults for the user. These - // defaults may differ based on the cluster installer, but the tool - // should populate the values it uses when persisting Machine objects. - // A Machine spec missing this field at runtime is invalid. - // +optional - Versions MachineVersionInfo `json:"versions,omitempty"` - - // ConfigSource is used to populate in the associated Node for dynamic kubelet config. This - // field already exists in Node, so any updates to it in the Machine - // spec will be automatically copied to the linked NodeRef from the - // status. The rest of dynamic kubelet config support should then work - // as-is. - // +optional - ConfigSource *corev1.NodeConfigSource `json:"configSource,omitempty"` -} - -/// [MachineSpec] - -/// [MachineStatus] -// MachineStatus defines the observed state of Machine -type MachineStatus struct { - // NodeRef will point to the corresponding Node if it exists. - // +optional - NodeRef *corev1.ObjectReference `json:"nodeRef,omitempty"` - - // LastUpdated identifies when this status was last observed. - // +optional - LastUpdated *metav1.Time `json:"lastUpdated,omitempty"` - - // Versions specifies the current versions of software on the corresponding Node (if it - // exists). This is provided for a few reasons: - // - // 1) It is more convenient than checking the NodeRef, traversing it to - // the Node, and finding the appropriate field in Node.Status.NodeInfo - // (which uses different field names and formatting). - // 2) It removes some of the dependency on the structure of the Node, - // so that if the structure of Node.Status.NodeInfo changes, only - // machine controllers need to be updated, rather than every client - // of the Machines API. - // 3) There is no other simple way to check the control plane - // version. 
A client would have to connect directly to the apiserver - // running on the target node in order to find out its version. - // +optional - Versions *MachineVersionInfo `json:"versions,omitempty"` - - // ErrorReason will be set in the event that there is a terminal problem - // reconciling the Machine and will contain a succinct value suitable - // for machine interpretation. - // - // This field should not be set for transitive errors that a controller - // faces that are expected to be fixed automatically over - // time (like service outages), but instead indicate that something is - // fundamentally wrong with the Machine's spec or the configuration of - // the controller, and that manual intervention is required. Examples - // of terminal errors would be invalid combinations of settings in the - // spec, values that are unsupported by the controller, or the - // responsible controller itself being critically misconfigured. - // - // Any transient errors that occur during the reconciliation of Machines - // can be added as events to the Machine object and/or logged in the - // controller's output. - // +optional - ErrorReason *common.MachineStatusError `json:"errorReason,omitempty"` - - // ErrorMessage will be set in the event that there is a terminal problem - // reconciling the Machine and will contain a more verbose string suitable - // for logging and human consumption. - // - // This field should not be set for transitive errors that a controller - // faces that are expected to be fixed automatically over - // time (like service outages), but instead indicate that something is - // fundamentally wrong with the Machine's spec or the configuration of - // the controller, and that manual intervention is required. Examples - // of terminal errors would be invalid combinations of settings in the - // spec, values that are unsupported by the controller, or the - // responsible controller itself being critically misconfigured. - // - // Any transient errors that occur during the reconciliation of Machines - // can be added as events to the Machine object and/or logged in the - // controller's output. - // +optional - ErrorMessage *string `json:"errorMessage,omitempty"` - - // ProviderStatus details a Provider-specific status. - // It is recommended that providers maintain their - // own versioned API types that should be - // serialized/deserialized from this field. - // +optional - ProviderStatus *runtime.RawExtension `json:"providerStatus,omitempty"` - - // Addresses is a list of addresses assigned to the machine. Queried from cloud provider, if available. - // +optional - Addresses []corev1.NodeAddress `json:"addresses,omitempty"` - - // Conditions lists the conditions synced from the node conditions of the corresponding node-object. - // Machine-controller is responsible for keeping conditions up-to-date. - // MachineSet controller will be taking these conditions as a signal to decide if - // machine is healthy or needs to be replaced. - // Refer: https://kubernetes.io/docs/concepts/architecture/nodes/#condition - // +optional - Conditions []corev1.NodeCondition `json:"conditions,omitempty"` - - // LastOperation describes the last-operation performed by the machine-controller. - // This API should be useful as a history in terms of the latest operation performed on the - // specific machine. It should also convey the state of the latest-operation for example if - // it is still on-going, failed or completed successfully. 
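The error and bookkeeping fields above are pointers precisely so that "unset" stays distinguishable from an empty value. A sketch of a controller recording a terminal failure together with its last operation (the `markFailed` helper and its literal values are illustrative):

```go
package sketch

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/openshift/cluster-api/pkg/apis/machine/common"
	machinev1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1"
)

// markFailed fills the terminal-error fields; transient problems
// would instead be surfaced as events, per the comments above.
func markFailed(m *machinev1.Machine, msg string) {
	reason := common.InvalidConfigurationMachineError
	now := metav1.Now()
	state, opType := "Failed", "Create"

	m.Status.ErrorReason = &reason
	m.Status.ErrorMessage = &msg
	m.Status.LastOperation = &machinev1.LastOperation{
		Description: &msg,
		LastUpdated: &now,
		State:       &state,
		Type:        &opType,
	}
}
```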
- // +optional - LastOperation *LastOperation `json:"lastOperation,omitempty"` - - // Phase represents the current phase of machine actuation. - // E.g. Pending, Running, Terminating, Failed etc. - // +optional - Phase *string `json:"phase,omitempty"` -} - -// LastOperation represents the detail of the last performed operation on the MachineObject. -type LastOperation struct { - // Description is the human-readable description of the last operation. - Description *string `json:"description,omitempty"` - - // LastUpdated is the timestamp at which LastOperation API was last-updated. - LastUpdated *metav1.Time `json:"lastUpdated,omitempty"` - - // State is the current status of the last performed operation. - // E.g. Processing, Failed, Successful etc - State *string `json:"state,omitempty"` - - // Type is the type of operation which was last performed. - // E.g. Create, Delete, Update etc - Type *string `json:"type,omitempty"` -} - -/// [MachineStatus] - -/// [MachineVersionInfo] -type MachineVersionInfo struct { - // Kubelet is the semantic version of kubelet to run - Kubelet string `json:"kubelet"` - - // ControlPlane is the semantic version of the Kubernetes control plane to - // run. This should only be populated when the machine is a - // control plane. - // +optional - ControlPlane string `json:"controlPlane,omitempty"` -} - -/// [MachineVersionInfo] - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// MachineList contains a list of Machine -type MachineList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []Machine `json:"items"` -} - -func init() { - SchemeBuilder.Register(&Machine{}, &MachineList{}) -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machineclass_types.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machineclass_types.go deleted file mode 100644 index 85a1ea37d0e6..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machineclass_types.go +++ /dev/null @@ -1,78 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -// +genclient -// +genclient:noStatus -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -/// [MachineClass] -// MachineClass can be used to templatize and re-use provider configuration -// across multiple Machines / MachineSets / MachineDeployments. -// +k8s:openapi-gen=true -// +resource:path=machineclasses -type MachineClass struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - - // The total capacity available on this machine type (cpu/memory/disk). 
- // - // WARNING: It is up to the creator of the MachineClass to ensure that - // this field is consistent with the underlying machine that will - // be provisioned when this class is used, to inform higher level - // automation (e.g. the cluster autoscaler). - // TODO(hardikdr) Add allocatable field once requirements are clear from autoscaler-clusterapi // integration topic. - // Capacity corev1.ResourceList `json:"capacity"` - - // How much capacity is actually allocatable on this machine. - // Must be equal to or less than the capacity, and when less - // indicates the resources reserved for system overhead. - // - // WARNING: It is up to the creator of the MachineClass to ensure that - // this field is consistent with the underlying machine that will - // be provisioned when this class is used, to inform higher level - // automation (e.g. the cluster autoscaler). - // TODO(hardikdr) Add allocatable field once requirements are clear from autoscaler-clusterapi // integration topic. - // Allocatable corev1.ResourceList `json:"allocatable"` - - // Provider-specific configuration to use during node creation. - ProviderSpec runtime.RawExtension `json:"providerSpec"` - - // TODO: should this use an api.ObjectReference to a 'MachineTemplate' instead? - // A link to the MachineTemplate that will be used to create provider - // specific configuration for Machines of this class. - // MachineTemplate corev1.ObjectReference `json:machineTemplate` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// MachineClassList contains a list of MachineClasses -type MachineClassList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []MachineClass `json:"items"` -} - -func init() { - SchemeBuilder.Register(&MachineClass{}, &MachineClassList{}) -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machinedeployment_types.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machinedeployment_types.go deleted file mode 100644 index 8e892ecc07f1..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machinedeployment_types.go +++ /dev/null @@ -1,194 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - - "github.com/openshift/cluster-api/pkg/apis/machine/common" -) - -/// [MachineDeploymentSpec] -// MachineDeploymentSpec defines the desired state of MachineDeployment -type MachineDeploymentSpec struct { - // Number of desired machines. Defaults to 1. - // This is a pointer to distinguish between explicit zero and not specified. - Replicas *int32 `json:"replicas,omitempty"` - - // Label selector for machines. Existing MachineSets whose machines are - // selected by this will be the ones affected by this deployment. - // It must match the machine template's labels. 
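The `providerSpec` payload above is an opaque `runtime.RawExtension`, so each provider round-trips its own versioned configuration through it. A hedged sketch of packing a hypothetical provider config into that field (`exampleProviderConfig` is invented; real providers define their own versioned API types):

```go
package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
)

// exampleProviderConfig is hypothetical; it stands in for a
// provider's versioned config type.
type exampleProviderConfig struct {
	InstanceType string `json:"instanceType"`
	Region       string `json:"region"`
}

func main() {
	raw, err := json.Marshal(exampleProviderConfig{
		InstanceType: "m4.large",
		Region:       "us-east-1",
	})
	if err != nil {
		panic(err)
	}
	// This is what ends up in MachineClass.ProviderSpec, or in
	// ProviderSpec.Value on an individual Machine.
	ext := runtime.RawExtension{Raw: raw}
	fmt.Println(string(ext.Raw))
}
```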
- Selector metav1.LabelSelector `json:"selector"` - - // Template describes the machines that will be created. - Template MachineTemplateSpec `json:"template"` - - // The deployment strategy to use to replace existing machines with - // new ones. - // +optional - Strategy *MachineDeploymentStrategy `json:"strategy,omitempty"` - - // Minimum number of seconds for which a newly created machine should - // be ready. - // Defaults to 0 (machine will be considered available as soon as it - // is ready) - // +optional - MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` - - // The number of old MachineSets to retain to allow rollback. - // This is a pointer to distinguish between explicit zero and not specified. - // Defaults to 1. - // +optional - RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` - - // Indicates that the deployment is paused. - // +optional - Paused bool `json:"paused,omitempty"` - - // The maximum time in seconds for a deployment to make progress before it - // is considered to be failed. The deployment controller will continue to - // process failed deployments and a condition with a ProgressDeadlineExceeded - // reason will be surfaced in the deployment status. Note that progress will - // not be estimated during the time a deployment is paused. Defaults to 600s. - ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"` -} - -/// [MachineDeploymentSpec] - -/// [MachineDeploymentStrategy] -// MachineDeploymentStrategy describes how to replace existing machines -// with new ones. -type MachineDeploymentStrategy struct { - // Type of deployment. Currently the only supported strategy is - // "RollingUpdate". - // Default is RollingUpdate. - // +optional - Type common.MachineDeploymentStrategyType `json:"type,omitempty"` - - // Rolling update config params. Present only if - // MachineDeploymentStrategyType = RollingUpdate. - // +optional - RollingUpdate *MachineRollingUpdateDeployment `json:"rollingUpdate,omitempty"` -} - -/// [MachineDeploymentStrategy] - -/// [MachineRollingUpdateDeployment] -// Spec to control the desired behavior of rolling update. -type MachineRollingUpdateDeployment struct { - // The maximum number of machines that can be unavailable during the update. - // Value can be an absolute number (ex: 5) or a percentage of desired - // machines (ex: 10%). - // Absolute number is calculated from percentage by rounding down. - // This can not be 0 if MaxSurge is 0. - // Defaults to 0. - // Example: when this is set to 30%, the old MachineSet can be scaled - // down to 70% of desired machines immediately when the rolling update - // starts. Once new machines are ready, old MachineSet can be scaled - // down further, followed by scaling up the new MachineSet, ensuring - // that the total number of machines available at all times - // during the update is at least 70% of desired machines. - // +optional - MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` - - // The maximum number of machines that can be scheduled above the - // desired number of machines. - // Value can be an absolute number (ex: 5) or a percentage of - // desired machines (ex: 10%). - // This can not be 0 if MaxUnavailable is 0. - // Absolute number is calculated from percentage by rounding up. - // Defaults to 1. 
- // Example: when this is set to 30%, the new MachineSet can be scaled - // up immediately when the rolling update starts, such that the total - // number of old and new machines do not exceed 130% of desired - // machines. Once old machines have been killed, new MachineSet can - // be scaled up further, ensuring that total number of machines running - // at any time during the update is at most 130% of desired machines. - // +optional - MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` -} - -/// [MachineRollingUpdateDeployment] - -/// [MachineDeploymentStatus] -// MachineDeploymentStatus defines the observed state of MachineDeployment -type MachineDeploymentStatus struct { - // The generation observed by the deployment controller. - // +optional - ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - - // Total number of non-terminated machines targeted by this deployment - // (their labels match the selector). - // +optional - Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` - - // Total number of non-terminated machines targeted by this deployment - // that have the desired template spec. - // +optional - UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` - - // Total number of ready machines targeted by this deployment. - // +optional - ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"` - - // Total number of available machines (ready for at least minReadySeconds) - // targeted by this deployment. - // +optional - AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` - - // Total number of unavailable machines targeted by this deployment. - // This is the total number of machines that are still required for - // the deployment to have 100% available capacity. They may either - // be machines that are running but not yet available or machines - // that still have not been created. 
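The `MaxUnavailable`/`MaxSurge` docs above fix the rounding rules: unavailable rounds down, surge rounds up, and the two cannot both be zero. A worked sketch of the arithmetic using the same `intstr` machinery (assuming the `GetValueFromIntOrPercent` helper available in this vintage of apimachinery):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	desired := 10
	maxSurge := intstr.FromString("25%")
	maxUnavailable := intstr.FromString("25%")

	// Surge rounds up, unavailable rounds down, per the field docs:
	// 25% of 10 = 2.5, giving surge 3 and unavailable 2.
	surge, _ := intstr.GetValueFromIntOrPercent(&maxSurge, desired, true)
	unavail, _ := intstr.GetValueFromIntOrPercent(&maxUnavailable, desired, false)

	fmt.Println(desired + surge)   // 13: upper bound during the rollout
	fmt.Println(desired - unavail) // 8: minimum available machines
}
```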
- // +optional - UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"` -} - -/// [MachineDeploymentStatus] - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -/// [MachineDeployment] -// MachineDeployment is the Schema for the machinedeployments API -// +k8s:openapi-gen=true -// +kubebuilder:subresource:status -// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.labelSelector -type MachineDeployment struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec MachineDeploymentSpec `json:"spec,omitempty"` - Status MachineDeploymentStatus `json:"status,omitempty"` -} - -/// [MachineDeployment] - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// MachineDeploymentList contains a list of MachineDeployment -type MachineDeploymentList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []MachineDeployment `json:"items"` -} - -func init() { - SchemeBuilder.Register(&MachineDeployment{}, &MachineDeploymentList{}) -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machineset_types.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machineset_types.go deleted file mode 100644 index ee810bc94d63..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machineset_types.go +++ /dev/null @@ -1,187 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "log" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" - - "github.com/openshift/cluster-api/pkg/apis/machine/common" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/util/validation/field" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -/// [MachineSet] -// MachineSet ensures that a specified number of machines replicas are running at any given time. -// +k8s:openapi-gen=true -// +kubebuilder:subresource:status -// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.labelSelector -type MachineSet struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec MachineSetSpec `json:"spec,omitempty"` - Status MachineSetStatus `json:"status,omitempty"` -} - -/// [MachineSet] - -/// [MachineSetSpec] -// MachineSetSpec defines the desired state of MachineSet -type MachineSetSpec struct { - // Replicas is the number of desired replicas. - // This is a pointer to distinguish between explicit zero and unspecified. - // Defaults to 1. - // +optional - Replicas *int32 `json:"replicas,omitempty"` - - // MinReadySeconds is the minimum number of seconds for which a newly created machine should be ready. 
- // Defaults to 0 (machine will be considered available as soon as it is ready) - // +optional - MinReadySeconds int32 `json:"minReadySeconds,omitempty"` - - // Selector is a label query over machines that should match the replica count. - // Label keys and values that must match in order to be controlled by this MachineSet. - // It must match the machine template's labels. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - Selector metav1.LabelSelector `json:"selector"` - - // Template is the object that describes the machine that will be created if - // insufficient replicas are detected. - // +optional - Template MachineTemplateSpec `json:"template,omitempty"` -} - -/// [MachineSetSpec] // doxygen marker - -/// [MachineTemplateSpec] // doxygen marker -// MachineTemplateSpec describes the data needed to create a Machine from a template -type MachineTemplateSpec struct { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Specification of the desired behavior of the machine. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Spec MachineSpec `json:"spec,omitempty"` -} - -/// [MachineTemplateSpec] - -/// [MachineSetStatus] -// MachineSetStatus defines the observed state of MachineSet -type MachineSetStatus struct { - // Replicas is the most recently observed number of replicas. - Replicas int32 `json:"replicas"` - - // The number of replicas that have labels matching the labels of the machine template of the MachineSet. - // +optional - FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty"` - - // The number of ready replicas for this MachineSet. A machine is considered ready when the node has been created and is "Ready". - // +optional - ReadyReplicas int32 `json:"readyReplicas,omitempty"` - - // The number of available replicas (ready for at least minReadySeconds) for this MachineSet. - // +optional - AvailableReplicas int32 `json:"availableReplicas,omitempty"` - - // ObservedGeneration reflects the generation of the most recently observed MachineSet. - // +optional - ObservedGeneration int64 `json:"observedGeneration,omitempty"` - - // In the event that there is a terminal problem reconciling the - // replicas, both ErrorReason and ErrorMessage will be set. ErrorReason - // will be populated with a succinct value suitable for machine - // interpretation, while ErrorMessage will contain a more verbose - // string suitable for logging and human consumption. - // - // These fields should not be set for transitive errors that a - // controller faces that are expected to be fixed automatically over - // time (like service outages), but instead indicate that something is - // fundamentally wrong with the MachineTemplate's spec or the configuration of - // the machine controller, and that manual intervention is required. Examples - // of terminal errors would be invalid combinations of settings in the - // spec, values that are unsupported by the machine controller, or the - // responsible machine controller itself being critically misconfigured. - // - // Any transient errors that occur during the reconciliation of Machines - // can be added as events to the MachineSet object and/or logged in the - // controller's output. 
- // +optional - ErrorReason *common.MachineSetStatusError `json:"errorReason,omitempty"` - // +optional - ErrorMessage *string `json:"errorMessage,omitempty"` -} - -/// [MachineSetStatus] - -func (machineSet *MachineSet) Validate() field.ErrorList { - errors := field.ErrorList{} - - // validate spec.selector and spec.template.labels - fldPath := field.NewPath("spec") - errors = append(errors, metav1validation.ValidateLabelSelector(&machineSet.Spec.Selector, fldPath.Child("selector"))...) - if len(machineSet.Spec.Selector.MatchLabels)+len(machineSet.Spec.Selector.MatchExpressions) == 0 { - errors = append(errors, field.Invalid(fldPath.Child("selector"), machineSet.Spec.Selector, "empty selector is not valid for MachineSet.")) - } - selector, err := metav1.LabelSelectorAsSelector(&machineSet.Spec.Selector) - if err != nil { - errors = append(errors, field.Invalid(fldPath.Child("selector"), machineSet.Spec.Selector, "invalid label selector.")) - } else { - labels := labels.Set(machineSet.Spec.Template.Labels) - if !selector.Matches(labels) { - errors = append(errors, field.Invalid(fldPath.Child("template", "metadata", "labels"), machineSet.Spec.Template.Labels, "`selector` does not match template `labels`")) - } - } - - return errors -} - -// DefaultingFunction sets default MachineSet field values -func (obj *MachineSet) Default() { - log.Printf("Defaulting fields for MachineSet %s\n", obj.Name) - - if obj.Spec.Replicas == nil { - obj.Spec.Replicas = new(int32) - *obj.Spec.Replicas = 1 - } - - if len(obj.Namespace) == 0 { - obj.Namespace = metav1.NamespaceDefault - } -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// MachineSetList contains a list of MachineSet -type MachineSetList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []MachineSet `json:"items"` -} - -func init() { - SchemeBuilder.Register(&MachineSet{}, &MachineSetList{}) -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/register.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/register.go deleted file mode 100644 index 586d1d898f19..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/register.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// NOTE: Boilerplate only. Ignore this file. 
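
For context on the hunk above: alongside the generated API types, the deleted `machineset_types.go` carried hand-written `Validate` and `Default` helpers. A minimal, illustrative sketch of how a caller exercises them, assuming the vendored import path from this diff; the package name and object names are made up for the example:

```go
package main

import (
	"fmt"

	machinev1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	labels := map[string]string{"machineset": "workers"}
	ms := &machinev1beta1.MachineSet{
		ObjectMeta: metav1.ObjectMeta{Name: "workers"},
		Spec: machinev1beta1.MachineSetSpec{
			// Validate() rejects an empty selector or one that does not
			// match the template labels.
			Selector: metav1.LabelSelector{MatchLabels: labels},
			Template: machinev1beta1.MachineTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labels},
			},
		},
	}

	// Default() fills Spec.Replicas (to 1) and Namespace ("default")
	// when they are unset.
	ms.Default()
	if errs := ms.Validate(); len(errs) > 0 {
		fmt.Println("invalid MachineSet:", errs)
		return
	}
	fmt.Printf("replicas=%d namespace=%s\n", *ms.Spec.Replicas, ms.Namespace)
}
```
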
- -// Package v1beta1 contains API Schema definitions for the machine v1beta1 API group -// +k8s:openapi-gen=true -// +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=github.com/openshift/cluster-api/pkg/apis/machine -// +k8s:defaulter-gen=TypeMeta -// +groupName=machine.openshift.io -package v1beta1 - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/runtime/scheme" -) - -var ( - // SchemeGroupVersion is group version used to register these objects - SchemeGroupVersion = schema.GroupVersion{Group: "machine.openshift.io", Version: "v1beta1"} - - // SchemeBuilder is used to add go types to the GroupVersionKind scheme - SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} - - // Required by pkg/client/... - // TODO(pwittrock): Remove this after removing pkg/client/... - AddToScheme = SchemeBuilder.AddToScheme -) - -// Required by pkg/client/listers/... -// TODO(pwittrock): Remove this after removing pkg/client/... -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index ab6e3a58796f..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,821 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by main. DO NOT EDIT. - -package v1beta1 - -import ( - common "github.com/openshift/cluster-api/pkg/apis/machine/common" - v1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - intstr "k8s.io/apimachinery/pkg/util/intstr" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *APIEndpoint) DeepCopyInto(out *APIEndpoint) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIEndpoint. -func (in *APIEndpoint) DeepCopy() *APIEndpoint { - if in == nil { - return nil - } - out := new(APIEndpoint) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Cluster) DeepCopyInto(out *Cluster) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. 
-func (in *Cluster) DeepCopy() *Cluster { - if in == nil { - return nil - } - out := new(Cluster) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Cluster) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterList) DeepCopyInto(out *ClusterList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Cluster, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList. -func (in *ClusterList) DeepCopy() *ClusterList { - if in == nil { - return nil - } - out := new(ClusterList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterNetworkingConfig) DeepCopyInto(out *ClusterNetworkingConfig) { - *out = *in - in.Services.DeepCopyInto(&out.Services) - in.Pods.DeepCopyInto(&out.Pods) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkingConfig. -func (in *ClusterNetworkingConfig) DeepCopy() *ClusterNetworkingConfig { - if in == nil { - return nil - } - out := new(ClusterNetworkingConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { - *out = *in - in.ClusterNetwork.DeepCopyInto(&out.ClusterNetwork) - in.ProviderSpec.DeepCopyInto(&out.ProviderSpec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. -func (in *ClusterSpec) DeepCopy() *ClusterSpec { - if in == nil { - return nil - } - out := new(ClusterSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { - *out = *in - if in.APIEndpoints != nil { - in, out := &in.APIEndpoints, &out.APIEndpoints - *out = make([]APIEndpoint, len(*in)) - copy(*out, *in) - } - if in.ProviderStatus != nil { - in, out := &in.ProviderStatus, &out.ProviderStatus - *out = new(runtime.RawExtension) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. -func (in *ClusterStatus) DeepCopy() *ClusterStatus { - if in == nil { - return nil - } - out := new(ClusterStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *LastOperation) DeepCopyInto(out *LastOperation) { - *out = *in - if in.Description != nil { - in, out := &in.Description, &out.Description - *out = new(string) - **out = **in - } - if in.LastUpdated != nil { - in, out := &in.LastUpdated, &out.LastUpdated - *out = (*in).DeepCopy() - } - if in.State != nil { - in, out := &in.State, &out.State - *out = new(string) - **out = **in - } - if in.Type != nil { - in, out := &in.Type, &out.Type - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastOperation. -func (in *LastOperation) DeepCopy() *LastOperation { - if in == nil { - return nil - } - out := new(LastOperation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Machine) DeepCopyInto(out *Machine) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Machine. -func (in *Machine) DeepCopy() *Machine { - if in == nil { - return nil - } - out := new(Machine) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Machine) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineClass) DeepCopyInto(out *MachineClass) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.ProviderSpec.DeepCopyInto(&out.ProviderSpec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineClass. -func (in *MachineClass) DeepCopy() *MachineClass { - if in == nil { - return nil - } - out := new(MachineClass) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *MachineClass) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineClassList) DeepCopyInto(out *MachineClassList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]MachineClass, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineClassList. -func (in *MachineClassList) DeepCopy() *MachineClassList { - if in == nil { - return nil - } - out := new(MachineClassList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *MachineClassList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MachineClassRef) DeepCopyInto(out *MachineClassRef) { - *out = *in - if in.ObjectReference != nil { - in, out := &in.ObjectReference, &out.ObjectReference - *out = new(v1.ObjectReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineClassRef. -func (in *MachineClassRef) DeepCopy() *MachineClassRef { - if in == nil { - return nil - } - out := new(MachineClassRef) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineDeployment) DeepCopyInto(out *MachineDeployment) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeployment. -func (in *MachineDeployment) DeepCopy() *MachineDeployment { - if in == nil { - return nil - } - out := new(MachineDeployment) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *MachineDeployment) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineDeploymentList) DeepCopyInto(out *MachineDeploymentList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]MachineDeployment, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentList. -func (in *MachineDeploymentList) DeepCopy() *MachineDeploymentList { - if in == nil { - return nil - } - out := new(MachineDeploymentList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *MachineDeploymentList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineDeploymentSpec) DeepCopyInto(out *MachineDeploymentSpec) { - *out = *in - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas - *out = new(int32) - **out = **in - } - in.Selector.DeepCopyInto(&out.Selector) - in.Template.DeepCopyInto(&out.Template) - if in.Strategy != nil { - in, out := &in.Strategy, &out.Strategy - *out = new(MachineDeploymentStrategy) - (*in).DeepCopyInto(*out) - } - if in.MinReadySeconds != nil { - in, out := &in.MinReadySeconds, &out.MinReadySeconds - *out = new(int32) - **out = **in - } - if in.RevisionHistoryLimit != nil { - in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit - *out = new(int32) - **out = **in - } - if in.ProgressDeadlineSeconds != nil { - in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentSpec. 
-func (in *MachineDeploymentSpec) DeepCopy() *MachineDeploymentSpec { - if in == nil { - return nil - } - out := new(MachineDeploymentSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineDeploymentStatus) DeepCopyInto(out *MachineDeploymentStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentStatus. -func (in *MachineDeploymentStatus) DeepCopy() *MachineDeploymentStatus { - if in == nil { - return nil - } - out := new(MachineDeploymentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineDeploymentStrategy) DeepCopyInto(out *MachineDeploymentStrategy) { - *out = *in - if in.RollingUpdate != nil { - in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(MachineRollingUpdateDeployment) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentStrategy. -func (in *MachineDeploymentStrategy) DeepCopy() *MachineDeploymentStrategy { - if in == nil { - return nil - } - out := new(MachineDeploymentStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineList) DeepCopyInto(out *MachineList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Machine, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineList. -func (in *MachineList) DeepCopy() *MachineList { - if in == nil { - return nil - } - out := new(MachineList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *MachineList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineRollingUpdateDeployment) DeepCopyInto(out *MachineRollingUpdateDeployment) { - *out = *in - if in.MaxUnavailable != nil { - in, out := &in.MaxUnavailable, &out.MaxUnavailable - *out = new(intstr.IntOrString) - **out = **in - } - if in.MaxSurge != nil { - in, out := &in.MaxSurge, &out.MaxSurge - *out = new(intstr.IntOrString) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineRollingUpdateDeployment. -func (in *MachineRollingUpdateDeployment) DeepCopy() *MachineRollingUpdateDeployment { - if in == nil { - return nil - } - out := new(MachineRollingUpdateDeployment) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MachineSet) DeepCopyInto(out *MachineSet) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSet. -func (in *MachineSet) DeepCopy() *MachineSet { - if in == nil { - return nil - } - out := new(MachineSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *MachineSet) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineSetList) DeepCopyInto(out *MachineSetList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]MachineSet, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetList. -func (in *MachineSetList) DeepCopy() *MachineSetList { - if in == nil { - return nil - } - out := new(MachineSetList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *MachineSetList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineSetSpec) DeepCopyInto(out *MachineSetSpec) { - *out = *in - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas - *out = new(int32) - **out = **in - } - in.Selector.DeepCopyInto(&out.Selector) - in.Template.DeepCopyInto(&out.Template) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetSpec. -func (in *MachineSetSpec) DeepCopy() *MachineSetSpec { - if in == nil { - return nil - } - out := new(MachineSetSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineSetStatus) DeepCopyInto(out *MachineSetStatus) { - *out = *in - if in.ErrorReason != nil { - in, out := &in.ErrorReason, &out.ErrorReason - *out = new(common.MachineSetStatusError) - **out = **in - } - if in.ErrorMessage != nil { - in, out := &in.ErrorMessage, &out.ErrorMessage - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetStatus. -func (in *MachineSetStatus) DeepCopy() *MachineSetStatus { - if in == nil { - return nil - } - out := new(MachineSetStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
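
The generated deepcopy functions in this file exist so controllers can mutate a copy of a cached object without corrupting the shared informer cache. A small sketch of those semantics using the MachineSet helpers deleted in this hunk (illustrative only; the vendored import path is assumed as above):

```go
package main

import (
	"fmt"

	machinev1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1"
)

func main() {
	replicas := int32(3)
	original := &machinev1beta1.MachineSet{
		Spec: machinev1beta1.MachineSetSpec{Replicas: &replicas},
	}

	// DeepCopy allocates a fresh int32 for Replicas, so writes to the
	// copy never reach the original object.
	copied := original.DeepCopy()
	*copied.Spec.Replicas = 10

	fmt.Println(*original.Spec.Replicas) // 3
	fmt.Println(*copied.Spec.Replicas)   // 10
}
```
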
-func (in *MachineSpec) DeepCopyInto(out *MachineSpec) { - *out = *in - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Taints != nil { - in, out := &in.Taints, &out.Taints - *out = make([]v1.Taint, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.ProviderSpec.DeepCopyInto(&out.ProviderSpec) - out.Versions = in.Versions - if in.ConfigSource != nil { - in, out := &in.ConfigSource, &out.ConfigSource - *out = new(v1.NodeConfigSource) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSpec. -func (in *MachineSpec) DeepCopy() *MachineSpec { - if in == nil { - return nil - } - out := new(MachineSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineStatus) DeepCopyInto(out *MachineStatus) { - *out = *in - if in.NodeRef != nil { - in, out := &in.NodeRef, &out.NodeRef - *out = new(v1.ObjectReference) - **out = **in - } - if in.LastUpdated != nil { - in, out := &in.LastUpdated, &out.LastUpdated - *out = (*in).DeepCopy() - } - if in.Versions != nil { - in, out := &in.Versions, &out.Versions - *out = new(MachineVersionInfo) - **out = **in - } - if in.ErrorReason != nil { - in, out := &in.ErrorReason, &out.ErrorReason - *out = new(common.MachineStatusError) - **out = **in - } - if in.ErrorMessage != nil { - in, out := &in.ErrorMessage, &out.ErrorMessage - *out = new(string) - **out = **in - } - if in.ProviderStatus != nil { - in, out := &in.ProviderStatus, &out.ProviderStatus - *out = new(runtime.RawExtension) - (*in).DeepCopyInto(*out) - } - if in.Addresses != nil { - in, out := &in.Addresses, &out.Addresses - *out = make([]v1.NodeAddress, len(*in)) - copy(*out, *in) - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]v1.NodeCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.LastOperation != nil { - in, out := &in.LastOperation, &out.LastOperation - *out = new(LastOperation) - (*in).DeepCopyInto(*out) - } - if in.Phase != nil { - in, out := &in.Phase, &out.Phase - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineStatus. -func (in *MachineStatus) DeepCopy() *MachineStatus { - if in == nil { - return nil - } - out := new(MachineStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineTemplateSpec) DeepCopyInto(out *MachineTemplateSpec) { - *out = *in - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineTemplateSpec. -func (in *MachineTemplateSpec) DeepCopy() *MachineTemplateSpec { - if in == nil { - return nil - } - out := new(MachineTemplateSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineVersionInfo) DeepCopyInto(out *MachineVersionInfo) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineVersionInfo. 
-func (in *MachineVersionInfo) DeepCopy() *MachineVersionInfo { - if in == nil { - return nil - } - out := new(MachineVersionInfo) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NetworkRanges) DeepCopyInto(out *NetworkRanges) { - *out = *in - if in.CIDRBlocks != nil { - in, out := &in.CIDRBlocks, &out.CIDRBlocks - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkRanges. -func (in *NetworkRanges) DeepCopy() *NetworkRanges { - if in == nil { - return nil - } - out := new(NetworkRanges) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProviderSpec) DeepCopyInto(out *ProviderSpec) { - *out = *in - if in.Value != nil { - in, out := &in.Value, &out.Value - *out = new(runtime.RawExtension) - (*in).DeepCopyInto(*out) - } - if in.ValueFrom != nil { - in, out := &in.ValueFrom, &out.ValueFrom - *out = new(ProviderSpecSource) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderSpec. -func (in *ProviderSpec) DeepCopy() *ProviderSpec { - if in == nil { - return nil - } - out := new(ProviderSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProviderSpecSource) DeepCopyInto(out *ProviderSpecSource) { - *out = *in - if in.MachineClass != nil { - in, out := &in.MachineClass, &out.MachineClass - *out = new(MachineClassRef) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderSpecSource. 
-func (in *ProviderSpecSource) DeepCopy() *ProviderSpecSource { - if in == nil { - return nil - } - out := new(ProviderSpecSource) - in.DeepCopyInto(out) - return out -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/BUILD.bazel b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/BUILD.bazel deleted file mode 100644 index 10e4314959e9..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clientset.go", - "doc.go", - ], - importpath = "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset", - visibility = ["//visibility:public"], - deps = [ - "//pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1:go_default_library", - "//pkg/client/clientset_generated/clientset/typed/machine/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/discovery:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library", - ], -) diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/clientset.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/clientset.go deleted file mode 100644 index 1ea1557458c4..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/clientset.go +++ /dev/null @@ -1,120 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package clientset - -import ( - clusterv1alpha1 "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1" - machinev1beta1 "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1" - discovery "k8s.io/client-go/discovery" - rest "k8s.io/client-go/rest" - flowcontrol "k8s.io/client-go/util/flowcontrol" -) - -type Interface interface { - Discovery() discovery.DiscoveryInterface - ClusterV1alpha1() clusterv1alpha1.ClusterV1alpha1Interface - // Deprecated: please explicitly pick a version if possible. - Cluster() clusterv1alpha1.ClusterV1alpha1Interface - MachineV1beta1() machinev1beta1.MachineV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Machine() machinev1beta1.MachineV1beta1Interface -} - -// Clientset contains the clients for groups. Each group has exactly one -// version included in a Clientset. 
-type Clientset struct { - *discovery.DiscoveryClient - clusterV1alpha1 *clusterv1alpha1.ClusterV1alpha1Client - machineV1beta1 *machinev1beta1.MachineV1beta1Client -} - -// ClusterV1alpha1 retrieves the ClusterV1alpha1Client -func (c *Clientset) ClusterV1alpha1() clusterv1alpha1.ClusterV1alpha1Interface { - return c.clusterV1alpha1 -} - -// Deprecated: Cluster retrieves the default version of ClusterClient. -// Please explicitly pick a version. -func (c *Clientset) Cluster() clusterv1alpha1.ClusterV1alpha1Interface { - return c.clusterV1alpha1 -} - -// MachineV1beta1 retrieves the MachineV1beta1Client -func (c *Clientset) MachineV1beta1() machinev1beta1.MachineV1beta1Interface { - return c.machineV1beta1 -} - -// Deprecated: Machine retrieves the default version of MachineClient. -// Please explicitly pick a version. -func (c *Clientset) Machine() machinev1beta1.MachineV1beta1Interface { - return c.machineV1beta1 -} - -// Discovery retrieves the DiscoveryClient -func (c *Clientset) Discovery() discovery.DiscoveryInterface { - if c == nil { - return nil - } - return c.DiscoveryClient -} - -// NewForConfig creates a new Clientset for the given config. -func NewForConfig(c *rest.Config) (*Clientset, error) { - configShallowCopy := *c - if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { - configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) - } - var cs Clientset - var err error - cs.clusterV1alpha1, err = clusterv1alpha1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.machineV1beta1, err = machinev1beta1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - - cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - return &cs, nil -} - -// NewForConfigOrDie creates a new Clientset for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *Clientset { - var cs Clientset - cs.clusterV1alpha1 = clusterv1alpha1.NewForConfigOrDie(c) - cs.machineV1beta1 = machinev1beta1.NewForConfigOrDie(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) - return &cs -} - -// New creates a new Clientset for the given RESTClient. -func New(c rest.Interface) *Clientset { - var cs Clientset - cs.clusterV1alpha1 = clusterv1alpha1.New(c) - cs.machineV1beta1 = machinev1beta1.New(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClient(c) - return &cs -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/doc.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/doc.go deleted file mode 100644 index 3421911a75a9..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
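
For context on the clientset being removed: a typical consumer builds it from a REST config and then reaches the typed group clients. A hedged sketch follows; the kubeconfig path is an assumption, and it assumes the generated MachineSet client for the cluster/v1alpha1 group exposes the same `List` signature as the Cluster client shown later in this diff:

```go
package main

import (
	"fmt"
	"log"

	clientset "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes an out-of-cluster kubeconfig at the conventional path;
	// in-cluster callers would use rest.InClusterConfig() instead.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatal(err)
	}

	cs, err := clientset.NewForConfig(config)
	if err != nil {
		log.Fatal(err)
	}

	machineSets, err := cs.ClusterV1alpha1().MachineSets("default").List(metav1.ListOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, ms := range machineSets.Items {
		fmt.Println(ms.Name)
	}
}
```
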
-*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated clientset. -package clientset diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/fake/BUILD.bazel b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/fake/BUILD.bazel deleted file mode 100644 index f28dda486b7f..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/fake/BUILD.bazel +++ /dev/null @@ -1,30 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clientset_generated.go", - "doc.go", - "register.go", - ], - importpath = "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/fake", - visibility = ["//visibility:public"], - deps = [ - "//pkg/apis/cluster/v1alpha1:go_default_library", - "//pkg/apis/machine/v1beta1:go_default_library", - "//pkg/client/clientset_generated/clientset:go_default_library", - "//pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1:go_default_library", - "//pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake:go_default_library", - "//pkg/client/clientset_generated/clientset/typed/machine/v1beta1:go_default_library", - "//pkg/client/clientset_generated/clientset/typed/machine/v1beta1/fake:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/discovery:go_default_library", - "//vendor/k8s.io/client-go/discovery/fake:go_default_library", - "//vendor/k8s.io/client-go/testing:go_default_library", - ], -) diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/fake/clientset_generated.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/fake/clientset_generated.go deleted file mode 100644 index dce09350de40..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/fake/clientset_generated.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - clientset "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset" - clusterv1alpha1 "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1" - fakeclusterv1alpha1 "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake" - machinev1beta1 "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1" - fakemachinev1beta1 "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/fake" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/discovery" - fakediscovery "k8s.io/client-go/discovery/fake" - "k8s.io/client-go/testing" -) - -// NewSimpleClientset returns a clientset that will respond with the provided objects. -// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, -// without applying any validations and/or defaults. It shouldn't be considered a replacement -// for a real clientset and is mostly useful in simple unit tests. -func NewSimpleClientset(objects ...runtime.Object) *Clientset { - o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) - for _, obj := range objects { - if err := o.Add(obj); err != nil { - panic(err) - } - } - - cs := &Clientset{} - cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} - cs.AddReactor("*", "*", testing.ObjectReaction(o)) - cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { - gvr := action.GetResource() - ns := action.GetNamespace() - watch, err := o.Watch(gvr, ns) - if err != nil { - return false, nil, err - } - return true, watch, nil - }) - - return cs -} - -// Clientset implements clientset.Interface. Meant to be embedded into a -// struct to get a default implementation. This makes faking out just the method -// you want to test easier. -type Clientset struct { - testing.Fake - discovery *fakediscovery.FakeDiscovery -} - -func (c *Clientset) Discovery() discovery.DiscoveryInterface { - return c.discovery -} - -var _ clientset.Interface = &Clientset{} - -// ClusterV1alpha1 retrieves the ClusterV1alpha1Client -func (c *Clientset) ClusterV1alpha1() clusterv1alpha1.ClusterV1alpha1Interface { - return &fakeclusterv1alpha1.FakeClusterV1alpha1{Fake: &c.Fake} -} - -// Cluster retrieves the ClusterV1alpha1Client -func (c *Clientset) Cluster() clusterv1alpha1.ClusterV1alpha1Interface { - return &fakeclusterv1alpha1.FakeClusterV1alpha1{Fake: &c.Fake} -} - -// MachineV1beta1 retrieves the MachineV1beta1Client -func (c *Clientset) MachineV1beta1() machinev1beta1.MachineV1beta1Interface { - return &fakemachinev1beta1.FakeMachineV1beta1{Fake: &c.Fake} -} - -// Machine retrieves the MachineV1beta1Client -func (c *Clientset) Machine() machinev1beta1.MachineV1beta1Interface { - return &fakemachinev1beta1.FakeMachineV1beta1{Fake: &c.Fake} -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/fake/doc.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/fake/doc.go deleted file mode 100644 index 0bc260bcaa22..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. 
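
The fake clientset deleted here is what unit tests use in place of a live API server: `NewSimpleClientset` seeds an in-memory object tracker that serves the same typed interface. A minimal test sketch against the vendored packages in this diff (package and object names are illustrative):

```go
package example

import (
	"testing"

	clusterv1alpha1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1"
	fakeclientset "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestListClusters(t *testing.T) {
	// Seed the tracker with one pre-existing Cluster; no API server needed.
	seed := &clusterv1alpha1.Cluster{
		ObjectMeta: metav1.ObjectMeta{Name: "test-cluster", Namespace: "default"},
	}
	cs := fakeclientset.NewSimpleClientset(seed)

	list, err := cs.ClusterV1alpha1().Clusters("default").List(metav1.ListOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if len(list.Items) != 1 {
		t.Fatalf("expected 1 cluster, got %d", len(list.Items))
	}
}
```
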
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated fake clientset. -package fake diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/fake/register.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/fake/register.go deleted file mode 100644 index c10e170c3225..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/fake/register.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - clusterv1alpha1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1" - machinev1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" -) - -var scheme = runtime.NewScheme() -var codecs = serializer.NewCodecFactory(scheme) -var parameterCodec = runtime.NewParameterCodec(scheme) -var localSchemeBuilder = runtime.SchemeBuilder{ - clusterv1alpha1.AddToScheme, - machinev1beta1.AddToScheme, -} - -// AddToScheme adds all types of this clientset into the given scheme. This allows composition -// of clientsets, like in: -// -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) -// -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) -// -// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types -// correctly. 
-var AddToScheme = localSchemeBuilder.AddToScheme - -func init() { - v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) - utilruntime.Must(AddToScheme(scheme)) -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/scheme/BUILD.bazel b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/scheme/BUILD.bazel deleted file mode 100644 index 44d177000425..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/scheme/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - ], - importpath = "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/scheme", - visibility = ["//visibility:public"], - deps = [ - "//pkg/apis/cluster/v1alpha1:go_default_library", - "//pkg/apis/machine/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - ], -) diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/scheme/doc.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/scheme/doc.go deleted file mode 100644 index 5c5c8debb6b4..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/scheme/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package contains the scheme of the automatically generated clientset. -package scheme diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/scheme/register.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/scheme/register.go deleted file mode 100644 index afb084a63f7f..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/scheme/register.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
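
Both register.go files being deleted follow the standard client-gen pattern: per-group `AddToScheme` functions are aggregated into a scheme so these types (and RawExtensions embedding them) serialize correctly. A small sketch of composing a scheme by hand, using the `AddToScheme` exported from the machine/v1beta1 package removed earlier in this diff:

```go
package main

import (
	"fmt"

	machinev1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

func main() {
	// Compose a scheme the same way the generated register.go does.
	s := runtime.NewScheme()
	utilruntime.Must(machinev1beta1.AddToScheme(s))

	// The scheme can now map Go types back to their group/version/kind.
	gvks, _, err := s.ObjectKinds(&machinev1beta1.MachineSet{})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(gvks) // expect machine.openshift.io/v1beta1, Kind=MachineSet
}
```
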
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package scheme - -import ( - clusterv1alpha1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1" - machinev1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" -) - -var Scheme = runtime.NewScheme() -var Codecs = serializer.NewCodecFactory(Scheme) -var ParameterCodec = runtime.NewParameterCodec(Scheme) -var localSchemeBuilder = runtime.SchemeBuilder{ - clusterv1alpha1.AddToScheme, - machinev1beta1.AddToScheme, -} - -// AddToScheme adds all types of this clientset into the given scheme. This allows composition -// of clientsets, like in: -// -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) -// -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) -// -// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types -// correctly. -var AddToScheme = localSchemeBuilder.AddToScheme - -func init() { - v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) - utilruntime.Must(AddToScheme(Scheme)) -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/BUILD.bazel b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/BUILD.bazel deleted file mode 100644 index 157ef970edb5..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,26 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cluster.go", - "cluster_client.go", - "doc.go", - "generated_expansion.go", - "machine.go", - "machineclass.go", - "machinedeployment.go", - "machineset.go", - ], - importpath = "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//pkg/apis/cluster/v1alpha1:go_default_library", - "//pkg/client/clientset_generated/clientset/scheme:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/cluster.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/cluster.go deleted file mode 100644 index 00b10e505e45..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/cluster.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright 2018 The 
Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1" - scheme "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// ClustersGetter has a method to return a ClusterInterface. -// A group's client should implement this interface. -type ClustersGetter interface { - Clusters(namespace string) ClusterInterface -} - -// ClusterInterface has methods to work with Cluster resources. -type ClusterInterface interface { - Create(*v1alpha1.Cluster) (*v1alpha1.Cluster, error) - Update(*v1alpha1.Cluster) (*v1alpha1.Cluster, error) - UpdateStatus(*v1alpha1.Cluster) (*v1alpha1.Cluster, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.Cluster, error) - List(opts v1.ListOptions) (*v1alpha1.ClusterList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Cluster, err error) - ClusterExpansion -} - -// clusters implements ClusterInterface -type clusters struct { - client rest.Interface - ns string -} - -// newClusters returns a Clusters -func newClusters(c *ClusterV1alpha1Client, namespace string) *clusters { - return &clusters{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the cluster, and returns the corresponding cluster object, and an error if there is any. -func (c *clusters) Get(name string, options v1.GetOptions) (result *v1alpha1.Cluster, err error) { - result = &v1alpha1.Cluster{} - err = c.client.Get(). - Namespace(c.ns). - Resource("clusters"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Clusters that match those selectors. -func (c *clusters) List(opts v1.ListOptions) (result *v1alpha1.ClusterList, err error) { - result = &v1alpha1.ClusterList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("clusters"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusters. -func (c *clusters) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("clusters"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a cluster and creates it. Returns the server's representation of the cluster, and an error, if there is any. 
-func (c *clusters) Create(cluster *v1alpha1.Cluster) (result *v1alpha1.Cluster, err error) { - result = &v1alpha1.Cluster{} - err = c.client.Post(). - Namespace(c.ns). - Resource("clusters"). - Body(cluster). - Do(). - Into(result) - return -} - -// Update takes the representation of a cluster and updates it. Returns the server's representation of the cluster, and an error, if there is any. -func (c *clusters) Update(cluster *v1alpha1.Cluster) (result *v1alpha1.Cluster, err error) { - result = &v1alpha1.Cluster{} - err = c.client.Put(). - Namespace(c.ns). - Resource("clusters"). - Name(cluster.Name). - Body(cluster). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *clusters) UpdateStatus(cluster *v1alpha1.Cluster) (result *v1alpha1.Cluster, err error) { - result = &v1alpha1.Cluster{} - err = c.client.Put(). - Namespace(c.ns). - Resource("clusters"). - Name(cluster.Name). - SubResource("status"). - Body(cluster). - Do(). - Into(result) - return -} - -// Delete takes name of the cluster and deletes it. Returns an error if one occurs. -func (c *clusters) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("clusters"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusters) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("clusters"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched cluster. -func (c *clusters) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Cluster, err error) { - result = &v1alpha1.Cluster{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("clusters"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/cluster_client.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/cluster_client.go deleted file mode 100644 index f9081c3503cd..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/cluster_client.go +++ /dev/null @@ -1,110 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1" - "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/scheme" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - rest "k8s.io/client-go/rest" -) - -type ClusterV1alpha1Interface interface { - RESTClient() rest.Interface - ClustersGetter - MachinesGetter - MachineClassesGetter - MachineDeploymentsGetter - MachineSetsGetter -} - -// ClusterV1alpha1Client is used to interact with features provided by the cluster.k8s.io group. -type ClusterV1alpha1Client struct { - restClient rest.Interface -} - -func (c *ClusterV1alpha1Client) Clusters(namespace string) ClusterInterface { - return newClusters(c, namespace) -} - -func (c *ClusterV1alpha1Client) Machines(namespace string) MachineInterface { - return newMachines(c, namespace) -} - -func (c *ClusterV1alpha1Client) MachineClasses(namespace string) MachineClassInterface { - return newMachineClasses(c, namespace) -} - -func (c *ClusterV1alpha1Client) MachineDeployments(namespace string) MachineDeploymentInterface { - return newMachineDeployments(c, namespace) -} - -func (c *ClusterV1alpha1Client) MachineSets(namespace string) MachineSetInterface { - return newMachineSets(c, namespace) -} - -// NewForConfig creates a new ClusterV1alpha1Client for the given config. -func NewForConfig(c *rest.Config) (*ClusterV1alpha1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &ClusterV1alpha1Client{client}, nil -} - -// NewForConfigOrDie creates a new ClusterV1alpha1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *ClusterV1alpha1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new ClusterV1alpha1Client for the given RESTClient. -func New(c rest.Interface) *ClusterV1alpha1Client { - return &ClusterV1alpha1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *ClusterV1alpha1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/doc.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/doc.go deleted file mode 100644 index 69ca30111b4c..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1alpha1 diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/BUILD.bazel b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/BUILD.bazel deleted file mode 100644 index 4da0d037ead9..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/BUILD.bazel +++ /dev/null @@ -1,27 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "fake_cluster.go", - "fake_cluster_client.go", - "fake_machine.go", - "fake_machineclass.go", - "fake_machinedeployment.go", - "fake_machineset.go", - ], - importpath = "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake", - visibility = ["//visibility:public"], - deps = [ - "//pkg/apis/cluster/v1alpha1:go_default_library", - "//pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/client-go/testing:go_default_library", - ], -) diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/doc.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/doc.go deleted file mode 100644 index 87a1873edccb..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. 
-package fake diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_cluster.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_cluster.go deleted file mode 100644 index 4554fd127ade..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_cluster.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1alpha1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeClusters implements ClusterInterface -type FakeClusters struct { - Fake *FakeClusterV1alpha1 - ns string -} - -var clustersResource = schema.GroupVersionResource{Group: "cluster.k8s.io", Version: "v1alpha1", Resource: "clusters"} - -var clustersKind = schema.GroupVersionKind{Group: "cluster.k8s.io", Version: "v1alpha1", Kind: "Cluster"} - -// Get takes name of the cluster, and returns the corresponding cluster object, and an error if there is any. -func (c *FakeClusters) Get(name string, options v1.GetOptions) (result *v1alpha1.Cluster, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(clustersResource, c.ns, name), &v1alpha1.Cluster{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Cluster), err -} - -// List takes label and field selectors, and returns the list of Clusters that match those selectors. -func (c *FakeClusters) List(opts v1.ListOptions) (result *v1alpha1.ClusterList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(clustersResource, clustersKind, c.ns, opts), &v1alpha1.ClusterList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.ClusterList{ListMeta: obj.(*v1alpha1.ClusterList).ListMeta} - for _, item := range obj.(*v1alpha1.ClusterList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested clusters. -func (c *FakeClusters) Watch(opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(clustersResource, c.ns, opts)) - -} - -// Create takes the representation of a cluster and creates it. Returns the server's representation of the cluster, and an error, if there is any. -func (c *FakeClusters) Create(cluster *v1alpha1.Cluster) (result *v1alpha1.Cluster, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(clustersResource, c.ns, cluster), &v1alpha1.Cluster{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Cluster), err -} - -// Update takes the representation of a cluster and updates it. Returns the server's representation of the cluster, and an error, if there is any. -func (c *FakeClusters) Update(cluster *v1alpha1.Cluster) (result *v1alpha1.Cluster, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(clustersResource, c.ns, cluster), &v1alpha1.Cluster{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Cluster), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeClusters) UpdateStatus(cluster *v1alpha1.Cluster) (*v1alpha1.Cluster, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(clustersResource, "status", c.ns, cluster), &v1alpha1.Cluster{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Cluster), err -} - -// Delete takes name of the cluster and deletes it. Returns an error if one occurs. -func (c *FakeClusters) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(clustersResource, c.ns, name), &v1alpha1.Cluster{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeClusters) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(clustersResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1alpha1.ClusterList{}) - return err -} - -// Patch applies the patch and returns the patched cluster. -func (c *FakeClusters) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Cluster, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(clustersResource, c.ns, name, data, subresources...), &v1alpha1.Cluster{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Cluster), err -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_cluster_client.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_cluster_client.go deleted file mode 100644 index aca19fdcbde9..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_cluster_client.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - v1alpha1 "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeClusterV1alpha1 struct { - *testing.Fake -} - -func (c *FakeClusterV1alpha1) Clusters(namespace string) v1alpha1.ClusterInterface { - return &FakeClusters{c, namespace} -} - -func (c *FakeClusterV1alpha1) Machines(namespace string) v1alpha1.MachineInterface { - return &FakeMachines{c, namespace} -} - -func (c *FakeClusterV1alpha1) MachineClasses(namespace string) v1alpha1.MachineClassInterface { - return &FakeMachineClasses{c, namespace} -} - -func (c *FakeClusterV1alpha1) MachineDeployments(namespace string) v1alpha1.MachineDeploymentInterface { - return &FakeMachineDeployments{c, namespace} -} - -func (c *FakeClusterV1alpha1) MachineSets(namespace string) v1alpha1.MachineSetInterface { - return &FakeMachineSets{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeClusterV1alpha1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_machine.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_machine.go deleted file mode 100644 index 70036f36c08a..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_machine.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1alpha1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeMachines implements MachineInterface -type FakeMachines struct { - Fake *FakeClusterV1alpha1 - ns string -} - -var machinesResource = schema.GroupVersionResource{Group: "cluster.k8s.io", Version: "v1alpha1", Resource: "machines"} - -var machinesKind = schema.GroupVersionKind{Group: "cluster.k8s.io", Version: "v1alpha1", Kind: "Machine"} - -// Get takes name of the machine, and returns the corresponding machine object, and an error if there is any. -func (c *FakeMachines) Get(name string, options v1.GetOptions) (result *v1alpha1.Machine, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewGetAction(machinesResource, c.ns, name), &v1alpha1.Machine{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Machine), err -} - -// List takes label and field selectors, and returns the list of Machines that match those selectors. -func (c *FakeMachines) List(opts v1.ListOptions) (result *v1alpha1.MachineList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(machinesResource, machinesKind, c.ns, opts), &v1alpha1.MachineList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.MachineList{ListMeta: obj.(*v1alpha1.MachineList).ListMeta} - for _, item := range obj.(*v1alpha1.MachineList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested machines. -func (c *FakeMachines) Watch(opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(machinesResource, c.ns, opts)) - -} - -// Create takes the representation of a machine and creates it. Returns the server's representation of the machine, and an error, if there is any. -func (c *FakeMachines) Create(machine *v1alpha1.Machine) (result *v1alpha1.Machine, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(machinesResource, c.ns, machine), &v1alpha1.Machine{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Machine), err -} - -// Update takes the representation of a machine and updates it. Returns the server's representation of the machine, and an error, if there is any. -func (c *FakeMachines) Update(machine *v1alpha1.Machine) (result *v1alpha1.Machine, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(machinesResource, c.ns, machine), &v1alpha1.Machine{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Machine), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeMachines) UpdateStatus(machine *v1alpha1.Machine) (*v1alpha1.Machine, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(machinesResource, "status", c.ns, machine), &v1alpha1.Machine{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Machine), err -} - -// Delete takes name of the machine and deletes it. Returns an error if one occurs. -func (c *FakeMachines) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(machinesResource, c.ns, name), &v1alpha1.Machine{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeMachines) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(machinesResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1alpha1.MachineList{}) - return err -} - -// Patch applies the patch and returns the patched machine. -func (c *FakeMachines) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Machine, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(machinesResource, c.ns, name, data, subresources...), &v1alpha1.Machine{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Machine), err -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_machineclass.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_machineclass.go deleted file mode 100644 index 995955a7df88..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_machineclass.go +++ /dev/null @@ -1,128 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1alpha1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeMachineClasses implements MachineClassInterface -type FakeMachineClasses struct { - Fake *FakeClusterV1alpha1 - ns string -} - -var machineclassesResource = schema.GroupVersionResource{Group: "cluster.k8s.io", Version: "v1alpha1", Resource: "machineclasses"} - -var machineclassesKind = schema.GroupVersionKind{Group: "cluster.k8s.io", Version: "v1alpha1", Kind: "MachineClass"} - -// Get takes name of the machineClass, and returns the corresponding machineClass object, and an error if there is any. -func (c *FakeMachineClasses) Get(name string, options v1.GetOptions) (result *v1alpha1.MachineClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(machineclassesResource, c.ns, name), &v1alpha1.MachineClass{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.MachineClass), err -} - -// List takes label and field selectors, and returns the list of MachineClasses that match those selectors. -func (c *FakeMachineClasses) List(opts v1.ListOptions) (result *v1alpha1.MachineClassList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(machineclassesResource, machineclassesKind, c.ns, opts), &v1alpha1.MachineClassList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.MachineClassList{ListMeta: obj.(*v1alpha1.MachineClassList).ListMeta} - for _, item := range obj.(*v1alpha1.MachineClassList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested machineClasses. -func (c *FakeMachineClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. 
- InvokesWatch(testing.NewWatchAction(machineclassesResource, c.ns, opts)) - -} - -// Create takes the representation of a machineClass and creates it. Returns the server's representation of the machineClass, and an error, if there is any. -func (c *FakeMachineClasses) Create(machineClass *v1alpha1.MachineClass) (result *v1alpha1.MachineClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(machineclassesResource, c.ns, machineClass), &v1alpha1.MachineClass{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.MachineClass), err -} - -// Update takes the representation of a machineClass and updates it. Returns the server's representation of the machineClass, and an error, if there is any. -func (c *FakeMachineClasses) Update(machineClass *v1alpha1.MachineClass) (result *v1alpha1.MachineClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(machineclassesResource, c.ns, machineClass), &v1alpha1.MachineClass{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.MachineClass), err -} - -// Delete takes name of the machineClass and deletes it. Returns an error if one occurs. -func (c *FakeMachineClasses) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(machineclassesResource, c.ns, name), &v1alpha1.MachineClass{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeMachineClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(machineclassesResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1alpha1.MachineClassList{}) - return err -} - -// Patch applies the patch and returns the patched machineClass. -func (c *FakeMachineClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.MachineClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(machineclassesResource, c.ns, name, data, subresources...), &v1alpha1.MachineClass{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.MachineClass), err -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_machinedeployment.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_machinedeployment.go deleted file mode 100644 index 092c03c3bde4..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_machinedeployment.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - v1alpha1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeMachineDeployments implements MachineDeploymentInterface -type FakeMachineDeployments struct { - Fake *FakeClusterV1alpha1 - ns string -} - -var machinedeploymentsResource = schema.GroupVersionResource{Group: "cluster.k8s.io", Version: "v1alpha1", Resource: "machinedeployments"} - -var machinedeploymentsKind = schema.GroupVersionKind{Group: "cluster.k8s.io", Version: "v1alpha1", Kind: "MachineDeployment"} - -// Get takes name of the machineDeployment, and returns the corresponding machineDeployment object, and an error if there is any. -func (c *FakeMachineDeployments) Get(name string, options v1.GetOptions) (result *v1alpha1.MachineDeployment, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(machinedeploymentsResource, c.ns, name), &v1alpha1.MachineDeployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.MachineDeployment), err -} - -// List takes label and field selectors, and returns the list of MachineDeployments that match those selectors. -func (c *FakeMachineDeployments) List(opts v1.ListOptions) (result *v1alpha1.MachineDeploymentList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(machinedeploymentsResource, machinedeploymentsKind, c.ns, opts), &v1alpha1.MachineDeploymentList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.MachineDeploymentList{ListMeta: obj.(*v1alpha1.MachineDeploymentList).ListMeta} - for _, item := range obj.(*v1alpha1.MachineDeploymentList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested machineDeployments. -func (c *FakeMachineDeployments) Watch(opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(machinedeploymentsResource, c.ns, opts)) - -} - -// Create takes the representation of a machineDeployment and creates it. Returns the server's representation of the machineDeployment, and an error, if there is any. -func (c *FakeMachineDeployments) Create(machineDeployment *v1alpha1.MachineDeployment) (result *v1alpha1.MachineDeployment, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(machinedeploymentsResource, c.ns, machineDeployment), &v1alpha1.MachineDeployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.MachineDeployment), err -} - -// Update takes the representation of a machineDeployment and updates it. Returns the server's representation of the machineDeployment, and an error, if there is any. -func (c *FakeMachineDeployments) Update(machineDeployment *v1alpha1.MachineDeployment) (result *v1alpha1.MachineDeployment, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(machinedeploymentsResource, c.ns, machineDeployment), &v1alpha1.MachineDeployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.MachineDeployment), err -} - -// UpdateStatus was generated because the type contains a Status member. 
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeMachineDeployments) UpdateStatus(machineDeployment *v1alpha1.MachineDeployment) (*v1alpha1.MachineDeployment, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(machinedeploymentsResource, "status", c.ns, machineDeployment), &v1alpha1.MachineDeployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.MachineDeployment), err -} - -// Delete takes name of the machineDeployment and deletes it. Returns an error if one occurs. -func (c *FakeMachineDeployments) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(machinedeploymentsResource, c.ns, name), &v1alpha1.MachineDeployment{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeMachineDeployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(machinedeploymentsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1alpha1.MachineDeploymentList{}) - return err -} - -// Patch applies the patch and returns the patched machineDeployment. -func (c *FakeMachineDeployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.MachineDeployment, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(machinedeploymentsResource, c.ns, name, data, subresources...), &v1alpha1.MachineDeployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.MachineDeployment), err -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_machineset.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_machineset.go deleted file mode 100644 index fbd9447fe9a0..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_machineset.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - v1alpha1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeMachineSets implements MachineSetInterface -type FakeMachineSets struct { - Fake *FakeClusterV1alpha1 - ns string -} - -var machinesetsResource = schema.GroupVersionResource{Group: "cluster.k8s.io", Version: "v1alpha1", Resource: "machinesets"} - -var machinesetsKind = schema.GroupVersionKind{Group: "cluster.k8s.io", Version: "v1alpha1", Kind: "MachineSet"} - -// Get takes name of the machineSet, and returns the corresponding machineSet object, and an error if there is any. -func (c *FakeMachineSets) Get(name string, options v1.GetOptions) (result *v1alpha1.MachineSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(machinesetsResource, c.ns, name), &v1alpha1.MachineSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.MachineSet), err -} - -// List takes label and field selectors, and returns the list of MachineSets that match those selectors. -func (c *FakeMachineSets) List(opts v1.ListOptions) (result *v1alpha1.MachineSetList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(machinesetsResource, machinesetsKind, c.ns, opts), &v1alpha1.MachineSetList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.MachineSetList{ListMeta: obj.(*v1alpha1.MachineSetList).ListMeta} - for _, item := range obj.(*v1alpha1.MachineSetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested machineSets. -func (c *FakeMachineSets) Watch(opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(machinesetsResource, c.ns, opts)) - -} - -// Create takes the representation of a machineSet and creates it. Returns the server's representation of the machineSet, and an error, if there is any. -func (c *FakeMachineSets) Create(machineSet *v1alpha1.MachineSet) (result *v1alpha1.MachineSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(machinesetsResource, c.ns, machineSet), &v1alpha1.MachineSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.MachineSet), err -} - -// Update takes the representation of a machineSet and updates it. Returns the server's representation of the machineSet, and an error, if there is any. -func (c *FakeMachineSets) Update(machineSet *v1alpha1.MachineSet) (result *v1alpha1.MachineSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(machinesetsResource, c.ns, machineSet), &v1alpha1.MachineSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.MachineSet), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeMachineSets) UpdateStatus(machineSet *v1alpha1.MachineSet) (*v1alpha1.MachineSet, error) { - obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(machinesetsResource, "status", c.ns, machineSet), &v1alpha1.MachineSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.MachineSet), err -} - -// Delete takes name of the machineSet and deletes it. Returns an error if one occurs. -func (c *FakeMachineSets) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(machinesetsResource, c.ns, name), &v1alpha1.MachineSet{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeMachineSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(machinesetsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1alpha1.MachineSetList{}) - return err -} - -// Patch applies the patch and returns the patched machineSet. -func (c *FakeMachineSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.MachineSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(machinesetsResource, c.ns, name, data, subresources...), &v1alpha1.MachineSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.MachineSet), err -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/generated_expansion.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/generated_expansion.go deleted file mode 100644 index 2bb55c714f03..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/generated_expansion.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -type ClusterExpansion interface{} - -type MachineExpansion interface{} - -type MachineClassExpansion interface{} - -type MachineDeploymentExpansion interface{} - -type MachineSetExpansion interface{} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/machine.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/machine.go deleted file mode 100644 index 4a3ada009a49..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/machine.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1" - scheme "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// MachinesGetter has a method to return a MachineInterface. -// A group's client should implement this interface. -type MachinesGetter interface { - Machines(namespace string) MachineInterface -} - -// MachineInterface has methods to work with Machine resources. -type MachineInterface interface { - Create(*v1alpha1.Machine) (*v1alpha1.Machine, error) - Update(*v1alpha1.Machine) (*v1alpha1.Machine, error) - UpdateStatus(*v1alpha1.Machine) (*v1alpha1.Machine, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.Machine, error) - List(opts v1.ListOptions) (*v1alpha1.MachineList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Machine, err error) - MachineExpansion -} - -// machines implements MachineInterface -type machines struct { - client rest.Interface - ns string -} - -// newMachines returns a Machines -func newMachines(c *ClusterV1alpha1Client, namespace string) *machines { - return &machines{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the machine, and returns the corresponding machine object, and an error if there is any. -func (c *machines) Get(name string, options v1.GetOptions) (result *v1alpha1.Machine, err error) { - result = &v1alpha1.Machine{} - err = c.client.Get(). - Namespace(c.ns). - Resource("machines"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Machines that match those selectors. -func (c *machines) List(opts v1.ListOptions) (result *v1alpha1.MachineList, err error) { - result = &v1alpha1.MachineList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("machines"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested machines. -func (c *machines) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("machines"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a machine and creates it. Returns the server's representation of the machine, and an error, if there is any. -func (c *machines) Create(machine *v1alpha1.Machine) (result *v1alpha1.Machine, err error) { - result = &v1alpha1.Machine{} - err = c.client.Post(). - Namespace(c.ns). - Resource("machines"). - Body(machine). - Do(). 
- Into(result) - return -} - -// Update takes the representation of a machine and updates it. Returns the server's representation of the machine, and an error, if there is any. -func (c *machines) Update(machine *v1alpha1.Machine) (result *v1alpha1.Machine, err error) { - result = &v1alpha1.Machine{} - err = c.client.Put(). - Namespace(c.ns). - Resource("machines"). - Name(machine.Name). - Body(machine). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *machines) UpdateStatus(machine *v1alpha1.Machine) (result *v1alpha1.Machine, err error) { - result = &v1alpha1.Machine{} - err = c.client.Put(). - Namespace(c.ns). - Resource("machines"). - Name(machine.Name). - SubResource("status"). - Body(machine). - Do(). - Into(result) - return -} - -// Delete takes name of the machine and deletes it. Returns an error if one occurs. -func (c *machines) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("machines"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *machines) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("machines"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched machine. -func (c *machines) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Machine, err error) { - result = &v1alpha1.Machine{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("machines"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/machineclass.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/machineclass.go deleted file mode 100644 index 2d01222e8a9e..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/machineclass.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1" - scheme "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// MachineClassesGetter has a method to return a MachineClassInterface. -// A group's client should implement this interface. 
-type MachineClassesGetter interface { - MachineClasses(namespace string) MachineClassInterface -} - -// MachineClassInterface has methods to work with MachineClass resources. -type MachineClassInterface interface { - Create(*v1alpha1.MachineClass) (*v1alpha1.MachineClass, error) - Update(*v1alpha1.MachineClass) (*v1alpha1.MachineClass, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.MachineClass, error) - List(opts v1.ListOptions) (*v1alpha1.MachineClassList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.MachineClass, err error) - MachineClassExpansion -} - -// machineClasses implements MachineClassInterface -type machineClasses struct { - client rest.Interface - ns string -} - -// newMachineClasses returns a MachineClasses -func newMachineClasses(c *ClusterV1alpha1Client, namespace string) *machineClasses { - return &machineClasses{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the machineClass, and returns the corresponding machineClass object, and an error if there is any. -func (c *machineClasses) Get(name string, options v1.GetOptions) (result *v1alpha1.MachineClass, err error) { - result = &v1alpha1.MachineClass{} - err = c.client.Get(). - Namespace(c.ns). - Resource("machineclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of MachineClasses that match those selectors. -func (c *machineClasses) List(opts v1.ListOptions) (result *v1alpha1.MachineClassList, err error) { - result = &v1alpha1.MachineClassList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("machineclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested machineClasses. -func (c *machineClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("machineclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a machineClass and creates it. Returns the server's representation of the machineClass, and an error, if there is any. -func (c *machineClasses) Create(machineClass *v1alpha1.MachineClass) (result *v1alpha1.MachineClass, err error) { - result = &v1alpha1.MachineClass{} - err = c.client.Post(). - Namespace(c.ns). - Resource("machineclasses"). - Body(machineClass). - Do(). - Into(result) - return -} - -// Update takes the representation of a machineClass and updates it. Returns the server's representation of the machineClass, and an error, if there is any. -func (c *machineClasses) Update(machineClass *v1alpha1.MachineClass) (result *v1alpha1.MachineClass, err error) { - result = &v1alpha1.MachineClass{} - err = c.client.Put(). - Namespace(c.ns). - Resource("machineclasses"). - Name(machineClass.Name). - Body(machineClass). - Do(). - Into(result) - return -} - -// Delete takes name of the machineClass and deletes it. Returns an error if one occurs. -func (c *machineClasses) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("machineclasses"). - Name(name). - Body(options). - Do(). 
- Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *machineClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("machineclasses"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched machineClass. -func (c *machineClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.MachineClass, err error) { - result = &v1alpha1.MachineClass{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("machineclasses"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/machinedeployment.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/machinedeployment.go deleted file mode 100644 index 9582b5bc692a..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/machinedeployment.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1" - scheme "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// MachineDeploymentsGetter has a method to return a MachineDeploymentInterface. -// A group's client should implement this interface. -type MachineDeploymentsGetter interface { - MachineDeployments(namespace string) MachineDeploymentInterface -} - -// MachineDeploymentInterface has methods to work with MachineDeployment resources. 
-type MachineDeploymentInterface interface { - Create(*v1alpha1.MachineDeployment) (*v1alpha1.MachineDeployment, error) - Update(*v1alpha1.MachineDeployment) (*v1alpha1.MachineDeployment, error) - UpdateStatus(*v1alpha1.MachineDeployment) (*v1alpha1.MachineDeployment, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.MachineDeployment, error) - List(opts v1.ListOptions) (*v1alpha1.MachineDeploymentList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.MachineDeployment, err error) - MachineDeploymentExpansion -} - -// machineDeployments implements MachineDeploymentInterface -type machineDeployments struct { - client rest.Interface - ns string -} - -// newMachineDeployments returns a MachineDeployments -func newMachineDeployments(c *ClusterV1alpha1Client, namespace string) *machineDeployments { - return &machineDeployments{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the machineDeployment, and returns the corresponding machineDeployment object, and an error if there is any. -func (c *machineDeployments) Get(name string, options v1.GetOptions) (result *v1alpha1.MachineDeployment, err error) { - result = &v1alpha1.MachineDeployment{} - err = c.client.Get(). - Namespace(c.ns). - Resource("machinedeployments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of MachineDeployments that match those selectors. -func (c *machineDeployments) List(opts v1.ListOptions) (result *v1alpha1.MachineDeploymentList, err error) { - result = &v1alpha1.MachineDeploymentList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("machinedeployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested machineDeployments. -func (c *machineDeployments) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("machinedeployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a machineDeployment and creates it. Returns the server's representation of the machineDeployment, and an error, if there is any. -func (c *machineDeployments) Create(machineDeployment *v1alpha1.MachineDeployment) (result *v1alpha1.MachineDeployment, err error) { - result = &v1alpha1.MachineDeployment{} - err = c.client.Post(). - Namespace(c.ns). - Resource("machinedeployments"). - Body(machineDeployment). - Do(). - Into(result) - return -} - -// Update takes the representation of a machineDeployment and updates it. Returns the server's representation of the machineDeployment, and an error, if there is any. -func (c *machineDeployments) Update(machineDeployment *v1alpha1.MachineDeployment) (result *v1alpha1.MachineDeployment, err error) { - result = &v1alpha1.MachineDeployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("machinedeployments"). - Name(machineDeployment.Name). - Body(machineDeployment). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- -func (c *machineDeployments) UpdateStatus(machineDeployment *v1alpha1.MachineDeployment) (result *v1alpha1.MachineDeployment, err error) { - result = &v1alpha1.MachineDeployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("machinedeployments"). - Name(machineDeployment.Name). - SubResource("status"). - Body(machineDeployment). - Do(). - Into(result) - return -} - -// Delete takes name of the machineDeployment and deletes it. Returns an error if one occurs. -func (c *machineDeployments) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("machinedeployments"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *machineDeployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("machinedeployments"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched machineDeployment. -func (c *machineDeployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.MachineDeployment, err error) { - result = &v1alpha1.MachineDeployment{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("machinedeployments"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/machineset.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/machineset.go deleted file mode 100644 index 42dee1177572..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/machineset.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1" - scheme "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// MachineSetsGetter has a method to return a MachineSetInterface. -// A group's client should implement this interface. -type MachineSetsGetter interface { - MachineSets(namespace string) MachineSetInterface -} - -// MachineSetInterface has methods to work with MachineSet resources. 
-type MachineSetInterface interface { - Create(*v1alpha1.MachineSet) (*v1alpha1.MachineSet, error) - Update(*v1alpha1.MachineSet) (*v1alpha1.MachineSet, error) - UpdateStatus(*v1alpha1.MachineSet) (*v1alpha1.MachineSet, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.MachineSet, error) - List(opts v1.ListOptions) (*v1alpha1.MachineSetList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.MachineSet, err error) - MachineSetExpansion -} - -// machineSets implements MachineSetInterface -type machineSets struct { - client rest.Interface - ns string -} - -// newMachineSets returns a MachineSets -func newMachineSets(c *ClusterV1alpha1Client, namespace string) *machineSets { - return &machineSets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the machineSet, and returns the corresponding machineSet object, and an error if there is any. -func (c *machineSets) Get(name string, options v1.GetOptions) (result *v1alpha1.MachineSet, err error) { - result = &v1alpha1.MachineSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("machinesets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of MachineSets that match those selectors. -func (c *machineSets) List(opts v1.ListOptions) (result *v1alpha1.MachineSetList, err error) { - result = &v1alpha1.MachineSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("machinesets"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested machineSets. -func (c *machineSets) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("machinesets"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a machineSet and creates it. Returns the server's representation of the machineSet, and an error, if there is any. -func (c *machineSets) Create(machineSet *v1alpha1.MachineSet) (result *v1alpha1.MachineSet, err error) { - result = &v1alpha1.MachineSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("machinesets"). - Body(machineSet). - Do(). - Into(result) - return -} - -// Update takes the representation of a machineSet and updates it. Returns the server's representation of the machineSet, and an error, if there is any. -func (c *machineSets) Update(machineSet *v1alpha1.MachineSet) (result *v1alpha1.MachineSet, err error) { - result = &v1alpha1.MachineSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("machinesets"). - Name(machineSet.Name). - Body(machineSet). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *machineSets) UpdateStatus(machineSet *v1alpha1.MachineSet) (result *v1alpha1.MachineSet, err error) { - result = &v1alpha1.MachineSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("machinesets"). - Name(machineSet.Name). - SubResource("status"). - Body(machineSet). - Do(). - Into(result) - return -} - -// Delete takes name of the machineSet and deletes it. 
Returns an error if one occurs. -func (c *machineSets) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("machinesets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *machineSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("machinesets"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched machineSet. -func (c *machineSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.MachineSet, err error) { - result = &v1alpha1.MachineSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("machinesets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/BUILD.bazel b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/BUILD.bazel deleted file mode 100644 index be7c3feb1d45..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/BUILD.bazel +++ /dev/null @@ -1,26 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cluster.go", - "doc.go", - "generated_expansion.go", - "machine.go", - "machine_client.go", - "machineclass.go", - "machinedeployment.go", - "machineset.go", - ], - importpath = "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//pkg/apis/machine/v1beta1:go_default_library", - "//pkg/client/clientset_generated/clientset/scheme:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/cluster.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/cluster.go deleted file mode 100644 index b996ae44ad52..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/cluster.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
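// Editorial sketch (not part of the diff): the typed clients removed in this
// PR all share the client-gen shape shown above, so a single usage example
// covers them. Everything here is hypothetical: `cs` is an aggregate
// clientset assumed to expose the usual ClusterV1alpha1() accessor, and the
// namespace, name, and replica count are placeholders, not values from this PR.

package example // standalone sketch, separate from the vendored files

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	clientset "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset"
)

// scaleMachineSet resizes a MachineSet through the Get/Update round trip
// exposed by the v1alpha1 typed client deleted above.
func scaleMachineSet(cs clientset.Interface, replicas int32) error {
	ms, err := cs.ClusterV1alpha1().MachineSets("default").Get("workers", metav1.GetOptions{})
	if err != nil {
		return err
	}
	ms.Spec.Replicas = &replicas // MachineSetSpec.Replicas is a *int32 in v1alpha1
	_, err = cs.ClusterV1alpha1().MachineSets("default").Update(ms)
	return err
}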
- -package v1beta1 - -import ( - v1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" - scheme "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// ClustersGetter has a method to return a ClusterInterface. -// A group's client should implement this interface. -type ClustersGetter interface { - Clusters(namespace string) ClusterInterface -} - -// ClusterInterface has methods to work with Cluster resources. -type ClusterInterface interface { - Create(*v1beta1.Cluster) (*v1beta1.Cluster, error) - Update(*v1beta1.Cluster) (*v1beta1.Cluster, error) - UpdateStatus(*v1beta1.Cluster) (*v1beta1.Cluster, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.Cluster, error) - List(opts v1.ListOptions) (*v1beta1.ClusterList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Cluster, err error) - ClusterExpansion -} - -// clusters implements ClusterInterface -type clusters struct { - client rest.Interface - ns string -} - -// newClusters returns a Clusters -func newClusters(c *MachineV1beta1Client, namespace string) *clusters { - return &clusters{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the cluster, and returns the corresponding cluster object, and an error if there is any. -func (c *clusters) Get(name string, options v1.GetOptions) (result *v1beta1.Cluster, err error) { - result = &v1beta1.Cluster{} - err = c.client.Get(). - Namespace(c.ns). - Resource("clusters"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Clusters that match those selectors. -func (c *clusters) List(opts v1.ListOptions) (result *v1beta1.ClusterList, err error) { - result = &v1beta1.ClusterList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("clusters"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusters. -func (c *clusters) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("clusters"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a cluster and creates it. Returns the server's representation of the cluster, and an error, if there is any. -func (c *clusters) Create(cluster *v1beta1.Cluster) (result *v1beta1.Cluster, err error) { - result = &v1beta1.Cluster{} - err = c.client.Post(). - Namespace(c.ns). - Resource("clusters"). - Body(cluster). - Do(). - Into(result) - return -} - -// Update takes the representation of a cluster and updates it. Returns the server's representation of the cluster, and an error, if there is any. -func (c *clusters) Update(cluster *v1beta1.Cluster) (result *v1beta1.Cluster, err error) { - result = &v1beta1.Cluster{} - err = c.client.Put(). - Namespace(c.ns). - Resource("clusters"). - Name(cluster.Name). - Body(cluster). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. 
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *clusters) UpdateStatus(cluster *v1beta1.Cluster) (result *v1beta1.Cluster, err error) { - result = &v1beta1.Cluster{} - err = c.client.Put(). - Namespace(c.ns). - Resource("clusters"). - Name(cluster.Name). - SubResource("status"). - Body(cluster). - Do(). - Into(result) - return -} - -// Delete takes name of the cluster and deletes it. Returns an error if one occurs. -func (c *clusters) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("clusters"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusters) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("clusters"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched cluster. -func (c *clusters) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Cluster, err error) { - result = &v1beta1.Cluster{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("clusters"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/doc.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/doc.go deleted file mode 100644 index 11ae7049d165..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. 
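// Editorial sketch (not part of the diff): the Watch methods in these clients
// return the standard watch.Interface, so callers drain ResultChan() like any
// other Kubernetes watch. A hedged example under the same assumed aggregate
// clientset as before (its MachineV1beta1() accessor appears later in this
// diff); the stop-channel handling and logging are illustrative only.

package example

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	clientset "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset"
)

// watchClusters prints cluster events until the watch closes or stop fires.
func watchClusters(cs clientset.Interface, stop <-chan struct{}) error {
	w, err := cs.MachineV1beta1().Clusters("default").Watch(metav1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()
	for {
		select {
		case ev, ok := <-w.ResultChan():
			if !ok {
				return nil // server closed the watch; real callers re-establish it
			}
			fmt.Printf("cluster event: %s\n", ev.Type)
		case <-stop:
			return nil
		}
	}
}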
-package v1beta1 diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/fake/BUILD.bazel b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/fake/BUILD.bazel deleted file mode 100644 index 6fcca0ee8508..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/fake/BUILD.bazel +++ /dev/null @@ -1,27 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "fake_cluster.go", - "fake_machine.go", - "fake_machine_client.go", - "fake_machineclass.go", - "fake_machinedeployment.go", - "fake_machineset.go", - ], - importpath = "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/fake", - visibility = ["//visibility:public"], - deps = [ - "//pkg/apis/machine/v1beta1:go_default_library", - "//pkg/client/clientset_generated/clientset/typed/machine/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/client-go/testing:go_default_library", - ], -) diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/fake/doc.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/fake/doc.go deleted file mode 100644 index 87a1873edccb..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/fake/fake_cluster.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/fake/fake_cluster.go deleted file mode 100644 index 6e6087777ec8..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/fake/fake_cluster.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeClusters implements ClusterInterface -type FakeClusters struct { - Fake *FakeMachineV1beta1 - ns string -} - -var clustersResource = schema.GroupVersionResource{Group: "machine.openshift.io", Version: "v1beta1", Resource: "clusters"} - -var clustersKind = schema.GroupVersionKind{Group: "machine.openshift.io", Version: "v1beta1", Kind: "Cluster"} - -// Get takes name of the cluster, and returns the corresponding cluster object, and an error if there is any. -func (c *FakeClusters) Get(name string, options v1.GetOptions) (result *v1beta1.Cluster, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(clustersResource, c.ns, name), &v1beta1.Cluster{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Cluster), err -} - -// List takes label and field selectors, and returns the list of Clusters that match those selectors. -func (c *FakeClusters) List(opts v1.ListOptions) (result *v1beta1.ClusterList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(clustersResource, clustersKind, c.ns, opts), &v1beta1.ClusterList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.ClusterList{ListMeta: obj.(*v1beta1.ClusterList).ListMeta} - for _, item := range obj.(*v1beta1.ClusterList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested clusters. -func (c *FakeClusters) Watch(opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(clustersResource, c.ns, opts)) - -} - -// Create takes the representation of a cluster and creates it. Returns the server's representation of the cluster, and an error, if there is any. -func (c *FakeClusters) Create(cluster *v1beta1.Cluster) (result *v1beta1.Cluster, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(clustersResource, c.ns, cluster), &v1beta1.Cluster{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Cluster), err -} - -// Update takes the representation of a cluster and updates it. Returns the server's representation of the cluster, and an error, if there is any. -func (c *FakeClusters) Update(cluster *v1beta1.Cluster) (result *v1beta1.Cluster, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(clustersResource, c.ns, cluster), &v1beta1.Cluster{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Cluster), err -} - -// UpdateStatus was generated because the type contains a Status member. 
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeClusters) UpdateStatus(cluster *v1beta1.Cluster) (*v1beta1.Cluster, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(clustersResource, "status", c.ns, cluster), &v1beta1.Cluster{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Cluster), err -} - -// Delete takes name of the cluster and deletes it. Returns an error if one occurs. -func (c *FakeClusters) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(clustersResource, c.ns, name), &v1beta1.Cluster{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeClusters) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(clustersResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1beta1.ClusterList{}) - return err -} - -// Patch applies the patch and returns the patched cluster. -func (c *FakeClusters) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Cluster, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(clustersResource, c.ns, name, data, subresources...), &v1beta1.Cluster{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Cluster), err -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/fake/fake_machine.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/fake/fake_machine.go deleted file mode 100644 index 8f686996291f..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/fake/fake_machine.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeMachines implements MachineInterface -type FakeMachines struct { - Fake *FakeMachineV1beta1 - ns string -} - -var machinesResource = schema.GroupVersionResource{Group: "machine.openshift.io", Version: "v1beta1", Resource: "machines"} - -var machinesKind = schema.GroupVersionKind{Group: "machine.openshift.io", Version: "v1beta1", Kind: "Machine"} - -// Get takes name of the machine, and returns the corresponding machine object, and an error if there is any. -func (c *FakeMachines) Get(name string, options v1.GetOptions) (result *v1beta1.Machine, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewGetAction(machinesResource, c.ns, name), &v1beta1.Machine{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Machine), err -} - -// List takes label and field selectors, and returns the list of Machines that match those selectors. -func (c *FakeMachines) List(opts v1.ListOptions) (result *v1beta1.MachineList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(machinesResource, machinesKind, c.ns, opts), &v1beta1.MachineList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.MachineList{ListMeta: obj.(*v1beta1.MachineList).ListMeta} - for _, item := range obj.(*v1beta1.MachineList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested machines. -func (c *FakeMachines) Watch(opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(machinesResource, c.ns, opts)) - -} - -// Create takes the representation of a machine and creates it. Returns the server's representation of the machine, and an error, if there is any. -func (c *FakeMachines) Create(machine *v1beta1.Machine) (result *v1beta1.Machine, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(machinesResource, c.ns, machine), &v1beta1.Machine{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Machine), err -} - -// Update takes the representation of a machine and updates it. Returns the server's representation of the machine, and an error, if there is any. -func (c *FakeMachines) Update(machine *v1beta1.Machine) (result *v1beta1.Machine, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(machinesResource, c.ns, machine), &v1beta1.Machine{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Machine), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeMachines) UpdateStatus(machine *v1beta1.Machine) (*v1beta1.Machine, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(machinesResource, "status", c.ns, machine), &v1beta1.Machine{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Machine), err -} - -// Delete takes name of the machine and deletes it. Returns an error if one occurs. -func (c *FakeMachines) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(machinesResource, c.ns, name), &v1beta1.Machine{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeMachines) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(machinesResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1beta1.MachineList{}) - return err -} - -// Patch applies the patch and returns the patched machine. -func (c *FakeMachines) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Machine, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(machinesResource, c.ns, name, data, subresources...), &v1beta1.Machine{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Machine), err -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/fake/fake_machine_client.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/fake/fake_machine_client.go deleted file mode 100644 index c94b1ce47e08..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/fake/fake_machine_client.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1beta1 "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeMachineV1beta1 struct { - *testing.Fake -} - -func (c *FakeMachineV1beta1) Clusters(namespace string) v1beta1.ClusterInterface { - return &FakeClusters{c, namespace} -} - -func (c *FakeMachineV1beta1) Machines(namespace string) v1beta1.MachineInterface { - return &FakeMachines{c, namespace} -} - -func (c *FakeMachineV1beta1) MachineClasses(namespace string) v1beta1.MachineClassInterface { - return &FakeMachineClasses{c, namespace} -} - -func (c *FakeMachineV1beta1) MachineDeployments(namespace string) v1beta1.MachineDeploymentInterface { - return &FakeMachineDeployments{c, namespace} -} - -func (c *FakeMachineV1beta1) MachineSets(namespace string) v1beta1.MachineSetInterface { - return &FakeMachineSets{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeMachineV1beta1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/fake/fake_machineclass.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/fake/fake_machineclass.go deleted file mode 100644 index 018e6d24c9db..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/fake/fake_machineclass.go +++ /dev/null @@ -1,128 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeMachineClasses implements MachineClassInterface -type FakeMachineClasses struct { - Fake *FakeMachineV1beta1 - ns string -} - -var machineclassesResource = schema.GroupVersionResource{Group: "machine.openshift.io", Version: "v1beta1", Resource: "machineclasses"} - -var machineclassesKind = schema.GroupVersionKind{Group: "machine.openshift.io", Version: "v1beta1", Kind: "MachineClass"} - -// Get takes name of the machineClass, and returns the corresponding machineClass object, and an error if there is any. -func (c *FakeMachineClasses) Get(name string, options v1.GetOptions) (result *v1beta1.MachineClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(machineclassesResource, c.ns, name), &v1beta1.MachineClass{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.MachineClass), err -} - -// List takes label and field selectors, and returns the list of MachineClasses that match those selectors. -func (c *FakeMachineClasses) List(opts v1.ListOptions) (result *v1beta1.MachineClassList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(machineclassesResource, machineclassesKind, c.ns, opts), &v1beta1.MachineClassList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.MachineClassList{ListMeta: obj.(*v1beta1.MachineClassList).ListMeta} - for _, item := range obj.(*v1beta1.MachineClassList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested machineClasses. -func (c *FakeMachineClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(machineclassesResource, c.ns, opts)) - -} - -// Create takes the representation of a machineClass and creates it. Returns the server's representation of the machineClass, and an error, if there is any. -func (c *FakeMachineClasses) Create(machineClass *v1beta1.MachineClass) (result *v1beta1.MachineClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(machineclassesResource, c.ns, machineClass), &v1beta1.MachineClass{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.MachineClass), err -} - -// Update takes the representation of a machineClass and updates it. Returns the server's representation of the machineClass, and an error, if there is any. -func (c *FakeMachineClasses) Update(machineClass *v1beta1.MachineClass) (result *v1beta1.MachineClass, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewUpdateAction(machineclassesResource, c.ns, machineClass), &v1beta1.MachineClass{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.MachineClass), err -} - -// Delete takes name of the machineClass and deletes it. Returns an error if one occurs. -func (c *FakeMachineClasses) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(machineclassesResource, c.ns, name), &v1beta1.MachineClass{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeMachineClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(machineclassesResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1beta1.MachineClassList{}) - return err -} - -// Patch applies the patch and returns the patched machineClass. -func (c *FakeMachineClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MachineClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(machineclassesResource, c.ns, name, data, subresources...), &v1beta1.MachineClass{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.MachineClass), err -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/fake/fake_machinedeployment.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/fake/fake_machinedeployment.go deleted file mode 100644 index 0cd408f6d9c2..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/fake/fake_machinedeployment.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeMachineDeployments implements MachineDeploymentInterface -type FakeMachineDeployments struct { - Fake *FakeMachineV1beta1 - ns string -} - -var machinedeploymentsResource = schema.GroupVersionResource{Group: "machine.openshift.io", Version: "v1beta1", Resource: "machinedeployments"} - -var machinedeploymentsKind = schema.GroupVersionKind{Group: "machine.openshift.io", Version: "v1beta1", Kind: "MachineDeployment"} - -// Get takes name of the machineDeployment, and returns the corresponding machineDeployment object, and an error if there is any. 
-func (c *FakeMachineDeployments) Get(name string, options v1.GetOptions) (result *v1beta1.MachineDeployment, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(machinedeploymentsResource, c.ns, name), &v1beta1.MachineDeployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.MachineDeployment), err -} - -// List takes label and field selectors, and returns the list of MachineDeployments that match those selectors. -func (c *FakeMachineDeployments) List(opts v1.ListOptions) (result *v1beta1.MachineDeploymentList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(machinedeploymentsResource, machinedeploymentsKind, c.ns, opts), &v1beta1.MachineDeploymentList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.MachineDeploymentList{ListMeta: obj.(*v1beta1.MachineDeploymentList).ListMeta} - for _, item := range obj.(*v1beta1.MachineDeploymentList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested machineDeployments. -func (c *FakeMachineDeployments) Watch(opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(machinedeploymentsResource, c.ns, opts)) - -} - -// Create takes the representation of a machineDeployment and creates it. Returns the server's representation of the machineDeployment, and an error, if there is any. -func (c *FakeMachineDeployments) Create(machineDeployment *v1beta1.MachineDeployment) (result *v1beta1.MachineDeployment, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(machinedeploymentsResource, c.ns, machineDeployment), &v1beta1.MachineDeployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.MachineDeployment), err -} - -// Update takes the representation of a machineDeployment and updates it. Returns the server's representation of the machineDeployment, and an error, if there is any. -func (c *FakeMachineDeployments) Update(machineDeployment *v1beta1.MachineDeployment) (result *v1beta1.MachineDeployment, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(machinedeploymentsResource, c.ns, machineDeployment), &v1beta1.MachineDeployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.MachineDeployment), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeMachineDeployments) UpdateStatus(machineDeployment *v1beta1.MachineDeployment) (*v1beta1.MachineDeployment, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(machinedeploymentsResource, "status", c.ns, machineDeployment), &v1beta1.MachineDeployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.MachineDeployment), err -} - -// Delete takes name of the machineDeployment and deletes it. Returns an error if one occurs. -func (c *FakeMachineDeployments) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(machinedeploymentsResource, c.ns, name), &v1beta1.MachineDeployment{}) - - return err -} - -// DeleteCollection deletes a collection of objects. 
-func (c *FakeMachineDeployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(machinedeploymentsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1beta1.MachineDeploymentList{}) - return err -} - -// Patch applies the patch and returns the patched machineDeployment. -func (c *FakeMachineDeployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MachineDeployment, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(machinedeploymentsResource, c.ns, name, data, subresources...), &v1beta1.MachineDeployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.MachineDeployment), err -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/fake/fake_machineset.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/fake/fake_machineset.go deleted file mode 100644 index 2bcf0b380091..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/fake/fake_machineset.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeMachineSets implements MachineSetInterface -type FakeMachineSets struct { - Fake *FakeMachineV1beta1 - ns string -} - -var machinesetsResource = schema.GroupVersionResource{Group: "machine.openshift.io", Version: "v1beta1", Resource: "machinesets"} - -var machinesetsKind = schema.GroupVersionKind{Group: "machine.openshift.io", Version: "v1beta1", Kind: "MachineSet"} - -// Get takes name of the machineSet, and returns the corresponding machineSet object, and an error if there is any. -func (c *FakeMachineSets) Get(name string, options v1.GetOptions) (result *v1beta1.MachineSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(machinesetsResource, c.ns, name), &v1beta1.MachineSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.MachineSet), err -} - -// List takes label and field selectors, and returns the list of MachineSets that match those selectors. -func (c *FakeMachineSets) List(opts v1.ListOptions) (result *v1beta1.MachineSetList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewListAction(machinesetsResource, machinesetsKind, c.ns, opts), &v1beta1.MachineSetList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.MachineSetList{ListMeta: obj.(*v1beta1.MachineSetList).ListMeta} - for _, item := range obj.(*v1beta1.MachineSetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested machineSets. -func (c *FakeMachineSets) Watch(opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(machinesetsResource, c.ns, opts)) - -} - -// Create takes the representation of a machineSet and creates it. Returns the server's representation of the machineSet, and an error, if there is any. -func (c *FakeMachineSets) Create(machineSet *v1beta1.MachineSet) (result *v1beta1.MachineSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(machinesetsResource, c.ns, machineSet), &v1beta1.MachineSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.MachineSet), err -} - -// Update takes the representation of a machineSet and updates it. Returns the server's representation of the machineSet, and an error, if there is any. -func (c *FakeMachineSets) Update(machineSet *v1beta1.MachineSet) (result *v1beta1.MachineSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(machinesetsResource, c.ns, machineSet), &v1beta1.MachineSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.MachineSet), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeMachineSets) UpdateStatus(machineSet *v1beta1.MachineSet) (*v1beta1.MachineSet, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(machinesetsResource, "status", c.ns, machineSet), &v1beta1.MachineSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.MachineSet), err -} - -// Delete takes name of the machineSet and deletes it. Returns an error if one occurs. -func (c *FakeMachineSets) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(machinesetsResource, c.ns, name), &v1beta1.MachineSet{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeMachineSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(machinesetsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1beta1.MachineSetList{}) - return err -} - -// Patch applies the patch and returns the patched machineSet. -func (c *FakeMachineSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MachineSet, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(machinesetsResource, c.ns, name, data, subresources...), &v1beta1.MachineSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.MachineSet), err -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/generated_expansion.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/generated_expansion.go deleted file mode 100644 index e4cc01446439..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/generated_expansion.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -type ClusterExpansion interface{} - -type MachineExpansion interface{} - -type MachineClassExpansion interface{} - -type MachineDeploymentExpansion interface{} - -type MachineSetExpansion interface{} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/machine.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/machine.go deleted file mode 100644 index c3f02e9542d2..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/machine.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" - scheme "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// MachinesGetter has a method to return a MachineInterface. -// A group's client should implement this interface. -type MachinesGetter interface { - Machines(namespace string) MachineInterface -} - -// MachineInterface has methods to work with Machine resources. 
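// Editorial sketch (not part of the diff): the fake clients deleted above sit
// on k8s.io/client-go/testing's object tracker, so unit tests seed objects and
// then call the same typed methods as production code. This assumes the
// companion clientset/fake package that client-gen emits next to these files
// (its NewSimpleClientset constructor is not shown in this hunk); all names
// and the namespace are placeholders.

package example

import (
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	machinev1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1"
	"github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/fake"
)

func TestMachineSetGet(t *testing.T) {
	seed := &machinev1beta1.MachineSet{
		ObjectMeta: metav1.ObjectMeta{Name: "workers", Namespace: "default"},
	}
	cs := fake.NewSimpleClientset(seed)
	got, err := cs.MachineV1beta1().MachineSets("default").Get("workers", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Get returned error: %v", err)
	}
	if got.Name != "workers" {
		t.Fatalf("got MachineSet %q, want %q", got.Name, "workers")
	}
}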
-type MachineInterface interface { - Create(*v1beta1.Machine) (*v1beta1.Machine, error) - Update(*v1beta1.Machine) (*v1beta1.Machine, error) - UpdateStatus(*v1beta1.Machine) (*v1beta1.Machine, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.Machine, error) - List(opts v1.ListOptions) (*v1beta1.MachineList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Machine, err error) - MachineExpansion -} - -// machines implements MachineInterface -type machines struct { - client rest.Interface - ns string -} - -// newMachines returns a Machines -func newMachines(c *MachineV1beta1Client, namespace string) *machines { - return &machines{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the machine, and returns the corresponding machine object, and an error if there is any. -func (c *machines) Get(name string, options v1.GetOptions) (result *v1beta1.Machine, err error) { - result = &v1beta1.Machine{} - err = c.client.Get(). - Namespace(c.ns). - Resource("machines"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Machines that match those selectors. -func (c *machines) List(opts v1.ListOptions) (result *v1beta1.MachineList, err error) { - result = &v1beta1.MachineList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("machines"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested machines. -func (c *machines) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("machines"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a machine and creates it. Returns the server's representation of the machine, and an error, if there is any. -func (c *machines) Create(machine *v1beta1.Machine) (result *v1beta1.Machine, err error) { - result = &v1beta1.Machine{} - err = c.client.Post(). - Namespace(c.ns). - Resource("machines"). - Body(machine). - Do(). - Into(result) - return -} - -// Update takes the representation of a machine and updates it. Returns the server's representation of the machine, and an error, if there is any. -func (c *machines) Update(machine *v1beta1.Machine) (result *v1beta1.Machine, err error) { - result = &v1beta1.Machine{} - err = c.client.Put(). - Namespace(c.ns). - Resource("machines"). - Name(machine.Name). - Body(machine). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *machines) UpdateStatus(machine *v1beta1.Machine) (result *v1beta1.Machine, err error) { - result = &v1beta1.Machine{} - err = c.client.Put(). - Namespace(c.ns). - Resource("machines"). - Name(machine.Name). - SubResource("status"). - Body(machine). - Do(). - Into(result) - return -} - -// Delete takes name of the machine and deletes it. Returns an error if one occurs. -func (c *machines) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("machines"). - Name(name). 
- Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *machines) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("machines"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched machine. -func (c *machines) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Machine, err error) { - result = &v1beta1.Machine{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("machines"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/machine_client.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/machine_client.go deleted file mode 100644 index 93e84915757a..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/machine_client.go +++ /dev/null @@ -1,110 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" - "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/scheme" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - rest "k8s.io/client-go/rest" -) - -type MachineV1beta1Interface interface { - RESTClient() rest.Interface - ClustersGetter - MachinesGetter - MachineClassesGetter - MachineDeploymentsGetter - MachineSetsGetter -} - -// MachineV1beta1Client is used to interact with features provided by the machine.openshift.io group. -type MachineV1beta1Client struct { - restClient rest.Interface -} - -func (c *MachineV1beta1Client) Clusters(namespace string) ClusterInterface { - return newClusters(c, namespace) -} - -func (c *MachineV1beta1Client) Machines(namespace string) MachineInterface { - return newMachines(c, namespace) -} - -func (c *MachineV1beta1Client) MachineClasses(namespace string) MachineClassInterface { - return newMachineClasses(c, namespace) -} - -func (c *MachineV1beta1Client) MachineDeployments(namespace string) MachineDeploymentInterface { - return newMachineDeployments(c, namespace) -} - -func (c *MachineV1beta1Client) MachineSets(namespace string) MachineSetInterface { - return newMachineSets(c, namespace) -} - -// NewForConfig creates a new MachineV1beta1Client for the given config. 
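The Watch and Patch methods removed above follow the usual pre-context client-go semantics: Watch forces `opts.Watch = true` and returns a `watch.Interface` whose events are drained from `ResultChan()`, while Patch issues a PATCH with the caller-supplied patch type. A hedged sketch; the machine name, namespace, and label value are illustrative:

```go
package main

import (
	"fmt"

	machinev1beta1 "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

// demo watches machines in one namespace and merge-patches a label onto one.
func demo(client machinev1beta1.MachineV1beta1Interface) error {
	w, err := client.Machines("default").Watch(metav1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()
	go func() {
		// Each event carries a Type (Added/Modified/Deleted) and the object.
		for ev := range w.ResultChan() {
			fmt.Printf("event: %s\n", ev.Type)
		}
	}()

	// A strategic-merge-style label patch; name and label are hypothetical.
	patch := []byte(`{"metadata":{"labels":{"example":"true"}}}`)
	_, err = client.Machines("default").Patch("my-machine", types.MergePatchType, patch)
	return err
}
```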
-func NewForConfig(c *rest.Config) (*MachineV1beta1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &MachineV1beta1Client{client}, nil -} - -// NewForConfigOrDie creates a new MachineV1beta1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *MachineV1beta1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new MachineV1beta1Client for the given RESTClient. -func New(c rest.Interface) *MachineV1beta1Client { - return &MachineV1beta1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *MachineV1beta1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/machineclass.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/machineclass.go deleted file mode 100644 index 7bee80f3c0d4..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/machineclass.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" - scheme "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// MachineClassesGetter has a method to return a MachineClassInterface. -// A group's client should implement this interface. -type MachineClassesGetter interface { - MachineClasses(namespace string) MachineClassInterface -} - -// MachineClassInterface has methods to work with MachineClass resources. 
-type MachineClassInterface interface { - Create(*v1beta1.MachineClass) (*v1beta1.MachineClass, error) - Update(*v1beta1.MachineClass) (*v1beta1.MachineClass, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.MachineClass, error) - List(opts v1.ListOptions) (*v1beta1.MachineClassList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MachineClass, err error) - MachineClassExpansion -} - -// machineClasses implements MachineClassInterface -type machineClasses struct { - client rest.Interface - ns string -} - -// newMachineClasses returns a MachineClasses -func newMachineClasses(c *MachineV1beta1Client, namespace string) *machineClasses { - return &machineClasses{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the machineClass, and returns the corresponding machineClass object, and an error if there is any. -func (c *machineClasses) Get(name string, options v1.GetOptions) (result *v1beta1.MachineClass, err error) { - result = &v1beta1.MachineClass{} - err = c.client.Get(). - Namespace(c.ns). - Resource("machineclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of MachineClasses that match those selectors. -func (c *machineClasses) List(opts v1.ListOptions) (result *v1beta1.MachineClassList, err error) { - result = &v1beta1.MachineClassList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("machineclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested machineClasses. -func (c *machineClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("machineclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a machineClass and creates it. Returns the server's representation of the machineClass, and an error, if there is any. -func (c *machineClasses) Create(machineClass *v1beta1.MachineClass) (result *v1beta1.MachineClass, err error) { - result = &v1beta1.MachineClass{} - err = c.client.Post(). - Namespace(c.ns). - Resource("machineclasses"). - Body(machineClass). - Do(). - Into(result) - return -} - -// Update takes the representation of a machineClass and updates it. Returns the server's representation of the machineClass, and an error, if there is any. -func (c *machineClasses) Update(machineClass *v1beta1.MachineClass) (result *v1beta1.MachineClass, err error) { - result = &v1beta1.MachineClass{} - err = c.client.Put(). - Namespace(c.ns). - Resource("machineclasses"). - Name(machineClass.Name). - Body(machineClass). - Do(). - Into(result) - return -} - -// Delete takes name of the machineClass and deletes it. Returns an error if one occurs. -func (c *machineClasses) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("machineclasses"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *machineClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). 
- Resource("machineclasses"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched machineClass. -func (c *machineClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MachineClass, err error) { - result = &v1beta1.MachineClass{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("machineclasses"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/machinedeployment.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/machinedeployment.go deleted file mode 100644 index 0d5550c18b12..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/machinedeployment.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" - scheme "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// MachineDeploymentsGetter has a method to return a MachineDeploymentInterface. -// A group's client should implement this interface. -type MachineDeploymentsGetter interface { - MachineDeployments(namespace string) MachineDeploymentInterface -} - -// MachineDeploymentInterface has methods to work with MachineDeployment resources. 
-type MachineDeploymentInterface interface { - Create(*v1beta1.MachineDeployment) (*v1beta1.MachineDeployment, error) - Update(*v1beta1.MachineDeployment) (*v1beta1.MachineDeployment, error) - UpdateStatus(*v1beta1.MachineDeployment) (*v1beta1.MachineDeployment, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.MachineDeployment, error) - List(opts v1.ListOptions) (*v1beta1.MachineDeploymentList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MachineDeployment, err error) - MachineDeploymentExpansion -} - -// machineDeployments implements MachineDeploymentInterface -type machineDeployments struct { - client rest.Interface - ns string -} - -// newMachineDeployments returns a MachineDeployments -func newMachineDeployments(c *MachineV1beta1Client, namespace string) *machineDeployments { - return &machineDeployments{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the machineDeployment, and returns the corresponding machineDeployment object, and an error if there is any. -func (c *machineDeployments) Get(name string, options v1.GetOptions) (result *v1beta1.MachineDeployment, err error) { - result = &v1beta1.MachineDeployment{} - err = c.client.Get(). - Namespace(c.ns). - Resource("machinedeployments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of MachineDeployments that match those selectors. -func (c *machineDeployments) List(opts v1.ListOptions) (result *v1beta1.MachineDeploymentList, err error) { - result = &v1beta1.MachineDeploymentList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("machinedeployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested machineDeployments. -func (c *machineDeployments) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("machinedeployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a machineDeployment and creates it. Returns the server's representation of the machineDeployment, and an error, if there is any. -func (c *machineDeployments) Create(machineDeployment *v1beta1.MachineDeployment) (result *v1beta1.MachineDeployment, err error) { - result = &v1beta1.MachineDeployment{} - err = c.client.Post(). - Namespace(c.ns). - Resource("machinedeployments"). - Body(machineDeployment). - Do(). - Into(result) - return -} - -// Update takes the representation of a machineDeployment and updates it. Returns the server's representation of the machineDeployment, and an error, if there is any. -func (c *machineDeployments) Update(machineDeployment *v1beta1.MachineDeployment) (result *v1beta1.MachineDeployment, err error) { - result = &v1beta1.MachineDeployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("machinedeployments"). - Name(machineDeployment.Name). - Body(machineDeployment). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
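The generated comment above refers to client-gen's marker comments: `UpdateStatus` is emitted whenever the API type carries a Status member, unless the type opts out. A hypothetical type definition showing where those markers live; the `Example` type is illustrative and not part of this API group:

```go
package v1beta1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Illustrative only: the marker comments that steer client-gen.
//
// +genclient
// +genclient:noStatus
type Example struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
}
```

With `+genclient:noStatus` present, client-gen would omit the `UpdateStatus()` method seen in the deleted code here.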
- -func (c *machineDeployments) UpdateStatus(machineDeployment *v1beta1.MachineDeployment) (result *v1beta1.MachineDeployment, err error) { - result = &v1beta1.MachineDeployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("machinedeployments"). - Name(machineDeployment.Name). - SubResource("status"). - Body(machineDeployment). - Do(). - Into(result) - return -} - -// Delete takes name of the machineDeployment and deletes it. Returns an error if one occurs. -func (c *machineDeployments) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("machinedeployments"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *machineDeployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("machinedeployments"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched machineDeployment. -func (c *machineDeployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MachineDeployment, err error) { - result = &v1beta1.MachineDeployment{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("machinedeployments"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/machineset.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/machineset.go deleted file mode 100644 index 73d377629f00..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1/machineset.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" - scheme "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// MachineSetsGetter has a method to return a MachineSetInterface. -// A group's client should implement this interface. -type MachineSetsGetter interface { - MachineSets(namespace string) MachineSetInterface -} - -// MachineSetInterface has methods to work with MachineSet resources. 
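The status-subresource methods above PUT the whole object to the `/status` endpoint, so only the Status block is persisted by the server. A short round-trip sketch; the object name, namespace, and the `Status.Replicas` field touched here are assumptions about the MachineDeployment API type, shown for illustration only:

```go
package main

import (
	machinev1beta1 "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset/typed/machine/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// bumpStatus reads a machineDeployment, mutates its status, and writes the
// status back through the subresource endpoint.
func bumpStatus(client machinev1beta1.MachineV1beta1Interface) error {
	md, err := client.MachineDeployments("default").Get("example", metav1.GetOptions{})
	if err != nil {
		return err
	}
	md.Status.Replicas = 3 // assumed field, for illustration
	_, err = client.MachineDeployments("default").UpdateStatus(md)
	return err
}
```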
-type MachineSetInterface interface { - Create(*v1beta1.MachineSet) (*v1beta1.MachineSet, error) - Update(*v1beta1.MachineSet) (*v1beta1.MachineSet, error) - UpdateStatus(*v1beta1.MachineSet) (*v1beta1.MachineSet, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.MachineSet, error) - List(opts v1.ListOptions) (*v1beta1.MachineSetList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MachineSet, err error) - MachineSetExpansion -} - -// machineSets implements MachineSetInterface -type machineSets struct { - client rest.Interface - ns string -} - -// newMachineSets returns a MachineSets -func newMachineSets(c *MachineV1beta1Client, namespace string) *machineSets { - return &machineSets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the machineSet, and returns the corresponding machineSet object, and an error if there is any. -func (c *machineSets) Get(name string, options v1.GetOptions) (result *v1beta1.MachineSet, err error) { - result = &v1beta1.MachineSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("machinesets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of MachineSets that match those selectors. -func (c *machineSets) List(opts v1.ListOptions) (result *v1beta1.MachineSetList, err error) { - result = &v1beta1.MachineSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("machinesets"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested machineSets. -func (c *machineSets) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("machinesets"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a machineSet and creates it. Returns the server's representation of the machineSet, and an error, if there is any. -func (c *machineSets) Create(machineSet *v1beta1.MachineSet) (result *v1beta1.MachineSet, err error) { - result = &v1beta1.MachineSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("machinesets"). - Body(machineSet). - Do(). - Into(result) - return -} - -// Update takes the representation of a machineSet and updates it. Returns the server's representation of the machineSet, and an error, if there is any. -func (c *machineSets) Update(machineSet *v1beta1.MachineSet) (result *v1beta1.MachineSet, err error) { - result = &v1beta1.MachineSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("machinesets"). - Name(machineSet.Name). - Body(machineSet). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *machineSets) UpdateStatus(machineSet *v1beta1.MachineSet) (result *v1beta1.MachineSet, err error) { - result = &v1beta1.MachineSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("machinesets"). - Name(machineSet.Name). - SubResource("status"). - Body(machineSet). - Do(). - Into(result) - return -} - -// Delete takes name of the machineSet and deletes it. 
Returns an error if one occurs. -func (c *machineSets) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("machinesets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *machineSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("machinesets"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched machineSet. -func (c *machineSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MachineSet, err error) { - result = &v1beta1.MachineSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("machinesets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/BUILD.bazel b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/BUILD.bazel deleted file mode 100644 index 60a55cb9f642..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "factory.go", - "generic.go", - ], - importpath = "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions", - visibility = ["//visibility:public"], - deps = [ - "//pkg/apis/cluster/v1alpha1:go_default_library", - "//pkg/apis/machine/v1beta1:go_default_library", - "//pkg/client/clientset_generated/clientset:go_default_library", - "//pkg/client/informers_generated/externalversions/cluster:go_default_library", - "//pkg/client/informers_generated/externalversions/internalinterfaces:go_default_library", - "//pkg/client/informers_generated/externalversions/machine:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster/BUILD.bazel b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster/BUILD.bazel deleted file mode 100644 index 1d77735c7520..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["interface.go"], - importpath = "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster", - visibility = ["//visibility:public"], - deps = [ - "//pkg/client/informers_generated/externalversions/cluster/v1alpha1:go_default_library", - "//pkg/client/informers_generated/externalversions/internalinterfaces:go_default_library", - ], -) diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster/interface.go 
b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster/interface.go deleted file mode 100644 index f133f8ea32bc..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster/interface.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package cluster - -import ( - v1alpha1 "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1" - internalinterfaces "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces" -) - -// Interface provides access to each of this group's versions. -type Interface interface { - // V1alpha1 provides access to shared informers for resources in V1alpha1. - V1alpha1() v1alpha1.Interface -} - -type group struct { - factory internalinterfaces.SharedInformerFactory - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// V1alpha1 returns a new v1alpha1.Interface. 
-func (g *group) V1alpha1() v1alpha1.Interface { - return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/BUILD.bazel b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/BUILD.bazel deleted file mode 100644 index 60d106d83238..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cluster.go", - "interface.go", - "machine.go", - "machineclass.go", - "machinedeployment.go", - "machineset.go", - ], - importpath = "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//pkg/apis/cluster/v1alpha1:go_default_library", - "//pkg/client/clientset_generated/clientset:go_default_library", - "//pkg/client/informers_generated/externalversions/internalinterfaces:go_default_library", - "//pkg/client/listers_generated/cluster/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/cluster.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/cluster.go deleted file mode 100644 index 4a5e1ec53b45..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/cluster.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - time "time" - - clusterv1alpha1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1" - clientset "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset" - internalinterfaces "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces" - v1alpha1 "github.com/openshift/cluster-api/pkg/client/listers_generated/cluster/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// ClusterInformer provides access to a shared informer and lister for -// Clusters. 
-type ClusterInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha1.ClusterLister -} - -type clusterInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewClusterInformer constructs a new informer for Cluster type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewClusterInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredClusterInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredClusterInformer constructs a new informer for Cluster type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredClusterInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ClusterV1alpha1().Clusters(namespace).List(options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ClusterV1alpha1().Clusters(namespace).Watch(options) - }, - }, - &clusterv1alpha1.Cluster{}, - resyncPeriod, - indexers, - ) -} - -func (f *clusterInformer) defaultInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredClusterInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *clusterInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&clusterv1alpha1.Cluster{}, f.defaultInformer) -} - -func (f *clusterInformer) Lister() v1alpha1.ClusterLister { - return v1alpha1.NewClusterLister(f.Informer().GetIndexer()) -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/interface.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/interface.go deleted file mode 100644 index 68b08874b6aa..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/interface.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. 
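As the constructor comments above urge, consumers should reach these informers through the shared factory rather than building independent ones, and then react to cache events instead of polling. A minimal sketch of registering handlers on the shared Cluster informer; the handler bodies are illustrative:

```go
package main

import (
	"fmt"

	"github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions"
	"k8s.io/client-go/tools/cache"
)

// watchClusters attaches add/delete handlers to the shared Cluster informer.
func watchClusters(factory externalversions.SharedInformerFactory) {
	informer := factory.Cluster().V1alpha1().Clusters().Informer()
	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    func(obj interface{}) { fmt.Println("cluster added") },
		DeleteFunc: func(obj interface{}) { fmt.Println("cluster deleted") },
	})
}
```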
- -package v1alpha1 - -import ( - internalinterfaces "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces" -) - -// Interface provides access to all the informers in this group version. -type Interface interface { - // Clusters returns a ClusterInformer. - Clusters() ClusterInformer - // Machines returns a MachineInformer. - Machines() MachineInformer - // MachineClasses returns a MachineClassInformer. - MachineClasses() MachineClassInformer - // MachineDeployments returns a MachineDeploymentInformer. - MachineDeployments() MachineDeploymentInformer - // MachineSets returns a MachineSetInformer. - MachineSets() MachineSetInformer -} - -type version struct { - factory internalinterfaces.SharedInformerFactory - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// Clusters returns a ClusterInformer. -func (v *version) Clusters() ClusterInformer { - return &clusterInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// Machines returns a MachineInformer. -func (v *version) Machines() MachineInformer { - return &machineInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// MachineClasses returns a MachineClassInformer. -func (v *version) MachineClasses() MachineClassInformer { - return &machineClassInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// MachineDeployments returns a MachineDeploymentInformer. -func (v *version) MachineDeployments() MachineDeploymentInformer { - return &machineDeploymentInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// MachineSets returns a MachineSetInformer. -func (v *version) MachineSets() MachineSetInformer { - return &machineSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/machine.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/machine.go deleted file mode 100644 index 06063ebe21f8..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/machine.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - time "time" - - clusterv1alpha1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1" - clientset "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset" - internalinterfaces "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces" - v1alpha1 "github.com/openshift/cluster-api/pkg/client/listers_generated/cluster/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// MachineInformer provides access to a shared informer and lister for -// Machines. -type MachineInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha1.MachineLister -} - -type machineInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewMachineInformer constructs a new informer for Machine type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewMachineInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredMachineInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredMachineInformer constructs a new informer for Machine type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredMachineInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ClusterV1alpha1().Machines(namespace).List(options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ClusterV1alpha1().Machines(namespace).Watch(options) - }, - }, - &clusterv1alpha1.Machine{}, - resyncPeriod, - indexers, - ) -} - -func (f *machineInformer) defaultInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredMachineInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *machineInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&clusterv1alpha1.Machine{}, f.defaultInformer) -} - -func (f *machineInformer) Lister() v1alpha1.MachineLister { - return v1alpha1.NewMachineLister(f.Informer().GetIndexer()) -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/machineclass.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/machineclass.go deleted file mode 100644 index 6db4475b0624..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/machineclass.go +++ /dev/null 
@@ -1,89 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - time "time" - - clusterv1alpha1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1" - clientset "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset" - internalinterfaces "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces" - v1alpha1 "github.com/openshift/cluster-api/pkg/client/listers_generated/cluster/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// MachineClassInformer provides access to a shared informer and lister for -// MachineClasses. -type MachineClassInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha1.MachineClassLister -} - -type machineClassInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewMachineClassInformer constructs a new informer for MachineClass type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewMachineClassInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredMachineClassInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredMachineClassInformer constructs a new informer for MachineClass type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
-func NewFilteredMachineClassInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ClusterV1alpha1().MachineClasses(namespace).List(options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ClusterV1alpha1().MachineClasses(namespace).Watch(options) - }, - }, - &clusterv1alpha1.MachineClass{}, - resyncPeriod, - indexers, - ) -} - -func (f *machineClassInformer) defaultInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredMachineClassInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *machineClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&clusterv1alpha1.MachineClass{}, f.defaultInformer) -} - -func (f *machineClassInformer) Lister() v1alpha1.MachineClassLister { - return v1alpha1.NewMachineClassLister(f.Informer().GetIndexer()) -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/machinedeployment.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/machinedeployment.go deleted file mode 100644 index 0b5f7c4113fc..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/machinedeployment.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - time "time" - - clusterv1alpha1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1" - clientset "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset" - internalinterfaces "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces" - v1alpha1 "github.com/openshift/cluster-api/pkg/client/listers_generated/cluster/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// MachineDeploymentInformer provides access to a shared informer and lister for -// MachineDeployments. 
-type MachineDeploymentInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha1.MachineDeploymentLister -} - -type machineDeploymentInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewMachineDeploymentInformer constructs a new informer for MachineDeployment type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewMachineDeploymentInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredMachineDeploymentInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredMachineDeploymentInformer constructs a new informer for MachineDeployment type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredMachineDeploymentInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ClusterV1alpha1().MachineDeployments(namespace).List(options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ClusterV1alpha1().MachineDeployments(namespace).Watch(options) - }, - }, - &clusterv1alpha1.MachineDeployment{}, - resyncPeriod, - indexers, - ) -} - -func (f *machineDeploymentInformer) defaultInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredMachineDeploymentInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *machineDeploymentInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&clusterv1alpha1.MachineDeployment{}, f.defaultInformer) -} - -func (f *machineDeploymentInformer) Lister() v1alpha1.MachineDeploymentLister { - return v1alpha1.NewMachineDeploymentLister(f.Informer().GetIndexer()) -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/machineset.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/machineset.go deleted file mode 100644 index a3179d4fbfee..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/machineset.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - time "time" - - clusterv1alpha1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1" - clientset "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset" - internalinterfaces "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces" - v1alpha1 "github.com/openshift/cluster-api/pkg/client/listers_generated/cluster/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// MachineSetInformer provides access to a shared informer and lister for -// MachineSets. -type MachineSetInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha1.MachineSetLister -} - -type machineSetInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewMachineSetInformer constructs a new informer for MachineSet type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewMachineSetInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredMachineSetInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredMachineSetInformer constructs a new informer for MachineSet type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
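The filtered constructor deleted just below accepts a `TweakListOptionsFunc`, which lets callers narrow the underlying List/Watch server-side. A sketch of scoping the informer to one label; the selector value, namespace, and resync period are illustrative:

```go
package main

import (
	"time"

	clientset "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset"
	clusterinformers "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

// filteredInformer builds a MachineSet informer whose List/Watch calls carry
// a label selector, so only matching objects ever reach the local cache.
func filteredInformer(cs clientset.Interface) cache.SharedIndexInformer {
	tweak := func(opts *metav1.ListOptions) {
		opts.LabelSelector = "cluster.k8s.io/cluster-name=example" // illustrative
	}
	return clusterinformers.NewFilteredMachineSetInformer(
		cs, "default", 30*time.Minute,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		tweak,
	)
}
```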
-func NewFilteredMachineSetInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ClusterV1alpha1().MachineSets(namespace).List(options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ClusterV1alpha1().MachineSets(namespace).Watch(options) - }, - }, - &clusterv1alpha1.MachineSet{}, - resyncPeriod, - indexers, - ) -} - -func (f *machineSetInformer) defaultInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredMachineSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *machineSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&clusterv1alpha1.MachineSet{}, f.defaultInformer) -} - -func (f *machineSetInformer) Lister() v1alpha1.MachineSetLister { - return v1alpha1.NewMachineSetLister(f.Informer().GetIndexer()) -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/factory.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/factory.go deleted file mode 100644 index 2383e1a543b1..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/factory.go +++ /dev/null @@ -1,186 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package externalversions - -import ( - reflect "reflect" - sync "sync" - time "time" - - clientset "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset" - cluster "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/cluster" - internalinterfaces "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces" - machine "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - cache "k8s.io/client-go/tools/cache" -) - -// SharedInformerOption defines the functional option type for SharedInformerFactory. 
-type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory - -type sharedInformerFactory struct { - client clientset.Interface - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc - lock sync.Mutex - defaultResync time.Duration - customResync map[reflect.Type]time.Duration - - informers map[reflect.Type]cache.SharedIndexInformer - // startedInformers is used for tracking which informers have been started. - // This allows Start() to be called multiple times safely. - startedInformers map[reflect.Type]bool -} - -// WithCustomResyncConfig sets a custom resync period for the specified informer types. -func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { - return func(factory *sharedInformerFactory) *sharedInformerFactory { - for k, v := range resyncConfig { - factory.customResync[reflect.TypeOf(k)] = v - } - return factory - } -} - -// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. -func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { - return func(factory *sharedInformerFactory) *sharedInformerFactory { - factory.tweakListOptions = tweakListOptions - return factory - } -} - -// WithNamespace limits the SharedInformerFactory to the specified namespace. -func WithNamespace(namespace string) SharedInformerOption { - return func(factory *sharedInformerFactory) *sharedInformerFactory { - factory.namespace = namespace - return factory - } -} - -// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. -func NewSharedInformerFactory(client clientset.Interface, defaultResync time.Duration) SharedInformerFactory { - return NewSharedInformerFactoryWithOptions(client, defaultResync) -} - -// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. -// Listers obtained via this SharedInformerFactory will be subject to the same filters -// as specified here. -// Deprecated: Please use NewSharedInformerFactoryWithOptions instead -func NewFilteredSharedInformerFactory(client clientset.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { - return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) -} - -// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. -func NewSharedInformerFactoryWithOptions(client clientset.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { - factory := &sharedInformerFactory{ - client: client, - namespace: v1.NamespaceAll, - defaultResync: defaultResync, - informers: make(map[reflect.Type]cache.SharedIndexInformer), - startedInformers: make(map[reflect.Type]bool), - customResync: make(map[reflect.Type]time.Duration), - } - - // Apply all options - for _, opt := range options { - factory = opt(factory) - } - - return factory -} - -// Start initializes all requested informers. -func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { - f.lock.Lock() - defer f.lock.Unlock() - - for informerType, informer := range f.informers { - if !f.startedInformers[informerType] { - go informer.Run(stopCh) - f.startedInformers[informerType] = true - } - } -} - -// WaitForCacheSync waits for all started informers' cache were synced. 
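Taken together, the functional options and lifecycle methods above follow the usual factory pattern: construct the factory (optionally scoped), request the informers you need, call Start, then block until the caches have synced (which is what the WaitForCacheSync implemented just below does for every started informer). A sketch assuming a clientset built elsewhere; the namespace and resync period are illustrative:

```go
package main

import (
	"time"

	clientset "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset"
	"github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions"
)

// runInformers wires up a namespace-scoped factory and blocks until the
// initial List/Watch of each requested informer completes.
func runInformers(cs clientset.Interface, stopCh <-chan struct{}) {
	factory := externalversions.NewSharedInformerFactoryWithOptions(
		cs, 10*time.Minute, // illustrative resync period
		externalversions.WithNamespace("openshift-machine-api"), // illustrative namespace
	)
	// Requesting an informer registers it with the factory before Start.
	_ = factory.Machine().V1beta1().MachineSets().Informer()

	factory.Start(stopCh)            // starts every informer requested so far
	factory.WaitForCacheSync(stopCh) // returns once each cache has synced
}
```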
-func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { - informers := func() map[reflect.Type]cache.SharedIndexInformer { - f.lock.Lock() - defer f.lock.Unlock() - - informers := map[reflect.Type]cache.SharedIndexInformer{} - for informerType, informer := range f.informers { - if f.startedInformers[informerType] { - informers[informerType] = informer - } - } - return informers - }() - - res := map[reflect.Type]bool{} - for informType, informer := range informers { - res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) - } - return res -} - -// InternalInformerFor returns the SharedIndexInformer for obj using an internal -// client. -func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { - f.lock.Lock() - defer f.lock.Unlock() - - informerType := reflect.TypeOf(obj) - informer, exists := f.informers[informerType] - if exists { - return informer - } - - resyncPeriod, exists := f.customResync[informerType] - if !exists { - resyncPeriod = f.defaultResync - } - - informer = newFunc(f.client, resyncPeriod) - f.informers[informerType] = informer - - return informer -} - -// SharedInformerFactory provides shared informers for resources in all known -// API group versions. -type SharedInformerFactory interface { - internalinterfaces.SharedInformerFactory - ForResource(resource schema.GroupVersionResource) (GenericInformer, error) - WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool - - Cluster() cluster.Interface - Machine() machine.Interface -} - -func (f *sharedInformerFactory) Cluster() cluster.Interface { - return cluster.New(f, f.namespace, f.tweakListOptions) -} - -func (f *sharedInformerFactory) Machine() machine.Interface { - return machine.New(f, f.namespace, f.tweakListOptions) -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/generic.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/generic.go deleted file mode 100644 index b26301d5950c..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/generic.go +++ /dev/null @@ -1,83 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. 
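For reference, the factory.go removed above is the standard informer-gen functional-options factory. A minimal sketch of how a consumer would wire it up, assuming the vendored packages are importable at the paths shown in this diff (the kubeconfig handling, namespace, and resync period here are illustrative, not taken from this repository):

```go
package main

import (
	"fmt"
	"time"

	clientset "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset"
	externalversions "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions"
	"k8s.io/client-go/tools/clientcmd"
)

func runInformers(kubeconfig string, stopCh <-chan struct{}) error {
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return err
	}
	client, err := clientset.NewForConfig(cfg)
	if err != nil {
		return err
	}

	// Functional options configure the factory at construction time;
	// WithNamespace scopes every informer it hands out to one namespace.
	factory := externalversions.NewSharedInformerFactoryWithOptions(
		client,
		10*time.Minute, // defaultResync
		externalversions.WithNamespace("openshift-machine-api"),
	)

	// Requesting an informer registers it in the factory's map ...
	machines := factory.Machine().V1beta1().Machines().Informer()
	_ = machines

	// ... so Start runs it, and WaitForCacheSync blocks until the first
	// List/Watch has populated the local cache of each started informer.
	factory.Start(stopCh)
	for typ, ok := range factory.WaitForCacheSync(stopCh) {
		if !ok {
			return fmt.Errorf("cache failed to sync for %v", typ)
		}
	}
	return nil
}
```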
- -package externalversions - -import ( - "fmt" - - v1alpha1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1" - v1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" - schema "k8s.io/apimachinery/pkg/runtime/schema" - cache "k8s.io/client-go/tools/cache" -) - -// GenericInformer is type of SharedIndexInformer which will locate and delegate to other -// sharedInformers based on type -type GenericInformer interface { - Informer() cache.SharedIndexInformer - Lister() cache.GenericLister -} - -type genericInformer struct { - informer cache.SharedIndexInformer - resource schema.GroupResource -} - -// Informer returns the SharedIndexInformer. -func (f *genericInformer) Informer() cache.SharedIndexInformer { - return f.informer -} - -// Lister returns the GenericLister. -func (f *genericInformer) Lister() cache.GenericLister { - return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) -} - -// ForResource gives generic access to a shared informer of the matching type -// TODO extend this to unknown resources with a client pool -func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { - switch resource { - // Group=cluster.k8s.io, Version=v1alpha1 - case v1alpha1.SchemeGroupVersion.WithResource("clusters"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Cluster().V1alpha1().Clusters().Informer()}, nil - case v1alpha1.SchemeGroupVersion.WithResource("machines"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Cluster().V1alpha1().Machines().Informer()}, nil - case v1alpha1.SchemeGroupVersion.WithResource("machineclasses"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Cluster().V1alpha1().MachineClasses().Informer()}, nil - case v1alpha1.SchemeGroupVersion.WithResource("machinedeployments"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Cluster().V1alpha1().MachineDeployments().Informer()}, nil - case v1alpha1.SchemeGroupVersion.WithResource("machinesets"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Cluster().V1alpha1().MachineSets().Informer()}, nil - - // Group=machine.openshift.io, Version=v1beta1 - case v1beta1.SchemeGroupVersion.WithResource("clusters"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Machine().V1beta1().Clusters().Informer()}, nil - case v1beta1.SchemeGroupVersion.WithResource("machines"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Machine().V1beta1().Machines().Informer()}, nil - case v1beta1.SchemeGroupVersion.WithResource("machineclasses"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Machine().V1beta1().MachineClasses().Informer()}, nil - case v1beta1.SchemeGroupVersion.WithResource("machinedeployments"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Machine().V1beta1().MachineDeployments().Informer()}, nil - case v1beta1.SchemeGroupVersion.WithResource("machinesets"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Machine().V1beta1().MachineSets().Informer()}, nil - - } - - return nil, fmt.Errorf("no informer found for %v", resource) -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces/BUILD.bazel 
b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces/BUILD.bazel deleted file mode 100644 index 86d3c2e4036e..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["factory_interfaces.go"], - importpath = "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces", - visibility = ["//visibility:public"], - deps = [ - "//pkg/client/clientset_generated/clientset:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces/factory_interfaces.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces/factory_interfaces.go deleted file mode 100644 index 919e36ac497f..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces/factory_interfaces.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. 
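The generic.go removed just above maps a GroupVersionResource onto the corresponding typed informer through a generated switch. A sketch of dynamic lookup via ForResource, continuing the factory from the previous sketch (the namespace is again illustrative):

```go
import (
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"

	externalversions "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions"
)

// listMachineSetsDynamically resolves an informer from a GVR rather than a
// typed accessor, exercising the switch in the removed generic.go.
func listMachineSetsDynamically(factory externalversions.SharedInformerFactory) ([]runtime.Object, error) {
	gvr := schema.GroupVersionResource{
		Group:    "machine.openshift.io",
		Version:  "v1beta1",
		Resource: "machinesets",
	}
	informer, err := factory.ForResource(gvr)
	if err != nil {
		// ForResource returns "no informer found for <gvr>" for any
		// resource missing from the generated switch.
		return nil, err
	}
	// The GenericLister works in terms of runtime.Object; callers
	// type-assert to the concrete type when they need it.
	return informer.Lister().ByNamespace("openshift-machine-api").List(labels.Everything())
}
```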
- -package internalinterfaces - -import ( - time "time" - - clientset "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - cache "k8s.io/client-go/tools/cache" -) - -type NewInformerFunc func(clientset.Interface, time.Duration) cache.SharedIndexInformer - -// SharedInformerFactory a small interface to allow for adding an informer without an import cycle -type SharedInformerFactory interface { - Start(stopCh <-chan struct{}) - InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer -} - -type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine/BUILD.bazel b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine/BUILD.bazel deleted file mode 100644 index ea2550080604..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["interface.go"], - importpath = "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine", - visibility = ["//visibility:public"], - deps = [ - "//pkg/client/informers_generated/externalversions/internalinterfaces:go_default_library", - "//pkg/client/informers_generated/externalversions/machine/v1beta1:go_default_library", - ], -) diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine/interface.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine/interface.go deleted file mode 100644 index ef2ffff99427..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine/interface.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package machine - -import ( - internalinterfaces "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces" - v1beta1 "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine/v1beta1" -) - -// Interface provides access to each of this group's versions. -type Interface interface { - // V1beta1 provides access to shared informers for resources in V1beta1. - V1beta1() v1beta1.Interface -} - -type group struct { - factory internalinterfaces.SharedInformerFactory - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new Interface. 
-func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// V1beta1 returns a new v1beta1.Interface. -func (g *group) V1beta1() v1beta1.Interface { - return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine/v1beta1/BUILD.bazel b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine/v1beta1/BUILD.bazel deleted file mode 100644 index 65f99f6c1482..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine/v1beta1/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cluster.go", - "interface.go", - "machine.go", - "machineclass.go", - "machinedeployment.go", - "machineset.go", - ], - importpath = "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//pkg/apis/machine/v1beta1:go_default_library", - "//pkg/client/clientset_generated/clientset:go_default_library", - "//pkg/client/informers_generated/externalversions/internalinterfaces:go_default_library", - "//pkg/client/listers_generated/machine/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine/v1beta1/cluster.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine/v1beta1/cluster.go deleted file mode 100644 index 292c42f02ce4..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine/v1beta1/cluster.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. 
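These small interface.go files exist only to route factory, then group, then version, then resource. Once an informer is reached through that chain, event handlers can be attached before the factory is started. A sketch under the same assumptions as above (the handler bodies are placeholders):

```go
import (
	"fmt"

	machinev1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1"
	externalversions "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions"
	"k8s.io/client-go/tools/cache"
)

// watchMachineSets walks factory.Machine().V1beta1().MachineSets(), the exact
// chain assembled by the interface.go files being deleted here.
func watchMachineSets(factory externalversions.SharedInformerFactory) {
	informer := factory.Machine().V1beta1().MachineSets().Informer()
	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			ms := obj.(*machinev1beta1.MachineSet)
			fmt.Printf("machineset added: %s/%s\n", ms.Namespace, ms.Name)
		},
		DeleteFunc: func(obj interface{}) {
			// Deletions can arrive as cache.DeletedFinalStateUnknown
			// tombstones, so the type assertion must be guarded.
			if ms, ok := obj.(*machinev1beta1.MachineSet); ok {
				fmt.Printf("machineset deleted: %s/%s\n", ms.Namespace, ms.Name)
			}
		},
	})
}
```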
- -package v1beta1 - -import ( - time "time" - - machinev1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" - clientset "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset" - internalinterfaces "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces" - v1beta1 "github.com/openshift/cluster-api/pkg/client/listers_generated/machine/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// ClusterInformer provides access to a shared informer and lister for -// Clusters. -type ClusterInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1beta1.ClusterLister -} - -type clusterInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewClusterInformer constructs a new informer for Cluster type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewClusterInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredClusterInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredClusterInformer constructs a new informer for Cluster type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredClusterInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.MachineV1beta1().Clusters(namespace).List(options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.MachineV1beta1().Clusters(namespace).Watch(options) - }, - }, - &machinev1beta1.Cluster{}, - resyncPeriod, - indexers, - ) -} - -func (f *clusterInformer) defaultInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredClusterInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *clusterInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&machinev1beta1.Cluster{}, f.defaultInformer) -} - -func (f *clusterInformer) Lister() v1beta1.ClusterLister { - return v1beta1.NewClusterLister(f.Informer().GetIndexer()) -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine/v1beta1/interface.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine/v1beta1/interface.go deleted file mode 100644 index 870a0b17b6d7..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine/v1beta1/interface.go +++ /dev/null @@ -1,73 +0,0 @@ -/* 
-Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1beta1 - -import ( - internalinterfaces "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces" -) - -// Interface provides access to all the informers in this group version. -type Interface interface { - // Clusters returns a ClusterInformer. - Clusters() ClusterInformer - // Machines returns a MachineInformer. - Machines() MachineInformer - // MachineClasses returns a MachineClassInformer. - MachineClasses() MachineClassInformer - // MachineDeployments returns a MachineDeploymentInformer. - MachineDeployments() MachineDeploymentInformer - // MachineSets returns a MachineSetInformer. - MachineSets() MachineSetInformer -} - -type version struct { - factory internalinterfaces.SharedInformerFactory - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// Clusters returns a ClusterInformer. -func (v *version) Clusters() ClusterInformer { - return &clusterInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// Machines returns a MachineInformer. -func (v *version) Machines() MachineInformer { - return &machineInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// MachineClasses returns a MachineClassInformer. -func (v *version) MachineClasses() MachineClassInformer { - return &machineClassInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// MachineDeployments returns a MachineDeploymentInformer. -func (v *version) MachineDeployments() MachineDeploymentInformer { - return &machineDeploymentInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// MachineSets returns a MachineSetInformer. -func (v *version) MachineSets() MachineSetInformer { - return &machineSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine/v1beta1/machine.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine/v1beta1/machine.go deleted file mode 100644 index 21a0714e24cc..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine/v1beta1/machine.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1beta1 - -import ( - time "time" - - machinev1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" - clientset "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset" - internalinterfaces "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces" - v1beta1 "github.com/openshift/cluster-api/pkg/client/listers_generated/machine/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// MachineInformer provides access to a shared informer and lister for -// Machines. -type MachineInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1beta1.MachineLister -} - -type machineInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewMachineInformer constructs a new informer for Machine type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewMachineInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredMachineInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredMachineInformer constructs a new informer for Machine type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
-func NewFilteredMachineInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.MachineV1beta1().Machines(namespace).List(options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.MachineV1beta1().Machines(namespace).Watch(options) - }, - }, - &machinev1beta1.Machine{}, - resyncPeriod, - indexers, - ) -} - -func (f *machineInformer) defaultInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredMachineInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *machineInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&machinev1beta1.Machine{}, f.defaultInformer) -} - -func (f *machineInformer) Lister() v1beta1.MachineLister { - return v1beta1.NewMachineLister(f.Informer().GetIndexer()) -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine/v1beta1/machineclass.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine/v1beta1/machineclass.go deleted file mode 100644 index dba4270077d1..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine/v1beta1/machineclass.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1beta1 - -import ( - time "time" - - machinev1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" - clientset "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset" - internalinterfaces "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces" - v1beta1 "github.com/openshift/cluster-api/pkg/client/listers_generated/machine/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// MachineClassInformer provides access to a shared informer and lister for -// MachineClasses. -type MachineClassInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1beta1.MachineClassLister -} - -type machineClassInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewMachineClassInformer constructs a new informer for MachineClass type. 
-// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewMachineClassInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredMachineClassInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredMachineClassInformer constructs a new informer for MachineClass type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredMachineClassInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.MachineV1beta1().MachineClasses(namespace).List(options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.MachineV1beta1().MachineClasses(namespace).Watch(options) - }, - }, - &machinev1beta1.MachineClass{}, - resyncPeriod, - indexers, - ) -} - -func (f *machineClassInformer) defaultInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredMachineClassInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *machineClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&machinev1beta1.MachineClass{}, f.defaultInformer) -} - -func (f *machineClassInformer) Lister() v1beta1.MachineClassLister { - return v1beta1.NewMachineClassLister(f.Informer().GetIndexer()) -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine/v1beta1/machinedeployment.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine/v1beta1/machinedeployment.go deleted file mode 100644 index e1c226bdb3cd..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine/v1beta1/machinedeployment.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. 
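Each NewFiltered*Informer constructor above threads a TweakListOptionsFunc into both the List and Watch calls, which is how server-side filtering gets injected. A sketch of narrowing informers with a label selector; the selector string, import aliases, and resync period are illustrative assumptions, not values from this repository:

```go
import (
	"time"

	clientset "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset"
	externalversions "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions"
	machineinformers "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func filteredFactory(client clientset.Interface) externalversions.SharedInformerFactory {
	// The tweak mutates the ListOptions of every List/Watch the informers
	// issue, so the filtering happens on the API server side.
	tweak := func(opts *metav1.ListOptions) {
		opts.LabelSelector = "machine.openshift.io/cluster-api-machine-role=worker" // illustrative
	}
	return externalversions.NewSharedInformerFactoryWithOptions(
		client,
		10*time.Minute,
		externalversions.WithTweakListOptions(tweak),
	)
}

// A standalone filtered informer is also possible, though the generated
// comments steer callers toward the shared factory, since an independent
// informer shares no cache or watch connection with anything else.
func standaloneMachineInformer(client clientset.Interface) cache.SharedIndexInformer {
	return machineinformers.NewFilteredMachineInformer(
		client,
		metav1.NamespaceAll,
		10*time.Minute,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		func(opts *metav1.ListOptions) {
			opts.LabelSelector = "machine.openshift.io/cluster-api-machine-role=worker" // illustrative
		},
	)
}
```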
- -package v1beta1 - -import ( - time "time" - - machinev1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" - clientset "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset" - internalinterfaces "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces" - v1beta1 "github.com/openshift/cluster-api/pkg/client/listers_generated/machine/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// MachineDeploymentInformer provides access to a shared informer and lister for -// MachineDeployments. -type MachineDeploymentInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1beta1.MachineDeploymentLister -} - -type machineDeploymentInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewMachineDeploymentInformer constructs a new informer for MachineDeployment type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewMachineDeploymentInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredMachineDeploymentInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredMachineDeploymentInformer constructs a new informer for MachineDeployment type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
-func NewFilteredMachineDeploymentInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.MachineV1beta1().MachineDeployments(namespace).List(options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.MachineV1beta1().MachineDeployments(namespace).Watch(options) - }, - }, - &machinev1beta1.MachineDeployment{}, - resyncPeriod, - indexers, - ) -} - -func (f *machineDeploymentInformer) defaultInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredMachineDeploymentInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *machineDeploymentInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&machinev1beta1.MachineDeployment{}, f.defaultInformer) -} - -func (f *machineDeploymentInformer) Lister() v1beta1.MachineDeploymentLister { - return v1beta1.NewMachineDeploymentLister(f.Informer().GetIndexer()) -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine/v1beta1/machineset.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine/v1beta1/machineset.go deleted file mode 100644 index fe576d6327fb..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/machine/v1beta1/machineset.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1beta1 - -import ( - time "time" - - machinev1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" - clientset "github.com/openshift/cluster-api/pkg/client/clientset_generated/clientset" - internalinterfaces "github.com/openshift/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces" - v1beta1 "github.com/openshift/cluster-api/pkg/client/listers_generated/machine/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// MachineSetInformer provides access to a shared informer and lister for -// MachineSets. 
-type MachineSetInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1beta1.MachineSetLister -} - -type machineSetInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewMachineSetInformer constructs a new informer for MachineSet type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewMachineSetInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredMachineSetInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredMachineSetInformer constructs a new informer for MachineSet type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredMachineSetInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.MachineV1beta1().MachineSets(namespace).List(options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.MachineV1beta1().MachineSets(namespace).Watch(options) - }, - }, - &machinev1beta1.MachineSet{}, - resyncPeriod, - indexers, - ) -} - -func (f *machineSetInformer) defaultInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredMachineSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *machineSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&machinev1beta1.MachineSet{}, f.defaultInformer) -} - -func (f *machineSetInformer) Lister() v1beta1.MachineSetLister { - return v1beta1.NewMachineSetLister(f.Informer().GetIndexer()) -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/BUILD.bazel b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/BUILD.bazel deleted file mode 100644 index 50e7b656d762..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cluster.go", - "expansion_generated.go", - "machine.go", - "machineclass.go", - "machinedeployment.go", - "machineset.go", - ], - importpath = "github.com/openshift/cluster-api/pkg/client/listers_generated/cluster/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//pkg/apis/cluster/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git 
a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/cluster.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/cluster.go deleted file mode 100644 index 79f5a06f8244..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/cluster.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// ClusterLister helps list Clusters. -type ClusterLister interface { - // List lists all Clusters in the indexer. - List(selector labels.Selector) (ret []*v1alpha1.Cluster, err error) - // Clusters returns an object that can list and get Clusters. - Clusters(namespace string) ClusterNamespaceLister - ClusterListerExpansion -} - -// clusterLister implements the ClusterLister interface. -type clusterLister struct { - indexer cache.Indexer -} - -// NewClusterLister returns a new ClusterLister. -func NewClusterLister(indexer cache.Indexer) ClusterLister { - return &clusterLister{indexer: indexer} -} - -// List lists all Clusters in the indexer. -func (s *clusterLister) List(selector labels.Selector) (ret []*v1alpha1.Cluster, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.Cluster)) - }) - return ret, err -} - -// Clusters returns an object that can list and get Clusters. -func (s *clusterLister) Clusters(namespace string) ClusterNamespaceLister { - return clusterNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// ClusterNamespaceLister helps list and get Clusters. -type ClusterNamespaceLister interface { - // List lists all Clusters in the indexer for a given namespace. - List(selector labels.Selector) (ret []*v1alpha1.Cluster, err error) - // Get retrieves the Cluster from the indexer for a given namespace and name. - Get(name string) (*v1alpha1.Cluster, error) - ClusterNamespaceListerExpansion -} - -// clusterNamespaceLister implements the ClusterNamespaceLister -// interface. -type clusterNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Clusters in the indexer for a given namespace. -func (s clusterNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Cluster, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.Cluster)) - }) - return ret, err -} - -// Get retrieves the Cluster from the indexer for a given namespace and name. 
-func (s clusterNamespaceLister) Get(name string) (*v1alpha1.Cluster, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("cluster"), name) - } - return obj.(*v1alpha1.Cluster), nil -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/expansion_generated.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/expansion_generated.go deleted file mode 100644 index 8daf52835ef8..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/expansion_generated.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -// ClusterListerExpansion allows custom methods to be added to -// ClusterLister. -type ClusterListerExpansion interface{} - -// ClusterNamespaceListerExpansion allows custom methods to be added to -// ClusterNamespaceLister. -type ClusterNamespaceListerExpansion interface{} - -// MachineListerExpansion allows custom methods to be added to -// MachineLister. -type MachineListerExpansion interface{} - -// MachineNamespaceListerExpansion allows custom methods to be added to -// MachineNamespaceLister. -type MachineNamespaceListerExpansion interface{} - -// MachineClassListerExpansion allows custom methods to be added to -// MachineClassLister. -type MachineClassListerExpansion interface{} - -// MachineClassNamespaceListerExpansion allows custom methods to be added to -// MachineClassNamespaceLister. -type MachineClassNamespaceListerExpansion interface{} - -// MachineDeploymentListerExpansion allows custom methods to be added to -// MachineDeploymentLister. -type MachineDeploymentListerExpansion interface{} - -// MachineDeploymentNamespaceListerExpansion allows custom methods to be added to -// MachineDeploymentNamespaceLister. -type MachineDeploymentNamespaceListerExpansion interface{} - -// MachineSetListerExpansion allows custom methods to be added to -// MachineSetLister. -type MachineSetListerExpansion interface{} - -// MachineSetNamespaceListerExpansion allows custom methods to be added to -// MachineSetNamespaceLister. -type MachineSetNamespaceListerExpansion interface{} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/machine.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/machine.go deleted file mode 100644 index 7d1335329b7a..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/machine.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// MachineLister helps list Machines. -type MachineLister interface { - // List lists all Machines in the indexer. - List(selector labels.Selector) (ret []*v1alpha1.Machine, err error) - // Machines returns an object that can list and get Machines. - Machines(namespace string) MachineNamespaceLister - MachineListerExpansion -} - -// machineLister implements the MachineLister interface. -type machineLister struct { - indexer cache.Indexer -} - -// NewMachineLister returns a new MachineLister. -func NewMachineLister(indexer cache.Indexer) MachineLister { - return &machineLister{indexer: indexer} -} - -// List lists all Machines in the indexer. -func (s *machineLister) List(selector labels.Selector) (ret []*v1alpha1.Machine, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.Machine)) - }) - return ret, err -} - -// Machines returns an object that can list and get Machines. -func (s *machineLister) Machines(namespace string) MachineNamespaceLister { - return machineNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// MachineNamespaceLister helps list and get Machines. -type MachineNamespaceLister interface { - // List lists all Machines in the indexer for a given namespace. - List(selector labels.Selector) (ret []*v1alpha1.Machine, err error) - // Get retrieves the Machine from the indexer for a given namespace and name. - Get(name string) (*v1alpha1.Machine, error) - MachineNamespaceListerExpansion -} - -// machineNamespaceLister implements the MachineNamespaceLister -// interface. -type machineNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Machines in the indexer for a given namespace. -func (s machineNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Machine, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.Machine)) - }) - return ret, err -} - -// Get retrieves the Machine from the indexer for a given namespace and name. 
-func (s machineNamespaceLister) Get(name string) (*v1alpha1.Machine, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("machine"), name) - } - return obj.(*v1alpha1.Machine), nil -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/machineclass.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/machineclass.go deleted file mode 100644 index df2c0ed6ca40..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/machineclass.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// MachineClassLister helps list MachineClasses. -type MachineClassLister interface { - // List lists all MachineClasses in the indexer. - List(selector labels.Selector) (ret []*v1alpha1.MachineClass, err error) - // MachineClasses returns an object that can list and get MachineClasses. - MachineClasses(namespace string) MachineClassNamespaceLister - MachineClassListerExpansion -} - -// machineClassLister implements the MachineClassLister interface. -type machineClassLister struct { - indexer cache.Indexer -} - -// NewMachineClassLister returns a new MachineClassLister. -func NewMachineClassLister(indexer cache.Indexer) MachineClassLister { - return &machineClassLister{indexer: indexer} -} - -// List lists all MachineClasses in the indexer. -func (s *machineClassLister) List(selector labels.Selector) (ret []*v1alpha1.MachineClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.MachineClass)) - }) - return ret, err -} - -// MachineClasses returns an object that can list and get MachineClasses. -func (s *machineClassLister) MachineClasses(namespace string) MachineClassNamespaceLister { - return machineClassNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// MachineClassNamespaceLister helps list and get MachineClasses. -type MachineClassNamespaceLister interface { - // List lists all MachineClasses in the indexer for a given namespace. - List(selector labels.Selector) (ret []*v1alpha1.MachineClass, err error) - // Get retrieves the MachineClass from the indexer for a given namespace and name. - Get(name string) (*v1alpha1.MachineClass, error) - MachineClassNamespaceListerExpansion -} - -// machineClassNamespaceLister implements the MachineClassNamespaceLister -// interface. 
-type machineClassNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all MachineClasses in the indexer for a given namespace. -func (s machineClassNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.MachineClass, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.MachineClass)) - }) - return ret, err -} - -// Get retrieves the MachineClass from the indexer for a given namespace and name. -func (s machineClassNamespaceLister) Get(name string) (*v1alpha1.MachineClass, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("machineclass"), name) - } - return obj.(*v1alpha1.MachineClass), nil -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/machinedeployment.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/machinedeployment.go deleted file mode 100644 index 96b47bbf565b..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/machinedeployment.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// MachineDeploymentLister helps list MachineDeployments. -type MachineDeploymentLister interface { - // List lists all MachineDeployments in the indexer. - List(selector labels.Selector) (ret []*v1alpha1.MachineDeployment, err error) - // MachineDeployments returns an object that can list and get MachineDeployments. - MachineDeployments(namespace string) MachineDeploymentNamespaceLister - MachineDeploymentListerExpansion -} - -// machineDeploymentLister implements the MachineDeploymentLister interface. -type machineDeploymentLister struct { - indexer cache.Indexer -} - -// NewMachineDeploymentLister returns a new MachineDeploymentLister. -func NewMachineDeploymentLister(indexer cache.Indexer) MachineDeploymentLister { - return &machineDeploymentLister{indexer: indexer} -} - -// List lists all MachineDeployments in the indexer. -func (s *machineDeploymentLister) List(selector labels.Selector) (ret []*v1alpha1.MachineDeployment, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.MachineDeployment)) - }) - return ret, err -} - -// MachineDeployments returns an object that can list and get MachineDeployments. 
-func (s *machineDeploymentLister) MachineDeployments(namespace string) MachineDeploymentNamespaceLister { - return machineDeploymentNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// MachineDeploymentNamespaceLister helps list and get MachineDeployments. -type MachineDeploymentNamespaceLister interface { - // List lists all MachineDeployments in the indexer for a given namespace. - List(selector labels.Selector) (ret []*v1alpha1.MachineDeployment, err error) - // Get retrieves the MachineDeployment from the indexer for a given namespace and name. - Get(name string) (*v1alpha1.MachineDeployment, error) - MachineDeploymentNamespaceListerExpansion -} - -// machineDeploymentNamespaceLister implements the MachineDeploymentNamespaceLister -// interface. -type machineDeploymentNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all MachineDeployments in the indexer for a given namespace. -func (s machineDeploymentNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.MachineDeployment, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.MachineDeployment)) - }) - return ret, err -} - -// Get retrieves the MachineDeployment from the indexer for a given namespace and name. -func (s machineDeploymentNamespaceLister) Get(name string) (*v1alpha1.MachineDeployment, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("machinedeployment"), name) - } - return obj.(*v1alpha1.MachineDeployment), nil -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/machineset.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/machineset.go deleted file mode 100644 index 50066366c49c..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/machineset.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// MachineSetLister helps list MachineSets. -type MachineSetLister interface { - // List lists all MachineSets in the indexer. - List(selector labels.Selector) (ret []*v1alpha1.MachineSet, err error) - // MachineSets returns an object that can list and get MachineSets. - MachineSets(namespace string) MachineSetNamespaceLister - MachineSetListerExpansion -} - -// machineSetLister implements the MachineSetLister interface. -type machineSetLister struct { - indexer cache.Indexer -} - -// NewMachineSetLister returns a new MachineSetLister. 
-func NewMachineSetLister(indexer cache.Indexer) MachineSetLister { - return &machineSetLister{indexer: indexer} -} - -// List lists all MachineSets in the indexer. -func (s *machineSetLister) List(selector labels.Selector) (ret []*v1alpha1.MachineSet, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.MachineSet)) - }) - return ret, err -} - -// MachineSets returns an object that can list and get MachineSets. -func (s *machineSetLister) MachineSets(namespace string) MachineSetNamespaceLister { - return machineSetNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// MachineSetNamespaceLister helps list and get MachineSets. -type MachineSetNamespaceLister interface { - // List lists all MachineSets in the indexer for a given namespace. - List(selector labels.Selector) (ret []*v1alpha1.MachineSet, err error) - // Get retrieves the MachineSet from the indexer for a given namespace and name. - Get(name string) (*v1alpha1.MachineSet, error) - MachineSetNamespaceListerExpansion -} - -// machineSetNamespaceLister implements the MachineSetNamespaceLister -// interface. -type machineSetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all MachineSets in the indexer for a given namespace. -func (s machineSetNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.MachineSet, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.MachineSet)) - }) - return ret, err -} - -// Get retrieves the MachineSet from the indexer for a given namespace and name. -func (s machineSetNamespaceLister) Get(name string) (*v1alpha1.MachineSet, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("machineset"), name) - } - return obj.(*v1alpha1.MachineSet), nil -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/machine/v1beta1/BUILD.bazel b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/machine/v1beta1/BUILD.bazel deleted file mode 100644 index dd58545475f8..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/machine/v1beta1/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cluster.go", - "expansion_generated.go", - "machine.go", - "machineclass.go", - "machinedeployment.go", - "machineset.go", - ], - importpath = "github.com/openshift/cluster-api/pkg/client/listers_generated/machine/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//pkg/apis/machine/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/machine/v1beta1/cluster.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/machine/v1beta1/cluster.go deleted file mode 100644 index 1a509ddaffea..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/machine/v1beta1/cluster.go +++ /dev/null @@ -1,94 +0,0 @@ -/* 
-Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// ClusterLister helps list Clusters. -type ClusterLister interface { - // List lists all Clusters in the indexer. - List(selector labels.Selector) (ret []*v1beta1.Cluster, err error) - // Clusters returns an object that can list and get Clusters. - Clusters(namespace string) ClusterNamespaceLister - ClusterListerExpansion -} - -// clusterLister implements the ClusterLister interface. -type clusterLister struct { - indexer cache.Indexer -} - -// NewClusterLister returns a new ClusterLister. -func NewClusterLister(indexer cache.Indexer) ClusterLister { - return &clusterLister{indexer: indexer} -} - -// List lists all Clusters in the indexer. -func (s *clusterLister) List(selector labels.Selector) (ret []*v1beta1.Cluster, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Cluster)) - }) - return ret, err -} - -// Clusters returns an object that can list and get Clusters. -func (s *clusterLister) Clusters(namespace string) ClusterNamespaceLister { - return clusterNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// ClusterNamespaceLister helps list and get Clusters. -type ClusterNamespaceLister interface { - // List lists all Clusters in the indexer for a given namespace. - List(selector labels.Selector) (ret []*v1beta1.Cluster, err error) - // Get retrieves the Cluster from the indexer for a given namespace and name. - Get(name string) (*v1beta1.Cluster, error) - ClusterNamespaceListerExpansion -} - -// clusterNamespaceLister implements the ClusterNamespaceLister -// interface. -type clusterNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Clusters in the indexer for a given namespace. -func (s clusterNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Cluster, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Cluster)) - }) - return ret, err -} - -// Get retrieves the Cluster from the indexer for a given namespace and name. 
-func (s clusterNamespaceLister) Get(name string) (*v1beta1.Cluster, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("cluster"), name) - } - return obj.(*v1beta1.Cluster), nil -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/machine/v1beta1/expansion_generated.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/machine/v1beta1/expansion_generated.go deleted file mode 100644 index 8333c8008143..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/machine/v1beta1/expansion_generated.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1beta1 - -// ClusterListerExpansion allows custom methods to be added to -// ClusterLister. -type ClusterListerExpansion interface{} - -// ClusterNamespaceListerExpansion allows custom methods to be added to -// ClusterNamespaceLister. -type ClusterNamespaceListerExpansion interface{} - -// MachineListerExpansion allows custom methods to be added to -// MachineLister. -type MachineListerExpansion interface{} - -// MachineNamespaceListerExpansion allows custom methods to be added to -// MachineNamespaceLister. -type MachineNamespaceListerExpansion interface{} - -// MachineClassListerExpansion allows custom methods to be added to -// MachineClassLister. -type MachineClassListerExpansion interface{} - -// MachineClassNamespaceListerExpansion allows custom methods to be added to -// MachineClassNamespaceLister. -type MachineClassNamespaceListerExpansion interface{} - -// MachineDeploymentListerExpansion allows custom methods to be added to -// MachineDeploymentLister. -type MachineDeploymentListerExpansion interface{} - -// MachineDeploymentNamespaceListerExpansion allows custom methods to be added to -// MachineDeploymentNamespaceLister. -type MachineDeploymentNamespaceListerExpansion interface{} - -// MachineSetListerExpansion allows custom methods to be added to -// MachineSetLister. -type MachineSetListerExpansion interface{} - -// MachineSetNamespaceListerExpansion allows custom methods to be added to -// MachineSetNamespaceLister. -type MachineSetNamespaceListerExpansion interface{} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/machine/v1beta1/machine.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/machine/v1beta1/machine.go deleted file mode 100644 index 8208d3386abc..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/machine/v1beta1/machine.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// MachineLister helps list Machines. -type MachineLister interface { - // List lists all Machines in the indexer. - List(selector labels.Selector) (ret []*v1beta1.Machine, err error) - // Machines returns an object that can list and get Machines. - Machines(namespace string) MachineNamespaceLister - MachineListerExpansion -} - -// machineLister implements the MachineLister interface. -type machineLister struct { - indexer cache.Indexer -} - -// NewMachineLister returns a new MachineLister. -func NewMachineLister(indexer cache.Indexer) MachineLister { - return &machineLister{indexer: indexer} -} - -// List lists all Machines in the indexer. -func (s *machineLister) List(selector labels.Selector) (ret []*v1beta1.Machine, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Machine)) - }) - return ret, err -} - -// Machines returns an object that can list and get Machines. -func (s *machineLister) Machines(namespace string) MachineNamespaceLister { - return machineNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// MachineNamespaceLister helps list and get Machines. -type MachineNamespaceLister interface { - // List lists all Machines in the indexer for a given namespace. - List(selector labels.Selector) (ret []*v1beta1.Machine, err error) - // Get retrieves the Machine from the indexer for a given namespace and name. - Get(name string) (*v1beta1.Machine, error) - MachineNamespaceListerExpansion -} - -// machineNamespaceLister implements the MachineNamespaceLister -// interface. -type machineNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Machines in the indexer for a given namespace. -func (s machineNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Machine, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Machine)) - }) - return ret, err -} - -// Get retrieves the Machine from the indexer for a given namespace and name. 
-func (s machineNamespaceLister) Get(name string) (*v1beta1.Machine, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("machine"), name) - } - return obj.(*v1beta1.Machine), nil -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/machine/v1beta1/machineclass.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/machine/v1beta1/machineclass.go deleted file mode 100644 index 7e78a7ec8693..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/machine/v1beta1/machineclass.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// MachineClassLister helps list MachineClasses. -type MachineClassLister interface { - // List lists all MachineClasses in the indexer. - List(selector labels.Selector) (ret []*v1beta1.MachineClass, err error) - // MachineClasses returns an object that can list and get MachineClasses. - MachineClasses(namespace string) MachineClassNamespaceLister - MachineClassListerExpansion -} - -// machineClassLister implements the MachineClassLister interface. -type machineClassLister struct { - indexer cache.Indexer -} - -// NewMachineClassLister returns a new MachineClassLister. -func NewMachineClassLister(indexer cache.Indexer) MachineClassLister { - return &machineClassLister{indexer: indexer} -} - -// List lists all MachineClasses in the indexer. -func (s *machineClassLister) List(selector labels.Selector) (ret []*v1beta1.MachineClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.MachineClass)) - }) - return ret, err -} - -// MachineClasses returns an object that can list and get MachineClasses. -func (s *machineClassLister) MachineClasses(namespace string) MachineClassNamespaceLister { - return machineClassNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// MachineClassNamespaceLister helps list and get MachineClasses. -type MachineClassNamespaceLister interface { - // List lists all MachineClasses in the indexer for a given namespace. - List(selector labels.Selector) (ret []*v1beta1.MachineClass, err error) - // Get retrieves the MachineClass from the indexer for a given namespace and name. - Get(name string) (*v1beta1.MachineClass, error) - MachineClassNamespaceListerExpansion -} - -// machineClassNamespaceLister implements the MachineClassNamespaceLister -// interface. 
-type machineClassNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all MachineClasses in the indexer for a given namespace. -func (s machineClassNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.MachineClass, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.MachineClass)) - }) - return ret, err -} - -// Get retrieves the MachineClass from the indexer for a given namespace and name. -func (s machineClassNamespaceLister) Get(name string) (*v1beta1.MachineClass, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("machineclass"), name) - } - return obj.(*v1beta1.MachineClass), nil -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/machine/v1beta1/machinedeployment.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/machine/v1beta1/machinedeployment.go deleted file mode 100644 index 359522d56362..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/machine/v1beta1/machinedeployment.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// MachineDeploymentLister helps list MachineDeployments. -type MachineDeploymentLister interface { - // List lists all MachineDeployments in the indexer. - List(selector labels.Selector) (ret []*v1beta1.MachineDeployment, err error) - // MachineDeployments returns an object that can list and get MachineDeployments. - MachineDeployments(namespace string) MachineDeploymentNamespaceLister - MachineDeploymentListerExpansion -} - -// machineDeploymentLister implements the MachineDeploymentLister interface. -type machineDeploymentLister struct { - indexer cache.Indexer -} - -// NewMachineDeploymentLister returns a new MachineDeploymentLister. -func NewMachineDeploymentLister(indexer cache.Indexer) MachineDeploymentLister { - return &machineDeploymentLister{indexer: indexer} -} - -// List lists all MachineDeployments in the indexer. -func (s *machineDeploymentLister) List(selector labels.Selector) (ret []*v1beta1.MachineDeployment, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.MachineDeployment)) - }) - return ret, err -} - -// MachineDeployments returns an object that can list and get MachineDeployments. 
-func (s *machineDeploymentLister) MachineDeployments(namespace string) MachineDeploymentNamespaceLister { - return machineDeploymentNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// MachineDeploymentNamespaceLister helps list and get MachineDeployments. -type MachineDeploymentNamespaceLister interface { - // List lists all MachineDeployments in the indexer for a given namespace. - List(selector labels.Selector) (ret []*v1beta1.MachineDeployment, err error) - // Get retrieves the MachineDeployment from the indexer for a given namespace and name. - Get(name string) (*v1beta1.MachineDeployment, error) - MachineDeploymentNamespaceListerExpansion -} - -// machineDeploymentNamespaceLister implements the MachineDeploymentNamespaceLister -// interface. -type machineDeploymentNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all MachineDeployments in the indexer for a given namespace. -func (s machineDeploymentNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.MachineDeployment, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.MachineDeployment)) - }) - return ret, err -} - -// Get retrieves the MachineDeployment from the indexer for a given namespace and name. -func (s machineDeploymentNamespaceLister) Get(name string) (*v1beta1.MachineDeployment, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("machinedeployment"), name) - } - return obj.(*v1beta1.MachineDeployment), nil -} diff --git a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/machine/v1beta1/machineset.go b/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/machine/v1beta1/machineset.go deleted file mode 100644 index 0c5d85c94a5d..000000000000 --- a/cluster-autoscaler/vendor/github.com/openshift/cluster-api/pkg/client/listers_generated/machine/v1beta1/machineset.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// MachineSetLister helps list MachineSets. -type MachineSetLister interface { - // List lists all MachineSets in the indexer. - List(selector labels.Selector) (ret []*v1beta1.MachineSet, err error) - // MachineSets returns an object that can list and get MachineSets. - MachineSets(namespace string) MachineSetNamespaceLister - MachineSetListerExpansion -} - -// machineSetLister implements the MachineSetLister interface. -type machineSetLister struct { - indexer cache.Indexer -} - -// NewMachineSetLister returns a new MachineSetLister. 
-func NewMachineSetLister(indexer cache.Indexer) MachineSetLister { - return &machineSetLister{indexer: indexer} -} - -// List lists all MachineSets in the indexer. -func (s *machineSetLister) List(selector labels.Selector) (ret []*v1beta1.MachineSet, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.MachineSet)) - }) - return ret, err -} - -// MachineSets returns an object that can list and get MachineSets. -func (s *machineSetLister) MachineSets(namespace string) MachineSetNamespaceLister { - return machineSetNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// MachineSetNamespaceLister helps list and get MachineSets. -type MachineSetNamespaceLister interface { - // List lists all MachineSets in the indexer for a given namespace. - List(selector labels.Selector) (ret []*v1beta1.MachineSet, err error) - // Get retrieves the MachineSet from the indexer for a given namespace and name. - Get(name string) (*v1beta1.MachineSet, error) - MachineSetNamespaceListerExpansion -} - -// machineSetNamespaceLister implements the MachineSetNamespaceLister -// interface. -type machineSetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all MachineSets in the indexer for a given namespace. -func (s machineSetNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.MachineSet, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.MachineSet)) - }) - return ret, err -} - -// Get retrieves the MachineSet from the indexer for a given namespace and name. -func (s machineSetNamespaceLister) Get(name string) (*v1beta1.MachineSet, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("machineset"), name) - } - return obj.(*v1beta1.MachineSet), nil -} diff --git a/cluster-autoscaler/vendor/github.com/coreos/etcd/LICENSE b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/LICENSE similarity index 100% rename from cluster-autoscaler/vendor/github.com/coreos/etcd/LICENSE rename to cluster-autoscaler/vendor/github.com/pquerna/ffjson/LICENSE diff --git a/cluster-autoscaler/vendor/github.com/pquerna/ffjson/NOTICE b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/NOTICE new file mode 100644 index 000000000000..405a49618ba6 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/NOTICE @@ -0,0 +1,8 @@ +ffjson +Copyright (c) 2014, Paul Querna + +This product includes software developed by +Paul Querna (http://paul.querna.org/). + +Portions of this software were developed as +part of Go, Copyright (c) 2012 The Go Authors. \ No newline at end of file diff --git a/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/buffer.go b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/buffer.go new file mode 100644 index 000000000000..7f63a8582d80 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/buffer.go @@ -0,0 +1,421 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package v1 + +// Simple byte buffer for marshaling data. 
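The generated listers deleted above (Cluster, Machine, MachineClass, MachineDeployment, MachineSet, in both v1alpha1 and v1beta1) all follow the same lister-gen pattern: a typed wrapper around a `cache.Indexer` whose namespaced `Get` resolves the `"namespace/name"` key. A minimal sketch of how such a lister was consumed before this removal; the namespace and object names here are made up, and a real controller would take the indexer from an informer's `GetIndexer()` rather than building one by hand:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/tools/cache"

	v1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1"
	listers "github.com/openshift/cluster-api/pkg/client/listers_generated/machine/v1beta1"
)

func main() {
	// MetaNamespaceKeyFunc produces the "namespace/name" keys that
	// machineSetNamespaceLister.Get looks up via GetByKey.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})

	ms := &v1beta1.MachineSet{}
	ms.Name = "workers"                    // hypothetical name
	ms.Namespace = "openshift-machine-api" // hypothetical namespace
	if err := indexer.Add(ms); err != nil {
		panic(err)
	}

	lister := listers.NewMachineSetLister(indexer)

	// Namespaced Get returns errors.NewNotFound for a missing key.
	got, err := lister.MachineSets("openshift-machine-api").Get("workers")
	if err != nil {
		panic(err)
	}
	fmt.Println("found:", got.Name)

	// List is a label-selector filter over the cached objects.
	all, _ := lister.List(labels.Everything())
	fmt.Println("cached MachineSets:", len(all))
}
```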
+ +import ( + "bytes" + "encoding/json" + "errors" + "io" + "unicode/utf8" +) + +type grower interface { + Grow(n int) +} + +type truncater interface { + Truncate(n int) + Reset() +} + +type bytesReader interface { + Bytes() []byte + String() string +} + +type runeWriter interface { + WriteRune(r rune) (n int, err error) +} + +type stringWriter interface { + WriteString(s string) (n int, err error) +} + +type lener interface { + Len() int +} + +type rewinder interface { + Rewind(n int) (err error) +} + +type encoder interface { + Encode(interface{}) error +} + +// TODO(pquerna): continue to reduce these interfaces + +type EncodingBuffer interface { + io.Writer + io.WriterTo + io.ByteWriter + stringWriter + truncater + grower + rewinder + encoder +} + +type DecodingBuffer interface { + io.ReadWriter + io.ByteWriter + stringWriter + runeWriter + truncater + grower + bytesReader + lener +} + +// A Buffer is a variable-sized buffer of bytes with Read and Write methods. +// The zero value for Buffer is an empty buffer ready to use. +type Buffer struct { + buf []byte // contents are the bytes buf[off : len(buf)] + off int // read at &buf[off], write at &buf[len(buf)] + runeBytes [utf8.UTFMax]byte // avoid allocation of slice on each WriteByte or Rune + encoder *json.Encoder + skipTrailingByte bool +} + +// ErrTooLarge is passed to panic if memory cannot be allocated to store data in a buffer. +var ErrTooLarge = errors.New("fflib.v1.Buffer: too large") + +// Bytes returns a slice of the contents of the unread portion of the buffer; +// len(b.Bytes()) == b.Len(). If the caller changes the contents of the +// returned slice, the contents of the buffer will change provided there +// are no intervening method calls on the Buffer. +func (b *Buffer) Bytes() []byte { return b.buf[b.off:] } + +// String returns the contents of the unread portion of the buffer +// as a string. If the Buffer is a nil pointer, it returns "". +func (b *Buffer) String() string { + if b == nil { + // Special case, useful in debugging. + return "" + } + return string(b.buf[b.off:]) +} + +// Len returns the number of bytes of the unread portion of the buffer; +// b.Len() == len(b.Bytes()). +func (b *Buffer) Len() int { return len(b.buf) - b.off } + +// Truncate discards all but the first n unread bytes from the buffer. +// It panics if n is negative or greater than the length of the buffer. +func (b *Buffer) Truncate(n int) { + if n == 0 { + b.off = 0 + b.buf = b.buf[0:0] + } else { + b.buf = b.buf[0 : b.off+n] + } +} + +// Reset resets the buffer so it has no content. +// b.Reset() is the same as b.Truncate(0). +func (b *Buffer) Reset() { b.Truncate(0) } + +// grow grows the buffer to guarantee space for n more bytes. +// It returns the index where bytes should be written. +// If the buffer can't grow it will panic with ErrTooLarge. +func (b *Buffer) grow(n int) int { + // If we have no buffer, get one from the pool + m := b.Len() + if m == 0 { + if b.buf == nil { + b.buf = makeSlice(2 * n) + b.off = 0 + } else if b.off != 0 { + // If buffer is empty, reset to recover space. + b.Truncate(0) + } + } + if len(b.buf)+n > cap(b.buf) { + var buf []byte + if m+n <= cap(b.buf)/2 { + // We can slide things down instead of allocating a new + // slice. We only need m+n <= cap(b.buf) to slide, but + // we instead let capacity get twice as large so we + // don't spend all our time copying. 
+ copy(b.buf[:], b.buf[b.off:]) + buf = b.buf[:m] + } else { + // not enough space anywhere + buf = makeSlice(2*cap(b.buf) + n) + copy(buf, b.buf[b.off:]) + Pool(b.buf) + b.buf = buf + } + b.off = 0 + } + b.buf = b.buf[0 : b.off+m+n] + return b.off + m +} + +// Grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After Grow(n), at least n bytes can be written to the +// buffer without another allocation. +// If n is negative, Grow will panic. +// If the buffer can't grow it will panic with ErrTooLarge. +func (b *Buffer) Grow(n int) { + if n < 0 { + panic("bytes.Buffer.Grow: negative count") + } + m := b.grow(n) + b.buf = b.buf[0:m] +} + +// Write appends the contents of p to the buffer, growing the buffer as +// needed. The return value n is the length of p; err is always nil. If the +// buffer becomes too large, Write will panic with ErrTooLarge. +func (b *Buffer) Write(p []byte) (n int, err error) { + if b.skipTrailingByte { + p = p[:len(p)-1] + } + m := b.grow(len(p)) + return copy(b.buf[m:], p), nil +} + +// WriteString appends the contents of s to the buffer, growing the buffer as +// needed. The return value n is the length of s; err is always nil. If the +// buffer becomes too large, WriteString will panic with ErrTooLarge. +func (b *Buffer) WriteString(s string) (n int, err error) { + m := b.grow(len(s)) + return copy(b.buf[m:], s), nil +} + +// MinRead is the minimum slice size passed to a Read call by +// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond +// what is required to hold the contents of r, ReadFrom will not grow the +// underlying buffer. +const minRead = 512 + +// ReadFrom reads data from r until EOF and appends it to the buffer, growing +// the buffer as needed. The return value n is the number of bytes read. Any +// error except io.EOF encountered during the read is also returned. If the +// buffer becomes too large, ReadFrom will panic with ErrTooLarge. +func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) { + // If buffer is empty, reset to recover space. + if b.off >= len(b.buf) { + b.Truncate(0) + } + for { + if free := cap(b.buf) - len(b.buf); free < minRead { + // not enough space at end + newBuf := b.buf + if b.off+free < minRead { + // not enough space using beginning of buffer; + // double buffer capacity + newBuf = makeSlice(2*cap(b.buf) + minRead) + } + copy(newBuf, b.buf[b.off:]) + Pool(b.buf) + b.buf = newBuf[:len(b.buf)-b.off] + b.off = 0 + } + m, e := r.Read(b.buf[len(b.buf):cap(b.buf)]) + b.buf = b.buf[0 : len(b.buf)+m] + n += int64(m) + if e == io.EOF { + break + } + if e != nil { + return n, e + } + } + return n, nil // err is EOF, so return nil explicitly +} + +// WriteTo writes data to w until the buffer is drained or an error occurs. +// The return value n is the number of bytes written; it always fits into an +// int, but it is int64 to match the io.WriterTo interface. Any error +// encountered during the write is also returned. +func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) { + if b.off < len(b.buf) { + nBytes := b.Len() + m, e := w.Write(b.buf[b.off:]) + if m > nBytes { + panic("bytes.Buffer.WriteTo: invalid Write count") + } + b.off += m + n = int64(m) + if e != nil { + return n, e + } + // all bytes should have been written, by definition of + // Write method in io.Writer + if m != nBytes { + return n, io.ErrShortWrite + } + } + // Buffer is now empty; reset. 
+ b.Truncate(0) + return +} + +// WriteByte appends the byte c to the buffer, growing the buffer as needed. +// The returned error is always nil, but is included to match bufio.Writer's +// WriteByte. If the buffer becomes too large, WriteByte will panic with +// ErrTooLarge. +func (b *Buffer) WriteByte(c byte) error { + m := b.grow(1) + b.buf[m] = c + return nil +} + +func (b *Buffer) Rewind(n int) error { + b.buf = b.buf[:len(b.buf)-n] + return nil +} + +func (b *Buffer) Encode(v interface{}) error { + if b.encoder == nil { + b.encoder = json.NewEncoder(b) + } + b.skipTrailingByte = true + err := b.encoder.Encode(v) + b.skipTrailingByte = false + return err +} + +// WriteRune appends the UTF-8 encoding of Unicode code point r to the +// buffer, returning its length and an error, which is always nil but is +// included to match bufio.Writer's WriteRune. The buffer is grown as needed; +// if it becomes too large, WriteRune will panic with ErrTooLarge. +func (b *Buffer) WriteRune(r rune) (n int, err error) { + if r < utf8.RuneSelf { + b.WriteByte(byte(r)) + return 1, nil + } + n = utf8.EncodeRune(b.runeBytes[0:], r) + b.Write(b.runeBytes[0:n]) + return n, nil +} + +// Read reads the next len(p) bytes from the buffer or until the buffer +// is drained. The return value n is the number of bytes read. If the +// buffer has no data to return, err is io.EOF (unless len(p) is zero); +// otherwise it is nil. +func (b *Buffer) Read(p []byte) (n int, err error) { + if b.off >= len(b.buf) { + // Buffer is empty, reset to recover space. + b.Truncate(0) + if len(p) == 0 { + return + } + return 0, io.EOF + } + n = copy(p, b.buf[b.off:]) + b.off += n + return +} + +// Next returns a slice containing the next n bytes from the buffer, +// advancing the buffer as if the bytes had been returned by Read. +// If there are fewer than n bytes in the buffer, Next returns the entire buffer. +// The slice is only valid until the next call to a read or write method. +func (b *Buffer) Next(n int) []byte { + m := b.Len() + if n > m { + n = m + } + data := b.buf[b.off : b.off+n] + b.off += n + return data +} + +// ReadByte reads and returns the next byte from the buffer. +// If no byte is available, it returns error io.EOF. +func (b *Buffer) ReadByte() (c byte, err error) { + if b.off >= len(b.buf) { + // Buffer is empty, reset to recover space. + b.Truncate(0) + return 0, io.EOF + } + c = b.buf[b.off] + b.off++ + return c, nil +} + +// ReadRune reads and returns the next UTF-8-encoded +// Unicode code point from the buffer. +// If no bytes are available, the error returned is io.EOF. +// If the bytes are an erroneous UTF-8 encoding, it +// consumes one byte and returns U+FFFD, 1. +func (b *Buffer) ReadRune() (r rune, size int, err error) { + if b.off >= len(b.buf) { + // Buffer is empty, reset to recover space. + b.Truncate(0) + return 0, 0, io.EOF + } + c := b.buf[b.off] + if c < utf8.RuneSelf { + b.off++ + return rune(c), 1, nil + } + r, n := utf8.DecodeRune(b.buf[b.off:]) + b.off += n + return r, n, nil +} + +// ReadBytes reads until the first occurrence of delim in the input, +// returning a slice containing the data up to and including the delimiter. +// If ReadBytes encounters an error before finding a delimiter, +// it returns the data read before the error and the error itself (often io.EOF). +// ReadBytes returns err != nil if and only if the returned data does not end in +// delim. 
+func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) { + slice, err := b.readSlice(delim) + // return a copy of slice. The buffer's backing array may + // be overwritten by later calls. + line = append(line, slice...) + return +} + +// readSlice is like ReadBytes but returns a reference to internal buffer data. +func (b *Buffer) readSlice(delim byte) (line []byte, err error) { + i := bytes.IndexByte(b.buf[b.off:], delim) + end := b.off + i + 1 + if i < 0 { + end = len(b.buf) + err = io.EOF + } + line = b.buf[b.off:end] + b.off = end + return line, err +} + +// ReadString reads until the first occurrence of delim in the input, +// returning a string containing the data up to and including the delimiter. +// If ReadString encounters an error before finding a delimiter, +// it returns the data read before the error and the error itself (often io.EOF). +// ReadString returns err != nil if and only if the returned data does not end +// in delim. +func (b *Buffer) ReadString(delim byte) (line string, err error) { + slice, err := b.readSlice(delim) + return string(slice), err +} + +// NewBuffer creates and initializes a new Buffer using buf as its initial +// contents. It is intended to prepare a Buffer to read existing data. It +// can also be used to size the internal buffer for writing. To do that, +// buf should have the desired capacity but a length of zero. +// +// In most cases, new(Buffer) (or just declaring a Buffer variable) is +// sufficient to initialize a Buffer. +func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} } + +// NewBufferString creates and initializes a new Buffer using string s as its +// initial contents. It is intended to prepare a buffer to read an existing +// string. +// +// In most cases, new(Buffer) (or just declaring a Buffer variable) is +// sufficient to initialize a Buffer. +func NewBufferString(s string) *Buffer { + return &Buffer{buf: []byte(s)} +} diff --git a/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/buffer_nopool.go b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/buffer_nopool.go new file mode 100644 index 000000000000..b84af6ff9688 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/buffer_nopool.go @@ -0,0 +1,11 @@ +// +build !go1.3 + +package v1 + +// Stub version of buffer_pool.go for Go 1.2, which doesn't have sync.Pool. + +func Pool(b []byte) {} + +func makeSlice(n int) []byte { + return make([]byte, n) +} diff --git a/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/buffer_pool.go b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/buffer_pool.go new file mode 100644 index 000000000000..a021c57cf402 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/buffer_pool.go @@ -0,0 +1,105 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.3 + +package v1 + +// Allocation pools for Buffers. + +import "sync" + +var pools [14]sync.Pool +var pool64 *sync.Pool + +func init() { + var i uint + // TODO(pquerna): add science here around actual pool sizes. + for i = 6; i < 20; i++ { + n := 1 << i + pools[poolNum(n)].New = func() interface{} { return make([]byte, 0, n) } + } + pool64 = &pools[0] +} + +// This returns the pool number that will give a buffer of +// at least 'i' bytes. 
+func poolNum(i int) int { + // TODO(pquerna): convert to log2 w/ bsr asm instruction: + // + if i <= 64 { + return 0 + } else if i <= 128 { + return 1 + } else if i <= 256 { + return 2 + } else if i <= 512 { + return 3 + } else if i <= 1024 { + return 4 + } else if i <= 2048 { + return 5 + } else if i <= 4096 { + return 6 + } else if i <= 8192 { + return 7 + } else if i <= 16384 { + return 8 + } else if i <= 32768 { + return 9 + } else if i <= 65536 { + return 10 + } else if i <= 131072 { + return 11 + } else if i <= 262144 { + return 12 + } else if i <= 524288 { + return 13 + } else { + return -1 + } +} + +// Send a buffer to the Pool to reuse for other instances. +// You may no longer utilize the content of the buffer, since it may be used +// by other goroutines. +func Pool(b []byte) { + if b == nil { + return + } + c := cap(b) + + // Our smallest buffer is 64 bytes, so we discard smaller buffers. + if c < 64 { + return + } + + // We need to put the incoming buffer into the NEXT buffer, + // since a buffer guarantees AT LEAST the number of bytes available + // that is the top of this buffer. + // That is the reason for dividing the cap by 2, so it gets into the NEXT bucket. + // We add 2 to avoid rounding down if size is exactly power of 2. + pn := poolNum((c + 2) >> 1) + if pn != -1 { + pools[pn].Put(b[0:0]) + } + // if we didn't have a slot for this []byte, we just drop it and let the GC + // take care of it. +} + +// makeSlice allocates a slice of size n -- it will attempt to use a pool'ed +// instance whenever possible. +func makeSlice(n int) []byte { + if n <= 64 { + return pool64.Get().([]byte)[0:n] + } + + pn := poolNum(n) + + if pn != -1 { + return pools[pn].Get().([]byte)[0:n] + } else { + return make([]byte, n) + } +} diff --git a/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/bytenum.go b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/bytenum.go new file mode 100644 index 000000000000..08477409acba --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/bytenum.go @@ -0,0 +1,88 @@ +/** + * Copyright 2014 Paul Querna + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* Portions of this file are on Go stdlib's strconv/iota.go */ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
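The bucket selection above is a hand-unrolled base-2 logarithm: `poolNum` maps any size up to 512 KiB onto one of the 14 pools sized 64 B, 128 B, ..., 512 KiB, and `Pool` deliberately files a returned slice under the next-smaller bucket so that a later `Get` is guaranteed at least the requested capacity. The file's own TODO suggests computing this with a bit-scan; here is a standalone sketch of that equivalent computation using math/bits (my illustration, not code from ffjson):

```go
package main

import (
	"fmt"
	"math/bits"
)

// bucketFor computes the same mapping as ffjson's poolNum if/else ladder:
// sizes up to 64 land in bucket 0, each doubling gets the next bucket,
// and anything above 512 KiB returns -1 (no pool).
func bucketFor(n int) int {
	switch {
	case n <= 64:
		return 0
	case n > 524288:
		return -1
	default:
		// bits.Len(uint(n-1)) is ceil(log2(n)); bucket 0 covers 2^6 = 64.
		return bits.Len(uint(n-1)) - 6
	}
}

func main() {
	for _, n := range []int{1, 64, 65, 128, 129, 4096, 4097, 524288, 524289} {
		fmt.Printf("size %6d -> bucket %2d\n", n, bucketFor(n))
	}
}
```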
+ +package v1 + +import ( + "github.com/pquerna/ffjson/fflib/v1/internal" +) + +func ParseFloat(s []byte, bitSize int) (f float64, err error) { + return internal.ParseFloat(s, bitSize) +} + +// ParseUint is like ParseInt but for unsigned numbers, and oeprating on []byte +func ParseUint(s []byte, base int, bitSize int) (n uint64, err error) { + if len(s) == 1 { + switch s[0] { + case '0': + return 0, nil + case '1': + return 1, nil + case '2': + return 2, nil + case '3': + return 3, nil + case '4': + return 4, nil + case '5': + return 5, nil + case '6': + return 6, nil + case '7': + return 7, nil + case '8': + return 8, nil + case '9': + return 9, nil + } + } + return internal.ParseUint(s, base, bitSize) +} + +func ParseInt(s []byte, base int, bitSize int) (i int64, err error) { + if len(s) == 1 { + switch s[0] { + case '0': + return 0, nil + case '1': + return 1, nil + case '2': + return 2, nil + case '3': + return 3, nil + case '4': + return 4, nil + case '5': + return 5, nil + case '6': + return 6, nil + case '7': + return 7, nil + case '8': + return 8, nil + case '9': + return 9, nil + } + } + return internal.ParseInt(s, base, bitSize) +} diff --git a/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/decimal.go b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/decimal.go new file mode 100644 index 000000000000..069df7a02a6e --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/decimal.go @@ -0,0 +1,378 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Multiprecision decimal numbers. +// For floating-point formatting only; not general purpose. +// Only operations are assign and (binary) left/right shift. +// Can do binary floating point in multiprecision decimal precisely +// because 2 divides 10; cannot do decimal floating point +// in multiprecision binary precisely. + +package v1 + +type decimal struct { + d [800]byte // digits + nd int // number of digits used + dp int // decimal point + neg bool + trunc bool // discarded nonzero digits beyond d[:nd] +} + +func (a *decimal) String() string { + n := 10 + a.nd + if a.dp > 0 { + n += a.dp + } + if a.dp < 0 { + n += -a.dp + } + + buf := make([]byte, n) + w := 0 + switch { + case a.nd == 0: + return "0" + + case a.dp <= 0: + // zeros fill space between decimal point and digits + buf[w] = '0' + w++ + buf[w] = '.' + w++ + w += digitZero(buf[w : w+-a.dp]) + w += copy(buf[w:], a.d[0:a.nd]) + + case a.dp < a.nd: + // decimal point in middle of digits + w += copy(buf[w:], a.d[0:a.dp]) + buf[w] = '.' + w++ + w += copy(buf[w:], a.d[a.dp:a.nd]) + + default: + // zeros fill space between digits and decimal point + w += copy(buf[w:], a.d[0:a.nd]) + w += digitZero(buf[w : w+a.dp-a.nd]) + } + return string(buf[0:w]) +} + +func digitZero(dst []byte) int { + for i := range dst { + dst[i] = '0' + } + return len(dst) +} + +// trim trailing zeros from number. +// (They are meaningless; the decimal point is tracked +// independent of the number of digits.) +func trim(a *decimal) { + for a.nd > 0 && a.d[a.nd-1] == '0' { + a.nd-- + } + if a.nd == 0 { + a.dp = 0 + } +} + +// Assign v to a. +func (a *decimal) Assign(v uint64) { + var buf [24]byte + + // Write reversed decimal in buf. + n := 0 + for v > 0 { + v1 := v / 10 + v -= 10 * v1 + buf[n] = byte(v + '0') + n++ + v = v1 + } + + // Reverse again to produce forward decimal in a.d. 
+ a.nd = 0 + for n--; n >= 0; n-- { + a.d[a.nd] = buf[n] + a.nd++ + } + a.dp = a.nd + trim(a) +} + +// Maximum shift that we can do in one pass without overflow. +// Signed int has 31 bits, and we have to be able to accommodate 9<<k. +const maxShift = 27 + +// Binary shift right (/ 2) by k bits. k <= maxShift to avoid overflow. +func rightShift(a *decimal, k uint) { + r := 0 // read pointer + w := 0 // write pointer + + // Pick up enough leading digits to cover first shift. + n := 0 + for ; n>>k == 0; r++ { + if r >= a.nd { + if n == 0 { + // a == 0; shouldn't get here, but handle anyway. + a.nd = 0 + return + } + for n>>k == 0 { + n = n * 10 + r++ + } + break + } + c := int(a.d[r]) + n = n*10 + c - '0' + } + a.dp -= r - 1 + + // Pick up a digit, put down a digit. + for ; r < a.nd; r++ { + c := int(a.d[r]) + dig := n >> k + n -= dig << k + a.d[w] = byte(dig + '0') + w++ + n = n*10 + c - '0' + } + + // Put down extra digits. + for n > 0 { + dig := n >> k + n -= dig << k + if w < len(a.d) { + a.d[w] = byte(dig + '0') + w++ + } else if dig > 0 { + a.trunc = true + } + n = n * 10 + } + + a.nd = w + trim(a) +} + +// Cheat sheet for left shift: table indexed by shift count giving +// number of new digits that will be introduced by that shift. +// +// For example, leftcheats[4] = {2, "625"}. That means that +// if we are shifting by 4 (multiplying by 16), it will add 2 digits +// when the string prefix is "625" through "999", and one fewer digit +// if the string prefix is "000" through "624". +// +// Credit for this trick goes to Ken. + +type leftCheat struct { + delta int // number of new digits + cutoff string // minus one digit if original < a. +} + +var leftcheats = []leftCheat{ + // Leading digits of 1/2^i = 5^i. + // 5^23 is not an exact 64-bit floating point number, + // so have to use bc for the math. + /* + seq 27 | sed 's/^/5^/' | bc | + awk 'BEGIN{ print "\tleftCheat{ 0, \"\" }," } + { + log2 = log(2)/log(10) + printf("\tleftCheat{ %d, \"%s\" },\t// * %d\n", + int(log2*NR+1), $0, 2**NR) + }' + */ + {0, ""}, + {1, "5"}, // * 2 + {1, "25"}, // * 4 + {1, "125"}, // * 8 + {2, "625"}, // * 16 + {2, "3125"}, // * 32 + {2, "15625"}, // * 64 + {3, "78125"}, // * 128 + {3, "390625"}, // * 256 + {3, "1953125"}, // * 512 + {4, "9765625"}, // * 1024 + {4, "48828125"}, // * 2048 + {4, "244140625"}, // * 4096 + {4, "1220703125"}, // * 8192 + {5, "6103515625"}, // * 16384 + {5, "30517578125"}, // * 32768 + {5, "152587890625"}, // * 65536 + {6, "762939453125"}, // * 131072 + {6, "3814697265625"}, // * 262144 + {6, "19073486328125"}, // * 524288 + {7, "95367431640625"}, // * 1048576 + {7, "476837158203125"}, // * 2097152 + {7, "2384185791015625"}, // * 4194304 + {7, "11920928955078125"}, // * 8388608 + {8, "59604644775390625"}, // * 16777216 + {8, "298023223876953125"}, // * 33554432 + {8, "1490116119384765625"}, // * 67108864 + {9, "7450580596923828125"}, // * 134217728 +} + +// Is the leading prefix of b lexicographically less than s? +func prefixIsLessThan(b []byte, s string) bool { + for i := 0; i < len(s); i++ { + if i >= len(b) { + return true + } + if b[i] != s[i] { + return b[i] < s[i] + } + } + return false +} + +// Binary shift left (/ 2) by k bits. k <= maxShift to avoid overflow. +func leftShift(a *decimal, k uint) { + delta := leftcheats[k].delta + if prefixIsLessThan(a.d[0:a.nd], leftcheats[k].cutoff) { + delta-- + } + + r := a.nd // read index + w := a.nd + delta // write index + n := 0 + + // Pick up a digit, put down a digit. + for r--; r >= 0; r-- { + n += (int(a.d[r]) - '0') << k + quo := n / 10 + rem := n - 10*quo + w-- + if w < len(a.d) { + a.d[w] = byte(rem + '0') + } else if rem != 0 { + a.trunc = true + } + n = quo + } + + // Put down extra digits.
+ for n > 0 { + quo := n / 10 + rem := n - 10*quo + w-- + if w < len(a.d) { + a.d[w] = byte(rem + '0') + } else if rem != 0 { + a.trunc = true + } + n = quo + } + + a.nd += delta + if a.nd >= len(a.d) { + a.nd = len(a.d) + } + a.dp += delta + trim(a) +} + +// Binary shift left (k > 0) or right (k < 0). +func (a *decimal) Shift(k int) { + switch { + case a.nd == 0: + // nothing to do: a == 0 + case k > 0: + for k > maxShift { + leftShift(a, maxShift) + k -= maxShift + } + leftShift(a, uint(k)) + case k < 0: + for k < -maxShift { + rightShift(a, maxShift) + k += maxShift + } + rightShift(a, uint(-k)) + } +} + +// If we chop a at nd digits, should we round up? +func shouldRoundUp(a *decimal, nd int) bool { + if nd < 0 || nd >= a.nd { + return false + } + if a.d[nd] == '5' && nd+1 == a.nd { // exactly halfway - round to even + // if we truncated, a little higher than what's recorded - always round up + if a.trunc { + return true + } + return nd > 0 && (a.d[nd-1]-'0')%2 != 0 + } + // not halfway - digit tells all + return a.d[nd] >= '5' +} + +// Round a to nd digits (or fewer). +// If nd is zero, it means we're rounding +// just to the left of the digits, as in +// 0.09 -> 0.1. +func (a *decimal) Round(nd int) { + if nd < 0 || nd >= a.nd { + return + } + if shouldRoundUp(a, nd) { + a.RoundUp(nd) + } else { + a.RoundDown(nd) + } +} + +// Round a down to nd digits (or fewer). +func (a *decimal) RoundDown(nd int) { + if nd < 0 || nd >= a.nd { + return + } + a.nd = nd + trim(a) +} + +// Round a up to nd digits (or fewer). +func (a *decimal) RoundUp(nd int) { + if nd < 0 || nd >= a.nd { + return + } + + // round up + for i := nd - 1; i >= 0; i-- { + c := a.d[i] + if c < '9' { // can stop after this digit + a.d[i]++ + a.nd = i + 1 + return + } + } + + // Number is all 9s. + // Change to single 1 with adjusted decimal point. + a.d[0] = '1' + a.nd = 1 + a.dp++ +} + +// Extract integer part, rounded appropriately. +// No guarantees about overflow. +func (a *decimal) RoundedInteger() uint64 { + if a.dp > 20 { + return 0xFFFFFFFFFFFFFFFF + } + var i int + n := uint64(0) + for i = 0; i < a.dp && i < a.nd; i++ { + n = n*10 + uint64(a.d[i]-'0') + } + for ; i < a.dp; i++ { + n *= 10 + } + if shouldRoundUp(a, a.dp) { + n++ + } + return n +} diff --git a/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/extfloat.go b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/extfloat.go new file mode 100644 index 000000000000..508ddc6bedaf --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/extfloat.go @@ -0,0 +1,668 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package v1 + +// An extFloat represents an extended floating-point number, with more +// precision than a float64. It does not try to save bits: the +// number represented by the structure is mant*(2^exp), with a negative +// sign if neg is true. +type extFloat struct { + mant uint64 + exp int + neg bool +} + +// Powers of ten taken from double-conversion library. 
+// http://code.google.com/p/double-conversion/ +const ( + firstPowerOfTen = -348 + stepPowerOfTen = 8 +) + +var smallPowersOfTen = [...]extFloat{ + {1 << 63, -63, false}, // 1 + {0xa << 60, -60, false}, // 1e1 + {0x64 << 57, -57, false}, // 1e2 + {0x3e8 << 54, -54, false}, // 1e3 + {0x2710 << 50, -50, false}, // 1e4 + {0x186a0 << 47, -47, false}, // 1e5 + {0xf4240 << 44, -44, false}, // 1e6 + {0x989680 << 40, -40, false}, // 1e7 +} + +var powersOfTen = [...]extFloat{ + {0xfa8fd5a0081c0288, -1220, false}, // 10^-348 + {0xbaaee17fa23ebf76, -1193, false}, // 10^-340 + {0x8b16fb203055ac76, -1166, false}, // 10^-332 + {0xcf42894a5dce35ea, -1140, false}, // 10^-324 + {0x9a6bb0aa55653b2d, -1113, false}, // 10^-316 + {0xe61acf033d1a45df, -1087, false}, // 10^-308 + {0xab70fe17c79ac6ca, -1060, false}, // 10^-300 + {0xff77b1fcbebcdc4f, -1034, false}, // 10^-292 + {0xbe5691ef416bd60c, -1007, false}, // 10^-284 + {0x8dd01fad907ffc3c, -980, false}, // 10^-276 + {0xd3515c2831559a83, -954, false}, // 10^-268 + {0x9d71ac8fada6c9b5, -927, false}, // 10^-260 + {0xea9c227723ee8bcb, -901, false}, // 10^-252 + {0xaecc49914078536d, -874, false}, // 10^-244 + {0x823c12795db6ce57, -847, false}, // 10^-236 + {0xc21094364dfb5637, -821, false}, // 10^-228 + {0x9096ea6f3848984f, -794, false}, // 10^-220 + {0xd77485cb25823ac7, -768, false}, // 10^-212 + {0xa086cfcd97bf97f4, -741, false}, // 10^-204 + {0xef340a98172aace5, -715, false}, // 10^-196 + {0xb23867fb2a35b28e, -688, false}, // 10^-188 + {0x84c8d4dfd2c63f3b, -661, false}, // 10^-180 + {0xc5dd44271ad3cdba, -635, false}, // 10^-172 + {0x936b9fcebb25c996, -608, false}, // 10^-164 + {0xdbac6c247d62a584, -582, false}, // 10^-156 + {0xa3ab66580d5fdaf6, -555, false}, // 10^-148 + {0xf3e2f893dec3f126, -529, false}, // 10^-140 + {0xb5b5ada8aaff80b8, -502, false}, // 10^-132 + {0x87625f056c7c4a8b, -475, false}, // 10^-124 + {0xc9bcff6034c13053, -449, false}, // 10^-116 + {0x964e858c91ba2655, -422, false}, // 10^-108 + {0xdff9772470297ebd, -396, false}, // 10^-100 + {0xa6dfbd9fb8e5b88f, -369, false}, // 10^-92 + {0xf8a95fcf88747d94, -343, false}, // 10^-84 + {0xb94470938fa89bcf, -316, false}, // 10^-76 + {0x8a08f0f8bf0f156b, -289, false}, // 10^-68 + {0xcdb02555653131b6, -263, false}, // 10^-60 + {0x993fe2c6d07b7fac, -236, false}, // 10^-52 + {0xe45c10c42a2b3b06, -210, false}, // 10^-44 + {0xaa242499697392d3, -183, false}, // 10^-36 + {0xfd87b5f28300ca0e, -157, false}, // 10^-28 + {0xbce5086492111aeb, -130, false}, // 10^-20 + {0x8cbccc096f5088cc, -103, false}, // 10^-12 + {0xd1b71758e219652c, -77, false}, // 10^-4 + {0x9c40000000000000, -50, false}, // 10^4 + {0xe8d4a51000000000, -24, false}, // 10^12 + {0xad78ebc5ac620000, 3, false}, // 10^20 + {0x813f3978f8940984, 30, false}, // 10^28 + {0xc097ce7bc90715b3, 56, false}, // 10^36 + {0x8f7e32ce7bea5c70, 83, false}, // 10^44 + {0xd5d238a4abe98068, 109, false}, // 10^52 + {0x9f4f2726179a2245, 136, false}, // 10^60 + {0xed63a231d4c4fb27, 162, false}, // 10^68 + {0xb0de65388cc8ada8, 189, false}, // 10^76 + {0x83c7088e1aab65db, 216, false}, // 10^84 + {0xc45d1df942711d9a, 242, false}, // 10^92 + {0x924d692ca61be758, 269, false}, // 10^100 + {0xda01ee641a708dea, 295, false}, // 10^108 + {0xa26da3999aef774a, 322, false}, // 10^116 + {0xf209787bb47d6b85, 348, false}, // 10^124 + {0xb454e4a179dd1877, 375, false}, // 10^132 + {0x865b86925b9bc5c2, 402, false}, // 10^140 + {0xc83553c5c8965d3d, 428, false}, // 10^148 + {0x952ab45cfa97a0b3, 455, false}, // 10^156 + {0xde469fbd99a05fe3, 481, false}, // 10^164 + {0xa59bc234db398c25, 
508, false}, // 10^172 + {0xf6c69a72a3989f5c, 534, false}, // 10^180 + {0xb7dcbf5354e9bece, 561, false}, // 10^188 + {0x88fcf317f22241e2, 588, false}, // 10^196 + {0xcc20ce9bd35c78a5, 614, false}, // 10^204 + {0x98165af37b2153df, 641, false}, // 10^212 + {0xe2a0b5dc971f303a, 667, false}, // 10^220 + {0xa8d9d1535ce3b396, 694, false}, // 10^228 + {0xfb9b7cd9a4a7443c, 720, false}, // 10^236 + {0xbb764c4ca7a44410, 747, false}, // 10^244 + {0x8bab8eefb6409c1a, 774, false}, // 10^252 + {0xd01fef10a657842c, 800, false}, // 10^260 + {0x9b10a4e5e9913129, 827, false}, // 10^268 + {0xe7109bfba19c0c9d, 853, false}, // 10^276 + {0xac2820d9623bf429, 880, false}, // 10^284 + {0x80444b5e7aa7cf85, 907, false}, // 10^292 + {0xbf21e44003acdd2d, 933, false}, // 10^300 + {0x8e679c2f5e44ff8f, 960, false}, // 10^308 + {0xd433179d9c8cb841, 986, false}, // 10^316 + {0x9e19db92b4e31ba9, 1013, false}, // 10^324 + {0xeb96bf6ebadf77d9, 1039, false}, // 10^332 + {0xaf87023b9bf0ee6b, 1066, false}, // 10^340 +} + +// floatBits returns the bits of the float64 that best approximates +// the extFloat passed as receiver. Overflow is set to true if +// the resulting float64 is ±Inf. +func (f *extFloat) floatBits(flt *floatInfo) (bits uint64, overflow bool) { + f.Normalize() + + exp := f.exp + 63 + + // Exponent too small. + if exp < flt.bias+1 { + n := flt.bias + 1 - exp + f.mant >>= uint(n) + exp += n + } + + // Extract 1+flt.mantbits bits from the 64-bit mantissa. + mant := f.mant >> (63 - flt.mantbits) + if f.mant&(1<<(62-flt.mantbits)) != 0 { + // Round up. + mant += 1 + } + + // Rounding might have added a bit; shift down. + if mant == 2<<flt.mantbits { + mant >>= 1 + exp++ + } + + // Infinities. + if exp-flt.bias >= 1<<flt.expbits-1 { + // ±Inf + mant = 0 + exp = 1<<flt.expbits - 1 + flt.bias + overflow = true + } else if mant&(1<<flt.mantbits) == 0 { + // Denormalized? + exp = flt.bias + } + // Assemble bits. + bits = mant & (uint64(1)<<flt.mantbits - 1) + bits |= uint64((exp-flt.bias)&(1<<flt.expbits-1)) << flt.mantbits + if f.neg { + bits |= 1 << (flt.mantbits + flt.expbits) + } + return +} + +// AssignComputeBounds sets f to the floating point value +// defined by mant, exp and precision given by flt. It returns +// lower, upper such that any number in the closed interval +// [lower, upper] is converted back to the same floating point number. +func (f *extFloat) AssignComputeBounds(mant uint64, exp int, neg bool, flt *floatInfo) (lower, upper extFloat) { + f.mant = mant + f.exp = exp - int(flt.mantbits) + f.neg = neg + if f.exp <= 0 && mant == (mant>>uint(-f.exp))<<uint(-f.exp) { + // An exact integer + f.mant >>= uint(-f.exp) + f.exp = 0 + return *f, *f + } + expBiased := exp - flt.bias + + upper = extFloat{mant: 2*f.mant + 1, exp: f.exp - 1, neg: f.neg} + if mant != 1<<flt.mantbits || expBiased == 1 { + lower = extFloat{mant: 2*f.mant - 1, exp: f.exp - 1, neg: f.neg} + } else { + lower = extFloat{mant: 4*f.mant - 1, exp: f.exp - 2, neg: f.neg} + } + return +} + +// Normalize normalizes f so that the highest bit of the mantissa is +// set, and returns the number of bits shifted. +func (f *extFloat) Normalize() (shift uint) { + mant, exp := f.mant, f.exp + if mant == 0 { + return 0 + } + if mant>>(64-32) == 0 { + mant <<= 32 + exp -= 32 + } + if mant>>(64-16) == 0 { + mant <<= 16 + exp -= 16 + } + if mant>>(64-8) == 0 { + mant <<= 8 + exp -= 8 + } + if mant>>(64-4) == 0 { + mant <<= 4 + exp -= 4 + } + if mant>>(64-2) == 0 { + mant <<= 2 + exp -= 2 + } + if mant>>(64-1) == 0 { + mant <<= 1 + exp -= 1 + } + shift = uint(f.exp - exp) + f.mant, f.exp = mant, exp + return +} + +// Multiply sets f to the product f*g: the result is correctly rounded, +// but not normalized. +func (f *extFloat) Multiply(g extFloat) { + fhi, flo := f.mant>>32, uint64(uint32(f.mant)) + ghi, glo := g.mant>>32, uint64(uint32(g.mant)) + + // Cross products. + cross1 := fhi * glo + cross2 := flo * ghi + + // f.mant*g.mant is fhi*ghi << 64 + (cross1+cross2) << 32 + flo*glo + f.mant = fhi*ghi + (cross1 >> 32) + (cross2 >> 32) + rem := uint64(uint32(cross1)) + uint64(uint32(cross2)) + ((flo * glo) >> 32) + // Round up. + rem += (1 << 31) + + f.mant += (rem >> 32) + f.exp = f.exp + g.exp + 64 +} + +var uint64pow10 = [...]uint64{ + 1, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, + 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, +} + +// AssignDecimal sets f to an approximate value mantissa*10^exp. It +// returns true if the value represented by f is guaranteed to be the +// best approximation of d after being rounded to a float64 or +// float32 depending on flt. +func (f *extFloat) AssignDecimal(mantissa uint64, exp10 int, neg bool, trunc bool, flt *floatInfo) (ok bool) { + const uint64digits = 19 + const errorscale = 8 + errors := 0 // An upper bound for error, computed in errorscale*ulp. + if trunc { + // the decimal number was truncated.
+ errors += errorscale / 2 + } + + f.mant = mantissa + f.exp = 0 + f.neg = neg + + // Multiply by powers of ten. + i := (exp10 - firstPowerOfTen) / stepPowerOfTen + if exp10 < firstPowerOfTen || i >= len(powersOfTen) { + return false + } + adjExp := (exp10 - firstPowerOfTen) % stepPowerOfTen + + // We multiply by exp%step + if adjExp < uint64digits && mantissa < uint64pow10[uint64digits-adjExp] { + // We can multiply the mantissa exactly. + f.mant *= uint64pow10[adjExp] + f.Normalize() + } else { + f.Normalize() + f.Multiply(smallPowersOfTen[adjExp]) + errors += errorscale / 2 + } + + // We multiply by 10 to the exp - exp%step. + f.Multiply(powersOfTen[i]) + if errors > 0 { + errors += 1 + } + errors += errorscale / 2 + + // Normalize + shift := f.Normalize() + errors <<= shift + + // Now f is a good approximation of the decimal. + // Check whether the error is too large: that is, if the mantissa + // is perturbated by the error, the resulting float64 will change. + // The 64 bits mantissa is 1 + 52 bits for float64 + 11 extra bits. + // + // In many cases the approximation will be good enough. + denormalExp := flt.bias - 63 + var extrabits uint + if f.exp <= denormalExp { + // f.mant * 2^f.exp is smaller than 2^(flt.bias+1). + extrabits = uint(63 - flt.mantbits + 1 + uint(denormalExp-f.exp)) + } else { + extrabits = uint(63 - flt.mantbits) + } + + halfway := uint64(1) << (extrabits - 1) + mant_extra := f.mant & (1< expMax: + i-- + default: + break Loop + } + } + // Apply the desired decimal shift on f. It will have exponent + // in the desired range. This is multiplication by 10^-exp10. + f.Multiply(powersOfTen[i]) + + return -(firstPowerOfTen + i*stepPowerOfTen), i +} + +// frexp10Many applies a common shift by a power of ten to a, b, c. +func frexp10Many(a, b, c *extFloat) (exp10 int) { + exp10, i := c.frexp10() + a.Multiply(powersOfTen[i]) + b.Multiply(powersOfTen[i]) + return +} + +// FixedDecimal stores in d the first n significant digits +// of the decimal representation of f. It returns false +// if it cannot be sure of the answer. +func (f *extFloat) FixedDecimal(d *decimalSlice, n int) bool { + if f.mant == 0 { + d.nd = 0 + d.dp = 0 + d.neg = f.neg + return true + } + if n == 0 { + panic("strconv: internal error: extFloat.FixedDecimal called with n == 0") + } + // Multiply by an appropriate power of ten to have a reasonable + // number to process. + f.Normalize() + exp10, _ := f.frexp10() + + shift := uint(-f.exp) + integer := uint32(f.mant >> shift) + fraction := f.mant - (uint64(integer) << shift) + ε := uint64(1) // ε is the uncertainty we have on the mantissa of f. + + // Write exactly n digits to d. + needed := n // how many digits are left to write. + integerDigits := 0 // the number of decimal digits of integer. + pow10 := uint64(1) // the power of ten by which f was scaled. + for i, pow := 0, uint64(1); i < 20; i++ { + if pow > uint64(integer) { + integerDigits = i + break + } + pow *= 10 + } + rest := integer + if integerDigits > needed { + // the integral part is already large, trim the last digits. + pow10 = uint64pow10[integerDigits-needed] + integer /= uint32(pow10) + rest -= integer * uint32(pow10) + } else { + rest = 0 + } + + // Write the digits of integer: the digits of rest are omitted. 
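+	// For example (illustrative values, not in the upstream source): with
+	// n = 3 and integer = 12345, integerDigits is 5, so pow10 = 100,
+	// integer becomes 123 and rest = 45; only "123" is written here, and
+	// the dropped "45" feeds the rounding adjustment at the end.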
+ var buf [32]byte + pos := len(buf) + for v := integer; v > 0; { + v1 := v / 10 + v -= 10 * v1 + pos-- + buf[pos] = byte(v + '0') + v = v1 + } + for i := pos; i < len(buf); i++ { + d.d[i-pos] = buf[i] + } + nd := len(buf) - pos + d.nd = nd + d.dp = integerDigits + exp10 + needed -= nd + + if needed > 0 { + if rest != 0 || pow10 != 1 { + panic("strconv: internal error, rest != 0 but needed > 0") + } + // Emit digits for the fractional part. Each time, 10*fraction + // fits in a uint64 without overflow. + for needed > 0 { + fraction *= 10 + ε *= 10 // the uncertainty scales as we multiply by ten. + if 2*ε > 1<> shift + d.d[nd] = byte(digit + '0') + fraction -= digit << shift + nd++ + needed-- + } + d.nd = nd + } + + // We have written a truncation of f (a numerator / 10^d.dp). The remaining part + // can be interpreted as a small number (< 1) to be added to the last digit of the + // numerator. + // + // If rest > 0, the amount is: + // (rest< 0 guarantees that pow10 << shift does not overflow a uint64. + // + // If rest = 0, pow10 == 1 and the amount is + // fraction / (1 << shift) + // fraction being known with a ±ε uncertainty. + // + // We pass this information to the rounding routine for adjustment. + + ok := adjustLastDigitFixed(d, uint64(rest)<= 0; i-- { + if d.d[i] != '0' { + d.nd = i + 1 + break + } + } + return true +} + +// adjustLastDigitFixed assumes d contains the representation of the integral part +// of some number, whose fractional part is num / (den << shift). The numerator +// num is only known up to an uncertainty of size ε, assumed to be less than +// (den << shift)/2. +// +// It will increase the last digit by one to account for correct rounding, typically +// when the fractional part is greater than 1/2, and will return false if ε is such +// that no correct answer can be given. +func adjustLastDigitFixed(d *decimalSlice, num, den uint64, shift uint, ε uint64) bool { + if num > den< den< den< (den< den<= 0; i-- { + if d.d[i] == '9' { + d.nd-- + } else { + break + } + } + if i < 0 { + d.d[0] = '1' + d.nd = 1 + d.dp++ + } else { + d.d[i]++ + } + return true + } + return false +} + +// ShortestDecimal stores in d the shortest decimal representation of f +// which belongs to the open interval (lower, upper), where f is supposed +// to lie. It returns false whenever the result is unsure. The implementation +// uses the Grisu3 algorithm. +func (f *extFloat) ShortestDecimal(d *decimalSlice, lower, upper *extFloat) bool { + if f.mant == 0 { + d.nd = 0 + d.dp = 0 + d.neg = f.neg + return true + } + if f.exp == 0 && *lower == *f && *lower == *upper { + // an exact integer. + var buf [24]byte + n := len(buf) - 1 + for v := f.mant; v > 0; { + v1 := v / 10 + v -= 10 * v1 + buf[n] = byte(v + '0') + n-- + v = v1 + } + nd := len(buf) - n - 1 + for i := 0; i < nd; i++ { + d.d[i] = buf[n+1+i] + } + d.nd, d.dp = nd, nd + for d.nd > 0 && d.d[d.nd-1] == '0' { + d.nd-- + } + if d.nd == 0 { + d.dp = 0 + } + d.neg = f.neg + return true + } + upper.Normalize() + // Uniformize exponents. + if f.exp > upper.exp { + f.mant <<= uint(f.exp - upper.exp) + f.exp = upper.exp + } + if lower.exp > upper.exp { + lower.mant <<= uint(lower.exp - upper.exp) + lower.exp = upper.exp + } + + exp10 := frexp10Many(lower, f, upper) + // Take a safety margin due to rounding in frexp10Many, but we lose precision. + upper.mant++ + lower.mant-- + + // The shortest representation of f is either rounded up or down, but + // in any case, it is a truncation of upper. 
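+	// For intuition (illustrative note): Grisu3 emits the digits of upper
+	// and stops as soon as the emitted prefix provably lies inside the open
+	// interval (lower, upper); when the accumulated error makes that test
+	// ambiguous, the function returns false so the caller can fall back to
+	// the exact multiprecision path.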
+ shift := uint(-upper.exp) + integer := uint32(upper.mant >> shift) + fraction := upper.mant - (uint64(integer) << shift) + + // How far we can go down from upper until the result is wrong. + allowance := upper.mant - lower.mant + // How far we should go to get a very precise result. + targetDiff := upper.mant - f.mant + + // Count integral digits: there are at most 10. + var integerDigits int + for i, pow := 0, uint64(1); i < 20; i++ { + if pow > uint64(integer) { + integerDigits = i + break + } + pow *= 10 + } + for i := 0; i < integerDigits; i++ { + pow := uint64pow10[integerDigits-i-1] + digit := integer / uint32(pow) + d.d[i] = byte(digit + '0') + integer -= digit * uint32(pow) + // evaluate whether we should stop. + if currentDiff := uint64(integer)<> shift) + d.d[d.nd] = byte(digit + '0') + d.nd++ + fraction -= uint64(digit) << shift + if fraction < allowance*multiplier { + // We are in the admissible range. Note that if allowance is about to + // overflow, that is, allowance > 2^64/10, the condition is automatically + // true due to the limited range of fraction. + return adjustLastDigit(d, + fraction, targetDiff*multiplier, allowance*multiplier, + 1< maxDiff-ulpBinary { + // we went too far + return false + } + if d.nd == 1 && d.d[0] == '0' { + // the number has actually reached zero. + d.nd = 0 + d.dp = 0 + } + return true +} diff --git a/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/fold.go b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/fold.go new file mode 100644 index 000000000000..4d33e6f77d0a --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/fold.go @@ -0,0 +1,121 @@ +/** + * Copyright 2014 Paul Querna + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* Portions of this file are on Go stdlib's encoding/json/fold.go */ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package v1 + +import ( + "unicode/utf8" +) + +const ( + caseMask = ^byte(0x20) // Mask to ignore case in ASCII. + kelvin = '\u212a' + smallLongEss = '\u017f' +) + +// equalFoldRight is a specialization of bytes.EqualFold when s is +// known to be all ASCII (including punctuation), but contains an 's', +// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. +// See comments on foldFunc. +func EqualFoldRight(s, t []byte) bool { + for _, sb := range s { + if len(t) == 0 { + return false + } + tb := t[0] + if tb < utf8.RuneSelf { + if sb != tb { + sbUpper := sb & caseMask + if 'A' <= sbUpper && sbUpper <= 'Z' { + if sbUpper != tb&caseMask { + return false + } + } else { + return false + } + } + t = t[1:] + continue + } + // sb is ASCII and t is not. t must be either kelvin + // sign or long s; sb must be s, S, k, or K. 
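+		// For example, EqualFoldRight([]byte("K"), []byte("\u212a")) is
+		// true, since the Kelvin sign folds to 'k', while any other ASCII
+		// byte against U+212A falls into the default case and fails.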
+ tr, size := utf8.DecodeRune(t) + switch sb { + case 's', 'S': + if tr != smallLongEss { + return false + } + case 'k', 'K': + if tr != kelvin { + return false + } + default: + return false + } + t = t[size:] + + } + if len(t) > 0 { + return false + } + return true +} + +// asciiEqualFold is a specialization of bytes.EqualFold for use when +// s is all ASCII (but may contain non-letters) and contains no +// special-folding letters. +// See comments on foldFunc. +func AsciiEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, sb := range s { + tb := t[i] + if sb == tb { + continue + } + if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { + if sb&caseMask != tb&caseMask { + return false + } + } else { + return false + } + } + return true +} + +// simpleLetterEqualFold is a specialization of bytes.EqualFold for +// use when s is all ASCII letters (no underscores, etc) and also +// doesn't contain 'k', 'K', 's', or 'S'. +// See comments on foldFunc. +func SimpleLetterEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, b := range s { + if b&caseMask != t[i]&caseMask { + return false + } + } + return true +} diff --git a/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/ftoa.go b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/ftoa.go new file mode 100644 index 000000000000..360d6dbcf93d --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/ftoa.go @@ -0,0 +1,542 @@ +package v1 + +/** + * Copyright 2015 Paul Querna, Klaus Post + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* Most of this file are on Go stdlib's strconv/ftoa.go */ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +import "math" + +// TODO: move elsewhere? +type floatInfo struct { + mantbits uint + expbits uint + bias int +} + +var optimize = true // can change for testing + +var float32info = floatInfo{23, 8, -127} +var float64info = floatInfo{52, 11, -1023} + +// AppendFloat appends the string form of the floating-point number f, +// as generated by FormatFloat +func AppendFloat(dst EncodingBuffer, val float64, fmt byte, prec, bitSize int) { + var bits uint64 + var flt *floatInfo + switch bitSize { + case 32: + bits = uint64(math.Float32bits(float32(val))) + flt = &float32info + case 64: + bits = math.Float64bits(val) + flt = &float64info + default: + panic("strconv: illegal AppendFloat/FormatFloat bitSize") + } + + neg := bits>>(flt.expbits+flt.mantbits) != 0 + exp := int(bits>>flt.mantbits) & (1< digs.nd && digs.nd >= digs.dp { + eprec = digs.nd + } + // %e is used if the exponent from the conversion + // is less than -4 or greater than or equal to the precision. + // if precision was the shortest possible, use precision 6 for this decision. 
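+		// Example of the decision (editor's illustration): shortest
+		// formatting of 1234567 gives digs.dp = 7, so exp = 6 >= eprec = 6
+		// and the 'e' form "1.234567e+06" is chosen; 123456 has exp = 5 and
+		// stays in 'f' form as "123456".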
+ if shortest { + eprec = 6 + } + exp := digs.dp - 1 + if exp < -4 || exp >= eprec { + if prec > digs.nd { + prec = digs.nd + } + fmtE(dst, neg, digs, prec-1, fmt+'e'-'g') + return + } + if prec > digs.dp { + prec = digs.nd + } + fmtF(dst, neg, digs, max(prec-digs.dp, 0)) + return + } + + // unknown format + dst.Write([]byte{'%', fmt}) + return +} + +// Round d (= mant * 2^exp) to the shortest number of digits +// that will let the original floating point value be precisely +// reconstructed. Size is original floating point size (64 or 32). +func roundShortest(d *decimal, mant uint64, exp int, flt *floatInfo) { + // If mantissa is zero, the number is zero; stop now. + if mant == 0 { + d.nd = 0 + return + } + + // Compute upper and lower such that any decimal number + // between upper and lower (possibly inclusive) + // will round to the original floating point number. + + // We may see at once that the number is already shortest. + // + // Suppose d is not denormal, so that 2^exp <= d < 10^dp. + // The closest shorter number is at least 10^(dp-nd) away. + // The lower/upper bounds computed below are at distance + // at most 2^(exp-mantbits). + // + // So the number is already shortest if 10^(dp-nd) > 2^(exp-mantbits), + // or equivalently log2(10)*(dp-nd) > exp-mantbits. + // It is true if 332/100*(dp-nd) >= exp-mantbits (log2(10) > 3.32). + minexp := flt.bias + 1 // minimum possible exponent + if exp > minexp && 332*(d.dp-d.nd) >= 100*(exp-int(flt.mantbits)) { + // The number is already shortest. + return + } + + // d = mant << (exp - mantbits) + // Next highest floating point number is mant+1 << exp-mantbits. + // Our upper bound is halfway between, mant*2+1 << exp-mantbits-1. + upper := new(decimal) + upper.Assign(mant*2 + 1) + upper.Shift(exp - int(flt.mantbits) - 1) + + // d = mant << (exp - mantbits) + // Next lowest floating point number is mant-1 << exp-mantbits, + // unless mant-1 drops the significant bit and exp is not the minimum exp, + // in which case the next lowest is mant*2-1 << exp-mantbits-1. + // Either way, call it mantlo << explo-mantbits. + // Our lower bound is halfway between, mantlo*2+1 << explo-mantbits-1. + var mantlo uint64 + var explo int + if mant > 1< 0 { + dst.WriteByte('.') + i := 1 + m := min(d.nd, prec+1) + if i < m { + dst.Write(d.d[i:m]) + i = m + } + for i <= prec { + dst.WriteByte('0') + i++ + } + } + + // e± + dst.WriteByte(fmt) + exp := d.dp - 1 + if d.nd == 0 { // special case: 0 has exponent 0 + exp = 0 + } + if exp < 0 { + ch = '-' + exp = -exp + } else { + ch = '+' + } + dst.WriteByte(ch) + + // dd or ddd + switch { + case exp < 10: + dst.WriteByte('0') + dst.WriteByte(byte(exp) + '0') + case exp < 100: + dst.WriteByte(byte(exp/10) + '0') + dst.WriteByte(byte(exp%10) + '0') + default: + dst.WriteByte(byte(exp/100) + '0') + dst.WriteByte(byte(exp/10)%10 + '0') + dst.WriteByte(byte(exp%10) + '0') + } + + return +} + +// %f: -ddddddd.ddddd +func fmtF(dst EncodingBuffer, neg bool, d decimalSlice, prec int) { + // sign + if neg { + dst.WriteByte('-') + } + + // integer, padded with zeros as needed. 
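+	// Example (illustrative): digits "123" with d.dp = 5 print as "12300";
+	// with d.dp <= 0 the integer part is a lone '0' and every digit moves
+	// to the fraction.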
+ if d.dp > 0 { + m := min(d.nd, d.dp) + dst.Write(d.d[:m]) + for ; m < d.dp; m++ { + dst.WriteByte('0') + } + } else { + dst.WriteByte('0') + } + + // fraction + if prec > 0 { + dst.WriteByte('.') + for i := 0; i < prec; i++ { + ch := byte('0') + if j := d.dp + i; 0 <= j && j < d.nd { + ch = d.d[j] + } + dst.WriteByte(ch) + } + } + + return +} + +// %b: -ddddddddp±ddd +func fmtB(dst EncodingBuffer, neg bool, mant uint64, exp int, flt *floatInfo) { + // sign + if neg { + dst.WriteByte('-') + } + + // mantissa + formatBits(dst, mant, 10, false) + + // p + dst.WriteByte('p') + + // ±exponent + exp -= int(flt.mantbits) + if exp >= 0 { + dst.WriteByte('+') + } + formatBits(dst, uint64(exp), 10, exp < 0) + + return +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +// formatBits computes the string representation of u in the given base. +// If neg is set, u is treated as negative int64 value. +func formatBits(dst EncodingBuffer, u uint64, base int, neg bool) { + if base < 2 || base > len(digits) { + panic("strconv: illegal AppendInt/FormatInt base") + } + // 2 <= base && base <= len(digits) + + var a [64 + 1]byte // +1 for sign of 64bit value in base 2 + i := len(a) + + if neg { + u = -u + } + + // convert bits + if base == 10 { + // common case: use constants for / because + // the compiler can optimize it into a multiply+shift + + if ^uintptr(0)>>32 == 0 { + for u > uint64(^uintptr(0)) { + q := u / 1e9 + us := uintptr(u - q*1e9) // us % 1e9 fits into a uintptr + for j := 9; j > 0; j-- { + i-- + qs := us / 10 + a[i] = byte(us - qs*10 + '0') + us = qs + } + u = q + } + } + + // u guaranteed to fit into a uintptr + us := uintptr(u) + for us >= 10 { + i-- + q := us / 10 + a[i] = byte(us - q*10 + '0') + us = q + } + // u < 10 + i-- + a[i] = byte(us + '0') + + } else if s := shifts[base]; s > 0 { + // base is power of 2: use shifts and masks instead of / and % + b := uint64(base) + m := uintptr(b) - 1 // == 1<= b { + i-- + a[i] = digits[uintptr(u)&m] + u >>= s + } + // u < base + i-- + a[i] = digits[uintptr(u)] + + } else { + // general case + b := uint64(base) + for u >= b { + i-- + q := u / b + a[i] = digits[uintptr(u-q*b)] + u = q + } + // u < base + i-- + a[i] = digits[uintptr(u)] + } + + // add sign, if any + if neg { + i-- + a[i] = '-' + } + + dst.Write(a[i:]) +} diff --git a/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/internal/atof.go b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/internal/atof.go new file mode 100644 index 000000000000..46c1289ec4fb --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/internal/atof.go @@ -0,0 +1,936 @@ +/** + * Copyright 2014 Paul Querna + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* Portions of this file are on Go stdlib's strconv/atof.go */ + +// Copyright 2009 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +// decimal to binary floating point conversion. +// Algorithm: +// 1) Store input in multiprecision decimal. +// 2) Multiply/divide decimal by powers of two until in range [0.5, 1) +// 3) Multiply by 2^precision and round to get mantissa. + +import "math" + +var optimize = true // can change for testing + +func equalIgnoreCase(s1 []byte, s2 []byte) bool { + if len(s1) != len(s2) { + return false + } + for i := 0; i < len(s1); i++ { + c1 := s1[i] + if 'A' <= c1 && c1 <= 'Z' { + c1 += 'a' - 'A' + } + c2 := s2[i] + if 'A' <= c2 && c2 <= 'Z' { + c2 += 'a' - 'A' + } + if c1 != c2 { + return false + } + } + return true +} + +func special(s []byte) (f float64, ok bool) { + if len(s) == 0 { + return + } + switch s[0] { + default: + return + case '+': + if equalIgnoreCase(s, []byte("+inf")) || equalIgnoreCase(s, []byte("+infinity")) { + return math.Inf(1), true + } + case '-': + if equalIgnoreCase(s, []byte("-inf")) || equalIgnoreCase(s, []byte("-infinity")) { + return math.Inf(-1), true + } + case 'n', 'N': + if equalIgnoreCase(s, []byte("nan")) { + return math.NaN(), true + } + case 'i', 'I': + if equalIgnoreCase(s, []byte("inf")) || equalIgnoreCase(s, []byte("infinity")) { + return math.Inf(1), true + } + } + return +} + +func (b *decimal) set(s []byte) (ok bool) { + i := 0 + b.neg = false + b.trunc = false + + // optional sign + if i >= len(s) { + return + } + switch { + case s[i] == '+': + i++ + case s[i] == '-': + b.neg = true + i++ + } + + // digits + sawdot := false + sawdigits := false + for ; i < len(s); i++ { + switch { + case s[i] == '.': + if sawdot { + return + } + sawdot = true + b.dp = b.nd + continue + + case '0' <= s[i] && s[i] <= '9': + sawdigits = true + if s[i] == '0' && b.nd == 0 { // ignore leading zeros + b.dp-- + continue + } + if b.nd < len(b.d) { + b.d[b.nd] = s[i] + b.nd++ + } else if s[i] != '0' { + b.trunc = true + } + continue + } + break + } + if !sawdigits { + return + } + if !sawdot { + b.dp = b.nd + } + + // optional exponent moves decimal point. + // if we read a very large, very long number, + // just be sure to move the decimal point by + // a lot (say, 100000). it doesn't matter if it's + // not the exact number. + if i < len(s) && (s[i] == 'e' || s[i] == 'E') { + i++ + if i >= len(s) { + return + } + esign := 1 + if s[i] == '+' { + i++ + } else if s[i] == '-' { + i++ + esign = -1 + } + if i >= len(s) || s[i] < '0' || s[i] > '9' { + return + } + e := 0 + for ; i < len(s) && '0' <= s[i] && s[i] <= '9'; i++ { + if e < 10000 { + e = e*10 + int(s[i]) - '0' + } + } + b.dp += e * esign + } + + if i != len(s) { + return + } + + ok = true + return +} + +// readFloat reads a decimal mantissa and exponent from a float +// string representation. It sets ok to false if the number could +// not fit return types or is invalid. 
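+//
+// Example (editor's illustration): for input "1.25e2", readFloat yields
+// mantissa = 125, exp = 0 (three mantissa digits, with the decimal point
+// moved to position 3 by the exponent), neg = false, trunc = false, ok = true.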
+func readFloat(s []byte) (mantissa uint64, exp int, neg, trunc, ok bool) { + const uint64digits = 19 + i := 0 + + // optional sign + if i >= len(s) { + return + } + switch { + case s[i] == '+': + i++ + case s[i] == '-': + neg = true + i++ + } + + // digits + sawdot := false + sawdigits := false + nd := 0 + ndMant := 0 + dp := 0 + for ; i < len(s); i++ { + switch c := s[i]; true { + case c == '.': + if sawdot { + return + } + sawdot = true + dp = nd + continue + + case '0' <= c && c <= '9': + sawdigits = true + if c == '0' && nd == 0 { // ignore leading zeros + dp-- + continue + } + nd++ + if ndMant < uint64digits { + mantissa *= 10 + mantissa += uint64(c - '0') + ndMant++ + } else if s[i] != '0' { + trunc = true + } + continue + } + break + } + if !sawdigits { + return + } + if !sawdot { + dp = nd + } + + // optional exponent moves decimal point. + // if we read a very large, very long number, + // just be sure to move the decimal point by + // a lot (say, 100000). it doesn't matter if it's + // not the exact number. + if i < len(s) && (s[i] == 'e' || s[i] == 'E') { + i++ + if i >= len(s) { + return + } + esign := 1 + if s[i] == '+' { + i++ + } else if s[i] == '-' { + i++ + esign = -1 + } + if i >= len(s) || s[i] < '0' || s[i] > '9' { + return + } + e := 0 + for ; i < len(s) && '0' <= s[i] && s[i] <= '9'; i++ { + if e < 10000 { + e = e*10 + int(s[i]) - '0' + } + } + dp += e * esign + } + + if i != len(s) { + return + } + + exp = dp - ndMant + ok = true + return + +} + +// decimal power of ten to binary power of two. +var powtab = []int{1, 3, 6, 9, 13, 16, 19, 23, 26} + +func (d *decimal) floatBits(flt *floatInfo) (b uint64, overflow bool) { + var exp int + var mant uint64 + + // Zero is always a special case. + if d.nd == 0 { + mant = 0 + exp = flt.bias + goto out + } + + // Obvious overflow/underflow. + // These bounds are for 64-bit floats. + // Will have to change if we want to support 80-bit floats in the future. + if d.dp > 310 { + goto overflow + } + if d.dp < -330 { + // zero + mant = 0 + exp = flt.bias + goto out + } + + // Scale by powers of two until in range [0.5, 1.0) + exp = 0 + for d.dp > 0 { + var n int + if d.dp >= len(powtab) { + n = 27 + } else { + n = powtab[d.dp] + } + d.Shift(-n) + exp += n + } + for d.dp < 0 || d.dp == 0 && d.d[0] < '5' { + var n int + if -d.dp >= len(powtab) { + n = 27 + } else { + n = powtab[-d.dp] + } + d.Shift(n) + exp -= n + } + + // Our range is [0.5,1) but floating point range is [1,2). + exp-- + + // Minimum representable exponent is flt.bias+1. + // If the exponent is smaller, move it up and + // adjust d accordingly. + if exp < flt.bias+1 { + n := flt.bias + 1 - exp + d.Shift(-n) + exp += n + } + + if exp-flt.bias >= 1<>= 1 + exp++ + if exp-flt.bias >= 1<>float64info.mantbits != 0 { + return + } + f = float64(mantissa) + if neg { + f = -f + } + switch { + case exp == 0: + // an integer. + return f, true + // Exact integers are <= 10^15. + // Exact powers of ten are <= 10^22. + case exp > 0 && exp <= 15+22: // int * 10^k + // If exponent is big but number of digits is not, + // can move a few zeros into the integer part. + if exp > 22 { + f *= float64pow10[exp-22] + exp = 22 + } + if f > 1e15 || f < -1e15 { + // the exponent was really too large. + return + } + return f * float64pow10[exp], true + case exp < 0 && exp >= -22: // int / 10^k + return f / float64pow10[-exp], true + } + return +} + +// If possible to compute mantissa*10^exp to 32-bit float f exactly, +// entirely in floating-point math, do so, avoiding the machinery above. 
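+//
+// For example (illustrative): atof32exact(125, -2, false) returns 1.25, true,
+// because 125 and 10^2 are both exact float32 values and the quotient is
+// exact too; a mantissa of 2^23 or more fails the width check and falls
+// through to the slower paths.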
+func atof32exact(mantissa uint64, exp int, neg bool) (f float32, ok bool) { + if mantissa>>float32info.mantbits != 0 { + return + } + f = float32(mantissa) + if neg { + f = -f + } + switch { + case exp == 0: + return f, true + // Exact integers are <= 10^7. + // Exact powers of ten are <= 10^10. + case exp > 0 && exp <= 7+10: // int * 10^k + // If exponent is big but number of digits is not, + // can move a few zeros into the integer part. + if exp > 10 { + f *= float32pow10[exp-10] + exp = 10 + } + if f > 1e7 || f < -1e7 { + // the exponent was really too large. + return + } + return f * float32pow10[exp], true + case exp < 0 && exp >= -10: // int / 10^k + return f / float32pow10[-exp], true + } + return +} + +const fnParseFloat = "ParseFloat" + +func atof32(s []byte) (f float32, err error) { + if val, ok := special(s); ok { + return float32(val), nil + } + + if optimize { + // Parse mantissa and exponent. + mantissa, exp, neg, trunc, ok := readFloat(s) + if ok { + // Try pure floating-point arithmetic conversion. + if !trunc { + if f, ok := atof32exact(mantissa, exp, neg); ok { + return f, nil + } + } + // Try another fast path. + ext := new(extFloat) + if ok := ext.AssignDecimal(mantissa, exp, neg, trunc, &float32info); ok { + b, ovf := ext.floatBits(&float32info) + f = math.Float32frombits(uint32(b)) + if ovf { + err = rangeError(fnParseFloat, string(s)) + } + return f, err + } + } + } + var d decimal + if !d.set(s) { + return 0, syntaxError(fnParseFloat, string(s)) + } + b, ovf := d.floatBits(&float32info) + f = math.Float32frombits(uint32(b)) + if ovf { + err = rangeError(fnParseFloat, string(s)) + } + return f, err +} + +func atof64(s []byte) (f float64, err error) { + if val, ok := special(s); ok { + return val, nil + } + + if optimize { + // Parse mantissa and exponent. + mantissa, exp, neg, trunc, ok := readFloat(s) + if ok { + // Try pure floating-point arithmetic conversion. + if !trunc { + if f, ok := atof64exact(mantissa, exp, neg); ok { + return f, nil + } + } + // Try another fast path. + ext := new(extFloat) + if ok := ext.AssignDecimal(mantissa, exp, neg, trunc, &float64info); ok { + b, ovf := ext.floatBits(&float64info) + f = math.Float64frombits(b) + if ovf { + err = rangeError(fnParseFloat, string(s)) + } + return f, err + } + } + } + var d decimal + if !d.set(s) { + return 0, syntaxError(fnParseFloat, string(s)) + } + b, ovf := d.floatBits(&float64info) + f = math.Float64frombits(b) + if ovf { + err = rangeError(fnParseFloat, string(s)) + } + return f, err +} + +// ParseFloat converts the string s to a floating-point number +// with the precision specified by bitSize: 32 for float32, or 64 for float64. +// When bitSize=32, the result still has type float64, but it will be +// convertible to float32 without changing its value. +// +// If s is well-formed and near a valid floating point number, +// ParseFloat returns the nearest floating point number rounded +// using IEEE754 unbiased rounding. +// +// The errors that ParseFloat returns have concrete type *NumError +// and include err.Num = s. +// +// If s is not syntactically well-formed, ParseFloat returns err.Err = ErrSyntax. +// +// If s is syntactically well-formed but is more than 1/2 ULP +// away from the largest floating point number of the given size, +// ParseFloat returns f = ±Inf, err.Err = ErrRange. 
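+//
+// Usage sketch (illustrative only; assumes this internal package is
+// imported under the name "internal"):
+//
+//	f, err := internal.ParseFloat([]byte("3.14159"), 64)
+//	if err != nil {
+//		// err is a *NumError wrapping ErrSyntax or ErrRange.
+//	}
+//	_ = f // f == 3.14159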
+func ParseFloat(s []byte, bitSize int) (f float64, err error) {
+	if bitSize == 32 {
+		f1, err1 := atof32(s)
+		return float64(f1), err1
+	}
+	f1, err1 := atof64(s)
+	return f1, err1
+}
+
+// original: strconv/decimal.go, but not exported, and needed for ParseFloat.
+
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Multiprecision decimal numbers.
+// For floating-point formatting only; not general purpose.
+// Only operations are assign and (binary) left/right shift.
+// Can do binary floating point in multiprecision decimal precisely
+// because 2 divides 10; cannot do decimal floating point
+// in multiprecision binary precisely.
+
+type decimal struct {
+	d     [800]byte // digits
+	nd    int       // number of digits used
+	dp    int       // decimal point
+	neg   bool
+	trunc bool // discarded nonzero digits beyond d[:nd]
+}
+
+func (a *decimal) String() string {
+	n := 10 + a.nd
+	if a.dp > 0 {
+		n += a.dp
+	}
+	if a.dp < 0 {
+		n += -a.dp
+	}
+
+	buf := make([]byte, n)
+	w := 0
+	switch {
+	case a.nd == 0:
+		return "0"
+
+	case a.dp <= 0:
+		// zeros fill space between decimal point and digits
+		buf[w] = '0'
+		w++
+		buf[w] = '.'
+		w++
+		w += digitZero(buf[w : w+-a.dp])
+		w += copy(buf[w:], a.d[0:a.nd])
+
+	case a.dp < a.nd:
+		// decimal point in middle of digits
+		w += copy(buf[w:], a.d[0:a.dp])
+		buf[w] = '.'
+		w++
+		w += copy(buf[w:], a.d[a.dp:a.nd])
+
+	default:
+		// zeros fill space between digits and decimal point
+		w += copy(buf[w:], a.d[0:a.nd])
+		w += digitZero(buf[w : w+a.dp-a.nd])
+	}
+	return string(buf[0:w])
+}
+
+func digitZero(dst []byte) int {
+	for i := range dst {
+		dst[i] = '0'
+	}
+	return len(dst)
+}
+
+// trim trailing zeros from number.
+// (They are meaningless; the decimal point is tracked
+// independent of the number of digits.)
+func trim(a *decimal) {
+	for a.nd > 0 && a.d[a.nd-1] == '0' {
+		a.nd--
+	}
+	if a.nd == 0 {
+		a.dp = 0
+	}
+}
+
+// Assign v to a.
+func (a *decimal) Assign(v uint64) {
+	var buf [24]byte
+
+	// Write reversed decimal in buf.
+	n := 0
+	for v > 0 {
+		v1 := v / 10
+		v -= 10 * v1
+		buf[n] = byte(v + '0')
+		n++
+		v = v1
+	}
+
+	// Reverse again to produce forward decimal in a.d.
+	a.nd = 0
+	for n--; n >= 0; n-- {
+		a.d[a.nd] = buf[n]
+		a.nd++
+	}
+	a.dp = a.nd
+	trim(a)
+}
+
+// Maximum shift that we can do in one pass without overflow.
+// Signed int has 31 bits, and we have to be able to accommodate 9<<k.
+const maxShift = 27
+
+// Binary shift right (/ 2) by k bits. k <= maxShift to avoid overflow.
+func rightShift(a *decimal, k uint) {
+	r := 0 // read pointer
+	w := 0 // write pointer
+
+	// Pick up enough leading digits to cover first shift.
+	n := 0
+	for ; n>>k == 0; r++ {
+		if r >= a.nd {
+			if n == 0 {
+				// a == 0; shouldn't get here, but handle anyway.
+				a.nd = 0
+				return
+			}
+			for n>>k == 0 {
+				n = n * 10
+				r++
+			}
+			break
+		}
+		c := int(a.d[r])
+		n = n*10 + c - '0'
+	}
+	a.dp -= r - 1
+
+	// Pick up a digit, put down a digit.
+	for ; r < a.nd; r++ {
+		c := int(a.d[r])
+		dig := n >> k
+		n -= dig << k
+		a.d[w] = byte(dig + '0')
+		w++
+		n = n*10 + c - '0'
+	}
+
+	// Put down extra digits.
+	for n > 0 {
+		dig := n >> k
+		n -= dig << k
+		if w < len(a.d) {
+			a.d[w] = byte(dig + '0')
+			w++
+		} else if dig > 0 {
+			a.trunc = true
+		}
+		n = n * 10
+	}
+
+	a.nd = w
+	trim(a)
+}
+
+// Cheat sheet for left shift: table indexed by shift count giving
+// number of new digits that will be introduced by that shift.
+//
+// For example, leftcheats[4] = {2, "625"}. That means that
+// if we are shifting by 4 (multiplying by 16), it will add 2 digits
+// when the string prefix is "625" through "999", and one fewer digit
+// if the string prefix is "000" through "624".
+//
+// Credit for this trick goes to Ken.
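+
+// Worked example (added for illustration): shifting "15" left by 4 bits
+// multiplies by 16, giving 240. leftcheats[4] = {2, "625"}, and since "15"
+// is lexicographically less than "625", one fewer digit is added: delta
+// becomes 1, and the two-digit 15 indeed becomes the three-digit 240.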
+ +type leftCheat struct { + delta int // number of new digits + cutoff string // minus one digit if original < a. +} + +var leftcheats = []leftCheat{ + // Leading digits of 1/2^i = 5^i. + // 5^23 is not an exact 64-bit floating point number, + // so have to use bc for the math. + /* + seq 27 | sed 's/^/5^/' | bc | + awk 'BEGIN{ print "\tleftCheat{ 0, \"\" }," } + { + log2 = log(2)/log(10) + printf("\tleftCheat{ %d, \"%s\" },\t// * %d\n", + int(log2*NR+1), $0, 2**NR) + }' + */ + {0, ""}, + {1, "5"}, // * 2 + {1, "25"}, // * 4 + {1, "125"}, // * 8 + {2, "625"}, // * 16 + {2, "3125"}, // * 32 + {2, "15625"}, // * 64 + {3, "78125"}, // * 128 + {3, "390625"}, // * 256 + {3, "1953125"}, // * 512 + {4, "9765625"}, // * 1024 + {4, "48828125"}, // * 2048 + {4, "244140625"}, // * 4096 + {4, "1220703125"}, // * 8192 + {5, "6103515625"}, // * 16384 + {5, "30517578125"}, // * 32768 + {5, "152587890625"}, // * 65536 + {6, "762939453125"}, // * 131072 + {6, "3814697265625"}, // * 262144 + {6, "19073486328125"}, // * 524288 + {7, "95367431640625"}, // * 1048576 + {7, "476837158203125"}, // * 2097152 + {7, "2384185791015625"}, // * 4194304 + {7, "11920928955078125"}, // * 8388608 + {8, "59604644775390625"}, // * 16777216 + {8, "298023223876953125"}, // * 33554432 + {8, "1490116119384765625"}, // * 67108864 + {9, "7450580596923828125"}, // * 134217728 +} + +// Is the leading prefix of b lexicographically less than s? +func prefixIsLessThan(b []byte, s string) bool { + for i := 0; i < len(s); i++ { + if i >= len(b) { + return true + } + if b[i] != s[i] { + return b[i] < s[i] + } + } + return false +} + +// Binary shift left (/ 2) by k bits. k <= maxShift to avoid overflow. +func leftShift(a *decimal, k uint) { + delta := leftcheats[k].delta + if prefixIsLessThan(a.d[0:a.nd], leftcheats[k].cutoff) { + delta-- + } + + r := a.nd // read index + w := a.nd + delta // write index + n := 0 + + // Pick up a digit, put down a digit. + for r--; r >= 0; r-- { + n += (int(a.d[r]) - '0') << k + quo := n / 10 + rem := n - 10*quo + w-- + if w < len(a.d) { + a.d[w] = byte(rem + '0') + } else if rem != 0 { + a.trunc = true + } + n = quo + } + + // Put down extra digits. + for n > 0 { + quo := n / 10 + rem := n - 10*quo + w-- + if w < len(a.d) { + a.d[w] = byte(rem + '0') + } else if rem != 0 { + a.trunc = true + } + n = quo + } + + a.nd += delta + if a.nd >= len(a.d) { + a.nd = len(a.d) + } + a.dp += delta + trim(a) +} + +// Binary shift left (k > 0) or right (k < 0). +func (a *decimal) Shift(k int) { + switch { + case a.nd == 0: + // nothing to do: a == 0 + case k > 0: + for k > maxShift { + leftShift(a, maxShift) + k -= maxShift + } + leftShift(a, uint(k)) + case k < 0: + for k < -maxShift { + rightShift(a, maxShift) + k += maxShift + } + rightShift(a, uint(-k)) + } +} + +// If we chop a at nd digits, should we round up? +func shouldRoundUp(a *decimal, nd int) bool { + if nd < 0 || nd >= a.nd { + return false + } + if a.d[nd] == '5' && nd+1 == a.nd { // exactly halfway - round to even + // if we truncated, a little higher than what's recorded - always round up + if a.trunc { + return true + } + return nd > 0 && (a.d[nd-1]-'0')%2 != 0 + } + // not halfway - digit tells all + return a.d[nd] >= '5' +} + +// Round a to nd digits (or fewer). +// If nd is zero, it means we're rounding +// just to the left of the digits, as in +// 0.09 -> 0.1. 
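+//
+// Halfway cases round to even (illustrative): rounding "25" to one digit
+// gives "2" (preceding digit even), while "35" gives "4"; if nonzero digits
+// were truncated earlier (a.trunc), the true value exceeds the stored
+// digits, so a recorded halfway case always rounds up.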
+func (a *decimal) Round(nd int) { + if nd < 0 || nd >= a.nd { + return + } + if shouldRoundUp(a, nd) { + a.RoundUp(nd) + } else { + a.RoundDown(nd) + } +} + +// Round a down to nd digits (or fewer). +func (a *decimal) RoundDown(nd int) { + if nd < 0 || nd >= a.nd { + return + } + a.nd = nd + trim(a) +} + +// Round a up to nd digits (or fewer). +func (a *decimal) RoundUp(nd int) { + if nd < 0 || nd >= a.nd { + return + } + + // round up + for i := nd - 1; i >= 0; i-- { + c := a.d[i] + if c < '9' { // can stop after this digit + a.d[i]++ + a.nd = i + 1 + return + } + } + + // Number is all 9s. + // Change to single 1 with adjusted decimal point. + a.d[0] = '1' + a.nd = 1 + a.dp++ +} + +// Extract integer part, rounded appropriately. +// No guarantees about overflow. +func (a *decimal) RoundedInteger() uint64 { + if a.dp > 20 { + return 0xFFFFFFFFFFFFFFFF + } + var i int + n := uint64(0) + for i = 0; i < a.dp && i < a.nd; i++ { + n = n*10 + uint64(a.d[i]-'0') + } + for ; i < a.dp; i++ { + n *= 10 + } + if shouldRoundUp(a, a.dp) { + n++ + } + return n +} diff --git a/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/internal/atoi.go b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/internal/atoi.go new file mode 100644 index 000000000000..06eb2ec29f84 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/internal/atoi.go @@ -0,0 +1,213 @@ +/** + * Copyright 2014 Paul Querna + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* Portions of this file are on Go stdlib's strconv/atoi.go */ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "errors" + "strconv" +) + +// ErrRange indicates that a value is out of range for the target type. +var ErrRange = errors.New("value out of range") + +// ErrSyntax indicates that a value does not have the right syntax for the target type. +var ErrSyntax = errors.New("invalid syntax") + +// A NumError records a failed conversion. +type NumError struct { + Func string // the failing function (ParseBool, ParseInt, ParseUint, ParseFloat) + Num string // the input + Err error // the reason the conversion failed (ErrRange, ErrSyntax) +} + +func (e *NumError) Error() string { + return "strconv." + e.Func + ": " + "parsing " + strconv.Quote(e.Num) + ": " + e.Err.Error() +} + +func syntaxError(fn, str string) *NumError { + return &NumError{fn, str, ErrSyntax} +} + +func rangeError(fn, str string) *NumError { + return &NumError{fn, str, ErrRange} +} + +const intSize = 32 << uint(^uint(0)>>63) + +// IntSize is the size in bits of an int or uint value. +const IntSize = intSize + +// Return the first number n such that n*base >= 1<<64. 
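+// For example, cutoff64(10) = (1<<64-1)/10 + 1 = 1844674407370955162, the
+// smallest n with n*10 >= 1<<64; once the accumulator reaches this value,
+// the next multiply by the base is guaranteed to overflow.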
+func cutoff64(base int) uint64 {
+	if base < 2 {
+		return 0
+	}
+	return (1<<64-1)/uint64(base) + 1
+}
+
+// ParseUint is like ParseInt but for unsigned numbers, operating on []byte.
+func ParseUint(s []byte, base int, bitSize int) (n uint64, err error) {
+	var cutoff, maxVal uint64
+
+	if bitSize == 0 {
+		bitSize = int(IntSize)
+	}
+
+	s0 := s
+	switch {
+	case len(s) < 1:
+		err = ErrSyntax
+		goto Error
+
+	case 2 <= base && base <= 36:
+		// valid base; nothing to do
+
+	case base == 0:
+		// Look for octal, hex prefix.
+		switch {
+		case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
+			base = 16
+			s = s[2:]
+			if len(s) < 1 {
+				err = ErrSyntax
+				goto Error
+			}
+		case s[0] == '0':
+			base = 8
+		default:
+			base = 10
+		}
+
+	default:
+		err = errors.New("invalid base " + strconv.Itoa(base))
+		goto Error
+	}
+
+	n = 0
+	cutoff = cutoff64(base)
+	maxVal = 1<<uint(bitSize) - 1
+
+	for i := 0; i < len(s); i++ {
+		var v byte
+		d := s[i]
+		switch {
+		case '0' <= d && d <= '9':
+			v = d - '0'
+		case 'a' <= d && d <= 'z':
+			v = d - 'a' + 10
+		case 'A' <= d && d <= 'Z':
+			v = d - 'A' + 10
+		default:
+			n = 0
+			err = ErrSyntax
+			goto Error
+		}
+		if int(v) >= base {
+			n = 0
+			err = ErrSyntax
+			goto Error
+		}
+
+		if n >= cutoff {
+			// n*base overflows
+			n = 1<<64 - 1
+			err = ErrRange
+			goto Error
+		}
+		n *= uint64(base)
+
+		n1 := n + uint64(v)
+		if n1 < n || n1 > maxVal {
+			// n+v overflows
+			n = 1<<64 - 1
+			err = ErrRange
+			goto Error
+		}
+		n = n1
+	}
+
+	return n, nil
+
+Error:
+	return n, &NumError{"ParseUint", string(s0), err}
+}
+
+// ParseInt interprets a string s in the given base (2 to 36) and
+// returns the corresponding value i. If base == 0, the base is
+// implied by the string's prefix: base 16 for "0x", base 8 for
+// "0", and base 10 otherwise.
+//
+// The bitSize argument specifies the integer type
+// that the result must fit into. Bit sizes 0, 8, 16, 32, and 64
+// correspond to int, int8, int16, int32, and int64.
+//
+// The errors that ParseInt returns have concrete type *NumError
+// and include err.Num = s. If s is empty or contains invalid
+// digits, err.Err = ErrSyntax and the returned value is 0;
+// if the value corresponding to s cannot be represented by a
+// signed integer of the given size, err.Err = ErrRange and the
+// returned value is the maximum magnitude integer of the
+// appropriate bitSize and sign.
+func ParseInt(s []byte, base int, bitSize int) (i int64, err error) {
+	const fnParseInt = "ParseInt"
+
+	if bitSize == 0 {
+		bitSize = int(IntSize)
+	}
+
+	// Empty string bad.
+	if len(s) == 0 {
+		return 0, syntaxError(fnParseInt, string(s))
+	}
+
+	// Pick off leading sign.
+	s0 := s
+	neg := false
+	if s[0] == '+' {
+		s = s[1:]
+	} else if s[0] == '-' {
+		neg = true
+		s = s[1:]
+	}
+
+	// Convert unsigned and check range.
+	var un uint64
+	un, err = ParseUint(s, base, bitSize)
+	if err != nil && err.(*NumError).Err != ErrRange {
+		err.(*NumError).Func = fnParseInt
+		err.(*NumError).Num = string(s0)
+		return 0, err
+	}
+	cutoff := uint64(1 << uint(bitSize-1))
+	if !neg && un >= cutoff {
+		return int64(cutoff - 1), rangeError(fnParseInt, string(s0))
+	}
+	if neg && un > cutoff {
+		return -int64(cutoff), rangeError(fnParseInt, string(s0))
+	}
+	n := int64(un)
+	if neg {
+		n = -n
+	}
+	return n, nil
+}
diff --git a/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/internal/extfloat.go b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/internal/extfloat.go
new file mode 100644
index 000000000000..ab791085a420
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/internal/extfloat.go
@@ -0,0 +1,668 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package internal + +// An extFloat represents an extended floating-point number, with more +// precision than a float64. It does not try to save bits: the +// number represented by the structure is mant*(2^exp), with a negative +// sign if neg is true. +type extFloat struct { + mant uint64 + exp int + neg bool +} + +// Powers of ten taken from double-conversion library. +// http://code.google.com/p/double-conversion/ +const ( + firstPowerOfTen = -348 + stepPowerOfTen = 8 +) + +var smallPowersOfTen = [...]extFloat{ + {1 << 63, -63, false}, // 1 + {0xa << 60, -60, false}, // 1e1 + {0x64 << 57, -57, false}, // 1e2 + {0x3e8 << 54, -54, false}, // 1e3 + {0x2710 << 50, -50, false}, // 1e4 + {0x186a0 << 47, -47, false}, // 1e5 + {0xf4240 << 44, -44, false}, // 1e6 + {0x989680 << 40, -40, false}, // 1e7 +} + +var powersOfTen = [...]extFloat{ + {0xfa8fd5a0081c0288, -1220, false}, // 10^-348 + {0xbaaee17fa23ebf76, -1193, false}, // 10^-340 + {0x8b16fb203055ac76, -1166, false}, // 10^-332 + {0xcf42894a5dce35ea, -1140, false}, // 10^-324 + {0x9a6bb0aa55653b2d, -1113, false}, // 10^-316 + {0xe61acf033d1a45df, -1087, false}, // 10^-308 + {0xab70fe17c79ac6ca, -1060, false}, // 10^-300 + {0xff77b1fcbebcdc4f, -1034, false}, // 10^-292 + {0xbe5691ef416bd60c, -1007, false}, // 10^-284 + {0x8dd01fad907ffc3c, -980, false}, // 10^-276 + {0xd3515c2831559a83, -954, false}, // 10^-268 + {0x9d71ac8fada6c9b5, -927, false}, // 10^-260 + {0xea9c227723ee8bcb, -901, false}, // 10^-252 + {0xaecc49914078536d, -874, false}, // 10^-244 + {0x823c12795db6ce57, -847, false}, // 10^-236 + {0xc21094364dfb5637, -821, false}, // 10^-228 + {0x9096ea6f3848984f, -794, false}, // 10^-220 + {0xd77485cb25823ac7, -768, false}, // 10^-212 + {0xa086cfcd97bf97f4, -741, false}, // 10^-204 + {0xef340a98172aace5, -715, false}, // 10^-196 + {0xb23867fb2a35b28e, -688, false}, // 10^-188 + {0x84c8d4dfd2c63f3b, -661, false}, // 10^-180 + {0xc5dd44271ad3cdba, -635, false}, // 10^-172 + {0x936b9fcebb25c996, -608, false}, // 10^-164 + {0xdbac6c247d62a584, -582, false}, // 10^-156 + {0xa3ab66580d5fdaf6, -555, false}, // 10^-148 + {0xf3e2f893dec3f126, -529, false}, // 10^-140 + {0xb5b5ada8aaff80b8, -502, false}, // 10^-132 + {0x87625f056c7c4a8b, -475, false}, // 10^-124 + {0xc9bcff6034c13053, -449, false}, // 10^-116 + {0x964e858c91ba2655, -422, false}, // 10^-108 + {0xdff9772470297ebd, -396, false}, // 10^-100 + {0xa6dfbd9fb8e5b88f, -369, false}, // 10^-92 + {0xf8a95fcf88747d94, -343, false}, // 10^-84 + {0xb94470938fa89bcf, -316, false}, // 10^-76 + {0x8a08f0f8bf0f156b, -289, false}, // 10^-68 + {0xcdb02555653131b6, -263, false}, // 10^-60 + {0x993fe2c6d07b7fac, -236, false}, // 10^-52 + {0xe45c10c42a2b3b06, -210, false}, // 10^-44 + {0xaa242499697392d3, -183, false}, // 10^-36 + {0xfd87b5f28300ca0e, -157, false}, // 10^-28 + {0xbce5086492111aeb, -130, false}, // 10^-20 + {0x8cbccc096f5088cc, -103, false}, // 10^-12 + {0xd1b71758e219652c, -77, false}, // 10^-4 + {0x9c40000000000000, -50, false}, // 10^4 + {0xe8d4a51000000000, -24, false}, // 10^12 + {0xad78ebc5ac620000, 3, false}, // 10^20 + {0x813f3978f8940984, 30, false}, // 10^28 + {0xc097ce7bc90715b3, 56, false}, // 10^36 + {0x8f7e32ce7bea5c70, 83, false}, // 10^44 + {0xd5d238a4abe98068, 109, false}, // 10^52 + {0x9f4f2726179a2245, 136, false}, // 10^60 + {0xed63a231d4c4fb27, 162, false}, // 10^68 + {0xb0de65388cc8ada8, 189, false}, // 10^76 + {0x83c7088e1aab65db, 216, false}, // 10^84 + {0xc45d1df942711d9a, 242, false}, // 10^92 + {0x924d692ca61be758, 269, false}, // 10^100 + 
{0xda01ee641a708dea, 295, false}, // 10^108 + {0xa26da3999aef774a, 322, false}, // 10^116 + {0xf209787bb47d6b85, 348, false}, // 10^124 + {0xb454e4a179dd1877, 375, false}, // 10^132 + {0x865b86925b9bc5c2, 402, false}, // 10^140 + {0xc83553c5c8965d3d, 428, false}, // 10^148 + {0x952ab45cfa97a0b3, 455, false}, // 10^156 + {0xde469fbd99a05fe3, 481, false}, // 10^164 + {0xa59bc234db398c25, 508, false}, // 10^172 + {0xf6c69a72a3989f5c, 534, false}, // 10^180 + {0xb7dcbf5354e9bece, 561, false}, // 10^188 + {0x88fcf317f22241e2, 588, false}, // 10^196 + {0xcc20ce9bd35c78a5, 614, false}, // 10^204 + {0x98165af37b2153df, 641, false}, // 10^212 + {0xe2a0b5dc971f303a, 667, false}, // 10^220 + {0xa8d9d1535ce3b396, 694, false}, // 10^228 + {0xfb9b7cd9a4a7443c, 720, false}, // 10^236 + {0xbb764c4ca7a44410, 747, false}, // 10^244 + {0x8bab8eefb6409c1a, 774, false}, // 10^252 + {0xd01fef10a657842c, 800, false}, // 10^260 + {0x9b10a4e5e9913129, 827, false}, // 10^268 + {0xe7109bfba19c0c9d, 853, false}, // 10^276 + {0xac2820d9623bf429, 880, false}, // 10^284 + {0x80444b5e7aa7cf85, 907, false}, // 10^292 + {0xbf21e44003acdd2d, 933, false}, // 10^300 + {0x8e679c2f5e44ff8f, 960, false}, // 10^308 + {0xd433179d9c8cb841, 986, false}, // 10^316 + {0x9e19db92b4e31ba9, 1013, false}, // 10^324 + {0xeb96bf6ebadf77d9, 1039, false}, // 10^332 + {0xaf87023b9bf0ee6b, 1066, false}, // 10^340 +} + +// floatBits returns the bits of the float64 that best approximates +// the extFloat passed as receiver. Overflow is set to true if +// the resulting float64 is ±Inf. +func (f *extFloat) floatBits(flt *floatInfo) (bits uint64, overflow bool) { + f.Normalize() + + exp := f.exp + 63 + + // Exponent too small. + if exp < flt.bias+1 { + n := flt.bias + 1 - exp + f.mant >>= uint(n) + exp += n + } + + // Extract 1+flt.mantbits bits from the 64-bit mantissa. + mant := f.mant >> (63 - flt.mantbits) + if f.mant&(1<<(62-flt.mantbits)) != 0 { + // Round up. + mant += 1 + } + + // Rounding might have added a bit; shift down. + if mant == 2<>= 1 + exp++ + } + + // Infinities. + if exp-flt.bias >= 1<>uint(-f.exp))<>= uint(-f.exp) + f.exp = 0 + return *f, *f + } + expBiased := exp - flt.bias + + upper = extFloat{mant: 2*f.mant + 1, exp: f.exp - 1, neg: f.neg} + if mant != 1<>(64-32) == 0 { + mant <<= 32 + exp -= 32 + } + if mant>>(64-16) == 0 { + mant <<= 16 + exp -= 16 + } + if mant>>(64-8) == 0 { + mant <<= 8 + exp -= 8 + } + if mant>>(64-4) == 0 { + mant <<= 4 + exp -= 4 + } + if mant>>(64-2) == 0 { + mant <<= 2 + exp -= 2 + } + if mant>>(64-1) == 0 { + mant <<= 1 + exp -= 1 + } + shift = uint(f.exp - exp) + f.mant, f.exp = mant, exp + return +} + +// Multiply sets f to the product f*g: the result is correctly rounded, +// but not normalized. +func (f *extFloat) Multiply(g extFloat) { + fhi, flo := f.mant>>32, uint64(uint32(f.mant)) + ghi, glo := g.mant>>32, uint64(uint32(g.mant)) + + // Cross products. + cross1 := fhi * glo + cross2 := flo * ghi + + // f.mant*g.mant is fhi*ghi << 64 + (cross1+cross2) << 32 + flo*glo + f.mant = fhi*ghi + (cross1 >> 32) + (cross2 >> 32) + rem := uint64(uint32(cross1)) + uint64(uint32(cross2)) + ((flo * glo) >> 32) + // Round up. + rem += (1 << 31) + + f.mant += (rem >> 32) + f.exp = f.exp + g.exp + 64 +} + +var uint64pow10 = [...]uint64{ + 1, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, + 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, +} + +// AssignDecimal sets f to an approximate value mantissa*10^exp. 
It +// returns true if the value represented by f is guaranteed to be the +// best approximation of d after being rounded to a float64 or +// float32 depending on flt. +func (f *extFloat) AssignDecimal(mantissa uint64, exp10 int, neg bool, trunc bool, flt *floatInfo) (ok bool) { + const uint64digits = 19 + const errorscale = 8 + errors := 0 // An upper bound for error, computed in errorscale*ulp. + if trunc { + // the decimal number was truncated. + errors += errorscale / 2 + } + + f.mant = mantissa + f.exp = 0 + f.neg = neg + + // Multiply by powers of ten. + i := (exp10 - firstPowerOfTen) / stepPowerOfTen + if exp10 < firstPowerOfTen || i >= len(powersOfTen) { + return false + } + adjExp := (exp10 - firstPowerOfTen) % stepPowerOfTen + + // We multiply by exp%step + if adjExp < uint64digits && mantissa < uint64pow10[uint64digits-adjExp] { + // We can multiply the mantissa exactly. + f.mant *= uint64pow10[adjExp] + f.Normalize() + } else { + f.Normalize() + f.Multiply(smallPowersOfTen[adjExp]) + errors += errorscale / 2 + } + + // We multiply by 10 to the exp - exp%step. + f.Multiply(powersOfTen[i]) + if errors > 0 { + errors += 1 + } + errors += errorscale / 2 + + // Normalize + shift := f.Normalize() + errors <<= shift + + // Now f is a good approximation of the decimal. + // Check whether the error is too large: that is, if the mantissa + // is perturbated by the error, the resulting float64 will change. + // The 64 bits mantissa is 1 + 52 bits for float64 + 11 extra bits. + // + // In many cases the approximation will be good enough. + denormalExp := flt.bias - 63 + var extrabits uint + if f.exp <= denormalExp { + // f.mant * 2^f.exp is smaller than 2^(flt.bias+1). + extrabits = uint(63 - flt.mantbits + 1 + uint(denormalExp-f.exp)) + } else { + extrabits = uint(63 - flt.mantbits) + } + + halfway := uint64(1) << (extrabits - 1) + mant_extra := f.mant & (1< expMax: + i-- + default: + break Loop + } + } + // Apply the desired decimal shift on f. It will have exponent + // in the desired range. This is multiplication by 10^-exp10. + f.Multiply(powersOfTen[i]) + + return -(firstPowerOfTen + i*stepPowerOfTen), i +} + +// frexp10Many applies a common shift by a power of ten to a, b, c. +func frexp10Many(a, b, c *extFloat) (exp10 int) { + exp10, i := c.frexp10() + a.Multiply(powersOfTen[i]) + b.Multiply(powersOfTen[i]) + return +} + +// FixedDecimal stores in d the first n significant digits +// of the decimal representation of f. It returns false +// if it cannot be sure of the answer. +func (f *extFloat) FixedDecimal(d *decimalSlice, n int) bool { + if f.mant == 0 { + d.nd = 0 + d.dp = 0 + d.neg = f.neg + return true + } + if n == 0 { + panic("strconv: internal error: extFloat.FixedDecimal called with n == 0") + } + // Multiply by an appropriate power of ten to have a reasonable + // number to process. + f.Normalize() + exp10, _ := f.frexp10() + + shift := uint(-f.exp) + integer := uint32(f.mant >> shift) + fraction := f.mant - (uint64(integer) << shift) + ε := uint64(1) // ε is the uncertainty we have on the mantissa of f. + + // Write exactly n digits to d. + needed := n // how many digits are left to write. + integerDigits := 0 // the number of decimal digits of integer. + pow10 := uint64(1) // the power of ten by which f was scaled. + for i, pow := 0, uint64(1); i < 20; i++ { + if pow > uint64(integer) { + integerDigits = i + break + } + pow *= 10 + } + rest := integer + if integerDigits > needed { + // the integral part is already large, trim the last digits. 
+ pow10 = uint64pow10[integerDigits-needed] + integer /= uint32(pow10) + rest -= integer * uint32(pow10) + } else { + rest = 0 + } + + // Write the digits of integer: the digits of rest are omitted. + var buf [32]byte + pos := len(buf) + for v := integer; v > 0; { + v1 := v / 10 + v -= 10 * v1 + pos-- + buf[pos] = byte(v + '0') + v = v1 + } + for i := pos; i < len(buf); i++ { + d.d[i-pos] = buf[i] + } + nd := len(buf) - pos + d.nd = nd + d.dp = integerDigits + exp10 + needed -= nd + + if needed > 0 { + if rest != 0 || pow10 != 1 { + panic("strconv: internal error, rest != 0 but needed > 0") + } + // Emit digits for the fractional part. Each time, 10*fraction + // fits in a uint64 without overflow. + for needed > 0 { + fraction *= 10 + ε *= 10 // the uncertainty scales as we multiply by ten. + if 2*ε > 1<> shift + d.d[nd] = byte(digit + '0') + fraction -= digit << shift + nd++ + needed-- + } + d.nd = nd + } + + // We have written a truncation of f (a numerator / 10^d.dp). The remaining part + // can be interpreted as a small number (< 1) to be added to the last digit of the + // numerator. + // + // If rest > 0, the amount is: + // (rest< 0 guarantees that pow10 << shift does not overflow a uint64. + // + // If rest = 0, pow10 == 1 and the amount is + // fraction / (1 << shift) + // fraction being known with a ±ε uncertainty. + // + // We pass this information to the rounding routine for adjustment. + + ok := adjustLastDigitFixed(d, uint64(rest)<= 0; i-- { + if d.d[i] != '0' { + d.nd = i + 1 + break + } + } + return true +} + +// adjustLastDigitFixed assumes d contains the representation of the integral part +// of some number, whose fractional part is num / (den << shift). The numerator +// num is only known up to an uncertainty of size ε, assumed to be less than +// (den << shift)/2. +// +// It will increase the last digit by one to account for correct rounding, typically +// when the fractional part is greater than 1/2, and will return false if ε is such +// that no correct answer can be given. +func adjustLastDigitFixed(d *decimalSlice, num, den uint64, shift uint, ε uint64) bool { + if num > den< den< den< (den< den<= 0; i-- { + if d.d[i] == '9' { + d.nd-- + } else { + break + } + } + if i < 0 { + d.d[0] = '1' + d.nd = 1 + d.dp++ + } else { + d.d[i]++ + } + return true + } + return false +} + +// ShortestDecimal stores in d the shortest decimal representation of f +// which belongs to the open interval (lower, upper), where f is supposed +// to lie. It returns false whenever the result is unsure. The implementation +// uses the Grisu3 algorithm. +func (f *extFloat) ShortestDecimal(d *decimalSlice, lower, upper *extFloat) bool { + if f.mant == 0 { + d.nd = 0 + d.dp = 0 + d.neg = f.neg + return true + } + if f.exp == 0 && *lower == *f && *lower == *upper { + // an exact integer. + var buf [24]byte + n := len(buf) - 1 + for v := f.mant; v > 0; { + v1 := v / 10 + v -= 10 * v1 + buf[n] = byte(v + '0') + n-- + v = v1 + } + nd := len(buf) - n - 1 + for i := 0; i < nd; i++ { + d.d[i] = buf[n+1+i] + } + d.nd, d.dp = nd, nd + for d.nd > 0 && d.d[d.nd-1] == '0' { + d.nd-- + } + if d.nd == 0 { + d.dp = 0 + } + d.neg = f.neg + return true + } + upper.Normalize() + // Uniformize exponents. + if f.exp > upper.exp { + f.mant <<= uint(f.exp - upper.exp) + f.exp = upper.exp + } + if lower.exp > upper.exp { + lower.mant <<= uint(lower.exp - upper.exp) + lower.exp = upper.exp + } + + exp10 := frexp10Many(lower, f, upper) + // Take a safety margin due to rounding in frexp10Many, but we lose precision. 
+	upper.mant++
+	lower.mant--
+
+	// The shortest representation of f is either rounded up or down, but
+	// in any case, it is a truncation of upper.
+	shift := uint(-upper.exp)
+	integer := uint32(upper.mant >> shift)
+	fraction := upper.mant - (uint64(integer) << shift)
+
+	// How far we can go down from upper until the result is wrong.
+	allowance := upper.mant - lower.mant
+	// How far we should go to get a very precise result.
+	targetDiff := upper.mant - f.mant
+
+	// Count integral digits: there are at most 10.
+	var integerDigits int
+	for i, pow := 0, uint64(1); i < 20; i++ {
+		if pow > uint64(integer) {
+			integerDigits = i
+			break
+		}
+		pow *= 10
+	}
+	for i := 0; i < integerDigits; i++ {
+		pow := uint64pow10[integerDigits-i-1]
+		digit := integer / uint32(pow)
+		d.d[i] = byte(digit + '0')
+		integer -= digit * uint32(pow)
+		// evaluate whether we should stop.
+		if currentDiff := uint64(integer)<<shift + fraction; currentDiff < allowance {
+			d.nd = i + 1
+			return adjustLastDigit(d, currentDiff, targetDiff, allowance, pow<<shift, 2)
+		}
+	}
+	d.nd = integerDigits
+	d.dp = d.nd + exp10
+
+	// Compute digits of the fractional part. At each step fraction does not
+	// overflow: fraction < 1<<shift and shift <= 60, so 10*fraction fits
+	// in a uint64.
+	multiplier := uint64(1)
+	for {
+		fraction *= 10
+		multiplier *= 10
+		digit := int(fraction >> shift)
+		d.d[d.nd] = byte(digit + '0')
+		d.nd++
+		fraction -= uint64(digit) << shift
+		if fraction < allowance*multiplier {
+			// We are in the admissible range. Note that if allowance is about to
+			// overflow, that is, allowance > 2^64/10, the condition is automatically
+			// true due to the limited range of fraction.
+			return adjustLastDigit(d,
+				fraction, targetDiff*multiplier, allowance*multiplier,
+				1<<shift*multiplier, multiplier*2)
+		}
+	}
+}
+
+// adjustLastDigit modifies d = x-currentDiff*ε, to get closest to
+// d = x-targetDiff*ε, without becoming smaller than x-maxDiff*ε.
+// It assumes that a decimal digit is worth ulpDecimal*ε, and that
+// all data is known with an error estimate of ulpBinary*ε.
+func adjustLastDigit(d *decimalSlice, currentDiff, targetDiff, maxDiff, ulpDecimal, ulpBinary uint64) bool {
+	if ulpDecimal < 2*ulpBinary {
+		// Approximation is too wide.
+		return false
+	}
+	for currentDiff+ulpDecimal/2+ulpBinary < targetDiff {
+		d.d[d.nd-1]--
+		currentDiff += ulpDecimal
+	}
+	if currentDiff+ulpDecimal <= targetDiff+ulpDecimal/2+ulpBinary {
+		// we have two choices, and don't know what to do.
+		return false
+	}
+	if currentDiff < ulpBinary || currentDiff > maxDiff-ulpBinary {
+		// we went too far
+		return false
+	}
+	if d.nd == 1 && d.d[0] == '0' {
+		// the number has actually reached zero.
+		d.nd = 0
+		d.dp = 0
+	}
+	return true
+}
diff --git a/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/internal/ftoa.go b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/internal/ftoa.go
new file mode 100644
index 000000000000..253f83b45a14
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/internal/ftoa.go
@@ -0,0 +1,475 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Binary to decimal floating point conversion.
+// Algorithm:
+//   1) store mantissa in multiprecision decimal
+//   2) shift decimal by exponent
+//   3) read digits out & format
+
+package internal
+
+import "math"
+
+// TODO: move elsewhere?
+type floatInfo struct {
+	mantbits uint
+	expbits  uint
+	bias     int
+}
+
+var float32info = floatInfo{23, 8, -127}
+var float64info = floatInfo{52, 11, -1023}
+
+// FormatFloat converts the floating-point number f to a string,
+// according to the format fmt and precision prec. It rounds the
+// result assuming that the original was obtained from a floating-point
+// value of bitSize bits (32 for float32, 64 for float64).
+//
+// The format fmt is one of
+// 'b' (-ddddp±ddd, a binary exponent),
+// 'e' (-d.dddde±dd, a decimal exponent),
+// 'E' (-d.ddddE±dd, a decimal exponent),
+// 'f' (-ddd.dddd, no exponent),
+// 'g' ('e' for large exponents, 'f' otherwise), or
+// 'G' ('E' for large exponents, 'f' otherwise).
+//
+// The precision prec controls the number of digits
+// (excluding the exponent) printed by the 'e', 'E', 'f', 'g', and 'G' formats.
+// For 'e', 'E', and 'f' it is the number of digits after the decimal point.
+// For 'g' and 'G' it is the total number of digits.
+// The special precision -1 uses the smallest number of digits
+// necessary such that ParseFloat will return f exactly.
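The doc comment above matches the exported strconv API that this vendored `internal` package mirrors, so the format and precision semantics are easiest to check against `strconv.FormatFloat` itself. A small illustration — this program is not part of the diff; it only demonstrates the behaviour the comment describes, including the `prec == -1` shortest-round-trip mode computed by the Grisu3 `ShortestDecimal` path above:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	pi := 3.1415926535

	fmt.Println(strconv.FormatFloat(pi, 'e', 4, 64)) // "3.1416e+00": 4 digits after the point, decimal exponent
	fmt.Println(strconv.FormatFloat(pi, 'f', 2, 64)) // "3.14": plain decimal notation
	fmt.Println(strconv.FormatFloat(pi, 'g', 3, 64)) // "3.14": for 'g', prec counts total significant digits

	// prec == -1 asks for the shortest string that still parses back to
	// exactly the same float64.
	fmt.Println(strconv.FormatFloat(0.1, 'g', -1, 64)) // "0.1"

	// 'b' exposes the raw binary mantissa and exponent: 0.5 == 4503599627370496 * 2^-53.
	fmt.Println(strconv.FormatFloat(0.5, 'b', -1, 64)) // "4503599627370496p-53"
}
```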
+func formatFloat(f float64, fmt byte, prec, bitSize int) string { + return string(genericFtoa(make([]byte, 0, max(prec+4, 24)), f, fmt, prec, bitSize)) +} + +// AppendFloat appends the string form of the floating-point number f, +// as generated by FormatFloat, to dst and returns the extended buffer. +func appendFloat(dst []byte, f float64, fmt byte, prec int, bitSize int) []byte { + return genericFtoa(dst, f, fmt, prec, bitSize) +} + +func genericFtoa(dst []byte, val float64, fmt byte, prec, bitSize int) []byte { + var bits uint64 + var flt *floatInfo + switch bitSize { + case 32: + bits = uint64(math.Float32bits(float32(val))) + flt = &float32info + case 64: + bits = math.Float64bits(val) + flt = &float64info + default: + panic("strconv: illegal AppendFloat/FormatFloat bitSize") + } + + neg := bits>>(flt.expbits+flt.mantbits) != 0 + exp := int(bits>>flt.mantbits) & (1< digs.nd && digs.nd >= digs.dp { + eprec = digs.nd + } + // %e is used if the exponent from the conversion + // is less than -4 or greater than or equal to the precision. + // if precision was the shortest possible, use precision 6 for this decision. + if shortest { + eprec = 6 + } + exp := digs.dp - 1 + if exp < -4 || exp >= eprec { + if prec > digs.nd { + prec = digs.nd + } + return fmtE(dst, neg, digs, prec-1, fmt+'e'-'g') + } + if prec > digs.dp { + prec = digs.nd + } + return fmtF(dst, neg, digs, max(prec-digs.dp, 0)) + } + + // unknown format + return append(dst, '%', fmt) +} + +// Round d (= mant * 2^exp) to the shortest number of digits +// that will let the original floating point value be precisely +// reconstructed. Size is original floating point size (64 or 32). +func roundShortest(d *decimal, mant uint64, exp int, flt *floatInfo) { + // If mantissa is zero, the number is zero; stop now. + if mant == 0 { + d.nd = 0 + return + } + + // Compute upper and lower such that any decimal number + // between upper and lower (possibly inclusive) + // will round to the original floating point number. + + // We may see at once that the number is already shortest. + // + // Suppose d is not denormal, so that 2^exp <= d < 10^dp. + // The closest shorter number is at least 10^(dp-nd) away. + // The lower/upper bounds computed below are at distance + // at most 2^(exp-mantbits). + // + // So the number is already shortest if 10^(dp-nd) > 2^(exp-mantbits), + // or equivalently log2(10)*(dp-nd) > exp-mantbits. + // It is true if 332/100*(dp-nd) >= exp-mantbits (log2(10) > 3.32). + minexp := flt.bias + 1 // minimum possible exponent + if exp > minexp && 332*(d.dp-d.nd) >= 100*(exp-int(flt.mantbits)) { + // The number is already shortest. + return + } + + // d = mant << (exp - mantbits) + // Next highest floating point number is mant+1 << exp-mantbits. + // Our upper bound is halfway between, mant*2+1 << exp-mantbits-1. + upper := new(decimal) + upper.Assign(mant*2 + 1) + upper.Shift(exp - int(flt.mantbits) - 1) + + // d = mant << (exp - mantbits) + // Next lowest floating point number is mant-1 << exp-mantbits, + // unless mant-1 drops the significant bit and exp is not the minimum exp, + // in which case the next lowest is mant*2-1 << exp-mantbits-1. + // Either way, call it mantlo << explo-mantbits. + // Our lower bound is halfway between, mantlo*2+1 << explo-mantbits-1. 
+ var mantlo uint64 + var explo int + if mant > 1< 0 { + dst = append(dst, '.') + i := 1 + m := d.nd + prec + 1 - max(d.nd, prec+1) + for i < m { + dst = append(dst, d.d[i]) + i++ + } + for i <= prec { + dst = append(dst, '0') + i++ + } + } + + // e± + dst = append(dst, fmt) + exp := d.dp - 1 + if d.nd == 0 { // special case: 0 has exponent 0 + exp = 0 + } + if exp < 0 { + ch = '-' + exp = -exp + } else { + ch = '+' + } + dst = append(dst, ch) + + // dddd + var buf [3]byte + i := len(buf) + for exp >= 10 { + i-- + buf[i] = byte(exp%10 + '0') + exp /= 10 + } + // exp < 10 + i-- + buf[i] = byte(exp + '0') + + switch i { + case 0: + dst = append(dst, buf[0], buf[1], buf[2]) + case 1: + dst = append(dst, buf[1], buf[2]) + case 2: + // leading zeroes + dst = append(dst, '0', buf[2]) + } + return dst +} + +// %f: -ddddddd.ddddd +func fmtF(dst []byte, neg bool, d decimalSlice, prec int) []byte { + // sign + if neg { + dst = append(dst, '-') + } + + // integer, padded with zeros as needed. + if d.dp > 0 { + var i int + for i = 0; i < d.dp && i < d.nd; i++ { + dst = append(dst, d.d[i]) + } + for ; i < d.dp; i++ { + dst = append(dst, '0') + } + } else { + dst = append(dst, '0') + } + + // fraction + if prec > 0 { + dst = append(dst, '.') + for i := 0; i < prec; i++ { + ch := byte('0') + if j := d.dp + i; 0 <= j && j < d.nd { + ch = d.d[j] + } + dst = append(dst, ch) + } + } + + return dst +} + +// %b: -ddddddddp+ddd +func fmtB(dst []byte, neg bool, mant uint64, exp int, flt *floatInfo) []byte { + var buf [50]byte + w := len(buf) + exp -= int(flt.mantbits) + esign := byte('+') + if exp < 0 { + esign = '-' + exp = -exp + } + n := 0 + for exp > 0 || n < 1 { + n++ + w-- + buf[w] = byte(exp%10 + '0') + exp /= 10 + } + w-- + buf[w] = esign + w-- + buf[w] = 'p' + n = 0 + for mant > 0 || n < 1 { + n++ + w-- + buf[w] = byte(mant%10 + '0') + mant /= 10 + } + if neg { + w-- + buf[w] = '-' + } + return append(dst, buf[w:]...) +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} diff --git a/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/iota.go b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/iota.go new file mode 100644 index 000000000000..3e50f0c41801 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/iota.go @@ -0,0 +1,161 @@ +/** + * Copyright 2014 Paul Querna + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* Portions of this file are on Go stdlib's strconv/iota.go */ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package v1
+
+import (
+	"io"
+)
+
+const (
+	digits   = "0123456789abcdefghijklmnopqrstuvwxyz"
+	digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
+	digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999"
)
+
+var shifts = [len(digits) + 1]uint{
+	1 << 1: 1,
+	1 << 2: 2,
+	1 << 3: 3,
+	1 << 4: 4,
+	1 << 5: 5,
+}
+
+var smallNumbers = [][]byte{
+	[]byte("0"),
+	[]byte("1"),
+	[]byte("2"),
+	[]byte("3"),
+	[]byte("4"),
+	[]byte("5"),
+	[]byte("6"),
+	[]byte("7"),
+	[]byte("8"),
+	[]byte("9"),
+	[]byte("10"),
+}
+
+type FormatBitsWriter interface {
+	io.Writer
+	io.ByteWriter
+}
+
+type FormatBitsScratch struct{}
+
+//
+// DEPRECATED: `scratch` is no longer used, FormatBits2 is available.
+//
+// FormatBits computes the string representation of u in the given base
+// and writes it to dst. If neg is set, u is treated as a negative int64
+// value.
+//
+func FormatBits(scratch *FormatBitsScratch, dst FormatBitsWriter, u uint64, base int, neg bool) {
+	FormatBits2(dst, u, base, neg)
+}
+
+// FormatBits2 computes the string representation of u in the given base
+// and writes it to dst. If neg is set, u is treated as a negative int64
+// value.
+func FormatBits2(dst FormatBitsWriter, u uint64, base int, neg bool) {
+	if base < 2 || base > len(digits) {
+		panic("strconv: illegal AppendInt/FormatInt base")
+	}
+	// fast path for small common numbers
+	if u <= 10 {
+		if neg {
+			dst.WriteByte('-')
+		}
+		dst.Write(smallNumbers[u])
+		return
+	}
+
+	// 2 <= base && base <= len(digits)
+
+	var a = makeSlice(65)
+	// var a [64 + 1]byte // +1 for sign of 64bit value in base 2
+	i := len(a)
+
+	if neg {
+		u = -u
+	}
+
+	// convert bits
+	if base == 10 {
+		// common case: use constants for / and % because
+		// the compiler can optimize it into a multiply+shift,
+		// and unroll loop
+		for u >= 100 {
+			i -= 2
+			q := u / 100
+			j := uintptr(u - q*100)
+			a[i+1] = digits01[j]
+			a[i+0] = digits10[j]
+			u = q
+		}
+		if u >= 10 {
+			i--
+			q := u / 10
+			a[i] = digits[uintptr(u-q*10)]
+			u = q
+		}
+
+	} else if s := shifts[base]; s > 0 {
+		// base is power of 2: use shifts and masks instead of / and %
+		b := uint64(base)
+		m := uintptr(b) - 1 // == 1<<s - 1
+		for u >= b {
+			i--
+			a[i] = digits[uintptr(u)&m]
+			u >>= s
+		}
+
+	} else {
+		// general case
+		b := uint64(base)
+		for u >= b {
+			i--
+			a[i] = digits[uintptr(u%b)]
+			u /= b
+		}
+	}
+
+	// u < base
+	i--
+	a[i] = digits[uintptr(u)]
+
+	// add sign, if any
+	if neg {
+		i--
+		a[i] = '-'
+	}
+
+	dst.Write(a[i:])
+
+	Pool(a)
+
+	return
+}
diff --git a/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/jsonstring.go b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/jsonstring.go
new file mode 100644
index 000000000000..513b45d57033
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/jsonstring.go
@@ -0,0 +1,512 @@
+/**
+ * Copyright 2014 Paul Querna
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* Portions of this file are on Go stdlib's encoding/json/encode.go */ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package v1 + +import ( + "io" + "unicode/utf8" + "strconv" + "unicode/utf16" + "unicode" +) + +const hex = "0123456789abcdef" + +type JsonStringWriter interface { + io.Writer + io.ByteWriter + stringWriter +} + +func WriteJsonString(buf JsonStringWriter, s string) { + WriteJson(buf, []byte(s)) +} + +/** + * Function ported from encoding/json: func (e *encodeState) string(s string) (int, error) + */ +func WriteJson(buf JsonStringWriter, s []byte) { + buf.WriteByte('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + /* + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + */ + if lt[b] == true { + i++ + continue + } + + if start < i { + buf.Write(s[start:i]) + } + switch b { + case '\\', '"': + buf.WriteByte('\\') + buf.WriteByte(b) + case '\n': + buf.WriteByte('\\') + buf.WriteByte('n') + case '\r': + buf.WriteByte('\\') + buf.WriteByte('r') + default: + // This encodes bytes < 0x20 except for \n and \r, + // as well as < and >. The latter are escaped because they + // can lead to security holes when user-controlled strings + // are rendered into JSON and served to some browsers. + buf.WriteString(`\u00`) + buf.WriteByte(hex[b>>4]) + buf.WriteByte(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRune(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + buf.Write(s[start:i]) + } + buf.WriteString(`\ufffd`) + i += size + start = i + continue + } + // U+2028 is LINE SEPARATOR. + // U+2029 is PARAGRAPH SEPARATOR. + // They are both technically valid characters in JSON strings, + // but don't work in JSONP, which has to be evaluated as JavaScript, + // and can lead to security holes there. It is valid JSON to + // escape them, so we do so unconditionally. + // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. + if c == '\u2028' || c == '\u2029' { + if start < i { + buf.Write(s[start:i]) + } + buf.WriteString(`\u202`) + buf.WriteByte(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + buf.Write(s[start:]) + } + buf.WriteByte('"') +} + +// UnquoteBytes will decode []byte containing json string to go string +// ported from encoding/json/decode.go +func UnquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return + } + s = s[1 : len(s)-1] + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. 
+ r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == len(s) { + return s, true + } + + b := make([]byte, len(s)+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < len(s) { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. + if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= len(s) { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. + rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. + default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + r, err := strconv.ParseUint(string(s[2:6]), 16, 64) + if err != nil { + return -1 + } + return rune(r) +} + +// TODO(pquerna): consider combining wibth the normal byte mask. 
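The 256-entry `lt` table that follows drives the fast path of `WriteJson` above: the commented-out chain of byte comparisons becomes a single table load per input byte, and only bytes marked `false` (the quote, backslash, control bytes, and `<`, `>`, `&` for HTML safety) fall through to the escaping switch. A self-contained sketch of the same technique — the names `safe` and `escape` are illustrative, not code from this diff:

```go
package main

import "fmt"

// safe plays the role of the lt table: one entry per byte value, true when
// the byte may be copied into a JSON string verbatim.
var safe [256]bool

func init() {
	for c := 0x20; c < 0x100; c++ {
		safe[c] = true
	}
	for _, c := range []byte{'"', '\\', '<', '>', '&'} {
		safe[c] = false
	}
}

func escape(s []byte) []byte {
	const hex = "0123456789abcdef"
	out := make([]byte, 0, len(s))
	for _, b := range s {
		if safe[b] { // one table load instead of several comparisons
			out = append(out, b)
			continue
		}
		switch b {
		case '"', '\\':
			out = append(out, '\\', b)
		case '\n':
			out = append(out, '\\', 'n')
		case '\r':
			out = append(out, '\\', 'r')
		default: // other control bytes and <, >, & become \u00XX
			out = append(out, '\\', 'u', '0', '0', hex[b>>4], hex[b&0xF])
		}
	}
	return out
}

func main() {
	fmt.Println(string(escape([]byte("a\"b<c>\n")))) // a\"b\u003cc\u003e\n
}
```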
+var lt [256]bool = [256]bool{ + false, /* 0 */ + false, /* 1 */ + false, /* 2 */ + false, /* 3 */ + false, /* 4 */ + false, /* 5 */ + false, /* 6 */ + false, /* 7 */ + false, /* 8 */ + false, /* 9 */ + false, /* 10 */ + false, /* 11 */ + false, /* 12 */ + false, /* 13 */ + false, /* 14 */ + false, /* 15 */ + false, /* 16 */ + false, /* 17 */ + false, /* 18 */ + false, /* 19 */ + false, /* 20 */ + false, /* 21 */ + false, /* 22 */ + false, /* 23 */ + false, /* 24 */ + false, /* 25 */ + false, /* 26 */ + false, /* 27 */ + false, /* 28 */ + false, /* 29 */ + false, /* 30 */ + false, /* 31 */ + true, /* 32 */ + true, /* 33 */ + false, /* 34 */ + true, /* 35 */ + true, /* 36 */ + true, /* 37 */ + false, /* 38 */ + true, /* 39 */ + true, /* 40 */ + true, /* 41 */ + true, /* 42 */ + true, /* 43 */ + true, /* 44 */ + true, /* 45 */ + true, /* 46 */ + true, /* 47 */ + true, /* 48 */ + true, /* 49 */ + true, /* 50 */ + true, /* 51 */ + true, /* 52 */ + true, /* 53 */ + true, /* 54 */ + true, /* 55 */ + true, /* 56 */ + true, /* 57 */ + true, /* 58 */ + true, /* 59 */ + false, /* 60 */ + true, /* 61 */ + false, /* 62 */ + true, /* 63 */ + true, /* 64 */ + true, /* 65 */ + true, /* 66 */ + true, /* 67 */ + true, /* 68 */ + true, /* 69 */ + true, /* 70 */ + true, /* 71 */ + true, /* 72 */ + true, /* 73 */ + true, /* 74 */ + true, /* 75 */ + true, /* 76 */ + true, /* 77 */ + true, /* 78 */ + true, /* 79 */ + true, /* 80 */ + true, /* 81 */ + true, /* 82 */ + true, /* 83 */ + true, /* 84 */ + true, /* 85 */ + true, /* 86 */ + true, /* 87 */ + true, /* 88 */ + true, /* 89 */ + true, /* 90 */ + true, /* 91 */ + false, /* 92 */ + true, /* 93 */ + true, /* 94 */ + true, /* 95 */ + true, /* 96 */ + true, /* 97 */ + true, /* 98 */ + true, /* 99 */ + true, /* 100 */ + true, /* 101 */ + true, /* 102 */ + true, /* 103 */ + true, /* 104 */ + true, /* 105 */ + true, /* 106 */ + true, /* 107 */ + true, /* 108 */ + true, /* 109 */ + true, /* 110 */ + true, /* 111 */ + true, /* 112 */ + true, /* 113 */ + true, /* 114 */ + true, /* 115 */ + true, /* 116 */ + true, /* 117 */ + true, /* 118 */ + true, /* 119 */ + true, /* 120 */ + true, /* 121 */ + true, /* 122 */ + true, /* 123 */ + true, /* 124 */ + true, /* 125 */ + true, /* 126 */ + true, /* 127 */ + true, /* 128 */ + true, /* 129 */ + true, /* 130 */ + true, /* 131 */ + true, /* 132 */ + true, /* 133 */ + true, /* 134 */ + true, /* 135 */ + true, /* 136 */ + true, /* 137 */ + true, /* 138 */ + true, /* 139 */ + true, /* 140 */ + true, /* 141 */ + true, /* 142 */ + true, /* 143 */ + true, /* 144 */ + true, /* 145 */ + true, /* 146 */ + true, /* 147 */ + true, /* 148 */ + true, /* 149 */ + true, /* 150 */ + true, /* 151 */ + true, /* 152 */ + true, /* 153 */ + true, /* 154 */ + true, /* 155 */ + true, /* 156 */ + true, /* 157 */ + true, /* 158 */ + true, /* 159 */ + true, /* 160 */ + true, /* 161 */ + true, /* 162 */ + true, /* 163 */ + true, /* 164 */ + true, /* 165 */ + true, /* 166 */ + true, /* 167 */ + true, /* 168 */ + true, /* 169 */ + true, /* 170 */ + true, /* 171 */ + true, /* 172 */ + true, /* 173 */ + true, /* 174 */ + true, /* 175 */ + true, /* 176 */ + true, /* 177 */ + true, /* 178 */ + true, /* 179 */ + true, /* 180 */ + true, /* 181 */ + true, /* 182 */ + true, /* 183 */ + true, /* 184 */ + true, /* 185 */ + true, /* 186 */ + true, /* 187 */ + true, /* 188 */ + true, /* 189 */ + true, /* 190 */ + true, /* 191 */ + true, /* 192 */ + true, /* 193 */ + true, /* 194 */ + true, /* 195 */ + true, /* 196 */ + true, /* 197 */ + true, /* 198 */ + true, /* 199 
*/ + true, /* 200 */ + true, /* 201 */ + true, /* 202 */ + true, /* 203 */ + true, /* 204 */ + true, /* 205 */ + true, /* 206 */ + true, /* 207 */ + true, /* 208 */ + true, /* 209 */ + true, /* 210 */ + true, /* 211 */ + true, /* 212 */ + true, /* 213 */ + true, /* 214 */ + true, /* 215 */ + true, /* 216 */ + true, /* 217 */ + true, /* 218 */ + true, /* 219 */ + true, /* 220 */ + true, /* 221 */ + true, /* 222 */ + true, /* 223 */ + true, /* 224 */ + true, /* 225 */ + true, /* 226 */ + true, /* 227 */ + true, /* 228 */ + true, /* 229 */ + true, /* 230 */ + true, /* 231 */ + true, /* 232 */ + true, /* 233 */ + true, /* 234 */ + true, /* 235 */ + true, /* 236 */ + true, /* 237 */ + true, /* 238 */ + true, /* 239 */ + true, /* 240 */ + true, /* 241 */ + true, /* 242 */ + true, /* 243 */ + true, /* 244 */ + true, /* 245 */ + true, /* 246 */ + true, /* 247 */ + true, /* 248 */ + true, /* 249 */ + true, /* 250 */ + true, /* 251 */ + true, /* 252 */ + true, /* 253 */ + true, /* 254 */ + true, /* 255 */ +} diff --git a/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/lexer.go b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/lexer.go new file mode 100644 index 000000000000..5589292ff2c7 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/lexer.go @@ -0,0 +1,937 @@ +/** + * Copyright 2014 Paul Querna + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* Portions of this file are on derived from yajl: */ +/* + * Copyright (c) 2007-2014, Lloyd Hilaiel + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +package v1 + +import ( + "errors" + "fmt" + "io" +) + +type FFParseState int + +const ( + FFParse_map_start FFParseState = iota + FFParse_want_key + FFParse_want_colon + FFParse_want_value + FFParse_after_value +) + +type FFTok int + +const ( + FFTok_init FFTok = iota + FFTok_bool FFTok = iota + FFTok_colon FFTok = iota + FFTok_comma FFTok = iota + FFTok_eof FFTok = iota + FFTok_error FFTok = iota + FFTok_left_brace FFTok = iota + FFTok_left_bracket FFTok = iota + FFTok_null FFTok = iota + FFTok_right_brace FFTok = iota + FFTok_right_bracket FFTok = iota + + /* we differentiate between integers and doubles to allow the + * parser to interpret the number without re-scanning */ + FFTok_integer FFTok = iota + FFTok_double FFTok = iota + + FFTok_string FFTok = iota + + /* comment tokens are not currently returned to the parser, ever */ + FFTok_comment FFTok = iota +) + +type FFErr int + +const ( + FFErr_e_ok FFErr = iota + FFErr_io FFErr = iota + FFErr_string_invalid_utf8 FFErr = iota + FFErr_string_invalid_escaped_char FFErr = iota + FFErr_string_invalid_json_char FFErr = iota + FFErr_string_invalid_hex_char FFErr = iota + FFErr_invalid_char FFErr = iota + FFErr_invalid_string FFErr = iota + FFErr_missing_integer_after_decimal FFErr = iota + FFErr_missing_integer_after_exponent FFErr = iota + FFErr_missing_integer_after_minus FFErr = iota + FFErr_unallowed_comment FFErr = iota + FFErr_incomplete_comment FFErr = iota + FFErr_unexpected_token_type FFErr = iota // TODO: improve this error +) + +type FFLexer struct { + reader *ffReader + Output DecodingBuffer + Token FFTok + Error FFErr + BigError error + // TODO: convert all of this to an interface + lastCurrentChar int + captureAll bool + buf Buffer +} + +func NewFFLexer(input []byte) *FFLexer { + fl := &FFLexer{ + Token: FFTok_init, + Error: FFErr_e_ok, + reader: newffReader(input), + Output: &Buffer{}, + } + // TODO: guess size? + //fl.Output.Grow(64) + return fl +} + +type LexerError struct { + offset int + line int + char int + err error +} + +// Reset the Lexer and add new input. 
+func (ffl *FFLexer) Reset(input []byte) {
+	ffl.Token = FFTok_init
+	ffl.Error = FFErr_e_ok
+	ffl.BigError = nil
+	ffl.reader.Reset(input)
+	ffl.lastCurrentChar = 0
+	ffl.Output.Reset()
+}
+
+func (le *LexerError) Error() string {
+	return fmt.Sprintf(`ffjson error: (%T)%s offset=%d line=%d char=%d`,
+		le.err, le.err.Error(),
+		le.offset, le.line, le.char)
+}
+
+func (ffl *FFLexer) WrapErr(err error) error {
+	line, char := ffl.reader.PosWithLine()
+	// TODO: calculate lines/characters based on offset
+	return &LexerError{
+		offset: ffl.reader.Pos(),
+		line:   line,
+		char:   char,
+		err:    err,
+	}
+}
+
+func (ffl *FFLexer) scanReadByte() (byte, error) {
+	var c byte
+	var err error
+	if ffl.captureAll {
+		c, err = ffl.reader.ReadByte()
+	} else {
+		c, err = ffl.reader.ReadByteNoWS()
+	}
+
+	if err != nil {
+		ffl.Error = FFErr_io
+		ffl.BigError = err
+		return 0, err
+	}
+
+	return c, nil
+}
+
+func (ffl *FFLexer) readByte() (byte, error) {
+
+	c, err := ffl.reader.ReadByte()
+	if err != nil {
+		ffl.Error = FFErr_io
+		ffl.BigError = err
+		return 0, err
+	}
+
+	return c, nil
+}
+
+func (ffl *FFLexer) unreadByte() {
+	ffl.reader.UnreadByte()
+}
+
+func (ffl *FFLexer) wantBytes(want []byte, iftrue FFTok) FFTok {
+	startPos := ffl.reader.Pos()
+	for _, b := range want {
+		c, err := ffl.readByte()
+
+		if err != nil {
+			return FFTok_error
+		}
+
+		if c != b {
+			ffl.unreadByte()
+			// fmt.Printf("wanted bytes: %s\n", string(want))
+			// TODO(pquerna): this is a bad error message
+			ffl.Error = FFErr_invalid_string
+			return FFTok_error
+		}
+	}
+
+	endPos := ffl.reader.Pos()
+	ffl.Output.Write(ffl.reader.Slice(startPos, endPos))
+	return iftrue
+}
+
+func (ffl *FFLexer) lexComment() FFTok {
+	c, err := ffl.readByte()
+	if err != nil {
+		return FFTok_error
+	}
+
+	if c == '/' {
+		// a // comment, scan until line ends.
+		for {
+			c, err := ffl.readByte()
+			if err != nil {
+				return FFTok_error
+			}
+
+			if c == '\n' {
+				return FFTok_comment
+			}
+		}
+	} else if c == '*' {
+		// a /* */ comment, scan */
+		for {
+			c, err := ffl.readByte()
+			if err != nil {
+				return FFTok_error
+			}
+
+			if c == '*' {
+				c, err := ffl.readByte()
+
+				if err != nil {
+					return FFTok_error
+				}
+
+				if c == '/' {
+					return FFTok_comment
+				}
+
+				ffl.Error = FFErr_incomplete_comment
+				return FFTok_error
+			}
+		}
+	} else {
+		ffl.Error = FFErr_incomplete_comment
+		return FFTok_error
+	}
+}
+
+func (ffl *FFLexer) lexString() FFTok {
+	if ffl.captureAll {
+		ffl.buf.Reset()
+		err := ffl.reader.SliceString(&ffl.buf)
+
+		if err != nil {
+			ffl.BigError = err
+			return FFTok_error
+		}
+
+		WriteJson(ffl.Output, ffl.buf.Bytes())
+
+		return FFTok_string
+	} else {
+		err := ffl.reader.SliceString(ffl.Output)
+
+		if err != nil {
+			ffl.BigError = err
+			return FFTok_error
+		}
+
+		return FFTok_string
+	}
+}
+
+func (ffl *FFLexer) lexNumber() FFTok {
+	var numRead int = 0
+	tok := FFTok_integer
+	startPos := ffl.reader.Pos()
+
+	c, err := ffl.readByte()
+	if err != nil {
+		return FFTok_error
+	}
+
+	/* optional leading minus */
+	if c == '-' {
+		c, err = ffl.readByte()
+		if err != nil {
+			return FFTok_error
+		}
+	}
+
+	/* a single zero, or a series of integers */
+	if c == '0' {
+		c, err = ffl.readByte()
+		if err != nil {
+			return FFTok_error
+		}
+	} else if c >= '1' && c <= '9' {
+		for c >= '0' && c <= '9' {
+			c, err = ffl.readByte()
+			if err != nil {
+				return FFTok_error
+			}
+		}
+	} else {
+		ffl.unreadByte()
+		ffl.Error = FFErr_missing_integer_after_minus
+		return FFTok_error
+	}
+
+	if c == '.'
{ + numRead = 0 + c, err = ffl.readByte() + if err != nil { + return FFTok_error + } + + for c >= '0' && c <= '9' { + numRead++ + c, err = ffl.readByte() + if err != nil { + return FFTok_error + } + } + + if numRead == 0 { + ffl.unreadByte() + + ffl.Error = FFErr_missing_integer_after_decimal + return FFTok_error + } + + tok = FFTok_double + } + + /* optional exponent (indicates this is floating point) */ + if c == 'e' || c == 'E' { + numRead = 0 + c, err = ffl.readByte() + if err != nil { + return FFTok_error + } + + /* optional sign */ + if c == '+' || c == '-' { + c, err = ffl.readByte() + if err != nil { + return FFTok_error + } + } + + for c >= '0' && c <= '9' { + numRead++ + c, err = ffl.readByte() + if err != nil { + return FFTok_error + } + } + + if numRead == 0 { + ffl.Error = FFErr_missing_integer_after_exponent + return FFTok_error + } + + tok = FFTok_double + } + + ffl.unreadByte() + + endPos := ffl.reader.Pos() + ffl.Output.Write(ffl.reader.Slice(startPos, endPos)) + return tok +} + +var true_bytes = []byte{'r', 'u', 'e'} +var false_bytes = []byte{'a', 'l', 's', 'e'} +var null_bytes = []byte{'u', 'l', 'l'} + +func (ffl *FFLexer) Scan() FFTok { + tok := FFTok_error + if ffl.captureAll == false { + ffl.Output.Reset() + } + ffl.Token = FFTok_init + + for { + c, err := ffl.scanReadByte() + if err != nil { + if err == io.EOF { + return FFTok_eof + } else { + return FFTok_error + } + } + + switch c { + case '{': + tok = FFTok_left_bracket + if ffl.captureAll { + ffl.Output.WriteByte('{') + } + goto lexed + case '}': + tok = FFTok_right_bracket + if ffl.captureAll { + ffl.Output.WriteByte('}') + } + goto lexed + case '[': + tok = FFTok_left_brace + if ffl.captureAll { + ffl.Output.WriteByte('[') + } + goto lexed + case ']': + tok = FFTok_right_brace + if ffl.captureAll { + ffl.Output.WriteByte(']') + } + goto lexed + case ',': + tok = FFTok_comma + if ffl.captureAll { + ffl.Output.WriteByte(',') + } + goto lexed + case ':': + tok = FFTok_colon + if ffl.captureAll { + ffl.Output.WriteByte(':') + } + goto lexed + case '\t', '\n', '\v', '\f', '\r', ' ': + if ffl.captureAll { + ffl.Output.WriteByte(c) + } + case 't': + ffl.Output.WriteByte('t') + tok = ffl.wantBytes(true_bytes, FFTok_bool) + goto lexed + case 'f': + ffl.Output.WriteByte('f') + tok = ffl.wantBytes(false_bytes, FFTok_bool) + goto lexed + case 'n': + ffl.Output.WriteByte('n') + tok = ffl.wantBytes(null_bytes, FFTok_null) + goto lexed + case '"': + tok = ffl.lexString() + goto lexed + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + ffl.unreadByte() + tok = ffl.lexNumber() + goto lexed + case '/': + tok = ffl.lexComment() + goto lexed + default: + tok = FFTok_error + ffl.Error = FFErr_invalid_char + goto lexed + } + } + +lexed: + ffl.Token = tok + return tok +} + +func (ffl *FFLexer) scanField(start FFTok, capture bool) ([]byte, error) { + switch start { + case FFTok_left_brace, + FFTok_left_bracket: + { + end := FFTok_right_brace + if start == FFTok_left_bracket { + end = FFTok_right_bracket + if capture { + ffl.Output.WriteByte('{') + } + } else { + if capture { + ffl.Output.WriteByte('[') + } + } + + depth := 1 + if capture { + ffl.captureAll = true + } + // TODO: work. 
+ scanloop: + for { + tok := ffl.Scan() + //fmt.Printf("capture-token: %v end: %v depth: %v\n", tok, end, depth) + switch tok { + case FFTok_eof: + return nil, errors.New("ffjson: unexpected EOF") + case FFTok_error: + if ffl.BigError != nil { + return nil, ffl.BigError + } + return nil, ffl.Error.ToError() + case end: + depth-- + if depth == 0 { + break scanloop + } + case start: + depth++ + } + } + + if capture { + ffl.captureAll = false + } + + if capture { + return ffl.Output.Bytes(), nil + } else { + return nil, nil + } + } + case FFTok_bool, + FFTok_integer, + FFTok_null, + FFTok_double: + // simple value, return it. + if capture { + return ffl.Output.Bytes(), nil + } else { + return nil, nil + } + + case FFTok_string: + //TODO(pquerna): so, other users expect this to be a quoted string :( + if capture { + ffl.buf.Reset() + WriteJson(&ffl.buf, ffl.Output.Bytes()) + return ffl.buf.Bytes(), nil + } else { + return nil, nil + } + } + + return nil, fmt.Errorf("ffjson: invalid capture type: %v", start) +} + +// Captures an entire field value, including recursive objects, +// and converts them to a []byte suitable to pass to a sub-object's +// UnmarshalJSON +func (ffl *FFLexer) CaptureField(start FFTok) ([]byte, error) { + return ffl.scanField(start, true) +} + +func (ffl *FFLexer) SkipField(start FFTok) error { + _, err := ffl.scanField(start, false) + return err +} + +// TODO(pquerna): return line number and offset. +func (err FFErr) ToError() error { + switch err { + case FFErr_e_ok: + return nil + case FFErr_io: + return errors.New("ffjson: IO error") + case FFErr_string_invalid_utf8: + return errors.New("ffjson: string with invalid UTF-8 sequence") + case FFErr_string_invalid_escaped_char: + return errors.New("ffjson: string with invalid escaped character") + case FFErr_string_invalid_json_char: + return errors.New("ffjson: string with invalid JSON character") + case FFErr_string_invalid_hex_char: + return errors.New("ffjson: string with invalid hex character") + case FFErr_invalid_char: + return errors.New("ffjson: invalid character") + case FFErr_invalid_string: + return errors.New("ffjson: invalid string") + case FFErr_missing_integer_after_decimal: + return errors.New("ffjson: missing integer after decimal") + case FFErr_missing_integer_after_exponent: + return errors.New("ffjson: missing integer after exponent") + case FFErr_missing_integer_after_minus: + return errors.New("ffjson: missing integer after minus") + case FFErr_unallowed_comment: + return errors.New("ffjson: unallowed comment") + case FFErr_incomplete_comment: + return errors.New("ffjson: incomplete comment") + case FFErr_unexpected_token_type: + return errors.New("ffjson: unexpected token sequence") + } + + panic(fmt.Sprintf("unknown error type: %v ", err)) +} + +func (state FFParseState) String() string { + switch state { + case FFParse_map_start: + return "map:start" + case FFParse_want_key: + return "want_key" + case FFParse_want_colon: + return "want_colon" + case FFParse_want_value: + return "want_value" + case FFParse_after_value: + return "after_value" + } + + panic(fmt.Sprintf("unknown parse state: %d", int(state))) +} + +func (tok FFTok) String() string { + switch tok { + case FFTok_init: + return "tok:init" + case FFTok_bool: + return "tok:bool" + case FFTok_colon: + return "tok:colon" + case FFTok_comma: + return "tok:comma" + case FFTok_eof: + return "tok:eof" + case FFTok_error: + return "tok:error" + case FFTok_left_brace: + return "tok:left_brace" + case FFTok_left_bracket: + return "tok:left_bracket" 
+ case FFTok_null: + return "tok:null" + case FFTok_right_brace: + return "tok:right_brace" + case FFTok_right_bracket: + return "tok:right_bracket" + case FFTok_integer: + return "tok:integer" + case FFTok_double: + return "tok:double" + case FFTok_string: + return "tok:string" + case FFTok_comment: + return "comment" + } + + panic(fmt.Sprintf("unknown token: %d", int(tok))) +} + +/* a lookup table which lets us quickly determine three things: + * cVEC - valid escaped control char + * note. the solidus '/' may be escaped or not. + * cIJC - invalid json char + * cVHC - valid hex char + * cNFP - needs further processing (from a string scanning perspective) + * cNUC - needs utf8 checking when enabled (from a string scanning perspective) + */ + +const ( + cVEC int8 = 0x01 + cIJC int8 = 0x02 + cVHC int8 = 0x04 + cNFP int8 = 0x08 + cNUC int8 = 0x10 +) + +var byteLookupTable [256]int8 = [256]int8{ + cIJC, /* 0 */ + cIJC, /* 1 */ + cIJC, /* 2 */ + cIJC, /* 3 */ + cIJC, /* 4 */ + cIJC, /* 5 */ + cIJC, /* 6 */ + cIJC, /* 7 */ + cIJC, /* 8 */ + cIJC, /* 9 */ + cIJC, /* 10 */ + cIJC, /* 11 */ + cIJC, /* 12 */ + cIJC, /* 13 */ + cIJC, /* 14 */ + cIJC, /* 15 */ + cIJC, /* 16 */ + cIJC, /* 17 */ + cIJC, /* 18 */ + cIJC, /* 19 */ + cIJC, /* 20 */ + cIJC, /* 21 */ + cIJC, /* 22 */ + cIJC, /* 23 */ + cIJC, /* 24 */ + cIJC, /* 25 */ + cIJC, /* 26 */ + cIJC, /* 27 */ + cIJC, /* 28 */ + cIJC, /* 29 */ + cIJC, /* 30 */ + cIJC, /* 31 */ + 0, /* 32 */ + 0, /* 33 */ + cVEC | cIJC | cNFP, /* 34 */ + 0, /* 35 */ + 0, /* 36 */ + 0, /* 37 */ + 0, /* 38 */ + 0, /* 39 */ + 0, /* 40 */ + 0, /* 41 */ + 0, /* 42 */ + 0, /* 43 */ + 0, /* 44 */ + 0, /* 45 */ + 0, /* 46 */ + cVEC, /* 47 */ + cVHC, /* 48 */ + cVHC, /* 49 */ + cVHC, /* 50 */ + cVHC, /* 51 */ + cVHC, /* 52 */ + cVHC, /* 53 */ + cVHC, /* 54 */ + cVHC, /* 55 */ + cVHC, /* 56 */ + cVHC, /* 57 */ + 0, /* 58 */ + 0, /* 59 */ + 0, /* 60 */ + 0, /* 61 */ + 0, /* 62 */ + 0, /* 63 */ + 0, /* 64 */ + cVHC, /* 65 */ + cVHC, /* 66 */ + cVHC, /* 67 */ + cVHC, /* 68 */ + cVHC, /* 69 */ + cVHC, /* 70 */ + 0, /* 71 */ + 0, /* 72 */ + 0, /* 73 */ + 0, /* 74 */ + 0, /* 75 */ + 0, /* 76 */ + 0, /* 77 */ + 0, /* 78 */ + 0, /* 79 */ + 0, /* 80 */ + 0, /* 81 */ + 0, /* 82 */ + 0, /* 83 */ + 0, /* 84 */ + 0, /* 85 */ + 0, /* 86 */ + 0, /* 87 */ + 0, /* 88 */ + 0, /* 89 */ + 0, /* 90 */ + 0, /* 91 */ + cVEC | cIJC | cNFP, /* 92 */ + 0, /* 93 */ + 0, /* 94 */ + 0, /* 95 */ + 0, /* 96 */ + cVHC, /* 97 */ + cVEC | cVHC, /* 98 */ + cVHC, /* 99 */ + cVHC, /* 100 */ + cVHC, /* 101 */ + cVEC | cVHC, /* 102 */ + 0, /* 103 */ + 0, /* 104 */ + 0, /* 105 */ + 0, /* 106 */ + 0, /* 107 */ + 0, /* 108 */ + 0, /* 109 */ + cVEC, /* 110 */ + 0, /* 111 */ + 0, /* 112 */ + 0, /* 113 */ + cVEC, /* 114 */ + 0, /* 115 */ + cVEC, /* 116 */ + 0, /* 117 */ + 0, /* 118 */ + 0, /* 119 */ + 0, /* 120 */ + 0, /* 121 */ + 0, /* 122 */ + 0, /* 123 */ + 0, /* 124 */ + 0, /* 125 */ + 0, /* 126 */ + 0, /* 127 */ + cNUC, /* 128 */ + cNUC, /* 129 */ + cNUC, /* 130 */ + cNUC, /* 131 */ + cNUC, /* 132 */ + cNUC, /* 133 */ + cNUC, /* 134 */ + cNUC, /* 135 */ + cNUC, /* 136 */ + cNUC, /* 137 */ + cNUC, /* 138 */ + cNUC, /* 139 */ + cNUC, /* 140 */ + cNUC, /* 141 */ + cNUC, /* 142 */ + cNUC, /* 143 */ + cNUC, /* 144 */ + cNUC, /* 145 */ + cNUC, /* 146 */ + cNUC, /* 147 */ + cNUC, /* 148 */ + cNUC, /* 149 */ + cNUC, /* 150 */ + cNUC, /* 151 */ + cNUC, /* 152 */ + cNUC, /* 153 */ + cNUC, /* 154 */ + cNUC, /* 155 */ + cNUC, /* 156 */ + cNUC, /* 157 */ + cNUC, /* 158 */ + cNUC, /* 159 */ + cNUC, /* 160 */ + cNUC, /* 161 */ + 
cNUC, /* 162 */ + cNUC, /* 163 */ + cNUC, /* 164 */ + cNUC, /* 165 */ + cNUC, /* 166 */ + cNUC, /* 167 */ + cNUC, /* 168 */ + cNUC, /* 169 */ + cNUC, /* 170 */ + cNUC, /* 171 */ + cNUC, /* 172 */ + cNUC, /* 173 */ + cNUC, /* 174 */ + cNUC, /* 175 */ + cNUC, /* 176 */ + cNUC, /* 177 */ + cNUC, /* 178 */ + cNUC, /* 179 */ + cNUC, /* 180 */ + cNUC, /* 181 */ + cNUC, /* 182 */ + cNUC, /* 183 */ + cNUC, /* 184 */ + cNUC, /* 185 */ + cNUC, /* 186 */ + cNUC, /* 187 */ + cNUC, /* 188 */ + cNUC, /* 189 */ + cNUC, /* 190 */ + cNUC, /* 191 */ + cNUC, /* 192 */ + cNUC, /* 193 */ + cNUC, /* 194 */ + cNUC, /* 195 */ + cNUC, /* 196 */ + cNUC, /* 197 */ + cNUC, /* 198 */ + cNUC, /* 199 */ + cNUC, /* 200 */ + cNUC, /* 201 */ + cNUC, /* 202 */ + cNUC, /* 203 */ + cNUC, /* 204 */ + cNUC, /* 205 */ + cNUC, /* 206 */ + cNUC, /* 207 */ + cNUC, /* 208 */ + cNUC, /* 209 */ + cNUC, /* 210 */ + cNUC, /* 211 */ + cNUC, /* 212 */ + cNUC, /* 213 */ + cNUC, /* 214 */ + cNUC, /* 215 */ + cNUC, /* 216 */ + cNUC, /* 217 */ + cNUC, /* 218 */ + cNUC, /* 219 */ + cNUC, /* 220 */ + cNUC, /* 221 */ + cNUC, /* 222 */ + cNUC, /* 223 */ + cNUC, /* 224 */ + cNUC, /* 225 */ + cNUC, /* 226 */ + cNUC, /* 227 */ + cNUC, /* 228 */ + cNUC, /* 229 */ + cNUC, /* 230 */ + cNUC, /* 231 */ + cNUC, /* 232 */ + cNUC, /* 233 */ + cNUC, /* 234 */ + cNUC, /* 235 */ + cNUC, /* 236 */ + cNUC, /* 237 */ + cNUC, /* 238 */ + cNUC, /* 239 */ + cNUC, /* 240 */ + cNUC, /* 241 */ + cNUC, /* 242 */ + cNUC, /* 243 */ + cNUC, /* 244 */ + cNUC, /* 245 */ + cNUC, /* 246 */ + cNUC, /* 247 */ + cNUC, /* 248 */ + cNUC, /* 249 */ + cNUC, /* 250 */ + cNUC, /* 251 */ + cNUC, /* 252 */ + cNUC, /* 253 */ + cNUC, /* 254 */ + cNUC, /* 255 */ +} diff --git a/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/reader.go b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/reader.go new file mode 100644 index 000000000000..0f22c469d62b --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/reader.go @@ -0,0 +1,512 @@ +/** + * Copyright 2014 Paul Querna + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package v1 + +import ( + "fmt" + "io" + "unicode" + "unicode/utf16" +) + +const sliceStringMask = cIJC | cNFP + +type ffReader struct { + s []byte + i int + l int +} + +func newffReader(d []byte) *ffReader { + return &ffReader{ + s: d, + i: 0, + l: len(d), + } +} + +func (r *ffReader) Slice(start, stop int) []byte { + return r.s[start:stop] +} + +func (r *ffReader) Pos() int { + return r.i +} + +// Reset the reader, and add new input. +func (r *ffReader) Reset(d []byte) { + r.s = d + r.i = 0 + r.l = len(d) +} + +// Calcuates the Position with line and line offset, +// because this isn't counted for performance reasons, +// it will iterate the buffer from the beginning, and should +// only be used in error-paths. 
+func (r *ffReader) PosWithLine() (int, int) { + currentLine := 1 + currentChar := 0 + + for i := 0; i < r.i; i++ { + c := r.s[i] + currentChar++ + if c == '\n' { + currentLine++ + currentChar = 0 + } + } + + return currentLine, currentChar +} + +func (r *ffReader) ReadByteNoWS() (byte, error) { + if r.i >= r.l { + return 0, io.EOF + } + + j := r.i + + for { + c := r.s[j] + j++ + + // inline whitespace parsing gives another ~8% performance boost + // for many kinds of nicely indented JSON. + // ... and using a [255]bool instead of multiple ifs, gives another 2% + /* + if c != '\t' && + c != '\n' && + c != '\v' && + c != '\f' && + c != '\r' && + c != ' ' { + r.i = j + return c, nil + } + */ + if whitespaceLookupTable[c] == false { + r.i = j + return c, nil + } + + if j >= r.l { + return 0, io.EOF + } + } +} + +func (r *ffReader) ReadByte() (byte, error) { + if r.i >= r.l { + return 0, io.EOF + } + + r.i++ + + return r.s[r.i-1], nil +} + +func (r *ffReader) UnreadByte() error { + if r.i <= 0 { + panic("ffReader.UnreadByte: at beginning of slice") + } + r.i-- + return nil +} + +func (r *ffReader) readU4(j int) (rune, error) { + + var u4 [4]byte + for i := 0; i < 4; i++ { + if j >= r.l { + return -1, io.EOF + } + c := r.s[j] + if byteLookupTable[c]&cVHC != 0 { + u4[i] = c + j++ + continue + } else { + // TODO(pquerna): handle errors better. layering violation. + return -1, fmt.Errorf("lex_string_invalid_hex_char: %v %v", c, string(u4[:])) + } + } + + // TODO(pquerna): utf16.IsSurrogate + rr, err := ParseUint(u4[:], 16, 64) + if err != nil { + return -1, err + } + return rune(rr), nil +} + +func (r *ffReader) handleEscaped(c byte, j int, out DecodingBuffer) (int, error) { + if j >= r.l { + return 0, io.EOF + } + + c = r.s[j] + j++ + + if c == 'u' { + ru, err := r.readU4(j) + if err != nil { + return 0, err + } + + if utf16.IsSurrogate(ru) { + ru2, err := r.readU4(j + 6) + if err != nil { + return 0, err + } + out.Write(r.s[r.i : j-2]) + r.i = j + 10 + j = r.i + rval := utf16.DecodeRune(ru, ru2) + if rval != unicode.ReplacementChar { + out.WriteRune(rval) + } else { + return 0, fmt.Errorf("lex_string_invalid_unicode_surrogate: %v %v", ru, ru2) + } + } else { + out.Write(r.s[r.i : j-2]) + r.i = j + 4 + j = r.i + out.WriteRune(ru) + } + return j, nil + } else if byteLookupTable[c]&cVEC == 0 { + return 0, fmt.Errorf("lex_string_invalid_escaped_char: %v", c) + } else { + out.Write(r.s[r.i : j-2]) + r.i = j + j = r.i + + switch c { + case '"': + out.WriteByte('"') + case '\\': + out.WriteByte('\\') + case '/': + out.WriteByte('/') + case 'b': + out.WriteByte('\b') + case 'f': + out.WriteByte('\f') + case 'n': + out.WriteByte('\n') + case 'r': + out.WriteByte('\r') + case 't': + out.WriteByte('\t') + } + } + + return j, nil +} + +func (r *ffReader) SliceString(out DecodingBuffer) error { + var c byte + // TODO(pquerna): string_with_escapes? de-escape here? + j := r.i + + for { + if j >= r.l { + return io.EOF + } + + j, c = scanString(r.s, j) + + if c == '"' { + if j != r.i { + out.Write(r.s[r.i : j-1]) + r.i = j + } + return nil + } else if c == '\\' { + var err error + j, err = r.handleEscaped(c, j, out) + if err != nil { + return err + } + } else if byteLookupTable[c]&cIJC != 0 { + return fmt.Errorf("lex_string_invalid_json_char: %v", c) + } + continue + } +} + +// TODO(pquerna): consider combining wibth the normal byte mask. 
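The `[256]bool` table below is the one `ReadByteNoWS` indexes above; per the comment kept in that function, replacing the chain of six comparisons with a single table lookup bought roughly another 2% on nicely indented JSON. A minimal sketch of the pattern, with hypothetical helper names that are not part of this diff:

```go
package main

import "fmt"

// isWS mirrors whitespaceLookupTable: the whitespace test becomes a single
// indexed load. The set matches the vendored code: \t \n \v \f \r and space.
var isWS [256]bool

func init() {
	for _, c := range []byte{'\t', '\n', '\v', '\f', '\r', ' '} {
		isWS[c] = true
	}
}

// skipWS returns the index of the first non-whitespace byte at or after i.
func skipWS(s []byte, i int) int {
	for i < len(s) && isWS[s[i]] {
		i++
	}
	return i
}

func main() {
	doc := []byte("  \t\n{\"a\": 1}")
	fmt.Println(skipWS(doc, 0)) // 4: the first significant byte is '{'
}
```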
+var whitespaceLookupTable [256]bool = [256]bool{ + false, /* 0 */ + false, /* 1 */ + false, /* 2 */ + false, /* 3 */ + false, /* 4 */ + false, /* 5 */ + false, /* 6 */ + false, /* 7 */ + false, /* 8 */ + true, /* 9 */ + true, /* 10 */ + true, /* 11 */ + true, /* 12 */ + true, /* 13 */ + false, /* 14 */ + false, /* 15 */ + false, /* 16 */ + false, /* 17 */ + false, /* 18 */ + false, /* 19 */ + false, /* 20 */ + false, /* 21 */ + false, /* 22 */ + false, /* 23 */ + false, /* 24 */ + false, /* 25 */ + false, /* 26 */ + false, /* 27 */ + false, /* 28 */ + false, /* 29 */ + false, /* 30 */ + false, /* 31 */ + true, /* 32 */ + false, /* 33 */ + false, /* 34 */ + false, /* 35 */ + false, /* 36 */ + false, /* 37 */ + false, /* 38 */ + false, /* 39 */ + false, /* 40 */ + false, /* 41 */ + false, /* 42 */ + false, /* 43 */ + false, /* 44 */ + false, /* 45 */ + false, /* 46 */ + false, /* 47 */ + false, /* 48 */ + false, /* 49 */ + false, /* 50 */ + false, /* 51 */ + false, /* 52 */ + false, /* 53 */ + false, /* 54 */ + false, /* 55 */ + false, /* 56 */ + false, /* 57 */ + false, /* 58 */ + false, /* 59 */ + false, /* 60 */ + false, /* 61 */ + false, /* 62 */ + false, /* 63 */ + false, /* 64 */ + false, /* 65 */ + false, /* 66 */ + false, /* 67 */ + false, /* 68 */ + false, /* 69 */ + false, /* 70 */ + false, /* 71 */ + false, /* 72 */ + false, /* 73 */ + false, /* 74 */ + false, /* 75 */ + false, /* 76 */ + false, /* 77 */ + false, /* 78 */ + false, /* 79 */ + false, /* 80 */ + false, /* 81 */ + false, /* 82 */ + false, /* 83 */ + false, /* 84 */ + false, /* 85 */ + false, /* 86 */ + false, /* 87 */ + false, /* 88 */ + false, /* 89 */ + false, /* 90 */ + false, /* 91 */ + false, /* 92 */ + false, /* 93 */ + false, /* 94 */ + false, /* 95 */ + false, /* 96 */ + false, /* 97 */ + false, /* 98 */ + false, /* 99 */ + false, /* 100 */ + false, /* 101 */ + false, /* 102 */ + false, /* 103 */ + false, /* 104 */ + false, /* 105 */ + false, /* 106 */ + false, /* 107 */ + false, /* 108 */ + false, /* 109 */ + false, /* 110 */ + false, /* 111 */ + false, /* 112 */ + false, /* 113 */ + false, /* 114 */ + false, /* 115 */ + false, /* 116 */ + false, /* 117 */ + false, /* 118 */ + false, /* 119 */ + false, /* 120 */ + false, /* 121 */ + false, /* 122 */ + false, /* 123 */ + false, /* 124 */ + false, /* 125 */ + false, /* 126 */ + false, /* 127 */ + false, /* 128 */ + false, /* 129 */ + false, /* 130 */ + false, /* 131 */ + false, /* 132 */ + false, /* 133 */ + false, /* 134 */ + false, /* 135 */ + false, /* 136 */ + false, /* 137 */ + false, /* 138 */ + false, /* 139 */ + false, /* 140 */ + false, /* 141 */ + false, /* 142 */ + false, /* 143 */ + false, /* 144 */ + false, /* 145 */ + false, /* 146 */ + false, /* 147 */ + false, /* 148 */ + false, /* 149 */ + false, /* 150 */ + false, /* 151 */ + false, /* 152 */ + false, /* 153 */ + false, /* 154 */ + false, /* 155 */ + false, /* 156 */ + false, /* 157 */ + false, /* 158 */ + false, /* 159 */ + false, /* 160 */ + false, /* 161 */ + false, /* 162 */ + false, /* 163 */ + false, /* 164 */ + false, /* 165 */ + false, /* 166 */ + false, /* 167 */ + false, /* 168 */ + false, /* 169 */ + false, /* 170 */ + false, /* 171 */ + false, /* 172 */ + false, /* 173 */ + false, /* 174 */ + false, /* 175 */ + false, /* 176 */ + false, /* 177 */ + false, /* 178 */ + false, /* 179 */ + false, /* 180 */ + false, /* 181 */ + false, /* 182 */ + false, /* 183 */ + false, /* 184 */ + false, /* 185 */ + false, /* 186 */ + false, /* 187 */ + false, /* 188 */ + false, /* 189 */ + false, 
/* 190 */ + false, /* 191 */ + false, /* 192 */ + false, /* 193 */ + false, /* 194 */ + false, /* 195 */ + false, /* 196 */ + false, /* 197 */ + false, /* 198 */ + false, /* 199 */ + false, /* 200 */ + false, /* 201 */ + false, /* 202 */ + false, /* 203 */ + false, /* 204 */ + false, /* 205 */ + false, /* 206 */ + false, /* 207 */ + false, /* 208 */ + false, /* 209 */ + false, /* 210 */ + false, /* 211 */ + false, /* 212 */ + false, /* 213 */ + false, /* 214 */ + false, /* 215 */ + false, /* 216 */ + false, /* 217 */ + false, /* 218 */ + false, /* 219 */ + false, /* 220 */ + false, /* 221 */ + false, /* 222 */ + false, /* 223 */ + false, /* 224 */ + false, /* 225 */ + false, /* 226 */ + false, /* 227 */ + false, /* 228 */ + false, /* 229 */ + false, /* 230 */ + false, /* 231 */ + false, /* 232 */ + false, /* 233 */ + false, /* 234 */ + false, /* 235 */ + false, /* 236 */ + false, /* 237 */ + false, /* 238 */ + false, /* 239 */ + false, /* 240 */ + false, /* 241 */ + false, /* 242 */ + false, /* 243 */ + false, /* 244 */ + false, /* 245 */ + false, /* 246 */ + false, /* 247 */ + false, /* 248 */ + false, /* 249 */ + false, /* 250 */ + false, /* 251 */ + false, /* 252 */ + false, /* 253 */ + false, /* 254 */ + false, /* 255 */ +} diff --git a/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/reader_scan_generic.go b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/reader_scan_generic.go new file mode 100644 index 000000000000..47c260770806 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/pquerna/ffjson/fflib/v1/reader_scan_generic.go @@ -0,0 +1,34 @@ +/** + * Copyright 2014 Paul Querna + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package v1 + +func scanString(s []byte, j int) (int, byte) { + for { + if j >= len(s) { + return j, 0 + } + + c := s[j] + j++ + if byteLookupTable[c]&sliceStringMask == 0 { + continue + } + + return j, c + } +} diff --git a/cluster-autoscaler/vendor/golang.org/x/exp/LICENSE b/cluster-autoscaler/vendor/github.com/sigma/go-inotify/LICENSE similarity index 100% rename from cluster-autoscaler/vendor/golang.org/x/exp/LICENSE rename to cluster-autoscaler/vendor/github.com/sigma/go-inotify/LICENSE diff --git a/cluster-autoscaler/vendor/golang.org/x/exp/PATENTS b/cluster-autoscaler/vendor/github.com/sigma/go-inotify/PATENTS similarity index 100% rename from cluster-autoscaler/vendor/golang.org/x/exp/PATENTS rename to cluster-autoscaler/vendor/github.com/sigma/go-inotify/PATENTS diff --git a/cluster-autoscaler/vendor/github.com/sigma/go-inotify/README.md b/cluster-autoscaler/vendor/github.com/sigma/go-inotify/README.md new file mode 100644 index 000000000000..0c723a8d2b4f --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/sigma/go-inotify/README.md @@ -0,0 +1,5 @@ +This is a fork of golang.org/x/exp/inotify before it was deleted. + +Please use gopkg.in/fsnotify.v0 instead. 
+ +For updates, see: https://fsnotify.org/ diff --git a/cluster-autoscaler/vendor/golang.org/x/exp/inotify/inotify_linux.go b/cluster-autoscaler/vendor/github.com/sigma/go-inotify/inotify_linux.go similarity index 100% rename from cluster-autoscaler/vendor/golang.org/x/exp/inotify/inotify_linux.go rename to cluster-autoscaler/vendor/github.com/sigma/go-inotify/inotify_linux.go diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/LICENSE b/cluster-autoscaler/vendor/github.com/ugorji/go/LICENSE deleted file mode 100644 index 95a0f0541cda..000000000000 --- a/cluster-autoscaler/vendor/github.com/ugorji/go/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2012-2015 Ugorji Nwoke. -All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/0doc.go b/cluster-autoscaler/vendor/github.com/ugorji/go/codec/0doc.go deleted file mode 100644 index 209f9ebad5bb..000000000000 --- a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/0doc.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -/* -High Performance, Feature-Rich Idiomatic Go codec/encoding library for -binc, msgpack, cbor, json. - -Supported Serialization formats are: - - - msgpack: https://github.com/msgpack/msgpack - - binc: http://github.com/ugorji/binc - - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049 - - json: http://json.org http://tools.ietf.org/html/rfc7159 - - simple: - -To install: - - go get github.com/ugorji/go/codec - -This package understands the 'unsafe' tag, to allow using unsafe semantics: - - - When decoding into a struct, you need to read the field name as a string - so you can find the struct field it is mapped to. - Using `unsafe` will bypass the allocation and copying overhead of []byte->string conversion. - -To install using unsafe, pass the 'unsafe' tag: - - go get -tags=unsafe github.com/ugorji/go/codec - -For detailed usage information, read the primer at http://ugorji.net/blog/go-codec-primer . - -The idiomatic Go support is as seen in other encoding packages in -the standard library (ie json, xml, gob, etc). - -Rich Feature Set includes: - - - Simple but extremely powerful and feature-rich API - - Very High Performance. - Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X. - - Multiple conversions: - Package coerces types where appropriate - e.g. 
decode an int in the stream into a float, etc. - Corner Cases: - Overflows, nil maps/slices, nil values in streams are handled correctly - Standard field renaming via tags - Support for omitting empty fields during an encoding - Encoding from any value and decoding into pointer to any value - (struct, slice, map, primitives, pointers, interface{}, etc) - Extensions to support efficient encoding/decoding of any named types - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces - Decoding without a schema (into an interface{}). - Includes Options to configure what specific map or slice type to use - when decoding an encoded list or map into a nil interface{} - Encode a struct as an array, and decode struct from an array in the data stream - Comprehensive support for anonymous fields - Fast (no-reflection) encoding/decoding of common maps and slices - Code-generation for faster performance. - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats - Support indefinite-length formats to enable true streaming - (for formats which support it e.g. json, cbor) - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes. - This mostly applies to maps, where iteration order is non-deterministic. - NIL in data stream decoded as zero value - Never silently skip data when decoding. - User decides whether to return an error or silently skip data when keys or indexes - in the data stream do not map to fields in the struct. - Detect and error when encoding a cyclic reference (instead of stack overflow shutdown) - Encode/Decode from/to chan types (for iterative streaming support) - Drop-in replacement for encoding/json. `json:` key in struct tag supported. - Provides an RPC Server and Client Codec for net/rpc communication protocol. - Handle unique idiosyncrasies of codecs e.g. - - For messagepack, configure how ambiguities in handling raw bytes are resolved - - For messagepack, provide rpc server/client codec to support - msgpack-rpc protocol defined at: - https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md - -Extension Support - -Users can register a function to handle the encoding or decoding of -their custom types. - -There are no restrictions on what the custom type can be. Some examples: - - type BisSet []int - type BitSet64 uint64 - type UUID string - type MyStructWithUnexportedFields struct { a int; b bool; c []int; } - type GifImage struct { ... } - -As an illustration, MyStructWithUnexportedFields would normally be -encoded as an empty map because it has no exported fields, while UUID -would be encoded as a string. However, with extension support, you can -encode any of these however you like. - -RPC - -RPC Client and Server Codecs are implemented, so the codecs can be used -with the standard net/rpc package. - -Usage - -The Handle is SAFE for concurrent READ, but NOT SAFE for concurrent modification. - -The Encoder and Decoder are NOT safe for concurrent use. - -Consequently, the usage model is basically: - - - Create and initialize the Handle before any use. - Once created, DO NOT modify it. - - Multiple Encoders or Decoders can now use the Handle concurrently. - They only read information off the Handle (never write). - - However, each Encoder or Decoder MUST not be used concurrently - - To re-use an Encoder/Decoder, call Reset(...) on it first. - This allows you to use state maintained on the Encoder/Decoder.
- -Sample usage model: - - // create and configure Handle - var ( - bh codec.BincHandle - mh codec.MsgpackHandle - ch codec.CborHandle - ) - - mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) - - // configure extensions - // e.g. for msgpack, define functions and enable Time support for tag 1 - // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt) - - // create and use decoder/encoder - var ( - r io.Reader - w io.Writer - b []byte - h = &bh // or mh to use msgpack - ) - - dec = codec.NewDecoder(r, h) - dec = codec.NewDecoderBytes(b, h) - err = dec.Decode(&v) - - enc = codec.NewEncoder(w, h) - enc = codec.NewEncoderBytes(&b, h) - err = enc.Encode(v) - - //RPC Server - go func() { - for { - conn, err := listener.Accept() - rpcCodec := codec.GoRpc.ServerCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) - rpc.ServeCodec(rpcCodec) - } - }() - - //RPC Communication (client side) - conn, err = net.Dial("tcp", "localhost:5555") - rpcCodec := codec.GoRpc.ClientCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) - client := rpc.NewClientWithCodec(rpcCodec) - -*/ -package codec - -// Benefits of go-codec: -// -// - encoding/json always reads whole file into memory first. -// This makes it unsuitable for parsing very large files. -// - encoding/xml cannot parse into a map[string]interface{} -// I found this out on reading https://github.com/clbanning/mxj - -// TODO: -// -// - optimization for codecgen: -// if len of entity is <= 3 words, then support a value receiver for encode. -// - (En|De)coder should store an error when it occurs. -// Until reset, subsequent calls return that error that was stored. -// This means that free panics must go away. -// All errors must be raised through errorf method. -// - Decoding using a chan is good, but incurs concurrency costs. -// This is because there's no fast way to use a channel without it -// having to switch goroutines constantly. -// Callback pattern is still the best. Maybe consider supporting something like: -// type X struct { -// Name string -// Ys []Y -// Ys chan <- Y -// Ys func(Y) -> call this function for each entry -// } -// - Consider adding a isZeroer interface { isZero() bool } -// It is used within isEmpty, for omitEmpty support. -// - Consider making Handle used AS-IS within the encoding/decoding session. -// This means that we don't cache Handle information within the (En|De)coder, -// except we really need it at Reset(...) -// - Consider adding math/big support -// - Consider reducing the size of the generated functions: -// Maybe use one loop, and put the conditionals in the loop. -// for ... { if cLen > 0 { if j == cLen { break } } else if dd.CheckBreak() { break } } diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/README.md b/cluster-autoscaler/vendor/github.com/ugorji/go/codec/README.md deleted file mode 100644 index 91cb3a27bd51..000000000000 --- a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/README.md +++ /dev/null @@ -1,148 +0,0 @@ -# Codec - -High Performance, Feature-Rich Idiomatic Go codec/encoding library for -binc, msgpack, cbor, json. 
- -Supported Serialization formats are: - - - msgpack: https://github.com/msgpack/msgpack - - binc: http://github.com/ugorji/binc - - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049 - - json: http://json.org http://tools.ietf.org/html/rfc7159 - - simple: - -To install: - - go get github.com/ugorji/go/codec - -This package understands the `unsafe` tag, to allow using unsafe semantics: - - - When decoding into a struct, you need to read the field name as a string - so you can find the struct field it is mapped to. - Using `unsafe` will bypass the allocation and copying overhead of `[]byte->string` conversion. - -To use it, you must pass the `unsafe` tag during install: - -``` -go install -tags=unsafe github.com/ugorji/go/codec -``` - -Online documentation: http://godoc.org/github.com/ugorji/go/codec -Detailed Usage/How-to Primer: http://ugorji.net/blog/go-codec-primer - -The idiomatic Go support is as seen in other encoding packages in -the standard library (ie json, xml, gob, etc). - -Rich Feature Set includes: - - - Simple but extremely powerful and feature-rich API - - Very High Performance. - Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X. - - Multiple conversions: - Package coerces types where appropriate - e.g. decode an int in the stream into a float, etc. - - Corner Cases: - Overflows, nil maps/slices, nil values in streams are handled correctly - - Standard field renaming via tags - - Support for omitting empty fields during an encoding - - Encoding from any value and decoding into pointer to any value - (struct, slice, map, primitives, pointers, interface{}, etc) - - Extensions to support efficient encoding/decoding of any named types - - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces - - Decoding without a schema (into an interface{}). - Includes Options to configure what specific map or slice type to use - when decoding an encoded list or map into a nil interface{} - - Encode a struct as an array, and decode struct from an array in the data stream - - Comprehensive support for anonymous fields - - Fast (no-reflection) encoding/decoding of common maps and slices - - Code-generation for faster performance. - - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats - - Support indefinite-length formats to enable true streaming - (for formats which support it e.g. json, cbor) - - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes. - This mostly applies to maps, where iteration order is non-deterministic. - - NIL in data stream decoded as zero value - - Never silently skip data when decoding. - User decides whether to return an error or silently skip data when keys or indexes - in the data stream do not map to fields in the struct. - - Encode/Decode from/to chan types (for iterative streaming support) - - Drop-in replacement for encoding/json. `json:` key in struct tag supported. - - Provides an RPC Server and Client Codec for net/rpc communication protocol. - - Handle unique idiosyncrasies of codecs e.g. - - For messagepack, configure how ambiguities in handling raw bytes are resolved - - For messagepack, provide rpc server/client codec to support - msgpack-rpc protocol defined at: - https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md - -## Extension Support - -Users can register a function to handle the encoding or decoding of -their custom types. - -There are no restrictions on what the custom type can be.
Some examples: - - type BisSet []int - type BitSet64 uint64 - type UUID string - type MyStructWithUnexportedFields struct { a int; b bool; c []int; } - type GifImage struct { ... } - -As an illustration, MyStructWithUnexportedFields would normally be -encoded as an empty map because it has no exported fields, while UUID -would be encoded as a string. However, with extension support, you can -encode any of these however you like. - -## RPC - -RPC Client and Server Codecs are implemented, so the codecs can be used -with the standard net/rpc package. - -## Usage - -Typical usage model: - - // create and configure Handle - var ( - bh codec.BincHandle - mh codec.MsgpackHandle - ch codec.CborHandle - ) - - mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) - - // configure extensions - // e.g. for msgpack, define functions and enable Time support for tag 1 - // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt) - - // create and use decoder/encoder - var ( - r io.Reader - w io.Writer - b []byte - h = &bh // or mh to use msgpack - ) - - dec = codec.NewDecoder(r, h) - dec = codec.NewDecoderBytes(b, h) - err = dec.Decode(&v) - - enc = codec.NewEncoder(w, h) - enc = codec.NewEncoderBytes(&b, h) - err = enc.Encode(v) - - //RPC Server - go func() { - for { - conn, err := listener.Accept() - rpcCodec := codec.GoRpc.ServerCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) - rpc.ServeCodec(rpcCodec) - } - }() - - //RPC Communication (client side) - conn, err = net.Dial("tcp", "localhost:5555") - rpcCodec := codec.GoRpc.ClientCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) - client := rpc.NewClientWithCodec(rpcCodec) - diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/binc.go b/cluster-autoscaler/vendor/github.com/ugorji/go/codec/binc.go deleted file mode 100644 index 33120dcb61f0..000000000000 --- a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/binc.go +++ /dev/null @@ -1,929 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "math" - "reflect" - "time" -) - -const bincDoPrune = true // No longer needed. Needed before as C lib did not support pruning. 
- -// vd as low 4 bits (there are 16 slots) -const ( - bincVdSpecial byte = iota - bincVdPosInt - bincVdNegInt - bincVdFloat - - bincVdString - bincVdByteArray - bincVdArray - bincVdMap - - bincVdTimestamp - bincVdSmallInt - bincVdUnicodeOther - bincVdSymbol - - bincVdDecimal - _ // open slot - _ // open slot - bincVdCustomExt = 0x0f -) - -const ( - bincSpNil byte = iota - bincSpFalse - bincSpTrue - bincSpNan - bincSpPosInf - bincSpNegInf - bincSpZeroFloat - bincSpZero - bincSpNegOne -) - -const ( - bincFlBin16 byte = iota - bincFlBin32 - _ // bincFlBin32e - bincFlBin64 - _ // bincFlBin64e - // others not currently supported -) - -type bincEncDriver struct { - e *Encoder - w encWriter - m map[string]uint16 // symbols - b [scratchByteArrayLen]byte - s uint16 // symbols sequencer - encNoSeparator -} - -func (e *bincEncDriver) IsBuiltinType(rt uintptr) bool { - return rt == timeTypId -} - -func (e *bincEncDriver) EncodeBuiltin(rt uintptr, v interface{}) { - if rt == timeTypId { - var bs []byte - switch x := v.(type) { - case time.Time: - bs = encodeTime(x) - case *time.Time: - bs = encodeTime(*x) - default: - e.e.errorf("binc error encoding builtin: expect time.Time, received %T", v) - } - e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs))) - e.w.writeb(bs) - } -} - -func (e *bincEncDriver) EncodeNil() { - e.w.writen1(bincVdSpecial<<4 | bincSpNil) -} - -func (e *bincEncDriver) EncodeBool(b bool) { - if b { - e.w.writen1(bincVdSpecial<<4 | bincSpTrue) - } else { - e.w.writen1(bincVdSpecial<<4 | bincSpFalse) - } -} - -func (e *bincEncDriver) EncodeFloat32(f float32) { - if f == 0 { - e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) - return - } - e.w.writen1(bincVdFloat<<4 | bincFlBin32) - bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f)) -} - -func (e *bincEncDriver) EncodeFloat64(f float64) { - if f == 0 { - e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) - return - } - bigen.PutUint64(e.b[:8], math.Float64bits(f)) - if bincDoPrune { - i := 7 - for ; i >= 0 && (e.b[i] == 0); i-- { - } - i++ - if i <= 6 { - e.w.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64) - e.w.writen1(byte(i)) - e.w.writeb(e.b[:i]) - return - } - } - e.w.writen1(bincVdFloat<<4 | bincFlBin64) - e.w.writeb(e.b[:8]) -} - -func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) { - if lim == 4 { - bigen.PutUint32(e.b[:lim], uint32(v)) - } else { - bigen.PutUint64(e.b[:lim], v) - } - if bincDoPrune { - i := pruneSignExt(e.b[:lim], pos) - e.w.writen1(bd | lim - 1 - byte(i)) - e.w.writeb(e.b[i:lim]) - } else { - e.w.writen1(bd | lim - 1) - e.w.writeb(e.b[:lim]) - } -} - -func (e *bincEncDriver) EncodeInt(v int64) { - const nbd byte = bincVdNegInt << 4 - if v >= 0 { - e.encUint(bincVdPosInt<<4, true, uint64(v)) - } else if v == -1 { - e.w.writen1(bincVdSpecial<<4 | bincSpNegOne) - } else { - e.encUint(bincVdNegInt<<4, false, uint64(-v)) - } -} - -func (e *bincEncDriver) EncodeUint(v uint64) { - e.encUint(bincVdPosInt<<4, true, v) -} - -func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) { - if v == 0 { - e.w.writen1(bincVdSpecial<<4 | bincSpZero) - } else if pos && v >= 1 && v <= 16 { - e.w.writen1(bincVdSmallInt<<4 | byte(v-1)) - } else if v <= math.MaxUint8 { - e.w.writen2(bd|0x0, byte(v)) - } else if v <= math.MaxUint16 { - e.w.writen1(bd | 0x01) - bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) - } else if v <= math.MaxUint32 { - e.encIntegerPrune(bd, pos, v, 4) - } else { - e.encIntegerPrune(bd, pos, v, 8) - } -} - -func (e *bincEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ 
*Encoder) { - bs := ext.WriteExt(rv) - if bs == nil { - e.EncodeNil() - return - } - e.encodeExtPreamble(uint8(xtag), len(bs)) - e.w.writeb(bs) -} - -func (e *bincEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { - e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) - e.w.writeb(re.Data) -} - -func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) { - e.encLen(bincVdCustomExt<<4, uint64(length)) - e.w.writen1(xtag) -} - -func (e *bincEncDriver) EncodeArrayStart(length int) { - e.encLen(bincVdArray<<4, uint64(length)) -} - -func (e *bincEncDriver) EncodeMapStart(length int) { - e.encLen(bincVdMap<<4, uint64(length)) -} - -func (e *bincEncDriver) EncodeString(c charEncoding, v string) { - l := uint64(len(v)) - e.encBytesLen(c, l) - if l > 0 { - e.w.writestr(v) - } -} - -func (e *bincEncDriver) EncodeSymbol(v string) { - // if WriteSymbolsNoRefs { - // e.encodeString(c_UTF8, v) - // return - // } - - //symbols only offer benefit when string length > 1. - //This is because strings with length 1 take only 2 bytes to store - //(bd with embedded length, and single byte for string val). - - l := len(v) - if l == 0 { - e.encBytesLen(c_UTF8, 0) - return - } else if l == 1 { - e.encBytesLen(c_UTF8, 1) - e.w.writen1(v[0]) - return - } - if e.m == nil { - e.m = make(map[string]uint16, 16) - } - ui, ok := e.m[v] - if ok { - if ui <= math.MaxUint8 { - e.w.writen2(bincVdSymbol<<4, byte(ui)) - } else { - e.w.writen1(bincVdSymbol<<4 | 0x8) - bigenHelper{e.b[:2], e.w}.writeUint16(ui) - } - } else { - e.s++ - ui = e.s - //ui = uint16(atomic.AddUint32(&e.s, 1)) - e.m[v] = ui - var lenprec uint8 - if l <= math.MaxUint8 { - // lenprec = 0 - } else if l <= math.MaxUint16 { - lenprec = 1 - } else if int64(l) <= math.MaxUint32 { - lenprec = 2 - } else { - lenprec = 3 - } - if ui <= math.MaxUint8 { - e.w.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui)) - } else { - e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec) - bigenHelper{e.b[:2], e.w}.writeUint16(ui) - } - if lenprec == 0 { - e.w.writen1(byte(l)) - } else if lenprec == 1 { - bigenHelper{e.b[:2], e.w}.writeUint16(uint16(l)) - } else if lenprec == 2 { - bigenHelper{e.b[:4], e.w}.writeUint32(uint32(l)) - } else { - bigenHelper{e.b[:8], e.w}.writeUint64(uint64(l)) - } - e.w.writestr(v) - } -} - -func (e *bincEncDriver) EncodeStringBytes(c charEncoding, v []byte) { - l := uint64(len(v)) - e.encBytesLen(c, l) - if l > 0 { - e.w.writeb(v) - } -} - -func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) { - //TODO: support bincUnicodeOther (for now, just use string or bytearray) - if c == c_RAW { - e.encLen(bincVdByteArray<<4, length) - } else { - e.encLen(bincVdString<<4, length) - } -} - -func (e *bincEncDriver) encLen(bd byte, l uint64) { - if l < 12 { - e.w.writen1(bd | uint8(l+4)) - } else { - e.encLenNumber(bd, l) - } -} - -func (e *bincEncDriver) encLenNumber(bd byte, v uint64) { - if v <= math.MaxUint8 { - e.w.writen2(bd, byte(v)) - } else if v <= math.MaxUint16 { - e.w.writen1(bd | 0x01) - bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) - } else if v <= math.MaxUint32 { - e.w.writen1(bd | 0x02) - bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v)) - } else { - e.w.writen1(bd | 0x03) - bigenHelper{e.b[:8], e.w}.writeUint64(uint64(v)) - } -} - -//------------------------------------ - -type bincDecSymbol struct { - s string - b []byte - i uint16 -} - -type bincDecDriver struct { - d *Decoder - h *BincHandle - r decReader - br bool // bytes reader - bdRead bool - bd byte - vd byte - vs byte - noStreamingCodec - decNoSeparator - b 
[scratchByteArrayLen]byte - - // linear searching on this slice is ok, - // because we typically expect < 32 symbols in each stream. - s []bincDecSymbol -} - -func (d *bincDecDriver) readNextBd() { - d.bd = d.r.readn1() - d.vd = d.bd >> 4 - d.vs = d.bd & 0x0f - d.bdRead = true -} - -func (d *bincDecDriver) uncacheRead() { - if d.bdRead { - d.r.unreadn1() - d.bdRead = false - } -} - -func (d *bincDecDriver) ContainerType() (vt valueType) { - if d.vd == bincVdSpecial && d.vs == bincSpNil { - return valueTypeNil - } else if d.vd == bincVdByteArray { - return valueTypeBytes - } else if d.vd == bincVdString { - return valueTypeString - } else if d.vd == bincVdArray { - return valueTypeArray - } else if d.vd == bincVdMap { - return valueTypeMap - } else { - // d.d.errorf("isContainerType: unsupported parameter: %v", vt) - } - return valueTypeUnset -} - -func (d *bincDecDriver) TryDecodeAsNil() bool { - if !d.bdRead { - d.readNextBd() - } - if d.bd == bincVdSpecial<<4|bincSpNil { - d.bdRead = false - return true - } - return false -} - -func (d *bincDecDriver) IsBuiltinType(rt uintptr) bool { - return rt == timeTypId -} - -func (d *bincDecDriver) DecodeBuiltin(rt uintptr, v interface{}) { - if !d.bdRead { - d.readNextBd() - } - if rt == timeTypId { - if d.vd != bincVdTimestamp { - d.d.errorf("Invalid d.vd. Expecting 0x%x. Received: 0x%x", bincVdTimestamp, d.vd) - return - } - tt, err := decodeTime(d.r.readx(int(d.vs))) - if err != nil { - panic(err) - } - var vt *time.Time = v.(*time.Time) - *vt = tt - d.bdRead = false - } -} - -func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) { - if vs&0x8 == 0 { - d.r.readb(d.b[0:defaultLen]) - } else { - l := d.r.readn1() - if l > 8 { - d.d.errorf("At most 8 bytes used to represent float. Received: %v bytes", l) - return - } - for i := l; i < 8; i++ { - d.b[i] = 0 - } - d.r.readb(d.b[0:l]) - } -} - -func (d *bincDecDriver) decFloat() (f float64) { - //if true { f = math.Float64frombits(bigen.Uint64(d.r.readx(8))); break; } - if x := d.vs & 0x7; x == bincFlBin32 { - d.decFloatPre(d.vs, 4) - f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4]))) - } else if x == bincFlBin64 { - d.decFloatPre(d.vs, 8) - f = math.Float64frombits(bigen.Uint64(d.b[0:8])) - } else { - d.d.errorf("only float32 and float64 are supported. 
d.vd: 0x%x, d.vs: 0x%x", d.vd, d.vs) - return - } - return -} - -func (d *bincDecDriver) decUint() (v uint64) { - // need to inline the code (interface conversion and type assertion expensive) - switch d.vs { - case 0: - v = uint64(d.r.readn1()) - case 1: - d.r.readb(d.b[6:8]) - v = uint64(bigen.Uint16(d.b[6:8])) - case 2: - d.b[4] = 0 - d.r.readb(d.b[5:8]) - v = uint64(bigen.Uint32(d.b[4:8])) - case 3: - d.r.readb(d.b[4:8]) - v = uint64(bigen.Uint32(d.b[4:8])) - case 4, 5, 6: - lim := int(7 - d.vs) - d.r.readb(d.b[lim:8]) - for i := 0; i < lim; i++ { - d.b[i] = 0 - } - v = uint64(bigen.Uint64(d.b[:8])) - case 7: - d.r.readb(d.b[:8]) - v = uint64(bigen.Uint64(d.b[:8])) - default: - d.d.errorf("unsigned integers with greater than 64 bits of precision not supported") - return - } - return -} - -func (d *bincDecDriver) decCheckInteger() (ui uint64, neg bool) { - if !d.bdRead { - d.readNextBd() - } - vd, vs := d.vd, d.vs - if vd == bincVdPosInt { - ui = d.decUint() - } else if vd == bincVdNegInt { - ui = d.decUint() - neg = true - } else if vd == bincVdSmallInt { - ui = uint64(d.vs) + 1 - } else if vd == bincVdSpecial { - if vs == bincSpZero { - //i = 0 - } else if vs == bincSpNegOne { - neg = true - ui = 1 - } else { - d.d.errorf("numeric decode fails for special value: d.vs: 0x%x", d.vs) - return - } - } else { - d.d.errorf("number can only be decoded from uint or int values. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd) - return - } - return -} - -func (d *bincDecDriver) DecodeInt(bitsize uint8) (i int64) { - ui, neg := d.decCheckInteger() - i, overflow := chkOvf.SignedInt(ui) - if overflow { - d.d.errorf("simple: overflow converting %v to signed integer", ui) - return - } - if neg { - i = -i - } - if chkOvf.Int(i, bitsize) { - d.d.errorf("binc: overflow integer: %v", i) - return - } - d.bdRead = false - return -} - -func (d *bincDecDriver) DecodeUint(bitsize uint8) (ui uint64) { - ui, neg := d.decCheckInteger() - if neg { - d.d.errorf("Assigning negative signed value to unsigned type") - return - } - if chkOvf.Uint(ui, bitsize) { - d.d.errorf("binc: overflow integer: %v", ui) - return - } - d.bdRead = false - return -} - -func (d *bincDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { - if !d.bdRead { - d.readNextBd() - } - vd, vs := d.vd, d.vs - if vd == bincVdSpecial { - d.bdRead = false - if vs == bincSpNan { - return math.NaN() - } else if vs == bincSpPosInf { - return math.Inf(1) - } else if vs == bincSpZeroFloat || vs == bincSpZero { - return - } else if vs == bincSpNegInf { - return math.Inf(-1) - } else { - d.d.errorf("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs) - return - } - } else if vd == bincVdFloat { - f = d.decFloat() - } else { - f = float64(d.DecodeInt(64)) - } - if chkOverflow32 && chkOvf.Float32(f) { - d.d.errorf("binc: float32 overflow: %v", f) - return - } - d.bdRead = false - return -} - -// bool can be decoded from bool only (single byte). -func (d *bincDecDriver) DecodeBool() (b bool) { - if !d.bdRead { - d.readNextBd() - } - if bd := d.bd; bd == (bincVdSpecial | bincSpFalse) { - // b = false - } else if bd == (bincVdSpecial | bincSpTrue) { - b = true - } else { - d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) - return - } - d.bdRead = false - return -} - -func (d *bincDecDriver) ReadMapStart() (length int) { - if d.vd != bincVdMap { - d.d.errorf("Invalid d.vd for map. Expecting 0x%x. 
Got: 0x%x", bincVdMap, d.vd) - return - } - length = d.decLen() - d.bdRead = false - return -} - -func (d *bincDecDriver) ReadArrayStart() (length int) { - if d.vd != bincVdArray { - d.d.errorf("Invalid d.vd for array. Expecting 0x%x. Got: 0x%x", bincVdArray, d.vd) - return - } - length = d.decLen() - d.bdRead = false - return -} - -func (d *bincDecDriver) decLen() int { - if d.vs > 3 { - return int(d.vs - 4) - } - return int(d.decLenNumber()) -} - -func (d *bincDecDriver) decLenNumber() (v uint64) { - if x := d.vs; x == 0 { - v = uint64(d.r.readn1()) - } else if x == 1 { - d.r.readb(d.b[6:8]) - v = uint64(bigen.Uint16(d.b[6:8])) - } else if x == 2 { - d.r.readb(d.b[4:8]) - v = uint64(bigen.Uint32(d.b[4:8])) - } else { - d.r.readb(d.b[:8]) - v = bigen.Uint64(d.b[:8]) - } - return -} - -func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool) (bs2 []byte, s string) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == bincVdSpecial<<4|bincSpNil { - d.bdRead = false - return - } - var slen int = -1 - // var ok bool - switch d.vd { - case bincVdString, bincVdByteArray: - slen = d.decLen() - if zerocopy { - if d.br { - bs2 = d.r.readx(slen) - } else if len(bs) == 0 { - bs2 = decByteSlice(d.r, slen, d.b[:]) - } else { - bs2 = decByteSlice(d.r, slen, bs) - } - } else { - bs2 = decByteSlice(d.r, slen, bs) - } - if withString { - s = string(bs2) - } - case bincVdSymbol: - // zerocopy doesn't apply for symbols, - // as the values must be stored in a table for later use. - // - //from vs: extract numSymbolBytes, containsStringVal, strLenPrecision, - //extract symbol - //if containsStringVal, read it and put in map - //else look in map for string value - var symbol uint16 - vs := d.vs - if vs&0x8 == 0 { - symbol = uint16(d.r.readn1()) - } else { - symbol = uint16(bigen.Uint16(d.r.readx(2))) - } - if d.s == nil { - d.s = make([]bincDecSymbol, 0, 16) - } - - if vs&0x4 == 0 { - for i := range d.s { - j := &d.s[i] - if j.i == symbol { - bs2 = j.b - if withString { - if j.s == "" && bs2 != nil { - j.s = string(bs2) - } - s = j.s - } - break - } - } - } else { - switch vs & 0x3 { - case 0: - slen = int(d.r.readn1()) - case 1: - slen = int(bigen.Uint16(d.r.readx(2))) - case 2: - slen = int(bigen.Uint32(d.r.readx(4))) - case 3: - slen = int(bigen.Uint64(d.r.readx(8))) - } - // since using symbols, do not store any part of - // the parameter bs in the map, as it might be a shared buffer. - // bs2 = decByteSlice(d.r, slen, bs) - bs2 = decByteSlice(d.r, slen, nil) - if withString { - s = string(bs2) - } - d.s = append(d.s, bincDecSymbol{i: symbol, s: s, b: bs2}) - } - default: - d.d.errorf("Invalid d.vd. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. Got: 0x%x", - bincVdString, bincVdByteArray, bincVdSymbol, d.vd) - return - } - d.bdRead = false - return -} - -func (d *bincDecDriver) DecodeString() (s string) { - // DecodeBytes does not accommodate symbols, whose impl stores string version in map. - // Use decStringAndBytes directly. - // return string(d.DecodeBytes(d.b[:], true, true)) - _, s = d.decStringAndBytes(d.b[:], true, true) - return -} - -func (d *bincDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { - if isstring { - bsOut, _ = d.decStringAndBytes(bs, false, zerocopy) - return - } - if !d.bdRead { - d.readNextBd() - } - if d.bd == bincVdSpecial<<4|bincSpNil { - d.bdRead = false - return nil - } - var clen int - if d.vd == bincVdString || d.vd == bincVdByteArray { - clen = d.decLen() - } else { - d.d.errorf("Invalid d.vd for bytes. 
Expecting string:0x%x or bytearray:0x%x. Got: 0x%x", - bincVdString, bincVdByteArray, d.vd) - return - } - d.bdRead = false - if zerocopy { - if d.br { - return d.r.readx(clen) - } else if len(bs) == 0 { - bs = d.b[:] - } - } - return decByteSlice(d.r, clen, bs) -} - -func (d *bincDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { - if xtag > 0xff { - d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag) - return - } - realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) - realxtag = uint64(realxtag1) - if ext == nil { - re := rv.(*RawExt) - re.Tag = realxtag - re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) - } else { - ext.ReadExt(rv, xbs) - } - return -} - -func (d *bincDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { - if !d.bdRead { - d.readNextBd() - } - if d.vd == bincVdCustomExt { - l := d.decLen() - xtag = d.r.readn1() - if verifyTag && xtag != tag { - d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) - return - } - xbs = d.r.readx(l) - } else if d.vd == bincVdByteArray { - xbs = d.DecodeBytes(nil, false, true) - } else { - d.d.errorf("Invalid d.vd for extensions (Expecting extensions or byte array). Got: 0x%x", d.vd) - return - } - d.bdRead = false - return -} - -func (d *bincDecDriver) DecodeNaked() { - if !d.bdRead { - d.readNextBd() - } - - n := &d.d.n - var decodeFurther bool - - switch d.vd { - case bincVdSpecial: - switch d.vs { - case bincSpNil: - n.v = valueTypeNil - case bincSpFalse: - n.v = valueTypeBool - n.b = false - case bincSpTrue: - n.v = valueTypeBool - n.b = true - case bincSpNan: - n.v = valueTypeFloat - n.f = math.NaN() - case bincSpPosInf: - n.v = valueTypeFloat - n.f = math.Inf(1) - case bincSpNegInf: - n.v = valueTypeFloat - n.f = math.Inf(-1) - case bincSpZeroFloat: - n.v = valueTypeFloat - n.f = float64(0) - case bincSpZero: - n.v = valueTypeUint - n.u = uint64(0) // int8(0) - case bincSpNegOne: - n.v = valueTypeInt - n.i = int64(-1) // int8(-1) - default: - d.d.errorf("decodeNaked: Unrecognized special value 0x%x", d.vs) - } - case bincVdSmallInt: - n.v = valueTypeUint - n.u = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1 - case bincVdPosInt: - n.v = valueTypeUint - n.u = d.decUint() - case bincVdNegInt: - n.v = valueTypeInt - n.i = -(int64(d.decUint())) - case bincVdFloat: - n.v = valueTypeFloat - n.f = d.decFloat() - case bincVdSymbol: - n.v = valueTypeSymbol - n.s = d.DecodeString() - case bincVdString: - n.v = valueTypeString - n.s = d.DecodeString() - case bincVdByteArray: - n.v = valueTypeBytes - n.l = d.DecodeBytes(nil, false, false) - case bincVdTimestamp: - n.v = valueTypeTimestamp - tt, err := decodeTime(d.r.readx(int(d.vs))) - if err != nil { - panic(err) - } - n.t = tt - case bincVdCustomExt: - n.v = valueTypeExt - l := d.decLen() - n.u = uint64(d.r.readn1()) - n.l = d.r.readx(l) - case bincVdArray: - n.v = valueTypeArray - decodeFurther = true - case bincVdMap: - n.v = valueTypeMap - decodeFurther = true - default: - d.d.errorf("decodeNaked: Unrecognized d.vd: 0x%x", d.vd) - } - - if !decodeFurther { - d.bdRead = false - } - if n.v == valueTypeUint && d.h.SignedInteger { - n.v = valueTypeInt - n.i = int64(n.u) - } - return -} - -//------------------------------------ - -//BincHandle is a Handle for the Binc Schema-Free Encoding Format -//defined at https://github.com/ugorji/binc . -// -//BincHandle currently supports all Binc features with the following EXCEPTIONS: -// - only integers up to 64 bits of precision are supported. -// big integers are unsupported. 
-// - Only IEEE 754 binary32 and binary64 floats are supported (ie Go float32 and float64 types). -// extended precision and decimal IEEE 754 floats are unsupported. -// - Only UTF-8 strings supported. -// Unicode_Other Binc types (UTF16, UTF32) are currently unsupported. -// -//Note that these EXCEPTIONS are temporary and full support is possible and may happen soon. -type BincHandle struct { - BasicHandle - binaryEncodingType -} - -func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { - return h.SetExt(rt, tag, &setExtWrapper{b: ext}) -} - -func (h *BincHandle) newEncDriver(e *Encoder) encDriver { - return &bincEncDriver{e: e, w: e.w} -} - -func (h *BincHandle) newDecDriver(d *Decoder) decDriver { - return &bincDecDriver{d: d, r: d.r, h: h, br: d.bytes} -} - -func (e *bincEncDriver) reset() { - e.w = e.e.w - e.s = 0 - e.m = nil -} - -func (d *bincDecDriver) reset() { - d.r = d.d.r - d.s = nil - d.bd, d.bdRead, d.vd, d.vs = 0, false, 0, 0 -} - -var _ decDriver = (*bincDecDriver)(nil) -var _ encDriver = (*bincEncDriver)(nil) diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/cbor.go b/cluster-autoscaler/vendor/github.com/ugorji/go/codec/cbor.go deleted file mode 100644 index 4fa349ac87f2..000000000000 --- a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/cbor.go +++ /dev/null @@ -1,592 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "math" - "reflect" -) - -const ( - cborMajorUint byte = iota - cborMajorNegInt - cborMajorBytes - cborMajorText - cborMajorArray - cborMajorMap - cborMajorTag - cborMajorOther -) - -const ( - cborBdFalse byte = 0xf4 + iota - cborBdTrue - cborBdNil - cborBdUndefined - cborBdExt - cborBdFloat16 - cborBdFloat32 - cborBdFloat64 -) - -const ( - cborBdIndefiniteBytes byte = 0x5f - cborBdIndefiniteString = 0x7f - cborBdIndefiniteArray = 0x9f - cborBdIndefiniteMap = 0xbf - cborBdBreak = 0xff -) - -const ( - CborStreamBytes byte = 0x5f - CborStreamString = 0x7f - CborStreamArray = 0x9f - CborStreamMap = 0xbf - CborStreamBreak = 0xff -) - -const ( - cborBaseUint byte = 0x00 - cborBaseNegInt = 0x20 - cborBaseBytes = 0x40 - cborBaseString = 0x60 - cborBaseArray = 0x80 - cborBaseMap = 0xa0 - cborBaseTag = 0xc0 - cborBaseSimple = 0xe0 -) - -// ------------------- - -type cborEncDriver struct { - noBuiltInTypes - encNoSeparator - e *Encoder - w encWriter - h *CborHandle - x [8]byte -} - -func (e *cborEncDriver) EncodeNil() { - e.w.writen1(cborBdNil) -} - -func (e *cborEncDriver) EncodeBool(b bool) { - if b { - e.w.writen1(cborBdTrue) - } else { - e.w.writen1(cborBdFalse) - } -} - -func (e *cborEncDriver) EncodeFloat32(f float32) { - e.w.writen1(cborBdFloat32) - bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f)) -} - -func (e *cborEncDriver) EncodeFloat64(f float64) { - e.w.writen1(cborBdFloat64) - bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f)) -} - -func (e *cborEncDriver) encUint(v uint64, bd byte) { - if v <= 0x17 { - e.w.writen1(byte(v) + bd) - } else if v <= math.MaxUint8 { - e.w.writen2(bd+0x18, uint8(v)) - } else if v <= math.MaxUint16 { - e.w.writen1(bd + 0x19) - bigenHelper{e.x[:2], e.w}.writeUint16(uint16(v)) - } else if v <= math.MaxUint32 { - e.w.writen1(bd + 0x1a) - bigenHelper{e.x[:4], e.w}.writeUint32(uint32(v)) - } else { // if v <= math.MaxUint64 { - e.w.writen1(bd + 0x1b) - bigenHelper{e.x[:8], e.w}.writeUint64(v) - } -} - -func (e *cborEncDriver) 
EncodeInt(v int64) { - if v < 0 { - e.encUint(uint64(-1-v), cborBaseNegInt) - } else { - e.encUint(uint64(v), cborBaseUint) - } -} - -func (e *cborEncDriver) EncodeUint(v uint64) { - e.encUint(v, cborBaseUint) -} - -func (e *cborEncDriver) encLen(bd byte, length int) { - e.encUint(uint64(length), bd) -} - -func (e *cborEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) { - e.encUint(uint64(xtag), cborBaseTag) - if v := ext.ConvertExt(rv); v == nil { - e.EncodeNil() - } else { - en.encode(v) - } -} - -func (e *cborEncDriver) EncodeRawExt(re *RawExt, en *Encoder) { - e.encUint(uint64(re.Tag), cborBaseTag) - if re.Data != nil { - en.encode(re.Data) - } else if re.Value == nil { - e.EncodeNil() - } else { - en.encode(re.Value) - } -} - -func (e *cborEncDriver) EncodeArrayStart(length int) { - e.encLen(cborBaseArray, length) -} - -func (e *cborEncDriver) EncodeMapStart(length int) { - e.encLen(cborBaseMap, length) -} - -func (e *cborEncDriver) EncodeString(c charEncoding, v string) { - e.encLen(cborBaseString, len(v)) - e.w.writestr(v) -} - -func (e *cborEncDriver) EncodeSymbol(v string) { - e.EncodeString(c_UTF8, v) -} - -func (e *cborEncDriver) EncodeStringBytes(c charEncoding, v []byte) { - if c == c_RAW { - e.encLen(cborBaseBytes, len(v)) - } else { - e.encLen(cborBaseString, len(v)) - } - e.w.writeb(v) -} - -// ---------------------- - -type cborDecDriver struct { - d *Decoder - h *CborHandle - r decReader - b [scratchByteArrayLen]byte - br bool // bytes reader - bdRead bool - bd byte - noBuiltInTypes - decNoSeparator -} - -func (d *cborDecDriver) readNextBd() { - d.bd = d.r.readn1() - d.bdRead = true -} - -func (d *cborDecDriver) uncacheRead() { - if d.bdRead { - d.r.unreadn1() - d.bdRead = false - } -} - -func (d *cborDecDriver) ContainerType() (vt valueType) { - if d.bd == cborBdNil { - return valueTypeNil - } else if d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString) { - return valueTypeBytes - } else if d.bd == cborBdIndefiniteString || (d.bd >= cborBaseString && d.bd < cborBaseArray) { - return valueTypeString - } else if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) { - return valueTypeArray - } else if d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag) { - return valueTypeMap - } else { - // d.d.errorf("isContainerType: unsupported parameter: %v", vt) - } - return valueTypeUnset -} - -func (d *cborDecDriver) TryDecodeAsNil() bool { - if !d.bdRead { - d.readNextBd() - } - // treat Nil and Undefined as nil values - if d.bd == cborBdNil || d.bd == cborBdUndefined { - d.bdRead = false - return true - } - return false -} - -func (d *cborDecDriver) CheckBreak() bool { - if !d.bdRead { - d.readNextBd() - } - if d.bd == cborBdBreak { - d.bdRead = false - return true - } - return false -} - -func (d *cborDecDriver) decUint() (ui uint64) { - v := d.bd & 0x1f - if v <= 0x17 { - ui = uint64(v) - } else { - if v == 0x18 { - ui = uint64(d.r.readn1()) - } else if v == 0x19 { - ui = uint64(bigen.Uint16(d.r.readx(2))) - } else if v == 0x1a { - ui = uint64(bigen.Uint32(d.r.readx(4))) - } else if v == 0x1b { - ui = uint64(bigen.Uint64(d.r.readx(8))) - } else { - d.d.errorf("decUint: Invalid descriptor: %v", d.bd) - return - } - } - return -} - -func (d *cborDecDriver) decCheckInteger() (neg bool) { - if !d.bdRead { - d.readNextBd() - } - major := d.bd >> 5 - if major == cborMajorUint { - } else if major == cborMajorNegInt { - neg = true - } else { - d.d.errorf("invalid major: %v (bd: %v)", 
major, d.bd) - return - } - return -} - -func (d *cborDecDriver) DecodeInt(bitsize uint8) (i int64) { - neg := d.decCheckInteger() - ui := d.decUint() - // check if this number can be converted to an int without overflow - var overflow bool - if neg { - if i, overflow = chkOvf.SignedInt(ui + 1); overflow { - d.d.errorf("cbor: overflow converting %v to signed integer", ui+1) - return - } - i = -i - } else { - if i, overflow = chkOvf.SignedInt(ui); overflow { - d.d.errorf("cbor: overflow converting %v to signed integer", ui) - return - } - } - if chkOvf.Int(i, bitsize) { - d.d.errorf("cbor: overflow integer: %v", i) - return - } - d.bdRead = false - return -} - -func (d *cborDecDriver) DecodeUint(bitsize uint8) (ui uint64) { - if d.decCheckInteger() { - d.d.errorf("Assigning negative signed value to unsigned type") - return - } - ui = d.decUint() - if chkOvf.Uint(ui, bitsize) { - d.d.errorf("cbor: overflow integer: %v", ui) - return - } - d.bdRead = false - return -} - -func (d *cborDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { - if !d.bdRead { - d.readNextBd() - } - if bd := d.bd; bd == cborBdFloat16 { - f = float64(math.Float32frombits(halfFloatToFloatBits(bigen.Uint16(d.r.readx(2))))) - } else if bd == cborBdFloat32 { - f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) - } else if bd == cborBdFloat64 { - f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) - } else if bd >= cborBaseUint && bd < cborBaseBytes { - f = float64(d.DecodeInt(64)) - } else { - d.d.errorf("Float only valid from float16/32/64: Invalid descriptor: %v", bd) - return - } - if chkOverflow32 && chkOvf.Float32(f) { - d.d.errorf("cbor: float32 overflow: %v", f) - return - } - d.bdRead = false - return -} - -// bool can be decoded from bool only (single byte). -func (d *cborDecDriver) DecodeBool() (b bool) { - if !d.bdRead { - d.readNextBd() - } - if bd := d.bd; bd == cborBdTrue { - b = true - } else if bd == cborBdFalse { - } else { - d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) - return - } - d.bdRead = false - return -} - -func (d *cborDecDriver) ReadMapStart() (length int) { - d.bdRead = false - if d.bd == cborBdIndefiniteMap { - return -1 - } - return d.decLen() -} - -func (d *cborDecDriver) ReadArrayStart() (length int) { - d.bdRead = false - if d.bd == cborBdIndefiniteArray { - return -1 - } - return d.decLen() -} - -func (d *cborDecDriver) decLen() int { - return int(d.decUint()) -} - -func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte) []byte { - d.bdRead = false - for { - if d.CheckBreak() { - break - } - if major := d.bd >> 5; major != cborMajorBytes && major != cborMajorText { - d.d.errorf("cbor: expect bytes or string major type in indefinite string/bytes; got: %v, byte: %v", major, d.bd) - return nil - } - n := d.decLen() - oldLen := len(bs) - newLen := oldLen + n - if newLen > cap(bs) { - bs2 := make([]byte, newLen, 2*cap(bs)+n) - copy(bs2, bs) - bs = bs2 - } else { - bs = bs[:newLen] - } - d.r.readb(bs[oldLen:newLen]) - // bs = append(bs, d.r.readn()...) 
- d.bdRead = false - } - d.bdRead = false - return bs -} - -func (d *cborDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == cborBdNil || d.bd == cborBdUndefined { - d.bdRead = false - return nil - } - if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString { - if bs == nil { - return d.decAppendIndefiniteBytes(nil) - } - return d.decAppendIndefiniteBytes(bs[:0]) - } - clen := d.decLen() - d.bdRead = false - if zerocopy { - if d.br { - return d.r.readx(clen) - } else if len(bs) == 0 { - bs = d.b[:] - } - } - return decByteSlice(d.r, clen, bs) -} - -func (d *cborDecDriver) DecodeString() (s string) { - return string(d.DecodeBytes(d.b[:], true, true)) -} - -func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { - if !d.bdRead { - d.readNextBd() - } - u := d.decUint() - d.bdRead = false - realxtag = u - if ext == nil { - re := rv.(*RawExt) - re.Tag = realxtag - d.d.decode(&re.Value) - } else if xtag != realxtag { - d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", realxtag, xtag) - return - } else { - var v interface{} - d.d.decode(&v) - ext.UpdateExt(rv, v) - } - d.bdRead = false - return -} - -func (d *cborDecDriver) DecodeNaked() { - if !d.bdRead { - d.readNextBd() - } - - n := &d.d.n - var decodeFurther bool - - switch d.bd { - case cborBdNil: - n.v = valueTypeNil - case cborBdFalse: - n.v = valueTypeBool - n.b = false - case cborBdTrue: - n.v = valueTypeBool - n.b = true - case cborBdFloat16, cborBdFloat32: - n.v = valueTypeFloat - n.f = d.DecodeFloat(true) - case cborBdFloat64: - n.v = valueTypeFloat - n.f = d.DecodeFloat(false) - case cborBdIndefiniteBytes: - n.v = valueTypeBytes - n.l = d.DecodeBytes(nil, false, false) - case cborBdIndefiniteString: - n.v = valueTypeString - n.s = d.DecodeString() - case cborBdIndefiniteArray: - n.v = valueTypeArray - decodeFurther = true - case cborBdIndefiniteMap: - n.v = valueTypeMap - decodeFurther = true - default: - switch { - case d.bd >= cborBaseUint && d.bd < cborBaseNegInt: - if d.h.SignedInteger { - n.v = valueTypeInt - n.i = d.DecodeInt(64) - } else { - n.v = valueTypeUint - n.u = d.DecodeUint(64) - } - case d.bd >= cborBaseNegInt && d.bd < cborBaseBytes: - n.v = valueTypeInt - n.i = d.DecodeInt(64) - case d.bd >= cborBaseBytes && d.bd < cborBaseString: - n.v = valueTypeBytes - n.l = d.DecodeBytes(nil, false, false) - case d.bd >= cborBaseString && d.bd < cborBaseArray: - n.v = valueTypeString - n.s = d.DecodeString() - case d.bd >= cborBaseArray && d.bd < cborBaseMap: - n.v = valueTypeArray - decodeFurther = true - case d.bd >= cborBaseMap && d.bd < cborBaseTag: - n.v = valueTypeMap - decodeFurther = true - case d.bd >= cborBaseTag && d.bd < cborBaseSimple: - n.v = valueTypeExt - n.u = d.decUint() - n.l = nil - // d.bdRead = false - // d.d.decode(&re.Value) // handled by decode itself. - // decodeFurther = true - default: - d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd) - return - } - } - - if !decodeFurther { - d.bdRead = false - } - return -} - -// ------------------------- - -// CborHandle is a Handle for the CBOR encoding format, -// defined at http://tools.ietf.org/html/rfc7049 and documented further at http://cbor.io . -// -// CBOR is comprehensively supported, including support for: -// - indefinite-length arrays/maps/bytes/strings -// - (extension) tags in range 0..0xffff (0 .. 
65535) -// - half, single and double-precision floats -// - all numbers (1, 2, 4 and 8-byte signed and unsigned integers) -// - nil, true, false, ... -// - arrays and maps, bytes and text strings -// -// None of the optional extensions (with tags) defined in the spec are supported out-of-the-box. -// Users can implement them as needed (using SetExt), including spec-documented ones: -// - timestamp, BigNum, BigFloat, Decimals, Encoded Text (e.g. URL, regexp, base64, MIME Message), etc. -// -// To encode with indefinite lengths (streaming), users will use -// (Must)Encode methods of *Encoder, along with writing CborStreamXXX constants. -// -// For example, to encode "one-byte" as an indefinite length string: -// var buf bytes.Buffer -// e := NewEncoder(&buf, new(CborHandle)) -// buf.WriteByte(CborStreamString) -// e.MustEncode("one-") -// e.MustEncode("byte") -// buf.WriteByte(CborStreamBreak) -// encodedBytes := buf.Bytes() -// var vv interface{} -// NewDecoderBytes(buf.Bytes(), new(CborHandle)).MustDecode(&vv) -// // Now, vv contains the same string "one-byte" -// -type CborHandle struct { - binaryEncodingType - BasicHandle -} - -func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { - return h.SetExt(rt, tag, &setExtWrapper{i: ext}) -} - -func (h *CborHandle) newEncDriver(e *Encoder) encDriver { - return &cborEncDriver{e: e, w: e.w, h: h} -} - -func (h *CborHandle) newDecDriver(d *Decoder) decDriver { - return &cborDecDriver{d: d, r: d.r, h: h, br: d.bytes} -} - -func (e *cborEncDriver) reset() { - e.w = e.e.w -} - -func (d *cborDecDriver) reset() { - d.r = d.d.r - d.bd, d.bdRead = 0, false -} - -var _ decDriver = (*cborDecDriver)(nil) -var _ encDriver = (*cborEncDriver)(nil) diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/decode.go b/cluster-autoscaler/vendor/github.com/ugorji/go/codec/decode.go deleted file mode 100644 index 52c1dfe836b0..000000000000 --- a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/decode.go +++ /dev/null @@ -1,2053 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "encoding" - "errors" - "fmt" - "io" - "reflect" - "time" -) - -// Some tagging information for error messages. -const ( - msgBadDesc = "Unrecognized descriptor byte" - msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v" -) - -var ( - onlyMapOrArrayCanDecodeIntoStructErr = errors.New("only encoded map or array can be decoded into a struct") - cannotDecodeIntoNilErr = errors.New("cannot decode into nil") -) - -// decReader abstracts the reading source, allowing implementations that can -// read from an io.Reader or directly off a byte slice with zero-copying. -type decReader interface { - unreadn1() - - // readx will use the implementation scratch buffer if possible i.e. n < len(scratchbuf), OR - // just return a view of the []byte being decoded from. - // Ensure you call detachZeroCopyBytes later if this needs to be sent outside codec control. - readx(n int) []byte - readb([]byte) - readn1() uint8 - readn1eof() (v uint8, eof bool) - numread() int // number of bytes read - track() - stopTrack() []byte -} - -type decReaderByteScanner interface { - io.Reader - io.ByteScanner -} - -type decDriver interface { - // this will check if the next token is a break. - CheckBreak() bool - TryDecodeAsNil() bool - // vt is one of: Bytes, String, Nil, Slice or Map. Return unSet if not known. 
- ContainerType() (vt valueType) - IsBuiltinType(rt uintptr) bool - DecodeBuiltin(rt uintptr, v interface{}) - - // DecodeNaked will decode primitives (number, bool, string, []byte) and RawExt. - // For maps and arrays, it will not do the decoding in-band, but will signal - // the decoder, so that is done later, by setting the decNaked.valueType field. - // - // Note: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types). - // for extensions, DecodeNaked must read the tag and the []byte if it exists. - // if the []byte is not read, then kInterfaceNaked will treat it as a Handle - // that stores the subsequent value in-band, and complete reading the RawExt. - // - // extensions should also use readx to decode them, for efficiency. - // kInterface will extract the detached byte slice if it has to pass it outside its realm. - DecodeNaked() - DecodeInt(bitsize uint8) (i int64) - DecodeUint(bitsize uint8) (ui uint64) - DecodeFloat(chkOverflow32 bool) (f float64) - DecodeBool() (b bool) - // DecodeString can also decode symbols. - // It looks redundant as DecodeBytes is available. - // However, some codecs (e.g. binc) support symbols and can - // return a pre-stored string value, meaning that it can bypass - // the cost of []byte->string conversion. - DecodeString() (s string) - - // DecodeBytes may be called directly, without going through reflection. - // Consequently, it must be designed to handle possible nil. - DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) - - // decodeExt will decode into a *RawExt or into an extension. - DecodeExt(v interface{}, xtag uint64, ext Ext) (realxtag uint64) - // decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) - ReadMapStart() int - ReadArrayStart() int - - reset() - uncacheRead() -} - -type decNoSeparator struct { -} - -func (_ decNoSeparator) ReadEnd() {} - -// func (_ decNoSeparator) uncacheRead() {} - -type DecodeOptions struct { - // MapType specifies type to use during schema-less decoding of a map in the stream. - // If nil, we use map[interface{}]interface{} - MapType reflect.Type - - // SliceType specifies type to use during schema-less decoding of an array in the stream. - // If nil, we use []interface{} - SliceType reflect.Type - - // MaxInitLen defines the initial length that we "make" a collection (slice, chan or map) with. - // If 0 or negative, we default to a sensible value based on the size of an element in the collection. - // - // For example, when decoding, a stream may say that it has MAX_UINT elements. - // We should not automatically provision a slice of that length, to prevent Out-Of-Memory crash. - // Instead, we provision up to MaxInitLen, fill that up, and start appending after that. - MaxInitLen int - - // If ErrorIfNoField, return an error when decoding a map - // from a codec stream into a struct, and no matching struct field is found. - ErrorIfNoField bool - - // If ErrorIfNoArrayExpand, return an error when decoding a slice/array that cannot be expanded. - // For example, the stream contains an array of 8 items, but you are decoding into a [4]T array, - // or you are decoding into a slice of length 4 which is non-addressable (and so cannot be set). - ErrorIfNoArrayExpand bool - - // If SignedInteger, use the int64 during schema-less decoding of unsigned values (not uint64). - SignedInteger bool - - // MapValueReset controls how we decode into a map value. - // - // By default, we MAY retrieve the mapping for a key, and then decode into that.
- However, especially with big maps, that retrieval may be expensive and unnecessary - if the stream already contains all that is necessary to recreate the value. - // - // If true, we will never retrieve the previous mapping, - // but rather decode into a new value and set that in the map. - // - // If false, we will retrieve the previous mapping if necessary e.g. - // the previous mapping is a pointer, or is a struct or array with pre-set state, - // or is an interface. - MapValueReset bool - - // InterfaceReset controls how we decode into an interface. - // - // By default, when we see a field that is an interface{...}, - // or a map with interface{...} value, we will attempt decoding into the - // "contained" value. - // - // However, this prevents us from reading a string into an interface{} - // that formerly contained a number. - // - // If true, we will decode into a new "blank" value, and set that in the interface. - // If false, we will decode into whatever is contained in the interface. - InterfaceReset bool - - // InternString controls interning of strings during decoding. - // - // Some handles, e.g. json, typically will read map keys as strings. - // If the set of keys is finite, it may help reduce allocation to - // look them up from a map (rather than allocate them afresh). - // - // Note: Handles will be smart when using the intern functionality. - // So not everything will be interned. - InternString bool - - // PreferArrayOverSlice controls whether to decode to an array or a slice. - // - // This only impacts decoding into a nil interface{}. - // Consequently, it has no effect on codecgen. - // - // *Note*: This only applies if using go1.5 and above, - // as it requires reflect.ArrayOf support which was absent before go1.5. - PreferArrayOverSlice bool -} - -// ------------------------------------ - -// ioDecByteScanner implements Read(), ReadByte(...), UnreadByte(...) methods -// of io.Reader, io.ByteScanner. -type ioDecByteScanner struct { - r io.Reader - l byte // last byte - ls byte // last byte status. 0: init-canDoNothing, 1: canRead, 2: canUnread - b [1]byte // tiny buffer for reading single bytes -} - -func (z *ioDecByteScanner) Read(p []byte) (n int, err error) { - var firstByte bool - if z.ls == 1 { - z.ls = 2 - p[0] = z.l - if len(p) == 1 { - n = 1 - return - } - firstByte = true - p = p[1:] - } - n, err = z.r.Read(p) - if n > 0 { - if err == io.EOF && n == len(p) { - err = nil // read was successful, so postpone EOF (till next time) - } - z.l = p[n-1] - z.ls = 2 - } - if firstByte { - n++ - } - return -} - -func (z *ioDecByteScanner) ReadByte() (c byte, err error) { - n, err := z.Read(z.b[:]) - if n == 1 { - c = z.b[0] - if err == io.EOF { - err = nil // read was successful, so postpone EOF (till next time) - } - } - return -} - -func (z *ioDecByteScanner) UnreadByte() (err error) { - x := z.ls - if x == 0 { - err = errors.New("cannot unread - nothing has been read") - } else if x == 1 { - err = errors.New("cannot unread - last byte has not been read") - } else if x == 2 { - z.ls = 1 - } - return -} - -// ioDecReader is a decReader that reads off an io.Reader -type ioDecReader struct { - br decReaderByteScanner - // temp byte array re-used internally for efficiency during read. - // shares buffer with Decoder, so we keep size of struct within 8 words.
- x *[scratchByteArrayLen]byte - bs ioDecByteScanner - n int // num read - tr []byte // tracking bytes read - trb bool -} - -func (z *ioDecReader) numread() int { - return z.n -} - -func (z *ioDecReader) readx(n int) (bs []byte) { - if n <= 0 { - return - } - if n < len(z.x) { - bs = z.x[:n] - } else { - bs = make([]byte, n) - } - if _, err := io.ReadAtLeast(z.br, bs, n); err != nil { - panic(err) - } - z.n += len(bs) - if z.trb { - z.tr = append(z.tr, bs...) - } - return -} - -func (z *ioDecReader) readb(bs []byte) { - if len(bs) == 0 { - return - } - n, err := io.ReadAtLeast(z.br, bs, len(bs)) - z.n += n - if err != nil { - panic(err) - } - if z.trb { - z.tr = append(z.tr, bs...) - } -} - -func (z *ioDecReader) readn1() (b uint8) { - b, err := z.br.ReadByte() - if err != nil { - panic(err) - } - z.n++ - if z.trb { - z.tr = append(z.tr, b) - } - return b -} - -func (z *ioDecReader) readn1eof() (b uint8, eof bool) { - b, err := z.br.ReadByte() - if err == nil { - z.n++ - if z.trb { - z.tr = append(z.tr, b) - } - } else if err == io.EOF { - eof = true - } else { - panic(err) - } - return -} - -func (z *ioDecReader) unreadn1() { - err := z.br.UnreadByte() - if err != nil { - panic(err) - } - z.n-- - if z.trb { - if l := len(z.tr) - 1; l >= 0 { - z.tr = z.tr[:l] - } - } -} - -func (z *ioDecReader) track() { - if z.tr != nil { - z.tr = z.tr[:0] - } - z.trb = true -} - -func (z *ioDecReader) stopTrack() (bs []byte) { - z.trb = false - return z.tr -} - -// ------------------------------------ - -var bytesDecReaderCannotUnreadErr = errors.New("cannot unread last byte read") - -// bytesDecReader is a decReader that reads off a byte slice with zero copying -type bytesDecReader struct { - b []byte // data - c int // cursor - a int // available - t int // track start -} - -func (z *bytesDecReader) reset(in []byte) { - z.b = in - z.a = len(in) - z.c = 0 - z.t = 0 -} - -func (z *bytesDecReader) numread() int { - return z.c -} - -func (z *bytesDecReader) unreadn1() { - if z.c == 0 || len(z.b) == 0 { - panic(bytesDecReaderCannotUnreadErr) - } - z.c-- - z.a++ - return -} - -func (z *bytesDecReader) readx(n int) (bs []byte) { - // slicing from a non-constant start position is more expensive, - // as more computation is required to decipher the pointer start position. - // However, we do it only once, and it's better than reslicing both z.b and return value. 
- - if n <= 0 { - } else if z.a == 0 { - panic(io.EOF) - } else if n > z.a { - panic(io.ErrUnexpectedEOF) - } else { - c0 := z.c - z.c = c0 + n - z.a = z.a - n - bs = z.b[c0:z.c] - } - return -} - -func (z *bytesDecReader) readn1() (v uint8) { - if z.a == 0 { - panic(io.EOF) - } - v = z.b[z.c] - z.c++ - z.a-- - return -} - -func (z *bytesDecReader) readn1eof() (v uint8, eof bool) { - if z.a == 0 { - eof = true - return - } - v = z.b[z.c] - z.c++ - z.a-- - return -} - -func (z *bytesDecReader) readb(bs []byte) { - copy(bs, z.readx(len(bs))) -} - -func (z *bytesDecReader) track() { - z.t = z.c -} - -func (z *bytesDecReader) stopTrack() (bs []byte) { - return z.b[z.t:z.c] -} - -// ------------------------------------ - -type decFnInfo struct { - d *Decoder - ti *typeInfo - xfFn Ext - xfTag uint64 - seq seqType -} - -// ---------------------------------------- - -type decFn struct { - i decFnInfo - f func(*decFnInfo, reflect.Value) -} - -func (f *decFnInfo) builtin(rv reflect.Value) { - f.d.d.DecodeBuiltin(f.ti.rtid, rv.Addr().Interface()) -} - -func (f *decFnInfo) rawExt(rv reflect.Value) { - f.d.d.DecodeExt(rv.Addr().Interface(), 0, nil) -} - -func (f *decFnInfo) raw(rv reflect.Value) { - rv.SetBytes(f.d.raw()) -} - -func (f *decFnInfo) ext(rv reflect.Value) { - f.d.d.DecodeExt(rv.Addr().Interface(), f.xfTag, f.xfFn) -} - -func (f *decFnInfo) getValueForUnmarshalInterface(rv reflect.Value, indir int8) (v interface{}) { - if indir == -1 { - v = rv.Addr().Interface() - } else if indir == 0 { - v = rv.Interface() - } else { - for j := int8(0); j < indir; j++ { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) - } - rv = rv.Elem() - } - v = rv.Interface() - } - return -} - -func (f *decFnInfo) selferUnmarshal(rv reflect.Value) { - f.getValueForUnmarshalInterface(rv, f.ti.csIndir).(Selfer).CodecDecodeSelf(f.d) -} - -func (f *decFnInfo) binaryUnmarshal(rv reflect.Value) { - bm := f.getValueForUnmarshalInterface(rv, f.ti.bunmIndir).(encoding.BinaryUnmarshaler) - xbs := f.d.d.DecodeBytes(nil, false, true) - if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil { - panic(fnerr) - } -} - -func (f *decFnInfo) textUnmarshal(rv reflect.Value) { - tm := f.getValueForUnmarshalInterface(rv, f.ti.tunmIndir).(encoding.TextUnmarshaler) - fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true)) - if fnerr != nil { - panic(fnerr) - } -} - -func (f *decFnInfo) jsonUnmarshal(rv reflect.Value) { - tm := f.getValueForUnmarshalInterface(rv, f.ti.junmIndir).(jsonUnmarshaler) - // bs := f.d.d.DecodeBytes(f.d.b[:], true, true) - // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. 
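// Sketch of the unmarshaler dispatch above: a type implementing
// encoding.TextUnmarshaler (or BinaryUnmarshaler / json.Unmarshaler,
// depending on the handle) is handed the stream's bytes to decode itself.
// The Temperature type and input are hypothetical (assumes imports
// strconv and strings):
//
//	type Temperature float64
//
//	func (t *Temperature) UnmarshalText(b []byte) error {
//		f, err := strconv.ParseFloat(strings.TrimSuffix(string(b), "C"), 64)
//		*t = Temperature(f)
//		return err
//	}
//
//	var temp Temperature
//	err := codec.NewDecoderBytes([]byte(`"21.5C"`), &codec.JsonHandle{}).Decode(&temp)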
- fnerr := tm.UnmarshalJSON(f.d.nextValueBytes())
- if fnerr != nil {
- panic(fnerr)
- }
-}
-
-func (f *decFnInfo) kErr(rv reflect.Value) {
- f.d.errorf("no decoding function defined for kind %v", rv.Kind())
-}
-
-func (f *decFnInfo) kString(rv reflect.Value) {
- rv.SetString(f.d.d.DecodeString())
-}
-
-func (f *decFnInfo) kBool(rv reflect.Value) {
- rv.SetBool(f.d.d.DecodeBool())
-}
-
-func (f *decFnInfo) kInt(rv reflect.Value) {
- rv.SetInt(f.d.d.DecodeInt(intBitsize))
-}
-
-func (f *decFnInfo) kInt64(rv reflect.Value) {
- rv.SetInt(f.d.d.DecodeInt(64))
-}
-
-func (f *decFnInfo) kInt32(rv reflect.Value) {
- rv.SetInt(f.d.d.DecodeInt(32))
-}
-
-func (f *decFnInfo) kInt8(rv reflect.Value) {
- rv.SetInt(f.d.d.DecodeInt(8))
-}
-
-func (f *decFnInfo) kInt16(rv reflect.Value) {
- rv.SetInt(f.d.d.DecodeInt(16))
-}
-
-func (f *decFnInfo) kFloat32(rv reflect.Value) {
- rv.SetFloat(f.d.d.DecodeFloat(true))
-}
-
-func (f *decFnInfo) kFloat64(rv reflect.Value) {
- rv.SetFloat(f.d.d.DecodeFloat(false))
-}
-
-func (f *decFnInfo) kUint8(rv reflect.Value) {
- rv.SetUint(f.d.d.DecodeUint(8))
-}
-
-func (f *decFnInfo) kUint64(rv reflect.Value) {
- rv.SetUint(f.d.d.DecodeUint(64))
-}
-
-func (f *decFnInfo) kUint(rv reflect.Value) {
- rv.SetUint(f.d.d.DecodeUint(uintBitsize))
-}
-
-func (f *decFnInfo) kUintptr(rv reflect.Value) {
- rv.SetUint(f.d.d.DecodeUint(uintBitsize))
-}
-
-func (f *decFnInfo) kUint32(rv reflect.Value) {
- rv.SetUint(f.d.d.DecodeUint(32))
-}
-
-func (f *decFnInfo) kUint16(rv reflect.Value) {
- rv.SetUint(f.d.d.DecodeUint(16))
-}
-
-// func (f *decFnInfo) kPtr(rv reflect.Value) {
-// debugf(">>>>>>> ??? decode kPtr called - shouldn't get called")
-// if rv.IsNil() {
-// rv.Set(reflect.New(rv.Type().Elem()))
-// }
-// f.d.decodeValue(rv.Elem())
-// }
-
-// var kIntfCtr uint64
-
-func (f *decFnInfo) kInterfaceNaked() (rvn reflect.Value) {
- // nil interface:
- // use some heuristics to decode it appropriately
- // based on the detected next value in the stream.
- d := f.d
- d.d.DecodeNaked()
- n := &d.n
- if n.v == valueTypeNil {
- return
- }
- // We cannot decode non-nil stream value into nil interface with methods (e.g. io.Reader).
- // if num := f.ti.rt.NumMethod(); num > 0 { - if f.ti.numMeth > 0 { - d.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth) - return - } - // var useRvn bool - switch n.v { - case valueTypeMap: - // if d.h.MapType == nil || d.h.MapType == mapIntfIntfTyp { - // } else if d.h.MapType == mapStrIntfTyp { // for json performance - // } - if d.mtid == 0 || d.mtid == mapIntfIntfTypId { - l := len(n.ms) - n.ms = append(n.ms, nil) - var v2 interface{} = &n.ms[l] - d.decode(v2) - rvn = reflect.ValueOf(v2).Elem() - n.ms = n.ms[:l] - } else if d.mtid == mapStrIntfTypId { // for json performance - l := len(n.ns) - n.ns = append(n.ns, nil) - var v2 interface{} = &n.ns[l] - d.decode(v2) - rvn = reflect.ValueOf(v2).Elem() - n.ns = n.ns[:l] - } else { - rvn = reflect.New(d.h.MapType).Elem() - d.decodeValue(rvn, nil) - } - case valueTypeArray: - // if d.h.SliceType == nil || d.h.SliceType == intfSliceTyp { - if d.stid == 0 || d.stid == intfSliceTypId { - l := len(n.ss) - n.ss = append(n.ss, nil) - var v2 interface{} = &n.ss[l] - d.decode(v2) - n.ss = n.ss[:l] - rvn = reflect.ValueOf(v2).Elem() - if reflectArrayOfSupported && d.stid == 0 && d.h.PreferArrayOverSlice { - rvn = reflectArrayOf(rvn) - } - } else { - rvn = reflect.New(d.h.SliceType).Elem() - d.decodeValue(rvn, nil) - } - case valueTypeExt: - var v interface{} - tag, bytes := n.u, n.l // calling decode below might taint the values - if bytes == nil { - l := len(n.is) - n.is = append(n.is, nil) - v2 := &n.is[l] - d.decode(v2) - v = *v2 - n.is = n.is[:l] - } - bfn := d.h.getExtForTag(tag) - if bfn == nil { - var re RawExt - re.Tag = tag - re.Data = detachZeroCopyBytes(d.bytes, nil, bytes) - rvn = reflect.ValueOf(re) - } else { - rvnA := reflect.New(bfn.rt) - rvn = rvnA.Elem() - if bytes != nil { - bfn.ext.ReadExt(rvnA.Interface(), bytes) - } else { - bfn.ext.UpdateExt(rvnA.Interface(), v) - } - } - case valueTypeNil: - // no-op - case valueTypeInt: - rvn = reflect.ValueOf(&n.i).Elem() - case valueTypeUint: - rvn = reflect.ValueOf(&n.u).Elem() - case valueTypeFloat: - rvn = reflect.ValueOf(&n.f).Elem() - case valueTypeBool: - rvn = reflect.ValueOf(&n.b).Elem() - case valueTypeString, valueTypeSymbol: - rvn = reflect.ValueOf(&n.s).Elem() - case valueTypeBytes: - rvn = reflect.ValueOf(&n.l).Elem() - case valueTypeTimestamp: - rvn = reflect.ValueOf(&n.t).Elem() - default: - panic(fmt.Errorf("kInterfaceNaked: unexpected valueType: %d", n.v)) - } - return -} - -func (f *decFnInfo) kInterface(rv reflect.Value) { - // debugf("\t===> kInterface") - - // Note: - // A consequence of how kInterface works, is that - // if an interface already contains something, we try - // to decode into what was there before. - // We do not replace with a generic value (as got from decodeNaked). - - var rvn reflect.Value - if rv.IsNil() { - rvn = f.kInterfaceNaked() - if rvn.IsValid() { - rv.Set(rvn) - } - } else if f.d.h.InterfaceReset { - rvn = f.kInterfaceNaked() - if rvn.IsValid() { - rv.Set(rvn) - } else { - // reset to zero value based on current type in there. - rv.Set(reflect.Zero(rv.Elem().Type())) - } - } else { - rvn = rv.Elem() - // Note: interface{} is settable, but underlying type may not be. - // Consequently, we have to set the reflect.Value directly. - // if underlying type is settable (e.g. ptr or interface), - // we just decode into it. - // Else we create a settable value, decode into it, and set on the interface. 
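// Sketch of kInterfaceNaked in action: decoding into a nil interface{} picks
// concrete types from the stream, and the handle's MapType/SliceType override
// the defaults. The default-map-type claim reflects the code above (d.mtid == 0
// falls back to map[interface{}]interface{}); the input is illustrative:
//
//	var h codec.JsonHandle
//	var v interface{}
//	_ = codec.NewDecoderBytes([]byte(`{"a":[1,2]}`), &h).Decode(&v)
//	// v now holds a map[interface{}]interface{}
//
//	h.MapType = reflect.TypeOf(map[string]interface{}(nil))
//	v = nil
//	_ = codec.NewDecoderBytes([]byte(`{"a":[1,2]}`), &h).Decode(&v)
//	// v now holds a map[string]interface{}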
- if rvn.CanSet() { - f.d.decodeValue(rvn, nil) - } else { - rvn2 := reflect.New(rvn.Type()).Elem() - rvn2.Set(rvn) - f.d.decodeValue(rvn2, nil) - rv.Set(rvn2) - } - } -} - -func (f *decFnInfo) kStruct(rv reflect.Value) { - fti := f.ti - d := f.d - dd := d.d - cr := d.cr - ctyp := dd.ContainerType() - if ctyp == valueTypeMap { - containerLen := dd.ReadMapStart() - if containerLen == 0 { - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return - } - tisfi := fti.sfi - hasLen := containerLen >= 0 - if hasLen { - for j := 0; j < containerLen; j++ { - // rvkencname := dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapKey) - } - rvkencname := stringView(dd.DecodeBytes(f.d.b[:], true, true)) - // rvksi := ti.getForEncName(rvkencname) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if k := fti.indexForEncName(rvkencname); k > -1 { - si := tisfi[k] - if dd.TryDecodeAsNil() { - si.setToZeroValue(rv) - } else { - d.decodeValue(si.field(rv, true), nil) - } - } else { - d.structFieldNotFound(-1, rvkencname) - } - } - } else { - for j := 0; !dd.CheckBreak(); j++ { - // rvkencname := dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapKey) - } - rvkencname := stringView(dd.DecodeBytes(f.d.b[:], true, true)) - // rvksi := ti.getForEncName(rvkencname) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if k := fti.indexForEncName(rvkencname); k > -1 { - si := tisfi[k] - if dd.TryDecodeAsNil() { - si.setToZeroValue(rv) - } else { - d.decodeValue(si.field(rv, true), nil) - } - } else { - d.structFieldNotFound(-1, rvkencname) - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - } else if ctyp == valueTypeArray { - containerLen := dd.ReadArrayStart() - if containerLen == 0 { - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } - return - } - // Not much gain from doing it two ways for array. - // Arrays are not used as much for structs. - hasLen := containerLen >= 0 - for j, si := range fti.sfip { - if hasLen { - if j == containerLen { - break - } - } else if dd.CheckBreak() { - break - } - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - if dd.TryDecodeAsNil() { - si.setToZeroValue(rv) - } else { - d.decodeValue(si.field(rv, true), nil) - } - } - if containerLen > len(fti.sfip) { - // read remaining values and throw away - for j := len(fti.sfip); j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - d.structFieldNotFound(j, "") - } - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } - } else { - f.d.error(onlyMapOrArrayCanDecodeIntoStructErr) - return - } -} - -func (f *decFnInfo) kSlice(rv reflect.Value) { - // A slice can be set from a map or array in stream. - // This way, the order can be kept (as order is lost with map). 
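// Sketch of the struct-from-map path in kStruct above: stream keys are matched
// to fields by their encoded names (codec struct tags), and unmatched keys are
// swallowed unless ErrorIfNoField is set. The type and input are illustrative:
//
//	type User struct {
//		Name string `codec:"name"`
//		Age  int    `codec:"age"`
//	}
//
//	var h codec.JsonHandle
//	h.ErrorIfNoField = true // error on unknown keys instead of skipping them
//	var u User
//	err := codec.NewDecoderBytes([]byte(`{"name":"ann","age":41}`), &h).Decode(&u)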
- ti := f.ti - d := f.d - dd := d.d - rtelem0 := ti.rt.Elem() - ctyp := dd.ContainerType() - if ctyp == valueTypeBytes || ctyp == valueTypeString { - // you can only decode bytes or string in the stream into a slice or array of bytes - if !(ti.rtid == uint8SliceTypId || rtelem0.Kind() == reflect.Uint8) { - f.d.errorf("bytes or string in the stream must be decoded into a slice or array of bytes, not %v", ti.rt) - } - if f.seq == seqTypeChan { - bs2 := dd.DecodeBytes(nil, false, true) - ch := rv.Interface().(chan<- byte) - for _, b := range bs2 { - ch <- b - } - } else { - rvbs := rv.Bytes() - bs2 := dd.DecodeBytes(rvbs, false, false) - if rvbs == nil && bs2 != nil || rvbs != nil && bs2 == nil || len(bs2) != len(rvbs) { - if rv.CanSet() { - rv.SetBytes(bs2) - } else { - copy(rvbs, bs2) - } - } - } - return - } - - // array := f.seq == seqTypeChan - - slh, containerLenS := d.decSliceHelperStart() // only expects valueType(Array|Map) - - // // an array can never return a nil slice. so no need to check f.array here. - if containerLenS == 0 { - if f.seq == seqTypeSlice { - if rv.IsNil() { - rv.Set(reflect.MakeSlice(ti.rt, 0, 0)) - } else { - rv.SetLen(0) - } - } else if f.seq == seqTypeChan { - if rv.IsNil() { - rv.Set(reflect.MakeChan(ti.rt, 0)) - } - } - slh.End() - return - } - - rtelem := rtelem0 - for rtelem.Kind() == reflect.Ptr { - rtelem = rtelem.Elem() - } - fn := d.getDecFn(rtelem, true, true) - - var rv0, rv9 reflect.Value - rv0 = rv - rvChanged := false - - // for j := 0; j < containerLenS; j++ { - var rvlen int - if containerLenS > 0 { // hasLen - if f.seq == seqTypeChan { - if rv.IsNil() { - rvlen, _ = decInferLen(containerLenS, f.d.h.MaxInitLen, int(rtelem0.Size())) - rv.Set(reflect.MakeChan(ti.rt, rvlen)) - } - // handle chan specially: - for j := 0; j < containerLenS; j++ { - rv9 = reflect.New(rtelem0).Elem() - slh.ElemContainerState(j) - d.decodeValue(rv9, fn) - rv.Send(rv9) - } - } else { // slice or array - var truncated bool // says len of sequence is not same as expected number of elements - numToRead := containerLenS // if truncated, reset numToRead - - rvcap := rv.Cap() - rvlen = rv.Len() - if containerLenS > rvcap { - if f.seq == seqTypeArray { - d.arrayCannotExpand(rvlen, containerLenS) - } else { - oldRvlenGtZero := rvlen > 0 - rvlen, truncated = decInferLen(containerLenS, f.d.h.MaxInitLen, int(rtelem0.Size())) - if truncated { - if rvlen <= rvcap { - rv.SetLen(rvlen) - } else { - rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) - rvChanged = true - } - } else { - rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) - rvChanged = true - } - if rvChanged && oldRvlenGtZero && !isImmutableKind(rtelem0.Kind()) { - reflect.Copy(rv, rv0) // only copy up to length NOT cap i.e. rv0.Slice(0, rvcap) - } - rvcap = rvlen - } - numToRead = rvlen - } else if containerLenS != rvlen { - if f.seq == seqTypeSlice { - rv.SetLen(containerLenS) - rvlen = containerLenS - } - } - j := 0 - // we read up to the numToRead - for ; j < numToRead; j++ { - slh.ElemContainerState(j) - d.decodeValue(rv.Index(j), fn) - } - - // if slice, expand and read up to containerLenS (or EOF) iff truncated - // if array, swallow all the rest. 
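// Sketch of the slice sizing logic above: decoding reuses the destination's
// backing array when capacity allows, adjusting only the length. The numbers
// are illustrative:
//
//	buf := make([]int, 8) // existing slice whose capacity can be reused
//	var h codec.JsonHandle
//	_ = codec.NewDecoderBytes([]byte(`[1,2,3]`), &h).Decode(&buf)
//	// len(buf) == 3: length was set down to match the stream, no reallocation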
- - if f.seq == seqTypeArray { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } else if truncated { // slice was truncated, as chan NOT in this block - for ; j < containerLenS; j++ { - rv = expandSliceValue(rv, 1) - rv9 = rv.Index(j) - if resetSliceElemToZeroValue { - rv9.Set(reflect.Zero(rtelem0)) - } - slh.ElemContainerState(j) - d.decodeValue(rv9, fn) - } - } - } - } else { - rvlen = rv.Len() - j := 0 - for ; !dd.CheckBreak(); j++ { - if f.seq == seqTypeChan { - slh.ElemContainerState(j) - rv9 = reflect.New(rtelem0).Elem() - d.decodeValue(rv9, fn) - rv.Send(rv9) - } else { - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= rvlen { - if f.seq == seqTypeArray { - d.arrayCannotExpand(rvlen, j+1) - decodeIntoBlank = true - } else { // if f.seq == seqTypeSlice - // rv = reflect.Append(rv, reflect.Zero(rtelem0)) // uses append logic, plus varargs - rv = expandSliceValue(rv, 1) - rv9 = rv.Index(j) - // rv.Index(rv.Len() - 1).Set(reflect.Zero(rtelem0)) - if resetSliceElemToZeroValue { - rv9.Set(reflect.Zero(rtelem0)) - } - rvlen++ - rvChanged = true - } - } else { // slice or array - rv9 = rv.Index(j) - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else { // seqTypeSlice - d.decodeValue(rv9, fn) - } - } - } - if f.seq == seqTypeSlice { - if j < rvlen { - rv.SetLen(j) - } else if j == 0 && rv.IsNil() { - rv = reflect.MakeSlice(ti.rt, 0, 0) - rvChanged = true - } - } - } - slh.End() - - if rvChanged { - rv0.Set(rv) - } -} - -func (f *decFnInfo) kArray(rv reflect.Value) { - // f.d.decodeValue(rv.Slice(0, rv.Len())) - f.kSlice(rv.Slice(0, rv.Len())) -} - -func (f *decFnInfo) kMap(rv reflect.Value) { - d := f.d - dd := d.d - containerLen := dd.ReadMapStart() - cr := d.cr - ti := f.ti - if rv.IsNil() { - rv.Set(reflect.MakeMap(ti.rt)) - } - - if containerLen == 0 { - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return - } - - ktype, vtype := ti.rt.Key(), ti.rt.Elem() - ktypeId := reflect.ValueOf(ktype).Pointer() - vtypeKind := vtype.Kind() - var keyFn, valFn *decFn - var xtyp reflect.Type - for xtyp = ktype; xtyp.Kind() == reflect.Ptr; xtyp = xtyp.Elem() { - } - keyFn = d.getDecFn(xtyp, true, true) - for xtyp = vtype; xtyp.Kind() == reflect.Ptr; xtyp = xtyp.Elem() { - } - valFn = d.getDecFn(xtyp, true, true) - var mapGet, mapSet bool - if !f.d.h.MapValueReset { - // if pointer, mapGet = true - // if interface, mapGet = true if !DecodeNakedAlways (else false) - // if builtin, mapGet = false - // else mapGet = true - if vtypeKind == reflect.Ptr { - mapGet = true - } else if vtypeKind == reflect.Interface { - if !f.d.h.InterfaceReset { - mapGet = true - } - } else if !isImmutableKind(vtypeKind) { - mapGet = true - } - } - - var rvk, rvv, rvz reflect.Value - - // for j := 0; j < containerLen; j++ { - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - rvk = reflect.New(ktype).Elem() - if cr != nil { - cr.sendContainerState(containerMapKey) - } - d.decodeValue(rvk, keyFn) - - // special case if a byte array. 
- if ktypeId == intfTypId {
- rvk = rvk.Elem()
- if rvk.Type() == uint8SliceTyp {
- rvk = reflect.ValueOf(d.string(rvk.Bytes()))
- }
- }
- mapSet = true // set to false if you do a get, and it's a pointer, and it exists
- if mapGet {
- rvv = rv.MapIndex(rvk)
- if rvv.IsValid() {
- if vtypeKind == reflect.Ptr {
- mapSet = false
- }
- } else {
- if rvz.IsValid() {
- rvz.Set(reflect.Zero(vtype))
- } else {
- rvz = reflect.New(vtype).Elem()
- }
- rvv = rvz
- }
- } else {
- if rvz.IsValid() {
- rvz.Set(reflect.Zero(vtype))
- } else {
- rvz = reflect.New(vtype).Elem()
- }
- rvv = rvz
- }
- if cr != nil {
- cr.sendContainerState(containerMapValue)
- }
- d.decodeValue(rvv, valFn)
- if mapSet {
- rv.SetMapIndex(rvk, rvv)
- }
- }
- } else {
- for j := 0; !dd.CheckBreak(); j++ {
- rvk = reflect.New(ktype).Elem()
- if cr != nil {
- cr.sendContainerState(containerMapKey)
- }
- d.decodeValue(rvk, keyFn)
-
- // special case if a byte array.
- if ktypeId == intfTypId {
- rvk = rvk.Elem()
- if rvk.Type() == uint8SliceTyp {
- rvk = reflect.ValueOf(d.string(rvk.Bytes()))
- }
- }
- mapSet = true // set to false if you do a get, and it's a pointer, and it exists
- if mapGet {
- rvv = rv.MapIndex(rvk)
- if rvv.IsValid() {
- if vtypeKind == reflect.Ptr {
- mapSet = false
- }
- } else {
- if rvz.IsValid() {
- rvz.Set(reflect.Zero(vtype))
- } else {
- rvz = reflect.New(vtype).Elem()
- }
- rvv = rvz
- }
- } else {
- if rvz.IsValid() {
- rvz.Set(reflect.Zero(vtype))
- } else {
- rvz = reflect.New(vtype).Elem()
- }
- rvv = rvz
- }
- if cr != nil {
- cr.sendContainerState(containerMapValue)
- }
- d.decodeValue(rvv, valFn)
- if mapSet {
- rv.SetMapIndex(rvk, rvv)
- }
- }
- }
- if cr != nil {
- cr.sendContainerState(containerMapEnd)
- }
-}
-
-type decRtidFn struct {
- rtid uintptr
- fn decFn
-}
-
-// decNaked is used to keep track of the primitives decoded.
-// Without it, we would have to decode each primitive and wrap it
-// in an interface{}, causing an allocation.
-// In this model, the primitives are decoded in a "pseudo-atomic" fashion,
-// so we can rest assured that no other decoding happens while these
-// primitives are being decoded.
-//
-// maps and arrays are not handled by this mechanism.
-// However, RawExt is, and we accommodate extensions that decode
-// RawExt from DecodeNaked, but need to decode the value subsequently.
-// kInterfaceNaked and swallow, which call DecodeNaked, handle this caveat.
-//
-// However, decNaked also keeps some arrays of default maps and slices
-// used in DecodeNaked. This way, we can get a pointer to it
-// without causing a new heap allocation.
-//
-// kInterfaceNaked will ensure that there is no allocation for the common
-// uses.
-type decNaked struct {
- // r RawExt // used for RawExt, uint, []byte.
- u uint64
- i int64
- f float64
- l []byte
- s string
- t time.Time
- b bool
- v valueType
-
- // stacks for reducing allocation
- is []interface{}
- ms []map[interface{}]interface{}
- ns []map[string]interface{}
- ss [][]interface{}
- // rs []RawExt
-
- // keep arrays at the bottom? Chances are that they are not used much.
- ia [4]interface{}
- ma [4]map[interface{}]interface{}
- na [4]map[string]interface{}
- sa [4][]interface{}
- // ra [2]RawExt
-}
-
-func (n *decNaked) reset() {
- if n.ss != nil {
- n.ss = n.ss[:0]
- }
- if n.is != nil {
- n.is = n.is[:0]
- }
- if n.ms != nil {
- n.ms = n.ms[:0]
- }
- if n.ns != nil {
- n.ns = n.ns[:0]
- }
-}
-
-// A Decoder reads and decodes an object from an input stream in the codec format.
-type Decoder struct {
- // hopefully, reduce dereferencing cost by laying the decReader inside the Decoder.
- // Try to put things that go together to fit within a cache line (8 words).
-
- d decDriver
- // NOTE: Decoder shouldn't call its read methods,
- // as the handler MAY need to do some coordination.
- r decReader
- // sa [initCollectionCap]decRtidFn
- h *BasicHandle
- hh Handle
-
- be bool // is binary encoding
- bytes bool // is bytes reader
- js bool // is json handle
-
- rb bytesDecReader
- ri ioDecReader
- cr containerStateRecv
-
- s []decRtidFn
- f map[uintptr]*decFn
-
- // _ uintptr // for alignment purposes, so next one starts from a cache line
-
- // cache the mapTypeId and sliceTypeId for faster comparisons
- mtid uintptr
- stid uintptr
-
- n decNaked
- b [scratchByteArrayLen]byte
- is map[string]string // used for interning strings
-}
-
-// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader.
-//
-// For efficiency, users are encouraged to pass in a memory buffered reader
-// (e.g. bufio.Reader, bytes.Buffer).
-func NewDecoder(r io.Reader, h Handle) *Decoder {
- d := newDecoder(h)
- d.Reset(r)
- return d
-}
-
-// NewDecoderBytes returns a Decoder which efficiently decodes directly
-// from a byte slice with zero copying.
-func NewDecoderBytes(in []byte, h Handle) *Decoder {
- d := newDecoder(h)
- d.ResetBytes(in)
- return d
-}
-
-func newDecoder(h Handle) *Decoder {
- d := &Decoder{hh: h, h: h.getBasicHandle(), be: h.isBinary()}
- n := &d.n
- // n.rs = n.ra[:0]
- n.ms = n.ma[:0]
- n.is = n.ia[:0]
- n.ns = n.na[:0]
- n.ss = n.sa[:0]
- _, d.js = h.(*JsonHandle)
- if d.h.InternString {
- d.is = make(map[string]string, 32)
- }
- d.d = h.newDecDriver(d)
- d.cr, _ = d.d.(containerStateRecv)
- // d.d = h.newDecDriver(decReaderT{true, &d.rb, &d.ri})
- return d
-}
-
-func (d *Decoder) resetCommon() {
- d.n.reset()
- d.d.reset()
- // reset all things which were cached from the Handle,
- // but could be changed.
- d.mtid, d.stid = 0, 0
- if d.h.MapType != nil {
- d.mtid = reflect.ValueOf(d.h.MapType).Pointer()
- }
- if d.h.SliceType != nil {
- d.stid = reflect.ValueOf(d.h.SliceType).Pointer()
- }
-}
-
-func (d *Decoder) Reset(r io.Reader) {
- d.ri.x = &d.b
- // d.s = d.sa[:0]
- d.ri.bs.r = r
- var ok bool
- d.ri.br, ok = r.(decReaderByteScanner)
- if !ok {
- d.ri.br = &d.ri.bs
- }
- d.r = &d.ri
- d.resetCommon()
-}
-
-func (d *Decoder) ResetBytes(in []byte) {
- // d.s = d.sa[:0]
- d.rb.reset(in)
- d.r = &d.rb
- d.resetCommon()
-}
-
-// func (d *Decoder) sendContainerState(c containerState) {
-// if d.cr != nil {
-// d.cr.sendContainerState(c)
-// }
-// }
-
-// Decode decodes the stream from reader and stores the result in the
-// value pointed to by v. v cannot be a nil pointer. v can also be
-// a reflect.Value of a pointer.
-//
-// Note that a pointer to a nil interface is not a nil pointer.
-// If you do not know what type of stream it is, pass in a pointer to a nil interface.
-// We will decode and store a value in that nil interface.
-//
-// Sample usages:
-// // Decoding into a non-nil typed value
-// var f float32
-// err = codec.NewDecoder(r, handle).Decode(&f)
-//
-// // Decoding into nil interface
-// var v interface{}
-// dec := codec.NewDecoder(r, handle)
-// err = dec.Decode(&v)
-//
-// When decoding into a nil interface{}, we will decode into an appropriate value based
-// on the contents of the stream:
-// - Numbers are decoded as float64, int64 or uint64.
-// - Other values are decoded appropriately depending on the type: -// bool, string, []byte, time.Time, etc -// - Extensions are decoded as RawExt (if no ext function registered for the tag) -// Configurations exist on the Handle to override defaults -// (e.g. for MapType, SliceType and how to decode raw bytes). -// -// When decoding into a non-nil interface{} value, the mode of encoding is based on the -// type of the value. When a value is seen: -// - If an extension is registered for it, call that extension function -// - If it implements BinaryUnmarshaler, call its UnmarshalBinary(data []byte) error -// - Else decode it based on its reflect.Kind -// -// There are some special rules when decoding into containers (slice/array/map/struct). -// Decode will typically use the stream contents to UPDATE the container. -// - A map can be decoded from a stream map, by updating matching keys. -// - A slice can be decoded from a stream array, -// by updating the first n elements, where n is length of the stream. -// - A slice can be decoded from a stream map, by decoding as if -// it contains a sequence of key-value pairs. -// - A struct can be decoded from a stream map, by updating matching fields. -// - A struct can be decoded from a stream array, -// by updating fields as they occur in the struct (by index). -// -// When decoding a stream map or array with length of 0 into a nil map or slice, -// we reset the destination map or slice to a zero-length value. -// -// However, when decoding a stream nil, we reset the destination container -// to its "zero" value (e.g. nil for slice/map, etc). -// -func (d *Decoder) Decode(v interface{}) (err error) { - defer panicToErr(&err) - d.decode(v) - return -} - -// this is not a smart swallow, as it allocates objects and does unnecessary work. -func (d *Decoder) swallowViaHammer() { - var blank interface{} - d.decodeValue(reflect.ValueOf(&blank).Elem(), nil) -} - -func (d *Decoder) swallow() { - // smarter decode that just swallows the content - dd := d.d - if dd.TryDecodeAsNil() { - return - } - cr := d.cr - switch dd.ContainerType() { - case valueTypeMap: - containerLen := dd.ReadMapStart() - clenGtEqualZero := containerLen >= 0 - for j := 0; ; j++ { - if clenGtEqualZero { - if j >= containerLen { - break - } - } else if dd.CheckBreak() { - break - } - if cr != nil { - cr.sendContainerState(containerMapKey) - } - d.swallow() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - d.swallow() - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - case valueTypeArray: - containerLenS := dd.ReadArrayStart() - clenGtEqualZero := containerLenS >= 0 - for j := 0; ; j++ { - if clenGtEqualZero { - if j >= containerLenS { - break - } - } else if dd.CheckBreak() { - break - } - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - d.swallow() - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } - case valueTypeBytes: - dd.DecodeBytes(d.b[:], false, true) - case valueTypeString: - dd.DecodeBytes(d.b[:], true, true) - // dd.DecodeStringAsBytes(d.b[:]) - default: - // these are all primitives, which we can get from decodeNaked - // if RawExt using Value, complete the processing. - dd.DecodeNaked() - if n := &d.n; n.v == valueTypeExt && n.l == nil { - l := len(n.is) - n.is = append(n.is, nil) - v2 := &n.is[l] - d.decode(v2) - n.is = n.is[:l] - } - } -} - -// MustDecode is like Decode, but panics if unable to Decode. -// This provides insight to the code location that triggered the error. 
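// Usage sketch for the two entry points: Decode recovers internal panics into
// an error (via panicToErr above), while MustDecode lets them propagate so the
// failing call site shows in the stack trace. Reusing one Decoder via
// ResetBytes avoids rebuilding its internal state. Inputs are illustrative:
//
//	dec := codec.NewDecoderBytes(nil, &codec.JsonHandle{})
//	for _, msg := range msgs {
//		var v interface{}
//		dec.ResetBytes(msg)
//		if err := dec.Decode(&v); err != nil {
//			// err reports the stream position and cause
//		}
//	}
//	// dec.MustDecode(&v) would panic on the same failure instead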
-func (d *Decoder) MustDecode(v interface{}) { - d.decode(v) -} - -func (d *Decoder) decode(iv interface{}) { - // if ics, ok := iv.(Selfer); ok { - // ics.CodecDecodeSelf(d) - // return - // } - - if d.d.TryDecodeAsNil() { - switch v := iv.(type) { - case nil: - case *string: - *v = "" - case *bool: - *v = false - case *int: - *v = 0 - case *int8: - *v = 0 - case *int16: - *v = 0 - case *int32: - *v = 0 - case *int64: - *v = 0 - case *uint: - *v = 0 - case *uint8: - *v = 0 - case *uint16: - *v = 0 - case *uint32: - *v = 0 - case *uint64: - *v = 0 - case *float32: - *v = 0 - case *float64: - *v = 0 - case *[]uint8: - *v = nil - case *Raw: - *v = nil - case reflect.Value: - if v.Kind() != reflect.Ptr || v.IsNil() { - d.errNotValidPtrValue(v) - } - // d.chkPtrValue(v) - v = v.Elem() - if v.IsValid() { - v.Set(reflect.Zero(v.Type())) - } - default: - rv := reflect.ValueOf(iv) - if rv.Kind() != reflect.Ptr || rv.IsNil() { - d.errNotValidPtrValue(rv) - } - // d.chkPtrValue(rv) - rv = rv.Elem() - if rv.IsValid() { - rv.Set(reflect.Zero(rv.Type())) - } - } - return - } - - switch v := iv.(type) { - case nil: - d.error(cannotDecodeIntoNilErr) - return - - case Selfer: - v.CodecDecodeSelf(d) - - case reflect.Value: - if v.Kind() != reflect.Ptr || v.IsNil() { - d.errNotValidPtrValue(v) - } - // d.chkPtrValue(v) - d.decodeValueNotNil(v.Elem(), nil) - - case *string: - *v = d.d.DecodeString() - case *bool: - *v = d.d.DecodeBool() - case *int: - *v = int(d.d.DecodeInt(intBitsize)) - case *int8: - *v = int8(d.d.DecodeInt(8)) - case *int16: - *v = int16(d.d.DecodeInt(16)) - case *int32: - *v = int32(d.d.DecodeInt(32)) - case *int64: - *v = d.d.DecodeInt(64) - case *uint: - *v = uint(d.d.DecodeUint(uintBitsize)) - case *uint8: - *v = uint8(d.d.DecodeUint(8)) - case *uint16: - *v = uint16(d.d.DecodeUint(16)) - case *uint32: - *v = uint32(d.d.DecodeUint(32)) - case *uint64: - *v = d.d.DecodeUint(64) - case *float32: - *v = float32(d.d.DecodeFloat(true)) - case *float64: - *v = d.d.DecodeFloat(false) - case *[]uint8: - *v = d.d.DecodeBytes(*v, false, false) - - case *Raw: - *v = d.raw() - - case *interface{}: - d.decodeValueNotNil(reflect.ValueOf(iv).Elem(), nil) - - default: - if !fastpathDecodeTypeSwitch(iv, d) { - d.decodeI(iv, true, false, false, false) - } - } -} - -func (d *Decoder) preDecodeValue(rv reflect.Value, tryNil bool) (rv2 reflect.Value, proceed bool) { - if tryNil && d.d.TryDecodeAsNil() { - // No need to check if a ptr, recursively, to determine - // whether to set value to nil. - // Just always set value to its zero type. - if rv.IsValid() { // rv.CanSet() // always settable, except it's invalid - rv.Set(reflect.Zero(rv.Type())) - } - return - } - - // If stream is not containing a nil value, then we can deref to the base - // non-pointer value, and decode into that. 
- for rv.Kind() == reflect.Ptr {
- if rv.IsNil() {
- rv.Set(reflect.New(rv.Type().Elem()))
- }
- rv = rv.Elem()
- }
- return rv, true
-}
-
-func (d *Decoder) decodeI(iv interface{}, checkPtr, tryNil, checkFastpath, checkCodecSelfer bool) {
- rv := reflect.ValueOf(iv)
- if checkPtr {
- if rv.Kind() != reflect.Ptr || rv.IsNil() {
- d.errNotValidPtrValue(rv)
- }
- // d.chkPtrValue(rv)
- }
- rv, proceed := d.preDecodeValue(rv, tryNil)
- if proceed {
- fn := d.getDecFn(rv.Type(), checkFastpath, checkCodecSelfer)
- fn.f(&fn.i, rv)
- }
-}
-
-func (d *Decoder) decodeValue(rv reflect.Value, fn *decFn) {
- if rv, proceed := d.preDecodeValue(rv, true); proceed {
- if fn == nil {
- fn = d.getDecFn(rv.Type(), true, true)
- }
- fn.f(&fn.i, rv)
- }
-}
-
-func (d *Decoder) decodeValueNotNil(rv reflect.Value, fn *decFn) {
- if rv, proceed := d.preDecodeValue(rv, false); proceed {
- if fn == nil {
- fn = d.getDecFn(rv.Type(), true, true)
- }
- fn.f(&fn.i, rv)
- }
-}
-
-func (d *Decoder) getDecFn(rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *decFn) {
- rtid := reflect.ValueOf(rt).Pointer()
-
- // retrieve or register a focused function for this type,
- // to eliminate the need to do the retrieval multiple times
-
- // if d.f == nil && d.s == nil { debugf("---->Creating new dec f map for type: %v\n", rt) }
- var ok bool
- if useMapForCodecCache {
- fn, ok = d.f[rtid]
- } else {
- for i := range d.s {
- v := &(d.s[i])
- if v.rtid == rtid {
- fn, ok = &(v.fn), true
- break
- }
- }
- }
- if ok {
- return
- }
-
- if useMapForCodecCache {
- if d.f == nil {
- d.f = make(map[uintptr]*decFn, initCollectionCap)
- }
- fn = new(decFn)
- d.f[rtid] = fn
- } else {
- if d.s == nil {
- d.s = make([]decRtidFn, 0, initCollectionCap)
- }
- d.s = append(d.s, decRtidFn{rtid: rtid})
- fn = &(d.s[len(d.s)-1]).fn
- }
-
- // debugf("\tCreating new dec fn for type: %v\n", rt)
- ti := d.h.getTypeInfo(rtid, rt)
- fi := &(fn.i)
- fi.d = d
- fi.ti = ti
-
- // An extension can be registered for any type, regardless of the Kind
- // (e.g. type BitSet int64, type MyStruct { / * unexported fields * / }, type X []int, etc.)
- //
- // We can't check if it's an extension byte here first, because the user may have
- // registered a pointer or non-pointer type, meaning we may have to recurse first
- // before matching a mapped type, even though the extension byte is already detected.
- //
- // NOTE: if decoding into a nil interface{}, we return a non-nil
- // value even if the container registers a length of 0.
- if checkCodecSelfer && ti.cs { - fn.f = (*decFnInfo).selferUnmarshal - } else if rtid == rawExtTypId { - fn.f = (*decFnInfo).rawExt - } else if rtid == rawTypId { - fn.f = (*decFnInfo).raw - } else if d.d.IsBuiltinType(rtid) { - fn.f = (*decFnInfo).builtin - } else if xfFn := d.h.getExt(rtid); xfFn != nil { - fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext - fn.f = (*decFnInfo).ext - } else if supportMarshalInterfaces && d.be && ti.bunm { - fn.f = (*decFnInfo).binaryUnmarshal - } else if supportMarshalInterfaces && !d.be && d.js && ti.junm { - //If JSON, we should check JSONUnmarshal before textUnmarshal - fn.f = (*decFnInfo).jsonUnmarshal - } else if supportMarshalInterfaces && !d.be && ti.tunm { - fn.f = (*decFnInfo).textUnmarshal - } else { - rk := rt.Kind() - if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) { - if rt.PkgPath() == "" { - if idx := fastpathAV.index(rtid); idx != -1 { - fn.f = fastpathAV[idx].decfn - } - } else { - // use mapping for underlying type if there - ok = false - var rtu reflect.Type - if rk == reflect.Map { - rtu = reflect.MapOf(rt.Key(), rt.Elem()) - } else { - rtu = reflect.SliceOf(rt.Elem()) - } - rtuid := reflect.ValueOf(rtu).Pointer() - if idx := fastpathAV.index(rtuid); idx != -1 { - xfnf := fastpathAV[idx].decfn - xrt := fastpathAV[idx].rt - fn.f = func(xf *decFnInfo, xrv reflect.Value) { - // xfnf(xf, xrv.Convert(xrt)) - xfnf(xf, xrv.Addr().Convert(reflect.PtrTo(xrt)).Elem()) - } - } - } - } - if fn.f == nil { - switch rk { - case reflect.String: - fn.f = (*decFnInfo).kString - case reflect.Bool: - fn.f = (*decFnInfo).kBool - case reflect.Int: - fn.f = (*decFnInfo).kInt - case reflect.Int64: - fn.f = (*decFnInfo).kInt64 - case reflect.Int32: - fn.f = (*decFnInfo).kInt32 - case reflect.Int8: - fn.f = (*decFnInfo).kInt8 - case reflect.Int16: - fn.f = (*decFnInfo).kInt16 - case reflect.Float32: - fn.f = (*decFnInfo).kFloat32 - case reflect.Float64: - fn.f = (*decFnInfo).kFloat64 - case reflect.Uint8: - fn.f = (*decFnInfo).kUint8 - case reflect.Uint64: - fn.f = (*decFnInfo).kUint64 - case reflect.Uint: - fn.f = (*decFnInfo).kUint - case reflect.Uint32: - fn.f = (*decFnInfo).kUint32 - case reflect.Uint16: - fn.f = (*decFnInfo).kUint16 - // case reflect.Ptr: - // fn.f = (*decFnInfo).kPtr - case reflect.Uintptr: - fn.f = (*decFnInfo).kUintptr - case reflect.Interface: - fn.f = (*decFnInfo).kInterface - case reflect.Struct: - fn.f = (*decFnInfo).kStruct - case reflect.Chan: - fi.seq = seqTypeChan - fn.f = (*decFnInfo).kSlice - case reflect.Slice: - fi.seq = seqTypeSlice - fn.f = (*decFnInfo).kSlice - case reflect.Array: - fi.seq = seqTypeArray - fn.f = (*decFnInfo).kArray - case reflect.Map: - fn.f = (*decFnInfo).kMap - default: - fn.f = (*decFnInfo).kErr - } - } - } - - return -} - -func (d *Decoder) structFieldNotFound(index int, rvkencname string) { - // NOTE: rvkencname may be a stringView, so don't pass it to another function. 
- if d.h.ErrorIfNoField { - if index >= 0 { - d.errorf("no matching struct field found when decoding stream array at index %v", index) - return - } else if rvkencname != "" { - d.errorf("no matching struct field found when decoding stream map with key " + rvkencname) - return - } - } - d.swallow() -} - -func (d *Decoder) arrayCannotExpand(sliceLen, streamLen int) { - if d.h.ErrorIfNoArrayExpand { - d.errorf("cannot expand array len during decode from %v to %v", sliceLen, streamLen) - } -} - -func (d *Decoder) chkPtrValue(rv reflect.Value) { - // We can only decode into a non-nil pointer - if rv.Kind() == reflect.Ptr && !rv.IsNil() { - return - } - d.errNotValidPtrValue(rv) -} - -func (d *Decoder) errNotValidPtrValue(rv reflect.Value) { - if !rv.IsValid() { - d.error(cannotDecodeIntoNilErr) - return - } - if !rv.CanInterface() { - d.errorf("cannot decode into a value without an interface: %v", rv) - return - } - rvi := rv.Interface() - d.errorf("cannot decode into non-pointer or nil pointer. Got: %v, %T, %v", rv.Kind(), rvi, rvi) -} - -func (d *Decoder) error(err error) { - panic(err) -} - -func (d *Decoder) errorf(format string, params ...interface{}) { - params2 := make([]interface{}, len(params)+1) - params2[0] = d.r.numread() - copy(params2[1:], params) - err := fmt.Errorf("[pos %d]: "+format, params2...) - panic(err) -} - -func (d *Decoder) string(v []byte) (s string) { - if d.is != nil { - s, ok := d.is[string(v)] // no allocation here. - if !ok { - s = string(v) - d.is[s] = s - } - return s - } - return string(v) // don't return stringView, as we need a real string here. -} - -func (d *Decoder) intern(s string) { - if d.is != nil { - d.is[s] = s - } -} - -// nextValueBytes returns the next value in the stream as a set of bytes. -func (d *Decoder) nextValueBytes() []byte { - d.d.uncacheRead() - d.r.track() - d.swallow() - return d.r.stopTrack() -} - -func (d *Decoder) raw() []byte { - // ensure that this is not a view into the bytes - // i.e. make new copy always. - bs := d.nextValueBytes() - bs2 := make([]byte, len(bs)) - copy(bs2, bs) - return bs2 -} - -// -------------------------------------------------- - -// decSliceHelper assists when decoding into a slice, from a map or an array in the stream. -// A slice can be set from a map or array in stream. This supports the MapBySlice interface. 
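// Sketch of nextValueBytes/raw above in use: a codec.Raw destination captures
// the next value's encoded bytes verbatim (as a copy, per raw above), which is
// handy for deferred or pass-through decoding. Re-encoding a Raw value
// requires the Raw encode option. The type and input are illustrative:
//
//	type Envelope struct {
//		Kind    string    `codec:"kind"`
//		Payload codec.Raw `codec:"payload"` // kept as raw encoded bytes
//	}
//
//	var h codec.JsonHandle
//	h.Raw = true // needed only to encode Raw values back out
//	var e Envelope
//	err := codec.NewDecoderBytes([]byte(`{"kind":"ping","payload":{"seq":7}}`), &h).Decode(&e)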
-type decSliceHelper struct { - d *Decoder - // ct valueType - array bool -} - -func (d *Decoder) decSliceHelperStart() (x decSliceHelper, clen int) { - dd := d.d - ctyp := dd.ContainerType() - if ctyp == valueTypeArray { - x.array = true - clen = dd.ReadArrayStart() - } else if ctyp == valueTypeMap { - clen = dd.ReadMapStart() * 2 - } else { - d.errorf("only encoded map or array can be decoded into a slice (%d)", ctyp) - } - // x.ct = ctyp - x.d = d - return -} - -func (x decSliceHelper) End() { - cr := x.d.cr - if cr == nil { - return - } - if x.array { - cr.sendContainerState(containerArrayEnd) - } else { - cr.sendContainerState(containerMapEnd) - } -} - -func (x decSliceHelper) ElemContainerState(index int) { - cr := x.d.cr - if cr == nil { - return - } - if x.array { - cr.sendContainerState(containerArrayElem) - } else { - if index%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } -} - -func decByteSlice(r decReader, clen int, bs []byte) (bsOut []byte) { - if clen == 0 { - return zeroByteSlice - } - if len(bs) == clen { - bsOut = bs - } else if cap(bs) >= clen { - bsOut = bs[:clen] - } else { - bsOut = make([]byte, clen) - } - r.readb(bsOut) - return -} - -func detachZeroCopyBytes(isBytesReader bool, dest []byte, in []byte) (out []byte) { - if xlen := len(in); xlen > 0 { - if isBytesReader || xlen <= scratchByteArrayLen { - if cap(dest) >= xlen { - out = dest[:xlen] - } else { - out = make([]byte, xlen) - } - copy(out, in) - return - } - } - return in -} - -// decInferLen will infer a sensible length, given the following: -// - clen: length wanted. -// - maxlen: max length to be returned. -// if <= 0, it is unset, and we infer it based on the unit size -// - unit: number of bytes for each element of the collection -func decInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) { - // handle when maxlen is not set i.e. <= 0 - if clen <= 0 { - return - } - if maxlen <= 0 { - // no maxlen defined. Use maximum of 256K memory, with a floor of 4K items. - // maxlen = 256 * 1024 / unit - // if maxlen < (4 * 1024) { - // maxlen = 4 * 1024 - // } - if unit < (256 / 4) { - maxlen = 256 * 1024 / unit - } else { - maxlen = 4 * 1024 - } - } - if clen > maxlen { - rvlen = maxlen - truncated = true - } else { - rvlen = clen - } - return - // if clen <= 0 { - // rvlen = 0 - // } else if maxlen > 0 && clen > maxlen { - // rvlen = maxlen - // truncated = true - // } else { - // rvlen = clen - // } - // return -} - -// // implement overall decReader wrapping both, for possible use inline: -// type decReaderT struct { -// bytes bool -// rb *bytesDecReader -// ri *ioDecReader -// } -// -// // implement *Decoder as a decReader. -// // Using decReaderT (defined just above) caused performance degradation -// // possibly because of constant copying the value, -// // and some value->interface conversion causing allocation. -// func (d *Decoder) unreadn1() { -// if d.bytes { -// d.rb.unreadn1() -// } else { -// d.ri.unreadn1() -// } -// } -// ... for other methods of decReader. -// Testing showed that performance improvement was negligible. diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/decode_go.go b/cluster-autoscaler/vendor/github.com/ugorji/go/codec/decode_go.go deleted file mode 100644 index ba289cef6133..000000000000 --- a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/decode_go.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. 
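// Worked example for decInferLen above, with maxlen unset (<= 0): for unit
// sizes below 64 bytes the cap is 256*1024/unit elements, otherwise 4096
// elements. The calls are illustrative:
//
//	decInferLen(10, 0, 8)       // => (10, false)    10 <= 256*1024/8 = 32768
//	decInferLen(1<<20, 0, 8)    // => (32768, true)  capped at 256*1024/8
//	decInferLen(10000, 0, 1024) // => (4096, true)   unit >= 64, so cap is 4096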
-// Use of this source code is governed by a MIT license found in the LICENSE file. - -// +build go1.5 - -package codec - -import "reflect" - -const reflectArrayOfSupported = true - -func reflectArrayOf(rvn reflect.Value) (rvn2 reflect.Value) { - rvn2 = reflect.New(reflect.ArrayOf(rvn.Len(), intfTyp)).Elem() - reflect.Copy(rvn2, rvn) - return -} diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/decode_go14.go b/cluster-autoscaler/vendor/github.com/ugorji/go/codec/decode_go14.go deleted file mode 100644 index 50063bc8fc88..000000000000 --- a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/decode_go14.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// +build !go1.5 - -package codec - -import "reflect" - -const reflectArrayOfSupported = false - -func reflectArrayOf(rvn reflect.Value) (rvn2 reflect.Value) { - panic("reflect.ArrayOf unsupported") -} diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/encode.go b/cluster-autoscaler/vendor/github.com/ugorji/go/codec/encode.go deleted file mode 100644 index c2cef812ec72..000000000000 --- a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/encode.go +++ /dev/null @@ -1,1461 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "encoding" - "fmt" - "io" - "reflect" - "sort" - "sync" -) - -const ( - defEncByteBufSize = 1 << 6 // 4:16, 6:64, 8:256, 10:1024 -) - -// AsSymbolFlag defines what should be encoded as symbols. -type AsSymbolFlag uint8 - -const ( - // AsSymbolDefault is default. - // Currently, this means only encode struct field names as symbols. - // The default is subject to change. - AsSymbolDefault AsSymbolFlag = iota - - // AsSymbolAll means encode anything which could be a symbol as a symbol. - AsSymbolAll = 0xfe - - // AsSymbolNone means do not encode anything as a symbol. - AsSymbolNone = 1 << iota - - // AsSymbolMapStringKeys means encode keys in map[string]XXX as symbols. - AsSymbolMapStringKeysFlag - - // AsSymbolStructFieldName means encode struct field names as symbols. - AsSymbolStructFieldNameFlag -) - -// encWriter abstracts writing to a byte array or to an io.Writer. 
-type encWriter interface {
- writeb([]byte)
- writestr(string)
- writen1(byte)
- writen2(byte, byte)
- atEndOfEncode()
-}
-
-// encDriver abstracts the actual codec (binc vs msgpack, etc)
-type encDriver interface {
- IsBuiltinType(rt uintptr) bool
- EncodeBuiltin(rt uintptr, v interface{})
- EncodeNil()
- EncodeInt(i int64)
- EncodeUint(i uint64)
- EncodeBool(b bool)
- EncodeFloat32(f float32)
- EncodeFloat64(f float64)
- // encodeExtPreamble(xtag byte, length int)
- EncodeRawExt(re *RawExt, e *Encoder)
- EncodeExt(v interface{}, xtag uint64, ext Ext, e *Encoder)
- EncodeArrayStart(length int)
- EncodeMapStart(length int)
- EncodeString(c charEncoding, v string)
- EncodeSymbol(v string)
- EncodeStringBytes(c charEncoding, v []byte)
- //TODO
- //encBignum(f *big.Int)
- //encStringRunes(c charEncoding, v []rune)
-
- reset()
-}
-
-type encDriverAsis interface {
- EncodeAsis(v []byte)
-}
-
-type encNoSeparator struct{}
-
-func (_ encNoSeparator) EncodeEnd() {}
-
-type ioEncWriterWriter interface {
- WriteByte(c byte) error
- WriteString(s string) (n int, err error)
- Write(p []byte) (n int, err error)
-}
-
-type ioEncStringWriter interface {
- WriteString(s string) (n int, err error)
-}
-
-type EncodeOptions struct {
- // Encode a struct as an array, and not as a map
- StructToArray bool
-
- // Canonical representation means that encoding a value will always result in the same
- // sequence of bytes.
- //
- // This only affects maps, as the iteration order for maps is random.
- //
- // The implementation MAY use the natural sort order for the map keys if possible:
- //
- // - If there is a natural sort order (i.e. for number, bool, string or []byte keys),
- // then the map keys are first sorted in natural order and then written
- // with corresponding map values to the stream.
- // - If there is no natural sort order, then the map keys will first be
- // encoded into []byte, and then sorted,
- // before writing the sorted keys and the corresponding map values to the stream.
- //
- Canonical bool
-
- // CheckCircularRef controls whether we check for circular references
- // and error fast during an encode.
- //
- // If enabled, an error is received if a pointer to a struct
- // references itself either directly or through one of its fields (iteratively).
- //
- // This is opt-in, as there may be a performance hit to checking circular references.
- CheckCircularRef bool
-
- // RecursiveEmptyCheck controls whether we descend into interfaces, structs and pointers
- // when checking if a value is empty.
- //
- // Note that this may make OmitEmpty more expensive, as it incurs a lot more reflect calls.
- RecursiveEmptyCheck bool
-
- // Raw controls whether we encode Raw values.
- // This is a "dangerous" option and must be explicitly set.
- // If set, we blindly encode Raw values as-is, without checking
- // if they are a correct representation of a value in that format.
- // If unset, we error out.
- Raw bool
-
- // AsSymbols defines what should be encoded as symbols.
- //
- // Encoding as symbols can reduce the encoded size significantly.
- //
- // However, during encoding, each string to be encoded as a symbol must
- // be checked to see if it has been seen before. Consequently, encoding time
- // will increase if using symbols, because string comparisons have a clear cost.
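// Sketch of the options above in use: Canonical yields deterministic map-key
// ordering, StructToArray encodes structs positionally, and AsSymbols trades
// encoding time for size on handles that support symbols. Values are
// illustrative:
//
//	var h codec.CborHandle
//	h.Canonical = true     // sort map keys so equal values encode identically
//	h.StructToArray = true // encode structs as arrays rather than maps
//
//	var out []byte
//	err := codec.NewEncoderBytes(&out, &h).Encode(map[string]int{"b": 2, "a": 1})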
- // - // Sample values: - // AsSymbolNone - // AsSymbolAll - // AsSymbolMapStringKeys - // AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag - AsSymbols AsSymbolFlag -} - -// --------------------------------------------- - -type simpleIoEncWriterWriter struct { - w io.Writer - bw io.ByteWriter - sw ioEncStringWriter - bs [1]byte -} - -func (o *simpleIoEncWriterWriter) WriteByte(c byte) (err error) { - if o.bw != nil { - return o.bw.WriteByte(c) - } - // _, err = o.w.Write([]byte{c}) - o.bs[0] = c - _, err = o.w.Write(o.bs[:]) - return -} - -func (o *simpleIoEncWriterWriter) WriteString(s string) (n int, err error) { - if o.sw != nil { - return o.sw.WriteString(s) - } - // return o.w.Write([]byte(s)) - return o.w.Write(bytesView(s)) -} - -func (o *simpleIoEncWriterWriter) Write(p []byte) (n int, err error) { - return o.w.Write(p) -} - -// ---------------------------------------- - -// ioEncWriter implements encWriter and can write to an io.Writer implementation -type ioEncWriter struct { - w ioEncWriterWriter - s simpleIoEncWriterWriter - // x [8]byte // temp byte array re-used internally for efficiency -} - -func (z *ioEncWriter) writeb(bs []byte) { - if len(bs) == 0 { - return - } - n, err := z.w.Write(bs) - if err != nil { - panic(err) - } - if n != len(bs) { - panic(fmt.Errorf("incorrect num bytes written. Expecting: %v, Wrote: %v", len(bs), n)) - } -} - -func (z *ioEncWriter) writestr(s string) { - n, err := z.w.WriteString(s) - if err != nil { - panic(err) - } - if n != len(s) { - panic(fmt.Errorf("incorrect num bytes written. Expecting: %v, Wrote: %v", len(s), n)) - } -} - -func (z *ioEncWriter) writen1(b byte) { - if err := z.w.WriteByte(b); err != nil { - panic(err) - } -} - -func (z *ioEncWriter) writen2(b1 byte, b2 byte) { - z.writen1(b1) - z.writen1(b2) -} - -func (z *ioEncWriter) atEndOfEncode() {} - -// ---------------------------------------- - -// bytesEncWriter implements encWriter and can write to an byte slice. -// It is used by Marshal function. -type bytesEncWriter struct { - b []byte - c int // cursor - out *[]byte // write out on atEndOfEncode -} - -func (z *bytesEncWriter) writeb(s []byte) { - if len(s) == 0 { - return - } - oc, a := z.growNoAlloc(len(s)) - if a { - z.growAlloc(len(s), oc) - } - copy(z.b[oc:], s) -} - -func (z *bytesEncWriter) writestr(s string) { - if len(s) == 0 { - return - } - oc, a := z.growNoAlloc(len(s)) - if a { - z.growAlloc(len(s), oc) - } - copy(z.b[oc:], s) -} - -func (z *bytesEncWriter) writen1(b1 byte) { - oc, a := z.growNoAlloc(1) - if a { - z.growAlloc(1, oc) - } - z.b[oc] = b1 -} - -func (z *bytesEncWriter) writen2(b1 byte, b2 byte) { - oc, a := z.growNoAlloc(2) - if a { - z.growAlloc(2, oc) - } - z.b[oc+1] = b2 - z.b[oc] = b1 -} - -func (z *bytesEncWriter) atEndOfEncode() { - *(z.out) = z.b[:z.c] -} - -// have a growNoalloc(n int), which can be inlined. -// if allocation is needed, then call growAlloc(n int) - -func (z *bytesEncWriter) growNoAlloc(n int) (oldcursor int, allocNeeded bool) { - oldcursor = z.c - z.c = z.c + n - if z.c > len(z.b) { - if z.c > cap(z.b) { - allocNeeded = true - } else { - z.b = z.b[:cap(z.b)] - } - } - return -} - -func (z *bytesEncWriter) growAlloc(n int, oldcursor int) { - // appendslice logic (if cap < 1024, *2, else *1.25): more expensive. many copy calls. 
- // bytes.Buffer model (2*cap + n): much better - // bs := make([]byte, 2*cap(z.b)+n) - bs := make([]byte, growCap(cap(z.b), 1, n)) - copy(bs, z.b[:oldcursor]) - z.b = bs -} - -// --------------------------------------------- - -type encFnInfo struct { - e *Encoder - ti *typeInfo - xfFn Ext - xfTag uint64 - seq seqType -} - -func (f *encFnInfo) builtin(rv reflect.Value) { - f.e.e.EncodeBuiltin(f.ti.rtid, rv.Interface()) -} - -func (f *encFnInfo) raw(rv reflect.Value) { - f.e.raw(rv.Interface().(Raw)) -} - -func (f *encFnInfo) rawExt(rv reflect.Value) { - // rev := rv.Interface().(RawExt) - // f.e.e.EncodeRawExt(&rev, f.e) - var re *RawExt - if rv.CanAddr() { - re = rv.Addr().Interface().(*RawExt) - } else { - rev := rv.Interface().(RawExt) - re = &rev - } - f.e.e.EncodeRawExt(re, f.e) -} - -func (f *encFnInfo) ext(rv reflect.Value) { - // if this is a struct|array and it was addressable, then pass the address directly (not the value) - if k := rv.Kind(); (k == reflect.Struct || k == reflect.Array) && rv.CanAddr() { - rv = rv.Addr() - } - f.e.e.EncodeExt(rv.Interface(), f.xfTag, f.xfFn, f.e) -} - -func (f *encFnInfo) getValueForMarshalInterface(rv reflect.Value, indir int8) (v interface{}, proceed bool) { - if indir == 0 { - v = rv.Interface() - } else if indir == -1 { - // If a non-pointer was passed to Encode(), then that value is not addressable. - // Take addr if addressable, else copy value to an addressable value. - if rv.CanAddr() { - v = rv.Addr().Interface() - } else { - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() - // fmt.Printf("rv.Type: %v, rv2.Type: %v, v: %v\n", rv.Type(), rv2.Type(), v) - } - } else { - for j := int8(0); j < indir; j++ { - if rv.IsNil() { - f.e.e.EncodeNil() - return - } - rv = rv.Elem() - } - v = rv.Interface() - } - return v, true -} - -func (f *encFnInfo) selferMarshal(rv reflect.Value) { - if v, proceed := f.getValueForMarshalInterface(rv, f.ti.csIndir); proceed { - v.(Selfer).CodecEncodeSelf(f.e) - } -} - -func (f *encFnInfo) binaryMarshal(rv reflect.Value) { - if v, proceed := f.getValueForMarshalInterface(rv, f.ti.bmIndir); proceed { - bs, fnerr := v.(encoding.BinaryMarshaler).MarshalBinary() - f.e.marshal(bs, fnerr, false, c_RAW) - } -} - -func (f *encFnInfo) textMarshal(rv reflect.Value) { - if v, proceed := f.getValueForMarshalInterface(rv, f.ti.tmIndir); proceed { - // debugf(">>>> encoding.TextMarshaler: %T", rv.Interface()) - bs, fnerr := v.(encoding.TextMarshaler).MarshalText() - f.e.marshal(bs, fnerr, false, c_UTF8) - } -} - -func (f *encFnInfo) jsonMarshal(rv reflect.Value) { - if v, proceed := f.getValueForMarshalInterface(rv, f.ti.jmIndir); proceed { - bs, fnerr := v.(jsonMarshaler).MarshalJSON() - f.e.marshal(bs, fnerr, true, c_UTF8) - } -} - -func (f *encFnInfo) kBool(rv reflect.Value) { - f.e.e.EncodeBool(rv.Bool()) -} - -func (f *encFnInfo) kString(rv reflect.Value) { - f.e.e.EncodeString(c_UTF8, rv.String()) -} - -func (f *encFnInfo) kFloat64(rv reflect.Value) { - f.e.e.EncodeFloat64(rv.Float()) -} - -func (f *encFnInfo) kFloat32(rv reflect.Value) { - f.e.e.EncodeFloat32(float32(rv.Float())) -} - -func (f *encFnInfo) kInt(rv reflect.Value) { - f.e.e.EncodeInt(rv.Int()) -} - -func (f *encFnInfo) kUint(rv reflect.Value) { - f.e.e.EncodeUint(rv.Uint()) -} - -func (f *encFnInfo) kInvalid(rv reflect.Value) { - f.e.e.EncodeNil() -} - -func (f *encFnInfo) kErr(rv reflect.Value) { - f.e.errorf("unsupported kind %s, for %#v", rv.Kind(), rv) -} - -func (f *encFnInfo) kSlice(rv reflect.Value) { - ti := f.ti - // array 
may be non-addressable, so we have to manage with care - // (don't call rv.Bytes, rv.Slice, etc). - // E.g. type struct S{B [2]byte}; - // Encode(S{}) will bomb on "panic: slice of unaddressable array". - e := f.e - if f.seq != seqTypeArray { - if rv.IsNil() { - e.e.EncodeNil() - return - } - // If in this method, then there was no extension function defined. - // So it's okay to treat as []byte. - if ti.rtid == uint8SliceTypId { - e.e.EncodeStringBytes(c_RAW, rv.Bytes()) - return - } - } - cr := e.cr - rtelem := ti.rt.Elem() - l := rv.Len() - if ti.rtid == uint8SliceTypId || rtelem.Kind() == reflect.Uint8 { - switch f.seq { - case seqTypeArray: - // if l == 0 { e.e.encodeStringBytes(c_RAW, nil) } else - if rv.CanAddr() { - e.e.EncodeStringBytes(c_RAW, rv.Slice(0, l).Bytes()) - } else { - var bs []byte - if l <= cap(e.b) { - bs = e.b[:l] - } else { - bs = make([]byte, l) - } - reflect.Copy(reflect.ValueOf(bs), rv) - // TODO: Test that reflect.Copy works instead of manual one-by-one - // for i := 0; i < l; i++ { - // bs[i] = byte(rv.Index(i).Uint()) - // } - e.e.EncodeStringBytes(c_RAW, bs) - } - case seqTypeSlice: - e.e.EncodeStringBytes(c_RAW, rv.Bytes()) - case seqTypeChan: - bs := e.b[:0] - // do not use range, so that the number of elements encoded - // does not change, and encoding does not hang waiting on someone to close chan. - // for b := range rv.Interface().(<-chan byte) { - // bs = append(bs, b) - // } - ch := rv.Interface().(<-chan byte) - for i := 0; i < l; i++ { - bs = append(bs, <-ch) - } - e.e.EncodeStringBytes(c_RAW, bs) - } - return - } - - if ti.mbs { - if l%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", l) - return - } - e.e.EncodeMapStart(l / 2) - } else { - e.e.EncodeArrayStart(l) - } - - if l > 0 { - for rtelem.Kind() == reflect.Ptr { - rtelem = rtelem.Elem() - } - // if kind is reflect.Interface, do not pre-determine the - // encoding type, because preEncodeValue may break it down to - // a concrete type and kInterface will bomb. - var fn *encFn - if rtelem.Kind() != reflect.Interface { - rtelemid := reflect.ValueOf(rtelem).Pointer() - fn = e.getEncFn(rtelemid, rtelem, true, true) - } - // TODO: Consider perf implication of encoding odd index values as symbols if type is string - for j := 0; j < l; j++ { - if cr != nil { - if ti.mbs { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } else { - cr.sendContainerState(containerArrayElem) - } - } - if f.seq == seqTypeChan { - if rv2, ok2 := rv.Recv(); ok2 { - e.encodeValue(rv2, fn) - } else { - e.encode(nil) // WE HAVE TO DO SOMETHING, so nil if nothing received. - } - } else { - e.encodeValue(rv.Index(j), fn) - } - } - } - - if cr != nil { - if ti.mbs { - cr.sendContainerState(containerMapEnd) - } else { - cr.sendContainerState(containerArrayEnd) - } - } -} - -func (f *encFnInfo) kStruct(rv reflect.Value) { - fti := f.ti - e := f.e - cr := e.cr - tisfi := fti.sfip - toMap := !(fti.toArray || e.h.StructToArray) - newlen := len(fti.sfi) - - // Use sync.Pool to reduce allocating slices unnecessarily. - // The cost of sync.Pool is less than the cost of new allocation. - pool, poolv, fkvs := encStructPoolGet(newlen) - - // if toMap, use the sorted array. 
If toArray, use unsorted array (to match sequence in struct) - if toMap { - tisfi = fti.sfi - } - newlen = 0 - var kv stringRv - recur := e.h.RecursiveEmptyCheck - for _, si := range tisfi { - kv.r = si.field(rv, false) - if toMap { - if si.omitEmpty && isEmptyValue(kv.r, recur, recur) { - continue - } - kv.v = si.encName - } else { - // use the zero value. - // if a reference or struct, set to nil (so you do not output too much) - if si.omitEmpty && isEmptyValue(kv.r, recur, recur) { - switch kv.r.Kind() { - case reflect.Struct, reflect.Interface, reflect.Ptr, reflect.Array, reflect.Map, reflect.Slice: - kv.r = reflect.Value{} //encode as nil - } - } - } - fkvs[newlen] = kv - newlen++ - } - - // debugf(">>>> kStruct: newlen: %v", newlen) - // sep := !e.be - ee := e.e //don't dereference every time - - if toMap { - ee.EncodeMapStart(newlen) - // asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 - asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 - for j := 0; j < newlen; j++ { - kv = fkvs[j] - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(kv.v) - } else { - ee.EncodeString(c_UTF8, kv.v) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(kv.r, nil) - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - } else { - ee.EncodeArrayStart(newlen) - for j := 0; j < newlen; j++ { - kv = fkvs[j] - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - e.encodeValue(kv.r, nil) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } - } - - // do not use defer. Instead, use explicit pool return at end of function. - // defer has a cost we are trying to avoid. - // If there is a panic and these slices are not returned, it is ok. - if pool != nil { - pool.Put(poolv) - } -} - -// func (f *encFnInfo) kPtr(rv reflect.Value) { -// debugf(">>>>>>> ??? encode kPtr called - shouldn't get called") -// if rv.IsNil() { -// f.e.e.encodeNil() -// return -// } -// f.e.encodeValue(rv.Elem()) -// } - -// func (f *encFnInfo) kInterface(rv reflect.Value) { -// println("kInterface called") -// debug.PrintStack() -// if rv.IsNil() { -// f.e.e.EncodeNil() -// return -// } -// f.e.encodeValue(rv.Elem(), nil) -// } - -func (f *encFnInfo) kMap(rv reflect.Value) { - ee := f.e.e - if rv.IsNil() { - ee.EncodeNil() - return - } - - l := rv.Len() - ee.EncodeMapStart(l) - e := f.e - cr := e.cr - if l == 0 { - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return - } - var asSymbols bool - // determine the underlying key and val encFn's for the map. - // This eliminates some work which is done for each loop iteration i.e. - // rv.Type(), ref.ValueOf(rt).Pointer(), then check map/list for fn. - // - // However, if kind is reflect.Interface, do not pre-determine the - // encoding type, because preEncodeValue may break it down to - // a concrete type and kInterface will bomb. 
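- // Note: keyFn and valFn below are resolved once, before the loop, and then
- // reused for every map entry, so the per-entry cost is reduced to the map
- // access and the encode call itself.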
- var keyFn, valFn *encFn - ti := f.ti - rtkey := ti.rt.Key() - rtval := ti.rt.Elem() - rtkeyid := reflect.ValueOf(rtkey).Pointer() - // keyTypeIsString := f.ti.rt.Key().Kind() == reflect.String - var keyTypeIsString = rtkeyid == stringTypId - if keyTypeIsString { - asSymbols = e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - } else { - for rtkey.Kind() == reflect.Ptr { - rtkey = rtkey.Elem() - } - if rtkey.Kind() != reflect.Interface { - rtkeyid = reflect.ValueOf(rtkey).Pointer() - keyFn = e.getEncFn(rtkeyid, rtkey, true, true) - } - } - for rtval.Kind() == reflect.Ptr { - rtval = rtval.Elem() - } - if rtval.Kind() != reflect.Interface { - rtvalid := reflect.ValueOf(rtval).Pointer() - valFn = e.getEncFn(rtvalid, rtval, true, true) - } - mks := rv.MapKeys() - // for j, lmks := 0, len(mks); j < lmks; j++ { - - if e.h.Canonical { - e.kMapCanonical(rtkeyid, rtkey, rv, mks, valFn, asSymbols) - } else { - for j := range mks { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if keyTypeIsString { - if asSymbols { - ee.EncodeSymbol(mks[j].String()) - } else { - ee.EncodeString(c_UTF8, mks[j].String()) - } - } else { - e.encodeValue(mks[j], keyFn) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(rv.MapIndex(mks[j]), valFn) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (e *Encoder) kMapCanonical(rtkeyid uintptr, rtkey reflect.Type, rv reflect.Value, mks []reflect.Value, valFn *encFn, asSymbols bool) { - ee := e.e - cr := e.cr - // we previously did out-of-band if an extension was registered. - // This is not necessary, as the natural kind is sufficient for ordering. - - if rtkeyid == uint8SliceTypId { - mksv := make([]bytesRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.Bytes() - } - sort.Sort(bytesRvSlice(mksv)) - for i := range mksv { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeStringBytes(c_RAW, mksv[i].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn) - } - } else { - switch rtkey.Kind() { - case reflect.Bool: - mksv := make([]boolRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.Bool() - } - sort.Sort(boolRvSlice(mksv)) - for i := range mksv { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(mksv[i].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn) - } - case reflect.String: - mksv := make([]stringRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.String() - } - sort.Sort(stringRvSlice(mksv)) - for i := range mksv { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(mksv[i].v) - } else { - ee.EncodeString(c_UTF8, mksv[i].v) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn) - } - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr: - mksv := make([]uintRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.Uint() - } - sort.Sort(uintRvSlice(mksv)) - for i := range mksv { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(mksv[i].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn) - } - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - mksv := make([]intRv, 
len(mks))
- for i, k := range mks {
- v := &mksv[i]
- v.r = k
- v.v = k.Int()
- }
- sort.Sort(intRvSlice(mksv))
- for i := range mksv {
- if cr != nil {
- cr.sendContainerState(containerMapKey)
- }
- ee.EncodeInt(mksv[i].v)
- if cr != nil {
- cr.sendContainerState(containerMapValue)
- }
- e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
- }
- case reflect.Float32:
- mksv := make([]floatRv, len(mks))
- for i, k := range mks {
- v := &mksv[i]
- v.r = k
- v.v = k.Float()
- }
- sort.Sort(floatRvSlice(mksv))
- for i := range mksv {
- if cr != nil {
- cr.sendContainerState(containerMapKey)
- }
- ee.EncodeFloat32(float32(mksv[i].v))
- if cr != nil {
- cr.sendContainerState(containerMapValue)
- }
- e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
- }
- case reflect.Float64:
- mksv := make([]floatRv, len(mks))
- for i, k := range mks {
- v := &mksv[i]
- v.r = k
- v.v = k.Float()
- }
- sort.Sort(floatRvSlice(mksv))
- for i := range mksv {
- if cr != nil {
- cr.sendContainerState(containerMapKey)
- }
- ee.EncodeFloat64(mksv[i].v)
- if cr != nil {
- cr.sendContainerState(containerMapValue)
- }
- e.encodeValue(rv.MapIndex(mksv[i].r), valFn)
- }
- default:
- // out-of-band
- // encode each key to a []byte first, then sort them, then record
- var mksv []byte = make([]byte, 0, len(mks)*16) // temporary byte slice for the encoding
- e2 := NewEncoderBytes(&mksv, e.hh)
- mksbv := make([]bytesRv, len(mks))
- for i, k := range mks {
- v := &mksbv[i]
- l := len(mksv)
- e2.MustEncode(k)
- v.r = k
- v.v = mksv[l:]
- // fmt.Printf(">>>>> %s\n", mksv[l:])
- }
- sort.Sort(bytesRvSlice(mksbv))
- for j := range mksbv {
- if cr != nil {
- cr.sendContainerState(containerMapKey)
- }
- e.asis(mksbv[j].v)
- if cr != nil {
- cr.sendContainerState(containerMapValue)
- }
- e.encodeValue(rv.MapIndex(mksbv[j].r), valFn)
- }
- }
- }
-}
-
-// --------------------------------------------------
-
-// encFn encapsulates the captured variables and the encode function.
-// This way, we only do some calculations one time, and pass to the
-// code block that should be called (encapsulated in a function)
-// instead of executing the checks every time.
-type encFn struct {
- i encFnInfo
- f func(*encFnInfo, reflect.Value)
-}
-
-// --------------------------------------------------
-
-type encRtidFn struct {
- rtid uintptr
- fn encFn
-}
-
-// An Encoder writes an object to an output stream in the codec format.
-type Encoder struct {
- // hopefully, reduce dereferencing cost by laying the encWriter inside the Encoder
- e encDriver
- // NOTE: Encoder shouldn't call its write methods,
- // as the handler MAY need to do some coordination.
- w encWriter
- s []encRtidFn
- ci set
- be bool // is binary encoding
- js bool // is json handle
-
- wi ioEncWriter
- wb bytesEncWriter
-
- h *BasicHandle
- hh Handle
-
- cr containerStateRecv
- as encDriverAsis
-
- f map[uintptr]*encFn
- b [scratchByteArrayLen]byte
-}
-
-// NewEncoder returns an Encoder for encoding into an io.Writer.
-//
-// For efficiency, users are encouraged to pass in a memory buffered writer
-// (e.g. bufio.Writer, bytes.Buffer).
-func NewEncoder(w io.Writer, h Handle) *Encoder {
- e := newEncoder(h)
- e.Reset(w)
- return e
-}
-
-// NewEncoderBytes returns an encoder for encoding directly and efficiently
-// into a byte slice, using zero-copying to temporary slices.
-//
-// It will potentially replace the output byte slice pointed to.
-// After encoding, the out parameter contains the encoded contents.
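-//
-// A minimal usage sketch (assuming a JsonHandle; any Handle works the same way):
-//
-//	var out []byte
-//	enc := NewEncoderBytes(&out, new(JsonHandle))
-//	if err := enc.Encode(map[string]int{"a": 1}); err != nil {
-//		// handle the error
-//	}
-//	// out now holds the encoded bytes.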
-func NewEncoderBytes(out *[]byte, h Handle) *Encoder {
- e := newEncoder(h)
- e.ResetBytes(out)
- return e
-}
-
-func newEncoder(h Handle) *Encoder {
- e := &Encoder{hh: h, h: h.getBasicHandle(), be: h.isBinary()}
- _, e.js = h.(*JsonHandle)
- e.e = h.newEncDriver(e)
- e.as, _ = e.e.(encDriverAsis)
- e.cr, _ = e.e.(containerStateRecv)
- return e
-}
-
-// Reset the Encoder with a new output stream.
-//
-// This accommodates using the state of the Encoder,
-// where it has "cached" information about sub-engines.
-func (e *Encoder) Reset(w io.Writer) {
- ww, ok := w.(ioEncWriterWriter)
- if ok {
- e.wi.w = ww
- } else {
- sww := &e.wi.s
- sww.w = w
- sww.bw, _ = w.(io.ByteWriter)
- sww.sw, _ = w.(ioEncStringWriter)
- e.wi.w = sww
- //ww = bufio.NewWriterSize(w, defEncByteBufSize)
- }
- e.w = &e.wi
- e.e.reset()
-}
-
-func (e *Encoder) ResetBytes(out *[]byte) {
- in := *out
- if in == nil {
- in = make([]byte, defEncByteBufSize)
- }
- e.wb.b, e.wb.out, e.wb.c = in, out, 0
- e.w = &e.wb
- e.e.reset()
-}
-
-// func (e *Encoder) sendContainerState(c containerState) {
-// if e.cr != nil {
-// e.cr.sendContainerState(c)
-// }
-// }
-
-// Encode writes an object into a stream.
-//
-// Encoding can be configured via the struct tag for the fields.
-// The "codec" key in struct field's tag value is the key name,
-// followed by an optional comma and options.
-// Note that the "json" key is used in the absence of the "codec" key.
-//
-// To set an option on all fields (e.g. omitempty on all fields), you
-// can create a field called _struct, and set flags on it.
-//
-// Struct values "usually" encode as maps. Each exported struct field is encoded unless:
-// - the field's tag is "-", OR
-// - the field is empty (as defined below) and its tag specifies the "omitempty" option.
-//
-// When encoding as a map, the first string in the tag (before the comma)
-// is the map key string to use when encoding.
-//
-// However, struct values may encode as arrays. This happens when:
-// - StructToArray Encode option is set, OR
-// - the tag on the _struct field sets the "toarray" option
-//
-// Values with types that implement MapBySlice are encoded as stream maps.
-//
-// The empty values (for omitempty option) are false, 0, any nil pointer
-// or interface value, and any array, slice, map, or string of length zero.
-//
-// Anonymous fields are encoded inline except:
-// - the struct tag specifies a replacement name (first value)
-// - the field is of an interface type
-//
-// Examples:
-//
-// // NOTE: 'json:' can be used as struct tag key, in place of 'codec:' below.
-// type MyStruct struct {
-// _struct bool `codec:",omitempty"` //set omitempty for every field
-// Field1 string `codec:"-"` //skip this field
-// Field2 int `codec:"myName"` //Use key "myName" in encode stream
-// Field3 int32 `codec:",omitempty"` //use key "Field3". Omit if empty.
-// Field4 bool `codec:"f4,omitempty"` //use key "f4". Omit if empty.
-// io.Reader //use key "Reader".
-// MyStruct `codec:"my1"` //use key "my1".
-// MyStruct //inline it
-// ...
-// }
-//
-// type MyStruct struct {
-// _struct bool `codec:",omitempty,toarray"` //set omitempty for every field
-// //and encode struct as an array
-// }
-//
-// The mode of encoding is based on the type of the value.
When a value is seen: -// - If a Selfer, call its CodecEncodeSelf method -// - If an extension is registered for it, call that extension function -// - If it implements encoding.(Binary|Text|JSON)Marshaler, call its Marshal(Binary|Text|JSON) method -// - Else encode it based on its reflect.Kind -// -// Note that struct field names and keys in map[string]XXX will be treated as symbols. -// Some formats support symbols (e.g. binc) and will properly encode the string -// only once in the stream, and use a tag to refer to it thereafter. -func (e *Encoder) Encode(v interface{}) (err error) { - defer panicToErr(&err) - e.encode(v) - e.w.atEndOfEncode() - return -} - -// MustEncode is like Encode, but panics if unable to Encode. -// This provides insight to the code location that triggered the error. -func (e *Encoder) MustEncode(v interface{}) { - e.encode(v) - e.w.atEndOfEncode() -} - -func (e *Encoder) encode(iv interface{}) { - // if ics, ok := iv.(Selfer); ok { - // ics.CodecEncodeSelf(e) - // return - // } - - switch v := iv.(type) { - case nil: - e.e.EncodeNil() - case Selfer: - v.CodecEncodeSelf(e) - case Raw: - e.raw(v) - case reflect.Value: - e.encodeValue(v, nil) - - case string: - e.e.EncodeString(c_UTF8, v) - case bool: - e.e.EncodeBool(v) - case int: - e.e.EncodeInt(int64(v)) - case int8: - e.e.EncodeInt(int64(v)) - case int16: - e.e.EncodeInt(int64(v)) - case int32: - e.e.EncodeInt(int64(v)) - case int64: - e.e.EncodeInt(v) - case uint: - e.e.EncodeUint(uint64(v)) - case uint8: - e.e.EncodeUint(uint64(v)) - case uint16: - e.e.EncodeUint(uint64(v)) - case uint32: - e.e.EncodeUint(uint64(v)) - case uint64: - e.e.EncodeUint(v) - case float32: - e.e.EncodeFloat32(v) - case float64: - e.e.EncodeFloat64(v) - - case []uint8: - e.e.EncodeStringBytes(c_RAW, v) - - case *string: - e.e.EncodeString(c_UTF8, *v) - case *bool: - e.e.EncodeBool(*v) - case *int: - e.e.EncodeInt(int64(*v)) - case *int8: - e.e.EncodeInt(int64(*v)) - case *int16: - e.e.EncodeInt(int64(*v)) - case *int32: - e.e.EncodeInt(int64(*v)) - case *int64: - e.e.EncodeInt(*v) - case *uint: - e.e.EncodeUint(uint64(*v)) - case *uint8: - e.e.EncodeUint(uint64(*v)) - case *uint16: - e.e.EncodeUint(uint64(*v)) - case *uint32: - e.e.EncodeUint(uint64(*v)) - case *uint64: - e.e.EncodeUint(*v) - case *float32: - e.e.EncodeFloat32(*v) - case *float64: - e.e.EncodeFloat64(*v) - - case *[]uint8: - e.e.EncodeStringBytes(c_RAW, *v) - - default: - const checkCodecSelfer1 = true // in case T is passed, where *T is a Selfer, still checkCodecSelfer - if !fastpathEncodeTypeSwitch(iv, e) { - e.encodeI(iv, false, checkCodecSelfer1) - } - } -} - -func (e *Encoder) preEncodeValue(rv reflect.Value) (rv2 reflect.Value, sptr uintptr, proceed bool) { - // use a goto statement instead of a recursive function for ptr/interface. -TOP: - switch rv.Kind() { - case reflect.Ptr: - if rv.IsNil() { - e.e.EncodeNil() - return - } - rv = rv.Elem() - if e.h.CheckCircularRef && rv.Kind() == reflect.Struct { - // TODO: Movable pointers will be an issue here. Future problem. 
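- // Note: sptr is recorded in the e.ci set by doEncodeValue and removed again
- // once the value has been encoded; encountering the same struct address
- // before it is removed is reported as a circular reference.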
- sptr = rv.UnsafeAddr() - break TOP - } - goto TOP - case reflect.Interface: - if rv.IsNil() { - e.e.EncodeNil() - return - } - rv = rv.Elem() - goto TOP - case reflect.Slice, reflect.Map: - if rv.IsNil() { - e.e.EncodeNil() - return - } - case reflect.Invalid, reflect.Func: - e.e.EncodeNil() - return - } - - proceed = true - rv2 = rv - return -} - -func (e *Encoder) doEncodeValue(rv reflect.Value, fn *encFn, sptr uintptr, - checkFastpath, checkCodecSelfer bool) { - if sptr != 0 { - if (&e.ci).add(sptr) { - e.errorf("circular reference found: # %d", sptr) - } - } - if fn == nil { - rt := rv.Type() - rtid := reflect.ValueOf(rt).Pointer() - // fn = e.getEncFn(rtid, rt, true, true) - fn = e.getEncFn(rtid, rt, checkFastpath, checkCodecSelfer) - } - fn.f(&fn.i, rv) - if sptr != 0 { - (&e.ci).remove(sptr) - } -} - -func (e *Encoder) encodeI(iv interface{}, checkFastpath, checkCodecSelfer bool) { - if rv, sptr, proceed := e.preEncodeValue(reflect.ValueOf(iv)); proceed { - e.doEncodeValue(rv, nil, sptr, checkFastpath, checkCodecSelfer) - } -} - -func (e *Encoder) encodeValue(rv reflect.Value, fn *encFn) { - // if a valid fn is passed, it MUST BE for the dereferenced type of rv - if rv, sptr, proceed := e.preEncodeValue(rv); proceed { - e.doEncodeValue(rv, fn, sptr, true, true) - } -} - -func (e *Encoder) getEncFn(rtid uintptr, rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *encFn) { - // rtid := reflect.ValueOf(rt).Pointer() - var ok bool - if useMapForCodecCache { - fn, ok = e.f[rtid] - } else { - for i := range e.s { - v := &(e.s[i]) - if v.rtid == rtid { - fn, ok = &(v.fn), true - break - } - } - } - if ok { - return - } - - if useMapForCodecCache { - if e.f == nil { - e.f = make(map[uintptr]*encFn, initCollectionCap) - } - fn = new(encFn) - e.f[rtid] = fn - } else { - if e.s == nil { - e.s = make([]encRtidFn, 0, initCollectionCap) - } - e.s = append(e.s, encRtidFn{rtid: rtid}) - fn = &(e.s[len(e.s)-1]).fn - } - - ti := e.h.getTypeInfo(rtid, rt) - fi := &(fn.i) - fi.e = e - fi.ti = ti - - if checkCodecSelfer && ti.cs { - fn.f = (*encFnInfo).selferMarshal - } else if rtid == rawTypId { - fn.f = (*encFnInfo).raw - } else if rtid == rawExtTypId { - fn.f = (*encFnInfo).rawExt - } else if e.e.IsBuiltinType(rtid) { - fn.f = (*encFnInfo).builtin - } else if xfFn := e.h.getExt(rtid); xfFn != nil { - fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext - fn.f = (*encFnInfo).ext - } else if supportMarshalInterfaces && e.be && ti.bm { - fn.f = (*encFnInfo).binaryMarshal - } else if supportMarshalInterfaces && !e.be && e.js && ti.jm { - //If JSON, we should check JSONMarshal before textMarshal - fn.f = (*encFnInfo).jsonMarshal - } else if supportMarshalInterfaces && !e.be && ti.tm { - fn.f = (*encFnInfo).textMarshal - } else { - rk := rt.Kind() - if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) { - if rt.PkgPath() == "" { // un-named slice or map - if idx := fastpathAV.index(rtid); idx != -1 { - fn.f = fastpathAV[idx].encfn - } - } else { - ok = false - // use mapping for underlying type if there - var rtu reflect.Type - if rk == reflect.Map { - rtu = reflect.MapOf(rt.Key(), rt.Elem()) - } else { - rtu = reflect.SliceOf(rt.Elem()) - } - rtuid := reflect.ValueOf(rtu).Pointer() - if idx := fastpathAV.index(rtuid); idx != -1 { - xfnf := fastpathAV[idx].encfn - xrt := fastpathAV[idx].rt - fn.f = func(xf *encFnInfo, xrv reflect.Value) { - xfnf(xf, xrv.Convert(xrt)) - } - } - } - } - if fn.f == nil { - switch rk { - case reflect.Bool: - fn.f = (*encFnInfo).kBool - case 
reflect.String:
- fn.f = (*encFnInfo).kString
- case reflect.Float64:
- fn.f = (*encFnInfo).kFloat64
- case reflect.Float32:
- fn.f = (*encFnInfo).kFloat32
- case reflect.Int, reflect.Int8, reflect.Int64, reflect.Int32, reflect.Int16:
- fn.f = (*encFnInfo).kInt
- case reflect.Uint8, reflect.Uint64, reflect.Uint, reflect.Uint32, reflect.Uint16, reflect.Uintptr:
- fn.f = (*encFnInfo).kUint
- case reflect.Invalid:
- fn.f = (*encFnInfo).kInvalid
- case reflect.Chan:
- fi.seq = seqTypeChan
- fn.f = (*encFnInfo).kSlice
- case reflect.Slice:
- fi.seq = seqTypeSlice
- fn.f = (*encFnInfo).kSlice
- case reflect.Array:
- fi.seq = seqTypeArray
- fn.f = (*encFnInfo).kSlice
- case reflect.Struct:
- fn.f = (*encFnInfo).kStruct
- // reflect.Ptr and reflect.Interface are handled already by preEncodeValue
- // case reflect.Ptr:
- // fn.f = (*encFnInfo).kPtr
- // case reflect.Interface:
- // fn.f = (*encFnInfo).kInterface
- case reflect.Map:
- fn.f = (*encFnInfo).kMap
- default:
- fn.f = (*encFnInfo).kErr
- }
- }
- }
-
- return
-}
-
-func (e *Encoder) marshal(bs []byte, fnerr error, asis bool, c charEncoding) {
- if fnerr != nil {
- panic(fnerr)
- }
- if bs == nil {
- e.e.EncodeNil()
- } else if asis {
- e.asis(bs)
- } else {
- e.e.EncodeStringBytes(c, bs)
- }
-}
-
-func (e *Encoder) asis(v []byte) {
- if e.as == nil {
- e.w.writeb(v)
- } else {
- e.as.EncodeAsis(v)
- }
-}
-
-func (e *Encoder) raw(vv Raw) {
- v := []byte(vv)
- if !e.h.Raw {
- e.errorf("Raw values cannot be encoded: %v", v)
- }
- if e.as == nil {
- e.w.writeb(v)
- } else {
- e.as.EncodeAsis(v)
- }
-}
-
-func (e *Encoder) errorf(format string, params ...interface{}) {
- err := fmt.Errorf(format, params...)
- panic(err)
-}
-
-// ----------------------------------------
-
-const encStructPoolLen = 5
-
-// encStructPool is an array of sync.Pool.
-// Each element of the array pools one of encStructPool(8|16|32|64|128).
-// It allows the re-use of slices up to 128 in length.
-// A performance cost of encoding structs was collecting
-// which values were empty and should be omitted.
-// We needed slices of reflect.Value and string to collect them.
-// This shared pool reduces the amount of unnecessary creation we do.
-// The cost is that of locking sometimes, but sync.Pool is efficient
-// enough to reduce thread contention.
-var encStructPool [encStructPoolLen]sync.Pool
-
-func init() {
- encStructPool[0].New = func() interface{} { return new([8]stringRv) }
- encStructPool[1].New = func() interface{} { return new([16]stringRv) }
- encStructPool[2].New = func() interface{} { return new([32]stringRv) }
- encStructPool[3].New = func() interface{} { return new([64]stringRv) }
- encStructPool[4].New = func() interface{} { return new([128]stringRv) }
-}
-
-func encStructPoolGet(newlen int) (p *sync.Pool, v interface{}, s []stringRv) {
- // if encStructPoolLen != 5 { // constant check, so removed at build time.
- // panic(errors.New("encStructPoolLen must be equal to 5")) // defensive, in case it is changed
- // }
- // idxpool := newlen / 8
- if newlen <= 8 {
- p = &encStructPool[0]
- v = p.Get()
- s = v.(*[8]stringRv)[:newlen]
- } else if newlen <= 16 {
- p = &encStructPool[1]
- v = p.Get()
- s = v.(*[16]stringRv)[:newlen]
- } else if newlen <= 32 {
- p = &encStructPool[2]
- v = p.Get()
- s = v.(*[32]stringRv)[:newlen]
- } else if newlen <= 64 {
- p = &encStructPool[3]
- v = p.Get()
- s = v.(*[64]stringRv)[:newlen]
- } else if newlen <= 128 {
- p = &encStructPool[4]
- v = p.Get()
- s = v.(*[128]stringRv)[:newlen]
- } else {
- s = make([]stringRv, newlen)
- }
- return
-}
-
-// ----------------------------------------
-
-// func encErr(format string, params ...interface{}) {
-// doPanic(msgTagEnc, format, params...)
-// }
diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/fast-path.generated.go b/cluster-autoscaler/vendor/github.com/ugorji/go/codec/fast-path.generated.go
deleted file mode 100644
index f2e5d2dcf66e..000000000000
--- a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/fast-path.generated.go
+++ /dev/null
@@ -1,39352 +0,0 @@
-// +build !notfastpath
-
-// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
-// Use of this source code is governed by a MIT license found in the LICENSE file.
-
-// ************************************************************
-// DO NOT EDIT.
-// THIS FILE IS AUTO-GENERATED from fast-path.go.tmpl
-// ************************************************************
-
-package codec
-
-// Fast path functions try to create a fast path encode or decode implementation
-// for common maps and slices.
-//
-// We define the functions and register them in this single file
-// so as not to pollute the encode.go and decode.go, and create a dependency in there.
-// This file can be omitted without causing a build failure.
-//
-// The advantage of fast paths is:
-// - Many calls bypass reflection altogether
-//
-// Currently supported:
-// - slice of all builtin types,
-// - map of all builtin types to string or interface value
-// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8)
-// This should provide adequate "typical" implementations.
-//
-// Note that fast track decode functions must handle values for which an address cannot be obtained.
-// For example:
-// m2 := map[string]int{}
-// p2 := []interface{}{m2}
-// // decoding into p2 will bomb if fast track functions do not treat it as unaddressable.
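-// (Illustrative note: the usual pattern for such unaddressable elements is to
-// decode into a local value and then store that value back into the containing
-// slice or interface afterwards, rather than taking the element's address,
-// which Go does not permit here.)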
-// - -import ( - "reflect" - "sort" -) - -const fastpathEnabled = true - -const fastpathCheckNilFalse = false // for reflect -const fastpathCheckNilTrue = true // for type switch - -type fastpathT struct{} - -var fastpathTV fastpathT - -type fastpathE struct { - rtid uintptr - rt reflect.Type - encfn func(*encFnInfo, reflect.Value) - decfn func(*decFnInfo, reflect.Value) -} - -type fastpathA [271]fastpathE - -func (x *fastpathA) index(rtid uintptr) int { - // use binary search to grab the index (adapted from sort/search.go) - h, i, j := 0, 0, 271 // len(x) - for i < j { - h = i + (j-i)/2 - if x[h].rtid < rtid { - i = h + 1 - } else { - j = h - } - } - if i < 271 && x[i].rtid == rtid { - return i - } - return -1 -} - -type fastpathAslice []fastpathE - -func (x fastpathAslice) Len() int { return len(x) } -func (x fastpathAslice) Less(i, j int) bool { return x[i].rtid < x[j].rtid } -func (x fastpathAslice) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -var fastpathAV fastpathA - -// due to possible initialization loop error, make fastpath in an init() -func init() { - i := 0 - fn := func(v interface{}, fe func(*encFnInfo, reflect.Value), fd func(*decFnInfo, reflect.Value)) (f fastpathE) { - xrt := reflect.TypeOf(v) - xptr := reflect.ValueOf(xrt).Pointer() - fastpathAV[i] = fastpathE{xptr, xrt, fe, fd} - i++ - return - } - - fn([]interface{}(nil), (*encFnInfo).fastpathEncSliceIntfR, (*decFnInfo).fastpathDecSliceIntfR) - fn([]string(nil), (*encFnInfo).fastpathEncSliceStringR, (*decFnInfo).fastpathDecSliceStringR) - fn([]float32(nil), (*encFnInfo).fastpathEncSliceFloat32R, (*decFnInfo).fastpathDecSliceFloat32R) - fn([]float64(nil), (*encFnInfo).fastpathEncSliceFloat64R, (*decFnInfo).fastpathDecSliceFloat64R) - fn([]uint(nil), (*encFnInfo).fastpathEncSliceUintR, (*decFnInfo).fastpathDecSliceUintR) - fn([]uint16(nil), (*encFnInfo).fastpathEncSliceUint16R, (*decFnInfo).fastpathDecSliceUint16R) - fn([]uint32(nil), (*encFnInfo).fastpathEncSliceUint32R, (*decFnInfo).fastpathDecSliceUint32R) - fn([]uint64(nil), (*encFnInfo).fastpathEncSliceUint64R, (*decFnInfo).fastpathDecSliceUint64R) - fn([]uintptr(nil), (*encFnInfo).fastpathEncSliceUintptrR, (*decFnInfo).fastpathDecSliceUintptrR) - fn([]int(nil), (*encFnInfo).fastpathEncSliceIntR, (*decFnInfo).fastpathDecSliceIntR) - fn([]int8(nil), (*encFnInfo).fastpathEncSliceInt8R, (*decFnInfo).fastpathDecSliceInt8R) - fn([]int16(nil), (*encFnInfo).fastpathEncSliceInt16R, (*decFnInfo).fastpathDecSliceInt16R) - fn([]int32(nil), (*encFnInfo).fastpathEncSliceInt32R, (*decFnInfo).fastpathDecSliceInt32R) - fn([]int64(nil), (*encFnInfo).fastpathEncSliceInt64R, (*decFnInfo).fastpathDecSliceInt64R) - fn([]bool(nil), (*encFnInfo).fastpathEncSliceBoolR, (*decFnInfo).fastpathDecSliceBoolR) - - fn(map[interface{}]interface{}(nil), (*encFnInfo).fastpathEncMapIntfIntfR, (*decFnInfo).fastpathDecMapIntfIntfR) - fn(map[interface{}]string(nil), (*encFnInfo).fastpathEncMapIntfStringR, (*decFnInfo).fastpathDecMapIntfStringR) - fn(map[interface{}]uint(nil), (*encFnInfo).fastpathEncMapIntfUintR, (*decFnInfo).fastpathDecMapIntfUintR) - fn(map[interface{}]uint8(nil), (*encFnInfo).fastpathEncMapIntfUint8R, (*decFnInfo).fastpathDecMapIntfUint8R) - fn(map[interface{}]uint16(nil), (*encFnInfo).fastpathEncMapIntfUint16R, (*decFnInfo).fastpathDecMapIntfUint16R) - fn(map[interface{}]uint32(nil), (*encFnInfo).fastpathEncMapIntfUint32R, (*decFnInfo).fastpathDecMapIntfUint32R) - fn(map[interface{}]uint64(nil), (*encFnInfo).fastpathEncMapIntfUint64R, (*decFnInfo).fastpathDecMapIntfUint64R) - 
fn(map[interface{}]uintptr(nil), (*encFnInfo).fastpathEncMapIntfUintptrR, (*decFnInfo).fastpathDecMapIntfUintptrR) - fn(map[interface{}]int(nil), (*encFnInfo).fastpathEncMapIntfIntR, (*decFnInfo).fastpathDecMapIntfIntR) - fn(map[interface{}]int8(nil), (*encFnInfo).fastpathEncMapIntfInt8R, (*decFnInfo).fastpathDecMapIntfInt8R) - fn(map[interface{}]int16(nil), (*encFnInfo).fastpathEncMapIntfInt16R, (*decFnInfo).fastpathDecMapIntfInt16R) - fn(map[interface{}]int32(nil), (*encFnInfo).fastpathEncMapIntfInt32R, (*decFnInfo).fastpathDecMapIntfInt32R) - fn(map[interface{}]int64(nil), (*encFnInfo).fastpathEncMapIntfInt64R, (*decFnInfo).fastpathDecMapIntfInt64R) - fn(map[interface{}]float32(nil), (*encFnInfo).fastpathEncMapIntfFloat32R, (*decFnInfo).fastpathDecMapIntfFloat32R) - fn(map[interface{}]float64(nil), (*encFnInfo).fastpathEncMapIntfFloat64R, (*decFnInfo).fastpathDecMapIntfFloat64R) - fn(map[interface{}]bool(nil), (*encFnInfo).fastpathEncMapIntfBoolR, (*decFnInfo).fastpathDecMapIntfBoolR) - fn(map[string]interface{}(nil), (*encFnInfo).fastpathEncMapStringIntfR, (*decFnInfo).fastpathDecMapStringIntfR) - fn(map[string]string(nil), (*encFnInfo).fastpathEncMapStringStringR, (*decFnInfo).fastpathDecMapStringStringR) - fn(map[string]uint(nil), (*encFnInfo).fastpathEncMapStringUintR, (*decFnInfo).fastpathDecMapStringUintR) - fn(map[string]uint8(nil), (*encFnInfo).fastpathEncMapStringUint8R, (*decFnInfo).fastpathDecMapStringUint8R) - fn(map[string]uint16(nil), (*encFnInfo).fastpathEncMapStringUint16R, (*decFnInfo).fastpathDecMapStringUint16R) - fn(map[string]uint32(nil), (*encFnInfo).fastpathEncMapStringUint32R, (*decFnInfo).fastpathDecMapStringUint32R) - fn(map[string]uint64(nil), (*encFnInfo).fastpathEncMapStringUint64R, (*decFnInfo).fastpathDecMapStringUint64R) - fn(map[string]uintptr(nil), (*encFnInfo).fastpathEncMapStringUintptrR, (*decFnInfo).fastpathDecMapStringUintptrR) - fn(map[string]int(nil), (*encFnInfo).fastpathEncMapStringIntR, (*decFnInfo).fastpathDecMapStringIntR) - fn(map[string]int8(nil), (*encFnInfo).fastpathEncMapStringInt8R, (*decFnInfo).fastpathDecMapStringInt8R) - fn(map[string]int16(nil), (*encFnInfo).fastpathEncMapStringInt16R, (*decFnInfo).fastpathDecMapStringInt16R) - fn(map[string]int32(nil), (*encFnInfo).fastpathEncMapStringInt32R, (*decFnInfo).fastpathDecMapStringInt32R) - fn(map[string]int64(nil), (*encFnInfo).fastpathEncMapStringInt64R, (*decFnInfo).fastpathDecMapStringInt64R) - fn(map[string]float32(nil), (*encFnInfo).fastpathEncMapStringFloat32R, (*decFnInfo).fastpathDecMapStringFloat32R) - fn(map[string]float64(nil), (*encFnInfo).fastpathEncMapStringFloat64R, (*decFnInfo).fastpathDecMapStringFloat64R) - fn(map[string]bool(nil), (*encFnInfo).fastpathEncMapStringBoolR, (*decFnInfo).fastpathDecMapStringBoolR) - fn(map[float32]interface{}(nil), (*encFnInfo).fastpathEncMapFloat32IntfR, (*decFnInfo).fastpathDecMapFloat32IntfR) - fn(map[float32]string(nil), (*encFnInfo).fastpathEncMapFloat32StringR, (*decFnInfo).fastpathDecMapFloat32StringR) - fn(map[float32]uint(nil), (*encFnInfo).fastpathEncMapFloat32UintR, (*decFnInfo).fastpathDecMapFloat32UintR) - fn(map[float32]uint8(nil), (*encFnInfo).fastpathEncMapFloat32Uint8R, (*decFnInfo).fastpathDecMapFloat32Uint8R) - fn(map[float32]uint16(nil), (*encFnInfo).fastpathEncMapFloat32Uint16R, (*decFnInfo).fastpathDecMapFloat32Uint16R) - fn(map[float32]uint32(nil), (*encFnInfo).fastpathEncMapFloat32Uint32R, (*decFnInfo).fastpathDecMapFloat32Uint32R) - fn(map[float32]uint64(nil), (*encFnInfo).fastpathEncMapFloat32Uint64R, 
(*decFnInfo).fastpathDecMapFloat32Uint64R) - fn(map[float32]uintptr(nil), (*encFnInfo).fastpathEncMapFloat32UintptrR, (*decFnInfo).fastpathDecMapFloat32UintptrR) - fn(map[float32]int(nil), (*encFnInfo).fastpathEncMapFloat32IntR, (*decFnInfo).fastpathDecMapFloat32IntR) - fn(map[float32]int8(nil), (*encFnInfo).fastpathEncMapFloat32Int8R, (*decFnInfo).fastpathDecMapFloat32Int8R) - fn(map[float32]int16(nil), (*encFnInfo).fastpathEncMapFloat32Int16R, (*decFnInfo).fastpathDecMapFloat32Int16R) - fn(map[float32]int32(nil), (*encFnInfo).fastpathEncMapFloat32Int32R, (*decFnInfo).fastpathDecMapFloat32Int32R) - fn(map[float32]int64(nil), (*encFnInfo).fastpathEncMapFloat32Int64R, (*decFnInfo).fastpathDecMapFloat32Int64R) - fn(map[float32]float32(nil), (*encFnInfo).fastpathEncMapFloat32Float32R, (*decFnInfo).fastpathDecMapFloat32Float32R) - fn(map[float32]float64(nil), (*encFnInfo).fastpathEncMapFloat32Float64R, (*decFnInfo).fastpathDecMapFloat32Float64R) - fn(map[float32]bool(nil), (*encFnInfo).fastpathEncMapFloat32BoolR, (*decFnInfo).fastpathDecMapFloat32BoolR) - fn(map[float64]interface{}(nil), (*encFnInfo).fastpathEncMapFloat64IntfR, (*decFnInfo).fastpathDecMapFloat64IntfR) - fn(map[float64]string(nil), (*encFnInfo).fastpathEncMapFloat64StringR, (*decFnInfo).fastpathDecMapFloat64StringR) - fn(map[float64]uint(nil), (*encFnInfo).fastpathEncMapFloat64UintR, (*decFnInfo).fastpathDecMapFloat64UintR) - fn(map[float64]uint8(nil), (*encFnInfo).fastpathEncMapFloat64Uint8R, (*decFnInfo).fastpathDecMapFloat64Uint8R) - fn(map[float64]uint16(nil), (*encFnInfo).fastpathEncMapFloat64Uint16R, (*decFnInfo).fastpathDecMapFloat64Uint16R) - fn(map[float64]uint32(nil), (*encFnInfo).fastpathEncMapFloat64Uint32R, (*decFnInfo).fastpathDecMapFloat64Uint32R) - fn(map[float64]uint64(nil), (*encFnInfo).fastpathEncMapFloat64Uint64R, (*decFnInfo).fastpathDecMapFloat64Uint64R) - fn(map[float64]uintptr(nil), (*encFnInfo).fastpathEncMapFloat64UintptrR, (*decFnInfo).fastpathDecMapFloat64UintptrR) - fn(map[float64]int(nil), (*encFnInfo).fastpathEncMapFloat64IntR, (*decFnInfo).fastpathDecMapFloat64IntR) - fn(map[float64]int8(nil), (*encFnInfo).fastpathEncMapFloat64Int8R, (*decFnInfo).fastpathDecMapFloat64Int8R) - fn(map[float64]int16(nil), (*encFnInfo).fastpathEncMapFloat64Int16R, (*decFnInfo).fastpathDecMapFloat64Int16R) - fn(map[float64]int32(nil), (*encFnInfo).fastpathEncMapFloat64Int32R, (*decFnInfo).fastpathDecMapFloat64Int32R) - fn(map[float64]int64(nil), (*encFnInfo).fastpathEncMapFloat64Int64R, (*decFnInfo).fastpathDecMapFloat64Int64R) - fn(map[float64]float32(nil), (*encFnInfo).fastpathEncMapFloat64Float32R, (*decFnInfo).fastpathDecMapFloat64Float32R) - fn(map[float64]float64(nil), (*encFnInfo).fastpathEncMapFloat64Float64R, (*decFnInfo).fastpathDecMapFloat64Float64R) - fn(map[float64]bool(nil), (*encFnInfo).fastpathEncMapFloat64BoolR, (*decFnInfo).fastpathDecMapFloat64BoolR) - fn(map[uint]interface{}(nil), (*encFnInfo).fastpathEncMapUintIntfR, (*decFnInfo).fastpathDecMapUintIntfR) - fn(map[uint]string(nil), (*encFnInfo).fastpathEncMapUintStringR, (*decFnInfo).fastpathDecMapUintStringR) - fn(map[uint]uint(nil), (*encFnInfo).fastpathEncMapUintUintR, (*decFnInfo).fastpathDecMapUintUintR) - fn(map[uint]uint8(nil), (*encFnInfo).fastpathEncMapUintUint8R, (*decFnInfo).fastpathDecMapUintUint8R) - fn(map[uint]uint16(nil), (*encFnInfo).fastpathEncMapUintUint16R, (*decFnInfo).fastpathDecMapUintUint16R) - fn(map[uint]uint32(nil), (*encFnInfo).fastpathEncMapUintUint32R, (*decFnInfo).fastpathDecMapUintUint32R) - fn(map[uint]uint64(nil), 
(*encFnInfo).fastpathEncMapUintUint64R, (*decFnInfo).fastpathDecMapUintUint64R) - fn(map[uint]uintptr(nil), (*encFnInfo).fastpathEncMapUintUintptrR, (*decFnInfo).fastpathDecMapUintUintptrR) - fn(map[uint]int(nil), (*encFnInfo).fastpathEncMapUintIntR, (*decFnInfo).fastpathDecMapUintIntR) - fn(map[uint]int8(nil), (*encFnInfo).fastpathEncMapUintInt8R, (*decFnInfo).fastpathDecMapUintInt8R) - fn(map[uint]int16(nil), (*encFnInfo).fastpathEncMapUintInt16R, (*decFnInfo).fastpathDecMapUintInt16R) - fn(map[uint]int32(nil), (*encFnInfo).fastpathEncMapUintInt32R, (*decFnInfo).fastpathDecMapUintInt32R) - fn(map[uint]int64(nil), (*encFnInfo).fastpathEncMapUintInt64R, (*decFnInfo).fastpathDecMapUintInt64R) - fn(map[uint]float32(nil), (*encFnInfo).fastpathEncMapUintFloat32R, (*decFnInfo).fastpathDecMapUintFloat32R) - fn(map[uint]float64(nil), (*encFnInfo).fastpathEncMapUintFloat64R, (*decFnInfo).fastpathDecMapUintFloat64R) - fn(map[uint]bool(nil), (*encFnInfo).fastpathEncMapUintBoolR, (*decFnInfo).fastpathDecMapUintBoolR) - fn(map[uint8]interface{}(nil), (*encFnInfo).fastpathEncMapUint8IntfR, (*decFnInfo).fastpathDecMapUint8IntfR) - fn(map[uint8]string(nil), (*encFnInfo).fastpathEncMapUint8StringR, (*decFnInfo).fastpathDecMapUint8StringR) - fn(map[uint8]uint(nil), (*encFnInfo).fastpathEncMapUint8UintR, (*decFnInfo).fastpathDecMapUint8UintR) - fn(map[uint8]uint8(nil), (*encFnInfo).fastpathEncMapUint8Uint8R, (*decFnInfo).fastpathDecMapUint8Uint8R) - fn(map[uint8]uint16(nil), (*encFnInfo).fastpathEncMapUint8Uint16R, (*decFnInfo).fastpathDecMapUint8Uint16R) - fn(map[uint8]uint32(nil), (*encFnInfo).fastpathEncMapUint8Uint32R, (*decFnInfo).fastpathDecMapUint8Uint32R) - fn(map[uint8]uint64(nil), (*encFnInfo).fastpathEncMapUint8Uint64R, (*decFnInfo).fastpathDecMapUint8Uint64R) - fn(map[uint8]uintptr(nil), (*encFnInfo).fastpathEncMapUint8UintptrR, (*decFnInfo).fastpathDecMapUint8UintptrR) - fn(map[uint8]int(nil), (*encFnInfo).fastpathEncMapUint8IntR, (*decFnInfo).fastpathDecMapUint8IntR) - fn(map[uint8]int8(nil), (*encFnInfo).fastpathEncMapUint8Int8R, (*decFnInfo).fastpathDecMapUint8Int8R) - fn(map[uint8]int16(nil), (*encFnInfo).fastpathEncMapUint8Int16R, (*decFnInfo).fastpathDecMapUint8Int16R) - fn(map[uint8]int32(nil), (*encFnInfo).fastpathEncMapUint8Int32R, (*decFnInfo).fastpathDecMapUint8Int32R) - fn(map[uint8]int64(nil), (*encFnInfo).fastpathEncMapUint8Int64R, (*decFnInfo).fastpathDecMapUint8Int64R) - fn(map[uint8]float32(nil), (*encFnInfo).fastpathEncMapUint8Float32R, (*decFnInfo).fastpathDecMapUint8Float32R) - fn(map[uint8]float64(nil), (*encFnInfo).fastpathEncMapUint8Float64R, (*decFnInfo).fastpathDecMapUint8Float64R) - fn(map[uint8]bool(nil), (*encFnInfo).fastpathEncMapUint8BoolR, (*decFnInfo).fastpathDecMapUint8BoolR) - fn(map[uint16]interface{}(nil), (*encFnInfo).fastpathEncMapUint16IntfR, (*decFnInfo).fastpathDecMapUint16IntfR) - fn(map[uint16]string(nil), (*encFnInfo).fastpathEncMapUint16StringR, (*decFnInfo).fastpathDecMapUint16StringR) - fn(map[uint16]uint(nil), (*encFnInfo).fastpathEncMapUint16UintR, (*decFnInfo).fastpathDecMapUint16UintR) - fn(map[uint16]uint8(nil), (*encFnInfo).fastpathEncMapUint16Uint8R, (*decFnInfo).fastpathDecMapUint16Uint8R) - fn(map[uint16]uint16(nil), (*encFnInfo).fastpathEncMapUint16Uint16R, (*decFnInfo).fastpathDecMapUint16Uint16R) - fn(map[uint16]uint32(nil), (*encFnInfo).fastpathEncMapUint16Uint32R, (*decFnInfo).fastpathDecMapUint16Uint32R) - fn(map[uint16]uint64(nil), (*encFnInfo).fastpathEncMapUint16Uint64R, (*decFnInfo).fastpathDecMapUint16Uint64R) - 
fn(map[uint16]uintptr(nil), (*encFnInfo).fastpathEncMapUint16UintptrR, (*decFnInfo).fastpathDecMapUint16UintptrR) - fn(map[uint16]int(nil), (*encFnInfo).fastpathEncMapUint16IntR, (*decFnInfo).fastpathDecMapUint16IntR) - fn(map[uint16]int8(nil), (*encFnInfo).fastpathEncMapUint16Int8R, (*decFnInfo).fastpathDecMapUint16Int8R) - fn(map[uint16]int16(nil), (*encFnInfo).fastpathEncMapUint16Int16R, (*decFnInfo).fastpathDecMapUint16Int16R) - fn(map[uint16]int32(nil), (*encFnInfo).fastpathEncMapUint16Int32R, (*decFnInfo).fastpathDecMapUint16Int32R) - fn(map[uint16]int64(nil), (*encFnInfo).fastpathEncMapUint16Int64R, (*decFnInfo).fastpathDecMapUint16Int64R) - fn(map[uint16]float32(nil), (*encFnInfo).fastpathEncMapUint16Float32R, (*decFnInfo).fastpathDecMapUint16Float32R) - fn(map[uint16]float64(nil), (*encFnInfo).fastpathEncMapUint16Float64R, (*decFnInfo).fastpathDecMapUint16Float64R) - fn(map[uint16]bool(nil), (*encFnInfo).fastpathEncMapUint16BoolR, (*decFnInfo).fastpathDecMapUint16BoolR) - fn(map[uint32]interface{}(nil), (*encFnInfo).fastpathEncMapUint32IntfR, (*decFnInfo).fastpathDecMapUint32IntfR) - fn(map[uint32]string(nil), (*encFnInfo).fastpathEncMapUint32StringR, (*decFnInfo).fastpathDecMapUint32StringR) - fn(map[uint32]uint(nil), (*encFnInfo).fastpathEncMapUint32UintR, (*decFnInfo).fastpathDecMapUint32UintR) - fn(map[uint32]uint8(nil), (*encFnInfo).fastpathEncMapUint32Uint8R, (*decFnInfo).fastpathDecMapUint32Uint8R) - fn(map[uint32]uint16(nil), (*encFnInfo).fastpathEncMapUint32Uint16R, (*decFnInfo).fastpathDecMapUint32Uint16R) - fn(map[uint32]uint32(nil), (*encFnInfo).fastpathEncMapUint32Uint32R, (*decFnInfo).fastpathDecMapUint32Uint32R) - fn(map[uint32]uint64(nil), (*encFnInfo).fastpathEncMapUint32Uint64R, (*decFnInfo).fastpathDecMapUint32Uint64R) - fn(map[uint32]uintptr(nil), (*encFnInfo).fastpathEncMapUint32UintptrR, (*decFnInfo).fastpathDecMapUint32UintptrR) - fn(map[uint32]int(nil), (*encFnInfo).fastpathEncMapUint32IntR, (*decFnInfo).fastpathDecMapUint32IntR) - fn(map[uint32]int8(nil), (*encFnInfo).fastpathEncMapUint32Int8R, (*decFnInfo).fastpathDecMapUint32Int8R) - fn(map[uint32]int16(nil), (*encFnInfo).fastpathEncMapUint32Int16R, (*decFnInfo).fastpathDecMapUint32Int16R) - fn(map[uint32]int32(nil), (*encFnInfo).fastpathEncMapUint32Int32R, (*decFnInfo).fastpathDecMapUint32Int32R) - fn(map[uint32]int64(nil), (*encFnInfo).fastpathEncMapUint32Int64R, (*decFnInfo).fastpathDecMapUint32Int64R) - fn(map[uint32]float32(nil), (*encFnInfo).fastpathEncMapUint32Float32R, (*decFnInfo).fastpathDecMapUint32Float32R) - fn(map[uint32]float64(nil), (*encFnInfo).fastpathEncMapUint32Float64R, (*decFnInfo).fastpathDecMapUint32Float64R) - fn(map[uint32]bool(nil), (*encFnInfo).fastpathEncMapUint32BoolR, (*decFnInfo).fastpathDecMapUint32BoolR) - fn(map[uint64]interface{}(nil), (*encFnInfo).fastpathEncMapUint64IntfR, (*decFnInfo).fastpathDecMapUint64IntfR) - fn(map[uint64]string(nil), (*encFnInfo).fastpathEncMapUint64StringR, (*decFnInfo).fastpathDecMapUint64StringR) - fn(map[uint64]uint(nil), (*encFnInfo).fastpathEncMapUint64UintR, (*decFnInfo).fastpathDecMapUint64UintR) - fn(map[uint64]uint8(nil), (*encFnInfo).fastpathEncMapUint64Uint8R, (*decFnInfo).fastpathDecMapUint64Uint8R) - fn(map[uint64]uint16(nil), (*encFnInfo).fastpathEncMapUint64Uint16R, (*decFnInfo).fastpathDecMapUint64Uint16R) - fn(map[uint64]uint32(nil), (*encFnInfo).fastpathEncMapUint64Uint32R, (*decFnInfo).fastpathDecMapUint64Uint32R) - fn(map[uint64]uint64(nil), (*encFnInfo).fastpathEncMapUint64Uint64R, 
(*decFnInfo).fastpathDecMapUint64Uint64R) - fn(map[uint64]uintptr(nil), (*encFnInfo).fastpathEncMapUint64UintptrR, (*decFnInfo).fastpathDecMapUint64UintptrR) - fn(map[uint64]int(nil), (*encFnInfo).fastpathEncMapUint64IntR, (*decFnInfo).fastpathDecMapUint64IntR) - fn(map[uint64]int8(nil), (*encFnInfo).fastpathEncMapUint64Int8R, (*decFnInfo).fastpathDecMapUint64Int8R) - fn(map[uint64]int16(nil), (*encFnInfo).fastpathEncMapUint64Int16R, (*decFnInfo).fastpathDecMapUint64Int16R) - fn(map[uint64]int32(nil), (*encFnInfo).fastpathEncMapUint64Int32R, (*decFnInfo).fastpathDecMapUint64Int32R) - fn(map[uint64]int64(nil), (*encFnInfo).fastpathEncMapUint64Int64R, (*decFnInfo).fastpathDecMapUint64Int64R) - fn(map[uint64]float32(nil), (*encFnInfo).fastpathEncMapUint64Float32R, (*decFnInfo).fastpathDecMapUint64Float32R) - fn(map[uint64]float64(nil), (*encFnInfo).fastpathEncMapUint64Float64R, (*decFnInfo).fastpathDecMapUint64Float64R) - fn(map[uint64]bool(nil), (*encFnInfo).fastpathEncMapUint64BoolR, (*decFnInfo).fastpathDecMapUint64BoolR) - fn(map[uintptr]interface{}(nil), (*encFnInfo).fastpathEncMapUintptrIntfR, (*decFnInfo).fastpathDecMapUintptrIntfR) - fn(map[uintptr]string(nil), (*encFnInfo).fastpathEncMapUintptrStringR, (*decFnInfo).fastpathDecMapUintptrStringR) - fn(map[uintptr]uint(nil), (*encFnInfo).fastpathEncMapUintptrUintR, (*decFnInfo).fastpathDecMapUintptrUintR) - fn(map[uintptr]uint8(nil), (*encFnInfo).fastpathEncMapUintptrUint8R, (*decFnInfo).fastpathDecMapUintptrUint8R) - fn(map[uintptr]uint16(nil), (*encFnInfo).fastpathEncMapUintptrUint16R, (*decFnInfo).fastpathDecMapUintptrUint16R) - fn(map[uintptr]uint32(nil), (*encFnInfo).fastpathEncMapUintptrUint32R, (*decFnInfo).fastpathDecMapUintptrUint32R) - fn(map[uintptr]uint64(nil), (*encFnInfo).fastpathEncMapUintptrUint64R, (*decFnInfo).fastpathDecMapUintptrUint64R) - fn(map[uintptr]uintptr(nil), (*encFnInfo).fastpathEncMapUintptrUintptrR, (*decFnInfo).fastpathDecMapUintptrUintptrR) - fn(map[uintptr]int(nil), (*encFnInfo).fastpathEncMapUintptrIntR, (*decFnInfo).fastpathDecMapUintptrIntR) - fn(map[uintptr]int8(nil), (*encFnInfo).fastpathEncMapUintptrInt8R, (*decFnInfo).fastpathDecMapUintptrInt8R) - fn(map[uintptr]int16(nil), (*encFnInfo).fastpathEncMapUintptrInt16R, (*decFnInfo).fastpathDecMapUintptrInt16R) - fn(map[uintptr]int32(nil), (*encFnInfo).fastpathEncMapUintptrInt32R, (*decFnInfo).fastpathDecMapUintptrInt32R) - fn(map[uintptr]int64(nil), (*encFnInfo).fastpathEncMapUintptrInt64R, (*decFnInfo).fastpathDecMapUintptrInt64R) - fn(map[uintptr]float32(nil), (*encFnInfo).fastpathEncMapUintptrFloat32R, (*decFnInfo).fastpathDecMapUintptrFloat32R) - fn(map[uintptr]float64(nil), (*encFnInfo).fastpathEncMapUintptrFloat64R, (*decFnInfo).fastpathDecMapUintptrFloat64R) - fn(map[uintptr]bool(nil), (*encFnInfo).fastpathEncMapUintptrBoolR, (*decFnInfo).fastpathDecMapUintptrBoolR) - fn(map[int]interface{}(nil), (*encFnInfo).fastpathEncMapIntIntfR, (*decFnInfo).fastpathDecMapIntIntfR) - fn(map[int]string(nil), (*encFnInfo).fastpathEncMapIntStringR, (*decFnInfo).fastpathDecMapIntStringR) - fn(map[int]uint(nil), (*encFnInfo).fastpathEncMapIntUintR, (*decFnInfo).fastpathDecMapIntUintR) - fn(map[int]uint8(nil), (*encFnInfo).fastpathEncMapIntUint8R, (*decFnInfo).fastpathDecMapIntUint8R) - fn(map[int]uint16(nil), (*encFnInfo).fastpathEncMapIntUint16R, (*decFnInfo).fastpathDecMapIntUint16R) - fn(map[int]uint32(nil), (*encFnInfo).fastpathEncMapIntUint32R, (*decFnInfo).fastpathDecMapIntUint32R) - fn(map[int]uint64(nil), (*encFnInfo).fastpathEncMapIntUint64R, 
(*decFnInfo).fastpathDecMapIntUint64R) - fn(map[int]uintptr(nil), (*encFnInfo).fastpathEncMapIntUintptrR, (*decFnInfo).fastpathDecMapIntUintptrR) - fn(map[int]int(nil), (*encFnInfo).fastpathEncMapIntIntR, (*decFnInfo).fastpathDecMapIntIntR) - fn(map[int]int8(nil), (*encFnInfo).fastpathEncMapIntInt8R, (*decFnInfo).fastpathDecMapIntInt8R) - fn(map[int]int16(nil), (*encFnInfo).fastpathEncMapIntInt16R, (*decFnInfo).fastpathDecMapIntInt16R) - fn(map[int]int32(nil), (*encFnInfo).fastpathEncMapIntInt32R, (*decFnInfo).fastpathDecMapIntInt32R) - fn(map[int]int64(nil), (*encFnInfo).fastpathEncMapIntInt64R, (*decFnInfo).fastpathDecMapIntInt64R) - fn(map[int]float32(nil), (*encFnInfo).fastpathEncMapIntFloat32R, (*decFnInfo).fastpathDecMapIntFloat32R) - fn(map[int]float64(nil), (*encFnInfo).fastpathEncMapIntFloat64R, (*decFnInfo).fastpathDecMapIntFloat64R) - fn(map[int]bool(nil), (*encFnInfo).fastpathEncMapIntBoolR, (*decFnInfo).fastpathDecMapIntBoolR) - fn(map[int8]interface{}(nil), (*encFnInfo).fastpathEncMapInt8IntfR, (*decFnInfo).fastpathDecMapInt8IntfR) - fn(map[int8]string(nil), (*encFnInfo).fastpathEncMapInt8StringR, (*decFnInfo).fastpathDecMapInt8StringR) - fn(map[int8]uint(nil), (*encFnInfo).fastpathEncMapInt8UintR, (*decFnInfo).fastpathDecMapInt8UintR) - fn(map[int8]uint8(nil), (*encFnInfo).fastpathEncMapInt8Uint8R, (*decFnInfo).fastpathDecMapInt8Uint8R) - fn(map[int8]uint16(nil), (*encFnInfo).fastpathEncMapInt8Uint16R, (*decFnInfo).fastpathDecMapInt8Uint16R) - fn(map[int8]uint32(nil), (*encFnInfo).fastpathEncMapInt8Uint32R, (*decFnInfo).fastpathDecMapInt8Uint32R) - fn(map[int8]uint64(nil), (*encFnInfo).fastpathEncMapInt8Uint64R, (*decFnInfo).fastpathDecMapInt8Uint64R) - fn(map[int8]uintptr(nil), (*encFnInfo).fastpathEncMapInt8UintptrR, (*decFnInfo).fastpathDecMapInt8UintptrR) - fn(map[int8]int(nil), (*encFnInfo).fastpathEncMapInt8IntR, (*decFnInfo).fastpathDecMapInt8IntR) - fn(map[int8]int8(nil), (*encFnInfo).fastpathEncMapInt8Int8R, (*decFnInfo).fastpathDecMapInt8Int8R) - fn(map[int8]int16(nil), (*encFnInfo).fastpathEncMapInt8Int16R, (*decFnInfo).fastpathDecMapInt8Int16R) - fn(map[int8]int32(nil), (*encFnInfo).fastpathEncMapInt8Int32R, (*decFnInfo).fastpathDecMapInt8Int32R) - fn(map[int8]int64(nil), (*encFnInfo).fastpathEncMapInt8Int64R, (*decFnInfo).fastpathDecMapInt8Int64R) - fn(map[int8]float32(nil), (*encFnInfo).fastpathEncMapInt8Float32R, (*decFnInfo).fastpathDecMapInt8Float32R) - fn(map[int8]float64(nil), (*encFnInfo).fastpathEncMapInt8Float64R, (*decFnInfo).fastpathDecMapInt8Float64R) - fn(map[int8]bool(nil), (*encFnInfo).fastpathEncMapInt8BoolR, (*decFnInfo).fastpathDecMapInt8BoolR) - fn(map[int16]interface{}(nil), (*encFnInfo).fastpathEncMapInt16IntfR, (*decFnInfo).fastpathDecMapInt16IntfR) - fn(map[int16]string(nil), (*encFnInfo).fastpathEncMapInt16StringR, (*decFnInfo).fastpathDecMapInt16StringR) - fn(map[int16]uint(nil), (*encFnInfo).fastpathEncMapInt16UintR, (*decFnInfo).fastpathDecMapInt16UintR) - fn(map[int16]uint8(nil), (*encFnInfo).fastpathEncMapInt16Uint8R, (*decFnInfo).fastpathDecMapInt16Uint8R) - fn(map[int16]uint16(nil), (*encFnInfo).fastpathEncMapInt16Uint16R, (*decFnInfo).fastpathDecMapInt16Uint16R) - fn(map[int16]uint32(nil), (*encFnInfo).fastpathEncMapInt16Uint32R, (*decFnInfo).fastpathDecMapInt16Uint32R) - fn(map[int16]uint64(nil), (*encFnInfo).fastpathEncMapInt16Uint64R, (*decFnInfo).fastpathDecMapInt16Uint64R) - fn(map[int16]uintptr(nil), (*encFnInfo).fastpathEncMapInt16UintptrR, (*decFnInfo).fastpathDecMapInt16UintptrR) - fn(map[int16]int(nil), 
(*encFnInfo).fastpathEncMapInt16IntR, (*decFnInfo).fastpathDecMapInt16IntR)
-	fn(map[int16]int8(nil), (*encFnInfo).fastpathEncMapInt16Int8R, (*decFnInfo).fastpathDecMapInt16Int8R)
-	fn(map[int16]int16(nil), (*encFnInfo).fastpathEncMapInt16Int16R, (*decFnInfo).fastpathDecMapInt16Int16R)
-	fn(map[int16]int32(nil), (*encFnInfo).fastpathEncMapInt16Int32R, (*decFnInfo).fastpathDecMapInt16Int32R)
-	fn(map[int16]int64(nil), (*encFnInfo).fastpathEncMapInt16Int64R, (*decFnInfo).fastpathDecMapInt16Int64R)
-	fn(map[int16]float32(nil), (*encFnInfo).fastpathEncMapInt16Float32R, (*decFnInfo).fastpathDecMapInt16Float32R)
-	fn(map[int16]float64(nil), (*encFnInfo).fastpathEncMapInt16Float64R, (*decFnInfo).fastpathDecMapInt16Float64R)
-	fn(map[int16]bool(nil), (*encFnInfo).fastpathEncMapInt16BoolR, (*decFnInfo).fastpathDecMapInt16BoolR)
-	fn(map[int32]interface{}(nil), (*encFnInfo).fastpathEncMapInt32IntfR, (*decFnInfo).fastpathDecMapInt32IntfR)
-	fn(map[int32]string(nil), (*encFnInfo).fastpathEncMapInt32StringR, (*decFnInfo).fastpathDecMapInt32StringR)
-	fn(map[int32]uint(nil), (*encFnInfo).fastpathEncMapInt32UintR, (*decFnInfo).fastpathDecMapInt32UintR)
-	fn(map[int32]uint8(nil), (*encFnInfo).fastpathEncMapInt32Uint8R, (*decFnInfo).fastpathDecMapInt32Uint8R)
-	fn(map[int32]uint16(nil), (*encFnInfo).fastpathEncMapInt32Uint16R, (*decFnInfo).fastpathDecMapInt32Uint16R)
-	fn(map[int32]uint32(nil), (*encFnInfo).fastpathEncMapInt32Uint32R, (*decFnInfo).fastpathDecMapInt32Uint32R)
-	fn(map[int32]uint64(nil), (*encFnInfo).fastpathEncMapInt32Uint64R, (*decFnInfo).fastpathDecMapInt32Uint64R)
-	fn(map[int32]uintptr(nil), (*encFnInfo).fastpathEncMapInt32UintptrR, (*decFnInfo).fastpathDecMapInt32UintptrR)
-	fn(map[int32]int(nil), (*encFnInfo).fastpathEncMapInt32IntR, (*decFnInfo).fastpathDecMapInt32IntR)
-	fn(map[int32]int8(nil), (*encFnInfo).fastpathEncMapInt32Int8R, (*decFnInfo).fastpathDecMapInt32Int8R)
-	fn(map[int32]int16(nil), (*encFnInfo).fastpathEncMapInt32Int16R, (*decFnInfo).fastpathDecMapInt32Int16R)
-	fn(map[int32]int32(nil), (*encFnInfo).fastpathEncMapInt32Int32R, (*decFnInfo).fastpathDecMapInt32Int32R)
-	fn(map[int32]int64(nil), (*encFnInfo).fastpathEncMapInt32Int64R, (*decFnInfo).fastpathDecMapInt32Int64R)
-	fn(map[int32]float32(nil), (*encFnInfo).fastpathEncMapInt32Float32R, (*decFnInfo).fastpathDecMapInt32Float32R)
-	fn(map[int32]float64(nil), (*encFnInfo).fastpathEncMapInt32Float64R, (*decFnInfo).fastpathDecMapInt32Float64R)
-	fn(map[int32]bool(nil), (*encFnInfo).fastpathEncMapInt32BoolR, (*decFnInfo).fastpathDecMapInt32BoolR)
-	fn(map[int64]interface{}(nil), (*encFnInfo).fastpathEncMapInt64IntfR, (*decFnInfo).fastpathDecMapInt64IntfR)
-	fn(map[int64]string(nil), (*encFnInfo).fastpathEncMapInt64StringR, (*decFnInfo).fastpathDecMapInt64StringR)
-	fn(map[int64]uint(nil), (*encFnInfo).fastpathEncMapInt64UintR, (*decFnInfo).fastpathDecMapInt64UintR)
-	fn(map[int64]uint8(nil), (*encFnInfo).fastpathEncMapInt64Uint8R, (*decFnInfo).fastpathDecMapInt64Uint8R)
-	fn(map[int64]uint16(nil), (*encFnInfo).fastpathEncMapInt64Uint16R, (*decFnInfo).fastpathDecMapInt64Uint16R)
-	fn(map[int64]uint32(nil), (*encFnInfo).fastpathEncMapInt64Uint32R, (*decFnInfo).fastpathDecMapInt64Uint32R)
-	fn(map[int64]uint64(nil), (*encFnInfo).fastpathEncMapInt64Uint64R, (*decFnInfo).fastpathDecMapInt64Uint64R)
-	fn(map[int64]uintptr(nil), (*encFnInfo).fastpathEncMapInt64UintptrR, (*decFnInfo).fastpathDecMapInt64UintptrR)
-	fn(map[int64]int(nil), (*encFnInfo).fastpathEncMapInt64IntR, (*decFnInfo).fastpathDecMapInt64IntR)
-	fn(map[int64]int8(nil), (*encFnInfo).fastpathEncMapInt64Int8R, (*decFnInfo).fastpathDecMapInt64Int8R)
-	fn(map[int64]int16(nil), (*encFnInfo).fastpathEncMapInt64Int16R, (*decFnInfo).fastpathDecMapInt64Int16R)
-	fn(map[int64]int32(nil), (*encFnInfo).fastpathEncMapInt64Int32R, (*decFnInfo).fastpathDecMapInt64Int32R)
-	fn(map[int64]int64(nil), (*encFnInfo).fastpathEncMapInt64Int64R, (*decFnInfo).fastpathDecMapInt64Int64R)
-	fn(map[int64]float32(nil), (*encFnInfo).fastpathEncMapInt64Float32R, (*decFnInfo).fastpathDecMapInt64Float32R)
-	fn(map[int64]float64(nil), (*encFnInfo).fastpathEncMapInt64Float64R, (*decFnInfo).fastpathDecMapInt64Float64R)
-	fn(map[int64]bool(nil), (*encFnInfo).fastpathEncMapInt64BoolR, (*decFnInfo).fastpathDecMapInt64BoolR)
-	fn(map[bool]interface{}(nil), (*encFnInfo).fastpathEncMapBoolIntfR, (*decFnInfo).fastpathDecMapBoolIntfR)
-	fn(map[bool]string(nil), (*encFnInfo).fastpathEncMapBoolStringR, (*decFnInfo).fastpathDecMapBoolStringR)
-	fn(map[bool]uint(nil), (*encFnInfo).fastpathEncMapBoolUintR, (*decFnInfo).fastpathDecMapBoolUintR)
-	fn(map[bool]uint8(nil), (*encFnInfo).fastpathEncMapBoolUint8R, (*decFnInfo).fastpathDecMapBoolUint8R)
-	fn(map[bool]uint16(nil), (*encFnInfo).fastpathEncMapBoolUint16R, (*decFnInfo).fastpathDecMapBoolUint16R)
-	fn(map[bool]uint32(nil), (*encFnInfo).fastpathEncMapBoolUint32R, (*decFnInfo).fastpathDecMapBoolUint32R)
-	fn(map[bool]uint64(nil), (*encFnInfo).fastpathEncMapBoolUint64R, (*decFnInfo).fastpathDecMapBoolUint64R)
-	fn(map[bool]uintptr(nil), (*encFnInfo).fastpathEncMapBoolUintptrR, (*decFnInfo).fastpathDecMapBoolUintptrR)
-	fn(map[bool]int(nil), (*encFnInfo).fastpathEncMapBoolIntR, (*decFnInfo).fastpathDecMapBoolIntR)
-	fn(map[bool]int8(nil), (*encFnInfo).fastpathEncMapBoolInt8R, (*decFnInfo).fastpathDecMapBoolInt8R)
-	fn(map[bool]int16(nil), (*encFnInfo).fastpathEncMapBoolInt16R, (*decFnInfo).fastpathDecMapBoolInt16R)
-	fn(map[bool]int32(nil), (*encFnInfo).fastpathEncMapBoolInt32R, (*decFnInfo).fastpathDecMapBoolInt32R)
-	fn(map[bool]int64(nil), (*encFnInfo).fastpathEncMapBoolInt64R, (*decFnInfo).fastpathDecMapBoolInt64R)
-	fn(map[bool]float32(nil), (*encFnInfo).fastpathEncMapBoolFloat32R, (*decFnInfo).fastpathDecMapBoolFloat32R)
-	fn(map[bool]float64(nil), (*encFnInfo).fastpathEncMapBoolFloat64R, (*decFnInfo).fastpathDecMapBoolFloat64R)
-	fn(map[bool]bool(nil), (*encFnInfo).fastpathEncMapBoolBoolR, (*decFnInfo).fastpathDecMapBoolBoolR)
-
-	sort.Sort(fastpathAslice(fastpathAV[:]))
-}
-
-// -- encode
-
-// -- -- fast path type switch
-func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
-	switch v := iv.(type) {
-
-	case []interface{}:
-		fastpathTV.EncSliceIntfV(v, fastpathCheckNilTrue, e)
-	case *[]interface{}:
-		fastpathTV.EncSliceIntfV(*v, fastpathCheckNilTrue, e)
-
-	case map[interface{}]interface{}:
-		fastpathTV.EncMapIntfIntfV(v, fastpathCheckNilTrue, e)
-	case *map[interface{}]interface{}:
-		fastpathTV.EncMapIntfIntfV(*v, fastpathCheckNilTrue, e)
-
-	case map[interface{}]string:
-		fastpathTV.EncMapIntfStringV(v, fastpathCheckNilTrue, e)
-	case *map[interface{}]string:
-		fastpathTV.EncMapIntfStringV(*v, fastpathCheckNilTrue, e)
-
-	case map[interface{}]uint:
-		fastpathTV.EncMapIntfUintV(v, fastpathCheckNilTrue, e)
-	case *map[interface{}]uint:
-		fastpathTV.EncMapIntfUintV(*v, fastpathCheckNilTrue, e)
-
-	case map[interface{}]uint8:
-		fastpathTV.EncMapIntfUint8V(v, fastpathCheckNilTrue, e)
-	case *map[interface{}]uint8:
-		fastpathTV.EncMapIntfUint8V(*v, fastpathCheckNilTrue, e)
-
-	case map[interface{}]uint16:
-		fastpathTV.EncMapIntfUint16V(v, fastpathCheckNilTrue, e)
-	case *map[interface{}]uint16:
-		fastpathTV.EncMapIntfUint16V(*v, fastpathCheckNilTrue, e)
-
-	case map[interface{}]uint32:
-		fastpathTV.EncMapIntfUint32V(v, fastpathCheckNilTrue, e)
-	case *map[interface{}]uint32:
-		fastpathTV.EncMapIntfUint32V(*v, fastpathCheckNilTrue, e)
-
-	case map[interface{}]uint64:
-		fastpathTV.EncMapIntfUint64V(v, fastpathCheckNilTrue, e)
-	case *map[interface{}]uint64:
-		fastpathTV.EncMapIntfUint64V(*v, fastpathCheckNilTrue, e)
-
-	case map[interface{}]uintptr:
-		fastpathTV.EncMapIntfUintptrV(v, fastpathCheckNilTrue, e)
-	case *map[interface{}]uintptr:
-		fastpathTV.EncMapIntfUintptrV(*v, fastpathCheckNilTrue, e)
-
-	case map[interface{}]int:
-		fastpathTV.EncMapIntfIntV(v, fastpathCheckNilTrue, e)
-	case *map[interface{}]int:
-		fastpathTV.EncMapIntfIntV(*v, fastpathCheckNilTrue, e)
-
-	case map[interface{}]int8:
-		fastpathTV.EncMapIntfInt8V(v, fastpathCheckNilTrue, e)
-	case *map[interface{}]int8:
-		fastpathTV.EncMapIntfInt8V(*v, fastpathCheckNilTrue, e)
-
-	case map[interface{}]int16:
-		fastpathTV.EncMapIntfInt16V(v, fastpathCheckNilTrue, e)
-	case *map[interface{}]int16:
-		fastpathTV.EncMapIntfInt16V(*v, fastpathCheckNilTrue, e)
-
-	case map[interface{}]int32:
-		fastpathTV.EncMapIntfInt32V(v, fastpathCheckNilTrue, e)
-	case *map[interface{}]int32:
-		fastpathTV.EncMapIntfInt32V(*v, fastpathCheckNilTrue, e)
-
-	case map[interface{}]int64:
-		fastpathTV.EncMapIntfInt64V(v, fastpathCheckNilTrue, e)
-	case *map[interface{}]int64:
-		fastpathTV.EncMapIntfInt64V(*v, fastpathCheckNilTrue, e)
-
-	case map[interface{}]float32:
-		fastpathTV.EncMapIntfFloat32V(v, fastpathCheckNilTrue, e)
-	case *map[interface{}]float32:
-		fastpathTV.EncMapIntfFloat32V(*v, fastpathCheckNilTrue, e)
-
-	case map[interface{}]float64:
-		fastpathTV.EncMapIntfFloat64V(v, fastpathCheckNilTrue, e)
-	case *map[interface{}]float64:
-		fastpathTV.EncMapIntfFloat64V(*v, fastpathCheckNilTrue, e)
-
-	case map[interface{}]bool:
-		fastpathTV.EncMapIntfBoolV(v, fastpathCheckNilTrue, e)
-	case *map[interface{}]bool:
-		fastpathTV.EncMapIntfBoolV(*v, fastpathCheckNilTrue, e)
-
-	case []string:
-		fastpathTV.EncSliceStringV(v, fastpathCheckNilTrue, e)
-	case *[]string:
-		fastpathTV.EncSliceStringV(*v, fastpathCheckNilTrue, e)
-
-	case map[string]interface{}:
-		fastpathTV.EncMapStringIntfV(v, fastpathCheckNilTrue, e)
-	case *map[string]interface{}:
-		fastpathTV.EncMapStringIntfV(*v, fastpathCheckNilTrue, e)
-
-	case map[string]string:
-		fastpathTV.EncMapStringStringV(v, fastpathCheckNilTrue, e)
-	case *map[string]string:
-		fastpathTV.EncMapStringStringV(*v, fastpathCheckNilTrue, e)
-
-	case map[string]uint:
-		fastpathTV.EncMapStringUintV(v, fastpathCheckNilTrue, e)
-	case *map[string]uint:
-		fastpathTV.EncMapStringUintV(*v, fastpathCheckNilTrue, e)
-
-	case map[string]uint8:
-		fastpathTV.EncMapStringUint8V(v, fastpathCheckNilTrue, e)
-	case *map[string]uint8:
-		fastpathTV.EncMapStringUint8V(*v, fastpathCheckNilTrue, e)
-
-	case map[string]uint16:
-		fastpathTV.EncMapStringUint16V(v, fastpathCheckNilTrue, e)
-	case *map[string]uint16:
-		fastpathTV.EncMapStringUint16V(*v, fastpathCheckNilTrue, e)
-
-	case map[string]uint32:
-		fastpathTV.EncMapStringUint32V(v, fastpathCheckNilTrue, e)
-	case *map[string]uint32:
-		fastpathTV.EncMapStringUint32V(*v, fastpathCheckNilTrue, e)
-
-	case map[string]uint64:
-		fastpathTV.EncMapStringUint64V(v, fastpathCheckNilTrue, e)
-	case *map[string]uint64:
-		fastpathTV.EncMapStringUint64V(*v, fastpathCheckNilTrue, e)
-
-	case map[string]uintptr:
-		fastpathTV.EncMapStringUintptrV(v, fastpathCheckNilTrue, e)
-	case *map[string]uintptr:
-		fastpathTV.EncMapStringUintptrV(*v, fastpathCheckNilTrue, e)
-
-	case map[string]int:
-		fastpathTV.EncMapStringIntV(v, fastpathCheckNilTrue, e)
-	case *map[string]int:
-		fastpathTV.EncMapStringIntV(*v, fastpathCheckNilTrue, e)
-
-	case map[string]int8:
-		fastpathTV.EncMapStringInt8V(v, fastpathCheckNilTrue, e)
-	case *map[string]int8:
-		fastpathTV.EncMapStringInt8V(*v, fastpathCheckNilTrue, e)
-
-	case map[string]int16:
-		fastpathTV.EncMapStringInt16V(v, fastpathCheckNilTrue, e)
-	case *map[string]int16:
-		fastpathTV.EncMapStringInt16V(*v, fastpathCheckNilTrue, e)
-
-	case map[string]int32:
-		fastpathTV.EncMapStringInt32V(v, fastpathCheckNilTrue, e)
-	case *map[string]int32:
-		fastpathTV.EncMapStringInt32V(*v, fastpathCheckNilTrue, e)
-
-	case map[string]int64:
-		fastpathTV.EncMapStringInt64V(v, fastpathCheckNilTrue, e)
-	case *map[string]int64:
-		fastpathTV.EncMapStringInt64V(*v, fastpathCheckNilTrue, e)
-
-	case map[string]float32:
-		fastpathTV.EncMapStringFloat32V(v, fastpathCheckNilTrue, e)
-	case *map[string]float32:
-		fastpathTV.EncMapStringFloat32V(*v, fastpathCheckNilTrue, e)
-
-	case map[string]float64:
-		fastpathTV.EncMapStringFloat64V(v, fastpathCheckNilTrue, e)
-	case *map[string]float64:
-		fastpathTV.EncMapStringFloat64V(*v, fastpathCheckNilTrue, e)
-
-	case map[string]bool:
-		fastpathTV.EncMapStringBoolV(v, fastpathCheckNilTrue, e)
-	case *map[string]bool:
-		fastpathTV.EncMapStringBoolV(*v, fastpathCheckNilTrue, e)
-
-	case []float32:
-		fastpathTV.EncSliceFloat32V(v, fastpathCheckNilTrue, e)
-	case *[]float32:
-		fastpathTV.EncSliceFloat32V(*v, fastpathCheckNilTrue, e)
-
-	case map[float32]interface{}:
-		fastpathTV.EncMapFloat32IntfV(v, fastpathCheckNilTrue, e)
-	case *map[float32]interface{}:
-		fastpathTV.EncMapFloat32IntfV(*v, fastpathCheckNilTrue, e)
-
-	case map[float32]string:
-		fastpathTV.EncMapFloat32StringV(v, fastpathCheckNilTrue, e)
-	case *map[float32]string:
-		fastpathTV.EncMapFloat32StringV(*v, fastpathCheckNilTrue, e)
-
-	case map[float32]uint:
-		fastpathTV.EncMapFloat32UintV(v, fastpathCheckNilTrue, e)
-	case *map[float32]uint:
-		fastpathTV.EncMapFloat32UintV(*v, fastpathCheckNilTrue, e)
-
-	case map[float32]uint8:
-		fastpathTV.EncMapFloat32Uint8V(v, fastpathCheckNilTrue, e)
-	case *map[float32]uint8:
-		fastpathTV.EncMapFloat32Uint8V(*v, fastpathCheckNilTrue, e)
-
-	case map[float32]uint16:
-		fastpathTV.EncMapFloat32Uint16V(v, fastpathCheckNilTrue, e)
-	case *map[float32]uint16:
-		fastpathTV.EncMapFloat32Uint16V(*v, fastpathCheckNilTrue, e)
-
-	case map[float32]uint32:
-		fastpathTV.EncMapFloat32Uint32V(v, fastpathCheckNilTrue, e)
-	case *map[float32]uint32:
-		fastpathTV.EncMapFloat32Uint32V(*v, fastpathCheckNilTrue, e)
-
-	case map[float32]uint64:
-		fastpathTV.EncMapFloat32Uint64V(v, fastpathCheckNilTrue, e)
-	case *map[float32]uint64:
-		fastpathTV.EncMapFloat32Uint64V(*v, fastpathCheckNilTrue, e)
-
-	case map[float32]uintptr:
-		fastpathTV.EncMapFloat32UintptrV(v, fastpathCheckNilTrue, e)
-	case *map[float32]uintptr:
-		fastpathTV.EncMapFloat32UintptrV(*v, fastpathCheckNilTrue, e)
-
-	case map[float32]int:
-		fastpathTV.EncMapFloat32IntV(v, fastpathCheckNilTrue, e)
-	case *map[float32]int:
-		fastpathTV.EncMapFloat32IntV(*v, fastpathCheckNilTrue, e)
-
-	case map[float32]int8:
-		fastpathTV.EncMapFloat32Int8V(v, fastpathCheckNilTrue, e)
-	case *map[float32]int8:
-		fastpathTV.EncMapFloat32Int8V(*v, fastpathCheckNilTrue, e)
-
-	case map[float32]int16:
-		fastpathTV.EncMapFloat32Int16V(v, fastpathCheckNilTrue, e)
-	case *map[float32]int16:
-		fastpathTV.EncMapFloat32Int16V(*v, fastpathCheckNilTrue, e)
-
-	case map[float32]int32:
-		fastpathTV.EncMapFloat32Int32V(v, fastpathCheckNilTrue, e)
-	case *map[float32]int32:
-		fastpathTV.EncMapFloat32Int32V(*v, fastpathCheckNilTrue, e)
-
-	case map[float32]int64:
-		fastpathTV.EncMapFloat32Int64V(v, fastpathCheckNilTrue, e)
-	case *map[float32]int64:
-		fastpathTV.EncMapFloat32Int64V(*v, fastpathCheckNilTrue, e)
-
-	case map[float32]float32:
-		fastpathTV.EncMapFloat32Float32V(v, fastpathCheckNilTrue, e)
-	case *map[float32]float32:
-		fastpathTV.EncMapFloat32Float32V(*v, fastpathCheckNilTrue, e)
-
-	case map[float32]float64:
-		fastpathTV.EncMapFloat32Float64V(v, fastpathCheckNilTrue, e)
-	case *map[float32]float64:
-		fastpathTV.EncMapFloat32Float64V(*v, fastpathCheckNilTrue, e)
-
-	case map[float32]bool:
-		fastpathTV.EncMapFloat32BoolV(v, fastpathCheckNilTrue, e)
-	case *map[float32]bool:
-		fastpathTV.EncMapFloat32BoolV(*v, fastpathCheckNilTrue, e)
-
-	case []float64:
-		fastpathTV.EncSliceFloat64V(v, fastpathCheckNilTrue, e)
-	case *[]float64:
-		fastpathTV.EncSliceFloat64V(*v, fastpathCheckNilTrue, e)
-
-	case map[float64]interface{}:
-		fastpathTV.EncMapFloat64IntfV(v, fastpathCheckNilTrue, e)
-	case *map[float64]interface{}:
-		fastpathTV.EncMapFloat64IntfV(*v, fastpathCheckNilTrue, e)
-
-	case map[float64]string:
-		fastpathTV.EncMapFloat64StringV(v, fastpathCheckNilTrue, e)
-	case *map[float64]string:
-		fastpathTV.EncMapFloat64StringV(*v, fastpathCheckNilTrue, e)
-
-	case map[float64]uint:
-		fastpathTV.EncMapFloat64UintV(v, fastpathCheckNilTrue, e)
-	case *map[float64]uint:
-		fastpathTV.EncMapFloat64UintV(*v, fastpathCheckNilTrue, e)
-
-	case map[float64]uint8:
-		fastpathTV.EncMapFloat64Uint8V(v, fastpathCheckNilTrue, e)
-	case *map[float64]uint8:
-		fastpathTV.EncMapFloat64Uint8V(*v, fastpathCheckNilTrue, e)
-
-	case map[float64]uint16:
-		fastpathTV.EncMapFloat64Uint16V(v, fastpathCheckNilTrue, e)
-	case *map[float64]uint16:
-		fastpathTV.EncMapFloat64Uint16V(*v, fastpathCheckNilTrue, e)
-
-	case map[float64]uint32:
-		fastpathTV.EncMapFloat64Uint32V(v, fastpathCheckNilTrue, e)
-	case *map[float64]uint32:
-		fastpathTV.EncMapFloat64Uint32V(*v, fastpathCheckNilTrue, e)
-
-	case map[float64]uint64:
-		fastpathTV.EncMapFloat64Uint64V(v, fastpathCheckNilTrue, e)
-	case *map[float64]uint64:
-		fastpathTV.EncMapFloat64Uint64V(*v, fastpathCheckNilTrue, e)
-
-	case map[float64]uintptr:
-		fastpathTV.EncMapFloat64UintptrV(v, fastpathCheckNilTrue, e)
-	case *map[float64]uintptr:
-		fastpathTV.EncMapFloat64UintptrV(*v, fastpathCheckNilTrue, e)
-
-	case map[float64]int:
-		fastpathTV.EncMapFloat64IntV(v, fastpathCheckNilTrue, e)
-	case *map[float64]int:
-		fastpathTV.EncMapFloat64IntV(*v, fastpathCheckNilTrue, e)
-
-	case map[float64]int8:
-		fastpathTV.EncMapFloat64Int8V(v, fastpathCheckNilTrue, e)
-	case *map[float64]int8:
-		fastpathTV.EncMapFloat64Int8V(*v, fastpathCheckNilTrue, e)
-
-	case map[float64]int16:
-		fastpathTV.EncMapFloat64Int16V(v, fastpathCheckNilTrue, e)
-	case *map[float64]int16:
-		fastpathTV.EncMapFloat64Int16V(*v, fastpathCheckNilTrue, e)
-
-	case map[float64]int32:
-		fastpathTV.EncMapFloat64Int32V(v, fastpathCheckNilTrue, e)
-	case *map[float64]int32:
-		fastpathTV.EncMapFloat64Int32V(*v, fastpathCheckNilTrue, e)
-
-	case map[float64]int64:
-		fastpathTV.EncMapFloat64Int64V(v, fastpathCheckNilTrue, e)
-	case *map[float64]int64:
-		fastpathTV.EncMapFloat64Int64V(*v, fastpathCheckNilTrue, e)
-
-	case map[float64]float32:
-		fastpathTV.EncMapFloat64Float32V(v, fastpathCheckNilTrue, e)
-	case *map[float64]float32:
-		fastpathTV.EncMapFloat64Float32V(*v, fastpathCheckNilTrue, e)
-
-	case map[float64]float64:
-		fastpathTV.EncMapFloat64Float64V(v, fastpathCheckNilTrue, e)
-	case *map[float64]float64:
-		fastpathTV.EncMapFloat64Float64V(*v, fastpathCheckNilTrue, e)
-
-	case map[float64]bool:
-		fastpathTV.EncMapFloat64BoolV(v, fastpathCheckNilTrue, e)
-	case *map[float64]bool:
-		fastpathTV.EncMapFloat64BoolV(*v, fastpathCheckNilTrue, e)
-
-	case []uint:
-		fastpathTV.EncSliceUintV(v, fastpathCheckNilTrue, e)
-	case *[]uint:
-		fastpathTV.EncSliceUintV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint]interface{}:
-		fastpathTV.EncMapUintIntfV(v, fastpathCheckNilTrue, e)
-	case *map[uint]interface{}:
-		fastpathTV.EncMapUintIntfV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint]string:
-		fastpathTV.EncMapUintStringV(v, fastpathCheckNilTrue, e)
-	case *map[uint]string:
-		fastpathTV.EncMapUintStringV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint]uint:
-		fastpathTV.EncMapUintUintV(v, fastpathCheckNilTrue, e)
-	case *map[uint]uint:
-		fastpathTV.EncMapUintUintV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint]uint8:
-		fastpathTV.EncMapUintUint8V(v, fastpathCheckNilTrue, e)
-	case *map[uint]uint8:
-		fastpathTV.EncMapUintUint8V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint]uint16:
-		fastpathTV.EncMapUintUint16V(v, fastpathCheckNilTrue, e)
-	case *map[uint]uint16:
-		fastpathTV.EncMapUintUint16V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint]uint32:
-		fastpathTV.EncMapUintUint32V(v, fastpathCheckNilTrue, e)
-	case *map[uint]uint32:
-		fastpathTV.EncMapUintUint32V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint]uint64:
-		fastpathTV.EncMapUintUint64V(v, fastpathCheckNilTrue, e)
-	case *map[uint]uint64:
-		fastpathTV.EncMapUintUint64V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint]uintptr:
-		fastpathTV.EncMapUintUintptrV(v, fastpathCheckNilTrue, e)
-	case *map[uint]uintptr:
-		fastpathTV.EncMapUintUintptrV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint]int:
-		fastpathTV.EncMapUintIntV(v, fastpathCheckNilTrue, e)
-	case *map[uint]int:
-		fastpathTV.EncMapUintIntV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint]int8:
-		fastpathTV.EncMapUintInt8V(v, fastpathCheckNilTrue, e)
-	case *map[uint]int8:
-		fastpathTV.EncMapUintInt8V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint]int16:
-		fastpathTV.EncMapUintInt16V(v, fastpathCheckNilTrue, e)
-	case *map[uint]int16:
-		fastpathTV.EncMapUintInt16V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint]int32:
-		fastpathTV.EncMapUintInt32V(v, fastpathCheckNilTrue, e)
-	case *map[uint]int32:
-		fastpathTV.EncMapUintInt32V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint]int64:
-		fastpathTV.EncMapUintInt64V(v, fastpathCheckNilTrue, e)
-	case *map[uint]int64:
-		fastpathTV.EncMapUintInt64V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint]float32:
-		fastpathTV.EncMapUintFloat32V(v, fastpathCheckNilTrue, e)
-	case *map[uint]float32:
-		fastpathTV.EncMapUintFloat32V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint]float64:
-		fastpathTV.EncMapUintFloat64V(v, fastpathCheckNilTrue, e)
-	case *map[uint]float64:
-		fastpathTV.EncMapUintFloat64V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint]bool:
-		fastpathTV.EncMapUintBoolV(v, fastpathCheckNilTrue, e)
-	case *map[uint]bool:
-		fastpathTV.EncMapUintBoolV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint8]interface{}:
-		fastpathTV.EncMapUint8IntfV(v, fastpathCheckNilTrue, e)
-	case *map[uint8]interface{}:
-		fastpathTV.EncMapUint8IntfV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint8]string:
-		fastpathTV.EncMapUint8StringV(v, fastpathCheckNilTrue, e)
-	case *map[uint8]string:
-		fastpathTV.EncMapUint8StringV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint8]uint:
-		fastpathTV.EncMapUint8UintV(v, fastpathCheckNilTrue, e)
-	case *map[uint8]uint:
-		fastpathTV.EncMapUint8UintV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint8]uint8:
-		fastpathTV.EncMapUint8Uint8V(v, fastpathCheckNilTrue, e)
-	case *map[uint8]uint8:
-		fastpathTV.EncMapUint8Uint8V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint8]uint16:
-		fastpathTV.EncMapUint8Uint16V(v, fastpathCheckNilTrue, e)
-	case *map[uint8]uint16:
-		fastpathTV.EncMapUint8Uint16V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint8]uint32:
-		fastpathTV.EncMapUint8Uint32V(v, fastpathCheckNilTrue, e)
-	case *map[uint8]uint32:
-		fastpathTV.EncMapUint8Uint32V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint8]uint64:
-		fastpathTV.EncMapUint8Uint64V(v, fastpathCheckNilTrue, e)
-	case *map[uint8]uint64:
-		fastpathTV.EncMapUint8Uint64V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint8]uintptr:
-		fastpathTV.EncMapUint8UintptrV(v, fastpathCheckNilTrue, e)
-	case *map[uint8]uintptr:
-		fastpathTV.EncMapUint8UintptrV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint8]int:
-		fastpathTV.EncMapUint8IntV(v, fastpathCheckNilTrue, e)
-	case *map[uint8]int:
-		fastpathTV.EncMapUint8IntV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint8]int8:
-		fastpathTV.EncMapUint8Int8V(v, fastpathCheckNilTrue, e)
-	case *map[uint8]int8:
-		fastpathTV.EncMapUint8Int8V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint8]int16:
-		fastpathTV.EncMapUint8Int16V(v, fastpathCheckNilTrue, e)
-	case *map[uint8]int16:
-		fastpathTV.EncMapUint8Int16V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint8]int32:
-		fastpathTV.EncMapUint8Int32V(v, fastpathCheckNilTrue, e)
-	case *map[uint8]int32:
-		fastpathTV.EncMapUint8Int32V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint8]int64:
-		fastpathTV.EncMapUint8Int64V(v, fastpathCheckNilTrue, e)
-	case *map[uint8]int64:
-		fastpathTV.EncMapUint8Int64V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint8]float32:
-		fastpathTV.EncMapUint8Float32V(v, fastpathCheckNilTrue, e)
-	case *map[uint8]float32:
-		fastpathTV.EncMapUint8Float32V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint8]float64:
-		fastpathTV.EncMapUint8Float64V(v, fastpathCheckNilTrue, e)
-	case *map[uint8]float64:
-		fastpathTV.EncMapUint8Float64V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint8]bool:
-		fastpathTV.EncMapUint8BoolV(v, fastpathCheckNilTrue, e)
-	case *map[uint8]bool:
-		fastpathTV.EncMapUint8BoolV(*v, fastpathCheckNilTrue, e)
-
-	case []uint16:
-		fastpathTV.EncSliceUint16V(v, fastpathCheckNilTrue, e)
-	case *[]uint16:
-		fastpathTV.EncSliceUint16V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint16]interface{}:
-		fastpathTV.EncMapUint16IntfV(v, fastpathCheckNilTrue, e)
-	case *map[uint16]interface{}:
-		fastpathTV.EncMapUint16IntfV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint16]string:
-		fastpathTV.EncMapUint16StringV(v, fastpathCheckNilTrue, e)
-	case *map[uint16]string:
-		fastpathTV.EncMapUint16StringV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint16]uint:
-		fastpathTV.EncMapUint16UintV(v, fastpathCheckNilTrue, e)
-	case *map[uint16]uint:
-		fastpathTV.EncMapUint16UintV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint16]uint8:
-		fastpathTV.EncMapUint16Uint8V(v, fastpathCheckNilTrue, e)
-	case *map[uint16]uint8:
-		fastpathTV.EncMapUint16Uint8V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint16]uint16:
-		fastpathTV.EncMapUint16Uint16V(v, fastpathCheckNilTrue, e)
-	case *map[uint16]uint16:
-		fastpathTV.EncMapUint16Uint16V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint16]uint32:
-		fastpathTV.EncMapUint16Uint32V(v, fastpathCheckNilTrue, e)
-	case *map[uint16]uint32:
-		fastpathTV.EncMapUint16Uint32V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint16]uint64:
-		fastpathTV.EncMapUint16Uint64V(v, fastpathCheckNilTrue, e)
-	case *map[uint16]uint64:
-		fastpathTV.EncMapUint16Uint64V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint16]uintptr:
-		fastpathTV.EncMapUint16UintptrV(v, fastpathCheckNilTrue, e)
-	case *map[uint16]uintptr:
-		fastpathTV.EncMapUint16UintptrV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint16]int:
-		fastpathTV.EncMapUint16IntV(v, fastpathCheckNilTrue, e)
-	case *map[uint16]int:
-		fastpathTV.EncMapUint16IntV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint16]int8:
-		fastpathTV.EncMapUint16Int8V(v, fastpathCheckNilTrue, e)
-	case *map[uint16]int8:
-		fastpathTV.EncMapUint16Int8V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint16]int16:
-		fastpathTV.EncMapUint16Int16V(v, fastpathCheckNilTrue, e)
-	case *map[uint16]int16:
-		fastpathTV.EncMapUint16Int16V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint16]int32:
-		fastpathTV.EncMapUint16Int32V(v, fastpathCheckNilTrue, e)
-	case *map[uint16]int32:
-		fastpathTV.EncMapUint16Int32V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint16]int64:
-		fastpathTV.EncMapUint16Int64V(v, fastpathCheckNilTrue, e)
-	case *map[uint16]int64:
-		fastpathTV.EncMapUint16Int64V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint16]float32:
-		fastpathTV.EncMapUint16Float32V(v, fastpathCheckNilTrue, e)
-	case *map[uint16]float32:
-		fastpathTV.EncMapUint16Float32V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint16]float64:
-		fastpathTV.EncMapUint16Float64V(v, fastpathCheckNilTrue, e)
-	case *map[uint16]float64:
-		fastpathTV.EncMapUint16Float64V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint16]bool:
-		fastpathTV.EncMapUint16BoolV(v, fastpathCheckNilTrue, e)
-	case *map[uint16]bool:
-		fastpathTV.EncMapUint16BoolV(*v, fastpathCheckNilTrue, e)
-
-	case []uint32:
-		fastpathTV.EncSliceUint32V(v, fastpathCheckNilTrue, e)
-	case *[]uint32:
-		fastpathTV.EncSliceUint32V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint32]interface{}:
-		fastpathTV.EncMapUint32IntfV(v, fastpathCheckNilTrue, e)
-	case *map[uint32]interface{}:
-		fastpathTV.EncMapUint32IntfV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint32]string:
-		fastpathTV.EncMapUint32StringV(v, fastpathCheckNilTrue, e)
-	case *map[uint32]string:
-		fastpathTV.EncMapUint32StringV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint32]uint:
-		fastpathTV.EncMapUint32UintV(v, fastpathCheckNilTrue, e)
-	case *map[uint32]uint:
-		fastpathTV.EncMapUint32UintV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint32]uint8:
-		fastpathTV.EncMapUint32Uint8V(v, fastpathCheckNilTrue, e)
-	case *map[uint32]uint8:
-		fastpathTV.EncMapUint32Uint8V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint32]uint16:
-		fastpathTV.EncMapUint32Uint16V(v, fastpathCheckNilTrue, e)
-	case *map[uint32]uint16:
-		fastpathTV.EncMapUint32Uint16V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint32]uint32:
-		fastpathTV.EncMapUint32Uint32V(v, fastpathCheckNilTrue, e)
-	case *map[uint32]uint32:
-		fastpathTV.EncMapUint32Uint32V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint32]uint64:
-		fastpathTV.EncMapUint32Uint64V(v, fastpathCheckNilTrue, e)
-	case *map[uint32]uint64:
-		fastpathTV.EncMapUint32Uint64V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint32]uintptr:
-		fastpathTV.EncMapUint32UintptrV(v, fastpathCheckNilTrue, e)
-	case *map[uint32]uintptr:
-		fastpathTV.EncMapUint32UintptrV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint32]int:
-		fastpathTV.EncMapUint32IntV(v, fastpathCheckNilTrue, e)
-	case *map[uint32]int:
-		fastpathTV.EncMapUint32IntV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint32]int8:
-		fastpathTV.EncMapUint32Int8V(v, fastpathCheckNilTrue, e)
-	case *map[uint32]int8:
-		fastpathTV.EncMapUint32Int8V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint32]int16:
-		fastpathTV.EncMapUint32Int16V(v, fastpathCheckNilTrue, e)
-	case *map[uint32]int16:
-		fastpathTV.EncMapUint32Int16V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint32]int32:
-		fastpathTV.EncMapUint32Int32V(v, fastpathCheckNilTrue, e)
-	case *map[uint32]int32:
-		fastpathTV.EncMapUint32Int32V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint32]int64:
-		fastpathTV.EncMapUint32Int64V(v, fastpathCheckNilTrue, e)
-	case *map[uint32]int64:
-		fastpathTV.EncMapUint32Int64V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint32]float32:
-		fastpathTV.EncMapUint32Float32V(v, fastpathCheckNilTrue, e)
-	case *map[uint32]float32:
-		fastpathTV.EncMapUint32Float32V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint32]float64:
-		fastpathTV.EncMapUint32Float64V(v, fastpathCheckNilTrue, e)
-	case *map[uint32]float64:
-		fastpathTV.EncMapUint32Float64V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint32]bool:
-		fastpathTV.EncMapUint32BoolV(v, fastpathCheckNilTrue, e)
-	case *map[uint32]bool:
-		fastpathTV.EncMapUint32BoolV(*v, fastpathCheckNilTrue, e)
-
-	case []uint64:
-		fastpathTV.EncSliceUint64V(v, fastpathCheckNilTrue, e)
-	case *[]uint64:
-		fastpathTV.EncSliceUint64V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint64]interface{}:
-		fastpathTV.EncMapUint64IntfV(v, fastpathCheckNilTrue, e)
-	case *map[uint64]interface{}:
-		fastpathTV.EncMapUint64IntfV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint64]string:
-		fastpathTV.EncMapUint64StringV(v, fastpathCheckNilTrue, e)
-	case *map[uint64]string:
-		fastpathTV.EncMapUint64StringV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint64]uint:
-		fastpathTV.EncMapUint64UintV(v, fastpathCheckNilTrue, e)
-	case *map[uint64]uint:
-		fastpathTV.EncMapUint64UintV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint64]uint8:
-		fastpathTV.EncMapUint64Uint8V(v, fastpathCheckNilTrue, e)
-	case *map[uint64]uint8:
-		fastpathTV.EncMapUint64Uint8V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint64]uint16:
-		fastpathTV.EncMapUint64Uint16V(v, fastpathCheckNilTrue, e)
-	case *map[uint64]uint16:
-		fastpathTV.EncMapUint64Uint16V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint64]uint32:
-		fastpathTV.EncMapUint64Uint32V(v, fastpathCheckNilTrue, e)
-	case *map[uint64]uint32:
-		fastpathTV.EncMapUint64Uint32V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint64]uint64:
-		fastpathTV.EncMapUint64Uint64V(v, fastpathCheckNilTrue, e)
-	case *map[uint64]uint64:
-		fastpathTV.EncMapUint64Uint64V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint64]uintptr:
-		fastpathTV.EncMapUint64UintptrV(v, fastpathCheckNilTrue, e)
-	case *map[uint64]uintptr:
-		fastpathTV.EncMapUint64UintptrV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint64]int:
-		fastpathTV.EncMapUint64IntV(v, fastpathCheckNilTrue, e)
-	case *map[uint64]int:
-		fastpathTV.EncMapUint64IntV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint64]int8:
-		fastpathTV.EncMapUint64Int8V(v, fastpathCheckNilTrue, e)
-	case *map[uint64]int8:
-		fastpathTV.EncMapUint64Int8V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint64]int16:
-		fastpathTV.EncMapUint64Int16V(v, fastpathCheckNilTrue, e)
-	case *map[uint64]int16:
-		fastpathTV.EncMapUint64Int16V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint64]int32:
-		fastpathTV.EncMapUint64Int32V(v, fastpathCheckNilTrue, e)
-	case *map[uint64]int32:
-		fastpathTV.EncMapUint64Int32V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint64]int64:
-		fastpathTV.EncMapUint64Int64V(v, fastpathCheckNilTrue, e)
-	case *map[uint64]int64:
-		fastpathTV.EncMapUint64Int64V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint64]float32:
-		fastpathTV.EncMapUint64Float32V(v, fastpathCheckNilTrue, e)
-	case *map[uint64]float32:
-		fastpathTV.EncMapUint64Float32V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint64]float64:
-		fastpathTV.EncMapUint64Float64V(v, fastpathCheckNilTrue, e)
-	case *map[uint64]float64:
-		fastpathTV.EncMapUint64Float64V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint64]bool:
-		fastpathTV.EncMapUint64BoolV(v, fastpathCheckNilTrue, e)
-	case *map[uint64]bool:
-		fastpathTV.EncMapUint64BoolV(*v, fastpathCheckNilTrue, e)
-
-	case []uintptr:
-		fastpathTV.EncSliceUintptrV(v, fastpathCheckNilTrue, e)
-	case *[]uintptr:
-		fastpathTV.EncSliceUintptrV(*v, fastpathCheckNilTrue, e)
-
-	case map[uintptr]interface{}:
-		fastpathTV.EncMapUintptrIntfV(v, fastpathCheckNilTrue, e)
-	case *map[uintptr]interface{}:
-		fastpathTV.EncMapUintptrIntfV(*v, fastpathCheckNilTrue, e)
-
-	case map[uintptr]string:
-		fastpathTV.EncMapUintptrStringV(v, fastpathCheckNilTrue, e)
-	case *map[uintptr]string:
-		fastpathTV.EncMapUintptrStringV(*v, fastpathCheckNilTrue, e)
-
-	case map[uintptr]uint:
-		fastpathTV.EncMapUintptrUintV(v, fastpathCheckNilTrue, e)
-	case *map[uintptr]uint:
-		fastpathTV.EncMapUintptrUintV(*v, fastpathCheckNilTrue, e)
-
-	case map[uintptr]uint8:
-		fastpathTV.EncMapUintptrUint8V(v, fastpathCheckNilTrue, e)
-	case *map[uintptr]uint8:
-		fastpathTV.EncMapUintptrUint8V(*v, fastpathCheckNilTrue, e)
-
-	case map[uintptr]uint16:
-		fastpathTV.EncMapUintptrUint16V(v, fastpathCheckNilTrue, e)
-	case *map[uintptr]uint16:
-		fastpathTV.EncMapUintptrUint16V(*v, fastpathCheckNilTrue, e)
-
-	case map[uintptr]uint32:
-		fastpathTV.EncMapUintptrUint32V(v, fastpathCheckNilTrue, e)
-	case *map[uintptr]uint32:
-		fastpathTV.EncMapUintptrUint32V(*v, fastpathCheckNilTrue, e)
-
-	case map[uintptr]uint64:
-		fastpathTV.EncMapUintptrUint64V(v, fastpathCheckNilTrue, e)
-	case *map[uintptr]uint64:
-		fastpathTV.EncMapUintptrUint64V(*v, fastpathCheckNilTrue, e)
-
-	case map[uintptr]uintptr:
-		fastpathTV.EncMapUintptrUintptrV(v, fastpathCheckNilTrue, e)
-	case *map[uintptr]uintptr:
-		fastpathTV.EncMapUintptrUintptrV(*v, fastpathCheckNilTrue, e)
-
-	case map[uintptr]int:
-		fastpathTV.EncMapUintptrIntV(v, fastpathCheckNilTrue, e)
-	case *map[uintptr]int:
-		fastpathTV.EncMapUintptrIntV(*v, fastpathCheckNilTrue, e)
-
-	case map[uintptr]int8:
-		fastpathTV.EncMapUintptrInt8V(v, fastpathCheckNilTrue, e)
-	case *map[uintptr]int8:
-		fastpathTV.EncMapUintptrInt8V(*v, fastpathCheckNilTrue, e)
-
-	case map[uintptr]int16:
-		fastpathTV.EncMapUintptrInt16V(v, fastpathCheckNilTrue, e)
-	case *map[uintptr]int16:
-		fastpathTV.EncMapUintptrInt16V(*v, fastpathCheckNilTrue, e)
-
-	case map[uintptr]int32:
-		fastpathTV.EncMapUintptrInt32V(v, fastpathCheckNilTrue, e)
-	case *map[uintptr]int32:
-		fastpathTV.EncMapUintptrInt32V(*v, fastpathCheckNilTrue, e)
-
-	case map[uintptr]int64:
-		fastpathTV.EncMapUintptrInt64V(v, fastpathCheckNilTrue, e)
-	case *map[uintptr]int64:
-		fastpathTV.EncMapUintptrInt64V(*v, fastpathCheckNilTrue, e)
-
-	case map[uintptr]float32:
-		fastpathTV.EncMapUintptrFloat32V(v, fastpathCheckNilTrue, e)
-	case *map[uintptr]float32:
-		fastpathTV.EncMapUintptrFloat32V(*v, fastpathCheckNilTrue, e)
-
-	case map[uintptr]float64:
-		fastpathTV.EncMapUintptrFloat64V(v, fastpathCheckNilTrue, e)
-	case *map[uintptr]float64:
-		fastpathTV.EncMapUintptrFloat64V(*v, fastpathCheckNilTrue, e)
-
-	case map[uintptr]bool:
-		fastpathTV.EncMapUintptrBoolV(v, fastpathCheckNilTrue, e)
-	case *map[uintptr]bool:
-		fastpathTV.EncMapUintptrBoolV(*v, fastpathCheckNilTrue, e)
-
-	case []int:
-		fastpathTV.EncSliceIntV(v, fastpathCheckNilTrue, e)
-	case *[]int:
-		fastpathTV.EncSliceIntV(*v, fastpathCheckNilTrue, e)
-
-	case map[int]interface{}:
-		fastpathTV.EncMapIntIntfV(v, fastpathCheckNilTrue, e)
-	case *map[int]interface{}:
-		fastpathTV.EncMapIntIntfV(*v, fastpathCheckNilTrue, e)
-
-	case map[int]string:
-		fastpathTV.EncMapIntStringV(v, fastpathCheckNilTrue, e)
-	case *map[int]string:
-		fastpathTV.EncMapIntStringV(*v, fastpathCheckNilTrue, e)
-
-	case map[int]uint:
-		fastpathTV.EncMapIntUintV(v, fastpathCheckNilTrue, e)
-	case *map[int]uint:
-		fastpathTV.EncMapIntUintV(*v, fastpathCheckNilTrue, e)
-
-	case map[int]uint8:
-		fastpathTV.EncMapIntUint8V(v, fastpathCheckNilTrue, e)
-	case *map[int]uint8:
-		fastpathTV.EncMapIntUint8V(*v, fastpathCheckNilTrue, e)
-
-	case map[int]uint16:
-		fastpathTV.EncMapIntUint16V(v, fastpathCheckNilTrue, e)
-	case *map[int]uint16:
-		fastpathTV.EncMapIntUint16V(*v, fastpathCheckNilTrue, e)
-
-	case map[int]uint32:
-		fastpathTV.EncMapIntUint32V(v, fastpathCheckNilTrue, e)
-	case *map[int]uint32:
-		fastpathTV.EncMapIntUint32V(*v, fastpathCheckNilTrue, e)
-
-	case map[int]uint64:
-		fastpathTV.EncMapIntUint64V(v, fastpathCheckNilTrue, e)
-	case *map[int]uint64:
-		fastpathTV.EncMapIntUint64V(*v, fastpathCheckNilTrue, e)
-
-	case map[int]uintptr:
-		fastpathTV.EncMapIntUintptrV(v, fastpathCheckNilTrue, e)
-	case *map[int]uintptr:
-		fastpathTV.EncMapIntUintptrV(*v, fastpathCheckNilTrue, e)
-
-	case map[int]int:
-		fastpathTV.EncMapIntIntV(v, fastpathCheckNilTrue, e)
-	case *map[int]int:
-		fastpathTV.EncMapIntIntV(*v, fastpathCheckNilTrue, e)
-
-	case map[int]int8:
-		fastpathTV.EncMapIntInt8V(v, fastpathCheckNilTrue, e)
-	case *map[int]int8:
-		fastpathTV.EncMapIntInt8V(*v, fastpathCheckNilTrue, e)
-
-	case map[int]int16:
-		fastpathTV.EncMapIntInt16V(v, fastpathCheckNilTrue, e)
-	case *map[int]int16:
-		fastpathTV.EncMapIntInt16V(*v, fastpathCheckNilTrue, e)
-
-	case map[int]int32:
-		fastpathTV.EncMapIntInt32V(v, fastpathCheckNilTrue, e)
-	case *map[int]int32:
-		fastpathTV.EncMapIntInt32V(*v, fastpathCheckNilTrue, e)
-
-	case map[int]int64:
-		fastpathTV.EncMapIntInt64V(v, fastpathCheckNilTrue, e)
-	case *map[int]int64:
-		fastpathTV.EncMapIntInt64V(*v, fastpathCheckNilTrue, e)
-
-	case map[int]float32:
-		fastpathTV.EncMapIntFloat32V(v, fastpathCheckNilTrue, e)
-	case *map[int]float32:
-		fastpathTV.EncMapIntFloat32V(*v, fastpathCheckNilTrue, e)
-
-	case map[int]float64:
-		fastpathTV.EncMapIntFloat64V(v, fastpathCheckNilTrue, e)
-	case *map[int]float64:
-		fastpathTV.EncMapIntFloat64V(*v, fastpathCheckNilTrue, e)
-
-	case map[int]bool:
-		fastpathTV.EncMapIntBoolV(v, fastpathCheckNilTrue, e)
-	case *map[int]bool:
-		fastpathTV.EncMapIntBoolV(*v, fastpathCheckNilTrue, e)
-
-	case []int8:
-		fastpathTV.EncSliceInt8V(v, fastpathCheckNilTrue, e)
-	case *[]int8:
-		fastpathTV.EncSliceInt8V(*v, fastpathCheckNilTrue, e)
-
-	case map[int8]interface{}:
-		fastpathTV.EncMapInt8IntfV(v, fastpathCheckNilTrue, e)
-	case *map[int8]interface{}:
-		fastpathTV.EncMapInt8IntfV(*v, fastpathCheckNilTrue, e)
-
-	case map[int8]string:
-		fastpathTV.EncMapInt8StringV(v, fastpathCheckNilTrue, e)
-	case *map[int8]string:
-		fastpathTV.EncMapInt8StringV(*v, fastpathCheckNilTrue, e)
-
-	case map[int8]uint:
-		fastpathTV.EncMapInt8UintV(v, fastpathCheckNilTrue, e)
-	case *map[int8]uint:
-		fastpathTV.EncMapInt8UintV(*v, fastpathCheckNilTrue, e)
-
-	case map[int8]uint8:
-		fastpathTV.EncMapInt8Uint8V(v, fastpathCheckNilTrue, e)
-	case *map[int8]uint8:
-		fastpathTV.EncMapInt8Uint8V(*v, fastpathCheckNilTrue, e)
-
-	case map[int8]uint16:
-		fastpathTV.EncMapInt8Uint16V(v, fastpathCheckNilTrue, e)
-	case *map[int8]uint16:
-		fastpathTV.EncMapInt8Uint16V(*v, fastpathCheckNilTrue, e)
-
-	case map[int8]uint32:
-		fastpathTV.EncMapInt8Uint32V(v, fastpathCheckNilTrue, e)
-	case *map[int8]uint32:
-		fastpathTV.EncMapInt8Uint32V(*v, fastpathCheckNilTrue, e)
-
-	case map[int8]uint64:
-		fastpathTV.EncMapInt8Uint64V(v, fastpathCheckNilTrue, e)
-	case *map[int8]uint64:
-		fastpathTV.EncMapInt8Uint64V(*v, fastpathCheckNilTrue, e)
-
-	case map[int8]uintptr:
-		fastpathTV.EncMapInt8UintptrV(v, fastpathCheckNilTrue, e)
-	case *map[int8]uintptr:
-		fastpathTV.EncMapInt8UintptrV(*v, fastpathCheckNilTrue, e)
-
-	case map[int8]int:
-		fastpathTV.EncMapInt8IntV(v, fastpathCheckNilTrue, e)
-	case *map[int8]int:
-		fastpathTV.EncMapInt8IntV(*v, fastpathCheckNilTrue, e)
-
-	case map[int8]int8:
-		fastpathTV.EncMapInt8Int8V(v, fastpathCheckNilTrue, e)
-	case *map[int8]int8:
-		fastpathTV.EncMapInt8Int8V(*v, fastpathCheckNilTrue, e)
-
-	case map[int8]int16:
-		fastpathTV.EncMapInt8Int16V(v, fastpathCheckNilTrue, e)
-	case *map[int8]int16:
-		fastpathTV.EncMapInt8Int16V(*v, fastpathCheckNilTrue, e)
-
-	case map[int8]int32:
-		fastpathTV.EncMapInt8Int32V(v, fastpathCheckNilTrue, e)
-	case *map[int8]int32:
-		fastpathTV.EncMapInt8Int32V(*v, fastpathCheckNilTrue, e)
-
-	case map[int8]int64:
-		fastpathTV.EncMapInt8Int64V(v, fastpathCheckNilTrue, e)
-	case *map[int8]int64:
-		fastpathTV.EncMapInt8Int64V(*v, fastpathCheckNilTrue, e)
-
-	case map[int8]float32:
-		fastpathTV.EncMapInt8Float32V(v, fastpathCheckNilTrue, e)
-	case *map[int8]float32:
-		fastpathTV.EncMapInt8Float32V(*v, fastpathCheckNilTrue, e)
-
-	case map[int8]float64:
-		fastpathTV.EncMapInt8Float64V(v, fastpathCheckNilTrue, e)
-	case *map[int8]float64:
-		fastpathTV.EncMapInt8Float64V(*v, fastpathCheckNilTrue, e)
-
-	case map[int8]bool:
-		fastpathTV.EncMapInt8BoolV(v, fastpathCheckNilTrue, e)
-	case *map[int8]bool:
-		fastpathTV.EncMapInt8BoolV(*v, fastpathCheckNilTrue, e)
-
-	case []int16:
-		fastpathTV.EncSliceInt16V(v, fastpathCheckNilTrue, e)
-	case *[]int16:
-		fastpathTV.EncSliceInt16V(*v, fastpathCheckNilTrue, e)
-
-	case map[int16]interface{}:
-		fastpathTV.EncMapInt16IntfV(v, fastpathCheckNilTrue, e)
-	case *map[int16]interface{}:
-		fastpathTV.EncMapInt16IntfV(*v, fastpathCheckNilTrue, e)
-
-	case map[int16]string:
-		fastpathTV.EncMapInt16StringV(v, fastpathCheckNilTrue, e)
-	case *map[int16]string:
-		fastpathTV.EncMapInt16StringV(*v, fastpathCheckNilTrue, e)
-
-	case map[int16]uint:
-		fastpathTV.EncMapInt16UintV(v, fastpathCheckNilTrue, e)
-	case *map[int16]uint:
-		fastpathTV.EncMapInt16UintV(*v, fastpathCheckNilTrue, e)
-
-	case map[int16]uint8:
-		fastpathTV.EncMapInt16Uint8V(v, fastpathCheckNilTrue, e)
-	case *map[int16]uint8:
-		fastpathTV.EncMapInt16Uint8V(*v, fastpathCheckNilTrue, e)
-
-	case map[int16]uint16:
-		fastpathTV.EncMapInt16Uint16V(v, fastpathCheckNilTrue, e)
-	case *map[int16]uint16:
-		fastpathTV.EncMapInt16Uint16V(*v, fastpathCheckNilTrue, e)
-
-	case map[int16]uint32:
-		fastpathTV.EncMapInt16Uint32V(v, fastpathCheckNilTrue, e)
-	case *map[int16]uint32:
-		fastpathTV.EncMapInt16Uint32V(*v, fastpathCheckNilTrue, e)
-
-	case map[int16]uint64:
-		fastpathTV.EncMapInt16Uint64V(v, fastpathCheckNilTrue, e)
-	case *map[int16]uint64:
-		fastpathTV.EncMapInt16Uint64V(*v, fastpathCheckNilTrue, e)
-
-	case map[int16]uintptr:
-		fastpathTV.EncMapInt16UintptrV(v, fastpathCheckNilTrue, e)
-	case *map[int16]uintptr:
-		fastpathTV.EncMapInt16UintptrV(*v, fastpathCheckNilTrue, e)
-
-	case map[int16]int:
-		fastpathTV.EncMapInt16IntV(v, fastpathCheckNilTrue, e)
-	case *map[int16]int:
-		fastpathTV.EncMapInt16IntV(*v, fastpathCheckNilTrue, e)
-
-	case map[int16]int8:
-		fastpathTV.EncMapInt16Int8V(v, fastpathCheckNilTrue, e)
-	case *map[int16]int8:
-		fastpathTV.EncMapInt16Int8V(*v, fastpathCheckNilTrue, e)
-
-	case map[int16]int16:
-		fastpathTV.EncMapInt16Int16V(v, fastpathCheckNilTrue, e)
-	case *map[int16]int16:
-		fastpathTV.EncMapInt16Int16V(*v, fastpathCheckNilTrue, e)
-
-	case map[int16]int32:
-		fastpathTV.EncMapInt16Int32V(v, fastpathCheckNilTrue, e)
-	case *map[int16]int32:
-		fastpathTV.EncMapInt16Int32V(*v, fastpathCheckNilTrue, e)
-
-	case map[int16]int64:
-		fastpathTV.EncMapInt16Int64V(v, fastpathCheckNilTrue, e)
-	case *map[int16]int64:
-		fastpathTV.EncMapInt16Int64V(*v, fastpathCheckNilTrue, e)
-
-	case map[int16]float32:
-		fastpathTV.EncMapInt16Float32V(v, fastpathCheckNilTrue, e)
-	case *map[int16]float32:
-		fastpathTV.EncMapInt16Float32V(*v, fastpathCheckNilTrue, e)
-
-	case map[int16]float64:
-		fastpathTV.EncMapInt16Float64V(v, fastpathCheckNilTrue, e)
-	case *map[int16]float64:
-		fastpathTV.EncMapInt16Float64V(*v, fastpathCheckNilTrue, e)
-
-	case map[int16]bool:
-		fastpathTV.EncMapInt16BoolV(v, fastpathCheckNilTrue, e)
-	case *map[int16]bool:
-		fastpathTV.EncMapInt16BoolV(*v, fastpathCheckNilTrue, e)
-
-	case []int32:
-		fastpathTV.EncSliceInt32V(v, fastpathCheckNilTrue, e)
-	case *[]int32:
-		fastpathTV.EncSliceInt32V(*v, fastpathCheckNilTrue, e)
-
-	case map[int32]interface{}:
-		fastpathTV.EncMapInt32IntfV(v, fastpathCheckNilTrue, e)
-	case *map[int32]interface{}:
-		fastpathTV.EncMapInt32IntfV(*v, fastpathCheckNilTrue, e)
-
-	case map[int32]string:
-		fastpathTV.EncMapInt32StringV(v, fastpathCheckNilTrue, e)
-	case *map[int32]string:
-		fastpathTV.EncMapInt32StringV(*v, fastpathCheckNilTrue, e)
-
-	case map[int32]uint:
-		fastpathTV.EncMapInt32UintV(v, fastpathCheckNilTrue, e)
-	case *map[int32]uint:
-		fastpathTV.EncMapInt32UintV(*v, fastpathCheckNilTrue, e)
-
-	case map[int32]uint8:
-		fastpathTV.EncMapInt32Uint8V(v, fastpathCheckNilTrue, e)
-	case *map[int32]uint8:
-		fastpathTV.EncMapInt32Uint8V(*v, fastpathCheckNilTrue, e)
-
-	case map[int32]uint16:
-		fastpathTV.EncMapInt32Uint16V(v, fastpathCheckNilTrue, e)
-	case *map[int32]uint16:
-		fastpathTV.EncMapInt32Uint16V(*v, fastpathCheckNilTrue, e)
-
-	case map[int32]uint32:
-		fastpathTV.EncMapInt32Uint32V(v, fastpathCheckNilTrue, e)
-	case *map[int32]uint32:
-		fastpathTV.EncMapInt32Uint32V(*v, fastpathCheckNilTrue, e)
-
-	case map[int32]uint64:
-		fastpathTV.EncMapInt32Uint64V(v, fastpathCheckNilTrue, e)
-	case *map[int32]uint64:
-		fastpathTV.EncMapInt32Uint64V(*v, fastpathCheckNilTrue, e)
-
-	case map[int32]uintptr:
-		fastpathTV.EncMapInt32UintptrV(v, fastpathCheckNilTrue, e)
-	case *map[int32]uintptr:
-		fastpathTV.EncMapInt32UintptrV(*v, fastpathCheckNilTrue, e)
-
-	case map[int32]int:
-		fastpathTV.EncMapInt32IntV(v, fastpathCheckNilTrue, e)
-	case *map[int32]int:
-		fastpathTV.EncMapInt32IntV(*v, fastpathCheckNilTrue, e)
-
-	case map[int32]int8:
-		fastpathTV.EncMapInt32Int8V(v, fastpathCheckNilTrue, e)
-	case *map[int32]int8:
-		fastpathTV.EncMapInt32Int8V(*v, fastpathCheckNilTrue, e)
-
-	case map[int32]int16:
-		fastpathTV.EncMapInt32Int16V(v, fastpathCheckNilTrue, e)
-	case *map[int32]int16:
-		fastpathTV.EncMapInt32Int16V(*v, fastpathCheckNilTrue, e)
-
-	case map[int32]int32:
-		fastpathTV.EncMapInt32Int32V(v, fastpathCheckNilTrue, e)
-	case *map[int32]int32:
-		fastpathTV.EncMapInt32Int32V(*v, fastpathCheckNilTrue, e)
-
-	case map[int32]int64:
-		fastpathTV.EncMapInt32Int64V(v, fastpathCheckNilTrue, e)
-	case *map[int32]int64:
-		fastpathTV.EncMapInt32Int64V(*v, fastpathCheckNilTrue, e)
-
-	case map[int32]float32:
-		fastpathTV.EncMapInt32Float32V(v, fastpathCheckNilTrue, e)
-	case *map[int32]float32:
-		fastpathTV.EncMapInt32Float32V(*v, fastpathCheckNilTrue, e)
-
-	case map[int32]float64:
-		fastpathTV.EncMapInt32Float64V(v, fastpathCheckNilTrue, e)
-	case *map[int32]float64:
-		fastpathTV.EncMapInt32Float64V(*v, fastpathCheckNilTrue, e)
-
-	case map[int32]bool:
-		fastpathTV.EncMapInt32BoolV(v, fastpathCheckNilTrue, e)
-	case *map[int32]bool:
-		fastpathTV.EncMapInt32BoolV(*v, fastpathCheckNilTrue, e)
-
-	case []int64:
-		fastpathTV.EncSliceInt64V(v, fastpathCheckNilTrue, e)
-	case *[]int64:
-		fastpathTV.EncSliceInt64V(*v, fastpathCheckNilTrue, e)
-
-	case map[int64]interface{}:
-		fastpathTV.EncMapInt64IntfV(v, fastpathCheckNilTrue, e)
-	case *map[int64]interface{}:
-		fastpathTV.EncMapInt64IntfV(*v, fastpathCheckNilTrue, e)
-
-	case map[int64]string:
-		fastpathTV.EncMapInt64StringV(v, fastpathCheckNilTrue, e)
-	case *map[int64]string:
-		fastpathTV.EncMapInt64StringV(*v, fastpathCheckNilTrue, e)
-
-	case map[int64]uint:
-		fastpathTV.EncMapInt64UintV(v, fastpathCheckNilTrue, e)
-	case *map[int64]uint:
-		fastpathTV.EncMapInt64UintV(*v, fastpathCheckNilTrue, e)
-
-	case map[int64]uint8:
-		fastpathTV.EncMapInt64Uint8V(v, fastpathCheckNilTrue, e)
-	case *map[int64]uint8:
-		fastpathTV.EncMapInt64Uint8V(*v, fastpathCheckNilTrue, e)
-
-	case map[int64]uint16:
-		fastpathTV.EncMapInt64Uint16V(v, fastpathCheckNilTrue, e)
-	case *map[int64]uint16:
-		fastpathTV.EncMapInt64Uint16V(*v, fastpathCheckNilTrue, e)
-
-	case map[int64]uint32:
-		fastpathTV.EncMapInt64Uint32V(v, fastpathCheckNilTrue, e)
-	case *map[int64]uint32:
-		fastpathTV.EncMapInt64Uint32V(*v, fastpathCheckNilTrue, e)
-
-	case map[int64]uint64:
-		fastpathTV.EncMapInt64Uint64V(v, fastpathCheckNilTrue, e)
-	case *map[int64]uint64:
-		fastpathTV.EncMapInt64Uint64V(*v, fastpathCheckNilTrue, e)
-
-	case map[int64]uintptr:
-		fastpathTV.EncMapInt64UintptrV(v, fastpathCheckNilTrue, e)
-	case *map[int64]uintptr:
-		fastpathTV.EncMapInt64UintptrV(*v, fastpathCheckNilTrue, e)
-
-	case map[int64]int:
-		fastpathTV.EncMapInt64IntV(v, fastpathCheckNilTrue, e)
-	case *map[int64]int:
-		fastpathTV.EncMapInt64IntV(*v, fastpathCheckNilTrue, e)
-
-	case map[int64]int8:
-		fastpathTV.EncMapInt64Int8V(v, fastpathCheckNilTrue, e)
-	case *map[int64]int8:
-		fastpathTV.EncMapInt64Int8V(*v, fastpathCheckNilTrue, e)
-
-	case map[int64]int16:
-		fastpathTV.EncMapInt64Int16V(v, fastpathCheckNilTrue, e)
-	case *map[int64]int16:
-		fastpathTV.EncMapInt64Int16V(*v, fastpathCheckNilTrue, e)
-
-	case map[int64]int32:
-		fastpathTV.EncMapInt64Int32V(v, fastpathCheckNilTrue, e)
-	case *map[int64]int32:
-		fastpathTV.EncMapInt64Int32V(*v, fastpathCheckNilTrue, e)
-
-	case map[int64]int64:
-		fastpathTV.EncMapInt64Int64V(v, fastpathCheckNilTrue, e)
-	case *map[int64]int64:
-		fastpathTV.EncMapInt64Int64V(*v, fastpathCheckNilTrue, e)
-
-	case map[int64]float32:
-		fastpathTV.EncMapInt64Float32V(v, fastpathCheckNilTrue, e)
-	case *map[int64]float32:
-		fastpathTV.EncMapInt64Float32V(*v, fastpathCheckNilTrue, e)
-
-	case map[int64]float64:
-		fastpathTV.EncMapInt64Float64V(v, fastpathCheckNilTrue, e)
-	case *map[int64]float64:
-		fastpathTV.EncMapInt64Float64V(*v, fastpathCheckNilTrue, e)
-
-	case map[int64]bool:
-		fastpathTV.EncMapInt64BoolV(v, fastpathCheckNilTrue, e)
-	case *map[int64]bool:
-		fastpathTV.EncMapInt64BoolV(*v, fastpathCheckNilTrue, e)
-
-	case []bool:
-		fastpathTV.EncSliceBoolV(v, fastpathCheckNilTrue, e)
-	case *[]bool:
-		fastpathTV.EncSliceBoolV(*v, fastpathCheckNilTrue, e)
-
-	case map[bool]interface{}:
-		fastpathTV.EncMapBoolIntfV(v, fastpathCheckNilTrue, e)
-	case *map[bool]interface{}:
-		fastpathTV.EncMapBoolIntfV(*v, fastpathCheckNilTrue, e)
-
-	case map[bool]string:
-		fastpathTV.EncMapBoolStringV(v, fastpathCheckNilTrue, e)
-	case *map[bool]string:
-		fastpathTV.EncMapBoolStringV(*v, fastpathCheckNilTrue, e)
-
-	case map[bool]uint:
-		fastpathTV.EncMapBoolUintV(v, fastpathCheckNilTrue, e)
-	case *map[bool]uint:
-		fastpathTV.EncMapBoolUintV(*v, fastpathCheckNilTrue, e)
-
-	case map[bool]uint8:
-		fastpathTV.EncMapBoolUint8V(v, fastpathCheckNilTrue, e)
-	case *map[bool]uint8:
-		fastpathTV.EncMapBoolUint8V(*v, fastpathCheckNilTrue, e)
-
-	case map[bool]uint16:
-		fastpathTV.EncMapBoolUint16V(v, fastpathCheckNilTrue, e)
-	case *map[bool]uint16:
-		fastpathTV.EncMapBoolUint16V(*v, fastpathCheckNilTrue, e)
-
-	case map[bool]uint32:
-		fastpathTV.EncMapBoolUint32V(v, fastpathCheckNilTrue, e)
-	case *map[bool]uint32:
-		fastpathTV.EncMapBoolUint32V(*v, fastpathCheckNilTrue, e)
-
-	case map[bool]uint64:
-		fastpathTV.EncMapBoolUint64V(v, fastpathCheckNilTrue, e)
-	case *map[bool]uint64:
-		fastpathTV.EncMapBoolUint64V(*v, fastpathCheckNilTrue, e)
-
-	case map[bool]uintptr:
-		fastpathTV.EncMapBoolUintptrV(v, fastpathCheckNilTrue, e)
-	case *map[bool]uintptr:
-		fastpathTV.EncMapBoolUintptrV(*v, fastpathCheckNilTrue, e)
-
-	case map[bool]int:
-		fastpathTV.EncMapBoolIntV(v, fastpathCheckNilTrue, e)
-	case *map[bool]int:
-		fastpathTV.EncMapBoolIntV(*v, fastpathCheckNilTrue, e)
-
-	case map[bool]int8:
-		fastpathTV.EncMapBoolInt8V(v, fastpathCheckNilTrue, e)
-	case *map[bool]int8:
-		fastpathTV.EncMapBoolInt8V(*v, fastpathCheckNilTrue, e)
-
-	case map[bool]int16:
-		fastpathTV.EncMapBoolInt16V(v, fastpathCheckNilTrue, e)
-	case *map[bool]int16:
-		fastpathTV.EncMapBoolInt16V(*v, fastpathCheckNilTrue, e)
-
-	case map[bool]int32:
-		fastpathTV.EncMapBoolInt32V(v, fastpathCheckNilTrue, e)
-	case *map[bool]int32:
-		fastpathTV.EncMapBoolInt32V(*v, fastpathCheckNilTrue, e)
-
-	case map[bool]int64:
-		fastpathTV.EncMapBoolInt64V(v, fastpathCheckNilTrue, e)
-	case *map[bool]int64:
-		fastpathTV.EncMapBoolInt64V(*v, fastpathCheckNilTrue, e)
-
-	case map[bool]float32:
-		fastpathTV.EncMapBoolFloat32V(v, fastpathCheckNilTrue, e)
-	case *map[bool]float32:
-		fastpathTV.EncMapBoolFloat32V(*v, fastpathCheckNilTrue, e)
-
-	case map[bool]float64:
-		fastpathTV.EncMapBoolFloat64V(v, fastpathCheckNilTrue, e)
-	case *map[bool]float64:
-		fastpathTV.EncMapBoolFloat64V(*v, fastpathCheckNilTrue, e)
-
-	case map[bool]bool:
-		fastpathTV.EncMapBoolBoolV(v, fastpathCheckNilTrue, e)
-	case *map[bool]bool:
-		fastpathTV.EncMapBoolBoolV(*v, fastpathCheckNilTrue, e)
-
-	default:
-		_ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
-		return false
-	}
-	return true
-}
-
-func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool {
-	switch v := iv.(type) {
-
-	case []interface{}:
-		fastpathTV.EncSliceIntfV(v, fastpathCheckNilTrue, e)
-	case *[]interface{}:
-		fastpathTV.EncSliceIntfV(*v, fastpathCheckNilTrue, e)
-
-	case []string:
-		fastpathTV.EncSliceStringV(v, fastpathCheckNilTrue, e)
-	case *[]string:
-		fastpathTV.EncSliceStringV(*v, fastpathCheckNilTrue, e)
-
-	case []float32:
-		fastpathTV.EncSliceFloat32V(v, fastpathCheckNilTrue, e)
-	case *[]float32:
-		fastpathTV.EncSliceFloat32V(*v, fastpathCheckNilTrue, e)
-
-	case []float64:
-		fastpathTV.EncSliceFloat64V(v, fastpathCheckNilTrue, e)
-	case *[]float64:
-		fastpathTV.EncSliceFloat64V(*v, fastpathCheckNilTrue, e)
-
-	case []uint:
-		fastpathTV.EncSliceUintV(v, fastpathCheckNilTrue, e)
-	case *[]uint:
-		fastpathTV.EncSliceUintV(*v, fastpathCheckNilTrue, e)
-
-	case []uint16:
-		fastpathTV.EncSliceUint16V(v, fastpathCheckNilTrue, e)
-	case *[]uint16:
-		fastpathTV.EncSliceUint16V(*v, fastpathCheckNilTrue, e)
-
-	case []uint32:
-		fastpathTV.EncSliceUint32V(v, fastpathCheckNilTrue, e)
-	case *[]uint32:
-		fastpathTV.EncSliceUint32V(*v, fastpathCheckNilTrue, e)
-
-	case []uint64:
-		fastpathTV.EncSliceUint64V(v, fastpathCheckNilTrue, e)
-	case *[]uint64:
-		fastpathTV.EncSliceUint64V(*v, fastpathCheckNilTrue, e)
-
-	case []uintptr:
-		fastpathTV.EncSliceUintptrV(v, fastpathCheckNilTrue, e)
-	case *[]uintptr:
-		fastpathTV.EncSliceUintptrV(*v, fastpathCheckNilTrue, e)
-
-	case []int:
-		fastpathTV.EncSliceIntV(v, fastpathCheckNilTrue, e)
-	case *[]int:
-		fastpathTV.EncSliceIntV(*v, fastpathCheckNilTrue, e)
-
-	case []int8:
-		fastpathTV.EncSliceInt8V(v, fastpathCheckNilTrue, e)
-	case *[]int8:
-		fastpathTV.EncSliceInt8V(*v, fastpathCheckNilTrue, e)
-
-	case []int16:
-		fastpathTV.EncSliceInt16V(v, fastpathCheckNilTrue, e)
-	case *[]int16:
-		fastpathTV.EncSliceInt16V(*v, fastpathCheckNilTrue, e)
-
-	case []int32:
-		fastpathTV.EncSliceInt32V(v, fastpathCheckNilTrue, e)
-	case *[]int32:
-		fastpathTV.EncSliceInt32V(*v, fastpathCheckNilTrue, e)
-
-	case []int64:
-		fastpathTV.EncSliceInt64V(v, fastpathCheckNilTrue, e)
-	case *[]int64:
-		fastpathTV.EncSliceInt64V(*v, fastpathCheckNilTrue, e)
-
-	case []bool:
-		fastpathTV.EncSliceBoolV(v, fastpathCheckNilTrue, e)
-	case *[]bool:
-		fastpathTV.EncSliceBoolV(*v, fastpathCheckNilTrue, e)
-
-	default:
-		_ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
-		return false
-	}
-	return true
-}
-
-func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool {
-	switch v := iv.(type) {
-
-	case map[interface{}]interface{}:
-		fastpathTV.EncMapIntfIntfV(v, fastpathCheckNilTrue, e)
-	case *map[interface{}]interface{}:
-		fastpathTV.EncMapIntfIntfV(*v, fastpathCheckNilTrue, e)
-
-	case map[interface{}]string:
-		fastpathTV.EncMapIntfStringV(v, fastpathCheckNilTrue, e)
-	case *map[interface{}]string:
-		fastpathTV.EncMapIntfStringV(*v, fastpathCheckNilTrue, e)
-
-	case map[interface{}]uint:
-		fastpathTV.EncMapIntfUintV(v, fastpathCheckNilTrue, e)
-	case *map[interface{}]uint:
-		fastpathTV.EncMapIntfUintV(*v, fastpathCheckNilTrue, e)
-
-	case map[interface{}]uint8:
-		fastpathTV.EncMapIntfUint8V(v, fastpathCheckNilTrue, e)
-	case *map[interface{}]uint8:
-		fastpathTV.EncMapIntfUint8V(*v, fastpathCheckNilTrue, e)
-
-	case map[interface{}]uint16:
-		fastpathTV.EncMapIntfUint16V(v, fastpathCheckNilTrue, e)
-	case *map[interface{}]uint16:
-		fastpathTV.EncMapIntfUint16V(*v, fastpathCheckNilTrue, e)
-
-	case map[interface{}]uint32:
-		fastpathTV.EncMapIntfUint32V(v, fastpathCheckNilTrue, e)
-	case *map[interface{}]uint32:
-		fastpathTV.EncMapIntfUint32V(*v, fastpathCheckNilTrue, e)
-
-	case map[interface{}]uint64:
-		fastpathTV.EncMapIntfUint64V(v, fastpathCheckNilTrue, e)
-	case *map[interface{}]uint64:
-		fastpathTV.EncMapIntfUint64V(*v, fastpathCheckNilTrue, e)
-
-	case map[interface{}]uintptr:
-		fastpathTV.EncMapIntfUintptrV(v, fastpathCheckNilTrue, e)
-	case *map[interface{}]uintptr:
-		fastpathTV.EncMapIntfUintptrV(*v, fastpathCheckNilTrue, e)
-
-	case map[interface{}]int:
-		fastpathTV.EncMapIntfIntV(v, fastpathCheckNilTrue, e)
-	case *map[interface{}]int:
-		fastpathTV.EncMapIntfIntV(*v, fastpathCheckNilTrue, e)
-
-	case map[interface{}]int8:
-		fastpathTV.EncMapIntfInt8V(v, fastpathCheckNilTrue, e)
-	case *map[interface{}]int8:
-		fastpathTV.EncMapIntfInt8V(*v, fastpathCheckNilTrue, e)
-
-	case map[interface{}]int16:
-		fastpathTV.EncMapIntfInt16V(v, fastpathCheckNilTrue, e)
-	case *map[interface{}]int16:
-		fastpathTV.EncMapIntfInt16V(*v, fastpathCheckNilTrue, e)
-
-	case map[interface{}]int32:
-		fastpathTV.EncMapIntfInt32V(v, fastpathCheckNilTrue, e)
-	case *map[interface{}]int32:
-		fastpathTV.EncMapIntfInt32V(*v, fastpathCheckNilTrue, e)
-
-	case map[interface{}]int64:
-		fastpathTV.EncMapIntfInt64V(v, fastpathCheckNilTrue, e)
-	case *map[interface{}]int64:
-		fastpathTV.EncMapIntfInt64V(*v, fastpathCheckNilTrue, e)
-
-	case map[interface{}]float32:
-		fastpathTV.EncMapIntfFloat32V(v, fastpathCheckNilTrue, e)
-	case *map[interface{}]float32:
-		fastpathTV.EncMapIntfFloat32V(*v, fastpathCheckNilTrue, e)
-
-	case map[interface{}]float64:
-		fastpathTV.EncMapIntfFloat64V(v, fastpathCheckNilTrue, e)
-	case *map[interface{}]float64:
-		fastpathTV.EncMapIntfFloat64V(*v, fastpathCheckNilTrue, e)
-
-	case map[interface{}]bool:
-		fastpathTV.EncMapIntfBoolV(v, fastpathCheckNilTrue, e)
-	case *map[interface{}]bool:
-		fastpathTV.EncMapIntfBoolV(*v, fastpathCheckNilTrue, e)
-
-	case map[string]interface{}:
-		fastpathTV.EncMapStringIntfV(v, fastpathCheckNilTrue, e)
-	case *map[string]interface{}:
-		fastpathTV.EncMapStringIntfV(*v, fastpathCheckNilTrue, e)
-
-	case map[string]string:
-		fastpathTV.EncMapStringStringV(v, fastpathCheckNilTrue, e)
-	case *map[string]string:
-		fastpathTV.EncMapStringStringV(*v, fastpathCheckNilTrue, e)
-
-	case map[string]uint:
-		fastpathTV.EncMapStringUintV(v, fastpathCheckNilTrue, e)
-	case *map[string]uint:
-		fastpathTV.EncMapStringUintV(*v, fastpathCheckNilTrue, e)
-
-	case map[string]uint8:
-		fastpathTV.EncMapStringUint8V(v, fastpathCheckNilTrue, e)
-	case *map[string]uint8:
-		fastpathTV.EncMapStringUint8V(*v, fastpathCheckNilTrue, e)
-
-	case map[string]uint16:
-		fastpathTV.EncMapStringUint16V(v, fastpathCheckNilTrue, e)
-	case *map[string]uint16:
-		fastpathTV.EncMapStringUint16V(*v, fastpathCheckNilTrue, e)
-
-	case map[string]uint32:
-		fastpathTV.EncMapStringUint32V(v, fastpathCheckNilTrue, e)
-	case *map[string]uint32:
-		fastpathTV.EncMapStringUint32V(*v, fastpathCheckNilTrue, e)
-
-	case map[string]uint64:
-		fastpathTV.EncMapStringUint64V(v, fastpathCheckNilTrue, e)
-	case *map[string]uint64:
-		fastpathTV.EncMapStringUint64V(*v, fastpathCheckNilTrue, e)
-
-	case map[string]uintptr:
-		fastpathTV.EncMapStringUintptrV(v, fastpathCheckNilTrue, e)
-	case *map[string]uintptr:
-		fastpathTV.EncMapStringUintptrV(*v, fastpathCheckNilTrue, e)
-
-	case map[string]int:
-		fastpathTV.EncMapStringIntV(v, fastpathCheckNilTrue, e)
-	case *map[string]int:
-		fastpathTV.EncMapStringIntV(*v, fastpathCheckNilTrue, e)
-
-	case map[string]int8:
-		fastpathTV.EncMapStringInt8V(v, fastpathCheckNilTrue, e)
-	case *map[string]int8:
-		fastpathTV.EncMapStringInt8V(*v, fastpathCheckNilTrue, e)
-
-	case map[string]int16:
-		fastpathTV.EncMapStringInt16V(v, fastpathCheckNilTrue, e)
-	case *map[string]int16:
-		fastpathTV.EncMapStringInt16V(*v, fastpathCheckNilTrue, e)
-
-	case map[string]int32:
-		fastpathTV.EncMapStringInt32V(v, fastpathCheckNilTrue, e)
-	case *map[string]int32:
-		fastpathTV.EncMapStringInt32V(*v, fastpathCheckNilTrue, e)
-
-	case map[string]int64:
-		fastpathTV.EncMapStringInt64V(v, fastpathCheckNilTrue, e)
-	case *map[string]int64:
-		fastpathTV.EncMapStringInt64V(*v, fastpathCheckNilTrue, e)
-
-	case map[string]float32:
-		fastpathTV.EncMapStringFloat32V(v, fastpathCheckNilTrue, e)
-	case *map[string]float32:
-		fastpathTV.EncMapStringFloat32V(*v, fastpathCheckNilTrue, e)
-
-	case map[string]float64:
-		fastpathTV.EncMapStringFloat64V(v, fastpathCheckNilTrue, e)
-	case *map[string]float64:
-		fastpathTV.EncMapStringFloat64V(*v, fastpathCheckNilTrue, e)
-
-	case map[string]bool:
-		fastpathTV.EncMapStringBoolV(v, fastpathCheckNilTrue, e)
-	case *map[string]bool:
-		fastpathTV.EncMapStringBoolV(*v, fastpathCheckNilTrue, e)
-
-	case map[float32]interface{}:
-		fastpathTV.EncMapFloat32IntfV(v, fastpathCheckNilTrue, e)
-	case *map[float32]interface{}:
-		fastpathTV.EncMapFloat32IntfV(*v, fastpathCheckNilTrue, e)
-
-	case map[float32]string:
-		fastpathTV.EncMapFloat32StringV(v, fastpathCheckNilTrue, e)
-	case *map[float32]string:
-		fastpathTV.EncMapFloat32StringV(*v, fastpathCheckNilTrue, e)
-
-	case map[float32]uint:
-		fastpathTV.EncMapFloat32UintV(v, fastpathCheckNilTrue, e)
-	case *map[float32]uint:
-		fastpathTV.EncMapFloat32UintV(*v, fastpathCheckNilTrue, e)
-
-	case map[float32]uint8:
-		fastpathTV.EncMapFloat32Uint8V(v, fastpathCheckNilTrue, e)
-	case *map[float32]uint8:
-		fastpathTV.EncMapFloat32Uint8V(*v, fastpathCheckNilTrue, e)
-
-	case map[float32]uint16:
-		fastpathTV.EncMapFloat32Uint16V(v, fastpathCheckNilTrue, e)
-	case *map[float32]uint16:
-		fastpathTV.EncMapFloat32Uint16V(*v, fastpathCheckNilTrue, e)
-
-	case map[float32]uint32:
-		fastpathTV.EncMapFloat32Uint32V(v, fastpathCheckNilTrue, e)
-	case *map[float32]uint32:
-		fastpathTV.EncMapFloat32Uint32V(*v, fastpathCheckNilTrue, e)
-
-	case map[float32]uint64:
-		fastpathTV.EncMapFloat32Uint64V(v, fastpathCheckNilTrue, e)
-	case *map[float32]uint64:
-		fastpathTV.EncMapFloat32Uint64V(*v, fastpathCheckNilTrue, e)
-
-	case map[float32]uintptr:
-		fastpathTV.EncMapFloat32UintptrV(v, fastpathCheckNilTrue, e)
-	case *map[float32]uintptr:
-		fastpathTV.EncMapFloat32UintptrV(*v, fastpathCheckNilTrue, e)
-
-	case map[float32]int:
-		fastpathTV.EncMapFloat32IntV(v, fastpathCheckNilTrue, e)
-	case *map[float32]int:
-		fastpathTV.EncMapFloat32IntV(*v, fastpathCheckNilTrue, e)
-
-	case map[float32]int8:
-		fastpathTV.EncMapFloat32Int8V(v, fastpathCheckNilTrue, e)
-	case *map[float32]int8:
-		fastpathTV.EncMapFloat32Int8V(*v, fastpathCheckNilTrue, e)
-
-	case map[float32]int16:
-		fastpathTV.EncMapFloat32Int16V(v, fastpathCheckNilTrue, e)
-	case *map[float32]int16:
-		fastpathTV.EncMapFloat32Int16V(*v, fastpathCheckNilTrue, e)
-
-	case map[float32]int32:
-		fastpathTV.EncMapFloat32Int32V(v, fastpathCheckNilTrue, e)
-	case *map[float32]int32:
-		fastpathTV.EncMapFloat32Int32V(*v, fastpathCheckNilTrue, e)
-
-	case map[float32]int64:
-		fastpathTV.EncMapFloat32Int64V(v, fastpathCheckNilTrue, e)
-	case *map[float32]int64:
-		fastpathTV.EncMapFloat32Int64V(*v, fastpathCheckNilTrue, e)
-
-	case map[float32]float32:
-		fastpathTV.EncMapFloat32Float32V(v, fastpathCheckNilTrue, e)
-	case *map[float32]float32:
-		fastpathTV.EncMapFloat32Float32V(*v, fastpathCheckNilTrue, e)
-
-	case map[float32]float64:
-		fastpathTV.EncMapFloat32Float64V(v, fastpathCheckNilTrue, e)
-	case *map[float32]float64:
-		fastpathTV.EncMapFloat32Float64V(*v, fastpathCheckNilTrue, e)
-
-	case map[float32]bool:
-		fastpathTV.EncMapFloat32BoolV(v, fastpathCheckNilTrue, e)
-	case *map[float32]bool:
-		fastpathTV.EncMapFloat32BoolV(*v, fastpathCheckNilTrue, e)
-
-	case map[float64]interface{}:
-		fastpathTV.EncMapFloat64IntfV(v, fastpathCheckNilTrue, e)
-	case *map[float64]interface{}:
-		fastpathTV.EncMapFloat64IntfV(*v, fastpathCheckNilTrue, e)
-
-	case map[float64]string:
-		fastpathTV.EncMapFloat64StringV(v, fastpathCheckNilTrue, e)
-	case *map[float64]string:
-		fastpathTV.EncMapFloat64StringV(*v, fastpathCheckNilTrue, e)
-
-	case map[float64]uint:
-		fastpathTV.EncMapFloat64UintV(v, fastpathCheckNilTrue, e)
-	case *map[float64]uint:
-		fastpathTV.EncMapFloat64UintV(*v, fastpathCheckNilTrue, e)
-
-	case map[float64]uint8:
-		fastpathTV.EncMapFloat64Uint8V(v, fastpathCheckNilTrue, e)
-	case *map[float64]uint8:
-		fastpathTV.EncMapFloat64Uint8V(*v, fastpathCheckNilTrue, e)
-
-	case map[float64]uint16:
-		fastpathTV.EncMapFloat64Uint16V(v, fastpathCheckNilTrue, e)
-	case *map[float64]uint16:
-		fastpathTV.EncMapFloat64Uint16V(*v, fastpathCheckNilTrue, e)
-
-	case map[float64]uint32:
-		fastpathTV.EncMapFloat64Uint32V(v, fastpathCheckNilTrue, e)
-	case *map[float64]uint32:
-		fastpathTV.EncMapFloat64Uint32V(*v, fastpathCheckNilTrue, e)
-
-	case map[float64]uint64:
-		fastpathTV.EncMapFloat64Uint64V(v, fastpathCheckNilTrue, e)
-	case *map[float64]uint64:
-		fastpathTV.EncMapFloat64Uint64V(*v, fastpathCheckNilTrue, e)
-
-	case map[float64]uintptr:
-		fastpathTV.EncMapFloat64UintptrV(v, fastpathCheckNilTrue, e)
-	case *map[float64]uintptr:
-		fastpathTV.EncMapFloat64UintptrV(*v, fastpathCheckNilTrue, e)
-
-	case map[float64]int:
-		fastpathTV.EncMapFloat64IntV(v, fastpathCheckNilTrue, e)
-	case *map[float64]int:
-		fastpathTV.EncMapFloat64IntV(*v, fastpathCheckNilTrue, e)
-
-	case map[float64]int8:
-		fastpathTV.EncMapFloat64Int8V(v, fastpathCheckNilTrue, e)
-	case *map[float64]int8:
-		fastpathTV.EncMapFloat64Int8V(*v, fastpathCheckNilTrue, e)
-
-	case map[float64]int16:
-		fastpathTV.EncMapFloat64Int16V(v, fastpathCheckNilTrue, e)
-	case *map[float64]int16:
-		fastpathTV.EncMapFloat64Int16V(*v, fastpathCheckNilTrue, e)
-
-	case map[float64]int32:
-		fastpathTV.EncMapFloat64Int32V(v, fastpathCheckNilTrue, e)
-	case *map[float64]int32:
-		fastpathTV.EncMapFloat64Int32V(*v, fastpathCheckNilTrue, e)
-
-	case map[float64]int64:
-		fastpathTV.EncMapFloat64Int64V(v, fastpathCheckNilTrue, e)
-	case *map[float64]int64:
-		fastpathTV.EncMapFloat64Int64V(*v, fastpathCheckNilTrue, e)
-
-	case map[float64]float32:
-		fastpathTV.EncMapFloat64Float32V(v, fastpathCheckNilTrue, e)
-	case *map[float64]float32:
-		fastpathTV.EncMapFloat64Float32V(*v, fastpathCheckNilTrue, e)
-
-	case map[float64]float64:
-		fastpathTV.EncMapFloat64Float64V(v, fastpathCheckNilTrue, e)
-	case *map[float64]float64:
-		fastpathTV.EncMapFloat64Float64V(*v, fastpathCheckNilTrue, e)
-
-	case map[float64]bool:
-		fastpathTV.EncMapFloat64BoolV(v, fastpathCheckNilTrue, e)
-	case *map[float64]bool:
-		fastpathTV.EncMapFloat64BoolV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint]interface{}:
-		fastpathTV.EncMapUintIntfV(v, fastpathCheckNilTrue, e)
-	case *map[uint]interface{}:
-		fastpathTV.EncMapUintIntfV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint]string:
-		fastpathTV.EncMapUintStringV(v, fastpathCheckNilTrue, e)
-	case *map[uint]string:
-		fastpathTV.EncMapUintStringV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint]uint:
-		fastpathTV.EncMapUintUintV(v, fastpathCheckNilTrue, e)
-	case *map[uint]uint:
-		fastpathTV.EncMapUintUintV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint]uint8:
-		fastpathTV.EncMapUintUint8V(v, fastpathCheckNilTrue, e)
-	case *map[uint]uint8:
-		fastpathTV.EncMapUintUint8V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint]uint16:
-		fastpathTV.EncMapUintUint16V(v, fastpathCheckNilTrue, e)
-	case *map[uint]uint16:
-		fastpathTV.EncMapUintUint16V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint]uint32:
-		fastpathTV.EncMapUintUint32V(v, fastpathCheckNilTrue, e)
-	case *map[uint]uint32:
-		fastpathTV.EncMapUintUint32V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint]uint64:
-		fastpathTV.EncMapUintUint64V(v, fastpathCheckNilTrue, e)
-	case *map[uint]uint64:
-		fastpathTV.EncMapUintUint64V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint]uintptr:
-		fastpathTV.EncMapUintUintptrV(v, fastpathCheckNilTrue, e)
-	case *map[uint]uintptr:
-		fastpathTV.EncMapUintUintptrV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint]int:
-		fastpathTV.EncMapUintIntV(v, fastpathCheckNilTrue, e)
-	case *map[uint]int:
-		fastpathTV.EncMapUintIntV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint]int8:
-		fastpathTV.EncMapUintInt8V(v, fastpathCheckNilTrue, e)
-	case *map[uint]int8:
-		fastpathTV.EncMapUintInt8V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint]int16:
-		fastpathTV.EncMapUintInt16V(v, fastpathCheckNilTrue, e)
-	case *map[uint]int16:
-		fastpathTV.EncMapUintInt16V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint]int32:
-		fastpathTV.EncMapUintInt32V(v, fastpathCheckNilTrue, e)
-	case *map[uint]int32:
-		fastpathTV.EncMapUintInt32V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint]int64:
-		fastpathTV.EncMapUintInt64V(v, fastpathCheckNilTrue, e)
-	case *map[uint]int64:
-		fastpathTV.EncMapUintInt64V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint]float32:
-		fastpathTV.EncMapUintFloat32V(v, fastpathCheckNilTrue, e)
-	case *map[uint]float32:
-		fastpathTV.EncMapUintFloat32V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint]float64:
-		fastpathTV.EncMapUintFloat64V(v, fastpathCheckNilTrue, e)
-	case *map[uint]float64:
-		fastpathTV.EncMapUintFloat64V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint]bool:
-		fastpathTV.EncMapUintBoolV(v, fastpathCheckNilTrue, e)
-	case *map[uint]bool:
-		fastpathTV.EncMapUintBoolV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint8]interface{}:
-		fastpathTV.EncMapUint8IntfV(v, fastpathCheckNilTrue, e)
-	case *map[uint8]interface{}:
-		fastpathTV.EncMapUint8IntfV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint8]string:
-		fastpathTV.EncMapUint8StringV(v, fastpathCheckNilTrue, e)
-	case *map[uint8]string:
-		fastpathTV.EncMapUint8StringV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint8]uint:
-		fastpathTV.EncMapUint8UintV(v, fastpathCheckNilTrue, e)
-	case *map[uint8]uint:
-		fastpathTV.EncMapUint8UintV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint8]uint8:
-		fastpathTV.EncMapUint8Uint8V(v, fastpathCheckNilTrue, e)
-	case *map[uint8]uint8:
-		fastpathTV.EncMapUint8Uint8V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint8]uint16:
-		fastpathTV.EncMapUint8Uint16V(v, fastpathCheckNilTrue, e)
-	case *map[uint8]uint16:
-		fastpathTV.EncMapUint8Uint16V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint8]uint32:
-		fastpathTV.EncMapUint8Uint32V(v, fastpathCheckNilTrue, e)
-	case *map[uint8]uint32:
-		fastpathTV.EncMapUint8Uint32V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint8]uint64:
-		fastpathTV.EncMapUint8Uint64V(v, fastpathCheckNilTrue, e)
-	case *map[uint8]uint64:
-		fastpathTV.EncMapUint8Uint64V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint8]uintptr:
-		fastpathTV.EncMapUint8UintptrV(v, fastpathCheckNilTrue, e)
-	case *map[uint8]uintptr:
-		fastpathTV.EncMapUint8UintptrV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint8]int:
-		fastpathTV.EncMapUint8IntV(v, fastpathCheckNilTrue, e)
-	case *map[uint8]int:
-		fastpathTV.EncMapUint8IntV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint8]int8:
-		fastpathTV.EncMapUint8Int8V(v, fastpathCheckNilTrue, e)
-	case *map[uint8]int8:
-		fastpathTV.EncMapUint8Int8V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint8]int16:
-		fastpathTV.EncMapUint8Int16V(v, fastpathCheckNilTrue, e)
-	case *map[uint8]int16:
-		fastpathTV.EncMapUint8Int16V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint8]int32:
-		fastpathTV.EncMapUint8Int32V(v, fastpathCheckNilTrue, e)
-	case *map[uint8]int32:
-		fastpathTV.EncMapUint8Int32V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint8]int64:
-		fastpathTV.EncMapUint8Int64V(v, fastpathCheckNilTrue, e)
-	case *map[uint8]int64:
-		fastpathTV.EncMapUint8Int64V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint8]float32:
-		fastpathTV.EncMapUint8Float32V(v, fastpathCheckNilTrue, e)
-	case *map[uint8]float32:
-		fastpathTV.EncMapUint8Float32V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint8]float64:
-		fastpathTV.EncMapUint8Float64V(v, fastpathCheckNilTrue, e)
-	case *map[uint8]float64:
-		fastpathTV.EncMapUint8Float64V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint8]bool:
-		fastpathTV.EncMapUint8BoolV(v, fastpathCheckNilTrue, e)
-	case *map[uint8]bool:
-		fastpathTV.EncMapUint8BoolV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint16]interface{}:
-		fastpathTV.EncMapUint16IntfV(v, fastpathCheckNilTrue, e)
-	case *map[uint16]interface{}:
-		fastpathTV.EncMapUint16IntfV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint16]string:
-		fastpathTV.EncMapUint16StringV(v, fastpathCheckNilTrue, e)
-	case *map[uint16]string:
-		fastpathTV.EncMapUint16StringV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint16]uint:
-		fastpathTV.EncMapUint16UintV(v, fastpathCheckNilTrue, e)
-	case *map[uint16]uint:
-		fastpathTV.EncMapUint16UintV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint16]uint8:
-		fastpathTV.EncMapUint16Uint8V(v, fastpathCheckNilTrue, e)
-	case *map[uint16]uint8:
-		fastpathTV.EncMapUint16Uint8V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint16]uint16:
-		fastpathTV.EncMapUint16Uint16V(v, fastpathCheckNilTrue, e)
-	case *map[uint16]uint16:
-		fastpathTV.EncMapUint16Uint16V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint16]uint32:
-		fastpathTV.EncMapUint16Uint32V(v, fastpathCheckNilTrue, e)
-	case *map[uint16]uint32:
-		fastpathTV.EncMapUint16Uint32V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint16]uint64:
-		fastpathTV.EncMapUint16Uint64V(v, fastpathCheckNilTrue, e)
-	case *map[uint16]uint64:
-		fastpathTV.EncMapUint16Uint64V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint16]uintptr:
-		fastpathTV.EncMapUint16UintptrV(v, fastpathCheckNilTrue, e)
-	case *map[uint16]uintptr:
-		fastpathTV.EncMapUint16UintptrV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint16]int:
-		fastpathTV.EncMapUint16IntV(v, fastpathCheckNilTrue, e)
-	case *map[uint16]int:
-		fastpathTV.EncMapUint16IntV(*v, fastpathCheckNilTrue, e)
-
-	case map[uint16]int8:
-		fastpathTV.EncMapUint16Int8V(v, fastpathCheckNilTrue, e)
-	case *map[uint16]int8:
-		fastpathTV.EncMapUint16Int8V(*v, fastpathCheckNilTrue, e)
-
-	case map[uint16]int16:
-		fastpathTV.EncMapUint16Int16V(v, fastpathCheckNilTrue, e)
-	case
*map[uint16]int16: - fastpathTV.EncMapUint16Int16V(*v, fastpathCheckNilTrue, e) - - case map[uint16]int32: - fastpathTV.EncMapUint16Int32V(v, fastpathCheckNilTrue, e) - case *map[uint16]int32: - fastpathTV.EncMapUint16Int32V(*v, fastpathCheckNilTrue, e) - - case map[uint16]int64: - fastpathTV.EncMapUint16Int64V(v, fastpathCheckNilTrue, e) - case *map[uint16]int64: - fastpathTV.EncMapUint16Int64V(*v, fastpathCheckNilTrue, e) - - case map[uint16]float32: - fastpathTV.EncMapUint16Float32V(v, fastpathCheckNilTrue, e) - case *map[uint16]float32: - fastpathTV.EncMapUint16Float32V(*v, fastpathCheckNilTrue, e) - - case map[uint16]float64: - fastpathTV.EncMapUint16Float64V(v, fastpathCheckNilTrue, e) - case *map[uint16]float64: - fastpathTV.EncMapUint16Float64V(*v, fastpathCheckNilTrue, e) - - case map[uint16]bool: - fastpathTV.EncMapUint16BoolV(v, fastpathCheckNilTrue, e) - case *map[uint16]bool: - fastpathTV.EncMapUint16BoolV(*v, fastpathCheckNilTrue, e) - - case map[uint32]interface{}: - fastpathTV.EncMapUint32IntfV(v, fastpathCheckNilTrue, e) - case *map[uint32]interface{}: - fastpathTV.EncMapUint32IntfV(*v, fastpathCheckNilTrue, e) - - case map[uint32]string: - fastpathTV.EncMapUint32StringV(v, fastpathCheckNilTrue, e) - case *map[uint32]string: - fastpathTV.EncMapUint32StringV(*v, fastpathCheckNilTrue, e) - - case map[uint32]uint: - fastpathTV.EncMapUint32UintV(v, fastpathCheckNilTrue, e) - case *map[uint32]uint: - fastpathTV.EncMapUint32UintV(*v, fastpathCheckNilTrue, e) - - case map[uint32]uint8: - fastpathTV.EncMapUint32Uint8V(v, fastpathCheckNilTrue, e) - case *map[uint32]uint8: - fastpathTV.EncMapUint32Uint8V(*v, fastpathCheckNilTrue, e) - - case map[uint32]uint16: - fastpathTV.EncMapUint32Uint16V(v, fastpathCheckNilTrue, e) - case *map[uint32]uint16: - fastpathTV.EncMapUint32Uint16V(*v, fastpathCheckNilTrue, e) - - case map[uint32]uint32: - fastpathTV.EncMapUint32Uint32V(v, fastpathCheckNilTrue, e) - case *map[uint32]uint32: - fastpathTV.EncMapUint32Uint32V(*v, fastpathCheckNilTrue, e) - - case map[uint32]uint64: - fastpathTV.EncMapUint32Uint64V(v, fastpathCheckNilTrue, e) - case *map[uint32]uint64: - fastpathTV.EncMapUint32Uint64V(*v, fastpathCheckNilTrue, e) - - case map[uint32]uintptr: - fastpathTV.EncMapUint32UintptrV(v, fastpathCheckNilTrue, e) - case *map[uint32]uintptr: - fastpathTV.EncMapUint32UintptrV(*v, fastpathCheckNilTrue, e) - - case map[uint32]int: - fastpathTV.EncMapUint32IntV(v, fastpathCheckNilTrue, e) - case *map[uint32]int: - fastpathTV.EncMapUint32IntV(*v, fastpathCheckNilTrue, e) - - case map[uint32]int8: - fastpathTV.EncMapUint32Int8V(v, fastpathCheckNilTrue, e) - case *map[uint32]int8: - fastpathTV.EncMapUint32Int8V(*v, fastpathCheckNilTrue, e) - - case map[uint32]int16: - fastpathTV.EncMapUint32Int16V(v, fastpathCheckNilTrue, e) - case *map[uint32]int16: - fastpathTV.EncMapUint32Int16V(*v, fastpathCheckNilTrue, e) - - case map[uint32]int32: - fastpathTV.EncMapUint32Int32V(v, fastpathCheckNilTrue, e) - case *map[uint32]int32: - fastpathTV.EncMapUint32Int32V(*v, fastpathCheckNilTrue, e) - - case map[uint32]int64: - fastpathTV.EncMapUint32Int64V(v, fastpathCheckNilTrue, e) - case *map[uint32]int64: - fastpathTV.EncMapUint32Int64V(*v, fastpathCheckNilTrue, e) - - case map[uint32]float32: - fastpathTV.EncMapUint32Float32V(v, fastpathCheckNilTrue, e) - case *map[uint32]float32: - fastpathTV.EncMapUint32Float32V(*v, fastpathCheckNilTrue, e) - - case map[uint32]float64: - fastpathTV.EncMapUint32Float64V(v, fastpathCheckNilTrue, e) - case *map[uint32]float64: - 
fastpathTV.EncMapUint32Float64V(*v, fastpathCheckNilTrue, e) - - case map[uint32]bool: - fastpathTV.EncMapUint32BoolV(v, fastpathCheckNilTrue, e) - case *map[uint32]bool: - fastpathTV.EncMapUint32BoolV(*v, fastpathCheckNilTrue, e) - - case map[uint64]interface{}: - fastpathTV.EncMapUint64IntfV(v, fastpathCheckNilTrue, e) - case *map[uint64]interface{}: - fastpathTV.EncMapUint64IntfV(*v, fastpathCheckNilTrue, e) - - case map[uint64]string: - fastpathTV.EncMapUint64StringV(v, fastpathCheckNilTrue, e) - case *map[uint64]string: - fastpathTV.EncMapUint64StringV(*v, fastpathCheckNilTrue, e) - - case map[uint64]uint: - fastpathTV.EncMapUint64UintV(v, fastpathCheckNilTrue, e) - case *map[uint64]uint: - fastpathTV.EncMapUint64UintV(*v, fastpathCheckNilTrue, e) - - case map[uint64]uint8: - fastpathTV.EncMapUint64Uint8V(v, fastpathCheckNilTrue, e) - case *map[uint64]uint8: - fastpathTV.EncMapUint64Uint8V(*v, fastpathCheckNilTrue, e) - - case map[uint64]uint16: - fastpathTV.EncMapUint64Uint16V(v, fastpathCheckNilTrue, e) - case *map[uint64]uint16: - fastpathTV.EncMapUint64Uint16V(*v, fastpathCheckNilTrue, e) - - case map[uint64]uint32: - fastpathTV.EncMapUint64Uint32V(v, fastpathCheckNilTrue, e) - case *map[uint64]uint32: - fastpathTV.EncMapUint64Uint32V(*v, fastpathCheckNilTrue, e) - - case map[uint64]uint64: - fastpathTV.EncMapUint64Uint64V(v, fastpathCheckNilTrue, e) - case *map[uint64]uint64: - fastpathTV.EncMapUint64Uint64V(*v, fastpathCheckNilTrue, e) - - case map[uint64]uintptr: - fastpathTV.EncMapUint64UintptrV(v, fastpathCheckNilTrue, e) - case *map[uint64]uintptr: - fastpathTV.EncMapUint64UintptrV(*v, fastpathCheckNilTrue, e) - - case map[uint64]int: - fastpathTV.EncMapUint64IntV(v, fastpathCheckNilTrue, e) - case *map[uint64]int: - fastpathTV.EncMapUint64IntV(*v, fastpathCheckNilTrue, e) - - case map[uint64]int8: - fastpathTV.EncMapUint64Int8V(v, fastpathCheckNilTrue, e) - case *map[uint64]int8: - fastpathTV.EncMapUint64Int8V(*v, fastpathCheckNilTrue, e) - - case map[uint64]int16: - fastpathTV.EncMapUint64Int16V(v, fastpathCheckNilTrue, e) - case *map[uint64]int16: - fastpathTV.EncMapUint64Int16V(*v, fastpathCheckNilTrue, e) - - case map[uint64]int32: - fastpathTV.EncMapUint64Int32V(v, fastpathCheckNilTrue, e) - case *map[uint64]int32: - fastpathTV.EncMapUint64Int32V(*v, fastpathCheckNilTrue, e) - - case map[uint64]int64: - fastpathTV.EncMapUint64Int64V(v, fastpathCheckNilTrue, e) - case *map[uint64]int64: - fastpathTV.EncMapUint64Int64V(*v, fastpathCheckNilTrue, e) - - case map[uint64]float32: - fastpathTV.EncMapUint64Float32V(v, fastpathCheckNilTrue, e) - case *map[uint64]float32: - fastpathTV.EncMapUint64Float32V(*v, fastpathCheckNilTrue, e) - - case map[uint64]float64: - fastpathTV.EncMapUint64Float64V(v, fastpathCheckNilTrue, e) - case *map[uint64]float64: - fastpathTV.EncMapUint64Float64V(*v, fastpathCheckNilTrue, e) - - case map[uint64]bool: - fastpathTV.EncMapUint64BoolV(v, fastpathCheckNilTrue, e) - case *map[uint64]bool: - fastpathTV.EncMapUint64BoolV(*v, fastpathCheckNilTrue, e) - - case map[uintptr]interface{}: - fastpathTV.EncMapUintptrIntfV(v, fastpathCheckNilTrue, e) - case *map[uintptr]interface{}: - fastpathTV.EncMapUintptrIntfV(*v, fastpathCheckNilTrue, e) - - case map[uintptr]string: - fastpathTV.EncMapUintptrStringV(v, fastpathCheckNilTrue, e) - case *map[uintptr]string: - fastpathTV.EncMapUintptrStringV(*v, fastpathCheckNilTrue, e) - - case map[uintptr]uint: - fastpathTV.EncMapUintptrUintV(v, fastpathCheckNilTrue, e) - case *map[uintptr]uint: - 
fastpathTV.EncMapUintptrUintV(*v, fastpathCheckNilTrue, e) - - case map[uintptr]uint8: - fastpathTV.EncMapUintptrUint8V(v, fastpathCheckNilTrue, e) - case *map[uintptr]uint8: - fastpathTV.EncMapUintptrUint8V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]uint16: - fastpathTV.EncMapUintptrUint16V(v, fastpathCheckNilTrue, e) - case *map[uintptr]uint16: - fastpathTV.EncMapUintptrUint16V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]uint32: - fastpathTV.EncMapUintptrUint32V(v, fastpathCheckNilTrue, e) - case *map[uintptr]uint32: - fastpathTV.EncMapUintptrUint32V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]uint64: - fastpathTV.EncMapUintptrUint64V(v, fastpathCheckNilTrue, e) - case *map[uintptr]uint64: - fastpathTV.EncMapUintptrUint64V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]uintptr: - fastpathTV.EncMapUintptrUintptrV(v, fastpathCheckNilTrue, e) - case *map[uintptr]uintptr: - fastpathTV.EncMapUintptrUintptrV(*v, fastpathCheckNilTrue, e) - - case map[uintptr]int: - fastpathTV.EncMapUintptrIntV(v, fastpathCheckNilTrue, e) - case *map[uintptr]int: - fastpathTV.EncMapUintptrIntV(*v, fastpathCheckNilTrue, e) - - case map[uintptr]int8: - fastpathTV.EncMapUintptrInt8V(v, fastpathCheckNilTrue, e) - case *map[uintptr]int8: - fastpathTV.EncMapUintptrInt8V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]int16: - fastpathTV.EncMapUintptrInt16V(v, fastpathCheckNilTrue, e) - case *map[uintptr]int16: - fastpathTV.EncMapUintptrInt16V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]int32: - fastpathTV.EncMapUintptrInt32V(v, fastpathCheckNilTrue, e) - case *map[uintptr]int32: - fastpathTV.EncMapUintptrInt32V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]int64: - fastpathTV.EncMapUintptrInt64V(v, fastpathCheckNilTrue, e) - case *map[uintptr]int64: - fastpathTV.EncMapUintptrInt64V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]float32: - fastpathTV.EncMapUintptrFloat32V(v, fastpathCheckNilTrue, e) - case *map[uintptr]float32: - fastpathTV.EncMapUintptrFloat32V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]float64: - fastpathTV.EncMapUintptrFloat64V(v, fastpathCheckNilTrue, e) - case *map[uintptr]float64: - fastpathTV.EncMapUintptrFloat64V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]bool: - fastpathTV.EncMapUintptrBoolV(v, fastpathCheckNilTrue, e) - case *map[uintptr]bool: - fastpathTV.EncMapUintptrBoolV(*v, fastpathCheckNilTrue, e) - - case map[int]interface{}: - fastpathTV.EncMapIntIntfV(v, fastpathCheckNilTrue, e) - case *map[int]interface{}: - fastpathTV.EncMapIntIntfV(*v, fastpathCheckNilTrue, e) - - case map[int]string: - fastpathTV.EncMapIntStringV(v, fastpathCheckNilTrue, e) - case *map[int]string: - fastpathTV.EncMapIntStringV(*v, fastpathCheckNilTrue, e) - - case map[int]uint: - fastpathTV.EncMapIntUintV(v, fastpathCheckNilTrue, e) - case *map[int]uint: - fastpathTV.EncMapIntUintV(*v, fastpathCheckNilTrue, e) - - case map[int]uint8: - fastpathTV.EncMapIntUint8V(v, fastpathCheckNilTrue, e) - case *map[int]uint8: - fastpathTV.EncMapIntUint8V(*v, fastpathCheckNilTrue, e) - - case map[int]uint16: - fastpathTV.EncMapIntUint16V(v, fastpathCheckNilTrue, e) - case *map[int]uint16: - fastpathTV.EncMapIntUint16V(*v, fastpathCheckNilTrue, e) - - case map[int]uint32: - fastpathTV.EncMapIntUint32V(v, fastpathCheckNilTrue, e) - case *map[int]uint32: - fastpathTV.EncMapIntUint32V(*v, fastpathCheckNilTrue, e) - - case map[int]uint64: - fastpathTV.EncMapIntUint64V(v, fastpathCheckNilTrue, e) - case *map[int]uint64: - fastpathTV.EncMapIntUint64V(*v, fastpathCheckNilTrue, e) - - 
case map[int]uintptr: - fastpathTV.EncMapIntUintptrV(v, fastpathCheckNilTrue, e) - case *map[int]uintptr: - fastpathTV.EncMapIntUintptrV(*v, fastpathCheckNilTrue, e) - - case map[int]int: - fastpathTV.EncMapIntIntV(v, fastpathCheckNilTrue, e) - case *map[int]int: - fastpathTV.EncMapIntIntV(*v, fastpathCheckNilTrue, e) - - case map[int]int8: - fastpathTV.EncMapIntInt8V(v, fastpathCheckNilTrue, e) - case *map[int]int8: - fastpathTV.EncMapIntInt8V(*v, fastpathCheckNilTrue, e) - - case map[int]int16: - fastpathTV.EncMapIntInt16V(v, fastpathCheckNilTrue, e) - case *map[int]int16: - fastpathTV.EncMapIntInt16V(*v, fastpathCheckNilTrue, e) - - case map[int]int32: - fastpathTV.EncMapIntInt32V(v, fastpathCheckNilTrue, e) - case *map[int]int32: - fastpathTV.EncMapIntInt32V(*v, fastpathCheckNilTrue, e) - - case map[int]int64: - fastpathTV.EncMapIntInt64V(v, fastpathCheckNilTrue, e) - case *map[int]int64: - fastpathTV.EncMapIntInt64V(*v, fastpathCheckNilTrue, e) - - case map[int]float32: - fastpathTV.EncMapIntFloat32V(v, fastpathCheckNilTrue, e) - case *map[int]float32: - fastpathTV.EncMapIntFloat32V(*v, fastpathCheckNilTrue, e) - - case map[int]float64: - fastpathTV.EncMapIntFloat64V(v, fastpathCheckNilTrue, e) - case *map[int]float64: - fastpathTV.EncMapIntFloat64V(*v, fastpathCheckNilTrue, e) - - case map[int]bool: - fastpathTV.EncMapIntBoolV(v, fastpathCheckNilTrue, e) - case *map[int]bool: - fastpathTV.EncMapIntBoolV(*v, fastpathCheckNilTrue, e) - - case map[int8]interface{}: - fastpathTV.EncMapInt8IntfV(v, fastpathCheckNilTrue, e) - case *map[int8]interface{}: - fastpathTV.EncMapInt8IntfV(*v, fastpathCheckNilTrue, e) - - case map[int8]string: - fastpathTV.EncMapInt8StringV(v, fastpathCheckNilTrue, e) - case *map[int8]string: - fastpathTV.EncMapInt8StringV(*v, fastpathCheckNilTrue, e) - - case map[int8]uint: - fastpathTV.EncMapInt8UintV(v, fastpathCheckNilTrue, e) - case *map[int8]uint: - fastpathTV.EncMapInt8UintV(*v, fastpathCheckNilTrue, e) - - case map[int8]uint8: - fastpathTV.EncMapInt8Uint8V(v, fastpathCheckNilTrue, e) - case *map[int8]uint8: - fastpathTV.EncMapInt8Uint8V(*v, fastpathCheckNilTrue, e) - - case map[int8]uint16: - fastpathTV.EncMapInt8Uint16V(v, fastpathCheckNilTrue, e) - case *map[int8]uint16: - fastpathTV.EncMapInt8Uint16V(*v, fastpathCheckNilTrue, e) - - case map[int8]uint32: - fastpathTV.EncMapInt8Uint32V(v, fastpathCheckNilTrue, e) - case *map[int8]uint32: - fastpathTV.EncMapInt8Uint32V(*v, fastpathCheckNilTrue, e) - - case map[int8]uint64: - fastpathTV.EncMapInt8Uint64V(v, fastpathCheckNilTrue, e) - case *map[int8]uint64: - fastpathTV.EncMapInt8Uint64V(*v, fastpathCheckNilTrue, e) - - case map[int8]uintptr: - fastpathTV.EncMapInt8UintptrV(v, fastpathCheckNilTrue, e) - case *map[int8]uintptr: - fastpathTV.EncMapInt8UintptrV(*v, fastpathCheckNilTrue, e) - - case map[int8]int: - fastpathTV.EncMapInt8IntV(v, fastpathCheckNilTrue, e) - case *map[int8]int: - fastpathTV.EncMapInt8IntV(*v, fastpathCheckNilTrue, e) - - case map[int8]int8: - fastpathTV.EncMapInt8Int8V(v, fastpathCheckNilTrue, e) - case *map[int8]int8: - fastpathTV.EncMapInt8Int8V(*v, fastpathCheckNilTrue, e) - - case map[int8]int16: - fastpathTV.EncMapInt8Int16V(v, fastpathCheckNilTrue, e) - case *map[int8]int16: - fastpathTV.EncMapInt8Int16V(*v, fastpathCheckNilTrue, e) - - case map[int8]int32: - fastpathTV.EncMapInt8Int32V(v, fastpathCheckNilTrue, e) - case *map[int8]int32: - fastpathTV.EncMapInt8Int32V(*v, fastpathCheckNilTrue, e) - - case map[int8]int64: - fastpathTV.EncMapInt8Int64V(v, fastpathCheckNilTrue, e) 
- case *map[int8]int64: - fastpathTV.EncMapInt8Int64V(*v, fastpathCheckNilTrue, e) - - case map[int8]float32: - fastpathTV.EncMapInt8Float32V(v, fastpathCheckNilTrue, e) - case *map[int8]float32: - fastpathTV.EncMapInt8Float32V(*v, fastpathCheckNilTrue, e) - - case map[int8]float64: - fastpathTV.EncMapInt8Float64V(v, fastpathCheckNilTrue, e) - case *map[int8]float64: - fastpathTV.EncMapInt8Float64V(*v, fastpathCheckNilTrue, e) - - case map[int8]bool: - fastpathTV.EncMapInt8BoolV(v, fastpathCheckNilTrue, e) - case *map[int8]bool: - fastpathTV.EncMapInt8BoolV(*v, fastpathCheckNilTrue, e) - - case map[int16]interface{}: - fastpathTV.EncMapInt16IntfV(v, fastpathCheckNilTrue, e) - case *map[int16]interface{}: - fastpathTV.EncMapInt16IntfV(*v, fastpathCheckNilTrue, e) - - case map[int16]string: - fastpathTV.EncMapInt16StringV(v, fastpathCheckNilTrue, e) - case *map[int16]string: - fastpathTV.EncMapInt16StringV(*v, fastpathCheckNilTrue, e) - - case map[int16]uint: - fastpathTV.EncMapInt16UintV(v, fastpathCheckNilTrue, e) - case *map[int16]uint: - fastpathTV.EncMapInt16UintV(*v, fastpathCheckNilTrue, e) - - case map[int16]uint8: - fastpathTV.EncMapInt16Uint8V(v, fastpathCheckNilTrue, e) - case *map[int16]uint8: - fastpathTV.EncMapInt16Uint8V(*v, fastpathCheckNilTrue, e) - - case map[int16]uint16: - fastpathTV.EncMapInt16Uint16V(v, fastpathCheckNilTrue, e) - case *map[int16]uint16: - fastpathTV.EncMapInt16Uint16V(*v, fastpathCheckNilTrue, e) - - case map[int16]uint32: - fastpathTV.EncMapInt16Uint32V(v, fastpathCheckNilTrue, e) - case *map[int16]uint32: - fastpathTV.EncMapInt16Uint32V(*v, fastpathCheckNilTrue, e) - - case map[int16]uint64: - fastpathTV.EncMapInt16Uint64V(v, fastpathCheckNilTrue, e) - case *map[int16]uint64: - fastpathTV.EncMapInt16Uint64V(*v, fastpathCheckNilTrue, e) - - case map[int16]uintptr: - fastpathTV.EncMapInt16UintptrV(v, fastpathCheckNilTrue, e) - case *map[int16]uintptr: - fastpathTV.EncMapInt16UintptrV(*v, fastpathCheckNilTrue, e) - - case map[int16]int: - fastpathTV.EncMapInt16IntV(v, fastpathCheckNilTrue, e) - case *map[int16]int: - fastpathTV.EncMapInt16IntV(*v, fastpathCheckNilTrue, e) - - case map[int16]int8: - fastpathTV.EncMapInt16Int8V(v, fastpathCheckNilTrue, e) - case *map[int16]int8: - fastpathTV.EncMapInt16Int8V(*v, fastpathCheckNilTrue, e) - - case map[int16]int16: - fastpathTV.EncMapInt16Int16V(v, fastpathCheckNilTrue, e) - case *map[int16]int16: - fastpathTV.EncMapInt16Int16V(*v, fastpathCheckNilTrue, e) - - case map[int16]int32: - fastpathTV.EncMapInt16Int32V(v, fastpathCheckNilTrue, e) - case *map[int16]int32: - fastpathTV.EncMapInt16Int32V(*v, fastpathCheckNilTrue, e) - - case map[int16]int64: - fastpathTV.EncMapInt16Int64V(v, fastpathCheckNilTrue, e) - case *map[int16]int64: - fastpathTV.EncMapInt16Int64V(*v, fastpathCheckNilTrue, e) - - case map[int16]float32: - fastpathTV.EncMapInt16Float32V(v, fastpathCheckNilTrue, e) - case *map[int16]float32: - fastpathTV.EncMapInt16Float32V(*v, fastpathCheckNilTrue, e) - - case map[int16]float64: - fastpathTV.EncMapInt16Float64V(v, fastpathCheckNilTrue, e) - case *map[int16]float64: - fastpathTV.EncMapInt16Float64V(*v, fastpathCheckNilTrue, e) - - case map[int16]bool: - fastpathTV.EncMapInt16BoolV(v, fastpathCheckNilTrue, e) - case *map[int16]bool: - fastpathTV.EncMapInt16BoolV(*v, fastpathCheckNilTrue, e) - - case map[int32]interface{}: - fastpathTV.EncMapInt32IntfV(v, fastpathCheckNilTrue, e) - case *map[int32]interface{}: - fastpathTV.EncMapInt32IntfV(*v, fastpathCheckNilTrue, e) - - case map[int32]string: - 
fastpathTV.EncMapInt32StringV(v, fastpathCheckNilTrue, e) - case *map[int32]string: - fastpathTV.EncMapInt32StringV(*v, fastpathCheckNilTrue, e) - - case map[int32]uint: - fastpathTV.EncMapInt32UintV(v, fastpathCheckNilTrue, e) - case *map[int32]uint: - fastpathTV.EncMapInt32UintV(*v, fastpathCheckNilTrue, e) - - case map[int32]uint8: - fastpathTV.EncMapInt32Uint8V(v, fastpathCheckNilTrue, e) - case *map[int32]uint8: - fastpathTV.EncMapInt32Uint8V(*v, fastpathCheckNilTrue, e) - - case map[int32]uint16: - fastpathTV.EncMapInt32Uint16V(v, fastpathCheckNilTrue, e) - case *map[int32]uint16: - fastpathTV.EncMapInt32Uint16V(*v, fastpathCheckNilTrue, e) - - case map[int32]uint32: - fastpathTV.EncMapInt32Uint32V(v, fastpathCheckNilTrue, e) - case *map[int32]uint32: - fastpathTV.EncMapInt32Uint32V(*v, fastpathCheckNilTrue, e) - - case map[int32]uint64: - fastpathTV.EncMapInt32Uint64V(v, fastpathCheckNilTrue, e) - case *map[int32]uint64: - fastpathTV.EncMapInt32Uint64V(*v, fastpathCheckNilTrue, e) - - case map[int32]uintptr: - fastpathTV.EncMapInt32UintptrV(v, fastpathCheckNilTrue, e) - case *map[int32]uintptr: - fastpathTV.EncMapInt32UintptrV(*v, fastpathCheckNilTrue, e) - - case map[int32]int: - fastpathTV.EncMapInt32IntV(v, fastpathCheckNilTrue, e) - case *map[int32]int: - fastpathTV.EncMapInt32IntV(*v, fastpathCheckNilTrue, e) - - case map[int32]int8: - fastpathTV.EncMapInt32Int8V(v, fastpathCheckNilTrue, e) - case *map[int32]int8: - fastpathTV.EncMapInt32Int8V(*v, fastpathCheckNilTrue, e) - - case map[int32]int16: - fastpathTV.EncMapInt32Int16V(v, fastpathCheckNilTrue, e) - case *map[int32]int16: - fastpathTV.EncMapInt32Int16V(*v, fastpathCheckNilTrue, e) - - case map[int32]int32: - fastpathTV.EncMapInt32Int32V(v, fastpathCheckNilTrue, e) - case *map[int32]int32: - fastpathTV.EncMapInt32Int32V(*v, fastpathCheckNilTrue, e) - - case map[int32]int64: - fastpathTV.EncMapInt32Int64V(v, fastpathCheckNilTrue, e) - case *map[int32]int64: - fastpathTV.EncMapInt32Int64V(*v, fastpathCheckNilTrue, e) - - case map[int32]float32: - fastpathTV.EncMapInt32Float32V(v, fastpathCheckNilTrue, e) - case *map[int32]float32: - fastpathTV.EncMapInt32Float32V(*v, fastpathCheckNilTrue, e) - - case map[int32]float64: - fastpathTV.EncMapInt32Float64V(v, fastpathCheckNilTrue, e) - case *map[int32]float64: - fastpathTV.EncMapInt32Float64V(*v, fastpathCheckNilTrue, e) - - case map[int32]bool: - fastpathTV.EncMapInt32BoolV(v, fastpathCheckNilTrue, e) - case *map[int32]bool: - fastpathTV.EncMapInt32BoolV(*v, fastpathCheckNilTrue, e) - - case map[int64]interface{}: - fastpathTV.EncMapInt64IntfV(v, fastpathCheckNilTrue, e) - case *map[int64]interface{}: - fastpathTV.EncMapInt64IntfV(*v, fastpathCheckNilTrue, e) - - case map[int64]string: - fastpathTV.EncMapInt64StringV(v, fastpathCheckNilTrue, e) - case *map[int64]string: - fastpathTV.EncMapInt64StringV(*v, fastpathCheckNilTrue, e) - - case map[int64]uint: - fastpathTV.EncMapInt64UintV(v, fastpathCheckNilTrue, e) - case *map[int64]uint: - fastpathTV.EncMapInt64UintV(*v, fastpathCheckNilTrue, e) - - case map[int64]uint8: - fastpathTV.EncMapInt64Uint8V(v, fastpathCheckNilTrue, e) - case *map[int64]uint8: - fastpathTV.EncMapInt64Uint8V(*v, fastpathCheckNilTrue, e) - - case map[int64]uint16: - fastpathTV.EncMapInt64Uint16V(v, fastpathCheckNilTrue, e) - case *map[int64]uint16: - fastpathTV.EncMapInt64Uint16V(*v, fastpathCheckNilTrue, e) - - case map[int64]uint32: - fastpathTV.EncMapInt64Uint32V(v, fastpathCheckNilTrue, e) - case *map[int64]uint32: - fastpathTV.EncMapInt64Uint32V(*v, 
fastpathCheckNilTrue, e) - - case map[int64]uint64: - fastpathTV.EncMapInt64Uint64V(v, fastpathCheckNilTrue, e) - case *map[int64]uint64: - fastpathTV.EncMapInt64Uint64V(*v, fastpathCheckNilTrue, e) - - case map[int64]uintptr: - fastpathTV.EncMapInt64UintptrV(v, fastpathCheckNilTrue, e) - case *map[int64]uintptr: - fastpathTV.EncMapInt64UintptrV(*v, fastpathCheckNilTrue, e) - - case map[int64]int: - fastpathTV.EncMapInt64IntV(v, fastpathCheckNilTrue, e) - case *map[int64]int: - fastpathTV.EncMapInt64IntV(*v, fastpathCheckNilTrue, e) - - case map[int64]int8: - fastpathTV.EncMapInt64Int8V(v, fastpathCheckNilTrue, e) - case *map[int64]int8: - fastpathTV.EncMapInt64Int8V(*v, fastpathCheckNilTrue, e) - - case map[int64]int16: - fastpathTV.EncMapInt64Int16V(v, fastpathCheckNilTrue, e) - case *map[int64]int16: - fastpathTV.EncMapInt64Int16V(*v, fastpathCheckNilTrue, e) - - case map[int64]int32: - fastpathTV.EncMapInt64Int32V(v, fastpathCheckNilTrue, e) - case *map[int64]int32: - fastpathTV.EncMapInt64Int32V(*v, fastpathCheckNilTrue, e) - - case map[int64]int64: - fastpathTV.EncMapInt64Int64V(v, fastpathCheckNilTrue, e) - case *map[int64]int64: - fastpathTV.EncMapInt64Int64V(*v, fastpathCheckNilTrue, e) - - case map[int64]float32: - fastpathTV.EncMapInt64Float32V(v, fastpathCheckNilTrue, e) - case *map[int64]float32: - fastpathTV.EncMapInt64Float32V(*v, fastpathCheckNilTrue, e) - - case map[int64]float64: - fastpathTV.EncMapInt64Float64V(v, fastpathCheckNilTrue, e) - case *map[int64]float64: - fastpathTV.EncMapInt64Float64V(*v, fastpathCheckNilTrue, e) - - case map[int64]bool: - fastpathTV.EncMapInt64BoolV(v, fastpathCheckNilTrue, e) - case *map[int64]bool: - fastpathTV.EncMapInt64BoolV(*v, fastpathCheckNilTrue, e) - - case map[bool]interface{}: - fastpathTV.EncMapBoolIntfV(v, fastpathCheckNilTrue, e) - case *map[bool]interface{}: - fastpathTV.EncMapBoolIntfV(*v, fastpathCheckNilTrue, e) - - case map[bool]string: - fastpathTV.EncMapBoolStringV(v, fastpathCheckNilTrue, e) - case *map[bool]string: - fastpathTV.EncMapBoolStringV(*v, fastpathCheckNilTrue, e) - - case map[bool]uint: - fastpathTV.EncMapBoolUintV(v, fastpathCheckNilTrue, e) - case *map[bool]uint: - fastpathTV.EncMapBoolUintV(*v, fastpathCheckNilTrue, e) - - case map[bool]uint8: - fastpathTV.EncMapBoolUint8V(v, fastpathCheckNilTrue, e) - case *map[bool]uint8: - fastpathTV.EncMapBoolUint8V(*v, fastpathCheckNilTrue, e) - - case map[bool]uint16: - fastpathTV.EncMapBoolUint16V(v, fastpathCheckNilTrue, e) - case *map[bool]uint16: - fastpathTV.EncMapBoolUint16V(*v, fastpathCheckNilTrue, e) - - case map[bool]uint32: - fastpathTV.EncMapBoolUint32V(v, fastpathCheckNilTrue, e) - case *map[bool]uint32: - fastpathTV.EncMapBoolUint32V(*v, fastpathCheckNilTrue, e) - - case map[bool]uint64: - fastpathTV.EncMapBoolUint64V(v, fastpathCheckNilTrue, e) - case *map[bool]uint64: - fastpathTV.EncMapBoolUint64V(*v, fastpathCheckNilTrue, e) - - case map[bool]uintptr: - fastpathTV.EncMapBoolUintptrV(v, fastpathCheckNilTrue, e) - case *map[bool]uintptr: - fastpathTV.EncMapBoolUintptrV(*v, fastpathCheckNilTrue, e) - - case map[bool]int: - fastpathTV.EncMapBoolIntV(v, fastpathCheckNilTrue, e) - case *map[bool]int: - fastpathTV.EncMapBoolIntV(*v, fastpathCheckNilTrue, e) - - case map[bool]int8: - fastpathTV.EncMapBoolInt8V(v, fastpathCheckNilTrue, e) - case *map[bool]int8: - fastpathTV.EncMapBoolInt8V(*v, fastpathCheckNilTrue, e) - - case map[bool]int16: - fastpathTV.EncMapBoolInt16V(v, fastpathCheckNilTrue, e) - case *map[bool]int16: - 
fastpathTV.EncMapBoolInt16V(*v, fastpathCheckNilTrue, e) - - case map[bool]int32: - fastpathTV.EncMapBoolInt32V(v, fastpathCheckNilTrue, e) - case *map[bool]int32: - fastpathTV.EncMapBoolInt32V(*v, fastpathCheckNilTrue, e) - - case map[bool]int64: - fastpathTV.EncMapBoolInt64V(v, fastpathCheckNilTrue, e) - case *map[bool]int64: - fastpathTV.EncMapBoolInt64V(*v, fastpathCheckNilTrue, e) - - case map[bool]float32: - fastpathTV.EncMapBoolFloat32V(v, fastpathCheckNilTrue, e) - case *map[bool]float32: - fastpathTV.EncMapBoolFloat32V(*v, fastpathCheckNilTrue, e) - - case map[bool]float64: - fastpathTV.EncMapBoolFloat64V(v, fastpathCheckNilTrue, e) - case *map[bool]float64: - fastpathTV.EncMapBoolFloat64V(*v, fastpathCheckNilTrue, e) - - case map[bool]bool: - fastpathTV.EncMapBoolBoolV(v, fastpathCheckNilTrue, e) - case *map[bool]bool: - fastpathTV.EncMapBoolBoolV(*v, fastpathCheckNilTrue, e) - - default: - _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} - -// -- -- fast path functions - -func (f *encFnInfo) fastpathEncSliceIntfR(rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceIntfV(rv.Interface().([]interface{}), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.EncSliceIntfV(rv.Interface().([]interface{}), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) EncSliceIntfV(v []interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - e.encode(v2) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (_ fastpathT) EncAsMapSliceIntfV(v []interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for j, v2 := range v { - if cr != nil { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } - e.encode(v2) - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceStringR(rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceStringV(rv.Interface().([]string), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.EncSliceStringV(rv.Interface().([]string), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) EncSliceStringV(v []string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeString(c_UTF8, v2) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (_ fastpathT) EncAsMapSliceStringV(v []string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for j, v2 := range v { - if cr != nil { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } - ee.EncodeString(c_UTF8, v2) - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceFloat32R(rv 
reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceFloat32V(rv.Interface().([]float32), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.EncSliceFloat32V(rv.Interface().([]float32), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) EncSliceFloat32V(v []float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeFloat32(v2) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (_ fastpathT) EncAsMapSliceFloat32V(v []float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for j, v2 := range v { - if cr != nil { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } - ee.EncodeFloat32(v2) - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceFloat64R(rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceFloat64V(rv.Interface().([]float64), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.EncSliceFloat64V(rv.Interface().([]float64), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) EncSliceFloat64V(v []float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeFloat64(v2) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (_ fastpathT) EncAsMapSliceFloat64V(v []float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for j, v2 := range v { - if cr != nil { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } - ee.EncodeFloat64(v2) - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceUintR(rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceUintV(rv.Interface().([]uint), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.EncSliceUintV(rv.Interface().([]uint), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) EncSliceUintV(v []uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeUint(uint64(v2)) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (_ fastpathT) EncAsMapSliceUintV(v []uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for j, v2 := range v { - if cr != nil { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } - ee.EncodeUint(uint64(v2)) - } - if cr != nil { - 
cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceUint16R(rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceUint16V(rv.Interface().([]uint16), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.EncSliceUint16V(rv.Interface().([]uint16), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) EncSliceUint16V(v []uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeUint(uint64(v2)) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (_ fastpathT) EncAsMapSliceUint16V(v []uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for j, v2 := range v { - if cr != nil { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } - ee.EncodeUint(uint64(v2)) - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceUint32R(rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceUint32V(rv.Interface().([]uint32), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.EncSliceUint32V(rv.Interface().([]uint32), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) EncSliceUint32V(v []uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeUint(uint64(v2)) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (_ fastpathT) EncAsMapSliceUint32V(v []uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for j, v2 := range v { - if cr != nil { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } - ee.EncodeUint(uint64(v2)) - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceUint64R(rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceUint64V(rv.Interface().([]uint64), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.EncSliceUint64V(rv.Interface().([]uint64), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) EncSliceUint64V(v []uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeUint(uint64(v2)) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (_ fastpathT) EncAsMapSliceUint64V(v []uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for j, v2 := range v { - if cr != nil { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - 
cr.sendContainerState(containerMapValue) - } - } - ee.EncodeUint(uint64(v2)) - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceUintptrR(rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceUintptrV(rv.Interface().([]uintptr), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.EncSliceUintptrV(rv.Interface().([]uintptr), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) EncSliceUintptrV(v []uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - e.encode(v2) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (_ fastpathT) EncAsMapSliceUintptrV(v []uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for j, v2 := range v { - if cr != nil { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } - e.encode(v2) - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceIntR(rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceIntV(rv.Interface().([]int), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.EncSliceIntV(rv.Interface().([]int), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) EncSliceIntV(v []int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeInt(int64(v2)) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (_ fastpathT) EncAsMapSliceIntV(v []int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for j, v2 := range v { - if cr != nil { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } - ee.EncodeInt(int64(v2)) - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceInt8R(rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceInt8V(rv.Interface().([]int8), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.EncSliceInt8V(rv.Interface().([]int8), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) EncSliceInt8V(v []int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeInt(int64(v2)) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (_ fastpathT) EncAsMapSliceInt8V(v []int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for j, v2 := range v { - if cr != nil { - if j%2 == 0 { - 
cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } - ee.EncodeInt(int64(v2)) - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceInt16R(rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceInt16V(rv.Interface().([]int16), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.EncSliceInt16V(rv.Interface().([]int16), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) EncSliceInt16V(v []int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeInt(int64(v2)) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (_ fastpathT) EncAsMapSliceInt16V(v []int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for j, v2 := range v { - if cr != nil { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } - ee.EncodeInt(int64(v2)) - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceInt32R(rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceInt32V(rv.Interface().([]int32), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.EncSliceInt32V(rv.Interface().([]int32), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) EncSliceInt32V(v []int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeInt(int64(v2)) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (_ fastpathT) EncAsMapSliceInt32V(v []int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for j, v2 := range v { - if cr != nil { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } - ee.EncodeInt(int64(v2)) - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceInt64R(rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceInt64V(rv.Interface().([]int64), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.EncSliceInt64V(rv.Interface().([]int64), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) EncSliceInt64V(v []int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeInt(int64(v2)) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (_ fastpathT) EncAsMapSliceInt64V(v []int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for 
j, v2 := range v { - if cr != nil { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } - ee.EncodeInt(int64(v2)) - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncSliceBoolR(rv reflect.Value) { - if f.ti.mbs { - fastpathTV.EncAsMapSliceBoolV(rv.Interface().([]bool), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.EncSliceBoolV(rv.Interface().([]bool), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) EncSliceBoolV(v []bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) - } - ee.EncodeBool(v2) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } -} - -func (_ fastpathT) EncAsMapSliceBoolV(v []bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for j, v2 := range v { - if cr != nil { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } - ee.EncodeBool(v2) - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfIntfR(rv reflect.Value) { - fastpathTV.EncMapIntfIntfV(rv.Interface().(map[interface{}]interface{}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfIntfV(v map[interface{}]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfStringR(rv reflect.Value) { - fastpathTV.EncMapIntfStringV(rv.Interface().(map[interface{}]string), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfStringV(v map[interface{}]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else 
{ - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfUintR(rv reflect.Value) { - fastpathTV.EncMapIntfUintV(rv.Interface().(map[interface{}]uint), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfUintV(v map[interface{}]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfUint8R(rv reflect.Value) { - fastpathTV.EncMapIntfUint8V(rv.Interface().(map[interface{}]uint8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfUint8V(v map[interface{}]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[v2[j].i]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapIntfUint16R(rv reflect.Value) { - fastpathTV.EncMapIntfUint16V(rv.Interface().(map[interface{}]uint16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapIntfUint16V(v map[interface{}]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } 
-			e.encode(v[v2[j].i])
-		}
-	} else {
-		for k2, v2 := range v {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			e.encode(k2)
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeUint(uint64(v2))
-		}
-	}
-	if cr != nil {
-		cr.sendContainerState(containerMapEnd)
-	}
-}
-
-func (f *encFnInfo) fastpathEncMapIntfUint32R(rv reflect.Value) {
-	fastpathTV.EncMapIntfUint32V(rv.Interface().(map[interface{}]uint32), fastpathCheckNilFalse, f.e)
-}
-func (_ fastpathT) EncMapIntfUint32V(v map[interface{}]uint32, checkNil bool, e *Encoder) {
-	ee := e.e
-	cr := e.cr
-	if checkNil && v == nil {
-		ee.EncodeNil()
-		return
-	}
-	ee.EncodeMapStart(len(v))
-	if e.h.Canonical {
-		var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
-		e2 := NewEncoderBytes(&mksv, e.hh)
-		v2 := make([]bytesI, len(v))
-		var i, l int
-		var vp *bytesI
-		for k2, _ := range v {
-			l = len(mksv)
-			e2.MustEncode(k2)
-			vp = &v2[i]
-			vp.v = mksv[l:]
-			vp.i = k2
-			i++
-		}
-		sort.Sort(bytesISlice(v2))
-		for j := range v2 {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			e.asis(v2[j].v)
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			e.encode(v[v2[j].i])
-		}
-	} else {
-		for k2, v2 := range v {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			e.encode(k2)
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeUint(uint64(v2))
-		}
-	}
-	if cr != nil {
-		cr.sendContainerState(containerMapEnd)
-	}
-}

[... the remaining generated fastpath map encoders deleted here — `EncMapIntfUint64V` through `EncMapUintUint32V`, one `fastpathEnc…R`/`Enc…V` pair per key/value type combination (keys: `interface{}`, `string`, `float32`, `float64`, `uint`, …; values: `interface{}`, `string`, `uint`, `uint8`, `uint16`, `uint32`, `uint64`, `uintptr`, `int`, `int8`, `int16`, `int32`, `int64`, `float32`, `float64`, `bool`) — follow the same template: in `Canonical` mode the keys are materialized and sorted before emission (via `bytesISlice`, `stringSlice`, `floatSlice`, or `uintSlice` depending on the key type), string keys additionally honor the `AsSymbols` handle option, and container state is reported through `cr.sendContainerState` around each map key, map value, and map end ...]
ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintUint64R(rv reflect.Value) { - fastpathTV.EncMapUintUint64V(rv.Interface().(map[uint]uint64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintUint64V(v map[uint]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintUintptrR(rv reflect.Value) { - fastpathTV.EncMapUintUintptrV(rv.Interface().(map[uint]uintptr), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintUintptrV(v map[uint]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[uint(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintIntR(rv reflect.Value) { - fastpathTV.EncMapUintIntV(rv.Interface().(map[uint]int), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintIntV(v map[uint]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint(k2)])) - } - } else { - for k2, v2 := range 
v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintInt8R(rv reflect.Value) { - fastpathTV.EncMapUintInt8V(rv.Interface().(map[uint]int8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintInt8V(v map[uint]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintInt16R(rv reflect.Value) { - fastpathTV.EncMapUintInt16V(rv.Interface().(map[uint]int16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintInt16V(v map[uint]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintInt32R(rv reflect.Value) { - fastpathTV.EncMapUintInt32V(rv.Interface().(map[uint]int32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintInt32V(v map[uint]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintInt64R(rv reflect.Value) { - fastpathTV.EncMapUintInt64V(rv.Interface().(map[uint]int64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintInt64V(v map[uint]int64, checkNil bool, e *Encoder) { - ee 
:= e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintFloat32R(rv reflect.Value) { - fastpathTV.EncMapUintFloat32V(rv.Interface().(map[uint]float32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintFloat32V(v map[uint]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v[uint(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintFloat64R(rv reflect.Value) { - fastpathTV.EncMapUintFloat64V(rv.Interface().(map[uint]float64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintFloat64V(v map[uint]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v[uint(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUintBoolR(rv reflect.Value) { - fastpathTV.EncMapUintBoolV(rv.Interface().(map[uint]bool), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUintBoolV(v map[uint]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v[uint(k2)]) - } - } else { - for k2, v2 := 
range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint8IntfR(rv reflect.Value) { - fastpathTV.EncMapUint8IntfV(rv.Interface().(map[uint8]interface{}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint8IntfV(v map[uint8]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[uint8(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint8StringR(rv reflect.Value) { - fastpathTV.EncMapUint8StringV(rv.Interface().(map[uint8]string), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint8StringV(v map[uint8]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v[uint8(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint8UintR(rv reflect.Value) { - fastpathTV.EncMapUint8UintV(rv.Interface().(map[uint8]uint), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint8UintV(v map[uint8]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint8Uint8R(rv reflect.Value) { - fastpathTV.EncMapUint8Uint8V(rv.Interface().(map[uint8]uint8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint8Uint8V(v map[uint8]uint8, 
checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint8Uint16R(rv reflect.Value) { - fastpathTV.EncMapUint8Uint16V(rv.Interface().(map[uint8]uint16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint8Uint16V(v map[uint8]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint8Uint32R(rv reflect.Value) { - fastpathTV.EncMapUint8Uint32V(rv.Interface().(map[uint8]uint32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint8Uint32V(v map[uint8]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint8Uint64R(rv reflect.Value) { - fastpathTV.EncMapUint8Uint64V(rv.Interface().(map[uint8]uint64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint8Uint64V(v map[uint8]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - 
cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint8UintptrR(rv reflect.Value) { - fastpathTV.EncMapUint8UintptrV(rv.Interface().(map[uint8]uintptr), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint8UintptrV(v map[uint8]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[uint8(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint8IntR(rv reflect.Value) { - fastpathTV.EncMapUint8IntV(rv.Interface().(map[uint8]int), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint8IntV(v map[uint8]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint8Int8R(rv reflect.Value) { - fastpathTV.EncMapUint8Int8V(rv.Interface().(map[uint8]int8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint8Int8V(v map[uint8]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint8Int16R(rv reflect.Value) { - 
fastpathTV.EncMapUint8Int16V(rv.Interface().(map[uint8]int16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint8Int16V(v map[uint8]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint8Int32R(rv reflect.Value) { - fastpathTV.EncMapUint8Int32V(rv.Interface().(map[uint8]int32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint8Int32V(v map[uint8]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint8Int64R(rv reflect.Value) { - fastpathTV.EncMapUint8Int64V(rv.Interface().(map[uint8]int64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint8Int64V(v map[uint8]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint8Float32R(rv reflect.Value) { - fastpathTV.EncMapUint8Float32V(rv.Interface().(map[uint8]float32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint8Float32V(v map[uint8]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != 
nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v[uint8(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint8Float64R(rv reflect.Value) { - fastpathTV.EncMapUint8Float64V(rv.Interface().(map[uint8]float64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint8Float64V(v map[uint8]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v[uint8(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint8BoolR(rv reflect.Value) { - fastpathTV.EncMapUint8BoolV(rv.Interface().(map[uint8]bool), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint8BoolV(v map[uint8]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v[uint8(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16IntfR(rv reflect.Value) { - fastpathTV.EncMapUint16IntfV(rv.Interface().(map[uint16]interface{}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16IntfV(v map[uint16]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[uint16(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) 
fastpathEncMapUint16StringR(rv reflect.Value) { - fastpathTV.EncMapUint16StringV(rv.Interface().(map[uint16]string), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16StringV(v map[uint16]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v[uint16(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16UintR(rv reflect.Value) { - fastpathTV.EncMapUint16UintV(rv.Interface().(map[uint16]uint), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16UintV(v map[uint16]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16Uint8R(rv reflect.Value) { - fastpathTV.EncMapUint16Uint8V(rv.Interface().(map[uint16]uint8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16Uint8V(v map[uint16]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16Uint16R(rv reflect.Value) { - fastpathTV.EncMapUint16Uint16V(rv.Interface().(map[uint16]uint16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16Uint16V(v map[uint16]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] 
= uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16Uint32R(rv reflect.Value) { - fastpathTV.EncMapUint16Uint32V(rv.Interface().(map[uint16]uint32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16Uint32V(v map[uint16]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16Uint64R(rv reflect.Value) { - fastpathTV.EncMapUint16Uint64V(rv.Interface().(map[uint16]uint64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16Uint64V(v map[uint16]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16UintptrR(rv reflect.Value) { - fastpathTV.EncMapUint16UintptrV(rv.Interface().(map[uint16]uintptr), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16UintptrV(v map[uint16]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[uint16(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - 
cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16IntR(rv reflect.Value) { - fastpathTV.EncMapUint16IntV(rv.Interface().(map[uint16]int), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16IntV(v map[uint16]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16Int8R(rv reflect.Value) { - fastpathTV.EncMapUint16Int8V(rv.Interface().(map[uint16]int8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16Int8V(v map[uint16]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16Int16R(rv reflect.Value) { - fastpathTV.EncMapUint16Int16V(rv.Interface().(map[uint16]int16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16Int16V(v map[uint16]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16Int32R(rv reflect.Value) { - fastpathTV.EncMapUint16Int32V(rv.Interface().(map[uint16]int32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16Int32V(v map[uint16]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - 
ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16Int64R(rv reflect.Value) { - fastpathTV.EncMapUint16Int64V(rv.Interface().(map[uint16]int64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16Int64V(v map[uint16]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[uint16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16Float32R(rv reflect.Value) { - fastpathTV.EncMapUint16Float32V(rv.Interface().(map[uint16]float32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16Float32V(v map[uint16]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v[uint16(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16Float64R(rv reflect.Value) { - fastpathTV.EncMapUint16Float64V(rv.Interface().(map[uint16]float64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16Float64V(v map[uint16]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v[uint16(k2)]) - } - } else { - for k2, v2 := range v { - if cr != 
nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint16BoolR(rv reflect.Value) { - fastpathTV.EncMapUint16BoolV(rv.Interface().(map[uint16]bool), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint16BoolV(v map[uint16]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v[uint16(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32IntfR(rv reflect.Value) { - fastpathTV.EncMapUint32IntfV(rv.Interface().(map[uint32]interface{}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32IntfV(v map[uint32]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[uint32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32StringR(rv reflect.Value) { - fastpathTV.EncMapUint32StringV(rv.Interface().(map[uint32]string), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32StringV(v map[uint32]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]uint64, len(v)) - var i int - for k, _ := range v { - v2[i] = uint64(k) - i++ - } - sort.Sort(uintSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v[uint32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapUint32UintR(rv reflect.Value) { - fastpathTV.EncMapUint32UintV(rv.Interface().(map[uint32]uint), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapUint32UintV(v map[uint32]uint, checkNil bool, e 
[vendor diff, continued: github.com/ugorji/go/codec fastpath.generated.go, deletion of the machine-generated fast-path map encoders for the key types uint32 and uint64. For each value type V in {interface{}, string, uint, uint8, uint16, uint32, uint64, uintptr, int, int8, int16, int32, int64, float32, float64, bool} the file defined a paired reflect shim

-func (f *encFnInfo) fastpathEncMap<K><V>R(rv reflect.Value) {
-	fastpathTV.EncMap<K><V>V(rv.Interface().(map[<K>]<V>), fastpathCheckNilFalse, f.e)
-}

and a typed encoder

-func (_ fastpathT) EncMap<K><V>V(v map[<K>]<V>, checkNil bool, e *Encoder) { ... }

whose body is always the same template: encode nil if checkNil && v == nil; call ee.EncodeMapStart(len(v)); when e.h.Canonical is set, copy the keys into a []uint64, sort them with sort.Sort(uintSlice(...)), and emit entries in sorted key order, otherwise range over the map directly; bracket each key and value with cr.sendContainerState(containerMapKey) / cr.sendContainerState(containerMapValue) when cr != nil; and close with cr.sendContainerState(containerMapEnd). Keys are written with ee.EncodeUint(uint64(k2)); values go through the matching ee.Encode* call (EncodeString(c_UTF8, ...), EncodeUint, EncodeInt, EncodeFloat32/64, EncodeBool), or through e.encode(...) for interface{} and uintptr values. The hunk then begins the map[uintptr]<V> family.]
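The only non-mechanical idea in all of these deletions is the Canonical branch: Go randomizes map iteration order, so producing a deterministic, byte-identical encoding requires sorting the keys first. Below is a minimal standalone sketch of that template, specialized for map[uint64]string; fmt.Printf stands in for the real ee.Encode* calls and the container-state signalling, and all names here are illustrative, not the codec package's API.

package main

import (
	"fmt"
	"sort"
)

// encodeMapUint64String mirrors the generated shape: nil check, map start,
// then either a canonical (sorted-key) pass or a plain range over the map.
func encodeMapUint64String(v map[uint64]string, canonical bool) {
	if v == nil {
		fmt.Println("nil") // the generated code calls ee.EncodeNil() here
		return
	}
	fmt.Printf("map start, len=%d\n", len(v))
	if canonical {
		// Copy the keys out and sort them so repeated encodings of the same
		// map produce identical output despite randomized map iteration.
		keys := make([]uint64, 0, len(v))
		for k := range v {
			keys = append(keys, k)
		}
		sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
		for _, k := range keys {
			fmt.Printf("  key=%d value=%q\n", k, v[k])
		}
	} else {
		for k, v2 := range v {
			fmt.Printf("  key=%d value=%q\n", k, v2)
		}
	}
	fmt.Println("map end")
}

func main() {
	encodeMapUint64String(map[uint64]string{2: "b", 1: "a"}, true)
}

(The generated code sorts through named uintSlice/intSlice helper types with sort.Sort rather than sort.Slice; the effect is the same.)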
[vendor diff, continued: the same paired deletions repeat for the key types uintptr, int and int8, across the same sixteen value types. The uintptr variants differ only in how keys are written: e.encode(uintptr(k2)) in the canonical branch and e.encode(k2) in the plain branch, instead of ee.EncodeUint. The int and int8 variants sort a []int64 with sort.Sort(intSlice(...)) and write keys with ee.EncodeInt(int64(k2)). The hunk runs out midway through the map[int8] family, which continues beyond this excerpt.]
cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt8IntR(rv reflect.Value) { - fastpathTV.EncMapInt8IntV(rv.Interface().(map[int8]int), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt8IntV(v map[int8]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt8Int8R(rv reflect.Value) { - fastpathTV.EncMapInt8Int8V(rv.Interface().(map[int8]int8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt8Int8V(v map[int8]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt8Int16R(rv reflect.Value) { - fastpathTV.EncMapInt8Int16V(rv.Interface().(map[int8]int16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt8Int16V(v map[int8]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt8Int32R(rv reflect.Value) { - fastpathTV.EncMapInt8Int32V(rv.Interface().(map[int8]int32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt8Int32V(v map[int8]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, 
len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt8Int64R(rv reflect.Value) { - fastpathTV.EncMapInt8Int64V(rv.Interface().(map[int8]int64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt8Int64V(v map[int8]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int8(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt8Float32R(rv reflect.Value) { - fastpathTV.EncMapInt8Float32V(rv.Interface().(map[int8]float32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt8Float32V(v map[int8]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v[int8(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt8Float64R(rv reflect.Value) { - fastpathTV.EncMapInt8Float64V(rv.Interface().(map[int8]float64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt8Float64V(v map[int8]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v[int8(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - 
ee.EncodeFloat64(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt8BoolR(rv reflect.Value) { - fastpathTV.EncMapInt8BoolV(rv.Interface().(map[int8]bool), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt8BoolV(v map[int8]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v[int8(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt16IntfR(rv reflect.Value) { - fastpathTV.EncMapInt16IntfV(rv.Interface().(map[int16]interface{}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt16IntfV(v map[int16]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[int16(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt16StringR(rv reflect.Value) { - fastpathTV.EncMapInt16StringV(rv.Interface().(map[int16]string), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt16StringV(v map[int16]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v[int16(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt16UintR(rv reflect.Value) { - fastpathTV.EncMapInt16UintV(rv.Interface().(map[int16]uint), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt16UintV(v map[int16]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := 
range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt16Uint8R(rv reflect.Value) { - fastpathTV.EncMapInt16Uint8V(rv.Interface().(map[int16]uint8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt16Uint8V(v map[int16]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt16Uint16R(rv reflect.Value) { - fastpathTV.EncMapInt16Uint16V(rv.Interface().(map[int16]uint16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt16Uint16V(v map[int16]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt16Uint32R(rv reflect.Value) { - fastpathTV.EncMapInt16Uint32V(rv.Interface().(map[int16]uint32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt16Uint32V(v map[int16]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } 
- ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt16Uint64R(rv reflect.Value) { - fastpathTV.EncMapInt16Uint64V(rv.Interface().(map[int16]uint64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt16Uint64V(v map[int16]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt16UintptrR(rv reflect.Value) { - fastpathTV.EncMapInt16UintptrV(rv.Interface().(map[int16]uintptr), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt16UintptrV(v map[int16]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[int16(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt16IntR(rv reflect.Value) { - fastpathTV.EncMapInt16IntV(rv.Interface().(map[int16]int), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt16IntV(v map[int16]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt16Int8R(rv reflect.Value) { - fastpathTV.EncMapInt16Int8V(rv.Interface().(map[int16]int8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt16Int8V(v map[int16]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i 
int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt16Int16R(rv reflect.Value) { - fastpathTV.EncMapInt16Int16V(rv.Interface().(map[int16]int16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt16Int16V(v map[int16]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt16Int32R(rv reflect.Value) { - fastpathTV.EncMapInt16Int32V(rv.Interface().(map[int16]int32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt16Int32V(v map[int16]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt16Int64R(rv reflect.Value) { - fastpathTV.EncMapInt16Int64V(rv.Interface().(map[int16]int64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt16Int64V(v map[int16]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int16(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - 
ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt16Float32R(rv reflect.Value) { - fastpathTV.EncMapInt16Float32V(rv.Interface().(map[int16]float32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt16Float32V(v map[int16]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v[int16(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt16Float64R(rv reflect.Value) { - fastpathTV.EncMapInt16Float64V(rv.Interface().(map[int16]float64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt16Float64V(v map[int16]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v[int16(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt16BoolR(rv reflect.Value) { - fastpathTV.EncMapInt16BoolV(rv.Interface().(map[int16]bool), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt16BoolV(v map[int16]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v[int16(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt32IntfR(rv reflect.Value) { - fastpathTV.EncMapInt32IntfV(rv.Interface().(map[int32]interface{}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt32IntfV(v map[int32]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, 
len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[int32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt32StringR(rv reflect.Value) { - fastpathTV.EncMapInt32StringV(rv.Interface().(map[int32]string), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt32StringV(v map[int32]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v[int32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt32UintR(rv reflect.Value) { - fastpathTV.EncMapInt32UintV(rv.Interface().(map[int32]uint), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt32UintV(v map[int32]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt32Uint8R(rv reflect.Value) { - fastpathTV.EncMapInt32Uint8V(rv.Interface().(map[int32]uint8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt32Uint8V(v map[int32]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - 
} - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt32Uint16R(rv reflect.Value) { - fastpathTV.EncMapInt32Uint16V(rv.Interface().(map[int32]uint16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt32Uint16V(v map[int32]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt32Uint32R(rv reflect.Value) { - fastpathTV.EncMapInt32Uint32V(rv.Interface().(map[int32]uint32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt32Uint32V(v map[int32]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt32Uint64R(rv reflect.Value) { - fastpathTV.EncMapInt32Uint64V(rv.Interface().(map[int32]uint64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt32Uint64V(v map[int32]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt32UintptrR(rv reflect.Value) { - fastpathTV.EncMapInt32UintptrV(rv.Interface().(map[int32]uintptr), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt32UintptrV(v map[int32]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - 
if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[int32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt32IntR(rv reflect.Value) { - fastpathTV.EncMapInt32IntV(rv.Interface().(map[int32]int), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt32IntV(v map[int32]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt32Int8R(rv reflect.Value) { - fastpathTV.EncMapInt32Int8V(rv.Interface().(map[int32]int8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt32Int8V(v map[int32]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt32Int16R(rv reflect.Value) { - fastpathTV.EncMapInt32Int16V(rv.Interface().(map[int32]int16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt32Int16V(v map[int32]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - 
cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt32Int32R(rv reflect.Value) { - fastpathTV.EncMapInt32Int32V(rv.Interface().(map[int32]int32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt32Int32V(v map[int32]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt32Int64R(rv reflect.Value) { - fastpathTV.EncMapInt32Int64V(rv.Interface().(map[int32]int64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt32Int64V(v map[int32]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int32(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt32Float32R(rv reflect.Value) { - fastpathTV.EncMapInt32Float32V(rv.Interface().(map[int32]float32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt32Float32V(v map[int32]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v[int32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat32(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt32Float64R(rv reflect.Value) { - fastpathTV.EncMapInt32Float64V(rv.Interface().(map[int32]float64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt32Float64V(v map[int32]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - 
ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v[int32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeFloat64(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt32BoolR(rv reflect.Value) { - fastpathTV.EncMapInt32BoolV(rv.Interface().(map[int32]bool), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt32BoolV(v map[int32]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v[int32(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeBool(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64IntfR(rv reflect.Value) { - fastpathTV.EncMapInt64IntfV(rv.Interface().(map[int64]interface{}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64IntfV(v map[int64]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[int64(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64StringR(rv reflect.Value) { - fastpathTV.EncMapInt64StringV(rv.Interface().(map[int64]string), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64StringV(v map[int64]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v[int64(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != 
nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeString(c_UTF8, v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64UintR(rv reflect.Value) { - fastpathTV.EncMapInt64UintV(rv.Interface().(map[int64]uint), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64UintV(v map[int64]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64Uint8R(rv reflect.Value) { - fastpathTV.EncMapInt64Uint8V(rv.Interface().(map[int64]uint8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64Uint8V(v map[int64]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64Uint16R(rv reflect.Value) { - fastpathTV.EncMapInt64Uint16V(rv.Interface().(map[int64]uint16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64Uint16V(v map[int64]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64Uint32R(rv reflect.Value) { - fastpathTV.EncMapInt64Uint32V(rv.Interface().(map[int64]uint32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64Uint32V(v map[int64]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return 
- } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64Uint64R(rv reflect.Value) { - fastpathTV.EncMapInt64Uint64V(rv.Interface().(map[int64]uint64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64Uint64V(v map[int64]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeUint(uint64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64UintptrR(rv reflect.Value) { - fastpathTV.EncMapInt64UintptrV(rv.Interface().(map[int64]uintptr), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64UintptrV(v map[int64]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v[int64(k2)]) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encode(v2) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64IntR(rv reflect.Value) { - fastpathTV.EncMapInt64IntV(rv.Interface().(map[int64]int), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64IntV(v map[int64]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - 
ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64Int8R(rv reflect.Value) { - fastpathTV.EncMapInt64Int8V(rv.Interface().(map[int64]int8), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64Int8V(v map[int64]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64Int16R(rv reflect.Value) { - fastpathTV.EncMapInt64Int16V(rv.Interface().(map[int64]int16), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64Int16V(v map[int64]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64Int32R(rv reflect.Value) { - fastpathTV.EncMapInt64Int32V(rv.Interface().(map[int64]int32), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64Int32V(v map[int64]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - if e.h.Canonical { - v2 := make([]int64, len(v)) - var i int - for k, _ := range v { - v2[i] = int64(k) - i++ - } - sort.Sort(intSlice(v2)) - for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v[int64(k2)])) - } - } else { - for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - ee.EncodeInt(int64(v2)) - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} - -func (f *encFnInfo) fastpathEncMapInt64Int64R(rv reflect.Value) { - fastpathTV.EncMapInt64Int64V(rv.Interface().(map[int64]int64), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) EncMapInt64Int64V(v map[int64]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() 
-		return
-	}
-	ee.EncodeMapStart(len(v))
-	if e.h.Canonical {
-		v2 := make([]int64, len(v))
-		var i int
-		for k, _ := range v {
-			v2[i] = int64(k)
-			i++
-		}
-		sort.Sort(intSlice(v2))
-		for _, k2 := range v2 {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeInt(int64(int64(k2)))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeInt(int64(v[int64(k2)]))
-		}
-	} else {
-		for k2, v2 := range v {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeInt(int64(k2))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeInt(int64(v2))
-		}
-	}
-	if cr != nil {
-		cr.sendContainerState(containerMapEnd)
-	}
-}
-
-func (f *encFnInfo) fastpathEncMapInt64Float32R(rv reflect.Value) {
-	fastpathTV.EncMapInt64Float32V(rv.Interface().(map[int64]float32), fastpathCheckNilFalse, f.e)
-}
-func (_ fastpathT) EncMapInt64Float32V(v map[int64]float32, checkNil bool, e *Encoder) {
-	ee := e.e
-	cr := e.cr
-	if checkNil && v == nil {
-		ee.EncodeNil()
-		return
-	}
-	ee.EncodeMapStart(len(v))
-	if e.h.Canonical {
-		v2 := make([]int64, len(v))
-		var i int
-		for k, _ := range v {
-			v2[i] = int64(k)
-			i++
-		}
-		sort.Sort(intSlice(v2))
-		for _, k2 := range v2 {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeInt(int64(int64(k2)))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeFloat32(v[int64(k2)])
-		}
-	} else {
-		for k2, v2 := range v {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeInt(int64(k2))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeFloat32(v2)
-		}
-	}
-	if cr != nil {
-		cr.sendContainerState(containerMapEnd)
-	}
-}
-
-func (f *encFnInfo) fastpathEncMapInt64Float64R(rv reflect.Value) {
-	fastpathTV.EncMapInt64Float64V(rv.Interface().(map[int64]float64), fastpathCheckNilFalse, f.e)
-}
-func (_ fastpathT) EncMapInt64Float64V(v map[int64]float64, checkNil bool, e *Encoder) {
-	ee := e.e
-	cr := e.cr
-	if checkNil && v == nil {
-		ee.EncodeNil()
-		return
-	}
-	ee.EncodeMapStart(len(v))
-	if e.h.Canonical {
-		v2 := make([]int64, len(v))
-		var i int
-		for k, _ := range v {
-			v2[i] = int64(k)
-			i++
-		}
-		sort.Sort(intSlice(v2))
-		for _, k2 := range v2 {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeInt(int64(int64(k2)))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeFloat64(v[int64(k2)])
-		}
-	} else {
-		for k2, v2 := range v {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeInt(int64(k2))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeFloat64(v2)
-		}
-	}
-	if cr != nil {
-		cr.sendContainerState(containerMapEnd)
-	}
-}
-
-func (f *encFnInfo) fastpathEncMapInt64BoolR(rv reflect.Value) {
-	fastpathTV.EncMapInt64BoolV(rv.Interface().(map[int64]bool), fastpathCheckNilFalse, f.e)
-}
-func (_ fastpathT) EncMapInt64BoolV(v map[int64]bool, checkNil bool, e *Encoder) {
-	ee := e.e
-	cr := e.cr
-	if checkNil && v == nil {
-		ee.EncodeNil()
-		return
-	}
-	ee.EncodeMapStart(len(v))
-	if e.h.Canonical {
-		v2 := make([]int64, len(v))
-		var i int
-		for k, _ := range v {
-			v2[i] = int64(k)
-			i++
-		}
-		sort.Sort(intSlice(v2))
-		for _, k2 := range v2 {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeInt(int64(int64(k2)))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeBool(v[int64(k2)])
-		}
-	} else {
-		for k2, v2 := range v {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeInt(int64(k2))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeBool(v2)
-		}
-	}
-	if cr != nil {
-		cr.sendContainerState(containerMapEnd)
-	}
-}
-
-func (f *encFnInfo) fastpathEncMapBoolIntfR(rv reflect.Value) {
-	fastpathTV.EncMapBoolIntfV(rv.Interface().(map[bool]interface{}), fastpathCheckNilFalse, f.e)
-}
-func (_ fastpathT) EncMapBoolIntfV(v map[bool]interface{}, checkNil bool, e *Encoder) {
-	ee := e.e
-	cr := e.cr
-	if checkNil && v == nil {
-		ee.EncodeNil()
-		return
-	}
-	ee.EncodeMapStart(len(v))
-	if e.h.Canonical {
-		v2 := make([]bool, len(v))
-		var i int
-		for k, _ := range v {
-			v2[i] = bool(k)
-			i++
-		}
-		sort.Sort(boolSlice(v2))
-		for _, k2 := range v2 {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeBool(bool(k2))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			e.encode(v[bool(k2)])
-		}
-	} else {
-		for k2, v2 := range v {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeBool(k2)
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			e.encode(v2)
-		}
-	}
-	if cr != nil {
-		cr.sendContainerState(containerMapEnd)
-	}
-}
-
-func (f *encFnInfo) fastpathEncMapBoolStringR(rv reflect.Value) {
-	fastpathTV.EncMapBoolStringV(rv.Interface().(map[bool]string), fastpathCheckNilFalse, f.e)
-}
-func (_ fastpathT) EncMapBoolStringV(v map[bool]string, checkNil bool, e *Encoder) {
-	ee := e.e
-	cr := e.cr
-	if checkNil && v == nil {
-		ee.EncodeNil()
-		return
-	}
-	ee.EncodeMapStart(len(v))
-	if e.h.Canonical {
-		v2 := make([]bool, len(v))
-		var i int
-		for k, _ := range v {
-			v2[i] = bool(k)
-			i++
-		}
-		sort.Sort(boolSlice(v2))
-		for _, k2 := range v2 {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeBool(bool(k2))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeString(c_UTF8, v[bool(k2)])
-		}
-	} else {
-		for k2, v2 := range v {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeBool(k2)
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeString(c_UTF8, v2)
-		}
-	}
-	if cr != nil {
-		cr.sendContainerState(containerMapEnd)
-	}
-}
-
-func (f *encFnInfo) fastpathEncMapBoolUintR(rv reflect.Value) {
-	fastpathTV.EncMapBoolUintV(rv.Interface().(map[bool]uint), fastpathCheckNilFalse, f.e)
-}
-func (_ fastpathT) EncMapBoolUintV(v map[bool]uint, checkNil bool, e *Encoder) {
-	ee := e.e
-	cr := e.cr
-	if checkNil && v == nil {
-		ee.EncodeNil()
-		return
-	}
-	ee.EncodeMapStart(len(v))
-	if e.h.Canonical {
-		v2 := make([]bool, len(v))
-		var i int
-		for k, _ := range v {
-			v2[i] = bool(k)
-			i++
-		}
-		sort.Sort(boolSlice(v2))
-		for _, k2 := range v2 {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeBool(bool(k2))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeUint(uint64(v[bool(k2)]))
-		}
-	} else {
-		for k2, v2 := range v {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeBool(k2)
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeUint(uint64(v2))
-		}
-	}
-	if cr != nil {
-		cr.sendContainerState(containerMapEnd)
-	}
-}
-
-func (f *encFnInfo) fastpathEncMapBoolUint8R(rv reflect.Value) {
-	fastpathTV.EncMapBoolUint8V(rv.Interface().(map[bool]uint8), fastpathCheckNilFalse, f.e)
-}
-func (_ fastpathT) EncMapBoolUint8V(v map[bool]uint8, checkNil bool, e *Encoder) {
-	ee := e.e
-	cr := e.cr
-	if checkNil && v == nil {
-		ee.EncodeNil()
-		return
-	}
-	ee.EncodeMapStart(len(v))
-	if e.h.Canonical {
-		v2 := make([]bool, len(v))
-		var i int
-		for k, _ := range v {
-			v2[i] = bool(k)
-			i++
-		}
-		sort.Sort(boolSlice(v2))
-		for _, k2 := range v2 {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeBool(bool(k2))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeUint(uint64(v[bool(k2)]))
-		}
-	} else {
-		for k2, v2 := range v {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeBool(k2)
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeUint(uint64(v2))
-		}
-	}
-	if cr != nil {
-		cr.sendContainerState(containerMapEnd)
-	}
-}
-
-func (f *encFnInfo) fastpathEncMapBoolUint16R(rv reflect.Value) {
-	fastpathTV.EncMapBoolUint16V(rv.Interface().(map[bool]uint16), fastpathCheckNilFalse, f.e)
-}
-func (_ fastpathT) EncMapBoolUint16V(v map[bool]uint16, checkNil bool, e *Encoder) {
-	ee := e.e
-	cr := e.cr
-	if checkNil && v == nil {
-		ee.EncodeNil()
-		return
-	}
-	ee.EncodeMapStart(len(v))
-	if e.h.Canonical {
-		v2 := make([]bool, len(v))
-		var i int
-		for k, _ := range v {
-			v2[i] = bool(k)
-			i++
-		}
-		sort.Sort(boolSlice(v2))
-		for _, k2 := range v2 {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeBool(bool(k2))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeUint(uint64(v[bool(k2)]))
-		}
-	} else {
-		for k2, v2 := range v {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeBool(k2)
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeUint(uint64(v2))
-		}
-	}
-	if cr != nil {
-		cr.sendContainerState(containerMapEnd)
-	}
-}
-
-func (f *encFnInfo) fastpathEncMapBoolUint32R(rv reflect.Value) {
-	fastpathTV.EncMapBoolUint32V(rv.Interface().(map[bool]uint32), fastpathCheckNilFalse, f.e)
-}
-func (_ fastpathT) EncMapBoolUint32V(v map[bool]uint32, checkNil bool, e *Encoder) {
-	ee := e.e
-	cr := e.cr
-	if checkNil && v == nil {
-		ee.EncodeNil()
-		return
-	}
-	ee.EncodeMapStart(len(v))
-	if e.h.Canonical {
-		v2 := make([]bool, len(v))
-		var i int
-		for k, _ := range v {
-			v2[i] = bool(k)
-			i++
-		}
-		sort.Sort(boolSlice(v2))
-		for _, k2 := range v2 {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeBool(bool(k2))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeUint(uint64(v[bool(k2)]))
-		}
-	} else {
-		for k2, v2 := range v {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeBool(k2)
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeUint(uint64(v2))
-		}
-	}
-	if cr != nil {
-		cr.sendContainerState(containerMapEnd)
-	}
-}
-
-func (f *encFnInfo) fastpathEncMapBoolUint64R(rv reflect.Value) {
-	fastpathTV.EncMapBoolUint64V(rv.Interface().(map[bool]uint64), fastpathCheckNilFalse, f.e)
-}
-func (_ fastpathT) EncMapBoolUint64V(v map[bool]uint64, checkNil bool, e *Encoder) {
-	ee := e.e
-	cr := e.cr
-	if checkNil && v == nil {
-		ee.EncodeNil()
-		return
-	}
-	ee.EncodeMapStart(len(v))
-	if e.h.Canonical {
-		v2 := make([]bool, len(v))
-		var i int
-		for k, _ := range v {
-			v2[i] = bool(k)
-			i++
-		}
-		sort.Sort(boolSlice(v2))
-		for _, k2 := range v2 {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeBool(bool(k2))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeUint(uint64(v[bool(k2)]))
-		}
-	} else {
-		for k2, v2 := range v {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeBool(k2)
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeUint(uint64(v2))
-		}
-	}
-	if cr != nil {
-		cr.sendContainerState(containerMapEnd)
-	}
-}
-
-func (f *encFnInfo) fastpathEncMapBoolUintptrR(rv reflect.Value) {
-	fastpathTV.EncMapBoolUintptrV(rv.Interface().(map[bool]uintptr), fastpathCheckNilFalse, f.e)
-}
-func (_ fastpathT) EncMapBoolUintptrV(v map[bool]uintptr, checkNil bool, e *Encoder) {
-	ee := e.e
-	cr := e.cr
-	if checkNil && v == nil {
-		ee.EncodeNil()
-		return
-	}
-	ee.EncodeMapStart(len(v))
-	if e.h.Canonical {
-		v2 := make([]bool, len(v))
-		var i int
-		for k, _ := range v {
-			v2[i] = bool(k)
-			i++
-		}
-		sort.Sort(boolSlice(v2))
-		for _, k2 := range v2 {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeBool(bool(k2))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			e.encode(v[bool(k2)])
-		}
-	} else {
-		for k2, v2 := range v {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeBool(k2)
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			e.encode(v2)
-		}
-	}
-	if cr != nil {
-		cr.sendContainerState(containerMapEnd)
-	}
-}
-
-func (f *encFnInfo) fastpathEncMapBoolIntR(rv reflect.Value) {
-	fastpathTV.EncMapBoolIntV(rv.Interface().(map[bool]int), fastpathCheckNilFalse, f.e)
-}
-func (_ fastpathT) EncMapBoolIntV(v map[bool]int, checkNil bool, e *Encoder) {
-	ee := e.e
-	cr := e.cr
-	if checkNil && v == nil {
-		ee.EncodeNil()
-		return
-	}
-	ee.EncodeMapStart(len(v))
-	if e.h.Canonical {
-		v2 := make([]bool, len(v))
-		var i int
-		for k, _ := range v {
-			v2[i] = bool(k)
-			i++
-		}
-		sort.Sort(boolSlice(v2))
-		for _, k2 := range v2 {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeBool(bool(k2))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeInt(int64(v[bool(k2)]))
-		}
-	} else {
-		for k2, v2 := range v {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeBool(k2)
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeInt(int64(v2))
-		}
-	}
-	if cr != nil {
-		cr.sendContainerState(containerMapEnd)
-	}
-}
-
-func (f *encFnInfo) fastpathEncMapBoolInt8R(rv reflect.Value) {
-	fastpathTV.EncMapBoolInt8V(rv.Interface().(map[bool]int8), fastpathCheckNilFalse, f.e)
-}
-func (_ fastpathT) EncMapBoolInt8V(v map[bool]int8, checkNil bool, e *Encoder) {
-	ee := e.e
-	cr := e.cr
-	if checkNil && v == nil {
-		ee.EncodeNil()
-		return
-	}
-	ee.EncodeMapStart(len(v))
-	if e.h.Canonical {
-		v2 := make([]bool, len(v))
-		var i int
-		for k, _ := range v {
-			v2[i] = bool(k)
-			i++
-		}
-		sort.Sort(boolSlice(v2))
-		for _, k2 := range v2 {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeBool(bool(k2))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeInt(int64(v[bool(k2)]))
-		}
-	} else {
-		for k2, v2 := range v {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeBool(k2)
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeInt(int64(v2))
-		}
-	}
-	if cr != nil {
-		cr.sendContainerState(containerMapEnd)
-	}
-}
-
-func (f *encFnInfo) fastpathEncMapBoolInt16R(rv reflect.Value) {
-	fastpathTV.EncMapBoolInt16V(rv.Interface().(map[bool]int16), fastpathCheckNilFalse, f.e)
-}
-func (_ fastpathT) EncMapBoolInt16V(v map[bool]int16, checkNil bool, e *Encoder) {
-	ee := e.e
-	cr := e.cr
-	if checkNil && v == nil {
-		ee.EncodeNil()
-		return
-	}
-	ee.EncodeMapStart(len(v))
-	if e.h.Canonical {
-		v2 := make([]bool, len(v))
-		var i int
-		for k, _ := range v {
-			v2[i] = bool(k)
-			i++
-		}
-		sort.Sort(boolSlice(v2))
-		for _, k2 := range v2 {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeBool(bool(k2))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeInt(int64(v[bool(k2)]))
-		}
-	} else {
-		for k2, v2 := range v {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeBool(k2)
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeInt(int64(v2))
-		}
-	}
-	if cr != nil {
-		cr.sendContainerState(containerMapEnd)
-	}
-}
-
-func (f *encFnInfo) fastpathEncMapBoolInt32R(rv reflect.Value) {
-	fastpathTV.EncMapBoolInt32V(rv.Interface().(map[bool]int32), fastpathCheckNilFalse, f.e)
-}
-func (_ fastpathT) EncMapBoolInt32V(v map[bool]int32, checkNil bool, e *Encoder) {
-	ee := e.e
-	cr := e.cr
-	if checkNil && v == nil {
-		ee.EncodeNil()
-		return
-	}
-	ee.EncodeMapStart(len(v))
-	if e.h.Canonical {
-		v2 := make([]bool, len(v))
-		var i int
-		for k, _ := range v {
-			v2[i] = bool(k)
-			i++
-		}
-		sort.Sort(boolSlice(v2))
-		for _, k2 := range v2 {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeBool(bool(k2))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeInt(int64(v[bool(k2)]))
-		}
-	} else {
-		for k2, v2 := range v {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeBool(k2)
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeInt(int64(v2))
-		}
-	}
-	if cr != nil {
-		cr.sendContainerState(containerMapEnd)
-	}
-}
-
-func (f *encFnInfo) fastpathEncMapBoolInt64R(rv reflect.Value) {
-	fastpathTV.EncMapBoolInt64V(rv.Interface().(map[bool]int64), fastpathCheckNilFalse, f.e)
-}
-func (_ fastpathT) EncMapBoolInt64V(v map[bool]int64, checkNil bool, e *Encoder) {
-	ee := e.e
-	cr := e.cr
-	if checkNil && v == nil {
-		ee.EncodeNil()
-		return
-	}
-	ee.EncodeMapStart(len(v))
-	if e.h.Canonical {
-		v2 := make([]bool, len(v))
-		var i int
-		for k, _ := range v {
-			v2[i] = bool(k)
-			i++
-		}
-		sort.Sort(boolSlice(v2))
-		for _, k2 := range v2 {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeBool(bool(k2))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeInt(int64(v[bool(k2)]))
-		}
-	} else {
-		for k2, v2 := range v {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeBool(k2)
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeInt(int64(v2))
-		}
-	}
-	if cr != nil {
-		cr.sendContainerState(containerMapEnd)
-	}
-}
-
-func (f *encFnInfo) fastpathEncMapBoolFloat32R(rv reflect.Value) {
-	fastpathTV.EncMapBoolFloat32V(rv.Interface().(map[bool]float32), fastpathCheckNilFalse, f.e)
-}
-func (_ fastpathT) EncMapBoolFloat32V(v map[bool]float32, checkNil bool, e *Encoder) {
-	ee := e.e
-	cr := e.cr
-	if checkNil && v == nil {
-		ee.EncodeNil()
-		return
-	}
-	ee.EncodeMapStart(len(v))
-	if e.h.Canonical {
-		v2 := make([]bool, len(v))
-		var i int
-		for k, _ := range v {
-			v2[i] = bool(k)
-			i++
-		}
-		sort.Sort(boolSlice(v2))
-		for _, k2 := range v2 {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeBool(bool(k2))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeFloat32(v[bool(k2)])
-		}
-	} else {
-		for k2, v2 := range v {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeBool(k2)
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeFloat32(v2)
-		}
-	}
-	if cr != nil {
-		cr.sendContainerState(containerMapEnd)
-	}
-}
-
-func (f *encFnInfo) fastpathEncMapBoolFloat64R(rv reflect.Value) {
-	fastpathTV.EncMapBoolFloat64V(rv.Interface().(map[bool]float64), fastpathCheckNilFalse, f.e)
-}
-func (_ fastpathT) EncMapBoolFloat64V(v map[bool]float64, checkNil bool, e *Encoder) {
-	ee := e.e
-	cr := e.cr
-	if checkNil && v == nil {
-		ee.EncodeNil()
-		return
-	}
-	ee.EncodeMapStart(len(v))
-	if e.h.Canonical {
-		v2 := make([]bool, len(v))
-		var i int
-		for k, _ := range v {
-			v2[i] = bool(k)
-			i++
-		}
-		sort.Sort(boolSlice(v2))
-		for _, k2 := range v2 {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeBool(bool(k2))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeFloat64(v[bool(k2)])
-		}
-	} else {
-		for k2, v2 := range v {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeBool(k2)
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeFloat64(v2)
-		}
-	}
-	if cr != nil {
-		cr.sendContainerState(containerMapEnd)
-	}
-}
-
-func (f *encFnInfo) fastpathEncMapBoolBoolR(rv reflect.Value) {
-	fastpathTV.EncMapBoolBoolV(rv.Interface().(map[bool]bool), fastpathCheckNilFalse, f.e)
-}
-func (_ fastpathT) EncMapBoolBoolV(v map[bool]bool, checkNil bool, e *Encoder) {
-	ee := e.e
-	cr := e.cr
-	if checkNil && v == nil {
-		ee.EncodeNil()
-		return
-	}
-	ee.EncodeMapStart(len(v))
-	if e.h.Canonical {
-		v2 := make([]bool, len(v))
-		var i int
-		for k, _ := range v {
-			v2[i] = bool(k)
-			i++
-		}
-		sort.Sort(boolSlice(v2))
-		for _, k2 := range v2 {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeBool(bool(k2))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeBool(v[bool(k2)])
-		}
-	} else {
-		for k2, v2 := range v {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			ee.EncodeBool(k2)
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			ee.EncodeBool(v2)
-		}
-	}
-	if cr != nil {
-		cr.sendContainerState(containerMapEnd)
-	}
-}
-
-// -- decode
-
-// -- -- fast path type switch
-func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
-	switch v := iv.(type) {
-
-	case []interface{}:
-		fastpathTV.DecSliceIntfV(v, fastpathCheckNilFalse, false, d)
-	case *[]interface{}:
-		v2, changed2 := fastpathTV.DecSliceIntfV(*v, fastpathCheckNilFalse, true, d)
-		if changed2 {
-			*v = v2
-		}
-
-	case map[interface{}]interface{}:
-		fastpathTV.DecMapIntfIntfV(v, fastpathCheckNilFalse, false, d)
-	case *map[interface{}]interface{}:
-		v2, changed2 := fastpathTV.DecMapIntfIntfV(*v, fastpathCheckNilFalse, true, d)
-		if changed2 {
-			*v = v2
-		}
-
-	case map[interface{}]string:
-		fastpathTV.DecMapIntfStringV(v, fastpathCheckNilFalse, false, d)
-	case *map[interface{}]string:
-		v2, changed2 := fastpathTV.DecMapIntfStringV(*v, fastpathCheckNilFalse, true, d)
-		if changed2 {
-			*v = v2
-		}
-
-	case map[interface{}]uint:
-		fastpathTV.DecMapIntfUintV(v, fastpathCheckNilFalse, false, d)
-	case *map[interface{}]uint:
-		v2, changed2 := fastpathTV.DecMapIntfUintV(*v, fastpathCheckNilFalse, true, d)
-		if changed2 {
-			*v = v2
-		}
-
-	case map[interface{}]uint8:
-		fastpathTV.DecMapIntfUint8V(v, fastpathCheckNilFalse, false, d)
-	case *map[interface{}]uint8:
-		v2, changed2 := fastpathTV.DecMapIntfUint8V(*v, fastpathCheckNilFalse, true, d)
-		if changed2 {
-			*v = v2
-		}
-
-	case map[interface{}]uint16:
-		fastpathTV.DecMapIntfUint16V(v, fastpathCheckNilFalse, false, d)
-	case *map[interface{}]uint16:
-		v2, changed2 := fastpathTV.DecMapIntfUint16V(*v, fastpathCheckNilFalse, true, d)
-		if changed2 {
-			*v = v2
-		}
-
-	case
map[interface{}]uint32: - fastpathTV.DecMapIntfUint32V(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]uint32: - v2, changed2 := fastpathTV.DecMapIntfUint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]uint64: - fastpathTV.DecMapIntfUint64V(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]uint64: - v2, changed2 := fastpathTV.DecMapIntfUint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]uintptr: - fastpathTV.DecMapIntfUintptrV(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]uintptr: - v2, changed2 := fastpathTV.DecMapIntfUintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]int: - fastpathTV.DecMapIntfIntV(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]int: - v2, changed2 := fastpathTV.DecMapIntfIntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]int8: - fastpathTV.DecMapIntfInt8V(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]int8: - v2, changed2 := fastpathTV.DecMapIntfInt8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]int16: - fastpathTV.DecMapIntfInt16V(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]int16: - v2, changed2 := fastpathTV.DecMapIntfInt16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]int32: - fastpathTV.DecMapIntfInt32V(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]int32: - v2, changed2 := fastpathTV.DecMapIntfInt32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]int64: - fastpathTV.DecMapIntfInt64V(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]int64: - v2, changed2 := fastpathTV.DecMapIntfInt64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]float32: - fastpathTV.DecMapIntfFloat32V(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]float32: - v2, changed2 := fastpathTV.DecMapIntfFloat32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]float64: - fastpathTV.DecMapIntfFloat64V(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]float64: - v2, changed2 := fastpathTV.DecMapIntfFloat64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[interface{}]bool: - fastpathTV.DecMapIntfBoolV(v, fastpathCheckNilFalse, false, d) - case *map[interface{}]bool: - v2, changed2 := fastpathTV.DecMapIntfBoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []string: - fastpathTV.DecSliceStringV(v, fastpathCheckNilFalse, false, d) - case *[]string: - v2, changed2 := fastpathTV.DecSliceStringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]interface{}: - fastpathTV.DecMapStringIntfV(v, fastpathCheckNilFalse, false, d) - case *map[string]interface{}: - v2, changed2 := fastpathTV.DecMapStringIntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]string: - fastpathTV.DecMapStringStringV(v, fastpathCheckNilFalse, false, d) - case *map[string]string: - v2, changed2 := fastpathTV.DecMapStringStringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]uint: - fastpathTV.DecMapStringUintV(v, fastpathCheckNilFalse, false, d) - case *map[string]uint: - v2, changed2 := fastpathTV.DecMapStringUintV(*v, fastpathCheckNilFalse, true, 
d) - if changed2 { - *v = v2 - } - - case map[string]uint8: - fastpathTV.DecMapStringUint8V(v, fastpathCheckNilFalse, false, d) - case *map[string]uint8: - v2, changed2 := fastpathTV.DecMapStringUint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]uint16: - fastpathTV.DecMapStringUint16V(v, fastpathCheckNilFalse, false, d) - case *map[string]uint16: - v2, changed2 := fastpathTV.DecMapStringUint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]uint32: - fastpathTV.DecMapStringUint32V(v, fastpathCheckNilFalse, false, d) - case *map[string]uint32: - v2, changed2 := fastpathTV.DecMapStringUint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]uint64: - fastpathTV.DecMapStringUint64V(v, fastpathCheckNilFalse, false, d) - case *map[string]uint64: - v2, changed2 := fastpathTV.DecMapStringUint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]uintptr: - fastpathTV.DecMapStringUintptrV(v, fastpathCheckNilFalse, false, d) - case *map[string]uintptr: - v2, changed2 := fastpathTV.DecMapStringUintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]int: - fastpathTV.DecMapStringIntV(v, fastpathCheckNilFalse, false, d) - case *map[string]int: - v2, changed2 := fastpathTV.DecMapStringIntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]int8: - fastpathTV.DecMapStringInt8V(v, fastpathCheckNilFalse, false, d) - case *map[string]int8: - v2, changed2 := fastpathTV.DecMapStringInt8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]int16: - fastpathTV.DecMapStringInt16V(v, fastpathCheckNilFalse, false, d) - case *map[string]int16: - v2, changed2 := fastpathTV.DecMapStringInt16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]int32: - fastpathTV.DecMapStringInt32V(v, fastpathCheckNilFalse, false, d) - case *map[string]int32: - v2, changed2 := fastpathTV.DecMapStringInt32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]int64: - fastpathTV.DecMapStringInt64V(v, fastpathCheckNilFalse, false, d) - case *map[string]int64: - v2, changed2 := fastpathTV.DecMapStringInt64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]float32: - fastpathTV.DecMapStringFloat32V(v, fastpathCheckNilFalse, false, d) - case *map[string]float32: - v2, changed2 := fastpathTV.DecMapStringFloat32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]float64: - fastpathTV.DecMapStringFloat64V(v, fastpathCheckNilFalse, false, d) - case *map[string]float64: - v2, changed2 := fastpathTV.DecMapStringFloat64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[string]bool: - fastpathTV.DecMapStringBoolV(v, fastpathCheckNilFalse, false, d) - case *map[string]bool: - v2, changed2 := fastpathTV.DecMapStringBoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []float32: - fastpathTV.DecSliceFloat32V(v, fastpathCheckNilFalse, false, d) - case *[]float32: - v2, changed2 := fastpathTV.DecSliceFloat32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]interface{}: - fastpathTV.DecMapFloat32IntfV(v, fastpathCheckNilFalse, false, d) - case *map[float32]interface{}: - v2, changed2 := fastpathTV.DecMapFloat32IntfV(*v, fastpathCheckNilFalse, true, d) - if 
changed2 { - *v = v2 - } - - case map[float32]string: - fastpathTV.DecMapFloat32StringV(v, fastpathCheckNilFalse, false, d) - case *map[float32]string: - v2, changed2 := fastpathTV.DecMapFloat32StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]uint: - fastpathTV.DecMapFloat32UintV(v, fastpathCheckNilFalse, false, d) - case *map[float32]uint: - v2, changed2 := fastpathTV.DecMapFloat32UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]uint8: - fastpathTV.DecMapFloat32Uint8V(v, fastpathCheckNilFalse, false, d) - case *map[float32]uint8: - v2, changed2 := fastpathTV.DecMapFloat32Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]uint16: - fastpathTV.DecMapFloat32Uint16V(v, fastpathCheckNilFalse, false, d) - case *map[float32]uint16: - v2, changed2 := fastpathTV.DecMapFloat32Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]uint32: - fastpathTV.DecMapFloat32Uint32V(v, fastpathCheckNilFalse, false, d) - case *map[float32]uint32: - v2, changed2 := fastpathTV.DecMapFloat32Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]uint64: - fastpathTV.DecMapFloat32Uint64V(v, fastpathCheckNilFalse, false, d) - case *map[float32]uint64: - v2, changed2 := fastpathTV.DecMapFloat32Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]uintptr: - fastpathTV.DecMapFloat32UintptrV(v, fastpathCheckNilFalse, false, d) - case *map[float32]uintptr: - v2, changed2 := fastpathTV.DecMapFloat32UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]int: - fastpathTV.DecMapFloat32IntV(v, fastpathCheckNilFalse, false, d) - case *map[float32]int: - v2, changed2 := fastpathTV.DecMapFloat32IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]int8: - fastpathTV.DecMapFloat32Int8V(v, fastpathCheckNilFalse, false, d) - case *map[float32]int8: - v2, changed2 := fastpathTV.DecMapFloat32Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]int16: - fastpathTV.DecMapFloat32Int16V(v, fastpathCheckNilFalse, false, d) - case *map[float32]int16: - v2, changed2 := fastpathTV.DecMapFloat32Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]int32: - fastpathTV.DecMapFloat32Int32V(v, fastpathCheckNilFalse, false, d) - case *map[float32]int32: - v2, changed2 := fastpathTV.DecMapFloat32Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]int64: - fastpathTV.DecMapFloat32Int64V(v, fastpathCheckNilFalse, false, d) - case *map[float32]int64: - v2, changed2 := fastpathTV.DecMapFloat32Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]float32: - fastpathTV.DecMapFloat32Float32V(v, fastpathCheckNilFalse, false, d) - case *map[float32]float32: - v2, changed2 := fastpathTV.DecMapFloat32Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]float64: - fastpathTV.DecMapFloat32Float64V(v, fastpathCheckNilFalse, false, d) - case *map[float32]float64: - v2, changed2 := fastpathTV.DecMapFloat32Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float32]bool: - fastpathTV.DecMapFloat32BoolV(v, fastpathCheckNilFalse, false, d) - case *map[float32]bool: - v2, changed2 := 
fastpathTV.DecMapFloat32BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []float64: - fastpathTV.DecSliceFloat64V(v, fastpathCheckNilFalse, false, d) - case *[]float64: - v2, changed2 := fastpathTV.DecSliceFloat64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]interface{}: - fastpathTV.DecMapFloat64IntfV(v, fastpathCheckNilFalse, false, d) - case *map[float64]interface{}: - v2, changed2 := fastpathTV.DecMapFloat64IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]string: - fastpathTV.DecMapFloat64StringV(v, fastpathCheckNilFalse, false, d) - case *map[float64]string: - v2, changed2 := fastpathTV.DecMapFloat64StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]uint: - fastpathTV.DecMapFloat64UintV(v, fastpathCheckNilFalse, false, d) - case *map[float64]uint: - v2, changed2 := fastpathTV.DecMapFloat64UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]uint8: - fastpathTV.DecMapFloat64Uint8V(v, fastpathCheckNilFalse, false, d) - case *map[float64]uint8: - v2, changed2 := fastpathTV.DecMapFloat64Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]uint16: - fastpathTV.DecMapFloat64Uint16V(v, fastpathCheckNilFalse, false, d) - case *map[float64]uint16: - v2, changed2 := fastpathTV.DecMapFloat64Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]uint32: - fastpathTV.DecMapFloat64Uint32V(v, fastpathCheckNilFalse, false, d) - case *map[float64]uint32: - v2, changed2 := fastpathTV.DecMapFloat64Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]uint64: - fastpathTV.DecMapFloat64Uint64V(v, fastpathCheckNilFalse, false, d) - case *map[float64]uint64: - v2, changed2 := fastpathTV.DecMapFloat64Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]uintptr: - fastpathTV.DecMapFloat64UintptrV(v, fastpathCheckNilFalse, false, d) - case *map[float64]uintptr: - v2, changed2 := fastpathTV.DecMapFloat64UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]int: - fastpathTV.DecMapFloat64IntV(v, fastpathCheckNilFalse, false, d) - case *map[float64]int: - v2, changed2 := fastpathTV.DecMapFloat64IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]int8: - fastpathTV.DecMapFloat64Int8V(v, fastpathCheckNilFalse, false, d) - case *map[float64]int8: - v2, changed2 := fastpathTV.DecMapFloat64Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]int16: - fastpathTV.DecMapFloat64Int16V(v, fastpathCheckNilFalse, false, d) - case *map[float64]int16: - v2, changed2 := fastpathTV.DecMapFloat64Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]int32: - fastpathTV.DecMapFloat64Int32V(v, fastpathCheckNilFalse, false, d) - case *map[float64]int32: - v2, changed2 := fastpathTV.DecMapFloat64Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]int64: - fastpathTV.DecMapFloat64Int64V(v, fastpathCheckNilFalse, false, d) - case *map[float64]int64: - v2, changed2 := fastpathTV.DecMapFloat64Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]float32: - fastpathTV.DecMapFloat64Float32V(v, fastpathCheckNilFalse, false, d) - case 
*map[float64]float32: - v2, changed2 := fastpathTV.DecMapFloat64Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]float64: - fastpathTV.DecMapFloat64Float64V(v, fastpathCheckNilFalse, false, d) - case *map[float64]float64: - v2, changed2 := fastpathTV.DecMapFloat64Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[float64]bool: - fastpathTV.DecMapFloat64BoolV(v, fastpathCheckNilFalse, false, d) - case *map[float64]bool: - v2, changed2 := fastpathTV.DecMapFloat64BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []uint: - fastpathTV.DecSliceUintV(v, fastpathCheckNilFalse, false, d) - case *[]uint: - v2, changed2 := fastpathTV.DecSliceUintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]interface{}: - fastpathTV.DecMapUintIntfV(v, fastpathCheckNilFalse, false, d) - case *map[uint]interface{}: - v2, changed2 := fastpathTV.DecMapUintIntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]string: - fastpathTV.DecMapUintStringV(v, fastpathCheckNilFalse, false, d) - case *map[uint]string: - v2, changed2 := fastpathTV.DecMapUintStringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]uint: - fastpathTV.DecMapUintUintV(v, fastpathCheckNilFalse, false, d) - case *map[uint]uint: - v2, changed2 := fastpathTV.DecMapUintUintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]uint8: - fastpathTV.DecMapUintUint8V(v, fastpathCheckNilFalse, false, d) - case *map[uint]uint8: - v2, changed2 := fastpathTV.DecMapUintUint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]uint16: - fastpathTV.DecMapUintUint16V(v, fastpathCheckNilFalse, false, d) - case *map[uint]uint16: - v2, changed2 := fastpathTV.DecMapUintUint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]uint32: - fastpathTV.DecMapUintUint32V(v, fastpathCheckNilFalse, false, d) - case *map[uint]uint32: - v2, changed2 := fastpathTV.DecMapUintUint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]uint64: - fastpathTV.DecMapUintUint64V(v, fastpathCheckNilFalse, false, d) - case *map[uint]uint64: - v2, changed2 := fastpathTV.DecMapUintUint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]uintptr: - fastpathTV.DecMapUintUintptrV(v, fastpathCheckNilFalse, false, d) - case *map[uint]uintptr: - v2, changed2 := fastpathTV.DecMapUintUintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]int: - fastpathTV.DecMapUintIntV(v, fastpathCheckNilFalse, false, d) - case *map[uint]int: - v2, changed2 := fastpathTV.DecMapUintIntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]int8: - fastpathTV.DecMapUintInt8V(v, fastpathCheckNilFalse, false, d) - case *map[uint]int8: - v2, changed2 := fastpathTV.DecMapUintInt8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]int16: - fastpathTV.DecMapUintInt16V(v, fastpathCheckNilFalse, false, d) - case *map[uint]int16: - v2, changed2 := fastpathTV.DecMapUintInt16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]int32: - fastpathTV.DecMapUintInt32V(v, fastpathCheckNilFalse, false, d) - case *map[uint]int32: - v2, changed2 := fastpathTV.DecMapUintInt32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { 
- *v = v2 - } - - case map[uint]int64: - fastpathTV.DecMapUintInt64V(v, fastpathCheckNilFalse, false, d) - case *map[uint]int64: - v2, changed2 := fastpathTV.DecMapUintInt64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]float32: - fastpathTV.DecMapUintFloat32V(v, fastpathCheckNilFalse, false, d) - case *map[uint]float32: - v2, changed2 := fastpathTV.DecMapUintFloat32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]float64: - fastpathTV.DecMapUintFloat64V(v, fastpathCheckNilFalse, false, d) - case *map[uint]float64: - v2, changed2 := fastpathTV.DecMapUintFloat64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint]bool: - fastpathTV.DecMapUintBoolV(v, fastpathCheckNilFalse, false, d) - case *map[uint]bool: - v2, changed2 := fastpathTV.DecMapUintBoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]interface{}: - fastpathTV.DecMapUint8IntfV(v, fastpathCheckNilFalse, false, d) - case *map[uint8]interface{}: - v2, changed2 := fastpathTV.DecMapUint8IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]string: - fastpathTV.DecMapUint8StringV(v, fastpathCheckNilFalse, false, d) - case *map[uint8]string: - v2, changed2 := fastpathTV.DecMapUint8StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]uint: - fastpathTV.DecMapUint8UintV(v, fastpathCheckNilFalse, false, d) - case *map[uint8]uint: - v2, changed2 := fastpathTV.DecMapUint8UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]uint8: - fastpathTV.DecMapUint8Uint8V(v, fastpathCheckNilFalse, false, d) - case *map[uint8]uint8: - v2, changed2 := fastpathTV.DecMapUint8Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]uint16: - fastpathTV.DecMapUint8Uint16V(v, fastpathCheckNilFalse, false, d) - case *map[uint8]uint16: - v2, changed2 := fastpathTV.DecMapUint8Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]uint32: - fastpathTV.DecMapUint8Uint32V(v, fastpathCheckNilFalse, false, d) - case *map[uint8]uint32: - v2, changed2 := fastpathTV.DecMapUint8Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]uint64: - fastpathTV.DecMapUint8Uint64V(v, fastpathCheckNilFalse, false, d) - case *map[uint8]uint64: - v2, changed2 := fastpathTV.DecMapUint8Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]uintptr: - fastpathTV.DecMapUint8UintptrV(v, fastpathCheckNilFalse, false, d) - case *map[uint8]uintptr: - v2, changed2 := fastpathTV.DecMapUint8UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]int: - fastpathTV.DecMapUint8IntV(v, fastpathCheckNilFalse, false, d) - case *map[uint8]int: - v2, changed2 := fastpathTV.DecMapUint8IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]int8: - fastpathTV.DecMapUint8Int8V(v, fastpathCheckNilFalse, false, d) - case *map[uint8]int8: - v2, changed2 := fastpathTV.DecMapUint8Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]int16: - fastpathTV.DecMapUint8Int16V(v, fastpathCheckNilFalse, false, d) - case *map[uint8]int16: - v2, changed2 := fastpathTV.DecMapUint8Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]int32: - fastpathTV.DecMapUint8Int32V(v, 
fastpathCheckNilFalse, false, d) - case *map[uint8]int32: - v2, changed2 := fastpathTV.DecMapUint8Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]int64: - fastpathTV.DecMapUint8Int64V(v, fastpathCheckNilFalse, false, d) - case *map[uint8]int64: - v2, changed2 := fastpathTV.DecMapUint8Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]float32: - fastpathTV.DecMapUint8Float32V(v, fastpathCheckNilFalse, false, d) - case *map[uint8]float32: - v2, changed2 := fastpathTV.DecMapUint8Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]float64: - fastpathTV.DecMapUint8Float64V(v, fastpathCheckNilFalse, false, d) - case *map[uint8]float64: - v2, changed2 := fastpathTV.DecMapUint8Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint8]bool: - fastpathTV.DecMapUint8BoolV(v, fastpathCheckNilFalse, false, d) - case *map[uint8]bool: - v2, changed2 := fastpathTV.DecMapUint8BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []uint16: - fastpathTV.DecSliceUint16V(v, fastpathCheckNilFalse, false, d) - case *[]uint16: - v2, changed2 := fastpathTV.DecSliceUint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]interface{}: - fastpathTV.DecMapUint16IntfV(v, fastpathCheckNilFalse, false, d) - case *map[uint16]interface{}: - v2, changed2 := fastpathTV.DecMapUint16IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]string: - fastpathTV.DecMapUint16StringV(v, fastpathCheckNilFalse, false, d) - case *map[uint16]string: - v2, changed2 := fastpathTV.DecMapUint16StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]uint: - fastpathTV.DecMapUint16UintV(v, fastpathCheckNilFalse, false, d) - case *map[uint16]uint: - v2, changed2 := fastpathTV.DecMapUint16UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]uint8: - fastpathTV.DecMapUint16Uint8V(v, fastpathCheckNilFalse, false, d) - case *map[uint16]uint8: - v2, changed2 := fastpathTV.DecMapUint16Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]uint16: - fastpathTV.DecMapUint16Uint16V(v, fastpathCheckNilFalse, false, d) - case *map[uint16]uint16: - v2, changed2 := fastpathTV.DecMapUint16Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]uint32: - fastpathTV.DecMapUint16Uint32V(v, fastpathCheckNilFalse, false, d) - case *map[uint16]uint32: - v2, changed2 := fastpathTV.DecMapUint16Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]uint64: - fastpathTV.DecMapUint16Uint64V(v, fastpathCheckNilFalse, false, d) - case *map[uint16]uint64: - v2, changed2 := fastpathTV.DecMapUint16Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]uintptr: - fastpathTV.DecMapUint16UintptrV(v, fastpathCheckNilFalse, false, d) - case *map[uint16]uintptr: - v2, changed2 := fastpathTV.DecMapUint16UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]int: - fastpathTV.DecMapUint16IntV(v, fastpathCheckNilFalse, false, d) - case *map[uint16]int: - v2, changed2 := fastpathTV.DecMapUint16IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]int8: - fastpathTV.DecMapUint16Int8V(v, fastpathCheckNilFalse, false, d) - 
case *map[uint16]int8: - v2, changed2 := fastpathTV.DecMapUint16Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]int16: - fastpathTV.DecMapUint16Int16V(v, fastpathCheckNilFalse, false, d) - case *map[uint16]int16: - v2, changed2 := fastpathTV.DecMapUint16Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]int32: - fastpathTV.DecMapUint16Int32V(v, fastpathCheckNilFalse, false, d) - case *map[uint16]int32: - v2, changed2 := fastpathTV.DecMapUint16Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]int64: - fastpathTV.DecMapUint16Int64V(v, fastpathCheckNilFalse, false, d) - case *map[uint16]int64: - v2, changed2 := fastpathTV.DecMapUint16Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]float32: - fastpathTV.DecMapUint16Float32V(v, fastpathCheckNilFalse, false, d) - case *map[uint16]float32: - v2, changed2 := fastpathTV.DecMapUint16Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]float64: - fastpathTV.DecMapUint16Float64V(v, fastpathCheckNilFalse, false, d) - case *map[uint16]float64: - v2, changed2 := fastpathTV.DecMapUint16Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint16]bool: - fastpathTV.DecMapUint16BoolV(v, fastpathCheckNilFalse, false, d) - case *map[uint16]bool: - v2, changed2 := fastpathTV.DecMapUint16BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []uint32: - fastpathTV.DecSliceUint32V(v, fastpathCheckNilFalse, false, d) - case *[]uint32: - v2, changed2 := fastpathTV.DecSliceUint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]interface{}: - fastpathTV.DecMapUint32IntfV(v, fastpathCheckNilFalse, false, d) - case *map[uint32]interface{}: - v2, changed2 := fastpathTV.DecMapUint32IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]string: - fastpathTV.DecMapUint32StringV(v, fastpathCheckNilFalse, false, d) - case *map[uint32]string: - v2, changed2 := fastpathTV.DecMapUint32StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]uint: - fastpathTV.DecMapUint32UintV(v, fastpathCheckNilFalse, false, d) - case *map[uint32]uint: - v2, changed2 := fastpathTV.DecMapUint32UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]uint8: - fastpathTV.DecMapUint32Uint8V(v, fastpathCheckNilFalse, false, d) - case *map[uint32]uint8: - v2, changed2 := fastpathTV.DecMapUint32Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]uint16: - fastpathTV.DecMapUint32Uint16V(v, fastpathCheckNilFalse, false, d) - case *map[uint32]uint16: - v2, changed2 := fastpathTV.DecMapUint32Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]uint32: - fastpathTV.DecMapUint32Uint32V(v, fastpathCheckNilFalse, false, d) - case *map[uint32]uint32: - v2, changed2 := fastpathTV.DecMapUint32Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]uint64: - fastpathTV.DecMapUint32Uint64V(v, fastpathCheckNilFalse, false, d) - case *map[uint32]uint64: - v2, changed2 := fastpathTV.DecMapUint32Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]uintptr: - fastpathTV.DecMapUint32UintptrV(v, fastpathCheckNilFalse, false, d) - case 
*map[uint32]uintptr: - v2, changed2 := fastpathTV.DecMapUint32UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]int: - fastpathTV.DecMapUint32IntV(v, fastpathCheckNilFalse, false, d) - case *map[uint32]int: - v2, changed2 := fastpathTV.DecMapUint32IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]int8: - fastpathTV.DecMapUint32Int8V(v, fastpathCheckNilFalse, false, d) - case *map[uint32]int8: - v2, changed2 := fastpathTV.DecMapUint32Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]int16: - fastpathTV.DecMapUint32Int16V(v, fastpathCheckNilFalse, false, d) - case *map[uint32]int16: - v2, changed2 := fastpathTV.DecMapUint32Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]int32: - fastpathTV.DecMapUint32Int32V(v, fastpathCheckNilFalse, false, d) - case *map[uint32]int32: - v2, changed2 := fastpathTV.DecMapUint32Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]int64: - fastpathTV.DecMapUint32Int64V(v, fastpathCheckNilFalse, false, d) - case *map[uint32]int64: - v2, changed2 := fastpathTV.DecMapUint32Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]float32: - fastpathTV.DecMapUint32Float32V(v, fastpathCheckNilFalse, false, d) - case *map[uint32]float32: - v2, changed2 := fastpathTV.DecMapUint32Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]float64: - fastpathTV.DecMapUint32Float64V(v, fastpathCheckNilFalse, false, d) - case *map[uint32]float64: - v2, changed2 := fastpathTV.DecMapUint32Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint32]bool: - fastpathTV.DecMapUint32BoolV(v, fastpathCheckNilFalse, false, d) - case *map[uint32]bool: - v2, changed2 := fastpathTV.DecMapUint32BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []uint64: - fastpathTV.DecSliceUint64V(v, fastpathCheckNilFalse, false, d) - case *[]uint64: - v2, changed2 := fastpathTV.DecSliceUint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]interface{}: - fastpathTV.DecMapUint64IntfV(v, fastpathCheckNilFalse, false, d) - case *map[uint64]interface{}: - v2, changed2 := fastpathTV.DecMapUint64IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]string: - fastpathTV.DecMapUint64StringV(v, fastpathCheckNilFalse, false, d) - case *map[uint64]string: - v2, changed2 := fastpathTV.DecMapUint64StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]uint: - fastpathTV.DecMapUint64UintV(v, fastpathCheckNilFalse, false, d) - case *map[uint64]uint: - v2, changed2 := fastpathTV.DecMapUint64UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]uint8: - fastpathTV.DecMapUint64Uint8V(v, fastpathCheckNilFalse, false, d) - case *map[uint64]uint8: - v2, changed2 := fastpathTV.DecMapUint64Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]uint16: - fastpathTV.DecMapUint64Uint16V(v, fastpathCheckNilFalse, false, d) - case *map[uint64]uint16: - v2, changed2 := fastpathTV.DecMapUint64Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]uint32: - fastpathTV.DecMapUint64Uint32V(v, fastpathCheckNilFalse, false, d) - case *map[uint64]uint32: - v2, 
changed2 := fastpathTV.DecMapUint64Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]uint64: - fastpathTV.DecMapUint64Uint64V(v, fastpathCheckNilFalse, false, d) - case *map[uint64]uint64: - v2, changed2 := fastpathTV.DecMapUint64Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]uintptr: - fastpathTV.DecMapUint64UintptrV(v, fastpathCheckNilFalse, false, d) - case *map[uint64]uintptr: - v2, changed2 := fastpathTV.DecMapUint64UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]int: - fastpathTV.DecMapUint64IntV(v, fastpathCheckNilFalse, false, d) - case *map[uint64]int: - v2, changed2 := fastpathTV.DecMapUint64IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]int8: - fastpathTV.DecMapUint64Int8V(v, fastpathCheckNilFalse, false, d) - case *map[uint64]int8: - v2, changed2 := fastpathTV.DecMapUint64Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]int16: - fastpathTV.DecMapUint64Int16V(v, fastpathCheckNilFalse, false, d) - case *map[uint64]int16: - v2, changed2 := fastpathTV.DecMapUint64Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]int32: - fastpathTV.DecMapUint64Int32V(v, fastpathCheckNilFalse, false, d) - case *map[uint64]int32: - v2, changed2 := fastpathTV.DecMapUint64Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]int64: - fastpathTV.DecMapUint64Int64V(v, fastpathCheckNilFalse, false, d) - case *map[uint64]int64: - v2, changed2 := fastpathTV.DecMapUint64Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]float32: - fastpathTV.DecMapUint64Float32V(v, fastpathCheckNilFalse, false, d) - case *map[uint64]float32: - v2, changed2 := fastpathTV.DecMapUint64Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]float64: - fastpathTV.DecMapUint64Float64V(v, fastpathCheckNilFalse, false, d) - case *map[uint64]float64: - v2, changed2 := fastpathTV.DecMapUint64Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uint64]bool: - fastpathTV.DecMapUint64BoolV(v, fastpathCheckNilFalse, false, d) - case *map[uint64]bool: - v2, changed2 := fastpathTV.DecMapUint64BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case []uintptr: - fastpathTV.DecSliceUintptrV(v, fastpathCheckNilFalse, false, d) - case *[]uintptr: - v2, changed2 := fastpathTV.DecSliceUintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]interface{}: - fastpathTV.DecMapUintptrIntfV(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]interface{}: - v2, changed2 := fastpathTV.DecMapUintptrIntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]string: - fastpathTV.DecMapUintptrStringV(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]string: - v2, changed2 := fastpathTV.DecMapUintptrStringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]uint: - fastpathTV.DecMapUintptrUintV(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]uint: - v2, changed2 := fastpathTV.DecMapUintptrUintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } - - case map[uintptr]uint8: - fastpathTV.DecMapUintptrUint8V(v, fastpathCheckNilFalse, false, d) - case *map[uintptr]uint8: - v2, changed2 
:= fastpathTV.DecMapUintptrUint8V(*v, fastpathCheckNilFalse, true, d)
-		if changed2 {
-			*v = v2
-		}
-
-	case map[uintptr]uint16:
-		fastpathTV.DecMapUintptrUint16V(v, fastpathCheckNilFalse, false, d)
-	case *map[uintptr]uint16:
-		v2, changed2 := fastpathTV.DecMapUintptrUint16V(*v, fastpathCheckNilFalse, true, d)
-		if changed2 {
-			*v = v2
-		}

[… roughly two thousand further deleted lines of this machine-generated fastpath file (apparently the vendored ugorji/go codec fast-path.generated.go) follow the same template and are elided here. The type switch continues with an identical case pair for every remaining combination: map[uintptr]T, []int, map[int]T, []int8, map[int8]T, []int16, map[int16]T, []int32, map[int32]T, []int64, map[int64]T, []bool, and map[bool]T. In each pair the value case calls the matching fastpathTV.DecSlice…V/DecMap…V helper in place, and the pointer case reassigns *v = v2 when changed2 is set. A default arm returns false (with a `_ = v` workaround referencing golang/go#12927), and the switch function otherwise returns true. Under the `// -- -- fast path functions` banner, the rest of the hunk deletes the specialized decoder bodies themselves: a fastpathDec…R reflect dispatcher plus Dec…X/Dec…V pairs for each slice and map type, all built on one pattern. Slices: nil handling via TryDecodeAsNil, capacity inference via decInferLen, a bounded element loop when the container length is known (calling d.swallow() or d.arrayCannotExpand() for elements beyond a non-growable destination), and a CheckBreak-terminated loop for indefinite-length containers. Maps: ReadMapStart, sizing via decInferLen, []byte keys interned through d.string, optional value reuse governed by MapValueReset/InterfaceReset, and containerMapKey/containerMapValue/containerMapEnd state signalling around each entry. …]
-func (f *decFnInfo) fastpathDecMapIntfUint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]uint8) - v, changed := fastpathTV.DecMapIntfUint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]uint8) - fastpathTV.DecMapIntfUint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfUint8X(vp *map[interface{}]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfUint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfUint8V(v map[interface{}]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[interface{}]uint8, xlen) - changed = true - } - - var mk interface{} - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfUint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]uint16) - v, changed := fastpathTV.DecMapIntfUint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]uint16) - fastpathTV.DecMapIntfUint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfUint16X(vp *map[interface{}]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfUint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfUint16V(v map[interface{}]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[interface{}]uint16, xlen) - changed = true - } - - var mk interface{} - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - 
cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfUint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]uint32) - v, changed := fastpathTV.DecMapIntfUint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]uint32) - fastpathTV.DecMapIntfUint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfUint32X(vp *map[interface{}]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfUint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfUint32V(v map[interface{}]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[interface{}]uint32, xlen) - changed = true - } - - var mk interface{} - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfUint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]uint64) - v, changed := fastpathTV.DecMapIntfUint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]uint64) - fastpathTV.DecMapIntfUint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfUint64X(vp *map[interface{}]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfUint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfUint64V(v map[interface{}]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[interface{}]uint64, xlen) - changed = true - } - - var mk interface{} - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; 
!dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfUintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]uintptr) - v, changed := fastpathTV.DecMapIntfUintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]uintptr) - fastpathTV.DecMapIntfUintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfUintptrX(vp *map[interface{}]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfUintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfUintptrV(v map[interface{}]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[interface{}]uintptr, xlen) - changed = true - } - - var mk interface{} - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfIntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]int) - v, changed := fastpathTV.DecMapIntfIntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]int) - fastpathTV.DecMapIntfIntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfIntX(vp *map[interface{}]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfIntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfIntV(v map[interface{}]int, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[interface{}]int, xlen) - changed = true - } - - var mk interface{} - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if 
cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfInt8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]int8) - v, changed := fastpathTV.DecMapIntfInt8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]int8) - fastpathTV.DecMapIntfInt8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfInt8X(vp *map[interface{}]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfInt8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfInt8V(v map[interface{}]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[interface{}]int8, xlen) - changed = true - } - - var mk interface{} - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfInt16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]int16) - v, changed := fastpathTV.DecMapIntfInt16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]int16) - fastpathTV.DecMapIntfInt16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfInt16X(vp *map[interface{}]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfInt16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfInt16V(v map[interface{}]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[interface{}]int16, xlen) - changed = true - } - - var mk interface{} - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if 
cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfInt32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]int32) - v, changed := fastpathTV.DecMapIntfInt32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]int32) - fastpathTV.DecMapIntfInt32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfInt32X(vp *map[interface{}]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfInt32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfInt32V(v map[interface{}]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[interface{}]int32, xlen) - changed = true - } - - var mk interface{} - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfInt64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]int64) - v, changed := fastpathTV.DecMapIntfInt64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]int64) - fastpathTV.DecMapIntfInt64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfInt64X(vp *map[interface{}]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfInt64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfInt64V(v map[interface{}]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = 
make(map[interface{}]int64, xlen) - changed = true - } - - var mk interface{} - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfFloat32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]float32) - v, changed := fastpathTV.DecMapIntfFloat32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]float32) - fastpathTV.DecMapIntfFloat32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfFloat32X(vp *map[interface{}]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfFloat32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfFloat32V(v map[interface{}]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[interface{}]float32, xlen) - changed = true - } - - var mk interface{} - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfFloat64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]float64) - v, changed := fastpathTV.DecMapIntfFloat64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]float64) - fastpathTV.DecMapIntfFloat64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfFloat64X(vp *map[interface{}]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfFloat64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfFloat64V(v map[interface{}]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && 
dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[interface{}]float64, xlen) - changed = true - } - - var mk interface{} - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntfBoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]bool) - v, changed := fastpathTV.DecMapIntfBoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[interface{}]bool) - fastpathTV.DecMapIntfBoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntfBoolX(vp *map[interface{}]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfBoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntfBoolV(v map[interface{}]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[interface{}]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[interface{}]bool, xlen) - changed = true - } - - var mk interface{} - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringIntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]interface{}) - v, changed := fastpathTV.DecMapStringIntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]interface{}) - fastpathTV.DecMapStringIntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringIntfX(vp *map[string]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringIntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringIntfV(v 
map[string]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[string]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32) - v = make(map[string]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk string - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringStringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]string) - v, changed := fastpathTV.DecMapStringStringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]string) - fastpathTV.DecMapStringStringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringStringX(vp *map[string]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringStringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringStringV(v map[string]string, checkNil bool, canChange bool, - d *Decoder) (_ map[string]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32) - v = make(map[string]string, xlen) - changed = true - } - - var mk string - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringUintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]uint) - v, changed := fastpathTV.DecMapStringUintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]uint) - fastpathTV.DecMapStringUintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringUintX(vp *map[string]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringUintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) 
DecMapStringUintV(v map[string]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[string]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[string]uint, xlen) - changed = true - } - - var mk string - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringUint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]uint8) - v, changed := fastpathTV.DecMapStringUint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]uint8) - fastpathTV.DecMapStringUint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringUint8X(vp *map[string]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringUint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringUint8V(v map[string]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[string]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[string]uint8, xlen) - changed = true - } - - var mk string - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringUint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]uint16) - v, changed := fastpathTV.DecMapStringUint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]uint16) - fastpathTV.DecMapStringUint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringUint16X(vp *map[string]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringUint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringUint16V(v map[string]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[string]uint16, changed 
bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[string]uint16, xlen) - changed = true - } - - var mk string - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringUint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]uint32) - v, changed := fastpathTV.DecMapStringUint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]uint32) - fastpathTV.DecMapStringUint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringUint32X(vp *map[string]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringUint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringUint32V(v map[string]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[string]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[string]uint32, xlen) - changed = true - } - - var mk string - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringUint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]uint64) - v, changed := fastpathTV.DecMapStringUint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]uint64) - fastpathTV.DecMapStringUint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringUint64X(vp *map[string]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringUint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringUint64V(v map[string]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[string]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = 
true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[string]uint64, xlen) - changed = true - } - - var mk string - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringUintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]uintptr) - v, changed := fastpathTV.DecMapStringUintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]uintptr) - fastpathTV.DecMapStringUintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringUintptrX(vp *map[string]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringUintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringUintptrV(v map[string]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[string]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[string]uintptr, xlen) - changed = true - } - - var mk string - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringIntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]int) - v, changed := fastpathTV.DecMapStringIntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]int) - fastpathTV.DecMapStringIntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringIntX(vp *map[string]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringIntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringIntV(v map[string]int, checkNil bool, canChange bool, - d *Decoder) (_ map[string]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := 
decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[string]int, xlen) - changed = true - } - - var mk string - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringInt8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]int8) - v, changed := fastpathTV.DecMapStringInt8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]int8) - fastpathTV.DecMapStringInt8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringInt8X(vp *map[string]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringInt8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringInt8V(v map[string]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[string]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[string]int8, xlen) - changed = true - } - - var mk string - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringInt16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]int16) - v, changed := fastpathTV.DecMapStringInt16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]int16) - fastpathTV.DecMapStringInt16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringInt16X(vp *map[string]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringInt16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringInt16V(v map[string]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[string]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[string]int16, xlen) - changed = true - } - - var mk string - var mv int16 - if 
containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringInt32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]int32) - v, changed := fastpathTV.DecMapStringInt32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]int32) - fastpathTV.DecMapStringInt32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringInt32X(vp *map[string]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringInt32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringInt32V(v map[string]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[string]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[string]int32, xlen) - changed = true - } - - var mk string - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringInt64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]int64) - v, changed := fastpathTV.DecMapStringInt64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]int64) - fastpathTV.DecMapStringInt64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringInt64X(vp *map[string]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringInt64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringInt64V(v map[string]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[string]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[string]int64, xlen) - changed = true - } - - var mk string - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = 
dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringFloat32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]float32) - v, changed := fastpathTV.DecMapStringFloat32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]float32) - fastpathTV.DecMapStringFloat32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringFloat32X(vp *map[string]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringFloat32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringFloat32V(v map[string]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[string]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[string]float32, xlen) - changed = true - } - - var mk string - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringFloat64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]float64) - v, changed := fastpathTV.DecMapStringFloat64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]float64) - fastpathTV.DecMapStringFloat64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringFloat64X(vp *map[string]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringFloat64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringFloat64V(v map[string]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[string]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[string]float64, xlen) - changed = true - } - - var mk string - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv 
= dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapStringBoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]bool) - v, changed := fastpathTV.DecMapStringBoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[string]bool) - fastpathTV.DecMapStringBoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapStringBoolX(vp *map[string]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringBoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapStringBoolV(v map[string]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[string]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[string]bool, xlen) - changed = true - } - - var mk string - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapFloat32IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]interface{}) - v, changed := fastpathTV.DecMapFloat32IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[float32]interface{}) - fastpathTV.DecMapFloat32IntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapFloat32IntfX(vp *map[float32]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32IntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapFloat32IntfV(v map[float32]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[float32]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[float32]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk float32 - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - 
-			d.decode(&mv)
-			if v != nil {
-				v[mk] = mv
-			}
-		}
-	} else if containerLen < 0 {
-		for j := 0; !dd.CheckBreak(); j++ {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			mk = float32(dd.DecodeFloat(true))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			if mapGet {
-				mv = v[mk]
-			} else {
-				mv = nil
-			}
-			d.decode(&mv)
-			if v != nil {
-				v[mk] = mv
-			}
-		}
-	}
-	if cr != nil {
-		cr.sendContainerState(containerMapEnd)
-	}
-	return v, changed
-}
-
-func (f *decFnInfo) fastpathDecMapFloat32StringR(rv reflect.Value) {
-	if rv.CanAddr() {
-		vp := rv.Addr().Interface().(*map[float32]string)
-		v, changed := fastpathTV.DecMapFloat32StringV(*vp, fastpathCheckNilFalse, true, f.d)
-		if changed {
-			*vp = v
-		}
-	} else {
-		v := rv.Interface().(map[float32]string)
-		fastpathTV.DecMapFloat32StringV(v, fastpathCheckNilFalse, false, f.d)
-	}
-}
-func (f fastpathT) DecMapFloat32StringX(vp *map[float32]string, checkNil bool, d *Decoder) {
-	v, changed := f.DecMapFloat32StringV(*vp, checkNil, true, d)
-	if changed {
-		*vp = v
-	}
-}
-func (_ fastpathT) DecMapFloat32StringV(v map[float32]string, checkNil bool, canChange bool,
-	d *Decoder) (_ map[float32]string, changed bool) {
-	dd := d.d
-	cr := d.cr
-
-	if checkNil && dd.TryDecodeAsNil() {
-		if v != nil {
-			changed = true
-		}
-		return nil, changed
-	}
-
-	containerLen := dd.ReadMapStart()
-	if canChange && v == nil {
-		xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
-		v = make(map[float32]string, xlen)
-		changed = true
-	}
-
-	var mk float32
-	var mv string
-	if containerLen > 0 {
-		for j := 0; j < containerLen; j++ {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			mk = float32(dd.DecodeFloat(true))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			mv = dd.DecodeString()
-			if v != nil {
-				v[mk] = mv
-			}
-		}
-	} else if containerLen < 0 {
-		for j := 0; !dd.CheckBreak(); j++ {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			mk = float32(dd.DecodeFloat(true))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			mv = dd.DecodeString()
-			if v != nil {
-				v[mk] = mv
-			}
-		}
-	}
-	if cr != nil {
-		cr.sendContainerState(containerMapEnd)
-	}
-	return v, changed
-}
[remaining ~40 deleted instantiations of this same machine-generated fastpathDecMap<Key><Value>R / DecMap<Key><Value>X / DecMap<Key><Value>V triple: float32, float64, and uint keys paired in turn with interface{}, string, uint, uint8, uint16, uint32, uint64, uintptr, int, int8, int16, int32, int64, float32, float64, and bool values; instances differ only in the key/value types, the matching dd.Decode* calls (d.decode(&mv) with the mapGet reuse check for interface{} values), and the decInferLen size hint]
-func (_ fastpathT) DecMapUintInt8V(v
map[uint]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint]int8, xlen) - changed = true - } - - var mk uint - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintInt16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]int16) - v, changed := fastpathTV.DecMapUintInt16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]int16) - fastpathTV.DecMapUintInt16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintInt16X(vp *map[uint]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintInt16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintInt16V(v map[uint]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint]int16, xlen) - changed = true - } - - var mk uint - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintInt32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]int32) - v, changed := fastpathTV.DecMapUintInt32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]int32) - fastpathTV.DecMapUintInt32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintInt32X(vp *map[uint]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintInt32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintInt32V(v map[uint]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil 
&& dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint]int32, xlen) - changed = true - } - - var mk uint - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintInt64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]int64) - v, changed := fastpathTV.DecMapUintInt64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]int64) - fastpathTV.DecMapUintInt64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintInt64X(vp *map[uint]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintInt64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintInt64V(v map[uint]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint]int64, xlen) - changed = true - } - - var mk uint - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintFloat32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]float32) - v, changed := fastpathTV.DecMapUintFloat32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]float32) - fastpathTV.DecMapUintFloat32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintFloat32X(vp *map[uint]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintFloat32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintFloat32V(v map[uint]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := 
dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint]float32, xlen) - changed = true - } - - var mk uint - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintFloat64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]float64) - v, changed := fastpathTV.DecMapUintFloat64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]float64) - fastpathTV.DecMapUintFloat64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintFloat64X(vp *map[uint]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintFloat64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintFloat64V(v map[uint]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint]float64, xlen) - changed = true - } - - var mk uint - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintBoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]bool) - v, changed := fastpathTV.DecMapUintBoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint]bool) - fastpathTV.DecMapUintBoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintBoolX(vp *map[uint]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintBoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintBoolV(v map[uint]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[uint]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 
9) - v = make(map[uint]bool, xlen) - changed = true - } - - var mk uint - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]interface{}) - v, changed := fastpathTV.DecMapUint8IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]interface{}) - fastpathTV.DecMapUint8IntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8IntfX(vp *map[uint8]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8IntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8IntfV(v map[uint8]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[uint8]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk uint8 - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]string) - v, changed := fastpathTV.DecMapUint8StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]string) - fastpathTV.DecMapUint8StringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8StringX(vp *map[uint8]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8StringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8StringV(v map[uint8]string, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := 
decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[uint8]string, xlen) - changed = true - } - - var mk uint8 - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]uint) - v, changed := fastpathTV.DecMapUint8UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]uint) - fastpathTV.DecMapUint8UintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8UintX(vp *map[uint8]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8UintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8UintV(v map[uint8]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint8]uint, xlen) - changed = true - } - - var mk uint8 - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]uint8) - v, changed := fastpathTV.DecMapUint8Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]uint8) - fastpathTV.DecMapUint8Uint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8Uint8X(vp *map[uint8]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Uint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Uint8V(v map[uint8]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[uint8]uint8, xlen) - changed = true - } - - var mk uint8 - var mv uint8 - if 
containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]uint16) - v, changed := fastpathTV.DecMapUint8Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]uint16) - fastpathTV.DecMapUint8Uint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8Uint16X(vp *map[uint8]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Uint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Uint16V(v map[uint8]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[uint8]uint16, xlen) - changed = true - } - - var mk uint8 - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]uint32) - v, changed := fastpathTV.DecMapUint8Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]uint32) - fastpathTV.DecMapUint8Uint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8Uint32X(vp *map[uint8]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Uint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Uint32V(v map[uint8]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[uint8]uint32, xlen) - changed = true - } - - var mk uint8 - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - 
cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]uint64) - v, changed := fastpathTV.DecMapUint8Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]uint64) - fastpathTV.DecMapUint8Uint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8Uint64X(vp *map[uint8]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Uint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Uint64V(v map[uint8]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint8]uint64, xlen) - changed = true - } - - var mk uint8 - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]uintptr) - v, changed := fastpathTV.DecMapUint8UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]uintptr) - fastpathTV.DecMapUint8UintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8UintptrX(vp *map[uint8]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8UintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8UintptrV(v map[uint8]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint8]uintptr, xlen) - changed = true - } - - var mk uint8 - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - 
cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]int) - v, changed := fastpathTV.DecMapUint8IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]int) - fastpathTV.DecMapUint8IntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8IntX(vp *map[uint8]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8IntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8IntV(v map[uint8]int, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint8]int, xlen) - changed = true - } - - var mk uint8 - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]int8) - v, changed := fastpathTV.DecMapUint8Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]int8) - fastpathTV.DecMapUint8Int8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8Int8X(vp *map[uint8]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Int8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Int8V(v map[uint8]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[uint8]int8, xlen) - changed = true - } - - var mk uint8 - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - 
for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]int16) - v, changed := fastpathTV.DecMapUint8Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]int16) - fastpathTV.DecMapUint8Int16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8Int16X(vp *map[uint8]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Int16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Int16V(v map[uint8]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[uint8]int16, xlen) - changed = true - } - - var mk uint8 - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]int32) - v, changed := fastpathTV.DecMapUint8Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]int32) - fastpathTV.DecMapUint8Int32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8Int32X(vp *map[uint8]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Int32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Int32V(v map[uint8]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[uint8]int32, xlen) - changed = true - } - - var mk uint8 - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != 
nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]int64) - v, changed := fastpathTV.DecMapUint8Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]int64) - fastpathTV.DecMapUint8Int64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8Int64X(vp *map[uint8]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Int64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Int64V(v map[uint8]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint8]int64, xlen) - changed = true - } - - var mk uint8 - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]float32) - v, changed := fastpathTV.DecMapUint8Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]float32) - fastpathTV.DecMapUint8Float32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8Float32X(vp *map[uint8]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Float32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Float32V(v map[uint8]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[uint8]float32, xlen) - changed = true - } - - var mk uint8 - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } 
- } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]float64) - v, changed := fastpathTV.DecMapUint8Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]float64) - fastpathTV.DecMapUint8Float64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8Float64X(vp *map[uint8]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Float64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8Float64V(v map[uint8]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint8]float64, xlen) - changed = true - } - - var mk uint8 - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint8BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]bool) - v, changed := fastpathTV.DecMapUint8BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint8]bool) - fastpathTV.DecMapUint8BoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint8BoolX(vp *map[uint8]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8BoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint8BoolV(v map[uint8]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[uint8]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[uint8]bool, xlen) - changed = true - } - - var mk uint8 - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16IntfR(rv 
reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]interface{}) - v, changed := fastpathTV.DecMapUint16IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]interface{}) - fastpathTV.DecMapUint16IntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16IntfX(vp *map[uint16]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16IntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16IntfV(v map[uint16]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[uint16]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk uint16 - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]string) - v, changed := fastpathTV.DecMapUint16StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]string) - fastpathTV.DecMapUint16StringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16StringX(vp *map[uint16]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16StringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16StringV(v map[uint16]string, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[uint16]string, xlen) - changed = true - } - - var mk uint16 - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, 
changed -} - -func (f *decFnInfo) fastpathDecMapUint16UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]uint) - v, changed := fastpathTV.DecMapUint16UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]uint) - fastpathTV.DecMapUint16UintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16UintX(vp *map[uint16]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16UintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16UintV(v map[uint16]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint16]uint, xlen) - changed = true - } - - var mk uint16 - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]uint8) - v, changed := fastpathTV.DecMapUint16Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]uint8) - fastpathTV.DecMapUint16Uint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16Uint8X(vp *map[uint16]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Uint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Uint8V(v map[uint16]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[uint16]uint8, xlen) - changed = true - } - - var mk uint16 - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16Uint16R(rv reflect.Value) { - if rv.CanAddr() 
{ - vp := rv.Addr().Interface().(*map[uint16]uint16) - v, changed := fastpathTV.DecMapUint16Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]uint16) - fastpathTV.DecMapUint16Uint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16Uint16X(vp *map[uint16]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Uint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Uint16V(v map[uint16]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4) - v = make(map[uint16]uint16, xlen) - changed = true - } - - var mk uint16 - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]uint32) - v, changed := fastpathTV.DecMapUint16Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]uint32) - fastpathTV.DecMapUint16Uint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16Uint32X(vp *map[uint16]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Uint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Uint32V(v map[uint16]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[uint16]uint32, xlen) - changed = true - } - - var mk uint16 - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]uint64) - v, changed := 
fastpathTV.DecMapUint16Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]uint64) - fastpathTV.DecMapUint16Uint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16Uint64X(vp *map[uint16]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Uint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Uint64V(v map[uint16]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint16]uint64, xlen) - changed = true - } - - var mk uint16 - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]uintptr) - v, changed := fastpathTV.DecMapUint16UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]uintptr) - fastpathTV.DecMapUint16UintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16UintptrX(vp *map[uint16]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16UintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16UintptrV(v map[uint16]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint16]uintptr, xlen) - changed = true - } - - var mk uint16 - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]int) - v, changed := fastpathTV.DecMapUint16IntV(*vp, fastpathCheckNilFalse, 
true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]int) - fastpathTV.DecMapUint16IntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16IntX(vp *map[uint16]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16IntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16IntV(v map[uint16]int, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint16]int, xlen) - changed = true - } - - var mk uint16 - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]int8) - v, changed := fastpathTV.DecMapUint16Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]int8) - fastpathTV.DecMapUint16Int8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16Int8X(vp *map[uint16]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Int8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Int8V(v map[uint16]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[uint16]int8, xlen) - changed = true - } - - var mk uint16 - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]int16) - v, changed := fastpathTV.DecMapUint16Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]int16) - fastpathTV.DecMapUint16Int16V(v, 
fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16Int16X(vp *map[uint16]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Int16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Int16V(v map[uint16]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4) - v = make(map[uint16]int16, xlen) - changed = true - } - - var mk uint16 - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]int32) - v, changed := fastpathTV.DecMapUint16Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]int32) - fastpathTV.DecMapUint16Int32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16Int32X(vp *map[uint16]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Int32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Int32V(v map[uint16]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[uint16]int32, xlen) - changed = true - } - - var mk uint16 - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]int64) - v, changed := fastpathTV.DecMapUint16Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]int64) - fastpathTV.DecMapUint16Int64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16Int64X(vp *map[uint16]int64, 
checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Int64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Int64V(v map[uint16]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint16]int64, xlen) - changed = true - } - - var mk uint16 - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]float32) - v, changed := fastpathTV.DecMapUint16Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]float32) - fastpathTV.DecMapUint16Float32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16Float32X(vp *map[uint16]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Float32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Float32V(v map[uint16]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[uint16]float32, xlen) - changed = true - } - - var mk uint16 - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]float64) - v, changed := fastpathTV.DecMapUint16Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]float64) - fastpathTV.DecMapUint16Float64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16Float64X(vp *map[uint16]float64, checkNil bool, d *Decoder) { - v, changed := 
f.DecMapUint16Float64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16Float64V(v map[uint16]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint16]float64, xlen) - changed = true - } - - var mk uint16 - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint16BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]bool) - v, changed := fastpathTV.DecMapUint16BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint16]bool) - fastpathTV.DecMapUint16BoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint16BoolX(vp *map[uint16]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16BoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint16BoolV(v map[uint16]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[uint16]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[uint16]bool, xlen) - changed = true - } - - var mk uint16 - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]interface{}) - v, changed := fastpathTV.DecMapUint32IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]interface{}) - fastpathTV.DecMapUint32IntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32IntfX(vp *map[uint32]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32IntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) 
DecMapUint32IntfV(v map[uint32]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[uint32]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk uint32 - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]string) - v, changed := fastpathTV.DecMapUint32StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]string) - fastpathTV.DecMapUint32StringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32StringX(vp *map[uint32]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32StringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32StringV(v map[uint32]string, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) - v = make(map[uint32]string, xlen) - changed = true - } - - var mk uint32 - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]uint) - v, changed := fastpathTV.DecMapUint32UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]uint) - fastpathTV.DecMapUint32UintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32UintX(vp *map[uint32]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32UintV(*vp, checkNil, true, d) - if changed 
{ - *vp = v - } -} -func (_ fastpathT) DecMapUint32UintV(v map[uint32]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint32]uint, xlen) - changed = true - } - - var mk uint32 - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]uint8) - v, changed := fastpathTV.DecMapUint32Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]uint8) - fastpathTV.DecMapUint32Uint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32Uint8X(vp *map[uint32]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Uint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Uint8V(v map[uint32]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[uint32]uint8, xlen) - changed = true - } - - var mk uint32 - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]uint16) - v, changed := fastpathTV.DecMapUint32Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]uint16) - fastpathTV.DecMapUint32Uint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32Uint16X(vp *map[uint32]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Uint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Uint16V(v map[uint32]uint16, 
checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[uint32]uint16, xlen) - changed = true - } - - var mk uint32 - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]uint32) - v, changed := fastpathTV.DecMapUint32Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]uint32) - fastpathTV.DecMapUint32Uint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32Uint32X(vp *map[uint32]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Uint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Uint32V(v map[uint32]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[uint32]uint32, xlen) - changed = true - } - - var mk uint32 - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]uint64) - v, changed := fastpathTV.DecMapUint32Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]uint64) - fastpathTV.DecMapUint32Uint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32Uint64X(vp *map[uint32]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Uint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Uint64V(v map[uint32]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]uint64, 
changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint32]uint64, xlen) - changed = true - } - - var mk uint32 - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]uintptr) - v, changed := fastpathTV.DecMapUint32UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]uintptr) - fastpathTV.DecMapUint32UintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32UintptrX(vp *map[uint32]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32UintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32UintptrV(v map[uint32]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint32]uintptr, xlen) - changed = true - } - - var mk uint32 - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]int) - v, changed := fastpathTV.DecMapUint32IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]int) - fastpathTV.DecMapUint32IntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32IntX(vp *map[uint32]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32IntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32IntV(v map[uint32]int, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if 
v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint32]int, xlen) - changed = true - } - - var mk uint32 - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]int8) - v, changed := fastpathTV.DecMapUint32Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]int8) - fastpathTV.DecMapUint32Int8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32Int8X(vp *map[uint32]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Int8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Int8V(v map[uint32]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[uint32]int8, xlen) - changed = true - } - - var mk uint32 - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]int16) - v, changed := fastpathTV.DecMapUint32Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]int16) - fastpathTV.DecMapUint32Int16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32Int16X(vp *map[uint32]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Int16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Int16V(v map[uint32]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && 
v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[uint32]int16, xlen) - changed = true - } - - var mk uint32 - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]int32) - v, changed := fastpathTV.DecMapUint32Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]int32) - fastpathTV.DecMapUint32Int32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32Int32X(vp *map[uint32]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Int32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Int32V(v map[uint32]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[uint32]int32, xlen) - changed = true - } - - var mk uint32 - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]int64) - v, changed := fastpathTV.DecMapUint32Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]int64) - fastpathTV.DecMapUint32Int64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32Int64X(vp *map[uint32]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Int64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Int64V(v map[uint32]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint32]int64, xlen) - 
changed = true - } - - var mk uint32 - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]float32) - v, changed := fastpathTV.DecMapUint32Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]float32) - fastpathTV.DecMapUint32Float32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32Float32X(vp *map[uint32]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Float32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Float32V(v map[uint32]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[uint32]float32, xlen) - changed = true - } - - var mk uint32 - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]float64) - v, changed := fastpathTV.DecMapUint32Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]float64) - fastpathTV.DecMapUint32Float64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32Float64X(vp *map[uint32]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Float64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32Float64V(v map[uint32]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint32]float64, xlen) - changed = true - } - - var mk uint32 - var mv float64 - 
if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint32BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]bool) - v, changed := fastpathTV.DecMapUint32BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint32]bool) - fastpathTV.DecMapUint32BoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint32BoolX(vp *map[uint32]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32BoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint32BoolV(v map[uint32]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[uint32]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[uint32]bool, xlen) - changed = true - } - - var mk uint32 - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]interface{}) - v, changed := fastpathTV.DecMapUint64IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]interface{}) - fastpathTV.DecMapUint64IntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64IntfX(vp *map[uint64]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64IntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64IntfV(v map[uint64]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[uint64]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk uint64 - var mv interface{} - if containerLen > 0 { - for j := 0; j < 
containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]string) - v, changed := fastpathTV.DecMapUint64StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]string) - fastpathTV.DecMapUint64StringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64StringX(vp *map[uint64]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64StringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64StringV(v map[uint64]string, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[uint64]string, xlen) - changed = true - } - - var mk uint64 - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]uint) - v, changed := fastpathTV.DecMapUint64UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]uint) - fastpathTV.DecMapUint64UintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64UintX(vp *map[uint64]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64UintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64UintV(v map[uint64]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint64]uint, xlen) - changed = true - } - - var mk uint64 - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - 
cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]uint8) - v, changed := fastpathTV.DecMapUint64Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]uint8) - fastpathTV.DecMapUint64Uint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64Uint8X(vp *map[uint64]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Uint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Uint8V(v map[uint64]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint64]uint8, xlen) - changed = true - } - - var mk uint64 - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]uint16) - v, changed := fastpathTV.DecMapUint64Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]uint16) - fastpathTV.DecMapUint64Uint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64Uint16X(vp *map[uint64]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Uint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Uint16V(v map[uint64]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint64]uint16, xlen) - changed = true - } - - var mk uint64 - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - 
cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]uint32) - v, changed := fastpathTV.DecMapUint64Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]uint32) - fastpathTV.DecMapUint64Uint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64Uint32X(vp *map[uint64]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Uint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Uint32V(v map[uint64]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint64]uint32, xlen) - changed = true - } - - var mk uint64 - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]uint64) - v, changed := fastpathTV.DecMapUint64Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]uint64) - fastpathTV.DecMapUint64Uint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64Uint64X(vp *map[uint64]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Uint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Uint64V(v map[uint64]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint64]uint64, xlen) - changed = true - } - - var mk uint64 - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } 
- } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]uintptr) - v, changed := fastpathTV.DecMapUint64UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]uintptr) - fastpathTV.DecMapUint64UintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64UintptrX(vp *map[uint64]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64UintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64UintptrV(v map[uint64]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint64]uintptr, xlen) - changed = true - } - - var mk uint64 - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]int) - v, changed := fastpathTV.DecMapUint64IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]int) - fastpathTV.DecMapUint64IntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64IntX(vp *map[uint64]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64IntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64IntV(v map[uint64]int, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint64]int, xlen) - changed = true - } - - var mk uint64 - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - 
cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]int8) - v, changed := fastpathTV.DecMapUint64Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]int8) - fastpathTV.DecMapUint64Int8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64Int8X(vp *map[uint64]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Int8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Int8V(v map[uint64]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint64]int8, xlen) - changed = true - } - - var mk uint64 - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]int16) - v, changed := fastpathTV.DecMapUint64Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]int16) - fastpathTV.DecMapUint64Int16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64Int16X(vp *map[uint64]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Int16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Int16V(v map[uint64]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uint64]int16, xlen) - changed = true - } - - var mk uint64 - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = 
int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]int32) - v, changed := fastpathTV.DecMapUint64Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]int32) - fastpathTV.DecMapUint64Int32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64Int32X(vp *map[uint64]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Int32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Int32V(v map[uint64]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint64]int32, xlen) - changed = true - } - - var mk uint64 - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]int64) - v, changed := fastpathTV.DecMapUint64Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]int64) - fastpathTV.DecMapUint64Int64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64Int64X(vp *map[uint64]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Int64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Int64V(v map[uint64]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint64]int64, xlen) - changed = true - } - - var mk uint64 - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} 
- -func (f *decFnInfo) fastpathDecMapUint64Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]float32) - v, changed := fastpathTV.DecMapUint64Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]float32) - fastpathTV.DecMapUint64Float32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64Float32X(vp *map[uint64]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Float32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Float32V(v map[uint64]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uint64]float32, xlen) - changed = true - } - - var mk uint64 - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]float64) - v, changed := fastpathTV.DecMapUint64Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]float64) - fastpathTV.DecMapUint64Float64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64Float64X(vp *map[uint64]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Float64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64Float64V(v map[uint64]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uint64]float64, xlen) - changed = true - } - - var mk uint64 - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUint64BoolR(rv reflect.Value) { - if 
rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]bool) - v, changed := fastpathTV.DecMapUint64BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uint64]bool) - fastpathTV.DecMapUint64BoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUint64BoolX(vp *map[uint64]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64BoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUint64BoolV(v map[uint64]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[uint64]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uint64]bool, xlen) - changed = true - } - - var mk uint64 - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrIntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]interface{}) - v, changed := fastpathTV.DecMapUintptrIntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]interface{}) - fastpathTV.DecMapUintptrIntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrIntfX(vp *map[uintptr]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrIntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrIntfV(v map[uintptr]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[uintptr]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk uintptr - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) 
fastpathDecMapUintptrStringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]string) - v, changed := fastpathTV.DecMapUintptrStringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]string) - fastpathTV.DecMapUintptrStringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrStringX(vp *map[uintptr]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrStringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrStringV(v map[uintptr]string, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[uintptr]string, xlen) - changed = true - } - - var mk uintptr - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrUintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]uint) - v, changed := fastpathTV.DecMapUintptrUintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]uint) - fastpathTV.DecMapUintptrUintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrUintX(vp *map[uintptr]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrUintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrUintV(v map[uintptr]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uintptr]uint, xlen) - changed = true - } - - var mk uintptr - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrUint8R(rv 
reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]uint8) - v, changed := fastpathTV.DecMapUintptrUint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]uint8) - fastpathTV.DecMapUintptrUint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrUint8X(vp *map[uintptr]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrUint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrUint8V(v map[uintptr]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uintptr]uint8, xlen) - changed = true - } - - var mk uintptr - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrUint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]uint16) - v, changed := fastpathTV.DecMapUintptrUint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]uint16) - fastpathTV.DecMapUintptrUint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrUint16X(vp *map[uintptr]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrUint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrUint16V(v map[uintptr]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uintptr]uint16, xlen) - changed = true - } - - var mk uintptr - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrUint32R(rv reflect.Value) { - if 
rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]uint32) - v, changed := fastpathTV.DecMapUintptrUint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]uint32) - fastpathTV.DecMapUintptrUint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrUint32X(vp *map[uintptr]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrUint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrUint32V(v map[uintptr]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uintptr]uint32, xlen) - changed = true - } - - var mk uintptr - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrUint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]uint64) - v, changed := fastpathTV.DecMapUintptrUint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]uint64) - fastpathTV.DecMapUintptrUint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrUint64X(vp *map[uintptr]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrUint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrUint64V(v map[uintptr]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uintptr]uint64, xlen) - changed = true - } - - var mk uintptr - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrUintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp 
:= rv.Addr().Interface().(*map[uintptr]uintptr) - v, changed := fastpathTV.DecMapUintptrUintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]uintptr) - fastpathTV.DecMapUintptrUintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrUintptrX(vp *map[uintptr]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrUintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrUintptrV(v map[uintptr]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uintptr]uintptr, xlen) - changed = true - } - - var mk uintptr - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrIntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]int) - v, changed := fastpathTV.DecMapUintptrIntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]int) - fastpathTV.DecMapUintptrIntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrIntX(vp *map[uintptr]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrIntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrIntV(v map[uintptr]int, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uintptr]int, xlen) - changed = true - } - - var mk uintptr - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrInt8R(rv reflect.Value) { - if rv.CanAddr() { - vp := 
rv.Addr().Interface().(*map[uintptr]int8) - v, changed := fastpathTV.DecMapUintptrInt8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]int8) - fastpathTV.DecMapUintptrInt8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrInt8X(vp *map[uintptr]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrInt8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrInt8V(v map[uintptr]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uintptr]int8, xlen) - changed = true - } - - var mk uintptr - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrInt16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]int16) - v, changed := fastpathTV.DecMapUintptrInt16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]int16) - fastpathTV.DecMapUintptrInt16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrInt16X(vp *map[uintptr]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrInt16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrInt16V(v map[uintptr]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[uintptr]int16, xlen) - changed = true - } - - var mk uintptr - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrInt32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]int32) - v, changed 
:= fastpathTV.DecMapUintptrInt32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]int32) - fastpathTV.DecMapUintptrInt32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrInt32X(vp *map[uintptr]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrInt32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrInt32V(v map[uintptr]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uintptr]int32, xlen) - changed = true - } - - var mk uintptr - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrInt64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]int64) - v, changed := fastpathTV.DecMapUintptrInt64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]int64) - fastpathTV.DecMapUintptrInt64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrInt64X(vp *map[uintptr]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrInt64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrInt64V(v map[uintptr]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uintptr]int64, xlen) - changed = true - } - - var mk uintptr - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrFloat32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]float32) - v, changed := fastpathTV.DecMapUintptrFloat32V(*vp, 
fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]float32) - fastpathTV.DecMapUintptrFloat32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrFloat32X(vp *map[uintptr]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrFloat32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrFloat32V(v map[uintptr]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[uintptr]float32, xlen) - changed = true - } - - var mk uintptr - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrFloat64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]float64) - v, changed := fastpathTV.DecMapUintptrFloat64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]float64) - fastpathTV.DecMapUintptrFloat64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrFloat64X(vp *map[uintptr]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrFloat64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrFloat64V(v map[uintptr]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[uintptr]float64, xlen) - changed = true - } - - var mk uintptr - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapUintptrBoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]bool) - v, changed := 
fastpathTV.DecMapUintptrBoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[uintptr]bool) - fastpathTV.DecMapUintptrBoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapUintptrBoolX(vp *map[uintptr]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrBoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapUintptrBoolV(v map[uintptr]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[uintptr]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[uintptr]bool, xlen) - changed = true - } - - var mk uintptr - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntIntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]interface{}) - v, changed := fastpathTV.DecMapIntIntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]interface{}) - fastpathTV.DecMapIntIntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntIntfX(vp *map[int]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntIntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntIntfV(v map[int]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[int]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[int]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk int - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntStringR(rv reflect.Value) { - if rv.CanAddr() { - vp := 
rv.Addr().Interface().(*map[int]string) - v, changed := fastpathTV.DecMapIntStringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]string) - fastpathTV.DecMapIntStringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntStringX(vp *map[int]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntStringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntStringV(v map[int]string, checkNil bool, canChange bool, - d *Decoder) (_ map[int]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[int]string, xlen) - changed = true - } - - var mk int - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntUintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]uint) - v, changed := fastpathTV.DecMapIntUintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]uint) - fastpathTV.DecMapIntUintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntUintX(vp *map[int]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntUintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntUintV(v map[int]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[int]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int]uint, xlen) - changed = true - } - - var mk int - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntUint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]uint8) - v, changed := fastpathTV.DecMapIntUint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { 
- v := rv.Interface().(map[int]uint8) - fastpathTV.DecMapIntUint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntUint8X(vp *map[int]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntUint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntUint8V(v map[int]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[int]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int]uint8, xlen) - changed = true - } - - var mk int - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntUint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]uint16) - v, changed := fastpathTV.DecMapIntUint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]uint16) - fastpathTV.DecMapIntUint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntUint16X(vp *map[int]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntUint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntUint16V(v map[int]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[int]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int]uint16, xlen) - changed = true - } - - var mk int - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntUint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]uint32) - v, changed := fastpathTV.DecMapIntUint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]uint32) - fastpathTV.DecMapIntUint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) 
DecMapIntUint32X(vp *map[int]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntUint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntUint32V(v map[int]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[int]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int]uint32, xlen) - changed = true - } - - var mk int - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntUint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]uint64) - v, changed := fastpathTV.DecMapIntUint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]uint64) - fastpathTV.DecMapIntUint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntUint64X(vp *map[int]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntUint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntUint64V(v map[int]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[int]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int]uint64, xlen) - changed = true - } - - var mk int - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntUintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]uintptr) - v, changed := fastpathTV.DecMapIntUintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]uintptr) - fastpathTV.DecMapIntUintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntUintptrX(vp *map[int]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntUintptrV(*vp, checkNil, true, d) - if changed { 
- *vp = v - } -} -func (_ fastpathT) DecMapIntUintptrV(v map[int]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[int]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int]uintptr, xlen) - changed = true - } - - var mk int - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntIntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]int) - v, changed := fastpathTV.DecMapIntIntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]int) - fastpathTV.DecMapIntIntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntIntX(vp *map[int]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntIntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntIntV(v map[int]int, checkNil bool, canChange bool, - d *Decoder) (_ map[int]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int]int, xlen) - changed = true - } - - var mk int - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntInt8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]int8) - v, changed := fastpathTV.DecMapIntInt8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]int8) - fastpathTV.DecMapIntInt8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntInt8X(vp *map[int]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntInt8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntInt8V(v map[int]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[int]int8, changed bool) { - dd := 
d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int]int8, xlen) - changed = true - } - - var mk int - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntInt16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]int16) - v, changed := fastpathTV.DecMapIntInt16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]int16) - fastpathTV.DecMapIntInt16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntInt16X(vp *map[int]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntInt16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntInt16V(v map[int]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[int]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int]int16, xlen) - changed = true - } - - var mk int - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntInt32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]int32) - v, changed := fastpathTV.DecMapIntInt32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]int32) - fastpathTV.DecMapIntInt32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntInt32X(vp *map[int]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntInt32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntInt32V(v map[int]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[int]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange 
&& v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int]int32, xlen) - changed = true - } - - var mk int - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntInt64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]int64) - v, changed := fastpathTV.DecMapIntInt64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]int64) - fastpathTV.DecMapIntInt64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntInt64X(vp *map[int]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntInt64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntInt64V(v map[int]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[int]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int]int64, xlen) - changed = true - } - - var mk int - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntFloat32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]float32) - v, changed := fastpathTV.DecMapIntFloat32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]float32) - fastpathTV.DecMapIntFloat32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntFloat32X(vp *map[int]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntFloat32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntFloat32V(v map[int]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[int]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int]float32, xlen) - changed = true - } - - var mk int - var mv 
float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntFloat64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]float64) - v, changed := fastpathTV.DecMapIntFloat64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]float64) - fastpathTV.DecMapIntFloat64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntFloat64X(vp *map[int]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntFloat64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntFloat64V(v map[int]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[int]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int]float64, xlen) - changed = true - } - - var mk int - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapIntBoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]bool) - v, changed := fastpathTV.DecMapIntBoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int]bool) - fastpathTV.DecMapIntBoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapIntBoolX(vp *map[int]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntBoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapIntBoolV(v map[int]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[int]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int]bool, xlen) - changed = true - } - - var mk int - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = 
int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]interface{}) - v, changed := fastpathTV.DecMapInt8IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]interface{}) - fastpathTV.DecMapInt8IntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8IntfX(vp *map[int8]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8IntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8IntfV(v map[int8]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[int8]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk int8 - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]string) - v, changed := fastpathTV.DecMapInt8StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]string) - fastpathTV.DecMapInt8StringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8StringX(vp *map[int8]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8StringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8StringV(v map[int8]string, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[int8]string, xlen) - changed = true - } - - var mk int8 - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - 
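Every generated `...R` function above follows one contract: branch on whether the `reflect.Value` is addressable, and only in that case let the decode *replace* the map itself — allocating one when the input is nil, or dropping it when the stream holds nil (`TryDecodeAsNil`), with the `changed` result telling the caller to write back through the pointer. A minimal sketch of that contract, using hypothetical names (`decodeMap`, `streamNil`) rather than the library's internals:

```go
package main

import (
	"fmt"
	"reflect"
)

// decodeMap mirrors the generated V functions' contract: it may return a
// different map than it was given (nil -> freshly made, or non-nil -> nil
// when the stream encodes nil), and reports that via `changed`.
func decodeMap(v map[int]int8, canChange bool, streamNil bool) (map[int]int8, bool) {
	if streamNil {
		return nil, v != nil // like TryDecodeAsNil: a non-nil input must be dropped
	}
	if canChange && v == nil {
		return map[int]int8{1: 2}, true // like the make(...) branch
	}
	if v != nil {
		v[1] = 2 // fill in place; no reallocation needed
	}
	return v, false
}

func main() {
	var m map[int]int8
	rv := reflect.ValueOf(&m).Elem() // addressable, like the CanAddr branch
	if rv.CanAddr() {
		vp := rv.Addr().Interface().(*map[int]int8)
		if nv, changed := decodeMap(*vp, true, false); changed {
			*vp = nv // the caller observes the newly allocated map
		}
	}
	fmt.Println(m) // map[1:2]
}
```

A non-addressable value takes the other branch with `canChange=false`: the map can be filled in place, but a nil map stays nil because there is nowhere to store a replacement.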
mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]uint) - v, changed := fastpathTV.DecMapInt8UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]uint) - fastpathTV.DecMapInt8UintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8UintX(vp *map[int8]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8UintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8UintV(v map[int8]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int8]uint, xlen) - changed = true - } - - var mk int8 - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]uint8) - v, changed := fastpathTV.DecMapInt8Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]uint8) - fastpathTV.DecMapInt8Uint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8Uint8X(vp *map[int8]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Uint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Uint8V(v map[int8]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[int8]uint8, xlen) - changed = true - } - - var mk int8 - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if 
containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]uint16) - v, changed := fastpathTV.DecMapInt8Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]uint16) - fastpathTV.DecMapInt8Uint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8Uint16X(vp *map[int8]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Uint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Uint16V(v map[int8]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[int8]uint16, xlen) - changed = true - } - - var mk int8 - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]uint32) - v, changed := fastpathTV.DecMapInt8Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]uint32) - fastpathTV.DecMapInt8Uint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8Uint32X(vp *map[int8]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Uint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Uint32V(v map[int8]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[int8]uint32, xlen) - changed = true - } - - var mk int8 - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = 
int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]uint64) - v, changed := fastpathTV.DecMapInt8Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]uint64) - fastpathTV.DecMapInt8Uint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8Uint64X(vp *map[int8]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Uint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Uint64V(v map[int8]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int8]uint64, xlen) - changed = true - } - - var mk int8 - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]uintptr) - v, changed := fastpathTV.DecMapInt8UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]uintptr) - fastpathTV.DecMapInt8UintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8UintptrX(vp *map[int8]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8UintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8UintptrV(v map[int8]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int8]uintptr, xlen) - changed = true - } - - var mk int8 - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if 
v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]int) - v, changed := fastpathTV.DecMapInt8IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]int) - fastpathTV.DecMapInt8IntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8IntX(vp *map[int8]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8IntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8IntV(v map[int8]int, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int8]int, xlen) - changed = true - } - - var mk int8 - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]int8) - v, changed := fastpathTV.DecMapInt8Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]int8) - fastpathTV.DecMapInt8Int8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8Int8X(vp *map[int8]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Int8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Int8V(v map[int8]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[int8]int8, xlen) - changed = true - } - - var mk int8 - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8Int16R(rv reflect.Value) { - if 
rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]int16) - v, changed := fastpathTV.DecMapInt8Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]int16) - fastpathTV.DecMapInt8Int16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8Int16X(vp *map[int8]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Int16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Int16V(v map[int8]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[int8]int16, xlen) - changed = true - } - - var mk int8 - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]int32) - v, changed := fastpathTV.DecMapInt8Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]int32) - fastpathTV.DecMapInt8Int32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8Int32X(vp *map[int8]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Int32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Int32V(v map[int8]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[int8]int32, xlen) - changed = true - } - - var mk int8 - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]int64) - v, changed := fastpathTV.DecMapInt8Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } 
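Each loop brackets every key and value with `cr.sendContainerState(...)` when `cr` is non-nil. The hook exists for formats with structural separators between map elements; binary handles leave it nil, so the guard branches cost almost nothing. The following sketch shows the idea from the encoding side for brevity — a JSON-like writer interleaving `:` and `,` — whereas the deleted code is the decode path, where the same callback lets the format consume those separators; the `jsonish` type is illustrative, not the library's implementation:

```go
package main

import "fmt"

type containerState int

const (
	containerMapKey containerState = iota
	containerMapValue
	containerMapEnd
)

// jsonish shows why a text format wants boundary callbacks: it must
// interleave ':' and ',' around the keys and values that the type-specific
// fast-path loop produces.
type jsonish struct{ n int }

func (j *jsonish) sendContainerState(c containerState) {
	switch c {
	case containerMapKey:
		if j.n > 0 {
			fmt.Print(",")
		}
		j.n++
	case containerMapValue:
		fmt.Print(":")
	case containerMapEnd:
		fmt.Println("}")
	}
}

func main() {
	cr := &jsonish{}
	fmt.Print("{")
	for k, v := range map[string]int{"a": 1} {
		cr.sendContainerState(containerMapKey)
		fmt.Printf("%q", k)
		cr.sendContainerState(containerMapValue)
		fmt.Print(v)
	}
	cr.sendContainerState(containerMapEnd) // prints {"a":1}
}
```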
- } else { - v := rv.Interface().(map[int8]int64) - fastpathTV.DecMapInt8Int64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8Int64X(vp *map[int8]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Int64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Int64V(v map[int8]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int8]int64, xlen) - changed = true - } - - var mk int8 - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]float32) - v, changed := fastpathTV.DecMapInt8Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]float32) - fastpathTV.DecMapInt8Float32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8Float32X(vp *map[int8]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Float32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Float32V(v map[int8]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[int8]float32, xlen) - changed = true - } - - var mk int8 - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]float64) - v, changed := fastpathTV.DecMapInt8Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]float64) - fastpathTV.DecMapInt8Float64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f 
fastpathT) DecMapInt8Float64X(vp *map[int8]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Float64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8Float64V(v map[int8]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int8]float64, xlen) - changed = true - } - - var mk int8 - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt8BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]bool) - v, changed := fastpathTV.DecMapInt8BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int8]bool) - fastpathTV.DecMapInt8BoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt8BoolX(vp *map[int8]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8BoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt8BoolV(v map[int8]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[int8]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[int8]bool, xlen) - changed = true - } - - var mk int8 - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]interface{}) - v, changed := fastpathTV.DecMapInt16IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]interface{}) - fastpathTV.DecMapInt16IntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16IntfX(vp *map[int16]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16IntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} 
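The `interface{}`-valued variants (such as `DecMapInt16IntfV` just below) differ from the scalar ones in a single respect: under `mapGet := !d.h.MapValueReset && !d.h.InterfaceReset` they fetch the existing entry and decode *into* it, reusing whatever was already allocated, instead of always building a fresh value. A small sketch of the observable difference, with a stand-in `decodeInto` in place of the real `d.decode`:

```go
package main

import "fmt"

// decodeInto stands in for d.decode(&mv): if handed an existing []int it
// appends in place; otherwise it allocates a fresh value.
func decodeInto(mv *interface{}) {
	if s, ok := (*mv).([]int); ok {
		*mv = append(s, 2) // reuse path: mutate the value already in the map
		return
	}
	*mv = []int{2} // reset path: start from nil
}

func main() {
	v := map[int16]interface{}{7: []int{1}}
	for _, mapGet := range []bool{true, false} {
		var mv interface{}
		if mapGet { // i.e. !MapValueReset && !InterfaceReset
			mv = v[7]
		}
		decodeInto(&mv)
		fmt.Println(mapGet, mv) // true [1 2], then false [2]
	}
}
```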
-func (_ fastpathT) DecMapInt16IntfV(v map[int16]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[int16]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk int16 - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]string) - v, changed := fastpathTV.DecMapInt16StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]string) - fastpathTV.DecMapInt16StringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16StringX(vp *map[int16]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16StringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16StringV(v map[int16]string, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) - v = make(map[int16]string, xlen) - changed = true - } - - var mk int16 - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]uint) - v, changed := fastpathTV.DecMapInt16UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]uint) - fastpathTV.DecMapInt16UintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16UintX(vp *map[int16]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16UintV(*vp, checkNil, true, d) - if changed { - *vp = v - 
} -} -func (_ fastpathT) DecMapInt16UintV(v map[int16]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int16]uint, xlen) - changed = true - } - - var mk int16 - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]uint8) - v, changed := fastpathTV.DecMapInt16Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]uint8) - fastpathTV.DecMapInt16Uint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16Uint8X(vp *map[int16]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Uint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Uint8V(v map[int16]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[int16]uint8, xlen) - changed = true - } - - var mk int16 - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]uint16) - v, changed := fastpathTV.DecMapInt16Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]uint16) - fastpathTV.DecMapInt16Uint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16Uint16X(vp *map[int16]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Uint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Uint16V(v map[int16]uint16, checkNil bool, canChange bool, - d *Decoder) (_ 
map[int16]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4) - v = make(map[int16]uint16, xlen) - changed = true - } - - var mk int16 - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]uint32) - v, changed := fastpathTV.DecMapInt16Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]uint32) - fastpathTV.DecMapInt16Uint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16Uint32X(vp *map[int16]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Uint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Uint32V(v map[int16]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[int16]uint32, xlen) - changed = true - } - - var mk int16 - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]uint64) - v, changed := fastpathTV.DecMapInt16Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]uint64) - fastpathTV.DecMapInt16Uint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16Uint64X(vp *map[int16]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Uint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Uint64V(v map[int16]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v 
!= nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int16]uint64, xlen) - changed = true - } - - var mk int16 - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]uintptr) - v, changed := fastpathTV.DecMapInt16UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]uintptr) - fastpathTV.DecMapInt16UintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16UintptrX(vp *map[int16]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16UintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16UintptrV(v map[int16]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int16]uintptr, xlen) - changed = true - } - - var mk int16 - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]int) - v, changed := fastpathTV.DecMapInt16IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]int) - fastpathTV.DecMapInt16IntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16IntX(vp *map[int16]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16IntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16IntV(v map[int16]int, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil 
{ - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int16]int, xlen) - changed = true - } - - var mk int16 - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]int8) - v, changed := fastpathTV.DecMapInt16Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]int8) - fastpathTV.DecMapInt16Int8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16Int8X(vp *map[int16]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Int8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Int8V(v map[int16]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[int16]int8, xlen) - changed = true - } - - var mk int16 - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]int16) - v, changed := fastpathTV.DecMapInt16Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]int16) - fastpathTV.DecMapInt16Int16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16Int16X(vp *map[int16]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Int16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Int16V(v map[int16]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4) - v = make(map[int16]int16, xlen) - changed = true - } - - var mk int16 - var mv int16 - 
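Two conventions recur through all of these decoders. First, `ReadMapStart` returns either a non-negative entry count (length-prefixed formats, handled by the `containerLen > 0` loops) or a negative value meaning "read until a break marker" (the `CheckBreak` loops, for CBOR-style indefinite containers). Second, for the counted case, `decInferLen` clamps the initial `make` capacity so a hostile length prefix cannot force a huge up-front allocation; the final argument (3, 4, 10, ...) approximates the encoded bytes per entry. A toy version of both, assuming a byte-budget semantic for `MaxInitLen`:

```go
package main

import "fmt"

// inferLen approximates decInferLen: clamp the announced length to a byte
// budget (maxInitLen) divided by the per-entry size, and start at zero when
// the length is unknown. (Budget semantics assumed for illustration.)
func inferLen(containerLen, maxInitLen, unitSize int) int {
	if containerLen <= 0 {
		return 0
	}
	if budget := maxInitLen / unitSize; containerLen > budget {
		return budget
	}
	return containerLen
}

func main() {
	// map[int16]int16: ~4 bytes per entry, as in decInferLen(..., 4) above.
	fmt.Println(inferLen(1_000_000_000, 1<<20, 4)) // clamped to 262144
	fmt.Println(inferLen(16, 1<<20, 4))            // small lengths pass through

	// The two loop shapes: counted vs. break-terminated.
	stream := []int{10, 20, 30, -1} // -1 plays the role of the break marker
	containerLen := -1              // negative: indefinite-length container
	for i := 0; ; i++ {
		if containerLen >= 0 && i == containerLen {
			break // counted loop, like `for j < containerLen`
		}
		if containerLen < 0 && stream[i] == -1 {
			break // indefinite loop, like `for !dd.CheckBreak()`
		}
		fmt.Println(stream[i])
	}
}
```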
if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]int32) - v, changed := fastpathTV.DecMapInt16Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]int32) - fastpathTV.DecMapInt16Int32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16Int32X(vp *map[int16]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Int32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Int32V(v map[int16]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[int16]int32, xlen) - changed = true - } - - var mk int16 - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]int64) - v, changed := fastpathTV.DecMapInt16Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]int64) - fastpathTV.DecMapInt16Int64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16Int64X(vp *map[int16]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Int64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Int64V(v map[int16]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int16]int64, xlen) - changed = true - } - - var mk int16 - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = 
int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]float32) - v, changed := fastpathTV.DecMapInt16Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]float32) - fastpathTV.DecMapInt16Float32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16Float32X(vp *map[int16]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Float32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Float32V(v map[int16]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[int16]float32, xlen) - changed = true - } - - var mk int16 - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt16Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]float64) - v, changed := fastpathTV.DecMapInt16Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int16]float64) - fastpathTV.DecMapInt16Float64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt16Float64X(vp *map[int16]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Float64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt16Float64V(v map[int16]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[int16]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int16]float64, xlen) - changed = true - } - - var mk int16 - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - 
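Note how every scalar read carries a width: `mk = int16(dd.DecodeInt(16))`, `mv = float32(dd.DecodeFloat(true))`. The decoder reads at full precision and is expected to reject values that would not survive the narrowing cast; the `true` passed to `DecodeFloat` requests exactly that check for `float32`. A sketch of both guards — `checkInt` and `checkFloat32` are hypothetical helpers illustrating the failure each call is expected to surface, not the library's code:

```go
package main

import (
	"fmt"
	"math"
)

// checkInt rejects values that change under a round-trip through an n-bit
// signed integer (the shift pair sign-extends the low n bits back to 64).
func checkInt(v int64, bits uint) error {
	if trunc := v << (64 - bits) >> (64 - bits); trunc != v {
		return fmt.Errorf("%d overflows int%d", v, bits)
	}
	return nil
}

// checkFloat32 rejects finite float64s that narrow to an infinity.
func checkFloat32(f float64) error {
	if !math.IsInf(f, 0) && math.IsInf(float64(float32(f)), 0) {
		return fmt.Errorf("%g overflows float32", f)
	}
	return nil
}

func main() {
	fmt.Println(checkInt(130, 8))   // error: does not fit int8
	fmt.Println(checkInt(-128, 8))  // <nil>
	fmt.Println(checkFloat32(1e39)) // error: overflows float32
	fmt.Println(checkFloat32(3.14)) // <nil>
}
```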
-			mv = dd.DecodeFloat(false)
-			if v != nil {
-				v[mk] = mv
-			}
-		}
-	} else if containerLen < 0 {
-		for j := 0; !dd.CheckBreak(); j++ {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			mk = int16(dd.DecodeInt(16))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			mv = dd.DecodeFloat(false)
-			if v != nil {
-				v[mk] = mv
-			}
-		}
-	}
-	if cr != nil {
-		cr.sendContainerState(containerMapEnd)
-	}
-	return v, changed
-}
-
-func (f *decFnInfo) fastpathDecMapInt16BoolR(rv reflect.Value) {
-	if rv.CanAddr() {
-		vp := rv.Addr().Interface().(*map[int16]bool)
-		v, changed := fastpathTV.DecMapInt16BoolV(*vp, fastpathCheckNilFalse, true, f.d)
-		if changed {
-			*vp = v
-		}
-	} else {
-		v := rv.Interface().(map[int16]bool)
-		fastpathTV.DecMapInt16BoolV(v, fastpathCheckNilFalse, false, f.d)
-	}
-}
-func (f fastpathT) DecMapInt16BoolX(vp *map[int16]bool, checkNil bool, d *Decoder) {
-	v, changed := f.DecMapInt16BoolV(*vp, checkNil, true, d)
-	if changed {
-		*vp = v
-	}
-}
-func (_ fastpathT) DecMapInt16BoolV(v map[int16]bool, checkNil bool, canChange bool,
-	d *Decoder) (_ map[int16]bool, changed bool) {
-	dd := d.d
-	cr := d.cr
-
-	if checkNil && dd.TryDecodeAsNil() {
-		if v != nil {
-			changed = true
-		}
-		return nil, changed
-	}
-
-	containerLen := dd.ReadMapStart()
-	if canChange && v == nil {
-		xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3)
-		v = make(map[int16]bool, xlen)
-		changed = true
-	}
-
-	var mk int16
-	var mv bool
-	if containerLen > 0 {
-		for j := 0; j < containerLen; j++ {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			mk = int16(dd.DecodeInt(16))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			mv = dd.DecodeBool()
-			if v != nil {
-				v[mk] = mv
-			}
-		}
-	} else if containerLen < 0 {
-		for j := 0; !dd.CheckBreak(); j++ {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			mk = int16(dd.DecodeInt(16))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			mv = dd.DecodeBool()
-			if v != nil {
-				v[mk] = mv
-			}
-		}
-	}
-	if cr != nil {
-		cr.sendContainerState(containerMapEnd)
-	}
-	return v, changed
-}
-
-func (f *decFnInfo) fastpathDecMapInt32IntfR(rv reflect.Value) {
-	if rv.CanAddr() {
-		vp := rv.Addr().Interface().(*map[int32]interface{})
-		v, changed := fastpathTV.DecMapInt32IntfV(*vp, fastpathCheckNilFalse, true, f.d)
-		if changed {
-			*vp = v
-		}
-	} else {
-		v := rv.Interface().(map[int32]interface{})
-		fastpathTV.DecMapInt32IntfV(v, fastpathCheckNilFalse, false, f.d)
-	}
-}
-func (f fastpathT) DecMapInt32IntfX(vp *map[int32]interface{}, checkNil bool, d *Decoder) {
-	v, changed := f.DecMapInt32IntfV(*vp, checkNil, true, d)
-	if changed {
-		*vp = v
-	}
-}
-func (_ fastpathT) DecMapInt32IntfV(v map[int32]interface{}, checkNil bool, canChange bool,
-	d *Decoder) (_ map[int32]interface{}, changed bool) {
-	dd := d.d
-	cr := d.cr
-
-	if checkNil && dd.TryDecodeAsNil() {
-		if v != nil {
-			changed = true
-		}
-		return nil, changed
-	}
-
-	containerLen := dd.ReadMapStart()
-	if canChange && v == nil {
-		xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
-		v = make(map[int32]interface{}, xlen)
-		changed = true
-	}
-	mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
-	var mk int32
-	var mv interface{}
-	if containerLen > 0 {
-		for j := 0; j < containerLen; j++ {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			mk = int32(dd.DecodeInt(32))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			if mapGet {
-				mv = v[mk]
-			} else {
-				mv = nil
-			}
-			d.decode(&mv)
-			if v != nil {
-				v[mk] = mv
-			}
-		}
-	} else if containerLen < 0 {
-		for j := 0; !dd.CheckBreak(); j++ {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			mk = int32(dd.DecodeInt(32))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			if mapGet {
-				mv = v[mk]
-			} else {
-				mv = nil
-			}
-			d.decode(&mv)
-			if v != nil {
-				v[mk] = mv
-			}
-		}
-	}
-	if cr != nil {
-		cr.sendContainerState(containerMapEnd)
-	}
-	return v, changed
-}
-
-func (f *decFnInfo) fastpathDecMapInt32StringR(rv reflect.Value) {
-	if rv.CanAddr() {
-		vp := rv.Addr().Interface().(*map[int32]string)
-		v, changed := fastpathTV.DecMapInt32StringV(*vp, fastpathCheckNilFalse, true, f.d)
-		if changed {
-			*vp = v
-		}
-	} else {
-		v := rv.Interface().(map[int32]string)
-		fastpathTV.DecMapInt32StringV(v, fastpathCheckNilFalse, false, f.d)
-	}
-}
-func (f fastpathT) DecMapInt32StringX(vp *map[int32]string, checkNil bool, d *Decoder) {
-	v, changed := f.DecMapInt32StringV(*vp, checkNil, true, d)
-	if changed {
-		*vp = v
-	}
-}
-func (_ fastpathT) DecMapInt32StringV(v map[int32]string, checkNil bool, canChange bool,
-	d *Decoder) (_ map[int32]string, changed bool) {
-	dd := d.d
-	cr := d.cr
-
-	if checkNil && dd.TryDecodeAsNil() {
-		if v != nil {
-			changed = true
-		}
-		return nil, changed
-	}
-
-	containerLen := dd.ReadMapStart()
-	if canChange && v == nil {
-		xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20)
-		v = make(map[int32]string, xlen)
-		changed = true
-	}
-
-	var mk int32
-	var mv string
-	if containerLen > 0 {
-		for j := 0; j < containerLen; j++ {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			mk = int32(dd.DecodeInt(32))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			mv = dd.DecodeString()
-			if v != nil {
-				v[mk] = mv
-			}
-		}
-	} else if containerLen < 0 {
-		for j := 0; !dd.CheckBreak(); j++ {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			mk = int32(dd.DecodeInt(32))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			mv = dd.DecodeString()
-			if v != nil {
-				v[mk] = mv
-			}
-		}
-	}
-	if cr != nil {
-		cr.sendContainerState(containerMapEnd)
-	}
-	return v, changed
-}
-
-func (f *decFnInfo) fastpathDecMapInt32UintR(rv reflect.Value) {
-	if rv.CanAddr() {
-		vp := rv.Addr().Interface().(*map[int32]uint)
-		v, changed := fastpathTV.DecMapInt32UintV(*vp, fastpathCheckNilFalse, true, f.d)
-		if changed {
-			*vp = v
-		}
-	} else {
-		v := rv.Interface().(map[int32]uint)
-		fastpathTV.DecMapInt32UintV(v, fastpathCheckNilFalse, false, f.d)
-	}
-}
-func (f fastpathT) DecMapInt32UintX(vp *map[int32]uint, checkNil bool, d *Decoder) {
-	v, changed := f.DecMapInt32UintV(*vp, checkNil, true, d)
-	if changed {
-		*vp = v
-	}
-}
-func (_ fastpathT) DecMapInt32UintV(v map[int32]uint, checkNil bool, canChange bool,
-	d *Decoder) (_ map[int32]uint, changed bool) {
-	dd := d.d
-	cr := d.cr
-
-	if checkNil && dd.TryDecodeAsNil() {
-		if v != nil {
-			changed = true
-		}
-		return nil, changed
-	}
-
-	containerLen := dd.ReadMapStart()
-	if canChange && v == nil {
-		xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
-		v = make(map[int32]uint, xlen)
-		changed = true
-	}
-
-	var mk int32
-	var mv uint
-	if containerLen > 0 {
-		for j := 0; j < containerLen; j++ {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			mk = int32(dd.DecodeInt(32))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			mv = uint(dd.DecodeUint(uintBitsize))
-			if v != nil {
-				v[mk] = mv
-			}
-		}
-	} else if containerLen < 0 {
-		for j := 0; !dd.CheckBreak(); j++ {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			mk = int32(dd.DecodeInt(32))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			mv = uint(dd.DecodeUint(uintBitsize))
-			if v != nil {
-				v[mk] = mv
-			}
-		}
-	}
-	if cr != nil {
-		cr.sendContainerState(containerMapEnd)
-	}
-	return v, changed
-}
-
-func (f *decFnInfo) fastpathDecMapInt32Uint8R(rv reflect.Value) {
-	if rv.CanAddr() {
-		vp := rv.Addr().Interface().(*map[int32]uint8)
-		v, changed := fastpathTV.DecMapInt32Uint8V(*vp, fastpathCheckNilFalse, true, f.d)
-		if changed {
-			*vp = v
-		}
-	} else {
-		v := rv.Interface().(map[int32]uint8)
-		fastpathTV.DecMapInt32Uint8V(v, fastpathCheckNilFalse, false, f.d)
-	}
-}
-func (f fastpathT) DecMapInt32Uint8X(vp *map[int32]uint8, checkNil bool, d *Decoder) {
-	v, changed := f.DecMapInt32Uint8V(*vp, checkNil, true, d)
-	if changed {
-		*vp = v
-	}
-}
-func (_ fastpathT) DecMapInt32Uint8V(v map[int32]uint8, checkNil bool, canChange bool,
-	d *Decoder) (_ map[int32]uint8, changed bool) {
-	dd := d.d
-	cr := d.cr
-
-	if checkNil && dd.TryDecodeAsNil() {
-		if v != nil {
-			changed = true
-		}
-		return nil, changed
-	}
-
-	containerLen := dd.ReadMapStart()
-	if canChange && v == nil {
-		xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5)
-		v = make(map[int32]uint8, xlen)
-		changed = true
-	}
-
-	var mk int32
-	var mv uint8
-	if containerLen > 0 {
-		for j := 0; j < containerLen; j++ {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			mk = int32(dd.DecodeInt(32))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			mv = uint8(dd.DecodeUint(8))
-			if v != nil {
-				v[mk] = mv
-			}
-		}
-	} else if containerLen < 0 {
-		for j := 0; !dd.CheckBreak(); j++ {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			mk = int32(dd.DecodeInt(32))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			mv = uint8(dd.DecodeUint(8))
-			if v != nil {
-				v[mk] = mv
-			}
-		}
-	}
-	if cr != nil {
-		cr.sendContainerState(containerMapEnd)
-	}
-	return v, changed
-}
-
-func (f *decFnInfo) fastpathDecMapInt32Uint16R(rv reflect.Value) {
-	if rv.CanAddr() {
-		vp := rv.Addr().Interface().(*map[int32]uint16)
-		v, changed := fastpathTV.DecMapInt32Uint16V(*vp, fastpathCheckNilFalse, true, f.d)
-		if changed {
-			*vp = v
-		}
-	} else {
-		v := rv.Interface().(map[int32]uint16)
-		fastpathTV.DecMapInt32Uint16V(v, fastpathCheckNilFalse, false, f.d)
-	}
-}
-func (f fastpathT) DecMapInt32Uint16X(vp *map[int32]uint16, checkNil bool, d *Decoder) {
-	v, changed := f.DecMapInt32Uint16V(*vp, checkNil, true, d)
-	if changed {
-		*vp = v
-	}
-}
-func (_ fastpathT) DecMapInt32Uint16V(v map[int32]uint16, checkNil bool, canChange bool,
-	d *Decoder) (_ map[int32]uint16, changed bool) {
-	dd := d.d
-	cr := d.cr
-
-	if checkNil && dd.TryDecodeAsNil() {
-		if v != nil {
-			changed = true
-		}
-		return nil, changed
-	}
-
-	containerLen := dd.ReadMapStart()
-	if canChange && v == nil {
-		xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6)
-		v = make(map[int32]uint16, xlen)
-		changed = true
-	}
-
-	var mk int32
-	var mv uint16
-	if containerLen > 0 {
-		for j := 0; j < containerLen; j++ {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			mk = int32(dd.DecodeInt(32))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			mv = uint16(dd.DecodeUint(16))
-			if v != nil {
-				v[mk] = mv
-			}
-		}
-	} else if containerLen < 0 {
-		for j := 0; !dd.CheckBreak(); j++ {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			mk = int32(dd.DecodeInt(32))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			mv = uint16(dd.DecodeUint(16))
-			if v != nil {
-				v[mk] = mv
-			}
-		}
-	}
-	if cr != nil {
-		cr.sendContainerState(containerMapEnd)
-	}
-	return v, changed
-}
-
-func (f *decFnInfo) fastpathDecMapInt32Uint32R(rv reflect.Value) {
-	if rv.CanAddr() {
-		vp := rv.Addr().Interface().(*map[int32]uint32)
-		v, changed := fastpathTV.DecMapInt32Uint32V(*vp, fastpathCheckNilFalse, true, f.d)
-		if changed {
-			*vp = v
-		}
-	} else {
-		v := rv.Interface().(map[int32]uint32)
-		fastpathTV.DecMapInt32Uint32V(v, fastpathCheckNilFalse, false, f.d)
-	}
-}
-func (f fastpathT) DecMapInt32Uint32X(vp *map[int32]uint32, checkNil bool, d *Decoder) {
-	v, changed := f.DecMapInt32Uint32V(*vp, checkNil, true, d)
-	if changed {
-		*vp = v
-	}
-}
-func (_ fastpathT) DecMapInt32Uint32V(v map[int32]uint32, checkNil bool, canChange bool,
-	d *Decoder) (_ map[int32]uint32, changed bool) {
-	dd := d.d
-	cr := d.cr
-
-	if checkNil && dd.TryDecodeAsNil() {
-		if v != nil {
-			changed = true
-		}
-		return nil, changed
-	}
-
-	containerLen := dd.ReadMapStart()
-	if canChange && v == nil {
-		xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8)
-		v = make(map[int32]uint32, xlen)
-		changed = true
-	}
-
-	var mk int32
-	var mv uint32
-	if containerLen > 0 {
-		for j := 0; j < containerLen; j++ {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			mk = int32(dd.DecodeInt(32))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			mv = uint32(dd.DecodeUint(32))
-			if v != nil {
-				v[mk] = mv
-			}
-		}
-	} else if containerLen < 0 {
-		for j := 0; !dd.CheckBreak(); j++ {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			mk = int32(dd.DecodeInt(32))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			mv = uint32(dd.DecodeUint(32))
-			if v != nil {
-				v[mk] = mv
-			}
-		}
-	}
-	if cr != nil {
-		cr.sendContainerState(containerMapEnd)
-	}
-	return v, changed
-}
-
-func (f *decFnInfo) fastpathDecMapInt32Uint64R(rv reflect.Value) {
-	if rv.CanAddr() {
-		vp := rv.Addr().Interface().(*map[int32]uint64)
-		v, changed := fastpathTV.DecMapInt32Uint64V(*vp, fastpathCheckNilFalse, true, f.d)
-		if changed {
-			*vp = v
-		}
-	} else {
-		v := rv.Interface().(map[int32]uint64)
-		fastpathTV.DecMapInt32Uint64V(v, fastpathCheckNilFalse, false, f.d)
-	}
-}
-func (f fastpathT) DecMapInt32Uint64X(vp *map[int32]uint64, checkNil bool, d *Decoder) {
-	v, changed := f.DecMapInt32Uint64V(*vp, checkNil, true, d)
-	if changed {
-		*vp = v
-	}
-}
-func (_ fastpathT) DecMapInt32Uint64V(v map[int32]uint64, checkNil bool, canChange bool,
-	d *Decoder) (_ map[int32]uint64, changed bool) {
-	dd := d.d
-	cr := d.cr
-
-	if checkNil && dd.TryDecodeAsNil() {
-		if v != nil {
-			changed = true
-		}
-		return nil, changed
-	}
-
-	containerLen := dd.ReadMapStart()
-	if canChange && v == nil {
-		xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12)
-		v = make(map[int32]uint64, xlen)
-		changed = true
-	}
-
-	var mk int32
-	var mv uint64
-	if containerLen > 0 {
-		for j := 0; j < containerLen; j++ {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			mk = int32(dd.DecodeInt(32))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			mv = dd.DecodeUint(64)
-			if v != nil {
-				v[mk] = mv
-			}
-		}
-	} else if containerLen < 0 {
-		for j := 0; !dd.CheckBreak(); j++ {
-			if cr != nil {
-				cr.sendContainerState(containerMapKey)
-			}
-			mk = int32(dd.DecodeInt(32))
-			if cr != nil {
-				cr.sendContainerState(containerMapValue)
-			}
-			mv = dd.DecodeUint(64)
-			if v != nil {
-				v[mk] = mv
-			}
-		}
-	}
if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]uintptr) - v, changed := fastpathTV.DecMapInt32UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]uintptr) - fastpathTV.DecMapInt32UintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32UintptrX(vp *map[int32]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32UintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32UintptrV(v map[int32]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int32]uintptr, xlen) - changed = true - } - - var mk int32 - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]int) - v, changed := fastpathTV.DecMapInt32IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]int) - fastpathTV.DecMapInt32IntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32IntX(vp *map[int32]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32IntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32IntV(v map[int32]int, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int32]int, xlen) - changed = true - } - - var mk int32 - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f 
*decFnInfo) fastpathDecMapInt32Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]int8) - v, changed := fastpathTV.DecMapInt32Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]int8) - fastpathTV.DecMapInt32Int8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32Int8X(vp *map[int32]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Int8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Int8V(v map[int32]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[int32]int8, xlen) - changed = true - } - - var mk int32 - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]int16) - v, changed := fastpathTV.DecMapInt32Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]int16) - fastpathTV.DecMapInt32Int16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32Int16X(vp *map[int32]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Int16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Int16V(v map[int32]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) - v = make(map[int32]int16, xlen) - changed = true - } - - var mk int32 - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]int32) - v, changed := 
fastpathTV.DecMapInt32Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]int32) - fastpathTV.DecMapInt32Int32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32Int32X(vp *map[int32]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Int32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Int32V(v map[int32]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[int32]int32, xlen) - changed = true - } - - var mk int32 - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]int64) - v, changed := fastpathTV.DecMapInt32Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]int64) - fastpathTV.DecMapInt32Int64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32Int64X(vp *map[int32]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Int64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Int64V(v map[int32]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int32]int64, xlen) - changed = true - } - - var mk int32 - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]float32) - v, changed := fastpathTV.DecMapInt32Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := 
rv.Interface().(map[int32]float32) - fastpathTV.DecMapInt32Float32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32Float32X(vp *map[int32]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Float32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Float32V(v map[int32]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) - v = make(map[int32]float32, xlen) - changed = true - } - - var mk int32 - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]float64) - v, changed := fastpathTV.DecMapInt32Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]float64) - fastpathTV.DecMapInt32Float64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt32Float64X(vp *map[int32]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Float64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32Float64V(v map[int32]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int32]float64, xlen) - changed = true - } - - var mk int32 - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt32BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]bool) - v, changed := fastpathTV.DecMapInt32BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int32]bool) - fastpathTV.DecMapInt32BoolV(v, fastpathCheckNilFalse, 
false, f.d) - } -} -func (f fastpathT) DecMapInt32BoolX(vp *map[int32]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32BoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt32BoolV(v map[int32]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[int32]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[int32]bool, xlen) - changed = true - } - - var mk int32 - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]interface{}) - v, changed := fastpathTV.DecMapInt64IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]interface{}) - fastpathTV.DecMapInt64IntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64IntfX(vp *map[int64]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64IntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64IntfV(v map[int64]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[int64]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk int64 - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]string) - v, changed := fastpathTV.DecMapInt64StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]string) - fastpathTV.DecMapInt64StringV(v, fastpathCheckNilFalse, false, 
f.d) - } -} -func (f fastpathT) DecMapInt64StringX(vp *map[int64]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64StringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64StringV(v map[int64]string, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) - v = make(map[int64]string, xlen) - changed = true - } - - var mk int64 - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]uint) - v, changed := fastpathTV.DecMapInt64UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]uint) - fastpathTV.DecMapInt64UintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64UintX(vp *map[int64]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64UintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64UintV(v map[int64]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int64]uint, xlen) - changed = true - } - - var mk int64 - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]uint8) - v, changed := fastpathTV.DecMapInt64Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]uint8) - fastpathTV.DecMapInt64Uint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64Uint8X(vp *map[int64]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Uint8V(*vp, checkNil, true, d) - if 
changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Uint8V(v map[int64]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int64]uint8, xlen) - changed = true - } - - var mk int64 - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]uint16) - v, changed := fastpathTV.DecMapInt64Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]uint16) - fastpathTV.DecMapInt64Uint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64Uint16X(vp *map[int64]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Uint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Uint16V(v map[int64]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int64]uint16, xlen) - changed = true - } - - var mk int64 - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]uint32) - v, changed := fastpathTV.DecMapInt64Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]uint32) - fastpathTV.DecMapInt64Uint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64Uint32X(vp *map[int64]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Uint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Uint32V(v map[int64]uint32, checkNil bool, canChange bool, - d *Decoder) (_ 
map[int64]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int64]uint32, xlen) - changed = true - } - - var mk int64 - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]uint64) - v, changed := fastpathTV.DecMapInt64Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]uint64) - fastpathTV.DecMapInt64Uint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64Uint64X(vp *map[int64]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Uint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Uint64V(v map[int64]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int64]uint64, xlen) - changed = true - } - - var mk int64 - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]uintptr) - v, changed := fastpathTV.DecMapInt64UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]uintptr) - fastpathTV.DecMapInt64UintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64UintptrX(vp *map[int64]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64UintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64UintptrV(v map[int64]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } 
- return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int64]uintptr, xlen) - changed = true - } - - var mk int64 - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]int) - v, changed := fastpathTV.DecMapInt64IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]int) - fastpathTV.DecMapInt64IntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64IntX(vp *map[int64]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64IntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64IntV(v map[int64]int, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int64]int, xlen) - changed = true - } - - var mk int64 - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]int8) - v, changed := fastpathTV.DecMapInt64Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]int8) - fastpathTV.DecMapInt64Int8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64Int8X(vp *map[int64]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Int8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Int8V(v map[int64]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = 
make(map[int64]int8, xlen) - changed = true - } - - var mk int64 - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]int16) - v, changed := fastpathTV.DecMapInt64Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]int16) - fastpathTV.DecMapInt64Int16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64Int16X(vp *map[int64]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Int16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Int16V(v map[int64]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) - v = make(map[int64]int16, xlen) - changed = true - } - - var mk int64 - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]int32) - v, changed := fastpathTV.DecMapInt64Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]int32) - fastpathTV.DecMapInt64Int32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64Int32X(vp *map[int64]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Int32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Int32V(v map[int64]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int64]int32, xlen) - changed = true - } - - var mk int64 - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - 
cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]int64) - v, changed := fastpathTV.DecMapInt64Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]int64) - fastpathTV.DecMapInt64Int64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64Int64X(vp *map[int64]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Int64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Int64V(v map[int64]int64, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int64]int64, xlen) - changed = true - } - - var mk int64 - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]float32) - v, changed := fastpathTV.DecMapInt64Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]float32) - fastpathTV.DecMapInt64Float32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64Float32X(vp *map[int64]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Float32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Float32V(v map[int64]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) - v = make(map[int64]float32, xlen) - changed = true - } - - var mk int64 - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = 
float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]float64) - v, changed := fastpathTV.DecMapInt64Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]float64) - fastpathTV.DecMapInt64Float64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64Float64X(vp *map[int64]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Float64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64Float64V(v map[int64]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) - v = make(map[int64]float64, xlen) - changed = true - } - - var mk int64 - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapInt64BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]bool) - v, changed := fastpathTV.DecMapInt64BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[int64]bool) - fastpathTV.DecMapInt64BoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapInt64BoolX(vp *map[int64]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64BoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapInt64BoolV(v map[int64]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[int64]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[int64]bool, xlen) - changed = true - } - - var mk int64 - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - 
cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolIntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]interface{}) - v, changed := fastpathTV.DecMapBoolIntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]interface{}) - fastpathTV.DecMapBoolIntfV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolIntfX(vp *map[bool]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolIntfV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolIntfV(v map[bool]interface{}, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[bool]interface{}, xlen) - changed = true - } - mapGet := !d.h.MapValueReset && !d.h.InterfaceReset - var mk bool - var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolStringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]string) - v, changed := fastpathTV.DecMapBoolStringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]string) - fastpathTV.DecMapBoolStringV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolStringX(vp *map[bool]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolStringV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolStringV(v map[bool]string, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) - v = make(map[bool]string, xlen) - changed = true - } - - var mk bool - var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - 
cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolUintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]uint) - v, changed := fastpathTV.DecMapBoolUintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]uint) - fastpathTV.DecMapBoolUintV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolUintX(vp *map[bool]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolUintV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolUintV(v map[bool]uint, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[bool]uint, xlen) - changed = true - } - - var mk bool - var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolUint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]uint8) - v, changed := fastpathTV.DecMapBoolUint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]uint8) - fastpathTV.DecMapBoolUint8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolUint8X(vp *map[bool]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolUint8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolUint8V(v map[bool]uint8, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[bool]uint8, xlen) - changed = true - } - - var mk bool - var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } - } - 
} - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolUint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]uint16) - v, changed := fastpathTV.DecMapBoolUint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]uint16) - fastpathTV.DecMapBoolUint16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolUint16X(vp *map[bool]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolUint16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolUint16V(v map[bool]uint16, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[bool]uint16, xlen) - changed = true - } - - var mk bool - var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolUint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]uint32) - v, changed := fastpathTV.DecMapBoolUint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]uint32) - fastpathTV.DecMapBoolUint32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolUint32X(vp *map[bool]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolUint32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolUint32V(v map[bool]uint32, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[bool]uint32, xlen) - changed = true - } - - var mk bool - var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolUint64R(rv reflect.Value) { - if 
rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]uint64) - v, changed := fastpathTV.DecMapBoolUint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]uint64) - fastpathTV.DecMapBoolUint64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolUint64X(vp *map[bool]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolUint64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolUint64V(v map[bool]uint64, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[bool]uint64, xlen) - changed = true - } - - var mk bool - var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolUintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]uintptr) - v, changed := fastpathTV.DecMapBoolUintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]uintptr) - fastpathTV.DecMapBoolUintptrV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolUintptrX(vp *map[bool]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolUintptrV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolUintptrV(v map[bool]uintptr, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[bool]uintptr, xlen) - changed = true - } - - var mk bool - var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolIntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]int) - v, changed := fastpathTV.DecMapBoolIntV(*vp, fastpathCheckNilFalse, true, f.d) - if 
changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]int) - fastpathTV.DecMapBoolIntV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolIntX(vp *map[bool]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolIntV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolIntV(v map[bool]int, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[bool]int, xlen) - changed = true - } - - var mk bool - var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolInt8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]int8) - v, changed := fastpathTV.DecMapBoolInt8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]int8) - fastpathTV.DecMapBoolInt8V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolInt8X(vp *map[bool]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolInt8V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolInt8V(v map[bool]int8, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[bool]int8, xlen) - changed = true - } - - var mk bool - var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolInt16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]int16) - v, changed := fastpathTV.DecMapBoolInt16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]int16) - fastpathTV.DecMapBoolInt16V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolInt16X(vp *map[bool]int16, checkNil bool, d 
*Decoder) { - v, changed := f.DecMapBoolInt16V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolInt16V(v map[bool]int16, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) - v = make(map[bool]int16, xlen) - changed = true - } - - var mk bool - var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolInt32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]int32) - v, changed := fastpathTV.DecMapBoolInt32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]int32) - fastpathTV.DecMapBoolInt32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolInt32X(vp *map[bool]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolInt32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolInt32V(v map[bool]int32, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[bool]int32, xlen) - changed = true - } - - var mk bool - var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolInt64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]int64) - v, changed := fastpathTV.DecMapBoolInt64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]int64) - fastpathTV.DecMapBoolInt64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolInt64X(vp *map[bool]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolInt64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolInt64V(v map[bool]int64, checkNil bool, canChange bool, - d 
*Decoder) (_ map[bool]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[bool]int64, xlen) - changed = true - } - - var mk bool - var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolFloat32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]float32) - v, changed := fastpathTV.DecMapBoolFloat32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]float32) - fastpathTV.DecMapBoolFloat32V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolFloat32X(vp *map[bool]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolFloat32V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolFloat32V(v map[bool]float32, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) - v = make(map[bool]float32, xlen) - changed = true - } - - var mk bool - var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolFloat64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]float64) - v, changed := fastpathTV.DecMapBoolFloat64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]float64) - fastpathTV.DecMapBoolFloat64V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolFloat64X(vp *map[bool]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolFloat64V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolFloat64V(v map[bool]float64, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - 
return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) - v = make(map[bool]float64, xlen) - changed = true - } - - var mk bool - var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} - -func (f *decFnInfo) fastpathDecMapBoolBoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]bool) - v, changed := fastpathTV.DecMapBoolBoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[bool]bool) - fastpathTV.DecMapBoolBoolV(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) DecMapBoolBoolX(vp *map[bool]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolBoolV(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) DecMapBoolBoolV(v map[bool]bool, checkNil bool, canChange bool, - d *Decoder) (_ map[bool]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) - v = make(map[bool]bool, xlen) - changed = true - } - - var mk bool - var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } - return v, changed -} diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl b/cluster-autoscaler/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl deleted file mode 100644 index c3ffdf93d960..000000000000 --- a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl +++ /dev/null @@ -1,527 +0,0 @@ -// +build !notfastpath - -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED from fast-path.go.tmpl -// ************************************************************ - -package codec - -// Fast path functions try to create a fast path encode or decode implementation -// for common maps and slices. -// -// We define the functions and register then in this single file -// so as not to pollute the encode.go and decode.go, and create a dependency in there. 
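For readers following the deleted template that begins above: the dispatch it builds boils down to two mechanisms — a type switch that handles common concrete slice/map types without touching reflection, backed by a sorted table of `(rtid, handler)` entries searched with the binary search in `fastpathA.index` when dispatch happens from the reflect-driven path. A minimal, self-contained sketch of that pattern follows; the names (`fastEntry`, `fastTable`, `rtid`) are invented for illustration and are not the library's API.

```go
package main

import (
	"fmt"
	"reflect"
	"sort"
)

// fastEntry mirrors the role of fastpathE: a comparable type identity
// plus the handler registered for that concrete type.
type fastEntry struct {
	rtid uintptr
	fn   func(reflect.Value) string
}

// fastTable is kept sorted by rtid so lookups can use the same adapted
// sort/search.go binary search as fastpathA.index in the deleted code.
type fastTable []fastEntry

func (t fastTable) index(rtid uintptr) int {
	i, j := 0, len(t)
	for i < j {
		h := i + (j-i)/2
		if t[h].rtid < rtid {
			i = h + 1
		} else {
			j = h
		}
	}
	if i < len(t) && t[i].rtid == rtid {
		return i
	}
	return -1
}

// rtid derives a type identity the same way the deleted code does:
// the pointer word of the reflect.Type value.
func rtid(v interface{}) uintptr {
	return reflect.ValueOf(reflect.TypeOf(v)).Pointer()
}

func main() {
	table := fastTable{
		{rtid([]int(nil)), func(rv reflect.Value) string {
			return fmt.Sprintf("fast []int, len=%d", rv.Len())
		}},
		{rtid(map[string]string(nil)), func(rv reflect.Value) string {
			return fmt.Sprintf("fast map[string]string, len=%d", rv.Len())
		}},
	}
	// Sort once up front, as the deleted init() does after registration.
	sort.Slice(table, func(i, j int) bool { return table[i].rtid < table[j].rtid })

	for _, v := range []interface{}{[]int{1, 2, 3}, map[string]string{"a": "b"}, 3.14} {
		if i := table.index(rtid(v)); i >= 0 {
			fmt.Println(table[i].fn(reflect.ValueOf(v))) // registered fast path
		} else {
			fmt.Println("slow path via reflection for", reflect.TypeOf(v)) // fallback
		}
	}
}
```

Registering every supported slice/map shape once and binary-searching by the type's pointer identity is what lets the generated code avoid a full reflection-driven walk on the hot path.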
-// This file can be omitted without causing a build failure. -// -// The advantage of fast paths is: -// - Many calls bypass reflection altogether -// -// Currently support -// - slice of all builtin types, -// - map of all builtin types to string or interface value -// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8) -// This should provide adequate "typical" implementations. -// -// Note that fast track decode functions must handle values for which an address cannot be obtained. -// For example: -// m2 := map[string]int{} -// p2 := []interface{}{m2} -// // decoding into p2 will bomb if fast track functions do not treat like unaddressable. -// - -import ( - "reflect" - "sort" -) - -const fastpathEnabled = true - -const fastpathCheckNilFalse = false // for reflect -const fastpathCheckNilTrue = true // for type switch - -type fastpathT struct {} - -var fastpathTV fastpathT - -type fastpathE struct { - rtid uintptr - rt reflect.Type - encfn func(*encFnInfo, reflect.Value) - decfn func(*decFnInfo, reflect.Value) -} - -type fastpathA [{{ .FastpathLen }}]fastpathE - -func (x *fastpathA) index(rtid uintptr) int { - // use binary search to grab the index (adapted from sort/search.go) - h, i, j := 0, 0, {{ .FastpathLen }} // len(x) - for i < j { - h = i + (j-i)/2 - if x[h].rtid < rtid { - i = h + 1 - } else { - j = h - } - } - if i < {{ .FastpathLen }} && x[i].rtid == rtid { - return i - } - return -1 -} - -type fastpathAslice []fastpathE - -func (x fastpathAslice) Len() int { return len(x) } -func (x fastpathAslice) Less(i, j int) bool { return x[i].rtid < x[j].rtid } -func (x fastpathAslice) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -var fastpathAV fastpathA - -// due to possible initialization loop error, make fastpath in an init() -func init() { - i := 0 - fn := func(v interface{}, fe func(*encFnInfo, reflect.Value), fd func(*decFnInfo, reflect.Value)) (f fastpathE) { - xrt := reflect.TypeOf(v) - xptr := reflect.ValueOf(xrt).Pointer() - fastpathAV[i] = fastpathE{xptr, xrt, fe, fd} - i++ - return - } - - {{range .Values}}{{if not .Primitive}}{{if not .MapKey }} - fn([]{{ .Elem }}(nil), (*encFnInfo).{{ .MethodNamePfx "fastpathEnc" false }}R, (*decFnInfo).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}} - - {{range .Values}}{{if not .Primitive}}{{if .MapKey }} - fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*encFnInfo).{{ .MethodNamePfx "fastpathEnc" false }}R, (*decFnInfo).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}} - - sort.Sort(fastpathAslice(fastpathAV[:])) -} - -// -- encode - -// -- -- fast path type switch -func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { - switch v := iv.(type) { -{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} - case []{{ .Elem }}:{{else}} - case map[{{ .MapKey }}]{{ .Elem }}:{{end}} - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e){{if not .MapKey }} - case *[]{{ .Elem }}:{{else}} - case *map[{{ .MapKey }}]{{ .Elem }}:{{end}} - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e) -{{end}}{{end}} - default: - _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} - -func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { - switch v := iv.(type) { -{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} - case []{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e) - case *[]{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" 
false }}V(*v, fastpathCheckNilTrue, e) -{{end}}{{end}}{{end}} - default: - _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} - -func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { - switch v := iv.(type) { -{{range .Values}}{{if not .Primitive}}{{if .MapKey }} - case map[{{ .MapKey }}]{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e) - case *map[{{ .MapKey }}]{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e) -{{end}}{{end}}{{end}} - default: - _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} - -// -- -- fast path functions -{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} - -func (f *encFnInfo) {{ .MethodNamePfx "fastpathEnc" false }}R(rv reflect.Value) { - if f.ti.mbs { - fastpathTV.{{ .MethodNamePfx "EncAsMap" false }}V(rv.Interface().([]{{ .Elem }}), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv.Interface().([]{{ .Elem }}), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { cr.sendContainerState(containerArrayElem) } - {{ encmd .Elem "v2"}} - } - if cr != nil { cr.sendContainerState(containerArrayEnd) }{{/* ee.EncodeEnd() */}} -} - -func (_ fastpathT) {{ .MethodNamePfx "EncAsMap" false }}V(v []{{ .Elem }}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for j, v2 := range v { - if cr != nil { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } - {{ encmd .Elem "v2"}} - } - if cr != nil { cr.sendContainerState(containerMapEnd) } -} - -{{end}}{{end}}{{end}} - -{{range .Values}}{{if not .Primitive}}{{if .MapKey }} - -func (f *encFnInfo) {{ .MethodNamePfx "fastpathEnc" false }}R(rv reflect.Value) { - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv.Interface().(map[{{ .MapKey }}]{{ .Elem }}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - {{if eq .MapKey "string"}}asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - {{end}}if e.h.Canonical { - {{if eq .MapKey "interface{}"}}{{/* out of band - */}}var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI {{/* put loop variables outside. 
seems currently needed for better perf */}} - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { cr.sendContainerState(containerMapKey) } - e.asis(v2[j].v) - if cr != nil { cr.sendContainerState(containerMapValue) } - e.encode(v[v2[j].i]) - } {{else}}{{ $x := sorttype .MapKey true}}v2 := make([]{{ $x }}, len(v)) - var i int - for k, _ := range v { - v2[i] = {{ $x }}(k) - i++ - } - sort.Sort({{ sorttype .MapKey false}}(v2)) - for _, k2 := range v2 { - if cr != nil { cr.sendContainerState(containerMapKey) } - {{if eq .MapKey "string"}}if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - }{{else}}{{ $y := printf "%s(k2)" .MapKey }}{{ encmd .MapKey $y }}{{end}} - if cr != nil { cr.sendContainerState(containerMapValue) } - {{ $y := printf "v[%s(k2)]" .MapKey }}{{ encmd .Elem $y }} - } {{end}} - } else { - for k2, v2 := range v { - if cr != nil { cr.sendContainerState(containerMapKey) } - {{if eq .MapKey "string"}}if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - }{{else}}{{ encmd .MapKey "k2"}}{{end}} - if cr != nil { cr.sendContainerState(containerMapValue) } - {{ encmd .Elem "v2"}} - } - } - if cr != nil { cr.sendContainerState(containerMapEnd) }{{/* ee.EncodeEnd() */}} -} - -{{end}}{{end}}{{end}} - -// -- decode - -// -- -- fast path type switch -func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { - switch v := iv.(type) { -{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} - case []{{ .Elem }}:{{else}} - case map[{{ .MapKey }}]{{ .Elem }}:{{end}} - fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, d){{if not .MapKey }} - case *[]{{ .Elem }}:{{else}} - case *map[{{ .MapKey }}]{{ .Elem }}:{{end}} - v2, changed2 := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } -{{end}}{{end}} - default: - _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} - -// -- -- fast path functions -{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} -{{/* -Slices can change if they -- did not come from an array -- are addressable (from a ptr) -- are settable (e.g. 
contained in an interface{}) -*/}} -func (f *decFnInfo) {{ .MethodNamePfx "fastpathDec" false }}R(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { {{/* // CanSet => CanAddr + Exported */}} - vp := rv.Addr().Interface().(*[]{{ .Elem }}) - v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]{{ .Elem }}) - fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, checkNil bool, d *Decoder) { - v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, checkNil bool, canChange bool, d *Decoder) (_ []{{ .Elem }}, changed bool) { - dd := d.d - {{/* // if dd.isContainerType(valueTypeNil) { dd.TryDecodeAsNil() */}} - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []{{ .Elem }}{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { {{/* - // fast-path is for "basic" immutable types, so no need to copy them over - // s := make([]{{ .Elem }}, decInferLen(containerLenS, d.h.MaxInitLen)) - // copy(s, v[:cap(v)]) - // v = s */}} - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }}) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]{{ .Elem }}, xlen) - } - } else { - v = make([]{{ .Elem }}, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } {{/* // all checks done. cannot go past len. */}} - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - {{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }} - } - if xtrunc { {{/* // means canChange=true, changed=true already. */}} - for ; j < containerLenS; j++ { - v = append(v, {{ zerocmd .Elem }}) - slh.ElemContainerState(j) - {{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }} - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() {{/* check break first, so we can initialize v with a capacity of 4 if necessary */}} - if breakFound { - if canChange { - if v == nil { - v = []{{ .Elem }}{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = make([]{{ .Elem }}, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, {{ zerocmd .Elem }}) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { {{/* // all checks done. cannot go past len. 
*/}} - {{ if eq .Elem "interface{}" }}d.decode(&v[j]) - {{ else }}v[j] = {{ decmd .Elem }}{{ end }} - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -{{end}}{{end}}{{end}} - - -{{range .Values}}{{if not .Primitive}}{{if .MapKey }} -{{/* -Maps can change if they are -- addressable (from a ptr) -- settable (e.g. contained in an interface{}) -*/}} -func (f *decFnInfo) {{ .MethodNamePfx "fastpathDec" false }}R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[{{ .MapKey }}]{{ .Elem }}) - v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[{{ .MapKey }}]{{ .Elem }}) - fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, d *Decoder) { - v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, canChange bool, - d *Decoder) (_ map[{{ .MapKey }}]{{ .Elem }}, changed bool) { - dd := d.d - cr := d.cr - {{/* // if dd.isContainerType(valueTypeNil) {dd.TryDecodeAsNil() */}} - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }}) - v = make(map[{{ .MapKey }}]{{ .Elem }}, xlen) - changed = true - } - {{ if eq .Elem "interface{}" }}mapGet := !d.h.MapValueReset && !d.h.InterfaceReset{{end}} - var mk {{ .MapKey }} - var mv {{ .Elem }} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { cr.sendContainerState(containerMapKey) } - {{ if eq .MapKey "interface{}" }}mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. */}} - }{{ else }}mk = {{ decmd .MapKey }}{{ end }} - if cr != nil { cr.sendContainerState(containerMapValue) } - {{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil } - d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }} - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { cr.sendContainerState(containerMapKey) } - {{ if eq .MapKey "interface{}" }}mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. 
*/}} - }{{ else }}mk = {{ decmd .MapKey }}{{ end }} - if cr != nil { cr.sendContainerState(containerMapValue) } - {{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil } - d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }} - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { cr.sendContainerState(containerMapEnd) } - return v, changed -} - -{{end}}{{end}}{{end}} diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/fast-path.not.go b/cluster-autoscaler/vendor/github.com/ugorji/go/codec/fast-path.not.go deleted file mode 100644 index 63e59114529b..000000000000 --- a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/fast-path.not.go +++ /dev/null @@ -1,34 +0,0 @@ -// +build notfastpath - -package codec - -import "reflect" - -const fastpathEnabled = false - -// The generated fast-path code is very large, and adds a few seconds to the build time. -// This causes test execution, execution of small tools which use codec, etc -// to take a long time. -// -// To mitigate, we now support the notfastpath tag. -// This tag disables fastpath during build, allowing for faster build, test execution, -// short-program runs, etc. - -func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false } -func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false } -func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false } -func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false } - -type fastpathT struct{} -type fastpathE struct { - rtid uintptr - rt reflect.Type - encfn func(*encFnInfo, reflect.Value) - decfn func(*decFnInfo, reflect.Value) -} -type fastpathA [0]fastpathE - -func (x fastpathA) index(rtid uintptr) int { return -1 } - -var fastpathAV fastpathA -var fastpathTV fastpathT diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl b/cluster-autoscaler/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl deleted file mode 100644 index 32df54144ca0..000000000000 --- a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl +++ /dev/null @@ -1,104 +0,0 @@ -{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} -{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}} -var {{var "c"}} bool {{/* // changed */}} -_ = {{var "c"}}{{end}} -if {{var "l"}} == 0 { - {{if isSlice }}if {{var "v"}} == nil { - {{var "v"}} = []{{ .Typ }}{} - {{var "c"}} = true - } else if len({{var "v"}}) != 0 { - {{var "v"}} = {{var "v"}}[:0] - {{var "c"}} = true - } {{end}} {{if isChan }}if {{var "v"}} == nil { - {{var "v"}} = make({{ .CTyp }}, 0) - {{var "c"}} = true - } {{end}} -} else if {{var "l"}} > 0 { - {{if isChan }}if {{var "v"}} == nil { - {{var "rl"}}, _ = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) - {{var "v"}} = make({{ .CTyp }}, {{var "rl"}}) - {{var "c"}} = true - } - for {{var "r"}} := 0; {{var "r"}} < {{var "l"}}; {{var "r"}}++ { - {{var "h"}}.ElemContainerState({{var "r"}}) - var {{var "t"}} {{ .Typ }} - {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }} - {{var "v"}} <- {{var "t"}} - } - {{ else }} var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}} - var {{var "rt"}} bool {{/* truncated */}} - _, _ = {{var "rl"}}, {{var "rt"}} - {{var "rr"}} = {{var "l"}} // len({{var "v"}}) - if {{var "l"}} > cap({{var "v"}}) { - {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}}) - {{ else }}{{if not .Immutable }} - 
{{var "rg"}} := len({{var "v"}}) > 0 - {{var "v2"}} := {{var "v"}} {{end}} - {{var "rl"}}, {{var "rt"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) - if {{var "rt"}} { - if {{var "rl"}} <= cap({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "rl"}}] - } else { - {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) - } - } else { - {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) - } - {{var "c"}} = true - {{var "rr"}} = len({{var "v"}}) {{if not .Immutable }} - if {{var "rg"}} { copy({{var "v"}}, {{var "v2"}}) } {{end}} {{end}}{{/* end not Immutable, isArray */}} - } {{if isSlice }} else if {{var "l"}} != len({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "l"}}] - {{var "c"}} = true - } {{end}} {{/* end isSlice:47 */}} - {{var "j"}} := 0 - for ; {{var "j"}} < {{var "rr"}} ; {{var "j"}}++ { - {{var "h"}}.ElemContainerState({{var "j"}}) - {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} - } - {{if isArray }}for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ { - {{var "h"}}.ElemContainerState({{var "j"}}) - z.DecSwallow() - } - {{ else }}if {{var "rt"}} { - for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ { - {{var "v"}} = append({{var "v"}}, {{ zero}}) - {{var "h"}}.ElemContainerState({{var "j"}}) - {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} - } - } {{end}} {{/* end isArray:56 */}} - {{end}} {{/* end isChan:16 */}} -} else { {{/* len < 0 */}} - {{var "j"}} := 0 - for ; !r.CheckBreak(); {{var "j"}}++ { - {{if isChan }} - {{var "h"}}.ElemContainerState({{var "j"}}) - var {{var "t"}} {{ .Typ }} - {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }} - {{var "v"}} <- {{var "t"}} - {{ else }} - if {{var "j"}} >= len({{var "v"}}) { - {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1) - {{ else }}{{var "v"}} = append({{var "v"}}, {{zero}})// var {{var "z"}} {{ .Typ }} - {{var "c"}} = true {{end}} - } - {{var "h"}}.ElemContainerState({{var "j"}}) - if {{var "j"}} < len({{var "v"}}) { - {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} - } else { - z.DecSwallow() - } - {{end}} - } - {{if isSlice }}if {{var "j"}} < len({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "j"}}] - {{var "c"}} = true - } else if {{var "j"}} == 0 && {{var "v"}} == nil { - {{var "v"}} = []{{ .Typ }}{} - {{var "c"}} = true - }{{end}} -} -{{var "h"}}.End() -{{if not isArray }}if {{var "c"}} { - *{{ .Varname }} = {{var "v"}} -}{{end}} diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl b/cluster-autoscaler/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl deleted file mode 100644 index 77400e0a1122..000000000000 --- a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl +++ /dev/null @@ -1,58 +0,0 @@ -{{var "v"}} := *{{ .Varname }} -{{var "l"}} := r.ReadMapStart() -{{var "bh"}} := z.DecBasicHandle() -if {{var "v"}} == nil { - {{var "rl"}}, _ := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) - {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) - *{{ .Varname }} = {{var "v"}} -} -var {{var "mk"}} {{ .KTyp }} -var {{var "mv"}} {{ .Typ }} -var {{var "mg"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool -if {{var "bh"}}.MapValueReset { - {{if decElemKindPtr}}{{var "mg"}} = true - {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true } - {{else if not decElemKindImmutable}}{{var "mg"}} = true - {{end}} } -if {{var "l"}} > 0 { -for {{var "j"}} := 0; {{var "j"}} < 
{{var "l"}}; {{var "j"}}++ { - z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) - {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} -{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { - {{var "mk"}} = string({{var "bv"}}) - }{{ end }}{{if decElemKindPtr}} - {{var "ms"}} = true{{end}} - if {{var "mg"}} { - {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] - if {{var "mok"}} { - {{var "ms"}} = false - } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} - } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} - z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) - {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }} - if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { - {{var "v"}}[{{var "mk"}}] = {{var "mv"}} - } -} -} else if {{var "l"}} < 0 { -for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ { - z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) - {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} -{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { - {{var "mk"}} = string({{var "bv"}}) - }{{ end }}{{if decElemKindPtr}} - {{var "ms"}} = true {{ end }} - if {{var "mg"}} { - {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] - if {{var "mok"}} { - {{var "ms"}} = false - } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} - } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} - z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) - {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }} - if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { - {{var "v"}}[{{var "mk"}}] = {{var "mv"}} - } -} -} // else len==0: TODO: Should we clear map entries? -z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/gen-helper.generated.go b/cluster-autoscaler/vendor/github.com/ugorji/go/codec/gen-helper.generated.go deleted file mode 100644 index eb0bdad35777..000000000000 --- a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/gen-helper.generated.go +++ /dev/null @@ -1,243 +0,0 @@ -/* // +build ignore */ - -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED from gen-helper.go.tmpl -// ************************************************************ - -package codec - -import ( - "encoding" - "reflect" -) - -// This file is used to generate helper code for codecgen. -// The values here i.e. genHelper(En|De)coder are not to be used directly by -// library users. They WILL change continuously and without notice. -// -// To help enforce this, we create an unexported type with exported members. -// The only way to get the type is via the one exported type that we control (somewhat). -// -// When static codecs are created for types, they will use this value -// to perform encoding or decoding of primitives or known slice or map types. - -// GenHelperEncoder is exported so that it can be used externally by codecgen. -// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. 
-func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) { - return genHelperEncoder{e: e}, e.e -} - -// GenHelperDecoder is exported so that it can be used externally by codecgen. -// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. -func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) { - return genHelperDecoder{d: d}, d.d -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -type genHelperEncoder struct { - e *Encoder - F fastpathT -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -type genHelperDecoder struct { - d *Decoder - F fastpathT -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBasicHandle() *BasicHandle { - return f.e.h -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBinary() bool { - return f.e.be // f.e.hh.isBinaryEncoding() -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncFallback(iv interface{}) { - // println(">>>>>>>>> EncFallback") - f.e.encodeI(iv, false, false) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) { - bs, fnerr := iv.MarshalText() - f.e.marshal(bs, fnerr, false, c_UTF8) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) { - bs, fnerr := iv.MarshalJSON() - f.e.marshal(bs, fnerr, true, c_UTF8) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { - bs, fnerr := iv.MarshalBinary() - f.e.marshal(bs, fnerr, false, c_RAW) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncRaw(iv Raw) { - f.e.raw(iv) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) TimeRtidIfBinc() uintptr { - if _, ok := f.e.hh.(*BincHandle); ok { - return timeTypId - } - return 0 -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) IsJSONHandle() bool { - return f.e.js -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) HasExtensions() bool { - return len(f.e.h.extHandle) != 0 -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncExt(v interface{}) (r bool) { - rt := reflect.TypeOf(v) - if rt.Kind() == reflect.Ptr { - rt = rt.Elem() - } - rtid := reflect.ValueOf(rt).Pointer() - if xfFn := f.e.h.getExt(rtid); xfFn != nil { - f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) - return true - } - return false -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncSendContainerState(c containerState) { - if f.e.cr != nil { - f.e.cr.sendContainerState(c) - } -} - -// ---------------- DECODER FOLLOWS ----------------- - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBasicHandle() *BasicHandle { - return f.d.h -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBinary() bool { - return f.d.be // f.d.hh.isBinaryEncoding() -} - -// FOR USE BY CODECGEN ONLY. 
IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecSwallow() { - f.d.swallow() -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecScratchBuffer() []byte { - return f.d.b[:] -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { - // println(">>>>>>>>> DecFallback") - f.d.decodeI(iv, chkPtr, false, false, false) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) { - return f.d.decSliceHelperStart() -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) { - f.d.structFieldNotFound(index, name) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) { - f.d.arrayCannotExpand(sliceLen, streamLen) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { - fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true)) - if fnerr != nil { - panic(fnerr) - } -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { - // bs := f.dd.DecodeBytes(f.d.b[:], true, true) - // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. - fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) - if fnerr != nil { - panic(fnerr) - } -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { - fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, false, true)) - if fnerr != nil { - panic(fnerr) - } -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecRaw() []byte { - return f.d.raw() -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) TimeRtidIfBinc() uintptr { - if _, ok := f.d.hh.(*BincHandle); ok { - return timeTypId - } - return 0 -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) IsJSONHandle() bool { - return f.d.js -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) HasExtensions() bool { - return len(f.d.h.extHandle) != 0 -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecExt(v interface{}) (r bool) { - rt := reflect.TypeOf(v).Elem() - rtid := reflect.ValueOf(rt).Pointer() - if xfFn := f.d.h.getExt(rtid); xfFn != nil { - f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) - return true - } - return false -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) { - return decInferLen(clen, maxlen, unit) -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* -func (f genHelperDecoder) DecSendContainerState(c containerState) { - if f.d.cr != nil { - f.d.cr.sendContainerState(c) - } -} diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl b/cluster-autoscaler/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl deleted file mode 100644 index ad99f6671bd6..000000000000 --- a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl +++ /dev/null @@ -1,372 +0,0 @@ -/* // +build ignore */ - -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED from gen-helper.go.tmpl -// ************************************************************ - -package codec - -import ( - "encoding" - "reflect" -) - -// This file is used to generate helper code for codecgen. -// The values here i.e. genHelper(En|De)coder are not to be used directly by -// library users. They WILL change continuously and without notice. -// -// To help enforce this, we create an unexported type with exported members. -// The only way to get the type is via the one exported type that we control (somewhat). -// -// When static codecs are created for types, they will use this value -// to perform encoding or decoding of primitives or known slice or map types. - -// GenHelperEncoder is exported so that it can be used externally by codecgen. -// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. -func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) { - return genHelperEncoder{e:e}, e.e -} - -// GenHelperDecoder is exported so that it can be used externally by codecgen. -// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. -func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) { - return genHelperDecoder{d:d}, d.d -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -type genHelperEncoder struct { - e *Encoder - F fastpathT -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -type genHelperDecoder struct { - d *Decoder - F fastpathT -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBasicHandle() *BasicHandle { - return f.e.h -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBinary() bool { - return f.e.be // f.e.hh.isBinaryEncoding() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncFallback(iv interface{}) { - // println(">>>>>>>>> EncFallback") - f.e.encodeI(iv, false, false) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) { - bs, fnerr := iv.MarshalText() - f.e.marshal(bs, fnerr, false, c_UTF8) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) { - bs, fnerr := iv.MarshalJSON() - f.e.marshal(bs, fnerr, true, c_UTF8) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { - bs, fnerr := iv.MarshalBinary() - f.e.marshal(bs, fnerr, false, c_RAW) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* -func (f genHelperEncoder) EncRaw(iv Raw) { - f.e.raw(iv) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) TimeRtidIfBinc() uintptr { - if _, ok := f.e.hh.(*BincHandle); ok { - return timeTypId - } - return 0 -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) IsJSONHandle() bool { - return f.e.js -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) HasExtensions() bool { - return len(f.e.h.extHandle) != 0 -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncExt(v interface{}) (r bool) { - rt := reflect.TypeOf(v) - if rt.Kind() == reflect.Ptr { - rt = rt.Elem() - } - rtid := reflect.ValueOf(rt).Pointer() - if xfFn := f.e.h.getExt(rtid); xfFn != nil { - f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) - return true - } - return false -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncSendContainerState(c containerState) { - if f.e.cr != nil { - f.e.cr.sendContainerState(c) - } -} - -// ---------------- DECODER FOLLOWS ----------------- - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBasicHandle() *BasicHandle { - return f.d.h -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBinary() bool { - return f.d.be // f.d.hh.isBinaryEncoding() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecSwallow() { - f.d.swallow() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecScratchBuffer() []byte { - return f.d.b[:] -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { - // println(">>>>>>>>> DecFallback") - f.d.decodeI(iv, chkPtr, false, false, false) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) { - return f.d.decSliceHelperStart() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) { - f.d.structFieldNotFound(index, name) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) { - f.d.arrayCannotExpand(sliceLen, streamLen) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { - fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true)) - if fnerr != nil { - panic(fnerr) - } -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { - // bs := f.dd.DecodeBytes(f.d.b[:], true, true) - // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. - fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) - if fnerr != nil { - panic(fnerr) - } -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
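`EncExt` above (and `DecExt` on the decoder side) key the extension lookup on a `uintptr` derived from the `reflect.Type` itself: reflecting on the interface value yields the pointer to the runtime type descriptor, which is stable and unique per type. A runnable sketch of that identity trick (the `exts` registry is illustrative):

```go
package main

import (
	"fmt"
	"reflect"
)

// rtid reproduces the identity trick used throughout the deleted code:
// a reflect.Type interface value wraps a pointer to the runtime type
// descriptor, so reflecting on it and taking Pointer() yields a stable,
// per-type uintptr key.
func rtid(t reflect.Type) uintptr {
	return reflect.ValueOf(t).Pointer()
}

func main() {
	exts := map[uintptr]string{}
	exts[rtid(reflect.TypeOf(int64(0)))] = "int64 extension"

	var v int64 = 7
	if name, ok := exts[rtid(reflect.TypeOf(v))]; ok {
		fmt.Println("found:", name) // found: int64 extension
	}
}
```

A plain `map[reflect.Type]string` would also work, since `reflect.Type` values are comparable; the uintptr form presumably avoids interface comparisons on the hot path.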
*DO NOT USE* -func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { - fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, false, true)) - if fnerr != nil { - panic(fnerr) - } -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecRaw() []byte { - return f.d.raw() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) TimeRtidIfBinc() uintptr { - if _, ok := f.d.hh.(*BincHandle); ok { - return timeTypId - } - return 0 -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) IsJSONHandle() bool { - return f.d.js -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) HasExtensions() bool { - return len(f.d.h.extHandle) != 0 -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecExt(v interface{}) (r bool) { - rt := reflect.TypeOf(v).Elem() - rtid := reflect.ValueOf(rt).Pointer() - if xfFn := f.d.h.getExt(rtid); xfFn != nil { - f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) - return true - } - return false -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) { - return decInferLen(clen, maxlen, unit) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecSendContainerState(c containerState) { - if f.d.cr != nil { - f.d.cr.sendContainerState(c) - } -} - -{{/* - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncDriver() encDriver { - return f.e.e -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecDriver() decDriver { - return f.d.d -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncNil() { - f.e.e.EncodeNil() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBytes(v []byte) { - f.e.e.EncodeStringBytes(c_RAW, v) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncArrayStart(length int) { - f.e.e.EncodeArrayStart(length) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncArrayEnd() { - f.e.e.EncodeArrayEnd() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncArrayEntrySeparator() { - f.e.e.EncodeArrayEntrySeparator() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncMapStart(length int) { - f.e.e.EncodeMapStart(length) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncMapEnd() { - f.e.e.EncodeMapEnd() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncMapEntrySeparator() { - f.e.e.EncodeMapEntrySeparator() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncMapKVSeparator() { - f.e.e.EncodeMapKVSeparator() -} - -// --------- - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBytes(v *[]byte) { - *v = f.d.d.DecodeBytes(*v) -} -// FOR USE BY CODECGEN ONLY. 
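`DecInferLen`, forwarded above to the internal `decInferLen`, exists so that a stream-declared container length cannot force an unbounded up-front allocation. The real cutoff arithmetic lives in the deleted library; the sketch below only illustrates the contract (the 4KB default budget is an assumption, not the library's actual constant):

```go
package main

import "fmt"

// decInferLen sketch: clamp a stream-declared element count (clen) to a
// budget (maxlen elements, or a default derived from unit, the
// per-element size), reporting whether the count was truncated so the
// caller knows it must grow the container incrementally afterwards.
func decInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) {
	if clen <= 0 {
		return 0, false
	}
	if maxlen <= 0 {
		maxlen = 4 * 1024 / unit // assumed default: ~4KB worth of elements
	}
	if clen > maxlen {
		return maxlen, true
	}
	return clen, false
}

func main() {
	// A hostile stream claims a million 16-byte entries: allocate 256
	// up front, then append the (real) remainder as it is decoded.
	fmt.Println(decInferLen(1_000_000, 0, 16)) // 256 true
}
```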
IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecTryNil() bool { - return f.d.d.TryDecodeAsNil() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecContainerIsNil() (b bool) { - return f.d.d.IsContainerType(valueTypeNil) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecContainerIsMap() (b bool) { - return f.d.d.IsContainerType(valueTypeMap) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecContainerIsArray() (b bool) { - return f.d.d.IsContainerType(valueTypeArray) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecCheckBreak() bool { - return f.d.d.CheckBreak() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecMapStart() int { - return f.d.d.ReadMapStart() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecArrayStart() int { - return f.d.d.ReadArrayStart() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecMapEnd() { - f.d.d.ReadMapEnd() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecArrayEnd() { - f.d.d.ReadArrayEnd() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecArrayEntrySeparator() { - f.d.d.ReadArrayEntrySeparator() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecMapEntrySeparator() { - f.d.d.ReadMapEntrySeparator() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecMapKVSeparator() { - f.d.d.ReadMapKVSeparator() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) ReadStringAsBytes(bs []byte) []byte { - return f.d.d.DecodeStringAsBytes(bs) -} - - -// -- encode calls (primitives) -{{range .Values}}{{if .Primitive }}{{if ne .Primitive "interface{}" }} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) {{ .MethodNamePfx "Enc" true }}(v {{ .Primitive }}) { - ee := f.e.e - {{ encmd .Primitive "v" }} -} -{{ end }}{{ end }}{{ end }} - -// -- decode calls (primitives) -{{range .Values}}{{if .Primitive }}{{if ne .Primitive "interface{}" }} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) {{ .MethodNamePfx "Dec" true }}(vp *{{ .Primitive }}) { - dd := f.d.d - *vp = {{ decmd .Primitive }} -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) {{ .MethodNamePfx "Read" true }}() (v {{ .Primitive }}) { - dd := f.d.d - v = {{ decmd .Primitive }} - return -} -{{ end }}{{ end }}{{ end }} - - -// -- encode calls (slices/maps) -{{range .Values}}{{if not .Primitive }}{{if .Slice }} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) {{ .MethodNamePfx "Enc" false }}(v []{{ .Elem }}) { {{ else }} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
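The `{{range .Values}}` sections here stamp out one helper method per primitive or fastpath type using `text/template`. A condensed, runnable sketch of the same mechanism (the `prim` descriptor and the method bodies are invented; the real template ranges over the library's fastpath value list):

```go
package main

import (
	"os"
	"text/template"
)

// One Enc method is emitted per descriptor, just as the deleted
// template ranges over .Values to emit per-primitive helpers.
const methodTmpl = `{{range .}}
// FOR USE BY CODECGEN ONLY.
func (f genHelperEncoder) Enc{{.Name}}(v {{.Type}}) {
	// ... write v using the {{.Type}} fast path ...
}
{{end}}`

type prim struct{ Name, Type string }

func main() {
	t := template.Must(template.New("gen").Parse(methodTmpl))
	if err := t.Execute(os.Stdout, []prim{
		{"Int64", "int64"},
		{"String", "string"},
	}); err != nil {
		panic(err)
	}
}
```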
*DO NOT USE* -func (f genHelperEncoder) {{ .MethodNamePfx "Enc" false }}(v map[{{ .MapKey }}]{{ .Elem }}) { {{end}} - f.F.{{ .MethodNamePfx "Enc" false }}V(v, false, f.e) -} -{{ end }}{{ end }} - -// -- decode calls (slices/maps) -{{range .Values}}{{if not .Primitive }} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -{{if .Slice }}func (f genHelperDecoder) {{ .MethodNamePfx "Dec" false }}(vp *[]{{ .Elem }}) { -{{else}}func (f genHelperDecoder) {{ .MethodNamePfx "Dec" false }}(vp *map[{{ .MapKey }}]{{ .Elem }}) { {{end}} - v, changed := f.F.{{ .MethodNamePfx "Dec" false }}V(*vp, false, true, f.d) - if changed { - *vp = v - } -} -{{ end }}{{ end }} -*/}} diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/gen.generated.go b/cluster-autoscaler/vendor/github.com/ugorji/go/codec/gen.generated.go deleted file mode 100644 index 2ace97b78cd7..000000000000 --- a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/gen.generated.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl - -const genDecMapTmpl = ` -{{var "v"}} := *{{ .Varname }} -{{var "l"}} := r.ReadMapStart() -{{var "bh"}} := z.DecBasicHandle() -if {{var "v"}} == nil { - {{var "rl"}}, _ := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) - {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) - *{{ .Varname }} = {{var "v"}} -} -var {{var "mk"}} {{ .KTyp }} -var {{var "mv"}} {{ .Typ }} -var {{var "mg"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool -if {{var "bh"}}.MapValueReset { - {{if decElemKindPtr}}{{var "mg"}} = true - {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true } - {{else if not decElemKindImmutable}}{{var "mg"}} = true - {{end}} } -if {{var "l"}} > 0 { -for {{var "j"}} := 0; {{var "j"}} < {{var "l"}}; {{var "j"}}++ { - z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) - {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} -{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { - {{var "mk"}} = string({{var "bv"}}) - }{{ end }}{{if decElemKindPtr}} - {{var "ms"}} = true{{end}} - if {{var "mg"}} { - {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] - if {{var "mok"}} { - {{var "ms"}} = false - } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} - } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} - z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) - {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }} - if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { - {{var "v"}}[{{var "mk"}}] = {{var "mv"}} - } -} -} else if {{var "l"}} < 0 { -for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ { - z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) - {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} -{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. 
*/}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { - {{var "mk"}} = string({{var "bv"}}) - }{{ end }}{{if decElemKindPtr}} - {{var "ms"}} = true {{ end }} - if {{var "mg"}} { - {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] - if {{var "mok"}} { - {{var "ms"}} = false - } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} - } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} - z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) - {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }} - if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { - {{var "v"}}[{{var "mk"}}] = {{var "mv"}} - } -} -} // else len==0: TODO: Should we clear map entries? -z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) -` - -const genDecListTmpl = ` -{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} -{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}} -var {{var "c"}} bool {{/* // changed */}} -_ = {{var "c"}}{{end}} -if {{var "l"}} == 0 { - {{if isSlice }}if {{var "v"}} == nil { - {{var "v"}} = []{{ .Typ }}{} - {{var "c"}} = true - } else if len({{var "v"}}) != 0 { - {{var "v"}} = {{var "v"}}[:0] - {{var "c"}} = true - } {{end}} {{if isChan }}if {{var "v"}} == nil { - {{var "v"}} = make({{ .CTyp }}, 0) - {{var "c"}} = true - } {{end}} -} else if {{var "l"}} > 0 { - {{if isChan }}if {{var "v"}} == nil { - {{var "rl"}}, _ = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) - {{var "v"}} = make({{ .CTyp }}, {{var "rl"}}) - {{var "c"}} = true - } - for {{var "r"}} := 0; {{var "r"}} < {{var "l"}}; {{var "r"}}++ { - {{var "h"}}.ElemContainerState({{var "r"}}) - var {{var "t"}} {{ .Typ }} - {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }} - {{var "v"}} <- {{var "t"}} - } - {{ else }} var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}} - var {{var "rt"}} bool {{/* truncated */}} - _, _ = {{var "rl"}}, {{var "rt"}} - {{var "rr"}} = {{var "l"}} // len({{var "v"}}) - if {{var "l"}} > cap({{var "v"}}) { - {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}}) - {{ else }}{{if not .Immutable }} - {{var "rg"}} := len({{var "v"}}) > 0 - {{var "v2"}} := {{var "v"}} {{end}} - {{var "rl"}}, {{var "rt"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) - if {{var "rt"}} { - if {{var "rl"}} <= cap({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "rl"}}] - } else { - {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) - } - } else { - {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) - } - {{var "c"}} = true - {{var "rr"}} = len({{var "v"}}) {{if not .Immutable }} - if {{var "rg"}} { copy({{var "v"}}, {{var "v2"}}) } {{end}} {{end}}{{/* end not Immutable, isArray */}} - } {{if isSlice }} else if {{var "l"}} != len({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "l"}}] - {{var "c"}} = true - } {{end}} {{/* end isSlice:47 */}} - {{var "j"}} := 0 - for ; {{var "j"}} < {{var "rr"}} ; {{var "j"}}++ { - {{var "h"}}.ElemContainerState({{var "j"}}) - {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} - } - {{if isArray }}for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ { - {{var "h"}}.ElemContainerState({{var "j"}}) - z.DecSwallow() - } - {{ else }}if {{var "rt"}} { - for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ { - {{var "v"}} = append({{var "v"}}, {{ zero}}) - {{var "h"}}.ElemContainerState({{var "j"}}) - {{ $x := printf 
"%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} - } - } {{end}} {{/* end isArray:56 */}} - {{end}} {{/* end isChan:16 */}} -} else { {{/* len < 0 */}} - {{var "j"}} := 0 - for ; !r.CheckBreak(); {{var "j"}}++ { - {{if isChan }} - {{var "h"}}.ElemContainerState({{var "j"}}) - var {{var "t"}} {{ .Typ }} - {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }} - {{var "v"}} <- {{var "t"}} - {{ else }} - if {{var "j"}} >= len({{var "v"}}) { - {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1) - {{ else }}{{var "v"}} = append({{var "v"}}, {{zero}})// var {{var "z"}} {{ .Typ }} - {{var "c"}} = true {{end}} - } - {{var "h"}}.ElemContainerState({{var "j"}}) - if {{var "j"}} < len({{var "v"}}) { - {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} - } else { - z.DecSwallow() - } - {{end}} - } - {{if isSlice }}if {{var "j"}} < len({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "j"}}] - {{var "c"}} = true - } else if {{var "j"}} == 0 && {{var "v"}} == nil { - {{var "v"}} = []{{ .Typ }}{} - {{var "c"}} = true - }{{end}} -} -{{var "h"}}.End() -{{if not isArray }}if {{var "c"}} { - *{{ .Varname }} = {{var "v"}} -}{{end}} -` - diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/gen.go b/cluster-autoscaler/vendor/github.com/ugorji/go/codec/gen.go deleted file mode 100644 index c4944dbff3e0..000000000000 --- a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/gen.go +++ /dev/null @@ -1,2014 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "bytes" - "encoding/base64" - "errors" - "fmt" - "go/format" - "io" - "io/ioutil" - "math/rand" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "sync" - "text/template" - "time" - "unicode" - "unicode/utf8" -) - -// --------------------------------------------------- -// codecgen supports the full cycle of reflection-based codec: -// - RawExt -// - Raw -// - Builtins -// - Extensions -// - (Binary|Text|JSON)(Unm|M)arshal -// - generic by-kind -// -// This means that, for dynamic things, we MUST use reflection to at least get the reflect.Type. -// In those areas, we try to only do reflection or interface-conversion when NECESSARY: -// - Extensions, only if Extensions are configured. -// -// However, codecgen doesn't support the following: -// - Canonical option. (codecgen IGNORES it currently) -// This is just because it has not been implemented. -// -// During encode/decode, Selfer takes precedence. -// A type implementing Selfer will know how to encode/decode itself statically. -// -// The following field types are supported: -// array: [n]T -// slice: []T -// map: map[K]V -// primitive: [u]int[n], float(32|64), bool, string -// struct -// -// --------------------------------------------------- -// Note that a Selfer cannot call (e|d).(En|De)code on itself, -// as this will cause a circular reference, as (En|De)code will call Selfer methods. -// Any type that implements Selfer must implement completely and not fallback to (En|De)code. -// -// In addition, code in this file manages the generation of fast-path implementations of -// encode/decode of slices/maps of primitive keys/values. -// -// Users MUST re-generate their implementations whenever the code shape changes. -// The generated code will panic if it was generated with a version older than the supporting library. 
-// --------------------------------------------------- -// -// codec framework is very feature rich. -// When encoding or decoding into an interface, it depends on the runtime type of the interface. -// The type of the interface may be a named type, an extension, etc. -// Consequently, we fallback to runtime codec for encoding/decoding interfaces. -// In addition, we fallback for any value which cannot be guaranteed at runtime. -// This allows us support ANY value, including any named types, specifically those which -// do not implement our interfaces (e.g. Selfer). -// -// This explains some slowness compared to other code generation codecs (e.g. msgp). -// This reduction in speed is only seen when your refers to interfaces, -// e.g. type T struct { A interface{}; B []interface{}; C map[string]interface{} } -// -// codecgen will panic if the file was generated with an old version of the library in use. -// -// Note: -// It was a conscious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil. -// This way, there isn't a function call overhead just to see that we should not enter a block of code. - -// GenVersion is the current version of codecgen. -// -// NOTE: Increment this value each time codecgen changes fundamentally. -// Fundamental changes are: -// - helper methods change (signature change, new ones added, some removed, etc) -// - codecgen command line changes -// -// v1: Initial Version -// v2: -// v3: Changes for Kubernetes: -// changes in signature of some unpublished helper methods and codecgen cmdline arguments. -// v4: Removed separator support from (en|de)cDriver, and refactored codec(gen) -// v5: changes to support faster json decoding. Let encoder/decoder maintain state of collections. -const GenVersion = 5 - -const ( - genCodecPkg = "codec1978" - genTempVarPfx = "yy" - genTopLevelVarName = "x" - - // ignore canBeNil parameter, and always set to true. - // This is because nil can appear anywhere, so we should always check. - genAnythingCanBeNil = true - - // if genUseOneFunctionForDecStructMap, make a single codecDecodeSelferFromMap function; - // else make codecDecodeSelferFromMap{LenPrefix,CheckBreak} so that conditionals - // are not executed a lot. - // - // From testing, it didn't make much difference in runtime, so keep as true (one function only) - genUseOneFunctionForDecStructMap = true -) - -type genStructMapStyle uint8 - -const ( - genStructMapStyleConsolidated genStructMapStyle = iota - genStructMapStyleLenPrefix - genStructMapStyleCheckBreak -) - -var ( - genAllTypesSamePkgErr = errors.New("All types must be in the same package") - genExpectArrayOrMapErr = errors.New("unexpected type. Expecting array/map/slice") - genBase64enc = base64.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789__") - genQNameRegex = regexp.MustCompile(`[A-Za-z_.]+`) - genCheckVendor bool -) - -// genRunner holds some state used during a Gen run. 
-type genRunner struct { - w io.Writer // output - c uint64 // counter used for generating varsfx - t []reflect.Type // list of types to run selfer on - - tc reflect.Type // currently running selfer on this type - te map[uintptr]bool // types for which the encoder has been created - td map[uintptr]bool // types for which the decoder has been created - cp string // codec import path - - im map[string]reflect.Type // imports to add - imn map[string]string // package names of imports to add - imc uint64 // counter for import numbers - - is map[reflect.Type]struct{} // types seen during import search - bp string // base PkgPath, for which we are generating for - - cpfx string // codec package prefix - unsafe bool // is unsafe to be used in generated code? - - tm map[reflect.Type]struct{} // types for which enc/dec must be generated - ts []reflect.Type // types for which enc/dec must be generated - - xs string // top level variable/constant suffix - hn string // fn helper type name - - ti *TypeInfos - // rr *rand.Rand // random generator for file-specific types -} - -// Gen will write a complete go file containing Selfer implementations for each -// type passed. All the types must be in the same package. -// -// Library users: *DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE.* -func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeInfos, typ ...reflect.Type) { - // All types passed to this method do not have a codec.Selfer method implemented directly. - // codecgen already checks the AST and skips any types that define the codec.Selfer methods. - // Consequently, there's no need to check and trim them if they implement codec.Selfer - - if len(typ) == 0 { - return - } - x := genRunner{ - unsafe: useUnsafe, - w: w, - t: typ, - te: make(map[uintptr]bool), - td: make(map[uintptr]bool), - im: make(map[string]reflect.Type), - imn: make(map[string]string), - is: make(map[reflect.Type]struct{}), - tm: make(map[reflect.Type]struct{}), - ts: []reflect.Type{}, - bp: genImportPath(typ[0]), - xs: uid, - ti: ti, - } - if x.ti == nil { - x.ti = defTypeInfos - } - if x.xs == "" { - rr := rand.New(rand.NewSource(time.Now().UnixNano())) - x.xs = strconv.FormatInt(rr.Int63n(9999), 10) - } - - // gather imports first: - x.cp = genImportPath(reflect.TypeOf(x)) - x.imn[x.cp] = genCodecPkg - for _, t := range typ { - // fmt.Printf("###########: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name()) - if genImportPath(t) != x.bp { - panic(genAllTypesSamePkgErr) - } - x.genRefPkgs(t) - } - if buildTags != "" { - x.line("// +build " + buildTags) - x.line("") - } - x.line(` - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -`) - x.line("package " + pkgName) - x.line("") - x.line("import (") - if x.cp != x.bp { - x.cpfx = genCodecPkg + "." 
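`Gen` above writes the file header, build tags, and package clause, then emits the gathered imports; the next hunk sorts the import keys so repeated runs are byte-identical, and the file's `go/format` import suggests the output is expected to round-trip through gofmt. A condensed sketch of those two ingredients (the `render` helper is invented):

```go
package main

import (
	"fmt"
	"go/format"
	"sort"
	"strings"
)

// render emits a deterministic import block: Go randomizes map
// iteration order, so the keys are sorted first; the generated text is
// then normalized with go/format.
func render(pkg string, imports map[string]bool) ([]byte, error) {
	keys := make([]string, 0, len(imports))
	for k := range imports {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	var b strings.Builder
	fmt.Fprintf(&b, "package %s\n\nimport (\n", pkg)
	for _, k := range keys {
		fmt.Fprintf(&b, "%q\n", k)
	}
	b.WriteString(")\n")
	return format.Source([]byte(b.String())) // gofmt the generated text
}

func main() {
	src, err := render("codec", map[string]bool{"reflect": true, "fmt": true})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(src))
}
```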
- x.linef("%s \"%s\"", genCodecPkg, x.cp) - } - // use a sorted set of im keys, so that we can get consistent output - imKeys := make([]string, 0, len(x.im)) - for k, _ := range x.im { - imKeys = append(imKeys, k) - } - sort.Strings(imKeys) - for _, k := range imKeys { // for k, _ := range x.im { - x.linef("%s \"%s\"", x.imn[k], k) - } - // add required packages - for _, k := range [...]string{"reflect", "unsafe", "runtime", "fmt", "errors"} { - if _, ok := x.im[k]; !ok { - if k == "unsafe" && !x.unsafe { - continue - } - x.line("\"" + k + "\"") - } - } - x.line(")") - x.line("") - - x.line("const (") - x.linef("// ----- content types ----") - x.linef("codecSelferC_UTF8%s = %v", x.xs, int64(c_UTF8)) - x.linef("codecSelferC_RAW%s = %v", x.xs, int64(c_RAW)) - x.linef("// ----- value types used ----") - x.linef("codecSelferValueTypeArray%s = %v", x.xs, int64(valueTypeArray)) - x.linef("codecSelferValueTypeMap%s = %v", x.xs, int64(valueTypeMap)) - x.linef("// ----- containerStateValues ----") - x.linef("codecSelfer_containerMapKey%s = %v", x.xs, int64(containerMapKey)) - x.linef("codecSelfer_containerMapValue%s = %v", x.xs, int64(containerMapValue)) - x.linef("codecSelfer_containerMapEnd%s = %v", x.xs, int64(containerMapEnd)) - x.linef("codecSelfer_containerArrayElem%s = %v", x.xs, int64(containerArrayElem)) - x.linef("codecSelfer_containerArrayEnd%s = %v", x.xs, int64(containerArrayEnd)) - x.line(")") - x.line("var (") - x.line("codecSelferBitsize" + x.xs + " = uint8(reflect.TypeOf(uint(0)).Bits())") - x.line("codecSelferOnlyMapOrArrayEncodeToStructErr" + x.xs + " = errors.New(`only encoded map or array can be decoded into a struct`)") - x.line(")") - x.line("") - - if x.unsafe { - x.line("type codecSelferUnsafeString" + x.xs + " struct { Data uintptr; Len int}") - x.line("") - } - x.hn = "codecSelfer" + x.xs - x.line("type " + x.hn + " struct{}") - x.line("") - - x.varsfxreset() - x.line("func init() {") - x.linef("if %sGenVersion != %v {", x.cpfx, GenVersion) - x.line("_, file, _, _ := runtime.Caller(0)") - x.line(`err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", `) - x.linef(`%v, %sGenVersion, file)`, GenVersion, x.cpfx) - x.line("panic(err)") - x.linef("}") - x.line("if false { // reference the types, but skip this branch at build/run time") - var n int - // for k, t := range x.im { - for _, k := range imKeys { - t := x.im[k] - x.linef("var v%v %s.%s", n, x.imn[k], t.Name()) - n++ - } - if x.unsafe { - x.linef("var v%v unsafe.Pointer", n) - n++ - } - if n > 0 { - x.out("_") - for i := 1; i < n; i++ { - x.out(", _") - } - x.out(" = v0") - for i := 1; i < n; i++ { - x.outf(", v%v", i) - } - } - x.line("} ") // close if false - x.line("}") // close init - x.line("") - - // generate rest of type info - for _, t := range typ { - x.tc = t - x.selfer(true) - x.selfer(false) - } - - for _, t := range x.ts { - rtid := reflect.ValueOf(t).Pointer() - // generate enc functions for all these slice/map types. - x.varsfxreset() - x.linef("func (x %s) enc%s(v %s%s, e *%sEncoder) {", x.hn, x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), x.cpfx) - x.genRequiredMethodVars(true) - switch t.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - x.encListFallback("v", t) - case reflect.Map: - x.encMapFallback("v", t) - default: - panic(genExpectArrayOrMapErr) - } - x.line("}") - x.line("") - - // generate dec functions for all these slice/map types. 
- x.varsfxreset() - x.linef("func (x %s) dec%s(v *%s, d *%sDecoder) {", x.hn, x.genMethodNameT(t), x.genTypeName(t), x.cpfx) - x.genRequiredMethodVars(false) - switch t.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - x.decListFallback("v", rtid, t) - case reflect.Map: - x.decMapFallback("v", rtid, t) - default: - panic(genExpectArrayOrMapErr) - } - x.line("}") - x.line("") - } - - x.line("") -} - -func (x *genRunner) checkForSelfer(t reflect.Type, varname string) bool { - // return varname != genTopLevelVarName && t != x.tc - // the only time we checkForSelfer is if we are not at the TOP of the generated code. - return varname != genTopLevelVarName -} - -func (x *genRunner) arr2str(t reflect.Type, s string) string { - if t.Kind() == reflect.Array { - return s - } - return "" -} - -func (x *genRunner) genRequiredMethodVars(encode bool) { - x.line("var h " + x.hn) - if encode { - x.line("z, r := " + x.cpfx + "GenHelperEncoder(e)") - } else { - x.line("z, r := " + x.cpfx + "GenHelperDecoder(d)") - } - x.line("_, _, _ = h, z, r") -} - -func (x *genRunner) genRefPkgs(t reflect.Type) { - if _, ok := x.is[t]; ok { - return - } - // fmt.Printf(">>>>>>: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name()) - x.is[t] = struct{}{} - tpkg, tname := genImportPath(t), t.Name() - if tpkg != "" && tpkg != x.bp && tpkg != x.cp && tname != "" && tname[0] >= 'A' && tname[0] <= 'Z' { - if _, ok := x.im[tpkg]; !ok { - x.im[tpkg] = t - if idx := strings.LastIndex(tpkg, "/"); idx < 0 { - x.imn[tpkg] = tpkg - } else { - x.imc++ - x.imn[tpkg] = "pkg" + strconv.FormatUint(x.imc, 10) + "_" + genGoIdentifier(tpkg[idx+1:], false) - } - } - } - switch t.Kind() { - case reflect.Array, reflect.Slice, reflect.Ptr, reflect.Chan: - x.genRefPkgs(t.Elem()) - case reflect.Map: - x.genRefPkgs(t.Elem()) - x.genRefPkgs(t.Key()) - case reflect.Struct: - for i := 0; i < t.NumField(); i++ { - if fname := t.Field(i).Name; fname != "" && fname[0] >= 'A' && fname[0] <= 'Z' { - x.genRefPkgs(t.Field(i).Type) - } - } - } -} - -func (x *genRunner) line(s string) { - x.out(s) - if len(s) == 0 || s[len(s)-1] != '\n' { - x.out("\n") - } -} - -func (x *genRunner) varsfx() string { - x.c++ - return strconv.FormatUint(x.c, 10) -} - -func (x *genRunner) varsfxreset() { - x.c = 0 -} - -func (x *genRunner) out(s string) { - if _, err := io.WriteString(x.w, s); err != nil { - panic(err) - } -} - -func (x *genRunner) linef(s string, params ...interface{}) { - x.line(fmt.Sprintf(s, params...)) -} - -func (x *genRunner) outf(s string, params ...interface{}) { - x.out(fmt.Sprintf(s, params...)) -} - -func (x *genRunner) genTypeName(t reflect.Type) (n string) { - // defer func() { fmt.Printf(">>>> ####: genTypeName: t: %v, name: '%s'\n", t, n) }() - - // if the type has a PkgPath, which doesn't match the current package, - // then include it. 
- // We cannot depend on t.String() because it includes current package, - // or t.PkgPath because it includes full import path, - // - var ptrPfx string - for t.Kind() == reflect.Ptr { - ptrPfx += "*" - t = t.Elem() - } - if tn := t.Name(); tn != "" { - return ptrPfx + x.genTypeNamePrim(t) - } - switch t.Kind() { - case reflect.Map: - return ptrPfx + "map[" + x.genTypeName(t.Key()) + "]" + x.genTypeName(t.Elem()) - case reflect.Slice: - return ptrPfx + "[]" + x.genTypeName(t.Elem()) - case reflect.Array: - return ptrPfx + "[" + strconv.FormatInt(int64(t.Len()), 10) + "]" + x.genTypeName(t.Elem()) - case reflect.Chan: - return ptrPfx + t.ChanDir().String() + " " + x.genTypeName(t.Elem()) - default: - if t == intfTyp { - return ptrPfx + "interface{}" - } else { - return ptrPfx + x.genTypeNamePrim(t) - } - } -} - -func (x *genRunner) genTypeNamePrim(t reflect.Type) (n string) { - if t.Name() == "" { - return t.String() - } else if genImportPath(t) == "" || genImportPath(t) == genImportPath(x.tc) { - return t.Name() - } else { - return x.imn[genImportPath(t)] + "." + t.Name() - // return t.String() // best way to get the package name inclusive - } -} - -func (x *genRunner) genZeroValueR(t reflect.Type) string { - // if t is a named type, w - switch t.Kind() { - case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func, - reflect.Slice, reflect.Map, reflect.Invalid: - return "nil" - case reflect.Bool: - return "false" - case reflect.String: - return `""` - case reflect.Struct, reflect.Array: - return x.genTypeName(t) + "{}" - default: // all numbers - return "0" - } -} - -func (x *genRunner) genMethodNameT(t reflect.Type) (s string) { - return genMethodNameT(t, x.tc) -} - -func (x *genRunner) selfer(encode bool) { - t := x.tc - t0 := t - // always make decode use a pointer receiver, - // and structs always use a ptr receiver (encode|decode) - isptr := !encode || t.Kind() == reflect.Struct - x.varsfxreset() - fnSigPfx := "func (x " - if isptr { - fnSigPfx += "*" - } - fnSigPfx += x.genTypeName(t) - - x.out(fnSigPfx) - if isptr { - t = reflect.PtrTo(t) - } - if encode { - x.line(") CodecEncodeSelf(e *" + x.cpfx + "Encoder) {") - x.genRequiredMethodVars(true) - // x.enc(genTopLevelVarName, t) - x.encVar(genTopLevelVarName, t) - } else { - x.line(") CodecDecodeSelf(d *" + x.cpfx + "Decoder) {") - x.genRequiredMethodVars(false) - // do not use decVar, as there is no need to check TryDecodeAsNil - // or way to elegantly handle that, and also setting it to a - // non-nil value doesn't affect the pointer passed. 
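`genTypeName` above rebuilds Go source syntax recursively instead of trusting `t.String()`, because the string form qualifies names relative to the wrong package. The core recursion, with package qualification and pointer-prefix handling stripped out for brevity:

```go
package main

import (
	"fmt"
	"reflect"
)

// typeName is a pared-down sketch of genTypeName: reconstruct type
// syntax by recursing on reflect.Type's kind and element types.
func typeName(t reflect.Type) string {
	switch t.Kind() {
	case reflect.Ptr:
		return "*" + typeName(t.Elem())
	case reflect.Slice:
		return "[]" + typeName(t.Elem())
	case reflect.Map:
		return "map[" + typeName(t.Key()) + "]" + typeName(t.Elem())
	case reflect.Array:
		return fmt.Sprintf("[%d]%s", t.Len(), typeName(t.Elem()))
	case reflect.Chan:
		return t.ChanDir().String() + " " + typeName(t.Elem())
	default:
		return t.Name() // named or predeclared type
	}
}

func main() {
	fmt.Println(typeName(reflect.TypeOf(map[string][]*int{})))
	// map[string][]*int
}
```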
- // x.decVar(genTopLevelVarName, t, false) - x.dec(genTopLevelVarName, t0) - } - x.line("}") - x.line("") - - if encode || t0.Kind() != reflect.Struct { - return - } - - // write is containerMap - if genUseOneFunctionForDecStructMap { - x.out(fnSigPfx) - x.line(") codecDecodeSelfFromMap(l int, d *" + x.cpfx + "Decoder) {") - x.genRequiredMethodVars(false) - x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleConsolidated) - x.line("}") - x.line("") - } else { - x.out(fnSigPfx) - x.line(") codecDecodeSelfFromMapLenPrefix(l int, d *" + x.cpfx + "Decoder) {") - x.genRequiredMethodVars(false) - x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleLenPrefix) - x.line("}") - x.line("") - - x.out(fnSigPfx) - x.line(") codecDecodeSelfFromMapCheckBreak(l int, d *" + x.cpfx + "Decoder) {") - x.genRequiredMethodVars(false) - x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleCheckBreak) - x.line("}") - x.line("") - } - - // write containerArray - x.out(fnSigPfx) - x.line(") codecDecodeSelfFromArray(l int, d *" + x.cpfx + "Decoder) {") - x.genRequiredMethodVars(false) - x.decStructArray(genTopLevelVarName, "l", "return", reflect.ValueOf(t0).Pointer(), t0) - x.line("}") - x.line("") - -} - -// used for chan, array, slice, map -func (x *genRunner) xtraSM(varname string, encode bool, t reflect.Type) { - if encode { - x.linef("h.enc%s((%s%s)(%s), e)", x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), varname) - } else { - x.linef("h.dec%s((*%s)(%s), d)", x.genMethodNameT(t), x.genTypeName(t), varname) - } - x.registerXtraT(t) -} - -func (x *genRunner) registerXtraT(t reflect.Type) { - // recursively register the types - if _, ok := x.tm[t]; ok { - return - } - var tkey reflect.Type - switch t.Kind() { - case reflect.Chan, reflect.Slice, reflect.Array: - case reflect.Map: - tkey = t.Key() - default: - return - } - x.tm[t] = struct{}{} - x.ts = append(x.ts, t) - // check if this refers to any xtra types eg. a slice of array: add the array - x.registerXtraT(t.Elem()) - if tkey != nil { - x.registerXtraT(tkey) - } -} - -// encVar will encode a variable. 
-// The parameter, t, is the reflect.Type of the variable itself -func (x *genRunner) encVar(varname string, t reflect.Type) { - // fmt.Printf(">>>>>> varname: %s, t: %v\n", varname, t) - var checkNil bool - switch t.Kind() { - case reflect.Ptr, reflect.Interface, reflect.Slice, reflect.Map, reflect.Chan: - checkNil = true - } - if checkNil { - x.linef("if %s == nil { r.EncodeNil() } else { ", varname) - } - switch t.Kind() { - case reflect.Ptr: - switch t.Elem().Kind() { - case reflect.Struct, reflect.Array: - x.enc(varname, genNonPtr(t)) - default: - i := x.varsfx() - x.line(genTempVarPfx + i + " := *" + varname) - x.enc(genTempVarPfx+i, genNonPtr(t)) - } - case reflect.Struct, reflect.Array: - i := x.varsfx() - x.line(genTempVarPfx + i + " := &" + varname) - x.enc(genTempVarPfx+i, t) - default: - x.enc(varname, t) - } - - if checkNil { - x.line("}") - } - -} - -// enc will encode a variable (varname) of type t, -// except t is of kind reflect.Struct or reflect.Array, wherein varname is of type ptrTo(T) (to prevent copying) -func (x *genRunner) enc(varname string, t reflect.Type) { - rtid := reflect.ValueOf(t).Pointer() - // We call CodecEncodeSelf if one of the following are honored: - // - the type already implements Selfer, call that - // - the type has a Selfer implementation just created, use that - // - the type is in the list of the ones we will generate for, but it is not currently being generated - - mi := x.varsfx() - tptr := reflect.PtrTo(t) - tk := t.Kind() - if x.checkForSelfer(t, varname) { - if tk == reflect.Array || tk == reflect.Struct { // varname is of type *T - if tptr.Implements(selferTyp) || t.Implements(selferTyp) { - x.line(varname + ".CodecEncodeSelf(e)") - return - } - } else { // varname is of type T - if t.Implements(selferTyp) { - x.line(varname + ".CodecEncodeSelf(e)") - return - } else if tptr.Implements(selferTyp) { - x.linef("%ssf%s := &%s", genTempVarPfx, mi, varname) - x.linef("%ssf%s.CodecEncodeSelf(e)", genTempVarPfx, mi) - return - } - } - - if _, ok := x.te[rtid]; ok { - x.line(varname + ".CodecEncodeSelf(e)") - return - } - } - - inlist := false - for _, t0 := range x.t { - if t == t0 { - inlist = true - if x.checkForSelfer(t, varname) { - x.line(varname + ".CodecEncodeSelf(e)") - return - } - break - } - } - - var rtidAdded bool - if t == x.tc { - x.te[rtid] = true - rtidAdded = true - } - - // check if - // - type is RawExt, Raw - // - the type implements (Text|JSON|Binary)(Unm|M)arshal - x.linef("%sm%s := z.EncBinary()", genTempVarPfx, mi) - x.linef("_ = %sm%s", genTempVarPfx, mi) - x.line("if false {") //start if block - defer func() { x.line("}") }() //end if block - - if t == rawTyp { - x.linef("} else { z.EncRaw(%v)", varname) - return - } - if t == rawExtTyp { - x.linef("} else { r.EncodeRawExt(%v, e)", varname) - return - } - // HACK: Support for Builtins. - // Currently, only Binc supports builtins, and the only builtin type is time.Time. - // Have a method that returns the rtid for time.Time if Handle is Binc. - if t == timeTyp { - vrtid := genTempVarPfx + "m" + x.varsfx() - x.linef("} else if %s := z.TimeRtidIfBinc(); %s != 0 { ", vrtid, vrtid) - x.linef("r.EncodeBuiltin(%s, %s)", vrtid, varname) - } - // only check for extensions if the type is named, and has a packagePath. 
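`encVar`/`enc` above encode a fixed precedence: a nil check for nillable kinds, then `Selfer` (generated or hand-written), then registered extensions, then the `(Binary|Text|JSON)Marshaler` interfaces, and only then the per-kind switch with a reflection fallback. A simplified sketch of that dispatch chain at runtime rather than in generated code (`selfEncoder` stands in for the library's `Selfer`; everything here is illustrative):

```go
package main

import (
	"encoding"
	"fmt"
)

// selfEncoder stands in for codec.Selfer: a type that knows how to
// encode itself takes precedence over everything else.
type selfEncoder interface{ CodecEncodeSelf() []byte }

// encodeValue sketches the precedence order the deleted enc() bakes
// into generated code.
func encodeValue(v interface{}) []byte {
	if v == nil {
		return []byte("nil") // the generated nil check comes first
	}
	switch t := v.(type) {
	case selfEncoder:
		return t.CodecEncodeSelf()
	case encoding.BinaryMarshaler:
		b, err := t.MarshalBinary()
		if err != nil {
			panic(err) // errors become panics, as in the helpers above
		}
		return b
	default:
		return []byte(fmt.Sprintf("%v", v)) // reflection fallback
	}
}

type blob struct{}

func (blob) MarshalBinary() ([]byte, error) { return []byte{0x1}, nil }

func main() {
	fmt.Println(encodeValue(blob{}), encodeValue(42), encodeValue(nil))
}
```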
- if genImportPath(t) != "" && t.Name() != "" { - // first check if extensions are configued, before doing the interface conversion - x.linef("} else if z.HasExtensions() && z.EncExt(%s) {", varname) - } - if tk == reflect.Array || tk == reflect.Struct { // varname is of type *T - if t.Implements(binaryMarshalerTyp) || tptr.Implements(binaryMarshalerTyp) { - x.linef("} else if %sm%s { z.EncBinaryMarshal(%v) ", genTempVarPfx, mi, varname) - } - if t.Implements(jsonMarshalerTyp) || tptr.Implements(jsonMarshalerTyp) { - x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", genTempVarPfx, mi, varname) - } else if t.Implements(textMarshalerTyp) || tptr.Implements(textMarshalerTyp) { - x.linef("} else if !%sm%s { z.EncTextMarshal(%v) ", genTempVarPfx, mi, varname) - } - } else { // varname is of type T - if t.Implements(binaryMarshalerTyp) { - x.linef("} else if %sm%s { z.EncBinaryMarshal(%v) ", genTempVarPfx, mi, varname) - } else if tptr.Implements(binaryMarshalerTyp) { - x.linef("} else if %sm%s { z.EncBinaryMarshal(&%v) ", genTempVarPfx, mi, varname) - } - if t.Implements(jsonMarshalerTyp) { - x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", genTempVarPfx, mi, varname) - } else if tptr.Implements(jsonMarshalerTyp) { - x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(&%v) ", genTempVarPfx, mi, varname) - } else if t.Implements(textMarshalerTyp) { - x.linef("} else if !%sm%s { z.EncTextMarshal(%v) ", genTempVarPfx, mi, varname) - } else if tptr.Implements(textMarshalerTyp) { - x.linef("} else if !%sm%s { z.EncTextMarshal(&%v) ", genTempVarPfx, mi, varname) - } - } - x.line("} else {") - - switch t.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - x.line("r.EncodeInt(int64(" + varname + "))") - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - x.line("r.EncodeUint(uint64(" + varname + "))") - case reflect.Float32: - x.line("r.EncodeFloat32(float32(" + varname + "))") - case reflect.Float64: - x.line("r.EncodeFloat64(float64(" + varname + "))") - case reflect.Bool: - x.line("r.EncodeBool(bool(" + varname + "))") - case reflect.String: - x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + ", string(" + varname + "))") - case reflect.Chan: - x.xtraSM(varname, true, t) - // x.encListFallback(varname, rtid, t) - case reflect.Array: - x.xtraSM(varname, true, t) - case reflect.Slice: - // if nil, call dedicated function - // if a []uint8, call dedicated function - // if a known fastpath slice, call dedicated function - // else write encode function in-line. - // - if elements are primitives or Selfers, call dedicated function on each member. - // - else call Encoder.encode(XXX) on it. - if rtid == uint8SliceTypId { - x.line("r.EncodeStringBytes(codecSelferC_RAW" + x.xs + ", []byte(" + varname + "))") - } else if fastpathAV.index(rtid) != -1 { - g := x.newGenV(t) - x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", false, e)") - } else { - x.xtraSM(varname, true, t) - // x.encListFallback(varname, rtid, t) - } - case reflect.Map: - // if nil, call dedicated function - // if a known fastpath map, call dedicated function - // else write encode function in-line. - // - if elements are primitives or Selfers, call dedicated function on each member. - // - else call Encoder.encode(XXX) on it. - // x.line("if " + varname + " == nil { \nr.EncodeNil()\n } else { ") - if fastpathAV.index(rtid) != -1 { - g := x.newGenV(t) - x.line("z.F." 
+ g.MethodNamePfx("Enc", false) + "V(" + varname + ", false, e)") - } else { - x.xtraSM(varname, true, t) - // x.encMapFallback(varname, rtid, t) - } - case reflect.Struct: - if !inlist { - delete(x.te, rtid) - x.line("z.EncFallback(" + varname + ")") - break - } - x.encStruct(varname, rtid, t) - default: - if rtidAdded { - delete(x.te, rtid) - } - x.line("z.EncFallback(" + varname + ")") - } -} - -func (x *genRunner) encZero(t reflect.Type) { - switch t.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - x.line("r.EncodeInt(0)") - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - x.line("r.EncodeUint(0)") - case reflect.Float32: - x.line("r.EncodeFloat32(0)") - case reflect.Float64: - x.line("r.EncodeFloat64(0)") - case reflect.Bool: - x.line("r.EncodeBool(false)") - case reflect.String: - x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + `, "")`) - default: - x.line("r.EncodeNil()") - } -} - -func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) { - // Use knowledge from structfieldinfo (mbs, encodable fields. Ignore omitempty. ) - // replicate code in kStruct i.e. for each field, deref type to non-pointer, and call x.enc on it - - // if t === type currently running selfer on, do for all - ti := x.ti.get(rtid, t) - i := x.varsfx() - sepVarname := genTempVarPfx + "sep" + i - numfieldsvar := genTempVarPfx + "q" + i - ti2arrayvar := genTempVarPfx + "r" + i - struct2arrvar := genTempVarPfx + "2arr" + i - - x.line(sepVarname + " := !z.EncBinary()") - x.linef("%s := z.EncBasicHandle().StructToArray", struct2arrvar) - tisfi := ti.sfip // always use sequence from file. decStruct expects same thing. - // due to omitEmpty, we need to calculate the - // number of non-empty things we write out first. - // This is required as we need to pre-determine the size of the container, - // to support length-prefixing. - x.linef("var %s [%v]bool", numfieldsvar, len(tisfi)) - x.linef("_, _, _ = %s, %s, %s", sepVarname, numfieldsvar, struct2arrvar) - x.linef("const %s bool = %v", ti2arrayvar, ti.toArray) - nn := 0 - for j, si := range tisfi { - if !si.omitEmpty { - nn++ - continue - } - var t2 reflect.StructField - var omitline string - if si.i != -1 { - t2 = t.Field(int(si.i)) - } else { - t2typ := t - varname3 := varname - for _, ix := range si.is { - for t2typ.Kind() == reflect.Ptr { - t2typ = t2typ.Elem() - } - t2 = t2typ.Field(ix) - t2typ = t2.Type - varname3 = varname3 + "." + t2.Name - if t2typ.Kind() == reflect.Ptr { - omitline += varname3 + " != nil && " - } - } - } - // never check omitEmpty on a struct type, as it may contain uncomparable map/slice/etc. - // also, for maps/slices/arrays, check if len ! 0 (not if == zero value) - switch t2.Type.Kind() { - case reflect.Struct: - omitline += " true" - case reflect.Map, reflect.Slice, reflect.Array, reflect.Chan: - omitline += "len(" + varname + "." + t2.Name + ") != 0" - default: - omitline += varname + "." 
+ t2.Name + " != " + x.genZeroValueR(t2.Type) - } - x.linef("%s[%v] = %s", numfieldsvar, j, omitline) - } - x.linef("var %snn%s int", genTempVarPfx, i) - x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray { - x.line("r.EncodeArrayStart(" + strconv.FormatInt(int64(len(tisfi)), 10) + ")") - x.linef("} else {") // if not ti.toArray - x.linef("%snn%s = %v", genTempVarPfx, i, nn) - x.linef("for _, b := range %s { if b { %snn%s++ } }", numfieldsvar, genTempVarPfx, i) - x.linef("r.EncodeMapStart(%snn%s)", genTempVarPfx, i) - x.linef("%snn%s = %v", genTempVarPfx, i, 0) - // x.line("r.EncodeMapStart(" + strconv.FormatInt(int64(len(tisfi)), 10) + ")") - x.line("}") // close if not StructToArray - - for j, si := range tisfi { - i := x.varsfx() - isNilVarName := genTempVarPfx + "n" + i - var labelUsed bool - var t2 reflect.StructField - if si.i != -1 { - t2 = t.Field(int(si.i)) - } else { - t2typ := t - varname3 := varname - for _, ix := range si.is { - // fmt.Printf("%%%% %v, ix: %v\n", t2typ, ix) - for t2typ.Kind() == reflect.Ptr { - t2typ = t2typ.Elem() - } - t2 = t2typ.Field(ix) - t2typ = t2.Type - varname3 = varname3 + "." + t2.Name - if t2typ.Kind() == reflect.Ptr { - if !labelUsed { - x.line("var " + isNilVarName + " bool") - } - x.line("if " + varname3 + " == nil { " + isNilVarName + " = true ") - x.line("goto LABEL" + i) - x.line("}") - labelUsed = true - // "varname3 = new(" + x.genTypeName(t3.Elem()) + ") }") - } - } - // t2 = t.FieldByIndex(si.is) - } - if labelUsed { - x.line("LABEL" + i + ":") - } - // if the type of the field is a Selfer, or one of the ones - - x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray - if labelUsed { - x.line("if " + isNilVarName + " { r.EncodeNil() } else { ") - } - x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) - if si.omitEmpty { - x.linef("if %s[%v] {", numfieldsvar, j) - } - x.encVar(varname+"."+t2.Name, t2.Type) - if si.omitEmpty { - x.linef("} else {") - x.encZero(t2.Type) - x.linef("}") - } - if labelUsed { - x.line("}") - } - - x.linef("} else {") // if not ti.toArray - - if si.omitEmpty { - x.linef("if %s[%v] {", numfieldsvar, j) - } - x.linef("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs) - x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + ", string(\"" + si.encName + "\"))") - x.linef("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs) - if labelUsed { - x.line("if " + isNilVarName + " { r.EncodeNil() } else { ") - x.encVar(varname+"."+t2.Name, t2.Type) - x.line("}") - } else { - x.encVar(varname+"."+t2.Name, t2.Type) - } - if si.omitEmpty { - x.line("}") - } - x.linef("} ") // end if/else ti.toArray - } - x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray { - x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) - x.line("} else {") - x.linef("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) - x.line("}") - -} - -func (x *genRunner) encListFallback(varname string, t reflect.Type) { - if t.AssignableTo(uint8SliceTyp) { - x.linef("r.EncodeStringBytes(codecSelferC_RAW%s, []byte(%s))", x.xs, varname) - return - } - if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 { - x.linef("r.EncodeStringBytes(codecSelferC_RAW%s, ([%v]byte(%s))[:])", x.xs, t.Len(), varname) - return - } - i := x.varsfx() - g := genTempVarPfx - x.line("r.EncodeArrayStart(len(" + varname + "))") - if t.Kind() == reflect.Chan { - x.linef("for %si%s, %si2%s := 0, len(%s); %si%s < %si2%s; %si%s++ {", g, i, g, i, varname, 
g, i, g, i, g, i) - x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) - x.linef("%sv%s := <-%s", g, i, varname) - } else { - // x.linef("for %si%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname) - x.linef("for _, %sv%s := range %s {", genTempVarPfx, i, varname) - x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) - } - x.encVar(genTempVarPfx+"v"+i, t.Elem()) - x.line("}") - x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) -} - -func (x *genRunner) encMapFallback(varname string, t reflect.Type) { - // TODO: expand this to handle canonical. - i := x.varsfx() - x.line("r.EncodeMapStart(len(" + varname + "))") - x.linef("for %sk%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname) - // x.line("for " + genTempVarPfx + "k" + i + ", " + genTempVarPfx + "v" + i + " := range " + varname + " {") - x.linef("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs) - x.encVar(genTempVarPfx+"k"+i, t.Key()) - x.linef("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs) - x.encVar(genTempVarPfx+"v"+i, t.Elem()) - x.line("}") - x.linef("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) -} - -func (x *genRunner) decVar(varname string, t reflect.Type, canBeNil bool) { - // We only encode as nil if a nillable value. - // This removes some of the wasted checks for TryDecodeAsNil. - // We need to think about this more, to see what happens if omitempty, etc - // cause a nil value to be stored when something is expected. - // This could happen when decoding from a struct encoded as an array. - // For that, decVar should be called with canNil=true, to force true as its value. - i := x.varsfx() - if !canBeNil { - canBeNil = genAnythingCanBeNil || !genIsImmutable(t) - } - if canBeNil { - x.line("if r.TryDecodeAsNil() {") - if t.Kind() == reflect.Ptr { - x.line("if " + varname + " != nil { ") - - // if varname is a field of a struct (has a dot in it), - // then just set it to nil - if strings.IndexByte(varname, '.') != -1 { - x.line(varname + " = nil") - } else { - x.line("*" + varname + " = " + x.genZeroValueR(t.Elem())) - } - x.line("}") - } else { - x.line(varname + " = " + x.genZeroValueR(t)) - } - x.line("} else {") - } else { - x.line("// cannot be nil") - } - if t.Kind() != reflect.Ptr { - if x.decTryAssignPrimitive(varname, t) { - x.line(genTempVarPfx + "v" + i + " := &" + varname) - x.dec(genTempVarPfx+"v"+i, t) - } - } else { - x.linef("if %s == nil { %s = new(%s) }", varname, varname, x.genTypeName(t.Elem())) - // Ensure we set underlying ptr to a non-nil value (so we can deref to it later). - // There's a chance of a **T in here which is nil. - var ptrPfx string - for t = t.Elem(); t.Kind() == reflect.Ptr; t = t.Elem() { - ptrPfx += "*" - x.linef("if %s%s == nil { %s%s = new(%s)}", - ptrPfx, varname, ptrPfx, varname, x.genTypeName(t)) - } - // if varname has [ in it, then create temp variable for this ptr thingie - if strings.Index(varname, "[") >= 0 { - varname2 := genTempVarPfx + "w" + i - x.line(varname2 + " := " + varname) - varname = varname2 - } - - if ptrPfx == "" { - x.dec(varname, t) - } else { - x.line(genTempVarPfx + "z" + i + " := " + ptrPfx + varname) - x.dec(genTempVarPfx+"z"+i, t) - } - - } - - if canBeNil { - x.line("} ") - } -} - -// dec will decode a variable (varname) of type ptrTo(t). -// t is always a basetype (i.e. not of kind reflect.Ptr). 
-func (x *genRunner) dec(varname string, t reflect.Type) { - // assumptions: - // - the varname is to a pointer already. No need to take address of it - // - t is always a baseType T (not a *T, etc). - rtid := reflect.ValueOf(t).Pointer() - tptr := reflect.PtrTo(t) - if x.checkForSelfer(t, varname) { - if t.Implements(selferTyp) || tptr.Implements(selferTyp) { - x.line(varname + ".CodecDecodeSelf(d)") - return - } - if _, ok := x.td[rtid]; ok { - x.line(varname + ".CodecDecodeSelf(d)") - return - } - } - - inlist := false - for _, t0 := range x.t { - if t == t0 { - inlist = true - if x.checkForSelfer(t, varname) { - x.line(varname + ".CodecDecodeSelf(d)") - return - } - break - } - } - - var rtidAdded bool - if t == x.tc { - x.td[rtid] = true - rtidAdded = true - } - - // check if - // - type is Raw, RawExt - // - the type implements (Text|JSON|Binary)(Unm|M)arshal - mi := x.varsfx() - x.linef("%sm%s := z.DecBinary()", genTempVarPfx, mi) - x.linef("_ = %sm%s", genTempVarPfx, mi) - x.line("if false {") //start if block - defer func() { x.line("}") }() //end if block - - if t == rawTyp { - x.linef("} else { *%v = z.DecRaw()", varname) - return - } - if t == rawExtTyp { - x.linef("} else { r.DecodeExt(%v, 0, nil)", varname) - return - } - - // HACK: Support for Builtins. - // Currently, only Binc supports builtins, and the only builtin type is time.Time. - // Have a method that returns the rtid for time.Time if Handle is Binc. - if t == timeTyp { - vrtid := genTempVarPfx + "m" + x.varsfx() - x.linef("} else if %s := z.TimeRtidIfBinc(); %s != 0 { ", vrtid, vrtid) - x.linef("r.DecodeBuiltin(%s, %s)", vrtid, varname) - } - // only check for extensions if the type is named, and has a packagePath. - if genImportPath(t) != "" && t.Name() != "" { - // first check if extensions are configued, before doing the interface conversion - x.linef("} else if z.HasExtensions() && z.DecExt(%s) {", varname) - } - - if t.Implements(binaryUnmarshalerTyp) || tptr.Implements(binaryUnmarshalerTyp) { - x.linef("} else if %sm%s { z.DecBinaryUnmarshal(%v) ", genTempVarPfx, mi, varname) - } - if t.Implements(jsonUnmarshalerTyp) || tptr.Implements(jsonUnmarshalerTyp) { - x.linef("} else if !%sm%s && z.IsJSONHandle() { z.DecJSONUnmarshal(%v)", genTempVarPfx, mi, varname) - } else if t.Implements(textUnmarshalerTyp) || tptr.Implements(textUnmarshalerTyp) { - x.linef("} else if !%sm%s { z.DecTextUnmarshal(%v)", genTempVarPfx, mi, varname) - } - - x.line("} else {") - - // Since these are pointers, we cannot share, and have to use them one by one - switch t.Kind() { - case reflect.Int: - x.line("*((*int)(" + varname + ")) = int(r.DecodeInt(codecSelferBitsize" + x.xs + "))") - // x.line("z.DecInt((*int)(" + varname + "))") - case reflect.Int8: - x.line("*((*int8)(" + varname + ")) = int8(r.DecodeInt(8))") - // x.line("z.DecInt8((*int8)(" + varname + "))") - case reflect.Int16: - x.line("*((*int16)(" + varname + ")) = int16(r.DecodeInt(16))") - // x.line("z.DecInt16((*int16)(" + varname + "))") - case reflect.Int32: - x.line("*((*int32)(" + varname + ")) = int32(r.DecodeInt(32))") - // x.line("z.DecInt32((*int32)(" + varname + "))") - case reflect.Int64: - x.line("*((*int64)(" + varname + ")) = int64(r.DecodeInt(64))") - // x.line("z.DecInt64((*int64)(" + varname + "))") - - case reflect.Uint: - x.line("*((*uint)(" + varname + ")) = uint(r.DecodeUint(codecSelferBitsize" + x.xs + "))") - // x.line("z.DecUint((*uint)(" + varname + "))") - case reflect.Uint8: - x.line("*((*uint8)(" + varname + ")) = uint8(r.DecodeUint(8))") - // 
x.line("z.DecUint8((*uint8)(" + varname + "))") - case reflect.Uint16: - x.line("*((*uint16)(" + varname + ")) = uint16(r.DecodeUint(16))") - //x.line("z.DecUint16((*uint16)(" + varname + "))") - case reflect.Uint32: - x.line("*((*uint32)(" + varname + ")) = uint32(r.DecodeUint(32))") - //x.line("z.DecUint32((*uint32)(" + varname + "))") - case reflect.Uint64: - x.line("*((*uint64)(" + varname + ")) = uint64(r.DecodeUint(64))") - //x.line("z.DecUint64((*uint64)(" + varname + "))") - case reflect.Uintptr: - x.line("*((*uintptr)(" + varname + ")) = uintptr(r.DecodeUint(codecSelferBitsize" + x.xs + "))") - - case reflect.Float32: - x.line("*((*float32)(" + varname + ")) = float32(r.DecodeFloat(true))") - //x.line("z.DecFloat32((*float32)(" + varname + "))") - case reflect.Float64: - x.line("*((*float64)(" + varname + ")) = float64(r.DecodeFloat(false))") - // x.line("z.DecFloat64((*float64)(" + varname + "))") - - case reflect.Bool: - x.line("*((*bool)(" + varname + ")) = r.DecodeBool()") - // x.line("z.DecBool((*bool)(" + varname + "))") - case reflect.String: - x.line("*((*string)(" + varname + ")) = r.DecodeString()") - // x.line("z.DecString((*string)(" + varname + "))") - case reflect.Array, reflect.Chan: - x.xtraSM(varname, false, t) - // x.decListFallback(varname, rtid, true, t) - case reflect.Slice: - // if a []uint8, call dedicated function - // if a known fastpath slice, call dedicated function - // else write encode function in-line. - // - if elements are primitives or Selfers, call dedicated function on each member. - // - else call Encoder.encode(XXX) on it. - if rtid == uint8SliceTypId { - x.line("*" + varname + " = r.DecodeBytes(*(*[]byte)(" + varname + "), false, false)") - } else if fastpathAV.index(rtid) != -1 { - g := x.newGenV(t) - x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", false, d)") - } else { - x.xtraSM(varname, false, t) - // x.decListFallback(varname, rtid, false, t) - } - case reflect.Map: - // if a known fastpath map, call dedicated function - // else write encode function in-line. - // - if elements are primitives or Selfers, call dedicated function on each member. - // - else call Encoder.encode(XXX) on it. - if fastpathAV.index(rtid) != -1 { - g := x.newGenV(t) - x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", false, d)") - } else { - x.xtraSM(varname, false, t) - // x.decMapFallback(varname, rtid, t) - } - case reflect.Struct: - if inlist { - x.decStruct(varname, rtid, t) - } else { - // delete(x.td, rtid) - x.line("z.DecFallback(" + varname + ", false)") - } - default: - if rtidAdded { - delete(x.te, rtid) - } - x.line("z.DecFallback(" + varname + ", true)") - } -} - -func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type) (tryAsPtr bool) { - // This should only be used for exact primitives (ie un-named types). - // Named types may be implementations of Selfer, Unmarshaler, etc. - // They should be handled by dec(...) 
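// (Editor's note, not part of the deleted file.) The reason for the
// t.Name() check just below: a named type such as
//
//	type Seconds int64
//
// may implement Selfer (CodecEncodeSelf/CodecDecodeSelf, defined later in
// this diff) or one of the (Text|JSON|Binary)Unmarshaler interfaces, so
// assigning it here as a bare int64 would silently bypass that custom
// logic. Only unnamed types of primitive kind are safe for the
// direct-assignment fast path; everything else goes back through dec(...).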
- - if t.Name() != "" { - tryAsPtr = true - return - } - - switch t.Kind() { - case reflect.Int: - x.linef("%s = r.DecodeInt(codecSelferBitsize%s)", varname, x.xs) - case reflect.Int8: - x.linef("%s = r.DecodeInt(8)", varname) - case reflect.Int16: - x.linef("%s = r.DecodeInt(16)", varname) - case reflect.Int32: - x.linef("%s = r.DecodeInt(32)", varname) - case reflect.Int64: - x.linef("%s = r.DecodeInt(64)", varname) - - case reflect.Uint: - x.linef("%s = r.DecodeUint(codecSelferBitsize%s)", varname, x.xs) - case reflect.Uint8: - x.linef("%s = r.DecodeUint(8)", varname) - case reflect.Uint16: - x.linef("%s = r.DecodeUint(16)", varname) - case reflect.Uint32: - x.linef("%s = r.DecodeUint(32)", varname) - case reflect.Uint64: - x.linef("%s = r.DecodeUint(64)", varname) - case reflect.Uintptr: - x.linef("%s = r.DecodeUint(codecSelferBitsize%s)", varname, x.xs) - - case reflect.Float32: - x.linef("%s = r.DecodeFloat(true)", varname) - case reflect.Float64: - x.linef("%s = r.DecodeFloat(false)", varname) - - case reflect.Bool: - x.linef("%s = r.DecodeBool()", varname) - case reflect.String: - x.linef("%s = r.DecodeString()", varname) - default: - tryAsPtr = true - } - return -} - -func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type) { - if t.AssignableTo(uint8SliceTyp) { - x.line("*" + varname + " = r.DecodeBytes(*((*[]byte)(" + varname + ")), false, false)") - return - } - if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 { - x.linef("r.DecodeBytes( ((*[%s]byte)(%s))[:], false, true)", t.Len(), varname) - return - } - type tstruc struct { - TempVar string - Rand string - Varname string - CTyp string - Typ string - Immutable bool - Size int - } - telem := t.Elem() - ts := tstruc{genTempVarPfx, x.varsfx(), varname, x.genTypeName(t), x.genTypeName(telem), genIsImmutable(telem), int(telem.Size())} - - funcs := make(template.FuncMap) - - funcs["decLineVar"] = func(varname string) string { - x.decVar(varname, telem, false) - return "" - } - funcs["decLine"] = func(pfx string) string { - x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(telem), false) - return "" - } - funcs["var"] = func(s string) string { - return ts.TempVar + s + ts.Rand - } - funcs["zero"] = func() string { - return x.genZeroValueR(telem) - } - funcs["isArray"] = func() bool { - return t.Kind() == reflect.Array - } - funcs["isSlice"] = func() bool { - return t.Kind() == reflect.Slice - } - funcs["isChan"] = func() bool { - return t.Kind() == reflect.Chan - } - tm, err := template.New("").Funcs(funcs).Parse(genDecListTmpl) - if err != nil { - panic(err) - } - if err = tm.Execute(x.w, &ts); err != nil { - panic(err) - } -} - -func (x *genRunner) decMapFallback(varname string, rtid uintptr, t reflect.Type) { - type tstruc struct { - TempVar string - Sfx string - Rand string - Varname string - KTyp string - Typ string - Size int - } - telem := t.Elem() - tkey := t.Key() - ts := tstruc{ - genTempVarPfx, x.xs, x.varsfx(), varname, x.genTypeName(tkey), - x.genTypeName(telem), int(telem.Size() + tkey.Size()), - } - - funcs := make(template.FuncMap) - funcs["decElemZero"] = func() string { - return x.genZeroValueR(telem) - } - funcs["decElemKindImmutable"] = func() bool { - return genIsImmutable(telem) - } - funcs["decElemKindPtr"] = func() bool { - return telem.Kind() == reflect.Ptr - } - funcs["decElemKindIntf"] = func() bool { - return telem.Kind() == reflect.Interface - } - funcs["decLineVarK"] = func(varname string) string { - x.decVar(varname, tkey, false) - return "" - } - 
funcs["decLineVar"] = func(varname string) string { - x.decVar(varname, telem, false) - return "" - } - funcs["decLineK"] = func(pfx string) string { - x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(tkey), false) - return "" - } - funcs["decLine"] = func(pfx string) string { - x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(telem), false) - return "" - } - funcs["var"] = func(s string) string { - return ts.TempVar + s + ts.Rand - } - - tm, err := template.New("").Funcs(funcs).Parse(genDecMapTmpl) - if err != nil { - panic(err) - } - if err = tm.Execute(x.w, &ts); err != nil { - panic(err) - } -} - -func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintptr, t reflect.Type) { - ti := x.ti.get(rtid, t) - tisfi := ti.sfip // always use sequence from file. decStruct expects same thing. - x.line("switch (" + kName + ") {") - for _, si := range tisfi { - x.line("case \"" + si.encName + "\":") - var t2 reflect.StructField - if si.i != -1 { - t2 = t.Field(int(si.i)) - } else { - //we must accommodate anonymous fields, where the embedded field is a nil pointer in the value. - // t2 = t.FieldByIndex(si.is) - t2typ := t - varname3 := varname - for _, ix := range si.is { - for t2typ.Kind() == reflect.Ptr { - t2typ = t2typ.Elem() - } - t2 = t2typ.Field(ix) - t2typ = t2.Type - varname3 = varname3 + "." + t2.Name - if t2typ.Kind() == reflect.Ptr { - x.linef("if %s == nil { %s = new(%s) }", varname3, varname3, x.genTypeName(t2typ.Elem())) - } - } - } - x.decVar(varname+"."+t2.Name, t2.Type, false) - } - x.line("default:") - // pass the slice here, so that the string will not escape, and maybe save allocation - x.line("z.DecStructFieldNotFound(-1, " + kName + ")") - x.line("} // end switch " + kName) -} - -func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t reflect.Type, style genStructMapStyle) { - tpfx := genTempVarPfx - i := x.varsfx() - kName := tpfx + "s" + i - - // We thought to use ReadStringAsBytes, as go compiler might optimize the copy out. - // However, using that was more expensive, as it seems that the switch expression - // is evaluated each time. - // - // We could depend on decodeString using a temporary/shared buffer internally. - // However, this model of creating a byte array, and using explicitly is faster, - // and allows optional use of unsafe []byte->string conversion without alloc. - - // Also, ensure that the slice array doesn't escape. - // That will help escape analysis prevent allocation when it gets better. - - // x.line("var " + kName + "Arr = [32]byte{} // default string to decode into") - // x.line("var " + kName + "Slc = " + kName + "Arr[:] // default slice to decode into") - // use the scratch buffer to avoid allocation (most field names are < 32). - - x.line("var " + kName + "Slc = z.DecScratchBuffer() // default slice to decode into") - - x.line("_ = " + kName + "Slc") - switch style { - case genStructMapStyleLenPrefix: - x.linef("for %sj%s := 0; %sj%s < %s; %sj%s++ {", tpfx, i, tpfx, i, lenvarname, tpfx, i) - case genStructMapStyleCheckBreak: - x.linef("for %sj%s := 0; !r.CheckBreak(); %sj%s++ {", tpfx, i, tpfx, i) - default: // 0, otherwise. 
- x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length - x.linef("for %sj%s := 0; ; %sj%s++ {", tpfx, i, tpfx, i) - x.linef("if %shl%s { if %sj%s >= %s { break }", tpfx, i, tpfx, i, lenvarname) - x.line("} else { if r.CheckBreak() { break }; }") - } - x.linef("z.DecSendContainerState(codecSelfer_containerMapKey%s)", x.xs) - x.line(kName + "Slc = r.DecodeBytes(" + kName + "Slc, true, true)") - // let string be scoped to this loop alone, so it doesn't escape. - if x.unsafe { - x.line(kName + "SlcHdr := codecSelferUnsafeString" + x.xs + "{uintptr(unsafe.Pointer(&" + - kName + "Slc[0])), len(" + kName + "Slc)}") - x.line(kName + " := *(*string)(unsafe.Pointer(&" + kName + "SlcHdr))") - } else { - x.line(kName + " := string(" + kName + "Slc)") - } - x.linef("z.DecSendContainerState(codecSelfer_containerMapValue%s)", x.xs) - x.decStructMapSwitch(kName, varname, rtid, t) - - x.line("} // end for " + tpfx + "j" + i) - x.linef("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) -} - -func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid uintptr, t reflect.Type) { - tpfx := genTempVarPfx - i := x.varsfx() - ti := x.ti.get(rtid, t) - tisfi := ti.sfip // always use sequence from file. decStruct expects same thing. - x.linef("var %sj%s int", tpfx, i) - x.linef("var %sb%s bool", tpfx, i) // break - x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length - for _, si := range tisfi { - var t2 reflect.StructField - if si.i != -1 { - t2 = t.Field(int(si.i)) - } else { - //we must accommodate anonymous fields, where the embedded field is a nil pointer in the value. - // t2 = t.FieldByIndex(si.is) - t2typ := t - varname3 := varname - for _, ix := range si.is { - for t2typ.Kind() == reflect.Ptr { - t2typ = t2typ.Elem() - } - t2 = t2typ.Field(ix) - t2typ = t2.Type - varname3 = varname3 + "." + t2.Name - if t2typ.Kind() == reflect.Ptr { - x.linef("if %s == nil { %s = new(%s) }", varname3, varname3, x.genTypeName(t2typ.Elem())) - } - } - } - - x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }", - tpfx, i, tpfx, i, tpfx, i, - tpfx, i, lenvarname, tpfx, i) - x.linef("if %sb%s { z.DecSendContainerState(codecSelfer_containerArrayEnd%s); %s }", - tpfx, i, x.xs, breakString) - x.linef("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) - x.decVar(varname+"."+t2.Name, t2.Type, true) - } - // read remaining values and throw away. 
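// (Editor's example, not part of the deleted file.) The drain loop emitted
// just below gives generated struct decoders the same tolerance that
// encoding/json shows for fixed-size Go arrays, where surplus stream
// elements are read and discarded:
//
//	var dst [2]int
//	_ = json.Unmarshal([]byte(`[10, 20, 30, 40]`), &dst)
//	// dst == [2]int{10, 20}; 30 and 40 were consumed and thrown away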
-
-	x.line("for {")
-	x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }",
-		tpfx, i, tpfx, i, tpfx, i,
-		tpfx, i, lenvarname, tpfx, i)
-	x.linef("if %sb%s { break }", tpfx, i)
-	x.linef("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
-	x.linef(`z.DecStructFieldNotFound(%sj%s - 1, "")`, tpfx, i)
-	x.line("}")
-	x.linef("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs)
-}
-
-func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) {
-	// if container is map
-	i := x.varsfx()
-	x.linef("%sct%s := r.ContainerType()", genTempVarPfx, i)
-	x.linef("if %sct%s == codecSelferValueTypeMap%s {", genTempVarPfx, i, x.xs)
-	x.line(genTempVarPfx + "l" + i + " := r.ReadMapStart()")
-	x.linef("if %sl%s == 0 {", genTempVarPfx, i)
-	x.linef("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs)
-	if genUseOneFunctionForDecStructMap {
-		x.line("} else { ")
-		x.linef("x.codecDecodeSelfFromMap(%sl%s, d)", genTempVarPfx, i)
-	} else {
-		x.line("} else if " + genTempVarPfx + "l" + i + " > 0 { ")
-		x.line("x.codecDecodeSelfFromMapLenPrefix(" + genTempVarPfx + "l" + i + ", d)")
-		x.line("} else {")
-		x.line("x.codecDecodeSelfFromMapCheckBreak(" + genTempVarPfx + "l" + i + ", d)")
-	}
-	x.line("}")
-
-	// else if container is array
-	x.linef("} else if %sct%s == codecSelferValueTypeArray%s {", genTempVarPfx, i, x.xs)
-	x.line(genTempVarPfx + "l" + i + " := r.ReadArrayStart()")
-	x.linef("if %sl%s == 0 {", genTempVarPfx, i)
-	x.linef("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs)
-	x.line("} else { ")
-	x.linef("x.codecDecodeSelfFromArray(%sl%s, d)", genTempVarPfx, i)
-	x.line("}")
-	// else panic
-	x.line("} else { ")
-	x.line("panic(codecSelferOnlyMapOrArrayEncodeToStructErr" + x.xs + ")")
-	x.line("} ")
-}
-
-// --------
-
-type genV struct {
-	// genV is either a primitive (Primitive != "") or a map (MapKey != "") or a slice
-	MapKey    string
-	Elem      string
-	Primitive string
-	Size      int
-}
-
-func (x *genRunner) newGenV(t reflect.Type) (v genV) {
-	switch t.Kind() {
-	case reflect.Slice, reflect.Array:
-		te := t.Elem()
-		v.Elem = x.genTypeName(te)
-		v.Size = int(te.Size())
-	case reflect.Map:
-		te, tk := t.Elem(), t.Key()
-		v.Elem = x.genTypeName(te)
-		v.MapKey = x.genTypeName(tk)
-		v.Size = int(te.Size() + tk.Size())
-	default:
-		panic("unexpected type for newGenV. Requires map or slice type")
-	}
-	return
-}
-
-func (x *genV) MethodNamePfx(prefix string, prim bool) string {
-	var name []byte
-	if prefix != "" {
-		name = append(name, prefix...)
-	}
-	if prim {
-		name = append(name, genTitleCaseName(x.Primitive)...)
-	} else {
-		if x.MapKey == "" {
-			name = append(name, "Slice"...)
-		} else {
-			name = append(name, "Map"...)
-			name = append(name, genTitleCaseName(x.MapKey)...)
-		}
-		name = append(name, genTitleCaseName(x.Elem)...)
-	}
-	return string(name)
-
-}
-
-// genImportPath returns the import path of a non-predeclared named type, or an empty string otherwise.
-//
-// This handles the misbehaviour that occurs when 1.5-style vendoring is enabled,
-// where PkgPath returns the full path, including the vendoring prefix that should have been stripped.
-// We strip it here.
-func genImportPath(t reflect.Type) (s string) {
-	s = t.PkgPath()
-	if genCheckVendor {
-		// HACK: Misbehaviour occurs in go 1.5. May have to re-visit this later.
-		// if s contains /vendor/ OR startsWith vendor/, then return everything after it.
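// (Editor's example, not part of the deleted file.) Concretely, the
// stripping implemented just below behaves like:
//
//	"k8s.io/autoscaler/vendor/github.com/ugorji/go/codec" -> "github.com/ugorji/go/codec"
//	"vendor/golang.org/x/net/context"                     -> "golang.org/x/net/context"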
- const vendorStart = "vendor/" - const vendorInline = "/vendor/" - if i := strings.LastIndex(s, vendorInline); i >= 0 { - s = s[i+len(vendorInline):] - } else if strings.HasPrefix(s, vendorStart) { - s = s[len(vendorStart):] - } - } - return -} - -// A go identifier is (letter|_)[letter|number|_]* -func genGoIdentifier(s string, checkFirstChar bool) string { - b := make([]byte, 0, len(s)) - t := make([]byte, 4) - var n int - for i, r := range s { - if checkFirstChar && i == 0 && !unicode.IsLetter(r) { - b = append(b, '_') - } - // r must be unicode_letter, unicode_digit or _ - if unicode.IsLetter(r) || unicode.IsDigit(r) { - n = utf8.EncodeRune(t, r) - b = append(b, t[:n]...) - } else { - b = append(b, '_') - } - } - return string(b) -} - -func genNonPtr(t reflect.Type) reflect.Type { - for t.Kind() == reflect.Ptr { - t = t.Elem() - } - return t -} - -func genTitleCaseName(s string) string { - switch s { - case "interface{}", "interface {}": - return "Intf" - default: - return strings.ToUpper(s[0:1]) + s[1:] - } -} - -func genMethodNameT(t reflect.Type, tRef reflect.Type) (n string) { - var ptrPfx string - for t.Kind() == reflect.Ptr { - ptrPfx += "Ptrto" - t = t.Elem() - } - tstr := t.String() - if tn := t.Name(); tn != "" { - if tRef != nil && genImportPath(t) == genImportPath(tRef) { - return ptrPfx + tn - } else { - if genQNameRegex.MatchString(tstr) { - return ptrPfx + strings.Replace(tstr, ".", "_", 1000) - } else { - return ptrPfx + genCustomTypeName(tstr) - } - } - } - switch t.Kind() { - case reflect.Map: - return ptrPfx + "Map" + genMethodNameT(t.Key(), tRef) + genMethodNameT(t.Elem(), tRef) - case reflect.Slice: - return ptrPfx + "Slice" + genMethodNameT(t.Elem(), tRef) - case reflect.Array: - return ptrPfx + "Array" + strconv.FormatInt(int64(t.Len()), 10) + genMethodNameT(t.Elem(), tRef) - case reflect.Chan: - var cx string - switch t.ChanDir() { - case reflect.SendDir: - cx = "ChanSend" - case reflect.RecvDir: - cx = "ChanRecv" - default: - cx = "Chan" - } - return ptrPfx + cx + genMethodNameT(t.Elem(), tRef) - default: - if t == intfTyp { - return ptrPfx + "Interface" - } else { - if tRef != nil && genImportPath(t) == genImportPath(tRef) { - if t.Name() != "" { - return ptrPfx + t.Name() - } else { - return ptrPfx + genCustomTypeName(tstr) - } - } else { - // best way to get the package name inclusive - // return ptrPfx + strings.Replace(tstr, ".", "_", 1000) - // return ptrPfx + genBase64enc.EncodeToString([]byte(tstr)) - if t.Name() != "" && genQNameRegex.MatchString(tstr) { - return ptrPfx + strings.Replace(tstr, ".", "_", 1000) - } else { - return ptrPfx + genCustomTypeName(tstr) - } - } - } - } -} - -// genCustomNameForType base64encodes the t.String() value in such a way -// that it can be used within a function name. 
-func genCustomTypeName(tstr string) string { - len2 := genBase64enc.EncodedLen(len(tstr)) - bufx := make([]byte, len2) - genBase64enc.Encode(bufx, []byte(tstr)) - for i := len2 - 1; i >= 0; i-- { - if bufx[i] == '=' { - len2-- - } else { - break - } - } - return string(bufx[:len2]) -} - -func genIsImmutable(t reflect.Type) (v bool) { - return isImmutableKind(t.Kind()) -} - -type genInternal struct { - Values []genV - Unsafe bool -} - -func (x genInternal) FastpathLen() (l int) { - for _, v := range x.Values { - if v.Primitive == "" { - l++ - } - } - return -} - -func genInternalZeroValue(s string) string { - switch s { - case "interface{}", "interface {}": - return "nil" - case "bool": - return "false" - case "string": - return `""` - default: - return "0" - } -} - -func genInternalEncCommandAsString(s string, vname string) string { - switch s { - case "uint", "uint8", "uint16", "uint32", "uint64": - return "ee.EncodeUint(uint64(" + vname + "))" - case "int", "int8", "int16", "int32", "int64": - return "ee.EncodeInt(int64(" + vname + "))" - case "string": - return "ee.EncodeString(c_UTF8, " + vname + ")" - case "float32": - return "ee.EncodeFloat32(" + vname + ")" - case "float64": - return "ee.EncodeFloat64(" + vname + ")" - case "bool": - return "ee.EncodeBool(" + vname + ")" - case "symbol": - return "ee.EncodeSymbol(" + vname + ")" - default: - return "e.encode(" + vname + ")" - } -} - -func genInternalDecCommandAsString(s string) string { - switch s { - case "uint": - return "uint(dd.DecodeUint(uintBitsize))" - case "uint8": - return "uint8(dd.DecodeUint(8))" - case "uint16": - return "uint16(dd.DecodeUint(16))" - case "uint32": - return "uint32(dd.DecodeUint(32))" - case "uint64": - return "dd.DecodeUint(64)" - case "uintptr": - return "uintptr(dd.DecodeUint(uintBitsize))" - case "int": - return "int(dd.DecodeInt(intBitsize))" - case "int8": - return "int8(dd.DecodeInt(8))" - case "int16": - return "int16(dd.DecodeInt(16))" - case "int32": - return "int32(dd.DecodeInt(32))" - case "int64": - return "dd.DecodeInt(64)" - - case "string": - return "dd.DecodeString()" - case "float32": - return "float32(dd.DecodeFloat(true))" - case "float64": - return "dd.DecodeFloat(false)" - case "bool": - return "dd.DecodeBool()" - default: - panic(errors.New("gen internal: unknown type for decode: " + s)) - } -} - -func genInternalSortType(s string, elem bool) string { - for _, v := range [...]string{"int", "uint", "float", "bool", "string"} { - if strings.HasPrefix(s, v) { - if elem { - if v == "int" || v == "uint" || v == "float" { - return v + "64" - } else { - return v - } - } - return v + "Slice" - } - } - panic("sorttype: unexpected type: " + s) -} - -// var genInternalMu sync.Mutex -var genInternalV genInternal -var genInternalTmplFuncs template.FuncMap -var genInternalOnce sync.Once - -func genInternalInit() { - types := [...]string{ - "interface{}", - "string", - "float32", - "float64", - "uint", - "uint8", - "uint16", - "uint32", - "uint64", - "uintptr", - "int", - "int8", - "int16", - "int32", - "int64", - "bool", - } - // keep as slice, so it is in specific iteration order. 
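// (Editor's note, not part of the deleted file.) Each genV produced from
// these lists turns into fast-path method names via MethodNamePfx earlier
// in this file: genV{MapKey: "string", Elem: "uint64"} with prefix "Dec"
// yields DecMapStringUint64, and the encode side is named symmetrically --
// one Encode/Decode pair per concrete slice and map shape listed here.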
- // Initial order was uint64, string, interface{}, int, int64 - mapvaltypes := [...]string{ - "interface{}", - "string", - "uint", - "uint8", - "uint16", - "uint32", - "uint64", - "uintptr", - "int", - "int8", - "int16", - "int32", - "int64", - "float32", - "float64", - "bool", - } - wordSizeBytes := int(intBitsize) / 8 - - mapvaltypes2 := map[string]int{ - "interface{}": 2 * wordSizeBytes, - "string": 2 * wordSizeBytes, - "uint": 1 * wordSizeBytes, - "uint8": 1, - "uint16": 2, - "uint32": 4, - "uint64": 8, - "uintptr": 1 * wordSizeBytes, - "int": 1 * wordSizeBytes, - "int8": 1, - "int16": 2, - "int32": 4, - "int64": 8, - "float32": 4, - "float64": 8, - "bool": 1, - } - var gt genInternal - - // For each slice or map type, there must be a (symmetrical) Encode and Decode fast-path function - for _, s := range types { - gt.Values = append(gt.Values, genV{Primitive: s, Size: mapvaltypes2[s]}) - if s != "uint8" { // do not generate fast path for slice of bytes. Treat specially already. - gt.Values = append(gt.Values, genV{Elem: s, Size: mapvaltypes2[s]}) - } - if _, ok := mapvaltypes2[s]; !ok { - gt.Values = append(gt.Values, genV{MapKey: s, Elem: s, Size: 2 * mapvaltypes2[s]}) - } - for _, ms := range mapvaltypes { - gt.Values = append(gt.Values, genV{MapKey: s, Elem: ms, Size: mapvaltypes2[s] + mapvaltypes2[ms]}) - } - } - - funcs := make(template.FuncMap) - // funcs["haspfx"] = strings.HasPrefix - funcs["encmd"] = genInternalEncCommandAsString - funcs["decmd"] = genInternalDecCommandAsString - funcs["zerocmd"] = genInternalZeroValue - funcs["hasprefix"] = strings.HasPrefix - funcs["sorttype"] = genInternalSortType - - genInternalV = gt - genInternalTmplFuncs = funcs -} - -// genInternalGoFile is used to generate source files from templates. -// It is run by the program author alone. -// Unfortunately, it has to be exported so that it can be called from a command line tool. -// *** DO NOT USE *** -func genInternalGoFile(r io.Reader, w io.Writer, safe bool) (err error) { - genInternalOnce.Do(genInternalInit) - - gt := genInternalV - gt.Unsafe = !safe - - t := template.New("").Funcs(genInternalTmplFuncs) - - tmplstr, err := ioutil.ReadAll(r) - if err != nil { - return - } - - if t, err = t.Parse(string(tmplstr)); err != nil { - return - } - - var out bytes.Buffer - err = t.Execute(&out, gt) - if err != nil { - return - } - - bout, err := format.Source(out.Bytes()) - if err != nil { - w.Write(out.Bytes()) // write out if error, so we can still see. - // w.Write(bout) // write out if error, as much as possible, so we can still see. - return - } - w.Write(bout) - return -} diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/gen_15.go b/cluster-autoscaler/vendor/github.com/ugorji/go/codec/gen_15.go deleted file mode 100644 index ab76c310270b..000000000000 --- a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/gen_15.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// +build go1.5,!go1.6 - -package codec - -import "os" - -func init() { - genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1" -} diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/gen_16.go b/cluster-autoscaler/vendor/github.com/ugorji/go/codec/gen_16.go deleted file mode 100644 index 87c04e2e1827..000000000000 --- a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/gen_16.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. 
All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// +build go1.6 - -package codec - -import "os" - -func init() { - genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") != "0" -} diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/gen_17.go b/cluster-autoscaler/vendor/github.com/ugorji/go/codec/gen_17.go deleted file mode 100644 index 3881a43ce681..000000000000 --- a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/gen_17.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// +build go1.7 - -package codec - -func init() { - genCheckVendor = true -} diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/helper.go b/cluster-autoscaler/vendor/github.com/ugorji/go/codec/helper.go deleted file mode 100644 index 8b94fc1e4db8..000000000000 --- a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/helper.go +++ /dev/null @@ -1,1314 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -// Contains code shared by both encode and decode. - -// Some shared ideas around encoding/decoding -// ------------------------------------------ -// -// If an interface{} is passed, we first do a type assertion to see if it is -// a primitive type or a map/slice of primitive types, and use a fastpath to handle it. -// -// If we start with a reflect.Value, we are already in reflect.Value land and -// will try to grab the function for the underlying Type and directly call that function. -// This is more performant than calling reflect.Value.Interface(). -// -// This still helps us bypass many layers of reflection, and give best performance. -// -// Containers -// ------------ -// Containers in the stream are either associative arrays (key-value pairs) or -// regular arrays (indexed by incrementing integers). -// -// Some streams support indefinite-length containers, and use a breaking -// byte-sequence to denote that the container has come to an end. -// -// Some streams also are text-based, and use explicit separators to denote the -// end/beginning of different values. -// -// During encode, we use a high-level condition to determine how to iterate through -// the container. That decision is based on whether the container is text-based (with -// separators) or binary (without separators). If binary, we do not even call the -// encoding of separators. -// -// During decode, we use a different high-level condition to determine how to iterate -// through the containers. That decision is based on whether the stream contained -// a length prefix, or if it used explicit breaks. If length-prefixed, we assume that -// it has to be binary, and we do not even try to read separators. -// -// Philosophy -// ------------ -// On decode, this codec will update containers appropriately: -// - If struct, update fields from stream into fields of struct. -// If field in stream not found in struct, handle appropriately (based on option). -// If a struct field has no corresponding value in the stream, leave it AS IS. -// If nil in stream, set value to nil/zero value. -// - If map, update map from stream. -// If the stream value is NIL, set the map to nil. -// - if slice, try to update up to length of array in stream. 
-//      if container len is less than stream array length,
-//      and container cannot be expanded, it is handled (based on option).
-//      This means you can decode a 4-element stream array into a 1-element array.
-//
-// ------------------------------------
-// On encode, user can specify omitEmpty. This means that the value will be omitted
-// if it is the zero value. The problem may occur during decode, where omitted values do not affect
-// the value being decoded into. This means that if decoding into a struct with an
-// int field with current value=5, and the field is omitted in the stream, then after
-// decoding, the value will still be 5 (not 0).
-// omitEmpty only works if you guarantee that you always decode into zero-values.
-//
-// ------------------------------------
-// We could have truncated a map to remove keys not available in the stream,
-// or set values in the struct which are not in the stream to their zero values.
-// We decided against it because there is no efficient way to do it.
-// We may introduce it as an option later.
-// However, that will require enabling it for both runtime and code generation modes.
-//
-// To support truncate, we need to do 2 passes over the container:
-//   map
-//   - first collect all keys (e.g. in k1)
-//   - for each key in stream, mark k1 that the key should not be removed
-//   - after updating map, do second pass and call delete for all keys in k1 which are not marked
-//   struct:
-//   - for each field, track the *typeInfo s1
-//   - iterate through all s1, and for each one not marked, set value to zero
-//   - this involves checking the possible anonymous fields which are nil ptrs.
-//     too much work.
-//
-// ------------------------------------------
-// Error Handling is done within the library using panic.
-//
-// This way, the code doesn't have to keep checking if an error has happened,
-// and we don't have to keep sending the error value along with each call
-// or storing it in the En|Decoder and checking it constantly along the way.
-//
-// The disadvantage is that small functions which use panics cannot be inlined.
-// The code accounts for that by only using panics behind an interface;
-// since interface calls cannot be inlined, this is irrelevant.
-//
-// We considered storing the error in the En|Decoder.
-//   - once it has its err field set, it cannot be used again.
-//   - panicking will be optional, controlled by const flag.
-//   - code should always check error first and return early.
-// We eventually decided against it as it makes the code clumsier to always
-// check for these error conditions.
-
-import (
-	"bytes"
-	"encoding"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"math"
-	"reflect"
-	"sort"
-	"strings"
-	"sync"
-	"time"
-)
-
-const (
-	scratchByteArrayLen = 32
-	initCollectionCap   = 32 // 32 is defensive. 16 is preferred.
-
-	// Support encoding.(Binary|Text)(Unm|M)arshaler.
-	// This constant flag will enable or disable it.
-	supportMarshalInterfaces = true
-
-	// Each Encoder or Decoder uses a cache of functions based on conditionals,
-	// so that the conditionals are not run every time.
-	//
-	// Either a map or a slice is used to keep track of the functions.
-	// The map is more natural, but has a higher cost than a slice/array.
-	// This flag (useMapForCodecCache) controls which is used.
-	//
-	// From benchmarks, slices with linear search perform better with < 32 entries.
-	// We have typically seen a high threshold of about 24 entries.
-	useMapForCodecCache = false
-
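// (Editor's example, not part of the deleted file.) The panic-based error
// handling described in the header comment above is recovered once, at the
// API boundary. A hedged sketch of the pattern, using panicToErr from this
// file and panicValToErr from helper_internal.go later in this diff (the
// Decode signature here is illustrative):
//
//	func (d *Decoder) Decode(v interface{}) (err error) {
//		defer panicToErr(&err) // recover() -> err, once, at the boundary
//		d.decode(v)            // internals simply panic on bad input
//		return
//	}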
-	// for debugging, set this to false, to catch panic traces.
-	// Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic.
-	recoverPanicToErr = true
-
-	// if resetSliceElemToZeroValue, then on decoding a slice, reset the element to a zero value first.
-	// Only concern is that, if the slice already contained some garbage, we will decode into that garbage.
-	// The chances of this are slim, so leave this "optimization".
-	// TODO: should this be true, to ensure that we always decode into a "zero" "empty" value?
-	resetSliceElemToZeroValue bool = false
-)
-
-var (
-	oneByteArr    = [1]byte{0}
-	zeroByteSlice = oneByteArr[:0:0]
-)
-
-type charEncoding uint8
-
-const (
-	c_RAW charEncoding = iota
-	c_UTF8
-	c_UTF16LE
-	c_UTF16BE
-	c_UTF32LE
-	c_UTF32BE
-)
-
-// valueType is the stream type
-type valueType uint8
-
-const (
-	valueTypeUnset valueType = iota
-	valueTypeNil
-	valueTypeInt
-	valueTypeUint
-	valueTypeFloat
-	valueTypeBool
-	valueTypeString
-	valueTypeSymbol
-	valueTypeBytes
-	valueTypeMap
-	valueTypeArray
-	valueTypeTimestamp
-	valueTypeExt
-
-	// valueTypeInvalid = 0xff
-)
-
-type seqType uint8
-
-const (
-	_ seqType = iota
-	seqTypeArray
-	seqTypeSlice
-	seqTypeChan
-)
-
-// note that containerMapStart and containerArrayStart are not sent.
-// This is because the ReadXXXStart and EncodeXXXStart already does these.
-type containerState uint8
-
-const (
-	_ containerState = iota
-
-	containerMapStart // slot left open, since Driver method already covers it
-	containerMapKey
-	containerMapValue
-	containerMapEnd
-	containerArrayStart // slot left open, since Driver methods already cover it
-	containerArrayElem
-	containerArrayEnd
-)
-
-// sfiIdx used for tracking where a (field/enc)Name is seen in a []*structFieldInfo
-type sfiIdx struct {
-	name  string
-	index int
-}
-
-// do not recurse if a containing type refers to an embedded type
-// which refers back to its containing type (via a pointer).
-// The second time this back-reference happens, break out,
-// so as not to cause an infinite loop.
-const rgetMaxRecursion = 2
-
-// Anecdotally, we believe most types have <= 12 fields.
-// Java's PMD rules set TooManyFields threshold to 15.
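// (Editor's note, not part of the deleted file.) rgetPool below is the
// standard sync.Pool recycling pattern: Get a *rgetPoolT, reslice its
// fixed-size arrays to length zero so appends reuse the pooled backing
// storage (see the rgetT construction in TypeInfos.get further down), and
// Put it back when field collection finishes -- avoiding allocations for
// the common (<= 12 field) case.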
-const rgetPoolTArrayLen = 12 - -type rgetT struct { - fNames []string - encNames []string - etypes []uintptr - sfis []*structFieldInfo -} - -type rgetPoolT struct { - fNames [rgetPoolTArrayLen]string - encNames [rgetPoolTArrayLen]string - etypes [rgetPoolTArrayLen]uintptr - sfis [rgetPoolTArrayLen]*structFieldInfo - sfiidx [rgetPoolTArrayLen]sfiIdx -} - -var rgetPool = sync.Pool{ - New: func() interface{} { return new(rgetPoolT) }, -} - -type containerStateRecv interface { - sendContainerState(containerState) -} - -// mirror json.Marshaler and json.Unmarshaler here, -// so we don't import the encoding/json package -type jsonMarshaler interface { - MarshalJSON() ([]byte, error) -} -type jsonUnmarshaler interface { - UnmarshalJSON([]byte) error -} - -var ( - bigen = binary.BigEndian - structInfoFieldName = "_struct" - - mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil)) - mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil)) - intfSliceTyp = reflect.TypeOf([]interface{}(nil)) - intfTyp = intfSliceTyp.Elem() - - stringTyp = reflect.TypeOf("") - timeTyp = reflect.TypeOf(time.Time{}) - rawExtTyp = reflect.TypeOf(RawExt{}) - rawTyp = reflect.TypeOf(Raw{}) - uint8SliceTyp = reflect.TypeOf([]uint8(nil)) - - mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem() - - binaryMarshalerTyp = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem() - binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem() - - textMarshalerTyp = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() - textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() - - jsonMarshalerTyp = reflect.TypeOf((*jsonMarshaler)(nil)).Elem() - jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem() - - selferTyp = reflect.TypeOf((*Selfer)(nil)).Elem() - - uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer() - rawExtTypId = reflect.ValueOf(rawExtTyp).Pointer() - rawTypId = reflect.ValueOf(rawTyp).Pointer() - intfTypId = reflect.ValueOf(intfTyp).Pointer() - timeTypId = reflect.ValueOf(timeTyp).Pointer() - stringTypId = reflect.ValueOf(stringTyp).Pointer() - - mapStrIntfTypId = reflect.ValueOf(mapStrIntfTyp).Pointer() - mapIntfIntfTypId = reflect.ValueOf(mapIntfIntfTyp).Pointer() - intfSliceTypId = reflect.ValueOf(intfSliceTyp).Pointer() - // mapBySliceTypId = reflect.ValueOf(mapBySliceTyp).Pointer() - - intBitsize uint8 = uint8(reflect.TypeOf(int(0)).Bits()) - uintBitsize uint8 = uint8(reflect.TypeOf(uint(0)).Bits()) - - bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0} - bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} - - chkOvf checkOverflow - - noFieldNameToStructFieldInfoErr = errors.New("no field name passed to parseStructFieldInfo") -) - -var defTypeInfos = NewTypeInfos([]string{"codec", "json"}) - -// Selfer defines methods by which a value can encode or decode itself. -// -// Any type which implements Selfer will be able to encode or decode itself. -// Consequently, during (en|de)code, this takes precedence over -// (text|binary)(M|Unm)arshal or extension support. -type Selfer interface { - CodecEncodeSelf(*Encoder) - CodecDecodeSelf(*Decoder) -} - -// MapBySlice represents a slice which should be encoded as a map in the stream. -// The slice contains a sequence of key-value pairs. -// This affords storing a map in a specific sequence in the stream. 
-// -// The support of MapBySlice affords the following: -// - A slice type which implements MapBySlice will be encoded as a map -// - A slice can be decoded from a map in the stream -type MapBySlice interface { - MapBySlice() -} - -// WARNING: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED. -// -// BasicHandle encapsulates the common options and extension functions. -type BasicHandle struct { - // TypeInfos is used to get the type info for any type. - // - // If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json - TypeInfos *TypeInfos - - extHandle - EncodeOptions - DecodeOptions -} - -func (x *BasicHandle) getBasicHandle() *BasicHandle { - return x -} - -func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) { - if x.TypeInfos != nil { - return x.TypeInfos.get(rtid, rt) - } - return defTypeInfos.get(rtid, rt) -} - -// Handle is the interface for a specific encoding format. -// -// Typically, a Handle is pre-configured before first time use, -// and not modified while in use. Such a pre-configured Handle -// is safe for concurrent access. -type Handle interface { - getBasicHandle() *BasicHandle - newEncDriver(w *Encoder) encDriver - newDecDriver(r *Decoder) decDriver - isBinary() bool -} - -// Raw represents raw formatted bytes. -// We "blindly" store it during encode and store the raw bytes during decode. -// Note: it is dangerous during encode, so we may gate the behaviour behind an Encode flag which must be explicitly set. -type Raw []byte - -// RawExt represents raw unprocessed extension data. -// Some codecs will decode extension data as a *RawExt if there is no registered extension for the tag. -// -// Only one of Data or Value is nil. If Data is nil, then the content of the RawExt is in the Value. -type RawExt struct { - Tag uint64 - // Data is the []byte which represents the raw ext. If Data is nil, ext is exposed in Value. - // Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types - Data []byte - // Value represents the extension, if Data is nil. - // Value is used by codecs (e.g. cbor, json) which use the format to do custom serialization of the types. - Value interface{} -} - -// BytesExt handles custom (de)serialization of types to/from []byte. -// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types. -type BytesExt interface { - // WriteExt converts a value to a []byte. - // - // Note: v *may* be a pointer to the extension type, if the extension type was a struct or array. - WriteExt(v interface{}) []byte - - // ReadExt updates a value from a []byte. - ReadExt(dst interface{}, src []byte) -} - -// InterfaceExt handles custom (de)serialization of types to/from another interface{} value. -// The Encoder or Decoder will then handle the further (de)serialization of that known type. -// -// It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of the types. -type InterfaceExt interface { - // ConvertExt converts a value into a simpler interface for easy encoding e.g. convert time.Time to int64. - // - // Note: v *may* be a pointer to the extension type, if the extension type was a struct or array. - ConvertExt(v interface{}) interface{} - - // UpdateExt updates a value from a simpler interface for easy decoding e.g. convert int64 to time.Time. - UpdateExt(dst interface{}, src interface{}) -} - -// Ext handles custom (de)serialization of custom types / extensions. 
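// (Editor's example, not part of the deleted file.) A typical InterfaceExt
// along the lines the comments above suggest, converting time.Time to a
// Unix timestamp for the wire and back again (assumes the value arrives as
// time.Time, not a pointer to it):
//
//	type timeExt struct{}
//
//	func (timeExt) ConvertExt(v interface{}) interface{} {
//		return v.(time.Time).Unix() // encode as int64
//	}
//	func (timeExt) UpdateExt(dst interface{}, src interface{}) {
//		*(dst.(*time.Time)) = time.Unix(src.(int64), 0).UTC()
//	}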
-type Ext interface {
-	BytesExt
-	InterfaceExt
-}
-
-// addExtWrapper is a wrapper implementation to support former AddExt exported method.
-type addExtWrapper struct {
-	encFn func(reflect.Value) ([]byte, error)
-	decFn func(reflect.Value, []byte) error
-}
-
-func (x addExtWrapper) WriteExt(v interface{}) []byte {
-	bs, err := x.encFn(reflect.ValueOf(v))
-	if err != nil {
-		panic(err)
-	}
-	return bs
-}
-
-func (x addExtWrapper) ReadExt(v interface{}, bs []byte) {
-	if err := x.decFn(reflect.ValueOf(v), bs); err != nil {
-		panic(err)
-	}
-}
-
-func (x addExtWrapper) ConvertExt(v interface{}) interface{} {
-	return x.WriteExt(v)
-}
-
-func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) {
-	x.ReadExt(dest, v.([]byte))
-}
-
-type setExtWrapper struct {
-	b BytesExt
-	i InterfaceExt
-}
-
-func (x *setExtWrapper) WriteExt(v interface{}) []byte {
-	if x.b == nil {
-		panic("BytesExt.WriteExt is not supported")
-	}
-	return x.b.WriteExt(v)
-}
-
-func (x *setExtWrapper) ReadExt(v interface{}, bs []byte) {
-	if x.b == nil {
-		panic("BytesExt.ReadExt is not supported")
-	}
-	x.b.ReadExt(v, bs)
-}
-
-func (x *setExtWrapper) ConvertExt(v interface{}) interface{} {
-	if x.i == nil {
-		panic("InterfaceExt.ConvertExt is not supported")
-	}
-	return x.i.ConvertExt(v)
-}
-
-func (x *setExtWrapper) UpdateExt(dest interface{}, v interface{}) {
-	if x.i == nil {
-		panic("InterfaceExt.UpdateExt is not supported")
-	}
-	x.i.UpdateExt(dest, v)
-}
-
-// type errorString string
-// func (x errorString) Error() string { return string(x) }
-
-type binaryEncodingType struct{}
-
-func (_ binaryEncodingType) isBinary() bool { return true }
-
-type textEncodingType struct{}
-
-func (_ textEncodingType) isBinary() bool { return false }
-
-// noBuiltInTypes is embedded into many types which do not support builtins
-// e.g. msgpack, simple, cbor.
-type noBuiltInTypes struct{}
-
-func (_ noBuiltInTypes) IsBuiltinType(rt uintptr) bool           { return false }
-func (_ noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {}
-func (_ noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {}
-
-type noStreamingCodec struct{}
-
-func (_ noStreamingCodec) CheckBreak() bool { return false }
-
-// bigenHelper.
-// Users must already slice the x completely, because we will not reslice.
-type bigenHelper struct {
-	x []byte // must be correctly sliced to appropriate len. slicing is a cost.
-	w encWriter
-}
-
-func (z bigenHelper) writeUint16(v uint16) {
-	bigen.PutUint16(z.x, v)
-	z.w.writeb(z.x)
-}
-
-func (z bigenHelper) writeUint32(v uint32) {
-	bigen.PutUint32(z.x, v)
-	z.w.writeb(z.x)
-}
-
-func (z bigenHelper) writeUint64(v uint64) {
-	bigen.PutUint64(z.x, v)
-	z.w.writeb(z.x)
-}
-
-type extTypeTagFn struct {
-	rtid uintptr
-	rt   reflect.Type
-	tag  uint64
-	ext  Ext
-}
-
-type extHandle []extTypeTagFn
-
-// DEPRECATED: Use SetBytesExt or SetInterfaceExt on the Handle instead.
-//
-// AddExt registers an encode and decode function for a reflect.Type.
-// AddExt internally calls SetExt.
-// To deregister an Ext, call AddExt with nil encfn and/or nil decfn.
-func (o *extHandle) AddExt(
-	rt reflect.Type, tag byte,
-	encfn func(reflect.Value) ([]byte, error), decfn func(reflect.Value, []byte) error,
-) (err error) {
-	if encfn == nil || decfn == nil {
-		return o.SetExt(rt, uint64(tag), nil)
-	}
-	return o.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn})
-}
-
-// DEPRECATED: Use SetBytesExt or SetInterfaceExt on the Handle instead.
-// -// Note that the type must be a named type, and specifically not -// a pointer or Interface. An error is returned if that is not honored. -// -// To Deregister an ext, call SetExt with nil Ext -func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) { - // o is a pointer, because we may need to initialize it - if rt.PkgPath() == "" || rt.Kind() == reflect.Interface { - err = fmt.Errorf("codec.Handle.AddExt: Takes named type, not a pointer or interface: %T", - reflect.Zero(rt).Interface()) - return - } - - rtid := reflect.ValueOf(rt).Pointer() - for _, v := range *o { - if v.rtid == rtid { - v.tag, v.ext = tag, ext - return - } - } - - if *o == nil { - *o = make([]extTypeTagFn, 0, 4) - } - *o = append(*o, extTypeTagFn{rtid, rt, tag, ext}) - return -} - -func (o extHandle) getExt(rtid uintptr) *extTypeTagFn { - var v *extTypeTagFn - for i := range o { - v = &o[i] - if v.rtid == rtid { - return v - } - } - return nil -} - -func (o extHandle) getExtForTag(tag uint64) *extTypeTagFn { - var v *extTypeTagFn - for i := range o { - v = &o[i] - if v.tag == tag { - return v - } - } - return nil -} - -type structFieldInfo struct { - encName string // encode name - fieldName string // field name - - // only one of 'i' or 'is' can be set. If 'i' is -1, then 'is' has been set. - - is []int // (recursive/embedded) field index in struct - i int16 // field index in struct - omitEmpty bool - toArray bool // if field is _struct, is the toArray set? -} - -// func (si *structFieldInfo) isZero() bool { -// return si.encName == "" && len(si.is) == 0 && si.i == 0 && !si.omitEmpty && !si.toArray -// } - -// rv returns the field of the struct. -// If anonymous, it returns an Invalid -func (si *structFieldInfo) field(v reflect.Value, update bool) (rv2 reflect.Value) { - if si.i != -1 { - v = v.Field(int(si.i)) - return v - } - // replicate FieldByIndex - for _, x := range si.is { - for v.Kind() == reflect.Ptr { - if v.IsNil() { - if !update { - return - } - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - } - v = v.Field(x) - } - return v -} - -func (si *structFieldInfo) setToZeroValue(v reflect.Value) { - if si.i != -1 { - v = v.Field(int(si.i)) - v.Set(reflect.Zero(v.Type())) - // v.Set(reflect.New(v.Type()).Elem()) - // v.Set(reflect.New(v.Type())) - } else { - // replicate FieldByIndex - for _, x := range si.is { - for v.Kind() == reflect.Ptr { - if v.IsNil() { - return - } - v = v.Elem() - } - v = v.Field(x) - } - v.Set(reflect.Zero(v.Type())) - } -} - -func parseStructFieldInfo(fname string, stag string) *structFieldInfo { - // if fname == "" { - // panic(noFieldNameToStructFieldInfoErr) - // } - si := structFieldInfo{ - encName: fname, - } - - if stag != "" { - for i, s := range strings.Split(stag, ",") { - if i == 0 { - if s != "" { - si.encName = s - } - } else { - if s == "omitempty" { - si.omitEmpty = true - } else if s == "toarray" { - si.toArray = true - } - } - } - } - // si.encNameBs = []byte(si.encName) - return &si -} - -type sfiSortedByEncName []*structFieldInfo - -func (p sfiSortedByEncName) Len() int { - return len(p) -} - -func (p sfiSortedByEncName) Less(i, j int) bool { - return p[i].encName < p[j].encName -} - -func (p sfiSortedByEncName) Swap(i, j int) { - p[i], p[j] = p[j], p[i] -} - -// typeInfo keeps information about each type referenced in the encode/decode sequence. 
-//
-// During an encode/decode sequence, we work as below:
-//   - If base is a built in type, en/decode base value
-//   - If base is registered as an extension, en/decode base value
-//   - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method
-//   - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method
-//   - Else decode appropriately based on the reflect.Kind
-type typeInfo struct {
-	sfi  []*structFieldInfo // sorted. Used when enc/dec struct to map.
-	sfip []*structFieldInfo // unsorted. Used when enc/dec struct to array.
-
-	rt   reflect.Type
-	rtid uintptr
-
-	numMeth uint16 // number of methods
-
-	// baseId gives the pointer to the base reflect.Type, after dereferencing
-	// the pointers. E.g. base type of ***time.Time is time.Time.
-	base      reflect.Type
-	baseId    uintptr
-	baseIndir int8 // number of indirections to get to base
-
-	mbs bool // base type (T or *T) is a MapBySlice
-
-	bm        bool // base type (T or *T) is a binaryMarshaler
-	bunm      bool // base type (T or *T) is a binaryUnmarshaler
-	bmIndir   int8 // number of indirections to get to binaryMarshaler type
-	bunmIndir int8 // number of indirections to get to binaryUnmarshaler type
-
-	tm        bool // base type (T or *T) is a textMarshaler
-	tunm      bool // base type (T or *T) is a textUnmarshaler
-	tmIndir   int8 // number of indirections to get to textMarshaler type
-	tunmIndir int8 // number of indirections to get to textUnmarshaler type
-
-	jm        bool // base type (T or *T) is a jsonMarshaler
-	junm      bool // base type (T or *T) is a jsonUnmarshaler
-	jmIndir   int8 // number of indirections to get to jsonMarshaler type
-	junmIndir int8 // number of indirections to get to jsonUnmarshaler type
-
-	cs      bool // base type (T or *T) is a Selfer
-	csIndir int8 // number of indirections to get to Selfer type
-
-	toArray bool // whether this (struct) type should be encoded as an array
-}
-
-func (ti *typeInfo) indexForEncName(name string) int {
-	// NOTE: name may be a stringView, so don't pass it to another function.
-	//tisfi := ti.sfi
-	const binarySearchThreshold = 16
-	if sfilen := len(ti.sfi); sfilen < binarySearchThreshold {
-		// linear search. faster than binary search in my testing up to 16-field structs.
-		for i, si := range ti.sfi {
-			if si.encName == name {
-				return i
-			}
-		}
-	} else {
-		// binary search. adapted from sort/search.go.
-		h, i, j := 0, 0, sfilen
-		for i < j {
-			h = i + (j-i)/2
-			if ti.sfi[h].encName < name {
-				i = h + 1
-			} else {
-				j = h
-			}
-		}
-		if i < sfilen && ti.sfi[i].encName == name {
-			return i
-		}
-	}
-	return -1
-}
-
-// TypeInfos caches typeInfo for each type on first inspection.
-//
-// It is configured with a set of tag keys, which are used to get
-// configuration for the type.
-type TypeInfos struct {
-	infos map[uintptr]*typeInfo
-	mu    sync.RWMutex
-	tags  []string
-}
-
-// NewTypeInfos creates a TypeInfos given a set of struct tag keys.
-//
-// This allows users to customize the struct tag keys which contain configuration
-// of their types.
-func NewTypeInfos(tags []string) *TypeInfos {
-	return &TypeInfos{tags: tags, infos: make(map[uintptr]*typeInfo, 64)}
-}
-
-func (x *TypeInfos) structTag(t reflect.StructTag) (s string) {
-	// check for tags: codec, json, in that order.
-	// this allows seamless support for many configured structs.
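// (Editor's example, not part of the deleted file.) With the default
// NewTypeInfos([]string{"codec", "json"}) ordering used by defTypeInfos, a
// struct tagged only for encoding/json keeps working unchanged:
//
//	type Pod struct {
//		Name string `json:"name,omitempty"`       // json key is picked up
//		Spec string `codec:"spec" json:"ignored"` // codec key wins
//	}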
- for _, x := range x.tags { - s = t.Get(x) - if s != "" { - return s - } - } - return -} - -func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) { - var ok bool - x.mu.RLock() - pti, ok = x.infos[rtid] - x.mu.RUnlock() - if ok { - return - } - - // do not hold lock while computing this. - // it may lead to duplication, but that's ok. - ti := typeInfo{rt: rt, rtid: rtid} - ti.numMeth = uint16(rt.NumMethod()) - - var indir int8 - if ok, indir = implementsIntf(rt, binaryMarshalerTyp); ok { - ti.bm, ti.bmIndir = true, indir - } - if ok, indir = implementsIntf(rt, binaryUnmarshalerTyp); ok { - ti.bunm, ti.bunmIndir = true, indir - } - if ok, indir = implementsIntf(rt, textMarshalerTyp); ok { - ti.tm, ti.tmIndir = true, indir - } - if ok, indir = implementsIntf(rt, textUnmarshalerTyp); ok { - ti.tunm, ti.tunmIndir = true, indir - } - if ok, indir = implementsIntf(rt, jsonMarshalerTyp); ok { - ti.jm, ti.jmIndir = true, indir - } - if ok, indir = implementsIntf(rt, jsonUnmarshalerTyp); ok { - ti.junm, ti.junmIndir = true, indir - } - if ok, indir = implementsIntf(rt, selferTyp); ok { - ti.cs, ti.csIndir = true, indir - } - if ok, _ = implementsIntf(rt, mapBySliceTyp); ok { - ti.mbs = true - } - - pt := rt - var ptIndir int8 - // for ; pt.Kind() == reflect.Ptr; pt, ptIndir = pt.Elem(), ptIndir+1 { } - for pt.Kind() == reflect.Ptr { - pt = pt.Elem() - ptIndir++ - } - if ptIndir == 0 { - ti.base = rt - ti.baseId = rtid - } else { - ti.base = pt - ti.baseId = reflect.ValueOf(pt).Pointer() - ti.baseIndir = ptIndir - } - - if rt.Kind() == reflect.Struct { - var omitEmpty bool - if f, ok := rt.FieldByName(structInfoFieldName); ok { - siInfo := parseStructFieldInfo(structInfoFieldName, x.structTag(f.Tag)) - ti.toArray = siInfo.toArray - omitEmpty = siInfo.omitEmpty - } - pi := rgetPool.Get() - pv := pi.(*rgetPoolT) - pv.etypes[0] = ti.baseId - vv := rgetT{pv.fNames[:0], pv.encNames[:0], pv.etypes[:1], pv.sfis[:0]} - x.rget(rt, rtid, omitEmpty, nil, &vv) - ti.sfip, ti.sfi = rgetResolveSFI(vv.sfis, pv.sfiidx[:0]) - rgetPool.Put(pi) - } - // sfi = sfip - - x.mu.Lock() - if pti, ok = x.infos[rtid]; !ok { - pti = &ti - x.infos[rtid] = pti - } - x.mu.Unlock() - return -} - -func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr, omitEmpty bool, - indexstack []int, pv *rgetT, -) { - // Read up fields and store how to access the value. - // - // It uses go's rules for message selectors, - // which say that the field with the shallowest depth is selected. - // - // Note: we consciously use slices, not a map, to simulate a set. - // Typically, types have < 16 fields, - // and iteration using equals is faster than maps there - -LOOP: - for j, jlen := 0, rt.NumField(); j < jlen; j++ { - f := rt.Field(j) - fkind := f.Type.Kind() - // skip if a func type, or is unexported, or structTag value == "-" - switch fkind { - case reflect.Func, reflect.Complex64, reflect.Complex128, reflect.UnsafePointer: - continue LOOP - } - - // if r1, _ := utf8.DecodeRuneInString(f.Name); - // r1 == utf8.RuneError || !unicode.IsUpper(r1) { - if f.PkgPath != "" && !f.Anonymous { // unexported, not embedded - continue - } - stag := x.structTag(f.Tag) - if stag == "-" { - continue - } - var si *structFieldInfo - // if anonymous and no struct tag (or it's blank), - // and a struct (or pointer to struct), inline it. 
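// (Editor's example, not part of the deleted file.) Inlining makes an
// untagged embedded struct's fields decode as though declared on the
// outer type:
//
//	type Meta struct{ Name string }
//	type Obj struct {
//		Meta     // inlined: fields promoted, index path tracked via indexstack
//		Size int
//	}
//
// whereas giving the embedded field an explicit tag keeps it as a nested value.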
- if f.Anonymous && fkind != reflect.Interface { - doInline := stag == "" - if !doInline { - si = parseStructFieldInfo("", stag) - doInline = si.encName == "" - // doInline = si.isZero() - } - if doInline { - ft := f.Type - for ft.Kind() == reflect.Ptr { - ft = ft.Elem() - } - if ft.Kind() == reflect.Struct { - // if etypes contains this, don't call rget again (as fields are already seen here) - ftid := reflect.ValueOf(ft).Pointer() - // We cannot recurse forever, but we need to track other field depths. - // So - we break if we see a type twice (not the first time). - // This should be sufficient to handle an embedded type that refers to its - // owning type, which then refers to its embedded type. - processIt := true - numk := 0 - for _, k := range pv.etypes { - if k == ftid { - numk++ - if numk == rgetMaxRecursion { - processIt = false - break - } - } - } - if processIt { - pv.etypes = append(pv.etypes, ftid) - indexstack2 := make([]int, len(indexstack)+1) - copy(indexstack2, indexstack) - indexstack2[len(indexstack)] = j - // indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) - x.rget(ft, ftid, omitEmpty, indexstack2, pv) - } - continue - } - } - } - - // after the anonymous dance: if an unexported field, skip - if f.PkgPath != "" { // unexported - continue - } - - if f.Name == "" { - panic(noFieldNameToStructFieldInfoErr) - } - - pv.fNames = append(pv.fNames, f.Name) - - if si == nil { - si = parseStructFieldInfo(f.Name, stag) - } else if si.encName == "" { - si.encName = f.Name - } - si.fieldName = f.Name - - pv.encNames = append(pv.encNames, si.encName) - - // si.ikind = int(f.Type.Kind()) - if len(indexstack) == 0 { - si.i = int16(j) - } else { - si.i = -1 - si.is = make([]int, len(indexstack)+1) - copy(si.is, indexstack) - si.is[len(indexstack)] = j - // si.is = append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) - } - - if omitEmpty { - si.omitEmpty = true - } - pv.sfis = append(pv.sfis, si) - } -} - -// resolves the struct field info got from a call to rget. -// Returns a trimmed, unsorted and sorted []*structFieldInfo. -func rgetResolveSFI(x []*structFieldInfo, pv []sfiIdx) (y, z []*structFieldInfo) { - var n int - for i, v := range x { - xn := v.encName //TODO: fieldName or encName? use encName for now. 
- var found bool - for j, k := range pv { - if k.name == xn { - // one of them must be reset to nil, and the index updated appropriately to the other one - if len(v.is) == len(x[k.index].is) { - } else if len(v.is) < len(x[k.index].is) { - pv[j].index = i - if x[k.index] != nil { - x[k.index] = nil - n++ - } - } else { - if x[i] != nil { - x[i] = nil - n++ - } - } - found = true - break - } - } - if !found { - pv = append(pv, sfiIdx{xn, i}) - } - } - - // remove all the nils - y = make([]*structFieldInfo, len(x)-n) - n = 0 - for _, v := range x { - if v == nil { - continue - } - y[n] = v - n++ - } - - z = make([]*structFieldInfo, len(y)) - copy(z, y) - sort.Sort(sfiSortedByEncName(z)) - return -} - -func panicToErr(err *error) { - if recoverPanicToErr { - if x := recover(); x != nil { - //debug.PrintStack() - panicValToErr(x, err) - } - } -} - -// func doPanic(tag string, format string, params ...interface{}) { -// params2 := make([]interface{}, len(params)+1) -// params2[0] = tag -// copy(params2[1:], params) -// panic(fmt.Errorf("%s: "+format, params2...)) -// } - -func isImmutableKind(k reflect.Kind) (v bool) { - return false || - k == reflect.Int || - k == reflect.Int8 || - k == reflect.Int16 || - k == reflect.Int32 || - k == reflect.Int64 || - k == reflect.Uint || - k == reflect.Uint8 || - k == reflect.Uint16 || - k == reflect.Uint32 || - k == reflect.Uint64 || - k == reflect.Uintptr || - k == reflect.Float32 || - k == reflect.Float64 || - k == reflect.Bool || - k == reflect.String -} - -// these functions must be inlinable, and not call anybody -type checkOverflow struct{} - -func (_ checkOverflow) Float32(f float64) (overflow bool) { - if f < 0 { - f = -f - } - return math.MaxFloat32 < f && f <= math.MaxFloat64 -} - -func (_ checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) { - if bitsize == 0 || bitsize >= 64 || v == 0 { - return - } - if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc { - overflow = true - } - return -} - -func (_ checkOverflow) Int(v int64, bitsize uint8) (overflow bool) { - if bitsize == 0 || bitsize >= 64 || v == 0 { - return - } - if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc { - overflow = true - } - return -} - -func (_ checkOverflow) SignedInt(v uint64) (i int64, overflow bool) { - //e.g. 
-128 to 127 for int8 - pos := (v >> 63) == 0 - ui2 := v & 0x7fffffffffffffff - if pos { - if ui2 > math.MaxInt64 { - overflow = true - return - } - } else { - if ui2 > math.MaxInt64-1 { - overflow = true - return - } - } - i = int64(v) - return -} - -// ------------------ SORT ----------------- - -func isNaN(f float64) bool { return f != f } - -// ----------------------- - -type intSlice []int64 -type uintSlice []uint64 -type floatSlice []float64 -type boolSlice []bool -type stringSlice []string -type bytesSlice [][]byte - -func (p intSlice) Len() int { return len(p) } -func (p intSlice) Less(i, j int) bool { return p[i] < p[j] } -func (p intSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p uintSlice) Len() int { return len(p) } -func (p uintSlice) Less(i, j int) bool { return p[i] < p[j] } -func (p uintSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p floatSlice) Len() int { return len(p) } -func (p floatSlice) Less(i, j int) bool { - return p[i] < p[j] || isNaN(p[i]) && !isNaN(p[j]) -} -func (p floatSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p stringSlice) Len() int { return len(p) } -func (p stringSlice) Less(i, j int) bool { return p[i] < p[j] } -func (p stringSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p bytesSlice) Len() int { return len(p) } -func (p bytesSlice) Less(i, j int) bool { return bytes.Compare(p[i], p[j]) == -1 } -func (p bytesSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p boolSlice) Len() int { return len(p) } -func (p boolSlice) Less(i, j int) bool { return !p[i] && p[j] } -func (p boolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// --------------------- - -type intRv struct { - v int64 - r reflect.Value -} -type intRvSlice []intRv -type uintRv struct { - v uint64 - r reflect.Value -} -type uintRvSlice []uintRv -type floatRv struct { - v float64 - r reflect.Value -} -type floatRvSlice []floatRv -type boolRv struct { - v bool - r reflect.Value -} -type boolRvSlice []boolRv -type stringRv struct { - v string - r reflect.Value -} -type stringRvSlice []stringRv -type bytesRv struct { - v []byte - r reflect.Value -} -type bytesRvSlice []bytesRv - -func (p intRvSlice) Len() int { return len(p) } -func (p intRvSlice) Less(i, j int) bool { return p[i].v < p[j].v } -func (p intRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p uintRvSlice) Len() int { return len(p) } -func (p uintRvSlice) Less(i, j int) bool { return p[i].v < p[j].v } -func (p uintRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p floatRvSlice) Len() int { return len(p) } -func (p floatRvSlice) Less(i, j int) bool { - return p[i].v < p[j].v || isNaN(p[i].v) && !isNaN(p[j].v) -} -func (p floatRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p stringRvSlice) Len() int { return len(p) } -func (p stringRvSlice) Less(i, j int) bool { return p[i].v < p[j].v } -func (p stringRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p bytesRvSlice) Len() int { return len(p) } -func (p bytesRvSlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 } -func (p bytesRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p boolRvSlice) Len() int { return len(p) } -func (p boolRvSlice) Less(i, j int) bool { return !p[i].v && p[j].v } -func (p boolRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// ----------------- - -type bytesI struct { - v []byte - i interface{} -} - -type bytesISlice []bytesI - -func (p bytesISlice) Len() int { return len(p) } -func (p bytesISlice) Less(i, j int) 
bool { return bytes.Compare(p[i].v, p[j].v) == -1 } -func (p bytesISlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// ----------------- - -type set []uintptr - -func (s *set) add(v uintptr) (exists bool) { - // e.ci is always nil, or len >= 1 - // defer func() { fmt.Printf("$$$$$$$$$$$ cirRef Add: %v, exists: %v\n", v, exists) }() - x := *s - if x == nil { - x = make([]uintptr, 1, 8) - x[0] = v - *s = x - return - } - // typically, length will be 1. make this perform. - if len(x) == 1 { - if j := x[0]; j == 0 { - x[0] = v - } else if j == v { - exists = true - } else { - x = append(x, v) - *s = x - } - return - } - // check if it exists - for _, j := range x { - if j == v { - exists = true - return - } - } - // try to replace a "deleted" slot - for i, j := range x { - if j == 0 { - x[i] = v - return - } - } - // if unable to replace deleted slot, just append it. - x = append(x, v) - *s = x - return -} - -func (s *set) remove(v uintptr) (exists bool) { - // defer func() { fmt.Printf("$$$$$$$$$$$ cirRef Rm: %v, exists: %v\n", v, exists) }() - x := *s - if len(x) == 0 { - return - } - if len(x) == 1 { - if x[0] == v { - x[0] = 0 - } - return - } - for i, j := range x { - if j == v { - exists = true - x[i] = 0 // set it to 0, as way to delete it. - // copy(x[i:], x[i+1:]) - // x = x[:len(x)-1] - return - } - } - return -} diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/helper_internal.go b/cluster-autoscaler/vendor/github.com/ugorji/go/codec/helper_internal.go deleted file mode 100644 index 5d0727f77f60..000000000000 --- a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/helper_internal.go +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -// All non-std package dependencies live in this file, -// so porting to different environment is easy (just update functions). - -import ( - "errors" - "fmt" - "math" - "reflect" -) - -func panicValToErr(panicVal interface{}, err *error) { - if panicVal == nil { - return - } - // case nil - switch xerr := panicVal.(type) { - case error: - *err = xerr - case string: - *err = errors.New(xerr) - default: - *err = fmt.Errorf("%v", panicVal) - } - return -} - -func hIsEmptyValue(v reflect.Value, deref, checkStruct bool) bool { - switch v.Kind() { - case reflect.Invalid: - return true - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - if deref { - if v.IsNil() { - return true - } - return hIsEmptyValue(v.Elem(), deref, checkStruct) - } else { - return v.IsNil() - } - case reflect.Struct: - if !checkStruct { - return false - } - // return true if all fields are empty. else return false. - // we cannot use equality check, because some fields may be maps/slices/etc - // and consequently the structs are not comparable. 
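
The comment above is the crux of why emptiness for omitEmpty must be decided field by field: a struct containing a map or slice field is not comparable, so `==` against the zero value fails to compile for static types and panics when attempted through reflection. A condensed sketch of the same recursive test, with a hypothetical isEmpty helper modeled on hIsEmptyValue:

```go
package main

import (
	"fmt"
	"reflect"
)

// isEmpty is a simplified sketch of hIsEmptyValue: structs with
// map/slice fields cannot be compared with ==, so emptiness is
// decided by recursing into each field.
func isEmpty(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
		return v.Len() == 0
	case reflect.Bool:
		return !v.Bool()
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return v.Int() == 0
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return v.Uint() == 0
	case reflect.Float32, reflect.Float64:
		return v.Float() == 0
	case reflect.Interface, reflect.Ptr:
		return v.IsNil()
	case reflect.Struct:
		// empty only if every field is empty
		for i := 0; i < v.NumField(); i++ {
			if !isEmpty(v.Field(i)) {
				return false
			}
		}
		return true
	}
	return false
}

func main() {
	type T struct {
		Tags  []string
		Count int
	}
	fmt.Println(isEmpty(reflect.ValueOf(T{})))                    // true
	fmt.Println(isEmpty(reflect.ValueOf(T{Tags: []string{"a"}}))) // false
}
```
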
- // return v.Interface() == reflect.Zero(v.Type()).Interface() - for i, n := 0, v.NumField(); i < n; i++ { - if !hIsEmptyValue(v.Field(i), deref, checkStruct) { - return false - } - } - return true - } - return false -} - -func isEmptyValue(v reflect.Value, deref, checkStruct bool) bool { - return hIsEmptyValue(v, deref, checkStruct) -} - -func pruneSignExt(v []byte, pos bool) (n int) { - if len(v) < 2 { - } else if pos && v[0] == 0 { - for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ { - } - } else if !pos && v[0] == 0xff { - for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ { - } - } - return -} - -func implementsIntf(typ, iTyp reflect.Type) (success bool, indir int8) { - if typ == nil { - return - } - rt := typ - // The type might be a pointer and we need to keep - // dereferencing to the base type until we find an implementation. - for { - if rt.Implements(iTyp) { - return true, indir - } - if p := rt; p.Kind() == reflect.Ptr { - indir++ - if indir >= math.MaxInt8 { // insane number of indirections - return false, 0 - } - rt = p.Elem() - continue - } - break - } - // No luck yet, but if this is a base type (non-pointer), the pointer might satisfy. - if typ.Kind() != reflect.Ptr { - // Not a pointer, but does the pointer work? - if reflect.PtrTo(typ).Implements(iTyp) { - return true, -1 - } - } - return false, 0 -} - -// validate that this function is correct ... -// culled from OGRE (Object-Oriented Graphics Rendering Engine) -// function: halfToFloatI (http://stderr.org/doc/ogre-doc/api/OgreBitwise_8h-source.html) -func halfFloatToFloatBits(yy uint16) (d uint32) { - y := uint32(yy) - s := (y >> 15) & 0x01 - e := (y >> 10) & 0x1f - m := y & 0x03ff - - if e == 0 { - if m == 0 { // plu or minus 0 - return s << 31 - } else { // Denormalized number -- renormalize it - for (m & 0x00000400) == 0 { - m <<= 1 - e -= 1 - } - e += 1 - const zz uint32 = 0x0400 - m &= ^zz - } - } else if e == 31 { - if m == 0 { // Inf - return (s << 31) | 0x7f800000 - } else { // NaN - return (s << 31) | 0x7f800000 | (m << 13) - } - } - e = e + (127 - 15) - m = m << 13 - return (s << 31) | (e << 23) | m -} - -// GrowCap will return a new capacity for a slice, given the following: -// - oldCap: current capacity -// - unit: in-memory size of an element -// - num: number of elements to add -func growCap(oldCap, unit, num int) (newCap int) { - // appendslice logic (if cap < 1024, *2, else *1.25): - // leads to many copy calls, especially when copying bytes. - // bytes.Buffer model (2*cap + n): much better for bytes. - // smarter way is to take the byte-size of the appended element(type) into account - - // maintain 3 thresholds: - // t1: if cap <= t1, newcap = 2x - // t2: if cap <= t2, newcap = 1.75x - // t3: if cap <= t3, newcap = 1.5x - // else newcap = 1.25x - // - // t1, t2, t3 >= 1024 always. - // i.e. if unit size >= 16, then always do 2x or 1.25x (ie t1, t2, t3 are all same) - // - // With this, appending for bytes increase by: - // 100% up to 4K - // 75% up to 8K - // 50% up to 16K - // 25% beyond that - - // unit can be 0 e.g. 
for struct{}{}; handle that appropriately - var t1, t2, t3 int // thresholds - if unit <= 1 { - t1, t2, t3 = 4*1024, 8*1024, 16*1024 - } else if unit < 16 { - t3 = 16 / unit * 1024 - t1 = t3 * 1 / 4 - t2 = t3 * 2 / 4 - } else { - t1, t2, t3 = 1024, 1024, 1024 - } - - var x int // temporary variable - - // x is multiplier here: one of 5, 6, 7 or 8; incr of 25%, 50%, 75% or 100% respectively - if oldCap <= t1 { // [0,t1] - x = 8 - } else if oldCap > t3 { // (t3,infinity] - x = 5 - } else if oldCap <= t2 { // (t1,t2] - x = 7 - } else { // (t2,t3] - x = 6 - } - newCap = x * oldCap / 4 - - if num > 0 { - newCap += num - } - - // ensure newCap is a multiple of 64 (if it is > 64) or 16. - if newCap > 64 { - if x = newCap % 64; x != 0 { - x = newCap / 64 - newCap = 64 * (x + 1) - } - } else { - if x = newCap % 16; x != 0 { - x = newCap / 16 - newCap = 16 * (x + 1) - } - } - return -} - -func expandSliceValue(s reflect.Value, num int) reflect.Value { - if num <= 0 { - return s - } - l0 := s.Len() - l1 := l0 + num // new slice length - if l1 < l0 { - panic("ExpandSlice: slice overflow") - } - c0 := s.Cap() - if l1 <= c0 { - return s.Slice(0, l1) - } - st := s.Type() - c1 := growCap(c0, int(st.Elem().Size()), num) - s2 := reflect.MakeSlice(st, l1, c1) - // println("expandslicevalue: cap-old: ", c0, ", cap-new: ", c1, ", len-new: ", l1) - reflect.Copy(s2, s) - return s2 -} diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go b/cluster-autoscaler/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go deleted file mode 100644 index 8b06a0045908..000000000000 --- a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build !unsafe - -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -// stringView returns a view of the []byte as a string. -// In unsafe mode, it doesn't incur allocation and copying caused by conversion. -// In regular safe mode, it is an allocation and copy. -func stringView(v []byte) string { - return string(v) -} - -// bytesView returns a view of the string as a []byte. -// In unsafe mode, it doesn't incur allocation and copying caused by conversion. -// In regular safe mode, it is an allocation and copy. -func bytesView(v string) []byte { - return []byte(v) -} diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/helper_unsafe.go b/cluster-autoscaler/vendor/github.com/ugorji/go/codec/helper_unsafe.go deleted file mode 100644 index 0f596c71aad8..000000000000 --- a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/helper_unsafe.go +++ /dev/null @@ -1,49 +0,0 @@ -// +build unsafe - -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "unsafe" -) - -// This file has unsafe variants of some helper methods. - -type unsafeString struct { - Data uintptr - Len int -} - -type unsafeSlice struct { - Data uintptr - Len int - Cap int -} - -// stringView returns a view of the []byte as a string. -// In unsafe mode, it doesn't incur allocation and copying caused by conversion. -// In regular safe mode, it is an allocation and copy. 
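
The safe variants above and the unsafe ones that follow share one contract: stringView and bytesView return a *view*, so the caller must not mutate the backing bytes while the view is live. Since Go 1.20 the hand-rolled header structs of the unsafe build can be expressed with vetted APIs; a sketch of that modern equivalent (assumes Go >= 1.20, and is not part of this vendored code):

```go
package main

import (
	"fmt"
	"unsafe"
)

// stringView returns a zero-copy string view of b, the same trick as
// the build-tagged helper, via the Go 1.20+ unsafe.String API.
// The caller must not mutate b while the string is in use.
func stringView(b []byte) string {
	if len(b) == 0 {
		return ""
	}
	return unsafe.String(&b[0], len(b))
}

// bytesView returns a zero-copy []byte view of s (nil for an empty
// string, for simplicity). Mutating the result is undefined behavior.
func bytesView(s string) []byte {
	if len(s) == 0 {
		return nil
	}
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	b := []byte("hello")
	s := stringView(b)
	fmt.Println(s, len(bytesView(s))) // hello 5
}
```
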
-func stringView(v []byte) string { - if len(v) == 0 { - return "" - } - - bx := (*unsafeSlice)(unsafe.Pointer(&v)) - sx := unsafeString{bx.Data, bx.Len} - return *(*string)(unsafe.Pointer(&sx)) -} - -// bytesView returns a view of the string as a []byte. -// In unsafe mode, it doesn't incur allocation and copying caused by conversion. -// In regular safe mode, it is an allocation and copy. -func bytesView(v string) []byte { - if len(v) == 0 { - return zeroByteSlice - } - - sx := (*unsafeString)(unsafe.Pointer(&v)) - bx := unsafeSlice{sx.Data, sx.Len, sx.Len} - return *(*[]byte)(unsafe.Pointer(&bx)) -} diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/json.go b/cluster-autoscaler/vendor/github.com/ugorji/go/codec/json.go deleted file mode 100644 index 5bb389628978..000000000000 --- a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/json.go +++ /dev/null @@ -1,1234 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -// By default, this json support uses base64 encoding for bytes, because you cannot -// store and read any arbitrary string in json (only unicode). -// However, the user can configre how to encode/decode bytes. -// -// This library specifically supports UTF-8 for encoding and decoding only. -// -// Note that the library will happily encode/decode things which are not valid -// json e.g. a map[int64]string. We do it for consistency. With valid json, -// we will encode and decode appropriately. -// Users can specify their map type if necessary to force it. -// -// Note: -// - we cannot use strconv.Quote and strconv.Unquote because json quotes/unquotes differently. -// We implement it here. -// - Also, strconv.ParseXXX for floats and integers -// - only works on strings resulting in unnecessary allocation and []byte-string conversion. -// - it does a lot of redundant checks, because json numbers are simpler that what it supports. -// - We parse numbers (floats and integers) directly here. -// We only delegate parsing floats if it is a hairy float which could cause a loss of precision. -// In that case, we delegate to strconv.ParseFloat. -// -// Note: -// - encode does not beautify. There is no whitespace when encoding. -// - rpc calls which take single integer arguments or write single numeric arguments will need care. - -// Top-level methods of json(End|Dec)Driver (which are implementations of (en|de)cDriver -// MUST not call one-another. - -import ( - "bytes" - "encoding/base64" - "fmt" - "reflect" - "strconv" - "unicode/utf16" - "unicode/utf8" -) - -//-------------------------------- - -var ( - jsonLiterals = [...]byte{'t', 'r', 'u', 'e', 'f', 'a', 'l', 's', 'e', 'n', 'u', 'l', 'l'} - - jsonFloat64Pow10 = [...]float64{ - 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, - 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, - 1e20, 1e21, 1e22, - } - - jsonUint64Pow10 = [...]uint64{ - 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, - 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, - } - - // jsonTabs and jsonSpaces are used as caches for indents - jsonTabs, jsonSpaces string -) - -const ( - // jsonUnreadAfterDecNum controls whether we unread after decoding a number. - // - // instead of unreading, just update d.tok (iff it's not a whitespace char) - // However, doing this means that we may HOLD onto some data which belongs to another stream. - // Thus, it is safest to unread the data when done. 
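
The base64 default called out in the file header above mirrors what the standard library does for []byte, which makes for an easy sanity check with encoding/json:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// JSON has no raw-bytes type, so []byte round-trips as a
	// base64 string, the same default this codec documents.
	b, _ := json.Marshal([]byte("hello"))
	fmt.Println(string(b)) // "aGVsbG8="

	var out []byte
	_ = json.Unmarshal(b, &out)
	fmt.Println(string(out)) // hello
}
```
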
- // keep behind a constant flag for now. - jsonUnreadAfterDecNum = true - - // If !jsonValidateSymbols, decoding will be faster, by skipping some checks: - // - If we see first character of null, false or true, - // do not validate subsequent characters. - // - e.g. if we see a n, assume null and skip next 3 characters, - // and do not validate they are ull. - // P.S. Do not expect a significant decoding boost from this. - jsonValidateSymbols = true - - // if jsonTruncateMantissa, truncate mantissa if trailing 0's. - // This is important because it could allow some floats to be decoded without - // deferring to strconv.ParseFloat. - jsonTruncateMantissa = true - - // if mantissa >= jsonNumUintCutoff before multiplying by 10, this is an overflow - jsonNumUintCutoff = (1<<64-1)/uint64(10) + 1 // cutoff64(base) - - // if mantissa >= jsonNumUintMaxVal, this is an overflow - jsonNumUintMaxVal = 1< 1<<53 || v < -(1<<53)) { - e.w.writen1('"') - e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) - e.w.writen1('"') - return - } - e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) -} - -func (e *jsonEncDriver) EncodeUint(v uint64) { - if x := e.h.IntegerAsString; x == 'A' || x == 'L' && v > 1<<53 { - e.w.writen1('"') - e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) - e.w.writen1('"') - return - } - e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) -} - -func (e *jsonEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) { - if v := ext.ConvertExt(rv); v == nil { - e.w.writeb(jsonLiterals[9:13]) // null // e.EncodeNil() - } else { - en.encode(v) - } -} - -func (e *jsonEncDriver) EncodeRawExt(re *RawExt, en *Encoder) { - // only encodes re.Value (never re.Data) - if re.Value == nil { - e.w.writeb(jsonLiterals[9:13]) // null // e.EncodeNil() - } else { - en.encode(re.Value) - } -} - -func (e *jsonEncDriver) EncodeArrayStart(length int) { - if e.d { - e.dl++ - } - e.w.writen1('[') - e.c = containerArrayStart -} - -func (e *jsonEncDriver) EncodeMapStart(length int) { - if e.d { - e.dl++ - } - e.w.writen1('{') - e.c = containerMapStart -} - -func (e *jsonEncDriver) EncodeString(c charEncoding, v string) { - // e.w.writestr(strconv.Quote(v)) - e.quoteStr(v) -} - -func (e *jsonEncDriver) EncodeSymbol(v string) { - // e.EncodeString(c_UTF8, v) - e.quoteStr(v) -} - -func (e *jsonEncDriver) EncodeStringBytes(c charEncoding, v []byte) { - // if encoding raw bytes and RawBytesExt is configured, use it to encode - if c == c_RAW && e.se.i != nil { - e.EncodeExt(v, 0, &e.se, e.e) - return - } - if c == c_RAW { - slen := base64.StdEncoding.EncodedLen(len(v)) - if cap(e.bs) >= slen { - e.bs = e.bs[:slen] - } else { - e.bs = make([]byte, slen) - } - base64.StdEncoding.Encode(e.bs, v) - e.w.writen1('"') - e.w.writeb(e.bs) - e.w.writen1('"') - } else { - // e.EncodeString(c, string(v)) - e.quoteStr(stringView(v)) - } -} - -func (e *jsonEncDriver) EncodeAsis(v []byte) { - e.w.writeb(v) -} - -func (e *jsonEncDriver) quoteStr(s string) { - // adapted from std pkg encoding/json - const hex = "0123456789abcdef" - w := e.w - w.writen1('"') - start := 0 - for i := 0; i < len(s); { - if b := s[i]; b < utf8.RuneSelf { - if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { - i++ - continue - } - if start < i { - w.writestr(s[start:i]) - } - switch b { - case '\\', '"': - w.writen2('\\', b) - case '\n': - w.writen2('\\', 'n') - case '\r': - w.writen2('\\', 'r') - case '\b': - w.writen2('\\', 'b') - case '\f': - w.writen2('\\', 'f') - case '\t': - w.writen2('\\', 't') - default: - // encode all bytes < 
0x20 (except \r, \n). - // also encode < > & to prevent security holes when served to some browsers. - w.writestr(`\u00`) - w.writen2(hex[b>>4], hex[b&0xF]) - } - i++ - start = i - continue - } - c, size := utf8.DecodeRuneInString(s[i:]) - if c == utf8.RuneError && size == 1 { - if start < i { - w.writestr(s[start:i]) - } - w.writestr(`\ufffd`) - i += size - start = i - continue - } - // U+2028 is LINE SEPARATOR. U+2029 is PARAGRAPH SEPARATOR. - // Both technically valid JSON, but bomb on JSONP, so fix here. - if c == '\u2028' || c == '\u2029' { - if start < i { - w.writestr(s[start:i]) - } - w.writestr(`\u202`) - w.writen1(hex[c&0xF]) - i += size - start = i - continue - } - i += size - } - if start < len(s) { - w.writestr(s[start:]) - } - w.writen1('"') -} - -//-------------------------------- - -type jsonNum struct { - // bytes []byte // may have [+-.eE0-9] - mantissa uint64 // where mantissa ends, and maybe dot begins. - exponent int16 // exponent value. - manOverflow bool - neg bool // started with -. No initial sign in the bytes above. - dot bool // has dot - explicitExponent bool // explicit exponent -} - -func (x *jsonNum) reset() { - x.manOverflow = false - x.neg = false - x.dot = false - x.explicitExponent = false - x.mantissa = 0 - x.exponent = 0 -} - -// uintExp is called only if exponent > 0. -func (x *jsonNum) uintExp() (n uint64, overflow bool) { - n = x.mantissa - e := x.exponent - if e >= int16(len(jsonUint64Pow10)) { - overflow = true - return - } - n *= jsonUint64Pow10[e] - if n < x.mantissa || n > jsonNumUintMaxVal { - overflow = true - return - } - return - // for i := int16(0); i < e; i++ { - // if n >= jsonNumUintCutoff { - // overflow = true - // return - // } - // n *= 10 - // } - // return -} - -// these constants are only used withn floatVal. -// They are brought out, so that floatVal can be inlined. -const ( - jsonUint64MantissaBits = 52 - jsonMaxExponent = int16(len(jsonFloat64Pow10)) - 1 -) - -func (x *jsonNum) floatVal() (f float64, parseUsingStrConv bool) { - // We do not want to lose precision. - // Consequently, we will delegate to strconv.ParseFloat if any of the following happen: - // - There are more digits than in math.MaxUint64: 18446744073709551615 (20 digits) - // We expect up to 99.... (19 digits) - // - The mantissa cannot fit into a 52 bits of uint64 - // - The exponent is beyond our scope ie beyong 22. - parseUsingStrConv = x.manOverflow || - x.exponent > jsonMaxExponent || - (x.exponent < 0 && -(x.exponent) > jsonMaxExponent) || - x.mantissa>>jsonUint64MantissaBits != 0 - - if parseUsingStrConv { - return - } - - // all good. so handle parse here. - f = float64(x.mantissa) - // fmt.Printf(".Float: uint64 value: %v, float: %v\n", m, f) - if x.neg { - f = -f - } - if x.exponent > 0 { - f *= jsonFloat64Pow10[x.exponent] - } else if x.exponent < 0 { - f /= jsonFloat64Pow10[-x.exponent] - } - return -} - -type jsonDecDriver struct { - noBuiltInTypes - d *Decoder - h *JsonHandle - r decReader - - c containerState - // tok is used to store the token read right after skipWhiteSpace. - tok uint8 - - bstr [8]byte // scratch used for string \UXXX parsing - b [64]byte // scratch, used for parsing strings or numbers - b2 [64]byte // scratch, used only for decodeBytes (after base64) - bs []byte // scratch. Initialized from b. Used for parsing strings or numbers. 
- - se setExtWrapper - - n jsonNum -} - -func jsonIsWS(b byte) bool { - return b == ' ' || b == '\t' || b == '\r' || b == '\n' -} - -// // This will skip whitespace characters and return the next byte to read. -// // The next byte determines what the value will be one of. -// func (d *jsonDecDriver) skipWhitespace() { -// // fast-path: do not enter loop. Just check first (in case no whitespace). -// b := d.r.readn1() -// if jsonIsWS(b) { -// r := d.r -// for b = r.readn1(); jsonIsWS(b); b = r.readn1() { -// } -// } -// d.tok = b -// } - -func (d *jsonDecDriver) uncacheRead() { - if d.tok != 0 { - d.r.unreadn1() - d.tok = 0 - } -} - -func (d *jsonDecDriver) sendContainerState(c containerState) { - if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - var xc uint8 // char expected - if c == containerMapKey { - if d.c != containerMapStart { - xc = ',' - } - } else if c == containerMapValue { - xc = ':' - } else if c == containerMapEnd { - xc = '}' - } else if c == containerArrayElem { - if d.c != containerArrayStart { - xc = ',' - } - } else if c == containerArrayEnd { - xc = ']' - } - if xc != 0 { - if d.tok != xc { - d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok) - } - d.tok = 0 - } - d.c = c -} - -func (d *jsonDecDriver) CheckBreak() bool { - if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - if d.tok == '}' || d.tok == ']' { - // d.tok = 0 // only checking, not consuming - return true - } - return false -} - -func (d *jsonDecDriver) readStrIdx(fromIdx, toIdx uint8) { - bs := d.r.readx(int(toIdx - fromIdx)) - d.tok = 0 - if jsonValidateSymbols { - if !bytes.Equal(bs, jsonLiterals[fromIdx:toIdx]) { - d.d.errorf("json: expecting %s: got %s", jsonLiterals[fromIdx:toIdx], bs) - return - } - } -} - -func (d *jsonDecDriver) TryDecodeAsNil() bool { - if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - if d.tok == 'n' { - d.readStrIdx(10, 13) // ull - return true - } - return false -} - -func (d *jsonDecDriver) DecodeBool() bool { - if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - if d.tok == 'f' { - d.readStrIdx(5, 9) // alse - return false - } - if d.tok == 't' { - d.readStrIdx(1, 4) // rue - return true - } - d.d.errorf("json: decode bool: got first char %c", d.tok) - return false // "unreachable" -} - -func (d *jsonDecDriver) ReadMapStart() int { - if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - if d.tok != '{' { - d.d.errorf("json: expect char '%c' but got char '%c'", '{', d.tok) - } - d.tok = 0 - d.c = containerMapStart - return -1 -} - -func (d *jsonDecDriver) ReadArrayStart() int { - if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - if d.tok != '[' { - d.d.errorf("json: expect char '%c' but got char '%c'", '[', d.tok) - } - d.tok = 0 - d.c = containerArrayStart - return -1 -} - -func (d *jsonDecDriver) ContainerType() (vt valueType) { - // check container type by checking the first char - if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - if b := d.tok; b == '{' { - return valueTypeMap - } else if b == '[' { - return valueTypeArray - } else if b == 'n' { - return valueTypeNil - } else if b == '"' { - return valueTypeString - } - return 
valueTypeUnset - // d.d.errorf("isContainerType: unsupported parameter: %v", vt) - // return false // "unreachable" -} - -func (d *jsonDecDriver) decNum(storeBytes bool) { - // If it is has a . or an e|E, decode as a float; else decode as an int. - if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - b := d.tok - var str bool - if b == '"' { - str = true - b = d.r.readn1() - } - if !(b == '+' || b == '-' || b == '.' || (b >= '0' && b <= '9')) { - d.d.errorf("json: decNum: got first char '%c'", b) - return - } - d.tok = 0 - - const cutoff = (1<<64-1)/uint64(10) + 1 // cutoff64(base) - const jsonNumUintMaxVal = 1<= jsonNumUintCutoff { - n.manOverflow = true - break - } - v := uint64(b - '0') - n.mantissa *= 10 - if v != 0 { - n1 := n.mantissa + v - if n1 < n.mantissa || n1 > jsonNumUintMaxVal { - n.manOverflow = true // n+v overflows - break - } - n.mantissa = n1 - } - case 6: - state = 7 - fallthrough - case 7: - if !(b == '0' && e == 0) { - e = e*10 + int16(b-'0') - } - default: - break LOOP - } - case '"': - if str { - if storeBytes { - d.bs = append(d.bs, '"') - } - b, eof = r.readn1eof() - } - break LOOP - default: - break LOOP - } - if storeBytes { - d.bs = append(d.bs, b) - } - b, eof = r.readn1eof() - } - - if jsonTruncateMantissa && n.mantissa != 0 { - for n.mantissa%10 == 0 { - n.mantissa /= 10 - n.exponent++ - } - } - - if e != 0 { - if eNeg { - n.exponent -= e - } else { - n.exponent += e - } - } - - // d.n = n - - if !eof { - if jsonUnreadAfterDecNum { - r.unreadn1() - } else { - if !jsonIsWS(b) { - d.tok = b - } - } - } - // fmt.Printf("1: n: bytes: %s, neg: %v, dot: %v, exponent: %v, mantissaEndIndex: %v\n", - // n.bytes, n.neg, n.dot, n.exponent, n.mantissaEndIndex) - return -} - -func (d *jsonDecDriver) DecodeInt(bitsize uint8) (i int64) { - d.decNum(false) - n := &d.n - if n.manOverflow { - d.d.errorf("json: overflow integer after: %v", n.mantissa) - return - } - var u uint64 - if n.exponent == 0 { - u = n.mantissa - } else if n.exponent < 0 { - d.d.errorf("json: fractional integer") - return - } else if n.exponent > 0 { - var overflow bool - if u, overflow = n.uintExp(); overflow { - d.d.errorf("json: overflow integer") - return - } - } - i = int64(u) - if n.neg { - i = -i - } - if chkOvf.Int(i, bitsize) { - d.d.errorf("json: overflow %v bits: %s", bitsize, d.bs) - return - } - // fmt.Printf("DecodeInt: %v\n", i) - return -} - -// floatVal MUST only be called after a decNum, as d.bs now contains the bytes of the number -func (d *jsonDecDriver) floatVal() (f float64) { - f, useStrConv := d.n.floatVal() - if useStrConv { - var err error - if f, err = strconv.ParseFloat(stringView(d.bs), 64); err != nil { - panic(fmt.Errorf("parse float: %s, %v", d.bs, err)) - } - if d.n.neg { - f = -f - } - } - return -} - -func (d *jsonDecDriver) DecodeUint(bitsize uint8) (u uint64) { - d.decNum(false) - n := &d.n - if n.neg { - d.d.errorf("json: unsigned integer cannot be negative") - return - } - if n.manOverflow { - d.d.errorf("json: overflow integer after: %v", n.mantissa) - return - } - if n.exponent == 0 { - u = n.mantissa - } else if n.exponent < 0 { - d.d.errorf("json: fractional integer") - return - } else if n.exponent > 0 { - var overflow bool - if u, overflow = n.uintExp(); overflow { - d.d.errorf("json: overflow integer") - return - } - } - if chkOvf.Uint(u, bitsize) { - d.d.errorf("json: overflow %v bits: %s", bitsize, d.bs) - return - } - // fmt.Printf("DecodeUint: %v\n", u) - return -} - -func (d *jsonDecDriver) 
DecodeFloat(chkOverflow32 bool) (f float64) { - d.decNum(true) - f = d.floatVal() - if chkOverflow32 && chkOvf.Float32(f) { - d.d.errorf("json: overflow float32: %v, %s", f, d.bs) - return - } - return -} - -func (d *jsonDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { - if ext == nil { - re := rv.(*RawExt) - re.Tag = xtag - d.d.decode(&re.Value) - } else { - var v interface{} - d.d.decode(&v) - ext.UpdateExt(rv, v) - } - return -} - -func (d *jsonDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { - // if decoding into raw bytes, and the RawBytesExt is configured, use it to decode. - if !isstring && d.se.i != nil { - bsOut = bs - d.DecodeExt(&bsOut, 0, &d.se) - return - } - d.appendStringAsBytes() - // if isstring, then just return the bytes, even if it is using the scratch buffer. - // the bytes will be converted to a string as needed. - if isstring { - return d.bs - } - // if appendStringAsBytes returned a zero-len slice, then treat as nil. - // This should only happen for null, and "". - if len(d.bs) == 0 { - return nil - } - bs0 := d.bs - slen := base64.StdEncoding.DecodedLen(len(bs0)) - if slen <= cap(bs) { - bsOut = bs[:slen] - } else if zerocopy && slen <= cap(d.b2) { - bsOut = d.b2[:slen] - } else { - bsOut = make([]byte, slen) - } - slen2, err := base64.StdEncoding.Decode(bsOut, bs0) - if err != nil { - d.d.errorf("json: error decoding base64 binary '%s': %v", bs0, err) - return nil - } - if slen != slen2 { - bsOut = bsOut[:slen2] - } - return -} - -func (d *jsonDecDriver) DecodeString() (s string) { - d.appendStringAsBytes() - // if x := d.s.sc; x != nil && x.so && x.st == '}' { // map key - if d.c == containerMapKey { - return d.d.string(d.bs) - } - return string(d.bs) -} - -func (d *jsonDecDriver) appendStringAsBytes() { - if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - - // handle null as a string - if d.tok == 'n' { - d.readStrIdx(10, 13) // ull - d.bs = d.bs[:0] - return - } - - if d.tok != '"' { - d.d.errorf("json: expect char '%c' but got char '%c'", '"', d.tok) - } - d.tok = 0 - - v := d.bs[:0] - var c uint8 - r := d.r - for { - c = r.readn1() - if c == '"' { - break - } else if c == '\\' { - c = r.readn1() - switch c { - case '"', '\\', '/', '\'': - v = append(v, c) - case 'b': - v = append(v, '\b') - case 'f': - v = append(v, '\f') - case 'n': - v = append(v, '\n') - case 'r': - v = append(v, '\r') - case 't': - v = append(v, '\t') - case 'u': - rr := d.jsonU4(false) - // fmt.Printf("$$$$$$$$$: is surrogate: %v\n", utf16.IsSurrogate(rr)) - if utf16.IsSurrogate(rr) { - rr = utf16.DecodeRune(rr, d.jsonU4(true)) - } - w2 := utf8.EncodeRune(d.bstr[:], rr) - v = append(v, d.bstr[:w2]...) - default: - d.d.errorf("json: unsupported escaped value: %c", c) - } - } else { - v = append(v, c) - } - } - d.bs = v -} - -func (d *jsonDecDriver) jsonU4(checkSlashU bool) rune { - r := d.r - if checkSlashU && !(r.readn1() == '\\' && r.readn1() == 'u') { - d.d.errorf(`json: unquoteStr: invalid unicode sequence. 
Expecting \u`) - return 0 - } - // u, _ := strconv.ParseUint(string(d.bstr[:4]), 16, 64) - var u uint32 - for i := 0; i < 4; i++ { - v := r.readn1() - if '0' <= v && v <= '9' { - v = v - '0' - } else if 'a' <= v && v <= 'z' { - v = v - 'a' + 10 - } else if 'A' <= v && v <= 'Z' { - v = v - 'A' + 10 - } else { - d.d.errorf(`json: unquoteStr: invalid hex char in \u unicode sequence: %q`, v) - return 0 - } - u = u*16 + uint32(v) - } - return rune(u) -} - -func (d *jsonDecDriver) DecodeNaked() { - z := &d.d.n - // var decodeFurther bool - - if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - switch d.tok { - case 'n': - d.readStrIdx(10, 13) // ull - z.v = valueTypeNil - case 'f': - d.readStrIdx(5, 9) // alse - z.v = valueTypeBool - z.b = false - case 't': - d.readStrIdx(1, 4) // rue - z.v = valueTypeBool - z.b = true - case '{': - z.v = valueTypeMap - // d.tok = 0 // don't consume. kInterfaceNaked will call ReadMapStart - // decodeFurther = true - case '[': - z.v = valueTypeArray - // d.tok = 0 // don't consume. kInterfaceNaked will call ReadArrayStart - // decodeFurther = true - case '"': - z.v = valueTypeString - z.s = d.DecodeString() - default: // number - d.decNum(true) - n := &d.n - // if the string had a any of [.eE], then decode as float. - switch { - case n.explicitExponent, n.dot, n.exponent < 0, n.manOverflow: - z.v = valueTypeFloat - z.f = d.floatVal() - case n.exponent == 0: - u := n.mantissa - switch { - case n.neg: - z.v = valueTypeInt - z.i = -int64(u) - case d.h.SignedInteger: - z.v = valueTypeInt - z.i = int64(u) - default: - z.v = valueTypeUint - z.u = u - } - default: - u, overflow := n.uintExp() - switch { - case overflow: - z.v = valueTypeFloat - z.f = d.floatVal() - case n.neg: - z.v = valueTypeInt - z.i = -int64(u) - case d.h.SignedInteger: - z.v = valueTypeInt - z.i = int64(u) - default: - z.v = valueTypeUint - z.u = u - } - } - // fmt.Printf("DecodeNaked: Number: %T, %v\n", v, v) - } - // if decodeFurther { - // d.s.sc.retryRead() - // } - return -} - -//---------------------- - -// JsonHandle is a handle for JSON encoding format. -// -// Json is comprehensively supported: -// - decodes numbers into interface{} as int, uint or float64 -// - configurable way to encode/decode []byte . -// by default, encodes and decodes []byte using base64 Std Encoding -// - UTF-8 support for encoding and decoding -// -// It has better performance than the json library in the standard library, -// by leveraging the performance improvements of the codec library and -// minimizing allocations. -// -// In addition, it doesn't read more bytes than necessary during a decode, which allows -// reading multiple values from a stream containing json and non-json content. -// For example, a user can read a json value, then a cbor value, then a msgpack value, -// all from the same stream in sequence. -type JsonHandle struct { - textEncodingType - BasicHandle - // RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way. - // If not configured, raw bytes are encoded to/from base64 text. - RawBytesExt InterfaceExt - - // Indent indicates how a value is encoded. - // - If positive, indent by that number of spaces. - // - If negative, indent by that number of tabs. - Indent int8 - - // IntegerAsString controls how integers (signed and unsigned) are encoded. - // - // Per the JSON Spec, JSON numbers are 64-bit floating point numbers. 
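
That 64-bit floating point ceiling is easy to demonstrate: above 2^53, a float64 round trip silently changes the value, which is precisely what the IntegerAsString option described here (and continued below) works around. A standalone demonstration using only the standard library:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// 2^53 + 1 cannot be represented exactly as a float64, which is
	// what generic JSON decoders use for numbers.
	const big = uint64(1<<53 + 1)

	b, _ := json.Marshal(big)
	var roundTripped float64
	_ = json.Unmarshal(b, &roundTripped)
	fmt.Println(big, "->", uint64(roundTripped)) // 9007199254740993 -> 9007199254740992

	// Encoding the integer as a JSON string is lossless.
	s, _ := json.Marshal(fmt.Sprint(big))
	fmt.Println(string(s)) // "9007199254740993"
}
```
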
- // Consequently, integers > 2^53 cannot be represented as a JSON number without losing precision. - // This can be mitigated by configuring how to encode integers. - // - // IntegerAsString interprets the following values: - // - if 'L', then encode integers > 2^53 as a json string. - // - if 'A', then encode all integers as a json string - // containing the exact integer representation as a decimal. - // - else encode all integers as a json number (default) - IntegerAsString uint8 -} - -func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { - return h.SetExt(rt, tag, &setExtWrapper{i: ext}) -} - -func (h *JsonHandle) newEncDriver(e *Encoder) encDriver { - hd := jsonEncDriver{e: e, h: h} - hd.bs = hd.b[:0] - - hd.reset() - - return &hd -} - -func (h *JsonHandle) newDecDriver(d *Decoder) decDriver { - // d := jsonDecDriver{r: r.(*bytesDecReader), h: h} - hd := jsonDecDriver{d: d, h: h} - hd.bs = hd.b[:0] - hd.reset() - return &hd -} - -func (e *jsonEncDriver) reset() { - e.w = e.e.w - e.se.i = e.h.RawBytesExt - if e.bs != nil { - e.bs = e.bs[:0] - } - e.d, e.dt, e.dl, e.ds = false, false, 0, "" - e.c = 0 - if e.h.Indent > 0 { - e.d = true - e.ds = jsonSpaces[:e.h.Indent] - } else if e.h.Indent < 0 { - e.d = true - e.dt = true - e.ds = jsonTabs[:-(e.h.Indent)] - } -} - -func (d *jsonDecDriver) reset() { - d.r = d.d.r - d.se.i = d.h.RawBytesExt - if d.bs != nil { - d.bs = d.bs[:0] - } - d.c, d.tok = 0, 0 - d.n.reset() -} - -var jsonEncodeTerminate = []byte{' '} - -func (h *JsonHandle) rpcEncodeTerminate() []byte { - return jsonEncodeTerminate -} - -var _ decDriver = (*jsonDecDriver)(nil) -var _ encDriver = (*jsonEncDriver)(nil) diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/msgpack.go b/cluster-autoscaler/vendor/github.com/ugorji/go/codec/msgpack.go deleted file mode 100644 index e79830b562ad..000000000000 --- a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/msgpack.go +++ /dev/null @@ -1,852 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -/* -MSGPACK - -Msgpack-c implementation powers the c, c++, python, ruby, etc libraries. -We need to maintain compatibility with it and how it encodes integer values -without caring about the type. 
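
"Without caring about the type" means the format byte is chosen from the value's magnitude alone, with the smallest representation winning. A self-contained sketch of that dispatch, using hypothetical appendInt/appendUint helpers that mirror the EncodeInt/EncodeUint methods further down (format bytes per the msgpack spec; assumes Go >= 1.19 for binary.BigEndian.AppendUintN):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math"
)

// appendInt picks the smallest msgpack representation: positive
// values reuse the unsigned forms, small negatives become a
// single-byte negative fixnum (0xe0..0xff).
func appendInt(b []byte, i int64) []byte {
	switch {
	case i >= 0:
		return appendUint(b, uint64(i))
	case i >= -32:
		return append(b, byte(i)) // negative fixnum
	case i >= math.MinInt8:
		return append(b, 0xd0, byte(i)) // int8
	case i >= math.MinInt16:
		return binary.BigEndian.AppendUint16(append(b, 0xd1), uint16(i)) // int16
	case i >= math.MinInt32:
		return binary.BigEndian.AppendUint32(append(b, 0xd2), uint32(i)) // int32
	default:
		return binary.BigEndian.AppendUint64(append(b, 0xd3), uint64(i)) // int64
	}
}

func appendUint(b []byte, u uint64) []byte {
	switch {
	case u <= math.MaxInt8:
		return append(b, byte(u)) // positive fixnum
	case u <= math.MaxUint8:
		return append(b, 0xcc, byte(u)) // uint8
	case u <= math.MaxUint16:
		return binary.BigEndian.AppendUint16(append(b, 0xcd), uint16(u)) // uint16
	case u <= math.MaxUint32:
		return binary.BigEndian.AppendUint32(append(b, 0xce), uint32(u)) // uint32
	default:
		return binary.BigEndian.AppendUint64(append(b, 0xcf), u) // uint64
	}
}

func main() {
	fmt.Printf("% x\n", appendInt(nil, 5))    // 05
	fmt.Printf("% x\n", appendInt(nil, -5))   // fb
	fmt.Printf("% x\n", appendInt(nil, 300))  // cd 01 2c
	fmt.Printf("% x\n", appendInt(nil, -300)) // d1 fe d4
}
```
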
- -For compatibility with behaviour of msgpack-c reference implementation: - - Go intX (>0) and uintX - IS ENCODED AS - msgpack +ve fixnum, unsigned - - Go intX (<0) - IS ENCODED AS - msgpack -ve fixnum, signed - -*/ -package codec - -import ( - "fmt" - "io" - "math" - "net/rpc" - "reflect" -) - -const ( - mpPosFixNumMin byte = 0x00 - mpPosFixNumMax = 0x7f - mpFixMapMin = 0x80 - mpFixMapMax = 0x8f - mpFixArrayMin = 0x90 - mpFixArrayMax = 0x9f - mpFixStrMin = 0xa0 - mpFixStrMax = 0xbf - mpNil = 0xc0 - _ = 0xc1 - mpFalse = 0xc2 - mpTrue = 0xc3 - mpFloat = 0xca - mpDouble = 0xcb - mpUint8 = 0xcc - mpUint16 = 0xcd - mpUint32 = 0xce - mpUint64 = 0xcf - mpInt8 = 0xd0 - mpInt16 = 0xd1 - mpInt32 = 0xd2 - mpInt64 = 0xd3 - - // extensions below - mpBin8 = 0xc4 - mpBin16 = 0xc5 - mpBin32 = 0xc6 - mpExt8 = 0xc7 - mpExt16 = 0xc8 - mpExt32 = 0xc9 - mpFixExt1 = 0xd4 - mpFixExt2 = 0xd5 - mpFixExt4 = 0xd6 - mpFixExt8 = 0xd7 - mpFixExt16 = 0xd8 - - mpStr8 = 0xd9 // new - mpStr16 = 0xda - mpStr32 = 0xdb - - mpArray16 = 0xdc - mpArray32 = 0xdd - - mpMap16 = 0xde - mpMap32 = 0xdf - - mpNegFixNumMin = 0xe0 - mpNegFixNumMax = 0xff -) - -// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec -// that the backend RPC service takes multiple arguments, which have been arranged -// in sequence in the slice. -// -// The Codec then passes it AS-IS to the rpc service (without wrapping it in an -// array of 1 element). -type MsgpackSpecRpcMultiArgs []interface{} - -// A MsgpackContainer type specifies the different types of msgpackContainers. -type msgpackContainerType struct { - fixCutoff int - bFixMin, b8, b16, b32 byte - hasFixMin, has8, has8Always bool -} - -var ( - msgpackContainerStr = msgpackContainerType{32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false} - msgpackContainerBin = msgpackContainerType{0, 0, mpBin8, mpBin16, mpBin32, false, true, true} - msgpackContainerList = msgpackContainerType{16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false} - msgpackContainerMap = msgpackContainerType{16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false} -) - -//--------------------------------------------- - -type msgpackEncDriver struct { - noBuiltInTypes - encNoSeparator - e *Encoder - w encWriter - h *MsgpackHandle - x [8]byte -} - -func (e *msgpackEncDriver) EncodeNil() { - e.w.writen1(mpNil) -} - -func (e *msgpackEncDriver) EncodeInt(i int64) { - if i >= 0 { - e.EncodeUint(uint64(i)) - } else if i >= -32 { - e.w.writen1(byte(i)) - } else if i >= math.MinInt8 { - e.w.writen2(mpInt8, byte(i)) - } else if i >= math.MinInt16 { - e.w.writen1(mpInt16) - bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) - } else if i >= math.MinInt32 { - e.w.writen1(mpInt32) - bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) - } else { - e.w.writen1(mpInt64) - bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) - } -} - -func (e *msgpackEncDriver) EncodeUint(i uint64) { - if i <= math.MaxInt8 { - e.w.writen1(byte(i)) - } else if i <= math.MaxUint8 { - e.w.writen2(mpUint8, byte(i)) - } else if i <= math.MaxUint16 { - e.w.writen1(mpUint16) - bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) - } else if i <= math.MaxUint32 { - e.w.writen1(mpUint32) - bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) - } else { - e.w.writen1(mpUint64) - bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) - } -} - -func (e *msgpackEncDriver) EncodeBool(b bool) { - if b { - e.w.writen1(mpTrue) - } else { - e.w.writen1(mpFalse) - } -} - -func (e *msgpackEncDriver) EncodeFloat32(f float32) { - e.w.writen1(mpFloat) - 
bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f)) -} - -func (e *msgpackEncDriver) EncodeFloat64(f float64) { - e.w.writen1(mpDouble) - bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f)) -} - -func (e *msgpackEncDriver) EncodeExt(v interface{}, xtag uint64, ext Ext, _ *Encoder) { - bs := ext.WriteExt(v) - if bs == nil { - e.EncodeNil() - return - } - if e.h.WriteExt { - e.encodeExtPreamble(uint8(xtag), len(bs)) - e.w.writeb(bs) - } else { - e.EncodeStringBytes(c_RAW, bs) - } -} - -func (e *msgpackEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { - e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) - e.w.writeb(re.Data) -} - -func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) { - if l == 1 { - e.w.writen2(mpFixExt1, xtag) - } else if l == 2 { - e.w.writen2(mpFixExt2, xtag) - } else if l == 4 { - e.w.writen2(mpFixExt4, xtag) - } else if l == 8 { - e.w.writen2(mpFixExt8, xtag) - } else if l == 16 { - e.w.writen2(mpFixExt16, xtag) - } else if l < 256 { - e.w.writen2(mpExt8, byte(l)) - e.w.writen1(xtag) - } else if l < 65536 { - e.w.writen1(mpExt16) - bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l)) - e.w.writen1(xtag) - } else { - e.w.writen1(mpExt32) - bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l)) - e.w.writen1(xtag) - } -} - -func (e *msgpackEncDriver) EncodeArrayStart(length int) { - e.writeContainerLen(msgpackContainerList, length) -} - -func (e *msgpackEncDriver) EncodeMapStart(length int) { - e.writeContainerLen(msgpackContainerMap, length) -} - -func (e *msgpackEncDriver) EncodeString(c charEncoding, s string) { - if c == c_RAW && e.h.WriteExt { - e.writeContainerLen(msgpackContainerBin, len(s)) - } else { - e.writeContainerLen(msgpackContainerStr, len(s)) - } - if len(s) > 0 { - e.w.writestr(s) - } -} - -func (e *msgpackEncDriver) EncodeSymbol(v string) { - e.EncodeString(c_UTF8, v) -} - -func (e *msgpackEncDriver) EncodeStringBytes(c charEncoding, bs []byte) { - if c == c_RAW && e.h.WriteExt { - e.writeContainerLen(msgpackContainerBin, len(bs)) - } else { - e.writeContainerLen(msgpackContainerStr, len(bs)) - } - if len(bs) > 0 { - e.w.writeb(bs) - } -} - -func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) { - if ct.hasFixMin && l < ct.fixCutoff { - e.w.writen1(ct.bFixMin | byte(l)) - } else if ct.has8 && l < 256 && (ct.has8Always || e.h.WriteExt) { - e.w.writen2(ct.b8, uint8(l)) - } else if l < 65536 { - e.w.writen1(ct.b16) - bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l)) - } else { - e.w.writen1(ct.b32) - bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l)) - } -} - -//--------------------------------------------- - -type msgpackDecDriver struct { - d *Decoder - r decReader // *Decoder decReader decReaderT - h *MsgpackHandle - b [scratchByteArrayLen]byte - bd byte - bdRead bool - br bool // bytes reader - noBuiltInTypes - noStreamingCodec - decNoSeparator -} - -// Note: This returns either a primitive (int, bool, etc) for non-containers, -// or a containerType, or a specific type denoting nil or extension. -// It is called when a nil interface{} is passed, leaving it up to the DecDriver -// to introspect the stream and decide how best to decode. -// It deciphers the value by looking at the stream first. 
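
The stream inspection described above dispatches purely on the initial byte, using the ranges from the constant block earlier in this file. A compact, standalone classifier over those same ranges (a sketch; range values are from the msgpack spec):

```go
package main

import "fmt"

// kindOf classifies a msgpack initial byte into its value family,
// a simplified version of the dispatch DecodeNaked performs.
func kindOf(bd byte) string {
	switch {
	case bd <= 0x7f:
		return "positive fixnum"
	case bd >= 0xe0:
		return "negative fixnum"
	case bd >= 0x80 && bd <= 0x8f:
		return "fixmap"
	case bd >= 0x90 && bd <= 0x9f:
		return "fixarray"
	case bd >= 0xa0 && bd <= 0xbf:
		return "fixstr"
	}
	switch bd {
	case 0xc0:
		return "nil"
	case 0xc2, 0xc3:
		return "bool"
	case 0xca, 0xcb:
		return "float32/float64"
	case 0xcc, 0xcd, 0xce, 0xcf:
		return "uint8..uint64"
	case 0xd0, 0xd1, 0xd2, 0xd3:
		return "int8..int64"
	case 0xc4, 0xc5, 0xc6:
		return "bin"
	case 0xd9, 0xda, 0xdb:
		return "str"
	case 0xdc, 0xdd:
		return "array16/array32"
	case 0xde, 0xdf:
		return "map16/map32"
	}
	return "ext/reserved"
}

func main() {
	for _, b := range []byte{0x05, 0xfb, 0x93, 0xa5, 0xc0, 0xcb} {
		fmt.Printf("%#04x -> %s\n", b, kindOf(b))
	}
}
```
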
-func (d *msgpackDecDriver) DecodeNaked() { - if !d.bdRead { - d.readNextBd() - } - bd := d.bd - n := &d.d.n - var decodeFurther bool - - switch bd { - case mpNil: - n.v = valueTypeNil - d.bdRead = false - case mpFalse: - n.v = valueTypeBool - n.b = false - case mpTrue: - n.v = valueTypeBool - n.b = true - - case mpFloat: - n.v = valueTypeFloat - n.f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) - case mpDouble: - n.v = valueTypeFloat - n.f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) - - case mpUint8: - n.v = valueTypeUint - n.u = uint64(d.r.readn1()) - case mpUint16: - n.v = valueTypeUint - n.u = uint64(bigen.Uint16(d.r.readx(2))) - case mpUint32: - n.v = valueTypeUint - n.u = uint64(bigen.Uint32(d.r.readx(4))) - case mpUint64: - n.v = valueTypeUint - n.u = uint64(bigen.Uint64(d.r.readx(8))) - - case mpInt8: - n.v = valueTypeInt - n.i = int64(int8(d.r.readn1())) - case mpInt16: - n.v = valueTypeInt - n.i = int64(int16(bigen.Uint16(d.r.readx(2)))) - case mpInt32: - n.v = valueTypeInt - n.i = int64(int32(bigen.Uint32(d.r.readx(4)))) - case mpInt64: - n.v = valueTypeInt - n.i = int64(int64(bigen.Uint64(d.r.readx(8)))) - - default: - switch { - case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: - // positive fixnum (always signed) - n.v = valueTypeInt - n.i = int64(int8(bd)) - case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: - // negative fixnum - n.v = valueTypeInt - n.i = int64(int8(bd)) - case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: - if d.h.RawToString { - n.v = valueTypeString - n.s = d.DecodeString() - } else { - n.v = valueTypeBytes - n.l = d.DecodeBytes(nil, false, false) - } - case bd == mpBin8, bd == mpBin16, bd == mpBin32: - n.v = valueTypeBytes - n.l = d.DecodeBytes(nil, false, false) - case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: - n.v = valueTypeArray - decodeFurther = true - case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: - n.v = valueTypeMap - decodeFurther = true - case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: - n.v = valueTypeExt - clen := d.readExtLen() - n.u = uint64(d.r.readn1()) - n.l = d.r.readx(clen) - default: - d.d.errorf("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) - } - } - if !decodeFurther { - d.bdRead = false - } - if n.v == valueTypeUint && d.h.SignedInteger { - n.v = valueTypeInt - n.i = int64(n.u) - } - return -} - -// int can be decoded from msgpack type: intXXX or uintXXX -func (d *msgpackDecDriver) DecodeInt(bitsize uint8) (i int64) { - if !d.bdRead { - d.readNextBd() - } - switch d.bd { - case mpUint8: - i = int64(uint64(d.r.readn1())) - case mpUint16: - i = int64(uint64(bigen.Uint16(d.r.readx(2)))) - case mpUint32: - i = int64(uint64(bigen.Uint32(d.r.readx(4)))) - case mpUint64: - i = int64(bigen.Uint64(d.r.readx(8))) - case mpInt8: - i = int64(int8(d.r.readn1())) - case mpInt16: - i = int64(int16(bigen.Uint16(d.r.readx(2)))) - case mpInt32: - i = int64(int32(bigen.Uint32(d.r.readx(4)))) - case mpInt64: - i = int64(bigen.Uint64(d.r.readx(8))) - default: - switch { - case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: - i = int64(int8(d.bd)) - case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: - i = int64(int8(d.bd)) - default: - d.d.errorf("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) - return - } - } - // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() - if bitsize > 0 { - if trunc := (i << (64 - bitsize)) >> 
(64 - bitsize); i != trunc { - d.d.errorf("Overflow int value: %v", i) - return - } - } - d.bdRead = false - return -} - -// uint can be decoded from msgpack type: intXXX or uintXXX -func (d *msgpackDecDriver) DecodeUint(bitsize uint8) (ui uint64) { - if !d.bdRead { - d.readNextBd() - } - switch d.bd { - case mpUint8: - ui = uint64(d.r.readn1()) - case mpUint16: - ui = uint64(bigen.Uint16(d.r.readx(2))) - case mpUint32: - ui = uint64(bigen.Uint32(d.r.readx(4))) - case mpUint64: - ui = bigen.Uint64(d.r.readx(8)) - case mpInt8: - if i := int64(int8(d.r.readn1())); i >= 0 { - ui = uint64(i) - } else { - d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) - return - } - case mpInt16: - if i := int64(int16(bigen.Uint16(d.r.readx(2)))); i >= 0 { - ui = uint64(i) - } else { - d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) - return - } - case mpInt32: - if i := int64(int32(bigen.Uint32(d.r.readx(4)))); i >= 0 { - ui = uint64(i) - } else { - d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) - return - } - case mpInt64: - if i := int64(bigen.Uint64(d.r.readx(8))); i >= 0 { - ui = uint64(i) - } else { - d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) - return - } - default: - switch { - case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: - ui = uint64(d.bd) - case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: - d.d.errorf("Assigning negative signed value: %v, to unsigned type", int(d.bd)) - return - default: - d.d.errorf("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) - return - } - } - // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() - if bitsize > 0 { - if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc { - d.d.errorf("Overflow uint value: %v", ui) - return - } - } - d.bdRead = false - return -} - -// float can either be decoded from msgpack type: float, double or intX -func (d *msgpackDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == mpFloat { - f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) - } else if d.bd == mpDouble { - f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) - } else { - f = float64(d.DecodeInt(0)) - } - if chkOverflow32 && chkOvf.Float32(f) { - d.d.errorf("msgpack: float32 overflow: %v", f) - return - } - d.bdRead = false - return -} - -// bool can be decoded from bool, fixnum 0 or 1. -func (d *msgpackDecDriver) DecodeBool() (b bool) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == mpFalse || d.bd == 0 { - // b = false - } else if d.bd == mpTrue || d.bd == 1 { - b = true - } else { - d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) - return - } - d.bdRead = false - return -} - -func (d *msgpackDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { - if !d.bdRead { - d.readNextBd() - } - var clen int - // ignore isstring. Expect that the bytes may be found from msgpackContainerStr or msgpackContainerBin - if bd := d.bd; bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { - clen = d.readContainerLen(msgpackContainerBin) - } else { - clen = d.readContainerLen(msgpackContainerStr) - } - // println("DecodeBytes: clen: ", clen) - d.bdRead = false - // bytes may be nil, so handle it. if nil, clen=-1. 
- if clen < 0 { - return nil - } - if zerocopy { - if d.br { - return d.r.readx(clen) - } else if len(bs) == 0 { - bs = d.b[:] - } - } - return decByteSlice(d.r, clen, bs) -} - -func (d *msgpackDecDriver) DecodeString() (s string) { - return string(d.DecodeBytes(d.b[:], true, true)) -} - -func (d *msgpackDecDriver) readNextBd() { - d.bd = d.r.readn1() - d.bdRead = true -} - -func (d *msgpackDecDriver) uncacheRead() { - if d.bdRead { - d.r.unreadn1() - d.bdRead = false - } -} - -func (d *msgpackDecDriver) ContainerType() (vt valueType) { - bd := d.bd - if bd == mpNil { - return valueTypeNil - } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 || - (!d.h.RawToString && - (bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax))) { - return valueTypeBytes - } else if d.h.RawToString && - (bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax)) { - return valueTypeString - } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) { - return valueTypeArray - } else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) { - return valueTypeMap - } else { - // d.d.errorf("isContainerType: unsupported parameter: %v", vt) - } - return valueTypeUnset -} - -func (d *msgpackDecDriver) TryDecodeAsNil() (v bool) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == mpNil { - d.bdRead = false - v = true - } - return -} - -func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) { - bd := d.bd - if bd == mpNil { - clen = -1 // to represent nil - } else if bd == ct.b8 { - clen = int(d.r.readn1()) - } else if bd == ct.b16 { - clen = int(bigen.Uint16(d.r.readx(2))) - } else if bd == ct.b32 { - clen = int(bigen.Uint32(d.r.readx(4))) - } else if (ct.bFixMin & bd) == ct.bFixMin { - clen = int(ct.bFixMin ^ bd) - } else { - d.d.errorf("readContainerLen: %s: hex: %x, decimal: %d", msgBadDesc, bd, bd) - return - } - d.bdRead = false - return -} - -func (d *msgpackDecDriver) ReadMapStart() int { - return d.readContainerLen(msgpackContainerMap) -} - -func (d *msgpackDecDriver) ReadArrayStart() int { - return d.readContainerLen(msgpackContainerList) -} - -func (d *msgpackDecDriver) readExtLen() (clen int) { - switch d.bd { - case mpNil: - clen = -1 // to represent nil - case mpFixExt1: - clen = 1 - case mpFixExt2: - clen = 2 - case mpFixExt4: - clen = 4 - case mpFixExt8: - clen = 8 - case mpFixExt16: - clen = 16 - case mpExt8: - clen = int(d.r.readn1()) - case mpExt16: - clen = int(bigen.Uint16(d.r.readx(2))) - case mpExt32: - clen = int(bigen.Uint32(d.r.readx(4))) - default: - d.d.errorf("decoding ext bytes: found unexpected byte: %x", d.bd) - return - } - return -} - -func (d *msgpackDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { - if xtag > 0xff { - d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag) - return - } - realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) - realxtag = uint64(realxtag1) - if ext == nil { - re := rv.(*RawExt) - re.Tag = realxtag - re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) - } else { - ext.ReadExt(rv, xbs) - } - return -} - -func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { - if !d.bdRead { - d.readNextBd() - } - xbd := d.bd - if xbd == mpBin8 || xbd == mpBin16 || xbd == mpBin32 { - xbs = d.DecodeBytes(nil, false, true) - } else if xbd == mpStr8 || xbd == mpStr16 || xbd == mpStr32 || - (xbd >= mpFixStrMin && xbd <= mpFixStrMax) { - xbs = 
d.DecodeBytes(nil, true, true) - } else { - clen := d.readExtLen() - xtag = d.r.readn1() - if verifyTag && xtag != tag { - d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) - return - } - xbs = d.r.readx(clen) - } - d.bdRead = false - return -} - -//-------------------------------------------------- - -//MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format. -type MsgpackHandle struct { - BasicHandle - - // RawToString controls how raw bytes are decoded into a nil interface{}. - RawToString bool - - // WriteExt flag supports encoding configured extensions with extension tags. - // It also controls whether other elements of the new spec are encoded (ie Str8). - // - // With WriteExt=false, configured extensions are serialized as raw bytes - // and Str8 is not encoded. - // - // A stream can still be decoded into a typed value, provided an appropriate value - // is provided, but the type cannot be inferred from the stream. If no appropriate - // type is provided (e.g. decoding into a nil interface{}), you get back - // a []byte or string based on the setting of RawToString. - WriteExt bool - binaryEncodingType -} - -func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { - return h.SetExt(rt, tag, &setExtWrapper{b: ext}) -} - -func (h *MsgpackHandle) newEncDriver(e *Encoder) encDriver { - return &msgpackEncDriver{e: e, w: e.w, h: h} -} - -func (h *MsgpackHandle) newDecDriver(d *Decoder) decDriver { - return &msgpackDecDriver{d: d, r: d.r, h: h, br: d.bytes} -} - -func (e *msgpackEncDriver) reset() { - e.w = e.e.w -} - -func (d *msgpackDecDriver) reset() { - d.r = d.d.r - d.bd, d.bdRead = 0, false -} - -//-------------------------------------------------- - -type msgpackSpecRpcCodec struct { - rpcCodec -} - -// /////////////// Spec RPC Codec /////////////////// -func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { - // WriteRequest can write to both a Go service, and other services that do - // not abide by the 1 argument rule of a Go service. - // We discriminate based on if the body is a MsgpackSpecRpcMultiArgs - var bodyArr []interface{} - if m, ok := body.(MsgpackSpecRpcMultiArgs); ok { - bodyArr = ([]interface{})(m) - } else { - bodyArr = []interface{}{body} - } - r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr} - return c.write(r2, nil, false, true) -} - -func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { - var moe interface{} - if r.Error != "" { - moe = r.Error - } - if moe != nil && body != nil { - body = nil - } - r2 := []interface{}{1, uint32(r.Seq), moe, body} - return c.write(r2, nil, false, true) -} - -func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error { - return c.parseCustomHeader(1, &r.Seq, &r.Error) -} - -func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error { - return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod) -} - -func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error { - if body == nil { // read and discard - return c.read(nil) - } - bodyArr := []interface{}{body} - return c.read(&bodyArr) -} - -func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) { - - if c.isClosed() { - return io.EOF - } - - // We read the response header by hand - // so that the body can be decoded on its own from the stream at a later time. 
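For context on the handle options documented above, here is a hedged sketch of a round trip through the upstream `github.com/ugorji/go/codec` package (the import path this vendored copy mirrors). With `RawToString` set, msgpack raw/str data decodes into Go strings when the target is a bare `interface{}`:

```go
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	h := &codec.MsgpackHandle{RawToString: true}

	var buf []byte
	if err := codec.NewEncoderBytes(&buf, h).Encode(map[string]int{"replicas": 3}); err != nil {
		panic(err)
	}

	var out interface{}
	if err := codec.NewDecoderBytes(buf, h).Decode(&out); err != nil {
		panic(err)
	}
	// With RawToString=true the map key comes back as a string
	// rather than a []byte.
	fmt.Printf("%#v\n", out)
}
```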
- - const fia byte = 0x94 //four item array descriptor value - // Not sure why the panic of EOF is swallowed above. - // if bs1 := c.dec.r.readn1(); bs1 != fia { - // err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, bs1) - // return - // } - var b byte - b, err = c.br.ReadByte() - if err != nil { - return - } - if b != fia { - err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, b) - return - } - - if err = c.read(&b); err != nil { - return - } - if b != expectTypeByte { - err = fmt.Errorf("Unexpected byte descriptor in header. Expecting %v. Received %v", expectTypeByte, b) - return - } - if err = c.read(msgid); err != nil { - return - } - if err = c.read(methodOrError); err != nil { - return - } - return -} - -//-------------------------------------------------- - -// msgpackSpecRpc is the implementation of Rpc that uses custom communication protocol -// as defined in the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md -type msgpackSpecRpc struct{} - -// MsgpackSpecRpc implements Rpc using the communication protocol defined in -// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md . -// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. -var MsgpackSpecRpc msgpackSpecRpc - -func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { - return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} -} - -func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { - return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} -} - -var _ decDriver = (*msgpackDecDriver)(nil) -var _ encDriver = (*msgpackEncDriver)(nil) diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/noop.go b/cluster-autoscaler/vendor/github.com/ugorji/go/codec/noop.go deleted file mode 100644 index cfee3d084dc2..000000000000 --- a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/noop.go +++ /dev/null @@ -1,213 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "math/rand" - "time" -) - -// NoopHandle returns a no-op handle. It basically does nothing. -// It is only useful for benchmarking, as it gives an idea of the -// overhead from the codec framework. -// -// LIBRARY USERS: *** DO NOT USE *** -func NoopHandle(slen int) *noopHandle { - h := noopHandle{} - h.rand = rand.New(rand.NewSource(time.Now().UnixNano())) - h.B = make([][]byte, slen) - h.S = make([]string, slen) - for i := 0; i < len(h.S); i++ { - b := make([]byte, i+1) - for j := 0; j < len(b); j++ { - b[j] = 'a' + byte(i) - } - h.B[i] = b - h.S[i] = string(b) - } - return &h -} - -// noopHandle does nothing. -// It is used to simulate the overhead of the codec framework. -type noopHandle struct { - BasicHandle - binaryEncodingType - noopDrv // noopDrv is unexported here, so we can get a copy of it when needed. -} - -type noopDrv struct { - d *Decoder - e *Encoder - i int - S []string - B [][]byte - mks []bool // stack. if map (true), else if array (false) - mk bool // top of stack. what container are we on? map or array? - ct valueType // last response for IsContainerType. 
- cb int // counter for ContainerType - rand *rand.Rand -} - -func (h *noopDrv) r(v int) int { return h.rand.Intn(v) } -func (h *noopDrv) m(v int) int { h.i++; return h.i % v } - -func (h *noopDrv) newEncDriver(e *Encoder) encDriver { h.e = e; return h } -func (h *noopDrv) newDecDriver(d *Decoder) decDriver { h.d = d; return h } - -func (h *noopDrv) reset() {} -func (h *noopDrv) uncacheRead() {} - -// --- encDriver - -// stack functions (for map and array) -func (h *noopDrv) start(b bool) { - // println("start", len(h.mks)+1) - h.mks = append(h.mks, b) - h.mk = b -} -func (h *noopDrv) end() { - // println("end: ", len(h.mks)-1) - h.mks = h.mks[:len(h.mks)-1] - if len(h.mks) > 0 { - h.mk = h.mks[len(h.mks)-1] - } else { - h.mk = false - } -} - -func (h *noopDrv) EncodeBuiltin(rt uintptr, v interface{}) {} -func (h *noopDrv) EncodeNil() {} -func (h *noopDrv) EncodeInt(i int64) {} -func (h *noopDrv) EncodeUint(i uint64) {} -func (h *noopDrv) EncodeBool(b bool) {} -func (h *noopDrv) EncodeFloat32(f float32) {} -func (h *noopDrv) EncodeFloat64(f float64) {} -func (h *noopDrv) EncodeRawExt(re *RawExt, e *Encoder) {} -func (h *noopDrv) EncodeArrayStart(length int) { h.start(true) } -func (h *noopDrv) EncodeMapStart(length int) { h.start(false) } -func (h *noopDrv) EncodeEnd() { h.end() } - -func (h *noopDrv) EncodeString(c charEncoding, v string) {} -func (h *noopDrv) EncodeSymbol(v string) {} -func (h *noopDrv) EncodeStringBytes(c charEncoding, v []byte) {} - -func (h *noopDrv) EncodeExt(rv interface{}, xtag uint64, ext Ext, e *Encoder) {} - -// ---- decDriver -func (h *noopDrv) initReadNext() {} -func (h *noopDrv) CheckBreak() bool { return false } -func (h *noopDrv) IsBuiltinType(rt uintptr) bool { return false } -func (h *noopDrv) DecodeBuiltin(rt uintptr, v interface{}) {} -func (h *noopDrv) DecodeInt(bitsize uint8) (i int64) { return int64(h.m(15)) } -func (h *noopDrv) DecodeUint(bitsize uint8) (ui uint64) { return uint64(h.m(35)) } -func (h *noopDrv) DecodeFloat(chkOverflow32 bool) (f float64) { return float64(h.m(95)) } -func (h *noopDrv) DecodeBool() (b bool) { return h.m(2) == 0 } -func (h *noopDrv) DecodeString() (s string) { return h.S[h.m(8)] } - -// func (h *noopDrv) DecodeStringAsBytes(bs []byte) []byte { return h.DecodeBytes(bs) } - -func (h *noopDrv) DecodeBytes(bs []byte, isstring, zerocopy bool) []byte { return h.B[h.m(len(h.B))] } - -func (h *noopDrv) ReadEnd() { h.end() } - -// toggle map/slice -func (h *noopDrv) ReadMapStart() int { h.start(true); return h.m(10) } -func (h *noopDrv) ReadArrayStart() int { h.start(false); return h.m(10) } - -func (h *noopDrv) ContainerType() (vt valueType) { - // return h.m(2) == 0 - // handle kStruct, which will bomb if it calls this and doesn't get back a map or array. - // consequently, if the return value is not map or array, reset it to one of them based on h.m(7) % 2 - // for kstruct: at least one out of every 2 times, return one of valueTypeMap or Array (else kstruct bombs) - // however, every 10th time it is called, we just return something else.
- var vals = [...]valueType{valueTypeArray, valueTypeMap} - // ------------ TAKE ------------ - // if h.cb%2 == 0 { - // if h.ct == valueTypeMap || h.ct == valueTypeArray { - // } else { - // h.ct = vals[h.m(2)] - // } - // } else if h.cb%5 == 0 { - // h.ct = valueType(h.m(8)) - // } else { - // h.ct = vals[h.m(2)] - // } - // ------------ TAKE ------------ - // if h.cb%16 == 0 { - // h.ct = valueType(h.cb % 8) - // } else { - // h.ct = vals[h.cb%2] - // } - h.ct = vals[h.cb%2] - h.cb++ - return h.ct - - // if h.ct == valueTypeNil || h.ct == valueTypeString || h.ct == valueTypeBytes { - // return h.ct - // } - // return valueTypeUnset - // TODO: may need to tweak this so it works. - // if h.ct == valueTypeMap && vt == valueTypeArray || h.ct == valueTypeArray && vt == valueTypeMap { - // h.cb = !h.cb - // h.ct = vt - // return h.cb - // } - // // go in a loop and check it. - // h.ct = vt - // h.cb = h.m(7) == 0 - // return h.cb -} -func (h *noopDrv) TryDecodeAsNil() bool { - if h.mk { - return false - } else { - return h.m(8) == 0 - } -} -func (h *noopDrv) DecodeExt(rv interface{}, xtag uint64, ext Ext) uint64 { - return 0 -} - -func (h *noopDrv) DecodeNaked() { - // use h.r (random) not h.m() because h.m() could cause the same value to be given. - var sk int - if h.mk { - // if mapkey, do not support values of nil OR bytes, array, map or rawext - sk = h.r(7) + 1 - } else { - sk = h.r(12) - } - n := &h.d.n - switch sk { - case 0: - n.v = valueTypeNil - case 1: - n.v, n.b = valueTypeBool, false - case 2: - n.v, n.b = valueTypeBool, true - case 3: - n.v, n.i = valueTypeInt, h.DecodeInt(64) - case 4: - n.v, n.u = valueTypeUint, h.DecodeUint(64) - case 5: - n.v, n.f = valueTypeFloat, h.DecodeFloat(true) - case 6: - n.v, n.f = valueTypeFloat, h.DecodeFloat(false) - case 7: - n.v, n.s = valueTypeString, h.DecodeString() - case 8: - n.v, n.l = valueTypeBytes, h.B[h.m(len(h.B))] - case 9: - n.v = valueTypeArray - case 10: - n.v = valueTypeMap - default: - n.v = valueTypeExt - n.u = h.DecodeUint(64) - n.l = h.B[h.m(len(h.B))] - } - h.ct = n.v - return -} diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/prebuild.go b/cluster-autoscaler/vendor/github.com/ugorji/go/codec/prebuild.go deleted file mode 100644 index 2353263e88af..000000000000 --- a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/prebuild.go +++ /dev/null @@ -1,3 +0,0 @@ -package codec - -//go:generate bash prebuild.sh diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/prebuild.sh b/cluster-autoscaler/vendor/github.com/ugorji/go/codec/prebuild.sh deleted file mode 100755 index 909f4bb0f24d..000000000000 --- a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/prebuild.sh +++ /dev/null @@ -1,199 +0,0 @@ -#!/bin/bash - -# _needgen is a helper function to tell if we need to generate files for msgp, codecgen. -_needgen() { - local a="$1" - zneedgen=0 - if [[ ! -e "$a" ]] - then - zneedgen=1 - echo 1 - return 0 - fi - for i in `ls -1 *.go.tmpl gen.go values_test.go` - do - if [[ "$a" -ot "$i" ]] - then - zneedgen=1 - echo 1 - return 0 - fi - done - echo 0 -} - -# _build generates fast-path.go and gen-helper.go. -# -# It is needed because there is some dependency between the generated code -# and the other classes. Consequently, we have to totally remove the -# generated files and put stubs in place, before calling "go run" again -# to recreate them. -_build() { - if ! 
[[ "${zforce}" == "1" || - "1" == $( _needgen "fast-path.generated.go" ) || - "1" == $( _needgen "gen-helper.generated.go" ) || - "1" == $( _needgen "gen.generated.go" ) || - 1 == 0 ]] - then - return 0 - fi - - # echo "Running prebuild" - if [ "${zbak}" == "1" ] - then - # echo "Backing up old generated files" - _zts=`date '+%m%d%Y_%H%M%S'` - _gg=".generated.go" - [ -e "gen-helper${_gg}" ] && mv gen-helper${_gg} gen-helper${_gg}__${_zts}.bak - [ -e "fast-path${_gg}" ] && mv fast-path${_gg} fast-path${_gg}__${_zts}.bak - # [ -e "safe${_gg}" ] && mv safe${_gg} safe${_gg}__${_zts}.bak - # [ -e "unsafe${_gg}" ] && mv unsafe${_gg} unsafe${_gg}__${_zts}.bak - else - rm -f fast-path.generated.go gen.generated.go gen-helper.generated.go \ - *safe.generated.go *_generated_test.go *.generated_ffjson_expose.go - fi - - cat > gen.generated.go <> gen.generated.go < gen-dec-map.go.tmpl - - cat >> gen.generated.go <> gen.generated.go < gen-dec-array.go.tmpl - - cat >> gen.generated.go < gen-from-tmpl.codec.generated.go < gen-from-tmpl.generated.go < math.MaxInt64 { - // d.d.errorf("decIntAny: Integer out of range for signed int64: %v", ui) - // return - // } - return -} - -func (d *simpleDecDriver) DecodeInt(bitsize uint8) (i int64) { - ui, neg := d.decCheckInteger() - i, overflow := chkOvf.SignedInt(ui) - if overflow { - d.d.errorf("simple: overflow converting %v to signed integer", ui) - return - } - if neg { - i = -i - } - if chkOvf.Int(i, bitsize) { - d.d.errorf("simple: overflow integer: %v", i) - return - } - d.bdRead = false - return -} - -func (d *simpleDecDriver) DecodeUint(bitsize uint8) (ui uint64) { - ui, neg := d.decCheckInteger() - if neg { - d.d.errorf("Assigning negative signed value to unsigned type") - return - } - if chkOvf.Uint(ui, bitsize) { - d.d.errorf("simple: overflow integer: %v", ui) - return - } - d.bdRead = false - return -} - -func (d *simpleDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == simpleVdFloat32 { - f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) - } else if d.bd == simpleVdFloat64 { - f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) - } else { - if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 { - f = float64(d.DecodeInt(64)) - } else { - d.d.errorf("Float only valid from float32/64: Invalid descriptor: %v", d.bd) - return - } - } - if chkOverflow32 && chkOvf.Float32(f) { - d.d.errorf("msgpack: float32 overflow: %v", f) - return - } - d.bdRead = false - return -} - -// bool can be decoded from bool only (single byte). 
-func (d *simpleDecDriver) DecodeBool() (b bool) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == simpleVdTrue { - b = true - } else if d.bd == simpleVdFalse { - } else { - d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) - return - } - d.bdRead = false - return -} - -func (d *simpleDecDriver) ReadMapStart() (length int) { - d.bdRead = false - return d.decLen() -} - -func (d *simpleDecDriver) ReadArrayStart() (length int) { - d.bdRead = false - return d.decLen() -} - -func (d *simpleDecDriver) decLen() int { - switch d.bd % 8 { - case 0: - return 0 - case 1: - return int(d.r.readn1()) - case 2: - return int(bigen.Uint16(d.r.readx(2))) - case 3: - ui := uint64(bigen.Uint32(d.r.readx(4))) - if chkOvf.Uint(ui, intBitsize) { - d.d.errorf("simple: overflow integer: %v", ui) - return 0 - } - return int(ui) - case 4: - ui := bigen.Uint64(d.r.readx(8)) - if chkOvf.Uint(ui, intBitsize) { - d.d.errorf("simple: overflow integer: %v", ui) - return 0 - } - return int(ui) - } - d.d.errorf("decLen: Cannot read length: bd%%8 must be in range 0..4. Got: %d", d.bd%8) - return -1 -} - -func (d *simpleDecDriver) DecodeString() (s string) { - return string(d.DecodeBytes(d.b[:], true, true)) -} - -func (d *simpleDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { - if !d.bdRead { - d.readNextBd() - } - if d.bd == simpleVdNil { - d.bdRead = false - return - } - clen := d.decLen() - d.bdRead = false - if zerocopy { - if d.br { - return d.r.readx(clen) - } else if len(bs) == 0 { - bs = d.b[:] - } - } - return decByteSlice(d.r, clen, bs) -} - -func (d *simpleDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { - if xtag > 0xff { - d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag) - return - } - realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) - realxtag = uint64(realxtag1) - if ext == nil { - re := rv.(*RawExt) - re.Tag = realxtag - re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) - } else { - ext.ReadExt(rv, xbs) - } - return -} - -func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { - if !d.bdRead { - d.readNextBd() - } - switch d.bd { - case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: - l := d.decLen() - xtag = d.r.readn1() - if verifyTag && xtag != tag { - d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) - return - } - xbs = d.r.readx(l) - case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: - xbs = d.DecodeBytes(nil, false, true) - default: - d.d.errorf("Invalid d.bd for extensions (Expecting extensions or byte array). 
Got: 0x%x", d.bd) - return - } - d.bdRead = false - return -} - -func (d *simpleDecDriver) DecodeNaked() { - if !d.bdRead { - d.readNextBd() - } - - n := &d.d.n - var decodeFurther bool - - switch d.bd { - case simpleVdNil: - n.v = valueTypeNil - case simpleVdFalse: - n.v = valueTypeBool - n.b = false - case simpleVdTrue: - n.v = valueTypeBool - n.b = true - case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: - if d.h.SignedInteger { - n.v = valueTypeInt - n.i = d.DecodeInt(64) - } else { - n.v = valueTypeUint - n.u = d.DecodeUint(64) - } - case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: - n.v = valueTypeInt - n.i = d.DecodeInt(64) - case simpleVdFloat32: - n.v = valueTypeFloat - n.f = d.DecodeFloat(true) - case simpleVdFloat64: - n.v = valueTypeFloat - n.f = d.DecodeFloat(false) - case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: - n.v = valueTypeString - n.s = d.DecodeString() - case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: - n.v = valueTypeBytes - n.l = d.DecodeBytes(nil, false, false) - case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: - n.v = valueTypeExt - l := d.decLen() - n.u = uint64(d.r.readn1()) - n.l = d.r.readx(l) - case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: - n.v = valueTypeArray - decodeFurther = true - case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: - n.v = valueTypeMap - decodeFurther = true - default: - d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd) - } - - if !decodeFurther { - d.bdRead = false - } - return -} - -//------------------------------------ - -// SimpleHandle is a Handle for a very simple encoding format. -// -// simple is a simplistic codec similar to binc, but not as compact. -// - Encoding of a value is always preceded by the descriptor byte (bd) -// - True, false, nil are encoded fully in 1 byte (the descriptor) -// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte). -// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers. -// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte) -// - Length of containers (strings, bytes, array, map, extensions) -// are encoded in 0, 1, 2, 4 or 8 bytes. -// Zero-length containers have no length encoded. -// For others, the number of bytes is given by pow(2, bd%3) -// - maps are encoded as [bd] [length] [[key][value]]... -// - arrays are encoded as [bd] [length] [value]... -// - extensions are encoded as [bd] [length] [tag] [byte]... -// - strings/bytearrays are encoded as [bd] [length] [byte]... -// -// The full spec will be published soon.
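The container-length scheme this doc comment describes is the dispatch implemented by the deleted `decLen` above: the low three bits of the descriptor byte select how many big-endian length bytes follow. A small standalone sketch of that mapping (illustrative only, not the vendored code, which also reads the length bytes from the stream):

```go
package main

import "fmt"

// lenBytes mirrors the decLen dispatch from the deleted simpleDecDriver:
// bd%8 selects 0, 1, 2, 4 or 8 big-endian bytes of container length.
func lenBytes(bd byte) (n int, ok bool) {
	switch bd % 8 {
	case 0:
		return 0, true // zero-length container, no length bytes follow
	case 1:
		return 1, true // length in one byte (uint8)
	case 2:
		return 2, true // length in a big-endian uint16
	case 3:
		return 4, true // length in a big-endian uint32
	case 4:
		return 8, true // length in a big-endian uint64
	}
	return 0, false // 5..7 are not valid length selectors
}

func main() {
	for bd := byte(0); bd < 8; bd++ {
		n, ok := lenBytes(bd)
		fmt.Printf("bd%%8=%d -> %d length byte(s), valid=%v\n", bd, n, ok)
	}
}
```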
-type SimpleHandle struct { - BasicHandle - binaryEncodingType -} - -func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { - return h.SetExt(rt, tag, &setExtWrapper{b: ext}) -} - -func (h *SimpleHandle) newEncDriver(e *Encoder) encDriver { - return &simpleEncDriver{e: e, w: e.w, h: h} -} - -func (h *SimpleHandle) newDecDriver(d *Decoder) decDriver { - return &simpleDecDriver{d: d, r: d.r, h: h, br: d.bytes} -} - -func (e *simpleEncDriver) reset() { - e.w = e.e.w -} - -func (d *simpleDecDriver) reset() { - d.r = d.d.r - d.bd, d.bdRead = 0, false -} - -var _ decDriver = (*simpleDecDriver)(nil) -var _ encDriver = (*simpleEncDriver)(nil) diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json b/cluster-autoscaler/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json deleted file mode 100644 index 9028586711e4..000000000000 --- a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json +++ /dev/null @@ -1,639 +0,0 @@ -[ - { - "cbor": "AA==", - "hex": "00", - "roundtrip": true, - "decoded": 0 - }, - { - "cbor": "AQ==", - "hex": "01", - "roundtrip": true, - "decoded": 1 - }, - { - "cbor": "Cg==", - "hex": "0a", - "roundtrip": true, - "decoded": 10 - }, - { - "cbor": "Fw==", - "hex": "17", - "roundtrip": true, - "decoded": 23 - }, - { - "cbor": "GBg=", - "hex": "1818", - "roundtrip": true, - "decoded": 24 - }, - { - "cbor": "GBk=", - "hex": "1819", - "roundtrip": true, - "decoded": 25 - }, - { - "cbor": "GGQ=", - "hex": "1864", - "roundtrip": true, - "decoded": 100 - }, - { - "cbor": "GQPo", - "hex": "1903e8", - "roundtrip": true, - "decoded": 1000 - }, - { - "cbor": "GgAPQkA=", - "hex": "1a000f4240", - "roundtrip": true, - "decoded": 1000000 - }, - { - "cbor": "GwAAAOjUpRAA", - "hex": "1b000000e8d4a51000", - "roundtrip": true, - "decoded": 1000000000000 - }, - { - "cbor": "G///////////", - "hex": "1bffffffffffffffff", - "roundtrip": true, - "decoded": 18446744073709551615 - }, - { - "cbor": "wkkBAAAAAAAAAAA=", - "hex": "c249010000000000000000", - "roundtrip": true, - "decoded": 18446744073709551616 - }, - { - "cbor": "O///////////", - "hex": "3bffffffffffffffff", - "roundtrip": true, - "decoded": -18446744073709551616, - "skip": true - }, - { - "cbor": "w0kBAAAAAAAAAAA=", - "hex": "c349010000000000000000", - "roundtrip": true, - "decoded": -18446744073709551617 - }, - { - "cbor": "IA==", - "hex": "20", - "roundtrip": true, - "decoded": -1 - }, - { - "cbor": "KQ==", - "hex": "29", - "roundtrip": true, - "decoded": -10 - }, - { - "cbor": "OGM=", - "hex": "3863", - "roundtrip": true, - "decoded": -100 - }, - { - "cbor": "OQPn", - "hex": "3903e7", - "roundtrip": true, - "decoded": -1000 - }, - { - "cbor": "+QAA", - "hex": "f90000", - "roundtrip": true, - "decoded": 0.0 - }, - { - "cbor": "+YAA", - "hex": "f98000", - "roundtrip": true, - "decoded": -0.0 - }, - { - "cbor": "+TwA", - "hex": "f93c00", - "roundtrip": true, - "decoded": 1.0 - }, - { - "cbor": "+z/xmZmZmZma", - "hex": "fb3ff199999999999a", - "roundtrip": true, - "decoded": 1.1 - }, - { - "cbor": "+T4A", - "hex": "f93e00", - "roundtrip": true, - "decoded": 1.5 - }, - { - "cbor": "+Xv/", - "hex": "f97bff", - "roundtrip": true, - "decoded": 65504.0 - }, - { - "cbor": "+kfDUAA=", - "hex": "fa47c35000", - "roundtrip": true, - "decoded": 100000.0 - }, - { - "cbor": "+n9///8=", - "hex": "fa7f7fffff", - "roundtrip": true, - "decoded": 3.4028234663852886e+38 - }, - { - "cbor": "+3435DyIAHWc", - "hex": "fb7e37e43c8800759c", - "roundtrip": true, - "decoded": 
1.0e+300 - }, - { - "cbor": "+QAB", - "hex": "f90001", - "roundtrip": true, - "decoded": 5.960464477539063e-08 - }, - { - "cbor": "+QQA", - "hex": "f90400", - "roundtrip": true, - "decoded": 6.103515625e-05 - }, - { - "cbor": "+cQA", - "hex": "f9c400", - "roundtrip": true, - "decoded": -4.0 - }, - { - "cbor": "+8AQZmZmZmZm", - "hex": "fbc010666666666666", - "roundtrip": true, - "decoded": -4.1 - }, - { - "cbor": "+XwA", - "hex": "f97c00", - "roundtrip": true, - "diagnostic": "Infinity" - }, - { - "cbor": "+X4A", - "hex": "f97e00", - "roundtrip": true, - "diagnostic": "NaN" - }, - { - "cbor": "+fwA", - "hex": "f9fc00", - "roundtrip": true, - "diagnostic": "-Infinity" - }, - { - "cbor": "+n+AAAA=", - "hex": "fa7f800000", - "roundtrip": false, - "diagnostic": "Infinity" - }, - { - "cbor": "+n/AAAA=", - "hex": "fa7fc00000", - "roundtrip": false, - "diagnostic": "NaN" - }, - { - "cbor": "+v+AAAA=", - "hex": "faff800000", - "roundtrip": false, - "diagnostic": "-Infinity" - }, - { - "cbor": "+3/wAAAAAAAA", - "hex": "fb7ff0000000000000", - "roundtrip": false, - "diagnostic": "Infinity" - }, - { - "cbor": "+3/4AAAAAAAA", - "hex": "fb7ff8000000000000", - "roundtrip": false, - "diagnostic": "NaN" - }, - { - "cbor": "+//wAAAAAAAA", - "hex": "fbfff0000000000000", - "roundtrip": false, - "diagnostic": "-Infinity" - }, - { - "cbor": "9A==", - "hex": "f4", - "roundtrip": true, - "decoded": false - }, - { - "cbor": "9Q==", - "hex": "f5", - "roundtrip": true, - "decoded": true - }, - { - "cbor": "9g==", - "hex": "f6", - "roundtrip": true, - "decoded": null - }, - { - "cbor": "9w==", - "hex": "f7", - "roundtrip": true, - "diagnostic": "undefined" - }, - { - "cbor": "8A==", - "hex": "f0", - "roundtrip": true, - "diagnostic": "simple(16)" - }, - { - "cbor": "+Bg=", - "hex": "f818", - "roundtrip": true, - "diagnostic": "simple(24)" - }, - { - "cbor": "+P8=", - "hex": "f8ff", - "roundtrip": true, - "diagnostic": "simple(255)" - }, - { - "cbor": "wHQyMDEzLTAzLTIxVDIwOjA0OjAwWg==", - "hex": "c074323031332d30332d32315432303a30343a30305a", - "roundtrip": true, - "diagnostic": "0(\"2013-03-21T20:04:00Z\")" - }, - { - "cbor": "wRpRS2ew", - "hex": "c11a514b67b0", - "roundtrip": true, - "diagnostic": "1(1363896240)" - }, - { - "cbor": "wftB1FLZ7CAAAA==", - "hex": "c1fb41d452d9ec200000", - "roundtrip": true, - "diagnostic": "1(1363896240.5)" - }, - { - "cbor": "10QBAgME", - "hex": "d74401020304", - "roundtrip": true, - "diagnostic": "23(h'01020304')" - }, - { - "cbor": "2BhFZElFVEY=", - "hex": "d818456449455446", - "roundtrip": true, - "diagnostic": "24(h'6449455446')" - }, - { - "cbor": "2CB2aHR0cDovL3d3dy5leGFtcGxlLmNvbQ==", - "hex": "d82076687474703a2f2f7777772e6578616d706c652e636f6d", - "roundtrip": true, - "diagnostic": "32(\"http://www.example.com\")" - }, - { - "cbor": "QA==", - "hex": "40", - "roundtrip": true, - "diagnostic": "h''" - }, - { - "cbor": "RAECAwQ=", - "hex": "4401020304", - "roundtrip": true, - "diagnostic": "h'01020304'" - }, - { - "cbor": "YA==", - "hex": "60", - "roundtrip": true, - "decoded": "" - }, - { - "cbor": "YWE=", - "hex": "6161", - "roundtrip": true, - "decoded": "a" - }, - { - "cbor": "ZElFVEY=", - "hex": "6449455446", - "roundtrip": true, - "decoded": "IETF" - }, - { - "cbor": "YiJc", - "hex": "62225c", - "roundtrip": true, - "decoded": "\"\\" - }, - { - "cbor": "YsO8", - "hex": "62c3bc", - "roundtrip": true, - "decoded": "ü" - }, - { - "cbor": "Y+awtA==", - "hex": "63e6b0b4", - "roundtrip": true, - "decoded": "水" - }, - { - "cbor": "ZPCQhZE=", - "hex": "64f0908591", - "roundtrip": 
true, - "decoded": "𐅑" - }, - { - "cbor": "gA==", - "hex": "80", - "roundtrip": true, - "decoded": [ - - ] - }, - { - "cbor": "gwECAw==", - "hex": "83010203", - "roundtrip": true, - "decoded": [ - 1, - 2, - 3 - ] - }, - { - "cbor": "gwGCAgOCBAU=", - "hex": "8301820203820405", - "roundtrip": true, - "decoded": [ - 1, - [ - 2, - 3 - ], - [ - 4, - 5 - ] - ] - }, - { - "cbor": "mBkBAgMEBQYHCAkKCwwNDg8QERITFBUWFxgYGBk=", - "hex": "98190102030405060708090a0b0c0d0e0f101112131415161718181819", - "roundtrip": true, - "decoded": [ - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - 17, - 18, - 19, - 20, - 21, - 22, - 23, - 24, - 25 - ] - }, - { - "cbor": "oA==", - "hex": "a0", - "roundtrip": true, - "decoded": { - } - }, - { - "cbor": "ogECAwQ=", - "hex": "a201020304", - "roundtrip": true, - "skip": true, - "diagnostic": "{1: 2, 3: 4}" - }, - { - "cbor": "omFhAWFiggID", - "hex": "a26161016162820203", - "roundtrip": true, - "decoded": { - "a": 1, - "b": [ - 2, - 3 - ] - } - }, - { - "cbor": "gmFhoWFiYWM=", - "hex": "826161a161626163", - "roundtrip": true, - "decoded": [ - "a", - { - "b": "c" - } - ] - }, - { - "cbor": "pWFhYUFhYmFCYWNhQ2FkYURhZWFF", - "hex": "a56161614161626142616361436164614461656145", - "roundtrip": true, - "decoded": { - "a": "A", - "b": "B", - "c": "C", - "d": "D", - "e": "E" - } - }, - { - "cbor": "X0IBAkMDBAX/", - "hex": "5f42010243030405ff", - "roundtrip": false, - "skip": true, - "diagnostic": "(_ h'0102', h'030405')" - }, - { - "cbor": "f2VzdHJlYWRtaW5n/w==", - "hex": "7f657374726561646d696e67ff", - "roundtrip": false, - "decoded": "streaming" - }, - { - "cbor": "n/8=", - "hex": "9fff", - "roundtrip": false, - "decoded": [ - - ] - }, - { - "cbor": "nwGCAgOfBAX//w==", - "hex": "9f018202039f0405ffff", - "roundtrip": false, - "decoded": [ - 1, - [ - 2, - 3 - ], - [ - 4, - 5 - ] - ] - }, - { - "cbor": "nwGCAgOCBAX/", - "hex": "9f01820203820405ff", - "roundtrip": false, - "decoded": [ - 1, - [ - 2, - 3 - ], - [ - 4, - 5 - ] - ] - }, - { - "cbor": "gwGCAgOfBAX/", - "hex": "83018202039f0405ff", - "roundtrip": false, - "decoded": [ - 1, - [ - 2, - 3 - ], - [ - 4, - 5 - ] - ] - }, - { - "cbor": "gwGfAgP/ggQF", - "hex": "83019f0203ff820405", - "roundtrip": false, - "decoded": [ - 1, - [ - 2, - 3 - ], - [ - 4, - 5 - ] - ] - }, - { - "cbor": "nwECAwQFBgcICQoLDA0ODxAREhMUFRYXGBgYGf8=", - "hex": "9f0102030405060708090a0b0c0d0e0f101112131415161718181819ff", - "roundtrip": false, - "decoded": [ - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - 17, - 18, - 19, - 20, - 21, - 22, - 23, - 24, - 25 - ] - }, - { - "cbor": "v2FhAWFinwID//8=", - "hex": "bf61610161629f0203ffff", - "roundtrip": false, - "decoded": { - "a": 1, - "b": [ - 2, - 3 - ] - } - }, - { - "cbor": "gmFhv2FiYWP/", - "hex": "826161bf61626163ff", - "roundtrip": false, - "decoded": [ - "a", - { - "b": "c" - } - ] - }, - { - "cbor": "v2NGdW71Y0FtdCH/", - "hex": "bf6346756ef563416d7421ff", - "roundtrip": false, - "decoded": { - "Fun": true, - "Amt": -2 - } - } -] diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/test.py b/cluster-autoscaler/vendor/github.com/ugorji/go/codec/test.py deleted file mode 100755 index c0ad20b34cb0..000000000000 --- a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/test.py +++ /dev/null @@ -1,126 +0,0 @@ -#!/usr/bin/env python - -# This will create golden files in a directory passed to it. 
-# A Test calls this internally to create the golden files -# So it can process them (so we don't have to checkin the files). - -# Ensure msgpack-python and cbor are installed first, using: -# sudo apt-get install python-dev -# sudo apt-get install python-pip -# pip install --user msgpack-python msgpack-rpc-python cbor - -# Ensure all "string" keys are utf strings (else encoded as bytes) - -import cbor, msgpack, msgpackrpc, sys, os, threading - -def get_test_data_list(): - # get list with all primitive types, and a combo type - l0 = [ - -8, - -1616, - -32323232, - -6464646464646464, - 192, - 1616, - 32323232, - 6464646464646464, - 192, - -3232.0, - -6464646464.0, - 3232.0, - 6464.0, - 6464646464.0, - False, - True, - u"null", - None, - u"someday", - 1328176922000002000, - u"", - -2206187877999998000, - u"bytestring", - 270, - u"none", - -2013855847999995777, - #-6795364578871345152, - ] - l1 = [ - { "true": True, - "false": False }, - { "true": u"True", - "false": False, - "uint16(1616)": 1616 }, - { "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ], - "int32":32323232, "bool": True, - "LONG STRING": u"123456789012345678901234567890123456789012345678901234567890", - "SHORT STRING": u"1234567890" }, - { True: "true", 138: False, "false": 200 } - ] - - l = [] - l.extend(l0) - l.append(l0) - l.append(1) - l.extend(l1) - return l - -def build_test_data(destdir): - l = get_test_data_list() - for i in range(len(l)): - # packer = msgpack.Packer() - serialized = msgpack.dumps(l[i]) - f = open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb') - f.write(serialized) - f.close() - serialized = cbor.dumps(l[i]) - f = open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb') - f.write(serialized) - f.close() - -def doRpcServer(port, stopTimeSec): - class EchoHandler(object): - def Echo123(self, msg1, msg2, msg3): - return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3)) - def EchoStruct(self, msg): - return ("%s" % msg) - - addr = msgpackrpc.Address('localhost', port) - server = msgpackrpc.Server(EchoHandler()) - server.listen(addr) - # run thread to stop it after stopTimeSec seconds if > 0 - if stopTimeSec > 0: - def myStopRpcServer(): - server.stop() - t = threading.Timer(stopTimeSec, myStopRpcServer) - t.start() - server.start() - -def doRpcClientToPythonSvc(port): - address = msgpackrpc.Address('localhost', port) - client = msgpackrpc.Client(address, unpack_encoding='utf-8') - print client.call("Echo123", "A1", "B2", "C3") - print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) - -def doRpcClientToGoSvc(port): - # print ">>>> port: ", port, " <<<<<" - address = msgpackrpc.Address('localhost', port) - client = msgpackrpc.Client(address, unpack_encoding='utf-8') - print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"]) - print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) - -def doMain(args): - if len(args) == 2 and args[0] == "testdata": - build_test_data(args[1]) - elif len(args) == 3 and args[0] == "rpc-server": - doRpcServer(int(args[1]), int(args[2])) - elif len(args) == 2 and args[0] == "rpc-client-python-service": - doRpcClientToPythonSvc(int(args[1])) - elif len(args) == 2 and args[0] == "rpc-client-go-service": - doRpcClientToGoSvc(int(args[1])) - else: - print("Usage: test.py " + - "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...") - -if __name__ == "__main__": - doMain(sys.argv[1:]) - diff --git a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/tests.sh 
b/cluster-autoscaler/vendor/github.com/ugorji/go/codec/tests.sh deleted file mode 100755 index 342f336dfe1a..000000000000 --- a/cluster-autoscaler/vendor/github.com/ugorji/go/codec/tests.sh +++ /dev/null @@ -1,102 +0,0 @@ -#!/bin/bash - -# Run all the different permutations of all the tests. -# This helps ensure that nothing gets broken. - -_run() { - # 1. VARIATIONS: regular (t), canonical (c), IO R/W (i), - # binc-nosymbols (n), struct2array (s), intern string (e), - # json-indent (d), circular (l) - # 2. MODE: reflection (r), external (x), codecgen (g), unsafe (u), notfastpath (f) - # 3. OPTIONS: verbose (v), reset (z), must (m), - # - # Use combinations of mode to get exactly what you want, - # and then pass the variations you need. - - ztags="" - zargs="" - local OPTIND - OPTIND=1 - while getopts "_xurtcinsvgzmefdl" flag - do - case "x$flag" in - 'xr') ;; - 'xf') ztags="$ztags notfastpath" ;; - 'xg') ztags="$ztags codecgen" ;; - 'xx') ztags="$ztags x" ;; - 'xu') ztags="$ztags unsafe" ;; - 'xv') zargs="$zargs -tv" ;; - 'xz') zargs="$zargs -tr" ;; - 'xm') zargs="$zargs -tm" ;; - 'xl') zargs="$zargs -tl" ;; - *) ;; - esac - done - # shift $((OPTIND-1)) - printf '............. TAGS: %s .............\n' "$ztags" - # echo ">>>>>>> TAGS: $ztags" - - OPTIND=1 - while getopts "_xurtcinsvgzmefdl" flag - do - case "x$flag" in - 'xt') printf ">>>>>>> REGULAR : "; go test "-tags=$ztags" $zargs ; sleep 2 ;; - 'xc') printf ">>>>>>> CANONICAL : "; go test "-tags=$ztags" $zargs -tc; sleep 2 ;; - 'xi') printf ">>>>>>> I/O : "; go test "-tags=$ztags" $zargs -ti; sleep 2 ;; - 'xn') printf ">>>>>>> NO_SYMBOLS : "; go test "-tags=$ztags" -run=Binc $zargs -tn; sleep 2 ;; - 'xs') printf ">>>>>>> TO_ARRAY : "; go test "-tags=$ztags" $zargs -ts; sleep 2 ;; - 'xe') printf ">>>>>>> INTERN : "; go test "-tags=$ztags" $zargs -te; sleep 2 ;; - 'xd') printf ">>>>>>> INDENT : "; - go test "-tags=$ztags" -run=JsonCodecsTable -td=-1 $zargs; - go test "-tags=$ztags" -run=JsonCodecsTable -td=8 $zargs; - sleep 2 ;; - *) ;; - esac - done - shift $((OPTIND-1)) - - OPTIND=1 -} - -# echo ">>>>>>> RUNNING VARIATIONS OF TESTS" -if [[ "x$@" = "x" || "x$@" = "x-A" ]]; then - # All: r, x, g, gu - _run "-_tcinsed_ml" # regular - _run "-_tcinsed_ml_z" # regular with reset - _run "-_tcinsed_ml_f" # regular with no fastpath (notfastpath) - _run "-x_tcinsed_ml" # external - _run "-gx_tcinsed_ml" # codecgen: requires external - _run "-gxu_tcinsed_ml" # codecgen + unsafe -elif [[ "x$@" = "x-Z" ]]; then - # Regular - _run "-_tcinsed_ml" # regular - _run "-_tcinsed_ml_z" # regular with reset -elif [[ "x$@" = "x-F" ]]; then - # regular with notfastpath - _run "-_tcinsed_ml_f" # regular - _run "-_tcinsed_ml_zf" # regular with reset -elif [[ "x$@" = "x-C" ]]; then - # codecgen - _run "-gx_tcinsed_ml" # codecgen: requires external - _run "-gxu_tcinsed_ml" # codecgen + unsafe -elif [[ "x$@" = "x-X" ]]; then - # external - _run "-x_tcinsed_ml" # external -elif [[ "x$@" = "x-h" || "x$@" = "x-?" ]]; then - cat <= 0) - bd = bd | (byte(7-f) << 2) - copy(bs[i:], btmp[f:]) - i = i + (8 - f) - } - if tnsecs != 0 { - bd = bd | 0x40 - bigen.PutUint32(btmp[:4], uint32(tnsecs)) - f := pruneSignExt(btmp[:4], true) - bd = bd | byte(3-f) - copy(bs[i:], btmp[f:4]) - i = i + (4 - f) - } - if l != nil { - bd = bd | 0x20 - // Note that Go Libs do not give access to dst flag. 
- _, zoneOffset := t.Zone() - //zoneName, zoneOffset := t.Zone() - zoneOffset /= 60 - z := uint16(zoneOffset) - bigen.PutUint16(btmp[:2], z) - // clear dst flags - bs[i] = btmp[0] & 0x3f - bs[i+1] = btmp[1] - i = i + 2 - } - bs[0] = bd - return bs[0:i] -} - -// DecodeTime decodes a []byte into a time.Time. -func decodeTime(bs []byte) (tt time.Time, err error) { - bd := bs[0] - var ( - tsec int64 - tnsec uint32 - tz uint16 - i byte = 1 - i2 byte - n byte - ) - if bd&(1<<7) != 0 { - var btmp [8]byte - n = ((bd >> 2) & 0x7) + 1 - i2 = i + n - copy(btmp[8-n:], bs[i:i2]) - //if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it) - if bs[i]&(1<<7) != 0 { - copy(btmp[0:8-n], bsAll0xff) - //for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff } - } - i = i2 - tsec = int64(bigen.Uint64(btmp[:])) - } - if bd&(1<<6) != 0 { - var btmp [4]byte - n = (bd & 0x3) + 1 - i2 = i + n - copy(btmp[4-n:], bs[i:i2]) - i = i2 - tnsec = bigen.Uint32(btmp[:]) - } - if bd&(1<<5) == 0 { - tt = time.Unix(tsec, int64(tnsec)).UTC() - return - } - // In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name. - // However, we need name here, so it can be shown when time is printed. - // Zone name is in form: UTC-08:00. - // Note that Go Libs do not give access to dst flag, so we ignore dst bits - - i2 = i + 2 - tz = bigen.Uint16(bs[i:i2]) - i = i2 - // sign extend sign bit into top 2 MSB (which were dst bits): - if tz&(1<<13) == 0 { // positive - tz = tz & 0x3fff //clear 2 MSBs: dst bits - } else { // negative - tz = tz | 0xc000 //set 2 MSBs: dst bits - //tzname[3] = '-' (TODO: verify. this works here) - } - tzint := int16(tz) - if tzint == 0 { - tt = time.Unix(tsec, int64(tnsec)).UTC() - } else { - // For Go Time, do not use a descriptive timezone. - // It's unnecessary, and makes it harder to do a reflect.DeepEqual. - // The Offset already tells what the offset should be, if not on UTC and unknown zone name. - // var zoneName = timeLocUTCName(tzint) - tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60)) - } - return -} - -func timeLocUTCName(tzint int16) string { - if tzint == 0 { - return "UTC" - } - var tzname = []byte("UTC+00:00") - //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below. - //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first - var tzhr, tzmin int16 - if tzint < 0 { - tzname[3] = '-' // (TODO: verify. 
this works here) - tzhr, tzmin = -tzint/60, (-tzint)%60 - } else { - tzhr, tzmin = tzint/60, tzint%60 - } - tzname[4] = timeDigits[tzhr/10] - tzname[5] = timeDigits[tzhr%10] - tzname[7] = timeDigits[tzmin/10] - tzname[8] = timeDigits[tzmin%10] - return string(tzname) - //return time.FixedZone(string(tzname), int(tzint)*60) -} diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/Makefile b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/Makefile index a0e68e7a9aaa..6c8413b13a52 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/Makefile +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/Makefile @@ -3,8 +3,7 @@ DIRS := \ nl DEPS = \ - github.com/vishvananda/netns \ - golang.org/x/sys/unix + github.com/vishvananda/netns uniq = $(if $1,$(firstword $1) $(call uniq,$(filter-out $(firstword $1),$1))) testdirs = $(call uniq,$(foreach d,$(1),$(dir $(wildcard $(d)/*_test.go)))) @@ -19,7 +18,7 @@ $(call goroot,$(DEPS)): .PHONY: $(call testdirs,$(DIRS)) $(call testdirs,$(DIRS)): - go test -test.exec sudo -test.parallel 4 -timeout 60s -test.v github.com/vishvananda/netlink/$@ + sudo -E go test -test.parallel 4 -timeout 60s -v github.com/vishvananda/netlink/$@ $(call fmt,$(call testdirs,$(DIRS))): ! gofmt -l $(subst fmt-,,$@)/*.go | grep -q . diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/README.md b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/README.md index a88e2f418409..0b61be217e0b 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/README.md +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/README.md @@ -89,4 +89,3 @@ There are also a few pieces of low level netlink functionality that still need to be implemented. Routing rules are not in place and some of the more advanced link types. Hopefully there is decent structure and testing in place to make these fairly straightforward to add. - diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/addr_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/addr_linux.go index 8597ab7fcbcc..8808b42d9b66 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/addr_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/addr_linux.go @@ -4,10 +4,10 @@ import ( "fmt" "net" "strings" + "syscall" "github.com/vishvananda/netlink/nl" "github.com/vishvananda/netns" - "golang.org/x/sys/unix" ) // IFA_FLAGS is a u32 attribute. @@ -22,7 +22,7 @@ func AddrAdd(link Link, addr *Addr) error { // AddrAdd will add an IP address to a link device. // Equivalent to: `ip addr add $addr dev $link` func (h *Handle) AddrAdd(link Link, addr *Addr) error { - req := h.newNetlinkRequest(unix.RTM_NEWADDR, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_NEWADDR, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) return h.addrHandle(link, addr, req) } @@ -35,7 +35,7 @@ func AddrReplace(link Link, addr *Addr) error { // AddrReplace will replace (or, if not present, add) an IP address on a link device. 
// Equivalent to: `ip addr replace $addr dev $link` func (h *Handle) AddrReplace(link Link, addr *Addr) error { - req := h.newNetlinkRequest(unix.RTM_NEWADDR, unix.NLM_F_CREATE|unix.NLM_F_REPLACE|unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_NEWADDR, syscall.NLM_F_CREATE|syscall.NLM_F_REPLACE|syscall.NLM_F_ACK) return h.addrHandle(link, addr, req) } @@ -48,7 +48,7 @@ func AddrDel(link Link, addr *Addr) error { // AddrDel will delete an IP address from a link device. // Equivalent to: `ip addr del $addr dev $link` func (h *Handle) AddrDel(link Link, addr *Addr) error { - req := h.newNetlinkRequest(unix.RTM_DELADDR, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_DELADDR, syscall.NLM_F_ACK) return h.addrHandle(link, addr, req) } @@ -75,7 +75,7 @@ func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error localAddrData = addr.IP.To16() } - localData := nl.NewRtAttr(unix.IFA_LOCAL, localAddrData) + localData := nl.NewRtAttr(syscall.IFA_LOCAL, localAddrData) req.AddData(localData) var peerAddrData []byte if addr.Peer != nil { @@ -88,7 +88,7 @@ func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error peerAddrData = localAddrData } - addressData := nl.NewRtAttr(unix.IFA_ADDRESS, peerAddrData) + addressData := nl.NewRtAttr(syscall.IFA_ADDRESS, peerAddrData) req.AddData(addressData) if addr.Flags != 0 { @@ -109,14 +109,14 @@ func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error } addr.Broadcast = calcBroadcast } - req.AddData(nl.NewRtAttr(unix.IFA_BROADCAST, addr.Broadcast)) + req.AddData(nl.NewRtAttr(syscall.IFA_BROADCAST, addr.Broadcast)) if addr.Label != "" { - labelData := nl.NewRtAttr(unix.IFA_LABEL, nl.ZeroTerminated(addr.Label)) + labelData := nl.NewRtAttr(syscall.IFA_LABEL, nl.ZeroTerminated(addr.Label)) req.AddData(labelData) } - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -131,11 +131,11 @@ func AddrList(link Link, family int) ([]Addr, error) { // Equivalent to: `ip addr show`. // The list can be filtered by link and ip family. 
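The address helpers touched in this hunk are the programmatic counterparts of the `ip addr` commands named in their comments. A hedged usage sketch against the public vishvananda/netlink API shown here (the device name and address are placeholders, and the calls require CAP_NET_ADMIN); roughly `ip addr add 192.0.2.10/24 dev eth0` followed by `ip addr show dev eth0`:

```go
package main

import (
	"fmt"

	"github.com/vishvananda/netlink"
)

func main() {
	link, err := netlink.LinkByName("eth0") // placeholder device
	if err != nil {
		panic(err)
	}
	addr, err := netlink.ParseAddr("192.0.2.10/24") // placeholder address
	if err != nil {
		panic(err)
	}
	if err := netlink.AddrAdd(link, addr); err != nil { // `ip addr add`
		panic(err)
	}
	addrs, err := netlink.AddrList(link, netlink.FAMILY_V4) // `ip addr show`
	if err != nil {
		panic(err)
	}
	fmt.Println(addrs)
}
```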
func (h *Handle) AddrList(link Link, family int) ([]Addr, error) { - req := h.newNetlinkRequest(unix.RTM_GETADDR, unix.NLM_F_DUMP) + req := h.newNetlinkRequest(syscall.RTM_GETADDR, syscall.NLM_F_DUMP) msg := nl.NewIfInfomsg(family) req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWADDR) + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWADDR) if err != nil { return nil, err } @@ -187,21 +187,21 @@ func parseAddr(m []byte) (addr Addr, family, index int, err error) { var local, dst *net.IPNet for _, attr := range attrs { switch attr.Attr.Type { - case unix.IFA_ADDRESS: + case syscall.IFA_ADDRESS: dst = &net.IPNet{ IP: attr.Value, Mask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)), } addr.Peer = dst - case unix.IFA_LOCAL: + case syscall.IFA_LOCAL: local = &net.IPNet{ IP: attr.Value, Mask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)), } addr.IPNet = local - case unix.IFA_BROADCAST: + case syscall.IFA_BROADCAST: addr.Broadcast = attr.Value - case unix.IFA_LABEL: + case syscall.IFA_LABEL: addr.Label = string(attr.Value[:len(attr.Value)-1]) case IFA_FLAGS: addr.Flags = int(native.Uint32(attr.Value[0:4])) @@ -264,7 +264,7 @@ func AddrSubscribeWithOptions(ch chan<- AddrUpdate, done <-chan struct{}, option } func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}, cberr func(error)) error { - s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_IPV4_IFADDR, unix.RTNLGRP_IPV6_IFADDR) + s, err := nl.SubscribeAt(newNs, curNs, syscall.NETLINK_ROUTE, syscall.RTNLGRP_IPV4_IFADDR, syscall.RTNLGRP_IPV6_IFADDR) if err != nil { return err } @@ -286,7 +286,7 @@ func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-c } for _, m := range msgs { msgType := m.Header.Type - if msgType != unix.RTM_NEWADDR && msgType != unix.RTM_DELADDR { + if msgType != syscall.RTM_NEWADDR && msgType != syscall.RTM_DELADDR { if cberr != nil { cberr(fmt.Errorf("bad message type: %d", msgType)) } @@ -303,7 +303,7 @@ func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-c ch <- AddrUpdate{LinkAddress: *addr.IPNet, LinkIndex: ifindex, - NewAddr: msgType == unix.RTM_NEWADDR, + NewAddr: msgType == syscall.RTM_NEWADDR, Flags: addr.Flags, Scope: addr.Scope, PreferedLft: addr.PreferedLft, diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/bridge_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/bridge_linux.go index 6eb331ef1545..a65d6a1319a5 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/bridge_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/bridge_linux.go @@ -2,9 +2,9 @@ package netlink import ( "fmt" + "syscall" "github.com/vishvananda/netlink/nl" - "golang.org/x/sys/unix" ) // BridgeVlanList gets a map of device id to bridge vlan infos. @@ -16,12 +16,12 @@ func BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) { // BridgeVlanList gets a map of device id to bridge vlan infos. 
// Equivalent to: `bridge vlan show` func (h *Handle) BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) { - req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_DUMP) - msg := nl.NewIfInfomsg(unix.AF_BRIDGE) + req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP) + msg := nl.NewIfInfomsg(syscall.AF_BRIDGE) req.AddData(msg) req.AddData(nl.NewRtAttr(nl.IFLA_EXT_MASK, nl.Uint32Attr(uint32(nl.RTEXT_FILTER_BRVLAN)))) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWLINK) + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWLINK) if err != nil { return nil, err } @@ -63,7 +63,7 @@ func BridgeVlanAdd(link Link, vid uint16, pvid, untagged, self, master bool) err // BridgeVlanAdd adds a new vlan filter entry // Equivalent to: `bridge vlan add dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]` func (h *Handle) BridgeVlanAdd(link Link, vid uint16, pvid, untagged, self, master bool) error { - return h.bridgeVlanModify(unix.RTM_SETLINK, link, vid, pvid, untagged, self, master) + return h.bridgeVlanModify(syscall.RTM_SETLINK, link, vid, pvid, untagged, self, master) } // BridgeVlanDel adds a new vlan filter entry @@ -75,15 +75,15 @@ func BridgeVlanDel(link Link, vid uint16, pvid, untagged, self, master bool) err // BridgeVlanDel adds a new vlan filter entry // Equivalent to: `bridge vlan del dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]` func (h *Handle) BridgeVlanDel(link Link, vid uint16, pvid, untagged, self, master bool) error { - return h.bridgeVlanModify(unix.RTM_DELLINK, link, vid, pvid, untagged, self, master) + return h.bridgeVlanModify(syscall.RTM_DELLINK, link, vid, pvid, untagged, self, master) } func (h *Handle) bridgeVlanModify(cmd int, link Link, vid uint16, pvid, untagged, self, master bool) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(cmd, unix.NLM_F_ACK) + req := h.newNetlinkRequest(cmd, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_BRIDGE) + msg := nl.NewIfInfomsg(syscall.AF_BRIDGE) msg.Index = int32(base.Index) req.AddData(msg) @@ -107,7 +107,7 @@ func (h *Handle) bridgeVlanModify(cmd int, link Link, vid uint16, pvid, untagged } nl.NewRtAttrChild(br, nl.IFLA_BRIDGE_VLAN_INFO, vlanInfo.Serialize()) req.AddData(br) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) if err != nil { return err } diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/class_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/class_linux.go index a4997740e292..91cd3883de90 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/class_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/class_linux.go @@ -5,7 +5,6 @@ import ( "syscall" "github.com/vishvananda/netlink/nl" - "golang.org/x/sys/unix" ) // NOTE: function is in here because it uses other linux functions @@ -51,7 +50,7 @@ func ClassDel(class Class) error { // ClassDel will delete a class from the system. // Equivalent to: `tc class del $class` func (h *Handle) ClassDel(class Class) error { - return h.classModify(unix.RTM_DELTCLASS, 0, class) + return h.classModify(syscall.RTM_DELTCLASS, 0, class) } // ClassChange will change a class in place @@ -65,7 +64,7 @@ func ClassChange(class Class) error { // Equivalent to: `tc class change $class` // The parent and handle MUST NOT be changed. 
func (h *Handle) ClassChange(class Class) error { - return h.classModify(unix.RTM_NEWTCLASS, 0, class) + return h.classModify(syscall.RTM_NEWTCLASS, 0, class) } // ClassReplace will replace a class to the system. @@ -83,7 +82,7 @@ func ClassReplace(class Class) error { // If a class already exist with this parent/handle pair, the class is changed. // If a class does not already exist with this parent/handle, a new class is created. func (h *Handle) ClassReplace(class Class) error { - return h.classModify(unix.RTM_NEWTCLASS, unix.NLM_F_CREATE, class) + return h.classModify(syscall.RTM_NEWTCLASS, syscall.NLM_F_CREATE, class) } // ClassAdd will add a class to the system. @@ -96,14 +95,14 @@ func ClassAdd(class Class) error { // Equivalent to: `tc class add $class` func (h *Handle) ClassAdd(class Class) error { return h.classModify( - unix.RTM_NEWTCLASS, - unix.NLM_F_CREATE|unix.NLM_F_EXCL, + syscall.RTM_NEWTCLASS, + syscall.NLM_F_CREATE|syscall.NLM_F_EXCL, class, ) } func (h *Handle) classModify(cmd, flags int, class Class) error { - req := h.newNetlinkRequest(cmd, flags|unix.NLM_F_ACK) + req := h.newNetlinkRequest(cmd, flags|syscall.NLM_F_ACK) base := class.Attrs() msg := &nl.TcMsg{ Family: nl.FAMILY_ALL, @@ -113,12 +112,12 @@ func (h *Handle) classModify(cmd, flags int, class Class) error { } req.AddData(msg) - if cmd != unix.RTM_DELTCLASS { + if cmd != syscall.RTM_DELTCLASS { if err := classPayload(req, class); err != nil { return err } } - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -142,12 +141,12 @@ func classPayload(req *nl.NetlinkRequest, class Class) error { var rtab [256]uint32 var ctab [256]uint32 tcrate := nl.TcRateSpec{Rate: uint32(htb.Rate)} - if CalcRtable(&tcrate, rtab[:], cellLog, uint32(mtu), linklayer) < 0 { + if CalcRtable(&tcrate, rtab, cellLog, uint32(mtu), linklayer) < 0 { return errors.New("HTB: failed to calculate rate table") } opt.Rate = tcrate tcceil := nl.TcRateSpec{Rate: uint32(htb.Ceil)} - if CalcRtable(&tcceil, ctab[:], ccellLog, uint32(mtu), linklayer) < 0 { + if CalcRtable(&tcceil, ctab, ccellLog, uint32(mtu), linklayer) < 0 { return errors.New("HTB: failed to calculate ceil rate table") } opt.Ceil = tcceil @@ -170,7 +169,7 @@ func ClassList(link Link, parent uint32) ([]Class, error) { // Equivalent to: `tc class show`. // Generally returns nothing if link and parent are not specified. 
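The class helpers changed in this hunk map onto `tc class` subcommands. A hedged usage sketch, assuming an HTB qdisc with handle `1:` already exists on the device; the device name, handles, and rate values are placeholders, and `NewHtbClass` is the constructor the library exposes for HTB classes:

```go
package main

import (
	"fmt"

	"github.com/vishvananda/netlink"
)

func main() {
	link, err := netlink.LinkByName("eth0") // placeholder device
	if err != nil {
		panic(err)
	}
	// Roughly `tc class add dev eth0 parent 1: classid 1:1 htb ...`.
	attrs := netlink.ClassAttrs{
		LinkIndex: link.Attrs().Index,
		Parent:    netlink.MakeHandle(1, 0),
		Handle:    netlink.MakeHandle(1, 1),
	}
	htb := netlink.HtbClassAttrs{Rate: 128000, Ceil: 256000} // placeholder rates
	if err := netlink.ClassAdd(netlink.NewHtbClass(attrs, htb)); err != nil {
		panic(err)
	}
	// Roughly `tc class show dev eth0`, filtered to parent 1:.
	classes, err := netlink.ClassList(link, netlink.MakeHandle(1, 0))
	if err != nil {
		panic(err)
	}
	fmt.Println(classes)
}
```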
func (h *Handle) ClassList(link Link, parent uint32) ([]Class, error) { - req := h.newNetlinkRequest(unix.RTM_GETTCLASS, unix.NLM_F_DUMP) + req := h.newNetlinkRequest(syscall.RTM_GETTCLASS, syscall.NLM_F_DUMP) msg := &nl.TcMsg{ Family: nl.FAMILY_ALL, Parent: parent, @@ -182,7 +181,7 @@ func (h *Handle) ClassList(link Link, parent uint32) ([]Class, error) { } req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWTCLASS) + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWTCLASS) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/conntrack_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/conntrack_linux.go index a0fc74a37224..ecf044565909 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/conntrack_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/conntrack_linux.go @@ -6,9 +6,9 @@ import ( "errors" "fmt" "net" + "syscall" "github.com/vishvananda/netlink/nl" - "golang.org/x/sys/unix" ) // ConntrackTableType Conntrack table for the netlink operation @@ -85,8 +85,8 @@ func (h *Handle) ConntrackTableList(table ConntrackTableType, family InetFamily) // conntrack -F [table] Flush table // The flush operation applies to all the family types func (h *Handle) ConntrackTableFlush(table ConntrackTableType) error { - req := h.newConntrackRequest(table, unix.AF_INET, nl.IPCTNL_MSG_CT_DELETE, unix.NLM_F_ACK) - _, err := req.Execute(unix.NETLINK_NETFILTER, 0) + req := h.newConntrackRequest(table, syscall.AF_INET, nl.IPCTNL_MSG_CT_DELETE, syscall.NLM_F_ACK) + _, err := req.Execute(syscall.NETLINK_NETFILTER, 0) return err } @@ -102,10 +102,10 @@ func (h *Handle) ConntrackDeleteFilter(table ConntrackTableType, family InetFami for _, dataRaw := range res { flow := parseRawData(dataRaw) if match := filter.MatchConntrackFlow(flow); match { - req2 := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_DELETE, unix.NLM_F_ACK) + req2 := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_DELETE, syscall.NLM_F_ACK) // skip the first 4 byte that are the netfilter header, the newConntrackRequest is adding it already req2.AddRawData(dataRaw[4:]) - req2.Execute(unix.NETLINK_NETFILTER, 0) + req2.Execute(syscall.NETLINK_NETFILTER, 0) matched++ } } @@ -127,8 +127,8 @@ func (h *Handle) newConntrackRequest(table ConntrackTableType, family InetFamily } func (h *Handle) dumpConntrackTable(table ConntrackTableType, family InetFamily) ([][]byte, error) { - req := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_GET, unix.NLM_F_DUMP) - return req.Execute(unix.NETLINK_NETFILTER, 0) + req := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_GET, syscall.NLM_F_DUMP) + return req.Execute(syscall.NETLINK_NETFILTER, 0) } // The full conntrack flow structure is very complicated and can be found in the file: diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/filter.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/filter.go index 30b541494e24..1120c79d6a9b 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/filter.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/filter.go @@ -17,7 +17,7 @@ type FilterAttrs struct { Handle uint32 Parent uint32 Priority uint16 // lower is higher priority - Protocol uint16 // unix.ETH_P_* + Protocol uint16 // syscall.ETH_P_* } func (q FilterAttrs) String() string { diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/filter_linux.go 
b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/filter_linux.go index 7cb7a4fd9400..5025bd56c1b3 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/filter_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/filter_linux.go @@ -9,7 +9,6 @@ import ( "unsafe" "github.com/vishvananda/netlink/nl" - "golang.org/x/sys/unix" ) // Constants used in TcU32Sel.Flags. @@ -56,7 +55,7 @@ func NewFw(attrs FilterAttrs, fattrs FilterFwAttrs) (*Fw, error) { if police.Rate.Rate != 0 { police.Rate.Mpu = fattrs.Mpu police.Rate.Overhead = fattrs.Overhead - if CalcRtable(&police.Rate, rtab[:], rcellLog, fattrs.Mtu, linklayer) < 0 { + if CalcRtable(&police.Rate, rtab, rcellLog, fattrs.Mtu, linklayer) < 0 { return nil, errors.New("TBF: failed to calculate rate table") } police.Burst = uint32(Xmittime(uint64(police.Rate.Rate), uint32(buffer))) @@ -65,7 +64,7 @@ func NewFw(attrs FilterAttrs, fattrs FilterFwAttrs) (*Fw, error) { if police.PeakRate.Rate != 0 { police.PeakRate.Mpu = fattrs.Mpu police.PeakRate.Overhead = fattrs.Overhead - if CalcRtable(&police.PeakRate, ptab[:], pcellLog, fattrs.Mtu, linklayer) < 0 { + if CalcRtable(&police.PeakRate, ptab, pcellLog, fattrs.Mtu, linklayer) < 0 { return nil, errors.New("POLICE: failed to calculate peak rate table") } } @@ -99,7 +98,7 @@ func FilterDel(filter Filter) error { // FilterDel will delete a filter from the system. // Equivalent to: `tc filter del $filter` func (h *Handle) FilterDel(filter Filter) error { - req := h.newNetlinkRequest(unix.RTM_DELTFILTER, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_DELTFILTER, syscall.NLM_F_ACK) base := filter.Attrs() msg := &nl.TcMsg{ Family: nl.FAMILY_ALL, @@ -110,7 +109,7 @@ func (h *Handle) FilterDel(filter Filter) error { } req.AddData(msg) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -124,7 +123,7 @@ func FilterAdd(filter Filter) error { // Equivalent to: `tc filter add $filter` func (h *Handle) FilterAdd(filter Filter) error { native = nl.NativeEndian() - req := h.newNetlinkRequest(unix.RTM_NEWTFILTER, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_NEWTFILTER, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) base := filter.Attrs() msg := &nl.TcMsg{ Family: nl.FAMILY_ALL, @@ -225,7 +224,7 @@ func (h *Handle) FilterAdd(filter Filter) error { } req.AddData(options) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -240,7 +239,7 @@ func FilterList(link Link, parent uint32) ([]Filter, error) { // Equivalent to: `tc filter show`. // Generally returns nothing if link and parent are not specified. 
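The `CalcRtable` call sites above (and the signature hunk that follows) revert the rate-table parameter from a `[]uint32` slice back to a `[256]uint32` array. In Go that distinction matters: an array argument is copied into the callee, while a slice aliases the caller's backing storage, which is why the `unix`-based code passed `rtab[:]`. A self-contained sketch of the difference:

```go
package main

import "fmt"

// fillArray receives a copy of the array; writes stay local to the callee.
func fillArray(tab [4]uint32) { tab[0] = 42 }

// fillSlice shares the caller's backing array; writes are visible after return.
func fillSlice(tab []uint32) { tab[0] = 42 }

func main() {
	var tab [4]uint32
	fillArray(tab)
	fmt.Println(tab[0]) // 0: only the copy was modified
	fillSlice(tab[:])   // tab[:] is how the unix-based code passed rtab
	fmt.Println(tab[0]) // 42: the slice aliases tab
}
```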
func (h *Handle) FilterList(link Link, parent uint32) ([]Filter, error) { - req := h.newNetlinkRequest(unix.RTM_GETTFILTER, unix.NLM_F_DUMP) + req := h.newNetlinkRequest(syscall.RTM_GETTFILTER, syscall.NLM_F_DUMP) msg := &nl.TcMsg{ Family: nl.FAMILY_ALL, Parent: parent, @@ -252,7 +251,7 @@ func (h *Handle) FilterList(link Link, parent uint32) ([]Filter, error) { } req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWTFILTER) + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWTFILTER) if err != nil { return nil, err } @@ -563,7 +562,7 @@ func AdjustSize(sz uint, mpu uint, linklayer int) uint { } } -func CalcRtable(rate *nl.TcRateSpec, rtab []uint32, cellLog int, mtu uint32, linklayer int) int { +func CalcRtable(rate *nl.TcRateSpec, rtab [256]uint32, cellLog int, mtu uint32, linklayer int) int { bps := rate.Rate mpu := rate.Mpu var sz uint diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/fou.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/fou.go deleted file mode 100644 index 71e73c37a0a3..000000000000 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/fou.go +++ /dev/null @@ -1,21 +0,0 @@ -package netlink - -import ( - "errors" -) - -var ( - // ErrAttrHeaderTruncated is returned when a netlink attribute's header is - // truncated. - ErrAttrHeaderTruncated = errors.New("attribute header truncated") - // ErrAttrBodyTruncated is returned when a netlink attribute's body is - // truncated. - ErrAttrBodyTruncated = errors.New("attribute body truncated") -) - -type Fou struct { - Family int - Port int - Protocol int - EncapType int -} diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/fou_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/fou_linux.go deleted file mode 100644 index 62d59bd2d093..000000000000 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/fou_linux.go +++ /dev/null @@ -1,215 +0,0 @@ -// +build linux - -package netlink - -import ( - "encoding/binary" - "errors" - - "github.com/vishvananda/netlink/nl" - "golang.org/x/sys/unix" -) - -const ( - FOU_GENL_NAME = "fou" -) - -const ( - FOU_CMD_UNSPEC uint8 = iota - FOU_CMD_ADD - FOU_CMD_DEL - FOU_CMD_GET - FOU_CMD_MAX = FOU_CMD_GET -) - -const ( - FOU_ATTR_UNSPEC = iota - FOU_ATTR_PORT - FOU_ATTR_AF - FOU_ATTR_IPPROTO - FOU_ATTR_TYPE - FOU_ATTR_REMCSUM_NOPARTIAL - FOU_ATTR_MAX = FOU_ATTR_REMCSUM_NOPARTIAL -) - -const ( - FOU_ENCAP_UNSPEC = iota - FOU_ENCAP_DIRECT - FOU_ENCAP_GUE - FOU_ENCAP_MAX = FOU_ENCAP_GUE -) - -var fouFamilyId int - -func FouFamilyId() (int, error) { - if fouFamilyId != 0 { - return fouFamilyId, nil - } - - fam, err := GenlFamilyGet(FOU_GENL_NAME) - if err != nil { - return -1, err - } - - fouFamilyId = int(fam.ID) - return fouFamilyId, nil -} - -func FouAdd(f Fou) error { - return pkgHandle.FouAdd(f) -} - -func (h *Handle) FouAdd(f Fou) error { - fam_id, err := FouFamilyId() - if err != nil { - return err - } - - // setting ip protocol conflicts with encapsulation type GUE - if f.EncapType == FOU_ENCAP_GUE && f.Protocol != 0 { - return errors.New("GUE encapsulation doesn't specify an IP protocol") - } - - req := h.newNetlinkRequest(fam_id, unix.NLM_F_ACK) - - // int to byte for port - bp := make([]byte, 2) - binary.BigEndian.PutUint16(bp[0:2], uint16(f.Port)) - - attrs := []*nl.RtAttr{ - nl.NewRtAttr(FOU_ATTR_PORT, bp), - nl.NewRtAttr(FOU_ATTR_TYPE, []byte{uint8(f.EncapType)}), - nl.NewRtAttr(FOU_ATTR_AF, []byte{uint8(f.Family)}), - nl.NewRtAttr(FOU_ATTR_IPPROTO, 
[]byte{uint8(f.Protocol)}), - } - raw := []byte{FOU_CMD_ADD, 1, 0, 0} - for _, a := range attrs { - raw = append(raw, a.Serialize()...) - } - - req.AddRawData(raw) - - _, err = req.Execute(unix.NETLINK_GENERIC, 0) - if err != nil { - return err - } - - return nil -} - -func FouDel(f Fou) error { - return pkgHandle.FouDel(f) -} - -func (h *Handle) FouDel(f Fou) error { - fam_id, err := FouFamilyId() - if err != nil { - return err - } - - req := h.newNetlinkRequest(fam_id, unix.NLM_F_ACK) - - // int to byte for port - bp := make([]byte, 2) - binary.BigEndian.PutUint16(bp[0:2], uint16(f.Port)) - - attrs := []*nl.RtAttr{ - nl.NewRtAttr(FOU_ATTR_PORT, bp), - nl.NewRtAttr(FOU_ATTR_AF, []byte{uint8(f.Family)}), - } - raw := []byte{FOU_CMD_DEL, 1, 0, 0} - for _, a := range attrs { - raw = append(raw, a.Serialize()...) - } - - req.AddRawData(raw) - - _, err = req.Execute(unix.NETLINK_GENERIC, 0) - if err != nil { - return err - } - - return nil -} - -func FouList(fam int) ([]Fou, error) { - return pkgHandle.FouList(fam) -} - -func (h *Handle) FouList(fam int) ([]Fou, error) { - fam_id, err := FouFamilyId() - if err != nil { - return nil, err - } - - req := h.newNetlinkRequest(fam_id, unix.NLM_F_DUMP) - - attrs := []*nl.RtAttr{ - nl.NewRtAttr(FOU_ATTR_AF, []byte{uint8(fam)}), - } - raw := []byte{FOU_CMD_GET, 1, 0, 0} - for _, a := range attrs { - raw = append(raw, a.Serialize()...) - } - - req.AddRawData(raw) - - msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) - if err != nil { - return nil, err - } - - fous := make([]Fou, 0, len(msgs)) - for _, m := range msgs { - f, err := deserializeFouMsg(m) - if err != nil { - return fous, err - } - - fous = append(fous, f) - } - - return fous, nil -} - -func deserializeFouMsg(msg []byte) (Fou, error) { - // we'll skip to byte 4 to first attribute - msg = msg[3:] - var shift int - fou := Fou{} - - for { - // attribute header is at least 16 bits - if len(msg) < 4 { - return fou, ErrAttrHeaderTruncated - } - - lgt := int(binary.BigEndian.Uint16(msg[0:2])) - if len(msg) < lgt+4 { - return fou, ErrAttrBodyTruncated - } - attr := binary.BigEndian.Uint16(msg[2:4]) - - shift = lgt + 3 - switch attr { - case FOU_ATTR_AF: - fou.Family = int(msg[5]) - case FOU_ATTR_PORT: - fou.Port = int(binary.BigEndian.Uint16(msg[5:7])) - // port is 2 bytes - shift = lgt + 2 - case FOU_ATTR_IPPROTO: - fou.Protocol = int(msg[5]) - case FOU_ATTR_TYPE: - fou.EncapType = int(msg[5]) - } - - msg = msg[shift:] - - if len(msg) < 4 { - break - } - } - - return fou, nil -} diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/fou_unspecified.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/fou_unspecified.go deleted file mode 100644 index 3a8365bfe623..000000000000 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/fou_unspecified.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !linux - -package netlink - -func FouAdd(f Fou) error { - return ErrNotImplemented -} - -func FouDel(f Fou) error { - return ErrNotImplemented -} - -func FouList(fam int) ([]Fou, error) { - return nil, ErrNotImplemented -} diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/genetlink_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/genetlink_linux.go index ce7969907d43..a388a87001ce 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/genetlink_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/genetlink_linux.go @@ -5,7 +5,6 @@ import ( "syscall" "github.com/vishvananda/netlink/nl" - "golang.org/x/sys/unix" 
) type GenlOp struct { @@ -131,9 +130,9 @@ func (h *Handle) GenlFamilyList() ([]*GenlFamily, error) { Command: nl.GENL_CTRL_CMD_GETFAMILY, Version: nl.GENL_CTRL_VERSION, } - req := h.newNetlinkRequest(nl.GENL_ID_CTRL, unix.NLM_F_DUMP) + req := h.newNetlinkRequest(nl.GENL_ID_CTRL, syscall.NLM_F_DUMP) req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) + msgs, err := req.Execute(syscall.NETLINK_GENERIC, 0) if err != nil { return nil, err } @@ -152,7 +151,7 @@ func (h *Handle) GenlFamilyGet(name string) (*GenlFamily, error) { req := h.newNetlinkRequest(nl.GENL_ID_CTRL, 0) req.AddData(msg) req.AddData(nl.NewRtAttr(nl.GENL_CTRL_ATTR_FAMILY_NAME, nl.ZeroTerminated(name))) - msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) + msgs, err := req.Execute(syscall.NETLINK_GENERIC, 0) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/gtp_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/gtp_linux.go index f5e160ba5c06..7331303ecbe1 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/gtp_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/gtp_linux.go @@ -7,7 +7,6 @@ import ( "syscall" "github.com/vishvananda/netlink/nl" - "golang.org/x/sys/unix" ) type PDP struct { @@ -83,9 +82,9 @@ func (h *Handle) GTPPDPList() ([]*PDP, error) { Command: nl.GENL_GTP_CMD_GETPDP, Version: nl.GENL_GTP_VERSION, } - req := h.newNetlinkRequest(int(f.ID), unix.NLM_F_DUMP) + req := h.newNetlinkRequest(int(f.ID), syscall.NLM_F_DUMP) req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) + msgs, err := req.Execute(syscall.NETLINK_GENERIC, 0) if err != nil { return nil, err } @@ -97,7 +96,7 @@ func GTPPDPList() ([]*PDP, error) { } func gtpPDPGet(req *nl.NetlinkRequest) (*PDP, error) { - msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) + msgs, err := req.Execute(syscall.NETLINK_GENERIC, 0) if err != nil { return nil, err } @@ -183,7 +182,7 @@ func (h *Handle) GTPPDPAdd(link Link, pdp *PDP) error { Command: nl.GENL_GTP_CMD_NEWPDP, Version: nl.GENL_GTP_VERSION, } - req := h.newNetlinkRequest(int(f.ID), unix.NLM_F_EXCL|unix.NLM_F_ACK) + req := h.newNetlinkRequest(int(f.ID), syscall.NLM_F_EXCL|syscall.NLM_F_ACK) req.AddData(msg) req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_VERSION, nl.Uint32Attr(pdp.Version))) req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_LINK, nl.Uint32Attr(uint32(link.Attrs().Index)))) @@ -200,7 +199,7 @@ func (h *Handle) GTPPDPAdd(link Link, pdp *PDP) error { default: return fmt.Errorf("unsupported GTP version: %d", pdp.Version) } - _, err = req.Execute(unix.NETLINK_GENERIC, 0) + _, err = req.Execute(syscall.NETLINK_GENERIC, 0) return err } @@ -217,7 +216,7 @@ func (h *Handle) GTPPDPDel(link Link, pdp *PDP) error { Command: nl.GENL_GTP_CMD_DELPDP, Version: nl.GENL_GTP_VERSION, } - req := h.newNetlinkRequest(int(f.ID), unix.NLM_F_EXCL|unix.NLM_F_ACK) + req := h.newNetlinkRequest(int(f.ID), syscall.NLM_F_EXCL|syscall.NLM_F_ACK) req.AddData(msg) req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_VERSION, nl.Uint32Attr(pdp.Version))) req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_LINK, nl.Uint32Attr(uint32(link.Attrs().Index)))) @@ -230,7 +229,7 @@ func (h *Handle) GTPPDPDel(link Link, pdp *PDP) error { default: return fmt.Errorf("unsupported GTP version: %d", pdp.Version) } - _, err = req.Execute(unix.NETLINK_GENERIC, 0) + _, err = req.Execute(syscall.NETLINK_GENERIC, 0) return err } diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/handle_linux.go 
b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/handle_linux.go index 9f6d7fe0fbd1..d37b087c33e3 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/handle_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/handle_linux.go @@ -2,11 +2,11 @@ package netlink import ( "fmt" + "syscall" "time" "github.com/vishvananda/netlink/nl" "github.com/vishvananda/netns" - "golang.org/x/sys/unix" ) // Empty handle used by the netlink package methods @@ -43,7 +43,7 @@ func (h *Handle) SetSocketTimeout(to time.Duration) error { if to < time.Microsecond { return fmt.Errorf("invalid timeout, minimum value is %s", time.Microsecond) } - tv := unix.NsecToTimeval(to.Nanoseconds()) + tv := syscall.NsecToTimeval(to.Nanoseconds()) for _, sh := range h.sockets { if err := sh.Socket.SetSendTimeout(&tv); err != nil { return err @@ -59,13 +59,13 @@ func (h *Handle) SetSocketTimeout(to time.Duration) error { // socket in the netlink handle. The maximum value is capped by // /proc/sys/net/core/rmem_max. func (h *Handle) SetSocketReceiveBufferSize(size int, force bool) error { - opt := unix.SO_RCVBUF + opt := syscall.SO_RCVBUF if force { - opt = unix.SO_RCVBUFFORCE + opt = syscall.SO_RCVBUFFORCE } for _, sh := range h.sockets { fd := sh.Socket.GetFd() - err := unix.SetsockoptInt(fd, unix.SOL_SOCKET, opt, size) + err := syscall.SetsockoptInt(fd, syscall.SOL_SOCKET, opt, size) if err != nil { return err } @@ -81,7 +81,7 @@ func (h *Handle) GetSocketReceiveBufferSize() ([]int, error) { i := 0 for _, sh := range h.sockets { fd := sh.Socket.GetFd() - size, err := unix.GetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_RCVBUF) + size, err := syscall.GetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_RCVBUF) if err != nil { return nil, err } @@ -134,10 +134,10 @@ func (h *Handle) newNetlinkRequest(proto, flags int) *nl.NetlinkRequest { return nl.NewNetlinkRequest(proto, flags) } return &nl.NetlinkRequest{ - NlMsghdr: unix.NlMsghdr{ - Len: uint32(unix.SizeofNlMsghdr), + NlMsghdr: syscall.NlMsghdr{ + Len: uint32(syscall.SizeofNlMsghdr), Type: uint16(proto), - Flags: unix.NLM_F_REQUEST | uint16(flags), + Flags: syscall.NLM_F_REQUEST | uint16(flags), }, Sockets: h.sockets, } diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/link.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/link.go index d8ba16a948f5..5aa3a1790ab5 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/link.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/link.go @@ -3,7 +3,6 @@ package netlink import ( "fmt" "net" - "os" ) // Link represents a link device from netlink.
Shared link attributes @@ -39,8 +38,6 @@ type LinkAttrs struct { Protinfo *Protinfo OperState LinkOperState NetNsID int - NumTxQueues int - NumRxQueues int } // LinkOperState represents the values of the IFLA_OPERSTATE link @@ -262,9 +259,6 @@ const ( type Macvlan struct { LinkAttrs Mode MacvlanMode - - // MACAddrs is only populated for Macvlan SOURCE links - MACAddrs []net.HardwareAddr } func (macvlan *Macvlan) Attrs() *LinkAttrs { @@ -290,10 +284,8 @@ type TuntapFlag uint16 // Tuntap links created via /dev/tun/tap, but can be destroyed via netlink type Tuntap struct { LinkAttrs - Mode TuntapMode - Flags TuntapFlag - Queues int - Fds []*os.File + Mode TuntapMode + Flags TuntapFlag } func (tuntap *Tuntap) Attrs() *LinkAttrs { @@ -335,28 +327,26 @@ func (generic *GenericLink) Type() string { type Vxlan struct { LinkAttrs - VxlanId int - VtepDevIndex int - SrcAddr net.IP - Group net.IP - TTL int - TOS int - Learning bool - Proxy bool - RSC bool - L2miss bool - L3miss bool - UDPCSum bool - UDP6ZeroCSumTx bool - UDP6ZeroCSumRx bool - NoAge bool - GBP bool - FlowBased bool - Age int - Limit int - Port int - PortLow int - PortHigh int + VxlanId int + VtepDevIndex int + SrcAddr net.IP + Group net.IP + TTL int + TOS int + Learning bool + Proxy bool + RSC bool + L2miss bool + L3miss bool + UDPCSum bool + NoAge bool + GBP bool + FlowBased bool + Age int + Limit int + Port int + PortLow int + PortHigh int } func (vxlan *Vxlan) Attrs() *LinkAttrs { @@ -710,17 +700,12 @@ func (gretap *Gretap) Type() string { type Iptun struct { LinkAttrs - Ttl uint8 - Tos uint8 - PMtuDisc uint8 - Link uint32 - Local net.IP - Remote net.IP - EncapSport uint16 - EncapDport uint16 - EncapType uint16 - EncapFlags uint16 - FlowBased bool + Ttl uint8 + Tos uint8 + PMtuDisc uint8 + Link uint32 + Local net.IP + Remote net.IP } func (iptun *Iptun) Attrs() *LinkAttrs { @@ -731,28 +716,6 @@ func (iptun *Iptun) Type() string { return "ipip" } -type Sittun struct { - LinkAttrs - Link uint32 - Local net.IP - Remote net.IP - Ttl uint8 - Tos uint8 - PMtuDisc uint8 - EncapType uint16 - EncapFlags uint16 - EncapSport uint16 - EncapDport uint16 -} - -func (sittun *Sittun) Attrs() *LinkAttrs { - return &sittun.LinkAttrs -} - -func (sittun *Sittun) Type() string { - return "sit" -} - type Vti struct { LinkAttrs IKey uint32 @@ -772,20 +735,16 @@ func (iptun *Vti) Type() string { type Gretun struct { LinkAttrs - Link uint32 - IFlags uint16 - OFlags uint16 - IKey uint32 - OKey uint32 - Local net.IP - Remote net.IP - Ttl uint8 - Tos uint8 - PMtuDisc uint8 - EncapType uint16 - EncapFlags uint16 - EncapSport uint16 - EncapDport uint16 + Link uint32 + IFlags uint16 + OFlags uint16 + IKey uint32 + OKey uint32 + Local net.IP + Remote net.IP + Ttl uint8 + Tos uint8 + PMtuDisc uint8 } func (gretun *Gretun) Attrs() *LinkAttrs { diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/link_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/link_linux.go index a6ae10418772..e94fd9766cd7 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/link_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/link_linux.go @@ -11,7 +11,6 @@ import ( "github.com/vishvananda/netlink/nl" "github.com/vishvananda/netns" - "golang.org/x/sys/unix" ) const ( @@ -21,15 +20,13 @@ const ( ) const ( - TUNTAP_MODE_TUN TuntapMode = unix.IFF_TUN - TUNTAP_MODE_TAP TuntapMode = unix.IFF_TAP - TUNTAP_DEFAULTS TuntapFlag = unix.IFF_TUN_EXCL | unix.IFF_ONE_QUEUE - TUNTAP_VNET_HDR TuntapFlag = unix.IFF_VNET_HDR - 
TUNTAP_TUN_EXCL TuntapFlag = unix.IFF_TUN_EXCL - TUNTAP_NO_PI TuntapFlag = unix.IFF_NO_PI - TUNTAP_ONE_QUEUE TuntapFlag = unix.IFF_ONE_QUEUE - TUNTAP_MULTI_QUEUE TuntapFlag = 0x0100 - TUNTAP_MULTI_QUEUE_DEFAULTS TuntapFlag = TUNTAP_MULTI_QUEUE | TUNTAP_NO_PI + TUNTAP_MODE_TUN TuntapMode = syscall.IFF_TUN + TUNTAP_MODE_TAP TuntapMode = syscall.IFF_TAP + TUNTAP_DEFAULTS TuntapFlag = syscall.IFF_TUN_EXCL | syscall.IFF_ONE_QUEUE + TUNTAP_VNET_HDR TuntapFlag = syscall.IFF_VNET_HDR + TUNTAP_TUN_EXCL TuntapFlag = syscall.IFF_TUN_EXCL + TUNTAP_NO_PI TuntapFlag = syscall.IFF_NO_PI + TUNTAP_ONE_QUEUE TuntapFlag = syscall.IFF_ONE_QUEUE ) var lookupByDump = false @@ -64,15 +61,15 @@ func (h *Handle) ensureIndex(link *LinkAttrs) { func (h *Handle) LinkSetARPOff(link Link) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) - msg.Change |= unix.IFF_NOARP - msg.Flags |= unix.IFF_NOARP + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg.Change |= syscall.IFF_NOARP + msg.Flags |= syscall.IFF_NOARP msg.Index = int32(base.Index) req.AddData(msg) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -83,15 +80,15 @@ func LinkSetARPOff(link Link) error { func (h *Handle) LinkSetARPOn(link Link) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) - msg.Change |= unix.IFF_NOARP - msg.Flags &= ^uint32(unix.IFF_NOARP) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg.Change |= syscall.IFF_NOARP + msg.Flags &= ^uint32(syscall.IFF_NOARP) msg.Index = int32(base.Index) req.AddData(msg) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -102,84 +99,15 @@ func LinkSetARPOn(link Link) error { func (h *Handle) SetPromiscOn(link Link) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) - msg.Change = unix.IFF_PROMISC - msg.Flags = unix.IFF_PROMISC + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg.Change = syscall.IFF_PROMISC + msg.Flags = syscall.IFF_PROMISC msg.Index = int32(base.Index) req.AddData(msg) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) - return err -} - -func MacvlanMACAddrAdd(link Link, addr net.HardwareAddr) error { - return pkgHandle.MacvlanMACAddrAdd(link, addr) -} - -func (h *Handle) MacvlanMACAddrAdd(link Link, addr net.HardwareAddr) error { - return h.macvlanMACAddrChange(link, []net.HardwareAddr{addr}, nl.MACVLAN_MACADDR_ADD) -} - -func MacvlanMACAddrDel(link Link, addr net.HardwareAddr) error { - return pkgHandle.MacvlanMACAddrDel(link, addr) -} - -func (h *Handle) MacvlanMACAddrDel(link Link, addr net.HardwareAddr) error { - return h.macvlanMACAddrChange(link, []net.HardwareAddr{addr}, nl.MACVLAN_MACADDR_DEL) -} - -func MacvlanMACAddrFlush(link Link) error { - return pkgHandle.MacvlanMACAddrFlush(link) -} - -func (h *Handle) MacvlanMACAddrFlush(link Link) error { - return h.macvlanMACAddrChange(link, nil, nl.MACVLAN_MACADDR_FLUSH) -} - -func MacvlanMACAddrSet(link Link, addrs []net.HardwareAddr) error { - return pkgHandle.MacvlanMACAddrSet(link, addrs) -} - -func (h *Handle) 
MacvlanMACAddrSet(link Link, addrs []net.HardwareAddr) error { - return h.macvlanMACAddrChange(link, addrs, nl.MACVLAN_MACADDR_SET) -} - -func (h *Handle) macvlanMACAddrChange(link Link, addrs []net.HardwareAddr, mode uint32) error { - base := link.Attrs() - h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_NEWLINK, unix.NLM_F_ACK) - - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) - msg.Index = int32(base.Index) - req.AddData(msg) - - linkInfo := nl.NewRtAttr(unix.IFLA_LINKINFO, nil) - nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_KIND, nl.NonZeroTerminated(link.Type())) - inner := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) - - // IFLA_MACVLAN_MACADDR_MODE = mode - b := make([]byte, 4) - native.PutUint32(b, mode) - nl.NewRtAttrChild(inner, nl.IFLA_MACVLAN_MACADDR_MODE, b) - - // populate message with MAC addrs, if necessary - switch mode { - case nl.MACVLAN_MACADDR_ADD, nl.MACVLAN_MACADDR_DEL: - if len(addrs) == 1 { - nl.NewRtAttrChild(inner, nl.IFLA_MACVLAN_MACADDR, []byte(addrs[0])) - } - case nl.MACVLAN_MACADDR_SET: - mad := nl.NewRtAttrChild(inner, nl.IFLA_MACVLAN_MACADDR_DATA, nil) - for _, addr := range addrs { - nl.NewRtAttrChild(mad, nl.IFLA_MACVLAN_MACADDR, []byte(addr)) - } - } - - req.AddData(linkInfo) - - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -190,7 +118,7 @@ func BridgeSetMcastSnoop(link Link, on bool) error { func (h *Handle) BridgeSetMcastSnoop(link Link, on bool) error { bridge := link.(*Bridge) bridge.MulticastSnooping = &on - return h.linkModify(bridge, unix.NLM_F_ACK) + return h.linkModify(bridge, syscall.NLM_F_ACK) } func SetPromiscOn(link Link) error { @@ -200,15 +128,15 @@ func SetPromiscOn(link Link) error { func (h *Handle) SetPromiscOff(link Link) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) - msg.Change = unix.IFF_PROMISC - msg.Flags = 0 & ^unix.IFF_PROMISC + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg.Change = syscall.IFF_PROMISC + msg.Flags = 0 & ^syscall.IFF_PROMISC msg.Index = int32(base.Index) req.AddData(msg) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -227,15 +155,15 @@ func LinkSetUp(link Link) error { func (h *Handle) LinkSetUp(link Link) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_NEWLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) - msg.Change = unix.IFF_UP - msg.Flags = unix.IFF_UP + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg.Change = syscall.IFF_UP + msg.Flags = syscall.IFF_UP msg.Index = int32(base.Index) req.AddData(msg) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -250,15 +178,15 @@ func LinkSetDown(link Link) error { func (h *Handle) LinkSetDown(link Link) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_NEWLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) - msg.Change = unix.IFF_UP - msg.Flags = 0 & ^unix.IFF_UP + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg.Change = syscall.IFF_UP + msg.Flags = 0 & ^syscall.IFF_UP msg.Index = int32(base.Index) req.AddData(msg) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + 
_, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -273,19 +201,19 @@ func LinkSetMTU(link Link, mtu int) error { func (h *Handle) LinkSetMTU(link Link, mtu int) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) b := make([]byte, 4) native.PutUint32(b, uint32(mtu)) - data := nl.NewRtAttr(unix.IFLA_MTU, b) + data := nl.NewRtAttr(syscall.IFLA_MTU, b) req.AddData(data) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -300,16 +228,16 @@ func LinkSetName(link Link, name string) error { func (h *Handle) LinkSetName(link Link, name string) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) - data := nl.NewRtAttr(unix.IFLA_IFNAME, []byte(name)) + data := nl.NewRtAttr(syscall.IFLA_IFNAME, []byte(name)) req.AddData(data) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -324,16 +252,16 @@ func LinkSetAlias(link Link, name string) error { func (h *Handle) LinkSetAlias(link Link, name string) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) - data := nl.NewRtAttr(unix.IFLA_IFALIAS, []byte(name)) + data := nl.NewRtAttr(syscall.IFLA_IFALIAS, []byte(name)) req.AddData(data) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -348,16 +276,16 @@ func LinkSetHardwareAddr(link Link, hwaddr net.HardwareAddr) error { func (h *Handle) LinkSetHardwareAddr(link Link, hwaddr net.HardwareAddr) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) - data := nl.NewRtAttr(unix.IFLA_ADDRESS, []byte(hwaddr)) + data := nl.NewRtAttr(syscall.IFLA_ADDRESS, []byte(hwaddr)) req.AddData(data) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -372,9 +300,9 @@ func LinkSetVfHardwareAddr(link Link, vf int, hwaddr net.HardwareAddr) error { func (h *Handle) LinkSetVfHardwareAddr(link Link, vf int, hwaddr net.HardwareAddr) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) @@ -387,7 +315,7 @@ func (h *Handle) LinkSetVfHardwareAddr(link Link, vf int, hwaddr net.HardwareAdd nl.NewRtAttrChild(info, nl.IFLA_VF_MAC, vfmsg.Serialize()) req.AddData(data) - _, err := 
req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -402,9 +330,9 @@ func LinkSetVfVlan(link Link, vf, vlan int) error { func (h *Handle) LinkSetVfVlan(link Link, vf, vlan int) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) @@ -417,7 +345,7 @@ func (h *Handle) LinkSetVfVlan(link Link, vf, vlan int) error { nl.NewRtAttrChild(info, nl.IFLA_VF_VLAN, vfmsg.Serialize()) req.AddData(data) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -432,9 +360,9 @@ func LinkSetVfTxRate(link Link, vf, rate int) error { func (h *Handle) LinkSetVfTxRate(link Link, vf, rate int) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) @@ -447,7 +375,7 @@ func (h *Handle) LinkSetVfTxRate(link Link, vf, rate int) error { nl.NewRtAttrChild(info, nl.IFLA_VF_TX_RATE, vfmsg.Serialize()) req.AddData(data) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -463,9 +391,9 @@ func (h *Handle) LinkSetVfSpoofchk(link Link, vf int, check bool) error { var setting uint32 base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) @@ -481,7 +409,7 @@ func (h *Handle) LinkSetVfSpoofchk(link Link, vf int, check bool) error { nl.NewRtAttrChild(info, nl.IFLA_VF_SPOOFCHK, vfmsg.Serialize()) req.AddData(data) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -497,9 +425,9 @@ func (h *Handle) LinkSetVfTrust(link Link, vf int, state bool) error { var setting uint32 base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) @@ -515,7 +443,7 @@ func (h *Handle) LinkSetVfTrust(link Link, vf int, state bool) error { nl.NewRtAttrChild(info, nl.IFLA_VF_TRUST, vfmsg.Serialize()) req.AddData(data) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -563,19 +491,19 @@ func LinkSetMasterByIndex(link Link, masterIndex int) error { func (h *Handle) LinkSetMasterByIndex(link Link, masterIndex int) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) b := make([]byte, 4) native.PutUint32(b, uint32(masterIndex)) - data := nl.NewRtAttr(unix.IFLA_MASTER, b) + data := nl.NewRtAttr(syscall.IFLA_MASTER, b) 
req.AddData(data) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -592,19 +520,19 @@ func LinkSetNsPid(link Link, nspid int) error { func (h *Handle) LinkSetNsPid(link Link, nspid int) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) b := make([]byte, 4) native.PutUint32(b, uint32(nspid)) - data := nl.NewRtAttr(unix.IFLA_NET_NS_PID, b) + data := nl.NewRtAttr(syscall.IFLA_NET_NS_PID, b) req.AddData(data) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -621,9 +549,9 @@ func LinkSetNsFd(link Link, fd int) error { func (h *Handle) LinkSetNsFd(link Link, fd int) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) @@ -633,7 +561,7 @@ func (h *Handle) LinkSetNsFd(link Link, fd int) error { data := nl.NewRtAttr(nl.IFLA_NET_NS_FD, b) req.AddData(data) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -648,15 +576,15 @@ func LinkSetXdpFd(link Link, fd int) error { func LinkSetXdpFdWithFlags(link Link, fd, flags int) error { base := link.Attrs() ensureIndex(base) - req := nl.NewNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) addXdpAttrs(&LinkXdp{Fd: fd, Flags: uint32(flags)}, req) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -714,8 +642,6 @@ func addVxlanAttrs(vxlan *Vxlan, linkInfo *nl.RtAttr) { nl.NewRtAttrChild(data, nl.IFLA_VXLAN_RSC, boolAttr(vxlan.RSC)) nl.NewRtAttrChild(data, nl.IFLA_VXLAN_L2MISS, boolAttr(vxlan.L2miss)) nl.NewRtAttrChild(data, nl.IFLA_VXLAN_L3MISS, boolAttr(vxlan.L3miss)) - nl.NewRtAttrChild(data, nl.IFLA_VXLAN_UDP_ZERO_CSUM6_TX, boolAttr(vxlan.UDP6ZeroCSumTx)) - nl.NewRtAttrChild(data, nl.IFLA_VXLAN_UDP_ZERO_CSUM6_RX, boolAttr(vxlan.UDP6ZeroCSumRx)) if vxlan.UDPCSum { nl.NewRtAttrChild(data, nl.IFLA_VXLAN_UDP_CSUM, boolAttr(vxlan.UDPCSum)) @@ -840,12 +766,6 @@ func addBondAttrs(bond *Bond, linkInfo *nl.RtAttr) { } } -func cleanupFds(fds []*os.File) { - for _, f := range fds { - f.Close() - } -} - // LinkAdd adds a new link device. The type and features of the device // are taken from the parameters in the link object. // Equivalent to: `ip link add $link` @@ -857,7 +777,7 @@ func LinkAdd(link Link) error { // are taken from the parameters in the link object.
// Equivalent to: `ip link add $link` func (h *Handle) LinkAdd(link Link) error { - return h.linkModify(link, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) + return h.linkModify(link, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) } func (h *Handle) linkModify(link Link, flags int) error { @@ -871,152 +791,101 @@ func (h *Handle) linkModify(link Link, flags int) error { if tuntap, ok := link.(*Tuntap); ok { // TODO: support user // TODO: support group + // TODO: multi_queue // TODO: support non- persistent - if tuntap.Mode < unix.IFF_TUN || tuntap.Mode > unix.IFF_TAP { + if tuntap.Mode < syscall.IFF_TUN || tuntap.Mode > syscall.IFF_TAP { return fmt.Errorf("Tuntap.Mode %v unknown!", tuntap.Mode) } - - queues := tuntap.Queues - - var fds []*os.File + file, err := os.OpenFile("/dev/net/tun", os.O_RDWR, 0) + if err != nil { + return err + } + defer file.Close() var req ifReq - copy(req.Name[:15], base.Name) - - req.Flags = uint16(tuntap.Flags) - - if queues == 0 { //Legacy compatibility - queues = 1 - if tuntap.Flags == 0 { - req.Flags = uint16(TUNTAP_DEFAULTS) - } + if tuntap.Flags == 0 { + req.Flags = uint16(TUNTAP_DEFAULTS) } else { - // For best peformance set Flags to TUNTAP_MULTI_QUEUE_DEFAULTS | TUNTAP_VNET_HDR - // when a) KVM has support for this ABI and - // b) the value of the flag is queryable using the TUNGETIFF ioctl - if tuntap.Flags == 0 { - req.Flags = uint16(TUNTAP_MULTI_QUEUE_DEFAULTS) - } + req.Flags = uint16(tuntap.Flags) } - req.Flags |= uint16(tuntap.Mode) - - for i := 0; i < queues; i++ { - localReq := req - file, err := os.OpenFile("/dev/net/tun", os.O_RDWR, 0) - if err != nil { - cleanupFds(fds) - return err - } - - fds = append(fds, file) - _, _, errno := unix.Syscall(unix.SYS_IOCTL, file.Fd(), uintptr(unix.TUNSETIFF), uintptr(unsafe.Pointer(&localReq))) - if errno != 0 { - cleanupFds(fds) - return fmt.Errorf("Tuntap IOCTL TUNSETIFF failed [%d], errno %v", i, errno) - } + copy(req.Name[:15], base.Name) + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, file.Fd(), uintptr(syscall.TUNSETIFF), uintptr(unsafe.Pointer(&req))) + if errno != 0 { + return fmt.Errorf("Tuntap IOCTL TUNSETIFF failed, errno %v", errno) } - - _, _, errno := unix.Syscall(unix.SYS_IOCTL, fds[0].Fd(), uintptr(unix.TUNSETPERSIST), 1) + _, _, errno = syscall.Syscall(syscall.SYS_IOCTL, file.Fd(), uintptr(syscall.TUNSETPERSIST), 1) if errno != 0 { - cleanupFds(fds) return fmt.Errorf("Tuntap IOCTL TUNSETPERSIST failed, errno %v", errno) } - h.ensureIndex(base) // can't set master during create, so set it afterwards if base.MasterIndex != 0 { // TODO: verify MasterIndex is actually a bridge? 
- err := h.LinkSetMasterByIndex(link, base.MasterIndex) - if err != nil { - _, _, _ = unix.Syscall(unix.SYS_IOCTL, fds[0].Fd(), uintptr(unix.TUNSETPERSIST), 0) - cleanupFds(fds) - return err - } - } - - if tuntap.Queues == 0 { - cleanupFds(fds) - } else { - tuntap.Fds = fds + return h.LinkSetMasterByIndex(link, base.MasterIndex) } - return nil } - req := h.newNetlinkRequest(unix.RTM_NEWLINK, flags) + req := h.newNetlinkRequest(syscall.RTM_NEWLINK, flags) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) // TODO: make it shorter if base.Flags&net.FlagUp != 0 { - msg.Change = unix.IFF_UP - msg.Flags = unix.IFF_UP + msg.Change = syscall.IFF_UP + msg.Flags = syscall.IFF_UP } if base.Flags&net.FlagBroadcast != 0 { - msg.Change |= unix.IFF_BROADCAST - msg.Flags |= unix.IFF_BROADCAST + msg.Change |= syscall.IFF_BROADCAST + msg.Flags |= syscall.IFF_BROADCAST } if base.Flags&net.FlagLoopback != 0 { - msg.Change |= unix.IFF_LOOPBACK - msg.Flags |= unix.IFF_LOOPBACK + msg.Change |= syscall.IFF_LOOPBACK + msg.Flags |= syscall.IFF_LOOPBACK } if base.Flags&net.FlagPointToPoint != 0 { - msg.Change |= unix.IFF_POINTOPOINT - msg.Flags |= unix.IFF_POINTOPOINT + msg.Change |= syscall.IFF_POINTOPOINT + msg.Flags |= syscall.IFF_POINTOPOINT } if base.Flags&net.FlagMulticast != 0 { - msg.Change |= unix.IFF_MULTICAST - msg.Flags |= unix.IFF_MULTICAST - } - if base.Index != 0 { - msg.Index = int32(base.Index) + msg.Change |= syscall.IFF_MULTICAST + msg.Flags |= syscall.IFF_MULTICAST } - req.AddData(msg) if base.ParentIndex != 0 { b := make([]byte, 4) native.PutUint32(b, uint32(base.ParentIndex)) - data := nl.NewRtAttr(unix.IFLA_LINK, b) + data := nl.NewRtAttr(syscall.IFLA_LINK, b) req.AddData(data) } else if link.Type() == "ipvlan" { return fmt.Errorf("Can't create ipvlan link without ParentIndex") } - nameData := nl.NewRtAttr(unix.IFLA_IFNAME, nl.ZeroTerminated(base.Name)) + nameData := nl.NewRtAttr(syscall.IFLA_IFNAME, nl.ZeroTerminated(base.Name)) req.AddData(nameData) if base.MTU > 0 { - mtu := nl.NewRtAttr(unix.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU))) + mtu := nl.NewRtAttr(syscall.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU))) req.AddData(mtu) } if base.TxQLen >= 0 { - qlen := nl.NewRtAttr(unix.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen))) + qlen := nl.NewRtAttr(syscall.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen))) req.AddData(qlen) } if base.HardwareAddr != nil { - hwaddr := nl.NewRtAttr(unix.IFLA_ADDRESS, []byte(base.HardwareAddr)) + hwaddr := nl.NewRtAttr(syscall.IFLA_ADDRESS, []byte(base.HardwareAddr)) req.AddData(hwaddr) } - if base.NumTxQueues > 0 { - txqueues := nl.NewRtAttr(nl.IFLA_NUM_TX_QUEUES, nl.Uint32Attr(uint32(base.NumTxQueues))) - req.AddData(txqueues) - } - - if base.NumRxQueues > 0 { - rxqueues := nl.NewRtAttr(nl.IFLA_NUM_RX_QUEUES, nl.Uint32Attr(uint32(base.NumRxQueues))) - req.AddData(rxqueues) - } - if base.Namespace != nil { var attr *nl.RtAttr switch base.Namespace.(type) { case NsPid: val := nl.Uint32Attr(uint32(base.Namespace.(NsPid))) - attr = nl.NewRtAttr(unix.IFLA_NET_NS_PID, val) + attr = nl.NewRtAttr(syscall.IFLA_NET_NS_PID, val) case NsFd: val := nl.Uint32Attr(uint32(base.Namespace.(NsFd))) attr = nl.NewRtAttr(nl.IFLA_NET_NS_FD, val) @@ -1029,7 +898,7 @@ func (h *Handle) linkModify(link Link, flags int) error { addXdpAttrs(base.Xdp, req) } - linkInfo := nl.NewRtAttr(unix.IFLA_LINKINFO, nil) + linkInfo := nl.NewRtAttr(syscall.IFLA_LINKINFO, nil) nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_KIND, nl.NonZeroTerminated(link.Type())) switch link := 
link.(type) { @@ -1041,13 +910,13 @@ func (h *Handle) linkModify(link Link, flags int) error { case *Veth: data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) peer := nl.NewRtAttrChild(data, nl.VETH_INFO_PEER, nil) - nl.NewIfInfomsgChild(peer, unix.AF_UNSPEC) - nl.NewRtAttrChild(peer, unix.IFLA_IFNAME, nl.ZeroTerminated(link.PeerName)) + nl.NewIfInfomsgChild(peer, syscall.AF_UNSPEC) + nl.NewRtAttrChild(peer, syscall.IFLA_IFNAME, nl.ZeroTerminated(link.PeerName)) if base.TxQLen >= 0 { - nl.NewRtAttrChild(peer, unix.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen))) + nl.NewRtAttrChild(peer, syscall.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen))) } if base.MTU > 0 { - nl.NewRtAttrChild(peer, unix.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU))) + nl.NewRtAttrChild(peer, syscall.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU))) } case *Vxlan: @@ -1071,8 +940,6 @@ func (h *Handle) linkModify(link Link, flags int) error { addGretapAttrs(link, linkInfo) case *Iptun: addIptunAttrs(link, linkInfo) - case *Sittun: - addSittunAttrs(link, linkInfo) case *Gretun: addGretunAttrs(link, linkInfo) case *Vti: @@ -1087,7 +954,7 @@ func (h *Handle) linkModify(link Link, flags int) error { req.AddData(linkInfo) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) if err != nil { return err } @@ -1117,13 +984,13 @@ func (h *Handle) LinkDel(link Link) error { h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_DELLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_DELLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -1166,16 +1033,16 @@ func (h *Handle) LinkByName(name string) (Link, error) { return h.linkByNameDump(name) } - req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) req.AddData(msg) - nameData := nl.NewRtAttr(unix.IFLA_IFNAME, nl.ZeroTerminated(name)) + nameData := nl.NewRtAttr(syscall.IFLA_IFNAME, nl.ZeroTerminated(name)) req.AddData(nameData) link, err := execGetLink(req) - if err == unix.EINVAL { + if err == syscall.EINVAL { // older kernels don't support looking up via IFLA_IFNAME // so fall back to dumping all links h.lookupByDump = true @@ -1198,16 +1065,16 @@ func (h *Handle) LinkByAlias(alias string) (Link, error) { return h.linkByAliasDump(alias) } - req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) req.AddData(msg) - nameData := nl.NewRtAttr(unix.IFLA_IFALIAS, nl.ZeroTerminated(alias)) + nameData := nl.NewRtAttr(syscall.IFLA_IFALIAS, nl.ZeroTerminated(alias)) req.AddData(nameData) link, err := execGetLink(req) - if err == unix.EINVAL { + if err == syscall.EINVAL { // older kernels don't support looking up via IFLA_IFALIAS // so fall back to dumping all links h.lookupByDump = true @@ -1224,9 +1091,9 @@ func LinkByIndex(index int) (Link, error) { // LinkByIndex finds a link by index and returns a pointer to the object. 
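The lookup hunks above keep the library's fallback behavior: on older kernels that reject `IFLA_IFNAME`/`IFLA_IFALIAS` lookups with `EINVAL`, the handle flips `lookupByDump` and resolves the link from a full `RTM_GETLINK` dump instead. A hedged sketch of the caller-facing API, assuming only that the loopback device exists:

```go
package main

import (
	"fmt"
	"log"

	"github.com/vishvananda/netlink"
)

func main() {
	// "lo" is assumed present on any Linux host. LinkByName transparently
	// falls back to dumping all links on kernels that return EINVAL for
	// IFLA_IFNAME lookups, as shown in the hunks above.
	link, err := netlink.LinkByName("lo")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(link.Attrs().Index, link.Type())
}
```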
func (h *Handle) LinkByIndex(index int) (Link, error) { - req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) msg.Index = int32(index) req.AddData(msg) @@ -1234,10 +1101,10 @@ func (h *Handle) LinkByIndex(index int) (Link, error) { } func execGetLink(req *nl.NetlinkRequest) (Link, error) { - msgs, err := req.Execute(unix.NETLINK_ROUTE, 0) + msgs, err := req.Execute(syscall.NETLINK_ROUTE, 0) if err != nil { if errno, ok := err.(syscall.Errno); ok { - if errno == unix.ENODEV { + if errno == syscall.ENODEV { return nil, LinkNotFoundError{fmt.Errorf("Link not found")} } } @@ -1258,7 +1125,7 @@ func execGetLink(req *nl.NetlinkRequest) (Link, error) { // linkDeserialize deserializes a raw message received from netlink into // a link object. -func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { +func LinkDeserialize(hdr *syscall.NlMsghdr, m []byte) (Link, error) { msg := nl.DeserializeIfInfomsg(m) attrs, err := nl.ParseRouteAttr(m[msg.Len():]) @@ -1267,7 +1134,7 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { } base := LinkAttrs{Index: int(msg.Index), RawFlags: msg.Flags, Flags: linkFlags(msg.Flags), EncapType: msg.EncapType()} - if msg.Flags&unix.IFF_PROMISC != 0 { + if msg.Flags&syscall.IFF_PROMISC != 0 { base.Promisc = 1 } var ( @@ -1278,7 +1145,7 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { ) for _, attr := range attrs { switch attr.Attr.Type { - case unix.IFLA_LINKINFO: + case syscall.IFLA_LINKINFO: infos, err := nl.ParseRouteAttr(attr.Value) if err != nil { return nil, err @@ -1312,8 +1179,6 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { link = &Gretap{} case "ipip": link = &Iptun{} - case "sit": - link = &Sittun{} case "gre": link = &Gretun{} case "vti": @@ -1347,8 +1212,6 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { parseGretapData(link, data) case "ipip": parseIptunData(link, data) - case "sit": - parseSittunData(link, data) case "gre": parseGretunData(link, data) case "vti": @@ -1362,7 +1225,7 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { } } } - case unix.IFLA_ADDRESS: + case syscall.IFLA_ADDRESS: var nonzero bool for _, b := range attr.Value { if b != 0 { @@ -1372,19 +1235,19 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { if nonzero { base.HardwareAddr = attr.Value[:] } - case unix.IFLA_IFNAME: + case syscall.IFLA_IFNAME: base.Name = string(attr.Value[:len(attr.Value)-1]) - case unix.IFLA_MTU: + case syscall.IFLA_MTU: base.MTU = int(native.Uint32(attr.Value[0:4])) - case unix.IFLA_LINK: + case syscall.IFLA_LINK: base.ParentIndex = int(native.Uint32(attr.Value[0:4])) - case unix.IFLA_MASTER: + case syscall.IFLA_MASTER: base.MasterIndex = int(native.Uint32(attr.Value[0:4])) - case unix.IFLA_TXQLEN: + case syscall.IFLA_TXQLEN: base.TxQLen = int(native.Uint32(attr.Value[0:4])) - case unix.IFLA_IFALIAS: + case syscall.IFLA_IFALIAS: base.Alias = string(attr.Value[:len(attr.Value)-1]) - case unix.IFLA_STATS: + case syscall.IFLA_STATS: stats32 = attr.Value[:] case IFLA_STATS64: stats64 = attr.Value[:] @@ -1394,16 +1257,16 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { return nil, err } base.Xdp = xdp - case unix.IFLA_PROTINFO | unix.NLA_F_NESTED: - if hdr != nil && hdr.Type == unix.RTM_NEWLINK && - msg.Family == unix.AF_BRIDGE { + case syscall.IFLA_PROTINFO 
| syscall.NLA_F_NESTED: + if hdr != nil && hdr.Type == syscall.RTM_NEWLINK && + msg.Family == syscall.AF_BRIDGE { attrs, err := nl.ParseRouteAttr(attr.Value[:]) if err != nil { return nil, err } base.Protinfo = parseProtinfo(attrs) } - case unix.IFLA_OPERSTATE: + case syscall.IFLA_OPERSTATE: base.OperState = LinkOperState(uint8(attr.Value[0])) case nl.IFLA_LINK_NETNSID: base.NetNsID = int(native.Uint32(attr.Value[0:4])) @@ -1436,12 +1299,12 @@ func LinkList() ([]Link, error) { func (h *Handle) LinkList() ([]Link, error) { // NOTE(vish): This duplicates functionality in net/iface_linux.go, but we need // to get the message ourselves to parse link type. - req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_DUMP) + req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWLINK) + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWLINK) if err != nil { return nil, err } @@ -1461,7 +1324,7 @@ func (h *Handle) LinkList() ([]Link, error) { // LinkUpdate is used to pass information back from LinkSubscribe() type LinkUpdate struct { nl.IfInfomsg - Header unix.NlMsghdr + Header syscall.NlMsghdr Link } @@ -1496,7 +1359,7 @@ func LinkSubscribeWithOptions(ch chan<- LinkUpdate, done <-chan struct{}, option } func linkSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-chan struct{}, cberr func(error)) error { - s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_LINK) + s, err := nl.SubscribeAt(newNs, curNs, syscall.NETLINK_ROUTE, syscall.RTNLGRP_LINK) if err != nil { return err } @@ -1518,15 +1381,14 @@ func linkSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-c } for _, m := range msgs { ifmsg := nl.DeserializeIfInfomsg(m.Data) - header := unix.NlMsghdr(m.Header) - link, err := LinkDeserialize(&header, m.Data) + link, err := LinkDeserialize(&m.Header, m.Data) if err != nil { if cberr != nil { cberr(err) } return } - ch <- LinkUpdate{IfInfomsg: *ifmsg, Header: header, Link: link} + ch <- LinkUpdate{IfInfomsg: *ifmsg, Header: m.Header, Link: link} } } }() @@ -1601,16 +1463,16 @@ func (h *Handle) LinkSetBrProxyArpWiFi(link Link, mode bool) error { func (h *Handle) setProtinfoAttr(link Link, mode bool, attr int) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_BRIDGE) + msg := nl.NewIfInfomsg(syscall.AF_BRIDGE) msg.Index = int32(base.Index) req.AddData(msg) - br := nl.NewRtAttr(unix.IFLA_PROTINFO|unix.NLA_F_NESTED, nil) + br := nl.NewRtAttr(syscall.IFLA_PROTINFO|syscall.NLA_F_NESTED, nil) nl.NewRtAttrChild(br, attr, boolToByte(mode)) req.AddData(br) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) if err != nil { return err } @@ -1628,19 +1490,19 @@ func LinkSetTxQLen(link Link, qlen int) error { func (h *Handle) LinkSetTxQLen(link Link, qlen int) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) b := make([]byte, 4) native.PutUint32(b, uint32(qlen)) - data := nl.NewRtAttr(unix.IFLA_TXQLEN, b) + 
data := nl.NewRtAttr(syscall.IFLA_TXQLEN, b) req.AddData(data) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -1686,10 +1548,6 @@ func parseVxlanData(link Link, data []syscall.NetlinkRouteAttr) { vxlan.L3miss = int8(datum.Value[0]) != 0 case nl.IFLA_VXLAN_UDP_CSUM: vxlan.UDPCSum = int8(datum.Value[0]) != 0 - case nl.IFLA_VXLAN_UDP_ZERO_CSUM6_TX: - vxlan.UDP6ZeroCSumTx = int8(datum.Value[0]) != 0 - case nl.IFLA_VXLAN_UDP_ZERO_CSUM6_RX: - vxlan.UDP6ZeroCSumRx = int8(datum.Value[0]) != 0 case nl.IFLA_VXLAN_GBP: vxlan.GBP = true case nl.IFLA_VXLAN_FLOWBASED: @@ -1792,8 +1650,7 @@ func parseMacvtapData(link Link, data []syscall.NetlinkRouteAttr) { func parseMacvlanData(link Link, data []syscall.NetlinkRouteAttr) { macv := link.(*Macvlan) for _, datum := range data { - switch datum.Attr.Type { - case nl.IFLA_MACVLAN_MODE: + if datum.Attr.Type == nl.IFLA_MACVLAN_MODE { switch native.Uint32(datum.Value[0:4]) { case nl.MACVLAN_MODE_PRIVATE: macv.Mode = MACVLAN_MODE_PRIVATE @@ -1806,16 +1663,7 @@ func parseMacvlanData(link Link, data []syscall.NetlinkRouteAttr) { case nl.MACVLAN_MODE_SOURCE: macv.Mode = MACVLAN_MODE_SOURCE } - case nl.IFLA_MACVLAN_MACADDR_COUNT: - macv.MACAddrs = make([]net.HardwareAddr, 0, int(native.Uint32(datum.Value[0:4]))) - case nl.IFLA_MACVLAN_MACADDR_DATA: - macs, err := nl.ParseRouteAttr(datum.Value[:]) - if err != nil { - panic(fmt.Sprintf("failed to ParseRouteAttr for IFLA_MACVLAN_MACADDR_DATA: %v", err)) - } - for _, macDatum := range macs { - macv.MACAddrs = append(macv.MACAddrs, net.HardwareAddr(macDatum.Value[0:6])) - } + return } } } @@ -1823,19 +1671,19 @@ func parseMacvlanData(link Link, data []syscall.NetlinkRouteAttr) { // copied from pkg/net_linux.go func linkFlags(rawFlags uint32) net.Flags { var f net.Flags - if rawFlags&unix.IFF_UP != 0 { + if rawFlags&syscall.IFF_UP != 0 { f |= net.FlagUp } - if rawFlags&unix.IFF_BROADCAST != 0 { + if rawFlags&syscall.IFF_BROADCAST != 0 { f |= net.FlagBroadcast } - if rawFlags&unix.IFF_LOOPBACK != 0 { + if rawFlags&syscall.IFF_LOOPBACK != 0 { f |= net.FlagLoopback } - if rawFlags&unix.IFF_POINTOPOINT != 0 { + if rawFlags&syscall.IFF_POINTOPOINT != 0 { f |= net.FlagPointToPoint } - if rawFlags&unix.IFF_MULTICAST != 0 { + if rawFlags&syscall.IFF_MULTICAST != 0 { f |= net.FlagMulticast } return f @@ -1917,9 +1765,7 @@ func parseGretapData(link Link, data []syscall.NetlinkRouteAttr) { case nl.IFLA_GRE_ENCAP_FLAGS: gre.EncapFlags = native.Uint16(datum.Value[0:2]) case nl.IFLA_GRE_COLLECT_METADATA: - if len(datum.Value) > 0 { - gre.FlowBased = int8(datum.Value[0]) != 0 - } + gre.FlowBased = int8(datum.Value[0]) != 0 } } } @@ -1956,10 +1802,6 @@ func addGretunAttrs(gre *Gretun, linkInfo *nl.RtAttr) { nl.NewRtAttrChild(data, nl.IFLA_GRE_PMTUDISC, nl.Uint8Attr(gre.PMtuDisc)) nl.NewRtAttrChild(data, nl.IFLA_GRE_TTL, nl.Uint8Attr(gre.Ttl)) nl.NewRtAttrChild(data, nl.IFLA_GRE_TOS, nl.Uint8Attr(gre.Tos)) - nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_TYPE, nl.Uint16Attr(gre.EncapType)) - nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_FLAGS, nl.Uint16Attr(gre.EncapFlags)) - nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_SPORT, htons(gre.EncapSport)) - nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_DPORT, htons(gre.EncapDport)) } func parseGretunData(link Link, data []syscall.NetlinkRouteAttr) { @@ -1985,14 +1827,6 @@ func parseGretunData(link Link, data []syscall.NetlinkRouteAttr) { gre.Tos = uint8(datum.Value[0]) case nl.IFLA_GRE_PMTUDISC: gre.PMtuDisc = uint8(datum.Value[0]) - case 
nl.IFLA_GRE_ENCAP_TYPE: - gre.EncapType = native.Uint16(datum.Value[0:2]) - case nl.IFLA_GRE_ENCAP_FLAGS: - gre.EncapFlags = native.Uint16(datum.Value[0:2]) - case nl.IFLA_GRE_ENCAP_SPORT: - gre.EncapSport = ntohs(datum.Value[0:2]) - case nl.IFLA_GRE_ENCAP_DPORT: - gre.EncapDport = ntohs(datum.Value[0:2]) } } } @@ -2006,7 +1840,7 @@ func parseLinkStats64(data []byte) *LinkStatistics { } func addXdpAttrs(xdp *LinkXdp, req *nl.NetlinkRequest) { - attrs := nl.NewRtAttr(nl.IFLA_XDP|unix.NLA_F_NESTED, nil) + attrs := nl.NewRtAttr(nl.IFLA_XDP|syscall.NLA_F_NESTED, nil) b := make([]byte, 4) native.PutUint32(b, uint32(xdp.Fd)) nl.NewRtAttrChild(attrs, nl.IFLA_XDP_FD, b) @@ -2039,12 +1873,6 @@ func parseLinkXdp(data []byte) (*LinkXdp, error) { } func addIptunAttrs(iptun *Iptun, linkInfo *nl.RtAttr) { - if iptun.FlowBased { - // In flow based mode, no other attributes need to be configured - nl.NewRtAttrChild(linkInfo, nl.IFLA_IPTUN_COLLECT_METADATA, boolAttr(iptun.FlowBased)) - return - } - data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) ip := iptun.Local.To4() @@ -2063,10 +1891,6 @@ func addIptunAttrs(iptun *Iptun, linkInfo *nl.RtAttr) { nl.NewRtAttrChild(data, nl.IFLA_IPTUN_PMTUDISC, nl.Uint8Attr(iptun.PMtuDisc)) nl.NewRtAttrChild(data, nl.IFLA_IPTUN_TTL, nl.Uint8Attr(iptun.Ttl)) nl.NewRtAttrChild(data, nl.IFLA_IPTUN_TOS, nl.Uint8Attr(iptun.Tos)) - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_TYPE, nl.Uint16Attr(iptun.EncapType)) - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_FLAGS, nl.Uint16Attr(iptun.EncapFlags)) - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_SPORT, htons(iptun.EncapSport)) - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_DPORT, htons(iptun.EncapDport)) } func parseIptunData(link Link, data []syscall.NetlinkRouteAttr) { @@ -2083,72 +1907,6 @@ func parseIptunData(link Link, data []syscall.NetlinkRouteAttr) { iptun.Tos = uint8(datum.Value[0]) case nl.IFLA_IPTUN_PMTUDISC: iptun.PMtuDisc = uint8(datum.Value[0]) - case nl.IFLA_IPTUN_ENCAP_SPORT: - iptun.EncapSport = ntohs(datum.Value[0:2]) - case nl.IFLA_IPTUN_ENCAP_DPORT: - iptun.EncapDport = ntohs(datum.Value[0:2]) - case nl.IFLA_IPTUN_ENCAP_TYPE: - iptun.EncapType = native.Uint16(datum.Value[0:2]) - case nl.IFLA_IPTUN_ENCAP_FLAGS: - iptun.EncapFlags = native.Uint16(datum.Value[0:2]) - case nl.IFLA_IPTUN_COLLECT_METADATA: - iptun.FlowBased = int8(datum.Value[0]) != 0 - } - } -} - -func addSittunAttrs(sittun *Sittun, linkInfo *nl.RtAttr) { - data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) - - if sittun.Link != 0 { - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_LINK, nl.Uint32Attr(sittun.Link)) - } - - ip := sittun.Local.To4() - if ip != nil { - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_LOCAL, []byte(ip)) - } - - ip = sittun.Remote.To4() - if ip != nil { - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_REMOTE, []byte(ip)) - } - - if sittun.Ttl > 0 { - // Would otherwise fail on 3.10 kernel - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_TTL, nl.Uint8Attr(sittun.Ttl)) - } - - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_TOS, nl.Uint8Attr(sittun.Tos)) - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_PMTUDISC, nl.Uint8Attr(sittun.PMtuDisc)) - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_TYPE, nl.Uint16Attr(sittun.EncapType)) - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_FLAGS, nl.Uint16Attr(sittun.EncapFlags)) - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_SPORT, htons(sittun.EncapSport)) - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_DPORT, htons(sittun.EncapDport)) -} - -func parseSittunData(link Link, data []syscall.NetlinkRouteAttr) { - sittun := 
link.(*Sittun)
-	for _, datum := range data {
-		switch datum.Attr.Type {
-		case nl.IFLA_IPTUN_LOCAL:
-			sittun.Local = net.IP(datum.Value[0:4])
-		case nl.IFLA_IPTUN_REMOTE:
-			sittun.Remote = net.IP(datum.Value[0:4])
-		case nl.IFLA_IPTUN_TTL:
-			sittun.Ttl = uint8(datum.Value[0])
-		case nl.IFLA_IPTUN_TOS:
-			sittun.Tos = uint8(datum.Value[0])
-		case nl.IFLA_IPTUN_PMTUDISC:
-			sittun.PMtuDisc = uint8(datum.Value[0])
-		case nl.IFLA_IPTUN_ENCAP_TYPE:
-			sittun.EncapType = native.Uint16(datum.Value[0:2])
-		case nl.IFLA_IPTUN_ENCAP_FLAGS:
-			sittun.EncapFlags = native.Uint16(datum.Value[0:2])
-		case nl.IFLA_IPTUN_ENCAP_SPORT:
-			sittun.EncapSport = ntohs(datum.Value[0:2])
-		case nl.IFLA_IPTUN_ENCAP_DPORT:
-			sittun.EncapDport = ntohs(datum.Value[0:2])
 		}
 	}
 }
diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/neigh.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/neigh.go
index 3f5cd497a739..6a6f71ce866d 100644
--- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/neigh.go
+++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/neigh.go
@@ -15,8 +15,6 @@ type Neigh struct {
 	IP net.IP
 	HardwareAddr net.HardwareAddr
 	LLIPAddr net.IP //Used in the case of NHRP
-	Vlan int
-	VNI int
 }
 
 // String returns $ip/$hwaddr $label
diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/neigh_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/neigh_linux.go
index f75c22649f90..5edc8b41259c 100644
--- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/neigh_linux.go
+++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/neigh_linux.go
@@ -2,10 +2,10 @@ package netlink
 
 import (
 	"net"
+	"syscall"
 	"unsafe"
 
 	"github.com/vishvananda/netlink/nl"
-	"golang.org/x/sys/unix"
 )
 
 const (
@@ -73,7 +73,7 @@ func NeighAdd(neigh *Neigh) error {
 // NeighAdd will add an IP to MAC mapping to the ARP table
 // Equivalent to: `ip neigh add ....`
 func (h *Handle) NeighAdd(neigh *Neigh) error {
-	return h.neighAdd(neigh, unix.NLM_F_CREATE|unix.NLM_F_EXCL)
+	return h.neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL)
 }
 
 // NeighSet will add or replace an IP to MAC mapping to the ARP table
@@ -85,7 +85,7 @@ func NeighSet(neigh *Neigh) error {
 // NeighSet will add or replace an IP to MAC mapping to the ARP table
 // Equivalent to: `ip neigh replace....`
 func (h *Handle) NeighSet(neigh *Neigh) error {
-	return h.neighAdd(neigh, unix.NLM_F_CREATE|unix.NLM_F_REPLACE)
+	return h.neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_REPLACE)
 }
 
 // NeighAppend will append an entry to FDB
@@ -97,7 +97,7 @@ func NeighAppend(neigh *Neigh) error {
 // NeighAppend will append an entry to FDB
 // Equivalent to: `bridge fdb append...`
 func (h *Handle) NeighAppend(neigh *Neigh) error {
-	return h.neighAdd(neigh, unix.NLM_F_CREATE|unix.NLM_F_APPEND)
+	return h.neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_APPEND)
 }
 
 // NeighAppend will append an entry to FDB
@@ -109,7 +109,7 @@ func neighAdd(neigh *Neigh, mode int) error {
 // NeighAppend will append an entry to FDB
 // Equivalent to: `bridge fdb append...`
 func (h *Handle) neighAdd(neigh *Neigh, mode int) error {
-	req := h.newNetlinkRequest(unix.RTM_NEWNEIGH, mode|unix.NLM_F_ACK)
+	req := h.newNetlinkRequest(syscall.RTM_NEWNEIGH, mode|syscall.NLM_F_ACK)
 	return neighHandle(neigh, req)
 }
 
@@ -122,7 +122,7 @@ func NeighDel(neigh *Neigh) error {
 // NeighDel will delete an IP to MAC mapping from the ARP table
 // Equivalent to: `ip neigh del ....`
 func (h *Handle) NeighDel(neigh *Neigh) error {
-	req := h.newNetlinkRequest(unix.RTM_DELNEIGH, unix.NLM_F_ACK)
+	req := h.newNetlinkRequest(syscall.RTM_DELNEIGH, syscall.NLM_F_ACK)
 	return neighHandle(neigh, req)
 }
 
@@ -160,17 +160,7 @@ func neighHandle(neigh *Neigh, req *nl.NetlinkRequest) error {
 		req.AddData(hwData)
 	}
 
-	if neigh.Vlan != 0 {
-		vlanData := nl.NewRtAttr(NDA_VLAN, nl.Uint16Attr(uint16(neigh.Vlan)))
-		req.AddData(vlanData)
-	}
-
-	if neigh.VNI != 0 {
-		vniData := nl.NewRtAttr(NDA_VNI, nl.Uint32Attr(uint32(neigh.VNI)))
-		req.AddData(vniData)
-	}
-
-	_, err := req.Execute(unix.NETLINK_ROUTE, 0)
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
 	return err
 }
 
@@ -203,7 +193,7 @@ func (h *Handle) NeighProxyList(linkIndex, family int) ([]Neigh, error) {
 }
 
 func (h *Handle) neighList(linkIndex, family, flags int) ([]Neigh, error) {
-	req := h.newNetlinkRequest(unix.RTM_GETNEIGH, unix.NLM_F_DUMP)
+	req := h.newNetlinkRequest(syscall.RTM_GETNEIGH, syscall.NLM_F_DUMP)
 	msg := Ndmsg{
 		Family: uint8(family),
 		Index: uint32(linkIndex),
@@ -211,7 +201,7 @@ func (h *Handle) neighList(linkIndex, family, flags int) ([]Neigh, error) {
 	}
 	req.AddData(&msg)
 
-	msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWNEIGH)
+	msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWNEIGH)
 	if err != nil {
 		return nil, err
 	}
 
@@ -267,7 +257,7 @@ func NeighDeserialize(m []byte) (*Neigh, error) {
 			// BUG: Is this a bug in the netlink library?
 			// #define RTA_LENGTH(len) (RTA_ALIGN(sizeof(struct rtattr)) + (len))
 			// #define RTA_PAYLOAD(rta) ((int)((rta)->rta_len) - RTA_LENGTH(0))
-			attrLen := attr.Attr.Len - unix.SizeofRtAttr
+			attrLen := attr.Attr.Len - syscall.SizeofRtAttr
 			if attrLen == 4 && (encapType == "ipip" ||
 				encapType == "sit" ||
 				encapType == "gre") {
@@ -278,10 +268,6 @@ func NeighDeserialize(m []byte) (*Neigh, error) {
 			} else {
 				neigh.HardwareAddr = net.HardwareAddr(attr.Value)
 			}
-		case NDA_VLAN:
-			neigh.Vlan = int(native.Uint16(attr.Value[0:2]))
-		case NDA_VNI:
-			neigh.VNI = int(native.Uint32(attr.Value[0:4]))
 		}
 	}
 
diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/addr_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/addr_linux.go
index 50db3b4cdd87..fe362e9fa7c3 100644
--- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/addr_linux.go
+++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/addr_linux.go
@@ -1,18 +1,17 @@
 package nl
 
 import (
+	"syscall"
 	"unsafe"
-
-	"golang.org/x/sys/unix"
 )
 
 type IfAddrmsg struct {
-	unix.IfAddrmsg
+	syscall.IfAddrmsg
 }
 
 func NewIfAddrmsg(family int) *IfAddrmsg {
 	return &IfAddrmsg{
-		IfAddrmsg: unix.IfAddrmsg{
+		IfAddrmsg: syscall.IfAddrmsg{
 			Family: uint8(family),
 		},
 	}
@@ -36,15 +35,15 @@ func NewIfAddrmsg(family int) *IfAddrmsg {
 // SizeofIfAddrmsg = 0x8
 
 func DeserializeIfAddrmsg(b []byte) *IfAddrmsg {
-	return (*IfAddrmsg)(unsafe.Pointer(&b[0:unix.SizeofIfAddrmsg][0]))
+	return (*IfAddrmsg)(unsafe.Pointer(&b[0:syscall.SizeofIfAddrmsg][0]))
 }
 
 func (msg *IfAddrmsg) Serialize() []byte {
-	return (*(*[unix.SizeofIfAddrmsg]byte)(unsafe.Pointer(msg)))[:]
+	return (*(*[syscall.SizeofIfAddrmsg]byte)(unsafe.Pointer(msg)))[:]
 }
 
 func (msg *IfAddrmsg) Len() int {
-	return unix.SizeofIfAddrmsg
+	return syscall.SizeofIfAddrmsg
 }
 
 // struct ifa_cacheinfo {
diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/link_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/link_linux.go
index ba0b3e19c676..9ae65a12c2af 100644
---
a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/link_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/link_linux.go @@ -1,15 +1,14 @@ package nl import ( + "syscall" "unsafe" - - "golang.org/x/sys/unix" ) const ( DEFAULT_CHANGE = 0xFFFFFFFF // doesn't exist in syscall - IFLA_VFINFO_LIST = unix.IFLA_IFALIAS + 1 + iota + IFLA_VFINFO_LIST = syscall.IFLA_IFALIAS + 1 + iota IFLA_STATS64 IFLA_VF_PORTS IFLA_PORT_SELF @@ -119,10 +118,6 @@ const ( IFLA_MACVLAN_UNSPEC = iota IFLA_MACVLAN_MODE IFLA_MACVLAN_FLAGS - IFLA_MACVLAN_MACADDR_MODE - IFLA_MACVLAN_MACADDR - IFLA_MACVLAN_MACADDR_DATA - IFLA_MACVLAN_MACADDR_COUNT IFLA_MACVLAN_MAX = IFLA_MACVLAN_FLAGS ) @@ -134,13 +129,6 @@ const ( MACVLAN_MODE_SOURCE = 16 ) -const ( - MACVLAN_MACADDR_ADD = iota - MACVLAN_MACADDR_DEL - MACVLAN_MACADDR_FLUSH - MACVLAN_MACADDR_SET -) - const ( IFLA_BOND_UNSPEC = iota IFLA_BOND_MODE @@ -487,12 +475,7 @@ const ( IFLA_IPTUN_6RD_RELAY_PREFIX IFLA_IPTUN_6RD_PREFIXLEN IFLA_IPTUN_6RD_RELAY_PREFIXLEN - IFLA_IPTUN_ENCAP_TYPE - IFLA_IPTUN_ENCAP_FLAGS - IFLA_IPTUN_ENCAP_SPORT - IFLA_IPTUN_ENCAP_DPORT - IFLA_IPTUN_COLLECT_METADATA - IFLA_IPTUN_MAX = IFLA_IPTUN_COLLECT_METADATA + IFLA_IPTUN_MAX = IFLA_IPTUN_6RD_RELAY_PREFIXLEN ) const ( diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/nl_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/nl_linux.go index bc8e82c2cc4d..72f7f6af3c81 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/nl_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/nl_linux.go @@ -13,19 +13,18 @@ import ( "unsafe" "github.com/vishvananda/netns" - "golang.org/x/sys/unix" ) const ( // Family type definitions - FAMILY_ALL = unix.AF_UNSPEC - FAMILY_V4 = unix.AF_INET - FAMILY_V6 = unix.AF_INET6 + FAMILY_ALL = syscall.AF_UNSPEC + FAMILY_V4 = syscall.AF_INET + FAMILY_V6 = syscall.AF_INET6 FAMILY_MPLS = AF_MPLS ) // SupportedNlFamilies contains the list of netlink families this netlink package supports -var SupportedNlFamilies = []int{unix.NETLINK_ROUTE, unix.NETLINK_XFRM, unix.NETLINK_NETFILTER} +var SupportedNlFamilies = []int{syscall.NETLINK_ROUTE, syscall.NETLINK_XFRM, syscall.NETLINK_NETFILTER} var nextSeqNr uint32 @@ -78,161 +77,161 @@ type NetlinkRequestData interface { // IfInfomsg is related to links, but it is used for list requests as well type IfInfomsg struct { - unix.IfInfomsg + syscall.IfInfomsg } // Create an IfInfomsg with family specified func NewIfInfomsg(family int) *IfInfomsg { return &IfInfomsg{ - IfInfomsg: unix.IfInfomsg{ + IfInfomsg: syscall.IfInfomsg{ Family: uint8(family), }, } } func DeserializeIfInfomsg(b []byte) *IfInfomsg { - return (*IfInfomsg)(unsafe.Pointer(&b[0:unix.SizeofIfInfomsg][0])) + return (*IfInfomsg)(unsafe.Pointer(&b[0:syscall.SizeofIfInfomsg][0])) } func (msg *IfInfomsg) Serialize() []byte { - return (*(*[unix.SizeofIfInfomsg]byte)(unsafe.Pointer(msg)))[:] + return (*(*[syscall.SizeofIfInfomsg]byte)(unsafe.Pointer(msg)))[:] } func (msg *IfInfomsg) Len() int { - return unix.SizeofIfInfomsg + return syscall.SizeofIfInfomsg } func (msg *IfInfomsg) EncapType() string { switch msg.Type { case 0: return "generic" - case unix.ARPHRD_ETHER: + case syscall.ARPHRD_ETHER: return "ether" - case unix.ARPHRD_EETHER: + case syscall.ARPHRD_EETHER: return "eether" - case unix.ARPHRD_AX25: + case syscall.ARPHRD_AX25: return "ax25" - case unix.ARPHRD_PRONET: + case syscall.ARPHRD_PRONET: return "pronet" - case unix.ARPHRD_CHAOS: + case syscall.ARPHRD_CHAOS: 
return "chaos" - case unix.ARPHRD_IEEE802: + case syscall.ARPHRD_IEEE802: return "ieee802" - case unix.ARPHRD_ARCNET: + case syscall.ARPHRD_ARCNET: return "arcnet" - case unix.ARPHRD_APPLETLK: + case syscall.ARPHRD_APPLETLK: return "atalk" - case unix.ARPHRD_DLCI: + case syscall.ARPHRD_DLCI: return "dlci" - case unix.ARPHRD_ATM: + case syscall.ARPHRD_ATM: return "atm" - case unix.ARPHRD_METRICOM: + case syscall.ARPHRD_METRICOM: return "metricom" - case unix.ARPHRD_IEEE1394: + case syscall.ARPHRD_IEEE1394: return "ieee1394" - case unix.ARPHRD_INFINIBAND: + case syscall.ARPHRD_INFINIBAND: return "infiniband" - case unix.ARPHRD_SLIP: + case syscall.ARPHRD_SLIP: return "slip" - case unix.ARPHRD_CSLIP: + case syscall.ARPHRD_CSLIP: return "cslip" - case unix.ARPHRD_SLIP6: + case syscall.ARPHRD_SLIP6: return "slip6" - case unix.ARPHRD_CSLIP6: + case syscall.ARPHRD_CSLIP6: return "cslip6" - case unix.ARPHRD_RSRVD: + case syscall.ARPHRD_RSRVD: return "rsrvd" - case unix.ARPHRD_ADAPT: + case syscall.ARPHRD_ADAPT: return "adapt" - case unix.ARPHRD_ROSE: + case syscall.ARPHRD_ROSE: return "rose" - case unix.ARPHRD_X25: + case syscall.ARPHRD_X25: return "x25" - case unix.ARPHRD_HWX25: + case syscall.ARPHRD_HWX25: return "hwx25" - case unix.ARPHRD_PPP: + case syscall.ARPHRD_PPP: return "ppp" - case unix.ARPHRD_HDLC: + case syscall.ARPHRD_HDLC: return "hdlc" - case unix.ARPHRD_LAPB: + case syscall.ARPHRD_LAPB: return "lapb" - case unix.ARPHRD_DDCMP: + case syscall.ARPHRD_DDCMP: return "ddcmp" - case unix.ARPHRD_RAWHDLC: + case syscall.ARPHRD_RAWHDLC: return "rawhdlc" - case unix.ARPHRD_TUNNEL: + case syscall.ARPHRD_TUNNEL: return "ipip" - case unix.ARPHRD_TUNNEL6: + case syscall.ARPHRD_TUNNEL6: return "tunnel6" - case unix.ARPHRD_FRAD: + case syscall.ARPHRD_FRAD: return "frad" - case unix.ARPHRD_SKIP: + case syscall.ARPHRD_SKIP: return "skip" - case unix.ARPHRD_LOOPBACK: + case syscall.ARPHRD_LOOPBACK: return "loopback" - case unix.ARPHRD_LOCALTLK: + case syscall.ARPHRD_LOCALTLK: return "ltalk" - case unix.ARPHRD_FDDI: + case syscall.ARPHRD_FDDI: return "fddi" - case unix.ARPHRD_BIF: + case syscall.ARPHRD_BIF: return "bif" - case unix.ARPHRD_SIT: + case syscall.ARPHRD_SIT: return "sit" - case unix.ARPHRD_IPDDP: + case syscall.ARPHRD_IPDDP: return "ip/ddp" - case unix.ARPHRD_IPGRE: + case syscall.ARPHRD_IPGRE: return "gre" - case unix.ARPHRD_PIMREG: + case syscall.ARPHRD_PIMREG: return "pimreg" - case unix.ARPHRD_HIPPI: + case syscall.ARPHRD_HIPPI: return "hippi" - case unix.ARPHRD_ASH: + case syscall.ARPHRD_ASH: return "ash" - case unix.ARPHRD_ECONET: + case syscall.ARPHRD_ECONET: return "econet" - case unix.ARPHRD_IRDA: + case syscall.ARPHRD_IRDA: return "irda" - case unix.ARPHRD_FCPP: + case syscall.ARPHRD_FCPP: return "fcpp" - case unix.ARPHRD_FCAL: + case syscall.ARPHRD_FCAL: return "fcal" - case unix.ARPHRD_FCPL: + case syscall.ARPHRD_FCPL: return "fcpl" - case unix.ARPHRD_FCFABRIC: + case syscall.ARPHRD_FCFABRIC: return "fcfb0" - case unix.ARPHRD_FCFABRIC + 1: + case syscall.ARPHRD_FCFABRIC + 1: return "fcfb1" - case unix.ARPHRD_FCFABRIC + 2: + case syscall.ARPHRD_FCFABRIC + 2: return "fcfb2" - case unix.ARPHRD_FCFABRIC + 3: + case syscall.ARPHRD_FCFABRIC + 3: return "fcfb3" - case unix.ARPHRD_FCFABRIC + 4: + case syscall.ARPHRD_FCFABRIC + 4: return "fcfb4" - case unix.ARPHRD_FCFABRIC + 5: + case syscall.ARPHRD_FCFABRIC + 5: return "fcfb5" - case unix.ARPHRD_FCFABRIC + 6: + case syscall.ARPHRD_FCFABRIC + 6: return "fcfb6" - case unix.ARPHRD_FCFABRIC + 7: + case syscall.ARPHRD_FCFABRIC + 7: return 
"fcfb7" - case unix.ARPHRD_FCFABRIC + 8: + case syscall.ARPHRD_FCFABRIC + 8: return "fcfb8" - case unix.ARPHRD_FCFABRIC + 9: + case syscall.ARPHRD_FCFABRIC + 9: return "fcfb9" - case unix.ARPHRD_FCFABRIC + 10: + case syscall.ARPHRD_FCFABRIC + 10: return "fcfb10" - case unix.ARPHRD_FCFABRIC + 11: + case syscall.ARPHRD_FCFABRIC + 11: return "fcfb11" - case unix.ARPHRD_FCFABRIC + 12: + case syscall.ARPHRD_FCFABRIC + 12: return "fcfb12" - case unix.ARPHRD_IEEE802_TR: + case syscall.ARPHRD_IEEE802_TR: return "tr" - case unix.ARPHRD_IEEE80211: + case syscall.ARPHRD_IEEE80211: return "ieee802.11" - case unix.ARPHRD_IEEE80211_PRISM: + case syscall.ARPHRD_IEEE80211_PRISM: return "ieee802.11/prism" - case unix.ARPHRD_IEEE80211_RADIOTAP: + case syscall.ARPHRD_IEEE80211_RADIOTAP: return "ieee802.11/radiotap" - case unix.ARPHRD_IEEE802154: + case syscall.ARPHRD_IEEE802154: return "ieee802.15.4" case 65534: @@ -244,7 +243,7 @@ func (msg *IfInfomsg) EncapType() string { } func rtaAlignOf(attrlen int) int { - return (attrlen + unix.RTA_ALIGNTO - 1) & ^(unix.RTA_ALIGNTO - 1) + return (attrlen + syscall.RTA_ALIGNTO - 1) & ^(syscall.RTA_ALIGNTO - 1) } func NewIfInfomsgChild(parent *RtAttr, family int) *IfInfomsg { @@ -255,7 +254,7 @@ func NewIfInfomsgChild(parent *RtAttr, family int) *IfInfomsg { // Extend RtAttr to handle data and children type RtAttr struct { - unix.RtAttr + syscall.RtAttr Data []byte children []NetlinkRequestData } @@ -263,7 +262,7 @@ type RtAttr struct { // Create a new Extended RtAttr object func NewRtAttr(attrType int, data []byte) *RtAttr { return &RtAttr{ - RtAttr: unix.RtAttr{ + RtAttr: syscall.RtAttr{ Type: uint16(attrType), }, children: []NetlinkRequestData{}, @@ -278,21 +277,16 @@ func NewRtAttrChild(parent *RtAttr, attrType int, data []byte) *RtAttr { return attr } -// AddChild adds an existing RtAttr as a child. 
-func (a *RtAttr) AddChild(attr *RtAttr) { - a.children = append(a.children, attr) -} - func (a *RtAttr) Len() int { if len(a.children) == 0 { - return (unix.SizeofRtAttr + len(a.Data)) + return (syscall.SizeofRtAttr + len(a.Data)) } l := 0 for _, child := range a.children { l += rtaAlignOf(child.Len()) } - l += unix.SizeofRtAttr + l += syscall.SizeofRtAttr return rtaAlignOf(l + len(a.Data)) } @@ -325,7 +319,7 @@ func (a *RtAttr) Serialize() []byte { } type NetlinkRequest struct { - unix.NlMsghdr + syscall.NlMsghdr Data []NetlinkRequestData RawData []byte Sockets map[int]*SocketHandle @@ -333,7 +327,7 @@ type NetlinkRequest struct { // Serialize the Netlink Request into a byte array func (req *NetlinkRequest) Serialize() []byte { - length := unix.SizeofNlMsghdr + length := syscall.SizeofNlMsghdr dataBytes := make([][]byte, len(req.Data)) for i, data := range req.Data { dataBytes[i] = data.Serialize() @@ -343,8 +337,8 @@ func (req *NetlinkRequest) Serialize() []byte { req.Len = uint32(length) b := make([]byte, length) - hdr := (*(*[unix.SizeofNlMsghdr]byte)(unsafe.Pointer(req)))[:] - next := unix.SizeofNlMsghdr + hdr := (*(*[syscall.SizeofNlMsghdr]byte)(unsafe.Pointer(req)))[:] + next := syscall.SizeofNlMsghdr copy(b[0:next], hdr) for _, data := range dataBytes { for _, dataByte := range data { @@ -427,10 +421,10 @@ done: if m.Header.Pid != pid { return nil, fmt.Errorf("Wrong pid %d, expected %d", m.Header.Pid, pid) } - if m.Header.Type == unix.NLMSG_DONE { + if m.Header.Type == syscall.NLMSG_DONE { break done } - if m.Header.Type == unix.NLMSG_ERROR { + if m.Header.Type == syscall.NLMSG_ERROR { native := NativeEndian() error := int32(native.Uint32(m.Data[0:4])) if error == 0 { @@ -442,7 +436,7 @@ done: continue } res = append(res, m.Data) - if m.Header.Flags&unix.NLM_F_MULTI == 0 { + if m.Header.Flags&syscall.NLM_F_MULTI == 0 { break done } } @@ -455,10 +449,10 @@ done: // the message is serialized func NewNetlinkRequest(proto, flags int) *NetlinkRequest { return &NetlinkRequest{ - NlMsghdr: unix.NlMsghdr{ - Len: uint32(unix.SizeofNlMsghdr), + NlMsghdr: syscall.NlMsghdr{ + Len: uint32(syscall.SizeofNlMsghdr), Type: uint16(proto), - Flags: unix.NLM_F_REQUEST | uint16(flags), + Flags: syscall.NLM_F_REQUEST | uint16(flags), Seq: atomic.AddUint32(&nextSeqNr, 1), }, } @@ -466,21 +460,21 @@ func NewNetlinkRequest(proto, flags int) *NetlinkRequest { type NetlinkSocket struct { fd int32 - lsa unix.SockaddrNetlink + lsa syscall.SockaddrNetlink sync.Mutex } func getNetlinkSocket(protocol int) (*NetlinkSocket, error) { - fd, err := unix.Socket(unix.AF_NETLINK, unix.SOCK_RAW|unix.SOCK_CLOEXEC, protocol) + fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW|syscall.SOCK_CLOEXEC, protocol) if err != nil { return nil, err } s := &NetlinkSocket{ fd: int32(fd), } - s.lsa.Family = unix.AF_NETLINK - if err := unix.Bind(fd, &s.lsa); err != nil { - unix.Close(fd) + s.lsa.Family = syscall.AF_NETLINK + if err := syscall.Bind(fd, &s.lsa); err != nil { + syscall.Close(fd) return nil, err } @@ -557,21 +551,21 @@ func executeInNetns(newNs, curNs netns.NsHandle) (func(), error) { // Returns the netlink socket on which Receive() method can be called // to retrieve the messages from the kernel. 
func Subscribe(protocol int, groups ...uint) (*NetlinkSocket, error) { - fd, err := unix.Socket(unix.AF_NETLINK, unix.SOCK_RAW, protocol) + fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW, protocol) if err != nil { return nil, err } s := &NetlinkSocket{ fd: int32(fd), } - s.lsa.Family = unix.AF_NETLINK + s.lsa.Family = syscall.AF_NETLINK for _, g := range groups { s.lsa.Groups |= (1 << (g - 1)) } - if err := unix.Bind(fd, &s.lsa); err != nil { - unix.Close(fd) + if err := syscall.Bind(fd, &s.lsa); err != nil { + syscall.Close(fd) return nil, err } @@ -592,7 +586,7 @@ func SubscribeAt(newNs, curNs netns.NsHandle, protocol int, groups ...uint) (*Ne func (s *NetlinkSocket) Close() { fd := int(atomic.SwapInt32(&s.fd, -1)) - unix.Close(fd) + syscall.Close(fd) } func (s *NetlinkSocket) GetFd() int { @@ -604,7 +598,7 @@ func (s *NetlinkSocket) Send(request *NetlinkRequest) error { if fd < 0 { return fmt.Errorf("Send called on a closed socket") } - if err := unix.Sendto(fd, request.Serialize(), 0, &s.lsa); err != nil { + if err := syscall.Sendto(fd, request.Serialize(), 0, &s.lsa); err != nil { return err } return nil @@ -615,12 +609,12 @@ func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, error) { if fd < 0 { return nil, fmt.Errorf("Receive called on a closed socket") } - rb := make([]byte, unix.Getpagesize()) - nr, _, err := unix.Recvfrom(fd, rb, 0) + rb := make([]byte, syscall.Getpagesize()) + nr, _, err := syscall.Recvfrom(fd, rb, 0) if err != nil { return nil, err } - if nr < unix.NLMSG_HDRLEN { + if nr < syscall.NLMSG_HDRLEN { return nil, fmt.Errorf("Got short response from netlink") } rb = rb[:nr] @@ -628,27 +622,27 @@ func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, error) { } // SetSendTimeout allows to set a send timeout on the socket -func (s *NetlinkSocket) SetSendTimeout(timeout *unix.Timeval) error { +func (s *NetlinkSocket) SetSendTimeout(timeout *syscall.Timeval) error { // Set a send timeout of SOCKET_SEND_TIMEOUT, this will allow the Send to periodically unblock and avoid that a routine // remains stuck on a send on a closed fd - return unix.SetsockoptTimeval(int(s.fd), unix.SOL_SOCKET, unix.SO_SNDTIMEO, timeout) + return syscall.SetsockoptTimeval(int(s.fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO, timeout) } // SetReceiveTimeout allows to set a receive timeout on the socket -func (s *NetlinkSocket) SetReceiveTimeout(timeout *unix.Timeval) error { +func (s *NetlinkSocket) SetReceiveTimeout(timeout *syscall.Timeval) error { // Set a read timeout of SOCKET_READ_TIMEOUT, this will allow the Read to periodically unblock and avoid that a routine // remains stuck on a recvmsg on a closed fd - return unix.SetsockoptTimeval(int(s.fd), unix.SOL_SOCKET, unix.SO_RCVTIMEO, timeout) + return syscall.SetsockoptTimeval(int(s.fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO, timeout) } func (s *NetlinkSocket) GetPid() (uint32, error) { fd := int(atomic.LoadInt32(&s.fd)) - lsa, err := unix.Getsockname(fd) + lsa, err := syscall.Getsockname(fd) if err != nil { return 0, err } switch v := lsa.(type) { - case *unix.SockaddrNetlink: + case *syscall.SockaddrNetlink: return v.Pid, nil } return 0, fmt.Errorf("Wrong socket type") @@ -703,24 +697,24 @@ func Uint64Attr(v uint64) []byte { func ParseRouteAttr(b []byte) ([]syscall.NetlinkRouteAttr, error) { var attrs []syscall.NetlinkRouteAttr - for len(b) >= unix.SizeofRtAttr { + for len(b) >= syscall.SizeofRtAttr { a, vbuf, alen, err := netlinkRouteAttrAndValue(b) if err != nil { return nil, err } - ra := 
syscall.NetlinkRouteAttr{Attr: syscall.RtAttr(*a), Value: vbuf[:int(a.Len)-unix.SizeofRtAttr]} + ra := syscall.NetlinkRouteAttr{Attr: *a, Value: vbuf[:int(a.Len)-syscall.SizeofRtAttr]} attrs = append(attrs, ra) b = b[alen:] } return attrs, nil } -func netlinkRouteAttrAndValue(b []byte) (*unix.RtAttr, []byte, int, error) { - a := (*unix.RtAttr)(unsafe.Pointer(&b[0])) - if int(a.Len) < unix.SizeofRtAttr || int(a.Len) > len(b) { - return nil, nil, 0, unix.EINVAL +func netlinkRouteAttrAndValue(b []byte) (*syscall.RtAttr, []byte, int, error) { + a := (*syscall.RtAttr)(unsafe.Pointer(&b[0])) + if int(a.Len) < syscall.SizeofRtAttr || int(a.Len) > len(b) { + return nil, nil, 0, syscall.EINVAL } - return a, b[unix.SizeofRtAttr:], rtaAlignOf(int(a.Len)), nil + return a, b[syscall.SizeofRtAttr:], rtaAlignOf(int(a.Len)), nil } // SocketHandle contains the netlink socket and the associated diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/route_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/route_linux.go index f6906fcaf7e0..1a064d65d2fb 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/route_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/route_linux.go @@ -1,66 +1,65 @@ package nl import ( + "syscall" "unsafe" - - "golang.org/x/sys/unix" ) type RtMsg struct { - unix.RtMsg + syscall.RtMsg } func NewRtMsg() *RtMsg { return &RtMsg{ - RtMsg: unix.RtMsg{ - Table: unix.RT_TABLE_MAIN, - Scope: unix.RT_SCOPE_UNIVERSE, - Protocol: unix.RTPROT_BOOT, - Type: unix.RTN_UNICAST, + RtMsg: syscall.RtMsg{ + Table: syscall.RT_TABLE_MAIN, + Scope: syscall.RT_SCOPE_UNIVERSE, + Protocol: syscall.RTPROT_BOOT, + Type: syscall.RTN_UNICAST, }, } } func NewRtDelMsg() *RtMsg { return &RtMsg{ - RtMsg: unix.RtMsg{ - Table: unix.RT_TABLE_MAIN, - Scope: unix.RT_SCOPE_NOWHERE, + RtMsg: syscall.RtMsg{ + Table: syscall.RT_TABLE_MAIN, + Scope: syscall.RT_SCOPE_NOWHERE, }, } } func (msg *RtMsg) Len() int { - return unix.SizeofRtMsg + return syscall.SizeofRtMsg } func DeserializeRtMsg(b []byte) *RtMsg { - return (*RtMsg)(unsafe.Pointer(&b[0:unix.SizeofRtMsg][0])) + return (*RtMsg)(unsafe.Pointer(&b[0:syscall.SizeofRtMsg][0])) } func (msg *RtMsg) Serialize() []byte { - return (*(*[unix.SizeofRtMsg]byte)(unsafe.Pointer(msg)))[:] + return (*(*[syscall.SizeofRtMsg]byte)(unsafe.Pointer(msg)))[:] } type RtNexthop struct { - unix.RtNexthop + syscall.RtNexthop Children []NetlinkRequestData } func DeserializeRtNexthop(b []byte) *RtNexthop { - return (*RtNexthop)(unsafe.Pointer(&b[0:unix.SizeofRtNexthop][0])) + return (*RtNexthop)(unsafe.Pointer(&b[0:syscall.SizeofRtNexthop][0])) } func (msg *RtNexthop) Len() int { if len(msg.Children) == 0 { - return unix.SizeofRtNexthop + return syscall.SizeofRtNexthop } l := 0 for _, child := range msg.Children { l += rtaAlignOf(child.Len()) } - l += unix.SizeofRtNexthop + l += syscall.SizeofRtNexthop return rtaAlignOf(l) } @@ -68,8 +67,8 @@ func (msg *RtNexthop) Serialize() []byte { length := msg.Len() msg.RtNexthop.Len = uint16(length) buf := make([]byte, length) - copy(buf, (*(*[unix.SizeofRtNexthop]byte)(unsafe.Pointer(msg)))[:]) - next := rtaAlignOf(unix.SizeofRtNexthop) + copy(buf, (*(*[syscall.SizeofRtNexthop]byte)(unsafe.Pointer(msg)))[:]) + next := rtaAlignOf(syscall.SizeofRtNexthop) if len(msg.Children) > 0 { for _, child := range msg.Children { childBuf := child.Serialize() diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/seg6_linux.go 
b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/seg6_linux.go deleted file mode 100644 index b3425f6b0ecc..000000000000 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/seg6_linux.go +++ /dev/null @@ -1,111 +0,0 @@ -package nl - -import ( - "errors" - "fmt" - "net" -) - -type IPv6SrHdr struct { - nextHdr uint8 - hdrLen uint8 - routingType uint8 - segmentsLeft uint8 - firstSegment uint8 - flags uint8 - reserved uint16 - - Segments []net.IP -} - -func (s1 *IPv6SrHdr) Equal(s2 IPv6SrHdr) bool { - if len(s1.Segments) != len(s2.Segments) { - return false - } - for i := range s1.Segments { - if s1.Segments[i].Equal(s2.Segments[i]) != true { - return false - } - } - return s1.nextHdr == s2.nextHdr && - s1.hdrLen == s2.hdrLen && - s1.routingType == s2.routingType && - s1.segmentsLeft == s2.segmentsLeft && - s1.firstSegment == s2.firstSegment && - s1.flags == s2.flags - // reserved doesn't need to be identical. -} - -// seg6 encap mode -const ( - SEG6_IPTUN_MODE_INLINE = iota - SEG6_IPTUN_MODE_ENCAP -) - -// number of nested RTATTR -// from include/uapi/linux/seg6_iptunnel.h -const ( - SEG6_IPTUNNEL_UNSPEC = iota - SEG6_IPTUNNEL_SRH - __SEG6_IPTUNNEL_MAX -) -const ( - SEG6_IPTUNNEL_MAX = __SEG6_IPTUNNEL_MAX - 1 -) - -func EncodeSEG6Encap(mode int, segments []net.IP) ([]byte, error) { - nsegs := len(segments) // nsegs: number of segments - if nsegs == 0 { - return nil, errors.New("EncodeSEG6Encap: No Segment in srh") - } - b := make([]byte, 12, 12+len(segments)*16) - native := NativeEndian() - native.PutUint32(b, uint32(mode)) - b[4] = 0 // srh.nextHdr (0 when calling netlink) - b[5] = uint8(16 * nsegs >> 3) // srh.hdrLen (in 8-octets unit) - b[6] = IPV6_SRCRT_TYPE_4 // srh.routingType (assigned by IANA) - b[7] = uint8(nsegs - 1) // srh.segmentsLeft - b[8] = uint8(nsegs - 1) // srh.firstSegment - b[9] = 0 // srh.flags (SR6_FLAG1_HMAC for srh_hmac) - // srh.reserved: Defined as "Tag" in draft-ietf-6man-segment-routing-header-07 - native.PutUint16(b[10:], 0) // srh.reserved - for _, netIP := range segments { - b = append(b, netIP...) 
// srh.Segments - } - return b, nil -} - -func DecodeSEG6Encap(buf []byte) (int, []net.IP, error) { - native := NativeEndian() - mode := int(native.Uint32(buf)) - srh := IPv6SrHdr{ - nextHdr: buf[4], - hdrLen: buf[5], - routingType: buf[6], - segmentsLeft: buf[7], - firstSegment: buf[8], - flags: buf[9], - reserved: native.Uint16(buf[10:12]), - } - buf = buf[12:] - if len(buf)%16 != 0 { - err := fmt.Errorf("DecodeSEG6Encap: error parsing Segment List (buf len: %d)\n", len(buf)) - return mode, nil, err - } - for len(buf) > 0 { - srh.Segments = append(srh.Segments, net.IP(buf[:16])) - buf = buf[16:] - } - return mode, srh.Segments, nil -} - -// Helper functions -func SEG6EncapModeString(mode int) string { - switch mode { - case SEG6_IPTUN_MODE_INLINE: - return "inline" - case SEG6_IPTUN_MODE_ENCAP: - return "encap" - } - return "unknown" -} diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/syscall.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/syscall.go index fc631e0e505f..3473e536384d 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/syscall.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/nl/syscall.go @@ -65,14 +65,4 @@ const ( LWTUNNEL_ENCAP_IP LWTUNNEL_ENCAP_ILA LWTUNNEL_ENCAP_IP6 - LWTUNNEL_ENCAP_SEG6 - LWTUNNEL_ENCAP_BPF -) - -// routing header types -const ( - IPV6_SRCRT_STRICT = 0x01 // Deprecated; will be removed - IPV6_SRCRT_TYPE_0 = 0 // Deprecated; will be removed - IPV6_SRCRT_TYPE_2 = 2 // IPv6 type 2 Routing Header - IPV6_SRCRT_TYPE_4 = 4 // Segment Routing with IPv6 ) diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/protinfo_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/protinfo_linux.go index 43c465f05758..10dd0d53357c 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/protinfo_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/protinfo_linux.go @@ -5,7 +5,6 @@ import ( "syscall" "github.com/vishvananda/netlink/nl" - "golang.org/x/sys/unix" ) func LinkGetProtinfo(link Link) (Protinfo, error) { @@ -16,10 +15,10 @@ func (h *Handle) LinkGetProtinfo(link Link) (Protinfo, error) { base := link.Attrs() h.ensureIndex(base) var pi Protinfo - req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_DUMP) - msg := nl.NewIfInfomsg(unix.AF_BRIDGE) + req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP) + msg := nl.NewIfInfomsg(syscall.AF_BRIDGE) req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, 0) + msgs, err := req.Execute(syscall.NETLINK_ROUTE, 0) if err != nil { return pi, err } @@ -34,7 +33,7 @@ func (h *Handle) LinkGetProtinfo(link Link) (Protinfo, error) { return pi, err } for _, attr := range attrs { - if attr.Attr.Type != unix.IFLA_PROTINFO|unix.NLA_F_NESTED { + if attr.Attr.Type != syscall.IFLA_PROTINFO|syscall.NLA_F_NESTED { continue } infos, err := nl.ParseRouteAttr(attr.Value) diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/qdisc_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/qdisc_linux.go index 91193145ae75..1123396e47dd 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/qdisc_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/qdisc_linux.go @@ -8,7 +8,6 @@ import ( "syscall" "github.com/vishvananda/netlink/nl" - "golang.org/x/sys/unix" ) // NOTE function is here because it uses other linux functions @@ -85,7 +84,7 @@ func QdiscDel(qdisc Qdisc) error { // QdiscDel will delete a qdisc from the system. 
 // Equivalent to: `tc qdisc del $qdisc`
 func (h *Handle) QdiscDel(qdisc Qdisc) error {
-	return h.qdiscModify(unix.RTM_DELQDISC, 0, qdisc)
+	return h.qdiscModify(syscall.RTM_DELQDISC, 0, qdisc)
 }
 
 // QdiscChange will change a qdisc in place
@@ -99,7 +98,7 @@ func QdiscChange(qdisc Qdisc) error {
 // Equivalent to: `tc qdisc change $qdisc`
 // The parent and handle MUST NOT be changed.
 func (h *Handle) QdiscChange(qdisc Qdisc) error {
-	return h.qdiscModify(unix.RTM_NEWQDISC, 0, qdisc)
+	return h.qdiscModify(syscall.RTM_NEWQDISC, 0, qdisc)
 }
 
 // QdiscReplace will replace a qdisc in the system.
@@ -114,8 +113,8 @@ func QdiscReplace(qdisc Qdisc) error {
 // The handle MUST change.
 func (h *Handle) QdiscReplace(qdisc Qdisc) error {
 	return h.qdiscModify(
-		unix.RTM_NEWQDISC,
-		unix.NLM_F_CREATE|unix.NLM_F_REPLACE,
+		syscall.RTM_NEWQDISC,
+		syscall.NLM_F_CREATE|syscall.NLM_F_REPLACE,
 		qdisc)
 }
 
@@ -129,13 +128,13 @@ func QdiscAdd(qdisc Qdisc) error {
 // Equivalent to: `tc qdisc add $qdisc`
 func (h *Handle) QdiscAdd(qdisc Qdisc) error {
 	return h.qdiscModify(
-		unix.RTM_NEWQDISC,
-		unix.NLM_F_CREATE|unix.NLM_F_EXCL,
+		syscall.RTM_NEWQDISC,
+		syscall.NLM_F_CREATE|syscall.NLM_F_EXCL,
 		qdisc)
 }
 
 func (h *Handle) qdiscModify(cmd, flags int, qdisc Qdisc) error {
-	req := h.newNetlinkRequest(cmd, flags|unix.NLM_F_ACK)
+	req := h.newNetlinkRequest(cmd, flags|syscall.NLM_F_ACK)
 	base := qdisc.Attrs()
 	msg := &nl.TcMsg{
 		Family: nl.FAMILY_ALL,
@@ -146,13 +145,13 @@ func (h *Handle) qdiscModify(cmd, flags int, qdisc Qdisc) error {
 	req.AddData(msg)
 
 	// When deleting don't bother building the rest of the netlink payload
-	if cmd != unix.RTM_DELQDISC {
+	if cmd != syscall.RTM_DELQDISC {
 		if err := qdiscPayload(req, qdisc); err != nil {
 			return err
 		}
 	}
 
-	_, err := req.Execute(unix.NETLINK_ROUTE, 0)
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
 	return err
 }
 
@@ -249,7 +248,7 @@ func QdiscList(link Link) ([]Qdisc, error) {
 // Equivalent to: `tc qdisc show`.
 // The list can be filtered by link.
func (h *Handle) QdiscList(link Link) ([]Qdisc, error) { - req := h.newNetlinkRequest(unix.RTM_GETQDISC, unix.NLM_F_DUMP) + req := h.newNetlinkRequest(syscall.RTM_GETQDISC, syscall.NLM_F_DUMP) index := int32(0) if link != nil { base := link.Attrs() @@ -262,7 +261,7 @@ func (h *Handle) QdiscList(link Link) ([]Qdisc, error) { } req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWQDISC) + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWQDISC) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/route.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/route.go index 2cd58ee33424..68c6a2230d25 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/route.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/route.go @@ -45,8 +45,6 @@ type Route struct { MPLSDst *int NewDst Destination Encap Encap - MTU int - AdvMSS int } func (r Route) String() string { diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/route_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/route_linux.go index fd5ac8983540..9234c6986da2 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/route_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/route_linux.go @@ -8,17 +8,16 @@ import ( "github.com/vishvananda/netlink/nl" "github.com/vishvananda/netns" - "golang.org/x/sys/unix" ) // RtAttr is shared so it is in netlink_linux.go const ( - SCOPE_UNIVERSE Scope = unix.RT_SCOPE_UNIVERSE - SCOPE_SITE Scope = unix.RT_SCOPE_SITE - SCOPE_LINK Scope = unix.RT_SCOPE_LINK - SCOPE_HOST Scope = unix.RT_SCOPE_HOST - SCOPE_NOWHERE Scope = unix.RT_SCOPE_NOWHERE + SCOPE_UNIVERSE Scope = syscall.RT_SCOPE_UNIVERSE + SCOPE_SITE Scope = syscall.RT_SCOPE_SITE + SCOPE_LINK Scope = syscall.RT_SCOPE_LINK + SCOPE_HOST Scope = syscall.RT_SCOPE_HOST + SCOPE_NOWHERE Scope = syscall.RT_SCOPE_NOWHERE ) const ( @@ -35,8 +34,8 @@ const ( ) const ( - FLAG_ONLINK NextHopFlag = unix.RTNH_F_ONLINK - FLAG_PERVASIVE NextHopFlag = unix.RTNH_F_PERVASIVE + FLAG_ONLINK NextHopFlag = syscall.RTNH_F_ONLINK + FLAG_PERVASIVE NextHopFlag = syscall.RTNH_F_PERVASIVE ) var testFlags = []flagString{ @@ -125,17 +124,17 @@ func (e *MPLSEncap) Type() int { func (e *MPLSEncap) Decode(buf []byte) error { if len(buf) < 4 { - return fmt.Errorf("lack of bytes") + return fmt.Errorf("Lack of bytes") } native := nl.NativeEndian() l := native.Uint16(buf) if len(buf) < int(l) { - return fmt.Errorf("lack of bytes") + return fmt.Errorf("Lack of bytes") } buf = buf[:l] typ := native.Uint16(buf[2:]) if typ != nl.MPLS_IPTUNNEL_DST { - return fmt.Errorf("unknown MPLS Encap Type: %d", typ) + return fmt.Errorf("Unknown MPLS Encap Type: %d", typ) } e.Labels = nl.DecodeMPLSStack(buf[4:]) return nil @@ -186,79 +185,6 @@ func (e *MPLSEncap) Equal(x Encap) bool { return true } -// SEG6 definitions -type SEG6Encap struct { - Mode int - Segments []net.IP -} - -func (e *SEG6Encap) Type() int { - return nl.LWTUNNEL_ENCAP_SEG6 -} -func (e *SEG6Encap) Decode(buf []byte) error { - if len(buf) < 4 { - return fmt.Errorf("lack of bytes") - } - native := nl.NativeEndian() - // Get Length(l) & Type(typ) : 2 + 2 bytes - l := native.Uint16(buf) - if len(buf) < int(l) { - return fmt.Errorf("lack of bytes") - } - buf = buf[:l] // make sure buf size upper limit is Length - typ := native.Uint16(buf[2:]) - if typ != nl.SEG6_IPTUNNEL_SRH { - return fmt.Errorf("unknown SEG6 Type: %d", typ) - } - - var err error - e.Mode, 
e.Segments, err = nl.DecodeSEG6Encap(buf[4:])
-
-	return err
-}
-func (e *SEG6Encap) Encode() ([]byte, error) {
-	s, err := nl.EncodeSEG6Encap(e.Mode, e.Segments)
-	native := nl.NativeEndian()
-	hdr := make([]byte, 4)
-	native.PutUint16(hdr, uint16(len(s)+4))
-	native.PutUint16(hdr[2:], nl.SEG6_IPTUNNEL_SRH)
-	return append(hdr, s...), err
-}
-func (e *SEG6Encap) String() string {
-	segs := make([]string, 0, len(e.Segments))
-	// append segment backwards (from n to 0) since seg#0 is the last segment.
-	for i := len(e.Segments); i > 0; i-- {
-		segs = append(segs, fmt.Sprintf("%s", e.Segments[i-1]))
-	}
-	str := fmt.Sprintf("mode %s segs %d [ %s ]", nl.SEG6EncapModeString(e.Mode),
-		len(e.Segments), strings.Join(segs, " "))
-	return str
-}
-func (e *SEG6Encap) Equal(x Encap) bool {
-	o, ok := x.(*SEG6Encap)
-	if !ok {
-		return false
-	}
-	if e == o {
-		return true
-	}
-	if e == nil || o == nil {
-		return false
-	}
-	if e.Mode != o.Mode {
-		return false
-	}
-	if len(e.Segments) != len(o.Segments) {
-		return false
-	}
-	for i := range e.Segments {
-		if !e.Segments[i].Equal(o.Segments[i]) {
-			return false
-		}
-	}
-	return true
-}
-
 // RouteAdd will add a route to the system.
 // Equivalent to: `ip route add $route`
 func RouteAdd(route *Route) error {
@@ -268,8 +194,8 @@ func RouteAdd(route *Route) error {
 // RouteAdd will add a route to the system.
 // Equivalent to: `ip route add $route`
 func (h *Handle) RouteAdd(route *Route) error {
-	flags := unix.NLM_F_CREATE | unix.NLM_F_EXCL | unix.NLM_F_ACK
-	req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags)
+	flags := syscall.NLM_F_CREATE | syscall.NLM_F_EXCL | syscall.NLM_F_ACK
+	req := h.newNetlinkRequest(syscall.RTM_NEWROUTE, flags)
 	return h.routeHandle(route, req, nl.NewRtMsg())
 }
 
@@ -282,8 +208,8 @@ func RouteReplace(route *Route) error {
 // RouteReplace will add or replace a route in the system.
 // Equivalent to: `ip route replace $route`
 func (h *Handle) RouteReplace(route *Route) error {
-	flags := unix.NLM_F_CREATE | unix.NLM_F_REPLACE | unix.NLM_F_ACK
-	req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags)
+	flags := syscall.NLM_F_CREATE | syscall.NLM_F_REPLACE | syscall.NLM_F_ACK
+	req := h.newNetlinkRequest(syscall.RTM_NEWROUTE, flags)
 	return h.routeHandle(route, req, nl.NewRtMsg())
 }
 
@@ -296,7 +222,7 @@ func RouteDel(route *Route) error {
 // RouteDel will delete a route from the system.
// Equivalent to: `ip route del $route` func (h *Handle) RouteDel(route *Route) error { - req := h.newNetlinkRequest(unix.RTM_DELROUTE, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_DELROUTE, syscall.NLM_F_ACK) return h.routeHandle(route, req, nl.NewRtDelMsg()) } @@ -319,12 +245,12 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg } else { dstData = route.Dst.IP.To16() } - rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_DST, dstData)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_DST, dstData)) } else if route.MPLSDst != nil { family = nl.FAMILY_MPLS msg.Dst_len = uint8(20) - msg.Type = unix.RTN_UNICAST - rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_DST, nl.EncodeMPLSStack(*route.MPLSDst))) + msg.Type = syscall.RTN_UNICAST + rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_DST, nl.EncodeMPLSStack(*route.MPLSDst))) } if route.NewDst != nil { @@ -362,7 +288,7 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg srcData = route.Src.To16() } // The commonly used src ip for routes is actually PREFSRC - rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_PREFSRC, srcData)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_PREFSRC, srcData)) } if route.Gw != nil { @@ -377,14 +303,14 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg } else { gwData = route.Gw.To16() } - rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_GATEWAY, gwData)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_GATEWAY, gwData)) } if len(route.MultiPath) > 0 { buf := []byte{} for _, nh := range route.MultiPath { rtnh := &nl.RtNexthop{ - RtNexthop: unix.RtNexthop{ + RtNexthop: syscall.RtNexthop{ Hops: uint8(nh.Hops), Ifindex: int32(nh.LinkIndex), Flags: uint8(nh.Flags), @@ -397,9 +323,9 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg return fmt.Errorf("gateway, source, and destination ip are not the same IP family") } if gwFamily == FAMILY_V4 { - children = append(children, nl.NewRtAttr(unix.RTA_GATEWAY, []byte(nh.Gw.To4()))) + children = append(children, nl.NewRtAttr(syscall.RTA_GATEWAY, []byte(nh.Gw.To4()))) } else { - children = append(children, nl.NewRtAttr(unix.RTA_GATEWAY, []byte(nh.Gw.To16()))) + children = append(children, nl.NewRtAttr(syscall.RTA_GATEWAY, []byte(nh.Gw.To16()))) } } if nh.NewDst != nil { @@ -425,15 +351,15 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg rtnh.Children = children buf = append(buf, rtnh.Serialize()...) 
} - rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_MULTIPATH, buf)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_MULTIPATH, buf)) } if route.Table > 0 { if route.Table >= 256 { - msg.Table = unix.RT_TABLE_UNSPEC + msg.Table = syscall.RT_TABLE_UNSPEC b := make([]byte, 4) native.PutUint32(b, uint32(route.Table)) - rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_TABLE, b)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_TABLE, b)) } else { msg.Table = uint8(route.Table) } @@ -442,7 +368,7 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg if route.Priority > 0 { b := make([]byte, 4) native.PutUint32(b, uint32(route.Priority)) - rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_PRIORITY, b)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_PRIORITY, b)) } if route.Tos > 0 { msg.Tos = uint8(route.Tos) @@ -454,25 +380,6 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg msg.Type = uint8(route.Type) } - var metrics []*nl.RtAttr - // TODO: support other rta_metric values - if route.MTU > 0 { - b := nl.Uint32Attr(uint32(route.MTU)) - metrics = append(metrics, nl.NewRtAttr(unix.RTAX_MTU, b)) - } - if route.AdvMSS > 0 { - b := nl.Uint32Attr(uint32(route.AdvMSS)) - metrics = append(metrics, nl.NewRtAttr(unix.RTAX_ADVMSS, b)) - } - - if metrics != nil { - attr := nl.NewRtAttr(unix.RTA_METRICS, nil) - for _, metric := range metrics { - attr.AddChild(metric) - } - rtAttrs = append(rtAttrs, attr) - } - msg.Flags = uint32(route.Flags) msg.Scope = uint8(route.Scope) msg.Family = uint8(family) @@ -487,9 +394,9 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg ) native.PutUint32(b, uint32(route.LinkIndex)) - req.AddData(nl.NewRtAttr(unix.RTA_OIF, b)) + req.AddData(nl.NewRtAttr(syscall.RTA_OIF, b)) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -522,11 +429,11 @@ func RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, e // RouteListFiltered gets a list of routes in the system filtered with specified rules. 
// All rules must be defined in RouteFilter struct func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, error) { - req := h.newNetlinkRequest(unix.RTM_GETROUTE, unix.NLM_F_DUMP) + req := h.newNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_DUMP) infmsg := nl.NewIfInfomsg(family) req.AddData(infmsg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWROUTE) + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWROUTE) if err != nil { return nil, err } @@ -534,11 +441,11 @@ func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64) var res []Route for _, m := range msgs { msg := nl.DeserializeRtMsg(m) - if msg.Flags&unix.RTM_F_CLONED != 0 { + if msg.Flags&syscall.RTM_F_CLONED != 0 { // Ignore cloned routes continue } - if msg.Table != unix.RT_TABLE_MAIN { + if msg.Table != syscall.RT_TABLE_MAIN { if filter == nil || filter != nil && filterMask&RT_FILTER_TABLE == 0 { // Ignore non-main tables continue @@ -550,7 +457,7 @@ func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64) } if filter != nil { switch { - case filterMask&RT_FILTER_TABLE != 0 && filter.Table != unix.RT_TABLE_UNSPEC && route.Table != filter.Table: + case filterMask&RT_FILTER_TABLE != 0 && filter.Table != syscall.RT_TABLE_UNSPEC && route.Table != filter.Table: continue case filterMask&RT_FILTER_PROTOCOL != 0 && route.Protocol != filter.Protocol: continue @@ -601,11 +508,11 @@ func deserializeRoute(m []byte) (Route, error) { var encap, encapType syscall.NetlinkRouteAttr for _, attr := range attrs { switch attr.Attr.Type { - case unix.RTA_GATEWAY: + case syscall.RTA_GATEWAY: route.Gw = net.IP(attr.Value) - case unix.RTA_PREFSRC: + case syscall.RTA_PREFSRC: route.Src = net.IP(attr.Value) - case unix.RTA_DST: + case syscall.RTA_DST: if msg.Family == nl.FAMILY_MPLS { stack := nl.DecodeMPLSStack(attr.Value) if len(stack) == 0 || len(stack) > 1 { @@ -618,36 +525,36 @@ func deserializeRoute(m []byte) (Route, error) { Mask: net.CIDRMask(int(msg.Dst_len), 8*len(attr.Value)), } } - case unix.RTA_OIF: + case syscall.RTA_OIF: route.LinkIndex = int(native.Uint32(attr.Value[0:4])) - case unix.RTA_IIF: + case syscall.RTA_IIF: route.ILinkIndex = int(native.Uint32(attr.Value[0:4])) - case unix.RTA_PRIORITY: + case syscall.RTA_PRIORITY: route.Priority = int(native.Uint32(attr.Value[0:4])) - case unix.RTA_TABLE: + case syscall.RTA_TABLE: route.Table = int(native.Uint32(attr.Value[0:4])) - case unix.RTA_MULTIPATH: + case syscall.RTA_MULTIPATH: parseRtNexthop := func(value []byte) (*NexthopInfo, []byte, error) { - if len(value) < unix.SizeofRtNexthop { - return nil, nil, fmt.Errorf("lack of bytes") + if len(value) < syscall.SizeofRtNexthop { + return nil, nil, fmt.Errorf("Lack of bytes") } nh := nl.DeserializeRtNexthop(value) if len(value) < int(nh.RtNexthop.Len) { - return nil, nil, fmt.Errorf("lack of bytes") + return nil, nil, fmt.Errorf("Lack of bytes") } info := &NexthopInfo{ LinkIndex: int(nh.RtNexthop.Ifindex), Hops: int(nh.RtNexthop.Hops), Flags: int(nh.RtNexthop.Flags), } - attrs, err := nl.ParseRouteAttr(value[unix.SizeofRtNexthop:int(nh.RtNexthop.Len)]) + attrs, err := nl.ParseRouteAttr(value[syscall.SizeofRtNexthop:int(nh.RtNexthop.Len)]) if err != nil { return nil, nil, err } var encap, encapType syscall.NetlinkRouteAttr for _, attr := range attrs { switch attr.Attr.Type { - case unix.RTA_GATEWAY: + case syscall.RTA_GATEWAY: info.Gw = net.IP(attr.Value) case nl.RTA_NEWDST: var d Destination @@ -704,19 +611,6 @@ func 
deserializeRoute(m []byte) (Route, error) { encapType = attr case nl.RTA_ENCAP: encap = attr - case unix.RTA_METRICS: - metrics, err := nl.ParseRouteAttr(attr.Value) - if err != nil { - return route, err - } - for _, metric := range metrics { - switch metric.Attr.Type { - case unix.RTAX_MTU: - route.MTU = int(native.Uint32(metric.Value[0:4])) - case unix.RTAX_ADVMSS: - route.AdvMSS = int(native.Uint32(metric.Value[0:4])) - } - } } } @@ -729,11 +623,6 @@ func deserializeRoute(m []byte) (Route, error) { if err := e.Decode(encap.Value); err != nil { return route, err } - case nl.LWTUNNEL_ENCAP_SEG6: - e = &SEG6Encap{} - if err := e.Decode(encap.Value); err != nil { - return route, err - } } route.Encap = e } @@ -750,7 +639,7 @@ func RouteGet(destination net.IP) ([]Route, error) { // RouteGet gets a route to a specific destination from the host system. // Equivalent to: 'ip route get'. func (h *Handle) RouteGet(destination net.IP) ([]Route, error) { - req := h.newNetlinkRequest(unix.RTM_GETROUTE, unix.NLM_F_REQUEST) + req := h.newNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_REQUEST) family := nl.GetIPFamily(destination) var destinationData []byte var bitlen uint8 @@ -766,10 +655,10 @@ func (h *Handle) RouteGet(destination net.IP) ([]Route, error) { msg.Dst_len = bitlen req.AddData(msg) - rtaDst := nl.NewRtAttr(unix.RTA_DST, destinationData) + rtaDst := nl.NewRtAttr(syscall.RTA_DST, destinationData) req.AddData(rtaDst) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWROUTE) + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWROUTE) if err != nil { return nil, err } @@ -817,7 +706,7 @@ func RouteSubscribeWithOptions(ch chan<- RouteUpdate, done <-chan struct{}, opti } func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done <-chan struct{}, cberr func(error)) error { - s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_IPV4_ROUTE, unix.RTNLGRP_IPV6_ROUTE) + s, err := nl.SubscribeAt(newNs, curNs, syscall.NETLINK_ROUTE, syscall.RTNLGRP_IPV4_ROUTE, syscall.RTNLGRP_IPV6_ROUTE) if err != nil { return err } diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/rule.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/rule.go index 7fc8ae5df15e..e4d9168d6c0a 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/rule.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/rule.go @@ -21,7 +21,6 @@ type Rule struct { OifName string SuppressIfgroup int SuppressPrefixlen int - Invert bool } func (r Rule) String() string { diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/rule_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/rule_linux.go index 6238ae458642..cbd91a56bb33 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/rule_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/rule_linux.go @@ -3,13 +3,11 @@ package netlink import ( "fmt" "net" + "syscall" "github.com/vishvananda/netlink/nl" - "golang.org/x/sys/unix" ) -const FibRuleInvert = 0x2 - // RuleAdd adds a rule to the system. // Equivalent to: ip rule add func RuleAdd(rule *Rule) error { @@ -19,7 +17,7 @@ func RuleAdd(rule *Rule) error { // RuleAdd adds a rule to the system. 
// Equivalent to: ip rule add func (h *Handle) RuleAdd(rule *Rule) error { - req := h.newNetlinkRequest(unix.RTM_NEWRULE, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_NEWRULE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) return ruleHandle(rule, req) } @@ -32,31 +30,18 @@ func RuleDel(rule *Rule) error { // RuleDel deletes a rule from the system. // Equivalent to: ip rule del func (h *Handle) RuleDel(rule *Rule) error { - req := h.newNetlinkRequest(unix.RTM_DELRULE, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_DELRULE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) return ruleHandle(rule, req) } func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { msg := nl.NewRtMsg() - msg.Family = unix.AF_INET - msg.Protocol = unix.RTPROT_BOOT - msg.Scope = unix.RT_SCOPE_UNIVERSE - msg.Table = unix.RT_TABLE_UNSPEC - msg.Type = unix.RTN_UNSPEC - if req.NlMsghdr.Flags&unix.NLM_F_CREATE > 0 { - msg.Type = unix.RTN_UNICAST - } - if rule.Invert { - msg.Flags |= FibRuleInvert - } + msg.Family = syscall.AF_INET if rule.Family != 0 { msg.Family = uint8(rule.Family) } - if rule.Table >= 0 && rule.Table < 256 { - msg.Table = uint8(rule.Table) - } - var dstFamily uint8 + var rtAttrs []*nl.RtAttr if rule.Dst != nil && rule.Dst.IP != nil { dstLen, _ := rule.Dst.Mask.Size() @@ -64,12 +49,12 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { msg.Family = uint8(nl.GetIPFamily(rule.Dst.IP)) dstFamily = msg.Family var dstData []byte - if msg.Family == unix.AF_INET { + if msg.Family == syscall.AF_INET { dstData = rule.Dst.IP.To4() } else { dstData = rule.Dst.IP.To16() } - rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_DST, dstData)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_DST, dstData)) } if rule.Src != nil && rule.Src.IP != nil { @@ -80,12 +65,19 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { srcLen, _ := rule.Src.Mask.Size() msg.Src_len = uint8(srcLen) var srcData []byte - if msg.Family == unix.AF_INET { + if msg.Family == syscall.AF_INET { srcData = rule.Src.IP.To4() } else { srcData = rule.Src.IP.To16() } - rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_SRC, srcData)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_SRC, srcData)) + } + + if rule.Table >= 0 { + msg.Table = uint8(rule.Table) + if rule.Table >= 256 { + msg.Table = syscall.RT_TABLE_UNSPEC + } } req.AddData(msg) @@ -150,7 +142,7 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { req.AddData(nl.NewRtAttr(nl.FRA_GOTO, b)) } - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -163,11 +155,11 @@ func RuleList(family int) ([]Rule, error) { // RuleList lists rules in the system. 
// Equivalent to: ip rule list func (h *Handle) RuleList(family int) ([]Rule, error) { - req := h.newNetlinkRequest(unix.RTM_GETRULE, unix.NLM_F_DUMP|unix.NLM_F_REQUEST) + req := h.newNetlinkRequest(syscall.RTM_GETRULE, syscall.NLM_F_DUMP|syscall.NLM_F_REQUEST) msg := nl.NewIfInfomsg(family) req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWRULE) + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWRULE) if err != nil { return nil, err } @@ -183,11 +175,9 @@ func (h *Handle) RuleList(family int) ([]Rule, error) { rule := NewRule() - rule.Invert = msg.Flags&FibRuleInvert > 0 - for j := range attrs { switch attrs[j].Attr.Type { - case unix.RTA_TABLE: + case syscall.RTA_TABLE: rule.Table = int(native.Uint32(attrs[j].Value[0:4])) case nl.FRA_SRC: rule.Src = &net.IPNet{ diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/socket_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/socket_linux.go index 99e9fb4d8979..b42b84f0cfe4 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/socket_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/socket_linux.go @@ -4,9 +4,9 @@ import ( "errors" "fmt" "net" + "syscall" "github.com/vishvananda/netlink/nl" - "golang.org/x/sys/unix" ) const ( @@ -123,15 +123,15 @@ func SocketGet(local, remote net.Addr) (*Socket, error) { return nil, ErrNotImplemented } - s, err := nl.Subscribe(unix.NETLINK_INET_DIAG) + s, err := nl.Subscribe(syscall.NETLINK_INET_DIAG) if err != nil { return nil, err } defer s.Close() req := nl.NewNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, 0) req.AddData(&socketRequest{ - Family: unix.AF_INET, - Protocol: unix.IPPROTO_TCP, + Family: syscall.AF_INET, + Protocol: syscall.IPPROTO_TCP, ID: SocketID{ SourcePort: uint16(localTCP.Port), DestinationPort: uint16(remoteTCP.Port), diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm.go index 02b41842e102..9962dcf7006d 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm.go @@ -2,20 +2,19 @@ package netlink import ( "fmt" - - "golang.org/x/sys/unix" + "syscall" ) // Proto is an enum representing an ipsec protocol. 
type Proto uint8 const ( - XFRM_PROTO_ROUTE2 Proto = unix.IPPROTO_ROUTING - XFRM_PROTO_ESP Proto = unix.IPPROTO_ESP - XFRM_PROTO_AH Proto = unix.IPPROTO_AH - XFRM_PROTO_HAO Proto = unix.IPPROTO_DSTOPTS + XFRM_PROTO_ROUTE2 Proto = syscall.IPPROTO_ROUTING + XFRM_PROTO_ESP Proto = syscall.IPPROTO_ESP + XFRM_PROTO_AH Proto = syscall.IPPROTO_AH + XFRM_PROTO_HAO Proto = syscall.IPPROTO_DSTOPTS XFRM_PROTO_COMP Proto = 0x6c // NOTE not defined on darwin - XFRM_PROTO_IPSEC_ANY Proto = unix.IPPROTO_RAW + XFRM_PROTO_IPSEC_ANY Proto = syscall.IPPROTO_RAW ) func (p Proto) String() string { diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go index efe72ddf29ce..7b98c9cb6d3b 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go @@ -2,10 +2,11 @@ package netlink import ( "fmt" + "syscall" - "github.com/vishvananda/netlink/nl" "github.com/vishvananda/netns" - "golang.org/x/sys/unix" + + "github.com/vishvananda/netlink/nl" ) type XfrmMsg interface { @@ -38,7 +39,7 @@ func XfrmMonitor(ch chan<- XfrmMsg, done <-chan struct{}, errorChan chan<- error if err != nil { return nil } - s, err := nl.SubscribeAt(netns.None(), netns.None(), unix.NETLINK_XFRM, groups...) + s, err := nl.SubscribeAt(netns.None(), netns.None(), syscall.NETLINK_XFRM, groups...) if err != nil { return err } diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go index fde0c2ca5ad0..c3d4e4222724 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go @@ -1,8 +1,9 @@ package netlink import ( + "syscall" + "github.com/vishvananda/netlink/nl" - "golang.org/x/sys/unix" ) func selFromPolicy(sel *nl.XfrmSelector, policy *XfrmPolicy) { @@ -54,7 +55,7 @@ func (h *Handle) XfrmPolicyUpdate(policy *XfrmPolicy) error { } func (h *Handle) xfrmPolicyAddOrUpdate(policy *XfrmPolicy, nlProto int) error { - req := h.newNetlinkRequest(nlProto, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) + req := h.newNetlinkRequest(nlProto, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) msg := &nl.XfrmUserpolicyInfo{} selFromPolicy(&msg.Sel, policy) @@ -90,7 +91,7 @@ func (h *Handle) xfrmPolicyAddOrUpdate(policy *XfrmPolicy, nlProto int) error { req.AddData(out) } - _, err := req.Execute(unix.NETLINK_XFRM, 0) + _, err := req.Execute(syscall.NETLINK_XFRM, 0) return err } @@ -120,12 +121,12 @@ func XfrmPolicyList(family int) ([]XfrmPolicy, error) { // Equivalent to: `ip xfrm policy show`. // The list can be filtered by ip family. func (h *Handle) XfrmPolicyList(family int) ([]XfrmPolicy, error) { - req := h.newNetlinkRequest(nl.XFRM_MSG_GETPOLICY, unix.NLM_F_DUMP) + req := h.newNetlinkRequest(nl.XFRM_MSG_GETPOLICY, syscall.NLM_F_DUMP) msg := nl.NewIfInfomsg(family) req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_XFRM, nl.XFRM_MSG_NEWPOLICY) + msgs, err := req.Execute(syscall.NETLINK_XFRM, nl.XFRM_MSG_NEWPOLICY) if err != nil { return nil, err } @@ -164,13 +165,13 @@ func XfrmPolicyFlush() error { // XfrmPolicyFlush will flush the policies on the system. 
// Equivalent to: `ip xfrm policy flush` func (h *Handle) XfrmPolicyFlush() error { - req := h.newNetlinkRequest(nl.XFRM_MSG_FLUSHPOLICY, unix.NLM_F_ACK) - _, err := req.Execute(unix.NETLINK_XFRM, 0) + req := h.newNetlinkRequest(nl.XFRM_MSG_FLUSHPOLICY, syscall.NLM_F_ACK) + _, err := req.Execute(syscall.NETLINK_XFRM, 0) return err } func (h *Handle) xfrmPolicyGetOrDelete(policy *XfrmPolicy, nlProto int) (*XfrmPolicy, error) { - req := h.newNetlinkRequest(nlProto, unix.NLM_F_ACK) + req := h.newNetlinkRequest(nlProto, syscall.NLM_F_ACK) msg := &nl.XfrmUserpolicyId{} selFromPolicy(&msg.Sel, policy) @@ -188,7 +189,7 @@ func (h *Handle) xfrmPolicyGetOrDelete(policy *XfrmPolicy, nlProto int) (*XfrmPo resType = 0 } - msgs, err := req.Execute(unix.NETLINK_XFRM, uint16(resType)) + msgs, err := req.Execute(syscall.NETLINK_XFRM, uint16(resType)) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm_state.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm_state.go index d14740dc55b3..368a9b986d61 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm_state.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm_state.go @@ -3,7 +3,6 @@ package netlink import ( "fmt" "net" - "time" ) // XfrmStateAlgo represents the algorithm to use for the ipsec encryption. @@ -68,19 +67,6 @@ type XfrmStateLimits struct { TimeUseHard uint64 } -// XfrmStateStats represents the current number of bytes/packets -// processed by this State, the State's installation and first use -// time and the replay window counters. -type XfrmStateStats struct { - ReplayWindow uint32 - Replay uint32 - Failed uint32 - Bytes uint64 - Packets uint64 - AddTime uint64 - UseTime uint64 -} - // XfrmState represents the state of an ipsec policy. It optionally // contains an XfrmStateAlgo for encryption and one for authentication. 
type XfrmState struct { @@ -92,7 +78,6 @@ type XfrmState struct { Reqid int ReplayWindow int Limits XfrmStateLimits - Statistics XfrmStateStats Mark *XfrmMark Auth *XfrmStateAlgo Crypt *XfrmStateAlgo @@ -109,16 +94,10 @@ func (sa XfrmState) Print(stats bool) string { if !stats { return sa.String() } - at := time.Unix(int64(sa.Statistics.AddTime), 0).Format(time.UnixDate) - ut := "-" - if sa.Statistics.UseTime > 0 { - ut = time.Unix(int64(sa.Statistics.UseTime), 0).Format(time.UnixDate) - } - return fmt.Sprintf("%s, ByteSoft: %s, ByteHard: %s, PacketSoft: %s, PacketHard: %s, TimeSoft: %d, TimeHard: %d, TimeUseSoft: %d, TimeUseHard: %d, Bytes: %d, Packets: %d, "+ - "AddTime: %s, UseTime: %s, ReplayWindow: %d, Replay: %d, Failed: %d", + + return fmt.Sprintf("%s, ByteSoft: %s, ByteHard: %s, PacketSoft: %s, PacketHard: %s, TimeSoft: %d, TimeHard: %d, TimeUseSoft: %d, TimeUseHard: %d", sa.String(), printLimit(sa.Limits.ByteSoft), printLimit(sa.Limits.ByteHard), printLimit(sa.Limits.PacketSoft), printLimit(sa.Limits.PacketHard), - sa.Limits.TimeSoft, sa.Limits.TimeHard, sa.Limits.TimeUseSoft, sa.Limits.TimeUseHard, sa.Statistics.Bytes, sa.Statistics.Packets, at, ut, - sa.Statistics.ReplayWindow, sa.Statistics.Replay, sa.Statistics.Failed) + sa.Limits.TimeSoft, sa.Limits.TimeHard, sa.Limits.TimeUseSoft, sa.Limits.TimeUseHard) } func printLimit(lmt uint64) string { diff --git a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go index 7fc92900c058..6a7bc0deca24 100644 --- a/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go +++ b/cluster-autoscaler/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go @@ -2,10 +2,10 @@ package netlink import ( "fmt" + "syscall" "unsafe" "github.com/vishvananda/netlink/nl" - "golang.org/x/sys/unix" ) func writeStateAlgo(a *XfrmStateAlgo) []byte { @@ -111,7 +111,7 @@ func (h *Handle) xfrmStateAddOrUpdate(state *XfrmState, nlProto int) error { if state.Spi == 0 { return fmt.Errorf("Spi must be set when adding xfrm state.") } - req := h.newNetlinkRequest(nlProto, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) + req := h.newNetlinkRequest(nlProto, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) msg := xfrmUsersaInfoFromXfrmState(state) @@ -157,13 +157,13 @@ func (h *Handle) xfrmStateAddOrUpdate(state *XfrmState, nlProto int) error { req.AddData(out) } - _, err := req.Execute(unix.NETLINK_XFRM, 0) + _, err := req.Execute(syscall.NETLINK_XFRM, 0) return err } func (h *Handle) xfrmStateAllocSpi(state *XfrmState) (*XfrmState, error) { req := h.newNetlinkRequest(nl.XFRM_MSG_ALLOCSPI, - unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) + syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) msg := &nl.XfrmUserSpiInfo{} msg.XfrmUsersaInfo = *(xfrmUsersaInfoFromXfrmState(state)) @@ -177,7 +177,7 @@ func (h *Handle) xfrmStateAllocSpi(state *XfrmState) (*XfrmState, error) { req.AddData(out) } - msgs, err := req.Execute(unix.NETLINK_XFRM, 0) + msgs, err := req.Execute(syscall.NETLINK_XFRM, 0) if err != nil { return nil, err } @@ -216,9 +216,9 @@ func XfrmStateList(family int) ([]XfrmState, error) { // Equivalent to: `ip xfrm state show`. // The list can be filtered by ip family. 
func (h *Handle) XfrmStateList(family int) ([]XfrmState, error) { - req := h.newNetlinkRequest(nl.XFRM_MSG_GETSA, unix.NLM_F_DUMP) + req := h.newNetlinkRequest(nl.XFRM_MSG_GETSA, syscall.NLM_F_DUMP) - msgs, err := req.Execute(unix.NETLINK_XFRM, nl.XFRM_MSG_NEWSA) + msgs, err := req.Execute(syscall.NETLINK_XFRM, nl.XFRM_MSG_NEWSA) if err != nil { return nil, err } @@ -255,7 +255,7 @@ func (h *Handle) XfrmStateGet(state *XfrmState) (*XfrmState, error) { } func (h *Handle) xfrmStateGetOrDelete(state *XfrmState, nlProto int) (*XfrmState, error) { - req := h.newNetlinkRequest(nlProto, unix.NLM_F_ACK) + req := h.newNetlinkRequest(nlProto, syscall.NLM_F_ACK) msg := &nl.XfrmUsersaId{} msg.Family = uint16(nl.GetIPFamily(state.Dst)) @@ -278,7 +278,7 @@ func (h *Handle) xfrmStateGetOrDelete(state *XfrmState, nlProto int) (*XfrmState resType = 0 } - msgs, err := req.Execute(unix.NETLINK_XFRM, uint16(resType)) + msgs, err := req.Execute(syscall.NETLINK_XFRM, uint16(resType)) if err != nil { return nil, err } @@ -308,7 +308,6 @@ func xfrmStateFromXfrmUsersaInfo(msg *nl.XfrmUsersaInfo) *XfrmState { state.Reqid = int(msg.Reqid) state.ReplayWindow = int(msg.ReplayWindow) lftToLimits(&msg.Lft, &state.Limits) - curToStats(&msg.Curlft, &msg.Stats, &state.Statistics) return &state } @@ -387,11 +386,11 @@ func XfrmStateFlush(proto Proto) error { // proto = 0 means any transformation protocols // Equivalent to: `ip xfrm state flush [ proto XFRM-PROTO ]` func (h *Handle) XfrmStateFlush(proto Proto) error { - req := h.newNetlinkRequest(nl.XFRM_MSG_FLUSHSA, unix.NLM_F_ACK) + req := h.newNetlinkRequest(nl.XFRM_MSG_FLUSHSA, syscall.NLM_F_ACK) req.AddData(&nl.XfrmUsersaFlush{Proto: uint8(proto)}) - _, err := req.Execute(unix.NETLINK_XFRM, 0) + _, err := req.Execute(syscall.NETLINK_XFRM, 0) if err != nil { return err } @@ -430,16 +429,6 @@ func lftToLimits(lft *nl.XfrmLifetimeCfg, lmts *XfrmStateLimits) { *lmts = *(*XfrmStateLimits)(unsafe.Pointer(lft)) } -func curToStats(cur *nl.XfrmLifetimeCur, wstats *nl.XfrmStats, stats *XfrmStateStats) { - stats.Bytes = cur.Bytes - stats.Packets = cur.Packets - stats.AddTime = cur.AddTime - stats.UseTime = cur.UseTime - stats.ReplayWindow = wstats.ReplayWindow - stats.Replay = wstats.Replay - stats.Failed = wstats.IntegrityFailed -} - func xfrmUsersaInfoFromXfrmState(state *XfrmState) *nl.XfrmUsersaInfo { msg := &nl.XfrmUsersaInfo{} msg.Family = uint16(nl.GetIPFamily(state.Dst)) diff --git a/cluster-autoscaler/vendor/golang.org/x/exp/AUTHORS b/cluster-autoscaler/vendor/golang.org/x/exp/AUTHORS deleted file mode 100644 index 15167cd746c5..000000000000 --- a/cluster-autoscaler/vendor/golang.org/x/exp/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/cluster-autoscaler/vendor/golang.org/x/exp/CONTRIBUTORS b/cluster-autoscaler/vendor/golang.org/x/exp/CONTRIBUTORS deleted file mode 100644 index 1c4577e96806..000000000000 --- a/cluster-autoscaler/vendor/golang.org/x/exp/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. 
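The hunks above revert the vendored vishvananda/netlink package from golang.org/x/sys/unix constants back to syscall, and drop the per-SA statistics (the XfrmStateStats type, the Statistics field, and curToStats). A minimal sketch of what remains of the XFRM state API at this revision — assuming Linux and, typically, CAP_NET_ADMIN; the program is illustrative and not part of the diff:

```go
package main

import (
	"fmt"
	"log"

	"github.com/vishvananda/netlink"
)

func main() {
	// Equivalent to `ip xfrm state show` (see XfrmStateList above).
	states, err := netlink.XfrmStateList(netlink.FAMILY_ALL)
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range states {
		// After this revert, Print(true) reports only the configured
		// limits; the per-SA byte/packet counters left with the
		// removed XfrmStateStats type.
		fmt.Println(s.Print(true))
	}
}
```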
diff --git a/cluster-autoscaler/vendor/golang.org/x/net/context/context.go b/cluster-autoscaler/vendor/golang.org/x/net/context/context.go index a25301a76fa4..839e3a64b62c 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/context/context.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/context/context.go @@ -5,6 +5,8 @@ // Package context defines the Context type, which carries deadlines, // cancelation signals, and other request-scoped values across API boundaries // and between processes. +// As of Go 1.7 this package is available in the standard library under the +// name context. https://golang.org/pkg/context. // // Incoming requests to a server should create a Context, and outgoing calls to // servers should accept a Context. The chain of function calls between must diff --git a/cluster-autoscaler/vendor/golang.org/x/net/html/atom/gen.go b/cluster-autoscaler/vendor/golang.org/x/net/html/atom/gen.go index 6bfa86601987..cc5dc5dbceeb 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/html/atom/gen.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/html/atom/gen.go @@ -4,17 +4,17 @@ // +build ignore -package main +//go:generate go run gen.go +//go:generate go run gen.go -test -// This program generates table.go and table_test.go. -// Invoke as -// -// go run gen.go |gofmt >table.go -// go run gen.go -test |gofmt >table_test.go +package main import ( + "bytes" "flag" "fmt" + "go/format" + "io/ioutil" "math/rand" "os" "sort" @@ -42,6 +42,18 @@ func identifier(s string) string { var test = flag.Bool("test", false, "generate table_test.go") +func genFile(name string, buf *bytes.Buffer) { + b, err := format.Source(buf.Bytes()) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if err := ioutil.WriteFile(name, b, 0644); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + func main() { flag.Parse() @@ -52,32 +64,31 @@ func main() { all = append(all, extra...) sort.Strings(all) - if *test { - fmt.Printf("// generated by go run gen.go -test; DO NOT EDIT\n\n") - fmt.Printf("package atom\n\n") - fmt.Printf("var testAtomList = []string{\n") - for _, s := range all { - fmt.Printf("\t%q,\n", s) - } - fmt.Printf("}\n") - return - } - // uniq - lists have dups - // compute max len too - maxLen := 0 w := 0 for _, s := range all { if w == 0 || all[w-1] != s { - if maxLen < len(s) { - maxLen = len(s) - } all[w] = s w++ } } all = all[:w] + if *test { + var buf bytes.Buffer + fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n") + fmt.Fprintln(&buf, "//go:generate go run gen.go -test\n") + fmt.Fprintln(&buf, "package atom\n") + fmt.Fprintln(&buf, "var testAtomList = []string{") + for _, s := range all { + fmt.Fprintf(&buf, "\t%q,\n", s) + } + fmt.Fprintln(&buf, "}") + + genFile("table_test.go", &buf) + return + } + // Find hash that minimizes table size. var best *table for i := 0; i < 1000000; i++ { @@ -163,36 +174,46 @@ func main() { atom[s] = uint32(off<<8 | len(s)) } + var buf bytes.Buffer // Generate the Go code. 
- fmt.Printf("// generated by go run gen.go; DO NOT EDIT\n\n") - fmt.Printf("package atom\n\nconst (\n") + fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n") + fmt.Fprintln(&buf, "//go:generate go run gen.go\n") + fmt.Fprintln(&buf, "package atom\n\nconst (") + + // compute max len + maxLen := 0 for _, s := range all { - fmt.Printf("\t%s Atom = %#x\n", identifier(s), atom[s]) + if maxLen < len(s) { + maxLen = len(s) + } + fmt.Fprintf(&buf, "\t%s Atom = %#x\n", identifier(s), atom[s]) } - fmt.Printf(")\n\n") + fmt.Fprintln(&buf, ")\n") - fmt.Printf("const hash0 = %#x\n\n", best.h0) - fmt.Printf("const maxAtomLen = %d\n\n", maxLen) + fmt.Fprintf(&buf, "const hash0 = %#x\n\n", best.h0) + fmt.Fprintf(&buf, "const maxAtomLen = %d\n\n", maxLen) - fmt.Printf("var table = [1<<%d]Atom{\n", best.k) + fmt.Fprintf(&buf, "var table = [1<<%d]Atom{\n", best.k) for i, s := range best.tab { if s == "" { continue } - fmt.Printf("\t%#x: %#x, // %s\n", i, atom[s], s) + fmt.Fprintf(&buf, "\t%#x: %#x, // %s\n", i, atom[s], s) } - fmt.Printf("}\n") + fmt.Fprintf(&buf, "}\n") datasize := (1 << best.k) * 4 - fmt.Printf("const atomText =\n") + fmt.Fprintln(&buf, "const atomText =") textsize := len(text) for len(text) > 60 { - fmt.Printf("\t%q +\n", text[:60]) + fmt.Fprintf(&buf, "\t%q +\n", text[:60]) text = text[60:] } - fmt.Printf("\t%q\n\n", text) + fmt.Fprintf(&buf, "\t%q\n\n", text) + + genFile("table.go", &buf) - fmt.Fprintf(os.Stderr, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize) + fmt.Fprintf(os.Stdout, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize) } type byLen []string @@ -285,8 +306,10 @@ func (t *table) push(i uint32, depth int) bool { // The lists of element names and attribute keys were taken from // https://html.spec.whatwg.org/multipage/indices.html#index -// as of the "HTML Living Standard - Last Updated 21 February 2015" version. +// as of the "HTML Living Standard - Last Updated 18 September 2017" version. +// "command", "keygen" and "menuitem" have been removed from the spec, +// but are kept here for backwards compatibility. var elements = []string{ "a", "abbr", @@ -349,6 +372,7 @@ var elements = []string{ "legend", "li", "link", + "main", "map", "mark", "menu", @@ -364,6 +388,7 @@ var elements = []string{ "output", "p", "param", + "picture", "pre", "progress", "q", @@ -375,6 +400,7 @@ var elements = []string{ "script", "section", "select", + "slot", "small", "source", "span", @@ -403,14 +429,21 @@ var elements = []string{ } // https://html.spec.whatwg.org/multipage/indices.html#attributes-3 - +// +// "challenge", "command", "contextmenu", "dropzone", "icon", "keytype", "mediagroup", +// "radiogroup", "spellcheck", "scoped", "seamless", "sortable" and "sorted" have been removed from the spec, +// but are kept here for backwards compatibility. 
var attributes = []string{ "abbr", "accept", "accept-charset", "accesskey", "action", + "allowfullscreen", + "allowpaymentrequest", + "allowusermedia", "alt", + "as", "async", "autocomplete", "autofocus", @@ -420,6 +453,7 @@ var attributes = []string{ "checked", "cite", "class", + "color", "cols", "colspan", "command", @@ -457,6 +491,8 @@ var attributes = []string{ "icon", "id", "inputmode", + "integrity", + "is", "ismap", "itemid", "itemprop", @@ -481,16 +517,20 @@ var attributes = []string{ "multiple", "muted", "name", + "nomodule", + "nonce", "novalidate", "open", "optimum", "pattern", "ping", "placeholder", + "playsinline", "poster", "preload", "radiogroup", "readonly", + "referrerpolicy", "rel", "required", "reversed", @@ -507,10 +547,13 @@ var attributes = []string{ "sizes", "sortable", "sorted", + "slot", "span", + "spellcheck", "src", "srcdoc", "srclang", + "srcset", "start", "step", "style", @@ -520,16 +563,22 @@ var attributes = []string{ "translate", "type", "typemustmatch", + "updateviacache", "usemap", "value", "width", + "workertype", "wrap", } +// "onautocomplete", "onautocompleteerror", "onmousewheel", +// "onshow" and "onsort" have been removed from the spec, +// but are kept here for backwards compatibility. var eventHandlers = []string{ "onabort", "onautocomplete", "onautocompleteerror", + "onauxclick", "onafterprint", "onbeforeprint", "onbeforeunload", @@ -541,11 +590,14 @@ var eventHandlers = []string{ "onclick", "onclose", "oncontextmenu", + "oncopy", "oncuechange", + "oncut", "ondblclick", "ondrag", "ondragend", "ondragenter", + "ondragexit", "ondragleave", "ondragover", "ondragstart", @@ -565,18 +617,24 @@ var eventHandlers = []string{ "onload", "onloadeddata", "onloadedmetadata", + "onloadend", "onloadstart", "onmessage", + "onmessageerror", "onmousedown", + "onmouseenter", + "onmouseleave", "onmousemove", "onmouseout", "onmouseover", "onmouseup", "onmousewheel", + "onwheel", "onoffline", "ononline", "onpagehide", "onpageshow", + "onpaste", "onpause", "onplay", "onplaying", @@ -585,7 +643,9 @@ var eventHandlers = []string{ "onratechange", "onreset", "onresize", + "onrejectionhandled", "onscroll", + "onsecuritypolicyviolation", "onseeked", "onseeking", "onselect", @@ -597,6 +657,7 @@ var eventHandlers = []string{ "onsuspend", "ontimeupdate", "ontoggle", + "onunhandledrejection", "onunload", "onvolumechange", "onwaiting", diff --git a/cluster-autoscaler/vendor/golang.org/x/net/html/atom/table.go b/cluster-autoscaler/vendor/golang.org/x/net/html/atom/table.go index 2605ba3102f6..f74018ecea17 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/html/atom/table.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/html/atom/table.go @@ -1,713 +1,777 @@ -// generated by go run gen.go; DO NOT EDIT +// Code generated by go generate gen.go; DO NOT EDIT. 
+ +//go:generate go run gen.go package atom const ( - A Atom = 0x1 - Abbr Atom = 0x4 - Accept Atom = 0x2106 - AcceptCharset Atom = 0x210e - Accesskey Atom = 0x3309 - Action Atom = 0x1f606 - Address Atom = 0x4f307 - Align Atom = 0x1105 - Alt Atom = 0x4503 - Annotation Atom = 0x1670a - AnnotationXml Atom = 0x1670e - Applet Atom = 0x2b306 - Area Atom = 0x2fa04 - Article Atom = 0x38807 - Aside Atom = 0x8305 - Async Atom = 0x7b05 - Audio Atom = 0xa605 - Autocomplete Atom = 0x1fc0c - Autofocus Atom = 0xb309 - Autoplay Atom = 0xce08 - B Atom = 0x101 - Base Atom = 0xd604 - Basefont Atom = 0xd608 - Bdi Atom = 0x1a03 - Bdo Atom = 0xe703 - Bgsound Atom = 0x11807 - Big Atom = 0x12403 - Blink Atom = 0x12705 - Blockquote Atom = 0x12c0a - Body Atom = 0x2f04 - Br Atom = 0x202 - Button Atom = 0x13606 - Canvas Atom = 0x7f06 - Caption Atom = 0x1bb07 - Center Atom = 0x5b506 - Challenge Atom = 0x21f09 - Charset Atom = 0x2807 - Checked Atom = 0x32807 - Cite Atom = 0x3c804 - Class Atom = 0x4de05 - Code Atom = 0x14904 - Col Atom = 0x15003 - Colgroup Atom = 0x15008 - Color Atom = 0x15d05 - Cols Atom = 0x16204 - Colspan Atom = 0x16207 - Command Atom = 0x17507 - Content Atom = 0x42307 - Contenteditable Atom = 0x4230f - Contextmenu Atom = 0x3310b - Controls Atom = 0x18808 - Coords Atom = 0x19406 - Crossorigin Atom = 0x19f0b - Data Atom = 0x44a04 - Datalist Atom = 0x44a08 - Datetime Atom = 0x23c08 - Dd Atom = 0x26702 - Default Atom = 0x8607 - Defer Atom = 0x14b05 - Del Atom = 0x3ef03 - Desc Atom = 0x4db04 - Details Atom = 0x4807 - Dfn Atom = 0x6103 - Dialog Atom = 0x1b06 - Dir Atom = 0x6903 - Dirname Atom = 0x6907 - Disabled Atom = 0x10c08 - Div Atom = 0x11303 - Dl Atom = 0x11e02 - Download Atom = 0x40008 - Draggable Atom = 0x17b09 - Dropzone Atom = 0x39108 - Dt Atom = 0x50902 - Em Atom = 0x6502 - Embed Atom = 0x6505 - Enctype Atom = 0x21107 - Face Atom = 0x5b304 - Fieldset Atom = 0x1b008 - Figcaption Atom = 0x1b80a - Figure Atom = 0x1cc06 - Font Atom = 0xda04 - Footer Atom = 0x8d06 - For Atom = 0x1d803 - ForeignObject Atom = 0x1d80d - Foreignobject Atom = 0x1e50d - Form Atom = 0x1f204 - Formaction Atom = 0x1f20a - Formenctype Atom = 0x20d0b - Formmethod Atom = 0x2280a - Formnovalidate Atom = 0x2320e - Formtarget Atom = 0x2470a - Frame Atom = 0x9a05 - Frameset Atom = 0x9a08 - H1 Atom = 0x26e02 - H2 Atom = 0x29402 - H3 Atom = 0x2a702 - H4 Atom = 0x2e902 - H5 Atom = 0x2f302 - H6 Atom = 0x50b02 - Head Atom = 0x2d504 - Header Atom = 0x2d506 - Headers Atom = 0x2d507 - Height Atom = 0x25106 - Hgroup Atom = 0x25906 - Hidden Atom = 0x26506 - High Atom = 0x26b04 - Hr Atom = 0x27002 - Href Atom = 0x27004 - Hreflang Atom = 0x27008 - Html Atom = 0x25504 - HttpEquiv Atom = 0x2780a - I Atom = 0x601 - Icon Atom = 0x42204 - Id Atom = 0x8502 - Iframe Atom = 0x29606 - Image Atom = 0x29c05 - Img Atom = 0x2a103 - Input Atom = 0x3e805 - Inputmode Atom = 0x3e809 - Ins Atom = 0x1a803 - Isindex Atom = 0x2a907 - Ismap Atom = 0x2b005 - Itemid Atom = 0x33c06 - Itemprop Atom = 0x3c908 - Itemref Atom = 0x5ad07 - Itemscope Atom = 0x2b909 - Itemtype Atom = 0x2c308 - Kbd Atom = 0x1903 - Keygen Atom = 0x3906 - Keytype Atom = 0x53707 - Kind Atom = 0x10904 - Label Atom = 0xf005 - Lang Atom = 0x27404 - Legend Atom = 0x18206 - Li Atom = 0x1202 - Link Atom = 0x12804 - List Atom = 0x44e04 - Listing Atom = 0x44e07 - Loop Atom = 0xf404 - Low Atom = 0x11f03 - Malignmark Atom = 0x100a - Manifest Atom = 0x5f108 - Map Atom = 0x2b203 - Mark Atom = 0x1604 - Marquee Atom = 0x2cb07 - Math Atom = 0x2d204 - Max Atom = 0x2e103 - Maxlength Atom = 0x2e109 - Media Atom = 
0x6e05 - Mediagroup Atom = 0x6e0a - Menu Atom = 0x33804 - Menuitem Atom = 0x33808 - Meta Atom = 0x45d04 - Meter Atom = 0x24205 - Method Atom = 0x22c06 - Mglyph Atom = 0x2a206 - Mi Atom = 0x2eb02 - Min Atom = 0x2eb03 - Minlength Atom = 0x2eb09 - Mn Atom = 0x23502 - Mo Atom = 0x3ed02 - Ms Atom = 0x2bc02 - Mtext Atom = 0x2f505 - Multiple Atom = 0x30308 - Muted Atom = 0x30b05 - Name Atom = 0x6c04 - Nav Atom = 0x3e03 - Nobr Atom = 0x5704 - Noembed Atom = 0x6307 - Noframes Atom = 0x9808 - Noscript Atom = 0x3d208 - Novalidate Atom = 0x2360a - Object Atom = 0x1ec06 - Ol Atom = 0xc902 - Onabort Atom = 0x13a07 - Onafterprint Atom = 0x1c00c - Onautocomplete Atom = 0x1fa0e - Onautocompleteerror Atom = 0x1fa13 - Onbeforeprint Atom = 0x6040d - Onbeforeunload Atom = 0x4e70e - Onblur Atom = 0xaa06 - Oncancel Atom = 0xe908 - Oncanplay Atom = 0x28509 - Oncanplaythrough Atom = 0x28510 - Onchange Atom = 0x3a708 - Onclick Atom = 0x31007 - Onclose Atom = 0x31707 - Oncontextmenu Atom = 0x32f0d - Oncuechange Atom = 0x3420b - Ondblclick Atom = 0x34d0a - Ondrag Atom = 0x35706 - Ondragend Atom = 0x35709 - Ondragenter Atom = 0x3600b - Ondragleave Atom = 0x36b0b - Ondragover Atom = 0x3760a - Ondragstart Atom = 0x3800b - Ondrop Atom = 0x38f06 - Ondurationchange Atom = 0x39f10 - Onemptied Atom = 0x39609 - Onended Atom = 0x3af07 - Onerror Atom = 0x3b607 - Onfocus Atom = 0x3bd07 - Onhashchange Atom = 0x3da0c - Oninput Atom = 0x3e607 - Oninvalid Atom = 0x3f209 - Onkeydown Atom = 0x3fb09 - Onkeypress Atom = 0x4080a - Onkeyup Atom = 0x41807 - Onlanguagechange Atom = 0x43210 - Onload Atom = 0x44206 - Onloadeddata Atom = 0x4420c - Onloadedmetadata Atom = 0x45510 - Onloadstart Atom = 0x46b0b - Onmessage Atom = 0x47609 - Onmousedown Atom = 0x47f0b - Onmousemove Atom = 0x48a0b - Onmouseout Atom = 0x4950a - Onmouseover Atom = 0x4a20b - Onmouseup Atom = 0x4ad09 - Onmousewheel Atom = 0x4b60c - Onoffline Atom = 0x4c209 - Ononline Atom = 0x4cb08 - Onpagehide Atom = 0x4d30a - Onpageshow Atom = 0x4fe0a - Onpause Atom = 0x50d07 - Onplay Atom = 0x51706 - Onplaying Atom = 0x51709 - Onpopstate Atom = 0x5200a - Onprogress Atom = 0x52a0a - Onratechange Atom = 0x53e0c - Onreset Atom = 0x54a07 - Onresize Atom = 0x55108 - Onscroll Atom = 0x55f08 - Onseeked Atom = 0x56708 - Onseeking Atom = 0x56f09 - Onselect Atom = 0x57808 - Onshow Atom = 0x58206 - Onsort Atom = 0x58b06 - Onstalled Atom = 0x59509 - Onstorage Atom = 0x59e09 - Onsubmit Atom = 0x5a708 - Onsuspend Atom = 0x5bb09 - Ontimeupdate Atom = 0xdb0c - Ontoggle Atom = 0x5c408 - Onunload Atom = 0x5cc08 - Onvolumechange Atom = 0x5d40e - Onwaiting Atom = 0x5e209 - Open Atom = 0x3cf04 - Optgroup Atom = 0xf608 - Optimum Atom = 0x5eb07 - Option Atom = 0x60006 - Output Atom = 0x49c06 - P Atom = 0xc01 - Param Atom = 0xc05 - Pattern Atom = 0x5107 - Ping Atom = 0x7704 - Placeholder Atom = 0xc30b - Plaintext Atom = 0xfd09 - Poster Atom = 0x15706 - Pre Atom = 0x25e03 - Preload Atom = 0x25e07 - Progress Atom = 0x52c08 - Prompt Atom = 0x5fa06 - Public Atom = 0x41e06 - Q Atom = 0x13101 - Radiogroup Atom = 0x30a - Readonly Atom = 0x2fb08 - Rel Atom = 0x25f03 - Required Atom = 0x1d008 - Reversed Atom = 0x5a08 - Rows Atom = 0x9204 - Rowspan Atom = 0x9207 - Rp Atom = 0x1c602 - Rt Atom = 0x13f02 - Ruby Atom = 0xaf04 - S Atom = 0x2c01 - Samp Atom = 0x4e04 - Sandbox Atom = 0xbb07 - Scope Atom = 0x2bd05 - Scoped Atom = 0x2bd06 - Script Atom = 0x3d406 - Seamless Atom = 0x31c08 - Section Atom = 0x4e207 - Select Atom = 0x57a06 - Selected Atom = 0x57a08 - Shape Atom = 0x4f905 - Size Atom = 0x55504 - Sizes Atom = 
0x55505 - Small Atom = 0x18f05 - Sortable Atom = 0x58d08 - Sorted Atom = 0x19906 - Source Atom = 0x1aa06 - Spacer Atom = 0x2db06 - Span Atom = 0x9504 - Spellcheck Atom = 0x3230a - Src Atom = 0x3c303 - Srcdoc Atom = 0x3c306 - Srclang Atom = 0x41107 - Start Atom = 0x38605 - Step Atom = 0x5f704 - Strike Atom = 0x53306 - Strong Atom = 0x55906 - Style Atom = 0x61105 - Sub Atom = 0x5a903 - Summary Atom = 0x61607 - Sup Atom = 0x61d03 - Svg Atom = 0x62003 - System Atom = 0x62306 - Tabindex Atom = 0x46308 - Table Atom = 0x42d05 - Target Atom = 0x24b06 - Tbody Atom = 0x2e05 - Td Atom = 0x4702 - Template Atom = 0x62608 - Textarea Atom = 0x2f608 - Tfoot Atom = 0x8c05 - Th Atom = 0x22e02 - Thead Atom = 0x2d405 - Time Atom = 0xdd04 - Title Atom = 0xa105 - Tr Atom = 0x10502 - Track Atom = 0x10505 - Translate Atom = 0x14009 - Tt Atom = 0x5302 - Type Atom = 0x21404 - Typemustmatch Atom = 0x2140d - U Atom = 0xb01 - Ul Atom = 0x8a02 - Usemap Atom = 0x51106 - Value Atom = 0x4005 - Var Atom = 0x11503 - Video Atom = 0x28105 - Wbr Atom = 0x12103 - Width Atom = 0x50705 - Wrap Atom = 0x58704 - Xmp Atom = 0xc103 + A Atom = 0x1 + Abbr Atom = 0x4 + Accept Atom = 0x1a06 + AcceptCharset Atom = 0x1a0e + Accesskey Atom = 0x2c09 + Action Atom = 0x25a06 + Address Atom = 0x6ed07 + Align Atom = 0x6d405 + Allowfullscreen Atom = 0x1f00f + Allowpaymentrequest Atom = 0x6913 + Allowusermedia Atom = 0x850e + Alt Atom = 0xb003 + Annotation Atom = 0x1b90a + AnnotationXml Atom = 0x1b90e + Applet Atom = 0x30106 + Area Atom = 0x34a04 + Article Atom = 0x3f007 + As Atom = 0xb902 + Aside Atom = 0xc105 + Async Atom = 0xb905 + Audio Atom = 0xcf05 + Autocomplete Atom = 0x2600c + Autofocus Atom = 0xeb09 + Autoplay Atom = 0x10608 + B Atom = 0x101 + Base Atom = 0x11504 + Basefont Atom = 0x11508 + Bdi Atom = 0x16103 + Bdo Atom = 0x13403 + Bgsound Atom = 0x14707 + Big Atom = 0x15903 + Blink Atom = 0x15c05 + Blockquote Atom = 0x1680a + Body Atom = 0x2804 + Br Atom = 0x202 + Button Atom = 0x17206 + Canvas Atom = 0xbd06 + Caption Atom = 0x21907 + Center Atom = 0x20806 + Challenge Atom = 0x28309 + Charset Atom = 0x2107 + Checked Atom = 0x46d07 + Cite Atom = 0x55804 + Class Atom = 0x5b905 + Code Atom = 0x19004 + Col Atom = 0x19703 + Colgroup Atom = 0x19708 + Color Atom = 0x1af05 + Cols Atom = 0x1b404 + Colspan Atom = 0x1b407 + Command Atom = 0x1c707 + Content Atom = 0x57f07 + Contenteditable Atom = 0x57f0f + Contextmenu Atom = 0x3740b + Controls Atom = 0x1ce08 + Coords Atom = 0x1da06 + Crossorigin Atom = 0x1e30b + Data Atom = 0x49904 + Datalist Atom = 0x49908 + Datetime Atom = 0x2a008 + Dd Atom = 0x2bf02 + Default Atom = 0xc407 + Defer Atom = 0x19205 + Del Atom = 0x44603 + Desc Atom = 0x55504 + Details Atom = 0x4607 + Dfn Atom = 0x5f03 + Dialog Atom = 0x16206 + Dir Atom = 0xa303 + Dirname Atom = 0xa307 + Disabled Atom = 0x14d08 + Div Atom = 0x15403 + Dl Atom = 0x5e202 + Download Atom = 0x45708 + Draggable Atom = 0x18309 + Dropzone Atom = 0x3f908 + Dt Atom = 0x64702 + Em Atom = 0x4202 + Embed Atom = 0x4205 + Enctype Atom = 0x27507 + Face Atom = 0x20604 + Fieldset Atom = 0x20e08 + Figcaption Atom = 0x2160a + Figure Atom = 0x23006 + Font Atom = 0x11904 + Footer Atom = 0xb306 + For Atom = 0x23c03 + ForeignObject Atom = 0x23c0d + Foreignobject Atom = 0x2490d + Form Atom = 0x25604 + Formaction Atom = 0x2560a + Formenctype Atom = 0x2710b + Formmethod Atom = 0x28c0a + Formnovalidate Atom = 0x2960e + Formtarget Atom = 0x2a80a + Frame Atom = 0x5705 + Frameset Atom = 0x5708 + H1 Atom = 0x14502 + H2 Atom = 0x2c602 + H3 Atom = 0x2f502 + H4 Atom = 0x33902 + H5 Atom 
= 0x34302 + H6 Atom = 0x64902 + Head Atom = 0x32504 + Header Atom = 0x32506 + Headers Atom = 0x32507 + Height Atom = 0x12c06 + Hgroup Atom = 0x2b206 + Hidden Atom = 0x2bd06 + High Atom = 0x2c304 + Hr Atom = 0x14002 + Href Atom = 0x2c804 + Hreflang Atom = 0x2c808 + Html Atom = 0x13004 + HttpEquiv Atom = 0x2d00a + I Atom = 0x601 + Icon Atom = 0x57e04 + Id Atom = 0xc302 + Iframe Atom = 0x2e406 + Image Atom = 0x2ea05 + Img Atom = 0x2ef03 + Input Atom = 0x43f05 + Inputmode Atom = 0x43f09 + Ins Atom = 0x1ec03 + Integrity Atom = 0x22709 + Is Atom = 0x14e02 + Isindex Atom = 0x2f707 + Ismap Atom = 0x2fe05 + Itemid Atom = 0x37f06 + Itemprop Atom = 0x55908 + Itemref Atom = 0x3c107 + Itemscope Atom = 0x66d09 + Itemtype Atom = 0x30708 + Kbd Atom = 0x16003 + Keygen Atom = 0x3206 + Keytype Atom = 0x7e07 + Kind Atom = 0x18004 + Label Atom = 0xda05 + Lang Atom = 0x2cc04 + Legend Atom = 0x18a06 + Li Atom = 0x11102 + Link Atom = 0x15d04 + List Atom = 0x49d04 + Listing Atom = 0x49d07 + Loop Atom = 0xde04 + Low Atom = 0x6b03 + Main Atom = 0x1004 + Malignmark Atom = 0x6d30a + Manifest Atom = 0x30f08 + Map Atom = 0x30003 + Mark Atom = 0x6d904 + Marquee Atom = 0x31b07 + Math Atom = 0x32204 + Max Atom = 0x33103 + Maxlength Atom = 0x33109 + Media Atom = 0x8e05 + Mediagroup Atom = 0x8e0a + Menu Atom = 0x37b04 + Menuitem Atom = 0x37b08 + Meta Atom = 0x4ac04 + Meter Atom = 0xa805 + Method Atom = 0x29006 + Mglyph Atom = 0x2f006 + Mi Atom = 0x33b02 + Min Atom = 0x33b03 + Minlength Atom = 0x33b09 + Mn Atom = 0x29902 + Mo Atom = 0x6302 + Ms Atom = 0x67002 + Mtext Atom = 0x34505 + Multiple Atom = 0x35308 + Muted Atom = 0x35b05 + Name Atom = 0xa604 + Nav Atom = 0x1303 + Nobr Atom = 0x3704 + Noembed Atom = 0x4007 + Noframes Atom = 0x5508 + Nomodule Atom = 0x6108 + Nonce Atom = 0x56205 + Noscript Atom = 0x1fe08 + Novalidate Atom = 0x29a0a + Object Atom = 0x25006 + Ol Atom = 0x10102 + Onabort Atom = 0x17607 + Onafterprint Atom = 0x21e0c + Onautocomplete Atom = 0x25e0e + Onautocompleteerror Atom = 0x25e13 + Onauxclick Atom = 0x61b0a + Onbeforeprint Atom = 0x69a0d + Onbeforeunload Atom = 0x6e10e + Onblur Atom = 0x5c206 + Oncancel Atom = 0xd308 + Oncanplay Atom = 0x13609 + Oncanplaythrough Atom = 0x13610 + Onchange Atom = 0x40f08 + Onclick Atom = 0x2dd07 + Onclose Atom = 0x36007 + Oncontextmenu Atom = 0x3720d + Oncopy Atom = 0x38506 + Oncuechange Atom = 0x38b0b + Oncut Atom = 0x39605 + Ondblclick Atom = 0x39b0a + Ondrag Atom = 0x3a506 + Ondragend Atom = 0x3a509 + Ondragenter Atom = 0x3ae0b + Ondragexit Atom = 0x3b90a + Ondragleave Atom = 0x3d30b + Ondragover Atom = 0x3de0a + Ondragstart Atom = 0x3e80b + Ondrop Atom = 0x3f706 + Ondurationchange Atom = 0x40710 + Onemptied Atom = 0x3fe09 + Onended Atom = 0x41707 + Onerror Atom = 0x41e07 + Onfocus Atom = 0x42507 + Onhashchange Atom = 0x4310c + Oninput Atom = 0x43d07 + Oninvalid Atom = 0x44909 + Onkeydown Atom = 0x45209 + Onkeypress Atom = 0x45f0a + Onkeyup Atom = 0x47407 + Onlanguagechange Atom = 0x48110 + Onload Atom = 0x49106 + Onloadeddata Atom = 0x4910c + Onloadedmetadata Atom = 0x4a410 + Onloadend Atom = 0x4ba09 + Onloadstart Atom = 0x4c30b + Onmessage Atom = 0x4ce09 + Onmessageerror Atom = 0x4ce0e + Onmousedown Atom = 0x4dc0b + Onmouseenter Atom = 0x4e70c + Onmouseleave Atom = 0x4f30c + Onmousemove Atom = 0x4ff0b + Onmouseout Atom = 0x50a0a + Onmouseover Atom = 0x5170b + Onmouseup Atom = 0x52209 + Onmousewheel Atom = 0x5300c + Onoffline Atom = 0x53c09 + Ononline Atom = 0x54508 + Onpagehide Atom = 0x54d0a + Onpageshow Atom = 0x5670a + Onpaste Atom = 0x57307 + Onpause Atom = 
0x58e07 + Onplay Atom = 0x59806 + Onplaying Atom = 0x59809 + Onpopstate Atom = 0x5a10a + Onprogress Atom = 0x5ab0a + Onratechange Atom = 0x5c80c + Onrejectionhandled Atom = 0x5d412 + Onreset Atom = 0x5e607 + Onresize Atom = 0x5ed08 + Onscroll Atom = 0x5fc08 + Onsecuritypolicyviolation Atom = 0x60419 + Onseeked Atom = 0x62508 + Onseeking Atom = 0x62d09 + Onselect Atom = 0x63608 + Onshow Atom = 0x64006 + Onsort Atom = 0x64b06 + Onstalled Atom = 0x65509 + Onstorage Atom = 0x65e09 + Onsubmit Atom = 0x66708 + Onsuspend Atom = 0x67709 + Ontimeupdate Atom = 0x11a0c + Ontoggle Atom = 0x68008 + Onunhandledrejection Atom = 0x68814 + Onunload Atom = 0x6a708 + Onvolumechange Atom = 0x6af0e + Onwaiting Atom = 0x6bd09 + Onwheel Atom = 0x6c607 + Open Atom = 0x55f04 + Optgroup Atom = 0xe008 + Optimum Atom = 0x6cd07 + Option Atom = 0x6dd06 + Output Atom = 0x51106 + P Atom = 0xc01 + Param Atom = 0xc05 + Pattern Atom = 0x4f07 + Picture Atom = 0x9707 + Ping Atom = 0xe704 + Placeholder Atom = 0xfb0b + Plaintext Atom = 0x19e09 + Playsinline Atom = 0x10a0b + Poster Atom = 0x2b706 + Pre Atom = 0x46403 + Preload Atom = 0x47a07 + Progress Atom = 0x5ad08 + Prompt Atom = 0x52a06 + Public Atom = 0x57a06 + Q Atom = 0x7701 + Radiogroup Atom = 0x30a + Readonly Atom = 0x34b08 + Referrerpolicy Atom = 0x3c50e + Rel Atom = 0x47b03 + Required Atom = 0x23408 + Reversed Atom = 0x9c08 + Rows Atom = 0x3a04 + Rowspan Atom = 0x3a07 + Rp Atom = 0x22402 + Rt Atom = 0x17b02 + Ruby Atom = 0xac04 + S Atom = 0x2501 + Samp Atom = 0x4c04 + Sandbox Atom = 0xf307 + Scope Atom = 0x67105 + Scoped Atom = 0x67106 + Script Atom = 0x20006 + Seamless Atom = 0x36508 + Section Atom = 0x5bd07 + Select Atom = 0x63806 + Selected Atom = 0x63808 + Shape Atom = 0x1d505 + Size Atom = 0x5f104 + Sizes Atom = 0x5f105 + Slot Atom = 0x1df04 + Small Atom = 0x1ee05 + Sortable Atom = 0x64d08 + Sorted Atom = 0x32b06 + Source Atom = 0x36c06 + Spacer Atom = 0x42b06 + Span Atom = 0x3d04 + Spellcheck Atom = 0x4680a + Src Atom = 0x5b403 + Srcdoc Atom = 0x5b406 + Srclang Atom = 0x5f507 + Srcset Atom = 0x6f306 + Start Atom = 0x3ee05 + Step Atom = 0x57704 + Strike Atom = 0x7a06 + Strong Atom = 0x31506 + Style Atom = 0x6f905 + Sub Atom = 0x66903 + Summary Atom = 0x6fe07 + Sup Atom = 0x70503 + Svg Atom = 0x70803 + System Atom = 0x70b06 + Tabindex Atom = 0x4b208 + Table Atom = 0x58905 + Target Atom = 0x2ac06 + Tbody Atom = 0x2705 + Td Atom = 0x5e02 + Template Atom = 0x70e08 + Textarea Atom = 0x34608 + Tfoot Atom = 0xb205 + Th Atom = 0x13f02 + Thead Atom = 0x32405 + Time Atom = 0x11c04 + Title Atom = 0xca05 + Tr Atom = 0x7402 + Track Atom = 0x17c05 + Translate Atom = 0x1a609 + Tt Atom = 0x5102 + Type Atom = 0x8104 + Typemustmatch Atom = 0x2780d + U Atom = 0xb01 + Ul Atom = 0x6602 + Updateviacache Atom = 0x1200e + Usemap Atom = 0x59206 + Value Atom = 0x1505 + Var Atom = 0x15603 + Video Atom = 0x2d905 + Wbr Atom = 0x57003 + Width Atom = 0x64505 + Workertype Atom = 0x7160a + Wrap Atom = 0x72004 + Xmp Atom = 0xf903 ) -const hash0 = 0xc17da63e +const hash0 = 0x81cdf10e -const maxAtomLen = 19 +const maxAtomLen = 25 var table = [1 << 9]Atom{ - 0x1: 0x48a0b, // onmousemove - 0x2: 0x5e209, // onwaiting - 0x3: 0x1fa13, // onautocompleteerror - 0x4: 0x5fa06, // prompt - 0x7: 0x5eb07, // optimum - 0x8: 0x1604, // mark - 0xa: 0x5ad07, // itemref - 0xb: 0x4fe0a, // onpageshow - 0xc: 0x57a06, // select - 0xd: 0x17b09, // draggable - 0xe: 0x3e03, // nav - 0xf: 0x17507, // command - 0x11: 0xb01, // u - 0x14: 0x2d507, // headers - 0x15: 0x44a08, // datalist - 0x17: 0x4e04, // samp - 0x1a: 
0x3fb09, // onkeydown - 0x1b: 0x55f08, // onscroll - 0x1c: 0x15003, // col - 0x20: 0x3c908, // itemprop - 0x21: 0x2780a, // http-equiv - 0x22: 0x61d03, // sup - 0x24: 0x1d008, // required - 0x2b: 0x25e07, // preload - 0x2c: 0x6040d, // onbeforeprint - 0x2d: 0x3600b, // ondragenter - 0x2e: 0x50902, // dt - 0x2f: 0x5a708, // onsubmit - 0x30: 0x27002, // hr - 0x31: 0x32f0d, // oncontextmenu - 0x33: 0x29c05, // image - 0x34: 0x50d07, // onpause - 0x35: 0x25906, // hgroup - 0x36: 0x7704, // ping - 0x37: 0x57808, // onselect - 0x3a: 0x11303, // div - 0x3b: 0x1fa0e, // onautocomplete - 0x40: 0x2eb02, // mi - 0x41: 0x31c08, // seamless - 0x42: 0x2807, // charset - 0x43: 0x8502, // id - 0x44: 0x5200a, // onpopstate - 0x45: 0x3ef03, // del - 0x46: 0x2cb07, // marquee - 0x47: 0x3309, // accesskey - 0x49: 0x8d06, // footer - 0x4a: 0x44e04, // list - 0x4b: 0x2b005, // ismap - 0x51: 0x33804, // menu - 0x52: 0x2f04, // body - 0x55: 0x9a08, // frameset - 0x56: 0x54a07, // onreset - 0x57: 0x12705, // blink - 0x58: 0xa105, // title - 0x59: 0x38807, // article - 0x5b: 0x22e02, // th - 0x5d: 0x13101, // q - 0x5e: 0x3cf04, // open - 0x5f: 0x2fa04, // area - 0x61: 0x44206, // onload - 0x62: 0xda04, // font - 0x63: 0xd604, // base - 0x64: 0x16207, // colspan - 0x65: 0x53707, // keytype - 0x66: 0x11e02, // dl - 0x68: 0x1b008, // fieldset - 0x6a: 0x2eb03, // min - 0x6b: 0x11503, // var - 0x6f: 0x2d506, // header - 0x70: 0x13f02, // rt - 0x71: 0x15008, // colgroup - 0x72: 0x23502, // mn - 0x74: 0x13a07, // onabort - 0x75: 0x3906, // keygen - 0x76: 0x4c209, // onoffline - 0x77: 0x21f09, // challenge - 0x78: 0x2b203, // map - 0x7a: 0x2e902, // h4 - 0x7b: 0x3b607, // onerror - 0x7c: 0x2e109, // maxlength - 0x7d: 0x2f505, // mtext - 0x7e: 0xbb07, // sandbox - 0x7f: 0x58b06, // onsort - 0x80: 0x100a, // malignmark - 0x81: 0x45d04, // meta - 0x82: 0x7b05, // async - 0x83: 0x2a702, // h3 - 0x84: 0x26702, // dd - 0x85: 0x27004, // href - 0x86: 0x6e0a, // mediagroup - 0x87: 0x19406, // coords - 0x88: 0x41107, // srclang - 0x89: 0x34d0a, // ondblclick - 0x8a: 0x4005, // value - 0x8c: 0xe908, // oncancel - 0x8e: 0x3230a, // spellcheck - 0x8f: 0x9a05, // frame - 0x91: 0x12403, // big - 0x94: 0x1f606, // action - 0x95: 0x6903, // dir - 0x97: 0x2fb08, // readonly - 0x99: 0x42d05, // table - 0x9a: 0x61607, // summary - 0x9b: 0x12103, // wbr - 0x9c: 0x30a, // radiogroup - 0x9d: 0x6c04, // name - 0x9f: 0x62306, // system - 0xa1: 0x15d05, // color - 0xa2: 0x7f06, // canvas - 0xa3: 0x25504, // html - 0xa5: 0x56f09, // onseeking - 0xac: 0x4f905, // shape - 0xad: 0x25f03, // rel - 0xae: 0x28510, // oncanplaythrough - 0xaf: 0x3760a, // ondragover - 0xb0: 0x62608, // template - 0xb1: 0x1d80d, // foreignObject - 0xb3: 0x9204, // rows - 0xb6: 0x44e07, // listing - 0xb7: 0x49c06, // output - 0xb9: 0x3310b, // contextmenu - 0xbb: 0x11f03, // low - 0xbc: 0x1c602, // rp - 0xbd: 0x5bb09, // onsuspend - 0xbe: 0x13606, // button - 0xbf: 0x4db04, // desc - 0xc1: 0x4e207, // section - 0xc2: 0x52a0a, // onprogress - 0xc3: 0x59e09, // onstorage - 0xc4: 0x2d204, // math - 0xc5: 0x4503, // alt - 0xc7: 0x8a02, // ul - 0xc8: 0x5107, // pattern - 0xc9: 0x4b60c, // onmousewheel - 0xca: 0x35709, // ondragend - 0xcb: 0xaf04, // ruby - 0xcc: 0xc01, // p - 0xcd: 0x31707, // onclose - 0xce: 0x24205, // meter - 0xcf: 0x11807, // bgsound - 0xd2: 0x25106, // height - 0xd4: 0x101, // b - 0xd5: 0x2c308, // itemtype - 0xd8: 0x1bb07, // caption - 0xd9: 0x10c08, // disabled - 0xdb: 0x33808, // menuitem - 0xdc: 0x62003, // svg - 0xdd: 0x18f05, // small - 0xde: 0x44a04, 
// data - 0xe0: 0x4cb08, // ononline - 0xe1: 0x2a206, // mglyph - 0xe3: 0x6505, // embed - 0xe4: 0x10502, // tr - 0xe5: 0x46b0b, // onloadstart - 0xe7: 0x3c306, // srcdoc - 0xeb: 0x5c408, // ontoggle - 0xed: 0xe703, // bdo - 0xee: 0x4702, // td - 0xef: 0x8305, // aside - 0xf0: 0x29402, // h2 - 0xf1: 0x52c08, // progress - 0xf2: 0x12c0a, // blockquote - 0xf4: 0xf005, // label - 0xf5: 0x601, // i - 0xf7: 0x9207, // rowspan - 0xfb: 0x51709, // onplaying - 0xfd: 0x2a103, // img - 0xfe: 0xf608, // optgroup - 0xff: 0x42307, // content - 0x101: 0x53e0c, // onratechange - 0x103: 0x3da0c, // onhashchange - 0x104: 0x4807, // details - 0x106: 0x40008, // download - 0x109: 0x14009, // translate - 0x10b: 0x4230f, // contenteditable - 0x10d: 0x36b0b, // ondragleave - 0x10e: 0x2106, // accept - 0x10f: 0x57a08, // selected - 0x112: 0x1f20a, // formaction - 0x113: 0x5b506, // center - 0x115: 0x45510, // onloadedmetadata - 0x116: 0x12804, // link - 0x117: 0xdd04, // time - 0x118: 0x19f0b, // crossorigin - 0x119: 0x3bd07, // onfocus - 0x11a: 0x58704, // wrap - 0x11b: 0x42204, // icon - 0x11d: 0x28105, // video - 0x11e: 0x4de05, // class - 0x121: 0x5d40e, // onvolumechange - 0x122: 0xaa06, // onblur - 0x123: 0x2b909, // itemscope - 0x124: 0x61105, // style - 0x127: 0x41e06, // public - 0x129: 0x2320e, // formnovalidate - 0x12a: 0x58206, // onshow - 0x12c: 0x51706, // onplay - 0x12d: 0x3c804, // cite - 0x12e: 0x2bc02, // ms - 0x12f: 0xdb0c, // ontimeupdate - 0x130: 0x10904, // kind - 0x131: 0x2470a, // formtarget - 0x135: 0x3af07, // onended - 0x136: 0x26506, // hidden - 0x137: 0x2c01, // s - 0x139: 0x2280a, // formmethod - 0x13a: 0x3e805, // input - 0x13c: 0x50b02, // h6 - 0x13d: 0xc902, // ol - 0x13e: 0x3420b, // oncuechange - 0x13f: 0x1e50d, // foreignobject - 0x143: 0x4e70e, // onbeforeunload - 0x144: 0x2bd05, // scope - 0x145: 0x39609, // onemptied - 0x146: 0x14b05, // defer - 0x147: 0xc103, // xmp - 0x148: 0x39f10, // ondurationchange - 0x149: 0x1903, // kbd - 0x14c: 0x47609, // onmessage - 0x14d: 0x60006, // option - 0x14e: 0x2eb09, // minlength - 0x14f: 0x32807, // checked - 0x150: 0xce08, // autoplay - 0x152: 0x202, // br - 0x153: 0x2360a, // novalidate - 0x156: 0x6307, // noembed - 0x159: 0x31007, // onclick - 0x15a: 0x47f0b, // onmousedown - 0x15b: 0x3a708, // onchange - 0x15e: 0x3f209, // oninvalid - 0x15f: 0x2bd06, // scoped - 0x160: 0x18808, // controls - 0x161: 0x30b05, // muted - 0x162: 0x58d08, // sortable - 0x163: 0x51106, // usemap - 0x164: 0x1b80a, // figcaption - 0x165: 0x35706, // ondrag - 0x166: 0x26b04, // high - 0x168: 0x3c303, // src - 0x169: 0x15706, // poster - 0x16b: 0x1670e, // annotation-xml - 0x16c: 0x5f704, // step - 0x16d: 0x4, // abbr - 0x16e: 0x1b06, // dialog - 0x170: 0x1202, // li - 0x172: 0x3ed02, // mo - 0x175: 0x1d803, // for - 0x176: 0x1a803, // ins - 0x178: 0x55504, // size - 0x179: 0x43210, // onlanguagechange - 0x17a: 0x8607, // default - 0x17b: 0x1a03, // bdi - 0x17c: 0x4d30a, // onpagehide - 0x17d: 0x6907, // dirname - 0x17e: 0x21404, // type - 0x17f: 0x1f204, // form - 0x181: 0x28509, // oncanplay - 0x182: 0x6103, // dfn - 0x183: 0x46308, // tabindex - 0x186: 0x6502, // em - 0x187: 0x27404, // lang - 0x189: 0x39108, // dropzone - 0x18a: 0x4080a, // onkeypress - 0x18b: 0x23c08, // datetime - 0x18c: 0x16204, // cols - 0x18d: 0x1, // a - 0x18e: 0x4420c, // onloadeddata - 0x190: 0xa605, // audio - 0x192: 0x2e05, // tbody - 0x193: 0x22c06, // method - 0x195: 0xf404, // loop - 0x196: 0x29606, // iframe - 0x198: 0x2d504, // head - 0x19e: 0x5f108, // manifest - 0x19f: 
0xb309, // autofocus - 0x1a0: 0x14904, // code - 0x1a1: 0x55906, // strong - 0x1a2: 0x30308, // multiple - 0x1a3: 0xc05, // param - 0x1a6: 0x21107, // enctype - 0x1a7: 0x5b304, // face - 0x1a8: 0xfd09, // plaintext - 0x1a9: 0x26e02, // h1 - 0x1aa: 0x59509, // onstalled - 0x1ad: 0x3d406, // script - 0x1ae: 0x2db06, // spacer - 0x1af: 0x55108, // onresize - 0x1b0: 0x4a20b, // onmouseover - 0x1b1: 0x5cc08, // onunload - 0x1b2: 0x56708, // onseeked - 0x1b4: 0x2140d, // typemustmatch - 0x1b5: 0x1cc06, // figure - 0x1b6: 0x4950a, // onmouseout - 0x1b7: 0x25e03, // pre - 0x1b8: 0x50705, // width - 0x1b9: 0x19906, // sorted - 0x1bb: 0x5704, // nobr - 0x1be: 0x5302, // tt - 0x1bf: 0x1105, // align - 0x1c0: 0x3e607, // oninput - 0x1c3: 0x41807, // onkeyup - 0x1c6: 0x1c00c, // onafterprint - 0x1c7: 0x210e, // accept-charset - 0x1c8: 0x33c06, // itemid - 0x1c9: 0x3e809, // inputmode - 0x1cb: 0x53306, // strike - 0x1cc: 0x5a903, // sub - 0x1cd: 0x10505, // track - 0x1ce: 0x38605, // start - 0x1d0: 0xd608, // basefont - 0x1d6: 0x1aa06, // source - 0x1d7: 0x18206, // legend - 0x1d8: 0x2d405, // thead - 0x1da: 0x8c05, // tfoot - 0x1dd: 0x1ec06, // object - 0x1de: 0x6e05, // media - 0x1df: 0x1670a, // annotation - 0x1e0: 0x20d0b, // formenctype - 0x1e2: 0x3d208, // noscript - 0x1e4: 0x55505, // sizes - 0x1e5: 0x1fc0c, // autocomplete - 0x1e6: 0x9504, // span - 0x1e7: 0x9808, // noframes - 0x1e8: 0x24b06, // target - 0x1e9: 0x38f06, // ondrop - 0x1ea: 0x2b306, // applet - 0x1ec: 0x5a08, // reversed - 0x1f0: 0x2a907, // isindex - 0x1f3: 0x27008, // hreflang - 0x1f5: 0x2f302, // h5 - 0x1f6: 0x4f307, // address - 0x1fa: 0x2e103, // max - 0x1fb: 0xc30b, // placeholder - 0x1fc: 0x2f608, // textarea - 0x1fe: 0x4ad09, // onmouseup - 0x1ff: 0x3800b, // ondragstart + 0x1: 0x8e0a, // mediagroup + 0x2: 0x2cc04, // lang + 0x4: 0x2c09, // accesskey + 0x5: 0x5708, // frameset + 0x7: 0x63608, // onselect + 0x8: 0x70b06, // system + 0xa: 0x64505, // width + 0xc: 0x2710b, // formenctype + 0xd: 0x10102, // ol + 0xe: 0x38b0b, // oncuechange + 0x10: 0x13403, // bdo + 0x11: 0xcf05, // audio + 0x12: 0x18309, // draggable + 0x14: 0x2d905, // video + 0x15: 0x29902, // mn + 0x16: 0x37b04, // menu + 0x17: 0x2b706, // poster + 0x19: 0xb306, // footer + 0x1a: 0x29006, // method + 0x1b: 0x2a008, // datetime + 0x1c: 0x17607, // onabort + 0x1d: 0x1200e, // updateviacache + 0x1e: 0xb905, // async + 0x1f: 0x49106, // onload + 0x21: 0xd308, // oncancel + 0x22: 0x62508, // onseeked + 0x23: 0x2ea05, // image + 0x24: 0x5d412, // onrejectionhandled + 0x26: 0x15d04, // link + 0x27: 0x51106, // output + 0x28: 0x32504, // head + 0x29: 0x4f30c, // onmouseleave + 0x2a: 0x57307, // onpaste + 0x2b: 0x59809, // onplaying + 0x2c: 0x1b407, // colspan + 0x2f: 0x1af05, // color + 0x30: 0x5f104, // size + 0x31: 0x2d00a, // http-equiv + 0x33: 0x601, // i + 0x34: 0x54d0a, // onpagehide + 0x35: 0x68814, // onunhandledrejection + 0x37: 0x41e07, // onerror + 0x3a: 0x11508, // basefont + 0x3f: 0x1303, // nav + 0x40: 0x18004, // kind + 0x41: 0x34b08, // readonly + 0x42: 0x2f006, // mglyph + 0x44: 0x11102, // li + 0x46: 0x2bd06, // hidden + 0x47: 0x70803, // svg + 0x48: 0x57704, // step + 0x49: 0x22709, // integrity + 0x4a: 0x57a06, // public + 0x4c: 0x19703, // col + 0x4d: 0x1680a, // blockquote + 0x4e: 0x34302, // h5 + 0x50: 0x5ad08, // progress + 0x51: 0x5f105, // sizes + 0x52: 0x33902, // h4 + 0x56: 0x32405, // thead + 0x57: 0x7e07, // keytype + 0x58: 0x5ab0a, // onprogress + 0x59: 0x43f09, // inputmode + 0x5a: 0x3a509, // ondragend + 0x5d: 0x39605, // oncut + 
0x5e: 0x42b06, // spacer + 0x5f: 0x19708, // colgroup + 0x62: 0x14e02, // is + 0x65: 0xb902, // as + 0x66: 0x53c09, // onoffline + 0x67: 0x32b06, // sorted + 0x69: 0x48110, // onlanguagechange + 0x6c: 0x4310c, // onhashchange + 0x6d: 0xa604, // name + 0x6e: 0xb205, // tfoot + 0x6f: 0x55504, // desc + 0x70: 0x33103, // max + 0x72: 0x1da06, // coords + 0x73: 0x2f502, // h3 + 0x74: 0x6e10e, // onbeforeunload + 0x75: 0x3a04, // rows + 0x76: 0x63806, // select + 0x77: 0xa805, // meter + 0x78: 0x37f06, // itemid + 0x79: 0x5300c, // onmousewheel + 0x7a: 0x5b406, // srcdoc + 0x7d: 0x17c05, // track + 0x7f: 0x30708, // itemtype + 0x82: 0x6302, // mo + 0x83: 0x40f08, // onchange + 0x84: 0x32507, // headers + 0x85: 0x5c80c, // onratechange + 0x86: 0x60419, // onsecuritypolicyviolation + 0x88: 0x49908, // datalist + 0x89: 0x4dc0b, // onmousedown + 0x8a: 0x1df04, // slot + 0x8b: 0x4a410, // onloadedmetadata + 0x8c: 0x1a06, // accept + 0x8d: 0x25006, // object + 0x91: 0x6af0e, // onvolumechange + 0x92: 0x2107, // charset + 0x93: 0x25e13, // onautocompleteerror + 0x94: 0x6913, // allowpaymentrequest + 0x95: 0x2804, // body + 0x96: 0xc407, // default + 0x97: 0x63808, // selected + 0x98: 0x20604, // face + 0x99: 0x1d505, // shape + 0x9b: 0x68008, // ontoggle + 0x9e: 0x64702, // dt + 0x9f: 0x6d904, // mark + 0xa1: 0xb01, // u + 0xa4: 0x6a708, // onunload + 0xa5: 0xde04, // loop + 0xa6: 0x14d08, // disabled + 0xaa: 0x41707, // onended + 0xab: 0x6d30a, // malignmark + 0xad: 0x67709, // onsuspend + 0xae: 0x34505, // mtext + 0xaf: 0x64b06, // onsort + 0xb0: 0x55908, // itemprop + 0xb3: 0x66d09, // itemscope + 0xb4: 0x15c05, // blink + 0xb6: 0x3a506, // ondrag + 0xb7: 0x6602, // ul + 0xb8: 0x25604, // form + 0xb9: 0xf307, // sandbox + 0xba: 0x5705, // frame + 0xbb: 0x1505, // value + 0xbc: 0x65e09, // onstorage + 0xc0: 0x17b02, // rt + 0xc2: 0x202, // br + 0xc3: 0x20e08, // fieldset + 0xc4: 0x2780d, // typemustmatch + 0xc5: 0x6108, // nomodule + 0xc6: 0x4007, // noembed + 0xc7: 0x69a0d, // onbeforeprint + 0xc8: 0x17206, // button + 0xc9: 0x2dd07, // onclick + 0xca: 0x6fe07, // summary + 0xcd: 0xac04, // ruby + 0xce: 0x5b905, // class + 0xcf: 0x3e80b, // ondragstart + 0xd0: 0x21907, // caption + 0xd4: 0x850e, // allowusermedia + 0xd5: 0x4c30b, // onloadstart + 0xd9: 0x15403, // div + 0xda: 0x49d04, // list + 0xdb: 0x32204, // math + 0xdc: 0x43f05, // input + 0xdf: 0x3de0a, // ondragover + 0xe0: 0x2c602, // h2 + 0xe2: 0x19e09, // plaintext + 0xe4: 0x4e70c, // onmouseenter + 0xe7: 0x46d07, // checked + 0xe8: 0x46403, // pre + 0xea: 0x35308, // multiple + 0xeb: 0x16103, // bdi + 0xec: 0x33109, // maxlength + 0xed: 0x7701, // q + 0xee: 0x61b0a, // onauxclick + 0xf0: 0x57003, // wbr + 0xf2: 0x11504, // base + 0xf3: 0x6dd06, // option + 0xf5: 0x40710, // ondurationchange + 0xf7: 0x5508, // noframes + 0xf9: 0x3f908, // dropzone + 0xfb: 0x67105, // scope + 0xfc: 0x9c08, // reversed + 0xfd: 0x3ae0b, // ondragenter + 0xfe: 0x3ee05, // start + 0xff: 0xf903, // xmp + 0x100: 0x5f507, // srclang + 0x101: 0x2ef03, // img + 0x104: 0x101, // b + 0x105: 0x23c03, // for + 0x106: 0xc105, // aside + 0x107: 0x43d07, // oninput + 0x108: 0x34a04, // area + 0x109: 0x28c0a, // formmethod + 0x10a: 0x72004, // wrap + 0x10c: 0x22402, // rp + 0x10d: 0x45f0a, // onkeypress + 0x10e: 0x5102, // tt + 0x110: 0x33b02, // mi + 0x111: 0x35b05, // muted + 0x112: 0xb003, // alt + 0x113: 0x19004, // code + 0x114: 0x4202, // em + 0x115: 0x3b90a, // ondragexit + 0x117: 0x3d04, // span + 0x119: 0x30f08, // manifest + 0x11a: 0x37b08, // menuitem + 0x11b: 
0x57f07, // content + 0x11d: 0x6bd09, // onwaiting + 0x11f: 0x4ba09, // onloadend + 0x121: 0x3720d, // oncontextmenu + 0x123: 0x5c206, // onblur + 0x124: 0x3f007, // article + 0x125: 0xa303, // dir + 0x126: 0xe704, // ping + 0x127: 0x23408, // required + 0x128: 0x44909, // oninvalid + 0x129: 0x6d405, // align + 0x12b: 0x57e04, // icon + 0x12c: 0x64902, // h6 + 0x12d: 0x1b404, // cols + 0x12e: 0x2160a, // figcaption + 0x12f: 0x45209, // onkeydown + 0x130: 0x66708, // onsubmit + 0x131: 0x13609, // oncanplay + 0x132: 0x70503, // sup + 0x133: 0xc01, // p + 0x135: 0x3fe09, // onemptied + 0x136: 0x38506, // oncopy + 0x137: 0x55804, // cite + 0x138: 0x39b0a, // ondblclick + 0x13a: 0x4ff0b, // onmousemove + 0x13c: 0x66903, // sub + 0x13d: 0x47b03, // rel + 0x13e: 0xe008, // optgroup + 0x142: 0x3a07, // rowspan + 0x143: 0x36c06, // source + 0x144: 0x1fe08, // noscript + 0x145: 0x55f04, // open + 0x146: 0x1ec03, // ins + 0x147: 0x23c0d, // foreignObject + 0x148: 0x5a10a, // onpopstate + 0x14a: 0x27507, // enctype + 0x14b: 0x25e0e, // onautocomplete + 0x14c: 0x34608, // textarea + 0x14e: 0x2600c, // autocomplete + 0x14f: 0x14002, // hr + 0x150: 0x1ce08, // controls + 0x151: 0xc302, // id + 0x153: 0x21e0c, // onafterprint + 0x155: 0x2490d, // foreignobject + 0x156: 0x31b07, // marquee + 0x157: 0x58e07, // onpause + 0x158: 0x5e202, // dl + 0x159: 0x12c06, // height + 0x15a: 0x33b03, // min + 0x15b: 0xa307, // dirname + 0x15c: 0x1a609, // translate + 0x15d: 0x13004, // html + 0x15e: 0x33b09, // minlength + 0x15f: 0x47a07, // preload + 0x160: 0x70e08, // template + 0x161: 0x3d30b, // ondragleave + 0x164: 0x5b403, // src + 0x165: 0x31506, // strong + 0x167: 0x4c04, // samp + 0x168: 0x6ed07, // address + 0x169: 0x54508, // ononline + 0x16b: 0xfb0b, // placeholder + 0x16c: 0x2ac06, // target + 0x16d: 0x1ee05, // small + 0x16e: 0x6c607, // onwheel + 0x16f: 0x1b90a, // annotation + 0x170: 0x4680a, // spellcheck + 0x171: 0x4607, // details + 0x172: 0xbd06, // canvas + 0x173: 0xeb09, // autofocus + 0x174: 0xc05, // param + 0x176: 0x45708, // download + 0x177: 0x44603, // del + 0x178: 0x36007, // onclose + 0x179: 0x16003, // kbd + 0x17a: 0x30106, // applet + 0x17b: 0x2c804, // href + 0x17c: 0x5ed08, // onresize + 0x17e: 0x4910c, // onloadeddata + 0x180: 0x7402, // tr + 0x181: 0x2a80a, // formtarget + 0x182: 0xca05, // title + 0x183: 0x6f905, // style + 0x184: 0x7a06, // strike + 0x185: 0x59206, // usemap + 0x186: 0x2e406, // iframe + 0x187: 0x1004, // main + 0x189: 0x9707, // picture + 0x18c: 0x2fe05, // ismap + 0x18e: 0x49904, // data + 0x18f: 0xda05, // label + 0x191: 0x3c50e, // referrerpolicy + 0x192: 0x13f02, // th + 0x194: 0x52a06, // prompt + 0x195: 0x5bd07, // section + 0x197: 0x6cd07, // optimum + 0x198: 0x2c304, // high + 0x199: 0x14502, // h1 + 0x19a: 0x65509, // onstalled + 0x19b: 0x15603, // var + 0x19c: 0x11c04, // time + 0x19e: 0x67002, // ms + 0x19f: 0x32506, // header + 0x1a0: 0x4ce09, // onmessage + 0x1a1: 0x56205, // nonce + 0x1a2: 0x2560a, // formaction + 0x1a3: 0x20806, // center + 0x1a4: 0x3704, // nobr + 0x1a5: 0x58905, // table + 0x1a6: 0x49d07, // listing + 0x1a7: 0x18a06, // legend + 0x1a9: 0x28309, // challenge + 0x1aa: 0x23006, // figure + 0x1ab: 0x8e05, // media + 0x1ae: 0x8104, // type + 0x1af: 0x11904, // font + 0x1b0: 0x4ce0e, // onmessageerror + 0x1b1: 0x36508, // seamless + 0x1b2: 0x5f03, // dfn + 0x1b3: 0x19205, // defer + 0x1b4: 0x6b03, // low + 0x1b5: 0x62d09, // onseeking + 0x1b6: 0x5170b, // onmouseover + 0x1b7: 0x29a0a, // novalidate + 0x1b8: 0x7160a, // workertype + 
0x1ba: 0x3c107, // itemref + 0x1bd: 0x1, // a + 0x1be: 0x30003, // map + 0x1bf: 0x11a0c, // ontimeupdate + 0x1c0: 0x14707, // bgsound + 0x1c1: 0x3206, // keygen + 0x1c2: 0x2705, // tbody + 0x1c5: 0x64006, // onshow + 0x1c7: 0x2501, // s + 0x1c8: 0x4f07, // pattern + 0x1cc: 0x13610, // oncanplaythrough + 0x1ce: 0x2bf02, // dd + 0x1cf: 0x6f306, // srcset + 0x1d0: 0x15903, // big + 0x1d2: 0x64d08, // sortable + 0x1d3: 0x47407, // onkeyup + 0x1d5: 0x59806, // onplay + 0x1d7: 0x4ac04, // meta + 0x1d8: 0x3f706, // ondrop + 0x1da: 0x5fc08, // onscroll + 0x1db: 0x1e30b, // crossorigin + 0x1dc: 0x5670a, // onpageshow + 0x1dd: 0x4, // abbr + 0x1de: 0x5e02, // td + 0x1df: 0x57f0f, // contenteditable + 0x1e0: 0x25a06, // action + 0x1e1: 0x10a0b, // playsinline + 0x1e2: 0x42507, // onfocus + 0x1e3: 0x2c808, // hreflang + 0x1e5: 0x50a0a, // onmouseout + 0x1e6: 0x5e607, // onreset + 0x1e7: 0x10608, // autoplay + 0x1ea: 0x67106, // scoped + 0x1ec: 0x30a, // radiogroup + 0x1ee: 0x3740b, // contextmenu + 0x1ef: 0x52209, // onmouseup + 0x1f1: 0x2b206, // hgroup + 0x1f2: 0x1f00f, // allowfullscreen + 0x1f3: 0x4b208, // tabindex + 0x1f6: 0x2f707, // isindex + 0x1f7: 0x1a0e, // accept-charset + 0x1f8: 0x2960e, // formnovalidate + 0x1fb: 0x1b90e, // annotation-xml + 0x1fc: 0x4205, // embed + 0x1fd: 0x20006, // script + 0x1fe: 0x16206, // dialog + 0x1ff: 0x1c707, // command } -const atomText = "abbradiogrouparamalignmarkbdialogaccept-charsetbodyaccesskey" + - "genavaluealtdetailsampatternobreversedfnoembedirnamediagroup" + - "ingasyncanvasidefaultfooterowspanoframesetitleaudionblurubya" + - "utofocusandboxmplaceholderautoplaybasefontimeupdatebdoncance" + - "labelooptgrouplaintextrackindisabledivarbgsoundlowbrbigblink" + - "blockquotebuttonabortranslatecodefercolgroupostercolorcolspa" + - "nnotation-xmlcommandraggablegendcontrolsmallcoordsortedcross" + - "originsourcefieldsetfigcaptionafterprintfigurequiredforeignO" + - "bjectforeignobjectformactionautocompleteerrorformenctypemust" + - "matchallengeformmethodformnovalidatetimeterformtargetheightm" + - "lhgroupreloadhiddenhigh1hreflanghttp-equivideoncanplaythroug" + - "h2iframeimageimglyph3isindexismappletitemscopeditemtypemarqu" + - "eematheaderspacermaxlength4minlength5mtextareadonlymultiplem" + - "utedonclickoncloseamlesspellcheckedoncontextmenuitemidoncuec" + - "hangeondblclickondragendondragenterondragleaveondragoverondr" + - "agstarticleondropzonemptiedondurationchangeonendedonerroronf" + - "ocusrcdocitempropenoscriptonhashchangeoninputmodeloninvalido" + - "nkeydownloadonkeypressrclangonkeyupublicontenteditableonlang" + - "uagechangeonloadeddatalistingonloadedmetadatabindexonloadsta" + - "rtonmessageonmousedownonmousemoveonmouseoutputonmouseoveronm" + - "ouseuponmousewheelonofflineononlineonpagehidesclassectionbef" + - "oreunloaddresshapeonpageshowidth6onpausemaponplayingonpopsta" + - "teonprogresstrikeytypeonratechangeonresetonresizestrongonscr" + - "ollonseekedonseekingonselectedonshowraponsortableonstalledon" + - "storageonsubmitemrefacenteronsuspendontoggleonunloadonvolume" + - "changeonwaitingoptimumanifestepromptoptionbeforeprintstylesu" + - "mmarysupsvgsystemplate" +const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobro" + + "wspanoembedetailsampatternoframesetdfnomoduleallowpaymentreq" + + "uestrikeytypeallowusermediagroupictureversedirnameterubyaltf" + + "ooterasyncanvasidefaultitleaudioncancelabelooptgroupingautof" + + "ocusandboxmplaceholderautoplaysinlinebasefontimeupdateviacac" + + 
"heightmlbdoncanplaythrough1bgsoundisabledivarbigblinkbdialog" + + "blockquotebuttonabortrackindraggablegendcodefercolgrouplaint" + + "extranslatecolorcolspannotation-xmlcommandcontrolshapecoords" + + "lotcrossoriginsmallowfullscreenoscriptfacenterfieldsetfigcap" + + "tionafterprintegrityfigurequiredforeignObjectforeignobjectfo" + + "rmactionautocompleteerrorformenctypemustmatchallengeformmeth" + + "odformnovalidatetimeformtargethgrouposterhiddenhigh2hreflang" + + "http-equivideonclickiframeimageimglyph3isindexismappletitemt" + + "ypemanifestrongmarqueematheadersortedmaxlength4minlength5mte" + + "xtareadonlymultiplemutedoncloseamlessourceoncontextmenuitemi" + + "doncopyoncuechangeoncutondblclickondragendondragenterondrage" + + "xitemreferrerpolicyondragleaveondragoverondragstarticleondro" + + "pzonemptiedondurationchangeonendedonerroronfocuspaceronhashc" + + "hangeoninputmodeloninvalidonkeydownloadonkeypresspellchecked" + + "onkeyupreloadonlanguagechangeonloadeddatalistingonloadedmeta" + + "databindexonloadendonloadstartonmessageerroronmousedownonmou" + + "seenteronmouseleaveonmousemoveonmouseoutputonmouseoveronmous" + + "eupromptonmousewheelonofflineononlineonpagehidescitempropeno" + + "nceonpageshowbronpastepublicontenteditableonpausemaponplayin" + + "gonpopstateonprogressrcdoclassectionbluronratechangeonreject" + + "ionhandledonresetonresizesrclangonscrollonsecuritypolicyviol" + + "ationauxclickonseekedonseekingonselectedonshowidth6onsortabl" + + "eonstalledonstorageonsubmitemscopedonsuspendontoggleonunhand" + + "ledrejectionbeforeprintonunloadonvolumechangeonwaitingonwhee" + + "loptimumalignmarkoptionbeforeunloaddressrcsetstylesummarysup" + + "svgsystemplateworkertypewrap" diff --git a/cluster-autoscaler/vendor/golang.org/x/net/html/const.go b/cluster-autoscaler/vendor/golang.org/x/net/html/const.go index 52f651ff6db8..b37e6212471a 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/html/const.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/html/const.go @@ -52,10 +52,12 @@ var isSpecialElementMap = map[string]bool{ "iframe": true, "img": true, "input": true, - "isindex": true, + "isindex": true, // The 'isindex' element has been removed, but keep it for backwards compatibility. + "keygen": true, "li": true, "link": true, "listing": true, + "main": true, "marquee": true, "menu": true, "meta": true, diff --git a/cluster-autoscaler/vendor/golang.org/x/net/html/doc.go b/cluster-autoscaler/vendor/golang.org/x/net/html/doc.go index b453fe1e4c2d..e3ec457d3255 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/html/doc.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/html/doc.go @@ -49,18 +49,18 @@ call to Next. For example, to extract an HTML page's anchor text: for { tt := z.Next() switch tt { - case ErrorToken: + case html.ErrorToken: return z.Err() - case TextToken: + case html.TextToken: if depth > 0 { // emitBytes should copy the []byte it receives, // if it doesn't process it immediately. 
emitBytes(z.Text()) } - case StartTagToken, EndTagToken: + case html.StartTagToken, html.EndTagToken: tn, _ := z.TagName() if len(tn) == 1 && tn[0] == 'a' { - if tt == StartTagToken { + if tt == html.StartTagToken { depth++ } else { depth-- diff --git a/cluster-autoscaler/vendor/golang.org/x/net/html/token.go b/cluster-autoscaler/vendor/golang.org/x/net/html/token.go index 893e272a9e8d..e3c01d7c9060 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/html/token.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/html/token.go @@ -1161,8 +1161,8 @@ func (z *Tokenizer) TagAttr() (key, val []byte, moreAttr bool) { return nil, nil, false } -// Token returns the next Token. The result's Data and Attr values remain valid -// after subsequent Next calls. +// Token returns the current Token. The result's Data and Attr values remain +// valid after subsequent Next calls. func (z *Tokenizer) Token() Token { t := Token{Type: z.tt} switch z.tt { diff --git a/cluster-autoscaler/vendor/golang.org/x/net/http2/ciphers.go b/cluster-autoscaler/vendor/golang.org/x/net/http2/ciphers.go index 698860b77755..c9a0cf3b422c 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/http2/ciphers.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/http2/ciphers.go @@ -5,7 +5,7 @@ package http2 // A list of the possible cipher suite ids. Taken from -// http://www.iana.org/assignments/tls-parameters/tls-parameters.txt +// https://www.iana.org/assignments/tls-parameters/tls-parameters.txt const ( cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000 diff --git a/cluster-autoscaler/vendor/golang.org/x/net/http2/configure_transport.go b/cluster-autoscaler/vendor/golang.org/x/net/http2/configure_transport.go index b65fc6d423d3..088d6e2bdb34 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/http2/configure_transport.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/http2/configure_transport.go @@ -73,7 +73,7 @@ type noDialH2RoundTripper struct{ t *Transport } func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { res, err := rt.t.RoundTrip(req) - if err == ErrNoCachedConn { + if isNoCachedConnError(err) { return nil, http.ErrSkipAltProtocol } return res, err diff --git a/cluster-autoscaler/vendor/golang.org/x/net/http2/server.go b/cluster-autoscaler/vendor/golang.org/x/net/http2/server.go index eae143ddff10..460ede03b171 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/http2/server.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/http2/server.go @@ -220,12 +220,15 @@ func ConfigureServer(s *http.Server, conf *Server) error { } else if s.TLSConfig.CipherSuites != nil { // If they already provided a CipherSuite list, return // an error if it has a bad order or is missing - // ECDHE_RSA_WITH_AES_128_GCM_SHA256. - const requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + // ECDHE_RSA_WITH_AES_128_GCM_SHA256 or ECDHE_ECDSA_WITH_AES_128_GCM_SHA256. haveRequired := false sawBad := false for i, cs := range s.TLSConfig.CipherSuites { - if cs == requiredCipher { + switch cs { + case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + // Alternative MTI cipher to not discourage ECDSA-only servers. + // See http://golang.org/cl/30721 for further information. 
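The comment above refers to golang.org/cl/30721, which made the ECDSA variant of the AES-128-GCM suite an acceptable alternative to the RSA MTI cipher in ConfigureServer's validation. As a rough sketch (not part of this diff, assuming the x/net/http2 API at this revision), an ECDSA-only cipher list should now pass:

```go
package main

import (
	"crypto/tls"
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{
		Addr: ":8443",
		TLSConfig: &tls.Config{
			// An ECDSA-only cipher list. Before this change ConfigureServer
			// insisted on TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 specifically.
			CipherSuites: []uint16{
				tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
			},
		},
	}
	if err := http2.ConfigureServer(srv, nil); err != nil {
		log.Fatalf("http2: %v", err)
	}
}
```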
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: haveRequired = true } if isBadCipher(cs) { @@ -235,7 +238,7 @@ func ConfigureServer(s *http.Server, conf *Server) error { } } if !haveRequired { - return fmt.Errorf("http2: TLSConfig.CipherSuites is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256") + return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher.") } } @@ -649,7 +652,7 @@ func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { if err == nil { return } - if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) { + if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) || err == errPrefaceTimeout { // Boring, expected errors. sc.vlogf(format, args...) } else { @@ -853,8 +856,13 @@ func (sc *serverConn) serve() { } } - if sc.inGoAway && sc.curOpenStreams() == 0 && !sc.needToSendGoAway && !sc.writingFrame { - return + // Start the shutdown timer after sending a GOAWAY. When sending GOAWAY + // with no error code (graceful shutdown), don't start the timer until + // all open streams have been completed. + sentGoAway := sc.inGoAway && !sc.needToSendGoAway && !sc.writingFrame + gracefulShutdownComplete := sc.goAwayCode == ErrCodeNo && sc.curOpenStreams() == 0 + if sentGoAway && sc.shutdownTimer == nil && (sc.goAwayCode != ErrCodeNo || gracefulShutdownComplete) { + sc.shutDownIn(goAwayTimeout) + } } } @@ -889,8 +897,11 @@ func (sc *serverConn) sendServeMsg(msg interface{}) { } } -// readPreface reads the ClientPreface greeting from the peer -// or returns an error on timeout or an invalid greeting. +var errPrefaceTimeout = errors.New("timeout waiting for client preface") + +// readPreface reads the ClientPreface greeting from the peer or +// returns errPrefaceTimeout on timeout, or an error if the greeting +// is invalid. func (sc *serverConn) readPreface() error { errc := make(chan error, 1) go func() { @@ -908,7 +919,7 @@ func (sc *serverConn) readPreface() error { defer timer.Stop() select { case <-timer.C: - return errors.New("timeout waiting for client preface") + return errPrefaceTimeout case err := <-errc: if err == nil { if VerboseLogs { @@ -1218,30 +1229,31 @@ func (sc *serverConn) startGracefulShutdown() { sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) }) } +// After sending GOAWAY, the connection will close after goAwayTimeout. +// If we close the connection immediately after sending GOAWAY, there may +// be unsent data in our kernel receive buffer, which will cause the kernel +// to send a TCP RST on close() instead of a FIN. This RST will abort the +// connection immediately, whether or not the client had received the GOAWAY. +// +// Ideally we should delay for at least 1 RTT + epsilon so the client has +// a chance to read the GOAWAY and stop sending messages. Measuring RTT +// is hard, so we approximate with 1 second. See golang.org/issue/18701. +// +// This is a var so it can be shorter in tests, where all requests use the +// loopback interface making the expected RTT very small. +// +// TODO: configurable?
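goAwayTimeout is unexported, so the "shorter in tests" escape hatch is only usable from the package's own tests; a hypothetical sketch of that pattern (test name and body are illustrative, not from this diff):

```go
package http2

import (
	"testing"
	"time"
)

// Hypothetical test: shorten the post-GOAWAY grace period so a shutdown
// test does not wait the full 1-second approximation of 1 RTT + epsilon.
func TestShutdownAfterGoAwayFast(t *testing.T) {
	old := goAwayTimeout
	goAwayTimeout = 50 * time.Millisecond
	defer func() { goAwayTimeout = old }()

	// ... start a test server, trigger startGracefulShutdown, and assert
	// the connection closes shortly after the shortened timeout ...
}
```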
+var goAwayTimeout = 1 * time.Second + func (sc *serverConn) startGracefulShutdownInternal() { - sc.goAwayIn(ErrCodeNo, 0) + sc.goAway(ErrCodeNo) } func (sc *serverConn) goAway(code ErrCode) { - sc.serveG.check() - var forceCloseIn time.Duration - if code != ErrCodeNo { - forceCloseIn = 250 * time.Millisecond - } else { - // TODO: configurable - forceCloseIn = 1 * time.Second - } - sc.goAwayIn(code, forceCloseIn) -} - -func (sc *serverConn) goAwayIn(code ErrCode, forceCloseIn time.Duration) { sc.serveG.check() if sc.inGoAway { return } - if forceCloseIn != 0 { - sc.shutDownIn(forceCloseIn) - } sc.inGoAway = true sc.needToSendGoAway = true sc.goAwayCode = code @@ -2310,7 +2322,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { clen = strconv.Itoa(len(p)) } _, hasContentType := rws.snapHeader["Content-Type"] - if !hasContentType && bodyAllowedForStatus(rws.status) { + if !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 { ctype = http.DetectContentType(p) } var date string @@ -2478,6 +2490,24 @@ func (w *responseWriter) Header() http.Header { return rws.handlerHeader } +// checkWriteHeaderCode is a copy of net/http's checkWriteHeaderCode. +func checkWriteHeaderCode(code int) { + // Issue 22880: require valid WriteHeader status codes. + // For now we only enforce that it's three digits. + // In the future we might block things over 599 (600 and above aren't defined + // at http://httpwg.org/specs/rfc7231.html#status.codes) + // and we might block under 200 (once we have more mature 1xx support). + // But for now any three digits. + // + // We used to send "HTTP/1.1 000 0" on the wire in responses but there's + // no equivalent bogus thing we can realistically send in HTTP/2, + // so we'll consistently panic instead and help people find their bugs + // early. (We can't return an error from WriteHeader even if we wanted to.) + if code < 100 || code > 999 { + panic(fmt.Sprintf("invalid WriteHeader code %v", code)) + } +} + func (w *responseWriter) WriteHeader(code int) { rws := w.rws if rws == nil { @@ -2488,6 +2518,7 @@ func (w *responseWriter) WriteHeader(code int) { func (rws *responseWriterState) writeHeader(code int) { if !rws.wroteHeader { + checkWriteHeaderCode(code) rws.wroteHeader = true rws.status = code if len(rws.handlerHeader) > 0 { diff --git a/cluster-autoscaler/vendor/golang.org/x/net/http2/transport.go b/cluster-autoscaler/vendor/golang.org/x/net/http2/transport.go index e0dfe9f6a697..e6b321f4bb60 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/http2/transport.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/http2/transport.go @@ -87,7 +87,7 @@ type Transport struct { // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to // send in the initial settings frame. It is how many bytes - // of response headers are allow. Unlike the http2 spec, zero here + // of response headers are allowed. Unlike the http2 spec, zero here // means to use a default limit (currently 10MB). 
If you actually // want to advertise an unlimited value to the peer, Transport // interprets the highest possible value here (0xffffffff or 1<<32-1) @@ -172,9 +172,10 @@ type ClientConn struct { fr *Framer lastActive time.Time // Settings from peer: (also guarded by mu) - maxFrameSize uint32 - maxConcurrentStreams uint32 - initialWindowSize uint32 + maxFrameSize uint32 + maxConcurrentStreams uint32 + peerMaxHeaderListSize uint64 + initialWindowSize uint32 hbuf bytes.Buffer // HPACK encoder writes into this henc *hpack.Encoder @@ -273,6 +274,13 @@ func (cs *clientStream) checkResetOrDone() error { } } +func (cs *clientStream) getStartedWrite() bool { + cc := cs.cc + cc.mu.Lock() + defer cc.mu.Unlock() + return cs.startedWrite +} + func (cs *clientStream) abortRequestBodyWrite(err error) { if err == nil { panic("nil error") @@ -298,7 +306,26 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) { return } -var ErrNoCachedConn = errors.New("http2: no cached connection was available") +// noCachedConnError is the concrete type of ErrNoCachedConn, which +// needs to be detected by net/http regardless of whether it's its +// bundled version (in h2_bundle.go with a rewritten type name) or +// from a user's x/net/http2. As such, it has a unique method name +// (IsHTTP2NoCachedConnError) that net/http sniffs for via func +// isNoCachedConnError. +type noCachedConnError struct{} + +func (noCachedConnError) IsHTTP2NoCachedConnError() {} +func (noCachedConnError) Error() string { return "http2: no cached connection was available" } + +// isNoCachedConnError reports whether err is of type noCachedConnError +// or its equivalent renamed type in net/http2's h2_bundle.go. Both types +// may coexist in the same running program. +func isNoCachedConnError(err error) bool { + _, ok := err.(interface{ IsHTTP2NoCachedConnError() }) + return ok +} + +var ErrNoCachedConn error = noCachedConnError{} // RoundTripOpt are options for the Transport.RoundTripOpt method. type RoundTripOpt struct { @@ -348,14 +375,9 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res return nil, err } traceGotConn(req, cc) - res, err := cc.RoundTrip(req) + res, gotErrAfterReqBodyWrite, err := cc.roundTrip(req) if err != nil && retry <= 6 { - afterBodyWrite := false - if e, ok := err.(afterReqBodyWriteError); ok { - err = e - afterBodyWrite = true - } - if req, err = shouldRetryRequest(req, err, afterBodyWrite); err == nil { + if req, err = shouldRetryRequest(req, err, gotErrAfterReqBodyWrite); err == nil { // After the first retry, do exponential backoff with 10% jitter. if retry == 0 { continue @@ -393,16 +415,6 @@ var ( errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") ) -// afterReqBodyWriteError is a wrapper around errors returned by ClientConn.RoundTrip. -// It is used to signal that err happened after part of Request.Body was sent to the server. -type afterReqBodyWriteError struct { - err error -} - -func (e afterReqBodyWriteError) Error() string { - return e.err.Error() + "; some request body already written" -} - // shouldRetryRequest is called by RoundTrip when a request fails to get // response headers. It is always called with a non-nil error.
// It returns either a request to retry (either the same request, or a @@ -519,17 +531,18 @@ func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { cc := &ClientConn{ - t: t, - tconn: c, - readerDone: make(chan struct{}), - nextStreamID: 1, - maxFrameSize: 16 << 10, // spec default - initialWindowSize: 65535, // spec default - maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. - streams: make(map[uint32]*clientStream), - singleUse: singleUse, - wantSettingsAck: true, - pings: make(map[[8]byte]chan struct{}), + t: t, + tconn: c, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. + streams: make(map[uint32]*clientStream), + singleUse: singleUse, + wantSettingsAck: true, + pings: make(map[[8]byte]chan struct{}), } if d := t.idleConnTimeout(); d != 0 { cc.idleTimeout = d @@ -750,8 +763,13 @@ func actualContentLength(req *http.Request) int64 { } func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + resp, _, err := cc.roundTrip(req) + return resp, err +} + +func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAfterReqBodyWrite bool, err error) { if err := checkConnHeaders(req); err != nil { - return nil, err + return nil, false, err } if cc.idleTimer != nil { cc.idleTimer.Stop() @@ -759,14 +777,14 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { trailers, err := commaSeparatedTrailers(req) if err != nil { - return nil, err + return nil, false, err } hasTrailers := trailers != "" cc.mu.Lock() if err := cc.awaitOpenSlotForRequest(req); err != nil { cc.mu.Unlock() - return nil, err + return nil, false, err } body := req.Body @@ -800,7 +818,7 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen) if err != nil { cc.mu.Unlock() - return nil, err + return nil, false, err } cs := cc.newStream() @@ -812,7 +830,7 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { cc.wmu.Lock() endStream := !hasBody && !hasTrailers - werr := cc.writeHeaders(cs.ID, endStream, hdrs) + werr := cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs) cc.wmu.Unlock() traceWroteHeaders(cs.trace) cc.mu.Unlock() @@ -826,7 +844,7 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { // Don't bother sending a RST_STREAM (our write already failed; // no need to keep writing) traceWroteRequest(cs.trace, werr) - return nil, werr + return nil, false, werr } var respHeaderTimer <-chan time.Time @@ -845,7 +863,7 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { bodyWritten := false ctx := reqContext(req) - handleReadLoopResponse := func(re resAndError) (*http.Response, error) { + handleReadLoopResponse := func(re resAndError) (*http.Response, bool, error) { res := re.res if re.err != nil || res.StatusCode > 299 { // On error or status code 3xx, 4xx, 5xx, etc abort any @@ -861,18 +879,12 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { cs.abortRequestBodyWrite(errStopReqBodyWrite) } if re.err != nil { - cc.mu.Lock() - afterBodyWrite := cs.startedWrite - cc.mu.Unlock() 
cc.forgetStreamID(cs.ID) - if afterBodyWrite { - return nil, afterReqBodyWriteError{re.err} - } - return nil, re.err + return nil, cs.getStartedWrite(), re.err } res.Request = req res.TLS = cc.tlsState - return res, nil + return res, false, nil } for { @@ -887,7 +899,7 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) } cc.forgetStreamID(cs.ID) - return nil, errTimeout + return nil, cs.getStartedWrite(), errTimeout case <-ctx.Done(): if !hasBody || bodyWritten { cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) @@ -896,7 +908,7 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) } cc.forgetStreamID(cs.ID) - return nil, ctx.Err() + return nil, cs.getStartedWrite(), ctx.Err() case <-req.Cancel: if !hasBody || bodyWritten { cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) @@ -905,12 +917,12 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) } cc.forgetStreamID(cs.ID) - return nil, errRequestCanceled + return nil, cs.getStartedWrite(), errRequestCanceled case <-cs.peerReset: // processResetStream already removed the // stream from the streams map; no need for // forgetStreamID. - return nil, cs.resetErr + return nil, cs.getStartedWrite(), cs.resetErr case err := <-bodyWriter.resc: // Prefer the read loop's response, if available. Issue 16102. select { @@ -919,7 +931,7 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { default: } if err != nil { - return nil, err + return nil, cs.getStartedWrite(), err } bodyWritten = true if d := cc.responseHeaderTimeout(); d != 0 { @@ -971,13 +983,12 @@ func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error { } // requires cc.wmu be held -func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error { +func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, maxFrameSize int, hdrs []byte) error { first := true // first frame written (HEADERS is first, then CONTINUATION) - frameSize := int(cc.maxFrameSize) for len(hdrs) > 0 && cc.werr == nil { chunk := hdrs - if len(chunk) > frameSize { - chunk = chunk[:frameSize] + if len(chunk) > maxFrameSize { + chunk = chunk[:maxFrameSize] } hdrs = hdrs[len(chunk):] endHeaders := len(hdrs) == 0 @@ -1085,17 +1096,26 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) ( var trls []byte if hasTrailers { cc.mu.Lock() - defer cc.mu.Unlock() - trls = cc.encodeTrailers(req) + trls, err = cc.encodeTrailers(req) + cc.mu.Unlock() + if err != nil { + cc.writeStreamReset(cs.ID, ErrCodeInternal, err) + cc.forgetStreamID(cs.ID) + return err + } } + cc.mu.Lock() + maxFrameSize := int(cc.maxFrameSize) + cc.mu.Unlock() + cc.wmu.Lock() defer cc.wmu.Unlock() // Two ways to send END_STREAM: either with trailers, or // with an empty DATA frame. if len(trls) > 0 { - err = cc.writeHeaders(cs.ID, true, trls) + err = cc.writeHeaders(cs.ID, true, maxFrameSize, trls) } else { err = cc.fr.WriteData(cs.ID, true, nil) } @@ -1189,62 +1209,86 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail } } - // 8.1.2.3 Request Pseudo-Header Fields - // The :path pseudo-header field includes the path and query parts of the - // target URI (the path-absolute production and optionally a '?' character - // followed by the query production (see Sections 3.3 and 3.4 of - // [RFC3986]). 
- cc.writeHeader(":authority", host) - cc.writeHeader(":method", req.Method) - if req.Method != "CONNECT" { - cc.writeHeader(":path", path) - cc.writeHeader(":scheme", req.URL.Scheme) - } - if trailers != "" { - cc.writeHeader("trailer", trailers) - } + enumerateHeaders := func(f func(name, value string)) { + // 8.1.2.3 Request Pseudo-Header Fields + // The :path pseudo-header field includes the path and query parts of the + // target URI (the path-absolute production and optionally a '?' character + // followed by the query production (see Sections 3.3 and 3.4 of + // [RFC3986]). + f(":authority", host) + f(":method", req.Method) + if req.Method != "CONNECT" { + f(":path", path) + f(":scheme", req.URL.Scheme) + } + if trailers != "" { + f("trailer", trailers) + } - var didUA bool - for k, vv := range req.Header { - lowKey := strings.ToLower(k) - switch lowKey { - case "host", "content-length": - // Host is :authority, already sent. - // Content-Length is automatic, set below. - continue - case "connection", "proxy-connection", "transfer-encoding", "upgrade", "keep-alive": - // Per 8.1.2.2 Connection-Specific Header - // Fields, don't send connection-specific - // fields. We have already checked if any - // are error-worthy so just ignore the rest. - continue - case "user-agent": - // Match Go's http1 behavior: at most one - // User-Agent. If set to nil or empty string, - // then omit it. Otherwise if not mentioned, - // include the default (below). - didUA = true - if len(vv) < 1 { + var didUA bool + for k, vv := range req.Header { + if strings.EqualFold(k, "host") || strings.EqualFold(k, "content-length") { + // Host is :authority, already sent. + // Content-Length is automatic, set below. continue - } - vv = vv[:1] - if vv[0] == "" { + } else if strings.EqualFold(k, "connection") || strings.EqualFold(k, "proxy-connection") || + strings.EqualFold(k, "transfer-encoding") || strings.EqualFold(k, "upgrade") || + strings.EqualFold(k, "keep-alive") { + // Per 8.1.2.2 Connection-Specific Header + // Fields, don't send connection-specific + // fields. We have already checked if any + // are error-worthy so just ignore the rest. continue + } else if strings.EqualFold(k, "user-agent") { + // Match Go's http1 behavior: at most one + // User-Agent. If set to nil or empty string, + // then omit it. Otherwise if not mentioned, + // include the default (below). + didUA = true + if len(vv) < 1 { + continue + } + vv = vv[:1] + if vv[0] == "" { + continue + } + + } + + for _, v := range vv { + f(k, v) } } - for _, v := range vv { - cc.writeHeader(lowKey, v) + if shouldSendReqContentLength(req.Method, contentLength) { + f("content-length", strconv.FormatInt(contentLength, 10)) + } + if addGzipHeader { + f("accept-encoding", "gzip") + } + if !didUA { + f("user-agent", defaultUserAgent) } } - if shouldSendReqContentLength(req.Method, contentLength) { - cc.writeHeader("content-length", strconv.FormatInt(contentLength, 10)) - } - if addGzipHeader { - cc.writeHeader("accept-encoding", "gzip") - } - if !didUA { - cc.writeHeader("user-agent", defaultUserAgent) + + // Do a first pass over the headers counting bytes to ensure + // we don't exceed cc.peerMaxHeaderListSize. This is done as a + // separate pass before encoding the headers to prevent + // modifying the hpack state. 
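The size being guarded here is the SETTINGS_MAX_HEADER_LIST_SIZE accounting from RFC 7540 §6.5.2: each field costs its name length plus its value length plus 32 octets, which is exactly what hpack.HeaderField.Size reports. A standalone sketch of the same first pass (illustrative, not part of the diff):

```go
package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

// headerListSize sums the RFC 7540 header-list cost of a header map:
// len(name) + len(value) + 32 per field, via hpack.HeaderField.Size.
func headerListSize(hdrs map[string][]string) uint64 {
	var size uint64
	for name, values := range hdrs {
		for _, value := range values {
			hf := hpack.HeaderField{Name: name, Value: value}
			size += uint64(hf.Size())
		}
	}
	return size
}

func main() {
	hdrs := map[string][]string{
		"user-agent": {"example/1.0"}, // 10 + 11 + 32 = 53
		"accept":     {"*/*"},         // 6 + 3 + 32 = 41
	}
	fmt.Println(headerListSize(hdrs)) // 94
}
```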
+ hlSize := uint64(0) + enumerateHeaders(func(name, value string) { + hf := hpack.HeaderField{Name: name, Value: value} + hlSize += uint64(hf.Size()) + }) + + if hlSize > cc.peerMaxHeaderListSize { + return nil, errRequestHeaderListSize } + + // Header list size is ok. Write the headers. + enumerateHeaders(func(name, value string) { + cc.writeHeader(strings.ToLower(name), value) + }) + return cc.hbuf.Bytes(), nil } @@ -1271,17 +1315,29 @@ func shouldSendReqContentLength(method string, contentLength int64) bool { } // requires cc.mu be held. -func (cc *ClientConn) encodeTrailers(req *http.Request) []byte { +func (cc *ClientConn) encodeTrailers(req *http.Request) ([]byte, error) { cc.hbuf.Reset() + + hlSize := uint64(0) + for k, vv := range req.Trailer { + for _, v := range vv { + hf := hpack.HeaderField{Name: k, Value: v} + hlSize += uint64(hf.Size()) + } + } + if hlSize > cc.peerMaxHeaderListSize { + return nil, errRequestHeaderListSize + } + for k, vv := range req.Trailer { - // Transfer-Encoding, etc.. have already been filter at the + // Transfer-Encoding, etc.. have already been filtered at the // start of RoundTrip lowKey := strings.ToLower(k) for _, v := range vv { cc.writeHeader(lowKey, v) } } - return cc.hbuf.Bytes() + return cc.hbuf.Bytes(), nil } func (cc *ClientConn) writeHeader(name, value string) { @@ -1339,17 +1395,12 @@ func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { // clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. type clientConnReadLoop struct { cc *ClientConn - activeRes map[uint32]*clientStream // keyed by streamID closeWhenIdle bool } // readLoop runs in its own goroutine and reads and dispatches frames. func (cc *ClientConn) readLoop() { - rl := &clientConnReadLoop{ - cc: cc, - activeRes: make(map[uint32]*clientStream), - } - + rl := &clientConnReadLoop{cc: cc} defer rl.cleanup() cc.readerErr = rl.run() if ce, ok := cc.readerErr.(ConnectionError); ok { @@ -1404,10 +1455,8 @@ func (rl *clientConnReadLoop) cleanup() { } else if err == io.EOF { err = io.ErrUnexpectedEOF } - for _, cs := range rl.activeRes { - cs.bufPipe.CloseWithError(err) - } for _, cs := range cc.streams { + cs.bufPipe.CloseWithError(err) // no-op if already closed select { case cs.resc <- resAndError{err: err}: default: @@ -1485,7 +1534,7 @@ func (rl *clientConnReadLoop) run() error { } return err } - if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 { + if rl.closeWhenIdle && gotReply && maybeIdle { cc.closeIfIdle() } } @@ -1493,13 +1542,31 @@ func (rl *clientConnReadLoop) run() error { func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { cc := rl.cc - cs := cc.streamByID(f.StreamID, f.StreamEnded()) + cs := cc.streamByID(f.StreamID, false) if cs == nil { // We'd get here if we canceled a request while the // server had its response still in flight. So if this // was just something we canceled, ignore it. return nil } + if f.StreamEnded() { + // Issue 20521: If the stream has ended, streamByID() causes + // clientStream.done to be closed, which causes the request's bodyWriter + // to be closed with an errStreamClosed, which may be received by + // clientConn.RoundTrip before the result of processing these headers. + // Deferring stream closure allows the header processing to occur first. + // clientConn.RoundTrip may still receive the bodyWriter error first, but + // the fix for issue 16102 prioritises any response. 
+ // + // Issue 22413: If there is no request body, we should close the + // stream before writing to cs.resc so that the stream is closed + // immediately once RoundTrip returns. + if cs.req.Body != nil { + defer cc.forgetStreamID(f.StreamID) + } else { + cc.forgetStreamID(f.StreamID) + } + } if !cs.firstByte { if cs.trace != nil { // TODO(bradfitz): move first response byte earlier, @@ -1523,6 +1590,7 @@ func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { } // Any other error type is a stream error. cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) + cc.forgetStreamID(cs.ID) cs.resc <- resAndError{err: err} return nil // return nil from process* funcs to keep conn alive } @@ -1530,9 +1598,6 @@ func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { // (nil, nil) special case. See handleResponse docs. return nil } - if res.Body != noBody { - rl.activeRes[cs.ID] = cs - } cs.resTrailer = &res.Trailer cs.resc <- resAndError{res: res} return nil @@ -1552,11 +1617,11 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra status := f.PseudoValue("status") if status == "" { - return nil, errors.New("missing status pseudo header") + return nil, errors.New("malformed response from server: missing status pseudo header") } statusCode, err := strconv.Atoi(status) if err != nil { - return nil, errors.New("malformed non-numeric status pseudo header") + return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header") } if statusCode == 100 { @@ -1789,7 +1854,23 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { } return nil } + if !cs.firstByte { + cc.logf("protocol error: received DATA before a HEADERS frame") + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + }) + return nil + } if f.Length > 0 { + if cs.req.Method == "HEAD" && len(data) > 0 { + cc.logf("protocol error: received DATA on a HEAD request") + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + }) + return nil + } // Check connection-level flow control. 
cc.mu.Lock() if cs.inflow.available() >= int32(f.Length) { @@ -1851,11 +1932,10 @@ func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { err = io.EOF code = cs.copyTrailers } - cs.bufPipe.closeWithErrorAndCode(err, code) - delete(rl.activeRes, cs.ID) if isConnectionCloseRequest(cs.req) { rl.closeWhenIdle = true } + cs.bufPipe.closeWithErrorAndCode(err, code) select { case cs.resc <- resAndError{err: err}: @@ -1903,6 +1983,8 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { cc.maxFrameSize = s.Val case SettingMaxConcurrentStreams: cc.maxConcurrentStreams = s.Val + case SettingMaxHeaderListSize: + cc.peerMaxHeaderListSize = uint64(s.Val) case SettingInitialWindowSize: // Values above the maximum flow-control // window size of 2^31-1 MUST be treated as a @@ -1980,7 +2062,6 @@ func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { cs.bufPipe.CloseWithError(err) cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl } - delete(rl.activeRes, cs.ID) return nil } @@ -2069,6 +2150,7 @@ func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) var ( errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") + errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit") errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers") ) diff --git a/cluster-autoscaler/vendor/golang.org/x/net/http2/write.go b/cluster-autoscaler/vendor/golang.org/x/net/http2/write.go index 6b0dfae319a4..54ab4a88e7b8 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/http2/write.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/http2/write.go @@ -10,7 +10,6 @@ import ( "log" "net/http" "net/url" - "time" "golang.org/x/net/http2/hpack" "golang.org/x/net/lex/httplex" @@ -90,11 +89,7 @@ type writeGoAway struct { func (p *writeGoAway) writeFrame(ctx writeContext) error { err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil) - if p.code != 0 { - ctx.Flush() // ignore error: we're hanging up on them anyway - time.Sleep(50 * time.Millisecond) - ctx.CloseConn() - } + ctx.Flush() // ignore error: we're hanging up on them anyway return err } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/idna/idna.go b/cluster-autoscaler/vendor/golang.org/x/net/idna/idna.go index 1810100bfd86..6655f023a3f7 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/idna/idna.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/idna/idna.go @@ -21,6 +21,7 @@ import ( "unicode/utf8" "golang.org/x/text/secure/bidirule" + "golang.org/x/text/unicode/bidi" "golang.org/x/text/unicode/norm" ) @@ -68,7 +69,7 @@ func VerifyDNSLength(verify bool) Option { } // RemoveLeadingDots removes leading label separators. Leading runes that map to -// dots, such as U+3002, are removed as well. +// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well. // // This is the behavior suggested by the UTS #46 and is adopted by some // browsers. @@ -92,7 +93,7 @@ func ValidateLabels(enable bool) Option { } } -// StrictDomainName limits the set of permissable ASCII characters to those +// StrictDomainName limits the set of permissible ASCII characters to those // allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the // hyphen). This is set by default for MapForLookup and ValidateForRegistration. 
// @@ -142,7 +143,6 @@ func MapForLookup() Option { o.mapping = validateAndMap StrictDomainName(true)(o) ValidateLabels(true)(o) - RemoveLeadingDots(true)(o) } } @@ -160,14 +160,14 @@ type options struct { // mapping implements a validation and mapping step as defined in RFC 5895 // or UTS 46, tailored to, for example, domain registration or lookup. - mapping func(p *Profile, s string) (string, error) + mapping func(p *Profile, s string) (mapped string, isBidi bool, err error) // bidirule, if specified, checks whether s conforms to the Bidi Rule // defined in RFC 5893. bidirule func(s string) bool } -// A Profile defines the configuration of a IDNA mapper. +// A Profile defines the configuration of an IDNA mapper. type Profile struct { options } @@ -251,23 +251,21 @@ var ( punycode = &Profile{} lookup = &Profile{options{ - transitional: true, - useSTD3Rules: true, - validateLabels: true, - removeLeadingDots: true, - trie: trie, - fromPuny: validateFromPunycode, - mapping: validateAndMap, - bidirule: bidirule.ValidString, + transitional: true, + useSTD3Rules: true, + validateLabels: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, }} display = &Profile{options{ - useSTD3Rules: true, - validateLabels: true, - removeLeadingDots: true, - trie: trie, - fromPuny: validateFromPunycode, - mapping: validateAndMap, - bidirule: bidirule.ValidString, + useSTD3Rules: true, + validateLabels: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, }} registration = &Profile{options{ useSTD3Rules: true, @@ -302,14 +300,16 @@ func (e runeError) Error() string { // see http://www.unicode.org/reports/tr46. func (p *Profile) process(s string, toASCII bool) (string, error) { var err error + var isBidi bool if p.mapping != nil { - s, err = p.mapping(p, s) + s, isBidi, err = p.mapping(p, s) } // Remove leading empty labels. if p.removeLeadingDots { for ; len(s) > 0 && s[0] == '.'; s = s[1:] { } } + // TODO: allow for a quick check of the tables data. // It seems like we should only create this error on ToASCII, but the // UTS 46 conformance tests suggests we should always check this. if err == nil && p.verifyDNSLength && s == "" { @@ -335,6 +335,7 @@ func (p *Profile) process(s string, toASCII bool) (string, error) { // Spec says keep the old label. continue } + isBidi = isBidi || bidirule.DirectionString(u) != bidi.LeftToRight labels.set(u) if err == nil && p.validateLabels { err = p.fromPuny(p, u) @@ -349,6 +350,14 @@ func (p *Profile) process(s string, toASCII bool) (string, error) { err = p.validateLabel(label) } } + if isBidi && p.bidirule != nil && err == nil { + for labels.reset(); !labels.done(); labels.next() { + if !p.bidirule(labels.label()) { + err = &labelError{s, "B"} + break + } + } + } if toASCII { for labels.reset(); !labels.done(); labels.next() { label := labels.label() @@ -380,16 +389,26 @@ func (p *Profile) process(s string, toASCII bool) (string, error) { return s, err } -func normalize(p *Profile, s string) (string, error) { - return norm.NFC.String(s), nil +func normalize(p *Profile, s string) (mapped string, isBidi bool, err error) { + // TODO: consider first doing a quick check to see if any of these checks + // need to be done. This will make it slower in the general case, but + // faster in the common case. 
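The probe used throughout these idna changes is bidirule.DirectionString, which classifies a string per RFC 5893, so the more expensive per-label Bidi Rule check only runs when RTL input is actually present. A small illustrative sketch (not part of the diff):

```go
package main

import (
	"fmt"

	"golang.org/x/text/secure/bidirule"
	"golang.org/x/text/unicode/bidi"
)

func main() {
	for _, s := range []string{"example", "עברית"} {
		// DirectionString reports the overall direction of the string;
		// only RightToLeft input triggers the Bidi Rule validation pass.
		rtl := bidirule.DirectionString(s) == bidi.RightToLeft
		fmt.Printf("%q rtl=%v\n", s, rtl)
	}
}
```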
+ mapped = norm.NFC.String(s) + isBidi = bidirule.DirectionString(mapped) == bidi.RightToLeft + return mapped, isBidi, nil } -func validateRegistration(p *Profile, s string) (string, error) { +func validateRegistration(p *Profile, s string) (idem string, bidi bool, err error) { + // TODO: filter need for normalization in loop below. if !norm.NFC.IsNormalString(s) { - return s, &labelError{s, "V1"} + return s, false, &labelError{s, "V1"} } for i := 0; i < len(s); { v, sz := trie.lookupString(s[i:]) + if sz == 0 { + return s, bidi, runeError(utf8.RuneError) + } + bidi = bidi || info(v).isBidi(s[i:]) // Copy bytes not copied so far. switch p.simplify(info(v).category()) { // TODO: handle the NV8 defined in the Unicode idna data set to allow @@ -397,21 +416,50 @@ func validateRegistration(p *Profile, s string) (string, error) { case valid, deviation: case disallowed, mapped, unknown, ignored: r, _ := utf8.DecodeRuneInString(s[i:]) - return s, runeError(r) + return s, bidi, runeError(r) } i += sz } - return s, nil + return s, bidi, nil +} + +func (c info) isBidi(s string) bool { + if !c.isMapped() { + return c&attributesMask == rtl + } + // TODO: also store bidi info for mapped data. This is possible, but a bit + // cumbersome and not for the common case. + p, _ := bidi.LookupString(s) + switch p.Class() { + case bidi.R, bidi.AL, bidi.AN: + return true + } + return false } -func validateAndMap(p *Profile, s string) (string, error) { +func validateAndMap(p *Profile, s string) (vm string, bidi bool, err error) { var ( - err error - b []byte - k int + b []byte + k int ) + // combinedInfoBits contains the or-ed bits of all runes. We use this + // to derive the mayNeedNorm bit later. This may trigger normalization + // overeagerly, but it will not do so in the common case. The end result + // is another 10% saving on BenchmarkProfile for the common case. + var combinedInfoBits info for i := 0; i < len(s); { v, sz := trie.lookupString(s[i:]) + if sz == 0 { + b = append(b, s[k:i]...) + b = append(b, "\ufffd"...) + k = len(s) + if err == nil { + err = runeError(utf8.RuneError) + } + break + } + combinedInfoBits |= info(v) + bidi = bidi || info(v).isBidi(s[i:]) start := i i += sz // Copy bytes not copied so far. @@ -438,7 +486,9 @@ func validateAndMap(p *Profile, s string) (string, error) { } if k == 0 { // No changes so far. - s = norm.NFC.String(s) + if combinedInfoBits&mayNeedNorm != 0 { + s = norm.NFC.String(s) + } } else { b = append(b, s[k:]...) if norm.NFC.QuickSpan(b) != len(b) { @@ -447,7 +497,7 @@ func validateAndMap(p *Profile, s string) (string, error) { // TODO: the punycode converters require strings as input. s = string(b) } - return s, err + return s, bidi, err } // A labelIter allows iterating over domain name labels. @@ -542,8 +592,13 @@ func validateFromPunycode(p *Profile, s string) error { if !norm.NFC.IsNormalString(s) { return &labelError{s, "V1"} } + // TODO: detect whether string may have to be normalized in the following + // loop. for i := 0; i < len(s); { v, sz := trie.lookupString(s[i:]) + if sz == 0 { + return runeError(utf8.RuneError) + } if c := p.simplify(info(v).category()); c != valid && c != deviation { return &labelError{s, "V6"} } @@ -616,16 +671,13 @@ var joinStates = [][numJoinTypes]joinState{ // validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are // already implicitly satisfied by the overall implementation. 
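All of this label validation sits behind the package's exported profiles; for orientation, a minimal lookup through the public API (assuming the vendored x/net/idna as patched here):

```go
package main

import (
	"fmt"

	"golang.org/x/net/idna"
)

func main() {
	// The Lookup profile maps and validates labels, and (after this change)
	// applies the Bidi Rule only when RTL runes are present in the input.
	a, err := idna.Lookup.ToASCII("bücher.example")
	fmt.Println(a, err) // xn--bcher-kva.example <nil>
}
```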
-func (p *Profile) validateLabel(s string) error { +func (p *Profile) validateLabel(s string) (err error) { if s == "" { if p.verifyDNSLength { return &labelError{s, "A4"} } return nil } - if p.bidirule != nil && !p.bidirule(s) { - return &labelError{s, "B"} - } if !p.validateLabels { return nil } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/idna/tables.go b/cluster-autoscaler/vendor/golang.org/x/net/idna/tables.go index d2819345fcd8..f910b2691443 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/idna/tables.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/idna/tables.go @@ -3,7 +3,7 @@ package idna // UnicodeVersion is the Unicode version from which the tables in this package are derived. -const UnicodeVersion = "9.0.0" +const UnicodeVersion = "10.0.0" var mappings string = "" + // Size: 8176 bytes "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + @@ -544,7 +544,7 @@ func (t *idnaTrie) lookupStringUnsafe(s string) uint16 { return 0 } -// idnaTrie. Total size: 28496 bytes (27.83 KiB). Checksum: 43288b883596640e. +// idnaTrie. Total size: 29052 bytes (28.37 KiB). Checksum: ef06e7ecc26f36dd. type idnaTrie struct{} func newIdnaTrie(i int) *idnaTrie { @@ -554,17 +554,17 @@ func newIdnaTrie(i int) *idnaTrie { // lookupValue determines the type of block n and looks up the value for b. func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 { switch { - case n < 123: + case n < 125: return uint16(idnaValues[n<<6+uint32(b)]) default: - n -= 123 + n -= 125 return uint16(idnaSparse.lookup(n, b)) } } -// idnaValues: 125 blocks, 8000 entries, 16000 bytes +// idnaValues: 127 blocks, 8128 entries, 16256 bytes // The third block is the zero block. -var idnaValues = [8000]uint16{ +var idnaValues = [8128]uint16{ // Block 0x0, offset 0x0 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, @@ -675,14 +675,14 @@ var idnaValues = [8000]uint16{ 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, // Block 0xa, offset 0x280 - 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x1308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, - 0x286: 0x1308, 0x287: 0x1308, 0x288: 0x1308, 0x289: 0x1308, 0x28a: 0x1308, 0x28b: 0x1308, - 0x28c: 0x1308, 0x28d: 0x1308, 0x28e: 0x1308, 0x28f: 0x13c0, 0x290: 0x1308, 0x291: 0x1308, - 0x292: 0x1308, 0x293: 0x1308, 0x294: 0x1308, 0x295: 0x1308, 0x296: 0x1308, 0x297: 0x1308, - 0x298: 0x1308, 0x299: 0x1308, 0x29a: 0x1308, 0x29b: 0x1308, 0x29c: 0x1308, 0x29d: 0x1308, - 0x29e: 0x1308, 0x29f: 0x1308, 0x2a0: 0x1308, 0x2a1: 0x1308, 0x2a2: 0x1308, 0x2a3: 0x1308, - 0x2a4: 0x1308, 0x2a5: 0x1308, 0x2a6: 0x1308, 0x2a7: 0x1308, 0x2a8: 0x1308, 0x2a9: 0x1308, - 0x2aa: 0x1308, 0x2ab: 0x1308, 0x2ac: 0x1308, 0x2ad: 0x1308, 0x2ae: 0x1308, 0x2af: 0x1308, + 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, + 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308, + 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308, + 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308, + 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308, + 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308, + 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 
0x3308, 0x2a9: 0x3308, + 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308, 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008, 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d, @@ -723,8 +723,8 @@ var idnaValues = [8000]uint16{ 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008, 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008, // Block 0xe, offset 0x380 - 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x1308, 0x384: 0x1308, 0x385: 0x1308, - 0x386: 0x1308, 0x387: 0x1308, 0x388: 0x1318, 0x389: 0x1318, 0x38a: 0xe00d, 0x38b: 0x0008, + 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308, + 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008, 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008, 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008, 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008, @@ -759,129 +759,129 @@ var idnaValues = [8000]uint16{ 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5, 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5, // Block 0x11, offset 0x440 - 0x440: 0x0040, 0x441: 0x0040, 0x442: 0x0040, 0x443: 0x0040, 0x444: 0x0040, 0x445: 0x0040, - 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0018, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0018, - 0x44c: 0x0018, 0x44d: 0x0018, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x1308, 0x451: 0x1308, - 0x452: 0x1308, 0x453: 0x1308, 0x454: 0x1308, 0x455: 0x1308, 0x456: 0x1308, 0x457: 0x1308, - 0x458: 0x1308, 0x459: 0x1308, 0x45a: 0x1308, 0x45b: 0x0018, 0x45c: 0x0340, 0x45d: 0x0040, - 0x45e: 0x0018, 0x45f: 0x0018, 0x460: 0x0208, 0x461: 0x0008, 0x462: 0x0408, 0x463: 0x0408, - 0x464: 0x0408, 0x465: 0x0408, 0x466: 0x0208, 0x467: 0x0408, 0x468: 0x0208, 0x469: 0x0408, - 0x46a: 0x0208, 0x46b: 0x0208, 0x46c: 0x0208, 0x46d: 0x0208, 0x46e: 0x0208, 0x46f: 0x0408, - 0x470: 0x0408, 0x471: 0x0408, 0x472: 0x0408, 0x473: 0x0208, 0x474: 0x0208, 0x475: 0x0208, - 0x476: 0x0208, 0x477: 0x0208, 0x478: 0x0208, 0x479: 0x0208, 0x47a: 0x0208, 0x47b: 0x0208, - 0x47c: 0x0208, 0x47d: 0x0208, 0x47e: 0x0208, 0x47f: 0x0208, + 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840, + 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818, + 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308, + 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308, + 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040, + 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08, + 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08, + 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08, + 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08, + 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08, + 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08, // Block 0x12, offset 0x480 - 0x480: 0x0408, 0x481: 0x0208, 0x482: 0x0208, 0x483: 0x0408, 0x484: 0x0408, 0x485: 0x0408, - 0x486: 0x0408, 
0x487: 0x0408, 0x488: 0x0408, 0x489: 0x0408, 0x48a: 0x0408, 0x48b: 0x0408, - 0x48c: 0x0208, 0x48d: 0x0408, 0x48e: 0x0208, 0x48f: 0x0408, 0x490: 0x0208, 0x491: 0x0208, - 0x492: 0x0408, 0x493: 0x0408, 0x494: 0x0018, 0x495: 0x0408, 0x496: 0x1308, 0x497: 0x1308, - 0x498: 0x1308, 0x499: 0x1308, 0x49a: 0x1308, 0x49b: 0x1308, 0x49c: 0x1308, 0x49d: 0x0040, - 0x49e: 0x0018, 0x49f: 0x1308, 0x4a0: 0x1308, 0x4a1: 0x1308, 0x4a2: 0x1308, 0x4a3: 0x1308, - 0x4a4: 0x1308, 0x4a5: 0x0008, 0x4a6: 0x0008, 0x4a7: 0x1308, 0x4a8: 0x1308, 0x4a9: 0x0018, - 0x4aa: 0x1308, 0x4ab: 0x1308, 0x4ac: 0x1308, 0x4ad: 0x1308, 0x4ae: 0x0408, 0x4af: 0x0408, - 0x4b0: 0x0008, 0x4b1: 0x0008, 0x4b2: 0x0008, 0x4b3: 0x0008, 0x4b4: 0x0008, 0x4b5: 0x0008, - 0x4b6: 0x0008, 0x4b7: 0x0008, 0x4b8: 0x0008, 0x4b9: 0x0008, 0x4ba: 0x0208, 0x4bb: 0x0208, - 0x4bc: 0x0208, 0x4bd: 0x0008, 0x4be: 0x0008, 0x4bf: 0x0208, + 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08, + 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308, + 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308, + 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308, + 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308, + 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808, + 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808, + 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08, + 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429, + 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08, + 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08, // Block 0x13, offset 0x4c0 - 0x4c0: 0x0018, 0x4c1: 0x0018, 0x4c2: 0x0018, 0x4c3: 0x0018, 0x4c4: 0x0018, 0x4c5: 0x0018, - 0x4c6: 0x0018, 0x4c7: 0x0018, 0x4c8: 0x0018, 0x4c9: 0x0018, 0x4ca: 0x0018, 0x4cb: 0x0018, - 0x4cc: 0x0018, 0x4cd: 0x0018, 0x4ce: 0x0040, 0x4cf: 0x0340, 0x4d0: 0x0408, 0x4d1: 0x1308, - 0x4d2: 0x0208, 0x4d3: 0x0208, 0x4d4: 0x0208, 0x4d5: 0x0408, 0x4d6: 0x0408, 0x4d7: 0x0408, - 0x4d8: 0x0408, 0x4d9: 0x0408, 0x4da: 0x0208, 0x4db: 0x0208, 0x4dc: 0x0208, 0x4dd: 0x0208, - 0x4de: 0x0408, 0x4df: 0x0208, 0x4e0: 0x0208, 0x4e1: 0x0208, 0x4e2: 0x0208, 0x4e3: 0x0208, - 0x4e4: 0x0208, 0x4e5: 0x0208, 0x4e6: 0x0208, 0x4e7: 0x0208, 0x4e8: 0x0408, 0x4e9: 0x0208, - 0x4ea: 0x0408, 0x4eb: 0x0208, 0x4ec: 0x0408, 0x4ed: 0x0208, 0x4ee: 0x0208, 0x4ef: 0x0408, - 0x4f0: 0x1308, 0x4f1: 0x1308, 0x4f2: 0x1308, 0x4f3: 0x1308, 0x4f4: 0x1308, 0x4f5: 0x1308, - 0x4f6: 0x1308, 0x4f7: 0x1308, 0x4f8: 0x1308, 0x4f9: 0x1308, 0x4fa: 0x1308, 0x4fb: 0x1308, - 0x4fc: 0x1308, 0x4fd: 0x1308, 0x4fe: 0x1308, 0x4ff: 0x1308, + 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08, + 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08, + 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08, + 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308, + 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840, + 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308, + 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018, + 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 
0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08, + 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008, + 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08, + 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08, // Block 0x14, offset 0x500 - 0x500: 0x1008, 0x501: 0x1308, 0x502: 0x1308, 0x503: 0x1308, 0x504: 0x1308, 0x505: 0x1308, - 0x506: 0x1308, 0x507: 0x1308, 0x508: 0x1308, 0x509: 0x1008, 0x50a: 0x1008, 0x50b: 0x1008, - 0x50c: 0x1008, 0x50d: 0x1b08, 0x50e: 0x1008, 0x50f: 0x1008, 0x510: 0x0008, 0x511: 0x1308, - 0x512: 0x1308, 0x513: 0x1308, 0x514: 0x1308, 0x515: 0x1308, 0x516: 0x1308, 0x517: 0x1308, - 0x518: 0x04c9, 0x519: 0x0501, 0x51a: 0x0539, 0x51b: 0x0571, 0x51c: 0x05a9, 0x51d: 0x05e1, - 0x51e: 0x0619, 0x51f: 0x0651, 0x520: 0x0008, 0x521: 0x0008, 0x522: 0x1308, 0x523: 0x1308, - 0x524: 0x0018, 0x525: 0x0018, 0x526: 0x0008, 0x527: 0x0008, 0x528: 0x0008, 0x529: 0x0008, - 0x52a: 0x0008, 0x52b: 0x0008, 0x52c: 0x0008, 0x52d: 0x0008, 0x52e: 0x0008, 0x52f: 0x0008, - 0x530: 0x0018, 0x531: 0x0008, 0x532: 0x0008, 0x533: 0x0008, 0x534: 0x0008, 0x535: 0x0008, - 0x536: 0x0008, 0x537: 0x0008, 0x538: 0x0008, 0x539: 0x0008, 0x53a: 0x0008, 0x53b: 0x0008, - 0x53c: 0x0008, 0x53d: 0x0008, 0x53e: 0x0008, 0x53f: 0x0008, + 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818, + 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818, + 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308, + 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08, + 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08, + 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08, + 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08, + 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08, + 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308, + 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308, + 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308, // Block 0x15, offset 0x540 - 0x540: 0x0008, 0x541: 0x1308, 0x542: 0x1008, 0x543: 0x1008, 0x544: 0x0040, 0x545: 0x0008, - 0x546: 0x0008, 0x547: 0x0008, 0x548: 0x0008, 0x549: 0x0008, 0x54a: 0x0008, 0x54b: 0x0008, - 0x54c: 0x0008, 0x54d: 0x0040, 0x54e: 0x0040, 0x54f: 0x0008, 0x550: 0x0008, 0x551: 0x0040, - 0x552: 0x0040, 0x553: 0x0008, 0x554: 0x0008, 0x555: 0x0008, 0x556: 0x0008, 0x557: 0x0008, - 0x558: 0x0008, 0x559: 0x0008, 0x55a: 0x0008, 0x55b: 0x0008, 0x55c: 0x0008, 0x55d: 0x0008, - 0x55e: 0x0008, 0x55f: 0x0008, 0x560: 0x0008, 0x561: 0x0008, 0x562: 0x0008, 0x563: 0x0008, - 0x564: 0x0008, 0x565: 0x0008, 0x566: 0x0008, 0x567: 0x0008, 0x568: 0x0008, 0x569: 0x0040, - 0x56a: 0x0008, 0x56b: 0x0008, 0x56c: 0x0008, 0x56d: 0x0008, 0x56e: 0x0008, 0x56f: 0x0008, - 0x570: 0x0008, 0x571: 0x0040, 0x572: 0x0008, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040, - 0x576: 0x0008, 0x577: 0x0008, 0x578: 0x0008, 0x579: 0x0008, 0x57a: 0x0040, 0x57b: 0x0040, - 0x57c: 0x1308, 0x57d: 0x0008, 0x57e: 0x1008, 0x57f: 0x1008, + 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08, + 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08, + 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 
0x0a08, 0x551: 0x0a08, + 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0808, 0x557: 0x0808, + 0x558: 0x0808, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040, + 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08, + 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08, + 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040, + 0x570: 0x0040, 0x571: 0x0040, 0x572: 0x0040, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040, + 0x576: 0x0040, 0x577: 0x0040, 0x578: 0x0040, 0x579: 0x0040, 0x57a: 0x0040, 0x57b: 0x0040, + 0x57c: 0x0040, 0x57d: 0x0040, 0x57e: 0x0040, 0x57f: 0x0040, // Block 0x16, offset 0x580 - 0x580: 0x1008, 0x581: 0x1308, 0x582: 0x1308, 0x583: 0x1308, 0x584: 0x1308, 0x585: 0x0040, - 0x586: 0x0040, 0x587: 0x1008, 0x588: 0x1008, 0x589: 0x0040, 0x58a: 0x0040, 0x58b: 0x1008, - 0x58c: 0x1008, 0x58d: 0x1b08, 0x58e: 0x0008, 0x58f: 0x0040, 0x590: 0x0040, 0x591: 0x0040, - 0x592: 0x0040, 0x593: 0x0040, 0x594: 0x0040, 0x595: 0x0040, 0x596: 0x0040, 0x597: 0x1008, - 0x598: 0x0040, 0x599: 0x0040, 0x59a: 0x0040, 0x59b: 0x0040, 0x59c: 0x0689, 0x59d: 0x06c1, - 0x59e: 0x0040, 0x59f: 0x06f9, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x1308, 0x5a3: 0x1308, - 0x5a4: 0x0040, 0x5a5: 0x0040, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008, + 0x580: 0x3008, 0x581: 0x3308, 0x582: 0x3308, 0x583: 0x3308, 0x584: 0x3308, 0x585: 0x3308, + 0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008, + 0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308, + 0x592: 0x3308, 0x593: 0x3308, 0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308, + 0x598: 0x04c9, 0x599: 0x0501, 0x59a: 0x0539, 0x59b: 0x0571, 0x59c: 0x05a9, 0x59d: 0x05e1, + 0x59e: 0x0619, 0x59f: 0x0651, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308, + 0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008, 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008, - 0x5b0: 0x0008, 0x5b1: 0x0008, 0x5b2: 0x0018, 0x5b3: 0x0018, 0x5b4: 0x0018, 0x5b5: 0x0018, - 0x5b6: 0x0018, 0x5b7: 0x0018, 0x5b8: 0x0018, 0x5b9: 0x0018, 0x5ba: 0x0018, 0x5bb: 0x0018, - 0x5bc: 0x0040, 0x5bd: 0x0040, 0x5be: 0x0040, 0x5bf: 0x0040, + 0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008, + 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0008, 0x5bb: 0x0008, + 0x5bc: 0x0008, 0x5bd: 0x0008, 0x5be: 0x0008, 0x5bf: 0x0008, // Block 0x17, offset 0x5c0 - 0x5c0: 0x0040, 0x5c1: 0x1308, 0x5c2: 0x1308, 0x5c3: 0x1008, 0x5c4: 0x0040, 0x5c5: 0x0008, - 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0040, - 0x5cc: 0x0040, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040, + 0x5c0: 0x0008, 0x5c1: 0x3308, 0x5c2: 0x3008, 0x5c3: 0x3008, 0x5c4: 0x0040, 0x5c5: 0x0008, + 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0008, + 0x5cc: 0x0008, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040, 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008, 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008, 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008, 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 
0x5e9: 0x0040, 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008, - 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0731, 0x5f4: 0x0040, 0x5f5: 0x0008, - 0x5f6: 0x0769, 0x5f7: 0x0040, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040, - 0x5fc: 0x1308, 0x5fd: 0x0040, 0x5fe: 0x1008, 0x5ff: 0x1008, + 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0040, 0x5f4: 0x0040, 0x5f5: 0x0040, + 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040, + 0x5fc: 0x3308, 0x5fd: 0x0008, 0x5fe: 0x3008, 0x5ff: 0x3008, // Block 0x18, offset 0x600 - 0x600: 0x1008, 0x601: 0x1308, 0x602: 0x1308, 0x603: 0x0040, 0x604: 0x0040, 0x605: 0x0040, - 0x606: 0x0040, 0x607: 0x1308, 0x608: 0x1308, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x1308, - 0x60c: 0x1308, 0x60d: 0x1b08, 0x60e: 0x0040, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x1308, - 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x0040, - 0x618: 0x0040, 0x619: 0x07a1, 0x61a: 0x07d9, 0x61b: 0x0811, 0x61c: 0x0008, 0x61d: 0x0040, - 0x61e: 0x0849, 0x61f: 0x0040, 0x620: 0x0040, 0x621: 0x0040, 0x622: 0x0040, 0x623: 0x0040, + 0x600: 0x3008, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3308, 0x604: 0x3308, 0x605: 0x0040, + 0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008, + 0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040, + 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008, + 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0689, 0x61d: 0x06c1, + 0x61e: 0x0040, 0x61f: 0x06f9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x3308, 0x623: 0x3308, 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008, 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, - 0x630: 0x1308, 0x631: 0x1308, 0x632: 0x0008, 0x633: 0x0008, 0x634: 0x0008, 0x635: 0x1308, - 0x636: 0x0040, 0x637: 0x0040, 0x638: 0x0040, 0x639: 0x0040, 0x63a: 0x0040, 0x63b: 0x0040, - 0x63c: 0x0040, 0x63d: 0x0040, 0x63e: 0x0040, 0x63f: 0x0040, + 0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018, + 0x636: 0x0018, 0x637: 0x0018, 0x638: 0x0018, 0x639: 0x0018, 0x63a: 0x0018, 0x63b: 0x0018, + 0x63c: 0x0008, 0x63d: 0x0018, 0x63e: 0x0040, 0x63f: 0x0040, // Block 0x19, offset 0x640 - 0x640: 0x0040, 0x641: 0x1308, 0x642: 0x1308, 0x643: 0x1008, 0x644: 0x0040, 0x645: 0x0008, - 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0008, - 0x64c: 0x0008, 0x64d: 0x0008, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0008, + 0x640: 0x0040, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3008, 0x644: 0x0040, 0x645: 0x0008, + 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0040, + 0x64c: 0x0040, 0x64d: 0x0040, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0040, 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008, 0x658: 0x0008, 0x659: 0x0008, 0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008, 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008, 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040, 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 0x0008, 0x66e: 0x0008, 0x66f: 0x0008, - 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0008, 0x674: 0x0040, 0x675: 0x0008, - 0x676: 0x0008, 0x677: 0x0008, 
0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040, - 0x67c: 0x1308, 0x67d: 0x0008, 0x67e: 0x1008, 0x67f: 0x1008, + 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0731, 0x674: 0x0040, 0x675: 0x0008, + 0x676: 0x0769, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040, + 0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 0x3008, 0x67f: 0x3008, // Block 0x1a, offset 0x680 - 0x680: 0x1008, 0x681: 0x1308, 0x682: 0x1308, 0x683: 0x1308, 0x684: 0x1308, 0x685: 0x1308, - 0x686: 0x0040, 0x687: 0x1308, 0x688: 0x1308, 0x689: 0x1008, 0x68a: 0x0040, 0x68b: 0x1008, - 0x68c: 0x1008, 0x68d: 0x1b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0008, 0x691: 0x0040, + 0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040, + 0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308, + 0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308, 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040, - 0x698: 0x0040, 0x699: 0x0040, 0x69a: 0x0040, 0x69b: 0x0040, 0x69c: 0x0040, 0x69d: 0x0040, - 0x69e: 0x0040, 0x69f: 0x0040, 0x6a0: 0x0008, 0x6a1: 0x0008, 0x6a2: 0x1308, 0x6a3: 0x1308, + 0x698: 0x0040, 0x699: 0x07a1, 0x69a: 0x07d9, 0x69b: 0x0811, 0x69c: 0x0008, 0x69d: 0x0040, + 0x69e: 0x0849, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040, 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008, 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, - 0x6b0: 0x0018, 0x6b1: 0x0018, 0x6b2: 0x0040, 0x6b3: 0x0040, 0x6b4: 0x0040, 0x6b5: 0x0040, - 0x6b6: 0x0040, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0008, 0x6ba: 0x0040, 0x6bb: 0x0040, + 0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308, + 0x6b6: 0x0040, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0040, 0x6ba: 0x0040, 0x6bb: 0x0040, 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040, // Block 0x1b, offset 0x6c0 - 0x6c0: 0x0040, 0x6c1: 0x1308, 0x6c2: 0x1008, 0x6c3: 0x1008, 0x6c4: 0x0040, 0x6c5: 0x0008, + 0x6c0: 0x0040, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3008, 0x6c4: 0x0040, 0x6c5: 0x0008, 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008, - 0x6cc: 0x0008, 0x6cd: 0x0040, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0040, + 0x6cc: 0x0008, 0x6cd: 0x0008, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0008, 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008, 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008, 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008, @@ -889,1457 +889,1490 @@ var idnaValues = [8000]uint16{ 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008, 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008, 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040, - 0x6fc: 0x1308, 0x6fd: 0x0008, 0x6fe: 0x1008, 0x6ff: 0x1308, + 0x6fc: 0x3308, 0x6fd: 0x0008, 0x6fe: 0x3008, 0x6ff: 0x3008, // Block 0x1c, offset 0x700 - 0x700: 0x1008, 0x701: 0x1308, 0x702: 0x1308, 0x703: 0x1308, 0x704: 0x1308, 0x705: 0x0040, - 0x706: 0x0040, 0x707: 0x1008, 0x708: 0x1008, 0x709: 0x0040, 0x70a: 0x0040, 0x70b: 0x1008, - 0x70c: 0x1008, 0x70d: 0x1b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0040, 0x711: 0x0040, - 0x712: 0x0040, 0x713: 
0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x1308, 0x717: 0x1008, - 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0881, 0x71d: 0x08b9, - 0x71e: 0x0040, 0x71f: 0x0008, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x1308, 0x723: 0x1308, + 0x700: 0x3008, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3308, 0x704: 0x3308, 0x705: 0x3308, + 0x706: 0x0040, 0x707: 0x3308, 0x708: 0x3308, 0x709: 0x3008, 0x70a: 0x0040, 0x70b: 0x3008, + 0x70c: 0x3008, 0x70d: 0x3b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0008, 0x711: 0x0040, + 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x0040, 0x717: 0x0040, + 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0040, 0x71d: 0x0040, + 0x71e: 0x0040, 0x71f: 0x0040, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x3308, 0x723: 0x3308, 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008, 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008, - 0x730: 0x0018, 0x731: 0x0008, 0x732: 0x0018, 0x733: 0x0018, 0x734: 0x0018, 0x735: 0x0018, - 0x736: 0x0018, 0x737: 0x0018, 0x738: 0x0040, 0x739: 0x0040, 0x73a: 0x0040, 0x73b: 0x0040, - 0x73c: 0x0040, 0x73d: 0x0040, 0x73e: 0x0040, 0x73f: 0x0040, + 0x730: 0x0018, 0x731: 0x0018, 0x732: 0x0040, 0x733: 0x0040, 0x734: 0x0040, 0x735: 0x0040, + 0x736: 0x0040, 0x737: 0x0040, 0x738: 0x0040, 0x739: 0x0008, 0x73a: 0x3308, 0x73b: 0x3308, + 0x73c: 0x3308, 0x73d: 0x3308, 0x73e: 0x3308, 0x73f: 0x3308, // Block 0x1d, offset 0x740 - 0x740: 0x0040, 0x741: 0x0040, 0x742: 0x1308, 0x743: 0x0008, 0x744: 0x0040, 0x745: 0x0008, - 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0040, - 0x74c: 0x0040, 0x74d: 0x0040, 0x74e: 0x0008, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040, - 0x752: 0x0008, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0040, 0x757: 0x0040, - 0x758: 0x0040, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0040, 0x75c: 0x0008, 0x75d: 0x0040, - 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0040, 0x761: 0x0040, 0x762: 0x0040, 0x763: 0x0008, - 0x764: 0x0008, 0x765: 0x0040, 0x766: 0x0040, 0x767: 0x0040, 0x768: 0x0008, 0x769: 0x0008, - 0x76a: 0x0008, 0x76b: 0x0040, 0x76c: 0x0040, 0x76d: 0x0040, 0x76e: 0x0008, 0x76f: 0x0008, - 0x770: 0x0008, 0x771: 0x0008, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0008, 0x775: 0x0008, + 0x740: 0x0040, 0x741: 0x3308, 0x742: 0x3008, 0x743: 0x3008, 0x744: 0x0040, 0x745: 0x0008, + 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0008, + 0x74c: 0x0008, 0x74d: 0x0040, 0x74e: 0x0040, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040, + 0x752: 0x0040, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0008, 0x757: 0x0008, + 0x758: 0x0008, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0008, 0x75c: 0x0008, 0x75d: 0x0008, + 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x0008, 0x763: 0x0008, + 0x764: 0x0008, 0x765: 0x0008, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0040, + 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008, + 0x770: 0x0008, 0x771: 0x0040, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0040, 0x775: 0x0008, 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040, - 0x77c: 0x0040, 0x77d: 0x0040, 0x77e: 0x1008, 0x77f: 0x1008, + 0x77c: 0x3308, 0x77d: 0x0008, 0x77e: 0x3008, 0x77f: 0x3308, // Block 0x1e, offset 0x780 - 0x780: 0x1308, 0x781: 0x1008, 0x782: 0x1008, 0x783: 0x1008, 0x784: 0x1008, 0x785: 0x0040, - 0x786: 0x1308, 0x787: 0x1308, 0x788: 0x1308, 0x789: 
0x0040, 0x78a: 0x1308, 0x78b: 0x1308, - 0x78c: 0x1308, 0x78d: 0x1b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040, - 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x1308, 0x796: 0x1308, 0x797: 0x0040, - 0x798: 0x0008, 0x799: 0x0008, 0x79a: 0x0008, 0x79b: 0x0040, 0x79c: 0x0040, 0x79d: 0x0040, - 0x79e: 0x0040, 0x79f: 0x0040, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x1308, 0x7a3: 0x1308, + 0x780: 0x3008, 0x781: 0x3308, 0x782: 0x3308, 0x783: 0x3308, 0x784: 0x3308, 0x785: 0x0040, + 0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008, + 0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040, + 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x0040, 0x796: 0x3308, 0x797: 0x3008, + 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x0881, 0x79d: 0x08b9, + 0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308, 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008, 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008, - 0x7b0: 0x0040, 0x7b1: 0x0040, 0x7b2: 0x0040, 0x7b3: 0x0040, 0x7b4: 0x0040, 0x7b5: 0x0040, - 0x7b6: 0x0040, 0x7b7: 0x0040, 0x7b8: 0x0018, 0x7b9: 0x0018, 0x7ba: 0x0018, 0x7bb: 0x0018, - 0x7bc: 0x0018, 0x7bd: 0x0018, 0x7be: 0x0018, 0x7bf: 0x0018, + 0x7b0: 0x0018, 0x7b1: 0x0008, 0x7b2: 0x0018, 0x7b3: 0x0018, 0x7b4: 0x0018, 0x7b5: 0x0018, + 0x7b6: 0x0018, 0x7b7: 0x0018, 0x7b8: 0x0040, 0x7b9: 0x0040, 0x7ba: 0x0040, 0x7bb: 0x0040, + 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x0040, 0x7bf: 0x0040, // Block 0x1f, offset 0x7c0 - 0x7c0: 0x0008, 0x7c1: 0x1308, 0x7c2: 0x1008, 0x7c3: 0x1008, 0x7c4: 0x0040, 0x7c5: 0x0008, - 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0008, - 0x7cc: 0x0008, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040, - 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0008, 0x7d7: 0x0008, - 0x7d8: 0x0008, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0008, 0x7dc: 0x0008, 0x7dd: 0x0008, - 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0008, 0x7e1: 0x0008, 0x7e2: 0x0008, 0x7e3: 0x0008, - 0x7e4: 0x0008, 0x7e5: 0x0008, 0x7e6: 0x0008, 0x7e7: 0x0008, 0x7e8: 0x0008, 0x7e9: 0x0040, - 0x7ea: 0x0008, 0x7eb: 0x0008, 0x7ec: 0x0008, 0x7ed: 0x0008, 0x7ee: 0x0008, 0x7ef: 0x0008, - 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0040, 0x7f5: 0x0008, + 0x7c0: 0x0040, 0x7c1: 0x0040, 0x7c2: 0x3308, 0x7c3: 0x0008, 0x7c4: 0x0040, 0x7c5: 0x0008, + 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0040, + 0x7cc: 0x0040, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040, + 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0040, 0x7d7: 0x0040, + 0x7d8: 0x0040, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0008, 0x7dd: 0x0040, + 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0040, 0x7e1: 0x0040, 0x7e2: 0x0040, 0x7e3: 0x0008, + 0x7e4: 0x0008, 0x7e5: 0x0040, 0x7e6: 0x0040, 0x7e7: 0x0040, 0x7e8: 0x0008, 0x7e9: 0x0008, + 0x7ea: 0x0008, 0x7eb: 0x0040, 0x7ec: 0x0040, 0x7ed: 0x0040, 0x7ee: 0x0008, 0x7ef: 0x0008, + 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0008, 0x7f5: 0x0008, 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040, - 0x7fc: 0x1308, 0x7fd: 0x0008, 0x7fe: 0x1008, 0x7ff: 0x1308, + 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x3008, 0x7ff: 0x3008, // Block 0x20, offset 
0x800 - 0x800: 0x1008, 0x801: 0x1008, 0x802: 0x1008, 0x803: 0x1008, 0x804: 0x1008, 0x805: 0x0040, - 0x806: 0x1308, 0x807: 0x1008, 0x808: 0x1008, 0x809: 0x0040, 0x80a: 0x1008, 0x80b: 0x1008, - 0x80c: 0x1308, 0x80d: 0x1b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040, - 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x1008, 0x816: 0x1008, 0x817: 0x0040, - 0x818: 0x0040, 0x819: 0x0040, 0x81a: 0x0040, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040, - 0x81e: 0x0008, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x1308, 0x823: 0x1308, + 0x800: 0x3308, 0x801: 0x3008, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x3008, 0x805: 0x0040, + 0x806: 0x3308, 0x807: 0x3308, 0x808: 0x3308, 0x809: 0x0040, 0x80a: 0x3308, 0x80b: 0x3308, + 0x80c: 0x3308, 0x80d: 0x3b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040, + 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x3308, 0x816: 0x3308, 0x817: 0x0040, + 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040, + 0x81e: 0x0040, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x3308, 0x823: 0x3308, 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008, 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 0x0008, 0x82f: 0x0008, - 0x830: 0x0040, 0x831: 0x0008, 0x832: 0x0008, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040, - 0x836: 0x0040, 0x837: 0x0040, 0x838: 0x0040, 0x839: 0x0040, 0x83a: 0x0040, 0x83b: 0x0040, - 0x83c: 0x0040, 0x83d: 0x0040, 0x83e: 0x0040, 0x83f: 0x0040, + 0x830: 0x0040, 0x831: 0x0040, 0x832: 0x0040, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040, + 0x836: 0x0040, 0x837: 0x0040, 0x838: 0x0018, 0x839: 0x0018, 0x83a: 0x0018, 0x83b: 0x0018, + 0x83c: 0x0018, 0x83d: 0x0018, 0x83e: 0x0018, 0x83f: 0x0018, // Block 0x21, offset 0x840 - 0x840: 0x1008, 0x841: 0x1308, 0x842: 0x1308, 0x843: 0x1308, 0x844: 0x1308, 0x845: 0x0040, - 0x846: 0x1008, 0x847: 0x1008, 0x848: 0x1008, 0x849: 0x0040, 0x84a: 0x1008, 0x84b: 0x1008, - 0x84c: 0x1008, 0x84d: 0x1b08, 0x84e: 0x0008, 0x84f: 0x0018, 0x850: 0x0040, 0x851: 0x0040, - 0x852: 0x0040, 0x853: 0x0040, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x1008, - 0x858: 0x0018, 0x859: 0x0018, 0x85a: 0x0018, 0x85b: 0x0018, 0x85c: 0x0018, 0x85d: 0x0018, - 0x85e: 0x0018, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x1308, 0x863: 0x1308, - 0x864: 0x0040, 0x865: 0x0040, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0008, + 0x840: 0x0008, 0x841: 0x3308, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x0040, 0x845: 0x0008, + 0x846: 0x0008, 0x847: 0x0008, 0x848: 0x0008, 0x849: 0x0008, 0x84a: 0x0008, 0x84b: 0x0008, + 0x84c: 0x0008, 0x84d: 0x0040, 0x84e: 0x0008, 0x84f: 0x0008, 0x850: 0x0008, 0x851: 0x0040, + 0x852: 0x0008, 0x853: 0x0008, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x0008, + 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0008, 0x85c: 0x0008, 0x85d: 0x0008, + 0x85e: 0x0008, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x0008, 0x863: 0x0008, + 0x864: 0x0008, 0x865: 0x0008, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0040, 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008, - 0x870: 0x0018, 0x871: 0x0018, 0x872: 0x0018, 0x873: 0x0018, 0x874: 0x0018, 0x875: 0x0018, - 0x876: 0x0018, 0x877: 0x0018, 0x878: 0x0018, 0x879: 0x0018, 0x87a: 0x0008, 0x87b: 0x0008, - 0x87c: 0x0008, 0x87d: 0x0008, 0x87e: 0x0008, 0x87f: 0x0008, + 0x870: 0x0008, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0008, 0x874: 0x0040, 0x875: 0x0008, + 0x876: 0x0008, 0x877: 
0x0008, 0x878: 0x0008, 0x879: 0x0008, 0x87a: 0x0040, 0x87b: 0x0040, + 0x87c: 0x3308, 0x87d: 0x0008, 0x87e: 0x3008, 0x87f: 0x3308, // Block 0x22, offset 0x880 - 0x880: 0x0040, 0x881: 0x0008, 0x882: 0x0008, 0x883: 0x0040, 0x884: 0x0008, 0x885: 0x0040, - 0x886: 0x0040, 0x887: 0x0008, 0x888: 0x0008, 0x889: 0x0040, 0x88a: 0x0008, 0x88b: 0x0040, - 0x88c: 0x0040, 0x88d: 0x0008, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040, - 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0008, 0x895: 0x0008, 0x896: 0x0008, 0x897: 0x0008, - 0x898: 0x0040, 0x899: 0x0008, 0x89a: 0x0008, 0x89b: 0x0008, 0x89c: 0x0008, 0x89d: 0x0008, - 0x89e: 0x0008, 0x89f: 0x0008, 0x8a0: 0x0040, 0x8a1: 0x0008, 0x8a2: 0x0008, 0x8a3: 0x0008, - 0x8a4: 0x0040, 0x8a5: 0x0008, 0x8a6: 0x0040, 0x8a7: 0x0008, 0x8a8: 0x0040, 0x8a9: 0x0040, - 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0040, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, - 0x8b0: 0x0008, 0x8b1: 0x1308, 0x8b2: 0x0008, 0x8b3: 0x0929, 0x8b4: 0x1308, 0x8b5: 0x1308, - 0x8b6: 0x1308, 0x8b7: 0x1308, 0x8b8: 0x1308, 0x8b9: 0x1308, 0x8ba: 0x0040, 0x8bb: 0x1308, - 0x8bc: 0x1308, 0x8bd: 0x0008, 0x8be: 0x0040, 0x8bf: 0x0040, + 0x880: 0x3008, 0x881: 0x3008, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x3008, 0x885: 0x0040, + 0x886: 0x3308, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008, + 0x88c: 0x3308, 0x88d: 0x3b08, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040, + 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0040, 0x895: 0x3008, 0x896: 0x3008, 0x897: 0x0040, + 0x898: 0x0040, 0x899: 0x0040, 0x89a: 0x0040, 0x89b: 0x0040, 0x89c: 0x0040, 0x89d: 0x0040, + 0x89e: 0x0008, 0x89f: 0x0040, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308, + 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008, + 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, + 0x8b0: 0x0040, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0040, 0x8b4: 0x0040, 0x8b5: 0x0040, + 0x8b6: 0x0040, 0x8b7: 0x0040, 0x8b8: 0x0040, 0x8b9: 0x0040, 0x8ba: 0x0040, 0x8bb: 0x0040, + 0x8bc: 0x0040, 0x8bd: 0x0040, 0x8be: 0x0040, 0x8bf: 0x0040, // Block 0x23, offset 0x8c0 - 0x8c0: 0x0008, 0x8c1: 0x0008, 0x8c2: 0x0008, 0x8c3: 0x09d1, 0x8c4: 0x0008, 0x8c5: 0x0008, - 0x8c6: 0x0008, 0x8c7: 0x0008, 0x8c8: 0x0040, 0x8c9: 0x0008, 0x8ca: 0x0008, 0x8cb: 0x0008, - 0x8cc: 0x0008, 0x8cd: 0x0a09, 0x8ce: 0x0008, 0x8cf: 0x0008, 0x8d0: 0x0008, 0x8d1: 0x0008, - 0x8d2: 0x0a41, 0x8d3: 0x0008, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x0a79, - 0x8d8: 0x0008, 0x8d9: 0x0008, 0x8da: 0x0008, 0x8db: 0x0008, 0x8dc: 0x0ab1, 0x8dd: 0x0008, - 0x8de: 0x0008, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x0008, 0x8e3: 0x0008, - 0x8e4: 0x0008, 0x8e5: 0x0008, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0ae9, - 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0040, 0x8ee: 0x0040, 0x8ef: 0x0040, - 0x8f0: 0x0040, 0x8f1: 0x1308, 0x8f2: 0x1308, 0x8f3: 0x0b21, 0x8f4: 0x1308, 0x8f5: 0x0b59, - 0x8f6: 0x0b91, 0x8f7: 0x0bc9, 0x8f8: 0x0c19, 0x8f9: 0x0c51, 0x8fa: 0x1308, 0x8fb: 0x1308, - 0x8fc: 0x1308, 0x8fd: 0x1308, 0x8fe: 0x1308, 0x8ff: 0x1008, + 0x8c0: 0x3008, 0x8c1: 0x3308, 0x8c2: 0x3308, 0x8c3: 0x3308, 0x8c4: 0x3308, 0x8c5: 0x0040, + 0x8c6: 0x3008, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008, + 0x8cc: 0x3008, 0x8cd: 0x3b08, 0x8ce: 0x0008, 0x8cf: 0x0018, 0x8d0: 0x0040, 0x8d1: 0x0040, + 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x3008, + 0x8d8: 0x0018, 0x8d9: 0x0018, 0x8da: 0x0018, 0x8db: 
0x0018, 0x8dc: 0x0018, 0x8dd: 0x0018, + 0x8de: 0x0018, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308, + 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008, + 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008, + 0x8f0: 0x0018, 0x8f1: 0x0018, 0x8f2: 0x0018, 0x8f3: 0x0018, 0x8f4: 0x0018, 0x8f5: 0x0018, + 0x8f6: 0x0018, 0x8f7: 0x0018, 0x8f8: 0x0018, 0x8f9: 0x0018, 0x8fa: 0x0008, 0x8fb: 0x0008, + 0x8fc: 0x0008, 0x8fd: 0x0008, 0x8fe: 0x0008, 0x8ff: 0x0008, // Block 0x24, offset 0x900 - 0x900: 0x1308, 0x901: 0x0ca1, 0x902: 0x1308, 0x903: 0x1308, 0x904: 0x1b08, 0x905: 0x0018, - 0x906: 0x1308, 0x907: 0x1308, 0x908: 0x0008, 0x909: 0x0008, 0x90a: 0x0008, 0x90b: 0x0008, - 0x90c: 0x0008, 0x90d: 0x1308, 0x90e: 0x1308, 0x90f: 0x1308, 0x910: 0x1308, 0x911: 0x1308, - 0x912: 0x1308, 0x913: 0x0cd9, 0x914: 0x1308, 0x915: 0x1308, 0x916: 0x1308, 0x917: 0x1308, - 0x918: 0x0040, 0x919: 0x1308, 0x91a: 0x1308, 0x91b: 0x1308, 0x91c: 0x1308, 0x91d: 0x0d11, - 0x91e: 0x1308, 0x91f: 0x1308, 0x920: 0x1308, 0x921: 0x1308, 0x922: 0x0d49, 0x923: 0x1308, - 0x924: 0x1308, 0x925: 0x1308, 0x926: 0x1308, 0x927: 0x0d81, 0x928: 0x1308, 0x929: 0x1308, - 0x92a: 0x1308, 0x92b: 0x1308, 0x92c: 0x0db9, 0x92d: 0x1308, 0x92e: 0x1308, 0x92f: 0x1308, - 0x930: 0x1308, 0x931: 0x1308, 0x932: 0x1308, 0x933: 0x1308, 0x934: 0x1308, 0x935: 0x1308, - 0x936: 0x1308, 0x937: 0x1308, 0x938: 0x1308, 0x939: 0x0df1, 0x93a: 0x1308, 0x93b: 0x1308, - 0x93c: 0x1308, 0x93d: 0x0040, 0x93e: 0x0018, 0x93f: 0x0018, + 0x900: 0x0040, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x0040, 0x904: 0x0008, 0x905: 0x0040, + 0x906: 0x0040, 0x907: 0x0008, 0x908: 0x0008, 0x909: 0x0040, 0x90a: 0x0008, 0x90b: 0x0040, + 0x90c: 0x0040, 0x90d: 0x0008, 0x90e: 0x0040, 0x90f: 0x0040, 0x910: 0x0040, 0x911: 0x0040, + 0x912: 0x0040, 0x913: 0x0040, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0008, + 0x918: 0x0040, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0008, 0x91d: 0x0008, + 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0040, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008, + 0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0040, 0x929: 0x0040, + 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0040, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008, + 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x0929, 0x934: 0x3308, 0x935: 0x3308, + 0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x0040, 0x93b: 0x3308, + 0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040, // Block 0x25, offset 0x940 - 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x0008, 0x944: 0x0008, 0x945: 0x0008, - 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0008, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, - 0x94c: 0x0008, 0x94d: 0x0008, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, - 0x952: 0x0008, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0008, - 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0008, 0x95d: 0x0008, + 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x09d1, 0x944: 0x0008, 0x945: 0x0008, + 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, + 0x94c: 0x0008, 0x94d: 0x0a09, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, + 0x952: 0x0a41, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0a79, + 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0ab1, 0x95d: 0x0008, 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 
0x0008, 0x962: 0x0008, 0x963: 0x0008, - 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0008, - 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0039, 0x96d: 0x0ed1, 0x96e: 0x0ee9, 0x96f: 0x0008, - 0x970: 0x0ef9, 0x971: 0x0f09, 0x972: 0x0f19, 0x973: 0x0f31, 0x974: 0x0249, 0x975: 0x0f41, - 0x976: 0x0259, 0x977: 0x0f51, 0x978: 0x0359, 0x979: 0x0f61, 0x97a: 0x0f71, 0x97b: 0x0008, - 0x97c: 0x00d9, 0x97d: 0x0f81, 0x97e: 0x0f99, 0x97f: 0x0269, + 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0ae9, + 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040, + 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0b21, 0x974: 0x3308, 0x975: 0x0b59, + 0x976: 0x0b91, 0x977: 0x0bc9, 0x978: 0x0c19, 0x979: 0x0c51, 0x97a: 0x3308, 0x97b: 0x3308, + 0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008, // Block 0x26, offset 0x980 - 0x980: 0x0fa9, 0x981: 0x0fb9, 0x982: 0x0279, 0x983: 0x0039, 0x984: 0x0fc9, 0x985: 0x0fe1, - 0x986: 0x059d, 0x987: 0x0ee9, 0x988: 0x0ef9, 0x989: 0x0f09, 0x98a: 0x0ff9, 0x98b: 0x1011, - 0x98c: 0x1029, 0x98d: 0x0f31, 0x98e: 0x0008, 0x98f: 0x0f51, 0x990: 0x0f61, 0x991: 0x1041, - 0x992: 0x00d9, 0x993: 0x1059, 0x994: 0x05b5, 0x995: 0x05b5, 0x996: 0x0f99, 0x997: 0x0fa9, - 0x998: 0x0fb9, 0x999: 0x059d, 0x99a: 0x1071, 0x99b: 0x1089, 0x99c: 0x05cd, 0x99d: 0x1099, - 0x99e: 0x10b1, 0x99f: 0x10c9, 0x9a0: 0x10e1, 0x9a1: 0x10f9, 0x9a2: 0x0f41, 0x9a3: 0x0269, - 0x9a4: 0x0fb9, 0x9a5: 0x1089, 0x9a6: 0x1099, 0x9a7: 0x10b1, 0x9a8: 0x1111, 0x9a9: 0x10e1, - 0x9aa: 0x10f9, 0x9ab: 0x0008, 0x9ac: 0x0008, 0x9ad: 0x0008, 0x9ae: 0x0008, 0x9af: 0x0008, - 0x9b0: 0x0008, 0x9b1: 0x0008, 0x9b2: 0x0008, 0x9b3: 0x0008, 0x9b4: 0x0008, 0x9b5: 0x0008, - 0x9b6: 0x0008, 0x9b7: 0x0008, 0x9b8: 0x1129, 0x9b9: 0x0008, 0x9ba: 0x0008, 0x9bb: 0x0008, - 0x9bc: 0x0008, 0x9bd: 0x0008, 0x9be: 0x0008, 0x9bf: 0x0008, + 0x980: 0x3308, 0x981: 0x0ca1, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018, + 0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008, + 0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308, + 0x992: 0x3308, 0x993: 0x0cd9, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308, + 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0d11, + 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0d49, 0x9a3: 0x3308, + 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0d81, 0x9a8: 0x3308, 0x9a9: 0x3308, + 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0db9, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308, + 0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308, + 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x0df1, 0x9ba: 0x3308, 0x9bb: 0x3308, + 0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018, // Block 0x27, offset 0x9c0 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008, 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008, 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008, 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008, - 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x1141, 0x9dc: 0x1159, 0x9dd: 0x1169, - 0x9de: 0x1181, 0x9df: 0x1029, 0x9e0: 0x1199, 0x9e1: 0x11a9, 0x9e2: 0x11c1, 0x9e3: 0x11d9, - 0x9e4: 0x11f1, 0x9e5: 0x1209, 0x9e6: 0x1221, 0x9e7: 0x05e5, 0x9e8: 0x1239, 0x9e9: 0x1251, 
- 0x9ea: 0xe17d, 0x9eb: 0x1269, 0x9ec: 0x1281, 0x9ed: 0x1299, 0x9ee: 0x12b1, 0x9ef: 0x12c9, - 0x9f0: 0x12e1, 0x9f1: 0x12f9, 0x9f2: 0x1311, 0x9f3: 0x1329, 0x9f4: 0x1341, 0x9f5: 0x1359, - 0x9f6: 0x1371, 0x9f7: 0x1389, 0x9f8: 0x05fd, 0x9f9: 0x13a1, 0x9fa: 0x13b9, 0x9fb: 0x13d1, - 0x9fc: 0x13e1, 0x9fd: 0x13f9, 0x9fe: 0x1411, 0x9ff: 0x1429, + 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 0x9dc: 0x0008, 0x9dd: 0x0008, + 0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008, + 0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 0x9e8: 0x0008, 0x9e9: 0x0008, + 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0039, 0x9ed: 0x0ed1, 0x9ee: 0x0ee9, 0x9ef: 0x0008, + 0x9f0: 0x0ef9, 0x9f1: 0x0f09, 0x9f2: 0x0f19, 0x9f3: 0x0f31, 0x9f4: 0x0249, 0x9f5: 0x0f41, + 0x9f6: 0x0259, 0x9f7: 0x0f51, 0x9f8: 0x0359, 0x9f9: 0x0f61, 0x9fa: 0x0f71, 0x9fb: 0x0008, + 0x9fc: 0x00d9, 0x9fd: 0x0f81, 0x9fe: 0x0f99, 0x9ff: 0x0269, // Block 0x28, offset 0xa00 - 0xa00: 0xe00d, 0xa01: 0x0008, 0xa02: 0xe00d, 0xa03: 0x0008, 0xa04: 0xe00d, 0xa05: 0x0008, - 0xa06: 0xe00d, 0xa07: 0x0008, 0xa08: 0xe00d, 0xa09: 0x0008, 0xa0a: 0xe00d, 0xa0b: 0x0008, - 0xa0c: 0xe00d, 0xa0d: 0x0008, 0xa0e: 0xe00d, 0xa0f: 0x0008, 0xa10: 0xe00d, 0xa11: 0x0008, - 0xa12: 0xe00d, 0xa13: 0x0008, 0xa14: 0xe00d, 0xa15: 0x0008, 0xa16: 0xe00d, 0xa17: 0x0008, - 0xa18: 0xe00d, 0xa19: 0x0008, 0xa1a: 0xe00d, 0xa1b: 0x0008, 0xa1c: 0xe00d, 0xa1d: 0x0008, - 0xa1e: 0xe00d, 0xa1f: 0x0008, 0xa20: 0xe00d, 0xa21: 0x0008, 0xa22: 0xe00d, 0xa23: 0x0008, - 0xa24: 0xe00d, 0xa25: 0x0008, 0xa26: 0xe00d, 0xa27: 0x0008, 0xa28: 0xe00d, 0xa29: 0x0008, - 0xa2a: 0xe00d, 0xa2b: 0x0008, 0xa2c: 0xe00d, 0xa2d: 0x0008, 0xa2e: 0xe00d, 0xa2f: 0x0008, - 0xa30: 0xe00d, 0xa31: 0x0008, 0xa32: 0xe00d, 0xa33: 0x0008, 0xa34: 0xe00d, 0xa35: 0x0008, - 0xa36: 0xe00d, 0xa37: 0x0008, 0xa38: 0xe00d, 0xa39: 0x0008, 0xa3a: 0xe00d, 0xa3b: 0x0008, - 0xa3c: 0xe00d, 0xa3d: 0x0008, 0xa3e: 0xe00d, 0xa3f: 0x0008, + 0xa00: 0x0fa9, 0xa01: 0x0fb9, 0xa02: 0x0279, 0xa03: 0x0039, 0xa04: 0x0fc9, 0xa05: 0x0fe1, + 0xa06: 0x059d, 0xa07: 0x0ee9, 0xa08: 0x0ef9, 0xa09: 0x0f09, 0xa0a: 0x0ff9, 0xa0b: 0x1011, + 0xa0c: 0x1029, 0xa0d: 0x0f31, 0xa0e: 0x0008, 0xa0f: 0x0f51, 0xa10: 0x0f61, 0xa11: 0x1041, + 0xa12: 0x00d9, 0xa13: 0x1059, 0xa14: 0x05b5, 0xa15: 0x05b5, 0xa16: 0x0f99, 0xa17: 0x0fa9, + 0xa18: 0x0fb9, 0xa19: 0x059d, 0xa1a: 0x1071, 0xa1b: 0x1089, 0xa1c: 0x05cd, 0xa1d: 0x1099, + 0xa1e: 0x10b1, 0xa1f: 0x10c9, 0xa20: 0x10e1, 0xa21: 0x10f9, 0xa22: 0x0f41, 0xa23: 0x0269, + 0xa24: 0x0fb9, 0xa25: 0x1089, 0xa26: 0x1099, 0xa27: 0x10b1, 0xa28: 0x1111, 0xa29: 0x10e1, + 0xa2a: 0x10f9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008, + 0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008, + 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x1129, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008, + 0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008, // Block 0x29, offset 0xa40 - 0xa40: 0xe00d, 0xa41: 0x0008, 0xa42: 0xe00d, 0xa43: 0x0008, 0xa44: 0xe00d, 0xa45: 0x0008, - 0xa46: 0xe00d, 0xa47: 0x0008, 0xa48: 0xe00d, 0xa49: 0x0008, 0xa4a: 0xe00d, 0xa4b: 0x0008, - 0xa4c: 0xe00d, 0xa4d: 0x0008, 0xa4e: 0xe00d, 0xa4f: 0x0008, 0xa50: 0xe00d, 0xa51: 0x0008, - 0xa52: 0xe00d, 0xa53: 0x0008, 0xa54: 0xe00d, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008, - 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0615, 0xa5b: 0x0635, 0xa5c: 0x0008, 0xa5d: 0x0008, - 0xa5e: 0x1441, 0xa5f: 0x0008, 0xa60: 0xe00d, 0xa61: 0x0008, 0xa62: 0xe00d, 0xa63: 0x0008, - 0xa64: 0xe00d, 0xa65: 
0x0008, 0xa66: 0xe00d, 0xa67: 0x0008, 0xa68: 0xe00d, 0xa69: 0x0008, - 0xa6a: 0xe00d, 0xa6b: 0x0008, 0xa6c: 0xe00d, 0xa6d: 0x0008, 0xa6e: 0xe00d, 0xa6f: 0x0008, - 0xa70: 0xe00d, 0xa71: 0x0008, 0xa72: 0xe00d, 0xa73: 0x0008, 0xa74: 0xe00d, 0xa75: 0x0008, - 0xa76: 0xe00d, 0xa77: 0x0008, 0xa78: 0xe00d, 0xa79: 0x0008, 0xa7a: 0xe00d, 0xa7b: 0x0008, - 0xa7c: 0xe00d, 0xa7d: 0x0008, 0xa7e: 0xe00d, 0xa7f: 0x0008, + 0xa40: 0x0008, 0xa41: 0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008, + 0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008, + 0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008, + 0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008, + 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x1141, 0xa5c: 0x1159, 0xa5d: 0x1169, + 0xa5e: 0x1181, 0xa5f: 0x1029, 0xa60: 0x1199, 0xa61: 0x11a9, 0xa62: 0x11c1, 0xa63: 0x11d9, + 0xa64: 0x11f1, 0xa65: 0x1209, 0xa66: 0x1221, 0xa67: 0x05e5, 0xa68: 0x1239, 0xa69: 0x1251, + 0xa6a: 0xe17d, 0xa6b: 0x1269, 0xa6c: 0x1281, 0xa6d: 0x1299, 0xa6e: 0x12b1, 0xa6f: 0x12c9, + 0xa70: 0x12e1, 0xa71: 0x12f9, 0xa72: 0x1311, 0xa73: 0x1329, 0xa74: 0x1341, 0xa75: 0x1359, + 0xa76: 0x1371, 0xa77: 0x1389, 0xa78: 0x05fd, 0xa79: 0x13a1, 0xa7a: 0x13b9, 0xa7b: 0x13d1, + 0xa7c: 0x13e1, 0xa7d: 0x13f9, 0xa7e: 0x1411, 0xa7f: 0x1429, // Block 0x2a, offset 0xa80 - 0xa80: 0x0008, 0xa81: 0x0008, 0xa82: 0x0008, 0xa83: 0x0008, 0xa84: 0x0008, 0xa85: 0x0008, - 0xa86: 0x0040, 0xa87: 0x0040, 0xa88: 0xe045, 0xa89: 0xe045, 0xa8a: 0xe045, 0xa8b: 0xe045, - 0xa8c: 0xe045, 0xa8d: 0xe045, 0xa8e: 0x0040, 0xa8f: 0x0040, 0xa90: 0x0008, 0xa91: 0x0008, - 0xa92: 0x0008, 0xa93: 0x0008, 0xa94: 0x0008, 0xa95: 0x0008, 0xa96: 0x0008, 0xa97: 0x0008, - 0xa98: 0x0040, 0xa99: 0xe045, 0xa9a: 0x0040, 0xa9b: 0xe045, 0xa9c: 0x0040, 0xa9d: 0xe045, - 0xa9e: 0x0040, 0xa9f: 0xe045, 0xaa0: 0x0008, 0xaa1: 0x0008, 0xaa2: 0x0008, 0xaa3: 0x0008, - 0xaa4: 0x0008, 0xaa5: 0x0008, 0xaa6: 0x0008, 0xaa7: 0x0008, 0xaa8: 0xe045, 0xaa9: 0xe045, - 0xaaa: 0xe045, 0xaab: 0xe045, 0xaac: 0xe045, 0xaad: 0xe045, 0xaae: 0xe045, 0xaaf: 0xe045, - 0xab0: 0x0008, 0xab1: 0x1459, 0xab2: 0x0008, 0xab3: 0x1471, 0xab4: 0x0008, 0xab5: 0x1489, - 0xab6: 0x0008, 0xab7: 0x14a1, 0xab8: 0x0008, 0xab9: 0x14b9, 0xaba: 0x0008, 0xabb: 0x14d1, - 0xabc: 0x0008, 0xabd: 0x14e9, 0xabe: 0x0040, 0xabf: 0x0040, + 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008, + 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008, + 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008, + 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0xe00d, 0xa97: 0x0008, + 0xa98: 0xe00d, 0xa99: 0x0008, 0xa9a: 0xe00d, 0xa9b: 0x0008, 0xa9c: 0xe00d, 0xa9d: 0x0008, + 0xa9e: 0xe00d, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008, + 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008, + 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008, + 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008, + 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008, + 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008, // Block 0x2b, offset 0xac0 - 0xac0: 0x1501, 0xac1: 0x1531, 0xac2: 0x1561, 0xac3: 0x1591, 0xac4: 0x15c1, 0xac5: 0x15f1, - 0xac6: 0x1621, 0xac7: 0x1651, 0xac8: 0x1501, 0xac9: 
0x1531, 0xaca: 0x1561, 0xacb: 0x1591, - 0xacc: 0x15c1, 0xacd: 0x15f1, 0xace: 0x1621, 0xacf: 0x1651, 0xad0: 0x1681, 0xad1: 0x16b1, - 0xad2: 0x16e1, 0xad3: 0x1711, 0xad4: 0x1741, 0xad5: 0x1771, 0xad6: 0x17a1, 0xad7: 0x17d1, - 0xad8: 0x1681, 0xad9: 0x16b1, 0xada: 0x16e1, 0xadb: 0x1711, 0xadc: 0x1741, 0xadd: 0x1771, - 0xade: 0x17a1, 0xadf: 0x17d1, 0xae0: 0x1801, 0xae1: 0x1831, 0xae2: 0x1861, 0xae3: 0x1891, - 0xae4: 0x18c1, 0xae5: 0x18f1, 0xae6: 0x1921, 0xae7: 0x1951, 0xae8: 0x1801, 0xae9: 0x1831, - 0xaea: 0x1861, 0xaeb: 0x1891, 0xaec: 0x18c1, 0xaed: 0x18f1, 0xaee: 0x1921, 0xaef: 0x1951, - 0xaf0: 0x0008, 0xaf1: 0x0008, 0xaf2: 0x1981, 0xaf3: 0x19b1, 0xaf4: 0x19d9, 0xaf5: 0x0040, - 0xaf6: 0x0008, 0xaf7: 0x1a01, 0xaf8: 0xe045, 0xaf9: 0xe045, 0xafa: 0x064d, 0xafb: 0x1459, - 0xafc: 0x19b1, 0xafd: 0x0666, 0xafe: 0x1a31, 0xaff: 0x0686, + 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008, + 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008, + 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008, + 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008, + 0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x0615, 0xadb: 0x0635, 0xadc: 0x0008, 0xadd: 0x0008, + 0xade: 0x1441, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008, + 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008, + 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008, + 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008, + 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008, + 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008, // Block 0x2c, offset 0xb00 - 0xb00: 0x06a6, 0xb01: 0x1a4a, 0xb02: 0x1a79, 0xb03: 0x1aa9, 0xb04: 0x1ad1, 0xb05: 0x0040, - 0xb06: 0x0008, 0xb07: 0x1af9, 0xb08: 0x06c5, 0xb09: 0x1471, 0xb0a: 0x06dd, 0xb0b: 0x1489, - 0xb0c: 0x1aa9, 0xb0d: 0x1b2a, 0xb0e: 0x1b5a, 0xb0f: 0x1b8a, 0xb10: 0x0008, 0xb11: 0x0008, - 0xb12: 0x0008, 0xb13: 0x1bb9, 0xb14: 0x0040, 0xb15: 0x0040, 0xb16: 0x0008, 0xb17: 0x0008, - 0xb18: 0xe045, 0xb19: 0xe045, 0xb1a: 0x06f5, 0xb1b: 0x14a1, 0xb1c: 0x0040, 0xb1d: 0x1bd2, - 0xb1e: 0x1c02, 0xb1f: 0x1c32, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x1c61, + 0xb00: 0x0008, 0xb01: 0x0008, 0xb02: 0x0008, 0xb03: 0x0008, 0xb04: 0x0008, 0xb05: 0x0008, + 0xb06: 0x0040, 0xb07: 0x0040, 0xb08: 0xe045, 0xb09: 0xe045, 0xb0a: 0xe045, 0xb0b: 0xe045, + 0xb0c: 0xe045, 0xb0d: 0xe045, 0xb0e: 0x0040, 0xb0f: 0x0040, 0xb10: 0x0008, 0xb11: 0x0008, + 0xb12: 0x0008, 0xb13: 0x0008, 0xb14: 0x0008, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008, + 0xb18: 0x0040, 0xb19: 0xe045, 0xb1a: 0x0040, 0xb1b: 0xe045, 0xb1c: 0x0040, 0xb1d: 0xe045, + 0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008, 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045, - 0xb2a: 0x070d, 0xb2b: 0x14d1, 0xb2c: 0xe04d, 0xb2d: 0x1c7a, 0xb2e: 0x03d2, 0xb2f: 0x1caa, - 0xb30: 0x0040, 0xb31: 0x0040, 0xb32: 0x1cb9, 0xb33: 0x1ce9, 0xb34: 0x1d11, 0xb35: 0x0040, - 0xb36: 0x0008, 0xb37: 0x1d39, 0xb38: 0x0725, 0xb39: 0x14b9, 0xb3a: 0x0515, 0xb3b: 0x14e9, - 0xb3c: 0x1ce9, 0xb3d: 0x073e, 0xb3e: 0x075e, 0xb3f: 0x0040, + 0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045, + 0xb30: 0x0008, 0xb31: 0x1459, 0xb32: 0x0008, 0xb33: 0x1471, 0xb34: 0x0008, 0xb35: 
0x1489, + 0xb36: 0x0008, 0xb37: 0x14a1, 0xb38: 0x0008, 0xb39: 0x14b9, 0xb3a: 0x0008, 0xb3b: 0x14d1, + 0xb3c: 0x0008, 0xb3d: 0x14e9, 0xb3e: 0x0040, 0xb3f: 0x0040, // Block 0x2d, offset 0xb40 - 0xb40: 0x000a, 0xb41: 0x000a, 0xb42: 0x000a, 0xb43: 0x000a, 0xb44: 0x000a, 0xb45: 0x000a, - 0xb46: 0x000a, 0xb47: 0x000a, 0xb48: 0x000a, 0xb49: 0x000a, 0xb4a: 0x000a, 0xb4b: 0x03c0, - 0xb4c: 0x0003, 0xb4d: 0x0003, 0xb4e: 0x0340, 0xb4f: 0x0340, 0xb50: 0x0018, 0xb51: 0xe00d, - 0xb52: 0x0018, 0xb53: 0x0018, 0xb54: 0x0018, 0xb55: 0x0018, 0xb56: 0x0018, 0xb57: 0x077e, - 0xb58: 0x0018, 0xb59: 0x0018, 0xb5a: 0x0018, 0xb5b: 0x0018, 0xb5c: 0x0018, 0xb5d: 0x0018, - 0xb5e: 0x0018, 0xb5f: 0x0018, 0xb60: 0x0018, 0xb61: 0x0018, 0xb62: 0x0018, 0xb63: 0x0018, - 0xb64: 0x0040, 0xb65: 0x0040, 0xb66: 0x0040, 0xb67: 0x0018, 0xb68: 0x0040, 0xb69: 0x0040, - 0xb6a: 0x0340, 0xb6b: 0x0340, 0xb6c: 0x0340, 0xb6d: 0x0340, 0xb6e: 0x0340, 0xb6f: 0x000a, - 0xb70: 0x0018, 0xb71: 0x0018, 0xb72: 0x0018, 0xb73: 0x1d69, 0xb74: 0x1da1, 0xb75: 0x0018, - 0xb76: 0x1df1, 0xb77: 0x1e29, 0xb78: 0x0018, 0xb79: 0x0018, 0xb7a: 0x0018, 0xb7b: 0x0018, - 0xb7c: 0x1e7a, 0xb7d: 0x0018, 0xb7e: 0x079e, 0xb7f: 0x0018, + 0xb40: 0x1501, 0xb41: 0x1531, 0xb42: 0x1561, 0xb43: 0x1591, 0xb44: 0x15c1, 0xb45: 0x15f1, + 0xb46: 0x1621, 0xb47: 0x1651, 0xb48: 0x1501, 0xb49: 0x1531, 0xb4a: 0x1561, 0xb4b: 0x1591, + 0xb4c: 0x15c1, 0xb4d: 0x15f1, 0xb4e: 0x1621, 0xb4f: 0x1651, 0xb50: 0x1681, 0xb51: 0x16b1, + 0xb52: 0x16e1, 0xb53: 0x1711, 0xb54: 0x1741, 0xb55: 0x1771, 0xb56: 0x17a1, 0xb57: 0x17d1, + 0xb58: 0x1681, 0xb59: 0x16b1, 0xb5a: 0x16e1, 0xb5b: 0x1711, 0xb5c: 0x1741, 0xb5d: 0x1771, + 0xb5e: 0x17a1, 0xb5f: 0x17d1, 0xb60: 0x1801, 0xb61: 0x1831, 0xb62: 0x1861, 0xb63: 0x1891, + 0xb64: 0x18c1, 0xb65: 0x18f1, 0xb66: 0x1921, 0xb67: 0x1951, 0xb68: 0x1801, 0xb69: 0x1831, + 0xb6a: 0x1861, 0xb6b: 0x1891, 0xb6c: 0x18c1, 0xb6d: 0x18f1, 0xb6e: 0x1921, 0xb6f: 0x1951, + 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x1981, 0xb73: 0x19b1, 0xb74: 0x19d9, 0xb75: 0x0040, + 0xb76: 0x0008, 0xb77: 0x1a01, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x064d, 0xb7b: 0x1459, + 0xb7c: 0x19b1, 0xb7d: 0x0666, 0xb7e: 0x1a31, 0xb7f: 0x0686, // Block 0x2e, offset 0xb80 - 0xb80: 0x0018, 0xb81: 0x0018, 0xb82: 0x0018, 0xb83: 0x0018, 0xb84: 0x0018, 0xb85: 0x0018, - 0xb86: 0x0018, 0xb87: 0x1e92, 0xb88: 0x1eaa, 0xb89: 0x1ec2, 0xb8a: 0x0018, 0xb8b: 0x0018, - 0xb8c: 0x0018, 0xb8d: 0x0018, 0xb8e: 0x0018, 0xb8f: 0x0018, 0xb90: 0x0018, 0xb91: 0x0018, - 0xb92: 0x0018, 0xb93: 0x0018, 0xb94: 0x0018, 0xb95: 0x0018, 0xb96: 0x0018, 0xb97: 0x1ed9, - 0xb98: 0x0018, 0xb99: 0x0018, 0xb9a: 0x0018, 0xb9b: 0x0018, 0xb9c: 0x0018, 0xb9d: 0x0018, - 0xb9e: 0x0018, 0xb9f: 0x000a, 0xba0: 0x03c0, 0xba1: 0x0340, 0xba2: 0x0340, 0xba3: 0x0340, - 0xba4: 0x03c0, 0xba5: 0x0040, 0xba6: 0x0040, 0xba7: 0x0040, 0xba8: 0x0040, 0xba9: 0x0040, - 0xbaa: 0x0340, 0xbab: 0x0340, 0xbac: 0x0340, 0xbad: 0x0340, 0xbae: 0x0340, 0xbaf: 0x0340, - 0xbb0: 0x1f41, 0xbb1: 0x0f41, 0xbb2: 0x0040, 0xbb3: 0x0040, 0xbb4: 0x1f51, 0xbb5: 0x1f61, - 0xbb6: 0x1f71, 0xbb7: 0x1f81, 0xbb8: 0x1f91, 0xbb9: 0x1fa1, 0xbba: 0x1fb2, 0xbbb: 0x07bd, - 0xbbc: 0x1fc2, 0xbbd: 0x1fd2, 0xbbe: 0x1fe2, 0xbbf: 0x0f71, + 0xb80: 0x06a6, 0xb81: 0x1a4a, 0xb82: 0x1a79, 0xb83: 0x1aa9, 0xb84: 0x1ad1, 0xb85: 0x0040, + 0xb86: 0x0008, 0xb87: 0x1af9, 0xb88: 0x06c5, 0xb89: 0x1471, 0xb8a: 0x06dd, 0xb8b: 0x1489, + 0xb8c: 0x1aa9, 0xb8d: 0x1b2a, 0xb8e: 0x1b5a, 0xb8f: 0x1b8a, 0xb90: 0x0008, 0xb91: 0x0008, + 0xb92: 0x0008, 0xb93: 0x1bb9, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008, + 0xb98: 0xe045, 
0xb99: 0xe045, 0xb9a: 0x06f5, 0xb9b: 0x14a1, 0xb9c: 0x0040, 0xb9d: 0x1bd2, + 0xb9e: 0x1c02, 0xb9f: 0x1c32, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x1c61, + 0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045, + 0xbaa: 0x070d, 0xbab: 0x14d1, 0xbac: 0xe04d, 0xbad: 0x1c7a, 0xbae: 0x03d2, 0xbaf: 0x1caa, + 0xbb0: 0x0040, 0xbb1: 0x0040, 0xbb2: 0x1cb9, 0xbb3: 0x1ce9, 0xbb4: 0x1d11, 0xbb5: 0x0040, + 0xbb6: 0x0008, 0xbb7: 0x1d39, 0xbb8: 0x0725, 0xbb9: 0x14b9, 0xbba: 0x0515, 0xbbb: 0x14e9, + 0xbbc: 0x1ce9, 0xbbd: 0x073e, 0xbbe: 0x075e, 0xbbf: 0x0040, // Block 0x2f, offset 0xbc0 - 0xbc0: 0x1f41, 0xbc1: 0x00c9, 0xbc2: 0x0069, 0xbc3: 0x0079, 0xbc4: 0x1f51, 0xbc5: 0x1f61, - 0xbc6: 0x1f71, 0xbc7: 0x1f81, 0xbc8: 0x1f91, 0xbc9: 0x1fa1, 0xbca: 0x1fb2, 0xbcb: 0x07d5, - 0xbcc: 0x1fc2, 0xbcd: 0x1fd2, 0xbce: 0x1fe2, 0xbcf: 0x0040, 0xbd0: 0x0039, 0xbd1: 0x0f09, - 0xbd2: 0x00d9, 0xbd3: 0x0369, 0xbd4: 0x0ff9, 0xbd5: 0x0249, 0xbd6: 0x0f51, 0xbd7: 0x0359, - 0xbd8: 0x0f61, 0xbd9: 0x0f71, 0xbda: 0x0f99, 0xbdb: 0x01d9, 0xbdc: 0x0fa9, 0xbdd: 0x0040, - 0xbde: 0x0040, 0xbdf: 0x0040, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018, - 0xbe4: 0x0018, 0xbe5: 0x0018, 0xbe6: 0x0018, 0xbe7: 0x0018, 0xbe8: 0x1ff1, 0xbe9: 0x0018, - 0xbea: 0x0018, 0xbeb: 0x0018, 0xbec: 0x0018, 0xbed: 0x0018, 0xbee: 0x0018, 0xbef: 0x0018, - 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x0018, 0xbf4: 0x0018, 0xbf5: 0x0018, - 0xbf6: 0x0018, 0xbf7: 0x0018, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018, - 0xbfc: 0x0018, 0xbfd: 0x0018, 0xbfe: 0x0018, 0xbff: 0x0040, + 0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a, + 0xbc6: 0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0, + 0xbcc: 0x0003, 0xbcd: 0x0003, 0xbce: 0x0340, 0xbcf: 0x0b40, 0xbd0: 0x0018, 0xbd1: 0xe00d, + 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x077e, + 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018, + 0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018, + 0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040, + 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a, + 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x1d69, 0xbf4: 0x1da1, 0xbf5: 0x0018, + 0xbf6: 0x1df1, 0xbf7: 0x1e29, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018, + 0xbfc: 0x1e7a, 0xbfd: 0x0018, 0xbfe: 0x079e, 0xbff: 0x0018, // Block 0x30, offset 0xc00 - 0xc00: 0x07ee, 0xc01: 0x080e, 0xc02: 0x1159, 0xc03: 0x082d, 0xc04: 0x0018, 0xc05: 0x084e, - 0xc06: 0x086e, 0xc07: 0x1011, 0xc08: 0x0018, 0xc09: 0x088d, 0xc0a: 0x0f31, 0xc0b: 0x0249, - 0xc0c: 0x0249, 0xc0d: 0x0249, 0xc0e: 0x0249, 0xc0f: 0x2009, 0xc10: 0x0f41, 0xc11: 0x0f41, - 0xc12: 0x0359, 0xc13: 0x0359, 0xc14: 0x0018, 0xc15: 0x0f71, 0xc16: 0x2021, 0xc17: 0x0018, - 0xc18: 0x0018, 0xc19: 0x0f99, 0xc1a: 0x2039, 0xc1b: 0x0269, 0xc1c: 0x0269, 0xc1d: 0x0269, - 0xc1e: 0x0018, 0xc1f: 0x0018, 0xc20: 0x2049, 0xc21: 0x08ad, 0xc22: 0x2061, 0xc23: 0x0018, - 0xc24: 0x13d1, 0xc25: 0x0018, 0xc26: 0x2079, 0xc27: 0x0018, 0xc28: 0x13d1, 0xc29: 0x0018, - 0xc2a: 0x0f51, 0xc2b: 0x2091, 0xc2c: 0x0ee9, 0xc2d: 0x1159, 0xc2e: 0x0018, 0xc2f: 0x0f09, - 0xc30: 0x0f09, 0xc31: 0x1199, 0xc32: 0x0040, 0xc33: 0x0f61, 0xc34: 0x00d9, 0xc35: 0x20a9, - 0xc36: 0x20c1, 0xc37: 0x20d9, 0xc38: 0x20f1, 0xc39: 0x0f41, 0xc3a: 0x0018, 0xc3b: 0x08cd, - 0xc3c: 0x2109, 
0xc3d: 0x10b1, 0xc3e: 0x10b1, 0xc3f: 0x2109, + 0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018, + 0xc06: 0x0018, 0xc07: 0x1e92, 0xc08: 0x1eaa, 0xc09: 0x1ec2, 0xc0a: 0x0018, 0xc0b: 0x0018, + 0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018, + 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x1ed9, + 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018, + 0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340, + 0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040, + 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340, + 0xc30: 0x1f41, 0xc31: 0x0f41, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x1f51, 0xc35: 0x1f61, + 0xc36: 0x1f71, 0xc37: 0x1f81, 0xc38: 0x1f91, 0xc39: 0x1fa1, 0xc3a: 0x1fb2, 0xc3b: 0x07bd, + 0xc3c: 0x1fc2, 0xc3d: 0x1fd2, 0xc3e: 0x1fe2, 0xc3f: 0x0f71, // Block 0x31, offset 0xc40 - 0xc40: 0x08ed, 0xc41: 0x0018, 0xc42: 0x0018, 0xc43: 0x0018, 0xc44: 0x0018, 0xc45: 0x0ef9, - 0xc46: 0x0ef9, 0xc47: 0x0f09, 0xc48: 0x0f41, 0xc49: 0x0259, 0xc4a: 0x0018, 0xc4b: 0x0018, - 0xc4c: 0x0018, 0xc4d: 0x0018, 0xc4e: 0x0008, 0xc4f: 0x0018, 0xc50: 0x2121, 0xc51: 0x2151, - 0xc52: 0x2181, 0xc53: 0x21b9, 0xc54: 0x21e9, 0xc55: 0x2219, 0xc56: 0x2249, 0xc57: 0x2279, - 0xc58: 0x22a9, 0xc59: 0x22d9, 0xc5a: 0x2309, 0xc5b: 0x2339, 0xc5c: 0x2369, 0xc5d: 0x2399, - 0xc5e: 0x23c9, 0xc5f: 0x23f9, 0xc60: 0x0f41, 0xc61: 0x2421, 0xc62: 0x0905, 0xc63: 0x2439, - 0xc64: 0x1089, 0xc65: 0x2451, 0xc66: 0x0925, 0xc67: 0x2469, 0xc68: 0x2491, 0xc69: 0x0369, - 0xc6a: 0x24a9, 0xc6b: 0x0945, 0xc6c: 0x0359, 0xc6d: 0x1159, 0xc6e: 0x0ef9, 0xc6f: 0x0f61, - 0xc70: 0x0f41, 0xc71: 0x2421, 0xc72: 0x0965, 0xc73: 0x2439, 0xc74: 0x1089, 0xc75: 0x2451, - 0xc76: 0x0985, 0xc77: 0x2469, 0xc78: 0x2491, 0xc79: 0x0369, 0xc7a: 0x24a9, 0xc7b: 0x09a5, - 0xc7c: 0x0359, 0xc7d: 0x1159, 0xc7e: 0x0ef9, 0xc7f: 0x0f61, + 0xc40: 0x1f41, 0xc41: 0x00c9, 0xc42: 0x0069, 0xc43: 0x0079, 0xc44: 0x1f51, 0xc45: 0x1f61, + 0xc46: 0x1f71, 0xc47: 0x1f81, 0xc48: 0x1f91, 0xc49: 0x1fa1, 0xc4a: 0x1fb2, 0xc4b: 0x07d5, + 0xc4c: 0x1fc2, 0xc4d: 0x1fd2, 0xc4e: 0x1fe2, 0xc4f: 0x0040, 0xc50: 0x0039, 0xc51: 0x0f09, + 0xc52: 0x00d9, 0xc53: 0x0369, 0xc54: 0x0ff9, 0xc55: 0x0249, 0xc56: 0x0f51, 0xc57: 0x0359, + 0xc58: 0x0f61, 0xc59: 0x0f71, 0xc5a: 0x0f99, 0xc5b: 0x01d9, 0xc5c: 0x0fa9, 0xc5d: 0x0040, + 0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018, + 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x1ff1, 0xc69: 0x0018, + 0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018, + 0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018, + 0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018, + 0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 0x0018, 0xc7f: 0x0018, // Block 0x32, offset 0xc80 - 0xc80: 0x0018, 0xc81: 0x0018, 0xc82: 0x0018, 0xc83: 0x0018, 0xc84: 0x0018, 0xc85: 0x0018, - 0xc86: 0x0018, 0xc87: 0x0018, 0xc88: 0x0018, 0xc89: 0x0018, 0xc8a: 0x0018, 0xc8b: 0x0040, - 0xc8c: 0x0040, 0xc8d: 0x0040, 0xc8e: 0x0040, 0xc8f: 0x0040, 0xc90: 0x0040, 0xc91: 0x0040, - 0xc92: 0x0040, 0xc93: 0x0040, 0xc94: 0x0040, 0xc95: 0x0040, 0xc96: 0x0040, 0xc97: 0x0040, - 0xc98: 0x0040, 0xc99: 0x0040, 0xc9a: 0x0040, 0xc9b: 0x0040, 0xc9c: 0x0040, 0xc9d: 0x0040, - 0xc9e: 0x0040, 0xc9f: 0x0040, 0xca0: 0x00c9, 
0xca1: 0x0069, 0xca2: 0x0079, 0xca3: 0x1f51, - 0xca4: 0x1f61, 0xca5: 0x1f71, 0xca6: 0x1f81, 0xca7: 0x1f91, 0xca8: 0x1fa1, 0xca9: 0x2601, - 0xcaa: 0x2619, 0xcab: 0x2631, 0xcac: 0x2649, 0xcad: 0x2661, 0xcae: 0x2679, 0xcaf: 0x2691, - 0xcb0: 0x26a9, 0xcb1: 0x26c1, 0xcb2: 0x26d9, 0xcb3: 0x26f1, 0xcb4: 0x0a06, 0xcb5: 0x0a26, - 0xcb6: 0x0a46, 0xcb7: 0x0a66, 0xcb8: 0x0a86, 0xcb9: 0x0aa6, 0xcba: 0x0ac6, 0xcbb: 0x0ae6, - 0xcbc: 0x0b06, 0xcbd: 0x270a, 0xcbe: 0x2732, 0xcbf: 0x275a, + 0xc80: 0x07ee, 0xc81: 0x080e, 0xc82: 0x1159, 0xc83: 0x082d, 0xc84: 0x0018, 0xc85: 0x084e, + 0xc86: 0x086e, 0xc87: 0x1011, 0xc88: 0x0018, 0xc89: 0x088d, 0xc8a: 0x0f31, 0xc8b: 0x0249, + 0xc8c: 0x0249, 0xc8d: 0x0249, 0xc8e: 0x0249, 0xc8f: 0x2009, 0xc90: 0x0f41, 0xc91: 0x0f41, + 0xc92: 0x0359, 0xc93: 0x0359, 0xc94: 0x0018, 0xc95: 0x0f71, 0xc96: 0x2021, 0xc97: 0x0018, + 0xc98: 0x0018, 0xc99: 0x0f99, 0xc9a: 0x2039, 0xc9b: 0x0269, 0xc9c: 0x0269, 0xc9d: 0x0269, + 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x2049, 0xca1: 0x08ad, 0xca2: 0x2061, 0xca3: 0x0018, + 0xca4: 0x13d1, 0xca5: 0x0018, 0xca6: 0x2079, 0xca7: 0x0018, 0xca8: 0x13d1, 0xca9: 0x0018, + 0xcaa: 0x0f51, 0xcab: 0x2091, 0xcac: 0x0ee9, 0xcad: 0x1159, 0xcae: 0x0018, 0xcaf: 0x0f09, + 0xcb0: 0x0f09, 0xcb1: 0x1199, 0xcb2: 0x0040, 0xcb3: 0x0f61, 0xcb4: 0x00d9, 0xcb5: 0x20a9, + 0xcb6: 0x20c1, 0xcb7: 0x20d9, 0xcb8: 0x20f1, 0xcb9: 0x0f41, 0xcba: 0x0018, 0xcbb: 0x08cd, + 0xcbc: 0x2109, 0xcbd: 0x10b1, 0xcbe: 0x10b1, 0xcbf: 0x2109, // Block 0x33, offset 0xcc0 - 0xcc0: 0x2782, 0xcc1: 0x27aa, 0xcc2: 0x27d2, 0xcc3: 0x27fa, 0xcc4: 0x2822, 0xcc5: 0x284a, - 0xcc6: 0x2872, 0xcc7: 0x289a, 0xcc8: 0x0040, 0xcc9: 0x0040, 0xcca: 0x0040, 0xccb: 0x0040, - 0xccc: 0x0040, 0xccd: 0x0040, 0xcce: 0x0040, 0xccf: 0x0040, 0xcd0: 0x0040, 0xcd1: 0x0040, - 0xcd2: 0x0040, 0xcd3: 0x0040, 0xcd4: 0x0040, 0xcd5: 0x0040, 0xcd6: 0x0040, 0xcd7: 0x0040, - 0xcd8: 0x0040, 0xcd9: 0x0040, 0xcda: 0x0040, 0xcdb: 0x0040, 0xcdc: 0x0b26, 0xcdd: 0x0b46, - 0xcde: 0x0b66, 0xcdf: 0x0b86, 0xce0: 0x0ba6, 0xce1: 0x0bc6, 0xce2: 0x0be6, 0xce3: 0x0c06, - 0xce4: 0x0c26, 0xce5: 0x0c46, 0xce6: 0x0c66, 0xce7: 0x0c86, 0xce8: 0x0ca6, 0xce9: 0x0cc6, - 0xcea: 0x0ce6, 0xceb: 0x0d06, 0xcec: 0x0d26, 0xced: 0x0d46, 0xcee: 0x0d66, 0xcef: 0x0d86, - 0xcf0: 0x0da6, 0xcf1: 0x0dc6, 0xcf2: 0x0de6, 0xcf3: 0x0e06, 0xcf4: 0x0e26, 0xcf5: 0x0e46, - 0xcf6: 0x0039, 0xcf7: 0x0ee9, 0xcf8: 0x1159, 0xcf9: 0x0ef9, 0xcfa: 0x0f09, 0xcfb: 0x1199, - 0xcfc: 0x0f31, 0xcfd: 0x0249, 0xcfe: 0x0f41, 0xcff: 0x0259, + 0xcc0: 0x08ed, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0ef9, + 0xcc6: 0x0ef9, 0xcc7: 0x0f09, 0xcc8: 0x0f41, 0xcc9: 0x0259, 0xcca: 0x0018, 0xccb: 0x0018, + 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x2121, 0xcd1: 0x2151, + 0xcd2: 0x2181, 0xcd3: 0x21b9, 0xcd4: 0x21e9, 0xcd5: 0x2219, 0xcd6: 0x2249, 0xcd7: 0x2279, + 0xcd8: 0x22a9, 0xcd9: 0x22d9, 0xcda: 0x2309, 0xcdb: 0x2339, 0xcdc: 0x2369, 0xcdd: 0x2399, + 0xcde: 0x23c9, 0xcdf: 0x23f9, 0xce0: 0x0f41, 0xce1: 0x2421, 0xce2: 0x0905, 0xce3: 0x2439, + 0xce4: 0x1089, 0xce5: 0x2451, 0xce6: 0x0925, 0xce7: 0x2469, 0xce8: 0x2491, 0xce9: 0x0369, + 0xcea: 0x24a9, 0xceb: 0x0945, 0xcec: 0x0359, 0xced: 0x1159, 0xcee: 0x0ef9, 0xcef: 0x0f61, + 0xcf0: 0x0f41, 0xcf1: 0x2421, 0xcf2: 0x0965, 0xcf3: 0x2439, 0xcf4: 0x1089, 0xcf5: 0x2451, + 0xcf6: 0x0985, 0xcf7: 0x2469, 0xcf8: 0x2491, 0xcf9: 0x0369, 0xcfa: 0x24a9, 0xcfb: 0x09a5, + 0xcfc: 0x0359, 0xcfd: 0x1159, 0xcfe: 0x0ef9, 0xcff: 0x0f61, // Block 0x34, offset 0xd00 - 0xd00: 0x0f51, 0xd01: 0x0359, 0xd02: 0x0f61, 0xd03: 0x0f71, 0xd04: 0x00d9, 
0xd05: 0x0f99, - 0xd06: 0x2039, 0xd07: 0x0269, 0xd08: 0x01d9, 0xd09: 0x0fa9, 0xd0a: 0x0fb9, 0xd0b: 0x1089, - 0xd0c: 0x0279, 0xd0d: 0x0369, 0xd0e: 0x0289, 0xd0f: 0x13d1, 0xd10: 0x0039, 0xd11: 0x0ee9, - 0xd12: 0x1159, 0xd13: 0x0ef9, 0xd14: 0x0f09, 0xd15: 0x1199, 0xd16: 0x0f31, 0xd17: 0x0249, - 0xd18: 0x0f41, 0xd19: 0x0259, 0xd1a: 0x0f51, 0xd1b: 0x0359, 0xd1c: 0x0f61, 0xd1d: 0x0f71, - 0xd1e: 0x00d9, 0xd1f: 0x0f99, 0xd20: 0x2039, 0xd21: 0x0269, 0xd22: 0x01d9, 0xd23: 0x0fa9, - 0xd24: 0x0fb9, 0xd25: 0x1089, 0xd26: 0x0279, 0xd27: 0x0369, 0xd28: 0x0289, 0xd29: 0x13d1, - 0xd2a: 0x1f41, 0xd2b: 0x0018, 0xd2c: 0x0018, 0xd2d: 0x0018, 0xd2e: 0x0018, 0xd2f: 0x0018, - 0xd30: 0x0018, 0xd31: 0x0018, 0xd32: 0x0018, 0xd33: 0x0018, 0xd34: 0x0018, 0xd35: 0x0018, - 0xd36: 0x0018, 0xd37: 0x0018, 0xd38: 0x0018, 0xd39: 0x0018, 0xd3a: 0x0018, 0xd3b: 0x0018, - 0xd3c: 0x0018, 0xd3d: 0x0018, 0xd3e: 0x0018, 0xd3f: 0x0018, + 0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018, + 0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040, + 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040, + 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040, + 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040, + 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x00c9, 0xd21: 0x0069, 0xd22: 0x0079, 0xd23: 0x1f51, + 0xd24: 0x1f61, 0xd25: 0x1f71, 0xd26: 0x1f81, 0xd27: 0x1f91, 0xd28: 0x1fa1, 0xd29: 0x2601, + 0xd2a: 0x2619, 0xd2b: 0x2631, 0xd2c: 0x2649, 0xd2d: 0x2661, 0xd2e: 0x2679, 0xd2f: 0x2691, + 0xd30: 0x26a9, 0xd31: 0x26c1, 0xd32: 0x26d9, 0xd33: 0x26f1, 0xd34: 0x0a06, 0xd35: 0x0a26, + 0xd36: 0x0a46, 0xd37: 0x0a66, 0xd38: 0x0a86, 0xd39: 0x0aa6, 0xd3a: 0x0ac6, 0xd3b: 0x0ae6, + 0xd3c: 0x0b06, 0xd3d: 0x270a, 0xd3e: 0x2732, 0xd3f: 0x275a, // Block 0x35, offset 0xd40 - 0xd40: 0x0008, 0xd41: 0x0008, 0xd42: 0x0008, 0xd43: 0x0008, 0xd44: 0x0008, 0xd45: 0x0008, - 0xd46: 0x0008, 0xd47: 0x0008, 0xd48: 0x0008, 0xd49: 0x0008, 0xd4a: 0x0008, 0xd4b: 0x0008, - 0xd4c: 0x0008, 0xd4d: 0x0008, 0xd4e: 0x0008, 0xd4f: 0x0008, 0xd50: 0x0008, 0xd51: 0x0008, - 0xd52: 0x0008, 0xd53: 0x0008, 0xd54: 0x0008, 0xd55: 0x0008, 0xd56: 0x0008, 0xd57: 0x0008, - 0xd58: 0x0008, 0xd59: 0x0008, 0xd5a: 0x0008, 0xd5b: 0x0008, 0xd5c: 0x0008, 0xd5d: 0x0008, - 0xd5e: 0x0008, 0xd5f: 0x0040, 0xd60: 0xe00d, 0xd61: 0x0008, 0xd62: 0x2971, 0xd63: 0x0ebd, - 0xd64: 0x2989, 0xd65: 0x0008, 0xd66: 0x0008, 0xd67: 0xe07d, 0xd68: 0x0008, 0xd69: 0xe01d, - 0xd6a: 0x0008, 0xd6b: 0xe03d, 0xd6c: 0x0008, 0xd6d: 0x0fe1, 0xd6e: 0x1281, 0xd6f: 0x0fc9, - 0xd70: 0x1141, 0xd71: 0x0008, 0xd72: 0xe00d, 0xd73: 0x0008, 0xd74: 0x0008, 0xd75: 0xe01d, - 0xd76: 0x0008, 0xd77: 0x0008, 0xd78: 0x0008, 0xd79: 0x0008, 0xd7a: 0x0008, 0xd7b: 0x0008, - 0xd7c: 0x0259, 0xd7d: 0x1089, 0xd7e: 0x29a1, 0xd7f: 0x29b9, + 0xd40: 0x2782, 0xd41: 0x27aa, 0xd42: 0x27d2, 0xd43: 0x27fa, 0xd44: 0x2822, 0xd45: 0x284a, + 0xd46: 0x2872, 0xd47: 0x289a, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040, + 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040, + 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040, + 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b26, 0xd5d: 0x0b46, + 0xd5e: 0x0b66, 0xd5f: 0x0b86, 0xd60: 0x0ba6, 0xd61: 0x0bc6, 0xd62: 0x0be6, 0xd63: 0x0c06, + 0xd64: 0x0c26, 0xd65: 0x0c46, 0xd66: 0x0c66, 0xd67: 0x0c86, 0xd68: 0x0ca6, 0xd69: 0x0cc6, + 0xd6a: 
0x0ce6, 0xd6b: 0x0d06, 0xd6c: 0x0d26, 0xd6d: 0x0d46, 0xd6e: 0x0d66, 0xd6f: 0x0d86, + 0xd70: 0x0da6, 0xd71: 0x0dc6, 0xd72: 0x0de6, 0xd73: 0x0e06, 0xd74: 0x0e26, 0xd75: 0x0e46, + 0xd76: 0x0039, 0xd77: 0x0ee9, 0xd78: 0x1159, 0xd79: 0x0ef9, 0xd7a: 0x0f09, 0xd7b: 0x1199, + 0xd7c: 0x0f31, 0xd7d: 0x0249, 0xd7e: 0x0f41, 0xd7f: 0x0259, // Block 0x36, offset 0xd80 - 0xd80: 0xe00d, 0xd81: 0x0008, 0xd82: 0xe00d, 0xd83: 0x0008, 0xd84: 0xe00d, 0xd85: 0x0008, - 0xd86: 0xe00d, 0xd87: 0x0008, 0xd88: 0xe00d, 0xd89: 0x0008, 0xd8a: 0xe00d, 0xd8b: 0x0008, - 0xd8c: 0xe00d, 0xd8d: 0x0008, 0xd8e: 0xe00d, 0xd8f: 0x0008, 0xd90: 0xe00d, 0xd91: 0x0008, - 0xd92: 0xe00d, 0xd93: 0x0008, 0xd94: 0xe00d, 0xd95: 0x0008, 0xd96: 0xe00d, 0xd97: 0x0008, - 0xd98: 0xe00d, 0xd99: 0x0008, 0xd9a: 0xe00d, 0xd9b: 0x0008, 0xd9c: 0xe00d, 0xd9d: 0x0008, - 0xd9e: 0xe00d, 0xd9f: 0x0008, 0xda0: 0xe00d, 0xda1: 0x0008, 0xda2: 0xe00d, 0xda3: 0x0008, - 0xda4: 0x0008, 0xda5: 0x0018, 0xda6: 0x0018, 0xda7: 0x0018, 0xda8: 0x0018, 0xda9: 0x0018, - 0xdaa: 0x0018, 0xdab: 0xe03d, 0xdac: 0x0008, 0xdad: 0xe01d, 0xdae: 0x0008, 0xdaf: 0x1308, - 0xdb0: 0x1308, 0xdb1: 0x1308, 0xdb2: 0xe00d, 0xdb3: 0x0008, 0xdb4: 0x0040, 0xdb5: 0x0040, - 0xdb6: 0x0040, 0xdb7: 0x0040, 0xdb8: 0x0040, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018, + 0xd80: 0x0f51, 0xd81: 0x0359, 0xd82: 0x0f61, 0xd83: 0x0f71, 0xd84: 0x00d9, 0xd85: 0x0f99, + 0xd86: 0x2039, 0xd87: 0x0269, 0xd88: 0x01d9, 0xd89: 0x0fa9, 0xd8a: 0x0fb9, 0xd8b: 0x1089, + 0xd8c: 0x0279, 0xd8d: 0x0369, 0xd8e: 0x0289, 0xd8f: 0x13d1, 0xd90: 0x0039, 0xd91: 0x0ee9, + 0xd92: 0x1159, 0xd93: 0x0ef9, 0xd94: 0x0f09, 0xd95: 0x1199, 0xd96: 0x0f31, 0xd97: 0x0249, + 0xd98: 0x0f41, 0xd99: 0x0259, 0xd9a: 0x0f51, 0xd9b: 0x0359, 0xd9c: 0x0f61, 0xd9d: 0x0f71, + 0xd9e: 0x00d9, 0xd9f: 0x0f99, 0xda0: 0x2039, 0xda1: 0x0269, 0xda2: 0x01d9, 0xda3: 0x0fa9, + 0xda4: 0x0fb9, 0xda5: 0x1089, 0xda6: 0x0279, 0xda7: 0x0369, 0xda8: 0x0289, 0xda9: 0x13d1, + 0xdaa: 0x1f41, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018, + 0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018, + 0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018, 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018, // Block 0x37, offset 0xdc0 - 0xdc0: 0x26fd, 0xdc1: 0x271d, 0xdc2: 0x273d, 0xdc3: 0x275d, 0xdc4: 0x277d, 0xdc5: 0x279d, - 0xdc6: 0x27bd, 0xdc7: 0x27dd, 0xdc8: 0x27fd, 0xdc9: 0x281d, 0xdca: 0x283d, 0xdcb: 0x285d, - 0xdcc: 0x287d, 0xdcd: 0x289d, 0xdce: 0x28bd, 0xdcf: 0x28dd, 0xdd0: 0x28fd, 0xdd1: 0x291d, - 0xdd2: 0x293d, 0xdd3: 0x295d, 0xdd4: 0x297d, 0xdd5: 0x299d, 0xdd6: 0x0040, 0xdd7: 0x0040, - 0xdd8: 0x0040, 0xdd9: 0x0040, 0xdda: 0x0040, 0xddb: 0x0040, 0xddc: 0x0040, 0xddd: 0x0040, - 0xdde: 0x0040, 0xddf: 0x0040, 0xde0: 0x0040, 0xde1: 0x0040, 0xde2: 0x0040, 0xde3: 0x0040, - 0xde4: 0x0040, 0xde5: 0x0040, 0xde6: 0x0040, 0xde7: 0x0040, 0xde8: 0x0040, 0xde9: 0x0040, - 0xdea: 0x0040, 0xdeb: 0x0040, 0xdec: 0x0040, 0xded: 0x0040, 0xdee: 0x0040, 0xdef: 0x0040, - 0xdf0: 0x0040, 0xdf1: 0x0040, 0xdf2: 0x0040, 0xdf3: 0x0040, 0xdf4: 0x0040, 0xdf5: 0x0040, - 0xdf6: 0x0040, 0xdf7: 0x0040, 0xdf8: 0x0040, 0xdf9: 0x0040, 0xdfa: 0x0040, 0xdfb: 0x0040, - 0xdfc: 0x0040, 0xdfd: 0x0040, 0xdfe: 0x0040, 0xdff: 0x0040, + 0xdc0: 0x0008, 0xdc1: 0x0008, 0xdc2: 0x0008, 0xdc3: 0x0008, 0xdc4: 0x0008, 0xdc5: 0x0008, + 0xdc6: 0x0008, 0xdc7: 0x0008, 0xdc8: 0x0008, 0xdc9: 0x0008, 0xdca: 0x0008, 0xdcb: 0x0008, + 0xdcc: 0x0008, 0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008, + 0xdd2: 
0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008, + 0xdd8: 0x0008, 0xdd9: 0x0008, 0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008, + 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x2971, 0xde3: 0x0ebd, + 0xde4: 0x2989, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d, + 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0fe1, 0xdee: 0x1281, 0xdef: 0x0fc9, + 0xdf0: 0x1141, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d, + 0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008, + 0xdfc: 0x0259, 0xdfd: 0x1089, 0xdfe: 0x29a1, 0xdff: 0x29b9, // Block 0x38, offset 0xe00 - 0xe00: 0x000a, 0xe01: 0x0018, 0xe02: 0x29d1, 0xe03: 0x0018, 0xe04: 0x0018, 0xe05: 0x0008, - 0xe06: 0x0008, 0xe07: 0x0008, 0xe08: 0x0018, 0xe09: 0x0018, 0xe0a: 0x0018, 0xe0b: 0x0018, - 0xe0c: 0x0018, 0xe0d: 0x0018, 0xe0e: 0x0018, 0xe0f: 0x0018, 0xe10: 0x0018, 0xe11: 0x0018, - 0xe12: 0x0018, 0xe13: 0x0018, 0xe14: 0x0018, 0xe15: 0x0018, 0xe16: 0x0018, 0xe17: 0x0018, - 0xe18: 0x0018, 0xe19: 0x0018, 0xe1a: 0x0018, 0xe1b: 0x0018, 0xe1c: 0x0018, 0xe1d: 0x0018, - 0xe1e: 0x0018, 0xe1f: 0x0018, 0xe20: 0x0018, 0xe21: 0x0018, 0xe22: 0x0018, 0xe23: 0x0018, - 0xe24: 0x0018, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018, - 0xe2a: 0x1308, 0xe2b: 0x1308, 0xe2c: 0x1308, 0xe2d: 0x1308, 0xe2e: 0x1018, 0xe2f: 0x1018, - 0xe30: 0x0018, 0xe31: 0x0018, 0xe32: 0x0018, 0xe33: 0x0018, 0xe34: 0x0018, 0xe35: 0x0018, - 0xe36: 0xe125, 0xe37: 0x0018, 0xe38: 0x29bd, 0xe39: 0x29dd, 0xe3a: 0x29fd, 0xe3b: 0x0018, - 0xe3c: 0x0008, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018, + 0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008, + 0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008, + 0xe0c: 0xe00d, 0xe0d: 0x0008, 0xe0e: 0xe00d, 0xe0f: 0x0008, 0xe10: 0xe00d, 0xe11: 0x0008, + 0xe12: 0xe00d, 0xe13: 0x0008, 0xe14: 0xe00d, 0xe15: 0x0008, 0xe16: 0xe00d, 0xe17: 0x0008, + 0xe18: 0xe00d, 0xe19: 0x0008, 0xe1a: 0xe00d, 0xe1b: 0x0008, 0xe1c: 0xe00d, 0xe1d: 0x0008, + 0xe1e: 0xe00d, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0xe00d, 0xe23: 0x0008, + 0xe24: 0x0008, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018, + 0xe2a: 0x0018, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0xe01d, 0xe2e: 0x0008, 0xe2f: 0x3308, + 0xe30: 0x3308, 0xe31: 0x3308, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0040, 0xe35: 0x0040, + 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0018, 0xe3a: 0x0018, 0xe3b: 0x0018, + 0xe3c: 0x0018, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018, // Block 0x39, offset 0xe40 - 0xe40: 0x2b3d, 0xe41: 0x2b5d, 0xe42: 0x2b7d, 0xe43: 0x2b9d, 0xe44: 0x2bbd, 0xe45: 0x2bdd, - 0xe46: 0x2bdd, 0xe47: 0x2bdd, 0xe48: 0x2bfd, 0xe49: 0x2bfd, 0xe4a: 0x2bfd, 0xe4b: 0x2bfd, - 0xe4c: 0x2c1d, 0xe4d: 0x2c1d, 0xe4e: 0x2c1d, 0xe4f: 0x2c3d, 0xe50: 0x2c5d, 0xe51: 0x2c5d, - 0xe52: 0x2a7d, 0xe53: 0x2a7d, 0xe54: 0x2c5d, 0xe55: 0x2c5d, 0xe56: 0x2c7d, 0xe57: 0x2c7d, - 0xe58: 0x2c5d, 0xe59: 0x2c5d, 0xe5a: 0x2a7d, 0xe5b: 0x2a7d, 0xe5c: 0x2c5d, 0xe5d: 0x2c5d, - 0xe5e: 0x2c3d, 0xe5f: 0x2c3d, 0xe60: 0x2c9d, 0xe61: 0x2c9d, 0xe62: 0x2cbd, 0xe63: 0x2cbd, - 0xe64: 0x0040, 0xe65: 0x2cdd, 0xe66: 0x2cfd, 0xe67: 0x2d1d, 0xe68: 0x2d1d, 0xe69: 0x2d3d, - 0xe6a: 0x2d5d, 0xe6b: 0x2d7d, 0xe6c: 0x2d9d, 0xe6d: 0x2dbd, 0xe6e: 0x2ddd, 0xe6f: 0x2dfd, - 0xe70: 0x2e1d, 0xe71: 0x2e3d, 0xe72: 0x2e3d, 0xe73: 0x2e5d, 0xe74: 0x2e7d, 0xe75: 0x2e7d, - 0xe76: 
0x2e9d, 0xe77: 0x2ebd, 0xe78: 0x2e5d, 0xe79: 0x2edd, 0xe7a: 0x2efd, 0xe7b: 0x2edd, - 0xe7c: 0x2e5d, 0xe7d: 0x2f1d, 0xe7e: 0x2f3d, 0xe7f: 0x2f5d, + 0xe40: 0x26fd, 0xe41: 0x271d, 0xe42: 0x273d, 0xe43: 0x275d, 0xe44: 0x277d, 0xe45: 0x279d, + 0xe46: 0x27bd, 0xe47: 0x27dd, 0xe48: 0x27fd, 0xe49: 0x281d, 0xe4a: 0x283d, 0xe4b: 0x285d, + 0xe4c: 0x287d, 0xe4d: 0x289d, 0xe4e: 0x28bd, 0xe4f: 0x28dd, 0xe50: 0x28fd, 0xe51: 0x291d, + 0xe52: 0x293d, 0xe53: 0x295d, 0xe54: 0x297d, 0xe55: 0x299d, 0xe56: 0x0040, 0xe57: 0x0040, + 0xe58: 0x0040, 0xe59: 0x0040, 0xe5a: 0x0040, 0xe5b: 0x0040, 0xe5c: 0x0040, 0xe5d: 0x0040, + 0xe5e: 0x0040, 0xe5f: 0x0040, 0xe60: 0x0040, 0xe61: 0x0040, 0xe62: 0x0040, 0xe63: 0x0040, + 0xe64: 0x0040, 0xe65: 0x0040, 0xe66: 0x0040, 0xe67: 0x0040, 0xe68: 0x0040, 0xe69: 0x0040, + 0xe6a: 0x0040, 0xe6b: 0x0040, 0xe6c: 0x0040, 0xe6d: 0x0040, 0xe6e: 0x0040, 0xe6f: 0x0040, + 0xe70: 0x0040, 0xe71: 0x0040, 0xe72: 0x0040, 0xe73: 0x0040, 0xe74: 0x0040, 0xe75: 0x0040, + 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040, + 0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040, // Block 0x3a, offset 0xe80 - 0xe80: 0x2f7d, 0xe81: 0x2f9d, 0xe82: 0x2cfd, 0xe83: 0x2cdd, 0xe84: 0x2fbd, 0xe85: 0x2fdd, - 0xe86: 0x2ffd, 0xe87: 0x301d, 0xe88: 0x303d, 0xe89: 0x305d, 0xe8a: 0x307d, 0xe8b: 0x309d, - 0xe8c: 0x30bd, 0xe8d: 0x30dd, 0xe8e: 0x30fd, 0xe8f: 0x0040, 0xe90: 0x0018, 0xe91: 0x0018, - 0xe92: 0x311d, 0xe93: 0x313d, 0xe94: 0x315d, 0xe95: 0x317d, 0xe96: 0x319d, 0xe97: 0x31bd, - 0xe98: 0x31dd, 0xe99: 0x31fd, 0xe9a: 0x321d, 0xe9b: 0x323d, 0xe9c: 0x315d, 0xe9d: 0x325d, - 0xe9e: 0x327d, 0xe9f: 0x329d, 0xea0: 0x0008, 0xea1: 0x0008, 0xea2: 0x0008, 0xea3: 0x0008, - 0xea4: 0x0008, 0xea5: 0x0008, 0xea6: 0x0008, 0xea7: 0x0008, 0xea8: 0x0008, 0xea9: 0x0008, - 0xeaa: 0x0008, 0xeab: 0x0008, 0xeac: 0x0008, 0xead: 0x0008, 0xeae: 0x0008, 0xeaf: 0x0008, - 0xeb0: 0x0008, 0xeb1: 0x0008, 0xeb2: 0x0008, 0xeb3: 0x0008, 0xeb4: 0x0008, 0xeb5: 0x0008, - 0xeb6: 0x0008, 0xeb7: 0x0008, 0xeb8: 0x0008, 0xeb9: 0x0008, 0xeba: 0x0008, 0xebb: 0x0040, - 0xebc: 0x0040, 0xebd: 0x0040, 0xebe: 0x0040, 0xebf: 0x0040, + 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x29d1, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008, + 0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018, + 0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018, + 0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018, + 0xe98: 0x0018, 0xe99: 0x0018, 0xe9a: 0x0018, 0xe9b: 0x0018, 0xe9c: 0x0018, 0xe9d: 0x0018, + 0xe9e: 0x0018, 0xe9f: 0x0018, 0xea0: 0x0018, 0xea1: 0x0018, 0xea2: 0x0018, 0xea3: 0x0018, + 0xea4: 0x0018, 0xea5: 0x0018, 0xea6: 0x0018, 0xea7: 0x0018, 0xea8: 0x0018, 0xea9: 0x0018, + 0xeaa: 0x3308, 0xeab: 0x3308, 0xeac: 0x3308, 0xead: 0x3308, 0xeae: 0x3018, 0xeaf: 0x3018, + 0xeb0: 0x0018, 0xeb1: 0x0018, 0xeb2: 0x0018, 0xeb3: 0x0018, 0xeb4: 0x0018, 0xeb5: 0x0018, + 0xeb6: 0xe125, 0xeb7: 0x0018, 0xeb8: 0x29bd, 0xeb9: 0x29dd, 0xeba: 0x29fd, 0xebb: 0x0018, + 0xebc: 0x0008, 0xebd: 0x0018, 0xebe: 0x0018, 0xebf: 0x0018, // Block 0x3b, offset 0xec0 - 0xec0: 0x36a2, 0xec1: 0x36d2, 0xec2: 0x3702, 0xec3: 0x3732, 0xec4: 0x32bd, 0xec5: 0x32dd, - 0xec6: 0x32fd, 0xec7: 0x331d, 0xec8: 0x0018, 0xec9: 0x0018, 0xeca: 0x0018, 0xecb: 0x0018, - 0xecc: 0x0018, 0xecd: 0x0018, 0xece: 0x0018, 0xecf: 0x0018, 0xed0: 0x333d, 0xed1: 0x3761, - 0xed2: 0x3779, 0xed3: 0x3791, 0xed4: 0x37a9, 0xed5: 0x37c1, 0xed6: 0x37d9, 0xed7: 0x37f1, - 0xed8: 0x3809, 0xed9: 0x3821, 0xeda: 
0x3839, 0xedb: 0x3851, 0xedc: 0x3869, 0xedd: 0x3881, - 0xede: 0x3899, 0xedf: 0x38b1, 0xee0: 0x335d, 0xee1: 0x337d, 0xee2: 0x339d, 0xee3: 0x33bd, - 0xee4: 0x33dd, 0xee5: 0x33dd, 0xee6: 0x33fd, 0xee7: 0x341d, 0xee8: 0x343d, 0xee9: 0x345d, - 0xeea: 0x347d, 0xeeb: 0x349d, 0xeec: 0x34bd, 0xeed: 0x34dd, 0xeee: 0x34fd, 0xeef: 0x351d, - 0xef0: 0x353d, 0xef1: 0x355d, 0xef2: 0x357d, 0xef3: 0x359d, 0xef4: 0x35bd, 0xef5: 0x35dd, - 0xef6: 0x35fd, 0xef7: 0x361d, 0xef8: 0x363d, 0xef9: 0x365d, 0xefa: 0x367d, 0xefb: 0x369d, - 0xefc: 0x38c9, 0xefd: 0x3901, 0xefe: 0x36bd, 0xeff: 0x0018, + 0xec0: 0x2b3d, 0xec1: 0x2b5d, 0xec2: 0x2b7d, 0xec3: 0x2b9d, 0xec4: 0x2bbd, 0xec5: 0x2bdd, + 0xec6: 0x2bdd, 0xec7: 0x2bdd, 0xec8: 0x2bfd, 0xec9: 0x2bfd, 0xeca: 0x2bfd, 0xecb: 0x2bfd, + 0xecc: 0x2c1d, 0xecd: 0x2c1d, 0xece: 0x2c1d, 0xecf: 0x2c3d, 0xed0: 0x2c5d, 0xed1: 0x2c5d, + 0xed2: 0x2a7d, 0xed3: 0x2a7d, 0xed4: 0x2c5d, 0xed5: 0x2c5d, 0xed6: 0x2c7d, 0xed7: 0x2c7d, + 0xed8: 0x2c5d, 0xed9: 0x2c5d, 0xeda: 0x2a7d, 0xedb: 0x2a7d, 0xedc: 0x2c5d, 0xedd: 0x2c5d, + 0xede: 0x2c3d, 0xedf: 0x2c3d, 0xee0: 0x2c9d, 0xee1: 0x2c9d, 0xee2: 0x2cbd, 0xee3: 0x2cbd, + 0xee4: 0x0040, 0xee5: 0x2cdd, 0xee6: 0x2cfd, 0xee7: 0x2d1d, 0xee8: 0x2d1d, 0xee9: 0x2d3d, + 0xeea: 0x2d5d, 0xeeb: 0x2d7d, 0xeec: 0x2d9d, 0xeed: 0x2dbd, 0xeee: 0x2ddd, 0xeef: 0x2dfd, + 0xef0: 0x2e1d, 0xef1: 0x2e3d, 0xef2: 0x2e3d, 0xef3: 0x2e5d, 0xef4: 0x2e7d, 0xef5: 0x2e7d, + 0xef6: 0x2e9d, 0xef7: 0x2ebd, 0xef8: 0x2e5d, 0xef9: 0x2edd, 0xefa: 0x2efd, 0xefb: 0x2edd, + 0xefc: 0x2e5d, 0xefd: 0x2f1d, 0xefe: 0x2f3d, 0xeff: 0x2f5d, // Block 0x3c, offset 0xf00 - 0xf00: 0x36dd, 0xf01: 0x36fd, 0xf02: 0x371d, 0xf03: 0x373d, 0xf04: 0x375d, 0xf05: 0x377d, - 0xf06: 0x379d, 0xf07: 0x37bd, 0xf08: 0x37dd, 0xf09: 0x37fd, 0xf0a: 0x381d, 0xf0b: 0x383d, - 0xf0c: 0x385d, 0xf0d: 0x387d, 0xf0e: 0x389d, 0xf0f: 0x38bd, 0xf10: 0x38dd, 0xf11: 0x38fd, - 0xf12: 0x391d, 0xf13: 0x393d, 0xf14: 0x395d, 0xf15: 0x397d, 0xf16: 0x399d, 0xf17: 0x39bd, - 0xf18: 0x39dd, 0xf19: 0x39fd, 0xf1a: 0x3a1d, 0xf1b: 0x3a3d, 0xf1c: 0x3a5d, 0xf1d: 0x3a7d, - 0xf1e: 0x3a9d, 0xf1f: 0x3abd, 0xf20: 0x3add, 0xf21: 0x3afd, 0xf22: 0x3b1d, 0xf23: 0x3b3d, - 0xf24: 0x3b5d, 0xf25: 0x3b7d, 0xf26: 0x127d, 0xf27: 0x3b9d, 0xf28: 0x3bbd, 0xf29: 0x3bdd, - 0xf2a: 0x3bfd, 0xf2b: 0x3c1d, 0xf2c: 0x3c3d, 0xf2d: 0x3c5d, 0xf2e: 0x239d, 0xf2f: 0x3c7d, - 0xf30: 0x3c9d, 0xf31: 0x3939, 0xf32: 0x3951, 0xf33: 0x3969, 0xf34: 0x3981, 0xf35: 0x3999, - 0xf36: 0x39b1, 0xf37: 0x39c9, 0xf38: 0x39e1, 0xf39: 0x39f9, 0xf3a: 0x3a11, 0xf3b: 0x3a29, - 0xf3c: 0x3a41, 0xf3d: 0x3a59, 0xf3e: 0x3a71, 0xf3f: 0x3a89, + 0xf00: 0x2f7d, 0xf01: 0x2f9d, 0xf02: 0x2cfd, 0xf03: 0x2cdd, 0xf04: 0x2fbd, 0xf05: 0x2fdd, + 0xf06: 0x2ffd, 0xf07: 0x301d, 0xf08: 0x303d, 0xf09: 0x305d, 0xf0a: 0x307d, 0xf0b: 0x309d, + 0xf0c: 0x30bd, 0xf0d: 0x30dd, 0xf0e: 0x30fd, 0xf0f: 0x0040, 0xf10: 0x0018, 0xf11: 0x0018, + 0xf12: 0x311d, 0xf13: 0x313d, 0xf14: 0x315d, 0xf15: 0x317d, 0xf16: 0x319d, 0xf17: 0x31bd, + 0xf18: 0x31dd, 0xf19: 0x31fd, 0xf1a: 0x321d, 0xf1b: 0x323d, 0xf1c: 0x315d, 0xf1d: 0x325d, + 0xf1e: 0x327d, 0xf1f: 0x329d, 0xf20: 0x0008, 0xf21: 0x0008, 0xf22: 0x0008, 0xf23: 0x0008, + 0xf24: 0x0008, 0xf25: 0x0008, 0xf26: 0x0008, 0xf27: 0x0008, 0xf28: 0x0008, 0xf29: 0x0008, + 0xf2a: 0x0008, 0xf2b: 0x0008, 0xf2c: 0x0008, 0xf2d: 0x0008, 0xf2e: 0x0008, 0xf2f: 0x0008, + 0xf30: 0x0008, 0xf31: 0x0008, 0xf32: 0x0008, 0xf33: 0x0008, 0xf34: 0x0008, 0xf35: 0x0008, + 0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0040, + 0xf3c: 0x0040, 0xf3d: 0x0040, 0xf3e: 0x0040, 0xf3f: 0x0040, // 
Block 0x3d, offset 0xf40 - 0xf40: 0x3aa1, 0xf41: 0x3ac9, 0xf42: 0x3af1, 0xf43: 0x3b19, 0xf44: 0x3b41, 0xf45: 0x3b69, - 0xf46: 0x3b91, 0xf47: 0x3bb9, 0xf48: 0x3be1, 0xf49: 0x3c09, 0xf4a: 0x3c39, 0xf4b: 0x3c69, - 0xf4c: 0x3c99, 0xf4d: 0x3cbd, 0xf4e: 0x3cb1, 0xf4f: 0x3cdd, 0xf50: 0x3cfd, 0xf51: 0x3d15, - 0xf52: 0x3d2d, 0xf53: 0x3d45, 0xf54: 0x3d5d, 0xf55: 0x3d5d, 0xf56: 0x3d45, 0xf57: 0x3d75, - 0xf58: 0x07bd, 0xf59: 0x3d8d, 0xf5a: 0x3da5, 0xf5b: 0x3dbd, 0xf5c: 0x3dd5, 0xf5d: 0x3ded, - 0xf5e: 0x3e05, 0xf5f: 0x3e1d, 0xf60: 0x3e35, 0xf61: 0x3e4d, 0xf62: 0x3e65, 0xf63: 0x3e7d, - 0xf64: 0x3e95, 0xf65: 0x3e95, 0xf66: 0x3ead, 0xf67: 0x3ead, 0xf68: 0x3ec5, 0xf69: 0x3ec5, - 0xf6a: 0x3edd, 0xf6b: 0x3ef5, 0xf6c: 0x3f0d, 0xf6d: 0x3f25, 0xf6e: 0x3f3d, 0xf6f: 0x3f3d, - 0xf70: 0x3f55, 0xf71: 0x3f55, 0xf72: 0x3f55, 0xf73: 0x3f6d, 0xf74: 0x3f85, 0xf75: 0x3f9d, - 0xf76: 0x3fb5, 0xf77: 0x3f9d, 0xf78: 0x3fcd, 0xf79: 0x3fe5, 0xf7a: 0x3f6d, 0xf7b: 0x3ffd, - 0xf7c: 0x4015, 0xf7d: 0x4015, 0xf7e: 0x4015, 0xf7f: 0x0040, + 0xf40: 0x36a2, 0xf41: 0x36d2, 0xf42: 0x3702, 0xf43: 0x3732, 0xf44: 0x32bd, 0xf45: 0x32dd, + 0xf46: 0x32fd, 0xf47: 0x331d, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018, + 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x333d, 0xf51: 0x3761, + 0xf52: 0x3779, 0xf53: 0x3791, 0xf54: 0x37a9, 0xf55: 0x37c1, 0xf56: 0x37d9, 0xf57: 0x37f1, + 0xf58: 0x3809, 0xf59: 0x3821, 0xf5a: 0x3839, 0xf5b: 0x3851, 0xf5c: 0x3869, 0xf5d: 0x3881, + 0xf5e: 0x3899, 0xf5f: 0x38b1, 0xf60: 0x335d, 0xf61: 0x337d, 0xf62: 0x339d, 0xf63: 0x33bd, + 0xf64: 0x33dd, 0xf65: 0x33dd, 0xf66: 0x33fd, 0xf67: 0x341d, 0xf68: 0x343d, 0xf69: 0x345d, + 0xf6a: 0x347d, 0xf6b: 0x349d, 0xf6c: 0x34bd, 0xf6d: 0x34dd, 0xf6e: 0x34fd, 0xf6f: 0x351d, + 0xf70: 0x353d, 0xf71: 0x355d, 0xf72: 0x357d, 0xf73: 0x359d, 0xf74: 0x35bd, 0xf75: 0x35dd, + 0xf76: 0x35fd, 0xf77: 0x361d, 0xf78: 0x363d, 0xf79: 0x365d, 0xf7a: 0x367d, 0xf7b: 0x369d, + 0xf7c: 0x38c9, 0xf7d: 0x3901, 0xf7e: 0x36bd, 0xf7f: 0x0018, // Block 0x3e, offset 0xf80 - 0xf80: 0x3cc9, 0xf81: 0x3d31, 0xf82: 0x3d99, 0xf83: 0x3e01, 0xf84: 0x3e51, 0xf85: 0x3eb9, - 0xf86: 0x3f09, 0xf87: 0x3f59, 0xf88: 0x3fd9, 0xf89: 0x4041, 0xf8a: 0x4091, 0xf8b: 0x40e1, - 0xf8c: 0x4131, 0xf8d: 0x4199, 0xf8e: 0x4201, 0xf8f: 0x4251, 0xf90: 0x42a1, 0xf91: 0x42d9, - 0xf92: 0x4329, 0xf93: 0x4391, 0xf94: 0x43f9, 0xf95: 0x4431, 0xf96: 0x44b1, 0xf97: 0x4549, - 0xf98: 0x45c9, 0xf99: 0x4619, 0xf9a: 0x4699, 0xf9b: 0x4719, 0xf9c: 0x4781, 0xf9d: 0x47d1, - 0xf9e: 0x4821, 0xf9f: 0x4871, 0xfa0: 0x48d9, 0xfa1: 0x4959, 0xfa2: 0x49c1, 0xfa3: 0x4a11, - 0xfa4: 0x4a61, 0xfa5: 0x4ab1, 0xfa6: 0x4ae9, 0xfa7: 0x4b21, 0xfa8: 0x4b59, 0xfa9: 0x4b91, - 0xfaa: 0x4be1, 0xfab: 0x4c31, 0xfac: 0x4cb1, 0xfad: 0x4d01, 0xfae: 0x4d69, 0xfaf: 0x4de9, - 0xfb0: 0x4e39, 0xfb1: 0x4e71, 0xfb2: 0x4ea9, 0xfb3: 0x4f29, 0xfb4: 0x4f91, 0xfb5: 0x5011, - 0xfb6: 0x5061, 0xfb7: 0x50e1, 0xfb8: 0x5119, 0xfb9: 0x5169, 0xfba: 0x51b9, 0xfbb: 0x5209, - 0xfbc: 0x5259, 0xfbd: 0x52a9, 0xfbe: 0x5311, 0xfbf: 0x5361, + 0xf80: 0x36dd, 0xf81: 0x36fd, 0xf82: 0x371d, 0xf83: 0x373d, 0xf84: 0x375d, 0xf85: 0x377d, + 0xf86: 0x379d, 0xf87: 0x37bd, 0xf88: 0x37dd, 0xf89: 0x37fd, 0xf8a: 0x381d, 0xf8b: 0x383d, + 0xf8c: 0x385d, 0xf8d: 0x387d, 0xf8e: 0x389d, 0xf8f: 0x38bd, 0xf90: 0x38dd, 0xf91: 0x38fd, + 0xf92: 0x391d, 0xf93: 0x393d, 0xf94: 0x395d, 0xf95: 0x397d, 0xf96: 0x399d, 0xf97: 0x39bd, + 0xf98: 0x39dd, 0xf99: 0x39fd, 0xf9a: 0x3a1d, 0xf9b: 0x3a3d, 0xf9c: 0x3a5d, 0xf9d: 0x3a7d, + 0xf9e: 0x3a9d, 0xf9f: 0x3abd, 0xfa0: 0x3add, 0xfa1: 0x3afd, 0xfa2: 0x3b1d, 0xfa3: 0x3b3d, + 
0xfa4: 0x3b5d, 0xfa5: 0x3b7d, 0xfa6: 0x127d, 0xfa7: 0x3b9d, 0xfa8: 0x3bbd, 0xfa9: 0x3bdd, + 0xfaa: 0x3bfd, 0xfab: 0x3c1d, 0xfac: 0x3c3d, 0xfad: 0x3c5d, 0xfae: 0x239d, 0xfaf: 0x3c7d, + 0xfb0: 0x3c9d, 0xfb1: 0x3939, 0xfb2: 0x3951, 0xfb3: 0x3969, 0xfb4: 0x3981, 0xfb5: 0x3999, + 0xfb6: 0x39b1, 0xfb7: 0x39c9, 0xfb8: 0x39e1, 0xfb9: 0x39f9, 0xfba: 0x3a11, 0xfbb: 0x3a29, + 0xfbc: 0x3a41, 0xfbd: 0x3a59, 0xfbe: 0x3a71, 0xfbf: 0x3a89, // Block 0x3f, offset 0xfc0 - 0xfc0: 0x5399, 0xfc1: 0x53e9, 0xfc2: 0x5439, 0xfc3: 0x5489, 0xfc4: 0x54f1, 0xfc5: 0x5541, - 0xfc6: 0x5591, 0xfc7: 0x55e1, 0xfc8: 0x5661, 0xfc9: 0x56c9, 0xfca: 0x5701, 0xfcb: 0x5781, - 0xfcc: 0x57b9, 0xfcd: 0x5821, 0xfce: 0x5889, 0xfcf: 0x58d9, 0xfd0: 0x5929, 0xfd1: 0x5979, - 0xfd2: 0x59e1, 0xfd3: 0x5a19, 0xfd4: 0x5a69, 0xfd5: 0x5ad1, 0xfd6: 0x5b09, 0xfd7: 0x5b89, - 0xfd8: 0x5bd9, 0xfd9: 0x5c01, 0xfda: 0x5c29, 0xfdb: 0x5c51, 0xfdc: 0x5c79, 0xfdd: 0x5ca1, - 0xfde: 0x5cc9, 0xfdf: 0x5cf1, 0xfe0: 0x5d19, 0xfe1: 0x5d41, 0xfe2: 0x5d69, 0xfe3: 0x5d99, - 0xfe4: 0x5dc9, 0xfe5: 0x5df9, 0xfe6: 0x5e29, 0xfe7: 0x5e59, 0xfe8: 0x5e89, 0xfe9: 0x5eb9, - 0xfea: 0x5ee9, 0xfeb: 0x5f19, 0xfec: 0x5f49, 0xfed: 0x5f79, 0xfee: 0x5fa9, 0xfef: 0x5fd9, - 0xff0: 0x6009, 0xff1: 0x402d, 0xff2: 0x6039, 0xff3: 0x6051, 0xff4: 0x404d, 0xff5: 0x6069, - 0xff6: 0x6081, 0xff7: 0x6099, 0xff8: 0x406d, 0xff9: 0x406d, 0xffa: 0x60b1, 0xffb: 0x60c9, - 0xffc: 0x6101, 0xffd: 0x6139, 0xffe: 0x6171, 0xfff: 0x61a9, + 0xfc0: 0x3aa1, 0xfc1: 0x3ac9, 0xfc2: 0x3af1, 0xfc3: 0x3b19, 0xfc4: 0x3b41, 0xfc5: 0x3b69, + 0xfc6: 0x3b91, 0xfc7: 0x3bb9, 0xfc8: 0x3be1, 0xfc9: 0x3c09, 0xfca: 0x3c39, 0xfcb: 0x3c69, + 0xfcc: 0x3c99, 0xfcd: 0x3cbd, 0xfce: 0x3cb1, 0xfcf: 0x3cdd, 0xfd0: 0x3cfd, 0xfd1: 0x3d15, + 0xfd2: 0x3d2d, 0xfd3: 0x3d45, 0xfd4: 0x3d5d, 0xfd5: 0x3d5d, 0xfd6: 0x3d45, 0xfd7: 0x3d75, + 0xfd8: 0x07bd, 0xfd9: 0x3d8d, 0xfda: 0x3da5, 0xfdb: 0x3dbd, 0xfdc: 0x3dd5, 0xfdd: 0x3ded, + 0xfde: 0x3e05, 0xfdf: 0x3e1d, 0xfe0: 0x3e35, 0xfe1: 0x3e4d, 0xfe2: 0x3e65, 0xfe3: 0x3e7d, + 0xfe4: 0x3e95, 0xfe5: 0x3e95, 0xfe6: 0x3ead, 0xfe7: 0x3ead, 0xfe8: 0x3ec5, 0xfe9: 0x3ec5, + 0xfea: 0x3edd, 0xfeb: 0x3ef5, 0xfec: 0x3f0d, 0xfed: 0x3f25, 0xfee: 0x3f3d, 0xfef: 0x3f3d, + 0xff0: 0x3f55, 0xff1: 0x3f55, 0xff2: 0x3f55, 0xff3: 0x3f6d, 0xff4: 0x3f85, 0xff5: 0x3f9d, + 0xff6: 0x3fb5, 0xff7: 0x3f9d, 0xff8: 0x3fcd, 0xff9: 0x3fe5, 0xffa: 0x3f6d, 0xffb: 0x3ffd, + 0xffc: 0x4015, 0xffd: 0x4015, 0xffe: 0x4015, 0xfff: 0x0040, // Block 0x40, offset 0x1000 - 0x1000: 0x6211, 0x1001: 0x6229, 0x1002: 0x408d, 0x1003: 0x6241, 0x1004: 0x6259, 0x1005: 0x6271, - 0x1006: 0x6289, 0x1007: 0x62a1, 0x1008: 0x40ad, 0x1009: 0x62b9, 0x100a: 0x62e1, 0x100b: 0x62f9, - 0x100c: 0x40cd, 0x100d: 0x40cd, 0x100e: 0x6311, 0x100f: 0x6329, 0x1010: 0x6341, 0x1011: 0x40ed, - 0x1012: 0x410d, 0x1013: 0x412d, 0x1014: 0x414d, 0x1015: 0x416d, 0x1016: 0x6359, 0x1017: 0x6371, - 0x1018: 0x6389, 0x1019: 0x63a1, 0x101a: 0x63b9, 0x101b: 0x418d, 0x101c: 0x63d1, 0x101d: 0x63e9, - 0x101e: 0x6401, 0x101f: 0x41ad, 0x1020: 0x41cd, 0x1021: 0x6419, 0x1022: 0x41ed, 0x1023: 0x420d, - 0x1024: 0x422d, 0x1025: 0x6431, 0x1026: 0x424d, 0x1027: 0x6449, 0x1028: 0x6479, 0x1029: 0x6211, - 0x102a: 0x426d, 0x102b: 0x428d, 0x102c: 0x42ad, 0x102d: 0x42cd, 0x102e: 0x64b1, 0x102f: 0x64f1, - 0x1030: 0x6539, 0x1031: 0x6551, 0x1032: 0x42ed, 0x1033: 0x6569, 0x1034: 0x6581, 0x1035: 0x6599, - 0x1036: 0x430d, 0x1037: 0x65b1, 0x1038: 0x65c9, 0x1039: 0x65b1, 0x103a: 0x65e1, 0x103b: 0x65f9, - 0x103c: 0x432d, 0x103d: 0x6611, 0x103e: 0x6629, 0x103f: 0x6611, + 0x1000: 0x3cc9, 0x1001: 0x3d31, 0x1002: 0x3d99, 0x1003: 
0x3e01, 0x1004: 0x3e51, 0x1005: 0x3eb9, + 0x1006: 0x3f09, 0x1007: 0x3f59, 0x1008: 0x3fd9, 0x1009: 0x4041, 0x100a: 0x4091, 0x100b: 0x40e1, + 0x100c: 0x4131, 0x100d: 0x4199, 0x100e: 0x4201, 0x100f: 0x4251, 0x1010: 0x42a1, 0x1011: 0x42d9, + 0x1012: 0x4329, 0x1013: 0x4391, 0x1014: 0x43f9, 0x1015: 0x4431, 0x1016: 0x44b1, 0x1017: 0x4549, + 0x1018: 0x45c9, 0x1019: 0x4619, 0x101a: 0x4699, 0x101b: 0x4719, 0x101c: 0x4781, 0x101d: 0x47d1, + 0x101e: 0x4821, 0x101f: 0x4871, 0x1020: 0x48d9, 0x1021: 0x4959, 0x1022: 0x49c1, 0x1023: 0x4a11, + 0x1024: 0x4a61, 0x1025: 0x4ab1, 0x1026: 0x4ae9, 0x1027: 0x4b21, 0x1028: 0x4b59, 0x1029: 0x4b91, + 0x102a: 0x4be1, 0x102b: 0x4c31, 0x102c: 0x4cb1, 0x102d: 0x4d01, 0x102e: 0x4d69, 0x102f: 0x4de9, + 0x1030: 0x4e39, 0x1031: 0x4e71, 0x1032: 0x4ea9, 0x1033: 0x4f29, 0x1034: 0x4f91, 0x1035: 0x5011, + 0x1036: 0x5061, 0x1037: 0x50e1, 0x1038: 0x5119, 0x1039: 0x5169, 0x103a: 0x51b9, 0x103b: 0x5209, + 0x103c: 0x5259, 0x103d: 0x52a9, 0x103e: 0x5311, 0x103f: 0x5361, // Block 0x41, offset 0x1040 - 0x1040: 0x434d, 0x1041: 0x436d, 0x1042: 0x0040, 0x1043: 0x6641, 0x1044: 0x6659, 0x1045: 0x6671, - 0x1046: 0x6689, 0x1047: 0x0040, 0x1048: 0x66c1, 0x1049: 0x66d9, 0x104a: 0x66f1, 0x104b: 0x6709, - 0x104c: 0x6721, 0x104d: 0x6739, 0x104e: 0x6401, 0x104f: 0x6751, 0x1050: 0x6769, 0x1051: 0x6781, - 0x1052: 0x438d, 0x1053: 0x6799, 0x1054: 0x6289, 0x1055: 0x43ad, 0x1056: 0x43cd, 0x1057: 0x67b1, - 0x1058: 0x0040, 0x1059: 0x43ed, 0x105a: 0x67c9, 0x105b: 0x67e1, 0x105c: 0x67f9, 0x105d: 0x6811, - 0x105e: 0x6829, 0x105f: 0x6859, 0x1060: 0x6889, 0x1061: 0x68b1, 0x1062: 0x68d9, 0x1063: 0x6901, - 0x1064: 0x6929, 0x1065: 0x6951, 0x1066: 0x6979, 0x1067: 0x69a1, 0x1068: 0x69c9, 0x1069: 0x69f1, - 0x106a: 0x6a21, 0x106b: 0x6a51, 0x106c: 0x6a81, 0x106d: 0x6ab1, 0x106e: 0x6ae1, 0x106f: 0x6b11, - 0x1070: 0x6b41, 0x1071: 0x6b71, 0x1072: 0x6ba1, 0x1073: 0x6bd1, 0x1074: 0x6c01, 0x1075: 0x6c31, - 0x1076: 0x6c61, 0x1077: 0x6c91, 0x1078: 0x6cc1, 0x1079: 0x6cf1, 0x107a: 0x6d21, 0x107b: 0x6d51, - 0x107c: 0x6d81, 0x107d: 0x6db1, 0x107e: 0x6de1, 0x107f: 0x440d, + 0x1040: 0x5399, 0x1041: 0x53e9, 0x1042: 0x5439, 0x1043: 0x5489, 0x1044: 0x54f1, 0x1045: 0x5541, + 0x1046: 0x5591, 0x1047: 0x55e1, 0x1048: 0x5661, 0x1049: 0x56c9, 0x104a: 0x5701, 0x104b: 0x5781, + 0x104c: 0x57b9, 0x104d: 0x5821, 0x104e: 0x5889, 0x104f: 0x58d9, 0x1050: 0x5929, 0x1051: 0x5979, + 0x1052: 0x59e1, 0x1053: 0x5a19, 0x1054: 0x5a69, 0x1055: 0x5ad1, 0x1056: 0x5b09, 0x1057: 0x5b89, + 0x1058: 0x5bd9, 0x1059: 0x5c01, 0x105a: 0x5c29, 0x105b: 0x5c51, 0x105c: 0x5c79, 0x105d: 0x5ca1, + 0x105e: 0x5cc9, 0x105f: 0x5cf1, 0x1060: 0x5d19, 0x1061: 0x5d41, 0x1062: 0x5d69, 0x1063: 0x5d99, + 0x1064: 0x5dc9, 0x1065: 0x5df9, 0x1066: 0x5e29, 0x1067: 0x5e59, 0x1068: 0x5e89, 0x1069: 0x5eb9, + 0x106a: 0x5ee9, 0x106b: 0x5f19, 0x106c: 0x5f49, 0x106d: 0x5f79, 0x106e: 0x5fa9, 0x106f: 0x5fd9, + 0x1070: 0x6009, 0x1071: 0x402d, 0x1072: 0x6039, 0x1073: 0x6051, 0x1074: 0x404d, 0x1075: 0x6069, + 0x1076: 0x6081, 0x1077: 0x6099, 0x1078: 0x406d, 0x1079: 0x406d, 0x107a: 0x60b1, 0x107b: 0x60c9, + 0x107c: 0x6101, 0x107d: 0x6139, 0x107e: 0x6171, 0x107f: 0x61a9, // Block 0x42, offset 0x1080 - 0x1080: 0xe00d, 0x1081: 0x0008, 0x1082: 0xe00d, 0x1083: 0x0008, 0x1084: 0xe00d, 0x1085: 0x0008, - 0x1086: 0xe00d, 0x1087: 0x0008, 0x1088: 0xe00d, 0x1089: 0x0008, 0x108a: 0xe00d, 0x108b: 0x0008, - 0x108c: 0xe00d, 0x108d: 0x0008, 0x108e: 0xe00d, 0x108f: 0x0008, 0x1090: 0xe00d, 0x1091: 0x0008, - 0x1092: 0xe00d, 0x1093: 0x0008, 0x1094: 0xe00d, 0x1095: 0x0008, 0x1096: 0xe00d, 0x1097: 0x0008, - 0x1098: 0xe00d, 
0x1099: 0x0008, 0x109a: 0xe00d, 0x109b: 0x0008, 0x109c: 0xe00d, 0x109d: 0x0008, - 0x109e: 0xe00d, 0x109f: 0x0008, 0x10a0: 0xe00d, 0x10a1: 0x0008, 0x10a2: 0xe00d, 0x10a3: 0x0008, - 0x10a4: 0xe00d, 0x10a5: 0x0008, 0x10a6: 0xe00d, 0x10a7: 0x0008, 0x10a8: 0xe00d, 0x10a9: 0x0008, - 0x10aa: 0xe00d, 0x10ab: 0x0008, 0x10ac: 0xe00d, 0x10ad: 0x0008, 0x10ae: 0x0008, 0x10af: 0x1308, - 0x10b0: 0x1318, 0x10b1: 0x1318, 0x10b2: 0x1318, 0x10b3: 0x0018, 0x10b4: 0x1308, 0x10b5: 0x1308, - 0x10b6: 0x1308, 0x10b7: 0x1308, 0x10b8: 0x1308, 0x10b9: 0x1308, 0x10ba: 0x1308, 0x10bb: 0x1308, - 0x10bc: 0x1308, 0x10bd: 0x1308, 0x10be: 0x0018, 0x10bf: 0x0008, + 0x1080: 0x6211, 0x1081: 0x6229, 0x1082: 0x408d, 0x1083: 0x6241, 0x1084: 0x6259, 0x1085: 0x6271, + 0x1086: 0x6289, 0x1087: 0x62a1, 0x1088: 0x40ad, 0x1089: 0x62b9, 0x108a: 0x62e1, 0x108b: 0x62f9, + 0x108c: 0x40cd, 0x108d: 0x40cd, 0x108e: 0x6311, 0x108f: 0x6329, 0x1090: 0x6341, 0x1091: 0x40ed, + 0x1092: 0x410d, 0x1093: 0x412d, 0x1094: 0x414d, 0x1095: 0x416d, 0x1096: 0x6359, 0x1097: 0x6371, + 0x1098: 0x6389, 0x1099: 0x63a1, 0x109a: 0x63b9, 0x109b: 0x418d, 0x109c: 0x63d1, 0x109d: 0x63e9, + 0x109e: 0x6401, 0x109f: 0x41ad, 0x10a0: 0x41cd, 0x10a1: 0x6419, 0x10a2: 0x41ed, 0x10a3: 0x420d, + 0x10a4: 0x422d, 0x10a5: 0x6431, 0x10a6: 0x424d, 0x10a7: 0x6449, 0x10a8: 0x6479, 0x10a9: 0x6211, + 0x10aa: 0x426d, 0x10ab: 0x428d, 0x10ac: 0x42ad, 0x10ad: 0x42cd, 0x10ae: 0x64b1, 0x10af: 0x64f1, + 0x10b0: 0x6539, 0x10b1: 0x6551, 0x10b2: 0x42ed, 0x10b3: 0x6569, 0x10b4: 0x6581, 0x10b5: 0x6599, + 0x10b6: 0x430d, 0x10b7: 0x65b1, 0x10b8: 0x65c9, 0x10b9: 0x65b1, 0x10ba: 0x65e1, 0x10bb: 0x65f9, + 0x10bc: 0x432d, 0x10bd: 0x6611, 0x10be: 0x6629, 0x10bf: 0x6611, // Block 0x43, offset 0x10c0 - 0x10c0: 0xe00d, 0x10c1: 0x0008, 0x10c2: 0xe00d, 0x10c3: 0x0008, 0x10c4: 0xe00d, 0x10c5: 0x0008, - 0x10c6: 0xe00d, 0x10c7: 0x0008, 0x10c8: 0xe00d, 0x10c9: 0x0008, 0x10ca: 0xe00d, 0x10cb: 0x0008, - 0x10cc: 0xe00d, 0x10cd: 0x0008, 0x10ce: 0xe00d, 0x10cf: 0x0008, 0x10d0: 0xe00d, 0x10d1: 0x0008, - 0x10d2: 0xe00d, 0x10d3: 0x0008, 0x10d4: 0xe00d, 0x10d5: 0x0008, 0x10d6: 0xe00d, 0x10d7: 0x0008, - 0x10d8: 0xe00d, 0x10d9: 0x0008, 0x10da: 0xe00d, 0x10db: 0x0008, 0x10dc: 0x0ea1, 0x10dd: 0x6e11, - 0x10de: 0x1308, 0x10df: 0x1308, 0x10e0: 0x0008, 0x10e1: 0x0008, 0x10e2: 0x0008, 0x10e3: 0x0008, - 0x10e4: 0x0008, 0x10e5: 0x0008, 0x10e6: 0x0008, 0x10e7: 0x0008, 0x10e8: 0x0008, 0x10e9: 0x0008, - 0x10ea: 0x0008, 0x10eb: 0x0008, 0x10ec: 0x0008, 0x10ed: 0x0008, 0x10ee: 0x0008, 0x10ef: 0x0008, - 0x10f0: 0x0008, 0x10f1: 0x0008, 0x10f2: 0x0008, 0x10f3: 0x0008, 0x10f4: 0x0008, 0x10f5: 0x0008, - 0x10f6: 0x0008, 0x10f7: 0x0008, 0x10f8: 0x0008, 0x10f9: 0x0008, 0x10fa: 0x0008, 0x10fb: 0x0008, - 0x10fc: 0x0008, 0x10fd: 0x0008, 0x10fe: 0x0008, 0x10ff: 0x0008, + 0x10c0: 0x434d, 0x10c1: 0x436d, 0x10c2: 0x0040, 0x10c3: 0x6641, 0x10c4: 0x6659, 0x10c5: 0x6671, + 0x10c6: 0x6689, 0x10c7: 0x0040, 0x10c8: 0x66c1, 0x10c9: 0x66d9, 0x10ca: 0x66f1, 0x10cb: 0x6709, + 0x10cc: 0x6721, 0x10cd: 0x6739, 0x10ce: 0x6401, 0x10cf: 0x6751, 0x10d0: 0x6769, 0x10d1: 0x6781, + 0x10d2: 0x438d, 0x10d3: 0x6799, 0x10d4: 0x6289, 0x10d5: 0x43ad, 0x10d6: 0x43cd, 0x10d7: 0x67b1, + 0x10d8: 0x0040, 0x10d9: 0x43ed, 0x10da: 0x67c9, 0x10db: 0x67e1, 0x10dc: 0x67f9, 0x10dd: 0x6811, + 0x10de: 0x6829, 0x10df: 0x6859, 0x10e0: 0x6889, 0x10e1: 0x68b1, 0x10e2: 0x68d9, 0x10e3: 0x6901, + 0x10e4: 0x6929, 0x10e5: 0x6951, 0x10e6: 0x6979, 0x10e7: 0x69a1, 0x10e8: 0x69c9, 0x10e9: 0x69f1, + 0x10ea: 0x6a21, 0x10eb: 0x6a51, 0x10ec: 0x6a81, 0x10ed: 0x6ab1, 0x10ee: 0x6ae1, 0x10ef: 0x6b11, + 0x10f0: 
0x6b41, 0x10f1: 0x6b71, 0x10f2: 0x6ba1, 0x10f3: 0x6bd1, 0x10f4: 0x6c01, 0x10f5: 0x6c31, + 0x10f6: 0x6c61, 0x10f7: 0x6c91, 0x10f8: 0x6cc1, 0x10f9: 0x6cf1, 0x10fa: 0x6d21, 0x10fb: 0x6d51, + 0x10fc: 0x6d81, 0x10fd: 0x6db1, 0x10fe: 0x6de1, 0x10ff: 0x440d, // Block 0x44, offset 0x1100 - 0x1100: 0x0018, 0x1101: 0x0018, 0x1102: 0x0018, 0x1103: 0x0018, 0x1104: 0x0018, 0x1105: 0x0018, - 0x1106: 0x0018, 0x1107: 0x0018, 0x1108: 0x0018, 0x1109: 0x0018, 0x110a: 0x0018, 0x110b: 0x0018, - 0x110c: 0x0018, 0x110d: 0x0018, 0x110e: 0x0018, 0x110f: 0x0018, 0x1110: 0x0018, 0x1111: 0x0018, - 0x1112: 0x0018, 0x1113: 0x0018, 0x1114: 0x0018, 0x1115: 0x0018, 0x1116: 0x0018, 0x1117: 0x0008, - 0x1118: 0x0008, 0x1119: 0x0008, 0x111a: 0x0008, 0x111b: 0x0008, 0x111c: 0x0008, 0x111d: 0x0008, - 0x111e: 0x0008, 0x111f: 0x0008, 0x1120: 0x0018, 0x1121: 0x0018, 0x1122: 0xe00d, 0x1123: 0x0008, + 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008, + 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008, + 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008, + 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008, + 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0xe00d, 0x111d: 0x0008, + 0x111e: 0xe00d, 0x111f: 0x0008, 0x1120: 0xe00d, 0x1121: 0x0008, 0x1122: 0xe00d, 0x1123: 0x0008, 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008, - 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0xe00d, 0x112f: 0x0008, - 0x1130: 0x0008, 0x1131: 0x0008, 0x1132: 0xe00d, 0x1133: 0x0008, 0x1134: 0xe00d, 0x1135: 0x0008, - 0x1136: 0xe00d, 0x1137: 0x0008, 0x1138: 0xe00d, 0x1139: 0x0008, 0x113a: 0xe00d, 0x113b: 0x0008, - 0x113c: 0xe00d, 0x113d: 0x0008, 0x113e: 0xe00d, 0x113f: 0x0008, + 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x3308, + 0x1130: 0x3318, 0x1131: 0x3318, 0x1132: 0x3318, 0x1133: 0x0018, 0x1134: 0x3308, 0x1135: 0x3308, + 0x1136: 0x3308, 0x1137: 0x3308, 0x1138: 0x3308, 0x1139: 0x3308, 0x113a: 0x3308, 0x113b: 0x3308, + 0x113c: 0x3308, 0x113d: 0x3308, 0x113e: 0x0018, 0x113f: 0x0008, // Block 0x45, offset 0x1140 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008, 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008, 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008, 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008, - 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0xe00d, 0x115d: 0x0008, - 0x115e: 0xe00d, 0x115f: 0x0008, 0x1160: 0xe00d, 0x1161: 0x0008, 0x1162: 0xe00d, 0x1163: 0x0008, - 0x1164: 0xe00d, 0x1165: 0x0008, 0x1166: 0xe00d, 0x1167: 0x0008, 0x1168: 0xe00d, 0x1169: 0x0008, - 0x116a: 0xe00d, 0x116b: 0x0008, 0x116c: 0xe00d, 0x116d: 0x0008, 0x116e: 0xe00d, 0x116f: 0x0008, - 0x1170: 0xe0fd, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008, - 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0xe01d, 0x117a: 0x0008, 0x117b: 0xe03d, - 0x117c: 0x0008, 0x117d: 0x442d, 0x117e: 0xe00d, 0x117f: 0x0008, + 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0x0ea1, 0x115d: 0x6e11, + 0x115e: 0x3308, 0x115f: 0x3308, 0x1160: 0x0008, 0x1161: 0x0008, 0x1162: 0x0008, 0x1163: 0x0008, + 0x1164: 0x0008, 
0x1165: 0x0008, 0x1166: 0x0008, 0x1167: 0x0008, 0x1168: 0x0008, 0x1169: 0x0008, + 0x116a: 0x0008, 0x116b: 0x0008, 0x116c: 0x0008, 0x116d: 0x0008, 0x116e: 0x0008, 0x116f: 0x0008, + 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008, + 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0x0008, 0x117a: 0x0008, 0x117b: 0x0008, + 0x117c: 0x0008, 0x117d: 0x0008, 0x117e: 0x0008, 0x117f: 0x0008, // Block 0x46, offset 0x1180 - 0x1180: 0xe00d, 0x1181: 0x0008, 0x1182: 0xe00d, 0x1183: 0x0008, 0x1184: 0xe00d, 0x1185: 0x0008, - 0x1186: 0xe00d, 0x1187: 0x0008, 0x1188: 0x0008, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0xe03d, - 0x118c: 0x0008, 0x118d: 0x11d9, 0x118e: 0x0008, 0x118f: 0x0008, 0x1190: 0xe00d, 0x1191: 0x0008, - 0x1192: 0xe00d, 0x1193: 0x0008, 0x1194: 0x0008, 0x1195: 0x0008, 0x1196: 0xe00d, 0x1197: 0x0008, - 0x1198: 0xe00d, 0x1199: 0x0008, 0x119a: 0xe00d, 0x119b: 0x0008, 0x119c: 0xe00d, 0x119d: 0x0008, - 0x119e: 0xe00d, 0x119f: 0x0008, 0x11a0: 0xe00d, 0x11a1: 0x0008, 0x11a2: 0xe00d, 0x11a3: 0x0008, + 0x1180: 0x0018, 0x1181: 0x0018, 0x1182: 0x0018, 0x1183: 0x0018, 0x1184: 0x0018, 0x1185: 0x0018, + 0x1186: 0x0018, 0x1187: 0x0018, 0x1188: 0x0018, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0x0018, + 0x118c: 0x0018, 0x118d: 0x0018, 0x118e: 0x0018, 0x118f: 0x0018, 0x1190: 0x0018, 0x1191: 0x0018, + 0x1192: 0x0018, 0x1193: 0x0018, 0x1194: 0x0018, 0x1195: 0x0018, 0x1196: 0x0018, 0x1197: 0x0008, + 0x1198: 0x0008, 0x1199: 0x0008, 0x119a: 0x0008, 0x119b: 0x0008, 0x119c: 0x0008, 0x119d: 0x0008, + 0x119e: 0x0008, 0x119f: 0x0008, 0x11a0: 0x0018, 0x11a1: 0x0018, 0x11a2: 0xe00d, 0x11a3: 0x0008, 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008, - 0x11aa: 0x6e29, 0x11ab: 0x1029, 0x11ac: 0x11c1, 0x11ad: 0x6e41, 0x11ae: 0x1221, 0x11af: 0x0040, - 0x11b0: 0x6e59, 0x11b1: 0x6e71, 0x11b2: 0x1239, 0x11b3: 0x444d, 0x11b4: 0xe00d, 0x11b5: 0x0008, - 0x11b6: 0xe00d, 0x11b7: 0x0008, 0x11b8: 0x0040, 0x11b9: 0x0040, 0x11ba: 0x0040, 0x11bb: 0x0040, - 0x11bc: 0x0040, 0x11bd: 0x0040, 0x11be: 0x0040, 0x11bf: 0x0040, + 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008, + 0x11b0: 0x0008, 0x11b1: 0x0008, 0x11b2: 0xe00d, 0x11b3: 0x0008, 0x11b4: 0xe00d, 0x11b5: 0x0008, + 0x11b6: 0xe00d, 0x11b7: 0x0008, 0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008, + 0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008, // Block 0x47, offset 0x11c0 - 0x11c0: 0x64d5, 0x11c1: 0x64f5, 0x11c2: 0x6515, 0x11c3: 0x6535, 0x11c4: 0x6555, 0x11c5: 0x6575, - 0x11c6: 0x6595, 0x11c7: 0x65b5, 0x11c8: 0x65d5, 0x11c9: 0x65f5, 0x11ca: 0x6615, 0x11cb: 0x6635, - 0x11cc: 0x6655, 0x11cd: 0x6675, 0x11ce: 0x0008, 0x11cf: 0x0008, 0x11d0: 0x6695, 0x11d1: 0x0008, - 0x11d2: 0x66b5, 0x11d3: 0x0008, 0x11d4: 0x0008, 0x11d5: 0x66d5, 0x11d6: 0x66f5, 0x11d7: 0x6715, - 0x11d8: 0x6735, 0x11d9: 0x6755, 0x11da: 0x6775, 0x11db: 0x6795, 0x11dc: 0x67b5, 0x11dd: 0x67d5, - 0x11de: 0x67f5, 0x11df: 0x0008, 0x11e0: 0x6815, 0x11e1: 0x0008, 0x11e2: 0x6835, 0x11e3: 0x0008, - 0x11e4: 0x0008, 0x11e5: 0x6855, 0x11e6: 0x6875, 0x11e7: 0x0008, 0x11e8: 0x0008, 0x11e9: 0x0008, - 0x11ea: 0x6895, 0x11eb: 0x68b5, 0x11ec: 0x68d5, 0x11ed: 0x68f5, 0x11ee: 0x6915, 0x11ef: 0x6935, - 0x11f0: 0x6955, 0x11f1: 0x6975, 0x11f2: 0x6995, 0x11f3: 0x69b5, 0x11f4: 0x69d5, 0x11f5: 0x69f5, - 0x11f6: 0x6a15, 0x11f7: 0x6a35, 0x11f8: 0x6a55, 0x11f9: 0x6a75, 0x11fa: 0x6a95, 0x11fb: 0x6ab5, - 0x11fc: 0x6ad5, 0x11fd: 0x6af5, 0x11fe: 0x6b15, 0x11ff: 0x6b35, + 0x11c0: 0xe00d, 
0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008, + 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0xe00d, 0x11c9: 0x0008, 0x11ca: 0xe00d, 0x11cb: 0x0008, + 0x11cc: 0xe00d, 0x11cd: 0x0008, 0x11ce: 0xe00d, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008, + 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0xe00d, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008, + 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008, + 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008, + 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008, + 0x11ea: 0xe00d, 0x11eb: 0x0008, 0x11ec: 0xe00d, 0x11ed: 0x0008, 0x11ee: 0xe00d, 0x11ef: 0x0008, + 0x11f0: 0xe0fd, 0x11f1: 0x0008, 0x11f2: 0x0008, 0x11f3: 0x0008, 0x11f4: 0x0008, 0x11f5: 0x0008, + 0x11f6: 0x0008, 0x11f7: 0x0008, 0x11f8: 0x0008, 0x11f9: 0xe01d, 0x11fa: 0x0008, 0x11fb: 0xe03d, + 0x11fc: 0x0008, 0x11fd: 0x442d, 0x11fe: 0xe00d, 0x11ff: 0x0008, // Block 0x48, offset 0x1200 - 0x1200: 0x7a95, 0x1201: 0x7ab5, 0x1202: 0x7ad5, 0x1203: 0x7af5, 0x1204: 0x7b15, 0x1205: 0x7b35, - 0x1206: 0x7b55, 0x1207: 0x7b75, 0x1208: 0x7b95, 0x1209: 0x7bb5, 0x120a: 0x7bd5, 0x120b: 0x7bf5, - 0x120c: 0x7c15, 0x120d: 0x7c35, 0x120e: 0x7c55, 0x120f: 0x6ec9, 0x1210: 0x6ef1, 0x1211: 0x6f19, - 0x1212: 0x7c75, 0x1213: 0x7c95, 0x1214: 0x7cb5, 0x1215: 0x6f41, 0x1216: 0x6f69, 0x1217: 0x6f91, - 0x1218: 0x7cd5, 0x1219: 0x7cf5, 0x121a: 0x0040, 0x121b: 0x0040, 0x121c: 0x0040, 0x121d: 0x0040, - 0x121e: 0x0040, 0x121f: 0x0040, 0x1220: 0x0040, 0x1221: 0x0040, 0x1222: 0x0040, 0x1223: 0x0040, - 0x1224: 0x0040, 0x1225: 0x0040, 0x1226: 0x0040, 0x1227: 0x0040, 0x1228: 0x0040, 0x1229: 0x0040, - 0x122a: 0x0040, 0x122b: 0x0040, 0x122c: 0x0040, 0x122d: 0x0040, 0x122e: 0x0040, 0x122f: 0x0040, - 0x1230: 0x0040, 0x1231: 0x0040, 0x1232: 0x0040, 0x1233: 0x0040, 0x1234: 0x0040, 0x1235: 0x0040, - 0x1236: 0x0040, 0x1237: 0x0040, 0x1238: 0x0040, 0x1239: 0x0040, 0x123a: 0x0040, 0x123b: 0x0040, + 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0xe00d, 0x1205: 0x0008, + 0x1206: 0xe00d, 0x1207: 0x0008, 0x1208: 0x0008, 0x1209: 0x0018, 0x120a: 0x0018, 0x120b: 0xe03d, + 0x120c: 0x0008, 0x120d: 0x11d9, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0xe00d, 0x1211: 0x0008, + 0x1212: 0xe00d, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008, + 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0xe00d, 0x121b: 0x0008, 0x121c: 0xe00d, 0x121d: 0x0008, + 0x121e: 0xe00d, 0x121f: 0x0008, 0x1220: 0xe00d, 0x1221: 0x0008, 0x1222: 0xe00d, 0x1223: 0x0008, + 0x1224: 0xe00d, 0x1225: 0x0008, 0x1226: 0xe00d, 0x1227: 0x0008, 0x1228: 0xe00d, 0x1229: 0x0008, + 0x122a: 0x6e29, 0x122b: 0x1029, 0x122c: 0x11c1, 0x122d: 0x6e41, 0x122e: 0x1221, 0x122f: 0x0040, + 0x1230: 0x6e59, 0x1231: 0x6e71, 0x1232: 0x1239, 0x1233: 0x444d, 0x1234: 0xe00d, 0x1235: 0x0008, + 0x1236: 0xe00d, 0x1237: 0x0008, 0x1238: 0x0040, 0x1239: 0x0040, 0x123a: 0x0040, 0x123b: 0x0040, 0x123c: 0x0040, 0x123d: 0x0040, 0x123e: 0x0040, 0x123f: 0x0040, // Block 0x49, offset 0x1240 - 0x1240: 0x6fb9, 0x1241: 0x6fd1, 0x1242: 0x6fe9, 0x1243: 0x7d15, 0x1244: 0x7d35, 0x1245: 0x7001, - 0x1246: 0x7001, 0x1247: 0x0040, 0x1248: 0x0040, 0x1249: 0x0040, 0x124a: 0x0040, 0x124b: 0x0040, - 0x124c: 0x0040, 0x124d: 0x0040, 0x124e: 0x0040, 0x124f: 0x0040, 0x1250: 0x0040, 0x1251: 0x0040, - 0x1252: 0x0040, 0x1253: 0x7019, 0x1254: 0x7041, 0x1255: 0x7069, 0x1256: 0x7091, 0x1257: 0x70b9, - 0x1258: 0x0040, 0x1259: 0x0040, 0x125a: 0x0040, 
0x125b: 0x0040, 0x125c: 0x0040, 0x125d: 0x70e1, - 0x125e: 0x1308, 0x125f: 0x7109, 0x1260: 0x7131, 0x1261: 0x20a9, 0x1262: 0x20f1, 0x1263: 0x7149, - 0x1264: 0x7161, 0x1265: 0x7179, 0x1266: 0x7191, 0x1267: 0x71a9, 0x1268: 0x71c1, 0x1269: 0x1fb2, - 0x126a: 0x71d9, 0x126b: 0x7201, 0x126c: 0x7229, 0x126d: 0x7261, 0x126e: 0x7299, 0x126f: 0x72c1, - 0x1270: 0x72e9, 0x1271: 0x7311, 0x1272: 0x7339, 0x1273: 0x7361, 0x1274: 0x7389, 0x1275: 0x73b1, - 0x1276: 0x73d9, 0x1277: 0x0040, 0x1278: 0x7401, 0x1279: 0x7429, 0x127a: 0x7451, 0x127b: 0x7479, - 0x127c: 0x74a1, 0x127d: 0x0040, 0x127e: 0x74c9, 0x127f: 0x0040, + 0x1240: 0x64d5, 0x1241: 0x64f5, 0x1242: 0x6515, 0x1243: 0x6535, 0x1244: 0x6555, 0x1245: 0x6575, + 0x1246: 0x6595, 0x1247: 0x65b5, 0x1248: 0x65d5, 0x1249: 0x65f5, 0x124a: 0x6615, 0x124b: 0x6635, + 0x124c: 0x6655, 0x124d: 0x6675, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x6695, 0x1251: 0x0008, + 0x1252: 0x66b5, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x66d5, 0x1256: 0x66f5, 0x1257: 0x6715, + 0x1258: 0x6735, 0x1259: 0x6755, 0x125a: 0x6775, 0x125b: 0x6795, 0x125c: 0x67b5, 0x125d: 0x67d5, + 0x125e: 0x67f5, 0x125f: 0x0008, 0x1260: 0x6815, 0x1261: 0x0008, 0x1262: 0x6835, 0x1263: 0x0008, + 0x1264: 0x0008, 0x1265: 0x6855, 0x1266: 0x6875, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008, + 0x126a: 0x6895, 0x126b: 0x68b5, 0x126c: 0x68d5, 0x126d: 0x68f5, 0x126e: 0x6915, 0x126f: 0x6935, + 0x1270: 0x6955, 0x1271: 0x6975, 0x1272: 0x6995, 0x1273: 0x69b5, 0x1274: 0x69d5, 0x1275: 0x69f5, + 0x1276: 0x6a15, 0x1277: 0x6a35, 0x1278: 0x6a55, 0x1279: 0x6a75, 0x127a: 0x6a95, 0x127b: 0x6ab5, + 0x127c: 0x6ad5, 0x127d: 0x6af5, 0x127e: 0x6b15, 0x127f: 0x6b35, // Block 0x4a, offset 0x1280 - 0x1280: 0x74f1, 0x1281: 0x7519, 0x1282: 0x0040, 0x1283: 0x7541, 0x1284: 0x7569, 0x1285: 0x0040, - 0x1286: 0x7591, 0x1287: 0x75b9, 0x1288: 0x75e1, 0x1289: 0x7609, 0x128a: 0x7631, 0x128b: 0x7659, - 0x128c: 0x7681, 0x128d: 0x76a9, 0x128e: 0x76d1, 0x128f: 0x76f9, 0x1290: 0x7721, 0x1291: 0x7721, - 0x1292: 0x7739, 0x1293: 0x7739, 0x1294: 0x7739, 0x1295: 0x7739, 0x1296: 0x7751, 0x1297: 0x7751, - 0x1298: 0x7751, 0x1299: 0x7751, 0x129a: 0x7769, 0x129b: 0x7769, 0x129c: 0x7769, 0x129d: 0x7769, - 0x129e: 0x7781, 0x129f: 0x7781, 0x12a0: 0x7781, 0x12a1: 0x7781, 0x12a2: 0x7799, 0x12a3: 0x7799, - 0x12a4: 0x7799, 0x12a5: 0x7799, 0x12a6: 0x77b1, 0x12a7: 0x77b1, 0x12a8: 0x77b1, 0x12a9: 0x77b1, - 0x12aa: 0x77c9, 0x12ab: 0x77c9, 0x12ac: 0x77c9, 0x12ad: 0x77c9, 0x12ae: 0x77e1, 0x12af: 0x77e1, - 0x12b0: 0x77e1, 0x12b1: 0x77e1, 0x12b2: 0x77f9, 0x12b3: 0x77f9, 0x12b4: 0x77f9, 0x12b5: 0x77f9, - 0x12b6: 0x7811, 0x12b7: 0x7811, 0x12b8: 0x7811, 0x12b9: 0x7811, 0x12ba: 0x7829, 0x12bb: 0x7829, - 0x12bc: 0x7829, 0x12bd: 0x7829, 0x12be: 0x7841, 0x12bf: 0x7841, + 0x1280: 0x7a95, 0x1281: 0x7ab5, 0x1282: 0x7ad5, 0x1283: 0x7af5, 0x1284: 0x7b15, 0x1285: 0x7b35, + 0x1286: 0x7b55, 0x1287: 0x7b75, 0x1288: 0x7b95, 0x1289: 0x7bb5, 0x128a: 0x7bd5, 0x128b: 0x7bf5, + 0x128c: 0x7c15, 0x128d: 0x7c35, 0x128e: 0x7c55, 0x128f: 0x6ec9, 0x1290: 0x6ef1, 0x1291: 0x6f19, + 0x1292: 0x7c75, 0x1293: 0x7c95, 0x1294: 0x7cb5, 0x1295: 0x6f41, 0x1296: 0x6f69, 0x1297: 0x6f91, + 0x1298: 0x7cd5, 0x1299: 0x7cf5, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040, + 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040, + 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040, + 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040, + 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 
0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040, + 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040, + 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040, // Block 0x4b, offset 0x12c0 - 0x12c0: 0x7841, 0x12c1: 0x7841, 0x12c2: 0x7859, 0x12c3: 0x7859, 0x12c4: 0x7871, 0x12c5: 0x7871, - 0x12c6: 0x7889, 0x12c7: 0x7889, 0x12c8: 0x78a1, 0x12c9: 0x78a1, 0x12ca: 0x78b9, 0x12cb: 0x78b9, - 0x12cc: 0x78d1, 0x12cd: 0x78d1, 0x12ce: 0x78e9, 0x12cf: 0x78e9, 0x12d0: 0x78e9, 0x12d1: 0x78e9, - 0x12d2: 0x7901, 0x12d3: 0x7901, 0x12d4: 0x7901, 0x12d5: 0x7901, 0x12d6: 0x7919, 0x12d7: 0x7919, - 0x12d8: 0x7919, 0x12d9: 0x7919, 0x12da: 0x7931, 0x12db: 0x7931, 0x12dc: 0x7931, 0x12dd: 0x7931, - 0x12de: 0x7949, 0x12df: 0x7949, 0x12e0: 0x7961, 0x12e1: 0x7961, 0x12e2: 0x7961, 0x12e3: 0x7961, - 0x12e4: 0x7979, 0x12e5: 0x7979, 0x12e6: 0x7991, 0x12e7: 0x7991, 0x12e8: 0x7991, 0x12e9: 0x7991, - 0x12ea: 0x79a9, 0x12eb: 0x79a9, 0x12ec: 0x79a9, 0x12ed: 0x79a9, 0x12ee: 0x79c1, 0x12ef: 0x79c1, - 0x12f0: 0x79d9, 0x12f1: 0x79d9, 0x12f2: 0x0018, 0x12f3: 0x0018, 0x12f4: 0x0018, 0x12f5: 0x0018, - 0x12f6: 0x0018, 0x12f7: 0x0018, 0x12f8: 0x0018, 0x12f9: 0x0018, 0x12fa: 0x0018, 0x12fb: 0x0018, - 0x12fc: 0x0018, 0x12fd: 0x0018, 0x12fe: 0x0018, 0x12ff: 0x0018, + 0x12c0: 0x6fb9, 0x12c1: 0x6fd1, 0x12c2: 0x6fe9, 0x12c3: 0x7d15, 0x12c4: 0x7d35, 0x12c5: 0x7001, + 0x12c6: 0x7001, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040, + 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040, + 0x12d2: 0x0040, 0x12d3: 0x7019, 0x12d4: 0x7041, 0x12d5: 0x7069, 0x12d6: 0x7091, 0x12d7: 0x70b9, + 0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x70e1, + 0x12de: 0x3308, 0x12df: 0x7109, 0x12e0: 0x7131, 0x12e1: 0x20a9, 0x12e2: 0x20f1, 0x12e3: 0x7149, + 0x12e4: 0x7161, 0x12e5: 0x7179, 0x12e6: 0x7191, 0x12e7: 0x71a9, 0x12e8: 0x71c1, 0x12e9: 0x1fb2, + 0x12ea: 0x71d9, 0x12eb: 0x7201, 0x12ec: 0x7229, 0x12ed: 0x7261, 0x12ee: 0x7299, 0x12ef: 0x72c1, + 0x12f0: 0x72e9, 0x12f1: 0x7311, 0x12f2: 0x7339, 0x12f3: 0x7361, 0x12f4: 0x7389, 0x12f5: 0x73b1, + 0x12f6: 0x73d9, 0x12f7: 0x0040, 0x12f8: 0x7401, 0x12f9: 0x7429, 0x12fa: 0x7451, 0x12fb: 0x7479, + 0x12fc: 0x74a1, 0x12fd: 0x0040, 0x12fe: 0x74c9, 0x12ff: 0x0040, // Block 0x4c, offset 0x1300 - 0x1300: 0x0018, 0x1301: 0x0018, 0x1302: 0x0040, 0x1303: 0x0040, 0x1304: 0x0040, 0x1305: 0x0040, - 0x1306: 0x0040, 0x1307: 0x0040, 0x1308: 0x0040, 0x1309: 0x0040, 0x130a: 0x0040, 0x130b: 0x0040, - 0x130c: 0x0040, 0x130d: 0x0040, 0x130e: 0x0040, 0x130f: 0x0040, 0x1310: 0x0040, 0x1311: 0x0040, - 0x1312: 0x0040, 0x1313: 0x79f1, 0x1314: 0x79f1, 0x1315: 0x79f1, 0x1316: 0x79f1, 0x1317: 0x7a09, - 0x1318: 0x7a09, 0x1319: 0x7a21, 0x131a: 0x7a21, 0x131b: 0x7a39, 0x131c: 0x7a39, 0x131d: 0x0479, - 0x131e: 0x7a51, 0x131f: 0x7a51, 0x1320: 0x7a69, 0x1321: 0x7a69, 0x1322: 0x7a81, 0x1323: 0x7a81, - 0x1324: 0x7a99, 0x1325: 0x7a99, 0x1326: 0x7a99, 0x1327: 0x7a99, 0x1328: 0x7ab1, 0x1329: 0x7ab1, - 0x132a: 0x7ac9, 0x132b: 0x7ac9, 0x132c: 0x7af1, 0x132d: 0x7af1, 0x132e: 0x7b19, 0x132f: 0x7b19, - 0x1330: 0x7b41, 0x1331: 0x7b41, 0x1332: 0x7b69, 0x1333: 0x7b69, 0x1334: 0x7b91, 0x1335: 0x7b91, - 0x1336: 0x7bb9, 0x1337: 0x7bb9, 0x1338: 0x7bb9, 0x1339: 0x7be1, 0x133a: 0x7be1, 0x133b: 0x7be1, - 0x133c: 0x7c09, 0x133d: 0x7c09, 0x133e: 0x7c09, 0x133f: 0x7c09, + 0x1300: 0x74f1, 0x1301: 0x7519, 0x1302: 0x0040, 0x1303: 0x7541, 0x1304: 0x7569, 0x1305: 0x0040, + 0x1306: 0x7591, 0x1307: 0x75b9, 
0x1308: 0x75e1, 0x1309: 0x7609, 0x130a: 0x7631, 0x130b: 0x7659, + 0x130c: 0x7681, 0x130d: 0x76a9, 0x130e: 0x76d1, 0x130f: 0x76f9, 0x1310: 0x7721, 0x1311: 0x7721, + 0x1312: 0x7739, 0x1313: 0x7739, 0x1314: 0x7739, 0x1315: 0x7739, 0x1316: 0x7751, 0x1317: 0x7751, + 0x1318: 0x7751, 0x1319: 0x7751, 0x131a: 0x7769, 0x131b: 0x7769, 0x131c: 0x7769, 0x131d: 0x7769, + 0x131e: 0x7781, 0x131f: 0x7781, 0x1320: 0x7781, 0x1321: 0x7781, 0x1322: 0x7799, 0x1323: 0x7799, + 0x1324: 0x7799, 0x1325: 0x7799, 0x1326: 0x77b1, 0x1327: 0x77b1, 0x1328: 0x77b1, 0x1329: 0x77b1, + 0x132a: 0x77c9, 0x132b: 0x77c9, 0x132c: 0x77c9, 0x132d: 0x77c9, 0x132e: 0x77e1, 0x132f: 0x77e1, + 0x1330: 0x77e1, 0x1331: 0x77e1, 0x1332: 0x77f9, 0x1333: 0x77f9, 0x1334: 0x77f9, 0x1335: 0x77f9, + 0x1336: 0x7811, 0x1337: 0x7811, 0x1338: 0x7811, 0x1339: 0x7811, 0x133a: 0x7829, 0x133b: 0x7829, + 0x133c: 0x7829, 0x133d: 0x7829, 0x133e: 0x7841, 0x133f: 0x7841, // Block 0x4d, offset 0x1340 - 0x1340: 0x85f9, 0x1341: 0x8621, 0x1342: 0x8649, 0x1343: 0x8671, 0x1344: 0x8699, 0x1345: 0x86c1, - 0x1346: 0x86e9, 0x1347: 0x8711, 0x1348: 0x8739, 0x1349: 0x8761, 0x134a: 0x8789, 0x134b: 0x87b1, - 0x134c: 0x87d9, 0x134d: 0x8801, 0x134e: 0x8829, 0x134f: 0x8851, 0x1350: 0x8879, 0x1351: 0x88a1, - 0x1352: 0x88c9, 0x1353: 0x88f1, 0x1354: 0x8919, 0x1355: 0x8941, 0x1356: 0x8969, 0x1357: 0x8991, - 0x1358: 0x89b9, 0x1359: 0x89e1, 0x135a: 0x8a09, 0x135b: 0x8a31, 0x135c: 0x8a59, 0x135d: 0x8a81, - 0x135e: 0x8aaa, 0x135f: 0x8ada, 0x1360: 0x8b0a, 0x1361: 0x8b3a, 0x1362: 0x8b6a, 0x1363: 0x8b9a, - 0x1364: 0x8bc9, 0x1365: 0x8bf1, 0x1366: 0x7c71, 0x1367: 0x8c19, 0x1368: 0x7be1, 0x1369: 0x7c99, - 0x136a: 0x8c41, 0x136b: 0x8c69, 0x136c: 0x7d39, 0x136d: 0x8c91, 0x136e: 0x7d61, 0x136f: 0x7d89, - 0x1370: 0x8cb9, 0x1371: 0x8ce1, 0x1372: 0x7e29, 0x1373: 0x8d09, 0x1374: 0x7e51, 0x1375: 0x7e79, - 0x1376: 0x8d31, 0x1377: 0x8d59, 0x1378: 0x7ec9, 0x1379: 0x8d81, 0x137a: 0x7ef1, 0x137b: 0x7f19, - 0x137c: 0x83a1, 0x137d: 0x83c9, 0x137e: 0x8441, 0x137f: 0x8469, + 0x1340: 0x7841, 0x1341: 0x7841, 0x1342: 0x7859, 0x1343: 0x7859, 0x1344: 0x7871, 0x1345: 0x7871, + 0x1346: 0x7889, 0x1347: 0x7889, 0x1348: 0x78a1, 0x1349: 0x78a1, 0x134a: 0x78b9, 0x134b: 0x78b9, + 0x134c: 0x78d1, 0x134d: 0x78d1, 0x134e: 0x78e9, 0x134f: 0x78e9, 0x1350: 0x78e9, 0x1351: 0x78e9, + 0x1352: 0x7901, 0x1353: 0x7901, 0x1354: 0x7901, 0x1355: 0x7901, 0x1356: 0x7919, 0x1357: 0x7919, + 0x1358: 0x7919, 0x1359: 0x7919, 0x135a: 0x7931, 0x135b: 0x7931, 0x135c: 0x7931, 0x135d: 0x7931, + 0x135e: 0x7949, 0x135f: 0x7949, 0x1360: 0x7961, 0x1361: 0x7961, 0x1362: 0x7961, 0x1363: 0x7961, + 0x1364: 0x7979, 0x1365: 0x7979, 0x1366: 0x7991, 0x1367: 0x7991, 0x1368: 0x7991, 0x1369: 0x7991, + 0x136a: 0x79a9, 0x136b: 0x79a9, 0x136c: 0x79a9, 0x136d: 0x79a9, 0x136e: 0x79c1, 0x136f: 0x79c1, + 0x1370: 0x79d9, 0x1371: 0x79d9, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818, + 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818, + 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818, // Block 0x4e, offset 0x1380 - 0x1380: 0x8491, 0x1381: 0x8531, 0x1382: 0x8559, 0x1383: 0x8581, 0x1384: 0x85a9, 0x1385: 0x8649, - 0x1386: 0x8671, 0x1387: 0x8699, 0x1388: 0x8da9, 0x1389: 0x8739, 0x138a: 0x8dd1, 0x138b: 0x8df9, - 0x138c: 0x8829, 0x138d: 0x8e21, 0x138e: 0x8851, 0x138f: 0x8879, 0x1390: 0x8a81, 0x1391: 0x8e49, - 0x1392: 0x8e71, 0x1393: 0x89b9, 0x1394: 0x8e99, 0x1395: 0x89e1, 0x1396: 0x8a09, 0x1397: 0x7c21, - 0x1398: 0x7c49, 0x1399: 0x8ec1, 0x139a: 0x7c71, 0x139b: 0x8ee9, 0x139c: 0x7cc1, 0x139d: 0x7ce9, 
- 0x139e: 0x7d11, 0x139f: 0x7d39, 0x13a0: 0x8f11, 0x13a1: 0x7db1, 0x13a2: 0x7dd9, 0x13a3: 0x7e01, - 0x13a4: 0x7e29, 0x13a5: 0x8f39, 0x13a6: 0x7ec9, 0x13a7: 0x7f41, 0x13a8: 0x7f69, 0x13a9: 0x7f91, - 0x13aa: 0x7fb9, 0x13ab: 0x7fe1, 0x13ac: 0x8031, 0x13ad: 0x8059, 0x13ae: 0x8081, 0x13af: 0x80a9, - 0x13b0: 0x80d1, 0x13b1: 0x80f9, 0x13b2: 0x8f61, 0x13b3: 0x8121, 0x13b4: 0x8149, 0x13b5: 0x8171, - 0x13b6: 0x8199, 0x13b7: 0x81c1, 0x13b8: 0x81e9, 0x13b9: 0x8239, 0x13ba: 0x8261, 0x13bb: 0x8289, - 0x13bc: 0x82b1, 0x13bd: 0x82d9, 0x13be: 0x8301, 0x13bf: 0x8329, + 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040, + 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040, + 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040, + 0x1392: 0x0040, 0x1393: 0x79f1, 0x1394: 0x79f1, 0x1395: 0x79f1, 0x1396: 0x79f1, 0x1397: 0x7a09, + 0x1398: 0x7a09, 0x1399: 0x7a21, 0x139a: 0x7a21, 0x139b: 0x7a39, 0x139c: 0x7a39, 0x139d: 0x0479, + 0x139e: 0x7a51, 0x139f: 0x7a51, 0x13a0: 0x7a69, 0x13a1: 0x7a69, 0x13a2: 0x7a81, 0x13a3: 0x7a81, + 0x13a4: 0x7a99, 0x13a5: 0x7a99, 0x13a6: 0x7a99, 0x13a7: 0x7a99, 0x13a8: 0x7ab1, 0x13a9: 0x7ab1, + 0x13aa: 0x7ac9, 0x13ab: 0x7ac9, 0x13ac: 0x7af1, 0x13ad: 0x7af1, 0x13ae: 0x7b19, 0x13af: 0x7b19, + 0x13b0: 0x7b41, 0x13b1: 0x7b41, 0x13b2: 0x7b69, 0x13b3: 0x7b69, 0x13b4: 0x7b91, 0x13b5: 0x7b91, + 0x13b6: 0x7bb9, 0x13b7: 0x7bb9, 0x13b8: 0x7bb9, 0x13b9: 0x7be1, 0x13ba: 0x7be1, 0x13bb: 0x7be1, + 0x13bc: 0x7c09, 0x13bd: 0x7c09, 0x13be: 0x7c09, 0x13bf: 0x7c09, // Block 0x4f, offset 0x13c0 - 0x13c0: 0x8351, 0x13c1: 0x8379, 0x13c2: 0x83f1, 0x13c3: 0x8419, 0x13c4: 0x84b9, 0x13c5: 0x84e1, - 0x13c6: 0x8509, 0x13c7: 0x8531, 0x13c8: 0x8559, 0x13c9: 0x85d1, 0x13ca: 0x85f9, 0x13cb: 0x8621, - 0x13cc: 0x8649, 0x13cd: 0x8f89, 0x13ce: 0x86c1, 0x13cf: 0x86e9, 0x13d0: 0x8711, 0x13d1: 0x8739, - 0x13d2: 0x87b1, 0x13d3: 0x87d9, 0x13d4: 0x8801, 0x13d5: 0x8829, 0x13d6: 0x8fb1, 0x13d7: 0x88a1, - 0x13d8: 0x88c9, 0x13d9: 0x8fd9, 0x13da: 0x8941, 0x13db: 0x8969, 0x13dc: 0x8991, 0x13dd: 0x89b9, - 0x13de: 0x9001, 0x13df: 0x7c71, 0x13e0: 0x8ee9, 0x13e1: 0x7d39, 0x13e2: 0x8f11, 0x13e3: 0x7e29, - 0x13e4: 0x8f39, 0x13e5: 0x7ec9, 0x13e6: 0x9029, 0x13e7: 0x80d1, 0x13e8: 0x9051, 0x13e9: 0x9079, - 0x13ea: 0x90a1, 0x13eb: 0x8531, 0x13ec: 0x8559, 0x13ed: 0x8649, 0x13ee: 0x8829, 0x13ef: 0x8fb1, - 0x13f0: 0x89b9, 0x13f1: 0x9001, 0x13f2: 0x90c9, 0x13f3: 0x9101, 0x13f4: 0x9139, 0x13f5: 0x9171, - 0x13f6: 0x9199, 0x13f7: 0x91c1, 0x13f8: 0x91e9, 0x13f9: 0x9211, 0x13fa: 0x9239, 0x13fb: 0x9261, - 0x13fc: 0x9289, 0x13fd: 0x92b1, 0x13fe: 0x92d9, 0x13ff: 0x9301, + 0x13c0: 0x85f9, 0x13c1: 0x8621, 0x13c2: 0x8649, 0x13c3: 0x8671, 0x13c4: 0x8699, 0x13c5: 0x86c1, + 0x13c6: 0x86e9, 0x13c7: 0x8711, 0x13c8: 0x8739, 0x13c9: 0x8761, 0x13ca: 0x8789, 0x13cb: 0x87b1, + 0x13cc: 0x87d9, 0x13cd: 0x8801, 0x13ce: 0x8829, 0x13cf: 0x8851, 0x13d0: 0x8879, 0x13d1: 0x88a1, + 0x13d2: 0x88c9, 0x13d3: 0x88f1, 0x13d4: 0x8919, 0x13d5: 0x8941, 0x13d6: 0x8969, 0x13d7: 0x8991, + 0x13d8: 0x89b9, 0x13d9: 0x89e1, 0x13da: 0x8a09, 0x13db: 0x8a31, 0x13dc: 0x8a59, 0x13dd: 0x8a81, + 0x13de: 0x8aaa, 0x13df: 0x8ada, 0x13e0: 0x8b0a, 0x13e1: 0x8b3a, 0x13e2: 0x8b6a, 0x13e3: 0x8b9a, + 0x13e4: 0x8bc9, 0x13e5: 0x8bf1, 0x13e6: 0x7c71, 0x13e7: 0x8c19, 0x13e8: 0x7be1, 0x13e9: 0x7c99, + 0x13ea: 0x8c41, 0x13eb: 0x8c69, 0x13ec: 0x7d39, 0x13ed: 0x8c91, 0x13ee: 0x7d61, 0x13ef: 0x7d89, + 0x13f0: 0x8cb9, 0x13f1: 0x8ce1, 0x13f2: 0x7e29, 0x13f3: 0x8d09, 0x13f4: 0x7e51, 0x13f5: 
0x7e79, + 0x13f6: 0x8d31, 0x13f7: 0x8d59, 0x13f8: 0x7ec9, 0x13f9: 0x8d81, 0x13fa: 0x7ef1, 0x13fb: 0x7f19, + 0x13fc: 0x83a1, 0x13fd: 0x83c9, 0x13fe: 0x8441, 0x13ff: 0x8469, // Block 0x50, offset 0x1400 - 0x1400: 0x9329, 0x1401: 0x9351, 0x1402: 0x9379, 0x1403: 0x93a1, 0x1404: 0x93c9, 0x1405: 0x93f1, - 0x1406: 0x9419, 0x1407: 0x9441, 0x1408: 0x9469, 0x1409: 0x9491, 0x140a: 0x94b9, 0x140b: 0x94e1, - 0x140c: 0x9079, 0x140d: 0x9509, 0x140e: 0x9531, 0x140f: 0x9559, 0x1410: 0x9581, 0x1411: 0x9171, - 0x1412: 0x9199, 0x1413: 0x91c1, 0x1414: 0x91e9, 0x1415: 0x9211, 0x1416: 0x9239, 0x1417: 0x9261, - 0x1418: 0x9289, 0x1419: 0x92b1, 0x141a: 0x92d9, 0x141b: 0x9301, 0x141c: 0x9329, 0x141d: 0x9351, - 0x141e: 0x9379, 0x141f: 0x93a1, 0x1420: 0x93c9, 0x1421: 0x93f1, 0x1422: 0x9419, 0x1423: 0x9441, - 0x1424: 0x9469, 0x1425: 0x9491, 0x1426: 0x94b9, 0x1427: 0x94e1, 0x1428: 0x9079, 0x1429: 0x9509, - 0x142a: 0x9531, 0x142b: 0x9559, 0x142c: 0x9581, 0x142d: 0x9491, 0x142e: 0x94b9, 0x142f: 0x94e1, - 0x1430: 0x9079, 0x1431: 0x9051, 0x1432: 0x90a1, 0x1433: 0x8211, 0x1434: 0x8059, 0x1435: 0x8081, - 0x1436: 0x80a9, 0x1437: 0x9491, 0x1438: 0x94b9, 0x1439: 0x94e1, 0x143a: 0x8211, 0x143b: 0x8239, - 0x143c: 0x95a9, 0x143d: 0x95a9, 0x143e: 0x0018, 0x143f: 0x0018, + 0x1400: 0x8491, 0x1401: 0x8531, 0x1402: 0x8559, 0x1403: 0x8581, 0x1404: 0x85a9, 0x1405: 0x8649, + 0x1406: 0x8671, 0x1407: 0x8699, 0x1408: 0x8da9, 0x1409: 0x8739, 0x140a: 0x8dd1, 0x140b: 0x8df9, + 0x140c: 0x8829, 0x140d: 0x8e21, 0x140e: 0x8851, 0x140f: 0x8879, 0x1410: 0x8a81, 0x1411: 0x8e49, + 0x1412: 0x8e71, 0x1413: 0x89b9, 0x1414: 0x8e99, 0x1415: 0x89e1, 0x1416: 0x8a09, 0x1417: 0x7c21, + 0x1418: 0x7c49, 0x1419: 0x8ec1, 0x141a: 0x7c71, 0x141b: 0x8ee9, 0x141c: 0x7cc1, 0x141d: 0x7ce9, + 0x141e: 0x7d11, 0x141f: 0x7d39, 0x1420: 0x8f11, 0x1421: 0x7db1, 0x1422: 0x7dd9, 0x1423: 0x7e01, + 0x1424: 0x7e29, 0x1425: 0x8f39, 0x1426: 0x7ec9, 0x1427: 0x7f41, 0x1428: 0x7f69, 0x1429: 0x7f91, + 0x142a: 0x7fb9, 0x142b: 0x7fe1, 0x142c: 0x8031, 0x142d: 0x8059, 0x142e: 0x8081, 0x142f: 0x80a9, + 0x1430: 0x80d1, 0x1431: 0x80f9, 0x1432: 0x8f61, 0x1433: 0x8121, 0x1434: 0x8149, 0x1435: 0x8171, + 0x1436: 0x8199, 0x1437: 0x81c1, 0x1438: 0x81e9, 0x1439: 0x8239, 0x143a: 0x8261, 0x143b: 0x8289, + 0x143c: 0x82b1, 0x143d: 0x82d9, 0x143e: 0x8301, 0x143f: 0x8329, // Block 0x51, offset 0x1440 - 0x1440: 0x0040, 0x1441: 0x0040, 0x1442: 0x0040, 0x1443: 0x0040, 0x1444: 0x0040, 0x1445: 0x0040, - 0x1446: 0x0040, 0x1447: 0x0040, 0x1448: 0x0040, 0x1449: 0x0040, 0x144a: 0x0040, 0x144b: 0x0040, - 0x144c: 0x0040, 0x144d: 0x0040, 0x144e: 0x0040, 0x144f: 0x0040, 0x1450: 0x95d1, 0x1451: 0x9609, - 0x1452: 0x9609, 0x1453: 0x9641, 0x1454: 0x9679, 0x1455: 0x96b1, 0x1456: 0x96e9, 0x1457: 0x9721, - 0x1458: 0x9759, 0x1459: 0x9759, 0x145a: 0x9791, 0x145b: 0x97c9, 0x145c: 0x9801, 0x145d: 0x9839, - 0x145e: 0x9871, 0x145f: 0x98a9, 0x1460: 0x98a9, 0x1461: 0x98e1, 0x1462: 0x9919, 0x1463: 0x9919, - 0x1464: 0x9951, 0x1465: 0x9951, 0x1466: 0x9989, 0x1467: 0x99c1, 0x1468: 0x99c1, 0x1469: 0x99f9, - 0x146a: 0x9a31, 0x146b: 0x9a31, 0x146c: 0x9a69, 0x146d: 0x9a69, 0x146e: 0x9aa1, 0x146f: 0x9ad9, - 0x1470: 0x9ad9, 0x1471: 0x9b11, 0x1472: 0x9b11, 0x1473: 0x9b49, 0x1474: 0x9b81, 0x1475: 0x9bb9, - 0x1476: 0x9bf1, 0x1477: 0x9bf1, 0x1478: 0x9c29, 0x1479: 0x9c61, 0x147a: 0x9c99, 0x147b: 0x9cd1, - 0x147c: 0x9d09, 0x147d: 0x9d09, 0x147e: 0x9d41, 0x147f: 0x9d79, + 0x1440: 0x8351, 0x1441: 0x8379, 0x1442: 0x83f1, 0x1443: 0x8419, 0x1444: 0x84b9, 0x1445: 0x84e1, + 0x1446: 0x8509, 0x1447: 0x8531, 0x1448: 0x8559, 0x1449: 0x85d1, 0x144a: 0x85f9, 
0x144b: 0x8621, + 0x144c: 0x8649, 0x144d: 0x8f89, 0x144e: 0x86c1, 0x144f: 0x86e9, 0x1450: 0x8711, 0x1451: 0x8739, + 0x1452: 0x87b1, 0x1453: 0x87d9, 0x1454: 0x8801, 0x1455: 0x8829, 0x1456: 0x8fb1, 0x1457: 0x88a1, + 0x1458: 0x88c9, 0x1459: 0x8fd9, 0x145a: 0x8941, 0x145b: 0x8969, 0x145c: 0x8991, 0x145d: 0x89b9, + 0x145e: 0x9001, 0x145f: 0x7c71, 0x1460: 0x8ee9, 0x1461: 0x7d39, 0x1462: 0x8f11, 0x1463: 0x7e29, + 0x1464: 0x8f39, 0x1465: 0x7ec9, 0x1466: 0x9029, 0x1467: 0x80d1, 0x1468: 0x9051, 0x1469: 0x9079, + 0x146a: 0x90a1, 0x146b: 0x8531, 0x146c: 0x8559, 0x146d: 0x8649, 0x146e: 0x8829, 0x146f: 0x8fb1, + 0x1470: 0x89b9, 0x1471: 0x9001, 0x1472: 0x90c9, 0x1473: 0x9101, 0x1474: 0x9139, 0x1475: 0x9171, + 0x1476: 0x9199, 0x1477: 0x91c1, 0x1478: 0x91e9, 0x1479: 0x9211, 0x147a: 0x9239, 0x147b: 0x9261, + 0x147c: 0x9289, 0x147d: 0x92b1, 0x147e: 0x92d9, 0x147f: 0x9301, // Block 0x52, offset 0x1480 - 0x1480: 0xa949, 0x1481: 0xa981, 0x1482: 0xa9b9, 0x1483: 0xa8a1, 0x1484: 0x9bb9, 0x1485: 0x9989, - 0x1486: 0xa9f1, 0x1487: 0xaa29, 0x1488: 0x0040, 0x1489: 0x0040, 0x148a: 0x0040, 0x148b: 0x0040, - 0x148c: 0x0040, 0x148d: 0x0040, 0x148e: 0x0040, 0x148f: 0x0040, 0x1490: 0x0040, 0x1491: 0x0040, - 0x1492: 0x0040, 0x1493: 0x0040, 0x1494: 0x0040, 0x1495: 0x0040, 0x1496: 0x0040, 0x1497: 0x0040, - 0x1498: 0x0040, 0x1499: 0x0040, 0x149a: 0x0040, 0x149b: 0x0040, 0x149c: 0x0040, 0x149d: 0x0040, - 0x149e: 0x0040, 0x149f: 0x0040, 0x14a0: 0x0040, 0x14a1: 0x0040, 0x14a2: 0x0040, 0x14a3: 0x0040, - 0x14a4: 0x0040, 0x14a5: 0x0040, 0x14a6: 0x0040, 0x14a7: 0x0040, 0x14a8: 0x0040, 0x14a9: 0x0040, - 0x14aa: 0x0040, 0x14ab: 0x0040, 0x14ac: 0x0040, 0x14ad: 0x0040, 0x14ae: 0x0040, 0x14af: 0x0040, - 0x14b0: 0xaa61, 0x14b1: 0xaa99, 0x14b2: 0xaad1, 0x14b3: 0xab19, 0x14b4: 0xab61, 0x14b5: 0xaba9, - 0x14b6: 0xabf1, 0x14b7: 0xac39, 0x14b8: 0xac81, 0x14b9: 0xacc9, 0x14ba: 0xad02, 0x14bb: 0xae12, - 0x14bc: 0xae91, 0x14bd: 0x0018, 0x14be: 0x0040, 0x14bf: 0x0040, + 0x1480: 0x9329, 0x1481: 0x9351, 0x1482: 0x9379, 0x1483: 0x93a1, 0x1484: 0x93c9, 0x1485: 0x93f1, + 0x1486: 0x9419, 0x1487: 0x9441, 0x1488: 0x9469, 0x1489: 0x9491, 0x148a: 0x94b9, 0x148b: 0x94e1, + 0x148c: 0x9079, 0x148d: 0x9509, 0x148e: 0x9531, 0x148f: 0x9559, 0x1490: 0x9581, 0x1491: 0x9171, + 0x1492: 0x9199, 0x1493: 0x91c1, 0x1494: 0x91e9, 0x1495: 0x9211, 0x1496: 0x9239, 0x1497: 0x9261, + 0x1498: 0x9289, 0x1499: 0x92b1, 0x149a: 0x92d9, 0x149b: 0x9301, 0x149c: 0x9329, 0x149d: 0x9351, + 0x149e: 0x9379, 0x149f: 0x93a1, 0x14a0: 0x93c9, 0x14a1: 0x93f1, 0x14a2: 0x9419, 0x14a3: 0x9441, + 0x14a4: 0x9469, 0x14a5: 0x9491, 0x14a6: 0x94b9, 0x14a7: 0x94e1, 0x14a8: 0x9079, 0x14a9: 0x9509, + 0x14aa: 0x9531, 0x14ab: 0x9559, 0x14ac: 0x9581, 0x14ad: 0x9491, 0x14ae: 0x94b9, 0x14af: 0x94e1, + 0x14b0: 0x9079, 0x14b1: 0x9051, 0x14b2: 0x90a1, 0x14b3: 0x8211, 0x14b4: 0x8059, 0x14b5: 0x8081, + 0x14b6: 0x80a9, 0x14b7: 0x9491, 0x14b8: 0x94b9, 0x14b9: 0x94e1, 0x14ba: 0x8211, 0x14bb: 0x8239, + 0x14bc: 0x95a9, 0x14bd: 0x95a9, 0x14be: 0x0018, 0x14bf: 0x0018, // Block 0x53, offset 0x14c0 - 0x14c0: 0x13c0, 0x14c1: 0x13c0, 0x14c2: 0x13c0, 0x14c3: 0x13c0, 0x14c4: 0x13c0, 0x14c5: 0x13c0, - 0x14c6: 0x13c0, 0x14c7: 0x13c0, 0x14c8: 0x13c0, 0x14c9: 0x13c0, 0x14ca: 0x13c0, 0x14cb: 0x13c0, - 0x14cc: 0x13c0, 0x14cd: 0x13c0, 0x14ce: 0x13c0, 0x14cf: 0x13c0, 0x14d0: 0xaeda, 0x14d1: 0x7d55, - 0x14d2: 0x0040, 0x14d3: 0xaeea, 0x14d4: 0x03c2, 0x14d5: 0xaefa, 0x14d6: 0xaf0a, 0x14d7: 0x7d75, - 0x14d8: 0x7d95, 0x14d9: 0x0040, 0x14da: 0x0040, 0x14db: 0x0040, 0x14dc: 0x0040, 0x14dd: 0x0040, - 0x14de: 0x0040, 0x14df: 0x0040, 0x14e0: 
0x1308, 0x14e1: 0x1308, 0x14e2: 0x1308, 0x14e3: 0x1308, - 0x14e4: 0x1308, 0x14e5: 0x1308, 0x14e6: 0x1308, 0x14e7: 0x1308, 0x14e8: 0x1308, 0x14e9: 0x1308, - 0x14ea: 0x1308, 0x14eb: 0x1308, 0x14ec: 0x1308, 0x14ed: 0x1308, 0x14ee: 0x1308, 0x14ef: 0x1308, - 0x14f0: 0x0040, 0x14f1: 0x7db5, 0x14f2: 0x7dd5, 0x14f3: 0xaf1a, 0x14f4: 0xaf1a, 0x14f5: 0x1fd2, - 0x14f6: 0x1fe2, 0x14f7: 0xaf2a, 0x14f8: 0xaf3a, 0x14f9: 0x7df5, 0x14fa: 0x7e15, 0x14fb: 0x7e35, - 0x14fc: 0x7df5, 0x14fd: 0x7e55, 0x14fe: 0x7e75, 0x14ff: 0x7e55, + 0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 0x0040, + 0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040, + 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x95d1, 0x14d1: 0x9609, + 0x14d2: 0x9609, 0x14d3: 0x9641, 0x14d4: 0x9679, 0x14d5: 0x96b1, 0x14d6: 0x96e9, 0x14d7: 0x9721, + 0x14d8: 0x9759, 0x14d9: 0x9759, 0x14da: 0x9791, 0x14db: 0x97c9, 0x14dc: 0x9801, 0x14dd: 0x9839, + 0x14de: 0x9871, 0x14df: 0x98a9, 0x14e0: 0x98a9, 0x14e1: 0x98e1, 0x14e2: 0x9919, 0x14e3: 0x9919, + 0x14e4: 0x9951, 0x14e5: 0x9951, 0x14e6: 0x9989, 0x14e7: 0x99c1, 0x14e8: 0x99c1, 0x14e9: 0x99f9, + 0x14ea: 0x9a31, 0x14eb: 0x9a31, 0x14ec: 0x9a69, 0x14ed: 0x9a69, 0x14ee: 0x9aa1, 0x14ef: 0x9ad9, + 0x14f0: 0x9ad9, 0x14f1: 0x9b11, 0x14f2: 0x9b11, 0x14f3: 0x9b49, 0x14f4: 0x9b81, 0x14f5: 0x9bb9, + 0x14f6: 0x9bf1, 0x14f7: 0x9bf1, 0x14f8: 0x9c29, 0x14f9: 0x9c61, 0x14fa: 0x9c99, 0x14fb: 0x9cd1, + 0x14fc: 0x9d09, 0x14fd: 0x9d09, 0x14fe: 0x9d41, 0x14ff: 0x9d79, // Block 0x54, offset 0x1500 - 0x1500: 0x7e95, 0x1501: 0x7eb5, 0x1502: 0x7ed5, 0x1503: 0x7eb5, 0x1504: 0x7ef5, 0x1505: 0x0018, - 0x1506: 0x0018, 0x1507: 0xaf4a, 0x1508: 0xaf5a, 0x1509: 0x7f16, 0x150a: 0x7f36, 0x150b: 0x7f56, - 0x150c: 0x7f76, 0x150d: 0xaf1a, 0x150e: 0xaf1a, 0x150f: 0xaf1a, 0x1510: 0xaeda, 0x1511: 0x7f95, - 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x03c2, 0x1515: 0xaeea, 0x1516: 0xaf0a, 0x1517: 0xaefa, - 0x1518: 0x7fb5, 0x1519: 0x1fd2, 0x151a: 0x1fe2, 0x151b: 0xaf2a, 0x151c: 0xaf3a, 0x151d: 0x7e95, - 0x151e: 0x7ef5, 0x151f: 0xaf6a, 0x1520: 0xaf7a, 0x1521: 0xaf8a, 0x1522: 0x1fb2, 0x1523: 0xaf99, - 0x1524: 0xafaa, 0x1525: 0xafba, 0x1526: 0x1fc2, 0x1527: 0x0040, 0x1528: 0xafca, 0x1529: 0xafda, - 0x152a: 0xafea, 0x152b: 0xaffa, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, - 0x1530: 0x7fd6, 0x1531: 0xb009, 0x1532: 0x7ff6, 0x1533: 0x0008, 0x1534: 0x8016, 0x1535: 0x0040, - 0x1536: 0x8036, 0x1537: 0xb031, 0x1538: 0x8056, 0x1539: 0xb059, 0x153a: 0x8076, 0x153b: 0xb081, - 0x153c: 0x8096, 0x153d: 0xb0a9, 0x153e: 0x80b6, 0x153f: 0xb0d1, + 0x1500: 0xa949, 0x1501: 0xa981, 0x1502: 0xa9b9, 0x1503: 0xa8a1, 0x1504: 0x9bb9, 0x1505: 0x9989, + 0x1506: 0xa9f1, 0x1507: 0xaa29, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040, + 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0040, 0x1510: 0x0040, 0x1511: 0x0040, + 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040, + 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040, + 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040, + 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040, + 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, + 0x1530: 0xaa61, 0x1531: 0xaa99, 0x1532: 0xaad1, 0x1533: 0xab19, 0x1534: 0xab61, 0x1535: 0xaba9, + 0x1536: 0xabf1, 0x1537: 0xac39, 
0x1538: 0xac81, 0x1539: 0xacc9, 0x153a: 0xad02, 0x153b: 0xae12, + 0x153c: 0xae91, 0x153d: 0x0018, 0x153e: 0x0040, 0x153f: 0x0040, // Block 0x55, offset 0x1540 - 0x1540: 0xb0f9, 0x1541: 0xb111, 0x1542: 0xb111, 0x1543: 0xb129, 0x1544: 0xb129, 0x1545: 0xb141, - 0x1546: 0xb141, 0x1547: 0xb159, 0x1548: 0xb159, 0x1549: 0xb171, 0x154a: 0xb171, 0x154b: 0xb171, - 0x154c: 0xb171, 0x154d: 0xb189, 0x154e: 0xb189, 0x154f: 0xb1a1, 0x1550: 0xb1a1, 0x1551: 0xb1a1, - 0x1552: 0xb1a1, 0x1553: 0xb1b9, 0x1554: 0xb1b9, 0x1555: 0xb1d1, 0x1556: 0xb1d1, 0x1557: 0xb1d1, - 0x1558: 0xb1d1, 0x1559: 0xb1e9, 0x155a: 0xb1e9, 0x155b: 0xb1e9, 0x155c: 0xb1e9, 0x155d: 0xb201, - 0x155e: 0xb201, 0x155f: 0xb201, 0x1560: 0xb201, 0x1561: 0xb219, 0x1562: 0xb219, 0x1563: 0xb219, - 0x1564: 0xb219, 0x1565: 0xb231, 0x1566: 0xb231, 0x1567: 0xb231, 0x1568: 0xb231, 0x1569: 0xb249, - 0x156a: 0xb249, 0x156b: 0xb261, 0x156c: 0xb261, 0x156d: 0xb279, 0x156e: 0xb279, 0x156f: 0xb291, - 0x1570: 0xb291, 0x1571: 0xb2a9, 0x1572: 0xb2a9, 0x1573: 0xb2a9, 0x1574: 0xb2a9, 0x1575: 0xb2c1, - 0x1576: 0xb2c1, 0x1577: 0xb2c1, 0x1578: 0xb2c1, 0x1579: 0xb2d9, 0x157a: 0xb2d9, 0x157b: 0xb2d9, - 0x157c: 0xb2d9, 0x157d: 0xb2f1, 0x157e: 0xb2f1, 0x157f: 0xb2f1, + 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0, + 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0, + 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0xaeda, 0x1551: 0x7d55, + 0x1552: 0x0040, 0x1553: 0xaeea, 0x1554: 0x03c2, 0x1555: 0xaefa, 0x1556: 0xaf0a, 0x1557: 0x7d75, + 0x1558: 0x7d95, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040, + 0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308, + 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308, + 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308, + 0x1570: 0x0040, 0x1571: 0x7db5, 0x1572: 0x7dd5, 0x1573: 0xaf1a, 0x1574: 0xaf1a, 0x1575: 0x1fd2, + 0x1576: 0x1fe2, 0x1577: 0xaf2a, 0x1578: 0xaf3a, 0x1579: 0x7df5, 0x157a: 0x7e15, 0x157b: 0x7e35, + 0x157c: 0x7df5, 0x157d: 0x7e55, 0x157e: 0x7e75, 0x157f: 0x7e55, // Block 0x56, offset 0x1580 - 0x1580: 0xb2f1, 0x1581: 0xb309, 0x1582: 0xb309, 0x1583: 0xb309, 0x1584: 0xb309, 0x1585: 0xb321, - 0x1586: 0xb321, 0x1587: 0xb321, 0x1588: 0xb321, 0x1589: 0xb339, 0x158a: 0xb339, 0x158b: 0xb339, - 0x158c: 0xb339, 0x158d: 0xb351, 0x158e: 0xb351, 0x158f: 0xb351, 0x1590: 0xb351, 0x1591: 0xb369, - 0x1592: 0xb369, 0x1593: 0xb369, 0x1594: 0xb369, 0x1595: 0xb381, 0x1596: 0xb381, 0x1597: 0xb381, - 0x1598: 0xb381, 0x1599: 0xb399, 0x159a: 0xb399, 0x159b: 0xb399, 0x159c: 0xb399, 0x159d: 0xb3b1, - 0x159e: 0xb3b1, 0x159f: 0xb3b1, 0x15a0: 0xb3b1, 0x15a1: 0xb3c9, 0x15a2: 0xb3c9, 0x15a3: 0xb3c9, - 0x15a4: 0xb3c9, 0x15a5: 0xb3e1, 0x15a6: 0xb3e1, 0x15a7: 0xb3e1, 0x15a8: 0xb3e1, 0x15a9: 0xb3f9, - 0x15aa: 0xb3f9, 0x15ab: 0xb3f9, 0x15ac: 0xb3f9, 0x15ad: 0xb411, 0x15ae: 0xb411, 0x15af: 0x7ab1, - 0x15b0: 0x7ab1, 0x15b1: 0xb429, 0x15b2: 0xb429, 0x15b3: 0xb429, 0x15b4: 0xb429, 0x15b5: 0xb441, - 0x15b6: 0xb441, 0x15b7: 0xb469, 0x15b8: 0xb469, 0x15b9: 0xb491, 0x15ba: 0xb491, 0x15bb: 0xb4b9, - 0x15bc: 0xb4b9, 0x15bd: 0x0040, 0x15be: 0x0040, 0x15bf: 0x03c0, + 0x1580: 0x7e95, 0x1581: 0x7eb5, 0x1582: 0x7ed5, 0x1583: 0x7eb5, 0x1584: 0x7ef5, 0x1585: 0x0018, + 0x1586: 0x0018, 0x1587: 0xaf4a, 0x1588: 0xaf5a, 0x1589: 0x7f16, 0x158a: 0x7f36, 0x158b: 0x7f56, + 0x158c: 0x7f76, 0x158d: 
0xaf1a, 0x158e: 0xaf1a, 0x158f: 0xaf1a, 0x1590: 0xaeda, 0x1591: 0x7f95, + 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x03c2, 0x1595: 0xaeea, 0x1596: 0xaf0a, 0x1597: 0xaefa, + 0x1598: 0x7fb5, 0x1599: 0x1fd2, 0x159a: 0x1fe2, 0x159b: 0xaf2a, 0x159c: 0xaf3a, 0x159d: 0x7e95, + 0x159e: 0x7ef5, 0x159f: 0xaf6a, 0x15a0: 0xaf7a, 0x15a1: 0xaf8a, 0x15a2: 0x1fb2, 0x15a3: 0xaf99, + 0x15a4: 0xafaa, 0x15a5: 0xafba, 0x15a6: 0x1fc2, 0x15a7: 0x0040, 0x15a8: 0xafca, 0x15a9: 0xafda, + 0x15aa: 0xafea, 0x15ab: 0xaffa, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040, + 0x15b0: 0x7fd6, 0x15b1: 0xb009, 0x15b2: 0x7ff6, 0x15b3: 0x0808, 0x15b4: 0x8016, 0x15b5: 0x0040, + 0x15b6: 0x8036, 0x15b7: 0xb031, 0x15b8: 0x8056, 0x15b9: 0xb059, 0x15ba: 0x8076, 0x15bb: 0xb081, + 0x15bc: 0x8096, 0x15bd: 0xb0a9, 0x15be: 0x80b6, 0x15bf: 0xb0d1, // Block 0x57, offset 0x15c0 - 0x15c0: 0x0040, 0x15c1: 0xaefa, 0x15c2: 0xb4e2, 0x15c3: 0xaf6a, 0x15c4: 0xafda, 0x15c5: 0xafea, - 0x15c6: 0xaf7a, 0x15c7: 0xb4f2, 0x15c8: 0x1fd2, 0x15c9: 0x1fe2, 0x15ca: 0xaf8a, 0x15cb: 0x1fb2, - 0x15cc: 0xaeda, 0x15cd: 0xaf99, 0x15ce: 0x29d1, 0x15cf: 0xb502, 0x15d0: 0x1f41, 0x15d1: 0x00c9, - 0x15d2: 0x0069, 0x15d3: 0x0079, 0x15d4: 0x1f51, 0x15d5: 0x1f61, 0x15d6: 0x1f71, 0x15d7: 0x1f81, - 0x15d8: 0x1f91, 0x15d9: 0x1fa1, 0x15da: 0xaeea, 0x15db: 0x03c2, 0x15dc: 0xafaa, 0x15dd: 0x1fc2, - 0x15de: 0xafba, 0x15df: 0xaf0a, 0x15e0: 0xaffa, 0x15e1: 0x0039, 0x15e2: 0x0ee9, 0x15e3: 0x1159, - 0x15e4: 0x0ef9, 0x15e5: 0x0f09, 0x15e6: 0x1199, 0x15e7: 0x0f31, 0x15e8: 0x0249, 0x15e9: 0x0f41, - 0x15ea: 0x0259, 0x15eb: 0x0f51, 0x15ec: 0x0359, 0x15ed: 0x0f61, 0x15ee: 0x0f71, 0x15ef: 0x00d9, - 0x15f0: 0x0f99, 0x15f1: 0x2039, 0x15f2: 0x0269, 0x15f3: 0x01d9, 0x15f4: 0x0fa9, 0x15f5: 0x0fb9, - 0x15f6: 0x1089, 0x15f7: 0x0279, 0x15f8: 0x0369, 0x15f9: 0x0289, 0x15fa: 0x13d1, 0x15fb: 0xaf4a, - 0x15fc: 0xafca, 0x15fd: 0xaf5a, 0x15fe: 0xb512, 0x15ff: 0xaf1a, + 0x15c0: 0xb0f9, 0x15c1: 0xb111, 0x15c2: 0xb111, 0x15c3: 0xb129, 0x15c4: 0xb129, 0x15c5: 0xb141, + 0x15c6: 0xb141, 0x15c7: 0xb159, 0x15c8: 0xb159, 0x15c9: 0xb171, 0x15ca: 0xb171, 0x15cb: 0xb171, + 0x15cc: 0xb171, 0x15cd: 0xb189, 0x15ce: 0xb189, 0x15cf: 0xb1a1, 0x15d0: 0xb1a1, 0x15d1: 0xb1a1, + 0x15d2: 0xb1a1, 0x15d3: 0xb1b9, 0x15d4: 0xb1b9, 0x15d5: 0xb1d1, 0x15d6: 0xb1d1, 0x15d7: 0xb1d1, + 0x15d8: 0xb1d1, 0x15d9: 0xb1e9, 0x15da: 0xb1e9, 0x15db: 0xb1e9, 0x15dc: 0xb1e9, 0x15dd: 0xb201, + 0x15de: 0xb201, 0x15df: 0xb201, 0x15e0: 0xb201, 0x15e1: 0xb219, 0x15e2: 0xb219, 0x15e3: 0xb219, + 0x15e4: 0xb219, 0x15e5: 0xb231, 0x15e6: 0xb231, 0x15e7: 0xb231, 0x15e8: 0xb231, 0x15e9: 0xb249, + 0x15ea: 0xb249, 0x15eb: 0xb261, 0x15ec: 0xb261, 0x15ed: 0xb279, 0x15ee: 0xb279, 0x15ef: 0xb291, + 0x15f0: 0xb291, 0x15f1: 0xb2a9, 0x15f2: 0xb2a9, 0x15f3: 0xb2a9, 0x15f4: 0xb2a9, 0x15f5: 0xb2c1, + 0x15f6: 0xb2c1, 0x15f7: 0xb2c1, 0x15f8: 0xb2c1, 0x15f9: 0xb2d9, 0x15fa: 0xb2d9, 0x15fb: 0xb2d9, + 0x15fc: 0xb2d9, 0x15fd: 0xb2f1, 0x15fe: 0xb2f1, 0x15ff: 0xb2f1, // Block 0x58, offset 0x1600 - 0x1600: 0x1caa, 0x1601: 0x0039, 0x1602: 0x0ee9, 0x1603: 0x1159, 0x1604: 0x0ef9, 0x1605: 0x0f09, - 0x1606: 0x1199, 0x1607: 0x0f31, 0x1608: 0x0249, 0x1609: 0x0f41, 0x160a: 0x0259, 0x160b: 0x0f51, - 0x160c: 0x0359, 0x160d: 0x0f61, 0x160e: 0x0f71, 0x160f: 0x00d9, 0x1610: 0x0f99, 0x1611: 0x2039, - 0x1612: 0x0269, 0x1613: 0x01d9, 0x1614: 0x0fa9, 0x1615: 0x0fb9, 0x1616: 0x1089, 0x1617: 0x0279, - 0x1618: 0x0369, 0x1619: 0x0289, 0x161a: 0x13d1, 0x161b: 0xaf2a, 0x161c: 0xb522, 0x161d: 0xaf3a, - 0x161e: 0xb532, 0x161f: 0x80d5, 0x1620: 0x80f5, 0x1621: 0x29d1, 0x1622: 0x8115, 0x1623: 
0x8115, - 0x1624: 0x8135, 0x1625: 0x8155, 0x1626: 0x8175, 0x1627: 0x8195, 0x1628: 0x81b5, 0x1629: 0x81d5, - 0x162a: 0x81f5, 0x162b: 0x8215, 0x162c: 0x8235, 0x162d: 0x8255, 0x162e: 0x8275, 0x162f: 0x8295, - 0x1630: 0x82b5, 0x1631: 0x82d5, 0x1632: 0x82f5, 0x1633: 0x8315, 0x1634: 0x8335, 0x1635: 0x8355, - 0x1636: 0x8375, 0x1637: 0x8395, 0x1638: 0x83b5, 0x1639: 0x83d5, 0x163a: 0x83f5, 0x163b: 0x8415, - 0x163c: 0x81b5, 0x163d: 0x8435, 0x163e: 0x8455, 0x163f: 0x8215, + 0x1600: 0xb2f1, 0x1601: 0xb309, 0x1602: 0xb309, 0x1603: 0xb309, 0x1604: 0xb309, 0x1605: 0xb321, + 0x1606: 0xb321, 0x1607: 0xb321, 0x1608: 0xb321, 0x1609: 0xb339, 0x160a: 0xb339, 0x160b: 0xb339, + 0x160c: 0xb339, 0x160d: 0xb351, 0x160e: 0xb351, 0x160f: 0xb351, 0x1610: 0xb351, 0x1611: 0xb369, + 0x1612: 0xb369, 0x1613: 0xb369, 0x1614: 0xb369, 0x1615: 0xb381, 0x1616: 0xb381, 0x1617: 0xb381, + 0x1618: 0xb381, 0x1619: 0xb399, 0x161a: 0xb399, 0x161b: 0xb399, 0x161c: 0xb399, 0x161d: 0xb3b1, + 0x161e: 0xb3b1, 0x161f: 0xb3b1, 0x1620: 0xb3b1, 0x1621: 0xb3c9, 0x1622: 0xb3c9, 0x1623: 0xb3c9, + 0x1624: 0xb3c9, 0x1625: 0xb3e1, 0x1626: 0xb3e1, 0x1627: 0xb3e1, 0x1628: 0xb3e1, 0x1629: 0xb3f9, + 0x162a: 0xb3f9, 0x162b: 0xb3f9, 0x162c: 0xb3f9, 0x162d: 0xb411, 0x162e: 0xb411, 0x162f: 0x7ab1, + 0x1630: 0x7ab1, 0x1631: 0xb429, 0x1632: 0xb429, 0x1633: 0xb429, 0x1634: 0xb429, 0x1635: 0xb441, + 0x1636: 0xb441, 0x1637: 0xb469, 0x1638: 0xb469, 0x1639: 0xb491, 0x163a: 0xb491, 0x163b: 0xb4b9, + 0x163c: 0xb4b9, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0, // Block 0x59, offset 0x1640 - 0x1640: 0x8475, 0x1641: 0x8495, 0x1642: 0x84b5, 0x1643: 0x84d5, 0x1644: 0x84f5, 0x1645: 0x8515, - 0x1646: 0x8535, 0x1647: 0x8555, 0x1648: 0x84d5, 0x1649: 0x8575, 0x164a: 0x84d5, 0x164b: 0x8595, - 0x164c: 0x8595, 0x164d: 0x85b5, 0x164e: 0x85b5, 0x164f: 0x85d5, 0x1650: 0x8515, 0x1651: 0x85f5, - 0x1652: 0x8615, 0x1653: 0x85f5, 0x1654: 0x8635, 0x1655: 0x8615, 0x1656: 0x8655, 0x1657: 0x8655, - 0x1658: 0x8675, 0x1659: 0x8675, 0x165a: 0x8695, 0x165b: 0x8695, 0x165c: 0x8615, 0x165d: 0x8115, - 0x165e: 0x86b5, 0x165f: 0x86d5, 0x1660: 0x0040, 0x1661: 0x86f5, 0x1662: 0x8715, 0x1663: 0x8735, - 0x1664: 0x8755, 0x1665: 0x8735, 0x1666: 0x8775, 0x1667: 0x8795, 0x1668: 0x87b5, 0x1669: 0x87b5, - 0x166a: 0x87d5, 0x166b: 0x87d5, 0x166c: 0x87f5, 0x166d: 0x87f5, 0x166e: 0x87d5, 0x166f: 0x87d5, - 0x1670: 0x8815, 0x1671: 0x8835, 0x1672: 0x8855, 0x1673: 0x8875, 0x1674: 0x8895, 0x1675: 0x88b5, - 0x1676: 0x88b5, 0x1677: 0x88b5, 0x1678: 0x88d5, 0x1679: 0x88d5, 0x167a: 0x88d5, 0x167b: 0x88d5, - 0x167c: 0x87b5, 0x167d: 0x87b5, 0x167e: 0x87b5, 0x167f: 0x0040, + 0x1640: 0x0040, 0x1641: 0xaefa, 0x1642: 0xb4e2, 0x1643: 0xaf6a, 0x1644: 0xafda, 0x1645: 0xafea, + 0x1646: 0xaf7a, 0x1647: 0xb4f2, 0x1648: 0x1fd2, 0x1649: 0x1fe2, 0x164a: 0xaf8a, 0x164b: 0x1fb2, + 0x164c: 0xaeda, 0x164d: 0xaf99, 0x164e: 0x29d1, 0x164f: 0xb502, 0x1650: 0x1f41, 0x1651: 0x00c9, + 0x1652: 0x0069, 0x1653: 0x0079, 0x1654: 0x1f51, 0x1655: 0x1f61, 0x1656: 0x1f71, 0x1657: 0x1f81, + 0x1658: 0x1f91, 0x1659: 0x1fa1, 0x165a: 0xaeea, 0x165b: 0x03c2, 0x165c: 0xafaa, 0x165d: 0x1fc2, + 0x165e: 0xafba, 0x165f: 0xaf0a, 0x1660: 0xaffa, 0x1661: 0x0039, 0x1662: 0x0ee9, 0x1663: 0x1159, + 0x1664: 0x0ef9, 0x1665: 0x0f09, 0x1666: 0x1199, 0x1667: 0x0f31, 0x1668: 0x0249, 0x1669: 0x0f41, + 0x166a: 0x0259, 0x166b: 0x0f51, 0x166c: 0x0359, 0x166d: 0x0f61, 0x166e: 0x0f71, 0x166f: 0x00d9, + 0x1670: 0x0f99, 0x1671: 0x2039, 0x1672: 0x0269, 0x1673: 0x01d9, 0x1674: 0x0fa9, 0x1675: 0x0fb9, + 0x1676: 0x1089, 0x1677: 0x0279, 0x1678: 0x0369, 0x1679: 0x0289, 0x167a: 0x13d1, 
0x167b: 0xaf4a, + 0x167c: 0xafca, 0x167d: 0xaf5a, 0x167e: 0xb512, 0x167f: 0xaf1a, // Block 0x5a, offset 0x1680 - 0x1680: 0x0040, 0x1681: 0x0040, 0x1682: 0x8715, 0x1683: 0x86f5, 0x1684: 0x88f5, 0x1685: 0x86f5, - 0x1686: 0x8715, 0x1687: 0x86f5, 0x1688: 0x0040, 0x1689: 0x0040, 0x168a: 0x8915, 0x168b: 0x8715, - 0x168c: 0x8935, 0x168d: 0x88f5, 0x168e: 0x8935, 0x168f: 0x8715, 0x1690: 0x0040, 0x1691: 0x0040, - 0x1692: 0x8955, 0x1693: 0x8975, 0x1694: 0x8875, 0x1695: 0x8935, 0x1696: 0x88f5, 0x1697: 0x8935, - 0x1698: 0x0040, 0x1699: 0x0040, 0x169a: 0x8995, 0x169b: 0x89b5, 0x169c: 0x8995, 0x169d: 0x0040, - 0x169e: 0x0040, 0x169f: 0x0040, 0x16a0: 0xb541, 0x16a1: 0xb559, 0x16a2: 0xb571, 0x16a3: 0x89d6, - 0x16a4: 0xb589, 0x16a5: 0xb5a1, 0x16a6: 0x89f5, 0x16a7: 0x0040, 0x16a8: 0x8a15, 0x16a9: 0x8a35, - 0x16aa: 0x8a55, 0x16ab: 0x8a35, 0x16ac: 0x8a75, 0x16ad: 0x8a95, 0x16ae: 0x8ab5, 0x16af: 0x0040, - 0x16b0: 0x0040, 0x16b1: 0x0040, 0x16b2: 0x0040, 0x16b3: 0x0040, 0x16b4: 0x0040, 0x16b5: 0x0040, - 0x16b6: 0x0040, 0x16b7: 0x0040, 0x16b8: 0x0040, 0x16b9: 0x0340, 0x16ba: 0x0340, 0x16bb: 0x0340, - 0x16bc: 0x0040, 0x16bd: 0x0040, 0x16be: 0x0040, 0x16bf: 0x0040, + 0x1680: 0x1caa, 0x1681: 0x0039, 0x1682: 0x0ee9, 0x1683: 0x1159, 0x1684: 0x0ef9, 0x1685: 0x0f09, + 0x1686: 0x1199, 0x1687: 0x0f31, 0x1688: 0x0249, 0x1689: 0x0f41, 0x168a: 0x0259, 0x168b: 0x0f51, + 0x168c: 0x0359, 0x168d: 0x0f61, 0x168e: 0x0f71, 0x168f: 0x00d9, 0x1690: 0x0f99, 0x1691: 0x2039, + 0x1692: 0x0269, 0x1693: 0x01d9, 0x1694: 0x0fa9, 0x1695: 0x0fb9, 0x1696: 0x1089, 0x1697: 0x0279, + 0x1698: 0x0369, 0x1699: 0x0289, 0x169a: 0x13d1, 0x169b: 0xaf2a, 0x169c: 0xb522, 0x169d: 0xaf3a, + 0x169e: 0xb532, 0x169f: 0x80d5, 0x16a0: 0x80f5, 0x16a1: 0x29d1, 0x16a2: 0x8115, 0x16a3: 0x8115, + 0x16a4: 0x8135, 0x16a5: 0x8155, 0x16a6: 0x8175, 0x16a7: 0x8195, 0x16a8: 0x81b5, 0x16a9: 0x81d5, + 0x16aa: 0x81f5, 0x16ab: 0x8215, 0x16ac: 0x8235, 0x16ad: 0x8255, 0x16ae: 0x8275, 0x16af: 0x8295, + 0x16b0: 0x82b5, 0x16b1: 0x82d5, 0x16b2: 0x82f5, 0x16b3: 0x8315, 0x16b4: 0x8335, 0x16b5: 0x8355, + 0x16b6: 0x8375, 0x16b7: 0x8395, 0x16b8: 0x83b5, 0x16b9: 0x83d5, 0x16ba: 0x83f5, 0x16bb: 0x8415, + 0x16bc: 0x81b5, 0x16bd: 0x8435, 0x16be: 0x8455, 0x16bf: 0x8215, // Block 0x5b, offset 0x16c0 - 0x16c0: 0x0208, 0x16c1: 0x0208, 0x16c2: 0x0208, 0x16c3: 0x0208, 0x16c4: 0x0208, 0x16c5: 0x0408, - 0x16c6: 0x0008, 0x16c7: 0x0408, 0x16c8: 0x0018, 0x16c9: 0x0408, 0x16ca: 0x0408, 0x16cb: 0x0008, - 0x16cc: 0x0008, 0x16cd: 0x0108, 0x16ce: 0x0408, 0x16cf: 0x0408, 0x16d0: 0x0408, 0x16d1: 0x0408, - 0x16d2: 0x0408, 0x16d3: 0x0208, 0x16d4: 0x0208, 0x16d5: 0x0208, 0x16d6: 0x0208, 0x16d7: 0x0108, - 0x16d8: 0x0208, 0x16d9: 0x0208, 0x16da: 0x0208, 0x16db: 0x0208, 0x16dc: 0x0208, 0x16dd: 0x0408, - 0x16de: 0x0208, 0x16df: 0x0208, 0x16e0: 0x0208, 0x16e1: 0x0408, 0x16e2: 0x0008, 0x16e3: 0x0008, - 0x16e4: 0x0408, 0x16e5: 0x1308, 0x16e6: 0x1308, 0x16e7: 0x0040, 0x16e8: 0x0040, 0x16e9: 0x0040, - 0x16ea: 0x0040, 0x16eb: 0x0218, 0x16ec: 0x0218, 0x16ed: 0x0218, 0x16ee: 0x0218, 0x16ef: 0x0418, - 0x16f0: 0x0018, 0x16f1: 0x0018, 0x16f2: 0x0018, 0x16f3: 0x0018, 0x16f4: 0x0018, 0x16f5: 0x0018, - 0x16f6: 0x0018, 0x16f7: 0x0040, 0x16f8: 0x0040, 0x16f9: 0x0040, 0x16fa: 0x0040, 0x16fb: 0x0040, - 0x16fc: 0x0040, 0x16fd: 0x0040, 0x16fe: 0x0040, 0x16ff: 0x0040, + 0x16c0: 0x8475, 0x16c1: 0x8495, 0x16c2: 0x84b5, 0x16c3: 0x84d5, 0x16c4: 0x84f5, 0x16c5: 0x8515, + 0x16c6: 0x8535, 0x16c7: 0x8555, 0x16c8: 0x84d5, 0x16c9: 0x8575, 0x16ca: 0x84d5, 0x16cb: 0x8595, + 0x16cc: 0x8595, 0x16cd: 0x85b5, 0x16ce: 0x85b5, 0x16cf: 0x85d5, 0x16d0: 
0x8515, 0x16d1: 0x85f5, + 0x16d2: 0x8615, 0x16d3: 0x85f5, 0x16d4: 0x8635, 0x16d5: 0x8615, 0x16d6: 0x8655, 0x16d7: 0x8655, + 0x16d8: 0x8675, 0x16d9: 0x8675, 0x16da: 0x8695, 0x16db: 0x8695, 0x16dc: 0x8615, 0x16dd: 0x8115, + 0x16de: 0x86b5, 0x16df: 0x86d5, 0x16e0: 0x0040, 0x16e1: 0x86f5, 0x16e2: 0x8715, 0x16e3: 0x8735, + 0x16e4: 0x8755, 0x16e5: 0x8735, 0x16e6: 0x8775, 0x16e7: 0x8795, 0x16e8: 0x87b5, 0x16e9: 0x87b5, + 0x16ea: 0x87d5, 0x16eb: 0x87d5, 0x16ec: 0x87f5, 0x16ed: 0x87f5, 0x16ee: 0x87d5, 0x16ef: 0x87d5, + 0x16f0: 0x8815, 0x16f1: 0x8835, 0x16f2: 0x8855, 0x16f3: 0x8875, 0x16f4: 0x8895, 0x16f5: 0x88b5, + 0x16f6: 0x88b5, 0x16f7: 0x88b5, 0x16f8: 0x88d5, 0x16f9: 0x88d5, 0x16fa: 0x88d5, 0x16fb: 0x88d5, + 0x16fc: 0x87b5, 0x16fd: 0x87b5, 0x16fe: 0x87b5, 0x16ff: 0x0040, // Block 0x5c, offset 0x1700 - 0x1700: 0x0208, 0x1701: 0x0408, 0x1702: 0x0208, 0x1703: 0x0408, 0x1704: 0x0408, 0x1705: 0x0408, - 0x1706: 0x0208, 0x1707: 0x0208, 0x1708: 0x0208, 0x1709: 0x0408, 0x170a: 0x0208, 0x170b: 0x0208, - 0x170c: 0x0408, 0x170d: 0x0208, 0x170e: 0x0408, 0x170f: 0x0408, 0x1710: 0x0208, 0x1711: 0x0408, - 0x1712: 0x0040, 0x1713: 0x0040, 0x1714: 0x0040, 0x1715: 0x0040, 0x1716: 0x0040, 0x1717: 0x0040, - 0x1718: 0x0040, 0x1719: 0x0018, 0x171a: 0x0018, 0x171b: 0x0018, 0x171c: 0x0018, 0x171d: 0x0040, - 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0x0040, 0x1721: 0x0040, 0x1722: 0x0040, 0x1723: 0x0040, - 0x1724: 0x0040, 0x1725: 0x0040, 0x1726: 0x0040, 0x1727: 0x0040, 0x1728: 0x0040, 0x1729: 0x0418, - 0x172a: 0x0418, 0x172b: 0x0418, 0x172c: 0x0418, 0x172d: 0x0218, 0x172e: 0x0218, 0x172f: 0x0018, + 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x8715, 0x1703: 0x86f5, 0x1704: 0x88f5, 0x1705: 0x86f5, + 0x1706: 0x8715, 0x1707: 0x86f5, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x8915, 0x170b: 0x8715, + 0x170c: 0x8935, 0x170d: 0x88f5, 0x170e: 0x8935, 0x170f: 0x8715, 0x1710: 0x0040, 0x1711: 0x0040, + 0x1712: 0x8955, 0x1713: 0x8975, 0x1714: 0x8875, 0x1715: 0x8935, 0x1716: 0x88f5, 0x1717: 0x8935, + 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x8995, 0x171b: 0x89b5, 0x171c: 0x8995, 0x171d: 0x0040, + 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0xb541, 0x1721: 0xb559, 0x1722: 0xb571, 0x1723: 0x89d6, + 0x1724: 0xb589, 0x1725: 0xb5a1, 0x1726: 0x89f5, 0x1727: 0x0040, 0x1728: 0x8a15, 0x1729: 0x8a35, + 0x172a: 0x8a55, 0x172b: 0x8a35, 0x172c: 0x8a75, 0x172d: 0x8a95, 0x172e: 0x8ab5, 0x172f: 0x0040, 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040, - 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0040, 0x173a: 0x0040, 0x173b: 0x0040, + 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340, 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, // Block 0x5d, offset 0x1740 - 0x1740: 0x1308, 0x1741: 0x1308, 0x1742: 0x1008, 0x1743: 0x1008, 0x1744: 0x0040, 0x1745: 0x0008, - 0x1746: 0x0008, 0x1747: 0x0008, 0x1748: 0x0008, 0x1749: 0x0008, 0x174a: 0x0008, 0x174b: 0x0008, - 0x174c: 0x0008, 0x174d: 0x0040, 0x174e: 0x0040, 0x174f: 0x0008, 0x1750: 0x0008, 0x1751: 0x0040, - 0x1752: 0x0040, 0x1753: 0x0008, 0x1754: 0x0008, 0x1755: 0x0008, 0x1756: 0x0008, 0x1757: 0x0008, - 0x1758: 0x0008, 0x1759: 0x0008, 0x175a: 0x0008, 0x175b: 0x0008, 0x175c: 0x0008, 0x175d: 0x0008, - 0x175e: 0x0008, 0x175f: 0x0008, 0x1760: 0x0008, 0x1761: 0x0008, 0x1762: 0x0008, 0x1763: 0x0008, - 0x1764: 0x0008, 0x1765: 0x0008, 0x1766: 0x0008, 0x1767: 0x0008, 0x1768: 0x0008, 0x1769: 0x0040, - 0x176a: 0x0008, 0x176b: 0x0008, 0x176c: 0x0008, 0x176d: 0x0008, 0x176e: 0x0008, 0x176f: 0x0008, - 0x1770: 
0x0008, 0x1771: 0x0040, 0x1772: 0x0008, 0x1773: 0x0008, 0x1774: 0x0040, 0x1775: 0x0008, - 0x1776: 0x0008, 0x1777: 0x0008, 0x1778: 0x0008, 0x1779: 0x0008, 0x177a: 0x0040, 0x177b: 0x0040, - 0x177c: 0x1308, 0x177d: 0x0008, 0x177e: 0x1008, 0x177f: 0x1008, + 0x1740: 0x0a08, 0x1741: 0x0a08, 0x1742: 0x0a08, 0x1743: 0x0a08, 0x1744: 0x0a08, 0x1745: 0x0c08, + 0x1746: 0x0808, 0x1747: 0x0c08, 0x1748: 0x0818, 0x1749: 0x0c08, 0x174a: 0x0c08, 0x174b: 0x0808, + 0x174c: 0x0808, 0x174d: 0x0908, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0c08, 0x1751: 0x0c08, + 0x1752: 0x0c08, 0x1753: 0x0a08, 0x1754: 0x0a08, 0x1755: 0x0a08, 0x1756: 0x0a08, 0x1757: 0x0908, + 0x1758: 0x0a08, 0x1759: 0x0a08, 0x175a: 0x0a08, 0x175b: 0x0a08, 0x175c: 0x0a08, 0x175d: 0x0c08, + 0x175e: 0x0a08, 0x175f: 0x0a08, 0x1760: 0x0a08, 0x1761: 0x0c08, 0x1762: 0x0808, 0x1763: 0x0808, + 0x1764: 0x0c08, 0x1765: 0x3308, 0x1766: 0x3308, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040, + 0x176a: 0x0040, 0x176b: 0x0a18, 0x176c: 0x0a18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0c18, + 0x1770: 0x0818, 0x1771: 0x0818, 0x1772: 0x0818, 0x1773: 0x0818, 0x1774: 0x0818, 0x1775: 0x0818, + 0x1776: 0x0818, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040, + 0x177c: 0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040, // Block 0x5e, offset 0x1780 - 0x1780: 0x1308, 0x1781: 0x1008, 0x1782: 0x1008, 0x1783: 0x1008, 0x1784: 0x1008, 0x1785: 0x0040, - 0x1786: 0x0040, 0x1787: 0x1008, 0x1788: 0x1008, 0x1789: 0x0040, 0x178a: 0x0040, 0x178b: 0x1008, - 0x178c: 0x1008, 0x178d: 0x1808, 0x178e: 0x0040, 0x178f: 0x0040, 0x1790: 0x0008, 0x1791: 0x0040, - 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x1008, - 0x1798: 0x0040, 0x1799: 0x0040, 0x179a: 0x0040, 0x179b: 0x0040, 0x179c: 0x0040, 0x179d: 0x0008, - 0x179e: 0x0008, 0x179f: 0x0008, 0x17a0: 0x0008, 0x17a1: 0x0008, 0x17a2: 0x1008, 0x17a3: 0x1008, - 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x1308, 0x17a7: 0x1308, 0x17a8: 0x1308, 0x17a9: 0x1308, - 0x17aa: 0x1308, 0x17ab: 0x1308, 0x17ac: 0x1308, 0x17ad: 0x0040, 0x17ae: 0x0040, 0x17af: 0x0040, - 0x17b0: 0x1308, 0x17b1: 0x1308, 0x17b2: 0x1308, 0x17b3: 0x1308, 0x17b4: 0x1308, 0x17b5: 0x0040, + 0x1780: 0x0a08, 0x1781: 0x0c08, 0x1782: 0x0a08, 0x1783: 0x0c08, 0x1784: 0x0c08, 0x1785: 0x0c08, + 0x1786: 0x0a08, 0x1787: 0x0a08, 0x1788: 0x0a08, 0x1789: 0x0c08, 0x178a: 0x0a08, 0x178b: 0x0a08, + 0x178c: 0x0c08, 0x178d: 0x0a08, 0x178e: 0x0c08, 0x178f: 0x0c08, 0x1790: 0x0a08, 0x1791: 0x0c08, + 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x0040, + 0x1798: 0x0040, 0x1799: 0x0818, 0x179a: 0x0818, 0x179b: 0x0818, 0x179c: 0x0818, 0x179d: 0x0040, + 0x179e: 0x0040, 0x179f: 0x0040, 0x17a0: 0x0040, 0x17a1: 0x0040, 0x17a2: 0x0040, 0x17a3: 0x0040, + 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x0040, 0x17a7: 0x0040, 0x17a8: 0x0040, 0x17a9: 0x0c18, + 0x17aa: 0x0c18, 0x17ab: 0x0c18, 0x17ac: 0x0c18, 0x17ad: 0x0a18, 0x17ae: 0x0a18, 0x17af: 0x0818, + 0x17b0: 0x0040, 0x17b1: 0x0040, 0x17b2: 0x0040, 0x17b3: 0x0040, 0x17b4: 0x0040, 0x17b5: 0x0040, 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040, 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040, // Block 0x5f, offset 0x17c0 - 0x17c0: 0x0039, 0x17c1: 0x0ee9, 0x17c2: 0x1159, 0x17c3: 0x0ef9, 0x17c4: 0x0f09, 0x17c5: 0x1199, - 0x17c6: 0x0f31, 0x17c7: 0x0249, 0x17c8: 0x0f41, 0x17c9: 0x0259, 0x17ca: 0x0f51, 0x17cb: 0x0359, - 0x17cc: 0x0f61, 0x17cd: 0x0f71, 0x17ce: 0x00d9, 0x17cf: 0x0f99, 0x17d0: 
0x2039, 0x17d1: 0x0269, - 0x17d2: 0x01d9, 0x17d3: 0x0fa9, 0x17d4: 0x0fb9, 0x17d5: 0x1089, 0x17d6: 0x0279, 0x17d7: 0x0369, - 0x17d8: 0x0289, 0x17d9: 0x13d1, 0x17da: 0x0039, 0x17db: 0x0ee9, 0x17dc: 0x1159, 0x17dd: 0x0ef9, - 0x17de: 0x0f09, 0x17df: 0x1199, 0x17e0: 0x0f31, 0x17e1: 0x0249, 0x17e2: 0x0f41, 0x17e3: 0x0259, - 0x17e4: 0x0f51, 0x17e5: 0x0359, 0x17e6: 0x0f61, 0x17e7: 0x0f71, 0x17e8: 0x00d9, 0x17e9: 0x0f99, - 0x17ea: 0x2039, 0x17eb: 0x0269, 0x17ec: 0x01d9, 0x17ed: 0x0fa9, 0x17ee: 0x0fb9, 0x17ef: 0x1089, - 0x17f0: 0x0279, 0x17f1: 0x0369, 0x17f2: 0x0289, 0x17f3: 0x13d1, 0x17f4: 0x0039, 0x17f5: 0x0ee9, - 0x17f6: 0x1159, 0x17f7: 0x0ef9, 0x17f8: 0x0f09, 0x17f9: 0x1199, 0x17fa: 0x0f31, 0x17fb: 0x0249, - 0x17fc: 0x0f41, 0x17fd: 0x0259, 0x17fe: 0x0f51, 0x17ff: 0x0359, + 0x17c0: 0x3308, 0x17c1: 0x3308, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x0040, 0x17c5: 0x0008, + 0x17c6: 0x0008, 0x17c7: 0x0008, 0x17c8: 0x0008, 0x17c9: 0x0008, 0x17ca: 0x0008, 0x17cb: 0x0008, + 0x17cc: 0x0008, 0x17cd: 0x0040, 0x17ce: 0x0040, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0040, + 0x17d2: 0x0040, 0x17d3: 0x0008, 0x17d4: 0x0008, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0008, + 0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008, + 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008, + 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0040, + 0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008, + 0x17f0: 0x0008, 0x17f1: 0x0040, 0x17f2: 0x0008, 0x17f3: 0x0008, 0x17f4: 0x0040, 0x17f5: 0x0008, + 0x17f6: 0x0008, 0x17f7: 0x0008, 0x17f8: 0x0008, 0x17f9: 0x0008, 0x17fa: 0x0040, 0x17fb: 0x0040, + 0x17fc: 0x3308, 0x17fd: 0x0008, 0x17fe: 0x3008, 0x17ff: 0x3008, // Block 0x60, offset 0x1800 - 0x1800: 0x0f61, 0x1801: 0x0f71, 0x1802: 0x00d9, 0x1803: 0x0f99, 0x1804: 0x2039, 0x1805: 0x0269, - 0x1806: 0x01d9, 0x1807: 0x0fa9, 0x1808: 0x0fb9, 0x1809: 0x1089, 0x180a: 0x0279, 0x180b: 0x0369, - 0x180c: 0x0289, 0x180d: 0x13d1, 0x180e: 0x0039, 0x180f: 0x0ee9, 0x1810: 0x1159, 0x1811: 0x0ef9, - 0x1812: 0x0f09, 0x1813: 0x1199, 0x1814: 0x0f31, 0x1815: 0x0040, 0x1816: 0x0f41, 0x1817: 0x0259, - 0x1818: 0x0f51, 0x1819: 0x0359, 0x181a: 0x0f61, 0x181b: 0x0f71, 0x181c: 0x00d9, 0x181d: 0x0f99, - 0x181e: 0x2039, 0x181f: 0x0269, 0x1820: 0x01d9, 0x1821: 0x0fa9, 0x1822: 0x0fb9, 0x1823: 0x1089, - 0x1824: 0x0279, 0x1825: 0x0369, 0x1826: 0x0289, 0x1827: 0x13d1, 0x1828: 0x0039, 0x1829: 0x0ee9, - 0x182a: 0x1159, 0x182b: 0x0ef9, 0x182c: 0x0f09, 0x182d: 0x1199, 0x182e: 0x0f31, 0x182f: 0x0249, - 0x1830: 0x0f41, 0x1831: 0x0259, 0x1832: 0x0f51, 0x1833: 0x0359, 0x1834: 0x0f61, 0x1835: 0x0f71, - 0x1836: 0x00d9, 0x1837: 0x0f99, 0x1838: 0x2039, 0x1839: 0x0269, 0x183a: 0x01d9, 0x183b: 0x0fa9, - 0x183c: 0x0fb9, 0x183d: 0x1089, 0x183e: 0x0279, 0x183f: 0x0369, + 0x1800: 0x3308, 0x1801: 0x3008, 0x1802: 0x3008, 0x1803: 0x3008, 0x1804: 0x3008, 0x1805: 0x0040, + 0x1806: 0x0040, 0x1807: 0x3008, 0x1808: 0x3008, 0x1809: 0x0040, 0x180a: 0x0040, 0x180b: 0x3008, + 0x180c: 0x3008, 0x180d: 0x3808, 0x180e: 0x0040, 0x180f: 0x0040, 0x1810: 0x0008, 0x1811: 0x0040, + 0x1812: 0x0040, 0x1813: 0x0040, 0x1814: 0x0040, 0x1815: 0x0040, 0x1816: 0x0040, 0x1817: 0x3008, + 0x1818: 0x0040, 0x1819: 0x0040, 0x181a: 0x0040, 0x181b: 0x0040, 0x181c: 0x0040, 0x181d: 0x0008, + 0x181e: 0x0008, 0x181f: 0x0008, 0x1820: 0x0008, 0x1821: 0x0008, 0x1822: 0x3008, 0x1823: 0x3008, + 0x1824: 0x0040, 0x1825: 0x0040, 0x1826: 0x3308, 0x1827: 0x3308, 
0x1828: 0x3308, 0x1829: 0x3308, + 0x182a: 0x3308, 0x182b: 0x3308, 0x182c: 0x3308, 0x182d: 0x0040, 0x182e: 0x0040, 0x182f: 0x0040, + 0x1830: 0x3308, 0x1831: 0x3308, 0x1832: 0x3308, 0x1833: 0x3308, 0x1834: 0x3308, 0x1835: 0x0040, + 0x1836: 0x0040, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040, + 0x183c: 0x0040, 0x183d: 0x0040, 0x183e: 0x0040, 0x183f: 0x0040, // Block 0x61, offset 0x1840 - 0x1840: 0x0289, 0x1841: 0x13d1, 0x1842: 0x0039, 0x1843: 0x0ee9, 0x1844: 0x1159, 0x1845: 0x0ef9, - 0x1846: 0x0f09, 0x1847: 0x1199, 0x1848: 0x0f31, 0x1849: 0x0249, 0x184a: 0x0f41, 0x184b: 0x0259, - 0x184c: 0x0f51, 0x184d: 0x0359, 0x184e: 0x0f61, 0x184f: 0x0f71, 0x1850: 0x00d9, 0x1851: 0x0f99, - 0x1852: 0x2039, 0x1853: 0x0269, 0x1854: 0x01d9, 0x1855: 0x0fa9, 0x1856: 0x0fb9, 0x1857: 0x1089, - 0x1858: 0x0279, 0x1859: 0x0369, 0x185a: 0x0289, 0x185b: 0x13d1, 0x185c: 0x0039, 0x185d: 0x0040, - 0x185e: 0x1159, 0x185f: 0x0ef9, 0x1860: 0x0040, 0x1861: 0x0040, 0x1862: 0x0f31, 0x1863: 0x0040, - 0x1864: 0x0040, 0x1865: 0x0259, 0x1866: 0x0f51, 0x1867: 0x0040, 0x1868: 0x0040, 0x1869: 0x0f71, - 0x186a: 0x00d9, 0x186b: 0x0f99, 0x186c: 0x2039, 0x186d: 0x0040, 0x186e: 0x01d9, 0x186f: 0x0fa9, - 0x1870: 0x0fb9, 0x1871: 0x1089, 0x1872: 0x0279, 0x1873: 0x0369, 0x1874: 0x0289, 0x1875: 0x13d1, - 0x1876: 0x0039, 0x1877: 0x0ee9, 0x1878: 0x1159, 0x1879: 0x0ef9, 0x187a: 0x0040, 0x187b: 0x1199, - 0x187c: 0x0040, 0x187d: 0x0249, 0x187e: 0x0f41, 0x187f: 0x0259, + 0x1840: 0x0039, 0x1841: 0x0ee9, 0x1842: 0x1159, 0x1843: 0x0ef9, 0x1844: 0x0f09, 0x1845: 0x1199, + 0x1846: 0x0f31, 0x1847: 0x0249, 0x1848: 0x0f41, 0x1849: 0x0259, 0x184a: 0x0f51, 0x184b: 0x0359, + 0x184c: 0x0f61, 0x184d: 0x0f71, 0x184e: 0x00d9, 0x184f: 0x0f99, 0x1850: 0x2039, 0x1851: 0x0269, + 0x1852: 0x01d9, 0x1853: 0x0fa9, 0x1854: 0x0fb9, 0x1855: 0x1089, 0x1856: 0x0279, 0x1857: 0x0369, + 0x1858: 0x0289, 0x1859: 0x13d1, 0x185a: 0x0039, 0x185b: 0x0ee9, 0x185c: 0x1159, 0x185d: 0x0ef9, + 0x185e: 0x0f09, 0x185f: 0x1199, 0x1860: 0x0f31, 0x1861: 0x0249, 0x1862: 0x0f41, 0x1863: 0x0259, + 0x1864: 0x0f51, 0x1865: 0x0359, 0x1866: 0x0f61, 0x1867: 0x0f71, 0x1868: 0x00d9, 0x1869: 0x0f99, + 0x186a: 0x2039, 0x186b: 0x0269, 0x186c: 0x01d9, 0x186d: 0x0fa9, 0x186e: 0x0fb9, 0x186f: 0x1089, + 0x1870: 0x0279, 0x1871: 0x0369, 0x1872: 0x0289, 0x1873: 0x13d1, 0x1874: 0x0039, 0x1875: 0x0ee9, + 0x1876: 0x1159, 0x1877: 0x0ef9, 0x1878: 0x0f09, 0x1879: 0x1199, 0x187a: 0x0f31, 0x187b: 0x0249, + 0x187c: 0x0f41, 0x187d: 0x0259, 0x187e: 0x0f51, 0x187f: 0x0359, // Block 0x62, offset 0x1880 - 0x1880: 0x0f51, 0x1881: 0x0359, 0x1882: 0x0f61, 0x1883: 0x0f71, 0x1884: 0x0040, 0x1885: 0x0f99, - 0x1886: 0x2039, 0x1887: 0x0269, 0x1888: 0x01d9, 0x1889: 0x0fa9, 0x188a: 0x0fb9, 0x188b: 0x1089, - 0x188c: 0x0279, 0x188d: 0x0369, 0x188e: 0x0289, 0x188f: 0x13d1, 0x1890: 0x0039, 0x1891: 0x0ee9, - 0x1892: 0x1159, 0x1893: 0x0ef9, 0x1894: 0x0f09, 0x1895: 0x1199, 0x1896: 0x0f31, 0x1897: 0x0249, - 0x1898: 0x0f41, 0x1899: 0x0259, 0x189a: 0x0f51, 0x189b: 0x0359, 0x189c: 0x0f61, 0x189d: 0x0f71, - 0x189e: 0x00d9, 0x189f: 0x0f99, 0x18a0: 0x2039, 0x18a1: 0x0269, 0x18a2: 0x01d9, 0x18a3: 0x0fa9, - 0x18a4: 0x0fb9, 0x18a5: 0x1089, 0x18a6: 0x0279, 0x18a7: 0x0369, 0x18a8: 0x0289, 0x18a9: 0x13d1, - 0x18aa: 0x0039, 0x18ab: 0x0ee9, 0x18ac: 0x1159, 0x18ad: 0x0ef9, 0x18ae: 0x0f09, 0x18af: 0x1199, - 0x18b0: 0x0f31, 0x18b1: 0x0249, 0x18b2: 0x0f41, 0x18b3: 0x0259, 0x18b4: 0x0f51, 0x18b5: 0x0359, - 0x18b6: 0x0f61, 0x18b7: 0x0f71, 0x18b8: 0x00d9, 0x18b9: 0x0f99, 0x18ba: 0x2039, 0x18bb: 0x0269, - 0x18bc: 0x01d9, 0x18bd: 
0x0fa9, 0x18be: 0x0fb9, 0x18bf: 0x1089, + 0x1880: 0x0f61, 0x1881: 0x0f71, 0x1882: 0x00d9, 0x1883: 0x0f99, 0x1884: 0x2039, 0x1885: 0x0269, + 0x1886: 0x01d9, 0x1887: 0x0fa9, 0x1888: 0x0fb9, 0x1889: 0x1089, 0x188a: 0x0279, 0x188b: 0x0369, + 0x188c: 0x0289, 0x188d: 0x13d1, 0x188e: 0x0039, 0x188f: 0x0ee9, 0x1890: 0x1159, 0x1891: 0x0ef9, + 0x1892: 0x0f09, 0x1893: 0x1199, 0x1894: 0x0f31, 0x1895: 0x0040, 0x1896: 0x0f41, 0x1897: 0x0259, + 0x1898: 0x0f51, 0x1899: 0x0359, 0x189a: 0x0f61, 0x189b: 0x0f71, 0x189c: 0x00d9, 0x189d: 0x0f99, + 0x189e: 0x2039, 0x189f: 0x0269, 0x18a0: 0x01d9, 0x18a1: 0x0fa9, 0x18a2: 0x0fb9, 0x18a3: 0x1089, + 0x18a4: 0x0279, 0x18a5: 0x0369, 0x18a6: 0x0289, 0x18a7: 0x13d1, 0x18a8: 0x0039, 0x18a9: 0x0ee9, + 0x18aa: 0x1159, 0x18ab: 0x0ef9, 0x18ac: 0x0f09, 0x18ad: 0x1199, 0x18ae: 0x0f31, 0x18af: 0x0249, + 0x18b0: 0x0f41, 0x18b1: 0x0259, 0x18b2: 0x0f51, 0x18b3: 0x0359, 0x18b4: 0x0f61, 0x18b5: 0x0f71, + 0x18b6: 0x00d9, 0x18b7: 0x0f99, 0x18b8: 0x2039, 0x18b9: 0x0269, 0x18ba: 0x01d9, 0x18bb: 0x0fa9, + 0x18bc: 0x0fb9, 0x18bd: 0x1089, 0x18be: 0x0279, 0x18bf: 0x0369, // Block 0x63, offset 0x18c0 - 0x18c0: 0x0279, 0x18c1: 0x0369, 0x18c2: 0x0289, 0x18c3: 0x13d1, 0x18c4: 0x0039, 0x18c5: 0x0ee9, - 0x18c6: 0x0040, 0x18c7: 0x0ef9, 0x18c8: 0x0f09, 0x18c9: 0x1199, 0x18ca: 0x0f31, 0x18cb: 0x0040, - 0x18cc: 0x0040, 0x18cd: 0x0259, 0x18ce: 0x0f51, 0x18cf: 0x0359, 0x18d0: 0x0f61, 0x18d1: 0x0f71, - 0x18d2: 0x00d9, 0x18d3: 0x0f99, 0x18d4: 0x2039, 0x18d5: 0x0040, 0x18d6: 0x01d9, 0x18d7: 0x0fa9, - 0x18d8: 0x0fb9, 0x18d9: 0x1089, 0x18da: 0x0279, 0x18db: 0x0369, 0x18dc: 0x0289, 0x18dd: 0x0040, - 0x18de: 0x0039, 0x18df: 0x0ee9, 0x18e0: 0x1159, 0x18e1: 0x0ef9, 0x18e2: 0x0f09, 0x18e3: 0x1199, - 0x18e4: 0x0f31, 0x18e5: 0x0249, 0x18e6: 0x0f41, 0x18e7: 0x0259, 0x18e8: 0x0f51, 0x18e9: 0x0359, - 0x18ea: 0x0f61, 0x18eb: 0x0f71, 0x18ec: 0x00d9, 0x18ed: 0x0f99, 0x18ee: 0x2039, 0x18ef: 0x0269, - 0x18f0: 0x01d9, 0x18f1: 0x0fa9, 0x18f2: 0x0fb9, 0x18f3: 0x1089, 0x18f4: 0x0279, 0x18f5: 0x0369, - 0x18f6: 0x0289, 0x18f7: 0x13d1, 0x18f8: 0x0039, 0x18f9: 0x0ee9, 0x18fa: 0x0040, 0x18fb: 0x0ef9, - 0x18fc: 0x0f09, 0x18fd: 0x1199, 0x18fe: 0x0f31, 0x18ff: 0x0040, + 0x18c0: 0x0289, 0x18c1: 0x13d1, 0x18c2: 0x0039, 0x18c3: 0x0ee9, 0x18c4: 0x1159, 0x18c5: 0x0ef9, + 0x18c6: 0x0f09, 0x18c7: 0x1199, 0x18c8: 0x0f31, 0x18c9: 0x0249, 0x18ca: 0x0f41, 0x18cb: 0x0259, + 0x18cc: 0x0f51, 0x18cd: 0x0359, 0x18ce: 0x0f61, 0x18cf: 0x0f71, 0x18d0: 0x00d9, 0x18d1: 0x0f99, + 0x18d2: 0x2039, 0x18d3: 0x0269, 0x18d4: 0x01d9, 0x18d5: 0x0fa9, 0x18d6: 0x0fb9, 0x18d7: 0x1089, + 0x18d8: 0x0279, 0x18d9: 0x0369, 0x18da: 0x0289, 0x18db: 0x13d1, 0x18dc: 0x0039, 0x18dd: 0x0040, + 0x18de: 0x1159, 0x18df: 0x0ef9, 0x18e0: 0x0040, 0x18e1: 0x0040, 0x18e2: 0x0f31, 0x18e3: 0x0040, + 0x18e4: 0x0040, 0x18e5: 0x0259, 0x18e6: 0x0f51, 0x18e7: 0x0040, 0x18e8: 0x0040, 0x18e9: 0x0f71, + 0x18ea: 0x00d9, 0x18eb: 0x0f99, 0x18ec: 0x2039, 0x18ed: 0x0040, 0x18ee: 0x01d9, 0x18ef: 0x0fa9, + 0x18f0: 0x0fb9, 0x18f1: 0x1089, 0x18f2: 0x0279, 0x18f3: 0x0369, 0x18f4: 0x0289, 0x18f5: 0x13d1, + 0x18f6: 0x0039, 0x18f7: 0x0ee9, 0x18f8: 0x1159, 0x18f9: 0x0ef9, 0x18fa: 0x0040, 0x18fb: 0x1199, + 0x18fc: 0x0040, 0x18fd: 0x0249, 0x18fe: 0x0f41, 0x18ff: 0x0259, // Block 0x64, offset 0x1900 - 0x1900: 0x0f41, 0x1901: 0x0259, 0x1902: 0x0f51, 0x1903: 0x0359, 0x1904: 0x0f61, 0x1905: 0x0040, - 0x1906: 0x00d9, 0x1907: 0x0040, 0x1908: 0x0040, 0x1909: 0x0040, 0x190a: 0x01d9, 0x190b: 0x0fa9, - 0x190c: 0x0fb9, 0x190d: 0x1089, 0x190e: 0x0279, 0x190f: 0x0369, 0x1910: 0x0289, 0x1911: 0x0040, - 0x1912: 0x0039, 
0x1913: 0x0ee9, 0x1914: 0x1159, 0x1915: 0x0ef9, 0x1916: 0x0f09, 0x1917: 0x1199, - 0x1918: 0x0f31, 0x1919: 0x0249, 0x191a: 0x0f41, 0x191b: 0x0259, 0x191c: 0x0f51, 0x191d: 0x0359, - 0x191e: 0x0f61, 0x191f: 0x0f71, 0x1920: 0x00d9, 0x1921: 0x0f99, 0x1922: 0x2039, 0x1923: 0x0269, - 0x1924: 0x01d9, 0x1925: 0x0fa9, 0x1926: 0x0fb9, 0x1927: 0x1089, 0x1928: 0x0279, 0x1929: 0x0369, - 0x192a: 0x0289, 0x192b: 0x13d1, 0x192c: 0x0039, 0x192d: 0x0ee9, 0x192e: 0x1159, 0x192f: 0x0ef9, - 0x1930: 0x0f09, 0x1931: 0x1199, 0x1932: 0x0f31, 0x1933: 0x0249, 0x1934: 0x0f41, 0x1935: 0x0259, - 0x1936: 0x0f51, 0x1937: 0x0359, 0x1938: 0x0f61, 0x1939: 0x0f71, 0x193a: 0x00d9, 0x193b: 0x0f99, - 0x193c: 0x2039, 0x193d: 0x0269, 0x193e: 0x01d9, 0x193f: 0x0fa9, + 0x1900: 0x0f51, 0x1901: 0x0359, 0x1902: 0x0f61, 0x1903: 0x0f71, 0x1904: 0x0040, 0x1905: 0x0f99, + 0x1906: 0x2039, 0x1907: 0x0269, 0x1908: 0x01d9, 0x1909: 0x0fa9, 0x190a: 0x0fb9, 0x190b: 0x1089, + 0x190c: 0x0279, 0x190d: 0x0369, 0x190e: 0x0289, 0x190f: 0x13d1, 0x1910: 0x0039, 0x1911: 0x0ee9, + 0x1912: 0x1159, 0x1913: 0x0ef9, 0x1914: 0x0f09, 0x1915: 0x1199, 0x1916: 0x0f31, 0x1917: 0x0249, + 0x1918: 0x0f41, 0x1919: 0x0259, 0x191a: 0x0f51, 0x191b: 0x0359, 0x191c: 0x0f61, 0x191d: 0x0f71, + 0x191e: 0x00d9, 0x191f: 0x0f99, 0x1920: 0x2039, 0x1921: 0x0269, 0x1922: 0x01d9, 0x1923: 0x0fa9, + 0x1924: 0x0fb9, 0x1925: 0x1089, 0x1926: 0x0279, 0x1927: 0x0369, 0x1928: 0x0289, 0x1929: 0x13d1, + 0x192a: 0x0039, 0x192b: 0x0ee9, 0x192c: 0x1159, 0x192d: 0x0ef9, 0x192e: 0x0f09, 0x192f: 0x1199, + 0x1930: 0x0f31, 0x1931: 0x0249, 0x1932: 0x0f41, 0x1933: 0x0259, 0x1934: 0x0f51, 0x1935: 0x0359, + 0x1936: 0x0f61, 0x1937: 0x0f71, 0x1938: 0x00d9, 0x1939: 0x0f99, 0x193a: 0x2039, 0x193b: 0x0269, + 0x193c: 0x01d9, 0x193d: 0x0fa9, 0x193e: 0x0fb9, 0x193f: 0x1089, // Block 0x65, offset 0x1940 - 0x1940: 0x0fb9, 0x1941: 0x1089, 0x1942: 0x0279, 0x1943: 0x0369, 0x1944: 0x0289, 0x1945: 0x13d1, - 0x1946: 0x0039, 0x1947: 0x0ee9, 0x1948: 0x1159, 0x1949: 0x0ef9, 0x194a: 0x0f09, 0x194b: 0x1199, - 0x194c: 0x0f31, 0x194d: 0x0249, 0x194e: 0x0f41, 0x194f: 0x0259, 0x1950: 0x0f51, 0x1951: 0x0359, - 0x1952: 0x0f61, 0x1953: 0x0f71, 0x1954: 0x00d9, 0x1955: 0x0f99, 0x1956: 0x2039, 0x1957: 0x0269, - 0x1958: 0x01d9, 0x1959: 0x0fa9, 0x195a: 0x0fb9, 0x195b: 0x1089, 0x195c: 0x0279, 0x195d: 0x0369, - 0x195e: 0x0289, 0x195f: 0x13d1, 0x1960: 0x0039, 0x1961: 0x0ee9, 0x1962: 0x1159, 0x1963: 0x0ef9, - 0x1964: 0x0f09, 0x1965: 0x1199, 0x1966: 0x0f31, 0x1967: 0x0249, 0x1968: 0x0f41, 0x1969: 0x0259, - 0x196a: 0x0f51, 0x196b: 0x0359, 0x196c: 0x0f61, 0x196d: 0x0f71, 0x196e: 0x00d9, 0x196f: 0x0f99, - 0x1970: 0x2039, 0x1971: 0x0269, 0x1972: 0x01d9, 0x1973: 0x0fa9, 0x1974: 0x0fb9, 0x1975: 0x1089, - 0x1976: 0x0279, 0x1977: 0x0369, 0x1978: 0x0289, 0x1979: 0x13d1, 0x197a: 0x0039, 0x197b: 0x0ee9, - 0x197c: 0x1159, 0x197d: 0x0ef9, 0x197e: 0x0f09, 0x197f: 0x1199, + 0x1940: 0x0279, 0x1941: 0x0369, 0x1942: 0x0289, 0x1943: 0x13d1, 0x1944: 0x0039, 0x1945: 0x0ee9, + 0x1946: 0x0040, 0x1947: 0x0ef9, 0x1948: 0x0f09, 0x1949: 0x1199, 0x194a: 0x0f31, 0x194b: 0x0040, + 0x194c: 0x0040, 0x194d: 0x0259, 0x194e: 0x0f51, 0x194f: 0x0359, 0x1950: 0x0f61, 0x1951: 0x0f71, + 0x1952: 0x00d9, 0x1953: 0x0f99, 0x1954: 0x2039, 0x1955: 0x0040, 0x1956: 0x01d9, 0x1957: 0x0fa9, + 0x1958: 0x0fb9, 0x1959: 0x1089, 0x195a: 0x0279, 0x195b: 0x0369, 0x195c: 0x0289, 0x195d: 0x0040, + 0x195e: 0x0039, 0x195f: 0x0ee9, 0x1960: 0x1159, 0x1961: 0x0ef9, 0x1962: 0x0f09, 0x1963: 0x1199, + 0x1964: 0x0f31, 0x1965: 0x0249, 0x1966: 0x0f41, 0x1967: 0x0259, 0x1968: 0x0f51, 0x1969: 0x0359, + 0x196a: 
0x0f61, 0x196b: 0x0f71, 0x196c: 0x00d9, 0x196d: 0x0f99, 0x196e: 0x2039, 0x196f: 0x0269, + 0x1970: 0x01d9, 0x1971: 0x0fa9, 0x1972: 0x0fb9, 0x1973: 0x1089, 0x1974: 0x0279, 0x1975: 0x0369, + 0x1976: 0x0289, 0x1977: 0x13d1, 0x1978: 0x0039, 0x1979: 0x0ee9, 0x197a: 0x0040, 0x197b: 0x0ef9, + 0x197c: 0x0f09, 0x197d: 0x1199, 0x197e: 0x0f31, 0x197f: 0x0040, // Block 0x66, offset 0x1980 - 0x1980: 0x0f31, 0x1981: 0x0249, 0x1982: 0x0f41, 0x1983: 0x0259, 0x1984: 0x0f51, 0x1985: 0x0359, - 0x1986: 0x0f61, 0x1987: 0x0f71, 0x1988: 0x00d9, 0x1989: 0x0f99, 0x198a: 0x2039, 0x198b: 0x0269, - 0x198c: 0x01d9, 0x198d: 0x0fa9, 0x198e: 0x0fb9, 0x198f: 0x1089, 0x1990: 0x0279, 0x1991: 0x0369, - 0x1992: 0x0289, 0x1993: 0x13d1, 0x1994: 0x0039, 0x1995: 0x0ee9, 0x1996: 0x1159, 0x1997: 0x0ef9, - 0x1998: 0x0f09, 0x1999: 0x1199, 0x199a: 0x0f31, 0x199b: 0x0249, 0x199c: 0x0f41, 0x199d: 0x0259, - 0x199e: 0x0f51, 0x199f: 0x0359, 0x19a0: 0x0f61, 0x19a1: 0x0f71, 0x19a2: 0x00d9, 0x19a3: 0x0f99, - 0x19a4: 0x2039, 0x19a5: 0x0269, 0x19a6: 0x01d9, 0x19a7: 0x0fa9, 0x19a8: 0x0fb9, 0x19a9: 0x1089, - 0x19aa: 0x0279, 0x19ab: 0x0369, 0x19ac: 0x0289, 0x19ad: 0x13d1, 0x19ae: 0x0039, 0x19af: 0x0ee9, - 0x19b0: 0x1159, 0x19b1: 0x0ef9, 0x19b2: 0x0f09, 0x19b3: 0x1199, 0x19b4: 0x0f31, 0x19b5: 0x0249, - 0x19b6: 0x0f41, 0x19b7: 0x0259, 0x19b8: 0x0f51, 0x19b9: 0x0359, 0x19ba: 0x0f61, 0x19bb: 0x0f71, - 0x19bc: 0x00d9, 0x19bd: 0x0f99, 0x19be: 0x2039, 0x19bf: 0x0269, + 0x1980: 0x0f41, 0x1981: 0x0259, 0x1982: 0x0f51, 0x1983: 0x0359, 0x1984: 0x0f61, 0x1985: 0x0040, + 0x1986: 0x00d9, 0x1987: 0x0040, 0x1988: 0x0040, 0x1989: 0x0040, 0x198a: 0x01d9, 0x198b: 0x0fa9, + 0x198c: 0x0fb9, 0x198d: 0x1089, 0x198e: 0x0279, 0x198f: 0x0369, 0x1990: 0x0289, 0x1991: 0x0040, + 0x1992: 0x0039, 0x1993: 0x0ee9, 0x1994: 0x1159, 0x1995: 0x0ef9, 0x1996: 0x0f09, 0x1997: 0x1199, + 0x1998: 0x0f31, 0x1999: 0x0249, 0x199a: 0x0f41, 0x199b: 0x0259, 0x199c: 0x0f51, 0x199d: 0x0359, + 0x199e: 0x0f61, 0x199f: 0x0f71, 0x19a0: 0x00d9, 0x19a1: 0x0f99, 0x19a2: 0x2039, 0x19a3: 0x0269, + 0x19a4: 0x01d9, 0x19a5: 0x0fa9, 0x19a6: 0x0fb9, 0x19a7: 0x1089, 0x19a8: 0x0279, 0x19a9: 0x0369, + 0x19aa: 0x0289, 0x19ab: 0x13d1, 0x19ac: 0x0039, 0x19ad: 0x0ee9, 0x19ae: 0x1159, 0x19af: 0x0ef9, + 0x19b0: 0x0f09, 0x19b1: 0x1199, 0x19b2: 0x0f31, 0x19b3: 0x0249, 0x19b4: 0x0f41, 0x19b5: 0x0259, + 0x19b6: 0x0f51, 0x19b7: 0x0359, 0x19b8: 0x0f61, 0x19b9: 0x0f71, 0x19ba: 0x00d9, 0x19bb: 0x0f99, + 0x19bc: 0x2039, 0x19bd: 0x0269, 0x19be: 0x01d9, 0x19bf: 0x0fa9, // Block 0x67, offset 0x19c0 - 0x19c0: 0x01d9, 0x19c1: 0x0fa9, 0x19c2: 0x0fb9, 0x19c3: 0x1089, 0x19c4: 0x0279, 0x19c5: 0x0369, - 0x19c6: 0x0289, 0x19c7: 0x13d1, 0x19c8: 0x0039, 0x19c9: 0x0ee9, 0x19ca: 0x1159, 0x19cb: 0x0ef9, - 0x19cc: 0x0f09, 0x19cd: 0x1199, 0x19ce: 0x0f31, 0x19cf: 0x0249, 0x19d0: 0x0f41, 0x19d1: 0x0259, - 0x19d2: 0x0f51, 0x19d3: 0x0359, 0x19d4: 0x0f61, 0x19d5: 0x0f71, 0x19d6: 0x00d9, 0x19d7: 0x0f99, - 0x19d8: 0x2039, 0x19d9: 0x0269, 0x19da: 0x01d9, 0x19db: 0x0fa9, 0x19dc: 0x0fb9, 0x19dd: 0x1089, - 0x19de: 0x0279, 0x19df: 0x0369, 0x19e0: 0x0289, 0x19e1: 0x13d1, 0x19e2: 0x0039, 0x19e3: 0x0ee9, - 0x19e4: 0x1159, 0x19e5: 0x0ef9, 0x19e6: 0x0f09, 0x19e7: 0x1199, 0x19e8: 0x0f31, 0x19e9: 0x0249, - 0x19ea: 0x0f41, 0x19eb: 0x0259, 0x19ec: 0x0f51, 0x19ed: 0x0359, 0x19ee: 0x0f61, 0x19ef: 0x0f71, - 0x19f0: 0x00d9, 0x19f1: 0x0f99, 0x19f2: 0x2039, 0x19f3: 0x0269, 0x19f4: 0x01d9, 0x19f5: 0x0fa9, - 0x19f6: 0x0fb9, 0x19f7: 0x1089, 0x19f8: 0x0279, 0x19f9: 0x0369, 0x19fa: 0x0289, 0x19fb: 0x13d1, - 0x19fc: 0x0039, 0x19fd: 0x0ee9, 0x19fe: 0x1159, 0x19ff: 0x0ef9, + 
0x19c0: 0x0fb9, 0x19c1: 0x1089, 0x19c2: 0x0279, 0x19c3: 0x0369, 0x19c4: 0x0289, 0x19c5: 0x13d1, + 0x19c6: 0x0039, 0x19c7: 0x0ee9, 0x19c8: 0x1159, 0x19c9: 0x0ef9, 0x19ca: 0x0f09, 0x19cb: 0x1199, + 0x19cc: 0x0f31, 0x19cd: 0x0249, 0x19ce: 0x0f41, 0x19cf: 0x0259, 0x19d0: 0x0f51, 0x19d1: 0x0359, + 0x19d2: 0x0f61, 0x19d3: 0x0f71, 0x19d4: 0x00d9, 0x19d5: 0x0f99, 0x19d6: 0x2039, 0x19d7: 0x0269, + 0x19d8: 0x01d9, 0x19d9: 0x0fa9, 0x19da: 0x0fb9, 0x19db: 0x1089, 0x19dc: 0x0279, 0x19dd: 0x0369, + 0x19de: 0x0289, 0x19df: 0x13d1, 0x19e0: 0x0039, 0x19e1: 0x0ee9, 0x19e2: 0x1159, 0x19e3: 0x0ef9, + 0x19e4: 0x0f09, 0x19e5: 0x1199, 0x19e6: 0x0f31, 0x19e7: 0x0249, 0x19e8: 0x0f41, 0x19e9: 0x0259, + 0x19ea: 0x0f51, 0x19eb: 0x0359, 0x19ec: 0x0f61, 0x19ed: 0x0f71, 0x19ee: 0x00d9, 0x19ef: 0x0f99, + 0x19f0: 0x2039, 0x19f1: 0x0269, 0x19f2: 0x01d9, 0x19f3: 0x0fa9, 0x19f4: 0x0fb9, 0x19f5: 0x1089, + 0x19f6: 0x0279, 0x19f7: 0x0369, 0x19f8: 0x0289, 0x19f9: 0x13d1, 0x19fa: 0x0039, 0x19fb: 0x0ee9, + 0x19fc: 0x1159, 0x19fd: 0x0ef9, 0x19fe: 0x0f09, 0x19ff: 0x1199, // Block 0x68, offset 0x1a00 - 0x1a00: 0x0f09, 0x1a01: 0x1199, 0x1a02: 0x0f31, 0x1a03: 0x0249, 0x1a04: 0x0f41, 0x1a05: 0x0259, - 0x1a06: 0x0f51, 0x1a07: 0x0359, 0x1a08: 0x0f61, 0x1a09: 0x0f71, 0x1a0a: 0x00d9, 0x1a0b: 0x0f99, - 0x1a0c: 0x2039, 0x1a0d: 0x0269, 0x1a0e: 0x01d9, 0x1a0f: 0x0fa9, 0x1a10: 0x0fb9, 0x1a11: 0x1089, - 0x1a12: 0x0279, 0x1a13: 0x0369, 0x1a14: 0x0289, 0x1a15: 0x13d1, 0x1a16: 0x0039, 0x1a17: 0x0ee9, - 0x1a18: 0x1159, 0x1a19: 0x0ef9, 0x1a1a: 0x0f09, 0x1a1b: 0x1199, 0x1a1c: 0x0f31, 0x1a1d: 0x0249, - 0x1a1e: 0x0f41, 0x1a1f: 0x0259, 0x1a20: 0x0f51, 0x1a21: 0x0359, 0x1a22: 0x0f61, 0x1a23: 0x0f71, - 0x1a24: 0x00d9, 0x1a25: 0x0f99, 0x1a26: 0x2039, 0x1a27: 0x0269, 0x1a28: 0x01d9, 0x1a29: 0x0fa9, - 0x1a2a: 0x0fb9, 0x1a2b: 0x1089, 0x1a2c: 0x0279, 0x1a2d: 0x0369, 0x1a2e: 0x0289, 0x1a2f: 0x13d1, - 0x1a30: 0x0039, 0x1a31: 0x0ee9, 0x1a32: 0x1159, 0x1a33: 0x0ef9, 0x1a34: 0x0f09, 0x1a35: 0x1199, - 0x1a36: 0x0f31, 0x1a37: 0x0249, 0x1a38: 0x0f41, 0x1a39: 0x0259, 0x1a3a: 0x0f51, 0x1a3b: 0x0359, - 0x1a3c: 0x0f61, 0x1a3d: 0x0f71, 0x1a3e: 0x00d9, 0x1a3f: 0x0f99, + 0x1a00: 0x0f31, 0x1a01: 0x0249, 0x1a02: 0x0f41, 0x1a03: 0x0259, 0x1a04: 0x0f51, 0x1a05: 0x0359, + 0x1a06: 0x0f61, 0x1a07: 0x0f71, 0x1a08: 0x00d9, 0x1a09: 0x0f99, 0x1a0a: 0x2039, 0x1a0b: 0x0269, + 0x1a0c: 0x01d9, 0x1a0d: 0x0fa9, 0x1a0e: 0x0fb9, 0x1a0f: 0x1089, 0x1a10: 0x0279, 0x1a11: 0x0369, + 0x1a12: 0x0289, 0x1a13: 0x13d1, 0x1a14: 0x0039, 0x1a15: 0x0ee9, 0x1a16: 0x1159, 0x1a17: 0x0ef9, + 0x1a18: 0x0f09, 0x1a19: 0x1199, 0x1a1a: 0x0f31, 0x1a1b: 0x0249, 0x1a1c: 0x0f41, 0x1a1d: 0x0259, + 0x1a1e: 0x0f51, 0x1a1f: 0x0359, 0x1a20: 0x0f61, 0x1a21: 0x0f71, 0x1a22: 0x00d9, 0x1a23: 0x0f99, + 0x1a24: 0x2039, 0x1a25: 0x0269, 0x1a26: 0x01d9, 0x1a27: 0x0fa9, 0x1a28: 0x0fb9, 0x1a29: 0x1089, + 0x1a2a: 0x0279, 0x1a2b: 0x0369, 0x1a2c: 0x0289, 0x1a2d: 0x13d1, 0x1a2e: 0x0039, 0x1a2f: 0x0ee9, + 0x1a30: 0x1159, 0x1a31: 0x0ef9, 0x1a32: 0x0f09, 0x1a33: 0x1199, 0x1a34: 0x0f31, 0x1a35: 0x0249, + 0x1a36: 0x0f41, 0x1a37: 0x0259, 0x1a38: 0x0f51, 0x1a39: 0x0359, 0x1a3a: 0x0f61, 0x1a3b: 0x0f71, + 0x1a3c: 0x00d9, 0x1a3d: 0x0f99, 0x1a3e: 0x2039, 0x1a3f: 0x0269, // Block 0x69, offset 0x1a40 - 0x1a40: 0x2039, 0x1a41: 0x0269, 0x1a42: 0x01d9, 0x1a43: 0x0fa9, 0x1a44: 0x0fb9, 0x1a45: 0x1089, - 0x1a46: 0x0279, 0x1a47: 0x0369, 0x1a48: 0x0289, 0x1a49: 0x13d1, 0x1a4a: 0x0039, 0x1a4b: 0x0ee9, - 0x1a4c: 0x1159, 0x1a4d: 0x0ef9, 0x1a4e: 0x0f09, 0x1a4f: 0x1199, 0x1a50: 0x0f31, 0x1a51: 0x0249, - 0x1a52: 0x0f41, 0x1a53: 0x0259, 0x1a54: 0x0f51, 0x1a55: 0x0359, 
0x1a56: 0x0f61, 0x1a57: 0x0f71, - 0x1a58: 0x00d9, 0x1a59: 0x0f99, 0x1a5a: 0x2039, 0x1a5b: 0x0269, 0x1a5c: 0x01d9, 0x1a5d: 0x0fa9, - 0x1a5e: 0x0fb9, 0x1a5f: 0x1089, 0x1a60: 0x0279, 0x1a61: 0x0369, 0x1a62: 0x0289, 0x1a63: 0x13d1, - 0x1a64: 0xba81, 0x1a65: 0xba99, 0x1a66: 0x0040, 0x1a67: 0x0040, 0x1a68: 0xbab1, 0x1a69: 0x1099, - 0x1a6a: 0x10b1, 0x1a6b: 0x10c9, 0x1a6c: 0xbac9, 0x1a6d: 0xbae1, 0x1a6e: 0xbaf9, 0x1a6f: 0x1429, - 0x1a70: 0x1a31, 0x1a71: 0xbb11, 0x1a72: 0xbb29, 0x1a73: 0xbb41, 0x1a74: 0xbb59, 0x1a75: 0xbb71, - 0x1a76: 0xbb89, 0x1a77: 0x2109, 0x1a78: 0x1111, 0x1a79: 0x1429, 0x1a7a: 0xbba1, 0x1a7b: 0xbbb9, - 0x1a7c: 0xbbd1, 0x1a7d: 0x10e1, 0x1a7e: 0x10f9, 0x1a7f: 0xbbe9, + 0x1a40: 0x01d9, 0x1a41: 0x0fa9, 0x1a42: 0x0fb9, 0x1a43: 0x1089, 0x1a44: 0x0279, 0x1a45: 0x0369, + 0x1a46: 0x0289, 0x1a47: 0x13d1, 0x1a48: 0x0039, 0x1a49: 0x0ee9, 0x1a4a: 0x1159, 0x1a4b: 0x0ef9, + 0x1a4c: 0x0f09, 0x1a4d: 0x1199, 0x1a4e: 0x0f31, 0x1a4f: 0x0249, 0x1a50: 0x0f41, 0x1a51: 0x0259, + 0x1a52: 0x0f51, 0x1a53: 0x0359, 0x1a54: 0x0f61, 0x1a55: 0x0f71, 0x1a56: 0x00d9, 0x1a57: 0x0f99, + 0x1a58: 0x2039, 0x1a59: 0x0269, 0x1a5a: 0x01d9, 0x1a5b: 0x0fa9, 0x1a5c: 0x0fb9, 0x1a5d: 0x1089, + 0x1a5e: 0x0279, 0x1a5f: 0x0369, 0x1a60: 0x0289, 0x1a61: 0x13d1, 0x1a62: 0x0039, 0x1a63: 0x0ee9, + 0x1a64: 0x1159, 0x1a65: 0x0ef9, 0x1a66: 0x0f09, 0x1a67: 0x1199, 0x1a68: 0x0f31, 0x1a69: 0x0249, + 0x1a6a: 0x0f41, 0x1a6b: 0x0259, 0x1a6c: 0x0f51, 0x1a6d: 0x0359, 0x1a6e: 0x0f61, 0x1a6f: 0x0f71, + 0x1a70: 0x00d9, 0x1a71: 0x0f99, 0x1a72: 0x2039, 0x1a73: 0x0269, 0x1a74: 0x01d9, 0x1a75: 0x0fa9, + 0x1a76: 0x0fb9, 0x1a77: 0x1089, 0x1a78: 0x0279, 0x1a79: 0x0369, 0x1a7a: 0x0289, 0x1a7b: 0x13d1, + 0x1a7c: 0x0039, 0x1a7d: 0x0ee9, 0x1a7e: 0x1159, 0x1a7f: 0x0ef9, // Block 0x6a, offset 0x1a80 - 0x1a80: 0x2079, 0x1a81: 0xbc01, 0x1a82: 0xbab1, 0x1a83: 0x1099, 0x1a84: 0x10b1, 0x1a85: 0x10c9, - 0x1a86: 0xbac9, 0x1a87: 0xbae1, 0x1a88: 0xbaf9, 0x1a89: 0x1429, 0x1a8a: 0x1a31, 0x1a8b: 0xbb11, - 0x1a8c: 0xbb29, 0x1a8d: 0xbb41, 0x1a8e: 0xbb59, 0x1a8f: 0xbb71, 0x1a90: 0xbb89, 0x1a91: 0x2109, - 0x1a92: 0x1111, 0x1a93: 0xbba1, 0x1a94: 0xbba1, 0x1a95: 0xbbb9, 0x1a96: 0xbbd1, 0x1a97: 0x10e1, - 0x1a98: 0x10f9, 0x1a99: 0xbbe9, 0x1a9a: 0x2079, 0x1a9b: 0xbc21, 0x1a9c: 0xbac9, 0x1a9d: 0x1429, - 0x1a9e: 0xbb11, 0x1a9f: 0x10e1, 0x1aa0: 0x1111, 0x1aa1: 0x2109, 0x1aa2: 0xbab1, 0x1aa3: 0x1099, - 0x1aa4: 0x10b1, 0x1aa5: 0x10c9, 0x1aa6: 0xbac9, 0x1aa7: 0xbae1, 0x1aa8: 0xbaf9, 0x1aa9: 0x1429, - 0x1aaa: 0x1a31, 0x1aab: 0xbb11, 0x1aac: 0xbb29, 0x1aad: 0xbb41, 0x1aae: 0xbb59, 0x1aaf: 0xbb71, - 0x1ab0: 0xbb89, 0x1ab1: 0x2109, 0x1ab2: 0x1111, 0x1ab3: 0x1429, 0x1ab4: 0xbba1, 0x1ab5: 0xbbb9, - 0x1ab6: 0xbbd1, 0x1ab7: 0x10e1, 0x1ab8: 0x10f9, 0x1ab9: 0xbbe9, 0x1aba: 0x2079, 0x1abb: 0xbc01, - 0x1abc: 0xbab1, 0x1abd: 0x1099, 0x1abe: 0x10b1, 0x1abf: 0x10c9, + 0x1a80: 0x0f09, 0x1a81: 0x1199, 0x1a82: 0x0f31, 0x1a83: 0x0249, 0x1a84: 0x0f41, 0x1a85: 0x0259, + 0x1a86: 0x0f51, 0x1a87: 0x0359, 0x1a88: 0x0f61, 0x1a89: 0x0f71, 0x1a8a: 0x00d9, 0x1a8b: 0x0f99, + 0x1a8c: 0x2039, 0x1a8d: 0x0269, 0x1a8e: 0x01d9, 0x1a8f: 0x0fa9, 0x1a90: 0x0fb9, 0x1a91: 0x1089, + 0x1a92: 0x0279, 0x1a93: 0x0369, 0x1a94: 0x0289, 0x1a95: 0x13d1, 0x1a96: 0x0039, 0x1a97: 0x0ee9, + 0x1a98: 0x1159, 0x1a99: 0x0ef9, 0x1a9a: 0x0f09, 0x1a9b: 0x1199, 0x1a9c: 0x0f31, 0x1a9d: 0x0249, + 0x1a9e: 0x0f41, 0x1a9f: 0x0259, 0x1aa0: 0x0f51, 0x1aa1: 0x0359, 0x1aa2: 0x0f61, 0x1aa3: 0x0f71, + 0x1aa4: 0x00d9, 0x1aa5: 0x0f99, 0x1aa6: 0x2039, 0x1aa7: 0x0269, 0x1aa8: 0x01d9, 0x1aa9: 0x0fa9, + 0x1aaa: 0x0fb9, 0x1aab: 0x1089, 0x1aac: 0x0279, 0x1aad: 
0x0369, 0x1aae: 0x0289, 0x1aaf: 0x13d1, + 0x1ab0: 0x0039, 0x1ab1: 0x0ee9, 0x1ab2: 0x1159, 0x1ab3: 0x0ef9, 0x1ab4: 0x0f09, 0x1ab5: 0x1199, + 0x1ab6: 0x0f31, 0x1ab7: 0x0249, 0x1ab8: 0x0f41, 0x1ab9: 0x0259, 0x1aba: 0x0f51, 0x1abb: 0x0359, + 0x1abc: 0x0f61, 0x1abd: 0x0f71, 0x1abe: 0x00d9, 0x1abf: 0x0f99, // Block 0x6b, offset 0x1ac0 - 0x1ac0: 0xbac9, 0x1ac1: 0xbae1, 0x1ac2: 0xbaf9, 0x1ac3: 0x1429, 0x1ac4: 0x1a31, 0x1ac5: 0xbb11, - 0x1ac6: 0xbb29, 0x1ac7: 0xbb41, 0x1ac8: 0xbb59, 0x1ac9: 0xbb71, 0x1aca: 0xbb89, 0x1acb: 0x2109, - 0x1acc: 0x1111, 0x1acd: 0xbba1, 0x1ace: 0xbba1, 0x1acf: 0xbbb9, 0x1ad0: 0xbbd1, 0x1ad1: 0x10e1, - 0x1ad2: 0x10f9, 0x1ad3: 0xbbe9, 0x1ad4: 0x2079, 0x1ad5: 0xbc21, 0x1ad6: 0xbac9, 0x1ad7: 0x1429, - 0x1ad8: 0xbb11, 0x1ad9: 0x10e1, 0x1ada: 0x1111, 0x1adb: 0x2109, 0x1adc: 0xbab1, 0x1add: 0x1099, - 0x1ade: 0x10b1, 0x1adf: 0x10c9, 0x1ae0: 0xbac9, 0x1ae1: 0xbae1, 0x1ae2: 0xbaf9, 0x1ae3: 0x1429, - 0x1ae4: 0x1a31, 0x1ae5: 0xbb11, 0x1ae6: 0xbb29, 0x1ae7: 0xbb41, 0x1ae8: 0xbb59, 0x1ae9: 0xbb71, - 0x1aea: 0xbb89, 0x1aeb: 0x2109, 0x1aec: 0x1111, 0x1aed: 0x1429, 0x1aee: 0xbba1, 0x1aef: 0xbbb9, - 0x1af0: 0xbbd1, 0x1af1: 0x10e1, 0x1af2: 0x10f9, 0x1af3: 0xbbe9, 0x1af4: 0x2079, 0x1af5: 0xbc01, - 0x1af6: 0xbab1, 0x1af7: 0x1099, 0x1af8: 0x10b1, 0x1af9: 0x10c9, 0x1afa: 0xbac9, 0x1afb: 0xbae1, - 0x1afc: 0xbaf9, 0x1afd: 0x1429, 0x1afe: 0x1a31, 0x1aff: 0xbb11, + 0x1ac0: 0x2039, 0x1ac1: 0x0269, 0x1ac2: 0x01d9, 0x1ac3: 0x0fa9, 0x1ac4: 0x0fb9, 0x1ac5: 0x1089, + 0x1ac6: 0x0279, 0x1ac7: 0x0369, 0x1ac8: 0x0289, 0x1ac9: 0x13d1, 0x1aca: 0x0039, 0x1acb: 0x0ee9, + 0x1acc: 0x1159, 0x1acd: 0x0ef9, 0x1ace: 0x0f09, 0x1acf: 0x1199, 0x1ad0: 0x0f31, 0x1ad1: 0x0249, + 0x1ad2: 0x0f41, 0x1ad3: 0x0259, 0x1ad4: 0x0f51, 0x1ad5: 0x0359, 0x1ad6: 0x0f61, 0x1ad7: 0x0f71, + 0x1ad8: 0x00d9, 0x1ad9: 0x0f99, 0x1ada: 0x2039, 0x1adb: 0x0269, 0x1adc: 0x01d9, 0x1add: 0x0fa9, + 0x1ade: 0x0fb9, 0x1adf: 0x1089, 0x1ae0: 0x0279, 0x1ae1: 0x0369, 0x1ae2: 0x0289, 0x1ae3: 0x13d1, + 0x1ae4: 0xba81, 0x1ae5: 0xba99, 0x1ae6: 0x0040, 0x1ae7: 0x0040, 0x1ae8: 0xbab1, 0x1ae9: 0x1099, + 0x1aea: 0x10b1, 0x1aeb: 0x10c9, 0x1aec: 0xbac9, 0x1aed: 0xbae1, 0x1aee: 0xbaf9, 0x1aef: 0x1429, + 0x1af0: 0x1a31, 0x1af1: 0xbb11, 0x1af2: 0xbb29, 0x1af3: 0xbb41, 0x1af4: 0xbb59, 0x1af5: 0xbb71, + 0x1af6: 0xbb89, 0x1af7: 0x2109, 0x1af8: 0x1111, 0x1af9: 0x1429, 0x1afa: 0xbba1, 0x1afb: 0xbbb9, + 0x1afc: 0xbbd1, 0x1afd: 0x10e1, 0x1afe: 0x10f9, 0x1aff: 0xbbe9, // Block 0x6c, offset 0x1b00 - 0x1b00: 0xbb29, 0x1b01: 0xbb41, 0x1b02: 0xbb59, 0x1b03: 0xbb71, 0x1b04: 0xbb89, 0x1b05: 0x2109, - 0x1b06: 0x1111, 0x1b07: 0xbba1, 0x1b08: 0xbba1, 0x1b09: 0xbbb9, 0x1b0a: 0xbbd1, 0x1b0b: 0x10e1, - 0x1b0c: 0x10f9, 0x1b0d: 0xbbe9, 0x1b0e: 0x2079, 0x1b0f: 0xbc21, 0x1b10: 0xbac9, 0x1b11: 0x1429, - 0x1b12: 0xbb11, 0x1b13: 0x10e1, 0x1b14: 0x1111, 0x1b15: 0x2109, 0x1b16: 0xbab1, 0x1b17: 0x1099, - 0x1b18: 0x10b1, 0x1b19: 0x10c9, 0x1b1a: 0xbac9, 0x1b1b: 0xbae1, 0x1b1c: 0xbaf9, 0x1b1d: 0x1429, - 0x1b1e: 0x1a31, 0x1b1f: 0xbb11, 0x1b20: 0xbb29, 0x1b21: 0xbb41, 0x1b22: 0xbb59, 0x1b23: 0xbb71, - 0x1b24: 0xbb89, 0x1b25: 0x2109, 0x1b26: 0x1111, 0x1b27: 0x1429, 0x1b28: 0xbba1, 0x1b29: 0xbbb9, - 0x1b2a: 0xbbd1, 0x1b2b: 0x10e1, 0x1b2c: 0x10f9, 0x1b2d: 0xbbe9, 0x1b2e: 0x2079, 0x1b2f: 0xbc01, - 0x1b30: 0xbab1, 0x1b31: 0x1099, 0x1b32: 0x10b1, 0x1b33: 0x10c9, 0x1b34: 0xbac9, 0x1b35: 0xbae1, - 0x1b36: 0xbaf9, 0x1b37: 0x1429, 0x1b38: 0x1a31, 0x1b39: 0xbb11, 0x1b3a: 0xbb29, 0x1b3b: 0xbb41, - 0x1b3c: 0xbb59, 0x1b3d: 0xbb71, 0x1b3e: 0xbb89, 0x1b3f: 0x2109, + 0x1b00: 0x2079, 0x1b01: 0xbc01, 0x1b02: 0xbab1, 
0x1b03: 0x1099, 0x1b04: 0x10b1, 0x1b05: 0x10c9, + 0x1b06: 0xbac9, 0x1b07: 0xbae1, 0x1b08: 0xbaf9, 0x1b09: 0x1429, 0x1b0a: 0x1a31, 0x1b0b: 0xbb11, + 0x1b0c: 0xbb29, 0x1b0d: 0xbb41, 0x1b0e: 0xbb59, 0x1b0f: 0xbb71, 0x1b10: 0xbb89, 0x1b11: 0x2109, + 0x1b12: 0x1111, 0x1b13: 0xbba1, 0x1b14: 0xbba1, 0x1b15: 0xbbb9, 0x1b16: 0xbbd1, 0x1b17: 0x10e1, + 0x1b18: 0x10f9, 0x1b19: 0xbbe9, 0x1b1a: 0x2079, 0x1b1b: 0xbc21, 0x1b1c: 0xbac9, 0x1b1d: 0x1429, + 0x1b1e: 0xbb11, 0x1b1f: 0x10e1, 0x1b20: 0x1111, 0x1b21: 0x2109, 0x1b22: 0xbab1, 0x1b23: 0x1099, + 0x1b24: 0x10b1, 0x1b25: 0x10c9, 0x1b26: 0xbac9, 0x1b27: 0xbae1, 0x1b28: 0xbaf9, 0x1b29: 0x1429, + 0x1b2a: 0x1a31, 0x1b2b: 0xbb11, 0x1b2c: 0xbb29, 0x1b2d: 0xbb41, 0x1b2e: 0xbb59, 0x1b2f: 0xbb71, + 0x1b30: 0xbb89, 0x1b31: 0x2109, 0x1b32: 0x1111, 0x1b33: 0x1429, 0x1b34: 0xbba1, 0x1b35: 0xbbb9, + 0x1b36: 0xbbd1, 0x1b37: 0x10e1, 0x1b38: 0x10f9, 0x1b39: 0xbbe9, 0x1b3a: 0x2079, 0x1b3b: 0xbc01, + 0x1b3c: 0xbab1, 0x1b3d: 0x1099, 0x1b3e: 0x10b1, 0x1b3f: 0x10c9, // Block 0x6d, offset 0x1b40 - 0x1b40: 0x1111, 0x1b41: 0xbba1, 0x1b42: 0xbba1, 0x1b43: 0xbbb9, 0x1b44: 0xbbd1, 0x1b45: 0x10e1, - 0x1b46: 0x10f9, 0x1b47: 0xbbe9, 0x1b48: 0x2079, 0x1b49: 0xbc21, 0x1b4a: 0xbac9, 0x1b4b: 0x1429, - 0x1b4c: 0xbb11, 0x1b4d: 0x10e1, 0x1b4e: 0x1111, 0x1b4f: 0x2109, 0x1b50: 0xbab1, 0x1b51: 0x1099, - 0x1b52: 0x10b1, 0x1b53: 0x10c9, 0x1b54: 0xbac9, 0x1b55: 0xbae1, 0x1b56: 0xbaf9, 0x1b57: 0x1429, - 0x1b58: 0x1a31, 0x1b59: 0xbb11, 0x1b5a: 0xbb29, 0x1b5b: 0xbb41, 0x1b5c: 0xbb59, 0x1b5d: 0xbb71, - 0x1b5e: 0xbb89, 0x1b5f: 0x2109, 0x1b60: 0x1111, 0x1b61: 0x1429, 0x1b62: 0xbba1, 0x1b63: 0xbbb9, - 0x1b64: 0xbbd1, 0x1b65: 0x10e1, 0x1b66: 0x10f9, 0x1b67: 0xbbe9, 0x1b68: 0x2079, 0x1b69: 0xbc01, - 0x1b6a: 0xbab1, 0x1b6b: 0x1099, 0x1b6c: 0x10b1, 0x1b6d: 0x10c9, 0x1b6e: 0xbac9, 0x1b6f: 0xbae1, - 0x1b70: 0xbaf9, 0x1b71: 0x1429, 0x1b72: 0x1a31, 0x1b73: 0xbb11, 0x1b74: 0xbb29, 0x1b75: 0xbb41, - 0x1b76: 0xbb59, 0x1b77: 0xbb71, 0x1b78: 0xbb89, 0x1b79: 0x2109, 0x1b7a: 0x1111, 0x1b7b: 0xbba1, - 0x1b7c: 0xbba1, 0x1b7d: 0xbbb9, 0x1b7e: 0xbbd1, 0x1b7f: 0x10e1, + 0x1b40: 0xbac9, 0x1b41: 0xbae1, 0x1b42: 0xbaf9, 0x1b43: 0x1429, 0x1b44: 0x1a31, 0x1b45: 0xbb11, + 0x1b46: 0xbb29, 0x1b47: 0xbb41, 0x1b48: 0xbb59, 0x1b49: 0xbb71, 0x1b4a: 0xbb89, 0x1b4b: 0x2109, + 0x1b4c: 0x1111, 0x1b4d: 0xbba1, 0x1b4e: 0xbba1, 0x1b4f: 0xbbb9, 0x1b50: 0xbbd1, 0x1b51: 0x10e1, + 0x1b52: 0x10f9, 0x1b53: 0xbbe9, 0x1b54: 0x2079, 0x1b55: 0xbc21, 0x1b56: 0xbac9, 0x1b57: 0x1429, + 0x1b58: 0xbb11, 0x1b59: 0x10e1, 0x1b5a: 0x1111, 0x1b5b: 0x2109, 0x1b5c: 0xbab1, 0x1b5d: 0x1099, + 0x1b5e: 0x10b1, 0x1b5f: 0x10c9, 0x1b60: 0xbac9, 0x1b61: 0xbae1, 0x1b62: 0xbaf9, 0x1b63: 0x1429, + 0x1b64: 0x1a31, 0x1b65: 0xbb11, 0x1b66: 0xbb29, 0x1b67: 0xbb41, 0x1b68: 0xbb59, 0x1b69: 0xbb71, + 0x1b6a: 0xbb89, 0x1b6b: 0x2109, 0x1b6c: 0x1111, 0x1b6d: 0x1429, 0x1b6e: 0xbba1, 0x1b6f: 0xbbb9, + 0x1b70: 0xbbd1, 0x1b71: 0x10e1, 0x1b72: 0x10f9, 0x1b73: 0xbbe9, 0x1b74: 0x2079, 0x1b75: 0xbc01, + 0x1b76: 0xbab1, 0x1b77: 0x1099, 0x1b78: 0x10b1, 0x1b79: 0x10c9, 0x1b7a: 0xbac9, 0x1b7b: 0xbae1, + 0x1b7c: 0xbaf9, 0x1b7d: 0x1429, 0x1b7e: 0x1a31, 0x1b7f: 0xbb11, // Block 0x6e, offset 0x1b80 - 0x1b80: 0x10f9, 0x1b81: 0xbbe9, 0x1b82: 0x2079, 0x1b83: 0xbc21, 0x1b84: 0xbac9, 0x1b85: 0x1429, - 0x1b86: 0xbb11, 0x1b87: 0x10e1, 0x1b88: 0x1111, 0x1b89: 0x2109, 0x1b8a: 0xbc41, 0x1b8b: 0xbc41, - 0x1b8c: 0x0040, 0x1b8d: 0x0040, 0x1b8e: 0x1f41, 0x1b8f: 0x00c9, 0x1b90: 0x0069, 0x1b91: 0x0079, - 0x1b92: 0x1f51, 0x1b93: 0x1f61, 0x1b94: 0x1f71, 0x1b95: 0x1f81, 0x1b96: 0x1f91, 0x1b97: 0x1fa1, - 0x1b98: 
0x1f41, 0x1b99: 0x00c9, 0x1b9a: 0x0069, 0x1b9b: 0x0079, 0x1b9c: 0x1f51, 0x1b9d: 0x1f61, - 0x1b9e: 0x1f71, 0x1b9f: 0x1f81, 0x1ba0: 0x1f91, 0x1ba1: 0x1fa1, 0x1ba2: 0x1f41, 0x1ba3: 0x00c9, - 0x1ba4: 0x0069, 0x1ba5: 0x0079, 0x1ba6: 0x1f51, 0x1ba7: 0x1f61, 0x1ba8: 0x1f71, 0x1ba9: 0x1f81, - 0x1baa: 0x1f91, 0x1bab: 0x1fa1, 0x1bac: 0x1f41, 0x1bad: 0x00c9, 0x1bae: 0x0069, 0x1baf: 0x0079, - 0x1bb0: 0x1f51, 0x1bb1: 0x1f61, 0x1bb2: 0x1f71, 0x1bb3: 0x1f81, 0x1bb4: 0x1f91, 0x1bb5: 0x1fa1, - 0x1bb6: 0x1f41, 0x1bb7: 0x00c9, 0x1bb8: 0x0069, 0x1bb9: 0x0079, 0x1bba: 0x1f51, 0x1bbb: 0x1f61, - 0x1bbc: 0x1f71, 0x1bbd: 0x1f81, 0x1bbe: 0x1f91, 0x1bbf: 0x1fa1, + 0x1b80: 0xbb29, 0x1b81: 0xbb41, 0x1b82: 0xbb59, 0x1b83: 0xbb71, 0x1b84: 0xbb89, 0x1b85: 0x2109, + 0x1b86: 0x1111, 0x1b87: 0xbba1, 0x1b88: 0xbba1, 0x1b89: 0xbbb9, 0x1b8a: 0xbbd1, 0x1b8b: 0x10e1, + 0x1b8c: 0x10f9, 0x1b8d: 0xbbe9, 0x1b8e: 0x2079, 0x1b8f: 0xbc21, 0x1b90: 0xbac9, 0x1b91: 0x1429, + 0x1b92: 0xbb11, 0x1b93: 0x10e1, 0x1b94: 0x1111, 0x1b95: 0x2109, 0x1b96: 0xbab1, 0x1b97: 0x1099, + 0x1b98: 0x10b1, 0x1b99: 0x10c9, 0x1b9a: 0xbac9, 0x1b9b: 0xbae1, 0x1b9c: 0xbaf9, 0x1b9d: 0x1429, + 0x1b9e: 0x1a31, 0x1b9f: 0xbb11, 0x1ba0: 0xbb29, 0x1ba1: 0xbb41, 0x1ba2: 0xbb59, 0x1ba3: 0xbb71, + 0x1ba4: 0xbb89, 0x1ba5: 0x2109, 0x1ba6: 0x1111, 0x1ba7: 0x1429, 0x1ba8: 0xbba1, 0x1ba9: 0xbbb9, + 0x1baa: 0xbbd1, 0x1bab: 0x10e1, 0x1bac: 0x10f9, 0x1bad: 0xbbe9, 0x1bae: 0x2079, 0x1baf: 0xbc01, + 0x1bb0: 0xbab1, 0x1bb1: 0x1099, 0x1bb2: 0x10b1, 0x1bb3: 0x10c9, 0x1bb4: 0xbac9, 0x1bb5: 0xbae1, + 0x1bb6: 0xbaf9, 0x1bb7: 0x1429, 0x1bb8: 0x1a31, 0x1bb9: 0xbb11, 0x1bba: 0xbb29, 0x1bbb: 0xbb41, + 0x1bbc: 0xbb59, 0x1bbd: 0xbb71, 0x1bbe: 0xbb89, 0x1bbf: 0x2109, // Block 0x6f, offset 0x1bc0 - 0x1bc0: 0xe115, 0x1bc1: 0xe115, 0x1bc2: 0xe135, 0x1bc3: 0xe135, 0x1bc4: 0xe115, 0x1bc5: 0xe115, - 0x1bc6: 0xe175, 0x1bc7: 0xe175, 0x1bc8: 0xe115, 0x1bc9: 0xe115, 0x1bca: 0xe135, 0x1bcb: 0xe135, - 0x1bcc: 0xe115, 0x1bcd: 0xe115, 0x1bce: 0xe1f5, 0x1bcf: 0xe1f5, 0x1bd0: 0xe115, 0x1bd1: 0xe115, - 0x1bd2: 0xe135, 0x1bd3: 0xe135, 0x1bd4: 0xe115, 0x1bd5: 0xe115, 0x1bd6: 0xe175, 0x1bd7: 0xe175, - 0x1bd8: 0xe115, 0x1bd9: 0xe115, 0x1bda: 0xe135, 0x1bdb: 0xe135, 0x1bdc: 0xe115, 0x1bdd: 0xe115, - 0x1bde: 0x8b05, 0x1bdf: 0x8b05, 0x1be0: 0x04b5, 0x1be1: 0x04b5, 0x1be2: 0x0208, 0x1be3: 0x0208, - 0x1be4: 0x0208, 0x1be5: 0x0208, 0x1be6: 0x0208, 0x1be7: 0x0208, 0x1be8: 0x0208, 0x1be9: 0x0208, - 0x1bea: 0x0208, 0x1beb: 0x0208, 0x1bec: 0x0208, 0x1bed: 0x0208, 0x1bee: 0x0208, 0x1bef: 0x0208, - 0x1bf0: 0x0208, 0x1bf1: 0x0208, 0x1bf2: 0x0208, 0x1bf3: 0x0208, 0x1bf4: 0x0208, 0x1bf5: 0x0208, - 0x1bf6: 0x0208, 0x1bf7: 0x0208, 0x1bf8: 0x0208, 0x1bf9: 0x0208, 0x1bfa: 0x0208, 0x1bfb: 0x0208, - 0x1bfc: 0x0208, 0x1bfd: 0x0208, 0x1bfe: 0x0208, 0x1bff: 0x0208, + 0x1bc0: 0x1111, 0x1bc1: 0xbba1, 0x1bc2: 0xbba1, 0x1bc3: 0xbbb9, 0x1bc4: 0xbbd1, 0x1bc5: 0x10e1, + 0x1bc6: 0x10f9, 0x1bc7: 0xbbe9, 0x1bc8: 0x2079, 0x1bc9: 0xbc21, 0x1bca: 0xbac9, 0x1bcb: 0x1429, + 0x1bcc: 0xbb11, 0x1bcd: 0x10e1, 0x1bce: 0x1111, 0x1bcf: 0x2109, 0x1bd0: 0xbab1, 0x1bd1: 0x1099, + 0x1bd2: 0x10b1, 0x1bd3: 0x10c9, 0x1bd4: 0xbac9, 0x1bd5: 0xbae1, 0x1bd6: 0xbaf9, 0x1bd7: 0x1429, + 0x1bd8: 0x1a31, 0x1bd9: 0xbb11, 0x1bda: 0xbb29, 0x1bdb: 0xbb41, 0x1bdc: 0xbb59, 0x1bdd: 0xbb71, + 0x1bde: 0xbb89, 0x1bdf: 0x2109, 0x1be0: 0x1111, 0x1be1: 0x1429, 0x1be2: 0xbba1, 0x1be3: 0xbbb9, + 0x1be4: 0xbbd1, 0x1be5: 0x10e1, 0x1be6: 0x10f9, 0x1be7: 0xbbe9, 0x1be8: 0x2079, 0x1be9: 0xbc01, + 0x1bea: 0xbab1, 0x1beb: 0x1099, 0x1bec: 0x10b1, 0x1bed: 0x10c9, 0x1bee: 0xbac9, 0x1bef: 0xbae1, + 
0x1bf0: 0xbaf9, 0x1bf1: 0x1429, 0x1bf2: 0x1a31, 0x1bf3: 0xbb11, 0x1bf4: 0xbb29, 0x1bf5: 0xbb41, + 0x1bf6: 0xbb59, 0x1bf7: 0xbb71, 0x1bf8: 0xbb89, 0x1bf9: 0x2109, 0x1bfa: 0x1111, 0x1bfb: 0xbba1, + 0x1bfc: 0xbba1, 0x1bfd: 0xbbb9, 0x1bfe: 0xbbd1, 0x1bff: 0x10e1, // Block 0x70, offset 0x1c00 - 0x1c00: 0xb189, 0x1c01: 0xb1a1, 0x1c02: 0xb201, 0x1c03: 0xb249, 0x1c04: 0x0040, 0x1c05: 0xb411, - 0x1c06: 0xb291, 0x1c07: 0xb219, 0x1c08: 0xb309, 0x1c09: 0xb429, 0x1c0a: 0xb399, 0x1c0b: 0xb3b1, - 0x1c0c: 0xb3c9, 0x1c0d: 0xb3e1, 0x1c0e: 0xb2a9, 0x1c0f: 0xb339, 0x1c10: 0xb369, 0x1c11: 0xb2d9, - 0x1c12: 0xb381, 0x1c13: 0xb279, 0x1c14: 0xb2c1, 0x1c15: 0xb1d1, 0x1c16: 0xb1e9, 0x1c17: 0xb231, - 0x1c18: 0xb261, 0x1c19: 0xb2f1, 0x1c1a: 0xb321, 0x1c1b: 0xb351, 0x1c1c: 0xbc59, 0x1c1d: 0x7949, - 0x1c1e: 0xbc71, 0x1c1f: 0xbc89, 0x1c20: 0x0040, 0x1c21: 0xb1a1, 0x1c22: 0xb201, 0x1c23: 0x0040, - 0x1c24: 0xb3f9, 0x1c25: 0x0040, 0x1c26: 0x0040, 0x1c27: 0xb219, 0x1c28: 0x0040, 0x1c29: 0xb429, - 0x1c2a: 0xb399, 0x1c2b: 0xb3b1, 0x1c2c: 0xb3c9, 0x1c2d: 0xb3e1, 0x1c2e: 0xb2a9, 0x1c2f: 0xb339, - 0x1c30: 0xb369, 0x1c31: 0xb2d9, 0x1c32: 0xb381, 0x1c33: 0x0040, 0x1c34: 0xb2c1, 0x1c35: 0xb1d1, - 0x1c36: 0xb1e9, 0x1c37: 0xb231, 0x1c38: 0x0040, 0x1c39: 0xb2f1, 0x1c3a: 0x0040, 0x1c3b: 0xb351, - 0x1c3c: 0x0040, 0x1c3d: 0x0040, 0x1c3e: 0x0040, 0x1c3f: 0x0040, + 0x1c00: 0x10f9, 0x1c01: 0xbbe9, 0x1c02: 0x2079, 0x1c03: 0xbc21, 0x1c04: 0xbac9, 0x1c05: 0x1429, + 0x1c06: 0xbb11, 0x1c07: 0x10e1, 0x1c08: 0x1111, 0x1c09: 0x2109, 0x1c0a: 0xbc41, 0x1c0b: 0xbc41, + 0x1c0c: 0x0040, 0x1c0d: 0x0040, 0x1c0e: 0x1f41, 0x1c0f: 0x00c9, 0x1c10: 0x0069, 0x1c11: 0x0079, + 0x1c12: 0x1f51, 0x1c13: 0x1f61, 0x1c14: 0x1f71, 0x1c15: 0x1f81, 0x1c16: 0x1f91, 0x1c17: 0x1fa1, + 0x1c18: 0x1f41, 0x1c19: 0x00c9, 0x1c1a: 0x0069, 0x1c1b: 0x0079, 0x1c1c: 0x1f51, 0x1c1d: 0x1f61, + 0x1c1e: 0x1f71, 0x1c1f: 0x1f81, 0x1c20: 0x1f91, 0x1c21: 0x1fa1, 0x1c22: 0x1f41, 0x1c23: 0x00c9, + 0x1c24: 0x0069, 0x1c25: 0x0079, 0x1c26: 0x1f51, 0x1c27: 0x1f61, 0x1c28: 0x1f71, 0x1c29: 0x1f81, + 0x1c2a: 0x1f91, 0x1c2b: 0x1fa1, 0x1c2c: 0x1f41, 0x1c2d: 0x00c9, 0x1c2e: 0x0069, 0x1c2f: 0x0079, + 0x1c30: 0x1f51, 0x1c31: 0x1f61, 0x1c32: 0x1f71, 0x1c33: 0x1f81, 0x1c34: 0x1f91, 0x1c35: 0x1fa1, + 0x1c36: 0x1f41, 0x1c37: 0x00c9, 0x1c38: 0x0069, 0x1c39: 0x0079, 0x1c3a: 0x1f51, 0x1c3b: 0x1f61, + 0x1c3c: 0x1f71, 0x1c3d: 0x1f81, 0x1c3e: 0x1f91, 0x1c3f: 0x1fa1, // Block 0x71, offset 0x1c40 - 0x1c40: 0x0040, 0x1c41: 0x0040, 0x1c42: 0xb201, 0x1c43: 0x0040, 0x1c44: 0x0040, 0x1c45: 0x0040, - 0x1c46: 0x0040, 0x1c47: 0xb219, 0x1c48: 0x0040, 0x1c49: 0xb429, 0x1c4a: 0x0040, 0x1c4b: 0xb3b1, - 0x1c4c: 0x0040, 0x1c4d: 0xb3e1, 0x1c4e: 0xb2a9, 0x1c4f: 0xb339, 0x1c50: 0x0040, 0x1c51: 0xb2d9, - 0x1c52: 0xb381, 0x1c53: 0x0040, 0x1c54: 0xb2c1, 0x1c55: 0x0040, 0x1c56: 0x0040, 0x1c57: 0xb231, - 0x1c58: 0x0040, 0x1c59: 0xb2f1, 0x1c5a: 0x0040, 0x1c5b: 0xb351, 0x1c5c: 0x0040, 0x1c5d: 0x7949, - 0x1c5e: 0x0040, 0x1c5f: 0xbc89, 0x1c60: 0x0040, 0x1c61: 0xb1a1, 0x1c62: 0xb201, 0x1c63: 0x0040, - 0x1c64: 0xb3f9, 0x1c65: 0x0040, 0x1c66: 0x0040, 0x1c67: 0xb219, 0x1c68: 0xb309, 0x1c69: 0xb429, - 0x1c6a: 0xb399, 0x1c6b: 0x0040, 0x1c6c: 0xb3c9, 0x1c6d: 0xb3e1, 0x1c6e: 0xb2a9, 0x1c6f: 0xb339, - 0x1c70: 0xb369, 0x1c71: 0xb2d9, 0x1c72: 0xb381, 0x1c73: 0x0040, 0x1c74: 0xb2c1, 0x1c75: 0xb1d1, - 0x1c76: 0xb1e9, 0x1c77: 0xb231, 0x1c78: 0x0040, 0x1c79: 0xb2f1, 0x1c7a: 0xb321, 0x1c7b: 0xb351, - 0x1c7c: 0xbc59, 0x1c7d: 0x0040, 0x1c7e: 0xbc71, 0x1c7f: 0x0040, + 0x1c40: 0xe115, 0x1c41: 0xe115, 0x1c42: 0xe135, 0x1c43: 0xe135, 0x1c44: 0xe115, 0x1c45: 0xe115, 
+ 0x1c46: 0xe175, 0x1c47: 0xe175, 0x1c48: 0xe115, 0x1c49: 0xe115, 0x1c4a: 0xe135, 0x1c4b: 0xe135, + 0x1c4c: 0xe115, 0x1c4d: 0xe115, 0x1c4e: 0xe1f5, 0x1c4f: 0xe1f5, 0x1c50: 0xe115, 0x1c51: 0xe115, + 0x1c52: 0xe135, 0x1c53: 0xe135, 0x1c54: 0xe115, 0x1c55: 0xe115, 0x1c56: 0xe175, 0x1c57: 0xe175, + 0x1c58: 0xe115, 0x1c59: 0xe115, 0x1c5a: 0xe135, 0x1c5b: 0xe135, 0x1c5c: 0xe115, 0x1c5d: 0xe115, + 0x1c5e: 0x8b05, 0x1c5f: 0x8b05, 0x1c60: 0x04b5, 0x1c61: 0x04b5, 0x1c62: 0x0a08, 0x1c63: 0x0a08, + 0x1c64: 0x0a08, 0x1c65: 0x0a08, 0x1c66: 0x0a08, 0x1c67: 0x0a08, 0x1c68: 0x0a08, 0x1c69: 0x0a08, + 0x1c6a: 0x0a08, 0x1c6b: 0x0a08, 0x1c6c: 0x0a08, 0x1c6d: 0x0a08, 0x1c6e: 0x0a08, 0x1c6f: 0x0a08, + 0x1c70: 0x0a08, 0x1c71: 0x0a08, 0x1c72: 0x0a08, 0x1c73: 0x0a08, 0x1c74: 0x0a08, 0x1c75: 0x0a08, + 0x1c76: 0x0a08, 0x1c77: 0x0a08, 0x1c78: 0x0a08, 0x1c79: 0x0a08, 0x1c7a: 0x0a08, 0x1c7b: 0x0a08, + 0x1c7c: 0x0a08, 0x1c7d: 0x0a08, 0x1c7e: 0x0a08, 0x1c7f: 0x0a08, // Block 0x72, offset 0x1c80 - 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0xb3f9, 0x1c85: 0xb411, - 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0x0040, 0x1c8b: 0xb3b1, + 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0x0040, 0x1c85: 0xb411, + 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0xb399, 0x1c8b: 0xb3b1, 0x1c8c: 0xb3c9, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0xb369, 0x1c91: 0xb2d9, 0x1c92: 0xb381, 0x1c93: 0xb279, 0x1c94: 0xb2c1, 0x1c95: 0xb1d1, 0x1c96: 0xb1e9, 0x1c97: 0xb231, - 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0x0040, 0x1c9d: 0x0040, - 0x1c9e: 0x0040, 0x1c9f: 0x0040, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0xb249, - 0x1ca4: 0x0040, 0x1ca5: 0xb411, 0x1ca6: 0xb291, 0x1ca7: 0xb219, 0x1ca8: 0xb309, 0x1ca9: 0xb429, - 0x1caa: 0x0040, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339, - 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0xb279, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1, - 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0xb261, 0x1cb9: 0xb2f1, 0x1cba: 0xb321, 0x1cbb: 0xb351, + 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0xbc59, 0x1c9d: 0x7949, + 0x1c9e: 0xbc71, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040, + 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0x0040, 0x1ca9: 0xb429, + 0x1caa: 0xb399, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339, + 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1, + 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0x0040, 0x1cbb: 0xb351, 0x1cbc: 0x0040, 0x1cbd: 0x0040, 0x1cbe: 0x0040, 0x1cbf: 0x0040, // Block 0x73, offset 0x1cc0 - 0x1cc0: 0x0040, 0x1cc1: 0xbca2, 0x1cc2: 0xbcba, 0x1cc3: 0xbcd2, 0x1cc4: 0xbcea, 0x1cc5: 0xbd02, - 0x1cc6: 0xbd1a, 0x1cc7: 0xbd32, 0x1cc8: 0xbd4a, 0x1cc9: 0xbd62, 0x1cca: 0xbd7a, 0x1ccb: 0x0018, - 0x1ccc: 0x0018, 0x1ccd: 0x0040, 0x1cce: 0x0040, 0x1ccf: 0x0040, 0x1cd0: 0xbd92, 0x1cd1: 0xbdb2, - 0x1cd2: 0xbdd2, 0x1cd3: 0xbdf2, 0x1cd4: 0xbe12, 0x1cd5: 0xbe32, 0x1cd6: 0xbe52, 0x1cd7: 0xbe72, - 0x1cd8: 0xbe92, 0x1cd9: 0xbeb2, 0x1cda: 0xbed2, 0x1cdb: 0xbef2, 0x1cdc: 0xbf12, 0x1cdd: 0xbf32, - 0x1cde: 0xbf52, 0x1cdf: 0xbf72, 0x1ce0: 0xbf92, 0x1ce1: 0xbfb2, 0x1ce2: 0xbfd2, 0x1ce3: 0xbff2, - 0x1ce4: 0xc012, 0x1ce5: 0xc032, 0x1ce6: 0xc052, 0x1ce7: 0xc072, 0x1ce8: 0xc092, 0x1ce9: 0xc0b2, - 0x1cea: 0xc0d1, 0x1ceb: 0x1159, 
0x1cec: 0x0269, 0x1ced: 0x6671, 0x1cee: 0xc111, 0x1cef: 0x0040, - 0x1cf0: 0x0039, 0x1cf1: 0x0ee9, 0x1cf2: 0x1159, 0x1cf3: 0x0ef9, 0x1cf4: 0x0f09, 0x1cf5: 0x1199, - 0x1cf6: 0x0f31, 0x1cf7: 0x0249, 0x1cf8: 0x0f41, 0x1cf9: 0x0259, 0x1cfa: 0x0f51, 0x1cfb: 0x0359, - 0x1cfc: 0x0f61, 0x1cfd: 0x0f71, 0x1cfe: 0x00d9, 0x1cff: 0x0f99, + 0x1cc0: 0x0040, 0x1cc1: 0x0040, 0x1cc2: 0xb201, 0x1cc3: 0x0040, 0x1cc4: 0x0040, 0x1cc5: 0x0040, + 0x1cc6: 0x0040, 0x1cc7: 0xb219, 0x1cc8: 0x0040, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1, + 0x1ccc: 0x0040, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0x0040, 0x1cd1: 0xb2d9, + 0x1cd2: 0xb381, 0x1cd3: 0x0040, 0x1cd4: 0xb2c1, 0x1cd5: 0x0040, 0x1cd6: 0x0040, 0x1cd7: 0xb231, + 0x1cd8: 0x0040, 0x1cd9: 0xb2f1, 0x1cda: 0x0040, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x7949, + 0x1cde: 0x0040, 0x1cdf: 0xbc89, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0x0040, + 0x1ce4: 0xb3f9, 0x1ce5: 0x0040, 0x1ce6: 0x0040, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429, + 0x1cea: 0xb399, 0x1ceb: 0x0040, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339, + 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0x0040, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1, + 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0x0040, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351, + 0x1cfc: 0xbc59, 0x1cfd: 0x0040, 0x1cfe: 0xbc71, 0x1cff: 0x0040, // Block 0x74, offset 0x1d00 - 0x1d00: 0x2039, 0x1d01: 0x0269, 0x1d02: 0x01d9, 0x1d03: 0x0fa9, 0x1d04: 0x0fb9, 0x1d05: 0x1089, - 0x1d06: 0x0279, 0x1d07: 0x0369, 0x1d08: 0x0289, 0x1d09: 0x13d1, 0x1d0a: 0xc129, 0x1d0b: 0x65b1, - 0x1d0c: 0xc141, 0x1d0d: 0x1441, 0x1d0e: 0xc159, 0x1d0f: 0xc179, 0x1d10: 0x0018, 0x1d11: 0x0018, - 0x1d12: 0x0018, 0x1d13: 0x0018, 0x1d14: 0x0018, 0x1d15: 0x0018, 0x1d16: 0x0018, 0x1d17: 0x0018, - 0x1d18: 0x0018, 0x1d19: 0x0018, 0x1d1a: 0x0018, 0x1d1b: 0x0018, 0x1d1c: 0x0018, 0x1d1d: 0x0018, - 0x1d1e: 0x0018, 0x1d1f: 0x0018, 0x1d20: 0x0018, 0x1d21: 0x0018, 0x1d22: 0x0018, 0x1d23: 0x0018, - 0x1d24: 0x0018, 0x1d25: 0x0018, 0x1d26: 0x0018, 0x1d27: 0x0018, 0x1d28: 0x0018, 0x1d29: 0x0018, - 0x1d2a: 0xc191, 0x1d2b: 0xc1a9, 0x1d2c: 0x0040, 0x1d2d: 0x0040, 0x1d2e: 0x0040, 0x1d2f: 0x0040, - 0x1d30: 0x0018, 0x1d31: 0x0018, 0x1d32: 0x0018, 0x1d33: 0x0018, 0x1d34: 0x0018, 0x1d35: 0x0018, - 0x1d36: 0x0018, 0x1d37: 0x0018, 0x1d38: 0x0018, 0x1d39: 0x0018, 0x1d3a: 0x0018, 0x1d3b: 0x0018, - 0x1d3c: 0x0018, 0x1d3d: 0x0018, 0x1d3e: 0x0018, 0x1d3f: 0x0018, + 0x1d00: 0xb189, 0x1d01: 0xb1a1, 0x1d02: 0xb201, 0x1d03: 0xb249, 0x1d04: 0xb3f9, 0x1d05: 0xb411, + 0x1d06: 0xb291, 0x1d07: 0xb219, 0x1d08: 0xb309, 0x1d09: 0xb429, 0x1d0a: 0x0040, 0x1d0b: 0xb3b1, + 0x1d0c: 0xb3c9, 0x1d0d: 0xb3e1, 0x1d0e: 0xb2a9, 0x1d0f: 0xb339, 0x1d10: 0xb369, 0x1d11: 0xb2d9, + 0x1d12: 0xb381, 0x1d13: 0xb279, 0x1d14: 0xb2c1, 0x1d15: 0xb1d1, 0x1d16: 0xb1e9, 0x1d17: 0xb231, + 0x1d18: 0xb261, 0x1d19: 0xb2f1, 0x1d1a: 0xb321, 0x1d1b: 0xb351, 0x1d1c: 0x0040, 0x1d1d: 0x0040, + 0x1d1e: 0x0040, 0x1d1f: 0x0040, 0x1d20: 0x0040, 0x1d21: 0xb1a1, 0x1d22: 0xb201, 0x1d23: 0xb249, + 0x1d24: 0x0040, 0x1d25: 0xb411, 0x1d26: 0xb291, 0x1d27: 0xb219, 0x1d28: 0xb309, 0x1d29: 0xb429, + 0x1d2a: 0x0040, 0x1d2b: 0xb3b1, 0x1d2c: 0xb3c9, 0x1d2d: 0xb3e1, 0x1d2e: 0xb2a9, 0x1d2f: 0xb339, + 0x1d30: 0xb369, 0x1d31: 0xb2d9, 0x1d32: 0xb381, 0x1d33: 0xb279, 0x1d34: 0xb2c1, 0x1d35: 0xb1d1, + 0x1d36: 0xb1e9, 0x1d37: 0xb231, 0x1d38: 0xb261, 0x1d39: 0xb2f1, 0x1d3a: 0xb321, 0x1d3b: 0xb351, + 0x1d3c: 0x0040, 0x1d3d: 0x0040, 0x1d3e: 0x0040, 0x1d3f: 0x0040, // Block 0x75, offset 0x1d40 - 0x1d40: 0xc1d9, 0x1d41: 
0xc211, 0x1d42: 0xc249, 0x1d43: 0x0040, 0x1d44: 0x0040, 0x1d45: 0x0040, - 0x1d46: 0x0040, 0x1d47: 0x0040, 0x1d48: 0x0040, 0x1d49: 0x0040, 0x1d4a: 0x0040, 0x1d4b: 0x0040, - 0x1d4c: 0x0040, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xc269, 0x1d51: 0xc289, - 0x1d52: 0xc2a9, 0x1d53: 0xc2c9, 0x1d54: 0xc2e9, 0x1d55: 0xc309, 0x1d56: 0xc329, 0x1d57: 0xc349, - 0x1d58: 0xc369, 0x1d59: 0xc389, 0x1d5a: 0xc3a9, 0x1d5b: 0xc3c9, 0x1d5c: 0xc3e9, 0x1d5d: 0xc409, - 0x1d5e: 0xc429, 0x1d5f: 0xc449, 0x1d60: 0xc469, 0x1d61: 0xc489, 0x1d62: 0xc4a9, 0x1d63: 0xc4c9, - 0x1d64: 0xc4e9, 0x1d65: 0xc509, 0x1d66: 0xc529, 0x1d67: 0xc549, 0x1d68: 0xc569, 0x1d69: 0xc589, - 0x1d6a: 0xc5a9, 0x1d6b: 0xc5c9, 0x1d6c: 0xc5e9, 0x1d6d: 0xc609, 0x1d6e: 0xc629, 0x1d6f: 0xc649, - 0x1d70: 0xc669, 0x1d71: 0xc689, 0x1d72: 0xc6a9, 0x1d73: 0xc6c9, 0x1d74: 0xc6e9, 0x1d75: 0xc709, - 0x1d76: 0xc729, 0x1d77: 0xc749, 0x1d78: 0xc769, 0x1d79: 0xc789, 0x1d7a: 0xc7a9, 0x1d7b: 0xc7c9, - 0x1d7c: 0x0040, 0x1d7d: 0x0040, 0x1d7e: 0x0040, 0x1d7f: 0x0040, + 0x1d40: 0x0040, 0x1d41: 0xbca2, 0x1d42: 0xbcba, 0x1d43: 0xbcd2, 0x1d44: 0xbcea, 0x1d45: 0xbd02, + 0x1d46: 0xbd1a, 0x1d47: 0xbd32, 0x1d48: 0xbd4a, 0x1d49: 0xbd62, 0x1d4a: 0xbd7a, 0x1d4b: 0x0018, + 0x1d4c: 0x0018, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xbd92, 0x1d51: 0xbdb2, + 0x1d52: 0xbdd2, 0x1d53: 0xbdf2, 0x1d54: 0xbe12, 0x1d55: 0xbe32, 0x1d56: 0xbe52, 0x1d57: 0xbe72, + 0x1d58: 0xbe92, 0x1d59: 0xbeb2, 0x1d5a: 0xbed2, 0x1d5b: 0xbef2, 0x1d5c: 0xbf12, 0x1d5d: 0xbf32, + 0x1d5e: 0xbf52, 0x1d5f: 0xbf72, 0x1d60: 0xbf92, 0x1d61: 0xbfb2, 0x1d62: 0xbfd2, 0x1d63: 0xbff2, + 0x1d64: 0xc012, 0x1d65: 0xc032, 0x1d66: 0xc052, 0x1d67: 0xc072, 0x1d68: 0xc092, 0x1d69: 0xc0b2, + 0x1d6a: 0xc0d1, 0x1d6b: 0x1159, 0x1d6c: 0x0269, 0x1d6d: 0x6671, 0x1d6e: 0xc111, 0x1d6f: 0x0040, + 0x1d70: 0x0039, 0x1d71: 0x0ee9, 0x1d72: 0x1159, 0x1d73: 0x0ef9, 0x1d74: 0x0f09, 0x1d75: 0x1199, + 0x1d76: 0x0f31, 0x1d77: 0x0249, 0x1d78: 0x0f41, 0x1d79: 0x0259, 0x1d7a: 0x0f51, 0x1d7b: 0x0359, + 0x1d7c: 0x0f61, 0x1d7d: 0x0f71, 0x1d7e: 0x00d9, 0x1d7f: 0x0f99, // Block 0x76, offset 0x1d80 - 0x1d80: 0xcaf9, 0x1d81: 0xcb19, 0x1d82: 0xcb39, 0x1d83: 0x8b1d, 0x1d84: 0xcb59, 0x1d85: 0xcb79, - 0x1d86: 0xcb99, 0x1d87: 0xcbb9, 0x1d88: 0xcbd9, 0x1d89: 0xcbf9, 0x1d8a: 0xcc19, 0x1d8b: 0xcc39, - 0x1d8c: 0xcc59, 0x1d8d: 0x8b3d, 0x1d8e: 0xcc79, 0x1d8f: 0xcc99, 0x1d90: 0xccb9, 0x1d91: 0xccd9, - 0x1d92: 0x8b5d, 0x1d93: 0xccf9, 0x1d94: 0xcd19, 0x1d95: 0xc429, 0x1d96: 0x8b7d, 0x1d97: 0xcd39, - 0x1d98: 0xcd59, 0x1d99: 0xcd79, 0x1d9a: 0xcd99, 0x1d9b: 0xcdb9, 0x1d9c: 0x8b9d, 0x1d9d: 0xcdd9, - 0x1d9e: 0xcdf9, 0x1d9f: 0xce19, 0x1da0: 0xce39, 0x1da1: 0xce59, 0x1da2: 0xc789, 0x1da3: 0xce79, - 0x1da4: 0xce99, 0x1da5: 0xceb9, 0x1da6: 0xced9, 0x1da7: 0xcef9, 0x1da8: 0xcf19, 0x1da9: 0xcf39, - 0x1daa: 0xcf59, 0x1dab: 0xcf79, 0x1dac: 0xcf99, 0x1dad: 0xcfb9, 0x1dae: 0xcfd9, 0x1daf: 0xcff9, - 0x1db0: 0xd019, 0x1db1: 0xd039, 0x1db2: 0xd039, 0x1db3: 0xd039, 0x1db4: 0x8bbd, 0x1db5: 0xd059, - 0x1db6: 0xd079, 0x1db7: 0xd099, 0x1db8: 0x8bdd, 0x1db9: 0xd0b9, 0x1dba: 0xd0d9, 0x1dbb: 0xd0f9, - 0x1dbc: 0xd119, 0x1dbd: 0xd139, 0x1dbe: 0xd159, 0x1dbf: 0xd179, + 0x1d80: 0x2039, 0x1d81: 0x0269, 0x1d82: 0x01d9, 0x1d83: 0x0fa9, 0x1d84: 0x0fb9, 0x1d85: 0x1089, + 0x1d86: 0x0279, 0x1d87: 0x0369, 0x1d88: 0x0289, 0x1d89: 0x13d1, 0x1d8a: 0xc129, 0x1d8b: 0x65b1, + 0x1d8c: 0xc141, 0x1d8d: 0x1441, 0x1d8e: 0xc159, 0x1d8f: 0xc179, 0x1d90: 0x0018, 0x1d91: 0x0018, + 0x1d92: 0x0018, 0x1d93: 0x0018, 0x1d94: 0x0018, 0x1d95: 0x0018, 0x1d96: 0x0018, 0x1d97: 0x0018, + 0x1d98: 0x0018, 
0x1d99: 0x0018, 0x1d9a: 0x0018, 0x1d9b: 0x0018, 0x1d9c: 0x0018, 0x1d9d: 0x0018, + 0x1d9e: 0x0018, 0x1d9f: 0x0018, 0x1da0: 0x0018, 0x1da1: 0x0018, 0x1da2: 0x0018, 0x1da3: 0x0018, + 0x1da4: 0x0018, 0x1da5: 0x0018, 0x1da6: 0x0018, 0x1da7: 0x0018, 0x1da8: 0x0018, 0x1da9: 0x0018, + 0x1daa: 0xc191, 0x1dab: 0xc1a9, 0x1dac: 0x0040, 0x1dad: 0x0040, 0x1dae: 0x0040, 0x1daf: 0x0040, + 0x1db0: 0x0018, 0x1db1: 0x0018, 0x1db2: 0x0018, 0x1db3: 0x0018, 0x1db4: 0x0018, 0x1db5: 0x0018, + 0x1db6: 0x0018, 0x1db7: 0x0018, 0x1db8: 0x0018, 0x1db9: 0x0018, 0x1dba: 0x0018, 0x1dbb: 0x0018, + 0x1dbc: 0x0018, 0x1dbd: 0x0018, 0x1dbe: 0x0018, 0x1dbf: 0x0018, // Block 0x77, offset 0x1dc0 - 0x1dc0: 0xd199, 0x1dc1: 0xd1b9, 0x1dc2: 0xd1d9, 0x1dc3: 0xd1f9, 0x1dc4: 0xd219, 0x1dc5: 0xd239, - 0x1dc6: 0xd239, 0x1dc7: 0xd259, 0x1dc8: 0xd279, 0x1dc9: 0xd299, 0x1dca: 0xd2b9, 0x1dcb: 0xd2d9, - 0x1dcc: 0xd2f9, 0x1dcd: 0xd319, 0x1dce: 0xd339, 0x1dcf: 0xd359, 0x1dd0: 0xd379, 0x1dd1: 0xd399, - 0x1dd2: 0xd3b9, 0x1dd3: 0xd3d9, 0x1dd4: 0xd3f9, 0x1dd5: 0xd419, 0x1dd6: 0xd439, 0x1dd7: 0xd459, - 0x1dd8: 0xd479, 0x1dd9: 0x8bfd, 0x1dda: 0xd499, 0x1ddb: 0xd4b9, 0x1ddc: 0xd4d9, 0x1ddd: 0xc309, - 0x1dde: 0xd4f9, 0x1ddf: 0xd519, 0x1de0: 0x8c1d, 0x1de1: 0x8c3d, 0x1de2: 0xd539, 0x1de3: 0xd559, - 0x1de4: 0xd579, 0x1de5: 0xd599, 0x1de6: 0xd5b9, 0x1de7: 0xd5d9, 0x1de8: 0x0040, 0x1de9: 0xd5f9, - 0x1dea: 0xd619, 0x1deb: 0xd619, 0x1dec: 0x8c5d, 0x1ded: 0xd639, 0x1dee: 0xd659, 0x1def: 0xd679, - 0x1df0: 0xd699, 0x1df1: 0x8c7d, 0x1df2: 0xd6b9, 0x1df3: 0xd6d9, 0x1df4: 0x0040, 0x1df5: 0xd6f9, - 0x1df6: 0xd719, 0x1df7: 0xd739, 0x1df8: 0xd759, 0x1df9: 0xd779, 0x1dfa: 0xd799, 0x1dfb: 0x8c9d, - 0x1dfc: 0xd7b9, 0x1dfd: 0x8cbd, 0x1dfe: 0xd7d9, 0x1dff: 0xd7f9, + 0x1dc0: 0xc1d9, 0x1dc1: 0xc211, 0x1dc2: 0xc249, 0x1dc3: 0x0040, 0x1dc4: 0x0040, 0x1dc5: 0x0040, + 0x1dc6: 0x0040, 0x1dc7: 0x0040, 0x1dc8: 0x0040, 0x1dc9: 0x0040, 0x1dca: 0x0040, 0x1dcb: 0x0040, + 0x1dcc: 0x0040, 0x1dcd: 0x0040, 0x1dce: 0x0040, 0x1dcf: 0x0040, 0x1dd0: 0xc269, 0x1dd1: 0xc289, + 0x1dd2: 0xc2a9, 0x1dd3: 0xc2c9, 0x1dd4: 0xc2e9, 0x1dd5: 0xc309, 0x1dd6: 0xc329, 0x1dd7: 0xc349, + 0x1dd8: 0xc369, 0x1dd9: 0xc389, 0x1dda: 0xc3a9, 0x1ddb: 0xc3c9, 0x1ddc: 0xc3e9, 0x1ddd: 0xc409, + 0x1dde: 0xc429, 0x1ddf: 0xc449, 0x1de0: 0xc469, 0x1de1: 0xc489, 0x1de2: 0xc4a9, 0x1de3: 0xc4c9, + 0x1de4: 0xc4e9, 0x1de5: 0xc509, 0x1de6: 0xc529, 0x1de7: 0xc549, 0x1de8: 0xc569, 0x1de9: 0xc589, + 0x1dea: 0xc5a9, 0x1deb: 0xc5c9, 0x1dec: 0xc5e9, 0x1ded: 0xc609, 0x1dee: 0xc629, 0x1def: 0xc649, + 0x1df0: 0xc669, 0x1df1: 0xc689, 0x1df2: 0xc6a9, 0x1df3: 0xc6c9, 0x1df4: 0xc6e9, 0x1df5: 0xc709, + 0x1df6: 0xc729, 0x1df7: 0xc749, 0x1df8: 0xc769, 0x1df9: 0xc789, 0x1dfa: 0xc7a9, 0x1dfb: 0xc7c9, + 0x1dfc: 0x0040, 0x1dfd: 0x0040, 0x1dfe: 0x0040, 0x1dff: 0x0040, // Block 0x78, offset 0x1e00 - 0x1e00: 0xd819, 0x1e01: 0xd839, 0x1e02: 0xd859, 0x1e03: 0xd879, 0x1e04: 0xd899, 0x1e05: 0xd8b9, - 0x1e06: 0xd8d9, 0x1e07: 0xd8f9, 0x1e08: 0xd919, 0x1e09: 0x8cdd, 0x1e0a: 0xd939, 0x1e0b: 0xd959, - 0x1e0c: 0xd979, 0x1e0d: 0xd999, 0x1e0e: 0xd9b9, 0x1e0f: 0x8cfd, 0x1e10: 0xd9d9, 0x1e11: 0x8d1d, - 0x1e12: 0x8d3d, 0x1e13: 0xd9f9, 0x1e14: 0xda19, 0x1e15: 0xda19, 0x1e16: 0xda39, 0x1e17: 0x8d5d, - 0x1e18: 0x8d7d, 0x1e19: 0xda59, 0x1e1a: 0xda79, 0x1e1b: 0xda99, 0x1e1c: 0xdab9, 0x1e1d: 0xdad9, - 0x1e1e: 0xdaf9, 0x1e1f: 0xdb19, 0x1e20: 0xdb39, 0x1e21: 0xdb59, 0x1e22: 0xdb79, 0x1e23: 0xdb99, - 0x1e24: 0x8d9d, 0x1e25: 0xdbb9, 0x1e26: 0xdbd9, 0x1e27: 0xdbf9, 0x1e28: 0xdc19, 0x1e29: 0xdbf9, - 0x1e2a: 0xdc39, 0x1e2b: 0xdc59, 0x1e2c: 0xdc79, 0x1e2d: 0xdc99, 0x1e2e: 0xdcb9, 
0x1e2f: 0xdcd9, - 0x1e30: 0xdcf9, 0x1e31: 0xdd19, 0x1e32: 0xdd39, 0x1e33: 0xdd59, 0x1e34: 0xdd79, 0x1e35: 0xdd99, - 0x1e36: 0xddb9, 0x1e37: 0xddd9, 0x1e38: 0x8dbd, 0x1e39: 0xddf9, 0x1e3a: 0xde19, 0x1e3b: 0xde39, - 0x1e3c: 0xde59, 0x1e3d: 0xde79, 0x1e3e: 0x8ddd, 0x1e3f: 0xde99, + 0x1e00: 0xcaf9, 0x1e01: 0xcb19, 0x1e02: 0xcb39, 0x1e03: 0x8b1d, 0x1e04: 0xcb59, 0x1e05: 0xcb79, + 0x1e06: 0xcb99, 0x1e07: 0xcbb9, 0x1e08: 0xcbd9, 0x1e09: 0xcbf9, 0x1e0a: 0xcc19, 0x1e0b: 0xcc39, + 0x1e0c: 0xcc59, 0x1e0d: 0x8b3d, 0x1e0e: 0xcc79, 0x1e0f: 0xcc99, 0x1e10: 0xccb9, 0x1e11: 0xccd9, + 0x1e12: 0x8b5d, 0x1e13: 0xccf9, 0x1e14: 0xcd19, 0x1e15: 0xc429, 0x1e16: 0x8b7d, 0x1e17: 0xcd39, + 0x1e18: 0xcd59, 0x1e19: 0xcd79, 0x1e1a: 0xcd99, 0x1e1b: 0xcdb9, 0x1e1c: 0x8b9d, 0x1e1d: 0xcdd9, + 0x1e1e: 0xcdf9, 0x1e1f: 0xce19, 0x1e20: 0xce39, 0x1e21: 0xce59, 0x1e22: 0xc789, 0x1e23: 0xce79, + 0x1e24: 0xce99, 0x1e25: 0xceb9, 0x1e26: 0xced9, 0x1e27: 0xcef9, 0x1e28: 0xcf19, 0x1e29: 0xcf39, + 0x1e2a: 0xcf59, 0x1e2b: 0xcf79, 0x1e2c: 0xcf99, 0x1e2d: 0xcfb9, 0x1e2e: 0xcfd9, 0x1e2f: 0xcff9, + 0x1e30: 0xd019, 0x1e31: 0xd039, 0x1e32: 0xd039, 0x1e33: 0xd039, 0x1e34: 0x8bbd, 0x1e35: 0xd059, + 0x1e36: 0xd079, 0x1e37: 0xd099, 0x1e38: 0x8bdd, 0x1e39: 0xd0b9, 0x1e3a: 0xd0d9, 0x1e3b: 0xd0f9, + 0x1e3c: 0xd119, 0x1e3d: 0xd139, 0x1e3e: 0xd159, 0x1e3f: 0xd179, // Block 0x79, offset 0x1e40 - 0x1e40: 0xe599, 0x1e41: 0xe5b9, 0x1e42: 0xe5d9, 0x1e43: 0xe5f9, 0x1e44: 0xe619, 0x1e45: 0xe639, - 0x1e46: 0x8efd, 0x1e47: 0xe659, 0x1e48: 0xe679, 0x1e49: 0xe699, 0x1e4a: 0xe6b9, 0x1e4b: 0xe6d9, - 0x1e4c: 0xe6f9, 0x1e4d: 0x8f1d, 0x1e4e: 0xe719, 0x1e4f: 0xe739, 0x1e50: 0x8f3d, 0x1e51: 0x8f5d, - 0x1e52: 0xe759, 0x1e53: 0xe779, 0x1e54: 0xe799, 0x1e55: 0xe7b9, 0x1e56: 0xe7d9, 0x1e57: 0xe7f9, - 0x1e58: 0xe819, 0x1e59: 0xe839, 0x1e5a: 0xe859, 0x1e5b: 0x8f7d, 0x1e5c: 0xe879, 0x1e5d: 0x8f9d, - 0x1e5e: 0xe899, 0x1e5f: 0x0040, 0x1e60: 0xe8b9, 0x1e61: 0xe8d9, 0x1e62: 0xe8f9, 0x1e63: 0x8fbd, - 0x1e64: 0xe919, 0x1e65: 0xe939, 0x1e66: 0x8fdd, 0x1e67: 0x8ffd, 0x1e68: 0xe959, 0x1e69: 0xe979, - 0x1e6a: 0xe999, 0x1e6b: 0xe9b9, 0x1e6c: 0xe9d9, 0x1e6d: 0xe9d9, 0x1e6e: 0xe9f9, 0x1e6f: 0xea19, - 0x1e70: 0xea39, 0x1e71: 0xea59, 0x1e72: 0xea79, 0x1e73: 0xea99, 0x1e74: 0xeab9, 0x1e75: 0x901d, - 0x1e76: 0xead9, 0x1e77: 0x903d, 0x1e78: 0xeaf9, 0x1e79: 0x905d, 0x1e7a: 0xeb19, 0x1e7b: 0x907d, - 0x1e7c: 0x909d, 0x1e7d: 0x90bd, 0x1e7e: 0xeb39, 0x1e7f: 0xeb59, + 0x1e40: 0xd199, 0x1e41: 0xd1b9, 0x1e42: 0xd1d9, 0x1e43: 0xd1f9, 0x1e44: 0xd219, 0x1e45: 0xd239, + 0x1e46: 0xd239, 0x1e47: 0xd259, 0x1e48: 0xd279, 0x1e49: 0xd299, 0x1e4a: 0xd2b9, 0x1e4b: 0xd2d9, + 0x1e4c: 0xd2f9, 0x1e4d: 0xd319, 0x1e4e: 0xd339, 0x1e4f: 0xd359, 0x1e50: 0xd379, 0x1e51: 0xd399, + 0x1e52: 0xd3b9, 0x1e53: 0xd3d9, 0x1e54: 0xd3f9, 0x1e55: 0xd419, 0x1e56: 0xd439, 0x1e57: 0xd459, + 0x1e58: 0xd479, 0x1e59: 0x8bfd, 0x1e5a: 0xd499, 0x1e5b: 0xd4b9, 0x1e5c: 0xd4d9, 0x1e5d: 0xc309, + 0x1e5e: 0xd4f9, 0x1e5f: 0xd519, 0x1e60: 0x8c1d, 0x1e61: 0x8c3d, 0x1e62: 0xd539, 0x1e63: 0xd559, + 0x1e64: 0xd579, 0x1e65: 0xd599, 0x1e66: 0xd5b9, 0x1e67: 0xd5d9, 0x1e68: 0x2040, 0x1e69: 0xd5f9, + 0x1e6a: 0xd619, 0x1e6b: 0xd619, 0x1e6c: 0x8c5d, 0x1e6d: 0xd639, 0x1e6e: 0xd659, 0x1e6f: 0xd679, + 0x1e70: 0xd699, 0x1e71: 0x8c7d, 0x1e72: 0xd6b9, 0x1e73: 0xd6d9, 0x1e74: 0x2040, 0x1e75: 0xd6f9, + 0x1e76: 0xd719, 0x1e77: 0xd739, 0x1e78: 0xd759, 0x1e79: 0xd779, 0x1e7a: 0xd799, 0x1e7b: 0x8c9d, + 0x1e7c: 0xd7b9, 0x1e7d: 0x8cbd, 0x1e7e: 0xd7d9, 0x1e7f: 0xd7f9, // Block 0x7a, offset 0x1e80 - 0x1e80: 0xeb79, 0x1e81: 0x90dd, 0x1e82: 0x90fd, 0x1e83: 0x911d, 0x1e84: 
0x913d, 0x1e85: 0xeb99, - 0x1e86: 0xebb9, 0x1e87: 0xebb9, 0x1e88: 0xebd9, 0x1e89: 0xebf9, 0x1e8a: 0xec19, 0x1e8b: 0xec39, - 0x1e8c: 0xec59, 0x1e8d: 0x915d, 0x1e8e: 0xec79, 0x1e8f: 0xec99, 0x1e90: 0xecb9, 0x1e91: 0xecd9, - 0x1e92: 0x917d, 0x1e93: 0xecf9, 0x1e94: 0x919d, 0x1e95: 0x91bd, 0x1e96: 0xed19, 0x1e97: 0xed39, - 0x1e98: 0xed59, 0x1e99: 0xed79, 0x1e9a: 0xed99, 0x1e9b: 0xedb9, 0x1e9c: 0x91dd, 0x1e9d: 0x91fd, - 0x1e9e: 0x921d, 0x1e9f: 0x0040, 0x1ea0: 0xedd9, 0x1ea1: 0x923d, 0x1ea2: 0xedf9, 0x1ea3: 0xee19, - 0x1ea4: 0xee39, 0x1ea5: 0x925d, 0x1ea6: 0xee59, 0x1ea7: 0xee79, 0x1ea8: 0xee99, 0x1ea9: 0xeeb9, - 0x1eaa: 0xeed9, 0x1eab: 0x927d, 0x1eac: 0xeef9, 0x1ead: 0xef19, 0x1eae: 0xef39, 0x1eaf: 0xef59, - 0x1eb0: 0xef79, 0x1eb1: 0xef99, 0x1eb2: 0x929d, 0x1eb3: 0x92bd, 0x1eb4: 0xefb9, 0x1eb5: 0x92dd, - 0x1eb6: 0xefd9, 0x1eb7: 0x92fd, 0x1eb8: 0xeff9, 0x1eb9: 0xf019, 0x1eba: 0xf039, 0x1ebb: 0x931d, - 0x1ebc: 0x933d, 0x1ebd: 0xf059, 0x1ebe: 0x935d, 0x1ebf: 0xf079, + 0x1e80: 0xd819, 0x1e81: 0xd839, 0x1e82: 0xd859, 0x1e83: 0xd879, 0x1e84: 0xd899, 0x1e85: 0xd8b9, + 0x1e86: 0xd8d9, 0x1e87: 0xd8f9, 0x1e88: 0xd919, 0x1e89: 0x8cdd, 0x1e8a: 0xd939, 0x1e8b: 0xd959, + 0x1e8c: 0xd979, 0x1e8d: 0xd999, 0x1e8e: 0xd9b9, 0x1e8f: 0x8cfd, 0x1e90: 0xd9d9, 0x1e91: 0x8d1d, + 0x1e92: 0x8d3d, 0x1e93: 0xd9f9, 0x1e94: 0xda19, 0x1e95: 0xda19, 0x1e96: 0xda39, 0x1e97: 0x8d5d, + 0x1e98: 0x8d7d, 0x1e99: 0xda59, 0x1e9a: 0xda79, 0x1e9b: 0xda99, 0x1e9c: 0xdab9, 0x1e9d: 0xdad9, + 0x1e9e: 0xdaf9, 0x1e9f: 0xdb19, 0x1ea0: 0xdb39, 0x1ea1: 0xdb59, 0x1ea2: 0xdb79, 0x1ea3: 0xdb99, + 0x1ea4: 0x8d9d, 0x1ea5: 0xdbb9, 0x1ea6: 0xdbd9, 0x1ea7: 0xdbf9, 0x1ea8: 0xdc19, 0x1ea9: 0xdbf9, + 0x1eaa: 0xdc39, 0x1eab: 0xdc59, 0x1eac: 0xdc79, 0x1ead: 0xdc99, 0x1eae: 0xdcb9, 0x1eaf: 0xdcd9, + 0x1eb0: 0xdcf9, 0x1eb1: 0xdd19, 0x1eb2: 0xdd39, 0x1eb3: 0xdd59, 0x1eb4: 0xdd79, 0x1eb5: 0xdd99, + 0x1eb6: 0xddb9, 0x1eb7: 0xddd9, 0x1eb8: 0x8dbd, 0x1eb9: 0xddf9, 0x1eba: 0xde19, 0x1ebb: 0xde39, + 0x1ebc: 0xde59, 0x1ebd: 0xde79, 0x1ebe: 0x8ddd, 0x1ebf: 0xde99, // Block 0x7b, offset 0x1ec0 - 0x1ec0: 0xf6b9, 0x1ec1: 0xf6d9, 0x1ec2: 0xf6f9, 0x1ec3: 0xf719, 0x1ec4: 0xf739, 0x1ec5: 0x951d, - 0x1ec6: 0xf759, 0x1ec7: 0xf779, 0x1ec8: 0xf799, 0x1ec9: 0xf7b9, 0x1eca: 0xf7d9, 0x1ecb: 0x953d, - 0x1ecc: 0x955d, 0x1ecd: 0xf7f9, 0x1ece: 0xf819, 0x1ecf: 0xf839, 0x1ed0: 0xf859, 0x1ed1: 0xf879, - 0x1ed2: 0xf899, 0x1ed3: 0x957d, 0x1ed4: 0xf8b9, 0x1ed5: 0xf8d9, 0x1ed6: 0xf8f9, 0x1ed7: 0xf919, - 0x1ed8: 0x959d, 0x1ed9: 0x95bd, 0x1eda: 0xf939, 0x1edb: 0xf959, 0x1edc: 0xf979, 0x1edd: 0x95dd, - 0x1ede: 0xf999, 0x1edf: 0xf9b9, 0x1ee0: 0x6815, 0x1ee1: 0x95fd, 0x1ee2: 0xf9d9, 0x1ee3: 0xf9f9, - 0x1ee4: 0xfa19, 0x1ee5: 0x961d, 0x1ee6: 0xfa39, 0x1ee7: 0xfa59, 0x1ee8: 0xfa79, 0x1ee9: 0xfa99, - 0x1eea: 0xfab9, 0x1eeb: 0xfad9, 0x1eec: 0xfaf9, 0x1eed: 0x963d, 0x1eee: 0xfb19, 0x1eef: 0xfb39, - 0x1ef0: 0xfb59, 0x1ef1: 0x965d, 0x1ef2: 0xfb79, 0x1ef3: 0xfb99, 0x1ef4: 0xfbb9, 0x1ef5: 0xfbd9, - 0x1ef6: 0x7b35, 0x1ef7: 0x967d, 0x1ef8: 0xfbf9, 0x1ef9: 0xfc19, 0x1efa: 0xfc39, 0x1efb: 0x969d, - 0x1efc: 0xfc59, 0x1efd: 0x96bd, 0x1efe: 0xfc79, 0x1eff: 0xfc79, + 0x1ec0: 0xe599, 0x1ec1: 0xe5b9, 0x1ec2: 0xe5d9, 0x1ec3: 0xe5f9, 0x1ec4: 0xe619, 0x1ec5: 0xe639, + 0x1ec6: 0x8efd, 0x1ec7: 0xe659, 0x1ec8: 0xe679, 0x1ec9: 0xe699, 0x1eca: 0xe6b9, 0x1ecb: 0xe6d9, + 0x1ecc: 0xe6f9, 0x1ecd: 0x8f1d, 0x1ece: 0xe719, 0x1ecf: 0xe739, 0x1ed0: 0x8f3d, 0x1ed1: 0x8f5d, + 0x1ed2: 0xe759, 0x1ed3: 0xe779, 0x1ed4: 0xe799, 0x1ed5: 0xe7b9, 0x1ed6: 0xe7d9, 0x1ed7: 0xe7f9, + 0x1ed8: 0xe819, 0x1ed9: 0xe839, 0x1eda: 0xe859, 0x1edb: 0x8f7d, 
0x1edc: 0xe879, 0x1edd: 0x8f9d, + 0x1ede: 0xe899, 0x1edf: 0x2040, 0x1ee0: 0xe8b9, 0x1ee1: 0xe8d9, 0x1ee2: 0xe8f9, 0x1ee3: 0x8fbd, + 0x1ee4: 0xe919, 0x1ee5: 0xe939, 0x1ee6: 0x8fdd, 0x1ee7: 0x8ffd, 0x1ee8: 0xe959, 0x1ee9: 0xe979, + 0x1eea: 0xe999, 0x1eeb: 0xe9b9, 0x1eec: 0xe9d9, 0x1eed: 0xe9d9, 0x1eee: 0xe9f9, 0x1eef: 0xea19, + 0x1ef0: 0xea39, 0x1ef1: 0xea59, 0x1ef2: 0xea79, 0x1ef3: 0xea99, 0x1ef4: 0xeab9, 0x1ef5: 0x901d, + 0x1ef6: 0xead9, 0x1ef7: 0x903d, 0x1ef8: 0xeaf9, 0x1ef9: 0x905d, 0x1efa: 0xeb19, 0x1efb: 0x907d, + 0x1efc: 0x909d, 0x1efd: 0x90bd, 0x1efe: 0xeb39, 0x1eff: 0xeb59, // Block 0x7c, offset 0x1f00 - 0x1f00: 0xfc99, 0x1f01: 0x96dd, 0x1f02: 0xfcb9, 0x1f03: 0xfcd9, 0x1f04: 0xfcf9, 0x1f05: 0xfd19, - 0x1f06: 0xfd39, 0x1f07: 0xfd59, 0x1f08: 0xfd79, 0x1f09: 0x96fd, 0x1f0a: 0xfd99, 0x1f0b: 0xfdb9, - 0x1f0c: 0xfdd9, 0x1f0d: 0xfdf9, 0x1f0e: 0xfe19, 0x1f0f: 0xfe39, 0x1f10: 0x971d, 0x1f11: 0xfe59, - 0x1f12: 0x973d, 0x1f13: 0x975d, 0x1f14: 0x977d, 0x1f15: 0xfe79, 0x1f16: 0xfe99, 0x1f17: 0xfeb9, - 0x1f18: 0xfed9, 0x1f19: 0xfef9, 0x1f1a: 0xff19, 0x1f1b: 0xff39, 0x1f1c: 0xff59, 0x1f1d: 0x979d, - 0x1f1e: 0x0040, 0x1f1f: 0x0040, 0x1f20: 0x0040, 0x1f21: 0x0040, 0x1f22: 0x0040, 0x1f23: 0x0040, - 0x1f24: 0x0040, 0x1f25: 0x0040, 0x1f26: 0x0040, 0x1f27: 0x0040, 0x1f28: 0x0040, 0x1f29: 0x0040, - 0x1f2a: 0x0040, 0x1f2b: 0x0040, 0x1f2c: 0x0040, 0x1f2d: 0x0040, 0x1f2e: 0x0040, 0x1f2f: 0x0040, - 0x1f30: 0x0040, 0x1f31: 0x0040, 0x1f32: 0x0040, 0x1f33: 0x0040, 0x1f34: 0x0040, 0x1f35: 0x0040, - 0x1f36: 0x0040, 0x1f37: 0x0040, 0x1f38: 0x0040, 0x1f39: 0x0040, 0x1f3a: 0x0040, 0x1f3b: 0x0040, - 0x1f3c: 0x0040, 0x1f3d: 0x0040, 0x1f3e: 0x0040, 0x1f3f: 0x0040, + 0x1f00: 0xeb79, 0x1f01: 0x90dd, 0x1f02: 0x90fd, 0x1f03: 0x911d, 0x1f04: 0x913d, 0x1f05: 0xeb99, + 0x1f06: 0xebb9, 0x1f07: 0xebb9, 0x1f08: 0xebd9, 0x1f09: 0xebf9, 0x1f0a: 0xec19, 0x1f0b: 0xec39, + 0x1f0c: 0xec59, 0x1f0d: 0x915d, 0x1f0e: 0xec79, 0x1f0f: 0xec99, 0x1f10: 0xecb9, 0x1f11: 0xecd9, + 0x1f12: 0x917d, 0x1f13: 0xecf9, 0x1f14: 0x919d, 0x1f15: 0x91bd, 0x1f16: 0xed19, 0x1f17: 0xed39, + 0x1f18: 0xed59, 0x1f19: 0xed79, 0x1f1a: 0xed99, 0x1f1b: 0xedb9, 0x1f1c: 0x91dd, 0x1f1d: 0x91fd, + 0x1f1e: 0x921d, 0x1f1f: 0x2040, 0x1f20: 0xedd9, 0x1f21: 0x923d, 0x1f22: 0xedf9, 0x1f23: 0xee19, + 0x1f24: 0xee39, 0x1f25: 0x925d, 0x1f26: 0xee59, 0x1f27: 0xee79, 0x1f28: 0xee99, 0x1f29: 0xeeb9, + 0x1f2a: 0xeed9, 0x1f2b: 0x927d, 0x1f2c: 0xeef9, 0x1f2d: 0xef19, 0x1f2e: 0xef39, 0x1f2f: 0xef59, + 0x1f30: 0xef79, 0x1f31: 0xef99, 0x1f32: 0x929d, 0x1f33: 0x92bd, 0x1f34: 0xefb9, 0x1f35: 0x92dd, + 0x1f36: 0xefd9, 0x1f37: 0x92fd, 0x1f38: 0xeff9, 0x1f39: 0xf019, 0x1f3a: 0xf039, 0x1f3b: 0x931d, + 0x1f3c: 0x933d, 0x1f3d: 0xf059, 0x1f3e: 0x935d, 0x1f3f: 0xf079, + // Block 0x7d, offset 0x1f40 + 0x1f40: 0xf6b9, 0x1f41: 0xf6d9, 0x1f42: 0xf6f9, 0x1f43: 0xf719, 0x1f44: 0xf739, 0x1f45: 0x951d, + 0x1f46: 0xf759, 0x1f47: 0xf779, 0x1f48: 0xf799, 0x1f49: 0xf7b9, 0x1f4a: 0xf7d9, 0x1f4b: 0x953d, + 0x1f4c: 0x955d, 0x1f4d: 0xf7f9, 0x1f4e: 0xf819, 0x1f4f: 0xf839, 0x1f50: 0xf859, 0x1f51: 0xf879, + 0x1f52: 0xf899, 0x1f53: 0x957d, 0x1f54: 0xf8b9, 0x1f55: 0xf8d9, 0x1f56: 0xf8f9, 0x1f57: 0xf919, + 0x1f58: 0x959d, 0x1f59: 0x95bd, 0x1f5a: 0xf939, 0x1f5b: 0xf959, 0x1f5c: 0xf979, 0x1f5d: 0x95dd, + 0x1f5e: 0xf999, 0x1f5f: 0xf9b9, 0x1f60: 0x6815, 0x1f61: 0x95fd, 0x1f62: 0xf9d9, 0x1f63: 0xf9f9, + 0x1f64: 0xfa19, 0x1f65: 0x961d, 0x1f66: 0xfa39, 0x1f67: 0xfa59, 0x1f68: 0xfa79, 0x1f69: 0xfa99, + 0x1f6a: 0xfab9, 0x1f6b: 0xfad9, 0x1f6c: 0xfaf9, 0x1f6d: 0x963d, 0x1f6e: 0xfb19, 0x1f6f: 0xfb39, + 0x1f70: 0xfb59, 0x1f71: 
0x965d, 0x1f72: 0xfb79, 0x1f73: 0xfb99, 0x1f74: 0xfbb9, 0x1f75: 0xfbd9, + 0x1f76: 0x7b35, 0x1f77: 0x967d, 0x1f78: 0xfbf9, 0x1f79: 0xfc19, 0x1f7a: 0xfc39, 0x1f7b: 0x969d, + 0x1f7c: 0xfc59, 0x1f7d: 0x96bd, 0x1f7e: 0xfc79, 0x1f7f: 0xfc79, + // Block 0x7e, offset 0x1f80 + 0x1f80: 0xfc99, 0x1f81: 0x96dd, 0x1f82: 0xfcb9, 0x1f83: 0xfcd9, 0x1f84: 0xfcf9, 0x1f85: 0xfd19, + 0x1f86: 0xfd39, 0x1f87: 0xfd59, 0x1f88: 0xfd79, 0x1f89: 0x96fd, 0x1f8a: 0xfd99, 0x1f8b: 0xfdb9, + 0x1f8c: 0xfdd9, 0x1f8d: 0xfdf9, 0x1f8e: 0xfe19, 0x1f8f: 0xfe39, 0x1f90: 0x971d, 0x1f91: 0xfe59, + 0x1f92: 0x973d, 0x1f93: 0x975d, 0x1f94: 0x977d, 0x1f95: 0xfe79, 0x1f96: 0xfe99, 0x1f97: 0xfeb9, + 0x1f98: 0xfed9, 0x1f99: 0xfef9, 0x1f9a: 0xff19, 0x1f9b: 0xff39, 0x1f9c: 0xff59, 0x1f9d: 0x979d, + 0x1f9e: 0x0040, 0x1f9f: 0x0040, 0x1fa0: 0x0040, 0x1fa1: 0x0040, 0x1fa2: 0x0040, 0x1fa3: 0x0040, + 0x1fa4: 0x0040, 0x1fa5: 0x0040, 0x1fa6: 0x0040, 0x1fa7: 0x0040, 0x1fa8: 0x0040, 0x1fa9: 0x0040, + 0x1faa: 0x0040, 0x1fab: 0x0040, 0x1fac: 0x0040, 0x1fad: 0x0040, 0x1fae: 0x0040, 0x1faf: 0x0040, + 0x1fb0: 0x0040, 0x1fb1: 0x0040, 0x1fb2: 0x0040, 0x1fb3: 0x0040, 0x1fb4: 0x0040, 0x1fb5: 0x0040, + 0x1fb6: 0x0040, 0x1fb7: 0x0040, 0x1fb8: 0x0040, 0x1fb9: 0x0040, 0x1fba: 0x0040, 0x1fbb: 0x0040, + 0x1fbc: 0x0040, 0x1fbd: 0x0040, 0x1fbe: 0x0040, 0x1fbf: 0x0040, } -// idnaIndex: 35 blocks, 2240 entries, 4480 bytes +// idnaIndex: 36 blocks, 2304 entries, 4608 bytes // Block 0 is the zero block. -var idnaIndex = [2240]uint16{ +var idnaIndex = [2304]uint16{ // Block 0x0, offset 0x0 // Block 0x1, offset 0x40 // Block 0x2, offset 0x80 // Block 0x3, offset 0xc0 - 0xc2: 0x01, 0xc3: 0x7b, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, - 0xc8: 0x06, 0xc9: 0x7c, 0xca: 0x7d, 0xcb: 0x07, 0xcc: 0x7e, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, - 0xd0: 0x7f, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x80, 0xd6: 0x81, 0xd7: 0x82, - 0xd8: 0x0f, 0xd9: 0x83, 0xda: 0x84, 0xdb: 0x10, 0xdc: 0x11, 0xdd: 0x85, 0xde: 0x86, 0xdf: 0x87, + 0xc2: 0x01, 0xc3: 0x7d, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, + 0xc8: 0x06, 0xc9: 0x7e, 0xca: 0x7f, 0xcb: 0x07, 0xcc: 0x80, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, + 0xd0: 0x81, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x82, 0xd6: 0x83, 0xd7: 0x84, + 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x85, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x86, 0xde: 0x87, 0xdf: 0x88, 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c, - 0xf0: 0x1c, 0xf1: 0x1d, 0xf2: 0x1d, 0xf3: 0x1f, 0xf4: 0x20, + 0xf0: 0x1d, 0xf1: 0x1e, 0xf2: 0x1e, 0xf3: 0x20, 0xf4: 0x21, // Block 0x4, offset 0x100 - 0x120: 0x88, 0x121: 0x89, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x12, 0x126: 0x13, 0x127: 0x14, - 0x128: 0x15, 0x129: 0x16, 0x12a: 0x17, 0x12b: 0x18, 0x12c: 0x19, 0x12d: 0x1a, 0x12e: 0x1b, 0x12f: 0x8d, - 0x130: 0x8e, 0x131: 0x1c, 0x132: 0x1d, 0x133: 0x1e, 0x134: 0x8f, 0x135: 0x1f, 0x136: 0x90, 0x137: 0x91, - 0x138: 0x92, 0x139: 0x93, 0x13a: 0x20, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x21, 0x13e: 0x22, 0x13f: 0x96, + 0x120: 0x89, 0x121: 0x13, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x14, 0x126: 0x15, 0x127: 0x16, + 0x128: 0x17, 0x129: 0x18, 0x12a: 0x19, 0x12b: 0x1a, 0x12c: 0x1b, 0x12d: 0x1c, 0x12e: 0x1d, 0x12f: 0x8d, + 0x130: 0x8e, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x8f, 0x135: 0x21, 0x136: 0x90, 0x137: 0x91, + 0x138: 0x92, 0x139: 0x93, 0x13a: 0x22, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x23, 0x13e: 0x24, 0x13f: 0x96, // Block 0x5, 
offset 0x140 - 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9b, 0x147: 0x9b, - 0x148: 0x9d, 0x149: 0x9e, 0x14a: 0x9f, 0x14b: 0xa0, 0x14c: 0xa1, 0x14d: 0xa2, 0x14e: 0xa3, 0x14f: 0xa4, - 0x150: 0xa5, 0x151: 0x9d, 0x152: 0x9d, 0x153: 0x9d, 0x154: 0x9d, 0x155: 0x9d, 0x156: 0x9d, 0x157: 0x9d, - 0x158: 0x9d, 0x159: 0xa6, 0x15a: 0xa7, 0x15b: 0xa8, 0x15c: 0xa9, 0x15d: 0xaa, 0x15e: 0xab, 0x15f: 0xac, - 0x160: 0xad, 0x161: 0xae, 0x162: 0xaf, 0x163: 0xb0, 0x164: 0xb1, 0x165: 0xb2, 0x166: 0xb3, 0x167: 0xb4, - 0x168: 0xb5, 0x169: 0xb6, 0x16a: 0xb7, 0x16b: 0xb8, 0x16c: 0xb9, 0x16d: 0xba, 0x16e: 0xbb, 0x16f: 0xbc, - 0x170: 0xbd, 0x171: 0xbe, 0x172: 0xbf, 0x173: 0xc0, 0x174: 0x23, 0x175: 0x24, 0x176: 0x25, 0x177: 0xc1, - 0x178: 0x26, 0x179: 0x26, 0x17a: 0x27, 0x17b: 0x26, 0x17c: 0xc2, 0x17d: 0x28, 0x17e: 0x29, 0x17f: 0x2a, + 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e, + 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6, + 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f, + 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae, + 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6, + 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe, + 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x25, 0x175: 0x26, 0x176: 0x27, 0x177: 0xc3, + 0x178: 0x28, 0x179: 0x28, 0x17a: 0x29, 0x17b: 0x28, 0x17c: 0xc4, 0x17d: 0x2a, 0x17e: 0x2b, 0x17f: 0x2c, // Block 0x6, offset 0x180 - 0x180: 0x2b, 0x181: 0x2c, 0x182: 0x2d, 0x183: 0xc3, 0x184: 0x2e, 0x185: 0x2f, 0x186: 0xc4, 0x187: 0x9b, - 0x188: 0xc5, 0x189: 0xc6, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc7, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0xc8, - 0x190: 0xc9, 0x191: 0x30, 0x192: 0x31, 0x193: 0x32, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b, + 0x180: 0x2d, 0x181: 0x2e, 0x182: 0x2f, 0x183: 0xc5, 0x184: 0x30, 0x185: 0x31, 0x186: 0xc6, 0x187: 0x9b, + 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0x9b, + 0x190: 0xca, 0x191: 0x32, 0x192: 0x33, 0x193: 0x34, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b, 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b, 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b, - 0x1a8: 0xca, 0x1a9: 0xcb, 0x1aa: 0x9b, 0x1ab: 0xcc, 0x1ac: 0x9b, 0x1ad: 0xcd, 0x1ae: 0xce, 0x1af: 0xcf, - 0x1b0: 0xd0, 0x1b1: 0x33, 0x1b2: 0x26, 0x1b3: 0x34, 0x1b4: 0xd1, 0x1b5: 0xd2, 0x1b6: 0xd3, 0x1b7: 0xd4, - 0x1b8: 0xd5, 0x1b9: 0xd6, 0x1ba: 0xd7, 0x1bb: 0xd8, 0x1bc: 0xd9, 0x1bd: 0xda, 0x1be: 0xdb, 0x1bf: 0x35, + 0x1a8: 0xcb, 0x1a9: 0xcc, 0x1aa: 0x9b, 0x1ab: 0xcd, 0x1ac: 0x9b, 0x1ad: 0xce, 0x1ae: 0xcf, 0x1af: 0xd0, + 0x1b0: 0xd1, 0x1b1: 0x35, 0x1b2: 0x28, 0x1b3: 0x36, 0x1b4: 0xd2, 0x1b5: 0xd3, 0x1b6: 0xd4, 0x1b7: 0xd5, + 0x1b8: 0xd6, 0x1b9: 0xd7, 0x1ba: 0xd8, 0x1bb: 0xd9, 0x1bc: 0xda, 0x1bd: 0xdb, 0x1be: 0xdc, 0x1bf: 0x37, // Block 0x7, offset 0x1c0 - 0x1c0: 0x36, 0x1c1: 0xdc, 0x1c2: 0xdd, 0x1c3: 0xde, 0x1c4: 0xdf, 0x1c5: 0x37, 0x1c6: 0x38, 0x1c7: 0xe0, - 0x1c8: 0xe1, 0x1c9: 0x39, 0x1ca: 0x3a, 0x1cb: 0x3b, 0x1cc: 0x3c, 0x1cd: 0x3d, 0x1ce: 0x3e, 0x1cf: 0x3f, - 0x1d0: 0x9d, 0x1d1: 0x9d, 0x1d2: 0x9d, 0x1d3: 0x9d, 0x1d4: 0x9d, 0x1d5: 0x9d, 0x1d6: 0x9d, 0x1d7: 
0x9d, - 0x1d8: 0x9d, 0x1d9: 0x9d, 0x1da: 0x9d, 0x1db: 0x9d, 0x1dc: 0x9d, 0x1dd: 0x9d, 0x1de: 0x9d, 0x1df: 0x9d, - 0x1e0: 0x9d, 0x1e1: 0x9d, 0x1e2: 0x9d, 0x1e3: 0x9d, 0x1e4: 0x9d, 0x1e5: 0x9d, 0x1e6: 0x9d, 0x1e7: 0x9d, - 0x1e8: 0x9d, 0x1e9: 0x9d, 0x1ea: 0x9d, 0x1eb: 0x9d, 0x1ec: 0x9d, 0x1ed: 0x9d, 0x1ee: 0x9d, 0x1ef: 0x9d, - 0x1f0: 0x9d, 0x1f1: 0x9d, 0x1f2: 0x9d, 0x1f3: 0x9d, 0x1f4: 0x9d, 0x1f5: 0x9d, 0x1f6: 0x9d, 0x1f7: 0x9d, - 0x1f8: 0x9d, 0x1f9: 0x9d, 0x1fa: 0x9d, 0x1fb: 0x9d, 0x1fc: 0x9d, 0x1fd: 0x9d, 0x1fe: 0x9d, 0x1ff: 0x9d, + 0x1c0: 0x38, 0x1c1: 0xdd, 0x1c2: 0xde, 0x1c3: 0xdf, 0x1c4: 0xe0, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe1, + 0x1c8: 0xe2, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0x3e, 0x1cd: 0x3f, 0x1ce: 0x40, 0x1cf: 0x41, + 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f, + 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f, + 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f, + 0x1e8: 0x9f, 0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f, + 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f, + 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f, // Block 0x8, offset 0x200 - 0x200: 0x9d, 0x201: 0x9d, 0x202: 0x9d, 0x203: 0x9d, 0x204: 0x9d, 0x205: 0x9d, 0x206: 0x9d, 0x207: 0x9d, - 0x208: 0x9d, 0x209: 0x9d, 0x20a: 0x9d, 0x20b: 0x9d, 0x20c: 0x9d, 0x20d: 0x9d, 0x20e: 0x9d, 0x20f: 0x9d, - 0x210: 0x9d, 0x211: 0x9d, 0x212: 0x9d, 0x213: 0x9d, 0x214: 0x9d, 0x215: 0x9d, 0x216: 0x9d, 0x217: 0x9d, - 0x218: 0x9d, 0x219: 0x9d, 0x21a: 0x9d, 0x21b: 0x9d, 0x21c: 0x9d, 0x21d: 0x9d, 0x21e: 0x9d, 0x21f: 0x9d, - 0x220: 0x9d, 0x221: 0x9d, 0x222: 0x9d, 0x223: 0x9d, 0x224: 0x9d, 0x225: 0x9d, 0x226: 0x9d, 0x227: 0x9d, - 0x228: 0x9d, 0x229: 0x9d, 0x22a: 0x9d, 0x22b: 0x9d, 0x22c: 0x9d, 0x22d: 0x9d, 0x22e: 0x9d, 0x22f: 0x9d, - 0x230: 0x9d, 0x231: 0x9d, 0x232: 0x9d, 0x233: 0x9d, 0x234: 0x9d, 0x235: 0x9d, 0x236: 0xb0, 0x237: 0x9b, - 0x238: 0x9d, 0x239: 0x9d, 0x23a: 0x9d, 0x23b: 0x9d, 0x23c: 0x9d, 0x23d: 0x9d, 0x23e: 0x9d, 0x23f: 0x9d, + 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f, + 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f, + 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f, + 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f, + 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f, + 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f, + 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b, + 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f, // Block 0x9, offset 0x240 - 0x240: 0x9d, 0x241: 0x9d, 0x242: 0x9d, 0x243: 0x9d, 0x244: 0x9d, 0x245: 0x9d, 0x246: 0x9d, 0x247: 0x9d, - 0x248: 0x9d, 0x249: 0x9d, 0x24a: 0x9d, 0x24b: 0x9d, 0x24c: 0x9d, 0x24d: 0x9d, 0x24e: 0x9d, 0x24f: 0x9d, - 0x250: 0x9d, 0x251: 0x9d, 0x252: 0x9d, 0x253: 0x9d, 0x254: 0x9d, 0x255: 0x9d, 0x256: 0x9d, 0x257: 0x9d, - 0x258: 0x9d, 0x259: 0x9d, 0x25a: 0x9d, 0x25b: 0x9d, 0x25c: 0x9d, 0x25d: 0x9d, 0x25e: 0x9d, 0x25f: 
0x9d, - 0x260: 0x9d, 0x261: 0x9d, 0x262: 0x9d, 0x263: 0x9d, 0x264: 0x9d, 0x265: 0x9d, 0x266: 0x9d, 0x267: 0x9d, - 0x268: 0x9d, 0x269: 0x9d, 0x26a: 0x9d, 0x26b: 0x9d, 0x26c: 0x9d, 0x26d: 0x9d, 0x26e: 0x9d, 0x26f: 0x9d, - 0x270: 0x9d, 0x271: 0x9d, 0x272: 0x9d, 0x273: 0x9d, 0x274: 0x9d, 0x275: 0x9d, 0x276: 0x9d, 0x277: 0x9d, - 0x278: 0x9d, 0x279: 0x9d, 0x27a: 0x9d, 0x27b: 0x9d, 0x27c: 0x9d, 0x27d: 0x9d, 0x27e: 0x9d, 0x27f: 0x9d, + 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f, + 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f, + 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f, + 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f, + 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f, + 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f, + 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f, + 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f, // Block 0xa, offset 0x280 - 0x280: 0x9d, 0x281: 0x9d, 0x282: 0x9d, 0x283: 0x9d, 0x284: 0x9d, 0x285: 0x9d, 0x286: 0x9d, 0x287: 0x9d, - 0x288: 0x9d, 0x289: 0x9d, 0x28a: 0x9d, 0x28b: 0x9d, 0x28c: 0x9d, 0x28d: 0x9d, 0x28e: 0x9d, 0x28f: 0x9d, - 0x290: 0x9d, 0x291: 0x9d, 0x292: 0x9d, 0x293: 0x9d, 0x294: 0x9d, 0x295: 0x9d, 0x296: 0x9d, 0x297: 0x9d, - 0x298: 0x9d, 0x299: 0x9d, 0x29a: 0x9d, 0x29b: 0x9d, 0x29c: 0x9d, 0x29d: 0x9d, 0x29e: 0x9d, 0x29f: 0x9d, - 0x2a0: 0x9d, 0x2a1: 0x9d, 0x2a2: 0x9d, 0x2a3: 0x9d, 0x2a4: 0x9d, 0x2a5: 0x9d, 0x2a6: 0x9d, 0x2a7: 0x9d, - 0x2a8: 0x9d, 0x2a9: 0x9d, 0x2aa: 0x9d, 0x2ab: 0x9d, 0x2ac: 0x9d, 0x2ad: 0x9d, 0x2ae: 0x9d, 0x2af: 0x9d, - 0x2b0: 0x9d, 0x2b1: 0x9d, 0x2b2: 0x9d, 0x2b3: 0x9d, 0x2b4: 0x9d, 0x2b5: 0x9d, 0x2b6: 0x9d, 0x2b7: 0x9d, - 0x2b8: 0x9d, 0x2b9: 0x9d, 0x2ba: 0x9d, 0x2bb: 0x9d, 0x2bc: 0x9d, 0x2bd: 0x9d, 0x2be: 0x9d, 0x2bf: 0xe2, + 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f, + 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f, + 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f, + 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f, + 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f, + 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f, + 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f, + 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe3, // Block 0xb, offset 0x2c0 - 0x2c0: 0x9d, 0x2c1: 0x9d, 0x2c2: 0x9d, 0x2c3: 0x9d, 0x2c4: 0x9d, 0x2c5: 0x9d, 0x2c6: 0x9d, 0x2c7: 0x9d, - 0x2c8: 0x9d, 0x2c9: 0x9d, 0x2ca: 0x9d, 0x2cb: 0x9d, 0x2cc: 0x9d, 0x2cd: 0x9d, 0x2ce: 0x9d, 0x2cf: 0x9d, - 0x2d0: 0x9d, 0x2d1: 0x9d, 0x2d2: 0xe3, 0x2d3: 0xe4, 0x2d4: 0x9d, 0x2d5: 0x9d, 0x2d6: 0x9d, 0x2d7: 0x9d, - 0x2d8: 0xe5, 0x2d9: 0x40, 0x2da: 0x41, 0x2db: 0xe6, 0x2dc: 0x42, 0x2dd: 0x43, 0x2de: 0x44, 0x2df: 0xe7, - 0x2e0: 0xe8, 0x2e1: 0xe9, 0x2e2: 0xea, 0x2e3: 0xeb, 0x2e4: 0xec, 0x2e5: 0xed, 0x2e6: 0xee, 0x2e7: 
0xef, - 0x2e8: 0xf0, 0x2e9: 0xf1, 0x2ea: 0xf2, 0x2eb: 0xf3, 0x2ec: 0xf4, 0x2ed: 0xf5, 0x2ee: 0xf6, 0x2ef: 0xf7, - 0x2f0: 0x9d, 0x2f1: 0x9d, 0x2f2: 0x9d, 0x2f3: 0x9d, 0x2f4: 0x9d, 0x2f5: 0x9d, 0x2f6: 0x9d, 0x2f7: 0x9d, - 0x2f8: 0x9d, 0x2f9: 0x9d, 0x2fa: 0x9d, 0x2fb: 0x9d, 0x2fc: 0x9d, 0x2fd: 0x9d, 0x2fe: 0x9d, 0x2ff: 0x9d, + 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f, + 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f, + 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe4, 0x2d3: 0xe5, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f, + 0x2d8: 0xe6, 0x2d9: 0x42, 0x2da: 0x43, 0x2db: 0xe7, 0x2dc: 0x44, 0x2dd: 0x45, 0x2de: 0x46, 0x2df: 0xe8, + 0x2e0: 0xe9, 0x2e1: 0xea, 0x2e2: 0xeb, 0x2e3: 0xec, 0x2e4: 0xed, 0x2e5: 0xee, 0x2e6: 0xef, 0x2e7: 0xf0, + 0x2e8: 0xf1, 0x2e9: 0xf2, 0x2ea: 0xf3, 0x2eb: 0xf4, 0x2ec: 0xf5, 0x2ed: 0xf6, 0x2ee: 0xf7, 0x2ef: 0xf8, + 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f, + 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f, // Block 0xc, offset 0x300 - 0x300: 0x9d, 0x301: 0x9d, 0x302: 0x9d, 0x303: 0x9d, 0x304: 0x9d, 0x305: 0x9d, 0x306: 0x9d, 0x307: 0x9d, - 0x308: 0x9d, 0x309: 0x9d, 0x30a: 0x9d, 0x30b: 0x9d, 0x30c: 0x9d, 0x30d: 0x9d, 0x30e: 0x9d, 0x30f: 0x9d, - 0x310: 0x9d, 0x311: 0x9d, 0x312: 0x9d, 0x313: 0x9d, 0x314: 0x9d, 0x315: 0x9d, 0x316: 0x9d, 0x317: 0x9d, - 0x318: 0x9d, 0x319: 0x9d, 0x31a: 0x9d, 0x31b: 0x9d, 0x31c: 0x9d, 0x31d: 0x9d, 0x31e: 0xf8, 0x31f: 0xf9, + 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f, + 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f, + 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f, + 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xf9, 0x31f: 0xfa, // Block 0xd, offset 0x340 - 0x340: 0xb8, 0x341: 0xb8, 0x342: 0xb8, 0x343: 0xb8, 0x344: 0xb8, 0x345: 0xb8, 0x346: 0xb8, 0x347: 0xb8, - 0x348: 0xb8, 0x349: 0xb8, 0x34a: 0xb8, 0x34b: 0xb8, 0x34c: 0xb8, 0x34d: 0xb8, 0x34e: 0xb8, 0x34f: 0xb8, - 0x350: 0xb8, 0x351: 0xb8, 0x352: 0xb8, 0x353: 0xb8, 0x354: 0xb8, 0x355: 0xb8, 0x356: 0xb8, 0x357: 0xb8, - 0x358: 0xb8, 0x359: 0xb8, 0x35a: 0xb8, 0x35b: 0xb8, 0x35c: 0xb8, 0x35d: 0xb8, 0x35e: 0xb8, 0x35f: 0xb8, - 0x360: 0xb8, 0x361: 0xb8, 0x362: 0xb8, 0x363: 0xb8, 0x364: 0xb8, 0x365: 0xb8, 0x366: 0xb8, 0x367: 0xb8, - 0x368: 0xb8, 0x369: 0xb8, 0x36a: 0xb8, 0x36b: 0xb8, 0x36c: 0xb8, 0x36d: 0xb8, 0x36e: 0xb8, 0x36f: 0xb8, - 0x370: 0xb8, 0x371: 0xb8, 0x372: 0xb8, 0x373: 0xb8, 0x374: 0xb8, 0x375: 0xb8, 0x376: 0xb8, 0x377: 0xb8, - 0x378: 0xb8, 0x379: 0xb8, 0x37a: 0xb8, 0x37b: 0xb8, 0x37c: 0xb8, 0x37d: 0xb8, 0x37e: 0xb8, 0x37f: 0xb8, + 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba, + 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba, + 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba, + 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba, + 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba, + 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 
0xba, + 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba, + 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba, // Block 0xe, offset 0x380 - 0x380: 0xb8, 0x381: 0xb8, 0x382: 0xb8, 0x383: 0xb8, 0x384: 0xb8, 0x385: 0xb8, 0x386: 0xb8, 0x387: 0xb8, - 0x388: 0xb8, 0x389: 0xb8, 0x38a: 0xb8, 0x38b: 0xb8, 0x38c: 0xb8, 0x38d: 0xb8, 0x38e: 0xb8, 0x38f: 0xb8, - 0x390: 0xb8, 0x391: 0xb8, 0x392: 0xb8, 0x393: 0xb8, 0x394: 0xb8, 0x395: 0xb8, 0x396: 0xb8, 0x397: 0xb8, - 0x398: 0xb8, 0x399: 0xb8, 0x39a: 0xb8, 0x39b: 0xb8, 0x39c: 0xb8, 0x39d: 0xb8, 0x39e: 0xb8, 0x39f: 0xb8, - 0x3a0: 0xb8, 0x3a1: 0xb8, 0x3a2: 0xb8, 0x3a3: 0xb8, 0x3a4: 0xfa, 0x3a5: 0xfb, 0x3a6: 0xfc, 0x3a7: 0xfd, - 0x3a8: 0x45, 0x3a9: 0xfe, 0x3aa: 0xff, 0x3ab: 0x46, 0x3ac: 0x47, 0x3ad: 0x48, 0x3ae: 0x49, 0x3af: 0x4a, - 0x3b0: 0x100, 0x3b1: 0x4b, 0x3b2: 0x4c, 0x3b3: 0x4d, 0x3b4: 0x4e, 0x3b5: 0x4f, 0x3b6: 0x101, 0x3b7: 0x50, - 0x3b8: 0x51, 0x3b9: 0x52, 0x3ba: 0x53, 0x3bb: 0x54, 0x3bc: 0x55, 0x3bd: 0x56, 0x3be: 0x57, 0x3bf: 0x58, + 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba, + 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba, + 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba, + 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba, + 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfb, 0x3a5: 0xfc, 0x3a6: 0xfd, 0x3a7: 0xfe, + 0x3a8: 0x47, 0x3a9: 0xff, 0x3aa: 0x100, 0x3ab: 0x48, 0x3ac: 0x49, 0x3ad: 0x4a, 0x3ae: 0x4b, 0x3af: 0x4c, + 0x3b0: 0x101, 0x3b1: 0x4d, 0x3b2: 0x4e, 0x3b3: 0x4f, 0x3b4: 0x50, 0x3b5: 0x51, 0x3b6: 0x102, 0x3b7: 0x52, + 0x3b8: 0x53, 0x3b9: 0x54, 0x3ba: 0x55, 0x3bb: 0x56, 0x3bc: 0x57, 0x3bd: 0x58, 0x3be: 0x59, 0x3bf: 0x5a, // Block 0xf, offset 0x3c0 - 0x3c0: 0x102, 0x3c1: 0x103, 0x3c2: 0x9d, 0x3c3: 0x104, 0x3c4: 0x105, 0x3c5: 0x9b, 0x3c6: 0x106, 0x3c7: 0x107, - 0x3c8: 0xb8, 0x3c9: 0xb8, 0x3ca: 0x108, 0x3cb: 0x109, 0x3cc: 0x10a, 0x3cd: 0x10b, 0x3ce: 0x10c, 0x3cf: 0x10d, - 0x3d0: 0x10e, 0x3d1: 0x9d, 0x3d2: 0x10f, 0x3d3: 0x110, 0x3d4: 0x111, 0x3d5: 0x112, 0x3d6: 0xb8, 0x3d7: 0xb8, - 0x3d8: 0x9d, 0x3d9: 0x9d, 0x3da: 0x9d, 0x3db: 0x9d, 0x3dc: 0x113, 0x3dd: 0x114, 0x3de: 0xb8, 0x3df: 0xb8, - 0x3e0: 0x115, 0x3e1: 0x116, 0x3e2: 0x117, 0x3e3: 0x118, 0x3e4: 0x119, 0x3e5: 0xb8, 0x3e6: 0x11a, 0x3e7: 0x11b, - 0x3e8: 0x11c, 0x3e9: 0x11d, 0x3ea: 0x11e, 0x3eb: 0x59, 0x3ec: 0x11f, 0x3ed: 0x120, 0x3ee: 0x5a, 0x3ef: 0xb8, - 0x3f0: 0x9d, 0x3f1: 0x121, 0x3f2: 0x122, 0x3f3: 0x123, 0x3f4: 0xb8, 0x3f5: 0xb8, 0x3f6: 0xb8, 0x3f7: 0xb8, - 0x3f8: 0xb8, 0x3f9: 0x124, 0x3fa: 0xb8, 0x3fb: 0xb8, 0x3fc: 0xb8, 0x3fd: 0xb8, 0x3fe: 0xb8, 0x3ff: 0xb8, + 0x3c0: 0x103, 0x3c1: 0x104, 0x3c2: 0x9f, 0x3c3: 0x105, 0x3c4: 0x106, 0x3c5: 0x9b, 0x3c6: 0x107, 0x3c7: 0x108, + 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x109, 0x3cb: 0x10a, 0x3cc: 0x10b, 0x3cd: 0x10c, 0x3ce: 0x10d, 0x3cf: 0x10e, + 0x3d0: 0x10f, 0x3d1: 0x9f, 0x3d2: 0x110, 0x3d3: 0x111, 0x3d4: 0x112, 0x3d5: 0x113, 0x3d6: 0xba, 0x3d7: 0xba, + 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x114, 0x3dd: 0x115, 0x3de: 0xba, 0x3df: 0xba, + 0x3e0: 0x116, 0x3e1: 0x117, 0x3e2: 0x118, 0x3e3: 0x119, 0x3e4: 0x11a, 0x3e5: 0xba, 0x3e6: 0x11b, 0x3e7: 0x11c, + 0x3e8: 0x11d, 0x3e9: 0x11e, 0x3ea: 0x11f, 0x3eb: 0x5b, 0x3ec: 0x120, 0x3ed: 0x121, 0x3ee: 0x5c, 0x3ef: 0xba, + 0x3f0: 0x122, 0x3f1: 0x123, 
0x3f2: 0x124, 0x3f3: 0x125, 0x3f4: 0xba, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba, + 0x3f8: 0xba, 0x3f9: 0x126, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0xba, 0x3fd: 0xba, 0x3fe: 0xba, 0x3ff: 0xba, // Block 0x10, offset 0x400 - 0x400: 0x125, 0x401: 0x126, 0x402: 0x127, 0x403: 0x128, 0x404: 0x129, 0x405: 0x12a, 0x406: 0x12b, 0x407: 0x12c, - 0x408: 0x12d, 0x409: 0xb8, 0x40a: 0x12e, 0x40b: 0x12f, 0x40c: 0x5b, 0x40d: 0x5c, 0x40e: 0xb8, 0x40f: 0xb8, - 0x410: 0x130, 0x411: 0x131, 0x412: 0x132, 0x413: 0x133, 0x414: 0xb8, 0x415: 0xb8, 0x416: 0x134, 0x417: 0x135, - 0x418: 0x136, 0x419: 0x137, 0x41a: 0x138, 0x41b: 0x139, 0x41c: 0x13a, 0x41d: 0xb8, 0x41e: 0xb8, 0x41f: 0xb8, - 0x420: 0xb8, 0x421: 0xb8, 0x422: 0x13b, 0x423: 0x13c, 0x424: 0xb8, 0x425: 0xb8, 0x426: 0xb8, 0x427: 0xb8, - 0x428: 0xb8, 0x429: 0xb8, 0x42a: 0xb8, 0x42b: 0x13d, 0x42c: 0xb8, 0x42d: 0xb8, 0x42e: 0xb8, 0x42f: 0xb8, - 0x430: 0x13e, 0x431: 0x13f, 0x432: 0x140, 0x433: 0xb8, 0x434: 0xb8, 0x435: 0xb8, 0x436: 0xb8, 0x437: 0xb8, - 0x438: 0xb8, 0x439: 0xb8, 0x43a: 0xb8, 0x43b: 0xb8, 0x43c: 0xb8, 0x43d: 0xb8, 0x43e: 0xb8, 0x43f: 0xb8, + 0x400: 0x127, 0x401: 0x128, 0x402: 0x129, 0x403: 0x12a, 0x404: 0x12b, 0x405: 0x12c, 0x406: 0x12d, 0x407: 0x12e, + 0x408: 0x12f, 0x409: 0xba, 0x40a: 0x130, 0x40b: 0x131, 0x40c: 0x5d, 0x40d: 0x5e, 0x40e: 0xba, 0x40f: 0xba, + 0x410: 0x132, 0x411: 0x133, 0x412: 0x134, 0x413: 0x135, 0x414: 0xba, 0x415: 0xba, 0x416: 0x136, 0x417: 0x137, + 0x418: 0x138, 0x419: 0x139, 0x41a: 0x13a, 0x41b: 0x13b, 0x41c: 0x13c, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba, + 0x420: 0xba, 0x421: 0xba, 0x422: 0x13d, 0x423: 0x13e, 0x424: 0xba, 0x425: 0xba, 0x426: 0xba, 0x427: 0xba, + 0x428: 0x13f, 0x429: 0x140, 0x42a: 0x141, 0x42b: 0x142, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba, + 0x430: 0x143, 0x431: 0x144, 0x432: 0x145, 0x433: 0xba, 0x434: 0x146, 0x435: 0x147, 0x436: 0xba, 0x437: 0xba, + 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0xba, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0xba, // Block 0x11, offset 0x440 - 0x440: 0x9d, 0x441: 0x9d, 0x442: 0x9d, 0x443: 0x9d, 0x444: 0x9d, 0x445: 0x9d, 0x446: 0x9d, 0x447: 0x9d, - 0x448: 0x9d, 0x449: 0x9d, 0x44a: 0x9d, 0x44b: 0x9d, 0x44c: 0x9d, 0x44d: 0x9d, 0x44e: 0x141, 0x44f: 0xb8, - 0x450: 0x9b, 0x451: 0x142, 0x452: 0x9d, 0x453: 0x9d, 0x454: 0x9d, 0x455: 0x143, 0x456: 0xb8, 0x457: 0xb8, - 0x458: 0xb8, 0x459: 0xb8, 0x45a: 0xb8, 0x45b: 0xb8, 0x45c: 0xb8, 0x45d: 0xb8, 0x45e: 0xb8, 0x45f: 0xb8, - 0x460: 0xb8, 0x461: 0xb8, 0x462: 0xb8, 0x463: 0xb8, 0x464: 0xb8, 0x465: 0xb8, 0x466: 0xb8, 0x467: 0xb8, - 0x468: 0xb8, 0x469: 0xb8, 0x46a: 0xb8, 0x46b: 0xb8, 0x46c: 0xb8, 0x46d: 0xb8, 0x46e: 0xb8, 0x46f: 0xb8, - 0x470: 0xb8, 0x471: 0xb8, 0x472: 0xb8, 0x473: 0xb8, 0x474: 0xb8, 0x475: 0xb8, 0x476: 0xb8, 0x477: 0xb8, - 0x478: 0xb8, 0x479: 0xb8, 0x47a: 0xb8, 0x47b: 0xb8, 0x47c: 0xb8, 0x47d: 0xb8, 0x47e: 0xb8, 0x47f: 0xb8, + 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f, + 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x148, 0x44f: 0xba, + 0x450: 0x9b, 0x451: 0x149, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x14a, 0x456: 0xba, 0x457: 0xba, + 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba, + 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba, + 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba, + 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 
0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba, + 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba, // Block 0x12, offset 0x480 - 0x480: 0x9d, 0x481: 0x9d, 0x482: 0x9d, 0x483: 0x9d, 0x484: 0x9d, 0x485: 0x9d, 0x486: 0x9d, 0x487: 0x9d, - 0x488: 0x9d, 0x489: 0x9d, 0x48a: 0x9d, 0x48b: 0x9d, 0x48c: 0x9d, 0x48d: 0x9d, 0x48e: 0x9d, 0x48f: 0x9d, - 0x490: 0x144, 0x491: 0xb8, 0x492: 0xb8, 0x493: 0xb8, 0x494: 0xb8, 0x495: 0xb8, 0x496: 0xb8, 0x497: 0xb8, - 0x498: 0xb8, 0x499: 0xb8, 0x49a: 0xb8, 0x49b: 0xb8, 0x49c: 0xb8, 0x49d: 0xb8, 0x49e: 0xb8, 0x49f: 0xb8, - 0x4a0: 0xb8, 0x4a1: 0xb8, 0x4a2: 0xb8, 0x4a3: 0xb8, 0x4a4: 0xb8, 0x4a5: 0xb8, 0x4a6: 0xb8, 0x4a7: 0xb8, - 0x4a8: 0xb8, 0x4a9: 0xb8, 0x4aa: 0xb8, 0x4ab: 0xb8, 0x4ac: 0xb8, 0x4ad: 0xb8, 0x4ae: 0xb8, 0x4af: 0xb8, - 0x4b0: 0xb8, 0x4b1: 0xb8, 0x4b2: 0xb8, 0x4b3: 0xb8, 0x4b4: 0xb8, 0x4b5: 0xb8, 0x4b6: 0xb8, 0x4b7: 0xb8, - 0x4b8: 0xb8, 0x4b9: 0xb8, 0x4ba: 0xb8, 0x4bb: 0xb8, 0x4bc: 0xb8, 0x4bd: 0xb8, 0x4be: 0xb8, 0x4bf: 0xb8, + 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f, + 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f, + 0x490: 0x14b, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba, + 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba, + 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba, + 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba, + 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba, + 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba, // Block 0x13, offset 0x4c0 - 0x4c0: 0xb8, 0x4c1: 0xb8, 0x4c2: 0xb8, 0x4c3: 0xb8, 0x4c4: 0xb8, 0x4c5: 0xb8, 0x4c6: 0xb8, 0x4c7: 0xb8, - 0x4c8: 0xb8, 0x4c9: 0xb8, 0x4ca: 0xb8, 0x4cb: 0xb8, 0x4cc: 0xb8, 0x4cd: 0xb8, 0x4ce: 0xb8, 0x4cf: 0xb8, - 0x4d0: 0x9d, 0x4d1: 0x9d, 0x4d2: 0x9d, 0x4d3: 0x9d, 0x4d4: 0x9d, 0x4d5: 0x9d, 0x4d6: 0x9d, 0x4d7: 0x9d, - 0x4d8: 0x9d, 0x4d9: 0x145, 0x4da: 0xb8, 0x4db: 0xb8, 0x4dc: 0xb8, 0x4dd: 0xb8, 0x4de: 0xb8, 0x4df: 0xb8, - 0x4e0: 0xb8, 0x4e1: 0xb8, 0x4e2: 0xb8, 0x4e3: 0xb8, 0x4e4: 0xb8, 0x4e5: 0xb8, 0x4e6: 0xb8, 0x4e7: 0xb8, - 0x4e8: 0xb8, 0x4e9: 0xb8, 0x4ea: 0xb8, 0x4eb: 0xb8, 0x4ec: 0xb8, 0x4ed: 0xb8, 0x4ee: 0xb8, 0x4ef: 0xb8, - 0x4f0: 0xb8, 0x4f1: 0xb8, 0x4f2: 0xb8, 0x4f3: 0xb8, 0x4f4: 0xb8, 0x4f5: 0xb8, 0x4f6: 0xb8, 0x4f7: 0xb8, - 0x4f8: 0xb8, 0x4f9: 0xb8, 0x4fa: 0xb8, 0x4fb: 0xb8, 0x4fc: 0xb8, 0x4fd: 0xb8, 0x4fe: 0xb8, 0x4ff: 0xb8, + 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba, + 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba, + 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f, + 0x4d8: 0x9f, 0x4d9: 0x14c, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba, + 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba, + 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba, + 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba, + 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 
0x4fc: 0xba, 0x4fd: 0xba, 0x4fe: 0xba, 0x4ff: 0xba, // Block 0x14, offset 0x500 - 0x500: 0xb8, 0x501: 0xb8, 0x502: 0xb8, 0x503: 0xb8, 0x504: 0xb8, 0x505: 0xb8, 0x506: 0xb8, 0x507: 0xb8, - 0x508: 0xb8, 0x509: 0xb8, 0x50a: 0xb8, 0x50b: 0xb8, 0x50c: 0xb8, 0x50d: 0xb8, 0x50e: 0xb8, 0x50f: 0xb8, - 0x510: 0xb8, 0x511: 0xb8, 0x512: 0xb8, 0x513: 0xb8, 0x514: 0xb8, 0x515: 0xb8, 0x516: 0xb8, 0x517: 0xb8, - 0x518: 0xb8, 0x519: 0xb8, 0x51a: 0xb8, 0x51b: 0xb8, 0x51c: 0xb8, 0x51d: 0xb8, 0x51e: 0xb8, 0x51f: 0xb8, - 0x520: 0x9d, 0x521: 0x9d, 0x522: 0x9d, 0x523: 0x9d, 0x524: 0x9d, 0x525: 0x9d, 0x526: 0x9d, 0x527: 0x9d, - 0x528: 0x13d, 0x529: 0x146, 0x52a: 0xb8, 0x52b: 0x147, 0x52c: 0x148, 0x52d: 0x149, 0x52e: 0x14a, 0x52f: 0xb8, - 0x530: 0xb8, 0x531: 0xb8, 0x532: 0xb8, 0x533: 0xb8, 0x534: 0xb8, 0x535: 0xb8, 0x536: 0xb8, 0x537: 0xb8, - 0x538: 0xb8, 0x539: 0xb8, 0x53a: 0xb8, 0x53b: 0xb8, 0x53c: 0x9d, 0x53d: 0x14b, 0x53e: 0x14c, 0x53f: 0x14d, + 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba, + 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba, + 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba, + 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba, + 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f, + 0x528: 0x142, 0x529: 0x14d, 0x52a: 0xba, 0x52b: 0x14e, 0x52c: 0x14f, 0x52d: 0x150, 0x52e: 0x151, 0x52f: 0xba, + 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba, + 0x538: 0xba, 0x539: 0xba, 0x53a: 0xba, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x152, 0x53e: 0x153, 0x53f: 0x154, // Block 0x15, offset 0x540 - 0x540: 0x9d, 0x541: 0x9d, 0x542: 0x9d, 0x543: 0x9d, 0x544: 0x9d, 0x545: 0x9d, 0x546: 0x9d, 0x547: 0x9d, - 0x548: 0x9d, 0x549: 0x9d, 0x54a: 0x9d, 0x54b: 0x9d, 0x54c: 0x9d, 0x54d: 0x9d, 0x54e: 0x9d, 0x54f: 0x9d, - 0x550: 0x9d, 0x551: 0x9d, 0x552: 0x9d, 0x553: 0x9d, 0x554: 0x9d, 0x555: 0x9d, 0x556: 0x9d, 0x557: 0x9d, - 0x558: 0x9d, 0x559: 0x9d, 0x55a: 0x9d, 0x55b: 0x9d, 0x55c: 0x9d, 0x55d: 0x9d, 0x55e: 0x9d, 0x55f: 0x14e, - 0x560: 0x9d, 0x561: 0x9d, 0x562: 0x9d, 0x563: 0x9d, 0x564: 0x9d, 0x565: 0x9d, 0x566: 0x9d, 0x567: 0x9d, - 0x568: 0x9d, 0x569: 0x9d, 0x56a: 0x9d, 0x56b: 0x14f, 0x56c: 0xb8, 0x56d: 0xb8, 0x56e: 0xb8, 0x56f: 0xb8, - 0x570: 0xb8, 0x571: 0xb8, 0x572: 0xb8, 0x573: 0xb8, 0x574: 0xb8, 0x575: 0xb8, 0x576: 0xb8, 0x577: 0xb8, - 0x578: 0xb8, 0x579: 0xb8, 0x57a: 0xb8, 0x57b: 0xb8, 0x57c: 0xb8, 0x57d: 0xb8, 0x57e: 0xb8, 0x57f: 0xb8, + 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f, + 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f, + 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f, + 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x155, + 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f, + 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x156, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba, + 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba, + 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba, // Block 0x16, offset 0x580 - 
0x580: 0x150, 0x581: 0xb8, 0x582: 0xb8, 0x583: 0xb8, 0x584: 0xb8, 0x585: 0xb8, 0x586: 0xb8, 0x587: 0xb8, - 0x588: 0xb8, 0x589: 0xb8, 0x58a: 0xb8, 0x58b: 0xb8, 0x58c: 0xb8, 0x58d: 0xb8, 0x58e: 0xb8, 0x58f: 0xb8, - 0x590: 0xb8, 0x591: 0xb8, 0x592: 0xb8, 0x593: 0xb8, 0x594: 0xb8, 0x595: 0xb8, 0x596: 0xb8, 0x597: 0xb8, - 0x598: 0xb8, 0x599: 0xb8, 0x59a: 0xb8, 0x59b: 0xb8, 0x59c: 0xb8, 0x59d: 0xb8, 0x59e: 0xb8, 0x59f: 0xb8, - 0x5a0: 0xb8, 0x5a1: 0xb8, 0x5a2: 0xb8, 0x5a3: 0xb8, 0x5a4: 0xb8, 0x5a5: 0xb8, 0x5a6: 0xb8, 0x5a7: 0xb8, - 0x5a8: 0xb8, 0x5a9: 0xb8, 0x5aa: 0xb8, 0x5ab: 0xb8, 0x5ac: 0xb8, 0x5ad: 0xb8, 0x5ae: 0xb8, 0x5af: 0xb8, - 0x5b0: 0x9d, 0x5b1: 0x151, 0x5b2: 0x152, 0x5b3: 0xb8, 0x5b4: 0xb8, 0x5b5: 0xb8, 0x5b6: 0xb8, 0x5b7: 0xb8, - 0x5b8: 0xb8, 0x5b9: 0xb8, 0x5ba: 0xb8, 0x5bb: 0xb8, 0x5bc: 0xb8, 0x5bd: 0xb8, 0x5be: 0xb8, 0x5bf: 0xb8, + 0x580: 0x9f, 0x581: 0x9f, 0x582: 0x9f, 0x583: 0x9f, 0x584: 0x157, 0x585: 0x158, 0x586: 0x9f, 0x587: 0x9f, + 0x588: 0x9f, 0x589: 0x9f, 0x58a: 0x9f, 0x58b: 0x159, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba, + 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba, + 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba, + 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba, + 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba, + 0x5b0: 0x9f, 0x5b1: 0x15a, 0x5b2: 0x15b, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba, + 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba, // Block 0x17, offset 0x5c0 - 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x153, 0x5c4: 0x154, 0x5c5: 0x155, 0x5c6: 0x156, 0x5c7: 0x157, - 0x5c8: 0x9b, 0x5c9: 0x158, 0x5ca: 0xb8, 0x5cb: 0xb8, 0x5cc: 0x9b, 0x5cd: 0x159, 0x5ce: 0xb8, 0x5cf: 0xb8, - 0x5d0: 0x5d, 0x5d1: 0x5e, 0x5d2: 0x5f, 0x5d3: 0x60, 0x5d4: 0x61, 0x5d5: 0x62, 0x5d6: 0x63, 0x5d7: 0x64, - 0x5d8: 0x65, 0x5d9: 0x66, 0x5da: 0x67, 0x5db: 0x68, 0x5dc: 0x69, 0x5dd: 0x6a, 0x5de: 0x6b, 0x5df: 0x6c, + 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x15c, 0x5c4: 0x15d, 0x5c5: 0x15e, 0x5c6: 0x15f, 0x5c7: 0x160, + 0x5c8: 0x9b, 0x5c9: 0x161, 0x5ca: 0xba, 0x5cb: 0xba, 0x5cc: 0x9b, 0x5cd: 0x162, 0x5ce: 0xba, 0x5cf: 0xba, + 0x5d0: 0x5f, 0x5d1: 0x60, 0x5d2: 0x61, 0x5d3: 0x62, 0x5d4: 0x63, 0x5d5: 0x64, 0x5d6: 0x65, 0x5d7: 0x66, + 0x5d8: 0x67, 0x5d9: 0x68, 0x5da: 0x69, 0x5db: 0x6a, 0x5dc: 0x6b, 0x5dd: 0x6c, 0x5de: 0x6d, 0x5df: 0x6e, 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b, - 0x5e8: 0x15a, 0x5e9: 0x15b, 0x5ea: 0x15c, 0x5eb: 0xb8, 0x5ec: 0xb8, 0x5ed: 0xb8, 0x5ee: 0xb8, 0x5ef: 0xb8, - 0x5f0: 0xb8, 0x5f1: 0xb8, 0x5f2: 0xb8, 0x5f3: 0xb8, 0x5f4: 0xb8, 0x5f5: 0xb8, 0x5f6: 0xb8, 0x5f7: 0xb8, - 0x5f8: 0xb8, 0x5f9: 0xb8, 0x5fa: 0xb8, 0x5fb: 0xb8, 0x5fc: 0xb8, 0x5fd: 0xb8, 0x5fe: 0xb8, 0x5ff: 0xb8, + 0x5e8: 0x163, 0x5e9: 0x164, 0x5ea: 0x165, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba, + 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba, + 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 0x5ff: 0xba, // Block 0x18, offset 0x600 - 0x600: 0x15d, 0x601: 0xb8, 0x602: 0xb8, 0x603: 0xb8, 0x604: 0xb8, 0x605: 0xb8, 0x606: 0xb8, 0x607: 0xb8, - 0x608: 0xb8, 0x609: 0xb8, 0x60a: 0xb8, 0x60b: 0xb8, 0x60c: 0xb8, 0x60d: 0xb8, 
0x60e: 0xb8, 0x60f: 0xb8, - 0x610: 0xb8, 0x611: 0xb8, 0x612: 0xb8, 0x613: 0xb8, 0x614: 0xb8, 0x615: 0xb8, 0x616: 0xb8, 0x617: 0xb8, - 0x618: 0xb8, 0x619: 0xb8, 0x61a: 0xb8, 0x61b: 0xb8, 0x61c: 0xb8, 0x61d: 0xb8, 0x61e: 0xb8, 0x61f: 0xb8, - 0x620: 0x9d, 0x621: 0x9d, 0x622: 0x9d, 0x623: 0x15e, 0x624: 0x6d, 0x625: 0x15f, 0x626: 0xb8, 0x627: 0xb8, - 0x628: 0xb8, 0x629: 0xb8, 0x62a: 0xb8, 0x62b: 0xb8, 0x62c: 0xb8, 0x62d: 0xb8, 0x62e: 0xb8, 0x62f: 0xb8, - 0x630: 0xb8, 0x631: 0xb8, 0x632: 0xb8, 0x633: 0xb8, 0x634: 0xb8, 0x635: 0xb8, 0x636: 0xb8, 0x637: 0xb8, - 0x638: 0x6e, 0x639: 0x6f, 0x63a: 0x70, 0x63b: 0x160, 0x63c: 0xb8, 0x63d: 0xb8, 0x63e: 0xb8, 0x63f: 0xb8, + 0x600: 0x166, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0xba, 0x605: 0xba, 0x606: 0xba, 0x607: 0xba, + 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0xba, 0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba, + 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba, + 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba, + 0x620: 0x122, 0x621: 0x122, 0x622: 0x122, 0x623: 0x167, 0x624: 0x6f, 0x625: 0x168, 0x626: 0xba, 0x627: 0xba, + 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba, + 0x630: 0xba, 0x631: 0xba, 0x632: 0xba, 0x633: 0xba, 0x634: 0xba, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba, + 0x638: 0x70, 0x639: 0x71, 0x63a: 0x72, 0x63b: 0x169, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba, // Block 0x19, offset 0x640 - 0x640: 0x161, 0x641: 0x9b, 0x642: 0x162, 0x643: 0x163, 0x644: 0x71, 0x645: 0x72, 0x646: 0x164, 0x647: 0x165, - 0x648: 0x73, 0x649: 0x166, 0x64a: 0xb8, 0x64b: 0xb8, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b, + 0x640: 0x16a, 0x641: 0x9b, 0x642: 0x16b, 0x643: 0x16c, 0x644: 0x73, 0x645: 0x74, 0x646: 0x16d, 0x647: 0x16e, + 0x648: 0x75, 0x649: 0x16f, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b, 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b, - 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x167, 0x65c: 0x9b, 0x65d: 0x168, 0x65e: 0x9b, 0x65f: 0x169, - 0x660: 0x16a, 0x661: 0x16b, 0x662: 0x16c, 0x663: 0xb8, 0x664: 0x16d, 0x665: 0x16e, 0x666: 0x16f, 0x667: 0x170, - 0x668: 0xb8, 0x669: 0xb8, 0x66a: 0xb8, 0x66b: 0xb8, 0x66c: 0xb8, 0x66d: 0xb8, 0x66e: 0xb8, 0x66f: 0xb8, - 0x670: 0xb8, 0x671: 0xb8, 0x672: 0xb8, 0x673: 0xb8, 0x674: 0xb8, 0x675: 0xb8, 0x676: 0xb8, 0x677: 0xb8, - 0x678: 0xb8, 0x679: 0xb8, 0x67a: 0xb8, 0x67b: 0xb8, 0x67c: 0xb8, 0x67d: 0xb8, 0x67e: 0xb8, 0x67f: 0xb8, + 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x170, 0x65c: 0x9b, 0x65d: 0x171, 0x65e: 0x9b, 0x65f: 0x172, + 0x660: 0x173, 0x661: 0x174, 0x662: 0x175, 0x663: 0xba, 0x664: 0x176, 0x665: 0x177, 0x666: 0x178, 0x667: 0x179, + 0x668: 0xba, 0x669: 0xba, 0x66a: 0xba, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba, + 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba, + 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba, // Block 0x1a, offset 0x680 - 0x680: 0x9d, 0x681: 0x9d, 0x682: 0x9d, 0x683: 0x9d, 0x684: 0x9d, 0x685: 0x9d, 0x686: 0x9d, 0x687: 0x9d, - 0x688: 0x9d, 0x689: 0x9d, 0x68a: 0x9d, 0x68b: 0x9d, 0x68c: 0x9d, 0x68d: 0x9d, 0x68e: 0x9d, 0x68f: 0x9d, - 0x690: 0x9d, 0x691: 0x9d, 0x692: 0x9d, 0x693: 0x9d, 0x694: 0x9d, 0x695: 0x9d, 0x696: 0x9d, 0x697: 0x9d, - 0x698: 0x9d, 0x699: 0x9d, 0x69a: 0x9d, 
0x69b: 0x171, 0x69c: 0x9d, 0x69d: 0x9d, 0x69e: 0x9d, 0x69f: 0x9d, - 0x6a0: 0x9d, 0x6a1: 0x9d, 0x6a2: 0x9d, 0x6a3: 0x9d, 0x6a4: 0x9d, 0x6a5: 0x9d, 0x6a6: 0x9d, 0x6a7: 0x9d, - 0x6a8: 0x9d, 0x6a9: 0x9d, 0x6aa: 0x9d, 0x6ab: 0x9d, 0x6ac: 0x9d, 0x6ad: 0x9d, 0x6ae: 0x9d, 0x6af: 0x9d, - 0x6b0: 0x9d, 0x6b1: 0x9d, 0x6b2: 0x9d, 0x6b3: 0x9d, 0x6b4: 0x9d, 0x6b5: 0x9d, 0x6b6: 0x9d, 0x6b7: 0x9d, - 0x6b8: 0x9d, 0x6b9: 0x9d, 0x6ba: 0x9d, 0x6bb: 0x9d, 0x6bc: 0x9d, 0x6bd: 0x9d, 0x6be: 0x9d, 0x6bf: 0x9d, + 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f, + 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f, + 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f, + 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x17a, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f, + 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f, + 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f, + 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f, + 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f, // Block 0x1b, offset 0x6c0 - 0x6c0: 0x9d, 0x6c1: 0x9d, 0x6c2: 0x9d, 0x6c3: 0x9d, 0x6c4: 0x9d, 0x6c5: 0x9d, 0x6c6: 0x9d, 0x6c7: 0x9d, - 0x6c8: 0x9d, 0x6c9: 0x9d, 0x6ca: 0x9d, 0x6cb: 0x9d, 0x6cc: 0x9d, 0x6cd: 0x9d, 0x6ce: 0x9d, 0x6cf: 0x9d, - 0x6d0: 0x9d, 0x6d1: 0x9d, 0x6d2: 0x9d, 0x6d3: 0x9d, 0x6d4: 0x9d, 0x6d5: 0x9d, 0x6d6: 0x9d, 0x6d7: 0x9d, - 0x6d8: 0x9d, 0x6d9: 0x9d, 0x6da: 0x9d, 0x6db: 0x9d, 0x6dc: 0x172, 0x6dd: 0x9d, 0x6de: 0x9d, 0x6df: 0x9d, - 0x6e0: 0x173, 0x6e1: 0x9d, 0x6e2: 0x9d, 0x6e3: 0x9d, 0x6e4: 0x9d, 0x6e5: 0x9d, 0x6e6: 0x9d, 0x6e7: 0x9d, - 0x6e8: 0x9d, 0x6e9: 0x9d, 0x6ea: 0x9d, 0x6eb: 0x9d, 0x6ec: 0x9d, 0x6ed: 0x9d, 0x6ee: 0x9d, 0x6ef: 0x9d, - 0x6f0: 0x9d, 0x6f1: 0x9d, 0x6f2: 0x9d, 0x6f3: 0x9d, 0x6f4: 0x9d, 0x6f5: 0x9d, 0x6f6: 0x9d, 0x6f7: 0x9d, - 0x6f8: 0x9d, 0x6f9: 0x9d, 0x6fa: 0x9d, 0x6fb: 0x9d, 0x6fc: 0x9d, 0x6fd: 0x9d, 0x6fe: 0x9d, 0x6ff: 0x9d, + 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f, + 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f, + 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f, + 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x17b, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f, + 0x6e0: 0x17c, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f, + 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f, + 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f, + 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f, // Block 0x1c, offset 0x700 - 0x700: 0x9d, 0x701: 0x9d, 0x702: 0x9d, 0x703: 0x9d, 0x704: 0x9d, 0x705: 0x9d, 0x706: 0x9d, 0x707: 0x9d, - 0x708: 0x9d, 0x709: 0x9d, 0x70a: 0x9d, 0x70b: 0x9d, 0x70c: 0x9d, 0x70d: 0x9d, 0x70e: 0x9d, 0x70f: 0x9d, - 0x710: 0x9d, 0x711: 0x9d, 0x712: 0x9d, 0x713: 0x9d, 0x714: 0x9d, 0x715: 0x9d, 0x716: 0x9d, 0x717: 0x9d, - 0x718: 0x9d, 0x719: 0x9d, 0x71a: 0x9d, 0x71b: 0x9d, 0x71c: 0x9d, 0x71d: 0x9d, 0x71e: 0x9d, 0x71f: 0x9d, - 0x720: 0x9d, 0x721: 0x9d, 0x722: 
0x9d, 0x723: 0x9d, 0x724: 0x9d, 0x725: 0x9d, 0x726: 0x9d, 0x727: 0x9d, - 0x728: 0x9d, 0x729: 0x9d, 0x72a: 0x9d, 0x72b: 0x9d, 0x72c: 0x9d, 0x72d: 0x9d, 0x72e: 0x9d, 0x72f: 0x9d, - 0x730: 0x9d, 0x731: 0x9d, 0x732: 0x9d, 0x733: 0x9d, 0x734: 0x9d, 0x735: 0x9d, 0x736: 0x9d, 0x737: 0x9d, - 0x738: 0x9d, 0x739: 0x9d, 0x73a: 0x174, 0x73b: 0xb8, 0x73c: 0xb8, 0x73d: 0xb8, 0x73e: 0xb8, 0x73f: 0xb8, + 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f, + 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f, + 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f, + 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f, + 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f, + 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f, + 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f, + 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x17d, 0x73b: 0x9f, 0x73c: 0x9f, 0x73d: 0x9f, 0x73e: 0x9f, 0x73f: 0x9f, // Block 0x1d, offset 0x740 - 0x740: 0xb8, 0x741: 0xb8, 0x742: 0xb8, 0x743: 0xb8, 0x744: 0xb8, 0x745: 0xb8, 0x746: 0xb8, 0x747: 0xb8, - 0x748: 0xb8, 0x749: 0xb8, 0x74a: 0xb8, 0x74b: 0xb8, 0x74c: 0xb8, 0x74d: 0xb8, 0x74e: 0xb8, 0x74f: 0xb8, - 0x750: 0xb8, 0x751: 0xb8, 0x752: 0xb8, 0x753: 0xb8, 0x754: 0xb8, 0x755: 0xb8, 0x756: 0xb8, 0x757: 0xb8, - 0x758: 0xb8, 0x759: 0xb8, 0x75a: 0xb8, 0x75b: 0xb8, 0x75c: 0xb8, 0x75d: 0xb8, 0x75e: 0xb8, 0x75f: 0xb8, - 0x760: 0x74, 0x761: 0x75, 0x762: 0x76, 0x763: 0x175, 0x764: 0x77, 0x765: 0x78, 0x766: 0x176, 0x767: 0x79, - 0x768: 0x7a, 0x769: 0xb8, 0x76a: 0xb8, 0x76b: 0xb8, 0x76c: 0xb8, 0x76d: 0xb8, 0x76e: 0xb8, 0x76f: 0xb8, - 0x770: 0xb8, 0x771: 0xb8, 0x772: 0xb8, 0x773: 0xb8, 0x774: 0xb8, 0x775: 0xb8, 0x776: 0xb8, 0x777: 0xb8, - 0x778: 0xb8, 0x779: 0xb8, 0x77a: 0xb8, 0x77b: 0xb8, 0x77c: 0xb8, 0x77d: 0xb8, 0x77e: 0xb8, 0x77f: 0xb8, + 0x740: 0x9f, 0x741: 0x9f, 0x742: 0x9f, 0x743: 0x9f, 0x744: 0x9f, 0x745: 0x9f, 0x746: 0x9f, 0x747: 0x9f, + 0x748: 0x9f, 0x749: 0x9f, 0x74a: 0x9f, 0x74b: 0x9f, 0x74c: 0x9f, 0x74d: 0x9f, 0x74e: 0x9f, 0x74f: 0x9f, + 0x750: 0x9f, 0x751: 0x9f, 0x752: 0x9f, 0x753: 0x9f, 0x754: 0x9f, 0x755: 0x9f, 0x756: 0x9f, 0x757: 0x9f, + 0x758: 0x9f, 0x759: 0x9f, 0x75a: 0x9f, 0x75b: 0x9f, 0x75c: 0x9f, 0x75d: 0x9f, 0x75e: 0x9f, 0x75f: 0x9f, + 0x760: 0x9f, 0x761: 0x9f, 0x762: 0x9f, 0x763: 0x9f, 0x764: 0x9f, 0x765: 0x9f, 0x766: 0x9f, 0x767: 0x9f, + 0x768: 0x9f, 0x769: 0x9f, 0x76a: 0x9f, 0x76b: 0x9f, 0x76c: 0x9f, 0x76d: 0x9f, 0x76e: 0x9f, 0x76f: 0x17e, + 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba, + 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba, // Block 0x1e, offset 0x780 - 0x790: 0x0d, 0x791: 0x0e, 0x792: 0x0f, 0x793: 0x10, 0x794: 0x11, 0x795: 0x0b, 0x796: 0x12, 0x797: 0x07, - 0x798: 0x13, 0x799: 0x0b, 0x79a: 0x0b, 0x79b: 0x14, 0x79c: 0x0b, 0x79d: 0x15, 0x79e: 0x16, 0x79f: 0x17, - 0x7a0: 0x07, 0x7a1: 0x07, 0x7a2: 0x07, 0x7a3: 0x07, 0x7a4: 0x07, 0x7a5: 0x07, 0x7a6: 0x07, 0x7a7: 0x07, - 0x7a8: 0x07, 0x7a9: 0x07, 0x7aa: 0x18, 0x7ab: 0x19, 0x7ac: 0x1a, 0x7ad: 0x0b, 0x7ae: 0x0b, 0x7af: 0x1b, - 0x7b0: 0x0b, 0x7b1: 0x0b, 0x7b2: 0x0b, 0x7b3: 0x0b, 0x7b4: 0x0b, 0x7b5: 0x0b, 0x7b6: 0x0b, 0x7b7: 0x0b, - 0x7b8: 0x0b, 0x7b9: 0x0b, 
0x7ba: 0x0b, 0x7bb: 0x0b, 0x7bc: 0x0b, 0x7bd: 0x0b, 0x7be: 0x0b, 0x7bf: 0x0b, + 0x780: 0xba, 0x781: 0xba, 0x782: 0xba, 0x783: 0xba, 0x784: 0xba, 0x785: 0xba, 0x786: 0xba, 0x787: 0xba, + 0x788: 0xba, 0x789: 0xba, 0x78a: 0xba, 0x78b: 0xba, 0x78c: 0xba, 0x78d: 0xba, 0x78e: 0xba, 0x78f: 0xba, + 0x790: 0xba, 0x791: 0xba, 0x792: 0xba, 0x793: 0xba, 0x794: 0xba, 0x795: 0xba, 0x796: 0xba, 0x797: 0xba, + 0x798: 0xba, 0x799: 0xba, 0x79a: 0xba, 0x79b: 0xba, 0x79c: 0xba, 0x79d: 0xba, 0x79e: 0xba, 0x79f: 0xba, + 0x7a0: 0x76, 0x7a1: 0x77, 0x7a2: 0x78, 0x7a3: 0x17f, 0x7a4: 0x79, 0x7a5: 0x7a, 0x7a6: 0x180, 0x7a7: 0x7b, + 0x7a8: 0x7c, 0x7a9: 0xba, 0x7aa: 0xba, 0x7ab: 0xba, 0x7ac: 0xba, 0x7ad: 0xba, 0x7ae: 0xba, 0x7af: 0xba, + 0x7b0: 0xba, 0x7b1: 0xba, 0x7b2: 0xba, 0x7b3: 0xba, 0x7b4: 0xba, 0x7b5: 0xba, 0x7b6: 0xba, 0x7b7: 0xba, + 0x7b8: 0xba, 0x7b9: 0xba, 0x7ba: 0xba, 0x7bb: 0xba, 0x7bc: 0xba, 0x7bd: 0xba, 0x7be: 0xba, 0x7bf: 0xba, // Block 0x1f, offset 0x7c0 - 0x7c0: 0x0b, 0x7c1: 0x0b, 0x7c2: 0x0b, 0x7c3: 0x0b, 0x7c4: 0x0b, 0x7c5: 0x0b, 0x7c6: 0x0b, 0x7c7: 0x0b, - 0x7c8: 0x0b, 0x7c9: 0x0b, 0x7ca: 0x0b, 0x7cb: 0x0b, 0x7cc: 0x0b, 0x7cd: 0x0b, 0x7ce: 0x0b, 0x7cf: 0x0b, - 0x7d0: 0x0b, 0x7d1: 0x0b, 0x7d2: 0x0b, 0x7d3: 0x0b, 0x7d4: 0x0b, 0x7d5: 0x0b, 0x7d6: 0x0b, 0x7d7: 0x0b, - 0x7d8: 0x0b, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x0b, 0x7dc: 0x0b, 0x7dd: 0x0b, 0x7de: 0x0b, 0x7df: 0x0b, - 0x7e0: 0x0b, 0x7e1: 0x0b, 0x7e2: 0x0b, 0x7e3: 0x0b, 0x7e4: 0x0b, 0x7e5: 0x0b, 0x7e6: 0x0b, 0x7e7: 0x0b, - 0x7e8: 0x0b, 0x7e9: 0x0b, 0x7ea: 0x0b, 0x7eb: 0x0b, 0x7ec: 0x0b, 0x7ed: 0x0b, 0x7ee: 0x0b, 0x7ef: 0x0b, + 0x7d0: 0x0d, 0x7d1: 0x0e, 0x7d2: 0x0f, 0x7d3: 0x10, 0x7d4: 0x11, 0x7d5: 0x0b, 0x7d6: 0x12, 0x7d7: 0x07, + 0x7d8: 0x13, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x14, 0x7dc: 0x0b, 0x7dd: 0x15, 0x7de: 0x16, 0x7df: 0x17, + 0x7e0: 0x07, 0x7e1: 0x07, 0x7e2: 0x07, 0x7e3: 0x07, 0x7e4: 0x07, 0x7e5: 0x07, 0x7e6: 0x07, 0x7e7: 0x07, + 0x7e8: 0x07, 0x7e9: 0x07, 0x7ea: 0x18, 0x7eb: 0x19, 0x7ec: 0x1a, 0x7ed: 0x07, 0x7ee: 0x1b, 0x7ef: 0x1c, 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b, 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b, // Block 0x20, offset 0x800 - 0x800: 0x177, 0x801: 0x178, 0x802: 0xb8, 0x803: 0xb8, 0x804: 0x179, 0x805: 0x179, 0x806: 0x179, 0x807: 0x17a, - 0x808: 0xb8, 0x809: 0xb8, 0x80a: 0xb8, 0x80b: 0xb8, 0x80c: 0xb8, 0x80d: 0xb8, 0x80e: 0xb8, 0x80f: 0xb8, - 0x810: 0xb8, 0x811: 0xb8, 0x812: 0xb8, 0x813: 0xb8, 0x814: 0xb8, 0x815: 0xb8, 0x816: 0xb8, 0x817: 0xb8, - 0x818: 0xb8, 0x819: 0xb8, 0x81a: 0xb8, 0x81b: 0xb8, 0x81c: 0xb8, 0x81d: 0xb8, 0x81e: 0xb8, 0x81f: 0xb8, - 0x820: 0xb8, 0x821: 0xb8, 0x822: 0xb8, 0x823: 0xb8, 0x824: 0xb8, 0x825: 0xb8, 0x826: 0xb8, 0x827: 0xb8, - 0x828: 0xb8, 0x829: 0xb8, 0x82a: 0xb8, 0x82b: 0xb8, 0x82c: 0xb8, 0x82d: 0xb8, 0x82e: 0xb8, 0x82f: 0xb8, - 0x830: 0xb8, 0x831: 0xb8, 0x832: 0xb8, 0x833: 0xb8, 0x834: 0xb8, 0x835: 0xb8, 0x836: 0xb8, 0x837: 0xb8, - 0x838: 0xb8, 0x839: 0xb8, 0x83a: 0xb8, 0x83b: 0xb8, 0x83c: 0xb8, 0x83d: 0xb8, 0x83e: 0xb8, 0x83f: 0xb8, + 0x800: 0x0b, 0x801: 0x0b, 0x802: 0x0b, 0x803: 0x0b, 0x804: 0x0b, 0x805: 0x0b, 0x806: 0x0b, 0x807: 0x0b, + 0x808: 0x0b, 0x809: 0x0b, 0x80a: 0x0b, 0x80b: 0x0b, 0x80c: 0x0b, 0x80d: 0x0b, 0x80e: 0x0b, 0x80f: 0x0b, + 0x810: 0x0b, 0x811: 0x0b, 0x812: 0x0b, 0x813: 0x0b, 0x814: 0x0b, 0x815: 0x0b, 0x816: 0x0b, 0x817: 0x0b, + 0x818: 0x0b, 0x819: 0x0b, 0x81a: 0x0b, 0x81b: 0x0b, 0x81c: 0x0b, 0x81d: 0x0b, 0x81e: 0x0b, 0x81f: 0x0b, + 0x820: 0x0b, 0x821: 
0x0b, 0x822: 0x0b, 0x823: 0x0b, 0x824: 0x0b, 0x825: 0x0b, 0x826: 0x0b, 0x827: 0x0b, + 0x828: 0x0b, 0x829: 0x0b, 0x82a: 0x0b, 0x82b: 0x0b, 0x82c: 0x0b, 0x82d: 0x0b, 0x82e: 0x0b, 0x82f: 0x0b, + 0x830: 0x0b, 0x831: 0x0b, 0x832: 0x0b, 0x833: 0x0b, 0x834: 0x0b, 0x835: 0x0b, 0x836: 0x0b, 0x837: 0x0b, + 0x838: 0x0b, 0x839: 0x0b, 0x83a: 0x0b, 0x83b: 0x0b, 0x83c: 0x0b, 0x83d: 0x0b, 0x83e: 0x0b, 0x83f: 0x0b, // Block 0x21, offset 0x840 - 0x840: 0x0b, 0x841: 0x0b, 0x842: 0x0b, 0x843: 0x0b, 0x844: 0x0b, 0x845: 0x0b, 0x846: 0x0b, 0x847: 0x0b, - 0x848: 0x0b, 0x849: 0x0b, 0x84a: 0x0b, 0x84b: 0x0b, 0x84c: 0x0b, 0x84d: 0x0b, 0x84e: 0x0b, 0x84f: 0x0b, - 0x850: 0x0b, 0x851: 0x0b, 0x852: 0x0b, 0x853: 0x0b, 0x854: 0x0b, 0x855: 0x0b, 0x856: 0x0b, 0x857: 0x0b, - 0x858: 0x0b, 0x859: 0x0b, 0x85a: 0x0b, 0x85b: 0x0b, 0x85c: 0x0b, 0x85d: 0x0b, 0x85e: 0x0b, 0x85f: 0x0b, - 0x860: 0x1e, 0x861: 0x0b, 0x862: 0x0b, 0x863: 0x0b, 0x864: 0x0b, 0x865: 0x0b, 0x866: 0x0b, 0x867: 0x0b, - 0x868: 0x0b, 0x869: 0x0b, 0x86a: 0x0b, 0x86b: 0x0b, 0x86c: 0x0b, 0x86d: 0x0b, 0x86e: 0x0b, 0x86f: 0x0b, - 0x870: 0x0b, 0x871: 0x0b, 0x872: 0x0b, 0x873: 0x0b, 0x874: 0x0b, 0x875: 0x0b, 0x876: 0x0b, 0x877: 0x0b, - 0x878: 0x0b, 0x879: 0x0b, 0x87a: 0x0b, 0x87b: 0x0b, 0x87c: 0x0b, 0x87d: 0x0b, 0x87e: 0x0b, 0x87f: 0x0b, + 0x840: 0x181, 0x841: 0x182, 0x842: 0xba, 0x843: 0xba, 0x844: 0x183, 0x845: 0x183, 0x846: 0x183, 0x847: 0x184, + 0x848: 0xba, 0x849: 0xba, 0x84a: 0xba, 0x84b: 0xba, 0x84c: 0xba, 0x84d: 0xba, 0x84e: 0xba, 0x84f: 0xba, + 0x850: 0xba, 0x851: 0xba, 0x852: 0xba, 0x853: 0xba, 0x854: 0xba, 0x855: 0xba, 0x856: 0xba, 0x857: 0xba, + 0x858: 0xba, 0x859: 0xba, 0x85a: 0xba, 0x85b: 0xba, 0x85c: 0xba, 0x85d: 0xba, 0x85e: 0xba, 0x85f: 0xba, + 0x860: 0xba, 0x861: 0xba, 0x862: 0xba, 0x863: 0xba, 0x864: 0xba, 0x865: 0xba, 0x866: 0xba, 0x867: 0xba, + 0x868: 0xba, 0x869: 0xba, 0x86a: 0xba, 0x86b: 0xba, 0x86c: 0xba, 0x86d: 0xba, 0x86e: 0xba, 0x86f: 0xba, + 0x870: 0xba, 0x871: 0xba, 0x872: 0xba, 0x873: 0xba, 0x874: 0xba, 0x875: 0xba, 0x876: 0xba, 0x877: 0xba, + 0x878: 0xba, 0x879: 0xba, 0x87a: 0xba, 0x87b: 0xba, 0x87c: 0xba, 0x87d: 0xba, 0x87e: 0xba, 0x87f: 0xba, // Block 0x22, offset 0x880 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b, 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b, + 0x890: 0x0b, 0x891: 0x0b, 0x892: 0x0b, 0x893: 0x0b, 0x894: 0x0b, 0x895: 0x0b, 0x896: 0x0b, 0x897: 0x0b, + 0x898: 0x0b, 0x899: 0x0b, 0x89a: 0x0b, 0x89b: 0x0b, 0x89c: 0x0b, 0x89d: 0x0b, 0x89e: 0x0b, 0x89f: 0x0b, + 0x8a0: 0x1f, 0x8a1: 0x0b, 0x8a2: 0x0b, 0x8a3: 0x0b, 0x8a4: 0x0b, 0x8a5: 0x0b, 0x8a6: 0x0b, 0x8a7: 0x0b, + 0x8a8: 0x0b, 0x8a9: 0x0b, 0x8aa: 0x0b, 0x8ab: 0x0b, 0x8ac: 0x0b, 0x8ad: 0x0b, 0x8ae: 0x0b, 0x8af: 0x0b, + 0x8b0: 0x0b, 0x8b1: 0x0b, 0x8b2: 0x0b, 0x8b3: 0x0b, 0x8b4: 0x0b, 0x8b5: 0x0b, 0x8b6: 0x0b, 0x8b7: 0x0b, + 0x8b8: 0x0b, 0x8b9: 0x0b, 0x8ba: 0x0b, 0x8bb: 0x0b, 0x8bc: 0x0b, 0x8bd: 0x0b, 0x8be: 0x0b, 0x8bf: 0x0b, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0b, 0x8c1: 0x0b, 0x8c2: 0x0b, 0x8c3: 0x0b, 0x8c4: 0x0b, 0x8c5: 0x0b, 0x8c6: 0x0b, 0x8c7: 0x0b, + 0x8c8: 0x0b, 0x8c9: 0x0b, 0x8ca: 0x0b, 0x8cb: 0x0b, 0x8cc: 0x0b, 0x8cd: 0x0b, 0x8ce: 0x0b, 0x8cf: 0x0b, } -// idnaSparseOffset: 256 entries, 512 bytes -var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x34, 0x3f, 0x4b, 0x5c, 0x60, 0x6f, 0x74, 0x7b, 0x87, 0x95, 0xa3, 0xa8, 0xb1, 0xc1, 0xcf, 0xdc, 0xe8, 0xf9, 0x103, 0x10a, 0x117, 0x128, 0x12f, 0x13a, 0x149, 0x157, 0x161, 0x163, 0x167, 0x169, 
0x175, 0x180, 0x188, 0x18e, 0x194, 0x199, 0x19e, 0x1a1, 0x1a5, 0x1ab, 0x1b0, 0x1bc, 0x1c6, 0x1cc, 0x1dd, 0x1e7, 0x1ea, 0x1f2, 0x1f5, 0x202, 0x20a, 0x20e, 0x215, 0x21d, 0x22d, 0x239, 0x23b, 0x245, 0x251, 0x25d, 0x269, 0x271, 0x276, 0x280, 0x291, 0x295, 0x2a0, 0x2a4, 0x2ad, 0x2b5, 0x2bb, 0x2c0, 0x2c3, 0x2c6, 0x2ca, 0x2d0, 0x2d4, 0x2d8, 0x2de, 0x2e5, 0x2eb, 0x2f3, 0x2fa, 0x305, 0x30f, 0x313, 0x316, 0x31c, 0x320, 0x322, 0x325, 0x327, 0x32a, 0x334, 0x337, 0x346, 0x34a, 0x34f, 0x352, 0x356, 0x35b, 0x360, 0x366, 0x36c, 0x37b, 0x381, 0x385, 0x394, 0x399, 0x3a1, 0x3ab, 0x3b6, 0x3be, 0x3cf, 0x3d8, 0x3e8, 0x3f5, 0x3ff, 0x404, 0x411, 0x415, 0x41a, 0x41c, 0x420, 0x422, 0x426, 0x42f, 0x435, 0x439, 0x449, 0x453, 0x458, 0x45b, 0x461, 0x468, 0x46d, 0x471, 0x477, 0x47c, 0x485, 0x48a, 0x490, 0x497, 0x49e, 0x4a5, 0x4a9, 0x4ae, 0x4b1, 0x4b6, 0x4c2, 0x4c8, 0x4cd, 0x4d4, 0x4dc, 0x4e1, 0x4e5, 0x4f5, 0x4fc, 0x500, 0x504, 0x50b, 0x50e, 0x511, 0x515, 0x519, 0x51f, 0x528, 0x534, 0x53b, 0x544, 0x54c, 0x553, 0x561, 0x56e, 0x57b, 0x584, 0x588, 0x596, 0x59e, 0x5a9, 0x5b2, 0x5b8, 0x5c0, 0x5c9, 0x5d3, 0x5d6, 0x5e2, 0x5e5, 0x5ea, 0x5ed, 0x5f7, 0x600, 0x60c, 0x60f, 0x614, 0x617, 0x61a, 0x61d, 0x624, 0x62b, 0x62f, 0x63a, 0x63d, 0x643, 0x648, 0x64c, 0x64f, 0x652, 0x655, 0x65a, 0x664, 0x667, 0x66b, 0x67a, 0x686, 0x68a, 0x68f, 0x694, 0x698, 0x69d, 0x6a6, 0x6b1, 0x6b7, 0x6bf, 0x6c3, 0x6c7, 0x6cd, 0x6d3, 0x6d8, 0x6db, 0x6e9, 0x6f0, 0x6f3, 0x6f6, 0x6fa, 0x700, 0x705, 0x70f, 0x714, 0x717, 0x71a, 0x71d, 0x720, 0x724, 0x727, 0x737, 0x748, 0x74d, 0x74f, 0x751} +// idnaSparseOffset: 264 entries, 528 bytes +var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x34, 0x3f, 0x4b, 0x4f, 0x5e, 0x63, 0x6b, 0x77, 0x85, 0x8a, 0x93, 0xa3, 0xb1, 0xbd, 0xc9, 0xda, 0xe4, 0xeb, 0xf8, 0x109, 0x110, 0x11b, 0x12a, 0x138, 0x142, 0x144, 0x149, 0x14c, 0x14f, 0x151, 0x15d, 0x168, 0x170, 0x176, 0x17c, 0x181, 0x186, 0x189, 0x18d, 0x193, 0x198, 0x1a4, 0x1ae, 0x1b4, 0x1c5, 0x1cf, 0x1d2, 0x1da, 0x1dd, 0x1ea, 0x1f2, 0x1f6, 0x1fd, 0x205, 0x215, 0x221, 0x223, 0x22d, 0x239, 0x245, 0x251, 0x259, 0x25e, 0x268, 0x279, 0x27d, 0x288, 0x28c, 0x295, 0x29d, 0x2a3, 0x2a8, 0x2ab, 0x2af, 0x2b5, 0x2b9, 0x2bd, 0x2c3, 0x2ca, 0x2d0, 0x2d8, 0x2df, 0x2ea, 0x2f4, 0x2f8, 0x2fb, 0x301, 0x305, 0x307, 0x30a, 0x30c, 0x30f, 0x319, 0x31c, 0x32b, 0x32f, 0x334, 0x337, 0x33b, 0x340, 0x345, 0x34b, 0x351, 0x360, 0x366, 0x36a, 0x379, 0x37e, 0x386, 0x390, 0x39b, 0x3a3, 0x3b4, 0x3bd, 0x3cd, 0x3da, 0x3e4, 0x3e9, 0x3f6, 0x3fa, 0x3ff, 0x401, 0x405, 0x407, 0x40b, 0x414, 0x41a, 0x41e, 0x42e, 0x438, 0x43d, 0x440, 0x446, 0x44d, 0x452, 0x456, 0x45c, 0x461, 0x46a, 0x46f, 0x475, 0x47c, 0x483, 0x48a, 0x48e, 0x493, 0x496, 0x49b, 0x4a7, 0x4ad, 0x4b2, 0x4b9, 0x4c1, 0x4c6, 0x4ca, 0x4da, 0x4e1, 0x4e5, 0x4e9, 0x4f0, 0x4f2, 0x4f5, 0x4f8, 0x4fc, 0x500, 0x506, 0x50f, 0x51b, 0x522, 0x52b, 0x533, 0x53a, 0x548, 0x555, 0x562, 0x56b, 0x56f, 0x57d, 0x585, 0x590, 0x599, 0x59f, 0x5a7, 0x5b0, 0x5ba, 0x5bd, 0x5c9, 0x5cc, 0x5d1, 0x5de, 0x5e7, 0x5f3, 0x5f6, 0x600, 0x609, 0x615, 0x622, 0x62a, 0x62d, 0x632, 0x635, 0x638, 0x63b, 0x642, 0x649, 0x64d, 0x658, 0x65b, 0x661, 0x666, 0x66a, 0x66d, 0x670, 0x673, 0x676, 0x679, 0x67e, 0x688, 0x68b, 0x68f, 0x69e, 0x6aa, 0x6ae, 0x6b3, 0x6b8, 0x6bc, 0x6c1, 0x6ca, 0x6d5, 0x6db, 0x6e3, 0x6e7, 0x6eb, 0x6f1, 0x6f7, 0x6fc, 0x6ff, 0x70f, 0x716, 0x719, 0x71c, 0x720, 0x726, 0x72b, 0x730, 0x735, 0x738, 0x73d, 0x740, 0x743, 0x747, 0x74b, 0x74e, 0x75e, 0x76f, 0x774, 0x776, 0x778} -// idnaSparseValues: 1876 entries, 7504 bytes -var idnaSparseValues = [1876]valueRange{ +// idnaSparseValues: 1915 
entries, 7660 bytes +var idnaSparseValues = [1915]valueRange{ // Block 0x0, offset 0x0 {value: 0x0000, lo: 0x07}, {value: 0xe105, lo: 0x80, hi: 0x96}, @@ -2382,7 +2415,7 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0008, lo: 0xb9, hi: 0xbf}, // Block 0x3, offset 0x25 {value: 0x0000, lo: 0x01}, - {value: 0x1308, lo: 0x80, hi: 0xbf}, + {value: 0x3308, lo: 0x80, hi: 0xbf}, // Block 0x4, offset 0x27 {value: 0x0000, lo: 0x04}, {value: 0x03f5, lo: 0x80, hi: 0x8f}, @@ -2407,155 +2440,123 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0x8b, hi: 0x8c}, {value: 0x0018, lo: 0x8d, hi: 0x8f}, {value: 0x0040, lo: 0x90, hi: 0x90}, - {value: 0x1308, lo: 0x91, hi: 0xbd}, - {value: 0x0018, lo: 0xbe, hi: 0xbe}, - {value: 0x1308, lo: 0xbf, hi: 0xbf}, + {value: 0x3308, lo: 0x91, hi: 0xbd}, + {value: 0x0818, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, // Block 0x7, offset 0x3f {value: 0x0000, lo: 0x0b}, - {value: 0x0018, lo: 0x80, hi: 0x80}, - {value: 0x1308, lo: 0x81, hi: 0x82}, - {value: 0x0018, lo: 0x83, hi: 0x83}, - {value: 0x1308, lo: 0x84, hi: 0x85}, - {value: 0x0018, lo: 0x86, hi: 0x86}, - {value: 0x1308, lo: 0x87, hi: 0x87}, + {value: 0x0818, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x82}, + {value: 0x0818, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x85}, + {value: 0x0818, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xaa}, + {value: 0x0808, lo: 0x90, hi: 0xaa}, {value: 0x0040, lo: 0xab, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xb4}, + {value: 0x0808, lo: 0xb0, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xbf}, // Block 0x8, offset 0x4b - {value: 0x0000, lo: 0x10}, - {value: 0x0018, lo: 0x80, hi: 0x80}, - {value: 0x0208, lo: 0x81, hi: 0x87}, - {value: 0x0408, lo: 0x88, hi: 0x88}, - {value: 0x0208, lo: 0x89, hi: 0x8a}, - {value: 0x1308, lo: 0x8b, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa9}, - {value: 0x0018, lo: 0xaa, hi: 0xad}, - {value: 0x0208, lo: 0xae, hi: 0xaf}, - {value: 0x1308, lo: 0xb0, hi: 0xb0}, - {value: 0x0408, lo: 0xb1, hi: 0xb3}, - {value: 0x0008, lo: 0xb4, hi: 0xb4}, - {value: 0x0429, lo: 0xb5, hi: 0xb5}, - {value: 0x0451, lo: 0xb6, hi: 0xb6}, - {value: 0x0479, lo: 0xb7, hi: 0xb7}, - {value: 0x04a1, lo: 0xb8, hi: 0xb8}, - {value: 0x0208, lo: 0xb9, hi: 0xbf}, - // Block 0x9, offset 0x5c {value: 0x0000, lo: 0x03}, - {value: 0x0208, lo: 0x80, hi: 0x87}, - {value: 0x0408, lo: 0x88, hi: 0x99}, - {value: 0x0208, lo: 0x9a, hi: 0xbf}, - // Block 0xa, offset 0x60 + {value: 0x0a08, lo: 0x80, hi: 0x87}, + {value: 0x0c08, lo: 0x88, hi: 0x99}, + {value: 0x0a08, lo: 0x9a, hi: 0xbf}, + // Block 0x9, offset 0x4f {value: 0x0000, lo: 0x0e}, - {value: 0x1308, lo: 0x80, hi: 0x8a}, + {value: 0x3308, lo: 0x80, hi: 0x8a}, {value: 0x0040, lo: 0x8b, hi: 0x8c}, - {value: 0x0408, lo: 0x8d, hi: 0x8d}, - {value: 0x0208, lo: 0x8e, hi: 0x98}, - {value: 0x0408, lo: 0x99, hi: 0x9b}, - {value: 0x0208, lo: 0x9c, hi: 0xaa}, - {value: 0x0408, lo: 0xab, hi: 0xac}, - {value: 0x0208, lo: 0xad, hi: 0xb0}, - {value: 0x0408, lo: 0xb1, hi: 0xb1}, - {value: 0x0208, lo: 0xb2, hi: 0xb2}, - {value: 0x0408, lo: 0xb3, hi: 0xb4}, - {value: 0x0208, lo: 0xb5, hi: 0xb7}, - {value: 0x0408, lo: 0xb8, hi: 0xb9}, - {value: 0x0208, lo: 0xba, hi: 0xbf}, - // Block 0xb, offset 0x6f + {value: 0x0c08, lo: 0x8d, hi: 0x8d}, + {value: 0x0a08, lo: 0x8e, hi: 0x98}, + {value: 0x0c08, lo: 0x99, hi: 0x9b}, + {value: 0x0a08, lo: 0x9c, hi: 0xaa}, + {value: 0x0c08, lo: 0xab, hi: 0xac}, + {value: 0x0a08, lo: 
0xad, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb1}, + {value: 0x0a08, lo: 0xb2, hi: 0xb2}, + {value: 0x0c08, lo: 0xb3, hi: 0xb4}, + {value: 0x0a08, lo: 0xb5, hi: 0xb7}, + {value: 0x0c08, lo: 0xb8, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbf}, + // Block 0xa, offset 0x5e {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0xa5}, - {value: 0x1308, lo: 0xa6, hi: 0xb0}, - {value: 0x0008, lo: 0xb1, hi: 0xb1}, + {value: 0x0808, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xb0}, + {value: 0x0808, lo: 0xb1, hi: 0xb1}, {value: 0x0040, lo: 0xb2, hi: 0xbf}, - // Block 0xc, offset 0x74 - {value: 0x0000, lo: 0x06}, - {value: 0x0008, lo: 0x80, hi: 0x89}, - {value: 0x0208, lo: 0x8a, hi: 0xaa}, - {value: 0x1308, lo: 0xab, hi: 0xb3}, - {value: 0x0008, lo: 0xb4, hi: 0xb5}, - {value: 0x0018, lo: 0xb6, hi: 0xba}, + // Block 0xb, offset 0x63 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x89}, + {value: 0x0a08, lo: 0x8a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 0xbf}, - // Block 0xd, offset 0x7b + // Block 0xc, offset 0x6b {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0x95}, - {value: 0x1308, lo: 0x96, hi: 0x99}, - {value: 0x0008, lo: 0x9a, hi: 0x9a}, - {value: 0x1308, lo: 0x9b, hi: 0xa3}, - {value: 0x0008, lo: 0xa4, hi: 0xa4}, - {value: 0x1308, lo: 0xa5, hi: 0xa7}, - {value: 0x0008, lo: 0xa8, hi: 0xa8}, - {value: 0x1308, lo: 0xa9, hi: 0xad}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x99}, + {value: 0x0808, lo: 0x9a, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa3}, + {value: 0x0808, lo: 0xa4, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa7}, + {value: 0x0808, lo: 0xa8, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xad}, {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xbe}, + {value: 0x0818, lo: 0xb0, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xe, offset 0x87 - {value: 0x0000, lo: 0x0d}, - {value: 0x0408, lo: 0x80, hi: 0x80}, - {value: 0x0208, lo: 0x81, hi: 0x85}, - {value: 0x0408, lo: 0x86, hi: 0x87}, - {value: 0x0208, lo: 0x88, hi: 0x88}, - {value: 0x0408, lo: 0x89, hi: 0x89}, - {value: 0x0208, lo: 0x8a, hi: 0x93}, - {value: 0x0408, lo: 0x94, hi: 0x94}, - {value: 0x0208, lo: 0x95, hi: 0x95}, - {value: 0x0008, lo: 0x96, hi: 0x98}, - {value: 0x1308, lo: 0x99, hi: 0x9b}, - {value: 0x0040, lo: 0x9c, hi: 0x9d}, - {value: 0x0018, lo: 0x9e, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0xbf}, - // Block 0xf, offset 0x95 + // Block 0xd, offset 0x77 {value: 0x0000, lo: 0x0d}, {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0208, lo: 0xa0, hi: 0xa9}, - {value: 0x0408, lo: 0xaa, hi: 0xac}, - {value: 0x0008, lo: 0xad, hi: 0xad}, - {value: 0x0408, lo: 0xae, hi: 0xae}, - {value: 0x0208, lo: 0xaf, hi: 0xb0}, - {value: 0x0408, lo: 0xb1, hi: 0xb2}, - {value: 0x0208, lo: 0xb3, hi: 0xb4}, + {value: 0x0a08, lo: 0xa0, hi: 0xa9}, + {value: 0x0c08, lo: 0xaa, hi: 0xac}, + {value: 0x0808, lo: 0xad, hi: 0xad}, + {value: 0x0c08, lo: 0xae, hi: 0xae}, + {value: 0x0a08, lo: 0xaf, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb2}, + {value: 0x0a08, lo: 0xb3, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xb5}, - {value: 0x0208, lo: 0xb6, hi: 0xb8}, - {value: 0x0408, lo: 0xb9, hi: 0xb9}, - {value: 0x0208, lo: 0xba, hi: 0xbd}, + {value: 0x0a08, lo: 0xb6, hi: 0xb8}, + {value: 0x0c08, lo: 0xb9, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbd}, {value: 0x0040, lo: 0xbe, hi: 0xbf}, 
- // Block 0x10, offset 0xa3 + // Block 0xe, offset 0x85 {value: 0x0000, lo: 0x04}, {value: 0x0040, lo: 0x80, hi: 0x93}, - {value: 0x1308, lo: 0x94, hi: 0xa1}, - {value: 0x0040, lo: 0xa2, hi: 0xa2}, - {value: 0x1308, lo: 0xa3, hi: 0xbf}, - // Block 0x11, offset 0xa8 + {value: 0x3308, lo: 0x94, hi: 0xa1}, + {value: 0x0840, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xbf}, + // Block 0xf, offset 0x8a {value: 0x0000, lo: 0x08}, - {value: 0x1308, lo: 0x80, hi: 0x82}, - {value: 0x1008, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, {value: 0x0008, lo: 0x84, hi: 0xb9}, - {value: 0x1308, lo: 0xba, hi: 0xba}, - {value: 0x1008, lo: 0xbb, hi: 0xbb}, - {value: 0x1308, lo: 0xbc, hi: 0xbc}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, {value: 0x0008, lo: 0xbd, hi: 0xbd}, - {value: 0x1008, lo: 0xbe, hi: 0xbf}, - // Block 0x12, offset 0xb1 + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x10, offset 0x93 {value: 0x0000, lo: 0x0f}, - {value: 0x1308, lo: 0x80, hi: 0x80}, - {value: 0x1008, lo: 0x81, hi: 0x82}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x82}, {value: 0x0040, lo: 0x83, hi: 0x85}, - {value: 0x1008, lo: 0x86, hi: 0x88}, + {value: 0x3008, lo: 0x86, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x1008, lo: 0x8a, hi: 0x8c}, - {value: 0x1b08, lo: 0x8d, hi: 0x8d}, + {value: 0x3008, lo: 0x8a, hi: 0x8c}, + {value: 0x3b08, lo: 0x8d, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x90}, {value: 0x0040, lo: 0x91, hi: 0x96}, - {value: 0x1008, lo: 0x97, hi: 0x97}, + {value: 0x3008, lo: 0x97, hi: 0x97}, {value: 0x0040, lo: 0x98, hi: 0xa5}, {value: 0x0008, lo: 0xa6, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 0xbf}, - // Block 0x13, offset 0xc1 + // Block 0x11, offset 0xa3 {value: 0x0000, lo: 0x0d}, - {value: 0x1308, lo: 0x80, hi: 0x80}, - {value: 0x1008, lo: 0x81, hi: 0x83}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x83}, {value: 0x0040, lo: 0x84, hi: 0x84}, {value: 0x0008, lo: 0x85, hi: 0x8c}, {value: 0x0040, lo: 0x8d, hi: 0x8d}, @@ -2566,25 +2567,24 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0008, lo: 0xaa, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbc}, {value: 0x0008, lo: 0xbd, hi: 0xbd}, - {value: 0x1308, lo: 0xbe, hi: 0xbf}, - // Block 0x14, offset 0xcf - {value: 0x0000, lo: 0x0c}, - {value: 0x0040, lo: 0x80, hi: 0x80}, - {value: 0x1308, lo: 0x81, hi: 0x81}, - {value: 0x1008, lo: 0x82, hi: 0x83}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x12, offset 0xb1 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, {value: 0x0040, lo: 0x84, hi: 0x84}, {value: 0x0008, lo: 0x85, hi: 0x8c}, {value: 0x0040, lo: 0x8d, hi: 0x8d}, {value: 0x0008, lo: 0x8e, hi: 0x90}, {value: 0x0040, lo: 0x91, hi: 0x91}, {value: 0x0008, lo: 0x92, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbc}, + {value: 0x3b08, lo: 0xbb, hi: 0xbc}, {value: 0x0008, lo: 0xbd, hi: 0xbd}, - {value: 0x1008, lo: 0xbe, hi: 0xbf}, - // Block 0x15, offset 0xdc + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x13, offset 0xbd {value: 0x0000, lo: 0x0b}, {value: 0x0040, lo: 0x80, hi: 0x81}, - {value: 0x1008, lo: 0x82, hi: 0x83}, + {value: 0x3008, lo: 0x82, hi: 0x83}, {value: 0x0040, lo: 0x84, hi: 0x84}, {value: 0x0008, lo: 0x85, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0x99}, @@ -2594,50 +2594,50 @@ var idnaSparseValues 
= [1876]valueRange{ {value: 0x0040, lo: 0xbc, hi: 0xbc}, {value: 0x0008, lo: 0xbd, hi: 0xbd}, {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0x16, offset 0xe8 + // Block 0x14, offset 0xc9 {value: 0x0000, lo: 0x10}, {value: 0x0008, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x89}, - {value: 0x1b08, lo: 0x8a, hi: 0x8a}, + {value: 0x3b08, lo: 0x8a, hi: 0x8a}, {value: 0x0040, lo: 0x8b, hi: 0x8e}, - {value: 0x1008, lo: 0x8f, hi: 0x91}, - {value: 0x1308, lo: 0x92, hi: 0x94}, + {value: 0x3008, lo: 0x8f, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x94}, {value: 0x0040, lo: 0x95, hi: 0x95}, - {value: 0x1308, lo: 0x96, hi: 0x96}, + {value: 0x3308, lo: 0x96, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0x97}, - {value: 0x1008, lo: 0x98, hi: 0x9f}, + {value: 0x3008, lo: 0x98, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xa5}, {value: 0x0008, lo: 0xa6, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xb1}, - {value: 0x1008, lo: 0xb2, hi: 0xb3}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, {value: 0x0018, lo: 0xb4, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0x17, offset 0xf9 + // Block 0x15, offset 0xda {value: 0x0000, lo: 0x09}, {value: 0x0040, lo: 0x80, hi: 0x80}, {value: 0x0008, lo: 0x81, hi: 0xb0}, - {value: 0x1308, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb1, hi: 0xb1}, {value: 0x0008, lo: 0xb2, hi: 0xb2}, {value: 0x08f1, lo: 0xb3, hi: 0xb3}, - {value: 0x1308, lo: 0xb4, hi: 0xb9}, - {value: 0x1b08, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xb4, hi: 0xb9}, + {value: 0x3b08, lo: 0xba, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 0xbe}, {value: 0x0018, lo: 0xbf, hi: 0xbf}, - // Block 0x18, offset 0x103 + // Block 0x16, offset 0xe4 {value: 0x0000, lo: 0x06}, {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x1308, lo: 0x87, hi: 0x8e}, + {value: 0x3308, lo: 0x87, hi: 0x8e}, {value: 0x0018, lo: 0x8f, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0018, lo: 0x9a, hi: 0x9b}, {value: 0x0040, lo: 0x9c, hi: 0xbf}, - // Block 0x19, offset 0x10a + // Block 0x17, offset 0xeb {value: 0x0000, lo: 0x0c}, {value: 0x0008, lo: 0x80, hi: 0x84}, {value: 0x0040, lo: 0x85, hi: 0x85}, {value: 0x0008, lo: 0x86, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x87}, - {value: 0x1308, lo: 0x88, hi: 0x8d}, + {value: 0x3308, lo: 0x88, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9b}, @@ -2645,76 +2645,76 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0999, lo: 0x9d, hi: 0x9d}, {value: 0x0008, lo: 0x9e, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0x1a, offset 0x117 + // Block 0x18, offset 0xf8 {value: 0x0000, lo: 0x10}, {value: 0x0008, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x8a}, {value: 0x0008, lo: 0x8b, hi: 0x8b}, {value: 0xe03d, lo: 0x8c, hi: 0x8c}, {value: 0x0018, lo: 0x8d, hi: 0x97}, - {value: 0x1308, lo: 0x98, hi: 0x99}, + {value: 0x3308, lo: 0x98, hi: 0x99}, {value: 0x0018, lo: 0x9a, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xa9}, {value: 0x0018, lo: 0xaa, hi: 0xb4}, - {value: 0x1308, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, {value: 0x0018, lo: 0xb6, hi: 0xb6}, - {value: 0x1308, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, {value: 0x0018, lo: 0xb8, hi: 0xb8}, - {value: 0x1308, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xb9, hi: 0xb9}, {value: 0x0018, lo: 0xba, hi: 0xbd}, - {value: 0x1008, lo: 0xbe, hi: 0xbf}, - // Block 0x1b, offset 0x128 + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x19, offset 0x109 {value: 0x0000, lo: 0x06}, {value: 0x0018, lo: 0x80, hi: 0x85}, - 
{value: 0x1308, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x86, hi: 0x86}, {value: 0x0018, lo: 0x87, hi: 0x8c}, {value: 0x0040, lo: 0x8d, hi: 0x8d}, {value: 0x0018, lo: 0x8e, hi: 0x9a}, {value: 0x0040, lo: 0x9b, hi: 0xbf}, - // Block 0x1c, offset 0x12f + // Block 0x1a, offset 0x110 {value: 0x0000, lo: 0x0a}, {value: 0x0008, lo: 0x80, hi: 0xaa}, - {value: 0x1008, lo: 0xab, hi: 0xac}, - {value: 0x1308, lo: 0xad, hi: 0xb0}, - {value: 0x1008, lo: 0xb1, hi: 0xb1}, - {value: 0x1308, lo: 0xb2, hi: 0xb7}, - {value: 0x1008, lo: 0xb8, hi: 0xb8}, - {value: 0x1b08, lo: 0xb9, hi: 0xba}, - {value: 0x1008, lo: 0xbb, hi: 0xbc}, - {value: 0x1308, lo: 0xbd, hi: 0xbe}, + {value: 0x3008, lo: 0xab, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbe}, {value: 0x0008, lo: 0xbf, hi: 0xbf}, - // Block 0x1d, offset 0x13a + // Block 0x1b, offset 0x11b {value: 0x0000, lo: 0x0e}, {value: 0x0008, lo: 0x80, hi: 0x89}, {value: 0x0018, lo: 0x8a, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x95}, - {value: 0x1008, lo: 0x96, hi: 0x97}, - {value: 0x1308, lo: 0x98, hi: 0x99}, + {value: 0x3008, lo: 0x96, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, {value: 0x0008, lo: 0x9a, hi: 0x9d}, - {value: 0x1308, lo: 0x9e, hi: 0xa0}, + {value: 0x3308, lo: 0x9e, hi: 0xa0}, {value: 0x0008, lo: 0xa1, hi: 0xa1}, - {value: 0x1008, lo: 0xa2, hi: 0xa4}, + {value: 0x3008, lo: 0xa2, hi: 0xa4}, {value: 0x0008, lo: 0xa5, hi: 0xa6}, - {value: 0x1008, lo: 0xa7, hi: 0xad}, + {value: 0x3008, lo: 0xa7, hi: 0xad}, {value: 0x0008, lo: 0xae, hi: 0xb0}, - {value: 0x1308, lo: 0xb1, hi: 0xb4}, + {value: 0x3308, lo: 0xb1, hi: 0xb4}, {value: 0x0008, lo: 0xb5, hi: 0xbf}, - // Block 0x1e, offset 0x149 + // Block 0x1c, offset 0x12a {value: 0x0000, lo: 0x0d}, {value: 0x0008, lo: 0x80, hi: 0x81}, - {value: 0x1308, lo: 0x82, hi: 0x82}, - {value: 0x1008, lo: 0x83, hi: 0x84}, - {value: 0x1308, lo: 0x85, hi: 0x86}, - {value: 0x1008, lo: 0x87, hi: 0x8c}, - {value: 0x1308, lo: 0x8d, hi: 0x8d}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x8c}, + {value: 0x3308, lo: 0x8d, hi: 0x8d}, {value: 0x0008, lo: 0x8e, hi: 0x8e}, - {value: 0x1008, lo: 0x8f, hi: 0x8f}, + {value: 0x3008, lo: 0x8f, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x1008, lo: 0x9a, hi: 0x9c}, - {value: 0x1308, lo: 0x9d, hi: 0x9d}, + {value: 0x3008, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, {value: 0x0018, lo: 0x9e, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0x1f, offset 0x157 + // Block 0x1d, offset 0x138 {value: 0x0000, lo: 0x09}, {value: 0x0040, lo: 0x80, hi: 0x86}, {value: 0x055d, lo: 0x87, hi: 0x87}, @@ -2725,18 +2725,27 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0018, lo: 0xbb, hi: 0xbb}, {value: 0xe105, lo: 0xbc, hi: 0xbc}, {value: 0x0008, lo: 0xbd, hi: 0xbf}, - // Block 0x20, offset 0x161 + // Block 0x1e, offset 0x142 {value: 0x0000, lo: 0x01}, {value: 0x0018, lo: 0x80, hi: 0xbf}, - // Block 0x21, offset 0x163 - {value: 0x0000, lo: 0x03}, + // Block 0x1f, offset 0x144 + {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0xa0}, - {value: 0x0018, lo: 0xa1, hi: 0xbf}, - // Block 0x22, offset 0x167 + {value: 0x2018, lo: 0xa1, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 
0xbf}, + // Block 0x20, offset 0x149 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa7}, + {value: 0x2018, lo: 0xa8, hi: 0xbf}, + // Block 0x21, offset 0x14c + {value: 0x0000, lo: 0x02}, + {value: 0x2018, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0xbf}, + // Block 0x22, offset 0x14f {value: 0x0000, lo: 0x01}, {value: 0x0008, lo: 0x80, hi: 0xbf}, - // Block 0x23, offset 0x169 + // Block 0x23, offset 0x151 {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x89}, @@ -2749,7 +2758,7 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0008, lo: 0x9a, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x24, offset 0x175 + // Block 0x24, offset 0x15d {value: 0x0000, lo: 0x0a}, {value: 0x0008, lo: 0x80, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x89}, @@ -2761,7 +2770,7 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0xb6, hi: 0xb7}, {value: 0x0008, lo: 0xb8, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x25, offset 0x180 + // Block 0x25, offset 0x168 {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0x80}, {value: 0x0040, lo: 0x81, hi: 0x81}, @@ -2770,146 +2779,146 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0008, lo: 0x88, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0x97}, {value: 0x0008, lo: 0x98, hi: 0xbf}, - // Block 0x26, offset 0x188 + // Block 0x26, offset 0x170 {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0x90}, {value: 0x0040, lo: 0x91, hi: 0x91}, {value: 0x0008, lo: 0x92, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0x97}, {value: 0x0008, lo: 0x98, hi: 0xbf}, - // Block 0x27, offset 0x18e + // Block 0x27, offset 0x176 {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0x9a}, {value: 0x0040, lo: 0x9b, hi: 0x9c}, - {value: 0x1308, lo: 0x9d, hi: 0x9f}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0x28, offset 0x194 + // Block 0x28, offset 0x17c {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x29, offset 0x199 + // Block 0x29, offset 0x181 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xb7}, {value: 0xe045, lo: 0xb8, hi: 0xbd}, {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0x2a, offset 0x19e + // Block 0x2a, offset 0x186 {value: 0x0000, lo: 0x02}, {value: 0x0018, lo: 0x80, hi: 0x80}, {value: 0x0008, lo: 0x81, hi: 0xbf}, - // Block 0x2b, offset 0x1a1 + // Block 0x2b, offset 0x189 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xac}, {value: 0x0018, lo: 0xad, hi: 0xae}, {value: 0x0008, lo: 0xaf, hi: 0xbf}, - // Block 0x2c, offset 0x1a5 + // Block 0x2c, offset 0x18d {value: 0x0000, lo: 0x05}, {value: 0x0040, lo: 0x80, hi: 0x80}, {value: 0x0008, lo: 0x81, hi: 0x9a}, {value: 0x0018, lo: 0x9b, hi: 0x9c}, {value: 0x0040, lo: 0x9d, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x2d, offset 0x1ab + // Block 0x2d, offset 0x193 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xaa}, {value: 0x0018, lo: 0xab, hi: 0xb0}, {value: 0x0008, lo: 0xb1, hi: 0xb8}, {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0x2e, offset 0x1b0 + // Block 0x2e, offset 0x198 {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0x8c}, {value: 0x0040, lo: 0x8d, hi: 0x8d}, {value: 0x0008, lo: 0x8e, hi: 0x91}, - {value: 0x1308, lo: 0x92, hi: 0x93}, - {value: 
0x1b08, lo: 0x94, hi: 0x94}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x3b08, lo: 0x94, hi: 0x94}, {value: 0x0040, lo: 0x95, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xb1}, - {value: 0x1308, lo: 0xb2, hi: 0xb3}, - {value: 0x1b08, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, {value: 0x0018, lo: 0xb5, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x2f, offset 0x1bc + // Block 0x2f, offset 0x1a4 {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x91}, - {value: 0x1308, lo: 0x92, hi: 0x93}, + {value: 0x3308, lo: 0x92, hi: 0x93}, {value: 0x0040, lo: 0x94, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xac}, {value: 0x0040, lo: 0xad, hi: 0xad}, {value: 0x0008, lo: 0xae, hi: 0xb0}, {value: 0x0040, lo: 0xb1, hi: 0xb1}, - {value: 0x1308, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xbf}, - // Block 0x30, offset 0x1c6 + // Block 0x30, offset 0x1ae {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0xb3}, - {value: 0x1340, lo: 0xb4, hi: 0xb5}, - {value: 0x1008, lo: 0xb6, hi: 0xb6}, - {value: 0x1308, lo: 0xb7, hi: 0xbd}, - {value: 0x1008, lo: 0xbe, hi: 0xbf}, - // Block 0x31, offset 0x1cc + {value: 0x3340, lo: 0xb4, hi: 0xb5}, + {value: 0x3008, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x31, offset 0x1b4 {value: 0x0000, lo: 0x10}, - {value: 0x1008, lo: 0x80, hi: 0x85}, - {value: 0x1308, lo: 0x86, hi: 0x86}, - {value: 0x1008, lo: 0x87, hi: 0x88}, - {value: 0x1308, lo: 0x89, hi: 0x91}, - {value: 0x1b08, lo: 0x92, hi: 0x92}, - {value: 0x1308, lo: 0x93, hi: 0x93}, + {value: 0x3008, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x91}, + {value: 0x3b08, lo: 0x92, hi: 0x92}, + {value: 0x3308, lo: 0x93, hi: 0x93}, {value: 0x0018, lo: 0x94, hi: 0x96}, {value: 0x0008, lo: 0x97, hi: 0x97}, {value: 0x0018, lo: 0x98, hi: 0x9b}, {value: 0x0008, lo: 0x9c, hi: 0x9c}, - {value: 0x1308, lo: 0x9d, hi: 0x9d}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xa9}, {value: 0x0040, lo: 0xaa, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x32, offset 0x1dd + // Block 0x32, offset 0x1c5 {value: 0x0000, lo: 0x09}, {value: 0x0018, lo: 0x80, hi: 0x85}, {value: 0x0040, lo: 0x86, hi: 0x86}, {value: 0x0218, lo: 0x87, hi: 0x87}, {value: 0x0018, lo: 0x88, hi: 0x8a}, - {value: 0x13c0, lo: 0x8b, hi: 0x8d}, + {value: 0x33c0, lo: 0x8b, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9f}, {value: 0x0208, lo: 0xa0, hi: 0xbf}, - // Block 0x33, offset 0x1e7 + // Block 0x33, offset 0x1cf {value: 0x0000, lo: 0x02}, {value: 0x0208, lo: 0x80, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbf}, - // Block 0x34, offset 0x1ea + // Block 0x34, offset 0x1d2 {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0x84}, - {value: 0x1308, lo: 0x85, hi: 0x86}, + {value: 0x3308, lo: 0x85, hi: 0x86}, {value: 0x0208, lo: 0x87, hi: 0xa8}, - {value: 0x1308, lo: 0xa9, hi: 0xa9}, + {value: 0x3308, lo: 0xa9, hi: 0xa9}, {value: 0x0208, lo: 0xaa, hi: 0xaa}, {value: 0x0040, lo: 0xab, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x35, offset 0x1f2 + // Block 0x35, offset 0x1da {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0x36, offset 
0x1f5 + // Block 0x36, offset 0x1dd {value: 0x0000, lo: 0x0c}, {value: 0x0008, lo: 0x80, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x1308, lo: 0xa0, hi: 0xa2}, - {value: 0x1008, lo: 0xa3, hi: 0xa6}, - {value: 0x1308, lo: 0xa7, hi: 0xa8}, - {value: 0x1008, lo: 0xa9, hi: 0xab}, + {value: 0x3308, lo: 0xa0, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xab}, {value: 0x0040, lo: 0xac, hi: 0xaf}, - {value: 0x1008, lo: 0xb0, hi: 0xb1}, - {value: 0x1308, lo: 0xb2, hi: 0xb2}, - {value: 0x1008, lo: 0xb3, hi: 0xb8}, - {value: 0x1308, lo: 0xb9, hi: 0xbb}, + {value: 0x3008, lo: 0xb0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xbb}, {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0x37, offset 0x202 + // Block 0x37, offset 0x1ea {value: 0x0000, lo: 0x07}, {value: 0x0018, lo: 0x80, hi: 0x80}, {value: 0x0040, lo: 0x81, hi: 0x83}, @@ -2918,12 +2927,12 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0xae, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0x38, offset 0x20a + // Block 0x38, offset 0x1f2 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xab}, {value: 0x0040, lo: 0xac, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x39, offset 0x20e + // Block 0x39, offset 0x1f6 {value: 0x0000, lo: 0x06}, {value: 0x0008, lo: 0x80, hi: 0x89}, {value: 0x0040, lo: 0x8a, hi: 0x8f}, @@ -2931,33 +2940,33 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0028, lo: 0x9a, hi: 0x9a}, {value: 0x0040, lo: 0x9b, hi: 0x9d}, {value: 0x0018, lo: 0x9e, hi: 0xbf}, - // Block 0x3a, offset 0x215 + // Block 0x3a, offset 0x1fd {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0x96}, - {value: 0x1308, lo: 0x97, hi: 0x98}, - {value: 0x1008, lo: 0x99, hi: 0x9a}, - {value: 0x1308, lo: 0x9b, hi: 0x9b}, + {value: 0x3308, lo: 0x97, hi: 0x98}, + {value: 0x3008, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9b}, {value: 0x0040, lo: 0x9c, hi: 0x9d}, {value: 0x0018, lo: 0x9e, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x3b, offset 0x21d + // Block 0x3b, offset 0x205 {value: 0x0000, lo: 0x0f}, {value: 0x0008, lo: 0x80, hi: 0x94}, - {value: 0x1008, lo: 0x95, hi: 0x95}, - {value: 0x1308, lo: 0x96, hi: 0x96}, - {value: 0x1008, lo: 0x97, hi: 0x97}, - {value: 0x1308, lo: 0x98, hi: 0x9e}, + {value: 0x3008, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x1b08, lo: 0xa0, hi: 0xa0}, - {value: 0x1008, lo: 0xa1, hi: 0xa1}, - {value: 0x1308, lo: 0xa2, hi: 0xa2}, - {value: 0x1008, lo: 0xa3, hi: 0xa4}, - {value: 0x1308, lo: 0xa5, hi: 0xac}, - {value: 0x1008, lo: 0xad, hi: 0xb2}, - {value: 0x1308, lo: 0xb3, hi: 0xbc}, + {value: 0x3b08, lo: 0xa0, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xac}, + {value: 0x3008, lo: 0xad, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbe}, - {value: 0x1308, lo: 0xbf, hi: 0xbf}, - // Block 0x3c, offset 0x22d + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x3c, offset 0x215 {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0x89}, {value: 0x0040, lo: 0x8a, hi: 0x8f}, @@ -2967,78 +2976,78 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0008, 
lo: 0xa7, hi: 0xa7}, {value: 0x0018, lo: 0xa8, hi: 0xad}, {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x1308, lo: 0xb0, hi: 0xbd}, - {value: 0x1318, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xb0, hi: 0xbd}, + {value: 0x3318, lo: 0xbe, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x3d, offset 0x239 + // Block 0x3d, offset 0x221 {value: 0x0000, lo: 0x01}, {value: 0x0040, lo: 0x80, hi: 0xbf}, - // Block 0x3e, offset 0x23b + // Block 0x3e, offset 0x223 {value: 0x0000, lo: 0x09}, - {value: 0x1308, lo: 0x80, hi: 0x83}, - {value: 0x1008, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3008, lo: 0x84, hi: 0x84}, {value: 0x0008, lo: 0x85, hi: 0xb3}, - {value: 0x1308, lo: 0xb4, hi: 0xb4}, - {value: 0x1008, lo: 0xb5, hi: 0xb5}, - {value: 0x1308, lo: 0xb6, hi: 0xba}, - {value: 0x1008, lo: 0xbb, hi: 0xbb}, - {value: 0x1308, lo: 0xbc, hi: 0xbc}, - {value: 0x1008, lo: 0xbd, hi: 0xbf}, - // Block 0x3f, offset 0x245 + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x3f, offset 0x22d {value: 0x0000, lo: 0x0b}, - {value: 0x1008, lo: 0x80, hi: 0x81}, - {value: 0x1308, lo: 0x82, hi: 0x82}, - {value: 0x1008, lo: 0x83, hi: 0x83}, - {value: 0x1808, lo: 0x84, hi: 0x84}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x3808, lo: 0x84, hi: 0x84}, {value: 0x0008, lo: 0x85, hi: 0x8b}, {value: 0x0040, lo: 0x8c, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0018, lo: 0x9a, hi: 0xaa}, - {value: 0x1308, lo: 0xab, hi: 0xb3}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, {value: 0x0018, lo: 0xb4, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0x40, offset 0x251 + // Block 0x40, offset 0x239 {value: 0x0000, lo: 0x0b}, - {value: 0x1308, lo: 0x80, hi: 0x81}, - {value: 0x1008, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, {value: 0x0008, lo: 0x83, hi: 0xa0}, - {value: 0x1008, lo: 0xa1, hi: 0xa1}, - {value: 0x1308, lo: 0xa2, hi: 0xa5}, - {value: 0x1008, lo: 0xa6, hi: 0xa7}, - {value: 0x1308, lo: 0xa8, hi: 0xa9}, - {value: 0x1808, lo: 0xaa, hi: 0xaa}, - {value: 0x1b08, lo: 0xab, hi: 0xab}, - {value: 0x1308, lo: 0xac, hi: 0xad}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3808, lo: 0xaa, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, {value: 0x0008, lo: 0xae, hi: 0xbf}, - // Block 0x41, offset 0x25d + // Block 0x41, offset 0x245 {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0xa5}, - {value: 0x1308, lo: 0xa6, hi: 0xa6}, - {value: 0x1008, lo: 0xa7, hi: 0xa7}, - {value: 0x1308, lo: 0xa8, hi: 0xa9}, - {value: 0x1008, lo: 0xaa, hi: 0xac}, - {value: 0x1308, lo: 0xad, hi: 0xad}, - {value: 0x1008, lo: 0xae, hi: 0xae}, - {value: 0x1308, lo: 0xaf, hi: 0xb1}, - {value: 0x1808, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xa6, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3008, lo: 0xaa, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3808, lo: 0xb2, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xbb}, {value: 0x0018, lo: 0xbc, hi: 0xbf}, - // Block 
0x42, offset 0x269 + // Block 0x42, offset 0x251 {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0xa3}, - {value: 0x1008, lo: 0xa4, hi: 0xab}, - {value: 0x1308, lo: 0xac, hi: 0xb3}, - {value: 0x1008, lo: 0xb4, hi: 0xb5}, - {value: 0x1308, lo: 0xb6, hi: 0xb7}, + {value: 0x3008, lo: 0xa4, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xba}, {value: 0x0018, lo: 0xbb, hi: 0xbf}, - // Block 0x43, offset 0x271 + // Block 0x43, offset 0x259 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x89}, {value: 0x0040, lo: 0x8a, hi: 0x8c}, {value: 0x0008, lo: 0x8d, hi: 0xbd}, {value: 0x0018, lo: 0xbe, hi: 0xbf}, - // Block 0x44, offset 0x276 + // Block 0x44, offset 0x25e {value: 0x0000, lo: 0x09}, {value: 0x0e29, lo: 0x80, hi: 0x80}, {value: 0x0e41, lo: 0x81, hi: 0x81}, @@ -3049,30 +3058,30 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0eb9, lo: 0x87, hi: 0x87}, {value: 0x057d, lo: 0x88, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0xbf}, - // Block 0x45, offset 0x280 + // Block 0x45, offset 0x268 {value: 0x0000, lo: 0x10}, {value: 0x0018, lo: 0x80, hi: 0x87}, {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x1308, lo: 0x90, hi: 0x92}, + {value: 0x3308, lo: 0x90, hi: 0x92}, {value: 0x0018, lo: 0x93, hi: 0x93}, - {value: 0x1308, lo: 0x94, hi: 0xa0}, - {value: 0x1008, lo: 0xa1, hi: 0xa1}, - {value: 0x1308, lo: 0xa2, hi: 0xa8}, + {value: 0x3308, lo: 0x94, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa8}, {value: 0x0008, lo: 0xa9, hi: 0xac}, - {value: 0x1308, lo: 0xad, hi: 0xad}, + {value: 0x3308, lo: 0xad, hi: 0xad}, {value: 0x0008, lo: 0xae, hi: 0xb1}, - {value: 0x1008, lo: 0xb2, hi: 0xb3}, - {value: 0x1308, lo: 0xb4, hi: 0xb4}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, {value: 0x0008, lo: 0xb5, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xb7}, - {value: 0x1308, lo: 0xb8, hi: 0xb9}, + {value: 0x3008, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x46, offset 0x291 + // Block 0x46, offset 0x279 {value: 0x0000, lo: 0x03}, - {value: 0x1308, lo: 0x80, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xba}, - {value: 0x1308, lo: 0xbb, hi: 0xbf}, - // Block 0x47, offset 0x295 + {value: 0x3308, lo: 0x80, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0x47, offset 0x27d {value: 0x0000, lo: 0x0a}, {value: 0x0008, lo: 0x80, hi: 0x87}, {value: 0xe045, lo: 0x88, hi: 0x8f}, @@ -3084,12 +3093,12 @@ var idnaSparseValues = [1876]valueRange{ {value: 0xe045, lo: 0xa8, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb7}, {value: 0xe045, lo: 0xb8, hi: 0xbf}, - // Block 0x48, offset 0x2a0 + // Block 0x48, offset 0x288 {value: 0x0000, lo: 0x03}, {value: 0x0040, lo: 0x80, hi: 0x8f}, - {value: 0x1318, lo: 0x90, hi: 0xb0}, + {value: 0x3318, lo: 0x90, hi: 0xb0}, {value: 0x0040, lo: 0xb1, hi: 0xbf}, - // Block 0x49, offset 0x2a4 + // Block 0x49, offset 0x28c {value: 0x0000, lo: 0x08}, {value: 0x0018, lo: 0x80, hi: 0x82}, {value: 0x0040, lo: 0x83, hi: 0x83}, @@ -3099,7 +3108,7 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0018, lo: 0x8a, hi: 0x8b}, {value: 0x0040, lo: 0x8c, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0xbf}, - // Block 0x4a, offset 0x2ad + // Block 0x4a, offset 0x295 {value: 0x0000, lo: 0x07}, {value: 0x0018, lo: 0x80, hi: 0xab}, {value: 0x24f1, lo: 0xac, hi: 0xac}, @@ -3108,72 +3117,68 @@ var 
idnaSparseValues = [1876]valueRange{ {value: 0x2579, lo: 0xaf, hi: 0xaf}, {value: 0x25b1, lo: 0xb0, hi: 0xb0}, {value: 0x0018, lo: 0xb1, hi: 0xbf}, - // Block 0x4b, offset 0x2b5 + // Block 0x4b, offset 0x29d {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0x9f}, {value: 0x0080, lo: 0xa0, hi: 0xa0}, {value: 0x0018, lo: 0xa1, hi: 0xad}, {value: 0x0080, lo: 0xae, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0x4c, offset 0x2bb + // Block 0x4c, offset 0x2a3 {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0xa8}, {value: 0x09c5, lo: 0xa9, hi: 0xa9}, {value: 0x09e5, lo: 0xaa, hi: 0xaa}, {value: 0x0018, lo: 0xab, hi: 0xbf}, - // Block 0x4d, offset 0x2c0 - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x4e, offset 0x2c3 + // Block 0x4d, offset 0x2a8 {value: 0x0000, lo: 0x02}, {value: 0x0018, lo: 0x80, hi: 0xa6}, {value: 0x0040, lo: 0xa7, hi: 0xbf}, - // Block 0x4f, offset 0x2c6 + // Block 0x4e, offset 0x2ab {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0x8b}, {value: 0x28c1, lo: 0x8c, hi: 0x8c}, {value: 0x0018, lo: 0x8d, hi: 0xbf}, - // Block 0x50, offset 0x2ca + // Block 0x4f, offset 0x2af {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0xb3}, {value: 0x0e66, lo: 0xb4, hi: 0xb4}, {value: 0x292a, lo: 0xb5, hi: 0xb5}, {value: 0x0e86, lo: 0xb6, hi: 0xb6}, {value: 0x0018, lo: 0xb7, hi: 0xbf}, - // Block 0x51, offset 0x2d0 + // Block 0x50, offset 0x2b5 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0x9b}, {value: 0x2941, lo: 0x9c, hi: 0x9c}, {value: 0x0018, lo: 0x9d, hi: 0xbf}, - // Block 0x52, offset 0x2d4 + // Block 0x51, offset 0x2b9 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xb5}, {value: 0x0018, lo: 0xb6, hi: 0xbf}, - // Block 0x53, offset 0x2d8 + // Block 0x52, offset 0x2bd {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0x97}, {value: 0x0018, lo: 0x98, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbc}, {value: 0x0018, lo: 0xbd, hi: 0xbf}, - // Block 0x54, offset 0x2de + // Block 0x53, offset 0x2c3 {value: 0x0000, lo: 0x06}, {value: 0x0018, lo: 0x80, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x0018, lo: 0x8a, hi: 0x91}, - {value: 0x0040, lo: 0x92, hi: 0xab}, + {value: 0x0018, lo: 0x8a, hi: 0x92}, + {value: 0x0040, lo: 0x93, hi: 0xab}, {value: 0x0018, lo: 0xac, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0x55, offset 0x2e5 + // Block 0x54, offset 0x2ca {value: 0x0000, lo: 0x05}, {value: 0xe185, lo: 0x80, hi: 0x8f}, {value: 0x03f5, lo: 0x90, hi: 0x9f}, {value: 0x0ea5, lo: 0xa0, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x56, offset 0x2eb + // Block 0x55, offset 0x2d0 {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0xa5}, {value: 0x0040, lo: 0xa6, hi: 0xa6}, @@ -3182,15 +3187,15 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0008, lo: 0xad, hi: 0xad}, {value: 0x0040, lo: 0xae, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x57, offset 0x2f3 + // Block 0x56, offset 0x2d8 {value: 0x0000, lo: 0x06}, {value: 0x0008, lo: 0x80, hi: 0xa7}, {value: 0x0040, lo: 0xa8, hi: 0xae}, {value: 0xe075, lo: 0xaf, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb0}, {value: 0x0040, lo: 0xb1, hi: 0xbe}, - {value: 0x1b08, lo: 0xbf, hi: 0xbf}, - // Block 0x58, offset 0x2fa + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0x57, offset 0x2df {value: 0x0000, lo: 0x0a}, {value: 0x0008, lo: 0x80, 
hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0x9f}, @@ -3202,7 +3207,7 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0xb7, hi: 0xb7}, {value: 0x0008, lo: 0xb8, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x59, offset 0x305 + // Block 0x58, offset 0x2ea {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x87}, @@ -3212,62 +3217,62 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0x97, hi: 0x97}, {value: 0x0008, lo: 0x98, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x1308, lo: 0xa0, hi: 0xbf}, - // Block 0x5a, offset 0x30f + {value: 0x3308, lo: 0xa0, hi: 0xbf}, + // Block 0x59, offset 0x2f4 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0xae}, {value: 0x0008, lo: 0xaf, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0x5b, offset 0x313 + // Block 0x5a, offset 0x2f8 {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0x84}, - {value: 0x0040, lo: 0x85, hi: 0xbf}, - // Block 0x5c, offset 0x316 + {value: 0x0018, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0x5b, offset 0x2fb {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9a}, {value: 0x0018, lo: 0x9b, hi: 0x9e}, {value: 0x0edd, lo: 0x9f, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xbf}, - // Block 0x5d, offset 0x31c + // Block 0x5c, offset 0x301 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0xb2}, {value: 0x0efd, lo: 0xb3, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xbf}, - // Block 0x5e, offset 0x320 + // Block 0x5d, offset 0x305 {value: 0x0020, lo: 0x01}, {value: 0x0f1d, lo: 0x80, hi: 0xbf}, - // Block 0x5f, offset 0x322 + // Block 0x5e, offset 0x307 {value: 0x0020, lo: 0x02}, {value: 0x171d, lo: 0x80, hi: 0x8f}, {value: 0x18fd, lo: 0x90, hi: 0xbf}, - // Block 0x60, offset 0x325 + // Block 0x5f, offset 0x30a {value: 0x0020, lo: 0x01}, {value: 0x1efd, lo: 0x80, hi: 0xbf}, - // Block 0x61, offset 0x327 + // Block 0x60, offset 0x30c {value: 0x0000, lo: 0x02}, {value: 0x0040, lo: 0x80, hi: 0x80}, {value: 0x0008, lo: 0x81, hi: 0xbf}, - // Block 0x62, offset 0x32a + // Block 0x61, offset 0x30f {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0x98}, - {value: 0x1308, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x99, hi: 0x9a}, {value: 0x29e2, lo: 0x9b, hi: 0x9b}, {value: 0x2a0a, lo: 0x9c, hi: 0x9c}, {value: 0x0008, lo: 0x9d, hi: 0x9e}, {value: 0x2a31, lo: 0x9f, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xa0}, {value: 0x0008, lo: 0xa1, hi: 0xbf}, - // Block 0x63, offset 0x334 + // Block 0x62, offset 0x319 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xbe}, {value: 0x2a69, lo: 0xbf, hi: 0xbf}, - // Block 0x64, offset 0x337 + // Block 0x63, offset 0x31c {value: 0x0000, lo: 0x0e}, {value: 0x0040, lo: 0x80, hi: 0x84}, - {value: 0x0008, lo: 0x85, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xb0}, + {value: 0x0008, lo: 0x85, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, {value: 0x2a1d, lo: 0xb1, hi: 0xb1}, {value: 0x2a3d, lo: 0xb2, hi: 0xb2}, {value: 0x2a5d, lo: 0xb3, hi: 0xb3}, @@ -3279,150 +3284,150 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x2afd, lo: 0xba, hi: 0xbb}, {value: 0x2b1d, lo: 0xbc, hi: 0xbd}, {value: 0x2afd, lo: 0xbe, hi: 0xbf}, - // Block 0x65, offset 0x346 + // Block 0x64, offset 0x32b {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0xa3}, {value: 0x0040, lo: 0xa4, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x66, offset 0x34a + // Block 0x65, 
offset 0x32f {value: 0x0030, lo: 0x04}, {value: 0x2aa2, lo: 0x80, hi: 0x9d}, {value: 0x305a, lo: 0x9e, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0x9f}, {value: 0x30a2, lo: 0xa0, hi: 0xbf}, - // Block 0x67, offset 0x34f + // Block 0x66, offset 0x334 {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0xbf}, - // Block 0x68, offset 0x352 + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0x67, offset 0x337 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0x8c}, {value: 0x0040, lo: 0x8d, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0xbf}, - // Block 0x69, offset 0x356 + // Block 0x68, offset 0x33b {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0xbd}, {value: 0x0018, lo: 0xbe, hi: 0xbf}, - // Block 0x6a, offset 0x35b + // Block 0x69, offset 0x340 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x8c}, {value: 0x0018, lo: 0x8d, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0xab}, {value: 0x0040, lo: 0xac, hi: 0xbf}, - // Block 0x6b, offset 0x360 + // Block 0x6a, offset 0x345 {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0xa5}, {value: 0x0018, lo: 0xa6, hi: 0xaf}, - {value: 0x1308, lo: 0xb0, hi: 0xb1}, + {value: 0x3308, lo: 0xb0, hi: 0xb1}, {value: 0x0018, lo: 0xb2, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbf}, - // Block 0x6c, offset 0x366 + // Block 0x6b, offset 0x34b {value: 0x0000, lo: 0x05}, {value: 0x0040, lo: 0x80, hi: 0xb6}, {value: 0x0008, lo: 0xb7, hi: 0xb7}, {value: 0x2009, lo: 0xb8, hi: 0xb8}, {value: 0x6e89, lo: 0xb9, hi: 0xb9}, {value: 0x0008, lo: 0xba, hi: 0xbf}, - // Block 0x6d, offset 0x36c + // Block 0x6c, offset 0x351 {value: 0x0000, lo: 0x0e}, {value: 0x0008, lo: 0x80, hi: 0x81}, - {value: 0x1308, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x82, hi: 0x82}, {value: 0x0008, lo: 0x83, hi: 0x85}, - {value: 0x1b08, lo: 0x86, hi: 0x86}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, {value: 0x0008, lo: 0x87, hi: 0x8a}, - {value: 0x1308, lo: 0x8b, hi: 0x8b}, + {value: 0x3308, lo: 0x8b, hi: 0x8b}, {value: 0x0008, lo: 0x8c, hi: 0xa2}, - {value: 0x1008, lo: 0xa3, hi: 0xa4}, - {value: 0x1308, lo: 0xa5, hi: 0xa6}, - {value: 0x1008, lo: 0xa7, hi: 0xa7}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, {value: 0x0018, lo: 0xa8, hi: 0xab}, {value: 0x0040, lo: 0xac, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x6e, offset 0x37b + // Block 0x6d, offset 0x360 {value: 0x0000, lo: 0x05}, {value: 0x0208, lo: 0x80, hi: 0xb1}, {value: 0x0108, lo: 0xb2, hi: 0xb2}, {value: 0x0008, lo: 0xb3, hi: 0xb3}, {value: 0x0018, lo: 0xb4, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbf}, - // Block 0x6f, offset 0x381 + // Block 0x6e, offset 0x366 {value: 0x0000, lo: 0x03}, - {value: 0x1008, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x80, hi: 0x81}, {value: 0x0008, lo: 0x82, hi: 0xb3}, - {value: 0x1008, lo: 0xb4, hi: 0xbf}, - // Block 0x70, offset 0x385 + {value: 0x3008, lo: 0xb4, hi: 0xbf}, + // Block 0x6f, offset 0x36a {value: 0x0000, lo: 0x0e}, - {value: 0x1008, lo: 0x80, hi: 0x83}, - {value: 0x1b08, lo: 0x84, hi: 0x84}, - {value: 0x1308, lo: 0x85, hi: 0x85}, + {value: 0x3008, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x85}, {value: 0x0040, lo: 0x86, hi: 0x8d}, {value: 0x0018, lo: 0x8e, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 
0x9f}, - {value: 0x1308, lo: 0xa0, hi: 0xb1}, + {value: 0x3308, lo: 0xa0, hi: 0xb1}, {value: 0x0008, lo: 0xb2, hi: 0xb7}, {value: 0x0018, lo: 0xb8, hi: 0xba}, {value: 0x0008, lo: 0xbb, hi: 0xbb}, {value: 0x0018, lo: 0xbc, hi: 0xbc}, {value: 0x0008, lo: 0xbd, hi: 0xbd}, {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0x71, offset 0x394 + // Block 0x70, offset 0x379 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xa5}, - {value: 0x1308, lo: 0xa6, hi: 0xad}, + {value: 0x3308, lo: 0xa6, hi: 0xad}, {value: 0x0018, lo: 0xae, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x72, offset 0x399 + // Block 0x71, offset 0x37e {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x1308, lo: 0x87, hi: 0x91}, - {value: 0x1008, lo: 0x92, hi: 0x92}, - {value: 0x1808, lo: 0x93, hi: 0x93}, + {value: 0x3308, lo: 0x87, hi: 0x91}, + {value: 0x3008, lo: 0x92, hi: 0x92}, + {value: 0x3808, lo: 0x93, hi: 0x93}, {value: 0x0040, lo: 0x94, hi: 0x9e}, {value: 0x0018, lo: 0x9f, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0x73, offset 0x3a1 + // Block 0x72, offset 0x386 {value: 0x0000, lo: 0x09}, - {value: 0x1308, lo: 0x80, hi: 0x82}, - {value: 0x1008, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, {value: 0x0008, lo: 0x84, hi: 0xb2}, - {value: 0x1308, lo: 0xb3, hi: 0xb3}, - {value: 0x1008, lo: 0xb4, hi: 0xb5}, - {value: 0x1308, lo: 0xb6, hi: 0xb9}, - {value: 0x1008, lo: 0xba, hi: 0xbb}, - {value: 0x1308, lo: 0xbc, hi: 0xbc}, - {value: 0x1008, lo: 0xbd, hi: 0xbf}, - // Block 0x74, offset 0x3ab + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb9}, + {value: 0x3008, lo: 0xba, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x73, offset 0x390 {value: 0x0000, lo: 0x0a}, - {value: 0x1808, lo: 0x80, hi: 0x80}, + {value: 0x3808, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x8e}, {value: 0x0008, lo: 0x8f, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9d}, {value: 0x0018, lo: 0x9e, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xa4}, - {value: 0x1308, lo: 0xa5, hi: 0xa5}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, {value: 0x0008, lo: 0xa6, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x75, offset 0x3b6 + // Block 0x74, offset 0x39b {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0xa8}, - {value: 0x1308, lo: 0xa9, hi: 0xae}, - {value: 0x1008, lo: 0xaf, hi: 0xb0}, - {value: 0x1308, lo: 0xb1, hi: 0xb2}, - {value: 0x1008, lo: 0xb3, hi: 0xb4}, - {value: 0x1308, lo: 0xb5, hi: 0xb6}, + {value: 0x3308, lo: 0xa9, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x76, offset 0x3be + // Block 0x75, offset 0x3a3 {value: 0x0000, lo: 0x10}, {value: 0x0008, lo: 0x80, hi: 0x82}, - {value: 0x1308, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x83, hi: 0x83}, {value: 0x0008, lo: 0x84, hi: 0x8b}, - {value: 0x1308, lo: 0x8c, hi: 0x8c}, - {value: 0x1008, lo: 0x8d, hi: 0x8d}, + {value: 0x3308, lo: 0x8c, hi: 0x8c}, + {value: 0x3008, lo: 0x8d, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9b}, @@ -3430,38 +3435,38 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0008, lo: 0xa0, hi: 0xb6}, {value: 0x0018, lo: 0xb7, hi: 0xb9}, {value: 0x0008, lo: 
0xba, hi: 0xba}, - {value: 0x1008, lo: 0xbb, hi: 0xbb}, - {value: 0x1308, lo: 0xbc, hi: 0xbc}, - {value: 0x1008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbd}, {value: 0x0008, lo: 0xbe, hi: 0xbf}, - // Block 0x77, offset 0x3cf + // Block 0x76, offset 0x3b4 {value: 0x0000, lo: 0x08}, {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x1308, lo: 0xb0, hi: 0xb0}, + {value: 0x3308, lo: 0xb0, hi: 0xb0}, {value: 0x0008, lo: 0xb1, hi: 0xb1}, - {value: 0x1308, lo: 0xb2, hi: 0xb4}, + {value: 0x3308, lo: 0xb2, hi: 0xb4}, {value: 0x0008, lo: 0xb5, hi: 0xb6}, - {value: 0x1308, lo: 0xb7, hi: 0xb8}, + {value: 0x3308, lo: 0xb7, hi: 0xb8}, {value: 0x0008, lo: 0xb9, hi: 0xbd}, - {value: 0x1308, lo: 0xbe, hi: 0xbf}, - // Block 0x78, offset 0x3d8 + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x77, offset 0x3bd {value: 0x0000, lo: 0x0f}, {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0x1308, lo: 0x81, hi: 0x81}, + {value: 0x3308, lo: 0x81, hi: 0x81}, {value: 0x0008, lo: 0x82, hi: 0x82}, {value: 0x0040, lo: 0x83, hi: 0x9a}, {value: 0x0008, lo: 0x9b, hi: 0x9d}, {value: 0x0018, lo: 0x9e, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xaa}, - {value: 0x1008, lo: 0xab, hi: 0xab}, - {value: 0x1308, lo: 0xac, hi: 0xad}, - {value: 0x1008, lo: 0xae, hi: 0xaf}, + {value: 0x3008, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb1}, {value: 0x0008, lo: 0xb2, hi: 0xb4}, - {value: 0x1008, lo: 0xb5, hi: 0xb5}, - {value: 0x1b08, lo: 0xb6, hi: 0xb6}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3b08, lo: 0xb6, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x79, offset 0x3e8 + // Block 0x78, offset 0x3cd {value: 0x0000, lo: 0x0c}, {value: 0x0040, lo: 0x80, hi: 0x80}, {value: 0x0008, lo: 0x81, hi: 0x86}, @@ -3475,7 +3480,7 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0008, lo: 0xa8, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x7a, offset 0x3f5 + // Block 0x79, offset 0x3da {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x9a}, {value: 0x0018, lo: 0x9b, hi: 0x9b}, @@ -3486,54 +3491,54 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0008, lo: 0xa0, hi: 0xa5}, {value: 0x0040, lo: 0xa6, hi: 0xaf}, {value: 0x4495, lo: 0xb0, hi: 0xbf}, - // Block 0x7b, offset 0x3ff + // Block 0x7a, offset 0x3e4 {value: 0x0000, lo: 0x04}, {value: 0x44b5, lo: 0x80, hi: 0x8f}, {value: 0x44d5, lo: 0x90, hi: 0x9f}, {value: 0x44f5, lo: 0xa0, hi: 0xaf}, {value: 0x44d5, lo: 0xb0, hi: 0xbf}, - // Block 0x7c, offset 0x404 + // Block 0x7b, offset 0x3e9 {value: 0x0000, lo: 0x0c}, {value: 0x0008, lo: 0x80, hi: 0xa2}, - {value: 0x1008, lo: 0xa3, hi: 0xa4}, - {value: 0x1308, lo: 0xa5, hi: 0xa5}, - {value: 0x1008, lo: 0xa6, hi: 0xa7}, - {value: 0x1308, lo: 0xa8, hi: 0xa8}, - {value: 0x1008, lo: 0xa9, hi: 0xaa}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xaa}, {value: 0x0018, lo: 0xab, hi: 0xab}, - {value: 0x1008, lo: 0xac, hi: 0xac}, - {value: 0x1b08, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3b08, lo: 0xad, hi: 0xad}, {value: 0x0040, lo: 0xae, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x7d, offset 0x411 + // Block 0x7c, offset 0x3f6 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 
0x80, hi: 0xa3}, {value: 0x0040, lo: 0xa4, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0x7e, offset 0x415 + // Block 0x7d, offset 0x3fa {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x8a}, {value: 0x0018, lo: 0x8b, hi: 0xbb}, {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0x7f, offset 0x41a + // Block 0x7e, offset 0x3ff {value: 0x0020, lo: 0x01}, {value: 0x4515, lo: 0x80, hi: 0xbf}, - // Block 0x80, offset 0x41c + // Block 0x7f, offset 0x401 {value: 0x0020, lo: 0x03}, {value: 0x4d15, lo: 0x80, hi: 0x94}, {value: 0x4ad5, lo: 0x95, hi: 0x95}, {value: 0x4fb5, lo: 0x96, hi: 0xbf}, - // Block 0x81, offset 0x420 + // Block 0x80, offset 0x405 {value: 0x0020, lo: 0x01}, {value: 0x54f5, lo: 0x80, hi: 0xbf}, - // Block 0x82, offset 0x422 + // Block 0x81, offset 0x407 {value: 0x0020, lo: 0x03}, {value: 0x5cf5, lo: 0x80, hi: 0x84}, {value: 0x5655, lo: 0x85, hi: 0x85}, {value: 0x5d95, lo: 0x86, hi: 0xbf}, - // Block 0x83, offset 0x426 + // Block 0x82, offset 0x40b {value: 0x0020, lo: 0x08}, {value: 0x6b55, lo: 0x80, hi: 0x8f}, {value: 0x6d15, lo: 0x90, hi: 0x90}, @@ -3543,19 +3548,19 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0xae, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xaf}, {value: 0x70d5, lo: 0xb0, hi: 0xbf}, - // Block 0x84, offset 0x42f + // Block 0x83, offset 0x414 {value: 0x0020, lo: 0x05}, {value: 0x72d5, lo: 0x80, hi: 0xad}, {value: 0x6535, lo: 0xae, hi: 0xae}, {value: 0x7895, lo: 0xaf, hi: 0xb5}, {value: 0x6f55, lo: 0xb6, hi: 0xb6}, {value: 0x7975, lo: 0xb7, hi: 0xbf}, - // Block 0x85, offset 0x435 + // Block 0x84, offset 0x41a {value: 0x0028, lo: 0x03}, {value: 0x7c21, lo: 0x80, hi: 0x82}, {value: 0x7be1, lo: 0x83, hi: 0x83}, {value: 0x7c99, lo: 0x84, hi: 0xbf}, - // Block 0x86, offset 0x439 + // Block 0x85, offset 0x41e {value: 0x0038, lo: 0x0f}, {value: 0x9db1, lo: 0x80, hi: 0x83}, {value: 0x9e59, lo: 0x84, hi: 0x85}, @@ -3572,7 +3577,7 @@ var idnaSparseValues = [1876]valueRange{ {value: 0xa869, lo: 0xbc, hi: 0xbc}, {value: 0xa7f9, lo: 0xbd, hi: 0xbd}, {value: 0xa8d9, lo: 0xbe, hi: 0xbf}, - // Block 0x87, offset 0x449 + // Block 0x86, offset 0x42e {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x8b}, {value: 0x0040, lo: 0x8c, hi: 0x8c}, @@ -3583,24 +3588,24 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0008, lo: 0xbc, hi: 0xbd}, {value: 0x0040, lo: 0xbe, hi: 0xbe}, {value: 0x0008, lo: 0xbf, hi: 0xbf}, - // Block 0x88, offset 0x453 + // Block 0x87, offset 0x438 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0xbf}, - // Block 0x89, offset 0x458 + // Block 0x88, offset 0x43d {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 0xbf}, - // Block 0x8a, offset 0x45b + // Block 0x89, offset 0x440 {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0x82}, {value: 0x0040, lo: 0x83, hi: 0x86}, {value: 0x0018, lo: 0x87, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xb6}, {value: 0x0018, lo: 0xb7, hi: 0xbf}, - // Block 0x8b, offset 0x461 + // Block 0x8a, offset 0x446 {value: 0x0000, lo: 0x06}, {value: 0x0018, lo: 0x80, hi: 0x8e}, {value: 0x0040, lo: 0x8f, hi: 0x8f}, @@ -3608,31 +3613,31 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0x9c, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xa0}, {value: 0x0040, lo: 0xa1, hi: 0xbf}, - // Block 0x8c, offset 0x468 + // Block 0x8b, offset 0x44d {value: 0x0000, lo: 0x04}, {value: 
0x0040, lo: 0x80, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0xbc}, - {value: 0x1308, lo: 0xbd, hi: 0xbd}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0x8d, offset 0x46d + // Block 0x8c, offset 0x452 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0x9c}, {value: 0x0040, lo: 0x9d, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x8e, offset 0x471 + // Block 0x8d, offset 0x456 {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0x90}, {value: 0x0040, lo: 0x91, hi: 0x9f}, - {value: 0x1308, lo: 0xa0, hi: 0xa0}, + {value: 0x3308, lo: 0xa0, hi: 0xa0}, {value: 0x0018, lo: 0xa1, hi: 0xbb}, {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0x8f, offset 0x477 + // Block 0x8e, offset 0x45c {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xa3}, - {value: 0x0040, lo: 0xa4, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x90, offset 0x47c + {value: 0x0040, lo: 0xa4, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xbf}, + // Block 0x8f, offset 0x461 {value: 0x0000, lo: 0x08}, {value: 0x0008, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x81}, @@ -3640,22 +3645,22 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0018, lo: 0x8a, hi: 0x8a}, {value: 0x0040, lo: 0x8b, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0xb5}, - {value: 0x1308, lo: 0xb6, hi: 0xba}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 0xbf}, - // Block 0x91, offset 0x485 + // Block 0x90, offset 0x46a {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0x9e}, {value: 0x0018, lo: 0x9f, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x92, offset 0x48a + // Block 0x91, offset 0x46f {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0x83}, {value: 0x0040, lo: 0x84, hi: 0x87}, {value: 0x0008, lo: 0x88, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0xbf}, - // Block 0x93, offset 0x490 + // Block 0x92, offset 0x475 {value: 0x0000, lo: 0x06}, {value: 0xe145, lo: 0x80, hi: 0x87}, {value: 0xe1c5, lo: 0x88, hi: 0x8f}, @@ -3663,7 +3668,7 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x8ad5, lo: 0x98, hi: 0x9f}, {value: 0x8aed, lo: 0xa0, hi: 0xa7}, {value: 0x0008, lo: 0xa8, hi: 0xbf}, - // Block 0x94, offset 0x497 + // Block 0x93, offset 0x47c {value: 0x0000, lo: 0x06}, {value: 0x0008, lo: 0x80, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0x9f}, @@ -3671,7 +3676,7 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0xaa, hi: 0xaf}, {value: 0x8aed, lo: 0xb0, hi: 0xb7}, {value: 0x8ad5, lo: 0xb8, hi: 0xbf}, - // Block 0x95, offset 0x49e + // Block 0x94, offset 0x483 {value: 0x0000, lo: 0x06}, {value: 0xe145, lo: 0x80, hi: 0x87}, {value: 0xe1c5, lo: 0x88, hi: 0x8f}, @@ -3679,173 +3684,176 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0x94, hi: 0x97}, {value: 0x0008, lo: 0x98, hi: 0xbb}, {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0x96, offset 0x4a5 + // Block 0x95, offset 0x48a {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xa7}, {value: 0x0040, lo: 0xa8, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x97, offset 0x4a9 + // Block 0x96, offset 0x48e {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xa3}, {value: 0x0040, lo: 0xa4, hi: 0xae}, {value: 0x0018, lo: 0xaf, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0x98, offset 0x4ae + // Block 0x97, offset 0x493 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xb6}, {value: 0x0040, lo: 
0xb7, hi: 0xbf}, - // Block 0x99, offset 0x4b1 + // Block 0x98, offset 0x496 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xa7}, {value: 0x0040, lo: 0xa8, hi: 0xbf}, - // Block 0x9a, offset 0x4b6 + // Block 0x99, offset 0x49b {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0x85}, + {value: 0x0808, lo: 0x80, hi: 0x85}, {value: 0x0040, lo: 0x86, hi: 0x87}, - {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0808, lo: 0x88, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x0008, lo: 0x8a, hi: 0xb5}, + {value: 0x0808, lo: 0x8a, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xb6}, - {value: 0x0008, lo: 0xb7, hi: 0xb8}, + {value: 0x0808, lo: 0xb7, hi: 0xb8}, {value: 0x0040, lo: 0xb9, hi: 0xbb}, - {value: 0x0008, lo: 0xbc, hi: 0xbc}, + {value: 0x0808, lo: 0xbc, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbe}, - {value: 0x0008, lo: 0xbf, hi: 0xbf}, - // Block 0x9b, offset 0x4c2 + {value: 0x0808, lo: 0xbf, hi: 0xbf}, + // Block 0x9a, offset 0x4a7 {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0808, lo: 0x80, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0x96}, - {value: 0x0018, lo: 0x97, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xb6}, - {value: 0x0018, lo: 0xb7, hi: 0xbf}, - // Block 0x9c, offset 0x4c8 + {value: 0x0818, lo: 0x97, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb6}, + {value: 0x0818, lo: 0xb7, hi: 0xbf}, + // Block 0x9b, offset 0x4ad {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0808, lo: 0x80, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0xa6}, - {value: 0x0018, lo: 0xa7, hi: 0xaf}, + {value: 0x0818, lo: 0xa7, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0x9d, offset 0x4cd + // Block 0x9c, offset 0x4b2 {value: 0x0000, lo: 0x06}, {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xb2}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, {value: 0x0040, lo: 0xb3, hi: 0xb3}, - {value: 0x0008, lo: 0xb4, hi: 0xb5}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xba}, - {value: 0x0018, lo: 0xbb, hi: 0xbf}, - // Block 0x9e, offset 0x4d4 + {value: 0x0818, lo: 0xbb, hi: 0xbf}, + // Block 0x9d, offset 0x4b9 {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0x95}, - {value: 0x0018, lo: 0x96, hi: 0x9b}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0818, lo: 0x96, hi: 0x9b}, {value: 0x0040, lo: 0x9c, hi: 0x9e}, {value: 0x0018, lo: 0x9f, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xb9}, + {value: 0x0808, lo: 0xa0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbe}, - {value: 0x0018, lo: 0xbf, hi: 0xbf}, - // Block 0x9f, offset 0x4dc + {value: 0x0818, lo: 0xbf, hi: 0xbf}, + // Block 0x9e, offset 0x4c1 {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0xb7}, + {value: 0x0808, lo: 0x80, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbb}, - {value: 0x0018, lo: 0xbc, hi: 0xbd}, - {value: 0x0008, lo: 0xbe, hi: 0xbf}, - // Block 0xa0, offset 0x4e1 + {value: 0x0818, lo: 0xbc, hi: 0xbd}, + {value: 0x0808, lo: 0xbe, hi: 0xbf}, + // Block 0x9f, offset 0x4c6 {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0x8f}, + {value: 0x0818, lo: 0x80, hi: 0x8f}, {value: 0x0040, lo: 0x90, hi: 0x91}, - {value: 0x0018, lo: 0x92, hi: 0xbf}, - // Block 0xa1, offset 0x4e5 + {value: 0x0818, lo: 0x92, hi: 0xbf}, + // Block 0xa0, offset 0x4ca {value: 0x0000, lo: 0x0f}, - {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0x1308, lo: 0x81, hi: 0x83}, + {value: 0x0808, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 
0x81, hi: 0x83}, {value: 0x0040, lo: 0x84, hi: 0x84}, - {value: 0x1308, lo: 0x85, hi: 0x86}, + {value: 0x3308, lo: 0x85, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x8b}, - {value: 0x1308, lo: 0x8c, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x93}, + {value: 0x3308, lo: 0x8c, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x93}, {value: 0x0040, lo: 0x94, hi: 0x94}, - {value: 0x0008, lo: 0x95, hi: 0x97}, + {value: 0x0808, lo: 0x95, hi: 0x97}, {value: 0x0040, lo: 0x98, hi: 0x98}, - {value: 0x0008, lo: 0x99, hi: 0xb3}, + {value: 0x0808, lo: 0x99, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xb7}, - {value: 0x1308, lo: 0xb8, hi: 0xba}, + {value: 0x3308, lo: 0xb8, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 0xbe}, - {value: 0x1b08, lo: 0xbf, hi: 0xbf}, - // Block 0xa2, offset 0x4f5 + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xa1, offset 0x4da {value: 0x0000, lo: 0x06}, - {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0818, lo: 0x80, hi: 0x87}, {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0x98}, + {value: 0x0818, lo: 0x90, hi: 0x98}, {value: 0x0040, lo: 0x99, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbc}, - {value: 0x0018, lo: 0xbd, hi: 0xbf}, - // Block 0xa3, offset 0x4fc + {value: 0x0808, lo: 0xa0, hi: 0xbc}, + {value: 0x0818, lo: 0xbd, hi: 0xbf}, + // Block 0xa2, offset 0x4e1 {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0x9c}, - {value: 0x0018, lo: 0x9d, hi: 0x9f}, + {value: 0x0808, lo: 0x80, hi: 0x9c}, + {value: 0x0818, lo: 0x9d, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0xa4, offset 0x500 + // Block 0xa3, offset 0x4e5 {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0808, lo: 0x80, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xb8}, {value: 0x0018, lo: 0xb9, hi: 0xbf}, - // Block 0xa5, offset 0x504 + // Block 0xa4, offset 0x4e9 {value: 0x0000, lo: 0x06}, - {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0808, lo: 0x80, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0x97}, - {value: 0x0018, lo: 0x98, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xb2}, + {value: 0x0818, lo: 0x98, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, {value: 0x0040, lo: 0xb3, hi: 0xb7}, - {value: 0x0018, lo: 0xb8, hi: 0xbf}, - // Block 0xa6, offset 0x50b + {value: 0x0818, lo: 0xb8, hi: 0xbf}, + // Block 0xa5, offset 0x4f0 + {value: 0x0000, lo: 0x01}, + {value: 0x0808, lo: 0x80, hi: 0xbf}, + // Block 0xa6, offset 0x4f2 {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0808, lo: 0x80, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0xbf}, - // Block 0xa7, offset 0x50e + // Block 0xa7, offset 0x4f5 {value: 0x0000, lo: 0x02}, {value: 0x03dd, lo: 0x80, hi: 0xb2}, {value: 0x0040, lo: 0xb3, hi: 0xbf}, - // Block 0xa8, offset 0x511 + // Block 0xa8, offset 0x4f8 {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xb2}, + {value: 0x0808, lo: 0x80, hi: 0xb2}, {value: 0x0040, lo: 0xb3, hi: 0xb9}, - {value: 0x0018, lo: 0xba, hi: 0xbf}, - // Block 0xa9, offset 0x515 + {value: 0x0818, lo: 0xba, hi: 0xbf}, + // Block 0xa9, offset 0x4fc {value: 0x0000, lo: 0x03}, {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xbe}, + {value: 0x0818, lo: 0xa0, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xaa, offset 0x519 + // Block 0xaa, offset 0x500 {value: 0x0000, lo: 0x05}, - {value: 0x1008, lo: 0x80, hi: 0x80}, - {value: 0x1308, lo: 0x81, hi: 0x81}, - {value: 0x1008, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, 
{value: 0x0008, lo: 0x83, hi: 0xb7}, - {value: 0x1308, lo: 0xb8, hi: 0xbf}, - // Block 0xab, offset 0x51f + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xab, offset 0x506 {value: 0x0000, lo: 0x08}, - {value: 0x1308, lo: 0x80, hi: 0x85}, - {value: 0x1b08, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x80, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, {value: 0x0018, lo: 0x87, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x91}, {value: 0x0018, lo: 0x92, hi: 0xa5}, {value: 0x0008, lo: 0xa6, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbe}, - {value: 0x1b08, lo: 0xbf, hi: 0xbf}, - // Block 0xac, offset 0x528 + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xac, offset 0x50f {value: 0x0000, lo: 0x0b}, - {value: 0x1308, lo: 0x80, hi: 0x81}, - {value: 0x1008, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, {value: 0x0008, lo: 0x83, hi: 0xaf}, - {value: 0x1008, lo: 0xb0, hi: 0xb2}, - {value: 0x1308, lo: 0xb3, hi: 0xb6}, - {value: 0x1008, lo: 0xb7, hi: 0xb8}, - {value: 0x1b08, lo: 0xb9, hi: 0xb9}, - {value: 0x1308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, {value: 0x0018, lo: 0xbb, hi: 0xbc}, {value: 0x0340, lo: 0xbd, hi: 0xbd}, {value: 0x0018, lo: 0xbe, hi: 0xbf}, - // Block 0xad, offset 0x534 + // Block 0xad, offset 0x51b {value: 0x0000, lo: 0x06}, {value: 0x0018, lo: 0x80, hi: 0x81}, {value: 0x0040, lo: 0x82, hi: 0x8f}, @@ -3853,39 +3861,39 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0xa9, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0xae, offset 0x53b + // Block 0xae, offset 0x522 {value: 0x0000, lo: 0x08}, - {value: 0x1308, lo: 0x80, hi: 0x82}, + {value: 0x3308, lo: 0x80, hi: 0x82}, {value: 0x0008, lo: 0x83, hi: 0xa6}, - {value: 0x1308, lo: 0xa7, hi: 0xab}, - {value: 0x1008, lo: 0xac, hi: 0xac}, - {value: 0x1308, lo: 0xad, hi: 0xb2}, - {value: 0x1b08, lo: 0xb3, hi: 0xb4}, + {value: 0x3308, lo: 0xa7, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb2}, + {value: 0x3b08, lo: 0xb3, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xb5}, {value: 0x0008, lo: 0xb6, hi: 0xbf}, - // Block 0xaf, offset 0x544 + // Block 0xaf, offset 0x52b {value: 0x0000, lo: 0x07}, {value: 0x0018, lo: 0x80, hi: 0x83}, {value: 0x0040, lo: 0x84, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0xb2}, - {value: 0x1308, lo: 0xb3, hi: 0xb3}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, {value: 0x0018, lo: 0xb4, hi: 0xb5}, {value: 0x0008, lo: 0xb6, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0xb0, offset 0x54c + // Block 0xb0, offset 0x533 {value: 0x0000, lo: 0x06}, - {value: 0x1308, lo: 0x80, hi: 0x81}, - {value: 0x1008, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, {value: 0x0008, lo: 0x83, hi: 0xb2}, - {value: 0x1008, lo: 0xb3, hi: 0xb5}, - {value: 0x1308, lo: 0xb6, hi: 0xbe}, - {value: 0x1008, lo: 0xbf, hi: 0xbf}, - // Block 0xb1, offset 0x553 + {value: 0x3008, lo: 0xb3, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xbe}, + {value: 0x3008, lo: 0xbf, hi: 0xbf}, + // Block 0xb1, offset 0x53a {value: 0x0000, lo: 0x0d}, - {value: 0x1808, lo: 0x80, hi: 0x80}, + {value: 0x3808, lo: 0x80, hi: 0x80}, {value: 0x0008, lo: 0x81, hi: 0x84}, {value: 0x0018, lo: 0x85, hi: 0x89}, - {value: 0x1308, lo: 0x8a, hi: 0x8c}, + {value: 0x3308, lo: 0x8a, hi: 0x8c}, {value: 
0x0018, lo: 0x8d, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x9a}, @@ -3895,21 +3903,21 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0xa0, hi: 0xa0}, {value: 0x0018, lo: 0xa1, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0xb2, offset 0x561 + // Block 0xb2, offset 0x548 {value: 0x0000, lo: 0x0c}, {value: 0x0008, lo: 0x80, hi: 0x91}, {value: 0x0040, lo: 0x92, hi: 0x92}, {value: 0x0008, lo: 0x93, hi: 0xab}, - {value: 0x1008, lo: 0xac, hi: 0xae}, - {value: 0x1308, lo: 0xaf, hi: 0xb1}, - {value: 0x1008, lo: 0xb2, hi: 0xb3}, - {value: 0x1308, lo: 0xb4, hi: 0xb4}, - {value: 0x1808, lo: 0xb5, hi: 0xb5}, - {value: 0x1308, lo: 0xb6, hi: 0xb7}, + {value: 0x3008, lo: 0xac, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3808, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, {value: 0x0018, lo: 0xb8, hi: 0xbd}, - {value: 0x1308, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbe, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xb3, offset 0x56e + // Block 0xb3, offset 0x555 {value: 0x0000, lo: 0x0c}, {value: 0x0008, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x87}, @@ -3923,28 +3931,28 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0018, lo: 0xa9, hi: 0xa9}, {value: 0x0040, lo: 0xaa, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0xb4, offset 0x57b + // Block 0xb4, offset 0x562 {value: 0x0000, lo: 0x08}, {value: 0x0008, lo: 0x80, hi: 0x9e}, - {value: 0x1308, lo: 0x9f, hi: 0x9f}, - {value: 0x1008, lo: 0xa0, hi: 0xa2}, - {value: 0x1308, lo: 0xa3, hi: 0xa9}, - {value: 0x1b08, lo: 0xaa, hi: 0xaa}, + {value: 0x3308, lo: 0x9f, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa9}, + {value: 0x3b08, lo: 0xaa, hi: 0xaa}, {value: 0x0040, lo: 0xab, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0xb5, offset 0x584 + // Block 0xb5, offset 0x56b {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xb4}, - {value: 0x1008, lo: 0xb5, hi: 0xb7}, - {value: 0x1308, lo: 0xb8, hi: 0xbf}, - // Block 0xb6, offset 0x588 + {value: 0x3008, lo: 0xb5, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xb6, offset 0x56f {value: 0x0000, lo: 0x0d}, - {value: 0x1008, lo: 0x80, hi: 0x81}, - {value: 0x1b08, lo: 0x82, hi: 0x82}, - {value: 0x1308, lo: 0x83, hi: 0x84}, - {value: 0x1008, lo: 0x85, hi: 0x85}, - {value: 0x1308, lo: 0x86, hi: 0x86}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x84}, + {value: 0x3008, lo: 0x85, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, {value: 0x0008, lo: 0x87, hi: 0x8a}, {value: 0x0018, lo: 0x8b, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, @@ -3953,56 +3961,56 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0x9c, hi: 0x9c}, {value: 0x0018, lo: 0x9d, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0xbf}, - // Block 0xb7, offset 0x596 + // Block 0xb7, offset 0x57d {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x1008, lo: 0xb0, hi: 0xb2}, - {value: 0x1308, lo: 0xb3, hi: 0xb8}, - {value: 0x1008, lo: 0xb9, hi: 0xb9}, - {value: 0x1308, lo: 0xba, hi: 0xba}, - {value: 0x1008, lo: 0xbb, hi: 0xbe}, - {value: 0x1308, lo: 0xbf, hi: 0xbf}, - // Block 0xb8, offset 0x59e + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 
0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xb8, offset 0x585 {value: 0x0000, lo: 0x0a}, - {value: 0x1308, lo: 0x80, hi: 0x80}, - {value: 0x1008, lo: 0x81, hi: 0x81}, - {value: 0x1b08, lo: 0x82, hi: 0x82}, - {value: 0x1308, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, {value: 0x0008, lo: 0x84, hi: 0x85}, {value: 0x0018, lo: 0x86, hi: 0x86}, {value: 0x0008, lo: 0x87, hi: 0x87}, {value: 0x0040, lo: 0x88, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0xbf}, - // Block 0xb9, offset 0x5a9 + // Block 0xb9, offset 0x590 {value: 0x0000, lo: 0x08}, {value: 0x0008, lo: 0x80, hi: 0xae}, - {value: 0x1008, lo: 0xaf, hi: 0xb1}, - {value: 0x1308, lo: 0xb2, hi: 0xb5}, + {value: 0x3008, lo: 0xaf, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xb7}, - {value: 0x1008, lo: 0xb8, hi: 0xbb}, - {value: 0x1308, lo: 0xbc, hi: 0xbd}, - {value: 0x1008, lo: 0xbe, hi: 0xbe}, - {value: 0x1b08, lo: 0xbf, hi: 0xbf}, - // Block 0xba, offset 0x5b2 + {value: 0x3008, lo: 0xb8, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xba, offset 0x599 {value: 0x0000, lo: 0x05}, - {value: 0x1308, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x97}, {value: 0x0008, lo: 0x98, hi: 0x9b}, - {value: 0x1308, lo: 0x9c, hi: 0x9d}, + {value: 0x3308, lo: 0x9c, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0xbf}, - // Block 0xbb, offset 0x5b8 + // Block 0xbb, offset 0x59f {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x1008, lo: 0xb0, hi: 0xb2}, - {value: 0x1308, lo: 0xb3, hi: 0xba}, - {value: 0x1008, lo: 0xbb, hi: 0xbc}, - {value: 0x1308, lo: 0xbd, hi: 0xbd}, - {value: 0x1008, lo: 0xbe, hi: 0xbe}, - {value: 0x1b08, lo: 0xbf, hi: 0xbf}, - // Block 0xbc, offset 0x5c0 + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xbc, offset 0x5a7 {value: 0x0000, lo: 0x08}, - {value: 0x1308, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x83}, {value: 0x0008, lo: 0x84, hi: 0x84}, {value: 0x0040, lo: 0x85, hi: 0x8f}, @@ -4010,60 +4018,97 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0x9a, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xac}, {value: 0x0040, lo: 0xad, hi: 0xbf}, - // Block 0xbd, offset 0x5c9 + // Block 0xbd, offset 0x5b0 {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0xaa}, - {value: 0x1308, lo: 0xab, hi: 0xab}, - {value: 0x1008, lo: 0xac, hi: 0xac}, - {value: 0x1308, lo: 0xad, hi: 0xad}, - {value: 0x1008, lo: 0xae, hi: 0xaf}, - {value: 0x1308, lo: 0xb0, hi: 0xb5}, - {value: 0x1808, lo: 0xb6, hi: 0xb6}, - {value: 0x1308, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb5}, + {value: 0x3808, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbf}, - // Block 0xbe, offset 0x5d3 + // Block 0xbe, offset 0x5ba {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x89}, {value: 
0x0040, lo: 0x8a, hi: 0xbf}, - // Block 0xbf, offset 0x5d6 + // Block 0xbf, offset 0x5bd {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9c}, - {value: 0x1308, lo: 0x9d, hi: 0x9f}, - {value: 0x1008, lo: 0xa0, hi: 0xa1}, - {value: 0x1308, lo: 0xa2, hi: 0xa5}, - {value: 0x1008, lo: 0xa6, hi: 0xa6}, - {value: 0x1308, lo: 0xa7, hi: 0xaa}, - {value: 0x1b08, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, {value: 0x0040, lo: 0xac, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb9}, {value: 0x0018, lo: 0xba, hi: 0xbf}, - // Block 0xc0, offset 0x5e2 + // Block 0xc0, offset 0x5c9 {value: 0x0000, lo: 0x02}, {value: 0x0040, lo: 0x80, hi: 0x9f}, {value: 0x049d, lo: 0xa0, hi: 0xbf}, - // Block 0xc1, offset 0x5e5 + // Block 0xc1, offset 0x5cc {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xa9}, {value: 0x0018, lo: 0xaa, hi: 0xb2}, {value: 0x0040, lo: 0xb3, hi: 0xbe}, {value: 0x0008, lo: 0xbf, hi: 0xbf}, - // Block 0xc2, offset 0x5ea + // Block 0xc2, offset 0x5d1 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0xc3, offset 0x5de + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x3b08, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0xbf}, + // Block 0xc4, offset 0x5e7 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x98}, + {value: 0x3b08, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xa2}, + {value: 0x0040, lo: 0xa3, hi: 0xbf}, + // Block 0xc5, offset 0x5f3 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xb8}, {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0xc3, offset 0x5ed + // Block 0xc6, offset 0x5f6 {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x89}, {value: 0x0008, lo: 0x8a, hi: 0xae}, - {value: 0x1008, lo: 0xaf, hi: 0xaf}, - {value: 0x1308, lo: 0xb0, hi: 0xb6}, + {value: 0x3008, lo: 0xaf, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xb7}, - {value: 0x1308, lo: 0xb8, hi: 0xbd}, - {value: 0x1008, lo: 0xbe, hi: 0xbe}, - {value: 0x1b08, lo: 0xbf, hi: 0xbf}, - // Block 0xc4, offset 0x5f7 + {value: 0x3308, lo: 0xb8, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xc7, offset 0x600 {value: 0x0000, lo: 0x08}, {value: 0x0008, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x85}, @@ -4073,42 +4118,65 @@ var idnaSparseValues = 
[1876]valueRange{ {value: 0x0040, lo: 0xad, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb1}, {value: 0x0008, lo: 0xb2, hi: 0xbf}, - // Block 0xc5, offset 0x600 + // Block 0xc8, offset 0x609 {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0x8f}, {value: 0x0040, lo: 0x90, hi: 0x91}, - {value: 0x1308, lo: 0x92, hi: 0xa7}, + {value: 0x3308, lo: 0x92, hi: 0xa7}, {value: 0x0040, lo: 0xa8, hi: 0xa8}, - {value: 0x1008, lo: 0xa9, hi: 0xa9}, - {value: 0x1308, lo: 0xaa, hi: 0xb0}, - {value: 0x1008, lo: 0xb1, hi: 0xb1}, - {value: 0x1308, lo: 0xb2, hi: 0xb3}, - {value: 0x1008, lo: 0xb4, hi: 0xb4}, - {value: 0x1308, lo: 0xb5, hi: 0xb6}, + {value: 0x3008, lo: 0xa9, hi: 0xa9}, + {value: 0x3308, lo: 0xaa, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0xc6, offset 0x60c + // Block 0xc9, offset 0x615 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xca, offset 0x622 + {value: 0x0000, lo: 0x07}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xcb, offset 0x62a {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0xbf}, - // Block 0xc7, offset 0x60f + // Block 0xcc, offset 0x62d {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0xc8, offset 0x614 + // Block 0xcd, offset 0x632 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x83}, {value: 0x0040, lo: 0x84, hi: 0xbf}, - // Block 0xc9, offset 0x617 + // Block 0xce, offset 0x635 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xbf}, - // Block 0xca, offset 0x61a + // Block 0xcf, offset 0x638 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0xbf}, - // Block 0xcb, offset 0x61d + // Block 0xd0, offset 0x63b {value: 0x0000, lo: 0x06}, {value: 0x0008, lo: 0x80, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0x9f}, @@ -4116,20 +4184,20 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0xaa, hi: 0xad}, {value: 0x0018, lo: 0xae, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0xcc, offset 0x624 + // Block 0xd1, offset 0x642 {value: 0x0000, lo: 0x06}, {value: 0x0040, lo: 0x80, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0xad}, {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x1308, lo: 0xb0, hi: 0xb4}, + {value: 0x3308, lo: 0xb0, hi: 0xb4}, {value: 0x0018, lo: 0xb5, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0xcd, offset 0x62b + // Block 0xd2, offset 0x649 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x1308, lo: 0xb0, hi: 0xb6}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, {value: 0x0018, 
lo: 0xb7, hi: 0xbf}, - // Block 0xce, offset 0x62f + // Block 0xd3, offset 0x64d {value: 0x0000, lo: 0x0a}, {value: 0x0008, lo: 0x80, hi: 0x83}, {value: 0x0018, lo: 0x84, hi: 0x85}, @@ -4141,67 +4209,75 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0008, lo: 0xa3, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbc}, {value: 0x0008, lo: 0xbd, hi: 0xbf}, - // Block 0xcf, offset 0x63a + // Block 0xd4, offset 0x658 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x8f}, {value: 0x0040, lo: 0x90, hi: 0xbf}, - // Block 0xd0, offset 0x63d + // Block 0xd5, offset 0x65b {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0x84}, {value: 0x0040, lo: 0x85, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x90}, - {value: 0x1008, lo: 0x91, hi: 0xbe}, + {value: 0x3008, lo: 0x91, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xd1, offset 0x643 + // Block 0xd6, offset 0x661 {value: 0x0000, lo: 0x04}, {value: 0x0040, lo: 0x80, hi: 0x8e}, - {value: 0x1308, lo: 0x8f, hi: 0x92}, + {value: 0x3308, lo: 0x8f, hi: 0x92}, {value: 0x0008, lo: 0x93, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0xd2, offset 0x648 + // Block 0xd7, offset 0x666 {value: 0x0000, lo: 0x03}, {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa0}, - {value: 0x0040, lo: 0xa1, hi: 0xbf}, - // Block 0xd3, offset 0x64c + {value: 0x0008, lo: 0xa0, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xbf}, + // Block 0xd8, offset 0x66a {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xac}, {value: 0x0040, lo: 0xad, hi: 0xbf}, - // Block 0xd4, offset 0x64f + // Block 0xd9, offset 0x66d {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xb2}, {value: 0x0040, lo: 0xb3, hi: 0xbf}, - // Block 0xd5, offset 0x652 + // Block 0xda, offset 0x670 {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x81}, - {value: 0x0040, lo: 0x82, hi: 0xbf}, - // Block 0xd6, offset 0x655 + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xdb, offset 0x673 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xdc, offset 0x676 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0xdd, offset 0x679 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xaa}, {value: 0x0040, lo: 0xab, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0xd7, offset 0x65a + // Block 0xde, offset 0x67e {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9b}, {value: 0x0018, lo: 0x9c, hi: 0x9c}, - {value: 0x1308, lo: 0x9d, hi: 0x9e}, + {value: 0x3308, lo: 0x9d, hi: 0x9e}, {value: 0x0018, lo: 0x9f, hi: 0x9f}, {value: 0x03c0, lo: 0xa0, hi: 0xa3}, {value: 0x0040, lo: 0xa4, hi: 0xbf}, - // Block 0xd8, offset 0x664 + // Block 0xdf, offset 0x688 {value: 0x0000, lo: 0x02}, {value: 0x0018, lo: 0x80, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0xd9, offset 0x667 + // Block 0xe0, offset 0x68b {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0xa6}, {value: 0x0040, lo: 0xa7, hi: 0xa8}, {value: 0x0018, lo: 0xa9, hi: 0xbf}, - // Block 0xda, offset 0x66b + // Block 0xe1, offset 0x68f {value: 0x0000, lo: 0x0e}, {value: 0x0018, lo: 0x80, hi: 0x9d}, {value: 0xb5b9, lo: 0x9e, hi: 0x9e}, @@ -4211,127 +4287,127 @@ var idnaSparseValues = [1876]valueRange{ {value: 0xb719, lo: 0xa2, hi: 0xa2}, {value: 
0xb781, lo: 0xa3, hi: 0xa3}, {value: 0xb7e9, lo: 0xa4, hi: 0xa4}, - {value: 0x1018, lo: 0xa5, hi: 0xa6}, - {value: 0x1318, lo: 0xa7, hi: 0xa9}, + {value: 0x3018, lo: 0xa5, hi: 0xa6}, + {value: 0x3318, lo: 0xa7, hi: 0xa9}, {value: 0x0018, lo: 0xaa, hi: 0xac}, - {value: 0x1018, lo: 0xad, hi: 0xb2}, + {value: 0x3018, lo: 0xad, hi: 0xb2}, {value: 0x0340, lo: 0xb3, hi: 0xba}, - {value: 0x1318, lo: 0xbb, hi: 0xbf}, - // Block 0xdb, offset 0x67a + {value: 0x3318, lo: 0xbb, hi: 0xbf}, + // Block 0xe2, offset 0x69e {value: 0x0000, lo: 0x0b}, - {value: 0x1318, lo: 0x80, hi: 0x82}, + {value: 0x3318, lo: 0x80, hi: 0x82}, {value: 0x0018, lo: 0x83, hi: 0x84}, - {value: 0x1318, lo: 0x85, hi: 0x8b}, + {value: 0x3318, lo: 0x85, hi: 0x8b}, {value: 0x0018, lo: 0x8c, hi: 0xa9}, - {value: 0x1318, lo: 0xaa, hi: 0xad}, + {value: 0x3318, lo: 0xaa, hi: 0xad}, {value: 0x0018, lo: 0xae, hi: 0xba}, {value: 0xb851, lo: 0xbb, hi: 0xbb}, {value: 0xb899, lo: 0xbc, hi: 0xbc}, {value: 0xb8e1, lo: 0xbd, hi: 0xbd}, {value: 0xb949, lo: 0xbe, hi: 0xbe}, {value: 0xb9b1, lo: 0xbf, hi: 0xbf}, - // Block 0xdc, offset 0x686 + // Block 0xe3, offset 0x6aa {value: 0x0000, lo: 0x03}, {value: 0xba19, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0xa8}, {value: 0x0040, lo: 0xa9, hi: 0xbf}, - // Block 0xdd, offset 0x68a + // Block 0xe4, offset 0x6ae {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x81}, - {value: 0x1318, lo: 0x82, hi: 0x84}, + {value: 0x3318, lo: 0x82, hi: 0x84}, {value: 0x0018, lo: 0x85, hi: 0x85}, {value: 0x0040, lo: 0x86, hi: 0xbf}, - // Block 0xde, offset 0x68f + // Block 0xe5, offset 0x6b3 {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xb1}, {value: 0x0040, lo: 0xb2, hi: 0xbf}, - // Block 0xdf, offset 0x694 + // Block 0xe6, offset 0x6b8 {value: 0x0000, lo: 0x03}, - {value: 0x1308, lo: 0x80, hi: 0xb6}, + {value: 0x3308, lo: 0x80, hi: 0xb6}, {value: 0x0018, lo: 0xb7, hi: 0xba}, - {value: 0x1308, lo: 0xbb, hi: 0xbf}, - // Block 0xe0, offset 0x698 + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0xe7, offset 0x6bc {value: 0x0000, lo: 0x04}, - {value: 0x1308, lo: 0x80, hi: 0xac}, + {value: 0x3308, lo: 0x80, hi: 0xac}, {value: 0x0018, lo: 0xad, hi: 0xb4}, - {value: 0x1308, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, {value: 0x0018, lo: 0xb6, hi: 0xbf}, - // Block 0xe1, offset 0x69d + // Block 0xe8, offset 0x6c1 {value: 0x0000, lo: 0x08}, {value: 0x0018, lo: 0x80, hi: 0x83}, - {value: 0x1308, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x84, hi: 0x84}, {value: 0x0018, lo: 0x85, hi: 0x8b}, {value: 0x0040, lo: 0x8c, hi: 0x9a}, - {value: 0x1308, lo: 0x9b, hi: 0x9f}, + {value: 0x3308, lo: 0x9b, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xa0}, - {value: 0x1308, lo: 0xa1, hi: 0xaf}, + {value: 0x3308, lo: 0xa1, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0xe2, offset 0x6a6 + // Block 0xe9, offset 0x6ca {value: 0x0000, lo: 0x0a}, - {value: 0x1308, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x87}, - {value: 0x1308, lo: 0x88, hi: 0x98}, + {value: 0x3308, lo: 0x88, hi: 0x98}, {value: 0x0040, lo: 0x99, hi: 0x9a}, - {value: 0x1308, lo: 0x9b, hi: 0xa1}, + {value: 0x3308, lo: 0x9b, hi: 0xa1}, {value: 0x0040, lo: 0xa2, hi: 0xa2}, - {value: 0x1308, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa3, hi: 0xa4}, {value: 0x0040, lo: 0xa5, hi: 0xa5}, - {value: 0x1308, lo: 0xa6, hi: 0xaa}, + {value: 0x3308, lo: 0xa6, hi: 0xaa}, {value: 0x0040, lo: 0xab, hi: 0xbf}, - // Block 
0xe3, offset 0x6b1 + // Block 0xea, offset 0x6d5 {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0808, lo: 0x80, hi: 0x84}, {value: 0x0040, lo: 0x85, hi: 0x86}, - {value: 0x0018, lo: 0x87, hi: 0x8f}, - {value: 0x1308, lo: 0x90, hi: 0x96}, + {value: 0x0818, lo: 0x87, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0xbf}, - // Block 0xe4, offset 0x6b7 + // Block 0xeb, offset 0x6db {value: 0x0000, lo: 0x07}, - {value: 0x0208, lo: 0x80, hi: 0x83}, - {value: 0x1308, lo: 0x84, hi: 0x8a}, + {value: 0x0a08, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x8a}, {value: 0x0040, lo: 0x8b, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0808, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9d}, - {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0818, lo: 0x9e, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0xe5, offset 0x6bf + // Block 0xec, offset 0x6e3 {value: 0x0000, lo: 0x03}, {value: 0x0040, lo: 0x80, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb1}, {value: 0x0040, lo: 0xb2, hi: 0xbf}, - // Block 0xe6, offset 0x6c3 + // Block 0xed, offset 0x6e7 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0xab}, {value: 0x0040, lo: 0xac, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0xe7, offset 0x6c7 + // Block 0xee, offset 0x6eb {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0x93}, {value: 0x0040, lo: 0x94, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xb0}, {value: 0x0018, lo: 0xb1, hi: 0xbf}, - // Block 0xe8, offset 0x6cd + // Block 0xef, offset 0x6f1 {value: 0x0000, lo: 0x05}, {value: 0x0040, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x8f}, {value: 0x0040, lo: 0x90, hi: 0x90}, {value: 0x0018, lo: 0x91, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0xe9, offset 0x6d3 + // Block 0xf0, offset 0x6f7 {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x8f}, {value: 0xc1c1, lo: 0x90, hi: 0x90}, {value: 0x0018, lo: 0x91, hi: 0xac}, {value: 0x0040, lo: 0xad, hi: 0xbf}, - // Block 0xea, offset 0x6d8 + // Block 0xf1, offset 0x6fc {value: 0x0000, lo: 0x02}, {value: 0x0040, lo: 0x80, hi: 0xa5}, {value: 0x0018, lo: 0xa6, hi: 0xbf}, - // Block 0xeb, offset 0x6db - {value: 0x0000, lo: 0x0d}, + // Block 0xf2, offset 0x6ff + {value: 0x0000, lo: 0x0f}, {value: 0xc7e9, lo: 0x80, hi: 0x80}, {value: 0xc839, lo: 0x81, hi: 0x81}, {value: 0xc889, lo: 0x82, hi: 0x82}, @@ -4344,84 +4420,88 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0x89, hi: 0x8f}, {value: 0xcab9, lo: 0x90, hi: 0x90}, {value: 0xcad9, lo: 0x91, hi: 0x91}, - {value: 0x0040, lo: 0x92, hi: 0xbf}, - // Block 0xec, offset 0x6e9 + {value: 0x0040, lo: 0x92, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xbf}, + // Block 0xf3, offset 0x70f {value: 0x0000, lo: 0x06}, - {value: 0x0018, lo: 0x80, hi: 0x92}, - {value: 0x0040, lo: 0x93, hi: 0x9f}, + {value: 0x0018, lo: 0x80, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xac}, {value: 0x0040, lo: 0xad, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0xed, offset 0x6f0 + {value: 0x0018, lo: 0xb0, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xf4, offset 0x716 {value: 0x0000, lo: 0x02}, {value: 0x0018, lo: 0x80, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xbf}, - // Block 0xee, offset 0x6f3 + // Block 0xf5, offset 0x719 {value: 0x0000, lo: 0x02}, {value: 0x0018, lo: 0x80, hi: 0x94}, 
{value: 0x0040, lo: 0x95, hi: 0xbf}, - // Block 0xef, offset 0x6f6 + // Block 0xf6, offset 0x71c {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0x8b}, {value: 0x0040, lo: 0x8c, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0xbf}, - // Block 0xf0, offset 0x6fa + // Block 0xf7, offset 0x720 {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0x87}, {value: 0x0040, lo: 0x88, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xbf}, - // Block 0xf1, offset 0x700 + // Block 0xf8, offset 0x726 {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x87}, {value: 0x0040, lo: 0x88, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0xad}, {value: 0x0040, lo: 0xae, hi: 0xbf}, - // Block 0xf2, offset 0x705 - {value: 0x0000, lo: 0x09}, - {value: 0x0040, lo: 0x80, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb0}, - {value: 0x0040, lo: 0xb1, hi: 0xb2}, - {value: 0x0018, lo: 0xb3, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xf3, offset 0x70f + // Block 0xf9, offset 0x72b {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x8b}, {value: 0x0040, lo: 0x8c, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0xbf}, - // Block 0xf4, offset 0x714 - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0x91}, - {value: 0x0040, lo: 0x92, hi: 0xbf}, - // Block 0xf5, offset 0x717 + {value: 0x0018, lo: 0x90, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xfa, offset 0x730 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0xfb, offset 0x735 {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xbf}, + // Block 0xfc, offset 0x738 + {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x80}, - {value: 0x0040, lo: 0x81, hi: 0xbf}, - // Block 0xf6, offset 0x71a + {value: 0x0040, lo: 0x81, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0xfd, offset 0x73d {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0xbf}, - // Block 0xf7, offset 0x71d + // Block 0xfe, offset 0x740 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0xf8, offset 0x720 + // Block 0xff, offset 0x743 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0xf9, offset 0x724 - {value: 0x0000, lo: 0x02}, + // Block 0x100, offset 0x747 + {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xa1}, - {value: 0x0040, lo: 0xa2, hi: 0xbf}, - // Block 0xfa, offset 0x727 + {value: 0x0040, lo: 0xa2, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x101, offset 0x74b + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x102, offset 0x74e {value: 0x0020, lo: 0x0f}, {value: 0xdeb9, lo: 0x80, hi: 0x89}, {value: 0x8dfd, lo: 0x8a, hi: 0x8a}, @@ -4438,7 +4518,7 @@ var idnaSparseValues = [1876]valueRange{ {value: 0xe4f9, lo: 0xba, hi: 0xba}, {value: 0x8edd, lo: 0xbb, hi: 0xbb}, {value: 0xe519, lo: 0xbc, hi: 0xbf}, - // Block 0xfb, offset 0x737 + // Block 0x103, 
offset 0x75e {value: 0x0020, lo: 0x10}, {value: 0x937d, lo: 0x80, hi: 0x80}, {value: 0xf099, lo: 0x81, hi: 0x86}, @@ -4455,23 +4535,23 @@ var idnaSparseValues = [1876]valueRange{ {value: 0xf4d9, lo: 0xae, hi: 0xaf}, {value: 0x94dd, lo: 0xb0, hi: 0xb1}, {value: 0xf519, lo: 0xb2, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xfc, offset 0x748 + {value: 0x2040, lo: 0xbf, hi: 0xbf}, + // Block 0x104, offset 0x76f {value: 0x0000, lo: 0x04}, {value: 0x0040, lo: 0x80, hi: 0x80}, {value: 0x0340, lo: 0x81, hi: 0x81}, {value: 0x0040, lo: 0x82, hi: 0x9f}, {value: 0x0340, lo: 0xa0, hi: 0xbf}, - // Block 0xfd, offset 0x74d + // Block 0x105, offset 0x774 {value: 0x0000, lo: 0x01}, {value: 0x0340, lo: 0x80, hi: 0xbf}, - // Block 0xfe, offset 0x74f + // Block 0x106, offset 0x776 {value: 0x0000, lo: 0x01}, - {value: 0x13c0, lo: 0x80, hi: 0xbf}, - // Block 0xff, offset 0x751 + {value: 0x33c0, lo: 0x80, hi: 0xbf}, + // Block 0x107, offset 0x778 {value: 0x0000, lo: 0x02}, - {value: 0x13c0, lo: 0x80, hi: 0xaf}, + {value: 0x33c0, lo: 0x80, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, } -// Total table size 41559 bytes (40KiB); checksum: F4A1FA4E +// Total table size 42115 bytes (41KiB); checksum: F4A1FA4E diff --git a/cluster-autoscaler/vendor/golang.org/x/net/idna/trieval.go b/cluster-autoscaler/vendor/golang.org/x/net/idna/trieval.go index 63cb03b59b12..7a8cf889b5bc 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/idna/trieval.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/idna/trieval.go @@ -26,9 +26,9 @@ package idna // 15..3 index into xor or mapping table // } // } else { -// 15..13 unused -// 12 modifier (including virama) -// 11 virama modifier +// 15..14 unused +// 13 mayNeedNorm +// 12..11 attributes // 10..8 joining type // 7..3 category type // } @@ -49,15 +49,20 @@ const ( joinShift = 8 joinMask = 0x07 - viramaModifier = 0x0800 + // Attributes + attributesMask = 0x1800 + viramaModifier = 0x1800 modifier = 0x1000 + rtl = 0x0800 + + mayNeedNorm = 0x2000 ) // A category corresponds to a category defined in the IDNA mapping table. type category uint16 const ( - unknown category = 0 // not defined currently in unicode. + unknown category = 0 // not currently defined in unicode. mapped category = 1 disallowedSTD3Mapped category = 2 deviation category = 3 @@ -110,5 +115,5 @@ func (c info) isModifier() bool { } func (c info) isViramaModifier() bool { - return c&(viramaModifier|catSmallMask) == viramaModifier + return c&(attributesMask|catSmallMask) == viramaModifier } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/proxy/per_host.go b/cluster-autoscaler/vendor/golang.org/x/net/proxy/per_host.go index 242d5623f801..0689bb6a70f6 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/proxy/per_host.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/proxy/per_host.go @@ -61,7 +61,7 @@ func (p *PerHost) dialerForRequest(host string) Dialer { return p.bypass } if host == zone[1:] { - // For a zone "example.com", we match "example.com" + // For a zone ".example.com", we match "example.com" // too. return p.bypass } diff --git a/cluster-autoscaler/vendor/golang.org/x/net/proxy/socks5.go b/cluster-autoscaler/vendor/golang.org/x/net/proxy/socks5.go index 2efec6e8d7e5..3fed38ef1cc4 100644 --- a/cluster-autoscaler/vendor/golang.org/x/net/proxy/socks5.go +++ b/cluster-autoscaler/vendor/golang.org/x/net/proxy/socks5.go @@ -12,7 +12,7 @@ import ( ) // SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address -// with an optional username and password. 
See RFC 1928. +// with an optional username and password. See RFC 1928 and RFC 1929. func SOCKS5(network, addr string, auth *Auth, forward Dialer) (Dialer, error) { s := &socks5{ network: network, @@ -60,7 +60,7 @@ var socks5Errors = []string{ "address type not supported", } -// Dial connects to the address addr on the network net via the SOCKS5 proxy. +// Dial connects to the address addr on the given network via the SOCKS5 proxy. func (s *socks5) Dial(network, addr string) (net.Conn, error) { switch network { case "tcp", "tcp6", "tcp4": @@ -120,6 +120,7 @@ func (s *socks5) connect(conn net.Conn, target string) error { return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") } + // See RFC 1929 if buf[1] == socks5AuthPassword { buf = buf[:0] buf = append(buf, 1 /* password protocol version */) diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/.please-update b/cluster-autoscaler/vendor/google.golang.org/grpc/.please-update deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/.travis.yml b/cluster-autoscaler/vendor/google.golang.org/grpc/.travis.yml index 22bf25004a3b..3c2621ab750a 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/.travis.yml +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/.travis.yml @@ -1,20 +1,24 @@ language: go go: + - 1.6.x - 1.7.x - 1.8.x - 1.9.x + - 1.10.x matrix: include: - - go: 1.9.x - env: ARCH=386 + - go: 1.10.x + env: RUN386=1 go_import_path: google.golang.org/grpc before_install: - - if [[ "$TRAVIS_GO_VERSION" = 1.9* && "$ARCH" != "386" ]]; then ./vet.sh -install || exit 1; fi + - if [[ -n "$RUN386" ]]; then export GOARCH=386; fi + - if [[ "$TRAVIS_GO_VERSION" = 1.10* && "$GOARCH" != "386" ]]; then ./vet.sh -install || exit 1; fi script: - - if [[ "$TRAVIS_GO_VERSION" = 1.9* && "$ARCH" != "386" ]]; then ./vet.sh || exit 1; fi - - make test testrace + - if [[ "$TRAVIS_GO_VERSION" = 1.10* && "$GOARCH" != "386" ]]; then ./vet.sh || exit 1; fi + - make test || exit 1 + - if [[ "$GOARCH" != "386" ]]; then make testrace; fi diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/CONTRIBUTING.md b/cluster-autoscaler/vendor/google.golang.org/grpc/CONTRIBUTING.md index a5c6e06e2550..0863eb26b602 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -7,7 +7,7 @@ If you are new to github, please start by reading [Pull Request howto](https://h ## Legal requirements In order to protect both you and ourselves, you will need to sign the -[Contributor License Agreement](https://cla.developers.google.com/clas). +[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf). ## Guidelines for Pull Requests How to get your contributions merged smoothly and quickly. @@ -27,6 +27,10 @@ How to get your contributions merged smoothly and quickly. - Keep your PR up to date with upstream/master (if there are merge conflicts, we can't really merge your change). - **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on. + - `make all` to test everything, OR + - `make vet` to catch vet errors + - `make test` to run the tests + - `make testrace` to run tests in race mode - Exceptions to the rules can be made if there's a compelling reason for doing so. 
diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/Makefile b/cluster-autoscaler/vendor/google.golang.org/grpc/Makefile index 39606b564a6d..6f393a808dfa 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/Makefile +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/Makefile @@ -1,4 +1,4 @@ -all: test testrace +all: vet test testrace deps: go get -d -v google.golang.org/grpc/... @@ -22,11 +22,14 @@ proto: fi go generate google.golang.org/grpc/... +vet: + ./vet.sh + test: testdeps - go test -cpu 1,4 google.golang.org/grpc/... + go test -cpu 1,4 -timeout 5m google.golang.org/grpc/... testrace: testdeps - go test -race -cpu 1,4 google.golang.org/grpc/... + go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/... clean: go clean -i google.golang.org/grpc/... @@ -39,7 +42,7 @@ clean: updatetestdeps \ build \ proto \ + vet \ test \ testrace \ - clean \ - coverage + clean diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/README.md b/cluster-autoscaler/vendor/google.golang.org/grpc/README.md index 622a5dc3e85d..789adfd65362 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/README.md +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/README.md @@ -1,6 +1,6 @@ # gRPC-Go -[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) +[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) The Go implementation of [gRPC](https://grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start: Go](https://grpc.io/docs/quickstart/go.html) guide. @@ -16,7 +16,7 @@ $ go get -u google.golang.org/grpc Prerequisites ------------- -This requires Go 1.7 or later. +This requires Go 1.6 or later. Go 1.7 will be required soon. Constraints ----------- diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/backoff.go b/cluster-autoscaler/vendor/google.golang.org/grpc/backoff.go index 090fbe87c522..fa31565fd283 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/backoff.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/backoff.go @@ -16,83 +16,23 @@ * */ +// See internal/backoff package for the backoff implementation. This file is +// kept for the exported types and API backward compatibility. + package grpc import ( - "math/rand" "time" ) // DefaultBackoffConfig uses values specified for backoff in // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. -var ( - DefaultBackoffConfig = BackoffConfig{ - MaxDelay: 120 * time.Second, - baseDelay: 1.0 * time.Second, - factor: 1.6, - jitter: 0.2, - } -) - -// backoffStrategy defines the methodology for backing off after a grpc -// connection failure. -// -// This is unexported until the gRPC project decides whether or not to allow -// alternative backoff strategies. Once a decision is made, this type and its -// method may be exported. -type backoffStrategy interface { - // backoff returns the amount of time to wait before the next retry given - // the number of consecutive failures. 
- backoff(retries int) time.Duration +var DefaultBackoffConfig = BackoffConfig{ + MaxDelay: 120 * time.Second, } // BackoffConfig defines the parameters for the default gRPC backoff strategy. type BackoffConfig struct { // MaxDelay is the upper bound of backoff delay. MaxDelay time.Duration - - // TODO(stevvooe): The following fields are not exported, as allowing - // changes would violate the current gRPC specification for backoff. If - // gRPC decides to allow more interesting backoff strategies, these fields - // may be opened up in the future. - - // baseDelay is the amount of time to wait before retrying after the first - // failure. - baseDelay time.Duration - - // factor is applied to the backoff after each retry. - factor float64 - - // jitter provides a range to randomize backoff delays. - jitter float64 -} - -func setDefaults(bc *BackoffConfig) { - md := bc.MaxDelay - *bc = DefaultBackoffConfig - - if md > 0 { - bc.MaxDelay = md - } -} - -func (bc BackoffConfig) backoff(retries int) time.Duration { - if retries == 0 { - return bc.baseDelay - } - backoff, max := float64(bc.baseDelay), float64(bc.MaxDelay) - for backoff < max && retries > 0 { - backoff *= bc.factor - retries-- - } - if backoff > max { - backoff = max - } - // Randomize backoff delays so that if a cluster of requests start at - // the same time, they won't operate in lockstep. - backoff *= 1 + bc.jitter*(rand.Float64()*2-1) - if backoff < 0 { - return 0 - } - return time.Duration(backoff) } diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/balancer.go b/cluster-autoscaler/vendor/google.golang.org/grpc/balancer.go index ab65049ddc17..e1730166cde6 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/balancer.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/balancer.go @@ -28,10 +28,12 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/naming" + "google.golang.org/grpc/status" ) // Address represents a server the client connects to. -// This is the EXPERIMENTAL API and may be changed or extended in the future. +// +// Deprecated: please use package balancer. type Address struct { // Addr is the server address on which a connection will be established. Addr string @@ -41,6 +43,8 @@ type Address struct { } // BalancerConfig specifies the configurations for Balancer. +// +// Deprecated: please use package balancer. type BalancerConfig struct { // DialCreds is the transport credential the Balancer implementation can // use to dial to a remote load balancer server. The Balancer implementations @@ -53,7 +57,8 @@ type BalancerConfig struct { } // BalancerGetOptions configures a Get call. -// This is the EXPERIMENTAL API and may be changed or extended in the future. +// +// Deprecated: please use package balancer. type BalancerGetOptions struct { // BlockingWait specifies whether Get should block when there is no // connected address. @@ -61,7 +66,8 @@ type BalancerGetOptions struct { } // Balancer chooses network addresses for RPCs. -// This is the EXPERIMENTAL API and may be changed or extended in the future. +// +// Deprecated: please use package balancer. type Balancer interface { // Start does the initialization work to bootstrap a Balancer. For example, // this function may start the name resolution and watch the updates. It will @@ -134,6 +140,8 @@ func downErrorf(timeout, temporary bool, format string, a ...interface{}) downEr // RoundRobin returns a Balancer that selects addresses round-robin. 
It uses r to watch // the name resolution updates and updates the addresses available correspondingly. +// +// Deprecated: please use package balancer/roundrobin. func RoundRobin(r naming.Resolver) Balancer { return &roundRobin{r: r} } @@ -310,7 +318,7 @@ func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Ad if !opts.BlockingWait { if len(rr.addrs) == 0 { rr.mu.Unlock() - err = Errorf(codes.Unavailable, "there is no address available") + err = status.Errorf(codes.Unavailable, "there is no address available") return } // Returns the next addr on rr.addrs for failfast RPCs. diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/balancer/balancer.go b/cluster-autoscaler/vendor/google.golang.org/grpc/balancer/balancer.go index 84e10b630e75..f9d83c2f39ff 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/balancer/balancer.go @@ -23,6 +23,7 @@ package balancer import ( "errors" "net" + "strings" "golang.org/x/net/context" "google.golang.org/grpc/connectivity" @@ -33,24 +34,26 @@ import ( var ( // m is a map from name to balancer builder. m = make(map[string]Builder) - // defaultBuilder is the default balancer to use. - defaultBuilder Builder // TODO(bar) install pickfirst as default. ) -// Register registers the balancer builder to the balancer map. -// b.Name will be used as the name registered with this builder. +// Register registers the balancer builder to the balancer map. b.Name +// (lowercased) will be used as the name registered with this builder. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Balancers are +// registered with the same name, the one registered last will take effect. func Register(b Builder) { - m[b.Name()] = b + m[strings.ToLower(b.Name())] = b } // Get returns the resolver builder registered with the given name. -// If no builder is register with the name, the default pickfirst will -// be used. +// Note that the compare is done in a case-insensitive fashion. +// If no builder is registered with the name, nil will be returned. func Get(name string) Builder { - if b, ok := m[name]; ok { + if b, ok := m[strings.ToLower(name)]; ok { return b } - return defaultBuilder + return nil } // SubConn represents a gRPC sub connection. @@ -66,6 +69,11 @@ func Get(name string) Builder { // When the connection encounters an error, it will reconnect immediately. // When the connection becomes IDLE, it will not reconnect unless Connect is // called. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. type SubConn interface { // UpdateAddresses updates the addresses used in this SubConn. // gRPC checks if currently-connected address is still in the new list. @@ -83,6 +91,11 @@ type SubConn interface { type NewSubConnOptions struct{} // ClientConn represents a gRPC ClientConn. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. type ClientConn interface { // NewSubConn is called by balancer to create a new SubConn. 
// It doesn't block and wait for the connections to be established. @@ -99,6 +112,9 @@ type ClientConn interface { // on the new picker to pick new SubConn. UpdateBalancerState(s connectivity.State, p Picker) + // ResolveNow is called by balancer to notify gRPC to do a name resolving. + ResolveNow(resolver.ResolveNowOption) + // Target returns the dial target for this ClientConn. Target() string } @@ -113,6 +129,8 @@ type BuildOptions struct { // to a remote load balancer server. The Balancer implementations // can ignore this if it doesn't need to talk to remote balancer. Dialer func(context.Context, string) (net.Conn, error) + // ChannelzParentID is the entity parent's channelz unique identification number. + ChannelzParentID int64 } // Builder creates a balancer. @@ -131,6 +149,10 @@ type PickOptions struct{} type DoneInfo struct { // Err is the rpc error the RPC finished with. It could be nil. Err error + // BytesSent indicates if any bytes have been sent to the server. + BytesSent bool + // BytesReceived indicates if any byte has been received from the server. + BytesReceived bool } var ( @@ -143,7 +165,7 @@ var ( ) // Picker is used by gRPC to pick a SubConn to send an RPC. -// Balancer is expected to generate a new picker from its snapshot everytime its +// Balancer is expected to generate a new picker from its snapshot every time its // internal state has changed. // // The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState(). @@ -161,7 +183,7 @@ type Picker interface { // If a SubConn is returned: // - If it is READY, gRPC will send the RPC on it; // - If it is not ready, or becomes not ready after it's returned, gRPC will block - // this call until a new picker is updated and will call pick on the new picker. + // until UpdateBalancerState() is called and will call pick on the new picker. // // If the returned error is not nil: // - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState() @@ -204,3 +226,45 @@ type Balancer interface { // ClientConn.RemoveSubConn for its existing SubConns. Close() } + +// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns +// and returns one aggregated connectivity state. +// +// It's not thread safe. +type ConnectivityStateEvaluator struct { + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transientFailure. +} + +// RecordTransition records state change happening in subConn and based on that +// it evaluates what aggregated state should be. +// +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; +// - Else the aggregated state is TransientFailure. +// +// Idle and Shutdown are not considered. +func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + } + } + + // Evaluate. 
+ if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + return connectivity.TransientFailure +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/balancer/base/balancer.go b/cluster-autoscaler/vendor/google.golang.org/grpc/balancer/base/balancer.go new file mode 100644 index 000000000000..23d13511bb2f --- /dev/null +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -0,0 +1,208 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package base + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +type baseBuilder struct { + name string + pickerBuilder PickerBuilder +} + +func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + return &baseBalancer{ + cc: cc, + pickerBuilder: bb.pickerBuilder, + + subConns: make(map[resolver.Address]balancer.SubConn), + scStates: make(map[balancer.SubConn]connectivity.State), + csEvltr: &connectivityStateEvaluator{}, + // Initialize picker to a picker that always return + // ErrNoSubConnAvailable, because when state of a SubConn changes, we + // may call UpdateBalancerState with this picker. + picker: NewErrPicker(balancer.ErrNoSubConnAvailable), + } +} + +func (bb *baseBuilder) Name() string { + return bb.name +} + +type baseBalancer struct { + cc balancer.ClientConn + pickerBuilder PickerBuilder + + csEvltr *connectivityStateEvaluator + state connectivity.State + + subConns map[resolver.Address]balancer.SubConn + scStates map[balancer.SubConn]connectivity.State + picker balancer.Picker +} + +func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { + if err != nil { + grpclog.Infof("base.baseBalancer: HandleResolvedAddrs called with error %v", err) + return + } + grpclog.Infoln("base.baseBalancer: got new resolved addresses: ", addrs) + // addrsSet is the set converted from addrs, it's used for quick lookup of an address. + addrsSet := make(map[resolver.Address]struct{}) + for _, a := range addrs { + addrsSet[a] = struct{}{} + if _, ok := b.subConns[a]; !ok { + // a is a new address (not existing in b.subConns). + sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) + continue + } + b.subConns[a] = sc + b.scStates[sc] = connectivity.Idle + sc.Connect() + } + } + for a, sc := range b.subConns { + // a was removed by resolver. + if _, ok := addrsSet[a]; !ok { + b.cc.RemoveSubConn(sc) + delete(b.subConns, a) + // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. + // The entry will be deleted in HandleSubConnStateChange. + } + } +} + +// regeneratePicker takes a snapshot of the balancer, and generates a picker +// from it. 
The picker is +// - errPicker with ErrTransientFailure if the balancer is in TransientFailure, +// - built by the pickerBuilder with all READY SubConns otherwise. +func (b *baseBalancer) regeneratePicker() { + if b.state == connectivity.TransientFailure { + b.picker = NewErrPicker(balancer.ErrTransientFailure) + return + } + readySCs := make(map[resolver.Address]balancer.SubConn) + + // Filter out all ready SCs from full subConn map. + for addr, sc := range b.subConns { + if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { + readySCs[addr] = sc + } + } + b.picker = b.pickerBuilder.Build(readySCs) +} + +func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) + oldS, ok := b.scStates[sc] + if !ok { + grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + return + } + b.scStates[sc] = s + switch s { + case connectivity.Idle: + sc.Connect() + case connectivity.Shutdown: + // When an address was removed by resolver, b called RemoveSubConn but + // kept the sc's state in scStates. Remove state for this sc here. + delete(b.scStates, sc) + } + + oldAggrState := b.state + b.state = b.csEvltr.recordTransition(oldS, s) + + // Regenerate picker when one of the following happens: + // - this sc became ready from not-ready + // - this sc became not-ready from ready + // - the aggregated state of balancer became TransientFailure from non-TransientFailure + // - the aggregated state of balancer became non-TransientFailure from TransientFailure + if (s == connectivity.Ready) != (oldS == connectivity.Ready) || + (b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) { + b.regeneratePicker() + } + + b.cc.UpdateBalancerState(b.state, b.picker) +} + +// Close is a nop because base balancer doesn't have internal state to clean up, +// and it doesn't need to call RemoveSubConn for the SubConns. +func (b *baseBalancer) Close() { +} + +// NewErrPicker returns a picker that always returns err on Pick(). +func NewErrPicker(err error) balancer.Picker { + return &errPicker{err: err} +} + +type errPicker struct { + err error // Pick() always returns this err. +} + +func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + return nil, nil, p.err +} + +// connectivityStateEvaluator gets updated by addrConns when their +// states transition, based on which it evaluates the state of +// ClientConn. +type connectivityStateEvaluator struct { + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transientFailure. +} + +// recordTransition records state change happening in every subConn and based on +// that it evaluates what aggregated state should be. +// It can only transition between Ready, Connecting and TransientFailure. Other states, +// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection +// before any subConn is created ClientConn is in idle state. In the end when ClientConn +// closes it is in Shutdown state. +// +// recordTransition should only be called synchronously from the same goroutine. +func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. 
+ for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + } + } + + // Evaluate. + if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + return connectivity.TransientFailure +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/balancer/base/base.go b/cluster-autoscaler/vendor/google.golang.org/grpc/balancer/base/base.go new file mode 100644 index 000000000000..012ace2f2f78 --- /dev/null +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/balancer/base/base.go @@ -0,0 +1,52 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package base defines a balancer base that can be used to build balancers with +// different picking algorithms. +// +// The base balancer creates a new SubConn for each resolved address. The +// provided picker will only be notified about READY SubConns. +// +// This package is the base of round_robin balancer, its purpose is to be used +// to build round_robin like balancers with complex picking algorithms. +// Balancers with more complicated logic should try to implement a balancer +// builder from scratch. +// +// All APIs in this package are experimental. +package base + +import ( + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/resolver" +) + +// PickerBuilder creates balancer.Picker. +type PickerBuilder interface { + // Build takes a slice of ready SubConns, and returns a picker that will be + // used by gRPC to pick a SubConn. + Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker +} + +// NewBalancerBuilder returns a balancer builder. The balancers +// built by this builder will use the picker builder to build pickers. +func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder { + return &baseBuilder{ + name: name, + pickerBuilder: pb, + } +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/cluster-autoscaler/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go new file mode 100644 index 000000000000..2eda0a1c2107 --- /dev/null +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -0,0 +1,79 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package roundrobin defines a roundrobin balancer. Roundrobin balancer is +// installed as one of the default balancers in gRPC, users don't need to +// explicitly install this balancer. +package roundrobin + +import ( + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +// Name is the name of round_robin balancer. +const Name = "round_robin" + +// newBuilder creates a new roundrobin balancer builder. +func newBuilder() balancer.Builder { + return base.NewBalancerBuilder(Name, &rrPickerBuilder{}) +} + +func init() { + balancer.Register(newBuilder()) +} + +type rrPickerBuilder struct{} + +func (*rrPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker { + grpclog.Infof("roundrobinPicker: newPicker called with readySCs: %v", readySCs) + var scs []balancer.SubConn + for _, sc := range readySCs { + scs = append(scs, sc) + } + return &rrPicker{ + subConns: scs, + } +} + +type rrPicker struct { + // subConns is the snapshot of the roundrobin balancer when this picker was + // created. The slice is immutable. Each Get() will do a round robin + // selection from it and return the selected SubConn. + subConns []balancer.SubConn + + mu sync.Mutex + next int +} + +func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + if len(p.subConns) <= 0 { + return nil, nil, balancer.ErrNoSubConnAvailable + } + + p.mu.Lock() + sc := p.subConns[p.next] + p.next = (p.next + 1) % len(p.subConns) + p.mu.Unlock() + return sc, nil, nil +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/cluster-autoscaler/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index f5dbc4ba201f..c23f81706fba 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -19,6 +19,7 @@ package grpc import ( + "fmt" "sync" "google.golang.org/grpc/balancer" @@ -73,7 +74,7 @@ func (b *scStateUpdateBuffer) load() { } } -// get returns the channel that receives a recvMsg in the buffer. +// get returns the channel that the scStateUpdate will be sent to. // // Upon receiving, the caller should call load to send another // scStateChangeTuple onto the channel if there is any. @@ -96,6 +97,9 @@ type ccBalancerWrapper struct { stateChangeQueue *scStateUpdateBuffer resolverUpdateCh chan *resolverUpdate done chan struct{} + + mu sync.Mutex + subConns map[*acBalancerWrapper]struct{} } func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { @@ -104,21 +108,34 @@ func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.Bui stateChangeQueue: newSCStateUpdateBuffer(), resolverUpdateCh: make(chan *resolverUpdate, 1), done: make(chan struct{}), + subConns: make(map[*acBalancerWrapper]struct{}), } go ccb.watcher() ccb.balancer = b.Build(ccb, bopts) return ccb } -// watcher balancer functions sequencially, so the balancer can be implemeneted +// watcher balancer functions sequentially, so the balancer can be implemented // lock-free. 
func (ccb *ccBalancerWrapper) watcher() { for { select { case t := <-ccb.stateChangeQueue.get(): ccb.stateChangeQueue.load() + select { + case <-ccb.done: + ccb.balancer.Close() + return + default: + } ccb.balancer.HandleSubConnStateChange(t.sc, t.state) case t := <-ccb.resolverUpdateCh: + select { + case <-ccb.done: + ccb.balancer.Close() + return + default: + } ccb.balancer.HandleResolvedAddrs(t.addrs, t.err) case <-ccb.done: } @@ -126,6 +143,13 @@ func (ccb *ccBalancerWrapper) watcher() { select { case <-ccb.done: ccb.balancer.Close() + ccb.mu.Lock() + scs := ccb.subConns + ccb.subConns = nil + ccb.mu.Unlock() + for acbw := range scs { + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) + } return default: } @@ -165,33 +189,54 @@ func (ccb *ccBalancerWrapper) handleResolvedAddrs(addrs []resolver.Address, err } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - grpclog.Infof("ccBalancerWrapper: new subconn: %v", addrs) + if len(addrs) <= 0 { + return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") + } + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed") + } ac, err := ccb.cc.newAddrConn(addrs) if err != nil { return nil, err } acbw := &acBalancerWrapper{ac: ac} - ac.mu.Lock() + acbw.ac.mu.Lock() ac.acbw = acbw - ac.mu.Unlock() + acbw.ac.mu.Unlock() + ccb.subConns[acbw] = struct{}{} return acbw, nil } func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - grpclog.Infof("ccBalancerWrapper: removing subconn") acbw, ok := sc.(*acBalancerWrapper) if !ok { return } + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return + } + delete(ccb.subConns, acbw) ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) } func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) { - grpclog.Infof("ccBalancerWrapper: updating state and picker called by balancer: %v, %p", s, p) + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return + } ccb.cc.csMgr.updateState(s) ccb.cc.blockingpicker.updatePicker(p) } +func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOption) { + ccb.cc.resolveNow(o) +} + func (ccb *ccBalancerWrapper) Target() string { return ccb.cc.target } @@ -204,9 +249,12 @@ type acBalancerWrapper struct { } func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { - grpclog.Infof("acBalancerWrapper: UpdateAddresses called with %v", addrs) acbw.mu.Lock() defer acbw.mu.Unlock() + if len(addrs) <= 0 { + acbw.ac.tearDown(errConnDrain) + return + } if !acbw.ac.tryUpdateAddrs(addrs) { cc := acbw.ac.cc acbw.ac.mu.Lock() @@ -234,7 +282,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { ac.acbw = acbw ac.mu.Unlock() if acState != connectivity.Idle { - ac.connect(false) + ac.connect() } } } @@ -242,7 +290,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { func (acbw *acBalancerWrapper) Connect() { acbw.mu.Lock() defer acbw.mu.Unlock() - acbw.ac.connect(false) + acbw.ac.connect() } func (acbw *acBalancerWrapper) getAddrConn() *addrConn { diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/cluster-autoscaler/vendor/google.golang.org/grpc/balancer_v1_wrapper.go index 9d0616080a1b..e0ce32cfb6d4 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/balancer_v1_wrapper.go +++ 
b/cluster-autoscaler/vendor/google.golang.org/grpc/balancer_v1_wrapper.go @@ -19,6 +19,7 @@ package grpc import ( + "strings" "sync" "golang.org/x/net/context" @@ -27,6 +28,7 @@ import ( "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/status" ) type balancerWrapperBuilder struct { @@ -34,20 +36,27 @@ type balancerWrapperBuilder struct { } func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - bwb.b.Start(cc.Target(), BalancerConfig{ + targetAddr := cc.Target() + targetSplitted := strings.Split(targetAddr, ":///") + if len(targetSplitted) >= 2 { + targetAddr = targetSplitted[1] + } + + bwb.b.Start(targetAddr, BalancerConfig{ DialCreds: opts.DialCreds, Dialer: opts.Dialer, }) _, pickfirst := bwb.b.(*pickFirst) bw := &balancerWrapper{ - balancer: bwb.b, - pickfirst: pickfirst, - cc: cc, - startCh: make(chan struct{}), - conns: make(map[resolver.Address]balancer.SubConn), - connSt: make(map[balancer.SubConn]*scState), - csEvltr: &connectivityStateEvaluator{}, - state: connectivity.Idle, + balancer: bwb.b, + pickfirst: pickfirst, + cc: cc, + targetAddr: targetAddr, + startCh: make(chan struct{}), + conns: make(map[resolver.Address]balancer.SubConn), + connSt: make(map[balancer.SubConn]*scState), + csEvltr: &balancer.ConnectivityStateEvaluator{}, + state: connectivity.Idle, } cc.UpdateBalancerState(connectivity.Idle, bw) go bw.lbWatcher() @@ -68,11 +77,8 @@ type balancerWrapper struct { balancer Balancer // The v1 balancer. pickfirst bool - cc balancer.ClientConn - - // To aggregate the connectivity state. - csEvltr *connectivityStateEvaluator - state connectivity.State + cc balancer.ClientConn + targetAddr string // Target without the scheme. mu sync.Mutex conns map[resolver.Address]balancer.SubConn @@ -82,18 +88,21 @@ type balancerWrapper struct { // - NewSubConn is created, cc wants to notify balancer of state changes; // - Build hasn't return, cc doesn't have access to balancer. startCh chan struct{} + + // To aggregate the connectivity state. + csEvltr *balancer.ConnectivityStateEvaluator + state connectivity.State } // lbWatcher watches the Notify channel of the balancer and manages // connections accordingly. func (bw *balancerWrapper) lbWatcher() { <-bw.startCh - grpclog.Infof("balancerWrapper: is pickfirst: %v\n", bw.pickfirst) notifyCh := bw.balancer.Notify() if notifyCh == nil { // There's no resolver in the balancer. Connect directly. 
a := resolver.Address{ - Addr: bw.cc.Target(), + Addr: bw.targetAddr, Type: resolver.Backend, } sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) @@ -103,7 +112,7 @@ func (bw *balancerWrapper) lbWatcher() { bw.mu.Lock() bw.conns[a] = sc bw.connSt[sc] = &scState{ - addr: Address{Addr: bw.cc.Target()}, + addr: Address{Addr: bw.targetAddr}, s: connectivity.Idle, } bw.mu.Unlock() @@ -165,10 +174,10 @@ func (bw *balancerWrapper) lbWatcher() { sc.Connect() } } else { - oldSC.UpdateAddresses(newAddrs) bw.mu.Lock() bw.connSt[oldSC].addr = addrs[0] bw.mu.Unlock() + oldSC.UpdateAddresses(newAddrs) } } else { var ( @@ -221,7 +230,6 @@ func (bw *balancerWrapper) lbWatcher() { } func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - grpclog.Infof("balancerWrapper: handle subconn state change: %p, %v", sc, s) bw.mu.Lock() defer bw.mu.Unlock() scSt, ok := bw.connSt[sc] @@ -240,7 +248,7 @@ func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s conne scSt.down(errConnClosing) } } - sa := bw.csEvltr.recordTransition(oldS, s) + sa := bw.csEvltr.RecordTransition(oldS, s) if bw.state != sa { bw.state = sa } @@ -249,7 +257,6 @@ func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s conne // Remove state for this sc. delete(bw.connSt, sc) } - return } func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) { @@ -262,7 +269,6 @@ func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) { } // There should be a resolver inside the balancer. // All updates here, if any, are ignored. - return } func (bw *balancerWrapper) Close() { @@ -274,7 +280,6 @@ func (bw *balancerWrapper) Close() { close(bw.startCh) } bw.balancer.Close() - return } // The picker is the balancerWrapper itself. @@ -310,58 +315,14 @@ func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) Metadata: a.Metadata, }] if !ok && failfast { - return nil, nil, Errorf(codes.Unavailable, "there is no connection available") + return nil, nil, status.Errorf(codes.Unavailable, "there is no connection available") } if s, ok := bw.connSt[sc]; failfast && (!ok || s.s != connectivity.Ready) { // If the returned sc is not ready and RPC is failfast, // return error, and this RPC will fail. - return nil, nil, Errorf(codes.Unavailable, "there is no connection available") + return nil, nil, status.Errorf(codes.Unavailable, "there is no connection available") } } return sc, done, nil } - -// connectivityStateEvaluator gets updated by addrConns when their -// states transition, based on which it evaluates the state of -// ClientConn. -type connectivityStateEvaluator struct { - mu sync.Mutex - numReady uint64 // Number of addrConns in ready state. - numConnecting uint64 // Number of addrConns in connecting state. - numTransientFailure uint64 // Number of addrConns in transientFailure. -} - -// recordTransition records state change happening in every subConn and based on -// that it evaluates what aggregated state should be. -// It can only transition between Ready, Connecting and TransientFailure. Other states, -// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection -// before any subConn is created ClientConn is in idle state. In the end when ClientConn -// closes it is in Shutdown state. -// TODO Note that in later releases, a ClientConn with no activity will be put into an Idle state. 
-func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State { - cse.mu.Lock() - defer cse.mu.Unlock() - - // Update counters. - for idx, state := range []connectivity.State{oldState, newState} { - updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. - switch state { - case connectivity.Ready: - cse.numReady += updateVal - case connectivity.Connecting: - cse.numConnecting += updateVal - case connectivity.TransientFailure: - cse.numTransientFailure += updateVal - } - } - - // Evaluate. - if cse.numReady > 0 { - return connectivity.Ready - } - if cse.numConnecting > 0 { - return connectivity.Connecting - } - return connectivity.TransientFailure -} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/call.go b/cluster-autoscaler/vendor/google.golang.org/grpc/call.go index 1ef2507c35f3..f73b7d5528f5 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/call.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/call.go @@ -19,289 +19,75 @@ package grpc import ( - "bytes" - "io" - "time" - "golang.org/x/net/context" - "golang.org/x/net/trace" - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/peer" - "google.golang.org/grpc/stats" - "google.golang.org/grpc/status" - "google.golang.org/grpc/transport" ) -// recvResponse receives and parses an RPC response. -// On error, it returns the error and indicates whether the call should be retried. +// Invoke sends the RPC request on the wire and returns after response is +// received. This is typically called by generated code. // -// TODO(zhaoq): Check whether the received message sequence is valid. -// TODO ctx is used for stats collection and processing. It is the context passed from the application. -func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) (err error) { - // Try to acquire header metadata from the server if there is any. - defer func() { - if err != nil { - if _, ok := err.(transport.ConnectionError); !ok { - t.CloseStream(stream, err) - } - } - }() - c.headerMD, err = stream.Header() - if err != nil { - return - } - p := &parser{r: stream} - var inPayload *stats.InPayload - if dopts.copts.StatsHandler != nil { - inPayload = &stats.InPayload{ - Client: true, - } - } - for { - if c.maxReceiveMessageSize == nil { - return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)") - } - if err = recv(p, dopts.codec, stream, dopts.dc, reply, *c.maxReceiveMessageSize, inPayload); err != nil { - if err == io.EOF { - break - } - return - } - } - if inPayload != nil && err == io.EOF && stream.Status().Code() == codes.OK { - // TODO in the current implementation, inTrailer may be handled before inPayload in some cases. - // Fix the order if necessary. - dopts.copts.StatsHandler.HandleRPC(ctx, inPayload) - } - c.trailerMD = stream.Trailer() - return nil -} +// All errors returned by Invoke are compatible with the status package. +func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) -// sendRequest writes out various information of an RPC such as Context and Message. 
-func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, c *callInfo, callHdr *transport.CallHdr, stream *transport.Stream, t transport.ClientTransport, args interface{}, opts *transport.Options) (err error) { - defer func() { - if err != nil { - // If err is connection error, t will be closed, no need to close stream here. - if _, ok := err.(transport.ConnectionError); !ok { - t.CloseStream(stream, err) - } - } - }() - var ( - cbuf *bytes.Buffer - outPayload *stats.OutPayload - ) - if compressor != nil { - cbuf = new(bytes.Buffer) - } - if dopts.copts.StatsHandler != nil { - outPayload = &stats.OutPayload{ - Client: true, - } - } - hdr, data, err := encode(dopts.codec, args, compressor, cbuf, outPayload) - if err != nil { - return err - } - if c.maxSendMessageSize == nil { - return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)") - } - if len(data) > *c.maxSendMessageSize { - return Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(data), *c.maxSendMessageSize) - } - err = t.Write(stream, hdr, data, opts) - if err == nil && outPayload != nil { - outPayload.SentTime = time.Now() - dopts.copts.StatsHandler.HandleRPC(ctx, outPayload) - } - // t.NewStream(...) could lead to an early rejection of the RPC (e.g., the service/method - // does not exist.) so that t.Write could get io.EOF from wait(...). Leave the following - // recvResponse to get the final status. - if err != nil && err != io.EOF { - return err - } - // Sent successfully. - return nil -} - -// Invoke sends the RPC request on the wire and returns after response is received. -// Invoke is called by generated code. Also users can call Invoke directly when it -// is really needed in their use cases. -func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { if cc.dopts.unaryInt != nil { return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...) } return invoke(ctx, method, args, reply, cc, opts...) } -func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) { - c := defaultCallInfo() - mc := cc.GetMethodConfig(method) - if mc.WaitForReady != nil { - c.failFast = !*mc.WaitForReady - } - - if mc.Timeout != nil && *mc.Timeout >= 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) - defer cancel() - } +func combine(o1 []CallOption, o2 []CallOption) []CallOption { + // we don't use append because o1 could have extra capacity whose + // elements would be overwritten, which could cause inadvertent + // sharing (and race conditions) between concurrent calls + if len(o1) == 0 { + return o2 + } else if len(o2) == 0 { + return o1 + } + ret := make([]CallOption, len(o1)+len(o2)) + copy(ret, o1) + copy(ret[len(o1):], o2) + return ret +} - opts = append(cc.dopts.callOptions, opts...) - for _, o := range opts { - if err := o.before(c); err != nil { - return toRPCErr(err) - } - } - defer func() { - for _, o := range opts { - o.after(c) - } - }() +// Invoke sends the RPC request on the wire and returns after response is +// received. This is typically called by generated code. +// +// DEPRECATED: Use ClientConn.Invoke instead. +func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { + return cc.Invoke(ctx, method, args, reply, opts...) 
+} - c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) - c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) +var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} - if EnableTracing { - c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) - defer c.traceInfo.tr.Finish() - c.traceInfo.firstLine.client = true - if deadline, ok := ctx.Deadline(); ok { - c.traceInfo.firstLine.deadline = deadline.Sub(time.Now()) - } - c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false) - // TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set. - defer func() { - if e != nil { - c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{e}}, true) - c.traceInfo.tr.SetError() - } - }() - } - ctx = newContextWithRPCInfo(ctx, c.failFast) - sh := cc.dopts.copts.StatsHandler - if sh != nil { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) - begin := &stats.Begin{ - Client: true, - BeginTime: time.Now(), - FailFast: c.failFast, - } - sh.HandleRPC(ctx, begin) - defer func() { - end := &stats.End{ - Client: true, - EndTime: time.Now(), - Error: e, - } - sh.HandleRPC(ctx, end) - }() - } - topts := &transport.Options{ - Last: true, - Delay: false, - } +func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { + // TODO: implement retries in clientStream and make this simply + // newClientStream, SendMsg, RecvMsg. + firstAttempt := true for { - var ( - err error - t transport.ClientTransport - stream *transport.Stream - // Record the done handler from Balancer.Get(...). It is called once the - // RPC has completed or failed. - done func(balancer.DoneInfo) - ) - // TODO(zhaoq): Need a formal spec of fail-fast. - callHdr := &transport.CallHdr{ - Host: cc.authority, - Method: method, - } - if cc.dopts.cp != nil { - callHdr.SendCompress = cc.dopts.cp.Type() - } - if c.creds != nil { - callHdr.Creds = c.creds - } - - t, done, err = cc.getTransport(ctx, c.failFast) + csInt, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) if err != nil { - // TODO(zhaoq): Probably revisit the error handling. - if _, ok := status.FromError(err); ok { - return err - } - if err == errConnClosing || err == errConnUnavailable { - if c.failFast { - return Errorf(codes.Unavailable, "%v", err) - } - continue - } - // All the other errors are treated as Internal errors. - return Errorf(codes.Internal, "%v", err) - } - if c.traceInfo.tr != nil { - c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true) - } - stream, err = t.NewStream(ctx, callHdr) - if err != nil { - if done != nil { - if _, ok := err.(transport.ConnectionError); ok { - // If error is connection error, transport was sending data on wire, - // and we are not sure if anything has been sent on wire. - // If error is not connection error, we are sure nothing has been sent. 
- updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false}) - } - done(balancer.DoneInfo{Err: err}) - } - if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { - continue - } - return toRPCErr(err) - } - if peer, ok := peer.FromContext(stream.Context()); ok { - c.peer = peer + return err } - err = sendRequest(ctx, cc.dopts, cc.dopts.cp, c, callHdr, stream, t, args, topts) - if err != nil { - if done != nil { - updateRPCInfoInContext(ctx, rpcInfo{ - bytesSent: stream.BytesSent(), - bytesReceived: stream.BytesReceived(), - }) - done(balancer.DoneInfo{Err: err}) - } - // Retry a non-failfast RPC when - // i) there is a connection error; or - // ii) the server started to drain before this RPC was initiated. - if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { + cs := csInt.(*clientStream) + if err := cs.SendMsg(req); err != nil { + if !cs.c.failFast && cs.attempt.s.Unprocessed() && firstAttempt { + // TODO: Add a field to header for grpc-transparent-retry-attempts + firstAttempt = false continue } - return toRPCErr(err) + return err } - err = recvResponse(ctx, cc.dopts, t, c, stream, reply) - if err != nil { - if done != nil { - updateRPCInfoInContext(ctx, rpcInfo{ - bytesSent: stream.BytesSent(), - bytesReceived: stream.BytesReceived(), - }) - done(balancer.DoneInfo{Err: err}) - } - if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { + if err := cs.RecvMsg(reply); err != nil { + if !cs.c.failFast && cs.attempt.s.Unprocessed() && firstAttempt { + // TODO: Add a field to header for grpc-transparent-retry-attempts + firstAttempt = false continue } - return toRPCErr(err) - } - if c.traceInfo.tr != nil { - c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true) - } - t.CloseStream(stream, nil) - if done != nil { - updateRPCInfoInContext(ctx, rpcInfo{ - bytesSent: stream.BytesSent(), - bytesReceived: stream.BytesReceived(), - }) - done(balancer.DoneInfo{Err: err}) + return err } - return stream.Status().Err() + return nil } } diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/clientconn.go b/cluster-autoscaler/vendor/google.golang.org/grpc/clientconn.go index 71de2e50d2bd..84ba9e5ad72b 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/clientconn.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/clientconn.go @@ -31,24 +31,54 @@ import ( "golang.org/x/net/context" "golang.org/x/net/trace" "google.golang.org/grpc/balancer" + _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. + "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" + _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. + _ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver. "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" "google.golang.org/grpc/transport" ) +const ( + // minimum time to give a connection to complete + minConnectTimeout = 20 * time.Second + // must match grpclbName in grpclb/grpclb.go + grpclbName = "grpclb" +) + var ( // ErrClientConnClosing indicates that the operation is illegal because // the ClientConn is closing. 
- ErrClientConnClosing = errors.New("grpc: the client connection is closing") - // ErrClientConnTimeout indicates that the ClientConn cannot establish the - // underlying connections within the specified timeout. - // DEPRECATED: Please use context.DeadlineExceeded instead. - ErrClientConnTimeout = errors.New("grpc: timed out when dialing") + // + // Deprecated: this error should not be relied upon by users; use the status + // code of Canceled instead. + ErrClientConnClosing = status.Error(codes.Canceled, "grpc: the client connection is closing") + // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs. + errConnDrain = errors.New("grpc: the connection is drained") + // errConnClosing indicates that the connection is closing. + errConnClosing = errors.New("grpc: the connection is closing") + // errConnUnavailable indicates that the connection is unavailable. + errConnUnavailable = errors.New("grpc: the connection is unavailable") + // errBalancerClosed indicates that the balancer is closed. + errBalancerClosed = errors.New("grpc: balancer is closed") + // We use an accessor so that minConnectTimeout can be + // atomically read and updated while testing. + getMinConnectTimeout = func() time.Duration { + return minConnectTimeout + } +) +// The following errors are returned from Dial and DialContext +var ( // errNoTransportSecurity indicates that there is no transport security // being set for ClientConn. Users should either set one or explicitly // call WithInsecure DialOption to disable security. @@ -62,16 +92,6 @@ var ( errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)") // errNetworkIO indicates that the connection is down due to some network I/O error. errNetworkIO = errors.New("grpc: failed with network I/O error") - // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs. - errConnDrain = errors.New("grpc: the connection is drained") - // errConnClosing indicates that the connection is closing. - errConnClosing = errors.New("grpc: the connection is closing") - // errConnUnavailable indicates that the connection is unavailable. - errConnUnavailable = errors.New("grpc: the connection is unavailable") - // errBalancerClosed indicates that the balancer is closed. - errBalancerClosed = errors.New("grpc: balancer is closed") - // minimum time to give a connection to complete - minConnectTimeout = 20 * time.Second ) // dialOptions configure a Dial call. dialOptions are set by the DialOption @@ -79,18 +99,23 @@ var ( type dialOptions struct { unaryInt UnaryClientInterceptor streamInt StreamClientInterceptor - codec Codec cp Compressor dc Decompressor - bs backoffStrategy + bs backoff.Strategy block bool insecure bool timeout time.Duration scChan <-chan ServiceConfig copts transport.ConnectOptions callOptions []CallOption - // This is to support v1 balancer. + // This is used by v1 balancer dial option WithBalancer to support v1 + // balancer, and also by WithBalancerName dial option. balancerBuilder balancer.Builder + // This is to support grpclb. + resolverBuilder resolver.Builder + waitForHandshake bool + channelzParentID int64 + disableServiceConfig bool } const ( @@ -98,9 +123,24 @@ const ( defaultClientMaxSendMessageSize = math.MaxInt32 ) +// RegisterChannelz turns on channelz service. +// This is an EXPERIMENTAL API. 
+func RegisterChannelz() { + channelz.TurnOn() +} + // DialOption configures how we set up the connection. type DialOption func(*dialOptions) +// WithWaitForHandshake blocks until the initial settings frame is received from the +// server before assigning RPCs to the connection. +// Experimental API. +func WithWaitForHandshake() DialOption { + return func(o *dialOptions) { + o.waitForHandshake = true + } +} + // WithWriteBufferSize lets you set the size of write buffer, this determines how much data can be batched // before doing a write on the wire. func WithWriteBufferSize(s int) DialOption { @@ -133,7 +173,9 @@ func WithInitialConnWindowSize(s int32) DialOption { } } -// WithMaxMsgSize returns a DialOption which sets the maximum message size the client can receive. Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. +// WithMaxMsgSize returns a DialOption which sets the maximum message size the client can receive. +// +// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. func WithMaxMsgSize(s int) DialOption { return WithDefaultCallOptions(MaxCallRecvMsgSize(s)) } @@ -146,22 +188,32 @@ func WithDefaultCallOptions(cos ...CallOption) DialOption { } // WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling. +// +// Deprecated: use WithDefaultCallOptions(CallCustomCodec(c)) instead. func WithCodec(c Codec) DialOption { - return func(o *dialOptions) { - o.codec = c - } + return WithDefaultCallOptions(CallCustomCodec(c)) } -// WithCompressor returns a DialOption which sets a CompressorGenerator for generating message -// compressor. +// WithCompressor returns a DialOption which sets a Compressor to use for +// message compression. It has lower priority than the compressor set by +// the UseCompressor CallOption. +// +// Deprecated: use UseCompressor instead. func WithCompressor(cp Compressor) DialOption { return func(o *dialOptions) { o.cp = cp } } -// WithDecompressor returns a DialOption which sets a DecompressorGenerator for generating -// message decompressor. +// WithDecompressor returns a DialOption which sets a Decompressor to use for +// incoming message decompression. If incoming response messages are encoded +// using the decompressor's Type(), it will be used. Otherwise, the message +// encoding will be used to look up the compressor registered via +// encoding.RegisterCompressor, which will then be used to decompress the +// message. If no compressor is registered for the encoding, an Unimplemented +// status error will be returned. +// +// Deprecated: use encoding.RegisterCompressor instead. func WithDecompressor(dc Decompressor) DialOption { return func(o *dialOptions) { o.dc = dc @@ -170,7 +222,8 @@ func WithDecompressor(dc Decompressor) DialOption { // WithBalancer returns a DialOption which sets a load balancer with the v1 API. // Name resolver will be ignored if this DialOption is specified. -// Deprecated: use the new balancer APIs in balancer package instead. +// +// Deprecated: use the new balancer APIs in balancer package and WithBalancerName. func WithBalancer(b Balancer) DialOption { return func(o *dialOptions) { o.balancerBuilder = &balancerWrapperBuilder{ @@ -179,16 +232,35 @@ func WithBalancer(b Balancer) DialOption { } } -// WithBalancerBuilder is for testing only. Users using custom balancers should -// register their balancer and use service config to choose the balancer to use. 
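Several dial options are deprecated in one pass in the hunks above. A rough migration sketch follows; the endpoint, the 16 MiB limit, and the availability of the encoding/gzip package in this vendored tree are assumptions, not facts from the diff.

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding/gzip" // registers the gzip encoding.Compressor, if vendored
)

func main() {
	// Hypothetical endpoint; only the option spelling matters here.
	conn, err := grpc.Dial("example.com:443",
		grpc.WithInsecure(),
		// was: grpc.WithMaxMsgSize(16<<20) and grpc.WithCompressor(grpc.NewGZIPCompressor())
		grpc.WithDefaultCallOptions(
			grpc.MaxCallRecvMsgSize(16<<20),
			grpc.UseCompressor(gzip.Name),
		),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```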
-func WithBalancerBuilder(b balancer.Builder) DialOption {
-	// TODO(bar) remove this when switching balancer is done.
+// WithBalancerName sets the balancer that the ClientConn will be initialized
+// with. Balancer registered with balancerName will be used. This function
+// panics if no balancer was registered by balancerName.
+//
+// The balancer cannot be overridden by balancer option specified by service
+// config.
+//
+// This is an EXPERIMENTAL API.
+func WithBalancerName(balancerName string) DialOption {
+	builder := balancer.Get(balancerName)
+	if builder == nil {
+		panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName))
+	}
+	return func(o *dialOptions) {
+		o.balancerBuilder = builder
+	}
+}
+
+// withResolverBuilder is only for grpclb.
+func withResolverBuilder(b resolver.Builder) DialOption {
 	return func(o *dialOptions) {
-		o.balancerBuilder = b
+		o.resolverBuilder = b
 	}
 }
 
 // WithServiceConfig returns a DialOption which has a channel to read the service configuration.
+//
+// Deprecated: service config should be received through name resolver, as specified here.
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md
 func WithServiceConfig(c <-chan ServiceConfig) DialOption {
 	return func(o *dialOptions) {
 		o.scChan = c
@@ -207,17 +279,17 @@ func WithBackoffMaxDelay(md time.Duration) DialOption {
 // Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up
 // for use.
 func WithBackoffConfig(b BackoffConfig) DialOption {
-	// Set defaults to ensure that provided BackoffConfig is valid and
-	// unexported fields get default values.
-	setDefaults(&b)
-	return withBackoff(b)
+
+	return withBackoff(backoff.Exponential{
+		MaxDelay: b.MaxDelay,
+	})
 }
 
-// withBackoff sets the backoff strategy used for retries after a
+// withBackoff sets the backoff strategy used for connect retries after a
 // failed connection attempt.
 //
 // This can be exported if arbitrary backoff strategies are allowed by gRPC.
-func withBackoff(bs backoffStrategy) DialOption {
+func withBackoff(bs backoff.Strategy) DialOption {
 	return func(o *dialOptions) {
 		o.bs = bs
 	}
@@ -258,6 +330,7 @@ func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
 
 // WithTimeout returns a DialOption that configures a timeout for dialing a ClientConn
 // initially. This is valid if and only if WithBlock() is present.
+//
 // Deprecated: use DialContext and context.WithTimeout instead.
 func WithTimeout(d time.Duration) DialOption {
 	return func(o *dialOptions) {
@@ -265,18 +338,28 @@
 	}
 }
 
+func withContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
+	return func(o *dialOptions) {
+		o.copts.Dialer = f
+	}
+}
+
+func init() {
+	internal.WithContextDialer = withContextDialer
+	internal.WithResolverBuilder = withResolverBuilder
+}
+
 // WithDialer returns a DialOption that specifies a function to use for dialing network addresses.
 // If FailOnNonTempDialError() is set to true, and an error is returned by f, gRPC checks the error's
 // Temporary() method to decide if it should try to reconnect to the network address.
func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { - return func(o *dialOptions) { - o.copts.Dialer = func(ctx context.Context, addr string) (net.Conn, error) { + return withContextDialer( + func(ctx context.Context, addr string) (net.Conn, error) { if deadline, ok := ctx.Deadline(); ok { return f(addr, deadline.Sub(time.Now())) } return f(addr, 0) - } - } + }) } // WithStatsHandler returns a DialOption that specifies the stats handler @@ -335,15 +418,44 @@ func WithAuthority(a string) DialOption { } } +// WithChannelzParentID returns a DialOption that specifies the channelz ID of current ClientConn's +// parent. This function is used in nested channel creation (e.g. grpclb dial). +func WithChannelzParentID(id int64) DialOption { + return func(o *dialOptions) { + o.channelzParentID = id + } +} + +// WithDisableServiceConfig returns a DialOption that causes grpc to ignore any +// service config provided by the resolver and provides a hint to the resolver +// to not fetch service configs. +func WithDisableServiceConfig() DialOption { + return func(o *dialOptions) { + o.disableServiceConfig = true + } +} + // Dial creates a client connection to the given target. func Dial(target string, opts ...DialOption) (*ClientConn, error) { return DialContext(context.Background(), target, opts...) } -// DialContext creates a client connection to the given target. ctx can be used to -// cancel or expire the pending connection. Once this function returns, the -// cancellation and expiration of ctx will be noop. Users should call ClientConn.Close -// to terminate all the pending operations after this function returns. +// DialContext creates a client connection to the given target. By default, it's +// a non-blocking dial (the function won't wait for connections to be +// established, and connecting happens in the background). To make it a blocking +// dial, use WithBlock() dial option. +// +// In the non-blocking case, the ctx does not act against the connection. It +// only controls the setup steps. +// +// In the blocking case, ctx can be used to cancel or expire the pending +// connection. Once this function returns, the cancellation and expiration of +// ctx will be noop. Users should call ClientConn.Close to terminate all the +// pending operations after this function returns. +// +// The target name syntax is defined in +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. 
func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
 	cc := &ClientConn{
 		target: target,
@@ -358,6 +470,14 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
 		opt(&cc.dopts)
 	}
 
+	if channelz.IsOn() {
+		if cc.dopts.channelzParentID != 0 {
+			cc.channelzID = channelz.RegisterChannel(cc, cc.dopts.channelzParentID, target)
+		} else {
+			cc.channelzID = channelz.RegisterChannel(cc, 0, target)
+		}
+	}
+
 	if !cc.dopts.insecure {
 		if cc.dopts.copts.TransportCredentials == nil {
 			return nil, errNoTransportSecurity
@@ -378,7 +498,8 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
 	if cc.dopts.copts.Dialer == nil {
 		cc.dopts.copts.Dialer = newProxyDialer(
 			func(ctx context.Context, addr string) (net.Conn, error) {
-				return (&net.Dialer{}).DialContext(ctx, "tcp", addr)
+				network, addr := parseDialTarget(addr)
+				return dialContext(ctx, network, addr)
 			},
 		)
 	}
@@ -419,12 +540,29 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
 		default:
 		}
 	}
-	// Set defaults.
-	if cc.dopts.codec == nil {
-		cc.dopts.codec = protoCodec{}
-	}
 	if cc.dopts.bs == nil {
-		cc.dopts.bs = DefaultBackoffConfig
+		cc.dopts.bs = backoff.Exponential{
+			MaxDelay: DefaultBackoffConfig.MaxDelay,
+		}
+	}
+	if cc.dopts.resolverBuilder == nil {
+		// Only try to parse target when resolver builder is not already set.
+		cc.parsedTarget = parseTarget(cc.target)
+		grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme)
+		cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme)
+		if cc.dopts.resolverBuilder == nil {
+			// If resolver builder is still nil, the parsed target's scheme is
+			// not registered. Fallback to default resolver and set Endpoint to
+			// the original unparsed target.
+			grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme)
+			cc.parsedTarget = resolver.Target{
+				Scheme:   resolver.GetDefaultScheme(),
+				Endpoint: target,
+			}
+			cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme)
+		}
+	} else {
+		cc.parsedTarget = resolver.Target{Endpoint: target}
 	}
 	creds := cc.dopts.copts.TransportCredentials
 	if creds != nil && creds.Info().ServerName != "" {
@@ -432,45 +570,11 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
 	} else if cc.dopts.insecure && cc.dopts.copts.Authority != "" {
 		cc.authority = cc.dopts.copts.Authority
 	} else {
-		cc.authority = target
+		// Use endpoint from "scheme://authority/endpoint" as the default
+		// authority for ClientConn.
+		cc.authority = cc.parsedTarget.Endpoint
 	}
-	if cc.dopts.balancerBuilder != nil {
-		var credsClone credentials.TransportCredentials
-		if creds != nil {
-			credsClone = creds.Clone()
-		}
-		buildOpts := balancer.BuildOptions{
-			DialCreds: credsClone,
-			Dialer:    cc.dopts.copts.Dialer,
-		}
-		// Build should not take long time. So it's ok to not have a goroutine for it.
-		// TODO(bar) init balancer after first resolver result to support service config balancer.
-		cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, buildOpts)
-	} else {
-		waitC := make(chan error, 1)
-		go func() {
-			defer close(waitC)
-			// No balancer, or no resolver within the balancer. Connect directly.
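With the target parsing added above, the scheme prefix now selects the resolver, and an unregistered scheme falls back to the default resolver with the whole string as endpoint. A hedged usage sketch (hypothetical service name; WithInsecure only to keep the example short):

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// "dns:///" picks the registered dns resolver. Without WithBlock the
	// dial is non-blocking and ctx only governs setup, not connecting.
	conn, err := grpc.DialContext(ctx, "dns:///my-service.example.com:443",
		grpc.WithInsecure(),
		grpc.WithBlock(), // make ctx bound connection establishment
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}
```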
- ac, err := cc.newAddrConn([]resolver.Address{{Addr: target}}) - if err != nil { - waitC <- err - return - } - if err := ac.connect(cc.dopts.block); err != nil { - waitC <- err - return - } - }() - select { - case <-ctx.Done(): - return nil, ctx.Err() - case err := <-waitC: - if err != nil { - return nil, err - } - } - } if cc.dopts.scChan != nil && !scSet { // Blocking wait for the initial service config. select { @@ -486,19 +590,29 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * go cc.scWatcher() } + var credsClone credentials.TransportCredentials + if creds := cc.dopts.copts.TransportCredentials; creds != nil { + credsClone = creds.Clone() + } + cc.balancerBuildOpts = balancer.BuildOptions{ + DialCreds: credsClone, + Dialer: cc.dopts.copts.Dialer, + ChannelzParentID: cc.channelzID, + } + // Build the resolver. cc.resolverWrapper, err = newCCResolverWrapper(cc) if err != nil { return nil, fmt.Errorf("failed to build resolver: %v", err) } - - if cc.balancerWrapper != nil && cc.resolverWrapper == nil { - // TODO(bar) there should always be a resolver (DNS as the default). - // Unblock balancer initialization with a fake resolver update if there's no resolver. - // The balancer wrapper will not read the addresses, so an empty list works. - // TODO(bar) remove this after the real resolver is started. - cc.balancerWrapper.handleResolvedAddrs([]resolver.Address{}, nil) - } + // Start the resolver wrapper goroutine after resolverWrapper is created. + // + // If the goroutine is started before resolverWrapper is ready, the + // following may happen: The goroutine sends updates to cc. cc forwards + // those to balancer. Balancer creates new addrConn. addrConn fails to + // connect, and calls resolveNow(). resolveNow() tries to use the non-ready + // resolverWrapper. + cc.resolverWrapper.start() // A blocking dial blocks until the clientConn is ready. if cc.dopts.block { @@ -565,21 +679,33 @@ type ClientConn struct { ctx context.Context cancel context.CancelFunc - target string - authority string - dopts dialOptions - csMgr *connectivityStateManager - - balancerWrapper *ccBalancerWrapper - resolverWrapper *ccResolverWrapper + target string + parsedTarget resolver.Target + authority string + dopts dialOptions + csMgr *connectivityStateManager - blockingpicker *pickerWrapper + balancerBuildOpts balancer.BuildOptions + resolverWrapper *ccResolverWrapper + blockingpicker *pickerWrapper mu sync.RWMutex sc ServiceConfig + scRaw string conns map[*addrConn]struct{} // Keepalive parameter can be updated if a GoAway is received. - mkp keepalive.ClientParameters + mkp keepalive.ClientParameters + curBalancerName string + preBalancerName string // previous balancer name. + curAddresses []resolver.Address + balancerWrapper *ccBalancerWrapper + + channelzID int64 // channelz unique identification number + czmu sync.RWMutex + callsStarted int64 + callsSucceeded int64 + callsFailed int64 + lastCallStartedTime time.Time } // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or @@ -615,6 +741,7 @@ func (cc *ClientConn) scWatcher() { // TODO: load balance policy runtime change is ignored. // We may revist this decision in the future. cc.sc = sc + cc.scRaw = "" cc.mu.Unlock() case <-cc.ctx.Done(): return @@ -622,7 +749,115 @@ func (cc *ClientConn) scWatcher() { } } +func (cc *ClientConn) handleResolvedAddrs(addrs []resolver.Address, err error) { + cc.mu.Lock() + defer cc.mu.Unlock() + if cc.conns == nil { + // cc was closed. 
+ return + } + + if reflect.DeepEqual(cc.curAddresses, addrs) { + return + } + + cc.curAddresses = addrs + + if cc.dopts.balancerBuilder == nil { + // Only look at balancer types and switch balancer if balancer dial + // option is not set. + var isGRPCLB bool + for _, a := range addrs { + if a.Type == resolver.GRPCLB { + isGRPCLB = true + break + } + } + var newBalancerName string + if isGRPCLB { + newBalancerName = grpclbName + } else { + // Address list doesn't contain grpclb address. Try to pick a + // non-grpclb balancer. + newBalancerName = cc.curBalancerName + // If current balancer is grpclb, switch to the previous one. + if newBalancerName == grpclbName { + newBalancerName = cc.preBalancerName + } + // The following could be true in two cases: + // - the first time handling resolved addresses + // (curBalancerName="") + // - the first time handling non-grpclb addresses + // (curBalancerName="grpclb", preBalancerName="") + if newBalancerName == "" { + newBalancerName = PickFirstBalancerName + } + } + cc.switchBalancer(newBalancerName) + } else if cc.balancerWrapper == nil { + // Balancer dial option was set, and this is the first time handling + // resolved addresses. Build a balancer with dopts.balancerBuilder. + cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) + } + + cc.balancerWrapper.handleResolvedAddrs(addrs, nil) +} + +// switchBalancer starts the switching from current balancer to the balancer +// with the given name. +// +// It will NOT send the current address list to the new balancer. If needed, +// caller of this function should send address list to the new balancer after +// this function returns. +// +// Caller must hold cc.mu. +func (cc *ClientConn) switchBalancer(name string) { + if cc.conns == nil { + return + } + + if strings.ToLower(cc.curBalancerName) == strings.ToLower(name) { + return + } + + grpclog.Infof("ClientConn switching balancer to %q", name) + if cc.dopts.balancerBuilder != nil { + grpclog.Infoln("ignoring balancer switching: Balancer DialOption used instead") + return + } + // TODO(bar switching) change this to two steps: drain and close. + // Keep track of sc in wrapper. + if cc.balancerWrapper != nil { + cc.balancerWrapper.close() + } + // Clear all stickiness state. + cc.blockingpicker.clearStickinessState() + + builder := balancer.Get(name) + if builder == nil { + grpclog.Infof("failed to get balancer builder for: %v, using pick_first instead", name) + builder = newPickfirstBuilder() + } + cc.preBalancerName = cc.curBalancerName + cc.curBalancerName = builder.Name() + cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) +} + +func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return + } + // TODO(bar switching) send updates to all balancer wrappers when balancer + // gracefully switching is supported. + cc.balancerWrapper.handleSubConnStateChange(sc, s) + cc.mu.Unlock() +} + // newAddrConn creates an addrConn for addrs and adds it to cc.conns. +// +// Caller needs to make sure len(addrs) > 0. 
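The switching rule implemented by handleResolvedAddrs and switchBalancer above can be summarized as a pure function. The sketch below uses local stand-in types, not the real resolver.Address or balancer registry:

```go
package main

import "fmt"

type addrType int

const (
	backend addrType = iota
	grpclbAddr // stands in for resolver.GRPCLB
)

type address struct {
	addr string
	typ  addrType
}

// pickBalancerName mirrors the rule above: any grpclb-typed address selects
// "grpclb"; otherwise keep the current name, restore the previous one when
// leaving grpclb, and default to pick_first on first resolution.
func pickBalancerName(addrs []address, cur, prev string) string {
	for _, a := range addrs {
		if a.typ == grpclbAddr {
			return "grpclb"
		}
	}
	name := cur
	if name == "grpclb" {
		name = prev
	}
	if name == "" {
		name = "pick_first"
	}
	return name
}

func main() {
	fmt.Println(pickBalancerName([]address{{"10.0.0.1:443", grpclbAddr}}, "", ""))         // grpclb
	fmt.Println(pickBalancerName([]address{{"10.0.0.2:443", backend}}, "grpclb", ""))      // pick_first
	fmt.Println(pickBalancerName([]address{{"10.0.0.2:443", backend}}, "round_robin", "")) // round_robin
}
```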
func (cc *ClientConn) newAddrConn(addrs []resolver.Address) (*addrConn, error) { ac := &addrConn{ cc: cc, @@ -636,6 +871,9 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address) (*addrConn, error) { cc.mu.Unlock() return nil, ErrClientConnClosing } + if channelz.IsOn() { + ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") + } cc.conns[ac] = struct{}{} cc.mu.Unlock() return ac, nil @@ -654,12 +892,48 @@ func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) { ac.tearDown(err) } +// ChannelzMetric returns ChannelInternalMetric of current ClientConn. +// This is an EXPERIMENTAL API. +func (cc *ClientConn) ChannelzMetric() *channelz.ChannelInternalMetric { + state := cc.GetState() + cc.czmu.RLock() + defer cc.czmu.RUnlock() + return &channelz.ChannelInternalMetric{ + State: state, + Target: cc.target, + CallsStarted: cc.callsStarted, + CallsSucceeded: cc.callsSucceeded, + CallsFailed: cc.callsFailed, + LastCallStartedTimestamp: cc.lastCallStartedTime, + } +} + +func (cc *ClientConn) incrCallsStarted() { + cc.czmu.Lock() + cc.callsStarted++ + // TODO(yuxuanli): will make this a time.Time pointer improve performance? + cc.lastCallStartedTime = time.Now() + cc.czmu.Unlock() +} + +func (cc *ClientConn) incrCallsSucceeded() { + cc.czmu.Lock() + cc.callsSucceeded++ + cc.czmu.Unlock() +} + +func (cc *ClientConn) incrCallsFailed() { + cc.czmu.Lock() + cc.callsFailed++ + cc.czmu.Unlock() +} + // connect starts to creating transport and also starts the transport monitor // goroutine for this ac. // It does nothing if the ac is not IDLE. // TODO(bar) Move this to the addrConn section. // This was part of resetAddrConn, keep it here to make the diff look clean. -func (ac *addrConn) connect(block bool) error { +func (ac *addrConn) connect() error { ac.mu.Lock() if ac.state == connectivity.Shutdown { ac.mu.Unlock() @@ -670,39 +944,21 @@ func (ac *addrConn) connect(block bool) error { return nil } ac.state = connectivity.Connecting - if ac.cc.balancerWrapper != nil { - ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) - } else { - ac.cc.csMgr.updateState(ac.state) - } + ac.cc.handleSubConnStateChange(ac.acbw, ac.state) ac.mu.Unlock() - if block { + // Start a goroutine connecting to the server asynchronously. + go func() { if err := ac.resetTransport(); err != nil { + grpclog.Warningf("Failed to dial %s: %v; please retry.", ac.addrs[0].Addr, err) if err != errConnClosing { + // Keep this ac in cc.conns, to get the reason it's torn down. ac.tearDown(err) } - if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() { - return e.Origin() - } - return err + return } - // Start to monitor the error status of transport. - go ac.transportMonitor() - } else { - // Start a goroutine connecting to the server asynchronously. - go func() { - if err := ac.resetTransport(); err != nil { - grpclog.Warningf("Failed to dial %s: %v; please retry.", ac.addrs[0].Addr, err) - if err != errConnClosing { - // Keep this ac in cc.conns, to get the reason it's torn down. - ac.tearDown(err) - } - return - } - ac.transportMonitor() - }() - } + ac.transportMonitor() + }() return nil } @@ -731,6 +987,7 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) if curAddrFound { ac.addrs = addrs + ac.reconnectIdx = 0 // Start reconnecting from beginning in the new list. 
 	}
 
 	return curAddrFound
@@ -741,7 +998,7 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
 // the corresponding MethodConfig.
 // If there isn't an exact match for the input method, we look for the default config
 // under the service (i.e /service/). If there is a default MethodConfig for
-// the serivce, we return it.
+// the service, we return it.
 // Otherwise, we return an empty MethodConfig.
 func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
 	// TODO: Avoid the locking here.
@@ -750,37 +1007,12 @@ func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
 	m, ok := cc.sc.Methods[method]
 	if !ok {
 		i := strings.LastIndex(method, "/")
-		m, _ = cc.sc.Methods[method[:i+1]]
+		m = cc.sc.Methods[method[:i+1]]
 	}
 	return m
 }
 
 func (cc *ClientConn) getTransport(ctx context.Context, failfast bool) (transport.ClientTransport, func(balancer.DoneInfo), error) {
-	if cc.balancerWrapper == nil {
-		// If balancer is nil, there should be only one addrConn available.
-		cc.mu.RLock()
-		if cc.conns == nil {
-			cc.mu.RUnlock()
-			// TODO this function returns toRPCErr and non-toRPCErr. Clean up
-			// the errors in ClientConn.
-			return nil, nil, toRPCErr(ErrClientConnClosing)
-		}
-		var ac *addrConn
-		for ac = range cc.conns {
-			// Break after the first iteration to get the first addrConn.
-			break
-		}
-		cc.mu.RUnlock()
-		if ac == nil {
-			return nil, nil, errConnClosing
-		}
-		t, err := ac.wait(ctx, false /*hasBalancer*/, failfast)
-		if err != nil {
-			return nil, nil, err
-		}
-		return t, nil, nil
-	}
-
 	t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{})
 	if err != nil {
 		return nil, nil, toRPCErr(err)
@@ -788,9 +1020,61 @@ func (cc *ClientConn) getTransport(ctx context.Context, failfast bool) (transpor
 	return t, done, nil
 }
 
+// handleServiceConfig parses the service config string in JSON format to Go native
+// struct ServiceConfig, and stores both the struct and the JSON string in ClientConn.
+func (cc *ClientConn) handleServiceConfig(js string) error {
+	if cc.dopts.disableServiceConfig {
+		return nil
+	}
+	sc, err := parseServiceConfig(js)
+	if err != nil {
+		return err
+	}
+	cc.mu.Lock()
+	cc.scRaw = js
+	cc.sc = sc
+	if sc.LB != nil && *sc.LB != grpclbName { // "grpclb" is not a valid balancer option in service config.
+		if cc.curBalancerName == grpclbName {
+			// If current balancer is grpclb, there's at least one grpclb
+			// balancer address in the resolved list. Don't switch the balancer,
+			// but change the previous balancer name, so if a new resolved
			// address list doesn't contain grpclb address, balancer will be
+			// switched to *sc.LB.
+			cc.preBalancerName = *sc.LB
+		} else {
+			cc.switchBalancer(*sc.LB)
+			cc.balancerWrapper.handleResolvedAddrs(cc.curAddresses, nil)
+		}
+	}
+
+	if envConfigStickinessOn {
+		var newStickinessMDKey string
+		if sc.stickinessMetadataKey != nil && *sc.stickinessMetadataKey != "" {
+			newStickinessMDKey = *sc.stickinessMetadataKey
+		}
+		// newStickinessMDKey is "" if one of the following happens:
+		//  - stickinessMetadataKey is set to ""
+		//  - stickinessMetadataKey field doesn't exist in service config
+		cc.blockingpicker.updateStickinessMDKey(strings.ToLower(newStickinessMDKey))
+	}
+
+	cc.mu.Unlock()
+	return nil
+}
+
+func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) {
+	cc.mu.RLock()
+	r := cc.resolverWrapper
+	cc.mu.RUnlock()
+	if r == nil {
+		return
+	}
+	go r.resolveNow(o)
+}
+
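handleServiceConfig above receives raw JSON from the resolver. As a sketch of the document shape it parses (field names follow grpc's service_config.md; the service, method, and values are invented):

```go
package main

import "fmt"

// serviceConfigJSON is illustrative only; parseServiceConfig is unexported,
// so real configs arrive via the name resolver.
const serviceConfigJSON = `{
  "loadBalancingPolicy": "round_robin",
  "methodConfig": [{
    "name": [{ "service": "helloworld.Greeter", "method": "SayHello" }],
    "waitForReady": true,
    "timeout": "1.5s",
    "maxRequestMessageBytes": 1024
  }]
}`

func main() { fmt.Println(serviceConfigJSON) }
```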
 // Close tears down the ClientConn and all underlying connections.
 func (cc *ClientConn) Close() error {
-	cc.cancel()
+	defer cc.cancel()
 
 	cc.mu.Lock()
 	if cc.conns == nil {
@@ -800,17 +1084,28 @@ func (cc *ClientConn) Close() error {
 	conns := cc.conns
 	cc.conns = nil
 	cc.csMgr.updateState(connectivity.Shutdown)
+
+	rWrapper := cc.resolverWrapper
+	cc.resolverWrapper = nil
+	bWrapper := cc.balancerWrapper
+	cc.balancerWrapper = nil
 	cc.mu.Unlock()
+
 	cc.blockingpicker.close()
-	if cc.resolverWrapper != nil {
-		cc.resolverWrapper.close()
+
+	if rWrapper != nil {
+		rWrapper.close()
 	}
-	if cc.balancerWrapper != nil {
-		cc.balancerWrapper.close()
+	if bWrapper != nil {
+		bWrapper.close()
 	}
+
 	for ac := range conns {
 		ac.tearDown(ErrClientConnClosing)
 	}
+	if channelz.IsOn() {
+		channelz.RemoveEntry(cc.channelzID)
+	}
 	return nil
 }
 
@@ -819,15 +1114,16 @@
 type addrConn struct {
 	ctx    context.Context
 	cancel context.CancelFunc
 
-	cc      *ClientConn
-	curAddr resolver.Address
-	addrs   []resolver.Address
-	dopts   dialOptions
-	events  trace.EventLog
-	acbw    balancer.SubConn
+	cc     *ClientConn
+	addrs  []resolver.Address
+	dopts  dialOptions
+	events trace.EventLog
+	acbw   balancer.SubConn
 
-	mu    sync.Mutex
-	state connectivity.State
+	mu           sync.Mutex
+	curAddr      resolver.Address
+	reconnectIdx int // The index in addrs list to start reconnecting from.
+	state        connectivity.State
 	// ready is closed and becomes nil when a new transport is up or failed
 	// due to timeout.
 	ready chan struct{}
@@ -835,13 +1131,28 @@ type addrConn struct {
 
 	// The reason this addrConn is torn down.
 	tearDownErr error
+
+	connectRetryNum int
+	// backoffDeadline is the time until which resetTransport needs to
+	// wait before increasing connectRetryNum count.
+	backoffDeadline time.Time
+	// connectDeadline is the time by which all connection
+	// negotiations must complete.
+	connectDeadline time.Time
+
+	channelzID          int64 // channelz unique identification number
+	czmu                sync.RWMutex
+	callsStarted        int64
+	callsSucceeded      int64
+	callsFailed         int64
+	lastCallStartedTime time.Time
 }
 
 // adjustParams updates parameters used to create transports upon
 // receiving a GoAway.
 func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
 	switch r {
-	case transport.TooManyPings:
+	case transport.GoAwayTooManyPings:
 		v := 2 * ac.dopts.copts.KeepaliveParams.Time
 		ac.cc.mu.Lock()
 		if v > ac.cc.mkp.Time {
@@ -869,6 +1180,15 @@ func (ac *addrConn) errorf(format string, a ...interface{}) {
 
 // resetTransport recreates a transport to the address for ac. The old
 // transport will close itself on error or when the clientconn is closed.
+// The created transport must receive initial settings frame from the server.
+// In case that doesn't happen, transportMonitor will kill the newly created
+// transport after connectDeadline has expired.
+// In case there was an error on the transport before the settings frame was
+// received, resetTransport resumes connecting to backends after the one that
+// was previously connected to. In case end of the list is reached, resetTransport
+// backs off until the original deadline.
+// If the DialOption WithWaitForHandshake was set, resetTransport returns
+// successfully only after server settings are received.
 //
 // TODO(bar) make sure all state transitions are valid.
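The doc comment above describes two windows that the implementation below tracks: backoffDeadline (when the next attempt may start) and connectDeadline (how long dialing may run, never less than minConnectTimeout). A toy calculation, assuming a simplified exponential strategy without the jitter that backoff.Exponential applies:

```go
package main

import (
	"fmt"
	"time"
)

const minConnectTimeout = 20 * time.Second // mirrors the constant above

// backoffFor is a jitter-free stand-in for backoff.Strategy.Backoff.
func backoffFor(retries int) time.Duration {
	d := 1 * time.Second
	for i := 0; i < retries; i++ {
		d *= 2
		if d >= 120*time.Second {
			return 120 * time.Second
		}
	}
	return d
}

func main() {
	for retries := 0; retries < 8; retries++ {
		wait := backoffFor(retries)
		// Dialing always gets at least minConnectTimeout; once the backoff
		// exceeds it, the whole backoff window may be spent dialing.
		dialDuration := minConnectTimeout
		if wait > dialDuration {
			dialDuration = wait
		}
		fmt.Printf("retry %d: backoff %v, dial window %v\n", retries, wait, dialDuration)
	}
}
```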
func (ac *addrConn) resetTransport() error { @@ -882,19 +1202,38 @@ func (ac *addrConn) resetTransport() error { ac.ready = nil } ac.transport = nil - ac.curAddr = resolver.Address{} + ridx := ac.reconnectIdx ac.mu.Unlock() ac.cc.mu.RLock() ac.dopts.copts.KeepaliveParams = ac.cc.mkp ac.cc.mu.RUnlock() - for retries := 0; ; retries++ { - sleepTime := ac.dopts.bs.backoff(retries) - timeout := minConnectTimeout + var backoffDeadline, connectDeadline time.Time + for connectRetryNum := 0; ; connectRetryNum++ { ac.mu.Lock() - if timeout < time.Duration(int(sleepTime)/len(ac.addrs)) { - timeout = time.Duration(int(sleepTime) / len(ac.addrs)) + if ac.backoffDeadline.IsZero() { + // This means either a successful HTTP2 connection was established + // or this is the first time this addrConn is trying to establish a + // connection. + backoffFor := ac.dopts.bs.Backoff(connectRetryNum) // time.Duration. + // This will be the duration that dial gets to finish. + dialDuration := getMinConnectTimeout() + if backoffFor > dialDuration { + // Give dial more time as we keep failing to connect. + dialDuration = backoffFor + } + start := time.Now() + backoffDeadline = start.Add(backoffFor) + connectDeadline = start.Add(dialDuration) + ridx = 0 // Start connecting from the beginning. + } else { + // Continue trying to connect with the same deadlines. + connectRetryNum = ac.connectRetryNum + backoffDeadline = ac.backoffDeadline + connectDeadline = ac.connectDeadline + ac.backoffDeadline = time.Time{} + ac.connectDeadline = time.Time{} + ac.connectRetryNum = 0 } - connectTime := time.Now() if ac.state == connectivity.Shutdown { ac.mu.Unlock() return errConnClosing @@ -902,116 +1241,178 @@ func (ac *addrConn) resetTransport() error { ac.printf("connecting") if ac.state != connectivity.Connecting { ac.state = connectivity.Connecting - // TODO(bar) remove condition once we always have a balancer. - if ac.cc.balancerWrapper != nil { - ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) - } else { - ac.cc.csMgr.updateState(ac.state) - } + ac.cc.handleSubConnStateChange(ac.acbw, ac.state) } // copy ac.addrs in case of race addrsIter := make([]resolver.Address, len(ac.addrs)) copy(addrsIter, ac.addrs) copts := ac.dopts.copts ac.mu.Unlock() - for _, addr := range addrsIter { + connected, err := ac.createTransport(connectRetryNum, ridx, backoffDeadline, connectDeadline, addrsIter, copts) + if err != nil { + return err + } + if connected { + return nil + } + } +} + +// createTransport creates a connection to one of the backends in addrs. +// It returns true if a connection was established. +func (ac *addrConn) createTransport(connectRetryNum, ridx int, backoffDeadline, connectDeadline time.Time, addrs []resolver.Address, copts transport.ConnectOptions) (bool, error) { + for i := ridx; i < len(addrs); i++ { + addr := addrs[i] + target := transport.TargetInfo{ + Addr: addr.Addr, + Metadata: addr.Metadata, + Authority: ac.cc.authority, + } + done := make(chan struct{}) + onPrefaceReceipt := func() { ac.mu.Lock() - if ac.state == connectivity.Shutdown { - // ac.tearDown(...) has been invoked. - ac.mu.Unlock() - return errConnClosing + close(done) + if !ac.backoffDeadline.IsZero() { + // If we haven't already started reconnecting to + // other backends. + // Note, this can happen when writer notices an error + // and triggers resetTransport while at the same time + // reader receives the preface and invokes this closure. 
+			ac.backoffDeadline = time.Time{}
+			ac.connectDeadline = time.Time{}
+			ac.connectRetryNum = 0
 		}
 		ac.mu.Unlock()
-		sinfo := transport.TargetInfo{
-			Addr:     addr.Addr,
-			Metadata: addr.Metadata,
-		}
-		newTransport, err := transport.NewClientTransport(ac.cc.ctx, sinfo, copts, timeout)
-		if err != nil {
-			if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() {
-				ac.mu.Lock()
-				if ac.state != connectivity.Shutdown {
-					ac.state = connectivity.TransientFailure
-					if ac.cc.balancerWrapper != nil {
-						ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state)
-					} else {
-						ac.cc.csMgr.updateState(ac.state)
-					}
-				}
-				ac.mu.Unlock()
-				return err
-			}
-			grpclog.Warningf("grpc: addrConn.resetTransport failed to create client transport: %v; Reconnecting to %v", err, addr)
-			ac.mu.Lock()
-			if ac.state == connectivity.Shutdown {
-				// ac.tearDown(...) has been invoked.
-				ac.mu.Unlock()
-				return errConnClosing
-			}
-			ac.mu.Unlock()
-			continue
-		}
+	}
+	// Do not cancel in the success path because of
+	// this issue in Go1.6: https://github.com/golang/go/issues/15078.
+	connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
+	if channelz.IsOn() {
+		copts.ChannelzParentID = ac.channelzID
+	}
+	newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt)
+	if err != nil {
+		cancel()
+		ac.cc.blockingpicker.updateConnectionError(err)
 		ac.mu.Lock()
-		ac.printf("ready")
 		if ac.state == connectivity.Shutdown {
 			// ac.tearDown(...) has been invoked.
 			ac.mu.Unlock()
-			newTransport.Close()
-			return errConnClosing
-		}
-		ac.state = connectivity.Ready
-		if ac.cc.balancerWrapper != nil {
-			ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state)
-		} else {
-			ac.cc.csMgr.updateState(ac.state)
-		}
-		t := ac.transport
-		ac.transport = newTransport
-		if t != nil {
-			t.Close()
-		}
-		ac.curAddr = addr
-		if ac.ready != nil {
-			close(ac.ready)
-			ac.ready = nil
+			return false, errConnClosing
 		}
 		ac.mu.Unlock()
-		return nil
+		grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err)
+		continue
+	}
+	if ac.dopts.waitForHandshake {
+		select {
+		case <-done:
+		case <-connectCtx.Done():
+			// Didn't receive server preface, must kill this new transport now.
+			grpclog.Warningf("grpc: addrConn.createTransport failed to receive server preface before deadline.")
+			newTr.Close()
+			break
+		case <-ac.ctx.Done():
+		}
 	}
 	ac.mu.Lock()
-	ac.state = connectivity.TransientFailure
-	if ac.cc.balancerWrapper != nil {
-		ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state)
-	} else {
-		ac.cc.csMgr.updateState(ac.state)
+	if ac.state == connectivity.Shutdown {
+		ac.mu.Unlock()
+		// ac.tearDown(...) has been invoked.
+		newTr.Close()
+		return false, errConnClosing
 	}
+	ac.printf("ready")
+	ac.state = connectivity.Ready
+	ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
+	ac.transport = newTr
+	ac.curAddr = addr
 	if ac.ready != nil {
 		close(ac.ready)
 		ac.ready = nil
 	}
-	ac.mu.Unlock()
-	timer := time.NewTimer(sleepTime - time.Since(connectTime))
 	select {
-	case <-timer.C:
-	case <-ac.ctx.Done():
-		timer.Stop()
-		return ac.ctx.Err()
+	case <-done:
+		// If the server has responded back with preface already,
+		// don't set the reconnect parameters.
+	default:
+		ac.connectRetryNum = connectRetryNum
+		ac.backoffDeadline = backoffDeadline
+		ac.connectDeadline = connectDeadline
+		ac.reconnectIdx = i + 1 // Start reconnecting from the next backend in the list.
 	}
+		ac.mu.Unlock()
+		return true, nil
+	}
+	ac.mu.Lock()
+	if ac.state == connectivity.Shutdown {
+		ac.mu.Unlock()
+		return false, errConnClosing
+	}
+	ac.state = connectivity.TransientFailure
+	ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
+	ac.cc.resolveNow(resolver.ResolveNowOption{})
+	if ac.ready != nil {
+		close(ac.ready)
+		ac.ready = nil
+	}
+	ac.mu.Unlock()
+	timer := time.NewTimer(backoffDeadline.Sub(time.Now()))
+	select {
+	case <-timer.C:
+	case <-ac.ctx.Done():
 		timer.Stop()
+		return false, ac.ctx.Err()
 	}
+	return false, nil
 }
 
 // Run in a goroutine to track the error in transport and create the
 // new transport if an error happens. It returns when the channel is closing.
 func (ac *addrConn) transportMonitor() {
 	for {
+		var timer *time.Timer
+		var cdeadline <-chan time.Time
 		ac.mu.Lock()
 		t := ac.transport
+		if !ac.connectDeadline.IsZero() {
+			timer = time.NewTimer(ac.connectDeadline.Sub(time.Now()))
+			cdeadline = timer.C
+		}
 		ac.mu.Unlock()
 		// Block until we receive a goaway or an error occurs.
 		select {
 		case <-t.GoAway():
+			done := t.Error()
+			cleanup := t.Close
+			// Since this transport will be orphaned (won't have a transportMonitor)
+			// we need to launch a goroutine to keep track of clientConn.Close()
+			// happening since it might not be noticed by any other goroutine for a while.
+			go func() {
+				<-done
+				cleanup()
+			}()
 		case <-t.Error():
+			// In case this is triggered because clientConn.Close()
+			// was called, we want to immediately close the transport
+			// since no other goroutine might notice it for a while.
+			t.Close()
+		case <-cdeadline:
+			ac.mu.Lock()
+			// This implies that client received server preface.
+			if ac.backoffDeadline.IsZero() {
+				ac.mu.Unlock()
+				continue
+			}
+			ac.mu.Unlock()
+			timer = nil
+			// No server preface received until deadline.
+			// Kill the connection.
+			grpclog.Warningf("grpc: addrConn.transportMonitor didn't get server preface after waiting. Closing the new transport now.")
+			t.Close()
+		}
+		if timer != nil {
+			timer.Stop()
 		}
 		// If a GoAway happened, regardless of error, adjust our keepalive
 		// parameters as appropriate.
@@ -1028,11 +1429,8 @@
 		// Set connectivity state to TransientFailure before calling
 		// resetTransport. Transition READY->CONNECTING is not valid.
 		ac.state = connectivity.TransientFailure
-		if ac.cc.balancerWrapper != nil {
-			ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state)
-		} else {
-			ac.cc.csMgr.updateState(ac.state)
-		}
+		ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
+		ac.cc.resolveNow(resolver.ResolveNowOption{})
 		ac.curAddr = resolver.Address{}
 		ac.mu.Unlock()
 		if err := ac.resetTransport(); err != nil {
@@ -1106,7 +1504,7 @@ func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) {
 	ac.mu.Unlock()
 	// Trigger idle ac to connect.
 	if idle {
-		ac.connect(false)
+		ac.connect()
 	}
 	return nil, false
 }
@@ -1119,8 +1517,11 @@
 func (ac *addrConn) tearDown(err error) {
 	ac.cancel()
 	ac.mu.Lock()
-	ac.curAddr = resolver.Address{}
 	defer ac.mu.Unlock()
+	if ac.state == connectivity.Shutdown {
+		return
+	}
+	ac.curAddr = resolver.Address{}
 	if err == errConnDrain && ac.transport != nil {
 		// GracefulClose(...) may be executed multiple times when
 		// i) receiving multiple GoAway frames from the server; or
 		// ii) receiving multiple addrConn
@@ -1128,16 +1529,9 @@
 		// address removal and GoAway.
ac.transport.GracefulClose() } - if ac.state == connectivity.Shutdown { - return - } ac.state = connectivity.Shutdown ac.tearDownErr = err - if ac.cc.balancerWrapper != nil { - ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) - } else { - ac.cc.csMgr.updateState(ac.state) - } + ac.cc.handleSubConnStateChange(ac.acbw, ac.state) if ac.events != nil { ac.events.Finish() ac.events = nil @@ -1146,7 +1540,9 @@ func (ac *addrConn) tearDown(err error) { close(ac.ready) ac.ready = nil } - return + if channelz.IsOn() { + channelz.RemoveEntry(ac.channelzID) + } } func (ac *addrConn) getState() connectivity.State { @@ -1154,3 +1550,53 @@ func (ac *addrConn) getState() connectivity.State { defer ac.mu.Unlock() return ac.state } + +func (ac *addrConn) getCurAddr() (ret resolver.Address) { + ac.mu.Lock() + ret = ac.curAddr + ac.mu.Unlock() + return +} + +func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric { + ac.mu.Lock() + addr := ac.curAddr.Addr + ac.mu.Unlock() + state := ac.getState() + ac.czmu.RLock() + defer ac.czmu.RUnlock() + return &channelz.ChannelInternalMetric{ + State: state, + Target: addr, + CallsStarted: ac.callsStarted, + CallsSucceeded: ac.callsSucceeded, + CallsFailed: ac.callsFailed, + LastCallStartedTimestamp: ac.lastCallStartedTime, + } +} + +func (ac *addrConn) incrCallsStarted() { + ac.czmu.Lock() + ac.callsStarted++ + ac.lastCallStartedTime = time.Now() + ac.czmu.Unlock() +} + +func (ac *addrConn) incrCallsSucceeded() { + ac.czmu.Lock() + ac.callsSucceeded++ + ac.czmu.Unlock() +} + +func (ac *addrConn) incrCallsFailed() { + ac.czmu.Lock() + ac.callsFailed++ + ac.czmu.Unlock() +} + +// ErrClientConnTimeout indicates that the ClientConn cannot establish the +// underlying connections within the specified timeout. +// +// Deprecated: This error is never returned by grpc and should not be +// referenced by users. +var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/codec.go b/cluster-autoscaler/vendor/google.golang.org/grpc/codec.go index 905b048e2ac5..129776547811 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/codec.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/codec.go @@ -19,86 +19,32 @@ package grpc import ( - "math" - "sync" - - "github.com/golang/protobuf/proto" + "google.golang.org/grpc/encoding" + _ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto" ) +// baseCodec contains the functionality of both Codec and encoding.Codec, but +// omits the name/string, which vary between the two and are not needed for +// anything besides the registry in the encoding package. +type baseCodec interface { + Marshal(v interface{}) ([]byte, error) + Unmarshal(data []byte, v interface{}) error +} + +var _ baseCodec = Codec(nil) +var _ baseCodec = encoding.Codec(nil) + // Codec defines the interface gRPC uses to encode and decode messages. // Note that implementations of this interface must be thread safe; // a Codec's methods can be called from concurrent goroutines. +// +// Deprecated: use encoding.Codec instead. type Codec interface { // Marshal returns the wire format of v. Marshal(v interface{}) ([]byte, error) // Unmarshal parses the wire format into v. Unmarshal(data []byte, v interface{}) error - // String returns the name of the Codec implementation. The returned - // string will be used as part of content type in transmission. + // String returns the name of the Codec implementation. This is unused by + // gRPC. 
String() string } - -// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC. -type protoCodec struct { -} - -type cachedProtoBuffer struct { - lastMarshaledSize uint32 - proto.Buffer -} - -func capToMaxInt32(val int) uint32 { - if val > math.MaxInt32 { - return uint32(math.MaxInt32) - } - return uint32(val) -} - -func (p protoCodec) marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) { - protoMsg := v.(proto.Message) - newSlice := make([]byte, 0, cb.lastMarshaledSize) - - cb.SetBuf(newSlice) - cb.Reset() - if err := cb.Marshal(protoMsg); err != nil { - return nil, err - } - out := cb.Bytes() - cb.lastMarshaledSize = capToMaxInt32(len(out)) - return out, nil -} - -func (p protoCodec) Marshal(v interface{}) ([]byte, error) { - cb := protoBufferPool.Get().(*cachedProtoBuffer) - out, err := p.marshal(v, cb) - - // put back buffer and lose the ref to the slice - cb.SetBuf(nil) - protoBufferPool.Put(cb) - return out, err -} - -func (p protoCodec) Unmarshal(data []byte, v interface{}) error { - cb := protoBufferPool.Get().(*cachedProtoBuffer) - cb.SetBuf(data) - v.(proto.Message).Reset() - err := cb.Unmarshal(v.(proto.Message)) - cb.SetBuf(nil) - protoBufferPool.Put(cb) - return err -} - -func (protoCodec) String() string { - return "proto" -} - -var ( - protoBufferPool = &sync.Pool{ - New: func() interface{} { - return &cachedProtoBuffer{ - Buffer: proto.Buffer{}, - lastMarshaledSize: 16, - } - }, - } -) diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/codes/code_string.go b/cluster-autoscaler/vendor/google.golang.org/grpc/codes/code_string.go index 259837060abf..0b206a57822a 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/codes/code_string.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/codes/code_string.go @@ -1,16 +1,62 @@ -// Code generated by "stringer -type=Code"; DO NOT EDIT. +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ package codes -import "fmt" +import "strconv" -const _Code_name = "OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlreadyExistsPermissionDeniedResourceExhaustedFailedPreconditionAbortedOutOfRangeUnimplementedInternalUnavailableDataLossUnauthenticated" - -var _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192} - -func (i Code) String() string { - if i >= Code(len(_Code_index)-1) { - return fmt.Sprintf("Code(%d)", i) +func (c Code) String() string { + switch c { + case OK: + return "OK" + case Canceled: + return "Canceled" + case Unknown: + return "Unknown" + case InvalidArgument: + return "InvalidArgument" + case DeadlineExceeded: + return "DeadlineExceeded" + case NotFound: + return "NotFound" + case AlreadyExists: + return "AlreadyExists" + case PermissionDenied: + return "PermissionDenied" + case ResourceExhausted: + return "ResourceExhausted" + case FailedPrecondition: + return "FailedPrecondition" + case Aborted: + return "Aborted" + case OutOfRange: + return "OutOfRange" + case Unimplemented: + return "Unimplemented" + case Internal: + return "Internal" + case Unavailable: + return "Unavailable" + case DataLoss: + return "DataLoss" + case Unauthenticated: + return "Unauthenticated" + default: + return "Code(" + strconv.FormatInt(int64(c), 10) + ")" } - return _Code_name[_Code_index[i]:_Code_index[i+1]] } diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/codes/codes.go b/cluster-autoscaler/vendor/google.golang.org/grpc/codes/codes.go index 81fe7bf85b39..027e3ec823b3 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/codes/codes.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/codes/codes.go @@ -20,11 +20,14 @@ // consistent across various languages. package codes +import ( + "fmt" + "strconv" +) + // A Code is an unsigned 32-bit error code as defined in the gRPC spec. type Code uint32 -//go:generate stringer -type=Code - const ( // OK is returned on success. OK Code = 0 @@ -32,9 +35,9 @@ const ( // Canceled indicates the operation was canceled (typically by the caller). Canceled Code = 1 - // Unknown error. An example of where this error may be returned is + // Unknown error. An example of where this error may be returned is // if a Status value received from another address space belongs to - // an error-space that is not known in this address space. Also + // an error-space that is not known in this address space. Also // errors raised by APIs that do not return enough error information // may be converted to this error. Unknown Code = 2 @@ -63,15 +66,11 @@ const ( // PermissionDenied indicates the caller does not have permission to // execute the specified operation. It must not be used for rejections // caused by exhausting some resource (use ResourceExhausted - // instead for those errors). It must not be + // instead for those errors). It must not be // used if the caller cannot be identified (use Unauthenticated // instead for those errors). PermissionDenied Code = 7 - // Unauthenticated indicates the request does not have valid - // authentication credentials for the operation. - Unauthenticated Code = 16 - // ResourceExhausted indicates some resource has been exhausted, perhaps // a per-user quota, or perhaps the entire file system is out of space. ResourceExhausted Code = 8 @@ -87,7 +86,7 @@ const ( // (b) Use Aborted if the client should retry at a higher-level // (e.g., restarting a read-modify-write sequence). 
// (c) Use FailedPrecondition if the client should not retry until - // the system state has been explicitly fixed. E.g., if an "rmdir" + // the system state has been explicitly fixed. E.g., if an "rmdir" // fails because the directory is non-empty, FailedPrecondition // should be returned since the client should not retry unless // they have first fixed up the directory by deleting files from it. @@ -116,7 +115,7 @@ const ( // file size. // // There is a fair bit of overlap between FailedPrecondition and - // OutOfRange. We recommend using OutOfRange (the more specific + // OutOfRange. We recommend using OutOfRange (the more specific // error) when it applies so that callers who are iterating through // a space can easily look for an OutOfRange error to detect when // they are done. @@ -126,8 +125,8 @@ const ( // supported/enabled in this service. Unimplemented Code = 12 - // Internal errors. Means some invariants expected by underlying - // system has been broken. If you see one of these errors, + // Internal errors. Means some invariants expected by underlying + // system has been broken. If you see one of these errors, // something is very broken. Internal Code = 13 @@ -141,4 +140,58 @@ const ( // DataLoss indicates unrecoverable data loss or corruption. DataLoss Code = 15 + + // Unauthenticated indicates the request does not have valid + // authentication credentials for the operation. + Unauthenticated Code = 16 + + _maxCode = 17 ) + +var strToCode = map[string]Code{ + `"OK"`: OK, + `"CANCELLED"`:/* [sic] */ Canceled, + `"UNKNOWN"`: Unknown, + `"INVALID_ARGUMENT"`: InvalidArgument, + `"DEADLINE_EXCEEDED"`: DeadlineExceeded, + `"NOT_FOUND"`: NotFound, + `"ALREADY_EXISTS"`: AlreadyExists, + `"PERMISSION_DENIED"`: PermissionDenied, + `"RESOURCE_EXHAUSTED"`: ResourceExhausted, + `"FAILED_PRECONDITION"`: FailedPrecondition, + `"ABORTED"`: Aborted, + `"OUT_OF_RANGE"`: OutOfRange, + `"UNIMPLEMENTED"`: Unimplemented, + `"INTERNAL"`: Internal, + `"UNAVAILABLE"`: Unavailable, + `"DATA_LOSS"`: DataLoss, + `"UNAUTHENTICATED"`: Unauthenticated, +} + +// UnmarshalJSON unmarshals b into the Code. +func (c *Code) UnmarshalJSON(b []byte) error { + // From json.Unmarshaler: By convention, to approximate the behavior of + // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as + // a no-op. + if string(b) == "null" { + return nil + } + if c == nil { + return fmt.Errorf("nil receiver passed to UnmarshalJSON") + } + + if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { + if ci >= _maxCode { + return fmt.Errorf("invalid code: %q", ci) + } + + *c = Code(ci) + return nil + } + + if jc, ok := strToCode[string(b)]; ok { + *c = jc + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/credentials/credentials.go b/cluster-autoscaler/vendor/google.golang.org/grpc/credentials/credentials.go index 0ce766a4dcf5..b918bf13ad42 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/credentials/credentials.go @@ -34,10 +34,8 @@ import ( "golang.org/x/net/context" ) -var ( - // alpnProtoStr are the specified application level protocols for gRPC. - alpnProtoStr = []string{"h2"} -) +// alpnProtoStr are the specified application level protocols for gRPC. 
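Back in codes.go, the UnmarshalJSON added above accepts both the numeric value and the quoted enum name that other gRPC runtimes emit. A quick check, assuming this vendored codes package:

```go
package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/grpc/codes"
)

func main() {
	var c codes.Code
	// The quoted enum name, as emitted by other gRPC implementations...
	_ = json.Unmarshal([]byte(`"UNAVAILABLE"`), &c)
	fmt.Println(c) // Unavailable
	// ...and the raw numeric value decode to the same Code.
	_ = json.Unmarshal([]byte(`14`), &c)
	fmt.Println(c) // Unavailable
}
```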
+var alpnProtoStr = []string{"h2"} // PerRPCCredentials defines the common interface for the credentials which need to // attach security information to every RPC (e.g., oauth2). @@ -45,8 +43,9 @@ type PerRPCCredentials interface { // GetRequestMetadata gets the current request metadata, refreshing // tokens if required. This should be called by the transport layer on // each request, and the data should be populated in headers or other - // context. uri is the URI of the entry point for the request. When - // supported by the underlying implementation, ctx can be used for + // context. If a status code is returned, it will be used as the status + // for the RPC. uri is the URI of the entry point for the request. + // When supported by the underlying implementation, ctx can be used for // timeout and cancellation. // TODO(zhaoq): Define the set of the qualified keys instead of leaving // it as an arbitrary string. @@ -74,11 +73,9 @@ type AuthInfo interface { AuthType() string } -var ( - // ErrConnDispatched indicates that rawConn has been dispatched out of gRPC - // and the caller should not close rawConn. - ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC") -) +// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC +// and the caller should not close rawConn. +var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC") // TransportCredentials defines the common interface for all the live gRPC wire // protocols and supported transport security protocols (e.g., TLS, SSL). @@ -135,15 +132,15 @@ func (c tlsCreds) Info() ProtocolInfo { } } -func (c *tlsCreds) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { +func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { // use local cfg to avoid clobbering ServerName if using multiple endpoints cfg := cloneTLSConfig(c.config) if cfg.ServerName == "" { - colonPos := strings.LastIndex(addr, ":") + colonPos := strings.LastIndex(authority, ":") if colonPos == -1 { - colonPos = len(addr) + colonPos = len(authority) } - cfg.ServerName = addr[:colonPos] + cfg.ServerName = authority[:colonPos] } conn := tls.Client(rawConn, cfg) errChannel := make(chan error, 1) diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/encoding/encoding.go b/cluster-autoscaler/vendor/google.golang.org/grpc/encoding/encoding.go new file mode 100644 index 000000000000..ade8b7cec73a --- /dev/null +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/encoding/encoding.go @@ -0,0 +1,118 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package encoding defines the interface for the compressor and codec, and +// functions to register and retrieve compressors and codecs. +// +// This package is EXPERIMENTAL. 
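The `ClientHandshake` change above is a parameter rename (`addr` becomes `authority`), but the ServerName derivation it performs is easy to miss: everything from the last colon onward is treated as a port and stripped. A standalone sketch of the same logic; the helper name is hypothetical, since the real code is inlined in `ClientHandshake`:

```go
package main

import (
	"fmt"
	"strings"
)

// serverNameFromAuthority mirrors the derivation in ClientHandshake: drop
// the trailing ":port" (if any) to obtain the TLS ServerName.
func serverNameFromAuthority(authority string) string {
	colonPos := strings.LastIndex(authority, ":")
	if colonPos == -1 {
		colonPos = len(authority)
	}
	return authority[:colonPos]
}

func main() {
	fmt.Println(serverNameFromAuthority("example.com:443")) // example.com
	fmt.Println(serverNameFromAuthority("example.com"))     // example.com
	fmt.Println(serverNameFromAuthority("[::1]:50051"))     // [::1]
}
```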
+package encoding
+
+import (
+	"io"
+	"strings"
+)
+
+// Identity specifies the optional encoding for uncompressed streams.
+// It is intended for grpc internal use only.
+const Identity = "identity"
+
+// Compressor is used for compressing and decompressing when sending or
+// receiving messages.
+type Compressor interface {
+	// Compress writes the data written to wc to w after compressing it. If an
+	// error occurs while initializing the compressor, that error is returned
+	// instead.
+	Compress(w io.Writer) (io.WriteCloser, error)
+	// Decompress reads data from r, decompresses it, and provides the
+	// uncompressed data via the returned io.Reader. If an error occurs while
+	// initializing the decompressor, that error is returned instead.
+	Decompress(r io.Reader) (io.Reader, error)
+	// Name is the name of the compression codec and is used to set the content
+	// coding header. The result must be static; the result cannot change
+	// between calls.
+	Name() string
+}
+
+var registeredCompressor = make(map[string]Compressor)
+
+// RegisterCompressor registers the compressor with gRPC by its name. It can
+// be activated when sending an RPC via grpc.UseCompressor(). It will be
+// automatically accessed when receiving a message based on the content coding
+// header. Servers also use it to send a response with the same encoding as
+// the request.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple Compressors are
+// registered with the same name, the one registered last will take effect.
+func RegisterCompressor(c Compressor) {
+	registeredCompressor[c.Name()] = c
+}
+
+// GetCompressor returns Compressor for the given compressor name.
+func GetCompressor(name string) Compressor {
+	return registeredCompressor[name]
+}
+
+// Codec defines the interface gRPC uses to encode and decode messages. Note
+// that implementations of this interface must be thread safe; a Codec's
+// methods can be called from concurrent goroutines.
+type Codec interface {
+	// Marshal returns the wire format of v.
+	Marshal(v interface{}) ([]byte, error)
+	// Unmarshal parses the wire format into v.
+	Unmarshal(data []byte, v interface{}) error
+	// Name returns the name of the Codec implementation. The returned string
+	// will be used as part of content type in transmission. The result must be
+	// static; the result cannot change between calls.
+	Name() string
+}
+
+var registeredCodecs = make(map[string]Codec)
+
+// RegisterCodec registers the provided Codec for use with all gRPC clients and
+// servers.
+//
+// The Codec will be stored and looked up by result of its Name() method, which
+// should match the content-subtype of the encoding handled by the Codec. This
+// is case-insensitive, and is stored and looked up as lowercase. If the
+// result of calling Name() is an empty string, RegisterCodec will panic. See
+// Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple Codecs are
+// registered with the same name, the one registered last will take effect.
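The `Compressor` interface and `RegisterCompressor` above are all that is needed to plug a new content coding into gRPC. A sketch of a gzip-backed implementation using only the standard library; the package and `"gzip"` name are illustrative assumptions, not part of this patch:

```go
package gzipenc

import (
	"compress/gzip"
	"io"

	"google.golang.org/grpc/encoding"
)

type compressor struct{}

func (compressor) Compress(w io.Writer) (io.WriteCloser, error) {
	// gzip.Writer compresses everything written to it into w; Close flushes.
	return gzip.NewWriter(w), nil
}

func (compressor) Decompress(r io.Reader) (io.Reader, error) {
	// gzip.NewReader validates the gzip header up front, matching the
	// "error while initializing" contract in the interface docs.
	return gzip.NewReader(r)
}

func (compressor) Name() string { return "gzip" }

func init() {
	// Registration must happen at init time, per the NOTE above.
	encoding.RegisterCompressor(compressor{})
}
```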
+func RegisterCodec(codec Codec) {
+	if codec == nil {
+		panic("cannot register a nil Codec")
+	}
+	contentSubtype := strings.ToLower(codec.Name())
+	if contentSubtype == "" {
+		panic("cannot register Codec with empty string result for Name()")
+	}
+	registeredCodecs[contentSubtype] = codec
+}
+
+// GetCodec gets a registered Codec by content-subtype, or nil if no Codec is
+// registered for the content-subtype.
+//
+// The content-subtype is expected to be lowercase.
+func GetCodec(contentSubtype string) Codec {
+	return registeredCodecs[contentSubtype]
+}
diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/encoding/proto/proto.go b/cluster-autoscaler/vendor/google.golang.org/grpc/encoding/proto/proto.go
new file mode 100644
index 000000000000..66b97a6f692a
--- /dev/null
+++ b/cluster-autoscaler/vendor/google.golang.org/grpc/encoding/proto/proto.go
@@ -0,0 +1,110 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package proto defines the protobuf codec. Importing this package will
+// register the codec.
+package proto
+
+import (
+	"math"
+	"sync"
+
+	"github.com/golang/protobuf/proto"
+	"google.golang.org/grpc/encoding"
+)
+
+// Name is the name registered for the proto codec.
+const Name = "proto"
+
+func init() {
+	encoding.RegisterCodec(codec{})
+}
+
+// codec is a Codec implementation with protobuf. It is the default codec for gRPC.
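Because `init` above registers this codec under the name `"proto"`, callers can fetch it through the registry and round-trip any generated message (the `Marshal`/`Unmarshal` implementations follow below). A hedged sketch, assuming the standard import paths and using the well-known `Duration` message purely as a convenient `proto.Message`:

```go
package main

import (
	"fmt"

	durpb "github.com/golang/protobuf/ptypes/duration"
	"google.golang.org/grpc/encoding"
	_ "google.golang.org/grpc/encoding/proto" // blank import runs init, registering "proto"
)

func main() {
	codec := encoding.GetCodec("proto")

	wire, err := codec.Marshal(&durpb.Duration{Seconds: 42})
	if err != nil {
		panic(err)
	}

	out := new(durpb.Duration)
	if err := codec.Unmarshal(wire, out); err != nil {
		panic(err)
	}
	fmt.Println(out.Seconds) // 42
}
```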
+type codec struct{} + +type cachedProtoBuffer struct { + lastMarshaledSize uint32 + proto.Buffer +} + +func capToMaxInt32(val int) uint32 { + if val > math.MaxInt32 { + return uint32(math.MaxInt32) + } + return uint32(val) +} + +func marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) { + protoMsg := v.(proto.Message) + newSlice := make([]byte, 0, cb.lastMarshaledSize) + + cb.SetBuf(newSlice) + cb.Reset() + if err := cb.Marshal(protoMsg); err != nil { + return nil, err + } + out := cb.Bytes() + cb.lastMarshaledSize = capToMaxInt32(len(out)) + return out, nil +} + +func (codec) Marshal(v interface{}) ([]byte, error) { + if pm, ok := v.(proto.Marshaler); ok { + // object can marshal itself, no need for buffer + return pm.Marshal() + } + + cb := protoBufferPool.Get().(*cachedProtoBuffer) + out, err := marshal(v, cb) + + // put back buffer and lose the ref to the slice + cb.SetBuf(nil) + protoBufferPool.Put(cb) + return out, err +} + +func (codec) Unmarshal(data []byte, v interface{}) error { + protoMsg := v.(proto.Message) + protoMsg.Reset() + + if pu, ok := protoMsg.(proto.Unmarshaler); ok { + // object can unmarshal itself, no need for buffer + return pu.Unmarshal(data) + } + + cb := protoBufferPool.Get().(*cachedProtoBuffer) + cb.SetBuf(data) + err := cb.Unmarshal(protoMsg) + cb.SetBuf(nil) + protoBufferPool.Put(cb) + return err +} + +func (codec) Name() string { + return Name +} + +var protoBufferPool = &sync.Pool{ + New: func() interface{} { + return &cachedProtoBuffer{ + Buffer: proto.Buffer{}, + lastMarshaledSize: 16, + } + }, +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/envconfig.go b/cluster-autoscaler/vendor/google.golang.org/grpc/envconfig.go new file mode 100644 index 000000000000..d50178e5171b --- /dev/null +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/envconfig.go @@ -0,0 +1,37 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "os" + "strings" +) + +const ( + envConfigPrefix = "GRPC_GO_" + envConfigStickinessStr = envConfigPrefix + "STICKINESS" +) + +var ( + envConfigStickinessOn bool +) + +func init() { + envConfigStickinessOn = strings.EqualFold(os.Getenv(envConfigStickinessStr), "on") +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/go16.go b/cluster-autoscaler/vendor/google.golang.org/grpc/go16.go new file mode 100644 index 000000000000..535ee9356f34 --- /dev/null +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/go16.go @@ -0,0 +1,70 @@ +// +build go1.6,!go1.7 + +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "io" + "net" + "net/http" + + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/grpc/transport" +) + +// dialContext connects to the address on the named network. +func dialContext(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address) +} + +func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { + req.Cancel = ctx.Done() + if err := req.Write(conn); err != nil { + return fmt.Errorf("failed to write the HTTP request: %v", err) + } + return nil +} + +// toRPCErr converts an error into an error from the status package. +func toRPCErr(err error) error { + if err == nil || err == io.EOF { + return err + } + if _, ok := status.FromError(err); ok { + return err + } + switch e := err.(type) { + case transport.StreamError: + return status.Error(e.Code, e.Desc) + case transport.ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + default: + switch err { + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + } + } + return status.Error(codes.Unknown, err.Error()) +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/go17.go b/cluster-autoscaler/vendor/google.golang.org/grpc/go17.go new file mode 100644 index 000000000000..ec676a93c396 --- /dev/null +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/go17.go @@ -0,0 +1,71 @@ +// +build go1.7 + +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "fmt" + "io" + "net" + "net/http" + + netctx "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/grpc/transport" +) + +// dialContext connects to the address on the named network. +func dialContext(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{}).DialContext(ctx, network, address) +} + +func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { + req = req.WithContext(ctx) + if err := req.Write(conn); err != nil { + return fmt.Errorf("failed to write the HTTP request: %v", err) + } + return nil +} + +// toRPCErr converts an error into an error from the status package. 
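Both build-tagged copies of `toRPCErr` (the go1.6 one above and the go1.7 one below) implement the same mapping: transport and context errors all surface to callers as status errors. Since the helper is unexported, the sketch below reconstructs the context branch for illustration only, using the go1.7 stdlib `context` for brevity:

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// mapCtxErr mirrors the context cases of toRPCErr; the name is hypothetical.
func mapCtxErr(err error) error {
	switch err {
	case context.DeadlineExceeded:
		return status.Error(codes.DeadlineExceeded, err.Error())
	case context.Canceled:
		return status.Error(codes.Canceled, err.Error())
	}
	return status.Error(codes.Unknown, err.Error())
}

func main() {
	// Callers can always recover a status from the converted error.
	s, _ := status.FromError(mapCtxErr(context.DeadlineExceeded))
	fmt.Println(s.Code()) // DeadlineExceeded
}
```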
+func toRPCErr(err error) error { + if err == nil || err == io.EOF { + return err + } + if _, ok := status.FromError(err); ok { + return err + } + switch e := err.(type) { + case transport.StreamError: + return status.Error(e.Code, e.Desc) + case transport.ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + default: + switch err { + case context.DeadlineExceeded, netctx.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled, netctx.Canceled: + return status.Error(codes.Canceled, err.Error()) + } + } + return status.Error(codes.Unknown, err.Error()) +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/grpclb.go b/cluster-autoscaler/vendor/google.golang.org/grpc/grpclb.go deleted file mode 100644 index db56ff362172..000000000000 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/grpclb.go +++ /dev/null @@ -1,704 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "errors" - "fmt" - "math/rand" - "net" - "sync" - "time" - - "golang.org/x/net/context" - "google.golang.org/grpc/codes" - lbmpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/naming" -) - -// Client API for LoadBalancer service. -// Mostly copied from generated pb.go file. -// To avoid circular dependency. -type loadBalancerClient struct { - cc *ClientConn -} - -func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...CallOption) (*balanceLoadClientStream, error) { - desc := &StreamDesc{ - StreamName: "BalanceLoad", - ServerStreams: true, - ClientStreams: true, - } - stream, err := NewClientStream(ctx, desc, c.cc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...) - if err != nil { - return nil, err - } - x := &balanceLoadClientStream{stream} - return x, nil -} - -type balanceLoadClientStream struct { - ClientStream -} - -func (x *balanceLoadClientStream) Send(m *lbmpb.LoadBalanceRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *balanceLoadClientStream) Recv() (*lbmpb.LoadBalanceResponse, error) { - m := new(lbmpb.LoadBalanceResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// NewGRPCLBBalancer creates a grpclb load balancer. -func NewGRPCLBBalancer(r naming.Resolver) Balancer { - return &grpclbBalancer{ - r: r, - } -} - -type remoteBalancerInfo struct { - addr string - // the server name used for authentication with the remote LB server. - name string -} - -// grpclbAddrInfo consists of the information of a backend server. -type grpclbAddrInfo struct { - addr Address - connected bool - // dropForRateLimiting indicates whether this particular request should be - // dropped by the client for rate limiting. - dropForRateLimiting bool - // dropForLoadBalancing indicates whether this particular request should be - // dropped by the client for load balancing. 
- dropForLoadBalancing bool -} - -type grpclbBalancer struct { - r naming.Resolver - target string - mu sync.Mutex - seq int // a sequence number to make sure addrCh does not get stale addresses. - w naming.Watcher - addrCh chan []Address - rbs []remoteBalancerInfo - addrs []*grpclbAddrInfo - next int - waitCh chan struct{} - done bool - rand *rand.Rand - - clientStats lbmpb.ClientStats -} - -func (b *grpclbBalancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerInfo) error { - updates, err := w.Next() - if err != nil { - grpclog.Warningf("grpclb: failed to get next addr update from watcher: %v", err) - return err - } - b.mu.Lock() - defer b.mu.Unlock() - if b.done { - return ErrClientConnClosing - } - for _, update := range updates { - switch update.Op { - case naming.Add: - var exist bool - for _, v := range b.rbs { - // TODO: Is the same addr with different server name a different balancer? - if update.Addr == v.addr { - exist = true - break - } - } - if exist { - continue - } - md, ok := update.Metadata.(*naming.AddrMetadataGRPCLB) - if !ok { - // TODO: Revisit the handling here and may introduce some fallback mechanism. - grpclog.Errorf("The name resolution contains unexpected metadata %v", update.Metadata) - continue - } - switch md.AddrType { - case naming.Backend: - // TODO: Revisit the handling here and may introduce some fallback mechanism. - grpclog.Errorf("The name resolution does not give grpclb addresses") - continue - case naming.GRPCLB: - b.rbs = append(b.rbs, remoteBalancerInfo{ - addr: update.Addr, - name: md.ServerName, - }) - default: - grpclog.Errorf("Received unknow address type %d", md.AddrType) - continue - } - case naming.Delete: - for i, v := range b.rbs { - if update.Addr == v.addr { - copy(b.rbs[i:], b.rbs[i+1:]) - b.rbs = b.rbs[:len(b.rbs)-1] - break - } - } - default: - grpclog.Errorf("Unknown update.Op %v", update.Op) - } - } - // TODO: Fall back to the basic round-robin load balancing if the resulting address is - // not a load balancer. - select { - case <-ch: - default: - } - ch <- b.rbs - return nil -} - -func convertDuration(d *lbmpb.Duration) time.Duration { - if d == nil { - return 0 - } - return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond -} - -func (b *grpclbBalancer) processServerList(l *lbmpb.ServerList, seq int) { - if l == nil { - return - } - servers := l.GetServers() - var ( - sl []*grpclbAddrInfo - addrs []Address - ) - for _, s := range servers { - md := metadata.Pairs("lb-token", s.LoadBalanceToken) - ip := net.IP(s.IpAddress) - ipStr := ip.String() - if ip.To4() == nil { - // Add square brackets to ipv6 addresses, otherwise net.Dial() and - // net.SplitHostPort() will return too many colons error. - ipStr = fmt.Sprintf("[%s]", ipStr) - } - addr := Address{ - Addr: fmt.Sprintf("%s:%d", ipStr, s.Port), - Metadata: &md, - } - sl = append(sl, &grpclbAddrInfo{ - addr: addr, - dropForRateLimiting: s.DropForRateLimiting, - dropForLoadBalancing: s.DropForLoadBalancing, - }) - addrs = append(addrs, addr) - } - b.mu.Lock() - defer b.mu.Unlock() - if b.done || seq < b.seq { - return - } - if len(sl) > 0 { - // reset b.next to 0 when replacing the server list. 
- b.next = 0 - b.addrs = sl - b.addrCh <- addrs - } - return -} - -func (b *grpclbBalancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration, done <-chan struct{}) { - ticker := time.NewTicker(interval) - defer ticker.Stop() - for { - select { - case <-ticker.C: - case <-done: - return - } - b.mu.Lock() - stats := b.clientStats - b.clientStats = lbmpb.ClientStats{} // Clear the stats. - b.mu.Unlock() - t := time.Now() - stats.Timestamp = &lbmpb.Timestamp{ - Seconds: t.Unix(), - Nanos: int32(t.Nanosecond()), - } - if err := s.Send(&lbmpb.LoadBalanceRequest{ - LoadBalanceRequestType: &lbmpb.LoadBalanceRequest_ClientStats{ - ClientStats: &stats, - }, - }); err != nil { - grpclog.Errorf("grpclb: failed to send load report: %v", err) - return - } - } -} - -func (b *grpclbBalancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry bool) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - stream, err := lbc.BalanceLoad(ctx) - if err != nil { - grpclog.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err) - return - } - b.mu.Lock() - if b.done { - b.mu.Unlock() - return - } - b.mu.Unlock() - initReq := &lbmpb.LoadBalanceRequest{ - LoadBalanceRequestType: &lbmpb.LoadBalanceRequest_InitialRequest{ - InitialRequest: &lbmpb.InitialLoadBalanceRequest{ - Name: b.target, - }, - }, - } - if err := stream.Send(initReq); err != nil { - grpclog.Errorf("grpclb: failed to send init request: %v", err) - // TODO: backoff on retry? - return true - } - reply, err := stream.Recv() - if err != nil { - grpclog.Errorf("grpclb: failed to recv init response: %v", err) - // TODO: backoff on retry? - return true - } - initResp := reply.GetInitialResponse() - if initResp == nil { - grpclog.Errorf("grpclb: reply from remote balancer did not include initial response.") - return - } - // TODO: Support delegation. - if initResp.LoadBalancerDelegate != "" { - // delegation - grpclog.Errorf("TODO: Delegation is not supported yet.") - return - } - streamDone := make(chan struct{}) - defer close(streamDone) - b.mu.Lock() - b.clientStats = lbmpb.ClientStats{} // Clear client stats. - b.mu.Unlock() - if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 { - go b.sendLoadReport(stream, d, streamDone) - } - // Retrieve the server list. - for { - reply, err := stream.Recv() - if err != nil { - grpclog.Errorf("grpclb: failed to recv server list: %v", err) - break - } - b.mu.Lock() - if b.done || seq < b.seq { - b.mu.Unlock() - return - } - b.seq++ // tick when receiving a new list of servers. - seq = b.seq - b.mu.Unlock() - if serverList := reply.GetServerList(); serverList != nil { - b.processServerList(serverList, seq) - } - } - return true -} - -func (b *grpclbBalancer) Start(target string, config BalancerConfig) error { - b.rand = rand.New(rand.NewSource(time.Now().Unix())) - // TODO: Fall back to the basic direct connection if there is no name resolver. - if b.r == nil { - return errors.New("there is no name resolver installed") - } - b.target = target - b.mu.Lock() - if b.done { - b.mu.Unlock() - return ErrClientConnClosing - } - b.addrCh = make(chan []Address) - w, err := b.r.Resolve(target) - if err != nil { - b.mu.Unlock() - grpclog.Errorf("grpclb: failed to resolve address: %v, err: %v", target, err) - return err - } - b.w = w - b.mu.Unlock() - balancerAddrsCh := make(chan []remoteBalancerInfo, 1) - // Spawn a goroutine to monitor the name resolution of remote load balancer. 
- go func() { - for { - if err := b.watchAddrUpdates(w, balancerAddrsCh); err != nil { - grpclog.Warningf("grpclb: the naming watcher stops working due to %v.\n", err) - close(balancerAddrsCh) - return - } - } - }() - // Spawn a goroutine to talk to the remote load balancer. - go func() { - var ( - cc *ClientConn - // ccError is closed when there is an error in the current cc. - // A new rb should be picked from rbs and connected. - ccError chan struct{} - rb *remoteBalancerInfo - rbs []remoteBalancerInfo - rbIdx int - ) - - defer func() { - if ccError != nil { - select { - case <-ccError: - default: - close(ccError) - } - } - if cc != nil { - cc.Close() - } - }() - - for { - var ok bool - select { - case rbs, ok = <-balancerAddrsCh: - if !ok { - return - } - foundIdx := -1 - if rb != nil { - for i, trb := range rbs { - if trb == *rb { - foundIdx = i - break - } - } - } - if foundIdx >= 0 { - if foundIdx >= 1 { - // Move the address in use to the beginning of the list. - b.rbs[0], b.rbs[foundIdx] = b.rbs[foundIdx], b.rbs[0] - rbIdx = 0 - } - continue // If found, don't dial new cc. - } else if len(rbs) > 0 { - // Pick a random one from the list, instead of always using the first one. - if l := len(rbs); l > 1 && rb != nil { - tmpIdx := b.rand.Intn(l - 1) - b.rbs[0], b.rbs[tmpIdx] = b.rbs[tmpIdx], b.rbs[0] - } - rbIdx = 0 - rb = &rbs[0] - } else { - // foundIdx < 0 && len(rbs) <= 0. - rb = nil - } - case <-ccError: - ccError = nil - if rbIdx < len(rbs)-1 { - rbIdx++ - rb = &rbs[rbIdx] - } else { - rb = nil - } - } - - if rb == nil { - continue - } - - if cc != nil { - cc.Close() - } - // Talk to the remote load balancer to get the server list. - var ( - err error - dopts []DialOption - ) - if creds := config.DialCreds; creds != nil { - if rb.name != "" { - if err := creds.OverrideServerName(rb.name); err != nil { - grpclog.Warningf("grpclb: failed to override the server name in the credentials: %v", err) - continue - } - } - dopts = append(dopts, WithTransportCredentials(creds)) - } else { - dopts = append(dopts, WithInsecure()) - } - if dialer := config.Dialer; dialer != nil { - // WithDialer takes a different type of function, so we instead use a special DialOption here. - dopts = append(dopts, func(o *dialOptions) { o.copts.Dialer = dialer }) - } - dopts = append(dopts, WithBlock()) - ccError = make(chan struct{}) - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - cc, err = DialContext(ctx, rb.addr, dopts...) 
- cancel() - if err != nil { - grpclog.Warningf("grpclb: failed to setup a connection to the remote balancer %v: %v", rb.addr, err) - close(ccError) - continue - } - b.mu.Lock() - b.seq++ // tick when getting a new balancer address - seq := b.seq - b.next = 0 - b.mu.Unlock() - go func(cc *ClientConn, ccError chan struct{}) { - lbc := &loadBalancerClient{cc} - b.callRemoteBalancer(lbc, seq) - cc.Close() - select { - case <-ccError: - default: - close(ccError) - } - }(cc, ccError) - } - }() - return nil -} - -func (b *grpclbBalancer) down(addr Address, err error) { - b.mu.Lock() - defer b.mu.Unlock() - for _, a := range b.addrs { - if addr == a.addr { - a.connected = false - break - } - } -} - -func (b *grpclbBalancer) Up(addr Address) func(error) { - b.mu.Lock() - defer b.mu.Unlock() - if b.done { - return nil - } - var cnt int - for _, a := range b.addrs { - if a.addr == addr { - if a.connected { - return nil - } - a.connected = true - } - if a.connected && !a.dropForRateLimiting && !a.dropForLoadBalancing { - cnt++ - } - } - // addr is the only one which is connected. Notify the Get() callers who are blocking. - if cnt == 1 && b.waitCh != nil { - close(b.waitCh) - b.waitCh = nil - } - return func(err error) { - b.down(addr, err) - } -} - -func (b *grpclbBalancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { - var ch chan struct{} - b.mu.Lock() - if b.done { - b.mu.Unlock() - err = ErrClientConnClosing - return - } - seq := b.seq - - defer func() { - if err != nil { - return - } - put = func() { - s, ok := rpcInfoFromContext(ctx) - if !ok { - return - } - b.mu.Lock() - defer b.mu.Unlock() - if b.done || seq < b.seq { - return - } - b.clientStats.NumCallsFinished++ - if !s.bytesSent { - b.clientStats.NumCallsFinishedWithClientFailedToSend++ - } else if s.bytesReceived { - b.clientStats.NumCallsFinishedKnownReceived++ - } - } - }() - - b.clientStats.NumCallsStarted++ - if len(b.addrs) > 0 { - if b.next >= len(b.addrs) { - b.next = 0 - } - next := b.next - for { - a := b.addrs[next] - next = (next + 1) % len(b.addrs) - if a.connected { - if !a.dropForRateLimiting && !a.dropForLoadBalancing { - addr = a.addr - b.next = next - b.mu.Unlock() - return - } - if !opts.BlockingWait { - b.next = next - if a.dropForLoadBalancing { - b.clientStats.NumCallsFinished++ - b.clientStats.NumCallsFinishedWithDropForLoadBalancing++ - } else if a.dropForRateLimiting { - b.clientStats.NumCallsFinished++ - b.clientStats.NumCallsFinishedWithDropForRateLimiting++ - } - b.mu.Unlock() - err = Errorf(codes.Unavailable, "%s drops requests", a.addr.Addr) - return - } - } - if next == b.next { - // Has iterated all the possible address but none is connected. - break - } - } - } - if !opts.BlockingWait { - b.clientStats.NumCallsFinished++ - b.clientStats.NumCallsFinishedWithClientFailedToSend++ - b.mu.Unlock() - err = Errorf(codes.Unavailable, "there is no address available") - return - } - // Wait on b.waitCh for non-failfast RPCs. 
- if b.waitCh == nil { - ch = make(chan struct{}) - b.waitCh = ch - } else { - ch = b.waitCh - } - b.mu.Unlock() - for { - select { - case <-ctx.Done(): - b.mu.Lock() - b.clientStats.NumCallsFinished++ - b.clientStats.NumCallsFinishedWithClientFailedToSend++ - b.mu.Unlock() - err = ctx.Err() - return - case <-ch: - b.mu.Lock() - if b.done { - b.clientStats.NumCallsFinished++ - b.clientStats.NumCallsFinishedWithClientFailedToSend++ - b.mu.Unlock() - err = ErrClientConnClosing - return - } - - if len(b.addrs) > 0 { - if b.next >= len(b.addrs) { - b.next = 0 - } - next := b.next - for { - a := b.addrs[next] - next = (next + 1) % len(b.addrs) - if a.connected { - if !a.dropForRateLimiting && !a.dropForLoadBalancing { - addr = a.addr - b.next = next - b.mu.Unlock() - return - } - if !opts.BlockingWait { - b.next = next - if a.dropForLoadBalancing { - b.clientStats.NumCallsFinished++ - b.clientStats.NumCallsFinishedWithDropForLoadBalancing++ - } else if a.dropForRateLimiting { - b.clientStats.NumCallsFinished++ - b.clientStats.NumCallsFinishedWithDropForRateLimiting++ - } - b.mu.Unlock() - err = Errorf(codes.Unavailable, "drop requests for the addreess %s", a.addr.Addr) - return - } - } - if next == b.next { - // Has iterated all the possible address but none is connected. - break - } - } - } - // The newly added addr got removed by Down() again. - if b.waitCh == nil { - ch = make(chan struct{}) - b.waitCh = ch - } else { - ch = b.waitCh - } - b.mu.Unlock() - } - } -} - -func (b *grpclbBalancer) Notify() <-chan []Address { - return b.addrCh -} - -func (b *grpclbBalancer) Close() error { - b.mu.Lock() - defer b.mu.Unlock() - if b.done { - return errBalancerClosed - } - b.done = true - if b.waitCh != nil { - close(b.waitCh) - } - if b.addrCh != nil { - close(b.addrCh) - } - if b.w != nil { - b.w.Close() - } - return nil -} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go b/cluster-autoscaler/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go deleted file mode 100644 index f4a27125a4fb..000000000000 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go +++ /dev/null @@ -1,615 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: grpc_lb_v1/messages/messages.proto - -/* -Package messages is a generated protocol buffer package. - -It is generated from these files: - grpc_lb_v1/messages/messages.proto - -It has these top-level messages: - Duration - Timestamp - LoadBalanceRequest - InitialLoadBalanceRequest - ClientStats - LoadBalanceResponse - InitialLoadBalanceResponse - ServerList - Server -*/ -package messages - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Duration struct { - // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. - Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` - // Signed fractions of a second at nanosecond resolution of the span - // of time. 
Durations less than one second are represented with a 0 - // `seconds` field and a positive or negative `nanos` field. For durations - // of one second or more, a non-zero value for the `nanos` field must be - // of the same sign as the `seconds` field. Must be from -999,999,999 - // to +999,999,999 inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` -} - -func (m *Duration) Reset() { *m = Duration{} } -func (m *Duration) String() string { return proto.CompactTextString(m) } -func (*Duration) ProtoMessage() {} -func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -func (m *Duration) GetSeconds() int64 { - if m != nil { - return m.Seconds - } - return 0 -} - -func (m *Duration) GetNanos() int32 { - if m != nil { - return m.Nanos - } - return 0 -} - -type Timestamp struct { - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` -} - -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (m *Timestamp) String() string { return proto.CompactTextString(m) } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -func (m *Timestamp) GetSeconds() int64 { - if m != nil { - return m.Seconds - } - return 0 -} - -func (m *Timestamp) GetNanos() int32 { - if m != nil { - return m.Nanos - } - return 0 -} - -type LoadBalanceRequest struct { - // Types that are valid to be assigned to LoadBalanceRequestType: - // *LoadBalanceRequest_InitialRequest - // *LoadBalanceRequest_ClientStats - LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"` -} - -func (m *LoadBalanceRequest) Reset() { *m = LoadBalanceRequest{} } -func (m *LoadBalanceRequest) String() string { return proto.CompactTextString(m) } -func (*LoadBalanceRequest) ProtoMessage() {} -func (*LoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -type isLoadBalanceRequest_LoadBalanceRequestType interface { - isLoadBalanceRequest_LoadBalanceRequestType() -} - -type LoadBalanceRequest_InitialRequest struct { - InitialRequest *InitialLoadBalanceRequest `protobuf:"bytes,1,opt,name=initial_request,json=initialRequest,oneof"` -} -type LoadBalanceRequest_ClientStats struct { - ClientStats *ClientStats `protobuf:"bytes,2,opt,name=client_stats,json=clientStats,oneof"` -} - -func (*LoadBalanceRequest_InitialRequest) isLoadBalanceRequest_LoadBalanceRequestType() {} -func (*LoadBalanceRequest_ClientStats) isLoadBalanceRequest_LoadBalanceRequestType() {} - -func (m *LoadBalanceRequest) GetLoadBalanceRequestType() isLoadBalanceRequest_LoadBalanceRequestType { - if m != nil { - return m.LoadBalanceRequestType - } - return nil -} - -func (m *LoadBalanceRequest) GetInitialRequest() *InitialLoadBalanceRequest { - if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_InitialRequest); ok { - return x.InitialRequest - } - return nil -} - -func (m *LoadBalanceRequest) GetClientStats() *ClientStats { - if x, ok := 
m.GetLoadBalanceRequestType().(*LoadBalanceRequest_ClientStats); ok { - return x.ClientStats - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*LoadBalanceRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _LoadBalanceRequest_OneofMarshaler, _LoadBalanceRequest_OneofUnmarshaler, _LoadBalanceRequest_OneofSizer, []interface{}{ - (*LoadBalanceRequest_InitialRequest)(nil), - (*LoadBalanceRequest_ClientStats)(nil), - } -} - -func _LoadBalanceRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*LoadBalanceRequest) - // load_balance_request_type - switch x := m.LoadBalanceRequestType.(type) { - case *LoadBalanceRequest_InitialRequest: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.InitialRequest); err != nil { - return err - } - case *LoadBalanceRequest_ClientStats: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ClientStats); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("LoadBalanceRequest.LoadBalanceRequestType has unexpected type %T", x) - } - return nil -} - -func _LoadBalanceRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*LoadBalanceRequest) - switch tag { - case 1: // load_balance_request_type.initial_request - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(InitialLoadBalanceRequest) - err := b.DecodeMessage(msg) - m.LoadBalanceRequestType = &LoadBalanceRequest_InitialRequest{msg} - return true, err - case 2: // load_balance_request_type.client_stats - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ClientStats) - err := b.DecodeMessage(msg) - m.LoadBalanceRequestType = &LoadBalanceRequest_ClientStats{msg} - return true, err - default: - return false, nil - } -} - -func _LoadBalanceRequest_OneofSizer(msg proto.Message) (n int) { - m := msg.(*LoadBalanceRequest) - // load_balance_request_type - switch x := m.LoadBalanceRequestType.(type) { - case *LoadBalanceRequest_InitialRequest: - s := proto.Size(x.InitialRequest) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *LoadBalanceRequest_ClientStats: - s := proto.Size(x.ClientStats) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type InitialLoadBalanceRequest struct { - // Name of load balanced service (IE, balancer.service.com) - // length should be less than 256 bytes. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` -} - -func (m *InitialLoadBalanceRequest) Reset() { *m = InitialLoadBalanceRequest{} } -func (m *InitialLoadBalanceRequest) String() string { return proto.CompactTextString(m) } -func (*InitialLoadBalanceRequest) ProtoMessage() {} -func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } - -func (m *InitialLoadBalanceRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -// Contains client level statistics that are useful to load balancing. Each -// count except the timestamp should be reset to zero after reporting the stats. -type ClientStats struct { - // The timestamp of generating the report. 
- Timestamp *Timestamp `protobuf:"bytes,1,opt,name=timestamp" json:"timestamp,omitempty"` - // The total number of RPCs that started. - NumCallsStarted int64 `protobuf:"varint,2,opt,name=num_calls_started,json=numCallsStarted" json:"num_calls_started,omitempty"` - // The total number of RPCs that finished. - NumCallsFinished int64 `protobuf:"varint,3,opt,name=num_calls_finished,json=numCallsFinished" json:"num_calls_finished,omitempty"` - // The total number of RPCs that were dropped by the client because of rate - // limiting. - NumCallsFinishedWithDropForRateLimiting int64 `protobuf:"varint,4,opt,name=num_calls_finished_with_drop_for_rate_limiting,json=numCallsFinishedWithDropForRateLimiting" json:"num_calls_finished_with_drop_for_rate_limiting,omitempty"` - // The total number of RPCs that were dropped by the client because of load - // balancing. - NumCallsFinishedWithDropForLoadBalancing int64 `protobuf:"varint,5,opt,name=num_calls_finished_with_drop_for_load_balancing,json=numCallsFinishedWithDropForLoadBalancing" json:"num_calls_finished_with_drop_for_load_balancing,omitempty"` - // The total number of RPCs that failed to reach a server except dropped RPCs. - NumCallsFinishedWithClientFailedToSend int64 `protobuf:"varint,6,opt,name=num_calls_finished_with_client_failed_to_send,json=numCallsFinishedWithClientFailedToSend" json:"num_calls_finished_with_client_failed_to_send,omitempty"` - // The total number of RPCs that finished and are known to have been received - // by a server. - NumCallsFinishedKnownReceived int64 `protobuf:"varint,7,opt,name=num_calls_finished_known_received,json=numCallsFinishedKnownReceived" json:"num_calls_finished_known_received,omitempty"` -} - -func (m *ClientStats) Reset() { *m = ClientStats{} } -func (m *ClientStats) String() string { return proto.CompactTextString(m) } -func (*ClientStats) ProtoMessage() {} -func (*ClientStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } - -func (m *ClientStats) GetTimestamp() *Timestamp { - if m != nil { - return m.Timestamp - } - return nil -} - -func (m *ClientStats) GetNumCallsStarted() int64 { - if m != nil { - return m.NumCallsStarted - } - return 0 -} - -func (m *ClientStats) GetNumCallsFinished() int64 { - if m != nil { - return m.NumCallsFinished - } - return 0 -} - -func (m *ClientStats) GetNumCallsFinishedWithDropForRateLimiting() int64 { - if m != nil { - return m.NumCallsFinishedWithDropForRateLimiting - } - return 0 -} - -func (m *ClientStats) GetNumCallsFinishedWithDropForLoadBalancing() int64 { - if m != nil { - return m.NumCallsFinishedWithDropForLoadBalancing - } - return 0 -} - -func (m *ClientStats) GetNumCallsFinishedWithClientFailedToSend() int64 { - if m != nil { - return m.NumCallsFinishedWithClientFailedToSend - } - return 0 -} - -func (m *ClientStats) GetNumCallsFinishedKnownReceived() int64 { - if m != nil { - return m.NumCallsFinishedKnownReceived - } - return 0 -} - -type LoadBalanceResponse struct { - // Types that are valid to be assigned to LoadBalanceResponseType: - // *LoadBalanceResponse_InitialResponse - // *LoadBalanceResponse_ServerList - LoadBalanceResponseType isLoadBalanceResponse_LoadBalanceResponseType `protobuf_oneof:"load_balance_response_type"` -} - -func (m *LoadBalanceResponse) Reset() { *m = LoadBalanceResponse{} } -func (m *LoadBalanceResponse) String() string { return proto.CompactTextString(m) } -func (*LoadBalanceResponse) ProtoMessage() {} -func (*LoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } - -type 
isLoadBalanceResponse_LoadBalanceResponseType interface { - isLoadBalanceResponse_LoadBalanceResponseType() -} - -type LoadBalanceResponse_InitialResponse struct { - InitialResponse *InitialLoadBalanceResponse `protobuf:"bytes,1,opt,name=initial_response,json=initialResponse,oneof"` -} -type LoadBalanceResponse_ServerList struct { - ServerList *ServerList `protobuf:"bytes,2,opt,name=server_list,json=serverList,oneof"` -} - -func (*LoadBalanceResponse_InitialResponse) isLoadBalanceResponse_LoadBalanceResponseType() {} -func (*LoadBalanceResponse_ServerList) isLoadBalanceResponse_LoadBalanceResponseType() {} - -func (m *LoadBalanceResponse) GetLoadBalanceResponseType() isLoadBalanceResponse_LoadBalanceResponseType { - if m != nil { - return m.LoadBalanceResponseType - } - return nil -} - -func (m *LoadBalanceResponse) GetInitialResponse() *InitialLoadBalanceResponse { - if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_InitialResponse); ok { - return x.InitialResponse - } - return nil -} - -func (m *LoadBalanceResponse) GetServerList() *ServerList { - if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_ServerList); ok { - return x.ServerList - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*LoadBalanceResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _LoadBalanceResponse_OneofMarshaler, _LoadBalanceResponse_OneofUnmarshaler, _LoadBalanceResponse_OneofSizer, []interface{}{ - (*LoadBalanceResponse_InitialResponse)(nil), - (*LoadBalanceResponse_ServerList)(nil), - } -} - -func _LoadBalanceResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*LoadBalanceResponse) - // load_balance_response_type - switch x := m.LoadBalanceResponseType.(type) { - case *LoadBalanceResponse_InitialResponse: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.InitialResponse); err != nil { - return err - } - case *LoadBalanceResponse_ServerList: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ServerList); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("LoadBalanceResponse.LoadBalanceResponseType has unexpected type %T", x) - } - return nil -} - -func _LoadBalanceResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*LoadBalanceResponse) - switch tag { - case 1: // load_balance_response_type.initial_response - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(InitialLoadBalanceResponse) - err := b.DecodeMessage(msg) - m.LoadBalanceResponseType = &LoadBalanceResponse_InitialResponse{msg} - return true, err - case 2: // load_balance_response_type.server_list - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ServerList) - err := b.DecodeMessage(msg) - m.LoadBalanceResponseType = &LoadBalanceResponse_ServerList{msg} - return true, err - default: - return false, nil - } -} - -func _LoadBalanceResponse_OneofSizer(msg proto.Message) (n int) { - m := msg.(*LoadBalanceResponse) - // load_balance_response_type - switch x := m.LoadBalanceResponseType.(type) { - case *LoadBalanceResponse_InitialResponse: - s := proto.Size(x.InitialResponse) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *LoadBalanceResponse_ServerList: - s := 
proto.Size(x.ServerList) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type InitialLoadBalanceResponse struct { - // This is an application layer redirect that indicates the client should use - // the specified server for load balancing. When this field is non-empty in - // the response, the client should open a separate connection to the - // load_balancer_delegate and call the BalanceLoad method. Its length should - // be less than 64 bytes. - LoadBalancerDelegate string `protobuf:"bytes,1,opt,name=load_balancer_delegate,json=loadBalancerDelegate" json:"load_balancer_delegate,omitempty"` - // This interval defines how often the client should send the client stats - // to the load balancer. Stats should only be reported when the duration is - // positive. - ClientStatsReportInterval *Duration `protobuf:"bytes,2,opt,name=client_stats_report_interval,json=clientStatsReportInterval" json:"client_stats_report_interval,omitempty"` -} - -func (m *InitialLoadBalanceResponse) Reset() { *m = InitialLoadBalanceResponse{} } -func (m *InitialLoadBalanceResponse) String() string { return proto.CompactTextString(m) } -func (*InitialLoadBalanceResponse) ProtoMessage() {} -func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } - -func (m *InitialLoadBalanceResponse) GetLoadBalancerDelegate() string { - if m != nil { - return m.LoadBalancerDelegate - } - return "" -} - -func (m *InitialLoadBalanceResponse) GetClientStatsReportInterval() *Duration { - if m != nil { - return m.ClientStatsReportInterval - } - return nil -} - -type ServerList struct { - // Contains a list of servers selected by the load balancer. The list will - // be updated when server resolutions change or as needed to balance load - // across more servers. The client should consume the server list in order - // unless instructed otherwise via the client_config. - Servers []*Server `protobuf:"bytes,1,rep,name=servers" json:"servers,omitempty"` -} - -func (m *ServerList) Reset() { *m = ServerList{} } -func (m *ServerList) String() string { return proto.CompactTextString(m) } -func (*ServerList) ProtoMessage() {} -func (*ServerList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } - -func (m *ServerList) GetServers() []*Server { - if m != nil { - return m.Servers - } - return nil -} - -// Contains server information. When none of the [drop_for_*] fields are true, -// use the other fields. When drop_for_rate_limiting is true, ignore all other -// fields. Use drop_for_load_balancing only when it is true and -// drop_for_rate_limiting is false. -type Server struct { - // A resolved address for the server, serialized in network-byte-order. It may - // either be an IPv4 or IPv6 address. - IpAddress []byte `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` - // A resolved port number for the server. - Port int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"` - // An opaque but printable token given to the frontend for each pick. All - // frontend requests for that pick must include the token in its initial - // metadata. The token is used by the backend to verify the request and to - // allow the backend to report load to the gRPC LB system. - // - // Its length is variable but less than 50 bytes. 
- LoadBalanceToken string `protobuf:"bytes,3,opt,name=load_balance_token,json=loadBalanceToken" json:"load_balance_token,omitempty"` - // Indicates whether this particular request should be dropped by the client - // for rate limiting. - DropForRateLimiting bool `protobuf:"varint,4,opt,name=drop_for_rate_limiting,json=dropForRateLimiting" json:"drop_for_rate_limiting,omitempty"` - // Indicates whether this particular request should be dropped by the client - // for load balancing. - DropForLoadBalancing bool `protobuf:"varint,5,opt,name=drop_for_load_balancing,json=dropForLoadBalancing" json:"drop_for_load_balancing,omitempty"` -} - -func (m *Server) Reset() { *m = Server{} } -func (m *Server) String() string { return proto.CompactTextString(m) } -func (*Server) ProtoMessage() {} -func (*Server) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } - -func (m *Server) GetIpAddress() []byte { - if m != nil { - return m.IpAddress - } - return nil -} - -func (m *Server) GetPort() int32 { - if m != nil { - return m.Port - } - return 0 -} - -func (m *Server) GetLoadBalanceToken() string { - if m != nil { - return m.LoadBalanceToken - } - return "" -} - -func (m *Server) GetDropForRateLimiting() bool { - if m != nil { - return m.DropForRateLimiting - } - return false -} - -func (m *Server) GetDropForLoadBalancing() bool { - if m != nil { - return m.DropForLoadBalancing - } - return false -} - -func init() { - proto.RegisterType((*Duration)(nil), "grpc.lb.v1.Duration") - proto.RegisterType((*Timestamp)(nil), "grpc.lb.v1.Timestamp") - proto.RegisterType((*LoadBalanceRequest)(nil), "grpc.lb.v1.LoadBalanceRequest") - proto.RegisterType((*InitialLoadBalanceRequest)(nil), "grpc.lb.v1.InitialLoadBalanceRequest") - proto.RegisterType((*ClientStats)(nil), "grpc.lb.v1.ClientStats") - proto.RegisterType((*LoadBalanceResponse)(nil), "grpc.lb.v1.LoadBalanceResponse") - proto.RegisterType((*InitialLoadBalanceResponse)(nil), "grpc.lb.v1.InitialLoadBalanceResponse") - proto.RegisterType((*ServerList)(nil), "grpc.lb.v1.ServerList") - proto.RegisterType((*Server)(nil), "grpc.lb.v1.Server") -} - -func init() { proto.RegisterFile("grpc_lb_v1/messages/messages.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 709 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xdd, 0x4e, 0x1b, 0x3b, - 0x10, 0x26, 0x27, 0x01, 0x92, 0x09, 0x3a, 0xe4, 0x98, 0x1c, 0x08, 0x14, 0x24, 0xba, 0x52, 0x69, - 0x54, 0xd1, 0x20, 0xa0, 0xbd, 0xe8, 0xcf, 0x45, 0x1b, 0x10, 0x0a, 0x2d, 0x17, 0x95, 0x43, 0x55, - 0xa9, 0x52, 0x65, 0x39, 0xd9, 0x21, 0x58, 0x6c, 0xec, 0xad, 0xed, 0x04, 0xf5, 0x11, 0xfa, 0x28, - 0x7d, 0x8c, 0xaa, 0xcf, 0xd0, 0xf7, 0xa9, 0xd6, 0xbb, 0x9b, 0x5d, 0x20, 0x80, 0x7a, 0x67, 0x8f, - 0xbf, 0xf9, 0xbe, 0xf1, 0xac, 0xbf, 0x59, 0xf0, 0x06, 0x3a, 0xec, 0xb3, 0xa0, 0xc7, 0xc6, 0xbb, - 0x3b, 0x43, 0x34, 0x86, 0x0f, 0xd0, 0x4c, 0x16, 0xad, 0x50, 0x2b, 0xab, 0x08, 0x44, 0x98, 0x56, - 0xd0, 0x6b, 0x8d, 0x77, 0xbd, 0x97, 0x50, 0x3e, 0x1c, 0x69, 0x6e, 0x85, 0x92, 0xa4, 0x01, 0xf3, - 0x06, 0xfb, 0x4a, 0xfa, 0xa6, 0x51, 0xd8, 0x2c, 0x34, 0x8b, 0x34, 0xdd, 0x92, 0x3a, 0xcc, 0x4a, - 0x2e, 0x95, 0x69, 0xfc, 0xb3, 0x59, 0x68, 0xce, 0xd2, 0x78, 0xe3, 0xbd, 0x82, 0xca, 0xa9, 0x18, - 0xa2, 0xb1, 0x7c, 0x18, 0xfe, 0x75, 0xf2, 0xcf, 0x02, 0x90, 0x13, 0xc5, 0xfd, 0x36, 0x0f, 0xb8, - 0xec, 0x23, 0xc5, 0xaf, 0x23, 0x34, 0x96, 0x7c, 0x80, 0x45, 0x21, 0x85, 0x15, 0x3c, 0x60, 0x3a, - 0x0e, 0x39, 0xba, 0xea, 0xde, 0xa3, 0x56, 0x56, 0x75, 0xeb, 0x38, 0x86, 0xdc, 0xcc, 
0xef, 0xcc, - 0xd0, 0x7f, 0x93, 0xfc, 0x94, 0xf1, 0x35, 0x2c, 0xf4, 0x03, 0x81, 0xd2, 0x32, 0x63, 0xb9, 0x8d, - 0xab, 0xa8, 0xee, 0xad, 0xe4, 0xe9, 0x0e, 0xdc, 0x79, 0x37, 0x3a, 0xee, 0xcc, 0xd0, 0x6a, 0x3f, - 0xdb, 0xb6, 0x1f, 0xc0, 0x6a, 0xa0, 0xb8, 0xcf, 0x7a, 0xb1, 0x4c, 0x5a, 0x14, 0xb3, 0xdf, 0x42, - 0xf4, 0x76, 0x60, 0xf5, 0xd6, 0x4a, 0x08, 0x81, 0x92, 0xe4, 0x43, 0x74, 0xe5, 0x57, 0xa8, 0x5b, - 0x7b, 0xdf, 0x4b, 0x50, 0xcd, 0x89, 0x91, 0x7d, 0xa8, 0xd8, 0xb4, 0x83, 0xc9, 0x3d, 0xff, 0xcf, - 0x17, 0x36, 0x69, 0x2f, 0xcd, 0x70, 0xe4, 0x09, 0xfc, 0x27, 0x47, 0x43, 0xd6, 0xe7, 0x41, 0x60, - 0xa2, 0x3b, 0x69, 0x8b, 0xbe, 0xbb, 0x55, 0x91, 0x2e, 0xca, 0xd1, 0xf0, 0x20, 0x8a, 0x77, 0xe3, - 0x30, 0xd9, 0x06, 0x92, 0x61, 0xcf, 0x84, 0x14, 0xe6, 0x1c, 0xfd, 0x46, 0xd1, 0x81, 0x6b, 0x29, - 0xf8, 0x28, 0x89, 0x13, 0x06, 0xad, 0x9b, 0x68, 0x76, 0x29, 0xec, 0x39, 0xf3, 0xb5, 0x0a, 0xd9, - 0x99, 0xd2, 0x4c, 0x73, 0x8b, 0x2c, 0x10, 0x43, 0x61, 0x85, 0x1c, 0x34, 0x4a, 0x8e, 0xe9, 0xf1, - 0x75, 0xa6, 0x4f, 0xc2, 0x9e, 0x1f, 0x6a, 0x15, 0x1e, 0x29, 0x4d, 0xb9, 0xc5, 0x93, 0x04, 0x4e, - 0x38, 0xec, 0xdc, 0x2b, 0x90, 0x6b, 0x77, 0xa4, 0x30, 0xeb, 0x14, 0x9a, 0x77, 0x28, 0x64, 0xbd, - 0x8f, 0x24, 0xbe, 0xc0, 0xd3, 0xdb, 0x24, 0x92, 0x67, 0x70, 0xc6, 0x45, 0x80, 0x3e, 0xb3, 0x8a, - 0x19, 0x94, 0x7e, 0x63, 0xce, 0x09, 0x6c, 0x4d, 0x13, 0x88, 0x3f, 0xd5, 0x91, 0xc3, 0x9f, 0xaa, - 0x2e, 0x4a, 0x9f, 0x74, 0xe0, 0xe1, 0x14, 0xfa, 0x0b, 0xa9, 0x2e, 0x25, 0xd3, 0xd8, 0x47, 0x31, - 0x46, 0xbf, 0x31, 0xef, 0x28, 0x37, 0xae, 0x53, 0xbe, 0x8f, 0x50, 0x34, 0x01, 0x79, 0xbf, 0x0a, - 0xb0, 0x74, 0xe5, 0xd9, 0x98, 0x50, 0x49, 0x83, 0xa4, 0x0b, 0xb5, 0xcc, 0x01, 0x71, 0x2c, 0x79, - 0x1a, 0x5b, 0xf7, 0x59, 0x20, 0x46, 0x77, 0x66, 0xe8, 0xe2, 0xc4, 0x03, 0x09, 0xe9, 0x0b, 0xa8, - 0x1a, 0xd4, 0x63, 0xd4, 0x2c, 0x10, 0xc6, 0x26, 0x1e, 0x58, 0xce, 0xf3, 0x75, 0xdd, 0xf1, 0x89, - 0x70, 0x1e, 0x02, 0x33, 0xd9, 0xb5, 0xd7, 0x61, 0xed, 0x9a, 0x03, 0x62, 0xce, 0xd8, 0x02, 0x3f, - 0x0a, 0xb0, 0x76, 0x7b, 0x29, 0xe4, 0x19, 0x2c, 0xe7, 0x93, 0x35, 0xf3, 0x31, 0xc0, 0x01, 0xb7, - 0xa9, 0x2d, 0xea, 0x41, 0x96, 0xa4, 0x0f, 0x93, 0x33, 0xf2, 0x11, 0xd6, 0xf3, 0x96, 0x65, 0x1a, - 0x43, 0xa5, 0x2d, 0x13, 0xd2, 0xa2, 0x1e, 0xf3, 0x20, 0x29, 0xbf, 0x9e, 0x2f, 0x3f, 0x1d, 0x62, - 0x74, 0x35, 0xe7, 0x5e, 0xea, 0xf2, 0x8e, 0x93, 0x34, 0xef, 0x0d, 0x40, 0x76, 0x4b, 0xb2, 0x1d, - 0x0d, 0xac, 0x68, 0x17, 0x0d, 0xac, 0x62, 0xb3, 0xba, 0x47, 0x6e, 0xb6, 0x83, 0xa6, 0x90, 0x77, - 0xa5, 0x72, 0xb1, 0x56, 0xf2, 0x7e, 0x17, 0x60, 0x2e, 0x3e, 0x21, 0x1b, 0x00, 0x22, 0x64, 0xdc, - 0xf7, 0x35, 0x9a, 0x78, 0xe4, 0x2d, 0xd0, 0x8a, 0x08, 0xdf, 0xc6, 0x81, 0xc8, 0xfd, 0x91, 0x76, - 0x32, 0xf3, 0xdc, 0x3a, 0x32, 0xe3, 0x95, 0x4e, 0x5a, 0x75, 0x81, 0xd2, 0x99, 0xb1, 0x42, 0x6b, - 0xb9, 0x46, 0x9c, 0x46, 0x71, 0xb2, 0x0f, 0xcb, 0x77, 0x98, 0xae, 0x4c, 0x97, 0xfc, 0x29, 0x06, - 0x7b, 0x0e, 0x2b, 0x77, 0x19, 0xa9, 0x4c, 0xeb, 0xfe, 0x14, 0xd3, 0xb4, 0xe1, 0x73, 0x39, 0xfd, - 0x47, 0xf4, 0xe6, 0xdc, 0x4f, 0x62, 0xff, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa3, 0x36, 0x86, - 0xa6, 0x4a, 0x06, 0x00, 0x00, -} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto b/cluster-autoscaler/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto deleted file mode 100644 index 2ed04551fad2..000000000000 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2016 gRPC authors. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package grpc.lb.v1; -option go_package = "messages"; - -message Duration { - // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. - int64 seconds = 1; - - // Signed fractions of a second at nanosecond resolution of the span - // of time. Durations less than one second are represented with a 0 - // `seconds` field and a positive or negative `nanos` field. For durations - // of one second or more, a non-zero value for the `nanos` field must be - // of the same sign as the `seconds` field. Must be from -999,999,999 - // to +999,999,999 inclusive. - int32 nanos = 2; -} - -message Timestamp { - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - int64 seconds = 1; - - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. - int32 nanos = 2; -} - -message LoadBalanceRequest { - oneof load_balance_request_type { - // This message should be sent on the first request to the load balancer. - InitialLoadBalanceRequest initial_request = 1; - - // The client stats should be periodically reported to the load balancer - // based on the duration defined in the InitialLoadBalanceResponse. - ClientStats client_stats = 2; - } -} - -message InitialLoadBalanceRequest { - // Name of load balanced service (IE, balancer.service.com) - // length should be less than 256 bytes. - string name = 1; -} - -// Contains client level statistics that are useful to load balancing. Each -// count except the timestamp should be reset to zero after reporting the stats. -message ClientStats { - // The timestamp of generating the report. - Timestamp timestamp = 1; - - // The total number of RPCs that started. - int64 num_calls_started = 2; - - // The total number of RPCs that finished. - int64 num_calls_finished = 3; - - // The total number of RPCs that were dropped by the client because of rate - // limiting. - int64 num_calls_finished_with_drop_for_rate_limiting = 4; - - // The total number of RPCs that were dropped by the client because of load - // balancing. - int64 num_calls_finished_with_drop_for_load_balancing = 5; - - // The total number of RPCs that failed to reach a server except dropped RPCs. - int64 num_calls_finished_with_client_failed_to_send = 6; - - // The total number of RPCs that finished and are known to have been received - // by a server. - int64 num_calls_finished_known_received = 7; -} - -message LoadBalanceResponse { - oneof load_balance_response_type { - // This message should be sent on the first response to the client. - InitialLoadBalanceResponse initial_response = 1; - - // Contains the list of servers selected by the load balancer. 
The client - // should send requests to these servers in the specified order. - ServerList server_list = 2; - } -} - -message InitialLoadBalanceResponse { - // This is an application layer redirect that indicates the client should use - // the specified server for load balancing. When this field is non-empty in - // the response, the client should open a separate connection to the - // load_balancer_delegate and call the BalanceLoad method. Its length should - // be less than 64 bytes. - string load_balancer_delegate = 1; - - // This interval defines how often the client should send the client stats - // to the load balancer. Stats should only be reported when the duration is - // positive. - Duration client_stats_report_interval = 2; -} - -message ServerList { - // Contains a list of servers selected by the load balancer. The list will - // be updated when server resolutions change or as needed to balance load - // across more servers. The client should consume the server list in order - // unless instructed otherwise via the client_config. - repeated Server servers = 1; - - // Was google.protobuf.Duration expiration_interval. - reserved 3; -} - -// Contains server information. When none of the [drop_for_*] fields are true, -// use the other fields. When drop_for_rate_limiting is true, ignore all other -// fields. Use drop_for_load_balancing only when it is true and -// drop_for_rate_limiting is false. -message Server { - // A resolved address for the server, serialized in network-byte-order. It may - // either be an IPv4 or IPv6 address. - bytes ip_address = 1; - - // A resolved port number for the server. - int32 port = 2; - - // An opaque but printable token given to the frontend for each pick. All - // frontend requests for that pick must include the token in its initial - // metadata. The token is used by the backend to verify the request and to - // allow the backend to report load to the gRPC LB system. - // - // Its length is variable but less than 50 bytes. - string load_balance_token = 3; - - // Indicates whether this particular request should be dropped by the client - // for rate limiting. - bool drop_for_rate_limiting = 4; - - // Indicates whether this particular request should be dropped by the client - // for load balancing. - bool drop_for_load_balancing = 5; -} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/grpclog/grpclog.go b/cluster-autoscaler/vendor/google.golang.org/grpc/grpclog/grpclog.go index 1d71e25de502..501bd529cdd3 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -105,18 +105,21 @@ func Fatalln(args ...interface{}) { } // Print prints to the logger. Arguments are handled in the manner of fmt.Print. +// // Deprecated: use Info. func Print(args ...interface{}) { logger.Info(args...) } // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. +// // Deprecated: use Infof. func Printf(format string, args ...interface{}) { logger.Infof(format, args...) } // Println prints to the logger. Arguments are handled in the manner of fmt.Println. +// // Deprecated: use Infoln. func Println(args ...interface{}) { logger.Infoln(args...) 
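The grpclog hunk above only adds "Deprecated" notes; the old helpers keep forwarding to their leveled equivalents. A minimal migration sketch (illustrative only, assuming application code that still uses the deprecated names):

```go
package main

import "google.golang.org/grpc/grpclog"

func main() {
	// Deprecated spelling still works: Print/Printf/Println forward to
	// Info/Infof/Infoln internally.
	grpclog.Print("starting up")

	// Preferred leveled equivalents after this change:
	grpclog.Info("starting up")
	grpclog.Infof("listening on %s", ":50051")
}
```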
diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/grpclog/logger.go b/cluster-autoscaler/vendor/google.golang.org/grpc/grpclog/logger.go index d03b2397bfab..097494f710f5 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/grpclog/logger.go @@ -19,6 +19,7 @@ package grpclog // Logger mimics golang's standard Logger as an interface. +// // Deprecated: use LoggerV2. type Logger interface { Fatal(args ...interface{}) @@ -31,6 +32,7 @@ type Logger interface { // SetLogger sets the logger that is used in grpc. Call only from // init() functions. +// // Deprecated: use SetLoggerV2. func SetLogger(l Logger) { logger = &loggerWrapper{Logger: l} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/interceptor.go b/cluster-autoscaler/vendor/google.golang.org/grpc/interceptor.go index 06dc825b9fba..1f6ef678035b 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/interceptor.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/interceptor.go @@ -48,7 +48,9 @@ type UnaryServerInfo struct { } // UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal -// execution of a unary RPC. +// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the +// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as +// the status message of the RPC. type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/backoff/backoff.go new file mode 100644 index 000000000000..1bd0cce5abdf --- /dev/null +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -0,0 +1,78 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package backoff implements the backoff strategy for gRPC. +// +// This is kept in internal until the gRPC project decides whether or not to +// allow alternative backoff strategies. +package backoff + +import ( + "time" + + "google.golang.org/grpc/internal/grpcrand" +) + +// Strategy defines the methodology for backing off after a grpc connection +// failure. +// +type Strategy interface { + // Backoff returns the amount of time to wait before the next retry given + // the number of consecutive failures. + Backoff(retries int) time.Duration +} + +const ( + // baseDelay is the amount of time to wait before retrying after the first + // failure. + baseDelay = 1.0 * time.Second + // factor is applied to the backoff after each retry. + factor = 1.6 + // jitter provides a range to randomize backoff delays.
+	jitter = 0.2 +) + +// Exponential implements the exponential backoff algorithm as defined in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +type Exponential struct { + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration +} + +// Backoff returns the amount of time to wait before the next retry given the +// number of retries. +func (bc Exponential) Backoff(retries int) time.Duration { + if retries == 0 { + return baseDelay + } + backoff, max := float64(baseDelay), float64(bc.MaxDelay) + for backoff < max && retries > 0 { + backoff *= factor + retries-- + } + if backoff > max { + backoff = max + } + // Randomize backoff delays so that if a cluster of requests start at + // the same time, they won't operate in lockstep. + backoff *= 1 + jitter*(grpcrand.Float64()*2-1) + if backoff < 0 { + return 0 + } + return time.Duration(backoff) +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/channelz/funcs.go new file mode 100644 index 000000000000..586a0336b473 --- /dev/null +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -0,0 +1,573 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package channelz defines APIs for enabling channelz service, entry +// registration/deletion, and accessing channelz data. It also defines channelz +// metric struct formats. +// +// All APIs in this package are experimental. +package channelz + +import ( + "sort" + "sync" + "sync/atomic" + + "google.golang.org/grpc/grpclog" +) + +var ( + db dbWrapper + idGen idGenerator + // EntryPerPage defines the number of channelz entries to be shown on a web page. + EntryPerPage = 50 + curState int32 +) + +// TurnOn turns on channelz data collection. +func TurnOn() { + if !IsOn() { + NewChannelzStorage() + atomic.StoreInt32(&curState, 1) + } +} + +// IsOn returns whether channelz data collection is on. +func IsOn() bool { + return atomic.CompareAndSwapInt32(&curState, 1, 1) +} + +// dbWrapper wraps around a reference to internal channelz data storage, and +// provides synchronized functionality to set and get the reference. +type dbWrapper struct { + mu sync.RWMutex + DB *channelMap +} + +func (d *dbWrapper) set(db *channelMap) { + d.mu.Lock() + d.DB = db + d.mu.Unlock() +} + +func (d *dbWrapper) get() *channelMap { + d.mu.RLock() + defer d.mu.RUnlock() + return d.DB +} + +// NewChannelzStorage initializes channelz data storage and id generator. +// +// Note: This function is exported for testing purposes only. Users should not +// call it in most cases.
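+//
+// (Illustrative note, not upstream text: calling NewChannelzStorage() at the
+// start of a test discards every previously registered entry and resets the
+// id generator, so newly registered entries get ids starting from 1 again.)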
+func NewChannelzStorage() { + db.set(&channelMap{ + topLevelChannels: make(map[int64]struct{}), + channels: make(map[int64]*channel), + listenSockets: make(map[int64]*listenSocket), + normalSockets: make(map[int64]*normalSocket), + servers: make(map[int64]*server), + subChannels: make(map[int64]*subChannel), + }) + idGen.reset() +} + +// GetTopChannels returns a slice of top channels' ChannelMetric, along with a +// boolean indicating whether there are more top channels to be queried for. +// +// The arg id specifies that only top channels with ids at or above it will be included +// in the result. The returned slice is up to a length of EntryPerPage, and is +// sorted in ascending id order. +func GetTopChannels(id int64) ([]*ChannelMetric, bool) { + return db.get().GetTopChannels(id) +} + +// GetServers returns a slice of servers' ServerMetric, along with a +// boolean indicating whether there are more servers to be queried for. +// +// The arg id specifies that only servers with ids at or above it will be included +// in the result. The returned slice is up to a length of EntryPerPage, and is +// sorted in ascending id order. +func GetServers(id int64) ([]*ServerMetric, bool) { + return db.get().GetServers(id) +} + +// GetServerSockets returns a slice of the server's (identified by id) normal sockets' +// SocketMetric, along with a boolean indicating whether there are more sockets to +// be queried for. +// +// The arg startID specifies that only sockets with ids at or above it will be +// included in the result. The returned slice is up to a length of EntryPerPage, +// and is sorted in ascending id order. +func GetServerSockets(id int64, startID int64) ([]*SocketMetric, bool) { + return db.get().GetServerSockets(id, startID) +} + +// GetChannel returns the ChannelMetric for the channel (identified by id). +func GetChannel(id int64) *ChannelMetric { + return db.get().GetChannel(id) +} + +// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id). +func GetSubChannel(id int64) *SubChannelMetric { + return db.get().GetSubChannel(id) +} + +// GetSocket returns the SocketMetric for the socket (identified by id). +func GetSocket(id int64) *SocketMetric { + return db.get().GetSocket(id) +} + +// RegisterChannel registers the given channel c in the channelz database with ref +// as its reference name, and adds it to the child list of its parent (identified +// by pid). pid = 0 means no parent. It returns the unique channelz tracking id +// assigned to this channel. +func RegisterChannel(c Channel, pid int64, ref string) int64 { + id := idGen.genID() + cn := &channel{ + refName: ref, + c: c, + subChans: make(map[int64]string), + nestedChans: make(map[int64]string), + id: id, + pid: pid, + } + if pid == 0 { + db.get().addChannel(id, cn, true, pid, ref) + } else { + db.get().addChannel(id, cn, false, pid, ref) + } + return id +} + +// RegisterSubChannel registers the given channel c in the channelz database with ref +// as its reference name, and adds it to the child list of its parent (identified +// by pid). It returns the unique channelz tracking id assigned to this subchannel. +func RegisterSubChannel(c Channel, pid int64, ref string) int64 { + if pid == 0 { + grpclog.Error("a SubChannel's parent id cannot be 0") + return 0 + } + id := idGen.genID() + sc := &subChannel{ + refName: ref, + c: c, + sockets: make(map[int64]string), + id: id, + pid: pid, + } + db.get().addSubChannel(id, sc, pid, ref) + return id +} + +// RegisterServer registers the given server s in the channelz database.
It returns +// the unique channelz tracking id assigned to this server. +func RegisterServer(s Server, ref string) int64 { + id := idGen.genID() + svr := &server{ + refName: ref, + s: s, + sockets: make(map[int64]string), + listenSockets: make(map[int64]string), + id: id, + } + db.get().addServer(id, svr) + return id +} + +// RegisterListenSocket registers the given listen socket s in the channelz database +// with ref as its reference name, and adds it to the child list of its parent +// (identified by pid). It returns the unique channelz tracking id assigned to +// this listen socket. +func RegisterListenSocket(s Socket, pid int64, ref string) int64 { + if pid == 0 { + grpclog.Error("a ListenSocket's parent id cannot be 0") + return 0 + } + id := idGen.genID() + ls := &listenSocket{refName: ref, s: s, id: id, pid: pid} + db.get().addListenSocket(id, ls, pid, ref) + return id +} + +// RegisterNormalSocket registers the given normal socket s in the channelz database +// with ref as its reference name, and adds it to the child list of its parent +// (identified by pid). It returns the unique channelz tracking id assigned to +// this normal socket. +func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { + if pid == 0 { + grpclog.Error("a NormalSocket's parent id cannot be 0") + return 0 + } + id := idGen.genID() + ns := &normalSocket{refName: ref, s: s, id: id, pid: pid} + db.get().addNormalSocket(id, ns, pid, ref) + return id +} + +// RemoveEntry removes the entry with unique channelz tracking id id from the +// channelz database. +func RemoveEntry(id int64) { + db.get().removeEntry(id) +} + +// channelMap is the storage data structure for channelz. +// Methods of channelMap can be divided into two categories with respect to locking. +// 1. Methods that acquire the global lock. +// 2. Methods that can only be called when the global lock is held. +// Methods of the second type must always be called from within a method of the first type. +type channelMap struct { + mu sync.RWMutex + topLevelChannels map[int64]struct{} + servers map[int64]*server + channels map[int64]*channel + subChannels map[int64]*subChannel + listenSockets map[int64]*listenSocket + normalSockets map[int64]*normalSocket +} + +func (c *channelMap) addServer(id int64, s *server) { + c.mu.Lock() + s.cm = c + c.servers[id] = s + c.mu.Unlock() +} + +func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) { + c.mu.Lock() + cn.cm = c + c.channels[id] = cn + if isTopChannel { + c.topLevelChannels[id] = struct{}{} + } else { + c.findEntry(pid).addChild(id, cn) + } + c.mu.Unlock() +} + +func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) { + c.mu.Lock() + sc.cm = c + c.subChannels[id] = sc + c.findEntry(pid).addChild(id, sc) + c.mu.Unlock() +} + +func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) { + c.mu.Lock() + ls.cm = c + c.listenSockets[id] = ls + c.findEntry(pid).addChild(id, ls) + c.mu.Unlock() +} + +func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) { + c.mu.Lock() + ns.cm = c + c.normalSockets[id] = ns + c.findEntry(pid).addChild(id, ns) + c.mu.Unlock() +} + +// removeEntry triggers the removal of an entry. It may not actually delete the +// entry, if the entry has to wait on the deletion of its children, and it may +// set off a chain of entry deletions. For example, deleting the last socket of +// a gracefully shutting down server will lead to the server also being deleted.
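+//
+// (Illustrative cascade, assuming a server in graceful shutdown: removeEntry on
+// the server only marks it as close-called while socket children remain; once
+// its last socket entry is removed, deleteSelfIfReady deletes the server too.)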
+func (c *channelMap) removeEntry(id int64) { + c.mu.Lock() + c.findEntry(id).triggerDelete() + c.mu.Unlock() +} + +// c.mu must be held by the caller. +func (c *channelMap) findEntry(id int64) entry { + var v entry + var ok bool + if v, ok = c.channels[id]; ok { + return v + } + if v, ok = c.subChannels[id]; ok { + return v + } + if v, ok = c.servers[id]; ok { + return v + } + if v, ok = c.listenSockets[id]; ok { + return v + } + if v, ok = c.normalSockets[id]; ok { + return v + } + return &dummyEntry{idNotFound: id} +} + +// c.mu must be held by the caller. +// deleteEntry simply deletes an entry from the channelMap. Before calling this +// method, the caller must check that this entry is ready to be deleted, i.e., removeEntry() +// has been called on it, and no children still exist. +// Conditionals are ordered by the expected frequency of deletion of each entity +// type, in order to optimize performance. +func (c *channelMap) deleteEntry(id int64) { + var ok bool + if _, ok = c.normalSockets[id]; ok { + delete(c.normalSockets, id) + return + } + if _, ok = c.subChannels[id]; ok { + delete(c.subChannels, id) + return + } + if _, ok = c.channels[id]; ok { + delete(c.channels, id) + delete(c.topLevelChannels, id) + return + } + if _, ok = c.listenSockets[id]; ok { + delete(c.listenSockets, id) + return + } + if _, ok = c.servers[id]; ok { + delete(c.servers, id) + return + } +} + +type int64Slice []int64 + +func (s int64Slice) Len() int { return len(s) } +func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] } + +func copyMap(m map[int64]string) map[int64]string { + n := make(map[int64]string) + for k, v := range m { + n[k] = v + } + return n +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func (c *channelMap) GetTopChannels(id int64) ([]*ChannelMetric, bool) { + c.mu.RLock() + l := len(c.topLevelChannels) + ids := make([]int64, 0, l) + cns := make([]*channel, 0, min(l, EntryPerPage)) + + for k := range c.topLevelChannels { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + count := 0 + var end bool + var t []*ChannelMetric + for i, v := range ids[idx:] { + if count == EntryPerPage { + break + } + if cn, ok := c.channels[v]; ok { + cns = append(cns, cn) + t = append(t, &ChannelMetric{ + NestedChans: copyMap(cn.nestedChans), + SubChans: copyMap(cn.subChans), + }) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + + for i, cn := range cns { + t[i].ChannelData = cn.c.ChannelzMetric() + t[i].ID = cn.id + t[i].RefName = cn.refName + } + return t, end +} + +func (c *channelMap) GetServers(id int64) ([]*ServerMetric, bool) { + c.mu.RLock() + l := len(c.servers) + ids := make([]int64, 0, l) + ss := make([]*server, 0, min(l, EntryPerPage)) + for k := range c.servers { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + count := 0 + var end bool + var s []*ServerMetric + for i, v := range ids[idx:] { + if count == EntryPerPage { + break + } + if svr, ok := c.servers[v]; ok { + ss = append(ss, svr) + s = append(s, &ServerMetric{ + ListenSockets: copyMap(svr.listenSockets), + }) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + + for i, svr := range ss { + s[i].ServerData = svr.s.ChannelzMetric() + s[i].ID =
svr.id + s[i].RefName = svr.refName + } + return s, end +} + +func (c *channelMap) GetServerSockets(id int64, startID int64) ([]*SocketMetric, bool) { + var svr *server + var ok bool + c.mu.RLock() + if svr, ok = c.servers[id]; !ok { + // server with id doesn't exist. + c.mu.RUnlock() + return nil, true + } + svrskts := svr.sockets + l := len(svrskts) + ids := make([]int64, 0, l) + sks := make([]*normalSocket, 0, min(l, EntryPerPage)) + for k := range svrskts { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID }) + count := 0 + var end bool + for i, v := range ids[idx:] { + if count == EntryPerPage { + break + } + if ns, ok := c.normalSockets[v]; ok { + sks = append(sks, ns) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + var s []*SocketMetric + for _, ns := range sks { + sm := &SocketMetric{} + sm.SocketData = ns.s.ChannelzMetric() + sm.ID = ns.id + sm.RefName = ns.refName + s = append(s, sm) + } + return s, end +} + +func (c *channelMap) GetChannel(id int64) *ChannelMetric { + cm := &ChannelMetric{} + var cn *channel + var ok bool + c.mu.RLock() + if cn, ok = c.channels[id]; !ok { + // channel with id doesn't exist. + c.mu.RUnlock() + return nil + } + cm.NestedChans = copyMap(cn.nestedChans) + cm.SubChans = copyMap(cn.subChans) + c.mu.RUnlock() + cm.ChannelData = cn.c.ChannelzMetric() + cm.ID = cn.id + cm.RefName = cn.refName + return cm +} + +func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric { + cm := &SubChannelMetric{} + var sc *subChannel + var ok bool + c.mu.RLock() + if sc, ok = c.subChannels[id]; !ok { + // subchannel with id doesn't exist. + c.mu.RUnlock() + return nil + } + cm.Sockets = copyMap(sc.sockets) + c.mu.RUnlock() + cm.ChannelData = sc.c.ChannelzMetric() + cm.ID = sc.id + cm.RefName = sc.refName + return cm +} + +func (c *channelMap) GetSocket(id int64) *SocketMetric { + sm := &SocketMetric{} + c.mu.RLock() + if ls, ok := c.listenSockets[id]; ok { + c.mu.RUnlock() + sm.SocketData = ls.s.ChannelzMetric() + sm.ID = ls.id + sm.RefName = ls.refName + return sm + } + if ns, ok := c.normalSockets[id]; ok { + c.mu.RUnlock() + sm.SocketData = ns.s.ChannelzMetric() + sm.ID = ns.id + sm.RefName = ns.refName + return sm + } + c.mu.RUnlock() + return nil +} + +type idGenerator struct { + id int64 +} + +func (i *idGenerator) reset() { + atomic.StoreInt64(&i.id, 0) +} + +func (i *idGenerator) genID() int64 { + return atomic.AddInt64(&i.id, 1) +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/channelz/types.go b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/channelz/types.go new file mode 100644 index 000000000000..153d75340e41 --- /dev/null +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -0,0 +1,418 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package channelz + +import ( + "net" + "time" + + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" +) + +// entry represents a node in the channelz database. +type entry interface { + // addChild adds a child e, whose channelz id is id, to the child list + addChild(id int64, e entry) + // deleteChild deletes the child with channelz id id from the child list + deleteChild(id int64) + // triggerDelete tries to delete self from the channelz database. However, if the child + // list is not empty, then deletion from the database is on hold until the last + // child is deleted from the database. + triggerDelete() + // deleteSelfIfReady checks whether triggerDelete() has been called before, and whether the child + // list is now empty. If both conditions are met, it deletes the entry from the database. + deleteSelfIfReady() +} + +// dummyEntry is a fake entry to handle the entry-not-found case. +type dummyEntry struct { + idNotFound int64 +} + +func (d *dummyEntry) addChild(id int64, e entry) { + // Note: It is possible for a normal program to reach here under race condition. + // For example, there could be a race between ClientConn.Close() info being propagated + // to addrConn and http2Client. ClientConn.Close() cancels the context, which causes + // http2Client to fail. The error info is then caught by the transport monitor + // before addrConn.tearDown() is called inside ClientConn.Close(). Therefore, + // the addrConn will create a new transport. When the new transport is registered in + // channelz, its parent addrConn could have already been torn down and deleted + // from channelz tracking, and thus the code here is reached. + grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound) +} + +func (d *dummyEntry) deleteChild(id int64) { + // It is possible for a normal program to reach here under race condition. + // Refer to the example described in addChild(). + grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) +} + +func (d *dummyEntry) triggerDelete() { + grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) +} + +func (*dummyEntry) deleteSelfIfReady() { + // code should not reach here. deleteSelfIfReady is always called on an existing entry. +} + +// ChannelMetric defines the info channelz provides for a specific Channel, which +// includes ChannelInternalMetric and channelz-specific data, such as channelz id, +// child list, etc. +type ChannelMetric struct { + // ID is the channelz id of this channel. + ID int64 + // RefName is the human readable reference string of this channel. + RefName string + // ChannelData contains channel internal metric reported by the channel through + // ChannelzMetric(). + ChannelData *ChannelInternalMetric + // NestedChans tracks the nested channel type children of this channel in the format of + // a map from nested channel channelz id to corresponding reference string. + NestedChans map[int64]string + // SubChans tracks the subchannel type children of this channel in the format of a + // map from subchannel channelz id to corresponding reference string. + SubChans map[int64]string + // Sockets tracks the socket type children of this channel in the format of a map + // from socket channelz id to corresponding reference string. + // Note that the current grpc implementation doesn't allow a channel to have sockets + // directly; therefore, this field is unused.
+	Sockets map[int64]string +} + +// SubChannelMetric defines the info channelz provides for a specific SubChannel, +// which includes ChannelInternalMetric and channelz-specific data, such as +// channelz id, child list, etc. +type SubChannelMetric struct { + // ID is the channelz id of this subchannel. + ID int64 + // RefName is the human readable reference string of this subchannel. + RefName string + // ChannelData contains subchannel internal metric reported by the subchannel + // through ChannelzMetric(). + ChannelData *ChannelInternalMetric + // NestedChans tracks the nested channel type children of this subchannel in the format of + // a map from nested channel channelz id to corresponding reference string. + // Note that the current grpc implementation doesn't allow a subchannel to have nested channels + // as children; therefore, this field is unused. + NestedChans map[int64]string + // SubChans tracks the subchannel type children of this subchannel in the format of a + // map from subchannel channelz id to corresponding reference string. + // Note that the current grpc implementation doesn't allow a subchannel to have subchannels + // as children; therefore, this field is unused. + SubChans map[int64]string + // Sockets tracks the socket type children of this subchannel in the format of a map + // from socket channelz id to corresponding reference string. + Sockets map[int64]string +} + +// ChannelInternalMetric defines the struct that the implementor of Channel interface +// should return from ChannelzMetric(). +type ChannelInternalMetric struct { + // The current connectivity state of the channel. + State connectivity.State + // The target this channel originally tried to connect to. May be absent. + Target string + // The number of calls started on the channel. + CallsStarted int64 + // The number of calls that have completed with an OK status. + CallsSucceeded int64 + // The number of calls that have completed with a non-OK status. + CallsFailed int64 + // The last time a call was started on the channel. + LastCallStartedTimestamp time.Time + //TODO: trace +} + +// Channel is the interface that should be satisfied in order to be tracked by +// channelz as Channel or SubChannel.
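+//
+// A minimal, hypothetical implementor (illustration only; staticChannel is not
+// part of this package) might look like:
+//
+//	type staticChannel struct{}
+//
+//	func (staticChannel) ChannelzMetric() *ChannelInternalMetric {
+//		return &ChannelInternalMetric{State: connectivity.Ready}
+//	}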
+type Channel interface { + ChannelzMetric() *ChannelInternalMetric +} + +type channel struct { + refName string + c Channel + closeCalled bool + nestedChans map[int64]string + subChans map[int64]string + id int64 + pid int64 + cm *channelMap +} + +func (c *channel) addChild(id int64, e entry) { + switch v := e.(type) { + case *subChannel: + c.subChans[id] = v.refName + case *channel: + c.nestedChans[id] = v.refName + default: + grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) + } +} + +func (c *channel) deleteChild(id int64) { + delete(c.subChans, id) + delete(c.nestedChans, id) + c.deleteSelfIfReady() +} + +func (c *channel) triggerDelete() { + c.closeCalled = true + c.deleteSelfIfReady() +} + +func (c *channel) deleteSelfIfReady() { + if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 { + return + } + c.cm.deleteEntry(c.id) + // not top channel + if c.pid != 0 { + c.cm.findEntry(c.pid).deleteChild(c.id) + } +} + +type subChannel struct { + refName string + c Channel + closeCalled bool + sockets map[int64]string + id int64 + pid int64 + cm *channelMap +} + +func (sc *subChannel) addChild(id int64, e entry) { + if v, ok := e.(*normalSocket); ok { + sc.sockets[id] = v.refName + } else { + grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) + } +} + +func (sc *subChannel) deleteChild(id int64) { + delete(sc.sockets, id) + sc.deleteSelfIfReady() +} + +func (sc *subChannel) triggerDelete() { + sc.closeCalled = true + sc.deleteSelfIfReady() +} + +func (sc *subChannel) deleteSelfIfReady() { + if !sc.closeCalled || len(sc.sockets) != 0 { + return + } + sc.cm.deleteEntry(sc.id) + sc.cm.findEntry(sc.pid).deleteChild(sc.id) +} + +// SocketMetric defines the info channelz provides for a specific Socket, which +// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc. +type SocketMetric struct { + // ID is the channelz id of this socket. + ID int64 + // RefName is the human readable reference string of this socket. + RefName string + // SocketData contains socket internal metric reported by the socket through + // ChannelzMetric(). + SocketData *SocketInternalMetric +} + +// SocketInternalMetric defines the struct that the implementor of Socket interface +// should return from ChannelzMetric(). +type SocketInternalMetric struct { + // The number of streams that have been started. + StreamsStarted int64 + // The number of streams that have ended successfully: + // On client side, receiving frame with eos bit set. + // On server side, sending frame with eos bit set. + StreamsSucceeded int64 + // The number of streams that have ended unsuccessfully: + // On client side, termination without receiving frame with eos bit set. + // On server side, termination without sending frame with eos bit set. + StreamsFailed int64 + // The number of messages successfully sent on this socket. + MessagesSent int64 + MessagesReceived int64 + // The number of keep alives sent. This is typically implemented with HTTP/2 + // ping messages. + KeepAlivesSent int64 + // The last time a stream was created by this endpoint. Usually unset for + // servers. + LastLocalStreamCreatedTimestamp time.Time + // The last time a stream was created by the remote endpoint. Usually unset + // for clients. + LastRemoteStreamCreatedTimestamp time.Time + // The last time a message was sent by this endpoint. + LastMessageSentTimestamp time.Time + // The last time a message was received by this endpoint. 
+	LastMessageReceivedTimestamp time.Time + // The amount of window granted to the local endpoint by the remote endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + LocalFlowControlWindow int64 + // The amount of window granted to the remote endpoint by the local endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + RemoteFlowControlWindow int64 + // The locally bound address. + LocalAddr net.Addr + // The remote bound address. May be absent. + RemoteAddr net.Addr + // Optional, represents the name of the remote endpoint, if different from + // the original target name. + RemoteName string + //TODO: socket options + //TODO: Security +} + +// Socket is the interface that should be satisfied in order to be tracked by +// channelz as Socket. +type Socket interface { + ChannelzMetric() *SocketInternalMetric +} + +type listenSocket struct { + refName string + s Socket + id int64 + pid int64 + cm *channelMap +} + +func (ls *listenSocket) addChild(id int64, e entry) { + grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) +} + +func (ls *listenSocket) deleteChild(id int64) { + grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id) +} + +func (ls *listenSocket) triggerDelete() { + ls.cm.deleteEntry(ls.id) + ls.cm.findEntry(ls.pid).deleteChild(ls.id) +} + +func (ls *listenSocket) deleteSelfIfReady() { + grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket") +} + +type normalSocket struct { + refName string + s Socket + id int64 + pid int64 + cm *channelMap +} + +func (ns *normalSocket) addChild(id int64, e entry) { + grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e) +} + +func (ns *normalSocket) deleteChild(id int64) { + grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id) +} + +func (ns *normalSocket) triggerDelete() { + ns.cm.deleteEntry(ns.id) + ns.cm.findEntry(ns.pid).deleteChild(ns.id) +} + +func (ns *normalSocket) deleteSelfIfReady() { + grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket") +} + +// ServerMetric defines the info channelz provides for a specific Server, which +// includes ServerInternalMetric and channelz-specific data, such as channelz id, +// child list, etc. +type ServerMetric struct { + // ID is the channelz id of this server. + ID int64 + // RefName is the human readable reference string of this server. + RefName string + // ServerData contains server internal metric reported by the server through + // ChannelzMetric(). + ServerData *ServerInternalMetric + // ListenSockets tracks the listener socket type children of this server in the + // format of a map from socket channelz id to corresponding reference string. + ListenSockets map[int64]string +} + +// ServerInternalMetric defines the struct that the implementor of Server interface +// should return from ChannelzMetric(). +type ServerInternalMetric struct { + // The number of incoming calls started on the server. + CallsStarted int64 + // The number of incoming calls that have completed with an OK status. + CallsSucceeded int64 + // The number of incoming calls that have completed with a non-OK status. + CallsFailed int64 + // The last time a call was started on the server.
+ LastCallStartedTimestamp time.Time + //TODO: trace +} + +// Server is the interface to be satisfied in order to be tracked by channelz as +// Server. +type Server interface { + ChannelzMetric() *ServerInternalMetric +} + +type server struct { + refName string + s Server + closeCalled bool + sockets map[int64]string + listenSockets map[int64]string + id int64 + cm *channelMap +} + +func (s *server) addChild(id int64, e entry) { + switch v := e.(type) { + case *normalSocket: + s.sockets[id] = v.refName + case *listenSocket: + s.listenSockets[id] = v.refName + default: + grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) + } +} + +func (s *server) deleteChild(id int64) { + delete(s.sockets, id) + delete(s.listenSockets, id) + s.deleteSelfIfReady() +} + +func (s *server) triggerDelete() { + s.closeCalled = true + s.deleteSelfIfReady() +} + +func (s *server) deleteSelfIfReady() { + if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 { + return + } + s.cm.deleteEntry(s.id) +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go new file mode 100644 index 000000000000..200b115ca209 --- /dev/null +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -0,0 +1,56 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcrand implements math/rand functions in a concurrent-safe way +// with a global random source, independent of math/rand's global source. +package grpcrand + +import ( + "math/rand" + "sync" + "time" +) + +var ( + r = rand.New(rand.NewSource(time.Now().UnixNano())) + mu sync.Mutex +) + +// Int63n implements rand.Int63n on the grpcrand global source. +func Int63n(n int64) int64 { + mu.Lock() + res := r.Int63n(n) + mu.Unlock() + return res +} + +// Intn implements rand.Intn on the grpcrand global source. +func Intn(n int) int { + mu.Lock() + res := r.Intn(n) + mu.Unlock() + return res +} + +// Float64 implements rand.Float64 on the grpcrand global source. +func Float64() float64 { + mu.Lock() + res := r.Float64() + mu.Unlock() + return res +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/internal.go b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/internal.go index 07083832c3c6..cd34267f7f30 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/internal/internal.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/internal/internal.go @@ -15,20 +15,22 @@ * */ -// Package internal contains gRPC-internal code for testing, to avoid polluting -// the godoc of the top-level grpc package. +// Package internal contains gRPC-internal code, to avoid polluting +// the godoc of the top-level grpc package. It must not import any grpc +// symbols to avoid circular dependencies. 
package internal -// TestingCloseConns closes all existing transports but keeps -// grpcServer.lis accepting new connections. -// -// The provided grpcServer must be of type *grpc.Server. It is untyped -// for circular dependency reasons. -var TestingCloseConns func(grpcServer interface{}) +var ( -// TestingUseHandlerImpl enables the http.Handler-based server implementation. -// It must be called before Serve and requires TLS credentials. -// -// The provided grpcServer must be of type *grpc.Server. It is untyped -// for circular dependency reasons. -var TestingUseHandlerImpl func(grpcServer interface{}) + // TestingUseHandlerImpl enables the http.Handler-based server implementation. + // It must be called before Serve and requires TLS credentials. + // + // The provided grpcServer must be of type *grpc.Server. It is untyped + // for circular dependency reasons. + TestingUseHandlerImpl func(grpcServer interface{}) + + // WithContextDialer is exported by clientconn.go + WithContextDialer interface{} // func(context.Context, string) (net.Conn, error) grpc.DialOption + // WithResolverBuilder is exported by clientconn.go + WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption +) diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/metadata/metadata.go b/cluster-autoscaler/vendor/google.golang.org/grpc/metadata/metadata.go index 589161d57fa1..3f8a9edaeea2 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/metadata/metadata.go @@ -17,7 +17,8 @@ */ // Package metadata defines the structure of the metadata supported by the gRPC library. -// Please refer to https://grpc.io/docs/guides/wire.html for more information about custom-metadata. +// Please refer to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md +// for more information about custom-metadata. package metadata import ( @@ -27,7 +28,9 @@ import ( "golang.org/x/net/context" ) -// DecodeKeyValue returns k, v, nil. It is deprecated and should not be used. +// DecodeKeyValue returns k, v, nil. +// +// Deprecated: use k and v directly instead. func DecodeKeyValue(k, v string) (string, string, error) { return k, v, nil } @@ -94,6 +97,30 @@ func (md MD) Copy() MD { return Join(md) } +// Get obtains the values for a given key. +func (md MD) Get(k string) []string { + k = strings.ToLower(k) + return md[k] +} + +// Set sets the value of a given key with a slice of values. +func (md MD) Set(k string, vals ...string) { + if len(vals) == 0 { + return + } + k = strings.ToLower(k) + md[k] = vals +} + +// Append adds the values to key k, not overwriting what was already stored at that key. +func (md MD) Append(k string, vals ...string) { + if len(vals) == 0 { + return + } + k = strings.ToLower(k) + md[k] = append(md[k], vals...) +} + // Join joins any number of mds into a single MD. // The order of values for each key is determined by the order in which // the mds containing those values are presented to Join. @@ -115,9 +142,26 @@ func NewIncomingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdIncomingKey{}, md) } -// NewOutgoingContext creates a new context with outgoing md attached. +// NewOutgoingContext creates a new context with outgoing md attached. If used +// in conjunction with AppendToOutgoingContext, NewOutgoingContext will +// overwrite any previously-appended metadata.
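+//
+// Illustrative usage (hypothetical keys, not upstream doc text):
+//
+//	ctx = metadata.AppendToOutgoingContext(ctx, "k1", "v1") // merged with any existing outgoing metadata
+//	ctx = metadata.NewOutgoingContext(ctx, metadata.Pairs("k2", "v2")) // replaces it entirely, dropping "k1"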
func NewOutgoingContext(ctx context.Context, md MD) context.Context { - return context.WithValue(ctx, mdOutgoingKey{}, md) + return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md}) +} + +// AppendToOutgoingContext returns a new context with the provided kv merged +// with any existing metadata in the context. Please refer to the +// documentation of Pairs for a description of kv. +func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context { + if len(kv)%2 == 1 { + panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv))) + } + md, _ := ctx.Value(mdOutgoingKey{}).(rawMD) + added := make([][]string, len(md.added)+1) + copy(added, md.added) + added[len(added)-1] = make([]string, len(kv)) + copy(added[len(added)-1], kv) + return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) } // FromIncomingContext returns the incoming metadata in ctx if it exists. The @@ -128,10 +172,39 @@ func FromIncomingContext(ctx context.Context) (md MD, ok bool) { return } +// FromOutgoingContextRaw returns the un-merged, intermediary contents +// of rawMD. Remember to perform strings.ToLower on the keys. The returned +// MD should not be modified. Writing to it may cause races. Modification +// should be made to copies of the returned MD. +// +// This is intended for gRPC-internal use ONLY. +func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { + raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) + if !ok { + return nil, nil, false + } + + return raw.md, raw.added, true +} + // FromOutgoingContext returns the outgoing metadata in ctx if it exists. The // returned MD should not be modified. Writing to it may cause races. -// Modification should be made to the copies of the returned MD. -func FromOutgoingContext(ctx context.Context) (md MD, ok bool) { - md, ok = ctx.Value(mdOutgoingKey{}).(MD) - return +// Modification should be made to copies of the returned MD. +func FromOutgoingContext(ctx context.Context) (MD, bool) { + raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) + if !ok { + return nil, false + } + + mds := make([]MD, 0, len(raw.added)+1) + mds = append(mds, raw.md) + for _, vv := range raw.added { + mds = append(mds, Pairs(vv...)) + } + return Join(mds...), ok +} + +type rawMD struct { + md MD + added [][]string } diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/naming/dns_resolver.go b/cluster-autoscaler/vendor/google.golang.org/grpc/naming/dns_resolver.go index 7e69a2ca0a66..0f8a908ea9c1 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/naming/dns_resolver.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/naming/dns_resolver.go @@ -153,10 +153,10 @@ type ipWatcher struct { updateChan chan *Update } -// Next returns the adrress resolution Update for the target. For IP address, -// the resolution is itself, thus polling name server is unncessary. Therefore, +// Next returns the address resolution Update for the target. For IP address, +// the resolution is itself, thus polling name server is unnecessary. Therefore, // Next() will return an Update the first time it is called, and will be blocked -// for all following calls as no Update exisits until watcher is closed. +// for all following calls as no Update exists until watcher is closed. 
func (i *ipWatcher) Next() ([]*Update, error) { u, ok := <-i.updateChan if !ok { diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/naming/go17.go b/cluster-autoscaler/vendor/google.golang.org/grpc/naming/go17.go index 8bdf21e79989..57b65d7b8898 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/naming/go17.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/naming/go17.go @@ -1,4 +1,4 @@ -// +build go1.7, !go1.8 +// +build go1.6,!go1.8 /* * diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/naming/naming.go b/cluster-autoscaler/vendor/google.golang.org/grpc/naming/naming.go index 1af7e32f86d0..8cc39e93758f 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/naming/naming.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/naming/naming.go @@ -18,20 +18,26 @@ // Package naming defines the naming API and related data structures for gRPC. // The interface is EXPERIMENTAL and may be subject to change. +// +// Deprecated: please use package resolver. package naming // Operation defines the corresponding operations for a name resolution change. +// +// Deprecated: please use package resolver. type Operation uint8 const ( // Add indicates a new address is added. Add Operation = iota - // Delete indicates an exisiting address is deleted. + // Delete indicates an existing address is deleted. Delete ) // Update defines a name resolution update. Notice that it is not valid to have both // an empty string Addr and nil Metadata in an Update. +// +// Deprecated: please use package resolver. type Update struct { // Op indicates the operation of the update. Op Operation @@ -43,12 +49,16 @@ type Update struct { } // Resolver creates a Watcher for a target to track its resolution changes. +// +// Deprecated: please use package resolver. type Resolver interface { // Resolve creates a Watcher for target. Resolve(target string) (Watcher, error) } // Watcher watches for the updates on the specified target. +// +// Deprecated: please use package resolver. type Watcher interface { // Next blocks until an update or error happens. It may return one or more // updates. The first call should get the full set of the results. It should diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/picker_wrapper.go b/cluster-autoscaler/vendor/google.golang.org/grpc/picker_wrapper.go index 9085dbc9c98b..019e658004eb 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/picker_wrapper.go @@ -19,12 +19,17 @@ package grpc import ( + "io" "sync" + "sync/atomic" "golang.org/x/net/context" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/status" "google.golang.org/grpc/transport" ) @@ -36,13 +41,57 @@ type pickerWrapper struct { done bool blockingCh chan struct{} picker balancer.Picker + + // The most recent connection error.
+	connErrMu sync.Mutex + connErr error + + stickinessMDKey atomic.Value + stickiness *stickyStore } func newPickerWrapper() *pickerWrapper { - bp := &pickerWrapper{blockingCh: make(chan struct{})} + bp := &pickerWrapper{ + blockingCh: make(chan struct{}), + stickiness: newStickyStore(), + } return bp } +func (bp *pickerWrapper) updateConnectionError(err error) { + bp.connErrMu.Lock() + bp.connErr = err + bp.connErrMu.Unlock() +} + +func (bp *pickerWrapper) connectionError() error { + bp.connErrMu.Lock() + err := bp.connErr + bp.connErrMu.Unlock() + return err +} + +func (bp *pickerWrapper) updateStickinessMDKey(newKey string) { + // No need to check ok because mdKey == "" if ok == false. + if oldKey, _ := bp.stickinessMDKey.Load().(string); oldKey != newKey { + bp.stickinessMDKey.Store(newKey) + bp.stickiness.reset(newKey) + } +} + +func (bp *pickerWrapper) getStickinessMDKey() string { + // No need to check ok because mdKey == "" if ok == false. + mdKey, _ := bp.stickinessMDKey.Load().(string) + return mdKey +} + +func (bp *pickerWrapper) clearStickinessState() { + if oldKey := bp.getStickinessMDKey(); oldKey != "" { + // There's no need to reset store if mdKey was "". + bp.stickiness.reset(oldKey) + } +} + // updatePicker is called by UpdateBalancerState. It unblocks all blocked picks. func (bp *pickerWrapper) updatePicker(p balancer.Picker) { bp.mu.Lock() @@ -57,6 +106,23 @@ func (bp *pickerWrapper) updatePicker(p balancer.Picker) { bp.mu.Unlock() } +func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) { + acw.mu.Lock() + ac := acw.ac + acw.mu.Unlock() + ac.incrCallsStarted() + return func(b balancer.DoneInfo) { + if b.Err != nil && b.Err != io.EOF { + ac.incrCallsFailed() + } else { + ac.incrCallsSucceeded() + } + if done != nil { + done(b) + } + } +} + // pick returns the transport that will be used for the RPC. // It may block in the following cases: // - there's no picker @@ -65,6 +131,27 @@ func (bp *pickerWrapper) updatePicker(p balancer.Picker) { // - the subConn returned by the current picker is not READY // When one of these situations happens, pick blocks until the picker gets updated. func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) { + + mdKey := bp.getStickinessMDKey() + stickyKey, isSticky := stickyKeyFromContext(ctx, mdKey) + + // Potential race here: if stickinessMDKey is updated after the above two + // lines, and this pick is a sticky pick, the following put could add an + // entry to sticky store with an outdated sticky key. + // + // The solution: keep the current md key in sticky store, and at the + // beginning of each get/put, check the mdkey against store.curMDKey. + // - Cons: one more string comparison for each get/put. + // - Pros: the string matching happens inside get/put, so the overhead for + // non-sticky RPCs will be minimal. + + if isSticky { + if t, ok := bp.stickiness.get(mdKey, stickyKey); ok { + // Done function returned is always nil. + return t, nil, nil + } + } + var ( p balancer.Picker ch chan struct{} @@ -97,7 +184,7 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer. p = bp.picker bp.mu.Unlock() - subConn, put, err := p.Pick(ctx, opts) + subConn, done, err := p.Pick(ctx, opts) if err != nil { switch err {
if !failfast { continue } - return nil, nil, status.Errorf(codes.Unavailable, "%v", err) + return nil, nil, status.Errorf(codes.Unavailable, "%v, latest connection error: %v", err, bp.connectionError()) default: // err is some other error. return nil, nil, toRPCErr(err) @@ -120,7 +207,13 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer. continue } if t, ok := acw.getAddrConn().getReadyTransport(); ok { - return t, put, nil + if isSticky { + bp.stickiness.put(mdKey, stickyKey, acw) + } + if channelz.IsOn() { + return t, doneChannelzWrapper(acw, done), nil + } + return t, done, nil } grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick") // If ok == false, ac.state is not READY. @@ -139,3 +232,105 @@ func (bp *pickerWrapper) close() { bp.done = true close(bp.blockingCh) } + +const stickinessKeyCountLimit = 1000 + +type stickyStoreEntry struct { + acw *acBalancerWrapper + addr resolver.Address +} + +type stickyStore struct { + mu sync.Mutex + // curMDKey is check before every get/put to avoid races. The operation will + // abort immediately when the given mdKey is different from the curMDKey. + curMDKey string + store *linkedMap +} + +func newStickyStore() *stickyStore { + return &stickyStore{ + store: newLinkedMap(), + } +} + +// reset clears the map in stickyStore, and set the currentMDKey to newMDKey. +func (ss *stickyStore) reset(newMDKey string) { + ss.mu.Lock() + ss.curMDKey = newMDKey + ss.store.clear() + ss.mu.Unlock() +} + +// stickyKey is the key to look up in store. mdKey will be checked against +// curMDKey to avoid races. +func (ss *stickyStore) put(mdKey, stickyKey string, acw *acBalancerWrapper) { + ss.mu.Lock() + defer ss.mu.Unlock() + if mdKey != ss.curMDKey { + return + } + // TODO(stickiness): limit the total number of entries. + ss.store.put(stickyKey, &stickyStoreEntry{ + acw: acw, + addr: acw.getAddrConn().getCurAddr(), + }) + if ss.store.len() > stickinessKeyCountLimit { + ss.store.removeOldest() + } +} + +// stickyKey is the key to look up in store. mdKey will be checked against +// curMDKey to avoid races. +func (ss *stickyStore) get(mdKey, stickyKey string) (transport.ClientTransport, bool) { + ss.mu.Lock() + defer ss.mu.Unlock() + if mdKey != ss.curMDKey { + return nil, false + } + entry, ok := ss.store.get(stickyKey) + if !ok { + return nil, false + } + ac := entry.acw.getAddrConn() + if ac.getCurAddr() != entry.addr { + ss.store.remove(stickyKey) + return nil, false + } + t, ok := ac.getReadyTransport() + if !ok { + ss.store.remove(stickyKey) + return nil, false + } + return t, true +} + +// Get one value from metadata in ctx with key stickinessMDKey. +// +// It returns "", false if stickinessMDKey is an empty string. 
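// The curMDKey check described in the comments above reduces to a small
// invariant: every get/put names the key namespace it was computed under, and
// the store drops the operation if that namespace is stale. A standalone
// sketch with a plain map (guardedStore is illustrative, not the vendored
// stickyStore):

package main

import (
	"fmt"
	"sync"
)

type guardedStore struct {
	mu       sync.Mutex
	curMDKey string
	entries  map[string]string
}

func (g *guardedStore) reset(newMDKey string) {
	g.mu.Lock()
	g.curMDKey = newMDKey
	g.entries = make(map[string]string)
	g.mu.Unlock()
}

func (g *guardedStore) put(mdKey, k, v string) {
	g.mu.Lock()
	defer g.mu.Unlock()
	if mdKey != g.curMDKey {
		return // computed under an outdated mdKey; abort, as stickyStore does
	}
	g.entries[k] = v
}

func main() {
	g := &guardedStore{}
	g.reset("session-id")
	g.put("session-id", "user-1", "backend-a") // accepted
	g.put("old-key", "user-2", "backend-b")    // stale namespace, dropped
	fmt.Println(g.entries)                     // map[user-1:backend-a]
}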
+func stickyKeyFromContext(ctx context.Context, stickinessMDKey string) (string, bool) { + if stickinessMDKey == "" { + return "", false + } + + md, added, ok := metadata.FromOutgoingContextRaw(ctx) + if !ok { + return "", false + } + + if vv, ok := md[stickinessMDKey]; ok { + if len(vv) > 0 { + return vv[0], true + } + } + + for _, ss := range added { + for i := 0; i < len(ss)-1; i += 2 { + if ss[i] == stickinessMDKey { + return ss[i+1], true + } + } + } + + return "", false +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/pickfirst.go b/cluster-autoscaler/vendor/google.golang.org/grpc/pickfirst.go index 7f993ef5a381..bf659d49d2f1 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/pickfirst.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/pickfirst.go @@ -26,6 +26,9 @@ import ( "google.golang.org/grpc/resolver" ) +// PickFirstBalancerName is the name of the pick_first balancer. +const PickFirstBalancerName = "pick_first" + func newPickfirstBuilder() balancer.Builder { return &pickfirstBuilder{} } @@ -37,7 +40,7 @@ func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions } func (*pickfirstBuilder) Name() string { - return "pickfirst" + return PickFirstBalancerName } type pickfirstBalancer struct { @@ -57,14 +60,20 @@ func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err er return } b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc}) + b.sc.Connect() } else { b.sc.UpdateAddresses(addrs) + b.sc.Connect() } } func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s) - if b.sc != sc || s == connectivity.Shutdown { + if b.sc != sc { + grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + return + } + if s == connectivity.Shutdown { b.sc = nil return } @@ -93,3 +102,7 @@ func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer. } return p.sc, nil, nil } + +func init() { + balancer.Register(newPickfirstBuilder()) +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/proxy.go b/cluster-autoscaler/vendor/google.golang.org/grpc/proxy.go index 3e17efec61bd..2d40236e2180 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/proxy.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/proxy.go @@ -82,8 +82,7 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, addr string) (_ Header: map[string][]string{"User-Agent": {grpcUA}}, }) - req = req.WithContext(ctx) - if err := req.Write(conn); err != nil { + if err := sendHTTPRequest(ctx, req, conn); err != nil { return nil, fmt.Errorf("failed to write the HTTP request: %v", err) } diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/cluster-autoscaler/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go new file mode 100644 index 000000000000..048fde67d00a --- /dev/null +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -0,0 +1,381 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package dns implements a dns resolver to be installed as the default resolver +// in grpc. +package dns + +import ( + "encoding/json" + "errors" + "fmt" + "net" + "os" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/resolver" +) + +func init() { + resolver.Register(NewBuilder()) +} + +const ( + defaultPort = "443" + defaultFreq = time.Minute * 30 + golang = "GO" + // In DNS, service config is encoded in a TXT record via the mechanism + // described in RFC-1464 using the attribute name grpc_config. + txtAttribute = "grpc_config=" +) + +var ( + errMissingAddr = errors.New("missing address") +) + +// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. +func NewBuilder() resolver.Builder { + return &dnsBuilder{freq: defaultFreq} +} + +type dnsBuilder struct { + // frequency of polling the DNS server. + freq time.Duration +} + +// Build creates and starts a DNS resolver that watches the name resolution of the target. +func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) { + if target.Authority != "" { + return nil, fmt.Errorf("Default DNS resolver does not support custom DNS server") + } + host, port, err := parseTarget(target.Endpoint) + if err != nil { + return nil, err + } + + // IP address. + if net.ParseIP(host) != nil { + host, _ = formatIP(host) + addr := []resolver.Address{{Addr: host + ":" + port}} + i := &ipResolver{ + cc: cc, + ip: addr, + rn: make(chan struct{}, 1), + q: make(chan struct{}), + } + cc.NewAddress(addr) + go i.watcher() + return i, nil + } + + // DNS address (non-IP). + ctx, cancel := context.WithCancel(context.Background()) + d := &dnsResolver{ + freq: b.freq, + host: host, + port: port, + ctx: ctx, + cancel: cancel, + cc: cc, + t: time.NewTimer(0), + rn: make(chan struct{}, 1), + disableServiceConfig: opts.DisableServiceConfig, + } + + d.wg.Add(1) + go d.watcher() + return d, nil +} + +// Scheme returns the naming scheme of this resolver builder, which is "dns". +func (b *dnsBuilder) Scheme() string { + return "dns" +} + +// ipResolver watches for the name resolution update for an IP address. +type ipResolver struct { + cc resolver.ClientConn + ip []resolver.Address + // rn channel is used by ResolveNow() to force an immediate resolution of the target. + rn chan struct{} + q chan struct{} +} + +// ResolveNow resend the address it stores, no resolution is needed. +func (i *ipResolver) ResolveNow(opt resolver.ResolveNowOption) { + select { + case i.rn <- struct{}{}: + default: + } +} + +// Close closes the ipResolver. +func (i *ipResolver) Close() { + close(i.q) +} + +func (i *ipResolver) watcher() { + for { + select { + case <-i.rn: + i.cc.NewAddress(i.ip) + case <-i.q: + return + } + } +} + +// dnsResolver watches for the name resolution update for a non-IP target. 
+type dnsResolver struct { + freq time.Duration + host string + port string + ctx context.Context + cancel context.CancelFunc + cc resolver.ClientConn + // rn channel is used by ResolveNow() to force an immediate resolution of the target. + rn chan struct{} + t *time.Timer + // wg is used to enforce Close() to return after the watcher() goroutine has finished. + // Otherwise, data race will be possible. [Race Example] in dns_resolver_test we + // replace the real lookup functions with mocked ones to facilitate testing. + // If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes + // will warns lookup (READ the lookup function pointers) inside watcher() goroutine + // has data race with replaceNetFunc (WRITE the lookup function pointers). + wg sync.WaitGroup + disableServiceConfig bool +} + +// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches. +func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) { + select { + case d.rn <- struct{}{}: + default: + } +} + +// Close closes the dnsResolver. +func (d *dnsResolver) Close() { + d.cancel() + d.wg.Wait() + d.t.Stop() +} + +func (d *dnsResolver) watcher() { + defer d.wg.Done() + for { + select { + case <-d.ctx.Done(): + return + case <-d.t.C: + case <-d.rn: + } + result, sc := d.lookup() + // Next lookup should happen after an interval defined by d.freq. + d.t.Reset(d.freq) + d.cc.NewServiceConfig(sc) + d.cc.NewAddress(result) + } +} + +func (d *dnsResolver) lookupSRV() []resolver.Address { + var newAddrs []resolver.Address + _, srvs, err := lookupSRV(d.ctx, "grpclb", "tcp", d.host) + if err != nil { + grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) + return nil + } + for _, s := range srvs { + lbAddrs, err := lookupHost(d.ctx, s.Target) + if err != nil { + grpclog.Infof("grpc: failed load balancer address dns lookup due to %v.\n", err) + continue + } + for _, a := range lbAddrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + strconv.Itoa(int(s.Port)) + newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target}) + } + } + return newAddrs +} + +func (d *dnsResolver) lookupTXT() string { + ss, err := lookupTXT(d.ctx, d.host) + if err != nil { + grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err) + return "" + } + var res string + for _, s := range ss { + res += s + } + + // TXT record must have "grpc_config=" attribute in order to be used as service config. + if !strings.HasPrefix(res, txtAttribute) { + grpclog.Warningf("grpc: TXT record %v missing %v attribute", res, txtAttribute) + return "" + } + return strings.TrimPrefix(res, txtAttribute) +} + +func (d *dnsResolver) lookupHost() []resolver.Address { + var newAddrs []resolver.Address + addrs, err := lookupHost(d.ctx, d.host) + if err != nil { + grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) + return nil + } + for _, a := range addrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + d.port + newAddrs = append(newAddrs, resolver.Address{Addr: addr}) + } + return newAddrs +} + +func (d *dnsResolver) lookup() ([]resolver.Address, string) { + newAddrs := d.lookupSRV() + // Support fallback to non-balancer address. + newAddrs = append(newAddrs, d.lookupHost()...) 
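// Both ipResolver and dnsResolver above pair a 1-buffered channel with a
// non-blocking send, so any burst of concurrent ResolveNow calls collapses
// into at most one pending wakeup for watcher(). The pattern in isolation:

package main

import (
	"fmt"
	"time"
)

func main() {
	rn := make(chan struct{}, 1) // capacity 1: at most one wakeup queued

	resolveNow := func() {
		select {
		case rn <- struct{}{}:
		default: // a wakeup is already pending; coalesce
		}
	}

	go func() {
		for range rn {
			fmt.Println("watcher: re-resolving")
		}
	}()

	for i := 0; i < 5; i++ {
		resolveNow() // five hints, but the watcher may wake far fewer times
	}
	time.Sleep(100 * time.Millisecond)
}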
+ if d.disableServiceConfig { + return newAddrs, "" + } + sc := d.lookupTXT() + return newAddrs, canaryingSC(sc) +} + +// formatIP returns ok = false if addr is not a valid textual representation of an IP address. +// If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. +func formatIP(addr string) (addrIP string, ok bool) { + ip := net.ParseIP(addr) + if ip == nil { + return "", false + } + if ip.To4() != nil { + return addr, true + } + return "[" + addr + "]", true +} + +// parseTarget takes the user input target string, returns formatted host and port info. +// If target doesn't specify a port, set the port to be the defaultPort. +// If target is in IPv6 format and host-name is enclosed in sqarue brackets, brackets +// are strippd when setting the host. +// examples: +// target: "www.google.com" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" returns host: "ipv6-host", port: "443" +// target: ":80" returns host: "localhost", port: "80" +// target: ":" returns host: "localhost", port: "443" +func parseTarget(target string) (host, port string, err error) { + if target == "" { + return "", "", errMissingAddr + } + if ip := net.ParseIP(target); ip != nil { + // target is an IPv4 or IPv6(without brackets) address + return target, defaultPort, nil + } + if host, port, err = net.SplitHostPort(target); err == nil { + // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { + // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + host = "localhost" + } + if port == "" { + // If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used. 
+ port = defaultPort + } + return host, port, nil + } + if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil { + // target doesn't have port + return host, port, nil + } + return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err) +} + +type rawChoice struct { + ClientLanguage *[]string `json:"clientLanguage,omitempty"` + Percentage *int `json:"percentage,omitempty"` + ClientHostName *[]string `json:"clientHostName,omitempty"` + ServiceConfig *json.RawMessage `json:"serviceConfig,omitempty"` +} + +func containsString(a *[]string, b string) bool { + if a == nil { + return true + } + for _, c := range *a { + if c == b { + return true + } + } + return false +} + +func chosenByPercentage(a *int) bool { + if a == nil { + return true + } + return grpcrand.Intn(100)+1 <= *a +} + +func canaryingSC(js string) string { + if js == "" { + return "" + } + var rcs []rawChoice + err := json.Unmarshal([]byte(js), &rcs) + if err != nil { + grpclog.Warningf("grpc: failed to parse service config json string due to %v.\n", err) + return "" + } + cliHostname, err := os.Hostname() + if err != nil { + grpclog.Warningf("grpc: failed to get client hostname due to %v.\n", err) + return "" + } + var sc string + for _, c := range rcs { + if !containsString(c.ClientLanguage, golang) || + !chosenByPercentage(c.Percentage) || + !containsString(c.ClientHostName, cliHostname) || + c.ServiceConfig == nil { + continue + } + sc = string(*c.ServiceConfig) + break + } + return sc +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/resolver/dns/go17.go b/cluster-autoscaler/vendor/google.golang.org/grpc/resolver/dns/go17.go new file mode 100644 index 000000000000..b466bc8f6d45 --- /dev/null +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/resolver/dns/go17.go @@ -0,0 +1,35 @@ +// +build go1.6, !go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package dns + +import ( + "net" + + "golang.org/x/net/context" +) + +var ( + lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) } + lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) { + return net.LookupSRV(service, proto, name) + } + lookupTXT = func(ctx context.Context, name string) ([]string, error) { return net.LookupTXT(name) } +) diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/resolver/dns/go18.go b/cluster-autoscaler/vendor/google.golang.org/grpc/resolver/dns/go18.go new file mode 100644 index 000000000000..fa34f14cad48 --- /dev/null +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/resolver/dns/go18.go @@ -0,0 +1,29 @@ +// +build go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
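// Per rawChoice and canaryingSC above, a service config TXT record is the
// grpc_config= attribute followed by a JSON array of choices; the first
// choice whose clientLanguage, percentage, and clientHostName criteria all
// match is selected. An illustrative record (the policy value is an
// assumption, not taken from any real zone):

package main

import "fmt"

func main() {
	txt := `grpc_config=[{` +
		`"clientLanguage":["GO"],` + // only Go clients ("GO" per the golang const)
		`"percentage":10,` + // roll out to roughly 10% of matching clients
		`"serviceConfig":{"loadBalancingPolicy":"round_robin"}` +
		`}]`
	fmt.Println(txt)
}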
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package dns + +import "net" + +var ( + lookupHost = net.DefaultResolver.LookupHost + lookupSRV = net.DefaultResolver.LookupSRV + lookupTXT = net.DefaultResolver.LookupTXT +) diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go b/cluster-autoscaler/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go new file mode 100644 index 000000000000..b76010d74d14 --- /dev/null +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go @@ -0,0 +1,57 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package passthrough implements a pass-through resolver. It sends the target +// name without scheme back to gRPC as resolved address. +package passthrough + +import "google.golang.org/grpc/resolver" + +const scheme = "passthrough" + +type passthroughBuilder struct{} + +func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) { + r := &passthroughResolver{ + target: target, + cc: cc, + } + r.start() + return r, nil +} + +func (*passthroughBuilder) Scheme() string { + return scheme +} + +type passthroughResolver struct { + target resolver.Target + cc resolver.ClientConn +} + +func (r *passthroughResolver) start() { + r.cc.NewAddress([]resolver.Address{{Addr: r.target.Endpoint}}) +} + +func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {} + +func (*passthroughResolver) Close() {} + +func init() { + resolver.Register(&passthroughBuilder{}) +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/resolver/resolver.go b/cluster-autoscaler/vendor/google.golang.org/grpc/resolver/resolver.go index 49307e8fe9e9..506afac88aea 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/resolver/resolver.go @@ -24,42 +24,42 @@ var ( // m is a map from scheme to resolver builder. m = make(map[string]Builder) // defaultScheme is the default scheme to use. - defaultScheme string + defaultScheme = "passthrough" ) // TODO(bar) install dns resolver in init(){}. -// Register registers the resolver builder to the resolver map. -// b.Scheme will be used as the scheme registered with this builder. +// Register registers the resolver builder to the resolver map. b.Scheme will be +// used as the scheme registered with this builder. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. 
If multiple Resolvers are +// registered with the same name, the one registered last will take effect. func Register(b Builder) { m[b.Scheme()] = b } // Get returns the resolver builder registered with the given scheme. -// If no builder is register with the scheme, the default scheme will -// be used. -// If the default scheme is not modified, "dns" will be the default -// scheme, and the preinstalled dns resolver will be used. -// If the default scheme is modified, and a resolver is registered with -// the scheme, that resolver will be returned. -// If the default scheme is modified, and no resolver is registered with -// the scheme, nil will be returned. +// +// If no builder is register with the scheme, nil will be returned. func Get(scheme string) Builder { if b, ok := m[scheme]; ok { return b } - if b, ok := m[defaultScheme]; ok { - return b - } return nil } // SetDefaultScheme sets the default scheme that will be used. -// The default default scheme is "dns". +// The default default scheme is "passthrough". func SetDefaultScheme(scheme string) { defaultScheme = scheme } +// GetDefaultScheme gets the default scheme that will be used. +func GetDefaultScheme() string { + return defaultScheme +} + // AddressType indicates the address type returned by name resolution. type AddressType uint8 @@ -78,7 +78,9 @@ type Address struct { // Type is the type of this address. Type AddressType // ServerName is the name of this address. - // It's the name of the grpc load balancer, which will be used for authentication. + // + // e.g. if Type is GRPCLB, ServerName should be the name of the remote load + // balancer, not the name of the backend. ServerName string // Metadata is the information associated with Addr, which may be used // to make load balancing decision. @@ -88,10 +90,17 @@ type Address struct { // BuildOption includes additional information for the builder to create // the resolver. type BuildOption struct { + // DisableServiceConfig indicates whether resolver should fetch service config data. + DisableServiceConfig bool } // ClientConn contains the callbacks for resolver to notify any updates // to the gRPC ClientConn. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. type ClientConn interface { // NewAddress is called by resolver to notify ClientConn a new list // of resolved addresses. @@ -128,8 +137,10 @@ type ResolveNowOption struct{} // Resolver watches for the updates on the specified target. // Updates include address updates and service config updates. type Resolver interface { - // ResolveNow will be called by gRPC to try to resolve the target name again. - // It's just a hint, resolver can ignore this if it's not necessary. + // ResolveNow will be called by gRPC to try to resolve the target name + // again. It's just a hint, resolver can ignore this if it's not necessary. + // + // It could be called multiple times concurrently. ResolveNow(ResolveNowOption) // Close closes the resolver. 
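// With defaultScheme now "passthrough", a plain host:port target goes to the
// dialer verbatim, while a dns:/// prefix opts in to the DNS resolver above.
// A dialing sketch (addresses are illustrative):

package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// Passthrough: the endpoint string is handed to the dialer unmodified.
	cc1, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer cc1.Close()

	// DNS: A/AAAA (plus grpclb SRV and TXT service config) lookups, refreshed
	// by the watcher goroutine.
	cc2, err := grpc.Dial("dns:///example.com:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer cc2.Close()
}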
Close() diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/cluster-autoscaler/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index 7d53964d094d..494d6931ebb5 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -19,6 +19,7 @@ package grpc import ( + "fmt" "strings" "google.golang.org/grpc/grpclog" @@ -36,39 +37,43 @@ type ccResolverWrapper struct { } // split2 returns the values from strings.SplitN(s, sep, 2). -// If sep is not found, it returns "", s instead. -func split2(s, sep string) (string, string) { +// If sep is not found, it returns ("", s, false) instead. +func split2(s, sep string) (string, string, bool) { spl := strings.SplitN(s, sep, 2) if len(spl) < 2 { - return "", s + return "", "", false } - return spl[0], spl[1] + return spl[0], spl[1], true } // parseTarget splits target into a struct containing scheme, authority and // endpoint. +// +// If target is not a valid scheme://authority/endpoint, it returns {Endpoint: +// target}. func parseTarget(target string) (ret resolver.Target) { - ret.Scheme, ret.Endpoint = split2(target, "://") - ret.Authority, ret.Endpoint = split2(ret.Endpoint, "/") + var ok bool + ret.Scheme, ret.Endpoint, ok = split2(target, "://") + if !ok { + return resolver.Target{Endpoint: target} + } + ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") + if !ok { + return resolver.Target{Endpoint: target} + } return ret } // newCCResolverWrapper parses cc.target for scheme and gets the resolver -// builder for this scheme. It then builds the resolver and starts the -// monitoring goroutine for it. +// builder for this scheme and builds the resolver. The monitoring goroutine +// for it is not started yet and can be created by calling start(). // -// This function could return nil, nil, in tests for old behaviors. -// TODO(bar) never return nil, nil when DNS becomes the default resolver. +// If withResolverBuilder dial option is set, the specified resolver will be +// used instead. func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) { - target := parseTarget(cc.target) - grpclog.Infof("dialing to target with scheme: %q", target.Scheme) - - rb := resolver.Get(target.Scheme) + rb := cc.dopts.resolverBuilder if rb == nil { - // TODO(bar) return error when DNS becomes the default (implemented and - // registered by DNS package). - grpclog.Infof("could not get resolver for scheme: %q", target.Scheme) - return nil, nil + return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme) } ccr := &ccResolverWrapper{ @@ -79,15 +84,18 @@ func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) { } var err error - ccr.resolver, err = rb.Build(target, ccr, resolver.BuildOption{}) + ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, resolver.BuildOption{DisableServiceConfig: cc.dopts.disableServiceConfig}) if err != nil { return nil, err } - go ccr.watcher() return ccr, nil } -// watcher processes address updates and service config updates sequencially. +func (ccr *ccResolverWrapper) start() { + go ccr.watcher() +} + +// watcher processes address updates and service config updates sequentially. // Otherwise, we need to resolve possible races between address and service // config (e.g. they specify different balancer types). 
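// parseTarget above now degrades cleanly: anything that is not a well-formed
// scheme://authority/endpoint becomes {Endpoint: target}, which the
// passthrough default scheme then resolves verbatim. The two helpers copied
// into a runnable check:

package main

import (
	"fmt"
	"strings"
)

type target struct{ Scheme, Authority, Endpoint string }

func split2(s, sep string) (string, string, bool) {
	spl := strings.SplitN(s, sep, 2)
	if len(spl) < 2 {
		return "", "", false
	}
	return spl[0], spl[1], true
}

func parseTarget(t string) (ret target) {
	var ok bool
	ret.Scheme, ret.Endpoint, ok = split2(t, "://")
	if !ok {
		return target{Endpoint: t}
	}
	ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/")
	if !ok {
		return target{Endpoint: t}
	}
	return ret
}

func main() {
	fmt.Printf("%+v\n", parseTarget("dns:///example.com:50051")) // {Scheme:dns Authority: Endpoint:example.com:50051}
	fmt.Printf("%+v\n", parseTarget("localhost:50051"))          // {Scheme: Authority: Endpoint:localhost:50051}
}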
func (ccr *ccResolverWrapper) watcher() { @@ -100,20 +108,31 @@ func (ccr *ccResolverWrapper) watcher() { select { case addrs := <-ccr.addrCh: - grpclog.Infof("ccResolverWrapper: sending new addresses to balancer wrapper: %v", addrs) - // TODO(bar switching) this should never be nil. Pickfirst should be default. - if ccr.cc.balancerWrapper != nil { - // TODO(bar switching) create balancer if it's nil? - ccr.cc.balancerWrapper.handleResolvedAddrs(addrs, nil) + select { + case <-ccr.done: + return + default: } + grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs) + ccr.cc.handleResolvedAddrs(addrs, nil) case sc := <-ccr.scCh: + select { + case <-ccr.done: + return + default: + } grpclog.Infof("ccResolverWrapper: got new service config: %v", sc) + ccr.cc.handleServiceConfig(sc) case <-ccr.done: return } } } +func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) { + ccr.resolver.ResolveNow(o) +} + func (ccr *ccResolverWrapper) close() { ccr.resolver.Close() close(ccr.done) @@ -129,7 +148,7 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { } // NewServiceConfig is called by the resolver implemenetion to send service -// configs to gPRC. +// configs to gRPC. func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { select { case <-ccr.scCh: diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/rpc_util.go b/cluster-autoscaler/vendor/google.golang.org/grpc/rpc_util.go index 188a75fff946..033801f34f8d 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/rpc_util.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/rpc_util.go @@ -21,18 +21,21 @@ package grpc import ( "bytes" "compress/gzip" - stdctx "context" "encoding/binary" + "fmt" "io" "io/ioutil" "math" - "os" + "net/url" + "strings" "sync" "time" "golang.org/x/net/context" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/encoding/proto" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -41,6 +44,8 @@ import ( ) // Compressor defines the interface gRPC uses to compress a message. +// +// Deprecated: use package encoding. type Compressor interface { // Do compresses p into w. Do(w io.Writer, p []byte) error @@ -53,14 +58,34 @@ type gzipCompressor struct { } // NewGZIPCompressor creates a Compressor based on GZIP. +// +// Deprecated: use package encoding/gzip. func NewGZIPCompressor() Compressor { + c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression) + return c +} + +// NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead +// of assuming DefaultCompression. +// +// The error returned will be nil if the level is valid. +// +// Deprecated: use package encoding/gzip. +func NewGZIPCompressorWithLevel(level int) (Compressor, error) { + if level < gzip.DefaultCompression || level > gzip.BestCompression { + return nil, fmt.Errorf("grpc: invalid compression level: %d", level) + } return &gzipCompressor{ pool: sync.Pool{ New: func() interface{} { - return gzip.NewWriter(ioutil.Discard) + w, err := gzip.NewWriterLevel(ioutil.Discard, level) + if err != nil { + panic(err) + } + return w }, }, - } + }, nil } func (c *gzipCompressor) Do(w io.Writer, p []byte) error { @@ -78,6 +103,8 @@ func (c *gzipCompressor) Type() string { } // Decompressor defines the interface gRPC uses to decompress a message. +// +// Deprecated: use package encoding. 
type Decompressor interface { // Do reads the data from r and uncompress them. Do(r io.Reader) ([]byte, error) @@ -90,6 +117,8 @@ type gzipDecompressor struct { } // NewGZIPDecompressor creates a Decompressor based on GZIP. +// +// Deprecated: use package encoding/gzip. func NewGZIPDecompressor() Decompressor { return &gzipDecompressor{} } @@ -124,14 +153,15 @@ func (d *gzipDecompressor) Type() string { // callInfo contains all related configuration and information about an RPC. type callInfo struct { + compressorType string failFast bool - headerMD metadata.MD - trailerMD metadata.MD - peer *peer.Peer + stream *clientStream traceInfo traceInfo // in trace.go maxReceiveMessageSize *int maxSendMessageSize *int creds credentials.PerRPCCredentials + contentSubtype string + codec baseCodec } func defaultCallInfo() *callInfo { @@ -158,87 +188,239 @@ type EmptyCallOption struct{} func (EmptyCallOption) before(*callInfo) error { return nil } func (EmptyCallOption) after(*callInfo) {} -type beforeCall func(c *callInfo) error - -func (o beforeCall) before(c *callInfo) error { return o(c) } -func (o beforeCall) after(c *callInfo) {} - -type afterCall func(c *callInfo) - -func (o afterCall) before(c *callInfo) error { return nil } -func (o afterCall) after(c *callInfo) { o(c) } - // Header returns a CallOptions that retrieves the header metadata // for a unary RPC. func Header(md *metadata.MD) CallOption { - return afterCall(func(c *callInfo) { - *md = c.headerMD - }) + return HeaderCallOption{HeaderAddr: md} +} + +// HeaderCallOption is a CallOption for collecting response header metadata. +// The metadata field will be populated *after* the RPC completes. +// This is an EXPERIMENTAL API. +type HeaderCallOption struct { + HeaderAddr *metadata.MD +} + +func (o HeaderCallOption) before(c *callInfo) error { return nil } +func (o HeaderCallOption) after(c *callInfo) { + if c.stream != nil { + *o.HeaderAddr, _ = c.stream.Header() + } } // Trailer returns a CallOptions that retrieves the trailer metadata // for a unary RPC. func Trailer(md *metadata.MD) CallOption { - return afterCall(func(c *callInfo) { - *md = c.trailerMD - }) + return TrailerCallOption{TrailerAddr: md} } -// Peer returns a CallOption that retrieves peer information for a -// unary RPC. -func Peer(peer *peer.Peer) CallOption { - return afterCall(func(c *callInfo) { - if c.peer != nil { - *peer = *c.peer +// TrailerCallOption is a CallOption for collecting response trailer metadata. +// The metadata field will be populated *after* the RPC completes. +// This is an EXPERIMENTAL API. +type TrailerCallOption struct { + TrailerAddr *metadata.MD +} + +func (o TrailerCallOption) before(c *callInfo) error { return nil } +func (o TrailerCallOption) after(c *callInfo) { + if c.stream != nil { + *o.TrailerAddr = c.stream.Trailer() + } +} + +// Peer returns a CallOption that retrieves peer information for a unary RPC. +// The peer field will be populated *after* the RPC completes. +func Peer(p *peer.Peer) CallOption { + return PeerCallOption{PeerAddr: p} +} + +// PeerCallOption is a CallOption for collecting the identity of the remote +// peer. The peer field will be populated *after* the RPC completes. +// This is an EXPERIMENTAL API. 
+type PeerCallOption struct { + PeerAddr *peer.Peer +} + +func (o PeerCallOption) before(c *callInfo) error { return nil } +func (o PeerCallOption) after(c *callInfo) { + if c.stream != nil { + if x, ok := peer.FromContext(c.stream.Context()); ok { + *o.PeerAddr = *x } - }) + } } // FailFast configures the action to take when an RPC is attempted on broken -// connections or unreachable servers. If failfast is true, the RPC will fail +// connections or unreachable servers. If failFast is true, the RPC will fail // immediately. Otherwise, the RPC client will block the call until a -// connection is available (or the call is canceled or times out) and will retry -// the call if it fails due to a transient error. Please refer to +// connection is available (or the call is canceled or times out) and will +// retry the call if it fails due to a transient error. gRPC will not retry if +// data was written to the wire unless the server indicates it did not process +// the data. Please refer to // https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. -// Note: failFast is default to true. +// +// By default, RPCs are "Fail Fast". func FailFast(failFast bool) CallOption { - return beforeCall(func(c *callInfo) error { - c.failFast = failFast - return nil - }) + return FailFastCallOption{FailFast: failFast} } +// FailFastCallOption is a CallOption for indicating whether an RPC should fail +// fast or not. +// This is an EXPERIMENTAL API. +type FailFastCallOption struct { + FailFast bool +} + +func (o FailFastCallOption) before(c *callInfo) error { + c.failFast = o.FailFast + return nil +} +func (o FailFastCallOption) after(c *callInfo) {} + // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive. func MaxCallRecvMsgSize(s int) CallOption { - return beforeCall(func(o *callInfo) error { - o.maxReceiveMessageSize = &s - return nil - }) + return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: s} +} + +// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message +// size the client can receive. +// This is an EXPERIMENTAL API. +type MaxRecvMsgSizeCallOption struct { + MaxRecvMsgSize int } +func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { + c.maxReceiveMessageSize = &o.MaxRecvMsgSize + return nil +} +func (o MaxRecvMsgSizeCallOption) after(c *callInfo) {} + // MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send. func MaxCallSendMsgSize(s int) CallOption { - return beforeCall(func(o *callInfo) error { - o.maxSendMessageSize = &s - return nil - }) + return MaxSendMsgSizeCallOption{MaxSendMsgSize: s} +} + +// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message +// size the client can send. +// This is an EXPERIMENTAL API. +type MaxSendMsgSizeCallOption struct { + MaxSendMsgSize int +} + +func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { + c.maxSendMessageSize = &o.MaxSendMsgSize + return nil } +func (o MaxSendMsgSizeCallOption) after(c *callInfo) {} // PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials // for a call. func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { - return beforeCall(func(c *callInfo) error { - c.creds = creds - return nil - }) + return PerRPCCredsCallOption{Creds: creds} +} + +// PerRPCCredsCallOption is a CallOption that indicates the per-RPC +// credentials to use for the call. +// This is an EXPERIMENTAL API. 
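// A credentials.PerRPCCredentials implementation usable with the per-RPC
// credentials option described above; the static bearer token is purely
// illustrative:

package main

import (
	"fmt"

	"golang.org/x/net/context"
)

type staticToken string

// GetRequestMetadata implements credentials.PerRPCCredentials: the returned
// map is attached to the outgoing request metadata.
func (t staticToken) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return map[string]string{"authorization": "Bearer " + string(t)}, nil
}

// RequireTransportSecurity implements credentials.PerRPCCredentials.
func (t staticToken) RequireTransportSecurity() bool { return true }

func main() {
	md, _ := staticToken("s3cr3t").GetRequestMetadata(context.Background())
	fmt.Println(md) // map[authorization:Bearer s3cr3t]
	// Per call: grpc.PerRPCCredentials(staticToken("s3cr3t"))
}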
+type PerRPCCredsCallOption struct { + Creds credentials.PerRPCCredentials +} + +func (o PerRPCCredsCallOption) before(c *callInfo) error { + c.creds = o.Creds + return nil +} +func (o PerRPCCredsCallOption) after(c *callInfo) {} + +// UseCompressor returns a CallOption which sets the compressor used when +// sending the request. If WithCompressor is also set, UseCompressor has +// higher priority. +// +// This API is EXPERIMENTAL. +func UseCompressor(name string) CallOption { + return CompressorCallOption{CompressorType: name} } +// CompressorCallOption is a CallOption that indicates the compressor to use. +// This is an EXPERIMENTAL API. +type CompressorCallOption struct { + CompressorType string +} + +func (o CompressorCallOption) before(c *callInfo) error { + c.compressorType = o.CompressorType + return nil +} +func (o CompressorCallOption) after(c *callInfo) {} + +// CallContentSubtype returns a CallOption that will set the content-subtype +// for a call. For example, if content-subtype is "json", the Content-Type over +// the wire will be "application/grpc+json". The content-subtype is converted +// to lowercase before being included in Content-Type. See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If CallCustomCodec is not also used, the content-subtype will be used to +// look up the Codec to use in the registry controlled by RegisterCodec. See +// the documentation on RegisterCodec for details on registration. The lookup +// of content-subtype is case-insensitive. If no such Codec is found, the call +// will result in an error with code codes.Internal. +// +// If CallCustomCodec is also used, that Codec will be used for all request and +// response messages, with the content-subtype set to the given contentSubtype +// here for requests. +func CallContentSubtype(contentSubtype string) CallOption { + return ContentSubtypeCallOption{ContentSubtype: strings.ToLower(contentSubtype)} +} + +// ContentSubtypeCallOption is a CallOption that indicates the content-subtype +// used for marshaling messages. +// This is an EXPERIMENTAL API. +type ContentSubtypeCallOption struct { + ContentSubtype string +} + +func (o ContentSubtypeCallOption) before(c *callInfo) error { + c.contentSubtype = o.ContentSubtype + return nil +} +func (o ContentSubtypeCallOption) after(c *callInfo) {} + +// CallCustomCodec returns a CallOption that will set the given Codec to be +// used for all request and response messages for a call. The result of calling +// String() will be used as the content-subtype in a case-insensitive manner. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between Codec and +// content-subtype. +// +// This function is provided for advanced users; prefer to use only +// CallContentSubtype to select a registered codec instead. +func CallCustomCodec(codec Codec) CallOption { + return CustomCodecCallOption{Codec: codec} +} + +// CustomCodecCallOption is a CallOption that indicates the codec used for +// marshaling messages. +// This is an EXPERIMENTAL API. +type CustomCodecCallOption struct { + Codec Codec +} + +func (o CustomCodecCallOption) before(c *callInfo) error { + c.codec = o.Codec + return nil +} +func (o CustomCodecCallOption) after(c *callInfo) {} + // The format of the payload: compressed or not? 
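// The exported *CallOption structs above keep the existing helper functions
// working unchanged on the caller side. A unary call collecting header,
// trailer, and peer metadata (the method name and messages are placeholders,
// not a real service):

package main

import (
	"fmt"
	"log"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/peer"
)

func main() {
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	var (
		header, trailer metadata.MD
		p               peer.Peer
	)
	err = grpc.Invoke(ctx, "/hypothetical.Service/Method",
		&struct{}{}, &struct{}{}, conn, // placeholder request/response messages
		grpc.Header(&header),   // filled in after the RPC completes
		grpc.Trailer(&trailer), // filled in after the RPC completes
		grpc.Peer(&p),          // identity of the remote peer
		grpc.FailFast(false),   // wait for a ready connection until the deadline
	)
	fmt.Println(err, header, trailer, p.Addr)
}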
type payloadFormat uint8 const ( - compressionNone payloadFormat = iota // no compression - compressionMade + compressionNone payloadFormat = 0 // no compression + compressionMade payloadFormat = 1 // compressed ) // parser reads complete gRPC messages from the underlying reader. @@ -248,8 +430,8 @@ type parser struct { // error types. r io.Reader - // The header of a gRPC message. Find more detail - // at https://grpc.io/docs/guides/wire.html. + // The header of a gRPC message. Find more detail at + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md header [5]byte } @@ -277,8 +459,11 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt if length == 0 { return pf, nil, nil } - if length > uint32(maxReceiveMessageSize) { - return 0, nil, Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) + if int64(length) > int64(maxInt) { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt) + } + if int(length) > maxReceiveMessageSize { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) } // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead // of making it for each message: @@ -292,67 +477,104 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt return pf, msg, nil } -// encode serializes msg and returns a buffer of message header and a buffer of msg. -// If msg is nil, it generates the message header and an empty msg buffer. -func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayload *stats.OutPayload) ([]byte, []byte, error) { - var b []byte - const ( - payloadLen = 1 - sizeLen = 4 - ) +// encode serializes msg and returns a buffer containing the message, or an +// error if it is too large to be transmitted by grpc. If msg is nil, it +// generates an empty message. +func encode(c baseCodec, msg interface{}) ([]byte, error) { + if msg == nil { // NOTE: typed nils will not be caught by this check + return nil, nil + } + b, err := c.Marshal(msg) + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) + } + if uint(len(b)) > math.MaxUint32 { + return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) + } + return b, nil +} - if msg != nil { - var err error - b, err = c.Marshal(msg) - if err != nil { - return nil, nil, Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) +// compress returns the input bytes compressed by compressor or cp. If both +// compressors are nil, returns nil. +// +// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. +func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { + if compressor == nil && cp == nil { + return nil, nil + } + wrapErr := func(err error) error { + return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) + } + cbuf := &bytes.Buffer{} + if compressor != nil { + z, _ := compressor.Compress(cbuf) + if _, err := z.Write(in); err != nil { + return nil, wrapErr(err) } - if outPayload != nil { - outPayload.Payload = msg - // TODO truncate large payload. 
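// The new compress helper above writes the message through the compressor and
// closes the writer before the buffer is read. The same flow with the stdlib
// gzip writer standing in for a registered encoding.Compressor:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"log"
)

func main() {
	in := []byte("hello hello hello hello hello")
	cbuf := &bytes.Buffer{}
	z := gzip.NewWriter(cbuf)
	if _, err := z.Write(in); err != nil {
		log.Fatal(err)
	}
	if err := z.Close(); err != nil { // Close finalizes the gzip stream
		log.Fatal(err)
	}
	fmt.Printf("compressed %d bytes to %d\n", len(in), cbuf.Len())
}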
- outPayload.Data = b - outPayload.Length = len(b) + if err := z.Close(); err != nil { + return nil, wrapErr(err) } - if cp != nil { - if err := cp.Do(cbuf, b); err != nil { - return nil, nil, Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) - } - b = cbuf.Bytes() + } else { + if err := cp.Do(cbuf, in); err != nil { + return nil, wrapErr(err) } } + return cbuf.Bytes(), nil +} - if uint(len(b)) > math.MaxUint32 { - return nil, nil, Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) - } +const ( + payloadLen = 1 + sizeLen = 4 + headerLen = payloadLen + sizeLen +) - bufHeader := make([]byte, payloadLen+sizeLen) - if cp == nil { - bufHeader[0] = byte(compressionNone) +// msgHeader returns a 5-byte header for the message being transmitted and the +// payload, which is compData if non-nil or data otherwise. +func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { + hdr = make([]byte, headerLen) + if compData != nil { + hdr[0] = byte(compressionMade) + data = compData } else { - bufHeader[0] = byte(compressionMade) + hdr[0] = byte(compressionNone) } - // Write length of b into buf - binary.BigEndian.PutUint32(bufHeader[payloadLen:], uint32(len(b))) - if outPayload != nil { - outPayload.WireLength = payloadLen + sizeLen + len(b) + + // Write length of payload into buf + binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data))) + return hdr, data +} + +func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { + return &stats.OutPayload{ + Client: client, + Payload: msg, + Data: data, + Length: len(data), + WireLength: len(payload) + headerLen, + SentTime: t, } - return bufHeader, b, nil } -func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) error { +func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status { switch pf { case compressionNone: case compressionMade: - if dc == nil || recvCompress != dc.Type() { - return Errorf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + if recvCompress == "" || recvCompress == encoding.Identity { + return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding") + } + if !haveCompressor { + return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) } default: - return Errorf(codes.Internal, "grpc: received unexpected payload format %d", pf) + return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) } return nil } -func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload) error { +// For the two compressor parameters, both should not be set, but if they are, +// dc takes precedence over compressor. +// TODO(dfawley): wrap the old compressor/decompressor using the new API? 
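// msgHeader above emits the 5-byte gRPC message header: one byte for the
// compressed flag followed by the big-endian payload length, per
// grpc/PROTOCOL-HTTP2.md. The layout by hand:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	payload := []byte("hello")
	hdr := make([]byte, 5)
	hdr[0] = 0 // compressionNone; a compressed payload would set 1
	binary.BigEndian.PutUint32(hdr[1:], uint32(len(payload)))
	fmt.Printf("% x\n", append(hdr, payload...)) // 00 00 00 00 05 68 65 6c 6c 6f
}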
+func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload, compressor encoding.Compressor) error { pf, d, err := p.recvMsg(maxReceiveMessageSize) if err != nil { return err @@ -360,22 +582,37 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{ if inPayload != nil { inPayload.WireLength = len(d) } - if err := checkRecvPayload(pf, s.RecvCompress(), dc); err != nil { - return err + + if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { + return st.Err() } + if pf == compressionMade { - d, err = dc.Do(bytes.NewReader(d)) - if err != nil { - return Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, + // use this decompressor as the default. + if dc != nil { + d, err = dc.Do(bytes.NewReader(d)) + if err != nil { + return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + } + } else { + dcReader, err := compressor.Decompress(bytes.NewReader(d)) + if err != nil { + return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + } + d, err = ioutil.ReadAll(dcReader) + if err != nil { + return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + } } } if len(d) > maxReceiveMessageSize { // TODO: Revisit the error code. Currently keep it consistent with java // implementation. - return Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize) + return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize) } if err := c.Unmarshal(d, m); err != nil { - return Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) + return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) } if inPayload != nil { inPayload.RecvTime = time.Now() @@ -388,9 +625,7 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{ } type rpcInfo struct { - failfast bool - bytesSent bool - bytesReceived bool + failfast bool } type rpcInfoContextKey struct{} @@ -404,69 +639,10 @@ func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { return } -func updateRPCInfoInContext(ctx context.Context, s rpcInfo) { - if ss, ok := rpcInfoFromContext(ctx); ok { - ss.bytesReceived = s.bytesReceived - ss.bytesSent = s.bytesSent - } - return -} - -// toRPCErr converts an error into an error from the status package. -func toRPCErr(err error) error { - if _, ok := status.FromError(err); ok { - return err - } - switch e := err.(type) { - case transport.StreamError: - return status.Error(e.Code, e.Desc) - case transport.ConnectionError: - return status.Error(codes.Unavailable, e.Desc) - default: - switch err { - case context.DeadlineExceeded, stdctx.DeadlineExceeded: - return status.Error(codes.DeadlineExceeded, err.Error()) - case context.Canceled, stdctx.Canceled: - return status.Error(codes.Canceled, err.Error()) - case ErrClientConnClosing: - return status.Error(codes.FailedPrecondition, err.Error()) - } - } - return status.Error(codes.Unknown, err.Error()) -} - -// convertCode converts a standard Go error into its canonical code. Note that -// this is only used to translate the error returned by the server applications. 
-func convertCode(err error) codes.Code { - switch err { - case nil: - return codes.OK - case io.EOF: - return codes.OutOfRange - case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF: - return codes.FailedPrecondition - case os.ErrInvalid: - return codes.InvalidArgument - case context.Canceled, stdctx.Canceled: - return codes.Canceled - case context.DeadlineExceeded, stdctx.DeadlineExceeded: - return codes.DeadlineExceeded - } - switch { - case os.IsExist(err): - return codes.AlreadyExists - case os.IsNotExist(err): - return codes.NotFound - case os.IsPermission(err): - return codes.PermissionDenied - } - return codes.Unknown -} - // Code returns the error code for err if it was produced by the rpc system. // Otherwise, it returns codes.Unknown. // -// Deprecated; use status.FromError and Code method instead. +// Deprecated: use status.FromError and Code method instead. func Code(err error) codes.Code { if s, ok := status.FromError(err); ok { return s.Code() @@ -477,7 +653,7 @@ func Code(err error) codes.Code { // ErrorDesc returns the error description of err if it was produced by the rpc system. // Otherwise, it returns err.Error() or empty string when err is nil. // -// Deprecated; use status.FromError and Message method instead. +// Deprecated: use status.FromError and Message method instead. func ErrorDesc(err error) string { if s, ok := status.FromError(err); ok { return s.Message() @@ -488,85 +664,78 @@ func ErrorDesc(err error) string { // Errorf returns an error containing an error code and a description; // Errorf returns nil if c is OK. // -// Deprecated; use status.Errorf instead. +// Deprecated: use status.Errorf instead. func Errorf(c codes.Code, format string, a ...interface{}) error { return status.Errorf(c, format, a...) } -// MethodConfig defines the configuration recommended by the service providers for a -// particular method. -// This is EXPERIMENTAL and subject to change. -type MethodConfig struct { - // WaitForReady indicates whether RPCs sent to this method should wait until - // the connection is ready by default (!failfast). The value specified via the - // gRPC client API will override the value set here. - WaitForReady *bool - // Timeout is the default timeout for RPCs sent to this method. The actual - // deadline used will be the minimum of the value specified here and the value - // set by the application via the gRPC client API. If either one is not set, - // then the other will be used. If neither is set, then the RPC has no deadline. - Timeout *time.Duration - // MaxReqSize is the maximum allowed payload size for an individual request in a - // stream (client->server) in bytes. The size which is measured is the serialized - // payload after per-message compression (but before stream compression) in bytes. - // The actual value used is the minimum of the value specified here and the value set - // by the application via the gRPC client API. If either one is not set, then the other - // will be used. If neither is set, then the built-in default is used. - MaxReqSize *int - // MaxRespSize is the maximum allowed payload size for an individual response in a - // stream (server->client) in bytes. - MaxRespSize *int -} - -// ServiceConfig is provided by the service provider and contains parameters for how -// clients that connect to the service should behave. -// This is EXPERIMENTAL and subject to change. -type ServiceConfig struct { - // LB is the load balancer the service providers recommends. 
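// The migration path for the deprecations above: grpc.Code and grpc.ErrorDesc
// give way to the status package, which round-trips both fields:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	err := status.Errorf(codes.NotFound, "no such user")
	if s, ok := status.FromError(err); ok {
		fmt.Println(s.Code(), s.Message()) // NotFound no such user
	}
}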
The balancer specified - // via grpc.WithBalancer will override this. - LB Balancer - // Methods contains a map for the methods in this service. - // If there is an exact match for a method (i.e. /service/method) in the map, use the corresponding MethodConfig. - // If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists. - // Otherwise, the method has no MethodConfig to use. - Methods map[string]MethodConfig -} - -func min(a, b *int) *int { - if *a < *b { - return a - } - return b -} - -func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { - if mcMax == nil && doptMax == nil { - return &defaultVal - } - if mcMax != nil && doptMax != nil { - return min(mcMax, doptMax) - } - if mcMax != nil { - return mcMax - } - return doptMax -} - -// SupportPackageIsVersion3 is referenced from generated protocol buffer files. -// The latest support package version is 4. -// SupportPackageIsVersion3 is kept for compatibility. It will be removed in the -// next support package version update. -const SupportPackageIsVersion3 = true - -// SupportPackageIsVersion4 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the grpc package. -// -// This constant may be renamed in the future if a change in the generated code -// requires a synchronised update of grpc-go and protoc-gen-go. This constant -// should not be referenced from any other code. -const SupportPackageIsVersion4 = true +// setCallInfoCodec should only be called after CallOptions have been applied. +func setCallInfoCodec(c *callInfo) error { + if c.codec != nil { + // codec was already set by a CallOption; use it. + return nil + } -// Version is the current grpc version. -const Version = "1.7.5" + if c.contentSubtype == "" { + // No codec specified in CallOptions; use proto by default. + c.codec = encoding.GetCodec(proto.Name) + return nil + } + + // c.contentSubtype is already lowercased in CallContentSubtype + c.codec = encoding.GetCodec(c.contentSubtype) + if c.codec == nil { + return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype) + } + return nil +} + +// parseDialTarget returns the network and address to pass to dialer +func parseDialTarget(target string) (net string, addr string) { + net = "tcp" + + m1 := strings.Index(target, ":") + m2 := strings.Index(target, ":/") + + // handle unix:addr which will fail with url.Parse + if m1 >= 0 && m2 < 0 { + if n := target[0:m1]; n == "unix" { + net = n + addr = target[m1+1:] + return net, addr + } + } + if m2 >= 0 { + t, err := url.Parse(target) + if err != nil { + return net, target + } + scheme := t.Scheme + addr = t.Path + if scheme == "unix" { + net = scheme + if addr == "" { + addr = t.Host + } + return net, addr + } + } + + return net, target +} + +// The SupportPackageIsVersion variables are referenced from generated protocol +// buffer files to ensure compatibility with the gRPC version used. The latest +// support package version is 5. +// +// Older versions are kept for compatibility. They may be removed if +// compatibility cannot be maintained. +// +// These constants should not be referenced from any other code. 
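// parseDialTarget above routes unix targets to the unix network and leaves
// everything else on tcp. The function copied (lightly condensed) into a
// runnable check:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func parseDialTarget(target string) (net string, addr string) {
	net = "tcp"
	m1 := strings.Index(target, ":")
	m2 := strings.Index(target, ":/")
	if m1 >= 0 && m2 < 0 { // unix:addr, which url.Parse cannot handle
		if n := target[0:m1]; n == "unix" {
			return n, target[m1+1:]
		}
	}
	if m2 >= 0 {
		t, err := url.Parse(target)
		if err != nil {
			return net, target
		}
		if t.Scheme == "unix" {
			if addr = t.Path; addr == "" {
				addr = t.Host
			}
			return t.Scheme, addr
		}
	}
	return net, target
}

func main() {
	fmt.Println(parseDialTarget("unix:///tmp/grpc.sock")) // unix /tmp/grpc.sock
	fmt.Println(parseDialTarget("unix:@abstract"))        // unix @abstract
	fmt.Println(parseDialTarget("example.com:443"))       // tcp example.com:443
}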
+const ( + SupportPackageIsVersion3 = true + SupportPackageIsVersion4 = true + SupportPackageIsVersion5 = true +) const grpcUA = "grpc-go/" + Version diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/server.go b/cluster-autoscaler/vendor/google.golang.org/grpc/server.go index 787665dfeb31..014c72b3f283 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/server.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/server.go @@ -32,13 +32,19 @@ import ( "sync" "time" + "io/ioutil" + "golang.org/x/net/context" "golang.org/x/net/http2" "golang.org/x/net/trace" + "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/encoding/proto" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/stats" @@ -89,18 +95,28 @@ type Server struct { conns map[io.Closer]bool serve bool drain bool - ctx context.Context - cancel context.CancelFunc - // A CondVar to let GracefulStop() blocks until all the pending RPCs are finished - // and all the transport goes away. - cv *sync.Cond + cv *sync.Cond // signaled when connections close for GracefulStop m map[string]*service // service name -> service info events trace.EventLog + + quit chan struct{} + done chan struct{} + quitOnce sync.Once + doneOnce sync.Once + channelzRemoveOnce sync.Once + serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop + + channelzID int64 // channelz unique identification number + czmu sync.RWMutex + callsStarted int64 + callsFailed int64 + callsSucceeded int64 + lastCallStartedTime time.Time } type options struct { creds credentials.TransportCredentials - codec Codec + codec baseCodec cp Compressor dc Decompressor unaryInt UnaryServerInterceptor @@ -177,20 +193,32 @@ func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { } // CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. +// +// This will override any lookups by content-subtype for Codecs registered with RegisterCodec. func CustomCodec(codec Codec) ServerOption { return func(o *options) { o.codec = codec } } -// RPCCompressor returns a ServerOption that sets a compressor for outbound messages. +// RPCCompressor returns a ServerOption that sets a compressor for outbound +// messages. For backward compatibility, all outbound messages will be sent +// using this compressor, regardless of incoming message compression. By +// default, server messages will be sent using the same compressor with which +// request messages were sent. +// +// Deprecated: use encoding.RegisterCompressor instead. func RPCCompressor(cp Compressor) ServerOption { return func(o *options) { o.cp = cp } } -// RPCDecompressor returns a ServerOption that sets a decompressor for inbound messages. +// RPCDecompressor returns a ServerOption that sets a decompressor for inbound +// messages. It has higher priority than decompressors registered via +// encoding.RegisterCompressor. +// +// Deprecated: use encoding.RegisterCompressor instead. func RPCDecompressor(dc Decompressor) ServerOption { return func(o *options) { o.dc = dc @@ -198,7 +226,9 @@ func RPCDecompressor(dc Decompressor) ServerOption { } // MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. -// If this is not set, gRPC uses the default limit. Deprecated: use MaxRecvMsgSize instead. 
+// If this is not set, gRPC uses the default limit. +// +// Deprecated: use MaxRecvMsgSize instead. func MaxMsgSize(m int) ServerOption { return MaxRecvMsgSize(m) } @@ -297,6 +327,8 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { // connection establishment (up to and including HTTP/2 handshaking) for all // new connections. If this is not set, the default is 120 seconds. A zero or // negative value will result in an immediate timeout. +// +// This API is EXPERIMENTAL. func ConnectionTimeout(d time.Duration) ServerOption { return func(o *options) { o.connectionTimeout = d @@ -310,22 +342,23 @@ func NewServer(opt ...ServerOption) *Server { for _, o := range opt { o(&opts) } - if opts.codec == nil { - // Set the default codec. - opts.codec = protoCodec{} - } s := &Server{ lis: make(map[net.Listener]bool), opts: opts, conns: make(map[io.Closer]bool), m: make(map[string]*service), + quit: make(chan struct{}), + done: make(chan struct{}), } s.cv = sync.NewCond(&s.mu) - s.ctx, s.cancel = context.WithCancel(context.Background()) if EnableTracing { _, file, line, _ := runtime.Caller(1) s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) } + + if channelz.IsOn() { + s.channelzID = channelz.RegisterServer(s, "") + } return s } @@ -430,11 +463,9 @@ func (s *Server) GetServiceInfo() map[string]ServiceInfo { return ret } -var ( - // ErrServerStopped indicates that the operation is now illegal because of - // the server being stopped. - ErrServerStopped = errors.New("grpc: the server has been stopped") -) +// ErrServerStopped indicates that the operation is now illegal because of +// the server being stopped. +var ErrServerStopped = errors.New("grpc: the server has been stopped") func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { if s.opts.creds == nil { @@ -443,28 +474,66 @@ func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credenti return s.opts.creds.ServerHandshake(rawConn) } +type listenSocket struct { + net.Listener + channelzID int64 +} + +func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { + return &channelz.SocketInternalMetric{ + LocalAddr: l.Listener.Addr(), + } +} + +func (l *listenSocket) Close() error { + err := l.Listener.Close() + if channelz.IsOn() { + channelz.RemoveEntry(l.channelzID) + } + return err +} + // Serve accepts incoming connections on the listener lis, creating a new // ServerTransport and service goroutine for each. The service goroutines // read gRPC requests and then call the registered handlers to reply to them. // Serve returns when lis.Accept fails with fatal errors. lis will be closed when // this method returns. -// Serve always returns non-nil error. +// Serve will return a non-nil error unless Stop or GracefulStop is called. func (s *Server) Serve(lis net.Listener) error { s.mu.Lock() s.printf("serving") s.serve = true if s.lis == nil { + // Serve called after Stop or GracefulStop. s.mu.Unlock() lis.Close() return ErrServerStopped } - s.lis[lis] = true + + s.serveWG.Add(1) + defer func() { + s.serveWG.Done() + select { + // Stop or GracefulStop called; block until done and return nil. 
+ case <-s.quit: + <-s.done + default: + } + }() + + ls := &listenSocket{Listener: lis} + s.lis[ls] = true + + if channelz.IsOn() { + ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, "") + } s.mu.Unlock() + defer func() { s.mu.Lock() - if s.lis != nil && s.lis[lis] { - lis.Close() - delete(s.lis, lis) + if s.lis != nil && s.lis[ls] { + ls.Close() + delete(s.lis, ls) } s.mu.Unlock() }() @@ -491,25 +560,39 @@ func (s *Server) Serve(lis net.Listener) error { timer := time.NewTimer(tempDelay) select { case <-timer.C: - case <-s.ctx.Done(): + case <-s.quit: + timer.Stop() + return nil } - timer.Stop() continue } s.mu.Lock() s.printf("done serving; Accept = %v", err) s.mu.Unlock() + + select { + case <-s.quit: + return nil + default: + } return err } tempDelay = 0 - // Start a new goroutine to deal with rawConn - // so we don't stall this Accept loop goroutine. - go s.handleRawConn(rawConn) + // Start a new goroutine to deal with rawConn so we don't stall this Accept + // loop goroutine. + // + // Make sure we account for the goroutine so GracefulStop doesn't nil out + // s.conns before this conn can be added. + s.serveWG.Add(1) + go func() { + s.handleRawConn(rawConn) + s.serveWG.Done() + }() } } -// handleRawConn is run in its own goroutine and handles a just-accepted -// connection that has not had any I/O performed on it yet. +// handleRawConn forks a goroutine to handle a just-accepted connection that +// has not had any I/O performed on it yet. func (s *Server) handleRawConn(rawConn net.Conn) { rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) conn, authInfo, err := s.useTransportAuthenticator(rawConn) @@ -534,17 +617,28 @@ func (s *Server) handleRawConn(rawConn net.Conn) { } s.mu.Unlock() + var serve func() + c := conn.(io.Closer) if s.opts.useHandlerImpl { - rawConn.SetDeadline(time.Time{}) - s.serveUsingHandler(conn) + serve = func() { s.serveUsingHandler(conn) } } else { + // Finish handshaking (HTTP2) st := s.newHTTP2Transport(conn, authInfo) if st == nil { return } - rawConn.SetDeadline(time.Time{}) - s.serveStreams(st) + c = st + serve = func() { s.serveStreams(st) } + } + + rawConn.SetDeadline(time.Time{}) + if !s.addConn(c) { + return } + go func() { + serve() + s.removeConn(c) + }() } // newHTTP2Transport sets up a http/2 transport (using the @@ -561,6 +655,7 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr InitialConnWindowSize: s.opts.initialConnWindowSize, WriteBufferSize: s.opts.writeBufferSize, ReadBufferSize: s.opts.readBufferSize, + ChannelzParentID: s.channelzID, } st, err := transport.NewServerTransport("http2", c, config) if err != nil { @@ -571,15 +666,11 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err) return nil } - if !s.addConn(st) { - st.Close() - return nil - } + return st } func (s *Server) serveStreams(st transport.ServerTransport) { - defer s.removeConn(st) defer st.Close() var wg sync.WaitGroup st.HandleStreams(func(stream *transport.Stream) { @@ -613,11 +704,6 @@ var _ http.Handler = (*Server)(nil) // // conn is the *tls.Conn that's already been authenticated. 
func (s *Server) serveUsingHandler(conn net.Conn) { - if !s.addConn(conn) { - conn.Close() - return - } - defer s.removeConn(conn) h2s := &http2.Server{ MaxConcurrentStreams: s.opts.maxConcurrentStreams, } @@ -651,13 +737,12 @@ func (s *Server) serveUsingHandler(conn net.Conn) { // available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL // and subject to change. func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r) + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } if !s.addConn(st) { - st.Close() return } defer s.removeConn(st) @@ -687,9 +772,15 @@ func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Strea func (s *Server) addConn(c io.Closer) bool { s.mu.Lock() defer s.mu.Unlock() - if s.conns == nil || s.drain { + if s.conns == nil { + c.Close() return false } + if s.drain { + // Transport added after we drained our existing conns: drain it + // immediately. + c.(transport.ServerTransport).Drain() + } s.conns[c] = true return true } @@ -703,43 +794,83 @@ func (s *Server) removeConn(c io.Closer) { } } -func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options) error { - var ( - cbuf *bytes.Buffer - outPayload *stats.OutPayload - ) - if cp != nil { - cbuf = new(bytes.Buffer) - } - if s.opts.statsHandler != nil { - outPayload = &stats.OutPayload{} +// ChannelzMetric returns ServerInternalMetric of current server. +// This is an EXPERIMENTAL API. +func (s *Server) ChannelzMetric() *channelz.ServerInternalMetric { + s.czmu.RLock() + defer s.czmu.RUnlock() + return &channelz.ServerInternalMetric{ + CallsStarted: s.callsStarted, + CallsSucceeded: s.callsSucceeded, + CallsFailed: s.callsFailed, + LastCallStartedTimestamp: s.lastCallStartedTime, } - hdr, data, err := encode(s.opts.codec, msg, cp, cbuf, outPayload) +} + +func (s *Server) incrCallsStarted() { + s.czmu.Lock() + s.callsStarted++ + s.lastCallStartedTime = time.Now() + s.czmu.Unlock() +} + +func (s *Server) incrCallsSucceeded() { + s.czmu.Lock() + s.callsSucceeded++ + s.czmu.Unlock() +} + +func (s *Server) incrCallsFailed() { + s.czmu.Lock() + s.callsFailed++ + s.czmu.Unlock() +} + +func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { + data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { grpclog.Errorln("grpc: server failed to encode response: ", err) return err } - if len(data) > s.opts.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(data), s.opts.maxSendMessageSize) + compData, err := compress(data, cp, comp) + if err != nil { + grpclog.Errorln("grpc: server failed to compress response: ", err) + return err } - err = t.Write(stream, hdr, data, opts) - if err == nil && outPayload != nil { - outPayload.SentTime = time.Now() - s.opts.statsHandler.HandleRPC(stream.Context(), outPayload) + hdr, payload := msgHeader(data, compData) + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", len(payload), s.opts.maxSendMessageSize) + } + err = t.Write(stream, hdr, payload, opts) + if err == nil && s.opts.statsHandler != nil { + s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) } return err } func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) { + if channelz.IsOn() { + s.incrCallsStarted() + defer func() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + }() + } sh := s.opts.statsHandler if sh != nil { + beginTime := time.Now() begin := &stats.Begin{ - BeginTime: time.Now(), + BeginTime: beginTime, } sh.HandleRPC(stream.Context(), begin) defer func() { end := &stats.End{ - EndTime: time.Now(), + BeginTime: beginTime, + EndTime: time.Now(), } if err != nil && err != io.EOF { end.Error = toRPCErr(err) @@ -758,10 +889,43 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } }() } + + // comp and cp are used for compression. decomp and dc are used for + // decompression. If comp and decomp are both set, they are the same; + // however they are kept separate to ensure that at most one of the + // compressor/decompressor variable pairs are set for use later. + var comp, decomp encoding.Compressor + var cp Compressor + var dc Decompressor + + // If dc is set and matches the stream's compression, use it. Otherwise, try + // to find a matching registered compressor for decomp. + if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { + dc = s.opts.dc + } else if rc != "" && rc != encoding.Identity { + decomp = encoding.GetCompressor(rc) + if decomp == nil { + st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) + t.WriteStatus(stream, st) + return st.Err() + } + } + + // If cp is set, use it. Otherwise, attempt to compress the response using + // the incoming message compression method. + // + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. if s.opts.cp != nil { - // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. - stream.SetSendCompress(s.opts.cp.Type()) + cp = s.opts.cp + stream.SetSendCompress(cp.Type()) + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. + comp = encoding.GetCompressor(rc) + if comp != nil { + stream.SetSendCompress(rc) + } } + p := &parser{r: stream} pf, req, err := p.recvMsg(s.opts.maxReceiveMessageSize) if err == io.EOF { @@ -769,7 +933,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. return err } if err == io.ErrUnexpectedEOF { - err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) } if err != nil { if st, ok := status.FromError(err); ok { @@ -790,19 +954,14 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
} return err } - - if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil { - if st, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, st); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) - } - return err - } - if e := t.WriteStatus(stream, status.New(codes.Internal, err.Error())); e != nil { + if channelz.IsOn() { + t.IncrMsgRecv() + } + if st := checkRecvPayload(pf, stream.RecvCompress(), dc != nil || decomp != nil); st != nil { + if e := t.WriteStatus(stream, st); e != nil { grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) } - - // TODO checkRecvPayload always return RPC error. Add a return here if necessary. + return st.Err() } var inPayload *stats.InPayload if sh != nil { @@ -816,9 +975,17 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } if pf == compressionMade { var err error - req, err = s.opts.dc.Do(bytes.NewReader(req)) - if err != nil { - return Errorf(codes.Internal, err.Error()) + if dc != nil { + req, err = dc.Do(bytes.NewReader(req)) + if err != nil { + return status.Errorf(codes.Internal, err.Error()) + } + } else { + tmp, _ := decomp.Decompress(bytes.NewReader(req)) + req, err = ioutil.ReadAll(tmp) + if err != nil { + return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + } } } if len(req) > s.opts.maxReceiveMessageSize { @@ -826,7 +993,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // java implementation. return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(req), s.opts.maxReceiveMessageSize) } - if err := s.opts.codec.Unmarshal(req, v); err != nil { + if err := s.getCodec(stream.ContentSubtype()).Unmarshal(req, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } if inPayload != nil { @@ -840,12 +1007,13 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } return nil } - reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt) + ctx := NewContextWithServerTransportStream(stream.Context(), stream) + reply, appErr := md.Handler(srv.server, ctx, df, s.opts.unaryInt) if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { // Convert appErr if it is not a grpc status error. - appErr = status.Error(convertCode(appErr), appErr.Error()) + appErr = status.Error(codes.Unknown, appErr.Error()) appStatus, _ = status.FromError(appErr) } if trInfo != nil { @@ -864,7 +1032,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Last: true, Delay: false, } - if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil { + + if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). return err @@ -887,6 +1056,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } return err } + if channelz.IsOn() { + t.IncrMsgSent() + } if trInfo != nil { trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) } @@ -897,15 +1069,27 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
} func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { + if channelz.IsOn() { + s.incrCallsStarted() + defer func() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + }() + } sh := s.opts.statsHandler if sh != nil { + beginTime := time.Now() begin := &stats.Begin{ - BeginTime: time.Now(), + BeginTime: beginTime, } sh.HandleRPC(stream.Context(), begin) defer func() { end := &stats.End{ - EndTime: time.Now(), + BeginTime: beginTime, + EndTime: time.Now(), } if err != nil && err != io.EOF { end.Error = toRPCErr(err) @@ -913,21 +1097,47 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp sh.HandleRPC(stream.Context(), end) }() } - if s.opts.cp != nil { - stream.SetSendCompress(s.opts.cp.Type()) - } + ctx := NewContextWithServerTransportStream(stream.Context(), stream) ss := &serverStream{ + ctx: ctx, t: t, s: stream, p: &parser{r: stream}, - codec: s.opts.codec, - cp: s.opts.cp, - dc: s.opts.dc, + codec: s.getCodec(stream.ContentSubtype()), maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, trInfo: trInfo, statsHandler: sh, } + + // If dc is set and matches the stream's compression, use it. Otherwise, try + // to find a matching registered compressor for decomp. + if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { + ss.dc = s.opts.dc + } else if rc != "" && rc != encoding.Identity { + ss.decomp = encoding.GetCompressor(rc) + if ss.decomp == nil { + st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) + t.WriteStatus(ss.s, st) + return st.Err() + } + } + + // If cp is set, use it. Otherwise, attempt to compress the response using + // the incoming message compression method. + // + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + if s.opts.cp != nil { + ss.cp = s.opts.cp + stream.SetSendCompress(s.opts.cp.Type()) + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. + ss.comp = encoding.GetCompressor(rc) + if ss.comp != nil { + stream.SetSendCompress(rc) + } + } + if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) defer func() { @@ -963,7 +1173,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp case transport.StreamError: appStatus = status.New(err.Code, err.Desc) default: - appStatus = status.New(convertCode(appErr), appErr.Error()) + appStatus = status.New(codes.Unknown, appErr.Error()) } appErr = appStatus.Err() } @@ -983,7 +1193,6 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.mu.Unlock() } return t.WriteStatus(ss.s, status.New(codes.OK, "")) - } func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { @@ -1065,12 +1274,65 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str } } +// The key to save ServerTransportStream in the context. +type streamKey struct{} + +// NewContextWithServerTransportStream creates a new context from ctx and +// attaches stream to it. +// +// This API is EXPERIMENTAL. 
+func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context { + return context.WithValue(ctx, streamKey{}, stream) +} + +// ServerTransportStream is a minimal interface that a transport stream must +// implement. This can be used to mock an actual transport stream for tests of +// handler code that use, for example, grpc.SetHeader (which requires some +// stream to be in context). +// +// See also NewContextWithServerTransportStream. +// +// This API is EXPERIMENTAL. +type ServerTransportStream interface { + Method() string + SetHeader(md metadata.MD) error + SendHeader(md metadata.MD) error + SetTrailer(md metadata.MD) error +} + +// ServerTransportStreamFromContext returns the ServerTransportStream saved in +// ctx. Returns nil if the given context has no stream associated with it +// (which implies it is not an RPC invocation context). +// +// This API is EXPERIMENTAL. +func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream { + s, _ := ctx.Value(streamKey{}).(ServerTransportStream) + return s +} + // Stop stops the gRPC server. It immediately closes all open // connections and listeners. // It cancels all active RPCs on the server side and the corresponding // pending RPCs on the client side will get notified by connection // errors. func (s *Server) Stop() { + s.quitOnce.Do(func() { + close(s.quit) + }) + + defer func() { + s.serveWG.Wait() + s.doneOnce.Do(func() { + close(s.done) + }) + }() + + s.channelzRemoveOnce.Do(func() { + if channelz.IsOn() { + channelz.RemoveEntry(s.channelzID) + } + }) + s.mu.Lock() listeners := s.lis s.lis = nil @@ -1088,7 +1350,6 @@ func (s *Server) Stop() { } s.mu.Lock() - s.cancel() if s.events != nil { s.events.Finish() s.events = nil @@ -1100,22 +1361,44 @@ func (s *Server) Stop() { // accepting new connections and RPCs and blocks until all the pending RPCs are // finished. func (s *Server) GracefulStop() { + s.quitOnce.Do(func() { + close(s.quit) + }) + + defer func() { + s.doneOnce.Do(func() { + close(s.done) + }) + }() + + s.channelzRemoveOnce.Do(func() { + if channelz.IsOn() { + channelz.RemoveEntry(s.channelzID) + } + }) s.mu.Lock() - defer s.mu.Unlock() if s.conns == nil { + s.mu.Unlock() return } + for lis := range s.lis { lis.Close() } s.lis = nil - s.cancel() if !s.drain { for c := range s.conns { c.(transport.ServerTransport).Drain() } s.drain = true } + + // Wait for serving threads to be ready to exit. Only then can we be sure no + // new conns will be created. + s.mu.Unlock() + s.serveWG.Wait() + s.mu.Lock() + for len(s.conns) != 0 { s.cv.Wait() } @@ -1124,26 +1407,29 @@ func (s *Server) GracefulStop() { s.events.Finish() s.events = nil } + s.mu.Unlock() } func init() { - internal.TestingCloseConns = func(arg interface{}) { - arg.(*Server).testingCloseConns() - } internal.TestingUseHandlerImpl = func(arg interface{}) { arg.(*Server).opts.useHandlerImpl = true } } -// testingCloseConns closes all existing transports but keeps s.lis -// accepting new connections. 
-func (s *Server) testingCloseConns() { - s.mu.Lock() - for c := range s.conns { - c.Close() - delete(s.conns, c) +// contentSubtype must be lowercase +// cannot return nil +func (s *Server) getCodec(contentSubtype string) baseCodec { + if s.opts.codec != nil { + return s.opts.codec } - s.mu.Unlock() + if contentSubtype == "" { + return encoding.GetCodec(proto.Name) + } + codec := encoding.GetCodec(contentSubtype) + if codec == nil { + return encoding.GetCodec(proto.Name) + } + return codec } // SetHeader sets the header metadata. @@ -1156,9 +1442,9 @@ func SetHeader(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil } - stream, ok := transport.StreamFromContext(ctx) - if !ok { - return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) } return stream.SetHeader(md) } @@ -1166,15 +1452,11 @@ func SetHeader(ctx context.Context, md metadata.MD) error { // SendHeader sends header metadata. It may be called at most once. // The provided md and headers set by SetHeader() will be sent. func SendHeader(ctx context.Context, md metadata.MD) error { - stream, ok := transport.StreamFromContext(ctx) - if !ok { - return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) - } - t := stream.ServerTransport() - if t == nil { - grpclog.Fatalf("grpc: SendHeader: %v has no ServerTransport to send header metadata.", stream) + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) } - if err := t.WriteHeader(stream, md); err != nil { + if err := stream.SendHeader(md); err != nil { return toRPCErr(err) } return nil @@ -1186,9 +1468,19 @@ func SetTrailer(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil } - stream, ok := transport.StreamFromContext(ctx) - if !ok { - return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) } return stream.SetTrailer(md) } + +// Method returns the method string for the server context. The returned +// string is in the format of "/service/method". +func Method(ctx context.Context) (string, bool) { + s := ServerTransportStreamFromContext(ctx) + if s == nil { + return "", false + } + return s.Method(), true +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/service_config.go b/cluster-autoscaler/vendor/google.golang.org/grpc/service_config.go new file mode 100644 index 000000000000..015631d8d38a --- /dev/null +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/service_config.go @@ -0,0 +1,233 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+const maxInt = int(^uint(0) >> 1)
+
+// MethodConfig defines the configuration recommended by the service providers for a
+// particular method.
+//
+// Deprecated: Users should not use this struct. Service config should be received
+// through the name resolver, as specified here:
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md
+type MethodConfig struct {
+	// WaitForReady indicates whether RPCs sent to this method should wait until
+	// the connection is ready by default (!failfast). The value specified via the
+	// gRPC client API will override the value set here.
+	WaitForReady *bool
+	// Timeout is the default timeout for RPCs sent to this method. The actual
+	// deadline used will be the minimum of the value specified here and the value
+	// set by the application via the gRPC client API. If either one is not set,
+	// then the other will be used. If neither is set, then the RPC has no deadline.
+	Timeout *time.Duration
+	// MaxReqSize is the maximum allowed payload size for an individual request in a
+	// stream (client->server) in bytes. The size which is measured is the serialized
+	// payload after per-message compression (but before stream compression) in bytes.
+	// The actual value used is the minimum of the value specified here and the value set
+	// by the application via the gRPC client API. If either one is not set, then the other
+	// will be used. If neither is set, then the built-in default is used.
+	MaxReqSize *int
+	// MaxRespSize is the maximum allowed payload size for an individual response in a
+	// stream (server->client) in bytes.
+	MaxRespSize *int
+}
+
+// ServiceConfig is provided by the service provider and contains parameters for how
+// clients that connect to the service should behave.
+//
+// Deprecated: Users should not use this struct. Service config should be received
+// through the name resolver, as specified here:
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md
+type ServiceConfig struct {
+	// LB is the load balancer the service provider recommends. The balancer specified
+	// via grpc.WithBalancer will override this.
+	LB *string
+	// Methods contains a map for the methods in this service.
+	// If there is an exact match for a method (i.e. /service/method) in the map, use the corresponding MethodConfig.
+	// If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists.
+	// Otherwise, the method has no MethodConfig to use.
+	Methods map[string]MethodConfig
+
+	stickinessMetadataKey *string
+}
+
+func parseDuration(s *string) (*time.Duration, error) {
+	if s == nil {
+		return nil, nil
+	}
+	if !strings.HasSuffix(*s, "s") {
+		return nil, fmt.Errorf("malformed duration %q", *s)
+	}
+	ss := strings.SplitN((*s)[:len(*s)-1], ".", 3)
+	if len(ss) > 2 {
+		return nil, fmt.Errorf("malformed duration %q", *s)
+	}
+	// hasDigits is set if either the whole or fractional part of the number is
+	// present, since both are optional but one is required.
+ hasDigits := false + var d time.Duration + if len(ss[0]) > 0 { + i, err := strconv.ParseInt(ss[0], 10, 32) + if err != nil { + return nil, fmt.Errorf("malformed duration %q: %v", *s, err) + } + d = time.Duration(i) * time.Second + hasDigits = true + } + if len(ss) == 2 && len(ss[1]) > 0 { + if len(ss[1]) > 9 { + return nil, fmt.Errorf("malformed duration %q", *s) + } + f, err := strconv.ParseInt(ss[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("malformed duration %q: %v", *s, err) + } + for i := 9; i > len(ss[1]); i-- { + f *= 10 + } + d += time.Duration(f) + hasDigits = true + } + if !hasDigits { + return nil, fmt.Errorf("malformed duration %q", *s) + } + + return &d, nil +} + +type jsonName struct { + Service *string + Method *string +} + +func (j jsonName) generatePath() (string, bool) { + if j.Service == nil { + return "", false + } + res := "/" + *j.Service + "/" + if j.Method != nil { + res += *j.Method + } + return res, true +} + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. +type jsonMC struct { + Name *[]jsonName + WaitForReady *bool + Timeout *string + MaxRequestMessageBytes *int64 + MaxResponseMessageBytes *int64 +} + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. +type jsonSC struct { + LoadBalancingPolicy *string + StickinessMetadataKey *string + MethodConfig *[]jsonMC +} + +func parseServiceConfig(js string) (ServiceConfig, error) { + var rsc jsonSC + err := json.Unmarshal([]byte(js), &rsc) + if err != nil { + grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return ServiceConfig{}, err + } + sc := ServiceConfig{ + LB: rsc.LoadBalancingPolicy, + Methods: make(map[string]MethodConfig), + + stickinessMetadataKey: rsc.StickinessMetadataKey, + } + if rsc.MethodConfig == nil { + return sc, nil + } + + for _, m := range *rsc.MethodConfig { + if m.Name == nil { + continue + } + d, err := parseDuration(m.Timeout) + if err != nil { + grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return ServiceConfig{}, err + } + + mc := MethodConfig{ + WaitForReady: m.WaitForReady, + Timeout: d, + } + if m.MaxRequestMessageBytes != nil { + if *m.MaxRequestMessageBytes > int64(maxInt) { + mc.MaxReqSize = newInt(maxInt) + } else { + mc.MaxReqSize = newInt(int(*m.MaxRequestMessageBytes)) + } + } + if m.MaxResponseMessageBytes != nil { + if *m.MaxResponseMessageBytes > int64(maxInt) { + mc.MaxRespSize = newInt(maxInt) + } else { + mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes)) + } + } + for _, n := range *m.Name { + if path, valid := n.generatePath(); valid { + sc.Methods[path] = mc + } + } + } + + return sc, nil +} + +func min(a, b *int) *int { + if *a < *b { + return a + } + return b +} + +func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { + if mcMax == nil && doptMax == nil { + return &defaultVal + } + if mcMax != nil && doptMax != nil { + return min(mcMax, doptMax) + } + if mcMax != nil { + return mcMax + } + return doptMax +} + +func newInt(b int) *int { + return &b +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/stats/stats.go b/cluster-autoscaler/vendor/google.golang.org/grpc/stats/stats.go index e844541e9c06..22692b69b517 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/stats/stats.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/stats/stats.go @@ -169,6 +169,8 @@ func (s *OutTrailer) isRPCStats() {} type End struct { // Client is true if this End is from client 
side. Client bool + // BeginTime is the time when the RPC began. + BeginTime time.Time // EndTime is the time when the RPC ends. EndTime time.Time // Error is the error the RPC ended with. It is an error generated from diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/status/go16.go b/cluster-autoscaler/vendor/google.golang.org/grpc/status/go16.go new file mode 100644 index 000000000000..e59b53e82be7 --- /dev/null +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/status/go16.go @@ -0,0 +1,42 @@ +// +build go1.6,!go1.7 + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package status + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/codes" +) + +// FromContextError converts a context error into a Status. It returns a +// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is +// non-nil and not a context error. +func FromContextError(err error) *Status { + switch err { + case nil: + return New(codes.OK, "") + case context.DeadlineExceeded: + return New(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return New(codes.Canceled, err.Error()) + default: + return New(codes.Unknown, err.Error()) + } +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/status/go17.go b/cluster-autoscaler/vendor/google.golang.org/grpc/status/go17.go new file mode 100644 index 000000000000..090215149cfb --- /dev/null +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/status/go17.go @@ -0,0 +1,44 @@ +// +build go1.7 + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package status + +import ( + "context" + + netctx "golang.org/x/net/context" + "google.golang.org/grpc/codes" +) + +// FromContextError converts a context error into a Status. It returns a +// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is +// non-nil and not a context error. 
+func FromContextError(err error) *Status { + switch err { + case nil: + return New(codes.OK, "") + case context.DeadlineExceeded, netctx.DeadlineExceeded: + return New(codes.DeadlineExceeded, err.Error()) + case context.Canceled, netctx.Canceled: + return New(codes.Canceled, err.Error()) + default: + return New(codes.Unknown, err.Error()) + } +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/status/status.go b/cluster-autoscaler/vendor/google.golang.org/grpc/status/status.go index 871dc4b31c76..9c61b094508f 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/status/status.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/status/status.go @@ -46,7 +46,7 @@ func (se *statusError) Error() string { return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage()) } -func (se *statusError) status() *Status { +func (se *statusError) GRPCStatus() *Status { return &Status{s: (*spb.Status)(se)} } @@ -120,15 +120,23 @@ func FromProto(s *spb.Status) *Status { } // FromError returns a Status representing err if it was produced from this -// package, otherwise it returns nil, false. +// package or has a method `GRPCStatus() *Status`. Otherwise, ok is false and a +// Status is returned with codes.Unknown and the original error message. func FromError(err error) (s *Status, ok bool) { if err == nil { return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true } - if s, ok := err.(*statusError); ok { - return s.status(), true + if se, ok := err.(interface{ GRPCStatus() *Status }); ok { + return se.GRPCStatus(), true } - return nil, false + return New(codes.Unknown, err.Error()), false +} + +// Convert is a convenience function which removes the need to handle the +// boolean return value from FromError. +func Convert(err error) *Status { + s, _ := FromError(err) + return s } // WithDetails returns a new status with the provided details messages appended to the status. @@ -166,3 +174,16 @@ func (s *Status) Details() []interface{} { } return details } + +// Code returns the Code of the error if it is a Status error, codes.OK if err +// is nil, or codes.Unknown otherwise. +func Code(err error) codes.Code { + // Don't use FromError to avoid allocation of OK status. + if err == nil { + return codes.OK + } + if se, ok := err.(interface{ GRPCStatus() *Status }); ok { + return se.GRPCStatus().Code() + } + return codes.Unknown +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/stickiness_linkedmap.go b/cluster-autoscaler/vendor/google.golang.org/grpc/stickiness_linkedmap.go new file mode 100644 index 000000000000..1c726af1640d --- /dev/null +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/stickiness_linkedmap.go @@ -0,0 +1,97 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "container/list" +) + +type linkedMapKVPair struct { + key string + value *stickyStoreEntry +} + +// linkedMap is an implementation of a map that supports removing the oldest +// entry. +// +// linkedMap is NOT thread safe. +// +// It's for use of stickiness only! +type linkedMap struct { + m map[string]*list.Element + l *list.List // Head of the list is the oldest element. +} + +// newLinkedMap returns a new LinkedMap. +func newLinkedMap() *linkedMap { + return &linkedMap{ + m: make(map[string]*list.Element), + l: list.New(), + } +} + +// put adds entry (key, value) to the map. Existing key will be overridden. +func (m *linkedMap) put(key string, value *stickyStoreEntry) { + if oldE, ok := m.m[key]; ok { + // Remove existing entry. + m.l.Remove(oldE) + } + e := m.l.PushBack(&linkedMapKVPair{key: key, value: value}) + m.m[key] = e +} + +// get returns the value of the given key. +func (m *linkedMap) get(key string) (*stickyStoreEntry, bool) { + e, ok := m.m[key] + if !ok { + return nil, false + } + m.l.MoveToBack(e) + return e.Value.(*linkedMapKVPair).value, true +} + +// remove removes key from the map, and returns the value. The map is not +// modified if key is not in the map. +func (m *linkedMap) remove(key string) (*stickyStoreEntry, bool) { + e, ok := m.m[key] + if !ok { + return nil, false + } + delete(m.m, key) + m.l.Remove(e) + return e.Value.(*linkedMapKVPair).value, true +} + +// len returns the len of the map. +func (m *linkedMap) len() int { + return len(m.m) +} + +// clear removes all elements from the map. +func (m *linkedMap) clear() { + m.m = make(map[string]*list.Element) + m.l = list.New() +} + +// removeOldest removes the oldest key from the map. +func (m *linkedMap) removeOldest() { + e := m.l.Front() + m.l.Remove(e) + delete(m.m, e.Value.(*linkedMapKVPair).key) +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/stream.go b/cluster-autoscaler/vendor/google.golang.org/grpc/stream.go index 75eab40b1095..152d9eccd62d 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/stream.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/stream.go @@ -19,7 +19,6 @@ package grpc import ( - "bytes" "errors" "io" "sync" @@ -29,15 +28,19 @@ import ( "golang.org/x/net/trace" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/transport" ) // StreamHandler defines the handler called by gRPC server to complete the -// execution of a streaming RPC. +// execution of a streaming RPC. If a StreamHandler returns an error, it +// should be produced by the status package, or else gRPC will use +// codes.Unknown as the status code and err.Error() as the status message +// of the RPC. type StreamHandler func(srv interface{}, stream ServerStream) error // StreamDesc represents a streaming RPC service's method specification. @@ -51,6 +54,8 @@ type StreamDesc struct { } // Stream defines the common interface a client or server stream has to satisfy. +// +// All errors returned from Stream are compatible with the status package. type Stream interface { // Context returns the context for this stream. Context() context.Context @@ -89,43 +94,78 @@ type ClientStream interface { // Stream.SendMsg() may return a non-nil error when something wrong happens sending // the request. 
The returned error indicates the status of this sending, not the final // status of the RPC. - // Always call Stream.RecvMsg() to get the final status if you care about the status of - // the RPC. + // + // Always call Stream.RecvMsg() to drain the stream and get the final + // status, otherwise there could be leaked resources. Stream } -// NewClientStream creates a new Stream for the client side. This is called -// by generated code. -func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { +// NewStream creates a new Stream for the client side. This is typically +// called by generated code. ctx is used for the lifetime of the stream. +// +// To ensure resources are not leaked due to the stream returned, one of the following +// actions must be performed: +// +// 1. Call Close on the ClientConn. +// 2. Cancel the context provided. +// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated +// client-streaming RPC, for instance, might use the helper function +// CloseAndRecv (note that CloseSend does not Recv, therefore is not +// guaranteed to release all resources). +// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. +// +// If none of the above happen, a goroutine and a context will be leaked, and grpc +// will not call the optionally-configured stats handler with a stats.End message. +func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) + if cc.dopts.streamInt != nil { return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...) } return newClientStream(ctx, desc, cc, method, opts...) } +// NewClientStream is a wrapper for ClientConn.NewStream. +// +// DEPRECATED: Use ClientConn.NewStream instead. +func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { + return cc.NewStream(ctx, desc, method, opts...) +} + func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { - var ( - t transport.ClientTransport - s *transport.Stream - done func(balancer.DoneInfo) - cancel context.CancelFunc - ) + if channelz.IsOn() { + cc.incrCallsStarted() + defer func() { + if err != nil { + cc.incrCallsFailed() + } + }() + } c := defaultCallInfo() mc := cc.GetMethodConfig(method) if mc.WaitForReady != nil { c.failFast = !*mc.WaitForReady } - if mc.Timeout != nil { + // Possible context leak: + // The cancel function for the child context we create will only be called + // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if + // an error is generated by SendMsg. + // https://github.com/grpc/grpc-go/issues/1818. + var cancel context.CancelFunc + if mc.Timeout != nil && *mc.Timeout >= 0 { ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) - defer func() { - if err != nil { - cancel() - } - }() + } else { + ctx, cancel = context.WithCancel(ctx) } + defer func() { + if err != nil { + cancel() + } + }() - opts = append(cc.dopts.callOptions, opts...) 
for _, o := range opts { if err := o.before(c); err != nil { return nil, toRPCErr(err) @@ -133,6 +173,9 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + if err := setCallInfoCodec(c); err != nil { + return nil, err + } callHdr := &transport.CallHdr{ Host: cc.authority, @@ -141,10 +184,27 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth // so we don't flush the header. // If it's client streaming, the user may never send a request or send it any // time soon, so we ask the transport to flush the header. - Flush: desc.ClientStreams, - } - if cc.dopts.cp != nil { + Flush: desc.ClientStreams, + ContentSubtype: c.contentSubtype, + } + + // Set our outgoing compression according to the UseCompressor CallOption, if + // set. In that case, also find the compressor from the encoding package. + // Otherwise, use the compressor configured by the WithCompressor DialOption, + // if set. + var cp Compressor + var comp encoding.Compressor + if ct := c.compressorType; ct != "" { + callHdr.SendCompress = ct + if ct != encoding.Identity { + comp = encoding.GetCompressor(ct) + if comp == nil { + return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) + } + } + } else if cc.dopts.cp != nil { callHdr.SendCompress = cc.dopts.cp.Type() + cp = cc.dopts.cp } if c.creds != nil { callHdr.Creds = c.creds @@ -170,11 +230,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } ctx = newContextWithRPCInfo(ctx, c.failFast) sh := cc.dopts.copts.StatsHandler + var beginTime time.Time if sh != nil { ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) + beginTime = time.Now() begin := &stats.Begin{ Client: true, - BeginTime: time.Now(), + BeginTime: beginTime, FailFast: c.failFast, } sh.HandleRPC(ctx, begin) @@ -182,341 +244,384 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth if err != nil { // Only handle end stats if err != nil. end := &stats.End{ - Client: true, - Error: err, + Client: true, + Error: err, + BeginTime: beginTime, + EndTime: time.Now(), } sh.HandleRPC(ctx, end) } }() } + + var ( + t transport.ClientTransport + s *transport.Stream + done func(balancer.DoneInfo) + ) for { + // Check to make sure the context has expired. This will prevent us from + // looping forever if an error occurs for wait-for-ready RPCs where no data + // is sent on the wire. + select { + case <-ctx.Done(): + return nil, toRPCErr(ctx.Err()) + default: + } + t, done, err = cc.getTransport(ctx, c.failFast) if err != nil { - // TODO(zhaoq): Probably revisit the error handling. - if _, ok := status.FromError(err); ok { - return nil, err - } - if err == errConnClosing || err == errConnUnavailable { - if c.failFast { - return nil, Errorf(codes.Unavailable, "%v", err) - } - continue - } - // All the other errors are treated as Internal errors. - return nil, Errorf(codes.Internal, "%v", err) + return nil, err } s, err = t.NewStream(ctx, callHdr) if err != nil { - if _, ok := err.(transport.ConnectionError); ok && done != nil { - // If error is connection error, transport was sending data on wire, - // and we are not sure if anything has been sent on wire. 
- // If error is not connection error, we are sure nothing has been sent. - updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false}) - } if done != nil { done(balancer.DoneInfo{Err: err}) done = nil } - if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { + // In the event of any error from NewStream, we never attempted to write + // anything to the wire, so we can retry indefinitely for non-fail-fast + // RPCs. + if !c.failFast { continue } return nil, toRPCErr(err) } break } - // Set callInfo.peer object from stream's context. - if peer, ok := peer.FromContext(s.Context()); ok { - c.peer = peer - } + cs := &clientStream{ opts: opts, c: c, + cc: cc, desc: desc, - codec: cc.dopts.codec, - cp: cc.dopts.cp, - dc: cc.dopts.dc, + codec: c.codec, + cp: cp, + comp: comp, cancel: cancel, - - done: done, - t: t, - s: s, - p: &parser{r: s}, - - tracing: EnableTracing, - trInfo: trInfo, - - statsCtx: ctx, - statsHandler: cc.dopts.copts.StatsHandler, + attempt: &csAttempt{ + t: t, + s: s, + p: &parser{r: s}, + done: done, + dc: cc.dopts.dc, + ctx: ctx, + trInfo: trInfo, + statsHandler: sh, + beginTime: beginTime, + }, + } + cs.c.stream = cs + cs.attempt.cs = cs + if desc != unaryStreamDesc { + // Listen on cc and stream contexts to cleanup when the user closes the + // ClientConn or cancels the stream context. In all other cases, an error + // should already be injected into the recv buffer by the transport, which + // the client will eventually receive, and then we will cancel the stream's + // context in clientStream.finish. + go func() { + select { + case <-cc.ctx.Done(): + cs.finish(ErrClientConnClosing) + case <-ctx.Done(): + cs.finish(toRPCErr(ctx.Err())) + } + }() } - // Listen on ctx.Done() to detect cancellation and s.Done() to detect normal termination - // when there is no pending I/O operations on this stream. - go func() { - select { - case <-t.Error(): - // Incur transport error, simply exit. - case <-cc.ctx.Done(): - cs.finish(ErrClientConnClosing) - cs.closeTransportStream(ErrClientConnClosing) - case <-s.Done(): - // TODO: The trace of the RPC is terminated here when there is no pending - // I/O, which is probably not the optimal solution. - cs.finish(s.Status().Err()) - cs.closeTransportStream(nil) - case <-s.GoAway(): - cs.finish(errConnDrain) - cs.closeTransportStream(errConnDrain) - case <-s.Context().Done(): - err := s.Context().Err() - cs.finish(err) - cs.closeTransportStream(transport.ContextErr(err)) - } - }() return cs, nil } // clientStream implements a client side Stream. type clientStream struct { - opts []CallOption - c *callInfo - t transport.ClientTransport - s *transport.Stream - p *parser - desc *StreamDesc - codec Codec - cp Compressor - dc Decompressor - cancel context.CancelFunc + opts []CallOption + c *callInfo + cc *ClientConn + desc *StreamDesc - tracing bool // set to EnableTracing when the clientStream is created. + codec baseCodec + cp Compressor + comp encoding.Compressor - mu sync.Mutex - done func(balancer.DoneInfo) - closed bool - finished bool - // trInfo.tr is set when the clientStream is created (if EnableTracing is true), - // and is set to nil when the clientStream's finish method is called. + cancel context.CancelFunc // cancels all attempts + + sentLast bool // sent an end stream + + mu sync.Mutex // guards finished + finished bool // TODO: replace with atomic cmpxchg or sync.Once? 
+ + attempt *csAttempt // the active client stream attempt + // TODO(hedging): hedging will have multiple attempts simultaneously. +} + +// csAttempt implements a single transport stream attempt within a +// clientStream. +type csAttempt struct { + cs *clientStream + t transport.ClientTransport + s *transport.Stream + p *parser + done func(balancer.DoneInfo) + + dc Decompressor + decomp encoding.Compressor + decompSet bool + + ctx context.Context // the application's context, wrapped by stats/tracing + + mu sync.Mutex // guards trInfo.tr + // trInfo.tr is set when created (if EnableTracing is true), + // and cleared when the finish method is called. trInfo traceInfo - // statsCtx keeps the user context for stats handling. - // All stats collection should use the statsCtx (instead of the stream context) - // so that all the generated stats for a particular RPC can be associated in the processing phase. - statsCtx context.Context statsHandler stats.Handler + beginTime time.Time } func (cs *clientStream) Context() context.Context { - return cs.s.Context() + // TODO(retry): commit the current attempt (the context has peer-aware data). + return cs.attempt.context() } func (cs *clientStream) Header() (metadata.MD, error) { - m, err := cs.s.Header() + m, err := cs.attempt.header() if err != nil { - if _, ok := err.(transport.ConnectionError); !ok { - cs.closeTransportStream(err) - } + // TODO(retry): maybe retry on error or commit attempt on success. + err = toRPCErr(err) + cs.finish(err) } return m, err } func (cs *clientStream) Trailer() metadata.MD { - return cs.s.Trailer() + // TODO(retry): on error, maybe retry (trailers-only). + return cs.attempt.trailer() } func (cs *clientStream) SendMsg(m interface{}) (err error) { - if cs.tracing { - cs.mu.Lock() - if cs.trInfo.tr != nil { - cs.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) - } + // TODO(retry): buffer message for replaying if not committed. + return cs.attempt.sendMsg(m) +} + +func (cs *clientStream) RecvMsg(m interface{}) (err error) { + // TODO(retry): maybe retry on error or commit attempt on success. + return cs.attempt.recvMsg(m) +} + +func (cs *clientStream) CloseSend() error { + cs.attempt.closeSend() + return nil +} + +func (cs *clientStream) finish(err error) { + if err == io.EOF { + // Ending a stream with EOF indicates a success. + err = nil + } + cs.mu.Lock() + if cs.finished { cs.mu.Unlock() + return + } + cs.finished = true + cs.mu.Unlock() + if channelz.IsOn() { + if err != nil { + cs.cc.incrCallsFailed() + } else { + cs.cc.incrCallsSucceeded() + } + } + // TODO(retry): commit current attempt if necessary. + cs.attempt.finish(err) + for _, o := range cs.opts { + o.after(cs.c) } + cs.cancel() +} + +func (a *csAttempt) context() context.Context { + return a.s.Context() +} + +func (a *csAttempt) header() (metadata.MD, error) { + return a.s.Header() +} + +func (a *csAttempt) trailer() metadata.MD { + return a.s.Trailer() +} + +func (a *csAttempt) sendMsg(m interface{}) (err error) { // TODO Investigate how to signal the stats handling party. // generate error stats if err != nil && err != io.EOF? + cs := a.cs defer func() { - if err != nil { - cs.finish(err) - } - if err == nil { - return - } - if err == io.EOF { - // Specialize the process for server streaming. SendMsg is only called - // once when creating the stream object. io.EOF needs to be skipped when - // the rpc is early finished (before the stream object is created.). - // TODO: It is probably better to move this into the generated code. 
- if !cs.desc.ClientStreams && cs.desc.ServerStreams { - err = nil - } - return + // For non-client-streaming RPCs, we return nil instead of EOF on success + // because the generated code requires it. finish is not called; RecvMsg() + // will call it with the stream's status independently. + if err == io.EOF && !cs.desc.ClientStreams { + err = nil } - if _, ok := err.(transport.ConnectionError); !ok { - cs.closeTransportStream(err) + if err != nil && err != io.EOF { + // Call finish on the client stream for errors generated by this SendMsg + // call, as these indicate problems created by this client. (Transport + // errors are converted to an io.EOF error below; the real error will be + // returned from RecvMsg eventually in that case, or be retried.) + cs.finish(err) } - err = toRPCErr(err) }() - var outPayload *stats.OutPayload - if cs.statsHandler != nil { - outPayload = &stats.OutPayload{ - Client: true, + // TODO: Check cs.sentLast and error if we already ended the stream. + if EnableTracing { + a.mu.Lock() + if a.trInfo.tr != nil { + a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) } + a.mu.Unlock() } - hdr, data, err := encode(cs.codec, m, cs.cp, bytes.NewBuffer([]byte{}), outPayload) + data, err := encode(cs.codec, m) if err != nil { return err } - if cs.c.maxSendMessageSize == nil { - return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)") + compData, err := compress(data, cs.cp, cs.comp) + if err != nil { + return err } - if len(data) > *cs.c.maxSendMessageSize { - return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), *cs.c.maxSendMessageSize) + hdr, payload := msgHeader(data, compData) + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > *cs.c.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.c.maxSendMessageSize) } - err = cs.t.Write(cs.s, hdr, data, &transport.Options{Last: false}) - if err == nil && outPayload != nil { - outPayload.SentTime = time.Now() - cs.statsHandler.HandleRPC(cs.statsCtx, outPayload) + + if !cs.desc.ClientStreams { + cs.sentLast = true } - return err + err = a.t.Write(a.s, hdr, payload, &transport.Options{Last: !cs.desc.ClientStreams}) + if err == nil { + if a.statsHandler != nil { + a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, data, payload, time.Now())) + } + if channelz.IsOn() { + a.t.IncrMsgSent() + } + return nil + } + return io.EOF } -func (cs *clientStream) RecvMsg(m interface{}) (err error) { +func (a *csAttempt) recvMsg(m interface{}) (err error) { + cs := a.cs + defer func() { + if err != nil || !cs.desc.ServerStreams { + // err != nil or non-server-streaming indicates end of stream. + cs.finish(err) + } + }() var inPayload *stats.InPayload - if cs.statsHandler != nil { + if a.statsHandler != nil { inPayload = &stats.InPayload{ Client: true, } } - if cs.c.maxReceiveMessageSize == nil { - return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)") - } - err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, inPayload) - defer func() { - // err != nil indicates the termination of the stream. - if err != nil { - cs.finish(err) - } - }() - if err == nil { - if cs.tracing { - cs.mu.Lock() - if cs.trInfo.tr != nil { - cs.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + if !a.decompSet { + // Block until we receive headers containing received message encoding. 
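> Editor's note: `sendMsg` above now separates encoding, compression, and framing (`encode` → `compress` → `msgHeader`). The wire prefix `msgHeader` produces is the standard gRPC length-prefixed message framing: a 1-byte compressed flag followed by a 4-byte big-endian length. A self-contained sketch (`msgHeaderSketch` is our reimplementation, not the vendored helper):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// msgHeaderSketch builds the 5-byte gRPC message prefix and picks the
// compressed payload when one was produced.
func msgHeaderSketch(data, compData []byte) (hdr, payload []byte) {
	hdr = make([]byte, 5)
	payload = data
	if compData != nil {
		hdr[0] = 1 // flag: payload is compressed
		payload = compData
	}
	binary.BigEndian.PutUint32(hdr[1:], uint32(len(payload)))
	return hdr, payload
}

func main() {
	hdr, payload := msgHeaderSketch([]byte("hello"), nil)
	fmt.Printf("prefix % x, %d payload bytes\n", hdr, len(payload))
}
```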
+ if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity { + if a.dc == nil || a.dc.Type() != ct { + // No configured decompressor, or it does not match the incoming + // message encoding; attempt to find a registered compressor that does. + a.dc = nil + a.decomp = encoding.GetCompressor(ct) } - cs.mu.Unlock() - } - if inPayload != nil { - cs.statsHandler.HandleRPC(cs.statsCtx, inPayload) - } - if !cs.desc.ClientStreams || cs.desc.ServerStreams { - return - } - // Special handling for client streaming rpc. - // This recv expects EOF or errors, so we don't collect inPayload. - if cs.c.maxReceiveMessageSize == nil { - return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)") - } - err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, nil) - cs.closeTransportStream(err) - if err == nil { - return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) + } else { + // No compression is used; disable our decompressor. + a.dc = nil } + // Only initialize this state once per stream. + a.decompSet = true + } + err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.c.maxReceiveMessageSize, inPayload, a.decomp) + if err != nil { if err == io.EOF { - if se := cs.s.Status().Err(); se != nil { - return se + if statusErr := a.s.Status().Err(); statusErr != nil { + return statusErr } - cs.finish(err) - return nil + return io.EOF // indicates successful end of stream. } return toRPCErr(err) } - if _, ok := err.(transport.ConnectionError); !ok { - cs.closeTransportStream(err) - } - if err == io.EOF { - if statusErr := cs.s.Status().Err(); statusErr != nil { - return statusErr + if EnableTracing { + a.mu.Lock() + if a.trInfo.tr != nil { + a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) } - // Returns io.EOF to indicate the end of the stream. - return + a.mu.Unlock() } - return toRPCErr(err) -} - -func (cs *clientStream) CloseSend() (err error) { - err = cs.t.Write(cs.s, nil, nil, &transport.Options{Last: true}) - defer func() { - if err != nil { - cs.finish(err) - } - }() - if err == nil || err == io.EOF { + if inPayload != nil { + a.statsHandler.HandleRPC(a.ctx, inPayload) + } + if channelz.IsOn() { + a.t.IncrMsgRecv() + } + if cs.desc.ServerStreams { + // Subsequent messages should be received by subsequent RecvMsg calls. return nil } - if _, ok := err.(transport.ConnectionError); !ok { - cs.closeTransportStream(err) + + // Special handling for non-server-stream rpcs. + // This recv expects EOF or errors, so we don't collect inPayload. + err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.c.maxReceiveMessageSize, nil, a.decomp) + if err == nil { + return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) + } + if err == io.EOF { + return a.s.Status().Err() // non-server streaming Recv returns nil on success } - err = toRPCErr(err) - return + return toRPCErr(err) } -func (cs *clientStream) closeTransportStream(err error) { - cs.mu.Lock() - if cs.closed { - cs.mu.Unlock() +func (a *csAttempt) closeSend() { + cs := a.cs + if cs.sentLast { return } - cs.closed = true - cs.mu.Unlock() - cs.t.CloseStream(cs.s, err) + cs.sentLast = true + cs.attempt.t.Write(cs.attempt.s, nil, nil, &transport.Options{Last: true}) + // We ignore errors from Write. Any error it would return would also be + // returned by a subsequent RecvMsg call, and the user is supposed to always + // finish the stream by calling RecvMsg until it returns err != nil. 
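> Editor's note: the `decompSet` branch in `recvMsg` above picks a decompressor once per stream: keep the legacy `Decompressor` if its type matches the received `grpc-encoding`, otherwise fall back to a compressor registered with the encoding package. A loose sketch of that decision; the registry and string types here are illustrative only:

```go
package main

import "fmt"

// registry stands in for the encoding package's registered compressors.
var registry = map[string]string{"gzip": "gzip-compressor"}

// pickDecompressor mirrors the decompSet branch: "" or "identity" needs
// nothing, a matching legacy decompressor wins, else consult the registry.
func pickDecompressor(recvCompress, legacyType string) (string, bool) {
	if recvCompress == "" || recvCompress == "identity" {
		return "", true
	}
	if legacyType == recvCompress {
		return "legacy:" + legacyType, true
	}
	c, ok := registry[recvCompress]
	return c, ok
}

func main() {
	fmt.Println(pickDecompressor("gzip", ""))     // gzip-compressor true
	fmt.Println(pickDecompressor("snappy", ""))   //  false
	fmt.Println(pickDecompressor("identity", "")) //  true
}
```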
} -func (cs *clientStream) finish(err error) { - cs.mu.Lock() - defer cs.mu.Unlock() - if cs.finished { - return - } - cs.finished = true - defer func() { - if cs.cancel != nil { - cs.cancel() - } - }() - for _, o := range cs.opts { - o.after(cs.c) - } - if cs.done != nil { - updateRPCInfoInContext(cs.s.Context(), rpcInfo{ - bytesSent: cs.s.BytesSent(), - bytesReceived: cs.s.BytesReceived(), +func (a *csAttempt) finish(err error) { + a.mu.Lock() + a.t.CloseStream(a.s, err) + + if a.done != nil { + a.done(balancer.DoneInfo{ + Err: err, + BytesSent: true, + BytesReceived: a.s.BytesReceived(), }) - cs.done(balancer.DoneInfo{Err: err}) - cs.done = nil } - if cs.statsHandler != nil { + if a.statsHandler != nil { end := &stats.End{ - Client: true, - EndTime: time.Now(), - } - if err != io.EOF { - // end.Error is nil if the RPC finished successfully. - end.Error = toRPCErr(err) + Client: true, + BeginTime: a.beginTime, + EndTime: time.Now(), + Error: err, } - cs.statsHandler.HandleRPC(cs.statsCtx, end) - } - if !cs.tracing { - return + a.statsHandler.HandleRPC(a.ctx, end) } - if cs.trInfo.tr != nil { - if err == nil || err == io.EOF { - cs.trInfo.tr.LazyPrintf("RPC: [OK]") + if a.trInfo.tr != nil { + if err == nil { + a.trInfo.tr.LazyPrintf("RPC: [OK]") } else { - cs.trInfo.tr.LazyPrintf("RPC: [%v]", err) - cs.trInfo.tr.SetError() + a.trInfo.tr.LazyPrintf("RPC: [%v]", err) + a.trInfo.tr.SetError() } - cs.trInfo.tr.Finish() - cs.trInfo.tr = nil + a.trInfo.tr.Finish() + a.trInfo.tr = nil } + a.mu.Unlock() } // ServerStream defines the interface a server stream has to satisfy. @@ -540,12 +645,17 @@ type ServerStream interface { // serverStream implements a server side Stream. type serverStream struct { - t transport.ServerTransport - s *transport.Stream - p *parser - codec Codec - cp Compressor - dc Decompressor + ctx context.Context + t transport.ServerTransport + s *transport.Stream + p *parser + codec baseCodec + + cp Compressor + dc Decompressor + comp encoding.Compressor + decomp encoding.Compressor + maxReceiveMessageSize int maxSendMessageSize int trInfo *traceInfo @@ -556,7 +666,7 @@ type serverStream struct { } func (ss *serverStream) Context() context.Context { - return ss.s.Context() + return ss.ctx } func (ss *serverStream) SetHeader(md metadata.MD) error { @@ -575,7 +685,6 @@ func (ss *serverStream) SetTrailer(md metadata.MD) { return } ss.s.SetTrailer(md) - return } func (ss *serverStream) SendMsg(m interface{}) (err error) { @@ -596,24 +705,28 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { st, _ := status.FromError(toRPCErr(err)) ss.t.WriteStatus(ss.s, st) } + if channelz.IsOn() && err == nil { + ss.t.IncrMsgSent() + } }() - var outPayload *stats.OutPayload - if ss.statsHandler != nil { - outPayload = &stats.OutPayload{} + data, err := encode(ss.codec, m) + if err != nil { + return err } - hdr, data, err := encode(ss.codec, m, ss.cp, bytes.NewBuffer([]byte{}), outPayload) + compData, err := compress(data, ss.cp, ss.comp) if err != nil { return err } - if len(data) > ss.maxSendMessageSize { - return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), ss.maxSendMessageSize) + hdr, payload := msgHeader(data, compData) + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > ss.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", len(payload), ss.maxSendMessageSize) } - if err := ss.t.Write(ss.s, hdr, data, &transport.Options{Last: false}); err != nil { + if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { return toRPCErr(err) } - if outPayload != nil { - outPayload.SentTime = time.Now() - ss.statsHandler.HandleRPC(ss.s.Context(), outPayload) + if ss.statsHandler != nil { + ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) } return nil } @@ -636,17 +749,20 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { st, _ := status.FromError(toRPCErr(err)) ss.t.WriteStatus(ss.s, st) } + if channelz.IsOn() && err == nil { + ss.t.IncrMsgRecv() + } }() var inPayload *stats.InPayload if ss.statsHandler != nil { inPayload = &stats.InPayload{} } - if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload); err != nil { + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload, ss.decomp); err != nil { if err == io.EOF { return err } if err == io.ErrUnexpectedEOF { - err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) } return toRPCErr(err) } @@ -655,3 +771,9 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } return nil } + +// MethodFromServerStream returns the method string for the input stream. +// The returned string is in the format of "/service/method". +func MethodFromServerStream(stream ServerStream) (string, bool) { + return Method(stream.Context()) +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/transport/bdp_estimator.go b/cluster-autoscaler/vendor/google.golang.org/grpc/transport/bdp_estimator.go index 8dd2ed42792f..63cd2627c87a 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/transport/bdp_estimator.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/transport/bdp_estimator.go @@ -41,12 +41,9 @@ const ( gamma = 2 ) -var ( - // Adding arbitrary data to ping so that its ack can be - // identified. - // Easter-egg: what does the ping message say? - bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}} -) +// Adding arbitrary data to ping so that its ack can be identified. +// Easter-egg: what does the ping message say? +var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}} type bdpEstimator struct { // sentAt is the time when the ping was sent. diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/transport/control.go b/cluster-autoscaler/vendor/google.golang.org/grpc/transport/control.go deleted file mode 100644 index dd1a8d42e7e2..000000000000 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/transport/control.go +++ /dev/null @@ -1,311 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package transport - -import ( - "fmt" - "math" - "sync" - "sync/atomic" - "time" - - "golang.org/x/net/http2" - "golang.org/x/net/http2/hpack" -) - -const ( - // The default value of flow control window size in HTTP2 spec. - defaultWindowSize = 65535 - // The initial window size for flow control. - initialWindowSize = defaultWindowSize // for an RPC - infinity = time.Duration(math.MaxInt64) - defaultClientKeepaliveTime = infinity - defaultClientKeepaliveTimeout = time.Duration(20 * time.Second) - defaultMaxStreamsClient = 100 - defaultMaxConnectionIdle = infinity - defaultMaxConnectionAge = infinity - defaultMaxConnectionAgeGrace = infinity - defaultServerKeepaliveTime = time.Duration(2 * time.Hour) - defaultServerKeepaliveTimeout = time.Duration(20 * time.Second) - defaultKeepalivePolicyMinTime = time.Duration(5 * time.Minute) - // max window limit set by HTTP2 Specs. - maxWindowSize = math.MaxInt32 - // defaultLocalSendQuota sets is default value for number of data - // bytes that each stream can schedule before some of it being - // flushed out. - defaultLocalSendQuota = 64 * 1024 -) - -// The following defines various control items which could flow through -// the control buffer of transport. They represent different aspects of -// control tasks, e.g., flow control, settings, streaming resetting, etc. - -type headerFrame struct { - streamID uint32 - hf []hpack.HeaderField - endStream bool -} - -func (*headerFrame) item() {} - -type continuationFrame struct { - streamID uint32 - endHeaders bool - headerBlockFragment []byte -} - -type dataFrame struct { - streamID uint32 - endStream bool - d []byte - f func() -} - -func (*dataFrame) item() {} - -func (*continuationFrame) item() {} - -type windowUpdate struct { - streamID uint32 - increment uint32 -} - -func (*windowUpdate) item() {} - -type settings struct { - ack bool - ss []http2.Setting -} - -func (*settings) item() {} - -type resetStream struct { - streamID uint32 - code http2.ErrCode -} - -func (*resetStream) item() {} - -type goAway struct { - code http2.ErrCode - debugData []byte - headsUp bool - closeConn bool -} - -func (*goAway) item() {} - -type flushIO struct { -} - -func (*flushIO) item() {} - -type ping struct { - ack bool - data [8]byte -} - -func (*ping) item() {} - -// quotaPool is a pool which accumulates the quota and sends it to acquire() -// when it is available. -type quotaPool struct { - c chan int - - mu sync.Mutex - version uint32 - quota int -} - -// newQuotaPool creates a quotaPool which has quota q available to consume. -func newQuotaPool(q int) *quotaPool { - qb := "aPool{ - c: make(chan int, 1), - } - if q > 0 { - qb.c <- q - } else { - qb.quota = q - } - return qb -} - -// add cancels the pending quota sent on acquired, incremented by v and sends -// it back on acquire. -func (qb *quotaPool) add(v int) { - qb.mu.Lock() - defer qb.mu.Unlock() - qb.lockedAdd(v) -} - -func (qb *quotaPool) lockedAdd(v int) { - select { - case n := <-qb.c: - qb.quota += n - default: - } - qb.quota += v - if qb.quota <= 0 { - return - } - // After the pool has been created, this is the only place that sends on - // the channel. Since mu is held at this point and any quota that was sent - // on the channel has been retrieved, we know that this code will always - // place any positive quota value on the channel. 
- select { - case qb.c <- qb.quota: - qb.quota = 0 - default: - } -} - -func (qb *quotaPool) addAndUpdate(v int) { - qb.mu.Lock() - defer qb.mu.Unlock() - qb.lockedAdd(v) - // Update the version only after having added to the quota - // so that if acquireWithVesrion sees the new vesrion it is - // guaranteed to have seen the updated quota. - // Also, still keep this inside of the lock, so that when - // compareAndExecute is processing, this function doesn't - // get executed partially (quota gets updated but the version - // doesn't). - atomic.AddUint32(&(qb.version), 1) -} - -func (qb *quotaPool) acquireWithVersion() (<-chan int, uint32) { - return qb.c, atomic.LoadUint32(&(qb.version)) -} - -func (qb *quotaPool) compareAndExecute(version uint32, success, failure func()) bool { - qb.mu.Lock() - defer qb.mu.Unlock() - if version == atomic.LoadUint32(&(qb.version)) { - success() - return true - } - failure() - return false -} - -// acquire returns the channel on which available quota amounts are sent. -func (qb *quotaPool) acquire() <-chan int { - return qb.c -} - -// inFlow deals with inbound flow control -type inFlow struct { - mu sync.Mutex - // The inbound flow control limit for pending data. - limit uint32 - // pendingData is the overall data which have been received but not been - // consumed by applications. - pendingData uint32 - // The amount of data the application has consumed but grpc has not sent - // window update for them. Used to reduce window update frequency. - pendingUpdate uint32 - // delta is the extra window update given by receiver when an application - // is reading data bigger in size than the inFlow limit. - delta uint32 -} - -// newLimit updates the inflow window to a new value n. -// It assumes that n is always greater than the old limit. -func (f *inFlow) newLimit(n uint32) uint32 { - f.mu.Lock() - defer f.mu.Unlock() - d := n - f.limit - f.limit = n - return d -} - -func (f *inFlow) maybeAdjust(n uint32) uint32 { - if n > uint32(math.MaxInt32) { - n = uint32(math.MaxInt32) - } - f.mu.Lock() - defer f.mu.Unlock() - // estSenderQuota is the receiver's view of the maximum number of bytes the sender - // can send without a window update. - estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate)) - // estUntransmittedData is the maximum number of bytes the sends might not have put - // on the wire yet. A value of 0 or less means that we have already received all or - // more bytes than the application is requesting to read. - estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative. - // This implies that unless we send a window update, the sender won't be able to send all the bytes - // for this message. Therefore we must send an update over the limit since there's an active read - // request from the application. - if estUntransmittedData > estSenderQuota { - // Sender's window shouldn't go more than 2^31 - 1 as speecified in the HTTP spec. - if f.limit+n > maxWindowSize { - f.delta = maxWindowSize - f.limit - } else { - // Send a window update for the whole message and not just the difference between - // estUntransmittedData and estSenderQuota. This will be helpful in case the message - // is padded; We will fallback on the current available window(at least a 1/4th of the limit). - f.delta = n - } - return f.delta - } - return 0 -} - -// onData is invoked when some data frame is received. It updates pendingData. 
-func (f *inFlow) onData(n uint32) error { - f.mu.Lock() - defer f.mu.Unlock() - f.pendingData += n - if f.pendingData+f.pendingUpdate > f.limit+f.delta { - return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate, f.limit) - } - return nil -} - -// onRead is invoked when the application reads the data. It returns the window size -// to be sent to the peer. -func (f *inFlow) onRead(n uint32) uint32 { - f.mu.Lock() - defer f.mu.Unlock() - if f.pendingData == 0 { - return 0 - } - f.pendingData -= n - if n > f.delta { - n -= f.delta - f.delta = 0 - } else { - f.delta -= n - n = 0 - } - f.pendingUpdate += n - if f.pendingUpdate >= f.limit/4 { - wu := f.pendingUpdate - f.pendingUpdate = 0 - return wu - } - return 0 -} - -func (f *inFlow) resetPendingUpdate() uint32 { - f.mu.Lock() - defer f.mu.Unlock() - n := f.pendingUpdate - f.pendingUpdate = 0 - return n -} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/transport/controlbuf.go b/cluster-autoscaler/vendor/google.golang.org/grpc/transport/controlbuf.go new file mode 100644 index 000000000000..5c5891a11bfe --- /dev/null +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/transport/controlbuf.go @@ -0,0 +1,796 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "bytes" + "fmt" + "runtime" + "sync" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" +) + +var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { + e.SetMaxDynamicTableSizeLimit(v) +} + +type itemNode struct { + it interface{} + next *itemNode +} + +type itemList struct { + head *itemNode + tail *itemNode +} + +func (il *itemList) enqueue(i interface{}) { + n := &itemNode{it: i} + if il.tail == nil { + il.head, il.tail = n, n + return + } + il.tail.next = n + il.tail = n +} + +// peek returns the first item in the list without removing it from the +// list. +func (il *itemList) peek() interface{} { + return il.head.it +} + +func (il *itemList) dequeue() interface{} { + if il.head == nil { + return nil + } + i := il.head.it + il.head = il.head.next + if il.head == nil { + il.tail = nil + } + return i +} + +func (il *itemList) dequeueAll() *itemNode { + h := il.head + il.head, il.tail = nil, nil + return h +} + +func (il *itemList) isEmpty() bool { + return il.head == nil +} + +// The following defines various control items which could flow through +// the control buffer of transport. They represent different aspects of +// control tasks, e.g., flow control, settings, streaming resetting, etc. + +// registerStream is used to register an incoming stream with loopy writer. +type registerStream struct { + streamID uint32 + wq *writeQuota +} + +// headerFrame is also used to register stream on the client-side. +type headerFrame struct { + streamID uint32 + hf []hpack.HeaderField + endStream bool // Valid on server side. + initStream func(uint32) (bool, error) // Used only on the client side. 
+ onWrite func() + wq *writeQuota // write quota for the stream created. + cleanup *cleanupStream // Valid on the server side. + onOrphaned func(error) // Valid on client-side +} + +type cleanupStream struct { + streamID uint32 + idPtr *uint32 + rst bool + rstCode http2.ErrCode + onWrite func() +} + +type dataFrame struct { + streamID uint32 + endStream bool + h []byte + d []byte + // onEachWrite is called every time + // a part of d is written out. + onEachWrite func() +} + +type incomingWindowUpdate struct { + streamID uint32 + increment uint32 +} + +type outgoingWindowUpdate struct { + streamID uint32 + increment uint32 +} + +type incomingSettings struct { + ss []http2.Setting +} + +type outgoingSettings struct { + ss []http2.Setting +} + +type settingsAck struct { +} + +type incomingGoAway struct { +} + +type goAway struct { + code http2.ErrCode + debugData []byte + headsUp bool + closeConn bool +} + +type ping struct { + ack bool + data [8]byte +} + +type outFlowControlSizeRequest struct { + resp chan uint32 +} + +type outStreamState int + +const ( + active outStreamState = iota + empty + waitingOnStreamQuota +) + +type outStream struct { + id uint32 + state outStreamState + itl *itemList + bytesOutStanding int + wq *writeQuota + + next *outStream + prev *outStream +} + +func (s *outStream) deleteSelf() { + if s.prev != nil { + s.prev.next = s.next + } + if s.next != nil { + s.next.prev = s.prev + } + s.next, s.prev = nil, nil +} + +type outStreamList struct { + // Following are sentinel objects that mark the + // beginning and end of the list. They do not + // contain any item lists. All valid objects are + // inserted in between them. + // This is needed so that an outStream object can + // deleteSelf() in O(1) time without knowing which + // list it belongs to. + head *outStream + tail *outStream +} + +func newOutStreamList() *outStreamList { + head, tail := new(outStream), new(outStream) + head.next = tail + tail.prev = head + return &outStreamList{ + head: head, + tail: tail, + } +} + +func (l *outStreamList) enqueue(s *outStream) { + e := l.tail.prev + e.next = s + s.prev = e + s.next = l.tail + l.tail.prev = s +} + +// remove from the beginning of the list. 
+func (l *outStreamList) dequeue() *outStream { + b := l.head.next + if b == l.tail { + return nil + } + b.deleteSelf() + return b +} + +type controlBuffer struct { + ch chan struct{} + done <-chan struct{} + mu sync.Mutex + consumerWaiting bool + list *itemList + err error +} + +func newControlBuffer(done <-chan struct{}) *controlBuffer { + return &controlBuffer{ + ch: make(chan struct{}, 1), + list: &itemList{}, + done: done, + } +} + +func (c *controlBuffer) put(it interface{}) error { + _, err := c.executeAndPut(nil, it) + return err +} + +func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it interface{}) (bool, error) { + var wakeUp bool + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return false, c.err + } + if f != nil { + if !f(it) { // f wasn't successful + c.mu.Unlock() + return false, nil + } + } + if c.consumerWaiting { + wakeUp = true + c.consumerWaiting = false + } + c.list.enqueue(it) + c.mu.Unlock() + if wakeUp { + select { + case c.ch <- struct{}{}: + default: + } + } + return true, nil +} + +func (c *controlBuffer) get(block bool) (interface{}, error) { + for { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return nil, c.err + } + if !c.list.isEmpty() { + h := c.list.dequeue() + c.mu.Unlock() + return h, nil + } + if !block { + c.mu.Unlock() + return nil, nil + } + c.consumerWaiting = true + c.mu.Unlock() + select { + case <-c.ch: + case <-c.done: + c.finish() + return nil, ErrConnClosing + } + } +} + +func (c *controlBuffer) finish() { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return + } + c.err = ErrConnClosing + // There may be headers for streams in the control buffer. + // These streams need to be cleaned out since the transport + // is still not aware of these yet. + for head := c.list.dequeueAll(); head != nil; head = head.next { + hdr, ok := head.it.(*headerFrame) + if !ok { + continue + } + if hdr.onOrphaned != nil { // It will be nil on the server-side. + hdr.onOrphaned(ErrConnClosing) + } + } + c.mu.Unlock() +} + +type side int + +const ( + clientSide side = iota + serverSide +) + +type loopyWriter struct { + side side + cbuf *controlBuffer + sendQuota uint32 + oiws uint32 // outbound initial window size. + estdStreams map[uint32]*outStream // Established streams. + activeStreams *outStreamList // Streams that are sending data. + framer *framer + hBuf *bytes.Buffer // The buffer for HPACK encoding. + hEnc *hpack.Encoder // HPACK encoder. + bdpEst *bdpEstimator + draining bool + + // Side-specific handlers + ssGoAwayHandler func(*goAway) (bool, error) +} + +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter { + var buf bytes.Buffer + l := &loopyWriter{ + side: s, + cbuf: cbuf, + sendQuota: defaultWindowSize, + oiws: defaultWindowSize, + estdStreams: make(map[uint32]*outStream), + activeStreams: newOutStreamList(), + framer: fr, + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + bdpEst: bdpEst, + } + return l +} + +const minBatchSize = 1000 + +// run should be run in a separate goroutine. +func (l *loopyWriter) run() (err error) { + defer func() { + if err == ErrConnClosing { + // Don't log ErrConnClosing as error since it happens + // 1. When the connection is closed by some other known issue. + // 2. User closed the connection. + // 3. A graceful close of connection. + infof("transport: loopyWriter.run returning. 
%v", err) + err = nil + } + }() + for { + it, err := l.cbuf.get(true) + if err != nil { + return err + } + if err = l.handle(it); err != nil { + return err + } + if _, err = l.processData(); err != nil { + return err + } + gosched := true + hasdata: + for { + it, err := l.cbuf.get(false) + if err != nil { + return err + } + if it != nil { + if err = l.handle(it); err != nil { + return err + } + if _, err = l.processData(); err != nil { + return err + } + continue hasdata + } + isEmpty, err := l.processData() + if err != nil { + return err + } + if !isEmpty { + continue hasdata + } + if gosched { + gosched = false + if l.framer.writer.offset < minBatchSize { + runtime.Gosched() + continue hasdata + } + } + l.framer.writer.Flush() + break hasdata + + } + } +} + +func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error { + return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment) +} + +func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error { + // Otherwise update the quota. + if w.streamID == 0 { + l.sendQuota += w.increment + return nil + } + // Find the stream and update it. + if str, ok := l.estdStreams[w.streamID]; ok { + str.bytesOutStanding -= int(w.increment) + if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota { + str.state = active + l.activeStreams.enqueue(str) + return nil + } + } + return nil +} + +func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { + return l.framer.fr.WriteSettings(s.ss...) +} + +func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { + if err := l.applySettings(s.ss); err != nil { + return err + } + return l.framer.fr.WriteSettingsAck() +} + +func (l *loopyWriter) registerStreamHandler(h *registerStream) error { + str := &outStream{ + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + } + l.estdStreams[h.streamID] = str + return nil +} + +func (l *loopyWriter) headerHandler(h *headerFrame) error { + if l.side == serverSide { + str, ok := l.estdStreams[h.streamID] + if !ok { + warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) + return nil + } + // Case 1.A: Server is responding back with headers. + if !h.endStream { + return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite) + } + // else: Case 1.B: Server wants to close stream. + + if str.state != empty { // either active or waiting on stream quota. + // add it str's list of items. + str.itl.enqueue(h) + return nil + } + if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil { + return err + } + return l.cleanupStreamHandler(h.cleanup) + } + // Case 2: Client wants to originate stream. + str := &outStream{ + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + } + str.itl.enqueue(h) + return l.originateStream(str) +} + +func (l *loopyWriter) originateStream(str *outStream) error { + hdr := str.itl.dequeue().(*headerFrame) + sendPing, err := hdr.initStream(str.id) + if err != nil { + if err == ErrConnClosing { + return err + } + // Other errors(errStreamDrain) need not close transport. 
+ return nil + } + if err = l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { + return err + } + l.estdStreams[str.id] = str + if sendPing { + return l.pingHandler(&ping{data: [8]byte{}}) + } + return nil +} + +func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error { + if onWrite != nil { + onWrite() + } + l.hBuf.Reset() + for _, f := range hf { + if err := l.hEnc.WriteField(f); err != nil { + warningf("transport: loopyWriter.writeHeader encountered error while encoding headers:", err) + } + } + var ( + err error + endHeaders, first bool + ) + first = true + for !endHeaders { + size := l.hBuf.Len() + if size > http2MaxFrameLen { + size = http2MaxFrameLen + } else { + endHeaders = true + } + if first { + first = false + err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{ + StreamID: streamID, + BlockFragment: l.hBuf.Next(size), + EndStream: endStream, + EndHeaders: endHeaders, + }) + } else { + err = l.framer.fr.WriteContinuation( + streamID, + endHeaders, + l.hBuf.Next(size), + ) + } + if err != nil { + return err + } + } + return nil +} + +func (l *loopyWriter) preprocessData(df *dataFrame) error { + str, ok := l.estdStreams[df.streamID] + if !ok { + return nil + } + // If we got data for a stream it means that + // stream was originated and the headers were sent out. + str.itl.enqueue(df) + if str.state == empty { + str.state = active + l.activeStreams.enqueue(str) + } + return nil +} + +func (l *loopyWriter) pingHandler(p *ping) error { + if !p.ack { + l.bdpEst.timesnap(p.data) + } + return l.framer.fr.WritePing(p.ack, p.data) + +} + +func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error { + o.resp <- l.sendQuota + return nil +} + +func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { + c.onWrite() + if str, ok := l.estdStreams[c.streamID]; ok { + // On the server side it could be a trailers-only response or + // a RST_STREAM before stream initialization thus the stream might + // not be established yet. + delete(l.estdStreams, c.streamID) + str.deleteSelf() + } + if c.rst { // If RST_STREAM needs to be sent. + if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil { + return err + } + } + if l.side == clientSide && l.draining && len(l.estdStreams) == 0 { + return ErrConnClosing + } + return nil +} + +func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { + if l.side == clientSide { + l.draining = true + if len(l.estdStreams) == 0 { + return ErrConnClosing + } + } + return nil +} + +func (l *loopyWriter) goAwayHandler(g *goAway) error { + // Handling of outgoing GoAway is very specific to side. 
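> Editor's note: `writeHeader` above splits an HPACK-encoded header block into one HEADERS frame followed by CONTINUATION frames, each at most `http2MaxFrameLen` bytes, setting END_HEADERS only on the last piece. A sketch of that loop with the frame writes stubbed out as prints:

```go
package main

import "fmt"

const maxFrameLen = 16384 // http2MaxFrameLen in the vendored code

func writeHeaderBlock(block []byte) {
	first := true
	for first || len(block) > 0 {
		size := len(block)
		if size > maxFrameLen {
			size = maxFrameLen
		}
		endHeaders := size == len(block)
		if first {
			first = false
			fmt.Printf("HEADERS: %d bytes, endHeaders=%v\n", size, endHeaders)
		} else {
			fmt.Printf("CONTINUATION: %d bytes, endHeaders=%v\n", size, endHeaders)
		}
		block = block[size:]
	}
}

func main() {
	writeHeaderBlock(make([]byte, 40000)) // one HEADERS + two CONTINUATIONs
}
```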
+ if l.ssGoAwayHandler != nil { + draining, err := l.ssGoAwayHandler(g) + if err != nil { + return err + } + l.draining = draining + } + return nil +} + +func (l *loopyWriter) handle(i interface{}) error { + switch i := i.(type) { + case *incomingWindowUpdate: + return l.incomingWindowUpdateHandler(i) + case *outgoingWindowUpdate: + return l.outgoingWindowUpdateHandler(i) + case *incomingSettings: + return l.incomingSettingsHandler(i) + case *outgoingSettings: + return l.outgoingSettingsHandler(i) + case *headerFrame: + return l.headerHandler(i) + case *registerStream: + return l.registerStreamHandler(i) + case *cleanupStream: + return l.cleanupStreamHandler(i) + case *incomingGoAway: + return l.incomingGoAwayHandler(i) + case *dataFrame: + return l.preprocessData(i) + case *ping: + return l.pingHandler(i) + case *goAway: + return l.goAwayHandler(i) + case *outFlowControlSizeRequest: + return l.outFlowControlSizeRequestHandler(i) + default: + return fmt.Errorf("transport: unknown control message type %T", i) + } +} + +func (l *loopyWriter) applySettings(ss []http2.Setting) error { + for _, s := range ss { + switch s.ID { + case http2.SettingInitialWindowSize: + o := l.oiws + l.oiws = s.Val + if o < l.oiws { + // If the new limit is greater make all depleted streams active. + for _, stream := range l.estdStreams { + if stream.state == waitingOnStreamQuota { + stream.state = active + l.activeStreams.enqueue(stream) + } + } + } + case http2.SettingHeaderTableSize: + updateHeaderTblSize(l.hEnc, s.Val) + } + } + return nil +} + +func (l *loopyWriter) processData() (bool, error) { + if l.sendQuota == 0 { + return true, nil + } + str := l.activeStreams.dequeue() + if str == nil { + return true, nil + } + dataItem := str.itl.peek().(*dataFrame) + if len(dataItem.h) == 0 && len(dataItem.d) == 0 { + // Client sends out empty data frame with endStream = true + if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { + return false, err + } + str.itl.dequeue() + if str.itl.isEmpty() { + str.state = empty + } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. + if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil { + return false, err + } + if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { + return false, nil + } + } else { + l.activeStreams.enqueue(str) + } + return false, nil + } + var ( + idx int + buf []byte + ) + if len(dataItem.h) != 0 { // data header has not been written out yet. + buf = dataItem.h + } else { + idx = 1 + buf = dataItem.d + } + size := http2MaxFrameLen + if len(buf) < size { + size = len(buf) + } + if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { + str.state = waitingOnStreamQuota + return false, nil + } else if strQuota < size { + size = strQuota + } + + if l.sendQuota < uint32(size) { + size = int(l.sendQuota) + } + // Now that outgoing flow controls are checked we can replenish str's write quota + str.wq.replenish(size) + var endStream bool + // This last data message on this stream and all + // of it can be written in this go. + if dataItem.endStream && size == len(buf) { + // buf contains either data or it contains header but data is empty. 
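> Editor's note: in `processData` above, the number of bytes actually written per pass is clamped by three limits: the HTTP/2 max frame length, the remaining per-stream quota (`oiws - bytesOutStanding`), and the connection-level `sendQuota`. A tiny sketch of that clamping; the function name is ours:

```go
package main

import "fmt"

// frameSize returns the bytes processData would write this pass.
func frameSize(remaining, maxFrame, streamQuota, connQuota int) int {
	size := remaining
	for _, lim := range []int{maxFrame, streamQuota, connQuota} {
		if lim < size {
			size = lim
		}
	}
	return size
}

func main() {
	fmt.Println(frameSize(70000, 16384, 20000, 9000)) // 9000: conn quota binds
	fmt.Println(frameSize(1200, 16384, 20000, 9000))  // 1200: data is smallest
}
```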
+ if idx == 1 || len(dataItem.d) == 0 { + endStream = true + } + } + if dataItem.onEachWrite != nil { + dataItem.onEachWrite() + } + if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil { + return false, err + } + buf = buf[size:] + str.bytesOutStanding += size + l.sendQuota -= uint32(size) + if idx == 0 { + dataItem.h = buf + } else { + dataItem.d = buf + } + + if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out. + str.itl.dequeue() + } + if str.itl.isEmpty() { + str.state = empty + } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers. + if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil { + return false, err + } + if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { + return false, err + } + } else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota. + str.state = waitingOnStreamQuota + } else { // Otherwise add it back to the list of active streams. + l.activeStreams.enqueue(str) + } + return false, nil +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/transport/flowcontrol.go b/cluster-autoscaler/vendor/google.golang.org/grpc/transport/flowcontrol.go new file mode 100644 index 000000000000..bbf98b6f5eeb --- /dev/null +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/transport/flowcontrol.go @@ -0,0 +1,242 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "fmt" + "math" + "sync" + "sync/atomic" + "time" +) + +const ( + // The default value of flow control window size in HTTP2 spec. + defaultWindowSize = 65535 + // The initial window size for flow control. + initialWindowSize = defaultWindowSize // for an RPC + infinity = time.Duration(math.MaxInt64) + defaultClientKeepaliveTime = infinity + defaultClientKeepaliveTimeout = 20 * time.Second + defaultMaxStreamsClient = 100 + defaultMaxConnectionIdle = infinity + defaultMaxConnectionAge = infinity + defaultMaxConnectionAgeGrace = infinity + defaultServerKeepaliveTime = 2 * time.Hour + defaultServerKeepaliveTimeout = 20 * time.Second + defaultKeepalivePolicyMinTime = 5 * time.Minute + // max window limit set by HTTP2 Specs. + maxWindowSize = math.MaxInt32 + // defaultWriteQuota is the default value for number of data + // bytes that each stream can schedule before some of it being + // flushed out. + defaultWriteQuota = 64 * 1024 +) + +// writeQuota is a soft limit on the amount of data a stream can +// schedule before some of it is written out. +type writeQuota struct { + quota int32 + // get waits on read from when quota goes less than or equal to zero. + // replenish writes on it when quota goes positive again. + ch chan struct{} + // done is triggered in error case. + done <-chan struct{} + // replenish is called by loopyWriter to give quota back to. + // It is implemented as a field so that it can be updated + // by tests. 
+ replenish func(n int) +} + +func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota { + w := &writeQuota{ + quota: sz, + ch: make(chan struct{}, 1), + done: done, + } + w.replenish = w.realReplenish + return w +} + +func (w *writeQuota) get(sz int32) error { + for { + if atomic.LoadInt32(&w.quota) > 0 { + atomic.AddInt32(&w.quota, -sz) + return nil + } + select { + case <-w.ch: + continue + case <-w.done: + return errStreamDone + } + } +} + +func (w *writeQuota) realReplenish(n int) { + sz := int32(n) + a := atomic.AddInt32(&w.quota, sz) + b := a - sz + if b <= 0 && a > 0 { + select { + case w.ch <- struct{}{}: + default: + } + } +} + +type trInFlow struct { + limit uint32 + unacked uint32 + effectiveWindowSize uint32 +} + +func (f *trInFlow) newLimit(n uint32) uint32 { + d := n - f.limit + f.limit = n + f.updateEffectiveWindowSize() + return d +} + +func (f *trInFlow) onData(n uint32) uint32 { + f.unacked += n + if f.unacked >= f.limit/4 { + w := f.unacked + f.unacked = 0 + f.updateEffectiveWindowSize() + return w + } + f.updateEffectiveWindowSize() + return 0 +} + +func (f *trInFlow) reset() uint32 { + w := f.unacked + f.unacked = 0 + f.updateEffectiveWindowSize() + return w +} + +func (f *trInFlow) updateEffectiveWindowSize() { + atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked) +} + +func (f *trInFlow) getSize() uint32 { + return atomic.LoadUint32(&f.effectiveWindowSize) +} + +// TODO(mmukhi): Simplify this code. +// inFlow deals with inbound flow control +type inFlow struct { + mu sync.Mutex + // The inbound flow control limit for pending data. + limit uint32 + // pendingData is the overall data which have been received but not been + // consumed by applications. + pendingData uint32 + // The amount of data the application has consumed but grpc has not sent + // window update for them. Used to reduce window update frequency. + pendingUpdate uint32 + // delta is the extra window update given by receiver when an application + // is reading data bigger in size than the inFlow limit. + delta uint32 +} + +// newLimit updates the inflow window to a new value n. +// It assumes that n is always greater than the old limit. +func (f *inFlow) newLimit(n uint32) uint32 { + f.mu.Lock() + d := n - f.limit + f.limit = n + f.mu.Unlock() + return d +} + +func (f *inFlow) maybeAdjust(n uint32) uint32 { + if n > uint32(math.MaxInt32) { + n = uint32(math.MaxInt32) + } + f.mu.Lock() + // estSenderQuota is the receiver's view of the maximum number of bytes the sender + // can send without a window update. + estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate)) + // estUntransmittedData is the maximum number of bytes the sends might not have put + // on the wire yet. A value of 0 or less means that we have already received all or + // more bytes than the application is requesting to read. + estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative. + // This implies that unless we send a window update, the sender won't be able to send all the bytes + // for this message. Therefore we must send an update over the limit since there's an active read + // request from the application. + if estUntransmittedData > estSenderQuota { + // Sender's window shouldn't go more than 2^31 - 1 as specified in the HTTP spec. + if f.limit+n > maxWindowSize { + f.delta = maxWindowSize - f.limit + } else { + // Send a window update for the whole message and not just the difference between + // estUntransmittedData and estSenderQuota. 
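> Editor's note: `writeQuota` above is a soft limit — `get` may drive the quota negative, and `replenish` wakes at most one parked writer through a 1-buffered channel, exactly when the quota crosses from non-positive to positive. A usage sketch (reimplemented for illustration; error handling via `done` omitted):

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

type writeQuota struct {
	quota int32
	ch    chan struct{}
}

// get spends sz as long as quota is positive; otherwise it parks.
func (w *writeQuota) get(sz int32) {
	for {
		if atomic.LoadInt32(&w.quota) > 0 {
			atomic.AddInt32(&w.quota, -sz)
			return
		}
		<-w.ch
	}
}

// replenish signals only on a non-positive -> positive transition.
func (w *writeQuota) replenish(n int32) {
	a := atomic.AddInt32(&w.quota, n)
	if a-n <= 0 && a > 0 {
		select {
		case w.ch <- struct{}{}:
		default:
		}
	}
}

func main() {
	w := &writeQuota{quota: 10, ch: make(chan struct{}, 1)}
	w.get(64) // soft limit: quota becomes -54
	go func() {
		time.Sleep(10 * time.Millisecond)
		w.replenish(100) // -54 -> 46, wakes the waiter
	}()
	w.get(1)
	fmt.Println(atomic.LoadInt32(&w.quota)) // 45
}
```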
This will be helpful in case the message + // is padded; We will fallback on the current available window(at least a 1/4th of the limit). + f.delta = n + } + f.mu.Unlock() + return f.delta + } + f.mu.Unlock() + return 0 +} + +// onData is invoked when some data frame is received. It updates pendingData. +func (f *inFlow) onData(n uint32) error { + f.mu.Lock() + f.pendingData += n + if f.pendingData+f.pendingUpdate > f.limit+f.delta { + limit := f.limit + rcvd := f.pendingData + f.pendingUpdate + f.mu.Unlock() + return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit) + } + f.mu.Unlock() + return nil +} + +// onRead is invoked when the application reads the data. It returns the window size +// to be sent to the peer. +func (f *inFlow) onRead(n uint32) uint32 { + f.mu.Lock() + if f.pendingData == 0 { + f.mu.Unlock() + return 0 + } + f.pendingData -= n + if n > f.delta { + n -= f.delta + f.delta = 0 + } else { + f.delta -= n + n = 0 + } + f.pendingUpdate += n + if f.pendingUpdate >= f.limit/4 { + wu := f.pendingUpdate + f.pendingUpdate = 0 + f.mu.Unlock() + return wu + } + f.mu.Unlock() + return 0 +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/transport/go16.go b/cluster-autoscaler/vendor/google.golang.org/grpc/transport/go16.go new file mode 100644 index 000000000000..5babcf9b8770 --- /dev/null +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/transport/go16.go @@ -0,0 +1,51 @@ +// +build go1.6,!go1.7 + +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "net" + "net/http" + + "google.golang.org/grpc/codes" + + "golang.org/x/net/context" +) + +// dialContext connects to the address on the named network. +func dialContext(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address) +} + +// ContextErr converts the error from context package into a StreamError. +func ContextErr(err error) StreamError { + switch err { + case context.DeadlineExceeded: + return streamErrorf(codes.DeadlineExceeded, "%v", err) + case context.Canceled: + return streamErrorf(codes.Canceled, "%v", err) + } + return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err) +} + +// contextFromRequest returns a background context. +func contextFromRequest(r *http.Request) context.Context { + return context.Background() +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/transport/go17.go b/cluster-autoscaler/vendor/google.golang.org/grpc/transport/go17.go new file mode 100644 index 000000000000..b7fa6bdb9ca2 --- /dev/null +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/transport/go17.go @@ -0,0 +1,52 @@ +// +build go1.7 + +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
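> Editor's note: `onRead` above batches window updates: consumed bytes accumulate in `pendingUpdate`, and a WINDOW_UPDATE is only emitted once they reach a quarter of the flow-control limit. A stripped-down sketch of that threshold behavior (locking and the `delta` adjustment omitted):

```go
package main

import "fmt"

type inFlow struct {
	limit         uint32
	pendingData   uint32
	pendingUpdate uint32
}

func (f *inFlow) onData(n uint32) { f.pendingData += n }

// onRead returns the window update to send, or 0 if still batching.
func (f *inFlow) onRead(n uint32) uint32 {
	f.pendingData -= n
	f.pendingUpdate += n
	if f.pendingUpdate >= f.limit/4 {
		wu := f.pendingUpdate
		f.pendingUpdate = 0
		return wu
	}
	return 0
}

func main() {
	f := &inFlow{limit: 65535}
	f.onData(10000)
	fmt.Println(f.onRead(10000)) // 0: below the 16383-byte threshold
	f.onData(10000)
	fmt.Println(f.onRead(10000)) // 20000: threshold crossed, update sent
}
```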
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "context" + "net" + "net/http" + + "google.golang.org/grpc/codes" + + netctx "golang.org/x/net/context" +) + +// dialContext connects to the address on the named network. +func dialContext(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{}).DialContext(ctx, network, address) +} + +// ContextErr converts the error from context package into a StreamError. +func ContextErr(err error) StreamError { + switch err { + case context.DeadlineExceeded, netctx.DeadlineExceeded: + return streamErrorf(codes.DeadlineExceeded, "%v", err) + case context.Canceled, netctx.Canceled: + return streamErrorf(codes.Canceled, "%v", err) + } + return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err) +} + +// contextFromRequest returns a context from the HTTP Request. +func contextFromRequest(r *http.Request) context.Context { + return r.Context() +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/transport/handler_server.go b/cluster-autoscaler/vendor/google.golang.org/grpc/transport/handler_server.go index 7e0fdb35938b..f71b74821749 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/transport/handler_server.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/transport/handler_server.go @@ -40,20 +40,24 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) // NewServerHandlerTransport returns a ServerTransport handling gRPC // from inside an http.Handler. It requires that the http Server // supports HTTP/2. -func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTransport, error) { +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) { if r.ProtoMajor != 2 { return nil, errors.New("gRPC requires HTTP/2") } if r.Method != "POST" { return nil, errors.New("invalid gRPC request method") } - if !validContentType(r.Header.Get("Content-Type")) { + contentType := r.Header.Get("Content-Type") + // TODO: do we assume contentType is lowercase? 
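> Editor's note: the go16.go/go17.go pair above version-splits `dialContext`, `ContextErr`, and `contextFromRequest` behind build tags. The error mapping itself is simple; a sketch using the stdlib context package (Go ≥ 1.7) with the real gRPC status codes:

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/codes"
)

// contextErrCode mirrors the mapping in ContextErr: deadline expiry and
// cancellation get dedicated codes, anything else is Internal.
func contextErrCode(err error) codes.Code {
	switch err {
	case context.DeadlineExceeded:
		return codes.DeadlineExceeded
	case context.Canceled:
		return codes.Canceled
	}
	return codes.Internal
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	fmt.Println(contextErrCode(ctx.Err())) // Canceled
}
```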
we did before + contentSubtype, validContentType := contentSubtype(contentType) + if !validContentType { return nil, errors.New("invalid gRPC request content-type") } if _, ok := w.(http.Flusher); !ok { @@ -64,10 +68,13 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTr } st := &serverHandlerTransport{ - rw: w, - req: r, - closedCh: make(chan struct{}), - writes: make(chan func()), + rw: w, + req: r, + closedCh: make(chan struct{}), + writes: make(chan func()), + contentType: contentType, + contentSubtype: contentSubtype, + stats: stats, } if v := r.Header.Get("grpc-timeout"); v != "" { @@ -79,19 +86,19 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTr st.timeout = to } - var metakv []string + metakv := []string{"content-type", contentType} if r.Host != "" { metakv = append(metakv, ":authority", r.Host) } for k, vv := range r.Header { k = strings.ToLower(k) - if isReservedHeader(k) && !isWhitelistedPseudoHeader(k) { + if isReservedHeader(k) && !isWhitelistedHeader(k) { continue } for _, v := range vv { v, err := decodeMetadataHeader(k, v) if err != nil { - return nil, streamErrorf(codes.InvalidArgument, "malformed binary metadata: %v", err) + return nil, streamErrorf(codes.Internal, "malformed binary metadata: %v", err) } metakv = append(metakv, k, v) } @@ -126,6 +133,14 @@ type serverHandlerTransport struct { // block concurrent WriteStatus calls // e.g. grpc/(*serverStream).SendMsg/RecvMsg writeStatusMu sync.Mutex + + // we just mirror the request content-type + contentType string + // we store both contentType and contentSubtype so we don't keep recreating them + // TODO make sure this is consistent across handler_server and http2_server + contentSubtype string + + stats stats.Handler } func (ht *serverHandlerTransport) Close() error { @@ -219,6 +234,9 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro }) if err == nil { // transport has not been closed + if ht.stats != nil { + ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{}) + } ht.Close() close(ht.writes) } @@ -235,7 +253,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { h := ht.rw.Header() h["Date"] = nil // suppress Date to make tests happy; TODO: restore - h.Set("Content-Type", "application/grpc") + h.Set("Content-Type", ht.contentType) // Predeclare trailers we'll set later in WriteStatus (after the body). // This is a SHOULD in the HTTP RFC, and the way you add (known) @@ -263,7 +281,7 @@ func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts } func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { - return ht.do(func() { + err := ht.do(func() { ht.writeCommonHeaders(s) h := ht.rw.Header() for k, vv := range md { @@ -279,17 +297,24 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { ht.rw.WriteHeader(200) ht.rw.(http.Flusher).Flush() }) + + if err == nil { + if ht.stats != nil { + ht.stats.HandleRPC(s.Context(), &stats.OutHeader{}) + } + } + return err } func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { // With this transport type there will be exactly 1 stream: this HTTP request. 
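> Editor's note: the handler transport above now validates and remembers the request's content subtype instead of hard-coding `application/grpc`. The parsing convention is that `application/grpc` has an empty subtype and `application/grpc+proto` yields `proto`. A rough reimplementation for illustration (the vendored helper also handles case and the `;` separator differently in later revisions):

```go
package main

import (
	"fmt"
	"strings"
)

const baseContentType = "application/grpc"

// contentSubtype returns the subtype and whether the content type is
// a valid gRPC content type at all.
func contentSubtype(contentType string) (string, bool) {
	if !strings.HasPrefix(contentType, baseContentType) {
		return "", false
	}
	switch rest := contentType[len(baseContentType):]; {
	case rest == "":
		return "", true
	case rest[0] == '+' || rest[0] == ';':
		return rest[1:], true
	}
	return "", false
}

func main() {
	fmt.Println(contentSubtype("application/grpc+proto")) // proto true
	fmt.Println(contentSubtype("text/html"))              //  false
}
```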
- var ctx context.Context + ctx := contextFromRequest(ht.req) var cancel context.CancelFunc if ht.timeoutSet { - ctx, cancel = context.WithTimeout(context.Background(), ht.timeout) + ctx, cancel = context.WithTimeout(ctx, ht.timeout) } else { - ctx, cancel = context.WithCancel(context.Background()) + ctx, cancel = context.WithCancel(ctx) } // requestOver is closed when either the request's context is done @@ -313,13 +338,14 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace req := ht.req s := &Stream{ - id: 0, // irrelevant - requestRead: func(int) {}, - cancel: cancel, - buf: newRecvBuffer(), - st: ht, - method: req.URL.Path, - recvCompress: req.Header.Get("grpc-encoding"), + id: 0, // irrelevant + requestRead: func(int) {}, + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, } pr := &peer.Peer{ Addr: ht.RemoteAddr(), @@ -328,10 +354,18 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace pr.AuthInfo = credentials.TLSInfo{State: *req.TLS} } ctx = metadata.NewIncomingContext(ctx, ht.headerMD) - ctx = peer.NewContext(ctx, pr) - s.ctx = newContextWithStream(ctx, s) + s.ctx = peer.NewContext(ctx, pr) + if ht.stats != nil { + s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ + FullMethod: s.method, + RemoteAddr: ht.RemoteAddr(), + Compression: s.recvCompress, + } + ht.stats.HandleRPC(s.ctx, inHeader) + } s.trReader = &transportReader{ - reader: &recvBufferReader{ctx: s.ctx, recv: s.buf}, + reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf}, windowHandler: func(int) {}, } @@ -386,6 +420,10 @@ func (ht *serverHandlerTransport) runStream() { } } +func (ht *serverHandlerTransport) IncrMsgSent() {} + +func (ht *serverHandlerTransport) IncrMsgRecv() {} + func (ht *serverHandlerTransport) Drain() { panic("Drain() is not implemented") } diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/transport/http2_client.go b/cluster-autoscaler/vendor/google.golang.org/grpc/transport/http2_client.go index 1abb62e6df4a..eaf007eb0ae4 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/transport/http2_client.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/transport/http2_client.go @@ -19,7 +19,6 @@ package transport import ( - "bytes" "io" "math" "net" @@ -31,8 +30,10 @@ import ( "golang.org/x/net/context" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -44,15 +45,17 @@ import ( type http2Client struct { ctx context.Context cancel context.CancelFunc - target string // server name/addr + ctxDone <-chan struct{} // Cache the ctx.Done() chan. userAgent string md interface{} conn net.Conn // underlying communication channel + loopy *loopyWriter remoteAddr net.Addr localAddr net.Addr authInfo credentials.AuthInfo // auth info about the connection - nextID uint32 // the next stream ID to be used + readerDone chan struct{} // sync point to enable testing. + writerDone chan struct{} // sync point to enable testing. // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) // that the server sent GoAway on this transport. 
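> Editor's note: `HandleStreams` above now derives the stream context from the HTTP request (via `contextFromRequest`) rather than from `context.Background()`, then layers the decoded `grpc-timeout` on top. A sketch of that derivation with the timeout decoding stubbed out; `streamContext` is our name:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// streamContext starts from the request's context so the stream dies
// with the HTTP request, and applies grpc-timeout when one was set.
func streamContext(r *http.Request, timeout time.Duration, timeoutSet bool) (context.Context, context.CancelFunc) {
	ctx := r.Context()
	if timeoutSet {
		return context.WithTimeout(ctx, timeout)
	}
	return context.WithCancel(ctx)
}

func main() {
	r, _ := http.NewRequest("POST", "/svc/method", nil)
	ctx, cancel := streamContext(r, 50*time.Millisecond, true)
	defer cancel()
	dl, ok := ctx.Deadline()
	fmt.Println(ok, time.Until(dl) <= 50*time.Millisecond) // true true
}
```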
 	goAway chan struct{}
@@ -60,18 +63,10 @@ type http2Client struct {
 	awakenKeepalive chan struct{}
 
 	framer *framer
-	hBuf   *bytes.Buffer  // the buffer for HPACK encoding
-	hEnc   *hpack.Encoder // HPACK encoder
-
 	// controlBuf delivers all the control related tasks (e.g., window
 	// updates, reset streams, and various settings) to the controller.
 	controlBuf *controlBuffer
-	fc         *inFlow
-	// sendQuotaPool provides flow control to outbound message.
-	sendQuotaPool *quotaPool
-	// streamsQuota limits the max number of concurrent streams.
-	streamsQuota *quotaPool
-
+	fc *trInFlow
 	// The scheme used: https if TLS is on, http otherwise.
 	scheme string
@@ -81,50 +76,60 @@ type http2Client struct {
 	// Boolean to keep track of reading activity on transport.
 	// 1 is true and 0 is false.
-	activity uint32 // Accessed atomically.
-	kp       keepalive.ClientParameters
+	activity         uint32 // Accessed atomically.
+	kp               keepalive.ClientParameters
+	keepaliveEnabled bool
 
 	statsHandler stats.Handler
 
 	initialWindowSize int32
 
-	bdpEst          *bdpEstimator
-	outQuotaVersion uint32
+	bdpEst *bdpEstimator
+	// onSuccess is a callback that client transport calls upon
+	// receiving server preface to signal that a successful HTTP2
+	// connection was established.
+	onSuccess func()
 
-	mu            sync.Mutex     // guard the following variables
-	state         transportState // the state of underlying connection
+	maxConcurrentStreams  uint32
+	streamQuota           int64
+	streamsQuotaAvailable chan struct{}
+	waitingStreams        uint32
+	nextID                uint32
+
+	mu            sync.Mutex // guard the following variables
+	state         transportState
 	activeStreams map[uint32]*Stream
-	// The max number of concurrent streams
-	maxStreams int
-	// the per-stream outbound flow control window size set by the peer.
-	streamSendQuota uint32
 	// prevGoAway ID records the Last-Stream-ID in the previous GOAway frame.
 	prevGoAwayID uint32
 	// goAwayReason records the http2.ErrCode and debug data received with the
 	// GoAway frame.
 	goAwayReason GoAwayReason
+
+	// Fields below are for channelz metric collection.
+	channelzID int64 // channelz unique identification number
+	czmu       sync.RWMutex
+	kpCount    int64
+	// The number of streams that have started, including already finished ones.
+	streamsStarted int64
+	// The number of streams that have ended successfully by receiving EoS bit set
+	// frame from server.
+	streamsSucceeded  int64
+	streamsFailed     int64
+	lastStreamCreated time.Time
+	msgSent           int64
+	msgRecv           int64
+	lastMsgSent       time.Time
+	lastMsgRecv       time.Time
 }
 
 func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
 	if fn != nil {
 		return fn(ctx, addr)
 	}
-	return (&net.Dialer{}).DialContext(ctx, "tcp", addr)
+	return dialContext(ctx, "tcp", addr)
}
 
 func isTemporary(err error) bool {
-	switch err {
-	case io.EOF:
-		// Connection closures may be resolved upon retry, and are thus
-		// treated as temporary.
-		return true
-	case context.DeadlineExceeded:
-		// In Go 1.7, context.DeadlineExceeded implements Timeout(), and this
-		// special case is not needed. Until then, we need to keep this
-		// clause.
-		return true
-	}
-
 	switch err := err.(type) {
 	case interface {
 		Temporary() bool
@@ -137,18 +142,16 @@ func isTemporary(err error) bool {
 		// temporary.
 		return err.Timeout()
 	}
-	return false
+	return true
 }
 
 // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
 // and starts to receive messages on it. Non-nil error returns if construction
 // fails.
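
// isTemporary above now defaults to treating unknown errors as temporary,
// asking the error itself via Temporary()/Timeout() when available. A
// standalone sketch of the same classification (the demo error types are
// invented for illustration):
package main

import (
	"errors"
	"fmt"
)

type timeoutErr struct{}

func (timeoutErr) Error() string { return "i/o timeout" }
func (timeoutErr) Timeout() bool { return true }

type permanentErr struct{}

func (permanentErr) Error() string   { return "bad certificate" }
func (permanentErr) Temporary() bool { return false }

func isTemporaryErr(err error) bool {
	switch err := err.(type) {
	case interface{ Temporary() bool }:
		return err.Temporary()
	case interface{ Timeout() bool }:
		// Timeouts may be resolved upon retry, and are thus treated as temporary.
		return err.Timeout()
	}
	// Anything we cannot classify is assumed retryable.
	return true
}

func main() {
	fmt.Println(isTemporaryErr(timeoutErr{}))          // true: Timeout() says so
	fmt.Println(isTemporaryErr(permanentErr{}))        // false: Temporary() says no
	fmt.Println(isTemporaryErr(errors.New("unknown"))) // true: the new default
}
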
-func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, timeout time.Duration) (_ ClientTransport, err error) { +func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onSuccess func()) (_ ClientTransport, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) - connectCtx, connectCancel := context.WithTimeout(ctx, timeout) defer func() { - connectCancel() if err != nil { cancel() } @@ -173,12 +176,9 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, t ) if creds := opts.TransportCredentials; creds != nil { scheme = "https" - conn, authInfo, err = creds.ClientHandshake(connectCtx, addr.Addr, conn) + conn, authInfo, err = creds.ClientHandshake(connectCtx, addr.Authority, conn) if err != nil { - // Credentials handshake errors are typically considered permanent - // to avoid retrying on e.g. bad certificates. - temp := isTemporary(err) - return nil, connectionErrorf(temp, err, "transport: authentication handshake failed: %v", err) + return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) } isSecure = true } @@ -196,7 +196,6 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, t icwz = opts.InitialConnWindowSize dynamicWindow = false } - var buf bytes.Buffer writeBufSize := defaultWriteBufSize if opts.WriteBufferSize > 0 { writeBufSize = opts.WriteBufferSize @@ -206,37 +205,35 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, t readBufSize = opts.ReadBufferSize } t := &http2Client{ - ctx: ctx, - cancel: cancel, - target: addr.Addr, - userAgent: opts.UserAgent, - md: addr.Metadata, - conn: conn, - remoteAddr: conn.RemoteAddr(), - localAddr: conn.LocalAddr(), - authInfo: authInfo, - // The client initiated stream id is odd starting from 1. - nextID: 1, - goAway: make(chan struct{}), - awakenKeepalive: make(chan struct{}, 1), - hBuf: &buf, - hEnc: hpack.NewEncoder(&buf), - framer: newFramer(conn, writeBufSize, readBufSize), - controlBuf: newControlBuffer(), - fc: &inFlow{limit: uint32(icwz)}, - sendQuotaPool: newQuotaPool(defaultWindowSize), - scheme: scheme, - state: reachable, - activeStreams: make(map[uint32]*Stream), - isSecure: isSecure, - creds: opts.PerRPCCredentials, - maxStreams: defaultMaxStreamsClient, - streamsQuota: newQuotaPool(defaultMaxStreamsClient), - streamSendQuota: defaultWindowSize, - kp: kp, - statsHandler: opts.StatsHandler, - initialWindowSize: initialWindowSize, - } + ctx: ctx, + ctxDone: ctx.Done(), // Cache Done chan. 
+ cancel: cancel, + userAgent: opts.UserAgent, + md: addr.Metadata, + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: authInfo, + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), + goAway: make(chan struct{}), + awakenKeepalive: make(chan struct{}, 1), + framer: newFramer(conn, writeBufSize, readBufSize), + fc: &trInFlow{limit: uint32(icwz)}, + scheme: scheme, + activeStreams: make(map[uint32]*Stream), + isSecure: isSecure, + creds: opts.PerRPCCredentials, + kp: kp, + statsHandler: opts.StatsHandler, + initialWindowSize: initialWindowSize, + onSuccess: onSuccess, + nextID: 1, + maxConcurrentStreams: defaultMaxStreamsClient, + streamQuota: defaultMaxStreamsClient, + streamsQuotaAvailable: make(chan struct{}, 1), + } + t.controlBuf = newControlBuffer(t.ctxDone) if opts.InitialWindowSize >= defaultWindowSize { t.initialWindowSize = opts.InitialWindowSize dynamicWindow = false @@ -260,6 +257,13 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, t } t.statsHandler.HandleConn(t.ctx, connBegin) } + if channelz.IsOn() { + t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, "") + } + if t.kp.Time != infinity { + t.keepaliveEnabled = true + go t.keepalive() + } // Start the reader goroutine for incoming message. Each transport has // a dedicated goroutine which reads HTTP2 frame from network. Then it // dispatches the frame to the corresponding stream entity. @@ -295,30 +299,32 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, t } t.framer.writer.Flush() go func() { - loopyWriter(t.ctx, t.controlBuf, t.itemHandler) - t.Close() + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) + err := t.loopy.run() + if err != nil { + errorf("transport: loopyWriter.run returning. Err: %v", err) + } + // If it's a connection error, let reader goroutine handle it + // since there might be data in the buffers. + if _, ok := err.(net.Error); !ok { + t.conn.Close() + } + close(t.writerDone) }() - if t.kp.Time != infinity { - go t.keepalive() - } return t, nil } func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { // TODO(zhaoq): Handle uint32 overflow of Stream.id. s := &Stream{ - id: t.nextID, done: make(chan struct{}), - goAway: make(chan struct{}), method: callHdr.Method, sendCompress: callHdr.SendCompress, buf: newRecvBuffer(), - fc: &inFlow{limit: uint32(t.initialWindowSize)}, - sendQuotaPool: newQuotaPool(int(t.streamSendQuota)), - localSendQuota: newQuotaPool(defaultLocalSendQuota), headerChan: make(chan struct{}), + contentSubtype: callHdr.ContentSubtype, } - t.nextID += 2 + s.wq = newWriteQuota(defaultWriteQuota, s.done) s.requestRead = func(n int) { t.adjustWindow(s, uint32(n)) } @@ -328,21 +334,18 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { s.ctx = ctx s.trReader = &transportReader{ reader: &recvBufferReader{ - ctx: s.ctx, - goAway: s.goAway, - recv: s.buf, + ctx: s.ctx, + ctxDone: s.ctx.Done(), + recv: s.buf, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) }, } - return s } -// NewStream creates a stream and registers it into the transport as "active" -// streams. 
-func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { +func (t *http2Client) getPeer() *peer.Peer { pr := &peer.Peer{ Addr: t.remoteAddr, } @@ -350,74 +353,20 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea if t.authInfo != nil { pr.AuthInfo = t.authInfo } - ctx = peer.NewContext(ctx, pr) - var ( - authData = make(map[string]string) - audience string - ) - // Create an audience string only if needed. - if len(t.creds) > 0 || callHdr.Creds != nil { - // Construct URI required to get auth request metadata. - // Omit port if it is the default one. - host := strings.TrimSuffix(callHdr.Host, ":443") - pos := strings.LastIndex(callHdr.Method, "/") - if pos == -1 { - pos = len(callHdr.Method) - } - audience = "https://" + host + callHdr.Method[:pos] - } - for _, c := range t.creds { - data, err := c.GetRequestMetadata(ctx, audience) - if err != nil { - return nil, streamErrorf(codes.Internal, "transport: %v", err) - } - for k, v := range data { - // Capital header names are illegal in HTTP/2. - k = strings.ToLower(k) - authData[k] = v - } - } - callAuthData := map[string]string{} - // Check if credentials.PerRPCCredentials were provided via call options. - // Note: if these credentials are provided both via dial options and call - // options, then both sets of credentials will be applied. - if callCreds := callHdr.Creds; callCreds != nil { - if !t.isSecure && callCreds.RequireTransportSecurity() { - return nil, streamErrorf(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") - } - data, err := callCreds.GetRequestMetadata(ctx, audience) - if err != nil { - return nil, streamErrorf(codes.Internal, "transport: %v", err) - } - for k, v := range data { - // Capital header names are illegal in HTTP/2 - k = strings.ToLower(k) - callAuthData[k] = v - } - } - t.mu.Lock() - if t.activeStreams == nil { - t.mu.Unlock() - return nil, ErrConnClosing - } - if t.state == draining { - t.mu.Unlock() - return nil, ErrStreamDrain - } - if t.state != reachable { - t.mu.Unlock() - return nil, ErrConnClosing - } - t.mu.Unlock() - sq, err := wait(ctx, t.ctx, nil, nil, t.streamsQuota.acquire()) + return pr +} + +func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) { + aud := t.createAudience(callHdr) + authData, err := t.getTrAuthData(ctx, aud) if err != nil { return nil, err } - // Returns the quota balance back. - if sq > 1 { - t.streamsQuota.add(sq - 1) + callAuthData, err := t.getCallAuthData(ctx, aud, callHdr) + if err != nil { + return nil, err } - // TODO(mmukhi): Benchmark if the perfomance gets better if count the metadata and other header fields + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. // Make the slice of certain predictable size to reduce allocations made by append. 
hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te @@ -427,7 +376,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme}) headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method}) headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) - headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(callHdr.ContentSubtype)}) headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"}) @@ -452,7 +401,22 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea if b := stats.OutgoingTrace(ctx); b != nil { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) } - if md, ok := metadata.FromOutgoingContext(ctx); ok { + + if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + var k string + for _, vv := range added { + for i, v := range vv { + if i%2 == 0 { + k = v + continue + } + // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. + if isReservedHeader(k) { + continue + } + headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)}) + } + } for k, vv := range md { // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. if isReservedHeader(k) { @@ -473,42 +437,178 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } } } - t.mu.Lock() - if t.state == draining { - t.mu.Unlock() - t.streamsQuota.add(1) - return nil, ErrStreamDrain + return headerFields, nil +} + +func (t *http2Client) createAudience(callHdr *CallHdr) string { + // Create an audience string only if needed. + if len(t.creds) == 0 && callHdr.Creds == nil { + return "" } - if t.state != reachable { - t.mu.Unlock() - return nil, ErrConnClosing + // Construct URI required to get auth request metadata. + // Omit port if it is the default one. + host := strings.TrimSuffix(callHdr.Host, ":443") + pos := strings.LastIndex(callHdr.Method, "/") + if pos == -1 { + pos = len(callHdr.Method) + } + return "https://" + host + callHdr.Method[:pos] +} + +func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) { + authData := map[string]string{} + for _, c := range t.creds { + data, err := c.GetRequestMetadata(ctx, audience) + if err != nil { + if _, ok := status.FromError(err); ok { + return nil, err + } + + return nil, streamErrorf(codes.Unauthenticated, "transport: %v", err) + } + for k, v := range data { + // Capital header names are illegal in HTTP/2. + k = strings.ToLower(k) + authData[k] = v + } + } + return authData, nil +} + +func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) { + callAuthData := map[string]string{} + // Check if credentials.PerRPCCredentials were provided via call options. + // Note: if these credentials are provided both via dial options and call + // options, then both sets of credentials will be applied. 
+ if callCreds := callHdr.Creds; callCreds != nil { + if !t.isSecure && callCreds.RequireTransportSecurity() { + return nil, streamErrorf(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") + } + data, err := callCreds.GetRequestMetadata(ctx, audience) + if err != nil { + return nil, streamErrorf(codes.Internal, "transport: %v", err) + } + for k, v := range data { + // Capital header names are illegal in HTTP/2 + k = strings.ToLower(k) + callAuthData[k] = v + } + } + return callAuthData, nil +} + +// NewStream creates a stream and registers it into the transport as "active" +// streams. +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { + ctx = peer.NewContext(ctx, t.getPeer()) + headerFields, err := t.createHeaderFields(ctx, callHdr) + if err != nil { + return nil, err } s := t.newStream(ctx, callHdr) - t.activeStreams[s.id] = s - // If the number of active streams change from 0 to 1, then check if keepalive - // has gone dormant. If so, wake it up. - if len(t.activeStreams) == 1 { - select { - case t.awakenKeepalive <- struct{}{}: - t.controlBuf.put(&ping{data: [8]byte{}}) - // Fill the awakenKeepalive channel again as this channel must be - // kept non-writable except at the point that the keepalive() - // goroutine is waiting either to be awaken or shutdown. - t.awakenKeepalive <- struct{}{} - default: + cleanup := func(err error) { + if s.swapState(streamDone) == streamDone { + // If it was already done, return. + return + } + // The stream was unprocessed by the server. + atomic.StoreUint32(&s.unprocessed, 1) + s.write(recvMsg{err: err}) + close(s.done) + // If headerChan isn't closed, then close it. + if atomic.SwapUint32(&s.headerDone, 1) == 0 { + close(s.headerChan) } + } - t.controlBuf.put(&headerFrame{ - streamID: s.id, + hdr := &headerFrame{ hf: headerFields, endStream: false, - }) - t.mu.Unlock() - - s.mu.Lock() - s.bytesSent = true - s.mu.Unlock() - + initStream: func(id uint32) (bool, error) { + t.mu.Lock() + if state := t.state; state != reachable { + t.mu.Unlock() + // Do a quick cleanup. + err := error(errStreamDrain) + if state == closing { + err = ErrConnClosing + } + cleanup(err) + return false, err + } + t.activeStreams[id] = s + if channelz.IsOn() { + t.czmu.Lock() + t.streamsStarted++ + t.lastStreamCreated = time.Now() + t.czmu.Unlock() + } + var sendPing bool + // If the number of active streams change from 0 to 1, then check if keepalive + // has gone dormant. If so, wake it up. + if len(t.activeStreams) == 1 && t.keepaliveEnabled { + select { + case t.awakenKeepalive <- struct{}{}: + sendPing = true + // Fill the awakenKeepalive channel again as this channel must be + // kept non-writable except at the point that the keepalive() + // goroutine is waiting either to be awaken or shutdown. + t.awakenKeepalive <- struct{}{} + default: + } + } + t.mu.Unlock() + return sendPing, nil + }, + onOrphaned: cleanup, + wq: s.wq, + } + firstTry := true + var ch chan struct{} + checkForStreamQuota := func(it interface{}) bool { + if t.streamQuota <= 0 { // Can go negative if server decreases it. 
+ if firstTry { + t.waitingStreams++ + } + ch = t.streamsQuotaAvailable + return false + } + if !firstTry { + t.waitingStreams-- + } + t.streamQuota-- + h := it.(*headerFrame) + h.streamID = t.nextID + t.nextID += 2 + s.id = h.streamID + s.fc = &inFlow{limit: uint32(t.initialWindowSize)} + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: + default: + } + } + return true + } + for { + success, err := t.controlBuf.executeAndPut(checkForStreamQuota, hdr) + if err != nil { + return nil, err + } + if success { + break + } + firstTry = false + select { + case <-ch: + case <-s.ctx.Done(): + return nil, ContextErr(s.ctx.Err()) + case <-t.goAway: + return nil, errStreamDrain + case <-t.ctx.Done(): + return nil, ErrConnClosing + } + } if t.statsHandler != nil { outHeader := &stats.OutHeader{ Client: true, @@ -525,86 +625,97 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea // CloseStream clears the footprint of a stream when the stream is not needed any more. // This must not be executed in reader's goroutine. func (t *http2Client) CloseStream(s *Stream, err error) { - t.mu.Lock() - if t.activeStreams == nil { - t.mu.Unlock() - return - } + var ( + rst bool + rstCode http2.ErrCode + ) if err != nil { - // notify in-flight streams, before the deletion - s.write(recvMsg{err: err}) + rst = true + rstCode = http2.ErrCodeCancel } - delete(t.activeStreams, s.id) - if t.state == draining && len(t.activeStreams) == 0 { - // The transport is draining and s is the last live stream on t. - t.mu.Unlock() - t.Close() + t.closeStream(s, err, rst, rstCode, nil, nil, false) +} + +func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { + // Set stream status to done. + if s.swapState(streamDone) == streamDone { + // If it was already done, return. return } - t.mu.Unlock() - // rstStream is true in case the stream is being closed at the client-side - // and the server needs to be intimated about it by sending a RST_STREAM - // frame. - // To make sure this frame is written to the wire before the headers of the - // next stream waiting for streamsQuota, we add to streamsQuota pool only - // after having acquired the writableChan to send RST_STREAM out (look at - // the controller() routine). - var rstStream bool - var rstError http2.ErrCode - defer func() { - // In case, the client doesn't have to send RST_STREAM to server - // we can safely add back to streamsQuota pool now. - if !rstStream { - t.streamsQuota.add(1) - return - } - t.controlBuf.put(&resetStream{s.id, rstError}) - }() - s.mu.Lock() - rstStream = s.rstStream - rstError = s.rstError - if s.state == streamDone { - s.mu.Unlock() - return + // status and trailers can be updated here without any synchronization because the stream goroutine will + // only read it after it sees an io.EOF error from read or write and we'll write those errors + // only after updating this. + s.status = st + if len(mdata) > 0 { + s.trailer = mdata } - if !s.headerDone { + if err != nil { + // This will unblock reads eventually. + s.write(recvMsg{err: err}) + } + // This will unblock write. + close(s.done) + // If headerChan isn't closed, then close it. 
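
// NewStream above stopped blocking on a quota pool: checkForStreamQuota runs
// atomically inside controlBuf.executeAndPut, and on failure the caller waits
// on streamsQuotaAvailable and retries. A simplified, self-contained sketch of
// that check-or-wait shape; demoQuota and its methods are illustrative names,
// not transport types. The wakeup channel is buffered (capacity 1) so a
// signal sent before the waiter blocks is not lost:
package main

import (
	"fmt"
	"sync"
)

type demoQuota struct {
	mu        sync.Mutex
	quota     int64
	available chan struct{} // capacity 1: at most one pending wakeup
}

// tryAcquire mirrors checkForStreamQuota: it either takes a slot or reports
// the channel the caller must wait on before retrying.
func (q *demoQuota) tryAcquire() (ok bool, wait <-chan struct{}) {
	q.mu.Lock()
	defer q.mu.Unlock()
	if q.quota <= 0 { // can go negative if the server lowers MAX_CONCURRENT_STREAMS
		return false, q.available
	}
	q.quota--
	return true, nil
}

// release mirrors addBackStreamQuota: return a slot and wake one waiter.
func (q *demoQuota) release() {
	q.mu.Lock()
	defer q.mu.Unlock()
	q.quota++
	if q.quota > 0 {
		select {
		case q.available <- struct{}{}:
		default:
		}
	}
}

func main() {
	q := &demoQuota{quota: 1, available: make(chan struct{}, 1)}
	ok, _ := q.tryAcquire()
	fmt.Println("first acquire:", ok) // true
	ok, wait := q.tryAcquire()
	fmt.Println("second acquire:", ok) // false: must wait
	go q.release()
	<-wait // woken once quota is returned
	ok, _ = q.tryAcquire()
	fmt.Println("retry:", ok) // true
}
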
+ if atomic.SwapUint32(&s.headerDone, 1) == 0 { close(s.headerChan) - s.headerDone = true } - s.state = streamDone - s.mu.Unlock() - if _, ok := err.(StreamError); ok { - rstStream = true - rstError = http2.ErrCodeCancel + cleanup := &cleanupStream{ + streamID: s.id, + onWrite: func() { + t.mu.Lock() + if t.activeStreams != nil { + delete(t.activeStreams, s.id) + } + t.mu.Unlock() + if channelz.IsOn() { + t.czmu.Lock() + if eosReceived { + t.streamsSucceeded++ + } else { + t.streamsFailed++ + } + t.czmu.Unlock() + } + }, + rst: rst, + rstCode: rstCode, + } + addBackStreamQuota := func(interface{}) bool { + t.streamQuota++ + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: + default: + } + } + return true } + t.controlBuf.executeAndPut(addBackStreamQuota, cleanup) } // Close kicks off the shutdown process of the transport. This should be called // only once on a transport. Once it is called, the transport should not be // accessed any more. -func (t *http2Client) Close() (err error) { +func (t *http2Client) Close() error { t.mu.Lock() + // Make sure we only Close once. if t.state == closing { t.mu.Unlock() - return + return nil } t.state = closing - t.mu.Unlock() - t.cancel() - err = t.conn.Close() - t.mu.Lock() streams := t.activeStreams t.activeStreams = nil t.mu.Unlock() + t.controlBuf.finish() + t.cancel() + err := t.conn.Close() + if channelz.IsOn() { + channelz.RemoveEntry(t.channelzID) + } // Notify all active streams. for _, s := range streams { - s.mu.Lock() - if !s.headerDone { - close(s.headerChan) - s.headerDone = true - } - s.mu.Unlock() - s.write(recvMsg{err: ErrConnClosing}) + t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, nil, nil, false) } if t.statsHandler != nil { connEnd := &stats.ConnEnd{ @@ -622,8 +733,8 @@ func (t *http2Client) Close() (err error) { // closing. func (t *http2Client) GracefulClose() error { t.mu.Lock() - switch t.state { - case closing, draining: + // Make sure we move to draining only from active. + if t.state == draining || t.state == closing { t.mu.Unlock() return nil } @@ -633,108 +744,41 @@ func (t *http2Client) GracefulClose() error { if active == 0 { return t.Close() } + t.controlBuf.put(&incomingGoAway{}) return nil } // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { - select { - case <-s.ctx.Done(): - return ContextErr(s.ctx.Err()) - case <-t.ctx.Done(): - return ErrConnClosing - default: - } - - if hdr == nil && data == nil && opts.Last { - // stream.CloseSend uses this to send an empty frame with endStream=True - t.controlBuf.put(&dataFrame{streamID: s.id, endStream: true, f: func() {}}) - return nil - } - // Add data to header frame so that we can equally distribute data across frames. - emptyLen := http2MaxFrameLen - len(hdr) - if emptyLen > len(data) { - emptyLen = len(data) - } - hdr = append(hdr, data[:emptyLen]...) - data = data[emptyLen:] - for idx, r := range [][]byte{hdr, data} { - for len(r) > 0 { - size := http2MaxFrameLen - // Wait until the stream has some quota to send the data. - quotaChan, quotaVer := s.sendQuotaPool.acquireWithVersion() - sq, err := wait(s.ctx, t.ctx, s.done, s.goAway, quotaChan) - if err != nil { - return err - } - // Wait until the transport has some quota to send the data. 
- tq, err := wait(s.ctx, t.ctx, s.done, s.goAway, t.sendQuotaPool.acquire()) - if err != nil { - return err - } - if sq < size { - size = sq - } - if tq < size { - size = tq - } - if size > len(r) { - size = len(r) - } - p := r[:size] - ps := len(p) - if ps < tq { - // Overbooked transport quota. Return it back. - t.sendQuotaPool.add(tq - ps) - } - // Acquire local send quota to be able to write to the controlBuf. - ltq, err := wait(s.ctx, t.ctx, s.done, s.goAway, s.localSendQuota.acquire()) - if err != nil { - if _, ok := err.(ConnectionError); !ok { - t.sendQuotaPool.add(ps) - } - return err - } - s.localSendQuota.add(ltq - ps) // It's ok if we make it negative. - var endStream bool - // See if this is the last frame to be written. - if opts.Last { - if len(r)-size == 0 { // No more data in r after this iteration. - if idx == 0 { // We're writing data header. - if len(data) == 0 { // There's no data to follow. - endStream = true - } - } else { // We're writing data. - endStream = true - } - } - } - success := func() { - t.controlBuf.put(&dataFrame{streamID: s.id, endStream: endStream, d: p, f: func() { s.localSendQuota.add(ps) }}) - if ps < sq { - s.sendQuotaPool.lockedAdd(sq - ps) - } - r = r[ps:] - } - failure := func() { - s.sendQuotaPool.lockedAdd(sq) - } - if !s.sendQuotaPool.compareAndExecute(quotaVer, success, failure) { - t.sendQuotaPool.add(ps) - s.localSendQuota.add(ps) - } + if opts.Last { + // If it's the last message, update stream state. + if !s.compareAndSwapState(streamActive, streamWriteDone) { + return errStreamDone } + } else if s.getState() != streamActive { + return errStreamDone } - if !opts.Last { - return nil - } - s.mu.Lock() - if s.state != streamDone { - s.state = streamWriteDone + df := &dataFrame{ + streamID: s.id, + endStream: opts.Last, + } + if hdr != nil || data != nil { // If it's not an empty data frame. + // Add some data to grpc message header so that we can equally + // distribute bytes across frames. + emptyLen := http2MaxFrameLen - len(hdr) + if emptyLen > len(data) { + emptyLen = len(data) + } + hdr = append(hdr, data[:emptyLen]...) + data = data[emptyLen:] + df.h, df.d = hdr, data + // TODO(mmukhi): The above logic in this if can be moved to loopyWriter's data handler. + if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + return err + } } - s.mu.Unlock() - return nil + return t.controlBuf.put(df) } func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) { @@ -748,34 +792,17 @@ func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) { // of stream if the application is requesting data larger in size than // the window. func (t *http2Client) adjustWindow(s *Stream, n uint32) { - s.mu.Lock() - defer s.mu.Unlock() - if s.state == streamDone { - return - } if w := s.fc.maybeAdjust(n); w > 0 { - // Piggyback connection's window update along. - if cw := t.fc.resetPendingUpdate(); cw > 0 { - t.controlBuf.put(&windowUpdate{0, cw}) - } - t.controlBuf.put(&windowUpdate{s.id, w}) + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } } -// updateWindow adjusts the inbound quota for the stream and the transport. -// Window updates will deliver to the controller for sending when -// the cumulative quota exceeds the corresponding threshold. +// updateWindow adjusts the inbound quota for the stream. +// Window updates will be sent out when the cumulative quota +// exceeds the corresponding threshold. 
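
// As the comment above says, window updates are coalesced until the unacked
// total crosses a threshold. A minimal sketch of such an accumulator; the
// one-quarter-of-limit threshold here is an assumption for illustration, not
// necessarily the vendored constant:
package main

import "fmt"

type demoInFlow struct {
	limit   uint32
	pending uint32 // bytes consumed but not yet announced to the peer
}

// onRead records n consumed bytes and returns a window increment to send,
// or 0 while we are still below the announcement threshold.
func (f *demoInFlow) onRead(n uint32) uint32 {
	f.pending += n
	if f.pending >= f.limit/4 {
		w := f.pending
		f.pending = 0
		return w
	}
	return 0
}

func main() {
	f := &demoInFlow{limit: 64 * 1024}
	for _, n := range []uint32{4096, 4096, 8192, 1} {
		if w := f.onRead(n); w > 0 {
			fmt.Printf("read %5d -> send WINDOW_UPDATE(+%d)\n", n, w)
		} else {
			fmt.Printf("read %5d -> coalesced\n", n)
		}
	}
}
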
func (t *http2Client) updateWindow(s *Stream, n uint32) { - s.mu.Lock() - defer s.mu.Unlock() - if s.state == streamDone { - return - } if w := s.fc.onRead(n); w > 0 { - if cw := t.fc.resetPendingUpdate(); cw > 0 { - t.controlBuf.put(&windowUpdate{0, cw}) - } - t.controlBuf.put(&windowUpdate{s.id, w}) + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } } @@ -787,15 +814,17 @@ func (t *http2Client) updateFlowControl(n uint32) { for _, s := range t.activeStreams { s.fc.newLimit(n) } - t.initialWindowSize = int32(n) t.mu.Unlock() - t.controlBuf.put(&windowUpdate{0, t.fc.newLimit(n)}) - t.controlBuf.put(&settings{ - ack: false, + updateIWS := func(interface{}) bool { + t.initialWindowSize = int32(n) + return true + } + t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)}) + t.controlBuf.put(&outgoingSettings{ ss: []http2.Setting{ { ID: http2.SettingInitialWindowSize, - Val: uint32(n), + Val: n, }, }, }) @@ -805,7 +834,7 @@ func (t *http2Client) handleData(f *http2.DataFrame) { size := f.Header().Length var sendBDPPing bool if t.bdpEst != nil { - sendBDPPing = t.bdpEst.add(uint32(size)) + sendBDPPing = t.bdpEst.add(size) } // Decouple connection's flow control from application's read. // An update on connection's flow control should not depend on @@ -816,21 +845,24 @@ func (t *http2Client) handleData(f *http2.DataFrame) { // active(fast) streams from starving in presence of slow or // inactive streams. // - // Furthermore, if a bdpPing is being sent out we can piggyback - // connection's window update for the bytes we just received. + if w := t.fc.onData(size); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } if sendBDPPing { - if size != 0 { // Could've been an empty data frame. - t.controlBuf.put(&windowUpdate{0, uint32(size)}) + // Avoid excessive ping detection (e.g. in an L7 proxy) + // by sending a window update prior to the BDP ping. + + if w := t.fc.reset(); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) } + t.controlBuf.put(bdpPing) - } else { - if err := t.fc.onData(uint32(size)); err != nil { - t.Close() - return - } - if w := t.fc.onRead(uint32(size)); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } } // Select the right stream to dispatch. s, ok := t.getStream(f) @@ -838,25 +870,15 @@ func (t *http2Client) handleData(f *http2.DataFrame) { return } if size > 0 { - s.mu.Lock() - if s.state == streamDone { - s.mu.Unlock() - return - } - if err := s.fc.onData(uint32(size)); err != nil { - s.rstStream = true - s.rstError = http2.ErrCodeFlowControl - s.finish(status.New(codes.Internal, err.Error())) - s.mu.Unlock() - s.write(recvMsg{err: io.EOF}) + if err := s.fc.onData(size); err != nil { + t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false) return } if f.Header().Flags.Has(http2.FlagDataPadded) { - if w := s.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 { - t.controlBuf.put(&windowUpdate{s.id, w}) + if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) } } - s.mu.Unlock() // TODO(bradfitz, zhaoq): A copy is required here because there is no // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? @@ -869,14 +891,7 @@ func (t *http2Client) handleData(f *http2.DataFrame) { // The server has closed the stream without sending trailers. 
Record that // the read direction is closed, and set the status appropriately. if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { - s.mu.Lock() - if s.state == streamDone { - s.mu.Unlock() - return - } - s.finish(status.New(codes.Internal, "server closed the stream without sending trailers")) - s.mu.Unlock() - s.write(recvMsg{err: io.EOF}) + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true) } } @@ -885,36 +900,55 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { if !ok { return } - s.mu.Lock() - if s.state == streamDone { - s.mu.Unlock() - return - } - if !s.headerDone { - close(s.headerChan) - s.headerDone = true + if f.ErrCode == http2.ErrCodeRefusedStream { + // The stream was unprocessed by the server. + atomic.StoreUint32(&s.unprocessed, 1) } - statusCode, ok := http2ErrConvTab[http2.ErrCode(f.ErrCode)] + statusCode, ok := http2ErrConvTab[f.ErrCode] if !ok { warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) statusCode = codes.Unknown } - s.finish(status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode)) - s.mu.Unlock() - s.write(recvMsg{err: io.EOF}) + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false) } -func (t *http2Client) handleSettings(f *http2.SettingsFrame) { +func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { if f.IsAck() { return } + var maxStreams *uint32 var ss []http2.Setting f.ForeachSetting(func(s http2.Setting) error { + if s.ID == http2.SettingMaxConcurrentStreams { + maxStreams = new(uint32) + *maxStreams = s.Val + return nil + } ss = append(ss, s) return nil }) - // The settings will be applied once the ack is sent. - t.controlBuf.put(&settings{ack: true, ss: ss}) + if isFirst && maxStreams == nil { + maxStreams = new(uint32) + *maxStreams = math.MaxUint32 + } + sf := &incomingSettings{ + ss: ss, + } + if maxStreams == nil { + t.controlBuf.put(sf) + return + } + updateStreamQuota := func(interface{}) bool { + delta := int64(*maxStreams) - int64(t.maxConcurrentStreams) + t.maxConcurrentStreams = *maxStreams + t.streamQuota += delta + if delta > 0 && t.waitingStreams > 0 { + close(t.streamsQuotaAvailable) // wake all of them up. + t.streamsQuotaAvailable = make(chan struct{}, 1) + } + return true + } + t.controlBuf.executeAndPut(updateStreamQuota, sf) } func (t *http2Client) handlePing(f *http2.PingFrame) { @@ -932,7 +966,7 @@ func (t *http2Client) handlePing(f *http2.PingFrame) { func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { t.mu.Lock() - if t.state != reachable && t.state != draining { + if t.state == closing { t.mu.Unlock() return } @@ -945,12 +979,16 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { t.Close() return } - // A client can receive multiple GoAways from server (look at https://github.com/grpc/grpc-go/issues/1387). - // The idea is that the first GoAway will be sent with an ID of MaxInt32 and the second GoAway will be sent after an RTT delay - // with the ID of the last stream the server will process. - // Therefore, when we get the first GoAway we don't really close any streams. While in case of second GoAway we - // close all streams created after the second GoAwayId. This way streams that were in-flight while the GoAway from server - // was being sent don't get killed. 
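
// Per the GOAWAY notes here: the first GOAWAY (ID MaxInt32) only marks the
// transport draining, the second carries the last stream ID the server will
// process, and streams between the two IDs are closed as unprocessed. A
// self-contained sketch of that selection (streamsToKill is an illustrative
// name; the real streams live in a map, not a slice):
package main

import (
	"fmt"
	"math"
)

// streamsToKill returns the IDs that must be closed as "unprocessed by the
// server" after a GOAWAY carrying lastProcessed, given the previous GOAWAY
// ID (0 when no GOAWAY has been seen yet).
func streamsToKill(active []uint32, lastProcessed, prevGoAwayID uint32) []uint32 {
	upper := prevGoAwayID
	if upper == 0 {
		upper = math.MaxUint32
	}
	var victims []uint32
	for _, id := range active {
		if id > lastProcessed && id <= upper {
			victims = append(victims, id)
		}
	}
	return victims
}

func main() {
	active := []uint32{1, 3, 5, 7, 9}
	// First GOAWAY: the server advertises MaxInt32, nothing is killed yet.
	fmt.Println(streamsToKill(active, math.MaxInt32, 0)) // []
	// Second GOAWAY: the server will process streams up to ID 5.
	fmt.Println(streamsToKill(active, 5, math.MaxInt32)) // [7 9]
}
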
+	// A client can receive multiple GoAways from the server (see
+	// https://github.com/grpc/grpc-go/issues/1387). The idea is that the first
+	// GoAway will be sent with an ID of MaxInt32 and the second GoAway will be
+	// sent after an RTT delay with the ID of the last stream the server will
+	// process.
+	//
+	// Therefore, when we get the first GoAway we don't necessarily close any
+	// streams. In case of a second GoAway we close all streams created after
+	// the GoAwayId. This way streams that were in-flight while the GoAway from
+	// server was being sent don't get killed.
 	select {
 	case <-t.goAway: // t.goAway has been closed (i.e., multiple GoAways).
 		// If there are multiple GoAways the first one should always have an ID greater than the following ones.
@@ -963,6 +1001,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
 		t.setGoAwayReason(f)
 		close(t.goAway)
 		t.state = draining
+		t.controlBuf.put(&incomingGoAway{})
 	}
 	// All streams with IDs greater than the GoAwayId
 	// and smaller than the previous GoAway ID should be killed.
@@ -972,7 +1011,9 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
 	}
 	for streamID, stream := range t.activeStreams {
 		if streamID > id && streamID <= upperLimit {
-			close(stream.goAway)
+			// The stream was unprocessed by the server.
+			atomic.StoreUint32(&stream.unprocessed, 1)
+			t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
 		}
 	}
 	t.prevGoAwayID = id
@@ -988,11 +1029,11 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
 // It expects a lock on transport's mutex to be held by
 // the caller.
 func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
-	t.goAwayReason = NoReason
+	t.goAwayReason = GoAwayNoReason
 	switch f.ErrCode {
 	case http2.ErrCodeEnhanceYourCalm:
 		if string(f.DebugData()) == "too_many_pings" {
-			t.goAwayReason = TooManyPings
+			t.goAwayReason = GoAwayTooManyPings
 		}
 	}
 }
@@ -1004,15 +1045,10 @@ func (t *http2Client) GetGoAwayReason() GoAwayReason {
 }
 
 func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
-	id := f.Header().StreamID
-	incr := f.Increment
-	if id == 0 {
-		t.sendQuotaPool.add(int(incr))
-		return
-	}
-	if s, ok := t.getStream(f); ok {
-		s.sendQuotaPool.add(int(incr))
-	}
+	t.controlBuf.put(&incomingWindowUpdate{
+		streamID:  f.Header().StreamID,
+		increment: f.Increment,
+	})
 }
 
 // operateHeaders takes action on the decoded headers.
@@ -1021,18 +1057,10 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
 	if !ok {
 		return
 	}
-	s.mu.Lock()
-	s.bytesReceived = true
-	s.mu.Unlock()
+	atomic.StoreUint32(&s.bytesReceived, 1)
 	var state decodeState
 	if err := state.decodeResponseHeader(frame); err != nil {
-		s.mu.Lock()
-		if !s.headerDone {
-			close(s.headerChan)
-			s.headerDone = true
-		}
-		s.mu.Unlock()
-		s.write(recvMsg{err: err})
+		t.closeStream(s, err, true, http2.ErrCodeProtocol, nil, nil, false)
 		// Something wrong. Stops reading even when there is remaining.
 		return
 	}
@@ -1056,40 +1084,25 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
 			}
 		}
 	}()
-
-	s.mu.Lock()
-	if !endStream {
-		s.recvCompress = state.encoding
-	}
-	if !s.headerDone {
-		if !endStream && len(state.mdata) > 0 {
-			s.header = state.mdata
+	// If headers haven't been received yet.
+	if atomic.SwapUint32(&s.headerDone, 1) == 0 {
+		if !endStream {
+			// Headers frame is not actually a trailers-only frame.
+ isHeader = true + // These values can be set without any synchronization because + // stream goroutine will read it only after seeing a closed + // headerChan which we'll close after setting this. + s.recvCompress = state.encoding + if len(state.mdata) > 0 { + s.header = state.mdata + } } close(s.headerChan) - s.headerDone = true - isHeader = true } - if !endStream || s.state == streamDone { - s.mu.Unlock() + if !endStream { return } - - if len(state.mdata) > 0 { - s.trailer = state.mdata - } - s.finish(state.status()) - s.mu.Unlock() - s.write(recvMsg{err: io.EOF}) -} - -func handleMalformedHTTP2(s *Stream, err error) { - s.mu.Lock() - if !s.headerDone { - close(s.headerChan) - s.headerDone = true - } - s.mu.Unlock() - s.write(recvMsg{err: err}) + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, state.status(), state.mdata, true) } // reader runs as a separate goroutine in charge of reading data from network @@ -1099,24 +1112,30 @@ func handleMalformedHTTP2(s *Stream, err error) { // optimal. // TODO(zhaoq): Check the validity of the incoming frame sequence. func (t *http2Client) reader() { + defer close(t.readerDone) // Check the validity of server preface. frame, err := t.framer.fr.ReadFrame() if err != nil { t.Close() return } - atomic.CompareAndSwapUint32(&t.activity, 0, 1) + if t.keepaliveEnabled { + atomic.CompareAndSwapUint32(&t.activity, 0, 1) + } sf, ok := frame.(*http2.SettingsFrame) if !ok { t.Close() return } - t.handleSettings(sf) + t.onSuccess() + t.handleSettings(sf, true) // loop to keep reading incoming messages on this transport. for { frame, err := t.framer.fr.ReadFrame() - atomic.CompareAndSwapUint32(&t.activity, 0, 1) + if t.keepaliveEnabled { + atomic.CompareAndSwapUint32(&t.activity, 0, 1) + } if err != nil { // Abort an active stream if the http2.Framer returns a // http2.StreamError. This can happen only if the server's response @@ -1127,7 +1146,7 @@ func (t *http2Client) reader() { t.mu.Unlock() if s != nil { // use error detail to provide better err message - handleMalformedHTTP2(s, streamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.fr.ErrorDetail())) + t.closeStream(s, streamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.fr.ErrorDetail()), true, http2.ErrCodeProtocol, nil, nil, false) } continue } else { @@ -1144,7 +1163,7 @@ func (t *http2Client) reader() { case *http2.RSTStreamFrame: t.handleRSTStream(frame) case *http2.SettingsFrame: - t.handleSettings(frame) + t.handleSettings(frame, false) case *http2.PingFrame: t.handlePing(frame) case *http2.GoAwayFrame: @@ -1157,107 +1176,6 @@ func (t *http2Client) reader() { } } -func (t *http2Client) applySettings(ss []http2.Setting) { - for _, s := range ss { - switch s.ID { - case http2.SettingMaxConcurrentStreams: - // TODO(zhaoq): This is a hack to avoid significant refactoring of the - // code to deal with the unrealistic int32 overflow. Probably will try - // to find a better way to handle this later. - if s.Val > math.MaxInt32 { - s.Val = math.MaxInt32 - } - t.mu.Lock() - ms := t.maxStreams - t.maxStreams = int(s.Val) - t.mu.Unlock() - t.streamsQuota.add(int(s.Val) - ms) - case http2.SettingInitialWindowSize: - t.mu.Lock() - for _, stream := range t.activeStreams { - // Adjust the sending quota for each stream. - stream.sendQuotaPool.addAndUpdate(int(s.Val) - int(t.streamSendQuota)) - } - t.streamSendQuota = s.Val - t.mu.Unlock() - } - } -} - -// TODO(mmukhi): A lot of this code(and code in other places in the tranpsort layer) -// is duplicated between the client and the server. 
-// The transport layer needs to be refactored to take care of this. -func (t *http2Client) itemHandler(i item) error { - var err error - switch i := i.(type) { - case *dataFrame: - err = t.framer.fr.WriteData(i.streamID, i.endStream, i.d) - if err == nil { - i.f() - } - case *headerFrame: - t.hBuf.Reset() - for _, f := range i.hf { - t.hEnc.WriteField(f) - } - endHeaders := false - first := true - for !endHeaders { - size := t.hBuf.Len() - if size > http2MaxFrameLen { - size = http2MaxFrameLen - } else { - endHeaders = true - } - if first { - first = false - err = t.framer.fr.WriteHeaders(http2.HeadersFrameParam{ - StreamID: i.streamID, - BlockFragment: t.hBuf.Next(size), - EndStream: i.endStream, - EndHeaders: endHeaders, - }) - } else { - err = t.framer.fr.WriteContinuation( - i.streamID, - endHeaders, - t.hBuf.Next(size), - ) - } - if err != nil { - return err - } - } - case *windowUpdate: - err = t.framer.fr.WriteWindowUpdate(i.streamID, i.increment) - case *settings: - if i.ack { - t.applySettings(i.ss) - err = t.framer.fr.WriteSettingsAck() - } else { - err = t.framer.fr.WriteSettings(i.ss...) - } - case *resetStream: - // If the server needs to be to intimated about stream closing, - // then we need to make sure the RST_STREAM frame is written to - // the wire before the headers of the next stream waiting on - // streamQuota. We ensure this by adding to the streamsQuota pool - // only after having acquired the writableChan to send RST_STREAM. - err = t.framer.fr.WriteRSTStream(i.streamID, i.code) - t.streamsQuota.add(1) - case *flushIO: - err = t.framer.writer.Flush() - case *ping: - if !i.ack { - t.bdpEst.timesnap(i.data) - } - err = t.framer.fr.WritePing(i.ack, i.data) - default: - errorf("transport: http2Client.controller got unexpected item type %v\n", i) - } - return err -} - // keepalive running in a separate goroutune makes sure the connection is alive by sending pings. func (t *http2Client) keepalive() { p := &ping{data: [8]byte{}} @@ -1284,6 +1202,11 @@ func (t *http2Client) keepalive() { } } else { t.mu.Unlock() + if channelz.IsOn() { + t.czmu.Lock() + t.kpCount++ + t.czmu.Unlock() + } // Send ping. 
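
// The keepalive goroutine here piggybacks on the reader's atomic activity
// flag: when the timer fires, a PING goes out only if no frame has arrived
// since the last check. A condensed sketch of that idle-detection loop; the
// real loop also handles dormancy, PermitWithoutStream, and ack timeouts,
// which this sketch omits:
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

func keepaliveLoop(activity *uint32, interval time.Duration, sendPing func(), stop <-chan struct{}) {
	timer := time.NewTimer(interval)
	defer timer.Stop()
	for {
		select {
		case <-timer.C:
			if atomic.CompareAndSwapUint32(activity, 1, 0) {
				// Traffic seen since the last tick: skip the ping, just re-arm.
				timer.Reset(interval)
				continue
			}
			sendPing()
			timer.Reset(interval)
		case <-stop:
			return
		}
	}
}

func main() {
	var activity uint32 // never set to 1 here, so the connection looks idle
	stop := make(chan struct{})
	go keepaliveLoop(&activity, 10*time.Millisecond, func() { fmt.Println("PING") }, stop)
	time.Sleep(35 * time.Millisecond) // idle: expect a few pings
	close(stop)
	time.Sleep(5 * time.Millisecond)
}
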
t.controlBuf.put(p) } @@ -1320,3 +1243,56 @@ func (t *http2Client) Error() <-chan struct{} { func (t *http2Client) GoAway() <-chan struct{} { return t.goAway } + +func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric { + t.czmu.RLock() + s := channelz.SocketInternalMetric{ + StreamsStarted: t.streamsStarted, + StreamsSucceeded: t.streamsSucceeded, + StreamsFailed: t.streamsFailed, + MessagesSent: t.msgSent, + MessagesReceived: t.msgRecv, + KeepAlivesSent: t.kpCount, + LastLocalStreamCreatedTimestamp: t.lastStreamCreated, + LastMessageSentTimestamp: t.lastMsgSent, + LastMessageReceivedTimestamp: t.lastMsgRecv, + LocalFlowControlWindow: int64(t.fc.getSize()), + //socket options + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + // Security + // RemoteName : + } + t.czmu.RUnlock() + s.RemoteFlowControlWindow = t.getOutFlowWindow() + return &s +} + +func (t *http2Client) IncrMsgSent() { + t.czmu.Lock() + t.msgSent++ + t.lastMsgSent = time.Now() + t.czmu.Unlock() +} + +func (t *http2Client) IncrMsgRecv() { + t.czmu.Lock() + t.msgRecv++ + t.lastMsgRecv = time.Now() + t.czmu.Unlock() +} + +func (t *http2Client) getOutFlowWindow() int64 { + resp := make(chan uint32, 1) + timer := time.NewTimer(time.Second) + defer timer.Stop() + t.controlBuf.put(&outFlowControlSizeRequest{resp}) + select { + case sz := <-resp: + return int64(sz) + case <-t.ctxDone: + return -1 + case <-timer.C: + return -2 + } +} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/transport/http2_server.go b/cluster-autoscaler/vendor/google.golang.org/grpc/transport/http2_server.go index 00df8eed0fdb..19acedb2b020 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/transport/http2_server.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/transport/http2_server.go @@ -24,7 +24,6 @@ import ( "fmt" "io" "math" - "math/rand" "net" "strconv" "sync" @@ -35,8 +34,12 @@ import ( "golang.org/x/net/context" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -52,25 +55,25 @@ var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHe // http2Server implements the ServerTransport interface with HTTP2. type http2Server struct { ctx context.Context + ctxDone <-chan struct{} // Cache the context.Done() chan cancel context.CancelFunc conn net.Conn + loopy *loopyWriter + readerDone chan struct{} // sync point to enable testing. + writerDone chan struct{} // sync point to enable testing. remoteAddr net.Addr localAddr net.Addr maxStreamID uint32 // max stream ID ever seen authInfo credentials.AuthInfo // auth info about the connection inTapHandle tap.ServerInHandle framer *framer - hBuf *bytes.Buffer // the buffer for HPACK encoding - hEnc *hpack.Encoder // HPACK encoder // The max number of concurrent streams. maxStreams uint32 // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. controlBuf *controlBuffer - fc *inFlow - // sendQuotaPool provides flow control to outbound message. - sendQuotaPool *quotaPool - stats stats.Handler + fc *trInFlow + stats stats.Handler // Flag to keep track of reading activity on transport. // 1 is true and 0 is false. activity uint32 // Accessed atomically. 
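
// getOutFlowWindow above queries state owned by the loopy writer by posting
// a request that carries a reply channel, then racing the reply against
// shutdown and a one-second timer (hence the -1/-2 sentinels). A standalone
// sketch of that single-owner query pattern with invented demo types:
package main

import (
	"fmt"
	"time"
)

type sizeRequest struct{ resp chan uint32 }

func queryWindow(requests chan<- sizeRequest, done <-chan struct{}) int64 {
	req := sizeRequest{resp: make(chan uint32, 1)}
	timer := time.NewTimer(time.Second)
	defer timer.Stop()
	requests <- req
	select {
	case sz := <-req.resp:
		return int64(sz)
	case <-done:
		return -1 // transport closed before the owner replied
	case <-timer.C:
		return -2 // owner wedged; give up rather than block the caller
	}
}

func main() {
	requests := make(chan sizeRequest)
	done := make(chan struct{})
	// The single goroutine that owns the flow-control state answers queries.
	go func() {
		window := uint32(65535)
		for r := range requests {
			r.resp <- window
		}
	}()
	fmt.Println(queryWindow(requests, done)) // 65535
}
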
@@ -101,13 +104,27 @@ type http2Server struct { drainChan chan struct{} state transportState activeStreams map[uint32]*Stream - // the per-stream outbound flow control window size set by the peer. - streamSendQuota uint32 // idle is the time instant when the connection went idle. // This is either the beginning of the connection or when the number of // RPCs go down to 0. // When the connection is busy, this value is set to 0. idle time.Time + + // Fields below are for channelz metric collection. + channelzID int64 // channelz unique identification number + czmu sync.RWMutex + kpCount int64 + // The number of streams that have started, including already finished ones. + streamsStarted int64 + // The number of streams that have ended successfully by sending frame with + // EoS bit set. + streamsSucceeded int64 + streamsFailed int64 + lastStreamCreated time.Time + msgSent int64 + msgRecv int64 + lastMsgSent time.Time + lastMsgRecv time.Time } // newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is @@ -182,32 +199,30 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err if kep.MinTime == 0 { kep.MinTime = defaultKeepalivePolicyMinTime } - var buf bytes.Buffer ctx, cancel := context.WithCancel(context.Background()) t := &http2Server{ ctx: ctx, cancel: cancel, + ctxDone: ctx.Done(), conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), authInfo: config.AuthInfo, framer: framer, - hBuf: &buf, - hEnc: hpack.NewEncoder(&buf), + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), maxStreams: maxStreams, inTapHandle: config.InTapHandle, - controlBuf: newControlBuffer(), - fc: &inFlow{limit: uint32(icwz)}, - sendQuotaPool: newQuotaPool(defaultWindowSize), + fc: &trInFlow{limit: uint32(icwz)}, state: reachable, activeStreams: make(map[uint32]*Stream), - streamSendQuota: defaultWindowSize, stats: config.StatsHandler, kp: kp, idle: time.Now(), kep: kep, initialWindowSize: iwz, } + t.controlBuf = newControlBuffer(t.ctxDone) if dynamicWindow { t.bdpEst = &bdpEstimator{ bdp: initialWindowSize, @@ -222,8 +237,17 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err connBegin := &stats.ConnBegin{} t.stats.HandleConn(t.ctx, connBegin) } + if channelz.IsOn() { + t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, "") + } t.framer.writer.Flush() + defer func() { + if err != nil { + t.Close() + } + }() + // Check the validity of client preface. preface := make([]byte, len(clientPreface)) if _, err := io.ReadFull(t.conn, preface); err != nil { @@ -235,8 +259,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err frame, err := t.framer.fr.ReadFrame() if err == io.EOF || err == io.ErrUnexpectedEOF { - t.Close() - return + return nil, err } if err != nil { return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err) @@ -249,8 +272,13 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err t.handleSettings(sf) go func() { - loopyWriter(t.ctx, t.controlBuf, t.itemHandler) - t.Close() + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) + t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler + if err := t.loopy.run(); err != nil { + errorf("transport: loopyWriter.run returning. 
Err: %v", err) + } + t.conn.Close() + close(t.writerDone) }() go t.keepalive() return t, nil @@ -259,12 +287,16 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err // operateHeader takes action on the decoded headers. func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (close bool) { streamID := frame.Header().StreamID - var state decodeState for _, hf := range frame.Fields { if err := state.processHeaderField(hf); err != nil { if se, ok := err.(StreamError); ok { - t.controlBuf.put(&resetStream{streamID, statusCodeConvTab[se.Code]}) + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: statusCodeConvTab[se.Code], + onWrite: func() {}, + }) } return } @@ -272,14 +304,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( buf := newRecvBuffer() s := &Stream{ - id: streamID, - st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, - recvCompress: state.encoding, - method: state.method, + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + recvCompress: state.encoding, + method: state.method, + contentSubtype: state.contentSubtype, } - if frame.StreamEnded() { // s is just created by the caller. No lock needed. s.state = streamReadDone @@ -297,10 +329,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( pr.AuthInfo = t.authInfo } s.ctx = peer.NewContext(s.ctx, pr) - // Cache the current stream to the context so that the server application - // can find out. Required when the server wants to send some metadata - // back to the client (unary call only). - s.ctx = newContextWithStream(s.ctx, s) // Attach the received metadata to the context. 
if len(state.mdata) > 0 { s.ctx = metadata.NewIncomingContext(s.ctx, state.mdata) @@ -319,7 +347,12 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.ctx, err = t.inTapHandle(s.ctx, info) if err != nil { warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) - t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream}) + t.controlBuf.put(&cleanupStream{ + streamID: s.id, + rst: true, + rstCode: http2.ErrCodeRefusedStream, + onWrite: func() {}, + }) return } } @@ -330,7 +363,12 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } if uint32(len(t.activeStreams)) >= t.maxStreams { t.mu.Unlock() - t.controlBuf.put(&resetStream{streamID, http2.ErrCodeRefusedStream}) + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeRefusedStream, + onWrite: func() {}, + }) return } if streamID%2 != 1 || streamID <= t.maxStreamID { @@ -340,13 +378,17 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( return true } t.maxStreamID = streamID - s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota)) - s.localSendQuota = newQuotaPool(defaultLocalSendQuota) t.activeStreams[streamID] = s if len(t.activeStreams) == 1 { t.idle = time.Time{} } t.mu.Unlock() + if channelz.IsOn() { + t.czmu.Lock() + t.streamsStarted++ + t.lastStreamCreated = time.Now() + t.czmu.Unlock() + } s.requestRead = func(n int) { t.adjustWindow(s, uint32(n)) } @@ -362,15 +404,23 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } t.stats.HandleRPC(s.ctx, inHeader) } + s.ctxDone = s.ctx.Done() + s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) s.trReader = &transportReader{ reader: &recvBufferReader{ - ctx: s.ctx, - recv: s.buf, + ctx: s.ctx, + ctxDone: s.ctxDone, + recv: s.buf, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) }, } + // Register the stream with loopy. + t.controlBuf.put(®isterStream{ + streamID: s.id, + wq: s.wq, + }) handle(s) return } @@ -379,18 +429,26 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { + defer close(t.readerDone) for { frame, err := t.framer.fr.ReadFrame() atomic.StoreUint32(&t.activity, 1) if err != nil { if se, ok := err.(http2.StreamError); ok { + warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) t.mu.Lock() s := t.activeStreams[se.StreamID] t.mu.Unlock() if s != nil { - t.closeStream(s) + t.closeStream(s, true, se.Code, nil, false) + } else { + t.controlBuf.put(&cleanupStream{ + streamID: se.StreamID, + rst: true, + rstCode: se.Code, + onWrite: func() {}, + }) } - t.controlBuf.put(&resetStream{se.StreamID, se.Code}) continue } if err == io.EOF || err == io.ErrUnexpectedEOF { @@ -444,33 +502,20 @@ func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { // of stream if the application is requesting data larger in size than // the window. 
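
// operateHeaders above enforces HTTP/2 stream-admission rules: client stream
// IDs must be odd and strictly increasing (violations kill the connection),
// and streams beyond maxStreams are refused with RST_STREAM. A compact sketch
// of just those checks; admitStream and its result type are illustrative:
package main

import "fmt"

type admitResult int

const (
	admit     admitResult = iota
	refuse                // send RST_STREAM(REFUSED_STREAM), keep the connection
	closeConn             // protocol violation: drop the whole transport
)

func admitStream(id, maxSeen uint32, active, maxStreams int) admitResult {
	if id%2 != 1 || id <= maxSeen {
		// Client stream IDs must be odd and monotonically increasing.
		return closeConn
	}
	if active >= maxStreams {
		return refuse
	}
	return admit
}

func main() {
	fmt.Println(admitStream(5, 3, 10, 100))  // 0: admitted
	fmt.Println(admitStream(5, 3, 100, 100)) // 1: refused, over the limit
	fmt.Println(admitStream(4, 3, 10, 100))  // 2: even ID, kill the connection
}
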
func (t *http2Server) adjustWindow(s *Stream, n uint32) { - s.mu.Lock() - defer s.mu.Unlock() - if s.state == streamDone { - return - } if w := s.fc.maybeAdjust(n); w > 0 { - if cw := t.fc.resetPendingUpdate(); cw > 0 { - t.controlBuf.put(&windowUpdate{0, cw}) - } - t.controlBuf.put(&windowUpdate{s.id, w}) + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } + } // updateWindow adjusts the inbound quota for the stream and the transport. // Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. func (t *http2Server) updateWindow(s *Stream, n uint32) { - s.mu.Lock() - defer s.mu.Unlock() - if s.state == streamDone { - return - } if w := s.fc.onRead(n); w > 0 { - if cw := t.fc.resetPendingUpdate(); cw > 0 { - t.controlBuf.put(&windowUpdate{0, cw}) - } - t.controlBuf.put(&windowUpdate{s.id, w}) + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, + increment: w, + }) } } @@ -484,13 +529,15 @@ func (t *http2Server) updateFlowControl(n uint32) { } t.initialWindowSize = int32(n) t.mu.Unlock() - t.controlBuf.put(&windowUpdate{0, t.fc.newLimit(n)}) - t.controlBuf.put(&settings{ - ack: false, + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: t.fc.newLimit(n), + }) + t.controlBuf.put(&outgoingSettings{ ss: []http2.Setting{ { ID: http2.SettingInitialWindowSize, - Val: uint32(n), + Val: n, }, }, }) @@ -501,7 +548,7 @@ func (t *http2Server) handleData(f *http2.DataFrame) { size := f.Header().Length var sendBDPPing bool if t.bdpEst != nil { - sendBDPPing = t.bdpEst.add(uint32(size)) + sendBDPPing = t.bdpEst.add(size) } // Decouple connection's flow control from application's read. // An update on connection's flow control should not depend on @@ -511,23 +558,22 @@ func (t *http2Server) handleData(f *http2.DataFrame) { // Decoupling the connection flow control will prevent other // active(fast) streams from starving in presence of slow or // inactive streams. - // - // Furthermore, if a bdpPing is being sent out we can piggyback - // connection's window update for the bytes we just received. + if w := t.fc.onData(size); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } if sendBDPPing { - if size != 0 { // Could be an empty frame. - t.controlBuf.put(&windowUpdate{0, uint32(size)}) + // Avoid excessive ping detection (e.g. in an L7 proxy) + // by sending a window update prior to the BDP ping. + if w := t.fc.reset(); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) } t.controlBuf.put(bdpPing) - } else { - if err := t.fc.onData(uint32(size)); err != nil { - errorf("transport: http2Server %v", err) - t.Close() - return - } - if w := t.fc.onRead(uint32(size)); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } } // Select the right stream to dispatch. 
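
`handleData` above now flushes any pending connection-level window update before a BDP ping, so an L7 proxy in the middle does not see a bare ping storm. A toy model of the BDP sampling idea, not the vendored estimator; the doubling rule and the 2/3 trigger here are illustrative assumptions:

```go
package main

import (
	"fmt"
	"time"
)

// bdpSketch counts bytes that arrive while a BDP ping is in flight and, on
// the ping ack, grows the window if the sample suggests it is saturated.
type bdpSketch struct {
	window   uint32
	sample   uint32
	pingSent bool
	sentAt   time.Time
}

// add records n received bytes; it returns true when a BDP ping should be
// sent (first data after the previous sample finished).
func (b *bdpSketch) add(n uint32) bool {
	if b.pingSent {
		b.sample += n
		return false
	}
	b.pingSent, b.sample, b.sentAt = true, n, time.Now()
	return true
}

// ack is called when the ping ack arrives.
func (b *bdpSketch) ack() {
	if b.sample >= b.window*2/3 {
		b.window *= 2
	}
	b.pingSent = false
	fmt.Printf("sample=%d rtt=%v window=%d\n", b.sample, time.Since(b.sentAt), b.window)
}

func main() {
	b := &bdpSketch{window: 16 * 1024}
	if b.add(8 * 1024) {
		b.add(8 * 1024) // bytes racing the ping
		b.ack()
	}
}
```
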
s, ok := t.getStream(f) @@ -535,23 +581,15 @@ func (t *http2Server) handleData(f *http2.DataFrame) { return } if size > 0 { - s.mu.Lock() - if s.state == streamDone { - s.mu.Unlock() - return - } - if err := s.fc.onData(uint32(size)); err != nil { - s.mu.Unlock() - t.closeStream(s) - t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) + if err := s.fc.onData(size); err != nil { + t.closeStream(s, true, http2.ErrCodeFlowControl, nil, false) return } if f.Header().Flags.Has(http2.FlagDataPadded) { - if w := s.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 { - t.controlBuf.put(&windowUpdate{s.id, w}) + if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) } } - s.mu.Unlock() // TODO(bradfitz, zhaoq): A copy is required here because there is no // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? @@ -563,11 +601,7 @@ func (t *http2Server) handleData(f *http2.DataFrame) { } if f.Header().Flags.Has(http2.FlagDataEndStream) { // Received the end of stream from the client. - s.mu.Lock() - if s.state != streamDone { - s.state = streamReadDone - } - s.mu.Unlock() + s.compareAndSwapState(streamActive, streamReadDone) s.write(recvMsg{err: io.EOF}) } } @@ -577,7 +611,7 @@ func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) { if !ok { return } - t.closeStream(s) + t.closeStream(s, false, 0, nil, false) } func (t *http2Server) handleSettings(f *http2.SettingsFrame) { @@ -589,21 +623,9 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) { ss = append(ss, s) return nil }) - t.controlBuf.put(&settings{ack: true, ss: ss}) -} - -func (t *http2Server) applySettings(ss []http2.Setting) { - for _, s := range ss { - if s.ID == http2.SettingInitialWindowSize { - t.mu.Lock() - for _, stream := range t.activeStreams { - stream.sendQuotaPool.addAndUpdate(int(s.Val) - int(t.streamSendQuota)) - } - t.streamSendQuota = s.Val - t.mu.Unlock() - } - - } + t.controlBuf.put(&incomingSettings{ + ss: ss, + }) } const ( @@ -656,39 +678,37 @@ func (t *http2Server) handlePing(f *http2.PingFrame) { if t.pingStrikes > maxPingStrikes { // Send goaway and close the connection. - errorf("transport: Got to too many pings from the client, closing the connection.") + errorf("transport: Got too many pings from the client, closing the connection.") t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) } } func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) { - id := f.Header().StreamID - incr := f.Increment - if id == 0 { - t.sendQuotaPool.add(int(incr)) - return - } - if s, ok := t.getStream(f); ok { - s.sendQuotaPool.add(int(incr)) + t.controlBuf.put(&incomingWindowUpdate{ + streamID: f.Header().StreamID, + increment: f.Increment, + }) +} + +func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) []hpack.HeaderField { + for k, vv := range md { + if isReservedHeader(k) { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } } + return headerFields } // WriteHeader sends the header metedata md back to the client. 
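
`appendHeaderFieldsFromMD` above centralizes the metadata-to-header-field conversion. A self-contained sketch of the same filtering and encoding rules, using plain structs instead of `hpack.HeaderField` and an abbreviated reserved-key list:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

// headerField stands in for hpack.HeaderField in this sketch.
type headerField struct{ name, value string }

// isReserved mirrors the idea of isReservedHeader: transport-owned keys must
// not be copied from user metadata onto the wire. The key list here is
// abbreviated, not the full vendored one.
func isReserved(k string) bool {
	switch k {
	case "content-type", "user-agent", "te":
		return true
	}
	return strings.HasPrefix(k, "grpc-")
}

// appendFromMD mirrors appendHeaderFieldsFromMD: skip reserved keys and
// base64-encode values of "-bin" keys, as encodeMetadataHeader does for
// binary metadata.
func appendFromMD(hf []headerField, md map[string][]string) []headerField {
	for k, vv := range md {
		if isReserved(k) {
			continue
		}
		for _, v := range vv {
			if strings.HasSuffix(k, "-bin") {
				v = base64.RawStdEncoding.EncodeToString([]byte(v))
			}
			hf = append(hf, headerField{k, v})
		}
	}
	return hf
}

func main() {
	md := map[string][]string{
		"trace-bin":    {"\x01\x02"},
		"grpc-status":  {"0"}, // reserved: dropped
		"x-request-id": {"42"},
	}
	for _, f := range appendFromMD(nil, md) {
		fmt.Printf("%s: %s\n", f.name, f.value)
	}
}
```
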
func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { - select { - case <-s.ctx.Done(): - return ContextErr(s.ctx.Err()) - case <-t.ctx.Done(): - return ErrConnClosing - default: - } - - s.mu.Lock() - if s.headerOk || s.state == streamDone { - s.mu.Unlock() + if s.updateHeaderSent() || s.getState() == streamDone { return ErrIllegalHeaderWrite } - s.headerOk = true + s.hdrMu.Lock() if md.Len() > 0 { if s.header.Len() > 0 { s.header = metadata.Join(s.header, md) @@ -696,37 +716,35 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { s.header = md } } - md = s.header - s.mu.Unlock() - // TODO(mmukhi): Benchmark if the perfomance gets better if count the metadata and other header fields + t.writeHeaderLocked(s) + s.hdrMu.Unlock() + return nil +} + +func (t *http2Server) writeHeaderLocked(s *Stream) { + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) - headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)}) if s.sendCompress != "" { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) } - for k, vv := range md { - if isReservedHeader(k) { - // Clients don't tolerate reading restricted headers after some non restricted ones were sent. - continue - } - for _, v := range vv { - headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) - } - } + headerFields = appendHeaderFieldsFromMD(headerFields, s.header) t.controlBuf.put(&headerFrame{ streamID: s.id, hf: headerFields, endStream: false, + onWrite: func() { + atomic.StoreUint32(&t.resetPingStrikes, 1) + }, }) if t.stats != nil { - outHeader := &stats.OutHeader{ - //WireLength: // TODO(mmukhi): Revisit this later, if needed. - } + // Note: WireLength is not set in outHeader. + // TODO(mmukhi): Revisit this later, if needed. + outHeader := &stats.OutHeader{} t.stats.HandleRPC(s.Context(), outHeader) } - return nil } // WriteStatus sends stream status to the client and terminates the stream. @@ -734,37 +752,20 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { - select { - case <-t.ctx.Done(): - return ErrConnClosing - default: - } - - var headersSent, hasHeader bool - s.mu.Lock() - if s.state == streamDone { - s.mu.Unlock() + if s.getState() == streamDone { return nil } - if s.headerOk { - headersSent = true - } - if s.header.Len() > 0 { - hasHeader = true - } - s.mu.Unlock() - - if !headersSent && hasHeader { - t.WriteHeader(s, nil) - headersSent = true - } - - // TODO(mmukhi): Benchmark if the perfomance gets better if count the metadata and other header fields + s.hdrMu.Lock() + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. 
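
`WriteHeader` now guards against double header writes with `updateHeaderSent`, an atomic swap rather than a mutex-protected bool. The latch in isolation:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// stream sketches the headers-sent latch the patch introduces: an atomic
// swap makes "first caller writes headers, everyone else skips" race-free
// without holding a stream-wide mutex.
type stream struct{ headerSent uint32 }

// updateHeaderSent returns true if headers were already marked as sent.
func (s *stream) updateHeaderSent() bool {
	return atomic.SwapUint32(&s.headerSent, 1) == 1
}

func main() {
	s := &stream{}
	for i := 0; i < 2; i++ {
		if s.updateHeaderSent() {
			fmt.Println("headers already sent; skipping")
		} else {
			fmt.Println("writing headers")
		}
	}
}
```
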
- if !headersSent { - headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) - headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + if !s.updateHeaderSent() { // No headers have been sent. + if len(s.header) > 0 { // Send a separate header frame. + t.writeHeaderLocked(s) + } else { // Send a trailer only response. + headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)}) + } } headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))}) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) @@ -773,126 +774,75 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. - panic(err) + grpclog.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err) + } else { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) } - - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) } // Attach the trailer metadata. - for k, vv := range s.trailer { - // Clients don't tolerate reading restricted headers after some non restricted ones were sent. - if isReservedHeader(k) { - continue - } - for _, v := range vv { - headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) - } - } - t.controlBuf.put(&headerFrame{ + headerFields = appendHeaderFieldsFromMD(headerFields, s.trailer) + trailingHeader := &headerFrame{ streamID: s.id, hf: headerFields, endStream: true, - }) + onWrite: func() { + atomic.StoreUint32(&t.resetPingStrikes, 1) + }, + } + s.hdrMu.Unlock() + t.closeStream(s, false, 0, trailingHeader, true) if t.stats != nil { t.stats.HandleRPC(s.Context(), &stats.OutTrailer{}) } - t.closeStream(s) return nil } // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). -func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) (err error) { - select { - case <-s.ctx.Done(): - return ContextErr(s.ctx.Err()) - case <-t.ctx.Done(): - return ErrConnClosing - default: - } - - var writeHeaderFrame bool - s.mu.Lock() - if s.state == streamDone { - s.mu.Unlock() - return streamErrorf(codes.Unknown, "the stream has been done") - } - if !s.headerOk { - writeHeaderFrame = true - } - s.mu.Unlock() - if writeHeaderFrame { - t.WriteHeader(s, nil) +func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + if !s.isHeaderSent() { // Headers haven't been written yet. + if err := t.WriteHeader(s, nil); err != nil { + // TODO(mmukhi, dfawley): Make sure this is the right code to return. + return streamErrorf(codes.Internal, "transport: %v", err) + } + } else { + // Writing headers checks for this condition. + if s.getState() == streamDone { + // TODO(mmukhi, dfawley): Should the server write also return io.EOF? + s.cancel() + select { + case <-t.ctx.Done(): + return ErrConnClosing + default: + } + return ContextErr(s.ctx.Err()) + } } - // Add data to header frame so that we can equally distribute data across frames. 
+ // Add some data to header frame so that we can equally distribute bytes across frames. emptyLen := http2MaxFrameLen - len(hdr) if emptyLen > len(data) { emptyLen = len(data) } hdr = append(hdr, data[:emptyLen]...) data = data[emptyLen:] - for _, r := range [][]byte{hdr, data} { - for len(r) > 0 { - size := http2MaxFrameLen - // Wait until the stream has some quota to send the data. - quotaChan, quotaVer := s.sendQuotaPool.acquireWithVersion() - sq, err := wait(s.ctx, t.ctx, nil, nil, quotaChan) - if err != nil { - return err - } - // Wait until the transport has some quota to send the data. - tq, err := wait(s.ctx, t.ctx, nil, nil, t.sendQuotaPool.acquire()) - if err != nil { - return err - } - if sq < size { - size = sq - } - if tq < size { - size = tq - } - if size > len(r) { - size = len(r) - } - p := r[:size] - ps := len(p) - if ps < tq { - // Overbooked transport quota. Return it back. - t.sendQuotaPool.add(tq - ps) - } - // Acquire local send quota to be able to write to the controlBuf. - ltq, err := wait(s.ctx, t.ctx, nil, nil, s.localSendQuota.acquire()) - if err != nil { - if _, ok := err.(ConnectionError); !ok { - t.sendQuotaPool.add(ps) - } - return err - } - s.localSendQuota.add(ltq - ps) // It's ok we make this negative. - // Reset ping strikes when sending data since this might cause - // the peer to send ping. + df := &dataFrame{ + streamID: s.id, + h: hdr, + d: data, + onEachWrite: func() { atomic.StoreUint32(&t.resetPingStrikes, 1) - success := func() { - t.controlBuf.put(&dataFrame{streamID: s.id, endStream: false, d: p, f: func() { - s.localSendQuota.add(ps) - }}) - if ps < sq { - // Overbooked stream quota. Return it back. - s.sendQuotaPool.lockedAdd(sq - ps) - } - r = r[ps:] - } - failure := func() { - s.sendQuotaPool.lockedAdd(sq) - } - if !s.sendQuotaPool.compareAndExecute(quotaVer, success, failure) { - t.sendQuotaPool.add(ps) - s.localSendQuota.add(ps) - } + }, + } + if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + select { + case <-t.ctx.Done(): + return ErrConnClosing + default: } + return ContextErr(s.ctx.Err()) } - return nil + return t.controlBuf.put(df) } // keepalive running in a separate goroutine does the following: @@ -937,7 +887,7 @@ func (t *http2Server) keepalive() { // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. // Gracefully close the connection. t.drain(http2.ErrCodeNo, []byte{}) - // Reseting the timer so that the clean-up doesn't deadlock. + // Resetting the timer so that the clean-up doesn't deadlock. maxIdle.Reset(infinity) return } @@ -949,7 +899,7 @@ func (t *http2Server) keepalive() { case <-maxAge.C: // Close the connection after grace period. t.Close() - // Reseting the timer so that the clean-up doesn't deadlock. + // Resetting the timer so that the clean-up doesn't deadlock. maxAge.Reset(infinity) case <-t.ctx.Done(): } @@ -962,11 +912,16 @@ func (t *http2Server) keepalive() { } if pingSent { t.Close() - // Reseting the timer so that the clean-up doesn't deadlock. + // Resetting the timer so that the clean-up doesn't deadlock. keepalive.Reset(infinity) return } pingSent = true + if channelz.IsOn() { + t.czmu.Lock() + t.kpCount++ + t.czmu.Unlock() + } t.controlBuf.put(p) keepalive.Reset(t.kp.Timeout) case <-t.ctx.Done(): @@ -975,127 +930,6 @@ func (t *http2Server) keepalive() { } } -var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} - -// TODO(mmukhi): A lot of this code(and code in other places in the tranpsort layer) -// is duplicated between the client and the server. 
-// The transport layer needs to be refactored to take care of this. -func (t *http2Server) itemHandler(i item) error { - switch i := i.(type) { - case *dataFrame: - if err := t.framer.fr.WriteData(i.streamID, i.endStream, i.d); err != nil { - return err - } - i.f() - return nil - case *headerFrame: - t.hBuf.Reset() - for _, f := range i.hf { - t.hEnc.WriteField(f) - } - first := true - endHeaders := false - for !endHeaders { - size := t.hBuf.Len() - if size > http2MaxFrameLen { - size = http2MaxFrameLen - } else { - endHeaders = true - } - var err error - if first { - first = false - err = t.framer.fr.WriteHeaders(http2.HeadersFrameParam{ - StreamID: i.streamID, - BlockFragment: t.hBuf.Next(size), - EndStream: i.endStream, - EndHeaders: endHeaders, - }) - } else { - err = t.framer.fr.WriteContinuation( - i.streamID, - endHeaders, - t.hBuf.Next(size), - ) - } - if err != nil { - return err - } - } - atomic.StoreUint32(&t.resetPingStrikes, 1) - return nil - case *windowUpdate: - return t.framer.fr.WriteWindowUpdate(i.streamID, i.increment) - case *settings: - if i.ack { - t.applySettings(i.ss) - return t.framer.fr.WriteSettingsAck() - } - return t.framer.fr.WriteSettings(i.ss...) - case *resetStream: - return t.framer.fr.WriteRSTStream(i.streamID, i.code) - case *goAway: - t.mu.Lock() - if t.state == closing { - t.mu.Unlock() - // The transport is closing. - return fmt.Errorf("transport: Connection closing") - } - sid := t.maxStreamID - if !i.headsUp { - // Stop accepting more streams now. - t.state = draining - t.mu.Unlock() - if err := t.framer.fr.WriteGoAway(sid, i.code, i.debugData); err != nil { - return err - } - if i.closeConn { - // Abruptly close the connection following the GoAway (via - // loopywriter). But flush out what's inside the buffer first. - t.framer.writer.Flush() - return fmt.Errorf("transport: Connection closing") - } - return nil - } - t.mu.Unlock() - // For a graceful close, send out a GoAway with stream ID of MaxUInt32, - // Follow that with a ping and wait for the ack to come back or a timer - // to expire. During this time accept new streams since they might have - // originated before the GoAway reaches the client. - // After getting the ack or timer expiration send out another GoAway this - // time with an ID of the max stream server intends to process. - if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { - return err - } - if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { - return err - } - go func() { - timer := time.NewTimer(time.Minute) - defer timer.Stop() - select { - case <-t.drainChan: - case <-timer.C: - case <-t.ctx.Done(): - return - } - t.controlBuf.put(&goAway{code: i.code, debugData: i.debugData}) - }() - return nil - case *flushIO: - return t.framer.writer.Flush() - case *ping: - if !i.ack { - t.bdpEst.timesnap(i.data) - } - return t.framer.fr.WritePing(i.ack, i.data) - default: - err := status.Errorf(codes.Internal, "transport: http2Server.controller got unexpected item type %t", i) - errorf("%v", err) - return err - } -} - // Close starts shutting down the http2Server transport. // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This // could cause some resource issue. Revisit this later. @@ -1109,8 +943,12 @@ func (t *http2Server) Close() error { streams := t.activeStreams t.activeStreams = nil t.mu.Unlock() + t.controlBuf.finish() t.cancel() err := t.conn.Close() + if channelz.IsOn() { + channelz.RemoveEntry(t.channelzID) + } // Cancel all active streams. 
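
The deleted `itemHandler` above, including its HEADERS/CONTINUATION chunking, now lives in the shared loopy writer. The chunking rule itself is small; a standalone sketch with frame writes stubbed as prints and 16KB standing in for `http2MaxFrameLen`:

```go
package main

import "fmt"

const maxFrameLen = 16 * 1024 // stands in for http2MaxFrameLen

// writeHeaderBlock splits an HPACK-encoded header block across one HEADERS
// frame and as many CONTINUATION frames as needed, mirroring the logic that
// moved from itemHandler into the shared loopy writer.
func writeHeaderBlock(streamID uint32, block []byte, endStream bool) {
	first := true
	for len(block) > 0 {
		size := len(block)
		if size > maxFrameLen {
			size = maxFrameLen
		}
		endHeaders := size == len(block)
		if first {
			fmt.Printf("HEADERS stream=%d len=%d endStream=%v endHeaders=%v\n",
				streamID, size, endStream, endHeaders)
			first = false
		} else {
			fmt.Printf("CONTINUATION stream=%d len=%d endHeaders=%v\n",
				streamID, size, endHeaders)
		}
		block = block[size:]
	}
}

func main() {
	writeHeaderBlock(1, make([]byte, 40*1024), true) // 3 frames: 16K+16K+8K
}
```
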
for _, s := range streams { s.cancel() @@ -1124,27 +962,45 @@ func (t *http2Server) Close() error { // closeStream clears the footprint of a stream when the stream is not needed // any more. -func (t *http2Server) closeStream(s *Stream) { - t.mu.Lock() - delete(t.activeStreams, s.id) - if len(t.activeStreams) == 0 { - t.idle = time.Now() - } - if t.state == draining && len(t.activeStreams) == 0 { - defer t.Close() +func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + if s.swapState(streamDone) == streamDone { + // If the stream was already done, return. + return } - t.mu.Unlock() // In case stream sending and receiving are invoked in separate // goroutines (e.g., bi-directional streaming), cancel needs to be // called to interrupt the potential blocking on other goroutines. s.cancel() - s.mu.Lock() - if s.state == streamDone { - s.mu.Unlock() - return + cleanup := &cleanupStream{ + streamID: s.id, + rst: rst, + rstCode: rstCode, + onWrite: func() { + t.mu.Lock() + if t.activeStreams != nil { + delete(t.activeStreams, s.id) + if len(t.activeStreams) == 0 { + t.idle = time.Now() + } + } + t.mu.Unlock() + if channelz.IsOn() { + t.czmu.Lock() + if eosReceived { + t.streamsSucceeded++ + } else { + t.streamsFailed++ + } + t.czmu.Unlock() + } + }, + } + if hdr != nil { + hdr.cleanup = cleanup + t.controlBuf.put(hdr) + } else { + t.controlBuf.put(cleanup) } - s.state = streamDone - s.mu.Unlock() } func (t *http2Server) RemoteAddr() net.Addr { @@ -1165,7 +1021,115 @@ func (t *http2Server) drain(code http2.ErrCode, debugData []byte) { t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true}) } -var rgen = rand.New(rand.NewSource(time.Now().UnixNano())) +var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} + +// Handles outgoing GoAway and returns true if loopy needs to put itself +// in draining mode. +func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { + t.mu.Lock() + if t.state == closing { // TODO(mmukhi): This seems unnecessary. + t.mu.Unlock() + // The transport is closing. + return false, ErrConnClosing + } + sid := t.maxStreamID + if !g.headsUp { + // Stop accepting more streams now. + t.state = draining + if len(t.activeStreams) == 0 { + g.closeConn = true + } + t.mu.Unlock() + if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { + return false, err + } + if g.closeConn { + // Abruptly close the connection following the GoAway (via + // loopywriter). But flush out what's inside the buffer first. + t.framer.writer.Flush() + return false, fmt.Errorf("transport: Connection closing") + } + return true, nil + } + t.mu.Unlock() + // For a graceful close, send out a GoAway with stream ID of MaxUInt32, + // Follow that with a ping and wait for the ack to come back or a timer + // to expire. During this time accept new streams since they might have + // originated before the GoAway reaches the client. + // After getting the ack or timer expiration send out another GoAway this + // time with an ID of the max stream server intends to process. 
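
A toy timeline of the two-step graceful GOAWAY described in the comment above; wire writes are prints, `drainAck` stands in for `t.drainChan`, and the one-minute timer is shortened:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	drainAck := make(chan struct{})
	fmt.Println("GOAWAY last-stream-id=MaxUint32 (heads-up, keep accepting streams)")
	fmt.Println("PING (waiting for ack)")

	go func() { // the client acks the ping some time later
		time.Sleep(10 * time.Millisecond)
		close(drainAck)
	}()

	select {
	case <-drainAck:
	case <-time.After(time.Second): // stands in for the one-minute timer
	}
	fmt.Println("GOAWAY last-stream-id=<max processed stream> (final)")
}
```
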
+ if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { + return false, err + } + if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { + return false, err + } + go func() { + timer := time.NewTimer(time.Minute) + defer timer.Stop() + select { + case <-t.drainChan: + case <-timer.C: + case <-t.ctx.Done(): + return + } + t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData}) + }() + return false, nil +} + +func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { + t.czmu.RLock() + s := channelz.SocketInternalMetric{ + StreamsStarted: t.streamsStarted, + StreamsSucceeded: t.streamsSucceeded, + StreamsFailed: t.streamsFailed, + MessagesSent: t.msgSent, + MessagesReceived: t.msgRecv, + KeepAlivesSent: t.kpCount, + LastRemoteStreamCreatedTimestamp: t.lastStreamCreated, + LastMessageSentTimestamp: t.lastMsgSent, + LastMessageReceivedTimestamp: t.lastMsgRecv, + LocalFlowControlWindow: int64(t.fc.getSize()), + //socket options + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + // Security + // RemoteName : + } + t.czmu.RUnlock() + s.RemoteFlowControlWindow = t.getOutFlowWindow() + return &s +} + +func (t *http2Server) IncrMsgSent() { + t.czmu.Lock() + t.msgSent++ + t.lastMsgSent = time.Now() + t.czmu.Unlock() +} + +func (t *http2Server) IncrMsgRecv() { + t.czmu.Lock() + t.msgRecv++ + t.lastMsgRecv = time.Now() + t.czmu.Unlock() +} + +func (t *http2Server) getOutFlowWindow() int64 { + resp := make(chan uint32) + timer := time.NewTimer(time.Second) + defer timer.Stop() + t.controlBuf.put(&outFlowControlSizeRequest{resp}) + select { + case sz := <-resp: + return int64(sz) + case <-t.ctxDone: + return -1 + case <-timer.C: + return -2 + } +} func getJitter(v time.Duration) time.Duration { if v == infinity { @@ -1173,6 +1137,6 @@ func getJitter(v time.Duration) time.Duration { } // Generate a jitter between +/- 10% of the value. r := int64(v / 10) - j := rgen.Int63n(2*r) - r + j := grpcrand.Int63n(2*r) - r return time.Duration(j) } diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/transport/http_util.go b/cluster-autoscaler/vendor/google.golang.org/grpc/transport/http_util.go index 39f878cfd5bf..7d15c7d74261 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/transport/http_util.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/transport/http_util.go @@ -23,12 +23,12 @@ import ( "bytes" "encoding/base64" "fmt" - "io" "net" "net/http" "strconv" "strings" "time" + "unicode/utf8" "github.com/golang/protobuf/proto" "golang.org/x/net/http2" @@ -46,6 +46,12 @@ const ( // http2IOBufSize specifies the buffer size for sending frames. defaultWriteBufSize = 32 * 1024 defaultReadBufSize = 32 * 1024 + // baseContentType is the base content-type for gRPC. This is a valid + // content-type on its own, but can also include a content-subtype such as + // "proto" as a suffix after "+" or ";". See + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests + // for more details.
+ baseContentType = "application/grpc" ) var ( @@ -64,7 +70,7 @@ var ( http2.ErrCodeConnect: codes.Internal, http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, http2.ErrCodeInadequateSecurity: codes.PermissionDenied, - http2.ErrCodeHTTP11Required: codes.FailedPrecondition, + http2.ErrCodeHTTP11Required: codes.Internal, } statusCodeConvTab = map[codes.Code]http2.ErrCode{ codes.Internal: http2.ErrCodeInternal, @@ -111,9 +117,10 @@ type decodeState struct { timeout time.Duration method string // key-value metadata map from the peer. - mdata map[string][]string - statsTags []byte - statsTrace []byte + mdata map[string][]string + statsTags []byte + statsTrace []byte + contentSubtype string } // isReservedHeader checks whether hdr belongs to HTTP2 headers @@ -125,6 +132,7 @@ func isReservedHeader(hdr string) bool { } switch hdr { case "content-type", + "user-agent", "grpc-message-type", "grpc-encoding", "grpc-message", @@ -138,28 +146,55 @@ func isReservedHeader(hdr string) bool { } } -// isWhitelistedPseudoHeader checks whether hdr belongs to HTTP2 pseudoheaders -// that should be propagated into metadata visible to users. -func isWhitelistedPseudoHeader(hdr string) bool { +// isWhitelistedHeader checks whether hdr should be propagated +// into metadata visible to users. +func isWhitelistedHeader(hdr string) bool { switch hdr { - case ":authority": + case ":authority", "user-agent": return true default: return false } } -func validContentType(t string) bool { - e := "application/grpc" - if !strings.HasPrefix(t, e) { - return false +// contentSubtype returns the content-subtype for the given content-type. The +// given content-type must be a valid content-type that starts with +// "application/grpc". A content-subtype will follow "application/grpc" after a +// "+" or ";". See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If contentType is not a valid content-type for gRPC, the boolean +// will be false, otherwise true. If content-type == "application/grpc", +// "application/grpc+", or "application/grpc;", the boolean will be true, +// but no content-subtype will be returned. +// +// contentType is assumed to be lowercase already. +func contentSubtype(contentType string) (string, bool) { + if contentType == baseContentType { + return "", true + } + if !strings.HasPrefix(contentType, baseContentType) { + return "", false + } + // guaranteed since != baseContentType and has baseContentType prefix + switch contentType[len(baseContentType)] { + case '+', ';': + // this will return true for "application/grpc+" or "application/grpc;" + // which the previous validContentType function tested to be valid, so we + // just say that no content-subtype is specified in this case + return contentType[len(baseContentType)+1:], true + default: + return "", false } - // Support variations on the content-type - // (e.g. "application/grpc+blah", "application/grpc;blah"). - if len(t) > len(e) && t[len(e)] != '+' && t[len(e)] != ';' { - return false +} + +// contentSubtype is assumed to be lowercase +func contentType(contentSubtype string) string { + if contentSubtype == "" { + return baseContentType } - return true + return baseContentType + "+" + contentSubtype } func (d *decodeState) status() *status.Status { @@ -228,9 +263,9 @@ func (d *decodeState) decodeResponseHeader(frame *http2.MetaHeadersFrame) error // gRPC status doesn't exist and http status is OK. // Set rawStatusCode to be unknown and return nil error. 
// So that, if the stream has ended this Unknown status - // will be propogated to the user. + // will be propagated to the user. // Otherwise, it will be ignored. In which case, status from - // a later trailer, that has StreamEnded flag set, is propogated. + // a later trailer, that has StreamEnded flag set, is propagated. code := int(codes.Unknown) d.rawStatusCode = &code return nil @@ -247,9 +282,16 @@ func (d *decodeState) addMetadata(k, v string) { func (d *decodeState) processHeaderField(f hpack.HeaderField) error { switch f.Name { case "content-type": - if !validContentType(f.Value) { - return streamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value) + contentSubtype, validContentType := contentSubtype(f.Value) + if !validContentType { + return streamErrorf(codes.Internal, "transport: received the unexpected content-type %q", f.Value) } + d.contentSubtype = contentSubtype + // TODO: do we want to propagate the whole content-type in the metadata, + // or come up with a way to just propagate the content-subtype if it was set? + // ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"} + // in the metadata? + d.addMetadata(f.Name, f.Value) case "grpc-encoding": d.encoding = f.Value case "grpc-status": @@ -299,7 +341,7 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) error { d.statsTrace = v d.addMetadata(f.Name, string(v)) default: - if isReservedHeader(f.Name) && !isWhitelistedPseudoHeader(f.Name) { + if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) { break } v, err := decodeMetadataHeader(f.Name, f.Value) @@ -307,7 +349,7 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) error { errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err) return nil } - d.addMetadata(f.Name, string(v)) + d.addMetadata(f.Name, v) } return nil } @@ -396,16 +438,17 @@ func decodeTimeout(s string) (time.Duration, error) { const ( spaceByte = ' ' - tildaByte = '~' + tildeByte = '~' percentByte = '%' ) // encodeGrpcMessage is used to encode status code in header field -// "grpc-message". -// It checks to see if each individual byte in msg is an -// allowable byte, and then either percent encoding or passing it through. -// When percent encoding, the byte is converted into hexadecimal notation -// with a '%' prepended. +// "grpc-message". It does percent encoding and also replaces invalid utf-8 +// characters with Unicode replacement character. +// +// It checks to see if each individual byte in msg is an allowable byte, and +// then either percent encoding or passing it through. When percent encoding, +// the byte is converted into hexadecimal notation with a '%' prepended. 
func encodeGrpcMessage(msg string) string { if msg == "" { return "" @@ -413,7 +456,7 @@ func encodeGrpcMessage(msg string) string { lenMsg := len(msg) for i := 0; i < lenMsg; i++ { c := msg[i] - if !(c >= spaceByte && c < tildaByte && c != percentByte) { + if !(c >= spaceByte && c <= tildeByte && c != percentByte) { return encodeGrpcMessageUnchecked(msg) } } @@ -422,14 +465,26 @@ func encodeGrpcMessage(msg string) string { func encodeGrpcMessageUnchecked(msg string) string { var buf bytes.Buffer - lenMsg := len(msg) - for i := 0; i < lenMsg; i++ { - c := msg[i] - if c >= spaceByte && c < tildaByte && c != percentByte { - buf.WriteByte(c) - } else { - buf.WriteString(fmt.Sprintf("%%%02X", c)) + for len(msg) > 0 { + r, size := utf8.DecodeRuneInString(msg) + for _, b := range []byte(string(r)) { + if size > 1 { + // If size > 1, r is not ascii. Always do percent encoding. + buf.WriteString(fmt.Sprintf("%%%02X", b)) + continue + } + + // The for loop is necessary even if size == 1. r could be + // utf8.RuneError. + // + // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD". + if b >= spaceByte && b <= tildeByte && b != percentByte { + buf.WriteByte(b) + } else { + buf.WriteString(fmt.Sprintf("%%%02X", b)) + } } + msg = msg[size:] } return buf.String() } @@ -468,19 +523,67 @@ func decodeGrpcMessageUnchecked(msg string) string { return buf.String() } +type bufWriter struct { + buf []byte + offset int + batchSize int + conn net.Conn + err error + + onFlush func() +} + +func newBufWriter(conn net.Conn, batchSize int) *bufWriter { + return &bufWriter{ + buf: make([]byte, batchSize*2), + batchSize: batchSize, + conn: conn, + } +} + +func (w *bufWriter) Write(b []byte) (n int, err error) { + if w.err != nil { + return 0, w.err + } + for len(b) > 0 { + nn := copy(w.buf[w.offset:], b) + b = b[nn:] + w.offset += nn + n += nn + if w.offset >= w.batchSize { + err = w.Flush() + } + } + return n, err +} + +func (w *bufWriter) Flush() error { + if w.err != nil { + return w.err + } + if w.offset == 0 { + return nil + } + if w.onFlush != nil { + w.onFlush() + } + _, w.err = w.conn.Write(w.buf[:w.offset]) + w.offset = 0 + return w.err +} + type framer struct { - numWriters int32 - reader io.Reader - writer *bufio.Writer - fr *http2.Framer + writer *bufWriter + fr *http2.Framer } func newFramer(conn net.Conn, writeBufferSize, readBufferSize int) *framer { + r := bufio.NewReaderSize(conn, readBufferSize) + w := newBufWriter(conn, writeBufferSize) f := &framer{ - reader: bufio.NewReaderSize(conn, readBufferSize), - writer: bufio.NewWriterSize(conn, writeBufferSize), + writer: w, + fr: http2.NewFramer(w, r), } - f.fr = http2.NewFramer(f.writer, f.reader) // Opt-in to Frame reuse API on framer to reduce garbage. // Frames aren't safe to read from after a subsequent call to ReadFrame. f.fr.SetReuseFrames() diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/transport/transport.go b/cluster-autoscaler/vendor/google.golang.org/grpc/transport/transport.go index ce5cb74d2ee1..f51f878884df 100644 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/transport/transport.go +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/transport/transport.go @@ -17,19 +17,19 @@ */ // Package transport defines and implements message oriented communication -// channel to complete various transactions (e.g., an RPC). -package transport +// channel to complete various transactions (e.g., an RPC). It is meant for +// grpc-internal usage and is not intended to be imported directly by users. 
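
The new `bufWriter` above replaces `bufio.Writer` and only hands bytes to the connection once `batchSize` is reached or `Flush` is called. A runnable re-sketch of that contract against a `bytes.Buffer` (not the vendored type, which wraps a `net.Conn` and latches the first write error):

```go
package main

import (
	"bytes"
	"fmt"
)

// miniBufWriter re-sketches bufWriter's contract against a plain sink so it
// can run standalone: bytes accumulate in buf and are flushed only when
// offset reaches batchSize or on an explicit Flush.
type miniBufWriter struct {
	buf       []byte
	offset    int
	batchSize int
	sink      *bytes.Buffer
}

func (w *miniBufWriter) Write(b []byte) (int, error) {
	n := 0
	for len(b) > 0 {
		nn := copy(w.buf[w.offset:], b)
		b = b[nn:]
		w.offset += nn
		n += nn
		if w.offset >= w.batchSize {
			if err := w.Flush(); err != nil {
				return n, err
			}
		}
	}
	return n, nil
}

func (w *miniBufWriter) Flush() error {
	if w.offset == 0 {
		return nil
	}
	_, err := w.sink.Write(w.buf[:w.offset])
	w.offset = 0
	return err
}

func main() {
	sink := &bytes.Buffer{}
	w := &miniBufWriter{buf: make([]byte, 8), batchSize: 4, sink: sink}
	w.Write([]byte("abc")) // below batch size: nothing reaches the sink yet
	fmt.Printf("after 3 bytes: sink=%q\n", sink.String())
	w.Write([]byte("de")) // crosses batchSize: flushed
	fmt.Printf("after 5 bytes: sink=%q\n", sink.String())
	w.Flush()
	fmt.Printf("after Flush: sink=%q\n", sink.String())
}
```
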
+package transport // externally used as import "google.golang.org/grpc/transport" import ( - stdctx "context" + "errors" "fmt" "io" "net" "sync" - "time" + "sync/atomic" "golang.org/x/net/context" - "golang.org/x/net/http2" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" @@ -58,6 +58,7 @@ type recvBuffer struct { c chan recvMsg mu sync.Mutex backlog []recvMsg + err error } func newRecvBuffer() *recvBuffer { @@ -69,6 +70,13 @@ func newRecvBuffer() *recvBuffer { func (b *recvBuffer) put(r recvMsg) { b.mu.Lock() + if b.err != nil { + b.mu.Unlock() + // An error had occurred earlier, don't accept more + // data or errors. + return + } + b.err = r.err if len(b.backlog) == 0 { select { case b.c <- r: @@ -102,14 +110,15 @@ func (b *recvBuffer) get() <-chan recvMsg { return b.c } +// // recvBufferReader implements io.Reader interface to read the data from // recvBuffer. type recvBufferReader struct { - ctx context.Context - goAway chan struct{} - recv *recvBuffer - last []byte // Stores the remaining data in the previous calls. - err error + ctx context.Context + ctxDone <-chan struct{} // cache of ctx.Done() (for performance). + recv *recvBuffer + last []byte // Stores the remaining data in the previous calls. + err error } // Read reads the next len(p) bytes from last. If last is drained, it tries to @@ -131,10 +140,8 @@ func (r *recvBufferReader) read(p []byte) (n int, err error) { return copied, nil } select { - case <-r.ctx.Done(): + case <-r.ctxDone: return 0, ContextErr(r.ctx.Err()) - case <-r.goAway: - return 0, ErrStreamDrain case m := <-r.recv.get(): r.recv.load() if m.err != nil { @@ -146,61 +153,7 @@ func (r *recvBufferReader) read(p []byte) (n int, err error) { } } -// All items in an out of a controlBuffer should be the same type. -type item interface { - item() -} - -// controlBuffer is an unbounded channel of item. -type controlBuffer struct { - c chan item - mu sync.Mutex - backlog []item -} - -func newControlBuffer() *controlBuffer { - b := &controlBuffer{ - c: make(chan item, 1), - } - return b -} - -func (b *controlBuffer) put(r item) { - b.mu.Lock() - if len(b.backlog) == 0 { - select { - case b.c <- r: - b.mu.Unlock() - return - default: - } - } - b.backlog = append(b.backlog, r) - b.mu.Unlock() -} - -func (b *controlBuffer) load() { - b.mu.Lock() - if len(b.backlog) > 0 { - select { - case b.c <- b.backlog[0]: - b.backlog[0] = nil - b.backlog = b.backlog[1:] - default: - } - } - b.mu.Unlock() -} - -// get returns the channel that receives an item in the buffer. -// -// Upon receipt of an item, the caller should call load to send another -// item onto the channel if there is any. -func (b *controlBuffer) get() <-chan item { - return b.c -} - -type streamState uint8 +type streamState uint32 const ( streamActive streamState = iota @@ -211,66 +164,93 @@ const ( // Stream represents an RPC in the transport layer. type Stream struct { - id uint32 - // nil for client side Stream. - st ServerTransport - // ctx is the associated context of the stream. - ctx context.Context - // cancel is always nil for client side Stream. - cancel context.CancelFunc - // done is closed when the final status arrives. - done chan struct{} - // goAway is closed when the server sent GoAways signal before this stream was initiated. - goAway chan struct{} - // method records the associated RPC method of the stream. 
- method string + id uint32 + st ServerTransport // nil for client side Stream + ctx context.Context // the associated context of the stream + cancel context.CancelFunc // always nil for client side Stream + done chan struct{} // closed at the end of stream to unblock writers. On the client side. + ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance) + method string // the associated RPC method of the stream recvCompress string sendCompress string buf *recvBuffer trReader io.Reader fc *inFlow recvQuota uint32 - - // TODO: Remote this unused variable. - // The accumulated inbound quota pending for window update. - updateQuota uint32 + wq *writeQuota // Callback to state application's intentions to read data. This - // is used to adjust flow control, if need be. + // is used to adjust flow control, if needed. requestRead func(int) - sendQuotaPool *quotaPool - localSendQuota *quotaPool - // Close headerChan to indicate the end of reception of header metadata. - headerChan chan struct{} - // header caches the received header metadata. - header metadata.MD - // The key-value map of trailer metadata. - trailer metadata.MD - - mu sync.RWMutex // guard the following - // headerOK becomes true from the first header is about to send. - headerOk bool - state streamState - // true iff headerChan is closed. Used to avoid closing headerChan - // multiple times. - headerDone bool - // the status error received from the server. + headerChan chan struct{} // closed to indicate the end of header metadata. + headerDone uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. + + // hdrMu protects header and trailer metadata on the server-side. + hdrMu sync.Mutex + header metadata.MD // the received header metadata. + trailer metadata.MD // the key-value map of trailer metadata. + + // On the server-side, headerSent is atomically set to 1 when the headers are sent out. + headerSent uint32 + + state streamState + + // On client-side it is the status error received from the server. + // On server-side it is unused. status *status.Status - // rstStream indicates whether a RST_STREAM frame needs to be sent - // to the server to signify that this stream is closing. - rstStream bool - // rstError is the error that needs to be sent along with the RST_STREAM frame. - rstError http2.ErrCode - // bytesSent and bytesReceived indicates whether any bytes have been sent or - // received on this stream. - bytesSent bool - bytesReceived bool + + bytesReceived uint32 // indicates whether any bytes have been received on this stream + unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream + + // contentSubtype is the content-subtype for requests. + // this must be lowercase or the behavior is undefined. + contentSubtype string +} + +// isHeaderSent is only valid on the server-side. +func (s *Stream) isHeaderSent() bool { + return atomic.LoadUint32(&s.headerSent) == 1 +} + +// updateHeaderSent updates headerSent and returns true +// if it was already set. It is valid only on server-side.
+func (s *Stream) updateHeaderSent() bool { + return atomic.SwapUint32(&s.headerSent, 1) == 1 +} + +func (s *Stream) swapState(st streamState) streamState { + return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st))) +} + +func (s *Stream) compareAndSwapState(oldState, newState streamState) bool { + return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState)) +} + +func (s *Stream) getState() streamState { + return streamState(atomic.LoadUint32((*uint32)(&s.state))) +} + +func (s *Stream) waitOnHeader() error { + if s.headerChan == nil { + // On the server headerChan is always nil since a stream originates + // only after having received headers. + return nil + } + select { + case <-s.ctx.Done(): + return ContextErr(s.ctx.Err()) + case <-s.headerChan: + return nil + } } // RecvCompress returns the compression algorithm applied to the inbound // message. It is empty string if there is no compression applied. func (s *Stream) RecvCompress() string { + if err := s.waitOnHeader(); err != nil { + return "" + } return s.recvCompress } @@ -285,28 +265,17 @@ func (s *Stream) Done() <-chan struct{} { return s.done } -// GoAway returns a channel which is closed when the server sent GoAways signal -// before this stream was initiated. -func (s *Stream) GoAway() <-chan struct{} { - return s.goAway -} - // Header acquires the key-value pairs of header metadata once it // is available. It blocks until i) the metadata is ready or ii) there is no // header metadata or iii) the stream is canceled/expired. func (s *Stream) Header() (metadata.MD, error) { - var err error - select { - case <-s.ctx.Done(): - err = ContextErr(s.ctx.Err()) - case <-s.goAway: - err = ErrStreamDrain - case <-s.headerChan: - return s.header.Copy(), nil - } + err := s.waitOnHeader() // Even if the stream is closed, header is returned if available. select { case <-s.headerChan: + if s.header == nil { + return nil, nil + } return s.header.Copy(), nil default: } @@ -316,10 +285,10 @@ // Trailer returns the cached trailer metadata. Note that if it is not called // after the entire stream is done, it could return an empty MD. Client // side only. +// It can be safely read only after the stream has ended, that is, after +// read or write has returned io.EOF. func (s *Stream) Trailer() metadata.MD { - s.mu.RLock() c := s.trailer.Copy() - s.mu.RUnlock() return c } @@ -329,6 +298,15 @@ func (s *Stream) ServerTransport() ServerTransport { return s.st } +// ContentSubtype returns the content-subtype for a request. For example, a +// content-subtype of "proto" will result in a content-type of +// "application/grpc+proto". This will always be lowercase. See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +func (s *Stream) ContentSubtype() string { + return s.contentSubtype +} + // Context returns the context of the stream. func (s *Stream) Context() context.Context { return s.ctx } @@ -340,36 +318,49 @@ func (s *Stream) Method() string { return s.method } // Status returns the status received from the server. +// Status can be read safely only after the stream has ended, +// that is, read or write has returned io.EOF. func (s *Stream) Status() *status.Status { return s.status } // SetHeader sets the header metadata. This can be called multiple times. // Server side only. +// This should not be called in parallel to other data writes.
func (s *Stream) SetHeader(md metadata.MD) error { - s.mu.Lock() - if s.headerOk || s.state == streamDone { - s.mu.Unlock() - return ErrIllegalHeaderWrite - } if md.Len() == 0 { - s.mu.Unlock() return nil } + if s.isHeaderSent() || s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() s.header = metadata.Join(s.header, md) - s.mu.Unlock() + s.hdrMu.Unlock() return nil } +// SendHeader sends the given header metadata. The given metadata is +// combined with any metadata set by previous calls to SetHeader and +// then written to the transport stream. +func (s *Stream) SendHeader(md metadata.MD) error { + t := s.ServerTransport() + return t.WriteHeader(s, md) +} + // SetTrailer sets the trailer metadata which will be sent with the RPC status // by the server. This can be called multiple times. Server side only. +// This should not be called in parallel to other data writes. func (s *Stream) SetTrailer(md metadata.MD) error { if md.Len() == 0 { return nil } - s.mu.Lock() + if s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() s.trailer = metadata.Join(s.trailer, md) - s.mu.Unlock() + s.hdrMu.Unlock() return nil } @@ -409,28 +400,15 @@ func (t *transportReader) Read(p []byte) (n int, err error) { return } -// finish sets the stream's state and status, and closes the done channel. -// s.mu must be held by the caller. st must always be non-nil. -func (s *Stream) finish(st *status.Status) { - s.status = st - s.state = streamDone - close(s.done) -} - -// BytesSent indicates whether any bytes have been sent on this stream. -func (s *Stream) BytesSent() bool { - s.mu.Lock() - bs := s.bytesSent - s.mu.Unlock() - return bs -} - // BytesReceived indicates whether any bytes have been received on this stream. func (s *Stream) BytesReceived() bool { - s.mu.Lock() - br := s.bytesReceived - s.mu.Unlock() - return br + return atomic.LoadUint32(&s.bytesReceived) == 1 +} + +// Unprocessed indicates whether the server did not process this stream -- +// i.e. it sent a refused stream or GOAWAY including this stream ID. +func (s *Stream) Unprocessed() bool { + return atomic.LoadUint32(&s.unprocessed) == 1 } // GoString is implemented by Stream so context.String() won't @@ -439,21 +417,6 @@ func (s *Stream) GoString() string { return fmt.Sprintf("<stream: %p, %v>", s, s.method) } -// The key to save transport.Stream in the context. -type streamKey struct{} - -// newContextWithStream creates a new context from ctx and attaches stream -// to it. -func newContextWithStream(ctx context.Context, stream *Stream) context.Context { - return context.WithValue(ctx, streamKey{}, stream) -} - -// StreamFromContext returns the stream saved in ctx. -func StreamFromContext(ctx context.Context) (s *Stream, ok bool) { - s, ok = ctx.Value(streamKey{}).(*Stream) - return -} - // state of transport type transportState int @@ -475,6 +438,7 @@ type ServerConfig struct { InitialConnWindowSize int32 WriteBufferSize int ReadBufferSize int + ChannelzParentID int64 } // NewServerTransport creates a ServerTransport with conn or non-nil error @@ -510,18 +474,21 @@ type ConnectOptions struct { WriteBufferSize int // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. ReadBufferSize int + // ChannelzParentID sets the addrConn id which initiated the creation of this client transport. + ChannelzParentID int64 } // TargetInfo contains the information of the target such as network address and metadata.
type TargetInfo struct { - Addr string - Metadata interface{} + Addr string + Metadata interface{} + Authority string } // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. -func NewClientTransport(ctx context.Context, target TargetInfo, opts ConnectOptions, timeout time.Duration) (ClientTransport, error) { - return newHTTP2Client(ctx, target, opts, timeout) +func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onSuccess func()) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, target, opts, onSuccess) } // Options provides additional hints and information for message @@ -545,10 +512,6 @@ type CallHdr struct { // Method specifies the operation to perform. Method string - // RecvCompress specifies the compression algorithm applied on - // inbound messages. - RecvCompress string - // SendCompress specifies the compression algorithm applied on // outbound message. SendCompress string @@ -563,6 +526,14 @@ // for performance purposes. // If it's false, new stream will never be flushed. Flush bool + + // ContentSubtype specifies the content-subtype for a request. For example, a + // content-subtype of "proto" will result in a content-type of + // "application/grpc+proto". The value of ContentSubtype must be all + // lowercase, otherwise the behavior is undefined. See + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests + // for more details. + ContentSubtype string } // ClientTransport is the common interface for all gRPC client-side transport @@ -604,6 +575,12 @@ // GetGoAwayReason returns the reason why GoAway frame was received. GetGoAwayReason() GoAwayReason + + // IncrMsgSent increments the number of messages sent through this transport. + IncrMsgSent() + + // IncrMsgRecv increments the number of messages received through this transport. + IncrMsgRecv() } // ServerTransport is the common interface for all gRPC server-side transport @@ -637,6 +614,12 @@ // Drain notifies the client this ServerTransport stops accepting new RPCs. Drain() + + // IncrMsgSent increments the number of messages sent through this transport. + IncrMsgSent() + + // IncrMsgRecv increments the number of messages received through this transport. + IncrMsgRecv() } // streamErrorf creates an StreamError with the specified error code and description. @@ -686,9 +669,16 @@ var ( // ErrConnClosing indicates that the transport is closing. ErrConnClosing = connectionErrorf(true, nil, "transport is closing") - // ErrStreamDrain indicates that the stream is rejected by the server because - // the server stops accepting new RPCs. - ErrStreamDrain = streamErrorf(codes.Unavailable, "the server stops accepting new RPCs") + // errStreamDrain indicates that the stream is rejected because the + // connection is draining. This could be caused by goaway or balancer + // removing the address. + errStreamDrain = streamErrorf(codes.Unavailable, "the connection is draining") + // errStreamDone is returned from write at the client side to indicate an + // error from the application layer. + errStreamDone = errors.New("the stream is done") + // statusGoAway indicates that the server sent a GOAWAY that included this + // stream's ID in unprocessed RPCs.
+ statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection") ) // TODO: See if we can replace StreamError with status package errors. @@ -703,75 +693,16 @@ func (e StreamError) Error() string { return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc) } -// wait blocks until it can receive from one of the provided contexts or channels -func wait(ctx, tctx context.Context, done, goAway <-chan struct{}, proceed <-chan int) (int, error) { - select { - case <-ctx.Done(): - return 0, ContextErr(ctx.Err()) - case <-done: - return 0, io.EOF - case <-goAway: - return 0, ErrStreamDrain - case <-tctx.Done(): - return 0, ErrConnClosing - case i := <-proceed: - return i, nil - } -} - -// ContextErr converts the error from context package into a StreamError. -func ContextErr(err error) StreamError { - switch err { - case context.DeadlineExceeded, stdctx.DeadlineExceeded: - return streamErrorf(codes.DeadlineExceeded, "%v", err) - case context.Canceled, stdctx.Canceled: - return streamErrorf(codes.Canceled, "%v", err) - } - return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err) -} - // GoAwayReason contains the reason for the GoAway frame received. type GoAwayReason uint8 const ( - // Invalid indicates that no GoAway frame is received. - Invalid GoAwayReason = 0 - // NoReason is the default value when GoAway frame is received. - NoReason GoAwayReason = 1 - // TooManyPings indicates that a GoAway frame with ErrCodeEnhanceYourCalm - // was received and that the debug data said "too_many_pings". - TooManyPings GoAwayReason = 2 + // GoAwayInvalid indicates that no GoAway frame is received. + GoAwayInvalid GoAwayReason = 0 + // GoAwayNoReason is the default value when GoAway frame is received. + GoAwayNoReason GoAwayReason = 1 + // GoAwayTooManyPings indicates that a GoAway frame with + // ErrCodeEnhanceYourCalm was received and that the debug data said + // "too_many_pings". + GoAwayTooManyPings GoAwayReason = 2 ) - -// loopyWriter is run in a separate go routine. It is the single code path that will -// write data on wire. -func loopyWriter(ctx context.Context, cbuf *controlBuffer, handler func(item) error) { - for { - select { - case i := <-cbuf.get(): - cbuf.load() - if err := handler(i); err != nil { - return - } - case <-ctx.Done(): - return - } - hasData: - for { - select { - case i := <-cbuf.get(): - cbuf.load() - if err := handler(i); err != nil { - return - } - case <-ctx.Done(): - return - default: - if err := handler(&flushIO{}); err != nil { - return - } - break hasData - } - } - } -} diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/version.go b/cluster-autoscaler/vendor/google.golang.org/grpc/version.go new file mode 100644 index 000000000000..7f124fbd5321 --- /dev/null +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/version.go @@ -0,0 +1,22 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +// Version is the current grpc version. +const Version = "1.13.0" diff --git a/cluster-autoscaler/vendor/google.golang.org/grpc/vet.sh b/cluster-autoscaler/vendor/google.golang.org/grpc/vet.sh index d006a426347c..079bc2896d7b 100755 --- a/cluster-autoscaler/vendor/google.golang.org/grpc/vet.sh +++ b/cluster-autoscaler/vendor/google.golang.org/grpc/vet.sh @@ -1,5 +1,10 @@ #!/bin/bash +if [[ `uname -a` = *"Darwin"* ]]; then + echo "It seems you are running on Mac. This script does not work on Mac. See https://github.com/grpc/grpc-go/issues/2047" + exit 1 +fi + set -ex # Exit on error; debugging enabled. set -o pipefail # Fail a pipe if any sub-command fails. @@ -8,12 +13,6 @@ die() { exit 1 } -# TODO: Remove this check and the mangling below once "context" is imported -# directly. -if git status --porcelain | read; then - die "Uncommitted or untracked files found; commit changes first" -fi - PATH="$GOPATH/bin:$GOROOT/bin:$PATH" # Check proto in manual runs or cron runs. @@ -28,8 +27,8 @@ if [ "$1" = "-install" ]; then github.com/golang/lint/golint \ golang.org/x/tools/cmd/goimports \ honnef.co/go/tools/cmd/staticcheck \ - github.com/golang/protobuf/protoc-gen-go \ - golang.org/x/tools/cmd/stringer + github.com/client9/misspell/cmd/misspell \ + github.com/golang/protobuf/protoc-gen-go if [[ "$check_proto" = "true" ]]; then if [[ "$TRAVIS" = "true" ]]; then PROTOBUF_VERSION=3.3.0 @@ -48,10 +47,18 @@ elif [[ "$#" -ne 0 ]]; then die "Unknown argument(s): $*" fi +# TODO: Remove this check and the mangling below once "context" is imported +# directly. +if git status --porcelain | read; then + die "Uncommitted or untracked files found; commit changes first" +fi + git ls-files "*.go" | xargs grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" 2>&1 | tee /dev/stderr | (! read) +git ls-files "*.go" | xargs grep -l '"unsafe"' 2>&1 | (! grep -v '_test.go') | tee /dev/stderr | (! read) +git ls-files "*.go" | xargs grep -l '"math/rand"' 2>&1 | (! grep -v '^examples\|^stress\|grpcrand') | tee /dev/stderr | (! read) gofmt -s -d -l . 2>&1 | tee /dev/stderr | (! read) goimports -l . 2>&1 | tee /dev/stderr | (! read) -golint ./... 2>&1 | (grep -vE "(_mock|_string|grpc_lb_v1/doc|\.pb)\.go:" || true) | tee /dev/stderr | (! read) +golint ./... 2>&1 | (grep -vE "(_mock|\.pb)\.go:" || true) | tee /dev/stderr | (! read) # Undo any edits made by this script. cleanup() { @@ -64,7 +71,7 @@ trap cleanup EXIT git ls-files "*.go" | xargs sed -i 's:"golang.org/x/net/context":"context":' set +o pipefail # TODO: Stop filtering pb.go files once golang/protobuf#214 is fixed. -go tool vet -all . 2>&1 | grep -vF '.pb.go:' | tee /dev/stderr | (! read) +go tool vet -all . 2>&1 | grep -vE '(clientconn|transport\/transport_test).go:.*cancel (function|var)' | grep -vF '.pb.go:' | tee /dev/stderr | (! read) set -o pipefail git reset --hard HEAD @@ -75,4 +82,13 @@ if [[ "$check_proto" = "true" ]]; then fi # TODO(menghanl): fix errors in transport_test. -staticcheck -ignore google.golang.org/grpc/transport/transport_test.go:SA2002 ./... +staticcheck -ignore ' +google.golang.org/grpc/transport/transport_test.go:SA2002 +google.golang.org/grpc/benchmark/benchmain/main.go:SA1019 +google.golang.org/grpc/stats/stats_test.go:SA1019 +google.golang.org/grpc/test/end2end_test.go:SA1019 +google.golang.org/grpc/balancer_test.go:SA1019 +google.golang.org/grpc/balancer.go:SA1019 +google.golang.org/grpc/clientconn_test.go:SA1019 +' ./... +misspell -error . 
diff --git a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/.travis.yml b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/.travis.yml index 004172a2e37f..9f556934d8b9 100644 --- a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/.travis.yml +++ b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/.travis.yml @@ -4,6 +4,9 @@ go: - 1.4 - 1.5 - 1.6 + - 1.7 + - 1.8 + - 1.9 - tip go_import_path: gopkg.in/yaml.v2 diff --git a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/LICENSE b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/LICENSE index 866d74a7ad79..8dada3edaf50 100644 --- a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/LICENSE +++ b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/LICENSE @@ -1,13 +1,201 @@ -Copyright 2011-2016 Canonical Ltd. + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - http://www.apache.org/licenses/LICENSE-2.0 + 1. Definitions. -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/NOTICE b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/NOTICE new file mode 100644 index 000000000000..866d74a7ad79 --- /dev/null +++ b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/README.md b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/README.md index 7a512d67c2b9..b50c6e877559 100644 --- a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/README.md +++ b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/README.md @@ -48,8 +48,6 @@ The yaml package is licensed under the Apache License 2.0. Please see the LICENS Example ------- -Some more examples can be found in the "examples" folder. - ```Go package main @@ -67,6 +65,8 @@ b: d: [3, 4] ` +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. type T struct { A string B struct { diff --git a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/apic.go b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/apic.go index 95ec014e8ccf..1f7e87e67275 100644 --- a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/apic.go +++ b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/apic.go @@ -2,7 +2,6 @@ package yaml import ( "io" - "os" ) func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { @@ -48,9 +47,9 @@ func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err return n, nil } -// File read handler. -func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - return parser.input_file.Read(buffer) +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) } // Set a string input. @@ -64,12 +63,12 @@ func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { } // Set a file input. -func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) { +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { if parser.read_handler != nil { panic("must set the input source only once") } - parser.read_handler = yaml_file_read_handler - parser.input_file = file + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r } // Set the source encoding. @@ -81,14 +80,13 @@ func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { } // Create a new emitter object. -func yaml_emitter_initialize(emitter *yaml_emitter_t) bool { +func yaml_emitter_initialize(emitter *yaml_emitter_t) { *emitter = yaml_emitter_t{ buffer: make([]byte, output_buffer_size), raw_buffer: make([]byte, 0, output_raw_buffer_size), states: make([]yaml_emitter_state_t, 0, initial_stack_size), events: make([]yaml_event_t, 0, initial_queue_size), } - return true } // Destroy an emitter object. @@ -102,9 +100,10 @@ func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { return nil } -// File write handler. -func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - _, err := emitter.output_file.Write(buffer) +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) return err } @@ -118,12 +117,12 @@ func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]by } // Set a file output. 
-func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) { +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { if emitter.write_handler != nil { panic("must set the output target only once") } - emitter.write_handler = yaml_file_write_handler - emitter.output_file = file + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w } // Set the output encoding. @@ -252,41 +251,41 @@ func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { // // Create STREAM-START. -func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool { +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { *event = yaml_event_t{ typ: yaml_STREAM_START_EVENT, encoding: encoding, } - return true } // Create STREAM-END. -func yaml_stream_end_event_initialize(event *yaml_event_t) bool { +func yaml_stream_end_event_initialize(event *yaml_event_t) { *event = yaml_event_t{ typ: yaml_STREAM_END_EVENT, } - return true } // Create DOCUMENT-START. -func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t, - tag_directives []yaml_tag_directive_t, implicit bool) bool { +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { *event = yaml_event_t{ typ: yaml_DOCUMENT_START_EVENT, version_directive: version_directive, tag_directives: tag_directives, implicit: implicit, } - return true } // Create DOCUMENT-END. -func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool { +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { *event = yaml_event_t{ typ: yaml_DOCUMENT_END_EVENT, implicit: implicit, } - return true } ///* @@ -348,7 +347,7 @@ func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { } // Create MAPPING-START. -func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool { +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { *event = yaml_event_t{ typ: yaml_MAPPING_START_EVENT, anchor: anchor, @@ -356,15 +355,13 @@ func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte implicit: implicit, style: yaml_style_t(style), } - return true } // Create MAPPING-END. -func yaml_mapping_end_event_initialize(event *yaml_event_t) bool { +func yaml_mapping_end_event_initialize(event *yaml_event_t) { *event = yaml_event_t{ typ: yaml_MAPPING_END_EVENT, } - return true } // Destroy an event object. @@ -471,7 +468,7 @@ func yaml_event_delete(event *yaml_event_t) { // } context // tag_directive *yaml_tag_directive_t // -// context.error = YAML_NO_ERROR // Eliminate a compliler warning. +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. // // assert(document) // Non-NULL document object is expected. 
// diff --git a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/decode.go b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/decode.go index db1f5f20686f..e4e56e28e0e8 100644 --- a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/decode.go +++ b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/decode.go @@ -4,6 +4,7 @@ import ( "encoding" "encoding/base64" "fmt" + "io" "math" "reflect" "strconv" @@ -22,19 +23,22 @@ type node struct { kind int line, column int tag string - value string - implicit bool - children []*node - anchors map[string]*node + // For an alias node, alias holds the resolved alias. + alias *node + value string + implicit bool + children []*node + anchors map[string]*node } // ---------------------------------------------------------------------------- // Parser, produces a node tree out of a libyaml event stream. type parser struct { - parser yaml_parser_t - event yaml_event_t - doc *node + parser yaml_parser_t + event yaml_event_t + doc *node + doneInit bool } func newParser(b []byte) *parser { @@ -42,21 +46,30 @@ func newParser(b []byte) *parser { if !yaml_parser_initialize(&p.parser) { panic("failed to initialize YAML emitter") } - if len(b) == 0 { b = []byte{'\n'} } - yaml_parser_set_input_string(&p.parser, b) + return &p +} - p.skip() - if p.event.typ != yaml_STREAM_START_EVENT { - panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ))) +func newParserFromReader(r io.Reader) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") } - p.skip() + yaml_parser_set_input_reader(&p.parser, r) return &p } +func (p *parser) init() { + if p.doneInit { + return + } + p.expect(yaml_STREAM_START_EVENT) + p.doneInit = true +} + func (p *parser) destroy() { if p.event.typ != yaml_NO_EVENT { yaml_event_delete(&p.event) @@ -64,16 +77,35 @@ func (p *parser) destroy() { yaml_parser_delete(&p.parser) } -func (p *parser) skip() { - if p.event.typ != yaml_NO_EVENT { - if p.event.typ == yaml_STREAM_END_EVENT { - failf("attempted to go past the end of stream; corrupted value?") +// expect consumes an event from the event stream and +// checks that it's of the expected type. +func (p *parser) expect(e yaml_event_type_t) { + if p.event.typ == yaml_NO_EVENT { + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() } - yaml_event_delete(&p.event) + } + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + if p.event.typ != e { + p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) + p.fail() + } + yaml_event_delete(&p.event) + p.event.typ = yaml_NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into p.event and returns the event type. 
+func (p *parser) peek() yaml_event_type_t { + if p.event.typ != yaml_NO_EVENT { + return p.event.typ } if !yaml_parser_parse(&p.parser, &p.event) { p.fail() } + return p.event.typ } func (p *parser) fail() { @@ -81,6 +113,10 @@ func (p *parser) fail() { var line int if p.parser.problem_mark.line != 0 { line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } } else if p.parser.context_mark.line != 0 { line = p.parser.context_mark.line } @@ -103,7 +139,8 @@ func (p *parser) anchor(n *node, anchor []byte) { } func (p *parser) parse() *node { - switch p.event.typ { + p.init() + switch p.peek() { case yaml_SCALAR_EVENT: return p.scalar() case yaml_ALIAS_EVENT: @@ -118,7 +155,7 @@ func (p *parser) parse() *node { // Happens when attempting to decode an empty buffer. return nil default: - panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ))) + panic("attempted to parse unknown event: " + p.event.typ.String()) } } @@ -134,19 +171,20 @@ func (p *parser) document() *node { n := p.node(documentNode) n.anchors = make(map[string]*node) p.doc = n - p.skip() + p.expect(yaml_DOCUMENT_START_EVENT) n.children = append(n.children, p.parse()) - if p.event.typ != yaml_DOCUMENT_END_EVENT { - panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ))) - } - p.skip() + p.expect(yaml_DOCUMENT_END_EVENT) return n } func (p *parser) alias() *node { n := p.node(aliasNode) n.value = string(p.event.anchor) - p.skip() + n.alias = p.doc.anchors[n.value] + if n.alias == nil { + failf("unknown anchor '%s' referenced", n.value) + } + p.expect(yaml_ALIAS_EVENT) return n } @@ -156,29 +194,29 @@ func (p *parser) scalar() *node { n.tag = string(p.event.tag) n.implicit = p.event.implicit p.anchor(n, p.event.anchor) - p.skip() + p.expect(yaml_SCALAR_EVENT) return n } func (p *parser) sequence() *node { n := p.node(sequenceNode) p.anchor(n, p.event.anchor) - p.skip() - for p.event.typ != yaml_SEQUENCE_END_EVENT { + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { n.children = append(n.children, p.parse()) } - p.skip() + p.expect(yaml_SEQUENCE_END_EVENT) return n } func (p *parser) mapping() *node { n := p.node(mappingNode) p.anchor(n, p.event.anchor) - p.skip() - for p.event.typ != yaml_MAPPING_END_EVENT { + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { n.children = append(n.children, p.parse(), p.parse()) } - p.skip() + p.expect(yaml_MAPPING_END_EVENT) return n } @@ -187,7 +225,7 @@ func (p *parser) mapping() *node { type decoder struct { doc *node - aliases map[string]bool + aliases map[*node]bool mapType reflect.Type terrors []string strict bool @@ -198,11 +236,13 @@ var ( durationType = reflect.TypeOf(time.Duration(0)) defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) ifaceType = defaultMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) ) func newDecoder(strict bool) *decoder { d := &decoder{mapType: defaultMapType, strict: strict} - d.aliases = make(map[string]bool) + d.aliases = make(map[*node]bool) return d } @@ -251,7 +291,7 @@ func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { // // If n holds a null value, prepare returns before doing anything. 
func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { - if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "" && n.implicit) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { return out, false, false } again := true @@ -308,16 +348,13 @@ func (d *decoder) document(n *node, out reflect.Value) (good bool) { } func (d *decoder) alias(n *node, out reflect.Value) (good bool) { - an, ok := d.doc.anchors[n.value] - if !ok { - failf("unknown anchor '%s' referenced", n.value) - } - if d.aliases[n.value] { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. failf("anchor '%s' value contains itself", n.value) } - d.aliases[n.value] = true - good = d.unmarshal(an, out) - delete(d.aliases, n.value) + d.aliases[n] = true + good = d.unmarshal(n.alias, out) + delete(d.aliases, n) return good } @@ -329,7 +366,7 @@ func resetMap(out reflect.Value) { } } -func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { +func (d *decoder) scalar(n *node, out reflect.Value) bool { var tag string var resolved interface{} if n.tag == "" && !n.implicit { @@ -353,9 +390,26 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { } return true } - if s, ok := resolved.(string); ok && out.CanAddr() { - if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok { - err := u.UnmarshalText([]byte(s)) + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == yaml_BINARY_TAG { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.value) + } + err := u.UnmarshalText(text) if err != nil { fail(err) } @@ -366,46 +420,54 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { case reflect.String: if tag == yaml_BINARY_TAG { out.SetString(resolved.(string)) - good = true - } else if resolved != nil { + return true + } + if resolved != nil { out.SetString(n.value) - good = true + return true } case reflect.Interface: if resolved == nil { out.Set(reflect.Zero(out.Type())) + } else if tag == yaml_TIMESTAMP_TAG { + // It looks like a timestamp but for backward compatibility + // reasons we set it as a string, so that code that unmarshals + // timestamp-like values into interface{} will continue to + // see a string and not a time.Time. + // TODO(v3) Drop this. 
+ out.Set(reflect.ValueOf(n.value)) } else { out.Set(reflect.ValueOf(resolved)) } - good = true + return true case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: switch resolved := resolved.(type) { case int: if !out.OverflowInt(int64(resolved)) { out.SetInt(int64(resolved)) - good = true + return true } case int64: if !out.OverflowInt(resolved) { out.SetInt(resolved) - good = true + return true } case uint64: if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { out.SetInt(int64(resolved)) - good = true + return true } case float64: if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { out.SetInt(int64(resolved)) - good = true + return true } case string: if out.Type() == durationType { d, err := time.ParseDuration(resolved) if err == nil { out.SetInt(int64(d)) - good = true + return true } } } @@ -414,44 +476,49 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { case int: if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { out.SetUint(uint64(resolved)) - good = true + return true } case int64: if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { out.SetUint(uint64(resolved)) - good = true + return true } case uint64: if !out.OverflowUint(uint64(resolved)) { out.SetUint(uint64(resolved)) - good = true + return true } case float64: if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { out.SetUint(uint64(resolved)) - good = true + return true } } case reflect.Bool: switch resolved := resolved.(type) { case bool: out.SetBool(resolved) - good = true + return true } case reflect.Float32, reflect.Float64: switch resolved := resolved.(type) { case int: out.SetFloat(float64(resolved)) - good = true + return true case int64: out.SetFloat(float64(resolved)) - good = true + return true case uint64: out.SetFloat(float64(resolved)) - good = true + return true case float64: out.SetFloat(resolved) - good = true + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true } case reflect.Ptr: if out.Type().Elem() == reflect.TypeOf(resolved) { @@ -459,13 +526,11 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { elem := reflect.New(out.Type().Elem()) elem.Elem().Set(reflect.ValueOf(resolved)) out.Set(elem) - good = true + return true } } - if !good { - d.terror(n, tag, out) - } - return good + d.terror(n, tag, out) + return false } func settableValueOf(i interface{}) reflect.Value { @@ -482,6 +547,10 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { switch out.Kind() { case reflect.Slice: out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } case reflect.Interface: // No type hints. Will have to use a generic sequence. 
iface = out @@ -500,7 +569,9 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { j++ } } - out.Set(out.Slice(0, j)) + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } if iface.IsValid() { iface.Set(out) } @@ -561,7 +632,7 @@ func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { } e := reflect.New(et).Elem() if d.unmarshal(n.children[i+1], e) { - out.SetMapIndex(k, e) + d.setMapIndex(n.children[i+1], out, k, e) } } } @@ -569,6 +640,14 @@ func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { return true } +func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { + if d.strict && out.MapIndex(k) != zeroValue { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) + return + } + out.SetMapIndex(k, v) +} + func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { outt := out.Type() if outt.Elem() != mapItemType { @@ -616,6 +695,10 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { elemType = inlineMap.Type().Elem() } + var doneFields []bool + if d.strict { + doneFields = make([]bool, len(sinfo.FieldsList)) + } for i := 0; i < l; i += 2 { ni := n.children[i] if isMerge(ni) { @@ -626,6 +709,13 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { continue } if info, ok := sinfo.FieldsMap[name.String()]; ok { + if d.strict { + if doneFields[info.Id] { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } var field reflect.Value if info.Inline == nil { field = out.Field(info.Num) @@ -639,9 +729,9 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { } value := reflect.New(elemType).Elem() d.unmarshal(n.children[i+1], value) - inlineMap.SetMapIndex(name, value) + d.setMapIndex(n.children[i+1], inlineMap, name, value) } else if d.strict { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in struct %s", n.line+1, name.String(), out.Type())) + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) } } return true diff --git a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/emitterc.go b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/emitterc.go index 41de8b856c2f..a1c2cc52627f 100644 --- a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/emitterc.go +++ b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/emitterc.go @@ -2,6 +2,7 @@ package yaml import ( "bytes" + "fmt" ) // Flush the buffer if needed. @@ -664,7 +665,7 @@ func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, return yaml_emitter_emit_mapping_start(emitter, event) default: return yaml_emitter_set_emitter_error(emitter, - "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) } } @@ -842,7 +843,7 @@ func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event return true } -// Write an achor. +// Write an anchor. 
func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { if emitter.anchor_data.anchor == nil { return true @@ -995,9 +996,9 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { space_break = false preceded_by_whitespace = false - followed_by_whitespace = false - previous_space = false - previous_break = false + followed_by_whitespace = false + previous_space = false + previous_break = false ) emitter.scalar_data.value = value diff --git a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/encode.go b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/encode.go index 84f84995517b..a14435e82f84 100644 --- a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/encode.go +++ b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/encode.go @@ -3,12 +3,14 @@ package yaml import ( "encoding" "fmt" + "io" "reflect" "regexp" "sort" "strconv" "strings" "time" + "unicode/utf8" ) type encoder struct { @@ -16,25 +18,39 @@ type encoder struct { event yaml_event_t out []byte flow bool + // doneInit holds whether the initial stream_start_event has been + // emitted. + doneInit bool } -func newEncoder() (e *encoder) { - e = &encoder{} - e.must(yaml_emitter_initialize(&e.emitter)) +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) yaml_emitter_set_output_string(&e.emitter, &e.out) yaml_emitter_set_unicode(&e.emitter, true) - e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)) - e.emit() - e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true)) - e.emit() return e } -func (e *encoder) finish() { - e.must(yaml_document_end_event_initialize(&e.event, true)) +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { e.emitter.open_ended = false - e.must(yaml_stream_end_event_initialize(&e.event)) + yaml_stream_end_event_initialize(&e.event) e.emit() } @@ -44,9 +60,7 @@ func (e *encoder) destroy() { func (e *encoder) emit() { // This will internally delete the e.event value. - if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT { - e.must(false) - } + e.must(yaml_emitter_emit(&e.emitter, &e.event)) } func (e *encoder) must(ok bool) { @@ -59,13 +73,28 @@ func (e *encoder) must(ok bool) { } } +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() +} + func (e *encoder) marshal(tag string, in reflect.Value) { - if !in.IsValid() { + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { e.nilv() return } iface := in.Interface() - if m, ok := iface.(Marshaler); ok { + switch m := iface.(type) { + case time.Time, *time.Time: + // Although time.Time implements TextMarshaler, + // we don't want to treat it as a string for YAML + // purposes because YAML has special support for + // timestamps. 
+ case Marshaler: v, err := m.MarshalYAML() if err != nil { fail(err) @@ -75,31 +104,34 @@ func (e *encoder) marshal(tag string, in reflect.Value) { return } in = reflect.ValueOf(v) - } else if m, ok := iface.(encoding.TextMarshaler); ok { + case encoding.TextMarshaler: text, err := m.MarshalText() if err != nil { fail(err) } in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return } switch in.Kind() { case reflect.Interface: - if in.IsNil() { - e.nilv() - } else { - e.marshal(tag, in.Elem()) - } + e.marshal(tag, in.Elem()) case reflect.Map: e.mapv(tag, in) case reflect.Ptr: - if in.IsNil() { - e.nilv() + if in.Type() == ptrTimeType { + e.timev(tag, in.Elem()) } else { e.marshal(tag, in.Elem()) } case reflect.Struct: - e.structv(tag, in) - case reflect.Slice: + if in.Type() == timeType { + e.timev(tag, in) + } else { + e.structv(tag, in) + } + case reflect.Slice, reflect.Array: if in.Type().Elem() == mapItemType { e.itemsv(tag, in) } else { @@ -191,10 +223,10 @@ func (e *encoder) mappingv(tag string, f func()) { e.flow = false style = yaml_FLOW_MAPPING_STYLE } - e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) e.emit() f() - e.must(yaml_mapping_end_event_initialize(&e.event)) + yaml_mapping_end_event_initialize(&e.event) e.emit() } @@ -240,23 +272,36 @@ var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0 func (e *encoder) stringv(tag string, in reflect.Value) { var style yaml_scalar_style_t s := in.String() - rtag, rs := resolve("", s) - if rtag == yaml_BINARY_TAG { - if tag == "" || tag == yaml_STR_TAG { - tag = rtag - s = rs.(string) - } else if tag == yaml_BINARY_TAG { + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == yaml_BINARY_TAG { failf("explicitly tagged !!binary data must be base64-encoded") - } else { + } + if tag != "" { failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = yaml_BINARY_TAG + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s) } - if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } else if strings.Contains(s, "\n") { + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. + switch { + case strings.Contains(s, "\n"): style = yaml_LITERAL_SCALAR_STYLE - } else { + case canUsePlain: style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE } e.emitScalar(s, "", tag, style) } @@ -281,9 +326,20 @@ func (e *encoder) uintv(tag string, in reflect.Value) { e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) } +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + func (e *encoder) floatv(tag string, in reflect.Value) { - // FIXME: Handle 64 bits here. 
- s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32) + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) switch s { case "+Inf": s = ".inf" diff --git a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/go.mod b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/go.mod new file mode 100644 index 000000000000..1934e8769450 --- /dev/null +++ b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/go.mod @@ -0,0 +1,5 @@ +module "gopkg.in/yaml.v2" + +require ( + "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405 +) diff --git a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/readerc.go b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/readerc.go index f450791717bf..7c1f5fac3dbd 100644 --- a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/readerc.go +++ b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/readerc.go @@ -93,9 +93,18 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { panic("read handler must be set") } + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + // If the EOF flag is set and the raw buffer is empty, do nothing. if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { - return true + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true } // Return if the buffer contains enough characters. @@ -389,6 +398,15 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { break } } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. 
+ for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } parser.buffer = parser.buffer[:buffer_len] return true } diff --git a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/resolve.go b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/resolve.go index 232313cc0845..6c151db6fbd5 100644 --- a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/resolve.go +++ b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/resolve.go @@ -6,7 +6,7 @@ import ( "regexp" "strconv" "strings" - "unicode/utf8" + "time" ) type resolveMapItem struct { @@ -75,7 +75,7 @@ func longTag(tag string) string { func resolvableTag(tag string) bool { switch tag { - case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG: + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG: return true } return false @@ -92,6 +92,19 @@ func resolve(tag string, in string) (rtag string, out interface{}) { switch tag { case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: return + case yaml_FLOAT_TAG: + if rtag == yaml_INT_TAG { + switch v := out.(type) { + case int64: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + case int: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + } + } } failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) }() @@ -125,6 +138,15 @@ func resolve(tag string, in string) (rtag string, out interface{}) { case 'D', 'S': // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. + if tag == "" || tag == yaml_TIMESTAMP_TAG { + t, ok := parseTimestamp(in) + if ok { + return yaml_TIMESTAMP_TAG, t + } + } + plain := strings.Replace(in, "_", "", -1) intv, err := strconv.ParseInt(plain, 0, 64) if err == nil { @@ -158,28 +180,20 @@ func resolve(tag string, in string) (rtag string, out interface{}) { return yaml_INT_TAG, uintv } } else if strings.HasPrefix(plain, "-0b") { - intv, err := strconv.ParseInt(plain[3:], 2, 64) + intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) if err == nil { - if intv == int64(int(intv)) { - return yaml_INT_TAG, -int(intv) + if true || intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) } else { - return yaml_INT_TAG, -intv + return yaml_INT_TAG, intv } } } - // XXX Handle timestamps here. - default: panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") } } - if tag == yaml_BINARY_TAG { - return yaml_BINARY_TAG, in - } - if utf8.ValidString(in) { - return yaml_STR_TAG, in - } - return yaml_BINARY_TAG, encodeBase64(in) + return yaml_STR_TAG, in } // encodeBase64 encodes s as base64 that is broken up into multiple lines @@ -206,3 +220,39 @@ func encodeBase64(s string) string { } return string(out[:k]) } + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. +} + +// parseTimestamp parses s as a timestamp string and +// returns the timestamp and reports whether it succeeded. 
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html +func parseTimestamp(s string) (time.Time, bool) { + // TODO write code to check all the formats supported by + // http://yaml.org/type/timestamp.html instead of using time.Parse. + + // Quick check: all date formats start with YYYY-. + i := 0 + for ; i < len(s); i++ { + if c := s[i]; c < '0' || c > '9' { + break + } + } + if i != 4 || i == len(s) || s[i] != '-' { + return time.Time{}, false + } + for _, format := range allowedTimestampFormats { + if t, err := time.Parse(format, s); err == nil { + return t, true + } + } + return time.Time{}, false +} diff --git a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/scannerc.go b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/scannerc.go index dd64ee4b9506..077fd1dd2d44 100644 --- a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/scannerc.go +++ b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/scannerc.go @@ -871,12 +871,6 @@ func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { required := parser.flow_level == 0 && parser.indent == parser.mark.column - // A simple key is required only when it is the first token in the current - // line. Therefore it is always allowed. But we add a check anyway. - if required && !parser.simple_key_allowed { - panic("should not happen") - } - // // If the current position may start a simple key, save it. // @@ -1944,7 +1938,7 @@ func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_ma } else { // It's either the '!' tag or not really a tag handle. If it's a %TAG // directive, it's an error. If it's a tag token, it must be a part of URI. - if directive && !(s[0] == '!' && s[1] == 0) { + if directive && string(s) != "!" { yaml_parser_set_scanner_tag_error(parser, directive, start_mark, "did not find expected '!'") return false @@ -1959,12 +1953,12 @@ func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_ma func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { //size_t length = head ? strlen((char *)head) : 0 var s []byte - length := len(head) + hasTag := len(head) > 0 // Copy the head if needed. // // Note that we don't copy the leading '!' character. - if length > 0 { + if len(head) > 1 { s = append(s, head[1:]...) } @@ -1997,15 +1991,14 @@ func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte } } else { s = read(parser, s) - length++ } if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } + hasTag = true } - // Check if the tag is non-empty. - if length == 0 { + if !hasTag { yaml_parser_set_scanner_tag_error(parser, directive, start_mark, "did not find expected tag URI") return false @@ -2476,6 +2469,10 @@ func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, si } } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + // Check if we are at the end of the scalar. if single { if parser.buffer[parser.buffer_pos] == '\'' { @@ -2488,10 +2485,6 @@ func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, si } // Consume blank characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { if is_blank(parser.buffer, parser.buffer_pos) { // Consume a space or a tab character. 
@@ -2593,19 +2586,10 @@ func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) b // Consume non-blank characters. for !is_blankz(parser.buffer, parser.buffer_pos) { - // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13". - if parser.flow_level > 0 && - parser.buffer[parser.buffer_pos] == ':' && - !is_blankz(parser.buffer, parser.buffer_pos+1) { - yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found unexpected ':'") - return false - } - // Check for indicators that may end a plain scalar. if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || (parser.flow_level > 0 && - (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' || + (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || parser.buffer[parser.buffer_pos] == '}')) { @@ -2657,10 +2641,10 @@ func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) b for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { if is_blank(parser.buffer, parser.buffer_pos) { - // Check for tab character that abuse indentation. + // Check for tab characters that abuse indentation. if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found a tab character that violate indentation") + start_mark, "found a tab character that violates indentation") return false } diff --git a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/sorter.go b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/sorter.go index 5958822f9c6b..4c45e660a8f2 100644 --- a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/sorter.go +++ b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/sorter.go @@ -51,6 +51,15 @@ func (l keyList) Less(i, j int) bool { } var ai, bi int var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { an = an*10 + int64(ar[ai]-'0') } diff --git a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/writerc.go b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/writerc.go index 190362f25dfb..a2dde608cb7a 100644 --- a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/writerc.go +++ b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/writerc.go @@ -18,72 +18,9 @@ func yaml_emitter_flush(emitter *yaml_emitter_t) bool { return true } - // If the output encoding is UTF-8, we don't need to recode the buffer. - if emitter.encoding == yaml_UTF8_ENCODING { - if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - emitter.buffer_pos = 0 - return true - } - - // Recode the buffer into the raw buffer. - var low, high int - if emitter.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - high, low = 1, 0 - } - - pos := 0 - for pos < emitter.buffer_pos { - // See the "reader.c" code for more details on UTF-8 encoding. Note - // that we assume that the buffer contains a valid UTF-8 sequence. - - // Read the next UTF-8 character. 
- octet := emitter.buffer[pos] - - var w int - var value rune - switch { - case octet&0x80 == 0x00: - w, value = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, value = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, value = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, value = 4, rune(octet&0x07) - } - for k := 1; k < w; k++ { - octet = emitter.buffer[pos+k] - value = (value << 6) + (rune(octet) & 0x3F) - } - pos += w - - // Write the character. - if value < 0x10000 { - var b [2]byte - b[high] = byte(value >> 8) - b[low] = byte(value & 0xFF) - emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) - } else { - // Write the character using a surrogate pair (check "reader.c"). - var b [4]byte - value -= 0x10000 - b[high] = byte(0xD8 + (value >> 18)) - b[low] = byte((value >> 10) & 0xFF) - b[high+2] = byte(0xDC + ((value >> 8) & 0xFF)) - b[low+2] = byte(value & 0xFF) - emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) - } - } - - // Write the raw buffer. - if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) } emitter.buffer_pos = 0 - emitter.raw_buffer = emitter.raw_buffer[:0] return true } diff --git a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/yaml.go b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/yaml.go index bf18884e0e3a..de85aa4cdb71 100644 --- a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/yaml.go +++ b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/yaml.go @@ -9,6 +9,7 @@ package yaml import ( "errors" "fmt" + "io" "reflect" "strings" "sync" @@ -81,12 +82,58 @@ func Unmarshal(in []byte, out interface{}) (err error) { } // UnmarshalStrict is like Unmarshal except that any fields that are found -// in the data that do not have corresponding struct members will result in +// in the data that do not have corresponding struct members, or mapping +// keys that are duplicates, will result in // an error. func UnmarshalStrict(in []byte, out interface{}) (err error) { return unmarshal(in, out, true) } +// A Decorder reads and decodes YAML values from an input stream. +type Decoder struct { + strict bool + parser *parser +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read +// data from r beyond the YAML values requested. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + parser: newParserFromReader(r), + } +} + +// SetStrict sets whether strict decoding behaviour is enabled when +// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict. +func (dec *Decoder) SetStrict(strict bool) { + dec.strict = strict +} + +// Decode reads the next YAML-encoded value from its input +// and stores it in the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. 
+func (dec *Decoder) Decode(v interface{}) (err error) { + d := newDecoder(dec.strict) + defer handleErr(&err) + node := dec.parser.parse() + if node == nil { + return io.EOF + } + out := reflect.ValueOf(v) + if out.Kind() == reflect.Ptr && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(node, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + func unmarshal(in []byte, out interface{}, strict bool) (err error) { defer handleErr(&err) d := newDecoder(strict) @@ -110,8 +157,8 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) { // of the generated document will reflect the structure of the value itself. // Maps and pointers (to struct, string, int, etc) are accepted as the in value. // -// Struct fields are only unmarshalled if they are exported (have an upper case -// first letter), and are unmarshalled using the field name lowercased as the +// Struct fields are only marshalled if they are exported (have an upper case +// first letter), and are marshalled using the field name lowercased as the // default key. Custom keys may be defined via the "yaml" name in the field // tag: the content preceding the first comma is used as the key, and the // following comma-separated options are used to tweak the marshalling process. @@ -125,7 +172,10 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) { // // omitempty Only include the field if it's not set to the zero // value for the type or to empty slices or maps. -// Does not apply to zero valued structs. +// Zero valued structs will be omitted if all their public +// fields are zero, unless they implement an IsZero +// method (see the IsZeroer interface type), in which +// case the field will be included if that method returns true. // // flow Marshal using a flow style (useful for structs, // sequences and maps). @@ -140,7 +190,7 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) { // For example: // // type T struct { -// F int "a,omitempty" +// F int `yaml:"a,omitempty"` // B int // } // yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" @@ -150,12 +200,47 @@ func Marshal(in interface{}) (out []byte, err error) { defer handleErr(&err) e := newEncoder() defer e.destroy() - e.marshal("", reflect.ValueOf(in)) + e.marshalDoc("", reflect.ValueOf(in)) e.finish() out = e.out return } +// An Encoder writes YAML values to an output stream. +type Encoder struct { + encoder *encoder +} + +// NewEncoder returns a new encoder that writes to w. +// The Encoder should be closed after use to flush all data +// to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + encoder: newEncoderWithWriter(w), + } +} + +// Encode writes the YAML encoding of v to the stream. +// If multiple items are encoded to the stream, the +// second and subsequent document will be preceded +// with a "---" document separator, but the first will not. +// +// See the documentation for Marshal for details about the conversion of Go +// values to YAML. +func (e *Encoder) Encode(v interface{}) (err error) { + defer handleErr(&err) + e.encoder.marshalDoc("", reflect.ValueOf(v)) + return nil +} + +// Close closes the encoder by writing any remaining data. +// It does not write a stream terminating string "...". 
+func (e *Encoder) Close() (err error) { + defer handleErr(&err) + e.encoder.finish() + return nil +} + func handleErr(err *error) { if v := recover(); v != nil { if e, ok := v.(yamlError); ok { @@ -211,6 +296,9 @@ type fieldInfo struct { Num int OmitEmpty bool Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int // Inline holds the field index if the field is part of an inlined struct. Inline []int @@ -290,6 +378,7 @@ func getStructInfo(st reflect.Type) (*structInfo, error) { } else { finfo.Inline = append([]int{i}, finfo.Inline...) } + finfo.Id = len(fieldsList) fieldsMap[finfo.Key] = finfo fieldsList = append(fieldsList, finfo) } @@ -311,11 +400,16 @@ func getStructInfo(st reflect.Type) (*structInfo, error) { return nil, errors.New(msg) } + info.Id = len(fieldsList) fieldsList = append(fieldsList, info) fieldsMap[info.Key] = info } - sinfo = &structInfo{fieldsMap, fieldsList, inlineMap} + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + } fieldMapMutex.Lock() structMap[st] = sinfo @@ -323,8 +417,23 @@ func getStructInfo(st reflect.Type) (*structInfo, error) { return sinfo, nil } +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. +type IsZeroer interface { + IsZero() bool +} + func isZero(v reflect.Value) bool { - switch v.Kind() { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { case reflect.String: return len(v.String()) == 0 case reflect.Interface, reflect.Ptr: diff --git a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/yamlh.go b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/yamlh.go index 3caeca0491b5..e25cee563be8 100644 --- a/cluster-autoscaler/vendor/gopkg.in/yaml.v2/yamlh.go +++ b/cluster-autoscaler/vendor/gopkg.in/yaml.v2/yamlh.go @@ -1,6 +1,7 @@ package yaml import ( + "fmt" "io" ) @@ -239,6 +240,27 @@ const ( yaml_MAPPING_END_EVENT // A MAPPING-END event. ) +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + // The event structure. type yaml_event_t struct { @@ -521,9 +543,9 @@ type yaml_parser_t struct { read_handler yaml_read_handler_t // Read handler. - input_file io.Reader // File input data. - input []byte // String input data. - input_pos int + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int eof bool // EOF flag @@ -632,7 +654,7 @@ type yaml_emitter_t struct { write_handler yaml_write_handler_t // Write handler. output_buffer *[]byte // String output data. - output_file io.Writer // File output data. + output_writer io.Writer // File output data. buffer []byte // The working buffer. buffer_pos int // The current position of the buffer. 
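The yaml.v2 bump above adds a streaming API: `NewDecoder`/`Decode` with `SetStrict`, and `NewEncoder`/`Encode`/`Close`. A minimal sketch of how these compose, assuming only what the vendored diff shows (multi-document input, `io.EOF` at end of stream, `---` separators between encoded documents); the two-document input is illustrative:

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	// Two YAML documents in a single stream.
	in := strings.NewReader("name: a\n---\nname: b\n")

	dec := yaml.NewDecoder(in)
	dec.SetStrict(true) // like UnmarshalStrict: unknown fields and duplicate keys error out

	enc := yaml.NewEncoder(os.Stdout)
	defer enc.Close() // flushes remaining data; does not write a terminating "..."

	for {
		var doc struct {
			Name string `yaml:"name"`
		}
		err := dec.Decode(&doc)
		if err == io.EOF {
			break // stream exhausted
		}
		if err != nil {
			fmt.Fprintln(os.Stderr, "decode:", err)
			return
		}
		// The second and subsequent documents are preceded by "---" on output.
		if err := enc.Encode(doc); err != nil {
			fmt.Fprintln(os.Stderr, "encode:", err)
			return
		}
	}
}
```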
diff --git a/cluster-autoscaler/vendor/k8s.io/api/admission/v1beta1/doc.go b/cluster-autoscaler/vendor/k8s.io/api/admission/v1beta1/doc.go index 9a8b8e41f0dd..26d3c73b41a3 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/admission/v1beta1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/api/admission/v1beta1/doc.go @@ -18,4 +18,5 @@ limitations under the License. // +k8s:openapi-gen=false // +groupName=admission.k8s.io + package v1beta1 diff --git a/cluster-autoscaler/vendor/k8s.io/api/admission/v1beta1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/admission/v1beta1/generated.pb.go index d2b938e5a279..4082082ff942 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/admission/v1beta1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/admission/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/admission/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. @@ -274,24 +273,6 @@ func (m *AdmissionReview) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1014,51 +995,14 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.AuditAnnotations == nil { m.AuditAnnotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1068,41 +1012,80 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift 
+= 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.AuditAnnotations[mapkey] = mapvalue - } else { - var mapvalue string - m.AuditAnnotations[mapkey] = mapvalue } + m.AuditAnnotations[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex diff --git a/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go b/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go index 4fa2d9c6627b..d2b73a4d8cc6 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go @@ -16,10 +16,10 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:openapi-gen=true +// +groupName=admissionregistration.k8s.io // Package v1alpha1 is the v1alpha1 version of the API. // AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration // InitializerConfiguration and validatingWebhookConfiguration is for the // new dynamic admission controller configuration. 
-// +groupName=admissionregistration.k8s.io package v1alpha1 diff --git a/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go index b87f74e52076..74c467a2223f 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto -// DO NOT EDIT! /* Package v1alpha1 is a generated protocol buffer package. @@ -251,24 +250,6 @@ func (m *Rule) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto index e17b559689d9..98e9a571a2cf 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto @@ -88,7 +88,7 @@ message Rule { repeated string apiVersions = 2; // Resources is a list of resources this rule applies to. - // + // // For example: // 'pods' means pods. // 'pods/log' means the log subresource of pods. @@ -96,10 +96,10 @@ message Rule { // 'pods/*' means all subresources of pods. // '*/scale' means all scale subresources. // '*/*' means all resources and their subresources. - // + // // If wildcard is present, the validation rule will ensure resources do not // overlap with each other. - // + // // Depending on the enclosing object, subresources might not be allowed. // Required. repeated string resources = 3; diff --git a/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go b/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go index 387af54a6d93..9b05e22fdf51 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go @@ -16,10 +16,10 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:openapi-gen=true +// +groupName=admissionregistration.k8s.io // Package v1beta1 is the v1beta1 version of the API. // AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration // InitializerConfiguration and validatingWebhookConfiguration is for the // new dynamic admission controller configuration. 
-// +groupName=admissionregistration.k8s.io package v1beta1 diff --git a/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go index d6c9d958bf54..2ca3fa6524fb 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. @@ -506,24 +505,6 @@ func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto index 4d55ca878a9d..1c40ae530dea 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto @@ -66,7 +66,7 @@ message Rule { repeated string apiVersions = 2; // Resources is a list of resources this rule applies to. - // + // // For example: // 'pods' means pods. // 'pods/log' means the log subresource of pods. @@ -74,10 +74,10 @@ message Rule { // 'pods/*' means all subresources of pods. // '*/scale' means all scale subresources. // '*/*' means all resources and their subresources. - // + // // If wildcard is present, the validation rule will ensure resources do not // overlap with each other. - // + // // Depending on the enclosing object, subresources might not be allowed. // Required. repeated string resources = 3; @@ -168,7 +168,7 @@ message Webhook { // object itself is a namespace, the matching is performed on // object.metadata.labels. If the object is another cluster scoped resource, // it never skips the webhook. - // + // // For example, to run the webhook on any objects whose namespace is not // associated with "runlevel" of "0" or "1"; you will set the selector as // follows: @@ -184,7 +184,7 @@ message Webhook { // } // ] // } - // + // // If instead you want to only run the webhook on any objects whose // namespace is associated with the "environment" of "prod" or "staging"; // you will set the selector as follows: @@ -200,11 +200,11 @@ message Webhook { // } // ] // } - // + // // See // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // for more examples of label selectors. 
- // + // // Default to the empty LabelSelector, which matches everything. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; @@ -223,47 +223,47 @@ message Webhook { // connection with the webhook message WebhookClientConfig { // `url` gives the location of the webhook, in standard URL form - // (`[scheme://]host:port/path`). Exactly one of `url` or `service` + // (`scheme://host:port/path`). Exactly one of `url` or `service` // must be specified. - // + // // The `host` should not refer to a service running in the cluster; use // the `service` field instead. The host might be resolved via external // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve // in-cluster DNS as that would be a layering violation). `host` may // also be an IP address. - // + // // Please note that using `localhost` or `127.0.0.1` as a `host` is // risky unless you take great care to run this webhook on all hosts // which run an apiserver which might need to make calls to this // webhook. Such installs are likely to be non-portable, i.e., not easy // to turn up in a new cluster. - // + // // The scheme must be "https"; the URL must begin with "https://". - // + // // A path is optional, and if present may be any string permissible in // a URL. You may use the path to pass an arbitrary string to the // webhook, for example, a cluster identifier. - // + // // Attempting to use a user or basic auth e.g. "user:password@" is not // allowed. Fragments ("#...") and query parameters ("?...") are not // allowed, either. - // + // // +optional optional string url = 3; // `service` is a reference to the service for this webhook. Either // `service` or `url` must be specified. - // + // // If the webhook is running within the cluster, then you should use `service`. - // + // // Port 443 will be used if it is open, otherwise it is an error. - // + // // +optional optional ServiceReference service = 1; - // `caBundle` is a PEM encoded CA bundle which will be used to validate - // the webhook's server certificate. - // Required. + // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. + // +optional optional bytes caBundle = 2; } diff --git a/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/types.go index 0b948ba1df9d..49d94ec0eb75 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/types.go @@ -246,7 +246,7 @@ const ( // connection with the webhook type WebhookClientConfig struct { // `url` gives the location of the webhook, in standard URL form - // (`[scheme://]host:port/path`). Exactly one of `url` or `service` + // (`scheme://host:port/path`). Exactly one of `url` or `service` // must be specified. // // The `host` should not refer to a service running in the cluster; use @@ -282,12 +282,12 @@ type WebhookClientConfig struct { // Port 443 will be used if it is open, otherwise it is an error. // // +optional - Service *ServiceReference `json:"service" protobuf:"bytes,1,opt,name=service"` + Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,1,opt,name=service"` - // `caBundle` is a PEM encoded CA bundle which will be used to validate - // the webhook's server certificate. - // Required. 
- CABundle []byte `json:"caBundle" protobuf:"bytes,2,opt,name=caBundle"` + // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. + // +optional + CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,2,opt,name=caBundle"` } // ServiceReference holds a reference to Service.legacy.k8s.io diff --git a/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go index aab917a4028a..e97628aab7cf 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go @@ -114,9 +114,9 @@ func (Webhook) SwaggerDoc() map[string]string { var map_WebhookClientConfig = map[string]string{ "": "WebhookClientConfig contains the information to make a TLS connection with the webhook", - "url": "`url` gives the location of the webhook, in standard URL form (`[scheme://]host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", + "url": "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", "service": "`service` is a reference to the service for this webhook. 
Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.\n\nPort 443 will be used if it is open, otherwise it is an error.", - "caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. Required.", + "caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.", } func (WebhookClientConfig) SwaggerDoc() map[string]string { diff --git a/cluster-autoscaler/vendor/k8s.io/api/apps/v1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/apps/v1/generated.pb.go index eac6ef2a12c1..5b29f432080b 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/apps/v1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/apps/v1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto -// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. @@ -1440,24 +1439,6 @@ func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/cluster-autoscaler/vendor/k8s.io/api/apps/v1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/apps/v1/generated.proto index c8a957ac6808..fea81922f3b6 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/apps/v1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/apps/v1/generated.proto @@ -280,6 +280,7 @@ message DeploymentSpec { // The deployment strategy to use to replace existing pods with new ones. // +optional + // +patchStrategy=retainKeys optional DeploymentStrategy strategy = 4; // Minimum number of seconds for which a newly created pod should be ready diff --git a/cluster-autoscaler/vendor/k8s.io/api/apps/v1/types.go b/cluster-autoscaler/vendor/k8s.io/api/apps/v1/types.go index 4431ca2c3170..68ac55bf17ab 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/apps/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/apps/v1/types.go @@ -32,6 +32,8 @@ const ( ) // +genclient +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // StatefulSet represents a set of pods with consistent identities. 
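The `+genclient:method=GetScale`/`UpdateScale` markers this diff adds to StatefulSet, Deployment, and ReplicaSet cause the generated apps/v1 clientset to expose the `scale` subresource directly. A hedged sketch of the resulting client calls; the signatures are assumptions about the client-go generation contemporary with this change, and clientset construction is elided:

```go
package scaleutil

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	appsv1client "k8s.io/client-go/kubernetes/typed/apps/v1"
)

// scaleDeployment resizes a Deployment through the scale subresource.
// GetScale/UpdateScale are generated from the +genclient:method markers
// and exchange autoscaling/v1.Scale objects rather than full Deployments.
func scaleDeployment(c appsv1client.AppsV1Interface, ns, name string, replicas int32) error {
	scale, err := c.Deployments(ns).GetScale(name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	scale.Spec.Replicas = replicas
	_, err = c.Deployments(ns).UpdateScale(name, scale)
	return err
}
```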
@@ -244,6 +246,8 @@ type StatefulSetList struct { } // +genclient +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Deployment enables declarative updates for Pods and ReplicaSets. @@ -279,7 +283,8 @@ type DeploymentSpec struct { // The deployment strategy to use to replace existing pods with new ones. // +optional - Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"` + // +patchStrategy=retainKeys + Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"` // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. @@ -653,6 +658,8 @@ type DaemonSetList struct { } // +genclient +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ReplicaSet ensures that a specified number of pod replicas are running at any given time. diff --git a/cluster-autoscaler/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go index 85fb159ddcc8..7e992c58469a 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go @@ -96,7 +96,7 @@ func (DaemonSetSpec) SwaggerDoc() map[string]string { } var map_DaemonSetStatus = map[string]string{ - "": "DaemonSetStatus represents the current status of a daemon set.", + "": "DaemonSetStatus represents the current status of a daemon set.", "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", diff --git a/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta1/generated.pb.go index ef9aa8e09f04..935304755409 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. 
@@ -1091,24 +1090,6 @@ func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -2552,51 +2533,14 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.UpdatedAnnotations == nil { m.UpdatedAnnotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2606,41 +2550,80 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + 
break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.UpdatedAnnotations[mapkey] = mapvalue - } else { - var mapvalue string - m.UpdatedAnnotations[mapkey] = mapvalue } + m.UpdatedAnnotations[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -3833,51 +3816,14 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Selector == nil { m.Selector = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3887,41 +3833,80 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = 
postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Selector[mapkey] = mapvalue - } else { - var mapvalue string - m.Selector[mapkey] = mapvalue } + m.Selector[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { diff --git a/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta1/generated.proto index 6f41f06bccce..f87f39fe94e9 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta1/generated.proto @@ -143,6 +143,7 @@ message DeploymentSpec { // The deployment strategy to use to replace existing pods with new ones. // +optional + // +patchStrategy=retainKeys optional DeploymentStrategy strategy = 4; // Minimum number of seconds for which a newly created pod should be ready diff --git a/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta1/types.go b/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta1/types.go index d462604d79ea..326902fd0f86 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta1/types.go @@ -55,8 +55,6 @@ type ScaleStatus struct { TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"` } -// +genclient -// +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Scale represents a scaling request for a resource. @@ -323,7 +321,8 @@ type DeploymentSpec struct { // The deployment strategy to use to replace existing pods with new ones. // +optional - Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"` + // +patchStrategy=retainKeys + Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"` // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. 
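Most of the generated.pb.go churn in this diff is the same mechanical rewrite: map fields used to be decoded assuming the key (field 1) always precedes the value (field 2), whereas the regenerated code parses each map entry as a small message whose fields may arrive in either order or be absent (defaulting to empty). A simplified, self-contained sketch of that wire format for a string-to-string entry, assuming both fields are length-delimited and omitting the unknown-field skipping the real code performs:

```go
package main

import "fmt"

// readVarint decodes a protobuf base-128 varint from the front of b.
func readVarint(b []byte) (v uint64, n int) {
	for shift := uint(0); ; shift += 7 {
		c := b[n]
		n++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 {
			return v, n
		}
	}
}

// decodeMapEntry parses one map entry message: field 1 is the string key,
// field 2 the string value. Like the regenerated code above, it accepts
// the fields in any order and leaves an absent field as the empty string.
func decodeMapEntry(b []byte) (key, value string) {
	for i := 0; i < len(b); {
		wire, n := readVarint(b[i:])
		i += n
		fieldNum := int32(wire >> 3) // low three bits are the wire type (2 = length-delimited)
		strLen, n2 := readVarint(b[i:])
		i += n2
		s := string(b[i : i+int(strLen)])
		i += int(strLen)
		switch fieldNum {
		case 1:
			key = s
		case 2:
			value = s
		}
	}
	return key, value
}

func main() {
	// 0x0a = field 1 (key), wire type 2; 0x12 = field 2 (value), wire type 2.
	entry := []byte{0x0a, 0x01, 'k', 0x12, 0x01, 'v'}
	k, v := decodeMapEntry(entry)
	fmt.Printf("%s=%s\n", k, v) // prints k=v
}
```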
diff --git a/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta2/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta2/generated.pb.go index 72d832c34079..fc1efbc90bca 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta2/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta2/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto -// DO NOT EDIT! /* Package v1beta2 is a generated protocol buffer package. @@ -1570,24 +1569,6 @@ func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -6109,51 +6090,14 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Selector == nil { m.Selector = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6163,41 +6107,80 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if 
intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Selector[mapkey] = mapvalue - } else { - var mapvalue string - m.Selector[mapkey] = mapvalue } + m.Selector[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { diff --git a/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta2/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta2/generated.proto index cc3656d284e9..5d11cbe8d810 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta2/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta2/generated.proto @@ -286,6 +286,7 @@ message DeploymentSpec { // The deployment strategy to use to replace existing pods with new ones. // +optional + // +patchStrategy=retainKeys optional DeploymentStrategy strategy = 4; // Minimum number of seconds for which a newly created pod should be ready diff --git a/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta2/types.go b/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta2/types.go index e5525222a1ec..e75589adc542 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta2/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta2/types.go @@ -57,8 +57,6 @@ type ScaleStatus struct { TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"` } -// +genclient -// +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Scale represents a scaling request for a resource. @@ -331,7 +329,8 @@ type DeploymentSpec struct { // The deployment strategy to use to replace existing pods with new ones. // +optional - Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"` + // +patchStrategy=retainKeys + Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"` // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. 
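The `+patchStrategy=retainKeys` marker this diff adds to `DeploymentSpec.Strategy` in apps/v1, v1beta1, and v1beta2 lets strategic merge patches clear the members of the union-style strategy struct that the patch does not mention, instead of merging them into the result. A sketch of the patch this enables, switching a Deployment to `Recreate` without stale `rollingUpdate` parameters surviving the merge; the `$retainKeys` directive and the `Patch` call shape are standard strategic-merge-patch machinery assumed from the surrounding client-go version, not part of this diff:

```go
package patchutil

import (
	"k8s.io/apimachinery/pkg/types"
	appsv1client "k8s.io/client-go/kubernetes/typed/apps/v1"
)

// switchToRecreate flips a Deployment's update strategy. With retainKeys on
// .spec.strategy, keys absent from the $retainKeys list are cleared
// server-side, so the old rollingUpdate block does not leak into the
// Recreate strategy.
func switchToRecreate(c appsv1client.AppsV1Interface, ns, name string) error {
	patch := []byte(`{"spec":{"strategy":{"$retainKeys":["type"],"type":"Recreate"}}}`)
	_, err := c.Deployments(ns).Patch(name, types.StrategicMergePatchType, patch)
	return err
}
```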
diff --git a/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go index 627df3ab76de..f8229ceda82f 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go @@ -96,7 +96,7 @@ func (DaemonSetSpec) SwaggerDoc() map[string]string { } var map_DaemonSetStatus = map[string]string{ - "": "DaemonSetStatus represents the current status of a daemon set.", + "": "DaemonSetStatus represents the current status of a daemon set.", "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", diff --git a/cluster-autoscaler/vendor/k8s.io/api/auditregistration/v1alpha1/BUILD b/cluster-autoscaler/vendor/k8s.io/api/auditregistration/v1alpha1/BUILD new file mode 100644 index 000000000000..5f1523e0f002 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/api/auditregistration/v1alpha1/BUILD @@ -0,0 +1,36 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "generated.pb.go", + "register.go", + "types.go", + "types_swagger_doc_generated.go", + "zz_generated.deepcopy.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/api/auditregistration/v1alpha1", + importpath = "k8s.io/api/auditregistration/v1alpha1", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/github.com/gogo/protobuf/proto:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/api/auditregistration/v1alpha1/doc.go b/cluster-autoscaler/vendor/k8s.io/api/auditregistration/v1alpha1/doc.go new file mode 100644 index 000000000000..5caa960a1007 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/api/auditregistration/v1alpha1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + +// +groupName=auditregistration.k8s.io + +package v1alpha1 diff --git a/cluster-autoscaler/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go new file mode 100644 index 000000000000..399d92b380ea --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go @@ -0,0 +1,1685 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto + +/* + Package v1alpha1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto + + It has these top-level messages: + AuditSink + AuditSinkList + AuditSinkSpec + Policy + ServiceReference + Webhook + WebhookClientConfig + WebhookThrottleConfig +*/ +package v1alpha1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *AuditSink) Reset() { *m = AuditSink{} } +func (*AuditSink) ProtoMessage() {} +func (*AuditSink) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *AuditSinkList) Reset() { *m = AuditSinkList{} } +func (*AuditSinkList) ProtoMessage() {} +func (*AuditSinkList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *AuditSinkSpec) Reset() { *m = AuditSinkSpec{} } +func (*AuditSinkSpec) ProtoMessage() {} +func (*AuditSinkSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *Policy) Reset() { *m = Policy{} } +func (*Policy) ProtoMessage() {} +func (*Policy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *ServiceReference) Reset() { *m = ServiceReference{} } +func (*ServiceReference) ProtoMessage() {} +func (*ServiceReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *Webhook) Reset() { *m = Webhook{} } +func (*Webhook) ProtoMessage() {} +func (*Webhook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } +func (*WebhookClientConfig) ProtoMessage() {} +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *WebhookThrottleConfig) Reset() { *m = WebhookThrottleConfig{} } +func (*WebhookThrottleConfig) ProtoMessage() {} +func (*WebhookThrottleConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func init() { + proto.RegisterType((*AuditSink)(nil), "k8s.io.api.auditregistration.v1alpha1.AuditSink") + proto.RegisterType((*AuditSinkList)(nil), "k8s.io.api.auditregistration.v1alpha1.AuditSinkList") + proto.RegisterType((*AuditSinkSpec)(nil), "k8s.io.api.auditregistration.v1alpha1.AuditSinkSpec") + proto.RegisterType((*Policy)(nil), "k8s.io.api.auditregistration.v1alpha1.Policy") + proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.auditregistration.v1alpha1.ServiceReference") + proto.RegisterType((*Webhook)(nil), "k8s.io.api.auditregistration.v1alpha1.Webhook") + proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.auditregistration.v1alpha1.WebhookClientConfig") + proto.RegisterType((*WebhookThrottleConfig)(nil), "k8s.io.api.auditregistration.v1alpha1.WebhookThrottleConfig") +} +func (m *AuditSink) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuditSink) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + return i, nil +} + +func (m *AuditSinkList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuditSinkList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n3, 
err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *AuditSinkSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuditSinkSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Policy.Size())) + n4, err := m.Policy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Webhook.Size())) + n5, err := m.Webhook.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + return i, nil +} + +func (m *Policy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Policy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level))) + i += copy(dAtA[i:], m.Level) + if len(m.Stages) > 0 { + for _, s := range m.Stages { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ServiceReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if m.Path != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) + i += copy(dAtA[i:], *m.Path) + } + return i, nil +} + +func (m *Webhook) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Webhook) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Throttle != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Throttle.Size())) + n6, err := m.Throttle.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ClientConfig.Size())) + n7, err := m.ClientConfig.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + return i, nil +} + +func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.URL != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i += copy(dAtA[i:], *m.URL) + } + 
if m.Service != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Service.Size())) + n8, err := m.Service.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.CABundle != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) + i += copy(dAtA[i:], m.CABundle) + } + return i, nil +} + +func (m *WebhookThrottleConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WebhookThrottleConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.QPS != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.QPS)) + } + if m.Burst != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Burst)) + } + return i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *AuditSink) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *AuditSinkList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *AuditSinkSpec) Size() (n int) { + var l int + _ = l + l = m.Policy.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Webhook.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Policy) Size() (n int) { + var l int + _ = l + l = len(m.Level) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Stages) > 0 { + for _, s := range m.Stages { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceReference) Size() (n int) { + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Path != nil { + l = len(*m.Path) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Webhook) Size() (n int) { + var l int + _ = l + if m.Throttle != nil { + l = m.Throttle.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *WebhookClientConfig) Size() (n int) { + var l int + _ = l + if m.URL != nil { + l = len(*m.URL) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CABundle != nil { + l = len(m.CABundle) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *WebhookThrottleConfig) Size() (n int) { + var l int + _ = l + if m.QPS != nil { + n += 1 + sovGenerated(uint64(*m.QPS)) + } + if m.Burst != nil { + n += 1 + sovGenerated(uint64(*m.Burst)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AuditSink) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AuditSink{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", 
"k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "AuditSinkSpec", "AuditSinkSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *AuditSinkList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AuditSinkList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "AuditSink", "AuditSink", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *AuditSinkSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AuditSinkSpec{`, + `Policy:` + strings.Replace(strings.Replace(this.Policy.String(), "Policy", "Policy", 1), `&`, ``, 1) + `,`, + `Webhook:` + strings.Replace(strings.Replace(this.Webhook.String(), "Webhook", "Webhook", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Policy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Policy{`, + `Level:` + fmt.Sprintf("%v", this.Level) + `,`, + `Stages:` + fmt.Sprintf("%v", this.Stages) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceReference{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Path:` + valueToStringGenerated(this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *Webhook) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Webhook{`, + `Throttle:` + strings.Replace(fmt.Sprintf("%v", this.Throttle), "WebhookThrottleConfig", "WebhookThrottleConfig", 1) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *WebhookClientConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebhookClientConfig{`, + `URL:` + valueToStringGenerated(this.URL) + `,`, + `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "ServiceReference", "ServiceReference", 1) + `,`, + `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, + `}`, + }, "") + return s +} +func (this *WebhookThrottleConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebhookThrottleConfig{`, + `QPS:` + valueToStringGenerated(this.QPS) + `,`, + `Burst:` + valueToStringGenerated(this.Burst) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AuditSink) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuditSink: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuditSink: illegal tag %d 
(wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuditSinkList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuditSinkList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuditSinkList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, AuditSink{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); 
err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuditSinkSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuditSinkSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuditSinkSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Policy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Policy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Webhook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Policy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Policy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Policy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Level = Level(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stages", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stages = append(m.Stages, Stage(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Webhook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Webhook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Webhook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Throttle", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Throttle == nil { + m.Throttle = &WebhookThrottleConfig{} + } + if err := m.Throttle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
WebhookClientConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WebhookClientConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.URL = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Service == nil { + m.Service = &ServiceReference{} + } + if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...) 
+ if m.CABundle == nil { + m.CABundle = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WebhookThrottleConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WebhookThrottleConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WebhookThrottleConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field QPS", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.QPS = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Burst", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Burst = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if 
b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 747 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x41, 0x6f, 0xd3, 0x48, + 0x14, 0x8e, 0x9b, 0xa4, 0x49, 0xa6, 0xe9, 0x6e, 0x77, 0xba, 0xbb, 0xca, 0x56, 0x2b, 0xa7, 0xb2, + 0xb4, 0x52, 0xa5, 0xdd, 0x8e, 0xb7, 0xa8, 0x02, 0x84, 0xb8, 0xd4, 0x3d, 0x21, 0x95, 0x52, 0x26, + 0x14, 0x04, 0x42, 0x88, 0x89, 0xf3, 0x62, 0x0f, 0x49, 0x6c, 0x63, 0x8f, 0x83, 0x7a, 0x43, 0xe2, + 0x0f, 0xf0, 0x7b, 0xb8, 0x21, 0x81, 0xd4, 0x63, 0x8f, 0x3d, 0x55, 0x34, 0x1c, 0xf8, 0x0f, 0x9c, + 0xd0, 0x8c, 0xc7, 0x49, 0x68, 0x8a, 0x48, 0x6f, 0x33, 0xdf, 0xbc, 0xef, 0x7b, 0xdf, 0xf7, 0xde, + 0xa0, 0xfd, 0xde, 0xcd, 0x84, 0xf0, 0xd0, 0xee, 0xa5, 0x6d, 0x88, 0x03, 0x10, 0x90, 0xd8, 0x43, + 0x08, 0x3a, 0x61, 0x6c, 0xeb, 0x07, 0x16, 0x71, 0x9b, 0xa5, 0x1d, 0x2e, 0x62, 0xf0, 0x78, 0x22, + 0x62, 0x26, 0x78, 0x18, 0xd8, 0xc3, 0x2d, 0xd6, 0x8f, 0x7c, 0xb6, 0x65, 0x7b, 0x10, 0x40, 0xcc, + 0x04, 0x74, 0x48, 0x14, 0x87, 0x22, 0xc4, 0xff, 0x64, 0x34, 0xc2, 0x22, 0x4e, 0x66, 0x68, 0x24, + 0xa7, 0xad, 0x6d, 0x7a, 0x5c, 0xf8, 0x69, 0x9b, 0xb8, 0xe1, 0xc0, 0xf6, 0x42, 0x2f, 0xb4, 0x15, + 0xbb, 0x9d, 0x76, 0xd5, 0x4d, 0x5d, 0xd4, 0x29, 0x53, 0x5d, 0xdb, 0x9e, 0x98, 0x19, 0x30, 0xd7, + 0xe7, 0x01, 0xc4, 0x47, 0x76, 0xd4, 0xf3, 0x24, 0x90, 0xd8, 0x03, 0x10, 0xcc, 0x1e, 0xce, 0x78, + 0x59, 0xb3, 0x7f, 0xc4, 0x8a, 0xd3, 0x40, 0xf0, 0x01, 0xcc, 0x10, 0xae, 0xff, 0x8c, 0x90, 0xb8, + 0x3e, 0x0c, 0xd8, 0x45, 0x9e, 0xf5, 0xd1, 0x40, 0xb5, 0x1d, 0x19, 0xb6, 0xc5, 0x83, 0x1e, 0x7e, + 0x8e, 0xaa, 0xd2, 0x51, 0x87, 0x09, 0xd6, 0x30, 0xd6, 0x8d, 0x8d, 0xa5, 0x6b, 0xff, 0x93, 0xc9, + 0x54, 0xc6, 0xc2, 0x24, 0xea, 0x79, 0x12, 0x48, 0x88, 0xac, 0x26, 0xc3, 0x2d, 0x72, 0xaf, 0xfd, + 0x02, 0x5c, 0x71, 0x17, 0x04, 0x73, 0xf0, 0xf1, 0x59, 0xb3, 0x30, 0x3a, 0x6b, 0xa2, 0x09, 0x46, + 0xc7, 0xaa, 0xf8, 0x21, 0x2a, 0x25, 0x11, 0xb8, 0x8d, 0x05, 0xa5, 0xbe, 0x4d, 0xe6, 0x9a, 0x39, + 0x19, 0x3b, 0x6c, 0x45, 0xe0, 0x3a, 0x75, 0xdd, 0xa1, 0x24, 0x6f, 0x54, 0xe9, 0x59, 0x1f, 0x0c, + 0xb4, 0x3c, 0xae, 0xda, 0xe3, 0x89, 0xc0, 0x4f, 0x67, 0xb2, 0x90, 0xf9, 0xb2, 0x48, 0xb6, 0x4a, + 0xb2, 0xa2, 0xfb, 0x54, 0x73, 0x64, 0x2a, 0xc7, 0x21, 0x2a, 0x73, 0x01, 0x83, 0xa4, 0xb1, 0xb0, + 0x5e, 0xbc, 0x30, 0xa6, 0xb9, 0x82, 0x38, 0xcb, 0x5a, 0xbc, 0x7c, 0x47, 0xca, 0xd0, 0x4c, 0xcd, + 0x7a, 0x3f, 0x1d, 0x43, 0xc6, 0xc3, 0x87, 0x68, 0x31, 0x0a, 0xfb, 0xdc, 0x3d, 0xd2, 0x21, 0x36, + 0xe7, 0xec, 0x74, 0xa0, 0x48, 0xce, 0x2f, 0xba, 0xcd, 0x62, 0x76, 0xa7, 0x5a, 0x0c, 0x3f, 0x46, + 0x95, 0x57, 0xd0, 0xf6, 0xc3, 0xb0, 0xa7, 0x57, 0x41, 0xe6, 0xd4, 0x7d, 0x94, 0xb1, 0x9c, 0x5f, + 0xb5, 0x70, 0x45, 0x03, 0x34, 0xd7, 0xb3, 0x5c, 0xa4, 0x9b, 0xe1, 0xff, 0x50, 0xb9, 0x0f, 0x43, + 0xe8, 0x2b, 0xeb, 0x35, 0xe7, 0xcf, 0x3c, 0xf2, 0x9e, 0x04, 0xbf, 0xe6, 0x07, 0x9a, 
0x15, 0xe1, + 0x7f, 0xd1, 0x62, 0x22, 0x98, 0x07, 0xd9, 0x4c, 0x6b, 0xce, 0xaa, 0xb4, 0xdd, 0x52, 0x88, 0xac, + 0x55, 0x27, 0xaa, 0x4b, 0xac, 0x37, 0x06, 0x5a, 0x69, 0x41, 0x3c, 0xe4, 0x2e, 0x50, 0xe8, 0x42, + 0x0c, 0x81, 0x0b, 0xd8, 0x46, 0xb5, 0x80, 0x0d, 0x20, 0x89, 0x98, 0x0b, 0xba, 0xe7, 0x6f, 0xba, + 0x67, 0x6d, 0x3f, 0x7f, 0xa0, 0x93, 0x1a, 0xbc, 0x8e, 0x4a, 0xf2, 0xa2, 0x46, 0x50, 0x9b, 0xfc, + 0x2b, 0x59, 0x4b, 0xd5, 0x0b, 0xfe, 0x1b, 0x95, 0x22, 0x26, 0xfc, 0x46, 0x51, 0x55, 0x54, 0xe5, + 0xeb, 0x01, 0x13, 0x3e, 0x55, 0xa8, 0xf5, 0xc5, 0x40, 0x79, 0x7e, 0xdc, 0x45, 0x55, 0xe1, 0xc7, + 0xa1, 0x10, 0x7d, 0xd0, 0xab, 0xba, 0x7d, 0xb5, 0x91, 0x3e, 0xd0, 0xec, 0xdd, 0x30, 0xe8, 0x72, + 0xcf, 0xa9, 0xcb, 0x9f, 0x97, 0x63, 0x74, 0xac, 0x8d, 0x05, 0xaa, 0xbb, 0x7d, 0x0e, 0x81, 0xc8, + 0xea, 0xf4, 0xfa, 0x6e, 0x5d, 0xad, 0xd7, 0xee, 0x94, 0x82, 0xf3, 0xbb, 0xce, 0x5d, 0x9f, 0x46, + 0xe9, 0x77, 0x5d, 0xac, 0x77, 0x06, 0x5a, 0xbd, 0x84, 0x8b, 0xff, 0x42, 0xc5, 0x34, 0xce, 0x17, + 0x5c, 0x19, 0x9d, 0x35, 0x8b, 0x87, 0x74, 0x8f, 0x4a, 0x0c, 0x3f, 0x43, 0x95, 0x24, 0xdb, 0x90, + 0xf6, 0x78, 0x63, 0x4e, 0x8f, 0x17, 0xf7, 0xea, 0x2c, 0xc9, 0x7f, 0x96, 0xa3, 0xb9, 0x28, 0xde, + 0x40, 0x55, 0x97, 0x39, 0x69, 0xd0, 0xe9, 0x83, 0x5a, 0x4f, 0x3d, 0x1b, 0xd9, 0xee, 0x4e, 0x86, + 0xd1, 0xf1, 0xab, 0xd5, 0x42, 0x7f, 0x5c, 0x3a, 0x63, 0xe9, 0xfe, 0x65, 0x94, 0x28, 0xf7, 0xc5, + 0xcc, 0xfd, 0xfd, 0x83, 0x16, 0x95, 0x18, 0x6e, 0xa2, 0x72, 0x3b, 0x8d, 0x13, 0xa1, 0xbc, 0x17, + 0x9d, 0x9a, 0xfc, 0xb7, 0x8e, 0x04, 0x68, 0x86, 0x3b, 0xe4, 0xf8, 0xdc, 0x2c, 0x9c, 0x9c, 0x9b, + 0x85, 0xd3, 0x73, 0xb3, 0xf0, 0x7a, 0x64, 0x1a, 0xc7, 0x23, 0xd3, 0x38, 0x19, 0x99, 0xc6, 0xe9, + 0xc8, 0x34, 0x3e, 0x8d, 0x4c, 0xe3, 0xed, 0x67, 0xb3, 0xf0, 0xa4, 0x9a, 0xa7, 0xfa, 0x16, 0x00, + 0x00, 0xff, 0xff, 0x55, 0x1b, 0x03, 0x56, 0xaf, 0x06, 0x00, 0x00, +} diff --git a/cluster-autoscaler/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto new file mode 100644 index 000000000000..70801a6c515d --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto @@ -0,0 +1,158 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.auditregistration.v1alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// AuditSink represents a cluster level audit sink +message AuditSink { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the audit configuration spec + optional AuditSinkSpec spec = 2; +} + +// AuditSinkList is a list of AuditSink items. 
+message AuditSinkList { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of audit configurations. + repeated AuditSink items = 2; +} + +// AuditSinkSpec holds the spec for the audit sink +message AuditSinkSpec { + // Policy defines the policy for selecting which events should be sent to the webhook + // required + optional Policy policy = 1; + + // Webhook to send events + // required + optional Webhook webhook = 2; +} + +// Policy defines the configuration of how audit events are logged +message Policy { + // The Level that all requests are recorded at. + // available options: None, Metadata, Request, RequestResponse + // required + optional string level = 1; + + // Stages is a list of stages for which events are created. + // +optional + repeated string stages = 2; +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +message ServiceReference { + // `namespace` is the namespace of the service. + // Required + optional string namespace = 1; + + // `name` is the name of the service. + // Required + optional string name = 2; + + // `path` is an optional URL path which will be sent in any request to + // this service. + // +optional + optional string path = 3; +} + +// Webhook holds the configuration of the webhook +message Webhook { + // Throttle holds the options for throttling the webhook + // +optional + optional WebhookThrottleConfig throttle = 1; + + // ClientConfig holds the connection parameters for the webhook + // required + optional WebhookClientConfig clientConfig = 2; +} + +// WebhookClientConfig contains the information to make a connection with the webhook +message WebhookClientConfig { + // `url` gives the location of the webhook, in standard URL form + // (`scheme://host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + optional string url = 1; + + // `service` is a reference to the service for this webhook. Either + // `service` or `url` must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // Port 443 will be used if it is open, otherwise it is an error. + // + // +optional + optional ServiceReference service = 2; + + // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. 
+ // +optional + optional bytes caBundle = 3; +} + +// WebhookThrottleConfig holds the configuration for throttling events +message WebhookThrottleConfig { + // ThrottleQPS is the maximum number of batches per second + // default 10 QPS + // +optional + optional int64 qps = 1; + + // ThrottleBurst is the maximum number of events sent at the same moment + // default 15 + // +optional + optional int64 burst = 2; +} + diff --git a/cluster-autoscaler/vendor/k8s.io/api/auditregistration/v1alpha1/register.go b/cluster-autoscaler/vendor/k8s.io/api/auditregistration/v1alpha1/register.go new file mode 100644 index 000000000000..d6271608f00c --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/api/auditregistration/v1alpha1/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name used in this package +const GroupName = "auditregistration.k8s.io" + +// SchemeGroupVersion is the group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &AuditSink{}, + &AuditSinkList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/api/auditregistration/v1alpha1/types.go b/cluster-autoscaler/vendor/k8s.io/api/auditregistration/v1alpha1/types.go new file mode 100644 index 000000000000..af31cfe275a2 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/api/auditregistration/v1alpha1/types.go @@ -0,0 +1,194 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +// +k8s:openapi-gen=true + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Level defines the amount of information logged during auditing +type Level string + +// Valid audit levels +const ( + // LevelNone disables auditing + LevelNone Level = "None" + // LevelMetadata provides the basic level of auditing. + LevelMetadata Level = "Metadata" + // LevelRequest provides Metadata level of auditing, and additionally + // logs the request object (does not apply for non-resource requests). + LevelRequest Level = "Request" + // LevelRequestResponse provides Request level of auditing, and additionally + // logs the response object (does not apply for non-resource requests and watches). + LevelRequestResponse Level = "RequestResponse" +) + +// Stage defines the stages in request handling during which audit events may be generated. +type Stage string + +// Valid audit stages. +const ( + // The stage for events generated after the audit handler receives the request, but before it + // is delegated down the handler chain. + StageRequestReceived = "RequestReceived" + // The stage for events generated after the response headers are sent, but before the response body + // is sent. This stage is only generated for long-running requests (e.g. watch). + StageResponseStarted = "ResponseStarted" + // The stage for events generated after the response body has been completed, and no more bytes + // will be sent. + StageResponseComplete = "ResponseComplete" + // The stage for events generated when a panic occurred. + StagePanic = "Panic" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AuditSink represents a cluster level audit sink +type AuditSink struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the audit configuration spec + Spec AuditSinkSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// AuditSinkSpec holds the spec for the audit sink +type AuditSinkSpec struct { + // Policy defines the policy for selecting which events should be sent to the webhook + // required + Policy Policy `json:"policy" protobuf:"bytes,1,opt,name=policy"` + + // Webhook to send events + // required + Webhook Webhook `json:"webhook" protobuf:"bytes,2,opt,name=webhook"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AuditSinkList is a list of AuditSink items. +type AuditSinkList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of audit configurations. + Items []AuditSink `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// Policy defines the configuration of how audit events are logged +type Policy struct { + // The Level that all requests are recorded at. + // available options: None, Metadata, Request, RequestResponse + // required + Level Level `json:"level" protobuf:"bytes,1,opt,name=level"` + + // Stages is a list of stages for which events are created. 
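As a concrete illustration of how the Level and Stage constants above combine with the AuditSink and Policy types, the following minimal sketch builds a sink that records request metadata at the RequestReceived and ResponseComplete stages. It is hypothetical example code written against the vendored package, not part of the file itself, and it leaves the required Spec.Webhook at its zero value for brevity:

package main

import (
	"fmt"

	auditregv1alpha1 "k8s.io/api/auditregistration/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	sink := auditregv1alpha1.AuditSink{
		ObjectMeta: metav1.ObjectMeta{Name: "metadata-sink"},
		Spec: auditregv1alpha1.AuditSinkSpec{
			Policy: auditregv1alpha1.Policy{
				// LevelMetadata records request metadata only; see the Level
				// constants above for the more verbose options.
				Level: auditregv1alpha1.LevelMetadata,
				Stages: []auditregv1alpha1.Stage{
					auditregv1alpha1.StageRequestReceived,
					auditregv1alpha1.StageResponseComplete,
				},
			},
			// A real AuditSink must also populate Spec.Webhook (required);
			// it is omitted here to keep the sketch focused on Policy.
		},
	}
	fmt.Printf("%s at %v\n", sink.Spec.Policy.Level, sink.Spec.Policy.Stages)
}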
+ // +optional + Stages []Stage `json:"stages" protobuf:"bytes,2,opt,name=stages"` +} + +// Webhook holds the configuration of the webhook +type Webhook struct { + // Throttle holds the options for throttling the webhook + // +optional + Throttle *WebhookThrottleConfig `json:"throttle,omitempty" protobuf:"bytes,1,opt,name=throttle"` + + // ClientConfig holds the connection parameters for the webhook + // required + ClientConfig WebhookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"` +} + +// WebhookThrottleConfig holds the configuration for throttling events +type WebhookThrottleConfig struct { + // ThrottleQPS is the maximum number of batches per second + // default 10 QPS + // +optional + QPS *int64 `json:"qps,omitempty" protobuf:"bytes,1,opt,name=qps"` + + // ThrottleBurst is the maximum number of events sent at the same moment + // default 15 + // +optional + Burst *int64 `json:"burst,omitempty" protobuf:"bytes,2,opt,name=burst"` +} + +// WebhookClientConfig contains the information to make a connection with the webhook +type WebhookClientConfig struct { + // `url` gives the location of the webhook, in standard URL form + // (`scheme://host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + URL *string `json:"url,omitempty" protobuf:"bytes,1,opt,name=url"` + + // `service` is a reference to the service for this webhook. Either + // `service` or `url` must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // Port 443 will be used if it is open, otherwise it is an error. + // + // +optional + Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,2,opt,name=service"` + + // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. + // +optional + CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,3,opt,name=caBundle"` +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +type ServiceReference struct { + // `namespace` is the namespace of the service. + // Required + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + + // `name` is the name of the service. + // Required + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + + // `path` is an optional URL path which will be sent in any request to + // this service.
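To make the relationship between Webhook, WebhookThrottleConfig, and WebhookClientConfig above concrete, here is a minimal hypothetical sketch targeting an in-cluster receiver; the namespace, service name, and path are illustrative placeholders rather than values taken from this file:

package main

import (
	auditregv1alpha1 "k8s.io/api/auditregistration/v1alpha1"
)

// exampleWebhook returns a Webhook that would deliver events to an assumed
// in-cluster Service "audit-webhook" in namespace "audit", throttled to the
// documented defaults of 10 batches per second with a burst of 15.
func exampleWebhook(caBundle []byte) auditregv1alpha1.Webhook {
	qps := int64(10)
	burst := int64(15)
	path := "/events" // hypothetical handler path on the receiving service
	return auditregv1alpha1.Webhook{
		Throttle: &auditregv1alpha1.WebhookThrottleConfig{
			QPS:   &qps,
			Burst: &burst,
		},
		ClientConfig: auditregv1alpha1.WebhookClientConfig{
			// Exactly one of Service or URL may be set; Service is the
			// natural choice for a receiver running inside the cluster.
			Service: &auditregv1alpha1.ServiceReference{
				Namespace: "audit",
				Name:      "audit-webhook",
				Path:      &path,
			},
			CABundle: caBundle,
		},
	}
}

func main() {
	// Passing nil for caBundle means the apiserver's system trust roots
	// would be used, per the field documentation above.
	_ = exampleWebhook(nil)
}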
+ // +optional + Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` +} diff --git a/cluster-autoscaler/vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 000000000000..edd608f3b26e --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,110 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AuditSink = map[string]string{ + "": "AuditSink represents a cluster level audit sink", + "spec": "Spec defines the audit configuration spec", +} + +func (AuditSink) SwaggerDoc() map[string]string { + return map_AuditSink +} + +var map_AuditSinkList = map[string]string{ + "": "AuditSinkList is a list of AuditSink items.", + "items": "List of audit configurations.", +} + +func (AuditSinkList) SwaggerDoc() map[string]string { + return map_AuditSinkList +} + +var map_AuditSinkSpec = map[string]string{ + "": "AuditSinkSpec holds the spec for the audit sink", + "policy": "Policy defines the policy for selecting which events should be sent to the webhook required", + "webhook": "Webhook to send events required", +} + +func (AuditSinkSpec) SwaggerDoc() map[string]string { + return map_AuditSinkSpec +} + +var map_Policy = map[string]string{ + "": "Policy defines the configuration of how audit events are logged", + "level": "The Level that all requests are recorded at. available options: None, Metadata, Request, RequestResponse required", + "stages": "Stages is a list of stages for which events are created.", +} + +func (Policy) SwaggerDoc() map[string]string { + return map_Policy +} + +var map_ServiceReference = map[string]string{ + "": "ServiceReference holds a reference to Service.legacy.k8s.io", + "namespace": "`namespace` is the namespace of the service. Required", + "name": "`name` is the name of the service. 
Required", + "path": "`path` is an optional URL path which will be sent in any request to this service.", +} + +func (ServiceReference) SwaggerDoc() map[string]string { + return map_ServiceReference +} + +var map_Webhook = map[string]string{ + "": "Webhook holds the configuration of the webhook", + "throttle": "Throttle holds the options for throttling the webhook", + "clientConfig": "ClientConfig holds the connection parameters for the webhook required", +} + +func (Webhook) SwaggerDoc() map[string]string { + return map_Webhook +} + +var map_WebhookClientConfig = map[string]string{ + "": "WebhookClientConfig contains the information to make a connection with the webhook", + "url": "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", + "service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.\n\nPort 443 will be used if it is open, otherwise it is an error.", + "caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.", +} + +func (WebhookClientConfig) SwaggerDoc() map[string]string { + return map_WebhookClientConfig +} + +var map_WebhookThrottleConfig = map[string]string{ + "": "WebhookThrottleConfig holds the configuration for throttling events", + "qps": "ThrottleQPS maximum number of batches per second default 10 QPS", + "burst": "ThrottleBurst is the maximum number of events sent at the same moment default 15 QPS", +} + +func (WebhookThrottleConfig) SwaggerDoc() map[string]string { + return map_WebhookThrottleConfig +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/cluster-autoscaler/vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000000..e71deffad371 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,224 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditSink) DeepCopyInto(out *AuditSink) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditSink. +func (in *AuditSink) DeepCopy() *AuditSink { + if in == nil { + return nil + } + out := new(AuditSink) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AuditSink) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditSinkList) DeepCopyInto(out *AuditSinkList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AuditSink, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditSinkList. +func (in *AuditSinkList) DeepCopy() *AuditSinkList { + if in == nil { + return nil + } + out := new(AuditSinkList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AuditSinkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditSinkSpec) DeepCopyInto(out *AuditSinkSpec) { + *out = *in + in.Policy.DeepCopyInto(&out.Policy) + in.Webhook.DeepCopyInto(&out.Webhook) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditSinkSpec. +func (in *AuditSinkSpec) DeepCopy() *AuditSinkSpec { + if in == nil { + return nil + } + out := new(AuditSinkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Policy) DeepCopyInto(out *Policy) { + *out = *in + if in.Stages != nil { + in, out := &in.Stages, &out.Stages + *out = make([]Stage, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. +func (in *Policy) DeepCopy() *Policy { + if in == nil { + return nil + } + out := new(Policy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
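(Editor's aside: the `ServiceReference` helper that follows the comment above copies its optional `Path` pointer by hand, which is the core of the pattern deepcopy-gen emits throughout this file — `*out = *in` covers plain value fields, and every pointer, slice, or map field then gets its own allocation. A minimal hand-written sketch of the same pattern, using a hypothetical `Config` type that is not part of this API, shows why the shallow copy alone would alias the pointer:)

```go
package main

import "fmt"

// Config is a hypothetical stand-in with the same shape deepcopy-gen
// handles here: one value field, one pointer field.
type Config struct {
	Name string  // value field: covered by *out = *in
	Path *string // reference field: needs a fresh allocation
}

// DeepCopyInto mirrors the generated pattern: shallow-copy first, then
// re-allocate each reference field so the copy owns its own data.
func (in *Config) DeepCopyInto(out *Config) {
	*out = *in // copies Name, but still aliases Path
	if in.Path != nil {
		in, out := &in.Path, &out.Path
		*out = new(string)
		**out = **in
	}
}

func main() {
	p := "/v1/audit"
	a := Config{Name: "sink", Path: &p}
	var b Config
	a.DeepCopyInto(&b)
	*a.Path = "/changed" // mutate the original...
	fmt.Println(*b.Path) // ...the copy still prints "/v1/audit"
}
```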
+func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. +func (in *ServiceReference) DeepCopy() *ServiceReference { + if in == nil { + return nil + } + out := new(ServiceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Webhook) DeepCopyInto(out *Webhook) { + *out = *in + if in.Throttle != nil { + in, out := &in.Throttle, &out.Throttle + *out = new(WebhookThrottleConfig) + (*in).DeepCopyInto(*out) + } + in.ClientConfig.DeepCopyInto(&out.ClientConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Webhook. +func (in *Webhook) DeepCopy() *Webhook { + if in == nil { + return nil + } + out := new(Webhook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceReference) + (*in).DeepCopyInto(*out) + } + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig. +func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig { + if in == nil { + return nil + } + out := new(WebhookClientConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookThrottleConfig) DeepCopyInto(out *WebhookThrottleConfig) { + *out = *in + if in.QPS != nil { + in, out := &in.QPS, &out.QPS + *out = new(int64) + **out = **in + } + if in.Burst != nil { + in, out := &in.Burst, &out.Burst + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookThrottleConfig. +func (in *WebhookThrottleConfig) DeepCopy() *WebhookThrottleConfig { + if in == nil { + return nil + } + out := new(WebhookThrottleConfig) + in.DeepCopyInto(out) + return out +} diff --git a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/doc.go b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/doc.go index 55cffaa1a88c..ad5ba6432760 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/doc.go @@ -17,4 +17,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=authentication.k8s.io // +k8s:openapi-gen=true + package v1 diff --git a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/generated.pb.go index 2ce2e2d78e00..4e7f28d8c974 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto -// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. @@ -356,6 +355,21 @@ func (m *TokenReviewSpec) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) i += copy(dAtA[i:], m.Token) + if len(m.Audiences) > 0 { + for _, s := range m.Audiences { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } return i, nil } @@ -394,6 +408,21 @@ func (m *TokenReviewStatus) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) i += copy(dAtA[i:], m.Error) + if len(m.Audiences) > 0 { + for _, s := range m.Audiences { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } return i, nil } @@ -469,24 +498,6 @@ func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -580,6 +591,12 @@ func (m *TokenReviewSpec) Size() (n int) { _ = l l = len(m.Token) n += 1 + l + sovGenerated(uint64(l)) + if len(m.Audiences) > 0 { + for _, s := range m.Audiences { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -591,6 +608,12 @@ func (m *TokenReviewStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Error) n += 1 + l + sovGenerated(uint64(l)) + if len(m.Audiences) > 0 { + for _, s := range m.Audiences { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -698,6 +721,7 @@ func (this *TokenReviewSpec) String() string { } s := strings.Join([]string{`&TokenReviewSpec{`, `Token:` + fmt.Sprintf("%v", this.Token) + `,`, + `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`, `}`, }, "") return s @@ -710,6 +734,7 @@ func (this *TokenReviewStatus) String() string { `Authenticated:` + fmt.Sprintf("%v", this.Authenticated) + `,`, `User:` + strings.Replace(strings.Replace(this.User.String(), "UserInfo", "UserInfo", 1), `&`, ``, 1) + `,`, `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`, `}`, }, "") return s @@ -1569,6 +1594,35 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } m.Token = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if 
b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1698,6 +1752,35 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1861,51 +1944,14 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Extra == nil { m.Extra = make(map[string]ExtraValue) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + mapvalue := &ExtraValue{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1915,46 +1961,85 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = 
string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Extra[mapkey] = *mapvalue - } else { - var mapvalue ExtraValue - m.Extra[mapkey] = mapvalue } + m.Extra[mapkey] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -2087,61 +2172,62 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 892 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x8f, 0xdb, 0x44, - 0x14, 0x8e, 0xf3, 0x63, 0xb5, 0x99, 0x74, 0x97, 0xdd, 0x29, 0x95, 0xa2, 0x05, 0xec, 0x60, 0x24, - 0x14, 0x01, 0xb5, 0x9b, 0x08, 0x95, 0xaa, 0x48, 0x48, 0x6b, 0x36, 0x82, 0x08, 0x41, 0xab, 0x69, - 0x77, 0x41, 0x9c, 0x98, 0xd8, 0x6f, 0xb3, 0x26, 0x78, 0x6c, 0xec, 0x71, 0x68, 0x6e, 0xfd, 0x13, - 0x38, 0x82, 0xc4, 0x81, 0x3f, 0x02, 0x89, 0x23, 0xd7, 0x3d, 0x56, 0x9c, 0x7a, 0x40, 0x11, 0x6b, - 0xfe, 0x05, 0x4e, 0x9c, 0xd0, 0x8c, 0x67, 0xe3, 0xfc, 0xd8, 0x4d, 0x73, 0xea, 0x2d, 0xf3, 0xde, - 0xf7, 0xbe, 0x79, 0xef, 0x9b, 0x2f, 0xcf, 0xa8, 0x37, 0xba, 0x97, 0x58, 0x7e, 0x68, 0x8f, 0xd2, - 0x01, 0xc4, 0x0c, 0x38, 0x24, 0xf6, 0x18, 0x98, 0x17, 0xc6, 0xb6, 0x4a, 0xd0, 0xc8, 0xb7, 0x69, - 0xca, 0xcf, 0x80, 0x71, 0xdf, 0xa5, 0xdc, 0x0f, 0x99, 0x3d, 0xee, 0xd8, 0x43, 0x60, 0x10, 0x53, - 0x0e, 0x9e, 0x15, 0xc5, 0x21, 0x0f, 0xf1, 0xeb, 0x39, 0xda, 0xa2, 0x91, 0x6f, 0x2d, 0xa2, 0xad, - 0x71, 0xe7, 0xe0, 0xf6, 0xd0, 0xe7, 0x67, 0xe9, 0xc0, 0x72, 0xc3, 0xc0, 0x1e, 0x86, 0xc3, 0xd0, - 0x96, 0x45, 0x83, 0xf4, 0x54, 0x9e, 0xe4, 0x41, 0xfe, 0xca, 0xc9, 0x0e, 0xde, 0x2f, 0xae, 0x0e, - 0xa8, 0x7b, 0xe6, 0x33, 0x88, 0x27, 0x76, 0x34, 0x1a, 0x8a, 0x40, 0x62, 0x07, 0xc0, 0xe9, 0x15, - 0x2d, 0x1c, 0xd8, 0xd7, 0x55, 0xc5, 0x29, 0xe3, 0x7e, 0x00, 0x2b, 0x05, 0x77, 0x5f, 0x54, 0x90, - 0xb8, 0x67, 0x10, 0xd0, 0xe5, 0x3a, 0xf3, 0x4f, 0x0d, 0xbd, 0xea, 0x84, 0x29, 0xf3, 0x1e, 0x0c, - 0xbe, 0x05, 0x97, 0x13, 0x38, 0x85, 0x18, 0x98, 0x0b, 0xb8, 0x85, 0xaa, 0x23, 0x9f, 0x79, 0x4d, - 0xad, 0xa5, 0xb5, 0xeb, 0xce, 0x8d, 0xf3, 0xa9, 0x51, 0xca, 0xa6, 0x46, 0xf5, 0x33, 0x9f, 0x79, - 0x44, 0x66, 0x70, 0x17, 0x21, 0xfa, 0xb0, 0x7f, 0x02, 0x71, 0xe2, 0x87, 0xac, 0x59, 0x96, 0x38, - 0xac, 0x70, 0xe8, 0x70, 0x96, 0x21, 0x73, 0x28, 0xc1, 0xca, 0x68, 0x00, 0xcd, 0xca, 0x22, 0xeb, - 0x17, 0x34, 0x00, 0x22, 0x33, 0xd8, 0x41, 0x95, 0xb4, 0x7f, 
0xd4, 0xac, 0x4a, 0xc0, 0x1d, 0x05, - 0xa8, 0x1c, 0xf7, 0x8f, 0xfe, 0x9b, 0x1a, 0x6f, 0x5e, 0x37, 0x24, 0x9f, 0x44, 0x90, 0x58, 0xc7, - 0xfd, 0x23, 0x22, 0x8a, 0xcd, 0x0f, 0x10, 0xea, 0x3d, 0xe1, 0x31, 0x3d, 0xa1, 0xdf, 0xa5, 0x80, - 0x0d, 0x54, 0xf3, 0x39, 0x04, 0x49, 0x53, 0x6b, 0x55, 0xda, 0x75, 0xa7, 0x9e, 0x4d, 0x8d, 0x5a, - 0x5f, 0x04, 0x48, 0x1e, 0xbf, 0xbf, 0xfd, 0xd3, 0xaf, 0x46, 0xe9, 0xe9, 0x5f, 0xad, 0x92, 0xf9, - 0x4b, 0x19, 0xdd, 0x78, 0x1c, 0x8e, 0x80, 0x11, 0xf8, 0x3e, 0x85, 0x84, 0xe3, 0x6f, 0xd0, 0xb6, - 0x78, 0x22, 0x8f, 0x72, 0x2a, 0x95, 0x68, 0x74, 0xef, 0x58, 0x85, 0x3b, 0x66, 0x4d, 0x58, 0xd1, - 0x68, 0x28, 0x02, 0x89, 0x25, 0xd0, 0xd6, 0xb8, 0x63, 0xe5, 0x72, 0x7e, 0x0e, 0x9c, 0x16, 0x9a, - 0x14, 0x31, 0x32, 0x63, 0xc5, 0x0f, 0x51, 0x35, 0x89, 0xc0, 0x95, 0xfa, 0x35, 0xba, 0x96, 0xb5, - 0xce, 0x7b, 0xd6, 0x7c, 0x6f, 0x8f, 0x22, 0x70, 0x0b, 0x05, 0xc5, 0x89, 0x48, 0x26, 0xfc, 0x15, - 0xda, 0x4a, 0x38, 0xe5, 0x69, 0x22, 0x55, 0x5e, 0xec, 0xf8, 0x45, 0x9c, 0xb2, 0xce, 0xd9, 0x55, - 0xac, 0x5b, 0xf9, 0x99, 0x28, 0x3e, 0xf3, 0x5f, 0x0d, 0xed, 0x2d, 0xb7, 0x80, 0xdf, 0x45, 0x75, - 0x9a, 0x7a, 0xbe, 0x30, 0xcd, 0xa5, 0xc4, 0x3b, 0xd9, 0xd4, 0xa8, 0x1f, 0x5e, 0x06, 0x49, 0x91, - 0xc7, 0x0c, 0xed, 0x0e, 0x16, 0xdc, 0xa6, 0x7a, 0xec, 0xae, 0xef, 0xf1, 0x2a, 0x87, 0x3a, 0x38, - 0x9b, 0x1a, 0xbb, 0x8b, 0x19, 0xb2, 0xc4, 0x8e, 0x3f, 0x46, 0xfb, 0xf0, 0x24, 0xf2, 0x63, 0xc9, - 0xf4, 0x08, 0xdc, 0x90, 0x79, 0x89, 0xf4, 0x56, 0xc5, 0xb9, 0x95, 0x4d, 0x8d, 0xfd, 0xde, 0x72, - 0x92, 0xac, 0xe2, 0xcd, 0xdf, 0x34, 0x84, 0x57, 0x55, 0xc2, 0x6f, 0xa1, 0x1a, 0x17, 0x51, 0xf5, - 0x17, 0xd9, 0x51, 0xa2, 0xd5, 0x72, 0x68, 0x9e, 0xc3, 0x13, 0x74, 0xb3, 0x20, 0x7c, 0xec, 0x07, - 0x90, 0x70, 0x1a, 0x44, 0xea, 0xb5, 0xdf, 0xd9, 0xcc, 0x4b, 0xa2, 0xcc, 0x79, 0x4d, 0xd1, 0xdf, - 0xec, 0xad, 0xd2, 0x91, 0xab, 0xee, 0x30, 0x7f, 0x2e, 0xa3, 0x86, 0x6a, 0x7b, 0xec, 0xc3, 0x0f, - 0x2f, 0xc1, 0xcb, 0x0f, 0x16, 0xbc, 0x7c, 0x7b, 0x23, 0xdf, 0x89, 0xd6, 0xae, 0xb5, 0xf2, 0x97, - 0x4b, 0x56, 0xb6, 0x37, 0xa7, 0x5c, 0xef, 0xe4, 0xbb, 0xe8, 0x95, 0xa5, 0xfb, 0x37, 0x7a, 0x4e, - 0xf3, 0x0f, 0x0d, 0xed, 0xaf, 0xdc, 0x82, 0x3f, 0x44, 0x3b, 0x73, 0xcd, 0x40, 0xbe, 0x34, 0xb7, - 0x9d, 0x5b, 0x8a, 0x62, 0xe7, 0x70, 0x3e, 0x49, 0x16, 0xb1, 0xf8, 0x53, 0x54, 0x4d, 0x13, 0x88, - 0x95, 0x68, 0x6f, 0xaf, 0x9f, 0xf0, 0x38, 0x81, 0xb8, 0xcf, 0x4e, 0xc3, 0x42, 0x2d, 0x11, 0x21, - 0x92, 0x41, 0x4c, 0x00, 0x71, 0x1c, 0xc6, 0x6a, 0xbb, 0xce, 0x26, 0xe8, 0x89, 0x20, 0xc9, 0x73, - 0xe6, 0xef, 0x65, 0xb4, 0x7d, 0xc9, 0x82, 0xdf, 0x43, 0xdb, 0xa2, 0x52, 0xae, 0xe4, 0x7c, 0xec, - 0x3d, 0x55, 0x24, 0x31, 0x22, 0x4e, 0x66, 0x08, 0xfc, 0x06, 0xaa, 0xa4, 0xbe, 0xa7, 0x36, 0x7d, - 0x63, 0x6e, 0x35, 0x13, 0x11, 0xc7, 0x26, 0xda, 0x1a, 0xc6, 0x61, 0x1a, 0x89, 0xc7, 0x12, 0x5b, - 0x00, 0x09, 0xdd, 0x3f, 0x91, 0x11, 0xa2, 0x32, 0xf8, 0x04, 0xd5, 0x40, 0x6c, 0xe6, 0x66, 0xb5, - 0x55, 0x69, 0x37, 0xba, 0x9d, 0xcd, 0xa6, 0xb5, 0xe4, 0x36, 0xef, 0x31, 0x1e, 0x4f, 0xe6, 0xa6, - 0x12, 0x31, 0x92, 0xd3, 0x1d, 0x0c, 0xd4, 0xc6, 0x97, 0x18, 0xbc, 0x87, 0x2a, 0x23, 0x98, 0xe4, - 0x13, 0x11, 0xf1, 0x13, 0x7f, 0x84, 0x6a, 0x63, 0xf1, 0x31, 0x50, 0x2a, 0xb7, 0xd7, 0xdf, 0x5b, - 0x7c, 0x3c, 0x48, 0x5e, 0x76, 0xbf, 0x7c, 0x4f, 0x73, 0xda, 0xe7, 0x17, 0x7a, 0xe9, 0xd9, 0x85, - 0x5e, 0x7a, 0x7e, 0xa1, 0x97, 0x9e, 0x66, 0xba, 0x76, 0x9e, 0xe9, 0xda, 0xb3, 0x4c, 0xd7, 0x9e, - 0x67, 0xba, 0xf6, 0x77, 0xa6, 0x6b, 0x3f, 0xfe, 0xa3, 0x97, 0xbe, 0x2e, 0x8f, 0x3b, 0xff, 0x07, - 0x00, 0x00, 0xff, 0xff, 0x5e, 0x8d, 0x94, 0x78, 0x88, 0x08, 0x00, 0x00, + // 900 bytes 
of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0x8e, 0xf3, 0xa3, 0x4a, 0x26, 0xdb, 0xd2, 0xce, 0xb2, 0x52, 0x54, 0xc0, 0x2e, 0x41, 0x42, + 0x15, 0xb0, 0xf6, 0x26, 0x42, 0xb0, 0x5a, 0x24, 0xa4, 0x9a, 0x46, 0x10, 0x21, 0xd8, 0xd5, 0xec, + 0xb6, 0x20, 0x4e, 0x4c, 0xec, 0xd7, 0xc4, 0x04, 0x8f, 0x8d, 0x3d, 0x0e, 0x9b, 0xdb, 0xfe, 0x09, + 0x1c, 0x41, 0xe2, 0xc0, 0x1f, 0x81, 0xc4, 0xbf, 0xd0, 0xe3, 0x8a, 0xd3, 0x1e, 0x50, 0x44, 0xcd, + 0x95, 0x23, 0x27, 0x4e, 0x68, 0xc6, 0xd3, 0x38, 0x4e, 0xda, 0x34, 0x27, 0x6e, 0x9e, 0xf7, 0xbe, + 0xf7, 0xbd, 0x37, 0xdf, 0x7c, 0x9e, 0x41, 0xbd, 0xf1, 0xfd, 0xd8, 0xf4, 0x02, 0x6b, 0x9c, 0x0c, + 0x20, 0x62, 0xc0, 0x21, 0xb6, 0x26, 0xc0, 0xdc, 0x20, 0xb2, 0x54, 0x82, 0x86, 0x9e, 0x45, 0x13, + 0x3e, 0x02, 0xc6, 0x3d, 0x87, 0x72, 0x2f, 0x60, 0xd6, 0xa4, 0x63, 0x0d, 0x81, 0x41, 0x44, 0x39, + 0xb8, 0x66, 0x18, 0x05, 0x3c, 0xc0, 0xaf, 0x66, 0x68, 0x93, 0x86, 0x9e, 0x59, 0x44, 0x9b, 0x93, + 0xce, 0xfe, 0xdd, 0xa1, 0xc7, 0x47, 0xc9, 0xc0, 0x74, 0x02, 0xdf, 0x1a, 0x06, 0xc3, 0xc0, 0x92, + 0x45, 0x83, 0xe4, 0x4c, 0xae, 0xe4, 0x42, 0x7e, 0x65, 0x64, 0xfb, 0xef, 0xe6, 0xad, 0x7d, 0xea, + 0x8c, 0x3c, 0x06, 0xd1, 0xd4, 0x0a, 0xc7, 0x43, 0x11, 0x88, 0x2d, 0x1f, 0x38, 0xbd, 0x62, 0x84, + 0x7d, 0xeb, 0xba, 0xaa, 0x28, 0x61, 0xdc, 0xf3, 0x61, 0xa5, 0xe0, 0xbd, 0x9b, 0x0a, 0x62, 0x67, + 0x04, 0x3e, 0x5d, 0xae, 0x6b, 0xff, 0xae, 0xa1, 0x97, 0xed, 0x20, 0x61, 0xee, 0xc3, 0xc1, 0x37, + 0xe0, 0x70, 0x02, 0x67, 0x10, 0x01, 0x73, 0x00, 0x1f, 0xa0, 0xea, 0xd8, 0x63, 0x6e, 0x4b, 0x3b, + 0xd0, 0x0e, 0x1b, 0xf6, 0xad, 0xf3, 0x99, 0x51, 0x4a, 0x67, 0x46, 0xf5, 0x53, 0x8f, 0xb9, 0x44, + 0x66, 0x70, 0x17, 0x21, 0xfa, 0xa8, 0x7f, 0x0a, 0x51, 0xec, 0x05, 0xac, 0x55, 0x96, 0x38, 0xac, + 0x70, 0xe8, 0x68, 0x9e, 0x21, 0x0b, 0x28, 0xc1, 0xca, 0xa8, 0x0f, 0xad, 0x4a, 0x91, 0xf5, 0x73, + 0xea, 0x03, 0x91, 0x19, 0x6c, 0xa3, 0x4a, 0xd2, 0x3f, 0x6e, 0x55, 0x25, 0xe0, 0x9e, 0x02, 0x54, + 0x4e, 0xfa, 0xc7, 0xff, 0xce, 0x8c, 0xd7, 0xaf, 0xdb, 0x24, 0x9f, 0x86, 0x10, 0x9b, 0x27, 0xfd, + 0x63, 0x22, 0x8a, 0xdb, 0xef, 0x23, 0xd4, 0x7b, 0xca, 0x23, 0x7a, 0x4a, 0xbf, 0x4d, 0x00, 0x1b, + 0xa8, 0xe6, 0x71, 0xf0, 0xe3, 0x96, 0x76, 0x50, 0x39, 0x6c, 0xd8, 0x8d, 0x74, 0x66, 0xd4, 0xfa, + 0x22, 0x40, 0xb2, 0xf8, 0x83, 0xfa, 0x8f, 0xbf, 0x18, 0xa5, 0x67, 0x7f, 0x1c, 0x94, 0xda, 0x3f, + 0x97, 0xd1, 0xad, 0x27, 0xc1, 0x18, 0x18, 0x81, 0xef, 0x12, 0x88, 0x39, 0xfe, 0x1a, 0xd5, 0xc5, + 0x11, 0xb9, 0x94, 0x53, 0xa9, 0x44, 0xb3, 0x7b, 0xcf, 0xcc, 0xdd, 0x31, 0x1f, 0xc2, 0x0c, 0xc7, + 0x43, 0x11, 0x88, 0x4d, 0x81, 0x36, 0x27, 0x1d, 0x33, 0x93, 0xf3, 0x33, 0xe0, 0x34, 0xd7, 0x24, + 0x8f, 0x91, 0x39, 0x2b, 0x7e, 0x84, 0xaa, 0x71, 0x08, 0x8e, 0xd4, 0xaf, 0xd9, 0x35, 0xcd, 0x75, + 0xde, 0x33, 0x17, 0x67, 0x7b, 0x1c, 0x82, 0x93, 0x2b, 0x28, 0x56, 0x44, 0x32, 0xe1, 0x2f, 0xd1, + 0x56, 0xcc, 0x29, 0x4f, 0x62, 0xa9, 0x72, 0x71, 0xe2, 0x9b, 0x38, 0x65, 0x9d, 0xbd, 0xa3, 0x58, + 0xb7, 0xb2, 0x35, 0x51, 0x7c, 0xed, 0x7f, 0x34, 0xb4, 0xbb, 0x3c, 0x02, 0x7e, 0x1b, 0x35, 0x68, + 0xe2, 0x7a, 0xc2, 0x34, 0x97, 0x12, 0x6f, 0xa7, 0x33, 0xa3, 0x71, 0x74, 0x19, 0x24, 0x79, 0x1e, + 0x33, 0xb4, 0x33, 0x28, 0xb8, 0x4d, 0xcd, 0xd8, 0x5d, 0x3f, 0xe3, 0x55, 0x0e, 0xb5, 0x71, 0x3a, + 0x33, 0x76, 0x8a, 0x19, 0xb2, 0xc4, 0x8e, 0x3f, 0x42, 0x7b, 0xf0, 0x34, 0xf4, 0x22, 0xc9, 0xf4, + 0x18, 0x9c, 0x80, 0xb9, 0xb1, 0xf4, 0x56, 0xc5, 0xbe, 0x93, 0xce, 0x8c, 0xbd, 0xde, 0x72, 0x92, + 0xac, 0xe2, 0xdb, 0xbf, 0x6a, 0x08, 0xaf, 0xaa, 0x84, 0xdf, 0x40, 0x35, 0x2e, 0xa2, 0xea, 
0x17, + 0xd9, 0x56, 0xa2, 0xd5, 0x32, 0x68, 0x96, 0xc3, 0x53, 0x74, 0x3b, 0x27, 0x7c, 0xe2, 0xf9, 0x10, + 0x73, 0xea, 0x87, 0xea, 0xb4, 0xdf, 0xda, 0xcc, 0x4b, 0xa2, 0xcc, 0x7e, 0x45, 0xd1, 0xdf, 0xee, + 0xad, 0xd2, 0x91, 0xab, 0x7a, 0xb4, 0x7f, 0x2a, 0xa3, 0xa6, 0x1a, 0x7b, 0xe2, 0xc1, 0xf7, 0xff, + 0x83, 0x97, 0x1f, 0x16, 0xbc, 0x7c, 0x77, 0x23, 0xdf, 0x89, 0xd1, 0xae, 0xb5, 0xf2, 0x17, 0x4b, + 0x56, 0xb6, 0x36, 0xa7, 0x5c, 0xef, 0x64, 0x07, 0xbd, 0xb4, 0xd4, 0x7f, 0xb3, 0xe3, 0x2c, 0x98, + 0xbd, 0xbc, 0xde, 0xec, 0xed, 0xbf, 0x35, 0xb4, 0xb7, 0x32, 0x12, 0xfe, 0x00, 0x6d, 0x2f, 0x4c, + 0x0e, 0xd9, 0x0d, 0x5b, 0xb7, 0xef, 0xa8, 0x7e, 0xdb, 0x47, 0x8b, 0x49, 0x52, 0xc4, 0xe2, 0x4f, + 0x50, 0x35, 0x89, 0x21, 0x52, 0x0a, 0xbf, 0xb9, 0x5e, 0x8e, 0x93, 0x18, 0xa2, 0x3e, 0x3b, 0x0b, + 0x72, 0x69, 0x45, 0x84, 0x48, 0x06, 0xb1, 0x5d, 0x88, 0xa2, 0x20, 0x52, 0x57, 0xf1, 0x7c, 0xbb, + 0x3d, 0x11, 0x24, 0x59, 0xae, 0xb8, 0xdd, 0xea, 0x0d, 0xdb, 0xfd, 0xad, 0x8c, 0xea, 0x97, 0x2d, + 0xf1, 0x3b, 0xa8, 0x2e, 0xda, 0xc8, 0xcb, 0x3e, 0x13, 0x74, 0x57, 0x75, 0x90, 0x18, 0x11, 0x27, + 0x73, 0x04, 0x7e, 0x0d, 0x55, 0x12, 0xcf, 0x55, 0x6f, 0x48, 0x73, 0xe1, 0xd2, 0x27, 0x22, 0x8e, + 0xdb, 0x68, 0x6b, 0x18, 0x05, 0x49, 0x28, 0x6c, 0x20, 0x66, 0x40, 0xe2, 0x44, 0x3f, 0x96, 0x11, + 0xa2, 0x32, 0xf8, 0x14, 0xd5, 0x40, 0xdc, 0xf9, 0x72, 0xcc, 0x66, 0xb7, 0xb3, 0x99, 0x34, 0xa6, + 0x7c, 0x27, 0x7a, 0x8c, 0x47, 0xd3, 0x05, 0x09, 0x44, 0x8c, 0x64, 0x74, 0xfb, 0x03, 0xf5, 0x96, + 0x48, 0x0c, 0xde, 0x45, 0x95, 0x31, 0x4c, 0xb3, 0x1d, 0x11, 0xf1, 0x89, 0x3f, 0x44, 0xb5, 0x89, + 0x78, 0x66, 0xd4, 0x91, 0x1c, 0xae, 0xef, 0x9b, 0x3f, 0x4b, 0x24, 0x2b, 0x7b, 0x50, 0xbe, 0xaf, + 0xd9, 0x87, 0xe7, 0x17, 0x7a, 0xe9, 0xf9, 0x85, 0x5e, 0x7a, 0x71, 0xa1, 0x97, 0x9e, 0xa5, 0xba, + 0x76, 0x9e, 0xea, 0xda, 0xf3, 0x54, 0xd7, 0x5e, 0xa4, 0xba, 0xf6, 0x67, 0xaa, 0x6b, 0x3f, 0xfc, + 0xa5, 0x97, 0xbe, 0x2a, 0x4f, 0x3a, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x5f, 0x04, 0x81, 0x6f, + 0xe2, 0x08, 0x00, 0x00, } diff --git a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/generated.proto index 10c792171d2a..b69636a814dd 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/generated.proto @@ -118,6 +118,14 @@ message TokenReviewSpec { // Token is the opaque bearer token. // +optional optional string token = 1; + + // Audiences is a list of the identifiers that the resource server presented + // with the token identifies as. Audience-aware token authenticators will + // verify that the token was intended for at least one of the audiences in + // this list. If no audiences are provided, the audience will default to the + // audience of the Kubernetes apiserver. + // +optional + repeated string audiences = 2; } // TokenReviewStatus is the result of the token authentication request. @@ -130,6 +138,18 @@ message TokenReviewStatus { // +optional optional UserInfo user = 2; + // Audiences are audience identifiers chosen by the authenticator that are + // compatible with both the TokenReview and token. An identifier is any + // identifier in the intersection of the TokenReviewSpec audiences and the + // token's audiences. A client of the TokenReview API that sets the + // spec.audiences field should validate that a compatible audience identifier + // is returned in the status.audiences field to ensure that the TokenReview + // server is audience aware. 
If a TokenReview returns an empty + // status.audience field where status.authenticated is "true", the token is + // valid against the audience of the Kubernetes API server. + // +optional + repeated string audiences = 4; + // Error indicates that the token couldn't be checked // +optional optional string error = 3; diff --git a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/types.go b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/types.go index 723457a3dd21..d348c6fd4054 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/types.go @@ -64,6 +64,13 @@ type TokenReviewSpec struct { // Token is the opaque bearer token. // +optional Token string `json:"token,omitempty" protobuf:"bytes,1,opt,name=token"` + // Audiences is a list of the identifiers that the resource server presented + // with the token identifies as. Audience-aware token authenticators will + // verify that the token was intended for at least one of the audiences in + // this list. If no audiences are provided, the audience will default to the + // audience of the Kubernetes apiserver. + // +optional + Audiences []string `json:"audiences,omitempty" protobuf:"bytes,2,rep,name=audiences"` } // TokenReviewStatus is the result of the token authentication request. @@ -74,6 +81,17 @@ type TokenReviewStatus struct { // User is the UserInfo associated with the provided token. // +optional User UserInfo `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` + // Audiences are audience identifiers chosen by the authenticator that are + // compatible with both the TokenReview and token. An identifier is any + // identifier in the intersection of the TokenReviewSpec audiences and the + // token's audiences. A client of the TokenReview API that sets the + // spec.audiences field should validate that a compatible audience identifier + // is returned in the status.audiences field to ensure that the TokenReview + // server is audience aware. If a TokenReview returns an empty + // status.audience field where status.authenticated is "true", the token is + // valid against the audience of the Kubernetes API server. + // +optional + Audiences []string `json:"audiences,omitempty" protobuf:"bytes,4,rep,name=audiences"` // Error indicates that the token couldn't be checked // +optional Error string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` diff --git a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go index 6632a5dd5b20..f2c9b95c71f1 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go @@ -79,8 +79,9 @@ func (TokenReview) SwaggerDoc() map[string]string { } var map_TokenReviewSpec = map[string]string{ - "": "TokenReviewSpec is a description of the token authentication request.", - "token": "Token is the opaque bearer token.", + "": "TokenReviewSpec is a description of the token authentication request.", + "token": "Token is the opaque bearer token.", + "audiences": "Audiences is a list of the identifiers that the resource server presented with the token identifies as. Audience-aware token authenticators will verify that the token was intended for at least one of the audiences in this list. 
If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver.", } func (TokenReviewSpec) SwaggerDoc() map[string]string { @@ -91,6 +92,7 @@ var map_TokenReviewStatus = map[string]string{ "": "TokenReviewStatus is the result of the token authentication request.", "authenticated": "Authenticated indicates that the token was associated with a known user.", "user": "User is the UserInfo associated with the provided token.", + "audiences": "Audiences are audience identifiers chosen by the authenticator that are compatible with both the TokenReview and token. An identifier is any identifier in the intersection of the TokenReviewSpec audiences and the token's audiences. A client of the TokenReview API that sets the spec.audiences field should validate that a compatible audience identifier is returned in the status.audiences field to ensure that the TokenReview server is audience aware. If a TokenReview returns an empty status.audience field where status.authenticated is \"true\", the token is valid against the audience of the Kubernetes API server.", "error": "Error indicates that the token couldn't be checked", } diff --git a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go index f36c253b2e75..aca99c42b765 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go @@ -141,7 +141,7 @@ func (in *TokenReview) DeepCopyInto(out *TokenReview) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) return } @@ -167,6 +167,11 @@ func (in *TokenReview) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TokenReviewSpec) DeepCopyInto(out *TokenReviewSpec) { *out = *in + if in.Audiences != nil { + in, out := &in.Audiences, &out.Audiences + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -184,6 +189,11 @@ func (in *TokenReviewSpec) DeepCopy() *TokenReviewSpec { func (in *TokenReviewStatus) DeepCopyInto(out *TokenReviewStatus) { *out = *in in.User.DeepCopyInto(&out.User) + if in.Audiences != nil { + in, out := &in.Audiences, &out.Audiences + *out = make([]string, len(*in)) + copy(*out, *in) + } return } diff --git a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1beta1/doc.go b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1beta1/doc.go index 0efef36703d5..2f824e726884 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1beta1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1beta1/doc.go @@ -17,4 +17,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=authentication.k8s.io // +k8s:openapi-gen=true + package v1beta1 diff --git a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go index 8503d212baae..5f34e76a9c3e 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. @@ -176,6 +175,21 @@ func (m *TokenReviewSpec) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) i += copy(dAtA[i:], m.Token) + if len(m.Audiences) > 0 { + for _, s := range m.Audiences { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } return i, nil } @@ -214,6 +228,21 @@ func (m *TokenReviewStatus) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) i += copy(dAtA[i:], m.Error) + if len(m.Audiences) > 0 { + for _, s := range m.Audiences { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } return i, nil } @@ -289,24 +318,6 @@ func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -345,6 +356,12 @@ func (m *TokenReviewSpec) Size() (n int) { _ = l l = len(m.Token) n += 1 + l + sovGenerated(uint64(l)) + if len(m.Audiences) > 0 { + for _, s := range m.Audiences { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -356,6 +373,12 @@ func (m *TokenReviewStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Error) n += 1 + l + sovGenerated(uint64(l)) + if len(m.Audiences) > 0 { + for _, s := range m.Audiences { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -415,6 +438,7 @@ func (this *TokenReviewSpec) String() string { } s := strings.Join([]string{`&TokenReviewSpec{`, `Token:` + fmt.Sprintf("%v", this.Token) + `,`, + `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`, `}`, }, "") return s @@ -427,6 +451,7 @@ func (this *TokenReviewStatus) String() string { `Authenticated:` + fmt.Sprintf("%v", this.Authenticated) + `,`, `User:` + strings.Replace(strings.Replace(this.User.String(), "UserInfo", "UserInfo", 1), `&`, ``, 1) + `,`, `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`, `}`, }, "") return s @@ -739,6 +764,35 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } m.Token = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << 
shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -868,6 +922,35 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1031,51 +1114,14 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Extra == nil { m.Extra = make(map[string]ExtraValue) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + mapvalue := &ExtraValue{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1085,46 +1131,85 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey 
= string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Extra[mapkey] = *mapvalue - } else { - var mapvalue ExtraValue - m.Extra[mapkey] = mapvalue } + m.Extra[mapkey] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -1257,45 +1342,47 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 635 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xcf, 0x4f, 0xd4, 0x40, - 0x14, 0x6e, 0xf7, 0x07, 0xee, 0xce, 0x8a, 0xe2, 0x24, 0x26, 0x9b, 0x4d, 0xec, 0xae, 0xeb, 0x85, - 0x44, 0x99, 0x0a, 0x21, 0x48, 0xf0, 0x64, 0x95, 0x18, 0x4c, 0x88, 0xc9, 0x08, 0x1e, 0xd4, 0x83, - 0xb3, 0xdd, 0x47, 0xb7, 0xae, 0xed, 0x34, 0xd3, 0x69, 0x95, 0x1b, 0x7f, 0x82, 0x47, 0x8f, 0x26, - 0xfe, 0x25, 0x26, 0x1e, 0x38, 0x72, 0xe4, 0x60, 0x88, 0xd4, 0x7f, 0xc4, 0xcc, 0x74, 0x64, 0x17, - 0x88, 0x01, 0x6e, 0xf3, 0xbe, 0xf7, 0xbe, 0x6f, 0xde, 0xf7, 0x66, 0x1e, 0x7a, 0x31, 0x5e, 0x4d, - 0x49, 0xc8, 0xdd, 0x71, 0x36, 0x00, 0x11, 0x83, 0x84, 0xd4, 0xcd, 0x21, 0x1e, 0x72, 0xe1, 0x9a, - 0x04, 0x4b, 0x42, 0x97, 0x65, 0x72, 0x04, 0xb1, 0x0c, 0x7d, 0x26, 0x43, 0x1e, 0xbb, 0xf9, 0xe2, - 0x00, 0x24, 0x5b, 0x74, 0x03, 0x88, 0x41, 0x30, 0x09, 0x43, 0x92, 0x08, 0x2e, 0x39, 0xbe, 0x5b, - 0x52, 0x08, 0x4b, 0x42, 0x72, 0x9a, 0x42, 0x0c, 0xa5, 0xb3, 0x10, 0x84, 0x72, 0x94, 0x0d, 0x88, - 0xcf, 0x23, 0x37, 0xe0, 0x01, 0x77, 0x35, 0x73, 0x90, 0xed, 0xe8, 0x48, 0x07, 0xfa, 0x54, 0x2a, - 0x76, 0x96, 0x27, 0x4d, 0x44, 0xcc, 0x1f, 0x85, 0x31, 0x88, 0x5d, 0x37, 0x19, 0x07, 0x0a, 0x48, - 0xdd, 0x08, 0x24, 0x73, 0xf3, 0x73, 0x7d, 0x74, 0xdc, 0xff, 0xb1, 0x44, 0x16, 0xcb, 0x30, 0x82, - 0x73, 0x84, 0x95, 0x8b, 0x08, 0xa9, 0x3f, 0x82, 0x88, 0x9d, 0xe5, 0xf5, 0x1f, 0x21, 0xb4, 0xfe, - 0x59, 0x0a, 0xf6, 0x9a, 0x7d, 0xcc, 0x00, 0x77, 0x51, 0x3d, 0x94, 0x10, 0xa5, 0x6d, 0xbb, 0x57, - 0x9d, 0x6f, 0x7a, 0xcd, 0xe2, 0xa8, 0x5b, 0xdf, 0x50, 0x00, 0x2d, 0xf1, 0xb5, 0xc6, 0xd7, 0x6f, - 0x5d, 0x6b, 0xef, 0x57, 0xcf, 0xea, 0x7f, 0xaf, 0xa0, 0xd6, 0x16, 0x1f, 0x43, 0x4c, 0x21, 0x0f, - 0xe1, 0x13, 0x7e, 0x8f, 0x1a, 0xca, 0xcc, 0x90, 0x49, 0xd6, 0xb6, 0x7b, 0xf6, 0x7c, 0x6b, 0xe9, - 0x21, 0x99, 0x0c, 0xf3, 0xa4, 0x27, 0x92, 0x8c, 0x03, 0x05, 0xa4, 0x44, 0x55, 0x93, 0x7c, 0x91, - 0xbc, 0x1c, 0x7c, 0x00, 0x5f, 0x6e, 0x82, 0x64, 0x1e, 0xde, 
0x3f, 0xea, 0x5a, 0xc5, 0x51, 0x17, - 0x4d, 0x30, 0x7a, 0xa2, 0x8a, 0xb7, 0x50, 0x2d, 0x4d, 0xc0, 0x6f, 0x57, 0xb4, 0xfa, 0x12, 0xb9, - 0xf0, 0xa9, 0xc8, 0x54, 0x7f, 0xaf, 0x12, 0xf0, 0xbd, 0xeb, 0x46, 0xbf, 0xa6, 0x22, 0xaa, 0xd5, - 0xf0, 0x3b, 0x34, 0x93, 0x4a, 0x26, 0xb3, 0xb4, 0x5d, 0xd5, 0xba, 0xcb, 0x57, 0xd4, 0xd5, 0x5c, - 0xef, 0x86, 0x51, 0x9e, 0x29, 0x63, 0x6a, 0x34, 0xfb, 0x2b, 0xe8, 0xe6, 0x99, 0x26, 0xf0, 0x3d, - 0x54, 0x97, 0x0a, 0xd2, 0x53, 0x6a, 0x7a, 0xb3, 0x86, 0x59, 0x2f, 0xeb, 0xca, 0x5c, 0xff, 0xa7, - 0x8d, 0x6e, 0x9d, 0xbb, 0x05, 0x3f, 0x46, 0xb3, 0x53, 0x1d, 0xc1, 0x50, 0x4b, 0x34, 0xbc, 0xdb, - 0x46, 0x62, 0xf6, 0xc9, 0x74, 0x92, 0x9e, 0xae, 0xc5, 0x9b, 0xa8, 0x96, 0xa5, 0x20, 0xcc, 0xf8, - 0xee, 0x5f, 0xc2, 0xe6, 0x76, 0x0a, 0x62, 0x23, 0xde, 0xe1, 0x93, 0xb9, 0x29, 0x84, 0x6a, 0x19, - 0x65, 0x03, 0x84, 0xe0, 0x42, 0x8f, 0x6d, 0xca, 0xc6, 0xba, 0x02, 0x69, 0x99, 0xeb, 0xff, 0xa8, - 0xa0, 0xc6, 0x3f, 0x15, 0xfc, 0x00, 0x35, 0x14, 0x33, 0x66, 0x11, 0x18, 0xef, 0x73, 0x86, 0xa4, - 0x6b, 0x14, 0x4e, 0x4f, 0x2a, 0xf0, 0x1d, 0x54, 0xcd, 0xc2, 0xa1, 0xee, 0xb6, 0xe9, 0xb5, 0x4c, - 0x61, 0x75, 0x7b, 0xe3, 0x19, 0x55, 0x38, 0xee, 0xa3, 0x99, 0x40, 0xf0, 0x2c, 0x51, 0xcf, 0xa6, - 0xbe, 0x2a, 0x52, 0xc3, 0x7f, 0xae, 0x11, 0x6a, 0x32, 0xf8, 0x2d, 0xaa, 0x83, 0xfa, 0xdb, 0xed, - 0x5a, 0xaf, 0x3a, 0xdf, 0x5a, 0x5a, 0xb9, 0x82, 0x65, 0xa2, 0x97, 0x62, 0x3d, 0x96, 0x62, 0x77, - 0xca, 0x9a, 0xc2, 0x68, 0xa9, 0xd9, 0x09, 0xcc, 0xe2, 0xe8, 0x1a, 0x3c, 0x87, 0xaa, 0x63, 0xd8, - 0x2d, 0x6d, 0x51, 0x75, 0xc4, 0x4f, 0x51, 0x3d, 0x57, 0x3b, 0x65, 0xe6, 0xbd, 0x70, 0x89, 0xcb, - 0x27, 0x8b, 0x48, 0x4b, 0xee, 0x5a, 0x65, 0xd5, 0xf6, 0x16, 0xf6, 0x8f, 0x1d, 0xeb, 0xe0, 0xd8, - 0xb1, 0x0e, 0x8f, 0x1d, 0x6b, 0xaf, 0x70, 0xec, 0xfd, 0xc2, 0xb1, 0x0f, 0x0a, 0xc7, 0x3e, 0x2c, - 0x1c, 0xfb, 0x77, 0xe1, 0xd8, 0x5f, 0xfe, 0x38, 0xd6, 0x9b, 0x6b, 0x46, 0xe4, 0x6f, 0x00, 0x00, - 0x00, 0xff, 0xff, 0x39, 0x00, 0xe7, 0xfa, 0x0e, 0x05, 0x00, 0x00, + // 663 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xcd, 0x4e, 0x14, 0x4d, + 0x14, 0xed, 0x9e, 0x1f, 0xbe, 0x99, 0x9a, 0x6f, 0x14, 0x2b, 0x31, 0x99, 0x4c, 0x62, 0x0f, 0x8e, + 0x1b, 0x12, 0xa4, 0x5a, 0x08, 0x41, 0x82, 0x2b, 0x5a, 0x89, 0xc1, 0x84, 0x98, 0x94, 0xe0, 0x42, + 0x5d, 0x58, 0xd3, 0x73, 0xe9, 0x69, 0xc7, 0xfe, 0x49, 0x55, 0xf5, 0x28, 0x3b, 0x1e, 0xc1, 0xa5, + 0x4b, 0x13, 0x9f, 0xc4, 0x1d, 0x4b, 0x96, 0x2c, 0xcc, 0x44, 0xda, 0x27, 0xf0, 0x0d, 0x4c, 0x55, + 0x17, 0xcc, 0x00, 0x31, 0xc0, 0xae, 0xeb, 0xdc, 0x7b, 0xce, 0x3d, 0xf7, 0x54, 0x17, 0x7a, 0x31, + 0x5c, 0x13, 0x24, 0x4c, 0xdc, 0x61, 0xd6, 0x03, 0x1e, 0x83, 0x04, 0xe1, 0x8e, 0x20, 0xee, 0x27, + 0xdc, 0x35, 0x05, 0x96, 0x86, 0x2e, 0xcb, 0xe4, 0x00, 0x62, 0x19, 0xfa, 0x4c, 0x86, 0x49, 0xec, + 0x8e, 0x96, 0x7a, 0x20, 0xd9, 0x92, 0x1b, 0x40, 0x0c, 0x9c, 0x49, 0xe8, 0x93, 0x94, 0x27, 0x32, + 0xc1, 0xf7, 0x0b, 0x0a, 0x61, 0x69, 0x48, 0xce, 0x53, 0x88, 0xa1, 0xb4, 0x17, 0x83, 0x50, 0x0e, + 0xb2, 0x1e, 0xf1, 0x93, 0xc8, 0x0d, 0x92, 0x20, 0x71, 0x35, 0xb3, 0x97, 0xed, 0xe9, 0x93, 0x3e, + 0xe8, 0xaf, 0x42, 0xb1, 0xbd, 0x32, 0x31, 0x11, 0x31, 0x7f, 0x10, 0xc6, 0xc0, 0xf7, 0xdd, 0x74, + 0x18, 0x28, 0x40, 0xb8, 0x11, 0x48, 0xe6, 0x8e, 0x2e, 0xf9, 0x68, 0xbb, 0xff, 0x62, 0xf1, 0x2c, + 0x96, 0x61, 0x04, 0x97, 0x08, 0xab, 0x57, 0x11, 0x84, 0x3f, 0x80, 0x88, 0x5d, 0xe4, 0x75, 0x1f, + 0x23, 0xb4, 0xf9, 0x59, 0x72, 0xf6, 0x9a, 0x7d, 0xcc, 0x00, 0x77, 0x50, 0x35, 0x94, 0x10, 0x89, + 0x96, 0x3d, 0x57, 0x9e, 0xaf, 0x7b, 0xf5, 0x7c, 0xdc, 0xa9, 0x6e, 
0x29, 0x80, 0x16, 0xf8, 0x7a, + 0xed, 0xeb, 0xb7, 0x8e, 0x75, 0xf0, 0x73, 0xce, 0xea, 0x7e, 0x2f, 0xa1, 0xc6, 0x4e, 0x32, 0x84, + 0x98, 0xc2, 0x28, 0x84, 0x4f, 0xf8, 0x3d, 0xaa, 0xa9, 0x65, 0xfa, 0x4c, 0xb2, 0x96, 0x3d, 0x67, + 0xcf, 0x37, 0x96, 0x1f, 0x91, 0x49, 0x98, 0x67, 0x9e, 0x48, 0x3a, 0x0c, 0x14, 0x20, 0x88, 0xea, + 0x26, 0xa3, 0x25, 0xf2, 0xb2, 0xf7, 0x01, 0x7c, 0xb9, 0x0d, 0x92, 0x79, 0xf8, 0x70, 0xdc, 0xb1, + 0xf2, 0x71, 0x07, 0x4d, 0x30, 0x7a, 0xa6, 0x8a, 0x77, 0x50, 0x45, 0xa4, 0xe0, 0xb7, 0x4a, 0x5a, + 0x7d, 0x99, 0x5c, 0x79, 0x55, 0x64, 0xca, 0xdf, 0xab, 0x14, 0x7c, 0xef, 0x7f, 0xa3, 0x5f, 0x51, + 0x27, 0xaa, 0xd5, 0xf0, 0x3b, 0x34, 0x23, 0x24, 0x93, 0x99, 0x68, 0x95, 0xb5, 0xee, 0xca, 0x0d, + 0x75, 0x35, 0xd7, 0xbb, 0x65, 0x94, 0x67, 0x8a, 0x33, 0x35, 0x9a, 0x5d, 0x1f, 0xdd, 0xbe, 0x60, + 0x02, 0x3f, 0x40, 0x55, 0xa9, 0x20, 0x9d, 0x52, 0xdd, 0x6b, 0x1a, 0x66, 0xb5, 0xe8, 0x2b, 0x6a, + 0x78, 0x01, 0xd5, 0x59, 0xd6, 0x0f, 0x21, 0xf6, 0x41, 0xb4, 0x4a, 0xfa, 0x32, 0x9a, 0xf9, 0xb8, + 0x53, 0xdf, 0x38, 0x05, 0xe9, 0xa4, 0xde, 0xfd, 0x63, 0xa3, 0x3b, 0x97, 0x2c, 0xe1, 0x27, 0xa8, + 0x39, 0x65, 0x1f, 0xfa, 0x7a, 0x5e, 0xcd, 0xbb, 0x6b, 0xe6, 0x35, 0x37, 0xa6, 0x8b, 0xf4, 0x7c, + 0x2f, 0xde, 0x46, 0x95, 0x4c, 0x00, 0x37, 0x59, 0x2f, 0x5c, 0x23, 0x93, 0x5d, 0x01, 0x7c, 0x2b, + 0xde, 0x4b, 0x26, 0x21, 0x2b, 0x84, 0x6a, 0x19, 0xb5, 0x33, 0x70, 0x9e, 0x70, 0x9d, 0xf1, 0xd4, + 0xce, 0x9b, 0x0a, 0xa4, 0x45, 0xed, 0xfc, 0xce, 0x95, 0x2b, 0x76, 0xfe, 0x51, 0x42, 0xb5, 0xd3, + 0x91, 0xf8, 0x21, 0xaa, 0xa9, 0x31, 0x31, 0x8b, 0xc0, 0xa4, 0x3a, 0x6b, 0x26, 0xe8, 0x1e, 0x85, + 0xd3, 0xb3, 0x0e, 0x7c, 0x0f, 0x95, 0xb3, 0xb0, 0xaf, 0x57, 0xab, 0x7b, 0x0d, 0xd3, 0x58, 0xde, + 0xdd, 0x7a, 0x46, 0x15, 0x8e, 0xbb, 0x68, 0x26, 0xe0, 0x49, 0x96, 0xaa, 0x1f, 0x42, 0x79, 0x40, + 0xea, 0x5a, 0x9f, 0x6b, 0x84, 0x9a, 0x0a, 0x7e, 0x8b, 0xaa, 0xa0, 0x5e, 0x8d, 0xb6, 0xd9, 0x58, + 0x5e, 0xbd, 0x41, 0x3e, 0x44, 0x3f, 0xb7, 0xcd, 0x58, 0xf2, 0xfd, 0xa9, 0x1c, 0x14, 0x46, 0x0b, + 0xcd, 0x76, 0x60, 0x9e, 0xa4, 0xee, 0xc1, 0xb3, 0xa8, 0x3c, 0x84, 0xfd, 0x62, 0x2d, 0xaa, 0x3e, + 0xf1, 0x53, 0x54, 0x1d, 0xa9, 0xd7, 0x6a, 0x2e, 0x67, 0xf1, 0x1a, 0xc3, 0x27, 0x4f, 0x9c, 0x16, + 0xdc, 0xf5, 0xd2, 0x9a, 0xed, 0x2d, 0x1e, 0x9e, 0x38, 0xd6, 0xd1, 0x89, 0x63, 0x1d, 0x9f, 0x38, + 0xd6, 0x41, 0xee, 0xd8, 0x87, 0xb9, 0x63, 0x1f, 0xe5, 0x8e, 0x7d, 0x9c, 0x3b, 0xf6, 0xaf, 0xdc, + 0xb1, 0xbf, 0xfc, 0x76, 0xac, 0x37, 0xff, 0x19, 0x91, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf7, + 0xd6, 0x32, 0x28, 0x68, 0x05, 0x00, 0x00, } diff --git a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1beta1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1beta1/generated.proto index a057bc591cf3..caf2a6a53af4 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1beta1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1beta1/generated.proto @@ -57,6 +57,14 @@ message TokenReviewSpec { // Token is the opaque bearer token. // +optional optional string token = 1; + + // Audiences is a list of the identifiers that the resource server presented + // with the token identifies as. Audience-aware token authenticators will + // verify that the token was intended for at least one of the audiences in + // this list. If no audiences are provided, the audience will default to the + // audience of the Kubernetes apiserver. + // +optional + repeated string audiences = 2; } // TokenReviewStatus is the result of the token authentication request. 
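(Editor's aside: the audience semantics documented in the hunk above — and in the matching v1 hunks earlier — are easiest to see from the client side. Below is a minimal sketch assuming only the vendored v1beta1 types; `compatibleAudience` is an illustrative helper, not part of the API, and in real use the status would be populated by the apiserver rather than by hand:)

```go
package main

import (
	"fmt"

	authv1beta1 "k8s.io/api/authentication/v1beta1"
)

// compatibleAudience reports whether the authenticator confirmed one of
// the audiences the client asked about. Per the field docs above, an
// empty status.audiences with authenticated=true means the token was
// only validated against the apiserver's own audience, so a client that
// requested specific audiences should not accept it.
func compatibleAudience(tr *authv1beta1.TokenReview) bool {
	if !tr.Status.Authenticated {
		return false
	}
	if len(tr.Status.Audiences) == 0 {
		return len(tr.Spec.Audiences) == 0
	}
	want := map[string]bool{}
	for _, a := range tr.Spec.Audiences {
		want[a] = true
	}
	for _, a := range tr.Status.Audiences {
		if want[a] {
			return true
		}
	}
	return false
}

func main() {
	tr := &authv1beta1.TokenReview{
		Spec: authv1beta1.TokenReviewSpec{
			Token:     "opaque-bearer-token",
			Audiences: []string{"https://svc.example.com"},
		},
	}
	// Filled in by hand here purely to exercise the check.
	tr.Status = authv1beta1.TokenReviewStatus{
		Authenticated: true,
		Audiences:     []string{"https://svc.example.com"},
	}
	fmt.Println(compatibleAudience(tr)) // true
}
```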
@@ -69,6 +77,18 @@ message TokenReviewStatus { // +optional optional UserInfo user = 2; + // Audiences are audience identifiers chosen by the authenticator that are + // compatible with both the TokenReview and token. An identifier is any + // identifier in the intersection of the TokenReviewSpec audiences and the + // token's audiences. A client of the TokenReview API that sets the + // spec.audiences field should validate that a compatible audience identifier + // is returned in the status.audiences field to ensure that the TokenReview + // server is audience aware. If a TokenReview returns an empty + // status.audience field where status.authenticated is "true", the token is + // valid against the audience of the Kubernetes API server. + // +optional + repeated string audiences = 4; + // Error indicates that the token couldn't be checked // +optional optional string error = 3; diff --git a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1beta1/types.go b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1beta1/types.go index a90949dc37de..0b6cba822a26 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1beta1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1beta1/types.go @@ -48,6 +48,13 @@ type TokenReviewSpec struct { // Token is the opaque bearer token. // +optional Token string `json:"token,omitempty" protobuf:"bytes,1,opt,name=token"` + // Audiences is a list of the identifiers that the resource server presented + // with the token identifies as. Audience-aware token authenticators will + // verify that the token was intended for at least one of the audiences in + // this list. If no audiences are provided, the audience will default to the + // audience of the Kubernetes apiserver. + // +optional + Audiences []string `json:"audiences,omitempty" protobuf:"bytes,2,rep,name=audiences"` } // TokenReviewStatus is the result of the token authentication request. @@ -58,6 +65,17 @@ type TokenReviewStatus struct { // User is the UserInfo associated with the provided token. // +optional User UserInfo `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` + // Audiences are audience identifiers chosen by the authenticator that are + // compatible with both the TokenReview and token. An identifier is any + // identifier in the intersection of the TokenReviewSpec audiences and the + // token's audiences. A client of the TokenReview API that sets the + // spec.audiences field should validate that a compatible audience identifier + // is returned in the status.audiences field to ensure that the TokenReview + // server is audience aware. If a TokenReview returns an empty + // status.audience field where status.authenticated is "true", the token is + // valid against the audience of the Kubernetes API server. 
+ // +optional + Audiences []string `json:"audiences,omitempty" protobuf:"bytes,4,rep,name=audiences"` // Error indicates that the token couldn't be checked // +optional Error string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` diff --git a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go index 968999d1ebe6..8c9acfb5b249 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go @@ -38,8 +38,9 @@ func (TokenReview) SwaggerDoc() map[string]string { } var map_TokenReviewSpec = map[string]string{ - "": "TokenReviewSpec is a description of the token authentication request.", - "token": "Token is the opaque bearer token.", + "": "TokenReviewSpec is a description of the token authentication request.", + "token": "Token is the opaque bearer token.", + "audiences": "Audiences is a list of the identifiers that the resource server presented with the token identifies as. Audience-aware token authenticators will verify that the token was intended for at least one of the audiences in this list. If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver.", } func (TokenReviewSpec) SwaggerDoc() map[string]string { @@ -50,6 +51,7 @@ var map_TokenReviewStatus = map[string]string{ "": "TokenReviewStatus is the result of the token authentication request.", "authenticated": "Authenticated indicates that the token was associated with a known user.", "user": "User is the UserInfo associated with the provided token.", + "audiences": "Audiences are audience identifiers chosen by the authenticator that are compatible with both the TokenReview and token. An identifier is any identifier in the intersection of the TokenReviewSpec audiences and the token's audiences. A client of the TokenReview API that sets the spec.audiences field should validate that a compatible audience identifier is returned in the status.audiences field to ensure that the TokenReview server is audience aware. If a TokenReview returns an empty status.audience field where status.authenticated is \"true\", the token is valid against the audience of the Kubernetes API server.", "error": "Error indicates that the token couldn't be checked", } diff --git a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go index 3a5f6d5a9367..a5d82a8100a5 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go @@ -49,7 +49,7 @@ func (in *TokenReview) DeepCopyInto(out *TokenReview) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) return } @@ -75,6 +75,11 @@ func (in *TokenReview) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
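(Editor's aside: the function introduced by the comment above, together with the earlier change of `out.Spec = in.Spec` to `in.Spec.DeepCopyInto(&out.Spec)`, follows from one Go fact — once a struct gains a slice field, plain assignment shares the backing array. A short, self-contained illustration with a hypothetical `Spec` type, not the vendored one:)

```go
package main

import "fmt"

// Spec is a hypothetical type with a slice field, like the vendored
// TokenReviewSpec after the Audiences addition.
type Spec struct {
	Audiences []string
}

func main() {
	a := Spec{Audiences: []string{"aud1"}}

	shallow := a // plain assignment: both values share one backing array
	deep := Spec{Audiences: make([]string, len(a.Audiences))}
	copy(deep.Audiences, a.Audiences) // the generated deep-copy does this

	a.Audiences[0] = "mutated"
	fmt.Println(shallow.Audiences[0]) // "mutated" — aliased
	fmt.Println(deep.Audiences[0])    // "aud1"   — independent
}
```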
func (in *TokenReviewSpec) DeepCopyInto(out *TokenReviewSpec) { *out = *in + if in.Audiences != nil { + in, out := &in.Audiences, &out.Audiences + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -92,6 +97,11 @@ func (in *TokenReviewSpec) DeepCopy() *TokenReviewSpec { func (in *TokenReviewStatus) DeepCopyInto(out *TokenReviewStatus) { *out = *in in.User.DeepCopyInto(&out.User) + if in.Audiences != nil { + in, out := &in.Audiences, &out.Audiences + *out = make([]string, len(*in)) + copy(*out, *in) + } return } diff --git a/cluster-autoscaler/vendor/k8s.io/api/authorization/v1/doc.go b/cluster-autoscaler/vendor/k8s.io/api/authorization/v1/doc.go index 6fa0c3cbdd4c..5be4427e8638 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/authorization/v1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/api/authorization/v1/doc.go @@ -18,4 +18,5 @@ limitations under the License. // +k8s:openapi-gen=true // +groupName=authorization.k8s.io + package v1 diff --git a/cluster-autoscaler/vendor/k8s.io/api/authorization/v1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/authorization/v1/generated.pb.go index e9145af0261d..fc6a25f62999 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/authorization/v1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/authorization/v1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto -// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. @@ -795,24 +794,6 @@ func (m *SubjectRulesReviewStatus) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -2888,51 +2869,14 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Extra == nil { m.Extra = make(map[string]ExtraValue) } - if 
iNdEx < postIndex { - var valuekey uint64 + var mapkey string + mapvalue := &ExtraValue{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2942,46 +2886,85 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Extra[mapkey] = *mapvalue - } else { - var mapvalue ExtraValue - m.Extra[mapkey] = mapvalue } + m.Extra[mapkey] = *mapvalue iNdEx = postIndex case 6: if wireType != 2 { diff --git a/cluster-autoscaler/vendor/k8s.io/api/authorization/v1beta1/doc.go b/cluster-autoscaler/vendor/k8s.io/api/authorization/v1beta1/doc.go index e609a445e396..88afc0fe7c72 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/authorization/v1beta1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/api/authorization/v1beta1/doc.go @@ -18,4 +18,5 @@ limitations under the License. 
// +k8s:openapi-gen=true // +groupName=authorization.k8s.io + package v1beta1 diff --git a/cluster-autoscaler/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go index 75ee6cf918fc..7cce98eb1952 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. @@ -795,24 +794,6 @@ func (m *SubjectRulesReviewStatus) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -2888,51 +2869,14 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Extra == nil { m.Extra = make(map[string]ExtraValue) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + mapvalue := &ExtraValue{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2942,46 +2886,85 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Extra[mapkey] = *mapvalue - } else { - var mapvalue ExtraValue - m.Extra[mapkey] = mapvalue } + m.Extra[mapkey] = *mapvalue iNdEx = postIndex case 6: if wireType != 2 { diff --git a/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v1/generated.pb.go index 47a46a5574ee..950e93340d68 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto -// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. 
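Stepping out of the generated-code churn for a moment: the audiences fields added to authentication/v1beta1 near the top of this section ask callers that set spec.audiences to confirm a compatible identifier comes back in status.audiences, proving the TokenReview server is audience-aware. A minimal sketch of that client-side check, assuming the vendored k8s.io/api package resolves on the import path; validAudience is an illustrative helper, not part of the API:

```go
package main

import (
	"fmt"

	authv1beta1 "k8s.io/api/authentication/v1beta1"
)

// validAudience implements the check the new field comments recommend:
// when spec.audiences was set, at least one requested audience must appear
// in status.audiences for the result to be trusted as audience-aware.
func validAudience(tr *authv1beta1.TokenReview) bool {
	if len(tr.Spec.Audiences) == 0 {
		return true // no audience requested, nothing to cross-check
	}
	for _, want := range tr.Spec.Audiences {
		for _, got := range tr.Status.Audiences {
			if want == got {
				return true
			}
		}
	}
	return false
}

func main() {
	tr := &authv1beta1.TokenReview{
		Spec: authv1beta1.TokenReviewSpec{
			Audiences: []string{"https://svc.example.com"},
		},
		Status: authv1beta1.TokenReviewStatus{
			Authenticated: true,
			Audiences:     []string{"https://svc.example.com"},
		},
	}
	fmt.Println(validAudience(tr)) // true
}
```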
@@ -996,24 +995,6 @@ func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go index a6e874f3da94..72ac97271218 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go @@ -196,8 +196,8 @@ func (PodsMetricStatus) SwaggerDoc() map[string]string { } var map_ResourceMetricSource = map[string]string{ - "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", - "name": "name is the name of the resource in question.", + "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", + "name": "name is the name of the resource in question.", "targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", "targetAverageValue": "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.", } @@ -207,8 +207,8 @@ func (ResourceMetricSource) SwaggerDoc() map[string]string { } var map_ResourceMetricStatus = map[string]string{ - "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "name is the name of the resource in question.", + "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "name": "name is the name of the resource in question.", "currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.", "currentAverageValue": "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.", } diff --git a/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go index bee94129d8ef..b6a5f35629b9 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto -// DO NOT EDIT! /* Package v2beta1 is a generated protocol buffer package. @@ -916,24 +915,6 @@ func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go index 411b817d04ec..589408ace0ce 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go @@ -197,8 +197,8 @@ func (PodsMetricStatus) SwaggerDoc() map[string]string { } var map_ResourceMetricSource = map[string]string{ - "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). 
The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", - "name": "name is the name of the resource in question.", + "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", + "name": "name is the name of the resource in question.", "targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", "targetAverageValue": "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.", } @@ -208,8 +208,8 @@ func (ResourceMetricSource) SwaggerDoc() map[string]string { } var map_ResourceMetricStatus = map[string]string{ - "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "name is the name of the resource in question.", + "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "name": "name is the name of the resource in question.", "currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.", "currentAverageValue": "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.", } diff --git a/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go index be752a140251..816fea9d532a 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
// source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto -// DO NOT EDIT! /* Package v2beta2 is a generated protocol buffer package. @@ -966,24 +965,6 @@ func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto index 38f099ae74b3..b4e4c95a3b1a 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto @@ -259,31 +259,37 @@ message MetricTarget { optional string type = 1; // value is the target value of the metric (as a quantity). + // +optional optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) + // +optional optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 3; // averageUtilization is the target value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. // Currently only valid for Resource metric source type + // +optional optional int32 averageUtilization = 4; } // MetricValueStatus holds the current value for a metric message MetricValueStatus { // value is the current value of the metric (as a quantity). + // +optional optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 1; // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) + // +optional optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 2; // currentAverageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. + // +optional optional int32 averageUtilization = 3; } diff --git a/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta2/types.go b/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta2/types.go index e873971f3fbe..2d337953740e 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta2/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/autoscaling/v2beta2/types.go @@ -200,16 +200,18 @@ type MetricTarget struct { // type represents whether the metric type is Utilization, Value, or AverageValue Type MetricTargetType `json:"type" protobuf:"bytes,1,name=type"` // value is the target value of the metric (as a quantity). 
- Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,2,name=value"` + // +optional + Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) - AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,3,name=averageValue"` - + // +optional + AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,3,opt,name=averageValue"` // averageUtilization is the target value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. // Currently only valid for Resource metric source type - AverageUtilization *int32 `json:"averageUtilization,omitempty" protobuf:"bytes,4,name=averageUtilization"` + // +optional + AverageUtilization *int32 `json:"averageUtilization,omitempty" protobuf:"bytes,4,opt,name=averageUtilization"` } // MetricTargetType specifies the type of metric being targeted, and should be either @@ -364,14 +366,17 @@ type ExternalMetricStatus struct { // MetricValueStatus holds the current value for a metric type MetricValueStatus struct { // value is the current value of the metric (as a quantity). - Value *resource.Quantity `json:"value" protobuf:"bytes,1,name=value"` + // +optional + Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,1,opt,name=value"` // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) - AverageValue *resource.Quantity `json:"averageValue" protobuf:"bytes,2,name=averageValue"` + // +optional + AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,2,opt,name=averageValue"` // currentAverageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. - AverageUtilization *int32 `json:"averageUtilization" protobuf:"bytes,3,name=averageUtilization"` + // +optional + AverageUtilization *int32 `json:"averageUtilization,omitempty" protobuf:"bytes,3,opt,name=averageUtilization"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/cluster-autoscaler/vendor/k8s.io/api/batch/v1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/batch/v1/generated.pb.go index 097a6ff28c57..3aa32b578461 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/batch/v1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/batch/v1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto -// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. 
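The autoscaling/v2beta2 hunks above relax MetricTarget and MetricValueStatus so that value, averageValue, and averageUtilization are all optional pointers; a Utilization target can now set only the percentage and leave both quantities nil. A short sketch, under the same assumption that the vendored k8s.io/api module is importable:

```go
package main

import (
	"fmt"

	autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
)

func main() {
	// Only averageUtilization is set; Value and AverageValue stay nil,
	// which the regenerated struct tags now serialize as omitted fields.
	utilization := int32(80)
	target := autoscalingv2beta2.MetricTarget{
		Type:               autoscalingv2beta2.UtilizationMetricType,
		AverageUtilization: &utilization,
	}
	fmt.Printf("type=%s averageUtilization=%d%%\n", target.Type, *target.AverageUtilization)
}
```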
@@ -343,24 +342,6 @@ func (m *JobStatus) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/cluster-autoscaler/vendor/k8s.io/api/batch/v1beta1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/batch/v1beta1/generated.pb.go index ece2204f932f..36342a3af1a3 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/batch/v1beta1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/batch/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. @@ -336,24 +335,6 @@ func (m *JobTemplateSpec) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/cluster-autoscaler/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go index 6ab41ebbcb3a..4d9ba5c00045 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto -// DO NOT EDIT! /* Package v2alpha1 is a generated protocol buffer package. 
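Related to the authentication/v1beta1 change: its zz_generated.deepcopy.go hunk earlier in this diff now allocates a fresh slice for the new Audiences field, and TokenReview.DeepCopyInto switched from a plain struct assignment to Spec.DeepCopyInto for the same reason. A quick sketch of the aliasing bug that copy prevents, again assuming the vendored package builds:

```go
package main

import (
	"fmt"

	authv1beta1 "k8s.io/api/authentication/v1beta1"
)

func main() {
	in := authv1beta1.TokenReviewSpec{Audiences: []string{"https://svc.example.com"}}
	var out authv1beta1.TokenReviewSpec
	in.DeepCopyInto(&out)

	// With the regenerated deep copy, out owns its own backing array, so
	// mutating the copy leaves the original untouched. A shallow
	// assignment would have shared the slice.
	out.Audiences[0] = "mutated"
	fmt.Println(in.Audiences[0]) // still "https://svc.example.com"
}
```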
@@ -336,24 +335,6 @@ func (m *JobTemplateSpec) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/cluster-autoscaler/vendor/k8s.io/api/certificates/v1beta1/doc.go b/cluster-autoscaler/vendor/k8s.io/api/certificates/v1beta1/doc.go index 8250a24e484b..771a12940cd4 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/certificates/v1beta1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/api/certificates/v1beta1/doc.go @@ -18,4 +18,5 @@ limitations under the License. // +k8s:openapi-gen=true // +groupName=certificates.k8s.io + package v1beta1 diff --git a/cluster-autoscaler/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go index eda1599005a3..19bf225fa31a 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. 
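The SubjectAccessReviewSpec rewrites above (and the CertificateSigningRequestSpec one just below) all apply the same regenerated map-decoding pattern: instead of insisting on a key varint followed by a value message in fixed order, the new loop reads each field of the map entry, dispatches on its field number, and only commits the (key, value) pair once the entry is exhausted. A minimal, self-contained sketch of that pattern for a string-to-string entry; readVarint and decodeMapEntry are illustrative names, and wire-type validation plus unknown-field skipping are simplified away:

```go
package main

import (
	"errors"
	"fmt"
)

// readVarint decodes a base-128 protobuf varint starting at data[i],
// returning the value and the index just past it.
func readVarint(data []byte, i int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); shift < 64; shift += 7 {
		if i >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[i]
		i++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, i, nil
		}
	}
	return 0, 0, errors.New("varint overflow")
}

// decodeMapEntry mirrors the regenerated pattern: loop over the entry and
// dispatch on field number, so missing or reordered key/value fields no
// longer derail decoding. Both fields here are length-delimited strings.
func decodeMapEntry(entry []byte) (string, string, error) {
	var key, value string
	i := 0
	for i < len(entry) {
		wire, next, err := readVarint(entry, i)
		if err != nil {
			return "", "", err
		}
		i = next
		n, next, err := readVarint(entry, i) // length of the field payload
		if err != nil {
			return "", "", err
		}
		i = next
		if i+int(n) > len(entry) {
			return "", "", errors.New("unexpected EOF")
		}
		s := string(entry[i : i+int(n)])
		i += int(n)
		switch wire >> 3 { // field number
		case 1:
			key = s
		case 2:
			value = s
		}
	}
	return key, value, nil
}

func main() {
	// Entry encoding "group" -> "system:masters":
	// tag 0x0a = field 1, wire type 2; tag 0x12 = field 2, wire type 2.
	entry := append([]byte{0x0a, 5}, "group"...)
	entry = append(entry, 0x12, 14)
	entry = append(entry, "system:masters"...)
	fmt.Println(decodeMapEntry(entry)) // group system:masters <nil>
}
```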
@@ -378,24 +377,6 @@ func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1221,51 +1202,14 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Extra == nil { m.Extra = make(map[string]ExtraValue) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + mapvalue := &ExtraValue{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1275,46 +1219,85 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return 
ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Extra[mapkey] = *mapvalue - } else { - var mapvalue ExtraValue - m.Extra[mapkey] = mapvalue } + m.Extra[mapkey] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex diff --git a/cluster-autoscaler/vendor/k8s.io/api/coordination/v1beta1/doc.go b/cluster-autoscaler/vendor/k8s.io/api/coordination/v1beta1/doc.go index da5c8e128a71..cf14fc6834fb 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/coordination/v1beta1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/api/coordination/v1beta1/doc.go @@ -18,4 +18,5 @@ limitations under the License. // +k8s:openapi-gen=true // +groupName=coordination.k8s.io + package v1beta1 diff --git a/cluster-autoscaler/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go index 6c2dbd91f9a9..aa57e9dd6442 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. @@ -196,24 +195,6 @@ func (m *LeaseSpec) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/cluster-autoscaler/vendor/k8s.io/api/core/v1/annotation_key_constants.go b/cluster-autoscaler/vendor/k8s.io/api/core/v1/annotation_key_constants.go index 16a0cfced13e..2c72ec2df2d6 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/core/v1/annotation_key_constants.go +++ b/cluster-autoscaler/vendor/k8s.io/api/core/v1/annotation_key_constants.go @@ -78,4 +78,23 @@ const ( // // Not all cloud providers support this annotation, though AWS & GCE do. 
AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges" + + // EndpointsLastChangeTriggerTime is the annotation key, set for endpoints objects, that + // represents the timestamp (stored as RFC 3339 date-time string, e.g. '2018-10-22T19:32:52.1Z') + // of the last change, of some Pod or Service object, that triggered the endpoints object change. + // In other words, if a Pod / Service changed at time T0, that change was observed by endpoints + // controller at T1, and the Endpoints object was changed at T2, the + // EndpointsLastChangeTriggerTime would be set to T0. + // + // The "endpoints change trigger" here means any Pod or Service change that resulted in the + // Endpoints object change. + // + // Given the definition of the "endpoints change trigger", please note that this annotation will + // be set ONLY for endpoints object changes triggered by either Pod or Service change. If the + // Endpoints object changes due to other reasons, this annotation won't be set (or updated if it's + // already set). + // + // This annotation will be used to compute the in-cluster network programming latency SLI, see + // https://github.com/kubernetes/community/blob/master/sig-scalability/slos/network_programming_latency.md + EndpointsLastChangeTriggerTime = "endpoints.kubernetes.io/last-change-trigger-time" ) diff --git a/cluster-autoscaler/vendor/k8s.io/api/core/v1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/core/v1/generated.pb.go index 54803b48c3d1..05cc6d62844c 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/core/v1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto -// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. @@ -82,6 +81,7 @@ limitations under the License. 
FlockerVolumeSource GCEPersistentDiskVolumeSource GitRepoVolumeSource + GlusterfsPersistentVolumeSource GlusterfsVolumeSource HTTPGetAction HTTPHeader @@ -499,604 +499,610 @@ func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSou func (*GitRepoVolumeSource) ProtoMessage() {} func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } +func (m *GlusterfsPersistentVolumeSource) Reset() { *m = GlusterfsPersistentVolumeSource{} } +func (*GlusterfsPersistentVolumeSource) ProtoMessage() {} +func (*GlusterfsPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{57} +} + func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } func (*GlusterfsVolumeSource) ProtoMessage() {} -func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } +func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{58} } func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } func (*HTTPGetAction) ProtoMessage() {} -func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{58} } +func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} } func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } func (*HTTPHeader) ProtoMessage() {} -func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} } +func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } func (m *Handler) Reset() { *m = Handler{} } func (*Handler) ProtoMessage() {} -func (*Handler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } +func (*Handler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{61} } func (m *HostAlias) Reset() { *m = HostAlias{} } func (*HostAlias) ProtoMessage() {} -func (*HostAlias) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{61} } +func (*HostAlias) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{62} } func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } func (*HostPathVolumeSource) ProtoMessage() {} -func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{62} } +func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{63} } func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} } func (*ISCSIPersistentVolumeSource) ProtoMessage() {} func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{63} + return fileDescriptorGenerated, []int{64} } func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } func (*ISCSIVolumeSource) ProtoMessage() {} -func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{64} } +func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{65} } func (m *KeyToPath) Reset() { *m = KeyToPath{} } func (*KeyToPath) ProtoMessage() {} -func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{65} } +func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} } func (m *Lifecycle) Reset() { *m = Lifecycle{} } func (*Lifecycle) ProtoMessage() {} -func (*Lifecycle) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} } +func (*Lifecycle) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{67} } func (m *LimitRange) Reset() { *m = LimitRange{} } func (*LimitRange) ProtoMessage() {} -func (*LimitRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{67} } +func (*LimitRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } func (*LimitRangeItem) ProtoMessage() {} -func (*LimitRangeItem) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } +func (*LimitRangeItem) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} } func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } func (*LimitRangeList) ProtoMessage() {} -func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} } +func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} } func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } func (*LimitRangeSpec) ProtoMessage() {} -func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} } +func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} } func (m *List) Reset() { *m = List{} } func (*List) ProtoMessage() {} -func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} } +func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{72} } func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } func (*LoadBalancerIngress) ProtoMessage() {} -func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{72} } +func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{73} } func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } func (*LoadBalancerStatus) ProtoMessage() {} -func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{73} } +func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{74} } func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } func (*LocalObjectReference) ProtoMessage() {} -func (*LocalObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{74} } +func (*LocalObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{75} } func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} } func (*LocalVolumeSource) ProtoMessage() {} -func (*LocalVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{75} } +func (*LocalVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{76} } func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } func (*NFSVolumeSource) ProtoMessage() {} -func (*NFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{76} } +func (*NFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{77} } func (m *Namespace) Reset() { *m = Namespace{} } func (*Namespace) ProtoMessage() {} -func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{77} } +func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{78} } func (m *NamespaceList) Reset() { *m = NamespaceList{} } func (*NamespaceList) ProtoMessage() {} -func (*NamespaceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{78} } +func (*NamespaceList) Descriptor() ([]byte, 
[]int) { return fileDescriptorGenerated, []int{79} } func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } func (*NamespaceSpec) ProtoMessage() {} -func (*NamespaceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{79} } +func (*NamespaceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{80} } func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } func (*NamespaceStatus) ProtoMessage() {} -func (*NamespaceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{80} } +func (*NamespaceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{81} } func (m *Node) Reset() { *m = Node{} } func (*Node) ProtoMessage() {} -func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{81} } +func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{82} } func (m *NodeAddress) Reset() { *m = NodeAddress{} } func (*NodeAddress) ProtoMessage() {} -func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{82} } +func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{83} } func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } func (*NodeAffinity) ProtoMessage() {} -func (*NodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{83} } +func (*NodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{84} } func (m *NodeCondition) Reset() { *m = NodeCondition{} } func (*NodeCondition) ProtoMessage() {} -func (*NodeCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{84} } +func (*NodeCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{85} } func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } func (*NodeConfigSource) ProtoMessage() {} -func (*NodeConfigSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{85} } +func (*NodeConfigSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{86} } func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} } func (*NodeConfigStatus) ProtoMessage() {} -func (*NodeConfigStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{86} } +func (*NodeConfigStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{87} } func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } func (*NodeDaemonEndpoints) ProtoMessage() {} -func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{87} } +func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{88} } func (m *NodeList) Reset() { *m = NodeList{} } func (*NodeList) ProtoMessage() {} -func (*NodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{88} } +func (*NodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{89} } func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } func (*NodeProxyOptions) ProtoMessage() {} -func (*NodeProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{89} } +func (*NodeProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{90} } func (m *NodeResources) Reset() { *m = NodeResources{} } func (*NodeResources) ProtoMessage() {} -func (*NodeResources) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{90} } +func (*NodeResources) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{91} } func (m *NodeSelector) Reset() { *m = NodeSelector{} } func (*NodeSelector) ProtoMessage() {} -func (*NodeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{91} } +func (*NodeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{92} } func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } func (*NodeSelectorRequirement) ProtoMessage() {} func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{92} + return fileDescriptorGenerated, []int{93} } func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } func (*NodeSelectorTerm) ProtoMessage() {} -func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{93} } +func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{94} } func (m *NodeSpec) Reset() { *m = NodeSpec{} } func (*NodeSpec) ProtoMessage() {} -func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{94} } +func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{95} } func (m *NodeStatus) Reset() { *m = NodeStatus{} } func (*NodeStatus) ProtoMessage() {} -func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{95} } +func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{96} } func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } func (*NodeSystemInfo) ProtoMessage() {} -func (*NodeSystemInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{96} } +func (*NodeSystemInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{97} } func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } func (*ObjectFieldSelector) ProtoMessage() {} -func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{97} } +func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{98} } func (m *ObjectReference) Reset() { *m = ObjectReference{} } func (*ObjectReference) ProtoMessage() {} -func (*ObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{98} } +func (*ObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{99} } func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } func (*PersistentVolume) ProtoMessage() {} -func (*PersistentVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{99} } +func (*PersistentVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{100} } func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } func (*PersistentVolumeClaim) ProtoMessage() {} -func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{100} } +func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{101} } func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } func (*PersistentVolumeClaimCondition) ProtoMessage() {} func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{101} + return fileDescriptorGenerated, []int{102} } func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } func (*PersistentVolumeClaimList) ProtoMessage() {} func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { - return 
fileDescriptorGenerated, []int{102} + return fileDescriptorGenerated, []int{103} } func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } func (*PersistentVolumeClaimSpec) ProtoMessage() {} func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{103} + return fileDescriptorGenerated, []int{104} } func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } func (*PersistentVolumeClaimStatus) ProtoMessage() {} func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{104} + return fileDescriptorGenerated, []int{105} } func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{105} + return fileDescriptorGenerated, []int{106} } func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } func (*PersistentVolumeList) ProtoMessage() {} -func (*PersistentVolumeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{106} } +func (*PersistentVolumeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{107} } func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } func (*PersistentVolumeSource) ProtoMessage() {} func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{107} + return fileDescriptorGenerated, []int{108} } func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } func (*PersistentVolumeSpec) ProtoMessage() {} -func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{108} } +func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{109} } func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } func (*PersistentVolumeStatus) ProtoMessage() {} func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{109} + return fileDescriptorGenerated, []int{110} } func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{110} + return fileDescriptorGenerated, []int{111} } func (m *Pod) Reset() { *m = Pod{} } func (*Pod) ProtoMessage() {} -func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{111} } +func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{112} } func (m *PodAffinity) Reset() { *m = PodAffinity{} } func (*PodAffinity) ProtoMessage() {} -func (*PodAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{112} } +func (*PodAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{113} } func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } func (*PodAffinityTerm) ProtoMessage() {} -func (*PodAffinityTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{113} } +func (*PodAffinityTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{114} } func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } func (*PodAntiAffinity) ProtoMessage() {} -func (*PodAntiAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, 
[]int{114} } +func (*PodAntiAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{115} } func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } func (*PodAttachOptions) ProtoMessage() {} -func (*PodAttachOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{115} } +func (*PodAttachOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{116} } func (m *PodCondition) Reset() { *m = PodCondition{} } func (*PodCondition) ProtoMessage() {} -func (*PodCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{116} } +func (*PodCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{117} } func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} } func (*PodDNSConfig) ProtoMessage() {} -func (*PodDNSConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{117} } +func (*PodDNSConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{118} } func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} } func (*PodDNSConfigOption) ProtoMessage() {} -func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{118} } +func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{119} } func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } func (*PodExecOptions) ProtoMessage() {} -func (*PodExecOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{119} } +func (*PodExecOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{120} } func (m *PodList) Reset() { *m = PodList{} } func (*PodList) ProtoMessage() {} -func (*PodList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{120} } +func (*PodList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{121} } func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } func (*PodLogOptions) ProtoMessage() {} -func (*PodLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{121} } +func (*PodLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{122} } func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } func (*PodPortForwardOptions) ProtoMessage() {} -func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{122} } +func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{123} } func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } func (*PodProxyOptions) ProtoMessage() {} -func (*PodProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{123} } +func (*PodProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{124} } func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} } func (*PodReadinessGate) ProtoMessage() {} -func (*PodReadinessGate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{124} } +func (*PodReadinessGate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{125} } func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } func (*PodSecurityContext) ProtoMessage() {} -func (*PodSecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{125} } +func (*PodSecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{126} } func (m *PodSignature) Reset() { *m = PodSignature{} } func (*PodSignature) ProtoMessage() {} -func 
(*PodSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{126} } +func (*PodSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{127} } func (m *PodSpec) Reset() { *m = PodSpec{} } func (*PodSpec) ProtoMessage() {} -func (*PodSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{127} } +func (*PodSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{128} } func (m *PodStatus) Reset() { *m = PodStatus{} } func (*PodStatus) ProtoMessage() {} -func (*PodStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{128} } +func (*PodStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{129} } func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } func (*PodStatusResult) ProtoMessage() {} -func (*PodStatusResult) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{129} } +func (*PodStatusResult) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{130} } func (m *PodTemplate) Reset() { *m = PodTemplate{} } func (*PodTemplate) ProtoMessage() {} -func (*PodTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{130} } +func (*PodTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{131} } func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } func (*PodTemplateList) ProtoMessage() {} -func (*PodTemplateList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{131} } +func (*PodTemplateList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{132} } func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } func (*PodTemplateSpec) ProtoMessage() {} -func (*PodTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{132} } +func (*PodTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{133} } func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } func (*PortworxVolumeSource) ProtoMessage() {} -func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{133} } +func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{134} } func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} -func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{134} } +func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{135} } func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } func (*PreferAvoidPodsEntry) ProtoMessage() {} -func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{135} } +func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{136} } func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } func (*PreferredSchedulingTerm) ProtoMessage() {} func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{136} + return fileDescriptorGenerated, []int{137} } func (m *Probe) Reset() { *m = Probe{} } func (*Probe) ProtoMessage() {} -func (*Probe) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{137} } +func (*Probe) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{138} } func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } func (*ProjectedVolumeSource) ProtoMessage() {} -func 
(*ProjectedVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{138} } +func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{139} } func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } func (*QuobyteVolumeSource) ProtoMessage() {} -func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{139} } +func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{140} } func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } func (*RBDPersistentVolumeSource) ProtoMessage() {} func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{140} + return fileDescriptorGenerated, []int{141} } func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } func (*RBDVolumeSource) ProtoMessage() {} -func (*RBDVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{141} } +func (*RBDVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{142} } func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } func (*RangeAllocation) ProtoMessage() {} -func (*RangeAllocation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{142} } +func (*RangeAllocation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{143} } func (m *ReplicationController) Reset() { *m = ReplicationController{} } func (*ReplicationController) ProtoMessage() {} -func (*ReplicationController) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{143} } +func (*ReplicationController) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{144} } func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } func (*ReplicationControllerCondition) ProtoMessage() {} func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{144} + return fileDescriptorGenerated, []int{145} } func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } func (*ReplicationControllerList) ProtoMessage() {} func (*ReplicationControllerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{145} + return fileDescriptorGenerated, []int{146} } func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } func (*ReplicationControllerSpec) ProtoMessage() {} func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{146} + return fileDescriptorGenerated, []int{147} } func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } func (*ReplicationControllerStatus) ProtoMessage() {} func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{147} + return fileDescriptorGenerated, []int{148} } func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } func (*ResourceFieldSelector) ProtoMessage() {} -func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{148} } +func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{149} } func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } func (*ResourceQuota) ProtoMessage() {} -func (*ResourceQuota) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{149} } +func (*ResourceQuota) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{150} } func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } func (*ResourceQuotaList) ProtoMessage() {} -func (*ResourceQuotaList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{150} } +func (*ResourceQuotaList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{151} } func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } func (*ResourceQuotaSpec) ProtoMessage() {} -func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{151} } +func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{152} } func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } func (*ResourceQuotaStatus) ProtoMessage() {} -func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{152} } +func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{153} } func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } func (*ResourceRequirements) ProtoMessage() {} -func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{153} } +func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{154} } func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } func (*SELinuxOptions) ProtoMessage() {} -func (*SELinuxOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{154} } +func (*SELinuxOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{155} } func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{155} + return fileDescriptorGenerated, []int{156} } func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } func (*ScaleIOVolumeSource) ProtoMessage() {} -func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{156} } +func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{157} } func (m *ScopeSelector) Reset() { *m = ScopeSelector{} } func (*ScopeSelector) ProtoMessage() {} -func (*ScopeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{157} } +func (*ScopeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{158} } func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} } func (*ScopedResourceSelectorRequirement) ProtoMessage() {} func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{158} + return fileDescriptorGenerated, []int{159} } func (m *Secret) Reset() { *m = Secret{} } func (*Secret) ProtoMessage() {} -func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{159} } +func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{160} } func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } func (*SecretEnvSource) ProtoMessage() {} -func (*SecretEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{160} } +func (*SecretEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{161} } func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } func (*SecretKeySelector) ProtoMessage() {} -func 
(*SecretKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{161} } +func (*SecretKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{162} } func (m *SecretList) Reset() { *m = SecretList{} } func (*SecretList) ProtoMessage() {} -func (*SecretList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{162} } +func (*SecretList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{163} } func (m *SecretProjection) Reset() { *m = SecretProjection{} } func (*SecretProjection) ProtoMessage() {} -func (*SecretProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{163} } +func (*SecretProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{164} } func (m *SecretReference) Reset() { *m = SecretReference{} } func (*SecretReference) ProtoMessage() {} -func (*SecretReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{164} } +func (*SecretReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{165} } func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } func (*SecretVolumeSource) ProtoMessage() {} -func (*SecretVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{165} } +func (*SecretVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{166} } func (m *SecurityContext) Reset() { *m = SecurityContext{} } func (*SecurityContext) ProtoMessage() {} -func (*SecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{166} } +func (*SecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{167} } func (m *SerializedReference) Reset() { *m = SerializedReference{} } func (*SerializedReference) ProtoMessage() {} -func (*SerializedReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{167} } +func (*SerializedReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{168} } func (m *Service) Reset() { *m = Service{} } func (*Service) ProtoMessage() {} -func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{168} } +func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{169} } func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } func (*ServiceAccount) ProtoMessage() {} -func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{169} } +func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{170} } func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } func (*ServiceAccountList) ProtoMessage() {} -func (*ServiceAccountList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{170} } +func (*ServiceAccountList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{171} } func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} } func (*ServiceAccountTokenProjection) ProtoMessage() {} func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{171} + return fileDescriptorGenerated, []int{172} } func (m *ServiceList) Reset() { *m = ServiceList{} } func (*ServiceList) ProtoMessage() {} -func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{172} } +func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{173} } func (m *ServicePort) Reset() { 
*m = ServicePort{} } func (*ServicePort) ProtoMessage() {} -func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{173} } +func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{174} } func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } func (*ServiceProxyOptions) ProtoMessage() {} -func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{174} } +func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{175} } func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } func (*ServiceSpec) ProtoMessage() {} -func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{175} } +func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{176} } func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } func (*ServiceStatus) ProtoMessage() {} -func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{176} } +func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{177} } func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } func (*SessionAffinityConfig) ProtoMessage() {} -func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{177} } +func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{178} } func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } func (*StorageOSPersistentVolumeSource) ProtoMessage() {} func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{178} + return fileDescriptorGenerated, []int{179} } func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } func (*StorageOSVolumeSource) ProtoMessage() {} -func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{179} } +func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{180} } func (m *Sysctl) Reset() { *m = Sysctl{} } func (*Sysctl) ProtoMessage() {} -func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{180} } +func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{181} } func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } func (*TCPSocketAction) ProtoMessage() {} -func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{181} } +func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{182} } func (m *Taint) Reset() { *m = Taint{} } func (*Taint) ProtoMessage() {} -func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{182} } +func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{183} } func (m *Toleration) Reset() { *m = Toleration{} } func (*Toleration) ProtoMessage() {} -func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{183} } +func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{184} } func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} } func (*TopologySelectorLabelRequirement) ProtoMessage() {} func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{184} + return fileDescriptorGenerated, 
[]int{185} } func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } func (*TopologySelectorTerm) ProtoMessage() {} -func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{185} } +func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{186} } func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} } func (*TypedLocalObjectReference) ProtoMessage() {} func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{186} + return fileDescriptorGenerated, []int{187} } func (m *Volume) Reset() { *m = Volume{} } func (*Volume) ProtoMessage() {} -func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{187} } +func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{188} } func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } func (*VolumeDevice) ProtoMessage() {} -func (*VolumeDevice) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{188} } +func (*VolumeDevice) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{189} } func (m *VolumeMount) Reset() { *m = VolumeMount{} } func (*VolumeMount) ProtoMessage() {} -func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{189} } +func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{190} } func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } func (*VolumeNodeAffinity) ProtoMessage() {} -func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{190} } +func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{191} } func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } func (*VolumeProjection) ProtoMessage() {} -func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{191} } +func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{192} } func (m *VolumeSource) Reset() { *m = VolumeSource{} } func (*VolumeSource) ProtoMessage() {} -func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{192} } +func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{193} } func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{193} + return fileDescriptorGenerated, []int{194} } func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } func (*WeightedPodAffinityTerm) ProtoMessage() {} func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{194} + return fileDescriptorGenerated, []int{195} } func init() { @@ -1157,6 +1163,7 @@ func init() { proto.RegisterType((*FlockerVolumeSource)(nil), "k8s.io.api.core.v1.FlockerVolumeSource") proto.RegisterType((*GCEPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.GCEPersistentDiskVolumeSource") proto.RegisterType((*GitRepoVolumeSource)(nil), "k8s.io.api.core.v1.GitRepoVolumeSource") + proto.RegisterType((*GlusterfsPersistentVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsPersistentVolumeSource") proto.RegisterType((*GlusterfsVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsVolumeSource") 
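Everything from `NamespaceSpec` (79→80) down to `WeightedPodAffinityTerm` (194→195) shifts its descriptor index up by exactly one because this regeneration inserts a single new message, `GlusterfsPersistentVolumeSource`, earlier in the descriptor table; the hunks that follow register the type and add its generated `Marshal`, `Size`, `String`, and `Unmarshal` methods. Judging from the field tags in that generated code (`0x0a`, `0x12`, `0x18`, `0x22`, i.e. fields 1–4), the underlying type presumably looks like the sketch below — inferred from this diff rather than quoted from `types.go`:

```go
// Sketch inferred from the generated marshal/unmarshal hunks below;
// the authoritative definition lives in k8s.io/api/core/v1/types.go.
type GlusterfsPersistentVolumeSource struct {
	EndpointsName string // field 1 (wire type 2): name of the endpoints object
	Path          string // field 2 (wire type 2): glusterfs volume path
	ReadOnly      bool   // field 3 (wire type 0): mount read-only if true
	// Field 4 (wire type 2): namespace of the endpoints object. A *string
	// so an unset namespace is distinguishable from the empty string,
	// which is why Marshal/Unmarshal guard it with a nil check.
	EndpointsNamespace *string
}
```

The same regeneration threads two more API changes through the file: `PodSpec` gains an optional `EnableServiceLinks *bool` (field 30, encoded only when non-nil), and `TypedLocalObjectReference.APIGroup` becomes a `*string` for the same unset-versus-empty reason. Finally, every map-valued field (`CSIPersistentVolumeSource.VolumeAttributes`, `ConfigMap.Data` and `BinaryData`, the FlexVolume `Options`, and so on) switches to a more defensive entry decoder: rather than assuming each map entry encodes key then value in that fixed order, the new loop dispatches on the entry's own field tags, tolerates missing or reordered fields by starting from zero values, and skips unknown fields. The standalone program below demonstrates that pattern on a hand-built `map<string,string>` entry; `decodeVarint` and `decodeEntry` are illustrative helpers written for this example, not names from the generated file:

```go
package main

import "fmt"

// decodeVarint reads one base-128 varint and reports how many bytes it used.
func decodeVarint(b []byte) (uint64, int) {
	var v uint64
	for i, c := range b {
		v |= uint64(c&0x7f) << (7 * uint(i))
		if c < 0x80 {
			return v, i + 1
		}
	}
	return 0, 0 // truncated input; the generated code returns ErrIntOverflowGenerated here
}

// decodeEntry decodes one map<string,string> entry body the way the
// regenerated code does: by field number, in whatever order the fields
// appear. Only wire type 2 (length-delimited) fields are handled here.
func decodeEntry(b []byte) (key, value string) {
	for i := 0; i < len(b); {
		tag, n := decodeVarint(b[i:])
		i += n
		strLen, n := decodeVarint(b[i:])
		i += n
		s := string(b[i : i+int(strLen)])
		i += int(strLen)
		switch tag >> 3 {
		case 1:
			key = s
		case 2:
			value = s
		}
	}
	return key, value
}

func main() {
	// Value (field 2) deliberately placed before key (field 1):
	// 0x12 = field 2/wire type 2, 0x0a = field 1/wire type 2.
	entry := []byte{0x12, 0x03, 'b', 'a', 'r', 0x0a, 0x03, 'f', 'o', 'o'}
	k, v := decodeEntry(entry)
	fmt.Printf("%s=%s\n", k, v) // prints "foo=bar" despite the reversed order
}
```

The old generated code read a key tag and key bytes first, unconditionally, so an entry with the fields reversed would have been misread; the rewritten loops accept it, which matters for interoperability with other protobuf serializers that are free to emit map-entry fields in either order.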
proto.RegisterType((*HTTPGetAction)(nil), "k8s.io.api.core.v1.HTTPGetAction") proto.RegisterType((*HTTPHeader)(nil), "k8s.io.api.core.v1.HTTPHeader") @@ -3949,6 +3956,46 @@ func (m *GitRepoVolumeSource) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *GlusterfsPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GlusterfsPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EndpointsName))) + i += copy(dAtA[i:], m.EndpointsName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + dAtA[i] = 0x18 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.EndpointsNamespace != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.EndpointsNamespace))) + i += copy(dAtA[i:], *m.EndpointsNamespace) + } + return i, nil +} + func (m *GlusterfsVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -7684,6 +7731,18 @@ func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RuntimeClassName))) i += copy(dAtA[i:], *m.RuntimeClassName) } + if m.EnableServiceLinks != nil { + dAtA[i] = 0xf0 + i++ + dAtA[i] = 0x1 + i++ + if *m.EnableServiceLinks { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } return i, nil } @@ -10368,10 +10427,12 @@ func (m *TypedLocalObjectReference) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i += copy(dAtA[i:], m.APIGroup) + if m.APIGroup != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.APIGroup))) + i += copy(dAtA[i:], *m.APIGroup) + } dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) @@ -10944,24 +11005,6 @@ func (m *WeightedPodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -11951,6 +11994,21 @@ func (m *GitRepoVolumeSource) Size() (n int) { return n } +func (m *GlusterfsPersistentVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.EndpointsName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.EndpointsNamespace != nil { + l = len(*m.EndpointsNamespace) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *GlusterfsVolumeSource) Size() (n int) { var l int _ = l @@ -13313,6 +13371,9 @@ func (m *PodSpec) Size() (n int) { l = len(*m.RuntimeClassName) n += 2 + l + sovGenerated(uint64(l)) } + if m.EnableServiceLinks != nil { + n += 3 + } 
return n } @@ -14284,8 +14345,10 @@ func (m *TopologySelectorTerm) Size() (n int) { func (m *TypedLocalObjectReference) Size() (n int) { var l int _ = l - l = len(m.APIGroup) - n += 1 + l + sovGenerated(uint64(l)) + if m.APIGroup != nil { + l = len(*m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + } l = len(m.Kind) n += 1 + l + sovGenerated(uint64(l)) l = len(m.Name) @@ -15291,6 +15354,19 @@ func (this *GitRepoVolumeSource) String() string { }, "") return s } +func (this *GlusterfsPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GlusterfsPersistentVolumeSource{`, + `EndpointsName:` + fmt.Sprintf("%v", this.EndpointsName) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `EndpointsNamespace:` + valueToStringGenerated(this.EndpointsNamespace) + `,`, + `}`, + }, "") + return s +} func (this *GlusterfsVolumeSource) String() string { if this == nil { return "nil" @@ -16006,7 +16082,7 @@ func (this *PersistentVolumeSource) String() string { `GCEPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.GCEPersistentDisk), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) + `,`, `AWSElasticBlockStore:` + strings.Replace(fmt.Sprintf("%v", this.AWSElasticBlockStore), "AWSElasticBlockStoreVolumeSource", "AWSElasticBlockStoreVolumeSource", 1) + `,`, `HostPath:` + strings.Replace(fmt.Sprintf("%v", this.HostPath), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, - `Glusterfs:` + strings.Replace(fmt.Sprintf("%v", this.Glusterfs), "GlusterfsVolumeSource", "GlusterfsVolumeSource", 1) + `,`, + `Glusterfs:` + strings.Replace(fmt.Sprintf("%v", this.Glusterfs), "GlusterfsPersistentVolumeSource", "GlusterfsPersistentVolumeSource", 1) + `,`, `NFS:` + strings.Replace(fmt.Sprintf("%v", this.NFS), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, `RBD:` + strings.Replace(fmt.Sprintf("%v", this.RBD), "RBDPersistentVolumeSource", "RBDPersistentVolumeSource", 1) + `,`, `ISCSI:` + strings.Replace(fmt.Sprintf("%v", this.ISCSI), "ISCSIPersistentVolumeSource", "ISCSIPersistentVolumeSource", 1) + `,`, @@ -16321,6 +16397,7 @@ func (this *PodSpec) String() string { `ShareProcessNamespace:` + valueToStringGenerated(this.ShareProcessNamespace) + `,`, `ReadinessGates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ReadinessGates), "PodReadinessGate", "PodReadinessGate", 1), `&`, ``, 1) + `,`, `RuntimeClassName:` + valueToStringGenerated(this.RuntimeClassName) + `,`, + `EnableServiceLinks:` + valueToStringGenerated(this.EnableServiceLinks) + `,`, `}`, }, "") return s @@ -17157,7 +17234,7 @@ func (this *TypedLocalObjectReference) String() string { return "nil" } s := strings.Join([]string{`&TypedLocalObjectReference{`, - `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, + `APIGroup:` + valueToStringGenerated(this.APIGroup) + `,`, `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `}`, @@ -18553,51 +18630,14 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l 
{ - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.VolumeAttributes == nil { m.VolumeAttributes = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18607,41 +18647,80 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.VolumeAttributes[mapkey] = mapvalue - } else { - var mapvalue string - m.VolumeAttributes[mapkey] = mapvalue } + m.VolumeAttributes[mapkey] = mapvalue iNdEx = postIndex case 6: if wireType != 2 { @@ -20174,51 +20253,14 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 
64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Data == nil { m.Data = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20228,41 +20270,80 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Data[mapkey] = mapvalue - } else { - var mapvalue string - m.Data[mapkey] = mapvalue } + 
m.Data[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -20290,51 +20371,14 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.BinaryData == nil { m.BinaryData = make(map[string][]byte) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + mapvalue := []byte{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20344,42 +20388,81 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapbyteLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapbyteLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapbyteLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapbyteLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { + return ErrInvalidLengthGenerated + } + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = make([]byte, mapbyteLen) + copy(mapvalue, dAtA[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intMapbyteLen := int(mapbyteLen) - if intMapbyteLen < 0 { - return ErrInvalidLengthGenerated - } - postbytesIndex := iNdEx + intMapbyteLen - if postbytesIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := 
make([]byte, mapbyteLen) - copy(mapvalue, dAtA[iNdEx:postbytesIndex]) - iNdEx = postbytesIndex - m.BinaryData[mapkey] = mapvalue - } else { - var mapvalue []byte - m.BinaryData[mapkey] = mapvalue } + m.BinaryData[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -25909,51 +25992,14 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Options == nil { m.Options = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -25963,41 +26009,80 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + 
skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Options[mapkey] = mapvalue - } else { - var mapvalue string - m.Options[mapkey] = mapvalue } + m.Options[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -26186,51 +26271,14 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Options == nil { m.Options = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -26240,41 +26288,80 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return 
io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Options[mapkey] = mapvalue - } else { - var mapvalue string - m.Options[mapkey] = mapvalue } + m.Options[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -26689,7 +26776,7 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { +func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -26712,10 +26799,10 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GlusterfsVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: GlusterfsPersistentVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GlusterfsVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GlusterfsPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -26796,118 +26883,276 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { } } m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTPGetAction: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPGetAction: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType 
!= 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EndpointsNamespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.EndpointsNamespace = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GlusterfsVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GlusterfsVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndpointsName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EndpointsName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > 
l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPGetAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPGetAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -28601,51 +28846,14 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << 
shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Max == nil { m.Max = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -28655,46 +28863,85 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Max[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Max[ResourceName(mapkey)] = mapvalue } + m.Max[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -28722,51 +28969,14 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - 
if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Min == nil { m.Min = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -28776,46 +28986,85 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - 
iNdEx = postmsgIndex - m.Min[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Min[ResourceName(mapkey)] = mapvalue } + m.Min[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 4: if wireType != 2 { @@ -28843,51 +29092,14 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Default == nil { m.Default = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -28897,46 +29109,85 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Default[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Default[ResourceName(mapkey)] = mapvalue } + m.Default[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 5: if wireType != 2 { @@ -28964,51 +29215,14 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.DefaultRequest == nil { m.DefaultRequest = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -29018,46 +29232,85 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return 
ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.DefaultRequest[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.DefaultRequest[ResourceName(mapkey)] = mapvalue } + m.DefaultRequest[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 6: if wireType != 2 { @@ -29085,51 +29338,14 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.MaxLimitRequestRatio == nil { m.MaxLimitRequestRatio = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -29139,46 +29355,85 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return 
io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.MaxLimitRequestRatio[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.MaxLimitRequestRatio[ResourceName(mapkey)] = mapvalue } + m.MaxLimitRequestRatio[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -31592,51 +31847,14 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Capacity == nil { m.Capacity = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31646,46 +31864,85 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 
{ + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Capacity[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Capacity[ResourceName(mapkey)] = mapvalue } + m.Capacity[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -32314,51 +32571,14 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Capacity == nil { m.Capacity = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + 
var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -32368,46 +32588,85 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Capacity[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Capacity[ResourceName(mapkey)] = mapvalue } + m.Capacity[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 2: if wireType != 2 { @@ -32435,51 +32694,14 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return 
ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Allocatable == nil { m.Allocatable = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -32489,46 +32711,85 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Allocatable[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Allocatable[ResourceName(mapkey)] = mapvalue } + m.Allocatable[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -34464,42 +34725,13 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Phase = PersistentVolumeClaimPhase(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(dAtA[iNdEx:postIndex])) + m.Phase = PersistentVolumeClaimPhase(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -34509,34 +34741,26 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) } - var stringLenmapkey uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -34546,26 +34770,26 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + postIndex := iNdEx + msglen + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Capacity == nil { m.Capacity = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -34575,46 +34799,85 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Capacity[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Capacity[ResourceName(mapkey)] = mapvalue } + m.Capacity[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 4: if wireType != 2 { @@ -35033,7 +35296,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Glusterfs == nil { - m.Glusterfs = &GlusterfsVolumeSource{} + m.Glusterfs = &GlusterfsPersistentVolumeSource{} } if err := m.Glusterfs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -35709,51 +35972,14 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Capacity == nil 
{ m.Capacity = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35763,46 +35989,85 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Capacity[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Capacity[ResourceName(mapkey)] = mapvalue } + m.Capacity[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 2: if wireType != 2 { @@ -38806,51 +39071,14 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.NodeSelector == nil { m.NodeSelector = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -38860,41 +39088,80 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.NodeSelector[mapkey] = mapvalue - } else { - var mapvalue string - m.NodeSelector[mapkey] = mapvalue } + m.NodeSelector[mapkey] = mapvalue iNdEx = postIndex case 8: if wireType != 2 { @@ -39505,6 +39772,27 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.RuntimeClassName = &s iNdEx = postIndex + case 30: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
EnableServiceLinks", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.EnableServiceLinks = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -42499,51 +42787,14 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Selector == nil { m.Selector = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -42553,41 +42804,80 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Selector[mapkey] = mapvalue - } else { - var mapvalue string - m.Selector[mapkey] = mapvalue } + m.Selector[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -43282,51 +43572,14 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Hard == nil { m.Hard = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -43336,46 +43589,85 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + 
mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Hard[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Hard[ResourceName(mapkey)] = mapvalue } + m.Hard[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 2: if wireType != 2 { @@ -43515,51 +43807,14 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Hard == nil { m.Hard = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -43569,46 +43824,85 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + 
mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Hard[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Hard[ResourceName(mapkey)] = mapvalue } + m.Hard[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 2: if wireType != 2 { @@ -43636,51 +43930,14 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Used == nil { m.Used = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -43690,46 +43947,85 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Used[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Used[ResourceName(mapkey)] = mapvalue } + m.Used[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -43807,51 +44103,14 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Limits == nil { m.Limits = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -43861,46 +44120,85 @@ func (m *ResourceRequirements) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Limits[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Limits[ResourceName(mapkey)] = mapvalue } + m.Limits[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 2: if wireType != 2 { @@ -43928,51 +44226,14 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := 
ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Requests == nil { m.Requests = make(ResourceList) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey ResourceName + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -43982,46 +44243,85 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Requests[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Requests[ResourceName(mapkey)] = mapvalue } + m.Requests[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -45165,51 +45465,162 @@ func (m *Secret) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Data == nil { - m.Data = make(map[string][]byte) + if m.Data == nil { + m.Data = make(map[string][]byte) + } + var mapkey string + mapvalue := []byte{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapbyteLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapbyteLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { + return ErrInvalidLengthGenerated + } + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = make([]byte, mapbyteLen) + copy(mapvalue, dAtA[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Data[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = SecretType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringData", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StringData == nil { + m.StringData = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -45219,187 +45630,80 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapbyteLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapbyteLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - } - intMapbyteLen := int(mapbyteLen) - if intMapbyteLen < 0 { - return ErrInvalidLengthGenerated - } - postbytesIndex := iNdEx + intMapbyteLen - if postbytesIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := make([]byte, mapbyteLen) - copy(mapvalue, dAtA[iNdEx:postbytesIndex]) - iNdEx = postbytesIndex - m.Data[mapkey] = mapvalue - } else { - var mapvalue []byte - m.Data[mapkey] = mapvalue - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = SecretType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StringData", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 
7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.StringData == nil { - m.StringData = make(map[string]string) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + if skippy < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if (iNdEx + skippy) > postIndex { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.StringData[mapkey] = mapvalue - } else { - var mapvalue string - m.StringData[mapkey] = mapvalue } + m.StringData[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -47499,51 +47803,14 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Selector == nil { m.Selector = make(map[string]string) 
} - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -47553,41 +47820,80 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Selector[mapkey] = mapvalue - } else { - var mapvalue string - m.Selector[mapkey] = mapvalue } + m.Selector[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -49275,7 +49581,8 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.APIGroup = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.APIGroup = &s iNdEx = postIndex case 2: if wireType != 2 { @@ -51341,804 +51648,808 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 12778 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x6b, 0x90, 0x24, 0x57, - 0x56, 0x18, 0xbc, 0x59, 0x55, 0xfd, 0xa8, 0xd3, 0xef, 0x3b, 0x0f, 0xf5, 0xb4, 0x34, 0x53, 0xa3, - 0xd4, 0xee, 0x68, 0xb4, 0x92, 0x7a, 0x56, 0x23, 0x69, 0x57, 0xac, 0xb4, 0x82, 0xee, 0xae, 0xee, - 0x99, 0xd2, 0x4c, 0xf7, 0x94, 0x6e, 0xf5, 0x8c, 0x76, 0x85, 0x76, 
-	// (runs of hex byte literals from the old gzipped FileDescriptorProto omitted)
0x2f, 0x2c, 0xbd, 0xeb, 0xe8, 0x3d, 0x98, 0xde, 0x75, 0xe2, 0xe6, 0xf6, 0xf2, - 0x83, 0x76, 0xc8, 0xd5, 0xe3, 0x72, 0x9c, 0x9e, 0xed, 0x37, 0x4e, 0xda, 0x47, 0x26, 0x46, 0x6a, - 0xab, 0x29, 0x62, 0xb8, 0x8b, 0x3c, 0xda, 0x80, 0x31, 0x56, 0xc6, 0xac, 0xa1, 0xa3, 0x5e, 0xac, - 0x41, 0x5e, 0x6b, 0xea, 0xd5, 0x79, 0x35, 0xa1, 0x83, 0x75, 0xa2, 0xf6, 0x57, 0x8a, 0x7c, 0xb7, - 0x33, 0x6e, 0xfb, 0x19, 0x18, 0x69, 0x07, 0xad, 0xa5, 0x5a, 0x15, 0x8b, 0x59, 0x50, 0xd7, 0x48, - 0x9d, 0x17, 0x63, 0x09, 0x47, 0xaf, 0x02, 0x90, 0x07, 0x31, 0x09, 0x7d, 0xc7, 0x53, 0x46, 0x23, - 0xca, 0x4c, 0xb2, 0x1a, 0xac, 0x05, 0xf1, 0xdd, 0x88, 0x7c, 0xc7, 0xb2, 0x42, 0xc1, 0x1a, 0x3a, - 0xba, 0x0e, 0xd0, 0x0e, 0x83, 0x3d, 0xb7, 0xc5, 0xdc, 0xeb, 0x8a, 0xa6, 0x49, 0x45, 0x5d, 0x41, - 0xb0, 0x86, 0x85, 0x5e, 0x85, 0x89, 0x8e, 0x1f, 0x71, 0x0e, 0xc5, 0xd9, 0x10, 0xd1, 0x09, 0x47, - 0x13, 0xeb, 0x86, 0xbb, 0x3a, 0x10, 0x9b, 0xb8, 0x68, 0x01, 0x86, 0x63, 0x87, 0xd9, 0x44, 0x0c, - 0xe5, 0xdb, 0x36, 0xae, 0x53, 0x0c, 0x3d, 0xa8, 0x2c, 0xad, 0x80, 0x45, 0x45, 0xf4, 0xb6, 0xf4, - 0x55, 0xe0, 0x67, 0xbd, 0x30, 0x2a, 0x1e, 0xec, 0x5e, 0xd0, 0x3c, 0x15, 0x84, 0xb1, 0xb2, 0x41, - 0xcb, 0xfe, 0x7a, 0x19, 0x20, 0x61, 0xc7, 0xd1, 0xfb, 0x5d, 0xe7, 0xd1, 0x73, 0xbd, 0x19, 0xf8, - 0x93, 0x3b, 0x8c, 0xd0, 0xf7, 0x5a, 0x30, 0xe6, 0x78, 0x5e, 0xd0, 0x74, 0x62, 0x36, 0xca, 0x85, - 0xde, 0xe7, 0xa1, 0x68, 0x7f, 0x21, 0xa9, 0xc1, 0xbb, 0xf0, 0xa2, 0x5c, 0x78, 0x1a, 0xa4, 0x6f, - 0x2f, 0xf4, 0x86, 0xd1, 0xa7, 0xa4, 0x94, 0xc6, 0x97, 0xc7, 0x5c, 0x5a, 0x4a, 0x2b, 0xb3, 0xa3, - 0x5f, 0x13, 0xd0, 0xd0, 0x5d, 0x23, 0xf0, 0x5c, 0x29, 0x3f, 0x06, 0x83, 0xc1, 0x95, 0xf6, 0x8b, - 0x39, 0x87, 0xea, 0xba, 0x73, 0xd5, 0x50, 0x7e, 0xa0, 0x12, 0x4d, 0xfc, 0xe9, 0xe3, 0x58, 0xf5, - 0x2e, 0x4c, 0xb5, 0xcc, 0xbb, 0x5d, 0xac, 0xa6, 0xa7, 0xf3, 0xe8, 0xa6, 0x58, 0x81, 0xe4, 0x36, - 0x4f, 0x01, 0x70, 0x9a, 0x30, 0xaa, 0x73, 0x37, 0xb7, 0x9a, 0xbf, 0x19, 0x08, 0xe3, 0x74, 0x3b, - 0x77, 0x2e, 0xf7, 0xa3, 0x98, 0xec, 0x52, 0xcc, 0xe4, 0xd2, 0x5e, 0x13, 0x75, 0xb1, 0xa2, 0x82, - 0xde, 0x80, 0x61, 0xe6, 0x27, 0x1b, 0xcd, 0x8e, 0xe6, 0x2b, 0x13, 0xcd, 0x10, 0x0f, 0xc9, 0xa6, - 0x62, 0x7f, 0x23, 0x2c, 0x28, 0xa0, 0x9b, 0x32, 0x0e, 0x4c, 0x54, 0xf3, 0xef, 0x46, 0x84, 0xc5, - 0x81, 0x29, 0x2f, 0x7e, 0x3c, 0x09, 0xf1, 0xc2, 0xcb, 0x33, 0xc3, 0xc7, 0x1b, 0x35, 0x29, 0x73, - 0x24, 0xfe, 0xcb, 0xa8, 0xf4, 0xb3, 0x90, 0xdf, 0x3d, 0x33, 0x72, 0x7d, 0x32, 0x9c, 0xf7, 0x4c, - 0x12, 0x38, 0x4d, 0x93, 0x32, 0x9a, 0x7c, 0xe7, 0x0a, 0xf3, 0xf6, 0x7e, 0xfb, 0x9f, 0xcb, 0xd7, - 0xec, 0x92, 0xe1, 0x25, 0x58, 0xd4, 0x3f, 0xd5, 0x5b, 0x7f, 0xce, 0x87, 0xe9, 0xf4, 0x16, 0x7d, - 0xa4, 0x5c, 0xc6, 0xef, 0x97, 0x60, 0xd2, 0x5c, 0x52, 0xe8, 0x1a, 0x94, 0x05, 0x11, 0x15, 0x94, - 0x54, 0xed, 0x92, 0x55, 0x09, 0xc0, 0x09, 0x0e, 0x8b, 0x45, 0xcb, 0xaa, 0x6b, 0x66, 0x89, 0x49, - 0x2c, 0x5a, 0x05, 0xc1, 0x1a, 0x16, 0x95, 0x97, 0x36, 0x82, 0x20, 0x56, 0x97, 0x8a, 0x5a, 0x77, - 0x8b, 0xac, 0x14, 0x0b, 0x28, 0xbd, 0x4c, 0x76, 0x48, 0xe8, 0x13, 0xcf, 0x8c, 0x75, 0xa6, 0x2e, - 0x93, 0x5b, 0x3a, 0x10, 0x9b, 0xb8, 0xf4, 0x96, 0x0c, 0x22, 0xb6, 0x90, 0x85, 0x54, 0x96, 0x98, - 0x79, 0x36, 0xb8, 0xc7, 0xb9, 0x84, 0xa3, 0x2f, 0xc0, 0x63, 0xca, 0x41, 0x1c, 0x73, 0x45, 0xb5, - 0x6c, 0x71, 0xd8, 0x50, 0xa2, 0x3c, 0xb6, 0x94, 0x8d, 0x86, 0xf3, 0xea, 0xa3, 0xd7, 0x61, 0x52, - 0x70, 0xee, 0x92, 0xe2, 0x88, 0x69, 0x3b, 0x71, 0xcb, 0x80, 0xe2, 0x14, 0xb6, 0x8c, 0xd6, 0xc6, - 0x98, 0x67, 0x49, 0x61, 0xb4, 0x3b, 0x5a, 0x9b, 0x0e, 0xc7, 0x5d, 0x35, 0xd0, 0x02, 0x4c, 0x71, - 0xd6, 0xca, 0xf5, 0xb7, 0xf8, 0x9c, 0x08, 
0xef, 0x13, 0xb5, 0xa5, 0xee, 0x98, 0x60, 0x9c, 0xc6, - 0x47, 0xaf, 0xc0, 0xb8, 0x13, 0x36, 0xb7, 0xdd, 0x98, 0x34, 0xe3, 0x4e, 0xc8, 0xdd, 0x52, 0x34, - 0xe3, 0x93, 0x05, 0x0d, 0x86, 0x0d, 0x4c, 0xfb, 0x7d, 0x38, 0x93, 0xe1, 0xb8, 0x46, 0x17, 0x8e, - 0xd3, 0x76, 0xe5, 0x37, 0xa5, 0x0c, 0x36, 0x17, 0xea, 0x35, 0xf9, 0x35, 0x1a, 0x16, 0x5d, 0x9d, - 0xcc, 0xc1, 0x4d, 0x4b, 0x42, 0xa1, 0x56, 0xe7, 0x8a, 0x04, 0xe0, 0x04, 0xc7, 0xfe, 0x9f, 0x05, - 0x98, 0xca, 0x50, 0xbe, 0xb3, 0x44, 0x08, 0x29, 0xd9, 0x23, 0xc9, 0x7b, 0x60, 0x06, 0xff, 0x2b, - 0x1c, 0x23, 0xf8, 0x5f, 0xb1, 0x5f, 0xf0, 0xbf, 0xd2, 0x07, 0x09, 0xfe, 0x67, 0x8e, 0xd8, 0xd0, - 0x40, 0x23, 0x96, 0x11, 0x30, 0x70, 0xf8, 0x98, 0x01, 0x03, 0x8d, 0x41, 0x1f, 0x19, 0x60, 0xd0, - 0x7f, 0xa8, 0x00, 0xd3, 0x69, 0x23, 0xb9, 0x53, 0x50, 0xc7, 0xbe, 0x61, 0xa8, 0x63, 0xb3, 0xd3, - 0x8a, 0xa4, 0x4d, 0xf7, 0xf2, 0x54, 0xb3, 0x38, 0xa5, 0x9a, 0xfd, 0xe4, 0x40, 0xd4, 0x7a, 0xab, - 0x69, 0xff, 0x76, 0x01, 0xce, 0xa5, 0xab, 0x2c, 0x79, 0x8e, 0xbb, 0x7b, 0x0a, 0x63, 0x73, 0xc7, - 0x18, 0x9b, 0xe7, 0x07, 0xf9, 0x1a, 0xd6, 0xb5, 0xdc, 0x01, 0x7a, 0x2b, 0x35, 0x40, 0xd7, 0x06, - 0x27, 0xd9, 0x7b, 0x94, 0xbe, 0x51, 0x84, 0x4b, 0x99, 0xf5, 0x12, 0x6d, 0xe6, 0x8a, 0xa1, 0xcd, - 0xbc, 0x9e, 0xd2, 0x66, 0xda, 0xbd, 0x6b, 0x9f, 0x8c, 0x7a, 0x53, 0x78, 0x14, 0xb2, 0x00, 0x71, - 0x0f, 0xa9, 0xda, 0x34, 0x3c, 0x0a, 0x15, 0x21, 0x6c, 0xd2, 0xfd, 0xb3, 0xa4, 0xd2, 0xfc, 0x97, - 0x16, 0x5c, 0xc8, 0x9c, 0x9b, 0x53, 0x50, 0x61, 0xad, 0x99, 0x2a, 0xac, 0x67, 0x06, 0x5e, 0xad, - 0x39, 0x3a, 0xad, 0xdf, 0x28, 0xe5, 0x7c, 0x0b, 0x13, 0xd0, 0xef, 0xc0, 0x98, 0xd3, 0x6c, 0x92, - 0x28, 0x5a, 0x0d, 0x5a, 0x2a, 0x60, 0xda, 0xf3, 0x4c, 0xce, 0x4a, 0x8a, 0x8f, 0x0e, 0x2a, 0x73, - 0x69, 0x12, 0x09, 0x18, 0xeb, 0x14, 0xcc, 0x18, 0x8f, 0x85, 0x13, 0x8d, 0xf1, 0x78, 0x1d, 0x60, - 0x4f, 0x71, 0xeb, 0x69, 0x21, 0x5f, 0xe3, 0xe3, 0x35, 0x2c, 0xf4, 0x45, 0x18, 0x8d, 0xc4, 0x35, - 0x2e, 0x96, 0xe2, 0x8b, 0x03, 0xce, 0x95, 0xb3, 0x41, 0x3c, 0xd3, 0x75, 0x5d, 0xe9, 0x43, 0x14, - 0x49, 0xf4, 0x6d, 0x30, 0x1d, 0xf1, 0xc8, 0x28, 0x4b, 0x9e, 0x13, 0x31, 0x3f, 0x08, 0xb1, 0x0a, - 0x99, 0x3f, 0x7a, 0x23, 0x05, 0xc3, 0x5d, 0xd8, 0x68, 0x45, 0x7e, 0x14, 0x0b, 0xe3, 0xc2, 0x17, - 0xe6, 0x95, 0xe4, 0x83, 0x44, 0x1a, 0xa6, 0xb3, 0xe9, 0xe1, 0x67, 0x03, 0xaf, 0xd5, 0x44, 0x5f, - 0x04, 0xa0, 0xcb, 0x47, 0xe8, 0x12, 0x46, 0xf2, 0x0f, 0x4f, 0x7a, 0xaa, 0xb4, 0x32, 0x2d, 0x3f, - 0x99, 0x2f, 0x5f, 0x55, 0x11, 0xc1, 0x1a, 0x41, 0xfb, 0x87, 0x4a, 0xf0, 0x78, 0x8f, 0x33, 0x12, - 0x2d, 0x98, 0x4f, 0xa0, 0xcf, 0xa6, 0x85, 0xeb, 0xb9, 0xcc, 0xca, 0x86, 0xb4, 0x9d, 0x5a, 0x8a, - 0x85, 0x0f, 0xbc, 0x14, 0xbf, 0xdf, 0xd2, 0xd4, 0x1e, 0xdc, 0x98, 0xef, 0x73, 0xc7, 0x3c, 0xfb, - 0x4f, 0x50, 0x0f, 0xb2, 0x99, 0xa1, 0x4c, 0xb8, 0x3e, 0x70, 0x77, 0x06, 0xd6, 0x2e, 0x9c, 0xae, - 0xf2, 0xf7, 0x2b, 0x16, 0x3c, 0x99, 0xd9, 0x5f, 0xc3, 0x64, 0xe3, 0x1a, 0x94, 0x9b, 0xb4, 0x50, - 0x73, 0xdd, 0x4a, 0x7c, 0x5a, 0x25, 0x00, 0x27, 0x38, 0x86, 0x65, 0x46, 0xa1, 0xaf, 0x65, 0xc6, - 0x3f, 0xb3, 0xa0, 0x6b, 0x7f, 0x9c, 0xc2, 0x41, 0x5d, 0x33, 0x0f, 0xea, 0x8f, 0x0f, 0x32, 0x97, - 0x39, 0x67, 0xf4, 0x7f, 0x9a, 0x82, 0xf3, 0x39, 0xbe, 0x1a, 0x7b, 0x30, 0xb3, 0xd5, 0x24, 0xa6, - 0x53, 0x9c, 0xf8, 0x98, 0x4c, 0xff, 0xc1, 0x9e, 0x1e, 0x74, 0x2c, 0x3d, 0xcf, 0x4c, 0x17, 0x0a, - 0xee, 0x6e, 0x02, 0x7d, 0xc5, 0x82, 0xb3, 0xce, 0xfd, 0xa8, 0x2b, 0x09, 0xa3, 0x58, 0x33, 0x2f, - 0x65, 0x2a, 0x41, 0xfa, 0x24, 0x6d, 0xe4, 0xf9, 0x8a, 0xb2, 0xb0, 0x70, 0x66, 0x5b, 0x08, 0x8b, - 0x10, 0x9a, 0x94, 0x9d, 0xef, 0xe1, 0xb6, 0x99, 0xe5, 0x54, 0xc3, 
0x8f, 0x6c, 0x09, 0xc1, 0x8a, - 0x0e, 0xba, 0x07, 0xe5, 0x2d, 0xe9, 0xe9, 0x26, 0xae, 0x84, 0xcc, 0x3b, 0x36, 0xd3, 0x1d, 0x8e, - 0x3f, 0x4b, 0x2a, 0x10, 0x4e, 0x48, 0xa1, 0xd7, 0xa1, 0xe8, 0x6f, 0x46, 0xbd, 0x12, 0xfd, 0xa4, - 0x2c, 0x99, 0xb8, 0x4b, 0xf4, 0xda, 0x4a, 0x03, 0xd3, 0x8a, 0xe8, 0x26, 0x14, 0xc3, 0x8d, 0x96, - 0xd0, 0xdb, 0x65, 0x9e, 0xdc, 0x78, 0xb1, 0x9a, 0xbd, 0x48, 0x38, 0x25, 0xbc, 0x58, 0xc5, 0x94, - 0x04, 0xaa, 0xc3, 0x10, 0x73, 0x6b, 0x10, 0xb7, 0x40, 0x26, 0xbf, 0xdb, 0xc3, 0x3d, 0x88, 0xfb, - 0x4d, 0x33, 0x04, 0xcc, 0x09, 0xa1, 0x75, 0x18, 0x6e, 0xb2, 0xa4, 0x30, 0x22, 0x6a, 0xf3, 0xa7, - 0x32, 0x35, 0x74, 0x3d, 0xb2, 0xe5, 0x08, 0x85, 0x15, 0xc3, 0xc0, 0x82, 0x16, 0xa3, 0x4a, 0xda, - 0xdb, 0x9b, 0x11, 0x93, 0xf0, 0xf3, 0xa8, 0xf6, 0x48, 0x02, 0x25, 0xa8, 0x32, 0x0c, 0x2c, 0x68, - 0xa1, 0xcf, 0x42, 0x61, 0xb3, 0x29, 0xbc, 0x1e, 0x32, 0x55, 0x75, 0xa6, 0x57, 0xfb, 0xe2, 0xf0, - 0xe1, 0x41, 0xa5, 0xb0, 0xb2, 0x84, 0x0b, 0x9b, 0x4d, 0xb4, 0x06, 0x23, 0x9b, 0xdc, 0x0f, 0x56, - 0x68, 0xe3, 0x9e, 0xce, 0x76, 0xd1, 0xed, 0x72, 0x95, 0xe5, 0xd6, 0xfa, 0x02, 0x80, 0x25, 0x11, - 0x16, 0x87, 0x52, 0xf9, 0xf3, 0x8a, 0x80, 0xcc, 0xf3, 0xc7, 0xf3, 0xc1, 0xe6, 0xb7, 0x72, 0xe2, - 0x15, 0x8c, 0x35, 0x8a, 0xe8, 0xcb, 0x50, 0x76, 0x64, 0xfa, 0x3f, 0x11, 0xb0, 0xe2, 0xc5, 0xcc, - 0x8d, 0xd9, 0x3b, 0x33, 0x22, 0x5f, 0xd5, 0x0a, 0x09, 0x27, 0x44, 0xd1, 0x0e, 0x4c, 0xec, 0x45, - 0xed, 0x6d, 0x22, 0x37, 0x32, 0x8b, 0x5f, 0x91, 0x73, 0x71, 0xdd, 0x13, 0x88, 0x6e, 0x18, 0x77, - 0x1c, 0xaf, 0xeb, 0xec, 0x61, 0x6f, 0xd9, 0xf7, 0x74, 0x62, 0xd8, 0xa4, 0x4d, 0x87, 0xff, 0xbd, - 0x4e, 0xb0, 0xb1, 0x1f, 0x13, 0x11, 0xc1, 0x39, 0x73, 0xf8, 0xdf, 0xe4, 0x28, 0xdd, 0xc3, 0x2f, - 0x00, 0x58, 0x12, 0xa1, 0x5b, 0xdd, 0x91, 0xa9, 0x35, 0x59, 0xe4, 0xe6, 0x9c, 0xad, 0x9e, 0x99, - 0x7f, 0x53, 0x1b, 0x14, 0x76, 0x46, 0x26, 0xa4, 0xd8, 0xd9, 0xd8, 0xde, 0x0e, 0xe2, 0xc0, 0x4f, - 0x9d, 0xcb, 0x33, 0xf9, 0x67, 0x63, 0x3d, 0x03, 0xbf, 0xfb, 0x6c, 0xcc, 0xc2, 0xc2, 0x99, 0x6d, - 0xa1, 0x16, 0x4c, 0xb6, 0x83, 0x30, 0xbe, 0x1f, 0x84, 0x72, 0x7d, 0xa1, 0x1e, 0xda, 0x04, 0x03, - 0x53, 0xb4, 0xc8, 0x22, 0x8a, 0x9b, 0x10, 0x9c, 0xa2, 0x89, 0x3e, 0x0f, 0x23, 0x51, 0xd3, 0xf1, - 0x48, 0xed, 0xce, 0xec, 0x99, 0xfc, 0x4b, 0xa7, 0xc1, 0x51, 0x72, 0x56, 0x17, 0x9b, 0x1c, 0x81, - 0x82, 0x25, 0x39, 0xb4, 0x02, 0x43, 0x2c, 0x2d, 0x00, 0x0b, 0x3e, 0x9d, 0x13, 0x18, 0xa9, 0xcb, - 0xae, 0x94, 0x9f, 0x4d, 0xac, 0x18, 0xf3, 0xea, 0x74, 0x0f, 0x08, 0xa6, 0x3a, 0x88, 0x66, 0xcf, - 0xe5, 0xef, 0x01, 0xc1, 0x8b, 0xdf, 0x69, 0xf4, 0xda, 0x03, 0x0a, 0x09, 0x27, 0x44, 0xe9, 0xc9, - 0x4c, 0x4f, 0xd3, 0xf3, 0x3d, 0xcc, 0x58, 0x72, 0xcf, 0x52, 0x76, 0x32, 0xd3, 0x93, 0x94, 0x92, - 0xb0, 0x7f, 0x77, 0xa4, 0x9b, 0x53, 0x61, 0x62, 0xd8, 0x9f, 0xb7, 0xba, 0x5e, 0xe8, 0x3e, 0x3d, - 0xa8, 0x56, 0xe8, 0x04, 0x79, 0xd4, 0xaf, 0x58, 0x70, 0xbe, 0x9d, 0xf9, 0x21, 0xe2, 0xda, 0x1f, - 0x4c, 0xb9, 0xc4, 0x3f, 0x5d, 0x05, 0x88, 0xcf, 0x86, 0xe3, 0x9c, 0x96, 0xd2, 0x72, 0x40, 0xf1, - 0x03, 0xcb, 0x01, 0xab, 0x30, 0xca, 0x58, 0xcb, 0x3e, 0x49, 0xd2, 0xd2, 0xe2, 0x10, 0x63, 0x20, - 0x96, 0x44, 0x45, 0xac, 0x48, 0xa0, 0x1f, 0xb0, 0xe0, 0x62, 0xba, 0xeb, 0x98, 0x30, 0xb0, 0x08, - 0xa7, 0xce, 0x25, 0xc0, 0x15, 0xf1, 0xfd, 0x17, 0xeb, 0xbd, 0x90, 0x8f, 0xfa, 0x21, 0xe0, 0xde, - 0x8d, 0xa1, 0x6a, 0x86, 0x08, 0x3a, 0x6c, 0xaa, 0xdd, 0x07, 0x10, 0x43, 0x5f, 0x82, 0xf1, 0xdd, - 0xa0, 0xe3, 0xc7, 0xc2, 0xea, 0x45, 0xf8, 0x29, 0xb2, 0x67, 0xe6, 0x55, 0xad, 0x1c, 0x1b, 0x58, - 0x29, 0xe1, 0x75, 0xf4, 0xa1, 0x85, 0xd7, 0x77, 0x52, 0xa9, 0xb0, 0xcb, 0xf9, 0x61, 0xfb, 
0x84, - 0x9c, 0x7f, 0x8c, 0x84, 0xd8, 0xa7, 0x2b, 0x11, 0xfd, 0xb4, 0x95, 0xc1, 0xca, 0x73, 0x19, 0xf9, - 0x35, 0x53, 0x46, 0xbe, 0x92, 0x96, 0x91, 0xbb, 0x54, 0xae, 0x86, 0x78, 0x3c, 0x78, 0xec, 0xe7, - 0x41, 0x83, 0xa9, 0xd9, 0x1e, 0x5c, 0xee, 0x77, 0x2d, 0x31, 0xf3, 0xa7, 0x96, 0x7a, 0x60, 0x4b, - 0xcc, 0x9f, 0x5a, 0xb5, 0x2a, 0x66, 0x90, 0x41, 0xa3, 0x6d, 0xd8, 0xff, 0xd5, 0x82, 0x62, 0x3d, - 0x68, 0x9d, 0x82, 0x0a, 0xf9, 0x73, 0x86, 0x0a, 0xf9, 0xf1, 0x9c, 0x14, 0xe5, 0xb9, 0x0a, 0xe3, - 0xe5, 0x94, 0xc2, 0xf8, 0x62, 0x1e, 0x81, 0xde, 0xea, 0xe1, 0x9f, 0x28, 0x82, 0x9e, 0x50, 0x1d, - 0xfd, 0xc6, 0xc3, 0xd8, 0x1e, 0x17, 0x7b, 0xe5, 0x58, 0x17, 0x94, 0x99, 0xd5, 0x94, 0x74, 0xbd, - 0xfb, 0x53, 0x66, 0x82, 0xfc, 0x16, 0x71, 0xb7, 0xb6, 0x63, 0xd2, 0x4a, 0x7f, 0xce, 0xe9, 0x99, - 0x20, 0xff, 0x67, 0x0b, 0xa6, 0x52, 0xad, 0x23, 0x0f, 0x26, 0x3c, 0x5d, 0xff, 0x27, 0xd6, 0xe9, - 0x43, 0xa9, 0x0e, 0x85, 0x09, 0xa7, 0x56, 0x84, 0x4d, 0xe2, 0x68, 0x1e, 0x40, 0xbd, 0xcf, 0x49, - 0xbd, 0x17, 0xe3, 0xfa, 0xd5, 0x03, 0x5e, 0x84, 0x35, 0x0c, 0xf4, 0x32, 0x8c, 0xc5, 0x41, 0x3b, - 0xf0, 0x82, 0xad, 0xfd, 0x5b, 0x44, 0xc6, 0x77, 0x51, 0x86, 0x59, 0xeb, 0x09, 0x08, 0xeb, 0x78, - 0xf6, 0x4f, 0x15, 0x21, 0x9d, 0x84, 0xff, 0x9b, 0x6b, 0xf2, 0xa3, 0xb9, 0x26, 0xbf, 0x61, 0xc1, - 0x34, 0x6d, 0x9d, 0x19, 0x89, 0xc8, 0xcb, 0x56, 0xe5, 0xa0, 0xb1, 0x7a, 0xe4, 0xa0, 0xb9, 0x42, - 0xcf, 0xae, 0x56, 0xd0, 0x89, 0x85, 0xde, 0x4c, 0x3b, 0x9c, 0x68, 0x29, 0x16, 0x50, 0x81, 0x47, - 0xc2, 0x50, 0x78, 0x3e, 0xe9, 0x78, 0x24, 0x0c, 0xb1, 0x80, 0xca, 0x14, 0x35, 0xa5, 0x9c, 0x14, - 0x35, 0x2c, 0x5a, 0x9d, 0x30, 0x27, 0x10, 0x6c, 0x8f, 0x16, 0xad, 0x4e, 0xda, 0x19, 0x24, 0x38, - 0xf6, 0xcf, 0x15, 0x61, 0xbc, 0x1e, 0xb4, 0x92, 0x17, 0xb2, 0x97, 0x8c, 0x17, 0xb2, 0xcb, 0xa9, - 0x17, 0xb2, 0x69, 0x1d, 0xf7, 0x9b, 0xef, 0x61, 0x1f, 0xd6, 0x7b, 0xd8, 0x3f, 0xb5, 0xd8, 0xac, - 0x55, 0xd7, 0x1a, 0x22, 0x45, 0xee, 0x0b, 0x30, 0xc6, 0x0e, 0x24, 0xe6, 0x6a, 0x27, 0x9f, 0x8d, - 0x58, 0xf4, 0xf9, 0xb5, 0xa4, 0x18, 0xeb, 0x38, 0xe8, 0x2a, 0x8c, 0x46, 0xc4, 0x09, 0x9b, 0xdb, - 0xea, 0x8c, 0x13, 0x8f, 0x2a, 0xbc, 0x0c, 0x2b, 0x28, 0x7a, 0x33, 0x09, 0x94, 0x56, 0xcc, 0x4f, - 0xf6, 0xaa, 0xf7, 0x87, 0x6f, 0x91, 0xfc, 0xe8, 0x68, 0xf6, 0x5b, 0x80, 0xba, 0xf1, 0x07, 0x08, - 0x89, 0x54, 0x31, 0x43, 0x22, 0x95, 0xbb, 0xc2, 0x21, 0xfd, 0xb1, 0x05, 0x93, 0xf5, 0xa0, 0x45, - 0xb7, 0xee, 0x9f, 0xa5, 0x7d, 0xaa, 0x47, 0x89, 0x1c, 0xee, 0x11, 0x25, 0xf2, 0xef, 0x58, 0x30, - 0x52, 0x0f, 0x5a, 0xa7, 0xa0, 0x6d, 0x7f, 0xcd, 0xd4, 0xb6, 0x3f, 0x96, 0xb3, 0x24, 0x72, 0x14, - 0xec, 0xbf, 0x50, 0x84, 0x09, 0xda, 0xcf, 0x60, 0x4b, 0xce, 0x92, 0x31, 0x22, 0xd6, 0x00, 0x23, - 0x42, 0xd9, 0xdc, 0xc0, 0xf3, 0x82, 0xfb, 0xe9, 0x19, 0x5b, 0x61, 0xa5, 0x58, 0x40, 0xd1, 0x73, - 0x30, 0xda, 0x0e, 0xc9, 0x9e, 0x1b, 0x08, 0xfe, 0x51, 0x7b, 0xbb, 0xa8, 0x8b, 0x72, 0xac, 0x30, - 0xa8, 0xdc, 0x15, 0xb9, 0x7e, 0x93, 0xc8, 0x4c, 0xd3, 0x25, 0x96, 0x8c, 0x8a, 0x87, 0x7f, 0xd6, - 0xca, 0xb1, 0x81, 0x85, 0xde, 0x82, 0x32, 0xfb, 0xcf, 0x4e, 0x94, 0xe3, 0x27, 0xcf, 0x11, 0x39, - 0x17, 0x04, 0x01, 0x9c, 0xd0, 0x42, 0xd7, 0x01, 0x62, 0x19, 0x22, 0x38, 0x12, 0x91, 0x6d, 0x14, - 0xaf, 0xad, 0x82, 0x07, 0x47, 0x58, 0xc3, 0x42, 0xcf, 0x42, 0x39, 0x76, 0x5c, 0xef, 0xb6, 0xeb, - 0x93, 0x88, 0xa9, 0x9c, 0x8b, 0x32, 0xa5, 0x82, 0x28, 0xc4, 0x09, 0x9c, 0xf2, 0x3a, 0xcc, 0xed, - 0x9b, 0xa7, 0xde, 0x1a, 0x65, 0xd8, 0x8c, 0xd7, 0xb9, 0xad, 0x4a, 0xb1, 0x86, 0x61, 0xbf, 0x02, - 0xe7, 0xea, 0x41, 0xab, 0x1e, 0x84, 0xf1, 0x4a, 0x10, 0xde, 0x77, 0xc2, 0x96, 0x9c, 0xbf, 0x8a, - 0x8c, 0xee, 0x4f, 
0xcf, 0x9e, 0x21, 0xbe, 0x33, 0x8d, 0xb8, 0xfd, 0x2f, 0x32, 0x6e, 0xe7, 0x98, - 0xae, 0x1c, 0x4d, 0x76, 0xef, 0xaa, 0x2c, 0x7b, 0x37, 0x9c, 0x98, 0xa0, 0x3b, 0x2c, 0x33, 0x57, - 0x72, 0x05, 0x89, 0xea, 0xcf, 0x68, 0x99, 0xb9, 0x12, 0x60, 0xe6, 0x9d, 0x65, 0xd6, 0xb7, 0x7f, - 0xb5, 0xc8, 0x4e, 0xa3, 0x54, 0xd2, 0x39, 0xf4, 0x25, 0x98, 0x8c, 0xc8, 0x6d, 0xd7, 0xef, 0x3c, - 0x90, 0x42, 0x78, 0x0f, 0x67, 0x9c, 0xc6, 0xb2, 0x8e, 0xc9, 0x55, 0x79, 0x66, 0x19, 0x4e, 0x51, - 0xa3, 0xf3, 0x14, 0x76, 0xfc, 0x85, 0xe8, 0x6e, 0x44, 0x42, 0x91, 0xf4, 0x8c, 0xcd, 0x13, 0x96, - 0x85, 0x38, 0x81, 0xd3, 0x75, 0xc9, 0xfe, 0xac, 0x05, 0x3e, 0x0e, 0x82, 0x58, 0xae, 0x64, 0x96, - 0x36, 0x47, 0x2b, 0xc7, 0x06, 0x16, 0x5a, 0x01, 0x14, 0x75, 0xda, 0x6d, 0x8f, 0x3d, 0xe7, 0x3b, - 0xde, 0x8d, 0x30, 0xe8, 0xb4, 0xf9, 0x5b, 0x67, 0x71, 0xf1, 0x3c, 0xbd, 0xc2, 0x1a, 0x5d, 0x50, - 0x9c, 0x51, 0x83, 0x9e, 0x3e, 0x9b, 0x11, 0xfb, 0xcd, 0x56, 0x77, 0x51, 0xa8, 0xd7, 0x1b, 0xac, - 0x08, 0x4b, 0x18, 0x5d, 0x4c, 0xac, 0x79, 0x8e, 0x39, 0x9c, 0x2c, 0x26, 0xac, 0x4a, 0xb1, 0x86, - 0x81, 0x96, 0x61, 0x24, 0xda, 0x8f, 0x9a, 0xb1, 0x88, 0xc3, 0x94, 0x93, 0xbe, 0xb2, 0xc1, 0x50, - 0xb4, 0x94, 0x0a, 0xbc, 0x0a, 0x96, 0x75, 0xed, 0xef, 0x62, 0x97, 0x21, 0x4b, 0x91, 0x15, 0x77, - 0x42, 0x82, 0x76, 0x61, 0xa2, 0xcd, 0xa6, 0x5c, 0x04, 0x70, 0x16, 0xf3, 0xf6, 0xd2, 0x80, 0x52, - 0xed, 0x7d, 0x7a, 0xd0, 0x28, 0xad, 0x13, 0x13, 0x17, 0xea, 0x3a, 0x39, 0x6c, 0x52, 0xb7, 0xff, - 0xd5, 0x0c, 0x3b, 0x73, 0x1b, 0x5c, 0x54, 0x1d, 0x11, 0x06, 0xc5, 0x82, 0x2f, 0x9f, 0xcb, 0xd7, - 0x99, 0x24, 0x5f, 0x24, 0x8c, 0x92, 0xb1, 0xac, 0x8b, 0xde, 0x64, 0x6f, 0xd3, 0xfc, 0xa0, 0xeb, - 0x97, 0xa9, 0x98, 0x63, 0x19, 0xcf, 0xd0, 0xa2, 0x22, 0xd6, 0x88, 0xa0, 0xdb, 0x30, 0x21, 0x32, - 0x2a, 0x09, 0xa5, 0x58, 0xd1, 0x50, 0x7a, 0x4c, 0x60, 0x1d, 0x78, 0x94, 0x2e, 0xc0, 0x66, 0x65, - 0xb4, 0x05, 0x17, 0xb5, 0xf4, 0x82, 0x37, 0x42, 0x87, 0xbd, 0x57, 0xba, 0x6c, 0x13, 0x69, 0xe7, - 0xe6, 0x93, 0x87, 0x07, 0x95, 0x8b, 0xeb, 0xbd, 0x10, 0x71, 0x6f, 0x3a, 0xe8, 0x0e, 0x9c, 0xe3, - 0x7e, 0x7b, 0x55, 0xe2, 0xb4, 0x3c, 0xd7, 0x57, 0x07, 0x33, 0x5f, 0x87, 0x17, 0x0e, 0x0f, 0x2a, - 0xe7, 0x16, 0xb2, 0x10, 0x70, 0x76, 0x3d, 0xf4, 0x1a, 0x94, 0x5b, 0x7e, 0x24, 0xc6, 0x60, 0xd8, - 0xc8, 0x9c, 0x59, 0xae, 0xae, 0x35, 0xd4, 0xf7, 0x27, 0x7f, 0x70, 0x52, 0x01, 0x6d, 0x71, 0xc5, - 0x98, 0x92, 0x43, 0x47, 0xf2, 0xb3, 0xa4, 0x8b, 0x25, 0x61, 0x78, 0xee, 0x70, 0x8d, 0xb0, 0xb2, - 0x7c, 0x35, 0x9c, 0x7a, 0x0c, 0xc2, 0xe8, 0x0d, 0x40, 0x94, 0x51, 0x73, 0x9b, 0x64, 0xa1, 0xc9, - 0xe2, 0x68, 0x33, 0x3d, 0xe2, 0xa8, 0xe1, 0x29, 0x81, 0x1a, 0x5d, 0x18, 0x38, 0xa3, 0x16, 0xba, - 0x49, 0x0f, 0x32, 0xbd, 0x54, 0x58, 0xf0, 0x4a, 0xe6, 0x7e, 0xb6, 0x4a, 0xda, 0x21, 0x69, 0x3a, - 0x31, 0x69, 0x99, 0x14, 0x71, 0xaa, 0x1e, 0xbd, 0x4b, 0x55, 0x4a, 0x1d, 0x30, 0x83, 0x65, 0x74, - 0xa7, 0xd5, 0xa1, 0x72, 0xf1, 0x76, 0x10, 0xc5, 0x6b, 0x24, 0xbe, 0x1f, 0x84, 0x3b, 0x22, 0x36, - 0x59, 0x12, 0x26, 0x33, 0x01, 0x61, 0x1d, 0x8f, 0xf2, 0xc1, 0xec, 0x71, 0xb8, 0x56, 0x65, 0x2f, - 0x74, 0xa3, 0xc9, 0x3e, 0xb9, 0xc9, 0x8b, 0xb1, 0x84, 0x4b, 0xd4, 0x5a, 0x7d, 0x89, 0xbd, 0xb6, - 0xa5, 0x50, 0x6b, 0xf5, 0x25, 0x2c, 0xe1, 0x88, 0x74, 0x67, 0x25, 0x9d, 0xcc, 0xd7, 0x6a, 0x76, - 0x5f, 0x07, 0x03, 0x26, 0x26, 0xf5, 0x61, 0x5a, 0xe5, 0x43, 0xe5, 0x41, 0xdb, 0xa2, 0xd9, 0x29, - 0xb6, 0x48, 0x06, 0x8f, 0xf8, 0xa6, 0xf4, 0xc4, 0xb5, 0x14, 0x25, 0xdc, 0x45, 0xdb, 0x08, 0x5f, - 0x32, 0xdd, 0x37, 0x25, 0xd2, 0x35, 0x28, 0x47, 0x9d, 0x8d, 0x56, 0xb0, 0xeb, 0xb8, 0x3e, 0x7b, - 0x1c, 0xd3, 0x98, 0xac, 0x86, 0x04, 0xe0, 
0x04, 0x07, 0xad, 0xc0, 0xa8, 0x23, 0x95, 0xc0, 0x28, - 0x3f, 0x56, 0x81, 0x52, 0xfd, 0x72, 0xf7, 0x5d, 0xa9, 0xf6, 0x55, 0x75, 0xd1, 0xab, 0x30, 0x21, - 0xbc, 0xb5, 0x78, 0x04, 0x07, 0xf6, 0x78, 0xa5, 0x99, 0xe3, 0x37, 0x74, 0x20, 0x36, 0x71, 0xd1, - 0x17, 0x61, 0x92, 0x52, 0x49, 0x0e, 0xb6, 0xd9, 0xb3, 0x83, 0x9c, 0x88, 0x5a, 0xaa, 0x0b, 0xbd, - 0x32, 0x4e, 0x11, 0x43, 0x2d, 0x78, 0xc2, 0xe9, 0xc4, 0x01, 0x53, 0xa4, 0x9b, 0xeb, 0x7f, 0x3d, - 0xd8, 0x21, 0x3e, 0x7b, 0xc3, 0x1a, 0x5d, 0xbc, 0x7c, 0x78, 0x50, 0x79, 0x62, 0xa1, 0x07, 0x1e, - 0xee, 0x49, 0x05, 0xdd, 0x85, 0xb1, 0x38, 0xf0, 0x98, 0x61, 0x3c, 0x65, 0x25, 0xce, 0xe7, 0x87, - 0xff, 0x59, 0x57, 0x68, 0xba, 0x12, 0x49, 0x55, 0xc5, 0x3a, 0x1d, 0xb4, 0xce, 0xf7, 0x18, 0x0b, - 0x8c, 0x4a, 0xa2, 0xd9, 0xc7, 0xf2, 0x07, 0x46, 0xc5, 0x4f, 0x35, 0xb7, 0xa0, 0xa8, 0x89, 0x75, - 0x32, 0xe8, 0x06, 0xcc, 0xb4, 0x43, 0x37, 0x60, 0x0b, 0x5b, 0x3d, 0x62, 0xcc, 0x9a, 0xd9, 0x0d, - 0xea, 0x69, 0x04, 0xdc, 0x5d, 0x87, 0x0a, 0x99, 0xb2, 0x70, 0xf6, 0x02, 0x4f, 0x95, 0xc5, 0x19, - 0x6f, 0x5e, 0x86, 0x15, 0x14, 0xad, 0xb2, 0x73, 0x99, 0x8b, 0x83, 0xb3, 0x73, 0xf9, 0x31, 0x1e, - 0x74, 0xb1, 0x91, 0xf3, 0x4b, 0xea, 0x2f, 0x4e, 0x28, 0xd0, 0x7b, 0x23, 0xda, 0x76, 0x42, 0x52, - 0x0f, 0x83, 0x26, 0xe1, 0x9d, 0xe1, 0x36, 0xf9, 0x8f, 0xf3, 0xf8, 0x8d, 0xf4, 0xde, 0x68, 0x64, - 0x21, 0xe0, 0xec, 0x7a, 0xa8, 0xa5, 0x65, 0x88, 0xa6, 0x6c, 0x68, 0x34, 0xfb, 0x44, 0x0f, 0x33, - 0xa3, 0x14, 0xcf, 0x9a, 0xac, 0x45, 0xa3, 0x38, 0xc2, 0x29, 0x9a, 0xe8, 0xdb, 0x60, 0x5a, 0x84, - 0x3b, 0x4a, 0xc6, 0xfd, 0x62, 0x62, 0xbf, 0x88, 0x53, 0x30, 0xdc, 0x85, 0x3d, 0xf7, 0xad, 0x30, - 0xd3, 0x75, 0xe3, 0x1c, 0x2b, 0xf8, 0xf8, 0x1f, 0x0d, 0x41, 0x59, 0x29, 0xd3, 0xd1, 0x35, 0xf3, - 0x8d, 0xe4, 0x42, 0xfa, 0x8d, 0x64, 0x94, 0xf2, 0xf4, 0xfa, 0xb3, 0xc8, 0xba, 0x61, 0x56, 0x57, - 0xc8, 0x4f, 0xf5, 0xa5, 0x73, 0xe5, 0x7d, 0x5d, 0xf4, 0x34, 0xdd, 0x48, 0x71, 0xe0, 0xc7, 0x96, - 0x52, 0x4f, 0x75, 0xcb, 0x80, 0x99, 0x76, 0xd1, 0x53, 0x54, 0xb0, 0x69, 0xd5, 0xea, 0xe9, 0xd4, - 0x93, 0x75, 0x5a, 0x88, 0x39, 0x8c, 0x09, 0x80, 0x94, 0x3d, 0x62, 0x02, 0xe0, 0xc8, 0x43, 0x0a, - 0x80, 0x92, 0x00, 0x4e, 0x68, 0x21, 0x0f, 0x66, 0x9a, 0x66, 0xd6, 0x50, 0xe5, 0x96, 0xf7, 0x54, - 0xdf, 0xfc, 0x9d, 0x1d, 0x2d, 0x45, 0xdb, 0x52, 0x9a, 0x0a, 0xee, 0x26, 0x8c, 0x5e, 0x85, 0xd1, - 0xf7, 0x82, 0x88, 0x2d, 0x26, 0xc1, 0x23, 0x48, 0xf7, 0xa5, 0xd1, 0x37, 0xef, 0x34, 0x58, 0xf9, - 0xd1, 0x41, 0x65, 0xac, 0x1e, 0xb4, 0xe4, 0x5f, 0xac, 0x2a, 0xa0, 0x07, 0x70, 0xce, 0x38, 0x59, - 0x55, 0x77, 0x61, 0xf0, 0xee, 0x5e, 0x14, 0xcd, 0x9d, 0xab, 0x65, 0x51, 0xc2, 0xd9, 0x0d, 0xd0, - 0xe3, 0xca, 0x0f, 0x44, 0xc6, 0x5d, 0xc9, 0x87, 0x30, 0x76, 0xa3, 0xac, 0x3b, 0xaf, 0xa7, 0x10, - 0x70, 0x77, 0x1d, 0xfb, 0x97, 0xf9, 0xdb, 0x83, 0xd0, 0x50, 0x92, 0xa8, 0xe3, 0x9d, 0x46, 0x42, - 0xa7, 0x65, 0x43, 0x79, 0xfa, 0xd0, 0xef, 0x5b, 0xbf, 0x6e, 0xb1, 0xf7, 0xad, 0x75, 0xb2, 0xdb, - 0xf6, 0xa8, 0x9c, 0xfc, 0xe8, 0x3b, 0xfe, 0x26, 0x8c, 0xc6, 0xa2, 0xb5, 0x5e, 0x39, 0xa8, 0xb4, - 0x4e, 0xb1, 0x37, 0x3e, 0xc5, 0xa1, 0xc8, 0x52, 0xac, 0xc8, 0xd8, 0xff, 0x98, 0xcf, 0x80, 0x84, - 0x9c, 0x82, 0x22, 0xab, 0x6a, 0x2a, 0xb2, 0x2a, 0x7d, 0xbe, 0x20, 0x47, 0xa1, 0xf5, 0x8f, 0xcc, - 0x7e, 0x33, 0x61, 0xf0, 0xa3, 0xfe, 0xb0, 0x6a, 0xff, 0xb0, 0x05, 0x67, 0xb3, 0x2c, 0x91, 0x28, - 0x57, 0xc9, 0x45, 0x51, 0xf5, 0xd0, 0xac, 0x46, 0xf0, 0x9e, 0x28, 0xc7, 0x0a, 0x63, 0xe0, 0xf4, - 0x0e, 0xc7, 0x8b, 0xef, 0x76, 0x07, 0x26, 0xea, 0x21, 0xd1, 0xee, 0x80, 0xd7, 0xb9, 0x1f, 0x1c, - 0xef, 0xcf, 0x73, 0xc7, 0xf6, 0x81, 0xb3, 0x7f, 0xa6, 0x00, 0x67, 
0xf9, 0x4b, 0xd1, 0xc2, 0x5e, - 0xe0, 0xb6, 0xea, 0x41, 0x4b, 0xa4, 0xe6, 0x78, 0x1b, 0xc6, 0xdb, 0x9a, 0xfe, 0xa0, 0x57, 0x84, - 0x29, 0x5d, 0xcf, 0x90, 0xc8, 0x71, 0x7a, 0x29, 0x36, 0x68, 0xa1, 0x16, 0x8c, 0x93, 0x3d, 0xb7, - 0xa9, 0x9e, 0x1b, 0x0a, 0xc7, 0xbe, 0x1b, 0x54, 0x2b, 0xcb, 0x1a, 0x1d, 0x6c, 0x50, 0x7d, 0x04, - 0xd9, 0xda, 0xec, 0x1f, 0xb1, 0xe0, 0xb1, 0x9c, 0x78, 0x54, 0xb4, 0xb9, 0xfb, 0xec, 0x4d, 0x4e, - 0x24, 0x7e, 0x52, 0xcd, 0xf1, 0x97, 0x3a, 0x2c, 0xa0, 0xe8, 0xf3, 0x00, 0xfc, 0xa5, 0x8d, 0x8a, - 0x35, 0xfd, 0x02, 0xf7, 0x18, 0x31, 0x47, 0xb4, 0x58, 0x11, 0xb2, 0x3e, 0xd6, 0x68, 0xd9, 0x3f, - 0x59, 0x84, 0x21, 0xf6, 0xb2, 0x83, 0x56, 0x60, 0x64, 0x9b, 0x47, 0x68, 0x1e, 0x24, 0x18, 0x74, - 0x22, 0x1f, 0xf2, 0x02, 0x2c, 0x2b, 0xa3, 0x55, 0x38, 0xc3, 0x23, 0x5c, 0x7b, 0x55, 0xe2, 0x39, - 0xfb, 0x52, 0xcd, 0xc0, 0x93, 0x25, 0xa9, 0xb8, 0x17, 0xb5, 0x6e, 0x14, 0x9c, 0x55, 0x0f, 0xbd, - 0x0e, 0x93, 0x94, 0x2f, 0x0b, 0x3a, 0xb1, 0xa4, 0xc4, 0x63, 0x5b, 0x2b, 0x46, 0x70, 0xdd, 0x80, - 0xe2, 0x14, 0x36, 0x15, 0x98, 0xda, 0x5d, 0x0a, 0x95, 0xa1, 0x44, 0x60, 0x32, 0x95, 0x28, 0x26, - 0x2e, 0x33, 0x41, 0xea, 0x30, 0x83, 0xab, 0xf5, 0xed, 0x90, 0x44, 0xdb, 0x81, 0xd7, 0x12, 0xb9, - 0xb6, 0x13, 0x13, 0xa4, 0x14, 0x1c, 0x77, 0xd5, 0xa0, 0x54, 0x36, 0x1d, 0xd7, 0xeb, 0x84, 0x24, - 0xa1, 0x32, 0x6c, 0x52, 0x59, 0x49, 0xc1, 0x71, 0x57, 0x0d, 0xba, 0x8e, 0xce, 0x89, 0xe4, 0xd7, - 0xd2, 0x1b, 0x5f, 0xd9, 0x95, 0x8d, 0x48, 0xbf, 0xa4, 0x1e, 0xe1, 0x68, 0x84, 0xe5, 0x8d, 0x4a, - 0x9f, 0xad, 0xe9, 0x01, 0x85, 0x47, 0x92, 0xa4, 0xf2, 0x30, 0x29, 0x98, 0x7f, 0xd7, 0x82, 0x33, - 0x19, 0xf6, 0xab, 0xfc, 0xa8, 0xda, 0x72, 0xa3, 0x58, 0x25, 0x84, 0xd1, 0x8e, 0x2a, 0x5e, 0x8e, - 0x15, 0x06, 0xdd, 0x0f, 0xfc, 0x30, 0x4c, 0x1f, 0x80, 0xc2, 0x3e, 0x4c, 0x40, 0x8f, 0x77, 0x00, - 0xa2, 0xcb, 0x50, 0xea, 0x44, 0x44, 0x06, 0x92, 0x52, 0xe7, 0x37, 0xd3, 0x0c, 0x33, 0x08, 0x65, - 0x4d, 0xb7, 0x94, 0x52, 0x56, 0x63, 0x4d, 0xb9, 0xa6, 0x95, 0xc3, 0xec, 0xaf, 0x16, 0xe1, 0x42, - 0xae, 0xa5, 0x3a, 0xed, 0xd2, 0x6e, 0xe0, 0xbb, 0x71, 0xa0, 0x5e, 0x0d, 0x79, 0x28, 0x13, 0xd2, - 0xde, 0x5e, 0x15, 0xe5, 0x58, 0x61, 0xa0, 0x2b, 0x32, 0x0d, 0x7b, 0x3a, 0xe5, 0xcd, 0x62, 0xd5, - 0xc8, 0xc4, 0x3e, 0x68, 0x3a, 0xb1, 0xa7, 0xa0, 0xd4, 0x0e, 0x02, 0x2f, 0x7d, 0x18, 0xd1, 0xee, - 0x06, 0x81, 0x87, 0x19, 0x10, 0x7d, 0x42, 0x8c, 0x43, 0xea, 0x99, 0x0c, 0x3b, 0xad, 0x20, 0xd2, - 0x06, 0xe3, 0x19, 0x18, 0xd9, 0x21, 0xfb, 0xa1, 0xeb, 0x6f, 0xa5, 0x9f, 0x4f, 0x6f, 0xf1, 0x62, - 0x2c, 0xe1, 0x66, 0xc6, 0x87, 0x91, 0x93, 0xce, 0x03, 0x36, 0xda, 0xf7, 0x6a, 0xfb, 0xfe, 0x22, - 0x4c, 0xe1, 0xc5, 0xea, 0x37, 0x27, 0xe2, 0x6e, 0xf7, 0x44, 0x9c, 0x74, 0x1e, 0xb0, 0xfe, 0xb3, - 0xf1, 0x0b, 0x16, 0x4c, 0xb1, 0xa8, 0xc8, 0x22, 0x80, 0x86, 0x1b, 0xf8, 0xa7, 0xc0, 0xba, 0x3d, - 0x05, 0x43, 0x21, 0x6d, 0x34, 0x9d, 0xdc, 0x87, 0xf5, 0x04, 0x73, 0x18, 0x7a, 0x02, 0x4a, 0xac, - 0x0b, 0x74, 0xf2, 0xc6, 0x79, 0x5e, 0x84, 0xaa, 0x13, 0x3b, 0x98, 0x95, 0x32, 0xaf, 0x70, 0x4c, - 0xda, 0x9e, 0xcb, 0x3b, 0x9d, 0x3c, 0x49, 0x7c, 0x34, 0xbc, 0xc2, 0x33, 0xbb, 0xf6, 0xc1, 0xbc, - 0xc2, 0xb3, 0x49, 0xf6, 0x16, 0x8b, 0xfe, 0x5b, 0x01, 0x2e, 0x65, 0xd6, 0x1b, 0xd8, 0x2b, 0xbc, - 0x77, 0xed, 0x93, 0xb1, 0x82, 0xc9, 0x36, 0x4e, 0x29, 0x9e, 0xa2, 0x71, 0x4a, 0x69, 0x50, 0xce, - 0x71, 0x68, 0x00, 0x67, 0xed, 0xcc, 0x21, 0xfb, 0x88, 0x38, 0x6b, 0x67, 0xf6, 0x2d, 0x47, 0xac, - 0xfb, 0x93, 0x42, 0xce, 0xb7, 0x30, 0x01, 0xef, 0x2a, 0x3d, 0x67, 0x18, 0x30, 0x12, 0x9c, 0xf0, - 0x38, 0x3f, 0x63, 0x78, 0x19, 0x56, 0x50, 0xe4, 0x6a, 0x6e, 0xcf, 0x85, 0xfc, 0xd4, 0x8f, 
0xb9, - 0x4d, 0xcd, 0x9b, 0x2f, 0x48, 0x6a, 0x08, 0x32, 0x5c, 0xa0, 0x57, 0x35, 0xa1, 0xbc, 0x38, 0xb8, - 0x50, 0x3e, 0x9e, 0x2d, 0x90, 0xa3, 0x05, 0x98, 0xda, 0x75, 0x7d, 0x96, 0xca, 0xdf, 0x64, 0x45, - 0x55, 0x14, 0x90, 0x55, 0x13, 0x8c, 0xd3, 0xf8, 0x73, 0xaf, 0xc2, 0xc4, 0xc3, 0xab, 0x23, 0xbf, - 0x51, 0x84, 0xc7, 0x7b, 0x6c, 0x7b, 0x7e, 0xd6, 0x1b, 0x73, 0xa0, 0x9d, 0xf5, 0x5d, 0xf3, 0x50, - 0x87, 0xb3, 0x9b, 0x1d, 0xcf, 0xdb, 0x67, 0xf6, 0x9f, 0xa4, 0x25, 0x31, 0x04, 0xaf, 0xf8, 0x84, - 0xcc, 0x44, 0xb1, 0x92, 0x81, 0x83, 0x33, 0x6b, 0xa2, 0x37, 0x00, 0x05, 0x22, 0xef, 0xec, 0x0d, - 0xe2, 0x0b, 0xbd, 0x3c, 0x1b, 0xf8, 0x62, 0xb2, 0x19, 0xef, 0x74, 0x61, 0xe0, 0x8c, 0x5a, 0x94, - 0xe9, 0xa7, 0xb7, 0xd2, 0xbe, 0xea, 0x56, 0x8a, 0xe9, 0xc7, 0x3a, 0x10, 0x9b, 0xb8, 0xe8, 0x06, - 0xcc, 0x38, 0x7b, 0x8e, 0xcb, 0xa3, 0xe3, 0x49, 0x02, 0x9c, 0xeb, 0x57, 0x4a, 0xb0, 0x85, 0x34, - 0x02, 0xee, 0xae, 0x93, 0x72, 0x8c, 0x1e, 0xce, 0x77, 0x8c, 0xee, 0x7d, 0x2e, 0xf6, 0xd3, 0xe9, - 0xda, 0xff, 0xde, 0xa2, 0xd7, 0x57, 0x46, 0xee, 0x78, 0x3a, 0x0e, 0x4a, 0x37, 0xa9, 0xf9, 0x28, - 0x9f, 0xd3, 0x2c, 0x3c, 0x12, 0x20, 0x36, 0x71, 0xf9, 0x82, 0x88, 0x12, 0x27, 0x19, 0x83, 0x75, - 0x17, 0x31, 0x0e, 0x14, 0x06, 0xfa, 0x02, 0x8c, 0xb4, 0xdc, 0x3d, 0x37, 0x0a, 0x42, 0xb1, 0x59, - 0x8e, 0xe9, 0x6a, 0x90, 0x9c, 0x83, 0x55, 0x4e, 0x06, 0x4b, 0x7a, 0xf6, 0xf7, 0x17, 0x60, 0x42, - 0xb6, 0xf8, 0x66, 0x27, 0x88, 0x9d, 0x53, 0xb8, 0x96, 0x6f, 0x18, 0xd7, 0xf2, 0x27, 0x7a, 0x05, - 0x7a, 0x60, 0x5d, 0xca, 0xbd, 0x8e, 0xef, 0xa4, 0xae, 0xe3, 0xa7, 0xfb, 0x93, 0xea, 0x7d, 0x0d, - 0xff, 0x13, 0x0b, 0x66, 0x0c, 0xfc, 0x53, 0xb8, 0x0d, 0x56, 0xcc, 0xdb, 0xe0, 0xc9, 0xbe, 0xdf, - 0x90, 0x73, 0x0b, 0x7c, 0x4f, 0x31, 0xd5, 0x77, 0x76, 0xfa, 0xbf, 0x07, 0xa5, 0x6d, 0x27, 0x6c, - 0xf5, 0x0a, 0x28, 0xdb, 0x55, 0x69, 0xfe, 0xa6, 0x13, 0xb6, 0xf8, 0x19, 0xfe, 0x9c, 0xca, 0x56, - 0xe9, 0x84, 0xad, 0xbe, 0x3e, 0x61, 0xac, 0x29, 0xf4, 0x0a, 0x0c, 0x47, 0xcd, 0xa0, 0xad, 0x2c, - 0x36, 0x2f, 0xf3, 0x4c, 0x96, 0xb4, 0xe4, 0xe8, 0xa0, 0x82, 0xcc, 0xe6, 0x68, 0x31, 0x16, 0xf8, - 0xe8, 0x6d, 0x98, 0x60, 0xbf, 0x94, 0xe5, 0x42, 0x31, 0x3f, 0x8d, 0x41, 0x43, 0x47, 0xe4, 0x06, - 0x30, 0x46, 0x11, 0x36, 0x49, 0xcd, 0x6d, 0x41, 0x59, 0x7d, 0xd6, 0x23, 0xf5, 0xe5, 0xf9, 0x37, - 0x45, 0x38, 0x93, 0xb1, 0xe6, 0x50, 0x64, 0xcc, 0xc4, 0x0b, 0x03, 0x2e, 0xd5, 0x0f, 0x38, 0x17, - 0x11, 0x93, 0x86, 0x5a, 0x62, 0x6d, 0x0d, 0xdc, 0xe8, 0xdd, 0x88, 0xa4, 0x1b, 0xa5, 0x45, 0xfd, - 0x1b, 0xa5, 0x8d, 0x9d, 0xda, 0x50, 0xd3, 0x86, 0x54, 0x4f, 0x1f, 0xe9, 0x9c, 0xfe, 0x61, 0x11, - 0xce, 0x66, 0xc5, 0x9e, 0x41, 0xdf, 0x99, 0x4a, 0x69, 0xf3, 0xd2, 0xa0, 0x51, 0x6b, 0x78, 0x9e, - 0x1b, 0x91, 0xa0, 0x79, 0xde, 0x4c, 0x72, 0xd3, 0x77, 0x98, 0x45, 0x9b, 0xcc, 0x01, 0x34, 0xe4, - 0xa9, 0x88, 0xe4, 0xf1, 0xf1, 0xe9, 0x81, 0x3b, 0x20, 0x72, 0x18, 0x45, 0x29, 0x07, 0x50, 0x59, - 0xdc, 0xdf, 0x01, 0x54, 0xb6, 0x3c, 0xe7, 0xc2, 0x98, 0xf6, 0x35, 0x8f, 0x74, 0xc6, 0x77, 0xe8, - 0x6d, 0xa5, 0xf5, 0xfb, 0x91, 0xce, 0xfa, 0x8f, 0x58, 0x90, 0x32, 0x8f, 0x54, 0xea, 0x2e, 0x2b, - 0x57, 0xdd, 0x75, 0x19, 0x4a, 0x61, 0xe0, 0x91, 0x74, 0x06, 0x19, 0x1c, 0x78, 0x04, 0x33, 0x08, - 0xc5, 0x88, 0x13, 0x65, 0xc7, 0xb8, 0x2e, 0xc8, 0x09, 0x11, 0xed, 0x29, 0x18, 0xf2, 0xc8, 0x1e, - 0xf1, 0xd2, 0xe1, 0xd9, 0x6f, 0xd3, 0x42, 0xcc, 0x61, 0xf6, 0x2f, 0x94, 0xe0, 0x62, 0x4f, 0x17, - 0x6a, 0x2a, 0x0e, 0x6d, 0x39, 0x31, 0xb9, 0xef, 0xec, 0xa7, 0xe3, 0x28, 0xdf, 0xe0, 0xc5, 0x58, - 0xc2, 0x99, 0xc5, 0x38, 0x8f, 0x9b, 0x98, 0x52, 0x0e, 0x8a, 0x70, 0x89, 0x02, 0xfa, 0x08, 0x92, - 0xd3, 0x5f, 0x07, 
0x88, 0x22, 0x6f, 0xd9, 0xa7, 0xdc, 0x5d, 0x4b, 0x98, 0xa2, 0x27, 0xf1, 0x35, - 0x1b, 0xb7, 0x05, 0x04, 0x6b, 0x58, 0xa8, 0x0a, 0xd3, 0xed, 0x30, 0x88, 0xb9, 0xae, 0xb5, 0xca, - 0x0d, 0x85, 0x86, 0x4c, 0xef, 0xd5, 0x7a, 0x0a, 0x8e, 0xbb, 0x6a, 0xa0, 0x97, 0x61, 0x4c, 0x78, - 0xb4, 0xd6, 0x83, 0xc0, 0x13, 0x6a, 0x20, 0x65, 0x76, 0xd2, 0x48, 0x40, 0x58, 0xc7, 0xd3, 0xaa, - 0x31, 0x05, 0xee, 0x48, 0x66, 0x35, 0xae, 0xc4, 0xd5, 0xf0, 0x52, 0x71, 0xa8, 0x46, 0x07, 0x8a, - 0x43, 0x95, 0x28, 0xc6, 0xca, 0x03, 0xbf, 0x59, 0x41, 0x5f, 0x55, 0xd2, 0xcf, 0x96, 0xe0, 0x8c, - 0x58, 0x38, 0x8f, 0x7a, 0xb9, 0x3c, 0xa2, 0x14, 0xfa, 0xdf, 0x5c, 0x33, 0xa7, 0xbd, 0x66, 0x7e, - 0xc0, 0x02, 0x93, 0xbd, 0x42, 0xff, 0x5f, 0x6e, 0x20, 0xfa, 0x97, 0x73, 0xd9, 0xb5, 0x96, 0xbc, - 0x40, 0x3e, 0x60, 0x48, 0x7a, 0xfb, 0xdf, 0x59, 0xf0, 0x64, 0x5f, 0x8a, 0x68, 0x19, 0xca, 0x8c, - 0x07, 0xd4, 0xa4, 0xb3, 0xa7, 0x95, 0x21, 0xa1, 0x04, 0xe4, 0xb0, 0xa4, 0x49, 0x4d, 0xb4, 0xdc, - 0x15, 0xf1, 0xff, 0x99, 0x8c, 0x88, 0xff, 0xe7, 0x8c, 0xe1, 0x79, 0xc8, 0x90, 0xff, 0xbf, 0x5c, - 0x84, 0x61, 0xbe, 0xe2, 0x4f, 0x41, 0x0c, 0x5b, 0x11, 0x7a, 0xdb, 0x1e, 0x91, 0xa8, 0x78, 0x5f, - 0xe6, 0xab, 0x4e, 0xec, 0x70, 0x36, 0x41, 0xdd, 0x56, 0x89, 0x86, 0x17, 0xcd, 0x1b, 0xf7, 0xd9, - 0x5c, 0x4a, 0x31, 0x09, 0x9c, 0x86, 0x76, 0xbb, 0x7d, 0x09, 0x20, 0x62, 0xd9, 0xf2, 0x29, 0x0d, - 0x11, 0xd3, 0xec, 0x93, 0x3d, 0x5a, 0x6f, 0x28, 0x64, 0xde, 0x87, 0x64, 0xa7, 0x2b, 0x00, 0xd6, - 0x28, 0xce, 0x7d, 0x06, 0xca, 0x0a, 0xb9, 0x9f, 0x16, 0x67, 0x5c, 0x67, 0x2e, 0x3e, 0x07, 0x53, - 0xa9, 0xb6, 0x8e, 0xa5, 0x04, 0xfa, 0x45, 0x0b, 0xa6, 0x78, 0x97, 0x97, 0xfd, 0x3d, 0x71, 0xa6, - 0xbe, 0x0f, 0x67, 0xbd, 0x8c, 0xb3, 0x4d, 0xcc, 0xe8, 0xe0, 0x67, 0xa1, 0x52, 0xfa, 0x64, 0x41, - 0x71, 0x66, 0x1b, 0xe8, 0x2a, 0x5d, 0xb7, 0xf4, 0xec, 0x72, 0x3c, 0xe1, 0x7d, 0x34, 0xce, 0xd7, - 0x2c, 0x2f, 0xc3, 0x0a, 0x6a, 0xff, 0xb6, 0x05, 0x33, 0xbc, 0xe7, 0xb7, 0xc8, 0xbe, 0xda, 0xe1, - 0x1f, 0x66, 0xdf, 0x45, 0x12, 0x8e, 0x42, 0x4e, 0x12, 0x0e, 0xfd, 0xd3, 0x8a, 0x3d, 0x3f, 0xed, - 0x67, 0x2c, 0x10, 0x2b, 0xf0, 0x14, 0x44, 0xf9, 0x6f, 0x35, 0x45, 0xf9, 0xb9, 0xfc, 0x45, 0x9d, - 0x23, 0xc3, 0xff, 0xb1, 0x05, 0xd3, 0x1c, 0x21, 0x79, 0x4b, 0xfe, 0x50, 0xe7, 0x61, 0x90, 0x6c, - 0x7a, 0x2a, 0xc5, 0x76, 0xf6, 0x47, 0x19, 0x93, 0x55, 0xea, 0x39, 0x59, 0x2d, 0xb9, 0x81, 0x8e, - 0x91, 0x49, 0xf2, 0xd8, 0xc1, 0xac, 0xed, 0x3f, 0xb0, 0x00, 0xf1, 0x66, 0x0c, 0xf6, 0x87, 0x32, - 0x15, 0xac, 0x54, 0xbb, 0x2e, 0x92, 0xa3, 0x46, 0x41, 0xb0, 0x86, 0x75, 0x22, 0xc3, 0x93, 0x32, - 0x08, 0x28, 0xf6, 0x37, 0x08, 0x38, 0xc6, 0x88, 0xfe, 0xef, 0x12, 0xa4, 0xdd, 0x01, 0xd0, 0x3d, - 0x18, 0x6f, 0x3a, 0x6d, 0x67, 0xc3, 0xf5, 0xdc, 0xd8, 0x25, 0x51, 0x2f, 0x4b, 0xa2, 0x25, 0x0d, - 0x4f, 0x3c, 0xf5, 0x6a, 0x25, 0xd8, 0xa0, 0x83, 0xe6, 0x01, 0xda, 0xa1, 0xbb, 0xe7, 0x7a, 0x64, - 0x8b, 0x69, 0x1c, 0x98, 0xbf, 0x23, 0x37, 0x8f, 0x91, 0xa5, 0x58, 0xc3, 0xc8, 0x70, 0x5d, 0x2b, - 0x3e, 0x3a, 0xd7, 0xb5, 0xd2, 0x31, 0x5d, 0xd7, 0x86, 0x06, 0x72, 0x5d, 0xc3, 0x70, 0x5e, 0xb2, - 0x48, 0xf4, 0xff, 0x8a, 0xeb, 0x11, 0xc1, 0x17, 0x73, 0x2f, 0xc8, 0xb9, 0xc3, 0x83, 0xca, 0x79, - 0x9c, 0x89, 0x81, 0x73, 0x6a, 0xa2, 0xcf, 0xc3, 0xac, 0xe3, 0x79, 0xc1, 0x7d, 0x35, 0x6a, 0xcb, - 0x51, 0xd3, 0xf1, 0xb8, 0xc6, 0x7e, 0x84, 0x51, 0x7d, 0xe2, 0xf0, 0xa0, 0x32, 0xbb, 0x90, 0x83, - 0x83, 0x73, 0x6b, 0xa7, 0x3c, 0xdf, 0x46, 0xfb, 0x7a, 0xbe, 0xbd, 0x06, 0xe5, 0x76, 0x18, 0x34, - 0x57, 0x35, 0x6f, 0x9c, 0x4b, 0x2c, 0x4f, 0xbd, 0x2c, 0x3c, 0x3a, 0xa8, 0x4c, 0xa8, 0x3f, 0xec, - 0x86, 0x4f, 0x2a, 0xd8, 0x3b, 0x70, 0xa6, 
0x41, 0x42, 0x97, 0x65, 0xc0, 0x6c, 0x25, 0x1b, 0x7a, - 0x1d, 0xca, 0x61, 0xea, 0x08, 0x1b, 0x28, 0xb0, 0x92, 0x16, 0xe5, 0x57, 0x1e, 0x59, 0x09, 0x21, - 0xfb, 0x8f, 0x2c, 0x18, 0x11, 0x0e, 0x0d, 0xa7, 0xc0, 0x39, 0x2d, 0x18, 0x0a, 0xec, 0x4a, 0xf6, - 0x31, 0xcf, 0x3a, 0x93, 0xab, 0xba, 0xae, 0xa5, 0x54, 0xd7, 0x4f, 0xf6, 0x22, 0xd2, 0x5b, 0x69, - 0xfd, 0x37, 0x8a, 0x30, 0x69, 0x3a, 0x73, 0x9c, 0xc2, 0x10, 0xac, 0xc1, 0x48, 0x24, 0x3c, 0x87, - 0x0a, 0xf9, 0x96, 0xd3, 0xe9, 0x49, 0x4c, 0xcc, 0xa2, 0x84, 0xaf, 0x90, 0x24, 0x92, 0xe9, 0x92, - 0x54, 0x7c, 0x84, 0x2e, 0x49, 0xfd, 0xfc, 0x69, 0x4a, 0x27, 0xe1, 0x4f, 0x63, 0x7f, 0x8d, 0x5d, - 0x35, 0x7a, 0xf9, 0x29, 0x70, 0x21, 0x37, 0xcc, 0x4b, 0xc9, 0xee, 0xb1, 0xb2, 0x44, 0xa7, 0x72, - 0xb8, 0x91, 0x9f, 0xb7, 0xe0, 0x62, 0xc6, 0x57, 0x69, 0xac, 0xc9, 0x73, 0x30, 0xea, 0x74, 0x5a, - 0xae, 0xda, 0xcb, 0xda, 0x33, 0xd6, 0x82, 0x28, 0xc7, 0x0a, 0x03, 0x2d, 0xc1, 0x0c, 0x79, 0xd0, - 0x76, 0xf9, 0x3b, 0xa2, 0x6e, 0xbb, 0x58, 0xe4, 0x21, 0x66, 0x97, 0xd3, 0x40, 0xdc, 0x8d, 0xaf, - 0xdc, 0xb1, 0x8b, 0xb9, 0xee, 0xd8, 0x7f, 0xdf, 0x82, 0x31, 0xd1, 0xed, 0x53, 0x18, 0xed, 0x6f, - 0x33, 0x47, 0xfb, 0xf1, 0x1e, 0xa3, 0x9d, 0x33, 0xcc, 0x7f, 0xab, 0xa0, 0xfa, 0x5b, 0x0f, 0xc2, - 0x78, 0x00, 0x96, 0xe7, 0x15, 0x18, 0x6d, 0x87, 0x41, 0x1c, 0x34, 0x03, 0x4f, 0x70, 0x3c, 0x4f, - 0x24, 0xd1, 0x02, 0x78, 0xf9, 0x91, 0xf6, 0x1b, 0x2b, 0x6c, 0x36, 0x7a, 0x41, 0x18, 0x0b, 0x2e, - 0x23, 0x19, 0xbd, 0x20, 0x8c, 0x31, 0x83, 0xa0, 0x16, 0x40, 0xec, 0x84, 0x5b, 0x24, 0xa6, 0x65, - 0x22, 0xf0, 0x48, 0xfe, 0xe1, 0xd1, 0x89, 0x5d, 0x6f, 0xde, 0xf5, 0xe3, 0x28, 0x0e, 0xe7, 0x6b, - 0x7e, 0x7c, 0x27, 0xe4, 0x02, 0x94, 0xe6, 0xfe, 0xaf, 0x68, 0x61, 0x8d, 0xae, 0xf4, 0xd1, 0x64, - 0x6d, 0x0c, 0x99, 0x0f, 0xe2, 0x6b, 0xa2, 0x1c, 0x2b, 0x0c, 0xfb, 0x33, 0xec, 0x2a, 0x61, 0x03, - 0x74, 0x3c, 0xcf, 0xfc, 0xaf, 0x8f, 0xaa, 0xa1, 0x65, 0xaf, 0x61, 0x55, 0xdd, 0xff, 0xbf, 0xf7, - 0xc9, 0x4d, 0x1b, 0xd6, 0xfd, 0x68, 0x92, 0x20, 0x01, 0xe8, 0xdb, 0xbb, 0xec, 0x24, 0x9e, 0xef, - 0x73, 0x05, 0x1c, 0xc3, 0x32, 0x82, 0x85, 0xbd, 0x66, 0xe1, 0x81, 0x6b, 0x75, 0xb1, 0xc8, 0xb5, - 0xb0, 0xd7, 0x02, 0x80, 0x13, 0x1c, 0x74, 0x4d, 0x88, 0xdf, 0x25, 0x23, 0xf9, 0x9d, 0x14, 0xbf, - 0xe5, 0xe7, 0x6b, 0xf2, 0xf7, 0x0b, 0x30, 0xa6, 0x92, 0xe0, 0xd5, 0x79, 0x2e, 0x31, 0x11, 0x86, - 0x65, 0x39, 0x29, 0xc6, 0x3a, 0x0e, 0x5a, 0x87, 0xa9, 0x88, 0xeb, 0x5e, 0x54, 0xb4, 0x3d, 0xae, - 0xc3, 0xfa, 0xa4, 0xb4, 0xaf, 0x68, 0x98, 0xe0, 0x23, 0x56, 0xc4, 0x8f, 0x0e, 0xe9, 0x68, 0x99, - 0x26, 0x81, 0x5e, 0x87, 0x49, 0x4f, 0x4f, 0x37, 0x5f, 0x17, 0x2a, 0x2e, 0x65, 0x7e, 0x6c, 0x24, - 0xa3, 0xaf, 0xe3, 0x14, 0x36, 0xe5, 0x94, 0xf4, 0x12, 0x11, 0x21, 0xd2, 0xf1, 0xb7, 0x48, 0x24, - 0x52, 0x78, 0x31, 0x4e, 0xe9, 0x76, 0x0e, 0x0e, 0xce, 0xad, 0x8d, 0x5e, 0x81, 0x71, 0xf9, 0xf9, - 0x9a, 0x1b, 0x71, 0x62, 0xe4, 0xae, 0xc1, 0xb0, 0x81, 0x89, 0xee, 0xc3, 0x39, 0xf9, 0x7f, 0x3d, - 0x74, 0x36, 0x37, 0xdd, 0xa6, 0xf0, 0xe2, 0xe6, 0x9e, 0x3e, 0x0b, 0xd2, 0x75, 0x68, 0x39, 0x0b, - 0xe9, 0xe8, 0xa0, 0x72, 0x59, 0x8c, 0x5a, 0x26, 0x9c, 0x4d, 0x62, 0x36, 0x7d, 0xb4, 0x0a, 0x67, - 0xb6, 0x89, 0xe3, 0xc5, 0xdb, 0x4b, 0xdb, 0xa4, 0xb9, 0x23, 0x37, 0x11, 0x73, 0x4e, 0xd6, 0x4c, - 0xc3, 0x6f, 0x76, 0xa3, 0xe0, 0xac, 0x7a, 0xe8, 0x1d, 0x98, 0x6d, 0x77, 0x36, 0x3c, 0x37, 0xda, - 0x5e, 0x0b, 0x62, 0x66, 0xd2, 0xa1, 0x72, 0xc8, 0x09, 0x2f, 0x66, 0xe5, 0x98, 0x5d, 0xcf, 0xc1, - 0xc3, 0xb9, 0x14, 0xd0, 0xfb, 0x70, 0x2e, 0xb5, 0x18, 0x84, 0x4f, 0xe5, 0x64, 0x7e, 0xbc, 0xdd, - 0x46, 0x56, 0x05, 0xe1, 0x23, 0x99, 0x05, 0xc2, 0xd9, 0x4d, 0x7c, 
0x30, 0x43, 0x9f, 0xf7, 0x68, - 0x65, 0x8d, 0x29, 0x43, 0x5f, 0x86, 0x71, 0x7d, 0x15, 0x89, 0x0b, 0xe6, 0x4a, 0x36, 0xcf, 0xa2, - 0xad, 0x36, 0xce, 0xd2, 0xa9, 0x15, 0xa5, 0xc3, 0xb0, 0x41, 0xd1, 0x26, 0x90, 0xfd, 0x7d, 0xe8, - 0x36, 0x8c, 0x36, 0x3d, 0x97, 0xf8, 0x71, 0xad, 0xde, 0x2b, 0xe8, 0xc7, 0x92, 0xc0, 0x11, 0x03, - 0x26, 0x02, 0x94, 0xf2, 0x32, 0xac, 0x28, 0xd8, 0xbf, 0x56, 0x80, 0x4a, 0x9f, 0x68, 0xb7, 0x29, - 0x7d, 0xb4, 0x35, 0x90, 0x3e, 0x7a, 0x41, 0x66, 0xc4, 0x5b, 0x4b, 0x09, 0xe9, 0xa9, 0x6c, 0x77, - 0x89, 0xa8, 0x9e, 0xc6, 0x1f, 0xd8, 0x3e, 0x58, 0x57, 0x69, 0x97, 0xfa, 0x5a, 0xae, 0x1b, 0x4f, - 0x59, 0x43, 0x83, 0x0b, 0x22, 0xb9, 0xcf, 0x12, 0xf6, 0xd7, 0x0a, 0x70, 0x4e, 0x0d, 0xe1, 0x9f, - 0xdd, 0x81, 0xbb, 0xdb, 0x3d, 0x70, 0x27, 0xf0, 0xa8, 0x63, 0xdf, 0x81, 0x61, 0x1e, 0x34, 0x65, - 0x00, 0x06, 0xe8, 0x29, 0x33, 0xc2, 0x96, 0xba, 0xa6, 0x8d, 0x28, 0x5b, 0x7f, 0xc1, 0x82, 0xa9, - 0xf5, 0xa5, 0x7a, 0x23, 0x68, 0xee, 0x90, 0x78, 0x81, 0x33, 0xac, 0x58, 0xf0, 0x3f, 0xd6, 0x43, - 0xf2, 0x35, 0x59, 0x1c, 0xd3, 0x65, 0x28, 0x6d, 0x07, 0x51, 0x9c, 0x7e, 0xf1, 0xbd, 0x19, 0x44, - 0x31, 0x66, 0x10, 0xfb, 0x77, 0x2c, 0x18, 0x62, 0x79, 0x5c, 0xfb, 0x25, 0x17, 0x1e, 0xe4, 0xbb, - 0xd0, 0xcb, 0x30, 0x4c, 0x36, 0x37, 0x49, 0x33, 0x16, 0xb3, 0x2a, 0xdd, 0x51, 0x87, 0x97, 0x59, - 0x29, 0xbd, 0xf4, 0x59, 0x63, 0xfc, 0x2f, 0x16, 0xc8, 0xe8, 0x2d, 0x28, 0xc7, 0xee, 0x2e, 0x59, - 0x68, 0xb5, 0xc4, 0x9b, 0xd9, 0x43, 0x78, 0xff, 0xae, 0x4b, 0x02, 0x38, 0xa1, 0x65, 0x7f, 0xb5, - 0x00, 0x90, 0xb8, 0xfe, 0xf7, 0xfb, 0xc4, 0xc5, 0xae, 0xd7, 0x94, 0x2b, 0x19, 0xaf, 0x29, 0x28, - 0x21, 0x98, 0xf1, 0x94, 0xa2, 0x86, 0xa9, 0x38, 0xd0, 0x30, 0x95, 0x8e, 0x33, 0x4c, 0x4b, 0x30, - 0x93, 0x84, 0x2e, 0x30, 0xe3, 0xb8, 0x30, 0x21, 0x65, 0x3d, 0x0d, 0xc4, 0xdd, 0xf8, 0x36, 0x81, - 0xcb, 0x32, 0xa2, 0xa6, 0xbc, 0x6b, 0x98, 0x49, 0xe6, 0x31, 0xf2, 0x4c, 0x27, 0xcf, 0x45, 0x85, - 0xdc, 0xe7, 0xa2, 0x1f, 0xb7, 0xe0, 0x6c, 0xba, 0x1d, 0xe6, 0xfb, 0xf6, 0x7d, 0x16, 0x9c, 0x63, - 0x8f, 0x66, 0xac, 0xd5, 0xee, 0x27, 0xba, 0x97, 0xb2, 0x43, 0x3a, 0xf4, 0xee, 0x71, 0xe2, 0xf7, - 0xbc, 0x9a, 0x45, 0x1a, 0x67, 0xb7, 0x68, 0xff, 0x65, 0x0b, 0x2e, 0xe4, 0xa6, 0x0f, 0x62, 0x12, - 0x64, 0xdb, 0xe5, 0x1a, 0xa9, 0xb4, 0x04, 0x59, 0xaf, 0x71, 0x9d, 0x94, 0xc2, 0x50, 0xa9, 0x0d, - 0x0b, 0xb9, 0xa9, 0x0d, 0xfb, 0x66, 0x2a, 0xb4, 0xbf, 0xd7, 0x02, 0xe1, 0xf2, 0x34, 0xc0, 0x41, - 0xf3, 0xb6, 0xcc, 0x0c, 0x6b, 0x04, 0x34, 0xbf, 0x9c, 0xef, 0x03, 0x26, 0xc2, 0x98, 0xab, 0x8b, - 0xdd, 0x08, 0x5e, 0x6e, 0xd0, 0xb2, 0x5b, 0x20, 0xa0, 0x55, 0xc2, 0xf4, 0x56, 0xfd, 0x7b, 0x73, - 0x1d, 0xa0, 0xc5, 0x70, 0xb5, 0xfc, 0x90, 0xea, 0x1a, 0xa9, 0x2a, 0x08, 0xd6, 0xb0, 0xec, 0x1f, - 0x2c, 0xc0, 0x98, 0x0c, 0xa0, 0xdd, 0xf1, 0x07, 0x91, 0x2e, 0x8f, 0x95, 0x47, 0x87, 0x25, 0x54, - 0xa5, 0x84, 0xeb, 0x89, 0x50, 0x9e, 0x24, 0x54, 0x95, 0x00, 0x9c, 0xe0, 0xa0, 0x67, 0x60, 0x24, - 0xea, 0x6c, 0x30, 0xf4, 0x94, 0x23, 0x4f, 0x83, 0x17, 0x63, 0x09, 0x47, 0x9f, 0x87, 0x69, 0x5e, - 0x2f, 0x0c, 0xda, 0xce, 0x16, 0x57, 0x81, 0x0e, 0x29, 0xcf, 0xda, 0xe9, 0xd5, 0x14, 0xec, 0xe8, - 0xa0, 0x72, 0x36, 0x5d, 0xc6, 0x94, 0xe7, 0x5d, 0x54, 0xec, 0x2f, 0x03, 0xea, 0x8e, 0x09, 0x8e, - 0xde, 0xe0, 0xe6, 0x54, 0x6e, 0x48, 0x5a, 0xbd, 0xb4, 0xe2, 0xba, 0x23, 0xa8, 0x34, 0xa6, 0xe7, - 0xb5, 0xb0, 0xaa, 0x6f, 0xff, 0x95, 0x22, 0x4c, 0xa7, 0xdd, 0x02, 0xd1, 0x4d, 0x18, 0xe6, 0x17, - 0x9e, 0x20, 0xdf, 0xe3, 0xd1, 0x55, 0x73, 0x26, 0x64, 0x5b, 0x5f, 0xdc, 0x99, 0xa2, 0x3e, 0x7a, - 0x07, 0xc6, 0x5a, 0xc1, 0x7d, 0xff, 0xbe, 0x13, 0xb6, 0x16, 0xea, 0x35, 0xb1, 0x2e, 0x33, 
0xf9, - 0xe6, 0x6a, 0x82, 0xa6, 0x3b, 0x28, 0xb2, 0x07, 0x86, 0x04, 0x84, 0x75, 0x72, 0x68, 0x9d, 0xc5, - 0x39, 0xdc, 0x74, 0xb7, 0x56, 0x9d, 0x76, 0x2f, 0xdb, 0xda, 0x25, 0x89, 0xa4, 0x51, 0x9e, 0x10, - 0xc1, 0x10, 0x39, 0x00, 0x27, 0x84, 0xd0, 0x77, 0xc2, 0x99, 0x28, 0x47, 0xd5, 0x96, 0x97, 0x22, - 0xa2, 0x97, 0xf6, 0x69, 0xf1, 0x31, 0x2a, 0xd1, 0x64, 0x29, 0xe5, 0xb2, 0x9a, 0xb1, 0xbf, 0x72, - 0x06, 0x8c, 0xdd, 0x68, 0xe4, 0x09, 0xb2, 0x4e, 0x28, 0x4f, 0x10, 0x86, 0x51, 0xb2, 0xdb, 0x8e, - 0xf7, 0xab, 0x6e, 0xd8, 0x2b, 0x8f, 0xdd, 0xb2, 0xc0, 0xe9, 0xa6, 0x29, 0x21, 0x58, 0xd1, 0xc9, - 0x4e, 0xe6, 0x54, 0xfc, 0x10, 0x93, 0x39, 0x95, 0x4e, 0x31, 0x99, 0xd3, 0x1a, 0x8c, 0x6c, 0xb9, - 0x31, 0x26, 0xed, 0x40, 0xb0, 0x9a, 0x99, 0xeb, 0xf0, 0x06, 0x47, 0xe9, 0x4e, 0x20, 0x22, 0x00, - 0x58, 0x12, 0x41, 0x6f, 0xa8, 0x1d, 0x38, 0x9c, 0x2f, 0xa9, 0x75, 0xbf, 0x0e, 0x66, 0xee, 0x41, - 0x91, 0xbc, 0x69, 0xe4, 0x61, 0x93, 0x37, 0xad, 0xc8, 0x94, 0x4b, 0xa3, 0xf9, 0x86, 0xf0, 0x2c, - 0xa3, 0x52, 0x9f, 0x44, 0x4b, 0x46, 0x72, 0xaa, 0xf2, 0xc9, 0x25, 0xa7, 0xfa, 0x5e, 0x0b, 0xce, - 0xb5, 0xb3, 0xf2, 0xb4, 0x89, 0x44, 0x49, 0x2f, 0x0f, 0x9c, 0x88, 0xce, 0x68, 0x90, 0x89, 0xec, - 0x99, 0x68, 0x38, 0xbb, 0x39, 0x3a, 0xd0, 0xe1, 0x46, 0x4b, 0x64, 0x57, 0x7a, 0x2a, 0x27, 0xcb, - 0x55, 0x8f, 0xdc, 0x56, 0xeb, 0x19, 0x19, 0x95, 0x3e, 0x9e, 0x97, 0x51, 0x69, 0xe0, 0x3c, 0x4a, - 0x6f, 0xa8, 0xfc, 0x56, 0x13, 0xf9, 0x4b, 0x89, 0x67, 0xaf, 0xea, 0x9b, 0xd5, 0xea, 0x0d, 0x95, - 0xd5, 0xaa, 0x47, 0xbc, 0x37, 0x9e, 0xb3, 0xaa, 0x6f, 0x2e, 0x2b, 0x2d, 0x1f, 0xd5, 0xd4, 0xc9, - 0xe4, 0xa3, 0x32, 0xae, 0x1a, 0x9e, 0x12, 0xe9, 0xd9, 0x3e, 0x57, 0x8d, 0x41, 0xb7, 0xf7, 0x65, - 0xc3, 0x73, 0x6f, 0xcd, 0x3c, 0x54, 0xee, 0xad, 0x7b, 0x7a, 0x2e, 0x2b, 0xd4, 0x27, 0x59, 0x13, - 0x45, 0x1a, 0x30, 0x83, 0xd5, 0x3d, 0xfd, 0x02, 0x3c, 0x93, 0x4f, 0x57, 0xdd, 0x73, 0xdd, 0x74, - 0x33, 0xaf, 0xc0, 0xae, 0xcc, 0x58, 0x67, 0x4f, 0x27, 0x33, 0xd6, 0xb9, 0x13, 0xcf, 0x8c, 0x75, - 0xfe, 0x14, 0x32, 0x63, 0x3d, 0xf6, 0xa1, 0x66, 0xc6, 0x9a, 0x7d, 0x04, 0x99, 0xb1, 0xd6, 0x92, - 0xcc, 0x58, 0x17, 0xf2, 0xa7, 0x24, 0xc3, 0x3a, 0x37, 0x27, 0x1f, 0xd6, 0x3d, 0xf6, 0x44, 0xcf, - 0xe3, 0x56, 0x88, 0x80, 0x74, 0xd9, 0xb9, 0x7f, 0xb3, 0x82, 0x5b, 0xf0, 0x29, 0x51, 0x20, 0x9c, - 0x90, 0xa2, 0x74, 0x93, 0xfc, 0x58, 0x8f, 0xf7, 0x50, 0xca, 0x66, 0xa9, 0xbb, 0xf2, 0xb3, 0x62, - 0xd9, 0x7f, 0xb1, 0x00, 0x97, 0x7a, 0xaf, 0xeb, 0x44, 0x57, 0x56, 0x4f, 0xde, 0x76, 0x52, 0xba, - 0x32, 0x2e, 0xe4, 0x24, 0x58, 0x03, 0x07, 0xf7, 0xb9, 0x01, 0x33, 0xca, 0x2c, 0xd7, 0x73, 0x9b, - 0xfb, 0x5a, 0x4e, 0x60, 0xe5, 0x7e, 0xd8, 0x48, 0x23, 0xe0, 0xee, 0x3a, 0x68, 0x01, 0xa6, 0x8c, - 0xc2, 0x5a, 0x55, 0x08, 0x33, 0x4a, 0x39, 0xd7, 0x30, 0xc1, 0x38, 0x8d, 0x6f, 0xff, 0xb4, 0x05, - 0x8f, 0xe5, 0x24, 0x8d, 0x18, 0x38, 0x76, 0xcd, 0x26, 0x4c, 0xb5, 0xcd, 0xaa, 0x7d, 0x42, 0x5c, - 0x19, 0xa9, 0x29, 0x54, 0x5f, 0x53, 0x00, 0x9c, 0x26, 0xba, 0x78, 0xf5, 0x37, 0x7f, 0xef, 0xd2, - 0xc7, 0x7e, 0xeb, 0xf7, 0x2e, 0x7d, 0xec, 0xb7, 0x7f, 0xef, 0xd2, 0xc7, 0xfe, 0xff, 0xc3, 0x4b, - 0xd6, 0x6f, 0x1e, 0x5e, 0xb2, 0x7e, 0xeb, 0xf0, 0x92, 0xf5, 0xdb, 0x87, 0x97, 0xac, 0xdf, 0x3d, - 0xbc, 0x64, 0x7d, 0xf5, 0xf7, 0x2f, 0x7d, 0xec, 0xed, 0xc2, 0xde, 0x0b, 0xff, 0x2f, 0x00, 0x00, - 0xff, 0xff, 0xae, 0x44, 0xd1, 0x00, 0x11, 0xe7, 0x00, 0x00, + // 12835 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x6d, 0x70, 0x64, 0x57, + 0x56, 0xd8, 0xbe, 0xee, 0xd6, 0x47, 0x1f, 0x7d, 0xdf, 0x99, 0xb1, 0x35, 0xb2, 0x67, 0x7a, 0xfc, + 
0xbc, 0x3b, 0x1e, 0xaf, 0x6d, 0xcd, 0x7a, 0x6c, 0xaf, 0xcd, 0xda, 0x6b, 0x90, 0xd4, 0xd2, 0x4c, + 0x7b, 0x46, 0x9a, 0xf6, 0x6d, 0xcd, 0x78, 0xd7, 0x78, 0x97, 0x7d, 0xea, 0xbe, 0x92, 0x9e, 0xf5, + 0xf4, 0x5e, 0xfb, 0xbd, 0xd7, 0x9a, 0x91, 0x03, 0x55, 0xc9, 0x12, 0x48, 0x36, 0x50, 0xa9, 0xad, + 0xb0, 0x95, 0x0f, 0xa0, 0x48, 0x15, 0x21, 0x05, 0x84, 0x24, 0x15, 0x02, 0x01, 0xc2, 0x42, 0x42, + 0x20, 0x3f, 0xc8, 0x9f, 0x0d, 0x49, 0x55, 0x6a, 0xa9, 0xa2, 0xa2, 0x80, 0x48, 0x25, 0xc5, 0x8f, + 0x40, 0x2a, 0xe4, 0x47, 0x50, 0xa8, 0x90, 0xba, 0x9f, 0xef, 0xde, 0xd7, 0xef, 0x75, 0xb7, 0xc6, + 0x1a, 0xd9, 0x50, 0xfb, 0xaf, 0xfb, 0x9e, 0x73, 0xcf, 0xbd, 0xef, 0x7e, 0x9e, 0x73, 0xee, 0xf9, + 0x80, 0x57, 0x77, 0x5e, 0x89, 0xe6, 0xdd, 0xe0, 0xea, 0x4e, 0x67, 0x83, 0x84, 0x3e, 0x89, 0x49, + 0x74, 0x75, 0x8f, 0xf8, 0xad, 0x20, 0xbc, 0x2a, 0x00, 0x4e, 0xdb, 0xbd, 0xda, 0x0c, 0x42, 0x72, + 0x75, 0xef, 0xf9, 0xab, 0x5b, 0xc4, 0x27, 0xa1, 0x13, 0x93, 0xd6, 0x7c, 0x3b, 0x0c, 0xe2, 0x00, + 0x21, 0x8e, 0x33, 0xef, 0xb4, 0xdd, 0x79, 0x8a, 0x33, 0xbf, 0xf7, 0xfc, 0xdc, 0x73, 0x5b, 0x6e, + 0xbc, 0xdd, 0xd9, 0x98, 0x6f, 0x06, 0xbb, 0x57, 0xb7, 0x82, 0xad, 0xe0, 0x2a, 0x43, 0xdd, 0xe8, + 0x6c, 0xb2, 0x7f, 0xec, 0x0f, 0xfb, 0xc5, 0x49, 0xcc, 0xbd, 0x98, 0x34, 0xb3, 0xeb, 0x34, 0xb7, + 0x5d, 0x9f, 0x84, 0xfb, 0x57, 0xdb, 0x3b, 0x5b, 0xac, 0xdd, 0x90, 0x44, 0x41, 0x27, 0x6c, 0x92, + 0x74, 0xc3, 0x3d, 0x6b, 0x45, 0x57, 0x77, 0x49, 0xec, 0x64, 0x74, 0x77, 0xee, 0x6a, 0x5e, 0xad, + 0xb0, 0xe3, 0xc7, 0xee, 0x6e, 0x77, 0x33, 0x9f, 0xee, 0x57, 0x21, 0x6a, 0x6e, 0x93, 0x5d, 0xa7, + 0xab, 0xde, 0x0b, 0x79, 0xf5, 0x3a, 0xb1, 0xeb, 0x5d, 0x75, 0xfd, 0x38, 0x8a, 0xc3, 0x74, 0x25, + 0xfb, 0x9b, 0x16, 0x5c, 0x5a, 0x78, 0xab, 0xb1, 0xec, 0x39, 0x51, 0xec, 0x36, 0x17, 0xbd, 0xa0, + 0xb9, 0xd3, 0x88, 0x83, 0x90, 0xdc, 0x0d, 0xbc, 0xce, 0x2e, 0x69, 0xb0, 0x81, 0x40, 0xcf, 0xc2, + 0xe8, 0x1e, 0xfb, 0x5f, 0xab, 0xce, 0x5a, 0x97, 0xac, 0x2b, 0xe5, 0xc5, 0xe9, 0xdf, 0x3c, 0xa8, + 0x7c, 0xec, 0xf0, 0xa0, 0x32, 0x7a, 0x57, 0x94, 0x63, 0x85, 0x81, 0x2e, 0xc3, 0xf0, 0x66, 0xb4, + 0xbe, 0xdf, 0x26, 0xb3, 0x05, 0x86, 0x3b, 0x29, 0x70, 0x87, 0x57, 0x1a, 0xb4, 0x14, 0x0b, 0x28, + 0xba, 0x0a, 0xe5, 0xb6, 0x13, 0xc6, 0x6e, 0xec, 0x06, 0xfe, 0x6c, 0xf1, 0x92, 0x75, 0x65, 0x68, + 0x71, 0x46, 0xa0, 0x96, 0xeb, 0x12, 0x80, 0x13, 0x1c, 0xda, 0x8d, 0x90, 0x38, 0xad, 0xdb, 0xbe, + 0xb7, 0x3f, 0x5b, 0xba, 0x64, 0x5d, 0x19, 0x4d, 0xba, 0x81, 0x45, 0x39, 0x56, 0x18, 0xf6, 0x0f, + 0x17, 0x60, 0x74, 0x61, 0x73, 0xd3, 0xf5, 0xdd, 0x78, 0x1f, 0xdd, 0x85, 0x71, 0x3f, 0x68, 0x11, + 0xf9, 0x9f, 0x7d, 0xc5, 0xd8, 0xb5, 0x4b, 0xf3, 0xdd, 0x4b, 0x69, 0x7e, 0x4d, 0xc3, 0x5b, 0x9c, + 0x3e, 0x3c, 0xa8, 0x8c, 0xeb, 0x25, 0xd8, 0xa0, 0x83, 0x30, 0x8c, 0xb5, 0x83, 0x96, 0x22, 0x5b, + 0x60, 0x64, 0x2b, 0x59, 0x64, 0xeb, 0x09, 0xda, 0xe2, 0xd4, 0xe1, 0x41, 0x65, 0x4c, 0x2b, 0xc0, + 0x3a, 0x11, 0xb4, 0x01, 0x53, 0xf4, 0xaf, 0x1f, 0xbb, 0x8a, 0x6e, 0x91, 0xd1, 0x7d, 0x32, 0x8f, + 0xae, 0x86, 0xba, 0x78, 0xe6, 0xf0, 0xa0, 0x32, 0x95, 0x2a, 0xc4, 0x69, 0x82, 0xf6, 0xfb, 0x30, + 0xb9, 0x10, 0xc7, 0x4e, 0x73, 0x9b, 0xb4, 0xf8, 0x0c, 0xa2, 0x17, 0xa1, 0xe4, 0x3b, 0xbb, 0x44, + 0xcc, 0xef, 0x25, 0x31, 0xb0, 0xa5, 0x35, 0x67, 0x97, 0x1c, 0x1d, 0x54, 0xa6, 0xef, 0xf8, 0xee, + 0x7b, 0x1d, 0xb1, 0x2a, 0x68, 0x19, 0x66, 0xd8, 0xe8, 0x1a, 0x40, 0x8b, 0xec, 0xb9, 0x4d, 0x52, + 0x77, 0xe2, 0x6d, 0x31, 0xdf, 0x48, 0xd4, 0x85, 0xaa, 0x82, 0x60, 0x0d, 0xcb, 0xbe, 0x0f, 0xe5, + 0x85, 0xbd, 0xc0, 0x6d, 0xd5, 0x83, 0x56, 0x84, 0x76, 0x60, 0xaa, 0x1d, 0x92, 0x4d, 0x12, 0xaa, + 0xa2, 0x59, 0xeb, 0x52, 
0xf1, 0xca, 0xd8, 0xb5, 0x2b, 0x99, 0x1f, 0x6b, 0xa2, 0x2e, 0xfb, 0x71, + 0xb8, 0xbf, 0xf8, 0xa8, 0x68, 0x6f, 0x2a, 0x05, 0xc5, 0x69, 0xca, 0xf6, 0xbf, 0x2d, 0xc0, 0xb9, + 0x85, 0xf7, 0x3b, 0x21, 0xa9, 0xba, 0xd1, 0x4e, 0x7a, 0x85, 0xb7, 0xdc, 0x68, 0x67, 0x2d, 0x19, + 0x01, 0xb5, 0xb4, 0xaa, 0xa2, 0x1c, 0x2b, 0x0c, 0xf4, 0x1c, 0x8c, 0xd0, 0xdf, 0x77, 0x70, 0x4d, + 0x7c, 0xf2, 0x19, 0x81, 0x3c, 0x56, 0x75, 0x62, 0xa7, 0xca, 0x41, 0x58, 0xe2, 0xa0, 0x55, 0x18, + 0x6b, 0xb2, 0x0d, 0xb9, 0xb5, 0x1a, 0xb4, 0x08, 0x9b, 0xcc, 0xf2, 0xe2, 0x33, 0x14, 0x7d, 0x29, + 0x29, 0x3e, 0x3a, 0xa8, 0xcc, 0xf2, 0xbe, 0x09, 0x12, 0x1a, 0x0c, 0xeb, 0xf5, 0x91, 0xad, 0xf6, + 0x57, 0x89, 0x51, 0x82, 0x8c, 0xbd, 0x75, 0x45, 0xdb, 0x2a, 0x43, 0x6c, 0xab, 0x8c, 0x67, 0x6f, + 0x13, 0xf4, 0x3c, 0x94, 0x76, 0x5c, 0xbf, 0x35, 0x3b, 0xcc, 0x68, 0x5d, 0xa0, 0x73, 0x7e, 0xd3, + 0xf5, 0x5b, 0x47, 0x07, 0x95, 0x19, 0xa3, 0x3b, 0xb4, 0x10, 0x33, 0x54, 0xfb, 0x8f, 0x2d, 0xa8, + 0x30, 0xd8, 0x8a, 0xeb, 0x91, 0x3a, 0x09, 0x23, 0x37, 0x8a, 0x89, 0x1f, 0x1b, 0x03, 0x7a, 0x0d, + 0x20, 0x22, 0xcd, 0x90, 0xc4, 0xda, 0x90, 0xaa, 0x85, 0xd1, 0x50, 0x10, 0xac, 0x61, 0xd1, 0x03, + 0x21, 0xda, 0x76, 0x42, 0xb6, 0xbe, 0xc4, 0xc0, 0xaa, 0x03, 0xa1, 0x21, 0x01, 0x38, 0xc1, 0x31, + 0x0e, 0x84, 0x62, 0xbf, 0x03, 0x01, 0x7d, 0x16, 0xa6, 0x92, 0xc6, 0xa2, 0xb6, 0xd3, 0x94, 0x03, + 0xc8, 0xb6, 0x4c, 0xc3, 0x04, 0xe1, 0x34, 0xae, 0xfd, 0x8f, 0x2c, 0xb1, 0x78, 0xe8, 0x57, 0x7f, + 0xc4, 0xbf, 0xd5, 0xfe, 0x25, 0x0b, 0x46, 0x16, 0x5d, 0xbf, 0xe5, 0xfa, 0x5b, 0xe8, 0x4b, 0x30, + 0x4a, 0xef, 0xa6, 0x96, 0x13, 0x3b, 0xe2, 0xdc, 0xfb, 0x94, 0xb6, 0xb7, 0xd4, 0x55, 0x31, 0xdf, + 0xde, 0xd9, 0xa2, 0x05, 0xd1, 0x3c, 0xc5, 0xa6, 0xbb, 0xed, 0xf6, 0xc6, 0xbb, 0xa4, 0x19, 0xaf, + 0x92, 0xd8, 0x49, 0x3e, 0x27, 0x29, 0xc3, 0x8a, 0x2a, 0xba, 0x09, 0xc3, 0xb1, 0x13, 0x6e, 0x91, + 0x58, 0x1c, 0x80, 0x99, 0x07, 0x15, 0xaf, 0x89, 0xe9, 0x8e, 0x24, 0x7e, 0x93, 0x24, 0xd7, 0xc2, + 0x3a, 0xab, 0x8a, 0x05, 0x09, 0xfb, 0x6f, 0x0c, 0xc3, 0xf9, 0xa5, 0x46, 0x2d, 0x67, 0x5d, 0x5d, + 0x86, 0xe1, 0x56, 0xe8, 0xee, 0x91, 0x50, 0x8c, 0xb3, 0xa2, 0x52, 0x65, 0xa5, 0x58, 0x40, 0xd1, + 0x2b, 0x30, 0xce, 0x2f, 0xa4, 0x1b, 0x8e, 0xdf, 0xf2, 0xe4, 0x10, 0x9f, 0x15, 0xd8, 0xe3, 0x77, + 0x35, 0x18, 0x36, 0x30, 0x8f, 0xb9, 0xa8, 0x2e, 0xa7, 0x36, 0x63, 0xde, 0x65, 0xf7, 0x15, 0x0b, + 0xa6, 0x79, 0x33, 0x0b, 0x71, 0x1c, 0xba, 0x1b, 0x9d, 0x98, 0x44, 0xb3, 0x43, 0xec, 0xa4, 0x5b, + 0xca, 0x1a, 0xad, 0xdc, 0x11, 0x98, 0xbf, 0x9b, 0xa2, 0xc2, 0x0f, 0xc1, 0x59, 0xd1, 0xee, 0x74, + 0x1a, 0x8c, 0xbb, 0x9a, 0x45, 0xdf, 0x6b, 0xc1, 0x5c, 0x33, 0xf0, 0xe3, 0x30, 0xf0, 0x3c, 0x12, + 0xd6, 0x3b, 0x1b, 0x9e, 0x1b, 0x6d, 0xf3, 0x75, 0x8a, 0xc9, 0x26, 0x3b, 0x09, 0x72, 0xe6, 0x50, + 0x21, 0x89, 0x39, 0xbc, 0x78, 0x78, 0x50, 0x99, 0x5b, 0xca, 0x25, 0x85, 0x7b, 0x34, 0x83, 0x76, + 0x00, 0xd1, 0xab, 0xb4, 0x11, 0x3b, 0x5b, 0x24, 0x69, 0x7c, 0x64, 0xf0, 0xc6, 0x1f, 0x39, 0x3c, + 0xa8, 0xa0, 0xb5, 0x2e, 0x12, 0x38, 0x83, 0x2c, 0x7a, 0x0f, 0xce, 0xd2, 0xd2, 0xae, 0x6f, 0x1d, + 0x1d, 0xbc, 0xb9, 0xd9, 0xc3, 0x83, 0xca, 0xd9, 0xb5, 0x0c, 0x22, 0x38, 0x93, 0xf4, 0xdc, 0x12, + 0x9c, 0xcb, 0x9c, 0x2a, 0x34, 0x0d, 0xc5, 0x1d, 0xc2, 0x59, 0x90, 0x32, 0xa6, 0x3f, 0xd1, 0x59, + 0x18, 0xda, 0x73, 0xbc, 0x8e, 0x58, 0xa5, 0x98, 0xff, 0xf9, 0x4c, 0xe1, 0x15, 0xcb, 0x6e, 0xc2, + 0xf8, 0x92, 0xd3, 0x76, 0x36, 0x5c, 0xcf, 0x8d, 0x5d, 0x12, 0xa1, 0xa7, 0xa0, 0xe8, 0xb4, 0x5a, + 0xec, 0x8a, 0x2c, 0x2f, 0x9e, 0x3b, 0x3c, 0xa8, 0x14, 0x17, 0x5a, 0xf4, 0xac, 0x06, 0x85, 0xb5, + 0x8f, 0x29, 0x06, 0xfa, 0x24, 0x94, 0x5a, 0x61, 
0xd0, 0x9e, 0x2d, 0x30, 0x4c, 0x3a, 0x54, 0xa5, + 0x6a, 0x18, 0xb4, 0x53, 0xa8, 0x0c, 0xc7, 0xfe, 0xb5, 0x02, 0x3c, 0xbe, 0x44, 0xda, 0xdb, 0x2b, + 0x8d, 0x9c, 0x4d, 0x77, 0x05, 0x46, 0x77, 0x03, 0xdf, 0x8d, 0x83, 0x30, 0x12, 0x4d, 0xb3, 0xdb, + 0x64, 0x55, 0x94, 0x61, 0x05, 0x45, 0x97, 0xa0, 0xd4, 0x4e, 0x38, 0x81, 0x71, 0xc9, 0x45, 0x30, + 0x1e, 0x80, 0x41, 0x28, 0x46, 0x27, 0x22, 0xa1, 0xb8, 0x05, 0x15, 0xc6, 0x9d, 0x88, 0x84, 0x98, + 0x41, 0x92, 0xe3, 0x94, 0x1e, 0xb4, 0x62, 0x5b, 0xa5, 0x8e, 0x53, 0x0a, 0xc1, 0x1a, 0x16, 0xaa, + 0x43, 0x39, 0x52, 0x93, 0x3a, 0x34, 0xf8, 0xa4, 0x4e, 0xb0, 0xf3, 0x56, 0xcd, 0x64, 0x42, 0xc4, + 0x38, 0x06, 0x86, 0xfb, 0x9e, 0xb7, 0x5f, 0x2f, 0x00, 0xe2, 0x43, 0xf8, 0xe7, 0x6c, 0xe0, 0xee, + 0x74, 0x0f, 0x5c, 0x26, 0xe7, 0x75, 0x2b, 0x68, 0x3a, 0x5e, 0xfa, 0x08, 0x3f, 0xa9, 0xd1, 0xfb, + 0xdf, 0x16, 0x3c, 0xbe, 0xe4, 0xfa, 0x2d, 0x12, 0xe6, 0x2c, 0xc0, 0x87, 0x23, 0x80, 0x1c, 0xef, + 0xa4, 0x37, 0x96, 0x58, 0xe9, 0x04, 0x96, 0x98, 0xfd, 0x47, 0x16, 0x20, 0xfe, 0xd9, 0x1f, 0xb9, + 0x8f, 0xbd, 0xd3, 0xfd, 0xb1, 0x27, 0xb0, 0x2c, 0xec, 0x5b, 0x30, 0xb9, 0xe4, 0xb9, 0xc4, 0x8f, + 0x6b, 0xf5, 0xa5, 0xc0, 0xdf, 0x74, 0xb7, 0xd0, 0x67, 0x60, 0x92, 0xca, 0xb4, 0x41, 0x27, 0x6e, + 0x90, 0x66, 0xe0, 0x33, 0xf6, 0x9f, 0x4a, 0x82, 0xe8, 0xf0, 0xa0, 0x32, 0xb9, 0x6e, 0x40, 0x70, + 0x0a, 0xd3, 0xfe, 0x1d, 0x3a, 0x7e, 0xc1, 0x6e, 0x3b, 0xf0, 0x89, 0x1f, 0x2f, 0x05, 0x7e, 0x8b, + 0x8b, 0x89, 0x9f, 0x81, 0x52, 0x4c, 0xc7, 0x83, 0x8f, 0xdd, 0x65, 0xb9, 0x51, 0xe8, 0x28, 0x1c, + 0x1d, 0x54, 0x1e, 0xe9, 0xae, 0xc1, 0xc6, 0x89, 0xd5, 0x41, 0xdf, 0x06, 0xc3, 0x51, 0xec, 0xc4, + 0x9d, 0x48, 0x8c, 0xe6, 0x13, 0x72, 0x34, 0x1b, 0xac, 0xf4, 0xe8, 0xa0, 0x32, 0xa5, 0xaa, 0xf1, + 0x22, 0x2c, 0x2a, 0xa0, 0xa7, 0x61, 0x64, 0x97, 0x44, 0x91, 0xb3, 0x25, 0x39, 0xfc, 0x29, 0x51, + 0x77, 0x64, 0x95, 0x17, 0x63, 0x09, 0x47, 0x4f, 0xc2, 0x10, 0x09, 0xc3, 0x20, 0x14, 0x7b, 0x74, + 0x42, 0x20, 0x0e, 0x2d, 0xd3, 0x42, 0xcc, 0x61, 0xf6, 0xbf, 0xb7, 0x60, 0x4a, 0xf5, 0x95, 0xb7, + 0x75, 0x0a, 0xac, 0xdc, 0xdb, 0x00, 0x4d, 0xf9, 0x81, 0x11, 0xbb, 0x3d, 0xc6, 0xae, 0x5d, 0xce, + 0x64, 0x50, 0xba, 0x86, 0x31, 0xa1, 0xac, 0x8a, 0x22, 0xac, 0x51, 0xb3, 0x7f, 0xd5, 0x82, 0x33, + 0xa9, 0x2f, 0xba, 0xe5, 0x46, 0x31, 0x7a, 0xa7, 0xeb, 0xab, 0xe6, 0x07, 0xfb, 0x2a, 0x5a, 0x9b, + 0x7d, 0x93, 0x5a, 0xca, 0xb2, 0x44, 0xfb, 0xa2, 0x1b, 0x30, 0xe4, 0xc6, 0x64, 0x57, 0x7e, 0xcc, + 0x93, 0x3d, 0x3f, 0x86, 0xf7, 0x2a, 0x99, 0x91, 0x1a, 0xad, 0x89, 0x39, 0x01, 0xfb, 0x87, 0x8a, + 0x50, 0xe6, 0xcb, 0x76, 0xd5, 0x69, 0x9f, 0xc2, 0x5c, 0xd4, 0xa0, 0xc4, 0xa8, 0xf3, 0x8e, 0x3f, + 0x95, 0xdd, 0x71, 0xd1, 0x9d, 0x79, 0x2a, 0xa7, 0x71, 0x56, 0x50, 0x5d, 0x0d, 0xb4, 0x08, 0x33, + 0x12, 0xc8, 0x01, 0xd8, 0x70, 0x7d, 0x27, 0xdc, 0xa7, 0x65, 0xb3, 0x45, 0x46, 0xf0, 0xb9, 0xde, + 0x04, 0x17, 0x15, 0x3e, 0x27, 0xab, 0xfa, 0x9a, 0x00, 0xb0, 0x46, 0x74, 0xee, 0x65, 0x28, 0x2b, + 0xe4, 0xe3, 0xf0, 0x38, 0x73, 0x9f, 0x85, 0xa9, 0x54, 0x5b, 0xfd, 0xaa, 0x8f, 0xeb, 0x2c, 0xd2, + 0x2f, 0xb3, 0x53, 0x40, 0xf4, 0x7a, 0xd9, 0xdf, 0x13, 0xa7, 0xe8, 0xfb, 0x70, 0xd6, 0xcb, 0x38, + 0x9c, 0xc4, 0x54, 0x0d, 0x7e, 0x98, 0x3d, 0x2e, 0x3e, 0xfb, 0x6c, 0x16, 0x14, 0x67, 0xb6, 0x41, + 0xaf, 0xfd, 0xa0, 0x4d, 0xd7, 0xbc, 0xe3, 0xb1, 0xfe, 0x0a, 0xe9, 0xfb, 0xb6, 0x28, 0xc3, 0x0a, + 0x4a, 0x8f, 0xb0, 0xb3, 0xaa, 0xf3, 0x37, 0xc9, 0x7e, 0x83, 0x78, 0xa4, 0x19, 0x07, 0xe1, 0x87, + 0xda, 0xfd, 0x0b, 0x7c, 0xf4, 0xf9, 0x09, 0x38, 0x26, 0x08, 0x14, 0x6f, 0x92, 0x7d, 0x3e, 0x15, + 0xfa, 0xd7, 0x15, 0x7b, 0x7e, 0xdd, 0xcf, 0x5a, 0x30, 0xa1, 0xbe, 0xee, 
+	[... several hundred lines of regenerated protobuf fileDescriptor bytes elided ...]
0x68, 0xfd, 0xe6, 0xe1, 0x45, 0xeb, 0xb7, 0x0e, + 0x2f, 0x5a, 0xbf, 0x7d, 0x78, 0xd1, 0xfa, 0xdd, 0xc3, 0x8b, 0xd6, 0x57, 0x7f, 0xff, 0xe2, 0xc7, + 0xde, 0x2e, 0xec, 0x3d, 0xff, 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0x84, 0x97, 0x9c, 0xb4, 0x50, + 0xe8, 0x00, 0x00, } diff --git a/cluster-autoscaler/vendor/k8s.io/api/core/v1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/core/v1/generated.proto index 792c19a2d746..165aa2a2450e 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/core/v1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/core/v1/generated.proto @@ -31,7 +31,7 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; option go_package = "v1"; // Represents a Persistent Disk resource in AWS. -// +// // An AWS EBS disk must exist before mounting to a container. The disk // must also be in the same AWS zone as the kubelet. An AWS EBS disk // can only be mounted as read/write once. AWS EBS volumes support @@ -198,7 +198,7 @@ message CSIPersistentVolumeSource { // ControllerPublishSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // ControllerPublishVolume and ControllerUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional optional SecretReference controllerPublishSecretRef = 6; @@ -206,7 +206,7 @@ message CSIPersistentVolumeSource { // NodeStageSecretRef is a reference to the secret object containing sensitive // information to pass to the CSI driver to complete the CSI NodeStageVolume // and NodeStageVolume and NodeUnstageVolume calls. - // This field is optional, and may be empty if no secret is required. If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional optional SecretReference nodeStageSecretRef = 7; @@ -214,7 +214,7 @@ message CSIPersistentVolumeSource { // NodePublishSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // NodePublishVolume and NodeUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional optional SecretReference nodePublishSecretRef = 8; @@ -436,7 +436,7 @@ message ConfigMap { // ConfigMapEnvSource selects a ConfigMap to populate the environment // variables with. -// +// // The contents of the target ConfigMap's Data field will represent the // key-value pairs as environment variables. message ConfigMapEnvSource { @@ -497,7 +497,7 @@ message ConfigMapNodeConfigSource { } // Adapts a ConfigMap into a projected volume. -// +// // The contents of the target ConfigMap's Data field will be presented in a // projected volume as files using the keys in the Data field as the file names, // unless the items element is populated with specific mappings of keys to paths. @@ -522,7 +522,7 @@ message ConfigMapProjection { } // Adapts a ConfigMap into a volume. 
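
> Editor's note: the three CSI secret-reference fields touched in the hunks above share one contract — each is optional, and if the referenced Secret holds more than one key, all keys are passed to the driver. A minimal sketch of a PersistentVolume populating them, built against the vendored `k8s.io/api/core/v1` types; the driver name, volume handle, and secret names are illustrative, and `Driver`/`VolumeHandle` are fields of `CSIPersistentVolumeSource` not shown in this excerpt:

```go
package csidemo

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// csiVolume returns a CSI-backed PersistentVolume. Each *SecretReference
// field may be nil, which simply means no secret is passed for that CSI
// call (ControllerPublishVolume, NodeStageVolume, or NodePublishVolume).
func csiVolume() *corev1.PersistentVolume {
	return &corev1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "csi-pv"}, // illustrative name
		Spec: corev1.PersistentVolumeSpec{
			PersistentVolumeSource: corev1.PersistentVolumeSource{
				CSI: &corev1.CSIPersistentVolumeSource{
					Driver:       "example.csi.driver.io", // hypothetical driver
					VolumeHandle: "vol-0123",
					// Secrets for the controller-side publish/unpublish calls.
					ControllerPublishSecretRef: &corev1.SecretReference{
						Name: "csi-controller-secret", Namespace: "kube-system",
					},
					// Secrets for NodeStageVolume / NodeUnstageVolume.
					NodeStageSecretRef: &corev1.SecretReference{
						Name: "csi-node-stage-secret", Namespace: "kube-system",
					},
					// NodePublishSecretRef left nil: no secret required there.
				},
			},
		},
	}
}
```
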
-// +// // The contents of the target ConfigMap's Data field will be presented in a // volume as files using the keys in the Data field as the file names, unless // the items element is populated with specific mappings of keys to paths. @@ -606,6 +606,9 @@ message Container { // +optional // +patchMergeKey=containerPort // +patchStrategy=merge + // +listType=map + // +listMapKey=containerPort + // +listMapKey=protocol repeated ContainerPort ports = 6; // List of sources to populate environment variables in the container. @@ -638,7 +641,7 @@ message Container { repeated VolumeMount volumeMounts = 9; // volumeDevices is the list of block devices to be used by the container. - // This is an alpha feature and may change in the future. + // This is a beta feature. // +patchMergeKey=devicePath // +patchStrategy=merge // +optional @@ -1314,7 +1317,7 @@ message FlockerVolumeSource { } // Represents a Persistent Disk resource in Google Compute Engine. -// +// // A GCE PD must exist before mounting to a container. The disk must // also be in the same GCE project and zone as the kubelet. A GCE PD // can only be mounted as read/write once or read-only many times. GCE @@ -1350,7 +1353,7 @@ message GCEPersistentDiskVolumeSource { // Represents a volume that is populated with the contents of a git repository. // Git repo volumes do not support ownership management. // Git repo volumes support SELinux relabeling. -// +// // DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an // EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir // into the Pod's container. @@ -1370,6 +1373,30 @@ message GitRepoVolumeSource { optional string directory = 3; } +// Represents a Glusterfs mount that lasts the lifetime of a pod. +// Glusterfs volumes do not support ownership management or SELinux relabeling. +message GlusterfsPersistentVolumeSource { + // EndpointsName is the endpoint name that details Glusterfs topology. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + optional string endpoints = 1; + + // Path is the Glusterfs volume path. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + optional string path = 2; + + // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. + // Defaults to false. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // +optional + optional bool readOnly = 3; + + // EndpointsNamespace is the namespace that contains Glusterfs endpoint. + // If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // +optional + optional string endpointsNamespace = 4; +} + // Represents a Glusterfs mount that lasts the lifetime of a pod. // Glusterfs volumes do not support ownership management or SELinux relabeling. message GlusterfsVolumeSource { @@ -2293,7 +2320,7 @@ message PersistentVolumeClaimSpec { // volumeMode defines what type of volume is required by the claim. // Value of Filesystem is implied when not included in claim spec. - // This is an alpha feature and may change in the future. + // This is a beta feature. // +optional optional string volumeMode = 6; @@ -2386,7 +2413,7 @@ message PersistentVolumeSource { // exposed to the pod. Provisioned by an admin. 
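
> Editor's note: the practical difference between the new `GlusterfsPersistentVolumeSource` and the pod-scoped `GlusterfsVolumeSource` is the extra `endpointsNamespace` field — a PersistentVolume can now reference a Glusterfs endpoints object outside the namespace of the bound PVC, which is also why the PV-level `glusterfs` field changes type in the hunks that follow. A sketch against the vendored API; the endpoint, path, and namespace values are illustrative:

```go
package glusterfsdemo

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// glusterfsPV builds a PV whose Glusterfs endpoints live in a fixed
// namespace. With EndpointsNamespace nil, the lookup would default to
// the namespace of the bound PVC, per the field's doc comment.
func glusterfsPV() *corev1.PersistentVolume {
	endpointsNS := "glusterfs-system" // illustrative namespace
	return &corev1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "gluster-pv"},
		Spec: corev1.PersistentVolumeSpec{
			PersistentVolumeSource: corev1.PersistentVolumeSource{
				// Note the type change in this diff: the PV-level field is now
				// *GlusterfsPersistentVolumeSource, not *GlusterfsVolumeSource.
				Glusterfs: &corev1.GlusterfsPersistentVolumeSource{
					EndpointsName:      "glusterfs-cluster",
					Path:               "gv0",
					ReadOnly:           false,
					EndpointsNamespace: &endpointsNS,
				},
			},
		},
	}
}
```
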
// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md // +optional - optional GlusterfsVolumeSource glusterfs = 4; + optional GlusterfsPersistentVolumeSource glusterfs = 4; // NFS represents an NFS mount on the host. Provisioned by an admin. // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs @@ -2509,7 +2536,7 @@ message PersistentVolumeSpec { // volumeMode defines if a volume is intended to be used with a formatted filesystem // or to remain in raw block state. Value of Filesystem is implied when not included in spec. - // This is an alpha feature and may change in the future. + // This is a beta feature. // +optional optional string volumeMode = 8; @@ -2899,11 +2926,11 @@ message PodSecurityContext { // A special supplemental group that applies to all containers in a pod. // Some volume types allow the Kubelet to change the ownership of that volume // to be owned by the pod: - // + // // 1. The owning GID will be the FSGroup // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) // 3. The permission bits are OR'd with rw-rw---- - // + // // If unset, the Kubelet will not modify the ownership and permissions of any volume. // +optional optional int64 fsGroup = 5; @@ -3126,6 +3153,11 @@ message PodSpec { // This is an alpha feature and may change in the future. // +optional optional string runtimeClassName = 29; + + // EnableServiceLinks indicates whether information about services should be injected into pod's + // environment variables, matching the syntax of Docker links. + // +optional + optional bool enableServiceLinks = 30; } // PodStatus represents information about the status of a pod. Status may trail the actual @@ -3136,7 +3168,7 @@ message PodStatus { // The conditions array, the reason and message fields, and the individual container status // arrays contain more detail about the pod's status. // There are five possible phase values: - // + // // Pending: The pod has been accepted by the Kubernetes system, but one or more of the // container images has not been created. This includes time before being scheduled as // well as time spent downloading images over the network, which could take a while. @@ -3148,7 +3180,7 @@ message PodStatus { // by the system. // Unknown: For some reason the state of the pod could not be obtained, typically due to an // error in communicating with the host of the pod. - // + // // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase // +optional optional string phase = 1; @@ -3879,7 +3911,7 @@ message Secret { // SecretEnvSource selects a Secret to populate the environment // variables with. -// +// // The contents of the target Secret's Data field will represent the // key-value pairs as environment variables. message SecretEnvSource { @@ -3917,7 +3949,7 @@ message SecretList { } // Adapts a secret into a projected volume. -// +// // The contents of the target Secret's Data field will be presented in a // projected volume as files using the keys in the Data field as the file names. // Note that this is identical to a secret volume source without the default @@ -3953,7 +3985,7 @@ message SecretReference { } // Adapts a Secret into a volume. -// +// // The contents of the target Secret's Data field will be presented in a volume // as files using the keys in the Data field as the file names. // Secret volumes support ownership management and SELinux relabeling. 
@@ -4503,7 +4535,10 @@ message TopologySelectorTerm { // TypedLocalObjectReference contains enough information to let you locate the // typed referenced object inside the same namespace. message TypedLocalObjectReference { - // APIGroup is the group for the resource being referenced + // APIGroup is the group for the resource being referenced. + // If APIGroup is not specified, the specified Kind must be in the core API group. + // For any other third-party types, APIGroup is required. + // +optional optional string apiGroup = 1; // Kind is the type of resource being referenced diff --git a/cluster-autoscaler/vendor/k8s.io/api/core/v1/types.go b/cluster-autoscaler/vendor/k8s.io/api/core/v1/types.go index a316657dc8e7..43e70b9bad57 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/core/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/core/v1/types.go @@ -191,7 +191,7 @@ type PersistentVolumeSource struct { // exposed to the pod. Provisioned by an admin. // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md // +optional - Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"` + Glusterfs *GlusterfsPersistentVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"` // NFS represents an NFS mount on the host. Provisioned by an admin. // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs // +optional @@ -326,7 +326,7 @@ type PersistentVolumeSpec struct { MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,7,opt,name=mountOptions"` // volumeMode defines if a volume is intended to be used with a formatted filesystem // or to remain in raw block state. Value of Filesystem is implied when not included in spec. - // This is an alpha feature and may change in the future. + // This is a beta feature. // +optional VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,8,opt,name=volumeMode,casttype=PersistentVolumeMode"` // NodeAffinity defines constraints that limit what nodes this volume can be accessed from. @@ -455,7 +455,7 @@ type PersistentVolumeClaimSpec struct { StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"` // volumeMode defines what type of volume is required by the claim. // Value of Filesystem is implied when not included in claim spec. - // This is an alpha feature and may change in the future. + // This is a beta feature. // +optional VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,6,opt,name=volumeMode,casttype=PersistentVolumeMode"` // This field requires the VolumeSnapshotDataSource alpha feature gate to be @@ -636,6 +636,30 @@ type GlusterfsVolumeSource struct { ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` } +// Represents a Glusterfs mount that lasts the lifetime of a pod. +// Glusterfs volumes do not support ownership management or SELinux relabeling. +type GlusterfsPersistentVolumeSource struct { + // EndpointsName is the endpoint name that details Glusterfs topology. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"` + + // Path is the Glusterfs volume path. 
+ // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + Path string `json:"path" protobuf:"bytes,2,opt,name=path"` + + // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. + // Defaults to false. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` + + // EndpointsNamespace is the namespace that contains Glusterfs endpoint. + // If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // +optional + EndpointsNamespace *string `json:"endpointsNamespace,omitempty" protobuf:"bytes,4,opt,name=endpointsNamespace"` +} + // Represents a Rados Block Device mount that lasts the lifetime of a pod. // RBD volumes support ownership management and SELinux relabeling. type RBDVolumeSource struct { @@ -1640,7 +1664,7 @@ type CSIPersistentVolumeSource struct { // ControllerPublishSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // ControllerPublishVolume and ControllerUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional ControllerPublishSecretRef *SecretReference `json:"controllerPublishSecretRef,omitempty" protobuf:"bytes,6,opt,name=controllerPublishSecretRef"` @@ -1648,7 +1672,7 @@ type CSIPersistentVolumeSource struct { // NodeStageSecretRef is a reference to the secret object containing sensitive // information to pass to the CSI driver to complete the CSI NodeStageVolume // and NodeStageVolume and NodeUnstageVolume calls. - // This field is optional, and may be empty if no secret is required. If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional NodeStageSecretRef *SecretReference `json:"nodeStageSecretRef,omitempty" protobuf:"bytes,7,opt,name=nodeStageSecretRef"` @@ -1656,7 +1680,7 @@ type CSIPersistentVolumeSource struct { // NodePublishSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // NodePublishVolume and NodeUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional NodePublishSecretRef *SecretReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,8,opt,name=nodePublishSecretRef"` @@ -2060,6 +2084,9 @@ type Container struct { // +optional // +patchMergeKey=containerPort // +patchStrategy=merge + // +listType=map + // +listMapKey=containerPort + // +listMapKey=protocol Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"` // List of sources to populate environment variables in the container. // The keys defined within a source must be a C_IDENTIFIER. 
All invalid keys @@ -2087,7 +2114,7 @@ type Container struct { // +patchStrategy=merge VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` // volumeDevices is the list of block devices to be used by the container. - // This is an alpha feature and may change in the future. + // This is a beta feature. // +patchMergeKey=devicePath // +patchStrategy=merge // +optional @@ -2891,8 +2918,17 @@ type PodSpec struct { // This is an alpha feature and may change in the future. // +optional RuntimeClassName *string `json:"runtimeClassName,omitempty" protobuf:"bytes,29,opt,name=runtimeClassName"` + // EnableServiceLinks indicates whether information about services should be injected into pod's + // environment variables, matching the syntax of Docker links. + // +optional + EnableServiceLinks *bool `json:"enableServiceLinks,omitempty" protobuf:"varint,30,opt,name=enableServiceLinks"` } +const ( + // The default value for enableServiceLinks attribute. + DefaultEnableServiceLinks = true +) + // HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the // pod's hosts file. type HostAlias struct { @@ -3273,8 +3309,8 @@ type ReplicationControllerCondition struct { } // +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/extensions/v1beta1.Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/extensions/v1beta1.Scale,result=k8s.io/api/extensions/v1beta1.Scale +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ReplicationController represents the configuration of a replication controller. @@ -4495,8 +4531,11 @@ type LocalObjectReference struct { // TypedLocalObjectReference contains enough information to let you locate the // typed referenced object inside the same namespace. type TypedLocalObjectReference struct { - // APIGroup is the group for the resource being referenced - APIGroup string `json:"apiGroup" protobuf:"bytes,1,opt,name=apiGroup"` + // APIGroup is the group for the resource being referenced. + // If APIGroup is not specified, the specified Kind must be in the core API group. + // For any other third-party types, APIGroup is required. + // +optional + APIGroup *string `json:"apiGroup" protobuf:"bytes,1,opt,name=apiGroup"` // Kind is the type of resource being referenced Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` // Name is the name of resource being referenced @@ -4984,6 +5023,10 @@ const ( TLSCertKey = "tls.crt" // TLSPrivateKeyKey is the key for the private key field in a TLS secret. TLSPrivateKeyKey = "tls.key" + // SecretTypeBootstrapToken is used during the automated bootstrap process (first + // implemented by kubeadm). It stores tokens that are used to sign well known + // ConfigMaps. They are used for authn. + SecretTypeBootstrapToken SecretType = "bootstrap.kubernetes.io/token" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -5207,7 +5250,7 @@ type SecurityContext struct { // readonly paths and masked paths. // This requires the ProcMountType feature flag to be enabled. 
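
> Editor's note: `EnableServiceLinks` is declared as `*bool` rather than `bool` so API defaulting can distinguish "unset" from an explicit `false`; the diff pairs it with the new `DefaultEnableServiceLinks = true` constant. A small sketch of how a consumer might resolve the effective value — the helper name is ours, not part of the API:

```go
package podspecdemo

import corev1 "k8s.io/api/core/v1"

// effectiveServiceLinks is a hypothetical helper mirroring the defaulting
// this diff implies: a nil pointer falls back to
// corev1.DefaultEnableServiceLinks (true).
func effectiveServiceLinks(spec *corev1.PodSpec) bool {
	if spec.EnableServiceLinks != nil {
		return *spec.EnableServiceLinks
	}
	return corev1.DefaultEnableServiceLinks
}
```
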
// +optional - ProcMount *ProcMountType `json:"procMount,omitEmpty" protobuf:"bytes,9,opt,name=procMount"` + ProcMount *ProcMountType `json:"procMount,omitempty" protobuf:"bytes,9,opt,name=procMount"` } type ProcMountType string diff --git a/cluster-autoscaler/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index 821f3d3cafa1..71f90685f35f 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -123,9 +123,9 @@ var map_CSIPersistentVolumeSource = map[string]string{ "readOnly": "Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write).", "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\".", "volumeAttributes": "Attributes of the volume to publish.", - "controllerPublishSecretRef": "ControllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", - "nodeStageSecretRef": "NodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", - "nodePublishSecretRef": "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "controllerPublishSecretRef": "ControllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "nodeStageSecretRef": "NodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "nodePublishSecretRef": "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", } func (CSIPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -321,7 +321,7 @@ var map_Container = map[string]string{ "env": "List of environment variables to set in the container. Cannot be updated.", "resources": "Compute Resources required by this container. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", "volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.", - "volumeDevices": "volumeDevices is the list of block devices to be used by the container. This is an alpha feature and may change in the future.", + "volumeDevices": "volumeDevices is the list of block devices to be used by the container. This is a beta feature.", "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", @@ -695,6 +695,18 @@ func (GitRepoVolumeSource) SwaggerDoc() map[string]string { return map_GitRepoVolumeSource } +var map_GlusterfsPersistentVolumeSource = map[string]string{ + "": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", + "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", + "path": "Path is the Glusterfs volume path. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", + "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", + "endpointsNamespace": "EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", +} + +func (GlusterfsPersistentVolumeSource) SwaggerDoc() map[string]string { + return map_GlusterfsPersistentVolumeSource +} + var map_GlusterfsVolumeSource = map[string]string{ "": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", @@ -1210,7 +1222,7 @@ var map_PersistentVolumeClaimSpec = map[string]string{ "resources": "Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", "volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.", "storageClassName": "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", - "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. This is an alpha feature and may change in the future.", + "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. 
This is a beta feature.", "dataSource": "This field requires the VolumeSnapshotDataSource alpha feature gate to be enabled and currently VolumeSnapshot is the only supported data source. If the provisioner can support VolumeSnapshot data source, it will create a new volume and data will be restored to the volume at the same time. If the provisioner does not support VolumeSnapshot data source, volume will not be created and the failure will be reported as an event. In the future, we plan to support more data source types and the behavior of the provisioner may change.", } @@ -1288,7 +1300,7 @@ var map_PersistentVolumeSpec = map[string]string{ "persistentVolumeReclaimPolicy": "What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming", "storageClassName": "Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.", "mountOptions": "A list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options", - "volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec. This is an alpha feature and may change in the future.", + "volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec. This is a beta feature.", "nodeAffinity": "NodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.", } @@ -1528,6 +1540,7 @@ var map_PodSpec = map[string]string{ "dnsConfig": "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", "readinessGates": "If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://github.com/kubernetes/community/blob/master/keps/sig-network/0007-pod-ready%2B%2B.md", "runtimeClassName": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. 
More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md This is an alpha feature and may change in the future.", + "enableServiceLinks": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links.", } func (PodSpec) SwaggerDoc() map[string]string { @@ -1636,7 +1649,7 @@ func (PreferredSchedulingTerm) SwaggerDoc() map[string]string { } var map_Probe = map[string]string{ - "": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.", + "": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.", "initialDelaySeconds": "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "timeoutSeconds": "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "periodSeconds": "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.", @@ -2201,7 +2214,7 @@ func (TopologySelectorLabelRequirement) SwaggerDoc() map[string]string { } var map_TopologySelectorTerm = map[string]string{ - "": "A topology selector term represents the result of label queries. A null or empty topology selector term matches no objects. The requirements of them are ANDed. It provides a subset of functionality as NodeSelectorTerm. This is an alpha feature and may change in the future.", + "": "A topology selector term represents the result of label queries. A null or empty topology selector term matches no objects. The requirements of them are ANDed. It provides a subset of functionality as NodeSelectorTerm. This is an alpha feature and may change in the future.", "matchLabelExpressions": "A list of topology selector requirements by labels.", } @@ -2211,7 +2224,7 @@ func (TopologySelectorTerm) SwaggerDoc() map[string]string { var map_TypedLocalObjectReference = map[string]string{ "": "TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace.", - "apiGroup": "APIGroup is the group for the resource being referenced", + "apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", "kind": "Kind is the type of resource being referenced", "name": "Name is the name of resource being referenced", } @@ -2285,23 +2298,23 @@ var map_VolumeSource = map[string]string{ "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md", "glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md", "persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", - "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. 
More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", - "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", - "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", - "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", - "downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume", - "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", - "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", - "configMap": "ConfigMap represents a configMap that should populate this volume", - "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", - "quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", - "azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", - "photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", - "projected": "Items for all in one resources secrets, configmaps, and downward API", - "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", - "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", - "storageos": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", + "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", + "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running", + "downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume", + "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "configMap": "ConfigMap represents a configMap that should populate this volume", + "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + "quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + "azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + "photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + "projected": "Items for all in one resources secrets, configmaps, and downward API", + "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", + "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + "storageos": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", } func (VolumeSource) SwaggerDoc() map[string]string { diff --git a/cluster-autoscaler/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index 0c5db354742f..4219c95eb09d 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -1498,6 +1498,27 @@ func (in *GitRepoVolumeSource) DeepCopy() *GitRepoVolumeSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlusterfsPersistentVolumeSource) DeepCopyInto(out *GlusterfsPersistentVolumeSource) { + *out = *in + if in.EndpointsNamespace != nil { + in, out := &in.EndpointsNamespace, &out.EndpointsNamespace + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlusterfsPersistentVolumeSource. +func (in *GlusterfsPersistentVolumeSource) DeepCopy() *GlusterfsPersistentVolumeSource { + if in == nil { + return nil + } + out := new(GlusterfsPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *GlusterfsVolumeSource) DeepCopyInto(out *GlusterfsVolumeSource) { *out = *in @@ -2687,7 +2708,7 @@ func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec if in.DataSource != nil { in, out := &in.DataSource, &out.DataSource *out = new(TypedLocalObjectReference) - **out = **in + (*in).DeepCopyInto(*out) } return } @@ -2806,8 +2827,8 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) { } if in.Glusterfs != nil { in, out := &in.Glusterfs, &out.Glusterfs - *out = new(GlusterfsVolumeSource) - **out = **in + *out = new(GlusterfsPersistentVolumeSource) + (*in).DeepCopyInto(*out) } if in.NFS != nil { in, out := &in.NFS, &out.NFS @@ -3554,6 +3575,11 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { *out = new(string) **out = **in } + if in.EnableServiceLinks != nil { + in, out := &in.EnableServiceLinks, &out.EnableServiceLinks + *out = new(bool) + **out = **in + } return } @@ -5090,6 +5116,11 @@ func (in *TopologySelectorTerm) DeepCopy() *TopologySelectorTerm { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TypedLocalObjectReference) DeepCopyInto(out *TypedLocalObjectReference) { *out = *in + if in.APIGroup != nil { + in, out := &in.APIGroup, &out.APIGroup + *out = new(string) + **out = **in + } return } diff --git a/cluster-autoscaler/vendor/k8s.io/api/events/v1beta1/doc.go b/cluster-autoscaler/vendor/k8s.io/api/events/v1beta1/doc.go index 0841008b743f..99991994505c 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/events/v1beta1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/api/events/v1beta1/doc.go @@ -18,4 +18,5 @@ limitations under the License. // +k8s:openapi-gen=true // +groupName=events.k8s.io + package v1beta1 diff --git a/cluster-autoscaler/vendor/k8s.io/api/events/v1beta1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/events/v1beta1/generated.pb.go index e24a82ab1860..bb0c881b5712 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/events/v1beta1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/events/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. 
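
> Editor's note: the deepcopy changes above follow mechanically from `APIGroup` becoming a pointer: `**out = **in` is only safe for plain-value structs, while a struct holding a pointer needs `(*in).DeepCopyInto(*out)` so the copy gets its own allocation. A hedged illustration of what the regenerated code now guarantees, assuming the standard generated `DeepCopy` wrapper for this type; the group name is illustrative (a nil `APIGroup` would mean the core API group, per the new field docs):

```go
package deepcopydemo

import corev1 "k8s.io/api/core/v1"

// independentCopies shows that DeepCopy of the updated
// TypedLocalObjectReference no longer aliases the APIGroup pointer.
func independentCopies() bool {
	group := "snapshot.storage.k8s.io" // hypothetical API group
	orig := corev1.TypedLocalObjectReference{
		APIGroup: &group,
		Kind:     "VolumeSnapshot",
		Name:     "my-snapshot",
	}
	cp := orig.DeepCopy()
	*cp.APIGroup = "mutated.example.io"
	// A shallow **out = **in copy would have mutated orig.APIGroup too.
	return *orig.APIGroup == "snapshot.storage.k8s.io" // true
}
```
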
@@ -254,24 +253,6 @@ func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/BUILD b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/BUILD index 57da4b778f9d..ce7106a2dfc1 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/BUILD @@ -20,7 +20,6 @@ go_library( deps = [ "//staging/src/k8s.io/api/apps/v1beta1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go index 72d64db3edd1..a0dfa96620db 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. @@ -27,10 +26,6 @@ limitations under the License. It has these top-level messages: AllowedFlexVolume AllowedHostPath - CustomMetricCurrentStatus - CustomMetricCurrentStatusList - CustomMetricTarget - CustomMetricTargetList DaemonSet DaemonSetCondition DaemonSetList @@ -77,6 +72,7 @@ limitations under the License. 
RollbackConfig RollingUpdateDaemonSet RollingUpdateDeployment + RunAsGroupStrategyOptions RunAsUserStrategyOptions SELinuxStrategyOptions Scale @@ -91,7 +87,6 @@ import fmt "fmt" import math "math" import k8s_io_api_core_v1 "k8s.io/api/core/v1" - import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" @@ -122,255 +117,235 @@ func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } func (*AllowedHostPath) ProtoMessage() {} func (*AllowedHostPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -func (m *CustomMetricCurrentStatus) Reset() { *m = CustomMetricCurrentStatus{} } -func (*CustomMetricCurrentStatus) ProtoMessage() {} -func (*CustomMetricCurrentStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} -} - -func (m *CustomMetricCurrentStatusList) Reset() { *m = CustomMetricCurrentStatusList{} } -func (*CustomMetricCurrentStatusList) ProtoMessage() {} -func (*CustomMetricCurrentStatusList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{3} -} - -func (m *CustomMetricTarget) Reset() { *m = CustomMetricTarget{} } -func (*CustomMetricTarget) ProtoMessage() {} -func (*CustomMetricTarget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *CustomMetricTargetList) Reset() { *m = CustomMetricTargetList{} } -func (*CustomMetricTargetList) ProtoMessage() {} -func (*CustomMetricTargetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - func (m *DaemonSet) Reset() { *m = DaemonSet{} } func (*DaemonSet) ProtoMessage() {} -func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } func (*DaemonSetCondition) ProtoMessage() {} -func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } func (*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } func (*DaemonSetStatus) ProtoMessage() {} -func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } -func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } -func (*DaemonSetUpdateStrategy) ProtoMessage() {} -func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{11} -} +func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } +func (*DaemonSetUpdateStrategy) ProtoMessage() {} +func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } func 
(m *Deployment) Reset() { *m = Deployment{} } func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *DeploymentList) Reset() { *m = DeploymentList{} } func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } func (*DeploymentRollback) ProtoMessage() {} -func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } func (*FSGroupStrategyOptions) ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } func (*HTTPIngressPath) ProtoMessage() {} -func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } +func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } func (*HTTPIngressRuleValue) ProtoMessage() {} -func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } func (m *HostPortRange) Reset() { *m = HostPortRange{} } func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func (m *IDRange) Reset() { *m = IDRange{} } func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, 
[]int{23} }
+func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} }
func (m *IPBlock) Reset() { *m = IPBlock{} }
func (*IPBlock) ProtoMessage() {}
-func (*IPBlock) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} }
+func (*IPBlock) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} }
func (m *Ingress) Reset() { *m = Ingress{} }
func (*Ingress) ProtoMessage() {}
-func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} }
+func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} }
func (m *IngressBackend) Reset() { *m = IngressBackend{} }
func (*IngressBackend) ProtoMessage() {}
-func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} }
+func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} }
func (m *IngressList) Reset() { *m = IngressList{} }
func (*IngressList) ProtoMessage() {}
-func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} }
+func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} }
func (m *IngressRule) Reset() { *m = IngressRule{} }
func (*IngressRule) ProtoMessage() {}
-func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} }
+func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} }
func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} }
func (*IngressRuleValue) ProtoMessage() {}
-func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} }
+func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} }
func (m *IngressSpec) Reset() { *m = IngressSpec{} }
func (*IngressSpec) ProtoMessage() {}
-func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} }
+func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} }
func (m *IngressStatus) Reset() { *m = IngressStatus{} }
func (*IngressStatus) ProtoMessage() {}
-func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} }
+func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} }
func (m *IngressTLS) Reset() { *m = IngressTLS{} }
func (*IngressTLS) ProtoMessage() {}
-func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} }
+func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} }
func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} }
func (*NetworkPolicy) ProtoMessage() {}
-func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} }
+func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} }
func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} }
func (*NetworkPolicyEgressRule) ProtoMessage() {}
func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) {
-	return fileDescriptorGenerated, []int{34}
+	return fileDescriptorGenerated, []int{30}
}
func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} }
func (*NetworkPolicyIngressRule) ProtoMessage() {}
func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) {
-	return fileDescriptorGenerated, []int{35}
+	return fileDescriptorGenerated, []int{31}
}
func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} }
func (*NetworkPolicyList) ProtoMessage() {}
-func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} }
+func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} }
func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} }
func (*NetworkPolicyPeer) ProtoMessage() {}
-func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} }
+func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} }
func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} }
func (*NetworkPolicyPort) ProtoMessage() {}
-func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} }
+func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} }
func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} }
func (*NetworkPolicySpec) ProtoMessage() {}
-func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} }
+func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} }
func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} }
func (*PodSecurityPolicy) ProtoMessage() {}
-func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} }
+func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} }
func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} }
func (*PodSecurityPolicyList) ProtoMessage() {}
-func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} }
+func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} }
func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} }
func (*PodSecurityPolicySpec) ProtoMessage() {}
-func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} }
+func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} }
func (m *ReplicaSet) Reset() { *m = ReplicaSet{} }
func (*ReplicaSet) ProtoMessage() {}
-func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} }
+func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} }
func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} }
func (*ReplicaSetCondition) ProtoMessage() {}
-func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} }
+func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} }
func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} }
func (*ReplicaSetList) ProtoMessage() {}
-func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} }
+func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} }
func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} }
func (*ReplicaSetSpec) ProtoMessage() {}
-func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} }
+func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} }
func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} }
func (*ReplicaSetStatus) ProtoMessage() {}
-func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} }
+func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} }
func (m *ReplicationControllerDummy) Reset() { *m = ReplicationControllerDummy{} }
func (*ReplicationControllerDummy) ProtoMessage() {}
func (*ReplicationControllerDummy) Descriptor() ([]byte, []int) {
-	return fileDescriptorGenerated, []int{48}
+	return fileDescriptorGenerated, []int{44}
}
func (m *RollbackConfig) Reset() { *m = RollbackConfig{} }
func (*RollbackConfig) ProtoMessage() {}
-func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} }
+func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} }
func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} }
func (*RollingUpdateDaemonSet) ProtoMessage() {}
-func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} }
+func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} }
func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} }
func (*RollingUpdateDeployment) ProtoMessage() {}
func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) {
-	return fileDescriptorGenerated, []int{51}
+	return fileDescriptorGenerated, []int{47}
+}
+
+func (m *RunAsGroupStrategyOptions) Reset() { *m = RunAsGroupStrategyOptions{} }
+func (*RunAsGroupStrategyOptions) ProtoMessage() {}
+func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptorGenerated, []int{48}
}
func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} }
func (*RunAsUserStrategyOptions) ProtoMessage() {}
func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptorGenerated, []int{52}
+	return fileDescriptorGenerated, []int{49}
}
func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} }
func (*SELinuxStrategyOptions) ProtoMessage() {}
-func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} }
+func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} }
func (m *Scale) Reset() { *m = Scale{} }
func (*Scale) ProtoMessage() {}
-func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} }
+func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} }
func (m *ScaleSpec) Reset() { *m = ScaleSpec{} }
func (*ScaleSpec) ProtoMessage() {}
-func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} }
+func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} }
func (m *ScaleStatus) Reset() { *m = ScaleStatus{} }
func (*ScaleStatus) ProtoMessage() {}
-func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} }
+func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} }
func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} }
func (*SupplementalGroupsStrategyOptions) ProtoMessage() {}
func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptorGenerated, []int{57}
+	return fileDescriptorGenerated, []int{54}
}
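The renumbering above is bookkeeping for the descriptor: `Descriptor()` returns the file's gzipped `FileDescriptorProto` plus an index path into it, so removing the four `CustomMetric*` messages (and adding `RunAsGroupStrategyOptions` at index 48) shifts every later index accordingly. The `init()` registrations that follow are what make these messages resolvable by their fully-qualified proto names at runtime. A minimal lookup sketch, assuming the vendored gogo `proto` registry and `k8s.io/api` packages (the `main` wrapper is illustrative, not part of this file):

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	// Importing for side effects runs this file's init(), which registers
	// every message under its "k8s.io.api.extensions.v1beta1.*" name.
	_ "k8s.io/api/extensions/v1beta1"
)

func main() {
	// proto.MessageType resolves a registered fully-qualified name to the
	// concrete Go type, or nil if nothing registered that name.
	fmt.Println(proto.MessageType("k8s.io.api.extensions.v1beta1.RunAsGroupStrategyOptions"))
	// The CustomMetric* registrations are removed by this change, so:
	fmt.Println(proto.MessageType("k8s.io.api.extensions.v1beta1.CustomMetricTarget")) // <nil>
}
```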
"k8s.io.api.extensions.v1beta1.AllowedHostPath") - proto.RegisterType((*CustomMetricCurrentStatus)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricCurrentStatus") - proto.RegisterType((*CustomMetricCurrentStatusList)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricCurrentStatusList") - proto.RegisterType((*CustomMetricTarget)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricTarget") - proto.RegisterType((*CustomMetricTargetList)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricTargetList") proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.extensions.v1beta1.DaemonSet") proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetCondition") proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetList") @@ -417,6 +392,7 @@ func init() { proto.RegisterType((*RollbackConfig)(nil), "k8s.io.api.extensions.v1beta1.RollbackConfig") proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDaemonSet") proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDeployment") + proto.RegisterType((*RunAsGroupStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RunAsGroupStrategyOptions") proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RunAsUserStrategyOptions") proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SELinuxStrategyOptions") proto.RegisterType((*Scale)(nil), "k8s.io.api.extensions.v1beta1.Scale") @@ -476,126 +452,6 @@ func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *CustomMetricCurrentStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomMetricCurrentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) - n1, err := m.CurrentValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - return i, nil -} - -func (m *CustomMetricCurrentStatusList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomMetricCurrentStatusList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *CustomMetricTarget) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomMetricTarget) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) - n2, err := m.TargetValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - return i, nil -} - -func (m *CustomMetricTargetList) Marshal() (dAtA []byte, err 
error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomMetricTargetList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - func (m *DaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -614,27 +470,27 @@ func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n3 + i += n1 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n4, err := m.Spec.MarshalTo(dAtA[i:]) + n2, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n4 + i += n2 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n5, err := m.Status.MarshalTo(dAtA[i:]) + n3, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n5 + i += n3 return i, nil } @@ -664,11 +520,11 @@ func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n6, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n4, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n4 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -698,11 +554,11 @@ func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n7, err := m.ListMeta.MarshalTo(dAtA[i:]) + n5, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n5 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -737,28 +593,28 @@ func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n8, err := m.Selector.MarshalTo(dAtA[i:]) + n6, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n6 } dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n9, err := m.Template.MarshalTo(dAtA[i:]) + n7, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n7 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n10, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + n8, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n8 dAtA[i] = 0x20 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) @@ -855,11 +711,11 @@ func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n11, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + n9, err := m.RollingUpdate.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n11 + i += n9 } return i, nil } @@ -882,27 +738,27 @@ func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n12, err := 
m.ObjectMeta.MarshalTo(dAtA[i:]) + n10, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n12 + i += n10 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n13, err := m.Spec.MarshalTo(dAtA[i:]) + n11, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n13 + i += n11 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n14, err := m.Status.MarshalTo(dAtA[i:]) + n12, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n14 + i += n12 return i, nil } @@ -940,19 +796,19 @@ func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n15, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) + n13, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n15 + i += n13 dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n16, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n14, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n16 + i += n14 return i, nil } @@ -974,11 +830,11 @@ func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n17, err := m.ListMeta.MarshalTo(dAtA[i:]) + n15, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n17 + i += n15 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -1038,11 +894,11 @@ func (m *DeploymentRollback) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n18, err := m.RollbackTo.MarshalTo(dAtA[i:]) + n16, err := m.RollbackTo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n18 + i += n16 return i, nil } @@ -1070,28 +926,28 @@ func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n19, err := m.Selector.MarshalTo(dAtA[i:]) + n17, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n19 + i += n17 } dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n20, err := m.Template.MarshalTo(dAtA[i:]) + n18, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n20 + i += n18 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) - n21, err := m.Strategy.MarshalTo(dAtA[i:]) + n19, err := m.Strategy.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n21 + i += n19 dAtA[i] = 0x28 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) @@ -1112,11 +968,11 @@ func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x42 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n22, err := m.RollbackTo.MarshalTo(dAtA[i:]) + n20, err := m.RollbackTo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n22 + i += n20 } if m.ProgressDeadlineSeconds != nil { dAtA[i] = 0x48 @@ -1202,11 +1058,11 @@ func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n23, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + n21, err := m.RollingUpdate.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n23 + i += n21 } return i, nil } @@ -1267,11 +1123,11 @@ func (m *HTTPIngressPath) MarshalTo(dAtA []byte) (int, error) { 
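The hunks through this region are pure renumbering: gogo's generator names its `MarshalTo` temporaries `n1, n2, ...` in file order, so deleting the `CustomMetric*` marshalers (which held `n1` and `n2`) shifts every later temporary down by two in this stretch. Each hunk touches the same three-step field write shown next for `HTTPIngressPath.Backend`: a one-byte tag (`field<<3 | wiretype`), the submessage length as a varint, then the payload bytes. A self-contained sketch of that encoding, mirroring this file's `encodeVarintGenerated` helper (the `main` wrapper and string payload are illustrative stand-ins for a marshaled submessage):

```go
package main

import "fmt"

// encodeVarintGenerated mirrors the helper in this file: it writes v as a
// base-128 varint starting at offset and returns the offset past it.
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return offset + 1
}

func main() {
	payload := []byte("backend") // stands in for m.Backend's marshaled bytes
	dAtA := make([]byte, 16)
	i := 0
	dAtA[i] = 0x12 // tag for field 2, wire type 2: (2<<3)|2 = 0x12
	i++
	i = encodeVarintGenerated(dAtA, i, uint64(len(payload)))
	i += copy(dAtA[i:], payload)
	fmt.Printf("% x\n", dAtA[:i]) // 12 07 62 61 63 6b 65 6e 64
}
```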
dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) - n24, err := m.Backend.MarshalTo(dAtA[i:]) + n22, err := m.Backend.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n24 + i += n22 return i, nil } @@ -1408,27 +1264,27 @@ func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n25, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n23, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n25 + i += n23 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n26, err := m.Spec.MarshalTo(dAtA[i:]) + n24, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n26 + i += n24 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n27, err := m.Status.MarshalTo(dAtA[i:]) + n25, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n27 + i += n25 return i, nil } @@ -1454,11 +1310,11 @@ func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ServicePort.Size())) - n28, err := m.ServicePort.MarshalTo(dAtA[i:]) + n26, err := m.ServicePort.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n28 + i += n26 return i, nil } @@ -1480,11 +1336,11 @@ func (m *IngressList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n29, err := m.ListMeta.MarshalTo(dAtA[i:]) + n27, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n29 + i += n27 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -1522,11 +1378,11 @@ func (m *IngressRule) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.IngressRuleValue.Size())) - n30, err := m.IngressRuleValue.MarshalTo(dAtA[i:]) + n28, err := m.IngressRuleValue.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n30 + i += n28 return i, nil } @@ -1549,11 +1405,11 @@ func (m *IngressRuleValue) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.HTTP.Size())) - n31, err := m.HTTP.MarshalTo(dAtA[i:]) + n29, err := m.HTTP.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n31 + i += n29 } return i, nil } @@ -1577,11 +1433,11 @@ func (m *IngressSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) - n32, err := m.Backend.MarshalTo(dAtA[i:]) + n30, err := m.Backend.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n32 + i += n30 } if len(m.TLS) > 0 { for _, msg := range m.TLS { @@ -1628,11 +1484,11 @@ func (m *IngressStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size())) - n33, err := m.LoadBalancer.MarshalTo(dAtA[i:]) + n31, err := m.LoadBalancer.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n33 + i += n31 return i, nil } @@ -1691,19 +1547,19 @@ func (m *NetworkPolicy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n34, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n32, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n34 + i += n32 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n35, err := m.Spec.MarshalTo(dAtA[i:]) + n33, err := m.Spec.MarshalTo(dAtA[i:]) if err != 
nil { return 0, err } - i += n35 + i += n33 return i, nil } @@ -1809,11 +1665,11 @@ func (m *NetworkPolicyList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n36, err := m.ListMeta.MarshalTo(dAtA[i:]) + n34, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n36 + i += n34 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -1848,31 +1704,31 @@ func (m *NetworkPolicyPeer) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) - n37, err := m.PodSelector.MarshalTo(dAtA[i:]) + n35, err := m.PodSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n37 + i += n35 } if m.NamespaceSelector != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) - n38, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) + n36, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n38 + i += n36 } if m.IPBlock != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.IPBlock.Size())) - n39, err := m.IPBlock.MarshalTo(dAtA[i:]) + n37, err := m.IPBlock.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n39 + i += n37 } return i, nil } @@ -1902,11 +1758,11 @@ func (m *NetworkPolicyPort) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n40, err := m.Port.MarshalTo(dAtA[i:]) + n38, err := m.Port.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n40 + i += n38 } return i, nil } @@ -1929,11 +1785,11 @@ func (m *NetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) - n41, err := m.PodSelector.MarshalTo(dAtA[i:]) + n39, err := m.PodSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n41 + i += n39 if len(m.Ingress) > 0 { for _, msg := range m.Ingress { dAtA[i] = 0x12 @@ -1994,19 +1850,19 @@ func (m *PodSecurityPolicy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n42, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n40, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n42 + i += n40 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n43, err := m.Spec.MarshalTo(dAtA[i:]) + n41, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n43 + i += n41 return i, nil } @@ -2028,11 +1884,11 @@ func (m *PodSecurityPolicyList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n44, err := m.ListMeta.MarshalTo(dAtA[i:]) + n42, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n44 + i += n42 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -2170,35 +2026,35 @@ func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x52 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SELinux.Size())) - n45, err := m.SELinux.MarshalTo(dAtA[i:]) + n43, err := m.SELinux.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n45 + i += n43 dAtA[i] = 0x5a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsUser.Size())) - n46, err := m.RunAsUser.MarshalTo(dAtA[i:]) + n44, err := m.RunAsUser.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n46 + i += n44 dAtA[i] = 0x62 i++ i = 
encodeVarintGenerated(dAtA, i, uint64(m.SupplementalGroups.Size())) - n47, err := m.SupplementalGroups.MarshalTo(dAtA[i:]) + n45, err := m.SupplementalGroups.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n47 + i += n45 dAtA[i] = 0x6a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FSGroup.Size())) - n48, err := m.FSGroup.MarshalTo(dAtA[i:]) + n46, err := m.FSGroup.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n48 + i += n46 dAtA[i] = 0x70 i++ if m.ReadOnlyRootFilesystem { @@ -2308,6 +2164,18 @@ func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { i += copy(dAtA[i:], s) } } + if m.RunAsGroup != nil { + dAtA[i] = 0xb2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsGroup.Size())) + n47, err := m.RunAsGroup.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n47 + } return i, nil } @@ -2329,27 +2197,27 @@ func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n49, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n48, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n49 + i += n48 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n50, err := m.Spec.MarshalTo(dAtA[i:]) + n49, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n50 + i += n49 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n51, err := m.Status.MarshalTo(dAtA[i:]) + n50, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n51 + i += n50 return i, nil } @@ -2379,11 +2247,11 @@ func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n52, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n51, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n52 + i += n51 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -2413,11 +2281,11 @@ func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n53, err := m.ListMeta.MarshalTo(dAtA[i:]) + n52, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n53 + i += n52 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -2457,20 +2325,20 @@ func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n54, err := m.Selector.MarshalTo(dAtA[i:]) + n53, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n54 + i += n53 } dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n55, err := m.Template.MarshalTo(dAtA[i:]) + n54, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n55 + i += n54 dAtA[i] = 0x20 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) @@ -2580,11 +2448,11 @@ func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n56, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + n55, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n56 + i += n55 } return i, nil } @@ -2608,21 +2476,55 @@ func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = 
encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n57, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + n56, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n57 + i += n56 } if m.MaxSurge != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) - n58, err := m.MaxSurge.MarshalTo(dAtA[i:]) + n57, err := m.MaxSurge.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n58 + i += n57 + } + return i, nil +} + +func (m *RunAsGroupStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RunAsGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i += copy(dAtA[i:], m.Rule) + if len(m.Ranges) > 0 { + for _, msg := range m.Ranges { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } return i, nil } @@ -2684,11 +2586,11 @@ func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n59, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) + n58, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n59 + i += n58 } return i, nil } @@ -2711,27 +2613,27 @@ func (m *Scale) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n60, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n59, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n60 + i += n59 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n61, err := m.Spec.MarshalTo(dAtA[i:]) + n60, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n61 + i += n60 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n62, err := m.Status.MarshalTo(dAtA[i:]) + n61, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n62 + i += n61 return i, nil } @@ -2837,24 +2739,6 @@ func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -2881,63 +2765,19 @@ func (m *AllowedHostPath) Size() (n int) { return n } -func (m *CustomMetricCurrentStatus) Size() (n int) { +func (m *DaemonSet) Size() (n int) { var l int _ = l - l = len(m.Name) + l = m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) - l = m.CurrentValue.Size() + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() n += 1 + l + sovGenerated(uint64(l)) return n } -func (m 
*CustomMetricCurrentStatusList) Size() (n int) { - var l int - _ = l - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *CustomMetricTarget) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.TargetValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *CustomMetricTargetList) Size() (n int) { - var l int - _ = l - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *DaemonSet) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *DaemonSetCondition) Size() (n int) { +func (m *DaemonSetCondition) Size() (n int) { var l int _ = l l = len(m.Type) @@ -3535,6 +3375,10 @@ func (m *PodSecurityPolicySpec) Size() (n int) { n += 2 + l + sovGenerated(uint64(l)) } } + if m.RunAsGroup != nil { + l = m.RunAsGroup.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -3650,6 +3494,20 @@ func (m *RollingUpdateDeployment) Size() (n int) { return n } +func (m *RunAsGroupStrategyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *RunAsUserStrategyOptions) Size() (n int) { var l int _ = l @@ -3760,48 +3618,6 @@ func (this *AllowedHostPath) String() string { }, "") return s } -func (this *CustomMetricCurrentStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CustomMetricCurrentStatus{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *CustomMetricCurrentStatusList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CustomMetricCurrentStatusList{`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CustomMetricCurrentStatus", "CustomMetricCurrentStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *CustomMetricTarget) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CustomMetricTarget{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `TargetValue:` + strings.Replace(strings.Replace(this.TargetValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *CustomMetricTargetList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CustomMetricTargetList{`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CustomMetricTarget", "CustomMetricTarget", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} func (this *DaemonSet) String() string { if this == nil { return "nil" @@ -4271,6 +4087,7 @@ func (this *PodSecurityPolicySpec) String() string { `AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`, `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, `AllowedProcMountTypes:` + fmt.Sprintf("%v", this.AllowedProcMountTypes) + `,`, + 
`RunAsGroup:` + strings.Replace(fmt.Sprintf("%v", this.RunAsGroup), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, `}`, }, "") return s @@ -4380,6 +4197,17 @@ func (this *RollingUpdateDeployment) String() string { }, "") return s } +func (this *RunAsGroupStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RunAsGroupStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *RunAsUserStrategyOptions) String() string { if this == nil { return "nil" @@ -4597,385 +4425,16 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PathPrefix = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomMetricCurrentStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomMetricCurrentStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomMetricCurrentStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - 
} - if err := m.CurrentValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomMetricCurrentStatusList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomMetricCurrentStatusList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomMetricCurrentStatusList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, CustomMetricCurrentStatus{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomMetricTarget) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomMetricTarget: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomMetricTarget: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field TargetValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.TargetValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomMetricTargetList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomMetricTargetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomMetricTargetList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + if postIndex > l { + return io.ErrUnexpectedEOF } - var msglen int + m.PathPrefix = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4985,23 +4444,12 @@ func (m *CustomMetricTargetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, CustomMetricTarget{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6597,51 +6045,14 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if 
postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.UpdatedAnnotations == nil { m.UpdatedAnnotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6651,41 +6062,80 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.UpdatedAnnotations[mapkey] = mapvalue - } else { - var mapvalue string - m.UpdatedAnnotations[mapkey] = mapvalue } + m.UpdatedAnnotations[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -10495,6 +9945,39 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } m.AllowedProcMountTypes = append(m.AllowedProcMountTypes, k8s_io_api_core_v1.ProcMountType(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RunAsGroup == nil { + m.RunAsGroup = &RunAsGroupStrategyOptions{} + } + if err := m.RunAsGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -11609,6 +11092,116 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } return nil } +func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RunAsGroupStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RunAsGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = RunAsGroupStrategy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -12114,51 +11707,14 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Selector == nil { m.Selector = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12168,41 +11724,80 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Selector[mapkey] = mapvalue - } else { - var mapvalue string - m.Selector[mapkey] = mapvalue } + m.Selector[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -12474,235 +12069,230 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 3665 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 
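The `DeploymentRollback.UpdatedAnnotations` and `ScaleStatus.Selector` hunks above swap positional map decoding (key read first, then an optional value) for a loop that dispatches on each sub-field's number, so entries survive reordered or unknown sub-fields inside a map entry. On the wire, a `map<string,string>` is a repeated embedded message with the key in field 1 and the value in field 2. A minimal decoding sketch in the same field-number-driven style (hypothetical bytes; single-byte tags and lengths only, for brevity):

```go
package main

import "fmt"

func main() {
	// One map<string,string> entry, "app" -> "web": field 1 (key) and
	// field 2 (value), both wire type 2 (length-delimited).
	entry := []byte{
		0x0a, 0x03, 'a', 'p', 'p', // tag (1<<3)|2, len 3, "app"
		0x12, 0x03, 'w', 'e', 'b', // tag (2<<3)|2, len 3, "web"
	}
	selector := map[string]string{}
	var mapkey, mapvalue string
	for i := 0; i < len(entry); {
		wire := entry[i] // assumes one-byte tags
		i++
		n := int(entry[i]) // assumes one-byte lengths
		i++
		switch wire >> 3 { // the field number drives the switch, not position
		case 1:
			mapkey = string(entry[i : i+n])
		case 2:
			mapvalue = string(entry[i : i+n])
		}
		i += n
	}
	selector[mapkey] = mapvalue
	fmt.Println(selector) // map[app:web]
}
```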
0xbc, 0xd1, 0x7e, 0xe2, 0x58, 0xae, 0xed, 0x9b, - 0xb3, 0x6b, 0x7b, 0xe3, 0xfa, 0x0d, 0x28, 0x39, 0xae, 0xe1, 0xf7, 0xe3, 0x35, 0xbf, 0x1f, 0xd8, - 0x35, 0x78, 0x3f, 0x16, 0x13, 0x52, 0x5e, 0x27, 0xb8, 0x00, 0xda, 0x81, 0x49, 0x47, 0x33, 0x7b, - 0xc4, 0x4f, 0xab, 0x77, 0x87, 0x58, 0xbf, 0xb9, 0x8e, 0x39, 0x7b, 0xa4, 0x78, 0x13, 0xd2, 0x58, - 0xa2, 0xa8, 0x7f, 0xa4, 0xc0, 0xdc, 0xd3, 0xbd, 0xbd, 0xd6, 0xa6, 0x29, 0x56, 0xb4, 0x38, 0x7d, - 0xbf, 0x03, 0x25, 0x5b, 0x63, 0x47, 0xc9, 0x4c, 0xcf, 0x69, 0x58, 0x50, 0xd0, 0x77, 0xa1, 0xcc, - 0x23, 0x09, 0x31, 0xbb, 0x23, 0x96, 0xda, 0x12, 0xbe, 0xe9, 0x09, 0x85, 0x15, 0xa2, 0x6c, 0xc0, - 0x3e, 0x9c, 0x7a, 0x0c, 0x4b, 0x11, 0x73, 0xf8, 0x78, 0x88, 0x63, 0x60, 0xd4, 0x86, 0x09, 0xae, - 0xd9, 0x3f, 0xe5, 0x1d, 0x76, 0x98, 0x99, 0xe8, 0x52, 0x58, 0xe9, 0xf0, 0x2f, 0x8a, 0x3d, 0x2c, - 0x75, 0x1b, 0x66, 0xc4, 0x95, 0x83, 0xe5, 0x30, 0x31, 0x2c, 0xe8, 0x36, 0x14, 0xfb, 0xba, 0x29, - 0xf3, 0xec, 0xb4, 0x94, 0x29, 0xf2, 0x1c, 0xc1, 0xdb, 0x05, 0x59, 0x3b, 0x95, 0x91, 0x27, 0x24, - 0x6b, 0xa7, 0x98, 0xb7, 0xab, 0x4f, 0xa0, 0x2c, 0x87, 0x3b, 0x0a, 0x54, 0xbc, 0x18, 0xa8, 0x98, - 0x01, 0xb4, 0x0b, 0xe5, 0xcd, 0x56, 0xd3, 0xb0, 0xbc, 0xaa, 0xab, 0xa3, 0x77, 0x9d, 0xe4, 0x5c, - 0xac, 0x6d, 0xae, 0x63, 0x2c, 0x28, 0x48, 0x85, 0x49, 0x72, 0xda, 0x21, 0x36, 0x13, 0x1e, 0x31, - 0xd5, 0x04, 0x3e, 0xcb, 0x8f, 0x45, 0x0b, 0x96, 0x14, 0xf5, 0x8f, 0x0b, 0x50, 0x96, 0xc3, 0x71, - 0x05, 0xbb, 0xb0, 0xad, 0xd8, 0x2e, 0xec, 0x8d, 0xd1, 0x5c, 0x23, 0x77, 0x0b, 0xb6, 0x97, 0xd8, - 0x82, 0xdd, 0x1f, 0x11, 0xef, 0xe2, 0xfd, 0xd7, 0x3f, 0x28, 0x30, 0x1b, 0x77, 0x4a, 0xf4, 0x2e, - 0x4c, 0xf3, 0x84, 0xa3, 0x77, 0xc8, 0x4e, 0x58, 0xe7, 0x06, 0x87, 0x30, 0xed, 0x90, 0x84, 0xa3, - 0x7c, 0xa8, 0x17, 0x88, 0x71, 0x3f, 0x92, 0x9d, 0xce, 0x1f, 0x52, 0x97, 0xe9, 0x46, 0xdd, 0xbb, - 0x46, 0xab, 0x6f, 0x9a, 0x6c, 0xd7, 0x69, 0x33, 0x47, 0x37, 0x7b, 0x29, 0x45, 0xc2, 0x29, 0xa3, - 0xc8, 0xea, 0x3f, 0x29, 0x30, 0x2d, 0x4d, 0xbe, 0x82, 0x5d, 0xc5, 0xaf, 0xc5, 0x77, 0x15, 0x77, - 0x47, 0x5c, 0xe0, 0xd9, 0x5b, 0x8a, 0xbf, 0x09, 0x4d, 0xe7, 0x4b, 0x9a, 0x7b, 0xf5, 0x91, 0x45, - 0x59, 0xd2, 0xab, 0xf9, 0x62, 0xc4, 0x82, 0x82, 0x5c, 0x98, 0xd7, 0x13, 0x31, 0x40, 0x0e, 0x6d, - 0x63, 0x34, 0x4b, 0x02, 0xb1, 0x66, 0x55, 0xc2, 0xcf, 0x27, 0x29, 0x38, 0xa5, 0x42, 0x25, 0x90, - 0xe2, 0x42, 0x1f, 0x42, 0xe9, 0x88, 0x31, 0x3b, 0xe3, 0xbc, 0x7a, 0x48, 0xe4, 0x09, 0x4d, 0xa8, - 0x88, 0xde, 0xed, 0xed, 0xb5, 0xb0, 0x80, 0x52, 0xff, 0x27, 0x1c, 0x8f, 0xb6, 0xe7, 0xe3, 0x41, - 0x3c, 0x55, 0x2e, 0x13, 0x4f, 0xa7, 0xb3, 0x62, 0x29, 0x7a, 0x0a, 0x45, 0x66, 0x8c, 0xba, 0x2d, - 0x94, 0x88, 0x7b, 0x5b, 0xed, 0x30, 0x20, 0xed, 0x6d, 0xb5, 0x31, 0x87, 0x40, 0xbb, 0x30, 0xc1, - 0xb3, 0x0f, 0x5f, 0x82, 0xc5, 0xd1, 0x97, 0x34, 0xef, 0x7f, 0xe8, 0x10, 0xfc, 0x8b, 0x62, 0x0f, - 0x47, 0xfd, 0x3e, 0xcc, 0xc4, 0xd6, 0x29, 0xfa, 0x04, 0xae, 0x1b, 0x96, 0xd6, 0x6d, 0x6a, 0x86, - 0x66, 0x76, 0x88, 0x7f, 0x39, 0x70, 0x37, 0x6b, 0x87, 0xb1, 0x15, 0xe1, 0x93, 0xab, 0x3c, 0xb8, - 0x4e, 0x8d, 0xd2, 0x70, 0x0c, 0x51, 0xd5, 0x00, 0xc2, 0x3e, 0xa2, 0x1a, 0x4c, 0x70, 0x3f, 0xf3, - 0xf2, 0xc9, 0x54, 0x73, 0x8a, 0x5b, 0xc8, 0xdd, 0x8f, 0x62, 0xaf, 0x1d, 0x3d, 0x04, 0xa0, 0xa4, - 0xe3, 0x10, 0x26, 0x82, 0x41, 0x21, 0x7e, 0x05, 0xdd, 0x0e, 0x28, 0x38, 0xc2, 0xa5, 0xfe, 0x8b, - 0x02, 0x33, 0x3b, 0x84, 0xfd, 0xc0, 0x72, 0x8e, 0x5b, 0x96, 0xa1, 0x77, 0x06, 0x57, 0x10, 0x6c, - 0x71, 0x2c, 0xd8, 0xbe, 0x35, 0x64, 0x66, 0x62, 0xd6, 0xe5, 0x85, 0x5c, 0xf5, 0x33, 0x05, 0x6e, - 0xc6, 0x38, 0x1f, 0x87, 0x4b, 0x77, 0x1f, 0x26, 0x6c, 0xcb, 0x61, 0x7e, 
0x22, 0x1e, 0x4b, 0x21, - 0x0f, 0x63, 0x91, 0x54, 0xcc, 0x61, 0xb0, 0x87, 0x86, 0xb6, 0xa0, 0xc0, 0x2c, 0xe9, 0xaa, 0xe3, - 0x61, 0x12, 0xe2, 0x34, 0x41, 0x62, 0x16, 0xf6, 0x2c, 0x5c, 0x60, 0x16, 0x9f, 0x88, 0x6a, 0x8c, - 0x2b, 0x1a, 0x7c, 0x5e, 0x52, 0x0f, 0x30, 0x94, 0x0e, 0x1d, 0xab, 0x7f, 0xe9, 0x3e, 0x04, 0x13, - 0xb1, 0xe1, 0x58, 0x7d, 0x2c, 0xb0, 0xd4, 0x9f, 0x28, 0xb0, 0x10, 0xe3, 0xbc, 0x82, 0xc0, 0xff, - 0x61, 0x3c, 0xf0, 0xdf, 0x1f, 0xa7, 0x23, 0x39, 0xe1, 0xff, 0x27, 0x85, 0x44, 0x37, 0x78, 0x87, - 0xd1, 0x21, 0x4c, 0xdb, 0x56, 0xb7, 0xfd, 0x02, 0xae, 0x03, 0xe7, 0x78, 0xde, 0x6c, 0x85, 0x58, - 0x38, 0x0a, 0x8c, 0x4e, 0x61, 0xc1, 0xd4, 0xfa, 0x84, 0xda, 0x5a, 0x87, 0xb4, 0x5f, 0xc0, 0x01, - 0xc9, 0x0d, 0x71, 0xdf, 0x90, 0x44, 0xc4, 0x69, 0x25, 0x68, 0x1b, 0xca, 0xba, 0x2d, 0xea, 0x38, - 0x59, 0xbb, 0x0c, 0xcd, 0xa2, 0x5e, 0xd5, 0xe7, 0xc5, 0x73, 0xf9, 0x81, 0x7d, 0x0c, 0xf5, 0x6f, - 0x93, 0xde, 0xc0, 0xfd, 0x0f, 0x3d, 0x81, 0x8a, 0x78, 0x84, 0xd3, 0xb1, 0x0c, 0xff, 0x66, 0x80, - 0xcf, 0x6c, 0x4b, 0xb6, 0x3d, 0x3f, 0xab, 0xbd, 0x92, 0x71, 0xe8, 0xeb, 0x93, 0x71, 0x20, 0x8c, - 0x76, 0xa0, 0x64, 0x7f, 0x95, 0x0a, 0x46, 0x24, 0x39, 0x51, 0xb6, 0x08, 0x1c, 0xf5, 0xf7, 0x8a, - 0x09, 0x73, 0x45, 0xaa, 0xfb, 0xf4, 0x85, 0xcd, 0x7a, 0x50, 0x31, 0xe5, 0xce, 0xfc, 0x01, 0x94, - 0x65, 0x86, 0x97, 0xce, 0xfc, 0x8d, 0x71, 0x9c, 0x39, 0x9a, 0xc5, 0x82, 0x0d, 0x8b, 0xdf, 0xe8, - 0x03, 0xa3, 0xef, 0xc1, 0x24, 0xf1, 0x54, 0x78, 0xb9, 0xf1, 0xbd, 0x71, 0x54, 0x84, 0x71, 0x35, - 0x2c, 0x54, 0x65, 0x9b, 0x44, 0x45, 0xdf, 0xe2, 0xe3, 0xc5, 0x79, 0xf9, 0x26, 0x90, 0x56, 0x4b, - 0x22, 0x5d, 0xdd, 0xf6, 0xba, 0x1d, 0x34, 0x3f, 0x3f, 0xab, 0x41, 0xf8, 0x89, 0xa3, 0x12, 0xea, - 0xbf, 0x29, 0xb0, 0x20, 0x46, 0xa8, 0xe3, 0x3a, 0x3a, 0x1b, 0x5c, 0x59, 0x62, 0x7a, 0x16, 0x4b, - 0x4c, 0xef, 0x0c, 0x19, 0x96, 0x94, 0x85, 0xb9, 0xc9, 0xe9, 0xa7, 0x0a, 0xdc, 0x48, 0x71, 0x5f, - 0x41, 0x5c, 0xdc, 0x8f, 0xc7, 0xc5, 0xb7, 0xc6, 0xed, 0x50, 0x4e, 0x6c, 0xfc, 0xab, 0xb9, 0x8c, - 0xee, 0x88, 0x95, 0xf2, 0x10, 0xc0, 0x76, 0xf4, 0x13, 0xdd, 0x20, 0x3d, 0x79, 0x09, 0x5e, 0x89, - 0x3c, 0x82, 0x0b, 0x28, 0x38, 0xc2, 0x85, 0x28, 0x2c, 0x77, 0xc9, 0xa1, 0xe6, 0x1a, 0x6c, 0xb5, - 0xdb, 0x5d, 0xd3, 0x6c, 0xed, 0x40, 0x37, 0x74, 0xa6, 0xcb, 0xe3, 0x82, 0xa9, 0xe6, 0x23, 0xef, - 0x72, 0x3a, 0x8b, 0xe3, 0xf9, 0x59, 0xed, 0x76, 0xd6, 0xed, 0x90, 0xcf, 0x32, 0xc0, 0x39, 0xd0, - 0x68, 0x00, 0x55, 0x87, 0x7c, 0xdf, 0xd5, 0x1d, 0xd2, 0x5d, 0x77, 0x2c, 0x3b, 0xa6, 0xb6, 0x28, - 0xd4, 0xfe, 0xea, 0xf9, 0x59, 0xad, 0x8a, 0x73, 0x78, 0x86, 0x2b, 0xce, 0x85, 0x47, 0x9f, 0xc2, - 0xa2, 0xe6, 0xbd, 0x1d, 0x8c, 0x69, 0xf5, 0x56, 0xc9, 0xfb, 0xe7, 0x67, 0xb5, 0xc5, 0xd5, 0x34, - 0x79, 0xb8, 0xc2, 0x2c, 0x50, 0xd4, 0x80, 0xf2, 0x89, 0x78, 0xd9, 0x48, 0xab, 0x13, 0x02, 0x9f, - 0x27, 0x82, 0xb2, 0xf7, 0xd8, 0x91, 0x63, 0x4e, 0x6e, 0xb4, 0xc5, 0xea, 0xf3, 0xb9, 0xf8, 0x86, - 0x92, 0xd7, 0x92, 0x72, 0xc5, 0x8b, 0x13, 0xe3, 0x4a, 0x18, 0xb5, 0x9e, 0x86, 0x24, 0x1c, 0xe5, - 0x43, 0x1f, 0xc3, 0xd4, 0x91, 0x3c, 0x95, 0xa0, 0xd5, 0xf2, 0x48, 0x49, 0x38, 0x76, 0x8a, 0xd1, - 0x5c, 0x90, 0x2a, 0xa6, 0xfc, 0x66, 0x8a, 0x43, 0x44, 0xf4, 0x3a, 0x94, 0xc5, 0xc7, 0xe6, 0xba, - 0x38, 0x8e, 0xab, 0x84, 0xb1, 0xed, 0xa9, 0xd7, 0x8c, 0x7d, 0xba, 0xcf, 0xba, 0xd9, 0x5a, 0x13, - 0xc7, 0xc2, 0x09, 0xd6, 0xcd, 0xd6, 0x1a, 0xf6, 0xe9, 0xe8, 0x13, 0x28, 0x53, 0xb2, 0xa5, 0x9b, - 0xee, 0x69, 0x15, 0x46, 0xba, 0x54, 0x6e, 0x3f, 0x16, 0xdc, 0x89, 0x83, 0xb1, 0x50, 0x83, 0xa4, - 0x63, 0x1f, 0x16, 0x1d, 0xc1, 0x94, 0xe3, 0x9a, 0xab, 0x74, 0x9f, 0x12, 0xa7, 0x3a, 0x2d, 0x74, - 
0x0c, 0x0b, 0xe7, 0xd8, 0xe7, 0x4f, 0x6a, 0x09, 0x46, 0x28, 0xe0, 0xc0, 0x21, 0x38, 0xfa, 0x43, - 0x05, 0x10, 0x75, 0x6d, 0xdb, 0x20, 0x7d, 0x62, 0x32, 0xcd, 0x10, 0x67, 0x71, 0xb4, 0x7a, 0x5d, - 0xe8, 0xfc, 0xf6, 0xb0, 0x7e, 0xa5, 0x04, 0x93, 0xca, 0x83, 0x43, 0xef, 0x34, 0x2b, 0xce, 0xd0, - 0xcb, 0x87, 0xf6, 0x90, 0x8a, 0xdf, 0xd5, 0x99, 0x91, 0x86, 0x36, 0xfb, 0xcc, 0x31, 0x1c, 0x5a, - 0x49, 0xc7, 0x3e, 0x2c, 0x7a, 0x06, 0xcb, 0xfe, 0xc3, 0x58, 0x6c, 0x59, 0x6c, 0x43, 0x37, 0x08, - 0x1d, 0x50, 0x46, 0xfa, 0xd5, 0x59, 0x31, 0xed, 0xc1, 0xdb, 0x0f, 0x9c, 0xc9, 0x85, 0x73, 0xa4, - 0x51, 0x1f, 0x6a, 0x7e, 0xc8, 0xe0, 0xeb, 0x29, 0x88, 0x59, 0x8f, 0x69, 0x47, 0x33, 0xbc, 0x7b, - 0x80, 0x39, 0xa1, 0xe0, 0xb5, 0xf3, 0xb3, 0x5a, 0x6d, 0xfd, 0x62, 0x56, 0x3c, 0x0c, 0x0b, 0x7d, - 0x17, 0xaa, 0x5a, 0x9e, 0x9e, 0x79, 0xa1, 0xe7, 0x55, 0x1e, 0x87, 0x72, 0x15, 0xe4, 0x4a, 0x23, - 0x06, 0xf3, 0x5a, 0xfc, 0x89, 0x32, 0xad, 0x2e, 0x8c, 0x74, 0x10, 0x99, 0x78, 0xd9, 0x1c, 0x1e, - 0x46, 0x24, 0x08, 0x14, 0xa7, 0x34, 0xa0, 0xdf, 0x06, 0xa4, 0x25, 0x5f, 0x55, 0xd3, 0x2a, 0x1a, - 0x29, 0xfd, 0xa4, 0x9e, 0x63, 0x87, 0x6e, 0x97, 0x22, 0x51, 0x9c, 0xa1, 0x07, 0x6d, 0xc1, 0x92, - 0x6c, 0xdd, 0x37, 0xa9, 0x76, 0x48, 0xda, 0x03, 0xda, 0x61, 0x06, 0xad, 0x2e, 0x8a, 0xd8, 0x27, - 0x2e, 0xbe, 0x56, 0x33, 0xe8, 0x38, 0x53, 0x0a, 0x7d, 0x1b, 0xe6, 0x0f, 0x2d, 0xe7, 0x40, 0xef, - 0x76, 0x89, 0xe9, 0x23, 0x2d, 0x09, 0xa4, 0x25, 0x3e, 0x1a, 0x1b, 0x09, 0x1a, 0x4e, 0x71, 0x23, - 0x0a, 0x37, 0x24, 0x72, 0xcb, 0xb1, 0x3a, 0xdb, 0x96, 0x6b, 0x32, 0xaf, 0x24, 0xba, 0x11, 0xa4, - 0x98, 0x1b, 0xab, 0x59, 0x0c, 0xcf, 0xcf, 0x6a, 0x77, 0xb2, 0x2b, 0xe0, 0x90, 0x09, 0x67, 0x63, - 0x8b, 0x17, 0x2c, 0xf2, 0x3e, 0xe3, 0x6a, 0x5e, 0x01, 0x8f, 0xf7, 0x82, 0x25, 0x34, 0xed, 0x85, - 0xbd, 0x60, 0x89, 0x40, 0x5e, 0x7c, 0x82, 0xfa, 0x5f, 0x05, 0x58, 0x0c, 0x99, 0x47, 0x7e, 0xc1, - 0x92, 0x21, 0xf2, 0x8b, 0x97, 0xc0, 0xc3, 0x5f, 0x02, 0x7f, 0xa6, 0xc0, 0x6c, 0x38, 0x74, 0xff, - 0xf7, 0x5e, 0x95, 0x84, 0xb6, 0xe5, 0xd4, 0xb9, 0x7f, 0x5f, 0x88, 0x76, 0xe0, 0xff, 0xfd, 0xd3, - 0x86, 0xaf, 0xfe, 0x7c, 0x57, 0xfd, 0x69, 0x11, 0xe6, 0x93, 0xab, 0x31, 0x76, 0x03, 0xae, 0x0c, - 0xbd, 0x01, 0x6f, 0xc1, 0xd2, 0xa1, 0x6b, 0x18, 0x03, 0x31, 0x0c, 0x91, 0x6b, 0x70, 0xef, 0x06, - 0xeb, 0x55, 0x29, 0xb9, 0xb4, 0x91, 0xc1, 0x83, 0x33, 0x25, 0x73, 0x6e, 0xf3, 0x8b, 0x97, 0xba, - 0xcd, 0x4f, 0x5d, 0x2e, 0x97, 0xc6, 0xb8, 0x5c, 0xce, 0xbc, 0x99, 0x9f, 0xb8, 0xc4, 0xcd, 0xfc, - 0x65, 0xae, 0xd2, 0x33, 0x82, 0xd8, 0xd0, 0x97, 0x9d, 0xaf, 0xc2, 0x2d, 0x29, 0xc6, 0xc4, 0x2d, - 0xb7, 0xc9, 0x1c, 0xcb, 0x30, 0x88, 0xb3, 0xee, 0xf6, 0xfb, 0x03, 0xf5, 0x9b, 0x30, 0x1b, 0x7f, - 0xbf, 0xe1, 0xcd, 0xb4, 0xf7, 0x84, 0x44, 0xde, 0x23, 0x46, 0x66, 0xda, 0x6b, 0xc7, 0x01, 0x87, - 0xfa, 0xfb, 0x0a, 0x2c, 0x67, 0xbf, 0xd3, 0x44, 0x06, 0xcc, 0xf6, 0xb5, 0xd3, 0xe8, 0xdb, 0x59, - 0xe5, 0x92, 0x27, 0x3c, 0xe2, 0xe2, 0x7e, 0x3b, 0x86, 0x85, 0x13, 0xd8, 0xea, 0x97, 0x0a, 0xdc, - 0xcc, 0xb9, 0x32, 0xbf, 0x5a, 0x4b, 0xd0, 0x47, 0x50, 0xe9, 0x6b, 0xa7, 0x6d, 0xd7, 0xe9, 0x91, - 0x4b, 0x9f, 0x69, 0x89, 0x88, 0xb1, 0x2d, 0x51, 0x70, 0x80, 0xa7, 0xfe, 0x48, 0x81, 0x6a, 0xde, - 0xee, 0x02, 0xbd, 0x1b, 0xbb, 0xdc, 0xff, 0x7a, 0xe2, 0x72, 0x7f, 0x21, 0x25, 0xf7, 0x92, 0xae, - 0xf6, 0xff, 0x4e, 0x81, 0xe5, 0xec, 0x5d, 0x16, 0x7a, 0x3b, 0x66, 0x61, 0x2d, 0x61, 0xe1, 0x5c, - 0x42, 0x4a, 0xda, 0xf7, 0x3d, 0x98, 0x95, 0x7b, 0x31, 0x09, 0x23, 0x47, 0x55, 0xcd, 0x8a, 0x95, - 0x12, 0xc2, 0xdf, 0x7b, 0x88, 0xf9, 0x8a, 0xb7, 0xe1, 0x04, 0x9a, 0xfa, 0x07, 0x05, 0x98, 0x68, - 0x77, 0x34, 0x83, 0x5c, 
0x41, 0x99, 0xf5, 0x9d, 0x58, 0x99, 0x35, 0xec, 0x7f, 0x2e, 0xc2, 0xaa, - 0xdc, 0x0a, 0x0b, 0x27, 0x2a, 0xac, 0x37, 0x46, 0x42, 0xbb, 0xb8, 0xb8, 0xfa, 0x15, 0x98, 0x0a, - 0x94, 0x8e, 0x17, 0xf3, 0xd5, 0xbf, 0x2e, 0xc0, 0x74, 0x44, 0xc5, 0x98, 0x19, 0xe3, 0x30, 0x96, - 0x69, 0x47, 0xf9, 0x77, 0x61, 0x44, 0x57, 0xdd, 0xcf, 0xad, 0xde, 0x3b, 0xcd, 0xf0, 0x65, 0x5e, - 0x3a, 0xe5, 0x7e, 0x13, 0x66, 0x99, 0xf8, 0xf7, 0x5d, 0x70, 0x12, 0x5c, 0x14, 0xbe, 0x18, 0xbc, - 0xee, 0xdd, 0x8b, 0x51, 0x71, 0x82, 0xfb, 0xd6, 0x23, 0x98, 0x89, 0x29, 0x1b, 0xeb, 0x99, 0xe5, - 0x3f, 0x2a, 0xf0, 0xf5, 0xa1, 0xfb, 0x74, 0xd4, 0x8c, 0x2d, 0x92, 0x7a, 0x62, 0x91, 0xac, 0xe4, - 0x03, 0xbc, 0xbc, 0xe7, 0x3a, 0xcd, 0x37, 0x3f, 0xff, 0x62, 0xe5, 0xda, 0xcf, 0xbe, 0x58, 0xb9, - 0xf6, 0xf3, 0x2f, 0x56, 0xae, 0xfd, 0xee, 0xf9, 0x8a, 0xf2, 0xf9, 0xf9, 0x8a, 0xf2, 0xb3, 0xf3, - 0x15, 0xe5, 0xe7, 0xe7, 0x2b, 0xca, 0x7f, 0x9c, 0xaf, 0x28, 0x7f, 0xf2, 0xe5, 0xca, 0xb5, 0x8f, - 0xca, 0x12, 0xee, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xa7, 0xd7, 0xb5, 0x56, 0x5c, 0x3d, 0x00, - 0x00, + // 3587 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcd, 0x6f, 0x1c, 0x47, + 0x76, 0x57, 0xcf, 0x0c, 0x39, 0xc3, 0x47, 0xf1, 0xab, 0x48, 0x91, 0x63, 0xc9, 0xe2, 0xc8, 0x6d, + 0x40, 0x91, 0x1d, 0x69, 0xc6, 0x92, 0x2d, 0x59, 0xb1, 0x10, 0xdb, 0x1c, 0x52, 0x94, 0xe8, 0xf0, + 0x63, 0x5c, 0x43, 0x2a, 0x86, 0x11, 0x3b, 0x6e, 0xce, 0x14, 0x87, 0x2d, 0xf6, 0x74, 0xb7, 0xbb, + 0x6b, 0x68, 0x0e, 0x90, 0x43, 0x0e, 0x49, 0x80, 0x00, 0x09, 0x92, 0x8b, 0x93, 0x1c, 0x63, 0x04, + 0xc8, 0x69, 0x17, 0xbb, 0xb7, 0xdd, 0x83, 0x61, 0x60, 0x01, 0x2f, 0x20, 0x2c, 0xbc, 0x80, 0x6f, + 0xeb, 0x13, 0xb1, 0xa6, 0x4f, 0x8b, 0xfd, 0x07, 0x16, 0x3a, 0x2c, 0x16, 0x55, 0x5d, 0xfd, 0xdd, + 0xad, 0x69, 0xd2, 0x12, 0xb1, 0x58, 0xec, 0x8d, 0x53, 0xef, 0xbd, 0xdf, 0x7b, 0x55, 0xf5, 0xea, + 0xbd, 0xd7, 0x55, 0x8f, 0xb0, 0xbc, 0x77, 0xdb, 0xae, 0xaa, 0x46, 0x6d, 0xaf, 0xb7, 0x4d, 0x2c, + 0x9d, 0x50, 0x62, 0xd7, 0xf6, 0x89, 0xde, 0x36, 0xac, 0x9a, 0x20, 0x28, 0xa6, 0x5a, 0x23, 0x07, + 0x94, 0xe8, 0xb6, 0x6a, 0xe8, 0x76, 0x6d, 0xff, 0xfa, 0x36, 0xa1, 0xca, 0xf5, 0x5a, 0x87, 0xe8, + 0xc4, 0x52, 0x28, 0x69, 0x57, 0x4d, 0xcb, 0xa0, 0x06, 0xba, 0xe8, 0xb0, 0x57, 0x15, 0x53, 0xad, + 0xfa, 0xec, 0x55, 0xc1, 0x7e, 0xfe, 0x5a, 0x47, 0xa5, 0xbb, 0xbd, 0xed, 0x6a, 0xcb, 0xe8, 0xd6, + 0x3a, 0x46, 0xc7, 0xa8, 0x71, 0xa9, 0xed, 0xde, 0x0e, 0xff, 0xc5, 0x7f, 0xf0, 0xbf, 0x1c, 0xb4, + 0xf3, 0x72, 0x40, 0x79, 0xcb, 0xb0, 0x48, 0x6d, 0x3f, 0xa6, 0xf1, 0xfc, 0x6b, 0x3e, 0x4f, 0x57, + 0x69, 0xed, 0xaa, 0x3a, 0xb1, 0xfa, 0x35, 0x73, 0xaf, 0xc3, 0x06, 0xec, 0x5a, 0x97, 0x50, 0x25, + 0x49, 0xaa, 0x96, 0x26, 0x65, 0xf5, 0x74, 0xaa, 0x76, 0x49, 0x4c, 0xe0, 0xd6, 0x20, 0x01, 0xbb, + 0xb5, 0x4b, 0xba, 0x4a, 0x4c, 0xee, 0xd5, 0x34, 0xb9, 0x1e, 0x55, 0xb5, 0x9a, 0xaa, 0x53, 0x9b, + 0x5a, 0x51, 0x21, 0xf9, 0x0e, 0x4c, 0x2d, 0x68, 0x9a, 0xf1, 0x09, 0x69, 0x2f, 0x6b, 0xe4, 0xe0, + 0x81, 0xa1, 0xf5, 0xba, 0x04, 0x5d, 0x86, 0xe1, 0xb6, 0xa5, 0xee, 0x13, 0xab, 0x2c, 0x5d, 0x92, + 0xae, 0x8c, 0xd4, 0xc7, 0x1f, 0x1d, 0x56, 0xce, 0x1c, 0x1d, 0x56, 0x86, 0x97, 0xf8, 0x28, 0x16, + 0x54, 0xd9, 0x86, 0x09, 0x21, 0x7c, 0xdf, 0xb0, 0x69, 0x43, 0xa1, 0xbb, 0xe8, 0x06, 0x80, 0xa9, + 0xd0, 0xdd, 0x86, 0x45, 0x76, 0xd4, 0x03, 0x21, 0x8e, 0x84, 0x38, 0x34, 0x3c, 0x0a, 0x0e, 0x70, + 0xa1, 0xab, 0x50, 0xb2, 0x88, 0xd2, 0xde, 0xd0, 0xb5, 0x7e, 0x39, 0x77, 0x49, 0xba, 0x52, 0xaa, + 0x4f, 0x0a, 0x89, 0x12, 0x16, 0xe3, 0xd8, 0xe3, 0x90, 0x3f, 0xcd, 0xc1, 0xc8, 0x92, 0x42, 
0xba, + 0x86, 0xde, 0x24, 0x14, 0x7d, 0x04, 0x25, 0xb6, 0xf0, 0x6d, 0x85, 0x2a, 0x5c, 0xdb, 0xe8, 0x8d, + 0x57, 0xaa, 0xbe, 0x63, 0x78, 0xeb, 0x50, 0x35, 0xf7, 0x3a, 0x6c, 0xc0, 0xae, 0x32, 0xee, 0xea, + 0xfe, 0xf5, 0xea, 0xc6, 0xf6, 0x43, 0xd2, 0xa2, 0x6b, 0x84, 0x2a, 0xbe, 0x7d, 0xfe, 0x18, 0xf6, + 0x50, 0xd1, 0x3a, 0x14, 0x6c, 0x93, 0xb4, 0xb8, 0x65, 0xa3, 0x37, 0xae, 0x56, 0x9f, 0xe8, 0x76, + 0x55, 0xcf, 0xb2, 0xa6, 0x49, 0x5a, 0xf5, 0xb3, 0x02, 0xb9, 0xc0, 0x7e, 0x61, 0x8e, 0x83, 0x1e, + 0xc0, 0xb0, 0x4d, 0x15, 0xda, 0xb3, 0xcb, 0x79, 0x8e, 0x58, 0xcd, 0x8c, 0xc8, 0xa5, 0xfc, 0xcd, + 0x70, 0x7e, 0x63, 0x81, 0x26, 0xff, 0x26, 0x07, 0xc8, 0xe3, 0x5d, 0x34, 0xf4, 0xb6, 0x4a, 0x55, + 0x43, 0x47, 0x6f, 0x40, 0x81, 0xf6, 0x4d, 0x22, 0xb6, 0xe2, 0xb2, 0x6b, 0xd0, 0x66, 0xdf, 0x24, + 0x8f, 0x0f, 0x2b, 0xb3, 0x71, 0x09, 0x46, 0xc1, 0x5c, 0x06, 0xad, 0x7a, 0xa6, 0xe6, 0xb8, 0xf4, + 0x6b, 0x61, 0xd5, 0x8f, 0x0f, 0x2b, 0x09, 0xc7, 0xa6, 0xea, 0x21, 0x85, 0x0d, 0x44, 0xfb, 0x80, + 0x34, 0xc5, 0xa6, 0x9b, 0x96, 0xa2, 0xdb, 0x8e, 0x26, 0xb5, 0x4b, 0xc4, 0x22, 0xbc, 0x9c, 0x6d, + 0xd3, 0x98, 0x44, 0xfd, 0xbc, 0xb0, 0x02, 0xad, 0xc6, 0xd0, 0x70, 0x82, 0x06, 0xe6, 0xcd, 0x16, + 0x51, 0x6c, 0x43, 0x2f, 0x17, 0xc2, 0xde, 0x8c, 0xf9, 0x28, 0x16, 0x54, 0xf4, 0x12, 0x14, 0xbb, + 0xc4, 0xb6, 0x95, 0x0e, 0x29, 0x0f, 0x71, 0xc6, 0x09, 0xc1, 0x58, 0x5c, 0x73, 0x86, 0xb1, 0x4b, + 0x97, 0x3f, 0x97, 0x60, 0xcc, 0x5b, 0xb9, 0x55, 0xd5, 0xa6, 0xe8, 0xef, 0x62, 0x7e, 0x58, 0xcd, + 0x36, 0x25, 0x26, 0xcd, 0xbd, 0xd0, 0xf3, 0x79, 0x77, 0x24, 0xe0, 0x83, 0x6b, 0x30, 0xa4, 0x52, + 0xd2, 0x65, 0xfb, 0x90, 0xbf, 0x32, 0x7a, 0xe3, 0x4a, 0x56, 0x97, 0xa9, 0x8f, 0x09, 0xd0, 0xa1, + 0x15, 0x26, 0x8e, 0x1d, 0x14, 0xf9, 0xbf, 0x0a, 0x01, 0xf3, 0x99, 0x6b, 0xa2, 0x0f, 0xa0, 0x64, + 0x13, 0x8d, 0xb4, 0xa8, 0x61, 0x09, 0xf3, 0x5f, 0xcd, 0x68, 0xbe, 0xb2, 0x4d, 0xb4, 0xa6, 0x10, + 0xad, 0x9f, 0x65, 0xf6, 0xbb, 0xbf, 0xb0, 0x07, 0x89, 0xde, 0x85, 0x12, 0x25, 0x5d, 0x53, 0x53, + 0x28, 0x11, 0xe7, 0xe8, 0xc5, 0xe0, 0x14, 0x98, 0xe7, 0x30, 0xb0, 0x86, 0xd1, 0xde, 0x14, 0x6c, + 0xfc, 0xf8, 0x78, 0x4b, 0xe2, 0x8e, 0x62, 0x0f, 0x06, 0xed, 0xc3, 0x78, 0xcf, 0x6c, 0x33, 0x4e, + 0xca, 0xe2, 0x59, 0xa7, 0x2f, 0x3c, 0xe9, 0x56, 0xd6, 0xb5, 0xd9, 0x0a, 0x49, 0xd7, 0x67, 0x85, + 0xae, 0xf1, 0xf0, 0x38, 0x8e, 0x68, 0x41, 0x0b, 0x30, 0xd1, 0x55, 0x75, 0x16, 0x97, 0xfa, 0x4d, + 0xd2, 0x32, 0xf4, 0xb6, 0xcd, 0xdd, 0x6a, 0xa8, 0x3e, 0x27, 0x00, 0x26, 0xd6, 0xc2, 0x64, 0x1c, + 0xe5, 0x47, 0xef, 0x00, 0x72, 0xa7, 0x71, 0xcf, 0x09, 0xc7, 0xaa, 0xa1, 0x73, 0x9f, 0xcb, 0xfb, + 0xce, 0xbd, 0x19, 0xe3, 0xc0, 0x09, 0x52, 0x68, 0x15, 0x66, 0x2c, 0xb2, 0xaf, 0xb2, 0x39, 0xde, + 0x57, 0x6d, 0x6a, 0x58, 0xfd, 0x55, 0xb5, 0xab, 0xd2, 0xf2, 0x30, 0xb7, 0xa9, 0x7c, 0x74, 0x58, + 0x99, 0xc1, 0x09, 0x74, 0x9c, 0x28, 0x25, 0xff, 0xf7, 0x30, 0x4c, 0x44, 0xe2, 0x0d, 0x7a, 0x00, + 0xb3, 0xad, 0x9e, 0x65, 0x11, 0x9d, 0xae, 0xf7, 0xba, 0xdb, 0xc4, 0x6a, 0xb6, 0x76, 0x49, 0xbb, + 0xa7, 0x91, 0x36, 0x77, 0x94, 0xa1, 0xfa, 0xbc, 0xb0, 0x78, 0x76, 0x31, 0x91, 0x0b, 0xa7, 0x48, + 0xb3, 0x55, 0xd0, 0xf9, 0xd0, 0x9a, 0x6a, 0xdb, 0x1e, 0x66, 0x8e, 0x63, 0x7a, 0xab, 0xb0, 0x1e, + 0xe3, 0xc0, 0x09, 0x52, 0xcc, 0xc6, 0x36, 0xb1, 0x55, 0x8b, 0xb4, 0xa3, 0x36, 0xe6, 0xc3, 0x36, + 0x2e, 0x25, 0x72, 0xe1, 0x14, 0x69, 0x74, 0x13, 0x46, 0x1d, 0x6d, 0x7c, 0xff, 0xc4, 0x46, 0x4f, + 0x0b, 0xb0, 0xd1, 0x75, 0x9f, 0x84, 0x83, 0x7c, 0x6c, 0x6a, 0xc6, 0xb6, 0x4d, 0xac, 0x7d, 0xd2, + 0x4e, 0xdf, 0xe0, 0x8d, 0x18, 0x07, 0x4e, 0x90, 0x62, 0x53, 0x73, 0x3c, 0x30, 0x36, 0xb5, 0xe1, + 0xf0, 0xd4, 0xb6, 
0x12, 0xb9, 0x70, 0x8a, 0x34, 0xf3, 0x63, 0xc7, 0xe4, 0x85, 0x7d, 0x45, 0xd5, + 0x94, 0x6d, 0x8d, 0x94, 0x8b, 0x61, 0x3f, 0x5e, 0x0f, 0x93, 0x71, 0x94, 0x1f, 0xdd, 0x83, 0x29, + 0x67, 0x68, 0x4b, 0x57, 0x3c, 0x90, 0x12, 0x07, 0x79, 0x4e, 0x80, 0x4c, 0xad, 0x47, 0x19, 0x70, + 0x5c, 0x06, 0xbd, 0x01, 0xe3, 0x2d, 0x43, 0xd3, 0xb8, 0x3f, 0x2e, 0x1a, 0x3d, 0x9d, 0x96, 0x47, + 0x38, 0x0a, 0x62, 0xe7, 0x71, 0x31, 0x44, 0xc1, 0x11, 0x4e, 0x44, 0x00, 0x5a, 0x6e, 0xc2, 0xb1, + 0xcb, 0xc0, 0xe3, 0xe3, 0xf5, 0xac, 0x31, 0xc0, 0x4b, 0x55, 0x7e, 0x0d, 0xe0, 0x0d, 0xd9, 0x38, + 0x00, 0x2c, 0xff, 0x42, 0x82, 0xb9, 0x94, 0xd0, 0x81, 0xde, 0x0a, 0xa5, 0xd8, 0xbf, 0x8c, 0xa4, + 0xd8, 0x0b, 0x29, 0x62, 0x81, 0x3c, 0xab, 0xc3, 0x98, 0xc5, 0x66, 0xa5, 0x77, 0x1c, 0x16, 0x11, + 0x23, 0x6f, 0x0e, 0x98, 0x06, 0x0e, 0xca, 0xf8, 0x31, 0x7f, 0xea, 0xe8, 0xb0, 0x32, 0x16, 0xa2, + 0xe1, 0x30, 0xbc, 0xfc, 0x3f, 0x39, 0x80, 0x25, 0x62, 0x6a, 0x46, 0xbf, 0x4b, 0xf4, 0xd3, 0xa8, + 0xa1, 0x36, 0x42, 0x35, 0xd4, 0xb5, 0x41, 0xdb, 0xe3, 0x99, 0x96, 0x5a, 0x44, 0xfd, 0x6d, 0xa4, + 0x88, 0xaa, 0x65, 0x87, 0x7c, 0x72, 0x15, 0xf5, 0xab, 0x3c, 0x4c, 0xfb, 0xcc, 0x7e, 0x19, 0x75, + 0x27, 0xb4, 0xc7, 0x7f, 0x11, 0xd9, 0xe3, 0xb9, 0x04, 0x91, 0x67, 0x56, 0x47, 0x3d, 0xfd, 0x7a, + 0x06, 0x3d, 0x84, 0x71, 0x56, 0x38, 0x39, 0xee, 0xc1, 0xcb, 0xb2, 0xe1, 0x63, 0x97, 0x65, 0x5e, + 0x02, 0x5d, 0x0d, 0x21, 0xe1, 0x08, 0x72, 0x4a, 0x19, 0x58, 0x7c, 0xd6, 0x65, 0xa0, 0xfc, 0x85, + 0x04, 0xe3, 0xfe, 0x36, 0x9d, 0x42, 0xd1, 0xb6, 0x1e, 0x2e, 0xda, 0x5e, 0xca, 0xec, 0xa2, 0x29, + 0x55, 0xdb, 0xef, 0x58, 0x81, 0xef, 0x31, 0xb1, 0x03, 0xbe, 0xad, 0xb4, 0xf6, 0xd0, 0x25, 0x28, + 0xe8, 0x4a, 0xd7, 0xf5, 0x4c, 0xef, 0xb0, 0xac, 0x2b, 0x5d, 0x82, 0x39, 0x05, 0x7d, 0x2a, 0x01, + 0x12, 0x59, 0x60, 0x41, 0xd7, 0x0d, 0xaa, 0x38, 0xb1, 0xd2, 0x31, 0x6b, 0x25, 0xb3, 0x59, 0xae, + 0xc6, 0xea, 0x56, 0x0c, 0xeb, 0xae, 0x4e, 0xad, 0xbe, 0xbf, 0x23, 0x71, 0x06, 0x9c, 0x60, 0x00, + 0x52, 0x00, 0x2c, 0x81, 0xb9, 0x69, 0x88, 0x83, 0x7c, 0x2d, 0x43, 0xcc, 0x63, 0x02, 0x8b, 0x86, + 0xbe, 0xa3, 0x76, 0xfc, 0xb0, 0x83, 0x3d, 0x20, 0x1c, 0x00, 0x3d, 0x7f, 0x17, 0xe6, 0x52, 0xac, + 0x45, 0x93, 0x90, 0xdf, 0x23, 0x7d, 0x67, 0xd9, 0x30, 0xfb, 0x13, 0xcd, 0xc0, 0xd0, 0xbe, 0xa2, + 0xf5, 0x9c, 0xf0, 0x3b, 0x82, 0x9d, 0x1f, 0x6f, 0xe4, 0x6e, 0x4b, 0xf2, 0xe7, 0x43, 0x41, 0xdf, + 0xe1, 0x15, 0xf3, 0x15, 0xf6, 0xd1, 0x6a, 0x6a, 0x6a, 0x4b, 0xb1, 0x45, 0x21, 0x74, 0xd6, 0xf9, + 0x60, 0x75, 0xc6, 0xb0, 0x47, 0x0d, 0xd5, 0xd6, 0xb9, 0x67, 0x5b, 0x5b, 0xe7, 0x9f, 0x4e, 0x6d, + 0xfd, 0xf7, 0x50, 0xb2, 0xdd, 0xaa, 0xba, 0xc0, 0x21, 0xaf, 0x1f, 0x23, 0xbe, 0x8a, 0x82, 0xda, + 0x53, 0xe0, 0x95, 0xd2, 0x1e, 0x68, 0x52, 0x11, 0x3d, 0x74, 0xcc, 0x22, 0xfa, 0xa9, 0x16, 0xbe, + 0x2c, 0xa6, 0x9a, 0x4a, 0xcf, 0x26, 0x6d, 0x1e, 0x88, 0x4a, 0x7e, 0x4c, 0x6d, 0xf0, 0x51, 0x2c, + 0xa8, 0xe8, 0x83, 0x90, 0xcb, 0x96, 0x4e, 0xe2, 0xb2, 0xe3, 0xe9, 0xee, 0x8a, 0xb6, 0x60, 0xce, + 0xb4, 0x8c, 0x8e, 0x45, 0x6c, 0x7b, 0x89, 0x28, 0x6d, 0x4d, 0xd5, 0x89, 0xbb, 0x3e, 0x4e, 0x45, + 0x74, 0xe1, 0xe8, 0xb0, 0x32, 0xd7, 0x48, 0x66, 0xc1, 0x69, 0xb2, 0xf2, 0xa3, 0x02, 0x4c, 0x46, + 0x33, 0x60, 0x4a, 0x91, 0x2a, 0x9d, 0xa8, 0x48, 0xbd, 0x1a, 0x38, 0x0c, 0x4e, 0x05, 0x1f, 0xb8, + 0xc1, 0x89, 0x1d, 0x88, 0x05, 0x98, 0x10, 0xd1, 0xc0, 0x25, 0x8a, 0x32, 0xdd, 0xdb, 0xfd, 0xad, + 0x30, 0x19, 0x47, 0xf9, 0x59, 0xe9, 0xe9, 0x57, 0x94, 0x2e, 0x48, 0x21, 0x5c, 0x7a, 0x2e, 0x44, + 0x19, 0x70, 0x5c, 0x06, 0xad, 0xc1, 0x74, 0x4f, 0x8f, 0x43, 0x39, 0xde, 0x78, 0x41, 0x40, 0x4d, + 0x6f, 0xc5, 0x59, 0x70, 0x92, 0x1c, 0xda, 
0x09, 0x55, 0xa3, 0xc3, 0x3c, 0xc2, 0xde, 0xc8, 0x7c, + 0x76, 0x32, 0x97, 0xa3, 0xe8, 0x0e, 0x8c, 0x59, 0xfc, 0xbb, 0xc3, 0x35, 0xd8, 0xa9, 0xdd, 0xcf, + 0x09, 0xb1, 0x31, 0x1c, 0x24, 0xe2, 0x30, 0x6f, 0x42, 0xb9, 0x5d, 0xca, 0x5a, 0x6e, 0xcb, 0x3f, + 0x93, 0x82, 0x49, 0xc8, 0x2b, 0x81, 0x07, 0xdd, 0x32, 0xc5, 0x24, 0x02, 0xd5, 0x91, 0x91, 0x5c, + 0xfd, 0xde, 0x3a, 0x56, 0xf5, 0xeb, 0x27, 0xcf, 0xc1, 0xe5, 0xef, 0x67, 0x12, 0xcc, 0x2e, 0x37, + 0xef, 0x59, 0x46, 0xcf, 0x74, 0xcd, 0xd9, 0x30, 0x9d, 0x75, 0x7d, 0x1d, 0x0a, 0x56, 0x4f, 0x73, + 0xe7, 0xf1, 0xa2, 0x3b, 0x0f, 0xdc, 0xd3, 0xd8, 0x3c, 0xa6, 0x23, 0x52, 0xce, 0x24, 0x98, 0x00, + 0x5a, 0x87, 0x61, 0x4b, 0xd1, 0x3b, 0xc4, 0x4d, 0xab, 0x97, 0x07, 0x58, 0xbf, 0xb2, 0x84, 0x19, + 0x7b, 0xa0, 0x78, 0xe3, 0xd2, 0x58, 0xa0, 0xc8, 0xff, 0x2e, 0xc1, 0xc4, 0xfd, 0xcd, 0xcd, 0xc6, + 0x8a, 0xce, 0x4f, 0x34, 0xbf, 0x5b, 0xbd, 0x04, 0x05, 0x53, 0xa1, 0xbb, 0xd1, 0x4c, 0xcf, 0x68, + 0x98, 0x53, 0xd0, 0x7b, 0x50, 0x64, 0x91, 0x84, 0xe8, 0xed, 0x8c, 0xa5, 0xb6, 0x80, 0xaf, 0x3b, + 0x42, 0x7e, 0x85, 0x28, 0x06, 0xb0, 0x0b, 0x27, 0xef, 0xc1, 0x4c, 0xc0, 0x1c, 0xb6, 0x1e, 0x0f, + 0x58, 0x76, 0x44, 0x4d, 0x18, 0x62, 0x9a, 0x59, 0x0e, 0xcc, 0x67, 0xb8, 0xcc, 0x8c, 0x4c, 0xc9, + 0xaf, 0x74, 0xd8, 0x2f, 0x1b, 0x3b, 0x58, 0xf2, 0x1a, 0x8c, 0xf1, 0x0b, 0x65, 0xc3, 0xa2, 0x7c, + 0x59, 0xd0, 0x45, 0xc8, 0x77, 0x55, 0x5d, 0xe4, 0xd9, 0x51, 0x21, 0x93, 0x67, 0x39, 0x82, 0x8d, + 0x73, 0xb2, 0x72, 0x20, 0x22, 0x8f, 0x4f, 0x56, 0x0e, 0x30, 0x1b, 0x97, 0xef, 0x41, 0x51, 0x2c, + 0x77, 0x10, 0x28, 0xff, 0x64, 0xa0, 0x7c, 0x02, 0xd0, 0x06, 0x14, 0x57, 0x1a, 0x75, 0xcd, 0x70, + 0xaa, 0xae, 0x96, 0xda, 0xb6, 0xa2, 0x7b, 0xb1, 0xb8, 0xb2, 0x84, 0x31, 0xa7, 0x20, 0x19, 0x86, + 0xc9, 0x41, 0x8b, 0x98, 0x94, 0x7b, 0xc4, 0x48, 0x1d, 0xd8, 0x2e, 0xdf, 0xe5, 0x23, 0x58, 0x50, + 0xe4, 0xff, 0xc8, 0x41, 0x51, 0x2c, 0xc7, 0x29, 0x7c, 0x85, 0xad, 0x86, 0xbe, 0xc2, 0x5e, 0xce, + 0xe6, 0x1a, 0xa9, 0x9f, 0x60, 0x9b, 0x91, 0x4f, 0xb0, 0xab, 0x19, 0xf1, 0x9e, 0xfc, 0xfd, 0xf5, + 0x63, 0x09, 0xc6, 0xc3, 0x4e, 0x89, 0x6e, 0xc2, 0x28, 0x4b, 0x38, 0x6a, 0x8b, 0xac, 0xfb, 0x75, + 0xae, 0x77, 0x09, 0xd3, 0xf4, 0x49, 0x38, 0xc8, 0x87, 0x3a, 0x9e, 0x18, 0xf3, 0x23, 0x31, 0xe9, + 0xf4, 0x25, 0xed, 0x51, 0x55, 0xab, 0x3a, 0x8f, 0x24, 0xd5, 0x15, 0x9d, 0x6e, 0x58, 0x4d, 0x6a, + 0xa9, 0x7a, 0x27, 0xa6, 0x88, 0x3b, 0x65, 0x10, 0x59, 0xfe, 0xa9, 0x04, 0xa3, 0xc2, 0xe4, 0x53, + 0xf8, 0xaa, 0xf8, 0x9b, 0xf0, 0x57, 0xc5, 0xe5, 0x8c, 0x07, 0x3c, 0xf9, 0x93, 0xe2, 0xff, 0x7d, + 0xd3, 0xd9, 0x91, 0x66, 0x5e, 0xbd, 0x6b, 0xd8, 0x34, 0xea, 0xd5, 0xec, 0x30, 0x62, 0x4e, 0x41, + 0x3d, 0x98, 0x54, 0x23, 0x31, 0x40, 0x2c, 0x6d, 0x2d, 0x9b, 0x25, 0x9e, 0x58, 0xbd, 0x2c, 0xe0, + 0x27, 0xa3, 0x14, 0x1c, 0x53, 0x21, 0x13, 0x88, 0x71, 0xa1, 0x77, 0xa1, 0xb0, 0x4b, 0xa9, 0x99, + 0x70, 0x5f, 0x3d, 0x20, 0xf2, 0xf8, 0x26, 0x94, 0xf8, 0xec, 0x36, 0x37, 0x1b, 0x98, 0x43, 0xc9, + 0xbf, 0xf7, 0xd7, 0xa3, 0xe9, 0xf8, 0xb8, 0x17, 0x4f, 0xa5, 0x93, 0xc4, 0xd3, 0xd1, 0xa4, 0x58, + 0x8a, 0xee, 0x43, 0x9e, 0x6a, 0x59, 0x3f, 0x0b, 0x05, 0xe2, 0xe6, 0x6a, 0xd3, 0x0f, 0x48, 0x9b, + 0xab, 0x4d, 0xcc, 0x20, 0xd0, 0x06, 0x0c, 0xb1, 0xec, 0xc3, 0x8e, 0x60, 0x3e, 0xfb, 0x91, 0x66, + 0xf3, 0xf7, 0x1d, 0x82, 0xfd, 0xb2, 0xb1, 0x83, 0x23, 0x7f, 0x0c, 0x63, 0xa1, 0x73, 0x8a, 0x3e, + 0x82, 0xb3, 0x9a, 0xa1, 0xb4, 0xeb, 0x8a, 0xa6, 0xe8, 0x2d, 0xe2, 0x3e, 0x0e, 0x5c, 0x4e, 0xfa, + 0xc2, 0x58, 0x0d, 0xf0, 0x89, 0x53, 0x3e, 0x23, 0x94, 0x9c, 0x0d, 0xd2, 0x70, 0x08, 0x51, 0x56, + 0x00, 0xfc, 0x39, 0xa2, 0x0a, 0x0c, 0x31, 0x3f, 0x73, 0xf2, 0xc9, 
0x48, 0x7d, 0x84, 0x59, 0xc8, + 0xdc, 0xcf, 0xc6, 0xce, 0x38, 0xba, 0x01, 0x60, 0x93, 0x96, 0x45, 0x28, 0x0f, 0x06, 0xb9, 0xf0, + 0x03, 0x63, 0xd3, 0xa3, 0xe0, 0x00, 0x97, 0xfc, 0x73, 0x09, 0xc6, 0xd6, 0x09, 0xfd, 0xc4, 0xb0, + 0xf6, 0x1a, 0x86, 0xa6, 0xb6, 0xfa, 0xa7, 0x10, 0x6c, 0x71, 0x28, 0xd8, 0xbe, 0x32, 0x60, 0x67, + 0x42, 0xd6, 0xa5, 0x85, 0x5c, 0xf9, 0x0b, 0x09, 0xe6, 0x42, 0x9c, 0x77, 0xfd, 0xa3, 0xbb, 0x05, + 0x43, 0xa6, 0x61, 0x51, 0x37, 0x11, 0x1f, 0x4b, 0x21, 0x0b, 0x63, 0x81, 0x54, 0xcc, 0x60, 0xb0, + 0x83, 0x86, 0x56, 0x21, 0x47, 0x0d, 0xe1, 0xaa, 0xc7, 0xc3, 0x24, 0xc4, 0xaa, 0x83, 0xc0, 0xcc, + 0x6d, 0x1a, 0x38, 0x47, 0x0d, 0xb6, 0x11, 0xe5, 0x10, 0x57, 0x30, 0xf8, 0x3c, 0xa3, 0x19, 0x60, + 0x28, 0xec, 0x58, 0x46, 0xf7, 0xc4, 0x73, 0xf0, 0x36, 0x62, 0xd9, 0x32, 0xba, 0x98, 0x63, 0xc9, + 0x5f, 0x4a, 0x30, 0x15, 0xe2, 0x3c, 0x85, 0xc0, 0xff, 0x6e, 0x38, 0xf0, 0x5f, 0x3d, 0xce, 0x44, + 0x52, 0xc2, 0xff, 0x97, 0xb9, 0xc8, 0x34, 0xd8, 0x84, 0xd1, 0x0e, 0x8c, 0x9a, 0x46, 0xbb, 0xf9, + 0x14, 0x9e, 0x03, 0x27, 0x58, 0xde, 0x6c, 0xf8, 0x58, 0x38, 0x08, 0x8c, 0x0e, 0x60, 0x4a, 0x57, + 0xba, 0xc4, 0x36, 0x95, 0x16, 0x69, 0x3e, 0x85, 0x0b, 0x92, 0x73, 0xfc, 0xbd, 0x21, 0x8a, 0x88, + 0xe3, 0x4a, 0xd0, 0x1a, 0x14, 0x55, 0x93, 0xd7, 0x71, 0xa2, 0x76, 0x19, 0x98, 0x45, 0x9d, 0xaa, + 0xcf, 0x89, 0xe7, 0xe2, 0x07, 0x76, 0x31, 0xe4, 0x1f, 0x44, 0xbd, 0x81, 0xf9, 0x1f, 0xba, 0x07, + 0x25, 0xde, 0x62, 0xd1, 0x32, 0x34, 0xf7, 0x65, 0x80, 0xed, 0x6c, 0x43, 0x8c, 0x3d, 0x3e, 0xac, + 0x5c, 0x48, 0xb8, 0xf4, 0x75, 0xc9, 0xd8, 0x13, 0x46, 0xeb, 0x50, 0x30, 0xbf, 0x4f, 0x05, 0xc3, + 0x93, 0x1c, 0x2f, 0x5b, 0x38, 0x8e, 0xfc, 0x4f, 0xf9, 0x88, 0xb9, 0x3c, 0xd5, 0x3d, 0x7c, 0x6a, + 0xbb, 0xee, 0x55, 0x4c, 0xa9, 0x3b, 0xbf, 0x0d, 0x45, 0x91, 0xe1, 0x85, 0x33, 0xbf, 0x7e, 0x1c, + 0x67, 0x0e, 0x66, 0x31, 0xef, 0x83, 0xc5, 0x1d, 0x74, 0x81, 0xd1, 0x87, 0x30, 0x4c, 0x1c, 0x15, + 0x4e, 0x6e, 0xbc, 0x75, 0x1c, 0x15, 0x7e, 0x5c, 0xf5, 0x0b, 0x55, 0x31, 0x26, 0x50, 0xd1, 0x5b, + 0x6c, 0xbd, 0x18, 0x2f, 0xfb, 0x08, 0xb4, 0xcb, 0x05, 0x9e, 0xae, 0x2e, 0x3a, 0xd3, 0xf6, 0x86, + 0x1f, 0x1f, 0x56, 0xc0, 0xff, 0x89, 0x83, 0x12, 0xf2, 0x2f, 0x25, 0x98, 0xe2, 0x2b, 0xd4, 0xea, + 0x59, 0x2a, 0xed, 0x9f, 0x5a, 0x62, 0x7a, 0x10, 0x4a, 0x4c, 0xaf, 0x0d, 0x58, 0x96, 0x98, 0x85, + 0xa9, 0xc9, 0xe9, 0x2b, 0x09, 0xce, 0xc5, 0xb8, 0x4f, 0x21, 0x2e, 0x6e, 0x85, 0xe3, 0xe2, 0x2b, + 0xc7, 0x9d, 0x50, 0x4a, 0x6c, 0xfc, 0xe7, 0xc9, 0x84, 0xe9, 0xf0, 0x93, 0x72, 0x03, 0xc0, 0xb4, + 0xd4, 0x7d, 0x55, 0x23, 0x1d, 0xf1, 0x08, 0x5e, 0x0a, 0xb4, 0x38, 0x79, 0x14, 0x1c, 0xe0, 0x42, + 0x36, 0xcc, 0xb6, 0xc9, 0x8e, 0xd2, 0xd3, 0xe8, 0x42, 0xbb, 0xbd, 0xa8, 0x98, 0xca, 0xb6, 0xaa, + 0xa9, 0x54, 0x15, 0xd7, 0x05, 0x23, 0xf5, 0x3b, 0xce, 0xe3, 0x74, 0x12, 0xc7, 0xe3, 0xc3, 0xca, + 0xc5, 0xa4, 0xd7, 0x21, 0x97, 0xa5, 0x8f, 0x53, 0xa0, 0x51, 0x1f, 0xca, 0x16, 0xf9, 0xb8, 0xa7, + 0x5a, 0xa4, 0xbd, 0x64, 0x19, 0x66, 0x48, 0x6d, 0x9e, 0xab, 0xfd, 0xeb, 0xa3, 0xc3, 0x4a, 0x19, + 0xa7, 0xf0, 0x0c, 0x56, 0x9c, 0x0a, 0x8f, 0x1e, 0xc2, 0xb4, 0xe2, 0x74, 0x86, 0x85, 0xb4, 0x3a, + 0xa7, 0xe4, 0xf6, 0xd1, 0x61, 0x65, 0x7a, 0x21, 0x4e, 0x1e, 0xac, 0x30, 0x09, 0x14, 0xd5, 0xa0, + 0xb8, 0xcf, 0xfb, 0xd6, 0xec, 0xf2, 0x10, 0xc7, 0x67, 0x89, 0xa0, 0xe8, 0xb4, 0xb2, 0x31, 0xcc, + 0xe1, 0xe5, 0x26, 0x3f, 0x7d, 0x2e, 0x17, 0xfb, 0xa0, 0x64, 0xb5, 0xa4, 0x38, 0xf1, 0xfc, 0xc6, + 0xb8, 0xe4, 0x47, 0xad, 0xfb, 0x3e, 0x09, 0x07, 0xf9, 0xd0, 0x07, 0x30, 0xb2, 0x2b, 0x6e, 0x25, + 0xec, 0x72, 0x31, 0x53, 0x12, 0x0e, 0xdd, 0x62, 0xd4, 0xa7, 0x84, 0x8a, 0x11, 0x77, 0xd8, 
0xc6, + 0x3e, 0x22, 0x7a, 0x09, 0x8a, 0xfc, 0xc7, 0xca, 0x12, 0xbf, 0x8e, 0x2b, 0xf9, 0xb1, 0xed, 0xbe, + 0x33, 0x8c, 0x5d, 0xba, 0xcb, 0xba, 0xd2, 0x58, 0xe4, 0xd7, 0xc2, 0x11, 0xd6, 0x95, 0xc6, 0x22, + 0x76, 0xe9, 0xe8, 0x23, 0x28, 0xda, 0x64, 0x55, 0xd5, 0x7b, 0x07, 0x65, 0xc8, 0xf4, 0xa8, 0xdc, + 0xbc, 0xcb, 0xb9, 0x23, 0x17, 0x63, 0xbe, 0x06, 0x41, 0xc7, 0x2e, 0x2c, 0xda, 0x85, 0x11, 0xab, + 0xa7, 0x2f, 0xd8, 0x5b, 0x36, 0xb1, 0xca, 0xa3, 0x5c, 0xc7, 0xa0, 0x70, 0x8e, 0x5d, 0xfe, 0xa8, + 0x16, 0x6f, 0x85, 0x3c, 0x0e, 0xec, 0x83, 0xa3, 0x7f, 0x93, 0x00, 0xd9, 0x3d, 0xd3, 0xd4, 0x48, + 0x97, 0xe8, 0x54, 0xd1, 0xf8, 0x5d, 0x9c, 0x5d, 0x3e, 0xcb, 0x75, 0xbe, 0x3d, 0x68, 0x5e, 0x31, + 0xc1, 0xa8, 0x72, 0xef, 0xd2, 0x3b, 0xce, 0x8a, 0x13, 0xf4, 0xb2, 0xa5, 0xdd, 0xb1, 0xf9, 0xdf, + 0xe5, 0xb1, 0x4c, 0x4b, 0x9b, 0x7c, 0xe7, 0xe8, 0x2f, 0xad, 0xa0, 0x63, 0x17, 0x16, 0x3d, 0x80, + 0x59, 0xb7, 0xed, 0x11, 0x1b, 0x06, 0x5d, 0x56, 0x35, 0x62, 0xf7, 0x6d, 0x4a, 0xba, 0xe5, 0x71, + 0xbe, 0xed, 0x5e, 0xef, 0x07, 0x4e, 0xe4, 0xc2, 0x29, 0xd2, 0xa8, 0x0b, 0x15, 0x37, 0x64, 0xb0, + 0xf3, 0xe4, 0xc5, 0xac, 0xbb, 0x76, 0x4b, 0xd1, 0x9c, 0x77, 0x80, 0x09, 0xae, 0xe0, 0xc5, 0xa3, + 0xc3, 0x4a, 0x65, 0xe9, 0xc9, 0xac, 0x78, 0x10, 0x16, 0x7a, 0x0f, 0xca, 0x4a, 0x9a, 0x9e, 0x49, + 0xae, 0xe7, 0x79, 0x16, 0x87, 0x52, 0x15, 0xa4, 0x4a, 0x23, 0x0a, 0x93, 0x4a, 0xb8, 0x01, 0xd5, + 0x2e, 0x4f, 0x65, 0xba, 0x88, 0x8c, 0xf4, 0xad, 0xfa, 0x97, 0x11, 0x11, 0x82, 0x8d, 0x63, 0x1a, + 0xd0, 0x3f, 0x00, 0x52, 0xa2, 0x3d, 0xb3, 0x76, 0x19, 0x65, 0x4a, 0x3f, 0xb1, 0x66, 0x5b, 0xdf, + 0xed, 0x62, 0x24, 0x1b, 0x27, 0xe8, 0x41, 0xab, 0x30, 0x23, 0x46, 0xb7, 0x74, 0x5b, 0xd9, 0x21, + 0xcd, 0xbe, 0xdd, 0xa2, 0x9a, 0x5d, 0x9e, 0xe6, 0xb1, 0x8f, 0x3f, 0x7c, 0x2d, 0x24, 0xd0, 0x71, + 0xa2, 0x14, 0x7a, 0x1b, 0x26, 0x77, 0x0c, 0x6b, 0x5b, 0x6d, 0xb7, 0x89, 0xee, 0x22, 0xcd, 0x70, + 0xa4, 0x19, 0xb6, 0x1a, 0xcb, 0x11, 0x1a, 0x8e, 0x71, 0x23, 0x1b, 0xce, 0x09, 0xe4, 0x86, 0x65, + 0xb4, 0xd6, 0x8c, 0x9e, 0x4e, 0x9d, 0x92, 0xe8, 0x9c, 0x97, 0x62, 0xce, 0x2d, 0x24, 0x31, 0x3c, + 0x3e, 0xac, 0x5c, 0x4a, 0xae, 0x80, 0x7d, 0x26, 0x9c, 0x8c, 0x8d, 0x76, 0x01, 0x78, 0x5c, 0x70, + 0x8e, 0xdf, 0x2c, 0x3f, 0x7e, 0xb7, 0xb3, 0x44, 0x9d, 0xc4, 0x13, 0xe8, 0x3c, 0xc9, 0x79, 0x64, + 0x1c, 0xc0, 0xe6, 0xbd, 0x32, 0xe2, 0xe5, 0xe4, 0x74, 0xfa, 0x8d, 0x8f, 0xd7, 0x2b, 0xe3, 0x9b, + 0xf6, 0xd4, 0x7a, 0x65, 0x02, 0x90, 0x4f, 0xbe, 0xab, 0xfd, 0x6d, 0x0e, 0xa6, 0x7d, 0xe6, 0xcc, + 0xbd, 0x32, 0x09, 0x22, 0x7f, 0xee, 0x39, 0x1e, 0xdc, 0x73, 0xfc, 0x85, 0x04, 0xe3, 0xfe, 0xd2, + 0xfd, 0xf1, 0xf5, 0xaf, 0xf8, 0xb6, 0xa5, 0x54, 0xd4, 0x3f, 0xca, 0x05, 0x27, 0xf0, 0x27, 0xdf, + 0x44, 0xf1, 0xfd, 0x1b, 0x85, 0xe5, 0xaf, 0xf2, 0x30, 0x19, 0x3d, 0x8d, 0xa1, 0xb7, 0x76, 0x69, + 0xe0, 0x5b, 0x7b, 0x03, 0x66, 0x76, 0x7a, 0x9a, 0xd6, 0xe7, 0xcb, 0x10, 0x78, 0x70, 0x77, 0xde, + 0xca, 0x9e, 0x17, 0x92, 0x33, 0xcb, 0x09, 0x3c, 0x38, 0x51, 0x32, 0xa5, 0x6f, 0x20, 0x7f, 0xa2, + 0xbe, 0x81, 0xd8, 0x33, 0x76, 0xe1, 0x18, 0xcf, 0xd8, 0x89, 0x3d, 0x00, 0x43, 0x27, 0xe8, 0x01, + 0x38, 0xc9, 0xa3, 0x7d, 0x42, 0x10, 0x1b, 0xd8, 0x43, 0xfa, 0x3c, 0x9c, 0x17, 0x62, 0x94, 0xbf, + 0xa7, 0xeb, 0xd4, 0x32, 0x34, 0x8d, 0x58, 0x4b, 0xbd, 0x6e, 0xb7, 0x2f, 0xbf, 0x09, 0xe3, 0xe1, + 0x4e, 0x11, 0x67, 0xa7, 0x9d, 0x66, 0x15, 0xf1, 0x62, 0x19, 0xd8, 0x69, 0x67, 0x1c, 0x7b, 0x1c, + 0xf2, 0xbf, 0x48, 0x30, 0x9b, 0xdc, 0x11, 0x8a, 0x34, 0x18, 0xef, 0x2a, 0x07, 0xc1, 0x2e, 0x5d, + 0xe9, 0x84, 0x77, 0x49, 0xbc, 0x45, 0x60, 0x2d, 0x84, 0x85, 0x23, 0xd8, 0xf2, 0x77, 0x12, 0xcc, + 0xa5, 0x3c, 0xce, 
0x9f, 0xae, 0x25, 0xe8, 0x7d, 0x28, 0x75, 0x95, 0x83, 0x66, 0xcf, 0xea, 0x90, + 0x13, 0xdf, 0x9e, 0xf1, 0x88, 0xb1, 0x26, 0x50, 0xb0, 0x87, 0x27, 0xff, 0x9f, 0x04, 0xcf, 0xa5, + 0x56, 0x14, 0xe8, 0x56, 0xa8, 0x8f, 0x40, 0x8e, 0xf4, 0x11, 0xa0, 0xb8, 0xe0, 0x33, 0x6a, 0x23, + 0xf8, 0x4c, 0x82, 0x72, 0xda, 0xd7, 0x16, 0xba, 0x19, 0x32, 0xf2, 0x85, 0x88, 0x91, 0x53, 0x31, + 0xb9, 0x67, 0x64, 0xe3, 0x0f, 0x25, 0x98, 0x4d, 0xfe, 0xea, 0x44, 0xaf, 0x86, 0x2c, 0xac, 0x44, + 0x2c, 0x9c, 0x88, 0x48, 0x09, 0xfb, 0x3e, 0x84, 0x71, 0xf1, 0x6d, 0x2a, 0x60, 0xc4, 0xde, 0xcb, + 0x49, 0x11, 0x5d, 0x40, 0xb8, 0x95, 0x20, 0xf7, 0xaa, 0xf0, 0x18, 0x8e, 0xa0, 0xc9, 0xff, 0x9a, + 0x83, 0xa1, 0x66, 0x4b, 0xd1, 0xc8, 0x29, 0x14, 0x83, 0xef, 0x84, 0x8a, 0xc1, 0x41, 0xff, 0xf7, + 0xc3, 0xad, 0x4a, 0xad, 0x03, 0x71, 0xa4, 0x0e, 0x7c, 0x39, 0x13, 0xda, 0x93, 0x4b, 0xc0, 0xbf, + 0x82, 0x11, 0x4f, 0xe9, 0xf1, 0x32, 0x93, 0xfc, 0xbf, 0x39, 0x18, 0x0d, 0xa8, 0x38, 0x66, 0x5e, + 0xdb, 0x09, 0xd5, 0x03, 0xf9, 0x0c, 0xe5, 0x7f, 0x40, 0x57, 0xd5, 0xad, 0x00, 0x9c, 0xbe, 0x55, + 0xbf, 0x53, 0x31, 0x5e, 0x18, 0xbc, 0x09, 0xe3, 0x54, 0xb1, 0x3a, 0x84, 0x7a, 0x37, 0xe3, 0x79, + 0xee, 0x8b, 0x5e, 0xb7, 0xf3, 0x66, 0x88, 0x8a, 0x23, 0xdc, 0xe7, 0xef, 0xc0, 0x58, 0x48, 0xd9, + 0xb1, 0xda, 0x4e, 0x7f, 0x22, 0xc1, 0x0b, 0x03, 0xef, 0x2d, 0x50, 0x3d, 0x74, 0x48, 0xaa, 0x91, + 0x43, 0x32, 0x9f, 0x0e, 0xf0, 0xec, 0xda, 0x97, 0xea, 0xd7, 0x1e, 0x7d, 0x3b, 0x7f, 0xe6, 0xeb, + 0x6f, 0xe7, 0xcf, 0x7c, 0xf3, 0xed, 0xfc, 0x99, 0x7f, 0x3c, 0x9a, 0x97, 0x1e, 0x1d, 0xcd, 0x4b, + 0x5f, 0x1f, 0xcd, 0x4b, 0xdf, 0x1c, 0xcd, 0x4b, 0xbf, 0x3e, 0x9a, 0x97, 0xfe, 0xf3, 0xbb, 0xf9, + 0x33, 0xef, 0x17, 0x05, 0xdc, 0x1f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x51, 0x7e, 0x9f, 0x62, 0x14, + 0x3c, 0x00, 0x00, } diff --git a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/generated.proto index 1a9b7ebb194b..efcda7ebdea5 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/generated.proto @@ -22,7 +22,6 @@ syntax = 'proto2'; package k8s.io.api.extensions.v1beta1; import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -45,7 +44,7 @@ message AllowedHostPath { // pathPrefix is the path prefix that the host volume must match. // It does not support `*`. // Trailing slashes are trimmed when validating the path prefix with a host path. - // + // // Examples: // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` // `/foo` would not allow `/food` or `/etc/foo` @@ -56,31 +55,6 @@ message AllowedHostPath { optional bool readOnly = 2; } -message CustomMetricCurrentStatus { - // Custom Metric name. - optional string name = 1; - - // Custom Metric value (average). - optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; -} - -message CustomMetricCurrentStatusList { - repeated CustomMetricCurrentStatus items = 1; -} - -// Alpha-level support for Custom Metrics in HPA (as annotations). -message CustomMetricTarget { - // Custom Metric name. - optional string name = 1; - - // Custom Metric value (average). 
-  optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2;
-}
-
-message CustomMetricTargetList {
-  repeated CustomMetricTarget items = 1;
-}
-
 // DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for
 // more information.
 // DaemonSet represents the configuration of a daemon set.
@@ -335,6 +309,8 @@ message DeploymentSpec {
   // The number of old ReplicaSets to retain to allow rollback.
   // This is a pointer to distinguish between explicit zero and not specified.
+  // This is set to the max value of int32 (i.e. 2147483647) by default, which
+  // means "retaining all old ReplicaSets".
   // +optional
   optional int32 revisionHistoryLimit = 6;
 
@@ -688,7 +664,7 @@ message NetworkPolicyList {
 message NetworkPolicyPeer {
   // This is a label selector which selects Pods. This field follows standard label
   // selector semantics; if present but empty, it selects all pods.
-  // 
+  //
   // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects
   // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector.
   // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace.
@@ -697,7 +673,7 @@ message NetworkPolicyPeer {
 
   // Selects Namespaces using cluster-scoped labels. This field follows standard label
   // selector semantics; if present but empty, it selects all namespaces.
-  // 
+  //
   // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects
   // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector.
   // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector.
@@ -847,6 +823,12 @@ message PodSecurityPolicySpec {
   // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.
   optional RunAsUserStrategyOptions runAsUser = 11;
 
+  // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set.
+  // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the
+  // RunAsGroup feature gate to be enabled.
+  // +optional
+  optional RunAsGroupStrategyOptions runAsGroup = 22;
+
   // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.
   optional SupplementalGroupsStrategyOptions supplementalGroups = 12;
 
@@ -886,7 +868,7 @@ message PodSecurityPolicySpec {
   // Each entry is either a plain sysctl name or ends in "*" in which case it is considered
   // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed.
   // Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.
-  // 
+  //
   // Examples:
   // e.g. "foo/*" allows "foo/bar", "foo/baz", etc.
   // e.g. "foo.*" allows "foo.bar", "foo.baz", etc.
@@ -896,7 +878,7 @@ message PodSecurityPolicySpec {
   // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none.
   // Each entry is either a plain sysctl name or ends in "*" in which case it is considered
   // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.
-  // 
+  //
   // Examples:
   // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc.
   // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc.
@@ -1086,6 +1068,18 @@ message RollingUpdateDeployment {
   optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
 }
 
+// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy.
+// Deprecated: use RunAsGroupStrategyOptions from policy API Group instead.
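(Editor's note: the `message RunAsGroupStrategyOptions` defined just below is the wire form of the new `runAsGroup` control added to `PodSecurityPolicySpec` above. As a quick orientation, here is a minimal sketch of how the matching Go types introduced later in this diff could be populated. The package alias, the `main` wrapper, and the 1000-2000 gid range are illustrative assumptions, not part of the upstream change.)

```go
package main

import (
	"fmt"

	extv1beta1 "k8s.io/api/extensions/v1beta1"
)

func main() {
	// Force containers admitted under this policy to run with a gid in
	// [1000, 2000]. Leaving RunAsGroup nil keeps the old behaviour (any
	// gid); honouring the field requires the RunAsGroup feature gate.
	spec := extv1beta1.PodSecurityPolicySpec{
		RunAsGroup: &extv1beta1.RunAsGroupStrategyOptions{
			Rule:   extv1beta1.RunAsGroupStrategyMustRunAs,
			Ranges: []extv1beta1.IDRange{{Min: 1000, Max: 2000}},
		},
	}
	fmt.Printf("runAsGroup rule: %s\n", spec.RunAsGroup.Rule)
}
```

(That the field is a pointer is deliberate: an omitted `runAsGroup` must stay distinguishable from an explicitly configured one, per the `+optional` comment in the hunk above.)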
+message RunAsGroupStrategyOptions {
+  // rule is the strategy that will dictate the allowable RunAsGroup values that may be set.
+  optional string rule = 1;
+
+  // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid
+  // then supply a single range with the same start and end. Required for MustRunAs.
+  // +optional
+  repeated IDRange ranges = 2;
+}
+
 // RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.
 // Deprecated: use RunAsUserStrategyOptions from policy API Group instead.
 message RunAsUserStrategyOptions {
diff --git a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/types.go b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/types.go
index 38e112d1e0e7..5ba6f9585452 100644
--- a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/types.go
+++ b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/types.go
@@ -19,7 +19,6 @@ package v1beta1
 import (
 	appsv1beta1 "k8s.io/api/apps/v1beta1"
 	"k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/intstr"
 )
@@ -50,8 +49,6 @@ type ScaleStatus struct {
 	TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"`
 }
 
-// +genclient
-// +genclient:noVerbs
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
 // represents a scaling request for a resource.
@@ -77,29 +74,6 @@ type ReplicationControllerDummy struct {
 	metav1.TypeMeta `json:",inline"`
 }
 
-// Alpha-level support for Custom Metrics in HPA (as annotations).
-type CustomMetricTarget struct {
-	// Custom Metric name.
-	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
-	// Custom Metric value (average).
-	TargetValue resource.Quantity `json:"value" protobuf:"bytes,2,opt,name=value"`
-}
-
-type CustomMetricTargetList struct {
-	Items []CustomMetricTarget `json:"items" protobuf:"bytes,1,rep,name=items"`
-}
-
-type CustomMetricCurrentStatus struct {
-	// Custom Metric name.
-	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
-	// Custom Metric value (average).
-	CurrentValue resource.Quantity `json:"value" protobuf:"bytes,2,opt,name=value"`
-}
-
-type CustomMetricCurrentStatusList struct {
-	Items []CustomMetricCurrentStatus `json:"items" protobuf:"bytes,1,rep,name=items"`
-}
-
 // +genclient
 // +genclient:method=GetScale,verb=get,subresource=scale,result=Scale
 // +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale
@@ -151,6 +125,8 @@ type DeploymentSpec struct {
 	// The number of old ReplicaSets to retain to allow rollback.
 	// This is a pointer to distinguish between explicit zero and not specified.
+	// This is set to the max value of int32 (i.e. 2147483647) by default, which
+	// means "retaining all old ReplicaSets".
 	// +optional
 	RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"`
 
@@ -918,6 +894,11 @@ type PodSecurityPolicySpec struct {
 	SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"`
 	// runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.
 	RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"`
+	// RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set.
+	// If this field is omitted, the pod's RunAsGroup can take any value.
This field requires the
+	// RunAsGroup feature gate to be enabled.
+	// +optional
+	RunAsGroup *RunAsGroupStrategyOptions `json:"runAsGroup,omitempty" protobuf:"bytes,22,opt,name=runAsGroup"`
 	// supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.
 	SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"`
 	// fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.
@@ -1072,6 +1053,17 @@ type RunAsUserStrategyOptions struct {
 	Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"`
 }
 
+// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy.
+// Deprecated: use RunAsGroupStrategyOptions from policy API Group instead.
+type RunAsGroupStrategyOptions struct {
+	// rule is the strategy that will dictate the allowable RunAsGroup values that may be set.
+	Rule RunAsGroupStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsGroupStrategy"`
+	// ranges are the allowed ranges of gids that may be used. If you would like to force a single gid
+	// then supply a single range with the same start and end. Required for MustRunAs.
+	// +optional
+	Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"`
+}
+
 // IDRange provides a min/max of an allowed range of IDs.
 // Deprecated: use IDRange from policy API Group instead.
 type IDRange struct {
@@ -1098,6 +1090,23 @@ const (
 	RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny"
 )
 
+// RunAsGroupStrategy denotes strategy types for generating RunAsGroup values for a
+// Security Context.
+// Deprecated: use RunAsGroupStrategy from policy API Group instead.
+type RunAsGroupStrategy string
+
+const (
+	// RunAsGroupStrategyMayRunAs means that container does not need to run with a particular gid.
+	// However, when a RunAsGroup is specified, it has to fall in the defined range.
+	RunAsGroupStrategyMayRunAs RunAsGroupStrategy = "MayRunAs"
+	// RunAsGroupStrategyMustRunAs means that container must run as a particular gid.
+	// Deprecated: use RunAsGroupStrategyMustRunAs from policy API Group instead.
+	RunAsGroupStrategyMustRunAs RunAsGroupStrategy = "MustRunAs"
+	// RunAsGroupStrategyRunAsAny means that container may make requests for any gid.
+	// Deprecated: use RunAsGroupStrategyRunAsAny from policy API Group instead.
+	RunAsGroupStrategyRunAsAny RunAsGroupStrategy = "RunAsAny"
+)
+
 // FSGroupStrategyOptions defines the strategy type and options used to create the strategy.
 // Deprecated: use FSGroupStrategyOptions from policy API Group instead.
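(Editor's note, tying together the `DeploymentSpec` hunks above: both the proto and the Go comments now state that a nil `RevisionHistoryLimit` defaults to the int32 maximum, meaning every old ReplicaSet is retained. A minimal sketch of reading and overriding that default; the empty `Deployment` value and the limit of 3 are illustrative assumptions, not upstream code.)

```go
package main

import (
	"fmt"

	extv1beta1 "k8s.io/api/extensions/v1beta1"
)

func main() {
	var d extv1beta1.Deployment

	// nil is distinct from an explicit zero: per the comments added in
	// this diff, nil is defaulted to 2147483647 (keep every old ReplicaSet).
	fmt.Println(d.Spec.RevisionHistoryLimit) // <nil>

	// Opt into bounded history: keep only the three newest ReplicaSets.
	limit := int32(3)
	d.Spec.RevisionHistoryLimit = &limit
	fmt.Println(*d.Spec.RevisionHistoryLimit) // 3
}
```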
type FSGroupStrategyOptions struct {
diff --git a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
index cdbc490a5ef5..bce6036cd5d2 100644
--- a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
+++ b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
@@ -46,25 +46,6 @@ func (AllowedHostPath) SwaggerDoc() map[string]string {
 	return map_AllowedHostPath
 }
 
-var map_CustomMetricCurrentStatus = map[string]string{
-	"name":  "Custom Metric name.",
-	"value": "Custom Metric value (average).",
-}
-
-func (CustomMetricCurrentStatus) SwaggerDoc() map[string]string {
-	return map_CustomMetricCurrentStatus
-}
-
-var map_CustomMetricTarget = map[string]string{
-	"":      "Alpha-level support for Custom Metrics in HPA (as annotations).",
-	"name":  "Custom Metric name.",
-	"value": "Custom Metric value (average).",
-}
-
-func (CustomMetricTarget) SwaggerDoc() map[string]string {
-	return map_CustomMetricTarget
-}
-
 var map_DaemonSet = map[string]string{
 	"":         "DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for more information. DaemonSet represents the configuration of a daemon set.",
 	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
@@ -114,7 +95,7 @@ func (DaemonSetSpec) SwaggerDoc() map[string]string {
 }
 
 var map_DaemonSetStatus = map[string]string{
-	"": "DaemonSetStatus represents the current status of a daemon set.",
+	"":                       "DaemonSetStatus represents the current status of a daemon set.",
 	"currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
 	"numberMisscheduled":     "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
 	"desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
@@ -193,7 +174,7 @@ var map_DeploymentSpec = map[string]string{
 	"template":                "Template describes the pods that will be created.",
 	"strategy":                "The deployment strategy to use to replace existing pods with new ones.",
 	"minReadySeconds":         "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
-	"revisionHistoryLimit":    "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified.",
+	"revisionHistoryLimit":    "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. This is set to the max value of int32 (i.e. 2147483647) by default, which means \"retaining all old ReplicaSets\".",
 	"paused":                  "Indicates that the deployment is paused and will not be processed by the deployment controller.",
 	"rollbackTo":              "DEPRECATED. The config this deployment is rolling back to.
Will be cleared after rollback is done.", "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. This is set to the max value of int32 (i.e. 2147483647) by default, which means \"no deadline\".", @@ -472,6 +453,7 @@ var map_PodSecurityPolicySpec = map[string]string{ "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.", "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.", "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.", + "runAsGroup": "RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. If this field is omitted, the pod's RunAsGroup can take any value. This field requires the RunAsGroup feature gate to be enabled.", "supplementalGroups": "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", "fsGroup": "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.", "readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", @@ -584,6 +566,16 @@ func (RollingUpdateDeployment) SwaggerDoc() map[string]string { return map_RollingUpdateDeployment } +var map_RunAsGroupStrategyOptions = map[string]string{ + "": "RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use RunAsGroupStrategyOptions from policy API Group instead.", + "rule": "rule is the strategy that will dictate the allowable RunAsGroup values that may be set.", + "ranges": "ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs.", +} + +func (RunAsGroupStrategyOptions) SwaggerDoc() map[string]string { + return map_RunAsGroupStrategyOptions +} + var map_RunAsUserStrategyOptions = map[string]string{ "": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use RunAsUserStrategyOptions from policy API Group instead.", "rule": "rule is the strategy that will dictate the allowable RunAsUser values that may be set.", diff --git a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go index 65801c23edbd..8128c079b482 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go @@ -59,86 +59,6 @@ func (in *AllowedHostPath) DeepCopy() *AllowedHostPath { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CustomMetricCurrentStatus) DeepCopyInto(out *CustomMetricCurrentStatus) { - *out = *in - out.CurrentValue = in.CurrentValue.DeepCopy() - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricCurrentStatus. -func (in *CustomMetricCurrentStatus) DeepCopy() *CustomMetricCurrentStatus { - if in == nil { - return nil - } - out := new(CustomMetricCurrentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomMetricCurrentStatusList) DeepCopyInto(out *CustomMetricCurrentStatusList) { - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CustomMetricCurrentStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricCurrentStatusList. -func (in *CustomMetricCurrentStatusList) DeepCopy() *CustomMetricCurrentStatusList { - if in == nil { - return nil - } - out := new(CustomMetricCurrentStatusList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomMetricTarget) DeepCopyInto(out *CustomMetricTarget) { - *out = *in - out.TargetValue = in.TargetValue.DeepCopy() - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricTarget. -func (in *CustomMetricTarget) DeepCopy() *CustomMetricTarget { - if in == nil { - return nil - } - out := new(CustomMetricTarget) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomMetricTargetList) DeepCopyInto(out *CustomMetricTargetList) { - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CustomMetricTarget, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricTargetList. -func (in *CustomMetricTargetList) DeepCopy() *CustomMetricTargetList { - if in == nil { - return nil - } - out := new(CustomMetricTargetList) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DaemonSet) DeepCopyInto(out *DaemonSet) { *out = *in @@ -1102,6 +1022,11 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { } in.SELinux.DeepCopyInto(&out.SELinux) in.RunAsUser.DeepCopyInto(&out.RunAsUser) + if in.RunAsGroup != nil { + in, out := &in.RunAsGroup, &out.RunAsGroup + *out = new(RunAsGroupStrategyOptions) + (*in).DeepCopyInto(*out) + } in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups) in.FSGroup.DeepCopyInto(&out.FSGroup) if in.DefaultAllowPrivilegeEscalation != nil { @@ -1368,6 +1293,27 @@ func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RunAsGroupStrategyOptions) DeepCopyInto(out *RunAsGroupStrategyOptions) { + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsGroupStrategyOptions. +func (in *RunAsGroupStrategyOptions) DeepCopy() *RunAsGroupStrategyOptions { + if in == nil { + return nil + } + out := new(RunAsGroupStrategyOptions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) { *out = *in diff --git a/cluster-autoscaler/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go b/cluster-autoscaler/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go index b0c6b3713a32..19c09bc6f1cf 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go @@ -18,4 +18,5 @@ limitations under the License. // +k8s:openapi-gen=true // +groupName=imagepolicy.k8s.io + package v1alpha1 diff --git a/cluster-autoscaler/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go index 6e08dcca84bf..2a4529bfca97 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto -// DO NOT EDIT! /* Package v1alpha1 is a generated protocol buffer package. 
@@ -250,24 +249,6 @@ func (m *ImageReviewStatus) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -728,51 +709,14 @@ func (m *ImageReviewSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Annotations == nil { m.Annotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -782,41 +726,80 @@ func (m *ImageReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := 
int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Annotations[mapkey] = mapvalue - } else { - var mapvalue string - m.Annotations[mapkey] = mapvalue } + m.Annotations[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -972,51 +955,14 @@ func (m *ImageReviewStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.AuditAnnotations == nil { m.AuditAnnotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1026,41 +972,80 @@ func (m *ImageReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + 
var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.AuditAnnotations[mapkey] = mapvalue - } else { - var mapvalue string - m.AuditAnnotations[mapkey] = mapvalue } + m.AuditAnnotations[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex diff --git a/cluster-autoscaler/vendor/k8s.io/api/networking/v1/doc.go b/cluster-autoscaler/vendor/k8s.io/api/networking/v1/doc.go index 9111e722dc39..b608499fe770 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/networking/v1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/api/networking/v1/doc.go @@ -17,4 +17,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +groupName=networking.k8s.io + package v1 diff --git a/cluster-autoscaler/vendor/k8s.io/api/networking/v1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/networking/v1/generated.pb.go index 7b1c04b29a5a..86bd80c857b8 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/networking/v1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/networking/v1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto -// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. 
@@ -446,24 +445,6 @@ func (m *NetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/cluster-autoscaler/vendor/k8s.io/api/networking/v1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/networking/v1/generated.proto index 4e068d08f03c..ab3731e9c399 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/networking/v1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/networking/v1/generated.proto @@ -114,7 +114,7 @@ message NetworkPolicyList { message NetworkPolicyPeer { // This is a label selector which selects Pods. This field follows standard label // selector semantics; if present but empty, it selects all pods. - // + // // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. @@ -123,7 +123,7 @@ message NetworkPolicyPeer { // Selects Namespaces using cluster-scoped labels. This field follows standard label // selector semantics; if present but empty, it selects all namespaces. - // + // // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. diff --git a/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/doc.go b/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/doc.go index 972192786dd0..d1062837bf4e 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/doc.go @@ -15,9 +15,9 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true // Package policy is for any kind of policy object. Suitable examples, even if // they aren't all here, are PodDisruptionBudget, PodSecurityPolicy, // NetworkPolicy, etc. -// +k8s:openapi-gen=true package v1beta1 diff --git a/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/generated.pb.go index d7d62dd3ab8b..d122fcfda12b 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. @@ -38,6 +37,7 @@ limitations under the License. 
PodSecurityPolicy PodSecurityPolicyList PodSecurityPolicySpec + RunAsGroupStrategyOptions RunAsUserStrategyOptions SELinuxStrategyOptions SupplementalGroupsStrategyOptions @@ -125,20 +125,26 @@ func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPol func (*PodSecurityPolicySpec) ProtoMessage() {} func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (m *RunAsGroupStrategyOptions) Reset() { *m = RunAsGroupStrategyOptions{} } +func (*RunAsGroupStrategyOptions) ProtoMessage() {} +func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{13} +} + func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } func (*RunAsUserStrategyOptions) ProtoMessage() {} func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{13} + return fileDescriptorGenerated, []int{14} } func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{15} + return fileDescriptorGenerated, []int{16} } func init() { @@ -155,6 +161,7 @@ func init() { proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicy") proto.RegisterType((*PodSecurityPolicyList)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicyList") proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicySpec") + proto.RegisterType((*RunAsGroupStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RunAsGroupStrategyOptions") proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RunAsUserStrategyOptions") proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SELinuxStrategyOptions") proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SupplementalGroupsStrategyOptions") @@ -853,6 +860,52 @@ func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { i += copy(dAtA[i:], s) } } + if m.RunAsGroup != nil { + dAtA[i] = 0xb2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsGroup.Size())) + n18, err := m.RunAsGroup.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + return i, nil +} + +func (m *RunAsGroupStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RunAsGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i += copy(dAtA[i:], m.Rule) + if len(m.Ranges) > 0 { + for _, msg := range m.Ranges { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -913,11 +966,11 @@ func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { 
dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n18, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) + n19, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n18 + i += n19 } return i, nil } @@ -956,24 +1009,6 @@ func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1212,6 +1247,24 @@ func (m *PodSecurityPolicySpec) Size() (n int) { n += 2 + l + sovGenerated(uint64(l)) } } + if m.RunAsGroup != nil { + l = m.RunAsGroup.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RunAsGroupStrategyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1441,6 +1494,18 @@ func (this *PodSecurityPolicySpec) String() string { `AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`, `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, `AllowedProcMountTypes:` + fmt.Sprintf("%v", this.AllowedProcMountTypes) + `,`, + `RunAsGroup:` + strings.Replace(fmt.Sprintf("%v", this.RunAsGroup), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RunAsGroupStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RunAsGroupStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2537,51 +2602,14 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.DisruptedPods == nil { m.DisruptedPods = make(map[string]k8s_io_apimachinery_pkg_apis_meta_v1.Time) } - if iNdEx < postIndex { - var valuekey uint64 + var 
mapkey string + mapvalue := &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2591,46 +2619,85 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.DisruptedPods[mapkey] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_apis_meta_v1.Time - m.DisruptedPods[mapkey] = mapvalue } + m.DisruptedPods[mapkey] = *mapvalue iNdEx = postIndex case 3: if wireType != 0 { @@ -3537,6 +3604,149 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } m.AllowedProcMountTypes = append(m.AllowedProcMountTypes, k8s_io_api_core_v1.ProcMountType(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen 
+ if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RunAsGroup == nil { + m.RunAsGroup = &RunAsGroupStrategyOptions{} + } + if err := m.RunAsGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RunAsGroupStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RunAsGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = RunAsGroupStrategy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -4000,113 +4210,115 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 1715 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4f, 0x6f, 0x1b, 0xc7, - 0x15, 0xd7, 0x9a, 0x92, 0x48, 0x8d, 0x24, 0x5a, 0x1a, 0xfd, 0xe9, 0x46, 0xa8, 0xb9, 0x0e, 0x03, - 0x14, 0x6e, 0x90, 0x2c, 0x63, 0x39, 0x69, 0x8d, 0xa6, 0x2d, 0xa2, 0x35, 0x25, 0x5b, 0x81, 0x55, - 0xb1, 0x43, 0x3b, 0x68, 0x0b, 0xb7, 0xe8, 0x70, 0x77, 0x44, 0x4e, 0xb4, 0xdc, 0xdd, 0xce, 0xcc, - 0x32, 0xe4, 0xad, 0x87, 0x1e, 0x7a, 0xec, 0x17, 0xc8, 0x27, 0x28, 0x7a, 0xea, 0x97, 0x50, 0x81, - 0xa2, 0xc8, 0x31, 0xe8, 0x81, 0xa8, 0x59, 0xf4, 0x4b, 0xf8, 0xd2, 0x60, 0x87, 0xb3, 0x24, 0xf7, - 0x0f, 0x29, 0x2b, 0x40, 0x7c, 0xdb, 0x9d, 0xf7, 0xfb, 0xfd, 0xde, 0x9b, 0x37, 0x6f, 0xde, 0xce, - 0x0e, 0xb0, 0x2e, 0x1f, 0x72, 0x93, 0xfa, 0xb5, 0xcb, 0xb0, 
0x45, 0x98, 0x47, 0x04, 0xe1, 0xb5, - 0x1e, 0xf1, 0x1c, 0x9f, 0xd5, 0x94, 0x01, 0x07, 0xb4, 0x16, 0xf8, 0x2e, 0xb5, 0x07, 0xb5, 0xde, - 0xfd, 0x16, 0x11, 0xf8, 0x7e, 0xad, 0x4d, 0x3c, 0xc2, 0xb0, 0x20, 0x8e, 0x19, 0x30, 0x5f, 0xf8, - 0xf0, 0xad, 0x31, 0xd4, 0xc4, 0x01, 0x35, 0xc7, 0x50, 0x53, 0x41, 0x0f, 0xde, 0x6f, 0x53, 0xd1, - 0x09, 0x5b, 0xa6, 0xed, 0x77, 0x6b, 0x6d, 0xbf, 0xed, 0xd7, 0x24, 0xa3, 0x15, 0x5e, 0xc8, 0x37, - 0xf9, 0x22, 0x9f, 0xc6, 0x4a, 0x07, 0xd5, 0x19, 0xa7, 0xb6, 0xcf, 0x48, 0xad, 0x97, 0xf1, 0x76, - 0xf0, 0xe1, 0x14, 0xd3, 0xc5, 0x76, 0x87, 0x7a, 0x84, 0x0d, 0x6a, 0xc1, 0x65, 0x3b, 0x1a, 0xe0, - 0xb5, 0x2e, 0x11, 0x38, 0x8f, 0x55, 0x9b, 0xc7, 0x62, 0xa1, 0x27, 0x68, 0x97, 0x64, 0x08, 0x3f, - 0xba, 0x8e, 0xc0, 0xed, 0x0e, 0xe9, 0xe2, 0x0c, 0xef, 0xc1, 0x3c, 0x5e, 0x28, 0xa8, 0x5b, 0xa3, - 0x9e, 0xe0, 0x82, 0xa5, 0x49, 0xd5, 0x8f, 0xc1, 0xf6, 0x91, 0xeb, 0xfa, 0x5f, 0x10, 0xe7, 0xc4, - 0x25, 0xfd, 0xcf, 0x7c, 0x37, 0xec, 0x12, 0xf8, 0x03, 0xb0, 0xea, 0x30, 0xda, 0x23, 0x4c, 0xd7, - 0xee, 0x6a, 0xf7, 0xd6, 0xac, 0xf2, 0xd5, 0xd0, 0x58, 0x1a, 0x0d, 0x8d, 0xd5, 0xba, 0x1c, 0x45, - 0xca, 0x5a, 0xe5, 0xe0, 0xb6, 0x22, 0x3f, 0xf1, 0xb9, 0x68, 0x60, 0xd1, 0x81, 0x87, 0x00, 0x04, - 0x58, 0x74, 0x1a, 0x8c, 0x5c, 0xd0, 0xbe, 0xa2, 0x43, 0x45, 0x07, 0x8d, 0x89, 0x05, 0xcd, 0xa0, - 0xe0, 0x7b, 0xa0, 0xc4, 0x08, 0x76, 0xce, 0x3d, 0x77, 0xa0, 0xdf, 0xba, 0xab, 0xdd, 0x2b, 0x59, - 0x5b, 0x8a, 0x51, 0x42, 0x6a, 0x1c, 0x4d, 0x10, 0xd5, 0x7f, 0x6b, 0xa0, 0x74, 0xdc, 0xa3, 0xb6, - 0xa0, 0xbe, 0x07, 0x7f, 0x0f, 0x4a, 0x51, 0xde, 0x1d, 0x2c, 0xb0, 0x74, 0xb6, 0x7e, 0xf8, 0x81, - 0x39, 0xad, 0x89, 0x49, 0x1a, 0xcc, 0xe0, 0xb2, 0x1d, 0x0d, 0x70, 0x33, 0x42, 0x9b, 0xbd, 0xfb, - 0xe6, 0x79, 0xeb, 0x73, 0x62, 0x8b, 0x33, 0x22, 0xf0, 0x34, 0xbc, 0xe9, 0x18, 0x9a, 0xa8, 0x42, - 0x17, 0x6c, 0x3a, 0xc4, 0x25, 0x82, 0x9c, 0x07, 0x91, 0x47, 0x2e, 0x23, 0x5c, 0x3f, 0x7c, 0xf0, - 0x7a, 0x6e, 0xea, 0xb3, 0x54, 0x6b, 0x7b, 0x34, 0x34, 0x36, 0x13, 0x43, 0x28, 0x29, 0x5e, 0xfd, - 0x52, 0x03, 0xfb, 0x27, 0xcd, 0xc7, 0xcc, 0x0f, 0x83, 0xa6, 0x88, 0xd6, 0xa9, 0x3d, 0x50, 0x26, - 0xf8, 0x63, 0xb0, 0xcc, 0x42, 0x97, 0xa8, 0x9c, 0xbe, 0xa3, 0x82, 0x5e, 0x46, 0xa1, 0x4b, 0x5e, - 0x0d, 0x8d, 0x9d, 0x14, 0xeb, 0xd9, 0x20, 0x20, 0x48, 0x12, 0xe0, 0xa7, 0x60, 0x95, 0x61, 0xaf, - 0x4d, 0xa2, 0xd0, 0x0b, 0xf7, 0xd6, 0x0f, 0xab, 0xe6, 0xdc, 0x5d, 0x63, 0x9e, 0xd6, 0x51, 0x04, - 0x9d, 0xae, 0xb8, 0x7c, 0xe5, 0x48, 0x29, 0x54, 0xcf, 0xc0, 0xa6, 0x5c, 0x6a, 0x9f, 0x09, 0x69, - 0x81, 0x77, 0x40, 0xa1, 0x4b, 0x3d, 0x19, 0xd4, 0x8a, 0xb5, 0xae, 0x58, 0x85, 0x33, 0xea, 0xa1, - 0x68, 0x5c, 0x9a, 0x71, 0x5f, 0xe6, 0x6c, 0xd6, 0x8c, 0xfb, 0x28, 0x1a, 0xaf, 0x3e, 0x06, 0x45, - 0xe5, 0x71, 0x56, 0xa8, 0xb0, 0x58, 0xa8, 0x90, 0x23, 0xf4, 0xd7, 0x5b, 0x60, 0xa7, 0xe1, 0x3b, - 0x75, 0xca, 0x59, 0x28, 0xf3, 0x65, 0x85, 0x4e, 0x9b, 0x88, 0x37, 0x50, 0x1f, 0xcf, 0xc0, 0x32, - 0x0f, 0x88, 0xad, 0xca, 0xe2, 0x70, 0x41, 0x6e, 0x73, 0xe2, 0x6b, 0x06, 0xc4, 0xb6, 0x36, 0xe2, - 0xa5, 0x8c, 0xde, 0x90, 0x54, 0x83, 0x2f, 0xc0, 0x2a, 0x17, 0x58, 0x84, 0x5c, 0x2f, 0x48, 0xdd, - 0x0f, 0x6f, 0xa8, 0x2b, 0xb9, 0xd3, 0x55, 0x1c, 0xbf, 0x23, 0xa5, 0x59, 0xfd, 0xa7, 0x06, 0xbe, - 0x97, 0xc3, 0x7a, 0x4a, 0xb9, 0x80, 0x2f, 0x32, 0x19, 0x33, 0x5f, 0x2f, 0x63, 0x11, 0x5b, 0xe6, - 0x6b, 0xb2, 0x79, 0xe3, 0x91, 0x99, 0x6c, 0x35, 0xc1, 0x0a, 0x15, 0xa4, 0x1b, 0x97, 0xa2, 0x79, - 0xb3, 0x69, 0x59, 0x9b, 0x4a, 0x7a, 0xe5, 0x34, 0x12, 0x41, 0x63, 0xad, 0xea, 0xbf, 0x6e, 0xe5, - 0x4e, 0x27, 0x4a, 0x27, 0xbc, 0x00, 0x1b, 0x5d, 0xea, 0x1d, 0xf5, 0x30, 0x75, 0x71, 
0x4b, 0xed, - 0x9e, 0x45, 0x45, 0x10, 0xf5, 0x4a, 0x73, 0xdc, 0x2b, 0xcd, 0x53, 0x4f, 0x9c, 0xb3, 0xa6, 0x60, - 0xd4, 0x6b, 0x5b, 0x5b, 0xa3, 0xa1, 0xb1, 0x71, 0x36, 0xa3, 0x84, 0x12, 0xba, 0xf0, 0xb7, 0xa0, - 0xc4, 0x89, 0x4b, 0x6c, 0xe1, 0xb3, 0x9b, 0x75, 0x88, 0xa7, 0xb8, 0x45, 0xdc, 0xa6, 0xa2, 0x5a, - 0x1b, 0x51, 0xde, 0xe2, 0x37, 0x34, 0x91, 0x84, 0x2e, 0x28, 0x77, 0x71, 0xff, 0xb9, 0x87, 0x27, - 0x13, 0x29, 0x7c, 0xcb, 0x89, 0xc0, 0xd1, 0xd0, 0x28, 0x9f, 0x25, 0xb4, 0x50, 0x4a, 0xbb, 0xfa, - 0xbf, 0x65, 0xf0, 0xd6, 0xdc, 0xaa, 0x82, 0x9f, 0x02, 0xe8, 0xb7, 0x38, 0x61, 0x3d, 0xe2, 0x3c, - 0x1e, 0x7f, 0x4d, 0xa8, 0x1f, 0x6f, 0xdc, 0x03, 0xb5, 0x40, 0xf0, 0x3c, 0x83, 0x40, 0x39, 0x2c, - 0xf8, 0x27, 0x0d, 0x6c, 0x3a, 0x63, 0x37, 0xc4, 0x69, 0xf8, 0x4e, 0x5c, 0x18, 0x8f, 0xbf, 0x4d, - 0xbd, 0x9b, 0xf5, 0x59, 0xa5, 0x63, 0x4f, 0xb0, 0x81, 0xb5, 0xa7, 0x02, 0xda, 0x4c, 0xd8, 0x50, - 0xd2, 0x29, 0x3c, 0x03, 0xd0, 0x99, 0x48, 0x72, 0xf5, 0x4d, 0x93, 0x29, 0x5e, 0xb1, 0xee, 0x28, - 0x85, 0xbd, 0x84, 0xdf, 0x18, 0x84, 0x72, 0x88, 0xf0, 0xe7, 0xa0, 0x6c, 0x87, 0x8c, 0x11, 0x4f, - 0x3c, 0x21, 0xd8, 0x15, 0x9d, 0x81, 0xbe, 0x2c, 0xa5, 0xf6, 0x95, 0x54, 0xf9, 0x51, 0xc2, 0x8a, - 0x52, 0xe8, 0x88, 0xef, 0x10, 0x4e, 0x19, 0x71, 0x62, 0xfe, 0x4a, 0x92, 0x5f, 0x4f, 0x58, 0x51, - 0x0a, 0x0d, 0x1f, 0x82, 0x0d, 0xd2, 0x0f, 0x88, 0x1d, 0xe7, 0x74, 0x55, 0xb2, 0x77, 0x15, 0x7b, - 0xe3, 0x78, 0xc6, 0x86, 0x12, 0xc8, 0x03, 0x17, 0xc0, 0x6c, 0x12, 0xe1, 0x16, 0x28, 0x5c, 0x92, - 0xc1, 0xf8, 0xcb, 0x83, 0xa2, 0x47, 0xf8, 0x09, 0x58, 0xe9, 0x61, 0x37, 0x24, 0xaa, 0xd6, 0xdf, - 0x7d, 0xbd, 0x5a, 0x7f, 0x46, 0xbb, 0x04, 0x8d, 0x89, 0x3f, 0xb9, 0xf5, 0x50, 0xab, 0xfe, 0x43, - 0x03, 0xdb, 0x0d, 0xdf, 0x69, 0x12, 0x3b, 0x64, 0x54, 0x0c, 0x1a, 0x72, 0x9d, 0xdf, 0x40, 0xcf, - 0x46, 0x89, 0x9e, 0xfd, 0xc1, 0xe2, 0x5a, 0x4b, 0x46, 0x37, 0xaf, 0x63, 0x57, 0xaf, 0x34, 0xb0, - 0x97, 0x41, 0xbf, 0x81, 0x8e, 0xfa, 0xcb, 0x64, 0x47, 0x7d, 0xef, 0x26, 0x93, 0x99, 0xd3, 0x4f, - 0xff, 0x5f, 0xce, 0x99, 0x8a, 0xec, 0xa6, 0xd1, 0xe9, 0x8e, 0xd1, 0x1e, 0x75, 0x49, 0x9b, 0x38, - 0x72, 0x32, 0xa5, 0x99, 0xd3, 0xdd, 0xc4, 0x82, 0x66, 0x50, 0x90, 0x83, 0x7d, 0x87, 0x5c, 0xe0, - 0xd0, 0x15, 0x47, 0x8e, 0xf3, 0x08, 0x07, 0xb8, 0x45, 0x5d, 0x2a, 0xa8, 0x3a, 0x8e, 0xac, 0x59, - 0x1f, 0x8f, 0x86, 0xc6, 0x7e, 0x3d, 0x17, 0xf1, 0x6a, 0x68, 0xdc, 0xc9, 0x9e, 0xcb, 0xcd, 0x09, - 0x64, 0x80, 0xe6, 0x48, 0xc3, 0x01, 0xd0, 0x19, 0xf9, 0x43, 0x18, 0x6d, 0x8a, 0x3a, 0xf3, 0x83, - 0x84, 0xdb, 0x82, 0x74, 0xfb, 0xb3, 0xd1, 0xd0, 0xd0, 0xd1, 0x1c, 0xcc, 0xf5, 0x8e, 0xe7, 0xca, - 0xc3, 0xcf, 0xc1, 0x0e, 0x1e, 0xf7, 0x81, 0x84, 0xd7, 0x65, 0xe9, 0xf5, 0xe1, 0x68, 0x68, 0xec, - 0x1c, 0x65, 0xcd, 0xd7, 0x3b, 0xcc, 0x13, 0x85, 0x35, 0x50, 0xec, 0xc9, 0x23, 0x3b, 0xd7, 0x57, - 0xa4, 0xfe, 0xde, 0x68, 0x68, 0x14, 0xc7, 0xa7, 0xf8, 0x48, 0x73, 0xf5, 0xa4, 0x29, 0x0f, 0x82, - 0x31, 0x0a, 0x7e, 0x04, 0xd6, 0x3b, 0x3e, 0x17, 0xbf, 0x20, 0xe2, 0x0b, 0x9f, 0x5d, 0xca, 0xc6, - 0x50, 0xb2, 0x76, 0xd4, 0x0a, 0xae, 0x3f, 0x99, 0x9a, 0xd0, 0x2c, 0x0e, 0xfe, 0x1a, 0xac, 0x75, - 0xd4, 0xb1, 0x8f, 0xeb, 0x45, 0x59, 0x68, 0xf7, 0x16, 0x14, 0x5a, 0xe2, 0x88, 0x68, 0x6d, 0x2b, - 0xf9, 0xb5, 0x78, 0x98, 0xa3, 0xa9, 0x1a, 0xfc, 0x21, 0x28, 0xca, 0x97, 0xd3, 0xba, 0x5e, 0x92, - 0xd1, 0xdc, 0x56, 0xf0, 0xe2, 0x93, 0xf1, 0x30, 0x8a, 0xed, 0x31, 0xf4, 0xb4, 0xf1, 0x48, 0x5f, - 0xcb, 0x42, 0x4f, 0x1b, 0x8f, 0x50, 0x6c, 0x87, 0x2f, 0x40, 0x91, 0x93, 0xa7, 0xd4, 0x0b, 0xfb, - 0x3a, 0x90, 0x5b, 0xee, 0xfe, 0x82, 0x70, 0x9b, 0xc7, 0x12, 0x99, 0x3a, 0x70, 0x4f, 0xd5, 0x95, - 0x1d, 0xc5, 
0x92, 0xd0, 0x01, 0x6b, 0x2c, 0xf4, 0x8e, 0xf8, 0x73, 0x4e, 0x98, 0xbe, 0x9e, 0xf9, - 0xda, 0xa7, 0xf5, 0x51, 0x8c, 0x4d, 0x7b, 0x98, 0x64, 0x66, 0x82, 0x40, 0x53, 0x61, 0xf8, 0x67, - 0x0d, 0x40, 0x1e, 0x06, 0x81, 0x4b, 0xba, 0xc4, 0x13, 0xd8, 0x95, 0xe7, 0x7b, 0xae, 0x6f, 0x48, - 0x7f, 0x3f, 0x5d, 0x34, 0x9f, 0x0c, 0x29, 0xed, 0x78, 0xf2, 0x99, 0xce, 0x42, 0x51, 0x8e, 0xcf, - 0x28, 0x9d, 0x17, 0x5c, 0x3e, 0xeb, 0x9b, 0xd7, 0xa6, 0x33, 0xff, 0xff, 0x65, 0x9a, 0x4e, 0x65, - 0x47, 0xb1, 0x24, 0xfc, 0x0c, 0xec, 0xc7, 0x7f, 0x77, 0xc8, 0xf7, 0xc5, 0x09, 0x75, 0x09, 0x1f, - 0x70, 0x41, 0xba, 0x7a, 0x59, 0x2e, 0x73, 0x45, 0x31, 0xf7, 0x51, 0x2e, 0x0a, 0xcd, 0x61, 0xc3, - 0x2e, 0x30, 0xe2, 0xf6, 0x10, 0xed, 0x9d, 0x49, 0x7f, 0x3a, 0xe6, 0x36, 0x76, 0xc7, 0xa7, 0x96, - 0xdb, 0xd2, 0xc1, 0x3b, 0xa3, 0xa1, 0x61, 0xd4, 0x17, 0x43, 0xd1, 0x75, 0x5a, 0xf0, 0x57, 0x40, - 0xc7, 0xf3, 0xfc, 0x6c, 0x49, 0x3f, 0xdf, 0x8f, 0x7a, 0xce, 0x5c, 0x07, 0x73, 0xd9, 0x30, 0x00, - 0x5b, 0x38, 0xf9, 0x9f, 0xcd, 0xf5, 0x6d, 0xb9, 0x0b, 0xdf, 0x5d, 0xb0, 0x0e, 0xa9, 0x5f, 0x73, - 0x4b, 0x57, 0x69, 0xdc, 0x4a, 0x19, 0x38, 0xca, 0xa8, 0xc3, 0x3e, 0x80, 0x38, 0x7d, 0x2d, 0xc0, - 0x75, 0x78, 0xed, 0x27, 0x26, 0x73, 0x97, 0x30, 0x2d, 0xb5, 0x8c, 0x89, 0xa3, 0x1c, 0x1f, 0xf0, - 0x29, 0xd8, 0x55, 0xa3, 0xcf, 0x3d, 0x8e, 0x2f, 0x48, 0x73, 0xc0, 0x6d, 0xe1, 0x72, 0x7d, 0x47, - 0xf6, 0x37, 0x7d, 0x34, 0x34, 0x76, 0x8f, 0x72, 0xec, 0x28, 0x97, 0x05, 0x3f, 0x01, 0x5b, 0x17, - 0x3e, 0x6b, 0x51, 0xc7, 0x21, 0x5e, 0xac, 0xb4, 0x2b, 0x95, 0x76, 0xa3, 0x4c, 0x9c, 0xa4, 0x6c, - 0x28, 0x83, 0x86, 0x1c, 0xec, 0x29, 0xe5, 0x06, 0xf3, 0xed, 0x33, 0x3f, 0xf4, 0x44, 0xd4, 0x52, - 0xb9, 0xbe, 0x37, 0xf9, 0x8c, 0xec, 0x1d, 0xe5, 0x01, 0x5e, 0x0d, 0x8d, 0xbb, 0x39, 0x2d, 0x3d, - 0x01, 0x42, 0xf9, 0xda, 0xd5, 0x2f, 0x35, 0xa0, 0xcf, 0xeb, 0x1a, 0xf0, 0xa3, 0xc4, 0x45, 0xc0, - 0xdb, 0xa9, 0x8b, 0x80, 0xed, 0x0c, 0xef, 0x3b, 0xb8, 0x06, 0xf8, 0x9b, 0x06, 0xf6, 0xf3, 0xbb, - 0x26, 0x7c, 0x90, 0x88, 0xce, 0x48, 0x45, 0x77, 0x3b, 0xc5, 0x52, 0xb1, 0xfd, 0x0e, 0x94, 0x55, - 0x6f, 0x4d, 0xde, 0xb2, 0x24, 0x62, 0x8c, 0x32, 0x18, 0x1d, 0x8b, 0x94, 0x44, 0xdc, 0x57, 0xe4, - 0x0f, 0x4d, 0x72, 0x0c, 0xa5, 0xd4, 0xaa, 0x7f, 0xd7, 0xc0, 0xdb, 0xd7, 0x76, 0x45, 0x68, 0x25, - 0x42, 0x37, 0x53, 0xa1, 0x57, 0xe6, 0x0b, 0x7c, 0x37, 0x97, 0x2d, 0xd6, 0xfb, 0x57, 0x2f, 0x2b, - 0x4b, 0x5f, 0xbd, 0xac, 0x2c, 0x7d, 0xfd, 0xb2, 0xb2, 0xf4, 0xc7, 0x51, 0x45, 0xbb, 0x1a, 0x55, - 0xb4, 0xaf, 0x46, 0x15, 0xed, 0xeb, 0x51, 0x45, 0xfb, 0xcf, 0xa8, 0xa2, 0xfd, 0xe5, 0xbf, 0x95, - 0xa5, 0xdf, 0x14, 0x95, 0xdc, 0x37, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa8, 0xba, 0x23, 0xa4, 0x51, - 0x15, 0x00, 0x00, + // 1756 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xdd, 0x8e, 0xdb, 0xc6, + 0x15, 0x5e, 0x5a, 0xfb, 0xa3, 0x9d, 0xfd, 0x9f, 0xfd, 0x29, 0xbd, 0xa8, 0x45, 0x47, 0x01, 0x0a, + 0x37, 0x48, 0xa8, 0x78, 0x9d, 0xa4, 0x46, 0xd3, 0x16, 0x59, 0x5a, 0xbb, 0xf6, 0x06, 0xde, 0xae, + 0x3a, 0xb2, 0x83, 0xb6, 0x70, 0x8b, 0x8e, 0xc4, 0x59, 0xed, 0x64, 0x29, 0x92, 0x9d, 0x19, 0x2a, + 0xab, 0xbb, 0x5e, 0xf4, 0xa2, 0x97, 0x7d, 0x81, 0xa0, 0x0f, 0x50, 0xf4, 0xaa, 0x2f, 0xe1, 0x02, + 0x45, 0x91, 0xcb, 0xa0, 0x17, 0x42, 0xad, 0x22, 0x2f, 0xe1, 0xab, 0x80, 0xa3, 0x21, 0x25, 0xfe, + 0x49, 0x5e, 0x03, 0xf6, 0x1d, 0x39, 0xe7, 0xfb, 0xbe, 0x73, 0xe6, 0xcc, 0x99, 0x33, 0x43, 0x02, + 0xeb, 0xf2, 0x3e, 0x37, 0xa9, 0x57, 0xbb, 0x0c, 0x5a, 0x84, 0xb9, 0x44, 0x10, 0x5e, 0xeb, 0x11, + 0xd7, 0xf6, 0x58, 0x4d, 0x19, 0xb0, 0x4f, 0x6b, 0xbe, 0xe7, 0xd0, 
0x76, 0xbf, 0xd6, 0xbb, 0xdb, + 0x22, 0x02, 0xdf, 0xad, 0x75, 0x88, 0x4b, 0x18, 0x16, 0xc4, 0x36, 0x7d, 0xe6, 0x09, 0x0f, 0xde, + 0x1c, 0x41, 0x4d, 0xec, 0x53, 0x73, 0x04, 0x35, 0x15, 0x74, 0xff, 0x83, 0x0e, 0x15, 0x17, 0x41, + 0xcb, 0x6c, 0x7b, 0xdd, 0x5a, 0xc7, 0xeb, 0x78, 0x35, 0xc9, 0x68, 0x05, 0xe7, 0xf2, 0x4d, 0xbe, + 0xc8, 0xa7, 0x91, 0xd2, 0x7e, 0x75, 0xc2, 0x69, 0xdb, 0x63, 0xa4, 0xd6, 0xcb, 0x78, 0xdb, 0xff, + 0x68, 0x8c, 0xe9, 0xe2, 0xf6, 0x05, 0x75, 0x09, 0xeb, 0xd7, 0xfc, 0xcb, 0x4e, 0x38, 0xc0, 0x6b, + 0x5d, 0x22, 0x70, 0x1e, 0xab, 0x56, 0xc4, 0x62, 0x81, 0x2b, 0x68, 0x97, 0x64, 0x08, 0x9f, 0xcc, + 0x22, 0xf0, 0xf6, 0x05, 0xe9, 0xe2, 0x0c, 0xef, 0x5e, 0x11, 0x2f, 0x10, 0xd4, 0xa9, 0x51, 0x57, + 0x70, 0xc1, 0xd2, 0xa4, 0xea, 0xa7, 0x60, 0xeb, 0xd0, 0x71, 0xbc, 0xaf, 0x88, 0x7d, 0xec, 0x90, + 0xab, 0x2f, 0x3c, 0x27, 0xe8, 0x12, 0xf8, 0x23, 0xb0, 0x68, 0x33, 0xda, 0x23, 0x4c, 0xd7, 0x6e, + 0x6b, 0x77, 0x96, 0xad, 0xf5, 0xe7, 0x03, 0x63, 0x6e, 0x38, 0x30, 0x16, 0xeb, 0x72, 0x14, 0x29, + 0x6b, 0x95, 0x83, 0x0d, 0x45, 0x7e, 0xe4, 0x71, 0xd1, 0xc0, 0xe2, 0x02, 0x1e, 0x00, 0xe0, 0x63, + 0x71, 0xd1, 0x60, 0xe4, 0x9c, 0x5e, 0x29, 0x3a, 0x54, 0x74, 0xd0, 0x88, 0x2d, 0x68, 0x02, 0x05, + 0xdf, 0x07, 0x65, 0x46, 0xb0, 0x7d, 0xe6, 0x3a, 0x7d, 0xfd, 0xc6, 0x6d, 0xed, 0x4e, 0xd9, 0xda, + 0x54, 0x8c, 0x32, 0x52, 0xe3, 0x28, 0x46, 0x54, 0xff, 0xab, 0x81, 0xf2, 0x51, 0x8f, 0xb6, 0x05, + 0xf5, 0x5c, 0xf8, 0x07, 0x50, 0x0e, 0xf3, 0x6e, 0x63, 0x81, 0xa5, 0xb3, 0x95, 0x83, 0x0f, 0xcd, + 0x71, 0x4d, 0xc4, 0x69, 0x30, 0xfd, 0xcb, 0x4e, 0x38, 0xc0, 0xcd, 0x10, 0x6d, 0xf6, 0xee, 0x9a, + 0x67, 0xad, 0x2f, 0x49, 0x5b, 0x9c, 0x12, 0x81, 0xc7, 0xe1, 0x8d, 0xc7, 0x50, 0xac, 0x0a, 0x1d, + 0xb0, 0x66, 0x13, 0x87, 0x08, 0x72, 0xe6, 0x87, 0x1e, 0xb9, 0x8c, 0x70, 0xe5, 0xe0, 0xde, 0xab, + 0xb9, 0xa9, 0x4f, 0x52, 0xad, 0xad, 0xe1, 0xc0, 0x58, 0x4b, 0x0c, 0xa1, 0xa4, 0x78, 0xf5, 0x6b, + 0x0d, 0xec, 0x1d, 0x37, 0x1f, 0x32, 0x2f, 0xf0, 0x9b, 0x22, 0x5c, 0xa7, 0x4e, 0x5f, 0x99, 0xe0, + 0x4f, 0xc0, 0x3c, 0x0b, 0x1c, 0xa2, 0x72, 0xfa, 0xae, 0x0a, 0x7a, 0x1e, 0x05, 0x0e, 0x79, 0x39, + 0x30, 0xb6, 0x53, 0xac, 0x27, 0x7d, 0x9f, 0x20, 0x49, 0x80, 0x9f, 0x83, 0x45, 0x86, 0xdd, 0x0e, + 0x09, 0x43, 0x2f, 0xdd, 0x59, 0x39, 0xa8, 0x9a, 0x85, 0xbb, 0xc6, 0x3c, 0xa9, 0xa3, 0x10, 0x3a, + 0x5e, 0x71, 0xf9, 0xca, 0x91, 0x52, 0xa8, 0x9e, 0x82, 0x35, 0xb9, 0xd4, 0x1e, 0x13, 0xd2, 0x02, + 0x6f, 0x81, 0x52, 0x97, 0xba, 0x32, 0xa8, 0x05, 0x6b, 0x45, 0xb1, 0x4a, 0xa7, 0xd4, 0x45, 0xe1, + 0xb8, 0x34, 0xe3, 0x2b, 0x99, 0xb3, 0x49, 0x33, 0xbe, 0x42, 0xe1, 0x78, 0xf5, 0x21, 0x58, 0x52, + 0x1e, 0x27, 0x85, 0x4a, 0xd3, 0x85, 0x4a, 0x39, 0x42, 0x7f, 0xbf, 0x01, 0xb6, 0x1b, 0x9e, 0x5d, + 0xa7, 0x9c, 0x05, 0x32, 0x5f, 0x56, 0x60, 0x77, 0x88, 0x78, 0x0b, 0xf5, 0xf1, 0x04, 0xcc, 0x73, + 0x9f, 0xb4, 0x55, 0x59, 0x1c, 0x4c, 0xc9, 0x6d, 0x4e, 0x7c, 0x4d, 0x9f, 0xb4, 0xad, 0xd5, 0x68, + 0x29, 0xc3, 0x37, 0x24, 0xd5, 0xe0, 0x33, 0xb0, 0xc8, 0x05, 0x16, 0x01, 0xd7, 0x4b, 0x52, 0xf7, + 0xa3, 0x6b, 0xea, 0x4a, 0xee, 0x78, 0x15, 0x47, 0xef, 0x48, 0x69, 0x56, 0xff, 0xad, 0x81, 0x1f, + 0xe4, 0xb0, 0x1e, 0x53, 0x2e, 0xe0, 0xb3, 0x4c, 0xc6, 0xcc, 0x57, 0xcb, 0x58, 0xc8, 0x96, 0xf9, + 0x8a, 0x37, 0x6f, 0x34, 0x32, 0x91, 0xad, 0x26, 0x58, 0xa0, 0x82, 0x74, 0xa3, 0x52, 0x34, 0xaf, + 0x37, 0x2d, 0x6b, 0x4d, 0x49, 0x2f, 0x9c, 0x84, 0x22, 0x68, 0xa4, 0x55, 0xfd, 0xcf, 0x8d, 0xdc, + 0xe9, 0x84, 0xe9, 0x84, 0xe7, 0x60, 0xb5, 0x4b, 0xdd, 0xc3, 0x1e, 0xa6, 0x0e, 0x6e, 0xa9, 0xdd, + 0x33, 0xad, 0x08, 0xc2, 0x5e, 0x69, 0x8e, 0x7a, 0xa5, 0x79, 0xe2, 0x8a, 0x33, 0xd6, 0x14, 
0x8c, + 0xba, 0x1d, 0x6b, 0x73, 0x38, 0x30, 0x56, 0x4f, 0x27, 0x94, 0x50, 0x42, 0x17, 0xfe, 0x0e, 0x94, + 0x39, 0x71, 0x48, 0x5b, 0x78, 0xec, 0x7a, 0x1d, 0xe2, 0x31, 0x6e, 0x11, 0xa7, 0xa9, 0xa8, 0xd6, + 0x6a, 0x98, 0xb7, 0xe8, 0x0d, 0xc5, 0x92, 0xd0, 0x01, 0xeb, 0x5d, 0x7c, 0xf5, 0xd4, 0xc5, 0xf1, + 0x44, 0x4a, 0xaf, 0x39, 0x11, 0x38, 0x1c, 0x18, 0xeb, 0xa7, 0x09, 0x2d, 0x94, 0xd2, 0xae, 0x7e, + 0x37, 0x0f, 0x6e, 0x16, 0x56, 0x15, 0xfc, 0x1c, 0x40, 0xaf, 0xc5, 0x09, 0xeb, 0x11, 0xfb, 0xe1, + 0xe8, 0x34, 0xa1, 0x5e, 0xb4, 0x71, 0xf7, 0xd5, 0x02, 0xc1, 0xb3, 0x0c, 0x02, 0xe5, 0xb0, 0xe0, + 0x9f, 0x35, 0xb0, 0x66, 0x8f, 0xdc, 0x10, 0xbb, 0xe1, 0xd9, 0x51, 0x61, 0x3c, 0x7c, 0x9d, 0x7a, + 0x37, 0xeb, 0x93, 0x4a, 0x47, 0xae, 0x60, 0x7d, 0x6b, 0x57, 0x05, 0xb4, 0x96, 0xb0, 0xa1, 0xa4, + 0x53, 0x78, 0x0a, 0xa0, 0x1d, 0x4b, 0x72, 0x75, 0xa6, 0xc9, 0x14, 0x2f, 0x58, 0xb7, 0x94, 0xc2, + 0x6e, 0xc2, 0x6f, 0x04, 0x42, 0x39, 0x44, 0xf8, 0x0b, 0xb0, 0xde, 0x0e, 0x18, 0x23, 0xae, 0x78, + 0x44, 0xb0, 0x23, 0x2e, 0xfa, 0xfa, 0xbc, 0x94, 0xda, 0x53, 0x52, 0xeb, 0x0f, 0x12, 0x56, 0x94, + 0x42, 0x87, 0x7c, 0x9b, 0x70, 0xca, 0x88, 0x1d, 0xf1, 0x17, 0x92, 0xfc, 0x7a, 0xc2, 0x8a, 0x52, + 0x68, 0x78, 0x1f, 0xac, 0x92, 0x2b, 0x9f, 0xb4, 0xa3, 0x9c, 0x2e, 0x4a, 0xf6, 0x8e, 0x62, 0xaf, + 0x1e, 0x4d, 0xd8, 0x50, 0x02, 0xb9, 0xef, 0x00, 0x98, 0x4d, 0x22, 0xdc, 0x04, 0xa5, 0x4b, 0xd2, + 0x1f, 0x9d, 0x3c, 0x28, 0x7c, 0x84, 0x9f, 0x81, 0x85, 0x1e, 0x76, 0x02, 0xa2, 0x6a, 0xfd, 0xbd, + 0x57, 0xab, 0xf5, 0x27, 0xb4, 0x4b, 0xd0, 0x88, 0xf8, 0xd3, 0x1b, 0xf7, 0xb5, 0xea, 0xbf, 0x34, + 0xb0, 0xd5, 0xf0, 0xec, 0x26, 0x69, 0x07, 0x8c, 0x8a, 0x7e, 0x43, 0xae, 0xf3, 0x5b, 0xe8, 0xd9, + 0x28, 0xd1, 0xb3, 0x3f, 0x9c, 0x5e, 0x6b, 0xc9, 0xe8, 0x8a, 0x3a, 0x76, 0xf5, 0xb9, 0x06, 0x76, + 0x33, 0xe8, 0xb7, 0xd0, 0x51, 0x7f, 0x95, 0xec, 0xa8, 0xef, 0x5f, 0x67, 0x32, 0x05, 0xfd, 0xf4, + 0xbb, 0x8d, 0x9c, 0xa9, 0xc8, 0x6e, 0x1a, 0xde, 0xee, 0x18, 0xed, 0x51, 0x87, 0x74, 0x88, 0x2d, + 0x27, 0x53, 0x9e, 0xb8, 0xdd, 0xc5, 0x16, 0x34, 0x81, 0x82, 0x1c, 0xec, 0xd9, 0xe4, 0x1c, 0x07, + 0x8e, 0x38, 0xb4, 0xed, 0x07, 0xd8, 0xc7, 0x2d, 0xea, 0x50, 0x41, 0xd5, 0x75, 0x64, 0xd9, 0xfa, + 0x74, 0x38, 0x30, 0xf6, 0xea, 0xb9, 0x88, 0x97, 0x03, 0xe3, 0x56, 0xf6, 0x5e, 0x6e, 0xc6, 0x90, + 0x3e, 0x2a, 0x90, 0x86, 0x7d, 0xa0, 0x33, 0xf2, 0xc7, 0x20, 0xdc, 0x14, 0x75, 0xe6, 0xf9, 0x09, + 0xb7, 0x25, 0xe9, 0xf6, 0xe7, 0xc3, 0x81, 0xa1, 0xa3, 0x02, 0xcc, 0x6c, 0xc7, 0x85, 0xf2, 0xf0, + 0x4b, 0xb0, 0x8d, 0x47, 0x7d, 0x20, 0xe1, 0x75, 0x5e, 0x7a, 0xbd, 0x3f, 0x1c, 0x18, 0xdb, 0x87, + 0x59, 0xf3, 0x6c, 0x87, 0x79, 0xa2, 0xb0, 0x06, 0x96, 0x7a, 0xf2, 0xca, 0xce, 0xf5, 0x05, 0xa9, + 0xbf, 0x3b, 0x1c, 0x18, 0x4b, 0xa3, 0x5b, 0x7c, 0xa8, 0xb9, 0x78, 0xdc, 0x94, 0x17, 0xc1, 0x08, + 0x05, 0x3f, 0x06, 0x2b, 0x17, 0x1e, 0x17, 0xbf, 0x24, 0xe2, 0x2b, 0x8f, 0x5d, 0xca, 0xc6, 0x50, + 0xb6, 0xb6, 0xd5, 0x0a, 0xae, 0x3c, 0x1a, 0x9b, 0xd0, 0x24, 0x0e, 0xfe, 0x06, 0x2c, 0x5f, 0xa8, + 0x6b, 0x1f, 0xd7, 0x97, 0x64, 0xa1, 0xdd, 0x99, 0x52, 0x68, 0x89, 0x2b, 0xa2, 0xb5, 0xa5, 0xe4, + 0x97, 0xa3, 0x61, 0x8e, 0xc6, 0x6a, 0xf0, 0xc7, 0x60, 0x49, 0xbe, 0x9c, 0xd4, 0xf5, 0xb2, 0x8c, + 0x66, 0x43, 0xc1, 0x97, 0x1e, 0x8d, 0x86, 0x51, 0x64, 0x8f, 0xa0, 0x27, 0x8d, 0x07, 0xfa, 0x72, + 0x16, 0x7a, 0xd2, 0x78, 0x80, 0x22, 0x3b, 0x7c, 0x06, 0x96, 0x38, 0x79, 0x4c, 0xdd, 0xe0, 0x4a, + 0x07, 0x72, 0xcb, 0xdd, 0x9d, 0x12, 0x6e, 0xf3, 0x48, 0x22, 0x53, 0x17, 0xee, 0xb1, 0xba, 0xb2, + 0xa3, 0x48, 0x12, 0xda, 0x60, 0x99, 0x05, 0xee, 0x21, 0x7f, 0xca, 0x09, 0xd3, 0x57, 0x32, 0xa7, + 0x7d, 0x5a, 0x1f, 
0x45, 0xd8, 0xb4, 0x87, 0x38, 0x33, 0x31, 0x02, 0x8d, 0x85, 0xe1, 0x5f, 0x34, + 0x00, 0x79, 0xe0, 0xfb, 0x0e, 0xe9, 0x12, 0x57, 0x60, 0x47, 0xde, 0xef, 0xb9, 0xbe, 0x2a, 0xfd, + 0xfd, 0x6c, 0xda, 0x7c, 0x32, 0xa4, 0xb4, 0xe3, 0xf8, 0x98, 0xce, 0x42, 0x51, 0x8e, 0xcf, 0x30, + 0x9d, 0xe7, 0x5c, 0x3e, 0xeb, 0x6b, 0x33, 0xd3, 0x99, 0xff, 0xfd, 0x32, 0x4e, 0xa7, 0xb2, 0xa3, + 0x48, 0x12, 0x7e, 0x01, 0xf6, 0xa2, 0xaf, 0x3b, 0xe4, 0x79, 0xe2, 0x98, 0x3a, 0x84, 0xf7, 0xb9, + 0x20, 0x5d, 0x7d, 0x5d, 0x2e, 0x73, 0x45, 0x31, 0xf7, 0x50, 0x2e, 0x0a, 0x15, 0xb0, 0x61, 0x17, + 0x18, 0x51, 0x7b, 0x08, 0xf7, 0x4e, 0xdc, 0x9f, 0x8e, 0x78, 0x1b, 0x3b, 0xa3, 0x5b, 0xcb, 0x86, + 0x74, 0xf0, 0xee, 0x70, 0x60, 0x18, 0xf5, 0xe9, 0x50, 0x34, 0x4b, 0x0b, 0xfe, 0x1a, 0xe8, 0xb8, + 0xc8, 0xcf, 0xa6, 0xf4, 0xf3, 0xc3, 0xb0, 0xe7, 0x14, 0x3a, 0x28, 0x64, 0x43, 0x1f, 0x6c, 0xe2, + 0xe4, 0x77, 0x36, 0xd7, 0xb7, 0xe4, 0x2e, 0x7c, 0x6f, 0xca, 0x3a, 0xa4, 0x3e, 0xcd, 0x2d, 0x5d, + 0xa5, 0x71, 0x33, 0x65, 0xe0, 0x28, 0xa3, 0x0e, 0xaf, 0x00, 0xc4, 0xe9, 0xdf, 0x02, 0x5c, 0x87, + 0x33, 0x8f, 0x98, 0xcc, 0xbf, 0x84, 0x71, 0xa9, 0x65, 0x4c, 0x1c, 0xe5, 0xf8, 0x80, 0x8f, 0xc1, + 0x8e, 0x1a, 0x7d, 0xea, 0x72, 0x7c, 0x4e, 0x9a, 0x7d, 0xde, 0x16, 0x0e, 0xd7, 0xb7, 0x65, 0x7f, + 0xd3, 0x87, 0x03, 0x63, 0xe7, 0x30, 0xc7, 0x8e, 0x72, 0x59, 0xf0, 0x33, 0xb0, 0x79, 0xee, 0xb1, + 0x16, 0xb5, 0x6d, 0xe2, 0x46, 0x4a, 0x3b, 0x52, 0x69, 0x27, 0xcc, 0xc4, 0x71, 0xca, 0x86, 0x32, + 0x68, 0xc8, 0xc1, 0xae, 0x52, 0x6e, 0x30, 0xaf, 0x7d, 0xea, 0x05, 0xae, 0x08, 0x5b, 0x2a, 0xd7, + 0x77, 0xe3, 0x63, 0x64, 0xf7, 0x30, 0x0f, 0xf0, 0x72, 0x60, 0xdc, 0xce, 0x69, 0xe9, 0x09, 0x10, + 0xca, 0xd7, 0x86, 0x36, 0x00, 0xb2, 0x0f, 0x8c, 0xb6, 0xdc, 0xde, 0xcc, 0x4f, 0x40, 0x14, 0x83, + 0xd3, 0xbb, 0x6e, 0x3d, 0x3c, 0x99, 0xc7, 0x66, 0x34, 0xa1, 0x5b, 0xfd, 0x9b, 0x06, 0x6e, 0x16, + 0x32, 0xe1, 0x27, 0x89, 0xff, 0x0d, 0xd5, 0xd4, 0xff, 0x06, 0x98, 0x25, 0xbe, 0x81, 0xdf, 0x0d, + 0x5f, 0x6b, 0x40, 0x2f, 0xea, 0x9e, 0xf0, 0xe3, 0x44, 0x80, 0xef, 0xa4, 0x02, 0xdc, 0xca, 0xf0, + 0xde, 0x40, 0x7c, 0xff, 0xd0, 0xc0, 0x5e, 0xfe, 0xe9, 0x01, 0xef, 0x25, 0xa2, 0x33, 0x52, 0xd1, + 0x6d, 0xa4, 0x58, 0x2a, 0xb6, 0xdf, 0x83, 0x75, 0x75, 0xc6, 0x24, 0xff, 0x36, 0x25, 0x62, 0x0c, + 0x2b, 0x29, 0xbc, 0x1e, 0x2a, 0x89, 0x68, 0xa5, 0xe5, 0x87, 0x5d, 0x72, 0x0c, 0xa5, 0xd4, 0xaa, + 0xff, 0xd4, 0xc0, 0x3b, 0x33, 0x4f, 0x07, 0x68, 0x25, 0x42, 0x37, 0x53, 0xa1, 0x57, 0x8a, 0x05, + 0xde, 0xcc, 0x4f, 0x27, 0xeb, 0x83, 0xe7, 0x2f, 0x2a, 0x73, 0xdf, 0xbc, 0xa8, 0xcc, 0x7d, 0xfb, + 0xa2, 0x32, 0xf7, 0xa7, 0x61, 0x45, 0x7b, 0x3e, 0xac, 0x68, 0xdf, 0x0c, 0x2b, 0xda, 0xb7, 0xc3, + 0x8a, 0xf6, 0xbf, 0x61, 0x45, 0xfb, 0xeb, 0xff, 0x2b, 0x73, 0xbf, 0x5d, 0x52, 0x72, 0xdf, 0x07, + 0x00, 0x00, 0xff, 0xff, 0x15, 0x2e, 0xf4, 0x72, 0x59, 0x16, 0x00, 0x00, } diff --git a/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/generated.proto index aa37d948f520..e9df3c16fe4c 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/generated.proto @@ -42,7 +42,7 @@ message AllowedHostPath { // pathPrefix is the path prefix that the host volume must match. // It does not support `*`. // Trailing slashes are trimmed when validating the path prefix with a host path. 
- // + // // Examples: // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` // `/foo` would not allow `/food` or `/etc/foo` @@ -58,9 +58,11 @@ message AllowedHostPath { // created by POSTing to .../pods//evictions. message Eviction { // ObjectMeta describes the pod that is being evicted. + // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // DeleteOptions may be provided + // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2; } @@ -97,17 +99,21 @@ message IDRange { // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods message PodDisruptionBudget { + // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the PodDisruptionBudget. + // +optional optional PodDisruptionBudgetSpec spec = 2; // Most recently observed status of the PodDisruptionBudget. + // +optional optional PodDisruptionBudgetStatus status = 3; } // PodDisruptionBudgetList is a collection of PodDisruptionBudgets. message PodDisruptionBudgetList { + // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; repeated PodDisruptionBudget items = 2; @@ -119,16 +125,19 @@ message PodDisruptionBudgetSpec { // "selector" will still be available after the eviction, i.e. even in the // absence of the evicted pod. So for example you can prevent all voluntary // evictions by specifying "100%". + // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1; // Label query over pods whose evictions are managed by the disruption // budget. + // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // An eviction is allowed if at most "maxUnavailable" pods selected by // "selector" are unavailable after the eviction, i.e. even in absence of // the evicted pod. For example, one can prevent all voluntary evictions // by specifying 0. This is a mutually exclusive setting with "minAvailable". + // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 3; } @@ -242,6 +251,12 @@ message PodSecurityPolicySpec { // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. optional RunAsUserStrategyOptions runAsUser = 11; + // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. + // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the + // RunAsGroup feature gate to be enabled. + // +optional + optional RunAsGroupStrategyOptions runAsGroup = 22; + // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. optional SupplementalGroupsStrategyOptions supplementalGroups = 12; @@ -281,7 +296,7 @@ message PodSecurityPolicySpec { // Each entry is either a plain sysctl name or ends in "*" in which case it is considered // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. // Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. - // + // // Examples: // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. @@ -291,7 +306,7 @@ message PodSecurityPolicySpec { // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. // Each entry is either a plain sysctl name or ends in "*" in which case it is considered // as a prefix of forbidden sysctls. 
Single * means all sysctls are forbidden. - // + // // Examples: // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. @@ -305,6 +320,17 @@ message PodSecurityPolicySpec { repeated string allowedProcMountTypes = 21; } +// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. +message RunAsGroupStrategyOptions { + // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. + optional string rule = 1; + + // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid + // then supply a single range with the same start and end. Required for MustRunAs. + // +optional + repeated IDRange ranges = 2; +} + // RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. message RunAsUserStrategyOptions { // rule is the strategy that will dictate the allowable RunAsUser values that may be set. diff --git a/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/types.go b/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/types.go index c1a2727509a0..91ea1185878a 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/types.go @@ -28,16 +28,19 @@ type PodDisruptionBudgetSpec struct { // "selector" will still be available after the eviction, i.e. even in the // absence of the evicted pod. So for example you can prevent all voluntary // evictions by specifying "100%". + // +optional MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty" protobuf:"bytes,1,opt,name=minAvailable"` // Label query over pods whose evictions are managed by the disruption // budget. + // +optional Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` // An eviction is allowed if at most "maxUnavailable" pods selected by // "selector" are unavailable after the eviction, i.e. even in absence of // the evicted pod. For example, one can prevent all voluntary evictions // by specifying 0. This is a mutually exclusive setting with "minAvailable". + // +optional MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,3,opt,name=maxUnavailable"` } @@ -81,12 +84,15 @@ type PodDisruptionBudgetStatus struct { // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods type PodDisruptionBudget struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the PodDisruptionBudget. + // +optional Spec PodDisruptionBudgetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Most recently observed status of the PodDisruptionBudget. + // +optional Status PodDisruptionBudgetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -95,6 +101,7 @@ type PodDisruptionBudget struct { // PodDisruptionBudgetList is a collection of PodDisruptionBudgets. type PodDisruptionBudgetList struct { metav1.TypeMeta `json:",inline"` + // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` Items []PodDisruptionBudget `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -110,9 +117,11 @@ type Eviction struct { metav1.TypeMeta `json:",inline"` // ObjectMeta describes the pod that is being evicted. 
+ // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // DeleteOptions may be provided + // +optional DeleteOptions *metav1.DeleteOptions `json:"deleteOptions,omitempty" protobuf:"bytes,2,opt,name=deleteOptions"` } @@ -174,6 +183,11 @@ type PodSecurityPolicySpec struct { SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"` // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"` + // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. + // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the + // RunAsGroup feature gate to be enabled. + // +optional + RunAsGroup *RunAsGroupStrategyOptions `json:"runAsGroup,omitempty" protobuf:"bytes,22,opt,name=runAsGroup"` // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"` // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. @@ -245,6 +259,10 @@ type AllowedHostPath struct { ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` } +// AllowAllCapabilities can be used as a value for the PodSecurityPolicy.AllowAllCapabilities +// field and means that any capabilities are allowed to be requested. +var AllowAllCapabilities v1.Capability = "*" + // FSType gives strong typing to different file systems that are used by volumes. type FSType string @@ -268,8 +286,15 @@ var ( DownwardAPI FSType = "downwardAPI" FC FSType = "fc" ConfigMap FSType = "configMap" + VsphereVolume FSType = "vsphereVolume" Quobyte FSType = "quobyte" AzureDisk FSType = "azureDisk" + PhotonPersistentDisk FSType = "photonPersistentDisk" + StorageOS FSType = "storageos" + Projected FSType = "projected" + PortworxVolume FSType = "portworxVolume" + ScaleIO FSType = "scaleIO" + CSI FSType = "csi" All FSType = "*" ) @@ -319,6 +344,16 @@ type RunAsUserStrategyOptions struct { Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` } +// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. +type RunAsGroupStrategyOptions struct { + // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. + Rule RunAsGroupStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsGroupStrategy"` + // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid + // then supply a single range with the same start and end. Required for MustRunAs. + // +optional + Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` +} + // IDRange provides a min/max of an allowed range of IDs. type IDRange struct { // min is the start of the range, inclusive. @@ -340,6 +375,20 @@ const ( RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" ) +// RunAsGroupStrategy denotes strategy types for generating RunAsGroup values for a +// Security Context. +type RunAsGroupStrategy string + +const ( + // RunAsGroupStrategyMayRunAs means that container does not need to run with a particular gid. + // However, when RunAsGroup are specified, they have to fall in the defined range. 
+ RunAsGroupStrategyMayRunAs RunAsGroupStrategy = "MayRunAs" + // RunAsGroupStrategyMustRunAs means that container must run as a particular gid. + RunAsGroupStrategyMustRunAs RunAsGroupStrategy = "MustRunAs" + // RunAsGroupStrategyRunAsAny means that container may make requests for any gid. + RunAsGroupStrategyRunAsAny RunAsGroupStrategy = "RunAsAny" +) + // FSGroupStrategyOptions defines the strategy type and options used to create the strategy. type FSGroupStrategyOptions struct { // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. @@ -356,6 +405,9 @@ type FSGroupStrategyOptions struct { type FSGroupStrategyType string const ( + // FSGroupStrategyMayRunAs means that container does not need to have FSGroup of X applied. + // However, when FSGroups are specified, they have to fall in the defined range. + FSGroupStrategyMayRunAs FSGroupStrategyType = "MayRunAs" // FSGroupStrategyMustRunAs means that container must have FSGroup of X applied. FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" // FSGroupStrategyRunAsAny means that container may make requests for any FSGroup labels. @@ -378,6 +430,9 @@ type SupplementalGroupsStrategyOptions struct { type SupplementalGroupsStrategyType string const ( + // SupplementalGroupsStrategyMayRunAs means that container does not need to run with a particular gid. + // However, when gids are specified, they have to fall in the defined range. + SupplementalGroupsStrategyMayRunAs SupplementalGroupsStrategyType = "MayRunAs" // SupplementalGroupsStrategyMustRunAs means that container must run as a particular gid. SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" // SupplementalGroupsStrategyRunAsAny means that container may make requests for any gid. diff --git a/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go index df10b2a29b8b..547ef18ea410 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go @@ -162,6 +162,7 @@ var map_PodSecurityPolicySpec = map[string]string{ "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.", "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.", "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.", + "runAsGroup": "RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. If this field is omitted, the pod's RunAsGroup can take any value. This field requires the RunAsGroup feature gate to be enabled.", "supplementalGroups": "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", "fsGroup": "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.", "readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod.
If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", @@ -178,6 +179,16 @@ func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { return map_PodSecurityPolicySpec } +var map_RunAsGroupStrategyOptions = map[string]string{ + "": "RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy.", + "rule": "rule is the strategy that will dictate the allowable RunAsGroup values that may be set.", + "ranges": "ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs.", +} + +func (RunAsGroupStrategyOptions) SwaggerDoc() map[string]string { + return map_RunAsGroupStrategyOptions +} + var map_RunAsUserStrategyOptions = map[string]string{ "": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.", "rule": "rule is the strategy that will dictate the allowable RunAsUser values that may be set.", diff --git a/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go index 9af268a43820..1a02ae600701 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go @@ -348,6 +348,11 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { } in.SELinux.DeepCopyInto(&out.SELinux) in.RunAsUser.DeepCopyInto(&out.RunAsUser) + if in.RunAsGroup != nil { + in, out := &in.RunAsGroup, &out.RunAsGroup + *out = new(RunAsGroupStrategyOptions) + (*in).DeepCopyInto(*out) + } in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups) in.FSGroup.DeepCopyInto(&out.FSGroup) if in.DefaultAllowPrivilegeEscalation != nil { @@ -398,6 +403,27 @@ func (in *PodSecurityPolicySpec) DeepCopy() *PodSecurityPolicySpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RunAsGroupStrategyOptions) DeepCopyInto(out *RunAsGroupStrategyOptions) { + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsGroupStrategyOptions. +func (in *RunAsGroupStrategyOptions) DeepCopy() *RunAsGroupStrategyOptions { + if in == nil { + return nil + } + out := new(RunAsGroupStrategyOptions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) { *out = *in diff --git a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1/doc.go b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1/doc.go index eca05ac68c2e..d268804eaa17 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1/doc.go @@ -18,4 +18,5 @@ limitations under the License. 
// +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io + package v1 diff --git a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1/generated.pb.go index 21010fbeeaf3..708db3276ede 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto -// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. @@ -641,24 +640,6 @@ func (m *Subject) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go index 0ec20c88e79c..83ce310e6fa3 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go @@ -28,7 +28,7 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ - "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added", } diff --git a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1alpha1/doc.go b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1alpha1/doc.go index d8b4bdef8a7e..2e7b201623d7 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1alpha1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1alpha1/doc.go @@ -18,4 +18,5 @@ limitations under the License. // +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io + package v1alpha1 diff --git a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go index 71eced8d4ec8..e035b331fa5e 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto -// DO NOT EDIT! 
/* Package v1alpha1 is a generated protocol buffer package. @@ -641,24 +640,6 @@ func (m *Subject) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go index 1d6ef30b0eb7..d7b194ae4074 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go @@ -28,7 +28,7 @@ package v1alpha1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ - "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added", } diff --git a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1beta1/doc.go b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1beta1/doc.go index e2d442626ff1..81bdb7319efe 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1beta1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1beta1/doc.go @@ -18,4 +18,5 @@ limitations under the License. // +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io + package v1beta1 diff --git a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go index 71e5799e3e63..904a6e7a2dc2 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. 
@@ -641,24 +640,6 @@ func (m *Subject) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go index 66dba6ca13eb..c80327593d78 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go @@ -28,7 +28,7 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ - "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added", } diff --git a/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1alpha1/doc.go b/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1alpha1/doc.go index 31206998bd7a..9a00a09cfb25 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1alpha1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1alpha1/doc.go @@ -18,4 +18,5 @@ limitations under the License. // +k8s:openapi-gen=true // +groupName=scheduling.k8s.io + package v1alpha1 diff --git a/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go index 97c07c98413b..0a0d481a287c 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto -// DO NOT EDIT! /* Package v1alpha1 is a generated protocol buffer package. 
@@ -141,24 +140,6 @@ func (m *PriorityClassList) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1beta1/doc.go b/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1beta1/doc.go index 2e095cc5a708..3cfee4f6d385 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1beta1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1beta1/doc.go @@ -18,4 +18,5 @@ limitations under the License. // +k8s:openapi-gen=true // +groupName=scheduling.k8s.io + package v1beta1 diff --git a/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go index ea8f8d5e6e38..ddb285446b7f 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. @@ -141,24 +140,6 @@ func (m *PriorityClassList) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/cluster-autoscaler/vendor/k8s.io/api/settings/v1alpha1/doc.go b/cluster-autoscaler/vendor/k8s.io/api/settings/v1alpha1/doc.go index 483a9207c4a0..9bc2b9fd3cbb 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/settings/v1alpha1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/api/settings/v1alpha1/doc.go @@ -18,4 +18,5 @@ limitations under the License. 
// +k8s:openapi-gen=true // +groupName=settings.k8s.io + package v1alpha1 diff --git a/cluster-autoscaler/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go index 15285bae5083..c842131057a0 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto -// DO NOT EDIT! /* Package v1alpha1 is a generated protocol buffer package. @@ -216,24 +215,6 @@ func (m *PodPresetSpec) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/cluster-autoscaler/vendor/k8s.io/api/storage/v1/doc.go b/cluster-autoscaler/vendor/k8s.io/api/storage/v1/doc.go index 8f4a4045c43b..ff8bb34ca1c8 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1/doc.go @@ -17,4 +17,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=storage.k8s.io // +k8s:openapi-gen=true + package v1 diff --git a/cluster-autoscaler/vendor/k8s.io/api/storage/v1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/storage/v1/generated.pb.go index d43a982981d6..e4b29311b68a 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto -// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. @@ -27,6 +26,12 @@ limitations under the License. 
It has these top-level messages: StorageClass StorageClassList + VolumeAttachment + VolumeAttachmentList + VolumeAttachmentSource + VolumeAttachmentSpec + VolumeAttachmentStatus + VolumeError */ package v1 @@ -62,9 +67,39 @@ func (m *StorageClassList) Reset() { *m = StorageClassList{} func (*StorageClassList) ProtoMessage() {} func (*StorageClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } +func (*VolumeAttachment) ProtoMessage() {} +func (*VolumeAttachment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } +func (*VolumeAttachmentList) ProtoMessage() {} +func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } +func (*VolumeAttachmentSource) ProtoMessage() {} +func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } +func (*VolumeAttachmentSpec) ProtoMessage() {} +func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } +func (*VolumeAttachmentStatus) ProtoMessage() {} +func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *VolumeError) Reset() { *m = VolumeError{} } +func (*VolumeError) ProtoMessage() {} +func (*VolumeError) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + func init() { proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1.StorageClass") proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1.StorageClassList") + proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1.VolumeAttachment") + proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1.VolumeAttachmentList") + proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1.VolumeAttachmentSource") + proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1.VolumeAttachmentSpec") + proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1.VolumeAttachmentStatus") + proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1.VolumeError") } func (m *StorageClass) Marshal() (dAtA []byte, err error) { size := m.Size() @@ -205,24 +240,242 @@ func (m *StorageClassList) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 +func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachment) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l 
int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n4, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n5, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + return i, nil +} + +func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.PersistentVolumeName != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) + i += copy(dAtA[i:], *m.PersistentVolumeName) + } + return i, nil +} + +func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) + i += copy(dAtA[i:], m.Attacher) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) + n7, err := m.Source.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) + i += copy(dAtA[i:], m.NodeName) + return i, nil +} + +func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + if m.Attached { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if len(m.AttachmentMetadata) > 0 { + keysForAttachmentMetadata := make([]string, 0, len(m.AttachmentMetadata)) + for k := range m.AttachmentMetadata { + keysForAttachmentMetadata = append(keysForAttachmentMetadata, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) + for _, k := range keysForAttachmentMetadata { + dAtA[i] = 0x12 + i++ + v := m.AttachmentMetadata[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = 
encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if m.AttachError != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AttachError.Size())) + n8, err := m.AttachError.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.DetachError != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DetachError.Size())) + n9, err := m.DetachError.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} + +func (m *VolumeError) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size())) + n10, err := m.Time.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil } + func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -287,6 +540,87 @@ func (m *StorageClassList) Size() (n int) { return n } +func (m *VolumeAttachment) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeAttachmentList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *VolumeAttachmentSource) Size() (n int) { + var l int + _ = l + if m.PersistentVolumeName != nil { + l = len(*m.PersistentVolumeName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeAttachmentSpec) Size() (n int) { + var l int + _ = l + l = len(m.Attacher) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Source.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeAttachmentStatus) Size() (n int) { + var l int + _ = l + n += 2 + if len(m.AttachmentMetadata) > 0 { + for k, v := range m.AttachmentMetadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.AttachError != nil { + l = m.AttachError.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DetachError != nil { + l = m.DetachError.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeError) Size() (n int) { + var l int + _ = l + l = m.Time.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func sovGenerated(x uint64) (n int) { for { n++ @@ -331,22 +665,911 @@ func (this *StorageClassList) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&StorageClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), 
"ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StorageClass", "StorageClass", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + s := strings.Join([]string{`&StorageClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StorageClass", "StorageClass", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VolumeAttachmentSpec", "VolumeAttachmentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VolumeAttachmentStatus", "VolumeAttachmentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachmentList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachmentSource{`, + `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachmentSpec{`, + `Attacher:` + fmt.Sprintf("%v", this.Attacher) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "VolumeAttachmentSource", "VolumeAttachmentSource", 1), `&`, ``, 1) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentStatus) String() string { + if this == nil { + return "nil" + } + keysForAttachmentMetadata := make([]string, 0, len(this.AttachmentMetadata)) + for k := range this.AttachmentMetadata { + keysForAttachmentMetadata = append(keysForAttachmentMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) + mapStringForAttachmentMetadata := "map[string]string{" + for _, k := range keysForAttachmentMetadata { + mapStringForAttachmentMetadata += fmt.Sprintf("%v: %v,", k, this.AttachmentMetadata[k]) + } + mapStringForAttachmentMetadata += "}" + s := strings.Join([]string{`&VolumeAttachmentStatus{`, + `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, + `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, + `AttachError:` + strings.Replace(fmt.Sprintf("%v", this.AttachError), "VolumeError", "VolumeError", 1) + `,`, + `DetachError:` + strings.Replace(fmt.Sprintf("%v", this.DetachError), "VolumeError", "VolumeError", 1) + `,`, + `}`, + }, "") + return s +} +func (this 
*VolumeError) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeError{`, + `Time:` + strings.Replace(strings.Replace(this.Time.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *StorageClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Provisioner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Provisioner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Parameters == nil { + m.Parameters = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Parameters[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReclaimPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_core_v1.PersistentVolumeReclaimPolicy(dAtA[iNdEx:postIndex]) + m.ReclaimPolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MountOptions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MountOptions = append(m.MountOptions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowVolumeExpansion", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AllowVolumeExpansion = &b + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeBindingMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := VolumeBindingMode(dAtA[iNdEx:postIndex]) + m.VolumeBindingMode = &s + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedTopologies", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedTopologies = append(m.AllowedTopologies, k8s_io_api_core_v1.TopologySelectorTerm{}) + if err := m.AllowedTopologies[len(m.AllowedTopologies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, StorageClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, 
err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } 
+ fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, VolumeAttachment{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.PersistentVolumeName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } 
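// Fields with numbers the decoder does not recognize land in this default
// branch: skipGenerated returns the full wire length of the unknown field
// (recursing through nested groups), so advancing iNdEx past it keeps the
// unmarshaler forward-compatible with messages produced by newer schemas.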
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } -func (m *StorageClass) Unmarshal(dAtA []byte) error { +func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -369,17 +1592,17 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StorageClass: wiretype end group for non-group") + return fmt.Errorf("proto: VolumeAttachmentSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StorageClass: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VolumeAttachmentSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Attacher", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -389,27 +1612,26 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Attacher = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provisioner", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -419,26 +1641,27 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Provisioner = string(dAtA[iNdEx:postIndex]) + if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -448,19 +1671,76 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 + m.NodeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Attached", wireType) + } + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -470,12 +1750,17 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapkey uint64 + m.Attached = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttachmentMetadata", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -485,26 +1770,26 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + postIndex := iNdEx + msglen + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Parameters == nil { - m.Parameters = make(map[string]string) + if m.AttachmentMetadata == nil { + m.AttachmentMetadata = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -514,77 +1799,86 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > 
l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Parameters[mapkey] = mapvalue - } else { - var mapvalue string - m.Parameters[mapkey] = mapvalue - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReclaimPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF } - s := k8s_io_api_core_v1.PersistentVolumeReclaimPolicy(dAtA[iNdEx:postIndex]) - m.ReclaimPolicy = &s + m.AttachmentMetadata[mapkey] = mapvalue iNdEx = postIndex - case 5: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MountOptions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AttachError", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -594,75 +1888,28 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.MountOptions = append(m.MountOptions, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowVolumeExpansion", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - 
break - } - } - b := bool(v != 0) - m.AllowVolumeExpansion = &b - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeBindingMode", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + if m.AttachError == nil { + m.AttachError = &VolumeError{} } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.AttachError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - s := VolumeBindingMode(dAtA[iNdEx:postIndex]) - m.VolumeBindingMode = &s iNdEx = postIndex - case 8: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedTopologies", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DetachError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -686,8 +1933,10 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedTopologies = append(m.AllowedTopologies, k8s_io_api_core_v1.TopologySelectorTerm{}) - if err := m.AllowedTopologies[len(m.AllowedTopologies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.DetachError == nil { + m.DetachError = &VolumeError{} + } + if err := m.DetachError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -712,7 +1961,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } return nil } -func (m *StorageClassList) Unmarshal(dAtA []byte) error { +func (m *VolumeError) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -735,15 +1984,15 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StorageClassList: wiretype end group for non-group") + return fmt.Errorf("proto: VolumeError: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StorageClassList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VolumeError: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -767,15 +2016,15 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Time.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -785,22 +2034,20 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { 
return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, StorageClass{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -933,46 +2180,67 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 656 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xcd, 0x6e, 0xd3, 0x4a, - 0x14, 0x8e, 0x93, 0x9b, 0xde, 0x74, 0xd2, 0xea, 0x26, 0xbe, 0xbd, 0x92, 0x6f, 0x16, 0x4e, 0x54, - 0x36, 0x11, 0x12, 0xe3, 0xa6, 0x14, 0x54, 0x21, 0x81, 0x54, 0xa3, 0x4a, 0x20, 0xb5, 0x6a, 0xe4, - 0x56, 0x15, 0x42, 0x2c, 0x98, 0x38, 0x07, 0x77, 0x88, 0xed, 0x31, 0x33, 0x63, 0x43, 0x76, 0xbc, - 0x00, 0x12, 0xcf, 0xc3, 0x13, 0x74, 0xd9, 0x65, 0x57, 0x16, 0x35, 0x6f, 0xd1, 0x15, 0xf2, 0x0f, - 0x8d, 0x9b, 0x04, 0xd1, 0xdd, 0xcc, 0x77, 0xbe, 0xef, 0x3b, 0x33, 0xe7, 0x07, 0x3d, 0x9b, 0xec, - 0x0a, 0x4c, 0x99, 0x31, 0x09, 0x47, 0xc0, 0x7d, 0x90, 0x20, 0x8c, 0x08, 0xfc, 0x31, 0xe3, 0x46, - 0x11, 0x20, 0x01, 0x35, 0x84, 0x64, 0x9c, 0x38, 0x60, 0x44, 0x03, 0xc3, 0x01, 0x1f, 0x38, 0x91, - 0x30, 0xc6, 0x01, 0x67, 0x92, 0xa9, 0xff, 0xe5, 0x34, 0x4c, 0x02, 0x8a, 0x0b, 0x1a, 0x8e, 0x06, - 0x9d, 0x07, 0x0e, 0x95, 0x67, 0xe1, 0x08, 0xdb, 0xcc, 0x33, 0x1c, 0xe6, 0x30, 0x23, 0x63, 0x8f, - 0xc2, 0x77, 0xd9, 0x2d, 0xbb, 0x64, 0xa7, 0xdc, 0xa5, 0xb3, 0x59, 0x4a, 0x66, 0x33, 0xbe, 0x2c, - 0x53, 0x67, 0x67, 0xc6, 0xf1, 0x88, 0x7d, 0x46, 0x7d, 0xe0, 0x53, 0x23, 0x98, 0x38, 0x29, 0x20, - 0x0c, 0x0f, 0x24, 0x59, 0xa6, 0x32, 0x7e, 0xa7, 0xe2, 0xa1, 0x2f, 0xa9, 0x07, 0x0b, 0x82, 0xc7, - 0x7f, 0x12, 0x08, 0xfb, 0x0c, 0x3c, 0x32, 0xaf, 0xdb, 0xfc, 0xb2, 0x82, 0xd6, 0x8e, 0xf3, 0x02, - 0x3c, 0x77, 0x89, 0x10, 0xea, 0x5b, 0xd4, 0x48, 0x1f, 0x35, 0x26, 0x92, 0x68, 0x4a, 0x4f, 0xe9, - 0x37, 0xb7, 0xb7, 0xf0, 0xac, 0x58, 0x37, 0xde, 0x38, 0x98, 0x38, 0x29, 0x20, 0x70, 0xca, 0xc6, - 0xd1, 0x00, 0x1f, 0x8d, 0xde, 0x83, 0x2d, 0x0f, 0x41, 0x12, 0x53, 0x3d, 0x8f, 0xbb, 0x95, 0x24, - 0xee, 0xa2, 0x19, 0x66, 0xdd, 0xb8, 0xaa, 0x8f, 0x50, 0x33, 0xe0, 0x2c, 0xa2, 0x82, 0x32, 0x1f, - 0xb8, 0x56, 0xed, 0x29, 0xfd, 0x55, 0xf3, 0xdf, 0x42, 0xd2, 0x1c, 0xce, 0x42, 0x56, 0x99, 0xa7, - 0x3a, 0x08, 0x05, 0x84, 0x13, 0x0f, 0x24, 0x70, 0xa1, 0xd5, 0x7a, 0xb5, 0x7e, 0x73, 0xfb, 0x21, - 0x5e, 0xda, 0x47, 0x5c, 0xfe, 0x11, 0x1e, 0xde, 0xa8, 0xf6, 0x7d, 0xc9, 0xa7, 0xb3, 0xd7, 0xcd, - 0x02, 0x56, 0xc9, 0x5a, 0x9d, 0xa0, 0x75, 0x0e, 0xb6, 0x4b, 0xa8, 0x37, 0x64, 0x2e, 0xb5, 0xa7, - 0xda, 0x5f, 0xd9, 0x0b, 0xf7, 0x93, 0xb8, 0xbb, 0x6e, 0x95, 0x03, 0xd7, 0x71, 0x77, 0x6b, 0x71, - 0x02, 0xf0, 0x10, 0xb8, 0xa0, 0x42, 0x82, 0x2f, 0x4f, 0x99, 0x1b, 0x7a, 0x70, 0x4b, 0x63, 0xdd, - 0xf6, 0x56, 0x77, 0xd0, 0x9a, 0xc7, 0x42, 0x5f, 0x1e, 0x05, 0x92, 0x32, 0x5f, 0x68, 0xf5, 0x5e, - 0xad, 0xbf, 0x6a, 0xb6, 0x92, 0xb8, 0xbb, 0x76, 0x58, 0xc2, 0xad, 0x5b, 0x2c, 0xf5, 0x00, 0x6d, - 0x10, 0xd7, 0x65, 0x1f, 0xf3, 0x04, 0xfb, 0x9f, 0x02, 0xe2, 0xa7, 0x55, 0xd2, 0x56, 0x7a, 0x4a, - 0xbf, 0x61, 0x6a, 0x49, 0xdc, 0xdd, 0xd8, 0x5b, 0x12, 0xb7, 0x96, 0xaa, 0xd4, 0x57, 0xa8, 0x1d, - 0x65, 0x90, 0x49, 0xfd, 0x31, 0xf5, 0x9d, 0x43, 0x36, 0x06, 0xed, 0xef, 0xec, 0xd3, 0xf7, 0x93, - 0xb8, 0xdb, 0x3e, 0x9d, 0x0f, 0x5e, 0x2f, 0x03, 0xad, 0x45, 0x13, 0xf5, 0x03, 0x6a, 0x67, 0x19, - 0x61, 0x7c, 0xc2, 0x02, 0xe6, 0x32, 0x87, 0x82, 0xd0, 0x1a, 0x59, 0xeb, 0xfa, 0xe5, 0xd6, 0xa5, - 0xa5, 
0x4b, 0xfb, 0x56, 0xb0, 0xa6, 0xc7, 0xe0, 0x82, 0x2d, 0x19, 0x3f, 0x01, 0xee, 0x99, 0xff, - 0x17, 0xfd, 0x6a, 0xef, 0xcd, 0x5b, 0x59, 0x8b, 0xee, 0x9d, 0xa7, 0xe8, 0x9f, 0xb9, 0x86, 0xab, - 0x2d, 0x54, 0x9b, 0xc0, 0x34, 0x9b, 0xe6, 0x55, 0x2b, 0x3d, 0xaa, 0x1b, 0xa8, 0x1e, 0x11, 0x37, - 0x84, 0x7c, 0xf8, 0xac, 0xfc, 0xf2, 0xa4, 0xba, 0xab, 0x6c, 0x7e, 0x53, 0x50, 0xab, 0x3c, 0x3d, - 0x07, 0x54, 0x48, 0xf5, 0xcd, 0xc2, 0x4e, 0xe0, 0xbb, 0xed, 0x44, 0xaa, 0xce, 0x36, 0xa2, 0x55, - 0xfc, 0xa1, 0xf1, 0x0b, 0x29, 0xed, 0xc3, 0x0b, 0x54, 0xa7, 0x12, 0x3c, 0xa1, 0x55, 0xb3, 0xc2, - 0xdc, 0xbb, 0xc3, 0x4c, 0x9b, 0xeb, 0x85, 0x5f, 0xfd, 0x65, 0xaa, 0xb4, 0x72, 0x03, 0xb3, 0x7f, - 0x7e, 0xa5, 0x57, 0x2e, 0xae, 0xf4, 0xca, 0xe5, 0x95, 0x5e, 0xf9, 0x9c, 0xe8, 0xca, 0x79, 0xa2, - 0x2b, 0x17, 0x89, 0xae, 0x5c, 0x26, 0xba, 0xf2, 0x3d, 0xd1, 0x95, 0xaf, 0x3f, 0xf4, 0xca, 0xeb, - 0x6a, 0x34, 0xf8, 0x19, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x64, 0x41, 0x83, 0x40, 0x05, 0x00, 0x00, + // 984 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x3d, 0x6f, 0x23, 0x45, + 0x18, 0xce, 0xc6, 0xf9, 0x70, 0xc6, 0x09, 0x97, 0x0c, 0x01, 0x8c, 0x0b, 0x3b, 0x32, 0x05, 0xe6, + 0xe0, 0x76, 0x2f, 0xe1, 0x40, 0x27, 0x24, 0x90, 0xbc, 0x60, 0x09, 0xa4, 0xf8, 0x2e, 0x9a, 0x84, + 0x13, 0x42, 0x14, 0x4c, 0x76, 0xdf, 0xdb, 0x2c, 0xf6, 0xee, 0x2c, 0x33, 0x63, 0x43, 0x3a, 0x2a, + 0x3a, 0x24, 0x68, 0xf9, 0x29, 0x94, 0x54, 0xa1, 0xbb, 0xf2, 0x2a, 0x8b, 0x2c, 0x35, 0x7f, 0x20, + 0x15, 0x9a, 0xd9, 0x89, 0xbd, 0xb1, 0xd7, 0x9c, 0xd3, 0x5c, 0xe7, 0xf7, 0xe3, 0x79, 0xde, 0xef, + 0x59, 0xa3, 0x4f, 0x7a, 0x0f, 0x85, 0x1d, 0x32, 0xa7, 0x37, 0x38, 0x05, 0x1e, 0x83, 0x04, 0xe1, + 0x0c, 0x21, 0xf6, 0x19, 0x77, 0x8c, 0x81, 0x26, 0xa1, 0x23, 0x24, 0xe3, 0x34, 0x00, 0x67, 0xb8, + 0xef, 0x04, 0x10, 0x03, 0xa7, 0x12, 0x7c, 0x3b, 0xe1, 0x4c, 0x32, 0xfc, 0x5a, 0xe6, 0x66, 0xd3, + 0x24, 0xb4, 0x8d, 0x9b, 0x3d, 0xdc, 0xaf, 0xdd, 0x0b, 0x42, 0x79, 0x36, 0x38, 0xb5, 0x3d, 0x16, + 0x39, 0x01, 0x0b, 0x98, 0xa3, 0xbd, 0x4f, 0x07, 0x4f, 0xb5, 0xa4, 0x05, 0xfd, 0x2b, 0x63, 0xa9, + 0x35, 0x73, 0xc1, 0x3c, 0xc6, 0x8b, 0x22, 0xd5, 0x1e, 0x4c, 0x7c, 0x22, 0xea, 0x9d, 0x85, 0x31, + 0xf0, 0x73, 0x27, 0xe9, 0x05, 0x4a, 0x21, 0x9c, 0x08, 0x24, 0x2d, 0x42, 0x39, 0xf3, 0x50, 0x7c, + 0x10, 0xcb, 0x30, 0x82, 0x19, 0xc0, 0x87, 0x2f, 0x02, 0x08, 0xef, 0x0c, 0x22, 0x3a, 0x8d, 0x6b, + 0xfe, 0xb2, 0x86, 0x36, 0x8f, 0xb3, 0x06, 0x7c, 0xda, 0xa7, 0x42, 0xe0, 0x6f, 0x51, 0x59, 0x25, + 0xe5, 0x53, 0x49, 0xab, 0xd6, 0x9e, 0xd5, 0xaa, 0x1c, 0xdc, 0xb7, 0x27, 0xcd, 0x1a, 0x73, 0xdb, + 0x49, 0x2f, 0x50, 0x0a, 0x61, 0x2b, 0x6f, 0x7b, 0xb8, 0x6f, 0x3f, 0x3e, 0xfd, 0x0e, 0x3c, 0xd9, + 0x05, 0x49, 0x5d, 0x7c, 0x31, 0x6a, 0x2c, 0xa5, 0xa3, 0x06, 0x9a, 0xe8, 0xc8, 0x98, 0x15, 0x7f, + 0x80, 0x2a, 0x09, 0x67, 0xc3, 0x50, 0x84, 0x2c, 0x06, 0x5e, 0x5d, 0xde, 0xb3, 0x5a, 0x1b, 0xee, + 0xab, 0x06, 0x52, 0x39, 0x9a, 0x98, 0x48, 0xde, 0x0f, 0x07, 0x08, 0x25, 0x94, 0xd3, 0x08, 0x24, + 0x70, 0x51, 0x2d, 0xed, 0x95, 0x5a, 0x95, 0x83, 0xf7, 0xed, 0xc2, 0x39, 0xda, 0xf9, 0x8a, 0xec, + 0xa3, 0x31, 0xaa, 0x13, 0x4b, 0x7e, 0x3e, 0xc9, 0x6e, 0x62, 0x20, 0x39, 0x6a, 0xdc, 0x43, 0x5b, + 0x1c, 0xbc, 0x3e, 0x0d, 0xa3, 0x23, 0xd6, 0x0f, 0xbd, 0xf3, 0xea, 0x8a, 0xce, 0xb0, 0x93, 0x8e, + 0x1a, 0x5b, 0x24, 0x6f, 0xb8, 0x1a, 0x35, 0xee, 0xcf, 0x6e, 0x80, 0x7d, 0x04, 0x5c, 0x84, 0x42, + 0x42, 0x2c, 0x9f, 0xb0, 0xfe, 0x20, 0x82, 0x1b, 0x18, 0x72, 0x93, 0x1b, 0x3f, 0x40, 0x9b, 0x11, + 0x1b, 0xc4, 0xf2, 0x71, 0x22, 0x43, 0x16, 0x8b, 0xea, 0xea, 0x5e, 0xa9, 0xb5, 
0xe1, 0x6e, 0xa7, + 0xa3, 0xc6, 0x66, 0x37, 0xa7, 0x27, 0x37, 0xbc, 0xf0, 0x21, 0xda, 0xa5, 0xfd, 0x3e, 0xfb, 0x21, + 0x0b, 0xd0, 0xf9, 0x31, 0xa1, 0xb1, 0xea, 0x52, 0x75, 0x6d, 0xcf, 0x6a, 0x95, 0xdd, 0x6a, 0x3a, + 0x6a, 0xec, 0xb6, 0x0b, 0xec, 0xa4, 0x10, 0x85, 0xbf, 0x42, 0x3b, 0x43, 0xad, 0x72, 0xc3, 0xd8, + 0x0f, 0xe3, 0xa0, 0xcb, 0x7c, 0xa8, 0xae, 0xeb, 0xa2, 0xef, 0xa6, 0xa3, 0xc6, 0xce, 0x93, 0x69, + 0xe3, 0x55, 0x91, 0x92, 0xcc, 0x92, 0xe0, 0xef, 0xd1, 0x8e, 0x8e, 0x08, 0xfe, 0x09, 0x4b, 0x58, + 0x9f, 0x05, 0x21, 0x88, 0x6a, 0x59, 0x8f, 0xae, 0x95, 0x1f, 0x9d, 0x6a, 0x9d, 0x9a, 0x9b, 0xf1, + 0x3a, 0x3f, 0x86, 0x3e, 0x78, 0x92, 0xf1, 0x13, 0xe0, 0x91, 0xfb, 0xa6, 0x99, 0xd7, 0x4e, 0x7b, + 0x9a, 0x8a, 0xcc, 0xb2, 0xd7, 0x3e, 0x46, 0x77, 0xa6, 0x06, 0x8e, 0xb7, 0x51, 0xa9, 0x07, 0xe7, + 0x7a, 0x9b, 0x37, 0x88, 0xfa, 0x89, 0x77, 0xd1, 0xea, 0x90, 0xf6, 0x07, 0x90, 0x2d, 0x1f, 0xc9, + 0x84, 0x8f, 0x96, 0x1f, 0x5a, 0xcd, 0x3f, 0x2c, 0xb4, 0x9d, 0xdf, 0x9e, 0xc3, 0x50, 0x48, 0xfc, + 0xcd, 0xcc, 0x4d, 0xd8, 0x8b, 0xdd, 0x84, 0x42, 0xeb, 0x8b, 0xd8, 0x36, 0x35, 0x94, 0xaf, 0x35, + 0xb9, 0x7b, 0xf8, 0x1c, 0xad, 0x86, 0x12, 0x22, 0x51, 0x5d, 0xd6, 0x8d, 0x79, 0x6b, 0x81, 0x9d, + 0x76, 0xb7, 0x0c, 0xdf, 0xea, 0x17, 0x0a, 0x49, 0x32, 0x82, 0xe6, 0xef, 0xcb, 0x68, 0x3b, 0x9b, + 0x4b, 0x5b, 0x4a, 0xea, 0x9d, 0x45, 0x10, 0xcb, 0x97, 0x70, 0xd0, 0x5d, 0xb4, 0x22, 0x12, 0xf0, + 0x74, 0x33, 0x2b, 0x07, 0xef, 0xce, 0xc9, 0x7f, 0x3a, 0xb1, 0xe3, 0x04, 0x3c, 0x77, 0xd3, 0x10, + 0xaf, 0x28, 0x89, 0x68, 0x1a, 0xfc, 0x25, 0x5a, 0x13, 0x92, 0xca, 0x81, 0x3a, 0x72, 0x45, 0x78, + 0x6f, 0x51, 0x42, 0x0d, 0x72, 0x5f, 0x31, 0x94, 0x6b, 0x99, 0x4c, 0x0c, 0x59, 0xf3, 0x4f, 0x0b, + 0xed, 0x4e, 0x43, 0x5e, 0xc2, 0x74, 0x0f, 0x6f, 0x4e, 0xf7, 0xed, 0x05, 0x8b, 0x99, 0x33, 0xe1, + 0xa7, 0xe8, 0xf5, 0x99, 0xb2, 0xd9, 0x80, 0x7b, 0xa0, 0x9e, 0x84, 0x64, 0xea, 0xe1, 0x79, 0x44, + 0x23, 0xc8, 0xb6, 0x3e, 0x7b, 0x12, 0x8e, 0x0a, 0xec, 0xa4, 0x10, 0xd5, 0xfc, 0xab, 0xa0, 0x59, + 0x6a, 0x44, 0xf8, 0x3d, 0x54, 0xa6, 0x5a, 0x03, 0xdc, 0x50, 0x8f, 0x8b, 0x6f, 0x1b, 0x3d, 0x19, + 0x7b, 0xe8, 0x51, 0xea, 0xf4, 0xcc, 0x6e, 0x2c, 0x3c, 0x4a, 0x0d, 0xca, 0x8d, 0x52, 0xcb, 0xc4, + 0x90, 0xa9, 0x24, 0x62, 0xe6, 0x67, 0xf5, 0x95, 0x6e, 0x26, 0xf1, 0xc8, 0xe8, 0xc9, 0xd8, 0xa3, + 0xf9, 0x6f, 0xa9, 0xa0, 0x69, 0x7a, 0x27, 0x72, 0xd5, 0xf8, 0xba, 0x9a, 0xf2, 0x4c, 0x35, 0xfe, + 0xb8, 0x1a, 0x1f, 0xff, 0x66, 0x21, 0x4c, 0xc7, 0x14, 0xdd, 0xeb, 0x9d, 0xc9, 0x06, 0xdb, 0xb9, + 0xd5, 0x96, 0xda, 0xed, 0x19, 0x9e, 0xec, 0xe3, 0x54, 0x33, 0xf1, 0xf1, 0xac, 0x03, 0x29, 0x08, + 0x8e, 0x7d, 0x54, 0xc9, 0xb4, 0x1d, 0xce, 0x19, 0x37, 0x17, 0xd3, 0xfc, 0xdf, 0x5c, 0xb4, 0xa7, + 0x5b, 0x57, 0x1f, 0xdb, 0xf6, 0x04, 0x7a, 0x35, 0x6a, 0x54, 0x72, 0x76, 0x92, 0xa7, 0x55, 0x51, + 0x7c, 0x98, 0x44, 0x59, 0xb9, 0x5d, 0x94, 0xcf, 0x60, 0x7e, 0x94, 0x1c, 0x6d, 0xad, 0x83, 0xde, + 0x98, 0xd3, 0x96, 0x5b, 0x3d, 0xe1, 0x3f, 0x5b, 0x28, 0x1f, 0x03, 0x1f, 0xa2, 0x15, 0xf5, 0x0f, + 0xc8, 0xdc, 0xf6, 0xdd, 0xc5, 0x6e, 0xfb, 0x24, 0x8c, 0x60, 0xf2, 0x3a, 0x29, 0x89, 0x68, 0x16, + 0xfc, 0x0e, 0x5a, 0x8f, 0x40, 0x08, 0x1a, 0x98, 0xc8, 0xee, 0x1d, 0xe3, 0xb4, 0xde, 0xcd, 0xd4, + 0xe4, 0xda, 0xee, 0xb6, 0x2e, 0x2e, 0xeb, 0x4b, 0xcf, 0x2e, 0xeb, 0x4b, 0xcf, 0x2f, 0xeb, 0x4b, + 0x3f, 0xa5, 0x75, 0xeb, 0x22, 0xad, 0x5b, 0xcf, 0xd2, 0xba, 0xf5, 0x3c, 0xad, 0x5b, 0x7f, 0xa7, + 0x75, 0xeb, 0xd7, 0x7f, 0xea, 0x4b, 0x5f, 0x2f, 0x0f, 0xf7, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, + 0x85, 0x2a, 0x88, 0xc0, 0xcf, 0x0a, 0x00, 0x00, } diff --git 
a/cluster-autoscaler/vendor/k8s.io/api/storage/v1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/storage/v1/generated.proto index d1785659c02a..668c85447436 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1/generated.proto @@ -31,7 +31,7 @@ option go_package = "v1"; // StorageClass describes the parameters for a class of storage for // which PersistentVolumes can be dynamically provisioned. -// +// // StorageClasses are non-namespaced; the name of the storage class // according to etcd is in ObjectMeta.Name. message StorageClass { @@ -88,3 +88,99 @@ message StorageClassList { repeated StorageClass items = 2; } +// VolumeAttachment captures the intent to attach or detach the specified volume +// to/from the specified node. +// +// VolumeAttachment objects are non-namespaced. +message VolumeAttachment { + // Standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired attach/detach volume behavior. + // Populated by the Kubernetes system. + optional VolumeAttachmentSpec spec = 2; + + // Status of the VolumeAttachment request. + // Populated by the entity completing the attach or detach + // operation, i.e. the external-attacher. + // +optional + optional VolumeAttachmentStatus status = 3; +} + +// VolumeAttachmentList is a collection of VolumeAttachment objects. +message VolumeAttachmentList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of VolumeAttachments + repeated VolumeAttachment items = 2; +} + +// VolumeAttachmentSource represents a volume that should be attached. +// Right now only PersistenVolumes can be attached via external attacher, +// in future we may allow also inline volumes in pods. +// Exactly one member can be set. +message VolumeAttachmentSource { + // Name of the persistent volume to attach. + // +optional + optional string persistentVolumeName = 1; +} + +// VolumeAttachmentSpec is the specification of a VolumeAttachment request. +message VolumeAttachmentSpec { + // Attacher indicates the name of the volume driver that MUST handle this + // request. This is the name returned by GetPluginName(). + optional string attacher = 1; + + // Source represents the volume that should be attached. + optional VolumeAttachmentSource source = 2; + + // The node that the volume should be attached to. + optional string nodeName = 3; +} + +// VolumeAttachmentStatus is the status of a VolumeAttachment request. +message VolumeAttachmentStatus { + // Indicates the volume is successfully attached. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + optional bool attached = 1; + + // Upon successful attach, this field is populated with any + // information returned by the attach operation that must be passed + // into subsequent WaitForAttach or Mount calls. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + map attachmentMetadata = 2; + + // The last error encountered during attach operation, if any. + // This field must only be set by the entity completing the attach + // operation, i.e. 
the external-attacher. + // +optional + optional VolumeError attachError = 3; + + // The last error encountered during detach operation, if any. + // This field must only be set by the entity completing the detach + // operation, i.e. the external-attacher. + // +optional + optional VolumeError detachError = 4; +} + +// VolumeError captures an error encountered during a volume operation. +message VolumeError { + // Time the error was encountered. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; + + // String detailing the error encountered during Attach or Detach operation. + // This string maybe logged, so it should not contain sensitive + // information. + // +optional + optional string message = 2; +} + diff --git a/cluster-autoscaler/vendor/k8s.io/api/storage/v1/register.go b/cluster-autoscaler/vendor/k8s.io/api/storage/v1/register.go index c058add84001..473c687278b9 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1/register.go +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1/register.go @@ -46,6 +46,9 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &StorageClass{}, &StorageClassList{}, + + &VolumeAttachment{}, + &VolumeAttachmentList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/cluster-autoscaler/vendor/k8s.io/api/storage/v1/types.go b/cluster-autoscaler/vendor/k8s.io/api/storage/v1/types.go index 30e6d6d29bc8..9f2f67b6b761 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1/types.go @@ -102,3 +102,110 @@ const ( // binding will occur during Pod scheduing. VolumeBindingWaitForFirstConsumer VolumeBindingMode = "WaitForFirstConsumer" ) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttachment captures the intent to attach or detach the specified volume +// to/from the specified node. +// +// VolumeAttachment objects are non-namespaced. +type VolumeAttachment struct { + metav1.TypeMeta `json:",inline"` + + // Standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired attach/detach volume behavior. + // Populated by the Kubernetes system. + Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status of the VolumeAttachment request. + // Populated by the entity completing the attach or detach + // operation, i.e. the external-attacher. + // +optional + Status VolumeAttachmentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttachmentList is a collection of VolumeAttachment objects. +type VolumeAttachmentList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of VolumeAttachments + Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// VolumeAttachmentSpec is the specification of a VolumeAttachment request. +type VolumeAttachmentSpec struct { + // Attacher indicates the name of the volume driver that MUST handle this + // request. 
This is the name returned by GetPluginName(). + Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` + + // Source represents the volume that should be attached. + Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` + + // The node that the volume should be attached to. + NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` +} + +// VolumeAttachmentSource represents a volume that should be attached. +// Right now only PersistenVolumes can be attached via external attacher, +// in future we may allow also inline volumes in pods. +// Exactly one member can be set. +type VolumeAttachmentSource struct { + // Name of the persistent volume to attach. + // +optional + PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` + + // Placeholder for *VolumeSource to accommodate inline volumes in pods. +} + +// VolumeAttachmentStatus is the status of a VolumeAttachment request. +type VolumeAttachmentStatus struct { + // Indicates the volume is successfully attached. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` + + // Upon successful attach, this field is populated with any + // information returned by the attach operation that must be passed + // into subsequent WaitForAttach or Mount calls. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` + + // The last error encountered during attach operation, if any. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` + + // The last error encountered during detach operation, if any. + // This field must only be set by the entity completing the detach + // operation, i.e. the external-attacher. + // +optional + DetachError *VolumeError `json:"detachError,omitempty" protobuf:"bytes,4,opt,name=detachError,casttype=VolumeError"` +} + +// VolumeError captures an error encountered during a volume operation. +type VolumeError struct { + // Time the error was encountered. + // +optional + Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` + + // String detailing the error encountered during Attach or Detach operation. + // This string maybe logged, so it should not contain sensitive + // information. 
+ // +optional + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` +} diff --git a/cluster-autoscaler/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go index 23b76e28de9f..d4a022d52ecc 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -53,4 +53,67 @@ func (StorageClassList) SwaggerDoc() map[string]string { return map_StorageClassList } +var map_VolumeAttachment = map[string]string{ + "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", + "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", + "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", +} + +func (VolumeAttachment) SwaggerDoc() map[string]string { + return map_VolumeAttachment +} + +var map_VolumeAttachmentList = map[string]string{ + "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "Items is the list of VolumeAttachments", +} + +func (VolumeAttachmentList) SwaggerDoc() map[string]string { + return map_VolumeAttachmentList +} + +var map_VolumeAttachmentSource = map[string]string{ + "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", + "persistentVolumeName": "Name of the persistent volume to attach.", +} + +func (VolumeAttachmentSource) SwaggerDoc() map[string]string { + return map_VolumeAttachmentSource +} + +var map_VolumeAttachmentSpec = map[string]string{ + "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", + "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", + "source": "Source represents the volume that should be attached.", + "nodeName": "The node that the volume should be attached to.", +} + +func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { + return map_VolumeAttachmentSpec +} + +var map_VolumeAttachmentStatus = map[string]string{ + "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", + "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "detachError": "The last error encountered during detach operation, if any. 
This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", +} + +func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { + return map_VolumeAttachmentStatus +} + +var map_VolumeError = map[string]string{ + "": "VolumeError captures an error encountered during a volume operation.", + "time": "Time the error was encountered.", + "message": "String detailing the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information.", +} + +func (VolumeError) SwaggerDoc() map[string]string { + return map_VolumeError +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/cluster-autoscaler/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go index 0e850dc34f02..3157ec67812a 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go @@ -117,3 +117,152 @@ func (in *StorageClassList) DeepCopyObject() runtime.Object { } return nil } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachment) DeepCopyInto(out *VolumeAttachment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachment. +func (in *VolumeAttachment) DeepCopy() *VolumeAttachment { + if in == nil { + return nil + } + out := new(VolumeAttachment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttachment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VolumeAttachment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentList. +func (in *VolumeAttachmentList) DeepCopy() *VolumeAttachmentList { + if in == nil { + return nil + } + out := new(VolumeAttachmentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttachmentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) { + *out = *in + if in.PersistentVolumeName != nil { + in, out := &in.PersistentVolumeName, &out.PersistentVolumeName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSource. 
+func (in *VolumeAttachmentSource) DeepCopy() *VolumeAttachmentSource { + if in == nil { + return nil + } + out := new(VolumeAttachmentSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentSpec) DeepCopyInto(out *VolumeAttachmentSpec) { + *out = *in + in.Source.DeepCopyInto(&out.Source) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSpec. +func (in *VolumeAttachmentSpec) DeepCopy() *VolumeAttachmentSpec { + if in == nil { + return nil + } + out := new(VolumeAttachmentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentStatus) DeepCopyInto(out *VolumeAttachmentStatus) { + *out = *in + if in.AttachmentMetadata != nil { + in, out := &in.AttachmentMetadata, &out.AttachmentMetadata + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AttachError != nil { + in, out := &in.AttachError, &out.AttachError + *out = new(VolumeError) + (*in).DeepCopyInto(*out) + } + if in.DetachError != nil { + in, out := &in.DetachError, &out.DetachError + *out = new(VolumeError) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentStatus. +func (in *VolumeAttachmentStatus) DeepCopy() *VolumeAttachmentStatus { + if in == nil { + return nil + } + out := new(VolumeAttachmentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeError) DeepCopyInto(out *VolumeError) { + *out = *in + in.Time.DeepCopyInto(&out.Time) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeError. +func (in *VolumeError) DeepCopy() *VolumeError { + if in == nil { + return nil + } + out := new(VolumeError) + in.DeepCopyInto(out) + return out +} diff --git a/cluster-autoscaler/vendor/k8s.io/api/storage/v1alpha1/doc.go b/cluster-autoscaler/vendor/k8s.io/api/storage/v1alpha1/doc.go index dce99a3344b7..c6a9b9e25676 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1alpha1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1alpha1/doc.go @@ -17,4 +17,5 @@ limitations under the License. // +k8s:deepcopy-gen=package,register // +groupName=storage.k8s.io // +k8s:openapi-gen=true + package v1alpha1 diff --git a/cluster-autoscaler/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go index 507b5c1d5e68..0511ccabd811 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto -// DO NOT EDIT! /* Package v1alpha1 is a generated protocol buffer package. 
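Two mechanical changes repeat across the regenerated v1alpha1 and v1beta1 files below: the unused fixed-width encode helpers (encodeFixed64Generated, encodeFixed32Generated) are dropped, and the map-entry decoders are rewritten to dispatch on each entry field's tag (1 = key, 2 = value) instead of assuming key-then-value byte order, which also lets them tolerate absent fields and skip unknown entry fields. A minimal self-contained sketch of the new map-entry scheme for a map<string,string>; every name in it is hypothetical and nothing below is part of the generated files:

```go
package main

import (
	"errors"
	"fmt"
)

var errTruncated = errors.New("truncated map entry")

// readVarint mirrors the generated shift-7 loop.
func readVarint(b []byte) (v uint64, n int, err error) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(b) {
			return 0, 0, errTruncated
		}
		c := b[n]
		n++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 {
			return v, n, nil
		}
	}
	return 0, 0, errors.New("varint overflow")
}

// decodeStringMapEntry dispatches on each field's number rather than
// assuming key-then-value order, as the rewritten generated code does.
// Proto3 defaults apply: a missing key or value decodes as "".
func decodeStringMapEntry(b []byte) (key, value string, err error) {
	for i := 0; i < len(b); {
		tag, n, err := readVarint(b[i:])
		if err != nil {
			return "", "", err
		}
		i += n
		if wt := tag & 0x7; wt != 2 { // both entry fields are length-delimited
			return "", "", fmt.Errorf("unexpected wire type %d", wt)
		}
		l, n, err := readVarint(b[i:])
		if err != nil {
			return "", "", err
		}
		i += n
		if i+int(l) > len(b) {
			return "", "", errTruncated
		}
		s := string(b[i : i+int(l)])
		i += int(l)
		switch tag >> 3 {
		case 1:
			key = s
		case 2:
			value = s
		default:
			// the generated code calls skipGenerated here; the payload
			// was already consumed above, so we simply ignore it
		}
	}
	return key, value, nil
}

func main() {
	// One entry of {"devicePath": "/dev/xvdf"}:
	// tag 0x0A (field 1, wire type 2), length, key bytes,
	// tag 0x12 (field 2, wire type 2), length, value bytes.
	entry := []byte{0x0A, 10}
	entry = append(entry, "devicePath"...)
	entry = append(entry, 0x12, 9)
	entry = append(entry, "/dev/xvdf"...)

	k, v, err := decodeStringMapEntry(entry)
	fmt.Println(k, v, err) // devicePath /dev/xvdf <nil>
}
```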
@@ -324,24 +323,6 @@ func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1076,51 +1057,14 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.AttachmentMetadata == nil { m.AttachmentMetadata = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1130,41 +1074,80 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } 
+ intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.AttachmentMetadata[mapkey] = mapvalue - } else { - var mapvalue string - m.AttachmentMetadata[mapkey] = mapvalue } + m.AttachmentMetadata[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { diff --git a/cluster-autoscaler/vendor/k8s.io/api/storage/v1alpha1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/storage/v1alpha1/generated.proto index ccb947540f11..fdc4ad257d39 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1alpha1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1alpha1/generated.proto @@ -30,7 +30,7 @@ option go_package = "v1alpha1"; // VolumeAttachment captures the intent to attach or detach the specified volume // to/from the specified node. -// +// // VolumeAttachment objects are non-namespaced. message VolumeAttachment { // Standard object metadata. diff --git a/cluster-autoscaler/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go index 32d7dcc52f92..3701b08640d5 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go @@ -49,7 +49,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { } var map_VolumeAttachmentSource = map[string]string{ - "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", + "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", "persistentVolumeName": "Name of the persistent volume to attach.", } diff --git a/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/doc.go b/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/doc.go index 545d636fdfc6..a5eafb2c84d9 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/doc.go @@ -17,4 +17,5 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +groupName=storage.k8s.io // +k8s:openapi-gen=true + package v1beta1 diff --git a/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/generated.pb.go index fed8c7a6308c..3e995f039fc8 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. @@ -477,24 +476,6 @@ func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -892,51 +873,14 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Parameters == nil { m.Parameters = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -946,41 +890,80 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 
0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Parameters[mapkey] = mapvalue - } else { - var mapvalue string - m.Parameters[mapkey] = mapvalue } + m.Parameters[mapkey] = mapvalue iNdEx = postIndex case 4: if wireType != 2 { @@ -1799,51 +1782,14 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.AttachmentMetadata == nil { m.AttachmentMetadata = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1853,41 +1799,80 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.AttachmentMetadata[mapkey] = mapvalue - } else { - var mapvalue string - m.AttachmentMetadata[mapkey] = mapvalue } + m.AttachmentMetadata[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { diff --git a/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/generated.proto index ecf53bef60e6..db1f302a053a 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/generated.proto @@ -31,7 +31,7 @@ option go_package = "v1beta1"; // StorageClass describes the parameters for a class of storage for // which PersistentVolumes can be dynamically provisioned. -// +// // StorageClasses are non-namespaced; the name of the storage class // according to etcd is in ObjectMeta.Name. message StorageClass { @@ -90,7 +90,7 @@ message StorageClassList { // VolumeAttachment captures the intent to attach or detach the specified volume // to/from the specified node. -// +// // VolumeAttachment objects are non-namespaced. message VolumeAttachment { // Standard object metadata. 
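The regenerated unmarshalers above (for `StorageClass.Parameters` and `VolumeAttachmentStatus.AttachmentMetadata`, and again further down for `LabelSelector.MatchLabels` and `ObjectMeta.Labels`/`Annotations`) all make the same change: instead of reading a key tag and value tag in a fixed two-phase sequence, they loop over each map entry, dispatch on the protobuf field number (1 = key, 2 = value), and hand anything unrecognized to `skipGenerated`, so an entry with an absent value simply falls through to the zero value. The following standalone sketch shows that decoding pattern for a `map<string,string>` entry; it is not the generated code itself — helper names like `readUvarint` and `decodeStringMapEntry` are invented for illustration, and for brevity only varint and length-delimited unknown fields are skipped, whereas the real code's `skipGenerated` handles every wire type.

```go
package main

import (
	"errors"
	"fmt"
)

var errUnexpectedEOF = errors.New("unexpected EOF")

// readUvarint decodes a base-128 protobuf varint starting at data[i]
// and returns the value plus the index just past it.
func readUvarint(data []byte, i int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); shift < 64; shift += 7 {
		if i >= len(data) {
			return 0, i, errUnexpectedEOF
		}
		b := data[i]
		i++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, i, nil
		}
	}
	return 0, i, errors.New("varint overflow")
}

// decodeStringMapEntry decodes one map<string,string> entry message:
// field 1 is the key, field 2 is the value, both length-delimited.
// Like the regenerated unmarshalers, it dispatches on the field number
// and skips anything it does not recognize, so an entry whose value
// field is absent still decodes to key -> "" (the zero value).
func decodeStringMapEntry(entry []byte) (key, value string, err error) {
	i := 0
	// readString reads one length-delimited payload at the cursor.
	readString := func() (string, error) {
		var n uint64
		n, i, err = readUvarint(entry, i)
		if err != nil {
			return "", err
		}
		end := i + int(n)
		if end < i || end > len(entry) {
			return "", errUnexpectedEOF
		}
		s := string(entry[i:end])
		i = end
		return s, nil
	}
	for i < len(entry) {
		var wire uint64
		wire, i, err = readUvarint(entry, i)
		if err != nil {
			return "", "", err
		}
		fieldNum, wireType := wire>>3, wire&0x7
		switch {
		case fieldNum == 1 && wireType == 2:
			if key, err = readString(); err != nil {
				return "", "", err
			}
		case fieldNum == 2 && wireType == 2:
			if value, err = readString(); err != nil {
				return "", "", err
			}
		case wireType == 0: // unknown varint field: consume and ignore
			if _, i, err = readUvarint(entry, i); err != nil {
				return "", "", err
			}
		case wireType == 2: // unknown length-delimited field: consume and ignore
			if _, err = readString(); err != nil {
				return "", "", err
			}
		default:
			return "", "", fmt.Errorf("unsupported wire type %d", wireType)
		}
	}
	return key, value, nil
}

func main() {
	// 0x0A = tag for field 1, wire type 2; 0x12 = tag for field 2, wire type 2.
	entry := []byte{0x0A, 0x03, 'f', 'o', 'o', 0x12, 0x03, 'b', 'a', 'r'}
	k, v, err := decodeStringMapEntry(entry)
	fmt.Printf("%q -> %q (err=%v)\n", k, v, err) // "foo" -> "bar" (err=<nil>)
}
```

The practical effect of the dispatch-and-skip shape is forward compatibility: a decoder built this way tolerates map entries whose fields arrive in any order, are duplicated, or include fields added by a newer schema, rather than failing or silently misreading them.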
diff --git a/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index 044d69f5855f..834553e1a804 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -75,7 +75,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { } var map_VolumeAttachmentSource = map[string]string{ - "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", + "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", "persistentVolumeName": "Name of the persistent volume to attach.", } diff --git a/cluster-autoscaler/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go b/cluster-autoscaler/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go index 24e72f91e596..8af73d2e1963 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go +++ b/cluster-autoscaler/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go @@ -40,6 +40,12 @@ const ( // // CustomResourceSubresources defines the subresources for CustomResources CustomResourceSubresources utilfeature.Feature = "CustomResourceSubresources" + + // owner: @mbohlool, @roycaihw + // alpha: v1.13 + // + // CustomResourceWebhookConversion defines the webhook conversion for Custom Resources. + CustomResourceWebhookConversion utilfeature.Feature = "CustomResourceWebhookConversion" ) func init() { @@ -50,6 +56,7 @@ func init() { // To add a new feature, define a key for it above and add it here. The features will be // available throughout Kubernetes binaries. 
var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureSpec{ - CustomResourceValidation: {Default: true, PreRelease: utilfeature.Beta}, - CustomResourceSubresources: {Default: true, PreRelease: utilfeature.Beta}, + CustomResourceValidation: {Default: true, PreRelease: utilfeature.Beta}, + CustomResourceSubresources: {Default: true, PreRelease: utilfeature.Beta}, + CustomResourceWebhookConversion: {Default: false, PreRelease: utilfeature.Alpha}, } diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go index bcc032df9dd6..e736a9861400 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go @@ -20,6 +20,7 @@ import ( "encoding/json" "fmt" "net/http" + "reflect" "strings" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -82,7 +83,20 @@ func (u *UnexpectedObjectError) Error() string { func FromObject(obj runtime.Object) error { switch t := obj.(type) { case *metav1.Status: - return &StatusError{*t} + return &StatusError{ErrStatus: *t} + case runtime.Unstructured: + var status metav1.Status + obj := t.UnstructuredContent() + if !reflect.DeepEqual(obj["kind"], "Status") { + break + } + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(t.UnstructuredContent(), &status); err != nil { + return err + } + if status.APIVersion != "v1" && status.APIVersion != "meta.k8s.io/v1" { + break + } + return &StatusError{ErrStatus: status} } return &UnexpectedObjectError{obj} } diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/meta/BUILD b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/meta/BUILD index 938ba9b5b73f..4ced3ce56631 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/meta/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/meta/BUILD @@ -49,7 +49,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go index 1c2a83cfacbb..6fe7458f6c4f 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go @@ -20,7 +20,7 @@ import ( "fmt" "reflect" - "github.com/golang/glog" + "k8s.io/klog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" @@ -132,12 +132,12 @@ func AsPartialObjectMetadata(m metav1.Object) *metav1beta1.PartialObjectMetadata CreationTimestamp: m.GetCreationTimestamp(), DeletionTimestamp: m.GetDeletionTimestamp(), DeletionGracePeriodSeconds: m.GetDeletionGracePeriodSeconds(), - Labels: m.GetLabels(), - Annotations: m.GetAnnotations(), - OwnerReferences: m.GetOwnerReferences(), - Finalizers: m.GetFinalizers(), - ClusterName: m.GetClusterName(), - Initializers: m.GetInitializers(), + Labels: m.GetLabels(), + Annotations: m.GetAnnotations(), + OwnerReferences: m.GetOwnerReferences(), + Finalizers: m.GetFinalizers(), + ClusterName: m.GetClusterName(), + Initializers: m.GetInitializers(), }, } } @@ -607,7 +607,7 @@ func (a genericAccessor) 
GetOwnerReferences() []metav1.OwnerReference { var ret []metav1.OwnerReference s := a.ownerReferences if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice { - glog.Errorf("expect %v to be a pointer to slice", s) + klog.Errorf("expect %v to be a pointer to slice", s) return ret } s = s.Elem() @@ -615,7 +615,7 @@ func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference { ret = make([]metav1.OwnerReference, s.Len(), s.Len()+1) for i := 0; i < s.Len(); i++ { if err := extractFromOwnerReference(s.Index(i), &ret[i]); err != nil { - glog.Errorf("extractFromOwnerReference failed: %v", err) + klog.Errorf("extractFromOwnerReference failed: %v", err) return ret } } @@ -625,13 +625,13 @@ func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference { func (a genericAccessor) SetOwnerReferences(references []metav1.OwnerReference) { s := a.ownerReferences if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice { - glog.Errorf("expect %v to be a pointer to slice", s) + klog.Errorf("expect %v to be a pointer to slice", s) } s = s.Elem() newReferences := reflect.MakeSlice(s.Type(), len(references), len(references)) for i := 0; i < len(references); i++ { if err := setOwnerReference(newReferences.Index(i), &references[i]); err != nil { - glog.Errorf("setOwnerReference failed: %v", err) + klog.Errorf("setOwnerReference failed: %v", err) return } } diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go index 802f22a63f37..9d7835bc23f9 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto -// DO NOT EDIT! /* Package resource is a generated protocol buffer package. diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto index 2c615d51bf20..acc904445228 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto @@ -27,9 +27,9 @@ option go_package = "resource"; // Quantity is a fixed-point representation of a number. // It provides convenient marshaling/unmarshaling in JSON and YAML, // in addition to String() and Int64() accessors. -// +// // The serialization format is: -// +// // ::= // (Note that may be empty, from the "" case in .) // ::= 0 | 1 | ... | 9 @@ -43,16 +43,16 @@ option go_package = "resource"; // ::= m | "" | k | M | G | T | P | E // (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) // ::= "e" | "E" -// +// // No matter which of the three exponent forms is used, no quantity may represent // a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal // places. Numbers larger or more precise will be capped or rounded up. // (E.g.: 0.1m will rounded up to 1m.) // This may be extended in the future if we require larger or smaller quantities. 
-// +// // When a Quantity is parsed from a string, it will remember the type of suffix // it had, and will use the same type again when it is serialized. -// +// // Before serializing, Quantity will be put in "canonical form". // This means that Exponent/suffix will be adjusted up or down (with a // corresponding increase or decrease in Mantissa) such that: @@ -60,22 +60,22 @@ option go_package = "resource"; // b. No fractional digits will be emitted // c. The exponent (or suffix) is as large as possible. // The sign will be omitted unless the number is negative. -// +// // Examples: // 1.5 will be serialized as "1500m" // 1.5Gi will be serialized as "1536Mi" -// +// // Note that the quantity will NEVER be internally represented by a // floating point number. That is the whole point of this exercise. -// +// // Non-canonical values will still parse as long as they are well formed, // but will be re-emitted in their canonical form. (So always use canonical // form, or don't diff.) -// +// // This format is intended to make it difficult to use these numbers without // writing some sort of special handling code in the hopes that that will // cause implementors to also use a fixed point implementation. -// +// // +protobuf=true // +protobuf.embed=string // +protobuf.options.marshal=false diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/config/v1alpha1/register.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/config/v1alpha1/register.go index c09c16389770..ddc186c9aa4a 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/config/v1alpha1/register.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/config/v1alpha1/register.go @@ -21,7 +21,11 @@ import ( ) var ( - SchemeBuilder runtime.SchemeBuilder + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder runtime.SchemeBuilder + // localSchemeBuilder extends the SchemeBuilder instance with the external types. In this package, + // defaulting and conversion init funcs are registered as well. localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme + // AddToScheme is a global function that registers this API group & version to a scheme + AddToScheme = localSchemeBuilder.AddToScheme ) diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD index 8b02313678e1..267bccc4aa9a 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD @@ -25,7 +25,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json:go_default_library", - "//vendor/github.com/ghodss/yaml:go_default_library", + "//vendor/sigs.k8s.io/yaml:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go index 52273240f0e8..7403193ab29c 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go @@ -19,4 +19,5 @@ limitations under the License. 
// +k8s:defaulter-gen=TypeMeta // +groupName=meta.k8s.io + package v1 diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go index b7508f033e35..81320c9c8827 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto -// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. @@ -1768,24 +1767,6 @@ func (m *WatchEvent) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -5043,51 +5024,14 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.MatchLabels == nil { m.MatchLabels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5097,41 +5041,80 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 
0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.MatchLabels[mapkey] = mapvalue - } else { - var mapvalue string - m.MatchLabels[mapkey] = mapvalue } + m.MatchLabels[mapkey] = mapvalue iNdEx = postIndex case 2: if wireType != 2 { @@ -6146,51 +6129,14 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6200,41 +6146,80 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum 
:= int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex case 12: if wireType != 2 { @@ -6262,51 +6247,14 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Annotations == nil { m.Annotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6316,41 +6264,80 @@ func (m *ObjectMeta) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Annotations[mapkey] = mapvalue - } else { - var mapvalue string - m.Annotations[mapkey] = mapvalue } + m.Annotations[mapkey] = mapvalue iNdEx = postIndex case 13: if wireType != 2 { diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index eb3237f2b717..989f076a1031 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -107,7 +107,7 @@ message APIResourceList { // APIVersions lists the versions that are available, to allow clients to // discover the API at /api, which is the root path of the legacy v1 API. -// +// // +protobuf.options.(gogoproto.goproto_stringer)=false // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object message APIVersions { @@ -211,7 +211,7 @@ message GetOptions { // GroupKind specifies a Group and a Kind, but does not force a version. 
This is useful for identifying // concepts during lookup stages without having partially valid types -// +// // +protobuf.options.(gogoproto.goproto_stringer)=false message GroupKind { optional string group = 1; @@ -221,7 +221,7 @@ message GroupKind { // GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying // concepts during lookup stages without having partially valid types -// +// // +protobuf.options.(gogoproto.goproto_stringer)=false message GroupResource { optional string group = 1; @@ -230,7 +230,7 @@ message GroupResource { } // GroupVersion contains the "group" and the "version", which uniquely identifies the API. -// +// // +protobuf.options.(gogoproto.goproto_stringer)=false message GroupVersion { optional string group = 1; @@ -251,7 +251,7 @@ message GroupVersionForDiscovery { // GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion // to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling -// +// // +protobuf.options.(gogoproto.goproto_stringer)=false message GroupVersionKind { optional string group = 1; @@ -263,7 +263,7 @@ message GroupVersionKind { // GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion // to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling -// +// // +protobuf.options.(gogoproto.goproto_stringer)=false message GroupVersionResource { optional string group = 1; @@ -411,7 +411,7 @@ message ListOptions { // more results are available. Servers may choose not to support the limit argument and will return // all of the available results. If limit is specified and the continue field is empty, clients may // assume that no more results are available. This field is not supported if watch is true. - // + // // The server guarantees that the objects returned when using continue will be identical to issuing // a single list call without a limit - that is, no objects created, modified, or deleted after the // first request is issued will be included in any subsequent continued requests. This is sometimes @@ -432,14 +432,14 @@ message ListOptions { // a list starting from the next key, but from the latest snapshot, which is inconsistent from the // previous list results - objects that are created, modified, or deleted after the first list request // will be included in the response, as long as their keys are after the "next key". - // + // // This field is not supported when watch is true. Clients may start a watch from the last // resourceVersion value returned by the server and not miss any modifications. optional string continue = 8; } // MicroTime is version of Time with microsecond level precision. -// +// // +protobuf.options.marshal=false // +protobuf.as=Timestamp // +protobuf.options.(gogoproto.goproto_stringer)=false @@ -475,12 +475,12 @@ message ObjectMeta { // The provided value has the same validation rules as the Name field, // and may be truncated by the length of the suffix required to make the value // unique on the server. - // + // // If this field is specified and the generated name exists, the server will // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason // ServerTimeout indicating a unique name could not be found in the time allotted, and the client // should retry (optionally after the time indicated in the Retry-After header). - // + // // Applied only if Name is not specified. 
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency // +optional @@ -490,7 +490,7 @@ message ObjectMeta { // equivalent to the "default" namespace, but "default" is the canonical representation. // Not all objects are required to be scoped to a namespace - the value of this field for // those objects will be empty. - // + // // Must be a DNS_LABEL. // Cannot be updated. // More info: http://kubernetes.io/docs/user-guide/namespaces @@ -506,7 +506,7 @@ message ObjectMeta { // UID is the unique in time and space value for this object. It is typically generated by // the server on successful creation of a resource and is not allowed to change on PUT // operations. - // + // // Populated by the system. // Read-only. // More info: http://kubernetes.io/docs/user-guide/identifiers#uids @@ -518,7 +518,7 @@ message ObjectMeta { // concurrency, change detection, and the watch operation on a resource or set of resources. // Clients must treat these values as opaque and passed unmodified back to the server. // They may only be valid for a particular resource or set of resources. - // + // // Populated by the system. // Read-only. // Value must be treated as opaque by clients and . @@ -534,7 +534,7 @@ message ObjectMeta { // CreationTimestamp is a timestamp representing the server time when this object was // created. It is not guaranteed to be set in happens-before order across separate operations. // Clients may not set this value. It is represented in RFC3339 form and is in UTC. - // + // // Populated by the system. // Read-only. // Null for lists. @@ -556,7 +556,7 @@ message ObjectMeta { // exist after this timestamp, until an administrator or automated process can determine the // resource is fully terminated. // If not set, graceful deletion of the object has not been requested. - // + // // Populated by the system when a graceful deletion is requested. // Read-only. // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata @@ -598,7 +598,7 @@ message ObjectMeta { // this object has been completely initialized. Otherwise, the object is considered uninitialized // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to // observe uninitialized objects. - // + // // When an object is created, the system will populate this list with the current set of initializers. // Only privileged users may set or modify this list. Once it is empty, it may not be modified further // by any user. @@ -620,8 +620,8 @@ message ObjectMeta { } // OwnerReference contains enough information to let you identify an owning -// object. Currently, an owning object must be in the same namespace, so there -// is no namespace field. +// object. An owning object must be in the same namespace as the dependent, or +// be cluster-scoped, so there is no namespace field. message OwnerReference { // API version of the referent. optional string apiVersion = 5; @@ -734,7 +734,7 @@ message StatusCause { // Arrays are zero-indexed. Fields may appear more than once in an array of // causes due to fields having multiple errors. // Optional. - // + // // Examples: // "name" - the field "name" on the current resource // "items[0].name" - the field "name" on the first array entry in "items" @@ -785,7 +785,7 @@ message StatusDetails { // Time is a wrapper around time.Time which supports correct // marshaling to YAML and JSON. Wrappers are provided for many // of the factory methods that the time package offers. 
-// +// // +protobuf.options.marshal=false // +protobuf.as=Timestamp // +protobuf.options.(gogoproto.goproto_stringer)=false @@ -821,7 +821,7 @@ message Timestamp { // TypeMeta describes an individual object in an API response or request // with strings representing the type of the object and its API schema version. // Structures that are versioned or persisted should inline TypeMeta. -// +// // +k8s:deepcopy-gen=false message TypeMeta { // Kind is a string value representing the REST resource this object represents. @@ -852,7 +852,7 @@ message UpdateOptions { } // Verbs masks the value so protobuf can generate -// +// // +protobuf.nullable=true // +protobuf.options.(gogoproto.goproto_stringer)=false message Verbs { @@ -862,7 +862,7 @@ message Verbs { } // Event represents a single event to a watched resource. -// +// // +protobuf=true // +k8s:deepcopy-gen=true // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index 4d3a55d71697..8f488ba7ea4d 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -286,8 +286,8 @@ const ( ) // OwnerReference contains enough information to let you identify an owning -// object. Currently, an owning object must be in the same namespace, so there -// is no namespace field. +// object. An owning object must be in the same namespace as the dependent, or +// be cluster-scoped, so there is no namespace field. type OwnerReference struct { // API version of the referent. APIVersion string `json:"apiVersion" protobuf:"bytes,5,opt,name=apiVersion"` diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go index 35e800f8a488..679e709e8e39 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -240,7 +240,7 @@ func (ObjectMeta) SwaggerDoc() map[string]string { } var map_OwnerReference = map[string]string{ - "": "OwnerReference contains enough information to let you identify an owning object. Currently, an owning object must be in the same namespace, so there is no namespace field.", + "": "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.", "apiVersion": "API version of the referent.", "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "name": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go index dc461cc29682..46b0e133c37c 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go @@ -19,4 +19,5 @@ limitations under the License. 
// +k8s:defaulter-gen=TypeMeta // +groupName=meta.k8s.io + package v1beta1 diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go index fe3df6916b08..47c03d69b964 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. @@ -148,24 +147,6 @@ func (m *TableOptions) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/labels/BUILD b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/labels/BUILD index 14666aec8ec5..eeffddf89e52 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/labels/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/labels/BUILD @@ -33,7 +33,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/selection:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/labels/selector.go index 374d2ef1377a..f5a0888932f2 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/labels/selector.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/labels/selector.go @@ -23,10 +23,10 @@ import ( "strconv" "strings" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/klog" ) // Requirements is AND of all requirements. @@ -211,13 +211,13 @@ func (r *Requirement) Matches(ls Labels) bool { } lsValue, err := strconv.ParseInt(ls.Get(r.key), 10, 64) if err != nil { - glog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err) + klog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err) return false } // There should be only one strValue in r.strValues, and can be converted to a integer. 
if len(r.strValues) != 1 { - glog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r) + klog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r) return false } @@ -225,7 +225,7 @@ func (r *Requirement) Matches(ls Labels) bool { for i := range r.strValues { rValue, err = strconv.ParseInt(r.strValues[i], 10, 64) if err != nil { - glog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r) + klog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r) return false } } diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/BUILD b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/BUILD index 3c07e159539d..6b52ceff0fef 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/BUILD @@ -67,7 +67,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/converter.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/converter.go index 291d7a4e888c..dff56e03401a 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/converter.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/converter.go @@ -33,7 +33,7 @@ import ( "k8s.io/apimachinery/pkg/util/json" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "github.com/golang/glog" + "k8s.io/klog" ) // UnstructuredConverter is an interface for converting between interface{} @@ -133,10 +133,10 @@ func (c *unstructuredConverter) FromUnstructured(u map[string]interface{}, obj i newObj := reflect.New(t.Elem()).Interface() newErr := fromUnstructuredViaJSON(u, newObj) if (err != nil) != (newErr != nil) { - glog.Fatalf("FromUnstructured unexpected error for %v: error: %v", u, err) + klog.Fatalf("FromUnstructured unexpected error for %v: error: %v", u, err) } if err == nil && !c.comparison.DeepEqual(obj, newObj) { - glog.Fatalf("FromUnstructured mismatch\nobj1: %#v\nobj2: %#v", obj, newObj) + klog.Fatalf("FromUnstructured mismatch\nobj1: %#v\nobj2: %#v", obj, newObj) } } return err @@ -424,10 +424,10 @@ func (c *unstructuredConverter) ToUnstructured(obj interface{}) (map[string]inte newUnstr := map[string]interface{}{} newErr := toUnstructuredViaJSON(obj, &newUnstr) if (err != nil) != (newErr != nil) { - glog.Fatalf("ToUnstructured unexpected error for %v: error: %v; newErr: %v", obj, err, newErr) + klog.Fatalf("ToUnstructured unexpected error for %v: error: %v; newErr: %v", obj, err, newErr) } if err == nil && !c.comparison.DeepEqual(u, newUnstr) { - glog.Fatalf("ToUnstructured mismatch\nobj1: %#v\nobj2: %#v", u, newUnstr) + klog.Fatalf("ToUnstructured mismatch\nobj1: %#v\nobj2: %#v", u, newUnstr) } } if err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go index 967e0f530a32..9b15989c8270 100644 --- 
a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto -// DO NOT EDIT! /* Package runtime is a generated protocol buffer package. @@ -158,24 +157,6 @@ func (m *Unknown) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto index fb61ac96a930..0e212ec941ff 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto @@ -25,11 +25,11 @@ package k8s.io.apimachinery.pkg.runtime; option go_package = "runtime"; // RawExtension is used to hold extensions in external versions. -// +// // To use this, make a field which has RawExtension as its type in your external, versioned // struct, and Object in your internal struct. You also need to register your // various plugin types. -// +// // // Internal package: // type MyAPIObject struct { // runtime.TypeMeta `json:",inline"` @@ -38,7 +38,7 @@ option go_package = "runtime"; // type PluginA struct { // AOption string `json:"aOption"` // } -// +// // // External package: // type MyAPIObject struct { // runtime.TypeMeta `json:",inline"` @@ -47,7 +47,7 @@ option go_package = "runtime"; // type PluginA struct { // AOption string `json:"aOption"` // } -// +// // // On the wire, the JSON will look something like this: // { // "kind":"MyAPIObject", @@ -57,7 +57,7 @@ option go_package = "runtime"; // "aOption":"foo", // }, // } -// +// // So what happens? Decode first uses json or yaml to unmarshal the serialized data into // your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. // The next step is to copy (using pkg/conversion) into the internal struct. The runtime @@ -65,13 +65,13 @@ option go_package = "runtime"; // JSON stored in RawExtension, turning it into the correct object type, and storing it // in the Object. (TODO: In the case where the object is of an unknown type, a // runtime.Unknown object will be created and stored.) -// +// // +k8s:deepcopy-gen=true // +protobuf=true // +k8s:openapi-gen=true message RawExtension { // Raw is the underlying serialization of this object. - // + // // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data. optional bytes raw = 1; } @@ -83,10 +83,10 @@ message RawExtension { // ... 
// other fields // } // func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind -// +// // TypeMeta is provided here for convenience. You may use it directly from this package or define // your own with the same fields. -// +// // +k8s:deepcopy-gen=false // +protobuf=true // +k8s:openapi-gen=true @@ -103,7 +103,7 @@ message TypeMeta { // TypeMeta features-- kind, version, etc. // TODO: Make this object have easy access to field based accessors and settors for // metadata and field mutatation. -// +// // +k8s:deepcopy-gen=true // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +protobuf=true diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go index 5c9934c7399a..28a61d5fb574 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto -// DO NOT EDIT! /* Package schema is a generated protocol buffer package. diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go index 5f02961d326b..4c67ed59801b 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go @@ -66,7 +66,7 @@ func (gr GroupResource) Empty() bool { return len(gr.Group) == 0 && len(gr.Resource) == 0 } -func (gr *GroupResource) String() string { +func (gr GroupResource) String() string { if len(gr.Group) == 0 { return gr.Resource } @@ -111,7 +111,7 @@ func (gvr GroupVersionResource) GroupVersion() GroupVersion { return GroupVersion{Group: gvr.Group, Version: gvr.Version} } -func (gvr *GroupVersionResource) String() string { +func (gvr GroupVersionResource) String() string { return strings.Join([]string{gvr.Group, "/", gvr.Version, ", Resource=", gvr.Resource}, "") } @@ -130,7 +130,7 @@ func (gk GroupKind) WithVersion(version string) GroupVersionKind { return GroupVersionKind{Group: gk.Group, Version: version, Kind: gk.Kind} } -func (gk *GroupKind) String() string { +func (gk GroupKind) String() string { if len(gk.Group) == 0 { return gk.Kind } @@ -281,8 +281,8 @@ func bestMatch(kinds []GroupVersionKind, targets []GroupVersionKind) GroupVersio // ToAPIVersionAndKind is a convenience method for satisfying runtime.Object on types that // do not use TypeMeta. 
-func (gvk *GroupVersionKind) ToAPIVersionAndKind() (string, string) { - if gvk == nil { +func (gvk GroupVersionKind) ToAPIVersionAndKind() (string, string) { + if gvk.Empty() { return "", "" } return gvk.GroupVersion().String(), gvk.Kind diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/BUILD b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/BUILD index 05764c34b7d0..e323052f4d46 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/BUILD @@ -21,9 +21,9 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer/testing:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", - "//vendor/github.com/ghodss/yaml:go_default_library", "//vendor/github.com/google/gofuzz:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/sigs.k8s.io/yaml:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD index 4857211b3471..8ff68498d0a4 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD @@ -34,9 +34,9 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer/recognizer:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/framer:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/yaml:go_default_library", - "//vendor/github.com/ghodss/yaml:go_default_library", "//vendor/github.com/json-iterator/go:go_default_library", "//vendor/github.com/modern-go/reflect2:go_default_library", + "//vendor/sigs.k8s.io/yaml:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go index 382c4858e7fb..8987e74c680e 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go @@ -22,9 +22,9 @@ import ( "strconv" "unsafe" - "github.com/ghodss/yaml" jsoniter "github.com/json-iterator/go" "github.com/modern-go/reflect2" + "sigs.k8s.io/yaml" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go index a5ae3ac4bb7f..00184710760b 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go @@ -18,6 +18,7 @@ package versioning import ( "io" + "reflect" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" @@ -90,7 +91,16 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru into = versioned.Last() } - obj, gvk, err := c.decoder.Decode(data, defaultGVK, into) + // If the into object is unstructured and expresses an opinion about its group/version, + // create a new instance of the type so we always exercise the conversion path 
(skips short-circuiting on `into == obj`) + decodeInto := into + if into != nil { + if _, ok := into.(runtime.Unstructured); ok && !into.GetObjectKind().GroupVersionKind().GroupVersion().Empty() { + decodeInto = reflect.New(reflect.TypeOf(into).Elem()).Interface().(runtime.Object) + } + } + + obj, gvk, err := c.decoder.Decode(data, defaultGVK, decodeInto) if err != nil { return nil, gvk, err } diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go index 7c9b791d4ff0..50d9a366f368 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go @@ -136,12 +136,12 @@ func Handshake(req *http.Request, w http.ResponseWriter, serverProtocols []strin negotiatedProtocol := negotiateProtocol(clientProtocols, serverProtocols) if len(negotiatedProtocol) == 0 { - w.WriteHeader(http.StatusForbidden) for i := range serverProtocols { w.Header().Add(HeaderAcceptedProtocolVersions, serverProtocols[i]) } - fmt.Fprintf(w, "unable to upgrade: unable to negotiate protocol: client supports %v, server accepts %v", clientProtocols, serverProtocols) - return "", fmt.Errorf("unable to upgrade: unable to negotiate protocol: client supports %v, server supports %v", clientProtocols, serverProtocols) + err := fmt.Errorf("unable to upgrade: unable to negotiate protocol: client supports %v, server accepts %v", clientProtocols, serverProtocols) + http.Error(w, err.Error(), http.StatusForbidden) + return "", err } w.Header().Add(HeaderProtocolVersion, negotiatedProtocol) diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/BUILD b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/BUILD index 9721de40006b..2c7d03c9560e 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/BUILD @@ -39,7 +39,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/third_party/forked/golang/netutil:go_default_library", "//vendor/github.com/docker/spdystream:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go index 3dc8e23ae141..9d222faa898f 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go @@ -23,8 +23,8 @@ import ( "time" "github.com/docker/spdystream" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/klog" ) // connection maintains state about a spdystream.Connection and its associated @@ -128,7 +128,7 @@ func (c *connection) newSpdyStream(stream *spdystream.Stream) { err := c.newStreamHandler(stream, replySent) rejectStream := (err != nil) if rejectStream { - glog.Warningf("Stream rejected: %v", err) + klog.Warningf("Stream rejected: %v", err) stream.Reset() return } diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go index 
dd781cbc8036..2699597e7a56 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go @@ -67,6 +67,9 @@ type SpdyRoundTripper struct { // followRedirects indicates if the round tripper should examine responses for redirects and // follow them. followRedirects bool + // requireSameHostRedirects restricts redirect following to only follow redirects to the same host + // as the original request. + requireSameHostRedirects bool } var _ utilnet.TLSClientConfigHolder = &SpdyRoundTripper{} @@ -75,14 +78,18 @@ var _ utilnet.Dialer = &SpdyRoundTripper{} // NewRoundTripper creates a new SpdyRoundTripper that will use // the specified tlsConfig. -func NewRoundTripper(tlsConfig *tls.Config, followRedirects bool) httpstream.UpgradeRoundTripper { - return NewSpdyRoundTripper(tlsConfig, followRedirects) +func NewRoundTripper(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool) httpstream.UpgradeRoundTripper { + return NewSpdyRoundTripper(tlsConfig, followRedirects, requireSameHostRedirects) } // NewSpdyRoundTripper creates a new SpdyRoundTripper that will use // the specified tlsConfig. This function is mostly meant for unit tests. -func NewSpdyRoundTripper(tlsConfig *tls.Config, followRedirects bool) *SpdyRoundTripper { - return &SpdyRoundTripper{tlsConfig: tlsConfig, followRedirects: followRedirects} +func NewSpdyRoundTripper(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool) *SpdyRoundTripper { + return &SpdyRoundTripper{ + tlsConfig: tlsConfig, + followRedirects: followRedirects, + requireSameHostRedirects: requireSameHostRedirects, + } } // TLSClientConfig implements pkg/util/net.TLSClientConfigHolder for proper TLS checking during @@ -257,7 +264,7 @@ func (s *SpdyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) ) if s.followRedirects { - conn, rawResponse, err = utilnet.ConnectWithRedirects(req.Method, req.URL, header, req.Body, s) + conn, rawResponse, err = utilnet.ConnectWithRedirects(req.Method, req.URL, header, req.Body, s, s.requireSameHostRedirects) } else { clone := utilnet.CloneRequest(req) clone.Header = header diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go index 13353988faab..045d214d2b75 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go @@ -74,15 +74,15 @@ func (u responseUpgrader) UpgradeResponse(w http.ResponseWriter, req *http.Reque connectionHeader := strings.ToLower(req.Header.Get(httpstream.HeaderConnection)) upgradeHeader := strings.ToLower(req.Header.Get(httpstream.HeaderUpgrade)) if !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) { - w.WriteHeader(http.StatusBadRequest) - fmt.Fprintf(w, "unable to upgrade: missing upgrade headers in request: %#v", req.Header) + errorMsg := fmt.Sprintf("unable to upgrade: missing upgrade headers in request: %#v", req.Header) + http.Error(w, errorMsg, http.StatusBadRequest) return nil } hijacker, ok := w.(http.Hijacker) if !ok { - w.WriteHeader(http.StatusInternalServerError) - fmt.Fprintf(w, "unable to upgrade: unable to hijack response") + errorMsg := fmt.Sprintf("unable to upgrade: unable to hijack 
response") + http.Error(w, errorMsg, http.StatusInternalServerError) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/intstr/BUILD b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/intstr/BUILD index 3e4b9eb71e12..50bb5fa4c70a 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/intstr/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/intstr/BUILD @@ -10,7 +10,7 @@ go_test( name = "go_default_test", srcs = ["intstr_test.go"], embed = [":go_default_library"], - deps = ["//vendor/github.com/ghodss/yaml:go_default_library"], + deps = ["//vendor/sigs.k8s.io/yaml:go_default_library"], ) go_library( @@ -23,8 +23,8 @@ go_library( importpath = "k8s.io/apimachinery/pkg/util/intstr", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/gofuzz:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go index 5c2ac4f23fde..48dd7d9c551d 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto -// DO NOT EDIT! /* Package intstr is a generated protocol buffer package. @@ -81,24 +80,6 @@ func (m *IntOrString) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto index 1c3ec732e7c3..e79fb9e57266 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto @@ -29,7 +29,7 @@ option go_package = "intstr"; // inner type. This allows you to have, for example, a JSON field that can // accept a name or number. 
// TODO: Rename to Int32OrString -// +// // +protobuf=true // +protobuf.options.(gogoproto.goproto_stringer)=false // +k8s:openapi-gen=true diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go index 642b83cec217..5b26ed262631 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -25,8 +25,8 @@ import ( "strconv" "strings" - "github.com/golang/glog" "github.com/google/gofuzz" + "k8s.io/klog" ) // IntOrString is a type that can hold an int32 or a string. When used in @@ -58,7 +58,7 @@ const ( // TODO: convert to (val int32) func FromInt(val int) IntOrString { if val > math.MaxInt32 || val < math.MinInt32 { - glog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack()) + klog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack()) } return IntOrString{Type: Int, IntVal: int32(val)} } diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/mergepatch/BUILD b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/mergepatch/BUILD index 1ed8649a9ec5..aa444d4f6c23 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/mergepatch/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/mergepatch/BUILD @@ -22,7 +22,7 @@ go_library( importpath = "k8s.io/apimachinery/pkg/util/mergepatch", deps = [ "//vendor/github.com/davecgh/go-spew/spew:go_default_library", - "//vendor/github.com/ghodss/yaml:go_default_library", + "//vendor/sigs.k8s.io/yaml:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go index d09a939be30e..990fa0d43a64 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go @@ -21,7 +21,7 @@ import ( "reflect" "github.com/davecgh/go-spew/spew" - "github.com/ghodss/yaml" + "sigs.k8s.io/yaml" ) // PreconditionFunc asserts that an incompatible change is not present within a patch. 
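The recurring BUILD and import churn in this section replaces `github.com/ghodss/yaml` with `sigs.k8s.io/yaml`. The package moved to a SIG-owned home with the same API surface: it converts YAML to JSON and then delegates to `encoding/json`, so `json` struct tags keep driving field names and call sites only change their import path. A small sketch of the unchanged behavior:

```go
package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

type Config struct {
	// json tags apply: sigs.k8s.io/yaml round-trips through encoding/json,
	// exactly like the old github.com/ghodss/yaml.
	Name     string `json:"name"`
	Replicas int    `json:"replicas,omitempty"`
}

func main() {
	in := []byte("name: overprovisioning\nreplicas: 2\n")
	var c Config
	if err := yaml.Unmarshal(in, &c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c) // {Name:overprovisioning Replicas:2}

	out, _ := yaml.Marshal(c)
	fmt.Print(string(out))
}
```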
diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/net/BUILD b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/net/BUILD index 00fba56be91d..6f4542e72c1d 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/net/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/net/BUILD @@ -16,7 +16,12 @@ go_test( "util_test.go", ], embed = [":go_default_library"], - deps = ["//vendor/github.com/spf13/pflag:go_default_library"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/github.com/stretchr/testify/require:go_default_library", + ], ) go_library( @@ -32,8 +37,8 @@ go_library( importpath = "k8s.io/apimachinery/pkg/util/net", deps = [ "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/net/http2:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/net/http.go index 8abbdea82559..155667cdfc7f 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/net/http.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/net/http.go @@ -31,8 +31,8 @@ import ( "strconv" "strings" - "github.com/golang/glog" "golang.org/x/net/http2" + "k8s.io/klog" ) // JoinPreservingTrailingSlash does a path.Join of the specified elements, @@ -107,10 +107,10 @@ func SetTransportDefaults(t *http.Transport) *http.Transport { t = SetOldTransportDefaults(t) // Allow clients to disable http2 if needed. if s := os.Getenv("DISABLE_HTTP2"); len(s) > 0 { - glog.Infof("HTTP2 has been explicitly disabled") + klog.Infof("HTTP2 has been explicitly disabled") } else { if err := http2.ConfigureTransport(t); err != nil { - glog.Warningf("Transport failed http2 configuration: %v", err) + klog.Warningf("Transport failed http2 configuration: %v", err) } } return t @@ -321,9 +321,10 @@ type Dialer interface { // ConnectWithRedirects uses dialer to send req, following up to 10 redirects (relative to // originalLocation). It returns the opened net.Conn and the raw response bytes. -func ConnectWithRedirects(originalMethod string, originalLocation *url.URL, header http.Header, originalBody io.Reader, dialer Dialer) (net.Conn, []byte, error) { +// If requireSameHostRedirects is true, only redirects to the same host are permitted. +func ConnectWithRedirects(originalMethod string, originalLocation *url.URL, header http.Header, originalBody io.Reader, dialer Dialer, requireSameHostRedirects bool) (net.Conn, []byte, error) { const ( - maxRedirects = 10 + maxRedirects = 9 // Fail on the 10th redirect maxResponseSize = 16384 // play it safe to allow the potential for lots of / large headers ) @@ -367,7 +368,7 @@ redirectLoop: resp, err := http.ReadResponse(respReader, nil) if err != nil { // Unable to read the backend response; let the client handle it. - glog.Warningf("Error reading backend response: %v", err) + klog.Warningf("Error reading backend response: %v", err) break redirectLoop } @@ -387,10 +388,6 @@ redirectLoop: resp.Body.Close() // not used - // Reset the connection. - intermediateConn.Close() - intermediateConn = nil - // Prepare to follow the redirect. 
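The `ConnectWithRedirects` change above (the loop continues in the hunk below) threads a new `requireSameHostRedirects` flag through the proxied-upgrade path: when set, a redirect to a different host is no longer followed and the redirect response is surfaced to the client instead. `maxRedirects` also drops to 9 so the 10th redirect fails rather than being silently followed. A standalone sketch of the host comparison, using hypothetical URLs:

```go
package main

import (
	"fmt"
	"net/url"
)

// followRedirect reports whether a redirect from orig to location should be
// followed under the same-host policy added in this change. Illustrative only.
func followRedirect(orig, location *url.URL, requireSameHost bool) bool {
	if !requireSameHost {
		return true
	}
	// Hostname() strips any port, matching the check in ConnectWithRedirects.
	return location.Hostname() == orig.Hostname()
}

func main() {
	orig, _ := url.Parse("https://kubelet.internal:10250/exec")
	samePort, _ := url.Parse("https://kubelet.internal:10255/exec")
	other, _ := url.Parse("https://attacker.example/exec")

	fmt.Println(followRedirect(orig, samePort, true)) // true  — same host, different port
	fmt.Println(followRedirect(orig, other, true))    // false — cross-host redirect is propagated, not followed
}
```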
redirectStr := resp.Header.Get("Location") if redirectStr == "" { @@ -404,6 +401,15 @@ redirectLoop: if err != nil { return nil, nil, fmt.Errorf("malformed Location header: %v", err) } + + // Only follow redirects to the same host. Otherwise, propagate the redirect response back. + if requireSameHostRedirects && location.Hostname() != originalLocation.Hostname() { + break redirectLoop + } + + // Reset the connection. + intermediateConn.Close() + intermediateConn = nil } connToReturn := intermediateConn diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/net/interface.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/net/interface.go index 0ab9b36080b5..daf5d2496455 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/net/interface.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/net/interface.go @@ -26,7 +26,7 @@ import ( "strings" - "github.com/golang/glog" + "k8s.io/klog" ) type AddressFamily uint @@ -193,7 +193,7 @@ func isInterfaceUp(intf *net.Interface) bool { return false } if intf.Flags&net.FlagUp != 0 { - glog.V(4).Infof("Interface %v is up", intf.Name) + klog.V(4).Infof("Interface %v is up", intf.Name) return true } return false @@ -208,20 +208,20 @@ func isLoopbackOrPointToPoint(intf *net.Interface) bool { func getMatchingGlobalIP(addrs []net.Addr, family AddressFamily) (net.IP, error) { if len(addrs) > 0 { for i := range addrs { - glog.V(4).Infof("Checking addr %s.", addrs[i].String()) + klog.V(4).Infof("Checking addr %s.", addrs[i].String()) ip, _, err := net.ParseCIDR(addrs[i].String()) if err != nil { return nil, err } if memberOf(ip, family) { if ip.IsGlobalUnicast() { - glog.V(4).Infof("IP found %v", ip) + klog.V(4).Infof("IP found %v", ip) return ip, nil } else { - glog.V(4).Infof("Non-global unicast address found %v", ip) + klog.V(4).Infof("Non-global unicast address found %v", ip) } } else { - glog.V(4).Infof("%v is not an IPv%d address", ip, int(family)) + klog.V(4).Infof("%v is not an IPv%d address", ip, int(family)) } } @@ -241,13 +241,13 @@ func getIPFromInterface(intfName string, forFamily AddressFamily, nw networkInte if err != nil { return nil, err } - glog.V(4).Infof("Interface %q has %d addresses :%v.", intfName, len(addrs), addrs) + klog.V(4).Infof("Interface %q has %d addresses :%v.", intfName, len(addrs), addrs) matchingIP, err := getMatchingGlobalIP(addrs, forFamily) if err != nil { return nil, err } if matchingIP != nil { - glog.V(4).Infof("Found valid IPv%d address %v for interface %q.", int(forFamily), matchingIP, intfName) + klog.V(4).Infof("Found valid IPv%d address %v for interface %q.", int(forFamily), matchingIP, intfName) return matchingIP, nil } } @@ -275,14 +275,14 @@ func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) { return nil, fmt.Errorf("no interfaces found on host.") } for _, family := range []AddressFamily{familyIPv4, familyIPv6} { - glog.V(4).Infof("Looking for system interface with a global IPv%d address", uint(family)) + klog.V(4).Infof("Looking for system interface with a global IPv%d address", uint(family)) for _, intf := range intfs { if !isInterfaceUp(&intf) { - glog.V(4).Infof("Skipping: down interface %q", intf.Name) + klog.V(4).Infof("Skipping: down interface %q", intf.Name) continue } if isLoopbackOrPointToPoint(&intf) { - glog.V(4).Infof("Skipping: LB or P2P interface %q", intf.Name) + klog.V(4).Infof("Skipping: LB or P2P interface %q", intf.Name) continue } addrs, err := nw.Addrs(&intf) @@ -290,7 +290,7 @@ func chooseIPFromHostInterfaces(nw 
networkInterfacer) (net.IP, error) { return nil, err } if len(addrs) == 0 { - glog.V(4).Infof("Skipping: no addresses on interface %q", intf.Name) + klog.V(4).Infof("Skipping: no addresses on interface %q", intf.Name) continue } for _, addr := range addrs { @@ -299,15 +299,15 @@ func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) { return nil, fmt.Errorf("Unable to parse CIDR for interface %q: %s", intf.Name, err) } if !memberOf(ip, family) { - glog.V(4).Infof("Skipping: no address family match for %q on interface %q.", ip, intf.Name) + klog.V(4).Infof("Skipping: no address family match for %q on interface %q.", ip, intf.Name) continue } // TODO: Decide if should open up to allow IPv6 LLAs in future. if !ip.IsGlobalUnicast() { - glog.V(4).Infof("Skipping: non-global address %q on interface %q.", ip, intf.Name) + klog.V(4).Infof("Skipping: non-global address %q on interface %q.", ip, intf.Name) continue } - glog.V(4).Infof("Found global unicast address %q on interface %q.", ip, intf.Name) + klog.V(4).Infof("Found global unicast address %q on interface %q.", ip, intf.Name) return ip, nil } } @@ -381,23 +381,23 @@ func getAllDefaultRoutes() ([]Route, error) { // an IPv4 IP, and then will look at each IPv6 route for an IPv6 IP. func chooseHostInterfaceFromRoute(routes []Route, nw networkInterfacer) (net.IP, error) { for _, family := range []AddressFamily{familyIPv4, familyIPv6} { - glog.V(4).Infof("Looking for default routes with IPv%d addresses", uint(family)) + klog.V(4).Infof("Looking for default routes with IPv%d addresses", uint(family)) for _, route := range routes { if route.Family != family { continue } - glog.V(4).Infof("Default route transits interface %q", route.Interface) + klog.V(4).Infof("Default route transits interface %q", route.Interface) finalIP, err := getIPFromInterface(route.Interface, family, nw) if err != nil { return nil, err } if finalIP != nil { - glog.V(4).Infof("Found active IP %v ", finalIP) + klog.V(4).Infof("Found active IP %v ", finalIP) return finalIP, nil } } } - glog.V(4).Infof("No active IP found by looking at default routes") + klog.V(4).Infof("No active IP found by looking at default routes") return nil, fmt.Errorf("unable to select an IP from default routes.") } diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/proxy/BUILD b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/proxy/BUILD index bd1b0cc23d06..6e9d6bababfc 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/proxy/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/proxy/BUILD @@ -41,10 +41,10 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/third_party/forked/golang/netutil:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/mxk/go-flowrate/flowrate:go_default_library", "//vendor/golang.org/x/net/html:go_default_library", "//vendor/golang.org/x/net/html/atom:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/proxy/dial.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/proxy/dial.go index 37a5be487c0b..a59b24c8dc3e 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/proxy/dial.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/proxy/dial.go @@ -24,7 +24,7 @@ import ( "net/http" "net/url" - 
"github.com/golang/glog" + "k8s.io/klog" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/third_party/forked/golang/netutil" @@ -35,7 +35,7 @@ func DialURL(ctx context.Context, url *url.URL, transport http.RoundTripper) (ne dialer, err := utilnet.DialerFor(transport) if err != nil { - glog.V(5).Infof("Unable to unwrap transport %T to get dialer: %v", transport, err) + klog.V(5).Infof("Unable to unwrap transport %T to get dialer: %v", transport, err) } switch url.Scheme { @@ -52,7 +52,7 @@ func DialURL(ctx context.Context, url *url.URL, transport http.RoundTripper) (ne var err error tlsConfig, err = utilnet.TLSClientConfig(transport) if err != nil { - glog.V(5).Infof("Unable to unwrap transport %T to get at TLS config: %v", transport, err) + klog.V(5).Infof("Unable to unwrap transport %T to get at TLS config: %v", transport, err) } if dialer != nil { @@ -64,7 +64,7 @@ func DialURL(ctx context.Context, url *url.URL, transport http.RoundTripper) (ne } if tlsConfig == nil { // tls.Client requires non-nil config - glog.Warningf("using custom dialer with no TLSClientConfig. Defaulting to InsecureSkipVerify") + klog.Warningf("using custom dialer with no TLSClientConfig. Defaulting to InsecureSkipVerify") // tls.Handshake() requires ServerName or InsecureSkipVerify tlsConfig = &tls.Config{ InsecureSkipVerify: true, diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/proxy/transport.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/proxy/transport.go index 6c34ab5241de..3c8cf6da7377 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/proxy/transport.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/proxy/transport.go @@ -27,9 +27,9 @@ import ( "path" "strings" - "github.com/golang/glog" "golang.org/x/net/html" "golang.org/x/net/html/atom" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/sets" @@ -236,7 +236,7 @@ func (t *Transport) rewriteResponse(req *http.Request, resp *http.Response) (*ht // This is fine default: // Some encoding we don't understand-- don't try to parse this - glog.Errorf("Proxy encountered encoding %v for text/html; can't understand this so not fixing links.", encoding) + klog.Errorf("Proxy encountered encoding %v for text/html; can't understand this so not fixing links.", encoding) return resp, nil } @@ -245,7 +245,7 @@ func (t *Transport) rewriteResponse(req *http.Request, resp *http.Response) (*ht } err := rewriteHTML(reader, writer, urlRewriter) if err != nil { - glog.Errorf("Failed to rewrite URLs: %v", err) + klog.Errorf("Failed to rewrite URLs: %v", err) return resp, err } diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go index 4d5cd34d4878..596b1888975c 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go @@ -34,8 +34,8 @@ import ( utilnet "k8s.io/apimachinery/pkg/util/net" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "github.com/golang/glog" "github.com/mxk/go-flowrate/flowrate" + "k8s.io/klog" ) // UpgradeRequestRoundTripper provides an additional method to decorate a request @@ -68,6 +68,8 @@ type UpgradeAwareHandler struct { // InterceptRedirects determines whether the proxy should sniff backend responses for redirects, // following them as necessary. 
InterceptRedirects bool + // RequireSameHostRedirects only allows redirects to the same host. It is only used if InterceptRedirects=true. + RequireSameHostRedirects bool // UseRequestLocation will use the incoming request URL when talking to the backend server. UseRequestLocation bool // FlushInterval controls how often the standard HTTP proxy will flush content from the upstream. @@ -233,7 +235,7 @@ func (h *UpgradeAwareHandler) ServeHTTP(w http.ResponseWriter, req *http.Request // tryUpgrade returns true if the request was handled. func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Request) bool { if !httpstream.IsUpgradeRequest(req) { - glog.V(6).Infof("Request was not an upgrade") + klog.V(6).Infof("Request was not an upgrade") return false } @@ -255,15 +257,15 @@ func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Reques // handles this in the non-upgrade path. utilnet.AppendForwardedForHeader(clone) if h.InterceptRedirects { - glog.V(6).Infof("Connecting to backend proxy (intercepting redirects) %s\n Headers: %v", &location, clone.Header) - backendConn, rawResponse, err = utilnet.ConnectWithRedirects(req.Method, &location, clone.Header, req.Body, utilnet.DialerFunc(h.DialForUpgrade)) + klog.V(6).Infof("Connecting to backend proxy (intercepting redirects) %s\n Headers: %v", &location, clone.Header) + backendConn, rawResponse, err = utilnet.ConnectWithRedirects(req.Method, &location, clone.Header, req.Body, utilnet.DialerFunc(h.DialForUpgrade), h.RequireSameHostRedirects) } else { - glog.V(6).Infof("Connecting to backend proxy (direct dial) %s\n Headers: %v", &location, clone.Header) + klog.V(6).Infof("Connecting to backend proxy (direct dial) %s\n Headers: %v", &location, clone.Header) clone.URL = &location backendConn, err = h.DialForUpgrade(clone) } if err != nil { - glog.V(6).Infof("Proxy connection error: %v", err) + klog.V(6).Infof("Proxy connection error: %v", err) h.Responder.Error(w, req, err) return true } @@ -273,13 +275,13 @@ func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Reques // hijacking should be the last step in the upgrade. requestHijacker, ok := w.(http.Hijacker) if !ok { - glog.V(6).Infof("Unable to hijack response writer: %T", w) + klog.V(6).Infof("Unable to hijack response writer: %T", w) h.Responder.Error(w, req, fmt.Errorf("request connection cannot be hijacked: %T", w)) return true } requestHijackedConn, _, err := requestHijacker.Hijack() if err != nil { - glog.V(6).Infof("Unable to hijack response: %v", err) + klog.V(6).Infof("Unable to hijack response: %v", err) h.Responder.Error(w, req, fmt.Errorf("error hijacking connection: %v", err)) return true } @@ -287,7 +289,7 @@ func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Reques // Forward raw response bytes back to client. 
if len(rawResponse) > 0 { - glog.V(6).Infof("Writing %d bytes to hijacked connection", len(rawResponse)) + klog.V(6).Infof("Writing %d bytes to hijacked connection", len(rawResponse)) if _, err = requestHijackedConn.Write(rawResponse); err != nil { utilruntime.HandleError(fmt.Errorf("Error proxying response from backend to client: %v", err)) } @@ -309,7 +311,7 @@ func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Reques } _, err := io.Copy(writer, requestHijackedConn) if err != nil && !strings.Contains(err.Error(), "use of closed network connection") { - glog.Errorf("Error proxying data from client to backend: %v", err) + klog.Errorf("Error proxying data from client to backend: %v", err) } close(writerComplete) }() @@ -323,7 +325,7 @@ func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Reques } _, err := io.Copy(requestHijackedConn, reader) if err != nil && !strings.Contains(err.Error(), "use of closed network connection") { - glog.Errorf("Error proxying data from backend to client: %v", err) + klog.Errorf("Error proxying data from backend to client: %v", err) } close(readerComplete) }() @@ -334,7 +336,7 @@ func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Reques case <-writerComplete: case <-readerComplete: } - glog.V(6).Infof("Disconnecting from backend proxy %s\n Headers: %v", &location, clone.Header) + klog.V(6).Infof("Disconnecting from backend proxy %s\n Headers: %v", &location, clone.Header) return true } diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go index 9421edae866a..82a473bb146f 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go @@ -27,7 +27,14 @@ var rng = struct { sync.Mutex rand *rand.Rand }{ - rand: rand.New(rand.NewSource(time.Now().UTC().UnixNano())), + rand: rand.New(rand.NewSource(time.Now().UnixNano())), +} + +// Int returns a non-negative pseudo-random int. +func Int() int { + rng.Lock() + defer rng.Unlock() + return rng.rand.Int() } // Intn generates an integer in range [0,max). diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/runtime/BUILD b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/runtime/BUILD index 6bdeeb3d246d..cf8f21416730 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/runtime/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/runtime/BUILD @@ -17,7 +17,7 @@ go_library( srcs = ["runtime.go"], importmap = "k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/runtime", importpath = "k8s.io/apimachinery/pkg/util/runtime", - deps = ["//vendor/github.com/golang/glog:go_default_library"], + deps = ["//vendor/k8s.io/klog:go_default_library"], ) filegroup( diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index da32fe12f33b..8e34f9261397 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -22,7 +22,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" ) var ( @@ -63,7 +63,11 @@ func HandleCrash(additionalHandlers ...func(interface{})) { // logPanic logs the caller tree when a panic occurs. 
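The `util/rand` hunk above adds a mutex-guarded `Int()` helper and seeds with `time.Now().UnixNano()` directly; the old `.UTC()` call was redundant because `UnixNano` is an absolute instant independent of time zone. A sketch of the same pattern:

```go
package main

import (
	"fmt"
	"math/rand"
	"sync"
	"time"
)

// rng mirrors the vendored util/rand pattern: one seeded source behind a
// mutex, since *rand.Rand is not safe for concurrent use.
var rng = struct {
	sync.Mutex
	rand *rand.Rand
}{
	// UnixNano is timezone-independent, so the former .UTC() changed nothing.
	rand: rand.New(rand.NewSource(time.Now().UnixNano())),
}

// Int returns a non-negative pseudo-random int, like the helper added above.
func Int() int {
	rng.Lock()
	defer rng.Unlock()
	return rng.rand.Int()
}

func main() {
	fmt.Println(Int(), Int())
}
```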
func logPanic(r interface{}) { callers := getCallers(r) - glog.Errorf("Observed a panic: %#v (%v)\n%v", r, r, callers) + if _, ok := r.(string); ok { + klog.Errorf("Observed a panic: %s\n%v", r, callers) + } else { + klog.Errorf("Observed a panic: %#v (%v)\n%v", r, r, callers) + } } func getCallers(r interface{}) string { @@ -111,7 +115,7 @@ func HandleError(err error) { // logError prints an error with the call stack of the location it was reported func logError(err error) { - glog.ErrorDepth(2, err) + klog.ErrorDepth(2, err) } type rudimentaryErrorBackoff struct { diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/BUILD b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/BUILD index e0a947ece3fc..5e3c9b419ed9 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/BUILD @@ -21,7 +21,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/testing:go_default_library", "//vendor/github.com/davecgh/go-spew/spew:go_default_library", - "//vendor/github.com/ghodss/yaml:go_default_library", + "//vendor/sigs.k8s.io/yaml:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/version/BUILD b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/version/BUILD new file mode 100644 index 000000000000..cb2162de6929 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/version/BUILD @@ -0,0 +1,36 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "version.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/version", + importpath = "k8s.io/apimachinery/pkg/util/version", +) + +go_test( + name = "go_default_test", + srcs = ["version_test.go"], + embed = [":go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/version/doc.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/version/doc.go similarity index 100% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/version/doc.go rename to cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/version/doc.go diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/version/version.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/version/version.go similarity index 100% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/version/version.go rename to cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/version/version.go diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/yaml/BUILD b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/yaml/BUILD index 64fa65e75669..ad28913e0823 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/yaml/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/yaml/BUILD @@ -18,8 +18,8 @@ go_library( importmap = "k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/yaml", importpath = "k8s.io/apimachinery/pkg/util/yaml", deps = [ - "//vendor/github.com/ghodss/yaml:go_default_library", - 
"//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/sigs.k8s.io/yaml:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go index 3cd85515d433..63d735a804cf 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go @@ -26,8 +26,8 @@ import ( "strings" "unicode" - "github.com/ghodss/yaml" - "github.com/golang/glog" + "k8s.io/klog" + "sigs.k8s.io/yaml" ) // ToJSON converts a single YAML document into a JSON document @@ -217,11 +217,11 @@ func (d *YAMLOrJSONDecoder) Decode(into interface{}) error { if d.decoder == nil { buffer, origData, isJSON := GuessJSONStream(d.r, d.bufferSize) if isJSON { - glog.V(4).Infof("decoding stream as JSON") + klog.V(4).Infof("decoding stream as JSON") d.decoder = json.NewDecoder(buffer) d.rawData = origData } else { - glog.V(4).Infof("decoding stream as YAML") + klog.V(4).Infof("decoding stream as YAML") d.decoder = NewYAMLToJSONDecoder(buffer) } } @@ -230,7 +230,7 @@ func (d *YAMLOrJSONDecoder) Decode(into interface{}) error { if syntax, ok := err.(*json.SyntaxError); ok { data, readErr := ioutil.ReadAll(jsonDecoder.Buffered()) if readErr != nil { - glog.V(4).Infof("reading stream failed: %v", readErr) + klog.V(4).Infof("reading stream failed: %v", readErr) } js := string(data) diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/version/doc.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/version/doc.go index 00431d489908..e64ad36ed25d 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/version/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/version/doc.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package version supplies the type for version information collected at build time. // +k8s:openapi-gen=true + +// Package version supplies the type for version information collected at build time. package version diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/watch/BUILD b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/watch/BUILD index 49ebc15246e4..5b9e1880993e 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/watch/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/watch/BUILD @@ -23,7 +23,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go index 93bb1cdf7f6b..d61cf5a2e58b 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go @@ -20,10 +20,10 @@ import ( "io" "sync" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/net" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/klog" ) // Decoder allows StreamWatcher to watch any stream for which a Decoder can be written. 
@@ -100,13 +100,13 @@ func (sw *StreamWatcher) receive() { case io.EOF: // watch closed normally case io.ErrUnexpectedEOF: - glog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err) + klog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err) default: msg := "Unable to decode an event from the watch stream: %v" if net.IsProbableEOF(err) { - glog.V(5).Infof(msg, err) + klog.V(5).Infof(msg, err) } else { - glog.Errorf(msg, err) + klog.Errorf(msg, err) } } return diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/watch/watch.go index a627d1d572c3..be9c90c03d10 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/watch/watch.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/watch/watch.go @@ -20,7 +20,7 @@ import ( "fmt" "sync" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/runtime" ) @@ -106,7 +106,7 @@ func (f *FakeWatcher) Stop() { f.Lock() defer f.Unlock() if !f.Stopped { - glog.V(4).Infof("Stopping fake watcher.") + klog.V(4).Infof("Stopping fake watcher.") close(f.result) f.Stopped = true } @@ -173,7 +173,7 @@ func (f *RaceFreeFakeWatcher) Stop() { f.Lock() defer f.Unlock() if !f.Stopped { - glog.V(4).Infof("Stopping fake watcher.") + klog.V(4).Infof("Stopping fake watcher.") close(f.result) f.Stopped = true } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/BUILD index f519566c49aa..e45f33e1ecfa 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/BUILD @@ -59,8 +59,8 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/apis/audit:go_default_library", "//staging/src/k8s.io/apiserver/pkg/audit:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", - "//vendor/github.com/ghodss/yaml:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/sigs.k8s.io/yaml:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/config.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/config.go index f59d0608bced..ffda2f3262c8 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/config.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/config.go @@ -25,8 +25,8 @@ import ( "path" "path/filepath" - "github.com/ghodss/yaml" - "github.com/golang/glog" + "k8s.io/klog" + "sigs.k8s.io/yaml" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -146,7 +146,7 @@ func GetAdmissionPluginConfigurationFor(pluginCfg apiserver.AdmissionPluginConfi if pluginCfg.Path != "" { content, err := ioutil.ReadFile(pluginCfg.Path) if err != nil { - glog.Fatalf("Couldn't open admission plugin configuration %s: %#v", pluginCfg.Path, err) + klog.Fatalf("Couldn't open admission plugin configuration %s: %#v", pluginCfg.Path, err) return nil, err } return bytes.NewBuffer(content), nil diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugins.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugins.go index c17d62cd4e67..bdf087e564f4 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugins.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/admission/plugins.go @@ -26,7 +26,7 @@ import ( "strings" "sync" - "github.com/golang/glog" + 
"k8s.io/klog" ) // Factory is a function that returns an Interface for admission decisions. @@ -75,13 +75,13 @@ func (ps *Plugins) Register(name string, plugin Factory) { if ps.registry != nil { _, found := ps.registry[name] if found { - glog.Fatalf("Admission plugin %q was registered twice", name) + klog.Fatalf("Admission plugin %q was registered twice", name) } } else { ps.registry = map[string]Factory{} } - glog.V(1).Infof("Registered admission plugin %q", name) + klog.V(1).Infof("Registered admission plugin %q", name) ps.registry[name] = plugin } @@ -155,10 +155,10 @@ func (ps *Plugins) NewFromPlugins(pluginNames []string, configProvider ConfigPro } } if len(mutationPlugins) != 0 { - glog.Infof("Loaded %d mutating admission controller(s) successfully in the following order: %s.", len(mutationPlugins), strings.Join(mutationPlugins, ",")) + klog.Infof("Loaded %d mutating admission controller(s) successfully in the following order: %s.", len(mutationPlugins), strings.Join(mutationPlugins, ",")) } if len(validationPlugins) != 0 { - glog.Infof("Loaded %d validating admission controller(s) successfully in the following order: %s.", len(validationPlugins), strings.Join(validationPlugins, ",")) + klog.Infof("Loaded %d validating admission controller(s) successfully in the following order: %s.", len(validationPlugins), strings.Join(validationPlugins, ",")) } return chainAdmissionHandler(handlers), nil } @@ -166,7 +166,7 @@ func (ps *Plugins) NewFromPlugins(pluginNames []string, configProvider ConfigPro // InitPlugin creates an instance of the named interface. func (ps *Plugins) InitPlugin(name string, config io.Reader, pluginInitializer PluginInitializer) (Interface, error) { if name == "" { - glog.Info("No admission plugin specified.") + klog.Info("No admission plugin specified.") return nil, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/doc.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/doc.go index 004efce15ec5..1902ea64505a 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/doc.go @@ -15,7 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +groupName=apiserver.k8s.io // Package apiserver is the internal version of the API. -// +groupName=apiserver.k8s.io package apiserver diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/doc.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/doc.go index bd9a47a8264d..2e7cf4083fad 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/doc.go @@ -17,7 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/apiserver/pkg/apis/apiserver // +k8s:defaulter-gen=TypeMeta +// +groupName=apiserver.k8s.io // Package v1alpha1 is the v1alpha1 version of the API. 
-// +groupName=apiserver.k8s.io package v1alpha1 diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/OWNERS b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/OWNERS new file mode 100644 index 000000000000..8389778900fe --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/OWNERS @@ -0,0 +1,7 @@ +# approval on api packages bubbles to api-approvers +reviewers: +- sig-auth-audit-approvers +- sig-auth-audit-reviewers +labels: +- sig/auth + diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/doc.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/doc.go index 02387cb75fc7..90b516b348c2 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/doc.go @@ -16,4 +16,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=audit.k8s.io + package audit diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1/doc.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1/doc.go index f255961533d7..3a643aacac35 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen=TypeMeta // +groupName=audit.k8s.io + package v1 diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.pb.go index 756ea30baa98..7d4ce7573df5 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto -// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. 
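The regenerated audit types below also rewrite how `map<string,string>` annotations are decoded. On the wire, a proto3 map entry is a length-delimited sub-message whose field 1 is the key and field 2 is the value; the new code loops over whatever fields are present (tolerating missing, reordered, or unknown fields) instead of assuming key-then-value. A minimal stdlib-only decoder for one such entry, written as an illustration of the wire format rather than the generated code:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// decodeMapEntry parses one map<string,string> entry payload: a sequence of
// (tag, length, bytes) fields where field 1 = key and field 2 = value.
// Absent fields stay "", mirroring the tolerant regenerated loop.
func decodeMapEntry(b []byte) (key, value string, err error) {
	for len(b) > 0 {
		tag, n := binary.Uvarint(b)
		if n <= 0 {
			return "", "", fmt.Errorf("bad tag varint")
		}
		b = b[n:]
		fieldNum, wireType := tag>>3, tag&7
		if wireType != 2 { // both map fields are length-delimited strings
			return "", "", fmt.Errorf("unexpected wire type %d", wireType)
		}
		l, n := binary.Uvarint(b)
		if n <= 0 || uint64(len(b)-n) < l {
			return "", "", fmt.Errorf("bad length")
		}
		s := string(b[n : n+int(l)])
		b = b[n+int(l):]
		switch fieldNum {
		case 1:
			key = s
		case 2:
			value = s
		}
	}
	return key, value, nil
}

func main() {
	// 0x0A = field 1, wire type 2; 0x12 = field 2, wire type 2.
	entry := []byte{0x0A, 0x03, 'f', 'o', 'o', 0x12, 0x03, 'b', 'a', 'r'}
	k, v, _ := decodeMapEntry(entry)
	fmt.Println(k, v) // foo bar
}
```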
@@ -610,24 +609,6 @@ func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1455,51 +1436,14 @@ func (m *Event) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Annotations == nil { m.Annotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1509,41 +1453,80 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) 
+ if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Annotations[mapkey] = mapvalue - } else { - var mapvalue string - m.Annotations[mapkey] = mapvalue } + m.Annotations[mapkey] = mapvalue iNdEx = postIndex case 16: if wireType != 2 { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto index 4baad752efe5..c7242222c946 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto @@ -121,17 +121,17 @@ message GroupResources { optional string group = 1; // Resources is a list of resources this rule applies to. - // + // // For example: // 'pods' matches pods. // 'pods/log' matches the log subresource of pods. // '*' matches all resources and their subresources. // 'pods/*' matches all subresources of pods. // '*/scale' matches all scale subresources. - // + // // If wildcard is present, the validation rule will ensure resources do not // overlap with each other. - // + // // An empty list implies all resources and subresources in this API groups apply. // +optional repeated string resources = 2; diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/doc.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/doc.go index 51dbef645cab..75109f5658cf 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen=TypeMeta // +groupName=audit.k8s.io + package v1alpha1 diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.pb.go index 73a993c25c3b..6ef668a9d964 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto -// DO NOT EDIT! /* Package v1alpha1 is a generated protocol buffer package. 
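Every regenerated `.pb.go` file in this diff (above, and again below for the v1alpha1 audit types) also drops the `encodeFixed64Generated`/`encodeFixed32Generated` helpers: they were dead code that newer protoc-gen-gogo versions no longer emit, and the standard library provides the same little-endian encoding anyway. A sketch showing the equivalence:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// encodeFixed64 reproduces the removed helper: little-endian, low byte first.
func encodeFixed64(dAtA []byte, offset int, v uint64) int {
	for i := 0; i < 8; i++ {
		dAtA[offset+i] = uint8(v >> (8 * uint(i)))
	}
	return offset + 8
}

func main() {
	a := make([]byte, 8)
	b := make([]byte, 8)
	encodeFixed64(a, 0, 0xDEADBEEFCAFEF00D)
	binary.LittleEndian.PutUint64(b, 0xDEADBEEFCAFEF00D) // stdlib equivalent
	fmt.Println(bytes.Equal(a, b))                       // true
}
```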
@@ -626,24 +625,6 @@ func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1534,51 +1515,14 @@ func (m *Event) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Annotations == nil { m.Annotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1588,41 +1532,80 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) 
+ if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Annotations[mapkey] = mapvalue - } else { - var mapvalue string - m.Annotations[mapkey] = mapvalue } + m.Annotations[mapkey] = mapvalue iNdEx = postIndex case 18: if wireType != 2 { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto index 507f5889b73e..2a0773d1967c 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto @@ -128,17 +128,17 @@ message GroupResources { optional string group = 1; // Resources is a list of resources this rule applies to. - // + // // For example: // 'pods' matches pods. // 'pods/log' matches the log subresource of pods. // '*' matches all resources and their subresources. // 'pods/*' matches all subresources of pods. // '*/scale' matches all scale subresources. - // + // // If wildcard is present, the validation rule will ensure resources do not // overlap with each other. - // + // // An empty list implies all resources and subresources in this API groups apply. // +optional repeated string resources = 2; diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/doc.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/doc.go index f2e3c67d7ad0..901167b8070f 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen=TypeMeta // +groupName=audit.k8s.io + package v1beta1 diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.pb.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.pb.go index 53d25d9c36f0..ecc26c2e7357 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. 
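The GroupResources `resources` comment quoted a few hunks above (and repeated verbatim in the v1 and v1beta1 protos) specifies the wildcard grammar precisely enough to state as code. The matcher below is purely illustrative — it is not part of this patch or of the apiserver's audit policy code, and the function name and signature are invented for the example:

```go
package main

import (
	"fmt"
	"strings"
)

// matches reports whether a single rule entry such as "pods", "pods/log",
// "*", "pods/*" or "*/scale" applies to the given resource/subresource pair,
// following the semantics described in the GroupResources field comment.
func matches(rule, resource, subresource string) bool {
	// A bare "*" matches all resources and all of their subresources.
	if rule == "*" {
		return true
	}
	res, sub := rule, ""
	if i := strings.IndexByte(rule, '/'); i >= 0 {
		res, sub = rule[:i], rule[i+1:]
	}
	resOK := res == "*" || res == resource
	subOK := sub == subresource || (sub == "*" && subresource != "")
	return resOK && subOK
}

func main() {
	fmt.Println(matches("pods", "pods", ""))                // true
	fmt.Println(matches("pods/log", "pods", "log"))         // true
	fmt.Println(matches("pods/*", "pods", "exec"))          // true
	fmt.Println(matches("*/scale", "deployments", "scale")) // true
	fmt.Println(matches("pods", "pods", "log"))             // false
}
```

Note the asymmetry the comment implies: `pods` alone does not match subresources, and `pods/*` matches only subresources, which is why the validation rule mentioned in the comment forbids wildcard entries that overlap other entries in the same list.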
@@ -630,24 +629,6 @@ func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1541,51 +1522,14 @@ func (m *Event) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Annotations == nil { m.Annotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1595,41 +1539,80 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) 
+ if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Annotations[mapkey] = mapvalue - } else { - var mapvalue string - m.Annotations[mapkey] = mapvalue } + m.Annotations[mapkey] = mapvalue iNdEx = postIndex case 18: if wireType != 2 { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto index 2ea4c6a60592..23ed8910aa84 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto @@ -131,17 +131,17 @@ message GroupResources { optional string group = 1; // Resources is a list of resources this rule applies to. - // + // // For example: // 'pods' matches pods. // 'pods/log' matches the log subresource of pods. // '*' matches all resources and their subresources. // 'pods/*' matches all subresources of pods. // '*/scale' matches all scale subresources. - // + // // If wildcard is present, the validation rule will ensure resources do not // overlap with each other. - // + // // An empty list implies all resources and subresources in this API groups apply. 
// +optional repeated string resources = 2; diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/config/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/config/BUILD index eadb944d5153..feaa8a3c3707 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/config/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/config/BUILD @@ -4,13 +4,18 @@ go_library( name = "go_default_library", srcs = [ "doc.go", + "register.go", "types.go", "zz_generated.deepcopy.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/apis/config", importpath = "k8s.io/apiserver/pkg/apis/config", visibility = ["//visibility:public"], - deps = ["//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], ) filegroup( @@ -24,6 +29,7 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//staging/src/k8s.io/apiserver/pkg/apis/config/v1:all-srcs", "//staging/src/k8s.io/apiserver/pkg/apis/config/v1alpha1:all-srcs", "//staging/src/k8s.io/apiserver/pkg/apis/config/validation:all-srcs", ], diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/config/OWNERS b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/config/OWNERS deleted file mode 100644 index 2f7b10df4b90..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/config/OWNERS +++ /dev/null @@ -1,7 +0,0 @@ -approvers: -- api-approvers -- sttts -- luxas -reviewers: -- api-reviewers -- hanxiaoshuai diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/config/register.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/config/register.go new file mode 100644 index 000000000000..6a0aae8e560a --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/config/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + // SchemeBuilder points to a list of functions added to Scheme. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme adds this group to a scheme. + AddToScheme = SchemeBuilder.AddToScheme +) + +// GroupName is the group name use in this package. +const GroupName = "apiserver.config.k8s.io" + +// SchemeGroupVersion is group version used to register these objects. +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind. +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource. 
+func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +func addKnownTypes(scheme *runtime.Scheme) error { + // TODO this will get cleaned up with the scheme types are fixed + scheme.AddKnownTypes(SchemeGroupVersion, + &EncryptionConfiguration{}, + ) + return nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/config/types.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/config/types.go index f6424ec4e8d2..822806d7e5df 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/config/types.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/config/types.go @@ -56,3 +56,71 @@ type DebuggingConfiguration struct { // enableProfiling is true. EnableContentionProfiling bool } + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EncryptionConfiguration stores the complete configuration for encryption providers. +type EncryptionConfiguration struct { + metav1.TypeMeta + // resources is a list containing resources, and their corresponding encryption providers. + Resources []ResourceConfiguration +} + +// ResourceConfiguration stores per resource configuration. +type ResourceConfiguration struct { + // resources is a list of kubernetes resources which have to be encrypted. + Resources []string + // providers is a list of transformers to be used for reading and writing the resources to disk. + // eg: aesgcm, aescbc, secretbox, identity. + Providers []ProviderConfiguration +} + +// ProviderConfiguration stores the provided configuration for an encryption provider. +type ProviderConfiguration struct { + // aesgcm is the configuration for the AES-GCM transformer. + AESGCM *AESConfiguration + // aescbc is the configuration for the AES-CBC transformer. + AESCBC *AESConfiguration + // secretbox is the configuration for the Secretbox based transformer. + Secretbox *SecretboxConfiguration + // identity is the (empty) configuration for the identity transformer. + Identity *IdentityConfiguration + // kms contains the name, cache size and path to configuration file for a KMS based envelope transformer. + KMS *KMSConfiguration +} + +// AESConfiguration contains the API configuration for an AES transformer. +type AESConfiguration struct { + // keys is a list of keys to be used for creating the AES transformer. + // Each key has to be 32 bytes long for AES-CBC and 16, 24 or 32 bytes for AES-GCM. + Keys []Key +} + +// SecretboxConfiguration contains the API configuration for an Secretbox transformer. +type SecretboxConfiguration struct { + // keys is a list of keys to be used for creating the Secretbox transformer. + // Each key has to be 32 bytes long. + Keys []Key +} + +// Key contains name and secret of the provided key for a transformer. +type Key struct { + // name is the name of the key to be used while storing data to disk. + Name string + // secret is the actual key, encoded in base64. + Secret string +} + +// IdentityConfiguration is an empty struct to allow identity transformer in provider configuration. +type IdentityConfiguration struct{} + +// KMSConfiguration contains the name, cache size and path to configuration file for a KMS based envelope transformer. +type KMSConfiguration struct { + // name is the name of the KMS plugin to be used. + Name string + // cacheSize is the maximum number of secrets which are cached in memory. The default value is 1000. 
+ // +optional + CacheSize int32 + // endpoint is the gRPC server listening address, for example "unix:///var/run/kms-provider.sock". + Endpoint string +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/config/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/config/zz_generated.deepcopy.go index 0b81eb6919e4..438dff997d5e 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/config/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/apis/config/zz_generated.deepcopy.go @@ -20,6 +20,31 @@ limitations under the License. package config +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AESConfiguration) DeepCopyInto(out *AESConfiguration) { + *out = *in + if in.Keys != nil { + in, out := &in.Keys, &out.Keys + *out = make([]Key, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AESConfiguration. +func (in *AESConfiguration) DeepCopy() *AESConfiguration { + if in == nil { + return nil + } + out := new(AESConfiguration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DebuggingConfiguration) DeepCopyInto(out *DebuggingConfiguration) { *out = *in @@ -36,6 +61,86 @@ func (in *DebuggingConfiguration) DeepCopy() *DebuggingConfiguration { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionConfiguration) DeepCopyInto(out *EncryptionConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourceConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfiguration. +func (in *EncryptionConfiguration) DeepCopy() *EncryptionConfiguration { + if in == nil { + return nil + } + out := new(EncryptionConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EncryptionConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityConfiguration) DeepCopyInto(out *IdentityConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityConfiguration. +func (in *IdentityConfiguration) DeepCopy() *IdentityConfiguration { + if in == nil { + return nil + } + out := new(IdentityConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KMSConfiguration) DeepCopyInto(out *KMSConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSConfiguration. 
+func (in *KMSConfiguration) DeepCopy() *KMSConfiguration { + if in == nil { + return nil + } + out := new(KMSConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Key) DeepCopyInto(out *Key) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Key. +func (in *Key) DeepCopy() *Key { + if in == nil { + return nil + } + out := new(Key) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LeaderElectionConfiguration) DeepCopyInto(out *LeaderElectionConfiguration) { *out = *in @@ -54,3 +159,93 @@ func (in *LeaderElectionConfiguration) DeepCopy() *LeaderElectionConfiguration { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderConfiguration) DeepCopyInto(out *ProviderConfiguration) { + *out = *in + if in.AESGCM != nil { + in, out := &in.AESGCM, &out.AESGCM + *out = new(AESConfiguration) + (*in).DeepCopyInto(*out) + } + if in.AESCBC != nil { + in, out := &in.AESCBC, &out.AESCBC + *out = new(AESConfiguration) + (*in).DeepCopyInto(*out) + } + if in.Secretbox != nil { + in, out := &in.Secretbox, &out.Secretbox + *out = new(SecretboxConfiguration) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityConfiguration) + **out = **in + } + if in.KMS != nil { + in, out := &in.KMS, &out.KMS + *out = new(KMSConfiguration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfiguration. +func (in *ProviderConfiguration) DeepCopy() *ProviderConfiguration { + if in == nil { + return nil + } + out := new(ProviderConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceConfiguration) DeepCopyInto(out *ResourceConfiguration) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Providers != nil { + in, out := &in.Providers, &out.Providers + *out = make([]ProviderConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceConfiguration. +func (in *ResourceConfiguration) DeepCopy() *ResourceConfiguration { + if in == nil { + return nil + } + out := new(ResourceConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretboxConfiguration) DeepCopyInto(out *SecretboxConfiguration) { + *out = *in + if in.Keys != nil { + in, out := &in.Keys, &out.Keys + *out = make([]Key, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretboxConfiguration. 
+func (in *SecretboxConfiguration) DeepCopy() *SecretboxConfiguration { + if in == nil { + return nil + } + out := new(SecretboxConfiguration) + in.DeepCopyInto(out) + return out +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/BUILD index bb74cb151c98..f71da4d42436 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/BUILD @@ -33,9 +33,9 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pborman/uuid:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -64,7 +64,9 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//staging/src/k8s.io/apiserver/pkg/audit/event:all-srcs", "//staging/src/k8s.io/apiserver/pkg/audit/policy:all-srcs", + "//staging/src/k8s.io/apiserver/pkg/audit/util:all-srcs", ], tags = ["automanaged"], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/OWNERS b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/OWNERS new file mode 100644 index 000000000000..178ce84a5ce3 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/OWNERS @@ -0,0 +1,7 @@ +approvers: +- sig-auth-audit-approvers +reviewers: +- sig-auth-audit-reviewers +labels: +- sig/auth + diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/metrics.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/metrics.go index 10280e0d88e9..9b81b30cc265 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/metrics.go @@ -19,9 +19,9 @@ package audit import ( "fmt" - "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/klog" ) const ( @@ -52,12 +52,22 @@ var ( }, []string{"level"}, ) + + ApiserverAuditDroppedCounter = prometheus.NewCounter( + prometheus.CounterOpts{ + Subsystem: subsystem, + Name: "requests_rejected_total", + Help: "Counter of apiserver requests rejected due to an error " + + "in audit logging backend.", + }, + ) ) func init() { prometheus.MustRegister(eventCounter) prometheus.MustRegister(errorCounter) prometheus.MustRegister(levelCounter) + prometheus.MustRegister(ApiserverAuditDroppedCounter) } // ObserveEvent updates the relevant prometheus metrics for the generated audit event. 
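For orientation, the new `EncryptionConfiguration` types introduced in `types.go` above compose as follows. This is a hypothetical sketch assuming the vendored import path from this patch; the key material is a placeholder, and in practice this object is produced by deserializing an operator-supplied encryption config file rather than constructed in code:

```go
package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/apis/config"
)

func main() {
	// Encrypt Secrets with AES-CBC. Per the field comments, providers are
	// used for both reading and writing; the trailing identity provider
	// allows resources written before encryption was enabled to stay readable.
	cfg := config.EncryptionConfiguration{
		Resources: []config.ResourceConfiguration{{
			Resources: []string{"secrets"},
			Providers: []config.ProviderConfiguration{
				{AESCBC: &config.AESConfiguration{
					Keys: []config.Key{{
						Name:   "key1",
						Secret: "<BASE64-ENCODED 32-BYTE KEY>", // placeholder; AES-CBC keys must be 32 bytes
					}},
				}},
				{Identity: &config.IdentityConfiguration{}},
			},
		}},
	}
	fmt.Printf("%d resource rule(s) configured\n", len(cfg.Resources))
}
```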
@@ -83,5 +93,5 @@ func HandlePluginError(plugin string, err error, impacted ...*auditinternal.Even for _, ev := range impacted { msg = msg + EventString(ev) + "\n" } - glog.Error(msg) + klog.Error(msg) } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/request.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/request.go index 9593b6c8ab4d..d4b12770eab4 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/request.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/request.go @@ -22,8 +22,8 @@ import ( "net/http" "time" - "github.com/golang/glog" "github.com/pborman/uuid" + "k8s.io/klog" "reflect" @@ -45,10 +45,10 @@ const ( func NewEventFromRequest(req *http.Request, level auditinternal.Level, attribs authorizer.Attributes) (*auditinternal.Event, error) { ev := &auditinternal.Event{ RequestReceivedTimestamp: metav1.NewMicroTime(time.Now()), - Verb: attribs.GetVerb(), - RequestURI: req.URL.RequestURI(), - UserAgent: maybeTruncateUserAgent(req), - Level: level, + Verb: attribs.GetVerb(), + RequestURI: req.URL.RequestURI(), + UserAgent: maybeTruncateUserAgent(req), + Level: level, } // prefer the id from the headers. If not available, create a new one. @@ -152,7 +152,7 @@ func LogRequestObject(ae *auditinternal.Event, obj runtime.Object, gvr schema.Gr ae.RequestObject, err = encodeObject(obj, gvr.GroupVersion(), s) if err != nil { // TODO(audit): add error slice to audit event struct - glog.Warningf("Auditing failed of %v request: %v", reflect.TypeOf(obj).Name(), err) + klog.Warningf("Auditing failed of %v request: %v", reflect.TypeOf(obj).Name(), err) return } } @@ -191,7 +191,7 @@ func LogResponseObject(ae *auditinternal.Event, obj runtime.Object, gv schema.Gr var err error ae.ResponseObject, err = encodeObject(obj, gv, s) if err != nil { - glog.Warningf("Audit failed for %q response: %v", reflect.TypeOf(obj).Name(), err) + klog.Warningf("Audit failed for %q response: %v", reflect.TypeOf(obj).Name(), err) } } @@ -223,7 +223,7 @@ func LogAnnotation(ae *auditinternal.Event, key, value string) { ae.Annotations = make(map[string]string) } if v, ok := ae.Annotations[key]; ok && v != value { - glog.Warningf("Failed to set annotations[%q] to %q for audit:%q, it has already been set to %q", key, value, ae.AuditID, ae.Annotations[key]) + klog.Warningf("Failed to set annotations[%q] to %q for audit:%q, it has already been set to %q", key, value, ae.AuditID, ae.Annotations[key]) return } ae.Annotations[key] = value diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/types.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/types.go index dbf03b0f51c2..b78bd086b051 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/types.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/types.go @@ -25,7 +25,8 @@ type Sink interface { // Errors might be logged by the sink itself. If an error should be fatal, leading to an internal // error, ProcessEvents is supposed to panic. The event must not be mutated and is reused by the caller // after the call returns, i.e. the sink has to make a deepcopy to keep a copy around if necessary. - ProcessEvents(events ...*auditinternal.Event) + // Returns true on success, may return false on error. 
+ ProcessEvents(events ...*auditinternal.Event) bool } type Backend interface { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/union.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/union.go index 6ee441533a74..39dd74f740e8 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/union.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/audit/union.go @@ -37,10 +37,12 @@ type union struct { backends []Backend } -func (u union) ProcessEvents(events ...*auditinternal.Event) { +func (u union) ProcessEvents(events ...*auditinternal.Event) bool { + success := true for _, backend := range u.backends { - backend.ProcessEvents(events...) + success = backend.ProcessEvents(events...) && success } + return success } func (u union) Run(stopCh <-chan struct{}) error { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticator/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticator/BUILD index 4a2741d9db0c..a8a5395dc923 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticator/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticator/BUILD @@ -1,13 +1,14 @@ package(default_visibility = ["//visibility:public"]) -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", - srcs = ["interfaces.go"], + srcs = [ + "audagnostic.go", + "audiences.go", + "interfaces.go", + ], importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/authentication/authenticator", importpath = "k8s.io/apiserver/pkg/authentication/authenticator", deps = ["//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library"], @@ -25,3 +26,16 @@ filegroup( srcs = [":package-srcs"], tags = ["automanaged"], ) + +go_test( + name = "go_default_test", + srcs = [ + "audagnostic_test.go", + "audiences_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", + ], +) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticator/audagnostic.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticator/audagnostic.go new file mode 100644 index 000000000000..bcf7eb4bc9d0 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticator/audagnostic.go @@ -0,0 +1,90 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authenticator + +import ( + "context" + "fmt" + "net/http" +) + +func authenticate(ctx context.Context, implicitAuds Audiences, authenticate func() (*Response, bool, error)) (*Response, bool, error) { + targetAuds, ok := AudiencesFrom(ctx) + // We can remove this once api audiences is never empty. That will probably + // be N releases after TokenRequest is GA. 
+ if !ok { + return authenticate() + } + auds := implicitAuds.Intersect(targetAuds) + if len(auds) == 0 { + return nil, false, nil + } + resp, ok, err := authenticate() + if err != nil || !ok { + return nil, false, err + } + if len(resp.Audiences) > 0 { + // maybe the authenticator was audience aware after all. + return nil, false, fmt.Errorf("audience agnostic authenticator wrapped an authenticator that returned audiences: %q", resp.Audiences) + } + resp.Audiences = auds + return resp, true, nil +} + +type audAgnosticRequestAuthenticator struct { + implicit Audiences + delegate Request +} + +var _ = Request(&audAgnosticRequestAuthenticator{}) + +func (a *audAgnosticRequestAuthenticator) AuthenticateRequest(req *http.Request) (*Response, bool, error) { + return authenticate(req.Context(), a.implicit, func() (*Response, bool, error) { + return a.delegate.AuthenticateRequest(req) + }) +} + +// WrapAudienceAgnosticRequest wraps an audience agnostic request authenticator +// to restrict its accepted audiences to a set of implicit audiences. +func WrapAudienceAgnosticRequest(implicit Audiences, delegate Request) Request { + return &audAgnosticRequestAuthenticator{ + implicit: implicit, + delegate: delegate, + } +} + +type audAgnosticTokenAuthenticator struct { + implicit Audiences + delegate Token +} + +var _ = Token(&audAgnosticTokenAuthenticator{}) + +func (a *audAgnosticTokenAuthenticator) AuthenticateToken(ctx context.Context, tok string) (*Response, bool, error) { + return authenticate(ctx, a.implicit, func() (*Response, bool, error) { + return a.delegate.AuthenticateToken(ctx, tok) + }) +} + +// WrapAudienceAgnosticToken wraps an audience agnostic token authenticator to +// restrict its accepted audiences to a set of implicit audiences. +func WrapAudienceAgnosticToken(implicit Audiences, delegate Token) Token { + return &audAgnosticTokenAuthenticator{ + implicit: implicit, + delegate: delegate, + } +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticator/audiences.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticator/audiences.go new file mode 100644 index 000000000000..2a3a918896d9 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticator/audiences.go @@ -0,0 +1,63 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authenticator + +import "context" + +// Audiences is a container for the Audiences of a token. +type Audiences []string + +// The key type is unexported to prevent collisions +type key int + +const ( + // audiencesKey is the context key for request audiences. + audiencesKey key = iota +) + +// WithAudiences returns a context that stores a request's expected audiences. +func WithAudiences(ctx context.Context, auds Audiences) context.Context { + return context.WithValue(ctx, audiencesKey, auds) +} + +// AudiencesFrom returns a request's expected audiences stored in the request context. 
+func AudiencesFrom(ctx context.Context) (Audiences, bool) { + auds, ok := ctx.Value(audiencesKey).(Audiences) + return auds, ok +} + +// Has checks if Audiences contains a specific audiences. +func (a Audiences) Has(taud string) bool { + for _, aud := range a { + if aud == taud { + return true + } + } + return false +} + +// Intersect intersects Audiences with a target Audiences and returns all +// elements in both. +func (a Audiences) Intersect(tauds Audiences) Audiences { + selected := Audiences{} + for _, taud := range tauds { + if a.Has(taud) { + selected = append(selected, taud) + } + } + return selected +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticator/interfaces.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticator/interfaces.go index fd3d0383e52b..e3b1b622cbae 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticator/interfaces.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticator/interfaces.go @@ -17,52 +17,64 @@ limitations under the License. package authenticator import ( + "context" "net/http" "k8s.io/apiserver/pkg/authentication/user" ) -// Token checks a string value against a backing authentication store and returns -// information about the current user and true if successful, false if not successful, -// or an error if the token could not be checked. +// Token checks a string value against a backing authentication store and +// returns a Response or an error if the token could not be checked. type Token interface { - AuthenticateToken(token string) (user.Info, bool, error) + AuthenticateToken(ctx context.Context, token string) (*Response, bool, error) } -// Request attempts to extract authentication information from a request and returns -// information about the current user and true if successful, false if not successful, -// or an error if the request could not be checked. +// Request attempts to extract authentication information from a request and +// returns a Response or an error if the request could not be checked. type Request interface { - AuthenticateRequest(req *http.Request) (user.Info, bool, error) + AuthenticateRequest(req *http.Request) (*Response, bool, error) } -// Password checks a username and password against a backing authentication store and -// returns information about the user and true if successful, false if not successful, -// or an error if the username and password could not be checked +// Password checks a username and password against a backing authentication +// store and returns a Response or an error if the password could not be +// checked. type Password interface { - AuthenticatePassword(user, password string) (user.Info, bool, error) + AuthenticatePassword(ctx context.Context, user, password string) (*Response, bool, error) } // TokenFunc is a function that implements the Token interface. -type TokenFunc func(token string) (user.Info, bool, error) +type TokenFunc func(ctx context.Context, token string) (*Response, bool, error) // AuthenticateToken implements authenticator.Token. -func (f TokenFunc) AuthenticateToken(token string) (user.Info, bool, error) { - return f(token) +func (f TokenFunc) AuthenticateToken(ctx context.Context, token string) (*Response, bool, error) { + return f(ctx, token) } // RequestFunc is a function that implements the Request interface. 
-type RequestFunc func(req *http.Request) (user.Info, bool, error) +type RequestFunc func(req *http.Request) (*Response, bool, error) // AuthenticateRequest implements authenticator.Request. -func (f RequestFunc) AuthenticateRequest(req *http.Request) (user.Info, bool, error) { +func (f RequestFunc) AuthenticateRequest(req *http.Request) (*Response, bool, error) { return f(req) } // PasswordFunc is a function that implements the Password interface. -type PasswordFunc func(user, password string) (user.Info, bool, error) +type PasswordFunc func(ctx context.Context, user, password string) (*Response, bool, error) // AuthenticatePassword implements authenticator.Password. -func (f PasswordFunc) AuthenticatePassword(user, password string) (user.Info, bool, error) { - return f(user, password) +func (f PasswordFunc) AuthenticatePassword(ctx context.Context, user, password string) (*Response, bool, error) { + return f(ctx, user, password) +} + +// Response is the struct returned by authenticator interfaces upon successful +// authentication. It contains information about whether the authenticator +// authenticated the request, information about the context of the +// authentication, and information about the authenticated user. +type Response struct { + // Audiences is the set of audiences the authenticator was able to validate + // the token against. If the authenticator is not audience aware, this field + // will be empty. + Audiences Audiences + // User is the UserInfo associated with the authentication context. + User user.Info } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/BUILD index 750e8b4eae4c..1da12201175f 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/BUILD @@ -23,6 +23,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/authentication/request/union:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/request/websocket:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/request/x509:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/authentication/token/cache:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/token/tokenfile:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go index d8e18345545e..67958c3639b8 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go @@ -31,6 +31,7 @@ import ( unionauth "k8s.io/apiserver/pkg/authentication/request/union" "k8s.io/apiserver/pkg/authentication/request/websocket" "k8s.io/apiserver/pkg/authentication/request/x509" + "k8s.io/apiserver/pkg/authentication/token/cache" webhooktoken "k8s.io/apiserver/plugin/pkg/authenticator/token/webhook" authenticationclient "k8s.io/client-go/kubernetes/typed/authentication/v1beta1" "k8s.io/client-go/util/cert" @@ -50,6 +51,8 @@ type DelegatingAuthenticatorConfig struct { 
// ClientCAFile is the CA bundle file used to authenticate client certificates ClientCAFile string + APIAudiences authenticator.Audiences + RequestHeaderConfig *RequestHeaderConfig } @@ -85,11 +88,12 @@ func (c DelegatingAuthenticatorConfig) New() (authenticator.Request, *spec.Secur } if c.TokenAccessReviewClient != nil { - tokenAuth, err := webhooktoken.NewFromInterface(c.TokenAccessReviewClient, c.CacheTTL) + tokenAuth, err := webhooktoken.NewFromInterface(c.TokenAccessReviewClient, c.APIAudiences) if err != nil { return nil, nil, err } - authenticators = append(authenticators, bearertoken.New(tokenAuth), websocket.NewProtocolAuthenticator(tokenAuth)) + cachingTokenAuth := cache.New(tokenAuth, false, c.CacheTTL, c.CacheTTL) + authenticators = append(authenticators, bearertoken.New(cachingTokenAuth), websocket.NewProtocolAuthenticator(cachingTokenAuth)) securityDefinitions["BearerToken"] = &spec.SecurityScheme{ SecuritySchemeProps: spec.SecuritySchemeProps{ diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/group/authenticated_group_adder.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/group/authenticated_group_adder.go index 9f0453b15f23..5ac6b2ddf9d7 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/group/authenticated_group_adder.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/group/authenticated_group_adder.go @@ -36,25 +36,26 @@ func NewAuthenticatedGroupAdder(auth authenticator.Request) authenticator.Reques return &AuthenticatedGroupAdder{auth} } -func (g *AuthenticatedGroupAdder) AuthenticateRequest(req *http.Request) (user.Info, bool, error) { - u, ok, err := g.Authenticator.AuthenticateRequest(req) +func (g *AuthenticatedGroupAdder) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { + r, ok, err := g.Authenticator.AuthenticateRequest(req) if err != nil || !ok { return nil, ok, err } - if u.GetName() == user.Anonymous { - return u, true, nil + if r.User.GetName() == user.Anonymous { + return r, true, nil } - for _, group := range u.GetGroups() { + for _, group := range r.User.GetGroups() { if group == user.AllAuthenticated || group == user.AllUnauthenticated { - return u, true, nil + return r, true, nil } } - return &user.DefaultInfo{ - Name: u.GetName(), - UID: u.GetUID(), - Groups: append(u.GetGroups(), user.AllAuthenticated), - Extra: u.GetExtra(), - }, true, nil + r.User = &user.DefaultInfo{ + Name: r.User.GetName(), + UID: r.User.GetUID(), + Groups: append(r.User.GetGroups(), user.AllAuthenticated), + Extra: r.User.GetExtra(), + } + return r, true, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/group/group_adder.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/group/group_adder.go index 1f71429b4ef9..3079dad62547 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/group/group_adder.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/group/group_adder.go @@ -36,15 +36,16 @@ func NewGroupAdder(auth authenticator.Request, groups []string) authenticator.Re return &GroupAdder{auth, groups} } -func (g *GroupAdder) AuthenticateRequest(req *http.Request) (user.Info, bool, error) { - u, ok, err := g.Authenticator.AuthenticateRequest(req) +func (g *GroupAdder) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { + r, ok, err := g.Authenticator.AuthenticateRequest(req) if err != nil || !ok { return nil, ok, err } - return &user.DefaultInfo{ - Name: 
u.GetName(), - UID: u.GetUID(), - Groups: append(u.GetGroups(), g.Groups...), - Extra: u.GetExtra(), - }, true, nil + r.User = &user.DefaultInfo{ + Name: r.User.GetName(), + UID: r.User.GetUID(), + Groups: append(r.User.GetGroups(), g.Groups...), + Extra: r.User.GetExtra(), + } + return r, true, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/group/token_group_adder.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/group/token_group_adder.go index 4f60d522f760..0ed5ee55965a 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/group/token_group_adder.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/group/token_group_adder.go @@ -17,6 +17,8 @@ limitations under the License. package group import ( + "context" + "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authentication/user" ) @@ -34,15 +36,16 @@ func NewTokenGroupAdder(auth authenticator.Token, groups []string) authenticator return &TokenGroupAdder{auth, groups} } -func (g *TokenGroupAdder) AuthenticateToken(token string) (user.Info, bool, error) { - u, ok, err := g.Authenticator.AuthenticateToken(token) +func (g *TokenGroupAdder) AuthenticateToken(ctx context.Context, token string) (*authenticator.Response, bool, error) { + r, ok, err := g.Authenticator.AuthenticateToken(ctx, token) if err != nil || !ok { return nil, ok, err } - return &user.DefaultInfo{ - Name: u.GetName(), - UID: u.GetUID(), - Groups: append(u.GetGroups(), g.Groups...), - Extra: u.GetExtra(), - }, true, nil + r.User = &user.DefaultInfo{ + Name: r.User.GetName(), + UID: r.User.GetUID(), + Groups: append(r.User.GetGroups(), g.Groups...), + Extra: r.User.GetExtra(), + } + return r, true, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/anonymous/anonymous.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/anonymous/anonymous.go index a6d22942a325..f9177d151379 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/anonymous/anonymous.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/anonymous/anonymous.go @@ -30,7 +30,14 @@ const ( ) func NewAuthenticator() authenticator.Request { - return authenticator.RequestFunc(func(req *http.Request) (user.Info, bool, error) { - return &user.DefaultInfo{Name: anonymousUser, Groups: []string{unauthenticatedGroup}}, true, nil + return authenticator.RequestFunc(func(req *http.Request) (*authenticator.Response, bool, error) { + auds, _ := authenticator.AudiencesFrom(req.Context()) + return &authenticator.Response{ + User: &user.DefaultInfo{ + Name: anonymousUser, + Groups: []string{unauthenticatedGroup}, + }, + Audiences: auds, + }, true, nil }) } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/BUILD index b88082fbbd45..83f4f5dcca1c 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/BUILD @@ -21,10 +21,7 @@ go_library( srcs = ["bearertoken.go"], importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken", importpath = "k8s.io/apiserver/pkg/authentication/request/bearertoken", - deps = [ - "//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", - 
"//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", - ], + deps = ["//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library"], ) filegroup( diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/bearertoken.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/bearertoken.go index 5ca22f38ba89..2de796b72328 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/bearertoken.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/bearertoken.go @@ -22,7 +22,6 @@ import ( "strings" "k8s.io/apiserver/pkg/authentication/authenticator" - "k8s.io/apiserver/pkg/authentication/user" ) type Authenticator struct { @@ -35,7 +34,7 @@ func New(auth authenticator.Token) *Authenticator { var invalidToken = errors.New("invalid bearer token") -func (a *Authenticator) AuthenticateRequest(req *http.Request) (user.Info, bool, error) { +func (a *Authenticator) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { auth := strings.TrimSpace(req.Header.Get("Authorization")) if auth == "" { return nil, false, nil @@ -52,7 +51,7 @@ func (a *Authenticator) AuthenticateRequest(req *http.Request) (user.Info, bool, return nil, false, nil } - user, ok, err := a.auth.AuthenticateToken(token) + resp, ok, err := a.auth.AuthenticateToken(req.Context(), token) // if we authenticated successfully, go ahead and remove the bearer token so that no one // is ever tempted to use it inside of the API server if ok { @@ -64,5 +63,5 @@ func (a *Authenticator) AuthenticateRequest(req *http.Request) (user.Info, bool, err = invalidToken } - return user, ok, err + return resp, ok, err } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader.go index 948478b80ed3..70af861d8b5d 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader.go @@ -105,7 +105,7 @@ func NewSecure(clientCA string, proxyClientNames []string, nameHeaders []string, return x509request.NewVerifier(opts, headerAuthenticator, sets.NewString(proxyClientNames...)), nil } -func (a *requestHeaderAuthRequestHandler) AuthenticateRequest(req *http.Request) (user.Info, bool, error) { +func (a *requestHeaderAuthRequestHandler) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { name := headerValue(req.Header, a.nameHeaders) if len(name) == 0 { return nil, false, nil @@ -126,10 +126,12 @@ func (a *requestHeaderAuthRequestHandler) AuthenticateRequest(req *http.Request) } } - return &user.DefaultInfo{ - Name: name, - Groups: groups, - Extra: extra, + return &authenticator.Response{ + User: &user.DefaultInfo{ + Name: name, + Groups: groups, + Extra: extra, + }, }, true, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/union/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/union/BUILD index 75e741dec7b1..86f585b52e4e 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/union/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/union/BUILD @@ -10,7 +10,10 @@ go_test( name = "go_default_test", srcs = 
["unionauth_test.go"], embed = [":go_default_library"], - deps = ["//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library"], + deps = [ + "//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", + ], ) go_library( @@ -21,7 +24,6 @@ go_library( deps = [ "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/union/union.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/union/union.go index 161394098106..512063beea13 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/union/union.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/union/union.go @@ -21,7 +21,6 @@ import ( utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apiserver/pkg/authentication/authenticator" - "k8s.io/apiserver/pkg/authentication/user" ) // unionAuthRequestHandler authenticates requests using a chain of authenticator.Requests @@ -51,20 +50,20 @@ func NewFailOnError(authRequestHandlers ...authenticator.Request) authenticator. } // AuthenticateRequest authenticates the request using a chain of authenticator.Request objects. -func (authHandler *unionAuthRequestHandler) AuthenticateRequest(req *http.Request) (user.Info, bool, error) { +func (authHandler *unionAuthRequestHandler) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { var errlist []error for _, currAuthRequestHandler := range authHandler.Handlers { - info, ok, err := currAuthRequestHandler.AuthenticateRequest(req) + resp, ok, err := currAuthRequestHandler.AuthenticateRequest(req) if err != nil { if authHandler.FailOnError { - return info, ok, err + return resp, ok, err } errlist = append(errlist, err) continue } if ok { - return info, ok, err + return resp, ok, err } } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/BUILD index dbfbb8337ea5..53b8e0f463fc 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/BUILD @@ -13,7 +13,6 @@ go_library( importpath = "k8s.io/apiserver/pkg/authentication/request/websocket", deps = [ "//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/wsstream:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/protocol.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/protocol.go index 4a30bb6359cc..11afa84cbd0e 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/protocol.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/protocol.go @@ -25,7 +25,6 @@ import ( "unicode/utf8" "k8s.io/apiserver/pkg/authentication/authenticator" - "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/util/wsstream" ) @@ -46,7 +45,7 @@ func 
NewProtocolAuthenticator(auth authenticator.Token) *ProtocolAuthenticator { return &ProtocolAuthenticator{auth} } -func (a *ProtocolAuthenticator) AuthenticateRequest(req *http.Request) (user.Info, bool, error) { +func (a *ProtocolAuthenticator) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { // Only accept websocket connections if !wsstream.IsWebSocketRequest(req) { return nil, false, nil @@ -91,7 +90,7 @@ func (a *ProtocolAuthenticator) AuthenticateRequest(req *http.Request) (user.Inf return nil, false, nil } - user, ok, err := a.auth.AuthenticateToken(token) + resp, ok, err := a.auth.AuthenticateToken(req.Context(), token) // on success, remove the protocol with the token if ok { @@ -105,5 +104,5 @@ func (a *ProtocolAuthenticator) AuthenticateRequest(req *http.Request) (user.Inf err = errInvalidToken } - return user, ok, err + return resp, ok, err } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/x509/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/x509/BUILD index 815cdbbc89e2..399197ea136d 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/x509/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/x509/BUILD @@ -36,7 +36,6 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/x509/OWNERS b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/x509/OWNERS new file mode 100644 index 000000000000..470b7a1c92d1 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/x509/OWNERS @@ -0,0 +1,7 @@ +approvers: +- sig-auth-certificates-approvers +reviewers: +- sig-auth-certificates-reviewers +labels: +- sig/auth + diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go index c98d7ff681fc..bc875adacf1e 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go @@ -23,7 +23,6 @@ import ( "net/http" "time" - "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -60,14 +59,14 @@ func init() { // UserConversion defines an interface for extracting user info from a client certificate chain type UserConversion interface { - User(chain []*x509.Certificate) (user.Info, bool, error) + User(chain []*x509.Certificate) (*authenticator.Response, bool, error) } // UserConversionFunc is a function that implements the UserConversion interface. 
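The hunk below swaps `UserConversion` from returning a bare `user.Info` to the `*authenticator.Response` shape. A minimal sketch of implementing the interface after this change, using only signatures visible in this diff (the email-based conversion, the `x509request` alias, and the variable names are illustrative, not part of this change):

```go
package main

import (
	"crypto/x509"

	"k8s.io/apiserver/pkg/authentication/authenticator"
	x509request "k8s.io/apiserver/pkg/authentication/request/x509"
	"k8s.io/apiserver/pkg/authentication/user"
)

// emailUserConversion is a hypothetical conversion that authenticates a
// client certificate by its first SAN email address instead of the CommonName.
var emailUserConversion = x509request.UserConversionFunc(func(chain []*x509.Certificate) (*authenticator.Response, bool, error) {
	if len(chain[0].EmailAddresses) == 0 {
		return nil, false, nil // no opinion; let another authenticator try
	}
	return &authenticator.Response{
		User: &user.DefaultInfo{Name: chain[0].EmailAddresses[0]},
	}, true, nil
})

func main() {
	// Verify the chain against system roots, then convert it to a user.
	auth := x509request.New(x509request.DefaultVerifyOptions(), emailUserConversion)
	_ = auth // plug into a request-handling chain, e.g. via the union package
}
```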
-type UserConversionFunc func(chain []*x509.Certificate) (user.Info, bool, error) +type UserConversionFunc func(chain []*x509.Certificate) (*authenticator.Response, bool, error) // User implements x509.UserConversion -func (f UserConversionFunc) User(chain []*x509.Certificate) (user.Info, bool, error) { +func (f UserConversionFunc) User(chain []*x509.Certificate) (*authenticator.Response, bool, error) { return f(chain) } @@ -84,7 +83,7 @@ func New(opts x509.VerifyOptions, user UserConversion) *Authenticator { } // AuthenticateRequest authenticates the request using presented client certificates -func (a *Authenticator) AuthenticateRequest(req *http.Request) (user.Info, bool, error) { +func (a *Authenticator) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { if req.TLS == nil || len(req.TLS.PeerCertificates) == 0 { return nil, false, nil } @@ -136,7 +135,7 @@ func NewVerifier(opts x509.VerifyOptions, auth authenticator.Request, allowedCom } // AuthenticateRequest verifies the presented client certificate, then delegates to the wrapped auth -func (a *Verifier) AuthenticateRequest(req *http.Request) (user.Info, bool, error) { +func (a *Verifier) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { if req.TLS == nil || len(req.TLS.PeerCertificates) == 0 { return nil, false, nil } @@ -168,8 +167,7 @@ func (a *Verifier) verifySubject(subject pkix.Name) error { if a.allowedCommonNames.Has(subject.CommonName) { return nil } - glog.Warningf("x509: subject with cn=%s is not in the allowed list: %v", subject.CommonName, a.allowedCommonNames.List()) - return fmt.Errorf("x509: subject with cn=%s is not allowed", subject.CommonName) + return fmt.Errorf("x509: subject with cn=%s is not in the allowed list", subject.CommonName) } // DefaultVerifyOptions returns VerifyOptions that use the system root certificates, current time, @@ -181,12 +179,14 @@ func DefaultVerifyOptions() x509.VerifyOptions { } // CommonNameUserConversion builds user info from a certificate chain using the subject's CommonName -var CommonNameUserConversion = UserConversionFunc(func(chain []*x509.Certificate) (user.Info, bool, error) { +var CommonNameUserConversion = UserConversionFunc(func(chain []*x509.Certificate) (*authenticator.Response, bool, error) { if len(chain[0].Subject.CommonName) == 0 { return nil, false, nil } - return &user.DefaultInfo{ - Name: chain[0].Subject.CommonName, - Groups: chain[0].Subject.Organization, + return &authenticator.Response{ + User: &user.DefaultInfo{ + Name: chain[0].Subject.CommonName, + Groups: chain[0].Subject.Organization, + }, }, true, nil }) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/cache/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/cache/BUILD new file mode 100644 index 000000000000..66c09d448448 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/cache/BUILD @@ -0,0 +1,51 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = [ + "cache_test.go", + "cached_token_authenticator_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", + 
"//vendor/github.com/pborman/uuid:go_default_library", + ], +) + +go_library( + name = "go_default_library", + srcs = [ + "cache_simple.go", + "cache_striped.go", + "cached_token_authenticator.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/authentication/token/cache", + importpath = "k8s.io/apiserver/pkg/authentication/token/cache", + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/util/cache:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_simple.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_simple.go new file mode 100644 index 000000000000..18d5692d7a72 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_simple.go @@ -0,0 +1,49 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "time" + + lrucache "k8s.io/apimachinery/pkg/util/cache" + "k8s.io/apimachinery/pkg/util/clock" +) + +type simpleCache struct { + lru *lrucache.LRUExpireCache +} + +func newSimpleCache(size int, clock clock.Clock) cache { + return &simpleCache{lru: lrucache.NewLRUExpireCacheWithClock(size, clock)} +} + +func (c *simpleCache) get(key string) (*cacheRecord, bool) { + record, ok := c.lru.Get(key) + if !ok { + return nil, false + } + value, ok := record.(*cacheRecord) + return value, ok +} + +func (c *simpleCache) set(key string, value *cacheRecord, ttl time.Duration) { + c.lru.Add(key, value, ttl) +} + +func (c *simpleCache) remove(key string) { + c.lru.Remove(key) +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_striped.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_striped.go new file mode 100644 index 000000000000..e5b7afe4e7d3 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_striped.go @@ -0,0 +1,60 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "hash/fnv" + "time" +) + +// split cache lookups across N striped caches +type stripedCache struct { + stripeCount uint32 + hashFunc func(string) uint32 + caches []cache +} + +type hashFunc func(string) uint32 +type newCacheFunc func() cache + +func newStripedCache(stripeCount int, hash hashFunc, newCacheFunc newCacheFunc) cache { + caches := []cache{} + for i := 0; i < stripeCount; i++ { + caches = append(caches, newCacheFunc()) + } + return &stripedCache{ + stripeCount: uint32(stripeCount), + hashFunc: hash, + caches: caches, + } +} + +func (c *stripedCache) get(key string) (*cacheRecord, bool) { + return c.caches[c.hashFunc(key)%c.stripeCount].get(key) +} +func (c *stripedCache) set(key string, value *cacheRecord, ttl time.Duration) { + c.caches[c.hashFunc(key)%c.stripeCount].set(key, value, ttl) +} +func (c *stripedCache) remove(key string) { + c.caches[c.hashFunc(key)%c.stripeCount].remove(key) +} + +func fnvHashFunc(key string) uint32 { + f := fnv.New32() + f.Write([]byte(key)) + return f.Sum32() +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go new file mode 100644 index 000000000000..ea3853a38b41 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go @@ -0,0 +1,95 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "context" + "fmt" + "time" + + utilclock "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apiserver/pkg/authentication/authenticator" +) + +// cacheRecord holds the three return values of the authenticator.Token AuthenticateToken method +type cacheRecord struct { + resp *authenticator.Response + ok bool + err error +} + +type cachedTokenAuthenticator struct { + authenticator authenticator.Token + + cacheErrs bool + successTTL time.Duration + failureTTL time.Duration + + cache cache +} + +type cache interface { + // given a key, return the record, and whether or not it existed + get(key string) (value *cacheRecord, exists bool) + // caches the record for the key + set(key string, value *cacheRecord, ttl time.Duration) + // removes the record for the key + remove(key string) +} + +// New returns a token authenticator that caches the results of the specified authenticator. A ttl of 0 bypasses the cache. 
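The constructor that follows takes `(authenticator, cacheErrs, successTTL, failureTTL)`. A sketch of wiring the cache in front of a slow token backend, using only the signatures added in this file (the static-map authenticator below stands in for something like a webhook and is illustrative only):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apiserver/pkg/authentication/authenticator"
	"k8s.io/apiserver/pkg/authentication/token/cache"
	"k8s.io/apiserver/pkg/authentication/user"
)

// staticTokenAuthenticator is an illustrative stand-in for a real (slow)
// token backend such as a webhook authenticator.
type staticTokenAuthenticator struct{ tokens map[string]string }

func (s *staticTokenAuthenticator) AuthenticateToken(ctx context.Context, token string) (*authenticator.Response, bool, error) {
	name, ok := s.tokens[token]
	if !ok {
		return nil, false, nil
	}
	return &authenticator.Response{User: &user.DefaultInfo{Name: name}}, true, nil
}

func main() {
	backend := &staticTokenAuthenticator{tokens: map[string]string{"secret": "alice"}}
	// Cache successes for 10s; a failureTTL of 0 disables negative caching.
	cached := cache.New(backend, false, 10*time.Second, 0)

	resp, ok, err := cached.AuthenticateToken(context.Background(), "secret")
	fmt.Println(resp.User.GetName(), ok, err) // alice true <nil>
}
```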
+func New(authenticator authenticator.Token, cacheErrs bool, successTTL, failureTTL time.Duration) authenticator.Token { + return newWithClock(authenticator, cacheErrs, successTTL, failureTTL, utilclock.RealClock{}) +} + +func newWithClock(authenticator authenticator.Token, cacheErrs bool, successTTL, failureTTL time.Duration, clock utilclock.Clock) authenticator.Token { + return &cachedTokenAuthenticator{ + authenticator: authenticator, + cacheErrs: cacheErrs, + successTTL: successTTL, + failureTTL: failureTTL, + cache: newStripedCache(32, fnvHashFunc, func() cache { return newSimpleCache(128, clock) }), + } +} + +// AuthenticateToken implements authenticator.Token +func (a *cachedTokenAuthenticator) AuthenticateToken(ctx context.Context, token string) (*authenticator.Response, bool, error) { + auds, _ := authenticator.AudiencesFrom(ctx) + + key := keyFunc(auds, token) + if record, ok := a.cache.get(key); ok { + return record.resp, record.ok, record.err + } + + resp, ok, err := a.authenticator.AuthenticateToken(ctx, token) + if !a.cacheErrs && err != nil { + return resp, ok, err + } + + switch { + case ok && a.successTTL > 0: + a.cache.set(key, &cacheRecord{resp: resp, ok: ok, err: err}, a.successTTL) + case !ok && a.failureTTL > 0: + a.cache.set(key, &cacheRecord{resp: resp, ok: ok, err: err}, a.failureTTL) + } + + return resp, ok, err +} + +func keyFunc(auds []string, token string) string { + return fmt.Sprintf("%#v|%v", auds, token) +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/BUILD index 00b19d480611..b43d1ff609c0 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/BUILD @@ -19,8 +19,9 @@ go_library( importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile", importpath = "k8s.io/apiserver/pkg/authentication/token/tokenfile", deps = [ + "//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/tokenfile.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/tokenfile.go index 57bb6c596d8e..69568f17dd25 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/tokenfile.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/tokenfile.go @@ -17,14 +17,16 @@ limitations under the License. 
package tokenfile import ( + "context" "encoding/csv" "fmt" "io" "os" "strings" - "github.com/golang/glog" + "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/klog" ) type TokenAuthenticator struct { @@ -65,7 +67,7 @@ func NewCSV(path string) (*TokenAuthenticator, error) { recordNum++ if record[0] == "" { - glog.Warningf("empty token has been found in token file '%s', record number '%d'", path, recordNum) + klog.Warningf("empty token has been found in token file '%s', record number '%d'", path, recordNum) continue } @@ -74,7 +76,7 @@ func NewCSV(path string) (*TokenAuthenticator, error) { UID: record[2], } if _, exist := tokens[record[0]]; exist { - glog.Warningf("duplicate token has been found in token file '%s', record number '%d'", path, recordNum) + klog.Warningf("duplicate token has been found in token file '%s', record number '%d'", path, recordNum) } tokens[record[0]] = obj @@ -88,10 +90,10 @@ func NewCSV(path string) (*TokenAuthenticator, error) { }, nil } -func (a *TokenAuthenticator) AuthenticateToken(value string) (user.Info, bool, error) { +func (a *TokenAuthenticator) AuthenticateToken(ctx context.Context, value string) (*authenticator.Response, bool, error) { user, ok := a.tokens[value] if !ok { return nil, false, nil } - return user, true, nil + return &authenticator.Response{User: user}, true, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/OWNERS b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/OWNERS index 63cf9723bb14..cb2ae800f36a 100755 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/OWNERS @@ -1,4 +1,3 @@ reviewers: - deads2k - dims -- ericchiang diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/request/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/request/BUILD index 5e84006a1cf2..388c1846e3a2 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/request/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/request/BUILD @@ -36,7 +36,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apiserver/pkg/apis/audit:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/request/context.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/request/context.go index 95166f5c4741..fe3ae38edcd7 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/request/context.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/request/context.go @@ -36,6 +36,9 @@ const ( // auditKey is the context key for the audit event. auditKey + + // audiencesKey is the context key for request audiences. + audiencesKey ) // NewContext instantiates a base context object for request flows. 
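The new `audiencesKey` above follows the package's unexported-context-key pattern; the cached token authenticator earlier in this diff reads the stored audiences back via `authenticator.AudiencesFrom(ctx)`. A self-contained sketch of how such a key is typically paired with accessors (the `WithAudiences`/`AudiencesFrom` helpers below are illustrative; the real ones live elsewhere in the apiserver code and are not part of this hunk):

```go
package main

import (
	"context"
	"fmt"
)

type key int

const audiencesKey key = iota

// WithAudiences stores the intended token audiences in the request context.
func WithAudiences(parent context.Context, auds []string) context.Context {
	return context.WithValue(parent, audiencesKey, auds)
}

// AudiencesFrom retrieves them; ok is false if none were set.
func AudiencesFrom(ctx context.Context) ([]string, bool) {
	auds, ok := ctx.Value(audiencesKey).([]string)
	return auds, ok
}

func main() {
	ctx := WithAudiences(context.Background(), []string{"https://kubernetes.default.svc"})
	if auds, ok := AudiencesFrom(ctx); ok {
		fmt.Println(auds)
	}
}
```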
diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go index 1520bb3c9e51..cc8ae39fa2c5 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go @@ -27,7 +27,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" - "github.com/golang/glog" + "k8s.io/klog" ) // LongRunningRequestCheck is a predicate which is true for long-running http requests. @@ -210,7 +210,7 @@ func (r *RequestInfoFactory) NewRequestInfo(req *http.Request) (*RequestInfo, er opts := metainternalversion.ListOptions{} if err := metainternalversion.ParameterCodec.DecodeParameters(req.URL.Query(), metav1.SchemeGroupVersion, &opts); err != nil { // An error in parsing request will result in default to "list" and not setting "name" field. - glog.Errorf("Couldn't parse request %#v: %v", req.URL.Query(), err) + klog.Errorf("Couldn't parse request %#v: %v", req.URL.Query(), err) // Reset opts to not rely on partial results from parsing. // However, if watch is set, let's report it. opts = metainternalversion.ListOptions{} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/features/kube_features.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/features/kube_features.go index 3b6925722804..92418256814c 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/features/kube_features.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/features/kube_features.go @@ -29,11 +29,19 @@ const ( // owner: @tallclair // alpha: v1.5 + // beta: v1.6 // // StreamingProxyRedirects controls whether the apiserver should intercept (and follow) // redirects from the backend (Kubelet) for streaming requests (exec/attach/port-forward). StreamingProxyRedirects utilfeature.Feature = "StreamingProxyRedirects" + // owner: @tallclair + // alpha: v1.10 + // + // ValidateProxyRedirects controls whether the apiserver should validate that redirects are only + // followed to the same host. Only used if StreamingProxyRedirects is enabled. + ValidateProxyRedirects utilfeature.Feature = "ValidateProxyRedirects" + // owner: @tallclair // alpha: v1.7 // beta: v1.8 @@ -44,6 +52,13 @@ const ( // audited. AdvancedAuditing utilfeature.Feature = "AdvancedAuditing" + // owner: @pbarker + // alpha: v1.13 + // + // DynamicAuditing enables configuration of audit policy and webhook backends through an + // AuditSink API object. + DynamicAuditing utilfeature.Feature = "DynamicAuditing" + // owner: @ilackams // alpha: v1.7 // @@ -67,6 +82,7 @@ const ( // owner: @apelisse // alpha: v1.12 + // beta: v1.13 // // Allow requests to be processed but not stored, so that // validation, merging, mutation can be tested without @@ -83,9 +99,11 @@ func init() { // available throughout Kubernetes binaries. 
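Call sites test these gates at runtime. A sketch, assuming the `DefaultFeatureGate` from `k8s.io/apiserver/pkg/util/feature` that this file's `init()` populates (per the defaults registered below, `DryRun` now reports true by default while `ValidateProxyRedirects` stays opt-in):

```go
package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/features"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
)

func main() {
	// DryRun graduates to Beta (on by default) in this change;
	// ValidateProxyRedirects is new and remains Alpha (off by default).
	fmt.Println("DryRun:", utilfeature.DefaultFeatureGate.Enabled(features.DryRun))
	fmt.Println("ValidateProxyRedirects:", utilfeature.DefaultFeatureGate.Enabled(features.ValidateProxyRedirects))
}
```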
var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureSpec{ StreamingProxyRedirects: {Default: true, PreRelease: utilfeature.Beta}, + ValidateProxyRedirects: {Default: false, PreRelease: utilfeature.Alpha}, AdvancedAuditing: {Default: true, PreRelease: utilfeature.GA}, + DynamicAuditing: {Default: false, PreRelease: utilfeature.Alpha}, APIResponseCompression: {Default: false, PreRelease: utilfeature.Alpha}, Initializers: {Default: false, PreRelease: utilfeature.Alpha}, APIListChunking: {Default: true, PreRelease: utilfeature.Beta}, - DryRun: {Default: false, PreRelease: utilfeature.Alpha}, + DryRun: {Default: true, PreRelease: utilfeature.Beta}, } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/healthz/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/healthz/BUILD index aab24c34ef12..7907d0e6111b 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/healthz/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/healthz/BUILD @@ -10,6 +10,9 @@ go_test( name = "go_default_test", srcs = ["healthz_test.go"], embed = [":go_default_library"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + ], ) go_library( @@ -21,8 +24,9 @@ go_library( importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/healthz", importpath = "k8s.io/apiserver/pkg/server/healthz", deps = [ + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go index 24fafcefa0f3..17d85fbe637e 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go @@ -25,8 +25,9 @@ import ( "sync/atomic" "time" - "github.com/golang/glog" + "k8s.io/klog" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" ) @@ -76,7 +77,7 @@ func (l *log) Check(_ *http.Request) error { l.startOnce.Do(func() { l.lastVerified.Store(time.Now()) go wait.Forever(func() { - glog.Flush() + klog.Flush() l.lastVerified.Store(time.Now()) }, time.Minute) }) @@ -108,11 +109,11 @@ func InstallHandler(mux mux, checks ...HealthzChecker) { // result in a panic. func InstallPathHandler(mux mux, path string, checks ...HealthzChecker) { if len(checks) == 0 { - glog.V(5).Info("No default health checks specified. Installing the ping handler.") + klog.V(5).Info("No default health checks specified. Installing the ping handler.") checks = []HealthzChecker{PingHealthz} } - glog.V(5).Info("Installing healthz checkers:", strings.Join(checkerNames(checks...), ", ")) + klog.V(5).Info("Installing healthz checkers:", formatQuoted(checkerNames(checks...)...)) mux.Handle(path, handleRootHealthz(checks...)) for _, check := range checks { @@ -141,22 +142,43 @@ func (c *healthzCheck) Check(r *http.Request) error { return c.check(r) } +// getExcludedChecks extracts the health check names to be excluded from the query param +func getExcludedChecks(r *http.Request) sets.String { + checks, found := r.URL.Query()["exclude"] + if found { + return sets.NewString(checks...) + } + return sets.NewString() +} + // handleRootHealthz returns an http.HandlerFunc that serves the provided checks. 
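The `exclude` query parameter introduced above can be exercised end to end. A sketch assuming the package's existing `NamedCheck` helper and `PingHealthz` check (only `InstallHandler`, `PingHealthz`, and the exclusion behavior appear in this diff; `NamedCheck` is assumed from the surrounding package):

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
	"net/http/httptest"

	"k8s.io/apiserver/pkg/server/healthz"
)

func main() {
	mux := http.NewServeMux()
	// A check that always fails, so /healthz returns 500 unless it is excluded.
	failing := healthz.NamedCheck("etcd", func(*http.Request) error { return errors.New("down") })
	healthz.InstallHandler(mux, healthz.PingHealthz, failing)

	for _, target := range []string{"/healthz", "/healthz?exclude=etcd"} {
		rec := httptest.NewRecorder()
		mux.ServeHTTP(rec, httptest.NewRequest("GET", target, nil))
		fmt.Println(target, "->", rec.Code) // 500 first, then 200 once "etcd" is excluded
	}
}
```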
func handleRootHealthz(checks ...HealthzChecker) http.HandlerFunc { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { failed := false + excluded := getExcludedChecks(r) var verboseOut bytes.Buffer for _, check := range checks { + // no-op the check if we've specified we want to exclude the check + if excluded.Has(check.Name()) { + excluded.Delete(check.Name()) + fmt.Fprintf(&verboseOut, "[+]%v excluded: ok\n", check.Name()) + continue + } if err := check.Check(r); err != nil { // don't include the error since this endpoint is public. If someone wants more detail // they should have explicit permission to the detailed checks. - glog.V(6).Infof("healthz check %v failed: %v", check.Name(), err) + klog.V(6).Infof("healthz check %v failed: %v", check.Name(), err) fmt.Fprintf(&verboseOut, "[-]%v failed: reason withheld\n", check.Name()) failed = true } else { fmt.Fprintf(&verboseOut, "[+]%v ok\n", check.Name()) } } + if excluded.Len() > 0 { + fmt.Fprintf(&verboseOut, "warn: some health checks cannot be excluded: no matches for %v\n", formatQuoted(excluded.List()...)) + klog.Warningf("cannot exclude some health checks, no health checks are installed matching %v", + formatQuoted(excluded.List()...)) + } // always be verbose on failure if failed { http.Error(w, fmt.Sprintf("%vhealthz check failed", verboseOut.String()), http.StatusInternalServerError) @@ -187,14 +209,20 @@ func adaptCheckToHandler(c func(r *http.Request) error) http.HandlerFunc { // checkerNames returns the names of the checks in the same order as passed in. func checkerNames(checks ...HealthzChecker) []string { - if len(checks) > 0 { - // accumulate the names of checks for printing them out. - checkerNames := make([]string, 0, len(checks)) - for _, check := range checks { - // quote the Name so we can disambiguate - checkerNames = append(checkerNames, fmt.Sprintf("%q", check.Name())) - } - return checkerNames + // accumulate the names of checks for printing them out. + checkerNames := make([]string, 0, len(checks)) + for _, check := range checks { + checkerNames = append(checkerNames, check.Name()) } - return nil + return checkerNames +} + +// formatQuoted returns a formatted string of the health check names, +// preserving the order passed in. 
+func formatQuoted(names ...string) string { + quoted := make([]string, 0, len(names)) + for _, name := range names { + quoted = append(quoted, fmt.Sprintf("%q", name)) + } + return strings.Join(quoted, ",") } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/httplog/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/httplog/BUILD index 22d299ba555e..9626af306f24 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/httplog/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/httplog/BUILD @@ -20,7 +20,7 @@ go_library( ], importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog", importpath = "k8s.io/apiserver/pkg/server/httplog", - deps = ["//vendor/github.com/golang/glog:go_default_library"], + deps = ["//vendor/k8s.io/klog:go_default_library"], ) filegroup( diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go index f8a8a5307aaa..dcdba69225d8 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go @@ -24,7 +24,7 @@ import ( "runtime" "time" - "github.com/golang/glog" + "k8s.io/klog" ) // StacktracePred returns true if a stacktrace should be logged for this status. @@ -61,7 +61,7 @@ type passthroughLogger struct{} // Addf logs info immediately. func (passthroughLogger) Addf(format string, data ...interface{}) { - glog.V(2).Info(fmt.Sprintf(format, data...)) + klog.V(2).Info(fmt.Sprintf(format, data...)) } // DefaultStacktracePred is the default implementation of StacktracePred. @@ -143,11 +143,11 @@ func (rl *respLogger) Addf(format string, data ...interface{}) { // Log is intended to be called once at the end of your request handler, via defer func (rl *respLogger) Log() { latency := time.Since(rl.startTime) - if glog.V(3) { + if klog.V(3) { if !rl.hijacked { - glog.InfoDepth(1, fmt.Sprintf("%s %s: (%v) %v%v%v [%s %s]", rl.req.Method, rl.req.RequestURI, latency, rl.status, rl.statusStack, rl.addedInfo, rl.req.UserAgent(), rl.req.RemoteAddr)) + klog.InfoDepth(1, fmt.Sprintf("%s %s: (%v) %v%v%v [%s %s]", rl.req.Method, rl.req.RequestURI, latency, rl.status, rl.statusStack, rl.addedInfo, rl.req.UserAgent(), rl.req.RemoteAddr)) } else { - glog.InfoDepth(1, fmt.Sprintf("%s %s: (%v) hijacked [%s %s]", rl.req.Method, rl.req.RequestURI, latency, rl.req.UserAgent(), rl.req.RemoteAddr)) + klog.InfoDepth(1, fmt.Sprintf("%s %s: (%v) hijacked [%s %s]", rl.req.Method, rl.req.RequestURI, latency, rl.req.UserAgent(), rl.req.RemoteAddr)) } } } @@ -173,8 +173,8 @@ func (rl *respLogger) Write(b []byte) (int, error) { func (rl *respLogger) Flush() { if flusher, ok := rl.w.(http.Flusher); ok { flusher.Flush() - } else if glog.V(2) { - glog.InfoDepth(1, fmt.Sprintf("Unable to convert %+v into http.Flusher", rl.w)) + } else if klog.V(2) { + klog.InfoDepth(1, fmt.Sprintf("Unable to convert %+v into http.Flusher", rl.w)) } } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/mux/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/mux/BUILD index e60551c82db0..dd150e0fc697 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/mux/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/mux/BUILD @@ -24,7 +24,7 @@ go_library( deps = [ "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - 
"//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/mux/pathrecorder.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/mux/pathrecorder.go index 2f0eb7aa5b22..16857cc8a6b8 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/mux/pathrecorder.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/mux/pathrecorder.go @@ -25,7 +25,7 @@ import ( "sync" "sync/atomic" - "github.com/golang/glog" + "k8s.io/klog" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" @@ -237,20 +237,20 @@ func (m *PathRecorderMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { // ServeHTTP makes it an http.Handler func (h *pathHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if exactHandler, ok := h.pathToHandler[r.URL.Path]; ok { - glog.V(5).Infof("%v: %q satisfied by exact match", h.muxName, r.URL.Path) + klog.V(5).Infof("%v: %q satisfied by exact match", h.muxName, r.URL.Path) exactHandler.ServeHTTP(w, r) return } for _, prefixHandler := range h.prefixHandlers { if strings.HasPrefix(r.URL.Path, prefixHandler.prefix) { - glog.V(5).Infof("%v: %q satisfied by prefix %v", h.muxName, r.URL.Path, prefixHandler.prefix) + klog.V(5).Infof("%v: %q satisfied by prefix %v", h.muxName, r.URL.Path, prefixHandler.prefix) prefixHandler.handler.ServeHTTP(w, r) return } } - glog.V(5).Infof("%v: %q satisfied by NotFoundHandler", h.muxName, r.URL.Path) + klog.V(5).Infof("%v: %q satisfied by NotFoundHandler", h.muxName, r.URL.Path) h.notFoundHandler.ServeHTTP(w, r) } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/routes/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/routes/BUILD index f4d56f9e3040..0b81332a3a45 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/routes/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/routes/BUILD @@ -32,8 +32,8 @@ go_library( "//vendor/github.com/elazarl/go-bindata-assetfs:go_default_library", "//vendor/github.com/emicklei/go-restful:go_default_library", "//vendor/github.com/emicklei/go-restful-swagger12:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/handler:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/routes/flags.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/routes/flags.go index d40f11499b3f..a03b80d3ce79 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/routes/flags.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/routes/flags.go @@ -24,7 +24,7 @@ import ( "path" "sync" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apiserver/pkg/server/mux" ) @@ -57,7 +57,7 @@ func (f DebugFlags) Index(w http.ResponseWriter, r *http.Request) { lock.RLock() defer lock.RUnlock() if err := indexTmpl.Execute(w, registeredFlags); err != nil { - glog.Error(err) + klog.Error(err) } } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/routes/openapi.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/routes/openapi.go index 06c723d37531..934bbf84a042 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/routes/openapi.go +++ 
b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/server/routes/openapi.go @@ -18,7 +18,7 @@ package routes import ( restful "github.com/emicklei/go-restful" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apiserver/pkg/server/mux" "k8s.io/kube-openapi/pkg/common" @@ -37,10 +37,10 @@ func (oa OpenAPI) Install(c *restful.Container, mux *mux.PathRecorderMux) { // are tracked at: https://docs.google.com/document/d/19lEqE9lc4yHJ3WJAJxS_G7TcORIJXGHyq3wpwcH28nU. _, err := handler.BuildAndRegisterOpenAPIService("/swagger.json", c.RegisteredWebServices(), oa.Config, mux) if err != nil { - glog.Fatalf("Failed to register open api spec for root: %v", err) + klog.Fatalf("Failed to register open api spec for root: %v", err) } _, err = handler.BuildAndRegisterOpenAPIVersionedService("/openapi/v2", c.RegisteredWebServices(), oa.Config, mux) if err != nil { - glog.Fatalf("Failed to register versioned open api spec for root: %v", err) + klog.Fatalf("Failed to register versioned open api spec for root: %v", err) } } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd/BUILD index eea138675bbe..c6d0e60fd6d9 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd/BUILD @@ -8,33 +8,12 @@ load( go_test( name = "go_default_test", - srcs = [ - "api_object_versioner_test.go", - "etcd_helper_test.go", - "etcd_watcher_test.go", - ], + srcs = ["api_object_versioner_test.go"], embed = [":go_default_library"], deps = [ - "//staging/src/k8s.io/apimachinery/pkg/api/apitesting:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/apis/example:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/apis/example/v1:go_default_library", "//staging/src/k8s.io/apiserver/pkg/storage:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/storage/etcd/etcdtest:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", "//staging/src/k8s.io/apiserver/pkg/storage/testing:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/storage/tests:go_default_library", - "//vendor/github.com/coreos/etcd/client:go_default_library", - "//vendor/github.com/stretchr/testify/require:go_default_library", ], ) @@ -43,26 +22,14 @@ go_library( srcs = [ "api_object_versioner.go", "doc.go", - "etcd_helper.go", - "etcd_watcher.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/storage/etcd", importpath = "k8s.io/apiserver/pkg/storage/etcd", deps = [ "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/conversion:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/cache:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/apiserver/pkg/storage:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/storage/etcd/metrics:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/storage/etcd/util:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/util/trace:go_default_library", - "//vendor/github.com/coreos/etcd/client:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd/etcd_helper.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd/etcd_helper.go deleted file mode 100644 index c6bdd9c92cdb..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd/etcd_helper.go +++ /dev/null @@ -1,637 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package etcd - -import ( - "context" - "errors" - "fmt" - "path" - "reflect" - "time" - - etcd "github.com/coreos/etcd/client" - "github.com/golang/glog" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime" - utilcache "k8s.io/apimachinery/pkg/util/cache" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/apiserver/pkg/storage" - "k8s.io/apiserver/pkg/storage/etcd/metrics" - etcdutil "k8s.io/apiserver/pkg/storage/etcd/util" - utiltrace "k8s.io/apiserver/pkg/util/trace" -) - -// ValueTransformer allows a string value to be transformed before being read from or written to the underlying store. The methods -// must be able to undo the transformation caused by the other. -type ValueTransformer interface { - // TransformStringFromStorage may transform the provided string from its underlying storage representation or return an error. - // Stale is true if the object on disk is stale and a write to etcd should be issued, even if the contents of the object - // have not changed. - TransformStringFromStorage(string) (value string, stale bool, err error) - // TransformStringToStorage may transform the provided string into the appropriate form in storage or return an error. - TransformStringToStorage(string) (value string, err error) -} - -type identityTransformer struct{} - -func (identityTransformer) TransformStringFromStorage(s string) (string, bool, error) { - return s, false, nil -} -func (identityTransformer) TransformStringToStorage(s string) (string, error) { return s, nil } - -// IdentityTransformer performs no transformation on the provided values. 
-var IdentityTransformer ValueTransformer = identityTransformer{} - -// Creates a new storage interface from the client -// TODO: deprecate in favor of storage.Config abstraction over time -func NewEtcdStorage(client etcd.Client, codec runtime.Codec, prefix string, quorum bool, cacheSize int, transformer ValueTransformer) storage.Interface { - return &etcdHelper{ - etcdMembersAPI: etcd.NewMembersAPI(client), - etcdKeysAPI: etcd.NewKeysAPI(client), - codec: codec, - versioner: APIObjectVersioner{}, - transformer: transformer, - pathPrefix: path.Join("/", prefix), - quorum: quorum, - cache: utilcache.NewCache(cacheSize), - } -} - -// etcdHelper is the reference implementation of storage.Interface. -type etcdHelper struct { - etcdMembersAPI etcd.MembersAPI - etcdKeysAPI etcd.KeysAPI - codec runtime.Codec - transformer ValueTransformer - // Note that versioner is required for etcdHelper to work correctly. - // The public constructors (NewStorage & NewEtcdStorage) are setting it - // correctly, so be careful when manipulating with it manually. - // optional, has to be set to perform any atomic operations - versioner storage.Versioner - // prefix for all etcd keys - pathPrefix string - // if true, perform quorum read - quorum bool - - // We cache objects stored in etcd. For keys we use Node.ModifiedIndex which is equivalent - // to resourceVersion. - // This depends on etcd's indexes being globally unique across all objects/types. This will - // have to revisited if we decide to do things like multiple etcd clusters, or etcd will - // support multi-object transaction that will result in many objects with the same index. - // Number of entries stored in the cache is controlled by maxEtcdCacheEntries constant. - // TODO: Measure how much this cache helps after the conversion code is optimized. - cache utilcache.Cache -} - -// Implements storage.Interface. -func (h *etcdHelper) Versioner() storage.Versioner { - return h.versioner -} - -// Implements storage.Interface. -func (h *etcdHelper) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error { - trace := utiltrace.New("etcdHelper::Create " + getTypeName(obj)) - defer trace.LogIfLong(250 * time.Millisecond) - if ctx == nil { - glog.Errorf("Context is nil") - } - key = path.Join(h.pathPrefix, key) - data, err := runtime.Encode(h.codec, obj) - trace.Step("Object encoded") - if err != nil { - return err - } - if version, err := h.versioner.ObjectResourceVersion(obj); err == nil && version != 0 { - return errors.New("resourceVersion may not be set on objects to be created") - } - if err := h.versioner.PrepareObjectForStorage(obj); err != nil { - return fmt.Errorf("PrepareObjectForStorage returned an error: %v", err) - } - trace.Step("Version checked") - - startTime := time.Now() - opts := etcd.SetOptions{ - TTL: time.Duration(ttl) * time.Second, - PrevExist: etcd.PrevNoExist, - } - - newBody, err := h.transformer.TransformStringToStorage(string(data)) - if err != nil { - return storage.NewInternalError(err.Error()) - } - - response, err := h.etcdKeysAPI.Set(ctx, key, newBody, &opts) - trace.Step("Object created") - metrics.RecordEtcdRequestLatency("create", getTypeName(obj), startTime) - if err != nil { - return toStorageErr(err, key, 0) - } - if out != nil { - if _, err := conversion.EnforcePtr(out); err != nil { - panic("unable to convert output object to pointer") - } - _, _, _, err = h.extractObj(response, err, out, false, false) - } - return err -} - -// Implements storage.Interface. 
-func (h *etcdHelper) Delete(ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions) error { - if ctx == nil { - glog.Errorf("Context is nil") - } - key = path.Join(h.pathPrefix, key) - v, err := conversion.EnforcePtr(out) - if err != nil { - panic("unable to convert output object to pointer") - } - - if preconditions == nil { - startTime := time.Now() - response, err := h.etcdKeysAPI.Delete(ctx, key, nil) - metrics.RecordEtcdRequestLatency("delete", getTypeName(out), startTime) - if !etcdutil.IsEtcdNotFound(err) { - // if the object that existed prior to the delete is returned by etcd, update the out object. - if err != nil || response.PrevNode != nil { - _, _, _, err = h.extractObj(response, err, out, false, true) - } - } - return toStorageErr(err, key, 0) - } - - // Check the preconditions match. - obj := reflect.New(v.Type()).Interface().(runtime.Object) - for { - _, node, res, _, err := h.bodyAndExtractObj(ctx, key, obj, false) - if err != nil { - return toStorageErr(err, key, 0) - } - if err := preconditions.Check(key, obj); err != nil { - return toStorageErr(err, key, 0) - } - index := uint64(0) - if node != nil { - index = node.ModifiedIndex - } else if res != nil { - index = res.Index - } - opt := etcd.DeleteOptions{PrevIndex: index} - startTime := time.Now() - response, err := h.etcdKeysAPI.Delete(ctx, key, &opt) - metrics.RecordEtcdRequestLatency("delete", getTypeName(out), startTime) - if !etcdutil.IsEtcdTestFailed(err) { - if !etcdutil.IsEtcdNotFound(err) { - // if the object that existed prior to the delete is returned by etcd, update the out object. - if err != nil || response.PrevNode != nil { - _, _, _, err = h.extractObj(response, err, out, false, true) - } - } - return toStorageErr(err, key, 0) - } - - glog.V(4).Infof("deletion of %s failed because of a conflict, going to retry", key) - } -} - -// Implements storage.Interface. -func (h *etcdHelper) Watch(ctx context.Context, key string, resourceVersion string, pred storage.SelectionPredicate) (watch.Interface, error) { - if ctx == nil { - glog.Errorf("Context is nil") - } - watchRV, err := h.versioner.ParseResourceVersion(resourceVersion) - if err != nil { - return nil, err - } - key = path.Join(h.pathPrefix, key) - w := newEtcdWatcher(false, h.quorum, nil, pred, h.codec, h.versioner, nil, h.transformer, h) - go w.etcdWatch(ctx, h.etcdKeysAPI, key, watchRV) - return w, nil -} - -// Implements storage.Interface. -func (h *etcdHelper) WatchList(ctx context.Context, key string, resourceVersion string, pred storage.SelectionPredicate) (watch.Interface, error) { - if ctx == nil { - glog.Errorf("Context is nil") - } - watchRV, err := h.versioner.ParseResourceVersion(resourceVersion) - if err != nil { - return nil, err - } - key = path.Join(h.pathPrefix, key) - w := newEtcdWatcher(true, h.quorum, exceptKey(key), pred, h.codec, h.versioner, nil, h.transformer, h) - go w.etcdWatch(ctx, h.etcdKeysAPI, key, watchRV) - return w, nil -} - -// Implements storage.Interface. -func (h *etcdHelper) Get(ctx context.Context, key string, resourceVersion string, objPtr runtime.Object, ignoreNotFound bool) error { - if ctx == nil { - glog.Errorf("Context is nil") - } - key = path.Join(h.pathPrefix, key) - _, _, _, _, err := h.bodyAndExtractObj(ctx, key, objPtr, ignoreNotFound) - return err -} - -// bodyAndExtractObj performs the normal Get path to etcd, returning the parsed node and response for additional information -// about the response, like the current etcd index and the ttl. 
-func (h *etcdHelper) bodyAndExtractObj(ctx context.Context, key string, objPtr runtime.Object, ignoreNotFound bool) (body string, node *etcd.Node, res *etcd.Response, stale bool, err error) { - if ctx == nil { - glog.Errorf("Context is nil") - } - startTime := time.Now() - - opts := &etcd.GetOptions{ - Quorum: h.quorum, - } - - response, err := h.etcdKeysAPI.Get(ctx, key, opts) - metrics.RecordEtcdRequestLatency("get", getTypeName(objPtr), startTime) - if err != nil && !etcdutil.IsEtcdNotFound(err) { - return "", nil, nil, false, toStorageErr(err, key, 0) - } - body, node, stale, err = h.extractObj(response, err, objPtr, ignoreNotFound, false) - return body, node, response, stale, toStorageErr(err, key, 0) -} - -func (h *etcdHelper) extractObj(response *etcd.Response, inErr error, objPtr runtime.Object, ignoreNotFound, prevNode bool) (body string, node *etcd.Node, stale bool, err error) { - if response != nil { - if prevNode { - node = response.PrevNode - } else { - node = response.Node - } - } - if inErr != nil || node == nil || len(node.Value) == 0 { - if ignoreNotFound { - v, err := conversion.EnforcePtr(objPtr) - if err != nil { - return "", nil, false, err - } - v.Set(reflect.Zero(v.Type())) - return "", nil, false, nil - } else if inErr != nil { - return "", nil, false, inErr - } - return "", nil, false, fmt.Errorf("unable to locate a value on the response: %#v", response) - } - - body, stale, err = h.transformer.TransformStringFromStorage(node.Value) - if err != nil { - return body, nil, stale, storage.NewInternalError(err.Error()) - } - out, gvk, err := h.codec.Decode([]byte(body), nil, objPtr) - if err != nil { - return body, nil, stale, err - } - if out != objPtr { - return body, nil, stale, fmt.Errorf("unable to decode object %s into %v", gvk.String(), reflect.TypeOf(objPtr)) - } - // being unable to set the version does not prevent the object from being extracted - _ = h.versioner.UpdateObject(objPtr, node.ModifiedIndex) - return body, node, stale, err -} - -// Implements storage.Interface. 
-func (h *etcdHelper) GetToList(ctx context.Context, key string, resourceVersion string, pred storage.SelectionPredicate, listObj runtime.Object) error { - if ctx == nil { - glog.Errorf("Context is nil") - } - trace := utiltrace.New("GetToList " + getTypeName(listObj)) - listPtr, err := meta.GetItemsPtr(listObj) - if err != nil { - return err - } - key = path.Join(h.pathPrefix, key) - startTime := time.Now() - trace.Step("About to read etcd node") - - opts := &etcd.GetOptions{ - Quorum: h.quorum, - } - response, err := h.etcdKeysAPI.Get(ctx, key, opts) - trace.Step("Etcd node read") - metrics.RecordEtcdRequestLatency("get", getTypeName(listPtr), startTime) - if err != nil { - if etcdutil.IsEtcdNotFound(err) { - if etcdErr, ok := err.(etcd.Error); ok { - return h.versioner.UpdateList(listObj, etcdErr.Index, "") - } - return fmt.Errorf("unexpected error from storage: %#v", err) - } - return toStorageErr(err, key, 0) - } - - nodes := make([]*etcd.Node, 0) - nodes = append(nodes, response.Node) - - if err := h.decodeNodeList(nodes, pred, listPtr); err != nil { - return err - } - trace.Step("Object decoded") - if err := h.versioner.UpdateList(listObj, response.Index, ""); err != nil { - return err - } - return nil -} - -// decodeNodeList walks the tree of each node in the list and decodes into the specified object -func (h *etcdHelper) decodeNodeList(nodes []*etcd.Node, pred storage.SelectionPredicate, slicePtr interface{}) error { - trace := utiltrace.New("decodeNodeList " + getTypeName(slicePtr)) - defer trace.LogIfLong(400 * time.Millisecond) - v, err := conversion.EnforcePtr(slicePtr) - if err != nil || v.Kind() != reflect.Slice { - // This should not happen at runtime. - panic("need ptr to slice") - } - for _, node := range nodes { - if node.Dir { - // IMPORTANT: do not log each key as a discrete step in the trace log - // as it produces an immense amount of log spam when there is a large - // amount of content in the list. - if err := h.decodeNodeList(node.Nodes, pred, slicePtr); err != nil { - return err - } - continue - } - if obj, found := h.getFromCache(node.ModifiedIndex, pred); found { - // obj != nil iff it matches the pred function. - if obj != nil { - v.Set(reflect.Append(v, reflect.ValueOf(obj).Elem())) - } - } else { - body, _, err := h.transformer.TransformStringFromStorage(node.Value) - if err != nil { - // omit items from lists and watches that cannot be transformed, but log the error - utilruntime.HandleError(fmt.Errorf("unable to transform key %q: %v", node.Key, err)) - continue - } - - obj, _, err := h.codec.Decode([]byte(body), nil, reflect.New(v.Type().Elem()).Interface().(runtime.Object)) - if err != nil { - return err - } - // being unable to set the version does not prevent the object from being extracted - _ = h.versioner.UpdateObject(obj, node.ModifiedIndex) - if matched, err := pred.Matches(obj); err == nil && matched { - v.Set(reflect.Append(v, reflect.ValueOf(obj).Elem())) - } - if node.ModifiedIndex != 0 { - h.addToCache(node.ModifiedIndex, obj) - } - } - } - trace.Step(fmt.Sprintf("Decoded %v nodes", len(nodes))) - return nil -} - -// Implements storage.Interface. 
-func (h *etcdHelper) List(ctx context.Context, key string, resourceVersion string, pred storage.SelectionPredicate, listObj runtime.Object) error { - if ctx == nil { - glog.Errorf("Context is nil") - } - trace := utiltrace.New("List " + getTypeName(listObj)) - defer trace.LogIfLong(400 * time.Millisecond) - listPtr, err := meta.GetItemsPtr(listObj) - if err != nil { - return err - } - key = path.Join(h.pathPrefix, key) - startTime := time.Now() - trace.Step("About to list etcd node") - nodes, index, err := h.listEtcdNode(ctx, key) - trace.Step("Etcd node listed") - metrics.RecordEtcdRequestLatency("list", getTypeName(listPtr), startTime) - if err != nil { - return err - } - if err := h.decodeNodeList(nodes, pred, listPtr); err != nil { - return err - } - trace.Step("Node list decoded") - if err := h.versioner.UpdateList(listObj, index, ""); err != nil { - return err - } - return nil -} - -func (h *etcdHelper) listEtcdNode(ctx context.Context, key string) ([]*etcd.Node, uint64, error) { - if ctx == nil { - glog.Errorf("Context is nil") - } - opts := etcd.GetOptions{ - Recursive: true, - Sort: true, - Quorum: h.quorum, - } - result, err := h.etcdKeysAPI.Get(ctx, key, &opts) - if err != nil { - var index uint64 - if etcdError, ok := err.(etcd.Error); ok { - index = etcdError.Index - } - nodes := make([]*etcd.Node, 0) - if etcdutil.IsEtcdNotFound(err) { - return nodes, index, nil - } else { - return nodes, index, toStorageErr(err, key, 0) - } - } - return result.Node.Nodes, result.Index, nil -} - -// Implements storage.Interface. -func (h *etcdHelper) GuaranteedUpdate( - ctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool, - preconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, _ ...runtime.Object) error { - // Ignore the suggestion about current object. - if ctx == nil { - glog.Errorf("Context is nil") - } - v, err := conversion.EnforcePtr(ptrToType) - if err != nil { - // Panic is appropriate, because this is a programming error. - panic("need ptr to type") - } - key = path.Join(h.pathPrefix, key) - for { - obj := reflect.New(v.Type()).Interface().(runtime.Object) - origBody, node, res, stale, err := h.bodyAndExtractObj(ctx, key, obj, ignoreNotFound) - if err != nil { - return toStorageErr(err, key, 0) - } - if err := preconditions.Check(key, obj); err != nil { - return toStorageErr(err, key, 0) - } - meta := storage.ResponseMeta{} - if node != nil { - meta.TTL = node.TTL - meta.ResourceVersion = node.ModifiedIndex - } - // Get the object to be written by calling tryUpdate. - ret, newTTL, err := tryUpdate(obj, meta) - if err != nil { - return toStorageErr(err, key, 0) - } - - index := uint64(0) - ttl := uint64(0) - if node != nil { - index = node.ModifiedIndex - if node.TTL != 0 { - ttl = uint64(node.TTL) - } - if node.Expiration != nil && ttl == 0 { - ttl = 1 - } - } else if res != nil { - index = res.Index - } - - if newTTL != nil { - if ttl != 0 && *newTTL == 0 { - // TODO: remove this after we have verified this is no longer an issue - glog.V(4).Infof("GuaranteedUpdate is clearing TTL for %q, may not be intentional", key) - } - ttl = *newTTL - } - - // Since update object may have a resourceVersion set, we need to clear it here. 
- if err := h.versioner.PrepareObjectForStorage(ret); err != nil { - return errors.New("resourceVersion cannot be set on objects store in etcd") - } - - newBodyData, err := runtime.Encode(h.codec, ret) - if err != nil { - return err - } - newBody := string(newBodyData) - data, err := h.transformer.TransformStringToStorage(newBody) - if err != nil { - return storage.NewInternalError(err.Error()) - } - - // First time this key has been used, try creating new value. - if index == 0 { - startTime := time.Now() - opts := etcd.SetOptions{ - TTL: time.Duration(ttl) * time.Second, - PrevExist: etcd.PrevNoExist, - } - response, err := h.etcdKeysAPI.Set(ctx, key, data, &opts) - metrics.RecordEtcdRequestLatency("create", getTypeName(ptrToType), startTime) - if etcdutil.IsEtcdNodeExist(err) { - continue - } - _, _, _, err = h.extractObj(response, err, ptrToType, false, false) - return toStorageErr(err, key, 0) - } - - // If we don't send an update, we simply return the currently existing - // version of the object. However, the value transformer may indicate that - // the on disk representation has changed and that we must commit an update. - if newBody == origBody && !stale { - _, _, _, err := h.extractObj(res, nil, ptrToType, ignoreNotFound, false) - return err - } - - startTime := time.Now() - // Swap origBody with data, if origBody is the latest etcd data. - opts := etcd.SetOptions{ - PrevIndex: index, - TTL: time.Duration(ttl) * time.Second, - } - response, err := h.etcdKeysAPI.Set(ctx, key, data, &opts) - metrics.RecordEtcdRequestLatency("compareAndSwap", getTypeName(ptrToType), startTime) - if etcdutil.IsEtcdTestFailed(err) { - // Try again. - continue - } - _, _, _, err = h.extractObj(response, err, ptrToType, false, false) - return toStorageErr(err, key, int64(index)) - } -} - -func (*etcdHelper) Count(pathPerfix string) (int64, error) { - return 0, fmt.Errorf("Count is unimplemented for etcd2!") -} - -// etcdCache defines interface used for caching objects stored in etcd. Objects are keyed by -// their Node.ModifiedIndex, which is unique across all types. -// All implementations must be thread-safe. -type etcdCache interface { - getFromCache(index uint64, pred storage.SelectionPredicate) (runtime.Object, bool) - addToCache(index uint64, obj runtime.Object) -} - -func getTypeName(obj interface{}) string { - return reflect.TypeOf(obj).String() -} - -func (h *etcdHelper) getFromCache(index uint64, pred storage.SelectionPredicate) (runtime.Object, bool) { - startTime := time.Now() - defer func() { - metrics.ObserveGetCache(startTime) - }() - obj, found := h.cache.Get(index) - if found { - if matched, err := pred.Matches(obj.(runtime.Object)); err != nil || !matched { - return nil, true - } - // We should not return the object itself to avoid polluting the cache if someone - // modifies returned values. 
- objCopy := obj.(runtime.Object).DeepCopyObject() - metrics.ObserveCacheHit() - return objCopy.(runtime.Object), true - } - metrics.ObserveCacheMiss() - return nil, false -} - -func (h *etcdHelper) addToCache(index uint64, obj runtime.Object) { - startTime := time.Now() - defer func() { - metrics.ObserveAddCache(startTime) - }() - objCopy := obj.DeepCopyObject() - isOverwrite := h.cache.Add(index, objCopy) - if !isOverwrite { - metrics.ObserveNewEntry() - } -} - -func toStorageErr(err error, key string, rv int64) error { - if err == nil { - return nil - } - switch { - case etcdutil.IsEtcdNotFound(err): - return storage.NewKeyNotFoundError(key, rv) - case etcdutil.IsEtcdNodeExist(err): - return storage.NewKeyExistsError(key, rv) - case etcdutil.IsEtcdTestFailed(err): - return storage.NewResourceVersionConflictsError(key, rv) - case etcdutil.IsEtcdUnreachable(err): - return storage.NewUnreachableError(key, rv) - default: - return err - } -} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd/etcd_watcher.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd/etcd_watcher.go deleted file mode 100644 index d3a2e3c53add..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd/etcd_watcher.go +++ /dev/null @@ -1,496 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package etcd - -import ( - "context" - "fmt" - "net/http" - "reflect" - "sync" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/apiserver/pkg/storage" - etcdutil "k8s.io/apiserver/pkg/storage/etcd/util" - - etcd "github.com/coreos/etcd/client" - "github.com/golang/glog" -) - -// Etcd watch event actions -const ( - EtcdCreate = "create" - EtcdGet = "get" - EtcdSet = "set" - EtcdCAS = "compareAndSwap" - EtcdDelete = "delete" - EtcdCAD = "compareAndDelete" - EtcdExpire = "expire" -) - -// TransformFunc attempts to convert an object to another object for use with a watcher. -type TransformFunc func(runtime.Object) (runtime.Object, error) - -// includeFunc returns true if the given key should be considered part of a watch -type includeFunc func(key string) bool - -// exceptKey is an includeFunc that returns false when the provided key matches the watched key -func exceptKey(except string) includeFunc { - return func(key string) bool { - return key != except - } -} - -// etcdWatcher converts a native etcd watch to a watch.Interface. -type etcdWatcher struct { - // HighWaterMarks for performance debugging. - // Important: Since HighWaterMark is using sync/atomic, it has to be at the top of the struct due to a bug on 32-bit platforms - // See: https://golang.org/pkg/sync/atomic/ for more information - incomingHWM storage.HighWaterMark - outgoingHWM storage.HighWaterMark - - encoding runtime.Codec - // Note that versioner is required for etcdWatcher to work correctly. 
- // There is no public constructor of it, so be careful when manipulating - // with it manually. - versioner storage.Versioner - transform TransformFunc - valueTransformer ValueTransformer - - list bool // If we're doing a recursive watch, should be true. - quorum bool // If we enable quorum, should be true - include includeFunc - pred storage.SelectionPredicate - - etcdIncoming chan *etcd.Response - etcdError chan error - ctx context.Context - cancel context.CancelFunc - etcdCallEnded chan struct{} - - outgoing chan watch.Event - userStop chan struct{} - stopped bool - stopLock sync.Mutex - // wg is used to avoid calls to etcd after Stop(), and to make sure - // that the translate goroutine is not leaked. - wg sync.WaitGroup - - // Injectable for testing. Send the event down the outgoing channel. - emit func(watch.Event) - - cache etcdCache -} - -// newEtcdWatcher returns a new etcdWatcher; if list is true, watch sub-nodes. -// The versioner must be able to handle the objects that transform creates. -func newEtcdWatcher(list bool, quorum bool, include includeFunc, pred storage.SelectionPredicate, - encoding runtime.Codec, versioner storage.Versioner, transform TransformFunc, - valueTransformer ValueTransformer, cache etcdCache) *etcdWatcher { - w := &etcdWatcher{ - encoding: encoding, - versioner: versioner, - transform: transform, - valueTransformer: valueTransformer, - - list: list, - quorum: quorum, - include: include, - pred: pred, - // Buffer this channel, so that the etcd client is not forced - // to context switch with every object it gets, and so that a - // long time spent decoding an object won't block the *next* - // object. Basically, we see a lot of "401 window exceeded" - // errors from etcd, and that's due to the client not streaming - // results but rather getting them one at a time. So we really - // want to never block the etcd client, if possible. The 100 is - // mostly arbitrary--we know it goes as high as 50, though. - // There's a V(2) log message that prints the length so we can - // monitor how much of this buffer is actually used. - etcdIncoming: make(chan *etcd.Response, 100), - etcdError: make(chan error, 1), - // Similarly to etcdIncomming, we don't want to force context - // switch on every new incoming object. - outgoing: make(chan watch.Event, 100), - userStop: make(chan struct{}), - stopped: false, - wg: sync.WaitGroup{}, - cache: cache, - ctx: nil, - cancel: nil, - } - w.emit = func(e watch.Event) { - if curLen := int64(len(w.outgoing)); w.outgoingHWM.Update(curLen) { - // Monitor if this gets backed up, and how much. - glog.V(1).Infof("watch (%v): %v objects queued in outgoing channel.", reflect.TypeOf(e.Object).String(), curLen) - } - // Give up on user stop, without this we leak a lot of goroutines in tests. - select { - case w.outgoing <- e: - case <-w.userStop: - } - } - // translate will call done. We need to Add() here because otherwise, - // if Stop() gets called before translate gets started, there'd be a - // problem. - w.wg.Add(1) - go w.translate() - return w -} - -// etcdWatch calls etcd's Watch function, and handles any errors. Meant to be called -// as a goroutine. -func (w *etcdWatcher) etcdWatch(ctx context.Context, client etcd.KeysAPI, key string, resourceVersion uint64) { - defer utilruntime.HandleCrash() - defer close(w.etcdError) - defer close(w.etcdIncoming) - - // All calls to etcd are coming from this function - once it is finished - // no other call to etcd should be generated by this watcher. 
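The initialization that follows is all about making `Stop()` safe to call at any moment, even before the first etcd request goes out: setup runs under `stopLock`, the goroutine joins the WaitGroup before issuing any call, and `Stop()` waits on that group. The guard distilled into a self-contained form (stand-in struct, not the vendored watcher):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type watcher struct {
	stopLock sync.Mutex
	stopped  bool
	wg       sync.WaitGroup
}

// run mirrors etcdWatch's guard: bail out if stop() already happened,
// otherwise register with the WaitGroup *under the lock* so stop()
// cannot slip in between the check and the Add.
func (w *watcher) run(work func()) {
	w.stopLock.Lock()
	if w.stopped {
		w.stopLock.Unlock()
		return
	}
	w.wg.Add(1)
	w.stopLock.Unlock()

	defer w.wg.Done()
	work()
}

// stop marks the watcher stopped and waits for in-flight work.
func (w *watcher) stop() {
	w.stopLock.Lock()
	w.stopped = true
	w.stopLock.Unlock()
	w.wg.Wait()
}

func main() {
	w := &watcher{}
	go w.run(func() { time.Sleep(10 * time.Millisecond) })
	time.Sleep(time.Millisecond) // give run a chance to start (demo only)
	w.stop()
	fmt.Println("stopped; no work in flight")
}
```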
- done := func() {} - - // We need to be prepared, that Stop() can be called at any time. - // It can potentially also be called, even before this function is called. - // If that is the case, we simply skip all the code here. - // See #18928 for more details. - var watcher etcd.Watcher - returned := func() bool { - w.stopLock.Lock() - defer w.stopLock.Unlock() - if w.stopped { - // Watcher has already been stopped - don't event initiate it here. - return true - } - w.wg.Add(1) - done = w.wg.Done - // Perform initialization of watcher under lock - we want to avoid situation when - // Stop() is called in the meantime (which in tests can cause etcd termination and - // strange behavior here). - if resourceVersion == 0 { - latest, err := etcdGetInitialWatchState(ctx, client, key, w.list, w.quorum, w.etcdIncoming) - if err != nil { - w.etcdError <- err - return true - } - resourceVersion = latest - } - - opts := etcd.WatcherOptions{ - Recursive: w.list, - AfterIndex: resourceVersion, - } - watcher = client.Watcher(key, &opts) - w.ctx, w.cancel = context.WithCancel(ctx) - return false - }() - defer done() - if returned { - return - } - - for { - resp, err := watcher.Next(w.ctx) - if err != nil { - w.etcdError <- err - return - } - w.etcdIncoming <- resp - } -} - -// etcdGetInitialWatchState turns an etcd Get request into a watch equivalent -func etcdGetInitialWatchState(ctx context.Context, client etcd.KeysAPI, key string, recursive bool, quorum bool, incoming chan<- *etcd.Response) (resourceVersion uint64, err error) { - opts := etcd.GetOptions{ - Recursive: recursive, - Sort: false, - Quorum: quorum, - } - resp, err := client.Get(ctx, key, &opts) - if err != nil { - if !etcdutil.IsEtcdNotFound(err) { - utilruntime.HandleError(fmt.Errorf("watch was unable to retrieve the current index for the provided key (%q): %v", key, err)) - return resourceVersion, toStorageErr(err, key, 0) - } - if etcdError, ok := err.(etcd.Error); ok { - resourceVersion = etcdError.Index - } - return resourceVersion, nil - } - resourceVersion = resp.Index - convertRecursiveResponse(resp.Node, resp, incoming) - return -} - -// convertRecursiveResponse turns a recursive get response from etcd into individual response objects -// by copying the original response. This emulates the behavior of a recursive watch. -func convertRecursiveResponse(node *etcd.Node, response *etcd.Response, incoming chan<- *etcd.Response) { - if node.Dir { - for i := range node.Nodes { - convertRecursiveResponse(node.Nodes[i], response, incoming) - } - return - } - copied := *response - copied.Action = "get" - copied.Node = node - incoming <- &copied -} - -// translate pulls stuff from etcd, converts, and pushes out the outgoing channel. Meant to be -// called as a goroutine. -func (w *etcdWatcher) translate() { - defer w.wg.Done() - defer close(w.outgoing) - defer utilruntime.HandleCrash() - - for { - select { - case err := <-w.etcdError: - if err != nil { - var status *metav1.Status - switch { - case etcdutil.IsEtcdWatchExpired(err): - status = &metav1.Status{ - Status: metav1.StatusFailure, - Message: err.Error(), - Code: http.StatusGone, // Gone - Reason: metav1.StatusReasonExpired, - } - // TODO: need to generate errors using api/errors which has a circular dependency on this package - // no other way to inject errors - // case etcdutil.IsEtcdUnreachable(err): - // status = errors.NewServerTimeout(...) 
- default: - status = &metav1.Status{ - Status: metav1.StatusFailure, - Message: err.Error(), - Code: http.StatusInternalServerError, - Reason: metav1.StatusReasonInternalError, - } - } - w.emit(watch.Event{ - Type: watch.Error, - Object: status, - }) - } - return - case <-w.userStop: - return - case res, ok := <-w.etcdIncoming: - if ok { - if curLen := int64(len(w.etcdIncoming)); w.incomingHWM.Update(curLen) { - // Monitor if this gets backed up, and how much. - glog.V(1).Infof("watch: %v objects queued in incoming channel.", curLen) - } - w.sendResult(res) - } - // If !ok, don't return here-- must wait for etcdError channel - // to give an error or be closed. - } - } -} - -// decodeObject extracts an object from the provided etcd node or returns an error. -func (w *etcdWatcher) decodeObject(node *etcd.Node) (runtime.Object, error) { - if obj, found := w.cache.getFromCache(node.ModifiedIndex, storage.Everything); found { - return obj, nil - } - - body, _, err := w.valueTransformer.TransformStringFromStorage(node.Value) - if err != nil { - return nil, err - } - - obj, err := runtime.Decode(w.encoding, []byte(body)) - if err != nil { - return nil, err - } - - // ensure resource version is set on the object we load from etcd - if err := w.versioner.UpdateObject(obj, node.ModifiedIndex); err != nil { - utilruntime.HandleError(fmt.Errorf("failure to version api object (%d) %#v: %v", node.ModifiedIndex, obj, err)) - } - - // perform any necessary transformation - if w.transform != nil { - obj, err = w.transform(obj) - if err != nil { - utilruntime.HandleError(fmt.Errorf("failure to transform api object %#v: %v", obj, err)) - return nil, err - } - } - - if node.ModifiedIndex != 0 { - w.cache.addToCache(node.ModifiedIndex, obj) - } - return obj, nil -} - -func (w *etcdWatcher) sendAdd(res *etcd.Response) { - if res.Node == nil { - utilruntime.HandleError(fmt.Errorf("unexpected nil node: %#v", res)) - return - } - if w.include != nil && !w.include(res.Node.Key) { - return - } - obj, err := w.decodeObject(res.Node) - if err != nil { - utilruntime.HandleError(fmt.Errorf("failure to decode api object: %v\n'%v' from %#v %#v", err, string(res.Node.Value), res, res.Node)) - // TODO: expose an error through watch.Interface? - // Ignore this value. If we stop the watch on a bad value, a client that uses - // the resourceVersion to resume will never be able to get past a bad value. - return - } - if matched, err := w.pred.Matches(obj); err != nil || !matched { - return - } - action := watch.Added - w.emit(watch.Event{ - Type: action, - Object: obj, - }) -} - -func (w *etcdWatcher) sendModify(res *etcd.Response) { - if res.Node == nil { - glog.Errorf("unexpected nil node: %#v", res) - return - } - if w.include != nil && !w.include(res.Node.Key) { - return - } - curObj, err := w.decodeObject(res.Node) - if err != nil { - utilruntime.HandleError(fmt.Errorf("failure to decode api object: %v\n'%v' from %#v %#v", err, string(res.Node.Value), res, res.Node)) - // TODO: expose an error through watch.Interface? - // Ignore this value. If we stop the watch on a bad value, a client that uses - // the resourceVersion to resume will never be able to get past a bad value. - return - } - curObjPasses := false - if matched, err := w.pred.Matches(curObj); err == nil && matched { - curObjPasses = true - } - oldObjPasses := false - var oldObj runtime.Object - if res.PrevNode != nil && res.PrevNode.Value != "" { - // Ignore problems reading the old object. 
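The block that follows decodes the previous value and then switches on which versions match the predicate; this is the heart of filtered-watch semantics. When an update moves an object across the predicate boundary, the watcher must report Added or Deleted rather than Modified, or clients that only ever see matching objects get an inconsistent stream. The decision table reduces to a few lines:

```go
package main

import "fmt"

type eventType string

const (
	added    eventType = "ADDED"
	modified eventType = "MODIFIED"
	deleted  eventType = "DELETED"
	none     eventType = ""
)

// eventFor reproduces sendModify's switch: the event type depends on
// whether the old and new versions of the object pass the predicate.
func eventFor(oldPasses, curPasses bool) eventType {
	switch {
	case curPasses && oldPasses:
		return modified
	case curPasses && !oldPasses:
		return added
	case !curPasses && oldPasses:
		return deleted
	}
	return none // neither version matched: emit nothing
}

func main() {
	fmt.Println(eventFor(true, true))  // MODIFIED
	fmt.Println(eventFor(false, true)) // ADDED
	fmt.Println(eventFor(true, false)) // DELETED
}
```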
- if oldObj, err = w.decodeObject(res.PrevNode); err == nil { - if err := w.versioner.UpdateObject(oldObj, res.Node.ModifiedIndex); err != nil { - utilruntime.HandleError(fmt.Errorf("failure to version api object (%d) %#v: %v", res.Node.ModifiedIndex, oldObj, err)) - } - if matched, err := w.pred.Matches(oldObj); err == nil && matched { - oldObjPasses = true - } - } - } - // Some changes to an object may cause it to start or stop matching a pred. - // We need to report those as adds/deletes. So we have to check both the previous - // and current value of the object. - switch { - case curObjPasses && oldObjPasses: - w.emit(watch.Event{ - Type: watch.Modified, - Object: curObj, - }) - case curObjPasses && !oldObjPasses: - w.emit(watch.Event{ - Type: watch.Added, - Object: curObj, - }) - case !curObjPasses && oldObjPasses: - w.emit(watch.Event{ - Type: watch.Deleted, - Object: oldObj, - }) - } - // Do nothing if neither new nor old object passed the pred. -} - -func (w *etcdWatcher) sendDelete(res *etcd.Response) { - if res.PrevNode == nil { - utilruntime.HandleError(fmt.Errorf("unexpected nil prev node: %#v", res)) - return - } - if w.include != nil && !w.include(res.PrevNode.Key) { - return - } - node := *res.PrevNode - if res.Node != nil { - // Note that this sends the *old* object with the etcd index for the time at - // which it gets deleted. This will allow users to restart the watch at the right - // index. - node.ModifiedIndex = res.Node.ModifiedIndex - } - obj, err := w.decodeObject(&node) - if err != nil { - utilruntime.HandleError(fmt.Errorf("failure to decode api object: %v\nfrom %#v %#v", err, res, res.Node)) - // TODO: expose an error through watch.Interface? - // Ignore this value. If we stop the watch on a bad value, a client that uses - // the resourceVersion to resume will never be able to get past a bad value. - return - } - if matched, err := w.pred.Matches(obj); err != nil || !matched { - return - } - w.emit(watch.Event{ - Type: watch.Deleted, - Object: obj, - }) -} - -func (w *etcdWatcher) sendResult(res *etcd.Response) { - switch res.Action { - case EtcdCreate, EtcdGet: - // "Get" will only happen in watch 0 case, where we explicitly want ADDED event - // for initial state. - w.sendAdd(res) - case EtcdSet, EtcdCAS: - w.sendModify(res) - case EtcdDelete, EtcdExpire, EtcdCAD: - w.sendDelete(res) - default: - utilruntime.HandleError(fmt.Errorf("unknown action: %v", res.Action)) - } -} - -// ResultChan implements watch.Interface. -func (w *etcdWatcher) ResultChan() <-chan watch.Event { - return w.outgoing -} - -// Stop implements watch.Interface. -func (w *etcdWatcher) Stop() { - w.stopLock.Lock() - if w.cancel != nil { - w.cancel() - w.cancel = nil - } - if !w.stopped { - w.stopped = true - close(w.userStop) - } - w.stopLock.Unlock() - - // Wait until all calls to etcd are finished and no other - // will be issued. 
- w.wg.Wait() -} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd/util/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd/util/BUILD deleted file mode 100644 index 131664d32a79..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd/util/BUILD +++ /dev/null @@ -1,41 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_test( - name = "go_default_test", - srcs = ["etcd_util_test.go"], - embed = [":go_default_library"], - deps = [ - "//vendor/github.com/coreos/etcd/client:go_default_library", - "//vendor/github.com/stretchr/testify/assert:go_default_library", - ], -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "etcd_util.go", - ], - importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/storage/etcd/util", - importpath = "k8s.io/apiserver/pkg/storage/etcd/util", - deps = ["//vendor/github.com/coreos/etcd/client:go_default_library"], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd/util/doc.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd/util/doc.go deleted file mode 100644 index 48a70cb737dc..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd/util/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package util holds generic etcd-related utility functions that any user of ectd might want to -// use, without pulling in kubernetes-specific code. -package util diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd/util/etcd_util.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd/util/etcd_util.go deleted file mode 100644 index 7c71fe24fc53..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd/util/etcd_util.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - - etcd "github.com/coreos/etcd/client" -) - -// IsEtcdNotFound returns true if and only if err is an etcd not found error. 
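The helpers deleted below all reduce to the same move: the etcd2 client reports failures as `etcd.Error` values carrying a numeric code, and callers classify by code (key not found, node exists, test failed, index cleared). A minimal version of the classifier, with a local error type standing in for `etcd.Error` and illustrative code values:

```go
package main

import "fmt"

// codeError stands in for the etcd2 client's etcd.Error.
type codeError struct {
	Code    int
	Message string
}

func (e codeError) Error() string { return e.Message }

// Error codes in the style of the etcd2 client (values illustrative).
const (
	codeKeyNotFound = 100
	codeNodeExist   = 105
)

// isErrorNum mirrors isEtcdErrorNum: a nil-safe type assertion plus
// a code comparison.
func isErrorNum(err error, code int) bool {
	if err == nil {
		return false
	}
	if ce, ok := err.(codeError); ok {
		return ce.Code == code
	}
	return false
}

func main() {
	err := codeError{Code: codeKeyNotFound, Message: "key not found"}
	fmt.Println(isErrorNum(err, codeKeyNotFound)) // true
	fmt.Println(isErrorNum(err, codeNodeExist))   // false
	fmt.Println(isErrorNum(nil, codeKeyNotFound)) // false
}
```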
-func IsEtcdNotFound(err error) bool { - return isEtcdErrorNum(err, etcd.ErrorCodeKeyNotFound) -} - -// IsEtcdNodeExist returns true if and only if err is an etcd node already exist error. -func IsEtcdNodeExist(err error) bool { - return isEtcdErrorNum(err, etcd.ErrorCodeNodeExist) -} - -// IsEtcdTestFailed returns true if and only if err is an etcd write conflict. -func IsEtcdTestFailed(err error) bool { - return isEtcdErrorNum(err, etcd.ErrorCodeTestFailed) -} - -// IsEtcdWatchExpired returns true if and only if err indicates the watch has expired. -func IsEtcdWatchExpired(err error) bool { - // NOTE: This seems weird why it wouldn't be etcd.ErrorCodeWatcherCleared - // I'm using the previous matching value - return isEtcdErrorNum(err, etcd.ErrorCodeEventIndexCleared) -} - -// IsEtcdUnreachable returns true if and only if err indicates the server could not be reached. -func IsEtcdUnreachable(err error) bool { - // NOTE: The logic has changed previous error code no longer applies - return err == etcd.ErrClusterUnavailable -} - -// isEtcdErrorNum returns true if and only if err is an etcd error, whose errorCode matches errorCode -func isEtcdErrorNum(err error, errorCode int) bool { - if err != nil { - if etcdError, ok := err.(etcd.Error); ok { - return etcdError.Code == errorCode - } - // NOTE: There are other error types returned - } - return false -} - -// GetEtcdVersion performs a version check against the provided Etcd server, -// returning the string response, and error (if any). -func GetEtcdVersion(host string) (string, error) { - response, err := http.Get(host + "/version") - if err != nil { - return "", err - } - defer response.Body.Close() - if response.StatusCode != http.StatusOK { - return "", fmt.Errorf("unsuccessful response from etcd server %q: %v", host, err) - } - versionBytes, err := ioutil.ReadAll(response.Body) - if err != nil { - return "", err - } - return string(versionBytes), nil -} - -type etcdHealth struct { - // Note this has to be public so the json library can modify it. 
- Health string `json:"health"` -} - -func EtcdHealthCheck(data []byte) error { - obj := etcdHealth{} - if err := json.Unmarshal(data, &obj); err != nil { - return err - } - if obj.Health != "true" { - return fmt.Errorf("Unhealthy status: %s", obj.Health) - } - return nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/feature/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/feature/BUILD index 31274c71d861..2b0386ab76bb 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/feature/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/feature/BUILD @@ -22,8 +22,8 @@ go_library( importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/util/feature", importpath = "k8s.io/apiserver/pkg/util/feature", deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go index 8847c1fb6254..a83dafd56abe 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go @@ -24,8 +24,8 @@ import ( "sync" "sync/atomic" - "github.com/golang/glog" "github.com/spf13/pflag" + "k8s.io/klog" ) type Feature string @@ -193,9 +193,9 @@ func (f *featureGate) SetFromMap(m map[string]bool) error { } if featureSpec.PreRelease == Deprecated { - glog.Warningf("Setting deprecated feature gate %s=%t. It will be removed in a future release.", k, v) + klog.Warningf("Setting deprecated feature gate %s=%t. It will be removed in a future release.", k, v) } else if featureSpec.PreRelease == GA { - glog.Warningf("Setting GA feature gate %s=%t. It will be removed in a future release.", k, v) + klog.Warningf("Setting GA feature gate %s=%t. 
It will be removed in a future release.", k, v) } } @@ -203,7 +203,7 @@ func (f *featureGate) SetFromMap(m map[string]bool) error { f.known.Store(known) f.enabled.Store(enabled) - glog.V(1).Infof("feature gates: %v", f.enabled) + klog.V(1).Infof("feature gates: %v", f.enabled) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/flag/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/flag/BUILD index 6370f398c224..7841681a86e5 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/flag/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/flag/BUILD @@ -42,8 +42,8 @@ go_library( deps = [ "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/github.com/docker/docker/pkg/term:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/flag/flags.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/flag/flags.go index 55a3ed34a8e7..d0fff8db2e49 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/flag/flags.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/flag/flags.go @@ -20,8 +20,8 @@ import ( goflag "flag" "strings" - "github.com/golang/glog" "github.com/spf13/pflag" + "k8s.io/klog" ) // WordSepNormalizeFunc changes all flags that contain "_" separators @@ -36,7 +36,7 @@ func WordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { func WarnWordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { if strings.Contains(name, "_") { nname := strings.Replace(name, "_", "-", -1) - glog.Warningf("%s is DEPRECATED and will be removed in a future version. Use %s instead.", name, nname) + klog.Warningf("%s is DEPRECATED and will be removed in a future version. 
Use %s instead.", name, nname) return pflag.NormalizedName(nname) } @@ -49,6 +49,6 @@ func InitFlags() { pflag.CommandLine.AddGoFlagSet(goflag.CommandLine) pflag.Parse() pflag.VisitAll(func(flag *pflag.Flag) { - glog.V(2).Infof("FLAG: --%s=%q", flag.Name, flag.Value) + klog.V(2).Infof("FLAG: --%s=%q", flag.Name, flag.Value) }) } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/logs/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/logs/BUILD index 1e5cf49750a0..d51cc6439261 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/logs/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/logs/BUILD @@ -12,8 +12,8 @@ go_library( importpath = "k8s.io/apiserver/pkg/util/logs", deps = [ "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/logs/logs.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/logs/logs.go index a62c06094dd7..3ffe9eeb29b7 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/logs/logs.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/logs/logs.go @@ -22,9 +22,9 @@ import ( "log" "time" - "github.com/golang/glog" "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog" ) const logFlushFreqFlagName = "log-flush-frequency" @@ -33,6 +33,7 @@ var logFlushFreq = pflag.Duration(logFlushFreqFlagName, 5*time.Second, "Maximum // TODO(thockin): This is temporary until we agree on log dirs and put those into each cmd. func init() { + klog.InitFlags(flag.CommandLine) flag.Set("logtostderr", "true") } @@ -42,38 +43,38 @@ func AddFlags(fs *pflag.FlagSet) { fs.AddFlag(pflag.Lookup(logFlushFreqFlagName)) } -// GlogWriter serves as a bridge between the standard log package and the glog package. -type GlogWriter struct{} +// KlogWriter serves as a bridge between the standard log package and the glog package. +type KlogWriter struct{} // Write implements the io.Writer interface. -func (writer GlogWriter) Write(data []byte) (n int, err error) { - glog.InfoDepth(1, string(data)) +func (writer KlogWriter) Write(data []byte) (n int, err error) { + klog.InfoDepth(1, string(data)) return len(data), nil } // InitLogs initializes logs the way we want for kubernetes. func InitLogs() { - log.SetOutput(GlogWriter{}) + log.SetOutput(KlogWriter{}) log.SetFlags(0) // The default glog flush interval is 5 seconds. - go wait.Forever(glog.Flush, *logFlushFreq) + go wait.Forever(klog.Flush, *logFlushFreq) } // FlushLogs flushes logs immediately. func FlushLogs() { - glog.Flush() + klog.Flush() } -// NewLogger creates a new log.Logger which sends logs to glog.Info. +// NewLogger creates a new log.Logger which sends logs to klog.Info. func NewLogger(prefix string) *log.Logger { - return log.New(GlogWriter{}, prefix, 0) + return log.New(KlogWriter{}, prefix, 0) } // GlogSetter is a setter to set glog level. 
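The `GlogWriter`-to-`KlogWriter` rename above is mechanical, but the underlying trick is worth spelling out: anything satisfying `io.Writer` can capture standard-library log output, so a one-method type is enough to funnel `log.Printf` traffic into a structured logger. A self-contained version using only the standard library (a second `log.Logger` stands in for klog here):

```go
package main

import (
	"log"
	"os"
)

// bridgeWriter forwards standard-library log output to another logger,
// the same shape as KlogWriter forwarding to klog.InfoDepth.
type bridgeWriter struct {
	dst *log.Logger
}

// Write implements io.Writer; each call receives one formatted log line.
func (w bridgeWriter) Write(data []byte) (int, error) {
	w.dst.Output(2, string(data)) // calldepth 2, akin to klog.InfoDepth(1, ...)
	return len(data), nil
}

func main() {
	structured := log.New(os.Stderr, "klog-stand-in: ", log.Lshortfile)
	log.SetOutput(bridgeWriter{dst: structured})
	log.SetFlags(0) // let the destination logger do the formatting
	log.Println("routed through the bridge")
}
```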
func GlogSetter(val string) (string, error) { - var level glog.Level + var level klog.Level if err := level.Set(val); err != nil { - return "", fmt.Errorf("failed set glog.logging.verbosity %s: %v", val, err) + return "", fmt.Errorf("failed set klog.logging.verbosity %s: %v", val, err) } - return fmt.Sprintf("successfully set glog.logging.verbosity to %s", val), nil + return fmt.Sprintf("successfully set klog.logging.verbosity to %s", val), nil } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/trace/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/trace/BUILD index 2e94b1d0d424..4e93ff5b498d 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/trace/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/trace/BUILD @@ -10,7 +10,7 @@ go_library( srcs = ["trace.go"], importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/util/trace", importpath = "k8s.io/apiserver/pkg/util/trace", - deps = ["//vendor/github.com/golang/glog:go_default_library"], + deps = ["//vendor/k8s.io/klog:go_default_library"], ) filegroup( diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/trace/trace.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/trace/trace.go index b2f31c5275d1..9049a17d0dee 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/trace/trace.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/trace/trace.go @@ -22,7 +22,7 @@ import ( "math/rand" "time" - "github.com/golang/glog" + "k8s.io/klog" ) type traceStep struct { @@ -63,17 +63,17 @@ func (t *Trace) logWithStepThreshold(stepThreshold time.Duration) { lastStepTime := t.startTime for _, step := range t.steps { stepDuration := step.stepTime.Sub(lastStepTime) - if stepThreshold == 0 || stepDuration > stepThreshold || glog.V(4) { + if stepThreshold == 0 || stepDuration > stepThreshold || klog.V(4) { buffer.WriteString(fmt.Sprintf("Trace[%d]: [%v] [%v] %v\n", tracenum, step.stepTime.Sub(t.startTime), stepDuration, step.msg)) } lastStepTime = step.stepTime } stepDuration := endTime.Sub(lastStepTime) - if stepThreshold == 0 || stepDuration > stepThreshold || glog.V(4) { + if stepThreshold == 0 || stepDuration > stepThreshold || klog.V(4) { buffer.WriteString(fmt.Sprintf("Trace[%d]: [%v] [%v] END\n", tracenum, endTime.Sub(t.startTime), stepDuration)) } - glog.Info(buffer.String()) + klog.Info(buffer.String()) } func (t *Trace) LogIfLong(threshold time.Duration) { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/BUILD index b9b2b2d9f3ab..d8b203700079 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/BUILD @@ -13,17 +13,21 @@ go_library( "client.go", "error.go", "serviceresolver.go", + "validation.go", "webhook.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/util/webhook", importpath = "k8s.io/apiserver/pkg/util/webhook", deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", + 
"//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go index f6d79dea3623..1d1c0ad3bc9d 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go @@ -19,9 +19,11 @@ package webhook import ( "fmt" "io/ioutil" + "net/http" "strings" "time" + corev1 "k8s.io/api/core/v1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" @@ -31,6 +33,37 @@ import ( // rest.Config generated by the resolver. type AuthenticationInfoResolverWrapper func(AuthenticationInfoResolver) AuthenticationInfoResolver +// NewDefaultAuthenticationInfoResolverWrapper builds a default authn resolver wrapper +func NewDefaultAuthenticationInfoResolverWrapper( + proxyTransport *http.Transport, + kubeapiserverClientConfig *rest.Config) AuthenticationInfoResolverWrapper { + + webhookAuthResolverWrapper := func(delegate AuthenticationInfoResolver) AuthenticationInfoResolver { + return &AuthenticationInfoResolverDelegator{ + ClientConfigForFunc: func(server string) (*rest.Config, error) { + if server == "kubernetes.default.svc" { + return kubeapiserverClientConfig, nil + } + return delegate.ClientConfigFor(server) + }, + ClientConfigForServiceFunc: func(serviceName, serviceNamespace string) (*rest.Config, error) { + if serviceName == "kubernetes" && serviceNamespace == corev1.NamespaceDefault { + return kubeapiserverClientConfig, nil + } + ret, err := delegate.ClientConfigForService(serviceName, serviceNamespace) + if err != nil { + return nil, err + } + if proxyTransport != nil && proxyTransport.DialContext != nil { + ret.Dial = proxyTransport.DialContext + } + return ret, err + }, + } + } + return webhookAuthResolverWrapper +} + // AuthenticationInfoResolver builds rest.Config base on the server or service // name and service namespace. 
type AuthenticationInfoResolver interface { diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/client.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/client.go index 228e9f482aab..0766bcdeec67 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/client.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/client.go @@ -66,14 +66,14 @@ func NewClientManager(gv schema.GroupVersion, addToSchemaFunc func(s *runtime.Sc if err != nil { return ClientManager{}, err } - admissionScheme := runtime.NewScheme() - if err := addToSchemaFunc(admissionScheme); err != nil { + hookScheme := runtime.NewScheme() + if err := addToSchemaFunc(hookScheme); err != nil { return ClientManager{}, err } return ClientManager{ cache: cache, negotiatedSerializer: serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{ - Serializer: serializer.NewCodecFactory(admissionScheme).LegacyCodec(gv), + Serializer: serializer.NewCodecFactory(hookScheme).LegacyCodec(gv), }), }, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/error.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/error.go index 9e3b55dfbfb7..4701530205d9 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/error.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/error.go @@ -28,7 +28,7 @@ type ErrCallingWebhook struct { func (e *ErrCallingWebhook) Error() string { if e.Reason != nil { - return fmt.Sprintf("failed calling admission webhook %q: %v", e.WebhookName, e.Reason) + return fmt.Sprintf("failed calling webhook %q: %v", e.WebhookName, e.Reason) } - return fmt.Sprintf("failed calling admission webhook %q; no further details available", e.WebhookName) + return fmt.Sprintf("failed calling webhook %q; no further details available", e.WebhookName) } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/validation.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/validation.go new file mode 100644 index 000000000000..2ddb2c09ab7a --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/validation.go @@ -0,0 +1,101 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook + +import ( + "fmt" + "net/url" + "strings" + + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// ValidateWebhookURL validates webhook's URL. 
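`NewDefaultAuthenticationInfoResolverWrapper`, added in the authentication.go hunk above, is a delegator: a struct of function fields short-circuits the well-known in-cluster endpoints (`kubernetes.default.svc`, the `kubernetes` service) and falls through to the wrapped resolver for everything else. The same shape in miniature (illustrative types, not the vendored interface):

```go
package main

import "fmt"

// resolver maps a server name to some client configuration.
type resolver interface {
	configFor(server string) (string, error)
}

// resolverFunc lets a plain function satisfy the interface.
type resolverFunc func(server string) (string, error)

func (f resolverFunc) configFor(server string) (string, error) { return f(server) }

// withOverride returns a resolver that short-circuits one special server
// name and delegates the rest, mirroring how the wrapper pins
// kubernetes.default.svc to the API server's own client config.
func withOverride(special, cfg string, delegate resolver) resolver {
	return resolverFunc(func(server string) (string, error) {
		if server == special {
			return cfg, nil
		}
		return delegate.configFor(server)
	})
}

func main() {
	base := resolverFunc(func(s string) (string, error) {
		return "kubeconfig-for-" + s, nil
	})
	r := withOverride("kubernetes.default.svc", "loopback-config", base)
	fmt.Println(r.configFor("kubernetes.default.svc")) // loopback-config <nil>
	fmt.Println(r.configFor("example.com"))            // kubeconfig-for-example.com <nil>
}
```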
+func ValidateWebhookURL(fldPath *field.Path, URL string, forceHttps bool) field.ErrorList { + var allErrors field.ErrorList + const form = "; desired format: https://host[/path]" + if u, err := url.Parse(URL); err != nil { + allErrors = append(allErrors, field.Required(fldPath, "url must be a valid URL: "+err.Error()+form)) + } else { + if forceHttps && u.Scheme != "https" { + allErrors = append(allErrors, field.Invalid(fldPath, u.Scheme, "'https' is the only allowed URL scheme"+form)) + } + if len(u.Host) == 0 { + allErrors = append(allErrors, field.Invalid(fldPath, u.Host, "host must be provided"+form)) + } + if u.User != nil { + allErrors = append(allErrors, field.Invalid(fldPath, u.User.String(), "user information is not permitted in the URL")) + } + if len(u.Fragment) != 0 { + allErrors = append(allErrors, field.Invalid(fldPath, u.Fragment, "fragments are not permitted in the URL")) + } + if len(u.RawQuery) != 0 { + allErrors = append(allErrors, field.Invalid(fldPath, u.RawQuery, "query parameters are not permitted in the URL")) + } + } + return allErrors +} + +func ValidateWebhookService(fldPath *field.Path, namespace, name string, path *string) field.ErrorList { + var allErrors field.ErrorList + + if len(name) == 0 { + allErrors = append(allErrors, field.Required(fldPath.Child("name"), "service name is required")) + } + + if len(namespace) == 0 { + allErrors = append(allErrors, field.Required(fldPath.Child("namespace"), "service namespace is required")) + } + + if path == nil { + return allErrors + } + + // TODO: replace below with url.Parse + verifying that host is empty? + + urlPath := *path + if urlPath == "/" || len(urlPath) == 0 { + return allErrors + } + if urlPath == "//" { + allErrors = append(allErrors, field.Invalid(fldPath.Child("path"), urlPath, "segment[0] may not be empty")) + return allErrors + } + + if !strings.HasPrefix(urlPath, "/") { + allErrors = append(allErrors, field.Invalid(fldPath.Child("path"), urlPath, "must start with a '/'")) + } + + urlPathToCheck := urlPath[1:] + if strings.HasSuffix(urlPathToCheck, "/") { + urlPathToCheck = urlPathToCheck[:len(urlPathToCheck)-1] + } + steps := strings.Split(urlPathToCheck, "/") + for i, step := range steps { + if len(step) == 0 { + allErrors = append(allErrors, field.Invalid(fldPath.Child("path"), urlPath, fmt.Sprintf("segment[%d] may not be empty", i))) + continue + } + failures := validation.IsDNS1123Subdomain(step) + for _, failure := range failures { + allErrors = append(allErrors, field.Invalid(fldPath.Child("path"), urlPath, fmt.Sprintf("segment[%d]: %v", i, failure))) + } + } + + return allErrors +} diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go index 3b03fd3fd5ef..eb6c17bdb6b3 100755 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go @@ -37,7 +37,7 @@ const defaultRequestTimeout = 30 * time.Second type GenericWebhook struct { RestClient *rest.RESTClient - initialBackoff time.Duration + InitialBackoff time.Duration } // NewGenericWebhook creates a new GenericWebhook from the provided kubeconfig file. @@ -83,7 +83,7 @@ func newGenericWebhook(scheme *runtime.Scheme, codecFactory serializer.CodecFact // it returns an error for which apierrors.SuggestsClientDelay() or apierrors.IsInternalError() returns true. 
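`ValidateWebhookURL` rejects non-https schemes (when forced), user info, fragments, and query strings, and `ValidateWebhookService` additionally checks each path segment as a DNS-1123 subdomain. A usage sketch, assuming the `k8s.io/apiserver` and `k8s.io/apimachinery` packages are importable as vendored in this tree:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"
	"k8s.io/apiserver/pkg/util/webhook" // vendored as in this tree
)

func main() {
	// An http scheme plus a query string trips two separate checks
	// when forceHttps is true.
	errs := webhook.ValidateWebhookURL(field.NewPath("clientConfig", "url"),
		"http://hooks.example.com/validate?dry-run=1", true)
	for _, e := range errs {
		fmt.Println(e.Error())
	}
}
```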
func (g *GenericWebhook) WithExponentialBackoff(webhookFn func() rest.Result) rest.Result { var result rest.Result - WithExponentialBackoff(g.initialBackoff, func() error { + WithExponentialBackoff(g.InitialBackoff, func() error { result = webhookFn() return result.Error() }) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/wsstream/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/wsstream/BUILD index 269af15e5ce8..5367ab2f5c6d 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/wsstream/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/wsstream/BUILD @@ -27,8 +27,8 @@ go_library( importpath = "k8s.io/apiserver/pkg/util/wsstream", deps = [ "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/net/websocket:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/wsstream/conn.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/wsstream/conn.go index 6f26b227579b..2d1a79021363 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/wsstream/conn.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/wsstream/conn.go @@ -25,8 +25,8 @@ import ( "strings" "time" - "github.com/golang/glog" "golang.org/x/net/websocket" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/runtime" ) @@ -137,7 +137,7 @@ type ChannelProtocolConfig struct { // channels. func NewDefaultChannelProtocols(channels []ChannelType) map[string]ChannelProtocolConfig { return map[string]ChannelProtocolConfig{ - "": {Binary: true, Channels: channels}, + "": {Binary: true, Channels: channels}, ChannelWebSocketProtocol: {Binary: true, Channels: channels}, Base64ChannelWebSocketProtocol: {Binary: false, Channels: channels}, } @@ -251,7 +251,7 @@ func (conn *Conn) handle(ws *websocket.Conn) { var data []byte if err := websocket.Message.Receive(ws, &data); err != nil { if err != io.EOF { - glog.Errorf("Error on socket receive: %v", err) + klog.Errorf("Error on socket receive: %v", err) } break } @@ -264,11 +264,11 @@ func (conn *Conn) handle(ws *websocket.Conn) { } data = data[1:] if int(channel) >= len(conn.channels) { - glog.V(6).Infof("Frame is targeted for a reader %d that is not valid, possible protocol error", channel) + klog.V(6).Infof("Frame is targeted for a reader %d that is not valid, possible protocol error", channel) continue } if _, err := conn.channels[channel].DataFromSocket(data); err != nil { - glog.Errorf("Unable to write frame to %d: %v\n%s", channel, err, string(data)) + klog.Errorf("Unable to write frame to %d: %v\n%s", channel, err, string(data)) continue } } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/wsstream/stream.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/wsstream/stream.go index 9dd165bfabcf..4253c17cf578 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/wsstream/stream.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/util/wsstream/stream.go @@ -48,7 +48,7 @@ type ReaderProtocolConfig struct { // subprotocols "", "channel.k8s.io", "base64.channel.k8s.io". 
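`WithExponentialBackoff` (the free function called above, now seeded from the exported `InitialBackoff` field) retries a fallible call with growing delays. Its vendored implementation is not shown in this hunk, so the following is only a generic sketch of the pattern it names, with illustrative retry parameters:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// withExponentialBackoff retries fn up to attempts times, doubling the
// delay after each failure. The vendored attempt count and jitter
// behavior may differ.
func withExponentialBackoff(initial time.Duration, attempts int, fn func() error) error {
	delay := initial
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
		time.Sleep(delay)
		delay *= 2
	}
	return err
}

func main() {
	calls := 0
	err := withExponentialBackoff(10*time.Millisecond, 5, func() error {
		calls++
		if calls < 3 {
			return errors.New("transient")
		}
		return nil
	})
	fmt.Println(calls, err) // 3 <nil>
}
```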
func NewDefaultReaderProtocols() map[string]ReaderProtocolConfig { return map[string]ReaderProtocolConfig{ - "": {Binary: true}, + "": {Binary: true}, binaryWebSocketProtocol: {Binary: true}, base64BinaryWebSocketProtocol: {Binary: false}, } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/BUILD index fd4af99aa551..eb13b669bdf3 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/BUILD @@ -16,6 +16,8 @@ go_test( deps = [ "//staging/src/k8s.io/api/authentication/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/authentication/token/cache:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//staging/src/k8s.io/client-go/tools/clientcmd/api/v1:go_default_library", ], @@ -30,13 +32,12 @@ go_library( "//staging/src/k8s.io/api/authentication/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/cache:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/webhook:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go b/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go index feb55f91d35f..cf0a83b5d979 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go @@ -18,19 +18,18 @@ limitations under the License. 
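The BUILD changes above (dropping `apimachinery/pkg/util/cache`, adding `authentication/token/cache` to the tests) preview the rewrite that follows: the token-review authenticator stops keeping its own LRU response cache and instead gains audience validation. The core operation is intersecting the audiences the request context wants with those the webhook (or the server implicitly) grants; an empty intersection means unauthenticated. A sketch over a plain string slice (the real `authenticator.Audiences.Intersect` used below behaves this way):

```go
package main

import "fmt"

type audiences []string

// intersect keeps the wanted audiences that the token was actually
// issued for, mirroring authenticator.Audiences.Intersect.
func (a audiences) intersect(other audiences) audiences {
	set := make(map[string]bool, len(other))
	for _, aud := range other {
		set[aud] = true
	}
	var out audiences
	for _, aud := range a {
		if set[aud] {
			out = append(out, aud)
		}
	}
	return out
}

func main() {
	want := audiences{"https://kubernetes.default.svc", "api"}
	got := audiences{"api"}
	common := want.intersect(got)
	// An empty intersection would mean: return unauthenticated.
	fmt.Println(common, len(common) == 0) // [api] false
}
```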
package webhook import ( + "context" "time" - "github.com/golang/glog" - authentication "k8s.io/api/authentication/v1beta1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/cache" "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/util/webhook" "k8s.io/client-go/kubernetes/scheme" authenticationclient "k8s.io/client-go/kubernetes/typed/authentication/v1beta1" + "k8s.io/klog" ) var ( @@ -44,54 +43,82 @@ var _ authenticator.Token = (*WebhookTokenAuthenticator)(nil) type WebhookTokenAuthenticator struct { tokenReview authenticationclient.TokenReviewInterface - responseCache *cache.LRUExpireCache - ttl time.Duration initialBackoff time.Duration + implicitAuds authenticator.Audiences } -// NewFromInterface creates a webhook authenticator using the given tokenReview client -func NewFromInterface(tokenReview authenticationclient.TokenReviewInterface, ttl time.Duration) (*WebhookTokenAuthenticator, error) { - return newWithBackoff(tokenReview, ttl, retryBackoff) +// NewFromInterface creates a webhook authenticator using the given tokenReview +// client. It is recommend to wrap this authenticator with the token cache +// authenticator implemented in +// k8s.io/apiserver/pkg/authentication/token/cache. +func NewFromInterface(tokenReview authenticationclient.TokenReviewInterface, implicitAuds authenticator.Audiences) (*WebhookTokenAuthenticator, error) { + return newWithBackoff(tokenReview, retryBackoff, implicitAuds) } -// New creates a new WebhookTokenAuthenticator from the provided kubeconfig file. -func New(kubeConfigFile string, ttl time.Duration) (*WebhookTokenAuthenticator, error) { +// New creates a new WebhookTokenAuthenticator from the provided kubeconfig +// file. It is recommend to wrap this authenticator with the token cache +// authenticator implemented in +// k8s.io/apiserver/pkg/authentication/token/cache. +func New(kubeConfigFile string, implicitAuds authenticator.Audiences) (*WebhookTokenAuthenticator, error) { tokenReview, err := tokenReviewInterfaceFromKubeconfig(kubeConfigFile) if err != nil { return nil, err } - return newWithBackoff(tokenReview, ttl, retryBackoff) + return newWithBackoff(tokenReview, retryBackoff, implicitAuds) } // newWithBackoff allows tests to skip the sleep. -func newWithBackoff(tokenReview authenticationclient.TokenReviewInterface, ttl, initialBackoff time.Duration) (*WebhookTokenAuthenticator, error) { - return &WebhookTokenAuthenticator{tokenReview, cache.NewLRUExpireCache(1024), ttl, initialBackoff}, nil +func newWithBackoff(tokenReview authenticationclient.TokenReviewInterface, initialBackoff time.Duration, implicitAuds authenticator.Audiences) (*WebhookTokenAuthenticator, error) { + return &WebhookTokenAuthenticator{tokenReview, initialBackoff, implicitAuds}, nil } // AuthenticateToken implements the authenticator.Token interface. -func (w *WebhookTokenAuthenticator) AuthenticateToken(token string) (user.Info, bool, error) { +func (w *WebhookTokenAuthenticator) AuthenticateToken(ctx context.Context, token string) (*authenticator.Response, bool, error) { + // We take implicit audiences of the API server at WebhookTokenAuthenticator + // construction time. The outline of how we validate audience here is: + // + // * if the ctx is not audience limited, don't do any audience validation. 
+ // * if ctx is audience-limited, add the audiences to the tokenreview spec + // * if the tokenreview returns with audiences in the status that intersect + // with the audiences in the ctx, copy into the response and return success + // * if the tokenreview returns without an audience in the status, ensure + // the ctx audiences intersect with the implicit audiences, and set the + // intersection in the response. + // * otherwise return unauthenticated. + wantAuds, checkAuds := authenticator.AudiencesFrom(ctx) r := &authentication.TokenReview{ - Spec: authentication.TokenReviewSpec{Token: token}, + Spec: authentication.TokenReviewSpec{ + Token: token, + Audiences: wantAuds, + }, + } + var ( + result *authentication.TokenReview + err error + auds authenticator.Audiences + ) + webhook.WithExponentialBackoff(w.initialBackoff, func() error { + result, err = w.tokenReview.Create(r) + return err + }) + if err != nil { + // An error here indicates bad configuration or an outage. Log for debugging. + klog.Errorf("Failed to make webhook authenticator request: %v", err) + return nil, false, err } - if entry, ok := w.responseCache.Get(r.Spec); ok { - r.Status = entry.(authentication.TokenReviewStatus) - } else { - var ( - result *authentication.TokenReview - err error - ) - webhook.WithExponentialBackoff(w.initialBackoff, func() error { - result, err = w.tokenReview.Create(r) - return err - }) - if err != nil { - // An error here indicates bad configuration or an outage. Log for debugging. - glog.Errorf("Failed to make webhook authenticator request: %v", err) - return nil, false, err + + if checkAuds { + gotAuds := w.implicitAuds + if len(result.Status.Audiences) > 0 { + gotAuds = result.Status.Audiences + } + auds = wantAuds.Intersect(gotAuds) + if len(auds) == 0 { + return nil, false, nil } - r.Status = result.Status - w.responseCache.Add(r.Spec, result.Status, w.ttl) } + + r.Status = result.Status if !r.Status.Authenticated { return nil, false, nil } @@ -104,11 +131,14 @@ func (w *WebhookTokenAuthenticator) AuthenticateToken(token string) (user.Info, } } - return &user.DefaultInfo{ - Name: r.Status.User.Username, - UID: r.Status.User.UID, - Groups: r.Status.User.Groups, - Extra: extra, + return &authenticator.Response{ + User: &user.DefaultInfo{ + Name: r.Status.User.Username, + UID: r.Status.User.UID, + Groups: r.Status.User.Groups, + Extra: extra, + }, + Audiences: auds, }, true, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/BUILD b/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/BUILD index 59a23decbbe6..270aed93282c 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/BUILD @@ -38,7 +38,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/util/webhook:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go b/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go index 03b7bda32fe5..e05ef503f22a 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go +++ 
b/cluster-autoscaler/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go @@ -22,7 +22,7 @@ import ( "fmt" "time" - "github.com/golang/glog" + "k8s.io/klog" authorization "k8s.io/api/authorization/v1beta1" "k8s.io/apimachinery/pkg/runtime" @@ -189,7 +189,7 @@ func (w *WebhookAuthorizer) Authorize(attr authorizer.Attributes) (decision auth }) if err != nil { // An error here indicates bad configuration or an outage. Log for debugging. - glog.Errorf("Failed to make webhook authorizer request: %v", err) + klog.Errorf("Failed to make webhook authorizer request: %v", err) return w.decisionOnError, "", err } r.Status = result.Status diff --git a/cluster-autoscaler/vendor/k8s.io/cli-runtime/pkg/genericclioptions/jsonpath_flags.go b/cluster-autoscaler/vendor/k8s.io/cli-runtime/pkg/genericclioptions/jsonpath_flags.go index 6f75af91b431..8fc2227cf1c4 100644 --- a/cluster-autoscaler/vendor/k8s.io/cli-runtime/pkg/genericclioptions/jsonpath_flags.go +++ b/cluster-autoscaler/vendor/k8s.io/cli-runtime/pkg/genericclioptions/jsonpath_flags.go @@ -19,6 +19,7 @@ package genericclioptions import ( "fmt" "io/ioutil" + "sort" "strings" "github.com/spf13/cobra" @@ -49,6 +50,7 @@ func (f *JSONPathPrintFlags) AllowedFormats() []string { for format := range jsonFormats { formats = append(formats, format) } + sort.Strings(formats) return formats } diff --git a/cluster-autoscaler/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/BUILD b/cluster-autoscaler/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/BUILD index 60a43ae33cc3..2cbb55099d23 100644 --- a/cluster-autoscaler/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/BUILD @@ -21,7 +21,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/client-go/util/jsonpath:go_default_library", - "//vendor/github.com/ghodss/yaml:go_default_library", + "//vendor/sigs.k8s.io/yaml:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/json.go b/cluster-autoscaler/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/json.go index 1df9a8646656..bb5bec7488cd 100644 --- a/cluster-autoscaler/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/json.go +++ b/cluster-autoscaler/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/json.go @@ -25,7 +25,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" - "github.com/ghodss/yaml" + "sigs.k8s.io/yaml" ) // JSONPrinter is an implementation of ResourcePrinter which outputs an object as JSON. @@ -68,10 +68,7 @@ func (p *JSONPrinter) PrintObj(obj runtime.Object, w io.Writer) error { // YAMLPrinter is an implementation of ResourcePrinter which outputs an object as YAML. // The input object is assumed to be in the internal version of an API and is converted // to the given version first. -type YAMLPrinter struct { - version string - converter runtime.ObjectConvertor -} +type YAMLPrinter struct{} // PrintObj prints the data as YAML. 
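Editor's note on the token webhook change above: the new audience handling reduces to one rule. The effective audiences are the intersection of the caller's requested audiences with either the audiences the webhook returned in its status or, when the status carries none, the API server's implicit audiences; an empty intersection means unauthenticated. A minimal sketch of that rule, using the authenticator.Audiences type and its Intersect method that appear in the diff:

package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/authentication/authenticator"
)

// effectiveAudiences mirrors the intersection logic in AuthenticateToken;
// the helper name is mine, not part of the vendored code.
func effectiveAudiences(want, implicit, status authenticator.Audiences) (authenticator.Audiences, bool) {
	got := implicit
	if len(status) > 0 {
		got = status // the webhook explicitly validated these audiences
	}
	auds := want.Intersect(got)
	// An empty intersection means the token is valid for none of the
	// requested audiences, so authentication must fail.
	return auds, len(auds) > 0
}

func main() {
	want := authenticator.Audiences{"https://kubernetes.default.svc"}
	implicit := authenticator.Audiences{"https://kubernetes.default.svc", "api"}
	auds, ok := effectiveAudiences(want, implicit, nil)
	fmt.Println(auds, ok) // [https://kubernetes.default.svc] true
}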
func (p *YAMLPrinter) PrintObj(obj runtime.Object, w io.Writer) error { diff --git a/cluster-autoscaler/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/jsonpath.go b/cluster-autoscaler/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/jsonpath.go index 0bdb3511f1e0..333b9c334439 100644 --- a/cluster-autoscaler/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/jsonpath.go +++ b/cluster-autoscaler/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/jsonpath.go @@ -17,6 +17,7 @@ limitations under the License. package printers import ( + "bytes" "encoding/json" "fmt" "io" @@ -136,10 +137,11 @@ func (j *JSONPathPrinter) PrintObj(obj runtime.Object, w io.Writer) error { } if err := j.JSONPath.Execute(w, queryObj); err != nil { - fmt.Fprintf(w, "Error executing template: %v. Printing more information for debugging the template:\n", err) - fmt.Fprintf(w, "\ttemplate was:\n\t\t%v\n", j.rawTemplate) - fmt.Fprintf(w, "\tobject given to jsonpath engine was:\n\t\t%#v\n\n", queryObj) - return fmt.Errorf("error executing jsonpath %q: %v\n", j.rawTemplate, err) + buf := bytes.NewBuffer(nil) + fmt.Fprintf(buf, "Error executing template: %v. Printing more information for debugging the template:\n", err) + fmt.Fprintf(buf, "\ttemplate was:\n\t\t%v\n", j.rawTemplate) + fmt.Fprintf(buf, "\tobject given to jsonpath engine was:\n\t\t%#v\n\n", queryObj) + return fmt.Errorf("error executing jsonpath %q: %v\n", j.rawTemplate, buf.String()) } return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/BUILD b/cluster-autoscaler/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/BUILD index 89259fa1db76..22b34de00871 100644 --- a/cluster-autoscaler/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/BUILD @@ -76,8 +76,8 @@ go_test( "//staging/src/k8s.io/client-go/restmapper:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", "//vendor/github.com/davecgh/go-spew/spew:go_default_library", - "//vendor/github.com/ghodss/yaml:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/sigs.k8s.io/yaml:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/builder.go b/cluster-autoscaler/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/builder.go index 22680c517c25..42f660a4e53f 100644 --- a/cluster-autoscaler/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/builder.go +++ b/cluster-autoscaler/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/builder.go @@ -1056,7 +1056,7 @@ func (b *Builder) visitByPaths() *Result { if b.labelSelector != nil { selector, err := labels.Parse(*b.labelSelector) if err != nil { - return result.withError(fmt.Errorf("the provided selector %q is not valid: %v", b.labelSelector, err)) + return result.withError(fmt.Errorf("the provided selector %q is not valid: %v", *b.labelSelector, err)) } visitors = NewFilteredVisitor(visitors, FilterByLabelSelector(selector)) } @@ -1089,9 +1089,10 @@ func (b *Builder) Do() *Result { if b.requireObject { helpers = append(helpers, RetrieveLazy) } - r.visitor = NewDecoratedVisitor(r.visitor, helpers...) if b.continueOnError { - r.visitor = ContinueOnErrorVisitor{r.visitor} + r.visitor = NewDecoratedVisitor(ContinueOnErrorVisitor{r.visitor}, helpers...) + } else { + r.visitor = NewDecoratedVisitor(r.visitor, helpers...) 
} return r } diff --git a/cluster-autoscaler/vendor/k8s.io/cli-runtime/pkg/genericclioptions/template_flags.go b/cluster-autoscaler/vendor/k8s.io/cli-runtime/pkg/genericclioptions/template_flags.go index eeae7ac7d6ec..08954b241735 100644 --- a/cluster-autoscaler/vendor/k8s.io/cli-runtime/pkg/genericclioptions/template_flags.go +++ b/cluster-autoscaler/vendor/k8s.io/cli-runtime/pkg/genericclioptions/template_flags.go @@ -19,6 +19,7 @@ package genericclioptions import ( "fmt" "io/ioutil" + "sort" "strings" "github.com/spf13/cobra" @@ -51,6 +52,7 @@ func (f *GoTemplatePrintFlags) AllowedFormats() []string { for format := range templateFormats { formats = append(formats, format) } + sort.Strings(formats) return formats } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/discovery/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/discovery/BUILD index 1ddb339a4ff2..64e75dc511ca 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/discovery/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/discovery/BUILD @@ -28,12 +28,12 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/version:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/golang/protobuf/proto:go_default_library", "//vendor/github.com/googleapis/gnostic/OpenAPIv2:go_default_library", "//vendor/github.com/gregjones/httpcache:go_default_library", "//vendor/github.com/gregjones/httpcache/diskcache:go_default_library", "//vendor/github.com/peterbourgon/diskv:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/discovery/cached_discovery.go b/cluster-autoscaler/vendor/k8s.io/client-go/discovery/cached_discovery.go index d38a0bbdad31..df69d6a1930b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/discovery/cached_discovery.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/discovery/cached_discovery.go @@ -25,8 +25,8 @@ import ( "sync" "time" - "github.com/golang/glog" "github.com/googleapis/gnostic/OpenAPIv2" + "k8s.io/klog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -67,23 +67,23 @@ func (d *CachedDiscoveryClient) ServerResourcesForGroupVersion(groupVersion stri if err == nil { cachedResources := &metav1.APIResourceList{} if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), cachedBytes, cachedResources); err == nil { - glog.V(10).Infof("returning cached discovery info from %v", filename) + klog.V(10).Infof("returning cached discovery info from %v", filename) return cachedResources, nil } } liveResources, err := d.delegate.ServerResourcesForGroupVersion(groupVersion) if err != nil { - glog.V(3).Infof("skipped caching discovery info due to %v", err) + klog.V(3).Infof("skipped caching discovery info due to %v", err) return liveResources, err } if liveResources == nil || len(liveResources.APIResources) == 0 { - glog.V(3).Infof("skipped caching discovery info, no resources found") + klog.V(3).Infof("skipped caching discovery info, no resources found") return liveResources, err } if err := d.writeCachedFile(filename, liveResources); err != nil { - glog.V(1).Infof("failed to write cache to %v due to %v", filename, err) + klog.V(1).Infof("failed to write cache to %v due to %v", filename, err) } return liveResources, nil @@ -94,6 +94,8 @@ func (d *CachedDiscoveryClient) ServerResources() ([]*metav1.APIResourceList, er return 
ServerResources(d) } +// ServerGroups returns the supported groups, with information like supported versions and the +// preferred version. func (d *CachedDiscoveryClient) ServerGroups() (*metav1.APIGroupList, error) { filename := filepath.Join(d.cacheDirectory, "servergroups.json") cachedBytes, err := d.getCachedFile(filename) @@ -101,23 +103,23 @@ func (d *CachedDiscoveryClient) ServerGroups() (*metav1.APIGroupList, error) { if err == nil { cachedGroups := &metav1.APIGroupList{} if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), cachedBytes, cachedGroups); err == nil { - glog.V(10).Infof("returning cached discovery info from %v", filename) + klog.V(10).Infof("returning cached discovery info from %v", filename) return cachedGroups, nil } } liveGroups, err := d.delegate.ServerGroups() if err != nil { - glog.V(3).Infof("skipped caching discovery info due to %v", err) + klog.V(3).Infof("skipped caching discovery info due to %v", err) return liveGroups, err } if liveGroups == nil || len(liveGroups.Groups) == 0 { - glog.V(3).Infof("skipped caching discovery info, no groups found") + klog.V(3).Infof("skipped caching discovery info, no groups found") return liveGroups, err } if err := d.writeCachedFile(filename, liveGroups); err != nil { - glog.V(1).Infof("failed to write cache to %v due to %v", filename, err) + klog.V(1).Infof("failed to write cache to %v due to %v", filename, err) } return liveGroups, nil @@ -202,26 +204,36 @@ func (d *CachedDiscoveryClient) writeCachedFile(filename string, obj runtime.Obj return err } +// RESTClient returns a RESTClient that is used to communicate with API server +// by this client implementation. func (d *CachedDiscoveryClient) RESTClient() restclient.Interface { return d.delegate.RESTClient() } +// ServerPreferredResources returns the supported resources with the version preferred by the +// server. func (d *CachedDiscoveryClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) { return ServerPreferredResources(d) } +// ServerPreferredNamespacedResources returns the supported namespaced resources with the +// version preferred by the server. func (d *CachedDiscoveryClient) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { return ServerPreferredNamespacedResources(d) } +// ServerVersion retrieves and parses the server's version (git version). func (d *CachedDiscoveryClient) ServerVersion() (*version.Info, error) { return d.delegate.ServerVersion() } +// OpenAPISchema retrieves and parses the swagger API schema the server supports. func (d *CachedDiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { return d.delegate.OpenAPISchema() } +// Fresh is supposed to tell the caller whether or not to retry if the cache +// fails to find something (false = retry, true = no need to retry). func (d *CachedDiscoveryClient) Fresh() bool { d.mutex.Lock() defer d.mutex.Unlock() @@ -229,6 +241,7 @@ func (d *CachedDiscoveryClient) Fresh() bool { return d.fresh } +// Invalidate enforces that no cached data is used in the future that is older than the current time. 
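The CachedDiscoveryClient methods above all follow the same read-through shape: try the on-disk cache, fall back to the live discovery client, then write the result back on a best-effort basis, never caching failures or empty answers. A generic sketch of that shape; load, fetch, and store are hypothetical helpers of mine:

package main

import (
	"errors"
	"fmt"
)

// readThrough sketches the pattern used by ServerGroups and
// ServerResourcesForGroupVersion in the vendored code above.
func readThrough(
	load func() ([]byte, error), // read the cached file
	fetch func() ([]byte, error), // ask the live discovery client
	store func([]byte) error, // write the cache file
) ([]byte, error) {
	if b, err := load(); err == nil {
		return b, nil // cache hit: skip the API call entirely
	}
	b, err := fetch()
	if err != nil || len(b) == 0 {
		return b, err // don't cache failures or empty results
	}
	if err := store(b); err != nil {
		// A write failure only costs us the cache, not correctness.
		fmt.Println("failed to write cache:", err)
	}
	return b, nil
}

func main() {
	got, err := readThrough(
		func() ([]byte, error) { return nil, errors.New("cache miss") },
		func() ([]byte, error) { return []byte(`{"groups":[]}`), nil },
		func([]byte) error { return nil },
	)
	fmt.Println(string(got), err)
}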
func (d *CachedDiscoveryClient) Invalidate() { d.mutex.Lock() defer d.mutex.Unlock() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/discovery/discovery_client.go b/cluster-autoscaler/vendor/k8s.io/client-go/discovery/discovery_client.go index a96602974f4e..17b39de05396 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/discovery/discovery_client.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/discovery/discovery_client.go @@ -263,8 +263,8 @@ func ServerPreferredResources(d DiscoveryInterface) ([]*metav1.APIResourceList, result := []*metav1.APIResourceList{} grVersions := map[schema.GroupResource]string{} // selected version of a GroupResource - grApiResources := map[schema.GroupResource]*metav1.APIResource{} // selected APIResource for a GroupResource - gvApiResourceLists := map[schema.GroupVersion]*metav1.APIResourceList{} // blueprint for a APIResourceList for later grouping + grAPIResources := map[schema.GroupResource]*metav1.APIResource{} // selected APIResource for a GroupResource + gvAPIResourceLists := map[schema.GroupVersion]*metav1.APIResourceList{} // blueprint for a APIResourceList for later grouping for _, apiGroup := range serverGroupList.Groups { for _, version := range apiGroup.Versions { @@ -276,11 +276,11 @@ func ServerPreferredResources(d DiscoveryInterface) ([]*metav1.APIResourceList, } // create empty list which is filled later in another loop - emptyApiResourceList := metav1.APIResourceList{ + emptyAPIResourceList := metav1.APIResourceList{ GroupVersion: version.GroupVersion, } - gvApiResourceLists[groupVersion] = &emptyApiResourceList - result = append(result, &emptyApiResourceList) + gvAPIResourceLists[groupVersion] = &emptyAPIResourceList + result = append(result, &emptyAPIResourceList) for i := range apiResourceList.APIResources { apiResource := &apiResourceList.APIResources[i] @@ -288,21 +288,21 @@ func ServerPreferredResources(d DiscoveryInterface) ([]*metav1.APIResourceList, continue } gv := schema.GroupResource{Group: apiGroup.Name, Resource: apiResource.Name} - if _, ok := grApiResources[gv]; ok && version.Version != apiGroup.PreferredVersion.Version { + if _, ok := grAPIResources[gv]; ok && version.Version != apiGroup.PreferredVersion.Version { // only override with preferred version continue } grVersions[gv] = version.Version - grApiResources[gv] = apiResource + grAPIResources[gv] = apiResource } } } // group selected APIResources according to GroupVersion into APIResourceLists - for groupResource, apiResource := range grApiResources { + for groupResource, apiResource := range grAPIResources { version := grVersions[groupResource] groupVersion := schema.GroupVersion{Group: groupResource.Group, Version: version} - apiResourceList := gvApiResourceLists[groupVersion] + apiResourceList := gvAPIResourceLists[groupVersion] apiResourceList.APIResources = append(apiResourceList.APIResources, *apiResource) } @@ -464,9 +464,9 @@ func NewDiscoveryClient(c restclient.Interface) *DiscoveryClient { // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. 
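Returning to the ServerPreferredResources loop above (where grApiResources was renamed to grAPIResources): it keeps the first version it encounters for each GroupResource and lets only the group's preferred version override that choice. A reduced sketch of the selection rule, with a toy GroupResource key:

package main

import "fmt"

type gr struct{ group, resource string }

func main() {
	preferred := "v1" // plays the role of apiGroup.PreferredVersion.Version
	seen := map[gr]string{}
	for _, version := range []string{"v1beta1", "v1", "v2alpha1"} {
		key := gr{"apps", "deployments"}
		if _, ok := seen[key]; ok && version != preferred {
			continue // already selected; only the preferred version may override
		}
		seen[key] = version
	}
	fmt.Println(seen[gr{"apps", "deployments"}]) // v1
}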
-func (c *DiscoveryClient) RESTClient() restclient.Interface { - if c == nil { +func (d *DiscoveryClient) RESTClient() restclient.Interface { + if d == nil { return nil } - return c.restClient + return d.restClient } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/discovery/fake/discovery.go b/cluster-autoscaler/vendor/k8s.io/client-go/discovery/fake/discovery.go index 984a0ba1ec69..9565fa46c5ec 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/discovery/fake/discovery.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/discovery/fake/discovery.go @@ -36,6 +36,8 @@ type FakeDiscovery struct { FakedServerVersion *version.Info } +// ServerResourcesForGroupVersion returns the supported resources for a group +// and version. func (c *FakeDiscovery) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { action := testing.ActionImpl{ Verb: "get", @@ -50,6 +52,7 @@ func (c *FakeDiscovery) ServerResourcesForGroupVersion(groupVersion string) (*me return nil, fmt.Errorf("GroupVersion %q not found", groupVersion) } +// ServerResources returns the supported resources for all groups and versions. func (c *FakeDiscovery) ServerResources() ([]*metav1.APIResourceList, error) { action := testing.ActionImpl{ Verb: "get", @@ -59,14 +62,20 @@ func (c *FakeDiscovery) ServerResources() ([]*metav1.APIResourceList, error) { return c.Resources, nil } +// ServerPreferredResources returns the supported resources with the version +// preferred by the server. func (c *FakeDiscovery) ServerPreferredResources() ([]*metav1.APIResourceList, error) { return nil, nil } +// ServerPreferredNamespacedResources returns the supported namespaced resources +// with the version preferred by the server. func (c *FakeDiscovery) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { return nil, nil } +// ServerGroups returns the supported groups, with information like supported +// versions and the preferred version. func (c *FakeDiscovery) ServerGroups() (*metav1.APIGroupList, error) { action := testing.ActionImpl{ Verb: "get", @@ -108,6 +117,7 @@ func (c *FakeDiscovery) ServerGroups() (*metav1.APIGroupList, error) { } +// ServerVersion retrieves and parses the server's version. func (c *FakeDiscovery) ServerVersion() (*version.Info, error) { action := testing.ActionImpl{} action.Verb = "get" @@ -122,10 +132,13 @@ func (c *FakeDiscovery) ServerVersion() (*version.Info, error) { return &versionInfo, nil } +// OpenAPISchema retrieves and parses the swagger API schema the server supports. func (c *FakeDiscovery) OpenAPISchema() (*openapi_v2.Document, error) { return &openapi_v2.Document{}, nil } +// RESTClient returns a RESTClient that is used to communicate with API server +// by this client implementation. 
func (c *FakeDiscovery) RESTClient() restclient.Interface { return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/discovery/helper.go b/cluster-autoscaler/vendor/k8s.io/client-go/discovery/helper.go index 353d34b3c5ac..3bfe514e8234 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/discovery/helper.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/discovery/helper.go @@ -31,11 +31,11 @@ import ( func MatchesServerVersion(clientVersion apimachineryversion.Info, client DiscoveryInterface) error { sVer, err := client.ServerVersion() if err != nil { - return fmt.Errorf("couldn't read version from server: %v\n", err) + return fmt.Errorf("couldn't read version from server: %v", err) } // GitVersion includes GitCommit and GitTreeState, but best to be safe? if clientVersion.GitVersion != sVer.GitVersion || clientVersion.GitCommit != sVer.GitCommit || clientVersion.GitTreeState != sVer.GitTreeState { - return fmt.Errorf("server version (%#v) differs from client version (%#v)!\n", sVer, clientVersion) + return fmt.Errorf("server version (%#v) differs from client version (%#v)", sVer, clientVersion) } return nil @@ -101,12 +101,15 @@ func FilteredBy(pred ResourcePredicate, rls []*metav1.APIResourceList) []*metav1 return result } +// ResourcePredicate has a method to check if a resource matches a given condition. type ResourcePredicate interface { Match(groupVersion string, r *metav1.APIResource) bool } +// ResourcePredicateFunc returns true if it matches a resource based on a custom condition. type ResourcePredicateFunc func(groupVersion string, r *metav1.APIResource) bool +// Match is a wrapper around ResourcePredicateFunc. func (fn ResourcePredicateFunc) Match(groupVersion string, r *metav1.APIResource) bool { return fn(groupVersion, r) } @@ -116,6 +119,7 @@ type SupportsAllVerbs struct { Verbs []string } +// Match checks if a resource contains all the given verbs. func (p SupportsAllVerbs) Match(groupVersion string, r *metav1.APIResource) bool { return sets.NewString([]string(r.Verbs)...).HasAll(p.Verbs...) 
} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/discovery/round_tripper.go b/cluster-autoscaler/vendor/k8s.io/client-go/discovery/round_tripper.go index 75b7f5209771..4e2bc24e774d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/discovery/round_tripper.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/discovery/round_tripper.go @@ -20,10 +20,10 @@ import ( "net/http" "path/filepath" - "github.com/golang/glog" "github.com/gregjones/httpcache" "github.com/gregjones/httpcache/diskcache" "github.com/peterbourgon/diskv" + "k8s.io/klog" ) type cacheRoundTripper struct { @@ -55,7 +55,7 @@ func (rt *cacheRoundTripper) CancelRequest(req *http.Request) { if cr, ok := rt.rt.Transport.(canceler); ok { cr.CancelRequest(req) } else { - glog.Errorf("CancelRequest not implemented by %T", rt.rt.Transport) + klog.Errorf("CancelRequest not implemented by %T", rt.rt.Transport) } } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/BUILD index 97598fb563fc..5a923a9b4f09 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/dynamic/BUILD @@ -59,6 +59,8 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//staging/src/k8s.io/client-go/dynamic/dynamicinformer:all-srcs", + "//staging/src/k8s.io/client-go/dynamic/dynamiclister:all-srcs", "//staging/src/k8s.io/client-go/dynamic/fake:all-srcs", ], tags = ["automanaged"], diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/informers/BUILD index d56547686879..24af1356b8c4 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/BUILD @@ -15,6 +15,7 @@ go_library( "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/apps/v1beta1:go_default_library", "//staging/src/k8s.io/api/apps/v1beta2:go_default_library", + "//staging/src/k8s.io/api/auditregistration/v1alpha1:go_default_library", "//staging/src/k8s.io/api/autoscaling/v1:go_default_library", "//staging/src/k8s.io/api/autoscaling/v2beta1:go_default_library", "//staging/src/k8s.io/api/autoscaling/v2beta2:go_default_library", @@ -42,6 +43,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/client-go/informers/admissionregistration:go_default_library", "//staging/src/k8s.io/client-go/informers/apps:go_default_library", + "//staging/src/k8s.io/client-go/informers/auditregistration:go_default_library", "//staging/src/k8s.io/client-go/informers/autoscaling:go_default_library", "//staging/src/k8s.io/client-go/informers/batch:go_default_library", "//staging/src/k8s.io/client-go/informers/certificates:go_default_library", @@ -74,6 +76,7 @@ filegroup( ":package-srcs", "//staging/src/k8s.io/client-go/informers/admissionregistration:all-srcs", "//staging/src/k8s.io/client-go/informers/apps:all-srcs", + "//staging/src/k8s.io/client-go/informers/auditregistration:all-srcs", "//staging/src/k8s.io/client-go/informers/autoscaling:all-srcs", "//staging/src/k8s.io/client-go/informers/batch:all-srcs", "//staging/src/k8s.io/client-go/informers/certificates:all-srcs", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/auditregistration/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/informers/auditregistration/BUILD new file mode 100644 index 000000000000..3a3afe0dac76 --- /dev/null +++ 
b/cluster-autoscaler/vendor/k8s.io/client-go/informers/auditregistration/BUILD @@ -0,0 +1,30 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["interface.go"], + importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/informers/auditregistration", + importpath = "k8s.io/client-go/informers/auditregistration", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/client-go/informers/auditregistration/v1alpha1:go_default_library", + "//staging/src/k8s.io/client-go/informers/internalinterfaces:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/client-go/informers/auditregistration/v1alpha1:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/auditregistration/interface.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/auditregistration/interface.go new file mode 100644 index 000000000000..0f1682c478d6 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/auditregistration/interface.go @@ -0,0 +1,46 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package auditregistration + +import ( + v1alpha1 "k8s.io/client-go/informers/auditregistration/v1alpha1" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha1 returns a new v1alpha1.Interface. 
+func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/BUILD new file mode 100644 index 000000000000..846f0d680ec3 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/BUILD @@ -0,0 +1,36 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "auditsink.go", + "interface.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1", + importpath = "k8s.io/client-go/informers/auditregistration/v1alpha1", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/api/auditregistration/v1alpha1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", + "//staging/src/k8s.io/client-go/informers/internalinterfaces:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/client-go/listers/auditregistration/v1alpha1:go_default_library", + "//staging/src/k8s.io/client-go/tools/cache:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/auditsink.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/auditsink.go new file mode 100644 index 000000000000..69778ad2cfed --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/auditsink.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + time "time" + + auditregistrationv1alpha1 "k8s.io/api/auditregistration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/auditregistration/v1alpha1" + cache "k8s.io/client-go/tools/cache" +) + +// AuditSinkInformer provides access to a shared informer and lister for +// AuditSinks. 
+type AuditSinkInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.AuditSinkLister +} + +type auditSinkInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewAuditSinkInformer constructs a new informer for AuditSink type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewAuditSinkInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredAuditSinkInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredAuditSinkInformer constructs a new informer for AuditSink type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredAuditSinkInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AuditregistrationV1alpha1().AuditSinks().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AuditregistrationV1alpha1().AuditSinks().Watch(options) + }, + }, + &auditregistrationv1alpha1.AuditSink{}, + resyncPeriod, + indexers, + ) +} + +func (f *auditSinkInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredAuditSinkInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *auditSinkInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&auditregistrationv1alpha1.AuditSink{}, f.defaultInformer) +} + +func (f *auditSinkInformer) Lister() v1alpha1.AuditSinkLister { + return v1alpha1.NewAuditSinkLister(f.Informer().GetIndexer()) +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/interface.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/interface.go new file mode 100644 index 000000000000..0a67ba821df5 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. 
+type Interface interface { + // AuditSinks returns a AuditSinkInformer. + AuditSinks() AuditSinkInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// AuditSinks returns a AuditSinkInformer. +func (v *version) AuditSinks() AuditSinkInformer { + return &auditSinkInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go index 7ae22ee2c82a..88ead6213a19 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/factory.go @@ -28,6 +28,7 @@ import ( schema "k8s.io/apimachinery/pkg/runtime/schema" admissionregistration "k8s.io/client-go/informers/admissionregistration" apps "k8s.io/client-go/informers/apps" + auditregistration "k8s.io/client-go/informers/auditregistration" autoscaling "k8s.io/client-go/informers/autoscaling" batch "k8s.io/client-go/informers/batch" certificates "k8s.io/client-go/informers/certificates" @@ -188,6 +189,7 @@ type SharedInformerFactory interface { Admissionregistration() admissionregistration.Interface Apps() apps.Interface + Auditregistration() auditregistration.Interface Autoscaling() autoscaling.Interface Batch() batch.Interface Certificates() certificates.Interface @@ -211,6 +213,10 @@ func (f *sharedInformerFactory) Apps() apps.Interface { return apps.New(f, f.namespace, f.tweakListOptions) } +func (f *sharedInformerFactory) Auditregistration() auditregistration.Interface { + return auditregistration.New(f, f.namespace, f.tweakListOptions) +} + func (f *sharedInformerFactory) Autoscaling() autoscaling.Interface { return autoscaling.New(f, f.namespace, f.tweakListOptions) } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/generic.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/generic.go index 3af96304a5c5..09a5efe2ec89 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/generic.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/generic.go @@ -26,6 +26,7 @@ import ( v1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" v1beta2 "k8s.io/api/apps/v1beta2" + auditregistrationv1alpha1 "k8s.io/api/auditregistration/v1alpha1" autoscalingv1 "k8s.io/api/autoscaling/v1" v2beta1 "k8s.io/api/autoscaling/v2beta1" v2beta2 "k8s.io/api/autoscaling/v2beta2" @@ -120,6 +121,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case v1beta2.SchemeGroupVersion.WithResource("statefulsets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1beta2().StatefulSets().Informer()}, nil + // Group=auditregistration.k8s.io, Version=v1alpha1 + case auditregistrationv1alpha1.SchemeGroupVersion.WithResource("auditsinks"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Auditregistration().V1alpha1().AuditSinks().Informer()}, nil + // Group=autoscaling, Version=v1 case autoscalingv1.SchemeGroupVersion.WithResource("horizontalpodautoscalers"): return &genericInformer{resource: resource.GroupResource(), informer: 
f.Autoscaling().V1().HorizontalPodAutoscalers().Informer()}, nil @@ -257,6 +262,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource // Group=storage.k8s.io, Version=v1 case storagev1.SchemeGroupVersion.WithResource("storageclasses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1().StorageClasses().Informer()}, nil + case storagev1.SchemeGroupVersion.WithResource("volumeattachments"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1().VolumeAttachments().Informer()}, nil // Group=storage.k8s.io, Version=v1alpha1 case storagev1alpha1.SchemeGroupVersion.WithResource("volumeattachments"): diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go index 5e05516b1340..b00ed70cfdb7 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go @@ -27,6 +27,7 @@ import ( cache "k8s.io/client-go/tools/cache" ) +// NewInformerFunc takes kubernetes.Interface and time.Duration to return a SharedIndexInformer. type NewInformerFunc func(kubernetes.Interface, time.Duration) cache.SharedIndexInformer // SharedInformerFactory a small interface to allow for adding an informer without an import cycle @@ -35,4 +36,5 @@ type SharedInformerFactory interface { InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer } +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/BUILD index 4e3f9966ab08..25f3894667f2 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/BUILD @@ -10,6 +10,7 @@ go_library( srcs = [ "interface.go", "storageclass.go", + "volumeattachment.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/informers/storage/v1", importpath = "k8s.io/client-go/informers/storage/v1", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/interface.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/interface.go index d7e4b5c49ace..64fc2bd84369 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/interface.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/interface.go @@ -26,6 +26,8 @@ import ( type Interface interface { // StorageClasses returns a StorageClassInformer. StorageClasses() StorageClassInformer + // VolumeAttachments returns a VolumeAttachmentInformer. + VolumeAttachments() VolumeAttachmentInformer } type version struct { @@ -43,3 +45,8 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList func (v *version) StorageClasses() StorageClassInformer { return &storageClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } + +// VolumeAttachments returns a VolumeAttachmentInformer. 
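The generic.go additions above mean both new types are also reachable dynamically by GroupVersionResource. A sketch of the ForResource path, again with the fake clientset standing in for a real one:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	factory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(), 10*time.Minute)

	for _, gvr := range []schema.GroupVersionResource{
		{Group: "auditregistration.k8s.io", Version: "v1alpha1", Resource: "auditsinks"},
		{Group: "storage.k8s.io", Version: "v1", Resource: "volumeattachments"},
	} {
		gi, err := factory.ForResource(gvr) // hits the switch extended in generic.go
		if err != nil {
			fmt.Println("unsupported resource:", err)
			continue
		}
		fmt.Printf("%s -> %T\n", gvr.Resource, gi.Informer())
	}
}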
+func (v *version) VolumeAttachments() VolumeAttachmentInformer { + return &volumeAttachmentInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go b/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go new file mode 100644 index 000000000000..7ca3b86f22cf --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/storage/v1" + cache "k8s.io/client-go/tools/cache" +) + +// VolumeAttachmentInformer provides access to a shared informer and lister for +// VolumeAttachments. +type VolumeAttachmentInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.VolumeAttachmentLister +} + +type volumeAttachmentInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewVolumeAttachmentInformer constructs a new informer for VolumeAttachment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewVolumeAttachmentInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredVolumeAttachmentInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredVolumeAttachmentInformer constructs a new informer for VolumeAttachment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
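As with every generated informer above, the tweakListOptions hook on the filtered constructor lets a caller narrow the informer's List and Watch calls server-side. A sketch using the VolumeAttachment informer added in this change; the label selector is purely illustrative:

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	storagev1 "k8s.io/client-go/informers/storage/v1"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/cache"
)

func main() {
	client := fake.NewSimpleClientset()
	// tweakListOptions is applied to every List and Watch the informer issues.
	inf := storagev1.NewFilteredVolumeAttachmentInformer(
		client,
		10*time.Minute,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		func(opts *metav1.ListOptions) { opts.LabelSelector = "example.com/managed=true" },
	)
	fmt.Printf("constructed %T\n", inf)
}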
+func NewFilteredVolumeAttachmentInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1().VolumeAttachments().List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1().VolumeAttachments().Watch(options) + }, + }, + &storagev1.VolumeAttachment{}, + resyncPeriod, + indexers, + ) +} + +func (f *volumeAttachmentInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredVolumeAttachmentInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *volumeAttachmentInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&storagev1.VolumeAttachment{}, f.defaultInformer) +} + +func (f *volumeAttachmentInformer) Lister() v1.VolumeAttachmentLister { + return v1.NewVolumeAttachmentLister(f.Informer().GetIndexer()) +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/BUILD index fc7ed0d7448a..da4639ae3422 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/BUILD @@ -17,6 +17,7 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1:go_default_library", @@ -66,6 +67,7 @@ filegroup( "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:all-srcs", "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1:all-srcs", "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2:all-srcs", + "//staging/src/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1:all-srcs", "//staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1:all-srcs", "//staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1:all-srcs", "//staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1:all-srcs", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/clientset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/clientset.go index 122e4bb7f198..6ad01d6db19a 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/clientset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -25,6 +25,7 @@ import ( appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" appsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" + auditregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1" authenticationv1 
"k8s.io/client-go/kubernetes/typed/authentication/v1" authenticationv1beta1 "k8s.io/client-go/kubernetes/typed/authentication/v1beta1" authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1" @@ -66,6 +67,9 @@ type Interface interface { AppsV1() appsv1.AppsV1Interface // Deprecated: please explicitly pick a version if possible. Apps() appsv1.AppsV1Interface + AuditregistrationV1alpha1() auditregistrationv1alpha1.AuditregistrationV1alpha1Interface + // Deprecated: please explicitly pick a version if possible. + Auditregistration() auditregistrationv1alpha1.AuditregistrationV1alpha1Interface AuthenticationV1() authenticationv1.AuthenticationV1Interface // Deprecated: please explicitly pick a version if possible. Authentication() authenticationv1.AuthenticationV1Interface @@ -133,6 +137,7 @@ type Clientset struct { appsV1beta1 *appsv1beta1.AppsV1beta1Client appsV1beta2 *appsv1beta2.AppsV1beta2Client appsV1 *appsv1.AppsV1Client + auditregistrationV1alpha1 *auditregistrationv1alpha1.AuditregistrationV1alpha1Client authenticationV1 *authenticationv1.AuthenticationV1Client authenticationV1beta1 *authenticationv1beta1.AuthenticationV1beta1Client authorizationV1 *authorizationv1.AuthorizationV1Client @@ -198,6 +203,17 @@ func (c *Clientset) Apps() appsv1.AppsV1Interface { return c.appsV1 } +// AuditregistrationV1alpha1 retrieves the AuditregistrationV1alpha1Client +func (c *Clientset) AuditregistrationV1alpha1() auditregistrationv1alpha1.AuditregistrationV1alpha1Interface { + return c.auditregistrationV1alpha1 +} + +// Deprecated: Auditregistration retrieves the default version of AuditregistrationClient. +// Please explicitly pick a version. +func (c *Clientset) Auditregistration() auditregistrationv1alpha1.AuditregistrationV1alpha1Interface { + return c.auditregistrationV1alpha1 +} + // AuthenticationV1 retrieves the AuthenticationV1Client func (c *Clientset) AuthenticationV1() authenticationv1.AuthenticationV1Interface { return c.authenticationV1 @@ -454,6 +470,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.auditregistrationV1alpha1, err = auditregistrationv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.authenticationV1, err = authenticationv1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -575,6 +595,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { cs.appsV1beta1 = appsv1beta1.NewForConfigOrDie(c) cs.appsV1beta2 = appsv1beta2.NewForConfigOrDie(c) cs.appsV1 = appsv1.NewForConfigOrDie(c) + cs.auditregistrationV1alpha1 = auditregistrationv1alpha1.NewForConfigOrDie(c) cs.authenticationV1 = authenticationv1.NewForConfigOrDie(c) cs.authenticationV1beta1 = authenticationv1beta1.NewForConfigOrDie(c) cs.authorizationV1 = authorizationv1.NewForConfigOrDie(c) @@ -614,6 +635,7 @@ func New(c rest.Interface) *Clientset { cs.appsV1beta1 = appsv1beta1.New(c) cs.appsV1beta2 = appsv1beta2.New(c) cs.appsV1 = appsv1.New(c) + cs.auditregistrationV1alpha1 = auditregistrationv1alpha1.New(c) cs.authenticationV1 = authenticationv1.New(c) cs.authenticationV1beta1 = authenticationv1beta1.New(c) cs.authorizationV1 = authorizationv1.New(c) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/fake/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/fake/BUILD index 2acd97972cb6..185e530d0bf5 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/fake/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/fake/BUILD @@ -16,6 +16,7 @@ go_library( 
"//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/apps/v1beta1:go_default_library", "//staging/src/k8s.io/api/apps/v1beta2:go_default_library", + "//staging/src/k8s.io/api/auditregistration/v1alpha1:go_default_library", "//staging/src/k8s.io/api/authentication/v1:go_default_library", "//staging/src/k8s.io/api/authentication/v1beta1:go_default_library", "//staging/src/k8s.io/api/authorization/v1:go_default_library", @@ -61,6 +62,8 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/fake:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go index a23b3165a04a..47b63ffaed98 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go @@ -34,6 +34,8 @@ import ( fakeappsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake" appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" fakeappsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake" + auditregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1" + fakeauditregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake" authenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1" fakeauthenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1/fake" authenticationv1beta1 "k8s.io/client-go/kubernetes/typed/authentication/v1beta1" @@ -166,6 +168,16 @@ func (c *Clientset) Apps() appsv1.AppsV1Interface { return &fakeappsv1.FakeAppsV1{Fake: &c.Fake} } +// AuditregistrationV1alpha1 retrieves the AuditregistrationV1alpha1Client +func (c *Clientset) AuditregistrationV1alpha1() auditregistrationv1alpha1.AuditregistrationV1alpha1Interface { + return &fakeauditregistrationv1alpha1.FakeAuditregistrationV1alpha1{Fake: &c.Fake} +} + +// Auditregistration retrieves the AuditregistrationV1alpha1Client +func (c *Clientset) Auditregistration() auditregistrationv1alpha1.AuditregistrationV1alpha1Interface { + return &fakeauditregistrationv1alpha1.FakeAuditregistrationV1alpha1{Fake: &c.Fake} +} + // AuthenticationV1 retrieves the AuthenticationV1Client func (c *Clientset) AuthenticationV1() authenticationv1.AuthenticationV1Interface { return &fakeauthenticationv1.FakeAuthenticationV1{Fake: &c.Fake} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/fake/register.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/fake/register.go index c429979688b7..6e1e1fb29365 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/fake/register.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/fake/register.go @@ -24,6 +24,7 @@ import ( appsv1 "k8s.io/api/apps/v1" appsv1beta1 
"k8s.io/api/apps/v1beta1" appsv1beta2 "k8s.io/api/apps/v1beta2" + auditregistrationv1alpha1 "k8s.io/api/auditregistration/v1alpha1" authenticationv1 "k8s.io/api/authentication/v1" authenticationv1beta1 "k8s.io/api/authentication/v1beta1" authorizationv1 "k8s.io/api/authorization/v1" @@ -66,6 +67,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ appsv1beta1.AddToScheme, appsv1beta2.AddToScheme, appsv1.AddToScheme, + auditregistrationv1alpha1.AddToScheme, authenticationv1.AddToScheme, authenticationv1beta1.AddToScheme, authorizationv1.AddToScheme, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/scheme/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/scheme/BUILD index 3490063e7e0b..e36fdd99ea67 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/scheme/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/scheme/BUILD @@ -15,6 +15,7 @@ go_library( "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/apps/v1beta1:go_default_library", "//staging/src/k8s.io/api/apps/v1beta2:go_default_library", + "//staging/src/k8s.io/api/auditregistration/v1alpha1:go_default_library", "//staging/src/k8s.io/api/authentication/v1:go_default_library", "//staging/src/k8s.io/api/authentication/v1beta1:go_default_library", "//staging/src/k8s.io/api/authorization/v1:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/scheme/register.go index 9ca89b76e44f..e336eb9179e0 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -24,6 +24,7 @@ import ( appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" appsv1beta2 "k8s.io/api/apps/v1beta2" + auditregistrationv1alpha1 "k8s.io/api/auditregistration/v1alpha1" authenticationv1 "k8s.io/api/authentication/v1" authenticationv1beta1 "k8s.io/api/authentication/v1beta1" authorizationv1 "k8s.io/api/authorization/v1" @@ -66,6 +67,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ appsv1beta1.AddToScheme, appsv1beta2.AddToScheme, appsv1.AddToScheme, + auditregistrationv1alpha1.AddToScheme, authenticationv1.AddToScheme, authenticationv1beta1.AddToScheme, authorizationv1.AddToScheme, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_initializerconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_initializerconfiguration.go index b927dae2cd34..4baee66104df 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_initializerconfiguration.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_initializerconfiguration.go @@ -112,7 +112,7 @@ func (c *FakeInitializerConfigurations) DeleteCollection(options *v1.DeleteOptio // Patch applies the patch and returns the patched initializerConfiguration. func (c *FakeInitializerConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.InitializerConfiguration, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(initializerconfigurationsResource, name, data, subresources...), &v1alpha1.InitializerConfiguration{}) + Invokes(testing.NewRootPatchSubresourceAction(initializerconfigurationsResource, name, pt, data, subresources...), &v1alpha1.InitializerConfiguration{}) if obj == nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go index e014ea72b69e..7b8acecee9b2 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "time" + v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *initializerConfigurations) Get(name string, options v1.GetOptions) (res // List takes label and field selectors, and returns the list of InitializerConfigurations that match those selectors. func (c *initializerConfigurations) List(opts v1.ListOptions) (result *v1alpha1.InitializerConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.InitializerConfigurationList{} err = c.client.Get(). Resource("initializerconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *initializerConfigurations) List(opts v1.ListOptions) (result *v1alpha1. // Watch returns a watch.Interface that watches the requested initializerConfigurations. func (c *initializerConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("initializerconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *initializerConfigurations) Delete(name string, options *v1.DeleteOption // DeleteCollection deletes a collection of objects. func (c *initializerConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("initializerconfigurations"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go index e06888cc13c8..d2177bad52d2 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go @@ -112,7 +112,7 @@ func (c *FakeMutatingWebhookConfigurations) DeleteCollection(options *v1.DeleteO // Patch applies the patch and returns the patched mutatingWebhookConfiguration. func (c *FakeMutatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(mutatingwebhookconfigurationsResource, name, data, subresources...), &v1beta1.MutatingWebhookConfiguration{}) + Invokes(testing.NewRootPatchSubresourceAction(mutatingwebhookconfigurationsResource, name, pt, data, subresources...), &v1beta1.MutatingWebhookConfiguration{}) if obj == nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go index 1069634e2368..6be2b393866b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go @@ -112,7 +112,7 @@ func (c *FakeValidatingWebhookConfigurations) DeleteCollection(options *v1.Delet // Patch applies the patch and returns the patched validatingWebhookConfiguration. func (c *FakeValidatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingwebhookconfigurationsResource, name, data, subresources...), &v1beta1.ValidatingWebhookConfiguration{}) + Invokes(testing.NewRootPatchSubresourceAction(validatingwebhookconfigurationsResource, name, pt, data, subresources...), &v1beta1.ValidatingWebhookConfiguration{}) if obj == nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go index cb0157102914..4524896cd6b7 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -19,6 +19,8 @@ limitations under the License. 
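A recurring mechanical fix in these fake clients: `NewRootPatchSubresourceAction` and `NewPatchSubresourceAction` now take the caller's `types.PatchType`, so the recorded action remembers which patch flavor was used. A sketch of a test asserting on that, assuming the `PatchAction.GetPatchType()` accessor that accompanies this signature change; the deployment name and patch body are hypothetical:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/fake"
	k8stesting "k8s.io/client-go/testing"
)

func main() {
	clientset := fake.NewSimpleClientset()

	// The patch target does not exist, so the call errors, but the
	// action is recorded either way.
	_, _ = clientset.AppsV1().Deployments("default").
		Patch("web", types.StrategicMergePatchType, []byte(`{"spec":{"replicas":2}}`))

	for _, a := range clientset.Actions() {
		if p, ok := a.(k8stesting.PatchAction); ok {
			// Before this change, the patch type was dropped on the floor here.
			fmt.Println("recorded patch type:", p.GetPatchType())
		}
	}
}
```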
package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *mutatingWebhookConfigurations) Get(name string, options v1.GetOptions) // List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. func (c *mutatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.MutatingWebhookConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.MutatingWebhookConfigurationList{} err = c.client.Get(). Resource("mutatingwebhookconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *mutatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1bet // Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. func (c *mutatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("mutatingwebhookconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *mutatingWebhookConfigurations) Delete(name string, options *v1.DeleteOp // DeleteCollection deletes a collection of objects. func (c *mutatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("mutatingwebhookconfigurations"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go index 3a9339f6cdc8..7e711b3000e1 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *validatingWebhookConfigurations) Get(name string, options v1.GetOptions // List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. func (c *validatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.ValidatingWebhookConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.ValidatingWebhookConfigurationList{} err = c.client.Get(). Resource("validatingwebhookconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). 
Into(result) return @@ -83,10 +90,15 @@ func (c *validatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1b // Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. func (c *validatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("validatingwebhookconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *validatingWebhookConfigurations) Delete(name string, options *v1.Delete // DeleteCollection deletes a collection of objects. func (c *validatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("validatingwebhookconfigurations"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/BUILD index 17bd3fad4d77..dd54b316b614 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/BUILD @@ -17,6 +17,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//staging/src/k8s.io/api/apps/v1:go_default_library", + "//staging/src/k8s.io/api/autoscaling/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go index 1ddaa1a71b7d..e28e4d2a3fa6 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *controllerRevisions) Get(name string, options metav1.GetOptions) (resul // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. func (c *controllerRevisions) List(opts metav1.ListOptions) (result *v1.ControllerRevisionList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.ControllerRevisionList{} err = c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *controllerRevisions) List(opts metav1.ListOptions) (result *v1.Controll // Watch returns a watch.Interface that watches the requested controllerRevisions. 
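The other pattern repeated through every generated type in this diff: `List`, `Watch`, and `DeleteCollection` now convert `ListOptions.TimeoutSeconds` into a `time.Duration` and hand it to the request builder's `Timeout(...)`, so the limit is also enforced on the client side rather than only being sent to the apiserver as a query parameter. A minimal caller-side sketch; the kubeconfig path is a placeholder:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// With this change, the 30s limit below also bounds the HTTP round
	// trip itself, not just the server-side list/watch window.
	timeout := int64(30)
	list, err := clientset.AppsV1().DaemonSets("kube-system").
		List(metav1.ListOptions{TimeoutSeconds: &timeout})
	if err != nil {
		panic(err)
	}
	fmt.Println("daemonsets:", len(list.Items))
}
```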
func (c *controllerRevisions) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *controllerRevisions) Delete(name string, options *metav1.DeleteOptions) // DeleteCollection deletes a collection of objects. func (c *controllerRevisions) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go index 03a87069840b..a535cdabe649 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *daemonSets) Get(name string, options metav1.GetOptions) (result *v1.Dae // List takes label and field selectors, and returns the list of DaemonSets that match those selectors. func (c *daemonSets) List(opts metav1.ListOptions) (result *v1.DaemonSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.DaemonSetList{} err = c.client.Get(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *daemonSets) List(opts metav1.ListOptions) (result *v1.DaemonSetList, er // Watch returns a watch.Interface that watches the requested daemonSets. func (c *daemonSets) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *daemonSets) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *daemonSets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go index 73d46f8bb6cb..f9799a453955 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go @@ -19,7 +19,10 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/apps/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -44,6 +47,9 @@ type DeploymentInterface interface { List(opts metav1.ListOptions) (*v1.DeploymentList, error) Watch(opts metav1.ListOptions) (watch.Interface, error) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Deployment, err error) + GetScale(deploymentName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) + UpdateScale(deploymentName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error) + DeploymentExpansion } @@ -76,11 +82,16 @@ func (c *deployments) Get(name string, options metav1.GetOptions) (result *v1.De // List takes label and field selectors, and returns the list of Deployments that match those selectors. func (c *deployments) List(opts metav1.ListOptions) (result *v1.DeploymentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.DeploymentList{} err = c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +99,16 @@ func (c *deployments) List(opts metav1.ListOptions) (result *v1.DeploymentList, // Watch returns a watch.Interface that watches the requested deployments. func (c *deployments) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +166,15 @@ func (c *deployments) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *deployments) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("deployments"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() @@ -172,3 +193,31 @@ func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subres Into(result) return } + +// GetScale takes name of the deployment, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. +func (c *deployments) GetScale(deploymentName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + Name(deploymentName). + SubResource("scale"). + VersionedParams(&options, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. +func (c *deployments) UpdateScale(deploymentName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deploymentName). + SubResource("scale"). + Body(scale). + Do(). + Into(result) + return +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/BUILD index 6202fc55ddfc..d01abefea0cd 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/BUILD @@ -16,6 +16,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//staging/src/k8s.io/api/apps/v1:go_default_library", + "//staging/src/k8s.io/api/autoscaling/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go index fc2808daf88c..eb38bca41bab 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go @@ -119,7 +119,7 @@ func (c *FakeControllerRevisions) DeleteCollection(options *v1.DeleteOptions, li // Patch applies the patch and returns the patched controllerRevision. func (c *FakeControllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *appsv1.ControllerRevision, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, data, subresources...), &appsv1.ControllerRevision{}) + Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, pt, data, subresources...), &appsv1.ControllerRevision{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go index 89e72ebd399e..c06336e9701e 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go @@ -131,7 +131,7 @@ func (c *FakeDaemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions // Patch applies the patch and returns the patched daemonSet. func (c *FakeDaemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *appsv1.DaemonSet, err error) { obj, err := c.Fake. 
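With the `GetScale`/`UpdateScale` additions above, the typed apps/v1 deployment client can read and write the `scale` subresource directly, exchanging `autoscalingv1.Scale` objects. A sketch of a read-modify-write against a live cluster; the workload name and kubeconfig path are placeholders:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	deployments := clientset.AppsV1().Deployments("default")

	// GET .../deployments/web/scale
	scale, err := deployments.GetScale("web", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}

	// PUT the modified Scale back through the subresource.
	scale.Spec.Replicas = 5
	if _, err := deployments.UpdateScale("web", scale); err != nil {
		panic(err)
	}
	fmt.Println("scaled web to", scale.Spec.Replicas)
}
```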
- Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, data, subresources...), &appsv1.DaemonSet{}) + Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, pt, data, subresources...), &appsv1.DaemonSet{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go index 2fbd82d6b8e3..6a8cb379da81 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go @@ -20,6 +20,7 @@ package fake import ( appsv1 "k8s.io/api/apps/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -131,10 +132,32 @@ func (c *FakeDeployments) DeleteCollection(options *v1.DeleteOptions, listOption // Patch applies the patch and returns the patched deployment. func (c *FakeDeployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *appsv1.Deployment, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, data, subresources...), &appsv1.Deployment{}) + Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &appsv1.Deployment{}) if obj == nil { return nil, err } return obj.(*appsv1.Deployment), err } + +// GetScale takes name of the deployment, and returns the corresponding scale object, and an error if there is any. +func (c *FakeDeployments) GetScale(deploymentName string, options v1.GetOptions) (result *autoscalingv1.Scale, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetSubresourceAction(deploymentsResource, c.ns, "scale", deploymentName), &autoscalingv1.Scale{}) + + if obj == nil { + return nil, err + } + return obj.(*autoscalingv1.Scale), err +} + +// UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. +func (c *FakeDeployments) UpdateScale(deploymentName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) + + if obj == nil { + return nil, err + } + return obj.(*autoscalingv1.Scale), err +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go index 7b882c8630c7..e871f82f7661 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go @@ -20,6 +20,7 @@ package fake import ( appsv1 "k8s.io/api/apps/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -131,10 +132,32 @@ func (c *FakeReplicaSets) DeleteCollection(options *v1.DeleteOptions, listOption // Patch applies the patch and returns the patched replicaSet. 
func (c *FakeReplicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *appsv1.ReplicaSet, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, data, subresources...), &appsv1.ReplicaSet{}) + Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, pt, data, subresources...), &appsv1.ReplicaSet{}) if obj == nil { return nil, err } return obj.(*appsv1.ReplicaSet), err } + +// GetScale takes name of the replicaSet, and returns the corresponding scale object, and an error if there is any. +func (c *FakeReplicaSets) GetScale(replicaSetName string, options v1.GetOptions) (result *autoscalingv1.Scale, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetSubresourceAction(replicasetsResource, c.ns, "scale", replicaSetName), &autoscalingv1.Scale{}) + + if obj == nil { + return nil, err + } + return obj.(*autoscalingv1.Scale), err +} + +// UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. +func (c *FakeReplicaSets) UpdateScale(replicaSetName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) + + if obj == nil { + return nil, err + } + return obj.(*autoscalingv1.Scale), err +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go index 3cd643a59792..83e80bff4973 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go @@ -20,6 +20,7 @@ package fake import ( appsv1 "k8s.io/api/apps/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -131,10 +132,32 @@ func (c *FakeStatefulSets) DeleteCollection(options *v1.DeleteOptions, listOptio // Patch applies the patch and returns the patched statefulSet. func (c *FakeStatefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *appsv1.StatefulSet, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, data, subresources...), &appsv1.StatefulSet{}) + Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, pt, data, subresources...), &appsv1.StatefulSet{}) if obj == nil { return nil, err } return obj.(*appsv1.StatefulSet), err } + +// GetScale takes name of the statefulSet, and returns the corresponding scale object, and an error if there is any. +func (c *FakeStatefulSets) GetScale(statefulSetName string, options v1.GetOptions) (result *autoscalingv1.Scale, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetSubresourceAction(statefulsetsResource, c.ns, "scale", statefulSetName), &autoscalingv1.Scale{}) + + if obj == nil { + return nil, err + } + return obj.(*autoscalingv1.Scale), err +} + +// UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. 
+func (c *FakeStatefulSets) UpdateScale(statefulSetName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) + + if obj == nil { + return nil, err + } + return obj.(*autoscalingv1.Scale), err +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go index 077941162d02..ff3504e78a4e 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go @@ -19,7 +19,10 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/apps/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -44,6 +47,9 @@ type ReplicaSetInterface interface { List(opts metav1.ListOptions) (*v1.ReplicaSetList, error) Watch(opts metav1.ListOptions) (watch.Interface, error) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicaSet, err error) + GetScale(replicaSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) + UpdateScale(replicaSetName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error) + ReplicaSetExpansion } @@ -76,11 +82,16 @@ func (c *replicaSets) Get(name string, options metav1.GetOptions) (result *v1.Re // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. func (c *replicaSets) List(opts metav1.ListOptions) (result *v1.ReplicaSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.ReplicaSetList{} err = c.client.Get(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +99,16 @@ func (c *replicaSets) List(opts metav1.ListOptions) (result *v1.ReplicaSetList, // Watch returns a watch.Interface that watches the requested replicaSets. func (c *replicaSets) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +166,15 @@ func (c *replicaSets) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *replicaSets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() @@ -172,3 +193,31 @@ func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subres Into(result) return } + +// GetScale takes name of the replicaSet, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. 
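The fake counterparts added above only record get/update subresource actions; the fake object tracker has no built-in notion of a `scale` subresource, so tests typically serve it with a reactor. A sketch under that assumption; all names are test scaffolding:

```go
package main

import (
	"fmt"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	k8stesting "k8s.io/client-go/testing"
)

func main() {
	clientset := fake.NewSimpleClientset()

	clientset.PrependReactor("get", "replicasets",
		func(action k8stesting.Action) (bool, runtime.Object, error) {
			if action.GetSubresource() != "scale" {
				return false, nil, nil // fall through to the default tracker
			}
			return true, &autoscalingv1.Scale{
				ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"},
				Spec:       autoscalingv1.ScaleSpec{Replicas: 3},
			}, nil
		})

	scale, err := clientset.AppsV1().ReplicaSets("default").GetScale("web", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("replicas:", scale.Spec.Replicas)
}
```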
+func (c *replicaSets) GetScale(replicaSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSetName). + SubResource("scale"). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. +func (c *replicaSets) UpdateScale(replicaSetName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSetName). + SubResource("scale"). + Body(scale). + Do(). + Into(result) + return +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go index 54322d97d31b..c12c470bbae8 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go @@ -19,7 +19,10 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/apps/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -44,6 +47,9 @@ type StatefulSetInterface interface { List(opts metav1.ListOptions) (*v1.StatefulSetList, error) Watch(opts metav1.ListOptions) (watch.Interface, error) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StatefulSet, err error) + GetScale(statefulSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) + UpdateScale(statefulSetName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error) + StatefulSetExpansion } @@ -76,11 +82,16 @@ func (c *statefulSets) Get(name string, options metav1.GetOptions) (result *v1.S // List takes label and field selectors, and returns the list of StatefulSets that match those selectors. func (c *statefulSets) List(opts metav1.ListOptions) (result *v1.StatefulSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.StatefulSetList{} err = c.client.Get(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +99,16 @@ func (c *statefulSets) List(opts metav1.ListOptions) (result *v1.StatefulSetList // Watch returns a watch.Interface that watches the requested statefulSets. func (c *statefulSets) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +166,15 @@ func (c *statefulSets) Delete(name string, options *metav1.DeleteOptions) error // DeleteCollection deletes a collection of objects. 
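Because `DeploymentInterface`, `ReplicaSetInterface`, and `StatefulSetInterface` all gain the same two method signatures, calling code can scale any of the three workload kinds through one small local interface. The helper below is illustrative, not part of client-go:

```go
package scaleutil

import (
	autoscalingv1 "k8s.io/api/autoscaling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// scalable is satisfied by the apps/v1 DeploymentInterface,
// ReplicaSetInterface, and StatefulSetInterface after this change.
type scalable interface {
	GetScale(name string, options metav1.GetOptions) (*autoscalingv1.Scale, error)
	UpdateScale(name string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error)
}

// Resize performs a read-modify-write of the scale subresource.
func Resize(c scalable, name string, replicas int32) error {
	scale, err := c.GetScale(name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	scale.Spec.Replicas = replicas
	_, err = c.UpdateScale(name, scale)
	return err
}
```

Usage is then uniform across kinds, e.g. `Resize(clientset.AppsV1().StatefulSets("default"), "db", 3)`.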
func (c *statefulSets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() @@ -172,3 +193,31 @@ func (c *statefulSets) Patch(name string, pt types.PatchType, data []byte, subre Into(result) return } + +// GetScale takes name of the statefulSet, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. +func (c *statefulSets) GetScale(statefulSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} + err = c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSetName). + SubResource("scale"). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. +func (c *statefulSets) UpdateScale(statefulSetName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} + err = c.client.Put(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSetName). + SubResource("scale"). + Body(scale). + Do(). + Into(result) + return +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/BUILD index 34a2f829d1d9..f9fe3d400b31 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/BUILD @@ -13,7 +13,6 @@ go_library( "deployment.go", "doc.go", "generated_expansion.go", - "scale.go", "statefulset.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go index 4d882e26e7d7..2c9db886b1e2 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go @@ -29,7 +29,6 @@ type AppsV1beta1Interface interface { RESTClient() rest.Interface ControllerRevisionsGetter DeploymentsGetter - ScalesGetter StatefulSetsGetter } @@ -46,10 +45,6 @@ func (c *AppsV1beta1Client) Deployments(namespace string) DeploymentInterface { return newDeployments(c, namespace) } -func (c *AppsV1beta1Client) Scales(namespace string) ScaleInterface { - return newScales(c, namespace) -} - func (c *AppsV1beta1Client) StatefulSets(namespace string) StatefulSetInterface { return newStatefulSets(c, namespace) } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go index ec8fa9242f5a..45ddb91592de 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go @@ 
-19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *controllerRevisions) Get(name string, options v1.GetOptions) (result *v // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta1.ControllerRevisionList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.ControllerRevisionList{} err = c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta1.Control // Watch returns a watch.Interface that watches the requested controllerRevisions. func (c *controllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *controllerRevisions) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *controllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go index 365e06f3f10e..05fdcb7a644f 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta1.D // List takes label and field selectors, and returns the list of Deployments that match those selectors. func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.DeploymentList{} err = c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, // Watch returns a watch.Interface that watches the requested deployments. 
func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("deployments"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/BUILD index d4460b98b6ce..c3bc1b48cd42 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/BUILD @@ -12,7 +12,6 @@ go_library( "fake_apps_client.go", "fake_controllerrevision.go", "fake_deployment.go", - "fake_scale.go", "fake_statefulset.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go index 2ff602be9b6f..8e65d78d29ae 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go @@ -36,10 +36,6 @@ func (c *FakeAppsV1beta1) Deployments(namespace string) v1beta1.DeploymentInterf return &FakeDeployments{c, namespace} } -func (c *FakeAppsV1beta1) Scales(namespace string) v1beta1.ScaleInterface { - return &FakeScales{c, namespace} -} - func (c *FakeAppsV1beta1) StatefulSets(namespace string) v1beta1.StatefulSetInterface { return &FakeStatefulSets{c, namespace} } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go index 924194891131..8e339d78b0fc 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go @@ -119,7 +119,7 @@ func (c *FakeControllerRevisions) DeleteCollection(options *v1.DeleteOptions, li // Patch applies the patch and returns the patched controllerRevision. func (c *FakeControllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ControllerRevision, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, data, subresources...), &v1beta1.ControllerRevision{}) + Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, pt, data, subresources...), &v1beta1.ControllerRevision{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go index c4749c52b3bf..c33baba589e7 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go @@ -131,7 +131,7 @@ func (c *FakeDeployments) DeleteCollection(options *v1.DeleteOptions, listOption // Patch applies the patch and returns the patched deployment. func (c *FakeDeployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, data, subresources...), &v1beta1.Deployment{}) + Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &v1beta1.Deployment{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_scale.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_scale.go deleted file mode 100644 index de71947e5230..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_scale.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -// FakeScales implements ScaleInterface -type FakeScales struct { - Fake *FakeAppsV1beta1 - ns string -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go index b0f194a7ddd4..754da5fba657 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go @@ -131,7 +131,7 @@ func (c *FakeStatefulSets) DeleteCollection(options *v1.DeleteOptions, listOptio // Patch applies the patch and returns the patched statefulSet. func (c *FakeStatefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StatefulSet, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, data, subresources...), &v1beta1.StatefulSet{}) + Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, pt, data, subresources...), &v1beta1.StatefulSet{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go index b2bfd73a77bd..113455df24c5 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go @@ -22,6 +22,4 @@ type ControllerRevisionExpansion interface{} type DeploymentExpansion interface{} -type ScaleExpansion interface{} - type StatefulSetExpansion interface{} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go deleted file mode 100644 index cef27bd1457c..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - rest "k8s.io/client-go/rest" -) - -// ScalesGetter has a method to return a ScaleInterface. -// A group's client should implement this interface. -type ScalesGetter interface { - Scales(namespace string) ScaleInterface -} - -// ScaleInterface has methods to work with Scale resources. -type ScaleInterface interface { - ScaleExpansion -} - -// scales implements ScaleInterface -type scales struct { - client rest.Interface - ns string -} - -// newScales returns a Scales -func newScales(c *AppsV1beta1Client, namespace string) *scales { - return &scales{ - client: c.RESTClient(), - ns: namespace, - } -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go index 651745451d8f..c4b35b424c70 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *statefulSets) Get(name string, options v1.GetOptions) (result *v1beta1. // List takes label and field selectors, and returns the list of StatefulSets that match those selectors. 
func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta1.StatefulSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.StatefulSetList{} err = c.client.Get(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta1.StatefulSetLis // Watch returns a watch.Interface that watches the requested statefulSets. func (c *statefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *statefulSets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *statefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/BUILD index 246b4327fd72..9edb75248386 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/BUILD @@ -15,7 +15,6 @@ go_library( "doc.go", "generated_expansion.go", "replicaset.go", - "scale.go", "statefulset.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go index 27549499fb68..99d677f40502 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go @@ -31,7 +31,6 @@ type AppsV1beta2Interface interface { DaemonSetsGetter DeploymentsGetter ReplicaSetsGetter - ScalesGetter StatefulSetsGetter } @@ -56,10 +55,6 @@ func (c *AppsV1beta2Client) ReplicaSets(namespace string) ReplicaSetInterface { return newReplicaSets(c, namespace) } -func (c *AppsV1beta2Client) Scales(namespace string) ScaleInterface { - return newScales(c, namespace) -} - func (c *AppsV1beta2Client) StatefulSets(namespace string) StatefulSetInterface { return newStatefulSets(c, namespace) } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go index 1271cc623fef..e1d6025155ed 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go @@ -19,6 +19,8 @@ limitations under the License. 
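The `scale.go`/`fake_scale.go` deletions in apps/v1beta1 above, and the matching v1beta2 removals here, drop a stub whose `ScaleInterface` declared no methods, so nothing callable is lost; only code referencing `ScalesGetter` or `Scales(namespace)` stops compiling, and the scale subresource is now reached through the apps/v1 helpers shown earlier. A compile-time sketch of the narrowed surface:

```go
package main

import (
	appsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1"
)

// AppsV1beta1Interface now consists of RESTClient plus the
// ControllerRevisions, Deployments, and StatefulSets getters;
// *AppsV1beta1Client still satisfies it after the removal.
var _ appsv1beta1.AppsV1beta1Interface = &appsv1beta1.AppsV1beta1Client{}

func main() {}
```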
package v1beta2 import ( + "time" + v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *controllerRevisions) Get(name string, options v1.GetOptions) (result *v // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta2.ControllerRevisionList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta2.ControllerRevisionList{} err = c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta2.Control // Watch returns a watch.Interface that watches the requested controllerRevisions. func (c *controllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *controllerRevisions) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *controllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go index 683c06812162..f8b7ac259733 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta2 import ( + "time" + v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *daemonSets) Get(name string, options v1.GetOptions) (result *v1beta2.Da // List takes label and field selectors, and returns the list of DaemonSets that match those selectors. func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta2.DaemonSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta2.DaemonSetList{} err = c.client.Get(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta2.DaemonSetList, e // Watch returns a watch.Interface that watches the requested daemonSets. 
func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *daemonSets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *daemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go index 9a04513f1ba1..510250b06e17 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta2 import ( + "time" + v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta2.D // List takes label and field selectors, and returns the list of Deployments that match those selectors. func (c *deployments) List(opts v1.ListOptions) (result *v1beta2.DeploymentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta2.DeploymentList{} err = c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *deployments) List(opts v1.ListOptions) (result *v1beta2.DeploymentList, // Watch returns a watch.Interface that watches the requested deployments. func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("deployments"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/BUILD index 37b34afd975a..642f08c79c55 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/BUILD @@ -14,7 +14,6 @@ go_library( "fake_daemonset.go", "fake_deployment.go", "fake_replicaset.go", - "fake_scale.go", "fake_statefulset.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_apps_client.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_apps_client.go index f7d79d352258..0ec34a2cdbc3 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_apps_client.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_apps_client.go @@ -44,10 +44,6 @@ func (c *FakeAppsV1beta2) ReplicaSets(namespace string) v1beta2.ReplicaSetInterf return &FakeReplicaSets{c, namespace} } -func (c *FakeAppsV1beta2) Scales(namespace string) v1beta2.ScaleInterface { - return &FakeScales{c, namespace} -} - func (c *FakeAppsV1beta2) StatefulSets(namespace string) v1beta2.StatefulSetInterface { return &FakeStatefulSets{c, namespace} } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go index 954ac35df8c7..197f190cbdad 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go @@ -119,7 +119,7 @@ func (c *FakeControllerRevisions) DeleteCollection(options *v1.DeleteOptions, li // Patch applies the patch and returns the patched controllerRevision. func (c *FakeControllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ControllerRevision, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, data, subresources...), &v1beta2.ControllerRevision{}) + Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, pt, data, subresources...), &v1beta2.ControllerRevision{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go index 38a1475503e6..b50747fdc9c0 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go @@ -131,7 +131,7 @@ func (c *FakeDaemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions // Patch applies the patch and returns the patched daemonSet. func (c *FakeDaemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.DaemonSet, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, data, subresources...), &v1beta2.DaemonSet{}) + Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, pt, data, subresources...), &v1beta2.DaemonSet{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go index cae2322424bc..b74d24ed7c4d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go @@ -131,7 +131,7 @@ func (c *FakeDeployments) DeleteCollection(options *v1.DeleteOptions, listOption // Patch applies the patch and returns the patched deployment. func (c *FakeDeployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.Deployment, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, data, subresources...), &v1beta2.Deployment{}) + Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &v1beta2.Deployment{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go index 05fa78931802..ba1de33ecf2c 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go @@ -131,7 +131,7 @@ func (c *FakeReplicaSets) DeleteCollection(options *v1.DeleteOptions, listOption // Patch applies the patch and returns the patched replicaSet. func (c *FakeReplicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ReplicaSet, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, data, subresources...), &v1beta2.ReplicaSet{}) + Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, pt, data, subresources...), &v1beta2.ReplicaSet{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_scale.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_scale.go deleted file mode 100644 index b06b7e8e303e..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_scale.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -// FakeScales implements ScaleInterface -type FakeScales struct { - Fake *FakeAppsV1beta2 - ns string -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go index fe78512862a5..652c7cbc5d67 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go @@ -131,7 +131,7 @@ func (c *FakeStatefulSets) DeleteCollection(options *v1.DeleteOptions, listOptio // Patch applies the patch and returns the patched statefulSet. func (c *FakeStatefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.StatefulSet, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, data, subresources...), &v1beta2.StatefulSet{}) + Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, pt, data, subresources...), &v1beta2.StatefulSet{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go index bceae5986233..6a21749687d2 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go @@ -26,6 +26,4 @@ type DeploymentExpansion interface{} type ReplicaSetExpansion interface{} -type ScaleExpansion interface{} - type StatefulSetExpansion interface{} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go index 9fd9de930ba2..7b738774b79d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta2 import ( + "time" + v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *replicaSets) Get(name string, options v1.GetOptions) (result *v1beta2.R // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta2.ReplicaSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta2.ReplicaSetList{} err = c.client.Get(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta2.ReplicaSetList, // Watch returns a watch.Interface that watches the requested replicaSets. func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("replicasets"). 
VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *replicaSets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *replicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go deleted file mode 100644 index f8d6a7fb0f80..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta2 - -import ( - rest "k8s.io/client-go/rest" -) - -// ScalesGetter has a method to return a ScaleInterface. -// A group's client should implement this interface. -type ScalesGetter interface { - Scales(namespace string) ScaleInterface -} - -// ScaleInterface has methods to work with Scale resources. -type ScaleInterface interface { - ScaleExpansion -} - -// scales implements ScaleInterface -type scales struct { - client rest.Interface - ns string -} - -// newScales returns a Scales -func newScales(c *AppsV1beta2Client, namespace string) *scales { - return &scales{ - client: c.RESTClient(), - ns: namespace, - } -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go index 095601e15a58..de7c3db8b506 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta2 import ( + "time" + v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -79,11 +81,16 @@ func (c *statefulSets) Get(name string, options v1.GetOptions) (result *v1beta2. // List takes label and field selectors, and returns the list of StatefulSets that match those selectors. func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta2.StatefulSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta2.StatefulSetList{} err = c.client.Get(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). 
Into(result) return @@ -91,11 +98,16 @@ func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta2.StatefulSetLis // Watch returns a watch.Interface that watches the requested statefulSets. func (c *statefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -153,10 +165,15 @@ func (c *statefulSets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *statefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/BUILD new file mode 100644 index 000000000000..cb5b1a5ede01 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/BUILD @@ -0,0 +1,40 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "auditregistration_client.go", + "auditsink.go", + "doc.go", + "generated_expansion.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1", + importpath = "k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/api/auditregistration/v1alpha1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", + "//staging/src/k8s.io/client-go/rest:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go new file mode 100644 index 000000000000..f007b05ef7df --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/auditregistration/v1alpha1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type AuditregistrationV1alpha1Interface interface { + RESTClient() rest.Interface + AuditSinksGetter +} + +// AuditregistrationV1alpha1Client is used to interact with features provided by the auditregistration.k8s.io group. +type AuditregistrationV1alpha1Client struct { + restClient rest.Interface +} + +func (c *AuditregistrationV1alpha1Client) AuditSinks() AuditSinkInterface { + return newAuditSinks(c) +} + +// NewForConfig creates a new AuditregistrationV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*AuditregistrationV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AuditregistrationV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new AuditregistrationV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AuditregistrationV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AuditregistrationV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *AuditregistrationV1alpha1Client { + return &AuditregistrationV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AuditregistrationV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go new file mode 100644 index 000000000000..414d480062eb --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "k8s.io/api/auditregistration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// AuditSinksGetter has a method to return a AuditSinkInterface. +// A group's client should implement this interface. +type AuditSinksGetter interface { + AuditSinks() AuditSinkInterface +} + +// AuditSinkInterface has methods to work with AuditSink resources. +type AuditSinkInterface interface { + Create(*v1alpha1.AuditSink) (*v1alpha1.AuditSink, error) + Update(*v1alpha1.AuditSink) (*v1alpha1.AuditSink, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.AuditSink, error) + List(opts v1.ListOptions) (*v1alpha1.AuditSinkList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.AuditSink, err error) + AuditSinkExpansion +} + +// auditSinks implements AuditSinkInterface +type auditSinks struct { + client rest.Interface +} + +// newAuditSinks returns a AuditSinks +func newAuditSinks(c *AuditregistrationV1alpha1Client) *auditSinks { + return &auditSinks{ + client: c.RESTClient(), + } +} + +// Get takes name of the auditSink, and returns the corresponding auditSink object, and an error if there is any. +func (c *auditSinks) Get(name string, options v1.GetOptions) (result *v1alpha1.AuditSink, err error) { + result = &v1alpha1.AuditSink{} + err = c.client.Get(). + Resource("auditsinks"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of AuditSinks that match those selectors. +func (c *auditSinks) List(opts v1.ListOptions) (result *v1alpha1.AuditSinkList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.AuditSinkList{} + err = c.client.Get(). + Resource("auditsinks"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested auditSinks. +func (c *auditSinks) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("auditsinks"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a auditSink and creates it. Returns the server's representation of the auditSink, and an error, if there is any. +func (c *auditSinks) Create(auditSink *v1alpha1.AuditSink) (result *v1alpha1.AuditSink, err error) { + result = &v1alpha1.AuditSink{} + err = c.client.Post(). + Resource("auditsinks"). + Body(auditSink). + Do(). + Into(result) + return +} + +// Update takes the representation of a auditSink and updates it. Returns the server's representation of the auditSink, and an error, if there is any. 
+func (c *auditSinks) Update(auditSink *v1alpha1.AuditSink) (result *v1alpha1.AuditSink, err error) { + result = &v1alpha1.AuditSink{} + err = c.client.Put(). + Resource("auditsinks"). + Name(auditSink.Name). + Body(auditSink). + Do(). + Into(result) + return +} + +// Delete takes name of the auditSink and deletes it. Returns an error if one occurs. +func (c *auditSinks) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("auditsinks"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *auditSinks) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("auditsinks"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched auditSink. +func (c *auditSinks) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.AuditSink, err error) { + result = &v1alpha1.AuditSink{} + err = c.client.Patch(pt). + Resource("auditsinks"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/doc.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/doc.go new file mode 100644 index 000000000000..df51baa4d4c1 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
+package v1alpha1 diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/BUILD new file mode 100644 index 000000000000..76ac78a7db83 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/BUILD @@ -0,0 +1,38 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "fake_auditregistration_client.go", + "fake_auditsink.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake", + importpath = "k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/api/auditregistration/v1alpha1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1:go_default_library", + "//staging/src/k8s.io/client-go/rest:go_default_library", + "//staging/src/k8s.io/client-go/testing:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/doc.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/doc.go new file mode 100644 index 000000000000..16f44399065e --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/fake_auditregistration_client.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/fake_auditregistration_client.go new file mode 100644 index 000000000000..c22acabcf47e --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/fake_auditregistration_client.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeAuditregistrationV1alpha1 struct { + *testing.Fake +} + +func (c *FakeAuditregistrationV1alpha1) AuditSinks() v1alpha1.AuditSinkInterface { + return &FakeAuditSinks{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeAuditregistrationV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/fake_auditsink.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/fake_auditsink.go new file mode 100644 index 000000000000..d0bb9fd00094 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/fake_auditsink.go @@ -0,0 +1,120 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "k8s.io/api/auditregistration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeAuditSinks implements AuditSinkInterface +type FakeAuditSinks struct { + Fake *FakeAuditregistrationV1alpha1 +} + +var auditsinksResource = schema.GroupVersionResource{Group: "auditregistration.k8s.io", Version: "v1alpha1", Resource: "auditsinks"} + +var auditsinksKind = schema.GroupVersionKind{Group: "auditregistration.k8s.io", Version: "v1alpha1", Kind: "AuditSink"} + +// Get takes name of the auditSink, and returns the corresponding auditSink object, and an error if there is any. +func (c *FakeAuditSinks) Get(name string, options v1.GetOptions) (result *v1alpha1.AuditSink, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(auditsinksResource, name), &v1alpha1.AuditSink{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.AuditSink), err +} + +// List takes label and field selectors, and returns the list of AuditSinks that match those selectors. +func (c *FakeAuditSinks) List(opts v1.ListOptions) (result *v1alpha1.AuditSinkList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootListAction(auditsinksResource, auditsinksKind, opts), &v1alpha1.AuditSinkList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.AuditSinkList{ListMeta: obj.(*v1alpha1.AuditSinkList).ListMeta} + for _, item := range obj.(*v1alpha1.AuditSinkList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested auditSinks. +func (c *FakeAuditSinks) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(auditsinksResource, opts)) +} + +// Create takes the representation of a auditSink and creates it. Returns the server's representation of the auditSink, and an error, if there is any. +func (c *FakeAuditSinks) Create(auditSink *v1alpha1.AuditSink) (result *v1alpha1.AuditSink, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(auditsinksResource, auditSink), &v1alpha1.AuditSink{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.AuditSink), err +} + +// Update takes the representation of a auditSink and updates it. Returns the server's representation of the auditSink, and an error, if there is any. +func (c *FakeAuditSinks) Update(auditSink *v1alpha1.AuditSink) (result *v1alpha1.AuditSink, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(auditsinksResource, auditSink), &v1alpha1.AuditSink{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.AuditSink), err +} + +// Delete takes name of the auditSink and deletes it. Returns an error if one occurs. +func (c *FakeAuditSinks) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(auditsinksResource, name), &v1alpha1.AuditSink{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeAuditSinks) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(auditsinksResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.AuditSinkList{}) + return err +} + +// Patch applies the patch and returns the patched auditSink. +func (c *FakeAuditSinks) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.AuditSink, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(auditsinksResource, name, pt, data, subresources...), &v1alpha1.AuditSink{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.AuditSink), err +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/generated_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/generated_expansion.go new file mode 100644 index 000000000000..f0f511726413 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type AuditSinkExpansion interface{} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go index 7df8343753ca..6a4bf98810d1 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go @@ -131,7 +131,7 @@ func (c *FakeHorizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOption // Patch applies the patch and returns the patched horizontalPodAutoscaler. func (c *FakeHorizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *autoscalingv1.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, data, subresources...), &autoscalingv1.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, pt, data, subresources...), &autoscalingv1.HorizontalPodAutoscaler{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go index 6891b6b63ddb..0e0839fb508e 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *horizontalPodAutoscalers) Get(name string, options metav1.GetOptions) ( // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. func (c *horizontalPodAutoscalers) List(opts metav1.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.HorizontalPodAutoscalerList{} err = c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *horizontalPodAutoscalers) List(opts metav1.ListOptions) (result *v1.Hor // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. func (c *horizontalPodAutoscalers) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). 
Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *horizontalPodAutoscalers) Delete(name string, options *metav1.DeleteOpt // DeleteCollection deletes a collection of objects. func (c *horizontalPodAutoscalers) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go index 2d860341af81..514a787cb1d9 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go @@ -131,7 +131,7 @@ func (c *FakeHorizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOption // Patch applies the patch and returns the patched horizontalPodAutoscaler. func (c *FakeHorizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, data, subresources...), &v2beta1.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, pt, data, subresources...), &v2beta1.HorizontalPodAutoscaler{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go index 4ac8cce71bf7..02d5cfb9b60f 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -19,6 +19,8 @@ limitations under the License. package v2beta1 import ( + "time" + v2beta1 "k8s.io/api/autoscaling/v2beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *horizontalPodAutoscalers) Get(name string, options v1.GetOptions) (resu // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta1.HorizontalPodAutoscalerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v2beta1.HorizontalPodAutoscalerList{} err = c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta1.Ho // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. 
func (c *horizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *horizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions // DeleteCollection deletes a collection of objects. func (c *horizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go index a19b86e2d037..c0569f00adbd 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go @@ -131,7 +131,7 @@ func (c *FakeHorizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOption // Patch applies the patch and returns the patched horizontalPodAutoscaler. func (c *FakeHorizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, data, subresources...), &v2beta2.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, pt, data, subresources...), &v2beta2.HorizontalPodAutoscaler{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go index ddabda7e702c..91a0fa64f9ab 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go @@ -19,6 +19,8 @@ limitations under the License. package v2beta2 import ( + "time" + v2beta2 "k8s.io/api/autoscaling/v2beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *horizontalPodAutoscalers) Get(name string, options v1.GetOptions) (resu // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta2.HorizontalPodAutoscalerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v2beta2.HorizontalPodAutoscalerList{} err = c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). 
VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta2.Ho // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. func (c *horizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *horizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions // DeleteCollection deletes a collection of objects. func (c *horizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go index f12619bb423e..06dc25c6b48d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go @@ -131,7 +131,7 @@ func (c *FakeJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.Li // Patch applies the patch and returns the patched job. func (c *FakeJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *batchv1.Job, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(jobsResource, c.ns, name, data, subresources...), &batchv1.Job{}) + Invokes(testing.NewPatchSubresourceAction(jobsResource, c.ns, name, pt, data, subresources...), &batchv1.Job{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go index ba8332a9a25a..b55c602b3418 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/batch/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *jobs) Get(name string, options metav1.GetOptions) (result *v1.Job, err // List takes label and field selectors, and returns the list of Jobs that match those selectors. func (c *jobs) List(opts metav1.ListOptions) (result *v1.JobList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.JobList{} err = c.client.Get(). Namespace(c.ns). Resource("jobs"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *jobs) List(opts metav1.ListOptions) (result *v1.JobList, err error) { // Watch returns a watch.Interface that watches the requested jobs. 
func (c *jobs) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("jobs"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *jobs) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *jobs) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("jobs"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go index 04637c36aa7a..d89d2fa21d41 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/batch/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *cronJobs) Get(name string, options v1.GetOptions) (result *v1beta1.Cron // List takes label and field selectors, and returns the list of CronJobs that match those selectors. func (c *cronJobs) List(opts v1.ListOptions) (result *v1beta1.CronJobList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.CronJobList{} err = c.client.Get(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *cronJobs) List(opts v1.ListOptions) (result *v1beta1.CronJobList, err e // Watch returns a watch.Interface that watches the requested cronJobs. func (c *cronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *cronJobs) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *cronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go index d80ef5e67e93..3985c4037467 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go @@ -131,7 +131,7 @@ func (c *FakeCronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v // Patch applies the patch and returns the patched cronJob. func (c *FakeCronJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CronJob, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, name, data, subresources...), &v1beta1.CronJob{}) + Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, name, pt, data, subresources...), &v1beta1.CronJob{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go index 4d922f9ae9e2..19123b60411b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go @@ -19,6 +19,8 @@ limitations under the License. package v2alpha1 import ( + "time" + v2alpha1 "k8s.io/api/batch/v2alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *cronJobs) Get(name string, options v1.GetOptions) (result *v2alpha1.Cro // List takes label and field selectors, and returns the list of CronJobs that match those selectors. func (c *cronJobs) List(opts v1.ListOptions) (result *v2alpha1.CronJobList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v2alpha1.CronJobList{} err = c.client.Get(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *cronJobs) List(opts v1.ListOptions) (result *v2alpha1.CronJobList, err // Watch returns a watch.Interface that watches the requested cronJobs. func (c *cronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *cronJobs) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *cronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go index 75c0b17338f4..2195027d2722 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go @@ -131,7 +131,7 @@ func (c *FakeCronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v // Patch applies the patch and returns the patched cronJob. func (c *FakeCronJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.CronJob, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, name, data, subresources...), &v2alpha1.CronJob{}) + Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, name, pt, data, subresources...), &v2alpha1.CronJob{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go index b39169a8ff6e..712d3a01afa4 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/certificates/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -73,10 +75,15 @@ func (c *certificateSigningRequests) Get(name string, options v1.GetOptions) (re // List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. func (c *certificateSigningRequests) List(opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.CertificateSigningRequestList{} err = c.client.Get(). Resource("certificatesigningrequests"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -84,10 +91,15 @@ func (c *certificateSigningRequests) List(opts v1.ListOptions) (result *v1beta1. // Watch returns a watch.Interface that watches the requested certificateSigningRequests. func (c *certificateSigningRequests) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("certificatesigningrequests"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -141,9 +153,14 @@ func (c *certificateSigningRequests) Delete(name string, options *v1.DeleteOptio // DeleteCollection deletes a collection of objects. func (c *certificateSigningRequests) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("certificatesigningrequests"). VersionedParams(&listOptions, scheme.ParameterCodec). 
+ Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go index dfd5171951e7..aa45c8803365 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go @@ -123,7 +123,7 @@ func (c *FakeCertificateSigningRequests) DeleteCollection(options *v1.DeleteOpti // Patch applies the patch and returns the patched certificateSigningRequest. func (c *FakeCertificateSigningRequests) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, name, data, subresources...), &v1beta1.CertificateSigningRequest{}) + Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, name, pt, data, subresources...), &v1beta1.CertificateSigningRequest{}) if obj == nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go index 3204e02913ed..0ebf3bffc2b6 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go @@ -119,7 +119,7 @@ func (c *FakeLeases) DeleteCollection(options *v1.DeleteOptions, listOptions v1. // Patch applies the patch and returns the patched lease. func (c *FakeLeases) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Lease, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(leasesResource, c.ns, name, data, subresources...), &v1beta1.Lease{}) + Invokes(testing.NewPatchSubresourceAction(leasesResource, c.ns, name, pt, data, subresources...), &v1beta1.Lease{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go index 16277255fa6c..490d815aa633 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/coordination/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *leases) Get(name string, options v1.GetOptions) (result *v1beta1.Lease, // List takes label and field selectors, and returns the list of Leases that match those selectors. func (c *leases) List(opts v1.ListOptions) (result *v1beta1.LeaseList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.LeaseList{} err = c.client.Get(). Namespace(c.ns). Resource("leases"). VersionedParams(&opts, scheme.ParameterCodec). 
+ Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *leases) List(opts v1.ListOptions) (result *v1beta1.LeaseList, err error // Watch returns a watch.Interface that watches the requested leases. func (c *leases) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("leases"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *leases) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *leases) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("leases"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/BUILD index 76f16bfa13e1..1377227a6096 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/BUILD @@ -38,8 +38,8 @@ go_library( importpath = "k8s.io/client-go/kubernetes/typed/core/v1", deps = [ "//staging/src/k8s.io/api/authentication/v1:go_default_library", + "//staging/src/k8s.io/api/autoscaling/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library", "//staging/src/k8s.io/api/policy/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go index e497661cfbe0..302b2fdc3440 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *componentStatuses) Get(name string, options metav1.GetOptions) (result // List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors. func (c *componentStatuses) List(opts metav1.ListOptions) (result *v1.ComponentStatusList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.ComponentStatusList{} err = c.client.Get(). Resource("componentstatuses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *componentStatuses) List(opts metav1.ListOptions) (result *v1.ComponentS // Watch returns a watch.Interface that watches the requested componentStatuses. 
func (c *componentStatuses) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("componentstatuses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *componentStatuses) Delete(name string, options *metav1.DeleteOptions) e // DeleteCollection deletes a collection of objects. func (c *componentStatuses) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("componentstatuses"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go index 0984ae70cc1c..18ce954ae23d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *configMaps) Get(name string, options metav1.GetOptions) (result *v1.Con // List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. func (c *configMaps) List(opts metav1.ListOptions) (result *v1.ConfigMapList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.ConfigMapList{} err = c.client.Get(). Namespace(c.ns). Resource("configmaps"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *configMaps) List(opts metav1.ListOptions) (result *v1.ConfigMapList, er // Watch returns a watch.Interface that watches the requested configMaps. func (c *configMaps) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("configmaps"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *configMaps) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *configMaps) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("configmaps"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go index dd8216789bc9..978a2a196cc5 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *endpoints) Get(name string, options metav1.GetOptions) (result *v1.Endp // List takes label and field selectors, and returns the list of Endpoints that match those selectors. func (c *endpoints) List(opts metav1.ListOptions) (result *v1.EndpointsList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.EndpointsList{} err = c.client.Get(). Namespace(c.ns). Resource("endpoints"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *endpoints) List(opts metav1.ListOptions) (result *v1.EndpointsList, err // Watch returns a watch.Interface that watches the requested endpoints. func (c *endpoints) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("endpoints"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *endpoints) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *endpoints) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("endpoints"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go index 57d30f9fd49b..55cfa0901b26 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *events) Get(name string, options metav1.GetOptions) (result *v1.Event, // List takes label and field selectors, and returns the list of Events that match those selectors. func (c *events) List(opts metav1.ListOptions) (result *v1.EventList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.EventList{} err = c.client.Get(). Namespace(c.ns). Resource("events"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). 
Into(result) return @@ -87,11 +94,16 @@ func (c *events) List(opts metav1.ListOptions) (result *v1.EventList, err error) // Watch returns a watch.Interface that watches the requested events. func (c *events) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("events"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *events) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *events) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("events"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/BUILD index 32c81b04af76..022c147acd59 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/BUILD @@ -37,8 +37,8 @@ go_library( importpath = "k8s.io/client-go/kubernetes/typed/core/v1/fake", deps = [ "//staging/src/k8s.io/api/authentication/v1:go_default_library", + "//staging/src/k8s.io/api/autoscaling/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library", "//staging/src/k8s.io/api/policy/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go index d06023d48266..18beedc2d348 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go @@ -112,7 +112,7 @@ func (c *FakeComponentStatuses) DeleteCollection(options *v1.DeleteOptions, list // Patch applies the patch and returns the patched componentStatus. func (c *FakeComponentStatuses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.ComponentStatus, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(componentstatusesResource, name, data, subresources...), &corev1.ComponentStatus{}) + Invokes(testing.NewRootPatchSubresourceAction(componentstatusesResource, name, pt, data, subresources...), &corev1.ComponentStatus{}) if obj == nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go index b491661f2083..2361ac3fe9df 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go @@ -119,7 +119,7 @@ func (c *FakeConfigMaps) DeleteCollection(options *v1.DeleteOptions, listOptions // Patch applies the patch and returns the patched configMap. func (c *FakeConfigMaps) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.ConfigMap, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(configmapsResource, c.ns, name, data, subresources...), &corev1.ConfigMap{}) + Invokes(testing.NewPatchSubresourceAction(configmapsResource, c.ns, name, pt, data, subresources...), &corev1.ConfigMap{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go index 2c9f0de0960d..d521af4083a8 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go @@ -119,7 +119,7 @@ func (c *FakeEndpoints) DeleteCollection(options *v1.DeleteOptions, listOptions // Patch applies the patch and returns the patched endpoints. func (c *FakeEndpoints) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Endpoints, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(endpointsResource, c.ns, name, data, subresources...), &corev1.Endpoints{}) + Invokes(testing.NewPatchSubresourceAction(endpointsResource, c.ns, name, pt, data, subresources...), &corev1.Endpoints{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go index 68405a54f137..3444f4be961a 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go @@ -119,7 +119,7 @@ func (c *FakeEvents) DeleteCollection(options *v1.DeleteOptions, listOptions v1. // Patch applies the patch and returns the patched event. func (c *FakeEvents) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Event, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, data, subresources...), &corev1.Event{}) + Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, pt, data, subresources...), &corev1.Event{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go index dd3fb839289a..4b4c90d7d196 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go @@ -21,6 +21,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" core "k8s.io/client-go/testing" ) @@ -52,10 +53,13 @@ func (c *FakeEvents) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error } // PatchWithEventNamespace patches an existing event. Returns the copy of the event the server returns, or an error. +// TODO: Should take a PatchType as an argument probably. func (c *FakeEvents) PatchWithEventNamespace(event *v1.Event, data []byte) (*v1.Event, error) { - action := core.NewRootPatchAction(eventsResource, event.Name, data) + // TODO: Should be configurable to support additional patch strategies. + pt := types.StrategicMergePatchType + action := core.NewRootPatchAction(eventsResource, event.Name, pt, data) if c.ns != "" { - action = core.NewPatchAction(eventsResource, c.ns, event.Name, data) + action = core.NewPatchAction(eventsResource, c.ns, event.Name, pt, data) } obj, err := c.Fake.Invokes(action, event) if obj == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go index 03c03c5d0e20..d110031f8325 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go @@ -119,7 +119,7 @@ func (c *FakeLimitRanges) DeleteCollection(options *v1.DeleteOptions, listOption // Patch applies the patch and returns the patched limitRange. func (c *FakeLimitRanges) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.LimitRange, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(limitrangesResource, c.ns, name, data, subresources...), &corev1.LimitRange{}) + Invokes(testing.NewPatchSubresourceAction(limitrangesResource, c.ns, name, pt, data, subresources...), &corev1.LimitRange{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go index 12b918af01b6..21387b5e254f 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go @@ -115,7 +115,7 @@ func (c *FakeNamespaces) Delete(name string, options *v1.DeleteOptions) error { // Patch applies the patch and returns the patched namespace. 
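[Editor's note] The fake-clientset half of this change is mechanical but meaningful: NewPatchSubresourceAction and NewRootPatchSubresourceAction now take the caller's types.PatchType instead of dropping it, so recorded actions preserve which patch encoding was requested. The three encodings are not interchangeable, which is why a test double should keep the distinction. A sketch under that assumption; the describe helper is ours:

package example

import (
	"fmt"

	types "k8s.io/apimachinery/pkg/types"
)

// describe shows why the recorded PatchType matters: the same bytes
// mean different things under each encoding.
func describe(pt types.PatchType, data []byte) string {
	switch pt {
	case types.JSONPatchType: // RFC 6902 list of operations
		return fmt.Sprintf("json-patch: %s", data)
	case types.MergePatchType: // RFC 7386 merge document
		return fmt.Sprintf("merge-patch: %s", data)
	case types.StrategicMergePatchType: // Kubernetes-specific merge semantics
		return fmt.Sprintf("strategic-merge-patch: %s", data)
	default:
		return "unknown patch type"
	}
}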
func (c *FakeNamespaces) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Namespace, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(namespacesResource, name, data, subresources...), &corev1.Namespace{}) + Invokes(testing.NewRootPatchSubresourceAction(namespacesResource, name, pt, data, subresources...), &corev1.Namespace{}) if obj == nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go index a2bc97b2c6c4..bcde116a4e42 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go @@ -123,7 +123,7 @@ func (c *FakeNodes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.L // Patch applies the patch and returns the patched node. func (c *FakeNodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Node, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(nodesResource, name, data, subresources...), &corev1.Node{}) + Invokes(testing.NewRootPatchSubresourceAction(nodesResource, name, pt, data, subresources...), &corev1.Node{}) if obj == nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go index eb684fd2951d..a39022c83f44 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go @@ -18,12 +18,16 @@ package fake import ( "k8s.io/api/core/v1" + types "k8s.io/apimachinery/pkg/types" core "k8s.io/client-go/testing" ) +// TODO: Should take a PatchType as an argument probably. func (c *FakeNodes) PatchStatus(nodeName string, data []byte) (*v1.Node, error) { + // TODO: Should be configurable to support additional patch strategies. + pt := types.StrategicMergePatchType obj, err := c.Fake.Invokes( - core.NewRootPatchSubresourceAction(nodesResource, nodeName, data, "status"), &v1.Node{}) + core.NewRootPatchSubresourceAction(nodesResource, nodeName, pt, data, "status"), &v1.Node{}) if obj == nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go index 71e2f2dc5765..843f323075e6 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go @@ -123,7 +123,7 @@ func (c *FakePersistentVolumes) DeleteCollection(options *v1.DeleteOptions, list // Patch applies the patch and returns the patched persistentVolume. func (c *FakePersistentVolumes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.PersistentVolume, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(persistentvolumesResource, name, data, subresources...), &corev1.PersistentVolume{}) + Invokes(testing.NewRootPatchSubresourceAction(persistentvolumesResource, name, pt, data, subresources...), &corev1.PersistentVolume{}) if obj == nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go index a06eca81cb8f..d2557c4c83c9 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go @@ -131,7 +131,7 @@ func (c *FakePersistentVolumeClaims) DeleteCollection(options *v1.DeleteOptions, // Patch applies the patch and returns the patched persistentVolumeClaim. func (c *FakePersistentVolumeClaims) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.PersistentVolumeClaim, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(persistentvolumeclaimsResource, c.ns, name, data, subresources...), &corev1.PersistentVolumeClaim{}) + Invokes(testing.NewPatchSubresourceAction(persistentvolumeclaimsResource, c.ns, name, pt, data, subresources...), &corev1.PersistentVolumeClaim{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go index bbf39eafc28e..2dbecbbaaccf 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go @@ -131,7 +131,7 @@ func (c *FakePods) DeleteCollection(options *v1.DeleteOptions, listOptions v1.Li // Patch applies the patch and returns the patched pod. func (c *FakePods) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Pod, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(podsResource, c.ns, name, data, subresources...), &corev1.Pod{}) + Invokes(testing.NewPatchSubresourceAction(podsResource, c.ns, name, pt, data, subresources...), &corev1.Pod{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go index 497cc7857059..9c4b09d3e9f9 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go @@ -26,20 +26,31 @@ import ( func (c *FakePods) Bind(binding *v1.Binding) error { action := core.CreateActionImpl{} action.Verb = "create" + action.Namespace = binding.Namespace action.Resource = podsResource - action.Subresource = "bindings" + action.Subresource = "binding" action.Object = binding _, err := c.Fake.Invokes(action, binding) return err } +func (c *FakePods) GetBinding(name string) (result *v1.Binding, err error) { + obj, err := c.Fake. 
+ Invokes(core.NewGetSubresourceAction(podsResource, c.ns, "binding", name), &v1.Binding{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Binding), err +} + func (c *FakePods) GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request { action := core.GenericActionImpl{} action.Verb = "get" action.Namespace = c.ns action.Resource = podsResource - action.Subresource = "logs" + action.Subresource = "log" action.Value = opts _, _ = c.Fake.Invokes(action, &v1.Pod{}) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go index ff242f1660f8..307f30594e2a 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go @@ -119,7 +119,7 @@ func (c *FakePodTemplates) DeleteCollection(options *v1.DeleteOptions, listOptio // Patch applies the patch and returns the patched podTemplate. func (c *FakePodTemplates) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.PodTemplate, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(podtemplatesResource, c.ns, name, data, subresources...), &corev1.PodTemplate{}) + Invokes(testing.NewPatchSubresourceAction(podtemplatesResource, c.ns, name, pt, data, subresources...), &corev1.PodTemplate{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go index 64fde0b6cefe..6de94c14829b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go @@ -19,8 +19,8 @@ limitations under the License. package fake import ( + autoscalingv1 "k8s.io/api/autoscaling/v1" corev1 "k8s.io/api/core/v1" - v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -132,7 +132,7 @@ func (c *FakeReplicationControllers) DeleteCollection(options *v1.DeleteOptions, // Patch applies the patch and returns the patched replicationController. func (c *FakeReplicationControllers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.ReplicationController, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicationcontrollersResource, c.ns, name, data, subresources...), &corev1.ReplicationController{}) + Invokes(testing.NewPatchSubresourceAction(replicationcontrollersResource, c.ns, name, pt, data, subresources...), &corev1.ReplicationController{}) if obj == nil { return nil, err @@ -141,23 +141,23 @@ func (c *FakeReplicationControllers) Patch(name string, pt types.PatchType, data } // GetScale takes name of the replicationController, and returns the corresponding scale object, and an error if there is any. 
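[Editor's note] The hunks that follow migrate the ReplicationController scale subresource from the deprecated extensions/v1beta1.Scale to autoscaling/v1.Scale; the BUILD dependency swap earlier in this diff is the same change. The call pattern is unchanged, only the round-tripped type moves. A sketch of a caller under the new signatures, assuming a configured clientset and an existing controller:

package example

import (
	autoscalingv1 "k8s.io/api/autoscaling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// scaleRC resizes a ReplicationController through the scale
// subresource, which after this bump round-trips as
// autoscaling/v1.Scale rather than the extensions/v1beta1 type.
func scaleRC(cs kubernetes.Interface, ns, rcName string, replicas int32) (*autoscalingv1.Scale, error) {
	scale, err := cs.CoreV1().ReplicationControllers(ns).GetScale(rcName, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	scale.Spec.Replicas = replicas
	return cs.CoreV1().ReplicationControllers(ns).UpdateScale(rcName, scale)
}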
-func (c *FakeReplicationControllers) GetScale(replicationControllerName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { +func (c *FakeReplicationControllers) GetScale(replicationControllerName string, options v1.GetOptions) (result *autoscalingv1.Scale, err error) { obj, err := c.Fake. - Invokes(testing.NewGetSubresourceAction(replicationcontrollersResource, c.ns, "scale", replicationControllerName), &v1beta1.Scale{}) + Invokes(testing.NewGetSubresourceAction(replicationcontrollersResource, c.ns, "scale", replicationControllerName), &autoscalingv1.Scale{}) if obj == nil { return nil, err } - return obj.(*v1beta1.Scale), err + return obj.(*autoscalingv1.Scale), err } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *FakeReplicationControllers) UpdateScale(replicationControllerName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { +func (c *FakeReplicationControllers) UpdateScale(replicationControllerName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(replicationcontrollersResource, "scale", c.ns, scale), &v1beta1.Scale{}) + Invokes(testing.NewUpdateSubresourceAction(replicationcontrollersResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) if obj == nil { return nil, err } - return obj.(*v1beta1.Scale), err + return obj.(*autoscalingv1.Scale), err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go index 069749ccff02..b521f7120bde 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go @@ -131,7 +131,7 @@ func (c *FakeResourceQuotas) DeleteCollection(options *v1.DeleteOptions, listOpt // Patch applies the patch and returns the patched resourceQuota. func (c *FakeResourceQuotas) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.ResourceQuota, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(resourcequotasResource, c.ns, name, data, subresources...), &corev1.ResourceQuota{}) + Invokes(testing.NewPatchSubresourceAction(resourcequotasResource, c.ns, name, pt, data, subresources...), &corev1.ResourceQuota{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go index f59ba40bc3d4..47dba9eff444 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go @@ -119,7 +119,7 @@ func (c *FakeSecrets) DeleteCollection(options *v1.DeleteOptions, listOptions v1 // Patch applies the patch and returns the patched secret. func (c *FakeSecrets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Secret, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(secretsResource, c.ns, name, data, subresources...), &corev1.Secret{}) + Invokes(testing.NewPatchSubresourceAction(secretsResource, c.ns, name, pt, data, subresources...), &corev1.Secret{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go index 2ffcdff76a16..a65de4991187 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go @@ -123,7 +123,7 @@ func (c *FakeServices) Delete(name string, options *v1.DeleteOptions) error { // Patch applies the patch and returns the patched service. func (c *FakeServices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Service, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, name, data, subresources...), &corev1.Service{}) + Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, name, pt, data, subresources...), &corev1.Service{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go index 2b2c5a7b22c6..5b6d8f8be51d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go @@ -119,7 +119,7 @@ func (c *FakeServiceAccounts) DeleteCollection(options *v1.DeleteOptions, listOp // Patch applies the patch and returns the patched serviceAccount. func (c *FakeServiceAccounts) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.ServiceAccount, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(serviceaccountsResource, c.ns, name, data, subresources...), &corev1.ServiceAccount{}) + Invokes(testing.NewPatchSubresourceAction(serviceaccountsResource, c.ns, name, pt, data, subresources...), &corev1.ServiceAccount{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go index 5b385668b811..2eeae11a8378 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *limitRanges) Get(name string, options metav1.GetOptions) (result *v1.Li // List takes label and field selectors, and returns the list of LimitRanges that match those selectors. func (c *limitRanges) List(opts metav1.ListOptions) (result *v1.LimitRangeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.LimitRangeList{} err = c.client.Get(). Namespace(c.ns). Resource("limitranges"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). 
Into(result) return @@ -87,11 +94,16 @@ func (c *limitRanges) List(opts metav1.ListOptions) (result *v1.LimitRangeList, // Watch returns a watch.Interface that watches the requested limitRanges. func (c *limitRanges) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("limitranges"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *limitRanges) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *limitRanges) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("limitranges"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go index e22d07decee4..8a81fe850799 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *namespaces) Get(name string, options metav1.GetOptions) (result *v1.Nam // List takes label and field selectors, and returns the list of Namespaces that match those selectors. func (c *namespaces) List(opts metav1.ListOptions) (result *v1.NamespaceList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.NamespaceList{} err = c.client.Get(). Resource("namespaces"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *namespaces) List(opts metav1.ListOptions) (result *v1.NamespaceList, er // Watch returns a watch.Interface that watches the requested namespaces. func (c *namespaces) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("namespaces"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go index 5c769c118572..d19fab8952f8 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -73,10 +75,15 @@ func (c *nodes) Get(name string, options metav1.GetOptions) (result *v1.Node, er // List takes label and field selectors, and returns the list of Nodes that match those selectors. func (c *nodes) List(opts metav1.ListOptions) (result *v1.NodeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.NodeList{} err = c.client.Get(). Resource("nodes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -84,10 +91,15 @@ func (c *nodes) List(opts metav1.ListOptions) (result *v1.NodeList, err error) { // Watch returns a watch.Interface that watches the requested nodes. func (c *nodes) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("nodes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -141,9 +153,14 @@ func (c *nodes) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *nodes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("nodes"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go index d5f19aef5229..74514825e987 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -73,10 +75,15 @@ func (c *persistentVolumes) Get(name string, options metav1.GetOptions) (result // List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors. func (c *persistentVolumes) List(opts metav1.ListOptions) (result *v1.PersistentVolumeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.PersistentVolumeList{} err = c.client.Get(). Resource("persistentvolumes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -84,10 +91,15 @@ func (c *persistentVolumes) List(opts metav1.ListOptions) (result *v1.Persistent // Watch returns a watch.Interface that watches the requested persistentVolumes. func (c *persistentVolumes) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("persistentvolumes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). 
Watch() } @@ -141,9 +153,14 @@ func (c *persistentVolumes) Delete(name string, options *metav1.DeleteOptions) e // DeleteCollection deletes a collection of objects. func (c *persistentVolumes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("persistentvolumes"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go index d32ae5dfd84f..410ab37dcba0 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *persistentVolumeClaims) Get(name string, options metav1.GetOptions) (re // List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors. func (c *persistentVolumeClaims) List(opts metav1.ListOptions) (result *v1.PersistentVolumeClaimList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.PersistentVolumeClaimList{} err = c.client.Get(). Namespace(c.ns). Resource("persistentvolumeclaims"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *persistentVolumeClaims) List(opts metav1.ListOptions) (result *v1.Persi // Watch returns a watch.Interface that watches the requested persistentVolumeClaims. func (c *persistentVolumeClaims) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("persistentvolumeclaims"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *persistentVolumeClaims) Delete(name string, options *metav1.DeleteOptio // DeleteCollection deletes a collection of objects. func (c *persistentVolumeClaims) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("persistentvolumeclaims"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go index b19c5a5c3ea6..8d6b6e879632 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *pods) Get(name string, options metav1.GetOptions) (result *v1.Pod, err // List takes label and field selectors, and returns the list of Pods that match those selectors. func (c *pods) List(opts metav1.ListOptions) (result *v1.PodList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.PodList{} err = c.client.Get(). Namespace(c.ns). Resource("pods"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *pods) List(opts metav1.ListOptions) (result *v1.PodList, err error) { // Watch returns a watch.Interface that watches the requested pods. func (c *pods) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("pods"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *pods) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *pods) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("pods"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go index d644e17d778c..84d7c9805931 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *podTemplates) Get(name string, options metav1.GetOptions) (result *v1.P // List takes label and field selectors, and returns the list of PodTemplates that match those selectors. func (c *podTemplates) List(opts metav1.ListOptions) (result *v1.PodTemplateList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.PodTemplateList{} err = c.client.Get(). Namespace(c.ns). Resource("podtemplates"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *podTemplates) List(opts metav1.ListOptions) (result *v1.PodTemplateList // Watch returns a watch.Interface that watches the requested podTemplates. func (c *podTemplates) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("podtemplates"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). 
Watch() } @@ -133,10 +145,15 @@ func (c *podTemplates) Delete(name string, options *metav1.DeleteOptions) error // DeleteCollection deletes a collection of objects. func (c *podTemplates) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("podtemplates"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go index 17622f1c2609..dd3182db65bf 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go @@ -19,8 +19,10 @@ limitations under the License. package v1 import ( + "time" + + autoscalingv1 "k8s.io/api/autoscaling/v1" v1 "k8s.io/api/core/v1" - v1beta1 "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -45,8 +47,8 @@ type ReplicationControllerInterface interface { List(opts metav1.ListOptions) (*v1.ReplicationControllerList, error) Watch(opts metav1.ListOptions) (watch.Interface, error) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicationController, err error) - GetScale(replicationControllerName string, options metav1.GetOptions) (*v1beta1.Scale, error) - UpdateScale(replicationControllerName string, scale *v1beta1.Scale) (*v1beta1.Scale, error) + GetScale(replicationControllerName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) + UpdateScale(replicationControllerName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error) ReplicationControllerExpansion } @@ -80,11 +82,16 @@ func (c *replicationControllers) Get(name string, options metav1.GetOptions) (re // List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors. func (c *replicationControllers) List(opts metav1.ListOptions) (result *v1.ReplicationControllerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.ReplicationControllerList{} err = c.client.Get(). Namespace(c.ns). Resource("replicationcontrollers"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -92,11 +99,16 @@ func (c *replicationControllers) List(opts metav1.ListOptions) (result *v1.Repli // Watch returns a watch.Interface that watches the requested replicationControllers. func (c *replicationControllers) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("replicationcontrollers"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -154,10 +166,15 @@ func (c *replicationControllers) Delete(name string, options *metav1.DeleteOptio // DeleteCollection deletes a collection of objects. 
func (c *replicationControllers) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("replicationcontrollers"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() @@ -177,9 +194,9 @@ func (c *replicationControllers) Patch(name string, pt types.PatchType, data []b return } -// GetScale takes name of the replicationController, and returns the corresponding v1beta1.Scale object, and an error if there is any. -func (c *replicationControllers) GetScale(replicationControllerName string, options metav1.GetOptions) (result *v1beta1.Scale, err error) { - result = &v1beta1.Scale{} +// GetScale takes name of the replicationController, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. +func (c *replicationControllers) GetScale(replicationControllerName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} err = c.client.Get(). Namespace(c.ns). Resource("replicationcontrollers"). @@ -192,8 +209,8 @@ func (c *replicationControllers) GetScale(replicationControllerName string, opti } // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *replicationControllers) UpdateScale(replicationControllerName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { - result = &v1beta1.Scale{} +func (c *replicationControllers) UpdateScale(replicationControllerName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} err = c.client.Put(). Namespace(c.ns). Resource("replicationcontrollers"). diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go index 8b74a4046f0e..5a178990ecd0 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *resourceQuotas) Get(name string, options metav1.GetOptions) (result *v1 // List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. func (c *resourceQuotas) List(opts metav1.ListOptions) (result *v1.ResourceQuotaList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.ResourceQuotaList{} err = c.client.Get(). Namespace(c.ns). Resource("resourcequotas"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *resourceQuotas) List(opts metav1.ListOptions) (result *v1.ResourceQuota // Watch returns a watch.Interface that watches the requested resourceQuotas. 
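The ReplicationController scale subresource above switches from the deprecated `extensions/v1beta1.Scale` to `autoscalingv1.Scale`, matching what the apiserver serves for `replicationcontrollers/scale`. A usage sketch against the new `GetScale`/`UpdateScale` signatures from this diff; the clientset wiring and the `ns`/`name`/replica values are placeholders:

```go
package example

import (
	autoscalingv1 "k8s.io/api/autoscaling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// scaleRC resizes a ReplicationController through its scale subresource using
// the autoscaling/v1 Scale type the typed client now returns. Constructing
// the clientset is out of scope here.
func scaleRC(cs kubernetes.Interface, ns, name string, replicas int32) (*autoscalingv1.Scale, error) {
	rc := cs.CoreV1().ReplicationControllers(ns)

	// Read the current scale, then write back the desired replica count.
	scale, err := rc.GetScale(name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	scale.Spec.Replicas = replicas
	return rc.UpdateScale(name, scale)
}
```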
func (c *resourceQuotas) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("resourcequotas"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *resourceQuotas) Delete(name string, options *metav1.DeleteOptions) erro // DeleteCollection deletes a collection of objects. func (c *resourceQuotas) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("resourcequotas"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go index 4ea9796b63dc..85c143b173dd 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *secrets) Get(name string, options metav1.GetOptions) (result *v1.Secret // List takes label and field selectors, and returns the list of Secrets that match those selectors. func (c *secrets) List(opts metav1.ListOptions) (result *v1.SecretList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.SecretList{} err = c.client.Get(). Namespace(c.ns). Resource("secrets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *secrets) List(opts metav1.ListOptions) (result *v1.SecretList, err erro // Watch returns a watch.Interface that watches the requested secrets. func (c *secrets) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("secrets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *secrets) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *secrets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("secrets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go index 6c42ca87a8bd..b0e09413efbb 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *services) Get(name string, options metav1.GetOptions) (result *v1.Servi // List takes label and field selectors, and returns the list of Services that match those selectors. func (c *services) List(opts metav1.ListOptions) (result *v1.ServiceList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.ServiceList{} err = c.client.Get(). Namespace(c.ns). Resource("services"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *services) List(opts metav1.ListOptions) (result *v1.ServiceList, err er // Watch returns a watch.Interface that watches the requested services. func (c *services) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("services"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go index f3ab7eb878c4..50af6a21c9bc 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *serviceAccounts) Get(name string, options metav1.GetOptions) (result *v // List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors. func (c *serviceAccounts) List(opts metav1.ListOptions) (result *v1.ServiceAccountList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.ServiceAccountList{} err = c.client.Get(). Namespace(c.ns). Resource("serviceaccounts"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *serviceAccounts) List(opts metav1.ListOptions) (result *v1.ServiceAccou // Watch returns a watch.Interface that watches the requested serviceAccounts. func (c *serviceAccounts) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("serviceaccounts"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). 
Watch() } @@ -133,10 +145,15 @@ func (c *serviceAccounts) Delete(name string, options *metav1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *serviceAccounts) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("serviceaccounts"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go index af7d060d591e..143281b25cc9 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/events/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *events) Get(name string, options v1.GetOptions) (result *v1beta1.Event, // List takes label and field selectors, and returns the list of Events that match those selectors. func (c *events) List(opts v1.ListOptions) (result *v1beta1.EventList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.EventList{} err = c.client.Get(). Namespace(c.ns). Resource("events"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *events) List(opts v1.ListOptions) (result *v1beta1.EventList, err error // Watch returns a watch.Interface that watches the requested events. func (c *events) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("events"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *events) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *events) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("events"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go index b210e40a041c..ef76ec131817 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go @@ -119,7 +119,7 @@ func (c *FakeEvents) DeleteCollection(options *v1.DeleteOptions, listOptions v1. // Patch applies the patch and returns the patched event. 
func (c *FakeEvents) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Event, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, data, subresources...), &v1beta1.Event{}) + Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, pt, data, subresources...), &v1beta1.Event{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/BUILD index 062da6216a16..6690d96b0bb7 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/BUILD @@ -17,16 +17,12 @@ go_library( "ingress.go", "podsecuritypolicy.go", "replicaset.go", - "scale.go", - "scale_expansion.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1", importpath = "k8s.io/client-go/kubernetes/typed/extensions/v1beta1", deps = [ "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go index 85294be4b98f..93b1ae9b6dd7 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *daemonSets) Get(name string, options v1.GetOptions) (result *v1beta1.Da // List takes label and field selectors, and returns the list of DaemonSets that match those selectors. func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.DaemonSetList{} err = c.client.Get(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta1.DaemonSetList, e // Watch returns a watch.Interface that watches the requested daemonSets. func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). 
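`testing.NewPatchSubresourceAction` (and its root variant) now takes the `types.PatchType`, so actions recorded by the fake clientsets carry the patch strategy the caller used. A small sketch under the assumption that the fake client behaves as wired up in this diff; the object name and patch payload are illustrative:

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/fake"
	ktesting "k8s.io/client-go/testing"
)

// patchViaFake patches a seeded Pod through the fake clientset and returns
// the recorded actions; after this change the recorded PatchAction carries
// the PatchType, so a test can assert on which strategy was used.
func patchViaFake() ([]ktesting.Action, error) {
	cs := fake.NewSimpleClientset(&corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
	})
	_, err := cs.CoreV1().Pods("default").Patch(
		"demo",
		types.StrategicMergePatchType,
		[]byte(`{"metadata":{"labels":{"patched":"true"}}}`),
	)
	return cs.Actions(), err
}
```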
Watch() } @@ -150,10 +162,15 @@ func (c *daemonSets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *daemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go index 89183d2853e0..5557b9f2b1e8 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -79,11 +81,16 @@ func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta1.D // List takes label and field selectors, and returns the list of Deployments that match those selectors. func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.DeploymentList{} err = c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -91,11 +98,16 @@ func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, // Watch returns a watch.Interface that watches the requested deployments. func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -153,10 +165,15 @@ func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("deployments"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go index 1961ffc7cda0..0e9edf5cce60 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go @@ -32,7 +32,6 @@ type ExtensionsV1beta1Interface interface { IngressesGetter PodSecurityPoliciesGetter ReplicaSetsGetter - ScalesGetter } // ExtensionsV1beta1Client is used to interact with features provided by the extensions group. @@ -60,10 +59,6 @@ func (c *ExtensionsV1beta1Client) ReplicaSets(namespace string) ReplicaSetInterf return newReplicaSets(c, namespace) } -func (c *ExtensionsV1beta1Client) Scales(namespace string) ScaleInterface { - return newScales(c, namespace) -} - // NewForConfig creates a new ExtensionsV1beta1Client for the given config. func NewForConfig(c *rest.Config) (*ExtensionsV1beta1Client, error) { config := *c diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/BUILD index d0a83e23efa0..5de2b7dfebff 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/BUILD @@ -16,8 +16,6 @@ go_library( "fake_ingress.go", "fake_podsecuritypolicy.go", "fake_replicaset.go", - "fake_scale.go", - "fake_scale_expansion.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake", importpath = "k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go index 3a760b3175ea..4c98660607c2 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go @@ -131,7 +131,7 @@ func (c *FakeDaemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions // Patch applies the patch and returns the patched daemonSet. func (c *FakeDaemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.DaemonSet, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, data, subresources...), &v1beta1.DaemonSet{}) + Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, pt, data, subresources...), &v1beta1.DaemonSet{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go index f032a5563897..7b7df45cc344 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go @@ -131,7 +131,7 @@ func (c *FakeDeployments) DeleteCollection(options *v1.DeleteOptions, listOption // Patch applies the patch and returns the patched deployment. func (c *FakeDeployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, data, subresources...), &v1beta1.Deployment{}) + Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &v1beta1.Deployment{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go index 1aba34f9dcb8..0282c0b49908 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go @@ -48,10 +48,6 @@ func (c *FakeExtensionsV1beta1) ReplicaSets(namespace string) v1beta1.ReplicaSet return &FakeReplicaSets{c, namespace} } -func (c *FakeExtensionsV1beta1) Scales(namespace string) v1beta1.ScaleInterface { - return &FakeScales{c, namespace} -} - // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. func (c *FakeExtensionsV1beta1) RESTClient() rest.Interface { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go index 55257a88a287..01c2521401ff 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go @@ -131,7 +131,7 @@ func (c *FakeIngresses) DeleteCollection(options *v1.DeleteOptions, listOptions // Patch applies the patch and returns the patched ingress. func (c *FakeIngresses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, name, data, subresources...), &v1beta1.Ingress{}) + Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, name, pt, data, subresources...), &v1beta1.Ingress{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go index 70b5dac28193..b97a34416e3f 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go @@ -112,7 +112,7 @@ func (c *FakePodSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, li // Patch applies the patch and returns the patched podSecurityPolicy. func (c *FakePodSecurityPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(podsecuritypoliciesResource, name, data, subresources...), &v1beta1.PodSecurityPolicy{}) + Invokes(testing.NewRootPatchSubresourceAction(podsecuritypoliciesResource, name, pt, data, subresources...), &v1beta1.PodSecurityPolicy{}) if obj == nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go index 2ab8f244f571..7ed16af90466 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go @@ -131,7 +131,7 @@ func (c *FakeReplicaSets) DeleteCollection(options *v1.DeleteOptions, listOption // Patch applies the patch and returns the patched replicaSet. func (c *FakeReplicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ReplicaSet, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, data, subresources...), &v1beta1.ReplicaSet{}) + Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, pt, data, subresources...), &v1beta1.ReplicaSet{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale.go deleted file mode 100644 index 02c4d0bab73f..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. 
DO NOT EDIT. - -package fake - -// FakeScales implements ScaleInterface -type FakeScales struct { - Fake *FakeExtensionsV1beta1 - ns string -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale_expansion.go deleted file mode 100644 index 1f1d49ba1a9f..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale_expansion.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/runtime/schema" - core "k8s.io/client-go/testing" -) - -func (c *FakeScales) Get(kind string, name string) (result *v1beta1.Scale, err error) { - action := core.GetActionImpl{} - action.Verb = "get" - action.Namespace = c.ns - action.Resource = schema.GroupVersionResource{Resource: kind} - action.Subresource = "scale" - action.Name = name - obj, err := c.Fake.Invokes(action, &v1beta1.Scale{}) - result = obj.(*v1beta1.Scale) - return -} - -func (c *FakeScales) Update(kind string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { - action := core.UpdateActionImpl{} - action.Verb = "update" - action.Namespace = c.ns - action.Resource = schema.GroupVersionResource{Resource: kind} - action.Subresource = "scale" - action.Object = scale - obj, err := c.Fake.Invokes(action, scale) - result = obj.(*v1beta1.Scale) - return -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go index f8b664cbd1e4..4da51c368585 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *ingresses) Get(name string, options v1.GetOptions) (result *v1beta1.Ing // List takes label and field selectors, and returns the list of Ingresses that match those selectors. func (c *ingresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.IngressList{} err = c.client.Get(). Namespace(c.ns). Resource("ingresses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *ingresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err // Watch returns a watch.Interface that watches the requested ingresses. 
func (c *ingresses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("ingresses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *ingresses) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *ingresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("ingresses"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go index 8099d77307d6..a947a54a6f42 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *podSecurityPolicies) Get(name string, options v1.GetOptions) (result *v // List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.PodSecurityPolicyList{} err = c.client.Get(). Resource("podsecuritypolicies"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecu // Watch returns a watch.Interface that watches the requested podSecurityPolicies. func (c *podSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("podsecuritypolicies"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *podSecurityPolicies) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *podSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("podsecuritypolicies"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go index 7e61fa2d12f8..444029058b49 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -79,11 +81,16 @@ func (c *replicaSets) Get(name string, options v1.GetOptions) (result *v1beta1.R // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta1.ReplicaSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.ReplicaSetList{} err = c.client.Get(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -91,11 +98,16 @@ func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta1.ReplicaSetList, // Watch returns a watch.Interface that watches the requested replicaSets. func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -153,10 +165,15 @@ func (c *replicaSets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *replicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go deleted file mode 100644 index 6ee677acd290..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - rest "k8s.io/client-go/rest" -) - -// ScalesGetter has a method to return a ScaleInterface. 
-// A group's client should implement this interface. -type ScalesGetter interface { - Scales(namespace string) ScaleInterface -} - -// ScaleInterface has methods to work with Scale resources. -type ScaleInterface interface { - ScaleExpansion -} - -// scales implements ScaleInterface -type scales struct { - client rest.Interface - ns string -} - -// newScales returns a Scales -func newScales(c *ExtensionsV1beta1Client, namespace string) *scales { - return &scales{ - client: c.RESTClient(), - ns: namespace, - } -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go deleted file mode 100644 index c9733cb28d2e..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// The ScaleExpansion interface allows manually adding extra methods to the ScaleInterface. -type ScaleExpansion interface { - Get(kind string, name string) (*v1beta1.Scale, error) - Update(kind string, scale *v1beta1.Scale) (*v1beta1.Scale, error) -} - -// Get takes the reference to scale subresource and returns the subresource or error, if one occurs. -func (c *scales) Get(kind string, name string) (result *v1beta1.Scale, err error) { - result = &v1beta1.Scale{} - - // TODO this method needs to take a proper unambiguous kind - fullyQualifiedKind := schema.GroupVersionKind{Kind: kind} - resource, _ := meta.UnsafeGuessKindToResource(fullyQualifiedKind) - - err = c.client.Get(). - Namespace(c.ns). - Resource(resource.Resource). - Name(name). - SubResource("scale"). - Do(). - Into(result) - return -} - -func (c *scales) Update(kind string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { - result = &v1beta1.Scale{} - - // TODO this method needs to take a proper unambiguous kind - fullyQualifiedKind := schema.GroupVersionKind{Kind: kind} - resource, _ := meta.UnsafeGuessKindToResource(fullyQualifiedKind) - - err = c.client.Put(). - Namespace(scale.Namespace). - Resource(resource.Resource). - Name(scale.Name). - SubResource("scale"). - Body(scale). - Do(). 
- Into(result) - return -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go index 7be202298de5..58667c481a82 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go @@ -119,7 +119,7 @@ func (c *FakeNetworkPolicies) DeleteCollection(options *v1.DeleteOptions, listOp // Patch applies the patch and returns the patched networkPolicy. func (c *FakeNetworkPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *networkingv1.NetworkPolicy, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, name, data, subresources...), &networkingv1.NetworkPolicy{}) + Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, name, pt, data, subresources...), &networkingv1.NetworkPolicy{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go index d8f0a6b47e96..3f39be957d8a 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *networkPolicies) Get(name string, options metav1.GetOptions) (result *v // List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. func (c *networkPolicies) List(opts metav1.ListOptions) (result *v1.NetworkPolicyList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.NetworkPolicyList{} err = c.client.Get(). Namespace(c.ns). Resource("networkpolicies"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *networkPolicies) List(opts metav1.ListOptions) (result *v1.NetworkPolic // Watch returns a watch.Interface that watches the requested networkPolicies. func (c *networkPolicies) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("networkpolicies"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *networkPolicies) Delete(name string, options *metav1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *networkPolicies) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("networkpolicies"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
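The deleted `ScaleExpansion` above keyed requests off a bare kind string and `meta.UnsafeGuessKindToResource`, which its own TODO flagged as ambiguous: the guess is purely mechanical and carries no group or version. A short sketch of that behavior, assuming an illustrative package name:

```go
package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// guessResource reproduces the lookup the deleted expansion relied on: the
// guess lowercases and pluralizes the kind and preserves whatever
// group/version it was given, which for a bare kind is none at all.
func guessResource(kind string) schema.GroupVersionResource {
	plural, _ := meta.UnsafeGuessKindToResource(schema.GroupVersionKind{Kind: kind})
	return plural
}

func demo() {
	// Prints a resource with empty Group and Version ("replicasets");
	// nothing disambiguates extensions/v1beta1 from apps/v1.
	fmt.Println(guessResource("ReplicaSet").Resource)
}
```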
Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go index 2f0d8e95370a..f3b5e93ab0d2 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go @@ -26,8 +26,9 @@ func (c *FakeEvictions) Evict(eviction *policy.Eviction) error { action := core.GetActionImpl{} action.Verb = "post" action.Namespace = c.ns - action.Resource = schema.GroupVersionResource{Group: "", Version: "", Resource: "pods"} + action.Resource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"} action.Subresource = "eviction" + action.Name = eviction.Name _, err := c.Fake.Invokes(action, eviction) return err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go index 3f2e78b3109f..5bfbbca47f21 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go @@ -131,7 +131,7 @@ func (c *FakePodDisruptionBudgets) DeleteCollection(options *v1.DeleteOptions, l // Patch applies the patch and returns the patched podDisruptionBudget. func (c *FakePodDisruptionBudgets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, name, data, subresources...), &v1beta1.PodDisruptionBudget{}) + Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, name, pt, data, subresources...), &v1beta1.PodDisruptionBudget{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go index 0df9aa15f981..32d1989f33f6 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go @@ -112,7 +112,7 @@ func (c *FakePodSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, li // Patch applies the patch and returns the patched podSecurityPolicy. func (c *FakePodSecurityPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { obj, err := c.Fake. 
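The fake eviction expansion above previously recorded an empty `Version` and no pod name, so tests could not tell which pod a test subject evicted; it now mirrors the real client's POST to `pods/<name>/eviction`. A hedged sketch of driving it through the fake clientset (the pod name is illustrative; since no pod is seeded, the default tracker reaction is expected to report not found):

```go
package example

import (
	policyv1beta1 "k8s.io/api/policy/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// evictViaFake posts an eviction through the fake clientset; the recorded
// action now carries Version "v1" and the evicted pod's name, so assertions
// can distinguish evictions of different pods.
func evictViaFake() error {
	cs := fake.NewSimpleClientset()
	return cs.PolicyV1beta1().Evictions("default").Evict(&policyv1beta1.Eviction{
		ObjectMeta: metav1.ObjectMeta{Name: "victim", Namespace: "default"},
	})
}
```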
- Invokes(testing.NewRootPatchSubresourceAction(podsecuritypoliciesResource, name, data, subresources...), &v1beta1.PodSecurityPolicy{}) + Invokes(testing.NewRootPatchSubresourceAction(podsecuritypoliciesResource, name, pt, data, subresources...), &v1beta1.PodSecurityPolicy{}) if obj == nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go index a11f27eb2579..864af9a262bb 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/policy/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -76,11 +78,16 @@ func (c *podDisruptionBudgets) Get(name string, options v1.GetOptions) (result * // List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. func (c *podDisruptionBudgets) List(opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.PodDisruptionBudgetList{} err = c.client.Get(). Namespace(c.ns). Resource("poddisruptionbudgets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *podDisruptionBudgets) List(opts v1.ListOptions) (result *v1beta1.PodDis // Watch returns a watch.Interface that watches the requested podDisruptionBudgets. func (c *podDisruptionBudgets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("poddisruptionbudgets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *podDisruptionBudgets) Delete(name string, options *v1.DeleteOptions) er // DeleteCollection deletes a collection of objects. func (c *podDisruptionBudgets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("poddisruptionbudgets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go index 355be1e9c7fb..d02096d747ae 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/policy/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *podSecurityPolicies) Get(name string, options v1.GetOptions) (result *v // List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.PodSecurityPolicyList{} err = c.client.Get(). Resource("podsecuritypolicies"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecu // Watch returns a watch.Interface that watches the requested podSecurityPolicies. func (c *podSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("podsecuritypolicies"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *podSecurityPolicies) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *podSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("podsecuritypolicies"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go index c4299d4c68ca..0a47c4411503 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *clusterRoles) Get(name string, options metav1.GetOptions) (result *v1.C // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. func (c *clusterRoles) List(opts metav1.ListOptions) (result *v1.ClusterRoleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.ClusterRoleList{} err = c.client.Get(). Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *clusterRoles) List(opts metav1.ListOptions) (result *v1.ClusterRoleList // Watch returns a watch.Interface that watches the requested clusterRoles. func (c *clusterRoles) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("clusterroles"). 
VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *clusterRoles) Delete(name string, options *metav1.DeleteOptions) error // DeleteCollection deletes a collection of objects. func (c *clusterRoles) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("clusterroles"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go index 30c0469a4fc4..c16ebc31222b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *clusterRoleBindings) Get(name string, options metav1.GetOptions) (resul // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. func (c *clusterRoleBindings) List(opts metav1.ListOptions) (result *v1.ClusterRoleBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.ClusterRoleBindingList{} err = c.client.Get(). Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *clusterRoleBindings) List(opts metav1.ListOptions) (result *v1.ClusterR // Watch returns a watch.Interface that watches the requested clusterRoleBindings. func (c *clusterRoleBindings) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *clusterRoleBindings) Delete(name string, options *metav1.DeleteOptions) // DeleteCollection deletes a collection of objects. func (c *clusterRoleBindings) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("clusterrolebindings"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go index d93ac8294422..d57f33939021 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go @@ -112,7 +112,7 @@ func (c *FakeClusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptio // Patch applies the patch and returns the patched clusterRole. func (c *FakeClusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *rbacv1.ClusterRole, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, data, subresources...), &rbacv1.ClusterRole{}) + Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, pt, data, subresources...), &rbacv1.ClusterRole{}) if obj == nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go index a8b2b57ffd72..878473ef35d1 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go @@ -112,7 +112,7 @@ func (c *FakeClusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, li // Patch applies the patch and returns the patched clusterRoleBinding. func (c *FakeClusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *rbacv1.ClusterRoleBinding, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, data, subresources...), &rbacv1.ClusterRoleBinding{}) + Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, pt, data, subresources...), &rbacv1.ClusterRoleBinding{}) if obj == nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go index f048bbdfb2d8..78ef3192f3db 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go @@ -119,7 +119,7 @@ func (c *FakeRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.L // Patch applies the patch and returns the patched role. func (c *FakeRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *rbacv1.Role, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, data, subresources...), &rbacv1.Role{}) + Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, pt, data, subresources...), &rbacv1.Role{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go index c71635fce4e6..6c344cadff96 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go @@ -119,7 +119,7 @@ func (c *FakeRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptio // Patch applies the patch and returns the patched roleBinding. func (c *FakeRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *rbacv1.RoleBinding, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, data, subresources...), &rbacv1.RoleBinding{}) + Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, pt, data, subresources...), &rbacv1.RoleBinding{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go index 81ea12a9ff51..a17d791fff24 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *roles) Get(name string, options metav1.GetOptions) (result *v1.Role, er // List takes label and field selectors, and returns the list of Roles that match those selectors. func (c *roles) List(opts metav1.ListOptions) (result *v1.RoleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.RoleList{} err = c.client.Get(). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *roles) List(opts metav1.ListOptions) (result *v1.RoleList, err error) { // Watch returns a watch.Interface that watches the requested roles. func (c *roles) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *roles) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *roles) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("roles"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
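The other recurring change, shown above for roles and role bindings, threads the `pt` (`types.PatchType`) argument into `NewPatchSubresourceAction` and `NewRootPatchSubresourceAction`, so fake clients now record which patch strategy a caller used instead of discarding it. A hedged test sketch, assuming a vendored client-go at this revision (the `example` role name and label payload are illustrative):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/fake"
	ktesting "k8s.io/client-go/testing"
)

func main() {
	client := fake.NewSimpleClientset()

	// The patch itself fails (no such Role exists in the tracker), but the
	// action is still recorded, and it now carries the patch type.
	_, _ = client.RbacV1().Roles("default").Patch(
		"example", types.StrategicMergePatchType,
		[]byte(`{"metadata":{"labels":{"team":"autoscaling"}}}`))

	for _, a := range client.Actions() {
		if pa, ok := a.(ktesting.PatchAction); ok {
			// roles application/strategic-merge-patch+json
			fmt.Println(pa.GetResource().Resource, pa.GetPatchType())
		}
	}
}
```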
Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go index 17c6f9913b01..c87e457188ef 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *roleBindings) Get(name string, options metav1.GetOptions) (result *v1.R // List takes label and field selectors, and returns the list of RoleBindings that match those selectors. func (c *roleBindings) List(opts metav1.ListOptions) (result *v1.RoleBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.RoleBindingList{} err = c.client.Get(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *roleBindings) List(opts metav1.ListOptions) (result *v1.RoleBindingList // Watch returns a watch.Interface that watches the requested roleBindings. func (c *roleBindings) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *roleBindings) Delete(name string, options *metav1.DeleteOptions) error // DeleteCollection deletes a collection of objects. func (c *roleBindings) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go index 37a54576231b..77e66877e725 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "time" + v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *clusterRoles) Get(name string, options v1.GetOptions) (result *v1alpha1 // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. func (c *clusterRoles) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.ClusterRoleList{} err = c.client.Get(). Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). 
Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *clusterRoles) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleLi // Watch returns a watch.Interface that watches the requested clusterRoles. func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *clusterRoles) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *clusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("clusterroles"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go index 6050789066d9..0d1b9d2051eb 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "time" + v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *clusterRoleBindings) Get(name string, options v1.GetOptions) (result *v // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.ClusterRoleBindingList{} err = c.client.Get(). Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.Cluste // Watch returns a watch.Interface that watches the requested clusterRoleBindings. func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *clusterRoleBindings) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *clusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("clusterrolebindings"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go index 13fbce4e72a5..d2d1b1c74c83 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go @@ -112,7 +112,7 @@ func (c *FakeClusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptio // Patch applies the patch and returns the patched clusterRole. func (c *FakeClusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRole, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, data, subresources...), &v1alpha1.ClusterRole{}) + Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, pt, data, subresources...), &v1alpha1.ClusterRole{}) if obj == nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go index 5076543d9f42..3e23e5f65706 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go @@ -112,7 +112,7 @@ func (c *FakeClusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, li // Patch applies the patch and returns the patched clusterRoleBinding. func (c *FakeClusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, data, subresources...), &v1alpha1.ClusterRoleBinding{}) + Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, pt, data, subresources...), &v1alpha1.ClusterRoleBinding{}) if obj == nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go index 24d8efee3c1d..7bd52373faca 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go @@ -119,7 +119,7 @@ func (c *FakeRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.L // Patch applies the patch and returns the patched role. func (c *FakeRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Role, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, data, subresources...), &v1alpha1.Role{}) + Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, pt, data, subresources...), &v1alpha1.Role{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go index cb01ef99db70..015050311562 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go @@ -119,7 +119,7 @@ func (c *FakeRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptio // Patch applies the patch and returns the patched roleBinding. func (c *FakeRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RoleBinding, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, data, subresources...), &v1alpha1.RoleBinding{}) + Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, pt, data, subresources...), &v1alpha1.RoleBinding{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go index aa6954bb579d..4a4b67240b3c 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "time" + v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *roles) Get(name string, options v1.GetOptions) (result *v1alpha1.Role, // List takes label and field selectors, and returns the list of Roles that match those selectors. func (c *roles) List(opts v1.ListOptions) (result *v1alpha1.RoleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.RoleList{} err = c.client.Get(). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *roles) List(opts v1.ListOptions) (result *v1alpha1.RoleList, err error) // Watch returns a watch.Interface that watches the requested roles. func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *roles) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("roles"). 
VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go index 0941b8e86713..bf4e5a10efb3 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "time" + v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *roleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1 // List takes label and field selectors, and returns the list of RoleBindings that match those selectors. func (c *roleBindings) List(opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.RoleBindingList{} err = c.client.Get(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *roleBindings) List(opts v1.ListOptions) (result *v1alpha1.RoleBindingLi // Watch returns a watch.Interface that watches the requested roleBindings. func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *roleBindings) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go index bac951c876c3..21d3cab37339 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *clusterRoles) Get(name string, options v1.GetOptions) (result *v1beta1. // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. 
func (c *clusterRoles) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.ClusterRoleList{} err = c.client.Get(). Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *clusterRoles) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleLis // Watch returns a watch.Interface that watches the requested clusterRoles. func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *clusterRoles) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *clusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("clusterroles"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go index 96c91de6e2c8..47eb9e4e77b1 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *clusterRoleBindings) Get(name string, options v1.GetOptions) (result *v // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.ClusterRoleBindingList{} err = c.client.Get(). Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1beta1.Cluster // Watch returns a watch.Interface that watches the requested clusterRoleBindings. func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *clusterRoleBindings) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. 
func (c *clusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("clusterrolebindings"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go index 62a832197e78..2dbc3f6166e8 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go @@ -112,7 +112,7 @@ func (c *FakeClusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptio // Patch applies the patch and returns the patched clusterRole. func (c *FakeClusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRole, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, data, subresources...), &v1beta1.ClusterRole{}) + Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, pt, data, subresources...), &v1beta1.ClusterRole{}) if obj == nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go index c9ab47269641..14e20bc28c1a 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go @@ -112,7 +112,7 @@ func (c *FakeClusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, li // Patch applies the patch and returns the patched clusterRoleBinding. func (c *FakeClusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, data, subresources...), &v1beta1.ClusterRoleBinding{}) + Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, pt, data, subresources...), &v1beta1.ClusterRoleBinding{}) if obj == nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go index 45b07a001ed2..e31768e4e506 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go @@ -119,7 +119,7 @@ func (c *FakeRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.L // Patch applies the patch and returns the patched role. func (c *FakeRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Role, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, data, subresources...), &v1beta1.Role{}) + Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, pt, data, subresources...), &v1beta1.Role{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go index 1efd40005602..06b93c93f66c 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go @@ -119,7 +119,7 @@ func (c *FakeRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptio // Patch applies the patch and returns the patched roleBinding. func (c *FakeRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RoleBinding, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, data, subresources...), &v1beta1.RoleBinding{}) + Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, pt, data, subresources...), &v1beta1.RoleBinding{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go index 66f382c07c82..2b61aad5231b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *roles) Get(name string, options v1.GetOptions) (result *v1beta1.Role, e // List takes label and field selectors, and returns the list of Roles that match those selectors. func (c *roles) List(opts v1.ListOptions) (result *v1beta1.RoleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.RoleList{} err = c.client.Get(). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *roles) List(opts v1.ListOptions) (result *v1beta1.RoleList, err error) // Watch returns a watch.Interface that watches the requested roles. func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *roles) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("roles"). VersionedParams(&listOptions, scheme.ParameterCodec). 
+ Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go index 67d3d331bc26..0bd118fdfeb8 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *roleBindings) Get(name string, options v1.GetOptions) (result *v1beta1. // List takes label and field selectors, and returns the list of RoleBindings that match those selectors. func (c *roleBindings) List(opts v1.ListOptions) (result *v1beta1.RoleBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.RoleBindingList{} err = c.client.Get(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *roleBindings) List(opts v1.ListOptions) (result *v1beta1.RoleBindingLis // Watch returns a watch.Interface that watches the requested roleBindings. func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *roleBindings) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go index 8ab4421a97d5..e592ed137faa 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go @@ -112,7 +112,7 @@ func (c *FakePriorityClasses) DeleteCollection(options *v1.DeleteOptions, listOp // Patch applies the patch and returns the patched priorityClass. func (c *FakePriorityClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityClass, err error) { obj, err := c.Fake. 
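For callers, the practical effect of this timeout plumbing is that `TimeoutSeconds` now bounds the HTTP request itself rather than only the server-side watch window. A usage sketch under stated assumptions (a reachable cluster via the default kubeconfig; the namespace and 60-second window are illustrative):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes ~/.kube/config points at a reachable cluster.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	secs := int64(60)
	w, err := client.RbacV1beta1().RoleBindings("default").Watch(metav1.ListOptions{
		TimeoutSeconds: &secs, // now also caps the underlying HTTP request
	})
	if err != nil {
		panic(err)
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		fmt.Println(ev.Type)
	}
}
```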
- Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, name, data, subresources...), &v1alpha1.PriorityClass{}) + Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, name, pt, data, subresources...), &v1alpha1.PriorityClass{}) if obj == nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go index 6845d25c3859..29d646fb1f03 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "time" + v1alpha1 "k8s.io/api/scheduling/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *priorityClasses) Get(name string, options v1.GetOptions) (result *v1alp // List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. func (c *priorityClasses) List(opts v1.ListOptions) (result *v1alpha1.PriorityClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.PriorityClassList{} err = c.client.Get(). Resource("priorityclasses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *priorityClasses) List(opts v1.ListOptions) (result *v1alpha1.PriorityCl // Watch returns a watch.Interface that watches the requested priorityClasses. func (c *priorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("priorityclasses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *priorityClasses) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *priorityClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("priorityclasses"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go index e234fec66c1e..44ce64b5ce06 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go @@ -112,7 +112,7 @@ func (c *FakePriorityClasses) DeleteCollection(options *v1.DeleteOptions, listOp // Patch applies the patch and returns the patched priorityClass. func (c *FakePriorityClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PriorityClass, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, name, data, subresources...), &v1beta1.PriorityClass{}) + Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, name, pt, data, subresources...), &v1beta1.PriorityClass{}) if obj == nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go index 57b9766e42ef..5e402f8e342a 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/scheduling/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *priorityClasses) Get(name string, options v1.GetOptions) (result *v1bet // List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. func (c *priorityClasses) List(opts v1.ListOptions) (result *v1beta1.PriorityClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.PriorityClassList{} err = c.client.Get(). Resource("priorityclasses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *priorityClasses) List(opts v1.ListOptions) (result *v1beta1.PriorityCla // Watch returns a watch.Interface that watches the requested priorityClasses. func (c *priorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("priorityclasses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *priorityClasses) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *priorityClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("priorityclasses"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go index 90eaccec5a5b..273a027fadf0 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go @@ -119,7 +119,7 @@ func (c *FakePodPresets) DeleteCollection(options *v1.DeleteOptions, listOptions // Patch applies the patch and returns the patched podPreset. func (c *FakePodPresets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PodPreset, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(podpresetsResource, c.ns, name, data, subresources...), &v1alpha1.PodPreset{}) + Invokes(testing.NewPatchSubresourceAction(podpresetsResource, c.ns, name, pt, data, subresources...), &v1alpha1.PodPreset{}) if obj == nil { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go index f000ae486cdf..8fd6adc56b6b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "time" + v1alpha1 "k8s.io/api/settings/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,11 +77,16 @@ func (c *podPresets) Get(name string, options v1.GetOptions) (result *v1alpha1.P // List takes label and field selectors, and returns the list of PodPresets that match those selectors. func (c *podPresets) List(opts v1.ListOptions) (result *v1alpha1.PodPresetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.PodPresetList{} err = c.client.Get(). Namespace(c.ns). Resource("podpresets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *podPresets) List(opts v1.ListOptions) (result *v1alpha1.PodPresetList, // Watch returns a watch.Interface that watches the requested podPresets. func (c *podPresets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("podpresets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *podPresets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *podPresets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("podpresets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/BUILD index 7948513c080b..dee1e799cd91 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/BUILD @@ -12,6 +12,7 @@ go_library( "generated_expansion.go", "storage_client.go", "storageclass.go", + "volumeattachment.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/kubernetes/typed/storage/v1", importpath = "k8s.io/client-go/kubernetes/typed/storage/v1", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/BUILD index c80e0cc93771..9bdd5a4edea5 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/BUILD @@ -11,6 +11,7 @@ go_library( "doc.go", "fake_storage_client.go", "fake_storageclass.go", + "fake_volumeattachment.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake", importpath = "k8s.io/client-go/kubernetes/typed/storage/v1/fake", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go index fc6f98cf6ace..967a528500e7 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go @@ -32,6 +32,10 @@ func (c *FakeStorageV1) StorageClasses() v1.StorageClassInterface { return &FakeStorageClasses{c} } +func (c *FakeStorageV1) VolumeAttachments() v1.VolumeAttachmentInterface { + return &FakeVolumeAttachments{c} +} + // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. func (c *FakeStorageV1) RESTClient() rest.Interface { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go index 37488a2d7ad4..c7531d8793c1 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go @@ -112,7 +112,7 @@ func (c *FakeStorageClasses) DeleteCollection(options *v1.DeleteOptions, listOpt // Patch applies the patch and returns the patched storageClass. func (c *FakeStorageClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *storagev1.StorageClass, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, name, data, subresources...), &storagev1.StorageClass{}) + Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, name, pt, data, subresources...), &storagev1.StorageClass{}) if obj == nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go new file mode 100644 index 000000000000..58e09da46be8 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go @@ -0,0 +1,131 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + storagev1 "k8s.io/api/storage/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeVolumeAttachments implements VolumeAttachmentInterface +type FakeVolumeAttachments struct { + Fake *FakeStorageV1 +} + +var volumeattachmentsResource = schema.GroupVersionResource{Group: "storage.k8s.io", Version: "v1", Resource: "volumeattachments"} + +var volumeattachmentsKind = schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1", Kind: "VolumeAttachment"} + +// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. +func (c *FakeVolumeAttachments) Get(name string, options v1.GetOptions) (result *storagev1.VolumeAttachment, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(volumeattachmentsResource, name), &storagev1.VolumeAttachment{}) + if obj == nil { + return nil, err + } + return obj.(*storagev1.VolumeAttachment), err +} + +// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. +func (c *FakeVolumeAttachments) List(opts v1.ListOptions) (result *storagev1.VolumeAttachmentList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(volumeattachmentsResource, volumeattachmentsKind, opts), &storagev1.VolumeAttachmentList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &storagev1.VolumeAttachmentList{ListMeta: obj.(*storagev1.VolumeAttachmentList).ListMeta} + for _, item := range obj.(*storagev1.VolumeAttachmentList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested volumeAttachments. +func (c *FakeVolumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(testing.NewRootWatchAction(volumeattachmentsResource, opts)) +} + +// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. +func (c *FakeVolumeAttachments) Create(volumeAttachment *storagev1.VolumeAttachment) (result *storagev1.VolumeAttachment, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(volumeattachmentsResource, volumeAttachment), &storagev1.VolumeAttachment{}) + if obj == nil { + return nil, err + } + return obj.(*storagev1.VolumeAttachment), err +} + +// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. +func (c *FakeVolumeAttachments) Update(volumeAttachment *storagev1.VolumeAttachment) (result *storagev1.VolumeAttachment, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(volumeattachmentsResource, volumeAttachment), &storagev1.VolumeAttachment{}) + if obj == nil { + return nil, err + } + return obj.(*storagev1.VolumeAttachment), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeVolumeAttachments) UpdateStatus(volumeAttachment *storagev1.VolumeAttachment) (*storagev1.VolumeAttachment, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(volumeattachmentsResource, "status", volumeAttachment), &storagev1.VolumeAttachment{}) + if obj == nil { + return nil, err + } + return obj.(*storagev1.VolumeAttachment), err +} + +// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. +func (c *FakeVolumeAttachments) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(volumeattachmentsResource, name), &storagev1.VolumeAttachment{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeVolumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(volumeattachmentsResource, listOptions) + + _, err := c.Fake.Invokes(action, &storagev1.VolumeAttachmentList{}) + return err +} + +// Patch applies the patch and returns the patched volumeAttachment. +func (c *FakeVolumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *storagev1.VolumeAttachment, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, pt, data, subresources...), &storagev1.VolumeAttachment{}) + if obj == nil { + return nil, err + } + return obj.(*storagev1.VolumeAttachment), err +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go index 2bea7ec7fd98..ccac16114c87 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go @@ -19,3 +19,5 @@ limitations under the License. 
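Taken together, the fake additions above give tests an in-memory VolumeAttachments surface. A minimal sketch of how it is typically exercised, assuming the standard fake clientset in `k8s.io/client-go/kubernetes/fake` (its wiring is not part of this hunk):

```go
package main

import (
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// Seed the fake clientset's object tracker with one VolumeAttachment;
	// FakeVolumeAttachments then serves it through the generated actions.
	client := fake.NewSimpleClientset(&storagev1.VolumeAttachment{
		ObjectMeta: metav1.ObjectMeta{Name: "attach-1"},
	})

	va, err := client.StorageV1().VolumeAttachments().Get("attach-1", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(va.Name) // attach-1
}
```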
package v1 type StorageClassExpansion interface{} + +type VolumeAttachmentExpansion interface{} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go index ac48f4916992..92378cf7f496 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go @@ -28,6 +28,7 @@ import ( type StorageV1Interface interface { RESTClient() rest.Interface StorageClassesGetter + VolumeAttachmentsGetter } // StorageV1Client is used to interact with features provided by the storage.k8s.io group. @@ -39,6 +40,10 @@ func (c *StorageV1Client) StorageClasses() StorageClassInterface { return newStorageClasses(c) } +func (c *StorageV1Client) VolumeAttachments() VolumeAttachmentInterface { + return newVolumeAttachments(c) +} + // NewForConfig creates a new StorageV1Client for the given config. func NewForConfig(c *rest.Config) (*StorageV1Client, error) { config := *c diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go index 0f7f57f05fbe..3f4c48f0a0c4 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go @@ -19,6 +19,8 @@ limitations under the License. package v1 import ( + "time" + v1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *storageClasses) Get(name string, options metav1.GetOptions) (result *v1 // List takes label and field selectors, and returns the list of StorageClasses that match those selectors. func (c *storageClasses) List(opts metav1.ListOptions) (result *v1.StorageClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1.StorageClassList{} err = c.client.Get(). Resource("storageclasses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *storageClasses) List(opts metav1.ListOptions) (result *v1.StorageClassL // Watch returns a watch.Interface that watches the requested storageClasses. func (c *storageClasses) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("storageclasses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *storageClasses) Delete(name string, options *metav1.DeleteOptions) erro // DeleteCollection deletes a collection of objects. func (c *storageClasses) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("storageclasses"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go new file mode 100644 index 000000000000..0f45097b2009 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go @@ -0,0 +1,180 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface. +// A group's client should implement this interface. +type VolumeAttachmentsGetter interface { + VolumeAttachments() VolumeAttachmentInterface +} + +// VolumeAttachmentInterface has methods to work with VolumeAttachment resources. +type VolumeAttachmentInterface interface { + Create(*v1.VolumeAttachment) (*v1.VolumeAttachment, error) + Update(*v1.VolumeAttachment) (*v1.VolumeAttachment, error) + UpdateStatus(*v1.VolumeAttachment) (*v1.VolumeAttachment, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.VolumeAttachment, error) + List(opts metav1.ListOptions) (*v1.VolumeAttachmentList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.VolumeAttachment, err error) + VolumeAttachmentExpansion +} + +// volumeAttachments implements VolumeAttachmentInterface +type volumeAttachments struct { + client rest.Interface +} + +// newVolumeAttachments returns a VolumeAttachments +func newVolumeAttachments(c *StorageV1Client) *volumeAttachments { + return &volumeAttachments{ + client: c.RESTClient(), + } +} + +// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. +func (c *volumeAttachments) Get(name string, options metav1.GetOptions) (result *v1.VolumeAttachment, err error) { + result = &v1.VolumeAttachment{} + err = c.client.Get(). + Resource("volumeattachments"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. +func (c *volumeAttachments) List(opts metav1.ListOptions) (result *v1.VolumeAttachmentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.VolumeAttachmentList{} + err = c.client.Get(). + Resource("volumeattachments"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested volumeAttachments. +func (c *volumeAttachments) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("volumeattachments"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. +func (c *volumeAttachments) Create(volumeAttachment *v1.VolumeAttachment) (result *v1.VolumeAttachment, err error) { + result = &v1.VolumeAttachment{} + err = c.client.Post(). + Resource("volumeattachments"). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. +func (c *volumeAttachments) Update(volumeAttachment *v1.VolumeAttachment) (result *v1.VolumeAttachment, err error) { + result = &v1.VolumeAttachment{} + err = c.client.Put(). + Resource("volumeattachments"). + Name(volumeAttachment.Name). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *volumeAttachments) UpdateStatus(volumeAttachment *v1.VolumeAttachment) (result *v1.VolumeAttachment, err error) { + result = &v1.VolumeAttachment{} + err = c.client.Put(). + Resource("volumeattachments"). + Name(volumeAttachment.Name). + SubResource("status"). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. +func (c *volumeAttachments) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("volumeattachments"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *volumeAttachments) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("volumeattachments"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched volumeAttachment. +func (c *volumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.VolumeAttachment, err error) { + result = &v1.VolumeAttachment{} + err = c.client.Patch(pt). + Resource("volumeattachments"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go index af04b681c4f0..86f53e2d4dc0 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go @@ -123,7 +123,7 @@ func (c *FakeVolumeAttachments) DeleteCollection(options *v1.DeleteOptions, list // Patch applies the patch and returns the patched volumeAttachment. func (c *FakeVolumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, data, subresources...), &v1alpha1.VolumeAttachment{}) + Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, pt, data, subresources...), &v1alpha1.VolumeAttachment{}) if obj == nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go index e6af00185940..7fef94e8d886 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "time" + v1alpha1 "k8s.io/api/storage/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -73,10 +75,15 @@ func (c *volumeAttachments) Get(name string, options v1.GetOptions) (result *v1a // List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1alpha1.VolumeAttachmentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.VolumeAttachmentList{} err = c.client.Get(). Resource("volumeattachments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -84,10 +91,15 @@ func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1alpha1.VolumeAt // Watch returns a watch.Interface that watches the requested volumeAttachments. func (c *volumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("volumeattachments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -141,9 +153,14 @@ func (c *volumeAttachments) Delete(name string, options *v1.DeleteOptions) error // DeleteCollection deletes a collection of objects. func (c *volumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("volumeattachments"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). 
Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go index cbfbab1a35a3..9fc8ca991e35 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go @@ -112,7 +112,7 @@ func (c *FakeStorageClasses) DeleteCollection(options *v1.DeleteOptions, listOpt // Patch applies the patch and returns the patched storageClass. func (c *FakeStorageClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StorageClass, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, name, data, subresources...), &v1beta1.StorageClass{}) + Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, name, pt, data, subresources...), &v1beta1.StorageClass{}) if obj == nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go index 04c0c463adca..043098f45579 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go @@ -123,7 +123,7 @@ func (c *FakeVolumeAttachments) DeleteCollection(options *v1.DeleteOptions, list // Patch applies the patch and returns the patched volumeAttachment. func (c *FakeVolumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.VolumeAttachment, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, data, subresources...), &v1beta1.VolumeAttachment{}) + Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, pt, data, subresources...), &v1beta1.VolumeAttachment{}) if obj == nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go index fbe1fd4c215b..8a8f38916194 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -72,10 +74,15 @@ func (c *storageClasses) Get(name string, options v1.GetOptions) (result *v1beta // List takes label and field selectors, and returns the list of StorageClasses that match those selectors. func (c *storageClasses) List(opts v1.ListOptions) (result *v1beta1.StorageClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.StorageClassList{} err = c.client.Get(). Resource("storageclasses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). 
Into(result) return @@ -83,10 +90,15 @@ func (c *storageClasses) List(opts v1.ListOptions) (result *v1beta1.StorageClass // Watch returns a watch.Interface that watches the requested storageClasses. func (c *storageClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("storageclasses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *storageClasses) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *storageClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("storageclasses"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go index 5cd2d3919f82..d319407f2723 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -73,10 +75,15 @@ func (c *volumeAttachments) Get(name string, options v1.GetOptions) (result *v1b // List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1beta1.VolumeAttachmentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.VolumeAttachmentList{} err = c.client.Get(). Resource("volumeattachments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -84,10 +91,15 @@ func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1beta1.VolumeAtt // Watch returns a watch.Interface that watches the requested volumeAttachments. func (c *volumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("volumeattachments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -141,9 +153,14 @@ func (c *volumeAttachments) Delete(name string, options *v1.DeleteOptions) error // DeleteCollection deletes a collection of objects. func (c *volumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("volumeattachments"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/listers/apps/v1beta1/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/listers/apps/v1beta1/BUILD index 640f65993434..9d50008600d2 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/listers/apps/v1beta1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/listers/apps/v1beta1/BUILD @@ -11,7 +11,6 @@ go_library( "controllerrevision.go", "deployment.go", "expansion_generated.go", - "scale.go", "statefulset.go", "statefulset_expansion.go", ], diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/listers/apps/v1beta1/expansion_generated.go b/cluster-autoscaler/vendor/k8s.io/client-go/listers/apps/v1beta1/expansion_generated.go index 8f8d08434d8e..c73cf98c7a59 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/listers/apps/v1beta1/expansion_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/listers/apps/v1beta1/expansion_generated.go @@ -33,11 +33,3 @@ type DeploymentListerExpansion interface{} // DeploymentNamespaceListerExpansion allows custom methods to be added to // DeploymentNamespaceLister. type DeploymentNamespaceListerExpansion interface{} - -// ScaleListerExpansion allows custom methods to be added to -// ScaleLister. -type ScaleListerExpansion interface{} - -// ScaleNamespaceListerExpansion allows custom methods to be added to -// ScaleNamespaceLister. -type ScaleNamespaceListerExpansion interface{} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/listers/apps/v1beta1/scale.go b/cluster-autoscaler/vendor/k8s.io/client-go/listers/apps/v1beta1/scale.go deleted file mode 100644 index ef8a2630ec31..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/listers/apps/v1beta1/scale.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/apps/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// ScaleLister helps list Scales. -type ScaleLister interface { - // List lists all Scales in the indexer. - List(selector labels.Selector) (ret []*v1beta1.Scale, err error) - // Scales returns an object that can list and get Scales. - Scales(namespace string) ScaleNamespaceLister - ScaleListerExpansion -} - -// scaleLister implements the ScaleLister interface. -type scaleLister struct { - indexer cache.Indexer -} - -// NewScaleLister returns a new ScaleLister. -func NewScaleLister(indexer cache.Indexer) ScaleLister { - return &scaleLister{indexer: indexer} -} - -// List lists all Scales in the indexer. -func (s *scaleLister) List(selector labels.Selector) (ret []*v1beta1.Scale, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Scale)) - }) - return ret, err -} - -// Scales returns an object that can list and get Scales. 
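The `Timeout(timeout)` plumbing repeated through the hunks above always follows the same shape: a nil `TimeoutSeconds` leaves the zero `time.Duration` (no client-side timeout), otherwise the value is taken as whole seconds. A standalone sketch of just that conversion:

```go
package main

import (
	"fmt"
	"time"
)

// toTimeout mirrors the conversion the generated clients now perform
// before calling Request.Timeout().
func toTimeout(timeoutSeconds *int64) time.Duration {
	var timeout time.Duration
	if timeoutSeconds != nil {
		timeout = time.Duration(*timeoutSeconds) * time.Second
	}
	return timeout
}

func main() {
	thirty := int64(30)
	fmt.Println(toTimeout(nil))     // 0s: Timeout(0) is effectively a no-op
	fmt.Println(toTimeout(&thirty)) // 30s
}
```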
-func (s *scaleLister) Scales(namespace string) ScaleNamespaceLister { - return scaleNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// ScaleNamespaceLister helps list and get Scales. -type ScaleNamespaceLister interface { - // List lists all Scales in the indexer for a given namespace. - List(selector labels.Selector) (ret []*v1beta1.Scale, err error) - // Get retrieves the Scale from the indexer for a given namespace and name. - Get(name string) (*v1beta1.Scale, error) - ScaleNamespaceListerExpansion -} - -// scaleNamespaceLister implements the ScaleNamespaceLister -// interface. -type scaleNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Scales in the indexer for a given namespace. -func (s scaleNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Scale, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Scale)) - }) - return ret, err -} - -// Get retrieves the Scale from the indexer for a given namespace and name. -func (s scaleNamespaceLister) Get(name string) (*v1beta1.Scale, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("scale"), name) - } - return obj.(*v1beta1.Scale), nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/listers/apps/v1beta2/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/listers/apps/v1beta2/BUILD index 20e24652c46c..cbef24da637b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/listers/apps/v1beta2/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/listers/apps/v1beta2/BUILD @@ -16,7 +16,6 @@ go_library( "expansion_generated.go", "replicaset.go", "replicaset_expansion.go", - "scale.go", "statefulset.go", "statefulset_expansion.go", ], diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/listers/apps/v1beta2/expansion_generated.go b/cluster-autoscaler/vendor/k8s.io/client-go/listers/apps/v1beta2/expansion_generated.go index d468f38e7c6d..bac6ccb9a7a3 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/listers/apps/v1beta2/expansion_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/listers/apps/v1beta2/expansion_generated.go @@ -25,11 +25,3 @@ type ControllerRevisionListerExpansion interface{} // ControllerRevisionNamespaceListerExpansion allows custom methods to be added to // ControllerRevisionNamespaceLister. type ControllerRevisionNamespaceListerExpansion interface{} - -// ScaleListerExpansion allows custom methods to be added to -// ScaleLister. -type ScaleListerExpansion interface{} - -// ScaleNamespaceListerExpansion allows custom methods to be added to -// ScaleNamespaceLister. -type ScaleNamespaceListerExpansion interface{} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/listers/apps/v1beta2/scale.go b/cluster-autoscaler/vendor/k8s.io/client-go/listers/apps/v1beta2/scale.go deleted file mode 100644 index d89329864ae7..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/listers/apps/v1beta2/scale.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1beta2 - -import ( - v1beta2 "k8s.io/api/apps/v1beta2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// ScaleLister helps list Scales. -type ScaleLister interface { - // List lists all Scales in the indexer. - List(selector labels.Selector) (ret []*v1beta2.Scale, err error) - // Scales returns an object that can list and get Scales. - Scales(namespace string) ScaleNamespaceLister - ScaleListerExpansion -} - -// scaleLister implements the ScaleLister interface. -type scaleLister struct { - indexer cache.Indexer -} - -// NewScaleLister returns a new ScaleLister. -func NewScaleLister(indexer cache.Indexer) ScaleLister { - return &scaleLister{indexer: indexer} -} - -// List lists all Scales in the indexer. -func (s *scaleLister) List(selector labels.Selector) (ret []*v1beta2.Scale, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.Scale)) - }) - return ret, err -} - -// Scales returns an object that can list and get Scales. -func (s *scaleLister) Scales(namespace string) ScaleNamespaceLister { - return scaleNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// ScaleNamespaceLister helps list and get Scales. -type ScaleNamespaceLister interface { - // List lists all Scales in the indexer for a given namespace. - List(selector labels.Selector) (ret []*v1beta2.Scale, err error) - // Get retrieves the Scale from the indexer for a given namespace and name. - Get(name string) (*v1beta2.Scale, error) - ScaleNamespaceListerExpansion -} - -// scaleNamespaceLister implements the ScaleNamespaceLister -// interface. -type scaleNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Scales in the indexer for a given namespace. -func (s scaleNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.Scale, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.Scale)) - }) - return ret, err -} - -// Get retrieves the Scale from the indexer for a given namespace and name. 
-func (s scaleNamespaceLister) Get(name string) (*v1beta2.Scale, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta2.Resource("scale"), name) - } - return obj.(*v1beta2.Scale), nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/listers/auditregistration/v1alpha1/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/listers/auditregistration/v1alpha1/BUILD new file mode 100644 index 000000000000..c23bd63a4004 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/listers/auditregistration/v1alpha1/BUILD @@ -0,0 +1,32 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "auditsink.go", + "expansion_generated.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/listers/auditregistration/v1alpha1", + importpath = "k8s.io/client-go/listers/auditregistration/v1alpha1", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/api/auditregistration/v1alpha1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", + "//staging/src/k8s.io/client-go/tools/cache:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/listers/auditregistration/v1alpha1/auditsink.go b/cluster-autoscaler/vendor/k8s.io/client-go/listers/auditregistration/v1alpha1/auditsink.go new file mode 100644 index 000000000000..3ae4528c8c35 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/listers/auditregistration/v1alpha1/auditsink.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/auditregistration/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// AuditSinkLister helps list AuditSinks. +type AuditSinkLister interface { + // List lists all AuditSinks in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.AuditSink, err error) + // Get retrieves the AuditSink from the index for a given name. + Get(name string) (*v1alpha1.AuditSink, error) + AuditSinkListerExpansion +} + +// auditSinkLister implements the AuditSinkLister interface. +type auditSinkLister struct { + indexer cache.Indexer +} + +// NewAuditSinkLister returns a new AuditSinkLister. +func NewAuditSinkLister(indexer cache.Indexer) AuditSinkLister { + return &auditSinkLister{indexer: indexer} +} + +// List lists all AuditSinks in the indexer. 
+func (s *auditSinkLister) List(selector labels.Selector) (ret []*v1alpha1.AuditSink, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.AuditSink)) + }) + return ret, err +} + +// Get retrieves the AuditSink from the index for a given name. +func (s *auditSinkLister) Get(name string) (*v1alpha1.AuditSink, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("auditsink"), name) + } + return obj.(*v1alpha1.AuditSink), nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/listers/auditregistration/v1alpha1/expansion_generated.go b/cluster-autoscaler/vendor/k8s.io/client-go/listers/auditregistration/v1alpha1/expansion_generated.go new file mode 100644 index 000000000000..533dd0631f96 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/listers/auditregistration/v1alpha1/expansion_generated.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// AuditSinkListerExpansion allows custom methods to be added to +// AuditSinkLister. +type AuditSinkListerExpansion interface{} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/listers/extensions/v1beta1/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/listers/extensions/v1beta1/BUILD index 786a63311b52..62d18fefa1e4 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/listers/extensions/v1beta1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/listers/extensions/v1beta1/BUILD @@ -18,7 +18,6 @@ go_library( "podsecuritypolicy.go", "replicaset.go", "replicaset_expansion.go", - "scale.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/listers/extensions/v1beta1", importpath = "k8s.io/client-go/listers/extensions/v1beta1", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go b/cluster-autoscaler/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go index b5ee8a49230f..d5c2a7a7d221 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go @@ -29,11 +29,3 @@ type IngressNamespaceListerExpansion interface{} // PodSecurityPolicyListerExpansion allows custom methods to be added to // PodSecurityPolicyLister. type PodSecurityPolicyListerExpansion interface{} - -// ScaleListerExpansion allows custom methods to be added to -// ScaleLister. -type ScaleListerExpansion interface{} - -// ScaleNamespaceListerExpansion allows custom methods to be added to -// ScaleNamespaceLister. 
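The new cluster-scoped listers in this diff (AuditSink above, VolumeAttachment further down) share the same indexer-backed shape: `Get` takes a bare name and `List` filters by label selector. A minimal usage sketch, assuming the vendored import paths resolve as written:

```go
package main

import (
	"fmt"

	auditv1alpha1 "k8s.io/api/auditregistration/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	auditlisters "k8s.io/client-go/listers/auditregistration/v1alpha1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Cluster-scoped objects are keyed by bare name, so the default
	// MetaNamespaceKeyFunc works without any namespace index.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
	if err := indexer.Add(&auditv1alpha1.AuditSink{
		ObjectMeta: metav1.ObjectMeta{Name: "example-sink"},
	}); err != nil {
		panic(err)
	}

	lister := auditlisters.NewAuditSinkLister(indexer)
	sinks, _ := lister.List(labels.Everything())
	fmt.Println(len(sinks)) // 1

	sink, err := lister.Get("example-sink")
	if err != nil {
		panic(err)
	}
	fmt.Println(sink.Name) // example-sink
}
```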
-type ScaleNamespaceListerExpansion interface{} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/listers/extensions/v1beta1/scale.go b/cluster-autoscaler/vendor/k8s.io/client-go/listers/extensions/v1beta1/scale.go deleted file mode 100644 index 527d4be42468..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/client-go/listers/extensions/v1beta1/scale.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// ScaleLister helps list Scales. -type ScaleLister interface { - // List lists all Scales in the indexer. - List(selector labels.Selector) (ret []*v1beta1.Scale, err error) - // Scales returns an object that can list and get Scales. - Scales(namespace string) ScaleNamespaceLister - ScaleListerExpansion -} - -// scaleLister implements the ScaleLister interface. -type scaleLister struct { - indexer cache.Indexer -} - -// NewScaleLister returns a new ScaleLister. -func NewScaleLister(indexer cache.Indexer) ScaleLister { - return &scaleLister{indexer: indexer} -} - -// List lists all Scales in the indexer. -func (s *scaleLister) List(selector labels.Selector) (ret []*v1beta1.Scale, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Scale)) - }) - return ret, err -} - -// Scales returns an object that can list and get Scales. -func (s *scaleLister) Scales(namespace string) ScaleNamespaceLister { - return scaleNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// ScaleNamespaceLister helps list and get Scales. -type ScaleNamespaceLister interface { - // List lists all Scales in the indexer for a given namespace. - List(selector labels.Selector) (ret []*v1beta1.Scale, err error) - // Get retrieves the Scale from the indexer for a given namespace and name. - Get(name string) (*v1beta1.Scale, error) - ScaleNamespaceListerExpansion -} - -// scaleNamespaceLister implements the ScaleNamespaceLister -// interface. -type scaleNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Scales in the indexer for a given namespace. -func (s scaleNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Scale, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Scale)) - }) - return ret, err -} - -// Get retrieves the Scale from the indexer for a given namespace and name. 
-func (s scaleNamespaceLister) Get(name string) (*v1beta1.Scale, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("scale"), name) - } - return obj.(*v1beta1.Scale), nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/listers/policy/v1beta1/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/listers/policy/v1beta1/BUILD index 5c140cb3df00..640f0435cf59 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/listers/policy/v1beta1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/listers/policy/v1beta1/BUILD @@ -23,7 +23,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget_expansion.go b/cluster-autoscaler/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget_expansion.go index c0ab9d3ed4cc..d07d11a98dfc 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget_expansion.go @@ -19,11 +19,11 @@ package v1beta1 import ( "fmt" - "github.com/golang/glog" "k8s.io/api/core/v1" policy "k8s.io/api/policy/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/klog" ) // PodDisruptionBudgetListerExpansion allows custom methods to be added to @@ -54,7 +54,7 @@ func (s *podDisruptionBudgetLister) GetPodPodDisruptionBudgets(pod *v1.Pod) ([]* pdb := list[i] selector, err = metav1.LabelSelectorAsSelector(pdb.Spec.Selector) if err != nil { - glog.Warningf("invalid selector: %v", err) + klog.Warningf("invalid selector: %v", err) // TODO(mml): add an event to the PDB continue } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/listers/storage/v1/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/listers/storage/v1/BUILD index 319290572b54..495e3300fc1d 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/listers/storage/v1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/listers/storage/v1/BUILD @@ -10,6 +10,7 @@ go_library( srcs = [ "expansion_generated.go", "storageclass.go", + "volumeattachment.go", ], importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/listers/storage/v1", importpath = "k8s.io/client-go/listers/storage/v1", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/listers/storage/v1/expansion_generated.go b/cluster-autoscaler/vendor/k8s.io/client-go/listers/storage/v1/expansion_generated.go index d932470649cc..9d7d8887265e 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/listers/storage/v1/expansion_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/listers/storage/v1/expansion_generated.go @@ -21,3 +21,7 @@ package v1 // StorageClassListerExpansion allows custom methods to be added to // StorageClassLister. type StorageClassListerExpansion interface{} + +// VolumeAttachmentListerExpansion allows custom methods to be added to +// VolumeAttachmentLister. 
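The `glog` → `klog` swap running through these vendored files is mechanical because klog preserves glog's call surface (`V`, `Infof`, `Warningf`, `Errorf`). The one step callers usually add is flag registration, since klog does not register its flags in `init()` the way glog did. A minimal sketch:

```go
package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// Unlike glog, klog leaves flag registration to the caller.
	klog.InitFlags(flag.CommandLine)
	flag.Set("v", "4")
	flag.Parse()

	// The call sites migrated in this diff keep their shape verbatim.
	klog.Warningf("invalid selector: %v", "example")
	klog.V(4).Infof("printed only at verbosity >= 4")
	klog.Flush()
}
```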
+type VolumeAttachmentListerExpansion interface{} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go b/cluster-autoscaler/vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go new file mode 100644 index 000000000000..14888812ec6a --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// VolumeAttachmentLister helps list VolumeAttachments. +type VolumeAttachmentLister interface { + // List lists all VolumeAttachments in the indexer. + List(selector labels.Selector) (ret []*v1.VolumeAttachment, err error) + // Get retrieves the VolumeAttachment from the index for a given name. + Get(name string) (*v1.VolumeAttachment, error) + VolumeAttachmentListerExpansion +} + +// volumeAttachmentLister implements the VolumeAttachmentLister interface. +type volumeAttachmentLister struct { + indexer cache.Indexer +} + +// NewVolumeAttachmentLister returns a new VolumeAttachmentLister. +func NewVolumeAttachmentLister(indexer cache.Indexer) VolumeAttachmentLister { + return &volumeAttachmentLister{indexer: indexer} +} + +// List lists all VolumeAttachments in the indexer. +func (s *volumeAttachmentLister) List(selector labels.Selector) (ret []*v1.VolumeAttachment, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.VolumeAttachment)) + }) + return ret, err +} + +// Get retrieves the VolumeAttachment from the index for a given name. +func (s *volumeAttachmentLister) Get(name string) (*v1.VolumeAttachment, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("volumeattachment"), name) + } + return obj.(*v1.VolumeAttachment), nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS b/cluster-autoscaler/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS new file mode 100644 index 000000000000..3b7ea1b131f2 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS @@ -0,0 +1,7 @@ +# approval on api packages bubbles to api-approvers +reviewers: +- sig-auth-authenticators-approvers +- sig-auth-authenticators-reviewers +labels: +- sig/auth + diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go b/cluster-autoscaler/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go index d67f9b6a578c..486790fa69ff 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go @@ -16,4 +16,5 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +groupName=client.authentication.k8s.io + package clientauthentication diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go b/cluster-autoscaler/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go index c28eb6c9b886..dfd56e32027c 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen=TypeMeta // +groupName=client.authentication.k8s.io + package v1alpha1 diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go b/cluster-autoscaler/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go index 35a120e8b1ba..6eb6a9815746 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen=TypeMeta // +groupName=client.authentication.k8s.io + package v1beta1 diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/pkg/version/doc.go b/cluster-autoscaler/vendor/k8s.io/client-go/pkg/version/doc.go index 380395784885..c3ace745136c 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/pkg/version/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/pkg/version/doc.go @@ -14,7 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// +k8s:openapi-gen=true + // Package version supplies version information collected at build time to // kubernetes components. 
-// +k8s:openapi-gen=true package version diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/BUILD index d2459367275d..69415598332b 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/BUILD @@ -18,8 +18,8 @@ go_library( "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library", "//staging/src/k8s.io/client-go/transport:go_default_library", "//staging/src/k8s.io/client-go/util/connrotation:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go b/cluster-autoscaler/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go index cae9d0d618e7..4d72526583ef 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go @@ -31,7 +31,6 @@ import ( "sync" "time" - "github.com/golang/glog" "golang.org/x/crypto/ssh/terminal" "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -44,6 +43,7 @@ import ( "k8s.io/client-go/tools/clientcmd/api" "k8s.io/client-go/transport" "k8s.io/client-go/util/connrotation" + "k8s.io/klog" ) const execInfoEnv = "KUBERNETES_EXEC_INFO" @@ -228,7 +228,7 @@ func (r *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) { Code: int32(res.StatusCode), } if err := r.a.maybeRefreshCreds(creds, resp); err != nil { - glog.Errorf("refreshing credentials: %v", err) + klog.Errorf("refreshing credentials: %v", err) } } return res, nil diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/rest/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/rest/BUILD index 0d95e60cad39..70920303e4e4 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/rest/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/rest/BUILD @@ -39,10 +39,10 @@ go_test( "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library", "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/gofuzz:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/golang.org/x/oauth2:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -79,9 +79,9 @@ go_library( "//staging/src/k8s.io/client-go/transport:go_default_library", "//staging/src/k8s.io/client-go/util/cert:go_default_library", "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/net/http2:go_default_library", "//vendor/golang.org/x/oauth2:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/rest/config.go b/cluster-autoscaler/vendor/k8s.io/client-go/rest/config.go index 87e87905523c..438eb3bedac0 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/rest/config.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/rest/config.go @@ -29,7 +29,6 @@ import ( "strings" "time" - "github.com/golang/glog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -37,6 +36,7 @@ import ( clientcmdapi "k8s.io/client-go/tools/clientcmd/api" certutil "k8s.io/client-go/util/cert" "k8s.io/client-go/util/flowcontrol" + "k8s.io/klog" ) const ( @@ -322,7 +322,7 @@ func InClusterConfig() (*Config, error) { return nil, ErrNotInCluster } - ts := newCachedPathTokenSource(tokenFile) + ts := NewCachedFileTokenSource(tokenFile) if _, err := ts.Token(); err != nil { return nil, err @@ -331,7 +331,7 @@ func InClusterConfig() (*Config, error) { tlsClientConfig := TLSClientConfig{} if _, err := certutil.NewPool(rootCAFile); err != nil { - glog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err) + klog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err) } else { tlsClientConfig.CAFile = rootCAFile } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/rest/plugin.go b/cluster-autoscaler/vendor/k8s.io/client-go/rest/plugin.go index cf8fbabfdf1c..83ef5ae320fe 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/rest/plugin.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/rest/plugin.go @@ -21,7 +21,7 @@ import ( "net/http" "sync" - "github.com/golang/glog" + "k8s.io/klog" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" ) @@ -57,7 +57,7 @@ func RegisterAuthProviderPlugin(name string, plugin Factory) error { if _, found := plugins[name]; found { return fmt.Errorf("Auth Provider Plugin %q was registered twice", name) } - glog.V(4).Infof("Registered Auth Provider Plugin %q", name) + klog.V(4).Infof("Registered Auth Provider Plugin %q", name) plugins[name] = plugin return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/rest/request.go b/cluster-autoscaler/vendor/k8s.io/client-go/rest/request.go index 69ce1c7595ad..64901fba20d8 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/rest/request.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/rest/request.go @@ -32,7 +32,6 @@ import ( "strings" "time" - "github.com/golang/glog" "golang.org/x/net/http2" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -44,6 +43,7 @@ import ( restclientwatch "k8s.io/client-go/rest/watch" "k8s.io/client-go/tools/metrics" "k8s.io/client-go/util/flowcontrol" + "k8s.io/klog" ) var ( @@ -114,7 +114,7 @@ type Request struct { // NewRequest creates a new request helper object for accessing runtime.Objects on a server. func NewRequest(client HTTPClient, verb string, baseURL *url.URL, versionedAPIPath string, content ContentConfig, serializers Serializers, backoff BackoffManager, throttle flowcontrol.RateLimiter, timeout time.Duration) *Request { if backoff == nil { - glog.V(2).Infof("Not implementing request backoff strategy.") + klog.V(2).Infof("Not implementing request backoff strategy.") backoff = &NoBackoff{} } @@ -455,17 +455,9 @@ func (r *Request) URL() *url.URL { // finalURLTemplate is similar to URL(), but will make all specific parameter values equal // - instead of name or namespace, "{name}" and "{namespace}" will be used, and all query -// parameters will be reset. This creates a copy of the request so as not to change the -// underlying object. This means some useful request info (like the types of field -// selectors in use) will be lost. -// TODO: preserve field selector keys +// parameters will be reset. This creates a copy of the url so as not to change the +// underlying object. 
func (r Request) finalURLTemplate() url.URL { - if len(r.resourceName) != 0 { - r.resourceName = "{name}" - } - if r.namespaceSet && len(r.namespace) != 0 { - r.namespace = "{namespace}" - } newParams := url.Values{} v := []string{"{value}"} for k := range r.params { @@ -473,6 +465,59 @@ func (r Request) finalURLTemplate() url.URL { } r.params = newParams url := r.URL() + segments := strings.Split(r.URL().Path, "/") + groupIndex := 0 + index := 0 + if r.URL() != nil && r.baseURL != nil && strings.Contains(r.URL().Path, r.baseURL.Path) { + groupIndex += len(strings.Split(r.baseURL.Path, "/")) + } + if groupIndex >= len(segments) { + return *url + } + + const CoreGroupPrefix = "api" + const NamedGroupPrefix = "apis" + isCoreGroup := segments[groupIndex] == CoreGroupPrefix + isNamedGroup := segments[groupIndex] == NamedGroupPrefix + if isCoreGroup { + // checking the case of core group with /api/v1/... format + index = groupIndex + 2 + } else if isNamedGroup { + // checking the case of named group with /apis/apps/v1/... format + index = groupIndex + 3 + } else { + // this should not happen: the only two possibilities are /api... and /apis..., but keep an + // outlet here in case more API groups are ever added in the future: + // https://kubernetes.io/docs/concepts/overview/kubernetes-api/#api-groups + // if an unrecognized API group name is encountered, return {prefix} for url.Path + url.Path = "/{prefix}" + url.RawQuery = "" + return *url + } + //switch segLength := len(segments) - index; segLength { + switch { + // case len(segments) - index == 1: + // resource (with no name) do nothing + case len(segments)-index == 2: + // /$RESOURCE/$NAME: replace $NAME with {name} + segments[index+1] = "{name}" + case len(segments)-index == 3: + if segments[index+2] == "finalize" || segments[index+2] == "status" { + // /$RESOURCE/$NAME/$SUBRESOURCE: replace $NAME with {name} + segments[index+1] = "{name}" + } else { + // /namespace/$NAMESPACE/$RESOURCE: replace $NAMESPACE with {namespace} + segments[index+1] = "{namespace}" + } + case len(segments)-index >= 4: + segments[index+1] = "{namespace}" + // /namespace/$NAMESPACE/$RESOURCE/$NAME: replace $NAMESPACE with {namespace}, $NAME with {name} + if segments[index+3] != "finalize" && segments[index+3] != "status" { + // /$RESOURCE/$NAME/$SUBRESOURCE: replace $NAME with {name} + segments[index+3] = "{name}" + } + } + url.Path = path.Join(segments...)
return *url } @@ -482,7 +527,7 @@ func (r *Request) tryThrottle() { r.throttle.Accept() } if latency := time.Since(now); latency > longThrottleLatency { - glog.V(4).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String()) + klog.V(4).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String()) } } @@ -638,7 +683,7 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { }() if r.err != nil { - glog.V(4).Infof("Error in request: %v", r.err) + klog.V(4).Infof("Error in request: %v", r.err) return r.err } @@ -725,13 +770,13 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { if seeker, ok := r.body.(io.Seeker); ok && r.body != nil { _, err := seeker.Seek(0, 0) if err != nil { - glog.V(4).Infof("Could not retry request, can't Seek() back to beginning of body for %T", r.body) + klog.V(4).Infof("Could not retry request, can't Seek() back to beginning of body for %T", r.body) fn(req, resp) return true } } - glog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", seconds, retries, url) + klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", seconds, retries, url) r.backoffMgr.Sleep(time.Duration(seconds) * time.Second) return false } @@ -799,13 +844,13 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu // 2. Apiserver sends back the headers and then part of the body // 3. Apiserver closes connection. // 4. client-go should catch this and return an error. - glog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err) + klog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err) streamErr := fmt.Errorf("Stream error %#v when reading response body, may be caused by closed connection. Please retry.", err) return Result{ err: streamErr, } default: - glog.Errorf("Unexpected error when reading response body: %#v", err) + klog.Errorf("Unexpected error when reading response body: %#v", err) unexpectedErr := fmt.Errorf("Unexpected error %#v when reading response body. Please retry.", err) return Result{ err: unexpectedErr, @@ -869,11 +914,11 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu func truncateBody(body string) string { max := 0 switch { - case bool(glog.V(10)): + case bool(klog.V(10)): return body - case bool(glog.V(9)): + case bool(klog.V(9)): max = 10240 - case bool(glog.V(8)): + case bool(klog.V(8)): max = 1024 } @@ -888,13 +933,13 @@ func truncateBody(body string) string { // allocating a new string for the body output unless necessary. Uses a simple heuristic to determine // whether the body is printable. 
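
The rewritten `finalURLTemplate` above derives the metrics URL template from the request path segments instead of mutating request fields. A simplified, hypothetical restatement of the named-group branch only; the real code above also handles `/api/v1` core-group paths, computes the group index from the base URL, and special-cases the "status" and "finalize" subresources:

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// templatePath sketches the segment templating for the two most common
// /apis/<group>/<version>/... path shapes; other shapes pass through.
func templatePath(p string) string {
	segments := strings.Split(p, "/") // ["", "apis", group, version, ...]
	index := 4                        // first segment after /apis/<group>/<version>
	switch len(segments) - index {
	case 2: // /$RESOURCE/$NAME
		segments[index+1] = "{name}"
	case 4: // /namespaces/$NAMESPACE/$RESOURCE/$NAME
		segments[index+1] = "{namespace}"
		segments[index+3] = "{name}"
	}
	return "/" + path.Join(segments...)
}

func main() {
	fmt.Println(templatePath("/apis/apps/v1/namespaces/demo/deployments/web"))
	// Output: /apis/apps/v1/namespaces/{namespace}/deployments/{name}
}
```
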
func glogBody(prefix string, body []byte) { - if glog.V(8) { + if klog.V(8) { if bytes.IndexFunc(body, func(r rune) bool { return r < 0x0a }) != -1 { - glog.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body))) + klog.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body))) } else { - glog.Infof("%s: %s", prefix, truncateBody(string(body))) + klog.Infof("%s: %s", prefix, truncateBody(string(body))) } } } @@ -1096,7 +1141,7 @@ func (r Result) Error() error { // to be backwards compatible with old servers that do not return a version, default to "v1" out, _, err := r.decoder.Decode(r.body, &schema.GroupVersionKind{Version: "v1"}, nil) if err != nil { - glog.V(5).Infof("body was not decodable (unable to check for Status): %v", err) + klog.V(5).Infof("body was not decodable (unable to check for Status): %v", err) return r.err } switch t := out.(type) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/rest/token_source.go b/cluster-autoscaler/vendor/k8s.io/client-go/rest/token_source.go index 296b2a0481df..c251b5eb0bb6 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/rest/token_source.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/rest/token_source.go @@ -24,8 +24,8 @@ import ( "sync" "time" - "github.com/golang/glog" "golang.org/x/oauth2" + "k8s.io/klog" ) // TokenSourceWrapTransport returns a WrapTransport that injects bearer tokens @@ -42,7 +42,9 @@ func TokenSourceWrapTransport(ts oauth2.TokenSource) func(http.RoundTripper) htt } } -func newCachedPathTokenSource(path string) oauth2.TokenSource { +// NewCachedFileTokenSource returns a oauth2.TokenSource reads a token from a +// file at a specified path and periodically reloads it. +func NewCachedFileTokenSource(path string) oauth2.TokenSource { return &cachingTokenSource{ now: time.Now, leeway: 1 * time.Minute, @@ -129,7 +131,7 @@ func (ts *cachingTokenSource) Token() (*oauth2.Token, error) { if ts.tok == nil { return nil, err } - glog.Errorf("Unable to rotate token: %v", err) + klog.Errorf("Unable to rotate token: %v", err) return ts.tok, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/rest/urlbackoff.go b/cluster-autoscaler/vendor/k8s.io/client-go/rest/urlbackoff.go index eff848abc12b..d00e42f86671 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/rest/urlbackoff.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/rest/urlbackoff.go @@ -20,9 +20,9 @@ import ( "net/url" "time" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/util/flowcontrol" + "k8s.io/klog" ) // Set of resp. Codes that we backoff for. @@ -64,7 +64,7 @@ func (n *NoBackoff) Sleep(d time.Duration) { // Disable makes the backoff trivial, i.e., sets it to zero. This might be used // by tests which want to run 1000s of mock requests without slowing down. func (b *URLBackoff) Disable() { - glog.V(4).Infof("Disabling backoff strategy") + klog.V(4).Infof("Disabling backoff strategy") b.Backoff = flowcontrol.NewBackOff(0*time.Second, 0*time.Second) } @@ -76,7 +76,7 @@ func (b *URLBackoff) baseUrlKey(rawurl *url.URL) string { // in the future. 
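
The token_source.go hunk above promotes the previously unexported file-backed token source to `NewCachedFileTokenSource`, which client_config.go (later in this diff) now uses for `TokenFile` auth. A hedged sketch of one way a caller might use the newly exported function; the token path and host here are illustrative only:

```go
package main

import (
	"fmt"
	"net/http"

	"k8s.io/client-go/rest"
)

func main() {
	// Reads the bearer token from disk and periodically reloads it.
	ts := rest.NewCachedFileTokenSource("/var/run/secrets/kubernetes.io/serviceaccount/token")

	cfg := &rest.Config{
		Host:          "https://kubernetes.default.svc",
		WrapTransport: rest.TokenSourceWrapTransport(ts),
	}
	_ = cfg

	// The same wrapper can decorate any RoundTripper directly.
	rt := rest.TokenSourceWrapTransport(ts)(http.DefaultTransport)
	fmt.Printf("wrapped transport: %T\n", rt)
}
```
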
host, err := url.Parse(rawurl.String()) if err != nil { - glog.V(4).Infof("Error extracting url: %v", rawurl) + klog.V(4).Infof("Error extracting url: %v", rawurl) panic("bad url!") } return host.Host @@ -89,7 +89,7 @@ func (b *URLBackoff) UpdateBackoff(actualUrl *url.URL, err error, responseCode i b.Backoff.Next(b.baseUrlKey(actualUrl), b.Backoff.Clock.Now()) return } else if responseCode >= 300 || err != nil { - glog.V(4).Infof("Client is returning errors: code %v, error %v", responseCode, err) + klog.V(4).Infof("Client is returning errors: code %v, error %v", responseCode, err) } //If we got this far, there is no backoff required for this URL anymore. diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/restmapper/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/restmapper/BUILD index d13169643cec..ed8006f03180 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/restmapper/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/restmapper/BUILD @@ -15,7 +15,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/client-go/discovery:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/restmapper/discovery.go b/cluster-autoscaler/vendor/k8s.io/client-go/restmapper/discovery.go index aa158626af4f..84491f4c5d16 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/restmapper/discovery.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/restmapper/discovery.go @@ -26,7 +26,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/discovery" - "github.com/golang/glog" + "k8s.io/klog" ) // APIGroupResources is an API group with a mapping of versions to @@ -212,7 +212,7 @@ func (d *DeferredDiscoveryRESTMapper) getDelegate() (meta.RESTMapper, error) { // Reset resets the internally cached Discovery information and will // cause the next mapping request to re-discover. func (d *DeferredDiscoveryRESTMapper) Reset() { - glog.V(5).Info("Invalidating discovery information") + klog.V(5).Info("Invalidating discovery information") d.initMu.Lock() defer d.initMu.Unlock() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/restmapper/shortcut.go b/cluster-autoscaler/vendor/k8s.io/client-go/restmapper/shortcut.go index d9f4be0b6b11..6f3c9d930691 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/restmapper/shortcut.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/restmapper/shortcut.go @@ -19,7 +19,7 @@ package restmapper import ( "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -86,12 +86,12 @@ func (e shortcutExpander) getShortcutMappings() ([]*metav1.APIResourceList, []re // This can return an error *and* the results it was able to find. We don't need to fail on the error. 
apiResList, err := e.discoveryClient.ServerResources() if err != nil { - glog.V(1).Infof("Error loading discovery information: %v", err) + klog.V(1).Infof("Error loading discovery information: %v", err) } for _, apiResources := range apiResList { gv, err := schema.ParseGroupVersion(apiResources.GroupVersion) if err != nil { - glog.V(1).Infof("Unable to parse groupversion = %s due to = %s", apiResources.GroupVersion, err.Error()) + klog.V(1).Infof("Unable to parse groupversion = %s due to = %s", apiResources.GroupVersion, err.Error()) continue } for _, apiRes := range apiResources.APIResources { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/scale/interfaces.go b/cluster-autoscaler/vendor/k8s.io/client-go/scale/interfaces.go index 4668c7417d18..13f2cfb8e786 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/scale/interfaces.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/scale/interfaces.go @@ -34,6 +34,6 @@ type ScaleInterface interface { // Get fetches the scale of the given scalable resource. Get(resource schema.GroupResource, name string) (*autoscalingapi.Scale, error) - // Update updates the scale of the the given scalable resource. + // Update updates the scale of the given scalable resource. Update(resource schema.GroupResource, scale *autoscalingapi.Scale) (*autoscalingapi.Scale, error) } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/testing/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/testing/BUILD index 6222568fc7c0..d0849fcb1f4c 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/testing/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/testing/BUILD @@ -23,10 +23,12 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/json:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", + "//vendor/github.com/evanphx/json-patch:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/testing/actions.go b/cluster-autoscaler/vendor/k8s.io/client-go/testing/actions.go index b99f231c8d7b..e6db578ed8fe 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/testing/actions.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/testing/actions.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" ) func NewRootGetAction(resource schema.GroupVersionResource, name string) GetActionImpl { @@ -152,45 +153,49 @@ func NewUpdateAction(resource schema.GroupVersionResource, namespace string, obj return action } -func NewRootPatchAction(resource schema.GroupVersionResource, name string, patch []byte) PatchActionImpl { +func NewRootPatchAction(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte) PatchActionImpl { action := PatchActionImpl{} action.Verb = "patch" action.Resource = resource action.Name = name + action.PatchType = pt action.Patch = patch return action } -func NewPatchAction(resource schema.GroupVersionResource, namespace string, name string, patch []byte) PatchActionImpl { +func NewPatchAction(resource schema.GroupVersionResource, 
namespace string, name string, pt types.PatchType, patch []byte) PatchActionImpl { action := PatchActionImpl{} action.Verb = "patch" action.Resource = resource action.Namespace = namespace action.Name = name + action.PatchType = pt action.Patch = patch return action } -func NewRootPatchSubresourceAction(resource schema.GroupVersionResource, name string, patch []byte, subresources ...string) PatchActionImpl { +func NewRootPatchSubresourceAction(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte, subresources ...string) PatchActionImpl { action := PatchActionImpl{} action.Verb = "patch" action.Resource = resource action.Subresource = path.Join(subresources...) action.Name = name + action.PatchType = pt action.Patch = patch return action } -func NewPatchSubresourceAction(resource schema.GroupVersionResource, namespace, name string, patch []byte, subresources ...string) PatchActionImpl { +func NewPatchSubresourceAction(resource schema.GroupVersionResource, namespace, name string, pt types.PatchType, patch []byte, subresources ...string) PatchActionImpl { action := PatchActionImpl{} action.Verb = "patch" action.Resource = resource action.Subresource = path.Join(subresources...) action.Namespace = namespace action.Name = name + action.PatchType = pt action.Patch = patch return action @@ -396,6 +401,7 @@ type DeleteCollectionAction interface { type PatchAction interface { Action GetName() string + GetPatchType() types.PatchType GetPatch() []byte } @@ -537,8 +543,9 @@ func (a UpdateActionImpl) DeepCopy() Action { type PatchActionImpl struct { ActionImpl - Name string - Patch []byte + Name string + PatchType types.PatchType + Patch []byte } func (a PatchActionImpl) GetName() string { @@ -549,12 +556,17 @@ func (a PatchActionImpl) GetPatch() []byte { return a.Patch } +func (a PatchActionImpl) GetPatchType() types.PatchType { + return a.PatchType +} + func (a PatchActionImpl) DeepCopy() Action { patch := make([]byte, len(a.Patch)) copy(patch, a.Patch) return PatchActionImpl{ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), Name: a.Name, + PatchType: a.PatchType, Patch: patch, } } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/testing/fixture.go b/cluster-autoscaler/vendor/k8s.io/client-go/testing/fixture.go index 00c4c49fce45..90f16f560806 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/testing/fixture.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/testing/fixture.go @@ -20,11 +20,13 @@ import ( "fmt" "sync" + "github.com/evanphx/json-patch" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/json" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apimachinery/pkg/watch" @@ -137,15 +139,30 @@ func ObjectReaction(tracker ObjectTracker) ReactionFunc { if err != nil { return true, nil, err } - // Only supports strategic merge patch - // TODO: Add support for other Patch types - mergedByte, err := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj) - if err != nil { - return true, nil, err - } - - if err = json.Unmarshal(mergedByte, obj); err != nil { - return true, nil, err + // Only supports strategic merge patch and JSONPatch as coded. 
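
The testing changes above thread an explicit `types.PatchType` through every patch-action constructor, and the fixture's `ObjectReaction` (continued just below) dispatches on it. A hypothetical sketch of building both supported action kinds with the new signatures; the resource and patch bodies are illustrative:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	clienttesting "k8s.io/client-go/testing"
)

func main() {
	deployments := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}

	// Strategic merge patch: the old implicit behavior, now stated explicitly.
	smp := clienttesting.NewPatchAction(deployments, "demo", "web",
		types.StrategicMergePatchType, []byte(`{"spec":{"replicas":3}}`))

	// JSON patch: newly honored by ObjectReaction via evanphx/json-patch.
	jp := clienttesting.NewPatchAction(deployments, "demo", "web",
		types.JSONPatchType, []byte(`[{"op":"replace","path":"/spec/replicas","value":3}]`))

	fmt.Println(smp.GetPatchType(), jp.GetPatchType())
}
```
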
+ switch action.GetPatchType() { + case types.JSONPatchType: + patch, err := jsonpatch.DecodePatch(action.GetPatch()) + if err != nil { + return true, nil, err + } + modified, err := patch.Apply(old) + if err != nil { + return true, nil, err + } + if err = json.Unmarshal(modified, obj); err != nil { + return true, nil, err + } + case types.StrategicMergePatchType: + mergedByte, err := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj) + if err != nil { + return true, nil, err + } + if err = json.Unmarshal(mergedByte, obj); err != nil { + return true, nil, err + } + default: + return true, nil, fmt.Errorf("PatchType is not supported") } if err = tracker.Update(gvr, obj, ns); err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/auth/OWNERS b/cluster-autoscaler/vendor/k8s.io/client-go/tools/auth/OWNERS new file mode 100644 index 000000000000..c607d2aa8c5f --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/auth/OWNERS @@ -0,0 +1,7 @@ +approvers: +- sig-auth-authenticators-approvers +reviewers: +- sig-auth-authenticators-reviewers +labels: +- sig/auth + diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/BUILD index 0240fa6f7002..8b8b91258559 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/BUILD @@ -83,7 +83,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/pager:go_default_library", "//staging/src/k8s.io/client-go/util/buffer:go_default_library", "//staging/src/k8s.io/client-go/util/retry:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/delta_fifo.go index 45c3b500d427..f818a293a694 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/delta_fifo.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -23,7 +23,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" - "github.com/golang/glog" + "k8s.io/klog" ) // NewDeltaFIFO returns a Store which can be used process changes to items. @@ -320,17 +320,15 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err newDeltas := append(f.items[id], Delta{actionType, obj}) newDeltas = dedupDeltas(newDeltas) - _, exists := f.items[id] if len(newDeltas) > 0 { - if !exists { + if _, exists := f.items[id]; !exists { f.queue = append(f.queue, id) } f.items[id] = newDeltas f.cond.Broadcast() - } else if exists { - // We need to remove this from our map (extra items - // in the queue are ignored if they are not in the - // map). + } else { + // We need to remove this from our map (extra items in the queue are + // ignored if they are not in the map). delete(f.items, id) } return nil @@ -348,9 +346,6 @@ func (f *DeltaFIFO) List() []interface{} { func (f *DeltaFIFO) listLocked() []interface{} { list := make([]interface{}, 0, len(f.items)) for _, item := range f.items { - // Copy item's slice so operations on this slice - // won't interfere with the object we return. 
- item = copyDeltas(item) list = append(list, item.Newest().Object) } return list @@ -398,10 +393,7 @@ func (f *DeltaFIFO) GetByKey(key string) (item interface{}, exists bool, err err func (f *DeltaFIFO) IsClosed() bool { f.closedLock.Lock() defer f.closedLock.Unlock() - if f.closed { - return true - } - return false + return f.closed } // Pop blocks until an item is added to the queue, and then returns it. If @@ -432,10 +424,10 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { } id := f.queue[0] f.queue = f.queue[1:] - item, ok := f.items[id] if f.initialPopulationCount > 0 { f.initialPopulationCount-- } + item, ok := f.items[id] if !ok { // Item may have been deleted subsequently. continue @@ -506,10 +498,10 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error { deletedObj, exists, err := f.knownObjects.GetByKey(k) if err != nil { deletedObj = nil - glog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k) + klog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k) } else if !exists { deletedObj = nil - glog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k) + klog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k) } queuedDeletions++ if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil { @@ -553,10 +545,10 @@ func (f *DeltaFIFO) syncKey(key string) error { func (f *DeltaFIFO) syncKeyLocked(key string) error { obj, exists, err := f.knownObjects.GetByKey(key) if err != nil { - glog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, key) + klog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, key) return nil } else if !exists { - glog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", key) + klog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", key) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/expiration_cache.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/expiration_cache.go index fa88fc407e39..b38fe70b9566 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/expiration_cache.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/expiration_cache.go @@ -20,8 +20,8 @@ import ( "sync" "time" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/klog" ) // ExpirationCache implements the store interface @@ -95,7 +95,7 @@ func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) { return nil, false } if c.expirationPolicy.IsExpired(timestampedItem) { - glog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.obj) + klog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.obj) c.cacheStorage.Delete(key) return nil, false } @@ -179,7 +179,7 @@ func (c *ExpirationCache) Delete(obj interface{}) error { func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) error { c.expirationLock.Lock() defer c.expirationLock.Unlock() - items := map[string]interface{}{} + items := make(map[string]interface{}, len(list)) ts := c.clock.Now() for _, item := range list { key, err := c.keyFunc(item) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/fifo.go 
b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/fifo.go index e05c01ee2960..508c5530c36f 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/fifo.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/fifo.go @@ -297,7 +297,7 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) { // after calling this function. f's queue is reset, too; upon return, it // will contain the items in the map, in no particular order. func (f *FIFO) Replace(list []interface{}, resourceVersion string) error { - items := map[string]interface{}{} + items := make(map[string]interface{}, len(list)) for _, item := range list { key, err := f.keyFunc(item) if err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/heap.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/heap.go index 78e492455ea6..7357ff97a1f4 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/heap.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/heap.go @@ -204,7 +204,7 @@ func (h *Heap) AddIfNotPresent(obj interface{}) error { return nil } -// addIfNotPresentLocked assumes the lock is already held and adds the the provided +// addIfNotPresentLocked assumes the lock is already held and adds the provided // item to the queue if it does not already exist. func (h *Heap) addIfNotPresentLocked(key string, obj interface{}) { if _, exists := h.data.items[key]; exists { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/listers.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/listers.go index 27d51a6b3879..ce377329c7f1 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/listers.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/listers.go @@ -17,7 +17,7 @@ limitations under the License. package cache import ( - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -60,7 +60,7 @@ func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selec items, err := indexer.Index(NamespaceIndex, &metav1.ObjectMeta{Namespace: namespace}) if err != nil { // Ignore error; do slow search without index. 
- glog.Warningf("can not retrieve list of objects using index : %v", err) + klog.Warningf("can not retrieve list of objects using index : %v", err) for _, m := range indexer.List() { metadata, err := meta.Accessor(m) if err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/mutation_cache.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/mutation_cache.go index cbb6434ebde2..4c6686e918c1 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/mutation_cache.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/mutation_cache.go @@ -22,7 +22,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" @@ -156,7 +156,7 @@ func (c *mutationCache) ByIndex(name string, indexKey string) ([]interface{}, er } elements, err := fn(updated) if err != nil { - glog.V(4).Infof("Unable to calculate an index entry for mutation cache entry %s: %v", key, err) + klog.V(4).Infof("Unable to calculate an index entry for mutation cache entry %s: %v", key, err) continue } for _, inIndex := range elements { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/mutation_detector.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/mutation_detector.go index e2aa44848408..adb5b8be8af4 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/mutation_detector.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/mutation_detector.go @@ -24,7 +24,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/diff" @@ -45,7 +45,7 @@ func NewCacheMutationDetector(name string) CacheMutationDetector { if !mutationDetectionEnabled { return dummyMutationDetector{} } - glog.Warningln("Mutation detector is enabled, this will result in memory leakage.") + klog.Warningln("Mutation detector is enabled, this will result in memory leakage.") return &defaultCacheMutationDetector{name: name, period: 1 * time.Second} } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/reflector.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/reflector.go index 9ee7efcbbd82..12e2a3342212 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -31,7 +31,6 @@ import ( "syscall" "time" - "github.com/golang/glog" apierrs "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -41,6 +40,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" + "k8s.io/klog" ) // Reflector watches a specified resource and causes all changes to be reflected in the given store. @@ -128,7 +128,7 @@ var internalPackages = []string{"client-go/tools/cache/"} // Run starts a watch and handles watch events. Will restart the watch if it is closed. // Run will exit when stopCh is closed. func (r *Reflector) Run(stopCh <-chan struct{}) { - glog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name) + klog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name) wait.Until(func() { if err := r.ListAndWatch(stopCh); err != nil { utilruntime.HandleError(err) @@ -166,7 +166,7 @@ func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) { // and then use the resource version to watch. 
// It returns error if ListAndWatch didn't even try to initialize watch. func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { - glog.V(3).Infof("Listing and watching %v from %s", r.expectedType, r.name) + klog.V(3).Infof("Listing and watching %v from %s", r.expectedType, r.name) var resourceVersion string // Explicitly set "0" as resource version - it's fine for the List() @@ -212,7 +212,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { return } if r.ShouldResync == nil || r.ShouldResync() { - glog.V(4).Infof("%s: forcing resync", r.name) + klog.V(4).Infof("%s: forcing resync", r.name) if err := r.store.Resync(); err != nil { resyncerrc <- err return @@ -246,7 +246,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { case io.EOF: // watch closed normally case io.ErrUnexpectedEOF: - glog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedType, err) + klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedType, err) default: utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedType, err)) } @@ -267,7 +267,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { if err := r.watchHandler(w, &resourceVersion, resyncerrc, stopCh); err != nil { if err != errorStopRequested { - glog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err) + klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err) } return nil } @@ -354,7 +354,7 @@ loop: r.metrics.numberOfShortWatches.Inc() return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name) } - glog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount) + klog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/shared_informer.go index 5f8c507f9e9d..e91fc9e95583 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/shared_informer.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -28,7 +28,7 @@ import ( "k8s.io/client-go/util/buffer" "k8s.io/client-go/util/retry" - "github.com/golang/glog" + "k8s.io/klog" ) // SharedInformer has a shared data cache and is capable of distributing notifications for changes @@ -86,7 +86,7 @@ func NewSharedIndexInformer(lw ListerWatcher, objType runtime.Object, defaultEve resyncCheckPeriod: defaultEventHandlerResyncPeriod, defaultEventHandlerResyncPeriod: defaultEventHandlerResyncPeriod, cacheMutationDetector: NewCacheMutationDetector(fmt.Sprintf("%T", objType)), - clock: realClock, + clock: realClock, } return sharedIndexInformer } @@ -116,11 +116,11 @@ func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool }, stopCh) if err != nil { - glog.V(2).Infof("stop requested") + klog.V(2).Infof("stop requested") return false } - glog.V(4).Infof("caches populated") + klog.V(4).Infof("caches populated") return true } @@ -279,11 +279,11 @@ func determineResyncPeriod(desired, check time.Duration) time.Duration { return desired } if check == 0 { - glog.Warningf("The specified resyncPeriod %v is invalid because this shared informer doesn't support resyncing", desired) + klog.Warningf("The specified resyncPeriod %v is invalid because this shared informer doesn't support 
resyncing", desired) return 0 } if desired < check { - glog.Warningf("The specified resyncPeriod %v is being increased to the minimum resyncCheckPeriod %v", desired, check) + klog.Warningf("The specified resyncPeriod %v is being increased to the minimum resyncCheckPeriod %v", desired, check) return check } return desired @@ -296,19 +296,19 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv defer s.startedLock.Unlock() if s.stopped { - glog.V(2).Infof("Handler %v was not added to shared informer because it has stopped already", handler) + klog.V(2).Infof("Handler %v was not added to shared informer because it has stopped already", handler) return } if resyncPeriod > 0 { if resyncPeriod < minimumResyncPeriod { - glog.Warningf("resyncPeriod %d is too small. Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod) + klog.Warningf("resyncPeriod %d is too small. Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod) resyncPeriod = minimumResyncPeriod } if resyncPeriod < s.resyncCheckPeriod { if s.started { - glog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod) + klog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod) resyncPeriod = s.resyncCheckPeriod } else { // if the event handler's resyncPeriod is smaller than the current resyncCheckPeriod, update diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/store.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/store.go index 4958987f0e73..fc844efe64d6 100755 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/store.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/cache/store.go @@ -210,7 +210,7 @@ func (c *cache) GetByKey(key string) (item interface{}, exists bool, err error) // 'c' takes ownership of the list, you should not reference the list again // after calling this function. 
func (c *cache) Replace(list []interface{}, resourceVersion string) error { - items := map[string]interface{}{} + items := make(map[string]interface{}, len(list)) for _, item := range list { key, err := c.keyFunc(item) if err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/BUILD index 59a860ffb59c..8746085e1a11 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/BUILD @@ -22,9 +22,9 @@ go_test( "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library", "//staging/src/k8s.io/client-go/tools/clientcmd/api/latest:go_default_library", - "//vendor/github.com/ghodss/yaml:go_default_library", "//vendor/github.com/imdario/mergo:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/sigs.k8s.io/yaml:go_default_library", ], ) @@ -54,10 +54,10 @@ go_library( "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library", "//staging/src/k8s.io/client-go/tools/clientcmd/api/latest:go_default_library", "//staging/src/k8s.io/client-go/util/homedir:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/imdario/mergo:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/BUILD index 222cec2a545b..ce7dac4fb52a 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/BUILD @@ -13,7 +13,7 @@ go_test( "types_test.go", ], embed = [":go_default_library"], - deps = ["//vendor/github.com/ghodss/yaml:go_default_library"], + deps = ["//vendor/sigs.k8s.io/yaml:go_default_library"], ) go_library( diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go index 0a081871ac82..5871575a6693 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go @@ -15,4 +15,5 @@ limitations under the License. */ // +k8s:deepcopy-gen=package + package api diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go index 9750cf73acca..cbf29ccf24da 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go @@ -15,4 +15,5 @@ limitations under the License. 
*/ // +k8s:deepcopy-gen=package + package v1 diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/client_config.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/client_config.go index b8927f71087e..dea229c91825 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/client_config.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/client_config.go @@ -24,8 +24,8 @@ import ( "os" "strings" - "github.com/golang/glog" "github.com/imdario/mergo" + "k8s.io/klog" restclient "k8s.io/client-go/rest" clientauth "k8s.io/client-go/tools/auth" @@ -229,11 +229,11 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI if len(configAuthInfo.Token) > 0 { mergedConfig.BearerToken = configAuthInfo.Token } else if len(configAuthInfo.TokenFile) > 0 { - tokenBytes, err := ioutil.ReadFile(configAuthInfo.TokenFile) - if err != nil { + ts := restclient.NewCachedFileTokenSource(configAuthInfo.TokenFile) + if _, err := ts.Token(); err != nil { return nil, err } - mergedConfig.BearerToken = string(tokenBytes) + mergedConfig.WrapTransport = restclient.TokenSourceWrapTransport(ts) } if len(configAuthInfo.Impersonate) > 0 { mergedConfig.Impersonate = restclient.ImpersonationConfig{ @@ -545,12 +545,12 @@ func (config *inClusterClientConfig) Possible() bool { // to the default config. func BuildConfigFromFlags(masterUrl, kubeconfigPath string) (*restclient.Config, error) { if kubeconfigPath == "" && masterUrl == "" { - glog.Warningf("Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.") + klog.Warningf("Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.") kubeconfig, err := restclient.InClusterConfig() if err == nil { return kubeconfig, nil } - glog.Warning("error creating inClusterConfig, falling back to default config: ", err) + klog.Warning("error creating inClusterConfig, falling back to default config: ", err) } return NewNonInteractiveDeferredLoadingClientConfig( &ClientConfigLoadingRules{ExplicitPath: kubeconfigPath}, diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/config.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/config.go index 9495849b0928..b8cc39688219 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/config.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/config.go @@ -24,7 +24,7 @@ import ( "reflect" "sort" - "github.com/golang/glog" + "k8s.io/klog" restclient "k8s.io/client-go/rest" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" @@ -483,7 +483,7 @@ func getConfigFromFile(filename string) (*clientcmdapi.Config, error) { func GetConfigFromFileOrDie(filename string) *clientcmdapi.Config { config, err := getConfigFromFile(filename) if err != nil { - glog.FatalDepth(1, err) + klog.FatalDepth(1, err) } return config diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/loader.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/loader.go index 6038c8d457a1..7e928a918563 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/loader.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/loader.go @@ -27,8 +27,8 @@ import ( goruntime "runtime" "strings" - "github.com/golang/glog" "github.com/imdario/mergo" + "k8s.io/klog" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -356,7 +356,7 @@ func LoadFromFile(filename string) (*clientcmdapi.Config, error) { if 
err != nil { return nil, err } - glog.V(6).Infoln("Config loaded from file", filename) + klog.V(6).Infoln("Config loaded from file", filename) // set LocationOfOrigin on every Cluster, User, and Context for key, obj := range config.AuthInfos { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go index 05038133b6b8..76380db82ab9 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go @@ -20,7 +20,7 @@ import ( "io" "sync" - "github.com/golang/glog" + "k8s.io/klog" restclient "k8s.io/client-go/rest" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" @@ -119,7 +119,7 @@ func (config *DeferredLoadingClientConfig) ClientConfig() (*restclient.Config, e // check for in-cluster configuration and use it if config.icc.Possible() { - glog.V(4).Infof("Using in-cluster configuration") + klog.V(4).Infof("Using in-cluster configuration") return config.icc.ClientConfig() } @@ -156,7 +156,7 @@ func (config *DeferredLoadingClientConfig) Namespace() (string, bool, error) { } } - glog.V(4).Infof("Using in-cluster namespace") + klog.V(4).Infof("Using in-cluster namespace") // allow the namespace from the service account token directory to be used. return config.icc.Namespace() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/BUILD index 8d53bfe508af..bed0f89fee70 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/BUILD @@ -8,28 +8,36 @@ load( go_library( name = "go_default_library", - srcs = ["leaderelection.go"], + srcs = [ + "healthzadaptor.go", + "leaderelection.go", + ], importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/tools/leaderelection", importpath = "k8s.io/client-go/tools/leaderelection", deps = [ "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) go_test( name = "go_default_test", - srcs = ["leaderelection_test.go"], + srcs = [ + "healthzadaptor_test.go", + "leaderelection_test.go", + ], embed = [":go_default_library"], deps = [ "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake:go_default_library", "//staging/src/k8s.io/client-go/testing:go_default_library", "//staging/src/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/healthzadaptor.go 
b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/healthzadaptor.go new file mode 100644 index 000000000000..b9353729190d --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/healthzadaptor.go @@ -0,0 +1,69 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package leaderelection + +import ( + "net/http" + "sync" + "time" +) + +// HealthzAdaptor associates the /healthz endpoint with the LeaderElection object. +// It helps deal with the /healthz endpoint being set up prior to the LeaderElection. +// This contains the code needed to act as an adaptor between the leader +// election code and the health check code. It allows us to provide health +// status about the leader election, most specifically whether the leader +// has failed to renew the lease without exiting the process. In that case we should +// report not healthy and rely on the kubelet to take down the process. +type HealthzAdaptor struct { + pointerLock sync.Mutex + le *LeaderElector + timeout time.Duration +} + +// Name returns the name of the health check we are implementing. +func (l *HealthzAdaptor) Name() string { + return "leaderElection" +} + +// Check is called by the healthz endpoint handler. +// It fails (returns an error) if we own the lease but have not been able to renew it. +func (l *HealthzAdaptor) Check(req *http.Request) error { + l.pointerLock.Lock() + defer l.pointerLock.Unlock() + if l.le == nil { + return nil + } + return l.le.Check(l.timeout) +} + +// SetLeaderElection ties a leader election object to a HealthzAdaptor. +func (l *HealthzAdaptor) SetLeaderElection(le *LeaderElector) { + l.pointerLock.Lock() + defer l.pointerLock.Unlock() + l.le = le +} + +// NewLeaderHealthzAdaptor creates a basic healthz adaptor to monitor a leader election. +// timeout is the grace period allowed beyond lease expiry; +// checks within that period after the lease expires will still return healthy.
+func NewLeaderHealthzAdaptor(timeout time.Duration) *HealthzAdaptor { + result := &HealthzAdaptor{ + timeout: timeout, + } + return result +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go index 9a357b2ac1f8..2096a5996bfe 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go @@ -56,11 +56,12 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" rl "k8s.io/client-go/tools/leaderelection/resourcelock" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -90,6 +91,7 @@ func NewLeaderElector(lec LeaderElectionConfig) (*LeaderElector, error) { } return &LeaderElector{ config: lec, + clock: clock.RealClock{}, }, nil } @@ -111,6 +113,13 @@ type LeaderElectionConfig struct { // Callbacks are callbacks that are triggered during certain lifecycle // events of the LeaderElector Callbacks LeaderCallbacks + + // WatchDog is the associated health checker + // WatchDog may be nil if it is not needed/configured. + WatchDog *HealthzAdaptor + + // Name is the name of the resource lock for debugging + Name string } // LeaderCallbacks are callbacks that are triggered during certain @@ -139,6 +148,12 @@ type LeaderElector struct { // value observedRecord.HolderIdentity if the transition has // not yet been reported. reportedLeader string + + // clock is a wrapper around time to allow for less flaky testing + clock clock.Clock + + // name is the name of the resource lock for debugging + name string } // Run starts the leader election loop @@ -163,6 +178,9 @@ func RunOrDie(ctx context.Context, lec LeaderElectionConfig) { if err != nil { panic(err) } + if lec.WatchDog != nil { + lec.WatchDog.SetLeaderElection(le) + } le.Run(ctx) } @@ -184,16 +202,16 @@ func (le *LeaderElector) acquire(ctx context.Context) bool { defer cancel() succeeded := false desc := le.config.Lock.Describe() - glog.Infof("attempting to acquire leader lease %v...", desc) + klog.Infof("attempting to acquire leader lease %v...", desc) wait.JitterUntil(func() { succeeded = le.tryAcquireOrRenew() le.maybeReportTransition() if !succeeded { - glog.V(4).Infof("failed to acquire lease %v", desc) + klog.V(4).Infof("failed to acquire lease %v", desc) return } le.config.Lock.RecordEvent("became leader") - glog.Infof("successfully acquired lease %v", desc) + klog.Infof("successfully acquired lease %v", desc) cancel() }, le.config.RetryPeriod, JitterFactor, true, ctx.Done()) return succeeded @@ -224,11 +242,11 @@ func (le *LeaderElector) renew(ctx context.Context) { le.maybeReportTransition() desc := le.config.Lock.Describe() if err == nil { - glog.V(4).Infof("successfully renewed lease %v", desc) + klog.V(5).Infof("successfully renewed lease %v", desc) return } le.config.Lock.RecordEvent("stopped leading") - glog.Infof("failed to renew lease %v: %v", desc, err) + klog.Infof("failed to renew lease %v: %v", desc, err) cancel() }, le.config.RetryPeriod, ctx.Done()) } @@ -249,26 +267,26 @@ func (le *LeaderElector) tryAcquireOrRenew() bool { oldLeaderElectionRecord, err := le.config.Lock.Get() if err != nil { if !errors.IsNotFound(err) { - glog.Errorf("error retrieving resource lock %v: %v", le.config.Lock.Describe(), err) + klog.Errorf("error
retrieving resource lock %v: %v", le.config.Lock.Describe(), err) return false } if err = le.config.Lock.Create(leaderElectionRecord); err != nil { - glog.Errorf("error initially creating leader election record: %v", err) + klog.Errorf("error initially creating leader election record: %v", err) return false } le.observedRecord = leaderElectionRecord - le.observedTime = time.Now() + le.observedTime = le.clock.Now() return true } // 2. Record obtained, check the Identity & Time if !reflect.DeepEqual(le.observedRecord, *oldLeaderElectionRecord) { le.observedRecord = *oldLeaderElectionRecord - le.observedTime = time.Now() + le.observedTime = le.clock.Now() } if le.observedTime.Add(le.config.LeaseDuration).After(now.Time) && !le.IsLeader() { - glog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity) + klog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity) return false } @@ -283,11 +301,11 @@ func (le *LeaderElector) tryAcquireOrRenew() bool { // update the lock itself if err = le.config.Lock.Update(leaderElectionRecord); err != nil { - glog.Errorf("Failed to update lock: %v", err) + klog.Errorf("Failed to update lock: %v", err) return false } le.observedRecord = leaderElectionRecord - le.observedTime = time.Now() + le.observedTime = le.clock.Now() return true } @@ -300,3 +318,19 @@ func (le *LeaderElector) maybeReportTransition() { go le.config.Callbacks.OnNewLeader(le.reportedLeader) } } + +// Check will determine if the current lease is expired by more than timeout. +func (le *LeaderElector) Check(maxTolerableExpiredLease time.Duration) error { + if !le.IsLeader() { + // Currently not concerned with the case where we are hot standby + return nil + } + // If we are more than maxTolerableExpiredLease past the point where the lease should + // have been renewed, it is time to start reporting ourselves as unhealthy. We should + // have died, but conditions like deadlock can prevent this. (See #70819) + if le.clock.Since(le.observedTime) > le.config.LeaseDuration+maxTolerableExpiredLease { + return fmt.Errorf("failed election to renew leadership on lease %s", le.config.Name) + } + + return nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go index 4ff59560379e..c12daad022f5 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go @@ -80,7 +80,7 @@ func (cml *ConfigMapLock) Create(ler LeaderElectionRecord) error { // Update will update an existing annotation on a given resource.
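
Taken together, the leaderelection hunks above add a liveness watchdog: `RunOrDie` ties an optional `HealthzAdaptor` to the elector, and the new `Check` method reports unhealthy once the observed lease is stale beyond the configured tolerance. A hedged wiring sketch; the resource lock construction is elided (the nil lock is a placeholder only, `RunOrDie` needs a real one), and the durations are illustrative:

```go
package main

import (
	"context"
	"net/http"
	"time"

	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

func main() {
	// Assumed to be built elsewhere (e.g. a ConfigMapLock over a real clientset).
	var lock resourcelock.Interface

	// Tolerate the lease being expired by up to 20s before /healthz fails.
	watchdog := leaderelection.NewLeaderHealthzAdaptor(20 * time.Second)

	go leaderelection.RunOrDie(context.Background(), leaderelection.LeaderElectionConfig{
		Lock:          lock,
		LeaseDuration: 15 * time.Second,
		RenewDeadline: 10 * time.Second,
		RetryPeriod:   2 * time.Second,
		WatchDog:      watchdog, // RunOrDie calls watchdog.SetLeaderElection(le)
		Name:          "example-controller",
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) { /* start work */ },
			OnStoppedLeading: func() { /* stop work */ },
		},
	})

	http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		if err := watchdog.Check(r); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		w.Write([]byte("ok"))
	})
	http.ListenAndServe(":8080", nil)
}
```
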
func (cml *ConfigMapLock) Update(ler LeaderElectionRecord) error { if cml.cm == nil { - return errors.New("endpoint not initialized, call get or create first") + return errors.New("configmap not initialized, call get or create first") } recordBytes, err := json.Marshal(ler) if err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/record/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/tools/record/BUILD index fc1eaf2e6715..2a8546138e30 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/record/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/record/BUILD @@ -50,8 +50,8 @@ go_library( "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/tools/reference:go_default_library", "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/golang/groupcache/lru:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/record/event.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/record/event.go index 168dfa80c56a..2ee69589c6d2 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/record/event.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/record/event.go @@ -33,7 +33,7 @@ import ( "net/http" - "github.com/golang/glog" + "k8s.io/klog" ) const maxTriesPerEvent = 12 @@ -144,7 +144,7 @@ func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrela } tries++ if tries >= maxTriesPerEvent { - glog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event) + klog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event) break } // Randomize the first sleep so that various clients won't all be @@ -194,13 +194,13 @@ func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEv switch err.(type) { case *restclient.RequestConstructionError: // We will construct the request the same next time, so don't keep trying. - glog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err) + klog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err) return true case *errors.StatusError: if errors.IsAlreadyExists(err) { - glog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err) + klog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err) } else { - glog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err) + klog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err) } return true case *errors.UnexpectedObjectError: @@ -209,7 +209,7 @@ func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEv default: // This case includes actual http transport errors. Go ahead and retry. } - glog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err) + klog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err) return false } @@ -256,12 +256,12 @@ type recorderImpl struct { func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations map[string]string, timestamp metav1.Time, eventtype, reason, message string) { ref, err := ref.GetReference(recorder.scheme, object) if err != nil { - glog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message) + klog.Errorf("Could not construct reference to: '%#v' due to: '%v'. 
Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message) return } if !validateEventType(eventtype) { - glog.Errorf("Unsupported event type: '%v'", eventtype) + klog.Errorf("Unsupported event type: '%v'", eventtype) return } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/remotecommand/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/tools/remotecommand/BUILD index 9e0aac987705..043217a3ce37 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/remotecommand/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/remotecommand/BUILD @@ -25,6 +25,7 @@ go_library( srcs = [ "doc.go", "errorstream.go", + "reader.go", "remotecommand.go", "resize.go", "v1.go", @@ -43,7 +44,7 @@ go_library( "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/transport/spdy:go_default_library", "//staging/src/k8s.io/client-go/util/exec:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/remotecommand/reader.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/remotecommand/reader.go new file mode 100644 index 000000000000..d1f1be34c9e9 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/remotecommand/reader.go @@ -0,0 +1,41 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "io" +) + +// readerWrapper delegates to an io.Reader so that only the io.Reader interface is implemented, +// to keep io.Copy from doing things we don't want when copying from the reader to the data stream. +// +// If the Stdin io.Reader provided to remotecommand implements a WriteTo function (like bytes.Buffer does[1]), +// io.Copy calls that method[2] to attempt to write the entire buffer to the stream in one call. +// That results in an oversized call to spdystream.Stream#Write [3], +// which results in a single oversized data frame[4] that is too large. 
+// +// [1] https://golang.org/pkg/bytes/#Buffer.WriteTo +// [2] https://golang.org/pkg/io/#Copy +// [3] https://github.com/kubernetes/kubernetes/blob/90295640ef87db9daa0144c5617afe889e7992b2/vendor/github.com/docker/spdystream/stream.go#L66-L73 +// [4] https://github.com/kubernetes/kubernetes/blob/90295640ef87db9daa0144c5617afe889e7992b2/vendor/github.com/docker/spdystream/spdy/write.go#L302-L304 +type readerWrapper struct { + reader io.Reader +} + +func (r readerWrapper) Read(p []byte) (int, error) { + return r.reader.Read(p) +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go index d2b29861e6ee..892d8d105dc3 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go @@ -22,7 +22,7 @@ import ( "net/http" "net/url" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/httpstream" "k8s.io/apimachinery/pkg/util/remotecommand" @@ -132,7 +132,7 @@ func (e *streamExecutor) Stream(options StreamOptions) error { case remotecommand.StreamProtocolV2Name: streamer = newStreamProtocolV2(options) case "": - glog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name) + klog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name) fallthrough case remotecommand.StreamProtocolV1Name: streamer = newStreamProtocolV1(options) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/remotecommand/v1.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/remotecommand/v1.go index 92dad727f301..4120f1f5f3dd 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/remotecommand/v1.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/remotecommand/v1.go @@ -22,9 +22,9 @@ import ( "io/ioutil" "net/http" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/klog" ) // streamProtocolV1 implements the first version of the streaming exec & attach @@ -53,10 +53,10 @@ func (p *streamProtocolV1) stream(conn streamCreator) error { errorChan := make(chan error) cp := func(s string, dst io.Writer, src io.Reader) { - glog.V(6).Infof("Copying %s", s) - defer glog.V(6).Infof("Done copying %s", s) + klog.V(6).Infof("Copying %s", s) + defer klog.V(6).Infof("Done copying %s", s) if _, err := io.Copy(dst, src); err != nil && err != io.EOF { - glog.Errorf("Error copying %s: %v", s, err) + klog.Errorf("Error copying %s: %v", s, err) } if s == v1.StreamTypeStdout || s == v1.StreamTypeStderr { doneChan <- struct{}{} @@ -127,7 +127,7 @@ func (p *streamProtocolV1) stream(conn streamCreator) error { // because stdin is not closed until the process exits. If we try to call // stdin.Close(), it returns no error but doesn't unblock the copy. It will // exit when the process exits, instead. 
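Editorial aside on `readerWrapper` above: `io.Copy` type-asserts its source to `io.WriterTo` and, when that succeeds (as it does for `*bytes.Buffer`), delegates the whole copy to a single `WriteTo` call. Hiding the method behind a plain `io.Reader` forces `io.Copy` back onto its bounded Read/Write loop. A standalone sketch that mirrors (rather than reuses) the vendored type:

```go
package main

import (
	"bytes"
	"io"
	"os"
)

// readerWrapper exposes only Read, so io.Copy cannot see the underlying
// reader's WriteTo method and falls back to its internal 32 KiB buffer loop.
type readerWrapper struct{ reader io.Reader }

func (r readerWrapper) Read(p []byte) (int, error) { return r.reader.Read(p) }

func main() {
	// Direct copy: io.Copy detects bytes.Buffer's WriteTo and issues one
	// Write call for the entire buffer, however large it is.
	io.Copy(os.Stdout, bytes.NewBufferString("payload\n"))

	// Wrapped copy: the data now moves in bounded chunks, which is what
	// keeps any single spdystream data frame within limits.
	io.Copy(os.Stdout, readerWrapper{bytes.NewBufferString("payload\n")})
}
```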
- go cp(v1.StreamTypeStdin, p.remoteStdin, p.Stdin) + go cp(v1.StreamTypeStdin, p.remoteStdin, readerWrapper{p.Stdin}) } waitCount := 0 diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/remotecommand/v2.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/remotecommand/v2.go index b74ae8de2208..4b0001502a17 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/remotecommand/v2.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/remotecommand/v2.go @@ -101,7 +101,7 @@ func (p *streamProtocolV2) copyStdin() { // the executed command will remain running. defer once.Do(func() { p.remoteStdin.Close() }) - if _, err := io.Copy(p.remoteStdin, p.Stdin); err != nil { + if _, err := io.Copy(p.remoteStdin, readerWrapper{p.Stdin}); err != nil { runtime.HandleError(err) } }() diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/watch/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/tools/watch/BUILD index d1994ebbbef7..9f7a97cd4aa1 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/watch/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/watch/BUILD @@ -16,7 +16,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/tools/watch/until.go b/cluster-autoscaler/vendor/k8s.io/client-go/tools/watch/until.go index 933578843976..aa4bbc21169f 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/tools/watch/until.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/tools/watch/until.go @@ -22,13 +22,13 @@ import ( "fmt" "time" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/tools/cache" + "k8s.io/klog" ) // PreconditionFunc returns true if the condition has been reached, false if it has not been reached yet, @@ -135,7 +135,7 @@ func UntilWithSync(ctx context.Context, lw cache.ListerWatcher, objType runtime. 
func ContextWithOptionalTimeout(parent context.Context, timeout time.Duration) (context.Context, context.CancelFunc) { if timeout < 0 { // This should be handled in validation - glog.Errorf("Timeout for context shall not be negative!") + klog.Errorf("Timeout for context shall not be negative!") timeout = 0 } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/transport/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/transport/BUILD index 05b0e604ac28..dc1800681d3f 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/transport/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/transport/BUILD @@ -28,7 +28,7 @@ go_library( importpath = "k8s.io/client-go/transport", deps = [ "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/transport/round_trippers.go b/cluster-autoscaler/vendor/k8s.io/client-go/transport/round_trippers.go index 0ebcbbc80373..da417cf96ea0 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/transport/round_trippers.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/transport/round_trippers.go @@ -22,7 +22,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" utilnet "k8s.io/apimachinery/pkg/util/net" ) @@ -62,13 +62,13 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip // DebugWrappers wraps a round tripper and logs based on the current log level. func DebugWrappers(rt http.RoundTripper) http.RoundTripper { switch { - case bool(glog.V(9)): + case bool(klog.V(9)): rt = newDebuggingRoundTripper(rt, debugCurlCommand, debugURLTiming, debugResponseHeaders) - case bool(glog.V(8)): + case bool(klog.V(8)): rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus, debugResponseHeaders) - case bool(glog.V(7)): + case bool(klog.V(7)): rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus) - case bool(glog.V(6)): + case bool(klog.V(6)): rt = newDebuggingRoundTripper(rt, debugURLTiming) } @@ -138,7 +138,7 @@ func (rt *authProxyRoundTripper) CancelRequest(req *http.Request) { if canceler, ok := rt.rt.(requestCanceler); ok { canceler.CancelRequest(req) } else { - glog.Errorf("CancelRequest not implemented by %T", rt.rt) + klog.Errorf("CancelRequest not implemented by %T", rt.rt) } } @@ -166,7 +166,7 @@ func (rt *userAgentRoundTripper) CancelRequest(req *http.Request) { if canceler, ok := rt.rt.(requestCanceler); ok { canceler.CancelRequest(req) } else { - glog.Errorf("CancelRequest not implemented by %T", rt.rt) + klog.Errorf("CancelRequest not implemented by %T", rt.rt) } } @@ -197,7 +197,7 @@ func (rt *basicAuthRoundTripper) CancelRequest(req *http.Request) { if canceler, ok := rt.rt.(requestCanceler); ok { canceler.CancelRequest(req) } else { - glog.Errorf("CancelRequest not implemented by %T", rt.rt) + klog.Errorf("CancelRequest not implemented by %T", rt.rt) } } @@ -257,7 +257,7 @@ func (rt *impersonatingRoundTripper) CancelRequest(req *http.Request) { if canceler, ok := rt.delegate.(requestCanceler); ok { canceler.CancelRequest(req) } else { - glog.Errorf("CancelRequest not implemented by %T", rt.delegate) + klog.Errorf("CancelRequest not implemented by %T", rt.delegate) } } @@ -288,7 +288,7 @@ func (rt *bearerAuthRoundTripper) CancelRequest(req *http.Request) { if canceler, ok := rt.rt.(requestCanceler); ok { canceler.CancelRequest(req) } else { - 
glog.Errorf("CancelRequest not implemented by %T", rt.rt) + klog.Errorf("CancelRequest not implemented by %T", rt.rt) } } @@ -372,7 +372,7 @@ func (rt *debuggingRoundTripper) CancelRequest(req *http.Request) { if canceler, ok := rt.delegatedRoundTripper.(requestCanceler); ok { canceler.CancelRequest(req) } else { - glog.Errorf("CancelRequest not implemented by %T", rt.delegatedRoundTripper) + klog.Errorf("CancelRequest not implemented by %T", rt.delegatedRoundTripper) } } @@ -380,17 +380,17 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e reqInfo := newRequestInfo(req) if rt.levels[debugJustURL] { - glog.Infof("%s %s", reqInfo.RequestVerb, reqInfo.RequestURL) + klog.Infof("%s %s", reqInfo.RequestVerb, reqInfo.RequestURL) } if rt.levels[debugCurlCommand] { - glog.Infof("%s", reqInfo.toCurl()) + klog.Infof("%s", reqInfo.toCurl()) } if rt.levels[debugRequestHeaders] { - glog.Infof("Request Headers:") + klog.Infof("Request Headers:") for key, values := range reqInfo.RequestHeaders { for _, value := range values { - glog.Infof(" %s: %s", key, value) + klog.Infof(" %s: %s", key, value) } } } @@ -402,16 +402,16 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e reqInfo.complete(response, err) if rt.levels[debugURLTiming] { - glog.Infof("%s %s %s in %d milliseconds", reqInfo.RequestVerb, reqInfo.RequestURL, reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) + klog.Infof("%s %s %s in %d milliseconds", reqInfo.RequestVerb, reqInfo.RequestURL, reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) } if rt.levels[debugResponseStatus] { - glog.Infof("Response Status: %s in %d milliseconds", reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) + klog.Infof("Response Status: %s in %d milliseconds", reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) } if rt.levels[debugResponseHeaders] { - glog.Infof("Response Headers:") + klog.Infof("Response Headers:") for key, values := range reqInfo.ResponseHeaders { for _, value := range values { - glog.Infof(" %s: %s", key, value) + klog.Infof(" %s: %s", key, value) } } } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/transport/spdy/spdy.go b/cluster-autoscaler/vendor/k8s.io/client-go/transport/spdy/spdy.go index e0eb468ba369..53cc7ee18c52 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/transport/spdy/spdy.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/transport/spdy/spdy.go @@ -38,7 +38,7 @@ func RoundTripperFor(config *restclient.Config) (http.RoundTripper, Upgrader, er if err != nil { return nil, nil, err } - upgradeRoundTripper := spdy.NewRoundTripper(tlsConfig, true) + upgradeRoundTripper := spdy.NewRoundTripper(tlsConfig, true, false) wrapper, err := restclient.HTTPWrappersForConfig(config, upgradeRoundTripper) if err != nil { return nil, nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/util/cert/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/util/cert/BUILD index 4db41c73f295..2a42b716eff1 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/util/cert/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/util/cert/BUILD @@ -37,9 +37,6 @@ filegroup( filegroup( name = "all-srcs", - srcs = [ - ":package-srcs", - "//staging/src/k8s.io/client-go/util/cert/triple:all-srcs", - ], + srcs = [":package-srcs"], tags = ["automanaged"], ) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/util/cert/OWNERS 
b/cluster-autoscaler/vendor/k8s.io/client-go/util/cert/OWNERS new file mode 100644 index 000000000000..470b7a1c92d1 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/client-go/util/cert/OWNERS @@ -0,0 +1,7 @@ +approvers: +- sig-auth-certificates-approvers +reviewers: +- sig-auth-certificates-reviewers +labels: +- sig/auth + diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/util/cert/cert.go b/cluster-autoscaler/vendor/k8s.io/client-go/util/cert/cert.go index fe2158b238de..3429c82cdff9 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/util/cert/cert.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/util/cert/cert.go @@ -18,6 +18,7 @@ package cert import ( "bytes" + "crypto" "crypto/ecdsa" "crypto/elliptic" "crypto/rand" @@ -64,7 +65,7 @@ func NewPrivateKey() (*rsa.PrivateKey, error) { } // NewSelfSignedCACert creates a CA certificate -func NewSelfSignedCACert(cfg Config, key *rsa.PrivateKey) (*x509.Certificate, error) { +func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, error) { now := time.Now() tmpl := x509.Certificate{ SerialNumber: new(big.Int).SetInt64(0), @@ -76,7 +77,7 @@ func NewSelfSignedCACert(cfg Config, key *rsa.PrivateKey) (*x509.Certificate, er NotAfter: now.Add(duration365d * 10).UTC(), KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, BasicConstraintsValid: true, - IsCA: true, + IsCA: true, } certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key) @@ -87,7 +88,7 @@ func NewSelfSignedCACert(cfg Config, key *rsa.PrivateKey) (*x509.Certificate, er } // NewSignedCert creates a signed certificate using the given CA certificate and key -func NewSignedCert(cfg Config, key *rsa.PrivateKey, caCert *x509.Certificate, caKey *rsa.PrivateKey) (*x509.Certificate, error) { +func NewSignedCert(cfg Config, key crypto.Signer, caCert *x509.Certificate, caKey crypto.Signer) (*x509.Certificate, error) { serial, err := rand.Int(rand.Reader, new(big.Int).SetInt64(math.MaxInt64)) if err != nil { return nil, err @@ -187,7 +188,7 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, BasicConstraintsValid: true, - IsCA: true, + IsCA: true, } caDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &caTemplate, &caTemplate, &caKey.PublicKey, caKey) @@ -259,34 +260,6 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a return certBuffer.Bytes(), keyBuffer.Bytes(), nil } -// FormatBytesCert receives byte array certificate and formats in human-readable format -func FormatBytesCert(cert []byte) (string, error) { - block, _ := pem.Decode(cert) - c, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return "", fmt.Errorf("failed to parse certificate [%v]", err) - } - return FormatCert(c), nil -} - -// FormatCert receives certificate and formats in human-readable format -func FormatCert(c *x509.Certificate) string { - var ips []string - for _, ip := range c.IPAddresses { - ips = append(ips, ip.String()) - } - altNames := append(ips, c.DNSNames...) 
- res := fmt.Sprintf( - "Issuer: CN=%s | Subject: CN=%s | CA: %t\n", - c.Issuer.CommonName, c.Subject.CommonName, c.IsCA, - ) - res += fmt.Sprintf("Not before: %s Not After: %s", c.NotBefore, c.NotAfter) - if len(altNames) > 0 { - res += fmt.Sprintf("\nAlternate Names: %v", altNames) - } - return res -} - func ipsToStrings(ips []net.IP) []string { ss := make([]string, 0, len(ips)) for _, ip := range ips { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/BUILD index 51e48a596e1c..f204883af919 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/BUILD @@ -24,7 +24,6 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library", - "//staging/src/k8s.io/client-go/util/cert:go_default_library", ], ) @@ -46,7 +45,7 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/util/cert:go_default_library", "//staging/src/k8s.io/client-go/util/certificate/csr:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/OWNERS b/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/OWNERS index 2dce803b34dd..470b7a1c92d1 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/OWNERS @@ -1,8 +1,7 @@ -reviewers: -- mikedanese -- liggit -- smarterclayton approvers: -- mikedanese -- liggit -- smarterclayton +- sig-auth-certificates-approvers +reviewers: +- sig-auth-certificates-reviewers +labels: +- sig/auth + diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/certificate_manager.go b/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/certificate_manager.go index 7b07b26a3ef0..ed74559e203c 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/certificate_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/certificate_manager.go @@ -28,7 +28,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" certificates "k8s.io/api/certificates/v1beta1" "k8s.io/apimachinery/pkg/api/errors" @@ -227,17 +227,17 @@ func (m *manager) Start() { // signing API, so don't start the certificate manager if we don't have a // client. 
if m.certSigningRequestClient == nil { - glog.V(2).Infof("Certificate rotation is not enabled, no connection to the apiserver.") + klog.V(2).Infof("Certificate rotation is not enabled, no connection to the apiserver.") return } - glog.V(2).Infof("Certificate rotation is enabled.") + klog.V(2).Infof("Certificate rotation is enabled.") templateChanged := make(chan struct{}) go wait.Forever(func() { deadline := m.nextRotationDeadline() if sleepInterval := deadline.Sub(time.Now()); sleepInterval > 0 { - glog.V(2).Infof("Waiting %v for next certificate rotation", sleepInterval) + klog.V(2).Infof("Waiting %v for next certificate rotation", sleepInterval) timer := time.NewTimer(sleepInterval) defer timer.Stop() @@ -250,7 +250,7 @@ func (m *manager) Start() { // if the template now matches what we last requested, restart the rotation deadline loop return } - glog.V(2).Infof("Certificate template changed, rotating") + klog.V(2).Infof("Certificate template changed, rotating") } } @@ -274,7 +274,7 @@ func (m *manager) Start() { if m.dynamicTemplate { go wait.Forever(func() { // check if the current template matches what we last requested - if !reflect.DeepEqual(m.getLastRequest(), m.getTemplate()) { + if !m.certSatisfiesTemplate() && !reflect.DeepEqual(m.getLastRequest(), m.getTemplate()) { // if the template is different, queue up an interrupt of the rotation deadline loop. // if we've requested a CSR that matches the new template by the time the interrupt is handled, the interrupt is disregarded. templateChanged <- struct{}{} @@ -321,7 +321,7 @@ func getCurrentCertificateOrBootstrap( if _, err := store.Update(bootstrapCertificatePEM, bootstrapKeyPEM); err != nil { utilruntime.HandleError(fmt.Errorf("Unable to set the cert/key pair to the bootstrap certificate: %v", err)) } else { - glog.V(4).Infof("Updated the store to contain the initial bootstrap certificate") + klog.V(4).Infof("Updated the store to contain the initial bootstrap certificate") } return &bootstrapCert, true, nil @@ -333,7 +333,7 @@ func getCurrentCertificateOrBootstrap( // This method also keeps track of "server health" by interpreting the responses it gets // from the server on the various calls it makes. func (m *manager) rotateCerts() (bool, error) { - glog.V(2).Infof("Rotating certificates") + klog.V(2).Infof("Rotating certificates") template, csrPEM, keyPEM, privateKey, err := m.generateCSR() if err != nil { @@ -389,35 +389,30 @@ func (m *manager) rotateCerts() (bool, error) { return true, nil } -// nextRotationDeadline returns a value for the threshold at which the -// current certificate should be rotated, 80%+/-10% of the expiration of the -// certificate. -func (m *manager) nextRotationDeadline() time.Time { - // forceRotation is not protected by locks - if m.forceRotation { - m.forceRotation = false - return time.Now() - } - - m.certAccessLock.RLock() - defer m.certAccessLock.RUnlock() +// Check that the current certificate on disk satisfies the requests from the +// current template. +// +// Note that extra items in the certificate's SAN or orgs that don't exist in +// the template will not trigger a renewal. +// +// Requires certAccessLock to be locked. 
+func (m *manager) certSatisfiesTemplateLocked() bool { if m.cert == nil { - return time.Now() + return false } - // Ensure the currently held certificate satisfies the requested subject CN and SANs if template := m.getTemplate(); template != nil { if template.Subject.CommonName != m.cert.Leaf.Subject.CommonName { - glog.V(2).Infof("Current certificate CN (%s) does not match requested CN (%s), rotating now", m.cert.Leaf.Subject.CommonName, template.Subject.CommonName) - return time.Now() + klog.V(2).Infof("Current certificate CN (%s) does not match requested CN (%s)", m.cert.Leaf.Subject.CommonName, template.Subject.CommonName) + return false } currentDNSNames := sets.NewString(m.cert.Leaf.DNSNames...) desiredDNSNames := sets.NewString(template.DNSNames...) missingDNSNames := desiredDNSNames.Difference(currentDNSNames) if len(missingDNSNames) > 0 { - glog.V(2).Infof("Current certificate is missing requested DNS names %v, rotating now", missingDNSNames.List()) - return time.Now() + klog.V(2).Infof("Current certificate is missing requested DNS names %v", missingDNSNames.List()) + return false } currentIPs := sets.NewString() @@ -430,16 +425,50 @@ func (m *manager) nextRotationDeadline() time.Time { } missingIPs := desiredIPs.Difference(currentIPs) if len(missingIPs) > 0 { - glog.V(2).Infof("Current certificate is missing requested IP addresses %v, rotating now", missingIPs.List()) - return time.Now() + klog.V(2).Infof("Current certificate is missing requested IP addresses %v", missingIPs.List()) + return false } + + currentOrgs := sets.NewString(m.cert.Leaf.Subject.Organization...) + desiredOrgs := sets.NewString(template.Subject.Organization...) + missingOrgs := desiredOrgs.Difference(currentOrgs) + if len(missingOrgs) > 0 { + klog.V(2).Infof("Current certificate is missing requested orgs %v", missingOrgs.List()) + return false + } + } + + return true +} + +func (m *manager) certSatisfiesTemplate() bool { + m.certAccessLock.RLock() + defer m.certAccessLock.RUnlock() + return m.certSatisfiesTemplateLocked() +} + +// nextRotationDeadline returns a value for the threshold at which the +// current certificate should be rotated, 80%+/-10% of the expiration of the +// certificate. 
+func (m *manager) nextRotationDeadline() time.Time { + // forceRotation is not protected by locks + if m.forceRotation { + m.forceRotation = false + return time.Now() + } + + m.certAccessLock.RLock() + defer m.certAccessLock.RUnlock() + + if !m.certSatisfiesTemplateLocked() { + return time.Now() } notAfter := m.cert.Leaf.NotAfter totalDuration := float64(notAfter.Sub(m.cert.Leaf.NotBefore)) deadline := m.cert.Leaf.NotBefore.Add(jitteryDuration(totalDuration)) - glog.V(2).Infof("Certificate expiration is %v, rotation deadline is %v", notAfter, deadline) + klog.V(2).Infof("Certificate expiration is %v, rotation deadline is %v", notAfter, deadline) if m.certificateExpiration != nil { m.certificateExpiration.Set(float64(notAfter.Unix())) } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/certificate_store.go b/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/certificate_store.go index f54bd6586f31..d26666154210 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/certificate_store.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/certificate_store.go @@ -21,13 +21,12 @@ import ( "crypto/x509" "encoding/pem" "fmt" - "io/ioutil" "os" "path/filepath" "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -128,7 +127,7 @@ func (s *fileStore) Current() (*tls.Certificate, error) { if pairFileExists, err := fileExists(pairFile); err != nil { return nil, err } else if pairFileExists { - glog.Infof("Loading cert/key pair from %q.", pairFile) + klog.Infof("Loading cert/key pair from %q.", pairFile) return loadFile(pairFile) } @@ -141,7 +140,7 @@ func (s *fileStore) Current() (*tls.Certificate, error) { return nil, err } if certFileExists && keyFileExists { - glog.Infof("Loading cert/key pair from (%q, %q).", s.certFile, s.keyFile) + klog.Infof("Loading cert/key pair from (%q, %q).", s.certFile, s.keyFile) return loadX509KeyPair(s.certFile, s.keyFile) } @@ -156,7 +155,7 @@ func (s *fileStore) Current() (*tls.Certificate, error) { return nil, err } if certFileExists && keyFileExists { - glog.Infof("Loading cert/key pair from (%q, %q).", c, k) + klog.Infof("Loading cert/key pair from (%q, %q).", c, k) return loadX509KeyPair(c, k) } @@ -171,11 +170,9 @@ func (s *fileStore) Current() (*tls.Certificate, error) { } func loadFile(pairFile string) (*tls.Certificate, error) { - certBlock, keyBlock, err := loadCertKeyBlocks(pairFile) - if err != nil { - return nil, err - } - cert, err := tls.X509KeyPair(pem.EncodeToMemory(certBlock), pem.EncodeToMemory(keyBlock)) + // LoadX509KeyPair knows how to parse combined cert and private key from + // the same file. 
+ cert, err := tls.LoadX509KeyPair(pairFile, pairFile) if err != nil { return nil, fmt.Errorf("could not convert data from %q into cert/key pair: %v", pairFile, err) } @@ -187,22 +184,6 @@ func loadFile(pairFile string) (*tls.Certificate, error) { return &cert, nil } -func loadCertKeyBlocks(pairFile string) (cert *pem.Block, key *pem.Block, err error) { - data, err := ioutil.ReadFile(pairFile) - if err != nil { - return nil, nil, fmt.Errorf("could not load cert/key pair from %q: %v", pairFile, err) - } - certBlock, rest := pem.Decode(data) - if certBlock == nil { - return nil, nil, fmt.Errorf("could not decode the first block from %q from expected PEM format", pairFile) - } - keyBlock, _ := pem.Decode(rest) - if keyBlock == nil { - return nil, nil, fmt.Errorf("could not decode the second block from %q from expected PEM format", pairFile) - } - return certBlock, keyBlock, nil -} - func (s *fileStore) Update(certData, keyData []byte) (*tls.Certificate, error) { ts := time.Now().Format("2006-01-02-15-04-05") pemFilename := s.filename(ts) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/csr/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/csr/BUILD index e160947dbc2e..eb28433e05b9 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/csr/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/csr/BUILD @@ -1,10 +1,6 @@ package(default_visibility = ["//visibility:public"]) -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", @@ -17,14 +13,13 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/tools/watch:go_default_library", "//staging/src/k8s.io/client-go/util/cert:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -40,16 +35,3 @@ filegroup( srcs = [":package-srcs"], tags = ["automanaged"], ) - -go_test( - name = "go_default_test", - srcs = ["csr_test.go"], - embed = [":go_default_library"], - deps = [ - "//staging/src/k8s.io/api/certificates/v1beta1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", - "//staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library", - "//staging/src/k8s.io/client-go/util/cert:go_default_library", - ], -) diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/csr/csr.go b/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/csr/csr.go index 4a53352fee02..6338eef93319 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/csr/csr.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/util/certificate/csr/csr.go @@ -18,23 +18,19 @@ package csr import ( "crypto" - "crypto/sha512" "crypto/x509" - "crypto/x509/pkix" - "encoding/base64" "encoding/pem" "fmt" "reflect" 
"time" - "github.com/golang/glog" + "k8s.io/klog" certificates "k8s.io/api/certificates/v1beta1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" certificatesclient "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" @@ -43,41 +39,6 @@ import ( certutil "k8s.io/client-go/util/cert" ) -// RequestNodeCertificate will create a certificate signing request for a node -// (Organization and CommonName for the CSR will be set as expected for node -// certificates) and send it to API server, then it will watch the object's -// status, once approved by API server, it will return the API server's issued -// certificate (pem-encoded). If there is any errors, or the watch timeouts, it -// will return an error. This is intended for use on nodes (kubelet and -// kubeadm). -func RequestNodeCertificate(client certificatesclient.CertificateSigningRequestInterface, privateKeyData []byte, nodeName types.NodeName) (certData []byte, err error) { - subject := &pkix.Name{ - Organization: []string{"system:nodes"}, - CommonName: "system:node:" + string(nodeName), - } - - privateKey, err := certutil.ParsePrivateKeyPEM(privateKeyData) - if err != nil { - return nil, fmt.Errorf("invalid private key for certificate request: %v", err) - } - csrData, err := certutil.MakeCSR(privateKey, subject, nil, nil) - if err != nil { - return nil, fmt.Errorf("unable to generate certificate request: %v", err) - } - - usages := []certificates.KeyUsage{ - certificates.UsageDigitalSignature, - certificates.UsageKeyEncipherment, - certificates.UsageClientAuth, - } - name := digestedName(privateKeyData, subject, usages) - req, err := RequestCertificate(client, csrData, name, usages, privateKey) - if err != nil { - return nil, err - } - return WaitForCertificate(client, req, 3600*time.Second) -} - // RequestCertificate will either use an existing (if this process has run // before but not to completion) or create a certificate signing request using the // PEM encoded CSR and send it to API server, then it will watch the object's @@ -104,7 +65,7 @@ func RequestCertificate(client certificatesclient.CertificateSigningRequestInter switch { case err == nil: case errors.IsAlreadyExists(err) && len(name) > 0: - glog.Infof("csr for this node already exists, reusing") + klog.Infof("csr for this node already exists, reusing") req, err = client.Get(name, metav1.GetOptions{}) if err != nil { return nil, formatError("cannot retrieve certificate signing request: %v", err) @@ -112,7 +73,7 @@ func RequestCertificate(client certificatesclient.CertificateSigningRequestInter if err := ensureCompatible(req, csr, privateKey); err != nil { return nil, fmt.Errorf("retrieved csr is not compatible: %v", err) } - glog.Infof("csr for this node is still valid") + klog.Infof("csr for this node is still valid") default: return nil, formatError("cannot create certificate signing request: %v", err) } @@ -168,57 +129,25 @@ func WaitForCertificate(client certificatesclient.CertificateSigningRequestInter return event.Object.(*certificates.CertificateSigningRequest).Status.Certificate, nil } -// This digest should include all the relevant pieces of the CSR we care about. -// We can't direcly hash the serialized CSR because of random padding that we -// regenerate every loop and we include usages which are not contained in the -// CSR. 
This needs to be kept up to date as we add new fields to the node -// certificates and with ensureCompatible. -func digestedName(privateKeyData []byte, subject *pkix.Name, usages []certificates.KeyUsage) string { - hash := sha512.New512_256() - - // Here we make sure two different inputs can't write the same stream - // to the hash. This delimiter is not in the base64.URLEncoding - // alphabet so there is no way to have spill over collisions. Without - // it 'CN:foo,ORG:bar' hashes to the same value as 'CN:foob,ORG:ar' - const delimiter = '|' - encode := base64.RawURLEncoding.EncodeToString - - write := func(data []byte) { - hash.Write([]byte(encode(data))) - hash.Write([]byte{delimiter}) - } - - write(privateKeyData) - write([]byte(subject.CommonName)) - for _, v := range subject.Organization { - write([]byte(v)) - } - for _, v := range usages { - write([]byte(v)) - } - - return "node-csr-" + encode(hash.Sum(nil)) -} - // ensureCompatible ensures that a CSR object is compatible with an original CSR func ensureCompatible(new, orig *certificates.CertificateSigningRequest, privateKey interface{}) error { - newCsr, err := ParseCSR(new) + newCSR, err := parseCSR(new) if err != nil { return fmt.Errorf("unable to parse new csr: %v", err) } - origCsr, err := ParseCSR(orig) + origCSR, err := parseCSR(orig) if err != nil { return fmt.Errorf("unable to parse original csr: %v", err) } - if !reflect.DeepEqual(newCsr.Subject, origCsr.Subject) { - return fmt.Errorf("csr subjects differ: new: %#v, orig: %#v", newCsr.Subject, origCsr.Subject) + if !reflect.DeepEqual(newCSR.Subject, origCSR.Subject) { + return fmt.Errorf("csr subjects differ: new: %#v, orig: %#v", newCSR.Subject, origCSR.Subject) } signer, ok := privateKey.(crypto.Signer) if !ok { return fmt.Errorf("privateKey is not a signer") } - newCsr.PublicKey = signer.Public() - if err := newCsr.CheckSignature(); err != nil { + newCSR.PublicKey = signer.Public() + if err := newCSR.CheckSignature(); err != nil { return fmt.Errorf("error validating signature new CSR against old key: %v", err) } if len(new.Status.Certificate) > 0 { @@ -247,17 +176,12 @@ func formatError(format string, err error) error { return fmt.Errorf(format, err) } -// ParseCSR extracts the CSR from the API object and decodes it. -func ParseCSR(obj *certificates.CertificateSigningRequest) (*x509.CertificateRequest, error) { +// parseCSR extracts the CSR from the API object and decodes it. 
+func parseCSR(obj *certificates.CertificateSigningRequest) (*x509.CertificateRequest, error) { // extract PEM from request object - pemBytes := obj.Spec.Request - block, _ := pem.Decode(pemBytes) + block, _ := pem.Decode(obj.Spec.Request) if block == nil || block.Type != "CERTIFICATE REQUEST" { return nil, fmt.Errorf("PEM block type must be CERTIFICATE REQUEST") } - csr, err := x509.ParseCertificateRequest(block.Bytes) - if err != nil { - return nil, err - } - return csr, nil + return x509.ParseCertificateRequest(block.Bytes) } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/util/workqueue/BUILD b/cluster-autoscaler/vendor/k8s.io/client-go/util/workqueue/BUILD index c139977c1ca8..ff0e87a7c758 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/util/workqueue/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/client-go/util/workqueue/BUILD @@ -11,6 +11,7 @@ go_test( srcs = [ "default_rate_limiters_test.go", "delaying_queue_test.go", + "metrics_test.go", "queue_test.go", "rate_limitting_queue_test.go", ], diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/util/workqueue/metrics.go b/cluster-autoscaler/vendor/k8s.io/client-go/util/workqueue/metrics.go index a481bdfb2660..d4c03d8378f5 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/util/workqueue/metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/util/workqueue/metrics.go @@ -19,6 +19,8 @@ package workqueue import ( "sync" "time" + + "k8s.io/apimachinery/pkg/util/clock" ) // This file provides abstractions for setting the provider (e.g., prometheus) @@ -28,6 +30,7 @@ type queueMetrics interface { add(item t) get(item t) done(item t) + updateUnfinishedWork() } // GaugeMetric represents a single numerical value that can arbitrarily go up @@ -37,6 +40,12 @@ type GaugeMetric interface { Dec() } +// SettableGaugeMetric represents a single numerical value that can arbitrarily go up +// and down. (Separate from GaugeMetric to preserve backwards compatibility.) +type SettableGaugeMetric interface { + Set(float64) +} + // CounterMetric represents a single numerical value that only ever // goes up. type CounterMetric interface { @@ -52,9 +61,13 @@ type noopMetric struct{} func (noopMetric) Inc() {} func (noopMetric) Dec() {} +func (noopMetric) Set(float64) {} func (noopMetric) Observe(float64) {} +// defaultQueueMetrics expects the caller to lock before setting any metrics. type defaultQueueMetrics struct { + clock clock.Clock + // current depth of a workqueue depth GaugeMetric // total number of adds handled by a workqueue @@ -65,6 +78,10 @@ type defaultQueueMetrics struct { workDuration SummaryMetric addTimes map[t]time.Time processingStartTimes map[t]time.Time + + // how long have current threads been working? 
+ unfinishedWorkSeconds SettableGaugeMetric + longestRunningProcessor SettableGaugeMetric } func (m *defaultQueueMetrics) add(item t) { @@ -75,7 +92,7 @@ func (m *defaultQueueMetrics) add(item t) { m.adds.Inc() m.depth.Inc() if _, exists := m.addTimes[item]; !exists { - m.addTimes[item] = time.Now() + m.addTimes[item] = m.clock.Now() } } @@ -85,9 +102,9 @@ func (m *defaultQueueMetrics) get(item t) { } m.depth.Dec() - m.processingStartTimes[item] = time.Now() + m.processingStartTimes[item] = m.clock.Now() if startTime, exists := m.addTimes[item]; exists { - m.latency.Observe(sinceInMicroseconds(startTime)) + m.latency.Observe(m.sinceInMicroseconds(startTime)) delete(m.addTimes, item) } } @@ -98,14 +115,39 @@ func (m *defaultQueueMetrics) done(item t) { } if startTime, exists := m.processingStartTimes[item]; exists { - m.workDuration.Observe(sinceInMicroseconds(startTime)) + m.workDuration.Observe(m.sinceInMicroseconds(startTime)) delete(m.processingStartTimes, item) } } +func (m *defaultQueueMetrics) updateUnfinishedWork() { + // Note that a summary metric would be better for this, but prometheus + // doesn't seem to have non-hacky ways to reset the summary metrics. + var total float64 + var oldest float64 + for _, t := range m.processingStartTimes { + age := m.sinceInMicroseconds(t) + total += age + if age > oldest { + oldest = age + } + } + // Convert to seconds; microseconds is unhelpfully granular for this. + total /= 1000000 + m.unfinishedWorkSeconds.Set(total) + m.longestRunningProcessor.Set(oldest) // in microseconds. +} + +type noMetrics struct{} + +func (noMetrics) add(item t) {} +func (noMetrics) get(item t) {} +func (noMetrics) done(item t) {} +func (noMetrics) updateUnfinishedWork() {} + // Gets the time since the specified start in microseconds. 
-func sinceInMicroseconds(start time.Time) float64 { - return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds()) +func (m *defaultQueueMetrics) sinceInMicroseconds(start time.Time) float64 { + return float64(m.clock.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds()) } type retryMetrics interface { @@ -130,6 +172,8 @@ type MetricsProvider interface { NewAddsMetric(name string) CounterMetric NewLatencyMetric(name string) SummaryMetric NewWorkDurationMetric(name string) SummaryMetric + NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric + NewLongestRunningProcessorMicrosecondsMetric(name string) SettableGaugeMetric NewRetriesMetric(name string) CounterMetric } @@ -151,29 +195,49 @@ func (_ noopMetricsProvider) NewWorkDurationMetric(name string) SummaryMetric { return noopMetric{} } +func (_ noopMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewLongestRunningProcessorMicrosecondsMetric(name string) SettableGaugeMetric { + return noopMetric{} +} + func (_ noopMetricsProvider) NewRetriesMetric(name string) CounterMetric { return noopMetric{} } -var metricsFactory = struct { - metricsProvider MetricsProvider - setProviders sync.Once -}{ +var globalMetricsFactory = queueMetricsFactory{ metricsProvider: noopMetricsProvider{}, } -func newQueueMetrics(name string) queueMetrics { - var ret *defaultQueueMetrics - if len(name) == 0 { - return ret +type queueMetricsFactory struct { + metricsProvider MetricsProvider + + onlyOnce sync.Once +} + +func (f *queueMetricsFactory) setProvider(mp MetricsProvider) { + f.onlyOnce.Do(func() { + f.metricsProvider = mp + }) +} + +func (f *queueMetricsFactory) newQueueMetrics(name string, clock clock.Clock) queueMetrics { + mp := f.metricsProvider + if len(name) == 0 || mp == (noopMetricsProvider{}) { + return noMetrics{} } return &defaultQueueMetrics{ - depth: metricsFactory.metricsProvider.NewDepthMetric(name), - adds: metricsFactory.metricsProvider.NewAddsMetric(name), - latency: metricsFactory.metricsProvider.NewLatencyMetric(name), - workDuration: metricsFactory.metricsProvider.NewWorkDurationMetric(name), - addTimes: map[t]time.Time{}, - processingStartTimes: map[t]time.Time{}, + clock: clock, + depth: mp.NewDepthMetric(name), + adds: mp.NewAddsMetric(name), + latency: mp.NewLatencyMetric(name), + workDuration: mp.NewWorkDurationMetric(name), + unfinishedWorkSeconds: mp.NewUnfinishedWorkSecondsMetric(name), + longestRunningProcessor: mp.NewLongestRunningProcessorMicrosecondsMetric(name), + addTimes: map[t]time.Time{}, + processingStartTimes: map[t]time.Time{}, } } @@ -183,13 +247,12 @@ func newRetryMetrics(name string) retryMetrics { return ret } return &defaultRetryMetrics{ - retries: metricsFactory.metricsProvider.NewRetriesMetric(name), + retries: globalMetricsFactory.metricsProvider.NewRetriesMetric(name), } } -// SetProvider sets the metrics provider of the metricsFactory. +// SetProvider sets the metrics provider for all subsequently created work +// queues. Only the first call has an effect. 
func SetProvider(metricsProvider MetricsProvider) { - metricsFactory.setProviders.Do(func() { - metricsFactory.metricsProvider = metricsProvider - }) + globalMetricsFactory.setProvider(metricsProvider) } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/util/workqueue/parallelizer.go b/cluster-autoscaler/vendor/k8s.io/client-go/util/workqueue/parallelizer.go index 526bd244e6c1..ad2535018249 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/util/workqueue/parallelizer.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/util/workqueue/parallelizer.go @@ -27,6 +27,8 @@ type DoWorkPieceFunc func(piece int) // Parallelize is a very simple framework that allows for parallelizing // N independent pieces of work. +// +// Deprecated: Use ParallelizeUntil instead. func Parallelize(workers, pieces int, doWorkPiece DoWorkPieceFunc) { ParallelizeUntil(nil, workers, pieces, doWorkPiece) } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/util/workqueue/queue.go b/cluster-autoscaler/vendor/k8s.io/client-go/util/workqueue/queue.go index dc9a7cc7b7cb..39009b8e79a3 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/util/workqueue/queue.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/util/workqueue/queue.go @@ -18,6 +18,9 @@ package workqueue import ( "sync" + "time" + + "k8s.io/apimachinery/pkg/util/clock" ) type Interface interface { @@ -35,14 +38,29 @@ func New() *Type { } func NewNamed(name string) *Type { - return &Type{ - dirty: set{}, - processing: set{}, - cond: sync.NewCond(&sync.Mutex{}), - metrics: newQueueMetrics(name), + rc := clock.RealClock{} + return newQueue( + rc, + globalMetricsFactory.newQueueMetrics(name, rc), + defaultUnfinishedWorkUpdatePeriod, + ) +} + +func newQueue(c clock.Clock, metrics queueMetrics, updatePeriod time.Duration) *Type { + t := &Type{ + clock: c, + dirty: set{}, + processing: set{}, + cond: sync.NewCond(&sync.Mutex{}), + metrics: metrics, + unfinishedWorkUpdatePeriod: updatePeriod, } + go t.updateUnfinishedWorkLoop() + return t } +const defaultUnfinishedWorkUpdatePeriod = 500 * time.Millisecond + // Type is a work queue (see the package comment). type Type struct { // queue defines the order in which we will work on items. Every @@ -64,6 +82,9 @@ type Type struct { shuttingDown bool metrics queueMetrics + + unfinishedWorkUpdatePeriod time.Duration + clock clock.Clock } type empty struct{} @@ -170,3 +191,22 @@ func (q *Type) ShuttingDown() bool { return q.shuttingDown } + +func (q *Type) updateUnfinishedWorkLoop() { + t := q.clock.NewTicker(q.unfinishedWorkUpdatePeriod) + defer t.Stop() + for range t.C() { + if !func() bool { + q.cond.L.Lock() + defer q.cond.L.Unlock() + if !q.shuttingDown { + q.metrics.updateUnfinishedWork() + return true + } + return false + + }() { + return + } + } +} diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/util/workqueue/rate_limitting_queue.go b/cluster-autoscaler/vendor/k8s.io/client-go/util/workqueue/rate_limitting_queue.go index 417ac001b84a..8321876acf40 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/util/workqueue/rate_limitting_queue.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/util/workqueue/rate_limitting_queue.go @@ -20,10 +20,10 @@ package workqueue type RateLimitingInterface interface { DelayingInterface - // AddRateLimited adds an item to the workqueue after the rate limiter says its ok + // AddRateLimited adds an item to the workqueue after the rate limiter says it's ok AddRateLimited(item interface{}) - // Forget indicates that an item is finished being retried. 
Doesn't matter whether its for perm failing + // Forget indicates that an item is finished being retried. Doesn't matter whether it's for perm failing // or for success, we'll stop the rate limiter from tracking it. This only clears the `rateLimiter`, you // still have to call `Done` on the queue. Forget(item interface{}) @@ -55,7 +55,7 @@ type rateLimitingType struct { rateLimiter RateLimiter } -// AddRateLimited AddAfter's the item based on the time when the rate limiter says its ok +// AddRateLimited AddAfter's the item based on the time when the rate limiter says it's ok func (q *rateLimitingType) AddRateLimited(item interface{}) { q.DelayingInterface.AddAfter(item, q.rateLimiter.When(item)) } diff --git a/cluster-autoscaler/vendor/k8s.io/cloud-provider/BUILD b/cluster-autoscaler/vendor/k8s.io/cloud-provider/BUILD new file mode 100644 index 000000000000..4c0c21dcbe84 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/cloud-provider/BUILD @@ -0,0 +1,38 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "cloud.go", + "doc.go", + "plugins.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/cloud-provider", + importpath = "k8s.io/cloud-provider", + deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/client-go/informers:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/cloud-provider/CONTRIBUTING.md b/cluster-autoscaler/vendor/k8s.io/cloud-provider/CONTRIBUTING.md new file mode 100644 index 000000000000..7d70c78eb488 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/cloud-provider/CONTRIBUTING.md @@ -0,0 +1,9 @@ +# Contributing guidelines + +Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). + +Do not open pull requests directly against this repository, they will be ignored. Instead, please open pull requests against [kubernetes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes. Changes to this repo should be discussed with [sig cloud-provider](https://github.com/kubernetes/community/tree/master/sig-cloud-provider). + +This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/cloud-provider](https://git.k8s.io/kubernetes/staging/src/k8s.io/cloud-provider) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot). 
+ +Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/staging.md) for more information diff --git a/cluster-autoscaler/vendor/k8s.io/cloud-provider/LICENSE b/cluster-autoscaler/vendor/k8s.io/cloud-provider/LICENSE new file mode 100644 index 000000000000..8dada3edaf50 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/cloud-provider/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/cluster-autoscaler/vendor/k8s.io/cloud-provider/OWNERS b/cluster-autoscaler/vendor/k8s.io/cloud-provider/OWNERS
new file mode 100644
index 000000000000..7e9b0f91c118
--- /dev/null
+++ b/cluster-autoscaler/vendor/k8s.io/cloud-provider/OWNERS
@@ -0,0 +1,42 @@
+approvers:
+- mikedanese
+- dims
+- wlan0
+- andrewsykim
+- cheftako
+reviewers:
+- wojtek-t
+- deads2k
+- derekwaynecarr
+- vishh
+- mikedanese
+- liggitt
+- gmarek
+- davidopp
+- pmorie
+- sttts
+- quinton-hoole
+- dchen1107
+- saad-ali
+- zmerlynn
+- luxas
+- justinsb
+- roberthbailey
+- eparis
+- jlowdermilk
+- piosz
+- jsafrane
+- dims
+- krousey
+- rootfs
+- jszczepkowski
+- markturansky
+- girishkalele
+- jdef
+- freehan
+- jingxu97
+- wlan0
+- cheftako
+- andrewsykim
+labels:
+- sig/cloud-provider
diff --git a/cluster-autoscaler/vendor/k8s.io/cloud-provider/README.md b/cluster-autoscaler/vendor/k8s.io/cloud-provider/README.md
new file mode 100644
index 000000000000..d53fec5a4d5c
--- /dev/null
+++ b/cluster-autoscaler/vendor/k8s.io/cloud-provider/README.md
@@ -0,0 +1,32 @@
+# cloud-provider
+
+This repository defines the cloud-provider interface and mechanism to initialize
+a cloud-provider implementation into Kubernetes. Currently multiple processes
+use this code, although the intent is that it will eventually be used only by
+the cloud controller manager.
+
+**Note:** go-get or vendor this package as `k8s.io/cloud-provider`.
+
+## Purpose
+
+This library is a shared dependency for processes which need to be able to
+integrate with cloud-provider specific functionality.
+
+## Compatibility
+
+Cloud Providers are expected to keep the HEAD of their implementations in sync
+with the HEAD of this repository.
+
+## Where does it come from?
+
+`cloud-provider` is synced from
+https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/cloud-provider.
+Code changes are made in that location, merged into k8s.io/kubernetes and
+later synced here.
+
+## Things you should NOT do
+
+ 1. Add any cloud provider specific code to this repo.
+ 2. Directly modify anything under vendor/k8s.io/cloud-provider in this repo. Those are driven from `k8s.io/kubernetes/staging/src/k8s.io/cloud-provider`.
+ 3. Make interface changes without first discussing them with
+    sig-cloudprovider.
diff --git a/cluster-autoscaler/vendor/k8s.io/cloud-provider/SECURITY_CONTACTS b/cluster-autoscaler/vendor/k8s.io/cloud-provider/SECURITY_CONTACTS
new file mode 100644
index 000000000000..e7d8230ef5fa
--- /dev/null
+++ b/cluster-autoscaler/vendor/k8s.io/cloud-provider/SECURITY_CONTACTS
@@ -0,0 +1,15 @@
+# Defined below are the security contacts for this repo.
+#
+# They are the contact point for the Product Security Team to reach out
+# to for triaging and handling of incoming issues.
+# +# The below names agree to abide by the +# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy) +# and will be removed and replaced if they violate that agreement. +# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +cheftako +andrewsykim +dims diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/cloud.go b/cluster-autoscaler/vendor/k8s.io/cloud-provider/cloud.go similarity index 93% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/cloud.go rename to cluster-autoscaler/vendor/k8s.io/cloud-provider/cloud.go index f0fd8864c23d..6db0219520bf 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/cloud.go +++ b/cluster-autoscaler/vendor/k8s.io/cloud-provider/cloud.go @@ -25,14 +25,26 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/informers" - "k8s.io/kubernetes/pkg/controller" + clientset "k8s.io/client-go/kubernetes" + restclient "k8s.io/client-go/rest" ) +// ControllerClientBuilder allows you to get clients and configs for controllers +// Please note a copy also exists in pkg/controller/client_builder.go +// TODO: Make this depend on the separate controller utilities repo (issues/68947) +type ControllerClientBuilder interface { + Config(name string) (*restclient.Config, error) + ConfigOrDie(name string) *restclient.Config + Client(name string) (clientset.Interface, error) + ClientOrDie(name string) clientset.Interface +} + // Interface is an abstract, pluggable interface for cloud providers. type Interface interface { // Initialize provides the cloud with a kubernetes client builder and may spawn goroutines - // to perform housekeeping activities within the cloud provider. - Initialize(clientBuilder controller.ControllerClientBuilder) + // to perform housekeeping or run custom controllers specific to the cloud provider. + // Any tasks started here should be cleaned up when the stop channel closes. + Initialize(clientBuilder ControllerClientBuilder, stop <-chan struct{}) // LoadBalancer returns a balancer interface. Also returns true if the interface is supported, false otherwise. LoadBalancer() (LoadBalancer, bool) // Instances returns an instances interface. Also returns true if the interface is supported, false otherwise. diff --git a/cluster-autoscaler/vendor/k8s.io/klog/code-of-conduct.md b/cluster-autoscaler/vendor/k8s.io/cloud-provider/code-of-conduct.md similarity index 100% rename from cluster-autoscaler/vendor/k8s.io/klog/code-of-conduct.md rename to cluster-autoscaler/vendor/k8s.io/cloud-provider/code-of-conduct.md diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/doc.go b/cluster-autoscaler/vendor/k8s.io/cloud-provider/doc.go similarity index 100% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/doc.go rename to cluster-autoscaler/vendor/k8s.io/cloud-provider/doc.go diff --git a/cluster-autoscaler/vendor/k8s.io/cloud-provider/plugins.go b/cluster-autoscaler/vendor/k8s.io/cloud-provider/plugins.go new file mode 100644 index 000000000000..9fc6aff8cd25 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/cloud-provider/plugins.go @@ -0,0 +1,148 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cloudprovider + +import ( + "fmt" + "io" + "os" + "sync" + + "k8s.io/klog" +) + +// Factory is a function that returns a cloudprovider.Interface. +// The config parameter provides an io.Reader handler to the factory in +// order to load specific configurations. If no configuration is provided +// the parameter is nil. +type Factory func(config io.Reader) (Interface, error) + +// All registered cloud providers. +var ( + providersMutex sync.Mutex + providers = make(map[string]Factory) + deprecatedCloudProviders = []struct { + name string + external bool + detail string + }{ + {"aws", false, "The AWS provider is deprecated and will be removed in a future release"}, + {"azure", false, "The Azure provider is deprecated and will be removed in a future release"}, + {"cloudstack", false, "The CloudStack Controller project is no longer maintained."}, + {"gce", false, "The GCE provider is deprecated and will be removed in a future release"}, + {"openstack", true, "https://github.com/kubernetes/cloud-provider-openstack"}, + {"ovirt", false, "The ovirt Controller project is no longer maintained."}, + {"photon", false, "The Photon Controller project is no longer maintained."}, + {"vsphere", false, "The vSphere provider is deprecated and will be removed in a future release"}, + } +) + +const externalCloudProvider = "external" + +// RegisterCloudProvider registers a cloudprovider.Factory by name. This +// is expected to happen during app startup. +func RegisterCloudProvider(name string, cloud Factory) { + providersMutex.Lock() + defer providersMutex.Unlock() + if _, found := providers[name]; found { + klog.Fatalf("Cloud provider %q was registered twice", name) + } + klog.V(1).Infof("Registered cloud provider %q", name) + providers[name] = cloud +} + +// IsCloudProvider returns true if name corresponds to an already registered +// cloud provider. +func IsCloudProvider(name string) bool { + providersMutex.Lock() + defer providersMutex.Unlock() + _, found := providers[name] + return found +} + +// GetCloudProvider creates an instance of the named cloud provider, or nil if +// the name is unknown. The error return is only used if the named provider +// was known but failed to initialize. The config parameter specifies the +// io.Reader handler of the configuration file for the cloud provider, or nil +// for no configuration. +func GetCloudProvider(name string, config io.Reader) (Interface, error) { + providersMutex.Lock() + defer providersMutex.Unlock() + f, found := providers[name] + if !found { + return nil, nil + } + return f(config) +} + +// Detects if the string is an external cloud provider +func IsExternal(name string) bool { + return name == externalCloudProvider +} + +// InitCloudProvider creates an instance of the named cloud provider. 
+func InitCloudProvider(name string, configFilePath string) (Interface, error) {
+	var cloud Interface
+	var err error
+
+	if name == "" {
+		klog.Info("No cloud provider specified.")
+		return nil, nil
+	}
+
+	if IsExternal(name) {
+		klog.Info("External cloud provider specified")
+		return nil, nil
+	}
+
+	for _, provider := range deprecatedCloudProviders {
+		if provider.name == name {
+			detail := provider.detail
+			if provider.external {
+				detail = fmt.Sprintf("Please use 'external' cloud provider for %s: %s", name, provider.detail)
+			}
+			klog.Warningf("WARNING: %s built-in cloud provider is now deprecated. %s", name, detail)
+
+			break
+		}
+	}
+
+	if configFilePath != "" {
+		var config *os.File
+		config, err = os.Open(configFilePath)
+		if err != nil {
+			klog.Fatalf("Couldn't open cloud provider configuration %s: %#v",
+				configFilePath, err)
+		}
+
+		defer config.Close()
+		cloud, err = GetCloudProvider(name, config)
+	} else {
+		// Pass explicit nil so plugins can actually check for nil. See
+		// "Why is my nil error value not equal to nil?" in golang.org/doc/faq.
+		cloud, err = GetCloudProvider(name, nil)
+	}
+
+	if err != nil {
+		return nil, fmt.Errorf("could not init cloud provider %q: %v", name, err)
+	}
+	if cloud == nil {
+		return nil, fmt.Errorf("unknown cloud provider %q", name)
+	}
+
+	return cloud, nil
+}
diff --git a/cluster-autoscaler/vendor/k8s.io/csi-api/pkg/apis/csi/v1alpha1/README.md b/cluster-autoscaler/vendor/k8s.io/csi-api/pkg/apis/csi/v1alpha1/README.md
new file mode 100644
index 000000000000..79a7ec17eed9
--- /dev/null
+++ b/cluster-autoscaler/vendor/k8s.io/csi-api/pkg/apis/csi/v1alpha1/README.md
@@ -0,0 +1,52 @@
+# CSINodeInfo and CSIDriverInfo Usage and Lifecycle
+
+CSINodeInfo is an API object representing CSI Information at the Node level.
+CSINodeInfo contains a Spec and a Status, each containing Drivers, which
+represent the driver spec and status respectively. The CSIDriverInfoSpec
+represents the specification of the driver and is generally not changed,
+whereas the CSIDriverInfoStatus represents the current status of the driver
+and can be updated and reacted to by various components.
+
+## Who creates it and when
+
+CSINodeInfo is created by Kubelet when the first CSI Driver is installed in the
+cluster and registered through the Kubelet device registration mechanism.
+
+## Who updates CSIDriverInfo Spec and when
+
+The CSIDriverInfoSpec for a driver is created upon installation of the CSI
+Driver in the cluster and its registration through the Kubelet device
+registration mechanism. The spec is populated with information about the driver
+through the nodeinfomanager (inside Kubelet) and will remain unchanged from then
+on.
+
+## Who updates Status and when
+
+The CSIDriverInfoStatus for the driver is created upon installation of the CSI
+Driver in the cluster (at the same time as the spec) and its registration through
+the Kubelet device registration mechanism. The Status contains information about
+installation and the required Volume Plugin Mechanism of the driver. When the
+driver is installed/uninstalled through the Kubelet device registration
+mechanism, the Available flag is flipped to true/false respectively. The
+migration status will also be updated when the flags for migration are set to
+true/false on the Kubelet for that Driver on that node.
+
+## Consumers of Status and Spec
+
+Currently the only consumer of CSINodeInfo/CSIDriverInfo is the
+csi-external-provisioner (a minimal read sketch follows below).
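(Editorial aside, not part of the vendored diff: for readers unfamiliar with how a consumer reads these objects, here is a minimal sketch using the csi-api typed clientset whose changes appear later in this diff. The kubeconfig path and node name are placeholders, and the `CsiV1alpha1()` accessor name is assumed from the usual client-gen naming conventions.)

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
	csiclient "k8s.io/csi-api/pkg/client/clientset/versioned"
)

func main() {
	// Build client config from a kubeconfig file (placeholder path).
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs, err := csiclient.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// CSINodeInfo objects are cluster-scoped and named after the node.
	info, err := cs.CsiV1alpha1().CSINodeInfos().Get("node-1", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	// With the spec/status split introduced in the types.go change below,
	// per-driver status lives under Status.Drivers rather than a flat list.
	for _, d := range info.Status.Drivers {
		fmt.Printf("driver=%s available=%t mechanism=%s\n",
			d.Name, d.Available, d.VolumePluginMechanism)
	}
}
```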
+In the future, the Attach Detach Controller (ADC) will need to read this
+object to determine migration status on a per-driver, per-node basis. The
+creation of the CSINodeInfo object could race with the Attach/Detach
+controller, because for CSI Migration the controller depends on the existence
+of the API object for the driver, which may not have been created yet. The
+ADC is expected to fail (and retry with exponential backoff) the operation if
+it is expecting the object and it has not yet been created.
+
+## Creation of CSINodeInfo object on Kubelet startup
+
+For CSI Migration Alpha we expect any user who turns on the feature to have
+both Kubelet and ADC at a version where CSINodeInfos are created on Kubelet
+startup. We will not promote the feature to Beta (on by default) until
+CSINodeInfos have been created on Kubelet startup for a two-version skew, to
+prevent the case where the CSINodeInfo does not exist when the ADC depends on
+it. This prevents the race described above from becoming a permanent bad state.
diff --git a/cluster-autoscaler/vendor/k8s.io/csi-api/pkg/apis/csi/v1alpha1/types.go b/cluster-autoscaler/vendor/k8s.io/csi-api/pkg/apis/csi/v1alpha1/types.go
index d2fbe0aa5713..6232b6b4d8ee 100644
--- a/cluster-autoscaler/vendor/k8s.io/csi-api/pkg/apis/csi/v1alpha1/types.go
+++ b/cluster-autoscaler/vendor/k8s.io/csi-api/pkg/apis/csi/v1alpha1/types.go
@@ -18,6 +18,16 @@ package v1alpha1

 import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

+// VolumePluginMechanism is the type of mechanism components should use for volume operations
+type VolumePluginMechanism string
+
+const (
+	// VolumePluginMechanismInTree means components should use the in-tree plugin for volume operations
+	VolumePluginMechanismInTree VolumePluginMechanism = "in-tree"
+	// VolumePluginMechanismCSI means components should use the CSI Driver for volume operations
+	VolumePluginMechanismCSI VolumePluginMechanism = "csi"
+)
+
 // +genclient
 // +genclient:nonNamespaced
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -63,7 +73,7 @@ type CSIDriverSpec struct {
 	// If value is not specified, default is false -- meaning attach will not be
 	// called.
 	// +optional
-	AttachRequired *bool `json:"attachRequired"`
+	AttachRequired *bool `json:"attachRequired,omitempty"`

 	// If specified, podInfoRequiredOnMount indicates this CSI volume driver
 	// requires additional pod information (like podName, podUID, etc.) during
@@ -77,7 +87,7 @@ type CSIDriverSpec struct {
 	//   "csi.storage.k8s.io/pod.namespace": pod.Namespace
 	//   "csi.storage.k8s.io/pod.uid": string(pod.UID)
 	// +optional
-	PodInfoOnMountVersion *string `json:"podInfoOnMountVersion"`
+	PodInfoOnMountVersion *string `json:"podInfoOnMountVersion,omitempty"`
 }

 // +genclient
@@ -91,18 +101,56 @@ type CSINodeInfo struct {
 	// metadata.name must be the Kubernetes node name.
 	metav1.ObjectMeta `json:"metadata,omitempty"`

-	// List of CSI drivers running on the node and their properties.
- // +patchMergeKey=driver + // spec is the specification of CSINodeInfo + Spec CSINodeInfoSpec `json:"spec"` + + // status is the current status of CSINodeInfo + Status CSINodeInfoStatus `json:"status"` +} + +// CSINodeInfoSpec holds information about the specification of all CSI drivers installed on a node +type CSINodeInfoSpec struct { + // drivers is a list of specifications of CSIDriverInfo + // +patchMergeKey=name + // +patchStrategy=merge + Drivers []CSIDriverInfoSpec `json:"drivers" patchStrategy:"merge" patchMergeKey:"name"` +} + +// CSINodeInfoStatus holds information about the status of all CSI drivers installed on a node +type CSINodeInfoStatus struct { + // drivers is a list of the statuses of CSIDriverInfo + // +patchMergeKey=name // +patchStrategy=merge - CSIDrivers []CSIDriverInfo `json:"csiDrivers" patchStrategy:"merge" patchMergeKey:"driver"` + Drivers []CSIDriverInfoStatus `json:"drivers" patchStrategy:"merge" patchMergeKey:"name"` +} + +// CSIDriverInfoStatus holds information about the status of one CSI driver installed on a node +type CSIDriverInfoStatus struct { + // name is the name of the CSI driver that this object refers to. + // This MUST be the same name returned by the CSI GetPluginName() call for + // that driver. + Name string `json:"name"` + + // available is a boolean representing whether the driver has been installed + // on this node or not. + Available bool `json:"available"` + + // volumePluginMechanism announces what mechanism underlies volume plugins. + // It is set by Kubelet. It is used by the attach/detach controller, which + // needs to know how to perform attachments. The allowed values are: + // * "in-tree": the volume operation (e.g., attach/detach) ought to be + // directly performed by the attach/detach controller. + // * "csi-plugin": the attach/detach controller ought to request + // the csi plugin to perform the volume operation rather than perform it directly. + VolumePluginMechanism VolumePluginMechanism `json:"volumePluginMechanism"` } -// CSIDriverInfo contains information about one CSI driver installed on a node. -type CSIDriverInfo struct { - // driver is the name of the CSI driver that this object refers to. +// CSIDriverInfoSpec holds information about the specification of one CSI driver installed on a node +type CSIDriverInfoSpec struct { + // name is the name of the CSI driver that this object refers to. // This MUST be the same name returned by the CSI GetPluginName() call for // that driver. - Driver string `json:"driver"` + Name string `json:"name"` // nodeID of the node from the driver point of view. // This field enables Kubernetes to communicate with storage systems that do diff --git a/cluster-autoscaler/vendor/k8s.io/csi-api/pkg/apis/csi/v1alpha1/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/csi-api/pkg/apis/csi/v1alpha1/zz_generated.deepcopy.go index aa9ccbbaae85..915ef995a01b 100644 --- a/cluster-autoscaler/vendor/k8s.io/csi-api/pkg/apis/csi/v1alpha1/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/csi-api/pkg/apis/csi/v1alpha1/zz_generated.deepcopy.go @@ -52,7 +52,7 @@ func (in *CSIDriver) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CSIDriverInfo) DeepCopyInto(out *CSIDriverInfo) { +func (in *CSIDriverInfoSpec) DeepCopyInto(out *CSIDriverInfoSpec) { *out = *in if in.TopologyKeys != nil { in, out := &in.TopologyKeys, &out.TopologyKeys @@ -62,12 +62,28 @@ func (in *CSIDriverInfo) DeepCopyInto(out *CSIDriverInfo) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverInfo. -func (in *CSIDriverInfo) DeepCopy() *CSIDriverInfo { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverInfoSpec. +func (in *CSIDriverInfoSpec) DeepCopy() *CSIDriverInfoSpec { if in == nil { return nil } - out := new(CSIDriverInfo) + out := new(CSIDriverInfoSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSIDriverInfoStatus) DeepCopyInto(out *CSIDriverInfoStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverInfoStatus. +func (in *CSIDriverInfoStatus) DeepCopy() *CSIDriverInfoStatus { + if in == nil { + return nil + } + out := new(CSIDriverInfoStatus) in.DeepCopyInto(out) return out } @@ -136,13 +152,8 @@ func (in *CSINodeInfo) DeepCopyInto(out *CSINodeInfo) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.CSIDrivers != nil { - in, out := &in.CSIDrivers, &out.CSIDrivers - *out = make([]CSIDriverInfo, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) return } @@ -196,3 +207,47 @@ func (in *CSINodeInfoList) DeepCopyObject() runtime.Object { } return nil } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSINodeInfoSpec) DeepCopyInto(out *CSINodeInfoSpec) { + *out = *in + if in.Drivers != nil { + in, out := &in.Drivers, &out.Drivers + *out = make([]CSIDriverInfoSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeInfoSpec. +func (in *CSINodeInfoSpec) DeepCopy() *CSINodeInfoSpec { + if in == nil { + return nil + } + out := new(CSINodeInfoSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSINodeInfoStatus) DeepCopyInto(out *CSINodeInfoStatus) { + *out = *in + if in.Drivers != nil { + in, out := &in.Drivers, &out.Drivers + *out = make([]CSIDriverInfoStatus, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeInfoStatus. 
+func (in *CSINodeInfoStatus) DeepCopy() *CSINodeInfoStatus { + if in == nil { + return nil + } + out := new(CSINodeInfoStatus) + in.DeepCopyInto(out) + return out +} diff --git a/cluster-autoscaler/vendor/k8s.io/csi-api/pkg/client/clientset/versioned/typed/csi/v1alpha1/csidriver.go b/cluster-autoscaler/vendor/k8s.io/csi-api/pkg/client/clientset/versioned/typed/csi/v1alpha1/csidriver.go index ffcee6193f1d..72c93f6fadc9 100644 --- a/cluster-autoscaler/vendor/k8s.io/csi-api/pkg/client/clientset/versioned/typed/csi/v1alpha1/csidriver.go +++ b/cluster-autoscaler/vendor/k8s.io/csi-api/pkg/client/clientset/versioned/typed/csi/v1alpha1/csidriver.go @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -72,10 +74,15 @@ func (c *cSIDrivers) Get(name string, options v1.GetOptions) (result *v1alpha1.C // List takes label and field selectors, and returns the list of CSIDrivers that match those selectors. func (c *cSIDrivers) List(opts v1.ListOptions) (result *v1alpha1.CSIDriverList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.CSIDriverList{} err = c.client.Get(). Resource("csidrivers"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *cSIDrivers) List(opts v1.ListOptions) (result *v1alpha1.CSIDriverList, // Watch returns a watch.Interface that watches the requested cSIDrivers. func (c *cSIDrivers) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("csidrivers"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *cSIDrivers) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *cSIDrivers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("csidrivers"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/csi-api/pkg/client/clientset/versioned/typed/csi/v1alpha1/csinodeinfo.go b/cluster-autoscaler/vendor/k8s.io/csi-api/pkg/client/clientset/versioned/typed/csi/v1alpha1/csinodeinfo.go index 8580127c02f1..3e5f73b0462a 100644 --- a/cluster-autoscaler/vendor/k8s.io/csi-api/pkg/client/clientset/versioned/typed/csi/v1alpha1/csinodeinfo.go +++ b/cluster-autoscaler/vendor/k8s.io/csi-api/pkg/client/clientset/versioned/typed/csi/v1alpha1/csinodeinfo.go @@ -19,6 +19,8 @@ limitations under the License. 
package v1alpha1 import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -37,6 +39,7 @@ type CSINodeInfosGetter interface { type CSINodeInfoInterface interface { Create(*v1alpha1.CSINodeInfo) (*v1alpha1.CSINodeInfo, error) Update(*v1alpha1.CSINodeInfo) (*v1alpha1.CSINodeInfo, error) + UpdateStatus(*v1alpha1.CSINodeInfo) (*v1alpha1.CSINodeInfo, error) Delete(name string, options *v1.DeleteOptions) error DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error Get(name string, options v1.GetOptions) (*v1alpha1.CSINodeInfo, error) @@ -72,10 +75,15 @@ func (c *cSINodeInfos) Get(name string, options v1.GetOptions) (result *v1alpha1 // List takes label and field selectors, and returns the list of CSINodeInfos that match those selectors. func (c *cSINodeInfos) List(opts v1.ListOptions) (result *v1alpha1.CSINodeInfoList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.CSINodeInfoList{} err = c.client.Get(). Resource("csinodeinfos"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +91,15 @@ func (c *cSINodeInfos) List(opts v1.ListOptions) (result *v1alpha1.CSINodeInfoLi // Watch returns a watch.Interface that watches the requested cSINodeInfos. func (c *cSINodeInfos) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("csinodeinfos"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -113,6 +126,21 @@ func (c *cSINodeInfos) Update(cSINodeInfo *v1alpha1.CSINodeInfo) (result *v1alph return } +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *cSINodeInfos) UpdateStatus(cSINodeInfo *v1alpha1.CSINodeInfo) (result *v1alpha1.CSINodeInfo, err error) { + result = &v1alpha1.CSINodeInfo{} + err = c.client.Put(). + Resource("csinodeinfos"). + Name(cSINodeInfo.Name). + SubResource("status"). + Body(cSINodeInfo). + Do(). + Into(result) + return +} + // Delete takes name of the cSINodeInfo and deletes it. Returns an error if one occurs. func (c *cSINodeInfos) Delete(name string, options *v1.DeleteOptions) error { return c.client.Delete(). @@ -125,9 +153,14 @@ func (c *cSINodeInfos) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *cSINodeInfos) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("csinodeinfos"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/cluster-autoscaler/vendor/k8s.io/csi-api/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/cluster-autoscaler/vendor/k8s.io/csi-api/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go index 820b3b44ab96..1d6d16473b82 100644 --- a/cluster-autoscaler/vendor/k8s.io/csi-api/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/cluster-autoscaler/vendor/k8s.io/csi-api/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -27,6 +27,7 @@ import ( versioned "k8s.io/csi-api/pkg/client/clientset/versioned" ) +// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer // SharedInformerFactory a small interface to allow for adding an informer without an import cycle @@ -35,4 +36,5 @@ type SharedInformerFactory interface { InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer } +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/cluster-autoscaler/vendor/k8s.io/csi-api/pkg/client/listers/csi/v1alpha1/BUILD b/cluster-autoscaler/vendor/k8s.io/csi-api/pkg/client/listers/csi/v1alpha1/BUILD index 527aba2aeb1b..66ec890113ea 100644 --- a/cluster-autoscaler/vendor/k8s.io/csi-api/pkg/client/listers/csi/v1alpha1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/csi-api/pkg/client/listers/csi/v1alpha1/BUILD @@ -1,19 +1,5 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) - go_library( name = "go_default_library", srcs = [ @@ -31,3 +17,17 @@ go_library( "//staging/src/k8s.io/csi-api/pkg/apis/csi/v1alpha1:go_default_library", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/klog/.travis.yml b/cluster-autoscaler/vendor/k8s.io/klog/.travis.yml index 0f508dae66ed..fc0d2caf33f0 100644 --- a/cluster-autoscaler/vendor/k8s.io/klog/.travis.yml +++ b/cluster-autoscaler/vendor/k8s.io/klog/.travis.yml @@ -1,5 +1,4 @@ language: go -go_import_path: k8s.io/klog dist: xenial go: - 1.9.x diff --git a/cluster-autoscaler/vendor/k8s.io/klog/CONTRIBUTING.md b/cluster-autoscaler/vendor/k8s.io/klog/CONTRIBUTING.md index 574a56abbbec..de4711513724 100644 --- a/cluster-autoscaler/vendor/k8s.io/klog/CONTRIBUTING.md +++ b/cluster-autoscaler/vendor/k8s.io/klog/CONTRIBUTING.md @@ -8,6 +8,10 @@ _As contributors and maintainers of this project, and in the interest of fosteri We have full documentation on how to get started contributing here: + + - [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests - [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing 
section](http://git.k8s.io/community/contributors/guide#contributing) - [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers @@ -16,7 +20,12 @@ We have full documentation on how to get started contributing here: - [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! + diff --git a/cluster-autoscaler/vendor/k8s.io/klog/README.md b/cluster-autoscaler/vendor/k8s.io/klog/README.md index 6cb6d1683752..a747f538a848 100644 --- a/cluster-autoscaler/vendor/k8s.io/klog/README.md +++ b/cluster-autoscaler/vendor/k8s.io/klog/README.md @@ -5,32 +5,6 @@ klog is a permanant fork of https://github.com/golang/glog. original README from ---- -How to use klog -=============== -- Replace imports for `github.com/golang/glog` with `k8s.io/klog` -- Use `klog.InitFlags(nil)` explicitly for initializing global flags as we no longer use `init()` method to register the flags -- You can now use `log-file` instead of `log-dir` for logging to a single file (See `examples/log_file/usage_log_file.go`) -- If you want to redirect everything logged using klog somewhere else (say syslog!), you can use `klog.SetOutput()` method and supply a `io.Writer`. (See `examples/set_output/usage_set_output.go`) -- For more logging conventions (See [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md)) - -### Coexisting with glog -This package can be used side by side with glog. [This example](examples/coexist_glog/coexist_glog.go) shows how to initialize and syncronize flags from the global `flag.CommandLine` FlagSet. In addition, the example makes use of stderr as combined output by setting `alsologtostderr` (or `logtostderr`) to `true`. - -## Community, discussion, contribution, and support - -Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/). - -You can reach the maintainers of this project at: - -- [Slack](https://kubernetes.slack.com/messages/sig-architecture) -- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture) - -### Code of conduct - -Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md). - ----- - glog ==== @@ -52,20 +26,20 @@ The comment from glog.go introduces the ideas: Error, Fatal, plus formatting variants such as Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. 
- + Basic examples: - + glog.Info("Prepare to repel boarders") - + glog.Fatalf("Initialization failed: %s", err) - + See the documentation for the V function for an explanation of these examples: - + if glog.V(2) { glog.Info("Starting transaction...") } - + glog.V(2).Infoln("Processed", nItems, "elements") diff --git a/cluster-autoscaler/vendor/k8s.io/klog/klog.go b/cluster-autoscaler/vendor/k8s.io/klog/klog.go index d1a88132bf24..13bcc81a756b 100644 --- a/cluster-autoscaler/vendor/k8s.io/klog/klog.go +++ b/cluster-autoscaler/vendor/k8s.io/klog/klog.go @@ -412,7 +412,7 @@ func InitFlags(flagset *flag.FlagSet) { flagset.StringVar(&logging.logFile, "log_file", "", "If non-empty, use this log file") flagset.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files") flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") - flagset.Var(&logging.verbosity, "v", "number for the log level verbosity") + flagset.Var(&logging.verbosity, "v", "log level for V logs") flagset.BoolVar(&logging.skipHeaders, "skip_headers", false, "If true, avoid header prefixes in the log messages") flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") @@ -739,9 +739,7 @@ func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoTo } data := buf.Bytes() if l.toStderr { - if s >= l.stderrThreshold.get() { - os.Stderr.Write(data) - } + os.Stderr.Write(data) } else { if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { os.Stderr.Write(data) diff --git a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/builder/openapi.go b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/builder/openapi.go index f48700d54544..072c8ec4ae5e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/builder/openapi.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/builder/openapi.go @@ -20,7 +20,6 @@ import ( "encoding/json" "fmt" "net/http" - "reflect" "strings" restful "github.com/emicklei/go-restful" @@ -58,7 +57,7 @@ func BuildOpenAPIDefinitionsForResource(model interface{}, config *common.Config o := newOpenAPI(config) // We can discard the return value of toSchema because all we care about is the side effect of calling it. // All the models created for this resource get added to o.swagger.Definitions - _, err := o.toSchema(getCanonicalTypeName(model)) + _, err := o.toSchema(util.GetCanonicalTypeName(model)) if err != nil { return nil, err } @@ -92,6 +91,7 @@ func newOpenAPI(config *common.Config) openAPI { SwaggerProps: spec.SwaggerProps{ Swagger: OpenAPIVersion, Definitions: spec.Definitions{}, + Responses: config.ResponseDefinitions, Paths: &spec.Paths{Paths: map[string]spec.PathItem{}}, Info: config.Info, }, @@ -135,21 +135,6 @@ func (o *openAPI) finalizeSwagger() (*spec.Swagger, error) { return o.swagger, nil } -func getCanonicalTypeName(model interface{}) string { - t := reflect.TypeOf(model) - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - if t.PkgPath() == "" { - return t.Name() - } - path := t.PkgPath() - if strings.Contains(path, "/vendor/") { - path = path[strings.Index(path, "/vendor/")+len("/vendor/"):] - } - return path + "." 
+ t.Name() -} - func (o *openAPI) buildDefinitionRecursively(name string) error { uniqueName, extensions := o.config.GetDefinitionName(name) if _, ok := o.swagger.Definitions[uniqueName]; ok { @@ -335,7 +320,7 @@ func (o *openAPI) buildOperations(route restful.Route, inPathCommonParamsMap map } func (o *openAPI) buildResponse(model interface{}, description string) (spec.Response, error) { - schema, err := o.toSchema(getCanonicalTypeName(model)) + schema, err := o.toSchema(util.GetCanonicalTypeName(model)) if err != nil { return spec.Response{}, err } @@ -413,7 +398,7 @@ func (o *openAPI) buildParameter(restParam restful.ParameterData, bodySample int case restful.BodyParameterKind: if bodySample != nil { ret.In = "body" - ret.Schema, err = o.toSchema(getCanonicalTypeName(bodySample)) + ret.Schema, err = o.toSchema(util.GetCanonicalTypeName(bodySample)) return ret, err } else { // There is not enough information in the body parameter to build the definition. diff --git a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/common/common.go b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/common/common.go index 0d235876deb4..7d5534b24ecd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/common/common.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/common/common.go @@ -59,6 +59,12 @@ type Config struct { // will show up as ... "responses" : {"default" : $DefaultResponse} in the spec. DefaultResponse *spec.Response + // ResponseDefinitions will be added to "responses" under the top-level swagger object. This is an object + // that holds responses definitions that can be used across operations. This property does not define + // global responses for all operations. For more info please refer: + // https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#fixed-fields + ResponseDefinitions map[string]spec.Response + // CommonResponses will be added as a response to all operation specs. This is a good place to add common // responses such as authorization failed. 
CommonResponses map[int]spec.Response diff --git a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go index a57dcd363f60..890a39399f66 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go @@ -196,20 +196,24 @@ func (d *Definitions) parseKind(s *openapi_v2.Schema, path *Path) (Schema, error } fields := map[string]Schema{} + fieldOrder := []string{} for _, namedSchema := range s.GetProperties().GetAdditionalProperties() { var err error - path := path.FieldPath(namedSchema.GetName()) - fields[namedSchema.GetName()], err = d.ParseSchema(namedSchema.GetValue(), &path) + name := namedSchema.GetName() + path := path.FieldPath(name) + fields[name], err = d.ParseSchema(namedSchema.GetValue(), &path) if err != nil { return nil, err } + fieldOrder = append(fieldOrder, name) } return &Kind{ BaseSchema: d.parseBaseSchema(s, path), RequiredFields: s.GetRequired(), Fields: fields, + FieldOrder: fieldOrder, }, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go index f26b5ef88104..46643aa50814 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go @@ -173,6 +173,8 @@ type Kind struct { RequiredFields []string // Maps field names to types. Fields map[string]Schema + // FieldOrder reports the canonical order for the fields. + FieldOrder []string } var _ Schema = &Kind{} diff --git a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/util/util.go b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/util/util.go index bcc0c4d4bb57..c5c42cd44cd4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/util/util.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/util/util.go @@ -16,7 +16,10 @@ limitations under the License. package util -import "strings" +import ( + "reflect" + "strings" +) // ToCanonicalName converts Golang package/type name into canonical OpenAPI name. // Examples: @@ -37,3 +40,20 @@ func ToCanonicalName(name string) string { } return strings.Join(nameParts, ".") } + +// GetCanonicalTypeName will find the canonical type name of a sample object, removing +// the "vendor" part of the path +func GetCanonicalTypeName(model interface{}) string { + t := reflect.TypeOf(model) + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.PkgPath() == "" { + return t.Name() + } + path := t.PkgPath() + if strings.Contains(path, "/vendor/") { + path = path[strings.Index(path, "/vendor/")+len("/vendor/"):] + } + return path + "." + t.Name() +} diff --git a/cluster-autoscaler/vendor/k8s.io/kube-proxy/config/v1alpha1/doc.go b/cluster-autoscaler/vendor/k8s.io/kube-proxy/config/v1alpha1/doc.go index d2743d6a8a70..5937b74cf0e7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-proxy/config/v1alpha1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-proxy/config/v1alpha1/doc.go @@ -15,5 +15,7 @@ limitations under the License. 
*/ // +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true // +groupName=kubeproxy.config.k8s.io + package v1alpha1 diff --git a/cluster-autoscaler/vendor/k8s.io/kube-proxy/config/v1alpha1/register.go b/cluster-autoscaler/vendor/k8s.io/kube-proxy/config/v1alpha1/register.go index 92d92c4c1cb9..16ed248fae09 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-proxy/config/v1alpha1/register.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-proxy/config/v1alpha1/register.go @@ -21,26 +21,20 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" ) -// GroupName is the group name use in this package +// GroupName is the group name used in this package const GroupName = "kubeproxy.config.k8s.io" // SchemeGroupVersion is group version used to register these objects var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} var ( - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder runtime.SchemeBuilder - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a global function that registers this API group & version to a scheme + AddToScheme = SchemeBuilder.AddToScheme ) -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addKnownTypes) -} - +// addKnownTypes registers known types to the given scheme func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &KubeProxyConfiguration{}, diff --git a/cluster-autoscaler/vendor/k8s.io/kubelet/config/v1beta1/register.go b/cluster-autoscaler/vendor/k8s.io/kubelet/config/v1beta1/register.go index c03c53ac584d..914dfb25fa20 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubelet/config/v1beta1/register.go +++ b/cluster-autoscaler/vendor/k8s.io/kubelet/config/v1beta1/register.go @@ -21,26 +21,20 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" ) -// GroupName is the group name use in this package +// GroupName is the group name used in this package const GroupName = "kubelet.config.k8s.io" // SchemeGroupVersion is group version used to register these objects var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a global function that registers this API group & version to a scheme + AddToScheme = SchemeBuilder.AddToScheme ) -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addKnownTypes) -} - -// Adds the list of known types to the given scheme. 
+// addKnownTypes registers known types to the given scheme func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &KubeletConfiguration{}, diff --git a/cluster-autoscaler/vendor/k8s.io/kubelet/config/v1beta1/types.go b/cluster-autoscaler/vendor/k8s.io/kubelet/config/v1beta1/types.go index 281298f029da..4abe387773d3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubelet/config/v1beta1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubelet/config/v1beta1/types.go @@ -292,9 +292,11 @@ type KubeletConfiguration struct { // Default: "4h" // +optional StreamingConnectionIdleTimeout metav1.Duration `json:"streamingConnectionIdleTimeout,omitempty"` - // nodeStatusUpdateFrequency is the frequency that kubelet posts node - // status to master. Note: be cautious when changing the constant, it - // must work with nodeMonitorGracePeriod in nodecontroller. + // nodeStatusUpdateFrequency is the frequency that kubelet computes node + // status. If node lease feature is not enabled, it is also the frequency that + // kubelet posts node status to master. + // Note: When node lease feature is not enabled, be cautious when changing the + // constant, it must work with nodeMonitorGracePeriod in nodecontroller. // Dynamic Kubelet Config (beta): If dynamically updating this field, consider that // it may impact node scalability, and also that the node controller's // nodeMonitorGracePeriod must be set to N*NodeStatusUpdateFrequency, @@ -303,6 +305,16 @@ type KubeletConfiguration struct { // Default: "10s" // +optional NodeStatusUpdateFrequency metav1.Duration `json:"nodeStatusUpdateFrequency,omitempty"` + // nodeStatusReportFrequency is the frequency that kubelet posts node + // status to master if node status does not change. Kubelet will ignore this + // frequency and post node status immediately if any change is detected. It is + // only used when node lease feature is enabled. nodeStatusReportFrequency's + // default value is 1m. But if nodeStatusUpdateFrequency is set explicitly, + // nodeStatusReportFrequency's default value will be set to + // nodeStatusUpdateFrequency for backward compatibility. + // Default: "1m" + // +optional + NodeStatusReportFrequency metav1.Duration `json:"nodeStatusReportFrequency,omitempty"` // nodeLeaseDurationSeconds is the duration the Kubelet will set on its corresponding Lease, // when the NodeLease feature is enabled. 
This feature provides an indicator of node // health by having the Kublet create and periodically renew a lease, named after the node, diff --git a/cluster-autoscaler/vendor/k8s.io/kubelet/config/v1beta1/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/kubelet/config/v1beta1/zz_generated.deepcopy.go index 37be9cb1843d..a924fac2ec34 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubelet/config/v1beta1/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/kubelet/config/v1beta1/zz_generated.deepcopy.go @@ -143,6 +143,7 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) { } out.StreamingConnectionIdleTimeout = in.StreamingConnectionIdleTimeout out.NodeStatusUpdateFrequency = in.NodeStatusUpdateFrequency + out.NodeStatusReportFrequency = in.NodeStatusReportFrequency out.ImageMinimumGCAge = in.ImageMinimumGCAge if in.ImageGCHighThresholdPercent != nil { in, out := &in.ImageGCHighThresholdPercent, &out.ImageGCHighThresholdPercent diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/BUILD index eab363f0b42d..b99cf2ca1559 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/BUILD @@ -36,7 +36,6 @@ go_library( "//pkg/util/iptables:go_default_library", "//pkg/util/ipvs:go_default_library", "//pkg/util/mount:go_default_library", - "//pkg/util/node:go_default_library", "//pkg/util/oom:go_default_library", "//pkg/util/resourcecontainer:go_default_library", "//pkg/util/sysctl:go_default_library", @@ -44,7 +43,6 @@ go_library( "//pkg/version/verflag:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/config:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json:go_default_library", @@ -63,70 +61,80 @@ go_library( "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/kube-proxy/config/v1alpha1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", "//vendor/k8s.io/utils/pointer:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:android": [ "//pkg/proxy/metrics:go_default_library", "//pkg/util/dbus:go_default_library", + "//pkg/util/node:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", ], "@io_bazel_rules_go//go/platform:darwin": [ "//pkg/proxy/metrics:go_default_library", "//pkg/util/dbus:go_default_library", + "//pkg/util/node:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", ], "@io_bazel_rules_go//go/platform:dragonfly": [ "//pkg/proxy/metrics:go_default_library", "//pkg/util/dbus:go_default_library", + 
"//pkg/util/node:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", ], "@io_bazel_rules_go//go/platform:freebsd": [ "//pkg/proxy/metrics:go_default_library", "//pkg/util/dbus:go_default_library", + "//pkg/util/node:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", ], "@io_bazel_rules_go//go/platform:linux": [ "//pkg/proxy/metrics:go_default_library", "//pkg/util/dbus:go_default_library", + "//pkg/util/node:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", ], "@io_bazel_rules_go//go/platform:nacl": [ "//pkg/proxy/metrics:go_default_library", "//pkg/util/dbus:go_default_library", + "//pkg/util/node:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", ], "@io_bazel_rules_go//go/platform:netbsd": [ "//pkg/proxy/metrics:go_default_library", "//pkg/util/dbus:go_default_library", + "//pkg/util/node:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", ], "@io_bazel_rules_go//go/platform:openbsd": [ "//pkg/proxy/metrics:go_default_library", "//pkg/util/dbus:go_default_library", + "//pkg/util/node:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", ], "@io_bazel_rules_go//go/platform:plan9": [ "//pkg/proxy/metrics:go_default_library", "//pkg/util/dbus:go_default_library", + "//pkg/util/node:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", ], "@io_bazel_rules_go//go/platform:solaris": [ "//pkg/proxy/metrics:go_default_library", "//pkg/util/dbus:go_default_library", + "//pkg/util/node:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", ], @@ -134,6 +142,7 @@ go_library( "//pkg/proxy/winkernel:go_default_library", "//pkg/proxy/winuserspace:go_default_library", "//pkg/util/netsh:go_default_library", + "//pkg/util/node:go_default_library", "//pkg/windows/service:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/conntrack.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/conntrack.go index dfb7efb94f59..f3b7776e3615 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/conntrack.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/conntrack.go @@ -22,7 +22,7 @@ import ( "strconv" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/sysctl" @@ -49,7 +49,7 @@ func (rct realConntracker) SetMax(max int) error { if err := rct.setIntSysCtl("nf_conntrack_max", max); err != nil { return err } - glog.Infof("Setting nf_conntrack_max to %d", max) + klog.Infof("Setting nf_conntrack_max to %d", max) // Linux does not support writing to /sys/module/nf_conntrack/parameters/hashsize // when the writer 
process is not in the initial network namespace @@ -80,7 +80,7 @@ func (rct realConntracker) SetMax(max int) error { return readOnlySysFSError } // TODO: generify this and sysctl to a new sysfs.WriteInt() - glog.Infof("Setting conntrack hashsize to %d", max/4) + klog.Infof("Setting conntrack hashsize to %d", max/4) return writeIntStringFile("/sys/module/nf_conntrack/parameters/hashsize", max/4) } @@ -95,9 +95,12 @@ func (rct realConntracker) SetTCPCloseWaitTimeout(seconds int) error { func (realConntracker) setIntSysCtl(name string, value int) error { entry := "net/netfilter/" + name - glog.Infof("Set sysctl '%v' to %v", entry, value) - if err := sysctl.New().SetSysctl(entry, value); err != nil { - return err + sys := sysctl.New() + if val, _ := sys.GetSysctl(entry); val != value { + klog.Infof("Set sysctl '%v' to %v", entry, value) + if err := sys.SetSysctl(entry, value); err != nil { + return err + } } return nil } @@ -109,7 +112,7 @@ func isSysFSWritable() (bool, error) { m := mount.New("" /* default mount path */) mountPoints, err := m.List() if err != nil { - glog.Errorf("failed to list mount points: %v", err) + klog.Errorf("failed to list mount points: %v", err) return false, err } @@ -121,7 +124,7 @@ func isSysFSWritable() (bool, error) { if len(mountPoint.Opts) > 0 && mountPoint.Opts[0] == permWritable { return true, nil } - glog.Errorf("sysfs is not writable: %+v (mount options are %v)", + klog.Errorf("sysfs is not writable: %+v (mount options are %v)", mountPoint, mountPoint.Opts) return false, readOnlySysFSError } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server.go index 7ee8e15d42c8..04189b98d30f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server.go @@ -22,7 +22,6 @@ import ( "errors" "fmt" "io/ioutil" - "net" "net/http" "os" goruntime "runtime" @@ -31,7 +30,6 @@ import ( "k8s.io/api/core/v1" apimachineryconfig "k8s.io/apimachinery/pkg/apis/config" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/runtime/serializer/json" @@ -67,7 +65,6 @@ import ( utilipset "k8s.io/kubernetes/pkg/util/ipset" utiliptables "k8s.io/kubernetes/pkg/util/iptables" utilipvs "k8s.io/kubernetes/pkg/util/ipvs" - utilnode "k8s.io/kubernetes/pkg/util/node" "k8s.io/kubernetes/pkg/util/oom" "k8s.io/kubernetes/pkg/util/resourcecontainer" "k8s.io/kubernetes/pkg/version" @@ -75,10 +72,10 @@ import ( "k8s.io/utils/exec" utilpointer "k8s.io/utils/pointer" - "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" "github.com/spf13/cobra" "github.com/spf13/pflag" + "k8s.io/klog" ) const ( @@ -116,6 +113,9 @@ type Options struct { scheme *runtime.Scheme codecs serializer.CodecFactory + + // hostnameOverride, if set from the command line flag, takes precedence over the `HostnameOverride` value from the config file + hostnameOverride string } // AddFlags adds flags to fs and binds them to options. 
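The conntrack.go hunk above replaces an unconditional sysctl write with a compare-then-set: `setIntSysCtl` now reads the current value via `GetSysctl` and only writes (and logs) when it differs, so hosts that already carry the desired `nf_conntrack_max` are left untouched. Below is a minimal standalone sketch of that pattern; the raw `/proc/sys` file handling is an illustrative stand-in for kube-proxy's `sysctl` package, not the actual vendored code.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"strconv"
	"strings"
)

// setSysctlIfChanged mirrors the new setIntSysCtl behavior: read the current
// value first and skip the write entirely when it already matches, so a
// read-only /proc/sys only produces an error if a change is actually needed.
func setSysctlIfChanged(path string, value int) error {
	raw, err := ioutil.ReadFile(path)
	if err != nil {
		return err
	}
	current, err := strconv.Atoi(strings.TrimSpace(string(raw)))
	if err == nil && current == value {
		return nil // already at the desired value; nothing to do
	}
	fmt.Printf("Set sysctl '%s' to %d\n", path, value)
	return ioutil.WriteFile(path, []byte(strconv.Itoa(value)), 0644)
}

func main() {
	if err := setSysctlIfChanged("/proc/sys/net/netfilter/nf_conntrack_max", 131072); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```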
@@ -140,7 +140,7 @@ func (o *Options) AddFlags(fs *pflag.FlagSet) { fs.MarkDeprecated("resource-container", "This feature will be removed in a later release.") fs.StringVar(&o.config.ClientConnection.Kubeconfig, "kubeconfig", o.config.ClientConnection.Kubeconfig, "Path to kubeconfig file with authorization information (the master location is set by the master flag).") fs.Var(utilflag.PortRangeVar{Val: &o.config.PortRange}, "proxy-port-range", "Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) that may be consumed in order to proxy service traffic. If (unspecified, 0, or 0-0) then ports will be randomly chosen.") - fs.StringVar(&o.config.HostnameOverride, "hostname-override", o.config.HostnameOverride, "If non-empty, will use this string as identification instead of the actual hostname.") + fs.StringVar(&o.hostnameOverride, "hostname-override", o.hostnameOverride, "If non-empty, will use this string as identification instead of the actual hostname.") fs.Var(&o.config.Mode, "proxy-mode", "Which proxy mode to use: 'userspace' (older) or 'iptables' (faster) or 'ipvs' (experimental). If blank, use the best-available proxy (currently iptables). If the iptables proxy is selected, regardless of how, but the system's kernel or iptables versions are insufficient, this always falls back to the userspace proxy.") fs.Int32Var(o.config.IPTables.MasqueradeBit, "iptables-masquerade-bit", utilpointer.Int32PtrDerefOr(o.config.IPTables.MasqueradeBit, 14), "If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31].") fs.DurationVar(&o.config.IPTables.SyncPeriod.Duration, "iptables-sync-period", o.config.IPTables.SyncPeriod.Duration, "The maximum interval of how often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.") @@ -191,7 +191,7 @@ func NewOptions() *Options { // Complete completes all the required options. func (o *Options) Complete() error { if len(o.ConfigFile) == 0 && len(o.WriteConfigTo) == 0 { - glog.Warning("WARNING: all flags other than --config, --write-config-to, and --cleanup are deprecated. Please begin using a config file ASAP.") + klog.Warning("WARNING: all flags other than --config, --write-config-to, and --cleanup are deprecated. Please begin using a config file ASAP.") o.applyDeprecatedHealthzPortToConfig() } @@ -204,14 +204,31 @@ func (o *Options) Complete() error { } } - err := utilfeature.DefaultFeatureGate.SetFromMap(o.config.FeatureGates) - if err != nil { + if err := o.processHostnameOverrideFlag(); err != nil { + return err + } + + if err := utilfeature.DefaultFeatureGate.SetFromMap(o.config.FeatureGates); err != nil { return err } return nil } +// processHostnameOverrideFlag processes hostname-override flag +func (o *Options) processHostnameOverrideFlag() error { + // Check if hostname-override flag is set and use value since configFile always overrides + if len(o.hostnameOverride) > 0 { + hostName := strings.TrimSpace(o.hostnameOverride) + if len(hostName) == 0 { + return fmt.Errorf("empty hostname-override is invalid") + } + o.config.HostnameOverride = strings.ToLower(hostName) + } + + return nil +} + // Validate validates all the required options. 
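The `processHostnameOverrideFlag` hunk just above implements flag-over-config precedence: `--hostname-override` is bound to a private `hostnameOverride` field rather than directly to the config struct, and only a non-empty flag value (trimmed and lower-cased) replaces whatever the config file loaded. A rough self-contained sketch of that pattern, with hypothetical names standing in for the kube-proxy types:

```go
package main

import (
	"flag"
	"fmt"
	"strings"
)

type config struct {
	HostnameOverride string // may have been populated from a config file
}

// applyHostnameOverride applies the flag value only when the flag was set,
// so a config-file value survives unless explicitly overridden.
func applyHostnameOverride(cfg *config, flagValue string) error {
	if len(flagValue) == 0 {
		return nil // flag unset: keep the config-file value
	}
	hostName := strings.TrimSpace(flagValue)
	if len(hostName) == 0 {
		return fmt.Errorf("empty hostname-override is invalid")
	}
	cfg.HostnameOverride = strings.ToLower(hostName)
	return nil
}

func main() {
	override := flag.String("hostname-override", "", "overrides the config file value")
	flag.Parse()

	cfg := &config{HostnameOverride: "from-config-file"}
	if err := applyHostnameOverride(cfg, *override); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("effective hostname:", cfg.HostnameOverride)
}
```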
func (o *Options) Validate(args []string) error { if len(args) != 0 { @@ -263,7 +280,7 @@ func (o *Options) writeConfigFile() error { return err } - glog.Infof("Wrote configuration to: %s\n", o.WriteConfigTo) + klog.Infof("Wrote configuration to: %s\n", o.WriteConfigTo) return nil } @@ -348,23 +365,23 @@ with the apiserver API to configure the proxy.`, utilflag.PrintFlags(cmd.Flags()) if err := initForOS(opts.WindowsService); err != nil { - glog.Fatalf("failed OS init: %v", err) + klog.Fatalf("failed OS init: %v", err) } if err := opts.Complete(); err != nil { - glog.Fatalf("failed complete: %v", err) + klog.Fatalf("failed complete: %v", err) } if err := opts.Validate(args); err != nil { - glog.Fatalf("failed validate: %v", err) + klog.Fatalf("failed validate: %v", err) } - glog.Fatal(opts.Run()) + klog.Fatal(opts.Run()) }, } var err error opts.config, err = opts.ApplyDefaults(opts.config) if err != nil { - glog.Fatalf("unable to create flag defaults: %v", err) + klog.Fatalf("unable to create flag defaults: %v", err) } opts.AddFlags(cmd.Flags()) @@ -409,7 +426,7 @@ func createClients(config apimachineryconfig.ClientConnectionConfiguration, mast var err error if len(config.Kubeconfig) == 0 && len(masterOverride) == 0 { - glog.Info("Neither kubeconfig file nor master URL was specified. Falling back to in-cluster config.") + klog.Info("Neither kubeconfig file nor master URL was specified. Falling back to in-cluster config.") kubeConfig, err = rest.InClusterConfig() } else { // This creates a client, first loading any specified kubeconfig @@ -444,7 +461,7 @@ func createClients(config apimachineryconfig.ClientConnectionConfiguration, mast // Run runs the specified ProxyServer. This should never exit (unless CleanupAndExit is set). func (s *ProxyServer) Run() error { // To help debugging, immediately log version - glog.Infof("Version: %+v", version.Get()) + klog.Infof("Version: %+v", version.Get()) // remove iptables rules and exit if s.CleanupAndExit { encounteredError := userspace.CleanupLeftovers(s.IptInterface) @@ -461,16 +478,16 @@ func (s *ProxyServer) Run() error { if s.OOMScoreAdj != nil { oomAdjuster = oom.NewOOMAdjuster() if err := oomAdjuster.ApplyOOMScoreAdj(0, int(*s.OOMScoreAdj)); err != nil { - glog.V(2).Info(err) + klog.V(2).Info(err) } } if len(s.ResourceContainer) != 0 { // Run in its own container. 
if err := resourcecontainer.RunInResourceContainer(s.ResourceContainer); err != nil { - glog.Warningf("Failed to start in resource-only container %q: %v", s.ResourceContainer, err) + klog.Warningf("Failed to start in resource-only container %q: %v", s.ResourceContainer, err) } else { - glog.V(2).Infof("Running in resource-only container %q", s.ResourceContainer) + klog.V(2).Infof("Running in resource-only container %q", s.ResourceContainer) } } @@ -578,7 +595,7 @@ func getConntrackMax(config kubeproxyconfig.KubeProxyConntrackConfiguration) (in if config.MaxPerCore != nil && *config.MaxPerCore > 0 { return -1, fmt.Errorf("invalid config: Conntrack Max and Conntrack MaxPerCore are mutually exclusive") } - glog.V(3).Infof("getConntrackMax: using absolute conntrack-max (deprecated)") + klog.V(3).Infof("getConntrackMax: using absolute conntrack-max (deprecated)") return int(*config.Max), nil } if config.MaxPerCore != nil && *config.MaxPerCore > 0 { @@ -588,26 +605,11 @@ func getConntrackMax(config kubeproxyconfig.KubeProxyConntrackConfiguration) (in } scaled := int(*config.MaxPerCore) * goruntime.NumCPU() if scaled > floor { - glog.V(3).Infof("getConntrackMax: using scaled conntrack-max-per-core") + klog.V(3).Infof("getConntrackMax: using scaled conntrack-max-per-core") return scaled, nil } - glog.V(3).Infof("getConntrackMax: using conntrack-min") + klog.V(3).Infof("getConntrackMax: using conntrack-min") return floor, nil } return 0, nil } - -func getNodeIP(client clientset.Interface, hostname string) net.IP { - var nodeIP net.IP - node, err := client.CoreV1().Nodes().Get(hostname, metav1.GetOptions{}) - if err != nil { - glog.Warningf("Failed to retrieve node info: %v", err) - return nil - } - nodeIP, err = utilnode.GetNodeHostIP(node) - if err != nil { - glog.Warningf("Failed to retrieve node IP: %v", err) - return nil - } - return nodeIP -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_others.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_others.go index d290357994a2..a92c1492f5d6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_others.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_others.go @@ -48,7 +48,7 @@ import ( utilsysctl "k8s.io/kubernetes/pkg/util/sysctl" "k8s.io/utils/exec" - "github.com/golang/glog" + "k8s.io/klog" ) // NewProxyServer returns a new ProxyServer. @@ -75,7 +75,7 @@ func newProxyServer( protocol := utiliptables.ProtocolIpv4 if net.ParseIP(config.BindAddress).To4() == nil { - glog.V(0).Infof("IPv6 bind address (%s), assume IPv6 operation", config.BindAddress) + klog.V(0).Infof("IPv6 bind address (%s), assume IPv6 operation", config.BindAddress) protocol = utiliptables.ProtocolIpv6 } @@ -142,10 +142,10 @@ func newProxyServer( proxyMode := getProxyMode(string(config.Mode), iptInterface, kernelHandler, ipsetInterface, iptables.LinuxKernelCompatTester{}) nodeIP := net.ParseIP(config.BindAddress) if nodeIP.IsUnspecified() { - nodeIP = getNodeIP(client, hostname) + nodeIP = utilnode.GetNodeIP(client, hostname) } if proxyMode == proxyModeIPTables { - glog.V(0).Info("Using iptables Proxier.") + klog.V(0).Info("Using iptables Proxier.") if config.IPTables.MasqueradeBit == nil { // MasqueradeBit must be specified or defaulted. return nil, fmt.Errorf("unable to read IPTables MasqueradeBit from config") @@ -175,7 +175,7 @@ func newProxyServer( serviceEventHandler = proxierIPTables endpointsEventHandler = proxierIPTables // No turning back. 
Remove artifacts that might still exist from the userspace Proxier. - glog.V(0).Info("Tearing down inactive rules.") + klog.V(0).Info("Tearing down inactive rules.") // TODO this has side effects that should only happen when Run() is invoked. userspace.CleanupLeftovers(iptInterface) // IPVS Proxier will generate some iptables rules, need to clean them before switching to other proxy mode. @@ -186,7 +186,7 @@ func newProxyServer( ipvs.CleanupLeftovers(ipvsInterface, iptInterface, ipsetInterface, cleanupIPVS) } } else if proxyMode == proxyModeIPVS { - glog.V(0).Info("Using ipvs Proxier.") + klog.V(0).Info("Using ipvs Proxier.") proxierIPVS, err := ipvs.NewProxier( iptInterface, ipvsInterface, @@ -213,12 +213,12 @@ func newProxyServer( proxier = proxierIPVS serviceEventHandler = proxierIPVS endpointsEventHandler = proxierIPVS - glog.V(0).Info("Tearing down inactive rules.") + klog.V(0).Info("Tearing down inactive rules.") // TODO this has side effects that should only happen when Run() is invoked. userspace.CleanupLeftovers(iptInterface) iptables.CleanupLeftovers(iptInterface) } else { - glog.V(0).Info("Using userspace Proxier.") + klog.V(0).Info("Using userspace Proxier.") // This is a proxy.LoadBalancer which NewProxier needs but has methods we don't need for // our config.EndpointsConfigHandler. loadBalancer := userspace.NewLoadBalancerRR() @@ -244,7 +244,7 @@ func newProxyServer( proxier = proxierUserspace // Remove artifacts from the iptables and ipvs Proxier, if not on Windows. - glog.V(0).Info("Tearing down inactive rules.") + klog.V(0).Info("Tearing down inactive rules.") // TODO this has side effects that should only happen when Run() is invoked. iptables.CleanupLeftovers(iptInterface) // IPVS Proxier will generate some iptables rules, need to clean them before switching to other proxy mode. @@ -292,7 +292,7 @@ func getProxyMode(proxyMode string, iptver iptables.IPTablesVersioner, khandle i case proxyModeIPVS: return tryIPVSProxy(iptver, khandle, ipsetver, kcompat) } - glog.Warningf("Flag proxy-mode=%q unknown, assuming iptables proxy", proxyMode) + klog.Warningf("Flag proxy-mode=%q unknown, assuming iptables proxy", proxyMode) return tryIPTablesProxy(iptver, kcompat) } @@ -309,7 +309,7 @@ func tryIPVSProxy(iptver iptables.IPTablesVersioner, khandle ipvs.KernelHandler, } // Try to fallback to iptables before falling back to userspace - glog.V(1).Infof("Can't use ipvs proxier, trying iptables proxier") + klog.V(1).Infof("Can't use ipvs proxier, trying iptables proxier") return tryIPTablesProxy(iptver, kcompat) } @@ -324,6 +324,6 @@ func tryIPTablesProxy(iptver iptables.IPTablesVersioner, kcompat iptables.Kernel return proxyModeIPTables } // Fallback. - glog.V(1).Infof("Can't use iptables proxy, using userspace proxier") + klog.V(1).Infof("Can't use iptables proxy, using userspace proxier") return proxyModeUserspace } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_windows.go index 7a501ebe2b93..5ef2ce161824 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_windows.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_windows.go @@ -42,7 +42,7 @@ import ( utilnode "k8s.io/kubernetes/pkg/util/node" "k8s.io/utils/exec" - "github.com/golang/glog" + "k8s.io/klog" ) // NewProxyServer returns a new ProxyServer. 
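Most hunks in this file, and across the rest of this diff, are the mechanical migration from `github.com/golang/glog` to `k8s.io/klog`: klog keeps glog's API surface, so each call site only changes its package identifier. A small hedged usage example follows, assuming the vendored klog of this vintage still self-registers its flags on the global flag set the way glog did (which the globalflags.go hunk later in this diff relies on):

```go
package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// klog at this vintage registers -v, -logtostderr, -log_file, etc. on
	// flag.CommandLine in its init(), exactly as glog did.
	flag.Set("logtostderr", "true")
	flag.Set("v", "2")
	flag.Parse()

	klog.Info("Using iptables Proxier.")              // was glog.Info(...)
	klog.V(2).Infof("verbosity-gated detail: %d", 42) // was glog.V(2).Infof(...)
	klog.Flush()
}
```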
@@ -99,7 +99,7 @@ func newProxyServer(config *proxyconfigapi.KubeProxyConfiguration, cleanupAndExi proxyMode := getProxyMode(string(config.Mode), winkernel.WindowsKernelCompatTester{}) if proxyMode == proxyModeKernelspace { - glog.V(0).Info("Using Kernelspace Proxier.") + klog.V(0).Info("Using Kernelspace Proxier.") proxierKernelspace, err := winkernel.NewProxier( config.IPTables.SyncPeriod.Duration, config.IPTables.MinSyncPeriod.Duration, @@ -107,7 +107,7 @@ func newProxyServer(config *proxyconfigapi.KubeProxyConfiguration, cleanupAndExi int(*config.IPTables.MasqueradeBit), config.ClusterCIDR, hostname, - getNodeIP(client, hostname), + utilnode.GetNodeIP(client, hostname), recorder, healthzUpdater, ) @@ -118,7 +118,7 @@ func newProxyServer(config *proxyconfigapi.KubeProxyConfiguration, cleanupAndExi endpointsEventHandler = proxierKernelspace serviceEventHandler = proxierKernelspace } else { - glog.V(0).Info("Using userspace Proxier.") + klog.V(0).Info("Using userspace Proxier.") execer := exec.New() var netshInterface utilnetsh.Interface netshInterface = utilnetsh.New(execer) @@ -143,7 +143,7 @@ func newProxyServer(config *proxyconfigapi.KubeProxyConfiguration, cleanupAndExi } proxier = proxierUserspace serviceEventHandler = proxierUserspace - glog.V(0).Info("Tearing down pure-winkernel proxy rules.") + klog.V(0).Info("Tearing down pure-winkernel proxy rules.") winkernel.CleanupLeftovers() } @@ -182,13 +182,13 @@ func tryWinKernelSpaceProxy(kcompat winkernel.KernelCompatTester) string { // guaranteed false on error, error only necessary for debugging useWinKerelProxy, err := winkernel.CanUseWinKernelProxier(kcompat) if err != nil { - glog.Errorf("Can't determine whether to use windows kernel proxy, using userspace proxier: %v", err) + klog.Errorf("Can't determine whether to use windows kernel proxy, using userspace proxier: %v", err) return proxyModeUserspace } if useWinKerelProxy { return proxyModeKernelspace } // Fallback. 
- glog.V(1).Infof("Can't use winkernel proxy, using userspace proxier") + klog.V(1).Infof("Can't use winkernel proxy, using userspace proxier") return proxyModeUserspace } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/BUILD index aa1948c9dfb4..309d7d07e0b0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/BUILD @@ -29,8 +29,6 @@ go_library( "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/capabilities:go_default_library", - "//pkg/client/chaosclient:go_default_library", - "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers:go_default_library", "//pkg/credentialprovider:go_default_library", "//pkg/credentialprovider/aws:go_default_library", @@ -69,7 +67,7 @@ go_library( "//pkg/version:go_default_library", "//pkg/version/verflag:go_default_library", "//pkg/volume:go_default_library", - "//pkg/volume/aws_ebs:go_default_library", + "//pkg/volume/awsebs:go_default_library", "//pkg/volume/azure_dd:go_default_library", "//pkg/volume/azure_file:go_default_library", "//pkg/volume/cephfs:go_default_library", @@ -77,11 +75,11 @@ go_library( "//pkg/volume/configmap:go_default_library", "//pkg/volume/csi:go_default_library", "//pkg/volume/downwardapi:go_default_library", - "//pkg/volume/empty_dir:go_default_library", + "//pkg/volume/emptydir:go_default_library", "//pkg/volume/fc:go_default_library", "//pkg/volume/flexvolume:go_default_library", "//pkg/volume/flocker:go_default_library", - "//pkg/volume/gce_pd:go_default_library", + "//pkg/volume/gcepd:go_default_library", "//pkg/volume/git_repo:go_default_library", "//pkg/volume/glusterfs:go_default_library", "//pkg/volume/host_path:go_default_library", @@ -121,16 +119,17 @@ go_library( "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/client-go/util/cert:go_default_library", "//staging/src/k8s.io/client-go/util/certificate:go_default_library", + "//staging/src/k8s.io/cloud-provider:go_default_library", "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library", "//staging/src/k8s.io/kubelet/config/v1beta1:go_default_library", "//vendor/github.com/coreos/go-systemd/daemon:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/golang.org/x/exp/inotify:go_default_library", + "//vendor/github.com/sigma/go-inotify:go_default_library", ], "@io_bazel_rules_go//go/platform:windows": [ "//pkg/windows/service:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/BUILD index 42ba81e52838..d6e0d70f4a05 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/BUILD @@ -23,6 +23,7 @@ go_library( "//pkg/credentialprovider/azure:go_default_library", "//pkg/credentialprovider/gcp:go_default_library", "//pkg/features:go_default_library", + "//pkg/kubelet/apis:go_default_library", "//pkg/kubelet/apis/config:go_default_library", 
"//pkg/kubelet/apis/config/scheme:go_default_library", "//pkg/kubelet/apis/config/validation:go_default_library", @@ -33,12 +34,13 @@ go_library( "//pkg/util/taints:go_default_library", "//pkg/version/verflag:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/flag:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/logs:go_default_library", "//staging/src/k8s.io/kubelet/config/v1beta1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/google/cadvisor/container/common:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/globalflags.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/globalflags.go index db85071ffade..144837731e1f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/globalflags.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/globalflags.go @@ -29,7 +29,7 @@ import ( "k8s.io/kubernetes/pkg/version/verflag" // ensure libs have a chance to globally register their flags - _ "github.com/golang/glog" + _ "k8s.io/klog" _ "k8s.io/kubernetes/pkg/credentialprovider/azure" _ "k8s.io/kubernetes/pkg/credentialprovider/gcp" ) @@ -84,9 +84,6 @@ func addCredentialProviderFlags(fs *pflag.FlagSet) { global := pflag.CommandLine local := pflag.NewFlagSet(os.Args[0], pflag.ExitOnError) - // Note this is deprecated in the library that provides it, so we just allow that deprecation - // notice to pass through our registration here. - pflagRegister(global, local, "google-json-key") // TODO(#58034): This is not a static file, so it's not quite as straightforward as --google-json-key. // We need to figure out how ACR users can dynamically provide pull credentials before we can deprecate this. 
pflagRegister(global, local, "azure-container-registry-config") @@ -94,7 +91,7 @@ func addCredentialProviderFlags(fs *pflag.FlagSet) { fs.AddFlagSet(local) } -// addGlogFlags adds flags from github.com/golang/glog +// addGlogFlags adds flags from k8s.io/klog func addGlogFlags(fs *pflag.FlagSet) { // lookup flags in global flag set and re-register the values with our flagset global := flag.CommandLine @@ -107,6 +104,7 @@ func addGlogFlags(fs *pflag.FlagSet) { register(global, local, "vmodule") register(global, local, "log_backtrace_at") register(global, local, "log_dir") + register(global, local, "log_file") fs.AddFlagSet(local) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/options.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/options.go index 85d14f5a450a..47f9eda3db84 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/options.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/options.go @@ -27,11 +27,14 @@ import ( "github.com/spf13/pflag" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/apiserver/pkg/util/flag" + "k8s.io/klog" "k8s.io/kubelet/config/v1beta1" "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/features" + kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" kubeletscheme "k8s.io/kubernetes/pkg/kubelet/apis/config/scheme" kubeletconfigvalidation "k8s.io/kubernetes/pkg/kubelet/apis/config/validation" @@ -212,7 +215,7 @@ func NewKubeletFlags() *KubeletFlags { if runtime.GOOS == "linux" { remoteRuntimeEndpoint = "unix:///var/run/dockershim.sock" } else if runtime.GOOS == "windows" { - remoteRuntimeEndpoint = "tcp://localhost:3735" + remoteRuntimeEndpoint = "npipe:////./pipe/dockershim" } return &KubeletFlags{ @@ -250,9 +253,40 @@ func ValidateKubeletFlags(f *KubeletFlags) error { if f.NodeStatusMaxImages < -1 { return fmt.Errorf("invalid configuration: NodeStatusMaxImages (--node-status-max-images) must be -1 or greater") } + + unknownLabels := sets.NewString() + for k := range f.NodeLabels { + if isKubernetesLabel(k) && !kubeletapis.IsKubeletLabel(k) { + unknownLabels.Insert(k) + } + } + if len(unknownLabels) > 0 { + // TODO(liggitt): in 1.15, return an error + klog.Warningf("unknown 'kubernetes.io' or 'k8s.io' labels specified with --node-labels: %v", unknownLabels.List()) + klog.Warningf("in 1.15, --node-labels in the 'kubernetes.io' namespace must begin with an allowed prefix (%s) or be in the specifically allowed set (%s)", strings.Join(kubeletapis.KubeletLabelNamespaces(), ", "), strings.Join(kubeletapis.KubeletLabels(), ", ")) + } + return nil } +func isKubernetesLabel(key string) bool { + namespace := getLabelNamespace(key) + if namespace == "kubernetes.io" || strings.HasSuffix(namespace, ".kubernetes.io") { + return true + } + if namespace == "k8s.io" || strings.HasSuffix(namespace, ".k8s.io") { + return true + } + return false +} + +func getLabelNamespace(key string) string { + if parts := strings.SplitN(key, "/", 2); len(parts) == 2 { + return parts[0] + } + return "" +} + // NewKubeletConfiguration will create a new KubeletConfiguration with default values func NewKubeletConfiguration() (*kubeletconfig.KubeletConfiguration, error) { scheme, _, err := kubeletscheme.NewSchemeAndCodecs() @@ -368,7 +402,7 @@ func (f *KubeletFlags) AddFlags(mainfs *pflag.FlagSet) { fs.Var(&f.DynamicConfigDir, 
"dynamic-config-dir", "The Kubelet will use this directory for checkpointing downloaded configurations and tracking configuration health. The Kubelet will create this directory if it does not already exist. The path may be absolute or relative; relative paths start at the Kubelet's current working directory. Providing this flag enables dynamic Kubelet configuration. The DynamicKubeletConfig feature gate must be enabled to pass this flag; this gate currently defaults to true because the feature is beta.") - fs.BoolVar(&f.RegisterNode, "register-node", f.RegisterNode, "Register the node with the apiserver. If --kubeconfig is not provided, this flag is irrelevant, as the Kubelet won't have an apiserver to register with. Default=true.") + fs.BoolVar(&f.RegisterNode, "register-node", f.RegisterNode, "Register the node with the apiserver. If --kubeconfig is not provided, this flag is irrelevant, as the Kubelet won't have an apiserver to register with.") fs.Var(utiltaints.NewTaintsVar(&f.RegisterWithTaints), "register-with-taints", "Register the node with the given list of taints (comma separated \"=:\"). No-op if register-node is false.") fs.BoolVar(&f.Containerized, "containerized", f.Containerized, "Running kubelet in a container.") @@ -376,18 +410,18 @@ func (f *KubeletFlags) AddFlags(mainfs *pflag.FlagSet) { fs.StringVar(&f.ExperimentalMounterPath, "experimental-mounter-path", f.ExperimentalMounterPath, "[Experimental] Path of mounter binary. Leave empty to use the default mount.") fs.StringSliceVar(&f.AllowedUnsafeSysctls, "allowed-unsafe-sysctls", f.AllowedUnsafeSysctls, "Comma-separated whitelist of unsafe sysctls or unsafe sysctl patterns (ending in *). Use these at your own risk. Sysctls feature gate is enabled by default.") fs.BoolVar(&f.ExperimentalKernelMemcgNotification, "experimental-kernel-memcg-notification", f.ExperimentalKernelMemcgNotification, "If enabled, the kubelet will integrate with the kernel memcg notification to determine if memory eviction thresholds are crossed rather than polling.") - fs.StringVar(&f.RemoteRuntimeEndpoint, "container-runtime-endpoint", f.RemoteRuntimeEndpoint, "[Experimental] The endpoint of remote runtime service. Currently unix socket is supported on Linux, and tcp is supported on windows. Examples:'unix:///var/run/dockershim.sock', 'tcp://localhost:3735'") - fs.StringVar(&f.RemoteImageEndpoint, "image-service-endpoint", f.RemoteImageEndpoint, "[Experimental] The endpoint of remote image service. If not specified, it will be the same with container-runtime-endpoint by default. Currently unix socket is supported on Linux, and tcp is supported on windows. Examples:'unix:///var/run/dockershim.sock', 'tcp://localhost:3735'") + fs.StringVar(&f.RemoteRuntimeEndpoint, "container-runtime-endpoint", f.RemoteRuntimeEndpoint, "[Experimental] The endpoint of remote runtime service. Currently unix socket and tcp endpoints are supported on Linux, while npipe and tcp endpoints are supported on windows. Examples:'unix:///var/run/dockershim.sock', 'npipe:////./pipe/dockershim'") + fs.StringVar(&f.RemoteImageEndpoint, "image-service-endpoint", f.RemoteImageEndpoint, "[Experimental] The endpoint of remote image service. If not specified, it will be the same with container-runtime-endpoint by default. Currently unix socket and tcp endpoints are supported on Linux, while npipe and tcp endpoints are supported on windows. 
Examples:'unix:///var/run/dockershim.sock', 'npipe:////./pipe/dockershim'") fs.BoolVar(&f.ExperimentalCheckNodeCapabilitiesBeforeMount, "experimental-check-node-capabilities-before-mount", f.ExperimentalCheckNodeCapabilitiesBeforeMount, "[Experimental] if set true, the kubelet will check the underlying node for required components (binaries, etc.) before performing the mount") fs.BoolVar(&f.ExperimentalNodeAllocatableIgnoreEvictionThreshold, "experimental-allocatable-ignore-eviction", f.ExperimentalNodeAllocatableIgnoreEvictionThreshold, "When set to 'true', Hard Eviction Thresholds will be ignored while calculating Node Allocatable. See https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/ for more details. [default=false]") bindableNodeLabels := flag.ConfigurationMap(f.NodeLabels) - fs.Var(&bindableNodeLabels, "node-labels", " Labels to add when registering the node in the cluster. Labels must be key=value pairs separated by ','.") + fs.Var(&bindableNodeLabels, "node-labels", fmt.Sprintf(" Labels to add when registering the node in the cluster. Labels must be key=value pairs separated by ','. Labels in the 'kubernetes.io' namespace must begin with an allowed prefix (%s) or be in the specifically allowed set (%s)", strings.Join(kubeletapis.KubeletLabelNamespaces(), ", "), strings.Join(kubeletapis.KubeletLabels(), ", "))) fs.StringVar(&f.VolumePluginDir, "volume-plugin-dir", f.VolumePluginDir, "The full path of the directory in which to search for additional third party volume plugins") fs.StringVar(&f.LockFilePath, "lock-file", f.LockFilePath, " The path to file for kubelet to use as a lock file.") fs.BoolVar(&f.ExitOnLockContention, "exit-on-lock-contention", f.ExitOnLockContention, "Whether kubelet should exit upon lock-file contention.") fs.StringVar(&f.SeccompProfileRoot, "seccomp-profile-root", f.SeccompProfileRoot, " Directory path for seccomp profiles.") fs.StringVar(&f.BootstrapCheckpointPath, "bootstrap-checkpoint-path", f.BootstrapCheckpointPath, " Path to the directory where the checkpoints are stored") - fs.Int32Var(&f.NodeStatusMaxImages, "node-status-max-images", f.NodeStatusMaxImages, " The maximum number of images to report in Node.Status.Images. If -1 is specified, no cap will be applied. Default: 50") + fs.Int32Var(&f.NodeStatusMaxImages, "node-status-max-images", f.NodeStatusMaxImages, " The maximum number of images to report in Node.Status.Images. 
If -1 is specified, no cap will be applied.") // DEPRECATED FLAGS fs.StringVar(&f.BootstrapKubeconfig, "experimental-bootstrap-kubeconfig", f.BootstrapKubeconfig, "") diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/plugins.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/plugins.go index 4f3c39223de8..5f8ceed7b008 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/plugins.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/plugins.go @@ -26,7 +26,7 @@ import ( "k8s.io/utils/exec" // Volume plugins "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/aws_ebs" + "k8s.io/kubernetes/pkg/volume/awsebs" "k8s.io/kubernetes/pkg/volume/azure_dd" "k8s.io/kubernetes/pkg/volume/azure_file" "k8s.io/kubernetes/pkg/volume/cephfs" @@ -34,11 +34,11 @@ import ( "k8s.io/kubernetes/pkg/volume/configmap" "k8s.io/kubernetes/pkg/volume/csi" "k8s.io/kubernetes/pkg/volume/downwardapi" - "k8s.io/kubernetes/pkg/volume/empty_dir" + "k8s.io/kubernetes/pkg/volume/emptydir" "k8s.io/kubernetes/pkg/volume/fc" "k8s.io/kubernetes/pkg/volume/flexvolume" "k8s.io/kubernetes/pkg/volume/flocker" - "k8s.io/kubernetes/pkg/volume/gce_pd" + "k8s.io/kubernetes/pkg/volume/gcepd" "k8s.io/kubernetes/pkg/volume/git_repo" "k8s.io/kubernetes/pkg/volume/glusterfs" "k8s.io/kubernetes/pkg/volume/host_path" @@ -71,9 +71,9 @@ func ProbeVolumePlugins() []volume.VolumePlugin { // // Kubelet does not currently need to configure volume plugins. // If/when it does, see kube-controller-manager/app/plugins.go for example of using volume.VolumeConfig - allPlugins = append(allPlugins, aws_ebs.ProbeVolumePlugins()...) - allPlugins = append(allPlugins, empty_dir.ProbeVolumePlugins()...) - allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...) + allPlugins = append(allPlugins, awsebs.ProbeVolumePlugins()...) + allPlugins = append(allPlugins, emptydir.ProbeVolumePlugins()...) + allPlugins = append(allPlugins, gcepd.ProbeVolumePlugins()...) allPlugins = append(allPlugins, git_repo.ProbeVolumePlugins()...) allPlugins = append(allPlugins, host_path.ProbeVolumePlugins(volume.VolumeConfig{})...) allPlugins = append(allPlugins, nfs.ProbeVolumePlugins(volume.VolumeConfig{})...) 
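Stepping back to the options.go hunk earlier in this diff: the new `--node-labels` validation warns when a label key sits in the reserved `kubernetes.io`/`k8s.io` namespaces (including their subdomains) without being on the kubelet's allow list. The two helpers it relies on are small enough to sketch standalone; this is a runnable copy with a hypothetical demo harness, not the kubelet's build:

```go
package main

import (
	"fmt"
	"strings"
)

// getLabelNamespace returns the prefix before "/" in a label key, or ""
// for an unqualified key.
func getLabelNamespace(key string) string {
	if parts := strings.SplitN(key, "/", 2); len(parts) == 2 {
		return parts[0]
	}
	return ""
}

// isKubernetesLabel reports whether the key lives in the reserved
// kubernetes.io or k8s.io namespaces, matching subdomains as well.
func isKubernetesLabel(key string) bool {
	ns := getLabelNamespace(key)
	if ns == "kubernetes.io" || strings.HasSuffix(ns, ".kubernetes.io") {
		return true
	}
	return ns == "k8s.io" || strings.HasSuffix(ns, ".k8s.io")
}

func main() {
	for _, key := range []string{
		"beta.kubernetes.io/os",          // reserved namespace
		"node-role.kubernetes.io/worker", // reserved via subdomain
		"example.com/team",               // user namespace: never flagged
		"unqualified",                    // no namespace at all
	} {
		fmt.Printf("%-35s kubernetes label: %v\n", key, isKubernetesLabel(key))
	}
}
```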
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go index b5d73579ed32..04422bb89909 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go @@ -34,9 +34,9 @@ import ( "time" "github.com/coreos/go-systemd/daemon" - "github.com/golang/glog" "github.com/spf13/cobra" "github.com/spf13/pflag" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -56,14 +56,13 @@ import ( "k8s.io/client-go/tools/record" certutil "k8s.io/client-go/util/cert" "k8s.io/client-go/util/certificate" + cloudprovider "k8s.io/cloud-provider" csiclientset "k8s.io/csi-api/pkg/client/clientset/versioned" kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1" "k8s.io/kubernetes/cmd/kubelet/app/options" "k8s.io/kubernetes/pkg/api/legacyscheme" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/capabilities" - "k8s.io/kubernetes/pkg/client/chaosclient" - "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/credentialprovider" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet" @@ -112,7 +111,7 @@ func NewKubeletCommand(stopCh <-chan struct{}) *cobra.Command { kubeletConfig, err := options.NewKubeletConfiguration() // programmer error if err != nil { - glog.Fatal(err) + klog.Fatal(err) } cmd := &cobra.Command{ @@ -145,20 +144,20 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API // initial flag parse, since we disable cobra's flag parsing if err := cleanFlagSet.Parse(args); err != nil { cmd.Usage() - glog.Fatal(err) + klog.Fatal(err) } // check if there are non-flag arguments in the command line cmds := cleanFlagSet.Args() if len(cmds) > 0 { cmd.Usage() - glog.Fatalf("unknown command: %s", cmds[0]) + klog.Fatalf("unknown command: %s", cmds[0]) } // short-circuit on help help, err := cleanFlagSet.GetBool("help") if err != nil { - glog.Fatal(`"help" flag is non-bool, programmer error, please correct`) + klog.Fatal(`"help" flag is non-bool, programmer error, please correct`) } if help { cmd.Help() @@ -171,40 +170,40 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API // set feature gates from initial flags-based config if err := utilfeature.DefaultFeatureGate.SetFromMap(kubeletConfig.FeatureGates); err != nil { - glog.Fatal(err) + klog.Fatal(err) } // validate the initial KubeletFlags if err := options.ValidateKubeletFlags(kubeletFlags); err != nil { - glog.Fatal(err) + klog.Fatal(err) } if kubeletFlags.ContainerRuntime == "remote" && cleanFlagSet.Changed("pod-infra-container-image") { - glog.Warning("Warning: For remote container runtime, --pod-infra-container-image is ignored in kubelet, which should be set in that remote runtime instead") + klog.Warning("Warning: For remote container runtime, --pod-infra-container-image is ignored in kubelet, which should be set in that remote runtime instead") } // load kubelet config file, if provided if configFile := kubeletFlags.KubeletConfigFile; len(configFile) > 0 { kubeletConfig, err = loadConfigFile(configFile) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } // We must enforce flag precedence by re-parsing the command line into the new object. // This is necessary to preserve backwards-compatibility across binary upgrades. // See issue #56171 for more details. 
if err := kubeletConfigFlagPrecedence(kubeletConfig, args); err != nil { - glog.Fatal(err) + klog.Fatal(err) } // update feature gates based on new config if err := utilfeature.DefaultFeatureGate.SetFromMap(kubeletConfig.FeatureGates); err != nil { - glog.Fatal(err) + klog.Fatal(err) } } // We always validate the local configuration (command line + config file). // This is the default "last-known-good" config for dynamic config, and must always remain valid. if err := kubeletconfigvalidation.ValidateKubeletConfiguration(kubeletConfig); err != nil { - glog.Fatal(err) + klog.Fatal(err) } // use dynamic kubelet config, if enabled @@ -220,7 +219,7 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API return kubeletConfigFlagPrecedence(kc, args) }) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } // If we should just use our existing, local config, the controller will return a nil config if dynamicKubeletConfig != nil { @@ -228,7 +227,7 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API // Note: flag precedence was already enforced in the controller, prior to validation, // by our above transform function. Now we simply update feature gates from the new config. if err := utilfeature.DefaultFeatureGate.SetFromMap(kubeletConfig.FeatureGates); err != nil { - glog.Fatal(err) + klog.Fatal(err) } } } @@ -242,7 +241,7 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API // use kubeletServer to construct the default KubeletDeps kubeletDeps, err := UnsecuredDependencies(kubeletServer) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } // add the kubelet config controller to kubeletDeps @@ -251,15 +250,15 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API // start the experimental docker shim, if enabled if kubeletServer.KubeletFlags.ExperimentalDockershim { if err := RunDockershim(&kubeletServer.KubeletFlags, kubeletConfig, stopCh); err != nil { - glog.Fatal(err) + klog.Fatal(err) } return } // run the kubelet - glog.V(5).Infof("KubeletConfiguration: %#v", kubeletServer.KubeletConfiguration) + klog.V(5).Infof("KubeletConfiguration: %#v", kubeletServer.KubeletConfiguration) if err := Run(kubeletServer, kubeletDeps, stopCh); err != nil { - glog.Fatal(err) + klog.Fatal(err) } }, } @@ -362,7 +361,7 @@ func UnsecuredDependencies(s *options.KubeletServer) (*kubelet.Dependencies, err mounter := mount.New(s.ExperimentalMounterPath) var pluginRunner = exec.New() if s.Containerized { - glog.V(2).Info("Running kubelet in containerized mode") + klog.V(2).Info("Running kubelet in containerized mode") ne, err := nsenter.NewNsenter(nsenter.DefaultHostRootFsPath, exec.New()) if err != nil { return nil, err @@ -405,7 +404,7 @@ func UnsecuredDependencies(s *options.KubeletServer) (*kubelet.Dependencies, err // not be generated. 
func Run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan struct{}) error { // To help debugging, immediately log version - glog.Infof("Version: %+v", version.Get()) + klog.Infof("Version: %+v", version.Get()) if err := initForOS(s.KubeletFlags.WindowsService); err != nil { return fmt.Errorf("failed OS init: %v", err) } @@ -440,11 +439,11 @@ func setConfigz(cz *configz.Config, kc *kubeletconfiginternal.KubeletConfigurati func initConfigz(kc *kubeletconfiginternal.KubeletConfiguration) error { cz, err := configz.New("kubeletconfig") if err != nil { - glog.Errorf("unable to register configz: %s", err) + klog.Errorf("unable to register configz: %s", err) return err } if err := setConfigz(cz, kc); err != nil { - glog.Errorf("unable to register config: %s", err) + klog.Errorf("unable to register config: %s", err) return err } return nil @@ -457,12 +456,12 @@ func makeEventRecorder(kubeDeps *kubelet.Dependencies, nodeName types.NodeName) } eventBroadcaster := record.NewBroadcaster() kubeDeps.Recorder = eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: componentKubelet, Host: string(nodeName)}) - eventBroadcaster.StartLogging(glog.V(3).Infof) + eventBroadcaster.StartLogging(klog.V(3).Infof) if kubeDeps.EventClient != nil { - glog.V(4).Infof("Sending events to api server.") + klog.V(4).Infof("Sending events to api server.") eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeDeps.EventClient.Events("")}) } else { - glog.Warning("No api server defined - no events will be sent to API server.") + klog.Warning("No api server defined - no events will be sent to API server.") } } @@ -483,12 +482,12 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan } done := make(chan struct{}) if s.LockFilePath != "" { - glog.Infof("acquiring file lock on %q", s.LockFilePath) + klog.Infof("acquiring file lock on %q", s.LockFilePath) if err := flock.Acquire(s.LockFilePath); err != nil { return fmt.Errorf("unable to acquire file lock on %q: %v", s.LockFilePath, err) } if s.ExitOnLockContention { - glog.Infof("watching for inotify events for: %v", s.LockFilePath) + klog.Infof("watching for inotify events for: %v", s.LockFilePath) if err := watchForLockfileContention(s.LockFilePath, done); err != nil { return err } @@ -498,7 +497,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan // Register current configuration with /configz endpoint err = initConfigz(&s.KubeletConfiguration) if err != nil { - glog.Errorf("unable to register KubeletConfiguration with configz, error: %v", err) + klog.Errorf("unable to register KubeletConfiguration with configz, error: %v", err) } // About to get clients and such, detect standaloneMode @@ -521,9 +520,9 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan return err } if cloud == nil { - glog.V(2).Infof("No cloud provider specified: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile) + klog.V(2).Infof("No cloud provider specified: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile) } else { - glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile) + klog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile) } kubeDeps.Cloud = cloud } @@ -550,7 +549,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan 
kubeDeps.DynamicKubeClient = nil kubeDeps.EventClient = nil kubeDeps.HeartbeatClient = nil - glog.Warningf("standalone mode, no API client") + klog.Warningf("standalone mode, no API client") } else if kubeDeps.KubeClient == nil || kubeDeps.EventClient == nil || kubeDeps.HeartbeatClient == nil || kubeDeps.DynamicKubeClient == nil { // initialize clients if not standalone mode and any of the clients are not provided var kubeClient clientset.Interface @@ -580,15 +579,15 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan kubeClient, err = clientset.NewForConfig(clientConfig) if err != nil { - glog.Warningf("New kubeClient from clientConfig error: %v", err) + klog.Warningf("New kubeClient from clientConfig error: %v", err) } else if kubeClient.CertificatesV1beta1() != nil && clientCertificateManager != nil { - glog.V(2).Info("Starting client certificate rotation.") + klog.V(2).Info("Starting client certificate rotation.") clientCertificateManager.SetCertificateSigningRequestClient(kubeClient.CertificatesV1beta1().CertificateSigningRequests()) clientCertificateManager.Start() } dynamicKubeClient, err = dynamic.NewForConfig(clientConfig) if err != nil { - glog.Warningf("Failed to initialize dynamic KubeClient: %v", err) + klog.Warningf("Failed to initialize dynamic KubeClient: %v", err) } // make a separate client for events @@ -597,7 +596,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan eventClientConfig.Burst = int(s.EventBurst) eventClient, err = v1core.NewForConfig(&eventClientConfig) if err != nil { - glog.Warningf("Failed to create API Server client for Events: %v", err) + klog.Warningf("Failed to create API Server client for Events: %v", err) } // make a separate client for heartbeat with throttling disabled and a timeout attached @@ -613,14 +612,14 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan heartbeatClientConfig.QPS = float32(-1) heartbeatClient, err = clientset.NewForConfig(&heartbeatClientConfig) if err != nil { - glog.Warningf("Failed to create API Server client for heartbeat: %v", err) + klog.Warningf("Failed to create API Server client for heartbeat: %v", err) } // csiClient works with CRDs that support json only clientConfig.ContentType = "application/json" csiClient, err := csiclientset.NewForConfig(clientConfig) if err != nil { - glog.Warningf("Failed to create CSI API client: %v", err) + klog.Warningf("Failed to create CSI API client: %v", err) } kubeDeps.KubeClient = kubeClient @@ -664,7 +663,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan if kubeDeps.ContainerManager == nil { if s.CgroupsPerQOS && s.CgroupRoot == "" { - glog.Infof("--cgroups-per-qos enabled, but --cgroup-root was not specified. defaulting to /") + klog.Infof("--cgroups-per-qos enabled, but --cgroup-root was not specified. defaulting to /") s.CgroupRoot = "/" } kubeReserved, err := parseResourceList(s.KubeReserved) @@ -728,17 +727,17 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan } if err := checkPermissions(); err != nil { - glog.Error(err) + klog.Error(err) } utilruntime.ReallyCrash = s.ReallyCrashForTesting - rand.Seed(time.Now().UTC().UnixNano()) + rand.Seed(time.Now().UnixNano()) // TODO(vmarmol): Do this through container config. 
oomAdjuster := kubeDeps.OOMAdjuster if err := oomAdjuster.ApplyOOMScoreAdj(0, int(s.OOMScoreAdj)); err != nil { - glog.Warning(err) + klog.Warning(err) } if err := RunKubelet(s, kubeDeps, s.RunOnce); err != nil { @@ -750,7 +749,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan go wait.Until(func() { err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress, strconv.Itoa(int(s.HealthzPort))), nil) if err != nil { - glog.Errorf("Starting health server failed: %v", err) + klog.Errorf("Starting health server failed: %v", err) } }, 5*time.Second, wait.NeverStop) } @@ -789,7 +788,7 @@ func getNodeName(cloud cloudprovider.Interface, hostname string) (types.NodeName return "", fmt.Errorf("error fetching current node name from cloud provider: %v", err) } - glog.V(2).Infof("cloud provider determined current node name to be %s", nodeName) + klog.V(2).Infof("cloud provider determined current node name to be %s", nodeName) return nodeName, nil } @@ -823,7 +822,7 @@ func InitializeTLS(kf *options.KubeletFlags, kc *kubeletconfiginternal.KubeletCo return nil, err } - glog.V(4).Infof("Using self-signed cert (%s, %s)", kc.TLSCertFile, kc.TLSPrivateKeyFile) + klog.V(4).Infof("Using self-signed cert (%s, %s)", kc.TLSCertFile, kc.TLSPrivateKeyFile) } } @@ -890,22 +889,9 @@ func createAPIServerClientConfig(s *options.KubeletServer) (*restclient.Config, clientConfig.QPS = float32(s.KubeAPIQPS) clientConfig.Burst = int(s.KubeAPIBurst) - addChaosToClientConfig(s, clientConfig) return clientConfig, nil } -// addChaosToClientConfig injects random errors into client connections if configured. -func addChaosToClientConfig(s *options.KubeletServer, config *restclient.Config) { - if s.ChaosChance != 0.0 { - config.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { - seed := chaosclient.NewSeed(1) - // TODO: introduce a standard chaos package with more tunables - this is just a proof of concept - // TODO: introduce random latency and stalls - return chaosclient.NewChaosRoundTripper(rt, chaosclient.LogChaos, seed.P(s.ChaosChance, chaosclient.ErrSimulatedConnectionResetByPeer)) - } - } -} - // RunKubelet is responsible for setting up and running a kubelet. 
It is used in three different applications: // 1 Integration tests // 2 Kubelet binary @@ -952,7 +938,7 @@ func RunKubelet(kubeServer *options.KubeletServer, kubeDeps *kubelet.Dependencie capabilities.Setup(kubeServer.AllowPrivileged, privilegedSources, 0) credentialprovider.SetPreferredDockercfgPath(kubeServer.RootDirectory) - glog.V(2).Infof("Using root directory: %v", kubeServer.RootDirectory) + klog.V(2).Infof("Using root directory: %v", kubeServer.RootDirectory) if kubeDeps.OSInterface == nil { kubeDeps.OSInterface = kubecontainer.RealOS{} @@ -1007,10 +993,10 @@ func RunKubelet(kubeServer *options.KubeletServer, kubeDeps *kubelet.Dependencie if _, err := k.RunOnce(podCfg.Updates()); err != nil { return fmt.Errorf("runonce failed: %v", err) } - glog.Infof("Started kubelet as runonce") + klog.Infof("Started kubelet as runonce") } else { startKubelet(k, podCfg, &kubeServer.KubeletConfiguration, kubeDeps, kubeServer.EnableServer) - glog.Infof("Started kubelet") + klog.Infof("Started kubelet") } return nil } @@ -1029,6 +1015,9 @@ func startKubelet(k kubelet.Bootstrap, podCfg *config.PodConfig, kubeCfg *kubele if kubeCfg.ReadOnlyPort > 0 { go k.ListenAndServeReadOnly(net.ParseIP(kubeCfg.Address), uint(kubeCfg.ReadOnlyPort)) } + if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPodResources) { + go k.ListenAndServePodResources() + } } func CreateAndInitKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, @@ -1194,7 +1183,7 @@ func RunDockershim(f *options.KubeletFlags, c *kubeletconfiginternal.KubeletConf if err != nil { return err } - glog.V(2).Infof("Starting the GRPC server for the docker CRI shim.") + klog.V(2).Infof("Starting the GRPC server for the docker CRI shim.") server := dockerremote.NewDockerServer(f.RemoteRuntimeEndpoint, ds) if err := server.Start(); err != nil { return err diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/server_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/server_linux.go index ceab334c0ba7..6f84afd9dfc7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/server_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/cmd/kubelet/app/server_linux.go @@ -17,26 +17,26 @@ limitations under the License. 
package app import ( - "github.com/golang/glog" - "golang.org/x/exp/inotify" + "github.com/sigma/go-inotify" + "k8s.io/klog" ) func watchForLockfileContention(path string, done chan struct{}) error { watcher, err := inotify.NewWatcher() if err != nil { - glog.Errorf("unable to create watcher for lockfile: %v", err) + klog.Errorf("unable to create watcher for lockfile: %v", err) return err } if err = watcher.AddWatch(path, inotify.IN_OPEN|inotify.IN_DELETE_SELF); err != nil { - glog.Errorf("unable to watch lockfile: %v", err) + klog.Errorf("unable to watch lockfile: %v", err) return err } go func() { select { case ev := <-watcher.Event: - glog.Infof("inotify event: %v", ev) + klog.Infof("inotify event: %v", ev) case err = <-watcher.Error: - glog.Errorf("inotify watcher error: %v", err) + klog.Errorf("inotify watcher error: %v", err) } close(done) }() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/events/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/events/BUILD deleted file mode 100644 index 7bda51ac2fb5..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/events/BUILD +++ /dev/null @@ -1,37 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = ["sorted_event_list.go"], - importpath = "k8s.io/kubernetes/pkg/api/events", - deps = ["//pkg/apis/core:go_default_library"], -) - -go_test( - name = "go_default_test", - srcs = ["sorted_event_list_test.go"], - embed = [":go_default_library"], - deps = [ - "//pkg/apis/core:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/events/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/events/OWNERS deleted file mode 100755 index cbc4e8d9d726..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/events/OWNERS +++ /dev/null @@ -1,2 +0,0 @@ -reviewers: -- gmarek diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/pod/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/pod/BUILD deleted file mode 100644 index 77b08bc48bcf..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/pod/BUILD +++ /dev/null @@ -1,45 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = ["util.go"], - importpath = "k8s.io/kubernetes/pkg/api/pod", - deps = [ - "//pkg/apis/core:go_default_library", - "//pkg/features:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) - -go_test( - name = "go_default_test", - srcs = ["util_test.go"], - embed = [":go_default_library"], - deps = [ - "//pkg/apis/core:go_default_library", - "//pkg/features:go_default_library", - 
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - ], -) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/pod/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/pod/OWNERS deleted file mode 100755 index bead0f8a8ecb..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/pod/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -reviewers: -- smarterclayton -- thockin -- david-mcmahon diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/pod/util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/pod/util.go deleted file mode 100644 index b9f875e57a77..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/pod/util.go +++ /dev/null @@ -1,328 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package pod - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilfeature "k8s.io/apiserver/pkg/util/feature" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/features" -) - -// Visitor is called with each object name, and returns true if visiting should continue -type Visitor func(name string) (shouldContinue bool) - -// VisitPodSecretNames invokes the visitor function with the name of every secret -// referenced by the pod spec. If visitor returns false, visiting is short-circuited. -// Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited. -// Returns true if visiting completed, false if visiting was short-circuited. 
-func VisitPodSecretNames(pod *api.Pod, visitor Visitor) bool { - for _, reference := range pod.Spec.ImagePullSecrets { - if !visitor(reference.Name) { - return false - } - } - for i := range pod.Spec.InitContainers { - if !visitContainerSecretNames(&pod.Spec.InitContainers[i], visitor) { - return false - } - } - for i := range pod.Spec.Containers { - if !visitContainerSecretNames(&pod.Spec.Containers[i], visitor) { - return false - } - } - var source *api.VolumeSource - for i := range pod.Spec.Volumes { - source = &pod.Spec.Volumes[i].VolumeSource - switch { - case source.AzureFile != nil: - if len(source.AzureFile.SecretName) > 0 && !visitor(source.AzureFile.SecretName) { - return false - } - case source.CephFS != nil: - if source.CephFS.SecretRef != nil && !visitor(source.CephFS.SecretRef.Name) { - return false - } - case source.Cinder != nil: - if source.Cinder.SecretRef != nil && !visitor(source.Cinder.SecretRef.Name) { - return false - } - case source.FlexVolume != nil: - if source.FlexVolume.SecretRef != nil && !visitor(source.FlexVolume.SecretRef.Name) { - return false - } - case source.Projected != nil: - for j := range source.Projected.Sources { - if source.Projected.Sources[j].Secret != nil { - if !visitor(source.Projected.Sources[j].Secret.Name) { - return false - } - } - } - case source.RBD != nil: - if source.RBD.SecretRef != nil && !visitor(source.RBD.SecretRef.Name) { - return false - } - case source.Secret != nil: - if !visitor(source.Secret.SecretName) { - return false - } - case source.ScaleIO != nil: - if source.ScaleIO.SecretRef != nil && !visitor(source.ScaleIO.SecretRef.Name) { - return false - } - case source.ISCSI != nil: - if source.ISCSI.SecretRef != nil && !visitor(source.ISCSI.SecretRef.Name) { - return false - } - case source.StorageOS != nil: - if source.StorageOS.SecretRef != nil && !visitor(source.StorageOS.SecretRef.Name) { - return false - } - } - } - return true -} - -func visitContainerSecretNames(container *api.Container, visitor Visitor) bool { - for _, env := range container.EnvFrom { - if env.SecretRef != nil { - if !visitor(env.SecretRef.Name) { - return false - } - } - } - for _, envVar := range container.Env { - if envVar.ValueFrom != nil && envVar.ValueFrom.SecretKeyRef != nil { - if !visitor(envVar.ValueFrom.SecretKeyRef.Name) { - return false - } - } - } - return true -} - -// VisitPodConfigmapNames invokes the visitor function with the name of every configmap -// referenced by the pod spec. If visitor returns false, visiting is short-circuited. -// Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited. -// Returns true if visiting completed, false if visiting was short-circuited. 
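// Illustrative sketch, not part of the vendored diff: a minimal consumer of the
// visitor API above (which this diff removes from the vendored tree), assuming
// the same package so VisitPodSecretNames, Visitor and api.Pod are in scope.
// Returning false from the visitor is what short-circuits the walk, so a
// membership test can stop at the first hit.
func podReferencesSecret(pod *api.Pod, secretName string) bool {
	found := false
	VisitPodSecretNames(pod, func(name string) bool {
		if name == secretName {
			found = true
			return false // stop visiting; the answer is known
		}
		return true // keep visiting
	})
	return found
}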
-func VisitPodConfigmapNames(pod *api.Pod, visitor Visitor) bool { - for i := range pod.Spec.InitContainers { - if !visitContainerConfigmapNames(&pod.Spec.InitContainers[i], visitor) { - return false - } - } - for i := range pod.Spec.Containers { - if !visitContainerConfigmapNames(&pod.Spec.Containers[i], visitor) { - return false - } - } - var source *api.VolumeSource - for i := range pod.Spec.Volumes { - source = &pod.Spec.Volumes[i].VolumeSource - switch { - case source.Projected != nil: - for j := range source.Projected.Sources { - if source.Projected.Sources[j].ConfigMap != nil { - if !visitor(source.Projected.Sources[j].ConfigMap.Name) { - return false - } - } - } - case source.ConfigMap != nil: - if !visitor(source.ConfigMap.Name) { - return false - } - } - } - return true -} - -func visitContainerConfigmapNames(container *api.Container, visitor Visitor) bool { - for _, env := range container.EnvFrom { - if env.ConfigMapRef != nil { - if !visitor(env.ConfigMapRef.Name) { - return false - } - } - } - for _, envVar := range container.Env { - if envVar.ValueFrom != nil && envVar.ValueFrom.ConfigMapKeyRef != nil { - if !visitor(envVar.ValueFrom.ConfigMapKeyRef.Name) { - return false - } - } - } - return true -} - -// IsPodReady returns true if a pod is ready; false otherwise. -func IsPodReady(pod *api.Pod) bool { - return IsPodReadyConditionTrue(pod.Status) -} - -// IsPodReadyConditionTrue returns true if a pod is ready; false otherwise. -func IsPodReadyConditionTrue(status api.PodStatus) bool { - condition := GetPodReadyCondition(status) - return condition != nil && condition.Status == api.ConditionTrue -} - -// GetPodReadyCondition extracts the pod ready condition from the given status and returns that. -// Returns nil if the condition is not present. -func GetPodReadyCondition(status api.PodStatus) *api.PodCondition { - _, condition := GetPodCondition(&status, api.PodReady) - return condition -} - -// GetPodCondition extracts the provided condition from the given status and returns that. -// Returns nil and -1 if the condition is not present, and the index of the located condition. -func GetPodCondition(status *api.PodStatus, conditionType api.PodConditionType) (int, *api.PodCondition) { - if status == nil { - return -1, nil - } - for i := range status.Conditions { - if status.Conditions[i].Type == conditionType { - return i, &status.Conditions[i] - } - } - return -1, nil -} - -// UpdatePodCondition updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the -// status has changed. -// Returns true if pod condition has changed or has been added. -func UpdatePodCondition(status *api.PodStatus, condition *api.PodCondition) bool { - condition.LastTransitionTime = metav1.Now() - // Try to find this pod condition. - conditionIndex, oldCondition := GetPodCondition(status, condition.Type) - - if oldCondition == nil { - // We are adding new pod condition. - status.Conditions = append(status.Conditions, *condition) - return true - } - // We are updating an existing condition, so we need to check if it has changed. 
- if condition.Status == oldCondition.Status { - condition.LastTransitionTime = oldCondition.LastTransitionTime - } - - isEqual := condition.Status == oldCondition.Status && - condition.Reason == oldCondition.Reason && - condition.Message == oldCondition.Message && - condition.LastProbeTime.Equal(&oldCondition.LastProbeTime) && - condition.LastTransitionTime.Equal(&oldCondition.LastTransitionTime) - - status.Conditions[conditionIndex] = *condition - // Return true if one of the fields have changed. - return !isEqual -} - -// DropDisabledAlphaFields removes disabled fields from the pod spec. -// This should be called from PrepareForCreate/PrepareForUpdate for all resources containing a pod spec. -func DropDisabledAlphaFields(podSpec *api.PodSpec) { - if !utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) { - podSpec.Priority = nil - podSpec.PriorityClassName = "" - } - - if !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { - for i := range podSpec.Volumes { - if podSpec.Volumes[i].EmptyDir != nil { - podSpec.Volumes[i].EmptyDir.SizeLimit = nil - } - } - } - - for i := range podSpec.Containers { - DropDisabledVolumeMountsAlphaFields(podSpec.Containers[i].VolumeMounts) - } - for i := range podSpec.InitContainers { - DropDisabledVolumeMountsAlphaFields(podSpec.InitContainers[i].VolumeMounts) - } - - DropDisabledVolumeDevicesAlphaFields(podSpec) - - DropDisabledRunAsGroupField(podSpec) - - if !utilfeature.DefaultFeatureGate.Enabled(features.RuntimeClass) && podSpec.RuntimeClassName != nil { - podSpec.RuntimeClassName = nil - } - - DropDisabledProcMountField(podSpec) -} - -// DropDisabledRunAsGroupField removes disabled fields from PodSpec related -// to RunAsGroup -func DropDisabledRunAsGroupField(podSpec *api.PodSpec) { - if !utilfeature.DefaultFeatureGate.Enabled(features.RunAsGroup) { - if podSpec.SecurityContext != nil { - podSpec.SecurityContext.RunAsGroup = nil - } - for i := range podSpec.Containers { - if podSpec.Containers[i].SecurityContext != nil { - podSpec.Containers[i].SecurityContext.RunAsGroup = nil - } - } - for i := range podSpec.InitContainers { - if podSpec.InitContainers[i].SecurityContext != nil { - podSpec.InitContainers[i].SecurityContext.RunAsGroup = nil - } - } - } -} - -// DropDisabledProcMountField removes disabled fields from PodSpec related -// to ProcMount -func DropDisabledProcMountField(podSpec *api.PodSpec) { - if !utilfeature.DefaultFeatureGate.Enabled(features.ProcMountType) { - defProcMount := api.DefaultProcMount - for i := range podSpec.Containers { - if podSpec.Containers[i].SecurityContext != nil { - podSpec.Containers[i].SecurityContext.ProcMount = &defProcMount - } - } - for i := range podSpec.InitContainers { - if podSpec.InitContainers[i].SecurityContext != nil { - podSpec.InitContainers[i].SecurityContext.ProcMount = &defProcMount - } - } - } -} - -// DropDisabledVolumeMountsAlphaFields removes disabled fields from []VolumeMount. -// This should be called from PrepareForCreate/PrepareForUpdate for all resources containing a VolumeMount -func DropDisabledVolumeMountsAlphaFields(volumeMounts []api.VolumeMount) { - if !utilfeature.DefaultFeatureGate.Enabled(features.MountPropagation) { - for i := range volumeMounts { - volumeMounts[i].MountPropagation = nil - } - } -} - -// DropDisabledVolumeDevicesAlphaFields removes disabled fields from []VolumeDevice. 
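// Illustrative sketch, not part of the vendored diff: how UpdatePodCondition's
// return value (defined above, removed here) is typically consumed, assuming
// the same package; the Reason string is hypothetical. Because
// LastTransitionTime is carried over when Status is unchanged, repeated calls
// with the same Status do not churn the transition timestamp.
func markPodReady(status *api.PodStatus) bool {
	cond := api.PodCondition{
		Type:   api.PodReady,
		Status: api.ConditionTrue,
		Reason: "ContainersReady", // hypothetical reason
	}
	// true means the condition was added or materially changed, i.e. the
	// caller has a status update worth writing back.
	return UpdatePodCondition(status, &cond)
}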
-// This should be called from PrepareForCreate/PrepareForUpdate for all resources containing a VolumeDevice -func DropDisabledVolumeDevicesAlphaFields(podSpec *api.PodSpec) { - if !utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { - for i := range podSpec.Containers { - podSpec.Containers[i].VolumeDevices = nil - } - for i := range podSpec.InitContainers { - podSpec.InitContainers[i].VolumeDevices = nil - } - } -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/ref/ref.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/ref/ref.go index d6576750ad11..d1a4c10de7b1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/ref/ref.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/ref/ref.go @@ -28,8 +28,8 @@ import ( api "k8s.io/kubernetes/pkg/apis/core" ) +// Errors that could be returned by GetReference. var ( - // Errors that could be returned by GetReference. ErrNilObject = errors.New("can't reference a nil object") ErrNoSelfLink = errors.New("selfLink was empty, can't make reference") ) @@ -80,12 +80,12 @@ func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*api.ObjectRefere if len(selfLink) == 0 { return nil, ErrNoSelfLink } - selfLinkUrl, err := url.Parse(selfLink) + selfLinkURL, err := url.Parse(selfLink) if err != nil { return nil, err } // example paths: /<prefix>/<version>/* - parts := strings.Split(selfLinkUrl.Path, "/") + parts := strings.Split(selfLinkURL.Path, "/") if len(parts) < 3 { return nil, fmt.Errorf("unexpected self link format: '%v'; got version '%v'", selfLink, version) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/resource/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/resource/BUILD deleted file mode 100644 index e6fc25e565ba..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/resource/BUILD +++ /dev/null @@ -1,42 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = ["helpers.go"], - importpath = "k8s.io/kubernetes/pkg/api/resource", - deps = [ - "//pkg/apis/core:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) - -go_test( - name = "go_default_test", - srcs = ["helpers_test.go"], - embed = [":go_default_library"], - deps = [ - "//pkg/apis/core:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", - ], -) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/resource/helpers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/resource/helpers.go deleted file mode 100644 index 4daf3e925433..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/resource/helpers.go +++ /dev/null @@ -1,118 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License.
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "fmt" - "math" - "strconv" - - "k8s.io/apimachinery/pkg/api/resource" - api "k8s.io/kubernetes/pkg/apis/core" -) - -// addResourceList adds the resources in newList to list -func addResourceList(list, new api.ResourceList) { - for name, quantity := range new { - if value, ok := list[name]; !ok { - list[name] = *quantity.Copy() - } else { - value.Add(quantity) - list[name] = value - } - } -} - -// maxResourceList sets list to the greater of list/newList for every resource -// either list -func maxResourceList(list, new api.ResourceList) { - for name, quantity := range new { - if value, ok := list[name]; !ok { - list[name] = *quantity.Copy() - continue - } else { - if quantity.Cmp(value) > 0 { - list[name] = *quantity.Copy() - } - } - } -} - -// PodRequestsAndLimits returns a dictionary of all defined resources summed up for all -// containers of the pod. -func PodRequestsAndLimits(pod *api.Pod) (reqs api.ResourceList, limits api.ResourceList) { - reqs, limits = api.ResourceList{}, api.ResourceList{} - for _, container := range pod.Spec.Containers { - addResourceList(reqs, container.Resources.Requests) - addResourceList(limits, container.Resources.Limits) - } - // init containers define the minimum of any resource - for _, container := range pod.Spec.InitContainers { - maxResourceList(reqs, container.Resources.Requests) - maxResourceList(limits, container.Resources.Limits) - } - return -} - -// ExtractContainerResourceValue extracts the value of a resource -// in an already known container -func ExtractContainerResourceValue(fs *api.ResourceFieldSelector, container *api.Container) (string, error) { - divisor := resource.Quantity{} - if divisor.Cmp(fs.Divisor) == 0 { - divisor = resource.MustParse("1") - } else { - divisor = fs.Divisor - } - - switch fs.Resource { - case "limits.cpu": - return convertResourceCPUToString(container.Resources.Limits.Cpu(), divisor) - case "limits.memory": - return convertResourceMemoryToString(container.Resources.Limits.Memory(), divisor) - case "limits.ephemeral-storage": - return convertResourceEphemeralStorageToString(container.Resources.Limits.StorageEphemeral(), divisor) - case "requests.cpu": - return convertResourceCPUToString(container.Resources.Requests.Cpu(), divisor) - case "requests.memory": - return convertResourceMemoryToString(container.Resources.Requests.Memory(), divisor) - case "requests.ephemeral-storage": - return convertResourceEphemeralStorageToString(container.Resources.Requests.StorageEphemeral(), divisor) - } - - return "", fmt.Errorf("unsupported container resource : %v", fs.Resource) -} - -// convertResourceCPUToString converts cpu value to the format of divisor and returns -// ceiling of the value. -func convertResourceCPUToString(cpu *resource.Quantity, divisor resource.Quantity) (string, error) { - c := int64(math.Ceil(float64(cpu.MilliValue()) / float64(divisor.MilliValue()))) - return strconv.FormatInt(c, 10), nil -} - -// convertResourceMemoryToString converts memory value to the format of divisor and returns -// ceiling of the value. 
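// Illustrative sketch, not part of the vendored diff: the divisor arithmetic in
// the helpers above (deleted here), worked for a container with limits.cpu =
// 250m and assuming the same package. A zero Divisor defaults to "1", so the
// downward-API value is ceil(250m / 1000m) = "1".
func exampleCPUDownwardAPIValue() (string, error) {
	container := &api.Container{
		Resources: api.ResourceRequirements{
			Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("250m")},
		},
	}
	fs := &api.ResourceFieldSelector{Resource: "limits.cpu"} // zero Divisor => defaults to "1"
	return ExtractContainerResourceValue(fs, container)      // "1", nil
}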
-func convertResourceMemoryToString(memory *resource.Quantity, divisor resource.Quantity) (string, error) { - m := int64(math.Ceil(float64(memory.Value()) / float64(divisor.Value()))) - return strconv.FormatInt(m, 10), nil -} - -// convertResourceEphemeralStorageToString converts ephemeral storage value to the format of divisor and returns -// ceiling of the value. -func convertResourceEphemeralStorageToString(ephemeralStorage *resource.Quantity, divisor resource.Quantity) (string, error) { - m := int64(math.Ceil(float64(ephemeralStorage.Value()) / float64(divisor.Value()))) - return strconv.FormatInt(m, 10), nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/testapi/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/testapi/BUILD index cb66f562213f..11eec53bef4c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/testapi/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/testapi/BUILD @@ -18,6 +18,8 @@ go_library( "//pkg/apis/admissionregistration/install:go_default_library", "//pkg/apis/apps:go_default_library", "//pkg/apis/apps/install:go_default_library", + "//pkg/apis/auditregistration:go_default_library", + "//pkg/apis/auditregistration/install:go_default_library", "//pkg/apis/authentication/install:go_default_library", "//pkg/apis/authorization:go_default_library", "//pkg/apis/authorization/install:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/testapi/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/testapi/OWNERS index ede98b352264..74b25f3aafe8 100755 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/testapi/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/testapi/OWNERS @@ -16,5 +16,4 @@ reviewers: - markturansky - mml - david-mcmahon -- ericchiang - jianhuiz diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/testapi/testapi.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/testapi/testapi.go index 138d82413144..32b81d29d0e7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/testapi/testapi.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/testapi/testapi.go @@ -37,6 +37,7 @@ import ( "k8s.io/kubernetes/pkg/apis/admission" "k8s.io/kubernetes/pkg/apis/admissionregistration" "k8s.io/kubernetes/pkg/apis/apps" + "k8s.io/kubernetes/pkg/apis/auditregistration" "k8s.io/kubernetes/pkg/apis/authorization" "k8s.io/kubernetes/pkg/apis/autoscaling" "k8s.io/kubernetes/pkg/apis/batch" @@ -53,9 +54,11 @@ import ( "k8s.io/kubernetes/pkg/apis/settings" "k8s.io/kubernetes/pkg/apis/storage" + // Initialize install packages _ "k8s.io/kubernetes/pkg/apis/admission/install" _ "k8s.io/kubernetes/pkg/apis/admissionregistration/install" _ "k8s.io/kubernetes/pkg/apis/apps/install" + _ "k8s.io/kubernetes/pkg/apis/auditregistration/install" _ "k8s.io/kubernetes/pkg/apis/authentication/install" _ "k8s.io/kubernetes/pkg/apis/authorization/install" _ "k8s.io/kubernetes/pkg/apis/autoscaling/install" @@ -74,6 +77,7 @@ import ( _ "k8s.io/kubernetes/pkg/apis/storage/install" ) +// Variables to store GroupName var ( Groups = make(map[string]TestGroup) Default TestGroup @@ -90,6 +94,7 @@ var ( storageSerializer runtime.SerializerInfo ) +// TestGroup contains GroupVersion to uniquely identify the API type TestGroup struct { externalGroupVersion schema.GroupVersion } @@ -264,6 +269,12 @@ func init() { externalGroupVersion: externalGroupVersion, } } + if _, ok := Groups[auditregistration.GroupName]; !ok { + externalGroupVersion := 
schema.GroupVersion{Group: auditregistration.GroupName, Version: legacyscheme.Scheme.PrioritizedVersionsForGroup(auditregistration.GroupName)[0].Version} + Groups[auditregistration.GroupName] = TestGroup{ + externalGroupVersion: externalGroupVersion, + } + } Default = Groups[api.GroupName] Autoscaling = Groups[autoscaling.GroupName] @@ -276,6 +287,7 @@ func init() { Admission = Groups[admission.GroupName] } +// GroupVersion makes copy of schema.GroupVersion func (g TestGroup) GroupVersion() *schema.GroupVersion { copyOfGroupVersion := g.externalGroupVersion return &copyOfGroupVersion @@ -290,6 +302,7 @@ func (g TestGroup) Codec() runtime.Codec { return legacyscheme.Codecs.CodecForVersions(serializer.Serializer, legacyscheme.Codecs.UniversalDeserializer(), schema.GroupVersions{g.externalGroupVersion}, nil) } +// StorageMediaType finds media type set by KUBE_TEST_API_STORAGE_TYPE env var used to store objects in storage func StorageMediaType() string { return os.Getenv("KUBE_TEST_API_STORAGE_TYPE") } @@ -322,14 +335,13 @@ func (g TestGroup) SelfLink(resource, name string) string { return fmt.Sprintf("/api/%s/%s", g.externalGroupVersion.Version, resource) } return fmt.Sprintf("/api/%s/%s/%s", g.externalGroupVersion.Version, resource, name) - } else { - // TODO: will need a /apis prefix once we have proper multi-group - // support - if name == "" { - return fmt.Sprintf("/apis/%s/%s/%s", g.externalGroupVersion.Group, g.externalGroupVersion.Version, resource) - } - return fmt.Sprintf("/apis/%s/%s/%s/%s", g.externalGroupVersion.Group, g.externalGroupVersion.Version, resource, name) } + // TODO: will need a /apis prefix once we have proper multi-group + // support + if name == "" { + return fmt.Sprintf("/apis/%s/%s/%s", g.externalGroupVersion.Group, g.externalGroupVersion.Version, resource) + } + return fmt.Sprintf("/apis/%s/%s/%s/%s", g.externalGroupVersion.Group, g.externalGroupVersion.Version, resource, name) } // ResourcePathWithPrefix returns the appropriate path for the given prefix (watch, proxy, redirect, etc), resource, namespace and name. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go index 416221d520e5..558e8a48c1a4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go @@ -238,13 +238,13 @@ func IsPodReady(pod *v1.Pod) bool { return IsPodReadyConditionTrue(pod.Status) } -// IsPodReady returns true if a pod is ready; false otherwise. +// IsPodReadyConditionTrue returns true if a pod is ready; false otherwise. func IsPodReadyConditionTrue(status v1.PodStatus) bool { condition := GetPodReadyCondition(status) return condition != nil && condition.Status == v1.ConditionTrue } -// Extracts the pod ready condition from the given status and returns that. +// GetPodReadyCondition extracts the pod ready condition from the given status and returns that. // Returns nil if the condition is not present. func GetPodReadyCondition(status v1.PodStatus) *v1.PodCondition { _, condition := GetPodCondition(&status, v1.PodReady) @@ -274,7 +274,7 @@ func GetPodConditionFromList(conditions []v1.PodCondition, conditionType v1.PodC return -1, nil } -// Updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the +// UpdatePodCondition updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the // status has changed.
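// Illustrative sketch, not part of the vendored diff: the URL shapes the
// SelfLink method above produces, assuming the same package (TestGroup's
// field is unexported). The legacy core group has an empty Group and is served
// under /api; every other group gets the /apis prefix.
func exampleSelfLinks() []string {
	core := TestGroup{externalGroupVersion: schema.GroupVersion{Version: "v1"}}
	apps := TestGroup{externalGroupVersion: schema.GroupVersion{Group: "apps", Version: "v1"}}
	return []string{
		core.SelfLink("pods", "web-0"),        // "/api/v1/pods/web-0"
		apps.SelfLink("deployments", "nginx"), // "/apis/apps/v1/deployments/nginx"
	}
}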
// Returns true if pod condition has changed or has been added. func UpdatePodCondition(status *v1.PodStatus, condition *v1.PodCondition) bool { @@ -286,20 +286,19 @@ func UpdatePodCondition(status *v1.PodStatus, condition *v1.PodCondition) bool { // We are adding new pod condition. status.Conditions = append(status.Conditions, *condition) return true - } else { - // We are updating an existing condition, so we need to check if it has changed. - if condition.Status == oldCondition.Status { - condition.LastTransitionTime = oldCondition.LastTransitionTime - } + } + // We are updating an existing condition, so we need to check if it has changed. + if condition.Status == oldCondition.Status { + condition.LastTransitionTime = oldCondition.LastTransitionTime + } - isEqual := condition.Status == oldCondition.Status && - condition.Reason == oldCondition.Reason && - condition.Message == oldCondition.Message && - condition.LastProbeTime.Equal(&oldCondition.LastProbeTime) && - condition.LastTransitionTime.Equal(&oldCondition.LastTransitionTime) + isEqual := condition.Status == oldCondition.Status && + condition.Reason == oldCondition.Reason && + condition.Message == oldCondition.Message && + condition.LastProbeTime.Equal(&oldCondition.LastProbeTime) && + condition.LastTransitionTime.Equal(&oldCondition.LastTransitionTime) - status.Conditions[conditionIndex] = *condition - // Return true if one of the fields have changed. - return !isEqual - } + status.Conditions[conditionIndex] = *condition + // Return true if one of the fields have changed. + return !isEqual } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/v1/resource/helpers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/v1/resource/helpers.go index 6f7c59f587d4..d30ab7ed86e3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/v1/resource/helpers.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/v1/resource/helpers.go @@ -68,7 +68,7 @@ func PodRequestsAndLimits(pod *v1.Pod) (reqs, limits v1.ResourceList) { return } -// finds and returns the request for a specific resource. +// GetResourceRequest finds and returns the request for a specific resource. func GetResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 { if resource == v1.ResourcePods { return 1 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admission/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admission/doc.go index b5a203700221..31833ab69f80 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admission/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admission/doc.go @@ -16,4 +16,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=admission.k8s.io + package admission diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admission/v1beta1/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admission/v1beta1/doc.go index d127afc3bc20..cabb3e98878e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admission/v1beta1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admission/v1beta1/doc.go @@ -20,4 +20,5 @@ limitations under the License. 
// +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/admission/v1beta1 // +groupName=admission.k8s.io + package v1beta1 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/doc.go index 5c0cb5b988b6..c0ed22beb7b8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/doc.go @@ -15,10 +15,10 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +groupName=admissionregistration.k8s.io // Package admissionregistration is the internal version of the API. // AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration // InitializerConfiguration, ValidatingWebhookConfiguration, and MutatingWebhookConfiguration are for the // new dynamic admission controller configuration. -// +groupName=admissionregistration.k8s.io package admissionregistration diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/types.go index 511bdd96d1c5..e8880c49c648 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/types.go @@ -290,7 +290,7 @@ const ( // connection with the webhook type WebhookClientConfig struct { // `url` gives the location of the webhook, in standard URL form - // (`[scheme://]host:port/path`). Exactly one of `url` or `service` + // (`scheme://host:port/path`). Exactly one of `url` or `service` // must be specified. // // The `host` should not refer to a service running in the cluster; use @@ -328,9 +328,9 @@ type WebhookClientConfig struct { // +optional Service *ServiceReference - // `caBundle` is a PEM encoded CA bundle which will be used to validate - // the webhook's server certificate. - // Required. + // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. + // +optional CABundle []byte } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/doc.go index 8c05598dc4ca..dbbb55b3aadf 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/doc.go @@ -18,10 +18,10 @@ limitations under the License. // +k8s:conversion-gen-external-types=k8s.io/api/admissionregistration/v1alpha1 // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/admissionregistration/v1alpha1 +// +groupName=admissionregistration.k8s.io // Package v1alpha1 is the v1alpha1 version of the API. // AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration // InitializerConfiguration, ValidatingWebhookConfiguration, and MutatingWebhookConfiguration are for the // new dynamic admission controller configuration. 
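// Illustrative sketch, not part of the vendored diff: the two mutually
// exclusive ways to populate the WebhookClientConfig documented above, using
// the internal admissionregistration types; names and values are hypothetical.
// With the relaxed caBundle contract above, omitting CABundle means the
// apiserver's system trust roots are used.
func exampleWebhookClientConfigs(caPEM []byte) []admissionregistration.WebhookClientConfig {
	urlStr := "https://webhook.example.com:8443/validate"
	path := "/validate"
	return []admissionregistration.WebhookClientConfig{
		{URL: &urlStr, CABundle: caPEM}, // out-of-cluster endpoint in standard URL form
		{Service: &admissionregistration.ServiceReference{ // in-cluster service instead of a URL
			Namespace: "default",
			Name:      "webhook-svc",
			Path:      &path,
		}},
	}
}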
-// +groupName=admissionregistration.k8s.io package v1alpha1 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/doc.go index 9b918a625861..27ebd40c6ce2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/doc.go @@ -18,10 +18,10 @@ limitations under the License. // +k8s:conversion-gen-external-types=k8s.io/api/admissionregistration/v1beta1 // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/admissionregistration/v1beta1 +// +groupName=admissionregistration.k8s.io // Package v1beta1 is the v1beta1 version of the API. // AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration // InitializerConfiguration, ValidatingWebhookConfiguration, and MutatingWebhookConfiguration are for the // new dynamic admission controller configuration. -// +groupName=admissionregistration.k8s.io package v1beta1 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/BUILD index 60c1469c0c65..0d4035331d68 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/BUILD @@ -17,10 +17,10 @@ go_library( deps = [ "//pkg/apis/autoscaling:go_default_library", "//pkg/apis/core:go_default_library", - "//pkg/apis/extensions:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/register.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/register.go index 8b7591807cbe..b56ec96cb36c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/register.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/register.go @@ -20,7 +20,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/kubernetes/pkg/apis/autoscaling" - "k8s.io/kubernetes/pkg/apis/extensions" ) var ( @@ -48,18 +47,18 @@ func Resource(resource string) schema.GroupResource { func addKnownTypes(scheme *runtime.Scheme) error { // TODO this will get cleaned up with the scheme types are fixed scheme.AddKnownTypes(SchemeGroupVersion, - &extensions.DaemonSet{}, - &extensions.DaemonSetList{}, - &extensions.Deployment{}, - &extensions.DeploymentList{}, - &extensions.DeploymentRollback{}, + &DaemonSet{}, + &DaemonSetList{}, + &Deployment{}, + &DeploymentList{}, + &DeploymentRollback{}, &autoscaling.Scale{}, &StatefulSet{}, &StatefulSetList{}, &ControllerRevision{}, &ControllerRevisionList{}, - &extensions.ReplicaSet{}, - &extensions.ReplicaSetList{}, + &ReplicaSet{}, + &ReplicaSetList{}, ) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go index 850694d7fc23..c15927d45ec9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go @@ -19,12 +19,11 @@ 
package apps import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" api "k8s.io/kubernetes/pkg/apis/core" ) // +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/autoscaling.Scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // StatefulSet represents a set of pods with consistent identities. @@ -262,3 +261,541 @@ type ControllerRevisionList struct { // Items is the list of ControllerRevision objects. Items []ControllerRevision } + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type Deployment struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Specification of the desired behavior of the Deployment. + // +optional + Spec DeploymentSpec + + // Most recently observed status of the Deployment. + // +optional + Status DeploymentStatus +} + +type DeploymentSpec struct { + // Number of desired pods. This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. + // +optional + Replicas int32 + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. + // +optional + Selector *metav1.LabelSelector + + // Template describes the pods that will be created. + Template api.PodTemplateSpec + + // The deployment strategy to use to replace existing pods with new ones. + // +optional + Strategy DeploymentStrategy + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 + + // The number of old ReplicaSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // This is set to the max value of int32 (i.e. 2147483647) by default, which means + // "retaining all old ReplicaSets". + // +optional + RevisionHistoryLimit *int32 + + // Indicates that the deployment is paused and will not be processed by the + // deployment controller. + // +optional + Paused bool + + // DEPRECATED. + // The config this deployment is rolling back to. Will be cleared after rollback is done. + // +optional + RollbackTo *RollbackConfig + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Note that progress will + // not be estimated during the time a deployment is paused. This is set to + // the max value of int32 (i.e. 2147483647) by default, which means "no deadline". + // +optional + ProgressDeadlineSeconds *int32 +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DEPRECATED. +// DeploymentRollback stores the information required to rollback a deployment. +type DeploymentRollback struct { + metav1.TypeMeta + // Required: This must match the Name of a deployment. 
+ Name string + // The annotations to be updated to a deployment + // +optional + UpdatedAnnotations map[string]string + // The config of this deployment rollback. + RollbackTo RollbackConfig +} + +// DEPRECATED. +type RollbackConfig struct { + // The revision to rollback to. If set to 0, rollback to the last revision. + // +optional + Revision int64 +} + +const ( + // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added + // to existing RCs (and label key that is added to its pods) to prevent the existing RCs + // to select new pods (and old pods being select by new RC). + DefaultDeploymentUniqueLabelKey string = "pod-template-hash" +) + +type DeploymentStrategy struct { + // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. + // +optional + Type DeploymentStrategyType + + // Rolling update config params. Present only if DeploymentStrategyType = + // RollingUpdate. + //--- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. + // +optional + RollingUpdate *RollingUpdateDeployment +} + +type DeploymentStrategyType string + +const ( + // Kill all existing pods before creating new ones. + RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate" + + // Replace the old RCs by new one using rolling update i.e gradually scale down the old RCs and scale up the new one. + RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate" +) + +// Spec to control the desired behavior of rolling update. +type RollingUpdateDeployment struct { + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // By default, a fixed value of 1 is used. + // Example: when this is set to 30%, the old RC can be scaled down by 30% + // immediately when the rolling update starts. Once new pods are ready, old RC + // can be scaled down further, followed by scaling up the new RC, ensuring + // that at least 70% of original number of pods are available at all times + // during the update. + // +optional + MaxUnavailable intstr.IntOrString + + // The maximum number of pods that can be scheduled above the original number of + // pods. + // Value can be an absolute number (ex: 5) or a percentage of total pods at + // the start of the update (ex: 10%). This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // By default, a value of 1 is used. + // Example: when this is set to 30%, the new RC can be scaled up by 30% + // immediately when the rolling update starts. Once old pods have been killed, + // new RC can be scaled up further, ensuring that total number of pods running + // at any time during the update is atmost 130% of original pods. + // +optional + MaxSurge intstr.IntOrString +} + +type DeploymentStatus struct { + // The generation observed by the deployment controller. + // +optional + ObservedGeneration int64 + + // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // +optional + Replicas int32 + + // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // +optional + UpdatedReplicas int32 + + // Total number of ready pods targeted by this deployment. 
+ // +optional + ReadyReplicas int32 + + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // +optional + AvailableReplicas int32 + + // Total number of unavailable pods targeted by this deployment. This is the total number of + // pods that are still required for the deployment to have 100% available capacity. They may + // either be pods that are running but not yet available or pods that still have not been created. + // +optional + UnavailableReplicas int32 + + // Represents the latest available observations of a deployment's current state. + Conditions []DeploymentCondition + + // Count of hash collisions for the Deployment. The Deployment controller uses this + // field as a collision avoidance mechanism when it needs to create the name for the + // newest ReplicaSet. + // +optional + CollisionCount *int32 +} + +type DeploymentConditionType string + +// These are valid conditions of a deployment. +const ( + // Available means the deployment is available, ie. at least the minimum available + // replicas required are up and running for at least minReadySeconds. + DeploymentAvailable DeploymentConditionType = "Available" + // Progressing means the deployment is progressing. Progress for a deployment is + // considered when a new replica set is created or adopted, and when new pods scale + // up or old pods scale down. Progress is not estimated for paused deployments or + // when progressDeadlineSeconds is not specified. + DeploymentProgressing DeploymentConditionType = "Progressing" + // ReplicaFailure is added in a deployment when one of its pods fails to be created + // or deleted. + DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure" +) + +// DeploymentCondition describes the state of a deployment at a certain point. +type DeploymentCondition struct { + // Type of deployment condition. + Type DeploymentConditionType + // Status of the condition, one of True, False, Unknown. + Status api.ConditionStatus + // The last time this condition was updated. + LastUpdateTime metav1.Time + // Last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time + // The reason for the condition's last transition. + Reason string + // A human readable message indicating details about the transition. + Message string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type DeploymentList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + // Items is the list of deployments. + Items []Deployment +} + +type DaemonSetUpdateStrategy struct { + // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". + // Default is OnDelete. + // +optional + Type DaemonSetUpdateStrategyType + + // Rolling update config params. Present only if type = "RollingUpdate". + //--- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. Same as Deployment `strategy.rollingUpdate`. + // See https://github.com/kubernetes/kubernetes/issues/35345 + // +optional + RollingUpdate *RollingUpdateDaemonSet +} + +type DaemonSetUpdateStrategyType string + +const ( + // Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other. + RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate" + + // Replace the old daemons only when it's killed + OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete" +) + +// Spec to control the desired behavior of daemon set rolling update. 
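// Illustrative sketch, not part of the vendored diff: resolving the
// IntOrString fields above against a concrete replica count with the
// apimachinery helper (k8s.io/apimachinery/pkg/util/intstr). Per the field
// comments, maxUnavailable rounds down and maxSurge rounds up, so 25% of 10
// replicas permits 2 unavailable pods while allowing 3 surge pods; the same
// rounding applies to the DaemonSet strategy that follows.
func exampleRollingUpdateBounds() (maxUnavailable, maxSurge int, err error) {
	mu := intstr.FromString("25%")
	ms := intstr.FromString("25%")
	if maxUnavailable, err = intstr.GetValueFromIntOrPercent(&mu, 10, false); err != nil { // round down -> 2
		return
	}
	maxSurge, err = intstr.GetValueFromIntOrPercent(&ms, 10, true) // round up -> 3
	return
}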
+type RollingUpdateDaemonSet struct { + // The maximum number of DaemonSet pods that can be unavailable during the + // update. Value can be an absolute number (ex: 5) or a percentage of total + // number of DaemonSet pods at the start of the update (ex: 10%). Absolute + // number is calculated from percentage by rounding up. + // This cannot be 0. + // Default value is 1. + // Example: when this is set to 30%, at most 30% of the total number of nodes + // that should be running the daemon pod (i.e. status.desiredNumberScheduled) + // can have their pods stopped for an update at any given + // time. The update starts by stopping at most 30% of those DaemonSet pods + // and then brings up new DaemonSet pods in their place. Once the new pods + // are available, it then proceeds onto other DaemonSet pods, thus ensuring + // that at least 70% of original number of DaemonSet pods are available at + // all times during the update. + // +optional + MaxUnavailable intstr.IntOrString +} + +// DaemonSetSpec is the specification of a daemon set. +type DaemonSetSpec struct { + // A label query over pods that are managed by the daemon set. + // Must match in order to be controlled. + // If empty, defaulted to labels on Pod template. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + Selector *metav1.LabelSelector + + // An object that describes the pod that will be created. + // The DaemonSet will create exactly one copy of this pod on every node + // that matches the template's node selector (or on every node if no node + // selector is specified). + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + Template api.PodTemplateSpec + + // An update strategy to replace existing DaemonSet pods with new pods. + // +optional + UpdateStrategy DaemonSetUpdateStrategy + + // The minimum number of seconds for which a newly created DaemonSet pod should + // be ready without any of its container crashing, for it to be considered + // available. Defaults to 0 (pod will be considered available as soon as it + // is ready). + // +optional + MinReadySeconds int32 + + // DEPRECATED. + // A sequence number representing a specific generation of the template. + // Populated by the system. It can be set only during the creation. + // +optional + TemplateGeneration int64 + + // The number of old history to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 10. + // +optional + RevisionHistoryLimit *int32 +} + +// DaemonSetStatus represents the current status of a daemon set. +type DaemonSetStatus struct { + // The number of nodes that are running at least 1 + // daemon pod and are supposed to run the daemon pod. + CurrentNumberScheduled int32 + + // The number of nodes that are running the daemon pod, but are + // not supposed to run the daemon pod. + NumberMisscheduled int32 + + // The total number of nodes that should be running the daemon + // pod (including nodes correctly running the daemon pod). + DesiredNumberScheduled int32 + + // The number of nodes that should be running the daemon pod and have one + // or more of the daemon pod running and ready. + NumberReady int32 + + // The most recent generation observed by the daemon set controller. 
+ // +optional + ObservedGeneration int64 + + // The total number of nodes that are running updated daemon pod + // +optional + UpdatedNumberScheduled int32 + + // The number of nodes that should be running the + // daemon pod and have one or more of the daemon pod running and + // available (ready for at least spec.minReadySeconds) + // +optional + NumberAvailable int32 + + // The number of nodes that should be running the + // daemon pod and have none of the daemon pod running and available + // (ready for at least spec.minReadySeconds) + // +optional + NumberUnavailable int32 + + // Count of hash collisions for the DaemonSet. The DaemonSet controller + // uses this field as a collision avoidance mechanism when it needs to + // create the name for the newest ControllerRevision. + // +optional + CollisionCount *int32 + + // Represents the latest available observations of a DaemonSet's current state. + Conditions []DaemonSetCondition +} + +type DaemonSetConditionType string + +// TODO: Add valid condition types of a DaemonSet. + +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +type DaemonSetCondition struct { + // Type of DaemonSet condition. + Type DaemonSetConditionType + // Status of the condition, one of True, False, Unknown. + Status api.ConditionStatus + // Last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time + // The reason for the condition's last transition. + Reason string + // A human readable message indicating details about the transition. + Message string +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DaemonSet represents the configuration of a daemon set. +type DaemonSet struct { + metav1.TypeMeta + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta + + // The desired behavior of this daemon set. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec DaemonSetSpec + + // The current status of this daemon set. This data may be + // out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Status DaemonSetStatus +} + +const ( + // DEPRECATED: DefaultDaemonSetUniqueLabelKey is used instead. + // DaemonSetTemplateGenerationKey is the key of the labels that is added + // to daemon set pods to distinguish between old and new pod templates + // during DaemonSet template update. + DaemonSetTemplateGenerationKey string = "pod-template-generation" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DaemonSetList is a collection of daemon sets. +type DaemonSetList struct { + metav1.TypeMeta + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta + + // A list of daemon sets. + Items []DaemonSet +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ReplicaSet ensures that a specified number of pod replicas are running at any given time. +type ReplicaSet struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the desired behavior of this ReplicaSet. + // +optional + Spec ReplicaSetSpec + + // Status is the current status of this ReplicaSet. 
This data may be + // out of date by some window of time. + // +optional + Status ReplicaSetStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ReplicaSetList is a collection of ReplicaSets. +type ReplicaSetList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []ReplicaSet +} + +// ReplicaSetSpec is the specification of a ReplicaSet. +// As the internal representation of a ReplicaSet, it must have +// a Template set. +type ReplicaSetSpec struct { + // Replicas is the number of desired replicas. + Replicas int32 + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 + + // Selector is a label query over pods that should match the replica count. + // Must match in order to be controlled. + // If empty, defaulted to labels on pod template. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + Selector *metav1.LabelSelector + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. + // +optional + Template api.PodTemplateSpec +} + +// ReplicaSetStatus represents the current status of a ReplicaSet. +type ReplicaSetStatus struct { + // Replicas is the number of actual replicas. + Replicas int32 + + // The number of pods that have labels matching the labels of the pod template of the replicaset. + // +optional + FullyLabeledReplicas int32 + + // The number of ready replicas for this replica set. + // +optional + ReadyReplicas int32 + + // The number of available replicas (ready for at least minReadySeconds) for this replica set. + // +optional + AvailableReplicas int32 + + // ObservedGeneration is the most recent generation observed by the controller. + // +optional + ObservedGeneration int64 + + // Represents the latest available observations of a replica set's current state. + // +optional + Conditions []ReplicaSetCondition +} + +type ReplicaSetConditionType string + +// These are valid conditions of a replica set. +const ( + // ReplicaSetReplicaFailure is added in a replica set when one of its pods fails to be created + // due to insufficient quota, limit ranges, pod security policy, node selectors, etc. or deleted + // due to kubelet being down or finalizers are failing. + ReplicaSetReplicaFailure ReplicaSetConditionType = "ReplicaFailure" +) + +// ReplicaSetCondition describes the state of a replica set at a certain point. +type ReplicaSetCondition struct { + // Type of replica set condition. + Type ReplicaSetConditionType + // Status of the condition, one of True, False, Unknown. + Status api.ConditionStatus + // The last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time + // The reason for the condition's last transition. + // +optional + Reason string + // A human readable message indicating details about the transition. 
+ // +optional + Message string +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/BUILD index 925be0c91cee..852490223ea5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/BUILD @@ -16,7 +16,6 @@ go_library( "//pkg/apis/apps:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/apis/core/v1:go_default_library", - "//pkg/apis/extensions:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -40,7 +39,6 @@ go_test( "//pkg/apis/apps/install:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/apis/core/install:go_default_library", - "//pkg/apis/extensions:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/conversion.go index 565430b0e960..34f6078b467d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/conversion.go @@ -29,7 +29,6 @@ import ( "k8s.io/kubernetes/pkg/apis/apps" api "k8s.io/kubernetes/pkg/apis/core" k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" - "k8s.io/kubernetes/pkg/apis/extensions" ) func addConversionFuncs(scheme *runtime.Scheme) error { @@ -42,29 +41,29 @@ func addConversionFuncs(scheme *runtime.Scheme) error { Convert_apps_StatefulSetSpec_To_v1_StatefulSetSpec, Convert_v1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy, Convert_apps_StatefulSetUpdateStrategy_To_v1_StatefulSetUpdateStrategy, - Convert_extensions_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet, - Convert_v1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet, + Convert_apps_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet, + Convert_v1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet, Convert_v1_StatefulSetStatus_To_apps_StatefulSetStatus, Convert_apps_StatefulSetStatus_To_v1_StatefulSetStatus, - Convert_v1_Deployment_To_extensions_Deployment, - Convert_extensions_Deployment_To_v1_Deployment, - Convert_extensions_DaemonSet_To_v1_DaemonSet, - Convert_v1_DaemonSet_To_extensions_DaemonSet, - Convert_extensions_DaemonSetSpec_To_v1_DaemonSetSpec, - Convert_v1_DaemonSetSpec_To_extensions_DaemonSetSpec, - Convert_extensions_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy, - Convert_v1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy, + Convert_v1_Deployment_To_apps_Deployment, + Convert_apps_Deployment_To_v1_Deployment, + Convert_apps_DaemonSet_To_v1_DaemonSet, + Convert_v1_DaemonSet_To_apps_DaemonSet, + Convert_apps_DaemonSetSpec_To_v1_DaemonSetSpec, + Convert_v1_DaemonSetSpec_To_apps_DaemonSetSpec, + Convert_apps_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy, + Convert_v1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy, // extensions // TODO: below conversions should be dropped in favor of auto-generated // ones, see https://github.com/kubernetes/kubernetes/issues/39865 - Convert_v1_DeploymentSpec_To_extensions_DeploymentSpec, - Convert_extensions_DeploymentSpec_To_v1_DeploymentSpec, 
- Convert_v1_DeploymentStrategy_To_extensions_DeploymentStrategy, - Convert_extensions_DeploymentStrategy_To_v1_DeploymentStrategy, - Convert_v1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment, - Convert_extensions_RollingUpdateDeployment_To_v1_RollingUpdateDeployment, - Convert_extensions_ReplicaSetSpec_To_v1_ReplicaSetSpec, - Convert_v1_ReplicaSetSpec_To_extensions_ReplicaSetSpec, + Convert_v1_DeploymentSpec_To_apps_DeploymentSpec, + Convert_apps_DeploymentSpec_To_v1_DeploymentSpec, + Convert_v1_DeploymentStrategy_To_apps_DeploymentStrategy, + Convert_apps_DeploymentStrategy_To_v1_DeploymentStrategy, + Convert_v1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment, + Convert_apps_RollingUpdateDeployment_To_v1_RollingUpdateDeployment, + Convert_apps_ReplicaSetSpec_To_v1_ReplicaSetSpec, + Convert_v1_ReplicaSetSpec_To_apps_ReplicaSetSpec, ) if err != nil { return err @@ -72,7 +71,7 @@ func addConversionFuncs(scheme *runtime.Scheme) error { return nil } -func Convert_v1_DeploymentSpec_To_extensions_DeploymentSpec(in *appsv1.DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { +func Convert_v1_DeploymentSpec_To_apps_DeploymentSpec(in *appsv1.DeploymentSpec, out *apps.DeploymentSpec, s conversion.Scope) error { if in.Replicas != nil { out.Replicas = *in.Replicas } @@ -80,7 +79,7 @@ func Convert_v1_DeploymentSpec_To_extensions_DeploymentSpec(in *appsv1.Deploymen if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_v1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + if err := Convert_v1_DeploymentStrategy_To_apps_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } out.RevisionHistoryLimit = in.RevisionHistoryLimit @@ -93,13 +92,13 @@ func Convert_v1_DeploymentSpec_To_extensions_DeploymentSpec(in *appsv1.Deploymen return nil } -func Convert_extensions_DeploymentSpec_To_v1_DeploymentSpec(in *extensions.DeploymentSpec, out *appsv1.DeploymentSpec, s conversion.Scope) error { +func Convert_apps_DeploymentSpec_To_v1_DeploymentSpec(in *apps.DeploymentSpec, out *appsv1.DeploymentSpec, s conversion.Scope) error { out.Replicas = &in.Replicas out.Selector = in.Selector if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_extensions_DeploymentStrategy_To_v1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + if err := Convert_apps_DeploymentStrategy_To_v1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } if in.RevisionHistoryLimit != nil { @@ -115,11 +114,11 @@ func Convert_extensions_DeploymentSpec_To_v1_DeploymentSpec(in *extensions.Deplo return nil } -func Convert_extensions_DeploymentStrategy_To_v1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *appsv1.DeploymentStrategy, s conversion.Scope) error { +func Convert_apps_DeploymentStrategy_To_v1_DeploymentStrategy(in *apps.DeploymentStrategy, out *appsv1.DeploymentStrategy, s conversion.Scope) error { out.Type = appsv1.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { out.RollingUpdate = new(appsv1.RollingUpdateDeployment) - if err := Convert_extensions_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { + if err := Convert_apps_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != 
nil { return err } } else { @@ -128,11 +127,11 @@ func Convert_extensions_DeploymentStrategy_To_v1_DeploymentStrategy(in *extensio return nil } -func Convert_v1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *appsv1.DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error { - out.Type = extensions.DeploymentStrategyType(in.Type) +func Convert_v1_DeploymentStrategy_To_apps_DeploymentStrategy(in *appsv1.DeploymentStrategy, out *apps.DeploymentStrategy, s conversion.Scope) error { + out.Type = apps.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { - out.RollingUpdate = new(extensions.RollingUpdateDeployment) - if err := Convert_v1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { + out.RollingUpdate = new(apps.RollingUpdateDeployment) + if err := Convert_v1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { return err } } else { @@ -141,7 +140,7 @@ func Convert_v1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *appsv1.D return nil } -func Convert_v1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *appsv1.RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error { +func Convert_v1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *appsv1.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error { if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { return err } @@ -151,7 +150,7 @@ func Convert_v1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in return nil } -func Convert_extensions_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *appsv1.RollingUpdateDeployment, s conversion.Scope) error { +func Convert_apps_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *appsv1.RollingUpdateDeployment, s conversion.Scope) error { if out.MaxUnavailable == nil { out.MaxUnavailable = &intstr.IntOrString{} } @@ -167,9 +166,9 @@ func Convert_extensions_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in return nil } -func Convert_v1_Deployment_To_extensions_Deployment(in *appsv1.Deployment, out *extensions.Deployment, s conversion.Scope) error { +func Convert_v1_Deployment_To_apps_Deployment(in *appsv1.Deployment, out *apps.Deployment, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_DeploymentSpec_To_extensions_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1_DeploymentSpec_To_apps_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -179,7 +178,7 @@ func Convert_v1_Deployment_To_extensions_Deployment(in *appsv1.Deployment, out * if revision64, err := strconv.ParseInt(revision, 10, 64); err != nil { return fmt.Errorf("failed to parse annotation[%s]=%s as int64: %v", appsv1.DeprecatedRollbackTo, revision, err) } else { - out.Spec.RollbackTo = new(extensions.RollbackConfig) + out.Spec.RollbackTo = new(apps.RollbackConfig) out.Spec.RollbackTo.Revision = revision64 } out.Annotations = deepCopyStringMap(out.Annotations) @@ -188,17 +187,17 @@ func Convert_v1_Deployment_To_extensions_Deployment(in *appsv1.Deployment, out * out.Spec.RollbackTo = nil } - if err := Convert_v1_DeploymentStatus_To_extensions_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1_DeploymentStatus_To_apps_DeploymentStatus(&in.Status, &out.Status, 
s); err != nil { return err } return nil } -func Convert_extensions_Deployment_To_v1_Deployment(in *extensions.Deployment, out *appsv1.Deployment, s conversion.Scope) error { +func Convert_apps_Deployment_To_v1_Deployment(in *apps.Deployment, out *appsv1.Deployment, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta out.Annotations = deepCopyStringMap(out.Annotations) // deep copy because we modify it below - if err := Convert_extensions_DeploymentSpec_To_v1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_apps_DeploymentSpec_To_v1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -213,13 +212,13 @@ func Convert_extensions_Deployment_To_v1_Deployment(in *extensions.Deployment, o delete(out.Annotations, appsv1.DeprecatedRollbackTo) } - if err := Convert_extensions_DeploymentStatus_To_v1_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_apps_DeploymentStatus_To_v1_DeploymentStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func Convert_extensions_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(in *extensions.RollingUpdateDaemonSet, out *appsv1.RollingUpdateDaemonSet, s conversion.Scope) error { +func Convert_apps_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(in *apps.RollingUpdateDaemonSet, out *appsv1.RollingUpdateDaemonSet, s conversion.Scope) error { if out.MaxUnavailable == nil { out.MaxUnavailable = &intstr.IntOrString{} } @@ -229,19 +228,19 @@ func Convert_extensions_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(in * return nil } -func Convert_v1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in *appsv1.RollingUpdateDaemonSet, out *extensions.RollingUpdateDaemonSet, s conversion.Scope) error { +func Convert_v1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in *appsv1.RollingUpdateDaemonSet, out *apps.RollingUpdateDaemonSet, s conversion.Scope) error { if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { return err } return nil } -func Convert_extensions_DaemonSet_To_v1_DaemonSet(in *extensions.DaemonSet, out *appsv1.DaemonSet, s conversion.Scope) error { +func Convert_apps_DaemonSet_To_v1_DaemonSet(in *apps.DaemonSet, out *appsv1.DaemonSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta out.Annotations = deepCopyStringMap(out.Annotations) // deep copy annotations because we change them below out.Annotations[appsv1.DeprecatedTemplateGeneration] = strconv.FormatInt(in.Spec.TemplateGeneration, 10) - if err := Convert_extensions_DaemonSetSpec_To_v1_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_apps_DaemonSetSpec_To_v1_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } if err := s.Convert(&in.Status, &out.Status, 0); err != nil { @@ -250,12 +249,12 @@ func Convert_extensions_DaemonSet_To_v1_DaemonSet(in *extensions.DaemonSet, out return nil } -func Convert_extensions_DaemonSetSpec_To_v1_DaemonSetSpec(in *extensions.DaemonSetSpec, out *appsv1.DaemonSetSpec, s conversion.Scope) error { +func Convert_apps_DaemonSetSpec_To_v1_DaemonSetSpec(in *apps.DaemonSetSpec, out *appsv1.DaemonSetSpec, s conversion.Scope) error { out.Selector = in.Selector if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_extensions_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + if err := 
Convert_apps_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { return err } out.MinReadySeconds = int32(in.MinReadySeconds) @@ -268,20 +267,20 @@ func Convert_extensions_DaemonSetSpec_To_v1_DaemonSetSpec(in *extensions.DaemonS return nil } -func Convert_extensions_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(in *extensions.DaemonSetUpdateStrategy, out *appsv1.DaemonSetUpdateStrategy, s conversion.Scope) error { +func Convert_apps_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(in *apps.DaemonSetUpdateStrategy, out *appsv1.DaemonSetUpdateStrategy, s conversion.Scope) error { out.Type = appsv1.DaemonSetUpdateStrategyType(in.Type) if in.RollingUpdate != nil { out.RollingUpdate = &appsv1.RollingUpdateDaemonSet{} - if err := Convert_extensions_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(in.RollingUpdate, out.RollingUpdate, s); err != nil { + if err := Convert_apps_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(in.RollingUpdate, out.RollingUpdate, s); err != nil { return err } } return nil } -func Convert_v1_DaemonSet_To_extensions_DaemonSet(in *appsv1.DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error { +func Convert_v1_DaemonSet_To_apps_DaemonSet(in *appsv1.DaemonSet, out *apps.DaemonSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_DaemonSetSpec_To_extensions_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1_DaemonSetSpec_To_apps_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } if value, ok := in.Annotations[appsv1.DeprecatedTemplateGeneration]; ok { @@ -299,12 +298,12 @@ func Convert_v1_DaemonSet_To_extensions_DaemonSet(in *appsv1.DaemonSet, out *ext return nil } -func Convert_v1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *appsv1.DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error { +func Convert_v1_DaemonSetSpec_To_apps_DaemonSetSpec(in *appsv1.DaemonSetSpec, out *apps.DaemonSetSpec, s conversion.Scope) error { out.Selector = in.Selector if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_v1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + if err := Convert_v1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { return err } if in.RevisionHistoryLimit != nil { @@ -317,18 +316,18 @@ func Convert_v1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *appsv1.DaemonSetSp return nil } -func Convert_v1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in *appsv1.DaemonSetUpdateStrategy, out *extensions.DaemonSetUpdateStrategy, s conversion.Scope) error { - out.Type = extensions.DaemonSetUpdateStrategyType(in.Type) +func Convert_v1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(in *appsv1.DaemonSetUpdateStrategy, out *apps.DaemonSetUpdateStrategy, s conversion.Scope) error { + out.Type = apps.DaemonSetUpdateStrategyType(in.Type) if in.RollingUpdate != nil { - out.RollingUpdate = &extensions.RollingUpdateDaemonSet{} - if err := Convert_v1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in.RollingUpdate, out.RollingUpdate, s); err != nil { + out.RollingUpdate = &apps.RollingUpdateDaemonSet{} + if err := Convert_v1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in.RollingUpdate, out.RollingUpdate, s); err != nil { return err } } return nil 
} -func Convert_extensions_ReplicaSetSpec_To_v1_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *appsv1.ReplicaSetSpec, s conversion.Scope) error { +func Convert_apps_ReplicaSetSpec_To_v1_ReplicaSetSpec(in *apps.ReplicaSetSpec, out *appsv1.ReplicaSetSpec, s conversion.Scope) error { out.Replicas = new(int32) *out.Replicas = int32(in.Replicas) out.MinReadySeconds = in.MinReadySeconds @@ -339,7 +338,7 @@ func Convert_extensions_ReplicaSetSpec_To_v1_ReplicaSetSpec(in *extensions.Repli return nil } -func Convert_v1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *appsv1.ReplicaSetSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { +func Convert_v1_ReplicaSetSpec_To_apps_ReplicaSetSpec(in *appsv1.ReplicaSetSpec, out *apps.ReplicaSetSpec, s conversion.Scope) error { if in.Replicas != nil { out.Replicas = *in.Replicas } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/doc.go index 9a1682faa3de..dcf18cf9f12d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/doc.go @@ -15,7 +15,6 @@ limitations under the License. */ // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/apps -// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/extensions // +k8s:conversion-gen-external-types=k8s.io/api/apps/v1 // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/apps/v1 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/zz_generated.conversion.go index dfeb85fe87d3..0c0c636dd85d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/zz_generated.conversion.go @@ -31,7 +31,6 @@ import ( apps "k8s.io/kubernetes/pkg/apis/apps" core "k8s.io/kubernetes/pkg/apis/core" apiscorev1 "k8s.io/kubernetes/pkg/apis/core/v1" - extensions "k8s.io/kubernetes/pkg/apis/extensions" ) func init() { @@ -61,193 +60,193 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.DaemonSet)(nil), (*extensions.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DaemonSet_To_extensions_DaemonSet(a.(*v1.DaemonSet), b.(*extensions.DaemonSet), scope) + if err := s.AddGeneratedConversionFunc((*v1.DaemonSet)(nil), (*apps.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DaemonSet_To_apps_DaemonSet(a.(*v1.DaemonSet), b.(*apps.DaemonSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSet)(nil), (*v1.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSet_To_v1_DaemonSet(a.(*extensions.DaemonSet), b.(*v1.DaemonSet), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSet)(nil), (*v1.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSet_To_v1_DaemonSet(a.(*apps.DaemonSet), b.(*v1.DaemonSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.DaemonSetCondition)(nil), (*extensions.DaemonSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1_DaemonSetCondition_To_extensions_DaemonSetCondition(a.(*v1.DaemonSetCondition), b.(*extensions.DaemonSetCondition), scope) + if err := s.AddGeneratedConversionFunc((*v1.DaemonSetCondition)(nil), (*apps.DaemonSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DaemonSetCondition_To_apps_DaemonSetCondition(a.(*v1.DaemonSetCondition), b.(*apps.DaemonSetCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSetCondition)(nil), (*v1.DaemonSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetCondition_To_v1_DaemonSetCondition(a.(*extensions.DaemonSetCondition), b.(*v1.DaemonSetCondition), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSetCondition)(nil), (*v1.DaemonSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetCondition_To_v1_DaemonSetCondition(a.(*apps.DaemonSetCondition), b.(*v1.DaemonSetCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.DaemonSetList)(nil), (*extensions.DaemonSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DaemonSetList_To_extensions_DaemonSetList(a.(*v1.DaemonSetList), b.(*extensions.DaemonSetList), scope) + if err := s.AddGeneratedConversionFunc((*v1.DaemonSetList)(nil), (*apps.DaemonSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DaemonSetList_To_apps_DaemonSetList(a.(*v1.DaemonSetList), b.(*apps.DaemonSetList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSetList)(nil), (*v1.DaemonSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetList_To_v1_DaemonSetList(a.(*extensions.DaemonSetList), b.(*v1.DaemonSetList), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSetList)(nil), (*v1.DaemonSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetList_To_v1_DaemonSetList(a.(*apps.DaemonSetList), b.(*v1.DaemonSetList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.DaemonSetSpec)(nil), (*extensions.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DaemonSetSpec_To_extensions_DaemonSetSpec(a.(*v1.DaemonSetSpec), b.(*extensions.DaemonSetSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1.DaemonSetSpec)(nil), (*apps.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DaemonSetSpec_To_apps_DaemonSetSpec(a.(*v1.DaemonSetSpec), b.(*apps.DaemonSetSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSetSpec)(nil), (*v1.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetSpec_To_v1_DaemonSetSpec(a.(*extensions.DaemonSetSpec), b.(*v1.DaemonSetSpec), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSetSpec)(nil), (*v1.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetSpec_To_v1_DaemonSetSpec(a.(*apps.DaemonSetSpec), b.(*v1.DaemonSetSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.DaemonSetStatus)(nil), (*extensions.DaemonSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1_DaemonSetStatus_To_extensions_DaemonSetStatus(a.(*v1.DaemonSetStatus), b.(*extensions.DaemonSetStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1.DaemonSetStatus)(nil), (*apps.DaemonSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DaemonSetStatus_To_apps_DaemonSetStatus(a.(*v1.DaemonSetStatus), b.(*apps.DaemonSetStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSetStatus)(nil), (*v1.DaemonSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetStatus_To_v1_DaemonSetStatus(a.(*extensions.DaemonSetStatus), b.(*v1.DaemonSetStatus), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSetStatus)(nil), (*v1.DaemonSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetStatus_To_v1_DaemonSetStatus(a.(*apps.DaemonSetStatus), b.(*v1.DaemonSetStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.DaemonSetUpdateStrategy)(nil), (*extensions.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(a.(*v1.DaemonSetUpdateStrategy), b.(*extensions.DaemonSetUpdateStrategy), scope) + if err := s.AddGeneratedConversionFunc((*v1.DaemonSetUpdateStrategy)(nil), (*apps.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(a.(*v1.DaemonSetUpdateStrategy), b.(*apps.DaemonSetUpdateStrategy), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSetUpdateStrategy)(nil), (*v1.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(a.(*extensions.DaemonSetUpdateStrategy), b.(*v1.DaemonSetUpdateStrategy), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSetUpdateStrategy)(nil), (*v1.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(a.(*apps.DaemonSetUpdateStrategy), b.(*v1.DaemonSetUpdateStrategy), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.Deployment)(nil), (*extensions.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Deployment_To_extensions_Deployment(a.(*v1.Deployment), b.(*extensions.Deployment), scope) + if err := s.AddGeneratedConversionFunc((*v1.Deployment)(nil), (*apps.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Deployment_To_apps_Deployment(a.(*v1.Deployment), b.(*apps.Deployment), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.Deployment)(nil), (*v1.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_Deployment_To_v1_Deployment(a.(*extensions.Deployment), b.(*v1.Deployment), scope) + if err := s.AddGeneratedConversionFunc((*apps.Deployment)(nil), (*v1.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_Deployment_To_v1_Deployment(a.(*apps.Deployment), b.(*v1.Deployment), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.DeploymentCondition)(nil), 
(*extensions.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DeploymentCondition_To_extensions_DeploymentCondition(a.(*v1.DeploymentCondition), b.(*extensions.DeploymentCondition), scope) + if err := s.AddGeneratedConversionFunc((*v1.DeploymentCondition)(nil), (*apps.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DeploymentCondition_To_apps_DeploymentCondition(a.(*v1.DeploymentCondition), b.(*apps.DeploymentCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentCondition)(nil), (*v1.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentCondition_To_v1_DeploymentCondition(a.(*extensions.DeploymentCondition), b.(*v1.DeploymentCondition), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentCondition)(nil), (*v1.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentCondition_To_v1_DeploymentCondition(a.(*apps.DeploymentCondition), b.(*v1.DeploymentCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.DeploymentList)(nil), (*extensions.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DeploymentList_To_extensions_DeploymentList(a.(*v1.DeploymentList), b.(*extensions.DeploymentList), scope) + if err := s.AddGeneratedConversionFunc((*v1.DeploymentList)(nil), (*apps.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DeploymentList_To_apps_DeploymentList(a.(*v1.DeploymentList), b.(*apps.DeploymentList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentList)(nil), (*v1.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentList_To_v1_DeploymentList(a.(*extensions.DeploymentList), b.(*v1.DeploymentList), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentList)(nil), (*v1.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentList_To_v1_DeploymentList(a.(*apps.DeploymentList), b.(*v1.DeploymentList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.DeploymentSpec)(nil), (*extensions.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DeploymentSpec_To_extensions_DeploymentSpec(a.(*v1.DeploymentSpec), b.(*extensions.DeploymentSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1.DeploymentSpec)(nil), (*apps.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DeploymentSpec_To_apps_DeploymentSpec(a.(*v1.DeploymentSpec), b.(*apps.DeploymentSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentSpec)(nil), (*v1.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentSpec_To_v1_DeploymentSpec(a.(*extensions.DeploymentSpec), b.(*v1.DeploymentSpec), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentSpec)(nil), (*v1.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentSpec_To_v1_DeploymentSpec(a.(*apps.DeploymentSpec), b.(*v1.DeploymentSpec), scope) }); err != nil { return err } - if err := 
s.AddGeneratedConversionFunc((*v1.DeploymentStatus)(nil), (*extensions.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DeploymentStatus_To_extensions_DeploymentStatus(a.(*v1.DeploymentStatus), b.(*extensions.DeploymentStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1.DeploymentStatus)(nil), (*apps.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DeploymentStatus_To_apps_DeploymentStatus(a.(*v1.DeploymentStatus), b.(*apps.DeploymentStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentStatus)(nil), (*v1.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentStatus_To_v1_DeploymentStatus(a.(*extensions.DeploymentStatus), b.(*v1.DeploymentStatus), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentStatus)(nil), (*v1.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentStatus_To_v1_DeploymentStatus(a.(*apps.DeploymentStatus), b.(*v1.DeploymentStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.DeploymentStrategy)(nil), (*extensions.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DeploymentStrategy_To_extensions_DeploymentStrategy(a.(*v1.DeploymentStrategy), b.(*extensions.DeploymentStrategy), scope) + if err := s.AddGeneratedConversionFunc((*v1.DeploymentStrategy)(nil), (*apps.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DeploymentStrategy_To_apps_DeploymentStrategy(a.(*v1.DeploymentStrategy), b.(*apps.DeploymentStrategy), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentStrategy)(nil), (*v1.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentStrategy_To_v1_DeploymentStrategy(a.(*extensions.DeploymentStrategy), b.(*v1.DeploymentStrategy), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentStrategy)(nil), (*v1.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentStrategy_To_v1_DeploymentStrategy(a.(*apps.DeploymentStrategy), b.(*v1.DeploymentStrategy), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ReplicaSet)(nil), (*extensions.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicaSet_To_extensions_ReplicaSet(a.(*v1.ReplicaSet), b.(*extensions.ReplicaSet), scope) + if err := s.AddGeneratedConversionFunc((*v1.ReplicaSet)(nil), (*apps.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ReplicaSet_To_apps_ReplicaSet(a.(*v1.ReplicaSet), b.(*apps.ReplicaSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.ReplicaSet)(nil), (*v1.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSet_To_v1_ReplicaSet(a.(*extensions.ReplicaSet), b.(*v1.ReplicaSet), scope) + if err := s.AddGeneratedConversionFunc((*apps.ReplicaSet)(nil), (*v1.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSet_To_v1_ReplicaSet(a.(*apps.ReplicaSet), b.(*v1.ReplicaSet), scope) }); err != nil { return err } - if err := 
s.AddGeneratedConversionFunc((*v1.ReplicaSetCondition)(nil), (*extensions.ReplicaSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(a.(*v1.ReplicaSetCondition), b.(*extensions.ReplicaSetCondition), scope) + if err := s.AddGeneratedConversionFunc((*v1.ReplicaSetCondition)(nil), (*apps.ReplicaSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ReplicaSetCondition_To_apps_ReplicaSetCondition(a.(*v1.ReplicaSetCondition), b.(*apps.ReplicaSetCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.ReplicaSetCondition)(nil), (*v1.ReplicaSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetCondition_To_v1_ReplicaSetCondition(a.(*extensions.ReplicaSetCondition), b.(*v1.ReplicaSetCondition), scope) + if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetCondition)(nil), (*v1.ReplicaSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetCondition_To_v1_ReplicaSetCondition(a.(*apps.ReplicaSetCondition), b.(*v1.ReplicaSetCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ReplicaSetList)(nil), (*extensions.ReplicaSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicaSetList_To_extensions_ReplicaSetList(a.(*v1.ReplicaSetList), b.(*extensions.ReplicaSetList), scope) + if err := s.AddGeneratedConversionFunc((*v1.ReplicaSetList)(nil), (*apps.ReplicaSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ReplicaSetList_To_apps_ReplicaSetList(a.(*v1.ReplicaSetList), b.(*apps.ReplicaSetList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.ReplicaSetList)(nil), (*v1.ReplicaSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetList_To_v1_ReplicaSetList(a.(*extensions.ReplicaSetList), b.(*v1.ReplicaSetList), scope) + if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetList)(nil), (*v1.ReplicaSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetList_To_v1_ReplicaSetList(a.(*apps.ReplicaSetList), b.(*v1.ReplicaSetList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ReplicaSetSpec)(nil), (*extensions.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(a.(*v1.ReplicaSetSpec), b.(*extensions.ReplicaSetSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1.ReplicaSetSpec)(nil), (*apps.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ReplicaSetSpec_To_apps_ReplicaSetSpec(a.(*v1.ReplicaSetSpec), b.(*apps.ReplicaSetSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.ReplicaSetSpec)(nil), (*v1.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetSpec_To_v1_ReplicaSetSpec(a.(*extensions.ReplicaSetSpec), b.(*v1.ReplicaSetSpec), scope) + if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetSpec)(nil), (*v1.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetSpec_To_v1_ReplicaSetSpec(a.(*apps.ReplicaSetSpec), 
b.(*v1.ReplicaSetSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.ReplicaSetStatus)(nil), (*extensions.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(a.(*v1.ReplicaSetStatus), b.(*extensions.ReplicaSetStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1.ReplicaSetStatus)(nil), (*apps.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ReplicaSetStatus_To_apps_ReplicaSetStatus(a.(*v1.ReplicaSetStatus), b.(*apps.ReplicaSetStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.ReplicaSetStatus)(nil), (*v1.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetStatus_To_v1_ReplicaSetStatus(a.(*extensions.ReplicaSetStatus), b.(*v1.ReplicaSetStatus), scope) + if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetStatus)(nil), (*v1.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetStatus_To_v1_ReplicaSetStatus(a.(*apps.ReplicaSetStatus), b.(*v1.ReplicaSetStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.RollingUpdateDaemonSet)(nil), (*extensions.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(a.(*v1.RollingUpdateDaemonSet), b.(*extensions.RollingUpdateDaemonSet), scope) + if err := s.AddGeneratedConversionFunc((*v1.RollingUpdateDaemonSet)(nil), (*apps.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(a.(*v1.RollingUpdateDaemonSet), b.(*apps.RollingUpdateDaemonSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.RollingUpdateDaemonSet)(nil), (*v1.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(a.(*extensions.RollingUpdateDaemonSet), b.(*v1.RollingUpdateDaemonSet), scope) + if err := s.AddGeneratedConversionFunc((*apps.RollingUpdateDaemonSet)(nil), (*v1.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(a.(*apps.RollingUpdateDaemonSet), b.(*v1.RollingUpdateDaemonSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1.RollingUpdateDeployment)(nil), (*extensions.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(a.(*v1.RollingUpdateDeployment), b.(*extensions.RollingUpdateDeployment), scope) + if err := s.AddGeneratedConversionFunc((*v1.RollingUpdateDeployment)(nil), (*apps.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(a.(*v1.RollingUpdateDeployment), b.(*apps.RollingUpdateDeployment), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.RollingUpdateDeployment)(nil), (*v1.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_extensions_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(a.(*extensions.RollingUpdateDeployment), b.(*v1.RollingUpdateDeployment), scope) + if err := s.AddGeneratedConversionFunc((*apps.RollingUpdateDeployment)(nil), (*v1.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(a.(*apps.RollingUpdateDeployment), b.(*v1.RollingUpdateDeployment), scope) }); err != nil { return err } @@ -321,108 +320,108 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*apps.StatefulSetSpec)(nil), (*v1.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_StatefulSetSpec_To_v1_StatefulSetSpec(a.(*apps.StatefulSetSpec), b.(*v1.StatefulSetSpec), scope) + if err := s.AddConversionFunc((*apps.DaemonSetSpec)(nil), (*v1.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetSpec_To_v1_DaemonSetSpec(a.(*apps.DaemonSetSpec), b.(*v1.DaemonSetSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*apps.StatefulSetStatus)(nil), (*v1.StatefulSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_StatefulSetStatus_To_v1_StatefulSetStatus(a.(*apps.StatefulSetStatus), b.(*v1.StatefulSetStatus), scope) + if err := s.AddConversionFunc((*apps.DaemonSetUpdateStrategy)(nil), (*v1.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(a.(*apps.DaemonSetUpdateStrategy), b.(*v1.DaemonSetUpdateStrategy), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*apps.StatefulSetUpdateStrategy)(nil), (*v1.StatefulSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_StatefulSetUpdateStrategy_To_v1_StatefulSetUpdateStrategy(a.(*apps.StatefulSetUpdateStrategy), b.(*v1.StatefulSetUpdateStrategy), scope) + if err := s.AddConversionFunc((*apps.DaemonSet)(nil), (*v1.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSet_To_v1_DaemonSet(a.(*apps.DaemonSet), b.(*v1.DaemonSet), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.DaemonSetSpec)(nil), (*v1.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetSpec_To_v1_DaemonSetSpec(a.(*extensions.DaemonSetSpec), b.(*v1.DaemonSetSpec), scope) + if err := s.AddConversionFunc((*apps.DeploymentSpec)(nil), (*v1.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentSpec_To_v1_DeploymentSpec(a.(*apps.DeploymentSpec), b.(*v1.DeploymentSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.DaemonSetUpdateStrategy)(nil), (*v1.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(a.(*extensions.DaemonSetUpdateStrategy), b.(*v1.DaemonSetUpdateStrategy), scope) + if err := s.AddConversionFunc((*apps.DeploymentStrategy)(nil), (*v1.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentStrategy_To_v1_DeploymentStrategy(a.(*apps.DeploymentStrategy), b.(*v1.DeploymentStrategy), scope) }); err != nil { return err 
} - if err := s.AddConversionFunc((*extensions.DaemonSet)(nil), (*v1.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSet_To_v1_DaemonSet(a.(*extensions.DaemonSet), b.(*v1.DaemonSet), scope) + if err := s.AddConversionFunc((*apps.Deployment)(nil), (*v1.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_Deployment_To_v1_Deployment(a.(*apps.Deployment), b.(*v1.Deployment), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.DeploymentSpec)(nil), (*v1.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentSpec_To_v1_DeploymentSpec(a.(*extensions.DeploymentSpec), b.(*v1.DeploymentSpec), scope) + if err := s.AddConversionFunc((*apps.ReplicaSetSpec)(nil), (*v1.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetSpec_To_v1_ReplicaSetSpec(a.(*apps.ReplicaSetSpec), b.(*v1.ReplicaSetSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.DeploymentStrategy)(nil), (*v1.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentStrategy_To_v1_DeploymentStrategy(a.(*extensions.DeploymentStrategy), b.(*v1.DeploymentStrategy), scope) + if err := s.AddConversionFunc((*apps.RollingUpdateDaemonSet)(nil), (*v1.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(a.(*apps.RollingUpdateDaemonSet), b.(*v1.RollingUpdateDaemonSet), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.Deployment)(nil), (*v1.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_Deployment_To_v1_Deployment(a.(*extensions.Deployment), b.(*v1.Deployment), scope) + if err := s.AddConversionFunc((*apps.RollingUpdateDeployment)(nil), (*v1.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(a.(*apps.RollingUpdateDeployment), b.(*v1.RollingUpdateDeployment), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.ReplicaSetSpec)(nil), (*v1.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetSpec_To_v1_ReplicaSetSpec(a.(*extensions.ReplicaSetSpec), b.(*v1.ReplicaSetSpec), scope) + if err := s.AddConversionFunc((*apps.StatefulSetSpec)(nil), (*v1.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_StatefulSetSpec_To_v1_StatefulSetSpec(a.(*apps.StatefulSetSpec), b.(*v1.StatefulSetSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.RollingUpdateDaemonSet)(nil), (*v1.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(a.(*extensions.RollingUpdateDaemonSet), b.(*v1.RollingUpdateDaemonSet), scope) + if err := s.AddConversionFunc((*apps.StatefulSetStatus)(nil), (*v1.StatefulSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_StatefulSetStatus_To_v1_StatefulSetStatus(a.(*apps.StatefulSetStatus), b.(*v1.StatefulSetStatus), scope) }); err != nil { return err } - if err := 
s.AddConversionFunc((*extensions.RollingUpdateDeployment)(nil), (*v1.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(a.(*extensions.RollingUpdateDeployment), b.(*v1.RollingUpdateDeployment), scope) + if err := s.AddConversionFunc((*apps.StatefulSetUpdateStrategy)(nil), (*v1.StatefulSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_StatefulSetUpdateStrategy_To_v1_StatefulSetUpdateStrategy(a.(*apps.StatefulSetUpdateStrategy), b.(*v1.StatefulSetUpdateStrategy), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.DaemonSetSpec)(nil), (*extensions.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DaemonSetSpec_To_extensions_DaemonSetSpec(a.(*v1.DaemonSetSpec), b.(*extensions.DaemonSetSpec), scope) + if err := s.AddConversionFunc((*v1.DaemonSetSpec)(nil), (*apps.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DaemonSetSpec_To_apps_DaemonSetSpec(a.(*v1.DaemonSetSpec), b.(*apps.DaemonSetSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.DaemonSetUpdateStrategy)(nil), (*extensions.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(a.(*v1.DaemonSetUpdateStrategy), b.(*extensions.DaemonSetUpdateStrategy), scope) + if err := s.AddConversionFunc((*v1.DaemonSetUpdateStrategy)(nil), (*apps.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(a.(*v1.DaemonSetUpdateStrategy), b.(*apps.DaemonSetUpdateStrategy), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.DaemonSet)(nil), (*extensions.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DaemonSet_To_extensions_DaemonSet(a.(*v1.DaemonSet), b.(*extensions.DaemonSet), scope) + if err := s.AddConversionFunc((*v1.DaemonSet)(nil), (*apps.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DaemonSet_To_apps_DaemonSet(a.(*v1.DaemonSet), b.(*apps.DaemonSet), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.DeploymentSpec)(nil), (*extensions.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DeploymentSpec_To_extensions_DeploymentSpec(a.(*v1.DeploymentSpec), b.(*extensions.DeploymentSpec), scope) + if err := s.AddConversionFunc((*v1.DeploymentSpec)(nil), (*apps.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DeploymentSpec_To_apps_DeploymentSpec(a.(*v1.DeploymentSpec), b.(*apps.DeploymentSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.DeploymentStrategy)(nil), (*extensions.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DeploymentStrategy_To_extensions_DeploymentStrategy(a.(*v1.DeploymentStrategy), b.(*extensions.DeploymentStrategy), scope) + if err := s.AddConversionFunc((*v1.DeploymentStrategy)(nil), (*apps.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DeploymentStrategy_To_apps_DeploymentStrategy(a.(*v1.DeploymentStrategy), b.(*apps.DeploymentStrategy), scope) }); err 
!= nil { return err } - if err := s.AddConversionFunc((*v1.Deployment)(nil), (*extensions.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Deployment_To_extensions_Deployment(a.(*v1.Deployment), b.(*extensions.Deployment), scope) + if err := s.AddConversionFunc((*v1.Deployment)(nil), (*apps.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Deployment_To_apps_Deployment(a.(*v1.Deployment), b.(*apps.Deployment), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.ReplicaSetSpec)(nil), (*extensions.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(a.(*v1.ReplicaSetSpec), b.(*extensions.ReplicaSetSpec), scope) + if err := s.AddConversionFunc((*v1.ReplicaSetSpec)(nil), (*apps.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ReplicaSetSpec_To_apps_ReplicaSetSpec(a.(*v1.ReplicaSetSpec), b.(*apps.ReplicaSetSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.RollingUpdateDaemonSet)(nil), (*extensions.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(a.(*v1.RollingUpdateDaemonSet), b.(*extensions.RollingUpdateDaemonSet), scope) + if err := s.AddConversionFunc((*v1.RollingUpdateDaemonSet)(nil), (*apps.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(a.(*v1.RollingUpdateDaemonSet), b.(*apps.RollingUpdateDaemonSet), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.RollingUpdateDeployment)(nil), (*extensions.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(a.(*v1.RollingUpdateDeployment), b.(*extensions.RollingUpdateDeployment), scope) + if err := s.AddConversionFunc((*v1.RollingUpdateDeployment)(nil), (*apps.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(a.(*v1.RollingUpdateDeployment), b.(*apps.RollingUpdateDeployment), scope) }); err != nil { return err } @@ -514,30 +513,30 @@ func Convert_apps_ControllerRevisionList_To_v1_ControllerRevisionList(in *apps.C return autoConvert_apps_ControllerRevisionList_To_v1_ControllerRevisionList(in, out, s) } -func autoConvert_v1_DaemonSet_To_extensions_DaemonSet(in *v1.DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error { +func autoConvert_v1_DaemonSet_To_apps_DaemonSet(in *v1.DaemonSet, out *apps.DaemonSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_DaemonSetSpec_To_extensions_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1_DaemonSetSpec_To_apps_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1_DaemonSetStatus_To_extensions_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1_DaemonSetStatus_To_apps_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func autoConvert_extensions_DaemonSet_To_v1_DaemonSet(in *extensions.DaemonSet, out *v1.DaemonSet, s conversion.Scope) error { +func autoConvert_apps_DaemonSet_To_v1_DaemonSet(in *apps.DaemonSet, 
out *v1.DaemonSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_DaemonSetSpec_To_v1_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_apps_DaemonSetSpec_To_v1_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_extensions_DaemonSetStatus_To_v1_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_apps_DaemonSetStatus_To_v1_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func autoConvert_v1_DaemonSetCondition_To_extensions_DaemonSetCondition(in *v1.DaemonSetCondition, out *extensions.DaemonSetCondition, s conversion.Scope) error { - out.Type = extensions.DaemonSetConditionType(in.Type) +func autoConvert_v1_DaemonSetCondition_To_apps_DaemonSetCondition(in *v1.DaemonSetCondition, out *apps.DaemonSetCondition, s conversion.Scope) error { + out.Type = apps.DaemonSetConditionType(in.Type) out.Status = core.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime out.Reason = in.Reason @@ -545,12 +544,12 @@ func autoConvert_v1_DaemonSetCondition_To_extensions_DaemonSetCondition(in *v1.D return nil } -// Convert_v1_DaemonSetCondition_To_extensions_DaemonSetCondition is an autogenerated conversion function. -func Convert_v1_DaemonSetCondition_To_extensions_DaemonSetCondition(in *v1.DaemonSetCondition, out *extensions.DaemonSetCondition, s conversion.Scope) error { - return autoConvert_v1_DaemonSetCondition_To_extensions_DaemonSetCondition(in, out, s) +// Convert_v1_DaemonSetCondition_To_apps_DaemonSetCondition is an autogenerated conversion function. +func Convert_v1_DaemonSetCondition_To_apps_DaemonSetCondition(in *v1.DaemonSetCondition, out *apps.DaemonSetCondition, s conversion.Scope) error { + return autoConvert_v1_DaemonSetCondition_To_apps_DaemonSetCondition(in, out, s) } -func autoConvert_extensions_DaemonSetCondition_To_v1_DaemonSetCondition(in *extensions.DaemonSetCondition, out *v1.DaemonSetCondition, s conversion.Scope) error { +func autoConvert_apps_DaemonSetCondition_To_v1_DaemonSetCondition(in *apps.DaemonSetCondition, out *v1.DaemonSetCondition, s conversion.Scope) error { out.Type = v1.DaemonSetConditionType(in.Type) out.Status = corev1.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime @@ -559,18 +558,18 @@ func autoConvert_extensions_DaemonSetCondition_To_v1_DaemonSetCondition(in *exte return nil } -// Convert_extensions_DaemonSetCondition_To_v1_DaemonSetCondition is an autogenerated conversion function. -func Convert_extensions_DaemonSetCondition_To_v1_DaemonSetCondition(in *extensions.DaemonSetCondition, out *v1.DaemonSetCondition, s conversion.Scope) error { - return autoConvert_extensions_DaemonSetCondition_To_v1_DaemonSetCondition(in, out, s) +// Convert_apps_DaemonSetCondition_To_v1_DaemonSetCondition is an autogenerated conversion function. 
+func Convert_apps_DaemonSetCondition_To_v1_DaemonSetCondition(in *apps.DaemonSetCondition, out *v1.DaemonSetCondition, s conversion.Scope) error { + return autoConvert_apps_DaemonSetCondition_To_v1_DaemonSetCondition(in, out, s) } -func autoConvert_v1_DaemonSetList_To_extensions_DaemonSetList(in *v1.DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error { +func autoConvert_v1_DaemonSetList_To_apps_DaemonSetList(in *v1.DaemonSetList, out *apps.DaemonSetList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]extensions.DaemonSet, len(*in)) + *out = make([]apps.DaemonSet, len(*in)) for i := range *in { - if err := Convert_v1_DaemonSet_To_extensions_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_v1_DaemonSet_To_apps_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -580,18 +579,18 @@ func autoConvert_v1_DaemonSetList_To_extensions_DaemonSetList(in *v1.DaemonSetLi return nil } -// Convert_v1_DaemonSetList_To_extensions_DaemonSetList is an autogenerated conversion function. -func Convert_v1_DaemonSetList_To_extensions_DaemonSetList(in *v1.DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error { - return autoConvert_v1_DaemonSetList_To_extensions_DaemonSetList(in, out, s) +// Convert_v1_DaemonSetList_To_apps_DaemonSetList is an autogenerated conversion function. +func Convert_v1_DaemonSetList_To_apps_DaemonSetList(in *v1.DaemonSetList, out *apps.DaemonSetList, s conversion.Scope) error { + return autoConvert_v1_DaemonSetList_To_apps_DaemonSetList(in, out, s) } -func autoConvert_extensions_DaemonSetList_To_v1_DaemonSetList(in *extensions.DaemonSetList, out *v1.DaemonSetList, s conversion.Scope) error { +func autoConvert_apps_DaemonSetList_To_v1_DaemonSetList(in *apps.DaemonSetList, out *v1.DaemonSetList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]v1.DaemonSet, len(*in)) for i := range *in { - if err := Convert_extensions_DaemonSet_To_v1_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_apps_DaemonSet_To_v1_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -601,17 +600,17 @@ func autoConvert_extensions_DaemonSetList_To_v1_DaemonSetList(in *extensions.Dae return nil } -// Convert_extensions_DaemonSetList_To_v1_DaemonSetList is an autogenerated conversion function. -func Convert_extensions_DaemonSetList_To_v1_DaemonSetList(in *extensions.DaemonSetList, out *v1.DaemonSetList, s conversion.Scope) error { - return autoConvert_extensions_DaemonSetList_To_v1_DaemonSetList(in, out, s) +// Convert_apps_DaemonSetList_To_v1_DaemonSetList is an autogenerated conversion function. 
+func Convert_apps_DaemonSetList_To_v1_DaemonSetList(in *apps.DaemonSetList, out *v1.DaemonSetList, s conversion.Scope) error { + return autoConvert_apps_DaemonSetList_To_v1_DaemonSetList(in, out, s) } -func autoConvert_v1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *v1.DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error { +func autoConvert_v1_DaemonSetSpec_To_apps_DaemonSetSpec(in *v1.DaemonSetSpec, out *apps.DaemonSetSpec, s conversion.Scope) error { out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector)) if err := apiscorev1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_v1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + if err := Convert_v1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds @@ -619,12 +618,12 @@ func autoConvert_v1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *v1.DaemonSetSp return nil } -func autoConvert_extensions_DaemonSetSpec_To_v1_DaemonSetSpec(in *extensions.DaemonSetSpec, out *v1.DaemonSetSpec, s conversion.Scope) error { +func autoConvert_apps_DaemonSetSpec_To_v1_DaemonSetSpec(in *apps.DaemonSetSpec, out *v1.DaemonSetSpec, s conversion.Scope) error { out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector)) if err := apiscorev1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_extensions_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + if err := Convert_apps_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds @@ -633,7 +632,7 @@ func autoConvert_extensions_DaemonSetSpec_To_v1_DaemonSetSpec(in *extensions.Dae return nil } -func autoConvert_v1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *v1.DaemonSetStatus, out *extensions.DaemonSetStatus, s conversion.Scope) error { +func autoConvert_v1_DaemonSetStatus_To_apps_DaemonSetStatus(in *v1.DaemonSetStatus, out *apps.DaemonSetStatus, s conversion.Scope) error { out.CurrentNumberScheduled = in.CurrentNumberScheduled out.NumberMisscheduled = in.NumberMisscheduled out.DesiredNumberScheduled = in.DesiredNumberScheduled @@ -643,16 +642,16 @@ func autoConvert_v1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *v1.DaemonS out.NumberAvailable = in.NumberAvailable out.NumberUnavailable = in.NumberUnavailable out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount)) - out.Conditions = *(*[]extensions.DaemonSetCondition)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*[]apps.DaemonSetCondition)(unsafe.Pointer(&in.Conditions)) return nil } -// Convert_v1_DaemonSetStatus_To_extensions_DaemonSetStatus is an autogenerated conversion function. -func Convert_v1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *v1.DaemonSetStatus, out *extensions.DaemonSetStatus, s conversion.Scope) error { - return autoConvert_v1_DaemonSetStatus_To_extensions_DaemonSetStatus(in, out, s) +// Convert_v1_DaemonSetStatus_To_apps_DaemonSetStatus is an autogenerated conversion function. 
+func Convert_v1_DaemonSetStatus_To_apps_DaemonSetStatus(in *v1.DaemonSetStatus, out *apps.DaemonSetStatus, s conversion.Scope) error { + return autoConvert_v1_DaemonSetStatus_To_apps_DaemonSetStatus(in, out, s) } -func autoConvert_extensions_DaemonSetStatus_To_v1_DaemonSetStatus(in *extensions.DaemonSetStatus, out *v1.DaemonSetStatus, s conversion.Scope) error { +func autoConvert_apps_DaemonSetStatus_To_v1_DaemonSetStatus(in *apps.DaemonSetStatus, out *v1.DaemonSetStatus, s conversion.Scope) error { out.CurrentNumberScheduled = in.CurrentNumberScheduled out.NumberMisscheduled = in.NumberMisscheduled out.DesiredNumberScheduled = in.DesiredNumberScheduled @@ -666,17 +665,17 @@ func autoConvert_extensions_DaemonSetStatus_To_v1_DaemonSetStatus(in *extensions return nil } -// Convert_extensions_DaemonSetStatus_To_v1_DaemonSetStatus is an autogenerated conversion function. -func Convert_extensions_DaemonSetStatus_To_v1_DaemonSetStatus(in *extensions.DaemonSetStatus, out *v1.DaemonSetStatus, s conversion.Scope) error { - return autoConvert_extensions_DaemonSetStatus_To_v1_DaemonSetStatus(in, out, s) +// Convert_apps_DaemonSetStatus_To_v1_DaemonSetStatus is an autogenerated conversion function. +func Convert_apps_DaemonSetStatus_To_v1_DaemonSetStatus(in *apps.DaemonSetStatus, out *v1.DaemonSetStatus, s conversion.Scope) error { + return autoConvert_apps_DaemonSetStatus_To_v1_DaemonSetStatus(in, out, s) } -func autoConvert_v1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in *v1.DaemonSetUpdateStrategy, out *extensions.DaemonSetUpdateStrategy, s conversion.Scope) error { - out.Type = extensions.DaemonSetUpdateStrategyType(in.Type) +func autoConvert_v1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(in *v1.DaemonSetUpdateStrategy, out *apps.DaemonSetUpdateStrategy, s conversion.Scope) error { + out.Type = apps.DaemonSetUpdateStrategyType(in.Type) if in.RollingUpdate != nil { in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(extensions.RollingUpdateDaemonSet) - if err := Convert_v1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(*in, *out, s); err != nil { + *out = new(apps.RollingUpdateDaemonSet) + if err := Convert_v1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(*in, *out, s); err != nil { return err } } else { @@ -685,12 +684,12 @@ func autoConvert_v1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrateg return nil } -func autoConvert_extensions_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(in *extensions.DaemonSetUpdateStrategy, out *v1.DaemonSetUpdateStrategy, s conversion.Scope) error { +func autoConvert_apps_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(in *apps.DaemonSetUpdateStrategy, out *v1.DaemonSetUpdateStrategy, s conversion.Scope) error { out.Type = v1.DaemonSetUpdateStrategyType(in.Type) if in.RollingUpdate != nil { in, out := &in.RollingUpdate, &out.RollingUpdate *out = new(v1.RollingUpdateDaemonSet) - if err := Convert_extensions_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(*in, *out, s); err != nil { + if err := Convert_apps_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(*in, *out, s); err != nil { return err } } else { @@ -699,30 +698,30 @@ func autoConvert_extensions_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrateg return nil } -func autoConvert_v1_Deployment_To_extensions_Deployment(in *v1.Deployment, out *extensions.Deployment, s conversion.Scope) error { +func autoConvert_v1_Deployment_To_apps_Deployment(in *v1.Deployment, out *apps.Deployment, s conversion.Scope) error { 
out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_DeploymentSpec_To_extensions_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1_DeploymentSpec_To_apps_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1_DeploymentStatus_To_extensions_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1_DeploymentStatus_To_apps_DeploymentStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func autoConvert_extensions_Deployment_To_v1_Deployment(in *extensions.Deployment, out *v1.Deployment, s conversion.Scope) error { +func autoConvert_apps_Deployment_To_v1_Deployment(in *apps.Deployment, out *v1.Deployment, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_DeploymentSpec_To_v1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_apps_DeploymentSpec_To_v1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_extensions_DeploymentStatus_To_v1_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_apps_DeploymentStatus_To_v1_DeploymentStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func autoConvert_v1_DeploymentCondition_To_extensions_DeploymentCondition(in *v1.DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error { - out.Type = extensions.DeploymentConditionType(in.Type) +func autoConvert_v1_DeploymentCondition_To_apps_DeploymentCondition(in *v1.DeploymentCondition, out *apps.DeploymentCondition, s conversion.Scope) error { + out.Type = apps.DeploymentConditionType(in.Type) out.Status = core.ConditionStatus(in.Status) out.LastUpdateTime = in.LastUpdateTime out.LastTransitionTime = in.LastTransitionTime @@ -731,12 +730,12 @@ func autoConvert_v1_DeploymentCondition_To_extensions_DeploymentCondition(in *v1 return nil } -// Convert_v1_DeploymentCondition_To_extensions_DeploymentCondition is an autogenerated conversion function. -func Convert_v1_DeploymentCondition_To_extensions_DeploymentCondition(in *v1.DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error { - return autoConvert_v1_DeploymentCondition_To_extensions_DeploymentCondition(in, out, s) +// Convert_v1_DeploymentCondition_To_apps_DeploymentCondition is an autogenerated conversion function. +func Convert_v1_DeploymentCondition_To_apps_DeploymentCondition(in *v1.DeploymentCondition, out *apps.DeploymentCondition, s conversion.Scope) error { + return autoConvert_v1_DeploymentCondition_To_apps_DeploymentCondition(in, out, s) } -func autoConvert_extensions_DeploymentCondition_To_v1_DeploymentCondition(in *extensions.DeploymentCondition, out *v1.DeploymentCondition, s conversion.Scope) error { +func autoConvert_apps_DeploymentCondition_To_v1_DeploymentCondition(in *apps.DeploymentCondition, out *v1.DeploymentCondition, s conversion.Scope) error { out.Type = v1.DeploymentConditionType(in.Type) out.Status = corev1.ConditionStatus(in.Status) out.LastUpdateTime = in.LastUpdateTime @@ -746,18 +745,18 @@ func autoConvert_extensions_DeploymentCondition_To_v1_DeploymentCondition(in *ex return nil } -// Convert_extensions_DeploymentCondition_To_v1_DeploymentCondition is an autogenerated conversion function. 
-func Convert_extensions_DeploymentCondition_To_v1_DeploymentCondition(in *extensions.DeploymentCondition, out *v1.DeploymentCondition, s conversion.Scope) error { - return autoConvert_extensions_DeploymentCondition_To_v1_DeploymentCondition(in, out, s) +// Convert_apps_DeploymentCondition_To_v1_DeploymentCondition is an autogenerated conversion function. +func Convert_apps_DeploymentCondition_To_v1_DeploymentCondition(in *apps.DeploymentCondition, out *v1.DeploymentCondition, s conversion.Scope) error { + return autoConvert_apps_DeploymentCondition_To_v1_DeploymentCondition(in, out, s) } -func autoConvert_v1_DeploymentList_To_extensions_DeploymentList(in *v1.DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error { +func autoConvert_v1_DeploymentList_To_apps_DeploymentList(in *v1.DeploymentList, out *apps.DeploymentList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]extensions.Deployment, len(*in)) + *out = make([]apps.Deployment, len(*in)) for i := range *in { - if err := Convert_v1_Deployment_To_extensions_Deployment(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_v1_Deployment_To_apps_Deployment(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -767,18 +766,18 @@ func autoConvert_v1_DeploymentList_To_extensions_DeploymentList(in *v1.Deploymen return nil } -// Convert_v1_DeploymentList_To_extensions_DeploymentList is an autogenerated conversion function. -func Convert_v1_DeploymentList_To_extensions_DeploymentList(in *v1.DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error { - return autoConvert_v1_DeploymentList_To_extensions_DeploymentList(in, out, s) +// Convert_v1_DeploymentList_To_apps_DeploymentList is an autogenerated conversion function. +func Convert_v1_DeploymentList_To_apps_DeploymentList(in *v1.DeploymentList, out *apps.DeploymentList, s conversion.Scope) error { + return autoConvert_v1_DeploymentList_To_apps_DeploymentList(in, out, s) } -func autoConvert_extensions_DeploymentList_To_v1_DeploymentList(in *extensions.DeploymentList, out *v1.DeploymentList, s conversion.Scope) error { +func autoConvert_apps_DeploymentList_To_v1_DeploymentList(in *apps.DeploymentList, out *v1.DeploymentList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]v1.Deployment, len(*in)) for i := range *in { - if err := Convert_extensions_Deployment_To_v1_Deployment(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_apps_Deployment_To_v1_Deployment(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -788,12 +787,12 @@ func autoConvert_extensions_DeploymentList_To_v1_DeploymentList(in *extensions.D return nil } -// Convert_extensions_DeploymentList_To_v1_DeploymentList is an autogenerated conversion function. -func Convert_extensions_DeploymentList_To_v1_DeploymentList(in *extensions.DeploymentList, out *v1.DeploymentList, s conversion.Scope) error { - return autoConvert_extensions_DeploymentList_To_v1_DeploymentList(in, out, s) +// Convert_apps_DeploymentList_To_v1_DeploymentList is an autogenerated conversion function. 
+func Convert_apps_DeploymentList_To_v1_DeploymentList(in *apps.DeploymentList, out *v1.DeploymentList, s conversion.Scope) error { + return autoConvert_apps_DeploymentList_To_v1_DeploymentList(in, out, s) } -func autoConvert_v1_DeploymentSpec_To_extensions_DeploymentSpec(in *v1.DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { +func autoConvert_v1_DeploymentSpec_To_apps_DeploymentSpec(in *v1.DeploymentSpec, out *apps.DeploymentSpec, s conversion.Scope) error { if err := metav1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } @@ -801,7 +800,7 @@ func autoConvert_v1_DeploymentSpec_To_extensions_DeploymentSpec(in *v1.Deploymen if err := apiscorev1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_v1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + if err := Convert_v1_DeploymentStrategy_To_apps_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds @@ -811,7 +810,7 @@ func autoConvert_v1_DeploymentSpec_To_extensions_DeploymentSpec(in *v1.Deploymen return nil } -func autoConvert_extensions_DeploymentSpec_To_v1_DeploymentSpec(in *extensions.DeploymentSpec, out *v1.DeploymentSpec, s conversion.Scope) error { +func autoConvert_apps_DeploymentSpec_To_v1_DeploymentSpec(in *apps.DeploymentSpec, out *v1.DeploymentSpec, s conversion.Scope) error { if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } @@ -819,7 +818,7 @@ func autoConvert_extensions_DeploymentSpec_To_v1_DeploymentSpec(in *extensions.D if err := apiscorev1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_extensions_DeploymentStrategy_To_v1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + if err := Convert_apps_DeploymentStrategy_To_v1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds @@ -830,24 +829,24 @@ func autoConvert_extensions_DeploymentSpec_To_v1_DeploymentSpec(in *extensions.D return nil } -func autoConvert_v1_DeploymentStatus_To_extensions_DeploymentStatus(in *v1.DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error { +func autoConvert_v1_DeploymentStatus_To_apps_DeploymentStatus(in *v1.DeploymentStatus, out *apps.DeploymentStatus, s conversion.Scope) error { out.ObservedGeneration = in.ObservedGeneration out.Replicas = in.Replicas out.UpdatedReplicas = in.UpdatedReplicas out.ReadyReplicas = in.ReadyReplicas out.AvailableReplicas = in.AvailableReplicas out.UnavailableReplicas = in.UnavailableReplicas - out.Conditions = *(*[]extensions.DeploymentCondition)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*[]apps.DeploymentCondition)(unsafe.Pointer(&in.Conditions)) out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount)) return nil } -// Convert_v1_DeploymentStatus_To_extensions_DeploymentStatus is an autogenerated conversion function. -func Convert_v1_DeploymentStatus_To_extensions_DeploymentStatus(in *v1.DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error { - return autoConvert_v1_DeploymentStatus_To_extensions_DeploymentStatus(in, out, s) +// Convert_v1_DeploymentStatus_To_apps_DeploymentStatus is an autogenerated conversion function. 
+func Convert_v1_DeploymentStatus_To_apps_DeploymentStatus(in *v1.DeploymentStatus, out *apps.DeploymentStatus, s conversion.Scope) error { + return autoConvert_v1_DeploymentStatus_To_apps_DeploymentStatus(in, out, s) } -func autoConvert_extensions_DeploymentStatus_To_v1_DeploymentStatus(in *extensions.DeploymentStatus, out *v1.DeploymentStatus, s conversion.Scope) error { +func autoConvert_apps_DeploymentStatus_To_v1_DeploymentStatus(in *apps.DeploymentStatus, out *v1.DeploymentStatus, s conversion.Scope) error { out.ObservedGeneration = in.ObservedGeneration out.Replicas = in.Replicas out.UpdatedReplicas = in.UpdatedReplicas @@ -859,17 +858,17 @@ func autoConvert_extensions_DeploymentStatus_To_v1_DeploymentStatus(in *extensio return nil } -// Convert_extensions_DeploymentStatus_To_v1_DeploymentStatus is an autogenerated conversion function. -func Convert_extensions_DeploymentStatus_To_v1_DeploymentStatus(in *extensions.DeploymentStatus, out *v1.DeploymentStatus, s conversion.Scope) error { - return autoConvert_extensions_DeploymentStatus_To_v1_DeploymentStatus(in, out, s) +// Convert_apps_DeploymentStatus_To_v1_DeploymentStatus is an autogenerated conversion function. +func Convert_apps_DeploymentStatus_To_v1_DeploymentStatus(in *apps.DeploymentStatus, out *v1.DeploymentStatus, s conversion.Scope) error { + return autoConvert_apps_DeploymentStatus_To_v1_DeploymentStatus(in, out, s) } -func autoConvert_v1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *v1.DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error { - out.Type = extensions.DeploymentStrategyType(in.Type) +func autoConvert_v1_DeploymentStrategy_To_apps_DeploymentStrategy(in *v1.DeploymentStrategy, out *apps.DeploymentStrategy, s conversion.Scope) error { + out.Type = apps.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(extensions.RollingUpdateDeployment) - if err := Convert_v1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(*in, *out, s); err != nil { + *out = new(apps.RollingUpdateDeployment) + if err := Convert_v1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(*in, *out, s); err != nil { return err } } else { @@ -878,12 +877,12 @@ func autoConvert_v1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *v1.D return nil } -func autoConvert_extensions_DeploymentStrategy_To_v1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *v1.DeploymentStrategy, s conversion.Scope) error { +func autoConvert_apps_DeploymentStrategy_To_v1_DeploymentStrategy(in *apps.DeploymentStrategy, out *v1.DeploymentStrategy, s conversion.Scope) error { out.Type = v1.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { in, out := &in.RollingUpdate, &out.RollingUpdate *out = new(v1.RollingUpdateDeployment) - if err := Convert_extensions_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(*in, *out, s); err != nil { + if err := Convert_apps_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(*in, *out, s); err != nil { return err } } else { @@ -892,40 +891,40 @@ func autoConvert_extensions_DeploymentStrategy_To_v1_DeploymentStrategy(in *exte return nil } -func autoConvert_v1_ReplicaSet_To_extensions_ReplicaSet(in *v1.ReplicaSet, out *extensions.ReplicaSet, s conversion.Scope) error { +func autoConvert_v1_ReplicaSet_To_apps_ReplicaSet(in *v1.ReplicaSet, out *apps.ReplicaSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := 
Convert_v1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1_ReplicaSetSpec_To_apps_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1_ReplicaSetStatus_To_apps_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1_ReplicaSet_To_extensions_ReplicaSet is an autogenerated conversion function. -func Convert_v1_ReplicaSet_To_extensions_ReplicaSet(in *v1.ReplicaSet, out *extensions.ReplicaSet, s conversion.Scope) error { - return autoConvert_v1_ReplicaSet_To_extensions_ReplicaSet(in, out, s) +// Convert_v1_ReplicaSet_To_apps_ReplicaSet is an autogenerated conversion function. +func Convert_v1_ReplicaSet_To_apps_ReplicaSet(in *v1.ReplicaSet, out *apps.ReplicaSet, s conversion.Scope) error { + return autoConvert_v1_ReplicaSet_To_apps_ReplicaSet(in, out, s) } -func autoConvert_extensions_ReplicaSet_To_v1_ReplicaSet(in *extensions.ReplicaSet, out *v1.ReplicaSet, s conversion.Scope) error { +func autoConvert_apps_ReplicaSet_To_v1_ReplicaSet(in *apps.ReplicaSet, out *v1.ReplicaSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_ReplicaSetSpec_To_v1_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_apps_ReplicaSetSpec_To_v1_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_extensions_ReplicaSetStatus_To_v1_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_apps_ReplicaSetStatus_To_v1_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_extensions_ReplicaSet_To_v1_ReplicaSet is an autogenerated conversion function. -func Convert_extensions_ReplicaSet_To_v1_ReplicaSet(in *extensions.ReplicaSet, out *v1.ReplicaSet, s conversion.Scope) error { - return autoConvert_extensions_ReplicaSet_To_v1_ReplicaSet(in, out, s) +// Convert_apps_ReplicaSet_To_v1_ReplicaSet is an autogenerated conversion function. +func Convert_apps_ReplicaSet_To_v1_ReplicaSet(in *apps.ReplicaSet, out *v1.ReplicaSet, s conversion.Scope) error { + return autoConvert_apps_ReplicaSet_To_v1_ReplicaSet(in, out, s) } -func autoConvert_v1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in *v1.ReplicaSetCondition, out *extensions.ReplicaSetCondition, s conversion.Scope) error { - out.Type = extensions.ReplicaSetConditionType(in.Type) +func autoConvert_v1_ReplicaSetCondition_To_apps_ReplicaSetCondition(in *v1.ReplicaSetCondition, out *apps.ReplicaSetCondition, s conversion.Scope) error { + out.Type = apps.ReplicaSetConditionType(in.Type) out.Status = core.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime out.Reason = in.Reason @@ -933,12 +932,12 @@ func autoConvert_v1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in *v1 return nil } -// Convert_v1_ReplicaSetCondition_To_extensions_ReplicaSetCondition is an autogenerated conversion function. -func Convert_v1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in *v1.ReplicaSetCondition, out *extensions.ReplicaSetCondition, s conversion.Scope) error { - return autoConvert_v1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in, out, s) +// Convert_v1_ReplicaSetCondition_To_apps_ReplicaSetCondition is an autogenerated conversion function. 
+func Convert_v1_ReplicaSetCondition_To_apps_ReplicaSetCondition(in *v1.ReplicaSetCondition, out *apps.ReplicaSetCondition, s conversion.Scope) error { + return autoConvert_v1_ReplicaSetCondition_To_apps_ReplicaSetCondition(in, out, s) } -func autoConvert_extensions_ReplicaSetCondition_To_v1_ReplicaSetCondition(in *extensions.ReplicaSetCondition, out *v1.ReplicaSetCondition, s conversion.Scope) error { +func autoConvert_apps_ReplicaSetCondition_To_v1_ReplicaSetCondition(in *apps.ReplicaSetCondition, out *v1.ReplicaSetCondition, s conversion.Scope) error { out.Type = v1.ReplicaSetConditionType(in.Type) out.Status = corev1.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime @@ -947,18 +946,18 @@ func autoConvert_extensions_ReplicaSetCondition_To_v1_ReplicaSetCondition(in *ex return nil } -// Convert_extensions_ReplicaSetCondition_To_v1_ReplicaSetCondition is an autogenerated conversion function. -func Convert_extensions_ReplicaSetCondition_To_v1_ReplicaSetCondition(in *extensions.ReplicaSetCondition, out *v1.ReplicaSetCondition, s conversion.Scope) error { - return autoConvert_extensions_ReplicaSetCondition_To_v1_ReplicaSetCondition(in, out, s) +// Convert_apps_ReplicaSetCondition_To_v1_ReplicaSetCondition is an autogenerated conversion function. +func Convert_apps_ReplicaSetCondition_To_v1_ReplicaSetCondition(in *apps.ReplicaSetCondition, out *v1.ReplicaSetCondition, s conversion.Scope) error { + return autoConvert_apps_ReplicaSetCondition_To_v1_ReplicaSetCondition(in, out, s) } -func autoConvert_v1_ReplicaSetList_To_extensions_ReplicaSetList(in *v1.ReplicaSetList, out *extensions.ReplicaSetList, s conversion.Scope) error { +func autoConvert_v1_ReplicaSetList_To_apps_ReplicaSetList(in *v1.ReplicaSetList, out *apps.ReplicaSetList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]extensions.ReplicaSet, len(*in)) + *out = make([]apps.ReplicaSet, len(*in)) for i := range *in { - if err := Convert_v1_ReplicaSet_To_extensions_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_v1_ReplicaSet_To_apps_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -968,18 +967,18 @@ func autoConvert_v1_ReplicaSetList_To_extensions_ReplicaSetList(in *v1.ReplicaSe return nil } -// Convert_v1_ReplicaSetList_To_extensions_ReplicaSetList is an autogenerated conversion function. -func Convert_v1_ReplicaSetList_To_extensions_ReplicaSetList(in *v1.ReplicaSetList, out *extensions.ReplicaSetList, s conversion.Scope) error { - return autoConvert_v1_ReplicaSetList_To_extensions_ReplicaSetList(in, out, s) +// Convert_v1_ReplicaSetList_To_apps_ReplicaSetList is an autogenerated conversion function. 
+func Convert_v1_ReplicaSetList_To_apps_ReplicaSetList(in *v1.ReplicaSetList, out *apps.ReplicaSetList, s conversion.Scope) error { + return autoConvert_v1_ReplicaSetList_To_apps_ReplicaSetList(in, out, s) } -func autoConvert_extensions_ReplicaSetList_To_v1_ReplicaSetList(in *extensions.ReplicaSetList, out *v1.ReplicaSetList, s conversion.Scope) error { +func autoConvert_apps_ReplicaSetList_To_v1_ReplicaSetList(in *apps.ReplicaSetList, out *v1.ReplicaSetList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]v1.ReplicaSet, len(*in)) for i := range *in { - if err := Convert_extensions_ReplicaSet_To_v1_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_apps_ReplicaSet_To_v1_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -989,12 +988,12 @@ func autoConvert_extensions_ReplicaSetList_To_v1_ReplicaSetList(in *extensions.R return nil } -// Convert_extensions_ReplicaSetList_To_v1_ReplicaSetList is an autogenerated conversion function. -func Convert_extensions_ReplicaSetList_To_v1_ReplicaSetList(in *extensions.ReplicaSetList, out *v1.ReplicaSetList, s conversion.Scope) error { - return autoConvert_extensions_ReplicaSetList_To_v1_ReplicaSetList(in, out, s) +// Convert_apps_ReplicaSetList_To_v1_ReplicaSetList is an autogenerated conversion function. +func Convert_apps_ReplicaSetList_To_v1_ReplicaSetList(in *apps.ReplicaSetList, out *v1.ReplicaSetList, s conversion.Scope) error { + return autoConvert_apps_ReplicaSetList_To_v1_ReplicaSetList(in, out, s) } -func autoConvert_v1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *v1.ReplicaSetSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { +func autoConvert_v1_ReplicaSetSpec_To_apps_ReplicaSetSpec(in *v1.ReplicaSetSpec, out *apps.ReplicaSetSpec, s conversion.Scope) error { if err := metav1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } @@ -1006,7 +1005,7 @@ func autoConvert_v1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *v1.ReplicaSe return nil } -func autoConvert_extensions_ReplicaSetSpec_To_v1_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *v1.ReplicaSetSpec, s conversion.Scope) error { +func autoConvert_apps_ReplicaSetSpec_To_v1_ReplicaSetSpec(in *apps.ReplicaSetSpec, out *v1.ReplicaSetSpec, s conversion.Scope) error { if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } @@ -1018,22 +1017,22 @@ func autoConvert_extensions_ReplicaSetSpec_To_v1_ReplicaSetSpec(in *extensions.R return nil } -func autoConvert_v1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in *v1.ReplicaSetStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error { +func autoConvert_v1_ReplicaSetStatus_To_apps_ReplicaSetStatus(in *v1.ReplicaSetStatus, out *apps.ReplicaSetStatus, s conversion.Scope) error { out.Replicas = in.Replicas out.FullyLabeledReplicas = in.FullyLabeledReplicas out.ReadyReplicas = in.ReadyReplicas out.AvailableReplicas = in.AvailableReplicas out.ObservedGeneration = in.ObservedGeneration - out.Conditions = *(*[]extensions.ReplicaSetCondition)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*[]apps.ReplicaSetCondition)(unsafe.Pointer(&in.Conditions)) return nil } -// Convert_v1_ReplicaSetStatus_To_extensions_ReplicaSetStatus is an autogenerated conversion function. 
-func Convert_v1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in *v1.ReplicaSetStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error { - return autoConvert_v1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in, out, s) +// Convert_v1_ReplicaSetStatus_To_apps_ReplicaSetStatus is an autogenerated conversion function. +func Convert_v1_ReplicaSetStatus_To_apps_ReplicaSetStatus(in *v1.ReplicaSetStatus, out *apps.ReplicaSetStatus, s conversion.Scope) error { + return autoConvert_v1_ReplicaSetStatus_To_apps_ReplicaSetStatus(in, out, s) } -func autoConvert_extensions_ReplicaSetStatus_To_v1_ReplicaSetStatus(in *extensions.ReplicaSetStatus, out *v1.ReplicaSetStatus, s conversion.Scope) error { +func autoConvert_apps_ReplicaSetStatus_To_v1_ReplicaSetStatus(in *apps.ReplicaSetStatus, out *v1.ReplicaSetStatus, s conversion.Scope) error { out.Replicas = in.Replicas out.FullyLabeledReplicas = in.FullyLabeledReplicas out.ReadyReplicas = in.ReadyReplicas @@ -1043,28 +1042,28 @@ func autoConvert_extensions_ReplicaSetStatus_To_v1_ReplicaSetStatus(in *extensio return nil } -// Convert_extensions_ReplicaSetStatus_To_v1_ReplicaSetStatus is an autogenerated conversion function. -func Convert_extensions_ReplicaSetStatus_To_v1_ReplicaSetStatus(in *extensions.ReplicaSetStatus, out *v1.ReplicaSetStatus, s conversion.Scope) error { - return autoConvert_extensions_ReplicaSetStatus_To_v1_ReplicaSetStatus(in, out, s) +// Convert_apps_ReplicaSetStatus_To_v1_ReplicaSetStatus is an autogenerated conversion function. +func Convert_apps_ReplicaSetStatus_To_v1_ReplicaSetStatus(in *apps.ReplicaSetStatus, out *v1.ReplicaSetStatus, s conversion.Scope) error { + return autoConvert_apps_ReplicaSetStatus_To_v1_ReplicaSetStatus(in, out, s) } -func autoConvert_v1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in *v1.RollingUpdateDaemonSet, out *extensions.RollingUpdateDaemonSet, s conversion.Scope) error { +func autoConvert_v1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in *v1.RollingUpdateDaemonSet, out *apps.RollingUpdateDaemonSet, s conversion.Scope) error { // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) return nil } -func autoConvert_extensions_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(in *extensions.RollingUpdateDaemonSet, out *v1.RollingUpdateDaemonSet, s conversion.Scope) error { +func autoConvert_apps_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(in *apps.RollingUpdateDaemonSet, out *v1.RollingUpdateDaemonSet, s conversion.Scope) error { // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) return nil } -func autoConvert_v1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *v1.RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error { +func autoConvert_v1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *v1.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error { // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) // WARNING: in.MaxSurge requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) return nil } 
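The WARNING stubs above mark the one thing conversion-gen cannot do here: the versioned types hold `MaxUnavailable`/`MaxSurge` as `*intstr.IntOrString`, while the internal `apps` types hold plain `intstr.IntOrString` values, so the generated functions skip those fields and hand-written converters (registered via `s.AddConversionFunc`, as the `RegisterConversions` hunks for `v1beta1` below show) must fill them in. Those hand-written bodies are not part of this hunk; as a minimal sketch only, `Convert_v1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment` (already called from the strategy converter above) plausibly follows the same `s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0)` pattern the v1beta1 conversion.go later in this diff uses:

```go
// Sketch only, not part of this diff: the real body lives in the package's
// hand-written conversion.go. Import aliases follow the surrounding file:
// v1 = k8s.io/api/apps/v1, apps = k8s.io/kubernetes/pkg/apis/apps,
// conversion = k8s.io/apimachinery/pkg/conversion.
func Convert_v1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *v1.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error {
	// s.Convert bridges the pointer/value mismatch the generator flagged:
	// *intstr.IntOrString (versioned, pointer so it can be defaulted) ->
	// intstr.IntOrString (internal value type).
	if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil {
		return err
	}
	if err := s.Convert(in.MaxSurge, &out.MaxSurge, 0); err != nil {
		return err
	}
	return nil
}
```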
-func autoConvert_extensions_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *v1.RollingUpdateDeployment, s conversion.Scope) error { +func autoConvert_apps_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *v1.RollingUpdateDeployment, s conversion.Scope) error { // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) // WARNING: in.MaxSurge requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/BUILD index a4e8399d981d..a4110a88eca4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/BUILD @@ -22,7 +22,6 @@ go_library( "//pkg/apis/autoscaling:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/apis/core/v1:go_default_library", - "//pkg/apis/extensions:go_default_library", "//staging/src/k8s.io/api/apps/v1beta1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/conversion.go index 680da870a944..15804ad9bbf6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/conversion.go @@ -30,7 +30,6 @@ import ( "k8s.io/kubernetes/pkg/apis/autoscaling" api "k8s.io/kubernetes/pkg/apis/core" k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" - "k8s.io/kubernetes/pkg/apis/extensions" ) func addConversionFuncs(scheme *runtime.Scheme) error { @@ -48,12 +47,12 @@ func addConversionFuncs(scheme *runtime.Scheme) error { // ones, see https://github.com/kubernetes/kubernetes/issues/39865 Convert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus, Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus, - Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec, - Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec, - Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy, - Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy, - Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment, - Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment, + Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec, + Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec, + Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy, + Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy, + Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment, + Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment, ) if err != nil { return err @@ -213,7 +212,7 @@ func Convert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus(in *appsv1beta1.Scal return nil } -func Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *appsv1beta1.DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { +func Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(in 
*appsv1beta1.DeploymentSpec, out *apps.DeploymentSpec, s conversion.Scope) error { if in.Replicas != nil { out.Replicas = *in.Replicas } @@ -221,14 +220,14 @@ func Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *appsv1beta1 if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + if err := Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } out.RevisionHistoryLimit = in.RevisionHistoryLimit out.MinReadySeconds = in.MinReadySeconds out.Paused = in.Paused if in.RollbackTo != nil { - out.RollbackTo = new(extensions.RollbackConfig) + out.RollbackTo = new(apps.RollbackConfig) out.RollbackTo.Revision = in.RollbackTo.Revision } else { out.RollbackTo = nil @@ -240,13 +239,13 @@ func Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *appsv1beta1 return nil } -func Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions.DeploymentSpec, out *appsv1beta1.DeploymentSpec, s conversion.Scope) error { +func Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(in *apps.DeploymentSpec, out *appsv1beta1.DeploymentSpec, s conversion.Scope) error { out.Replicas = &in.Replicas out.Selector = in.Selector if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + if err := Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } if in.RevisionHistoryLimit != nil { @@ -268,11 +267,11 @@ func Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions. 
return nil } -func Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *appsv1beta1.DeploymentStrategy, s conversion.Scope) error { +func Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *apps.DeploymentStrategy, out *appsv1beta1.DeploymentStrategy, s conversion.Scope) error { out.Type = appsv1beta1.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { out.RollingUpdate = new(appsv1beta1.RollingUpdateDeployment) - if err := Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { + if err := Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { return err } } else { @@ -281,11 +280,11 @@ func Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *ext return nil } -func Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *appsv1beta1.DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error { - out.Type = extensions.DeploymentStrategyType(in.Type) +func Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(in *appsv1beta1.DeploymentStrategy, out *apps.DeploymentStrategy, s conversion.Scope) error { + out.Type = apps.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { - out.RollingUpdate = new(extensions.RollingUpdateDeployment) - if err := Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { + out.RollingUpdate = new(apps.RollingUpdateDeployment) + if err := Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { return err } } else { @@ -294,7 +293,7 @@ func Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *app return nil } -func Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *appsv1beta1.RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error { +func Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *appsv1beta1.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error { if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { return err } @@ -304,7 +303,7 @@ func Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployme return nil } -func Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *appsv1beta1.RollingUpdateDeployment, s conversion.Scope) error { +func Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *appsv1beta1.RollingUpdateDeployment, s conversion.Scope) error { if out.MaxUnavailable == nil { out.MaxUnavailable = &intstr.IntOrString{} } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/doc.go index 8492ada1c103..78403fb73902 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/doc.go @@ -16,7 +16,6 @@ limitations under the License. 
// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/apps // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/autoscaling -// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/extensions // +k8s:conversion-gen-external-types=k8s.io/api/apps/v1beta1 // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/apps/v1beta1 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.conversion.go index 3268019690a2..728db5da9235 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.conversion.go @@ -32,7 +32,6 @@ import ( autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" core "k8s.io/kubernetes/pkg/apis/core" corev1 "k8s.io/kubernetes/pkg/apis/core/v1" - extensions "k8s.io/kubernetes/pkg/apis/extensions" ) func init() { @@ -62,93 +61,93 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.Deployment)(nil), (*extensions.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_Deployment_To_extensions_Deployment(a.(*v1beta1.Deployment), b.(*extensions.Deployment), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.Deployment)(nil), (*apps.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Deployment_To_apps_Deployment(a.(*v1beta1.Deployment), b.(*apps.Deployment), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.Deployment)(nil), (*v1beta1.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_Deployment_To_v1beta1_Deployment(a.(*extensions.Deployment), b.(*v1beta1.Deployment), scope) + if err := s.AddGeneratedConversionFunc((*apps.Deployment)(nil), (*v1beta1.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_Deployment_To_v1beta1_Deployment(a.(*apps.Deployment), b.(*v1beta1.Deployment), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentCondition)(nil), (*extensions.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(a.(*v1beta1.DeploymentCondition), b.(*extensions.DeploymentCondition), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentCondition)(nil), (*apps.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DeploymentCondition_To_apps_DeploymentCondition(a.(*v1beta1.DeploymentCondition), b.(*apps.DeploymentCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentCondition)(nil), (*v1beta1.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(a.(*extensions.DeploymentCondition), b.(*v1beta1.DeploymentCondition), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentCondition)(nil), (*v1beta1.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentCondition_To_v1beta1_DeploymentCondition(a.(*apps.DeploymentCondition), b.(*v1beta1.DeploymentCondition), 
scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentList)(nil), (*extensions.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentList_To_extensions_DeploymentList(a.(*v1beta1.DeploymentList), b.(*extensions.DeploymentList), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentList)(nil), (*apps.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DeploymentList_To_apps_DeploymentList(a.(*v1beta1.DeploymentList), b.(*apps.DeploymentList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentList)(nil), (*v1beta1.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentList_To_v1beta1_DeploymentList(a.(*extensions.DeploymentList), b.(*v1beta1.DeploymentList), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentList)(nil), (*v1beta1.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentList_To_v1beta1_DeploymentList(a.(*apps.DeploymentList), b.(*v1beta1.DeploymentList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentRollback)(nil), (*extensions.DeploymentRollback)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(a.(*v1beta1.DeploymentRollback), b.(*extensions.DeploymentRollback), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentRollback)(nil), (*apps.DeploymentRollback)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DeploymentRollback_To_apps_DeploymentRollback(a.(*v1beta1.DeploymentRollback), b.(*apps.DeploymentRollback), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentRollback)(nil), (*v1beta1.DeploymentRollback)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(a.(*extensions.DeploymentRollback), b.(*v1beta1.DeploymentRollback), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentRollback)(nil), (*v1beta1.DeploymentRollback)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentRollback_To_v1beta1_DeploymentRollback(a.(*apps.DeploymentRollback), b.(*v1beta1.DeploymentRollback), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentSpec)(nil), (*extensions.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(a.(*v1beta1.DeploymentSpec), b.(*extensions.DeploymentSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentSpec)(nil), (*apps.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(a.(*v1beta1.DeploymentSpec), b.(*apps.DeploymentSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentSpec)(nil), (*v1beta1.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(a.(*extensions.DeploymentSpec), b.(*v1beta1.DeploymentSpec), scope) + if err := 
s.AddGeneratedConversionFunc((*apps.DeploymentSpec)(nil), (*v1beta1.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(a.(*apps.DeploymentSpec), b.(*v1beta1.DeploymentSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentStatus)(nil), (*extensions.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(a.(*v1beta1.DeploymentStatus), b.(*extensions.DeploymentStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentStatus)(nil), (*apps.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus(a.(*v1beta1.DeploymentStatus), b.(*apps.DeploymentStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentStatus)(nil), (*v1beta1.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(a.(*extensions.DeploymentStatus), b.(*v1beta1.DeploymentStatus), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentStatus)(nil), (*v1beta1.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus(a.(*apps.DeploymentStatus), b.(*v1beta1.DeploymentStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentStrategy)(nil), (*extensions.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(a.(*v1beta1.DeploymentStrategy), b.(*extensions.DeploymentStrategy), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentStrategy)(nil), (*apps.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(a.(*v1beta1.DeploymentStrategy), b.(*apps.DeploymentStrategy), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentStrategy)(nil), (*v1beta1.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(a.(*extensions.DeploymentStrategy), b.(*v1beta1.DeploymentStrategy), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentStrategy)(nil), (*v1beta1.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(a.(*apps.DeploymentStrategy), b.(*v1beta1.DeploymentStrategy), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.RollbackConfig)(nil), (*extensions.RollbackConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(a.(*v1beta1.RollbackConfig), b.(*extensions.RollbackConfig), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.RollbackConfig)(nil), (*apps.RollbackConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_RollbackConfig_To_apps_RollbackConfig(a.(*v1beta1.RollbackConfig), b.(*apps.RollbackConfig), scope) }); err != nil { return err } - if err := 
s.AddGeneratedConversionFunc((*extensions.RollbackConfig)(nil), (*v1beta1.RollbackConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(a.(*extensions.RollbackConfig), b.(*v1beta1.RollbackConfig), scope) + if err := s.AddGeneratedConversionFunc((*apps.RollbackConfig)(nil), (*v1beta1.RollbackConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_RollbackConfig_To_v1beta1_RollbackConfig(a.(*apps.RollbackConfig), b.(*v1beta1.RollbackConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.RollingUpdateDeployment)(nil), (*extensions.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(a.(*v1beta1.RollingUpdateDeployment), b.(*extensions.RollingUpdateDeployment), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.RollingUpdateDeployment)(nil), (*apps.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(a.(*v1beta1.RollingUpdateDeployment), b.(*apps.RollingUpdateDeployment), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.RollingUpdateDeployment)(nil), (*v1beta1.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(a.(*extensions.RollingUpdateDeployment), b.(*v1beta1.RollingUpdateDeployment), scope) + if err := s.AddGeneratedConversionFunc((*apps.RollingUpdateDeployment)(nil), (*v1beta1.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(a.(*apps.RollingUpdateDeployment), b.(*v1beta1.RollingUpdateDeployment), scope) }); err != nil { return err } @@ -252,48 +251,48 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*apps.StatefulSetSpec)(nil), (*v1beta1.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(a.(*apps.StatefulSetSpec), b.(*v1beta1.StatefulSetSpec), scope) + if err := s.AddConversionFunc((*apps.DeploymentSpec)(nil), (*v1beta1.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(a.(*apps.DeploymentSpec), b.(*v1beta1.DeploymentSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*apps.StatefulSetUpdateStrategy)(nil), (*v1beta1.StatefulSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_StatefulSetUpdateStrategy_To_v1beta1_StatefulSetUpdateStrategy(a.(*apps.StatefulSetUpdateStrategy), b.(*v1beta1.StatefulSetUpdateStrategy), scope) + if err := s.AddConversionFunc((*apps.DeploymentStrategy)(nil), (*v1beta1.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(a.(*apps.DeploymentStrategy), b.(*v1beta1.DeploymentStrategy), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*autoscaling.ScaleStatus)(nil), (*v1beta1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus(a.(*autoscaling.ScaleStatus), b.(*v1beta1.ScaleStatus), scope) + if err := s.AddConversionFunc((*apps.RollingUpdateDeployment)(nil), (*v1beta1.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(a.(*apps.RollingUpdateDeployment), b.(*v1beta1.RollingUpdateDeployment), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.DeploymentSpec)(nil), (*v1beta1.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(a.(*extensions.DeploymentSpec), b.(*v1beta1.DeploymentSpec), scope) + if err := s.AddConversionFunc((*apps.StatefulSetSpec)(nil), (*v1beta1.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(a.(*apps.StatefulSetSpec), b.(*v1beta1.StatefulSetSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.DeploymentStrategy)(nil), (*v1beta1.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(a.(*extensions.DeploymentStrategy), b.(*v1beta1.DeploymentStrategy), scope) + if err := s.AddConversionFunc((*apps.StatefulSetUpdateStrategy)(nil), (*v1beta1.StatefulSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_StatefulSetUpdateStrategy_To_v1beta1_StatefulSetUpdateStrategy(a.(*apps.StatefulSetUpdateStrategy), b.(*v1beta1.StatefulSetUpdateStrategy), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.RollingUpdateDeployment)(nil), (*v1beta1.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(a.(*extensions.RollingUpdateDeployment), b.(*v1beta1.RollingUpdateDeployment), scope) + if err := s.AddConversionFunc((*autoscaling.ScaleStatus)(nil), (*v1beta1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus(a.(*autoscaling.ScaleStatus), b.(*v1beta1.ScaleStatus), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta1.DeploymentSpec)(nil), (*extensions.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(a.(*v1beta1.DeploymentSpec), b.(*extensions.DeploymentSpec), scope) + if err := s.AddConversionFunc((*v1beta1.DeploymentSpec)(nil), (*apps.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(a.(*v1beta1.DeploymentSpec), b.(*apps.DeploymentSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta1.DeploymentStrategy)(nil), (*extensions.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(a.(*v1beta1.DeploymentStrategy), b.(*extensions.DeploymentStrategy), scope) + if err := s.AddConversionFunc((*v1beta1.DeploymentStrategy)(nil), (*apps.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(a.(*v1beta1.DeploymentStrategy), b.(*apps.DeploymentStrategy), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta1.RollingUpdateDeployment)(nil), (*extensions.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(a.(*v1beta1.RollingUpdateDeployment), b.(*extensions.RollingUpdateDeployment), scope) + if err := s.AddConversionFunc((*v1beta1.RollingUpdateDeployment)(nil), (*apps.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(a.(*v1beta1.RollingUpdateDeployment), b.(*apps.RollingUpdateDeployment), scope) }); err != nil { return err } @@ -385,40 +384,40 @@ func Convert_apps_ControllerRevisionList_To_v1beta1_ControllerRevisionList(in *a return autoConvert_apps_ControllerRevisionList_To_v1beta1_ControllerRevisionList(in, out, s) } -func autoConvert_v1beta1_Deployment_To_extensions_Deployment(in *v1beta1.Deployment, out *extensions.Deployment, s conversion.Scope) error { +func autoConvert_v1beta1_Deployment_To_apps_Deployment(in *v1beta1.Deployment, out *apps.Deployment, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1beta1_Deployment_To_extensions_Deployment is an autogenerated conversion function. -func Convert_v1beta1_Deployment_To_extensions_Deployment(in *v1beta1.Deployment, out *extensions.Deployment, s conversion.Scope) error { - return autoConvert_v1beta1_Deployment_To_extensions_Deployment(in, out, s) +// Convert_v1beta1_Deployment_To_apps_Deployment is an autogenerated conversion function. +func Convert_v1beta1_Deployment_To_apps_Deployment(in *v1beta1.Deployment, out *apps.Deployment, s conversion.Scope) error { + return autoConvert_v1beta1_Deployment_To_apps_Deployment(in, out, s) } -func autoConvert_extensions_Deployment_To_v1beta1_Deployment(in *extensions.Deployment, out *v1beta1.Deployment, s conversion.Scope) error { +func autoConvert_apps_Deployment_To_v1beta1_Deployment(in *apps.Deployment, out *v1beta1.Deployment, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_extensions_Deployment_To_v1beta1_Deployment is an autogenerated conversion function. 
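// ---- editorial aside (not part of the generated file) -----------------------
// The registration pattern in RegisterConversions above is uniform: each
// (source, target) pair gets one untyped wrapper installed via
// AddGeneratedConversionFunc (or AddConversionFunc for the hand-written
// overrides), and the wrapper type-asserts both arguments before delegating to
// the typed Convert_A_To_B function. A minimal sketch of how a caller
// exercises such a registration through a runtime.Scheme — assuming the
// k8s.io/apimachinery conversion machinery and reusing the typed function
// named in this diff:
//
//	scheme := runtime.NewScheme()
//	_ = scheme.AddConversionFunc((*v1beta1.Deployment)(nil), (*apps.Deployment)(nil),
//		func(a, b interface{}, scope conversion.Scope) error {
//			return Convert_v1beta1_Deployment_To_apps_Deployment(
//				a.(*v1beta1.Deployment), b.(*apps.Deployment), scope)
//		})
//	out := &apps.Deployment{}
//	// Convert looks the pair up by reflect.Type and dispatches to the wrapper;
//	// an unregistered pair fails at runtime, not compile time.
//	_ = scheme.Convert(&v1beta1.Deployment{}, out, nil)
// ------------------------------------------------------------------------------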
-func Convert_extensions_Deployment_To_v1beta1_Deployment(in *extensions.Deployment, out *v1beta1.Deployment, s conversion.Scope) error { - return autoConvert_extensions_Deployment_To_v1beta1_Deployment(in, out, s) +// Convert_apps_Deployment_To_v1beta1_Deployment is an autogenerated conversion function. +func Convert_apps_Deployment_To_v1beta1_Deployment(in *apps.Deployment, out *v1beta1.Deployment, s conversion.Scope) error { + return autoConvert_apps_Deployment_To_v1beta1_Deployment(in, out, s) } -func autoConvert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(in *v1beta1.DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error { - out.Type = extensions.DeploymentConditionType(in.Type) +func autoConvert_v1beta1_DeploymentCondition_To_apps_DeploymentCondition(in *v1beta1.DeploymentCondition, out *apps.DeploymentCondition, s conversion.Scope) error { + out.Type = apps.DeploymentConditionType(in.Type) out.Status = core.ConditionStatus(in.Status) out.LastUpdateTime = in.LastUpdateTime out.LastTransitionTime = in.LastTransitionTime @@ -427,12 +426,12 @@ func autoConvert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(i return nil } -// Convert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition is an autogenerated conversion function. -func Convert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(in *v1beta1.DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error { - return autoConvert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(in, out, s) +// Convert_v1beta1_DeploymentCondition_To_apps_DeploymentCondition is an autogenerated conversion function. +func Convert_v1beta1_DeploymentCondition_To_apps_DeploymentCondition(in *v1beta1.DeploymentCondition, out *apps.DeploymentCondition, s conversion.Scope) error { + return autoConvert_v1beta1_DeploymentCondition_To_apps_DeploymentCondition(in, out, s) } -func autoConvert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(in *extensions.DeploymentCondition, out *v1beta1.DeploymentCondition, s conversion.Scope) error { +func autoConvert_apps_DeploymentCondition_To_v1beta1_DeploymentCondition(in *apps.DeploymentCondition, out *v1beta1.DeploymentCondition, s conversion.Scope) error { out.Type = v1beta1.DeploymentConditionType(in.Type) out.Status = v1.ConditionStatus(in.Status) out.LastUpdateTime = in.LastUpdateTime @@ -442,18 +441,18 @@ func autoConvert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(i return nil } -// Convert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition is an autogenerated conversion function. -func Convert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(in *extensions.DeploymentCondition, out *v1beta1.DeploymentCondition, s conversion.Scope) error { - return autoConvert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(in, out, s) +// Convert_apps_DeploymentCondition_To_v1beta1_DeploymentCondition is an autogenerated conversion function. 
+func Convert_apps_DeploymentCondition_To_v1beta1_DeploymentCondition(in *apps.DeploymentCondition, out *v1beta1.DeploymentCondition, s conversion.Scope) error { + return autoConvert_apps_DeploymentCondition_To_v1beta1_DeploymentCondition(in, out, s) } -func autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList(in *v1beta1.DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error { +func autoConvert_v1beta1_DeploymentList_To_apps_DeploymentList(in *v1beta1.DeploymentList, out *apps.DeploymentList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]extensions.Deployment, len(*in)) + *out = make([]apps.Deployment, len(*in)) for i := range *in { - if err := Convert_v1beta1_Deployment_To_extensions_Deployment(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_v1beta1_Deployment_To_apps_Deployment(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -463,18 +462,18 @@ func autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList(in *v1beta1 return nil } -// Convert_v1beta1_DeploymentList_To_extensions_DeploymentList is an autogenerated conversion function. -func Convert_v1beta1_DeploymentList_To_extensions_DeploymentList(in *v1beta1.DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error { - return autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList(in, out, s) +// Convert_v1beta1_DeploymentList_To_apps_DeploymentList is an autogenerated conversion function. +func Convert_v1beta1_DeploymentList_To_apps_DeploymentList(in *v1beta1.DeploymentList, out *apps.DeploymentList, s conversion.Scope) error { + return autoConvert_v1beta1_DeploymentList_To_apps_DeploymentList(in, out, s) } -func autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList(in *extensions.DeploymentList, out *v1beta1.DeploymentList, s conversion.Scope) error { +func autoConvert_apps_DeploymentList_To_v1beta1_DeploymentList(in *apps.DeploymentList, out *v1beta1.DeploymentList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]v1beta1.Deployment, len(*in)) for i := range *in { - if err := Convert_extensions_Deployment_To_v1beta1_Deployment(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_apps_Deployment_To_v1beta1_Deployment(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -484,40 +483,40 @@ func autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList(in *extensi return nil } -// Convert_extensions_DeploymentList_To_v1beta1_DeploymentList is an autogenerated conversion function. -func Convert_extensions_DeploymentList_To_v1beta1_DeploymentList(in *extensions.DeploymentList, out *v1beta1.DeploymentList, s conversion.Scope) error { - return autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList(in, out, s) +// Convert_apps_DeploymentList_To_v1beta1_DeploymentList is an autogenerated conversion function. 
+func Convert_apps_DeploymentList_To_v1beta1_DeploymentList(in *apps.DeploymentList, out *v1beta1.DeploymentList, s conversion.Scope) error { + return autoConvert_apps_DeploymentList_To_v1beta1_DeploymentList(in, out, s) } -func autoConvert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in *v1beta1.DeploymentRollback, out *extensions.DeploymentRollback, s conversion.Scope) error { +func autoConvert_v1beta1_DeploymentRollback_To_apps_DeploymentRollback(in *v1beta1.DeploymentRollback, out *apps.DeploymentRollback, s conversion.Scope) error { out.Name = in.Name out.UpdatedAnnotations = *(*map[string]string)(unsafe.Pointer(&in.UpdatedAnnotations)) - if err := Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil { + if err := Convert_v1beta1_RollbackConfig_To_apps_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil { return err } return nil } -// Convert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback is an autogenerated conversion function. -func Convert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in *v1beta1.DeploymentRollback, out *extensions.DeploymentRollback, s conversion.Scope) error { - return autoConvert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in, out, s) +// Convert_v1beta1_DeploymentRollback_To_apps_DeploymentRollback is an autogenerated conversion function. +func Convert_v1beta1_DeploymentRollback_To_apps_DeploymentRollback(in *v1beta1.DeploymentRollback, out *apps.DeploymentRollback, s conversion.Scope) error { + return autoConvert_v1beta1_DeploymentRollback_To_apps_DeploymentRollback(in, out, s) } -func autoConvert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in *extensions.DeploymentRollback, out *v1beta1.DeploymentRollback, s conversion.Scope) error { +func autoConvert_apps_DeploymentRollback_To_v1beta1_DeploymentRollback(in *apps.DeploymentRollback, out *v1beta1.DeploymentRollback, s conversion.Scope) error { out.Name = in.Name out.UpdatedAnnotations = *(*map[string]string)(unsafe.Pointer(&in.UpdatedAnnotations)) - if err := Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil { + if err := Convert_apps_RollbackConfig_To_v1beta1_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil { return err } return nil } -// Convert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback is an autogenerated conversion function. -func Convert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in *extensions.DeploymentRollback, out *v1beta1.DeploymentRollback, s conversion.Scope) error { - return autoConvert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in, out, s) +// Convert_apps_DeploymentRollback_To_v1beta1_DeploymentRollback is an autogenerated conversion function. 
+func Convert_apps_DeploymentRollback_To_v1beta1_DeploymentRollback(in *apps.DeploymentRollback, out *v1beta1.DeploymentRollback, s conversion.Scope) error { + return autoConvert_apps_DeploymentRollback_To_v1beta1_DeploymentRollback(in, out, s) } -func autoConvert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *v1beta1.DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { +func autoConvert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(in *v1beta1.DeploymentSpec, out *apps.DeploymentSpec, s conversion.Scope) error { if err := metav1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } @@ -525,18 +524,18 @@ func autoConvert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *v1beta1 if err := corev1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + if err := Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit)) out.Paused = in.Paused - out.RollbackTo = (*extensions.RollbackConfig)(unsafe.Pointer(in.RollbackTo)) + out.RollbackTo = (*apps.RollbackConfig)(unsafe.Pointer(in.RollbackTo)) out.ProgressDeadlineSeconds = (*int32)(unsafe.Pointer(in.ProgressDeadlineSeconds)) return nil } -func autoConvert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions.DeploymentSpec, out *v1beta1.DeploymentSpec, s conversion.Scope) error { +func autoConvert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(in *apps.DeploymentSpec, out *v1beta1.DeploymentSpec, s conversion.Scope) error { if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } @@ -544,7 +543,7 @@ func autoConvert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensi if err := corev1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + if err := Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds @@ -555,24 +554,24 @@ func autoConvert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensi return nil } -func autoConvert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in *v1beta1.DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error { +func autoConvert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus(in *v1beta1.DeploymentStatus, out *apps.DeploymentStatus, s conversion.Scope) error { out.ObservedGeneration = in.ObservedGeneration out.Replicas = in.Replicas out.UpdatedReplicas = in.UpdatedReplicas out.ReadyReplicas = in.ReadyReplicas out.AvailableReplicas = in.AvailableReplicas out.UnavailableReplicas = in.UnavailableReplicas - out.Conditions = *(*[]extensions.DeploymentCondition)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*[]apps.DeploymentCondition)(unsafe.Pointer(&in.Conditions)) out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount)) return nil } -// Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus is an autogenerated conversion function. 
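// ---- editorial aside (not part of the generated file) -----------------------
// conversion-gen emits the zero-copy unsafe.Pointer casts seen above (for
// RollbackTo and the Conditions slice) only when it can prove the source and
// target types share an identical memory layout; when it cannot, it falls back
// to the element-by-element loop used for DeploymentList.Items. A minimal
// sketch of the same trick on hypothetical, layout-identical types:
//
//	type oldCond struct{ Type, Message string }
//	type newCond struct{ Type, Message string } // field-for-field identical
//
//	func castConds(in []oldCond) []newCond {
//		// Reinterprets the backing array without copying; safe only
//		// because both element types have the same size and layout.
//		return *(*[]newCond)(unsafe.Pointer(&in))
//	}
// ------------------------------------------------------------------------------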
-func Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in *v1beta1.DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error { - return autoConvert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in, out, s) +// Convert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus is an autogenerated conversion function. +func Convert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus(in *v1beta1.DeploymentStatus, out *apps.DeploymentStatus, s conversion.Scope) error { + return autoConvert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus(in, out, s) } -func autoConvert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in *extensions.DeploymentStatus, out *v1beta1.DeploymentStatus, s conversion.Scope) error { +func autoConvert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus(in *apps.DeploymentStatus, out *v1beta1.DeploymentStatus, s conversion.Scope) error { out.ObservedGeneration = in.ObservedGeneration out.Replicas = in.Replicas out.UpdatedReplicas = in.UpdatedReplicas @@ -584,17 +583,17 @@ func autoConvert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in *ext return nil } -// Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus is an autogenerated conversion function. -func Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in *extensions.DeploymentStatus, out *v1beta1.DeploymentStatus, s conversion.Scope) error { - return autoConvert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in, out, s) +// Convert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus is an autogenerated conversion function. +func Convert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus(in *apps.DeploymentStatus, out *v1beta1.DeploymentStatus, s conversion.Scope) error { + return autoConvert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus(in, out, s) } -func autoConvert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *v1beta1.DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error { - out.Type = extensions.DeploymentStrategyType(in.Type) +func autoConvert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(in *v1beta1.DeploymentStrategy, out *apps.DeploymentStrategy, s conversion.Scope) error { + out.Type = apps.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(extensions.RollingUpdateDeployment) - if err := Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(*in, *out, s); err != nil { + *out = new(apps.RollingUpdateDeployment) + if err := Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(*in, *out, s); err != nil { return err } } else { @@ -603,12 +602,12 @@ func autoConvert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in return nil } -func autoConvert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *v1beta1.DeploymentStrategy, s conversion.Scope) error { +func autoConvert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *apps.DeploymentStrategy, out *v1beta1.DeploymentStrategy, s conversion.Scope) error { out.Type = v1beta1.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { in, out := &in.RollingUpdate, &out.RollingUpdate *out = new(v1beta1.RollingUpdateDeployment) - if err := Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(*in, *out, s); err != nil { + if err := Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(*in, *out, s); err != nil 
{ return err } } else { @@ -617,33 +616,33 @@ func autoConvert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in return nil } -func autoConvert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in *v1beta1.RollbackConfig, out *extensions.RollbackConfig, s conversion.Scope) error { +func autoConvert_v1beta1_RollbackConfig_To_apps_RollbackConfig(in *v1beta1.RollbackConfig, out *apps.RollbackConfig, s conversion.Scope) error { out.Revision = in.Revision return nil } -// Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig is an autogenerated conversion function. -func Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in *v1beta1.RollbackConfig, out *extensions.RollbackConfig, s conversion.Scope) error { - return autoConvert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in, out, s) +// Convert_v1beta1_RollbackConfig_To_apps_RollbackConfig is an autogenerated conversion function. +func Convert_v1beta1_RollbackConfig_To_apps_RollbackConfig(in *v1beta1.RollbackConfig, out *apps.RollbackConfig, s conversion.Scope) error { + return autoConvert_v1beta1_RollbackConfig_To_apps_RollbackConfig(in, out, s) } -func autoConvert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in *extensions.RollbackConfig, out *v1beta1.RollbackConfig, s conversion.Scope) error { +func autoConvert_apps_RollbackConfig_To_v1beta1_RollbackConfig(in *apps.RollbackConfig, out *v1beta1.RollbackConfig, s conversion.Scope) error { out.Revision = in.Revision return nil } -// Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig is an autogenerated conversion function. -func Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in *extensions.RollbackConfig, out *v1beta1.RollbackConfig, s conversion.Scope) error { - return autoConvert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in, out, s) +// Convert_apps_RollbackConfig_To_v1beta1_RollbackConfig is an autogenerated conversion function. 
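// ---- editorial aside (not part of the generated file) -----------------------
// The two autoConvert_*RollingUpdateDeployment functions just below are left
// as empty stubs with WARNING comments because MaxUnavailable and MaxSurge are
// *intstr.IntOrString in v1beta1 but plain intstr.IntOrString in the internal
// apps package, and pointer-vs-value mismatches are beyond what conversion-gen
// generates. The hand-written conversions registered earlier close that gap;
// a sketch of the pattern (an illustrative function name, assuming
// conversion.Scope from k8s.io/apimachinery):
//
//	func convertRollingUpdateManually(in *v1beta1.RollingUpdateDeployment,
//		out *apps.RollingUpdateDeployment, s conversion.Scope) error {
//		// Dereference on the way in; the reverse direction would take
//		// addresses (or allocate) on the way out.
//		if in.MaxUnavailable != nil {
//			out.MaxUnavailable = *in.MaxUnavailable
//		}
//		if in.MaxSurge != nil {
//			out.MaxSurge = *in.MaxSurge
//		}
//		return nil
//	}
// ------------------------------------------------------------------------------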
+func Convert_apps_RollbackConfig_To_v1beta1_RollbackConfig(in *apps.RollbackConfig, out *v1beta1.RollbackConfig, s conversion.Scope) error { + return autoConvert_apps_RollbackConfig_To_v1beta1_RollbackConfig(in, out, s) } -func autoConvert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *v1beta1.RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error { +func autoConvert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *v1beta1.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error { // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) // WARNING: in.MaxSurge requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) return nil } -func autoConvert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *v1beta1.RollingUpdateDeployment, s conversion.Scope) error { +func autoConvert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *v1beta1.RollingUpdateDeployment, s conversion.Scope) error { // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) // WARNING: in.MaxSurge requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/BUILD index 37816e1e3caf..42a6b8191f47 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/BUILD @@ -22,7 +22,6 @@ go_library( "//pkg/apis/autoscaling:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/apis/core/v1:go_default_library", - "//pkg/apis/extensions:go_default_library", "//staging/src/k8s.io/api/apps/v1beta2:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -61,7 +60,6 @@ go_test( "//pkg/apis/autoscaling:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/apis/core/install:go_default_library", - "//pkg/apis/extensions:go_default_library", "//staging/src/k8s.io/api/apps/v1beta2:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/conversion.go index 3bfd5279392f..8d0e753a35ca 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/conversion.go @@ -31,7 +31,6 @@ import ( autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" api "k8s.io/kubernetes/pkg/apis/core" k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" - "k8s.io/kubernetes/pkg/apis/extensions" ) func addConversionFuncs(scheme *runtime.Scheme) error { @@ -44,31 +43,31 @@ func addConversionFuncs(scheme 
*runtime.Scheme) error { Convert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec, Convert_v1beta2_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy, Convert_apps_StatefulSetUpdateStrategy_To_v1beta2_StatefulSetUpdateStrategy, - Convert_extensions_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet, - Convert_v1beta2_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet, + Convert_apps_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet, + Convert_v1beta2_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet, Convert_v1beta2_StatefulSetStatus_To_apps_StatefulSetStatus, Convert_apps_StatefulSetStatus_To_v1beta2_StatefulSetStatus, - Convert_v1beta2_Deployment_To_extensions_Deployment, - Convert_extensions_Deployment_To_v1beta2_Deployment, - Convert_extensions_DaemonSet_To_v1beta2_DaemonSet, - Convert_v1beta2_DaemonSet_To_extensions_DaemonSet, - Convert_extensions_DaemonSetSpec_To_v1beta2_DaemonSetSpec, - Convert_v1beta2_DaemonSetSpec_To_extensions_DaemonSetSpec, - Convert_extensions_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy, - Convert_v1beta2_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy, + Convert_v1beta2_Deployment_To_apps_Deployment, + Convert_apps_Deployment_To_v1beta2_Deployment, + Convert_apps_DaemonSet_To_v1beta2_DaemonSet, + Convert_v1beta2_DaemonSet_To_apps_DaemonSet, + Convert_apps_DaemonSetSpec_To_v1beta2_DaemonSetSpec, + Convert_v1beta2_DaemonSetSpec_To_apps_DaemonSetSpec, + Convert_apps_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy, + Convert_v1beta2_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy, // extensions // TODO: below conversions should be dropped in favor of auto-generated // ones, see https://github.com/kubernetes/kubernetes/issues/39865 Convert_v1beta2_ScaleStatus_To_autoscaling_ScaleStatus, Convert_autoscaling_ScaleStatus_To_v1beta2_ScaleStatus, - Convert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec, - Convert_extensions_DeploymentSpec_To_v1beta2_DeploymentSpec, - Convert_v1beta2_DeploymentStrategy_To_extensions_DeploymentStrategy, - Convert_extensions_DeploymentStrategy_To_v1beta2_DeploymentStrategy, - Convert_v1beta2_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment, - Convert_extensions_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment, - Convert_extensions_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec, - Convert_v1beta2_ReplicaSetSpec_To_extensions_ReplicaSetSpec, + Convert_v1beta2_DeploymentSpec_To_apps_DeploymentSpec, + Convert_apps_DeploymentSpec_To_v1beta2_DeploymentSpec, + Convert_v1beta2_DeploymentStrategy_To_apps_DeploymentStrategy, + Convert_apps_DeploymentStrategy_To_v1beta2_DeploymentStrategy, + Convert_v1beta2_RollingUpdateDeployment_To_apps_RollingUpdateDeployment, + Convert_apps_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment, + Convert_apps_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec, + Convert_v1beta2_ReplicaSetSpec_To_apps_ReplicaSetSpec, ) if err != nil { return err @@ -91,7 +90,7 @@ func addConversionFuncs(scheme *runtime.Scheme) error { return nil } -func Convert_extensions_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(in *extensions.RollingUpdateDaemonSet, out *appsv1beta2.RollingUpdateDaemonSet, s conversion.Scope) error { +func Convert_apps_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(in *apps.RollingUpdateDaemonSet, out *appsv1beta2.RollingUpdateDaemonSet, s conversion.Scope) error { if out.MaxUnavailable == nil { out.MaxUnavailable = &intstr.IntOrString{} } @@ -101,7 +100,7 @@ func 
Convert_extensions_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet return nil } -func Convert_v1beta2_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in *appsv1beta2.RollingUpdateDaemonSet, out *extensions.RollingUpdateDaemonSet, s conversion.Scope) error { +func Convert_v1beta2_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in *appsv1beta2.RollingUpdateDaemonSet, out *apps.RollingUpdateDaemonSet, s conversion.Scope) error { if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { return err } @@ -294,7 +293,7 @@ func Convert_v1beta2_ScaleStatus_To_autoscaling_ScaleStatus(in *appsv1beta2.Scal return nil } -func Convert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec(in *appsv1beta2.DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { +func Convert_v1beta2_DeploymentSpec_To_apps_DeploymentSpec(in *appsv1beta2.DeploymentSpec, out *apps.DeploymentSpec, s conversion.Scope) error { if in.Replicas != nil { out.Replicas = *in.Replicas } @@ -302,7 +301,7 @@ func Convert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec(in *appsv1beta2 if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_v1beta2_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + if err := Convert_v1beta2_DeploymentStrategy_To_apps_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } out.RevisionHistoryLimit = in.RevisionHistoryLimit @@ -315,13 +314,13 @@ func Convert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec(in *appsv1beta2 return nil } -func Convert_extensions_DeploymentSpec_To_v1beta2_DeploymentSpec(in *extensions.DeploymentSpec, out *appsv1beta2.DeploymentSpec, s conversion.Scope) error { +func Convert_apps_DeploymentSpec_To_v1beta2_DeploymentSpec(in *apps.DeploymentSpec, out *appsv1beta2.DeploymentSpec, s conversion.Scope) error { out.Replicas = &in.Replicas out.Selector = in.Selector if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_extensions_DeploymentStrategy_To_v1beta2_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + if err := Convert_apps_DeploymentStrategy_To_v1beta2_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } if in.RevisionHistoryLimit != nil { @@ -337,11 +336,11 @@ func Convert_extensions_DeploymentSpec_To_v1beta2_DeploymentSpec(in *extensions. 
return nil } -func Convert_extensions_DeploymentStrategy_To_v1beta2_DeploymentStrategy(in *extensions.DeploymentStrategy, out *appsv1beta2.DeploymentStrategy, s conversion.Scope) error { +func Convert_apps_DeploymentStrategy_To_v1beta2_DeploymentStrategy(in *apps.DeploymentStrategy, out *appsv1beta2.DeploymentStrategy, s conversion.Scope) error { out.Type = appsv1beta2.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { out.RollingUpdate = new(appsv1beta2.RollingUpdateDeployment) - if err := Convert_extensions_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { + if err := Convert_apps_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { return err } } else { @@ -350,11 +349,11 @@ func Convert_extensions_DeploymentStrategy_To_v1beta2_DeploymentStrategy(in *ext return nil } -func Convert_v1beta2_DeploymentStrategy_To_extensions_DeploymentStrategy(in *appsv1beta2.DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error { - out.Type = extensions.DeploymentStrategyType(in.Type) +func Convert_v1beta2_DeploymentStrategy_To_apps_DeploymentStrategy(in *appsv1beta2.DeploymentStrategy, out *apps.DeploymentStrategy, s conversion.Scope) error { + out.Type = apps.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { - out.RollingUpdate = new(extensions.RollingUpdateDeployment) - if err := Convert_v1beta2_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { + out.RollingUpdate = new(apps.RollingUpdateDeployment) + if err := Convert_v1beta2_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { return err } } else { @@ -363,7 +362,7 @@ func Convert_v1beta2_DeploymentStrategy_To_extensions_DeploymentStrategy(in *app return nil } -func Convert_v1beta2_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *appsv1beta2.RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error { +func Convert_v1beta2_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *appsv1beta2.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error { if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { return err } @@ -373,7 +372,7 @@ func Convert_v1beta2_RollingUpdateDeployment_To_extensions_RollingUpdateDeployme return nil } -func Convert_extensions_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *appsv1beta2.RollingUpdateDeployment, s conversion.Scope) error { +func Convert_apps_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *appsv1beta2.RollingUpdateDeployment, s conversion.Scope) error { if out.MaxUnavailable == nil { out.MaxUnavailable = &intstr.IntOrString{} } @@ -389,7 +388,7 @@ func Convert_extensions_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployme return nil } -func Convert_extensions_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *appsv1beta2.ReplicaSetSpec, s conversion.Scope) error { +func Convert_apps_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(in *apps.ReplicaSetSpec, out *appsv1beta2.ReplicaSetSpec, s conversion.Scope) error { out.Replicas = new(int32) *out.Replicas = int32(in.Replicas) out.MinReadySeconds = in.MinReadySeconds @@ -400,9 +399,9 @@ func 
Convert_extensions_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(in *extensions. return nil } -func Convert_v1beta2_Deployment_To_extensions_Deployment(in *appsv1beta2.Deployment, out *extensions.Deployment, s conversion.Scope) error { +func Convert_v1beta2_Deployment_To_apps_Deployment(in *appsv1beta2.Deployment, out *apps.Deployment, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1beta2_DeploymentSpec_To_apps_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -412,7 +411,7 @@ func Convert_v1beta2_Deployment_To_extensions_Deployment(in *appsv1beta2.Deploym if revision64, err := strconv.ParseInt(revision, 10, 64); err != nil { return fmt.Errorf("failed to parse annotation[%s]=%s as int64: %v", appsv1beta2.DeprecatedRollbackTo, revision, err) } else { - out.Spec.RollbackTo = new(extensions.RollbackConfig) + out.Spec.RollbackTo = new(apps.RollbackConfig) out.Spec.RollbackTo.Revision = revision64 } out.Annotations = deepCopyStringMap(out.Annotations) @@ -421,13 +420,13 @@ func Convert_v1beta2_Deployment_To_extensions_Deployment(in *appsv1beta2.Deploym out.Spec.RollbackTo = nil } - if err := Convert_v1beta2_DeploymentStatus_To_extensions_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1beta2_DeploymentStatus_To_apps_DeploymentStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func Convert_v1beta2_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *appsv1beta2.ReplicaSetSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { +func Convert_v1beta2_ReplicaSetSpec_To_apps_ReplicaSetSpec(in *appsv1beta2.ReplicaSetSpec, out *apps.ReplicaSetSpec, s conversion.Scope) error { if in.Replicas != nil { out.Replicas = *in.Replicas } @@ -439,11 +438,11 @@ func Convert_v1beta2_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *appsv1beta2 return nil } -func Convert_extensions_Deployment_To_v1beta2_Deployment(in *extensions.Deployment, out *appsv1beta2.Deployment, s conversion.Scope) error { +func Convert_apps_Deployment_To_v1beta2_Deployment(in *apps.Deployment, out *appsv1beta2.Deployment, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta out.Annotations = deepCopyStringMap(out.Annotations) // deep copy because we modify annotations below - if err := Convert_extensions_DeploymentSpec_To_v1beta2_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_apps_DeploymentSpec_To_v1beta2_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -458,17 +457,17 @@ func Convert_extensions_Deployment_To_v1beta2_Deployment(in *extensions.Deployme delete(out.Annotations, appsv1beta2.DeprecatedRollbackTo) } - if err := Convert_extensions_DeploymentStatus_To_v1beta2_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_apps_DeploymentStatus_To_v1beta2_DeploymentStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func Convert_extensions_DaemonSet_To_v1beta2_DaemonSet(in *extensions.DaemonSet, out *appsv1beta2.DaemonSet, s conversion.Scope) error { +func Convert_apps_DaemonSet_To_v1beta2_DaemonSet(in *apps.DaemonSet, out *appsv1beta2.DaemonSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta out.Annotations = deepCopyStringMap(out.Annotations) out.Annotations[appsv1beta2.DeprecatedTemplateGeneration] = strconv.FormatInt(in.Spec.TemplateGeneration, 10) - if err := 
Convert_extensions_DaemonSetSpec_To_v1beta2_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_apps_DaemonSetSpec_To_v1beta2_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } if err := s.Convert(&in.Status, &out.Status, 0); err != nil { @@ -477,12 +476,12 @@ func Convert_extensions_DaemonSet_To_v1beta2_DaemonSet(in *extensions.DaemonSet, return nil } -func Convert_extensions_DaemonSetSpec_To_v1beta2_DaemonSetSpec(in *extensions.DaemonSetSpec, out *appsv1beta2.DaemonSetSpec, s conversion.Scope) error { +func Convert_apps_DaemonSetSpec_To_v1beta2_DaemonSetSpec(in *apps.DaemonSetSpec, out *appsv1beta2.DaemonSetSpec, s conversion.Scope) error { out.Selector = in.Selector if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_extensions_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + if err := Convert_apps_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { return err } out.MinReadySeconds = int32(in.MinReadySeconds) @@ -495,20 +494,20 @@ func Convert_extensions_DaemonSetSpec_To_v1beta2_DaemonSetSpec(in *extensions.Da return nil } -func Convert_extensions_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(in *extensions.DaemonSetUpdateStrategy, out *appsv1beta2.DaemonSetUpdateStrategy, s conversion.Scope) error { +func Convert_apps_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(in *apps.DaemonSetUpdateStrategy, out *appsv1beta2.DaemonSetUpdateStrategy, s conversion.Scope) error { out.Type = appsv1beta2.DaemonSetUpdateStrategyType(in.Type) if in.RollingUpdate != nil { out.RollingUpdate = &appsv1beta2.RollingUpdateDaemonSet{} - if err := Convert_extensions_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(in.RollingUpdate, out.RollingUpdate, s); err != nil { + if err := Convert_apps_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(in.RollingUpdate, out.RollingUpdate, s); err != nil { return err } } return nil } -func Convert_v1beta2_DaemonSet_To_extensions_DaemonSet(in *appsv1beta2.DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error { +func Convert_v1beta2_DaemonSet_To_apps_DaemonSet(in *appsv1beta2.DaemonSet, out *apps.DaemonSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta2_DaemonSetSpec_To_extensions_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1beta2_DaemonSetSpec_To_apps_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } if value, ok := in.Annotations[appsv1beta2.DeprecatedTemplateGeneration]; ok { @@ -526,12 +525,12 @@ func Convert_v1beta2_DaemonSet_To_extensions_DaemonSet(in *appsv1beta2.DaemonSet return nil } -func Convert_v1beta2_DaemonSetSpec_To_extensions_DaemonSetSpec(in *appsv1beta2.DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error { +func Convert_v1beta2_DaemonSetSpec_To_apps_DaemonSetSpec(in *appsv1beta2.DaemonSetSpec, out *apps.DaemonSetSpec, s conversion.Scope) error { out.Selector = in.Selector if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_v1beta2_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + if err := 
Convert_v1beta2_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { return err } if in.RevisionHistoryLimit != nil { @@ -544,11 +543,11 @@ func Convert_v1beta2_DaemonSetSpec_To_extensions_DaemonSetSpec(in *appsv1beta2.D return nil } -func Convert_v1beta2_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in *appsv1beta2.DaemonSetUpdateStrategy, out *extensions.DaemonSetUpdateStrategy, s conversion.Scope) error { - out.Type = extensions.DaemonSetUpdateStrategyType(in.Type) +func Convert_v1beta2_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(in *appsv1beta2.DaemonSetUpdateStrategy, out *apps.DaemonSetUpdateStrategy, s conversion.Scope) error { + out.Type = apps.DaemonSetUpdateStrategyType(in.Type) if in.RollingUpdate != nil { - out.RollingUpdate = &extensions.RollingUpdateDaemonSet{} - if err := Convert_v1beta2_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in.RollingUpdate, out.RollingUpdate, s); err != nil { + out.RollingUpdate = &apps.RollingUpdateDaemonSet{} + if err := Convert_v1beta2_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in.RollingUpdate, out.RollingUpdate, s); err != nil { return err } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/doc.go index 4221fe419a7b..431bac820d07 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/doc.go @@ -16,7 +16,6 @@ limitations under the License. // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/apps // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/autoscaling -// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/extensions // +k8s:conversion-gen-external-types=k8s.io/api/apps/v1beta2 // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/apps/v1beta2 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/zz_generated.conversion.go index 5fabfdccbdab..ae5d8692fa80 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/zz_generated.conversion.go @@ -32,7 +32,6 @@ import ( autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" core "k8s.io/kubernetes/pkg/apis/core" corev1 "k8s.io/kubernetes/pkg/apis/core/v1" - extensions "k8s.io/kubernetes/pkg/apis/extensions" ) func init() { @@ -62,193 +61,193 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.DaemonSet)(nil), (*extensions.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DaemonSet_To_extensions_DaemonSet(a.(*v1beta2.DaemonSet), b.(*extensions.DaemonSet), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.DaemonSet)(nil), (*apps.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DaemonSet_To_apps_DaemonSet(a.(*v1beta2.DaemonSet), b.(*apps.DaemonSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSet)(nil), (*v1beta2.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSet_To_v1beta2_DaemonSet(a.(*extensions.DaemonSet), 
b.(*v1beta2.DaemonSet), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSet)(nil), (*v1beta2.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSet_To_v1beta2_DaemonSet(a.(*apps.DaemonSet), b.(*v1beta2.DaemonSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.DaemonSetCondition)(nil), (*extensions.DaemonSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DaemonSetCondition_To_extensions_DaemonSetCondition(a.(*v1beta2.DaemonSetCondition), b.(*extensions.DaemonSetCondition), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.DaemonSetCondition)(nil), (*apps.DaemonSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DaemonSetCondition_To_apps_DaemonSetCondition(a.(*v1beta2.DaemonSetCondition), b.(*apps.DaemonSetCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSetCondition)(nil), (*v1beta2.DaemonSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetCondition_To_v1beta2_DaemonSetCondition(a.(*extensions.DaemonSetCondition), b.(*v1beta2.DaemonSetCondition), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSetCondition)(nil), (*v1beta2.DaemonSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetCondition_To_v1beta2_DaemonSetCondition(a.(*apps.DaemonSetCondition), b.(*v1beta2.DaemonSetCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.DaemonSetList)(nil), (*extensions.DaemonSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DaemonSetList_To_extensions_DaemonSetList(a.(*v1beta2.DaemonSetList), b.(*extensions.DaemonSetList), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.DaemonSetList)(nil), (*apps.DaemonSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DaemonSetList_To_apps_DaemonSetList(a.(*v1beta2.DaemonSetList), b.(*apps.DaemonSetList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSetList)(nil), (*v1beta2.DaemonSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetList_To_v1beta2_DaemonSetList(a.(*extensions.DaemonSetList), b.(*v1beta2.DaemonSetList), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSetList)(nil), (*v1beta2.DaemonSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetList_To_v1beta2_DaemonSetList(a.(*apps.DaemonSetList), b.(*v1beta2.DaemonSetList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.DaemonSetSpec)(nil), (*extensions.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DaemonSetSpec_To_extensions_DaemonSetSpec(a.(*v1beta2.DaemonSetSpec), b.(*extensions.DaemonSetSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.DaemonSetSpec)(nil), (*apps.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DaemonSetSpec_To_apps_DaemonSetSpec(a.(*v1beta2.DaemonSetSpec), b.(*apps.DaemonSetSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSetSpec)(nil), (*v1beta2.DaemonSetSpec)(nil), func(a, b 
interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetSpec_To_v1beta2_DaemonSetSpec(a.(*extensions.DaemonSetSpec), b.(*v1beta2.DaemonSetSpec), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSetSpec)(nil), (*v1beta2.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetSpec_To_v1beta2_DaemonSetSpec(a.(*apps.DaemonSetSpec), b.(*v1beta2.DaemonSetSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.DaemonSetStatus)(nil), (*extensions.DaemonSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DaemonSetStatus_To_extensions_DaemonSetStatus(a.(*v1beta2.DaemonSetStatus), b.(*extensions.DaemonSetStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.DaemonSetStatus)(nil), (*apps.DaemonSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DaemonSetStatus_To_apps_DaemonSetStatus(a.(*v1beta2.DaemonSetStatus), b.(*apps.DaemonSetStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSetStatus)(nil), (*v1beta2.DaemonSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetStatus_To_v1beta2_DaemonSetStatus(a.(*extensions.DaemonSetStatus), b.(*v1beta2.DaemonSetStatus), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSetStatus)(nil), (*v1beta2.DaemonSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetStatus_To_v1beta2_DaemonSetStatus(a.(*apps.DaemonSetStatus), b.(*v1beta2.DaemonSetStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.DaemonSetUpdateStrategy)(nil), (*extensions.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(a.(*v1beta2.DaemonSetUpdateStrategy), b.(*extensions.DaemonSetUpdateStrategy), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.DaemonSetUpdateStrategy)(nil), (*apps.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(a.(*v1beta2.DaemonSetUpdateStrategy), b.(*apps.DaemonSetUpdateStrategy), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSetUpdateStrategy)(nil), (*v1beta2.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(a.(*extensions.DaemonSetUpdateStrategy), b.(*v1beta2.DaemonSetUpdateStrategy), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSetUpdateStrategy)(nil), (*v1beta2.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(a.(*apps.DaemonSetUpdateStrategy), b.(*v1beta2.DaemonSetUpdateStrategy), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.Deployment)(nil), (*extensions.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_Deployment_To_extensions_Deployment(a.(*v1beta2.Deployment), b.(*extensions.Deployment), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.Deployment)(nil), (*apps.Deployment)(nil), func(a, b 
interface{}, scope conversion.Scope) error { + return Convert_v1beta2_Deployment_To_apps_Deployment(a.(*v1beta2.Deployment), b.(*apps.Deployment), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.Deployment)(nil), (*v1beta2.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_Deployment_To_v1beta2_Deployment(a.(*extensions.Deployment), b.(*v1beta2.Deployment), scope) + if err := s.AddGeneratedConversionFunc((*apps.Deployment)(nil), (*v1beta2.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_Deployment_To_v1beta2_Deployment(a.(*apps.Deployment), b.(*v1beta2.Deployment), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.DeploymentCondition)(nil), (*extensions.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DeploymentCondition_To_extensions_DeploymentCondition(a.(*v1beta2.DeploymentCondition), b.(*extensions.DeploymentCondition), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.DeploymentCondition)(nil), (*apps.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DeploymentCondition_To_apps_DeploymentCondition(a.(*v1beta2.DeploymentCondition), b.(*apps.DeploymentCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentCondition)(nil), (*v1beta2.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentCondition_To_v1beta2_DeploymentCondition(a.(*extensions.DeploymentCondition), b.(*v1beta2.DeploymentCondition), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentCondition)(nil), (*v1beta2.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentCondition_To_v1beta2_DeploymentCondition(a.(*apps.DeploymentCondition), b.(*v1beta2.DeploymentCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.DeploymentList)(nil), (*extensions.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DeploymentList_To_extensions_DeploymentList(a.(*v1beta2.DeploymentList), b.(*extensions.DeploymentList), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.DeploymentList)(nil), (*apps.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DeploymentList_To_apps_DeploymentList(a.(*v1beta2.DeploymentList), b.(*apps.DeploymentList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentList)(nil), (*v1beta2.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentList_To_v1beta2_DeploymentList(a.(*extensions.DeploymentList), b.(*v1beta2.DeploymentList), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentList)(nil), (*v1beta2.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentList_To_v1beta2_DeploymentList(a.(*apps.DeploymentList), b.(*v1beta2.DeploymentList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.DeploymentSpec)(nil), (*extensions.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec(a.(*v1beta2.DeploymentSpec), b.(*extensions.DeploymentSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.DeploymentSpec)(nil), (*apps.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DeploymentSpec_To_apps_DeploymentSpec(a.(*v1beta2.DeploymentSpec), b.(*apps.DeploymentSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentSpec)(nil), (*v1beta2.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentSpec_To_v1beta2_DeploymentSpec(a.(*extensions.DeploymentSpec), b.(*v1beta2.DeploymentSpec), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentSpec)(nil), (*v1beta2.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentSpec_To_v1beta2_DeploymentSpec(a.(*apps.DeploymentSpec), b.(*v1beta2.DeploymentSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.DeploymentStatus)(nil), (*extensions.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DeploymentStatus_To_extensions_DeploymentStatus(a.(*v1beta2.DeploymentStatus), b.(*extensions.DeploymentStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.DeploymentStatus)(nil), (*apps.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DeploymentStatus_To_apps_DeploymentStatus(a.(*v1beta2.DeploymentStatus), b.(*apps.DeploymentStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentStatus)(nil), (*v1beta2.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentStatus_To_v1beta2_DeploymentStatus(a.(*extensions.DeploymentStatus), b.(*v1beta2.DeploymentStatus), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentStatus)(nil), (*v1beta2.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentStatus_To_v1beta2_DeploymentStatus(a.(*apps.DeploymentStatus), b.(*v1beta2.DeploymentStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.DeploymentStrategy)(nil), (*extensions.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DeploymentStrategy_To_extensions_DeploymentStrategy(a.(*v1beta2.DeploymentStrategy), b.(*extensions.DeploymentStrategy), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.DeploymentStrategy)(nil), (*apps.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DeploymentStrategy_To_apps_DeploymentStrategy(a.(*v1beta2.DeploymentStrategy), b.(*apps.DeploymentStrategy), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentStrategy)(nil), (*v1beta2.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentStrategy_To_v1beta2_DeploymentStrategy(a.(*extensions.DeploymentStrategy), b.(*v1beta2.DeploymentStrategy), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentStrategy)(nil), (*v1beta2.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_apps_DeploymentStrategy_To_v1beta2_DeploymentStrategy(a.(*apps.DeploymentStrategy), b.(*v1beta2.DeploymentStrategy), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.ReplicaSet)(nil), (*extensions.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ReplicaSet_To_extensions_ReplicaSet(a.(*v1beta2.ReplicaSet), b.(*extensions.ReplicaSet), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.ReplicaSet)(nil), (*apps.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ReplicaSet_To_apps_ReplicaSet(a.(*v1beta2.ReplicaSet), b.(*apps.ReplicaSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.ReplicaSet)(nil), (*v1beta2.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSet_To_v1beta2_ReplicaSet(a.(*extensions.ReplicaSet), b.(*v1beta2.ReplicaSet), scope) + if err := s.AddGeneratedConversionFunc((*apps.ReplicaSet)(nil), (*v1beta2.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSet_To_v1beta2_ReplicaSet(a.(*apps.ReplicaSet), b.(*v1beta2.ReplicaSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.ReplicaSetCondition)(nil), (*extensions.ReplicaSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ReplicaSetCondition_To_extensions_ReplicaSetCondition(a.(*v1beta2.ReplicaSetCondition), b.(*extensions.ReplicaSetCondition), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.ReplicaSetCondition)(nil), (*apps.ReplicaSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ReplicaSetCondition_To_apps_ReplicaSetCondition(a.(*v1beta2.ReplicaSetCondition), b.(*apps.ReplicaSetCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.ReplicaSetCondition)(nil), (*v1beta2.ReplicaSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition(a.(*extensions.ReplicaSetCondition), b.(*v1beta2.ReplicaSetCondition), scope) + if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetCondition)(nil), (*v1beta2.ReplicaSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition(a.(*apps.ReplicaSetCondition), b.(*v1beta2.ReplicaSetCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.ReplicaSetList)(nil), (*extensions.ReplicaSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ReplicaSetList_To_extensions_ReplicaSetList(a.(*v1beta2.ReplicaSetList), b.(*extensions.ReplicaSetList), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.ReplicaSetList)(nil), (*apps.ReplicaSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ReplicaSetList_To_apps_ReplicaSetList(a.(*v1beta2.ReplicaSetList), b.(*apps.ReplicaSetList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.ReplicaSetList)(nil), (*v1beta2.ReplicaSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetList_To_v1beta2_ReplicaSetList(a.(*extensions.ReplicaSetList), b.(*v1beta2.ReplicaSetList), scope) + if err 
:= s.AddGeneratedConversionFunc((*apps.ReplicaSetList)(nil), (*v1beta2.ReplicaSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetList_To_v1beta2_ReplicaSetList(a.(*apps.ReplicaSetList), b.(*v1beta2.ReplicaSetList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.ReplicaSetSpec)(nil), (*extensions.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ReplicaSetSpec_To_extensions_ReplicaSetSpec(a.(*v1beta2.ReplicaSetSpec), b.(*extensions.ReplicaSetSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.ReplicaSetSpec)(nil), (*apps.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ReplicaSetSpec_To_apps_ReplicaSetSpec(a.(*v1beta2.ReplicaSetSpec), b.(*apps.ReplicaSetSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.ReplicaSetSpec)(nil), (*v1beta2.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(a.(*extensions.ReplicaSetSpec), b.(*v1beta2.ReplicaSetSpec), scope) + if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetSpec)(nil), (*v1beta2.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(a.(*apps.ReplicaSetSpec), b.(*v1beta2.ReplicaSetSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.ReplicaSetStatus)(nil), (*extensions.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ReplicaSetStatus_To_extensions_ReplicaSetStatus(a.(*v1beta2.ReplicaSetStatus), b.(*extensions.ReplicaSetStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.ReplicaSetStatus)(nil), (*apps.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ReplicaSetStatus_To_apps_ReplicaSetStatus(a.(*v1beta2.ReplicaSetStatus), b.(*apps.ReplicaSetStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.ReplicaSetStatus)(nil), (*v1beta2.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(a.(*extensions.ReplicaSetStatus), b.(*v1beta2.ReplicaSetStatus), scope) + if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetStatus)(nil), (*v1beta2.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(a.(*apps.ReplicaSetStatus), b.(*v1beta2.ReplicaSetStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.RollingUpdateDaemonSet)(nil), (*extensions.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(a.(*v1beta2.RollingUpdateDaemonSet), b.(*extensions.RollingUpdateDaemonSet), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.RollingUpdateDaemonSet)(nil), (*apps.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(a.(*v1beta2.RollingUpdateDaemonSet), b.(*apps.RollingUpdateDaemonSet), scope) }); err != nil { return err } - if err := 
s.AddGeneratedConversionFunc((*extensions.RollingUpdateDaemonSet)(nil), (*v1beta2.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(a.(*extensions.RollingUpdateDaemonSet), b.(*v1beta2.RollingUpdateDaemonSet), scope) + if err := s.AddGeneratedConversionFunc((*apps.RollingUpdateDaemonSet)(nil), (*v1beta2.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(a.(*apps.RollingUpdateDaemonSet), b.(*v1beta2.RollingUpdateDaemonSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.RollingUpdateDeployment)(nil), (*extensions.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(a.(*v1beta2.RollingUpdateDeployment), b.(*extensions.RollingUpdateDeployment), scope) + if err := s.AddGeneratedConversionFunc((*v1beta2.RollingUpdateDeployment)(nil), (*apps.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(a.(*v1beta2.RollingUpdateDeployment), b.(*apps.RollingUpdateDeployment), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.RollingUpdateDeployment)(nil), (*v1beta2.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(a.(*extensions.RollingUpdateDeployment), b.(*v1beta2.RollingUpdateDeployment), scope) + if err := s.AddGeneratedConversionFunc((*apps.RollingUpdateDeployment)(nil), (*v1beta2.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(a.(*apps.RollingUpdateDeployment), b.(*v1beta2.RollingUpdateDeployment), scope) }); err != nil { return err } @@ -352,113 +351,113 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*apps.StatefulSetSpec)(nil), (*v1beta2.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec(a.(*apps.StatefulSetSpec), b.(*v1beta2.StatefulSetSpec), scope) + if err := s.AddConversionFunc((*apps.DaemonSetSpec)(nil), (*v1beta2.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetSpec_To_v1beta2_DaemonSetSpec(a.(*apps.DaemonSetSpec), b.(*v1beta2.DaemonSetSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*apps.StatefulSetStatus)(nil), (*v1beta2.StatefulSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_StatefulSetStatus_To_v1beta2_StatefulSetStatus(a.(*apps.StatefulSetStatus), b.(*v1beta2.StatefulSetStatus), scope) + if err := s.AddConversionFunc((*apps.DaemonSetUpdateStrategy)(nil), (*v1beta2.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(a.(*apps.DaemonSetUpdateStrategy), b.(*v1beta2.DaemonSetUpdateStrategy), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*apps.StatefulSetUpdateStrategy)(nil), 
(*v1beta2.StatefulSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apps_StatefulSetUpdateStrategy_To_v1beta2_StatefulSetUpdateStrategy(a.(*apps.StatefulSetUpdateStrategy), b.(*v1beta2.StatefulSetUpdateStrategy), scope) + if err := s.AddConversionFunc((*apps.DaemonSet)(nil), (*v1beta2.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSet_To_v1beta2_DaemonSet(a.(*apps.DaemonSet), b.(*v1beta2.DaemonSet), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*autoscaling.ScaleStatus)(nil), (*v1beta2.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_autoscaling_ScaleStatus_To_v1beta2_ScaleStatus(a.(*autoscaling.ScaleStatus), b.(*v1beta2.ScaleStatus), scope) + if err := s.AddConversionFunc((*apps.DeploymentSpec)(nil), (*v1beta2.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentSpec_To_v1beta2_DeploymentSpec(a.(*apps.DeploymentSpec), b.(*v1beta2.DeploymentSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.DaemonSetSpec)(nil), (*v1beta2.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetSpec_To_v1beta2_DaemonSetSpec(a.(*extensions.DaemonSetSpec), b.(*v1beta2.DaemonSetSpec), scope) + if err := s.AddConversionFunc((*apps.DeploymentStrategy)(nil), (*v1beta2.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentStrategy_To_v1beta2_DeploymentStrategy(a.(*apps.DeploymentStrategy), b.(*v1beta2.DeploymentStrategy), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.DaemonSetUpdateStrategy)(nil), (*v1beta2.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(a.(*extensions.DaemonSetUpdateStrategy), b.(*v1beta2.DaemonSetUpdateStrategy), scope) + if err := s.AddConversionFunc((*apps.Deployment)(nil), (*v1beta2.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_Deployment_To_v1beta2_Deployment(a.(*apps.Deployment), b.(*v1beta2.Deployment), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.DaemonSet)(nil), (*v1beta2.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSet_To_v1beta2_DaemonSet(a.(*extensions.DaemonSet), b.(*v1beta2.DaemonSet), scope) + if err := s.AddConversionFunc((*apps.ReplicaSetSpec)(nil), (*v1beta2.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(a.(*apps.ReplicaSetSpec), b.(*v1beta2.ReplicaSetSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.DeploymentSpec)(nil), (*v1beta2.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentSpec_To_v1beta2_DeploymentSpec(a.(*extensions.DeploymentSpec), b.(*v1beta2.DeploymentSpec), scope) + if err := s.AddConversionFunc((*apps.RollingUpdateDaemonSet)(nil), (*v1beta2.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(a.(*apps.RollingUpdateDaemonSet), b.(*v1beta2.RollingUpdateDaemonSet), scope) }); err != nil { return 
err } - if err := s.AddConversionFunc((*extensions.DeploymentStrategy)(nil), (*v1beta2.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentStrategy_To_v1beta2_DeploymentStrategy(a.(*extensions.DeploymentStrategy), b.(*v1beta2.DeploymentStrategy), scope) + if err := s.AddConversionFunc((*apps.RollingUpdateDeployment)(nil), (*v1beta2.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(a.(*apps.RollingUpdateDeployment), b.(*v1beta2.RollingUpdateDeployment), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.Deployment)(nil), (*v1beta2.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_Deployment_To_v1beta2_Deployment(a.(*extensions.Deployment), b.(*v1beta2.Deployment), scope) + if err := s.AddConversionFunc((*apps.StatefulSetSpec)(nil), (*v1beta2.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec(a.(*apps.StatefulSetSpec), b.(*v1beta2.StatefulSetSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.ReplicaSetSpec)(nil), (*v1beta2.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(a.(*extensions.ReplicaSetSpec), b.(*v1beta2.ReplicaSetSpec), scope) + if err := s.AddConversionFunc((*apps.StatefulSetStatus)(nil), (*v1beta2.StatefulSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_StatefulSetStatus_To_v1beta2_StatefulSetStatus(a.(*apps.StatefulSetStatus), b.(*v1beta2.StatefulSetStatus), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.RollingUpdateDaemonSet)(nil), (*v1beta2.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(a.(*extensions.RollingUpdateDaemonSet), b.(*v1beta2.RollingUpdateDaemonSet), scope) + if err := s.AddConversionFunc((*apps.StatefulSetUpdateStrategy)(nil), (*v1beta2.StatefulSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_StatefulSetUpdateStrategy_To_v1beta2_StatefulSetUpdateStrategy(a.(*apps.StatefulSetUpdateStrategy), b.(*v1beta2.StatefulSetUpdateStrategy), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.RollingUpdateDeployment)(nil), (*v1beta2.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(a.(*extensions.RollingUpdateDeployment), b.(*v1beta2.RollingUpdateDeployment), scope) + if err := s.AddConversionFunc((*autoscaling.ScaleStatus)(nil), (*v1beta2.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_autoscaling_ScaleStatus_To_v1beta2_ScaleStatus(a.(*autoscaling.ScaleStatus), b.(*v1beta2.ScaleStatus), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta2.DaemonSetSpec)(nil), (*extensions.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DaemonSetSpec_To_extensions_DaemonSetSpec(a.(*v1beta2.DaemonSetSpec), b.(*extensions.DaemonSetSpec), scope) + if err := 
s.AddConversionFunc((*v1beta2.DaemonSetSpec)(nil), (*apps.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DaemonSetSpec_To_apps_DaemonSetSpec(a.(*v1beta2.DaemonSetSpec), b.(*apps.DaemonSetSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta2.DaemonSetUpdateStrategy)(nil), (*extensions.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(a.(*v1beta2.DaemonSetUpdateStrategy), b.(*extensions.DaemonSetUpdateStrategy), scope) + if err := s.AddConversionFunc((*v1beta2.DaemonSetUpdateStrategy)(nil), (*apps.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(a.(*v1beta2.DaemonSetUpdateStrategy), b.(*apps.DaemonSetUpdateStrategy), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta2.DaemonSet)(nil), (*extensions.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DaemonSet_To_extensions_DaemonSet(a.(*v1beta2.DaemonSet), b.(*extensions.DaemonSet), scope) + if err := s.AddConversionFunc((*v1beta2.DaemonSet)(nil), (*apps.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DaemonSet_To_apps_DaemonSet(a.(*v1beta2.DaemonSet), b.(*apps.DaemonSet), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta2.DeploymentSpec)(nil), (*extensions.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec(a.(*v1beta2.DeploymentSpec), b.(*extensions.DeploymentSpec), scope) + if err := s.AddConversionFunc((*v1beta2.DeploymentSpec)(nil), (*apps.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DeploymentSpec_To_apps_DeploymentSpec(a.(*v1beta2.DeploymentSpec), b.(*apps.DeploymentSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta2.DeploymentStrategy)(nil), (*extensions.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DeploymentStrategy_To_extensions_DeploymentStrategy(a.(*v1beta2.DeploymentStrategy), b.(*extensions.DeploymentStrategy), scope) + if err := s.AddConversionFunc((*v1beta2.DeploymentStrategy)(nil), (*apps.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_DeploymentStrategy_To_apps_DeploymentStrategy(a.(*v1beta2.DeploymentStrategy), b.(*apps.DeploymentStrategy), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta2.Deployment)(nil), (*extensions.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_Deployment_To_extensions_Deployment(a.(*v1beta2.Deployment), b.(*extensions.Deployment), scope) + if err := s.AddConversionFunc((*v1beta2.Deployment)(nil), (*apps.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_Deployment_To_apps_Deployment(a.(*v1beta2.Deployment), b.(*apps.Deployment), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta2.ReplicaSetSpec)(nil), (*extensions.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ReplicaSetSpec_To_extensions_ReplicaSetSpec(a.(*v1beta2.ReplicaSetSpec), 
b.(*extensions.ReplicaSetSpec), scope) + if err := s.AddConversionFunc((*v1beta2.ReplicaSetSpec)(nil), (*apps.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ReplicaSetSpec_To_apps_ReplicaSetSpec(a.(*v1beta2.ReplicaSetSpec), b.(*apps.ReplicaSetSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta2.RollingUpdateDaemonSet)(nil), (*extensions.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(a.(*v1beta2.RollingUpdateDaemonSet), b.(*extensions.RollingUpdateDaemonSet), scope) + if err := s.AddConversionFunc((*v1beta2.RollingUpdateDaemonSet)(nil), (*apps.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(a.(*v1beta2.RollingUpdateDaemonSet), b.(*apps.RollingUpdateDaemonSet), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta2.RollingUpdateDeployment)(nil), (*extensions.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(a.(*v1beta2.RollingUpdateDeployment), b.(*extensions.RollingUpdateDeployment), scope) + if err := s.AddConversionFunc((*v1beta2.RollingUpdateDeployment)(nil), (*apps.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(a.(*v1beta2.RollingUpdateDeployment), b.(*apps.RollingUpdateDeployment), scope) }); err != nil { return err } @@ -555,30 +554,30 @@ func Convert_apps_ControllerRevisionList_To_v1beta2_ControllerRevisionList(in *a return autoConvert_apps_ControllerRevisionList_To_v1beta2_ControllerRevisionList(in, out, s) } -func autoConvert_v1beta2_DaemonSet_To_extensions_DaemonSet(in *v1beta2.DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error { +func autoConvert_v1beta2_DaemonSet_To_apps_DaemonSet(in *v1beta2.DaemonSet, out *apps.DaemonSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta2_DaemonSetSpec_To_extensions_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1beta2_DaemonSetSpec_To_apps_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1beta2_DaemonSetStatus_To_extensions_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1beta2_DaemonSetStatus_To_apps_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func autoConvert_extensions_DaemonSet_To_v1beta2_DaemonSet(in *extensions.DaemonSet, out *v1beta2.DaemonSet, s conversion.Scope) error { +func autoConvert_apps_DaemonSet_To_v1beta2_DaemonSet(in *apps.DaemonSet, out *v1beta2.DaemonSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_DaemonSetSpec_To_v1beta2_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_apps_DaemonSetSpec_To_v1beta2_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_extensions_DaemonSetStatus_To_v1beta2_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_apps_DaemonSetStatus_To_v1beta2_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func autoConvert_v1beta2_DaemonSetCondition_To_extensions_DaemonSetCondition(in 
*v1beta2.DaemonSetCondition, out *extensions.DaemonSetCondition, s conversion.Scope) error { - out.Type = extensions.DaemonSetConditionType(in.Type) +func autoConvert_v1beta2_DaemonSetCondition_To_apps_DaemonSetCondition(in *v1beta2.DaemonSetCondition, out *apps.DaemonSetCondition, s conversion.Scope) error { + out.Type = apps.DaemonSetConditionType(in.Type) out.Status = core.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime out.Reason = in.Reason @@ -586,12 +585,12 @@ func autoConvert_v1beta2_DaemonSetCondition_To_extensions_DaemonSetCondition(in return nil } -// Convert_v1beta2_DaemonSetCondition_To_extensions_DaemonSetCondition is an autogenerated conversion function. -func Convert_v1beta2_DaemonSetCondition_To_extensions_DaemonSetCondition(in *v1beta2.DaemonSetCondition, out *extensions.DaemonSetCondition, s conversion.Scope) error { - return autoConvert_v1beta2_DaemonSetCondition_To_extensions_DaemonSetCondition(in, out, s) +// Convert_v1beta2_DaemonSetCondition_To_apps_DaemonSetCondition is an autogenerated conversion function. +func Convert_v1beta2_DaemonSetCondition_To_apps_DaemonSetCondition(in *v1beta2.DaemonSetCondition, out *apps.DaemonSetCondition, s conversion.Scope) error { + return autoConvert_v1beta2_DaemonSetCondition_To_apps_DaemonSetCondition(in, out, s) } -func autoConvert_extensions_DaemonSetCondition_To_v1beta2_DaemonSetCondition(in *extensions.DaemonSetCondition, out *v1beta2.DaemonSetCondition, s conversion.Scope) error { +func autoConvert_apps_DaemonSetCondition_To_v1beta2_DaemonSetCondition(in *apps.DaemonSetCondition, out *v1beta2.DaemonSetCondition, s conversion.Scope) error { out.Type = v1beta2.DaemonSetConditionType(in.Type) out.Status = v1.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime @@ -600,18 +599,18 @@ func autoConvert_extensions_DaemonSetCondition_To_v1beta2_DaemonSetCondition(in return nil } -// Convert_extensions_DaemonSetCondition_To_v1beta2_DaemonSetCondition is an autogenerated conversion function. -func Convert_extensions_DaemonSetCondition_To_v1beta2_DaemonSetCondition(in *extensions.DaemonSetCondition, out *v1beta2.DaemonSetCondition, s conversion.Scope) error { - return autoConvert_extensions_DaemonSetCondition_To_v1beta2_DaemonSetCondition(in, out, s) +// Convert_apps_DaemonSetCondition_To_v1beta2_DaemonSetCondition is an autogenerated conversion function. 
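// [Editor's aside, not part of the vendored diff] The RegisterConversions changes
// above only swap the internal package from "extensions" to "apps"; the
// registration machinery is unchanged. A minimal, hypothetical sketch of how
// functions registered this way are later consumed through a runtime.Scheme
// (toy types below, not the real apps/v1beta2 ones):
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/conversion"
	"k8s.io/apimachinery/pkg/runtime"
)

// Hypothetical stand-ins for a versioned type and its internal counterpart.
type Versioned struct{ Replicas *int32 }
type Internal struct{ Replicas int32 }

func main() {
	s := runtime.NewScheme()
	// Same call shape as the s.AddConversionFunc(...) registrations above:
	// the scheme keys the closure by the (in, out) pointer-type pair.
	if err := s.AddConversionFunc((*Versioned)(nil), (*Internal)(nil), func(a, b interface{}, scope conversion.Scope) error {
		in, out := a.(*Versioned), b.(*Internal)
		out.Replicas = 0
		if in.Replicas != nil {
			out.Replicas = *in.Replicas
		}
		return nil
	}); err != nil {
		panic(err)
	}
	three := int32(3)
	var out Internal
	// Scheme.Convert dispatches to the conversion registered for this type pair.
	if err := s.Convert(&Versioned{Replicas: &three}, &out, nil); err != nil {
		panic(err)
	}
	fmt.Println(out.Replicas) // prints 3
}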
+func Convert_apps_DaemonSetCondition_To_v1beta2_DaemonSetCondition(in *apps.DaemonSetCondition, out *v1beta2.DaemonSetCondition, s conversion.Scope) error { + return autoConvert_apps_DaemonSetCondition_To_v1beta2_DaemonSetCondition(in, out, s) } -func autoConvert_v1beta2_DaemonSetList_To_extensions_DaemonSetList(in *v1beta2.DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error { +func autoConvert_v1beta2_DaemonSetList_To_apps_DaemonSetList(in *v1beta2.DaemonSetList, out *apps.DaemonSetList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]extensions.DaemonSet, len(*in)) + *out = make([]apps.DaemonSet, len(*in)) for i := range *in { - if err := Convert_v1beta2_DaemonSet_To_extensions_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_v1beta2_DaemonSet_To_apps_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -621,18 +620,18 @@ func autoConvert_v1beta2_DaemonSetList_To_extensions_DaemonSetList(in *v1beta2.D return nil } -// Convert_v1beta2_DaemonSetList_To_extensions_DaemonSetList is an autogenerated conversion function. -func Convert_v1beta2_DaemonSetList_To_extensions_DaemonSetList(in *v1beta2.DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error { - return autoConvert_v1beta2_DaemonSetList_To_extensions_DaemonSetList(in, out, s) +// Convert_v1beta2_DaemonSetList_To_apps_DaemonSetList is an autogenerated conversion function. +func Convert_v1beta2_DaemonSetList_To_apps_DaemonSetList(in *v1beta2.DaemonSetList, out *apps.DaemonSetList, s conversion.Scope) error { + return autoConvert_v1beta2_DaemonSetList_To_apps_DaemonSetList(in, out, s) } -func autoConvert_extensions_DaemonSetList_To_v1beta2_DaemonSetList(in *extensions.DaemonSetList, out *v1beta2.DaemonSetList, s conversion.Scope) error { +func autoConvert_apps_DaemonSetList_To_v1beta2_DaemonSetList(in *apps.DaemonSetList, out *v1beta2.DaemonSetList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]v1beta2.DaemonSet, len(*in)) for i := range *in { - if err := Convert_extensions_DaemonSet_To_v1beta2_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_apps_DaemonSet_To_v1beta2_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -642,17 +641,17 @@ func autoConvert_extensions_DaemonSetList_To_v1beta2_DaemonSetList(in *extension return nil } -// Convert_extensions_DaemonSetList_To_v1beta2_DaemonSetList is an autogenerated conversion function. -func Convert_extensions_DaemonSetList_To_v1beta2_DaemonSetList(in *extensions.DaemonSetList, out *v1beta2.DaemonSetList, s conversion.Scope) error { - return autoConvert_extensions_DaemonSetList_To_v1beta2_DaemonSetList(in, out, s) +// Convert_apps_DaemonSetList_To_v1beta2_DaemonSetList is an autogenerated conversion function. 
+func Convert_apps_DaemonSetList_To_v1beta2_DaemonSetList(in *apps.DaemonSetList, out *v1beta2.DaemonSetList, s conversion.Scope) error { + return autoConvert_apps_DaemonSetList_To_v1beta2_DaemonSetList(in, out, s) } -func autoConvert_v1beta2_DaemonSetSpec_To_extensions_DaemonSetSpec(in *v1beta2.DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error { +func autoConvert_v1beta2_DaemonSetSpec_To_apps_DaemonSetSpec(in *v1beta2.DaemonSetSpec, out *apps.DaemonSetSpec, s conversion.Scope) error { out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector)) if err := corev1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_v1beta2_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + if err := Convert_v1beta2_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds @@ -660,12 +659,12 @@ func autoConvert_v1beta2_DaemonSetSpec_To_extensions_DaemonSetSpec(in *v1beta2.D return nil } -func autoConvert_extensions_DaemonSetSpec_To_v1beta2_DaemonSetSpec(in *extensions.DaemonSetSpec, out *v1beta2.DaemonSetSpec, s conversion.Scope) error { +func autoConvert_apps_DaemonSetSpec_To_v1beta2_DaemonSetSpec(in *apps.DaemonSetSpec, out *v1beta2.DaemonSetSpec, s conversion.Scope) error { out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector)) if err := corev1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_extensions_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + if err := Convert_apps_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds @@ -674,7 +673,7 @@ func autoConvert_extensions_DaemonSetSpec_To_v1beta2_DaemonSetSpec(in *extension return nil } -func autoConvert_v1beta2_DaemonSetStatus_To_extensions_DaemonSetStatus(in *v1beta2.DaemonSetStatus, out *extensions.DaemonSetStatus, s conversion.Scope) error { +func autoConvert_v1beta2_DaemonSetStatus_To_apps_DaemonSetStatus(in *v1beta2.DaemonSetStatus, out *apps.DaemonSetStatus, s conversion.Scope) error { out.CurrentNumberScheduled = in.CurrentNumberScheduled out.NumberMisscheduled = in.NumberMisscheduled out.DesiredNumberScheduled = in.DesiredNumberScheduled @@ -684,16 +683,16 @@ func autoConvert_v1beta2_DaemonSetStatus_To_extensions_DaemonSetStatus(in *v1bet out.NumberAvailable = in.NumberAvailable out.NumberUnavailable = in.NumberUnavailable out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount)) - out.Conditions = *(*[]extensions.DaemonSetCondition)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*[]apps.DaemonSetCondition)(unsafe.Pointer(&in.Conditions)) return nil } -// Convert_v1beta2_DaemonSetStatus_To_extensions_DaemonSetStatus is an autogenerated conversion function. -func Convert_v1beta2_DaemonSetStatus_To_extensions_DaemonSetStatus(in *v1beta2.DaemonSetStatus, out *extensions.DaemonSetStatus, s conversion.Scope) error { - return autoConvert_v1beta2_DaemonSetStatus_To_extensions_DaemonSetStatus(in, out, s) +// Convert_v1beta2_DaemonSetStatus_To_apps_DaemonSetStatus is an autogenerated conversion function. 
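// [Editor's aside, not part of the vendored diff] The Conditions assignments
// above convert via a slice-header reinterpretation, e.g.
// *(*[]apps.DaemonSetCondition)(unsafe.Pointer(&in.Conditions)), rather than an
// element-by-element copy. That is only sound because conversion-gen has
// verified the two element types share an identical memory layout. A
// hypothetical illustration of the same zero-copy trick:
package main

import (
	"fmt"
	"unsafe"
)

type externalCondition struct{ Reason, Message string }
type internalCondition struct{ Reason, Message string } // identical layout

func main() {
	in := []externalCondition{{Reason: "Ready", Message: "ok"}}
	// Reinterpret the slice header; both slices now share one backing array,
	// so no per-element work is done.
	out := *(*[]internalCondition)(unsafe.Pointer(&in))
	fmt.Println(out[0].Reason) // prints "Ready"
}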
+func Convert_v1beta2_DaemonSetStatus_To_apps_DaemonSetStatus(in *v1beta2.DaemonSetStatus, out *apps.DaemonSetStatus, s conversion.Scope) error { + return autoConvert_v1beta2_DaemonSetStatus_To_apps_DaemonSetStatus(in, out, s) } -func autoConvert_extensions_DaemonSetStatus_To_v1beta2_DaemonSetStatus(in *extensions.DaemonSetStatus, out *v1beta2.DaemonSetStatus, s conversion.Scope) error { +func autoConvert_apps_DaemonSetStatus_To_v1beta2_DaemonSetStatus(in *apps.DaemonSetStatus, out *v1beta2.DaemonSetStatus, s conversion.Scope) error { out.CurrentNumberScheduled = in.CurrentNumberScheduled out.NumberMisscheduled = in.NumberMisscheduled out.DesiredNumberScheduled = in.DesiredNumberScheduled @@ -707,17 +706,17 @@ func autoConvert_extensions_DaemonSetStatus_To_v1beta2_DaemonSetStatus(in *exten return nil } -// Convert_extensions_DaemonSetStatus_To_v1beta2_DaemonSetStatus is an autogenerated conversion function. -func Convert_extensions_DaemonSetStatus_To_v1beta2_DaemonSetStatus(in *extensions.DaemonSetStatus, out *v1beta2.DaemonSetStatus, s conversion.Scope) error { - return autoConvert_extensions_DaemonSetStatus_To_v1beta2_DaemonSetStatus(in, out, s) +// Convert_apps_DaemonSetStatus_To_v1beta2_DaemonSetStatus is an autogenerated conversion function. +func Convert_apps_DaemonSetStatus_To_v1beta2_DaemonSetStatus(in *apps.DaemonSetStatus, out *v1beta2.DaemonSetStatus, s conversion.Scope) error { + return autoConvert_apps_DaemonSetStatus_To_v1beta2_DaemonSetStatus(in, out, s) } -func autoConvert_v1beta2_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in *v1beta2.DaemonSetUpdateStrategy, out *extensions.DaemonSetUpdateStrategy, s conversion.Scope) error { - out.Type = extensions.DaemonSetUpdateStrategyType(in.Type) +func autoConvert_v1beta2_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(in *v1beta2.DaemonSetUpdateStrategy, out *apps.DaemonSetUpdateStrategy, s conversion.Scope) error { + out.Type = apps.DaemonSetUpdateStrategyType(in.Type) if in.RollingUpdate != nil { in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(extensions.RollingUpdateDaemonSet) - if err := Convert_v1beta2_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(*in, *out, s); err != nil { + *out = new(apps.RollingUpdateDaemonSet) + if err := Convert_v1beta2_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(*in, *out, s); err != nil { return err } } else { @@ -726,12 +725,12 @@ func autoConvert_v1beta2_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateSt return nil } -func autoConvert_extensions_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(in *extensions.DaemonSetUpdateStrategy, out *v1beta2.DaemonSetUpdateStrategy, s conversion.Scope) error { +func autoConvert_apps_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(in *apps.DaemonSetUpdateStrategy, out *v1beta2.DaemonSetUpdateStrategy, s conversion.Scope) error { out.Type = v1beta2.DaemonSetUpdateStrategyType(in.Type) if in.RollingUpdate != nil { in, out := &in.RollingUpdate, &out.RollingUpdate *out = new(v1beta2.RollingUpdateDaemonSet) - if err := Convert_extensions_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(*in, *out, s); err != nil { + if err := Convert_apps_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(*in, *out, s); err != nil { return err } } else { @@ -740,30 +739,30 @@ func autoConvert_extensions_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateSt return nil } -func autoConvert_v1beta2_Deployment_To_extensions_Deployment(in *v1beta2.Deployment, out *extensions.Deployment, s 
conversion.Scope) error { +func autoConvert_v1beta2_Deployment_To_apps_Deployment(in *v1beta2.Deployment, out *apps.Deployment, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1beta2_DeploymentSpec_To_apps_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1beta2_DeploymentStatus_To_extensions_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1beta2_DeploymentStatus_To_apps_DeploymentStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func autoConvert_extensions_Deployment_To_v1beta2_Deployment(in *extensions.Deployment, out *v1beta2.Deployment, s conversion.Scope) error { +func autoConvert_apps_Deployment_To_v1beta2_Deployment(in *apps.Deployment, out *v1beta2.Deployment, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_DeploymentSpec_To_v1beta2_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_apps_DeploymentSpec_To_v1beta2_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_extensions_DeploymentStatus_To_v1beta2_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_apps_DeploymentStatus_To_v1beta2_DeploymentStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func autoConvert_v1beta2_DeploymentCondition_To_extensions_DeploymentCondition(in *v1beta2.DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error { - out.Type = extensions.DeploymentConditionType(in.Type) +func autoConvert_v1beta2_DeploymentCondition_To_apps_DeploymentCondition(in *v1beta2.DeploymentCondition, out *apps.DeploymentCondition, s conversion.Scope) error { + out.Type = apps.DeploymentConditionType(in.Type) out.Status = core.ConditionStatus(in.Status) out.LastUpdateTime = in.LastUpdateTime out.LastTransitionTime = in.LastTransitionTime @@ -772,12 +771,12 @@ func autoConvert_v1beta2_DeploymentCondition_To_extensions_DeploymentCondition(i return nil } -// Convert_v1beta2_DeploymentCondition_To_extensions_DeploymentCondition is an autogenerated conversion function. -func Convert_v1beta2_DeploymentCondition_To_extensions_DeploymentCondition(in *v1beta2.DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error { - return autoConvert_v1beta2_DeploymentCondition_To_extensions_DeploymentCondition(in, out, s) +// Convert_v1beta2_DeploymentCondition_To_apps_DeploymentCondition is an autogenerated conversion function. 
+func Convert_v1beta2_DeploymentCondition_To_apps_DeploymentCondition(in *v1beta2.DeploymentCondition, out *apps.DeploymentCondition, s conversion.Scope) error { + return autoConvert_v1beta2_DeploymentCondition_To_apps_DeploymentCondition(in, out, s) } -func autoConvert_extensions_DeploymentCondition_To_v1beta2_DeploymentCondition(in *extensions.DeploymentCondition, out *v1beta2.DeploymentCondition, s conversion.Scope) error { +func autoConvert_apps_DeploymentCondition_To_v1beta2_DeploymentCondition(in *apps.DeploymentCondition, out *v1beta2.DeploymentCondition, s conversion.Scope) error { out.Type = v1beta2.DeploymentConditionType(in.Type) out.Status = v1.ConditionStatus(in.Status) out.LastUpdateTime = in.LastUpdateTime @@ -787,18 +786,18 @@ func autoConvert_extensions_DeploymentCondition_To_v1beta2_DeploymentCondition(i return nil } -// Convert_extensions_DeploymentCondition_To_v1beta2_DeploymentCondition is an autogenerated conversion function. -func Convert_extensions_DeploymentCondition_To_v1beta2_DeploymentCondition(in *extensions.DeploymentCondition, out *v1beta2.DeploymentCondition, s conversion.Scope) error { - return autoConvert_extensions_DeploymentCondition_To_v1beta2_DeploymentCondition(in, out, s) +// Convert_apps_DeploymentCondition_To_v1beta2_DeploymentCondition is an autogenerated conversion function. +func Convert_apps_DeploymentCondition_To_v1beta2_DeploymentCondition(in *apps.DeploymentCondition, out *v1beta2.DeploymentCondition, s conversion.Scope) error { + return autoConvert_apps_DeploymentCondition_To_v1beta2_DeploymentCondition(in, out, s) } -func autoConvert_v1beta2_DeploymentList_To_extensions_DeploymentList(in *v1beta2.DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error { +func autoConvert_v1beta2_DeploymentList_To_apps_DeploymentList(in *v1beta2.DeploymentList, out *apps.DeploymentList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]extensions.Deployment, len(*in)) + *out = make([]apps.Deployment, len(*in)) for i := range *in { - if err := Convert_v1beta2_Deployment_To_extensions_Deployment(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_v1beta2_Deployment_To_apps_Deployment(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -808,18 +807,18 @@ func autoConvert_v1beta2_DeploymentList_To_extensions_DeploymentList(in *v1beta2 return nil } -// Convert_v1beta2_DeploymentList_To_extensions_DeploymentList is an autogenerated conversion function. -func Convert_v1beta2_DeploymentList_To_extensions_DeploymentList(in *v1beta2.DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error { - return autoConvert_v1beta2_DeploymentList_To_extensions_DeploymentList(in, out, s) +// Convert_v1beta2_DeploymentList_To_apps_DeploymentList is an autogenerated conversion function. 
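// [Editor's aside, not part of the vendored diff] Note the contrast in the list
// conversions above: DaemonSetList and DeploymentList items go through an
// explicit make-and-loop (each Deployment needs real field-by-field conversion:
// pointer vs. value Replicas, pod template conversion), while Conditions slices
// are bulk-cast via unsafe.Pointer because their element layouts are identical.
// The generator emits the cheapest conversion it can prove safe.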
+func Convert_v1beta2_DeploymentList_To_apps_DeploymentList(in *v1beta2.DeploymentList, out *apps.DeploymentList, s conversion.Scope) error { + return autoConvert_v1beta2_DeploymentList_To_apps_DeploymentList(in, out, s) } -func autoConvert_extensions_DeploymentList_To_v1beta2_DeploymentList(in *extensions.DeploymentList, out *v1beta2.DeploymentList, s conversion.Scope) error { +func autoConvert_apps_DeploymentList_To_v1beta2_DeploymentList(in *apps.DeploymentList, out *v1beta2.DeploymentList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]v1beta2.Deployment, len(*in)) for i := range *in { - if err := Convert_extensions_Deployment_To_v1beta2_Deployment(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_apps_Deployment_To_v1beta2_Deployment(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -829,12 +828,12 @@ func autoConvert_extensions_DeploymentList_To_v1beta2_DeploymentList(in *extensi return nil } -// Convert_extensions_DeploymentList_To_v1beta2_DeploymentList is an autogenerated conversion function. -func Convert_extensions_DeploymentList_To_v1beta2_DeploymentList(in *extensions.DeploymentList, out *v1beta2.DeploymentList, s conversion.Scope) error { - return autoConvert_extensions_DeploymentList_To_v1beta2_DeploymentList(in, out, s) +// Convert_apps_DeploymentList_To_v1beta2_DeploymentList is an autogenerated conversion function. +func Convert_apps_DeploymentList_To_v1beta2_DeploymentList(in *apps.DeploymentList, out *v1beta2.DeploymentList, s conversion.Scope) error { + return autoConvert_apps_DeploymentList_To_v1beta2_DeploymentList(in, out, s) } -func autoConvert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec(in *v1beta2.DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { +func autoConvert_v1beta2_DeploymentSpec_To_apps_DeploymentSpec(in *v1beta2.DeploymentSpec, out *apps.DeploymentSpec, s conversion.Scope) error { if err := metav1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } @@ -842,7 +841,7 @@ func autoConvert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec(in *v1beta2 if err := corev1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_v1beta2_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + if err := Convert_v1beta2_DeploymentStrategy_To_apps_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds @@ -852,7 +851,7 @@ func autoConvert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec(in *v1beta2 return nil } -func autoConvert_extensions_DeploymentSpec_To_v1beta2_DeploymentSpec(in *extensions.DeploymentSpec, out *v1beta2.DeploymentSpec, s conversion.Scope) error { +func autoConvert_apps_DeploymentSpec_To_v1beta2_DeploymentSpec(in *apps.DeploymentSpec, out *v1beta2.DeploymentSpec, s conversion.Scope) error { if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } @@ -860,7 +859,7 @@ func autoConvert_extensions_DeploymentSpec_To_v1beta2_DeploymentSpec(in *extensi if err := corev1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_extensions_DeploymentStrategy_To_v1beta2_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + if err := 
Convert_apps_DeploymentStrategy_To_v1beta2_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds @@ -871,24 +870,24 @@ func autoConvert_extensions_DeploymentSpec_To_v1beta2_DeploymentSpec(in *extensi return nil } -func autoConvert_v1beta2_DeploymentStatus_To_extensions_DeploymentStatus(in *v1beta2.DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error { +func autoConvert_v1beta2_DeploymentStatus_To_apps_DeploymentStatus(in *v1beta2.DeploymentStatus, out *apps.DeploymentStatus, s conversion.Scope) error { out.ObservedGeneration = in.ObservedGeneration out.Replicas = in.Replicas out.UpdatedReplicas = in.UpdatedReplicas out.ReadyReplicas = in.ReadyReplicas out.AvailableReplicas = in.AvailableReplicas out.UnavailableReplicas = in.UnavailableReplicas - out.Conditions = *(*[]extensions.DeploymentCondition)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*[]apps.DeploymentCondition)(unsafe.Pointer(&in.Conditions)) out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount)) return nil } -// Convert_v1beta2_DeploymentStatus_To_extensions_DeploymentStatus is an autogenerated conversion function. -func Convert_v1beta2_DeploymentStatus_To_extensions_DeploymentStatus(in *v1beta2.DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error { - return autoConvert_v1beta2_DeploymentStatus_To_extensions_DeploymentStatus(in, out, s) +// Convert_v1beta2_DeploymentStatus_To_apps_DeploymentStatus is an autogenerated conversion function. +func Convert_v1beta2_DeploymentStatus_To_apps_DeploymentStatus(in *v1beta2.DeploymentStatus, out *apps.DeploymentStatus, s conversion.Scope) error { + return autoConvert_v1beta2_DeploymentStatus_To_apps_DeploymentStatus(in, out, s) } -func autoConvert_extensions_DeploymentStatus_To_v1beta2_DeploymentStatus(in *extensions.DeploymentStatus, out *v1beta2.DeploymentStatus, s conversion.Scope) error { +func autoConvert_apps_DeploymentStatus_To_v1beta2_DeploymentStatus(in *apps.DeploymentStatus, out *v1beta2.DeploymentStatus, s conversion.Scope) error { out.ObservedGeneration = in.ObservedGeneration out.Replicas = in.Replicas out.UpdatedReplicas = in.UpdatedReplicas @@ -900,17 +899,17 @@ func autoConvert_extensions_DeploymentStatus_To_v1beta2_DeploymentStatus(in *ext return nil } -// Convert_extensions_DeploymentStatus_To_v1beta2_DeploymentStatus is an autogenerated conversion function. -func Convert_extensions_DeploymentStatus_To_v1beta2_DeploymentStatus(in *extensions.DeploymentStatus, out *v1beta2.DeploymentStatus, s conversion.Scope) error { - return autoConvert_extensions_DeploymentStatus_To_v1beta2_DeploymentStatus(in, out, s) +// Convert_apps_DeploymentStatus_To_v1beta2_DeploymentStatus is an autogenerated conversion function. 
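// [Editor's aside, not part of the vendored diff] The DeploymentSpec
// conversions above (and the ReplicaSetSpec ones below) route Replicas through
// metav1.Convert_Pointer_int32_To_int32 / Convert_int32_To_Pointer_int32,
// because the versioned field is *int32 (an unset value is expressible) while
// the internal field is a plain int32. Roughly, those helpers behave like this
// hypothetical pair:
package main

import "fmt"

func convertPointerInt32ToInt32(in **int32, out *int32) {
	if *in != nil {
		*out = **in // caller supplied a value
	} else {
		*out = 0 // unset maps to the zero value
	}
}

func convertInt32ToPointerInt32(in *int32, out **int32) {
	v := *in
	*out = &v // internal values are always concrete, so always emit a pointer
}

func main() {
	var replicas *int32 // unset in the versioned object
	var internal int32
	convertPointerInt32ToInt32(&replicas, &internal)
	fmt.Println(internal) // prints 0

	internal = 3
	var roundTrip *int32
	convertInt32ToPointerInt32(&internal, &roundTrip)
	fmt.Println(*roundTrip) // prints 3
}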
+func Convert_apps_DeploymentStatus_To_v1beta2_DeploymentStatus(in *apps.DeploymentStatus, out *v1beta2.DeploymentStatus, s conversion.Scope) error { + return autoConvert_apps_DeploymentStatus_To_v1beta2_DeploymentStatus(in, out, s) } -func autoConvert_v1beta2_DeploymentStrategy_To_extensions_DeploymentStrategy(in *v1beta2.DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error { - out.Type = extensions.DeploymentStrategyType(in.Type) +func autoConvert_v1beta2_DeploymentStrategy_To_apps_DeploymentStrategy(in *v1beta2.DeploymentStrategy, out *apps.DeploymentStrategy, s conversion.Scope) error { + out.Type = apps.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(extensions.RollingUpdateDeployment) - if err := Convert_v1beta2_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(*in, *out, s); err != nil { + *out = new(apps.RollingUpdateDeployment) + if err := Convert_v1beta2_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(*in, *out, s); err != nil { return err } } else { @@ -919,12 +918,12 @@ func autoConvert_v1beta2_DeploymentStrategy_To_extensions_DeploymentStrategy(in return nil } -func autoConvert_extensions_DeploymentStrategy_To_v1beta2_DeploymentStrategy(in *extensions.DeploymentStrategy, out *v1beta2.DeploymentStrategy, s conversion.Scope) error { +func autoConvert_apps_DeploymentStrategy_To_v1beta2_DeploymentStrategy(in *apps.DeploymentStrategy, out *v1beta2.DeploymentStrategy, s conversion.Scope) error { out.Type = v1beta2.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { in, out := &in.RollingUpdate, &out.RollingUpdate *out = new(v1beta2.RollingUpdateDeployment) - if err := Convert_extensions_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(*in, *out, s); err != nil { + if err := Convert_apps_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(*in, *out, s); err != nil { return err } } else { @@ -933,40 +932,40 @@ func autoConvert_extensions_DeploymentStrategy_To_v1beta2_DeploymentStrategy(in return nil } -func autoConvert_v1beta2_ReplicaSet_To_extensions_ReplicaSet(in *v1beta2.ReplicaSet, out *extensions.ReplicaSet, s conversion.Scope) error { +func autoConvert_v1beta2_ReplicaSet_To_apps_ReplicaSet(in *v1beta2.ReplicaSet, out *apps.ReplicaSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta2_ReplicaSetSpec_To_extensions_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1beta2_ReplicaSetSpec_To_apps_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1beta2_ReplicaSetStatus_To_extensions_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1beta2_ReplicaSetStatus_To_apps_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1beta2_ReplicaSet_To_extensions_ReplicaSet is an autogenerated conversion function. -func Convert_v1beta2_ReplicaSet_To_extensions_ReplicaSet(in *v1beta2.ReplicaSet, out *extensions.ReplicaSet, s conversion.Scope) error { - return autoConvert_v1beta2_ReplicaSet_To_extensions_ReplicaSet(in, out, s) +// Convert_v1beta2_ReplicaSet_To_apps_ReplicaSet is an autogenerated conversion function. 
+func Convert_v1beta2_ReplicaSet_To_apps_ReplicaSet(in *v1beta2.ReplicaSet, out *apps.ReplicaSet, s conversion.Scope) error { + return autoConvert_v1beta2_ReplicaSet_To_apps_ReplicaSet(in, out, s) } -func autoConvert_extensions_ReplicaSet_To_v1beta2_ReplicaSet(in *extensions.ReplicaSet, out *v1beta2.ReplicaSet, s conversion.Scope) error { +func autoConvert_apps_ReplicaSet_To_v1beta2_ReplicaSet(in *apps.ReplicaSet, out *v1beta2.ReplicaSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_apps_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_extensions_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_apps_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_extensions_ReplicaSet_To_v1beta2_ReplicaSet is an autogenerated conversion function. -func Convert_extensions_ReplicaSet_To_v1beta2_ReplicaSet(in *extensions.ReplicaSet, out *v1beta2.ReplicaSet, s conversion.Scope) error { - return autoConvert_extensions_ReplicaSet_To_v1beta2_ReplicaSet(in, out, s) +// Convert_apps_ReplicaSet_To_v1beta2_ReplicaSet is an autogenerated conversion function. +func Convert_apps_ReplicaSet_To_v1beta2_ReplicaSet(in *apps.ReplicaSet, out *v1beta2.ReplicaSet, s conversion.Scope) error { + return autoConvert_apps_ReplicaSet_To_v1beta2_ReplicaSet(in, out, s) } -func autoConvert_v1beta2_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in *v1beta2.ReplicaSetCondition, out *extensions.ReplicaSetCondition, s conversion.Scope) error { - out.Type = extensions.ReplicaSetConditionType(in.Type) +func autoConvert_v1beta2_ReplicaSetCondition_To_apps_ReplicaSetCondition(in *v1beta2.ReplicaSetCondition, out *apps.ReplicaSetCondition, s conversion.Scope) error { + out.Type = apps.ReplicaSetConditionType(in.Type) out.Status = core.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime out.Reason = in.Reason @@ -974,12 +973,12 @@ func autoConvert_v1beta2_ReplicaSetCondition_To_extensions_ReplicaSetCondition(i return nil } -// Convert_v1beta2_ReplicaSetCondition_To_extensions_ReplicaSetCondition is an autogenerated conversion function. -func Convert_v1beta2_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in *v1beta2.ReplicaSetCondition, out *extensions.ReplicaSetCondition, s conversion.Scope) error { - return autoConvert_v1beta2_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in, out, s) +// Convert_v1beta2_ReplicaSetCondition_To_apps_ReplicaSetCondition is an autogenerated conversion function. 
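// [Editor's aside, not part of the vendored diff] A pattern worth noting across
// the renames above: each autoConvert_* function holds the generated body, and
// the exported Convert_* function is normally a thin wrapper around it. Where a
// type pair needs logic the generator cannot produce (see the RollingUpdate*
// MaxUnavailable "requires manual conversion" warnings further down), the
// exported wrapper is written by hand elsewhere and registered with
// AddConversionFunc instead of AddGeneratedConversionFunc, while autoConvert_*
// still covers the trivially convertible fields.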
+func Convert_v1beta2_ReplicaSetCondition_To_apps_ReplicaSetCondition(in *v1beta2.ReplicaSetCondition, out *apps.ReplicaSetCondition, s conversion.Scope) error { + return autoConvert_v1beta2_ReplicaSetCondition_To_apps_ReplicaSetCondition(in, out, s) } -func autoConvert_extensions_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition(in *extensions.ReplicaSetCondition, out *v1beta2.ReplicaSetCondition, s conversion.Scope) error { +func autoConvert_apps_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition(in *apps.ReplicaSetCondition, out *v1beta2.ReplicaSetCondition, s conversion.Scope) error { out.Type = v1beta2.ReplicaSetConditionType(in.Type) out.Status = v1.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime @@ -988,18 +987,18 @@ func autoConvert_extensions_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition(i return nil } -// Convert_extensions_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition is an autogenerated conversion function. -func Convert_extensions_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition(in *extensions.ReplicaSetCondition, out *v1beta2.ReplicaSetCondition, s conversion.Scope) error { - return autoConvert_extensions_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition(in, out, s) +// Convert_apps_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition is an autogenerated conversion function. +func Convert_apps_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition(in *apps.ReplicaSetCondition, out *v1beta2.ReplicaSetCondition, s conversion.Scope) error { + return autoConvert_apps_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition(in, out, s) } -func autoConvert_v1beta2_ReplicaSetList_To_extensions_ReplicaSetList(in *v1beta2.ReplicaSetList, out *extensions.ReplicaSetList, s conversion.Scope) error { +func autoConvert_v1beta2_ReplicaSetList_To_apps_ReplicaSetList(in *v1beta2.ReplicaSetList, out *apps.ReplicaSetList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]extensions.ReplicaSet, len(*in)) + *out = make([]apps.ReplicaSet, len(*in)) for i := range *in { - if err := Convert_v1beta2_ReplicaSet_To_extensions_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_v1beta2_ReplicaSet_To_apps_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -1009,18 +1008,18 @@ func autoConvert_v1beta2_ReplicaSetList_To_extensions_ReplicaSetList(in *v1beta2 return nil } -// Convert_v1beta2_ReplicaSetList_To_extensions_ReplicaSetList is an autogenerated conversion function. -func Convert_v1beta2_ReplicaSetList_To_extensions_ReplicaSetList(in *v1beta2.ReplicaSetList, out *extensions.ReplicaSetList, s conversion.Scope) error { - return autoConvert_v1beta2_ReplicaSetList_To_extensions_ReplicaSetList(in, out, s) +// Convert_v1beta2_ReplicaSetList_To_apps_ReplicaSetList is an autogenerated conversion function. 
+func Convert_v1beta2_ReplicaSetList_To_apps_ReplicaSetList(in *v1beta2.ReplicaSetList, out *apps.ReplicaSetList, s conversion.Scope) error { + return autoConvert_v1beta2_ReplicaSetList_To_apps_ReplicaSetList(in, out, s) } -func autoConvert_extensions_ReplicaSetList_To_v1beta2_ReplicaSetList(in *extensions.ReplicaSetList, out *v1beta2.ReplicaSetList, s conversion.Scope) error { +func autoConvert_apps_ReplicaSetList_To_v1beta2_ReplicaSetList(in *apps.ReplicaSetList, out *v1beta2.ReplicaSetList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]v1beta2.ReplicaSet, len(*in)) for i := range *in { - if err := Convert_extensions_ReplicaSet_To_v1beta2_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_apps_ReplicaSet_To_v1beta2_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -1030,12 +1029,12 @@ func autoConvert_extensions_ReplicaSetList_To_v1beta2_ReplicaSetList(in *extensi return nil } -// Convert_extensions_ReplicaSetList_To_v1beta2_ReplicaSetList is an autogenerated conversion function. -func Convert_extensions_ReplicaSetList_To_v1beta2_ReplicaSetList(in *extensions.ReplicaSetList, out *v1beta2.ReplicaSetList, s conversion.Scope) error { - return autoConvert_extensions_ReplicaSetList_To_v1beta2_ReplicaSetList(in, out, s) +// Convert_apps_ReplicaSetList_To_v1beta2_ReplicaSetList is an autogenerated conversion function. +func Convert_apps_ReplicaSetList_To_v1beta2_ReplicaSetList(in *apps.ReplicaSetList, out *v1beta2.ReplicaSetList, s conversion.Scope) error { + return autoConvert_apps_ReplicaSetList_To_v1beta2_ReplicaSetList(in, out, s) } -func autoConvert_v1beta2_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *v1beta2.ReplicaSetSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { +func autoConvert_v1beta2_ReplicaSetSpec_To_apps_ReplicaSetSpec(in *v1beta2.ReplicaSetSpec, out *apps.ReplicaSetSpec, s conversion.Scope) error { if err := metav1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } @@ -1047,7 +1046,7 @@ func autoConvert_v1beta2_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *v1beta2 return nil } -func autoConvert_extensions_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *v1beta2.ReplicaSetSpec, s conversion.Scope) error { +func autoConvert_apps_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(in *apps.ReplicaSetSpec, out *v1beta2.ReplicaSetSpec, s conversion.Scope) error { if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } @@ -1059,22 +1058,22 @@ func autoConvert_extensions_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(in *extensi return nil } -func autoConvert_v1beta2_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in *v1beta2.ReplicaSetStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error { +func autoConvert_v1beta2_ReplicaSetStatus_To_apps_ReplicaSetStatus(in *v1beta2.ReplicaSetStatus, out *apps.ReplicaSetStatus, s conversion.Scope) error { out.Replicas = in.Replicas out.FullyLabeledReplicas = in.FullyLabeledReplicas out.ReadyReplicas = in.ReadyReplicas out.AvailableReplicas = in.AvailableReplicas out.ObservedGeneration = in.ObservedGeneration - out.Conditions = *(*[]extensions.ReplicaSetCondition)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*[]apps.ReplicaSetCondition)(unsafe.Pointer(&in.Conditions)) return nil } -// Convert_v1beta2_ReplicaSetStatus_To_extensions_ReplicaSetStatus is an autogenerated conversion function. 
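The renamed converters keep the exact generated bodies, so their behavior can be exercised directly. Below is a minimal sketch (not part of the diff) of driving one of them by hand; the import aliases are assumptions, and passing a nil `conversion.Scope` relies on the fact that the status converter body shown above only copies scalar fields and reinterprets the Conditions slice, never touching the scope.

```go
package main

import (
	"fmt"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
	apps "k8s.io/kubernetes/pkg/apis/apps"
	internalv1beta2 "k8s.io/kubernetes/pkg/apis/apps/v1beta2"
)

func main() {
	// A versioned status as it would arrive from the API server.
	in := appsv1beta2.ReplicaSetStatus{Replicas: 3, ReadyReplicas: 2}

	// Convert into the internal apps type. A nil scope is safe for this
	// particular converter because its generated body never uses it.
	var out apps.ReplicaSetStatus
	if err := internalv1beta2.Convert_v1beta2_ReplicaSetStatus_To_apps_ReplicaSetStatus(&in, &out, nil); err != nil {
		panic(err)
	}
	fmt.Printf("internal status: %d/%d replicas ready\n", out.ReadyReplicas, out.Replicas)
}
```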
-func Convert_v1beta2_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in *v1beta2.ReplicaSetStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error { - return autoConvert_v1beta2_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in, out, s) +// Convert_v1beta2_ReplicaSetStatus_To_apps_ReplicaSetStatus is an autogenerated conversion function. +func Convert_v1beta2_ReplicaSetStatus_To_apps_ReplicaSetStatus(in *v1beta2.ReplicaSetStatus, out *apps.ReplicaSetStatus, s conversion.Scope) error { + return autoConvert_v1beta2_ReplicaSetStatus_To_apps_ReplicaSetStatus(in, out, s) } -func autoConvert_extensions_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(in *extensions.ReplicaSetStatus, out *v1beta2.ReplicaSetStatus, s conversion.Scope) error { +func autoConvert_apps_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(in *apps.ReplicaSetStatus, out *v1beta2.ReplicaSetStatus, s conversion.Scope) error { out.Replicas = in.Replicas out.FullyLabeledReplicas = in.FullyLabeledReplicas out.ReadyReplicas = in.ReadyReplicas @@ -1084,28 +1083,28 @@ func autoConvert_extensions_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(in *ext return nil } -// Convert_extensions_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus is an autogenerated conversion function. -func Convert_extensions_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(in *extensions.ReplicaSetStatus, out *v1beta2.ReplicaSetStatus, s conversion.Scope) error { - return autoConvert_extensions_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(in, out, s) +// Convert_apps_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus is an autogenerated conversion function. +func Convert_apps_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(in *apps.ReplicaSetStatus, out *v1beta2.ReplicaSetStatus, s conversion.Scope) error { + return autoConvert_apps_ReplicaSetStatus_To_v1beta2_ReplicaSetStatus(in, out, s) } -func autoConvert_v1beta2_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in *v1beta2.RollingUpdateDaemonSet, out *extensions.RollingUpdateDaemonSet, s conversion.Scope) error { +func autoConvert_v1beta2_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in *v1beta2.RollingUpdateDaemonSet, out *apps.RollingUpdateDaemonSet, s conversion.Scope) error { // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) return nil } -func autoConvert_extensions_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(in *extensions.RollingUpdateDaemonSet, out *v1beta2.RollingUpdateDaemonSet, s conversion.Scope) error { +func autoConvert_apps_RollingUpdateDaemonSet_To_v1beta2_RollingUpdateDaemonSet(in *apps.RollingUpdateDaemonSet, out *v1beta2.RollingUpdateDaemonSet, s conversion.Scope) error { // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) return nil } -func autoConvert_v1beta2_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *v1beta2.RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error { +func autoConvert_v1beta2_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *v1beta2.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error { // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) // WARNING: in.MaxSurge requires 
manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) return nil } -func autoConvert_extensions_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *v1beta2.RollingUpdateDeployment, s conversion.Scope) error { +func autoConvert_apps_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *v1beta2.RollingUpdateDeployment, s conversion.Scope) error { // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) // WARNING: in.MaxSurge requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/validation/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/validation/BUILD new file mode 100644 index 000000000000..507558c158ab --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/validation/BUILD @@ -0,0 +1,54 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = ["validation.go"], + importpath = "k8s.io/kubernetes/pkg/apis/apps/validation", + deps = [ + "//pkg/apis/apps:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/validation:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/validation:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["validation_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/apis/apps:go_default_library", + "//pkg/apis/core:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + "//vendor/github.com/davecgh/go-spew/spew:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/validation/validation.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/validation/validation.go new file mode 100644 index 000000000000..7cdf4c2dc27f --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/validation/validation.go @@ -0,0 +1,671 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "fmt" + "strconv" + + apiequality "k8s.io/apimachinery/pkg/api/equality" + apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/kubernetes/pkg/apis/apps" + api "k8s.io/kubernetes/pkg/apis/core" + apivalidation "k8s.io/kubernetes/pkg/apis/core/validation" +) + +// ValidateStatefulSetName can be used to check whether the given StatefulSet name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +func ValidateStatefulSetName(name string, prefix bool) []string { + // TODO: Validate that there's name for the suffix inserted by the pods. + // Currently this is just "-index". In the future we may allow a user + // specified list of suffixes and we need to validate the longest one. + return apimachineryvalidation.NameIsDNSSubdomain(name, prefix) +} + +// Validates the given template and ensures that it is in accordance with the desired selector. +func ValidatePodTemplateSpecForStatefulSet(template *api.PodTemplateSpec, selector labels.Selector, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if template == nil { + allErrs = append(allErrs, field.Required(fldPath, "")) + } else { + if !selector.Empty() { + // Verify that the StatefulSet selector matches the labels in template. + labels := labels.Set(template.Labels) + if !selector.Matches(labels) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("metadata", "labels"), template.Labels, "`selector` does not match template `labels`")) + } + } + // TODO: Add validation for PodSpec, currently this will check volumes, which we know will + // fail. We should really check that the union of the given volumes and volumeClaims match + // volume mounts in the containers. + // allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(template, fldPath)...) + allErrs = append(allErrs, unversionedvalidation.ValidateLabels(template.Labels, fldPath.Child("labels"))...) + allErrs = append(allErrs, apivalidation.ValidateAnnotations(template.Annotations, fldPath.Child("annotations"))...) + allErrs = append(allErrs, apivalidation.ValidatePodSpecificAnnotations(template.Annotations, &template.Spec, fldPath.Child("annotations"))...) + } + return allErrs +} + +// ValidateStatefulSetSpec tests if required fields in the StatefulSet spec are set. 
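ValidatePodTemplateSpecForStatefulSet enforces, among other things, that a non-empty selector matches the template labels. A small sketch of that failure mode (the label values are made up):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/validation/field"
	"k8s.io/kubernetes/pkg/apis/apps/validation"
	api "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	// The selector wants app=db, but the pod template is labelled app=web.
	sel, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{
		MatchLabels: map[string]string{"app": "db"},
	})
	if err != nil {
		panic(err)
	}
	tmpl := api.PodTemplateSpec{
		ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "web"}},
	}
	errs := validation.ValidatePodTemplateSpecForStatefulSet(&tmpl, sel, field.NewPath("spec", "template"))
	for _, e := range errs {
		fmt.Println(e) // reports that `selector` does not match template `labels`
	}
}
```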
+func ValidateStatefulSetSpec(spec *apps.StatefulSetSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + switch spec.PodManagementPolicy { + case "": + allErrs = append(allErrs, field.Required(fldPath.Child("podManagementPolicy"), "")) + case apps.OrderedReadyPodManagement, apps.ParallelPodManagement: + default: + allErrs = append(allErrs, field.Invalid(fldPath.Child("podManagementPolicy"), spec.PodManagementPolicy, fmt.Sprintf("must be '%s' or '%s'", apps.OrderedReadyPodManagement, apps.ParallelPodManagement))) + } + + switch spec.UpdateStrategy.Type { + case "": + allErrs = append(allErrs, field.Required(fldPath.Child("updateStrategy"), "")) + case apps.OnDeleteStatefulSetStrategyType: + if spec.UpdateStrategy.RollingUpdate != nil { + allErrs = append( + allErrs, + field.Invalid( + fldPath.Child("updateStrategy").Child("rollingUpdate"), + spec.UpdateStrategy.RollingUpdate, + fmt.Sprintf("only allowed for updateStrategy '%s'", apps.RollingUpdateStatefulSetStrategyType))) + } + case apps.RollingUpdateStatefulSetStrategyType: + if spec.UpdateStrategy.RollingUpdate != nil { + allErrs = append(allErrs, + apivalidation.ValidateNonnegativeField( + int64(spec.UpdateStrategy.RollingUpdate.Partition), + fldPath.Child("updateStrategy").Child("rollingUpdate").Child("partition"))...) + } + default: + allErrs = append(allErrs, + field.Invalid(fldPath.Child("updateStrategy"), spec.UpdateStrategy, + fmt.Sprintf("must be '%s' or '%s'", + apps.RollingUpdateStatefulSetStrategyType, + apps.OnDeleteStatefulSetStrategyType))) + } + + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...) + if spec.Selector == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("selector"), "")) + } else { + allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...) + if len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is invalid for statefulset")) + } + } + + selector, err := metav1.LabelSelectorAsSelector(spec.Selector) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "")) + } else { + allErrs = append(allErrs, ValidatePodTemplateSpecForStatefulSet(&spec.Template, selector, fldPath.Child("template"))...) + } + + if spec.Template.Spec.RestartPolicy != api.RestartPolicyAlways { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("template", "spec", "restartPolicy"), spec.Template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)})) + } + if spec.Template.Spec.ActiveDeadlineSeconds != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("template", "spec", "activeDeadlineSeconds"), spec.Template.Spec.ActiveDeadlineSeconds, "must not be specified")) + } + + return allErrs +} + +// ValidateStatefulSet validates a StatefulSet. +func ValidateStatefulSet(statefulSet *apps.StatefulSet) field.ErrorList { + allErrs := apivalidation.ValidateObjectMeta(&statefulSet.ObjectMeta, true, ValidateStatefulSetName, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateStatefulSetSpec(&statefulSet.Spec, field.NewPath("spec"))...) + return allErrs +} + +// ValidateStatefulSetUpdate tests if required fields in the StatefulSet are set. 
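Because ValidateStatefulSetSpec checks podManagementPolicy, updateStrategy, selector, and the template together, a zero-value spec trips several of these branches at once. A minimal sketch of running it:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"
	"k8s.io/kubernetes/pkg/apis/apps"
	"k8s.io/kubernetes/pkg/apis/apps/validation"
)

func main() {
	// An almost-empty spec: no podManagementPolicy, no updateStrategy,
	// no selector, and a template whose restartPolicy is unset.
	spec := apps.StatefulSetSpec{Replicas: 1}
	errs := validation.ValidateStatefulSetSpec(&spec, field.NewPath("spec"))
	for _, e := range errs {
		fmt.Println(e) // e.g. spec.podManagementPolicy: Required value
	}
}
```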
+func ValidateStatefulSetUpdate(statefulSet, oldStatefulSet *apps.StatefulSet) field.ErrorList { + allErrs := apivalidation.ValidateObjectMetaUpdate(&statefulSet.ObjectMeta, &oldStatefulSet.ObjectMeta, field.NewPath("metadata")) + + restoreReplicas := statefulSet.Spec.Replicas + statefulSet.Spec.Replicas = oldStatefulSet.Spec.Replicas + + restoreTemplate := statefulSet.Spec.Template + statefulSet.Spec.Template = oldStatefulSet.Spec.Template + + restoreStrategy := statefulSet.Spec.UpdateStrategy + statefulSet.Spec.UpdateStrategy = oldStatefulSet.Spec.UpdateStrategy + + if !apiequality.Semantic.DeepEqual(statefulSet.Spec, oldStatefulSet.Spec) { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "updates to statefulset spec for fields other than 'replicas', 'template', and 'updateStrategy' are forbidden")) + } + statefulSet.Spec.Replicas = restoreReplicas + statefulSet.Spec.Template = restoreTemplate + statefulSet.Spec.UpdateStrategy = restoreStrategy + + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(statefulSet.Spec.Replicas), field.NewPath("spec", "replicas"))...) + return allErrs +} + +// ValidateStatefulSetStatus validates a StatefulSetStatus. +func ValidateStatefulSetStatus(status *apps.StatefulSetStatus, fieldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.Replicas), fieldPath.Child("replicas"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.ReadyReplicas), fieldPath.Child("readyReplicas"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.CurrentReplicas), fieldPath.Child("currentReplicas"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.UpdatedReplicas), fieldPath.Child("updatedReplicas"))...) + if status.ObservedGeneration != nil { + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*status.ObservedGeneration), fieldPath.Child("observedGeneration"))...) + } + if status.CollisionCount != nil { + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*status.CollisionCount), fieldPath.Child("collisionCount"))...) + } + + msg := "cannot be greater than status.replicas" + if status.ReadyReplicas > status.Replicas { + allErrs = append(allErrs, field.Invalid(fieldPath.Child("readyReplicas"), status.ReadyReplicas, msg)) + } + if status.CurrentReplicas > status.Replicas { + allErrs = append(allErrs, field.Invalid(fieldPath.Child("currentReplicas"), status.CurrentReplicas, msg)) + } + if status.UpdatedReplicas > status.Replicas { + allErrs = append(allErrs, field.Invalid(fieldPath.Child("updatedReplicas"), status.UpdatedReplicas, msg)) + } + + return allErrs +} + +// ValidateStatefulSetStatusUpdate tests if required fields in the StatefulSet are set. +func ValidateStatefulSetStatusUpdate(statefulSet, oldStatefulSet *apps.StatefulSet) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, ValidateStatefulSetStatus(&statefulSet.Status, field.NewPath("status"))...) + allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&statefulSet.ObjectMeta, &oldStatefulSet.ObjectMeta, field.NewPath("metadata"))...) + // TODO: Validate status. 
+ if apivalidation.IsDecremented(statefulSet.Status.CollisionCount, oldStatefulSet.Status.CollisionCount) { + value := int32(0) + if statefulSet.Status.CollisionCount != nil { + value = *statefulSet.Status.CollisionCount + } + allErrs = append(allErrs, field.Invalid(field.NewPath("status").Child("collisionCount"), value, "cannot be decremented")) + } + return allErrs +} + +// ValidateControllerRevisionName can be used to check whether the given ControllerRevision name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateControllerRevisionName = apimachineryvalidation.NameIsDNSSubdomain + +// ValidateControllerRevision collects errors for the fields of state and returns those errors as an ErrorList. If the +// returned list is empty, state is valid. Validation is performed to ensure that state is a valid ObjectMeta, its name +// is valid, and that it doesn't exceed the MaxControllerRevisionSize. +func ValidateControllerRevision(revision *apps.ControllerRevision) field.ErrorList { + errs := field.ErrorList{} + + errs = append(errs, apivalidation.ValidateObjectMeta(&revision.ObjectMeta, true, ValidateControllerRevisionName, field.NewPath("metadata"))...) + if revision.Data == nil { + errs = append(errs, field.Required(field.NewPath("data"), "data is mandatory")) + } + errs = append(errs, apivalidation.ValidateNonnegativeField(revision.Revision, field.NewPath("revision"))...) + return errs +} + +// ValidateControllerRevisionUpdate collects errors pertaining to the mutation of an ControllerRevision Object. If the +// returned ErrorList is empty the update operation is valid. Any mutation to the ControllerRevision's Data or Revision +// is considered to be invalid. +func ValidateControllerRevisionUpdate(newHistory, oldHistory *apps.ControllerRevision) field.ErrorList { + errs := field.ErrorList{} + + errs = append(errs, apivalidation.ValidateObjectMetaUpdate(&newHistory.ObjectMeta, &oldHistory.ObjectMeta, field.NewPath("metadata"))...) + errs = append(errs, ValidateControllerRevision(newHistory)...) + errs = append(errs, apivalidation.ValidateImmutableField(newHistory.Data, oldHistory.Data, field.NewPath("data"))...) + return errs +} + +// ValidateDaemonSet tests if required fields in the DaemonSet are set. +func ValidateDaemonSet(ds *apps.DaemonSet) field.ErrorList { + allErrs := apivalidation.ValidateObjectMeta(&ds.ObjectMeta, true, ValidateDaemonSetName, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateDaemonSetSpec(&ds.Spec, field.NewPath("spec"))...) + return allErrs +} + +// ValidateDaemonSetUpdate tests if required fields in the DaemonSet are set. +func ValidateDaemonSetUpdate(ds, oldDS *apps.DaemonSet) field.ErrorList { + allErrs := apivalidation.ValidateObjectMetaUpdate(&ds.ObjectMeta, &oldDS.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateDaemonSetSpecUpdate(&ds.Spec, &oldDS.Spec, field.NewPath("spec"))...) + allErrs = append(allErrs, ValidateDaemonSetSpec(&ds.Spec, field.NewPath("spec"))...) 
+ return allErrs +} + +func ValidateDaemonSetSpecUpdate(newSpec, oldSpec *apps.DaemonSetSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + // TemplateGeneration shouldn't be decremented + if newSpec.TemplateGeneration < oldSpec.TemplateGeneration { + allErrs = append(allErrs, field.Invalid(fldPath.Child("templateGeneration"), newSpec.TemplateGeneration, "must not be decremented")) + } + + // TemplateGeneration should be increased when and only when template is changed + templateUpdated := !apiequality.Semantic.DeepEqual(newSpec.Template, oldSpec.Template) + if newSpec.TemplateGeneration == oldSpec.TemplateGeneration && templateUpdated { + allErrs = append(allErrs, field.Invalid(fldPath.Child("templateGeneration"), newSpec.TemplateGeneration, "must be incremented upon template update")) + } else if newSpec.TemplateGeneration > oldSpec.TemplateGeneration && !templateUpdated { + allErrs = append(allErrs, field.Invalid(fldPath.Child("templateGeneration"), newSpec.TemplateGeneration, "must not be incremented without template update")) + } + + return allErrs +} + +// validateDaemonSetStatus validates a DaemonSetStatus +func validateDaemonSetStatus(status *apps.DaemonSetStatus, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.CurrentNumberScheduled), fldPath.Child("currentNumberScheduled"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberMisscheduled), fldPath.Child("numberMisscheduled"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.DesiredNumberScheduled), fldPath.Child("desiredNumberScheduled"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberReady), fldPath.Child("numberReady"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(status.ObservedGeneration, fldPath.Child("observedGeneration"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.UpdatedNumberScheduled), fldPath.Child("updatedNumberScheduled"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberAvailable), fldPath.Child("numberAvailable"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberUnavailable), fldPath.Child("numberUnavailable"))...) + if status.CollisionCount != nil { + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*status.CollisionCount), fldPath.Child("collisionCount"))...) + } + return allErrs +} + +// ValidateDaemonSetStatus validates tests if required fields in the DaemonSet Status section +func ValidateDaemonSetStatusUpdate(ds, oldDS *apps.DaemonSet) field.ErrorList { + allErrs := apivalidation.ValidateObjectMetaUpdate(&ds.ObjectMeta, &oldDS.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, validateDaemonSetStatus(&ds.Status, field.NewPath("status"))...) + if apivalidation.IsDecremented(ds.Status.CollisionCount, oldDS.Status.CollisionCount) { + value := int32(0) + if ds.Status.CollisionCount != nil { + value = *ds.Status.CollisionCount + } + allErrs = append(allErrs, field.Invalid(field.NewPath("status").Child("collisionCount"), value, "cannot be decremented")) + } + return allErrs +} + +// ValidateDaemonSetSpec tests if required fields in the DaemonSetSpec are set. 
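The TemplateGeneration rules above are two-sided: the counter may only move when the template moves, and must move whenever it does. A sketch of the second failure mode, bumping the counter without touching the template (the generation values are arbitrary):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"
	"k8s.io/kubernetes/pkg/apis/apps"
	"k8s.io/kubernetes/pkg/apis/apps/validation"
)

func main() {
	oldSpec := apps.DaemonSetSpec{TemplateGeneration: 4}
	updated := oldSpec
	updated.TemplateGeneration = 5 // incremented, but the template is unchanged

	errs := validation.ValidateDaemonSetSpecUpdate(&updated, &oldSpec, field.NewPath("spec"))
	for _, e := range errs {
		fmt.Println(e) // must not be incremented without template update
	}
}
```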
+func ValidateDaemonSetSpec(spec *apps.DaemonSetSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...) + + selector, err := metav1.LabelSelectorAsSelector(spec.Selector) + if err == nil && !selector.Matches(labels.Set(spec.Template.Labels)) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("template", "metadata", "labels"), spec.Template.Labels, "`selector` does not match template `labels`")) + } + if spec.Selector != nil && len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is invalid for daemonset")) + } + + allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(&spec.Template, fldPath.Child("template"))...) + // Daemons typically run on more than one node, so mark Read-Write persistent disks as invalid. + allErrs = append(allErrs, apivalidation.ValidateReadOnlyPersistentDisks(spec.Template.Spec.Volumes, fldPath.Child("template", "spec", "volumes"))...) + // RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec(). + if spec.Template.Spec.RestartPolicy != api.RestartPolicyAlways { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("template", "spec", "restartPolicy"), spec.Template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)})) + } + if spec.Template.Spec.ActiveDeadlineSeconds != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("template", "spec", "activeDeadlineSeconds"), spec.Template.Spec.ActiveDeadlineSeconds, "must not be specified")) + } + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.TemplateGeneration), fldPath.Child("templateGeneration"))...) + + allErrs = append(allErrs, ValidateDaemonSetUpdateStrategy(&spec.UpdateStrategy, fldPath.Child("updateStrategy"))...) + if spec.RevisionHistoryLimit != nil { + // zero is a valid RevisionHistoryLimit + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.RevisionHistoryLimit), fldPath.Child("revisionHistoryLimit"))...) + } + return allErrs +} + +func ValidateRollingUpdateDaemonSet(rollingUpdate *apps.RollingUpdateDaemonSet, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...) + if getIntOrPercentValue(rollingUpdate.MaxUnavailable) == 0 { + // MaxUnavailable cannot be 0. + allErrs = append(allErrs, field.Invalid(fldPath.Child("maxUnavailable"), rollingUpdate.MaxUnavailable, "cannot be 0")) + } + // Validate that MaxUnavailable is not more than 100%. + allErrs = append(allErrs, IsNotMoreThan100Percent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...) + return allErrs +} + +func ValidateDaemonSetUpdateStrategy(strategy *apps.DaemonSetUpdateStrategy, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + switch strategy.Type { + case apps.OnDeleteDaemonSetStrategyType: + case apps.RollingUpdateDaemonSetStrategyType: + // Make sure RollingUpdate field isn't nil. 
+ if strategy.RollingUpdate == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("rollingUpdate"), "")) + return allErrs + } + allErrs = append(allErrs, ValidateRollingUpdateDaemonSet(strategy.RollingUpdate, fldPath.Child("rollingUpdate"))...) + default: + validValues := []string{string(apps.RollingUpdateDaemonSetStrategyType), string(apps.OnDeleteDaemonSetStrategyType)} + allErrs = append(allErrs, field.NotSupported(fldPath, strategy, validValues)) + } + return allErrs +} + +// ValidateDaemonSetName can be used to check whether the given daemon set name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateDaemonSetName = apimachineryvalidation.NameIsDNSSubdomain + +// Validates that the given name can be used as a deployment name. +var ValidateDeploymentName = apimachineryvalidation.NameIsDNSSubdomain + +func ValidatePositiveIntOrPercent(intOrPercent intstr.IntOrString, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + switch intOrPercent.Type { + case intstr.String: + for _, msg := range validation.IsValidPercent(intOrPercent.StrVal) { + allErrs = append(allErrs, field.Invalid(fldPath, intOrPercent, msg)) + } + case intstr.Int: + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(intOrPercent.IntValue()), fldPath)...) + default: + allErrs = append(allErrs, field.Invalid(fldPath, intOrPercent, "must be an integer or percentage (e.g '5%%')")) + } + return allErrs +} + +func getPercentValue(intOrStringValue intstr.IntOrString) (int, bool) { + if intOrStringValue.Type != intstr.String { + return 0, false + } + if len(validation.IsValidPercent(intOrStringValue.StrVal)) != 0 { + return 0, false + } + value, _ := strconv.Atoi(intOrStringValue.StrVal[:len(intOrStringValue.StrVal)-1]) + return value, true +} + +func getIntOrPercentValue(intOrStringValue intstr.IntOrString) int { + value, isPercent := getPercentValue(intOrStringValue) + if isPercent { + return value + } + return intOrStringValue.IntValue() +} + +func IsNotMoreThan100Percent(intOrStringValue intstr.IntOrString, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + value, isPercent := getPercentValue(intOrStringValue) + if !isPercent || value <= 100 { + return nil + } + allErrs = append(allErrs, field.Invalid(fldPath, intOrStringValue, "must not be greater than 100%")) + return allErrs +} + +func ValidateRollingUpdateDeployment(rollingUpdate *apps.RollingUpdateDeployment, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...) + allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxSurge, fldPath.Child("maxSurge"))...) + if getIntOrPercentValue(rollingUpdate.MaxUnavailable) == 0 && getIntOrPercentValue(rollingUpdate.MaxSurge) == 0 { + // Both MaxSurge and MaxUnavailable cannot be zero. + allErrs = append(allErrs, field.Invalid(fldPath.Child("maxUnavailable"), rollingUpdate.MaxUnavailable, "may not be 0 when `maxSurge` is 0")) + } + // Validate that MaxUnavailable is not more than 100%. + allErrs = append(allErrs, IsNotMoreThan100Percent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...) 
+ return allErrs +} + +func ValidateDeploymentStrategy(strategy *apps.DeploymentStrategy, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + switch strategy.Type { + case apps.RecreateDeploymentStrategyType: + if strategy.RollingUpdate != nil { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("rollingUpdate"), "may not be specified when strategy `type` is '"+string(apps.RecreateDeploymentStrategyType+"'"))) + } + case apps.RollingUpdateDeploymentStrategyType: + // This should never happen since it's set and checked in defaults.go + if strategy.RollingUpdate == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("rollingUpdate"), "this should be defaulted and never be nil")) + } else { + allErrs = append(allErrs, ValidateRollingUpdateDeployment(strategy.RollingUpdate, fldPath.Child("rollingUpdate"))...) + } + default: + validValues := []string{string(apps.RecreateDeploymentStrategyType), string(apps.RollingUpdateDeploymentStrategyType)} + allErrs = append(allErrs, field.NotSupported(fldPath, strategy, validValues)) + } + return allErrs +} + +func ValidateRollback(rollback *apps.RollbackConfig, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + v := rollback.Revision + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(v), fldPath.Child("version"))...) + return allErrs +} + +// Validates given deployment spec. +func ValidateDeploymentSpec(spec *apps.DeploymentSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...) + + if spec.Selector == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("selector"), "")) + } else { + allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...) + if len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is invalid for deployment")) + } + } + + selector, err := metav1.LabelSelectorAsSelector(spec.Selector) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "invalid label selector")) + } else { + allErrs = append(allErrs, ValidatePodTemplateSpecForReplicaSet(&spec.Template, selector, spec.Replicas, fldPath.Child("template"))...) + } + + allErrs = append(allErrs, ValidateDeploymentStrategy(&spec.Strategy, fldPath.Child("strategy"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...) + if spec.RevisionHistoryLimit != nil { + // zero is a valid RevisionHistoryLimit + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.RevisionHistoryLimit), fldPath.Child("revisionHistoryLimit"))...) + } + if spec.RollbackTo != nil { + allErrs = append(allErrs, ValidateRollback(spec.RollbackTo, fldPath.Child("rollback"))...) + } + if spec.ProgressDeadlineSeconds != nil { + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.ProgressDeadlineSeconds), fldPath.Child("progressDeadlineSeconds"))...) + if *spec.ProgressDeadlineSeconds <= spec.MinReadySeconds { + allErrs = append(allErrs, field.Invalid(fldPath.Child("progressDeadlineSeconds"), spec.ProgressDeadlineSeconds, "must be greater than minReadySeconds")) + } + } + return allErrs +} + +// Validates given deployment status. 
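ValidateRollingUpdateDeployment combines the intOrString helpers defined above: both fields must be non-negative integers or valid percent strings, they must not both be zero, and maxUnavailable is capped at 100%. A sketch of the cap being enforced (the values are arbitrary):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/validation/field"
	"k8s.io/kubernetes/pkg/apis/apps"
	"k8s.io/kubernetes/pkg/apis/apps/validation"
)

func main() {
	ru := apps.RollingUpdateDeployment{
		MaxUnavailable: intstr.FromString("150%"), // a valid percent string, but over the cap
		MaxSurge:       intstr.FromInt(1),
	}
	errs := validation.ValidateRollingUpdateDeployment(&ru, field.NewPath("rollingUpdate"))
	for _, e := range errs {
		fmt.Println(e) // must not be greater than 100%
	}
}
```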
+func ValidateDeploymentStatus(status *apps.DeploymentStatus, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(status.ObservedGeneration, fldPath.Child("observedGeneration"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.Replicas), fldPath.Child("replicas"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.UpdatedReplicas), fldPath.Child("updatedReplicas"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.ReadyReplicas), fldPath.Child("readyReplicas"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.AvailableReplicas), fldPath.Child("availableReplicas"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.UnavailableReplicas), fldPath.Child("unavailableReplicas"))...) + if status.CollisionCount != nil { + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*status.CollisionCount), fldPath.Child("collisionCount"))...) + } + msg := "cannot be greater than status.replicas" + if status.UpdatedReplicas > status.Replicas { + allErrs = append(allErrs, field.Invalid(fldPath.Child("updatedReplicas"), status.UpdatedReplicas, msg)) + } + if status.ReadyReplicas > status.Replicas { + allErrs = append(allErrs, field.Invalid(fldPath.Child("readyReplicas"), status.ReadyReplicas, msg)) + } + if status.AvailableReplicas > status.Replicas { + allErrs = append(allErrs, field.Invalid(fldPath.Child("availableReplicas"), status.AvailableReplicas, msg)) + } + if status.AvailableReplicas > status.ReadyReplicas { + allErrs = append(allErrs, field.Invalid(fldPath.Child("availableReplicas"), status.AvailableReplicas, "cannot be greater than readyReplicas")) + } + return allErrs +} + +func ValidateDeploymentUpdate(update, old *apps.Deployment) field.ErrorList { + allErrs := apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateDeploymentSpec(&update.Spec, field.NewPath("spec"))...) + return allErrs +} + +func ValidateDeploymentStatusUpdate(update, old *apps.Deployment) field.ErrorList { + allErrs := apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata")) + fldPath := field.NewPath("status") + allErrs = append(allErrs, ValidateDeploymentStatus(&update.Status, fldPath)...) + if apivalidation.IsDecremented(update.Status.CollisionCount, old.Status.CollisionCount) { + value := int32(0) + if update.Status.CollisionCount != nil { + value = *update.Status.CollisionCount + } + allErrs = append(allErrs, field.Invalid(fldPath.Child("collisionCount"), value, "cannot be decremented")) + } + return allErrs +} + +func ValidateDeployment(obj *apps.Deployment) field.ErrorList { + allErrs := apivalidation.ValidateObjectMeta(&obj.ObjectMeta, true, ValidateDeploymentName, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateDeploymentSpec(&obj.Spec, field.NewPath("spec"))...) + return allErrs +} + +func ValidateDeploymentRollback(obj *apps.DeploymentRollback) field.ErrorList { + allErrs := apivalidation.ValidateAnnotations(obj.UpdatedAnnotations, field.NewPath("updatedAnnotations")) + if len(obj.Name) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("name"), "name is required")) + } + allErrs = append(allErrs, ValidateRollback(&obj.RollbackTo, field.NewPath("rollback"))...) 
+ return allErrs +} + +// ValidateReplicaSetName can be used to check whether the given ReplicaSet +// name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateReplicaSetName = apimachineryvalidation.NameIsDNSSubdomain + +// ValidateReplicaSet tests if required fields in the ReplicaSet are set. +func ValidateReplicaSet(rs *apps.ReplicaSet) field.ErrorList { + allErrs := apivalidation.ValidateObjectMeta(&rs.ObjectMeta, true, ValidateReplicaSetName, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateReplicaSetSpec(&rs.Spec, field.NewPath("spec"))...) + return allErrs +} + +// ValidateReplicaSetUpdate tests if required fields in the ReplicaSet are set. +func ValidateReplicaSetUpdate(rs, oldRs *apps.ReplicaSet) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&rs.ObjectMeta, &oldRs.ObjectMeta, field.NewPath("metadata"))...) + allErrs = append(allErrs, ValidateReplicaSetSpec(&rs.Spec, field.NewPath("spec"))...) + return allErrs +} + +// ValidateReplicaSetStatusUpdate tests if required fields in the ReplicaSet are set. +func ValidateReplicaSetStatusUpdate(rs, oldRs *apps.ReplicaSet) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&rs.ObjectMeta, &oldRs.ObjectMeta, field.NewPath("metadata"))...) + allErrs = append(allErrs, ValidateReplicaSetStatus(rs.Status, field.NewPath("status"))...) + return allErrs +} + +func ValidateReplicaSetStatus(status apps.ReplicaSetStatus, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.Replicas), fldPath.Child("replicas"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.FullyLabeledReplicas), fldPath.Child("fullyLabeledReplicas"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.ReadyReplicas), fldPath.Child("readyReplicas"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.AvailableReplicas), fldPath.Child("availableReplicas"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.ObservedGeneration), fldPath.Child("observedGeneration"))...) + msg := "cannot be greater than status.replicas" + if status.FullyLabeledReplicas > status.Replicas { + allErrs = append(allErrs, field.Invalid(fldPath.Child("fullyLabeledReplicas"), status.FullyLabeledReplicas, msg)) + } + if status.ReadyReplicas > status.Replicas { + allErrs = append(allErrs, field.Invalid(fldPath.Child("readyReplicas"), status.ReadyReplicas, msg)) + } + if status.AvailableReplicas > status.Replicas { + allErrs = append(allErrs, field.Invalid(fldPath.Child("availableReplicas"), status.AvailableReplicas, msg)) + } + if status.AvailableReplicas > status.ReadyReplicas { + allErrs = append(allErrs, field.Invalid(fldPath.Child("availableReplicas"), status.AvailableReplicas, "cannot be greater than readyReplicas")) + } + return allErrs +} + +// ValidateReplicaSetSpec tests if required fields in the ReplicaSet spec are set. +func ValidateReplicaSetSpec(spec *apps.ReplicaSetSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...) 
+ allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...) + + if spec.Selector == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("selector"), "")) + } else { + allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...) + if len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is invalid for deployment")) + } + } + + selector, err := metav1.LabelSelectorAsSelector(spec.Selector) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "invalid label selector")) + } else { + allErrs = append(allErrs, ValidatePodTemplateSpecForReplicaSet(&spec.Template, selector, spec.Replicas, fldPath.Child("template"))...) + } + return allErrs +} + +// Validates the given template and ensures that it is in accordance with the desired selector and replicas. +func ValidatePodTemplateSpecForReplicaSet(template *api.PodTemplateSpec, selector labels.Selector, replicas int32, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if template == nil { + allErrs = append(allErrs, field.Required(fldPath, "")) + } else { + if !selector.Empty() { + // Verify that the ReplicaSet selector matches the labels in template. + labels := labels.Set(template.Labels) + if !selector.Matches(labels) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("metadata", "labels"), template.Labels, "`selector` does not match template `labels`")) + } + } + allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(template, fldPath)...) + if replicas > 1 { + allErrs = append(allErrs, apivalidation.ValidateReadOnlyPersistentDisks(template.Spec.Volumes, fldPath.Child("spec", "volumes"))...) + } + // RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec(). + if template.Spec.RestartPolicy != api.RestartPolicyAlways { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("spec", "restartPolicy"), template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)})) + } + if template.Spec.ActiveDeadlineSeconds != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("spec", "activeDeadlineSeconds"), template.Spec.ActiveDeadlineSeconds, "must not be specified")) + } + } + return allErrs +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/zz_generated.deepcopy.go index ac64c5c12e6e..4b4e0e6634f7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/apps/zz_generated.deepcopy.go @@ -88,6 +88,534 @@ func (in *ControllerRevisionList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSet) DeepCopyInto(out *DaemonSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSet. 
+func (in *DaemonSet) DeepCopy() *DaemonSet { + if in == nil { + return nil + } + out := new(DaemonSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DaemonSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition. +func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { + if in == nil { + return nil + } + out := new(DaemonSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DaemonSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetList. +func (in *DaemonSetList) DeepCopy() *DaemonSetList { + if in == nil { + return nil + } + out := new(DaemonSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DaemonSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetSpec) DeepCopyInto(out *DaemonSetSpec) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + in.Template.DeepCopyInto(&out.Template) + in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetSpec. +func (in *DaemonSetSpec) DeepCopy() *DaemonSetSpec { + if in == nil { + return nil + } + out := new(DaemonSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) { + *out = *in + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + *out = new(int32) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DaemonSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetStatus. 
+func (in *DaemonSetStatus) DeepCopy() *DaemonSetStatus { + if in == nil { + return nil + } + out := new(DaemonSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetUpdateStrategy) DeepCopyInto(out *DaemonSetUpdateStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDaemonSet) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetUpdateStrategy. +func (in *DaemonSetUpdateStrategy) DeepCopy() *DaemonSetUpdateStrategy { + if in == nil { + return nil + } + out := new(DaemonSetUpdateStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Deployment) DeepCopyInto(out *Deployment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment. +func (in *Deployment) DeepCopy() *Deployment { + if in == nil { + return nil + } + out := new(Deployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Deployment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition. +func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { + if in == nil { + return nil + } + out := new(DeploymentCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Deployment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList. +func (in *DeploymentList) DeepCopy() *DeploymentList { + if in == nil { + return nil + } + out := new(DeploymentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentRollback) DeepCopyInto(out *DeploymentRollback) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.UpdatedAnnotations != nil { + in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.RollbackTo = in.RollbackTo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentRollback. +func (in *DeploymentRollback) DeepCopy() *DeploymentRollback { + if in == nil { + return nil + } + out := new(DeploymentRollback) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentRollback) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + in.Template.DeepCopyInto(&out.Template) + in.Strategy.DeepCopyInto(&out.Strategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + if in.RollbackTo != nil { + in, out := &in.RollbackTo, &out.RollbackTo + *out = new(RollbackConfig) + **out = **in + } + if in.ProgressDeadlineSeconds != nil { + in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec. +func (in *DeploymentSpec) DeepCopy() *DeploymentSpec { + if in == nil { + return nil + } + out := new(DeploymentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DeploymentCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus. +func (in *DeploymentStatus) DeepCopy() *DeploymentStatus { + if in == nil { + return nil + } + out := new(DeploymentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDeployment) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy. +func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy { + if in == nil { + return nil + } + out := new(DeploymentStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
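The generated DeepCopy functions exist precisely so that pointer and slice fields stop aliasing the original object. A small sketch using DeploymentSpec.RevisionHistoryLimit, whose pointer is re-allocated by the DeepCopyInto shown above:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/apps"
)

func main() {
	limit := int32(10)
	orig := apps.DeploymentSpec{RevisionHistoryLimit: &limit}

	// DeepCopy allocates a fresh *int32, so mutating the copy
	// leaves the original untouched.
	cp := orig.DeepCopy()
	*cp.RevisionHistoryLimit = 3

	fmt.Println(*orig.RevisionHistoryLimit, *cp.RevisionHistoryLimit) // 10 3
}
```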
+func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSet. +func (in *ReplicaSet) DeepCopy() *ReplicaSet { + if in == nil { + return nil + } + out := new(ReplicaSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicaSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetCondition) DeepCopyInto(out *ReplicaSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetCondition. +func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition { + if in == nil { + return nil + } + out := new(ReplicaSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicaSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetList. +func (in *ReplicaSetList) DeepCopy() *ReplicaSetList { + if in == nil { + return nil + } + out := new(ReplicaSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicaSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetSpec) DeepCopyInto(out *ReplicaSetSpec) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetSpec. +func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec { + if in == nil { + return nil + } + out := new(ReplicaSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ReplicaSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetStatus. 
+func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus { + if in == nil { + return nil + } + out := new(ReplicaSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollbackConfig) DeepCopyInto(out *RollbackConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollbackConfig. +func (in *RollbackConfig) DeepCopy() *RollbackConfig { + if in == nil { + return nil + } + out := new(RollbackConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateDaemonSet) DeepCopyInto(out *RollingUpdateDaemonSet) { + *out = *in + out.MaxUnavailable = in.MaxUnavailable + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDaemonSet. +func (in *RollingUpdateDaemonSet) DeepCopy() *RollingUpdateDaemonSet { + if in == nil { + return nil + } + out := new(RollingUpdateDaemonSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) { + *out = *in + out.MaxUnavailable = in.MaxUnavailable + out.MaxSurge = in.MaxSurge + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment. +func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment { + if in == nil { + return nil + } + out := new(RollingUpdateDeployment) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
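For reviewers: the block above vendors the generated deep-copy helpers for the extensions API types, and the contract is exactly what the comments state — DeepCopyInto writes into an existing, non-nil receiver, while DeepCopy allocates a fresh object whose pointer, map, and slice fields share no memory with the original. A minimal sketch of why callers rely on this, assuming the vendored internal package k8s.io/kubernetes/pkg/apis/extensions (the field values are illustrative):

```go
package main

import (
	"fmt"

	extensions "k8s.io/kubernetes/pkg/apis/extensions"
)

func main() {
	limit := int32(5)
	orig := &extensions.Deployment{}
	orig.Spec.RevisionHistoryLimit = &limit

	// DeepCopy allocates a new object and recursively copies every pointer,
	// map, and slice field, so the copy shares no memory with the original.
	copied := orig.DeepCopy()
	*copied.Spec.RevisionHistoryLimit = 10

	fmt.Println(*orig.Spec.RevisionHistoryLimit)   // 5: the original is untouched
	fmt.Println(*copied.Spec.RevisionHistoryLimit) // 10
}
```

The same pattern repeats for every type in this file; only the per-field copying differs.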
func (in *RollingUpdateStatefulSetStrategy) DeepCopyInto(out *RollingUpdateStatefulSetStrategy) { *out = *in diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/BUILD new file mode 100644 index 000000000000..65913a1c996f --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/BUILD @@ -0,0 +1,38 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "register.go", + "types.go", + "zz_generated.deepcopy.go", + ], + importpath = "k8s.io/kubernetes/pkg/apis/auditregistration", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/apis/auditregistration/fuzzer:all-srcs", + "//pkg/apis/auditregistration/install:all-srcs", + "//pkg/apis/auditregistration/v1alpha1:all-srcs", + "//pkg/apis/auditregistration/validation:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/doc.go new file mode 100644 index 000000000000..ea2796e6aaf1 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package +// +groupName=auditregistration.k8s.io + +package auditregistration diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/install/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/install/BUILD new file mode 100644 index 000000000000..09172ed23c97 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/install/BUILD @@ -0,0 +1,29 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["install.go"], + importpath = "k8s.io/kubernetes/pkg/apis/auditregistration/install", + visibility = ["//visibility:public"], + deps = [ + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/auditregistration:go_default_library", + "//pkg/apis/auditregistration/v1alpha1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/install/install.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/install/install.go new file mode 100644 index 000000000000..ffb905cd131d --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/install/install.go @@ -0,0 +1,38 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install adds the experimental API group, making it available as +// an option to all of the API encoding/decoding machinery. +package install + +import ( + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/kubernetes/pkg/api/legacyscheme" + "k8s.io/kubernetes/pkg/apis/auditregistration" + "k8s.io/kubernetes/pkg/apis/auditregistration/v1alpha1" +) + +func init() { + Install(legacyscheme.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(scheme *runtime.Scheme) { + utilruntime.Must(auditregistration.AddToScheme(scheme)) + utilruntime.Must(v1alpha1.AddToScheme(scheme)) + utilruntime.Must(scheme.SetVersionPriority(v1alpha1.SchemeGroupVersion)) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/register.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/register.go new file mode 100644 index 000000000000..ebaa38109482 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package auditregistration + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name used in this package +const GroupName = "auditregistration.k8s.io" + +// SchemeGroupVersion is the group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder for audit registration + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme audit registration + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &AuditSink{}, + &AuditSinkList{}, + ) + return nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/types.go new file mode 100644 index 000000000000..5ae4aa51336e --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/types.go @@ -0,0 +1,196 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:openapi-gen=true + +package auditregistration + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Level defines the amount of information logged during auditing +type Level string + +// Valid audit levels +const ( + // LevelNone disables auditing + LevelNone Level = "None" + // LevelMetadata provides the basic level of auditing. + LevelMetadata Level = "Metadata" + // LevelRequest provides Metadata level of auditing, and additionally + // logs the request object (does not apply for non-resource requests). + LevelRequest Level = "Request" + // LevelRequestResponse provides Request level of auditing, and additionally + // logs the response object (does not apply for non-resource requests and watches). + LevelRequestResponse Level = "RequestResponse" +) + +// Stage defines the stages in request handling during which audit events may be generated. +type Stage string + +// Valid audit stages. +const ( + // The stage for events generated after the audit handler receives the request, but before it + // is delegated down the handler chain.
+ StageRequestReceived = "RequestReceived" + // The stage for events generated after the response headers are sent, but before the response body + // is sent. This stage is only generated for long-running requests (e.g. watch). + StageResponseStarted = "ResponseStarted" + // The stage for events generated after the response body has been completed, and no more bytes + // will be sent. + StageResponseComplete = "ResponseComplete" + // The stage for events generated when a panic occurred. + StagePanic = "Panic" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AuditSink represents a cluster level sink for audit data +type AuditSink struct { + metav1.TypeMeta + + // +optional + metav1.ObjectMeta + + // Spec defines the audit sink spec + Spec AuditSinkSpec +} + +// AuditSinkSpec is the spec for the audit sink object +type AuditSinkSpec struct { + // Policy defines the policy for selecting which events should be sent to the backend + // required + Policy Policy + + // Webhook to send events + // required + Webhook Webhook +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AuditSinkList is a list of audit sink items. +type AuditSinkList struct { + metav1.TypeMeta + + // +optional + metav1.ListMeta + + // List of audit configurations. + Items []AuditSink +} + +// Policy defines the configuration of how audit events are logged +type Policy struct { + // The Level that all requests are recorded at. + // available options: None, Metadata, Request, RequestResponse + // required + Level Level + + // Stages is a list of stages for which events are created. + // +optional + Stages []Stage +} + +// Webhook holds the configuration of the webhooks +type Webhook struct { + // Throttle holds the options for throttling the webhook + // +optional + Throttle *WebhookThrottleConfig + + // ClientConfig holds the connection parameters for the webhook + // required + ClientConfig WebhookClientConfig +} + +// WebhookThrottleConfig holds the configuration for throttling +type WebhookThrottleConfig struct { + // QPS is the maximum number of batches per second + // default 10 QPS + // +optional + QPS *int64 + + // Burst is the maximum number of events sent at the same moment + // default 15 + // +optional + Burst *int64 +} + +// WebhookClientConfig contains the information to make a connection with the webhook +type WebhookClientConfig struct { + // `url` gives the location of the webhook, in standard URL form + // (`scheme://host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier.
+ // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + URL *string + + // `service` is a reference to the service for this webhook. Either + // `service` or `url` must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // Port 443 will be used if it is open, otherwise it is an error. + // + // +optional + Service *ServiceReference + + // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. + // +optional + CABundle []byte +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +type ServiceReference struct { + // `namespace` is the namespace of the service. + // Required + Namespace string + + // `name` is the name of the service. + // Required + Name string + + // `path` is an optional URL path which will be sent in any request to + // this service. + // +optional + Path *string +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/v1alpha1/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/v1alpha1/BUILD new file mode 100644 index 000000000000..5c5acd574491 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/v1alpha1/BUILD @@ -0,0 +1,50 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "defaults.go", + "doc.go", + "register.go", + "zz_generated.conversion.go", + "zz_generated.defaults.go", + ], + importpath = "k8s.io/kubernetes/pkg/apis/auditregistration/v1alpha1", + visibility = ["//visibility:public"], + deps = [ + "//pkg/apis/auditregistration:go_default_library", + "//staging/src/k8s.io/api/auditregistration/v1alpha1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/utils/pointer:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["defaults_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/auditregistration/install:go_default_library", + "//staging/src/k8s.io/api/auditregistration/v1alpha1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/utils/pointer:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/v1alpha1/defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/v1alpha1/defaults.go new file mode 100644 index 000000000000..1884a3907928 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/v1alpha1/defaults.go @@ -0,0 +1,56 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + auditregistrationv1alpha1 "k8s.io/api/auditregistration/v1alpha1" + "k8s.io/apimachinery/pkg/runtime" + utilpointer "k8s.io/utils/pointer" +) + +const ( + // DefaultQPS is the default QPS value + DefaultQPS = int64(10) + // DefaultBurst is the default burst value + DefaultBurst = int64(15) +) + +// DefaultThrottle is a default throttle config +func DefaultThrottle() *auditregistrationv1alpha1.WebhookThrottleConfig { + return &auditregistrationv1alpha1.WebhookThrottleConfig{ + QPS: utilpointer.Int64Ptr(DefaultQPS), + Burst: utilpointer.Int64Ptr(DefaultBurst), + } +} + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return RegisterDefaults(scheme) +} + +// SetDefaults_AuditSink sets defaults on the audit sink's webhook throttle configuration if it isn't present +func SetDefaults_AuditSink(obj *auditregistrationv1alpha1.AuditSink) { + if obj.Spec.Webhook.Throttle != nil { + if obj.Spec.Webhook.Throttle.QPS == nil { + obj.Spec.Webhook.Throttle.QPS = utilpointer.Int64Ptr(DefaultQPS) + } + if obj.Spec.Webhook.Throttle.Burst == nil { + obj.Spec.Webhook.Throttle.Burst = utilpointer.Int64Ptr(DefaultBurst) + } + } else { + obj.Spec.Webhook.Throttle = DefaultThrottle() + } +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/v1alpha1/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/v1alpha1/doc.go new file mode 100644 index 000000000000..954ad6ffbfbc --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/v1alpha1/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/auditregistration +// +k8s:conversion-gen-external-types=k8s.io/api/auditregistration/v1alpha1 +// +k8s:defaulter-gen=TypeMeta +// +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/auditregistration/v1alpha1 + +// +groupName=auditregistration.k8s.io + +package v1alpha1 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/v1alpha1/register.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/v1alpha1/register.go new file mode 100644 index 000000000000..a74b162b6ad2 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/v1alpha1/register.go @@ -0,0 +1,46 @@ +/* +Copyright 2018 The Kubernetes Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + auditregistrationv1alpha1 "k8s.io/api/auditregistration/v1alpha1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName for audit registration +const GroupName = "auditregistration.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + localSchemeBuilder = &auditregistrationv1alpha1.SchemeBuilder + // AddToScheme audit registration + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addDefaultingFuncs) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/v1alpha1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/v1alpha1/zz_generated.conversion.go new file mode 100644 index 000000000000..b8f08e397e7c --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/v1alpha1/zz_generated.conversion.go @@ -0,0 +1,316 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + unsafe "unsafe" + + v1alpha1 "k8s.io/api/auditregistration/v1alpha1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + auditregistration "k8s.io/kubernetes/pkg/apis/auditregistration" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
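For reviewers: RegisterConversions below wires the generated converters into a runtime.Scheme. In practice it is rarely called directly; the install package shown earlier registers everything, and client code only talks to the scheme. A hedged sketch of the external-to-internal round trip, assuming the vendored packages from this diff (the aliases and field values are illustrative):

```go
package main

import (
	"fmt"

	auditv1alpha1 "k8s.io/api/auditregistration/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/apis/auditregistration"
	"k8s.io/kubernetes/pkg/apis/auditregistration/install"
)

func main() {
	// Install registers the internal and v1alpha1 types together with the
	// generated conversion and defaulting functions from this diff.
	scheme := runtime.NewScheme()
	install.Install(scheme)

	external := &auditv1alpha1.AuditSink{
		Spec: auditv1alpha1.AuditSinkSpec{
			Policy: auditv1alpha1.Policy{Level: auditv1alpha1.LevelMetadata},
		},
	}

	// Convert dispatches to the generated Convert_v1alpha1_AuditSink_To_... func.
	internal := &auditregistration.AuditSink{}
	if err := scheme.Convert(external, internal, nil); err != nil {
		panic(err)
	}
	fmt.Println(internal.Spec.Policy.Level) // Metadata
}
```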
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*v1alpha1.AuditSink)(nil), (*auditregistration.AuditSink)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_AuditSink_To_auditregistration_AuditSink(a.(*v1alpha1.AuditSink), b.(*auditregistration.AuditSink), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*auditregistration.AuditSink)(nil), (*v1alpha1.AuditSink)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_auditregistration_AuditSink_To_v1alpha1_AuditSink(a.(*auditregistration.AuditSink), b.(*v1alpha1.AuditSink), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha1.AuditSinkList)(nil), (*auditregistration.AuditSinkList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_AuditSinkList_To_auditregistration_AuditSinkList(a.(*v1alpha1.AuditSinkList), b.(*auditregistration.AuditSinkList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*auditregistration.AuditSinkList)(nil), (*v1alpha1.AuditSinkList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_auditregistration_AuditSinkList_To_v1alpha1_AuditSinkList(a.(*auditregistration.AuditSinkList), b.(*v1alpha1.AuditSinkList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha1.AuditSinkSpec)(nil), (*auditregistration.AuditSinkSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_AuditSinkSpec_To_auditregistration_AuditSinkSpec(a.(*v1alpha1.AuditSinkSpec), b.(*auditregistration.AuditSinkSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*auditregistration.AuditSinkSpec)(nil), (*v1alpha1.AuditSinkSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_auditregistration_AuditSinkSpec_To_v1alpha1_AuditSinkSpec(a.(*auditregistration.AuditSinkSpec), b.(*v1alpha1.AuditSinkSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha1.Policy)(nil), (*auditregistration.Policy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_Policy_To_auditregistration_Policy(a.(*v1alpha1.Policy), b.(*auditregistration.Policy), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*auditregistration.Policy)(nil), (*v1alpha1.Policy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_auditregistration_Policy_To_v1alpha1_Policy(a.(*auditregistration.Policy), b.(*v1alpha1.Policy), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha1.ServiceReference)(nil), (*auditregistration.ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ServiceReference_To_auditregistration_ServiceReference(a.(*v1alpha1.ServiceReference), b.(*auditregistration.ServiceReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*auditregistration.ServiceReference)(nil), (*v1alpha1.ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_auditregistration_ServiceReference_To_v1alpha1_ServiceReference(a.(*auditregistration.ServiceReference), b.(*v1alpha1.ServiceReference), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*v1alpha1.Webhook)(nil), (*auditregistration.Webhook)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_Webhook_To_auditregistration_Webhook(a.(*v1alpha1.Webhook), b.(*auditregistration.Webhook), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*auditregistration.Webhook)(nil), (*v1alpha1.Webhook)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_auditregistration_Webhook_To_v1alpha1_Webhook(a.(*auditregistration.Webhook), b.(*v1alpha1.Webhook), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha1.WebhookClientConfig)(nil), (*auditregistration.WebhookClientConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_WebhookClientConfig_To_auditregistration_WebhookClientConfig(a.(*v1alpha1.WebhookClientConfig), b.(*auditregistration.WebhookClientConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*auditregistration.WebhookClientConfig)(nil), (*v1alpha1.WebhookClientConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_auditregistration_WebhookClientConfig_To_v1alpha1_WebhookClientConfig(a.(*auditregistration.WebhookClientConfig), b.(*v1alpha1.WebhookClientConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha1.WebhookThrottleConfig)(nil), (*auditregistration.WebhookThrottleConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_WebhookThrottleConfig_To_auditregistration_WebhookThrottleConfig(a.(*v1alpha1.WebhookThrottleConfig), b.(*auditregistration.WebhookThrottleConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*auditregistration.WebhookThrottleConfig)(nil), (*v1alpha1.WebhookThrottleConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_auditregistration_WebhookThrottleConfig_To_v1alpha1_WebhookThrottleConfig(a.(*auditregistration.WebhookThrottleConfig), b.(*v1alpha1.WebhookThrottleConfig), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1alpha1_AuditSink_To_auditregistration_AuditSink(in *v1alpha1.AuditSink, out *auditregistration.AuditSink, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_AuditSinkSpec_To_auditregistration_AuditSinkSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_AuditSink_To_auditregistration_AuditSink is an autogenerated conversion function. +func Convert_v1alpha1_AuditSink_To_auditregistration_AuditSink(in *v1alpha1.AuditSink, out *auditregistration.AuditSink, s conversion.Scope) error { + return autoConvert_v1alpha1_AuditSink_To_auditregistration_AuditSink(in, out, s) +} + +func autoConvert_auditregistration_AuditSink_To_v1alpha1_AuditSink(in *auditregistration.AuditSink, out *v1alpha1.AuditSink, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_auditregistration_AuditSinkSpec_To_v1alpha1_AuditSinkSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_auditregistration_AuditSink_To_v1alpha1_AuditSink is an autogenerated conversion function. 
+func Convert_auditregistration_AuditSink_To_v1alpha1_AuditSink(in *auditregistration.AuditSink, out *v1alpha1.AuditSink, s conversion.Scope) error { + return autoConvert_auditregistration_AuditSink_To_v1alpha1_AuditSink(in, out, s) +} + +func autoConvert_v1alpha1_AuditSinkList_To_auditregistration_AuditSinkList(in *v1alpha1.AuditSinkList, out *auditregistration.AuditSinkList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]auditregistration.AuditSink)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha1_AuditSinkList_To_auditregistration_AuditSinkList is an autogenerated conversion function. +func Convert_v1alpha1_AuditSinkList_To_auditregistration_AuditSinkList(in *v1alpha1.AuditSinkList, out *auditregistration.AuditSinkList, s conversion.Scope) error { + return autoConvert_v1alpha1_AuditSinkList_To_auditregistration_AuditSinkList(in, out, s) +} + +func autoConvert_auditregistration_AuditSinkList_To_v1alpha1_AuditSinkList(in *auditregistration.AuditSinkList, out *v1alpha1.AuditSinkList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha1.AuditSink)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_auditregistration_AuditSinkList_To_v1alpha1_AuditSinkList is an autogenerated conversion function. +func Convert_auditregistration_AuditSinkList_To_v1alpha1_AuditSinkList(in *auditregistration.AuditSinkList, out *v1alpha1.AuditSinkList, s conversion.Scope) error { + return autoConvert_auditregistration_AuditSinkList_To_v1alpha1_AuditSinkList(in, out, s) +} + +func autoConvert_v1alpha1_AuditSinkSpec_To_auditregistration_AuditSinkSpec(in *v1alpha1.AuditSinkSpec, out *auditregistration.AuditSinkSpec, s conversion.Scope) error { + if err := Convert_v1alpha1_Policy_To_auditregistration_Policy(&in.Policy, &out.Policy, s); err != nil { + return err + } + if err := Convert_v1alpha1_Webhook_To_auditregistration_Webhook(&in.Webhook, &out.Webhook, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_AuditSinkSpec_To_auditregistration_AuditSinkSpec is an autogenerated conversion function. +func Convert_v1alpha1_AuditSinkSpec_To_auditregistration_AuditSinkSpec(in *v1alpha1.AuditSinkSpec, out *auditregistration.AuditSinkSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_AuditSinkSpec_To_auditregistration_AuditSinkSpec(in, out, s) +} + +func autoConvert_auditregistration_AuditSinkSpec_To_v1alpha1_AuditSinkSpec(in *auditregistration.AuditSinkSpec, out *v1alpha1.AuditSinkSpec, s conversion.Scope) error { + if err := Convert_auditregistration_Policy_To_v1alpha1_Policy(&in.Policy, &out.Policy, s); err != nil { + return err + } + if err := Convert_auditregistration_Webhook_To_v1alpha1_Webhook(&in.Webhook, &out.Webhook, s); err != nil { + return err + } + return nil +} + +// Convert_auditregistration_AuditSinkSpec_To_v1alpha1_AuditSinkSpec is an autogenerated conversion function. 
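Worth noting for reviewers: where source and destination types are layout-compatible, the generated converters reinterpret memory through unsafe.Pointer instead of copying element by element, as the Items and Stages conversions above do. A small illustration of that pattern, not part of the diff, assuming the vendored packages:

```go
package main

import (
	"fmt"
	"unsafe"

	auditv1alpha1 "k8s.io/api/auditregistration/v1alpha1"
	"k8s.io/kubernetes/pkg/apis/auditregistration"
)

func main() {
	// Stage is a string type in both packages, so the slice headers are
	// layout-compatible and the cast is free: no elements are copied.
	external := []auditv1alpha1.Stage{"RequestReceived"}
	internal := *(*[]auditregistration.Stage)(unsafe.Pointer(&external))
	fmt.Println(internal[0]) // RequestReceived
}
```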
+func Convert_auditregistration_AuditSinkSpec_To_v1alpha1_AuditSinkSpec(in *auditregistration.AuditSinkSpec, out *v1alpha1.AuditSinkSpec, s conversion.Scope) error { + return autoConvert_auditregistration_AuditSinkSpec_To_v1alpha1_AuditSinkSpec(in, out, s) +} + +func autoConvert_v1alpha1_Policy_To_auditregistration_Policy(in *v1alpha1.Policy, out *auditregistration.Policy, s conversion.Scope) error { + out.Level = auditregistration.Level(in.Level) + out.Stages = *(*[]auditregistration.Stage)(unsafe.Pointer(&in.Stages)) + return nil +} + +// Convert_v1alpha1_Policy_To_auditregistration_Policy is an autogenerated conversion function. +func Convert_v1alpha1_Policy_To_auditregistration_Policy(in *v1alpha1.Policy, out *auditregistration.Policy, s conversion.Scope) error { + return autoConvert_v1alpha1_Policy_To_auditregistration_Policy(in, out, s) +} + +func autoConvert_auditregistration_Policy_To_v1alpha1_Policy(in *auditregistration.Policy, out *v1alpha1.Policy, s conversion.Scope) error { + out.Level = v1alpha1.Level(in.Level) + out.Stages = *(*[]v1alpha1.Stage)(unsafe.Pointer(&in.Stages)) + return nil +} + +// Convert_auditregistration_Policy_To_v1alpha1_Policy is an autogenerated conversion function. +func Convert_auditregistration_Policy_To_v1alpha1_Policy(in *auditregistration.Policy, out *v1alpha1.Policy, s conversion.Scope) error { + return autoConvert_auditregistration_Policy_To_v1alpha1_Policy(in, out, s) +} + +func autoConvert_v1alpha1_ServiceReference_To_auditregistration_ServiceReference(in *v1alpha1.ServiceReference, out *auditregistration.ServiceReference, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Name = in.Name + out.Path = (*string)(unsafe.Pointer(in.Path)) + return nil +} + +// Convert_v1alpha1_ServiceReference_To_auditregistration_ServiceReference is an autogenerated conversion function. +func Convert_v1alpha1_ServiceReference_To_auditregistration_ServiceReference(in *v1alpha1.ServiceReference, out *auditregistration.ServiceReference, s conversion.Scope) error { + return autoConvert_v1alpha1_ServiceReference_To_auditregistration_ServiceReference(in, out, s) +} + +func autoConvert_auditregistration_ServiceReference_To_v1alpha1_ServiceReference(in *auditregistration.ServiceReference, out *v1alpha1.ServiceReference, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Name = in.Name + out.Path = (*string)(unsafe.Pointer(in.Path)) + return nil +} + +// Convert_auditregistration_ServiceReference_To_v1alpha1_ServiceReference is an autogenerated conversion function. +func Convert_auditregistration_ServiceReference_To_v1alpha1_ServiceReference(in *auditregistration.ServiceReference, out *v1alpha1.ServiceReference, s conversion.Scope) error { + return autoConvert_auditregistration_ServiceReference_To_v1alpha1_ServiceReference(in, out, s) +} + +func autoConvert_v1alpha1_Webhook_To_auditregistration_Webhook(in *v1alpha1.Webhook, out *auditregistration.Webhook, s conversion.Scope) error { + out.Throttle = (*auditregistration.WebhookThrottleConfig)(unsafe.Pointer(in.Throttle)) + if err := Convert_v1alpha1_WebhookClientConfig_To_auditregistration_WebhookClientConfig(&in.ClientConfig, &out.ClientConfig, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_Webhook_To_auditregistration_Webhook is an autogenerated conversion function. 
+func Convert_v1alpha1_Webhook_To_auditregistration_Webhook(in *v1alpha1.Webhook, out *auditregistration.Webhook, s conversion.Scope) error { + return autoConvert_v1alpha1_Webhook_To_auditregistration_Webhook(in, out, s) +} + +func autoConvert_auditregistration_Webhook_To_v1alpha1_Webhook(in *auditregistration.Webhook, out *v1alpha1.Webhook, s conversion.Scope) error { + out.Throttle = (*v1alpha1.WebhookThrottleConfig)(unsafe.Pointer(in.Throttle)) + if err := Convert_auditregistration_WebhookClientConfig_To_v1alpha1_WebhookClientConfig(&in.ClientConfig, &out.ClientConfig, s); err != nil { + return err + } + return nil +} + +// Convert_auditregistration_Webhook_To_v1alpha1_Webhook is an autogenerated conversion function. +func Convert_auditregistration_Webhook_To_v1alpha1_Webhook(in *auditregistration.Webhook, out *v1alpha1.Webhook, s conversion.Scope) error { + return autoConvert_auditregistration_Webhook_To_v1alpha1_Webhook(in, out, s) +} + +func autoConvert_v1alpha1_WebhookClientConfig_To_auditregistration_WebhookClientConfig(in *v1alpha1.WebhookClientConfig, out *auditregistration.WebhookClientConfig, s conversion.Scope) error { + out.URL = (*string)(unsafe.Pointer(in.URL)) + out.Service = (*auditregistration.ServiceReference)(unsafe.Pointer(in.Service)) + out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle)) + return nil +} + +// Convert_v1alpha1_WebhookClientConfig_To_auditregistration_WebhookClientConfig is an autogenerated conversion function. +func Convert_v1alpha1_WebhookClientConfig_To_auditregistration_WebhookClientConfig(in *v1alpha1.WebhookClientConfig, out *auditregistration.WebhookClientConfig, s conversion.Scope) error { + return autoConvert_v1alpha1_WebhookClientConfig_To_auditregistration_WebhookClientConfig(in, out, s) +} + +func autoConvert_auditregistration_WebhookClientConfig_To_v1alpha1_WebhookClientConfig(in *auditregistration.WebhookClientConfig, out *v1alpha1.WebhookClientConfig, s conversion.Scope) error { + out.URL = (*string)(unsafe.Pointer(in.URL)) + out.Service = (*v1alpha1.ServiceReference)(unsafe.Pointer(in.Service)) + out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle)) + return nil +} + +// Convert_auditregistration_WebhookClientConfig_To_v1alpha1_WebhookClientConfig is an autogenerated conversion function. +func Convert_auditregistration_WebhookClientConfig_To_v1alpha1_WebhookClientConfig(in *auditregistration.WebhookClientConfig, out *v1alpha1.WebhookClientConfig, s conversion.Scope) error { + return autoConvert_auditregistration_WebhookClientConfig_To_v1alpha1_WebhookClientConfig(in, out, s) +} + +func autoConvert_v1alpha1_WebhookThrottleConfig_To_auditregistration_WebhookThrottleConfig(in *v1alpha1.WebhookThrottleConfig, out *auditregistration.WebhookThrottleConfig, s conversion.Scope) error { + out.QPS = (*int64)(unsafe.Pointer(in.QPS)) + out.Burst = (*int64)(unsafe.Pointer(in.Burst)) + return nil +} + +// Convert_v1alpha1_WebhookThrottleConfig_To_auditregistration_WebhookThrottleConfig is an autogenerated conversion function. 
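For reviewers: the throttle converters here pair with SetDefaults_AuditSink from defaults.go earlier in this diff, so an AuditSink submitted without throttle settings comes back with QPS 10 and burst 15. A brief sketch using the generated entry point from zz_generated.defaults.go further below (assuming the vendored packages):

```go
package main

import (
	"fmt"

	auditv1alpha1 "k8s.io/api/auditregistration/v1alpha1"
	v1alpha1defaults "k8s.io/kubernetes/pkg/apis/auditregistration/v1alpha1"
)

func main() {
	sink := &auditv1alpha1.AuditSink{} // no throttle configured

	// SetObjectDefaults_AuditSink calls SetDefaults_AuditSink, which fills
	// in the missing throttle configuration with the package defaults.
	v1alpha1defaults.SetObjectDefaults_AuditSink(sink)

	fmt.Println(*sink.Spec.Webhook.Throttle.QPS)   // 10
	fmt.Println(*sink.Spec.Webhook.Throttle.Burst) // 15
}
```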
+func Convert_v1alpha1_WebhookThrottleConfig_To_auditregistration_WebhookThrottleConfig(in *v1alpha1.WebhookThrottleConfig, out *auditregistration.WebhookThrottleConfig, s conversion.Scope) error { + return autoConvert_v1alpha1_WebhookThrottleConfig_To_auditregistration_WebhookThrottleConfig(in, out, s) +} + +func autoConvert_auditregistration_WebhookThrottleConfig_To_v1alpha1_WebhookThrottleConfig(in *auditregistration.WebhookThrottleConfig, out *v1alpha1.WebhookThrottleConfig, s conversion.Scope) error { + out.QPS = (*int64)(unsafe.Pointer(in.QPS)) + out.Burst = (*int64)(unsafe.Pointer(in.Burst)) + return nil +} + +// Convert_auditregistration_WebhookThrottleConfig_To_v1alpha1_WebhookThrottleConfig is an autogenerated conversion function. +func Convert_auditregistration_WebhookThrottleConfig_To_v1alpha1_WebhookThrottleConfig(in *auditregistration.WebhookThrottleConfig, out *v1alpha1.WebhookThrottleConfig, s conversion.Scope) error { + return autoConvert_auditregistration_WebhookThrottleConfig_To_v1alpha1_WebhookThrottleConfig(in, out, s) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/v1alpha1/zz_generated.defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/v1alpha1/zz_generated.defaults.go new file mode 100644 index 000000000000..1f23658b80a6 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/v1alpha1/zz_generated.defaults.go @@ -0,0 +1,46 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/auditregistration/v1alpha1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&v1alpha1.AuditSink{}, func(obj interface{}) { SetObjectDefaults_AuditSink(obj.(*v1alpha1.AuditSink)) }) + scheme.AddTypeDefaultingFunc(&v1alpha1.AuditSinkList{}, func(obj interface{}) { SetObjectDefaults_AuditSinkList(obj.(*v1alpha1.AuditSinkList)) }) + return nil +} + +func SetObjectDefaults_AuditSink(in *v1alpha1.AuditSink) { + SetDefaults_AuditSink(in) +} + +func SetObjectDefaults_AuditSinkList(in *v1alpha1.AuditSinkList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_AuditSink(a) + } +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/zz_generated.deepcopy.go new file mode 100644 index 000000000000..45106c0a5c74 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/auditregistration/zz_generated.deepcopy.go @@ -0,0 +1,224 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package auditregistration + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditSink) DeepCopyInto(out *AuditSink) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditSink. +func (in *AuditSink) DeepCopy() *AuditSink { + if in == nil { + return nil + } + out := new(AuditSink) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AuditSink) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditSinkList) DeepCopyInto(out *AuditSinkList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AuditSink, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditSinkList. +func (in *AuditSinkList) DeepCopy() *AuditSinkList { + if in == nil { + return nil + } + out := new(AuditSinkList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AuditSinkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditSinkSpec) DeepCopyInto(out *AuditSinkSpec) { + *out = *in + in.Policy.DeepCopyInto(&out.Policy) + in.Webhook.DeepCopyInto(&out.Webhook) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditSinkSpec. +func (in *AuditSinkSpec) DeepCopy() *AuditSinkSpec { + if in == nil { + return nil + } + out := new(AuditSinkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Policy) DeepCopyInto(out *Policy) { + *out = *in + if in.Stages != nil { + in, out := &in.Stages, &out.Stages + *out = make([]Stage, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. 
+func (in *Policy) DeepCopy() *Policy { + if in == nil { + return nil + } + out := new(Policy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. +func (in *ServiceReference) DeepCopy() *ServiceReference { + if in == nil { + return nil + } + out := new(ServiceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Webhook) DeepCopyInto(out *Webhook) { + *out = *in + if in.Throttle != nil { + in, out := &in.Throttle, &out.Throttle + *out = new(WebhookThrottleConfig) + (*in).DeepCopyInto(*out) + } + in.ClientConfig.DeepCopyInto(&out.ClientConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Webhook. +func (in *Webhook) DeepCopy() *Webhook { + if in == nil { + return nil + } + out := new(Webhook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceReference) + (*in).DeepCopyInto(*out) + } + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig. +func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig { + if in == nil { + return nil + } + out := new(WebhookClientConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookThrottleConfig) DeepCopyInto(out *WebhookThrottleConfig) { + *out = *in + if in.QPS != nil { + in, out := &in.QPS, &out.QPS + *out = new(int64) + **out = **in + } + if in.Burst != nil { + in, out := &in.Burst, &out.Burst + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookThrottleConfig. 
+func (in *WebhookThrottleConfig) DeepCopy() *WebhookThrottleConfig { + if in == nil { + return nil + } + out := new(WebhookThrottleConfig) + in.DeepCopyInto(out) + return out +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/OWNERS index 2bdfd0ce5bc5..3b7ea1b131f2 100755 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/OWNERS @@ -1,9 +1,7 @@ +# approval on api packages bubbles to api-approvers reviewers: -- liggitt -- lavalamp -- wojtek-t -- deads2k -- sttts -- mbohlool -- jianhuiz -- enj +- sig-auth-authenticators-approvers +- sig-auth-authenticators-reviewers +labels: +- sig/auth + diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/doc.go index 67a7fe426666..a57740b088eb 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/doc.go @@ -16,4 +16,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=authentication.k8s.io + package authentication diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/types.go index deb33cfc1983..203bf22bb348 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/types.go @@ -59,6 +59,12 @@ type TokenReview struct { type TokenReviewSpec struct { // Token is the opaque bearer token. Token string + // Audiences is a list of the identifiers that the resource server presented + // with the token identifies as. Audience-aware token authenticators will + // verify that the token was intended for at least one of the audiences in + // this list. If no audiences are provided, the audience will default to the + // audience of the Kubernetes apiserver. + Audiences []string } // TokenReviewStatus is the result of the token authentication request. @@ -68,6 +74,16 @@ type TokenReviewStatus struct { Authenticated bool // User is the UserInfo associated with the provided token. User UserInfo + // Audiences are audience identifiers chosen by the authenticator that are + // compatible with both the TokenReview and token. An identifier is any + // identifier in the intersection of the TokenReviewSpec audiences and the + // token's audiences. A client of the TokenReview API that sets the + // spec.audiences field should validate that a compatible audience identifier + // is returned in the status.audiences field to ensure that the TokenReview + // server is audience aware. If a TokenReview returns an empty + // status.audience field where status.authenticated is "true", the token is + // valid against the audience of the Kubernetes API server. 
+ Audiences []string // Error indicates that the token couldn't be checked Error string } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/doc.go index 06d609c635fc..1ce54b316ee1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/doc.go @@ -19,4 +19,5 @@ limitations under the License. // +groupName=authentication.k8s.io // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/authentication/v1 + package v1 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/zz_generated.conversion.go index afa00b1594fb..8e95da3274b1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/zz_generated.conversion.go @@ -263,6 +263,7 @@ func Convert_authentication_TokenReview_To_v1_TokenReview(in *authentication.Tok func autoConvert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec(in *v1.TokenReviewSpec, out *authentication.TokenReviewSpec, s conversion.Scope) error { out.Token = in.Token + out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences)) return nil } @@ -273,6 +274,7 @@ func Convert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec(in *v1.TokenRe func autoConvert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec(in *authentication.TokenReviewSpec, out *v1.TokenReviewSpec, s conversion.Scope) error { out.Token = in.Token + out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences)) return nil } @@ -286,6 +288,7 @@ func autoConvert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus(in *v1 if err := Convert_v1_UserInfo_To_authentication_UserInfo(&in.User, &out.User, s); err != nil { return err } + out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences)) out.Error = in.Error return nil } @@ -300,6 +303,7 @@ func autoConvert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus(in *au if err := Convert_authentication_UserInfo_To_v1_UserInfo(&in.User, &out.User, s); err != nil { return err } + out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences)) out.Error = in.Error return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/doc.go index 68fcb83f3b92..f53874c83dca 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/doc.go @@ -19,4 +19,5 @@ limitations under the License. 
// +groupName=authentication.k8s.io // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/authentication/v1beta1 + package v1beta1 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/zz_generated.conversion.go index 97437b18f1a4..95d65c5dab3e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/zz_generated.conversion.go @@ -113,6 +113,7 @@ func Convert_authentication_TokenReview_To_v1beta1_TokenReview(in *authenticatio func autoConvert_v1beta1_TokenReviewSpec_To_authentication_TokenReviewSpec(in *v1beta1.TokenReviewSpec, out *authentication.TokenReviewSpec, s conversion.Scope) error { out.Token = in.Token + out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences)) return nil } @@ -123,6 +124,7 @@ func Convert_v1beta1_TokenReviewSpec_To_authentication_TokenReviewSpec(in *v1bet func autoConvert_authentication_TokenReviewSpec_To_v1beta1_TokenReviewSpec(in *authentication.TokenReviewSpec, out *v1beta1.TokenReviewSpec, s conversion.Scope) error { out.Token = in.Token + out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences)) return nil } @@ -136,6 +138,7 @@ func autoConvert_v1beta1_TokenReviewStatus_To_authentication_TokenReviewStatus(i if err := Convert_v1beta1_UserInfo_To_authentication_UserInfo(&in.User, &out.User, s); err != nil { return err } + out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences)) out.Error = in.Error return nil } @@ -150,6 +153,7 @@ func autoConvert_authentication_TokenReviewStatus_To_v1beta1_TokenReviewStatus(i if err := Convert_authentication_UserInfo_To_v1beta1_UserInfo(&in.User, &out.User, s); err != nil { return err } + out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences)) out.Error = in.Error return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/zz_generated.deepcopy.go index 2a4cdd07abfc..1f485500439d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authentication/zz_generated.deepcopy.go @@ -136,7 +136,7 @@ func (in *TokenReview) DeepCopyInto(out *TokenReview) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) return } @@ -162,6 +162,11 @@ func (in *TokenReview) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TokenReviewSpec) DeepCopyInto(out *TokenReviewSpec) { *out = *in + if in.Audiences != nil { + in, out := &in.Audiences, &out.Audiences + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -179,6 +184,11 @@ func (in *TokenReviewSpec) DeepCopy() *TokenReviewSpec { func (in *TokenReviewStatus) DeepCopyInto(out *TokenReviewStatus) { *out = *in in.User.DeepCopyInto(&out.User) + if in.Audiences != nil { + in, out := &in.Audiences, &out.Audiences + *out = make([]string, len(*in)) + copy(*out, *in) + } return } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/OWNERS index c1613fc2e026..ff4a7f4bf9ad 100755 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/OWNERS @@ -1,17 +1,7 @@ +# approval on api packages bubbles to api-approvers reviewers: -- thockin -- lavalamp -- smarterclayton -- wojtek-t -- deads2k -- liggitt -- nikhiljindal -- erictune -- sttts -- ncdc -- dims -- mml -- mbohlool -- david-mcmahon -- jianhuiz -- enj +- sig-auth-authorizers-approvers +- sig-auth-authorizers-reviewers +labels: +- sig/auth + diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/doc.go index b66c384b1ed4..dd6e9124669e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/doc.go @@ -16,4 +16,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=authorization.k8s.io + package authorization diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/doc.go index b47e67c03637..a0536d813d43 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/authorization/v1 // +groupName=authorization.k8s.io + package v1 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/doc.go index 54ceb2ee6271..ea2e709be7c4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/doc.go @@ -20,4 +20,5 @@ limitations under the License. 
// +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/authorization/v1beta1 // +groupName=authorization.k8s.io + package v1beta1 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/defaults.go index e08f96b92f53..5fdf8e7122c6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/defaults.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/defaults.go @@ -39,7 +39,7 @@ func SetDefaults_HorizontalPodAutoscaler(obj *autoscalingv2beta1.HorizontalPodAu { Type: autoscalingv2beta1.ResourceMetricSourceType, Resource: &autoscalingv2beta1.ResourceMetricSource{ - Name: v1.ResourceCPU, + Name: v1.ResourceCPU, TargetAverageUtilization: &utilizationDefaultVal, }, }, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/certificates/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/certificates/OWNERS index 1d1ab36e7560..796d862bd9c3 100755 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/certificates/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/certificates/OWNERS @@ -1,14 +1,7 @@ +# approval on api packages bubbles to api-approvers reviewers: -- thockin -- lavalamp -- smarterclayton -- deads2k -- caesarxuchao -- liggitt -- sttts -- dims -- errordeveloper -- mbohlool -- david-mcmahon -- jianhuiz -- enj +- sig-auth-certificates-approvers +- sig-auth-certificates-reviewers +labels: +- sig/auth + diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/certificates/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/certificates/doc.go index 6d5b784c4921..5d4ee99602ef 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/certificates/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/certificates/doc.go @@ -16,4 +16,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=certificates.k8s.io + package certificates diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/doc.go index 5c600b763010..eabec7bccaa1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/certificates/v1beta1 // +groupName=certificates.k8s.io + package v1beta1 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/coordination/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/coordination/doc.go index 9b1561f577be..e912df22b983 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/coordination/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/coordination/doc.go @@ -17,4 +17,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=coordination.k8s.io + package coordination diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/coordination/v1beta1/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/coordination/v1beta1/doc.go index 36340a00d8fc..186b278b237e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/coordination/v1beta1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/coordination/v1beta1/doc.go @@ -20,4 +20,5 @@ limitations under the License. 
// +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/coordination/v1beta1 // +groupName=coordination.k8s.io + package v1beta1 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go index a1e6daae4966..bef73c0db0bf 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go @@ -82,4 +82,23 @@ const ( // // Not all cloud providers support this annotation, though AWS & GCE do. AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges" + + // EndpointsLastChangeTriggerTime is the annotation key, set for endpoints objects, that + // represents the timestamp (stored as RFC 3339 date-time string, e.g. '2018-10-22T19:32:52.1Z') + // of the last change, of some Pod or Service object, that triggered the endpoints object change. + // In other words, if a Pod / Service changed at time T0, that change was observed by endpoints + // controller at T1, and the Endpoints object was changed at T2, the + // EndpointsLastChangeTriggerTime would be set to T0. + // + // The "endpoints change trigger" here means any Pod or Service change that resulted in the + // Endpoints object change. + // + // Given the definition of the "endpoints change trigger", please note that this annotation will + // be set ONLY for endpoints object changes triggered by either Pod or Service change. If the + // Endpoints object changes due to other reasons, this annotation won't be set (or updated if it's + // already set). + // + // This annotation will be used to compute the in-cluster network programming latency SLI, see + // https://github.com/kubernetes/community/blob/master/sig-scalability/slos/network_programming_latency.md + EndpointsLastChangeTriggerTime = "endpoints.kubernetes.io/last-change-trigger-time" ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go index 486122298c22..10c33f66bd1d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go @@ -537,28 +537,3 @@ func PersistentVolumeClaimHasClass(claim *core.PersistentVolumeClaim) bool { return false } - -// ScopedResourceSelectorRequirementsAsSelector converts the ScopedResourceSelectorRequirement api type into a struct that implements -// labels.Selector. 
-func ScopedResourceSelectorRequirementsAsSelector(ssr core.ScopedResourceSelectorRequirement) (labels.Selector, error) { - selector := labels.NewSelector() - var op selection.Operator - switch ssr.Operator { - case core.ScopeSelectorOpIn: - op = selection.In - case core.ScopeSelectorOpNotIn: - op = selection.NotIn - case core.ScopeSelectorOpExists: - op = selection.Exists - case core.ScopeSelectorOpDoesNotExist: - op = selection.DoesNotExist - default: - return nil, fmt.Errorf("%q is not a valid scope selector operator", ssr.Operator) - } - r, err := labels.NewRequirement(string(ssr.ScopeName), op, ssr.Values) - if err != nil { - return nil, err - } - selector = selector.Add(*r) - return selector, nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/helper/qos/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/helper/qos/BUILD deleted file mode 100644 index a2a24eda2da8..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/helper/qos/BUILD +++ /dev/null @@ -1,30 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["qos.go"], - importpath = "k8s.io/kubernetes/pkg/apis/core/helper/qos", - deps = [ - "//pkg/apis/core:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/types.go index 1574a83340fa..251547f601bd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/types.go @@ -26,19 +26,19 @@ import ( const ( // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients - NamespaceDefault string = "default" + NamespaceDefault = "default" // NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces - NamespaceAll string = "" + NamespaceAll = "" // NamespaceNone is the argument for a context when there is no namespace. - NamespaceNone string = "" + NamespaceNone = "" // NamespaceSystem is the system namespace where we place system components. - NamespaceSystem string = "kube-system" + NamespaceSystem = "kube-system" // NamespacePublic is the namespace where we place public info (ConfigMaps) - NamespacePublic string = "kube-public" + NamespacePublic = "kube-public" // NamespaceNodeLease is the namespace where we place node lease objects (used for node heartbeats) - NamespaceNodeLease string = "kube-node-lease" + NamespaceNodeLease = "kube-node-lease" // TerminationMessagePathDefault means the default path to capture the application termination message running in a container - TerminationMessagePathDefault string = "/dev/termination-log" + TerminationMessagePathDefault = "/dev/termination-log" ) // Volume represents a named volume in a pod that may be accessed by any containers in the pod. 
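The types.go hunk above drops the explicit `string` type from the namespace and termination-path constants, making them untyped Go constants. The practical difference is that an untyped constant can be assigned to any string-derived type without an explicit conversion, which matters in a codebase full of `type Foo string` aliases. A minimal sketch of the distinction, using a hypothetical `NamespaceType` rather than any type from this package:

```go
package main

import "fmt"

// NamespaceType is a hypothetical string-derived type, standing in for the
// many `type Foo string` aliases used throughout the Kubernetes API packages.
type NamespaceType string

const namespaceTyped string = "default" // typed constant: its type is fixed to string
const namespaceUntyped = "default"      // untyped constant: its type is chosen by context

func main() {
	// var a NamespaceType = namespaceTyped // would not compile: string is not NamespaceType
	var b NamespaceType = namespaceUntyped // fine: untyped constants convert implicitly
	fmt.Println(b)
}
```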
@@ -175,7 +175,7 @@ type PersistentVolumeSource struct { HostPath *HostPathVolumeSource // Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod // +optional - Glusterfs *GlusterfsVolumeSource + Glusterfs *GlusterfsPersistentVolumeSource // NFS represents an NFS mount on the host that shares a pod's lifetime // +optional NFS *NFSVolumeSource @@ -229,7 +229,7 @@ type PersistentVolumeSource struct { // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md // +optional StorageOS *StorageOSPersistentVolumeSource - // CSI (Container Storage Interface) represents storage that handled by an external CSI driver (Beta feature). + // CSI (Container Storage Interface) represents storage that is handled by an external CSI driver. // +optional CSI *CSIPersistentVolumeSource } @@ -298,7 +298,7 @@ type PersistentVolumeSpec struct { MountOptions []string // volumeMode defines if a volume is intended to be used with a formatted filesystem // or to remain in raw block state. Value of Filesystem is implied when not included in spec. - // This is an alpha feature and may change in the future. + // This is a beta feature. // +optional VolumeMode *PersistentVolumeMode // NodeAffinity defines constraints that limit what nodes this volume can be accessed from. @@ -410,7 +410,7 @@ type PersistentVolumeClaimSpec struct { StorageClassName *string // volumeMode defines what type of volume is required by the claim. // Value of Filesystem is implied when not included in claim spec. - // This is an alpha feature and may change in the future. + // This is a beta feature. // +optional VolumeMode *PersistentVolumeMode // This field requires the VolumeSnapshotDataSource alpha feature gate to be @@ -935,6 +935,30 @@ type GlusterfsVolumeSource struct { ReadOnly bool } +// Represents a Glusterfs mount that lasts the lifetime of a pod. +// Glusterfs volumes do not support ownership management or SELinux relabeling. +type GlusterfsPersistentVolumeSource struct { + // EndpointsName is the endpoint name that details Glusterfs topology. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + EndpointsName string + + // Path is the Glusterfs volume path. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + Path string + + // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. + // Defaults to false. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // +optional + ReadOnly bool + + // EndpointsNamespace is the namespace that contains the Glusterfs endpoint. + // If this field is empty, the EndpointsNamespace defaults to the same namespace as the bound PVC. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // +optional + EndpointsNamespace *string +} + // Represents a Rados Block Device mount that lasts the lifetime of a pod. // RBD volumes support ownership management and SELinux relabeling. type RBDVolumeSource struct { @@ -1523,7 +1547,7 @@ type LocalVolumeSource struct { FSType *string } -// Represents storage that is managed by an external CSI volume driver (Beta feature) +// Represents storage that is managed by an external CSI volume driver. type CSIPersistentVolumeSource struct { // Driver is the name of the driver to use for this volume. // Required.
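The `GlusterfsPersistentVolumeSource` type introduced in the hunks above mirrors `GlusterfsVolumeSource` but adds an optional `EndpointsNamespace` pointer, so a PersistentVolume can reference a Glusterfs endpoints object outside the namespace of the bound PVC. A minimal sketch of populating the new source, assuming the vendored `k8s.io/kubernetes/pkg/apis/core` package is importable; the endpoints name, path, and namespace values are illustrative only:

```go
package main

import (
	"fmt"

	core "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	// Hypothetical namespace holding the Glusterfs endpoints object. Leaving
	// EndpointsNamespace nil falls back to the namespace of the bound PVC.
	ns := "glusterfs-system"

	src := core.PersistentVolumeSource{
		Glusterfs: &core.GlusterfsPersistentVolumeSource{
			EndpointsName:      "glusterfs-cluster", // illustrative endpoints object name
			Path:               "gv0",               // illustrative Glusterfs volume path
			ReadOnly:           false,
			EndpointsNamespace: &ns,
		},
	}
	fmt.Printf("%+v\n", *src.Glusterfs)
}
```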
@@ -1552,7 +1576,7 @@ type CSIPersistentVolumeSource struct { // ControllerPublishSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // ControllerPublishVolume and ControllerUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional ControllerPublishSecretRef *SecretReference @@ -1560,7 +1584,7 @@ type CSIPersistentVolumeSource struct { // NodeStageSecretRef is a reference to the secret object containing sensitive // information to pass to the CSI driver to complete the CSI NodeStageVolume // and NodeStageVolume and NodeUnstageVolume calls. - // This field is optional, and may be empty if no secret is required. If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional NodeStageSecretRef *SecretReference @@ -1568,7 +1592,7 @@ type CSIPersistentVolumeSource struct { // NodePublishSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // NodePublishVolume and NodeUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional NodePublishSecretRef *SecretReference @@ -1944,7 +1968,7 @@ type Container struct { // +optional VolumeMounts []VolumeMount // volumeDevices is the list of block devices to be used by the container. - // This is an alpha feature and may change in the future. + // This is a beta feature. // +optional VolumeDevices []VolumeDevice // +optional @@ -2597,6 +2621,11 @@ type PodSpec struct { // This is an alpha feature and may change in the future. // +optional RuntimeClassName *string + // EnableServiceLinks indicates whether information about services should be injected into pod's + // environment variables, matching the syntax of Docker links. + // If not specified, the default is true. + // +optional + EnableServiceLinks *bool } // HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the @@ -3976,8 +4005,11 @@ type LocalObjectReference struct { // TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace. type TypedLocalObjectReference struct { - // APIGroup is the group for the resource being referenced - APIGroup string + // APIGroup is the group for the resource being referenced. + // If APIGroup is not specified, the specified Kind must be in the core API group. + // For any other third-party types, APIGroup is required. 
+ // +optional + APIGroup *string // Kind is the type of resource being referenced Kind string // Name is the name of resource being referenced diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/BUILD index 180f0bad98b7..b7e881dd95c0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/BUILD @@ -13,8 +13,8 @@ go_library( importpath = "k8s.io/kubernetes/pkg/apis/core/v1", visibility = ["//visibility:public"], deps = [ + "//pkg/apis/apps:go_default_library", "//pkg/apis/core:go_default_library", - "//pkg/apis/extensions:go_default_library", "//pkg/features:go_default_library", "//pkg/util/parsers:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -36,16 +36,18 @@ go_test( srcs = [ "conversion_test.go", "defaults_test.go", + "main_test.go", ], embed = [":go_default_library"], deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", + "//pkg/apis/apps:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/apis/core/fuzzer:go_default_library", - "//pkg/apis/extensions:go_default_library", + "//pkg/features:go_default_library", + "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/apitesting/fuzzer:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", @@ -55,6 +57,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library", "//vendor/k8s.io/utils/pointer:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go index bab07f1950c7..926a39789ea9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go @@ -26,8 +26,8 @@ import ( "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/extensions" ) func addConversionFuncs(scheme *runtime.Scheme) error { @@ -43,12 +43,12 @@ func addConversionFuncs(scheme *runtime.Scheme) error { Convert_v1_Secret_To_core_Secret, Convert_v1_ServiceSpec_To_core_ServiceSpec, Convert_v1_ResourceList_To_core_ResourceList, - Convert_v1_ReplicationController_To_extensions_ReplicaSet, - Convert_v1_ReplicationControllerSpec_To_extensions_ReplicaSetSpec, - Convert_v1_ReplicationControllerStatus_To_extensions_ReplicaSetStatus, - Convert_extensions_ReplicaSet_To_v1_ReplicationController, - Convert_extensions_ReplicaSetSpec_To_v1_ReplicationControllerSpec, - Convert_extensions_ReplicaSetStatus_To_v1_ReplicationControllerStatus, + Convert_v1_ReplicationController_To_apps_ReplicaSet, + Convert_v1_ReplicationControllerSpec_To_apps_ReplicaSetSpec, + 
Convert_v1_ReplicationControllerStatus_To_apps_ReplicaSetStatus, + Convert_apps_ReplicaSet_To_v1_ReplicationController, + Convert_apps_ReplicaSetSpec_To_v1_ReplicationControllerSpec, + Convert_apps_ReplicaSetStatus_To_v1_ReplicationControllerStatus, ) if err != nil { return err @@ -120,18 +120,18 @@ func addConversionFuncs(scheme *runtime.Scheme) error { return nil } -func Convert_v1_ReplicationController_To_extensions_ReplicaSet(in *v1.ReplicationController, out *extensions.ReplicaSet, s conversion.Scope) error { +func Convert_v1_ReplicationController_To_apps_ReplicaSet(in *v1.ReplicationController, out *apps.ReplicaSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_ReplicationControllerSpec_To_extensions_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1_ReplicationControllerSpec_To_apps_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1_ReplicationControllerStatus_To_extensions_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1_ReplicationControllerStatus_To_apps_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func Convert_v1_ReplicationControllerSpec_To_extensions_ReplicaSetSpec(in *v1.ReplicationControllerSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { +func Convert_v1_ReplicationControllerSpec_To_apps_ReplicaSetSpec(in *v1.ReplicationControllerSpec, out *apps.ReplicaSetSpec, s conversion.Scope) error { out.Replicas = *in.Replicas out.MinReadySeconds = in.MinReadySeconds if in.Selector != nil { @@ -146,15 +146,15 @@ func Convert_v1_ReplicationControllerSpec_To_extensions_ReplicaSetSpec(in *v1.Re return nil } -func Convert_v1_ReplicationControllerStatus_To_extensions_ReplicaSetStatus(in *v1.ReplicationControllerStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error { +func Convert_v1_ReplicationControllerStatus_To_apps_ReplicaSetStatus(in *v1.ReplicationControllerStatus, out *apps.ReplicaSetStatus, s conversion.Scope) error { out.Replicas = in.Replicas out.FullyLabeledReplicas = in.FullyLabeledReplicas out.ReadyReplicas = in.ReadyReplicas out.AvailableReplicas = in.AvailableReplicas out.ObservedGeneration = in.ObservedGeneration for _, cond := range in.Conditions { - out.Conditions = append(out.Conditions, extensions.ReplicaSetCondition{ - Type: extensions.ReplicaSetConditionType(cond.Type), + out.Conditions = append(out.Conditions, apps.ReplicaSetCondition{ + Type: apps.ReplicaSetConditionType(cond.Type), Status: core.ConditionStatus(cond.Status), LastTransitionTime: cond.LastTransitionTime, Reason: cond.Reason, @@ -164,9 +164,9 @@ func Convert_v1_ReplicationControllerStatus_To_extensions_ReplicaSetStatus(in *v return nil } -func Convert_extensions_ReplicaSet_To_v1_ReplicationController(in *extensions.ReplicaSet, out *v1.ReplicationController, s conversion.Scope) error { +func Convert_apps_ReplicaSet_To_v1_ReplicationController(in *apps.ReplicaSet, out *v1.ReplicationController, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_ReplicaSetSpec_To_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_apps_ReplicaSetSpec_To_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { fieldErr, ok := err.(*field.Error) if !ok { return err @@ -176,13 +176,13 @@ func Convert_extensions_ReplicaSet_To_v1_ReplicationController(in *extensions.Re } out.Annotations[v1.NonConvertibleAnnotationPrefix+"/"+fieldErr.Field] = 
reflect.ValueOf(fieldErr.BadValue).String() } - if err := Convert_extensions_ReplicaSetStatus_To_v1_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_apps_ReplicaSetStatus_To_v1_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func Convert_extensions_ReplicaSetSpec_To_v1_ReplicationControllerSpec(in *extensions.ReplicaSetSpec, out *v1.ReplicationControllerSpec, s conversion.Scope) error { +func Convert_apps_ReplicaSetSpec_To_v1_ReplicationControllerSpec(in *apps.ReplicaSetSpec, out *v1.ReplicationControllerSpec, s conversion.Scope) error { out.Replicas = new(int32) *out.Replicas = in.Replicas out.MinReadySeconds = in.MinReadySeconds @@ -197,7 +197,7 @@ func Convert_extensions_ReplicaSetSpec_To_v1_ReplicationControllerSpec(in *exten return invalidErr } -func Convert_extensions_ReplicaSetStatus_To_v1_ReplicationControllerStatus(in *extensions.ReplicaSetStatus, out *v1.ReplicationControllerStatus, s conversion.Scope) error { +func Convert_apps_ReplicaSetStatus_To_v1_ReplicationControllerStatus(in *apps.ReplicaSetStatus, out *v1.ReplicationControllerStatus, s conversion.Scope) error { out.Replicas = in.Replicas out.FullyLabeledReplicas = in.FullyLabeledReplicas out.ReadyReplicas = in.ReadyReplicas diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go index 1c7e2b32d660..172d3797bf5b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go @@ -160,8 +160,16 @@ func SetDefaults_Pod(obj *v1.Pod) { } } } + if obj.Spec.EnableServiceLinks == nil { + enableServiceLinks := v1.DefaultEnableServiceLinks + obj.Spec.EnableServiceLinks = &enableServiceLinks + } } func SetDefaults_PodSpec(obj *v1.PodSpec) { + // New fields added here will break upgrade tests: + // https://github.com/kubernetes/kubernetes/issues/69445 + // In most cases the new defaulted field can be added to SetDefaults_Pod instead of here, so + // that it only materializes in the Pod object and not all objects with a PodSpec field. if obj.DNSPolicy == "" { obj.DNSPolicy = v1.DNSClusterFirst } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go index bf6c001b780a..fa11a6b36a92 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go @@ -500,3 +500,28 @@ func GetPersistentVolumeClaimClass(claim *v1.PersistentVolumeClaim) string { return "" } + +// ScopedResourceSelectorRequirementsAsSelector converts the ScopedResourceSelectorRequirement api type into a struct that implements +// labels.Selector.
+func ScopedResourceSelectorRequirementsAsSelector(ssr v1.ScopedResourceSelectorRequirement) (labels.Selector, error) { + selector := labels.NewSelector() + var op selection.Operator + switch ssr.Operator { + case v1.ScopeSelectorOpIn: + op = selection.In + case v1.ScopeSelectorOpNotIn: + op = selection.NotIn + case v1.ScopeSelectorOpExists: + op = selection.Exists + case v1.ScopeSelectorOpDoesNotExist: + op = selection.DoesNotExist + default: + return nil, fmt.Errorf("%q is not a valid scope selector operator", ssr.Operator) + } + r, err := labels.NewRequirement(string(ssr.ScopeName), op, ssr.Values) + if err != nil { + return nil, err + } + selector = selector.Add(*r) + return selector, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go index 07a8b4b55d71..439401425563 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go @@ -29,8 +29,8 @@ import ( conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" types "k8s.io/apimachinery/pkg/types" + apps "k8s.io/kubernetes/pkg/apis/apps" core "k8s.io/kubernetes/pkg/apis/core" - extensions "k8s.io/kubernetes/pkg/apis/extensions" ) func init() { @@ -610,6 +610,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*v1.GlusterfsPersistentVolumeSource)(nil), (*core.GlusterfsPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_GlusterfsPersistentVolumeSource_To_core_GlusterfsPersistentVolumeSource(a.(*v1.GlusterfsPersistentVolumeSource), b.(*core.GlusterfsPersistentVolumeSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.GlusterfsPersistentVolumeSource)(nil), (*v1.GlusterfsPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_GlusterfsPersistentVolumeSource_To_v1_GlusterfsPersistentVolumeSource(a.(*core.GlusterfsPersistentVolumeSource), b.(*v1.GlusterfsPersistentVolumeSource), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*v1.GlusterfsVolumeSource)(nil), (*core.GlusterfsVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource(a.(*v1.GlusterfsVolumeSource), b.(*core.GlusterfsVolumeSource), scope) }); err != nil { @@ -1990,6 +2000,21 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*apps.ReplicaSetSpec)(nil), (*v1.ReplicationControllerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetSpec_To_v1_ReplicationControllerSpec(a.(*apps.ReplicaSetSpec), b.(*v1.ReplicationControllerSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*apps.ReplicaSetStatus)(nil), (*v1.ReplicationControllerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetStatus_To_v1_ReplicationControllerStatus(a.(*apps.ReplicaSetStatus), b.(*v1.ReplicationControllerStatus), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*apps.ReplicaSet)(nil), (*v1.ReplicationController)(nil), func(a, b interface{}, scope 
conversion.Scope) error { + return Convert_apps_ReplicaSet_To_v1_ReplicationController(a.(*apps.ReplicaSet), b.(*v1.ReplicationController), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*core.PodSecurityContext)(nil), (*v1.PodSecurityContext)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_core_PodSecurityContext_To_v1_PodSecurityContext(a.(*core.PodSecurityContext), b.(*v1.PodSecurityContext), scope) }); err != nil { @@ -2020,21 +2045,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.ReplicaSetSpec)(nil), (*v1.ReplicationControllerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetSpec_To_v1_ReplicationControllerSpec(a.(*extensions.ReplicaSetSpec), b.(*v1.ReplicationControllerSpec), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*extensions.ReplicaSetStatus)(nil), (*v1.ReplicationControllerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetStatus_To_v1_ReplicationControllerStatus(a.(*extensions.ReplicaSetStatus), b.(*v1.ReplicationControllerStatus), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*extensions.ReplicaSet)(nil), (*v1.ReplicationController)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSet_To_v1_ReplicationController(a.(*extensions.ReplicaSet), b.(*v1.ReplicationController), scope) - }); err != nil { - return err - } if err := s.AddConversionFunc((*v1.PodSecurityContext)(nil), (*core.PodSecurityContext)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_PodSecurityContext_To_core_PodSecurityContext(a.(*v1.PodSecurityContext), b.(*core.PodSecurityContext), scope) }); err != nil { @@ -2055,23 +2065,23 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*v1.ReplicationControllerSpec)(nil), (*core.ReplicationControllerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(a.(*v1.ReplicationControllerSpec), b.(*core.ReplicationControllerSpec), scope) + if err := s.AddConversionFunc((*v1.ReplicationControllerSpec)(nil), (*apps.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ReplicationControllerSpec_To_apps_ReplicaSetSpec(a.(*v1.ReplicationControllerSpec), b.(*apps.ReplicaSetSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.ReplicationControllerSpec)(nil), (*extensions.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicationControllerSpec_To_extensions_ReplicaSetSpec(a.(*v1.ReplicationControllerSpec), b.(*extensions.ReplicaSetSpec), scope) + if err := s.AddConversionFunc((*v1.ReplicationControllerSpec)(nil), (*core.ReplicationControllerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(a.(*v1.ReplicationControllerSpec), b.(*core.ReplicationControllerSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.ReplicationControllerStatus)(nil), (*extensions.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1_ReplicationControllerStatus_To_extensions_ReplicaSetStatus(a.(*v1.ReplicationControllerStatus), b.(*extensions.ReplicaSetStatus), scope) + if err := s.AddConversionFunc((*v1.ReplicationControllerStatus)(nil), (*apps.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ReplicationControllerStatus_To_apps_ReplicaSetStatus(a.(*v1.ReplicationControllerStatus), b.(*apps.ReplicaSetStatus), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1.ReplicationController)(nil), (*extensions.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicationController_To_extensions_ReplicaSet(a.(*v1.ReplicationController), b.(*extensions.ReplicaSet), scope) + if err := s.AddConversionFunc((*v1.ReplicationController)(nil), (*apps.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ReplicationController_To_apps_ReplicaSet(a.(*v1.ReplicationController), b.(*apps.ReplicaSet), scope) }); err != nil { return err } @@ -3602,6 +3612,32 @@ func Convert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *core.GitRepo return autoConvert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in, out, s) } +func autoConvert_v1_GlusterfsPersistentVolumeSource_To_core_GlusterfsPersistentVolumeSource(in *v1.GlusterfsPersistentVolumeSource, out *core.GlusterfsPersistentVolumeSource, s conversion.Scope) error { + out.EndpointsName = in.EndpointsName + out.Path = in.Path + out.ReadOnly = in.ReadOnly + out.EndpointsNamespace = (*string)(unsafe.Pointer(in.EndpointsNamespace)) + return nil +} + +// Convert_v1_GlusterfsPersistentVolumeSource_To_core_GlusterfsPersistentVolumeSource is an autogenerated conversion function. +func Convert_v1_GlusterfsPersistentVolumeSource_To_core_GlusterfsPersistentVolumeSource(in *v1.GlusterfsPersistentVolumeSource, out *core.GlusterfsPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_GlusterfsPersistentVolumeSource_To_core_GlusterfsPersistentVolumeSource(in, out, s) +} + +func autoConvert_core_GlusterfsPersistentVolumeSource_To_v1_GlusterfsPersistentVolumeSource(in *core.GlusterfsPersistentVolumeSource, out *v1.GlusterfsPersistentVolumeSource, s conversion.Scope) error { + out.EndpointsName = in.EndpointsName + out.Path = in.Path + out.ReadOnly = in.ReadOnly + out.EndpointsNamespace = (*string)(unsafe.Pointer(in.EndpointsNamespace)) + return nil +} + +// Convert_core_GlusterfsPersistentVolumeSource_To_v1_GlusterfsPersistentVolumeSource is an autogenerated conversion function. 
+func Convert_core_GlusterfsPersistentVolumeSource_To_v1_GlusterfsPersistentVolumeSource(in *core.GlusterfsPersistentVolumeSource, out *v1.GlusterfsPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_GlusterfsPersistentVolumeSource_To_v1_GlusterfsPersistentVolumeSource(in, out, s) +} + func autoConvert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource(in *v1.GlusterfsVolumeSource, out *core.GlusterfsVolumeSource, s conversion.Scope) error { out.EndpointsName = in.EndpointsName out.Path = in.Path @@ -4932,7 +4968,7 @@ func autoConvert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(in *v1 out.GCEPersistentDisk = (*core.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) out.AWSElasticBlockStore = (*core.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) out.HostPath = (*core.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) - out.Glusterfs = (*core.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.Glusterfs = (*core.GlusterfsPersistentVolumeSource)(unsafe.Pointer(in.Glusterfs)) out.NFS = (*core.NFSVolumeSource)(unsafe.Pointer(in.NFS)) out.RBD = (*core.RBDPersistentVolumeSource)(unsafe.Pointer(in.RBD)) out.ISCSI = (*core.ISCSIPersistentVolumeSource)(unsafe.Pointer(in.ISCSI)) @@ -4963,7 +4999,7 @@ func autoConvert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *co out.GCEPersistentDisk = (*v1.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) out.AWSElasticBlockStore = (*v1.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) out.HostPath = (*v1.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) - out.Glusterfs = (*v1.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.Glusterfs = (*v1.GlusterfsPersistentVolumeSource)(unsafe.Pointer(in.Glusterfs)) out.NFS = (*v1.NFSVolumeSource)(unsafe.Pointer(in.NFS)) out.RBD = (*v1.RBDPersistentVolumeSource)(unsafe.Pointer(in.RBD)) out.Quobyte = (*v1.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) @@ -5550,6 +5586,7 @@ func autoConvert_v1_PodSpec_To_core_PodSpec(in *v1.PodSpec, out *core.PodSpec, s out.DNSConfig = (*core.PodDNSConfig)(unsafe.Pointer(in.DNSConfig)) out.ReadinessGates = *(*[]core.PodReadinessGate)(unsafe.Pointer(&in.ReadinessGates)) out.RuntimeClassName = (*string)(unsafe.Pointer(in.RuntimeClassName)) + out.EnableServiceLinks = (*bool)(unsafe.Pointer(in.EnableServiceLinks)) return nil } @@ -5616,6 +5653,7 @@ func autoConvert_core_PodSpec_To_v1_PodSpec(in *core.PodSpec, out *v1.PodSpec, s out.DNSConfig = (*v1.PodDNSConfig)(unsafe.Pointer(in.DNSConfig)) out.ReadinessGates = *(*[]v1.PodReadinessGate)(unsafe.Pointer(&in.ReadinessGates)) out.RuntimeClassName = (*string)(unsafe.Pointer(in.RuntimeClassName)) + out.EnableServiceLinks = (*bool)(unsafe.Pointer(in.EnableServiceLinks)) return nil } @@ -7255,7 +7293,7 @@ func Convert_core_TopologySelectorTerm_To_v1_TopologySelectorTerm(in *core.Topol } func autoConvert_v1_TypedLocalObjectReference_To_core_TypedLocalObjectReference(in *v1.TypedLocalObjectReference, out *core.TypedLocalObjectReference, s conversion.Scope) error { - out.APIGroup = in.APIGroup + out.APIGroup = (*string)(unsafe.Pointer(in.APIGroup)) out.Kind = in.Kind out.Name = in.Name return nil @@ -7267,7 +7305,7 @@ func Convert_v1_TypedLocalObjectReference_To_core_TypedLocalObjectReference(in * } func autoConvert_core_TypedLocalObjectReference_To_v1_TypedLocalObjectReference(in *core.TypedLocalObjectReference, out *v1.TypedLocalObjectReference, s conversion.Scope) error { - out.APIGroup = 
in.APIGroup + out.APIGroup = (*string)(unsafe.Pointer(in.APIGroup)) out.Kind = in.Kind out.Name = in.Name return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/validation/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/validation/BUILD index fb9d6d48f6c6..a815e5b0ac78 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/validation/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/validation/BUILD @@ -32,13 +32,14 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -46,6 +47,7 @@ go_test( name = "go_default_test", srcs = [ "events_test.go", + "main_test.go", "validation_test.go", ], embed = [":go_default_library"], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go index 8f2e6db50267..6f0302c37fa1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go @@ -27,7 +27,7 @@ import ( "regexp" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" @@ -36,6 +36,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/diff" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/sets" @@ -902,6 +903,26 @@ func validateGlusterfsVolumeSource(glusterfs *core.GlusterfsVolumeSource, fldPat } return allErrs } +func validateGlusterfsPersistentVolumeSource(glusterfs *core.GlusterfsPersistentVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(glusterfs.EndpointsName) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("endpoints"), "")) + } + if len(glusterfs.Path) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) + } + if glusterfs.EndpointsNamespace != nil { + endpointNs := glusterfs.EndpointsNamespace + if *endpointNs == "" { + allErrs = append(allErrs, field.Invalid(fldPath.Child("endpointsNamespace"), *endpointNs, "if the endpointnamespace is set, it must be a valid namespace name")) + } else { + for _, msg := range ValidateNamespaceName(*endpointNs, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("endpointsNamespace"), *endpointNs, msg)) + } + } + } + return allErrs +} func validateFlockerVolumeSource(flocker *core.FlockerVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} @@ -1114,10 
+1135,6 @@ func validateMountPropagation(mountPropagation *core.MountPropagationMode, conta if mountPropagation == nil { return allErrs } - if !utilfeature.DefaultFeatureGate.Enabled(features.MountPropagation) { - allErrs = append(allErrs, field.Forbidden(fldPath, "mount propagation is disabled by feature-gate")) - return allErrs - } supportedMountPropagations := sets.NewString(string(core.MountPropagationBidirectional), string(core.MountPropagationHostToContainer), string(core.MountPropagationNone)) if !supportedMountPropagations.Has(string(*mountPropagation)) { @@ -1426,24 +1443,27 @@ func validateStorageOSPersistentVolumeSource(storageos *core.StorageOSPersistent return allErrs } -func validateCSIPersistentVolumeSource(csi *core.CSIPersistentVolumeSource, fldPath *field.Path) field.ErrorList { +func ValidateCSIDriverName(driverName string, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - if !utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) { - allErrs = append(allErrs, field.Forbidden(fldPath, "CSIPersistentVolume disabled by feature-gate")) + if len(driverName) == 0 { + allErrs = append(allErrs, field.Required(fldPath, "")) } - if len(csi.Driver) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("driver"), "")) + if len(driverName) > 63 { + allErrs = append(allErrs, field.TooLong(fldPath, driverName, 63)) } - if len(csi.Driver) > 63 { - allErrs = append(allErrs, field.TooLong(fldPath.Child("driver"), csi.Driver, 63)) + if !csiDriverNameRexp.MatchString(driverName) { + allErrs = append(allErrs, field.Invalid(fldPath, driverName, validation.RegexError(csiDriverNameRexpErrMsg, csiDriverNameRexpFmt, "csi-hostpath"))) } + return allErrs +} - if !csiDriverNameRexp.MatchString(csi.Driver) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("driver"), csi.Driver, validation.RegexError(csiDriverNameRexpErrMsg, csiDriverNameRexpFmt, "csi-hostpath"))) - } +func validateCSIPersistentVolumeSource(csi *core.CSIPersistentVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + allErrs = append(allErrs, ValidateCSIDriverName(csi.Driver, fldPath.Child("driver"))...) if len(csi.VolumeHandle) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("volumeHandle"), "")) @@ -1501,8 +1521,9 @@ var supportedReclaimPolicy = sets.NewString(string(core.PersistentVolumeReclaimD var supportedVolumeModes = sets.NewString(string(core.PersistentVolumeBlock), string(core.PersistentVolumeFilesystem)) -var supportedDataSourceKinds = sets.NewString(string("VolumeSnapshot")) -var supportedDataSourceAPIGroups = sets.NewString(string("snapshot.storage.k8s.io")) +var supportedDataSourceAPIGroupKinds = map[schema.GroupKind]bool{ + {Group: "snapshot.storage.k8s.io", Kind: "VolumeSnapshot"}: true, +} func ValidatePersistentVolume(pv *core.PersistentVolume) field.ErrorList { metaPath := field.NewPath("metadata") @@ -1569,7 +1590,7 @@ func ValidatePersistentVolume(pv *core.PersistentVolume) field.ErrorList { allErrs = append(allErrs, field.Forbidden(specPath.Child("glusterfs"), "may not specify more than 1 volume type")) } else { numVolumes++ - allErrs = append(allErrs, validateGlusterfsVolumeSource(pv.Spec.Glusterfs, specPath.Child("glusterfs"))...) + allErrs = append(allErrs, validateGlusterfsPersistentVolumeSource(pv.Spec.Glusterfs, specPath.Child("glusterfs"))...) 
} } if pv.Spec.Flocker != nil { @@ -1833,10 +1854,18 @@ func ValidatePersistentVolumeClaimSpec(spec *core.PersistentVolumeClaimSpec, fld } else if spec.DataSource != nil { if len(spec.DataSource.Name) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("dataSource", "name"), "")) - } else if !supportedDataSourceKinds.Has(string(spec.DataSource.Kind)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("dataSource"), spec.DataSource.Kind, supportedDataSourceKinds.List())) - } else if !supportedDataSourceAPIGroups.Has(string(spec.DataSource.APIGroup)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("dataSource"), spec.DataSource.APIGroup, supportedDataSourceAPIGroups.List())) + } + + groupKind := schema.GroupKind{Group: "", Kind: spec.DataSource.Kind} + if spec.DataSource.APIGroup != nil { + groupKind.Group = string(*spec.DataSource.APIGroup) + } + groupKindList := make([]string, 0, len(supportedDataSourceAPIGroupKinds)) + for grp := range supportedDataSourceAPIGroupKinds { + groupKindList = append(groupKindList, grp.String()) + } + if !supportedDataSourceAPIGroupKinds[groupKind] { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("dataSource"), groupKind.String(), groupKindList)) } } @@ -4275,7 +4304,7 @@ func ValidateNodeUpdate(node, oldNode *core.Node) field.ErrorList { // We made allowed changes to oldNode, and now we compare oldNode to node. Any remaining differences indicate changes to protected fields. // TODO: Add a 'real' error type for this error and provide print actual diffs. if !apiequality.Semantic.DeepEqual(oldNode, node) { - glog.V(4).Infof("Update failed validation %#v vs %#v", oldNode, node) + klog.V(4).Infof("Update failed validation %#v vs %#v", oldNode, node) allErrs = append(allErrs, field.Forbidden(field.NewPath(""), "node updates may only change labels, taints, or capacity (or configSource, if the DynamicKubeletConfig feature gate is enabled)")) } @@ -4902,7 +4931,7 @@ func validateScopedResourceSelectorRequirement(resourceQuotaSpec *core.ResourceQ case core.ScopeSelectorOpIn, core.ScopeSelectorOpNotIn: if len(req.Values) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("values"), - "must be atleast one value when `operator` is 'In' or 'NotIn' for scope selector")) + "must be at least one value when `operator` is 'In' or 'NotIn' for scope selector")) } case core.ScopeSelectorOpExists, core.ScopeSelectorOpDoesNotExist: if len(req.Values) != 0 { @@ -5114,50 +5143,16 @@ func ValidateNamespaceFinalizeUpdate(newNamespace, oldNamespace *core.Namespace) return allErrs } -// Construct lookup map of old subset IPs to NodeNames. 
-func updateEpAddrToNodeNameMap(ipToNodeName map[string]string, addresses []core.EndpointAddress) { - for n := range addresses { - if addresses[n].NodeName == nil { - continue - } - ipToNodeName[addresses[n].IP] = *addresses[n].NodeName - } -} - -// Build a map across all subsets of IP -> NodeName -func buildEndpointAddressNodeNameMap(subsets []core.EndpointSubset) map[string]string { - ipToNodeName := make(map[string]string) - for i := range subsets { - updateEpAddrToNodeNameMap(ipToNodeName, subsets[i].Addresses) - updateEpAddrToNodeNameMap(ipToNodeName, subsets[i].NotReadyAddresses) - } - return ipToNodeName -} - -func validateEpAddrNodeNameTransition(addr *core.EndpointAddress, ipToNodeName map[string]string, fldPath *field.Path) field.ErrorList { - errList := field.ErrorList{} - existingNodeName, found := ipToNodeName[addr.IP] - if !found { - return errList - } - if addr.NodeName == nil || *addr.NodeName == existingNodeName { - return errList - } - // NodeName entry found for this endpoint IP, but user is attempting to change NodeName - return append(errList, field.Forbidden(fldPath, fmt.Sprintf("Cannot change NodeName for %s to %s", addr.IP, *addr.NodeName))) -} - // ValidateEndpoints tests if required fields are set. func ValidateEndpoints(endpoints *core.Endpoints) field.ErrorList { allErrs := ValidateObjectMeta(&endpoints.ObjectMeta, true, ValidateEndpointsName, field.NewPath("metadata")) allErrs = append(allErrs, ValidateEndpointsSpecificAnnotations(endpoints.Annotations, field.NewPath("annotations"))...) - allErrs = append(allErrs, validateEndpointSubsets(endpoints.Subsets, []core.EndpointSubset{}, field.NewPath("subsets"))...) + allErrs = append(allErrs, validateEndpointSubsets(endpoints.Subsets, field.NewPath("subsets"))...) return allErrs } -func validateEndpointSubsets(subsets []core.EndpointSubset, oldSubsets []core.EndpointSubset, fldPath *field.Path) field.ErrorList { +func validateEndpointSubsets(subsets []core.EndpointSubset, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - ipToNodeName := buildEndpointAddressNodeNameMap(oldSubsets) for i := range subsets { ss := &subsets[i] idxPath := fldPath.Index(i) @@ -5168,10 +5163,10 @@ func validateEndpointSubsets(subsets []core.EndpointSubset, oldSubsets []core.En allErrs = append(allErrs, field.Required(idxPath, "must specify `addresses` or `notReadyAddresses`")) } for addr := range ss.Addresses { - allErrs = append(allErrs, validateEndpointAddress(&ss.Addresses[addr], idxPath.Child("addresses").Index(addr), ipToNodeName)...) + allErrs = append(allErrs, validateEndpointAddress(&ss.Addresses[addr], idxPath.Child("addresses").Index(addr))...) } for addr := range ss.NotReadyAddresses { - allErrs = append(allErrs, validateEndpointAddress(&ss.NotReadyAddresses[addr], idxPath.Child("notReadyAddresses").Index(addr), ipToNodeName)...) + allErrs = append(allErrs, validateEndpointAddress(&ss.NotReadyAddresses[addr], idxPath.Child("notReadyAddresses").Index(addr))...) } for port := range ss.Ports { allErrs = append(allErrs, validateEndpointPort(&ss.Ports[port], len(ss.Ports) > 1, idxPath.Child("ports").Index(port))...) 
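
Review note on the endpoints hunks above: dropping the `oldSubsets` parameter removes the old IP-to-NodeName transition map, so an update may now move an existing endpoint IP to a different `nodeName` (the nodeIP/PodCIDR reuse case that the new `ValidateEndpointsUpdate` comment below describes). The following is a hedged sketch of the resulting behavior, not part of the patch; it assumes the vendored `core` and `validation` packages, and all object names and addresses are illustrative.

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	core "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/apis/core/validation"
)

// endpointsOn builds a minimal Endpoints object pinning 10.0.0.1 to the
// given node; every value here is illustrative.
func endpointsOn(nodeName string) *core.Endpoints {
	return &core.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default", ResourceVersion: "1"},
		Subsets: []core.EndpointSubset{{
			Addresses: []core.EndpointAddress{{IP: "10.0.0.1", NodeName: &nodeName}},
			Ports:     []core.EndpointPort{{Name: "http", Port: 80, Protocol: core.ProtocolTCP}},
		}},
	}
}

func main() {
	oldEp := endpointsOn("node-a")
	newEp := endpointsOn("node-b") // same IP, nodeName changed
	// Before this patch the nodeName transition was Forbidden; with the
	// oldSubsets bookkeeping removed, the update should validate cleanly.
	errs := validation.ValidateEndpointsUpdate(newEp, oldEp)
	fmt.Println("update errors:", errs.ToAggregate())
}
```

The design trade-off is that validation becomes stateless with respect to the previous object: correctness of NodeName assignments is left to the endpoints controller, while the validator only checks each address in isolation.
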
@@ -5181,7 +5176,7 @@ func validateEndpointSubsets(subsets []core.EndpointSubset, oldSubsets []core.En return allErrs } -func validateEndpointAddress(address *core.EndpointAddress, fldPath *field.Path, ipToNodeName map[string]string) field.ErrorList { +func validateEndpointAddress(address *core.EndpointAddress, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for _, msg := range validation.IsValidIP(address.IP) { allErrs = append(allErrs, field.Invalid(fldPath.Child("ip"), address.IP, msg)) @@ -5195,10 +5190,6 @@ func validateEndpointAddress(address *core.EndpointAddress, fldPath *field.Path, allErrs = append(allErrs, field.Invalid(fldPath.Child("nodeName"), *address.NodeName, msg)) } } - allErrs = append(allErrs, validateEpAddrNodeNameTransition(address, ipToNodeName, fldPath.Child("nodeName"))...) - if len(allErrs) > 0 { - return allErrs - } allErrs = append(allErrs, validateNonSpecialIP(address.IP, fldPath.Child("ip"))...) return allErrs } @@ -5250,9 +5241,11 @@ func validateEndpointPort(port *core.EndpointPort, requireName bool, fldPath *fi } // ValidateEndpointsUpdate tests to make sure an endpoints update can be applied. +// NodeName changes are allowed during update to accommodate the case where nodeIP or PodCIDR is reused. +// An existing endpoint ip will have a different nodeName if this happens. func ValidateEndpointsUpdate(newEndpoints, oldEndpoints *core.Endpoints) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&newEndpoints.ObjectMeta, &oldEndpoints.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, validateEndpointSubsets(newEndpoints.Subsets, oldEndpoints.Subsets, field.NewPath("subsets"))...) + allErrs = append(allErrs, validateEndpointSubsets(newEndpoints.Subsets, field.NewPath("subsets"))...) allErrs = append(allErrs, ValidateEndpointsSpecificAnnotations(newEndpoints.Annotations, field.NewPath("annotations"))...) return allErrs } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go index 08d2045fe55a..a4801c2e313b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go @@ -1498,6 +1498,27 @@ func (in *GitRepoVolumeSource) DeepCopy() *GitRepoVolumeSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlusterfsPersistentVolumeSource) DeepCopyInto(out *GlusterfsPersistentVolumeSource) { + *out = *in + if in.EndpointsNamespace != nil { + in, out := &in.EndpointsNamespace, &out.EndpointsNamespace + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlusterfsPersistentVolumeSource. +func (in *GlusterfsPersistentVolumeSource) DeepCopy() *GlusterfsPersistentVolumeSource { + if in == nil { + return nil + } + out := new(GlusterfsPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *GlusterfsVolumeSource) DeepCopyInto(out *GlusterfsVolumeSource) { *out = *in @@ -2689,7 +2710,7 @@ func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec if in.DataSource != nil { in, out := &in.DataSource, &out.DataSource *out = new(TypedLocalObjectReference) - **out = **in + (*in).DeepCopyInto(*out) } return } @@ -2808,8 +2829,8 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) { } if in.Glusterfs != nil { in, out := &in.Glusterfs, &out.Glusterfs - *out = new(GlusterfsVolumeSource) - **out = **in + *out = new(GlusterfsPersistentVolumeSource) + (*in).DeepCopyInto(*out) } if in.NFS != nil { in, out := &in.NFS, &out.NFS @@ -3556,6 +3577,11 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { *out = new(string) **out = **in } + if in.EnableServiceLinks != nil { + in, out := &in.EnableServiceLinks, &out.EnableServiceLinks + *out = new(bool) + **out = **in + } return } @@ -5075,6 +5101,11 @@ func (in *TopologySelectorTerm) DeepCopy() *TopologySelectorTerm { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TypedLocalObjectReference) DeepCopyInto(out *TypedLocalObjectReference) { *out = *in + if in.APIGroup != nil { + in, out := &in.APIGroup, &out.APIGroup + *out = new(string) + **out = **in + } return } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/events/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/events/doc.go index 187271ac933f..3ba3d5e7d7dc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/events/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/events/doc.go @@ -15,4 +15,5 @@ limitations under the License. */ // +groupName=events.k8s.io + package events diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/events/v1beta1/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/events/v1beta1/doc.go index 8144152cefd3..b19ec136d4df 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/events/v1beta1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/events/v1beta1/doc.go @@ -20,4 +20,5 @@ limitations under the License. 
// +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/events/v1beta1 // +groupName=events.k8s.io + package v1beta1 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/BUILD index 764a5af867f8..d1d43ed5a1b8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/BUILD @@ -15,11 +15,11 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/apis/extensions", deps = [ + "//pkg/apis/apps:go_default_library", "//pkg/apis/autoscaling:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/apis/networking:go_default_library", "//pkg/apis/policy:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go index 84c2071ae664..d4644ffaee31 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go @@ -19,6 +19,7 @@ package extensions import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/autoscaling" "k8s.io/kubernetes/pkg/apis/networking" "k8s.io/kubernetes/pkg/apis/policy" @@ -49,16 +50,16 @@ var ( func addKnownTypes(scheme *runtime.Scheme) error { // TODO this gets cleaned up when the types are fixed scheme.AddKnownTypes(SchemeGroupVersion, - &Deployment{}, - &DeploymentList{}, - &DeploymentRollback{}, + &apps.Deployment{}, + &apps.DeploymentList{}, + &apps.DeploymentRollback{}, &ReplicationControllerDummy{}, - &DaemonSetList{}, - &DaemonSet{}, + &apps.DaemonSetList{}, + &apps.DaemonSet{}, &Ingress{}, &IngressList{}, - &ReplicaSet{}, - &ReplicaSetList{}, + &apps.ReplicaSet{}, + &apps.ReplicaSetList{}, &policy.PodSecurityPolicy{}, &policy.PodSecurityPolicyList{}, &autoscaling.Scale{}, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go index 8dd49b466540..20637e74e460 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go @@ -29,7 +29,6 @@ support is experimental. package extensions import ( - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" api "k8s.io/kubernetes/pkg/apis/core" @@ -42,458 +41,6 @@ type ReplicationControllerDummy struct { metav1.TypeMeta } -// Alpha-level support for Custom Metrics in HPA (as annotations). -type CustomMetricTarget struct { - // Custom Metric name. - Name string - // Custom Metric value (average). - TargetValue resource.Quantity -} - -type CustomMetricTargetList struct { - Items []CustomMetricTarget -} - -type CustomMetricCurrentStatus struct { - // Custom Metric name. - Name string - // Custom Metric value (average). 
- CurrentValue resource.Quantity -} - -type CustomMetricCurrentStatusList struct { - Items []CustomMetricCurrentStatus -} - -// +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/autoscaling.Scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type Deployment struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Specification of the desired behavior of the Deployment. - // +optional - Spec DeploymentSpec - - // Most recently observed status of the Deployment. - // +optional - Status DeploymentStatus -} - -type DeploymentSpec struct { - // Number of desired pods. This is a pointer to distinguish between explicit - // zero and not specified. Defaults to 1. - // +optional - Replicas int32 - - // Label selector for pods. Existing ReplicaSets whose pods are - // selected by this will be the ones affected by this deployment. - // +optional - Selector *metav1.LabelSelector - - // Template describes the pods that will be created. - Template api.PodTemplateSpec - - // The deployment strategy to use to replace existing pods with new ones. - // +optional - Strategy DeploymentStrategy - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - // +optional - MinReadySeconds int32 - - // The number of old ReplicaSets to retain to allow rollback. - // This is a pointer to distinguish between explicit zero and not specified. - // +optional - RevisionHistoryLimit *int32 - - // Indicates that the deployment is paused and will not be processed by the - // deployment controller. - // +optional - Paused bool - - // DEPRECATED. - // The config this deployment is rolling back to. Will be cleared after rollback is done. - // +optional - RollbackTo *RollbackConfig - - // The maximum time in seconds for a deployment to make progress before it - // is considered to be failed. The deployment controller will continue to - // process failed deployments and a condition with a ProgressDeadlineExceeded - // reason will be surfaced in the deployment status. Note that progress will - // not be estimated during the time a deployment is paused. This is set to - // the max value of int32 (i.e. 2147483647) by default, which means "no deadline". - // +optional - ProgressDeadlineSeconds *int32 -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DEPRECATED. -// DeploymentRollback stores the information required to rollback a deployment. -type DeploymentRollback struct { - metav1.TypeMeta - // Required: This must match the Name of a deployment. - Name string - // The annotations to be updated to a deployment - // +optional - UpdatedAnnotations map[string]string - // The config of this deployment rollback. - RollbackTo RollbackConfig -} - -// DEPRECATED. -type RollbackConfig struct { - // The revision to rollback to. If set to 0, rollback to the last revision. - // +optional - Revision int64 -} - -const ( - // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added - // to existing RCs (and label key that is added to its pods) to prevent the existing RCs - // to select new pods (and old pods being select by new RC). 
- DefaultDeploymentUniqueLabelKey string = "pod-template-hash" -) - -type DeploymentStrategy struct { - // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. - // +optional - Type DeploymentStrategyType - - // Rolling update config params. Present only if DeploymentStrategyType = - // RollingUpdate. - //--- - // TODO: Update this to follow our convention for oneOf, whatever we decide it - // to be. - // +optional - RollingUpdate *RollingUpdateDeployment -} - -type DeploymentStrategyType string - -const ( - // Kill all existing pods before creating new ones. - RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate" - - // Replace the old RCs by new one using rolling update i.e gradually scale down the old RCs and scale up the new one. - RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate" -) - -// Spec to control the desired behavior of rolling update. -type RollingUpdateDeployment struct { - // The maximum number of pods that can be unavailable during the update. - // Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%). - // Absolute number is calculated from percentage by rounding down. - // This can not be 0 if MaxSurge is 0. - // By default, a fixed value of 1 is used. - // Example: when this is set to 30%, the old RC can be scaled down by 30% - // immediately when the rolling update starts. Once new pods are ready, old RC - // can be scaled down further, followed by scaling up the new RC, ensuring - // that at least 70% of original number of pods are available at all times - // during the update. - // +optional - MaxUnavailable intstr.IntOrString - - // The maximum number of pods that can be scheduled above the original number of - // pods. - // Value can be an absolute number (ex: 5) or a percentage of total pods at - // the start of the update (ex: 10%). This can not be 0 if MaxUnavailable is 0. - // Absolute number is calculated from percentage by rounding up. - // By default, a value of 1 is used. - // Example: when this is set to 30%, the new RC can be scaled up by 30% - // immediately when the rolling update starts. Once old pods have been killed, - // new RC can be scaled up further, ensuring that total number of pods running - // at any time during the update is atmost 130% of original pods. - // +optional - MaxSurge intstr.IntOrString -} - -type DeploymentStatus struct { - // The generation observed by the deployment controller. - // +optional - ObservedGeneration int64 - - // Total number of non-terminated pods targeted by this deployment (their labels match the selector). - // +optional - Replicas int32 - - // Total number of non-terminated pods targeted by this deployment that have the desired template spec. - // +optional - UpdatedReplicas int32 - - // Total number of ready pods targeted by this deployment. - // +optional - ReadyReplicas int32 - - // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. - // +optional - AvailableReplicas int32 - - // Total number of unavailable pods targeted by this deployment. This is the total number of - // pods that are still required for the deployment to have 100% available capacity. They may - // either be pods that are running but not yet available or pods that still have not been created. - // +optional - UnavailableReplicas int32 - - // Represents the latest available observations of a deployment's current state. 
- Conditions []DeploymentCondition - - // Count of hash collisions for the Deployment. The Deployment controller uses this - // field as a collision avoidance mechanism when it needs to create the name for the - // newest ReplicaSet. - // +optional - CollisionCount *int32 -} - -type DeploymentConditionType string - -// These are valid conditions of a deployment. -const ( - // Available means the deployment is available, ie. at least the minimum available - // replicas required are up and running for at least minReadySeconds. - DeploymentAvailable DeploymentConditionType = "Available" - // Progressing means the deployment is progressing. Progress for a deployment is - // considered when a new replica set is created or adopted, and when new pods scale - // up or old pods scale down. Progress is not estimated for paused deployments or - // when progressDeadlineSeconds is not specified. - DeploymentProgressing DeploymentConditionType = "Progressing" - // ReplicaFailure is added in a deployment when one of its pods fails to be created - // or deleted. - DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure" -) - -// DeploymentCondition describes the state of a deployment at a certain point. -type DeploymentCondition struct { - // Type of deployment condition. - Type DeploymentConditionType - // Status of the condition, one of True, False, Unknown. - Status api.ConditionStatus - // The last time this condition was updated. - LastUpdateTime metav1.Time - // Last time the condition transitioned from one status to another. - LastTransitionTime metav1.Time - // The reason for the condition's last transition. - Reason string - // A human readable message indicating details about the transition. - Message string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type DeploymentList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - // Items is the list of deployments. - Items []Deployment -} - -type DaemonSetUpdateStrategy struct { - // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". - // Default is OnDelete. - // +optional - Type DaemonSetUpdateStrategyType - - // Rolling update config params. Present only if type = "RollingUpdate". - //--- - // TODO: Update this to follow our convention for oneOf, whatever we decide it - // to be. Same as Deployment `strategy.rollingUpdate`. - // See https://github.com/kubernetes/kubernetes/issues/35345 - // +optional - RollingUpdate *RollingUpdateDaemonSet -} - -type DaemonSetUpdateStrategyType string - -const ( - // Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other. - RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate" - - // Replace the old daemons only when it's killed - OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete" -) - -// Spec to control the desired behavior of daemon set rolling update. -type RollingUpdateDaemonSet struct { - // The maximum number of DaemonSet pods that can be unavailable during the - // update. Value can be an absolute number (ex: 5) or a percentage of total - // number of DaemonSet pods at the start of the update (ex: 10%). Absolute - // number is calculated from percentage by rounding up. - // This cannot be 0. - // Default value is 1. - // Example: when this is set to 30%, at most 30% of the total number of nodes - // that should be running the daemon pod (i.e. 
status.desiredNumberScheduled) - // can have their pods stopped for an update at any given - // time. The update starts by stopping at most 30% of those DaemonSet pods - // and then brings up new DaemonSet pods in their place. Once the new pods - // are available, it then proceeds onto other DaemonSet pods, thus ensuring - // that at least 70% of original number of DaemonSet pods are available at - // all times during the update. - // +optional - MaxUnavailable intstr.IntOrString -} - -// DaemonSetSpec is the specification of a daemon set. -type DaemonSetSpec struct { - // A label query over pods that are managed by the daemon set. - // Must match in order to be controlled. - // If empty, defaulted to labels on Pod template. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - Selector *metav1.LabelSelector - - // An object that describes the pod that will be created. - // The DaemonSet will create exactly one copy of this pod on every node - // that matches the template's node selector (or on every node if no node - // selector is specified). - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template - Template api.PodTemplateSpec - - // An update strategy to replace existing DaemonSet pods with new pods. - // +optional - UpdateStrategy DaemonSetUpdateStrategy - - // The minimum number of seconds for which a newly created DaemonSet pod should - // be ready without any of its container crashing, for it to be considered - // available. Defaults to 0 (pod will be considered available as soon as it - // is ready). - // +optional - MinReadySeconds int32 - - // DEPRECATED. - // A sequence number representing a specific generation of the template. - // Populated by the system. It can be set only during the creation. - // +optional - TemplateGeneration int64 - - // The number of old history to retain to allow rollback. - // This is a pointer to distinguish between explicit zero and not specified. - // Defaults to 10. - // +optional - RevisionHistoryLimit *int32 -} - -// DaemonSetStatus represents the current status of a daemon set. -type DaemonSetStatus struct { - // The number of nodes that are running at least 1 - // daemon pod and are supposed to run the daemon pod. - CurrentNumberScheduled int32 - - // The number of nodes that are running the daemon pod, but are - // not supposed to run the daemon pod. - NumberMisscheduled int32 - - // The total number of nodes that should be running the daemon - // pod (including nodes correctly running the daemon pod). - DesiredNumberScheduled int32 - - // The number of nodes that should be running the daemon pod and have one - // or more of the daemon pod running and ready. - NumberReady int32 - - // The most recent generation observed by the daemon set controller. - // +optional - ObservedGeneration int64 - - // The total number of nodes that are running updated daemon pod - // +optional - UpdatedNumberScheduled int32 - - // The number of nodes that should be running the - // daemon pod and have one or more of the daemon pod running and - // available (ready for at least spec.minReadySeconds) - // +optional - NumberAvailable int32 - - // The number of nodes that should be running the - // daemon pod and have none of the daemon pod running and available - // (ready for at least spec.minReadySeconds) - // +optional - NumberUnavailable int32 - - // Count of hash collisions for the DaemonSet. 
The DaemonSet controller - // uses this field as a collision avoidance mechanism when it needs to - // create the name for the newest ControllerRevision. - // +optional - CollisionCount *int32 - - // Represents the latest available observations of a DaemonSet's current state. - Conditions []DaemonSetCondition -} - -type DaemonSetConditionType string - -// TODO: Add valid condition types of a DaemonSet. - -// DaemonSetCondition describes the state of a DaemonSet at a certain point. -type DaemonSetCondition struct { - // Type of DaemonSet condition. - Type DaemonSetConditionType - // Status of the condition, one of True, False, Unknown. - Status api.ConditionStatus - // Last time the condition transitioned from one status to another. - LastTransitionTime metav1.Time - // The reason for the condition's last transition. - Reason string - // A human readable message indicating details about the transition. - Message string -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DaemonSet represents the configuration of a daemon set. -type DaemonSet struct { - metav1.TypeMeta - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta - - // The desired behavior of this daemon set. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Spec DaemonSetSpec - - // The current status of this daemon set. This data may be - // out of date by some window of time. - // Populated by the system. - // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Status DaemonSetStatus -} - -const ( - // DEPRECATED: DefaultDaemonSetUniqueLabelKey is used instead. - // DaemonSetTemplateGenerationKey is the key of the labels that is added - // to daemon set pods to distinguish between old and new pod templates - // during DaemonSet template update. - DaemonSetTemplateGenerationKey string = "pod-template-generation" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DaemonSetList is a collection of daemon sets. -type DaemonSetList struct { - metav1.TypeMeta - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta - - // A list of daemon sets. - Items []DaemonSet -} - // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -662,114 +209,3 @@ type IngressBackend struct { // Specifies the port of the referenced service. ServicePort intstr.IntOrString } - -// +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/autoscaling.Scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ReplicaSet ensures that a specified number of pod replicas are running at any given time. -type ReplicaSet struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the desired behavior of this ReplicaSet. - // +optional - Spec ReplicaSetSpec - - // Status is the current status of this ReplicaSet. This data may be - // out of date by some window of time. 
- // +optional - Status ReplicaSetStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ReplicaSetList is a collection of ReplicaSets. -type ReplicaSetList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []ReplicaSet -} - -// ReplicaSetSpec is the specification of a ReplicaSet. -// As the internal representation of a ReplicaSet, it must have -// a Template set. -type ReplicaSetSpec struct { - // Replicas is the number of desired replicas. - Replicas int32 - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - // +optional - MinReadySeconds int32 - - // Selector is a label query over pods that should match the replica count. - // Must match in order to be controlled. - // If empty, defaulted to labels on pod template. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - Selector *metav1.LabelSelector - - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. - // +optional - Template api.PodTemplateSpec -} - -// ReplicaSetStatus represents the current status of a ReplicaSet. -type ReplicaSetStatus struct { - // Replicas is the number of actual replicas. - Replicas int32 - - // The number of pods that have labels matching the labels of the pod template of the replicaset. - // +optional - FullyLabeledReplicas int32 - - // The number of ready replicas for this replica set. - // +optional - ReadyReplicas int32 - - // The number of available replicas (ready for at least minReadySeconds) for this replica set. - // +optional - AvailableReplicas int32 - - // ObservedGeneration is the most recent generation observed by the controller. - // +optional - ObservedGeneration int64 - - // Represents the latest available observations of a replica set's current state. - // +optional - Conditions []ReplicaSetCondition -} - -type ReplicaSetConditionType string - -// These are valid conditions of a replica set. -const ( - // ReplicaSetReplicaFailure is added in a replica set when one of its pods fails to be created - // due to insufficient quota, limit ranges, pod security policy, node selectors, etc. or deleted - // due to kubelet being down or finalizers are failing. - ReplicaSetReplicaFailure ReplicaSetConditionType = "ReplicaFailure" -) - -// ReplicaSetCondition describes the state of a replica set at a certain point. -type ReplicaSetCondition struct { - // Type of replica set condition. - Type ReplicaSetConditionType - // Status of the condition, one of True, False, Unknown. - Status api.ConditionStatus - // The last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time - // The reason for the condition's last transition. - // +optional - Reason string - // A human readable message indicating details about the transition. 
- // +optional - Message string -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/BUILD index af78736d3d0d..3f6c91a1966b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/BUILD @@ -18,6 +18,7 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/apis/extensions/v1beta1", deps = [ + "//pkg/apis/apps:go_default_library", "//pkg/apis/autoscaling:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/apis/core/v1:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go index ce7b78ae20bf..80a83b9b289a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go @@ -27,10 +27,10 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/autoscaling" api "k8s.io/kubernetes/pkg/apis/core" k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" - "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/apis/networking" ) @@ -39,16 +39,16 @@ func addConversionFuncs(scheme *runtime.Scheme) error { err := scheme.AddConversionFuncs( Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus, Convert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus, - Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec, - Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec, - Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy, - Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy, - Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment, - Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment, - Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet, - Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet, - Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec, - Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec, + Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec, + Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec, + Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy, + Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy, + Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment, + Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment, + Convert_apps_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet, + Convert_v1beta1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet, + Convert_apps_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec, + Convert_v1beta1_ReplicaSetSpec_To_apps_ReplicaSetSpec, Convert_v1beta1_NetworkPolicy_To_networking_NetworkPolicy, Convert_networking_NetworkPolicy_To_v1beta1_NetworkPolicy, Convert_v1beta1_NetworkPolicyIngressRule_To_networking_NetworkPolicyIngressRule, @@ -106,13 +106,13 @@ func Convert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus(in *extensionsv1beta return nil } -func Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions.DeploymentSpec, out *extensionsv1beta1.DeploymentSpec, s conversion.Scope) error { +func 
Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(in *apps.DeploymentSpec, out *extensionsv1beta1.DeploymentSpec, s conversion.Scope) error { out.Replicas = &in.Replicas out.Selector = in.Selector if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + if err := Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } if in.RevisionHistoryLimit != nil { @@ -134,7 +134,7 @@ func Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions. return nil } -func Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *extensionsv1beta1.DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { +func Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(in *extensionsv1beta1.DeploymentSpec, out *apps.DeploymentSpec, s conversion.Scope) error { if in.Replicas != nil { out.Replicas = *in.Replicas } @@ -142,14 +142,14 @@ func Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *extensionsv if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + if err := Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } out.RevisionHistoryLimit = in.RevisionHistoryLimit out.MinReadySeconds = in.MinReadySeconds out.Paused = in.Paused if in.RollbackTo != nil { - out.RollbackTo = new(extensions.RollbackConfig) + out.RollbackTo = new(apps.RollbackConfig) out.RollbackTo.Revision = in.RollbackTo.Revision } else { out.RollbackTo = nil @@ -161,11 +161,11 @@ func Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *extensionsv return nil } -func Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *extensionsv1beta1.DeploymentStrategy, s conversion.Scope) error { +func Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *apps.DeploymentStrategy, out *extensionsv1beta1.DeploymentStrategy, s conversion.Scope) error { out.Type = extensionsv1beta1.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { out.RollingUpdate = new(extensionsv1beta1.RollingUpdateDeployment) - if err := Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { + if err := Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { return err } } else { @@ -174,11 +174,11 @@ func Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *ext return nil } -func Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *extensionsv1beta1.DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error { - out.Type = extensions.DeploymentStrategyType(in.Type) +func Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(in *extensionsv1beta1.DeploymentStrategy, out *apps.DeploymentStrategy, s conversion.Scope) error { + out.Type = apps.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { - out.RollingUpdate = new(extensions.RollingUpdateDeployment) - if err := 
Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { + out.RollingUpdate = new(apps.RollingUpdateDeployment) + if err := Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { return err } } else { @@ -187,7 +187,7 @@ func Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *ext return nil } -func Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *extensionsv1beta1.RollingUpdateDeployment, s conversion.Scope) error { +func Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *extensionsv1beta1.RollingUpdateDeployment, s conversion.Scope) error { if out.MaxUnavailable == nil { out.MaxUnavailable = &intstr.IntOrString{} } @@ -203,7 +203,7 @@ func Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployme return nil } -func Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *extensionsv1beta1.RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error { +func Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *extensionsv1beta1.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error { if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { return err } @@ -213,7 +213,7 @@ func Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployme return nil } -func Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(in *extensions.RollingUpdateDaemonSet, out *extensionsv1beta1.RollingUpdateDaemonSet, s conversion.Scope) error { +func Convert_apps_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(in *apps.RollingUpdateDaemonSet, out *extensionsv1beta1.RollingUpdateDaemonSet, s conversion.Scope) error { if out.MaxUnavailable == nil { out.MaxUnavailable = &intstr.IntOrString{} } @@ -223,14 +223,14 @@ func Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet return nil } -func Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in *extensionsv1beta1.RollingUpdateDaemonSet, out *extensions.RollingUpdateDaemonSet, s conversion.Scope) error { +func Convert_v1beta1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in *extensionsv1beta1.RollingUpdateDaemonSet, out *apps.RollingUpdateDaemonSet, s conversion.Scope) error { if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { return err } return nil } -func Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *extensionsv1beta1.ReplicaSetSpec, s conversion.Scope) error { +func Convert_apps_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *apps.ReplicaSetSpec, out *extensionsv1beta1.ReplicaSetSpec, s conversion.Scope) error { out.Replicas = new(int32) *out.Replicas = int32(in.Replicas) out.MinReadySeconds = in.MinReadySeconds @@ -241,7 +241,7 @@ func Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *extensions. 
return nil } -func Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *extensionsv1beta1.ReplicaSetSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { +func Convert_v1beta1_ReplicaSetSpec_To_apps_ReplicaSetSpec(in *extensionsv1beta1.ReplicaSetSpec, out *apps.ReplicaSetSpec, s conversion.Scope) error { if in.Replicas != nil { out.Replicas = *in.Replicas } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults.go index 3138696a518e..4bb885567b7e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults.go @@ -118,6 +118,12 @@ func SetDefaults_Deployment(obj *extensionsv1beta1.Deployment) { obj.Spec.ProgressDeadlineSeconds = new(int32) *obj.Spec.ProgressDeadlineSeconds = math.MaxInt32 } + // Set extensionsv1beta1.DeploymentSpec.RevisionHistoryLimit to MaxInt32, + // which has the same meaning as unset. + if obj.Spec.RevisionHistoryLimit == nil { + obj.Spec.RevisionHistoryLimit = new(int32) + *obj.Spec.RevisionHistoryLimit = math.MaxInt32 + } } func SetDefaults_ReplicaSet(obj *extensionsv1beta1.ReplicaSet) { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/doc.go index 5211124581ff..06307d5dec0b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/doc.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ +// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/apps // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/policy // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/extensions // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/autoscaling diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.conversion.go index 1b35486b828d..3159bdc69e9a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.conversion.go @@ -28,6 +28,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" + apps "k8s.io/kubernetes/pkg/apis/apps" autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" core "k8s.io/kubernetes/pkg/apis/core" corev1 "k8s.io/kubernetes/pkg/apis/core/v1" @@ -63,173 +64,133 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.CustomMetricCurrentStatus)(nil), (*extensions.CustomMetricCurrentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus(a.(*v1beta1.CustomMetricCurrentStatus), b.(*extensions.CustomMetricCurrentStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DaemonSet)(nil), (*apps.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DaemonSet_To_apps_DaemonSet(a.(*v1beta1.DaemonSet), b.(*apps.DaemonSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.CustomMetricCurrentStatus)(nil), (*v1beta1.CustomMetricCurrentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus(a.(*extensions.CustomMetricCurrentStatus), b.(*v1beta1.CustomMetricCurrentStatus), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSet)(nil), (*v1beta1.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSet_To_v1beta1_DaemonSet(a.(*apps.DaemonSet), b.(*v1beta1.DaemonSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.CustomMetricCurrentStatusList)(nil), (*extensions.CustomMetricCurrentStatusList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList(a.(*v1beta1.CustomMetricCurrentStatusList), b.(*extensions.CustomMetricCurrentStatusList), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DaemonSetCondition)(nil), (*apps.DaemonSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DaemonSetCondition_To_apps_DaemonSetCondition(a.(*v1beta1.DaemonSetCondition), b.(*apps.DaemonSetCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.CustomMetricCurrentStatusList)(nil), (*v1beta1.CustomMetricCurrentStatusList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList(a.(*extensions.CustomMetricCurrentStatusList), b.(*v1beta1.CustomMetricCurrentStatusList), 
scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSetCondition)(nil), (*v1beta1.DaemonSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetCondition_To_v1beta1_DaemonSetCondition(a.(*apps.DaemonSetCondition), b.(*v1beta1.DaemonSetCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.CustomMetricTarget)(nil), (*extensions.CustomMetricTarget)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget(a.(*v1beta1.CustomMetricTarget), b.(*extensions.CustomMetricTarget), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DaemonSetList)(nil), (*apps.DaemonSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DaemonSetList_To_apps_DaemonSetList(a.(*v1beta1.DaemonSetList), b.(*apps.DaemonSetList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.CustomMetricTarget)(nil), (*v1beta1.CustomMetricTarget)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget(a.(*extensions.CustomMetricTarget), b.(*v1beta1.CustomMetricTarget), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSetList)(nil), (*v1beta1.DaemonSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetList_To_v1beta1_DaemonSetList(a.(*apps.DaemonSetList), b.(*v1beta1.DaemonSetList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.CustomMetricTargetList)(nil), (*extensions.CustomMetricTargetList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList(a.(*v1beta1.CustomMetricTargetList), b.(*extensions.CustomMetricTargetList), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DaemonSetSpec)(nil), (*apps.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DaemonSetSpec_To_apps_DaemonSetSpec(a.(*v1beta1.DaemonSetSpec), b.(*apps.DaemonSetSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.CustomMetricTargetList)(nil), (*v1beta1.CustomMetricTargetList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList(a.(*extensions.CustomMetricTargetList), b.(*v1beta1.CustomMetricTargetList), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSetSpec)(nil), (*v1beta1.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetSpec_To_v1beta1_DaemonSetSpec(a.(*apps.DaemonSetSpec), b.(*v1beta1.DaemonSetSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DaemonSet)(nil), (*extensions.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DaemonSet_To_extensions_DaemonSet(a.(*v1beta1.DaemonSet), b.(*extensions.DaemonSet), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DaemonSetStatus)(nil), (*apps.DaemonSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DaemonSetStatus_To_apps_DaemonSetStatus(a.(*v1beta1.DaemonSetStatus), b.(*apps.DaemonSetStatus), scope) }); err != nil { return err } - if err := 
s.AddGeneratedConversionFunc((*extensions.DaemonSet)(nil), (*v1beta1.DaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSet_To_v1beta1_DaemonSet(a.(*extensions.DaemonSet), b.(*v1beta1.DaemonSet), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSetStatus)(nil), (*v1beta1.DaemonSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetStatus_To_v1beta1_DaemonSetStatus(a.(*apps.DaemonSetStatus), b.(*v1beta1.DaemonSetStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DaemonSetCondition)(nil), (*extensions.DaemonSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DaemonSetCondition_To_extensions_DaemonSetCondition(a.(*v1beta1.DaemonSetCondition), b.(*extensions.DaemonSetCondition), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DaemonSetUpdateStrategy)(nil), (*apps.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(a.(*v1beta1.DaemonSetUpdateStrategy), b.(*apps.DaemonSetUpdateStrategy), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSetCondition)(nil), (*v1beta1.DaemonSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetCondition_To_v1beta1_DaemonSetCondition(a.(*extensions.DaemonSetCondition), b.(*v1beta1.DaemonSetCondition), scope) + if err := s.AddGeneratedConversionFunc((*apps.DaemonSetUpdateStrategy)(nil), (*v1beta1.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(a.(*apps.DaemonSetUpdateStrategy), b.(*v1beta1.DaemonSetUpdateStrategy), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DaemonSetList)(nil), (*extensions.DaemonSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(a.(*v1beta1.DaemonSetList), b.(*extensions.DaemonSetList), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.Deployment)(nil), (*apps.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Deployment_To_apps_Deployment(a.(*v1beta1.Deployment), b.(*apps.Deployment), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSetList)(nil), (*v1beta1.DaemonSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(a.(*extensions.DaemonSetList), b.(*v1beta1.DaemonSetList), scope) + if err := s.AddGeneratedConversionFunc((*apps.Deployment)(nil), (*v1beta1.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_Deployment_To_v1beta1_Deployment(a.(*apps.Deployment), b.(*v1beta1.Deployment), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DaemonSetSpec)(nil), (*extensions.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(a.(*v1beta1.DaemonSetSpec), b.(*extensions.DaemonSetSpec), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentCondition)(nil), (*apps.DeploymentCondition)(nil), func(a, b interface{}, scope 
conversion.Scope) error { + return Convert_v1beta1_DeploymentCondition_To_apps_DeploymentCondition(a.(*v1beta1.DeploymentCondition), b.(*apps.DeploymentCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSetSpec)(nil), (*v1beta1.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(a.(*extensions.DaemonSetSpec), b.(*v1beta1.DaemonSetSpec), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentCondition)(nil), (*v1beta1.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentCondition_To_v1beta1_DeploymentCondition(a.(*apps.DeploymentCondition), b.(*v1beta1.DeploymentCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DaemonSetStatus)(nil), (*extensions.DaemonSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(a.(*v1beta1.DaemonSetStatus), b.(*extensions.DaemonSetStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentList)(nil), (*apps.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DeploymentList_To_apps_DeploymentList(a.(*v1beta1.DeploymentList), b.(*apps.DeploymentList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSetStatus)(nil), (*v1beta1.DaemonSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(a.(*extensions.DaemonSetStatus), b.(*v1beta1.DaemonSetStatus), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentList)(nil), (*v1beta1.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentList_To_v1beta1_DeploymentList(a.(*apps.DeploymentList), b.(*v1beta1.DeploymentList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DaemonSetUpdateStrategy)(nil), (*extensions.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(a.(*v1beta1.DaemonSetUpdateStrategy), b.(*extensions.DaemonSetUpdateStrategy), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentRollback)(nil), (*apps.DeploymentRollback)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DeploymentRollback_To_apps_DeploymentRollback(a.(*v1beta1.DeploymentRollback), b.(*apps.DeploymentRollback), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DaemonSetUpdateStrategy)(nil), (*v1beta1.DaemonSetUpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(a.(*extensions.DaemonSetUpdateStrategy), b.(*v1beta1.DaemonSetUpdateStrategy), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentRollback)(nil), (*v1beta1.DeploymentRollback)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentRollback_To_v1beta1_DeploymentRollback(a.(*apps.DeploymentRollback), b.(*v1beta1.DeploymentRollback), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.Deployment)(nil), (*extensions.Deployment)(nil), 
func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_Deployment_To_extensions_Deployment(a.(*v1beta1.Deployment), b.(*extensions.Deployment), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentSpec)(nil), (*apps.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(a.(*v1beta1.DeploymentSpec), b.(*apps.DeploymentSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.Deployment)(nil), (*v1beta1.Deployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_Deployment_To_v1beta1_Deployment(a.(*extensions.Deployment), b.(*v1beta1.Deployment), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentSpec)(nil), (*v1beta1.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(a.(*apps.DeploymentSpec), b.(*v1beta1.DeploymentSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentCondition)(nil), (*extensions.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(a.(*v1beta1.DeploymentCondition), b.(*extensions.DeploymentCondition), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentStatus)(nil), (*apps.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus(a.(*v1beta1.DeploymentStatus), b.(*apps.DeploymentStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentCondition)(nil), (*v1beta1.DeploymentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(a.(*extensions.DeploymentCondition), b.(*v1beta1.DeploymentCondition), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentStatus)(nil), (*v1beta1.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus(a.(*apps.DeploymentStatus), b.(*v1beta1.DeploymentStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentList)(nil), (*extensions.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentList_To_extensions_DeploymentList(a.(*v1beta1.DeploymentList), b.(*extensions.DeploymentList), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentStrategy)(nil), (*apps.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(a.(*v1beta1.DeploymentStrategy), b.(*apps.DeploymentStrategy), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentList)(nil), (*v1beta1.DeploymentList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentList_To_v1beta1_DeploymentList(a.(*extensions.DeploymentList), b.(*v1beta1.DeploymentList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentRollback)(nil), (*extensions.DeploymentRollback)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(a.(*v1beta1.DeploymentRollback), b.(*extensions.DeploymentRollback), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentRollback)(nil), (*v1beta1.DeploymentRollback)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(a.(*extensions.DeploymentRollback), b.(*v1beta1.DeploymentRollback), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentSpec)(nil), (*extensions.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(a.(*v1beta1.DeploymentSpec), b.(*extensions.DeploymentSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentSpec)(nil), (*v1beta1.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(a.(*extensions.DeploymentSpec), b.(*v1beta1.DeploymentSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentStatus)(nil), (*extensions.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(a.(*v1beta1.DeploymentStatus), b.(*extensions.DeploymentStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentStatus)(nil), (*v1beta1.DeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(a.(*extensions.DeploymentStatus), b.(*v1beta1.DeploymentStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta1.DeploymentStrategy)(nil), (*extensions.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(a.(*v1beta1.DeploymentStrategy), b.(*extensions.DeploymentStrategy), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*extensions.DeploymentStrategy)(nil), (*v1beta1.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(a.(*extensions.DeploymentStrategy), b.(*v1beta1.DeploymentStrategy), scope) + if err := s.AddGeneratedConversionFunc((*apps.DeploymentStrategy)(nil), (*v1beta1.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(a.(*apps.DeploymentStrategy), b.(*v1beta1.DeploymentStrategy), scope) }); err != nil { return err } @@ -393,53 +354,53 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.ReplicaSet)(nil), (*extensions.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(a.(*v1beta1.ReplicaSet), b.(*extensions.ReplicaSet), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.ReplicaSet)(nil), (*apps.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ReplicaSet_To_apps_ReplicaSet(a.(*v1beta1.ReplicaSet), 
b.(*apps.ReplicaSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.ReplicaSet)(nil), (*v1beta1.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(a.(*extensions.ReplicaSet), b.(*v1beta1.ReplicaSet), scope) + if err := s.AddGeneratedConversionFunc((*apps.ReplicaSet)(nil), (*v1beta1.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSet_To_v1beta1_ReplicaSet(a.(*apps.ReplicaSet), b.(*v1beta1.ReplicaSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.ReplicaSetCondition)(nil), (*extensions.ReplicaSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(a.(*v1beta1.ReplicaSetCondition), b.(*extensions.ReplicaSetCondition), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.ReplicaSetCondition)(nil), (*apps.ReplicaSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ReplicaSetCondition_To_apps_ReplicaSetCondition(a.(*v1beta1.ReplicaSetCondition), b.(*apps.ReplicaSetCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.ReplicaSetCondition)(nil), (*v1beta1.ReplicaSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(a.(*extensions.ReplicaSetCondition), b.(*v1beta1.ReplicaSetCondition), scope) + if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetCondition)(nil), (*v1beta1.ReplicaSetCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(a.(*apps.ReplicaSetCondition), b.(*v1beta1.ReplicaSetCondition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.ReplicaSetList)(nil), (*extensions.ReplicaSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(a.(*v1beta1.ReplicaSetList), b.(*extensions.ReplicaSetList), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.ReplicaSetList)(nil), (*apps.ReplicaSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ReplicaSetList_To_apps_ReplicaSetList(a.(*v1beta1.ReplicaSetList), b.(*apps.ReplicaSetList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.ReplicaSetList)(nil), (*v1beta1.ReplicaSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(a.(*extensions.ReplicaSetList), b.(*v1beta1.ReplicaSetList), scope) + if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetList)(nil), (*v1beta1.ReplicaSetList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetList_To_v1beta1_ReplicaSetList(a.(*apps.ReplicaSetList), b.(*v1beta1.ReplicaSetList), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.ReplicaSetSpec)(nil), (*extensions.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(a.(*v1beta1.ReplicaSetSpec), b.(*extensions.ReplicaSetSpec), scope) + if err := 
s.AddGeneratedConversionFunc((*v1beta1.ReplicaSetSpec)(nil), (*apps.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ReplicaSetSpec_To_apps_ReplicaSetSpec(a.(*v1beta1.ReplicaSetSpec), b.(*apps.ReplicaSetSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.ReplicaSetSpec)(nil), (*v1beta1.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(a.(*extensions.ReplicaSetSpec), b.(*v1beta1.ReplicaSetSpec), scope) + if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetSpec)(nil), (*v1beta1.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(a.(*apps.ReplicaSetSpec), b.(*v1beta1.ReplicaSetSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.ReplicaSetStatus)(nil), (*extensions.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(a.(*v1beta1.ReplicaSetStatus), b.(*extensions.ReplicaSetStatus), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.ReplicaSetStatus)(nil), (*apps.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ReplicaSetStatus_To_apps_ReplicaSetStatus(a.(*v1beta1.ReplicaSetStatus), b.(*apps.ReplicaSetStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.ReplicaSetStatus)(nil), (*v1beta1.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(a.(*extensions.ReplicaSetStatus), b.(*v1beta1.ReplicaSetStatus), scope) + if err := s.AddGeneratedConversionFunc((*apps.ReplicaSetStatus)(nil), (*v1beta1.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(a.(*apps.ReplicaSetStatus), b.(*v1beta1.ReplicaSetStatus), scope) }); err != nil { return err } @@ -453,33 +414,43 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.RollbackConfig)(nil), (*extensions.RollbackConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(a.(*v1beta1.RollbackConfig), b.(*extensions.RollbackConfig), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.RollbackConfig)(nil), (*apps.RollbackConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_RollbackConfig_To_apps_RollbackConfig(a.(*v1beta1.RollbackConfig), b.(*apps.RollbackConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apps.RollbackConfig)(nil), (*v1beta1.RollbackConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_RollbackConfig_To_v1beta1_RollbackConfig(a.(*apps.RollbackConfig), b.(*v1beta1.RollbackConfig), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.RollbackConfig)(nil), (*v1beta1.RollbackConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(a.(*extensions.RollbackConfig), b.(*v1beta1.RollbackConfig), scope) + if err := 
s.AddGeneratedConversionFunc((*v1beta1.RollingUpdateDaemonSet)(nil), (*apps.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(a.(*v1beta1.RollingUpdateDaemonSet), b.(*apps.RollingUpdateDaemonSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.RollingUpdateDaemonSet)(nil), (*extensions.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(a.(*v1beta1.RollingUpdateDaemonSet), b.(*extensions.RollingUpdateDaemonSet), scope) + if err := s.AddGeneratedConversionFunc((*apps.RollingUpdateDaemonSet)(nil), (*v1beta1.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(a.(*apps.RollingUpdateDaemonSet), b.(*v1beta1.RollingUpdateDaemonSet), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.RollingUpdateDaemonSet)(nil), (*v1beta1.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(a.(*extensions.RollingUpdateDaemonSet), b.(*v1beta1.RollingUpdateDaemonSet), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.RollingUpdateDeployment)(nil), (*apps.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(a.(*v1beta1.RollingUpdateDeployment), b.(*apps.RollingUpdateDeployment), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.RollingUpdateDeployment)(nil), (*extensions.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(a.(*v1beta1.RollingUpdateDeployment), b.(*extensions.RollingUpdateDeployment), scope) + if err := s.AddGeneratedConversionFunc((*apps.RollingUpdateDeployment)(nil), (*v1beta1.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(a.(*apps.RollingUpdateDeployment), b.(*v1beta1.RollingUpdateDeployment), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*extensions.RollingUpdateDeployment)(nil), (*v1beta1.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(a.(*extensions.RollingUpdateDeployment), b.(*v1beta1.RollingUpdateDeployment), scope) + if err := s.AddGeneratedConversionFunc((*v1beta1.RunAsGroupStrategyOptions)(nil), (*policy.RunAsGroupStrategyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_RunAsGroupStrategyOptions_To_policy_RunAsGroupStrategyOptions(a.(*v1beta1.RunAsGroupStrategyOptions), b.(*policy.RunAsGroupStrategyOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*policy.RunAsGroupStrategyOptions)(nil), (*v1beta1.RunAsGroupStrategyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_policy_RunAsGroupStrategyOptions_To_v1beta1_RunAsGroupStrategyOptions(a.(*policy.RunAsGroupStrategyOptions), 
b.(*v1beta1.RunAsGroupStrategyOptions), scope) }); err != nil { return err } @@ -543,33 +514,33 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*autoscaling.ScaleStatus)(nil), (*v1beta1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus(a.(*autoscaling.ScaleStatus), b.(*v1beta1.ScaleStatus), scope) + if err := s.AddConversionFunc((*apps.DeploymentSpec)(nil), (*v1beta1.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(a.(*apps.DeploymentSpec), b.(*v1beta1.DeploymentSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.DeploymentSpec)(nil), (*v1beta1.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(a.(*extensions.DeploymentSpec), b.(*v1beta1.DeploymentSpec), scope) + if err := s.AddConversionFunc((*apps.DeploymentStrategy)(nil), (*v1beta1.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(a.(*apps.DeploymentStrategy), b.(*v1beta1.DeploymentStrategy), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.DeploymentStrategy)(nil), (*v1beta1.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(a.(*extensions.DeploymentStrategy), b.(*v1beta1.DeploymentStrategy), scope) + if err := s.AddConversionFunc((*apps.ReplicaSetSpec)(nil), (*v1beta1.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(a.(*apps.ReplicaSetSpec), b.(*v1beta1.ReplicaSetSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.ReplicaSetSpec)(nil), (*v1beta1.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(a.(*extensions.ReplicaSetSpec), b.(*v1beta1.ReplicaSetSpec), scope) + if err := s.AddConversionFunc((*apps.RollingUpdateDaemonSet)(nil), (*v1beta1.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(a.(*apps.RollingUpdateDaemonSet), b.(*v1beta1.RollingUpdateDaemonSet), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.RollingUpdateDaemonSet)(nil), (*v1beta1.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(a.(*extensions.RollingUpdateDaemonSet), b.(*v1beta1.RollingUpdateDaemonSet), scope) + if err := s.AddConversionFunc((*apps.RollingUpdateDeployment)(nil), (*v1beta1.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(a.(*apps.RollingUpdateDeployment), b.(*v1beta1.RollingUpdateDeployment), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*extensions.RollingUpdateDeployment)(nil), (*v1beta1.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(a.(*extensions.RollingUpdateDeployment), b.(*v1beta1.RollingUpdateDeployment), scope) + if err := s.AddConversionFunc((*autoscaling.ScaleStatus)(nil), (*v1beta1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus(a.(*autoscaling.ScaleStatus), b.(*v1beta1.ScaleStatus), scope) }); err != nil { return err } @@ -613,13 +584,13 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta1.DeploymentSpec)(nil), (*extensions.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(a.(*v1beta1.DeploymentSpec), b.(*extensions.DeploymentSpec), scope) + if err := s.AddConversionFunc((*v1beta1.DeploymentSpec)(nil), (*apps.DeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(a.(*v1beta1.DeploymentSpec), b.(*apps.DeploymentSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta1.DeploymentStrategy)(nil), (*extensions.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(a.(*v1beta1.DeploymentStrategy), b.(*extensions.DeploymentStrategy), scope) + if err := s.AddConversionFunc((*v1beta1.DeploymentStrategy)(nil), (*apps.DeploymentStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(a.(*v1beta1.DeploymentStrategy), b.(*apps.DeploymentStrategy), scope) }); err != nil { return err } @@ -663,18 +634,18 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta1.ReplicaSetSpec)(nil), (*extensions.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(a.(*v1beta1.ReplicaSetSpec), b.(*extensions.ReplicaSetSpec), scope) + if err := s.AddConversionFunc((*v1beta1.ReplicaSetSpec)(nil), (*apps.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ReplicaSetSpec_To_apps_ReplicaSetSpec(a.(*v1beta1.ReplicaSetSpec), b.(*apps.ReplicaSetSpec), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta1.RollingUpdateDaemonSet)(nil), (*extensions.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(a.(*v1beta1.RollingUpdateDaemonSet), b.(*extensions.RollingUpdateDaemonSet), scope) + if err := s.AddConversionFunc((*v1beta1.RollingUpdateDaemonSet)(nil), (*apps.RollingUpdateDaemonSet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(a.(*v1beta1.RollingUpdateDaemonSet), b.(*apps.RollingUpdateDaemonSet), scope) }); err != nil { return err } - if err := s.AddConversionFunc((*v1beta1.RollingUpdateDeployment)(nil), (*extensions.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(a.(*v1beta1.RollingUpdateDeployment), b.(*extensions.RollingUpdateDeployment), scope) + if err := 
s.AddConversionFunc((*v1beta1.RollingUpdateDeployment)(nil), (*apps.RollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(a.(*v1beta1.RollingUpdateDeployment), b.(*apps.RollingUpdateDeployment), scope) }); err != nil { return err } @@ -728,124 +699,40 @@ func Convert_policy_AllowedHostPath_To_v1beta1_AllowedHostPath(in *policy.Allowe return autoConvert_policy_AllowedHostPath_To_v1beta1_AllowedHostPath(in, out, s) } -func autoConvert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus(in *v1beta1.CustomMetricCurrentStatus, out *extensions.CustomMetricCurrentStatus, s conversion.Scope) error { - out.Name = in.Name - out.CurrentValue = in.CurrentValue - return nil -} - -// Convert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus is an autogenerated conversion function. -func Convert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus(in *v1beta1.CustomMetricCurrentStatus, out *extensions.CustomMetricCurrentStatus, s conversion.Scope) error { - return autoConvert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus(in, out, s) -} - -func autoConvert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus(in *extensions.CustomMetricCurrentStatus, out *v1beta1.CustomMetricCurrentStatus, s conversion.Scope) error { - out.Name = in.Name - out.CurrentValue = in.CurrentValue - return nil -} - -// Convert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus is an autogenerated conversion function. -func Convert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus(in *extensions.CustomMetricCurrentStatus, out *v1beta1.CustomMetricCurrentStatus, s conversion.Scope) error { - return autoConvert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus(in, out, s) -} - -func autoConvert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList(in *v1beta1.CustomMetricCurrentStatusList, out *extensions.CustomMetricCurrentStatusList, s conversion.Scope) error { - out.Items = *(*[]extensions.CustomMetricCurrentStatus)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList is an autogenerated conversion function. -func Convert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList(in *v1beta1.CustomMetricCurrentStatusList, out *extensions.CustomMetricCurrentStatusList, s conversion.Scope) error { - return autoConvert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList(in, out, s) -} - -func autoConvert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList(in *extensions.CustomMetricCurrentStatusList, out *v1beta1.CustomMetricCurrentStatusList, s conversion.Scope) error { - out.Items = *(*[]v1beta1.CustomMetricCurrentStatus)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList is an autogenerated conversion function. 
-func Convert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList(in *extensions.CustomMetricCurrentStatusList, out *v1beta1.CustomMetricCurrentStatusList, s conversion.Scope) error { - return autoConvert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList(in, out, s) -} - -func autoConvert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget(in *v1beta1.CustomMetricTarget, out *extensions.CustomMetricTarget, s conversion.Scope) error { - out.Name = in.Name - out.TargetValue = in.TargetValue - return nil -} - -// Convert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget is an autogenerated conversion function. -func Convert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget(in *v1beta1.CustomMetricTarget, out *extensions.CustomMetricTarget, s conversion.Scope) error { - return autoConvert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget(in, out, s) -} - -func autoConvert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget(in *extensions.CustomMetricTarget, out *v1beta1.CustomMetricTarget, s conversion.Scope) error { - out.Name = in.Name - out.TargetValue = in.TargetValue - return nil -} - -// Convert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget is an autogenerated conversion function. -func Convert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget(in *extensions.CustomMetricTarget, out *v1beta1.CustomMetricTarget, s conversion.Scope) error { - return autoConvert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget(in, out, s) -} - -func autoConvert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList(in *v1beta1.CustomMetricTargetList, out *extensions.CustomMetricTargetList, s conversion.Scope) error { - out.Items = *(*[]extensions.CustomMetricTarget)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList is an autogenerated conversion function. -func Convert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList(in *v1beta1.CustomMetricTargetList, out *extensions.CustomMetricTargetList, s conversion.Scope) error { - return autoConvert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList(in, out, s) -} - -func autoConvert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList(in *extensions.CustomMetricTargetList, out *v1beta1.CustomMetricTargetList, s conversion.Scope) error { - out.Items = *(*[]v1beta1.CustomMetricTarget)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList is an autogenerated conversion function. 
-func Convert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList(in *extensions.CustomMetricTargetList, out *v1beta1.CustomMetricTargetList, s conversion.Scope) error { - return autoConvert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList(in, out, s) -} - -func autoConvert_v1beta1_DaemonSet_To_extensions_DaemonSet(in *v1beta1.DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error { +func autoConvert_v1beta1_DaemonSet_To_apps_DaemonSet(in *v1beta1.DaemonSet, out *apps.DaemonSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1beta1_DaemonSetSpec_To_apps_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1beta1_DaemonSetStatus_To_apps_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1beta1_DaemonSet_To_extensions_DaemonSet is an autogenerated conversion function. -func Convert_v1beta1_DaemonSet_To_extensions_DaemonSet(in *v1beta1.DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error { - return autoConvert_v1beta1_DaemonSet_To_extensions_DaemonSet(in, out, s) +// Convert_v1beta1_DaemonSet_To_apps_DaemonSet is an autogenerated conversion function. +func Convert_v1beta1_DaemonSet_To_apps_DaemonSet(in *v1beta1.DaemonSet, out *apps.DaemonSet, s conversion.Scope) error { + return autoConvert_v1beta1_DaemonSet_To_apps_DaemonSet(in, out, s) } -func autoConvert_extensions_DaemonSet_To_v1beta1_DaemonSet(in *extensions.DaemonSet, out *v1beta1.DaemonSet, s conversion.Scope) error { +func autoConvert_apps_DaemonSet_To_v1beta1_DaemonSet(in *apps.DaemonSet, out *v1beta1.DaemonSet, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_apps_DaemonSetSpec_To_v1beta1_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_apps_DaemonSetStatus_To_v1beta1_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_extensions_DaemonSet_To_v1beta1_DaemonSet is an autogenerated conversion function. -func Convert_extensions_DaemonSet_To_v1beta1_DaemonSet(in *extensions.DaemonSet, out *v1beta1.DaemonSet, s conversion.Scope) error { - return autoConvert_extensions_DaemonSet_To_v1beta1_DaemonSet(in, out, s) +// Convert_apps_DaemonSet_To_v1beta1_DaemonSet is an autogenerated conversion function. 
+func Convert_apps_DaemonSet_To_v1beta1_DaemonSet(in *apps.DaemonSet, out *v1beta1.DaemonSet, s conversion.Scope) error { + return autoConvert_apps_DaemonSet_To_v1beta1_DaemonSet(in, out, s) } -func autoConvert_v1beta1_DaemonSetCondition_To_extensions_DaemonSetCondition(in *v1beta1.DaemonSetCondition, out *extensions.DaemonSetCondition, s conversion.Scope) error { - out.Type = extensions.DaemonSetConditionType(in.Type) +func autoConvert_v1beta1_DaemonSetCondition_To_apps_DaemonSetCondition(in *v1beta1.DaemonSetCondition, out *apps.DaemonSetCondition, s conversion.Scope) error { + out.Type = apps.DaemonSetConditionType(in.Type) out.Status = core.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime out.Reason = in.Reason @@ -853,12 +740,12 @@ func autoConvert_v1beta1_DaemonSetCondition_To_extensions_DaemonSetCondition(in return nil } -// Convert_v1beta1_DaemonSetCondition_To_extensions_DaemonSetCondition is an autogenerated conversion function. -func Convert_v1beta1_DaemonSetCondition_To_extensions_DaemonSetCondition(in *v1beta1.DaemonSetCondition, out *extensions.DaemonSetCondition, s conversion.Scope) error { - return autoConvert_v1beta1_DaemonSetCondition_To_extensions_DaemonSetCondition(in, out, s) +// Convert_v1beta1_DaemonSetCondition_To_apps_DaemonSetCondition is an autogenerated conversion function. +func Convert_v1beta1_DaemonSetCondition_To_apps_DaemonSetCondition(in *v1beta1.DaemonSetCondition, out *apps.DaemonSetCondition, s conversion.Scope) error { + return autoConvert_v1beta1_DaemonSetCondition_To_apps_DaemonSetCondition(in, out, s) } -func autoConvert_extensions_DaemonSetCondition_To_v1beta1_DaemonSetCondition(in *extensions.DaemonSetCondition, out *v1beta1.DaemonSetCondition, s conversion.Scope) error { +func autoConvert_apps_DaemonSetCondition_To_v1beta1_DaemonSetCondition(in *apps.DaemonSetCondition, out *v1beta1.DaemonSetCondition, s conversion.Scope) error { out.Type = v1beta1.DaemonSetConditionType(in.Type) out.Status = v1.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime @@ -867,18 +754,18 @@ func autoConvert_extensions_DaemonSetCondition_To_v1beta1_DaemonSetCondition(in return nil } -// Convert_extensions_DaemonSetCondition_To_v1beta1_DaemonSetCondition is an autogenerated conversion function. -func Convert_extensions_DaemonSetCondition_To_v1beta1_DaemonSetCondition(in *extensions.DaemonSetCondition, out *v1beta1.DaemonSetCondition, s conversion.Scope) error { - return autoConvert_extensions_DaemonSetCondition_To_v1beta1_DaemonSetCondition(in, out, s) +// Convert_apps_DaemonSetCondition_To_v1beta1_DaemonSetCondition is an autogenerated conversion function. 
+func Convert_apps_DaemonSetCondition_To_v1beta1_DaemonSetCondition(in *apps.DaemonSetCondition, out *v1beta1.DaemonSetCondition, s conversion.Scope) error { + return autoConvert_apps_DaemonSetCondition_To_v1beta1_DaemonSetCondition(in, out, s) } -func autoConvert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in *v1beta1.DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error { +func autoConvert_v1beta1_DaemonSetList_To_apps_DaemonSetList(in *v1beta1.DaemonSetList, out *apps.DaemonSetList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]extensions.DaemonSet, len(*in)) + *out = make([]apps.DaemonSet, len(*in)) for i := range *in { - if err := Convert_v1beta1_DaemonSet_To_extensions_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_v1beta1_DaemonSet_To_apps_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -888,18 +775,18 @@ func autoConvert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in *v1beta1.D return nil } -// Convert_v1beta1_DaemonSetList_To_extensions_DaemonSetList is an autogenerated conversion function. -func Convert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in *v1beta1.DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error { - return autoConvert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in, out, s) +// Convert_v1beta1_DaemonSetList_To_apps_DaemonSetList is an autogenerated conversion function. +func Convert_v1beta1_DaemonSetList_To_apps_DaemonSetList(in *v1beta1.DaemonSetList, out *apps.DaemonSetList, s conversion.Scope) error { + return autoConvert_v1beta1_DaemonSetList_To_apps_DaemonSetList(in, out, s) } -func autoConvert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in *extensions.DaemonSetList, out *v1beta1.DaemonSetList, s conversion.Scope) error { +func autoConvert_apps_DaemonSetList_To_v1beta1_DaemonSetList(in *apps.DaemonSetList, out *v1beta1.DaemonSetList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]v1beta1.DaemonSet, len(*in)) for i := range *in { - if err := Convert_extensions_DaemonSet_To_v1beta1_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { + if err := Convert_apps_DaemonSet_To_v1beta1_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -909,17 +796,17 @@ func autoConvert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in *extension return nil } -// Convert_extensions_DaemonSetList_To_v1beta1_DaemonSetList is an autogenerated conversion function. -func Convert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in *extensions.DaemonSetList, out *v1beta1.DaemonSetList, s conversion.Scope) error { - return autoConvert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in, out, s) +// Convert_apps_DaemonSetList_To_v1beta1_DaemonSetList is an autogenerated conversion function. 
+func Convert_apps_DaemonSetList_To_v1beta1_DaemonSetList(in *apps.DaemonSetList, out *v1beta1.DaemonSetList, s conversion.Scope) error { + return autoConvert_apps_DaemonSetList_To_v1beta1_DaemonSetList(in, out, s) } -func autoConvert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *v1beta1.DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error { +func autoConvert_v1beta1_DaemonSetSpec_To_apps_DaemonSetSpec(in *v1beta1.DaemonSetSpec, out *apps.DaemonSetSpec, s conversion.Scope) error { out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector)) if err := corev1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + if err := Convert_v1beta1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds @@ -928,17 +815,17 @@ func autoConvert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *v1beta1.D return nil } -// Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec is an autogenerated conversion function. -func Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *v1beta1.DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error { - return autoConvert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in, out, s) +// Convert_v1beta1_DaemonSetSpec_To_apps_DaemonSetSpec is an autogenerated conversion function. +func Convert_v1beta1_DaemonSetSpec_To_apps_DaemonSetSpec(in *v1beta1.DaemonSetSpec, out *apps.DaemonSetSpec, s conversion.Scope) error { + return autoConvert_v1beta1_DaemonSetSpec_To_apps_DaemonSetSpec(in, out, s) } -func autoConvert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *extensions.DaemonSetSpec, out *v1beta1.DaemonSetSpec, s conversion.Scope) error { +func autoConvert_apps_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *apps.DaemonSetSpec, out *v1beta1.DaemonSetSpec, s conversion.Scope) error { out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector)) if err := corev1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - if err := Convert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + if err := Convert_apps_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds @@ -947,12 +834,12 @@ func autoConvert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *extension return nil } -// Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec is an autogenerated conversion function. -func Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *extensions.DaemonSetSpec, out *v1beta1.DaemonSetSpec, s conversion.Scope) error { - return autoConvert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in, out, s) +// Convert_apps_DaemonSetSpec_To_v1beta1_DaemonSetSpec is an autogenerated conversion function. 
+func Convert_apps_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *apps.DaemonSetSpec, out *v1beta1.DaemonSetSpec, s conversion.Scope) error { + return autoConvert_apps_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in, out, s) } -func autoConvert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *v1beta1.DaemonSetStatus, out *extensions.DaemonSetStatus, s conversion.Scope) error { +func autoConvert_v1beta1_DaemonSetStatus_To_apps_DaemonSetStatus(in *v1beta1.DaemonSetStatus, out *apps.DaemonSetStatus, s conversion.Scope) error { out.CurrentNumberScheduled = in.CurrentNumberScheduled out.NumberMisscheduled = in.NumberMisscheduled out.DesiredNumberScheduled = in.DesiredNumberScheduled @@ -962,16 +849,16 @@ func autoConvert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *v1bet out.NumberAvailable = in.NumberAvailable out.NumberUnavailable = in.NumberUnavailable out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount)) - out.Conditions = *(*[]extensions.DaemonSetCondition)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*[]apps.DaemonSetCondition)(unsafe.Pointer(&in.Conditions)) return nil } -// Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus is an autogenerated conversion function. -func Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *v1beta1.DaemonSetStatus, out *extensions.DaemonSetStatus, s conversion.Scope) error { - return autoConvert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in, out, s) +// Convert_v1beta1_DaemonSetStatus_To_apps_DaemonSetStatus is an autogenerated conversion function. +func Convert_v1beta1_DaemonSetStatus_To_apps_DaemonSetStatus(in *v1beta1.DaemonSetStatus, out *apps.DaemonSetStatus, s conversion.Scope) error { + return autoConvert_v1beta1_DaemonSetStatus_To_apps_DaemonSetStatus(in, out, s) } -func autoConvert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *extensions.DaemonSetStatus, out *v1beta1.DaemonSetStatus, s conversion.Scope) error { +func autoConvert_apps_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *apps.DaemonSetStatus, out *v1beta1.DaemonSetStatus, s conversion.Scope) error { out.CurrentNumberScheduled = in.CurrentNumberScheduled out.NumberMisscheduled = in.NumberMisscheduled out.DesiredNumberScheduled = in.DesiredNumberScheduled @@ -985,17 +872,17 @@ func autoConvert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *exten return nil } -// Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus is an autogenerated conversion function. -func Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *extensions.DaemonSetStatus, out *v1beta1.DaemonSetStatus, s conversion.Scope) error { - return autoConvert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in, out, s) +// Convert_apps_DaemonSetStatus_To_v1beta1_DaemonSetStatus is an autogenerated conversion function. 
+func Convert_apps_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *apps.DaemonSetStatus, out *v1beta1.DaemonSetStatus, s conversion.Scope) error { + return autoConvert_apps_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in, out, s) } -func autoConvert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in *v1beta1.DaemonSetUpdateStrategy, out *extensions.DaemonSetUpdateStrategy, s conversion.Scope) error { - out.Type = extensions.DaemonSetUpdateStrategyType(in.Type) +func autoConvert_v1beta1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(in *v1beta1.DaemonSetUpdateStrategy, out *apps.DaemonSetUpdateStrategy, s conversion.Scope) error { + out.Type = apps.DaemonSetUpdateStrategyType(in.Type) if in.RollingUpdate != nil { in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(extensions.RollingUpdateDaemonSet) - if err := Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(*in, *out, s); err != nil { + *out = new(apps.RollingUpdateDaemonSet) + if err := Convert_v1beta1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(*in, *out, s); err != nil { return err } } else { @@ -1004,17 +891,17 @@ func autoConvert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateSt return nil } -// Convert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy is an autogenerated conversion function. -func Convert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in *v1beta1.DaemonSetUpdateStrategy, out *extensions.DaemonSetUpdateStrategy, s conversion.Scope) error { - return autoConvert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in, out, s) +// Convert_v1beta1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy is an autogenerated conversion function. +func Convert_v1beta1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(in *v1beta1.DaemonSetUpdateStrategy, out *apps.DaemonSetUpdateStrategy, s conversion.Scope) error { + return autoConvert_v1beta1_DaemonSetUpdateStrategy_To_apps_DaemonSetUpdateStrategy(in, out, s) } -func autoConvert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(in *extensions.DaemonSetUpdateStrategy, out *v1beta1.DaemonSetUpdateStrategy, s conversion.Scope) error { +func autoConvert_apps_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(in *apps.DaemonSetUpdateStrategy, out *v1beta1.DaemonSetUpdateStrategy, s conversion.Scope) error { out.Type = v1beta1.DaemonSetUpdateStrategyType(in.Type) if in.RollingUpdate != nil { in, out := &in.RollingUpdate, &out.RollingUpdate *out = new(v1beta1.RollingUpdateDaemonSet) - if err := Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(*in, *out, s); err != nil { + if err := Convert_apps_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(*in, *out, s); err != nil { return err } } else { @@ -1023,45 +910,45 @@ func autoConvert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateSt return nil } -// Convert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy is an autogenerated conversion function. -func Convert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(in *extensions.DaemonSetUpdateStrategy, out *v1beta1.DaemonSetUpdateStrategy, s conversion.Scope) error { - return autoConvert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(in, out, s) +// Convert_apps_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy is an autogenerated conversion function. 
+func Convert_apps_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(in *apps.DaemonSetUpdateStrategy, out *v1beta1.DaemonSetUpdateStrategy, s conversion.Scope) error { + return autoConvert_apps_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(in, out, s) } -func autoConvert_v1beta1_Deployment_To_extensions_Deployment(in *v1beta1.Deployment, out *extensions.Deployment, s conversion.Scope) error { +func autoConvert_v1beta1_Deployment_To_apps_Deployment(in *v1beta1.Deployment, out *apps.Deployment, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1beta1_Deployment_To_extensions_Deployment is an autogenerated conversion function. -func Convert_v1beta1_Deployment_To_extensions_Deployment(in *v1beta1.Deployment, out *extensions.Deployment, s conversion.Scope) error { - return autoConvert_v1beta1_Deployment_To_extensions_Deployment(in, out, s) +// Convert_v1beta1_Deployment_To_apps_Deployment is an autogenerated conversion function. +func Convert_v1beta1_Deployment_To_apps_Deployment(in *v1beta1.Deployment, out *apps.Deployment, s conversion.Scope) error { + return autoConvert_v1beta1_Deployment_To_apps_Deployment(in, out, s) } -func autoConvert_extensions_Deployment_To_v1beta1_Deployment(in *extensions.Deployment, out *v1beta1.Deployment, s conversion.Scope) error { +func autoConvert_apps_Deployment_To_v1beta1_Deployment(in *apps.Deployment, out *v1beta1.Deployment, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_extensions_Deployment_To_v1beta1_Deployment is an autogenerated conversion function. -func Convert_extensions_Deployment_To_v1beta1_Deployment(in *extensions.Deployment, out *v1beta1.Deployment, s conversion.Scope) error { - return autoConvert_extensions_Deployment_To_v1beta1_Deployment(in, out, s) +// Convert_apps_Deployment_To_v1beta1_Deployment is an autogenerated conversion function. 
+func Convert_apps_Deployment_To_v1beta1_Deployment(in *apps.Deployment, out *v1beta1.Deployment, s conversion.Scope) error {
+	return autoConvert_apps_Deployment_To_v1beta1_Deployment(in, out, s)
 }
 
-func autoConvert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(in *v1beta1.DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error {
-	out.Type = extensions.DeploymentConditionType(in.Type)
+func autoConvert_v1beta1_DeploymentCondition_To_apps_DeploymentCondition(in *v1beta1.DeploymentCondition, out *apps.DeploymentCondition, s conversion.Scope) error {
+	out.Type = apps.DeploymentConditionType(in.Type)
 	out.Status = core.ConditionStatus(in.Status)
 	out.LastUpdateTime = in.LastUpdateTime
 	out.LastTransitionTime = in.LastTransitionTime
@@ -1070,12 +957,12 @@ func autoConvert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(i
 	return nil
 }
 
-// Convert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition is an autogenerated conversion function.
-func Convert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(in *v1beta1.DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error {
-	return autoConvert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(in, out, s)
+// Convert_v1beta1_DeploymentCondition_To_apps_DeploymentCondition is an autogenerated conversion function.
+func Convert_v1beta1_DeploymentCondition_To_apps_DeploymentCondition(in *v1beta1.DeploymentCondition, out *apps.DeploymentCondition, s conversion.Scope) error {
+	return autoConvert_v1beta1_DeploymentCondition_To_apps_DeploymentCondition(in, out, s)
 }
 
-func autoConvert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(in *extensions.DeploymentCondition, out *v1beta1.DeploymentCondition, s conversion.Scope) error {
+func autoConvert_apps_DeploymentCondition_To_v1beta1_DeploymentCondition(in *apps.DeploymentCondition, out *v1beta1.DeploymentCondition, s conversion.Scope) error {
 	out.Type = v1beta1.DeploymentConditionType(in.Type)
 	out.Status = v1.ConditionStatus(in.Status)
 	out.LastUpdateTime = in.LastUpdateTime
@@ -1085,18 +972,18 @@ func autoConvert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(i
 	return nil
 }
 
-// Convert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition is an autogenerated conversion function.
-func Convert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(in *extensions.DeploymentCondition, out *v1beta1.DeploymentCondition, s conversion.Scope) error {
-	return autoConvert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(in, out, s)
+// Convert_apps_DeploymentCondition_To_v1beta1_DeploymentCondition is an autogenerated conversion function.
+func Convert_apps_DeploymentCondition_To_v1beta1_DeploymentCondition(in *apps.DeploymentCondition, out *v1beta1.DeploymentCondition, s conversion.Scope) error {
+	return autoConvert_apps_DeploymentCondition_To_v1beta1_DeploymentCondition(in, out, s)
 }
 
-func autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList(in *v1beta1.DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error {
+func autoConvert_v1beta1_DeploymentList_To_apps_DeploymentList(in *v1beta1.DeploymentList, out *apps.DeploymentList, s conversion.Scope) error {
 	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
-		*out = make([]extensions.Deployment, len(*in))
+		*out = make([]apps.Deployment, len(*in))
 		for i := range *in {
-			if err := Convert_v1beta1_Deployment_To_extensions_Deployment(&(*in)[i], &(*out)[i], s); err != nil {
+			if err := Convert_v1beta1_Deployment_To_apps_Deployment(&(*in)[i], &(*out)[i], s); err != nil {
 				return err
 			}
 		}
@@ -1106,18 +993,18 @@ func autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList(in *v1beta1
 	return nil
 }
 
-// Convert_v1beta1_DeploymentList_To_extensions_DeploymentList is an autogenerated conversion function.
-func Convert_v1beta1_DeploymentList_To_extensions_DeploymentList(in *v1beta1.DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error {
-	return autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList(in, out, s)
+// Convert_v1beta1_DeploymentList_To_apps_DeploymentList is an autogenerated conversion function.
+func Convert_v1beta1_DeploymentList_To_apps_DeploymentList(in *v1beta1.DeploymentList, out *apps.DeploymentList, s conversion.Scope) error {
+	return autoConvert_v1beta1_DeploymentList_To_apps_DeploymentList(in, out, s)
 }
 
-func autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList(in *extensions.DeploymentList, out *v1beta1.DeploymentList, s conversion.Scope) error {
+func autoConvert_apps_DeploymentList_To_v1beta1_DeploymentList(in *apps.DeploymentList, out *v1beta1.DeploymentList, s conversion.Scope) error {
 	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
 		*out = make([]v1beta1.Deployment, len(*in))
 		for i := range *in {
-			if err := Convert_extensions_Deployment_To_v1beta1_Deployment(&(*in)[i], &(*out)[i], s); err != nil {
+			if err := Convert_apps_Deployment_To_v1beta1_Deployment(&(*in)[i], &(*out)[i], s); err != nil {
 				return err
 			}
 		}
@@ -1127,40 +1014,40 @@ func autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList(in *extensi
 	return nil
 }
 
-// Convert_extensions_DeploymentList_To_v1beta1_DeploymentList is an autogenerated conversion function.
-func Convert_extensions_DeploymentList_To_v1beta1_DeploymentList(in *extensions.DeploymentList, out *v1beta1.DeploymentList, s conversion.Scope) error {
-	return autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList(in, out, s)
+// Convert_apps_DeploymentList_To_v1beta1_DeploymentList is an autogenerated conversion function.
+func Convert_apps_DeploymentList_To_v1beta1_DeploymentList(in *apps.DeploymentList, out *v1beta1.DeploymentList, s conversion.Scope) error {
+	return autoConvert_apps_DeploymentList_To_v1beta1_DeploymentList(in, out, s)
 }
 
-func autoConvert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in *v1beta1.DeploymentRollback, out *extensions.DeploymentRollback, s conversion.Scope) error {
+func autoConvert_v1beta1_DeploymentRollback_To_apps_DeploymentRollback(in *v1beta1.DeploymentRollback, out *apps.DeploymentRollback, s conversion.Scope) error {
 	out.Name = in.Name
 	out.UpdatedAnnotations = *(*map[string]string)(unsafe.Pointer(&in.UpdatedAnnotations))
-	if err := Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil {
+	if err := Convert_v1beta1_RollbackConfig_To_apps_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil {
 		return err
 	}
 	return nil
 }
 
-// Convert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback is an autogenerated conversion function.
-func Convert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in *v1beta1.DeploymentRollback, out *extensions.DeploymentRollback, s conversion.Scope) error {
-	return autoConvert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in, out, s)
+// Convert_v1beta1_DeploymentRollback_To_apps_DeploymentRollback is an autogenerated conversion function.
+func Convert_v1beta1_DeploymentRollback_To_apps_DeploymentRollback(in *v1beta1.DeploymentRollback, out *apps.DeploymentRollback, s conversion.Scope) error {
	return autoConvert_v1beta1_DeploymentRollback_To_apps_DeploymentRollback(in, out, s)
 }
 
-func autoConvert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in *extensions.DeploymentRollback, out *v1beta1.DeploymentRollback, s conversion.Scope) error {
+func autoConvert_apps_DeploymentRollback_To_v1beta1_DeploymentRollback(in *apps.DeploymentRollback, out *v1beta1.DeploymentRollback, s conversion.Scope) error {
 	out.Name = in.Name
 	out.UpdatedAnnotations = *(*map[string]string)(unsafe.Pointer(&in.UpdatedAnnotations))
-	if err := Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil {
+	if err := Convert_apps_RollbackConfig_To_v1beta1_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil {
 		return err
 	}
 	return nil
 }
 
-// Convert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback is an autogenerated conversion function.
-func Convert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in *extensions.DeploymentRollback, out *v1beta1.DeploymentRollback, s conversion.Scope) error {
-	return autoConvert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in, out, s)
+// Convert_apps_DeploymentRollback_To_v1beta1_DeploymentRollback is an autogenerated conversion function.
+func Convert_apps_DeploymentRollback_To_v1beta1_DeploymentRollback(in *apps.DeploymentRollback, out *v1beta1.DeploymentRollback, s conversion.Scope) error {
+	return autoConvert_apps_DeploymentRollback_To_v1beta1_DeploymentRollback(in, out, s)
 }
 
-func autoConvert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *v1beta1.DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error {
+func autoConvert_v1beta1_DeploymentSpec_To_apps_DeploymentSpec(in *v1beta1.DeploymentSpec, out *apps.DeploymentSpec, s conversion.Scope) error {
 	if err := metav1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil {
 		return err
 	}
@@ -1168,18 +1055,18 @@ func autoConvert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *v1beta1
 	if err := corev1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
 		return err
 	}
-	if err := Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil {
+	if err := Convert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil {
 		return err
 	}
 	out.MinReadySeconds = in.MinReadySeconds
 	out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit))
 	out.Paused = in.Paused
-	out.RollbackTo = (*extensions.RollbackConfig)(unsafe.Pointer(in.RollbackTo))
+	out.RollbackTo = (*apps.RollbackConfig)(unsafe.Pointer(in.RollbackTo))
 	out.ProgressDeadlineSeconds = (*int32)(unsafe.Pointer(in.ProgressDeadlineSeconds))
 	return nil
 }
 
-func autoConvert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions.DeploymentSpec, out *v1beta1.DeploymentSpec, s conversion.Scope) error {
+func autoConvert_apps_DeploymentSpec_To_v1beta1_DeploymentSpec(in *apps.DeploymentSpec, out *v1beta1.DeploymentSpec, s conversion.Scope) error {
 	if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil {
 		return err
 	}
@@ -1187,7 +1074,7 @@ func autoConvert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensi
 	if err := corev1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
 		return err
 	}
-	if err := Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil {
+	if err := Convert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil {
 		return err
 	}
 	out.MinReadySeconds = in.MinReadySeconds
@@ -1198,24 +1085,24 @@ func autoConvert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensi
 	return nil
 }
 
-func autoConvert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in *v1beta1.DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error {
+func autoConvert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus(in *v1beta1.DeploymentStatus, out *apps.DeploymentStatus, s conversion.Scope) error {
 	out.ObservedGeneration = in.ObservedGeneration
 	out.Replicas = in.Replicas
 	out.UpdatedReplicas = in.UpdatedReplicas
 	out.ReadyReplicas = in.ReadyReplicas
 	out.AvailableReplicas = in.AvailableReplicas
 	out.UnavailableReplicas = in.UnavailableReplicas
-	out.Conditions = *(*[]extensions.DeploymentCondition)(unsafe.Pointer(&in.Conditions))
+	out.Conditions = *(*[]apps.DeploymentCondition)(unsafe.Pointer(&in.Conditions))
 	out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount))
 	return nil
 }
 
-// Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus is an autogenerated conversion function.
-func Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in *v1beta1.DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error {
-	return autoConvert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in, out, s)
+// Convert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus is an autogenerated conversion function.
+func Convert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus(in *v1beta1.DeploymentStatus, out *apps.DeploymentStatus, s conversion.Scope) error {
+	return autoConvert_v1beta1_DeploymentStatus_To_apps_DeploymentStatus(in, out, s)
 }
 
-func autoConvert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in *extensions.DeploymentStatus, out *v1beta1.DeploymentStatus, s conversion.Scope) error {
+func autoConvert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus(in *apps.DeploymentStatus, out *v1beta1.DeploymentStatus, s conversion.Scope) error {
 	out.ObservedGeneration = in.ObservedGeneration
 	out.Replicas = in.Replicas
 	out.UpdatedReplicas = in.UpdatedReplicas
@@ -1227,17 +1114,17 @@ func autoConvert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in *ext
 	return nil
 }
 
-// Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus is an autogenerated conversion function.
-func Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in *extensions.DeploymentStatus, out *v1beta1.DeploymentStatus, s conversion.Scope) error {
-	return autoConvert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in, out, s)
+// Convert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus is an autogenerated conversion function.
+func Convert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus(in *apps.DeploymentStatus, out *v1beta1.DeploymentStatus, s conversion.Scope) error {
+	return autoConvert_apps_DeploymentStatus_To_v1beta1_DeploymentStatus(in, out, s)
 }
 
-func autoConvert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *v1beta1.DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error {
-	out.Type = extensions.DeploymentStrategyType(in.Type)
+func autoConvert_v1beta1_DeploymentStrategy_To_apps_DeploymentStrategy(in *v1beta1.DeploymentStrategy, out *apps.DeploymentStrategy, s conversion.Scope) error {
+	out.Type = apps.DeploymentStrategyType(in.Type)
 	if in.RollingUpdate != nil {
 		in, out := &in.RollingUpdate, &out.RollingUpdate
-		*out = new(extensions.RollingUpdateDeployment)
-		if err := Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(*in, *out, s); err != nil {
+		*out = new(apps.RollingUpdateDeployment)
+		if err := Convert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(*in, *out, s); err != nil {
 			return err
 		}
 	} else {
@@ -1246,12 +1133,12 @@ func autoConvert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in
 	return nil
 }
 
-func autoConvert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *v1beta1.DeploymentStrategy, s conversion.Scope) error {
+func autoConvert_apps_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *apps.DeploymentStrategy, out *v1beta1.DeploymentStrategy, s conversion.Scope) error {
 	out.Type = v1beta1.DeploymentStrategyType(in.Type)
 	if in.RollingUpdate != nil {
 		in, out := &in.RollingUpdate, &out.RollingUpdate
 		*out = new(v1beta1.RollingUpdateDeployment)
-		if err := Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(*in, *out, s); err != nil {
+		if err := Convert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(*in, *out, s); err != nil {
 			return err
 		}
 	} else {
@@ -1650,6 +1537,7 @@ func autoConvert_v1beta1_PodSecurityPolicySpec_To_policy_PodSecurityPolicySpec(i
 	if err := Convert_v1beta1_RunAsUserStrategyOptions_To_policy_RunAsUserStrategyOptions(&in.RunAsUser, &out.RunAsUser, s); err != nil {
 		return err
 	}
+	out.RunAsGroup = (*policy.RunAsGroupStrategyOptions)(unsafe.Pointer(in.RunAsGroup))
 	if err := Convert_v1beta1_SupplementalGroupsStrategyOptions_To_policy_SupplementalGroupsStrategyOptions(&in.SupplementalGroups, &out.SupplementalGroups, s); err != nil {
 		return err
 	}
@@ -1690,6 +1578,7 @@ func autoConvert_policy_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(i
 	if err := Convert_policy_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions(&in.RunAsUser, &out.RunAsUser, s); err != nil {
 		return err
 	}
+	out.RunAsGroup = (*v1beta1.RunAsGroupStrategyOptions)(unsafe.Pointer(in.RunAsGroup))
 	if err := Convert_policy_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions(&in.SupplementalGroups, &out.SupplementalGroups, s); err != nil {
 		return err
 	}
@@ -1714,40 +1603,40 @@ func Convert_policy_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(in *p
 	return autoConvert_policy_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(in, out, s)
 }
 
-func autoConvert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(in *v1beta1.ReplicaSet, out *extensions.ReplicaSet, s conversion.Scope) error {
+func autoConvert_v1beta1_ReplicaSet_To_apps_ReplicaSet(in *v1beta1.ReplicaSet, out *apps.ReplicaSet, s conversion.Scope) error {
 	out.ObjectMeta = in.ObjectMeta
-	if err := Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil {
+	if err := Convert_v1beta1_ReplicaSetSpec_To_apps_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil {
 		return err
 	}
-	if err := Convert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil {
+	if err := Convert_v1beta1_ReplicaSetStatus_To_apps_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil {
 		return err
 	}
 	return nil
 }
 
-// Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet is an autogenerated conversion function.
-func Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(in *v1beta1.ReplicaSet, out *extensions.ReplicaSet, s conversion.Scope) error {
-	return autoConvert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(in, out, s)
+// Convert_v1beta1_ReplicaSet_To_apps_ReplicaSet is an autogenerated conversion function.
+func Convert_v1beta1_ReplicaSet_To_apps_ReplicaSet(in *v1beta1.ReplicaSet, out *apps.ReplicaSet, s conversion.Scope) error {
+	return autoConvert_v1beta1_ReplicaSet_To_apps_ReplicaSet(in, out, s)
 }
 
-func autoConvert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(in *extensions.ReplicaSet, out *v1beta1.ReplicaSet, s conversion.Scope) error {
+func autoConvert_apps_ReplicaSet_To_v1beta1_ReplicaSet(in *apps.ReplicaSet, out *v1beta1.ReplicaSet, s conversion.Scope) error {
 	out.ObjectMeta = in.ObjectMeta
-	if err := Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil {
+	if err := Convert_apps_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil {
 		return err
 	}
-	if err := Convert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil {
+	if err := Convert_apps_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil {
 		return err
 	}
 	return nil
 }
 
-// Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet is an autogenerated conversion function.
-func Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(in *extensions.ReplicaSet, out *v1beta1.ReplicaSet, s conversion.Scope) error {
-	return autoConvert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(in, out, s)
+// Convert_apps_ReplicaSet_To_v1beta1_ReplicaSet is an autogenerated conversion function.
+func Convert_apps_ReplicaSet_To_v1beta1_ReplicaSet(in *apps.ReplicaSet, out *v1beta1.ReplicaSet, s conversion.Scope) error {
+	return autoConvert_apps_ReplicaSet_To_v1beta1_ReplicaSet(in, out, s)
 }
 
-func autoConvert_v1beta1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in *v1beta1.ReplicaSetCondition, out *extensions.ReplicaSetCondition, s conversion.Scope) error {
-	out.Type = extensions.ReplicaSetConditionType(in.Type)
+func autoConvert_v1beta1_ReplicaSetCondition_To_apps_ReplicaSetCondition(in *v1beta1.ReplicaSetCondition, out *apps.ReplicaSetCondition, s conversion.Scope) error {
+	out.Type = apps.ReplicaSetConditionType(in.Type)
 	out.Status = core.ConditionStatus(in.Status)
 	out.LastTransitionTime = in.LastTransitionTime
 	out.Reason = in.Reason
@@ -1755,12 +1644,12 @@ func autoConvert_v1beta1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(i
 	return nil
 }
 
-// Convert_v1beta1_ReplicaSetCondition_To_extensions_ReplicaSetCondition is an autogenerated conversion function.
-func Convert_v1beta1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in *v1beta1.ReplicaSetCondition, out *extensions.ReplicaSetCondition, s conversion.Scope) error {
-	return autoConvert_v1beta1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in, out, s)
+// Convert_v1beta1_ReplicaSetCondition_To_apps_ReplicaSetCondition is an autogenerated conversion function.
+func Convert_v1beta1_ReplicaSetCondition_To_apps_ReplicaSetCondition(in *v1beta1.ReplicaSetCondition, out *apps.ReplicaSetCondition, s conversion.Scope) error {
+	return autoConvert_v1beta1_ReplicaSetCondition_To_apps_ReplicaSetCondition(in, out, s)
 }
 
-func autoConvert_extensions_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(in *extensions.ReplicaSetCondition, out *v1beta1.ReplicaSetCondition, s conversion.Scope) error {
+func autoConvert_apps_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(in *apps.ReplicaSetCondition, out *v1beta1.ReplicaSetCondition, s conversion.Scope) error {
 	out.Type = v1beta1.ReplicaSetConditionType(in.Type)
 	out.Status = v1.ConditionStatus(in.Status)
 	out.LastTransitionTime = in.LastTransitionTime
@@ -1769,18 +1658,18 @@ func autoConvert_extensions_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(i
 	return nil
 }
 
-// Convert_extensions_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition is an autogenerated conversion function.
-func Convert_extensions_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(in *extensions.ReplicaSetCondition, out *v1beta1.ReplicaSetCondition, s conversion.Scope) error {
-	return autoConvert_extensions_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(in, out, s)
+// Convert_apps_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition is an autogenerated conversion function.
+func Convert_apps_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(in *apps.ReplicaSetCondition, out *v1beta1.ReplicaSetCondition, s conversion.Scope) error {
+	return autoConvert_apps_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(in, out, s)
 }
 
-func autoConvert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(in *v1beta1.ReplicaSetList, out *extensions.ReplicaSetList, s conversion.Scope) error {
+func autoConvert_v1beta1_ReplicaSetList_To_apps_ReplicaSetList(in *v1beta1.ReplicaSetList, out *apps.ReplicaSetList, s conversion.Scope) error {
 	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
-		*out = make([]extensions.ReplicaSet, len(*in))
+		*out = make([]apps.ReplicaSet, len(*in))
 		for i := range *in {
-			if err := Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil {
+			if err := Convert_v1beta1_ReplicaSet_To_apps_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil {
 				return err
 			}
 		}
@@ -1790,18 +1679,18 @@ func autoConvert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(in *v1beta1
 	return nil
 }
 
-// Convert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList is an autogenerated conversion function.
-func Convert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(in *v1beta1.ReplicaSetList, out *extensions.ReplicaSetList, s conversion.Scope) error {
-	return autoConvert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(in, out, s)
+// Convert_v1beta1_ReplicaSetList_To_apps_ReplicaSetList is an autogenerated conversion function.
+func Convert_v1beta1_ReplicaSetList_To_apps_ReplicaSetList(in *v1beta1.ReplicaSetList, out *apps.ReplicaSetList, s conversion.Scope) error {
+	return autoConvert_v1beta1_ReplicaSetList_To_apps_ReplicaSetList(in, out, s)
 }
 
-func autoConvert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in *extensions.ReplicaSetList, out *v1beta1.ReplicaSetList, s conversion.Scope) error {
+func autoConvert_apps_ReplicaSetList_To_v1beta1_ReplicaSetList(in *apps.ReplicaSetList, out *v1beta1.ReplicaSetList, s conversion.Scope) error {
 	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
 		*out = make([]v1beta1.ReplicaSet, len(*in))
 		for i := range *in {
-			if err := Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil {
+			if err := Convert_apps_ReplicaSet_To_v1beta1_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil {
 				return err
 			}
 		}
@@ -1811,12 +1700,12 @@ func autoConvert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in *extensi
 	return nil
 }
 
-// Convert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList is an autogenerated conversion function.
-func Convert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in *extensions.ReplicaSetList, out *v1beta1.ReplicaSetList, s conversion.Scope) error {
-	return autoConvert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in, out, s)
+// Convert_apps_ReplicaSetList_To_v1beta1_ReplicaSetList is an autogenerated conversion function.
+func Convert_apps_ReplicaSetList_To_v1beta1_ReplicaSetList(in *apps.ReplicaSetList, out *v1beta1.ReplicaSetList, s conversion.Scope) error {
+	return autoConvert_apps_ReplicaSetList_To_v1beta1_ReplicaSetList(in, out, s)
 }
 
-func autoConvert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *v1beta1.ReplicaSetSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error {
+func autoConvert_v1beta1_ReplicaSetSpec_To_apps_ReplicaSetSpec(in *v1beta1.ReplicaSetSpec, out *apps.ReplicaSetSpec, s conversion.Scope) error {
 	if err := metav1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil {
 		return err
 	}
@@ -1828,7 +1717,7 @@ func autoConvert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *v1beta1
 	return nil
 }
 
-func autoConvert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *v1beta1.ReplicaSetSpec, s conversion.Scope) error {
+func autoConvert_apps_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *apps.ReplicaSetSpec, out *v1beta1.ReplicaSetSpec, s conversion.Scope) error {
 	if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil {
 		return err
 	}
@@ -1840,22 +1729,22 @@ func autoConvert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *extensi
 	return nil
 }
 
-func autoConvert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in *v1beta1.ReplicaSetStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error {
+func autoConvert_v1beta1_ReplicaSetStatus_To_apps_ReplicaSetStatus(in *v1beta1.ReplicaSetStatus, out *apps.ReplicaSetStatus, s conversion.Scope) error {
 	out.Replicas = in.Replicas
 	out.FullyLabeledReplicas = in.FullyLabeledReplicas
 	out.ReadyReplicas = in.ReadyReplicas
 	out.AvailableReplicas = in.AvailableReplicas
 	out.ObservedGeneration = in.ObservedGeneration
-	out.Conditions = *(*[]extensions.ReplicaSetCondition)(unsafe.Pointer(&in.Conditions))
+	out.Conditions = *(*[]apps.ReplicaSetCondition)(unsafe.Pointer(&in.Conditions))
 	return nil
 }
 
-// Convert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus is an autogenerated conversion function.
-func Convert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in *v1beta1.ReplicaSetStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error {
-	return autoConvert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in, out, s)
+// Convert_v1beta1_ReplicaSetStatus_To_apps_ReplicaSetStatus is an autogenerated conversion function.
+func Convert_v1beta1_ReplicaSetStatus_To_apps_ReplicaSetStatus(in *v1beta1.ReplicaSetStatus, out *apps.ReplicaSetStatus, s conversion.Scope) error {
+	return autoConvert_v1beta1_ReplicaSetStatus_To_apps_ReplicaSetStatus(in, out, s)
 }
 
-func autoConvert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in *extensions.ReplicaSetStatus, out *v1beta1.ReplicaSetStatus, s conversion.Scope) error {
+func autoConvert_apps_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in *apps.ReplicaSetStatus, out *v1beta1.ReplicaSetStatus, s conversion.Scope) error {
 	out.Replicas = in.Replicas
 	out.FullyLabeledReplicas = in.FullyLabeledReplicas
 	out.ReadyReplicas = in.ReadyReplicas
@@ -1865,9 +1754,9 @@ func autoConvert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in *ext
 	return nil
 }
 
-// Convert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus is an autogenerated conversion function.
-func Convert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in *extensions.ReplicaSetStatus, out *v1beta1.ReplicaSetStatus, s conversion.Scope) error {
-	return autoConvert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in, out, s)
+// Convert_apps_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus is an autogenerated conversion function.
+func Convert_apps_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in *apps.ReplicaSetStatus, out *v1beta1.ReplicaSetStatus, s conversion.Scope) error {
+	return autoConvert_apps_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in, out, s)
 }
 
 func autoConvert_v1beta1_ReplicationControllerDummy_To_extensions_ReplicationControllerDummy(in *v1beta1.ReplicationControllerDummy, out *extensions.ReplicationControllerDummy, s conversion.Scope) error {
@@ -1888,48 +1777,70 @@ func Convert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControl
 	return autoConvert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy(in, out, s)
 }
 
-func autoConvert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in *v1beta1.RollbackConfig, out *extensions.RollbackConfig, s conversion.Scope) error {
+func autoConvert_v1beta1_RollbackConfig_To_apps_RollbackConfig(in *v1beta1.RollbackConfig, out *apps.RollbackConfig, s conversion.Scope) error {
 	out.Revision = in.Revision
 	return nil
 }
 
-// Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig is an autogenerated conversion function.
-func Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in *v1beta1.RollbackConfig, out *extensions.RollbackConfig, s conversion.Scope) error {
-	return autoConvert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in, out, s)
+// Convert_v1beta1_RollbackConfig_To_apps_RollbackConfig is an autogenerated conversion function.
+func Convert_v1beta1_RollbackConfig_To_apps_RollbackConfig(in *v1beta1.RollbackConfig, out *apps.RollbackConfig, s conversion.Scope) error {
+	return autoConvert_v1beta1_RollbackConfig_To_apps_RollbackConfig(in, out, s)
 }
 
-func autoConvert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in *extensions.RollbackConfig, out *v1beta1.RollbackConfig, s conversion.Scope) error {
+func autoConvert_apps_RollbackConfig_To_v1beta1_RollbackConfig(in *apps.RollbackConfig, out *v1beta1.RollbackConfig, s conversion.Scope) error {
 	out.Revision = in.Revision
 	return nil
 }
 
-// Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig is an autogenerated conversion function.
-func Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in *extensions.RollbackConfig, out *v1beta1.RollbackConfig, s conversion.Scope) error {
-	return autoConvert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in, out, s)
+// Convert_apps_RollbackConfig_To_v1beta1_RollbackConfig is an autogenerated conversion function.
+func Convert_apps_RollbackConfig_To_v1beta1_RollbackConfig(in *apps.RollbackConfig, out *v1beta1.RollbackConfig, s conversion.Scope) error {
+	return autoConvert_apps_RollbackConfig_To_v1beta1_RollbackConfig(in, out, s)
 }
 
-func autoConvert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in *v1beta1.RollingUpdateDaemonSet, out *extensions.RollingUpdateDaemonSet, s conversion.Scope) error {
+func autoConvert_v1beta1_RollingUpdateDaemonSet_To_apps_RollingUpdateDaemonSet(in *v1beta1.RollingUpdateDaemonSet, out *apps.RollingUpdateDaemonSet, s conversion.Scope) error {
 	// WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString)
 	return nil
 }
 
-func autoConvert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(in *extensions.RollingUpdateDaemonSet, out *v1beta1.RollingUpdateDaemonSet, s conversion.Scope) error {
+func autoConvert_apps_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(in *apps.RollingUpdateDaemonSet, out *v1beta1.RollingUpdateDaemonSet, s conversion.Scope) error {
 	// WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString)
 	return nil
 }
 
-func autoConvert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *v1beta1.RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error {
+func autoConvert_v1beta1_RollingUpdateDeployment_To_apps_RollingUpdateDeployment(in *v1beta1.RollingUpdateDeployment, out *apps.RollingUpdateDeployment, s conversion.Scope) error {
 	// WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString)
 	// WARNING: in.MaxSurge requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString)
 	return nil
 }
 
-func autoConvert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *v1beta1.RollingUpdateDeployment, s conversion.Scope) error {
+func autoConvert_apps_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *apps.RollingUpdateDeployment, out *v1beta1.RollingUpdateDeployment, s conversion.Scope) error {
 	// WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString)
 	// WARNING: in.MaxSurge requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString)
 	return nil
 }
 
+func autoConvert_v1beta1_RunAsGroupStrategyOptions_To_policy_RunAsGroupStrategyOptions(in *v1beta1.RunAsGroupStrategyOptions, out *policy.RunAsGroupStrategyOptions, s conversion.Scope) error {
+	out.Rule = policy.RunAsGroupStrategy(in.Rule)
+	out.Ranges = *(*[]policy.IDRange)(unsafe.Pointer(&in.Ranges))
+	return nil
+}
+
+// Convert_v1beta1_RunAsGroupStrategyOptions_To_policy_RunAsGroupStrategyOptions is an autogenerated conversion function.
+func Convert_v1beta1_RunAsGroupStrategyOptions_To_policy_RunAsGroupStrategyOptions(in *v1beta1.RunAsGroupStrategyOptions, out *policy.RunAsGroupStrategyOptions, s conversion.Scope) error {
+	return autoConvert_v1beta1_RunAsGroupStrategyOptions_To_policy_RunAsGroupStrategyOptions(in, out, s)
+}
+
+func autoConvert_policy_RunAsGroupStrategyOptions_To_v1beta1_RunAsGroupStrategyOptions(in *policy.RunAsGroupStrategyOptions, out *v1beta1.RunAsGroupStrategyOptions, s conversion.Scope) error {
+	out.Rule = v1beta1.RunAsGroupStrategy(in.Rule)
+	out.Ranges = *(*[]v1beta1.IDRange)(unsafe.Pointer(&in.Ranges))
+	return nil
+}
+
+// Convert_policy_RunAsGroupStrategyOptions_To_v1beta1_RunAsGroupStrategyOptions is an autogenerated conversion function.
+func Convert_policy_RunAsGroupStrategyOptions_To_v1beta1_RunAsGroupStrategyOptions(in *policy.RunAsGroupStrategyOptions, out *v1beta1.RunAsGroupStrategyOptions, s conversion.Scope) error {
+	return autoConvert_policy_RunAsGroupStrategyOptions_To_v1beta1_RunAsGroupStrategyOptions(in, out, s)
+}
+
 func autoConvert_v1beta1_RunAsUserStrategyOptions_To_policy_RunAsUserStrategyOptions(in *v1beta1.RunAsUserStrategyOptions, out *policy.RunAsUserStrategyOptions, s conversion.Scope) error {
 	out.Rule = policy.RunAsUserStrategy(in.Rule)
 	out.Ranges = *(*[]policy.IDRange)(unsafe.Pointer(&in.Ranges))
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/validation/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/validation/BUILD
deleted file mode 100644
index 7e5cb10616b2..000000000000
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/validation/BUILD
+++ /dev/null
@@ -1,53 +0,0 @@
-package(default_visibility = ["//visibility:public"])
-
-load(
-    "@io_bazel_rules_go//go:def.bzl",
-    "go_library",
-    "go_test",
-)
-
-go_library(
-    name = "go_default_library",
-    srcs = ["validation.go"],
-    importpath = "k8s.io/kubernetes/pkg/apis/extensions/validation",
-    deps = [
-        "//pkg/apis/core:go_default_library",
-        "//pkg/apis/core/validation:go_default_library",
-        "//pkg/apis/extensions:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/api/validation:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
-    ],
-)
-
-go_test(
-    name = "go_default_test",
-    srcs = ["validation_test.go"],
-    embed = [":go_default_library"],
-    deps = [
-        "//pkg/apis/core:go_default_library",
-        "//pkg/apis/extensions:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
-        "//vendor/github.com/davecgh/go-spew/spew:go_default_library",
-    ],
-)
-
-filegroup(
-    name = "package-srcs",
-    srcs = glob(["**"]),
-    tags = ["automanaged"],
-    visibility = ["//visibility:private"],
-)
-
-filegroup(
-    name = "all-srcs",
-    srcs = [":package-srcs"],
-    tags = ["automanaged"],
-)
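The generated converters above lean heavily on a zero-copy idiom: when two types are memory-layout-identical, a slice (or pointer) of one is reinterpreted as the other through `unsafe.Pointer` instead of being converted element by element, e.g. `out.Ranges = *(*[]policy.IDRange)(unsafe.Pointer(&in.Ranges))`. Fields where that is not possible, such as the `*IntOrString` vs `IntOrString` mismatches, are flagged with `WARNING` comments and left to hand-written converters. A minimal, self-contained sketch of the idiom, using hypothetical stand-in types rather than the real k8s structs:

```go
package main

import (
	"fmt"
	"unsafe"
)

// Stand-ins for a versioned and an internal type with identical memory
// layouts; hypothetical types for illustration, not the real k8s ones.
type v1beta1IDRange struct{ Min, Max int64 }
type policyIDRange struct{ Min, Max int64 }

func main() {
	in := []v1beta1IDRange{{Min: 1, Max: 65535}}

	// The same reinterpret-cast the generated code emits: no allocation and
	// no per-element copy; both slice headers share the same backing array.
	out := *(*[]policyIDRange)(unsafe.Pointer(&in))

	fmt.Println(out[0].Min, out[0].Max) // 1 65535
}
```

The cast is only safe because the two struct definitions are field-for-field identical; that is why the generator falls back to explicit per-field conversion (or a manual converter) the moment the layouts diverge.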
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/validation/validation.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/validation/validation.go
deleted file mode 100644
index 36c191c17fb6..000000000000
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/validation/validation.go
+++ /dev/null
@@ -1,609 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package validation
-
-import (
-	"net"
-	"regexp"
-	"strconv"
-	"strings"
-
-	apiequality "k8s.io/apimachinery/pkg/api/equality"
-	apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
-	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/util/intstr"
-	"k8s.io/apimachinery/pkg/util/validation"
-	"k8s.io/apimachinery/pkg/util/validation/field"
-	api "k8s.io/kubernetes/pkg/apis/core"
-	apivalidation "k8s.io/kubernetes/pkg/apis/core/validation"
-	"k8s.io/kubernetes/pkg/apis/extensions"
-)
-
-// ValidateDaemonSet tests if required fields in the DaemonSet are set.
-func ValidateDaemonSet(ds *extensions.DaemonSet) field.ErrorList {
-	allErrs := apivalidation.ValidateObjectMeta(&ds.ObjectMeta, true, ValidateDaemonSetName, field.NewPath("metadata"))
-	allErrs = append(allErrs, ValidateDaemonSetSpec(&ds.Spec, field.NewPath("spec"))...)
-	return allErrs
-}
-
-// ValidateDaemonSetUpdate tests if required fields in the DaemonSet are set.
-func ValidateDaemonSetUpdate(ds, oldDS *extensions.DaemonSet) field.ErrorList {
-	allErrs := apivalidation.ValidateObjectMetaUpdate(&ds.ObjectMeta, &oldDS.ObjectMeta, field.NewPath("metadata"))
-	allErrs = append(allErrs, ValidateDaemonSetSpecUpdate(&ds.Spec, &oldDS.Spec, field.NewPath("spec"))...)
-	allErrs = append(allErrs, ValidateDaemonSetSpec(&ds.Spec, field.NewPath("spec"))...)
-	return allErrs
-}
-
-func ValidateDaemonSetSpecUpdate(newSpec, oldSpec *extensions.DaemonSetSpec, fldPath *field.Path) field.ErrorList {
-	allErrs := field.ErrorList{}
-
-	// TemplateGeneration shouldn't be decremented
-	if newSpec.TemplateGeneration < oldSpec.TemplateGeneration {
-		allErrs = append(allErrs, field.Invalid(fldPath.Child("templateGeneration"), newSpec.TemplateGeneration, "must not be decremented"))
-	}
-
-	// TemplateGeneration should be increased when and only when template is changed
-	templateUpdated := !apiequality.Semantic.DeepEqual(newSpec.Template, oldSpec.Template)
-	if newSpec.TemplateGeneration == oldSpec.TemplateGeneration && templateUpdated {
-		allErrs = append(allErrs, field.Invalid(fldPath.Child("templateGeneration"), newSpec.TemplateGeneration, "must be incremented upon template update"))
-	} else if newSpec.TemplateGeneration > oldSpec.TemplateGeneration && !templateUpdated {
-		allErrs = append(allErrs, field.Invalid(fldPath.Child("templateGeneration"), newSpec.TemplateGeneration, "must not be incremented without template update"))
-	}
-
-	return allErrs
-}
-
-// validateDaemonSetStatus validates a DaemonSetStatus
-func validateDaemonSetStatus(status *extensions.DaemonSetStatus, fldPath *field.Path) field.ErrorList {
-	allErrs := field.ErrorList{}
-	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.CurrentNumberScheduled), fldPath.Child("currentNumberScheduled"))...)
-	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberMisscheduled), fldPath.Child("numberMisscheduled"))...)
-	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.DesiredNumberScheduled), fldPath.Child("desiredNumberScheduled"))...)
-	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberReady), fldPath.Child("numberReady"))...)
-	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(status.ObservedGeneration, fldPath.Child("observedGeneration"))...)
-	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.UpdatedNumberScheduled), fldPath.Child("updatedNumberScheduled"))...)
-	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberAvailable), fldPath.Child("numberAvailable"))...)
-	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberUnavailable), fldPath.Child("numberUnavailable"))...)
-	if status.CollisionCount != nil {
-		allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*status.CollisionCount), fldPath.Child("collisionCount"))...)
-	}
-	return allErrs
-}
-
-// ValidateDaemonSetStatus validates tests if required fields in the DaemonSet Status section
-func ValidateDaemonSetStatusUpdate(ds, oldDS *extensions.DaemonSet) field.ErrorList {
-	allErrs := apivalidation.ValidateObjectMetaUpdate(&ds.ObjectMeta, &oldDS.ObjectMeta, field.NewPath("metadata"))
-	allErrs = append(allErrs, validateDaemonSetStatus(&ds.Status, field.NewPath("status"))...)
-	if apivalidation.IsDecremented(ds.Status.CollisionCount, oldDS.Status.CollisionCount) {
-		value := int32(0)
-		if ds.Status.CollisionCount != nil {
-			value = *ds.Status.CollisionCount
-		}
-		allErrs = append(allErrs, field.Invalid(field.NewPath("status").Child("collisionCount"), value, "cannot be decremented"))
-	}
-	return allErrs
-}
-
-// ValidateDaemonSetSpec tests if required fields in the DaemonSetSpec are set.
-func ValidateDaemonSetSpec(spec *extensions.DaemonSetSpec, fldPath *field.Path) field.ErrorList {
-	allErrs := field.ErrorList{}
-
-	allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...)
-
-	selector, err := metav1.LabelSelectorAsSelector(spec.Selector)
-	if err == nil && !selector.Matches(labels.Set(spec.Template.Labels)) {
-		allErrs = append(allErrs, field.Invalid(fldPath.Child("template", "metadata", "labels"), spec.Template.Labels, "`selector` does not match template `labels`"))
-	}
-	if spec.Selector != nil && len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 {
-		allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is invalid for daemonset"))
-	}
-
-	allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(&spec.Template, fldPath.Child("template"))...)
-	// Daemons typically run on more than one node, so mark Read-Write persistent disks as invalid.
-	allErrs = append(allErrs, apivalidation.ValidateReadOnlyPersistentDisks(spec.Template.Spec.Volumes, fldPath.Child("template", "spec", "volumes"))...)
-	// RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec().
-	if spec.Template.Spec.RestartPolicy != api.RestartPolicyAlways {
-		allErrs = append(allErrs, field.NotSupported(fldPath.Child("template", "spec", "restartPolicy"), spec.Template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)}))
-	}
-	if spec.Template.Spec.ActiveDeadlineSeconds != nil {
-		allErrs = append(allErrs, field.Invalid(fldPath.Child("template", "spec", "activeDeadlineSeconds"), spec.Template.Spec.ActiveDeadlineSeconds, "must not be specified"))
-	}
-	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...)
-	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.TemplateGeneration), fldPath.Child("templateGeneration"))...)
-
-	allErrs = append(allErrs, ValidateDaemonSetUpdateStrategy(&spec.UpdateStrategy, fldPath.Child("updateStrategy"))...)
-	if spec.RevisionHistoryLimit != nil {
-		// zero is a valid RevisionHistoryLimit
-		allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.RevisionHistoryLimit), fldPath.Child("revisionHistoryLimit"))...)
-	}
-	return allErrs
-}
-
-func ValidateRollingUpdateDaemonSet(rollingUpdate *extensions.RollingUpdateDaemonSet, fldPath *field.Path) field.ErrorList {
-	allErrs := field.ErrorList{}
-	allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...)
-	if getIntOrPercentValue(rollingUpdate.MaxUnavailable) == 0 {
-		// MaxUnavailable cannot be 0.
-		allErrs = append(allErrs, field.Invalid(fldPath.Child("maxUnavailable"), rollingUpdate.MaxUnavailable, "cannot be 0"))
-	}
-	// Validate that MaxUnavailable is not more than 100%.
-	allErrs = append(allErrs, IsNotMoreThan100Percent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...)
-	return allErrs
-}
-
-func ValidateDaemonSetUpdateStrategy(strategy *extensions.DaemonSetUpdateStrategy, fldPath *field.Path) field.ErrorList {
-	allErrs := field.ErrorList{}
-	switch strategy.Type {
-	case extensions.OnDeleteDaemonSetStrategyType:
-	case extensions.RollingUpdateDaemonSetStrategyType:
-		// Make sure RollingUpdate field isn't nil.
-		if strategy.RollingUpdate == nil {
-			allErrs = append(allErrs, field.Required(fldPath.Child("rollingUpdate"), ""))
-			return allErrs
-		}
-		allErrs = append(allErrs, ValidateRollingUpdateDaemonSet(strategy.RollingUpdate, fldPath.Child("rollingUpdate"))...)
-	default:
-		validValues := []string{string(extensions.RollingUpdateDaemonSetStrategyType), string(extensions.OnDeleteDaemonSetStrategyType)}
-		allErrs = append(allErrs, field.NotSupported(fldPath, strategy, validValues))
-	}
-	return allErrs
-}
-
-// ValidateDaemonSetName can be used to check whether the given daemon set name is valid.
-// Prefix indicates this name will be used as part of generation, in which case
-// trailing dashes are allowed.
-var ValidateDaemonSetName = apimachineryvalidation.NameIsDNSSubdomain
-
-// Validates that the given name can be used as a deployment name.
-var ValidateDeploymentName = apimachineryvalidation.NameIsDNSSubdomain
-
-func ValidatePositiveIntOrPercent(intOrPercent intstr.IntOrString, fldPath *field.Path) field.ErrorList {
-	allErrs := field.ErrorList{}
-	switch intOrPercent.Type {
-	case intstr.String:
-		for _, msg := range validation.IsValidPercent(intOrPercent.StrVal) {
-			allErrs = append(allErrs, field.Invalid(fldPath, intOrPercent, msg))
-		}
-	case intstr.Int:
-		allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(intOrPercent.IntValue()), fldPath)...)
-	default:
-		allErrs = append(allErrs, field.Invalid(fldPath, intOrPercent, "must be an integer or percentage (e.g '5%%')"))
-	}
-	return allErrs
-}
-
-func getPercentValue(intOrStringValue intstr.IntOrString) (int, bool) {
-	if intOrStringValue.Type != intstr.String {
-		return 0, false
-	}
-	if len(validation.IsValidPercent(intOrStringValue.StrVal)) != 0 {
-		return 0, false
-	}
-	value, _ := strconv.Atoi(intOrStringValue.StrVal[:len(intOrStringValue.StrVal)-1])
-	return value, true
-}
-
-func getIntOrPercentValue(intOrStringValue intstr.IntOrString) int {
-	value, isPercent := getPercentValue(intOrStringValue)
-	if isPercent {
-		return value
-	}
-	return intOrStringValue.IntValue()
-}
-
-func IsNotMoreThan100Percent(intOrStringValue intstr.IntOrString, fldPath *field.Path) field.ErrorList {
-	allErrs := field.ErrorList{}
-	value, isPercent := getPercentValue(intOrStringValue)
-	if !isPercent || value <= 100 {
-		return nil
-	}
-	allErrs = append(allErrs, field.Invalid(fldPath, intOrStringValue, "must not be greater than 100%"))
-	return allErrs
-}
-
-func ValidateRollingUpdateDeployment(rollingUpdate *extensions.RollingUpdateDeployment, fldPath *field.Path) field.ErrorList {
-	allErrs := field.ErrorList{}
-	allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...)
-	allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxSurge, fldPath.Child("maxSurge"))...)
-	if getIntOrPercentValue(rollingUpdate.MaxUnavailable) == 0 && getIntOrPercentValue(rollingUpdate.MaxSurge) == 0 {
-		// Both MaxSurge and MaxUnavailable cannot be zero.
-		allErrs = append(allErrs, field.Invalid(fldPath.Child("maxUnavailable"), rollingUpdate.MaxUnavailable, "may not be 0 when `maxSurge` is 0"))
-	}
-	// Validate that MaxUnavailable is not more than 100%.
-	allErrs = append(allErrs, IsNotMoreThan100Percent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...)
-	return allErrs
-}
-
-func ValidateDeploymentStrategy(strategy *extensions.DeploymentStrategy, fldPath *field.Path) field.ErrorList {
-	allErrs := field.ErrorList{}
-	switch strategy.Type {
-	case extensions.RecreateDeploymentStrategyType:
-		if strategy.RollingUpdate != nil {
-			allErrs = append(allErrs, field.Forbidden(fldPath.Child("rollingUpdate"), "may not be specified when strategy `type` is '"+string(extensions.RecreateDeploymentStrategyType+"'")))
-		}
-	case extensions.RollingUpdateDeploymentStrategyType:
-		// This should never happen since it's set and checked in defaults.go
-		if strategy.RollingUpdate == nil {
-			allErrs = append(allErrs, field.Required(fldPath.Child("rollingUpdate"), "this should be defaulted and never be nil"))
-		} else {
-			allErrs = append(allErrs, ValidateRollingUpdateDeployment(strategy.RollingUpdate, fldPath.Child("rollingUpdate"))...)
-		}
-	default:
-		validValues := []string{string(extensions.RecreateDeploymentStrategyType), string(extensions.RollingUpdateDeploymentStrategyType)}
-		allErrs = append(allErrs, field.NotSupported(fldPath, strategy, validValues))
-	}
-	return allErrs
-}
-
-func ValidateRollback(rollback *extensions.RollbackConfig, fldPath *field.Path) field.ErrorList {
-	allErrs := field.ErrorList{}
-	v := rollback.Revision
-	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(v), fldPath.Child("version"))...)
-	return allErrs
-}
-
-// Validates given deployment spec.
-func ValidateDeploymentSpec(spec *extensions.DeploymentSpec, fldPath *field.Path) field.ErrorList {
-	allErrs := field.ErrorList{}
-	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...)
-
-	if spec.Selector == nil {
-		allErrs = append(allErrs, field.Required(fldPath.Child("selector"), ""))
-	} else {
-		allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...)
-		if len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 {
-			allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is invalid for deployment"))
-		}
-	}
-
-	selector, err := metav1.LabelSelectorAsSelector(spec.Selector)
-	if err != nil {
-		allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "invalid label selector"))
-	} else {
-		allErrs = append(allErrs, ValidatePodTemplateSpecForReplicaSet(&spec.Template, selector, spec.Replicas, fldPath.Child("template"))...)
-	}
-
-	allErrs = append(allErrs, ValidateDeploymentStrategy(&spec.Strategy, fldPath.Child("strategy"))...)
-	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...)
-	if spec.RevisionHistoryLimit != nil {
-		// zero is a valid RevisionHistoryLimit
-		allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.RevisionHistoryLimit), fldPath.Child("revisionHistoryLimit"))...)
-	}
-	if spec.RollbackTo != nil {
-		allErrs = append(allErrs, ValidateRollback(spec.RollbackTo, fldPath.Child("rollback"))...)
-	}
-	if spec.ProgressDeadlineSeconds != nil {
-		allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.ProgressDeadlineSeconds), fldPath.Child("progressDeadlineSeconds"))...)
-		if *spec.ProgressDeadlineSeconds <= spec.MinReadySeconds {
-			allErrs = append(allErrs, field.Invalid(fldPath.Child("progressDeadlineSeconds"), spec.ProgressDeadlineSeconds, "must be greater than minReadySeconds"))
-		}
-	}
-	return allErrs
-}
-
-// Validates given deployment status.
-func ValidateDeploymentStatus(status *extensions.DeploymentStatus, fldPath *field.Path) field.ErrorList {
-	allErrs := field.ErrorList{}
-	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(status.ObservedGeneration, fldPath.Child("observedGeneration"))...)
-	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.Replicas), fldPath.Child("replicas"))...)
-	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.UpdatedReplicas), fldPath.Child("updatedReplicas"))...)
-	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.ReadyReplicas), fldPath.Child("readyReplicas"))...)
-	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.AvailableReplicas), fldPath.Child("availableReplicas"))...)
-	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.UnavailableReplicas), fldPath.Child("unavailableReplicas"))...)
-	if status.CollisionCount != nil {
-		allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*status.CollisionCount), fldPath.Child("collisionCount"))...)
-	}
-	msg := "cannot be greater than status.replicas"
-	if status.UpdatedReplicas > status.Replicas {
-		allErrs = append(allErrs, field.Invalid(fldPath.Child("updatedReplicas"), status.UpdatedReplicas, msg))
-	}
-	if status.ReadyReplicas > status.Replicas {
-		allErrs = append(allErrs, field.Invalid(fldPath.Child("readyReplicas"), status.ReadyReplicas, msg))
-	}
-	if status.AvailableReplicas > status.Replicas {
-		allErrs = append(allErrs, field.Invalid(fldPath.Child("availableReplicas"), status.AvailableReplicas, msg))
-	}
-	if status.AvailableReplicas > status.ReadyReplicas {
-		allErrs = append(allErrs, field.Invalid(fldPath.Child("availableReplicas"), status.AvailableReplicas, "cannot be greater than readyReplicas"))
-	}
-	return allErrs
-}
-
-func ValidateDeploymentUpdate(update, old *extensions.Deployment) field.ErrorList {
-	allErrs := apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))
-	allErrs = append(allErrs, ValidateDeploymentSpec(&update.Spec, field.NewPath("spec"))...)
-	return allErrs
-}
-
-func ValidateDeploymentStatusUpdate(update, old *extensions.Deployment) field.ErrorList {
-	allErrs := apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))
-	fldPath := field.NewPath("status")
-	allErrs = append(allErrs, ValidateDeploymentStatus(&update.Status, fldPath)...)
-	if apivalidation.IsDecremented(update.Status.CollisionCount, old.Status.CollisionCount) {
-		value := int32(0)
-		if update.Status.CollisionCount != nil {
-			value = *update.Status.CollisionCount
-		}
-		allErrs = append(allErrs, field.Invalid(fldPath.Child("collisionCount"), value, "cannot be decremented"))
-	}
-	return allErrs
-}
-
-func ValidateDeployment(obj *extensions.Deployment) field.ErrorList {
-	allErrs := apivalidation.ValidateObjectMeta(&obj.ObjectMeta, true, ValidateDeploymentName, field.NewPath("metadata"))
-	allErrs = append(allErrs, ValidateDeploymentSpec(&obj.Spec, field.NewPath("spec"))...)
-	return allErrs
-}
-
-func ValidateDeploymentRollback(obj *extensions.DeploymentRollback) field.ErrorList {
-	allErrs := apivalidation.ValidateAnnotations(obj.UpdatedAnnotations, field.NewPath("updatedAnnotations"))
-	if len(obj.Name) == 0 {
-		allErrs = append(allErrs, field.Required(field.NewPath("name"), "name is required"))
-	}
-	allErrs = append(allErrs, ValidateRollback(&obj.RollbackTo, field.NewPath("rollback"))...)
-	return allErrs
-}
-
-// ValidateIngress tests if required fields in the Ingress are set.
-func ValidateIngress(ingress *extensions.Ingress) field.ErrorList {
-	allErrs := apivalidation.ValidateObjectMeta(&ingress.ObjectMeta, true, ValidateIngressName, field.NewPath("metadata"))
-	allErrs = append(allErrs, ValidateIngressSpec(&ingress.Spec, field.NewPath("spec"))...)
-	return allErrs
-}
-
-// ValidateIngressName validates that the given name can be used as an Ingress name.
-var ValidateIngressName = apimachineryvalidation.NameIsDNSSubdomain
-
-func validateIngressTLS(spec *extensions.IngressSpec, fldPath *field.Path) field.ErrorList {
-	allErrs := field.ErrorList{}
-	// TODO: Perform a more thorough validation of spec.TLS.Hosts that takes
-	// the wildcard spec from RFC 6125 into account.
-	for _, itls := range spec.TLS {
-		for i, host := range itls.Hosts {
-			if strings.Contains(host, "*") {
-				for _, msg := range validation.IsWildcardDNS1123Subdomain(host) {
-					allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("hosts"), host, msg))
-				}
-				continue
-			}
-			for _, msg := range validation.IsDNS1123Subdomain(host) {
-				allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("hosts"), host, msg))
-			}
-		}
-	}
-
-	return allErrs
-}
-
-// ValidateIngressSpec tests if required fields in the IngressSpec are set.
-func ValidateIngressSpec(spec *extensions.IngressSpec, fldPath *field.Path) field.ErrorList {
-	allErrs := field.ErrorList{}
-	// TODO: Is a default backend mandatory?
-	if spec.Backend != nil {
-		allErrs = append(allErrs, validateIngressBackend(spec.Backend, fldPath.Child("backend"))...)
-	} else if len(spec.Rules) == 0 {
-		allErrs = append(allErrs, field.Invalid(fldPath, spec.Rules, "either `backend` or `rules` must be specified"))
-	}
-	if len(spec.Rules) > 0 {
-		allErrs = append(allErrs, validateIngressRules(spec.Rules, fldPath.Child("rules"))...)
-	}
-	if len(spec.TLS) > 0 {
-		allErrs = append(allErrs, validateIngressTLS(spec, fldPath.Child("tls"))...)
-	}
-	return allErrs
-}
-
-// ValidateIngressUpdate tests if required fields in the Ingress are set.
-func ValidateIngressUpdate(ingress, oldIngress *extensions.Ingress) field.ErrorList {
-	allErrs := apivalidation.ValidateObjectMetaUpdate(&ingress.ObjectMeta, &oldIngress.ObjectMeta, field.NewPath("metadata"))
-	allErrs = append(allErrs, ValidateIngressSpec(&ingress.Spec, field.NewPath("spec"))...)
-	return allErrs
-}
-
-// ValidateIngressStatusUpdate tests if required fields in the Ingress are set when updating status.
-func ValidateIngressStatusUpdate(ingress, oldIngress *extensions.Ingress) field.ErrorList {
-	allErrs := apivalidation.ValidateObjectMetaUpdate(&ingress.ObjectMeta, &oldIngress.ObjectMeta, field.NewPath("metadata"))
-	allErrs = append(allErrs, apivalidation.ValidateLoadBalancerStatus(&ingress.Status.LoadBalancer, field.NewPath("status", "loadBalancer"))...)
-	return allErrs
-}
-
-func validateIngressRules(ingressRules []extensions.IngressRule, fldPath *field.Path) field.ErrorList {
-	allErrs := field.ErrorList{}
-	if len(ingressRules) == 0 {
-		return append(allErrs, field.Required(fldPath, ""))
-	}
-	for i, ih := range ingressRules {
-		if len(ih.Host) > 0 {
-			if isIP := (net.ParseIP(ih.Host) != nil); isIP {
-				allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("host"), ih.Host, "must be a DNS name, not an IP address"))
-			}
-			// TODO: Ports and ips are allowed in the host part of a url
-			// according to RFC 3986, consider allowing them.
-			if strings.Contains(ih.Host, "*") {
-				for _, msg := range validation.IsWildcardDNS1123Subdomain(ih.Host) {
-					allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("host"), ih.Host, msg))
-				}
-				continue
-			}
-			for _, msg := range validation.IsDNS1123Subdomain(ih.Host) {
-				allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("host"), ih.Host, msg))
-			}
-		}
-		allErrs = append(allErrs, validateIngressRuleValue(&ih.IngressRuleValue, fldPath.Index(0))...)
-	}
-	return allErrs
-}
-
-func validateIngressRuleValue(ingressRule *extensions.IngressRuleValue, fldPath *field.Path) field.ErrorList {
-	allErrs := field.ErrorList{}
-	if ingressRule.HTTP != nil {
-		allErrs = append(allErrs, validateHTTPIngressRuleValue(ingressRule.HTTP, fldPath.Child("http"))...)
-	}
-	return allErrs
-}
-
-func validateHTTPIngressRuleValue(httpIngressRuleValue *extensions.HTTPIngressRuleValue, fldPath *field.Path) field.ErrorList {
-	allErrs := field.ErrorList{}
-	if len(httpIngressRuleValue.Paths) == 0 {
-		allErrs = append(allErrs, field.Required(fldPath.Child("paths"), ""))
-	}
-	for i, rule := range httpIngressRuleValue.Paths {
-		if len(rule.Path) > 0 {
-			if !strings.HasPrefix(rule.Path, "/") {
-				allErrs = append(allErrs, field.Invalid(fldPath.Child("paths").Index(i).Child("path"), rule.Path, "must be an absolute path"))
-			}
-			// TODO: More draconian path regex validation.
-			// Path must be a valid regex. This is the basic requirement.
-			// In addition to this any characters not allowed in a path per
-			// RFC 3986 section-3.3 cannot appear as a literal in the regex.
-			// Consider the example: http://host/valid?#bar, everything after
-			// the last '/' is a valid regex that matches valid#bar, which
-			// isn't a valid path, because the path terminates at the first ?
-			// or #. A more sophisticated form of validation would detect that
-			// the user is confusing url regexes with path regexes.
-			_, err := regexp.CompilePOSIX(rule.Path)
-			if err != nil {
-				allErrs = append(allErrs, field.Invalid(fldPath.Child("paths").Index(i).Child("path"), rule.Path, "must be a valid regex"))
-			}
-		}
-		allErrs = append(allErrs, validateIngressBackend(&rule.Backend, fldPath.Child("backend"))...)
-	}
-	return allErrs
-}
-
-// validateIngressBackend tests if a given backend is valid.
-func validateIngressBackend(backend *extensions.IngressBackend, fldPath *field.Path) field.ErrorList {
-	allErrs := field.ErrorList{}
-
-	// All backends must reference a single local service by name, and a single service port by name or number.
-	if len(backend.ServiceName) == 0 {
-		return append(allErrs, field.Required(fldPath.Child("serviceName"), ""))
-	} else {
-		for _, msg := range apivalidation.ValidateServiceName(backend.ServiceName, false) {
-			allErrs = append(allErrs, field.Invalid(fldPath.Child("serviceName"), backend.ServiceName, msg))
-		}
-	}
-	allErrs = append(allErrs, apivalidation.ValidatePortNumOrName(backend.ServicePort, fldPath.Child("servicePort"))...)
-	return allErrs
-}
-
-// ValidateReplicaSetName can be used to check whether the given ReplicaSet
-// name is valid.
-// Prefix indicates this name will be used as part of generation, in which case
-// trailing dashes are allowed.
-var ValidateReplicaSetName = apimachineryvalidation.NameIsDNSSubdomain
-
-// ValidateReplicaSet tests if required fields in the ReplicaSet are set.
-func ValidateReplicaSet(rs *extensions.ReplicaSet) field.ErrorList {
-	allErrs := apivalidation.ValidateObjectMeta(&rs.ObjectMeta, true, ValidateReplicaSetName, field.NewPath("metadata"))
-	allErrs = append(allErrs, ValidateReplicaSetSpec(&rs.Spec, field.NewPath("spec"))...)
-	return allErrs
-}
-
-// ValidateReplicaSetUpdate tests if required fields in the ReplicaSet are set.
-func ValidateReplicaSetUpdate(rs, oldRs *extensions.ReplicaSet) field.ErrorList {
-	allErrs := field.ErrorList{}
-	allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&rs.ObjectMeta, &oldRs.ObjectMeta, field.NewPath("metadata"))...)
-	allErrs = append(allErrs, ValidateReplicaSetSpec(&rs.Spec, field.NewPath("spec"))...)
-	return allErrs
-}
-
-// ValidateReplicaSetStatusUpdate tests if required fields in the ReplicaSet are set.
-func ValidateReplicaSetStatusUpdate(rs, oldRs *extensions.ReplicaSet) field.ErrorList {
-	allErrs := field.ErrorList{}
-	allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&rs.ObjectMeta, &oldRs.ObjectMeta, field.NewPath("metadata"))...)
-	allErrs = append(allErrs, ValidateReplicaSetStatus(rs.Status, field.NewPath("status"))...)
-	return allErrs
-}
-
-func ValidateReplicaSetStatus(status extensions.ReplicaSetStatus, fldPath *field.Path) field.ErrorList {
-	allErrs := field.ErrorList{}
-	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.Replicas), fldPath.Child("replicas"))...)
-	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.FullyLabeledReplicas), fldPath.Child("fullyLabeledReplicas"))...)
-	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.ReadyReplicas), fldPath.Child("readyReplicas"))...)
-	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.AvailableReplicas), fldPath.Child("availableReplicas"))...)
-	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.ObservedGeneration), fldPath.Child("observedGeneration"))...)
-	msg := "cannot be greater than status.replicas"
-	if status.FullyLabeledReplicas > status.Replicas {
-		allErrs = append(allErrs, field.Invalid(fldPath.Child("fullyLabeledReplicas"), status.FullyLabeledReplicas, msg))
-	}
-	if status.ReadyReplicas > status.Replicas {
-		allErrs = append(allErrs, field.Invalid(fldPath.Child("readyReplicas"), status.ReadyReplicas, msg))
-	}
-	if status.AvailableReplicas > status.Replicas {
-		allErrs = append(allErrs, field.Invalid(fldPath.Child("availableReplicas"), status.AvailableReplicas, msg))
-	}
-	if status.AvailableReplicas > status.ReadyReplicas {
-		allErrs = append(allErrs, field.Invalid(fldPath.Child("availableReplicas"), status.AvailableReplicas, "cannot be greater than readyReplicas"))
-	}
-	return allErrs
-}
-
-// ValidateReplicaSetSpec tests if required fields in the ReplicaSet spec are set.
-func ValidateReplicaSetSpec(spec *extensions.ReplicaSetSpec, fldPath *field.Path) field.ErrorList {
-	allErrs := field.ErrorList{}
-
-	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...)
-	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...)
-
-	if spec.Selector == nil {
-		allErrs = append(allErrs, field.Required(fldPath.Child("selector"), ""))
-	} else {
-		allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...)
-		if len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 {
-			allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is invalid for deployment"))
-		}
-	}
-
-	selector, err := metav1.LabelSelectorAsSelector(spec.Selector)
-	if err != nil {
-		allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "invalid label selector"))
-	} else {
-		allErrs = append(allErrs, ValidatePodTemplateSpecForReplicaSet(&spec.Template, selector, spec.Replicas, fldPath.Child("template"))...)
-	}
-	return allErrs
-}
-
-// Validates the given template and ensures that it is in accordance with the desired selector and replicas.
-func ValidatePodTemplateSpecForReplicaSet(template *api.PodTemplateSpec, selector labels.Selector, replicas int32, fldPath *field.Path) field.ErrorList {
-	allErrs := field.ErrorList{}
-	if template == nil {
-		allErrs = append(allErrs, field.Required(fldPath, ""))
-	} else {
-		if !selector.Empty() {
-			// Verify that the ReplicaSet selector matches the labels in template.
-			labels := labels.Set(template.Labels)
-			if !selector.Matches(labels) {
-				allErrs = append(allErrs, field.Invalid(fldPath.Child("metadata", "labels"), template.Labels, "`selector` does not match template `labels`"))
-			}
-		}
-		allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(template, fldPath)...)
-		if replicas > 1 {
-			allErrs = append(allErrs, apivalidation.ValidateReadOnlyPersistentDisks(template.Spec.Volumes, fldPath.Child("spec", "volumes"))...)
-		}
-		// RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec().
- if template.Spec.RestartPolicy != api.RestartPolicyAlways { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("spec", "restartPolicy"), template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)})) - } - if template.Spec.ActiveDeadlineSeconds != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("spec", "activeDeadlineSeconds"), template.Spec.ActiveDeadlineSeconds, "must not be specified")) - } - } - return allErrs -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go index befc3a71d63f..390f43280798 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go @@ -21,444 +21,9 @@ limitations under the License. package extensions import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomMetricCurrentStatus) DeepCopyInto(out *CustomMetricCurrentStatus) { - *out = *in - out.CurrentValue = in.CurrentValue.DeepCopy() - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricCurrentStatus. -func (in *CustomMetricCurrentStatus) DeepCopy() *CustomMetricCurrentStatus { - if in == nil { - return nil - } - out := new(CustomMetricCurrentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomMetricCurrentStatusList) DeepCopyInto(out *CustomMetricCurrentStatusList) { - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CustomMetricCurrentStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricCurrentStatusList. -func (in *CustomMetricCurrentStatusList) DeepCopy() *CustomMetricCurrentStatusList { - if in == nil { - return nil - } - out := new(CustomMetricCurrentStatusList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomMetricTarget) DeepCopyInto(out *CustomMetricTarget) { - *out = *in - out.TargetValue = in.TargetValue.DeepCopy() - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricTarget. -func (in *CustomMetricTarget) DeepCopy() *CustomMetricTarget { - if in == nil { - return nil - } - out := new(CustomMetricTarget) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomMetricTargetList) DeepCopyInto(out *CustomMetricTargetList) { - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CustomMetricTarget, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricTargetList. 
-func (in *CustomMetricTargetList) DeepCopy() *CustomMetricTargetList { - if in == nil { - return nil - } - out := new(CustomMetricTargetList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DaemonSet) DeepCopyInto(out *DaemonSet) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSet. -func (in *DaemonSet) DeepCopy() *DaemonSet { - if in == nil { - return nil - } - out := new(DaemonSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DaemonSet) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition. -func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { - if in == nil { - return nil - } - out := new(DaemonSetCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]DaemonSet, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetList. -func (in *DaemonSetList) DeepCopy() *DaemonSetList { - if in == nil { - return nil - } - out := new(DaemonSetList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DaemonSetList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DaemonSetSpec) DeepCopyInto(out *DaemonSetSpec) { - *out = *in - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - in.Template.DeepCopyInto(&out.Template) - in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) - if in.RevisionHistoryLimit != nil { - in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetSpec. -func (in *DaemonSetSpec) DeepCopy() *DaemonSetSpec { - if in == nil { - return nil - } - out := new(DaemonSetSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) { - *out = *in - if in.CollisionCount != nil { - in, out := &in.CollisionCount, &out.CollisionCount - *out = new(int32) - **out = **in - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]DaemonSetCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetStatus. -func (in *DaemonSetStatus) DeepCopy() *DaemonSetStatus { - if in == nil { - return nil - } - out := new(DaemonSetStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DaemonSetUpdateStrategy) DeepCopyInto(out *DaemonSetUpdateStrategy) { - *out = *in - if in.RollingUpdate != nil { - in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(RollingUpdateDaemonSet) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetUpdateStrategy. -func (in *DaemonSetUpdateStrategy) DeepCopy() *DaemonSetUpdateStrategy { - if in == nil { - return nil - } - out := new(DaemonSetUpdateStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Deployment) DeepCopyInto(out *Deployment) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment. -func (in *Deployment) DeepCopy() *Deployment { - if in == nil { - return nil - } - out := new(Deployment) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Deployment) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) { - *out = *in - in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition. -func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { - if in == nil { - return nil - } - out := new(DeploymentCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Deployment, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList. 
-func (in *DeploymentList) DeepCopy() *DeploymentList { - if in == nil { - return nil - } - out := new(DeploymentList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DeploymentList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentRollback) DeepCopyInto(out *DeploymentRollback) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.UpdatedAnnotations != nil { - in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - out.RollbackTo = in.RollbackTo - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentRollback. -func (in *DeploymentRollback) DeepCopy() *DeploymentRollback { - if in == nil { - return nil - } - out := new(DeploymentRollback) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DeploymentRollback) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) { - *out = *in - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - in.Template.DeepCopyInto(&out.Template) - in.Strategy.DeepCopyInto(&out.Strategy) - if in.RevisionHistoryLimit != nil { - in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit - *out = new(int32) - **out = **in - } - if in.RollbackTo != nil { - in, out := &in.RollbackTo, &out.RollbackTo - *out = new(RollbackConfig) - **out = **in - } - if in.ProgressDeadlineSeconds != nil { - in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec. -func (in *DeploymentSpec) DeepCopy() *DeploymentSpec { - if in == nil { - return nil - } - out := new(DeploymentSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]DeploymentCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.CollisionCount != nil { - in, out := &in.CollisionCount, &out.CollisionCount - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus. -func (in *DeploymentStatus) DeepCopy() *DeploymentStatus { - if in == nil { - return nil - } - out := new(DeploymentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) { - *out = *in - if in.RollingUpdate != nil { - in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(RollingUpdateDeployment) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy. -func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy { - if in == nil { - return nil - } - out := new(DeploymentStrategy) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) { *out = *in @@ -686,129 +251,6 @@ func (in *IngressTLS) DeepCopy() *IngressTLS { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSet. -func (in *ReplicaSet) DeepCopy() *ReplicaSet { - if in == nil { - return nil - } - out := new(ReplicaSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ReplicaSet) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicaSetCondition) DeepCopyInto(out *ReplicaSetCondition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetCondition. -func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition { - if in == nil { - return nil - } - out := new(ReplicaSetCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ReplicaSet, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetList. -func (in *ReplicaSetList) DeepCopy() *ReplicaSetList { - if in == nil { - return nil - } - out := new(ReplicaSetList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ReplicaSetList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ReplicaSetSpec) DeepCopyInto(out *ReplicaSetSpec) { - *out = *in - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - in.Template.DeepCopyInto(&out.Template) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetSpec. -func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec { - if in == nil { - return nil - } - out := new(ReplicaSetSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ReplicaSetCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetStatus. -func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus { - if in == nil { - return nil - } - out := new(ReplicaSetStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ReplicationControllerDummy) DeepCopyInto(out *ReplicationControllerDummy) { *out = *in @@ -833,54 +275,3 @@ func (in *ReplicationControllerDummy) DeepCopyObject() runtime.Object { } return nil } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RollbackConfig) DeepCopyInto(out *RollbackConfig) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollbackConfig. -func (in *RollbackConfig) DeepCopy() *RollbackConfig { - if in == nil { - return nil - } - out := new(RollbackConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RollingUpdateDaemonSet) DeepCopyInto(out *RollingUpdateDaemonSet) { - *out = *in - out.MaxUnavailable = in.MaxUnavailable - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDaemonSet. -func (in *RollingUpdateDaemonSet) DeepCopy() *RollingUpdateDaemonSet { - if in == nil { - return nil - } - out := new(RollingUpdateDaemonSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) { - *out = *in - out.MaxUnavailable = in.MaxUnavailable - out.MaxSurge = in.MaxSurge - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment. 
-func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment { - if in == nil { - return nil - } - out := new(RollingUpdateDeployment) - in.DeepCopyInto(out) - return out -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/OWNERS index 96ae42e027a0..02b8511e4655 100755 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/OWNERS @@ -1,4 +1,7 @@ +# approval on api packages bubbles to api-approvers reviewers: -- deads2k -- mbohlool -- jianhuiz +- sig-auth-policy-approvers +- sig-auth-policy-reviewers +labels: +- sig/auth + diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/doc.go index a39220ff7c09..543dbadfe8dc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/doc.go @@ -16,4 +16,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=imagepolicy.k8s.io + package imagepolicy diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/doc.go index b619e953c766..7576561783b7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/imagepolicy/v1alpha1 // +groupName=imagepolicy.k8s.io + package v1alpha1 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/networking/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/networking/doc.go index 3c44f4d285a1..a26b8a07fee5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/networking/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/networking/doc.go @@ -16,4 +16,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=networking.k8s.io + package networking diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/doc.go index 8f088130e415..a4ac99cc9899 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/doc.go @@ -20,4 +20,5 @@ limitations under the License. 
// +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/networking/v1 // +groupName=networking.k8s.io + package v1 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/OWNERS index a245fde358e9..5780104890de 100755 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/OWNERS @@ -1,8 +1,8 @@ -approvers: -- sig-apps-api-approvers +# approval on api packages bubbles to api-approvers reviewers: -- sig-apps-reviewers -- pweil- -- liggitt -- tallclair -- php-coder +- sig-apps-api-approvers +- sig-auth-policy-approvers +- sig-auth-policy-reviewers +labels: +- sig/auth + diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/types.go index 7b9628657f13..a94711bdb36c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/types.go @@ -182,6 +182,10 @@ type PodSecurityPolicySpec struct { SELinux SELinuxStrategyOptions // RunAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. RunAsUser RunAsUserStrategyOptions + // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. + // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the + // RunAsGroup feature gate to be enabled. + RunAsGroup *RunAsGroupStrategyOptions // SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. SupplementalGroups SupplementalGroupsStrategyOptions // FSGroup is the strategy that will dictate what fs group is used by the SecurityContext. @@ -335,6 +339,16 @@ type RunAsUserStrategyOptions struct { Ranges []IDRange } +// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. +type RunAsGroupStrategyOptions struct { + // Rule is the strategy that will dictate the allowable RunAsGroup values that may be set. + Rule RunAsGroupStrategy + // Ranges are the allowed ranges of gids that may be used. If you would like to force a single gid + // then supply a single range with the same start and end. Required for MustRunAs. + // +optional + Ranges []IDRange +} + // IDRange provides a min/max of an allowed range of IDs. type IDRange struct { // Min is the start of the range, inclusive. @@ -356,6 +370,20 @@ const ( RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" ) +// RunAsGroupStrategy denotes strategy types for generating RunAsGroup values for a +// SecurityContext. +type RunAsGroupStrategy string + +const ( + // RunAsGroupStrategyMayRunAs means that container does not need to run with a particular gid. + // However, when RunAsGroup are specified, they have to fall in the defined range. + RunAsGroupStrategyMayRunAs RunAsGroupStrategy = "MayRunAs" + // RunAsGroupStrategyMustRunAs means that container must run as a particular gid. + RunAsGroupStrategyMustRunAs RunAsGroupStrategy = "MustRunAs" + // RunAsGroupStrategyRunAsAny means that container may make requests for any gid. + RunAsGroupStrategyRunAsAny RunAsGroupStrategy = "RunAsAny" +) + // FSGroupStrategyOptions defines the strategy type and options used to create the strategy. 
type FSGroupStrategyOptions struct { // Rule is the strategy that will dictate what FSGroup is used in the SecurityContext. @@ -372,6 +400,9 @@ type FSGroupStrategyOptions struct { type FSGroupStrategyType string const ( + // FSGroupStrategyMayRunAs means that container does not need to have FSGroup of X applied. + // However, when FSGroups are specified, they have to fall in the defined range. + FSGroupStrategyMayRunAs FSGroupStrategyType = "MayRunAs" // FSGroupStrategyMustRunAs means that container must have FSGroup of X applied. FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" // FSGroupStrategyRunAsAny means that container may make requests for any FSGroup labels. @@ -394,6 +425,9 @@ type SupplementalGroupsStrategyOptions struct { type SupplementalGroupsStrategyType string const ( + // SupplementalGroupsStrategyMayRunAs means that container does not need to run with a particular gid. + // However, when gids are specified, they have to fall in the defined range. + SupplementalGroupsStrategyMayRunAs SupplementalGroupsStrategyType = "MayRunAs" // SupplementalGroupsStrategyMustRunAs means that container must run as a particular gid. SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" // SupplementalGroupsStrategyRunAsAny means that container may make requests for any gid. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/zz_generated.conversion.go index 572cbfe1b0fa..356cb47f3a4f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/zz_generated.conversion.go @@ -170,6 +170,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*v1beta1.RunAsGroupStrategyOptions)(nil), (*policy.RunAsGroupStrategyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_RunAsGroupStrategyOptions_To_policy_RunAsGroupStrategyOptions(a.(*v1beta1.RunAsGroupStrategyOptions), b.(*policy.RunAsGroupStrategyOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*policy.RunAsGroupStrategyOptions)(nil), (*v1beta1.RunAsGroupStrategyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_policy_RunAsGroupStrategyOptions_To_v1beta1_RunAsGroupStrategyOptions(a.(*policy.RunAsGroupStrategyOptions), b.(*v1beta1.RunAsGroupStrategyOptions), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*v1beta1.RunAsUserStrategyOptions)(nil), (*policy.RunAsUserStrategyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_RunAsUserStrategyOptions_To_policy_RunAsUserStrategyOptions(a.(*v1beta1.RunAsUserStrategyOptions), b.(*policy.RunAsUserStrategyOptions), scope) }); err != nil { @@ -525,6 +535,7 @@ func autoConvert_v1beta1_PodSecurityPolicySpec_To_policy_PodSecurityPolicySpec(i if err := Convert_v1beta1_RunAsUserStrategyOptions_To_policy_RunAsUserStrategyOptions(&in.RunAsUser, &out.RunAsUser, s); err != nil { return err } + out.RunAsGroup = (*policy.RunAsGroupStrategyOptions)(unsafe.Pointer(in.RunAsGroup)) if err := Convert_v1beta1_SupplementalGroupsStrategyOptions_To_policy_SupplementalGroupsStrategyOptions(&in.SupplementalGroups, &out.SupplementalGroups, s); err != nil { return err } @@ -565,6 
+576,7 @@ func autoConvert_policy_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(i if err := Convert_policy_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions(&in.RunAsUser, &out.RunAsUser, s); err != nil { return err } + out.RunAsGroup = (*v1beta1.RunAsGroupStrategyOptions)(unsafe.Pointer(in.RunAsGroup)) if err := Convert_policy_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions(&in.SupplementalGroups, &out.SupplementalGroups, s); err != nil { return err } @@ -589,6 +601,28 @@ func Convert_policy_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(in *p return autoConvert_policy_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(in, out, s) } +func autoConvert_v1beta1_RunAsGroupStrategyOptions_To_policy_RunAsGroupStrategyOptions(in *v1beta1.RunAsGroupStrategyOptions, out *policy.RunAsGroupStrategyOptions, s conversion.Scope) error { + out.Rule = policy.RunAsGroupStrategy(in.Rule) + out.Ranges = *(*[]policy.IDRange)(unsafe.Pointer(&in.Ranges)) + return nil +} + +// Convert_v1beta1_RunAsGroupStrategyOptions_To_policy_RunAsGroupStrategyOptions is an autogenerated conversion function. +func Convert_v1beta1_RunAsGroupStrategyOptions_To_policy_RunAsGroupStrategyOptions(in *v1beta1.RunAsGroupStrategyOptions, out *policy.RunAsGroupStrategyOptions, s conversion.Scope) error { + return autoConvert_v1beta1_RunAsGroupStrategyOptions_To_policy_RunAsGroupStrategyOptions(in, out, s) +} + +func autoConvert_policy_RunAsGroupStrategyOptions_To_v1beta1_RunAsGroupStrategyOptions(in *policy.RunAsGroupStrategyOptions, out *v1beta1.RunAsGroupStrategyOptions, s conversion.Scope) error { + out.Rule = v1beta1.RunAsGroupStrategy(in.Rule) + out.Ranges = *(*[]v1beta1.IDRange)(unsafe.Pointer(&in.Ranges)) + return nil +} + +// Convert_policy_RunAsGroupStrategyOptions_To_v1beta1_RunAsGroupStrategyOptions is an autogenerated conversion function. 
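The generated converters above avoid per-element copies by reinterpreting the slice header with `unsafe.Pointer`; conversion-gen only emits this cast when the external and internal element types share an identical memory layout. A minimal standalone sketch of that pattern, using hypothetical stand-in types rather than the real `v1beta1`/`policy` packages:

```go
// Sketch of the zero-copy conversion pattern emitted by conversion-gen, e.g.
// *(*[]policy.IDRange)(unsafe.Pointer(&in.Ranges)). The stand-in types below
// are hypothetical; the cast is valid only because their layouts are identical.
package main

import (
	"fmt"
	"unsafe"
)

// Two structurally identical types, standing in for the external
// (v1beta1.IDRange) and internal (policy.IDRange) representations.
type externalIDRange struct{ Min, Max int64 }
type internalIDRange struct{ Min, Max int64 }

func main() {
	in := []externalIDRange{{Min: 1, Max: 10}, {Min: 100, Max: 200}}

	// Reinterpret the backing array in place instead of allocating and
	// copying element by element.
	out := *(*[]internalIDRange)(unsafe.Pointer(&in))

	fmt.Println(out) // [{1 10} {100 200}]
}
```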
+func Convert_policy_RunAsGroupStrategyOptions_To_v1beta1_RunAsGroupStrategyOptions(in *policy.RunAsGroupStrategyOptions, out *v1beta1.RunAsGroupStrategyOptions, s conversion.Scope) error { + return autoConvert_policy_RunAsGroupStrategyOptions_To_v1beta1_RunAsGroupStrategyOptions(in, out, s) +} + func autoConvert_v1beta1_RunAsUserStrategyOptions_To_policy_RunAsUserStrategyOptions(in *v1beta1.RunAsUserStrategyOptions, out *policy.RunAsUserStrategyOptions, s conversion.Scope) error { out.Rule = policy.RunAsUserStrategy(in.Rule) out.Ranges = *(*[]policy.IDRange)(unsafe.Pointer(&in.Ranges)) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/validation/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/validation/BUILD index ffa56b65c12b..a32bc3f9ca4b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/validation/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/validation/BUILD @@ -11,9 +11,9 @@ go_library( srcs = ["validation.go"], importpath = "k8s.io/kubernetes/pkg/apis/policy/validation", deps = [ + "//pkg/apis/apps/validation:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/apis/core/validation:go_default_library", - "//pkg/apis/extensions/validation:go_default_library", "//pkg/apis/policy:go_default_library", "//pkg/features:go_default_library", "//pkg/security/apparmor:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/validation/validation.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/validation/validation.go index 54db7bc88df1..fa1af17afa76 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/validation/validation.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/validation/validation.go @@ -27,9 +27,9 @@ import ( unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation/field" + appsvalidation "k8s.io/kubernetes/pkg/apis/apps/validation" core "k8s.io/kubernetes/pkg/apis/core" apivalidation "k8s.io/kubernetes/pkg/apis/core/validation" - extensionsvalidation "k8s.io/kubernetes/pkg/apis/extensions/validation" "k8s.io/kubernetes/pkg/apis/policy" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/security/apparmor" @@ -68,13 +68,13 @@ func ValidatePodDisruptionBudgetSpec(spec policy.PodDisruptionBudgetSpec, fldPat } if spec.MinAvailable != nil { - allErrs = append(allErrs, extensionsvalidation.ValidatePositiveIntOrPercent(*spec.MinAvailable, fldPath.Child("minAvailable"))...) - allErrs = append(allErrs, extensionsvalidation.IsNotMoreThan100Percent(*spec.MinAvailable, fldPath.Child("minAvailable"))...) + allErrs = append(allErrs, appsvalidation.ValidatePositiveIntOrPercent(*spec.MinAvailable, fldPath.Child("minAvailable"))...) + allErrs = append(allErrs, appsvalidation.IsNotMoreThan100Percent(*spec.MinAvailable, fldPath.Child("minAvailable"))...) } if spec.MaxUnavailable != nil { - allErrs = append(allErrs, extensionsvalidation.ValidatePositiveIntOrPercent(*spec.MaxUnavailable, fldPath.Child("maxUnavailable"))...) - allErrs = append(allErrs, extensionsvalidation.IsNotMoreThan100Percent(*spec.MaxUnavailable, fldPath.Child("maxUnavailable"))...) + allErrs = append(allErrs, appsvalidation.ValidatePositiveIntOrPercent(*spec.MaxUnavailable, fldPath.Child("maxUnavailable"))...) + allErrs = append(allErrs, appsvalidation.IsNotMoreThan100Percent(*spec.MaxUnavailable, fldPath.Child("maxUnavailable"))...) 
} allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...) @@ -109,6 +109,7 @@ func ValidatePodSecurityPolicySpec(spec *policy.PodSecurityPolicySpec, fldPath * allErrs := field.ErrorList{} allErrs = append(allErrs, validatePSPRunAsUser(fldPath.Child("runAsUser"), &spec.RunAsUser)...) + allErrs = append(allErrs, validatePSPRunAsGroup(fldPath.Child("runAsGroup"), spec.RunAsGroup)...) allErrs = append(allErrs, validatePSPSELinux(fldPath.Child("seLinux"), &spec.SELinux)...) allErrs = append(allErrs, validatePSPSupplementalGroup(fldPath.Child("supplementalGroups"), &spec.SupplementalGroups)...) allErrs = append(allErrs, validatePSPFSGroup(fldPath.Child("fsGroup"), &spec.FSGroup)...) @@ -233,12 +234,45 @@ func validatePSPRunAsUser(fldPath *field.Path, runAsUser *policy.RunAsUserStrate return allErrs } +// validatePSPRunAsGroup validates the RunAsGroup fields of PodSecurityPolicy. +func validatePSPRunAsGroup(fldPath *field.Path, runAsGroup *policy.RunAsGroupStrategyOptions) field.ErrorList { + var allErrs field.ErrorList + + if runAsGroup == nil { + return allErrs + } + + switch runAsGroup.Rule { + case policy.RunAsGroupStrategyRunAsAny: + if len(runAsGroup.Ranges) != 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("ranges"), runAsGroup.Ranges, "Ranges must be empty")) + } + case policy.RunAsGroupStrategyMustRunAs, policy.RunAsGroupStrategyMayRunAs: + if len(runAsGroup.Ranges) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("ranges"), runAsGroup.Ranges, "must provide at least one range")) + } + // validate range settings + for idx, rng := range runAsGroup.Ranges { + allErrs = append(allErrs, validateGroupIDRange(fldPath.Child("ranges").Index(idx), rng)...) + } + default: + supportedRunAsGroupRules := []string{ + string(policy.RunAsGroupStrategyMustRunAs), + string(policy.RunAsGroupStrategyRunAsAny), + string(policy.RunAsGroupStrategyMayRunAs), + } + allErrs = append(allErrs, field.NotSupported(fldPath.Child("rule"), runAsGroup.Rule, supportedRunAsGroupRules)) + } + return allErrs +} + // validatePSPFSGroup validates the FSGroupStrategyOptions fields of the PodSecurityPolicy. 
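To make the rule/ranges contract in `validatePSPRunAsGroup` concrete: `RunAsAny` rejects any listed ranges, while `MustRunAs` and `MayRunAs` require at least one. A simplified, self-contained sketch of just that decision logic (the real function also validates each range's bounds and returns a `field.ErrorList`):

```go
// Standalone sketch of the rule/ranges contract enforced by
// validatePSPRunAsGroup; types and error handling are simplified stand-ins.
package main

import "fmt"

type idRange struct{ Min, Max int64 }

func checkRunAsGroup(rule string, ranges []idRange) error {
	switch rule {
	case "RunAsAny":
		// RunAsAny places no constraint on the pod, so listing ranges
		// would be meaningless and is rejected.
		if len(ranges) != 0 {
			return fmt.Errorf("ranges must be empty for rule %q", rule)
		}
	case "MustRunAs", "MayRunAs":
		// Both rules constrain gids to the listed ranges, so at least
		// one range is required.
		if len(ranges) == 0 {
			return fmt.Errorf("must provide at least one range for rule %q", rule)
		}
	default:
		return fmt.Errorf("unsupported rule %q", rule)
	}
	return nil
}

func main() {
	fmt.Println(checkRunAsGroup("RunAsAny", nil))                         // <nil>
	fmt.Println(checkRunAsGroup("MustRunAs", nil))                        // error: needs a range
	fmt.Println(checkRunAsGroup("MayRunAs", []idRange{{Min: 1, Max: 5}})) // <nil>
}
```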
func validatePSPFSGroup(fldPath *field.Path, groupOptions *policy.FSGroupStrategyOptions) field.ErrorList { allErrs := field.ErrorList{} supportedRules := sets.NewString( string(policy.FSGroupStrategyMustRunAs), + string(policy.FSGroupStrategyMayRunAs), string(policy.FSGroupStrategyRunAsAny), ) if !supportedRules.Has(string(groupOptions.Rule)) { @@ -257,6 +291,7 @@ func validatePSPSupplementalGroup(fldPath *field.Path, groupOptions *policy.Supp supportedRules := sets.NewString( string(policy.SupplementalGroupsStrategyRunAsAny), + string(policy.SupplementalGroupsStrategyMayRunAs), string(policy.SupplementalGroupsStrategyMustRunAs), ) if !supportedRules.Has(string(groupOptions.Rule)) { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/zz_generated.deepcopy.go index b0cfe7528907..4b5eb4579464 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/policy/zz_generated.deepcopy.go @@ -348,6 +348,11 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { } in.SELinux.DeepCopyInto(&out.SELinux) in.RunAsUser.DeepCopyInto(&out.RunAsUser) + if in.RunAsGroup != nil { + in, out := &in.RunAsGroup, &out.RunAsGroup + *out = new(RunAsGroupStrategyOptions) + (*in).DeepCopyInto(*out) + } in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups) in.FSGroup.DeepCopyInto(&out.FSGroup) if in.DefaultAllowPrivilegeEscalation != nil { @@ -393,6 +398,27 @@ func (in *PodSecurityPolicySpec) DeepCopy() *PodSecurityPolicySpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RunAsGroupStrategyOptions) DeepCopyInto(out *RunAsGroupStrategyOptions) { + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsGroupStrategyOptions. +func (in *RunAsGroupStrategyOptions) DeepCopy() *RunAsGroupStrategyOptions { + if in == nil { + return nil + } + out := new(RunAsGroupStrategyOptions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
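The `make`+`copy` in the generated `DeepCopyInto` for `RunAsGroupStrategyOptions` above is what makes the copy independent: a bare struct assignment would copy the slice header but alias the backing array. A small sketch with simplified stand-in types illustrates the difference:

```go
// Sketch of why the generated DeepCopyInto allocates a fresh Ranges slice.
// The types here are hypothetical stand-ins for RunAsGroupStrategyOptions.
package main

import "fmt"

type idRange struct{ Min, Max int64 }
type options struct {
	Rule   string
	Ranges []idRange
}

func (in *options) deepCopyInto(out *options) {
	*out = *in // copies Rule, but Ranges still shares the backing array
	if in.Ranges != nil {
		out.Ranges = make([]idRange, len(in.Ranges))
		copy(out.Ranges, in.Ranges) // now the copy owns its own array
	}
}

func main() {
	orig := options{Rule: "MustRunAs", Ranges: []idRange{{Min: 1, Max: 10}}}

	var cp options
	orig.deepCopyInto(&cp)
	cp.Ranges[0].Max = 99 // mutate the copy

	fmt.Println(orig.Ranges[0].Max) // 10: the original is unaffected
}
```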
func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) { *out = *in diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/rbac/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/rbac/OWNERS index 1aefde049a0a..ff4a7f4bf9ad 100755 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/rbac/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/rbac/OWNERS @@ -1,17 +1,7 @@ +# approval on api packages bubbles to api-approvers reviewers: -- thockin -- lavalamp -- smarterclayton -- deads2k -- sttts -- ncdc -- dims -- krousey -- mml -- mbohlool -- david-mcmahon -- ericchiang -- lixiaobing10051267 -- jianhuiz -- liggitt -- enj +- sig-auth-authorizers-approvers +- sig-auth-authorizers-reviewers +labels: +- sig/auth + diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go index 058103f2c95d..77ad4908d437 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go @@ -16,4 +16,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=rbac.authorization.k8s.io + package rbac diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1/doc.go index bda09190e303..6ab1259d9b66 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1/doc.go @@ -21,4 +21,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=rbac.authorization.k8s.io + package v1 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/doc.go index f952e27b7246..c611cad6133d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/rbac/v1alpha1 // +groupName=rbac.authorization.k8s.io + package v1alpha1 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/doc.go index dbe68ec2ed1f..295cc2f6bd5f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/rbac/v1beta1 // +groupName=rbac.authorization.k8s.io + package v1beta1 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/doc.go index 81bb7a6d6d32..399bb1e1bf95 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/doc.go @@ -16,4 +16,5 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +groupName=scheduling.k8s.io + package scheduling diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1/doc.go index 12b9ac62f016..d26930b0f245 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1/doc.go @@ -19,4 +19,5 @@ limitations under the License. // +groupName=scheduling.k8s.io // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/scheduling/v1alpha1 + package v1alpha1 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1beta1/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1beta1/doc.go index 28aee530bbd7..6321d5da3281 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1beta1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1beta1/doc.go @@ -19,4 +19,5 @@ limitations under the License. // +groupName=scheduling.k8s.io // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/scheduling/v1beta1 + package v1beta1 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/settings/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/settings/doc.go index 1c690e460d91..cfd4a49aef14 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/settings/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/settings/doc.go @@ -16,4 +16,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=settings.k8s.io + package settings diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/doc.go index b8fea41340b2..0547062a5ae6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/settings/v1alpha1 // +groupName=settings.k8s.io + package v1alpha1 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/doc.go index 1f3c23f60633..769e29e69de1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/doc.go @@ -16,4 +16,5 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +groupName=storage.k8s.io + package storage diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/util/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/util/BUILD deleted file mode 100644 index 1b3f629507a8..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/util/BUILD +++ /dev/null @@ -1,46 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = [ - "helpers.go", - "util.go", - ], - importpath = "k8s.io/kubernetes/pkg/apis/storage/util", - deps = [ - "//pkg/apis/storage:go_default_library", - "//pkg/features:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) - -go_test( - name = "go_default_test", - srcs = ["util_test.go"], - embed = [":go_default_library"], - deps = [ - "//pkg/apis/core:go_default_library", - "//pkg/apis/storage:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - ], -) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/util/helpers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/util/helpers.go deleted file mode 100644 index 62c27ccfc19c..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/util/helpers.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package util - -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -// IsDefaultStorageClassAnnotation represents a StorageClass annotation that -// marks a class as the default StorageClass -//TODO: remove Beta when no longer used -const IsDefaultStorageClassAnnotation = "storageclass.kubernetes.io/is-default-class" -const BetaIsDefaultStorageClassAnnotation = "storageclass.beta.kubernetes.io/is-default-class" - -// IsDefaultAnnotationText returns a pretty Yes/No String if -// the annotation is set -// TODO: remove Beta when no longer needed -func IsDefaultAnnotationText(obj metav1.ObjectMeta) string { - if obj.Annotations[IsDefaultStorageClassAnnotation] == "true" { - return "Yes" - } - if obj.Annotations[BetaIsDefaultStorageClassAnnotation] == "true" { - return "Yes" - } - - return "No" -} - -// IsDefaultAnnotation returns a boolean if -// the annotation is set -// TODO: remove Beta when no longer needed -func IsDefaultAnnotation(obj metav1.ObjectMeta) bool { - if obj.Annotations[IsDefaultStorageClassAnnotation] == "true" { - return true - } - if obj.Annotations[BetaIsDefaultStorageClassAnnotation] == "true" { - return true - } - - return false -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/util/util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/util/util.go deleted file mode 100644 index 382802d96417..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/util/util.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/kubernetes/pkg/apis/storage" - "k8s.io/kubernetes/pkg/features" -) - -// DropDisabledAlphaFields removes disabled fields from the StorageClass object. 
-func DropDisabledAlphaFields(class *storage.StorageClass) { - if !utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { - class.VolumeBindingMode = nil - class.AllowedTopologies = nil - } -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/BUILD index f84557ef2cd0..11449ae5f12f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/BUILD @@ -47,13 +47,18 @@ filegroup( go_test( name = "go_default_test", - srcs = ["defaults_test.go"], + srcs = [ + "defaults_test.go", + "main_test.go", + ], embed = [":go_default_library"], deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/storage/install:go_default_library", + "//pkg/features:go_default_library", "//staging/src/k8s.io/api/storage/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/doc.go index 617aa14c1aa0..4019b7565716 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/doc.go @@ -19,4 +19,5 @@ limitations under the License. // +groupName=storage.k8s.io // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/storage/v1 + package v1 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/zz_generated.conversion.go index be7a27582e61..436a86267193 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/zz_generated.conversion.go @@ -58,6 +58,66 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*v1.VolumeAttachment)(nil), (*storage.VolumeAttachment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_VolumeAttachment_To_storage_VolumeAttachment(a.(*v1.VolumeAttachment), b.(*storage.VolumeAttachment), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*storage.VolumeAttachment)(nil), (*v1.VolumeAttachment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_storage_VolumeAttachment_To_v1_VolumeAttachment(a.(*storage.VolumeAttachment), b.(*v1.VolumeAttachment), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.VolumeAttachmentList)(nil), (*storage.VolumeAttachmentList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_VolumeAttachmentList_To_storage_VolumeAttachmentList(a.(*v1.VolumeAttachmentList), b.(*storage.VolumeAttachmentList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*storage.VolumeAttachmentList)(nil), (*v1.VolumeAttachmentList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_storage_VolumeAttachmentList_To_v1_VolumeAttachmentList(a.(*storage.VolumeAttachmentList), b.(*v1.VolumeAttachmentList), scope) + }); err 
!= nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.VolumeAttachmentSource)(nil), (*storage.VolumeAttachmentSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource(a.(*v1.VolumeAttachmentSource), b.(*storage.VolumeAttachmentSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*storage.VolumeAttachmentSource)(nil), (*v1.VolumeAttachmentSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_storage_VolumeAttachmentSource_To_v1_VolumeAttachmentSource(a.(*storage.VolumeAttachmentSource), b.(*v1.VolumeAttachmentSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.VolumeAttachmentSpec)(nil), (*storage.VolumeAttachmentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec(a.(*v1.VolumeAttachmentSpec), b.(*storage.VolumeAttachmentSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*storage.VolumeAttachmentSpec)(nil), (*v1.VolumeAttachmentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_storage_VolumeAttachmentSpec_To_v1_VolumeAttachmentSpec(a.(*storage.VolumeAttachmentSpec), b.(*v1.VolumeAttachmentSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.VolumeAttachmentStatus)(nil), (*storage.VolumeAttachmentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus(a.(*v1.VolumeAttachmentStatus), b.(*storage.VolumeAttachmentStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*storage.VolumeAttachmentStatus)(nil), (*v1.VolumeAttachmentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_storage_VolumeAttachmentStatus_To_v1_VolumeAttachmentStatus(a.(*storage.VolumeAttachmentStatus), b.(*v1.VolumeAttachmentStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.VolumeError)(nil), (*storage.VolumeError)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_VolumeError_To_storage_VolumeError(a.(*v1.VolumeError), b.(*storage.VolumeError), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*storage.VolumeError)(nil), (*v1.VolumeError)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_storage_VolumeError_To_v1_VolumeError(a.(*storage.VolumeError), b.(*v1.VolumeError), scope) + }); err != nil { + return err + } return nil } @@ -116,3 +176,153 @@ func autoConvert_storage_StorageClassList_To_v1_StorageClassList(in *storage.Sto func Convert_storage_StorageClassList_To_v1_StorageClassList(in *storage.StorageClassList, out *v1.StorageClassList, s conversion.Scope) error { return autoConvert_storage_StorageClassList_To_v1_StorageClassList(in, out, s) } + +func autoConvert_v1_VolumeAttachment_To_storage_VolumeAttachment(in *v1.VolumeAttachment, out *storage.VolumeAttachment, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus(&in.Status, &out.Status, s); err != nil { + return err + } + 
return nil +} + +// Convert_v1_VolumeAttachment_To_storage_VolumeAttachment is an autogenerated conversion function. +func Convert_v1_VolumeAttachment_To_storage_VolumeAttachment(in *v1.VolumeAttachment, out *storage.VolumeAttachment, s conversion.Scope) error { + return autoConvert_v1_VolumeAttachment_To_storage_VolumeAttachment(in, out, s) +} + +func autoConvert_storage_VolumeAttachment_To_v1_VolumeAttachment(in *storage.VolumeAttachment, out *v1.VolumeAttachment, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_storage_VolumeAttachmentSpec_To_v1_VolumeAttachmentSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_storage_VolumeAttachmentStatus_To_v1_VolumeAttachmentStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_storage_VolumeAttachment_To_v1_VolumeAttachment is an autogenerated conversion function. +func Convert_storage_VolumeAttachment_To_v1_VolumeAttachment(in *storage.VolumeAttachment, out *v1.VolumeAttachment, s conversion.Scope) error { + return autoConvert_storage_VolumeAttachment_To_v1_VolumeAttachment(in, out, s) +} + +func autoConvert_v1_VolumeAttachmentList_To_storage_VolumeAttachmentList(in *v1.VolumeAttachmentList, out *storage.VolumeAttachmentList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]storage.VolumeAttachment)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_VolumeAttachmentList_To_storage_VolumeAttachmentList is an autogenerated conversion function. +func Convert_v1_VolumeAttachmentList_To_storage_VolumeAttachmentList(in *v1.VolumeAttachmentList, out *storage.VolumeAttachmentList, s conversion.Scope) error { + return autoConvert_v1_VolumeAttachmentList_To_storage_VolumeAttachmentList(in, out, s) +} + +func autoConvert_storage_VolumeAttachmentList_To_v1_VolumeAttachmentList(in *storage.VolumeAttachmentList, out *v1.VolumeAttachmentList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.VolumeAttachment)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_storage_VolumeAttachmentList_To_v1_VolumeAttachmentList is an autogenerated conversion function. +func Convert_storage_VolumeAttachmentList_To_v1_VolumeAttachmentList(in *storage.VolumeAttachmentList, out *v1.VolumeAttachmentList, s conversion.Scope) error { + return autoConvert_storage_VolumeAttachmentList_To_v1_VolumeAttachmentList(in, out, s) +} + +func autoConvert_v1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource(in *v1.VolumeAttachmentSource, out *storage.VolumeAttachmentSource, s conversion.Scope) error { + out.PersistentVolumeName = (*string)(unsafe.Pointer(in.PersistentVolumeName)) + return nil +} + +// Convert_v1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource is an autogenerated conversion function. +func Convert_v1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource(in *v1.VolumeAttachmentSource, out *storage.VolumeAttachmentSource, s conversion.Scope) error { + return autoConvert_v1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource(in, out, s) +} + +func autoConvert_storage_VolumeAttachmentSource_To_v1_VolumeAttachmentSource(in *storage.VolumeAttachmentSource, out *v1.VolumeAttachmentSource, s conversion.Scope) error { + out.PersistentVolumeName = (*string)(unsafe.Pointer(in.PersistentVolumeName)) + return nil +} + +// Convert_storage_VolumeAttachmentSource_To_v1_VolumeAttachmentSource is an autogenerated conversion function. 
+func Convert_storage_VolumeAttachmentSource_To_v1_VolumeAttachmentSource(in *storage.VolumeAttachmentSource, out *v1.VolumeAttachmentSource, s conversion.Scope) error { + return autoConvert_storage_VolumeAttachmentSource_To_v1_VolumeAttachmentSource(in, out, s) +} + +func autoConvert_v1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec(in *v1.VolumeAttachmentSpec, out *storage.VolumeAttachmentSpec, s conversion.Scope) error { + out.Attacher = in.Attacher + if err := Convert_v1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource(&in.Source, &out.Source, s); err != nil { + return err + } + out.NodeName = in.NodeName + return nil +} + +// Convert_v1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec is an autogenerated conversion function. +func Convert_v1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec(in *v1.VolumeAttachmentSpec, out *storage.VolumeAttachmentSpec, s conversion.Scope) error { + return autoConvert_v1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec(in, out, s) +} + +func autoConvert_storage_VolumeAttachmentSpec_To_v1_VolumeAttachmentSpec(in *storage.VolumeAttachmentSpec, out *v1.VolumeAttachmentSpec, s conversion.Scope) error { + out.Attacher = in.Attacher + if err := Convert_storage_VolumeAttachmentSource_To_v1_VolumeAttachmentSource(&in.Source, &out.Source, s); err != nil { + return err + } + out.NodeName = in.NodeName + return nil +} + +// Convert_storage_VolumeAttachmentSpec_To_v1_VolumeAttachmentSpec is an autogenerated conversion function. +func Convert_storage_VolumeAttachmentSpec_To_v1_VolumeAttachmentSpec(in *storage.VolumeAttachmentSpec, out *v1.VolumeAttachmentSpec, s conversion.Scope) error { + return autoConvert_storage_VolumeAttachmentSpec_To_v1_VolumeAttachmentSpec(in, out, s) +} + +func autoConvert_v1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus(in *v1.VolumeAttachmentStatus, out *storage.VolumeAttachmentStatus, s conversion.Scope) error { + out.Attached = in.Attached + out.AttachmentMetadata = *(*map[string]string)(unsafe.Pointer(&in.AttachmentMetadata)) + out.AttachError = (*storage.VolumeError)(unsafe.Pointer(in.AttachError)) + out.DetachError = (*storage.VolumeError)(unsafe.Pointer(in.DetachError)) + return nil +} + +// Convert_v1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus is an autogenerated conversion function. +func Convert_v1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus(in *v1.VolumeAttachmentStatus, out *storage.VolumeAttachmentStatus, s conversion.Scope) error { + return autoConvert_v1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus(in, out, s) +} + +func autoConvert_storage_VolumeAttachmentStatus_To_v1_VolumeAttachmentStatus(in *storage.VolumeAttachmentStatus, out *v1.VolumeAttachmentStatus, s conversion.Scope) error { + out.Attached = in.Attached + out.AttachmentMetadata = *(*map[string]string)(unsafe.Pointer(&in.AttachmentMetadata)) + out.AttachError = (*v1.VolumeError)(unsafe.Pointer(in.AttachError)) + out.DetachError = (*v1.VolumeError)(unsafe.Pointer(in.DetachError)) + return nil +} + +// Convert_storage_VolumeAttachmentStatus_To_v1_VolumeAttachmentStatus is an autogenerated conversion function. 
+func Convert_storage_VolumeAttachmentStatus_To_v1_VolumeAttachmentStatus(in *storage.VolumeAttachmentStatus, out *v1.VolumeAttachmentStatus, s conversion.Scope) error { + return autoConvert_storage_VolumeAttachmentStatus_To_v1_VolumeAttachmentStatus(in, out, s) +} + +func autoConvert_v1_VolumeError_To_storage_VolumeError(in *v1.VolumeError, out *storage.VolumeError, s conversion.Scope) error { + out.Time = in.Time + out.Message = in.Message + return nil +} + +// Convert_v1_VolumeError_To_storage_VolumeError is an autogenerated conversion function. +func Convert_v1_VolumeError_To_storage_VolumeError(in *v1.VolumeError, out *storage.VolumeError, s conversion.Scope) error { + return autoConvert_v1_VolumeError_To_storage_VolumeError(in, out, s) +} + +func autoConvert_storage_VolumeError_To_v1_VolumeError(in *storage.VolumeError, out *v1.VolumeError, s conversion.Scope) error { + out.Time = in.Time + out.Message = in.Message + return nil +} + +// Convert_storage_VolumeError_To_v1_VolumeError is an autogenerated conversion function. +func Convert_storage_VolumeError_To_v1_VolumeError(in *storage.VolumeError, out *v1.VolumeError, s conversion.Scope) error { + return autoConvert_storage_VolumeError_To_v1_VolumeError(in, out, s) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/v1alpha1/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/v1alpha1/doc.go index 47670b19883d..2c25add8ad5c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/v1alpha1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/v1alpha1/doc.go @@ -19,4 +19,5 @@ limitations under the License. // +groupName=storage.k8s.io // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/storage/v1alpha1 + package v1alpha1 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/BUILD index 3006ca9307ac..4695a2ddb217 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/BUILD @@ -47,13 +47,18 @@ filegroup( go_test( name = "go_default_test", - srcs = ["defaults_test.go"], + srcs = [ + "defaults_test.go", + "main_test.go", + ], embed = [":go_default_library"], deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/storage/install:go_default_library", + "//pkg/features:go_default_library", "//staging/src/k8s.io/api/storage/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/doc.go index 0301f751664d..621f539889ce 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/doc.go @@ -19,4 +19,5 @@ limitations under the License. 
// +groupName=storage.k8s.io // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/storage/v1beta1 + package v1beta1 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/chaosclient/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/chaosclient/BUILD deleted file mode 100644 index 7145808df502..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/chaosclient/BUILD +++ /dev/null @@ -1,33 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = ["chaosclient.go"], - importpath = "k8s.io/kubernetes/pkg/client/chaosclient", - deps = ["//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library"], -) - -go_test( - name = "go_default_test", - srcs = ["chaosclient_test.go"], - embed = [":go_default_library"], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/chaosclient/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/chaosclient/OWNERS deleted file mode 100755 index 32c694e91932..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/chaosclient/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: -- smarterclayton -- liggitt -- davidopp -- eparis -- resouer diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/chaosclient/chaosclient.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/chaosclient/chaosclient.go deleted file mode 100644 index eeb2c7f05d02..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/chaosclient/chaosclient.go +++ /dev/null @@ -1,156 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package chaosclient makes it easy to simulate network latency, misbehaving -// servers, and random errors from servers. It is intended to stress test components -// under failure conditions and expose weaknesses in the error handling logic -// of the codebase. -package chaosclient - -import ( - "errors" - "fmt" - "log" - "math/rand" - "net/http" - "reflect" - "runtime" - - "k8s.io/apimachinery/pkg/util/net" -) - -// chaosrt provides the ability to perform simulations of HTTP client failures -// under the Golang http.Transport interface. -type chaosrt struct { - rt http.RoundTripper - notify ChaosNotifier - c []Chaos -} - -// Chaos intercepts requests to a remote HTTP endpoint and can inject arbitrary -// failures. -type Chaos interface { - // Intercept should return true if the normal flow should be skipped, and the - // return response and error used instead. Modifications to the request will - // be ignored, but may be used to make decisions about types of failures. 
- Intercept(req *http.Request) (bool, *http.Response, error) -} - -// ChaosNotifier notifies another component that the ChaosRoundTripper has simulated -// a failure. -type ChaosNotifier interface { - // OnChaos is invoked when a chaotic outcome was triggered. fn is the - // source of Chaos and req was the outgoing request - OnChaos(req *http.Request, c Chaos) -} - -// ChaosFunc takes an http.Request and decides whether to alter the response. It -// returns true if it wishes to mutate the response, with a http.Response or -// error. -type ChaosFunc func(req *http.Request) (bool, *http.Response, error) - -func (fn ChaosFunc) Intercept(req *http.Request) (bool, *http.Response, error) { - return fn.Intercept(req) -} -func (fn ChaosFunc) String() string { - return runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name() -} - -// NewChaosRoundTripper creates an http.RoundTripper that will intercept requests -// based on the provided Chaos functions. The notifier is invoked when a Chaos -// Intercept is fired. -func NewChaosRoundTripper(rt http.RoundTripper, notify ChaosNotifier, c ...Chaos) http.RoundTripper { - return &chaosrt{rt, notify, c} -} - -// RoundTrip gives each ChaosFunc an opportunity to intercept the request. The first -// interceptor wins. -func (rt *chaosrt) RoundTrip(req *http.Request) (*http.Response, error) { - for _, c := range rt.c { - if intercept, resp, err := c.Intercept(req); intercept { - rt.notify.OnChaos(req, c) - return resp, err - } - } - return rt.rt.RoundTrip(req) -} - -var _ = net.RoundTripperWrapper(&chaosrt{}) - -func (rt *chaosrt) WrappedRoundTripper() http.RoundTripper { - return rt.rt -} - -// Seed represents a consistent stream of chaos. -type Seed struct { - *rand.Rand -} - -// NewSeed creates an object that assists in generating random chaotic events -// based on a deterministic seed. -func NewSeed(seed int64) Seed { - return Seed{rand.New(rand.NewSource(seed))} -} - -type pIntercept struct { - Chaos - s Seed - p float64 -} - -// P returns a ChaosFunc that fires with a probability of p (p between 0.0 -// and 1.0 with 0.0 meaning never and 1.0 meaning always). -func (s Seed) P(p float64, c Chaos) Chaos { - return pIntercept{c, s, p} -} - -// Intercept intercepts requests with the provided probability p. -func (c pIntercept) Intercept(req *http.Request) (bool, *http.Response, error) { - if c.s.Float64() < c.p { - return c.Chaos.Intercept(req) - } - return false, nil, nil -} - -func (c pIntercept) String() string { - return fmt.Sprintf("P{%f %s}", c.p, c.Chaos) -} - -// ErrSimulatedConnectionResetByPeer emulates the golang net error when a connection -// is reset by a peer. -// TODO: make this more accurate -// TODO: add other error types -// TODO: add a helper for returning multiple errors randomly. -var ErrSimulatedConnectionResetByPeer = Error{errors.New("connection reset by peer")} - -// Error returns the nested error when C() is invoked. -type Error struct { - error -} - -// C returns the nested error -func (e Error) Intercept(_ *http.Request) (bool, *http.Response, error) { - return true, nil, e.error -} - -// LogChaos is the default ChaosNotifier and writes a message to the Golang log. 
-var LogChaos = ChaosNotifier(logChaos{}) - -type logChaos struct{} - -func (logChaos) OnChaos(req *http.Request, c Chaos) { - log.Printf("Triggered chaotic behavior for %s %s: %v", req.Method, req.URL.String(), c) -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/BUILD index 0486ae006331..09144c4c39ab 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/BUILD @@ -15,6 +15,7 @@ go_library( deps = [ "//pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/apps/internalversion:go_default_library", + "//pkg/client/clientset_generated/internalclientset/typed/auditregistration/internalversion:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion:go_default_library", @@ -51,6 +52,7 @@ filegroup( "//pkg/client/clientset_generated/internalclientset/scheme:all-srcs", "//pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion:all-srcs", "//pkg/client/clientset_generated/internalclientset/typed/apps/internalversion:all-srcs", + "//pkg/client/clientset_generated/internalclientset/typed/auditregistration/internalversion:all-srcs", "//pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion:all-srcs", "//pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion:all-srcs", "//pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion:all-srcs", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/clientset.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/clientset.go index 554d26807133..4a8cd33a6643 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/clientset.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/clientset.go @@ -24,6 +24,7 @@ import ( flowcontrol "k8s.io/client-go/util/flowcontrol" admissionregistrationinternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion" appsinternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion" + auditregistrationinternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/auditregistration/internalversion" authenticationinternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion" authorizationinternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion" autoscalinginternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion" @@ -46,6 +47,7 @@ type Interface interface { Admissionregistration() 
admissionregistrationinternalversion.AdmissionregistrationInterface Core() coreinternalversion.CoreInterface Apps() appsinternalversion.AppsInterface + Auditregistration() auditregistrationinternalversion.AuditregistrationInterface Authentication() authenticationinternalversion.AuthenticationInterface Authorization() authorizationinternalversion.AuthorizationInterface Autoscaling() autoscalinginternalversion.AutoscalingInterface @@ -69,6 +71,7 @@ type Clientset struct { admissionregistration *admissionregistrationinternalversion.AdmissionregistrationClient core *coreinternalversion.CoreClient apps *appsinternalversion.AppsClient + auditregistration *auditregistrationinternalversion.AuditregistrationClient authentication *authenticationinternalversion.AuthenticationClient authorization *authorizationinternalversion.AuthorizationClient autoscaling *autoscalinginternalversion.AutoscalingClient @@ -100,6 +103,11 @@ func (c *Clientset) Apps() appsinternalversion.AppsInterface { return c.apps } +// Auditregistration retrieves the AuditregistrationClient +func (c *Clientset) Auditregistration() auditregistrationinternalversion.AuditregistrationInterface { + return c.auditregistration +} + // Authentication retrieves the AuthenticationClient func (c *Clientset) Authentication() authenticationinternalversion.AuthenticationInterface { return c.authentication @@ -198,6 +206,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.auditregistration, err = auditregistrationinternalversion.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.authentication, err = authenticationinternalversion.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -269,6 +281,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { cs.admissionregistration = admissionregistrationinternalversion.NewForConfigOrDie(c) cs.core = coreinternalversion.NewForConfigOrDie(c) cs.apps = appsinternalversion.NewForConfigOrDie(c) + cs.auditregistration = auditregistrationinternalversion.NewForConfigOrDie(c) cs.authentication = authenticationinternalversion.NewForConfigOrDie(c) cs.authorization = authorizationinternalversion.NewForConfigOrDie(c) cs.autoscaling = autoscalinginternalversion.NewForConfigOrDie(c) @@ -294,6 +307,7 @@ func New(c rest.Interface) *Clientset { cs.admissionregistration = admissionregistrationinternalversion.New(c) cs.core = coreinternalversion.New(c) cs.apps = appsinternalversion.New(c) + cs.auditregistration = auditregistrationinternalversion.New(c) cs.authentication = authenticationinternalversion.New(c) cs.authorization = authorizationinternalversion.New(c) cs.autoscaling = autoscalinginternalversion.New(c) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme/BUILD index 30cbab1842b2..86f91055b34d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme/BUILD @@ -15,6 +15,7 @@ go_library( deps = [ "//pkg/apis/admissionregistration/install:go_default_library", "//pkg/apis/apps/install:go_default_library", + "//pkg/apis/auditregistration/install:go_default_library", "//pkg/apis/authentication/install:go_default_library", "//pkg/apis/authorization/install:go_default_library", 
"//pkg/apis/autoscaling/install:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme/register.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme/register.go index 727d0e2995f8..efc10eb859f0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme/register.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme/register.go @@ -25,6 +25,7 @@ import ( serializer "k8s.io/apimachinery/pkg/runtime/serializer" admissionregistration "k8s.io/kubernetes/pkg/apis/admissionregistration/install" apps "k8s.io/kubernetes/pkg/apis/apps/install" + auditregistration "k8s.io/kubernetes/pkg/apis/auditregistration/install" authentication "k8s.io/kubernetes/pkg/apis/authentication/install" authorization "k8s.io/kubernetes/pkg/apis/authorization/install" autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling/install" @@ -56,6 +57,7 @@ func Install(scheme *runtime.Scheme) { admissionregistration.Install(scheme) core.Install(scheme) apps.Install(scheme) + auditregistration.Install(scheme) authentication.Install(scheme) authorization.Install(scheme) autoscaling.Install(scheme) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/initializerconfiguration.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/initializerconfiguration.go index b792b5470a35..3cae439c4a3d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/initializerconfiguration.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/initializerconfiguration.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -72,10 +74,15 @@ func (c *initializerConfigurations) Get(name string, options v1.GetOptions) (res // List takes label and field selectors, and returns the list of InitializerConfigurations that match those selectors. func (c *initializerConfigurations) List(opts v1.ListOptions) (result *admissionregistration.InitializerConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &admissionregistration.InitializerConfigurationList{} err = c.client.Get(). Resource("initializerconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *initializerConfigurations) List(opts v1.ListOptions) (result *admission // Watch returns a watch.Interface that watches the requested initializerConfigurations. func (c *initializerConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("initializerconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). 
Watch() } @@ -125,9 +137,14 @@ func (c *initializerConfigurations) Delete(name string, options *v1.DeleteOption // DeleteCollection deletes a collection of objects. func (c *initializerConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("initializerconfigurations"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/mutatingwebhookconfiguration.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/mutatingwebhookconfiguration.go index c35d780fbfe9..6f3c0d1aa133 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/mutatingwebhookconfiguration.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/mutatingwebhookconfiguration.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -72,10 +74,15 @@ func (c *mutatingWebhookConfigurations) Get(name string, options v1.GetOptions) // List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. func (c *mutatingWebhookConfigurations) List(opts v1.ListOptions) (result *admissionregistration.MutatingWebhookConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &admissionregistration.MutatingWebhookConfigurationList{} err = c.client.Get(). Resource("mutatingwebhookconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *mutatingWebhookConfigurations) List(opts v1.ListOptions) (result *admis // Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. func (c *mutatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("mutatingwebhookconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *mutatingWebhookConfigurations) Delete(name string, options *v1.DeleteOp // DeleteCollection deletes a collection of objects. func (c *mutatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("mutatingwebhookconfigurations"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/validatingwebhookconfiguration.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/validatingwebhookconfiguration.go index 980d9b5b8a9f..183b85bdb773 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/validatingwebhookconfiguration.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/validatingwebhookconfiguration.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -72,10 +74,15 @@ func (c *validatingWebhookConfigurations) Get(name string, options v1.GetOptions // List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. func (c *validatingWebhookConfigurations) List(opts v1.ListOptions) (result *admissionregistration.ValidatingWebhookConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &admissionregistration.ValidatingWebhookConfigurationList{} err = c.client.Get(). Resource("validatingwebhookconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *validatingWebhookConfigurations) List(opts v1.ListOptions) (result *adm // Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. func (c *validatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("validatingwebhookconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *validatingWebhookConfigurations) Delete(name string, options *v1.Delete // DeleteCollection deletes a collection of objects. func (c *validatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("validatingwebhookconfigurations"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/BUILD index 794648663b05..2f786404bccd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/BUILD @@ -10,14 +10,16 @@ go_library( srcs = [ "apps_client.go", "controllerrevision.go", + "daemonset.go", + "deployment.go", "doc.go", "generated_expansion.go", + "replicaset.go", "statefulset.go", ], importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion", deps = [ "//pkg/apis/apps:go_default_library", - "//pkg/apis/autoscaling:go_default_library", "//pkg/client/clientset_generated/internalclientset/scheme:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/apps_client.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/apps_client.go index abcdc639fa86..e72ae6bb1cba 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/apps_client.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/apps_client.go @@ -26,6 +26,9 @@ import ( type AppsInterface interface { RESTClient() rest.Interface ControllerRevisionsGetter + DaemonSetsGetter + DeploymentsGetter + ReplicaSetsGetter StatefulSetsGetter } @@ -38,6 +41,18 @@ func (c *AppsClient) ControllerRevisions(namespace string) ControllerRevisionInt return newControllerRevisions(c, namespace) } +func (c *AppsClient) DaemonSets(namespace string) DaemonSetInterface { + return newDaemonSets(c, namespace) +} + +func (c *AppsClient) Deployments(namespace string) DeploymentInterface { + return newDeployments(c, namespace) +} + +func (c *AppsClient) ReplicaSets(namespace string) ReplicaSetInterface { + return newReplicaSets(c, namespace) +} + func (c *AppsClient) StatefulSets(namespace string) StatefulSetInterface { return newStatefulSets(c, namespace) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/controllerrevision.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/controllerrevision.go index 33a28c3a327d..bd3b933bb194 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/controllerrevision.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/controllerrevision.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -75,11 +77,16 @@ func (c *controllerRevisions) Get(name string, options v1.GetOptions) (result *a // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. func (c *controllerRevisions) List(opts v1.ListOptions) (result *apps.ControllerRevisionList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &apps.ControllerRevisionList{} err = c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *controllerRevisions) List(opts v1.ListOptions) (result *apps.Controller // Watch returns a watch.Interface that watches the requested controllerRevisions. func (c *controllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *controllerRevisions) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *controllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/daemonset.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/daemonset.go new file mode 100644 index 000000000000..f4b2c3ed31dd --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/daemonset.go @@ -0,0 +1,191 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package internalversion + +import ( + "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + apps "k8s.io/kubernetes/pkg/apis/apps" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" +) + +// DaemonSetsGetter has a method to return a DaemonSetInterface. +// A group's client should implement this interface. 
+type DaemonSetsGetter interface { + DaemonSets(namespace string) DaemonSetInterface +} + +// DaemonSetInterface has methods to work with DaemonSet resources. +type DaemonSetInterface interface { + Create(*apps.DaemonSet) (*apps.DaemonSet, error) + Update(*apps.DaemonSet) (*apps.DaemonSet, error) + UpdateStatus(*apps.DaemonSet) (*apps.DaemonSet, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*apps.DaemonSet, error) + List(opts v1.ListOptions) (*apps.DaemonSetList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *apps.DaemonSet, err error) + DaemonSetExpansion +} + +// daemonSets implements DaemonSetInterface +type daemonSets struct { + client rest.Interface + ns string +} + +// newDaemonSets returns a DaemonSets +func newDaemonSets(c *AppsClient, namespace string) *daemonSets { + return &daemonSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. +func (c *daemonSets) Get(name string, options v1.GetOptions) (result *apps.DaemonSet, err error) { + result = &apps.DaemonSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. +func (c *daemonSets) List(opts v1.ListOptions) (result *apps.DaemonSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &apps.DaemonSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested daemonSets. +func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. +func (c *daemonSets) Create(daemonSet *apps.DaemonSet) (result *apps.DaemonSet, err error) { + result = &apps.DaemonSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("daemonsets"). + Body(daemonSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. +func (c *daemonSets) Update(daemonSet *apps.DaemonSet) (result *apps.DaemonSet, err error) { + result = &apps.DaemonSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("daemonsets"). + Name(daemonSet.Name). + Body(daemonSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ +func (c *daemonSets) UpdateStatus(daemonSet *apps.DaemonSet) (result *apps.DaemonSet, err error) { + result = &apps.DaemonSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("daemonsets"). + Name(daemonSet.Name). + SubResource("status"). + Body(daemonSet). + Do(). + Into(result) + return +} + +// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. +func (c *daemonSets) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("daemonsets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *daemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched daemonSet. +func (c *daemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *apps.DaemonSet, err error) { + result = &apps.DaemonSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("daemonsets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/deployment.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/deployment.go new file mode 100644 index 000000000000..1bf0bf03bc27 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/deployment.go @@ -0,0 +1,191 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package internalversion + +import ( + "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + apps "k8s.io/kubernetes/pkg/apis/apps" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" +) + +// DeploymentsGetter has a method to return a DeploymentInterface. +// A group's client should implement this interface. +type DeploymentsGetter interface { + Deployments(namespace string) DeploymentInterface +} + +// DeploymentInterface has methods to work with Deployment resources. 
+type DeploymentInterface interface { + Create(*apps.Deployment) (*apps.Deployment, error) + Update(*apps.Deployment) (*apps.Deployment, error) + UpdateStatus(*apps.Deployment) (*apps.Deployment, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*apps.Deployment, error) + List(opts v1.ListOptions) (*apps.DeploymentList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *apps.Deployment, err error) + DeploymentExpansion +} + +// deployments implements DeploymentInterface +type deployments struct { + client rest.Interface + ns string +} + +// newDeployments returns a Deployments +func newDeployments(c *AppsClient, namespace string) *deployments { + return &deployments{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. +func (c *deployments) Get(name string, options v1.GetOptions) (result *apps.Deployment, err error) { + result = &apps.Deployment{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Deployments that match those selectors. +func (c *deployments) List(opts v1.ListOptions) (result *apps.DeploymentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &apps.DeploymentList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested deployments. +func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Create(deployment *apps.Deployment) (result *apps.Deployment, err error) { + result = &apps.Deployment{} + err = c.client.Post(). + Namespace(c.ns). + Resource("deployments"). + Body(deployment). + Do(). + Into(result) + return +} + +// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Update(deployment *apps.Deployment) (result *apps.Deployment, err error) { + result = &apps.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + Body(deployment). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *deployments) UpdateStatus(deployment *apps.Deployment) (result *apps.Deployment, err error) { + result = &apps.Deployment{} + err = c.client.Put(). 
+ Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + SubResource("status"). + Body(deployment). + Do(). + Into(result) + return +} + +// Delete takes name of the deployment and deletes it. Returns an error if one occurs. +func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched deployment. +func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *apps.Deployment, err error) { + result = &apps.Deployment{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("deployments"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/generated_expansion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/generated_expansion.go index 9bfab8576502..7987f18b259c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/generated_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/generated_expansion.go @@ -20,4 +20,10 @@ package internalversion type ControllerRevisionExpansion interface{} +type DaemonSetExpansion interface{} + +type DeploymentExpansion interface{} + +type ReplicaSetExpansion interface{} + type StatefulSetExpansion interface{} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/replicaset.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/replicaset.go new file mode 100644 index 000000000000..ea7527f2b19b --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/replicaset.go @@ -0,0 +1,191 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package internalversion + +import ( + "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + apps "k8s.io/kubernetes/pkg/apis/apps" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" +) + +// ReplicaSetsGetter has a method to return a ReplicaSetInterface. +// A group's client should implement this interface. +type ReplicaSetsGetter interface { + ReplicaSets(namespace string) ReplicaSetInterface +} + +// ReplicaSetInterface has methods to work with ReplicaSet resources. +type ReplicaSetInterface interface { + Create(*apps.ReplicaSet) (*apps.ReplicaSet, error) + Update(*apps.ReplicaSet) (*apps.ReplicaSet, error) + UpdateStatus(*apps.ReplicaSet) (*apps.ReplicaSet, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*apps.ReplicaSet, error) + List(opts v1.ListOptions) (*apps.ReplicaSetList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *apps.ReplicaSet, err error) + ReplicaSetExpansion +} + +// replicaSets implements ReplicaSetInterface +type replicaSets struct { + client rest.Interface + ns string +} + +// newReplicaSets returns a ReplicaSets +func newReplicaSets(c *AppsClient, namespace string) *replicaSets { + return &replicaSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. +func (c *replicaSets) Get(name string, options v1.GetOptions) (result *apps.ReplicaSet, err error) { + result = &apps.ReplicaSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. +func (c *replicaSets) List(opts v1.ListOptions) (result *apps.ReplicaSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &apps.ReplicaSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested replicaSets. +func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *replicaSets) Create(replicaSet *apps.ReplicaSet) (result *apps.ReplicaSet, err error) { + result = &apps.ReplicaSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("replicasets"). + Body(replicaSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a replicaSet and updates it. 
Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *replicaSets) Update(replicaSet *apps.ReplicaSet) (result *apps.ReplicaSet, err error) { + result = &apps.ReplicaSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSet.Name). + Body(replicaSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *replicaSets) UpdateStatus(replicaSet *apps.ReplicaSet) (result *apps.ReplicaSet, err error) { + result = &apps.ReplicaSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSet.Name). + SubResource("status"). + Body(replicaSet). + Do(). + Into(result) + return +} + +// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. +func (c *replicaSets) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicasets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *replicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched replicaSet. +func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *apps.ReplicaSet, err error) { + result = &apps.ReplicaSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("replicasets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/statefulset.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/statefulset.go index 4d1e5fa16fde..d1a72132927f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/statefulset.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/statefulset.go @@ -19,12 +19,13 @@ limitations under the License. 
package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" apps "k8s.io/kubernetes/pkg/apis/apps" - autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" ) @@ -45,9 +46,6 @@ type StatefulSetInterface interface { List(opts v1.ListOptions) (*apps.StatefulSetList, error) Watch(opts v1.ListOptions) (watch.Interface, error) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *apps.StatefulSet, err error) - GetScale(statefulSetName string, options v1.GetOptions) (*autoscaling.Scale, error) - UpdateScale(statefulSetName string, scale *autoscaling.Scale) (*autoscaling.Scale, error) - StatefulSetExpansion } @@ -80,11 +78,16 @@ func (c *statefulSets) Get(name string, options v1.GetOptions) (result *apps.Sta // List takes label and field selectors, and returns the list of StatefulSets that match those selectors. func (c *statefulSets) List(opts v1.ListOptions) (result *apps.StatefulSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &apps.StatefulSetList{} err = c.client.Get(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -92,11 +95,16 @@ func (c *statefulSets) List(opts v1.ListOptions) (result *apps.StatefulSetList, // Watch returns a watch.Interface that watches the requested statefulSets. func (c *statefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -154,10 +162,15 @@ func (c *statefulSets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *statefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() @@ -176,31 +189,3 @@ func (c *statefulSets) Patch(name string, pt types.PatchType, data []byte, subre Into(result) return } - -// GetScale takes name of the statefulSet, and returns the corresponding autoscaling.Scale object, and an error if there is any. -func (c *statefulSets) GetScale(statefulSetName string, options v1.GetOptions) (result *autoscaling.Scale, err error) { - result = &autoscaling.Scale{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSetName). - SubResource("scale"). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. 
-func (c *statefulSets) UpdateScale(statefulSetName string, scale *autoscaling.Scale) (result *autoscaling.Scale, err error) { - result = &autoscaling.Scale{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSetName). - SubResource("scale"). - Body(scale). - Do(). - Into(result) - return -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/auditregistration/internalversion/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/auditregistration/internalversion/BUILD new file mode 100644 index 000000000000..3d1e522b858a --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/auditregistration/internalversion/BUILD @@ -0,0 +1,38 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "auditregistration_client.go", + "auditsink.go", + "doc.go", + "generated_expansion.go", + ], + importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/auditregistration/internalversion", + visibility = ["//visibility:public"], + deps = [ + "//pkg/apis/auditregistration:go_default_library", + "//pkg/client/clientset_generated/internalclientset/scheme:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", + "//staging/src/k8s.io/client-go/rest:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/client/clientset_generated/internalclientset/typed/auditregistration/internalversion/fake:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/auditregistration/internalversion/auditregistration_client.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/auditregistration/internalversion/auditregistration_client.go new file mode 100644 index 000000000000..07bedcbf5f32 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/auditregistration/internalversion/auditregistration_client.go @@ -0,0 +1,96 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
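Before the auditregistration clients that follow, a note on what drives their shape: client-gen emits `UpdateStatus` only for types with a `Status` member (see the generated comments on the Deployment and ReplicaSet clients above), and it omits the `Namespace(...)` step for cluster-scoped types. Both are controlled by tags on the API type. A sketch of how those tags sit on a type — the `Example` type and its field are illustrative assumptions, not from this diff:

```go
package example

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// +genclient
// +genclient:nonNamespaced
// +genclient:noStatus
//
// Example is an illustrative API type: nonNamespaced yields a client
// without the Namespace(...) call in its request chains (like the
// AuditSink client below), and noStatus suppresses UpdateStatus.
type Example struct {
	metav1.TypeMeta
	metav1.ObjectMeta
	Spec string // stand-in spec field for the sketch
}
```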
+ +package internalversion + +import ( + rest "k8s.io/client-go/rest" + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" +) + +type AuditregistrationInterface interface { + RESTClient() rest.Interface + AuditSinksGetter +} + +// AuditregistrationClient is used to interact with features provided by the auditregistration.k8s.io group. +type AuditregistrationClient struct { + restClient rest.Interface +} + +func (c *AuditregistrationClient) AuditSinks() AuditSinkInterface { + return newAuditSinks(c) +} + +// NewForConfig creates a new AuditregistrationClient for the given config. +func NewForConfig(c *rest.Config) (*AuditregistrationClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AuditregistrationClient{client}, nil +} + +// NewForConfigOrDie creates a new AuditregistrationClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AuditregistrationClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AuditregistrationClient for the given RESTClient. +func New(c rest.Interface) *AuditregistrationClient { + return &AuditregistrationClient{c} +} + +func setConfigDefaults(config *rest.Config) error { + config.APIPath = "/apis" + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + if config.GroupVersion == nil || config.GroupVersion.Group != scheme.Scheme.PrioritizedVersionsForGroup("auditregistration.k8s.io")[0].Group { + gv := scheme.Scheme.PrioritizedVersionsForGroup("auditregistration.k8s.io")[0] + config.GroupVersion = &gv + } + config.NegotiatedSerializer = scheme.Codecs + + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AuditregistrationClient) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/auditregistration/internalversion/auditsink.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/auditregistration/internalversion/auditsink.go new file mode 100644 index 000000000000..8bbb0375af29 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/auditregistration/internalversion/auditsink.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
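A minimal sketch of wiring the client constructed above, assuming an in-cluster `rest.Config` (variable names are ours): `NewForConfig` copies the config, lets `setConfigDefaults` fill in the API path, group/version, codecs, and QPS/Burst (5 and 10 when unset), and — since AuditSink is cluster-scoped — the resulting interface takes no namespace:

```go
package main

import (
	rest "k8s.io/client-go/rest"
	auditinternal "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/auditregistration/internalversion"
)

func main() {
	// Assumes the process runs inside a cluster; any rest.Config works here.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	// NewForConfig copies cfg and applies setConfigDefaults (QPS 5, Burst 10
	// when unset) before building the underlying REST client.
	client, err := auditinternal.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	_ = client.AuditSinks() // cluster-scoped: no Namespace(...) in the chain
}
```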
+ +package internalversion + +import ( + "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + auditregistration "k8s.io/kubernetes/pkg/apis/auditregistration" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" +) + +// AuditSinksGetter has a method to return a AuditSinkInterface. +// A group's client should implement this interface. +type AuditSinksGetter interface { + AuditSinks() AuditSinkInterface +} + +// AuditSinkInterface has methods to work with AuditSink resources. +type AuditSinkInterface interface { + Create(*auditregistration.AuditSink) (*auditregistration.AuditSink, error) + Update(*auditregistration.AuditSink) (*auditregistration.AuditSink, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*auditregistration.AuditSink, error) + List(opts v1.ListOptions) (*auditregistration.AuditSinkList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *auditregistration.AuditSink, err error) + AuditSinkExpansion +} + +// auditSinks implements AuditSinkInterface +type auditSinks struct { + client rest.Interface +} + +// newAuditSinks returns a AuditSinks +func newAuditSinks(c *AuditregistrationClient) *auditSinks { + return &auditSinks{ + client: c.RESTClient(), + } +} + +// Get takes name of the auditSink, and returns the corresponding auditSink object, and an error if there is any. +func (c *auditSinks) Get(name string, options v1.GetOptions) (result *auditregistration.AuditSink, err error) { + result = &auditregistration.AuditSink{} + err = c.client.Get(). + Resource("auditsinks"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of AuditSinks that match those selectors. +func (c *auditSinks) List(opts v1.ListOptions) (result *auditregistration.AuditSinkList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &auditregistration.AuditSinkList{} + err = c.client.Get(). + Resource("auditsinks"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested auditSinks. +func (c *auditSinks) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("auditsinks"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a auditSink and creates it. Returns the server's representation of the auditSink, and an error, if there is any. +func (c *auditSinks) Create(auditSink *auditregistration.AuditSink) (result *auditregistration.AuditSink, err error) { + result = &auditregistration.AuditSink{} + err = c.client.Post(). + Resource("auditsinks"). + Body(auditSink). + Do(). + Into(result) + return +} + +// Update takes the representation of a auditSink and updates it. Returns the server's representation of the auditSink, and an error, if there is any. 
+func (c *auditSinks) Update(auditSink *auditregistration.AuditSink) (result *auditregistration.AuditSink, err error) { + result = &auditregistration.AuditSink{} + err = c.client.Put(). + Resource("auditsinks"). + Name(auditSink.Name). + Body(auditSink). + Do(). + Into(result) + return +} + +// Delete takes name of the auditSink and deletes it. Returns an error if one occurs. +func (c *auditSinks) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("auditsinks"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *auditSinks) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("auditsinks"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched auditSink. +func (c *auditSinks) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *auditregistration.AuditSink, err error) { + result = &auditregistration.AuditSink{} + err = c.client.Patch(pt). + Resource("auditsinks"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/auditregistration/internalversion/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/auditregistration/internalversion/doc.go new file mode 100644 index 000000000000..86602442babd --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/auditregistration/internalversion/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package internalversion diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/auditregistration/internalversion/generated_expansion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/auditregistration/internalversion/generated_expansion.go new file mode 100644 index 000000000000..4d3547b8d251 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/auditregistration/internalversion/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package internalversion + +type AuditSinkExpansion interface{} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/horizontalpodautoscaler.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/horizontalpodautoscaler.go index 39ec41254f81..2c5293c67ce4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/horizontalpodautoscaler.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/horizontalpodautoscaler.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -76,11 +78,16 @@ func (c *horizontalPodAutoscalers) Get(name string, options v1.GetOptions) (resu // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *autoscaling.HorizontalPodAutoscalerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &autoscaling.HorizontalPodAutoscalerList{} err = c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *autoscalin // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. func (c *horizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *horizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions // DeleteCollection deletes a collection of objects. func (c *horizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/cronjob.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/cronjob.go index 72cb6cffa718..04f959524aa5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/cronjob.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/cronjob.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -76,11 +78,16 @@ func (c *cronJobs) Get(name string, options v1.GetOptions) (result *batch.CronJo // List takes label and field selectors, and returns the list of CronJobs that match those selectors. func (c *cronJobs) List(opts v1.ListOptions) (result *batch.CronJobList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &batch.CronJobList{} err = c.client.Get(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *cronJobs) List(opts v1.ListOptions) (result *batch.CronJobList, err err // Watch returns a watch.Interface that watches the requested cronJobs. func (c *cronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *cronJobs) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *cronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/job.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/job.go index e9f9dc545ca8..fe5cd67e80f5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/job.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/job.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -76,11 +78,16 @@ func (c *jobs) Get(name string, options v1.GetOptions) (result *batch.Job, err e // List takes label and field selectors, and returns the list of Jobs that match those selectors. 
func (c *jobs) List(opts v1.ListOptions) (result *batch.JobList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &batch.JobList{} err = c.client.Get(). Namespace(c.ns). Resource("jobs"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *jobs) List(opts v1.ListOptions) (result *batch.JobList, err error) { // Watch returns a watch.Interface that watches the requested jobs. func (c *jobs) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("jobs"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *jobs) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *jobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("jobs"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/certificatesigningrequest.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/certificatesigningrequest.go index 8a5b7b380779..b1584f20195c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/certificatesigningrequest.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/certificatesigningrequest.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -73,10 +75,15 @@ func (c *certificateSigningRequests) Get(name string, options v1.GetOptions) (re // List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. func (c *certificateSigningRequests) List(opts v1.ListOptions) (result *certificates.CertificateSigningRequestList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &certificates.CertificateSigningRequestList{} err = c.client.Get(). Resource("certificatesigningrequests"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -84,10 +91,15 @@ func (c *certificateSigningRequests) List(opts v1.ListOptions) (result *certific // Watch returns a watch.Interface that watches the requested certificateSigningRequests. func (c *certificateSigningRequests) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("certificatesigningrequests"). 
VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -141,9 +153,14 @@ func (c *certificateSigningRequests) Delete(name string, options *v1.DeleteOptio // DeleteCollection deletes a collection of objects. func (c *certificateSigningRequests) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("certificatesigningrequests"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/coordination/internalversion/lease.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/coordination/internalversion/lease.go index a2023c38b907..fd4987e48972 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/coordination/internalversion/lease.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/coordination/internalversion/lease.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -75,11 +77,16 @@ func (c *leases) Get(name string, options v1.GetOptions) (result *coordination.L // List takes label and field selectors, and returns the list of Leases that match those selectors. func (c *leases) List(opts v1.ListOptions) (result *coordination.LeaseList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &coordination.LeaseList{} err = c.client.Get(). Namespace(c.ns). Resource("leases"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *leases) List(opts v1.ListOptions) (result *coordination.LeaseList, err // Watch returns a watch.Interface that watches the requested leases. func (c *leases) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("leases"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *leases) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *leases) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("leases"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/componentstatus.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/componentstatus.go index 07b75daab4c9..d3d690798fbd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/componentstatus.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/componentstatus.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -72,10 +74,15 @@ func (c *componentStatuses) Get(name string, options v1.GetOptions) (result *cor // List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors. func (c *componentStatuses) List(opts v1.ListOptions) (result *core.ComponentStatusList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &core.ComponentStatusList{} err = c.client.Get(). Resource("componentstatuses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *componentStatuses) List(opts v1.ListOptions) (result *core.ComponentSta // Watch returns a watch.Interface that watches the requested componentStatuses. func (c *componentStatuses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("componentstatuses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *componentStatuses) Delete(name string, options *v1.DeleteOptions) error // DeleteCollection deletes a collection of objects. func (c *componentStatuses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("componentstatuses"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/configmap.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/configmap.go index af1fc1fa17c2..77eed84b525e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/configmap.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/configmap.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -75,11 +77,16 @@ func (c *configMaps) Get(name string, options v1.GetOptions) (result *core.Confi // List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. func (c *configMaps) List(opts v1.ListOptions) (result *core.ConfigMapList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &core.ConfigMapList{} err = c.client.Get(). Namespace(c.ns). Resource("configmaps"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *configMaps) List(opts v1.ListOptions) (result *core.ConfigMapList, err // Watch returns a watch.Interface that watches the requested configMaps. func (c *configMaps) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("configmaps"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *configMaps) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *configMaps) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("configmaps"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/endpoints.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/endpoints.go index acb3fd21e57f..64c1c473120a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/endpoints.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/endpoints.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -75,11 +77,16 @@ func (c *endpoints) Get(name string, options v1.GetOptions) (result *core.Endpoi // List takes label and field selectors, and returns the list of Endpoints that match those selectors. func (c *endpoints) List(opts v1.ListOptions) (result *core.EndpointsList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &core.EndpointsList{} err = c.client.Get(). Namespace(c.ns). Resource("endpoints"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *endpoints) List(opts v1.ListOptions) (result *core.EndpointsList, err e // Watch returns a watch.Interface that watches the requested endpoints. 
func (c *endpoints) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("endpoints"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *endpoints) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *endpoints) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("endpoints"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/event.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/event.go index 16dbe6596421..ed123a58df55 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/event.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/event.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -75,11 +77,16 @@ func (c *events) Get(name string, options v1.GetOptions) (result *core.Event, er // List takes label and field selectors, and returns the list of Events that match those selectors. func (c *events) List(opts v1.ListOptions) (result *core.EventList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &core.EventList{} err = c.client.Get(). Namespace(c.ns). Resource("events"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *events) List(opts v1.ListOptions) (result *core.EventList, err error) { // Watch returns a watch.Interface that watches the requested events. func (c *events) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("events"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *events) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *events) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("events"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/limitrange.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/limitrange.go index d16b9b5ae1aa..4f765a5b8b43 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/limitrange.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/limitrange.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -75,11 +77,16 @@ func (c *limitRanges) Get(name string, options v1.GetOptions) (result *core.Limi // List takes label and field selectors, and returns the list of LimitRanges that match those selectors. func (c *limitRanges) List(opts v1.ListOptions) (result *core.LimitRangeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &core.LimitRangeList{} err = c.client.Get(). Namespace(c.ns). Resource("limitranges"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *limitRanges) List(opts v1.ListOptions) (result *core.LimitRangeList, er // Watch returns a watch.Interface that watches the requested limitRanges. func (c *limitRanges) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("limitranges"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *limitRanges) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *limitRanges) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("limitranges"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/namespace.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/namespace.go index 5a0f413d35e3..33e2c106988c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/namespace.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/namespace.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -73,10 +75,15 @@ func (c *namespaces) Get(name string, options v1.GetOptions) (result *core.Names // List takes label and field selectors, and returns the list of Namespaces that match those selectors. func (c *namespaces) List(opts v1.ListOptions) (result *core.NamespaceList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &core.NamespaceList{} err = c.client.Get(). Resource("namespaces"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -84,10 +91,15 @@ func (c *namespaces) List(opts v1.ListOptions) (result *core.NamespaceList, err // Watch returns a watch.Interface that watches the requested namespaces. func (c *namespaces) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("namespaces"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -141,9 +153,14 @@ func (c *namespaces) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *namespaces) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("namespaces"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/node.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/node.go index 515663c4d692..c49058ab1f8a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/node.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/node.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -73,10 +75,15 @@ func (c *nodes) Get(name string, options v1.GetOptions) (result *core.Node, err // List takes label and field selectors, and returns the list of Nodes that match those selectors. func (c *nodes) List(opts v1.ListOptions) (result *core.NodeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &core.NodeList{} err = c.client.Get(). Resource("nodes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -84,10 +91,15 @@ func (c *nodes) List(opts v1.ListOptions) (result *core.NodeList, err error) { // Watch returns a watch.Interface that watches the requested nodes. 
func (c *nodes) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("nodes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -141,9 +153,14 @@ func (c *nodes) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *nodes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("nodes"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolume.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolume.go index fb717d8a3f6f..017ad6eee111 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolume.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolume.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -73,10 +75,15 @@ func (c *persistentVolumes) Get(name string, options v1.GetOptions) (result *cor // List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors. func (c *persistentVolumes) List(opts v1.ListOptions) (result *core.PersistentVolumeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &core.PersistentVolumeList{} err = c.client.Get(). Resource("persistentvolumes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -84,10 +91,15 @@ func (c *persistentVolumes) List(opts v1.ListOptions) (result *core.PersistentVo // Watch returns a watch.Interface that watches the requested persistentVolumes. func (c *persistentVolumes) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("persistentvolumes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -141,9 +153,14 @@ func (c *persistentVolumes) Delete(name string, options *v1.DeleteOptions) error // DeleteCollection deletes a collection of objects. func (c *persistentVolumes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("persistentvolumes"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolumeclaim.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolumeclaim.go index a0111d67cdce..ed260b9f4cd0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolumeclaim.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolumeclaim.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -76,11 +78,16 @@ func (c *persistentVolumeClaims) Get(name string, options v1.GetOptions) (result // List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors. func (c *persistentVolumeClaims) List(opts v1.ListOptions) (result *core.PersistentVolumeClaimList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &core.PersistentVolumeClaimList{} err = c.client.Get(). Namespace(c.ns). Resource("persistentvolumeclaims"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *persistentVolumeClaims) List(opts v1.ListOptions) (result *core.Persist // Watch returns a watch.Interface that watches the requested persistentVolumeClaims. func (c *persistentVolumeClaims) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("persistentvolumeclaims"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *persistentVolumeClaims) Delete(name string, options *v1.DeleteOptions) // DeleteCollection deletes a collection of objects. func (c *persistentVolumeClaims) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("persistentvolumeclaims"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/pod.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/pod.go index b73b5b465870..faa40c200a6b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/pod.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/pod.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -76,11 +78,16 @@ func (c *pods) Get(name string, options v1.GetOptions) (result *core.Pod, err er // List takes label and field selectors, and returns the list of Pods that match those selectors. func (c *pods) List(opts v1.ListOptions) (result *core.PodList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &core.PodList{} err = c.client.Get(). Namespace(c.ns). Resource("pods"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *pods) List(opts v1.ListOptions) (result *core.PodList, err error) { // Watch returns a watch.Interface that watches the requested pods. func (c *pods) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("pods"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *pods) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *pods) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("pods"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/podtemplate.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/podtemplate.go index a9ef5caec882..6ef974d2b3b7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/podtemplate.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/podtemplate.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -75,11 +77,16 @@ func (c *podTemplates) Get(name string, options v1.GetOptions) (result *core.Pod // List takes label and field selectors, and returns the list of PodTemplates that match those selectors. func (c *podTemplates) List(opts v1.ListOptions) (result *core.PodTemplateList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &core.PodTemplateList{} err = c.client.Get(). Namespace(c.ns). Resource("podtemplates"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *podTemplates) List(opts v1.ListOptions) (result *core.PodTemplateList, // Watch returns a watch.Interface that watches the requested podTemplates. 
func (c *podTemplates) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("podtemplates"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *podTemplates) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *podTemplates) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("podtemplates"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/replicationcontroller.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/replicationcontroller.go index 225f020fee10..cda95f2527e5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/replicationcontroller.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/replicationcontroller.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -80,11 +82,16 @@ func (c *replicationControllers) Get(name string, options v1.GetOptions) (result // List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors. func (c *replicationControllers) List(opts v1.ListOptions) (result *core.ReplicationControllerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &core.ReplicationControllerList{} err = c.client.Get(). Namespace(c.ns). Resource("replicationcontrollers"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -92,11 +99,16 @@ func (c *replicationControllers) List(opts v1.ListOptions) (result *core.Replica // Watch returns a watch.Interface that watches the requested replicationControllers. func (c *replicationControllers) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("replicationcontrollers"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -154,10 +166,15 @@ func (c *replicationControllers) Delete(name string, options *v1.DeleteOptions) // DeleteCollection deletes a collection of objects. func (c *replicationControllers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("replicationcontrollers"). 
VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/resourcequota.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/resourcequota.go index 990451072d07..5ad2d4b8ca12 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/resourcequota.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/resourcequota.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -76,11 +78,16 @@ func (c *resourceQuotas) Get(name string, options v1.GetOptions) (result *core.R // List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. func (c *resourceQuotas) List(opts v1.ListOptions) (result *core.ResourceQuotaList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &core.ResourceQuotaList{} err = c.client.Get(). Namespace(c.ns). Resource("resourcequotas"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *resourceQuotas) List(opts v1.ListOptions) (result *core.ResourceQuotaLi // Watch returns a watch.Interface that watches the requested resourceQuotas. func (c *resourceQuotas) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("resourcequotas"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *resourceQuotas) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *resourceQuotas) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("resourcequotas"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/secret.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/secret.go index c3fe44d5d874..aea79ff03671 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/secret.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/secret.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -75,11 +77,16 @@ func (c *secrets) Get(name string, options v1.GetOptions) (result *core.Secret, // List takes label and field selectors, and returns the list of Secrets that match those selectors. func (c *secrets) List(opts v1.ListOptions) (result *core.SecretList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &core.SecretList{} err = c.client.Get(). Namespace(c.ns). Resource("secrets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *secrets) List(opts v1.ListOptions) (result *core.SecretList, err error) // Watch returns a watch.Interface that watches the requested secrets. func (c *secrets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("secrets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *secrets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *secrets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("secrets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/service.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/service.go index e05f53376290..df8830871c7a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/service.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/service.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -76,11 +78,16 @@ func (c *services) Get(name string, options v1.GetOptions) (result *core.Service // List takes label and field selectors, and returns the list of Services that match those selectors. func (c *services) List(opts v1.ListOptions) (result *core.ServiceList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &core.ServiceList{} err = c.client.Get(). Namespace(c.ns). Resource("services"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *services) List(opts v1.ListOptions) (result *core.ServiceList, err erro // Watch returns a watch.Interface that watches the requested services. 
func (c *services) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("services"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *services) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *services) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("services"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/serviceaccount.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/serviceaccount.go index c2b6012c4d06..99578c1ee7dc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/serviceaccount.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/serviceaccount.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -75,11 +77,16 @@ func (c *serviceAccounts) Get(name string, options v1.GetOptions) (result *core. // List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors. func (c *serviceAccounts) List(opts v1.ListOptions) (result *core.ServiceAccountList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &core.ServiceAccountList{} err = c.client.Get(). Namespace(c.ns). Resource("serviceaccounts"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *serviceAccounts) List(opts v1.ListOptions) (result *core.ServiceAccount // Watch returns a watch.Interface that watches the requested serviceAccounts. func (c *serviceAccounts) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("serviceaccounts"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *serviceAccounts) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *serviceAccounts) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("serviceaccounts"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/BUILD index 97770ae84496..6fb0ee1557ec 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/BUILD @@ -8,18 +8,13 @@ load( go_library( name = "go_default_library", srcs = [ - "daemonset.go", - "deployment.go", - "deployment_expansion.go", "doc.go", "extensions_client.go", "generated_expansion.go", "ingress.go", - "replicaset.go", ], importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion", deps = [ - "//pkg/apis/autoscaling:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/client/clientset_generated/internalclientset/scheme:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/daemonset.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/daemonset.go deleted file mode 100644 index 66fd1af05ef1..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/daemonset.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// DaemonSetsGetter has a method to return a DaemonSetInterface. -// A group's client should implement this interface. -type DaemonSetsGetter interface { - DaemonSets(namespace string) DaemonSetInterface -} - -// DaemonSetInterface has methods to work with DaemonSet resources. 
-type DaemonSetInterface interface { - Create(*extensions.DaemonSet) (*extensions.DaemonSet, error) - Update(*extensions.DaemonSet) (*extensions.DaemonSet, error) - UpdateStatus(*extensions.DaemonSet) (*extensions.DaemonSet, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*extensions.DaemonSet, error) - List(opts v1.ListOptions) (*extensions.DaemonSetList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *extensions.DaemonSet, err error) - DaemonSetExpansion -} - -// daemonSets implements DaemonSetInterface -type daemonSets struct { - client rest.Interface - ns string -} - -// newDaemonSets returns a DaemonSets -func newDaemonSets(c *ExtensionsClient, namespace string) *daemonSets { - return &daemonSets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *daemonSets) Get(name string, options v1.GetOptions) (result *extensions.DaemonSet, err error) { - result = &extensions.DaemonSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. -func (c *daemonSets) List(opts v1.ListOptions) (result *extensions.DaemonSetList, err error) { - result = &extensions.DaemonSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested daemonSets. -func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Create(daemonSet *extensions.DaemonSet) (result *extensions.DaemonSet, err error) { - result = &extensions.DaemonSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("daemonsets"). - Body(daemonSet). - Do(). - Into(result) - return -} - -// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Update(daemonSet *extensions.DaemonSet) (result *extensions.DaemonSet, err error) { - result = &extensions.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - Body(daemonSet). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *daemonSets) UpdateStatus(daemonSet *extensions.DaemonSet) (result *extensions.DaemonSet, err error) { - result = &extensions.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - SubResource("status"). - Body(daemonSet). - Do(). - Into(result) - return -} - -// Delete takes name of the daemonSet and deletes it. 
Returns an error if one occurs. -func (c *daemonSets) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *daemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched daemonSet. -func (c *daemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *extensions.DaemonSet, err error) { - result = &extensions.DaemonSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("daemonsets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/deployment.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/deployment.go deleted file mode 100644 index 074ea47f72d6..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/deployment.go +++ /dev/null @@ -1,206 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// DeploymentsGetter has a method to return a DeploymentInterface. -// A group's client should implement this interface. -type DeploymentsGetter interface { - Deployments(namespace string) DeploymentInterface -} - -// DeploymentInterface has methods to work with Deployment resources. 
-type DeploymentInterface interface { - Create(*extensions.Deployment) (*extensions.Deployment, error) - Update(*extensions.Deployment) (*extensions.Deployment, error) - UpdateStatus(*extensions.Deployment) (*extensions.Deployment, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*extensions.Deployment, error) - List(opts v1.ListOptions) (*extensions.DeploymentList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *extensions.Deployment, err error) - GetScale(deploymentName string, options v1.GetOptions) (*autoscaling.Scale, error) - UpdateScale(deploymentName string, scale *autoscaling.Scale) (*autoscaling.Scale, error) - - DeploymentExpansion -} - -// deployments implements DeploymentInterface -type deployments struct { - client rest.Interface - ns string -} - -// newDeployments returns a Deployments -func newDeployments(c *ExtensionsClient, namespace string) *deployments { - return &deployments{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *deployments) Get(name string, options v1.GetOptions) (result *extensions.Deployment, err error) { - result = &extensions.Deployment{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *deployments) List(opts v1.ListOptions) (result *extensions.DeploymentList, err error) { - result = &extensions.DeploymentList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Create(deployment *extensions.Deployment) (result *extensions.Deployment, err error) { - result = &extensions.Deployment{} - err = c.client.Post(). - Namespace(c.ns). - Resource("deployments"). - Body(deployment). - Do(). - Into(result) - return -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(deployment *extensions.Deployment) (result *extensions.Deployment, err error) { - result = &extensions.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - Body(deployment). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- -func (c *deployments) UpdateStatus(deployment *extensions.Deployment) (result *extensions.Deployment, err error) { - result = &extensions.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - SubResource("status"). - Body(deployment). - Do(). - Into(result) - return -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched deployment. -func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *extensions.Deployment, err error) { - result = &extensions.Deployment{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("deployments"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} - -// GetScale takes name of the deployment, and returns the corresponding autoscaling.Scale object, and an error if there is any. -func (c *deployments) GetScale(deploymentName string, options v1.GetOptions) (result *autoscaling.Scale, err error) { - result = &autoscaling.Scale{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - Name(deploymentName). - SubResource("scale"). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *deployments) UpdateScale(deploymentName string, scale *autoscaling.Scale) (result *autoscaling.Scale, err error) { - result = &autoscaling.Scale{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deploymentName). - SubResource("scale"). - Body(scale). - Do(). - Into(result) - return -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/deployment_expansion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/deployment_expansion.go deleted file mode 100644 index f8d7a1c6341c..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/deployment_expansion.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package internalversion - -import "k8s.io/kubernetes/pkg/apis/extensions" - -// The DeploymentExpansion interface allows manually adding extra methods to the DeploymentInterface. -type DeploymentExpansion interface { - Rollback(*extensions.DeploymentRollback) error -} - -// Rollback applied the provided DeploymentRollback to the named deployment in the current namespace. -func (c *deployments) Rollback(deploymentRollback *extensions.DeploymentRollback) error { - return c.client.Post().Namespace(c.ns).Resource("deployments").Name(deploymentRollback.Name).SubResource("rollback").Body(deploymentRollback).Do().Error() -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/extensions_client.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/extensions_client.go index 43ea79be3233..b7a7df1b96cd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/extensions_client.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/extensions_client.go @@ -25,10 +25,7 @@ import ( type ExtensionsInterface interface { RESTClient() rest.Interface - DaemonSetsGetter - DeploymentsGetter IngressesGetter - ReplicaSetsGetter } // ExtensionsClient is used to interact with features provided by the extensions group. @@ -36,22 +33,10 @@ type ExtensionsClient struct { restClient rest.Interface } -func (c *ExtensionsClient) DaemonSets(namespace string) DaemonSetInterface { - return newDaemonSets(c, namespace) -} - -func (c *ExtensionsClient) Deployments(namespace string) DeploymentInterface { - return newDeployments(c, namespace) -} - func (c *ExtensionsClient) Ingresses(namespace string) IngressInterface { return newIngresses(c, namespace) } -func (c *ExtensionsClient) ReplicaSets(namespace string) ReplicaSetInterface { - return newReplicaSets(c, namespace) -} - // NewForConfig creates a new ExtensionsClient for the given config. func NewForConfig(c *rest.Config) (*ExtensionsClient, error) { config := *c diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/generated_expansion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/generated_expansion.go index 2560f4a32d14..3c1593ccf5e5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/generated_expansion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/generated_expansion.go @@ -18,8 +18,4 @@ limitations under the License. 
package internalversion -type DaemonSetExpansion interface{} - type IngressExpansion interface{} - -type ReplicaSetExpansion interface{} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/ingress.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/ingress.go index bbe7577ccb73..2b8f27e39aa1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/ingress.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/ingress.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -76,11 +78,16 @@ func (c *ingresses) Get(name string, options v1.GetOptions) (result *extensions. // List takes label and field selectors, and returns the list of Ingresses that match those selectors. func (c *ingresses) List(opts v1.ListOptions) (result *extensions.IngressList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &extensions.IngressList{} err = c.client.Get(). Namespace(c.ns). Resource("ingresses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *ingresses) List(opts v1.ListOptions) (result *extensions.IngressList, e // Watch returns a watch.Interface that watches the requested ingresses. func (c *ingresses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("ingresses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *ingresses) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *ingresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("ingresses"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/replicaset.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/replicaset.go deleted file mode 100644 index b2263cb09ed5..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/replicaset.go +++ /dev/null @@ -1,206 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// ReplicaSetsGetter has a method to return a ReplicaSetInterface. -// A group's client should implement this interface. -type ReplicaSetsGetter interface { - ReplicaSets(namespace string) ReplicaSetInterface -} - -// ReplicaSetInterface has methods to work with ReplicaSet resources. -type ReplicaSetInterface interface { - Create(*extensions.ReplicaSet) (*extensions.ReplicaSet, error) - Update(*extensions.ReplicaSet) (*extensions.ReplicaSet, error) - UpdateStatus(*extensions.ReplicaSet) (*extensions.ReplicaSet, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*extensions.ReplicaSet, error) - List(opts v1.ListOptions) (*extensions.ReplicaSetList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *extensions.ReplicaSet, err error) - GetScale(replicaSetName string, options v1.GetOptions) (*autoscaling.Scale, error) - UpdateScale(replicaSetName string, scale *autoscaling.Scale) (*autoscaling.Scale, error) - - ReplicaSetExpansion -} - -// replicaSets implements ReplicaSetInterface -type replicaSets struct { - client rest.Interface - ns string -} - -// newReplicaSets returns a ReplicaSets -func newReplicaSets(c *ExtensionsClient, namespace string) *replicaSets { - return &replicaSets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. -func (c *replicaSets) Get(name string, options v1.GetOptions) (result *extensions.ReplicaSet, err error) { - result = &extensions.ReplicaSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *replicaSets) List(opts v1.ListOptions) (result *extensions.ReplicaSetList, err error) { - result = &extensions.ReplicaSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested replicaSets. -func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a replicaSet and creates it. 
Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Create(replicaSet *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) { - result = &extensions.ReplicaSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("replicasets"). - Body(replicaSet). - Do(). - Into(result) - return -} - -// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Update(replicaSet *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) { - result = &extensions.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - Body(replicaSet). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *replicaSets) UpdateStatus(replicaSet *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) { - result = &extensions.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - SubResource("status"). - Body(replicaSet). - Do(). - Into(result) - return -} - -// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *replicaSets) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *replicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched replicaSet. -func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *extensions.ReplicaSet, err error) { - result = &extensions.ReplicaSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("replicasets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} - -// GetScale takes name of the replicaSet, and returns the corresponding autoscaling.Scale object, and an error if there is any. -func (c *replicaSets) GetScale(replicaSetName string, options v1.GetOptions) (result *autoscaling.Scale, err error) { - result = &autoscaling.Scale{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSetName). - SubResource("scale"). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *replicaSets) UpdateScale(replicaSetName string, scale *autoscaling.Scale) (result *autoscaling.Scale, err error) { - result = &autoscaling.Scale{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSetName). - SubResource("scale"). - Body(scale). - Do(). 
- Into(result) - return -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/networkpolicy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/networkpolicy.go index 6a0cdbc3bf60..d0e8533bf77b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/networkpolicy.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/networkpolicy.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -75,11 +77,16 @@ func (c *networkPolicies) Get(name string, options v1.GetOptions) (result *netwo // List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. func (c *networkPolicies) List(opts v1.ListOptions) (result *networking.NetworkPolicyList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &networking.NetworkPolicyList{} err = c.client.Get(). Namespace(c.ns). Resource("networkpolicies"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *networkPolicies) List(opts v1.ListOptions) (result *networking.NetworkP // Watch returns a watch.Interface that watches the requested networkPolicies. func (c *networkPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("networkpolicies"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *networkPolicies) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *networkPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("networkpolicies"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/poddisruptionbudget.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/poddisruptionbudget.go index 7e7abfcb27cc..5985a5be0ab9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/poddisruptionbudget.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/poddisruptionbudget.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -76,11 +78,16 @@ func (c *podDisruptionBudgets) Get(name string, options v1.GetOptions) (result * // List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. func (c *podDisruptionBudgets) List(opts v1.ListOptions) (result *policy.PodDisruptionBudgetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &policy.PodDisruptionBudgetList{} err = c.client.Get(). Namespace(c.ns). Resource("poddisruptionbudgets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *podDisruptionBudgets) List(opts v1.ListOptions) (result *policy.PodDisr // Watch returns a watch.Interface that watches the requested podDisruptionBudgets. func (c *podDisruptionBudgets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("poddisruptionbudgets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *podDisruptionBudgets) Delete(name string, options *v1.DeleteOptions) er // DeleteCollection deletes a collection of objects. func (c *podDisruptionBudgets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("poddisruptionbudgets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/podsecuritypolicy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/podsecuritypolicy.go index 17c162fa47a4..9ee7b09530c1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/podsecuritypolicy.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/podsecuritypolicy.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -72,10 +74,15 @@ func (c *podSecurityPolicies) Get(name string, options v1.GetOptions) (result *p // List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *policy.PodSecurityPolicyList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &policy.PodSecurityPolicyList{} err = c.client.Get(). Resource("podsecuritypolicies"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). 
Into(result) return @@ -83,10 +90,15 @@ func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *policy.PodSecur // Watch returns a watch.Interface that watches the requested podSecurityPolicies. func (c *podSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("podsecuritypolicies"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *podSecurityPolicies) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *podSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("podsecuritypolicies"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/clusterrole.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/clusterrole.go index 985e90f16aa8..7e9394d4a937 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/clusterrole.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/clusterrole.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -72,10 +74,15 @@ func (c *clusterRoles) Get(name string, options v1.GetOptions) (result *rbac.Clu // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. func (c *clusterRoles) List(opts v1.ListOptions) (result *rbac.ClusterRoleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &rbac.ClusterRoleList{} err = c.client.Get(). Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *clusterRoles) List(opts v1.ListOptions) (result *rbac.ClusterRoleList, // Watch returns a watch.Interface that watches the requested clusterRoles. func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *clusterRoles) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *clusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("clusterroles"). 
VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/clusterrolebinding.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/clusterrolebinding.go index 01dcf227524d..f6a06750a66a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/clusterrolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/clusterrolebinding.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -72,10 +74,15 @@ func (c *clusterRoleBindings) Get(name string, options v1.GetOptions) (result *r // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *rbac.ClusterRoleBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &rbac.ClusterRoleBindingList{} err = c.client.Get(). Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *rbac.ClusterRol // Watch returns a watch.Interface that watches the requested clusterRoleBindings. func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *clusterRoleBindings) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *clusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("clusterrolebindings"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/role.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/role.go index 7272550e8dbc..903388150efb 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/role.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/role.go @@ -19,6 +19,8 @@ limitations under the License. 
package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -75,11 +77,16 @@ func (c *roles) Get(name string, options v1.GetOptions) (result *rbac.Role, err // List takes label and field selectors, and returns the list of Roles that match those selectors. func (c *roles) List(opts v1.ListOptions) (result *rbac.RoleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &rbac.RoleList{} err = c.client.Get(). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *roles) List(opts v1.ListOptions) (result *rbac.RoleList, err error) { // Watch returns a watch.Interface that watches the requested roles. func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *roles) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("roles"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/rolebinding.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/rolebinding.go index bed5dcdf9a91..0315e69bb398 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/rolebinding.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/rolebinding.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -75,11 +77,16 @@ func (c *roleBindings) Get(name string, options v1.GetOptions) (result *rbac.Rol // List takes label and field selectors, and returns the list of RoleBindings that match those selectors. func (c *roleBindings) List(opts v1.ListOptions) (result *rbac.RoleBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &rbac.RoleBindingList{} err = c.client.Get(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *roleBindings) List(opts v1.ListOptions) (result *rbac.RoleBindingList, // Watch returns a watch.Interface that watches the requested roleBindings. 
func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *roleBindings) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/priorityclass.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/priorityclass.go index 8d5f992da641..18a4329946c7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/priorityclass.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/priorityclass.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -72,10 +74,15 @@ func (c *priorityClasses) Get(name string, options v1.GetOptions) (result *sched // List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. func (c *priorityClasses) List(opts v1.ListOptions) (result *scheduling.PriorityClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &scheduling.PriorityClassList{} err = c.client.Get(). Resource("priorityclasses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *priorityClasses) List(opts v1.ListOptions) (result *scheduling.Priority // Watch returns a watch.Interface that watches the requested priorityClasses. func (c *priorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("priorityclasses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *priorityClasses) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *priorityClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("priorityclasses"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/podpreset.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/podpreset.go index 334c2180c029..bf99269060da 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/podpreset.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/podpreset.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -75,11 +77,16 @@ func (c *podPresets) Get(name string, options v1.GetOptions) (result *settings.P // List takes label and field selectors, and returns the list of PodPresets that match those selectors. func (c *podPresets) List(opts v1.ListOptions) (result *settings.PodPresetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &settings.PodPresetList{} err = c.client.Get(). Namespace(c.ns). Resource("podpresets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -87,11 +94,16 @@ func (c *podPresets) List(opts v1.ListOptions) (result *settings.PodPresetList, // Watch returns a watch.Interface that watches the requested podPresets. func (c *podPresets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("podpresets"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -133,10 +145,15 @@ func (c *podPresets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *podPresets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("podpresets"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/storageclass.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/storageclass.go index 4939d7af8c3c..5f40850a8079 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/storageclass.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/storageclass.go @@ -19,6 +19,8 @@ limitations under the License. 
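Seen from the caller, the same field now bounds a request on both ends: the apiserver already honored `TimeoutSeconds`, and the regenerated clients additionally derive a matching HTTP timeout from it. A small usage sketch:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	seconds := int64(30)
	opts := metav1.ListOptions{Watch: true, TimeoutSeconds: &seconds}
	// Passing opts to any of the regenerated clients now also yields a
	// client-side request.Timeout(30 * time.Second), so a stalled watch
	// cannot outlive the server-side bound.
	fmt.Printf("watch bounded to %ds on both server and client\n", *opts.TimeoutSeconds)
}
```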
package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -72,10 +74,15 @@ func (c *storageClasses) Get(name string, options v1.GetOptions) (result *storag // List takes label and field selectors, and returns the list of StorageClasses that match those selectors. func (c *storageClasses) List(opts v1.ListOptions) (result *storage.StorageClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &storage.StorageClassList{} err = c.client.Get(). Resource("storageclasses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -83,10 +90,15 @@ func (c *storageClasses) List(opts v1.ListOptions) (result *storage.StorageClass // Watch returns a watch.Interface that watches the requested storageClasses. func (c *storageClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("storageclasses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -125,9 +137,14 @@ func (c *storageClasses) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *storageClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("storageclasses"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/volumeattachment.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/volumeattachment.go index 2ce2c9784cf3..571cd99809a8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/volumeattachment.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/volumeattachment.go @@ -19,6 +19,8 @@ limitations under the License. package internalversion import ( + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -73,10 +75,15 @@ func (c *volumeAttachments) Get(name string, options v1.GetOptions) (result *sto // List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. func (c *volumeAttachments) List(opts v1.ListOptions) (result *storage.VolumeAttachmentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &storage.VolumeAttachmentList{} err = c.client.Get(). Resource("volumeattachments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -84,10 +91,15 @@ func (c *volumeAttachments) List(opts v1.ListOptions) (result *storage.VolumeAtt // Watch returns a watch.Interface that watches the requested volumeAttachments. 
func (c *volumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("volumeattachments"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -141,9 +153,14 @@ func (c *volumeAttachments) Delete(name string, options *v1.DeleteOptions) error // DeleteCollection deletes a collection of objects. func (c *volumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("volumeattachments"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/leaderelectionconfig/config.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/leaderelectionconfig/config.go index 310f1313795f..cc9ae7efac96 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/leaderelectionconfig/config.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/client/leaderelectionconfig/config.go @@ -24,6 +24,7 @@ import ( ) const ( + // DefaultLeaseDuration defines a default duration of lease. DefaultLeaseDuration = 15 * time.Second ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/BUILD deleted file mode 100644 index ed3344f6a31a..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/BUILD +++ /dev/null @@ -1,39 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "cloud.go", - "doc.go", - "plugins.go", - ], - importpath = "k8s.io/kubernetes/pkg/cloudprovider", - deps = [ - "//pkg/controller:go_default_library", - "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//staging/src/k8s.io/client-go/informers:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//pkg/cloudprovider/providers:all-srcs", - ], - tags = ["automanaged"], -) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/OWNERS deleted file mode 100644 index 314eb3d97ae5..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/OWNERS +++ /dev/null @@ -1,47 +0,0 @@ -approvers: -- mikedanese -- dims -- wlan0 -- andrewsykim -reviewers: -- thockin -- lavalamp -- smarterclayton -- wojtek-t -- deads2k -- brendandburns -- derekwaynecarr -- caesarxuchao -- vishh -- mikedanese -- liggitt -- gmarek -- erictune -- davidopp -- pmorie -- sttts -- quinton-hoole -- dchen1107 -- saad-ali -- zmerlynn -- luxas -- justinsb -- roberthbailey -- eparis -- jlowdermilk -- piosz -- jsafrane -- dims -- krousey -- rootfs -- jszczepkowski -- markturansky -- girishkalele -- jdef -- freehan -- jingxu97 -- wlan0 -- cheftako -- andrewsykim -labels: -- sig/cloud-provider 
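The deletions here and below (BUILD, OWNERS, README.md, plugins.go under `pkg/cloudprovider`) are one half of the cloud-provider extraction; the other half is visible later in this diff, where the AWS provider's BUILD file swaps `//pkg/cloudprovider` for `//staging/src/k8s.io/cloud-provider`. For consumers the change should amount to an import swap, sketched here on the assumption that `Factory` moved to the staging repo with an unchanged signature:

```go
package main

import (
	"fmt"
	"io"

	// Previously: "k8s.io/kubernetes/pkg/cloudprovider" (deleted above).
	cloudprovider "k8s.io/cloud-provider"
)

// The Factory signature is assumed to have moved verbatim, so a provider
// implementation only needs to point at the new import path.
var _ cloudprovider.Factory = func(config io.Reader) (cloudprovider.Interface, error) {
	return nil, nil
}

func main() {
	fmt.Println("registry now lives in k8s.io/cloud-provider")
}
```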
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/README.md b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/README.md deleted file mode 100644 index b59eba409f1e..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/README.md +++ /dev/null @@ -1,16 +0,0 @@ -##### Deprecation Notice: This directory has entered maintenance mode and will not be accepting new providers. Cloud Providers in this directory will continue to be actively developed or maintained and supported at their current level of support as a longer-term solution evolves. - -## Overview: -The mechanism for supporting cloud providers is currently in transition: the original method of implementing cloud provider-specific functionality within the main kubernetes tree (here) is no longer advised; however, the proposed solution is still in development. - -#### Guidance for potential cloud providers: -* Support for cloud providers is currently in a state of flux. Background information on motivation and the proposal for improving is in the github [proposal](https://git.k8s.io/community/contributors/design-proposals/cloud-provider/cloud-provider-refactoring.md). -* In support of this plan, a new cloud-controller-manager binary was added in 1.6. This was the first of several steps (see the proposal for more information). -* Attempts to contribute new cloud providers or (to a lesser extent) persistent volumes to the core repo will likely meet with some pushback from reviewers/approvers. -* It is understood that this is an unfortunate situation in which 'the old way is no longer supported but the new way is not ready yet', but the initial path is unsustainable, and contributors are encouraged to participate in the implementation of the proposed long-term solution, as there is risk that PRs for new cloud providers here will not be approved. -* Though the fully productized support envisioned in the proposal is still 2 - 3 releases out, the foundational work is underway, and a motivated cloud provider could accomplish the work in a forward-looking way. Contributors are encouraged to assist with the implementation of the design outlined in the proposal. - -#### Some additional context on status / direction: -* 1.6 added a new cloud-controller-manager binary that may be used for testing the new out-of-core cloudprovider flow. -* Setting cloud-provider=external allows for creation of a separate controller-manager binary -* 1.7 adds [extensible admission control](https://git.k8s.io/community/contributors/design-proposals/api-machinery/admission_control_extension.md), further enabling topology customization. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/plugins.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/plugins.go deleted file mode 100644 index 7777997f93d4..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/plugins.go +++ /dev/null @@ -1,142 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cloudprovider - -import ( - "fmt" - "io" - "os" - "sync" - - "github.com/golang/glog" -) - -// Factory is a function that returns a cloudprovider.Interface. -// The config parameter provides an io.Reader handler to the factory in -// order to load specific configurations. If no configuration is provided -// the parameter is nil. -type Factory func(config io.Reader) (Interface, error) - -// All registered cloud providers. -var ( - providersMutex sync.Mutex - providers = make(map[string]Factory) - deprecatedCloudProviders = []struct { - name string - external bool - detail string - }{ - {"openstack", true, "https://github.com/kubernetes/cloud-provider-openstack"}, - {"photon", false, "The Photon Controller project is no longer maintained."}, - } -) - -const externalCloudProvider = "external" - -// RegisterCloudProvider registers a cloudprovider.Factory by name. This -// is expected to happen during app startup. -func RegisterCloudProvider(name string, cloud Factory) { - providersMutex.Lock() - defer providersMutex.Unlock() - if _, found := providers[name]; found { - glog.Fatalf("Cloud provider %q was registered twice", name) - } - glog.V(1).Infof("Registered cloud provider %q", name) - providers[name] = cloud -} - -// IsCloudProvider returns true if name corresponds to an already registered -// cloud provider. -func IsCloudProvider(name string) bool { - providersMutex.Lock() - defer providersMutex.Unlock() - _, found := providers[name] - return found -} - -// GetCloudProvider creates an instance of the named cloud provider, or nil if -// the name is unknown. The error return is only used if the named provider -// was known but failed to initialize. The config parameter specifies the -// io.Reader handler of the configuration file for the cloud provider, or nil -// for no configuration. -func GetCloudProvider(name string, config io.Reader) (Interface, error) { - providersMutex.Lock() - defer providersMutex.Unlock() - f, found := providers[name] - if !found { - return nil, nil - } - return f(config) -} - -// Detects if the string is an external cloud provider -func IsExternal(name string) bool { - return name == externalCloudProvider -} - -// InitCloudProvider creates an instance of the named cloud provider. -func InitCloudProvider(name string, configFilePath string) (Interface, error) { - var cloud Interface - var err error - - if name == "" { - glog.Info("No cloud provider specified.") - return nil, nil - } - - if IsExternal(name) { - glog.Info("External cloud provider specified") - return nil, nil - } - - for _, provider := range deprecatedCloudProviders { - if provider.name == name { - detail := provider.detail - if provider.external { - detail = fmt.Sprintf("Please use 'external' cloud provider for %s: %s", name, provider.detail) - } - glog.Warningf("WARNING: %s built-in cloud provider is now deprecated. %s", name, detail) - - break - } - } - - if configFilePath != "" { - var config *os.File - config, err = os.Open(configFilePath) - if err != nil { - glog.Fatalf("Couldn't open cloud provider configuration %s: %#v", - configFilePath, err) - } - - defer config.Close() - cloud, err = GetCloudProvider(name, config) - } else { - // Pass explicit nil so plugins can actually check for nil. See - // "Why is my nil error value not equal to nil?" in golang.org/doc/faq. 
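The "Why is my nil error value not equal to nil?" comment in the deleted registry points at a real gotcha for any plugin-style API that accepts an interface: an interface value wrapping a nil `*os.File` is itself non-nil. A self-contained demonstration:

```go
package main

import (
	"fmt"
	"io"
	"os"
)

func main() {
	var f *os.File            // nil pointer
	var config io.Reader = f  // interface now holds (*os.File, nil)
	fmt.Println(config == nil) // false: the type descriptor makes it non-nil

	config = nil               // untyped nil, no type descriptor
	fmt.Println(config == nil) // true: this is what plugins can check for
}
```

This is why the deleted `InitCloudProvider` passes a literal `nil` instead of a nil `*os.File` when no configuration file is given.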
- cloud, err = GetCloudProvider(name, nil) - } - - if err != nil { - return nil, fmt.Errorf("could not init cloud provider %q: %v", name, err) - } - if cloud == nil { - return nil, fmt.Errorf("unknown cloud provider %q", name) - } - - return cloud, nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/BUILD index d57cb9493579..ca34b569b24a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/BUILD @@ -28,7 +28,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/aws", deps = [ "//pkg/api/v1/service:go_default_library", - "//pkg/cloudprovider:go_default_library", "//pkg/controller:go_default_library", "//pkg/credentialprovider/aws:go_default_library", "//pkg/kubelet/apis:go_default_library", @@ -44,6 +43,7 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", + "//staging/src/k8s.io/cloud-provider:go_default_library", "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", "//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library", "//vendor/github.com/aws/aws-sdk-go/aws/credentials:go_default_library", @@ -58,9 +58,9 @@ go_library( "//vendor/github.com/aws/aws-sdk-go/service/elbv2:go_default_library", "//vendor/github.com/aws/aws-sdk-go/service/kms:go_default_library", "//vendor/github.com/aws/aws-sdk-go/service/sts:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/gopkg.in/gcfg.v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go index 2a107db71ed3..d4f38dc75f4e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go @@ -22,13 +22,12 @@ import ( "fmt" "io" "net" + "path" "strconv" "strings" "sync" "time" - gcfg "gopkg.in/gcfg.v1" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" @@ -43,11 +42,8 @@ import ( "github.com/aws/aws-sdk-go/service/elbv2" "github.com/aws/aws-sdk-go/service/kms" "github.com/aws/aws-sdk-go/service/sts" - "github.com/golang/glog" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/record" - - "path" + gcfg "gopkg.in/gcfg.v1" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -55,10 +51,12 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" v1core "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/record" + cloudprovider "k8s.io/cloud-provider" "k8s.io/kubernetes/pkg/api/v1/service" - "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/controller" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/volume" @@ -141,9 +139,14 @@ const 
ServiceAnnotationLoadBalancerConnectionIdleTimeout = "service.beta.kuberne const ServiceAnnotationLoadBalancerCrossZoneLoadBalancingEnabled = "service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled" // ServiceAnnotationLoadBalancerExtraSecurityGroups is the annotation used -// one the service to specify additional security groups to be added to ELB created +// on the service to specify additional security groups to be added to ELB created const ServiceAnnotationLoadBalancerExtraSecurityGroups = "service.beta.kubernetes.io/aws-load-balancer-extra-security-groups" +// ServiceAnnotationLoadBalancerSecurityGroups is the annotation used +// on the service to specify the security groups to be added to ELB created. Differently from the annotation +// "service.beta.kubernetes.io/aws-load-balancer-extra-security-groups", this replaces all other security groups previously assigned to the ELB. +const ServiceAnnotationLoadBalancerSecurityGroups = "service.beta.kubernetes.io/aws-load-balancer-security-groups" + // ServiceAnnotationLoadBalancerCertificate is the annotation used on the // service to request a secure listener. Value is a valid certificate ARN. // For more, see http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-listener-config.html @@ -255,7 +258,7 @@ const MaxReadThenCreateRetries = 30 // need hardcoded defaults. const DefaultVolumeType = "gp2" -// Used to call RecognizeWellKnownRegions just once +// Used to call recognizeWellKnownRegions just once var once sync.Once // AWS implements PVLabeler. @@ -423,7 +426,7 @@ type VolumeOptions struct { Encrypted bool // fully qualified resource name to the key to use for encryption. // example: arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef - KmsKeyId string + KmsKeyID string } // Volumes is an interface for managing cloud-provisioned volumes @@ -506,7 +509,7 @@ type Cloud struct { // attached, to avoid a race condition where we assign a device mapping // and then get a second request before we attach the volume attachingMutex sync.Mutex - attaching map[types.NodeName]map[mountDevice]awsVolumeID + attaching map[types.NodeName]map[mountDevice]EBSVolumeID // state of our device allocator for each node deviceAllocators map[types.NodeName]DeviceAllocator @@ -964,7 +967,7 @@ func init() { Client: ec2metadata.New(sess), } } else { - glog.Infof("Using AWS assumed role %v", cfg.Global.RoleARN) + klog.Infof("Using AWS assumed role %v", cfg.Global.RoleARN) provider = &stscreds.AssumeRoleProvider{ Client: sts.New(sess), RoleARN: cfg.Global.RoleARN, @@ -1001,7 +1004,7 @@ func readAWSCloudConfig(config io.Reader) (*CloudConfig, error) { func updateConfigZone(cfg *CloudConfig, metadata EC2Metadata) error { if cfg.Global.Zone == "" { if metadata != nil { - glog.Info("Zone not specified in configuration file; querying AWS metadata service") + klog.Info("Zone not specified in configuration file; querying AWS metadata service") var err error cfg.Global.Zone, err = getAvailabilityZone(metadata) if err != nil { @@ -1016,10 +1019,6 @@ func updateConfigZone(cfg *CloudConfig, metadata EC2Metadata) error { return nil } -func getInstanceType(metadata EC2Metadata) (string, error) { - return metadata.GetMetadata("instance-type") -} - func getAvailabilityZone(metadata EC2Metadata) (string, error) { return metadata.GetMetadata("placement/availability-zone") } @@ -1039,7 +1038,7 @@ func azToRegion(az string) (string, error) { func newAWSCloud(cfg CloudConfig, awsServices Services) (*Cloud, error) { // 
We have some state in the Cloud object - in particular the attaching map // Log so that if we are building multiple Cloud objects, it is obvious! - glog.Infof("Building AWS cloudprovider") + klog.Infof("Building AWS cloudprovider") metadata, err := awsServices.Metadata() if err != nil { @@ -1062,7 +1061,7 @@ func newAWSCloud(cfg CloudConfig, awsServices Services) (*Cloud, error) { // Trust that if we get a region from configuration or AWS metadata that it is valid, // and register ECR providers - RecognizeRegion(regionName) + recognizeRegion(regionName) if !cfg.Global.DisableStrictZoneCheck { valid := isRegionValid(regionName) @@ -1071,7 +1070,7 @@ func newAWSCloud(cfg CloudConfig, awsServices Services) (*Cloud, error) { return nil, fmt.Errorf("not a valid AWS zone (unknown region): %s", zone) } } else { - glog.Warningf("Strict AWS zone checking is disabled. Proceeding with zone: %s", zone) + klog.Warningf("Strict AWS zone checking is disabled. Proceeding with zone: %s", zone) } ec2, err := awsServices.Compute(regionName) @@ -1109,7 +1108,7 @@ func newAWSCloud(cfg CloudConfig, awsServices Services) (*Cloud, error) { cfg: &cfg, region: regionName, - attaching: make(map[types.NodeName]map[mountDevice]awsVolumeID), + attaching: make(map[types.NodeName]map[mountDevice]EBSVolumeID), deviceAllocators: make(map[types.NodeName]DeviceAllocator), } awsCloud.instanceCache.cloud = awsCloud @@ -1118,7 +1117,7 @@ func newAWSCloud(cfg CloudConfig, awsServices Services) (*Cloud, error) { if cfg.Global.VPC != "" && (cfg.Global.SubnetID != "" || cfg.Global.RoleARN != "") && tagged { // When the master is running on a different AWS account, cloud provider or on-premise // build up a dummy instance and use the VPC from the nodes account - glog.Info("Master is configured to run on a different AWS account, different cloud provider or on-premises") + klog.Info("Master is configured to run on a different AWS account, different cloud provider or on-premises") awsCloud.selfAWSInstance = &awsInstance{ nodeName: "master-dummy", vpcID: cfg.Global.VPC, @@ -1151,18 +1150,18 @@ func newAWSCloud(cfg CloudConfig, awsServices Services) (*Cloud, error) { // Register regions, in particular for ECR credentials once.Do(func() { - RecognizeWellKnownRegions() + recognizeWellKnownRegions() }) return awsCloud, nil } // Initialize passes a Kubernetes clientBuilder interface to the cloud provider -func (c *Cloud) Initialize(clientBuilder controller.ControllerClientBuilder) { +func (c *Cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) { c.clientBuilder = clientBuilder c.kubeClient = clientBuilder.ClientOrDie("aws-cloud-provider") c.eventBroadcaster = record.NewBroadcaster() - c.eventBroadcaster.StartLogging(glog.Infof) + c.eventBroadcaster.StartLogging(klog.Infof) c.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.kubeClient.CoreV1().Events("")}) c.eventRecorder = c.eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "aws-cloud-provider"}) } @@ -1233,7 +1232,7 @@ func (c *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.No if err != nil { //TODO: It would be nice to be able to determine the reason for the failure, // but the AWS client masks all failures with the same error description. 
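The glog-to-klog rewrite running through this file (and the rest of the vendor sync) is mechanical because klog began as a fork of glog with the same call surface. A minimal sketch, assuming the vendored klog exposes `InitFlags` as current releases do:

```go
package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil) // registers -v, -logtostderr, ... on the default FlagSet
	flag.Parse()

	// Call sites translate one-for-one from glog:
	klog.Infof("Building AWS cloudprovider")
	klog.V(2).Infof("only emitted at -v=2 or higher")
	klog.Flush()
}
```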
- glog.V(4).Info("Could not determine public IP from AWS metadata.") + klog.V(4).Info("Could not determine public IP from AWS metadata.") } else { addresses = append(addresses, v1.NodeAddress{Type: v1.NodeExternalIP, Address: externalIP}) } @@ -1242,7 +1241,7 @@ func (c *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.No if err != nil || len(internalDNS) == 0 { //TODO: It would be nice to be able to determine the reason for the failure, // but the AWS client masks all failures with the same error description. - glog.V(4).Info("Could not determine private DNS from AWS metadata.") + klog.V(4).Info("Could not determine private DNS from AWS metadata.") } else { addresses = append(addresses, v1.NodeAddress{Type: v1.NodeInternalDNS, Address: internalDNS}) addresses = append(addresses, v1.NodeAddress{Type: v1.NodeHostName, Address: internalDNS}) @@ -1252,7 +1251,7 @@ func (c *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.No if err != nil || len(externalDNS) == 0 { //TODO: It would be nice to be able to determine the reason for the failure, // but the AWS client masks all failures with the same error description. - glog.V(4).Info("Could not determine public DNS from AWS metadata.") + klog.V(4).Info("Could not determine public DNS from AWS metadata.") } else { addresses = append(addresses, v1.NodeAddress{Type: v1.NodeExternalDNS, Address: externalDNS}) } @@ -1361,7 +1360,7 @@ func (c *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID strin state := instances[0].State.Name if *state == ec2.InstanceStateNameTerminated { - glog.Warningf("the instance %s is terminated", instanceID) + klog.Warningf("the instance %s is terminated", instanceID) return false, nil } @@ -1384,8 +1383,10 @@ func (c *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID str return false, err } if len(instances) == 0 { - glog.Warningf("the instance %s does not exist anymore", providerID) - return true, nil + klog.Warningf("the instance %s does not exist anymore", providerID) + // returns false, because otherwise node is not deleted from cluster + // false means that it will continue to check InstanceExistsByProviderID + return false, nil } if len(instances) > 1 { return false, fmt.Errorf("multiple instances found for instance: %s", instanceID) @@ -1395,7 +1396,7 @@ func (c *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID str if instance.State != nil { state := aws.StringValue(instance.State.Name) // valid state for detaching volumes - if state == ec2.InstanceStateNameStopped || state == ec2.InstanceStateNameTerminated { + if state == ec2.InstanceStateNameStopped { return true, nil } } @@ -1484,7 +1485,7 @@ func (c *Cloud) GetCandidateZonesForDynamicVolume() (sets.String, error) { } if master { - glog.V(4).Infof("Ignoring master instance %q in zone discovery", aws.StringValue(instance.InstanceId)) + klog.V(4).Infof("Ignoring master instance %q in zone discovery", aws.StringValue(instance.InstanceId)) continue } @@ -1494,7 +1495,7 @@ func (c *Cloud) GetCandidateZonesForDynamicVolume() (sets.String, error) { } } - glog.V(2).Infof("Found instances in zones %s", zones) + klog.V(2).Infof("Found instances in zones %s", zones) return zones, nil } @@ -1544,11 +1545,6 @@ func (c *Cloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) } -// Abstraction around AWS Instance Types -// There isn't an API to get information for a particular instance type (that I know of) -type awsInstanceType struct { -} - // Used to represent a mount 
device for attaching an EBS volume // This should be stored as a single letter (i.e. c, not sdc or /dev/sdc) type mountDevice string @@ -1594,13 +1590,6 @@ func newAWSInstance(ec2Service EC2, instance *ec2.Instance) *awsInstance { return self } -// Gets the awsInstanceType that models the instance type of this instance -func (i *awsInstance) getInstanceType() *awsInstanceType { - // TODO: Make this real - awsInstanceType := &awsInstanceType{} - return awsInstanceType -} - // Gets the full information about this instance from the EC2 API func (i *awsInstance) describeInstance() (*ec2.Instance, error) { return describeInstance(i.ec2, awsInstanceID(i.awsID)) @@ -1612,14 +1601,10 @@ func (i *awsInstance) describeInstance() (*ec2.Instance, error) { func (c *Cloud) getMountDevice( i *awsInstance, info *ec2.Instance, - volumeID awsVolumeID, + volumeID EBSVolumeID, assign bool) (assigned mountDevice, alreadyAttached bool, err error) { - instanceType := i.getInstanceType() - if instanceType == nil { - return "", false, fmt.Errorf("could not get instance type for instance: %s", i.awsID) - } - deviceMappings := map[mountDevice]awsVolumeID{} + deviceMappings := map[mountDevice]EBSVolumeID{} for _, blockDevice := range info.BlockDeviceMappings { name := aws.StringValue(blockDevice.DeviceName) if strings.HasPrefix(name, "/dev/sd") { @@ -1629,9 +1614,9 @@ func (c *Cloud) getMountDevice( name = name[8:] } if len(name) < 1 || len(name) > 2 { - glog.Warningf("Unexpected EBS DeviceName: %q", aws.StringValue(blockDevice.DeviceName)) + klog.Warningf("Unexpected EBS DeviceName: %q", aws.StringValue(blockDevice.DeviceName)) } - deviceMappings[mountDevice(name)] = awsVolumeID(aws.StringValue(blockDevice.Ebs.VolumeId)) + deviceMappings[mountDevice(name)] = EBSVolumeID(aws.StringValue(blockDevice.Ebs.VolumeId)) } // We lock to prevent concurrent mounts from conflicting @@ -1648,7 +1633,7 @@ func (c *Cloud) getMountDevice( for mountDevice, mappingVolumeID := range deviceMappings { if volumeID == mappingVolumeID { if assign { - glog.Warningf("Got assignment call for already-assigned volume: %s@%s", mountDevice, mappingVolumeID) + klog.Warningf("Got assignment call for already-assigned volume: %s@%s", mountDevice, mappingVolumeID) } return mountDevice, true, nil } @@ -1673,24 +1658,24 @@ func (c *Cloud) getMountDevice( chosen, err := deviceAllocator.GetNext(deviceMappings) if err != nil { - glog.Warningf("Could not assign a mount device. mappings=%v, error: %v", deviceMappings, err) - return "", false, fmt.Errorf("Too many EBS volumes attached to node %s.", i.nodeName) + klog.Warningf("Could not assign a mount device. 
mappings=%v, error: %v", deviceMappings, err) + return "", false, fmt.Errorf("too many EBS volumes attached to node %s", i.nodeName) } attaching := c.attaching[i.nodeName] if attaching == nil { - attaching = make(map[mountDevice]awsVolumeID) + attaching = make(map[mountDevice]EBSVolumeID) c.attaching[i.nodeName] = attaching } attaching[chosen] = volumeID - glog.V(2).Infof("Assigned mount device %s -> volume %s", chosen, volumeID) + klog.V(2).Infof("Assigned mount device %s -> volume %s", chosen, volumeID) return chosen, false, nil } // endAttaching removes the entry from the "attachments in progress" map // It returns true if it was found (and removed), false otherwise -func (c *Cloud) endAttaching(i *awsInstance, volumeID awsVolumeID, mountDevice mountDevice) bool { +func (c *Cloud) endAttaching(i *awsInstance, volumeID EBSVolumeID, mountDevice mountDevice) bool { c.attachingMutex.Lock() defer c.attachingMutex.Unlock() @@ -1703,10 +1688,10 @@ func (c *Cloud) endAttaching(i *awsInstance, volumeID awsVolumeID, mountDevice m // attached to the instance (as reported by the EC2 API). So if endAttaching comes after // a 10 second poll delay, we might well have had a concurrent request to allocate a mountpoint, // which because we allocate sequentially is _very_ likely to get the immediately freed volume - glog.Infof("endAttaching on device %q assigned to different volume: %q vs %q", mountDevice, volumeID, existingVolumeID) + klog.Infof("endAttaching on device %q assigned to different volume: %q vs %q", mountDevice, volumeID, existingVolumeID) return false } - glog.V(2).Infof("Releasing in-process attachment entry: %s -> volume %s", mountDevice, volumeID) + klog.V(2).Infof("Releasing in-process attachment entry: %s -> volume %s", mountDevice, volumeID) delete(c.attaching[i.nodeName], mountDevice) return true } @@ -1717,7 +1702,7 @@ type awsDisk struct { // Name in k8s name KubernetesVolumeID // id in AWS - awsID awsVolumeID + awsID EBSVolumeID } func newAWSDisk(aws *Cloud, name KubernetesVolumeID) (*awsDisk, error) { @@ -1830,7 +1815,7 @@ func (d *awsDisk) modifyVolume(requestGiB int64) (int64, error) { func (c *Cloud) applyUnSchedulableTaint(nodeName types.NodeName, reason string) { node, fetchErr := c.kubeClient.CoreV1().Nodes().Get(string(nodeName), metav1.GetOptions{}) if fetchErr != nil { - glog.Errorf("Error fetching node %s with %v", nodeName, fetchErr) + klog.Errorf("Error fetching node %s with %v", nodeName, fetchErr) return } @@ -1841,7 +1826,7 @@ func (c *Cloud) applyUnSchedulableTaint(nodeName types.NodeName, reason string) } err := controller.AddOrUpdateTaintOnNode(c.kubeClient, string(nodeName), taint) if err != nil { - glog.Errorf("Error applying taint to node %s with error %v", nodeName, err) + klog.Errorf("Error applying taint to node %s with error %v", nodeName, err) return } c.eventRecorder.Eventf(node, v1.EventTypeWarning, volumeAttachmentStuck, reason) @@ -1869,7 +1854,7 @@ func (d *awsDisk) waitForAttachmentStatus(status string) (*ec2.VolumeAttachment, if isAWSErrorVolumeNotFound(err) { if status == "detached" { // The disk doesn't exist, assume it's detached, log warning and stop waiting - glog.Warningf("Waiting for volume %q to be detached but the volume does not exist", d.awsID) + klog.Warningf("Waiting for volume %q to be detached but the volume does not exist", d.awsID) stateStr := "detached" attachment = &ec2.VolumeAttachment{ State: &stateStr, @@ -1878,7 +1863,7 @@ func (d *awsDisk) waitForAttachmentStatus(status string) (*ec2.VolumeAttachment, } if status == 
"attached" { // The disk doesn't exist, complain, give up waiting and report error - glog.Warningf("Waiting for volume %q to be attached but the volume does not exist", d.awsID) + klog.Warningf("Waiting for volume %q to be attached but the volume does not exist", d.awsID) return false, err } } @@ -1886,29 +1871,30 @@ func (d *awsDisk) waitForAttachmentStatus(status string) (*ec2.VolumeAttachment, if describeErrorCount > volumeAttachmentStatusConsecutiveErrorLimit { // report the error return false, err - } else { - glog.Warningf("Ignoring error from describe volume for volume %q; will retry: %q", d.awsID, err) - return false, nil } - } else { - describeErrorCount = 0 + + klog.Warningf("Ignoring error from describe volume for volume %q; will retry: %q", d.awsID, err) + return false, nil } + + describeErrorCount = 0 + if len(info.Attachments) > 1 { // Shouldn't happen; log so we know if it is - glog.Warningf("Found multiple attachments for volume %q: %v", d.awsID, info) + klog.Warningf("Found multiple attachments for volume %q: %v", d.awsID, info) } attachmentStatus := "" for _, a := range info.Attachments { if attachmentStatus != "" { // Shouldn't happen; log so we know if it is - glog.Warningf("Found multiple attachments for volume %q: %v", d.awsID, info) + klog.Warningf("Found multiple attachments for volume %q: %v", d.awsID, info) } if a.State != nil { attachment = a attachmentStatus = *a.State } else { // Shouldn't happen; log so we know if it is - glog.Warningf("Ignoring nil attachment state for volume %q: %v", d.awsID, a) + klog.Warningf("Ignoring nil attachment state for volume %q: %v", d.awsID, a) } } if attachmentStatus == "" { @@ -1919,7 +1905,7 @@ func (d *awsDisk) waitForAttachmentStatus(status string) (*ec2.VolumeAttachment, return true, nil } // continue waiting - glog.V(2).Infof("Waiting for volume %q state: actual=%s, desired=%s", d.awsID, attachmentStatus, status) + klog.V(2).Infof("Waiting for volume %q state: actual=%s, desired=%s", d.awsID, attachmentStatus, status) return false, nil }) @@ -1970,23 +1956,6 @@ func (c *Cloud) buildSelfAWSInstance() (*awsInstance, error) { return newAWSInstance(c.ec2, instance), nil } -// Gets the awsInstance with for the node with the specified nodeName, or the 'self' instance if nodeName == "" -func (c *Cloud) getAwsInstance(nodeName types.NodeName) (*awsInstance, error) { - var awsInstance *awsInstance - if nodeName == "" { - awsInstance = c.selfAWSInstance - } else { - instance, err := c.getInstanceByNodeName(nodeName) - if err != nil { - return nil, err - } - - awsInstance = newAWSInstance(c.ec2, instance) - } - - return awsInstance, nil -} - // wrapAttachError wraps the error returned by an AttachVolume request with // additional information, if needed and possible. 
func wrapAttachError(err error, disk *awsDisk, instance string) error { @@ -1994,11 +1963,11 @@ func wrapAttachError(err error, disk *awsDisk, instance string) error { if awsError.Code() == "VolumeInUse" { info, err := disk.describeVolume() if err != nil { - glog.Errorf("Error describing volume %q: %q", disk.awsID, err) + klog.Errorf("Error describing volume %q: %q", disk.awsID, err) } else { for _, a := range info.Attachments { - if disk.awsID != awsVolumeID(aws.StringValue(a.VolumeId)) { - glog.Warningf("Expected to get attachment info of volume %q but instead got info of %q", disk.awsID, aws.StringValue(a.VolumeId)) + if disk.awsID != EBSVolumeID(aws.StringValue(a.VolumeId)) { + klog.Warningf("Expected to get attachment info of volume %q but instead got info of %q", disk.awsID, aws.StringValue(a.VolumeId)) } else if aws.StringValue(a.State) == "attached" { return fmt.Errorf("Error attaching EBS volume %q to instance %q: %q. The volume is currently attached to instance %q", disk.awsID, instance, awsError, aws.StringValue(a.InstanceId)) } @@ -2032,7 +2001,7 @@ func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) defer func() { if attachEnded { if !c.endAttaching(awsInstance, disk.awsID, mountDevice) { - glog.Errorf("endAttaching called for disk %q when attach not in progress", disk.awsID) + klog.Errorf("endAttaching called for disk %q when attach not in progress", disk.awsID) } } }() @@ -2051,7 +2020,7 @@ func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) if !alreadyAttached { available, err := c.checkIfAvailable(disk, "attaching", awsInstance.awsID) if err != nil { - glog.Error(err) + klog.Error(err) } if !available { @@ -2073,7 +2042,7 @@ func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) if da, ok := c.deviceAllocators[awsInstance.nodeName]; ok { da.Deprioritize(mountDevice) } - glog.V(2).Infof("AttachVolume volume=%q instance=%q request returned %v", disk.awsID, awsInstance.awsID, attachResponse) + klog.V(2).Infof("AttachVolume volume=%q instance=%q request returned %v", disk.awsID, awsInstance.awsID, attachResponse) } attachment, err := disk.waitForAttachmentStatus("attached") @@ -2111,15 +2080,15 @@ func (c *Cloud) DetachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) if err != nil { if isAWSErrorVolumeNotFound(err) { // Someone deleted the volume being detached; complain, but do nothing else and return success - glog.Warningf("DetachDisk %s called for node %s but volume does not exist; assuming the volume is detached", diskName, nodeName) + klog.Warningf("DetachDisk %s called for node %s but volume does not exist; assuming the volume is detached", diskName, nodeName) return "", nil - } else { - return "", err } + + return "", err } if !attached && diskInfo.ec2Instance != nil { - glog.Warningf("DetachDisk %s called for node %s but volume is attached to node %s", diskName, nodeName, diskInfo.nodeName) + klog.Warningf("DetachDisk %s called for node %s but volume is attached to node %s", diskName, nodeName, diskInfo.nodeName) return "", nil } @@ -2135,7 +2104,7 @@ func (c *Cloud) DetachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) } if !alreadyAttached { - glog.Warningf("DetachDisk called on non-attached disk: %s", diskName) + klog.Warningf("DetachDisk called on non-attached disk: %s", diskName) // TODO: Continue? Tolerate non-attached error from the AWS DetachVolume call? 
} @@ -2162,7 +2131,7 @@ func (c *Cloud) DetachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) } if attachment != nil { // We expect it to be nil; it is (maybe) interesting if it is not - glog.V(2).Infof("waitForAttachmentStatus returned non-nil attachment with state=detached: %v", attachment) + klog.V(2).Infof("waitForAttachmentStatus returned non-nil attachment with state=detached: %v", attachment) } if mountDevice != "" { @@ -2206,47 +2175,50 @@ func (c *Cloud) CreateDisk(volumeOptions *VolumeOptions) (KubernetesVolumeID, er return "", fmt.Errorf("invalid AWS VolumeType %q", volumeOptions.VolumeType) } - // TODO: Should we tag this with the cluster id (so it gets deleted when the cluster does?) request := &ec2.CreateVolumeInput{} request.AvailabilityZone = aws.String(volumeOptions.AvailabilityZone) request.Size = aws.Int64(int64(volumeOptions.CapacityGB)) request.VolumeType = aws.String(createType) request.Encrypted = aws.Bool(volumeOptions.Encrypted) - if len(volumeOptions.KmsKeyId) > 0 { - request.KmsKeyId = aws.String(volumeOptions.KmsKeyId) + if len(volumeOptions.KmsKeyID) > 0 { + request.KmsKeyId = aws.String(volumeOptions.KmsKeyID) request.Encrypted = aws.Bool(true) } if iops > 0 { request.Iops = aws.Int64(iops) } + + tags := volumeOptions.Tags + tags = c.tagging.buildTags(ResourceLifecycleOwned, tags) + + var tagList []*ec2.Tag + for k, v := range tags { + tagList = append(tagList, &ec2.Tag{ + Key: aws.String(k), Value: aws.String(v), + }) + } + request.TagSpecifications = append(request.TagSpecifications, &ec2.TagSpecification{ + Tags: tagList, + ResourceType: aws.String(ec2.ResourceTypeVolume), + }) + response, err := c.ec2.CreateVolume(request) if err != nil { return "", err } - awsID := awsVolumeID(aws.StringValue(response.VolumeId)) + awsID := EBSVolumeID(aws.StringValue(response.VolumeId)) if awsID == "" { return "", fmt.Errorf("VolumeID was not returned by CreateVolume") } volumeName := KubernetesVolumeID("aws://" + aws.StringValue(response.AvailabilityZone) + "/" + string(awsID)) - // apply tags - if err := c.tagging.createTags(c.ec2, string(awsID), ResourceLifecycleOwned, volumeOptions.Tags); err != nil { - // delete the volume and hope it succeeds - _, delerr := c.DeleteDisk(volumeName) - if delerr != nil { - // delete did not succeed, we have a stray volume! - return "", fmt.Errorf("error tagging volume %s, could not delete the volume: %q", volumeName, delerr) - } - return "", fmt.Errorf("error tagging volume %s: %q", volumeName, err) - } - // AWS has a bad habit of reporting success when creating a volume with // encryption keys that either don't exist or have wrong permissions. // Such a volume lives for a couple of seconds and then it's silently deleted // by AWS. There is no other check to ensure that the given KMS key is correct, // because Kubernetes may have limited permissions to the key.
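The `CreateDisk` hunk above is more than the `KmsKeyID` rename: tags that were previously applied in a follow-up `createTags` call, with a best-effort volume delete when tagging failed, now travel inside the `CreateVolume` request, making creation and tagging atomic. A sketch of building such a request with aws-sdk-go; `buildCreateVolumeInput` is a hypothetical helper:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// buildCreateVolumeInput attaches tags via TagSpecifications, eliminating
// the old tag-after-create window that could leak an untagged volume.
func buildCreateVolumeInput(az string, sizeGiB int64, tags map[string]string) *ec2.CreateVolumeInput {
	var tagList []*ec2.Tag
	for k, v := range tags {
		tagList = append(tagList, &ec2.Tag{Key: aws.String(k), Value: aws.String(v)})
	}
	return &ec2.CreateVolumeInput{
		AvailabilityZone: aws.String(az),
		Size:             aws.Int64(sizeGiB),
		VolumeType:       aws.String("gp2"),
		TagSpecifications: []*ec2.TagSpecification{{
			ResourceType: aws.String(ec2.ResourceTypeVolume),
			Tags:         tagList,
		}},
	}
}

func main() {
	input := buildCreateVolumeInput("us-east-1a", 100, map[string]string{
		"KubernetesCluster": "demo", // illustrative tag, not the exact key the provider uses
	})
	fmt.Println(input)
}
```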
- if len(volumeOptions.KmsKeyId) > 0 { + if len(volumeOptions.KmsKeyID) > 0 { err := c.waitUntilVolumeAvailable(volumeName) if err != nil { if isAWSErrorVolumeNotFound(err) { @@ -2296,10 +2268,10 @@ func (c *Cloud) DeleteDisk(volumeName KubernetesVolumeID) (bool, error) { available, err := c.checkIfAvailable(awsDisk, "deleting", "") if err != nil { if isAWSErrorVolumeNotFound(err) { - glog.V(2).Infof("Volume %s not found when deleting it, assuming it's deleted", awsDisk.awsID) + klog.V(2).Infof("Volume %s not found when deleting it, assuming it's deleted", awsDisk.awsID) return false, nil } - glog.Error(err) + klog.Error(err) } if !available { @@ -2313,7 +2285,7 @@ func (c *Cloud) checkIfAvailable(disk *awsDisk, opName string, instance string) info, err := disk.describeVolume() if err != nil { - glog.Errorf("Error describing volume %q: %q", disk.awsID, err) + klog.Errorf("Error describing volume %q: %q", disk.awsID, err) // if for some reason we can not describe volume we will return error return false, err } @@ -2329,11 +2301,11 @@ func (c *Cloud) checkIfAvailable(disk *awsDisk, opName string, instance string) // Volume is attached somewhere else and we can not attach it here if len(info.Attachments) > 0 { attachment := info.Attachments[0] - instanceId := aws.StringValue(attachment.InstanceId) - attachedInstance, ierr := c.getInstanceByID(instanceId) - attachErr := fmt.Sprintf("%s since volume is currently attached to %q", opError, instanceId) + instanceID := aws.StringValue(attachment.InstanceId) + attachedInstance, ierr := c.getInstanceByID(instanceID) + attachErr := fmt.Sprintf("%s since volume is currently attached to %q", opError, instanceID) if ierr != nil { - glog.Error(attachErr) + klog.Error(attachErr) return false, errors.New(attachErr) } devicePath := aws.StringValue(attachment.Device) @@ -2350,6 +2322,7 @@ func (c *Cloud) checkIfAvailable(disk *awsDisk, opName string, instance string) return true, nil } +// GetLabelsForVolume gets the volume labels for a volume func (c *Cloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error) { // Ignore any volumes that are being provisioned if pv.Spec.AWSElasticBlockStore.VolumeID == volume.ProvisionedVolumeName { @@ -2413,16 +2386,18 @@ func (c *Cloud) DiskIsAttached(diskName KubernetesVolumeID, nodeName types.NodeN if err != nil { if isAWSErrorVolumeNotFound(err) { // The disk doesn't exist, can't be attached - glog.Warningf("DiskIsAttached called for volume %s on node %s but the volume does not exist", diskName, nodeName) + klog.Warningf("DiskIsAttached called for volume %s on node %s but the volume does not exist", diskName, nodeName) return false, nil - } else { - return true, err } + + return true, err } return attached, nil } +// DisksAreAttached returns a map of nodes and Kubernetes volume IDs indicating +// if the volumes are attached to the node func (c *Cloud) DisksAreAttached(nodeDisks map[types.NodeName][]KubernetesVolumeID) (map[types.NodeName]map[KubernetesVolumeID]bool, error) { attached := make(map[types.NodeName]map[KubernetesVolumeID]bool) @@ -2448,7 +2423,7 @@ func (c *Cloud) DisksAreAttached(nodeDisks map[types.NodeName][]KubernetesVolume } if len(awsInstances) == 0 { - glog.V(2).Infof("DisksAreAttached found no instances matching node names; will assume disks not attached") + klog.V(2).Infof("DisksAreAttached found no instances matching node names; will assume disks not attached") return attached, nil } @@ -2471,7 +2446,7 @@ func (c *Cloud) DisksAreAttached(nodeDisks 
map[types.NodeName][]KubernetesVolume continue } - idToDiskName := make(map[awsVolumeID]KubernetesVolumeID) + idToDiskName := make(map[EBSVolumeID]KubernetesVolumeID) for _, diskName := range diskNames { volumeID, err := diskName.MapToAWSVolumeID() if err != nil { @@ -2481,7 +2456,7 @@ func (c *Cloud) DisksAreAttached(nodeDisks map[types.NodeName][]KubernetesVolume } for _, blockDevice := range awsInstance.BlockDeviceMappings { - volumeID := awsVolumeID(aws.StringValue(blockDevice.Ebs.VolumeId)) + volumeID := EBSVolumeID(aws.StringValue(blockDevice.Ebs.VolumeId)) diskName, found := idToDiskName[volumeID] if found { // Disk is still attached to node @@ -2493,6 +2468,8 @@ func (c *Cloud) DisksAreAttached(nodeDisks map[types.NodeName][]KubernetesVolume return attached, nil } +// ResizeDisk resizes an EBS volume in GiB increments; it will round up to the +// next GiB if arguments are not provided in even GiB increments func (c *Cloud) ResizeDisk( diskName KubernetesVolumeID, oldSize resource.Quantity, @@ -2541,7 +2518,7 @@ func (c *Cloud) describeLoadBalancer(name string) (*elb.LoadBalancerDescription, var ret *elb.LoadBalancerDescription for _, loadBalancer := range response.LoadBalancerDescriptions { if ret != nil { - glog.Errorf("Found multiple load balancers with name: %s", name) + klog.Errorf("Found multiple load balancers with name: %s", name) } ret = loadBalancer } @@ -2626,7 +2603,7 @@ func (c *Cloud) findSecurityGroup(securityGroupID string) (*ec2.SecurityGroup, e groups, err := c.ec2.DescribeSecurityGroups(describeSecurityGroupsRequest) if err != nil { - glog.Warningf("Error retrieving security group: %q", err) + klog.Warningf("Error retrieving security group: %q", err) return nil, err } @@ -2673,7 +2650,7 @@ func ipPermissionExists(newPermission, existing *ec2.IpPermission, compareGroupU } // Check only if newPermission is a subset of existing. Usually it has zero or one elements. // Not doing actual CIDR math yet; not clear it's needed, either. - glog.V(4).Infof("Comparing %v to %v", newPermission, existing) + klog.V(4).Infof("Comparing %v to %v", newPermission, existing) if len(newPermission.IpRanges) > len(existing.IpRanges) { return false } @@ -2708,7 +2685,7 @@ func ipPermissionExists(newPermission, existing *ec2.IpPermission, compareGroupU } func isEqualUserGroupPair(l, r *ec2.UserIdGroupPair, compareGroupUserIDs bool) bool { - glog.V(2).Infof("Comparing %v to %v", *l.GroupId, *r.GroupId) + klog.V(2).Infof("Comparing %v to %v", *l.GroupId, *r.GroupId) if isEqualStringPointer(l.GroupId, r.GroupId) { if compareGroupUserIDs { if isEqualStringPointer(l.UserId, r.UserId) { @@ -2733,7 +2710,7 @@ func (c *Cloud) setSecurityGroupIngress(securityGroupID string, permissions IPPe group, err := c.findSecurityGroup(securityGroupID) if err != nil { - glog.Warningf("Error retrieving security group %q", err) + klog.Warningf("Error retrieving security group %q", err) return false, err } @@ -2741,7 +2718,7 @@ func (c *Cloud) setSecurityGroupIngress(securityGroupID string, permissions IPPe return false, fmt.Errorf("security group not found: %s", securityGroupID) } - glog.V(2).Infof("Existing security group ingress: %s %v", securityGroupID, group.IpPermissions) + klog.V(2).Infof("Existing security group ingress: %s %v", securityGroupID, group.IpPermissions) actual := NewIPPermissionSet(group.IpPermissions...) @@ -2772,7 +2749,7 @@ func (c *Cloud) setSecurityGroupIngress(securityGroupID string, permissions IPPe // don't want to accidentally open more than intended while we're // applying changes.
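The reconciliation that continues below works by set difference: compare the desired permission set against what the group currently holds, then authorize only the additions and revoke only the removals. A simplified sketch of the idea, with a plain string key standing in for the canonical form the real IPPermissionSet derives from *ec2.IpPermission:

```go
package main

import "fmt"

// permSet is a simplified stand-in for the file's IPPermissionSet; a
// real key would canonically encode protocol, port range and source
// (CIDR or security group).
type permSet map[string]bool

func (s permSet) difference(other permSet) permSet {
	out := permSet{}
	for k := range s {
		if !other[k] {
			out[k] = true
		}
	}
	return out
}

func main() {
	actual := permSet{"tcp/80/sg-lb": true, "tcp/22/10.0.0.0/8": true}
	desired := permSet{"tcp/80/sg-lb": true, "tcp/443/sg-lb": true}

	add := desired.difference(actual)    // authorize only these
	remove := actual.difference(desired) // revoke only these

	// Only the delta is applied, so rules that are already correct are
	// never touched and nothing outside the desired set is authorized.
	fmt.Println("authorize:", add, "revoke:", remove)
}
```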
if add.Len() != 0 { - glog.V(2).Infof("Adding security group ingress: %s %v", securityGroupID, add.List()) + klog.V(2).Infof("Adding security group ingress: %s %v", securityGroupID, add.List()) request := &ec2.AuthorizeSecurityGroupIngressInput{} request.GroupId = &securityGroupID @@ -2783,7 +2760,7 @@ func (c *Cloud) setSecurityGroupIngress(securityGroupID string, permissions IPPe } } if remove.Len() != 0 { - glog.V(2).Infof("Remove security group ingress: %s %v", securityGroupID, remove.List()) + klog.V(2).Infof("Remove security group ingress: %s %v", securityGroupID, remove.List()) request := &ec2.RevokeSecurityGroupIngressInput{} request.GroupId = &securityGroupID @@ -2808,7 +2785,7 @@ func (c *Cloud) addSecurityGroupIngress(securityGroupID string, addPermissions [ group, err := c.findSecurityGroup(securityGroupID) if err != nil { - glog.Warningf("Error retrieving security group: %q", err) + klog.Warningf("Error retrieving security group: %q", err) return false, err } @@ -2816,7 +2793,7 @@ func (c *Cloud) addSecurityGroupIngress(securityGroupID string, addPermissions [ return false, fmt.Errorf("security group not found: %s", securityGroupID) } - glog.V(2).Infof("Existing security group ingress: %s %v", securityGroupID, group.IpPermissions) + klog.V(2).Infof("Existing security group ingress: %s %v", securityGroupID, group.IpPermissions) changes := []*ec2.IpPermission{} for _, addPermission := range addPermissions { @@ -2844,14 +2821,14 @@ func (c *Cloud) addSecurityGroupIngress(securityGroupID string, addPermissions [ return false, nil } - glog.V(2).Infof("Adding security group ingress: %s %v", securityGroupID, changes) + klog.V(2).Infof("Adding security group ingress: %s %v", securityGroupID, changes) request := &ec2.AuthorizeSecurityGroupIngressInput{} request.GroupId = &securityGroupID request.IpPermissions = changes _, err = c.ec2.AuthorizeSecurityGroupIngress(request) if err != nil { - glog.Warningf("Error authorizing security group ingress %q", err) + klog.Warningf("Error authorizing security group ingress %q", err) return false, fmt.Errorf("error authorizing security group ingress: %q", err) } @@ -2869,12 +2846,12 @@ func (c *Cloud) removeSecurityGroupIngress(securityGroupID string, removePermiss group, err := c.findSecurityGroup(securityGroupID) if err != nil { - glog.Warningf("Error retrieving security group: %q", err) + klog.Warningf("Error retrieving security group: %q", err) return false, err } if group == nil { - glog.Warning("Security group not found: ", securityGroupID) + klog.Warning("Security group not found: ", securityGroupID) return false, nil } @@ -2904,14 +2881,14 @@ func (c *Cloud) removeSecurityGroupIngress(securityGroupID string, removePermiss return false, nil } - glog.V(2).Infof("Removing security group ingress: %s %v", securityGroupID, changes) + klog.V(2).Infof("Removing security group ingress: %s %v", securityGroupID, changes) request := &ec2.RevokeSecurityGroupIngressInput{} request.GroupId = &securityGroupID request.IpPermissions = changes _, err = c.ec2.RevokeSecurityGroupIngress(request) if err != nil { - glog.Warningf("Error revoking security group ingress: %q", err) + klog.Warningf("Error revoking security group ingress: %q", err) return false, err } @@ -2947,7 +2924,7 @@ func (c *Cloud) ensureSecurityGroup(name string, description string, additionalT if len(securityGroups) >= 1 { if len(securityGroups) > 1 { - glog.Warningf("Found multiple security groups with name: %q", name) + klog.Warningf("Found multiple security groups with name: %q", name) } 
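The creation path in ensureSecurityGroup, continued below, also has to survive a read-then-create race: if another controller creates the group between the lookup and the CreateSecurityGroup call, AWS answers with InvalidGroup.Duplicate and the loop simply re-reads. A sketch of that pattern under the same aws-sdk-go assumptions; the helper name and retry budget are illustrative:

```go
package sketch

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// ensureGroup looks a security group up by name and creates it only if
// the lookup finds nothing; InvalidGroup.Duplicate means we lost a
// creation race, so we re-read instead of failing.
func ensureGroup(svc *ec2.EC2, vpcID, name, desc string) (string, error) {
	for attempt := 0; attempt < 10; attempt++ {
		out, err := svc.DescribeSecurityGroups(&ec2.DescribeSecurityGroupsInput{
			Filters: []*ec2.Filter{
				{Name: aws.String("group-name"), Values: []*string{aws.String(name)}},
				{Name: aws.String("vpc-id"), Values: []*string{aws.String(vpcID)}},
			},
		})
		if err != nil {
			return "", err
		}
		if len(out.SecurityGroups) > 0 {
			return aws.StringValue(out.SecurityGroups[0].GroupId), nil
		}
		created, err := svc.CreateSecurityGroup(&ec2.CreateSecurityGroupInput{
			VpcId:       aws.String(vpcID),
			GroupName:   aws.String(name),
			Description: aws.String(desc),
		})
		if err == nil {
			return aws.StringValue(created.GroupId), nil
		}
		if awsErr, ok := err.(awserr.Error); !ok || awsErr.Code() != "InvalidGroup.Duplicate" {
			return "", err
		}
		time.Sleep(time.Second) // lost the race; re-read on the next pass
	}
	return "", fmt.Errorf("gave up ensuring security group %q", name)
}
```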
err := c.tagging.readRepairClusterTags( c.ec2, aws.StringValue(securityGroups[0].GroupId), @@ -2970,12 +2947,12 @@ func (c *Cloud) ensureSecurityGroup(name string, description string, additionalT switch err := err.(type) { case awserr.Error: if err.Code() == "InvalidGroup.Duplicate" && attempt < MaxReadThenCreateRetries { - glog.V(2).Infof("Got InvalidGroup.Duplicate while creating security group (race?); will retry") + klog.V(2).Infof("Got InvalidGroup.Duplicate while creating security group (race?); will retry") ignore = true } } if !ignore { - glog.Errorf("Error creating security group: %q", err) + klog.Errorf("Error creating security group: %q", err) return "", err } time.Sleep(1 * time.Second) @@ -3034,7 +3011,7 @@ func (c *Cloud) findSubnets() ([]*ec2.Subnet, error) { } // Fall back to the current instance subnets, if nothing is tagged - glog.Warningf("No tagged subnets found; will fall-back to the current subnet only. This is likely to be an error in a future version of k8s.") + klog.Warningf("No tagged subnets found; will fall-back to the current subnet only. This is likely to be an error in a future version of k8s.") request = &ec2.DescribeSubnetsInput{} filters = []*ec2.Filter{newEc2Filter("subnet-id", c.selfAWSInstance.subnetID)} @@ -3071,7 +3048,7 @@ func (c *Cloud) findELBSubnets(internalELB bool) ([]string, error) { az := aws.StringValue(subnet.AvailabilityZone) id := aws.StringValue(subnet.SubnetId) if az == "" || id == "" { - glog.Warningf("Ignoring subnet with empty az/id: %v", subnet) + klog.Warningf("Ignoring subnet with empty az/id: %v", subnet) continue } @@ -3080,7 +3057,7 @@ func (c *Cloud) findELBSubnets(internalELB bool) ([]string, error) { return nil, err } if !internalELB && !isPublic { - glog.V(2).Infof("Ignoring private subnet for public ELB %q", id) + klog.V(2).Infof("Ignoring private subnet for public ELB %q", id) continue } @@ -3111,12 +3088,12 @@ func (c *Cloud) findELBSubnets(internalELB bool) ([]string, error) { // If we have two subnets for the same AZ we arbitrarily choose the one that is first lexicographically. // TODO: Should this be an error. if strings.Compare(*existing.SubnetId, *subnet.SubnetId) > 0 { - glog.Warningf("Found multiple subnets in AZ %q; choosing %q between subnets %q and %q", az, *subnet.SubnetId, *existing.SubnetId, *subnet.SubnetId) + klog.Warningf("Found multiple subnets in AZ %q; choosing %q between subnets %q and %q", az, *subnet.SubnetId, *existing.SubnetId, *subnet.SubnetId) subnetsByAZ[az] = subnet continue } - glog.Warningf("Found multiple subnets in AZ %q; choosing %q between subnets %q and %q", az, *existing.SubnetId, *existing.SubnetId, *subnet.SubnetId) + klog.Warningf("Found multiple subnets in AZ %q; choosing %q between subnets %q and %q", az, *existing.SubnetId, *existing.SubnetId, *subnet.SubnetId) continue } @@ -3145,7 +3122,7 @@ func isSubnetPublic(rt []*ec2.RouteTable, subnetID string) (bool, error) { for _, table := range rt { for _, assoc := range table.Associations { if aws.BoolValue(assoc.Main) == true { - glog.V(4).Infof("Assuming implicit use of main routing table %s for %s", + klog.V(4).Infof("Assuming implicit use of main routing table %s for %s", aws.StringValue(table.RouteTableId), subnetID) subnetTable = table break @@ -3204,7 +3181,8 @@ func getPortSets(annotation string) (ports *portSets) { // attached to ELB created by a service. List always consist of at least // 1 member which is an SG created for this service or a SG from the Global config. 
// Extra groups can be specified via annotation, as can extra tags for any -// new groups. +// new groups. The annotation "ServiceAnnotationLoadBalancerSecurityGroups" allows +// replacing the default security group with the groups it specifies. func (c *Cloud) buildELBSecurityGroupList(serviceName types.NamespacedName, loadBalancerName string, annotations map[string]string) ([]string, error) { var err error var securityGroupID string @@ -3217,11 +3195,24 @@ func (c *Cloud) buildELBSecurityGroupList(serviceName types.NamespacedName, load sgDescription := fmt.Sprintf("Security group for Kubernetes ELB %s (%v)", loadBalancerName, serviceName) securityGroupID, err = c.ensureSecurityGroup(sgName, sgDescription, getLoadBalancerAdditionalTags(annotations)) if err != nil { - glog.Errorf("Error creating load balancer security group: %q", err) + klog.Errorf("Error creating load balancer security group: %q", err) return nil, err } } - sgList := []string{securityGroupID} + + sgList := []string{} + + for _, extraSG := range strings.Split(annotations[ServiceAnnotationLoadBalancerSecurityGroups], ",") { + extraSG = strings.TrimSpace(extraSG) + if len(extraSG) > 0 { + sgList = append(sgList, extraSG) + } + } + + // If no Security Groups have been specified with the ServiceAnnotationLoadBalancerSecurityGroups annotation, we add the default one. + if len(sgList) == 0 { + sgList = append(sgList, securityGroupID) + } for _, extraSG := range strings.Split(annotations[ServiceAnnotationLoadBalancerExtraSecurityGroups], ",") { extraSG = strings.TrimSpace(extraSG) @@ -3272,7 +3263,7 @@ func buildListener(port v1.ServicePort, annotations map[string]string, sslPorts // EnsureLoadBalancer implements LoadBalancer.EnsureLoadBalancer func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiService *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { annotations := apiService.Annotations - glog.V(2).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)", + klog.V(2).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)", clusterName, apiService.Namespace, apiService.Name, c.region, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, annotations) if apiService.Spec.SessionAffinity != v1.ServiceAffinityNone { @@ -3294,7 +3285,7 @@ func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiS return nil, fmt.Errorf("Only TCP LoadBalancer is supported for AWS ELB") } if port.NodePort == 0 { - glog.Errorf("Ignoring port without NodePort defined: %v", port) + klog.Errorf("Ignoring port without NodePort defined: %v", port) continue } @@ -3332,7 +3323,9 @@ func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiS // Determine if this is tagged as an Internal ELB internalELB := false internalAnnotation := apiService.Annotations[ServiceAnnotationLoadBalancerInternal] - if internalAnnotation != "" { + if internalAnnotation == "false" { + internalELB = false + } else if internalAnnotation != "" { internalELB = true } @@ -3349,7 +3342,7 @@ func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiS // Find the subnets that the ELB will live in subnetIDs, err := c.findELBSubnets(internalELB) if err != nil { - glog.Errorf("Error listing subnets in VPC: %q", err) + klog.Errorf("Error listing subnets in VPC: %q", err) return nil, err } // Bail out early if there are no subnets @@ -3388,7 +3381,7 @@ func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiS err = c.updateInstanceSecurityGroupsForNLB(v2Mappings, instances, loadBalancerName,
sourceRangeCidrs) if err != nil { - glog.Warningf("Error opening ingress rules for the load balancer to the instances: %q", err) + klog.Warningf("Error opening ingress rules for the load balancer to the instances: %q", err) return nil, err } @@ -3510,7 +3503,7 @@ func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiS // Find the subnets that the ELB will live in subnetIDs, err := c.findELBSubnets(internalELB) if err != nil { - glog.Errorf("Error listing subnets in VPC: %q", err) + klog.Errorf("Error listing subnets in VPC: %q", err) return nil, err } @@ -3597,13 +3590,13 @@ func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiS } if path, healthCheckNodePort := service.GetServiceHealthCheckPathPort(apiService); path != "" { - glog.V(4).Infof("service %v (%v) needs health checks on :%d%s)", apiService.Name, loadBalancerName, healthCheckNodePort, path) + klog.V(4).Infof("service %v (%v) needs health checks on :%d%s)", apiService.Name, loadBalancerName, healthCheckNodePort, path) err = c.ensureLoadBalancerHealthCheck(loadBalancer, "HTTP", healthCheckNodePort, path, annotations) if err != nil { return nil, fmt.Errorf("Failed to ensure health check for localized service %v on node port %v: %q", loadBalancerName, healthCheckNodePort, err) } } else { - glog.V(4).Infof("service %v does not need custom health checks", apiService.Name) + klog.V(4).Infof("service %v does not need custom health checks", apiService.Name) // We only configure a TCP health-check on the first port var tcpHealthCheckPort int32 for _, listener := range listeners { @@ -3622,17 +3615,17 @@ func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiS err = c.updateInstanceSecurityGroupsForLoadBalancer(loadBalancer, instances) if err != nil { - glog.Warningf("Error opening ingress rules for the load balancer to the instances: %q", err) + klog.Warningf("Error opening ingress rules for the load balancer to the instances: %q", err) return nil, err } err = c.ensureLoadBalancerInstances(aws.StringValue(loadBalancer.LoadBalancerName), loadBalancer.Instances, instances) if err != nil { - glog.Warningf("Error registering instances with the load balancer: %q", err) + klog.Warningf("Error registering instances with the load balancer: %q", err) return nil, err } - glog.V(1).Infof("Loadbalancer %s (%v) has DNS name %s", loadBalancerName, serviceName, aws.StringValue(loadBalancer.DNSName)) + klog.V(1).Infof("Loadbalancer %s (%v) has DNS name %s", loadBalancerName, serviceName, aws.StringValue(loadBalancer.DNSName)) // TODO: Wait for creation? 
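The health-check branch above chooses an HTTP probe of the service's healthCheckNodePort when externalTrafficPolicy: Local supplies a path, and otherwise falls back to a TCP probe of the first listener's instance port. A sketch of the two target formats, assuming the vendored elb package; the threshold and timing values are illustrative, not taken from this file:

```go
package sketch

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elb"
)

// healthCheckFor builds the ELB health check: an HTTP probe of the
// node-local health endpoint for externalTrafficPolicy: Local
// services, otherwise a plain TCP probe of the first instance port.
func healthCheckFor(path string, healthCheckNodePort int32, firstInstancePort int64) *elb.HealthCheck {
	target := fmt.Sprintf("TCP:%d", firstInstancePort)
	if path != "" {
		target = fmt.Sprintf("HTTP:%d%s", healthCheckNodePort, path)
	}
	return &elb.HealthCheck{
		Target:             aws.String(target),
		HealthyThreshold:   aws.Int64(2), // thresholds/timings illustrative
		UnhealthyThreshold: aws.Int64(6),
		Interval:           aws.Int64(10),
		Timeout:            aws.Int64(5),
	}
}
```

The resulting struct is what gets applied to the load balancer through an elb.ConfigureHealthCheckInput.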
@@ -3689,7 +3682,7 @@ func toStatus(lb *elb.LoadBalancerDescription) *v1.LoadBalancerStatus { func v2toStatus(lb *elbv2.LoadBalancer) *v1.LoadBalancerStatus { status := &v1.LoadBalancerStatus{} if lb == nil { - glog.Error("[BUG] v2toStatus got nil input, this is a Kubernetes bug, please report") + klog.Error("[BUG] v2toStatus got nil input, this is a Kubernetes bug, please report") return status } @@ -3716,7 +3709,7 @@ func findSecurityGroupForInstance(instance *ec2.Instance, taggedSecurityGroups m for _, group := range instance.SecurityGroups { groupID := aws.StringValue(group.GroupId) if groupID == "" { - glog.Warningf("Ignoring security group without id for instance %q: %v", instanceID, group) + klog.Warningf("Ignoring security group without id for instance %q: %v", instanceID, group) continue } _, isTagged := taggedSecurityGroups[groupID] @@ -3748,7 +3741,7 @@ func findSecurityGroupForInstance(instance *ec2.Instance, taggedSecurityGroups m return untagged[0], nil } - glog.Warningf("No security group found for instance %q", instanceID) + klog.Warningf("No security group found for instance %q", instanceID) return nil, nil } @@ -3769,7 +3762,7 @@ func (c *Cloud) getTaggedSecurityGroups() (map[string]*ec2.SecurityGroup, error) id := aws.StringValue(group.GroupId) if id == "" { - glog.Warningf("Ignoring group without id: %v", group) + klog.Warningf("Ignoring group without id: %v", group) continue } m[id] = group @@ -3792,7 +3785,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancer } if loadBalancerSecurityGroupID != "" { // We create LBs with one SG - glog.Warningf("Multiple security groups for load balancer: %q", aws.StringValue(lb.LoadBalancerName)) + klog.Warningf("Multiple security groups for load balancer: %q", aws.StringValue(lb.LoadBalancerName)) } loadBalancerSecurityGroupID = *securityGroup } @@ -3841,12 +3834,12 @@ func (c *Cloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancer } if securityGroup == nil { - glog.Warning("Ignoring instance without security group: ", aws.StringValue(instance.InstanceId)) + klog.Warning("Ignoring instance without security group: ", aws.StringValue(instance.InstanceId)) continue } id := aws.StringValue(securityGroup.GroupId) if id == "" { - glog.Warningf("found security group without id: %v", securityGroup) + klog.Warningf("found security group without id: %v", securityGroup) continue } @@ -3857,7 +3850,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancer for _, actualGroup := range actualGroups { actualGroupID := aws.StringValue(actualGroup.GroupId) if actualGroupID == "" { - glog.Warning("Ignoring group without ID: ", actualGroup) + klog.Warning("Ignoring group without ID: ", actualGroup) continue } @@ -3873,9 +3866,9 @@ func (c *Cloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancer for instanceSecurityGroupID, add := range instanceSecurityGroupIds { if add { - glog.V(2).Infof("Adding rule for traffic from the load balancer (%s) to instances (%s)", loadBalancerSecurityGroupID, instanceSecurityGroupID) + klog.V(2).Infof("Adding rule for traffic from the load balancer (%s) to instances (%s)", loadBalancerSecurityGroupID, instanceSecurityGroupID) } else { - glog.V(2).Infof("Removing rule for traffic from the load balancer (%s) to instance (%s)", loadBalancerSecurityGroupID, instanceSecurityGroupID) + klog.V(2).Infof("Removing rule for traffic from the load balancer (%s) to instance (%s)", loadBalancerSecurityGroupID, instanceSecurityGroupID) } 
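The statements just below build the one permission being toggled per instance group: all protocols, with the load balancer's security group as the source rather than a CIDR. In sketch form, assuming the vendored ec2 package:

```go
package sketch

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// lbToInstancePermission models the rule added to (or revoked from)
// each instance security group: all traffic whose source is the load
// balancer's security group.
func lbToInstancePermission(lbSecurityGroupID string) *ec2.IpPermission {
	return &ec2.IpPermission{
		IpProtocol: aws.String("-1"), // -1 means all protocols
		UserIdGroupPairs: []*ec2.UserIdGroupPair{
			{GroupId: aws.String(lbSecurityGroupID)},
		},
	}
}
```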
sourceGroupID := &ec2.UserIdGroupPair{} sourceGroupID.GroupId = &loadBalancerSecurityGroupID @@ -3894,7 +3887,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancer return err } if !changed { - glog.Warning("Allowing ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) + klog.Warning("Allowing ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) } } else { changed, err := c.removeSecurityGroupIngress(instanceSecurityGroupID, permissions) @@ -3902,7 +3895,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancer return err } if !changed { - glog.Warning("Revoking ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) + klog.Warning("Revoking ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) } } } @@ -3920,7 +3913,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin return err } if lb == nil { - glog.Info("Load balancer already deleted: ", loadBalancerName) + klog.Info("Load balancer already deleted: ", loadBalancerName) return nil } @@ -4044,7 +4037,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin return err } if !changed { - glog.Warning("Revoking ingress was not needed; concurrent change? groupId=", *matchingGroups[i].GroupId) + klog.Warning("Revoking ingress was not needed; concurrent change? groupId=", *matchingGroups[i].GroupId) } } @@ -4062,7 +4055,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin } if lb == nil { - glog.Info("Load balancer already deleted: ", loadBalancerName) + klog.Info("Load balancer already deleted: ", loadBalancerName) return nil } @@ -4070,7 +4063,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin // De-authorize the load balancer security group from the instances security group err = c.updateInstanceSecurityGroupsForLoadBalancer(lb, nil) if err != nil { - glog.Errorf("Error deregistering load balancer from instance security groups: %q", err) + klog.Errorf("Error deregistering load balancer from instance security groups: %q", err) return err } } @@ -4083,7 +4076,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin _, err = c.elb.DeleteLoadBalancer(request) if err != nil { // TODO: Check if error was because load balancer was concurrently deleted - glog.Errorf("Error deleting load balancer: %q", err) + klog.Errorf("Error deleting load balancer: %q", err) return err } } @@ -4101,7 +4094,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin continue } if aws.StringValue(securityGroupID) == "" { - glog.Warning("Ignoring empty security group in ", service.Name) + klog.Warning("Ignoring empty security group in ", service.Name) continue } securityGroupIDs[*securityGroupID] = struct{}{} @@ -4120,7 +4113,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin ignore := false if awsError, ok := err.(awserr.Error); ok { if awsError.Code() == "DependencyViolation" { - glog.V(2).Infof("Ignoring DependencyViolation while deleting load-balancer security group (%s), assuming because LB is in process of deleting", securityGroupID) + klog.V(2).Infof("Ignoring DependencyViolation while deleting load-balancer security group (%s), assuming because LB is in process of deleting", securityGroupID) ignore = true } } @@ -4131,7 +4124,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx 
context.Context, clusterName strin } if len(securityGroupIDs) == 0 { - glog.V(2).Info("Deleted all security groups for load balancer: ", service.Name) + klog.V(2).Info("Deleted all security groups for load balancer: ", service.Name) break } @@ -4144,7 +4137,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin return fmt.Errorf("timed out deleting ELB: %s. Could not delete security groups %v", service.Name, strings.Join(ids, ",")) } - glog.V(2).Info("Waiting for load-balancer to delete so we can delete security groups: ", service.Name) + klog.V(2).Info("Waiting for load-balancer to delete so we can delete security groups: ", service.Name) time.Sleep(10 * time.Second) } @@ -4275,14 +4268,14 @@ func (c *Cloud) getInstancesByNodeNames(nodeNames []string, states ...string) ([ instances, err := c.describeInstances(filters) if err != nil { - glog.V(2).Infof("Failed to describe instances %v", nodeNames) + klog.V(2).Infof("Failed to describe instances %v", nodeNames) return nil, err } ec2Instances = append(ec2Instances, instances...) } if len(ec2Instances) == 0 { - glog.V(3).Infof("Failed to find any instances %v", nodeNames) + klog.V(3).Infof("Failed to find any instances %v", nodeNames) return nil, nil } return ec2Instances, nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_fakes.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_fakes.go index 774363272304..28946fa35c03 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_fakes.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_fakes.go @@ -25,10 +25,10 @@ import ( "github.com/aws/aws-sdk-go/service/elb" "github.com/aws/aws-sdk-go/service/elbv2" "github.com/aws/aws-sdk-go/service/kms" - - "github.com/golang/glog" + "k8s.io/klog" ) +// FakeAWSServices is a fake AWS session used for testing type FakeAWSServices struct { region string instances []*ec2.Instance @@ -45,7 +45,8 @@ type FakeAWSServices struct { kms *FakeKMS } -func NewFakeAWSServices(clusterId string) *FakeAWSServices { +// NewFakeAWSServices creates a new FakeAWSServices +func NewFakeAWSServices(clusterID string) *FakeAWSServices { s := &FakeAWSServices{} s.region = "us-east-1" s.ec2 = &FakeEC2Impl{aws: s} @@ -71,12 +72,13 @@ func NewFakeAWSServices(clusterId string) *FakeAWSServices { var tag ec2.Tag tag.Key = aws.String(TagNameKubernetesClusterLegacy) - tag.Value = aws.String(clusterId) + tag.Value = aws.String(clusterID) selfInstance.Tags = []*ec2.Tag{&tag} return s } +// WithAz sets the ec2 placement availability zone func (s *FakeAWSServices) WithAz(az string) *FakeAWSServices { if s.selfInstance.Placement == nil { s.selfInstance.Placement = &ec2.Placement{} @@ -85,30 +87,37 @@ func (s *FakeAWSServices) WithAz(az string) *FakeAWSServices { return s } +// Compute returns a fake EC2 client func (s *FakeAWSServices) Compute(region string) (EC2, error) { return s.ec2, nil } +// LoadBalancing returns a fake ELB client func (s *FakeAWSServices) LoadBalancing(region string) (ELB, error) { return s.elb, nil } +// LoadBalancingV2 returns a fake ELBV2 client func (s *FakeAWSServices) LoadBalancingV2(region string) (ELBV2, error) { return s.elbv2, nil } +// Autoscaling returns a fake ASG client func (s *FakeAWSServices) Autoscaling(region string) (ASG, error) { return s.asg, nil } +// Metadata returns a fake EC2Metadata client func (s *FakeAWSServices) Metadata() (EC2Metadata, error) { return
s.metadata, nil } +// KeyManagement returns a fake KMS client func (s *FakeAWSServices) KeyManagement(region string) (KMS, error) { return s.kms, nil } +// FakeEC2 is a fake EC2 client used for testing type FakeEC2 interface { EC2 CreateSubnet(*ec2.Subnet) (*ec2.CreateSubnetOutput, error) @@ -117,6 +126,7 @@ type FakeEC2 interface { RemoveRouteTables() } +// FakeEC2Impl is an implementation of the FakeEC2 interface used for testing type FakeEC2Impl struct { aws *FakeAWSServices Subnets []*ec2.Subnet @@ -125,12 +135,13 @@ type FakeEC2Impl struct { DescribeRouteTablesInput *ec2.DescribeRouteTablesInput } +// DescribeInstances returns fake instance descriptions func (ec2i *FakeEC2Impl) DescribeInstances(request *ec2.DescribeInstancesInput) ([]*ec2.Instance, error) { matches := []*ec2.Instance{} for _, instance := range ec2i.aws.instances { if request.InstanceIds != nil { if instance.InstanceId == nil { - glog.Warning("Instance with no instance id: ", instance) + klog.Warning("Instance with no instance id: ", instance) continue } @@ -163,54 +174,73 @@ func (ec2i *FakeEC2Impl) DescribeInstances(request *ec2.DescribeInstancesInput) return matches, nil } +// AttachVolume is not implemented but is required for interface conformance func (ec2i *FakeEC2Impl) AttachVolume(request *ec2.AttachVolumeInput) (resp *ec2.VolumeAttachment, err error) { panic("Not implemented") } +// DetachVolume is not implemented but is required for interface conformance func (ec2i *FakeEC2Impl) DetachVolume(request *ec2.DetachVolumeInput) (resp *ec2.VolumeAttachment, err error) { panic("Not implemented") } +// DescribeVolumes is not implemented but is required for interface conformance func (ec2i *FakeEC2Impl) DescribeVolumes(request *ec2.DescribeVolumesInput) ([]*ec2.Volume, error) { panic("Not implemented") } +// CreateVolume is not implemented but is required for interface conformance func (ec2i *FakeEC2Impl) CreateVolume(request *ec2.CreateVolumeInput) (resp *ec2.Volume, err error) { panic("Not implemented") } +// DeleteVolume is not implemented but is required for interface conformance func (ec2i *FakeEC2Impl) DeleteVolume(request *ec2.DeleteVolumeInput) (resp *ec2.DeleteVolumeOutput, err error) { panic("Not implemented") } +// DescribeSecurityGroups is not implemented but is required for interface +// conformance func (ec2i *FakeEC2Impl) DescribeSecurityGroups(request *ec2.DescribeSecurityGroupsInput) ([]*ec2.SecurityGroup, error) { panic("Not implemented") } +// CreateSecurityGroup is not implemented but is required for interface +// conformance func (ec2i *FakeEC2Impl) CreateSecurityGroup(*ec2.CreateSecurityGroupInput) (*ec2.CreateSecurityGroupOutput, error) { panic("Not implemented") } +// DeleteSecurityGroup is not implemented but is required for interface +// conformance func (ec2i *FakeEC2Impl) DeleteSecurityGroup(*ec2.DeleteSecurityGroupInput) (*ec2.DeleteSecurityGroupOutput, error) { panic("Not implemented") } +// AuthorizeSecurityGroupIngress is not implemented but is required for +// interface conformance func (ec2i *FakeEC2Impl) AuthorizeSecurityGroupIngress(*ec2.AuthorizeSecurityGroupIngressInput) (*ec2.AuthorizeSecurityGroupIngressOutput, error) { panic("Not implemented") } +// RevokeSecurityGroupIngress is not implemented but is required for interface +// conformance func (ec2i *FakeEC2Impl) RevokeSecurityGroupIngress(*ec2.RevokeSecurityGroupIngressInput) (*ec2.RevokeSecurityGroupIngressOutput, error) { panic("Not implemented") } +// DescribeVolumeModifications is not implemented but is required for 
interface +// conformance func (ec2i *FakeEC2Impl) DescribeVolumeModifications(*ec2.DescribeVolumesModificationsInput) ([]*ec2.VolumeModification, error) { panic("Not implemented") } +// ModifyVolume is not implemented but is required for interface conformance func (ec2i *FakeEC2Impl) ModifyVolume(*ec2.ModifyVolumeInput) (*ec2.ModifyVolumeOutput, error) { panic("Not implemented") } +// CreateSubnet creates fake subnets func (ec2i *FakeEC2Impl) CreateSubnet(request *ec2.Subnet) (*ec2.CreateSubnetOutput, error) { ec2i.Subnets = append(ec2i.Subnets, request) response := &ec2.CreateSubnetOutput{ @@ -219,24 +249,29 @@ func (ec2i *FakeEC2Impl) CreateSubnet(request *ec2.Subnet) (*ec2.CreateSubnetOut return response, nil } +// DescribeSubnets returns fake subnet descriptions func (ec2i *FakeEC2Impl) DescribeSubnets(request *ec2.DescribeSubnetsInput) ([]*ec2.Subnet, error) { ec2i.DescribeSubnetsInput = request return ec2i.Subnets, nil } +// RemoveSubnets clears subnets on client func (ec2i *FakeEC2Impl) RemoveSubnets() { ec2i.Subnets = ec2i.Subnets[:0] } +// CreateTags is not implemented but is required for interface conformance func (ec2i *FakeEC2Impl) CreateTags(*ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error) { panic("Not implemented") } +// DescribeRouteTables returns fake route table descriptions func (ec2i *FakeEC2Impl) DescribeRouteTables(request *ec2.DescribeRouteTablesInput) ([]*ec2.RouteTable, error) { ec2i.DescribeRouteTablesInput = request return ec2i.RouteTables, nil } +// CreateRouteTable creates fake route tables func (ec2i *FakeEC2Impl) CreateRouteTable(request *ec2.RouteTable) (*ec2.CreateRouteTableOutput, error) { ec2i.RouteTables = append(ec2i.RouteTables, request) response := &ec2.CreateRouteTableOutput{ @@ -245,30 +280,38 @@ func (ec2i *FakeEC2Impl) CreateRouteTable(request *ec2.RouteTable) (*ec2.CreateR return response, nil } +// RemoveRouteTables clears route tables on client func (ec2i *FakeEC2Impl) RemoveRouteTables() { ec2i.RouteTables = ec2i.RouteTables[:0] } +// CreateRoute is not implemented but is required for interface conformance func (ec2i *FakeEC2Impl) CreateRoute(request *ec2.CreateRouteInput) (*ec2.CreateRouteOutput, error) { panic("Not implemented") } +// DeleteRoute is not implemented but is required for interface conformance func (ec2i *FakeEC2Impl) DeleteRoute(request *ec2.DeleteRouteInput) (*ec2.DeleteRouteOutput, error) { panic("Not implemented") } +// ModifyInstanceAttribute is not implemented but is required for interface +// conformance func (ec2i *FakeEC2Impl) ModifyInstanceAttribute(request *ec2.ModifyInstanceAttributeInput) (*ec2.ModifyInstanceAttributeOutput, error) { panic("Not implemented") } +// DescribeVpcs returns fake VPC descriptions func (ec2i *FakeEC2Impl) DescribeVpcs(request *ec2.DescribeVpcsInput) (*ec2.DescribeVpcsOutput, error) { return &ec2.DescribeVpcsOutput{Vpcs: []*ec2.Vpc{{CidrBlock: aws.String("172.20.0.0/16")}}}, nil } +// FakeMetadata is a fake EC2 metadata service client used for testing type FakeMetadata struct { aws *FakeAWSServices } +// GetMetadata returns fake EC2 metadata for testing func (m *FakeMetadata) GetMetadata(key string) (string, error) { networkInterfacesPrefix := "network/interfaces/macs/" i := m.aws.selfInstance @@ -291,199 +334,292 @@ func (m *FakeMetadata) GetMetadata(key string) (string, error) { } else if strings.HasPrefix(key, networkInterfacesPrefix) { if key == networkInterfacesPrefix { return strings.Join(m.aws.networkInterfacesMacs, "/\n") + "/\n", nil - } else { - keySplit := strings.Split(key, 
"/") - macParam := keySplit[3] - if len(keySplit) == 5 && keySplit[4] == "vpc-id" { - for i, macElem := range m.aws.networkInterfacesMacs { - if macParam == macElem { - return m.aws.networkInterfacesVpcIDs[i], nil - } + } + + keySplit := strings.Split(key, "/") + macParam := keySplit[3] + if len(keySplit) == 5 && keySplit[4] == "vpc-id" { + for i, macElem := range m.aws.networkInterfacesMacs { + if macParam == macElem { + return m.aws.networkInterfacesVpcIDs[i], nil } } - if len(keySplit) == 5 && keySplit[4] == "local-ipv4s" { - for i, macElem := range m.aws.networkInterfacesMacs { - if macParam == macElem { - return strings.Join(m.aws.networkInterfacesPrivateIPs[i], "/\n"), nil - } + } + if len(keySplit) == 5 && keySplit[4] == "local-ipv4s" { + for i, macElem := range m.aws.networkInterfacesMacs { + if macParam == macElem { + return strings.Join(m.aws.networkInterfacesPrivateIPs[i], "/\n"), nil } } - return "", nil } + + return "", nil } else { return "", nil } } +// FakeELB is a fake ELB client used for testing type FakeELB struct { aws *FakeAWSServices } +// CreateLoadBalancer is not implemented but is required for interface +// conformance func (elb *FakeELB) CreateLoadBalancer(*elb.CreateLoadBalancerInput) (*elb.CreateLoadBalancerOutput, error) { panic("Not implemented") } +// DeleteLoadBalancer is not implemented but is required for interface +// conformance func (elb *FakeELB) DeleteLoadBalancer(input *elb.DeleteLoadBalancerInput) (*elb.DeleteLoadBalancerOutput, error) { panic("Not implemented") } +// DescribeLoadBalancers is not implemented but is required for interface +// conformance func (elb *FakeELB) DescribeLoadBalancers(input *elb.DescribeLoadBalancersInput) (*elb.DescribeLoadBalancersOutput, error) { panic("Not implemented") } +// AddTags is not implemented but is required for interface conformance func (elb *FakeELB) AddTags(input *elb.AddTagsInput) (*elb.AddTagsOutput, error) { panic("Not implemented") } +// RegisterInstancesWithLoadBalancer is not implemented but is required for +// interface conformance func (elb *FakeELB) RegisterInstancesWithLoadBalancer(*elb.RegisterInstancesWithLoadBalancerInput) (*elb.RegisterInstancesWithLoadBalancerOutput, error) { panic("Not implemented") } +// DeregisterInstancesFromLoadBalancer is not implemented but is required for +// interface conformance func (elb *FakeELB) DeregisterInstancesFromLoadBalancer(*elb.DeregisterInstancesFromLoadBalancerInput) (*elb.DeregisterInstancesFromLoadBalancerOutput, error) { panic("Not implemented") } +// DetachLoadBalancerFromSubnets is not implemented but is required for +// interface conformance func (elb *FakeELB) DetachLoadBalancerFromSubnets(*elb.DetachLoadBalancerFromSubnetsInput) (*elb.DetachLoadBalancerFromSubnetsOutput, error) { panic("Not implemented") } +// AttachLoadBalancerToSubnets is not implemented but is required for interface +// conformance func (elb *FakeELB) AttachLoadBalancerToSubnets(*elb.AttachLoadBalancerToSubnetsInput) (*elb.AttachLoadBalancerToSubnetsOutput, error) { panic("Not implemented") } +// CreateLoadBalancerListeners is not implemented but is required for interface +// conformance func (elb *FakeELB) CreateLoadBalancerListeners(*elb.CreateLoadBalancerListenersInput) (*elb.CreateLoadBalancerListenersOutput, error) { panic("Not implemented") } +// DeleteLoadBalancerListeners is not implemented but is required for interface +// conformance func (elb *FakeELB) DeleteLoadBalancerListeners(*elb.DeleteLoadBalancerListenersInput) (*elb.DeleteLoadBalancerListenersOutput, 
error) { panic("Not implemented") } +// ApplySecurityGroupsToLoadBalancer is not implemented but is required for +// interface conformance func (elb *FakeELB) ApplySecurityGroupsToLoadBalancer(*elb.ApplySecurityGroupsToLoadBalancerInput) (*elb.ApplySecurityGroupsToLoadBalancerOutput, error) { panic("Not implemented") } +// ConfigureHealthCheck is not implemented but is required for interface +// conformance func (elb *FakeELB) ConfigureHealthCheck(*elb.ConfigureHealthCheckInput) (*elb.ConfigureHealthCheckOutput, error) { panic("Not implemented") } +// CreateLoadBalancerPolicy is not implemented but is required for interface +// conformance func (elb *FakeELB) CreateLoadBalancerPolicy(*elb.CreateLoadBalancerPolicyInput) (*elb.CreateLoadBalancerPolicyOutput, error) { panic("Not implemented") } +// SetLoadBalancerPoliciesForBackendServer is not implemented but is required +// for interface conformance func (elb *FakeELB) SetLoadBalancerPoliciesForBackendServer(*elb.SetLoadBalancerPoliciesForBackendServerInput) (*elb.SetLoadBalancerPoliciesForBackendServerOutput, error) { panic("Not implemented") } +// SetLoadBalancerPoliciesOfListener is not implemented but is required for +// interface conformance func (elb *FakeELB) SetLoadBalancerPoliciesOfListener(input *elb.SetLoadBalancerPoliciesOfListenerInput) (*elb.SetLoadBalancerPoliciesOfListenerOutput, error) { panic("Not implemented") } +// DescribeLoadBalancerPolicies is not implemented but is required for +// interface conformance func (elb *FakeELB) DescribeLoadBalancerPolicies(input *elb.DescribeLoadBalancerPoliciesInput) (*elb.DescribeLoadBalancerPoliciesOutput, error) { panic("Not implemented") } +// DescribeLoadBalancerAttributes is not implemented but is required for +// interface conformance func (elb *FakeELB) DescribeLoadBalancerAttributes(*elb.DescribeLoadBalancerAttributesInput) (*elb.DescribeLoadBalancerAttributesOutput, error) { panic("Not implemented") } +// ModifyLoadBalancerAttributes is not implemented but is required for +// interface conformance func (elb *FakeELB) ModifyLoadBalancerAttributes(*elb.ModifyLoadBalancerAttributesInput) (*elb.ModifyLoadBalancerAttributesOutput, error) { panic("Not implemented") } -func (self *FakeELB) expectDescribeLoadBalancers(loadBalancerName string) { +// expectDescribeLoadBalancers is not implemented but is required for interface +// conformance +func (elb *FakeELB) expectDescribeLoadBalancers(loadBalancerName string) { panic("Not implemented") } +// FakeELBV2 is a fake ELBV2 client used for testing type FakeELBV2 struct { aws *FakeAWSServices } -func (self *FakeELBV2) AddTags(input *elbv2.AddTagsInput) (*elbv2.AddTagsOutput, error) { +// AddTags is not implemented but is required for interface conformance +func (elb *FakeELBV2) AddTags(input *elbv2.AddTagsInput) (*elbv2.AddTagsOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) CreateLoadBalancer(*elbv2.CreateLoadBalancerInput) (*elbv2.CreateLoadBalancerOutput, error) { +// CreateLoadBalancer is not implemented but is required for interface +// conformance +func (elb *FakeELBV2) CreateLoadBalancer(*elbv2.CreateLoadBalancerInput) (*elbv2.CreateLoadBalancerOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) DescribeLoadBalancers(*elbv2.DescribeLoadBalancersInput) (*elbv2.DescribeLoadBalancersOutput, error) { + +// DescribeLoadBalancers is not implemented but is required for interface +// conformance +func (elb *FakeELBV2) DescribeLoadBalancers(*elbv2.DescribeLoadBalancersInput) 
(*elbv2.DescribeLoadBalancersOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) DeleteLoadBalancer(*elbv2.DeleteLoadBalancerInput) (*elbv2.DeleteLoadBalancerOutput, error) { + +// DeleteLoadBalancer is not implemented but is required for interface +// conformance +func (elb *FakeELBV2) DeleteLoadBalancer(*elbv2.DeleteLoadBalancerInput) (*elbv2.DeleteLoadBalancerOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) ModifyLoadBalancerAttributes(*elbv2.ModifyLoadBalancerAttributesInput) (*elbv2.ModifyLoadBalancerAttributesOutput, error) { +// ModifyLoadBalancerAttributes is not implemented but is required for +// interface conformance +func (elb *FakeELBV2) ModifyLoadBalancerAttributes(*elbv2.ModifyLoadBalancerAttributesInput) (*elbv2.ModifyLoadBalancerAttributesOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) DescribeLoadBalancerAttributes(*elbv2.DescribeLoadBalancerAttributesInput) (*elbv2.DescribeLoadBalancerAttributesOutput, error) { + +// DescribeLoadBalancerAttributes is not implemented but is required for +// interface conformance +func (elb *FakeELBV2) DescribeLoadBalancerAttributes(*elbv2.DescribeLoadBalancerAttributesInput) (*elbv2.DescribeLoadBalancerAttributesOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) CreateTargetGroup(*elbv2.CreateTargetGroupInput) (*elbv2.CreateTargetGroupOutput, error) { +// CreateTargetGroup is not implemented but is required for interface +// conformance +func (elb *FakeELBV2) CreateTargetGroup(*elbv2.CreateTargetGroupInput) (*elbv2.CreateTargetGroupOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) DescribeTargetGroups(*elbv2.DescribeTargetGroupsInput) (*elbv2.DescribeTargetGroupsOutput, error) { + +// DescribeTargetGroups is not implemented but is required for interface +// conformance +func (elb *FakeELBV2) DescribeTargetGroups(*elbv2.DescribeTargetGroupsInput) (*elbv2.DescribeTargetGroupsOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) ModifyTargetGroup(*elbv2.ModifyTargetGroupInput) (*elbv2.ModifyTargetGroupOutput, error) { + +// ModifyTargetGroup is not implemented but is required for interface +// conformance +func (elb *FakeELBV2) ModifyTargetGroup(*elbv2.ModifyTargetGroupInput) (*elbv2.ModifyTargetGroupOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) DeleteTargetGroup(*elbv2.DeleteTargetGroupInput) (*elbv2.DeleteTargetGroupOutput, error) { + +// DeleteTargetGroup is not implemented but is required for interface +// conformance +func (elb *FakeELBV2) DeleteTargetGroup(*elbv2.DeleteTargetGroupInput) (*elbv2.DeleteTargetGroupOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) DescribeTargetHealth(input *elbv2.DescribeTargetHealthInput) (*elbv2.DescribeTargetHealthOutput, error) { +// DescribeTargetHealth is not implemented but is required for interface +// conformance +func (elb *FakeELBV2) DescribeTargetHealth(input *elbv2.DescribeTargetHealthInput) (*elbv2.DescribeTargetHealthOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) DescribeTargetGroupAttributes(*elbv2.DescribeTargetGroupAttributesInput) (*elbv2.DescribeTargetGroupAttributesOutput, error) { +// DescribeTargetGroupAttributes is not implemented but is required for +// interface conformance +func (elb *FakeELBV2) DescribeTargetGroupAttributes(*elbv2.DescribeTargetGroupAttributesInput) (*elbv2.DescribeTargetGroupAttributesOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) 
ModifyTargetGroupAttributes(*elbv2.ModifyTargetGroupAttributesInput) (*elbv2.ModifyTargetGroupAttributesOutput, error) { + +// ModifyTargetGroupAttributes is not implemented but is required for interface +// conformance +func (elb *FakeELBV2) ModifyTargetGroupAttributes(*elbv2.ModifyTargetGroupAttributesInput) (*elbv2.ModifyTargetGroupAttributesOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) RegisterTargets(*elbv2.RegisterTargetsInput) (*elbv2.RegisterTargetsOutput, error) { +// RegisterTargets is not implemented but is required for interface conformance +func (elb *FakeELBV2) RegisterTargets(*elbv2.RegisterTargetsInput) (*elbv2.RegisterTargetsOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) DeregisterTargets(*elbv2.DeregisterTargetsInput) (*elbv2.DeregisterTargetsOutput, error) { + +// DeregisterTargets is not implemented but is required for interface +// conformance +func (elb *FakeELBV2) DeregisterTargets(*elbv2.DeregisterTargetsInput) (*elbv2.DeregisterTargetsOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) CreateListener(*elbv2.CreateListenerInput) (*elbv2.CreateListenerOutput, error) { +// CreateListener is not implemented but is required for interface conformance +func (elb *FakeELBV2) CreateListener(*elbv2.CreateListenerInput) (*elbv2.CreateListenerOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) DescribeListeners(*elbv2.DescribeListenersInput) (*elbv2.DescribeListenersOutput, error) { + +// DescribeListeners is not implemented but is required for interface +// conformance +func (elb *FakeELBV2) DescribeListeners(*elbv2.DescribeListenersInput) (*elbv2.DescribeListenersOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) DeleteListener(*elbv2.DeleteListenerInput) (*elbv2.DeleteListenerOutput, error) { + +// DeleteListener is not implemented but is required for interface conformance +func (elb *FakeELBV2) DeleteListener(*elbv2.DeleteListenerInput) (*elbv2.DeleteListenerOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) ModifyListener(*elbv2.ModifyListenerInput) (*elbv2.ModifyListenerOutput, error) { + +// ModifyListener is not implemented but is required for interface conformance +func (elb *FakeELBV2) ModifyListener(*elbv2.ModifyListenerInput) (*elbv2.ModifyListenerOutput, error) { panic("Not implemented") } -func (self *FakeELBV2) WaitUntilLoadBalancersDeleted(*elbv2.DescribeLoadBalancersInput) error { +// WaitUntilLoadBalancersDeleted is not implemented but is required for +// interface conformance +func (elb *FakeELBV2) WaitUntilLoadBalancersDeleted(*elbv2.DescribeLoadBalancersInput) error { panic("Not implemented") } +// FakeASG is a fake Autoscaling client used for testing type FakeASG struct { aws *FakeAWSServices } +// UpdateAutoScalingGroup is not implemented but is required for interface +// conformance func (a *FakeASG) UpdateAutoScalingGroup(*autoscaling.UpdateAutoScalingGroupInput) (*autoscaling.UpdateAutoScalingGroupOutput, error) { panic("Not implemented") } +// DescribeAutoScalingGroups is not implemented but is required for interface +// conformance func (a *FakeASG) DescribeAutoScalingGroups(*autoscaling.DescribeAutoScalingGroupsInput) (*autoscaling.DescribeAutoScalingGroupsOutput, error) { panic("Not implemented") } +// FakeKMS is a fake KMS client used for testing type FakeKMS struct { aws *FakeAWSServices } +// DescribeKey is not implemented but is required for interface conformance func (kms *FakeKMS) DescribeKey(*kms.DescribeKeyInput) 
(*kms.DescribeKeyOutput, error) { panic("Not implemented") } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_instancegroups.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_instancegroups.go index 6bf63ae6fe66..6c4c59b039e5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_instancegroups.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_instancegroups.go @@ -21,7 +21,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/golang/glog" + "k8s.io/klog" ) // AWSCloud implements InstanceGroups @@ -42,7 +42,7 @@ func ResizeInstanceGroup(asg ASG, instanceGroupName string, size int) error { return nil } -// Implement InstanceGroups.ResizeInstanceGroup +// ResizeInstanceGroup implements InstanceGroups.ResizeInstanceGroup // Set the size to the fixed size func (c *Cloud) ResizeInstanceGroup(instanceGroupName string, size int) error { return ResizeInstanceGroup(c.asg, instanceGroupName, size) @@ -64,13 +64,13 @@ func DescribeInstanceGroup(asg ASG, instanceGroupName string) (InstanceGroupInfo return nil, nil } if len(response.AutoScalingGroups) > 1 { - glog.Warning("AWS returned multiple autoscaling groups with name ", instanceGroupName) + klog.Warning("AWS returned multiple autoscaling groups with name ", instanceGroupName) } group := response.AutoScalingGroups[0] return &awsInstanceGroup{group: group}, nil } -// Implement InstanceGroups.DescribeInstanceGroup +// DescribeInstanceGroup implements InstanceGroups.DescribeInstanceGroup // Queries the cloud provider for information about the specified instance group func (c *Cloud) DescribeInstanceGroup(instanceGroupName string) (InstanceGroupInfo, error) { return DescribeInstanceGroup(c.asg, instanceGroupName) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_loadbalancer.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_loadbalancer.go index 25583ca56976..0c2e75530675 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_loadbalancer.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_loadbalancer.go @@ -28,15 +28,20 @@ import ( "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/elb" "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/golang/glog" + "k8s.io/klog" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" ) const ( + // ProxyProtocolPolicyName is the tag name used for the proxy protocol + // policy ProxyProtocolPolicyName = "k8s-proxyprotocol-enabled" + // SSLNegotiationPolicyNameFormat is a format string used for the SSL + // negotiation policy tag name SSLNegotiationPolicyNameFormat = "k8s-SSLNegotiationPolicy-%s" ) @@ -129,7 +134,7 @@ func (c *Cloud) ensureLoadBalancerv2(namespacedName types.NamespacedName, loadBa }) } - glog.Infof("Creating load balancer for %v with name: %s", namespacedName, loadBalancerName) + klog.Infof("Creating load balancer for %v with name: %s", namespacedName, loadBalancerName) createResponse, err := c.elbv2.CreateLoadBalancer(createRequest) if err != nil { return nil, fmt.Errorf("Error creating load balancer: %q", err) @@ -335,7 +340,7 @@ func createTargetName(namespacedName types.NamespacedName, frontendPort, nodePor func (c *Cloud) createListenerV2(loadBalancerArn *string, mapping
nlbPortMapping, namespacedName types.NamespacedName, instanceIDs []string, vpcID string) (listener *elbv2.Listener, targetGroupArn *string, err error) { targetName := createTargetName(namespacedName, mapping.FrontendPort, mapping.TrafficPort) - glog.Infof("Creating load balancer target group for %v with name: %s", namespacedName, targetName) + klog.Infof("Creating load balancer target group for %v with name: %s", namespacedName, targetName) target, err := c.ensureTargetGroup( nil, mapping, @@ -356,7 +361,7 @@ func (c *Cloud) createListenerV2(loadBalancerArn *string, mapping nlbPortMapping Type: aws.String(elbv2.ActionTypeEnumForward), }}, } - glog.Infof("Creating load balancer listener for %v", namespacedName) + klog.Infof("Creating load balancer listener for %v", namespacedName) createListenerOutput, err := c.elbv2.CreateListener(createListernerInput) if err != nil { return nil, aws.String(""), fmt.Errorf("Error creating load balancer listener: %q", err) @@ -612,7 +617,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se for _, actualGroup := range actualGroups { actualGroupID := aws.StringValue(actualGroup.GroupId) if actualGroupID == "" { - glog.Warning("Ignoring group without ID: ", actualGroup) + klog.Warning("Ignoring group without ID: ", actualGroup) continue } @@ -647,17 +652,17 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se for port, add := range portMap { if add { if clientTraffic { - glog.V(2).Infof("Adding rule for client MTU discovery from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID) - glog.V(2).Infof("Adding rule for client traffic from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID) + klog.V(2).Infof("Adding rule for client MTU discovery from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID) + klog.V(2).Infof("Adding rule for client traffic from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID) } else { - glog.V(2).Infof("Adding rule for health check traffic from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID) + klog.V(2).Infof("Adding rule for health check traffic from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID) } } else { if clientTraffic { - glog.V(2).Infof("Removing rule for client MTU discovery from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID) - glog.V(2).Infof("Removing rule for client traffic from the network load balancer (%s) to instance (%s)", clientCidrs, instanceSecurityGroupID) + klog.V(2).Infof("Removing rule for client MTU discovery from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID) + klog.V(2).Infof("Removing rule for client traffic from the network load balancer (%s) to instance (%s)", clientCidrs, instanceSecurityGroupID) } - glog.V(2).Infof("Removing rule for health check traffic from the network load balancer (%s) to instance (%s)", clientCidrs, instanceSecurityGroupID) + klog.V(2).Infof("Removing rule for health check traffic from the network load balancer (%s) to instance (%s)", clientCidrs, instanceSecurityGroupID) } if clientTraffic { @@ -712,7 +717,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se return err } if !changed { - glog.Warning("Allowing ingress was not needed; concurrent change? 
groupId=", instanceSecurityGroupID) + klog.Warning("Allowing ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) } } @@ -722,7 +727,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se return err } if !changed { - glog.Warning("Revoking ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) + klog.Warning("Revoking ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) } } @@ -745,12 +750,12 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se group, err := c.findSecurityGroup(instanceSecurityGroupID) if err != nil { - glog.Warningf("Error retrieving security group: %q", err) + klog.Warningf("Error retrieving security group: %q", err) return err } if group == nil { - glog.Warning("Security group not found: ", instanceSecurityGroupID) + klog.Warning("Security group not found: ", instanceSecurityGroupID) return nil } @@ -771,21 +776,21 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se // the icmp permission is missing changed, err := c.addSecurityGroupIngress(instanceSecurityGroupID, []*ec2.IpPermission{mtuPermission}) if err != nil { - glog.Warningf("Error adding MTU permission to security group: %q", err) + klog.Warningf("Error adding MTU permission to security group: %q", err) return err } if !changed { - glog.Warning("Allowing ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) + klog.Warning("Allowing ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) } } else if icmpExists && permCount == 0 { // there is no additional permissions, remove icmp changed, err := c.removeSecurityGroupIngress(instanceSecurityGroupID, []*ec2.IpPermission{mtuPermission}) if err != nil { - glog.Warningf("Error removing MTU permission to security group: %q", err) + klog.Warningf("Error removing MTU permission to security group: %q", err) return err } if !changed { - glog.Warning("Revoking ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) + klog.Warning("Revoking ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) } } } @@ -864,13 +869,13 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLB(mappings []nlbPortMapping, in } if securityGroup == nil { - glog.Warningf("Ignoring instance without security group: %s", aws.StringValue(instance.InstanceId)) + klog.Warningf("Ignoring instance without security group: %s", aws.StringValue(instance.InstanceId)) continue } id := aws.StringValue(securityGroup.GroupId) if id == "" { - glog.Warningf("found security group without id: %v", securityGroup) + klog.Warningf("found security group without id: %v", securityGroup) continue } @@ -937,7 +942,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala }) } - glog.Infof("Creating load balancer for %v with name: %s", namespacedName, loadBalancerName) + klog.Infof("Creating load balancer for %v with name: %s", namespacedName, loadBalancerName) _, err := c.elb.CreateLoadBalancer(createRequest) if err != nil { return nil, err @@ -950,7 +955,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala } for _, listener := range listeners { - glog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to true", *listener.InstancePort) + klog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. 
Setting to true", *listener.InstancePort) err := c.setBackendPolicies(loadBalancerName, *listener.InstancePort, []*string{aws.String(ProxyProtocolPolicyName)}) if err != nil { return nil, err @@ -974,7 +979,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala request := &elb.DetachLoadBalancerFromSubnetsInput{} request.LoadBalancerName = aws.String(loadBalancerName) request.Subnets = stringSetToPointers(removals) - glog.V(2).Info("Detaching load balancer from removed subnets") + klog.V(2).Info("Detaching load balancer from removed subnets") _, err := c.elb.DetachLoadBalancerFromSubnets(request) if err != nil { return nil, fmt.Errorf("error detaching AWS loadbalancer from subnets: %q", err) @@ -986,7 +991,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala request := &elb.AttachLoadBalancerToSubnetsInput{} request.LoadBalancerName = aws.String(loadBalancerName) request.Subnets = stringSetToPointers(additions) - glog.V(2).Info("Attaching load balancer to added subnets") + klog.V(2).Info("Attaching load balancer to added subnets") _, err := c.elb.AttachLoadBalancerToSubnets(request) if err != nil { return nil, fmt.Errorf("error attaching AWS loadbalancer to subnets: %q", err) @@ -1009,7 +1014,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala } else { request.SecurityGroups = aws.StringSlice(securityGroupIDs) } - glog.V(2).Info("Applying updated security groups to load balancer") + klog.V(2).Info("Applying updated security groups to load balancer") _, err := c.elb.ApplySecurityGroupsToLoadBalancer(request) if err != nil { return nil, fmt.Errorf("error applying AWS loadbalancer security groups: %q", err) @@ -1027,7 +1032,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala for _, listenerDescription := range listenerDescriptions { actual := listenerDescription.Listener if actual == nil { - glog.Warning("Ignoring empty listener in AWS loadbalancer: ", loadBalancerName) + klog.Warning("Ignoring empty listener in AWS loadbalancer: ", loadBalancerName) continue } @@ -1069,7 +1074,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala request := &elb.DeleteLoadBalancerListenersInput{} request.LoadBalancerName = aws.String(loadBalancerName) request.LoadBalancerPorts = removals - glog.V(2).Info("Deleting removed load balancer listeners") + klog.V(2).Info("Deleting removed load balancer listeners") _, err := c.elb.DeleteLoadBalancerListeners(request) if err != nil { return nil, fmt.Errorf("error deleting AWS loadbalancer listeners: %q", err) @@ -1081,7 +1086,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala request := &elb.CreateLoadBalancerListenersInput{} request.LoadBalancerName = aws.String(loadBalancerName) request.Listeners = additions - glog.V(2).Info("Creating added load balancer listeners") + klog.V(2).Info("Creating added load balancer listeners") _, err := c.elb.CreateLoadBalancerListeners(request) if err != nil { return nil, fmt.Errorf("error creating AWS loadbalancer listeners: %q", err) @@ -1133,7 +1138,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala } if setPolicy { - glog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to %t", instancePort, proxyProtocol) + klog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. 
Setting to %t", instancePort, proxyProtocol) err := c.setBackendPolicies(loadBalancerName, instancePort, proxyPolicies) if err != nil { return nil, err @@ -1147,7 +1152,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala // corresponding listener anymore for instancePort, found := range foundBackends { if !found { - glog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to false", instancePort) + klog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to false", instancePort) err := c.setBackendPolicies(loadBalancerName, instancePort, []*string{}) if err != nil { return nil, err @@ -1159,7 +1164,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala { // Add additional tags - glog.V(2).Infof("Creating additional load balancer tags for %s", loadBalancerName) + klog.V(2).Infof("Creating additional load balancer tags for %s", loadBalancerName) tags := getLoadBalancerAdditionalTags(annotations) if len(tags) > 0 { err := c.addLoadBalancerTags(loadBalancerName, tags) @@ -1178,7 +1183,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala describeAttributesRequest.LoadBalancerName = aws.String(loadBalancerName) describeAttributesOutput, err := c.elb.DescribeLoadBalancerAttributes(describeAttributesRequest) if err != nil { - glog.Warning("Unable to retrieve load balancer attributes during attribute sync") + klog.Warning("Unable to retrieve load balancer attributes during attribute sync") return nil, err } @@ -1186,7 +1191,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala // Update attributes if they're dirty if !reflect.DeepEqual(loadBalancerAttributes, foundAttributes) { - glog.V(2).Infof("Updating load-balancer attributes for %q", loadBalancerName) + klog.V(2).Infof("Updating load-balancer attributes for %q", loadBalancerName) modifyAttributesRequest := &elb.ModifyLoadBalancerAttributesInput{} modifyAttributesRequest.LoadBalancerName = aws.String(loadBalancerName) @@ -1202,7 +1207,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala if dirty { loadBalancer, err = c.describeLoadBalancer(loadBalancerName) if err != nil { - glog.Warning("Unable to retrieve load balancer after creation/update") + klog.Warning("Unable to retrieve load balancer after creation/update") return nil, err } } @@ -1326,16 +1331,16 @@ func (c *Cloud) ensureLoadBalancerInstances(loadBalancerName string, lbInstances removals := actual.Difference(expected) addInstances := []*elb.Instance{} - for _, instanceId := range additions.List() { + for _, instanceID := range additions.List() { addInstance := &elb.Instance{} - addInstance.InstanceId = aws.String(instanceId) + addInstance.InstanceId = aws.String(instanceID) addInstances = append(addInstances, addInstance) } removeInstances := []*elb.Instance{} - for _, instanceId := range removals.List() { + for _, instanceID := range removals.List() { removeInstance := &elb.Instance{} - removeInstance.InstanceId = aws.String(instanceId) + removeInstance.InstanceId = aws.String(instanceID) removeInstances = append(removeInstances, removeInstance) } @@ -1347,7 +1352,7 @@ func (c *Cloud) ensureLoadBalancerInstances(loadBalancerName string, lbInstances if err != nil { return err } - glog.V(1).Infof("Instances added to load-balancer %s", loadBalancerName) + klog.V(1).Infof("Instances added to load-balancer %s", loadBalancerName) } if len(removeInstances) > 0 { @@ -1358,7 
+1363,7 @@ func (c *Cloud) ensureLoadBalancerInstances(loadBalancerName string, lbInstances if err != nil { return err } - glog.V(1).Infof("Instances removed from load-balancer %s", loadBalancerName) + klog.V(1).Infof("Instances removed from load-balancer %s", loadBalancerName) } return nil @@ -1377,7 +1382,7 @@ func (c *Cloud) getLoadBalancerTLSPorts(loadBalancer *elb.LoadBalancerDescriptio } func (c *Cloud) ensureSSLNegotiationPolicy(loadBalancer *elb.LoadBalancerDescription, policyName string) error { - glog.V(2).Info("Describing load balancer policies on load balancer") + klog.V(2).Info("Describing load balancer policies on load balancer") result, err := c.elb.DescribeLoadBalancerPolicies(&elb.DescribeLoadBalancerPoliciesInput{ LoadBalancerName: loadBalancer.LoadBalancerName, PolicyNames: []*string{ @@ -1398,7 +1403,7 @@ func (c *Cloud) ensureSSLNegotiationPolicy(loadBalancer *elb.LoadBalancerDescrip return nil } - glog.V(2).Infof("Creating SSL negotiation policy '%s' on load balancer", fmt.Sprintf(SSLNegotiationPolicyNameFormat, policyName)) + klog.V(2).Infof("Creating SSL negotiation policy '%s' on load balancer", fmt.Sprintf(SSLNegotiationPolicyNameFormat, policyName)) // there is an upper limit of 98 policies on an ELB, we're pretty safe from // running into it _, err = c.elb.CreateLoadBalancerPolicy(&elb.CreateLoadBalancerPolicyInput{ @@ -1427,7 +1432,7 @@ func (c *Cloud) setSSLNegotiationPolicy(loadBalancerName, sslPolicyName string, aws.String(policyName), }, } - glog.V(2).Infof("Setting SSL negotiation policy '%s' on load balancer", policyName) + klog.V(2).Infof("Setting SSL negotiation policy '%s' on load balancer", policyName) _, err := c.elb.SetLoadBalancerPoliciesOfListener(request) if err != nil { return fmt.Errorf("error setting SSL negotiation policy '%s' on load balancer: %q", policyName, err) @@ -1447,7 +1452,7 @@ func (c *Cloud) createProxyProtocolPolicy(loadBalancerName string) error { }, }, } - glog.V(2).Info("Creating proxy protocol policy on load balancer") + klog.V(2).Info("Creating proxy protocol policy on load balancer") _, err := c.elb.CreateLoadBalancerPolicy(request) if err != nil { return fmt.Errorf("error creating proxy protocol policy on load balancer: %q", err) @@ -1463,9 +1468,9 @@ func (c *Cloud) setBackendPolicies(loadBalancerName string, instancePort int64, PolicyNames: policies, } if len(policies) > 0 { - glog.V(2).Infof("Adding AWS loadbalancer backend policies on node port %d", instancePort) + klog.V(2).Infof("Adding AWS loadbalancer backend policies on node port %d", instancePort) } else { - glog.V(2).Infof("Removing AWS loadbalancer backend policies on node port %d", instancePort) + klog.V(2).Infof("Removing AWS loadbalancer backend policies on node port %d", instancePort) } _, err := c.elb.SetLoadBalancerPoliciesForBackendServer(request) if err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_routes.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_routes.go index 3ef0ddce575f..2827596dce49 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_routes.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_routes.go @@ -22,8 +22,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/golang/glog" - "k8s.io/kubernetes/pkg/cloudprovider" + "k8s.io/klog" + + cloudprovider "k8s.io/cloud-provider" ) func (c *Cloud) findRouteTable(clusterName string) 
(*ec2.RouteTable, error) { @@ -116,7 +117,7 @@ func (c *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudpro route.TargetNode = mapInstanceToNodeName(instance) routes = append(routes, route) } else { - glog.Warningf("unable to find instance ID %s in the list of instances being routed to", instanceID) + klog.Warningf("unable to find instance ID %s in the list of instances being routed to", instanceID) } } } @@ -171,7 +172,7 @@ func (c *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint st } if deleteRoute != nil { - glog.Infof("deleting blackholed route: %s", aws.StringValue(deleteRoute.DestinationCidrBlock)) + klog.Infof("deleting blackholed route: %s", aws.StringValue(deleteRoute.DestinationCidrBlock)) request := &ec2.DeleteRouteInput{} request.DestinationCidrBlock = deleteRoute.DestinationCidrBlock diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_utils.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_utils.go index b56699f854b7..bd373d64e588 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_utils.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_utils.go @@ -18,6 +18,7 @@ package aws import ( "github.com/aws/aws-sdk-go/aws" + "k8s.io/apimachinery/pkg/util/sets" ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/device_allocator.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/device_allocator.go index 490b673bb189..ae5b0275773a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/device_allocator.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/device_allocator.go @@ -27,18 +27,20 @@ import ( // can be used for anything that DeviceAllocator user wants. // Only the relevant part of device name should be in the map, e.g. "ba" for // "/dev/xvdba". -type ExistingDevices map[mountDevice]awsVolumeID +type ExistingDevices map[mountDevice]EBSVolumeID -// On AWS, we should assign new (not yet used) device names to attached volumes. -// If we reuse a previously used name, we may get the volume "attaching" forever, -// see https://aws.amazon.com/premiumsupport/knowledge-center/ebs-stuck-attaching/. // DeviceAllocator finds available device name, taking into account already // assigned device names from ExistingDevices map. It tries to find the next // device name to the previously assigned one (from previous DeviceAllocator // call), so all available device names are used eventually and it minimizes // device name reuse. +// // All these allocations are in-memory, nothing is written to / read from // /dev directory. +// +// On AWS, we should assign new (not yet used) device names to attached volumes. +// If we reuse a previously used name, we may get the volume "attaching" forever, +// see https://aws.amazon.com/premiumsupport/knowledge-center/ebs-stuck-attaching/. type DeviceAllocator interface { // GetNext returns a free device name or error when there is no free device // name. Only the device suffix is returned, e.g. "ba" for "/dev/xvdba". 
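The doc comments above describe the allocator's contract: hand out two-letter suffixes (ba..bz, ca..cz), always moving forward from the most recent assignment so released names are reused as late as possible, with everything held in memory. A minimal, self-contained sketch of that ring idea — simplified hypothetical types, not the vendored `mountDevice`/`DeviceAllocator` implementation:

```go
package main

import "fmt"

// suffix is a device-name suffix, e.g. "ba" for "/dev/xvdba".
type suffix string

// ringAllocator hands out suffixes ba..bz, ca..cz in order, resuming
// after the most recent assignment so names are reused as late as possible.
type ringAllocator struct {
	ring []suffix
	last int // index of the most recent assignment
}

func newRingAllocator() *ringAllocator {
	r := &ringAllocator{}
	for _, first := range []rune{'b', 'c'} {
		for second := 'a'; second <= 'z'; second++ {
			r.ring = append(r.ring, suffix(string([]rune{first, second})))
		}
	}
	return r
}

// getNext returns the first suffix after the previous assignment that is
// not already in use, wrapping around the ring at most once.
func (r *ringAllocator) getNext(inUse map[suffix]bool) (suffix, error) {
	for i := 1; i <= len(r.ring); i++ {
		idx := (r.last + i) % len(r.ring)
		if !inUse[r.ring[idx]] {
			r.last = idx
			return r.ring[idx], nil
		}
	}
	return "", fmt.Errorf("no free device suffix")
}

func main() {
	alloc := newRingAllocator()
	used := map[suffix]bool{"ba": true}
	next, _ := alloc.getNext(used)
	fmt.Println(next) // bb
}
```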
@@ -74,9 +76,9 @@ func (p devicePairList) Len() int { return len(p) } func (p devicePairList) Less(i, j int) bool { return p[i].deviceIndex < p[j].deviceIndex } func (p devicePairList) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -// Allocates device names according to scheme ba..bz, ca..cz -// it moves along the ring and always picks next device until -// device list is exhausted. +// NewDeviceAllocator allocates device names according to scheme ba..bz, ca..cz +// it moves along the ring and always picks next device until device list is +// exhausted. func NewDeviceAllocator() DeviceAllocator { possibleDevices := make(map[mountDevice]int) for _, firstChar := range []rune{'b', 'c'} { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/instances.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/instances.go index a42834415dd0..60b10abc6a81 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/instances.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/instances.go @@ -19,15 +19,16 @@ package aws import ( "fmt" "net/url" + "regexp" "strings" + "sync" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/golang/glog" + "k8s.io/klog" + "k8s.io/api/core/v1" - "regexp" - "sync" - "time" ) // awsInstanceRegMatch represents Regex Match for AWS instance. @@ -109,12 +110,12 @@ func mapToAWSInstanceIDsTolerant(nodes []*v1.Node) []awsInstanceID { var instanceIDs []awsInstanceID for _, node := range nodes { if node.Spec.ProviderID == "" { - glog.Warningf("node %q did not have ProviderID set", node.Name) + klog.Warningf("node %q did not have ProviderID set", node.Name) continue } instanceID, err := kubernetesInstanceID(node.Spec.ProviderID).mapToAWSInstanceID() if err != nil { - glog.Warningf("unable to parse ProviderID %q for node %q", node.Spec.ProviderID, node.Name) + klog.Warningf("unable to parse ProviderID %q for node %q", node.Spec.ProviderID, node.Name) continue } instanceIDs = append(instanceIDs, instanceID) @@ -155,7 +156,7 @@ type instanceCache struct { func (c *instanceCache) describeAllInstancesUncached() (*allInstancesSnapshot, error) { now := time.Now() - glog.V(4).Infof("EC2 DescribeInstances - fetching all instances") + klog.V(4).Infof("EC2 DescribeInstances - fetching all instances") filters := []*ec2.Filter{} instances, err := c.cloud.describeInstances(filters) @@ -176,7 +177,7 @@ func (c *instanceCache) describeAllInstancesUncached() (*allInstancesSnapshot, e if c.snapshot != nil && snapshot.olderThan(c.snapshot) { // If this happens a lot, we could run this function in a mutex and only return one result - glog.Infof("Not caching concurrent AWS DescribeInstances results") + klog.Infof("Not caching concurrent AWS DescribeInstances results") } else { c.snapshot = snapshot } @@ -209,7 +210,7 @@ func (c *instanceCache) describeAllInstancesCached(criteria cacheCriteria) (*all return nil, err } } else { - glog.V(6).Infof("EC2 DescribeInstances - using cached results") + klog.V(6).Infof("EC2 DescribeInstances - using cached results") } return snapshot, nil @@ -235,7 +236,7 @@ func (s *allInstancesSnapshot) MeetsCriteria(criteria cacheCriteria) bool { // Sub() is technically broken by time changes until we have monotonic time now := time.Now() if now.Sub(s.timestamp) > criteria.MaxAge { - glog.V(6).Infof("instanceCache snapshot cannot be used as is older than MaxAge=%s", criteria.MaxAge) + 
klog.V(6).Infof("instanceCache snapshot cannot be used as is older than MaxAge=%s", criteria.MaxAge) return false } } @@ -243,7 +244,7 @@ func (s *allInstancesSnapshot) MeetsCriteria(criteria cacheCriteria) bool { if len(criteria.HasInstances) != 0 { for _, id := range criteria.HasInstances { if nil == s.instances[id] { - glog.V(6).Infof("instanceCache snapshot cannot be used as does not contain instance %s", id) + klog.V(6).Infof("instanceCache snapshot cannot be used as does not contain instance %s", id) return false } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/log_handler.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/log_handler.go index 86aa30628db1..9328fd284ac7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/log_handler.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/log_handler.go @@ -18,23 +18,23 @@ package aws import ( "github.com/aws/aws-sdk-go/aws/request" - "github.com/golang/glog" + "k8s.io/klog" ) // Handler for aws-sdk-go that logs all requests func awsHandlerLogger(req *request.Request) { service, name := awsServiceAndName(req) - glog.V(4).Infof("AWS request: %s %s", service, name) + klog.V(4).Infof("AWS request: %s %s", service, name) } func awsSendHandlerLogger(req *request.Request) { service, name := awsServiceAndName(req) - glog.V(4).Infof("AWS API Send: %s %s %v %v", service, name, req.Operation, req.Params) + klog.V(4).Infof("AWS API Send: %s %s %v %v", service, name, req.Operation, req.Params) } func awsValidateResponseHandlerLogger(req *request.Request) { service, name := awsServiceAndName(req) - glog.V(4).Infof("AWS API ValidateResponse: %s %s %v %v %s", service, name, req.Operation, req.Params, req.HTTPResponse.Status) + klog.V(4).Infof("AWS API ValidateResponse: %s %s %v %v %s", service, name, req.Operation, req.Params, req.HTTPResponse.Status) } func awsServiceAndName(req *request.Request) (string, string) { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/regions.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/regions.go index dc57447729b3..f19bab6eb550 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/regions.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/regions.go @@ -17,18 +17,21 @@ limitations under the License. package aws import ( - "github.com/golang/glog" + "sync" + + "k8s.io/klog" + "k8s.io/apimachinery/pkg/util/sets" awscredentialprovider "k8s.io/kubernetes/pkg/credentialprovider/aws" - "sync" ) -// WellKnownRegions is the complete list of regions known to the AWS cloudprovider +// wellKnownRegions is the complete list of regions known to the AWS cloudprovider // and credentialprovider. 
-var WellKnownRegions = [...]string{ +var wellKnownRegions = [...]string{ // from `aws ec2 describe-regions --region us-east-1 --query Regions[].RegionName | sort` "ap-northeast-1", "ap-northeast-2", + "ap-northeast-3", "ap-south-1", "ap-southeast-1", "ap-southeast-2", @@ -36,6 +39,7 @@ var WellKnownRegions = [...]string{ "eu-central-1", "eu-west-1", "eu-west-2", + "eu-west-3", "sa-east-1", "us-east-1", "us-east-2", @@ -44,6 +48,7 @@ var WellKnownRegions = [...]string{ // these are not registered in many / most accounts "cn-north-1", + "cn-northwest-1", "us-gov-west-1", } @@ -53,12 +58,12 @@ var awsRegionsMutex sync.Mutex // awsRegions is a set of recognized regions var awsRegions sets.String -// RecognizeRegion is called for each AWS region we know about. +// recognizeRegion is called for each AWS region we know about. // It currently registers a credential provider for that region. // There are two paths to discovering a region: // * we hard-code some well-known regions // * if a region is discovered from instance metadata, we add that -func RecognizeRegion(region string) { +func recognizeRegion(region string) { awsRegionsMutex.Lock() defer awsRegionsMutex.Unlock() @@ -67,21 +72,21 @@ func RecognizeRegion(region string) { } if awsRegions.Has(region) { - glog.V(6).Infof("found AWS region %q again - ignoring", region) + klog.V(6).Infof("found AWS region %q again - ignoring", region) return } - glog.V(4).Infof("found AWS region %q", region) + klog.V(4).Infof("found AWS region %q", region) awscredentialprovider.RegisterCredentialsProvider(region) awsRegions.Insert(region) } -// RecognizeWellKnownRegions calls RecognizeRegion on each WellKnownRegion -func RecognizeWellKnownRegions() { - for _, region := range WellKnownRegions { - RecognizeRegion(region) +// recognizeWellKnownRegions calls RecognizeRegion on each WellKnownRegion +func recognizeWellKnownRegions() { + for _, region := range wellKnownRegions { + recognizeRegion(region) } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/retry_handler.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/retry_handler.go index f403168e620c..0fe6c2a57534 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/retry_handler.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/retry_handler.go @@ -24,7 +24,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -40,19 +40,19 @@ type CrossRequestRetryDelay struct { backoff Backoff } -// Create a new CrossRequestRetryDelay +// NewCrossRequestRetryDelay creates a new CrossRequestRetryDelay func NewCrossRequestRetryDelay() *CrossRequestRetryDelay { c := &CrossRequestRetryDelay{} c.backoff.init(decayIntervalSeconds, decayFraction, maxDelay) return c } -// Added to the Sign chain; called before each request +// BeforeSign is added to the Sign chain; called before each request func (c *CrossRequestRetryDelay) BeforeSign(r *request.Request) { now := time.Now() delay := c.backoff.ComputeDelayForRequest(now) if delay > 0 { - glog.Warningf("Inserting delay before AWS request (%s) to avoid RequestLimitExceeded: %s", + klog.Warningf("Inserting delay before AWS request (%s) to avoid RequestLimitExceeded: %s", describeRequest(r), delay.String()) if sleepFn := r.Config.SleepDelay; sleepFn != nil { @@ -84,7 +84,7 @@ func describeRequest(r *request.Request) string 
{ return service + "::" + operationName(r) } -// Added to the AfterRetry chain; called after any error +// AfterRetry is added to the AfterRetry chain; called after any error func (c *CrossRequestRetryDelay) AfterRetry(r *request.Request) { if r.Error == nil { return @@ -96,7 +96,7 @@ func (c *CrossRequestRetryDelay) AfterRetry(r *request.Request) { if awsError.Code() == "RequestLimitExceeded" { c.backoff.ReportError() recordAWSThrottlesMetric(operationName(r)) - glog.Warningf("Got RequestLimitExceeded error on AWS request (%s)", + klog.Warningf("Got RequestLimitExceeded error on AWS request (%s)", describeRequest(r)) } } @@ -126,7 +126,8 @@ func (b *Backoff) init(decayIntervalSeconds int, decayFraction float64, maxDelay b.maxDelay = maxDelay } -// Computes the delay required for a request, also updating internal state to count this request +// ComputeDelayForRequest computes the delay required for a request, also +// updates internal state to count this request func (b *Backoff) ComputeDelayForRequest(now time.Time) time.Duration { b.mutex.Lock() defer b.mutex.Unlock() @@ -165,7 +166,7 @@ func (b *Backoff) ComputeDelayForRequest(now time.Time) time.Duration { return time.Second * time.Duration(int(delay.Seconds())) } -// Called when we observe a throttling error +// ReportError is called when we observe a throttling error func (b *Backoff) ReportError() { b.mutex.Lock() defer b.mutex.Unlock() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/sets_ippermissions.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/sets_ippermissions.go index 8a268c4b0e27..d71948f8d22d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/sets_ippermissions.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/sets_ippermissions.go @@ -23,8 +23,10 @@ import ( "github.com/aws/aws-sdk-go/service/ec2" ) +// IPPermissionSet maps IP strings of strings to EC2 IpPermissions type IPPermissionSet map[string]*ec2.IpPermission +// NewIPPermissionSet creates a new IPPermissionSet func NewIPPermissionSet(items ...*ec2.IpPermission) IPPermissionSet { s := make(IPPermissionSet) s.Insert(items...) @@ -97,10 +99,10 @@ func (s IPPermissionSet) List() []*ec2.IpPermission { return res } -// IsSuperset returns true if and only if s1 is a superset of s2. -func (s1 IPPermissionSet) IsSuperset(s2 IPPermissionSet) bool { +// IsSuperset returns true if and only if s is a superset of s2. +func (s IPPermissionSet) IsSuperset(s2 IPPermissionSet) bool { for k := range s2 { - _, found := s1[k] + _, found := s[k] if !found { return false } @@ -108,11 +110,11 @@ func (s1 IPPermissionSet) IsSuperset(s2 IPPermissionSet) bool { return true } -// Equal returns true if and only if s1 is equal (as a set) to s2. +// Equal returns true if and only if s is equal (as a set) to s2. // Two sets are equal if their membership is identical. 
// (In practice, this means same elements, order doesn't matter) -func (s1 IPPermissionSet) Equal(s2 IPPermissionSet) bool { - return len(s1) == len(s2) && s1.IsSuperset(s2) +func (s IPPermissionSet) Equal(s2 IPPermissionSet) bool { + return len(s) == len(s2) && s.IsSuperset(s2) } // Difference returns a set of objects that are not in s2 diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/tags.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/tags.go index 43130c3601f6..de6cad543e35 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/tags.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/tags.go @@ -18,12 +18,12 @@ package aws import ( "fmt" - "strings" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/golang/glog" + "k8s.io/klog" + "k8s.io/apimachinery/pkg/util/wait" ) @@ -38,6 +38,7 @@ const TagNameKubernetesClusterPrefix = "kubernetes.io/cluster/" // did not allow shared resources. const TagNameKubernetesClusterLegacy = "KubernetesCluster" +// ResourceLifecycle is the cluster lifecycle state used in tagging type ResourceLifecycle string const ( @@ -73,7 +74,7 @@ func (t *awsTagging) init(legacyClusterID string, clusterID string) error { t.ClusterID = clusterID if clusterID != "" { - glog.Infof("AWS cloud filtering on ClusterID: %v", clusterID) + klog.Infof("AWS cloud filtering on ClusterID: %v", clusterID) } else { return fmt.Errorf("AWS cloud failed to find ClusterID") } @@ -91,7 +92,7 @@ func (t *awsTagging) initFromTags(tags []*ec2.Tag) error { } if legacyClusterID == "" && newClusterID == "" { - glog.Errorf("Tag %q nor %q not found; Kubernetes may behave unexpectedly.", TagNameKubernetesClusterLegacy, TagNameKubernetesClusterPrefix+"...") + klog.Errorf("Tag %q nor %q not found; Kubernetes may behave unexpectedly.", TagNameKubernetesClusterLegacy, TagNameKubernetesClusterPrefix+"...") } return t.init(legacyClusterID, newClusterID) @@ -152,13 +153,13 @@ func (t *awsTagging) hasClusterTag(tags []*ec2.Tag) bool { // Ensure that a resource has the correct tags // If it has no tags, we assume that this was a problem caused by an error in between creation and tagging, // and we add the tags. If it has a different cluster's tags, that is an error. -func (c *awsTagging) readRepairClusterTags(client EC2, resourceID string, lifecycle ResourceLifecycle, additionalTags map[string]string, observedTags []*ec2.Tag) error { +func (t *awsTagging) readRepairClusterTags(client EC2, resourceID string, lifecycle ResourceLifecycle, additionalTags map[string]string, observedTags []*ec2.Tag) error { actualTagMap := make(map[string]string) for _, tag := range observedTags { actualTagMap[aws.StringValue(tag.Key)] = aws.StringValue(tag.Value) } - expectedTags := c.buildTags(lifecycle, additionalTags) + expectedTags := t.buildTags(lifecycle, additionalTags) addTags := make(map[string]string) for k, expected := range expectedTags { @@ -167,7 +168,7 @@ func (c *awsTagging) readRepairClusterTags(client EC2, resourceID string, lifecy continue } if actual == "" { - glog.Warningf("Resource %q was missing expected cluster tag %q. Will add (with value %q)", resourceID, k, expected) + klog.Warningf("Resource %q was missing expected cluster tag %q. 
Will add (with value %q)", resourceID, k, expected) addTags[k] = expected } else { return fmt.Errorf("resource %q has tag belonging to another cluster: %q=%q (expected %q)", resourceID, k, actual, expected) @@ -178,7 +179,7 @@ func (c *awsTagging) readRepairClusterTags(client EC2, resourceID string, lifecy return nil } - if err := c.createTags(client, resourceID, lifecycle, addTags); err != nil { + if err := t.createTags(client, resourceID, lifecycle, addTags); err != nil { return fmt.Errorf("error adding missing tags to resource %q: %q", resourceID, err) } @@ -222,7 +223,7 @@ func (t *awsTagging) createTags(client EC2, resourceID string, lifecycle Resourc // We could check that the error is retryable, but the error code changes based on what we are tagging // SecurityGroup: InvalidGroup.NotFound - glog.V(2).Infof("Failed to create tags; will retry. Error was %q", err) + klog.V(2).Infof("Failed to create tags; will retry. Error was %q", err) lastErr = err return false, nil }) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/volumes.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/volumes.go index cb3a2aa275ac..7031c5a52c14 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/volumes.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/volumes.go @@ -24,21 +24,22 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/golang/glog" + "k8s.io/klog" + "k8s.io/apimachinery/pkg/types" ) // awsVolumeRegMatch represents Regex Match for AWS volume. var awsVolumeRegMatch = regexp.MustCompile("^vol-[^/]*$") -// awsVolumeID represents the ID of the volume in the AWS API, e.g. vol-12345678 -// The "traditional" format is "vol-12345678" -// A new longer format is also being introduced: "vol-12345678abcdef01" -// We should not assume anything about the length or format, though it seems -// reasonable to assume that volumes will continue to start with "vol-". -type awsVolumeID string +// EBSVolumeID represents the ID of the volume in the AWS API, e.g. +// vol-12345678 The "traditional" format is "vol-12345678" A new longer format +// is also being introduced: "vol-12345678abcdef01" We should not assume +// anything about the length or format, though it seems reasonable to assume +// that volumes will continue to start with "vol-". 
+type EBSVolumeID string -func (i awsVolumeID) awsString() *string { +func (i EBSVolumeID) awsString() *string { return aws.String(string(i)) } @@ -59,8 +60,8 @@ type diskInfo struct { disk *awsDisk } -// MapToAWSVolumeID extracts the awsVolumeID from the KubernetesVolumeID -func (name KubernetesVolumeID) MapToAWSVolumeID() (awsVolumeID, error) { +// MapToAWSVolumeID extracts the EBSVolumeID from the KubernetesVolumeID +func (name KubernetesVolumeID) MapToAWSVolumeID() (EBSVolumeID, error) { // name looks like aws://availability-zone/awsVolumeId // The original idea of the URL-style name was to put the AZ into the @@ -96,9 +97,10 @@ func (name KubernetesVolumeID) MapToAWSVolumeID() (awsVolumeID, error) { return "", fmt.Errorf("Invalid format for AWS volume (%s)", name) } - return awsVolumeID(awsID), nil + return EBSVolumeID(awsID), nil } +// GetAWSVolumeID converts a Kubernetes volume ID to an AWS volume ID func GetAWSVolumeID(kubeVolumeID string) (string, error) { kid := KubernetesVolumeID(kubeVolumeID) awsID, err := kid.MapToAWSVolumeID() @@ -119,7 +121,7 @@ func (c *Cloud) checkIfAttachedToNode(diskName KubernetesVolumeID, nodeName type info, err := disk.describeVolume() if err != nil { - glog.Warningf("Error describing volume %s with %v", diskName, err) + klog.Warningf("Error describing volume %s with %v", diskName, err) awsDiskInfo.volumeState = "unknown" return awsDiskInfo, false, err } @@ -136,7 +138,7 @@ func (c *Cloud) checkIfAttachedToNode(diskName KubernetesVolumeID, nodeName type // has been deleted if err != nil { fetchErr := fmt.Errorf("Error fetching instance %s for volume %s", instanceID, diskName) - glog.Warning(fetchErr) + klog.Warning(fetchErr) return awsDiskInfo, false, fetchErr } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/BUILD index 4999c921f656..d4116a63893e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/BUILD @@ -37,35 +37,39 @@ go_library( importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/azure", deps = [ "//pkg/api/v1/service:go_default_library", - "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/azure/auth:go_default_library", - "//pkg/controller:go_default_library", "//pkg/kubelet/apis:go_default_library", "//pkg/version:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", + "//staging/src/k8s.io/client-go/tools/record:go_default_library", 
"//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", - "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute:go_default_library", + "//staging/src/k8s.io/cloud-provider:go_default_library", + "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network:go_default_library", - "//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage:go_default_library", + "//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/storage:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library", - "//vendor/github.com/ghodss/yaml:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/github.com/rubiojr/go-vhd/vhd:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/sigs.k8s.io/yaml:go_default_library", ], ) @@ -90,16 +94,16 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/api/v1/service:go_default_library", - "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/azure/auth:go_default_library", "//pkg/kubelet/apis:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute:go_default_library", + "//staging/src/k8s.io/cloud-provider:go_default_library", + "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network:go_default_library", - "//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage:go_default_library", + "//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth/BUILD index cc733d385aa0..a0eca412154e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth/BUILD @@ -8,8 +8,8 @@ go_library( deps = [ "//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/crypto/pkcs12:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git 
a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth/azure_auth.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth/azure_auth.go index 08f894d72167..6a651eb05c0b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth/azure_auth.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth/azure_auth.go @@ -24,8 +24,8 @@ import ( "github.com/Azure/go-autorest/autorest/adal" "github.com/Azure/go-autorest/autorest/azure" - "github.com/golang/glog" "golang.org/x/crypto/pkcs12" + "k8s.io/klog" ) // AzureAuthConfig holds auth related part of cloud config @@ -55,18 +55,18 @@ type AzureAuthConfig struct { // GetServicePrincipalToken creates a new service principal token based on the configuration func GetServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment) (*adal.ServicePrincipalToken, error) { if config.UseManagedIdentityExtension { - glog.V(2).Infoln("azure: using managed identity extension to retrieve access token") + klog.V(2).Infoln("azure: using managed identity extension to retrieve access token") msiEndpoint, err := adal.GetMSIVMEndpoint() if err != nil { return nil, fmt.Errorf("Getting the managed service identity endpoint: %v", err) } if len(config.UserAssignedIdentityID) > 0 { - glog.V(4).Info("azure: using User Assigned MSI ID to retrieve access token") + klog.V(4).Info("azure: using User Assigned MSI ID to retrieve access token") return adal.NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, env.ServiceManagementEndpoint, config.UserAssignedIdentityID) } - glog.V(4).Info("azure: using System Assigned MSI to retrieve access token") + klog.V(4).Info("azure: using System Assigned MSI to retrieve access token") return adal.NewServicePrincipalTokenFromMSI( msiEndpoint, env.ServiceManagementEndpoint) @@ -78,7 +78,7 @@ func GetServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment) ( } if len(config.AADClientSecret) > 0 { - glog.V(2).Infoln("azure: using client_id+client_secret to retrieve access token") + klog.V(2).Infoln("azure: using client_id+client_secret to retrieve access token") return adal.NewServicePrincipalToken( *oauthConfig, config.AADClientID, @@ -87,7 +87,7 @@ func GetServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment) ( } if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 { - glog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve access token") + klog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve access token") certData, err := ioutil.ReadFile(config.AADClientCertPath) if err != nil { return nil, fmt.Errorf("reading the client certificate from file %s: %v", config.AADClientCertPath, err) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure.go index 5263773e59e7..e2205c37fbe4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure.go @@ -28,18 +28,21 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/informers" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" 
"k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" "k8s.io/client-go/util/flowcontrol" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" "k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth" - "k8s.io/kubernetes/pkg/controller" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/version" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" - "github.com/ghodss/yaml" - "github.com/golang/glog" + "k8s.io/klog" + "sigs.k8s.io/yaml" ) const ( @@ -156,7 +159,7 @@ type Cloud struct { DisksClient DisksClient FileClient FileClient resourceRequestBackoff wait.Backoff - metadata *InstanceMetadata + metadata *InstanceMetadataService vmSet VMSet // Lock for access to node caches, includes nodeZones, nodeResourceGroups, and unmanagedNodes. @@ -183,6 +186,10 @@ type Cloud struct { // client for vm sizes list VirtualMachineSizesClient VirtualMachineSizesClient + kubeClient clientset.Interface + eventBroadcaster record.EventBroadcaster + eventRecorder record.EventRecorder + vmCache *timedCache lbCache *timedCache nsgCache *timedCache @@ -248,11 +255,11 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { config.CloudProviderRateLimitQPSWrite, config.CloudProviderRateLimitBucketWrite) - glog.V(2).Infof("Azure cloudprovider (read ops) using rate limit config: QPS=%g, bucket=%d", + klog.V(2).Infof("Azure cloudprovider (read ops) using rate limit config: QPS=%g, bucket=%d", config.CloudProviderRateLimitQPS, config.CloudProviderRateLimitBucket) - glog.V(2).Infof("Azure cloudprovider (write ops) using rate limit config: QPS=%g, bucket=%d", + klog.V(2).Infof("Azure cloudprovider (write ops) using rate limit config: QPS=%g, bucket=%d", config.CloudProviderRateLimitQPSWrite, config.CloudProviderRateLimitBucketWrite) } @@ -314,14 +321,17 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { Duration: time.Duration(az.CloudProviderBackoffDuration) * time.Second, Jitter: az.CloudProviderBackoffJitter, } - glog.V(2).Infof("Azure cloudprovider using try backoff: retries=%d, exponent=%f, duration=%d, jitter=%f", + klog.V(2).Infof("Azure cloudprovider using try backoff: retries=%d, exponent=%f, duration=%d, jitter=%f", az.CloudProviderBackoffRetries, az.CloudProviderBackoffExponent, az.CloudProviderBackoffDuration, az.CloudProviderBackoffJitter) } - az.metadata = NewInstanceMetadata() + az.metadata, err = NewInstanceMetadataService(metadataURL) + if err != nil { + return nil, err + } if az.MaximumLoadBalancerRuleCount == 0 { az.MaximumLoadBalancerRuleCount = maximumLoadBalancerRuleCount @@ -383,7 +393,12 @@ func parseConfig(configReader io.Reader) (*Config, error) { } // Initialize passes a Kubernetes clientBuilder interface to the cloud provider -func (az *Cloud) Initialize(clientBuilder controller.ControllerClientBuilder) {} +func (az *Cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) { + az.kubeClient = clientBuilder.ClientOrDie("azure-cloud-provider") + az.eventBroadcaster = record.NewBroadcaster() + az.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: az.kubeClient.CoreV1().Events("")}) + az.eventRecorder = az.eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "azure-cloud-provider"}) +} // LoadBalancer returns a balancer interface. Also returns true if the interface is supported, false otherwise. 
func (az *Cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) { @@ -464,7 +479,7 @@ func initDiskControllers(az *Cloud) error { // SetInformers sets informers for Azure cloud provider. func (az *Cloud) SetInformers(informerFactory informers.SharedInformerFactory) { - glog.Infof("Setting up informers for Azure cloud provider") + klog.Infof("Setting up informers for Azure cloud provider") nodeInformer := informerFactory.Core().V1().Nodes().Informer() nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { @@ -487,12 +502,12 @@ func (az *Cloud) SetInformers(informerFactory informers.SharedInformerFactory) { if !isNode { deletedState, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { - glog.Errorf("Received unexpected object: %v", obj) + klog.Errorf("Received unexpected object: %v", obj) return } node, ok = deletedState.Obj.(*v1.Node) if !ok { - glog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj) + klog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj) return } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_backoff.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_backoff.go index 449980980332..c0a6f4a34ee0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_backoff.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_backoff.go @@ -18,16 +18,18 @@ package azure import ( "context" + "fmt" "net/http" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network" - "github.com/Azure/go-autorest/autorest" - "github.com/golang/glog" + "k8s.io/klog" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" ) // requestBackoff if backoff is disabled in cloud provider it @@ -45,6 +47,13 @@ func (az *Cloud) requestBackoff() (resourceRequestBackoff wait.Backoff) { return resourceRequestBackoff } +// Event creates a event for the specified object. 
+func (az *Cloud) Event(obj runtime.Object, eventtype, reason, message string) { + if obj != nil && reason != "" { + az.eventRecorder.Event(obj, eventtype, reason, message) + } +} + // GetVirtualMachineWithRetry invokes az.getVirtualMachine with exponential backoff retry func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.VirtualMachine, error) { var machine compute.VirtualMachine @@ -55,10 +64,10 @@ func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.Virtua return true, cloudprovider.InstanceNotFound } if retryErr != nil { - glog.Errorf("GetVirtualMachineWithRetry(%s): backoff failure, will retry, err=%v", name, retryErr) + klog.Errorf("GetVirtualMachineWithRetry(%s): backoff failure, will retry, err=%v", name, retryErr) return false, nil } - glog.V(2).Infof("GetVirtualMachineWithRetry(%s): backoff success", name) + klog.V(2).Infof("GetVirtualMachineWithRetry(%s): backoff success", name) return true, nil }) if err == wait.ErrWaitTimeout { @@ -77,12 +86,12 @@ func (az *Cloud) VirtualMachineClientListWithRetry(resourceGroup string) ([]comp defer cancel() allNodes, retryErr = az.VirtualMachinesClient.List(ctx, resourceGroup) if retryErr != nil { - glog.Errorf("VirtualMachinesClient.List(%v) - backoff: failure, will retry,err=%v", + klog.Errorf("VirtualMachinesClient.List(%v) - backoff: failure, will retry,err=%v", resourceGroup, retryErr) return false, retryErr } - glog.V(2).Infof("VirtualMachinesClient.List(%v) - backoff: success", resourceGroup) + klog.V(2).Infof("VirtualMachinesClient.List(%v) - backoff: success", resourceGroup) return true, nil }) if err != nil { @@ -99,24 +108,24 @@ func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, string, var retryErr error ip, publicIP, retryErr = az.getIPForMachine(name) if retryErr != nil { - glog.Errorf("GetIPForMachineWithRetry(%s): backoff failure, will retry,err=%v", name, retryErr) + klog.Errorf("GetIPForMachineWithRetry(%s): backoff failure, will retry,err=%v", name, retryErr) return false, nil } - glog.V(2).Infof("GetIPForMachineWithRetry(%s): backoff success", name) + klog.V(2).Infof("GetIPForMachineWithRetry(%s): backoff success", name) return true, nil }) return ip, publicIP, err } // CreateOrUpdateSGWithRetry invokes az.SecurityGroupsClient.CreateOrUpdate with exponential backoff retry -func (az *Cloud) CreateOrUpdateSGWithRetry(sg network.SecurityGroup) error { +func (az *Cloud) CreateOrUpdateSGWithRetry(service *v1.Service, sg network.SecurityGroup) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { ctx, cancel := getContextWithCancel() defer cancel() resp, err := az.SecurityGroupsClient.CreateOrUpdate(ctx, az.ResourceGroup, *sg.Name, sg) - glog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%s): end", *sg.Name) - done, err := processHTTPRetryResponse(resp, err) + klog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%s): end", *sg.Name) + done, err := az.processHTTPRetryResponse(service, "CreateOrUpdateSecurityGroup", resp, err) if done && err == nil { // Invalidate the cache right after updating az.nsgCache.Delete(*sg.Name) @@ -126,14 +135,14 @@ func (az *Cloud) CreateOrUpdateSGWithRetry(sg network.SecurityGroup) error { } // CreateOrUpdateLBWithRetry invokes az.LoadBalancerClient.CreateOrUpdate with exponential backoff retry -func (az *Cloud) CreateOrUpdateLBWithRetry(lb network.LoadBalancer) error { +func (az *Cloud) CreateOrUpdateLBWithRetry(service *v1.Service, lb network.LoadBalancer) error { return 
wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { ctx, cancel := getContextWithCancel() defer cancel() resp, err := az.LoadBalancerClient.CreateOrUpdate(ctx, az.ResourceGroup, *lb.Name, lb) - glog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): end", *lb.Name) - done, err := processHTTPRetryResponse(resp, err) + klog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): end", *lb.Name) + done, err := az.processHTTPRetryResponse(service, "CreateOrUpdateLoadBalancer", resp, err) if done && err == nil { // Invalidate the cache right after updating az.lbCache.Delete(*lb.Name) @@ -143,7 +152,7 @@ func (az *Cloud) CreateOrUpdateLBWithRetry(lb network.LoadBalancer) error { } // ListLBWithRetry invokes az.LoadBalancerClient.List with exponential backoff retry -func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) { +func (az *Cloud) ListLBWithRetry(service *v1.Service) ([]network.LoadBalancer, error) { var allLBs []network.LoadBalancer err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { @@ -153,12 +162,13 @@ func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) { allLBs, retryErr = az.LoadBalancerClient.List(ctx, az.ResourceGroup) if retryErr != nil { - glog.Errorf("LoadBalancerClient.List(%v) - backoff: failure, will retry,err=%v", + az.Event(service, v1.EventTypeWarning, "ListLoadBalancers", retryErr.Error()) + klog.Errorf("LoadBalancerClient.List(%v) - backoff: failure, will retry,err=%v", az.ResourceGroup, retryErr) return false, retryErr } - glog.V(2).Infof("LoadBalancerClient.List(%v) - backoff: success", az.ResourceGroup) + klog.V(2).Infof("LoadBalancerClient.List(%v) - backoff: success", az.ResourceGroup) return true, nil }) if err != nil { @@ -169,7 +179,7 @@ func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) { } // ListPIPWithRetry list the PIP resources in the given resource group -func (az *Cloud) ListPIPWithRetry(pipResourceGroup string) ([]network.PublicIPAddress, error) { +func (az *Cloud) ListPIPWithRetry(service *v1.Service, pipResourceGroup string) ([]network.PublicIPAddress, error) { var allPIPs []network.PublicIPAddress err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { @@ -179,12 +189,13 @@ func (az *Cloud) ListPIPWithRetry(pipResourceGroup string) ([]network.PublicIPAd allPIPs, retryErr = az.PublicIPAddressesClient.List(ctx, pipResourceGroup) if retryErr != nil { - glog.Errorf("PublicIPAddressesClient.List(%v) - backoff: failure, will retry,err=%v", + az.Event(service, v1.EventTypeWarning, "ListPublicIPs", retryErr.Error()) + klog.Errorf("PublicIPAddressesClient.List(%v) - backoff: failure, will retry,err=%v", pipResourceGroup, retryErr) return false, retryErr } - glog.V(2).Infof("PublicIPAddressesClient.List(%v) - backoff: success", pipResourceGroup) + klog.V(2).Infof("PublicIPAddressesClient.List(%v) - backoff: success", pipResourceGroup) return true, nil }) if err != nil { @@ -195,48 +206,48 @@ func (az *Cloud) ListPIPWithRetry(pipResourceGroup string) ([]network.PublicIPAd } // CreateOrUpdatePIPWithRetry invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry -func (az *Cloud) CreateOrUpdatePIPWithRetry(pipResourceGroup string, pip network.PublicIPAddress) error { +func (az *Cloud) CreateOrUpdatePIPWithRetry(service *v1.Service, pipResourceGroup string, pip network.PublicIPAddress) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { ctx, cancel := getContextWithCancel() defer cancel() resp, err := 
az.PublicIPAddressesClient.CreateOrUpdate(ctx, pipResourceGroup, *pip.Name, pip) - glog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): end", pipResourceGroup, *pip.Name) - return processHTTPRetryResponse(resp, err) + klog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): end", pipResourceGroup, *pip.Name) + return az.processHTTPRetryResponse(service, "CreateOrUpdatePublicIPAddress", resp, err) }) } // CreateOrUpdateInterfaceWithRetry invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry -func (az *Cloud) CreateOrUpdateInterfaceWithRetry(nic network.Interface) error { +func (az *Cloud) CreateOrUpdateInterfaceWithRetry(service *v1.Service, nic network.Interface) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { ctx, cancel := getContextWithCancel() defer cancel() resp, err := az.InterfacesClient.CreateOrUpdate(ctx, az.ResourceGroup, *nic.Name, nic) - glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%s): end", *nic.Name) - return processHTTPRetryResponse(resp, err) + klog.V(10).Infof("InterfacesClient.CreateOrUpdate(%s): end", *nic.Name) + return az.processHTTPRetryResponse(service, "CreateOrUpdateInterface", resp, err) }) } // DeletePublicIPWithRetry invokes az.PublicIPAddressesClient.Delete with exponential backoff retry -func (az *Cloud) DeletePublicIPWithRetry(pipResourceGroup string, pipName string) error { +func (az *Cloud) DeletePublicIPWithRetry(service *v1.Service, pipResourceGroup string, pipName string) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { ctx, cancel := getContextWithCancel() defer cancel() resp, err := az.PublicIPAddressesClient.Delete(ctx, pipResourceGroup, pipName) - return processHTTPRetryResponse(resp, err) + return az.processHTTPRetryResponse(service, "DeletePublicIPAddress", resp, err) }) } // DeleteLBWithRetry invokes az.LoadBalancerClient.Delete with exponential backoff retry -func (az *Cloud) DeleteLBWithRetry(lbName string) error { +func (az *Cloud) DeleteLBWithRetry(service *v1.Service, lbName string) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { ctx, cancel := getContextWithCancel() defer cancel() resp, err := az.LoadBalancerClient.Delete(ctx, az.ResourceGroup, lbName) - done, err := processHTTPRetryResponse(resp, err) + done, err := az.processHTTPRetryResponse(service, "DeleteLoadBalancer", resp, err) if done && err == nil { // Invalidate the cache right after deleting az.lbCache.Delete(lbName) @@ -252,7 +263,7 @@ func (az *Cloud) CreateOrUpdateRouteTableWithRetry(routeTable network.RouteTable defer cancel() resp, err := az.RouteTablesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, routeTable) - return processHTTPRetryResponse(resp, err) + return az.processHTTPRetryResponse(nil, "", resp, err) }) } @@ -263,8 +274,8 @@ func (az *Cloud) CreateOrUpdateRouteWithRetry(route network.Route) error { defer cancel() resp, err := az.RoutesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, *route.Name, route) - glog.V(10).Infof("RoutesClient.CreateOrUpdate(%s): end", *route.Name) - return processHTTPRetryResponse(resp, err) + klog.V(10).Infof("RoutesClient.CreateOrUpdate(%s): end", *route.Name) + return az.processHTTPRetryResponse(nil, "", resp, err) }) } @@ -275,8 +286,8 @@ func (az *Cloud) DeleteRouteWithRetry(routeName string) error { defer cancel() resp, err := az.RoutesClient.Delete(ctx, az.ResourceGroup, az.RouteTableName, routeName) - glog.V(10).Infof("RoutesClient.Delete(%s): 
end", az.RouteTableName) - return processHTTPRetryResponse(resp, err) + klog.V(10).Infof("RoutesClient.Delete(%s): end", az.RouteTableName) + return az.processHTTPRetryResponse(nil, "", resp, err) }) } @@ -287,8 +298,8 @@ func (az *Cloud) CreateOrUpdateVMWithRetry(resourceGroup, vmName string, newVM c defer cancel() resp, err := az.VirtualMachinesClient.CreateOrUpdate(ctx, resourceGroup, vmName, newVM) - glog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s): end", vmName) - return processHTTPRetryResponse(resp, err) + klog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s): end", vmName) + return az.processHTTPRetryResponse(nil, "", resp, err) }) } @@ -296,40 +307,13 @@ func (az *Cloud) CreateOrUpdateVMWithRetry(resourceGroup, vmName string, newVM c func (az *Cloud) UpdateVmssVMWithRetry(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { resp, err := az.VirtualMachineScaleSetVMsClient.Update(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters) - glog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s,%s): end", VMScaleSetName, instanceID) - return processHTTPRetryResponse(resp, err) + klog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s,%s): end", VMScaleSetName, instanceID) + return az.processHTTPRetryResponse(nil, "", resp, err) }) } -// A wait.ConditionFunc function to deal with common HTTP backoff response conditions -func processRetryResponse(resp autorest.Response, err error) (bool, error) { - if isSuccessHTTPResponse(resp) { - glog.V(2).Infof("processRetryResponse: backoff success, HTTP response=%d", resp.StatusCode) - return true, nil - } - if shouldRetryAPIRequest(resp, err) { - glog.Errorf("processRetryResponse: backoff failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err) - // suppress the error object so that backoff process continues - return false, nil - } - // Fall-through: stop periodic backoff - return true, nil -} - -// shouldRetryAPIRequest determines if the response from an HTTP request suggests periodic retry behavior -func shouldRetryAPIRequest(resp autorest.Response, err error) bool { - if err != nil { - return true - } - // HTTP 4xx or 5xx suggests we should retry - if 399 < resp.StatusCode && resp.StatusCode < 600 { - return true - } - return false -} - // isSuccessHTTPResponse determines if the response from an HTTP request suggests success -func isSuccessHTTPResponse(resp autorest.Response) bool { +func isSuccessHTTPResponse(resp http.Response) bool { // HTTP 2xx suggests a successful response if 199 < resp.StatusCode && resp.StatusCode < 300 { return true @@ -352,19 +336,19 @@ func shouldRetryHTTPRequest(resp *http.Response, err error) bool { return false } -func processHTTPRetryResponse(resp *http.Response, err error) (bool, error) { - if resp != nil { +func (az *Cloud) processHTTPRetryResponse(service *v1.Service, reason string, resp *http.Response, err error) (bool, error) { + if resp != nil && isSuccessHTTPResponse(*resp) { // HTTP 2xx suggests a successful response - if 199 < resp.StatusCode && resp.StatusCode < 300 { - return true, nil - } + return true, nil } if shouldRetryHTTPRequest(resp, err) { if err != nil { - glog.Errorf("processHTTPRetryResponse: backoff failure, will retry, err=%v", err) + az.Event(service, v1.EventTypeWarning, reason, err.Error()) + klog.Errorf("processHTTPRetryResponse: backoff failure, will retry, err=%v", err) } else { - 
glog.Errorf("processHTTPRetryResponse: backoff failure, will retry, HTTP response=%d", resp.StatusCode) + az.Event(service, v1.EventTypeWarning, reason, fmt.Sprintf("Azure HTTP response %d", resp.StatusCode)) + klog.Errorf("processHTTPRetryResponse: backoff failure, will retry, HTTP response=%d", resp.StatusCode) } // suppress the error object so that backoff process continues diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_blobDiskController.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_blobDiskController.go index 093bfc03714c..9d0be7148941 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_blobDiskController.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_blobDiskController.go @@ -27,11 +27,11 @@ import ( "sync/atomic" "time" - "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage" + "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage" azstorage "github.com/Azure/azure-sdk-for-go/storage" "github.com/Azure/go-autorest/autorest/to" - "github.com/golang/glog" "github.com/rubiojr/go-vhd/vhd" + "k8s.io/klog" kwait "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/pkg/volume" @@ -68,7 +68,7 @@ func newBlobDiskController(common *controllerCommon) (*BlobDiskController, error // get accounts accounts, err := c.getAllStorageAccounts() if err != nil { - glog.Errorf("azureDisk - getAllStorageAccounts error: %v", err) + klog.Errorf("azureDisk - getAllStorageAccounts error: %v", err) c.accounts = make(map[string]*storageAccountState) return &c, nil } @@ -80,7 +80,7 @@ func newBlobDiskController(common *controllerCommon) (*BlobDiskController, error // If no storage account is given, search all the storage accounts associated with the resource group and pick one that // fits storage type and location. 
func (c *BlobDiskController) CreateVolume(blobName, accountName, accountType, location string, requestGB int) (string, string, int, error) { - account, key, err := c.common.cloud.ensureStorageAccount(accountName, accountType, c.common.resourceGroup, location, dedicatedDiskAccountNamePrefix) + account, key, err := c.common.cloud.ensureStorageAccount(accountName, accountType, string(defaultStorageAccountKind), c.common.resourceGroup, location, dedicatedDiskAccountNamePrefix) if err != nil { return "", "", 0, fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err) } @@ -97,13 +97,13 @@ func (c *BlobDiskController) CreateVolume(blobName, accountName, accountType, lo return "", "", 0, err } - glog.V(4).Infof("azureDisk - created vhd blob uri: %s", diskURI) + klog.V(4).Infof("azureDisk - created vhd blob uri: %s", diskURI) return diskName, diskURI, requestGB, err } // DeleteVolume deletes a VHD blob func (c *BlobDiskController) DeleteVolume(diskURI string) error { - glog.V(4).Infof("azureDisk - begin to delete volume %s", diskURI) + klog.V(4).Infof("azureDisk - begin to delete volume %s", diskURI) accountName, blob, err := c.common.cloud.getBlobNameAndAccountFromURI(diskURI) if err != nil { return fmt.Errorf("failed to parse vhd URI %v", err) @@ -114,7 +114,7 @@ func (c *BlobDiskController) DeleteVolume(diskURI string) error { } err = c.common.cloud.deleteVhdBlob(accountName, key, blob) if err != nil { - glog.Warningf("azureDisk - failed to delete blob %s err: %v", diskURI, err) + klog.Warningf("azureDisk - failed to delete blob %s err: %v", diskURI, err) detail := err.Error() if strings.Contains(detail, errLeaseIDMissing) { // disk is still being used @@ -123,7 +123,7 @@ func (c *BlobDiskController) DeleteVolume(diskURI string) error { } return fmt.Errorf("failed to delete vhd %v, account %s, blob %s, err: %v", diskURI, accountName, blob, err) } - glog.V(4).Infof("azureDisk - blob %s deleted", diskURI) + klog.V(4).Infof("azureDisk - blob %s deleted", diskURI) return nil } @@ -153,7 +153,7 @@ func (c *BlobDiskController) createVHDBlobDisk(blobClient azstorage.BlobStorageC tags := make(map[string]string) tags["createdby"] = "k8sAzureDataDisk" - glog.V(4).Infof("azureDisk - creating page blob %s in container %s account %s", vhdName, containerName, accountName) + klog.V(4).Infof("azureDisk - creating page blob %s in container %s account %s", vhdName, containerName, accountName) blob := container.GetBlobReference(vhdName) blob.Properties.ContentLength = vhdSize @@ -185,7 +185,7 @@ func (c *BlobDiskController) createVHDBlobDisk(blobClient azstorage.BlobStorageC End: uint64(vhdSize - 1), } if err = blob.WriteRange(blobRange, bytes.NewBuffer(h[:vhd.VHD_HEADER_SIZE]), nil); err != nil { - glog.Infof("azureDisk - failed to put header page for data disk %s in container %s account %s, error was %s\n", + klog.Infof("azureDisk - failed to put header page for data disk %s in container %s account %s, error was %s\n", vhdName, containerName, accountName, err.Error()) return "", "", err } @@ -215,7 +215,7 @@ func (c *BlobDiskController) deleteVhdBlob(accountName, accountKey, blobName str //CreateBlobDisk : create a blob disk in a node func (c *BlobDiskController) CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int) (string, error) { - glog.V(4).Infof("azureDisk - creating blob data disk named:%s on StorageAccountType:%s", dataDiskName, storageAccountType) + klog.V(4).Infof("azureDisk - creating blob data disk named:%s on StorageAccountType:%s", 
dataDiskName, storageAccountType) storageAccountName, err := c.findSANameForDisk(storageAccountType) if err != nil { @@ -247,7 +247,7 @@ func (c *BlobDiskController) DeleteBlobDisk(diskURI string) error { _, ok := c.accounts[storageAccountName] if !ok { // the storage account is specified by user - glog.V(4).Infof("azureDisk - deleting volume %s", diskURI) + klog.V(4).Infof("azureDisk - deleting volume %s", diskURI) return c.DeleteVolume(diskURI) } @@ -256,7 +256,7 @@ func (c *BlobDiskController) DeleteBlobDisk(diskURI string) error { return err } - glog.V(4).Infof("azureDisk - About to delete vhd file %s on storage account %s container %s", vhdName, storageAccountName, vhdContainerName) + klog.V(4).Infof("azureDisk - About to delete vhd file %s on storage account %s container %s", vhdName, storageAccountName, vhdContainerName) container := blobSvc.GetContainerReference(vhdContainerName) blob := container.GetBlobReference(vhdName) @@ -266,7 +266,7 @@ func (c *BlobDiskController) DeleteBlobDisk(diskURI string) error { if diskCount, err := c.getDiskCount(storageAccountName); err != nil { c.accounts[storageAccountName].diskCount = int32(diskCount) } else { - glog.Warningf("azureDisk - failed to get disk count for %s however the delete disk operation was ok", storageAccountName) + klog.Warningf("azureDisk - failed to get disk count for %s however the delete disk operation was ok", storageAccountName) return nil // we have failed to acquire a new count. not an error condition } } @@ -291,7 +291,7 @@ func (c *BlobDiskController) getStorageAccountKey(SAName string) (string, error) for _, v := range *listKeysResult.Keys { if v.Value != nil && *v.Value == "key1" { if _, ok := c.accounts[SAName]; !ok { - glog.Warningf("azureDisk - account %s was not cached while getting keys", SAName) + klog.Warningf("azureDisk - account %s was not cached while getting keys", SAName) return *v.Value, nil } } @@ -366,7 +366,7 @@ func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) e _, provisionState, err := c.getStorageAccountState(storageAccountName) if err != nil { - glog.V(4).Infof("azureDisk - GetStorageAccount:%s err %s", storageAccountName, err.Error()) + klog.V(4).Infof("azureDisk - GetStorageAccount:%s err %s", storageAccountName, err.Error()) return false, nil // error performing the query - retryable } @@ -374,7 +374,7 @@ func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) e return true, nil } - glog.V(4).Infof("azureDisk - GetStorageAccount:%s not ready yet (not flagged Succeeded by ARM)", storageAccountName) + klog.V(4).Infof("azureDisk - GetStorageAccount:%s not ready yet (not flagged Succeeded by ARM)", storageAccountName) return false, nil // back off and see if the account becomes ready on next retry }) // we have failed to ensure that account is ready for us to create @@ -397,7 +397,7 @@ func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) e return err } if bCreated { - glog.V(2).Infof("azureDisk - storage account:%s had no default container(%s) and it was created \n", storageAccountName, vhdContainerName) + klog.V(2).Infof("azureDisk - storage account:%s had no default container(%s) and it was created \n", storageAccountName, vhdContainerName) } // flag so we no longer have to check on ARM @@ -429,7 +429,7 @@ func (c *BlobDiskController) getDiskCount(SAName string) (int, error) { if err != nil { return 0, err } - glog.V(4).Infof("azure-Disk - refreshed data count for account %s and found %v", SAName, len(response.Blobs)) 
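The `findSANameForDisk` logic below grows the pool of shared blob-disk storage accounts once average utilization (counting the disk about to be created) crosses a threshold, provided the account cap has not been hit. A small sketch of just that heuristic; the constant values are assumptions for illustration, not taken from the diff:

```go
package main

import "fmt"

// Assumed values, mirroring the constants the vendored code references.
const (
	maxDisksPerStorageAccount              = 60  // per-account disk limit
	storageAccountUtilizationBeforeGrowing = 0.5 // growth threshold
	maxStorageAccounts                     = 100 // account cap
)

// shouldGrowAccounts reports whether a new shared storage account is needed
// once one more disk is added to the existing pool.
func shouldGrowAccounts(totalDisks, accounts int) (bool, float64) {
	disksAfter := totalDisks + 1 // include the disk about to be created
	avg := float64(disksAfter) / float64(accounts*maxDisksPerStorageAccount)
	return avg > storageAccountUtilizationBeforeGrowing && accounts < maxStorageAccounts, avg
}

func main() {
	grow, avg := shouldGrowAccounts(125, 4)
	fmt.Printf("utilization=%.3f grow=%v\n", avg, grow) // utilization=0.525 grow=true
}
```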
+ klog.V(4).Infof("azure-Disk - refreshed data count for account %s and found %v", SAName, len(response.Blobs)) c.accounts[SAName].diskCount = int32(len(response.Blobs)) return int(c.accounts[SAName].diskCount), nil @@ -449,13 +449,13 @@ func (c *BlobDiskController) getAllStorageAccounts() (map[string]*storageAccount accounts := make(map[string]*storageAccountState) for _, v := range *accountListResult.Value { if v.Name == nil || v.Sku == nil { - glog.Info("azureDisk - accountListResult Name or Sku is nil") + klog.Info("azureDisk - accountListResult Name or Sku is nil") continue } if !strings.HasPrefix(*v.Name, sharedDiskAccountNamePrefix) { continue } - glog.Infof("azureDisk - identified account %s as part of shared PVC accounts", *v.Name) + klog.Infof("azureDisk - identified account %s as part of shared PVC accounts", *v.Name) sastate := &storageAccountState{ name: *v.Name, @@ -486,12 +486,12 @@ func (c *BlobDiskController) createStorageAccount(storageAccountName string, sto return fmt.Errorf("azureDisk - can not create new storage account, current storage accounts count:%v Max is:%v", len(c.accounts), maxStorageAccounts) } - glog.V(2).Infof("azureDisk - Creating storage account %s type %s", storageAccountName, string(storageAccountType)) + klog.V(2).Infof("azureDisk - Creating storage account %s type %s", storageAccountName, string(storageAccountType)) cp := storage.AccountCreateParameters{ Sku: &storage.Sku{Name: storageAccountType}, // switch to use StorageV2 as it's recommended according to https://docs.microsoft.com/en-us/azure/storage/common/storage-account-options - Kind: storage.StorageV2, + Kind: defaultStorageAccountKind, Tags: map[string]*string{"created-by": to.StringPtr("azure-dd")}, Location: &location} ctx, cancel := getContextWithCancel() @@ -542,7 +542,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam countAccounts = countAccounts + 1 // empty account if dCount == 0 { - glog.V(2).Infof("azureDisk - account %s identified for a new disk is because it has 0 allocated disks", v.name) + klog.V(2).Infof("azureDisk - account %s identified for a new disk is because it has 0 allocated disks", v.name) return v.name, nil // short circuit, avg is good and no need to adjust } // if this account is less allocated @@ -555,7 +555,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam // if we failed to find storageaccount if SAName == "" { - glog.V(2).Infof("azureDisk - failed to identify a suitable account for new disk and will attempt to create new account") + klog.V(2).Infof("azureDisk - failed to identify a suitable account for new disk and will attempt to create new account") SAName = generateStorageAccountName(sharedDiskAccountNamePrefix) err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true) if err != nil { @@ -571,7 +571,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam // avg are not create and we should create more accounts if we can if aboveAvg && countAccounts < maxStorageAccounts { - glog.V(2).Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). New storage account will be created", avgUtilization, storageAccountUtilizationBeforeGrowing) + klog.V(2).Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). 
New storage account will be created", avgUtilization, storageAccountUtilizationBeforeGrowing) SAName = generateStorageAccountName(sharedDiskAccountNamePrefix) err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true) if err != nil { @@ -582,7 +582,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam // averages are not ok and we are at capacity (max storage accounts allowed) if aboveAvg && countAccounts == maxStorageAccounts { - glog.Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). But k8s maxed on SAs for PVC(%v). k8s will now exceed grow-at-avg-utilization without adding accounts", + klog.Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). But k8s maxed on SAs for PVC(%v). k8s will now exceed grow-at-avg-utilization without adding accounts", avgUtilization, storageAccountUtilizationBeforeGrowing, maxStorageAccounts) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_client.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_client.go index 19d5e7a158ae..94a970342751 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_client.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_client.go @@ -22,12 +22,12 @@ import ( "net/http" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network" - "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage" + "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/adal" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/client-go/util/flowcontrol" ) @@ -179,9 +179,9 @@ func (az *azVirtualMachinesClient) CreateOrUpdate(ctx context.Context, resourceG return } - glog.V(10).Infof("azVirtualMachinesClient.CreateOrUpdate(%q, %q): start", resourceGroupName, VMName) + klog.V(10).Infof("azVirtualMachinesClient.CreateOrUpdate(%q, %q): start", resourceGroupName, VMName) defer func() { - glog.V(10).Infof("azVirtualMachinesClient.CreateOrUpdate(%q, %q): end", resourceGroupName, VMName) + klog.V(10).Infof("azVirtualMachinesClient.CreateOrUpdate(%q, %q): end", resourceGroupName, VMName) }() mc := newMetricContext("vm", "create_or_update", resourceGroupName, az.client.SubscriptionID) @@ -201,9 +201,9 @@ func (az *azVirtualMachinesClient) Get(ctx context.Context, resourceGroupName st return } - glog.V(10).Infof("azVirtualMachinesClient.Get(%q, %q): start", resourceGroupName, VMName) + klog.V(10).Infof("azVirtualMachinesClient.Get(%q, %q): start", resourceGroupName, VMName) defer func() { - glog.V(10).Infof("azVirtualMachinesClient.Get(%q, %q): end", resourceGroupName, VMName) + klog.V(10).Infof("azVirtualMachinesClient.Get(%q, %q): end", resourceGroupName, VMName) }() mc := newMetricContext("vm", "get", resourceGroupName, az.client.SubscriptionID) @@ -218,9 +218,9 @@ func (az *azVirtualMachinesClient) List(ctx context.Context, resourceGroupName s return } - glog.V(10).Infof("azVirtualMachinesClient.List(%q): start", resourceGroupName) + klog.V(10).Infof("azVirtualMachinesClient.List(%q): start", resourceGroupName) defer func() { - 
glog.V(10).Infof("azVirtualMachinesClient.List(%q): end", resourceGroupName) + klog.V(10).Infof("azVirtualMachinesClient.List(%q): end", resourceGroupName) }() mc := newMetricContext("vm", "list", resourceGroupName, az.client.SubscriptionID) @@ -270,9 +270,9 @@ func (az *azInterfacesClient) CreateOrUpdate(ctx context.Context, resourceGroupN return } - glog.V(10).Infof("azInterfacesClient.CreateOrUpdate(%q,%q): start", resourceGroupName, networkInterfaceName) + klog.V(10).Infof("azInterfacesClient.CreateOrUpdate(%q,%q): start", resourceGroupName, networkInterfaceName) defer func() { - glog.V(10).Infof("azInterfacesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, networkInterfaceName) + klog.V(10).Infof("azInterfacesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, networkInterfaceName) }() mc := newMetricContext("interfaces", "create_or_update", resourceGroupName, az.client.SubscriptionID) @@ -293,9 +293,9 @@ func (az *azInterfacesClient) Get(ctx context.Context, resourceGroupName string, return } - glog.V(10).Infof("azInterfacesClient.Get(%q,%q): start", resourceGroupName, networkInterfaceName) + klog.V(10).Infof("azInterfacesClient.Get(%q,%q): start", resourceGroupName, networkInterfaceName) defer func() { - glog.V(10).Infof("azInterfacesClient.Get(%q,%q): end", resourceGroupName, networkInterfaceName) + klog.V(10).Infof("azInterfacesClient.Get(%q,%q): end", resourceGroupName, networkInterfaceName) }() mc := newMetricContext("interfaces", "get", resourceGroupName, az.client.SubscriptionID) @@ -310,9 +310,9 @@ func (az *azInterfacesClient) GetVirtualMachineScaleSetNetworkInterface(ctx cont return } - glog.V(10).Infof("azInterfacesClient.GetVirtualMachineScaleSetNetworkInterface(%q,%q,%q,%q): start", resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName) + klog.V(10).Infof("azInterfacesClient.GetVirtualMachineScaleSetNetworkInterface(%q,%q,%q,%q): start", resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName) defer func() { - glog.V(10).Infof("azInterfacesClient.GetVirtualMachineScaleSetNetworkInterface(%q,%q,%q,%q): end", resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName) + klog.V(10).Infof("azInterfacesClient.GetVirtualMachineScaleSetNetworkInterface(%q,%q,%q,%q): end", resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName) }() mc := newMetricContext("interfaces", "get_vmss_ni", resourceGroupName, az.client.SubscriptionID) @@ -349,9 +349,9 @@ func (az *azLoadBalancersClient) CreateOrUpdate(ctx context.Context, resourceGro return nil, err } - glog.V(10).Infof("azLoadBalancersClient.CreateOrUpdate(%q,%q): start", resourceGroupName, loadBalancerName) + klog.V(10).Infof("azLoadBalancersClient.CreateOrUpdate(%q,%q): start", resourceGroupName, loadBalancerName) defer func() { - glog.V(10).Infof("azLoadBalancersClient.CreateOrUpdate(%q,%q): end", resourceGroupName, loadBalancerName) + klog.V(10).Infof("azLoadBalancersClient.CreateOrUpdate(%q,%q): end", resourceGroupName, loadBalancerName) }() mc := newMetricContext("load_balancers", "create_or_update", resourceGroupName, az.client.SubscriptionID) @@ -373,9 +373,9 @@ func (az *azLoadBalancersClient) Delete(ctx context.Context, resourceGroupName s return nil, err } - glog.V(10).Infof("azLoadBalancersClient.Delete(%q,%q): start", resourceGroupName, loadBalancerName) + klog.V(10).Infof("azLoadBalancersClient.Delete(%q,%q): start", resourceGroupName, loadBalancerName) defer func() { - 
glog.V(10).Infof("azLoadBalancersClient.Delete(%q,%q): end", resourceGroupName, loadBalancerName) + klog.V(10).Infof("azLoadBalancersClient.Delete(%q,%q): end", resourceGroupName, loadBalancerName) }() mc := newMetricContext("load_balancers", "delete", resourceGroupName, az.client.SubscriptionID) @@ -396,9 +396,9 @@ func (az *azLoadBalancersClient) Get(ctx context.Context, resourceGroupName stri return } - glog.V(10).Infof("azLoadBalancersClient.Get(%q,%q): start", resourceGroupName, loadBalancerName) + klog.V(10).Infof("azLoadBalancersClient.Get(%q,%q): start", resourceGroupName, loadBalancerName) defer func() { - glog.V(10).Infof("azLoadBalancersClient.Get(%q,%q): end", resourceGroupName, loadBalancerName) + klog.V(10).Infof("azLoadBalancersClient.Get(%q,%q): end", resourceGroupName, loadBalancerName) }() mc := newMetricContext("load_balancers", "get", resourceGroupName, az.client.SubscriptionID) @@ -413,9 +413,9 @@ func (az *azLoadBalancersClient) List(ctx context.Context, resourceGroupName str return nil, err } - glog.V(10).Infof("azLoadBalancersClient.List(%q): start", resourceGroupName) + klog.V(10).Infof("azLoadBalancersClient.List(%q): start", resourceGroupName) defer func() { - glog.V(10).Infof("azLoadBalancersClient.List(%q): end", resourceGroupName) + klog.V(10).Infof("azLoadBalancersClient.List(%q): end", resourceGroupName) }() mc := newMetricContext("load_balancers", "list", resourceGroupName, az.client.SubscriptionID) @@ -465,9 +465,9 @@ func (az *azPublicIPAddressesClient) CreateOrUpdate(ctx context.Context, resourc return nil, err } - glog.V(10).Infof("azPublicIPAddressesClient.CreateOrUpdate(%q,%q): start", resourceGroupName, publicIPAddressName) + klog.V(10).Infof("azPublicIPAddressesClient.CreateOrUpdate(%q,%q): start", resourceGroupName, publicIPAddressName) defer func() { - glog.V(10).Infof("azPublicIPAddressesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, publicIPAddressName) + klog.V(10).Infof("azPublicIPAddressesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, publicIPAddressName) }() mc := newMetricContext("public_ip_addresses", "create_or_update", resourceGroupName, az.client.SubscriptionID) @@ -489,9 +489,9 @@ func (az *azPublicIPAddressesClient) Delete(ctx context.Context, resourceGroupNa return nil, err } - glog.V(10).Infof("azPublicIPAddressesClient.Delete(%q,%q): start", resourceGroupName, publicIPAddressName) + klog.V(10).Infof("azPublicIPAddressesClient.Delete(%q,%q): start", resourceGroupName, publicIPAddressName) defer func() { - glog.V(10).Infof("azPublicIPAddressesClient.Delete(%q,%q): end", resourceGroupName, publicIPAddressName) + klog.V(10).Infof("azPublicIPAddressesClient.Delete(%q,%q): end", resourceGroupName, publicIPAddressName) }() mc := newMetricContext("public_ip_addresses", "delete", resourceGroupName, az.client.SubscriptionID) @@ -512,9 +512,9 @@ func (az *azPublicIPAddressesClient) Get(ctx context.Context, resourceGroupName return } - glog.V(10).Infof("azPublicIPAddressesClient.Get(%q,%q): start", resourceGroupName, publicIPAddressName) + klog.V(10).Infof("azPublicIPAddressesClient.Get(%q,%q): start", resourceGroupName, publicIPAddressName) defer func() { - glog.V(10).Infof("azPublicIPAddressesClient.Get(%q,%q): end", resourceGroupName, publicIPAddressName) + klog.V(10).Infof("azPublicIPAddressesClient.Get(%q,%q): end", resourceGroupName, publicIPAddressName) }() mc := newMetricContext("public_ip_addresses", "get", resourceGroupName, az.client.SubscriptionID) @@ -528,9 +528,9 @@ func (az *azPublicIPAddressesClient) List(ctx 
context.Context, resourceGroupName return nil, createRateLimitErr(false, "PublicIPList") } - glog.V(10).Infof("azPublicIPAddressesClient.List(%q): start", resourceGroupName) + klog.V(10).Infof("azPublicIPAddressesClient.List(%q): start", resourceGroupName) defer func() { - glog.V(10).Infof("azPublicIPAddressesClient.List(%q): end", resourceGroupName) + klog.V(10).Infof("azPublicIPAddressesClient.List(%q): end", resourceGroupName) }() mc := newMetricContext("public_ip_addresses", "list", resourceGroupName, az.client.SubscriptionID) @@ -580,9 +580,9 @@ func (az *azSubnetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName return } - glog.V(10).Infof("azSubnetsClient.CreateOrUpdate(%q,%q,%q): start", resourceGroupName, virtualNetworkName, subnetName) + klog.V(10).Infof("azSubnetsClient.CreateOrUpdate(%q,%q,%q): start", resourceGroupName, virtualNetworkName, subnetName) defer func() { - glog.V(10).Infof("azSubnetsClient.CreateOrUpdate(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName) + klog.V(10).Infof("azSubnetsClient.CreateOrUpdate(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName) }() mc := newMetricContext("subnets", "create_or_update", resourceGroupName, az.client.SubscriptionID) @@ -604,9 +604,9 @@ func (az *azSubnetsClient) Delete(ctx context.Context, resourceGroupName string, return } - glog.V(10).Infof("azSubnetsClient.Delete(%q,%q,%q): start", resourceGroupName, virtualNetworkName, subnetName) + klog.V(10).Infof("azSubnetsClient.Delete(%q,%q,%q): start", resourceGroupName, virtualNetworkName, subnetName) defer func() { - glog.V(10).Infof("azSubnetsClient.Delete(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName) + klog.V(10).Infof("azSubnetsClient.Delete(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName) }() mc := newMetricContext("subnets", "delete", resourceGroupName, az.client.SubscriptionID) @@ -627,9 +627,9 @@ func (az *azSubnetsClient) Get(ctx context.Context, resourceGroupName string, vi return } - glog.V(10).Infof("azSubnetsClient.Get(%q,%q,%q): start", resourceGroupName, virtualNetworkName, subnetName) + klog.V(10).Infof("azSubnetsClient.Get(%q,%q,%q): start", resourceGroupName, virtualNetworkName, subnetName) defer func() { - glog.V(10).Infof("azSubnetsClient.Get(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName) + klog.V(10).Infof("azSubnetsClient.Get(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName) }() mc := newMetricContext("subnets", "get", resourceGroupName, az.client.SubscriptionID) @@ -643,9 +643,9 @@ func (az *azSubnetsClient) List(ctx context.Context, resourceGroupName string, v return nil, createRateLimitErr(false, "SubnetList") } - glog.V(10).Infof("azSubnetsClient.List(%q,%q): start", resourceGroupName, virtualNetworkName) + klog.V(10).Infof("azSubnetsClient.List(%q,%q): start", resourceGroupName, virtualNetworkName) defer func() { - glog.V(10).Infof("azSubnetsClient.List(%q,%q): end", resourceGroupName, virtualNetworkName) + klog.V(10).Infof("azSubnetsClient.List(%q,%q): end", resourceGroupName, virtualNetworkName) }() mc := newMetricContext("subnets", "list", resourceGroupName, az.client.SubscriptionID) @@ -695,9 +695,9 @@ func (az *azSecurityGroupsClient) CreateOrUpdate(ctx context.Context, resourceGr return } - glog.V(10).Infof("azSecurityGroupsClient.CreateOrUpdate(%q,%q): start", resourceGroupName, networkSecurityGroupName) + klog.V(10).Infof("azSecurityGroupsClient.CreateOrUpdate(%q,%q): start", resourceGroupName, networkSecurityGroupName) 
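The logging change threaded through every vendored file in this diff is mechanical: the `github.com/golang/glog` import becomes `k8s.io/klog`, and the call sites are untouched because klog was forked from glog specifically to stay source-compatible (`V(10).Infof`, `Errorf`, `Warningf`, and so on behave the same). The one practical difference is that klog's flags must be registered explicitly. A minimal sketch for a standalone binary:

```go
package main

import (
	"flag"

	"k8s.io/klog" // previously: "github.com/golang/glog"
)

func main() {
	klog.InitFlags(nil) // registers -v, -logtostderr, etc.; glog did this in init()
	flag.Parse()

	// Identical API to glog: gated verbose logging plus leveled helpers.
	klog.V(10).Infof("azSomeClient.Get(%q,%q): start", "rg", "resource")
	klog.Errorf("backoff failure, will retry, err=%v", "example error")
	klog.Flush()
}
```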
defer func() { - glog.V(10).Infof("azSecurityGroupsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, networkSecurityGroupName) + klog.V(10).Infof("azSecurityGroupsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, networkSecurityGroupName) }() mc := newMetricContext("security_groups", "create_or_update", resourceGroupName, az.client.SubscriptionID) @@ -719,9 +719,9 @@ func (az *azSecurityGroupsClient) Delete(ctx context.Context, resourceGroupName return } - glog.V(10).Infof("azSecurityGroupsClient.Delete(%q,%q): start", resourceGroupName, networkSecurityGroupName) + klog.V(10).Infof("azSecurityGroupsClient.Delete(%q,%q): start", resourceGroupName, networkSecurityGroupName) defer func() { - glog.V(10).Infof("azSecurityGroupsClient.Delete(%q,%q): end", resourceGroupName, networkSecurityGroupName) + klog.V(10).Infof("azSecurityGroupsClient.Delete(%q,%q): end", resourceGroupName, networkSecurityGroupName) }() mc := newMetricContext("security_groups", "delete", resourceGroupName, az.client.SubscriptionID) @@ -742,9 +742,9 @@ func (az *azSecurityGroupsClient) Get(ctx context.Context, resourceGroupName str return } - glog.V(10).Infof("azSecurityGroupsClient.Get(%q,%q): start", resourceGroupName, networkSecurityGroupName) + klog.V(10).Infof("azSecurityGroupsClient.Get(%q,%q): start", resourceGroupName, networkSecurityGroupName) defer func() { - glog.V(10).Infof("azSecurityGroupsClient.Get(%q,%q): end", resourceGroupName, networkSecurityGroupName) + klog.V(10).Infof("azSecurityGroupsClient.Get(%q,%q): end", resourceGroupName, networkSecurityGroupName) }() mc := newMetricContext("security_groups", "get", resourceGroupName, az.client.SubscriptionID) @@ -758,9 +758,9 @@ func (az *azSecurityGroupsClient) List(ctx context.Context, resourceGroupName st return nil, createRateLimitErr(false, "NSGList") } - glog.V(10).Infof("azSecurityGroupsClient.List(%q): start", resourceGroupName) + klog.V(10).Infof("azSecurityGroupsClient.List(%q): start", resourceGroupName) defer func() { - glog.V(10).Infof("azSecurityGroupsClient.List(%q): end", resourceGroupName) + klog.V(10).Infof("azSecurityGroupsClient.List(%q): end", resourceGroupName) }() mc := newMetricContext("security_groups", "list", resourceGroupName, az.client.SubscriptionID) @@ -810,9 +810,9 @@ func (az *azVirtualMachineScaleSetsClient) CreateOrUpdate(ctx context.Context, r return } - glog.V(10).Infof("azVirtualMachineScaleSetsClient.CreateOrUpdate(%q,%q): start", resourceGroupName, VMScaleSetName) + klog.V(10).Infof("azVirtualMachineScaleSetsClient.CreateOrUpdate(%q,%q): start", resourceGroupName, VMScaleSetName) defer func() { - glog.V(10).Infof("azVirtualMachineScaleSetsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, VMScaleSetName) + klog.V(10).Infof("azVirtualMachineScaleSetsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, VMScaleSetName) }() mc := newMetricContext("vmss", "create_or_update", resourceGroupName, az.client.SubscriptionID) @@ -833,9 +833,9 @@ func (az *azVirtualMachineScaleSetsClient) Get(ctx context.Context, resourceGrou return } - glog.V(10).Infof("azVirtualMachineScaleSetsClient.Get(%q,%q): start", resourceGroupName, VMScaleSetName) + klog.V(10).Infof("azVirtualMachineScaleSetsClient.Get(%q,%q): start", resourceGroupName, VMScaleSetName) defer func() { - glog.V(10).Infof("azVirtualMachineScaleSetsClient.Get(%q,%q): end", resourceGroupName, VMScaleSetName) + klog.V(10).Infof("azVirtualMachineScaleSetsClient.Get(%q,%q): end", resourceGroupName, VMScaleSetName) }() mc := newMetricContext("vmss", "get", 
resourceGroupName, az.client.SubscriptionID) @@ -850,9 +850,9 @@ func (az *azVirtualMachineScaleSetsClient) List(ctx context.Context, resourceGro return } - glog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q): start", resourceGroupName) + klog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q): start", resourceGroupName) defer func() { - glog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q): end", resourceGroupName) + klog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q): end", resourceGroupName) }() mc := newMetricContext("vmss", "list", resourceGroupName, az.client.SubscriptionID) @@ -881,9 +881,9 @@ func (az *azVirtualMachineScaleSetsClient) UpdateInstances(ctx context.Context, return } - glog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%v): start", resourceGroupName, VMScaleSetName, VMInstanceIDs) + klog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%v): start", resourceGroupName, VMScaleSetName, VMInstanceIDs) defer func() { - glog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%v): end", resourceGroupName, VMScaleSetName, VMInstanceIDs) + klog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%v): end", resourceGroupName, VMScaleSetName, VMInstanceIDs) }() mc := newMetricContext("vmss", "update_instances", resourceGroupName, az.client.SubscriptionID) @@ -925,9 +925,9 @@ func (az *azVirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceGr return } - glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Get(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID) + klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Get(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID) defer func() { - glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Get(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID) + klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Get(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID) }() mc := newMetricContext("vmssvm", "get", resourceGroupName, az.client.SubscriptionID) @@ -942,9 +942,9 @@ func (az *azVirtualMachineScaleSetVMsClient) GetInstanceView(ctx context.Context return } - glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.GetInstanceView(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID) + klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.GetInstanceView(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID) defer func() { - glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.GetInstanceView(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID) + klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.GetInstanceView(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID) }() mc := newMetricContext("vmssvm", "get_instance_view", resourceGroupName, az.client.SubscriptionID) @@ -959,9 +959,9 @@ func (az *azVirtualMachineScaleSetVMsClient) List(ctx context.Context, resourceG return } - glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.List(%q,%q,%q): start", resourceGroupName, virtualMachineScaleSetName, filter) + klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.List(%q,%q,%q): start", resourceGroupName, virtualMachineScaleSetName, filter) defer func() { - glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.List(%q,%q,%q): end", resourceGroupName, virtualMachineScaleSetName, filter) + klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.List(%q,%q,%q): end", resourceGroupName, virtualMachineScaleSetName, filter) }() mc := newMetricContext("vmssvm", "list", 
resourceGroupName, az.client.SubscriptionID) @@ -989,9 +989,9 @@ func (az *azVirtualMachineScaleSetVMsClient) Update(ctx context.Context, resourc return } - glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Update(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID) + klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Update(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID) defer func() { - glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Update(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID) + klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Update(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID) }() mc := newMetricContext("vmssvm", "update", resourceGroupName, az.client.SubscriptionID) @@ -1034,9 +1034,9 @@ func (az *azRoutesClient) CreateOrUpdate(ctx context.Context, resourceGroupName return } - glog.V(10).Infof("azRoutesClient.CreateOrUpdate(%q,%q,%q): start", resourceGroupName, routeTableName, routeName) + klog.V(10).Infof("azRoutesClient.CreateOrUpdate(%q,%q,%q): start", resourceGroupName, routeTableName, routeName) defer func() { - glog.V(10).Infof("azRoutesClient.CreateOrUpdate(%q,%q,%q): end", resourceGroupName, routeTableName, routeName) + klog.V(10).Infof("azRoutesClient.CreateOrUpdate(%q,%q,%q): end", resourceGroupName, routeTableName, routeName) }() mc := newMetricContext("routes", "create_or_update", resourceGroupName, az.client.SubscriptionID) @@ -1058,9 +1058,9 @@ func (az *azRoutesClient) Delete(ctx context.Context, resourceGroupName string, return } - glog.V(10).Infof("azRoutesClient.Delete(%q,%q,%q): start", resourceGroupName, routeTableName, routeName) + klog.V(10).Infof("azRoutesClient.Delete(%q,%q,%q): start", resourceGroupName, routeTableName, routeName) defer func() { - glog.V(10).Infof("azRoutesClient.Delete(%q,%q,%q): end", resourceGroupName, routeTableName, routeName) + klog.V(10).Infof("azRoutesClient.Delete(%q,%q,%q): end", resourceGroupName, routeTableName, routeName) }() mc := newMetricContext("routes", "delete", resourceGroupName, az.client.SubscriptionID) @@ -1103,9 +1103,9 @@ func (az *azRouteTablesClient) CreateOrUpdate(ctx context.Context, resourceGroup return } - glog.V(10).Infof("azRouteTablesClient.CreateOrUpdate(%q,%q): start", resourceGroupName, routeTableName) + klog.V(10).Infof("azRouteTablesClient.CreateOrUpdate(%q,%q): start", resourceGroupName, routeTableName) defer func() { - glog.V(10).Infof("azRouteTablesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, routeTableName) + klog.V(10).Infof("azRouteTablesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, routeTableName) }() mc := newMetricContext("route_tables", "create_or_update", resourceGroupName, az.client.SubscriptionID) @@ -1126,9 +1126,9 @@ func (az *azRouteTablesClient) Get(ctx context.Context, resourceGroupName string return } - glog.V(10).Infof("azRouteTablesClient.Get(%q,%q): start", resourceGroupName, routeTableName) + klog.V(10).Infof("azRouteTablesClient.Get(%q,%q): start", resourceGroupName, routeTableName) defer func() { - glog.V(10).Infof("azRouteTablesClient.Get(%q,%q): end", resourceGroupName, routeTableName) + klog.V(10).Infof("azRouteTablesClient.Get(%q,%q): end", resourceGroupName, routeTableName) }() mc := newMetricContext("route_tables", "get", resourceGroupName, az.client.SubscriptionID) @@ -1164,9 +1164,9 @@ func (az *azStorageAccountClient) Create(ctx context.Context, resourceGroupName return } - glog.V(10).Infof("azStorageAccountClient.Create(%q,%q): start", resourceGroupName, accountName) 
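Every ARM client wrapper in azure_client.go repeats the same shape: a rate-limit check, a `V(10)` "start" log, a deferred "end" log, a metric context, then the SDK call. A condensed, hypothetical rendering of that shape; the real `newMetricContext` lives in the vendored package and reports latency to Prometheus, whereas this stand-in just prints:

```go
package main

import (
	"fmt"
	"time"
)

// metricContext is a stand-in for the package's newMetricContext helper.
type metricContext struct {
	start time.Time
	attrs []string
}

func newMetricContext(prefix, request, resourceGroup, subscriptionID string) *metricContext {
	return &metricContext{
		start: time.Now(),
		attrs: []string{prefix, request, resourceGroup, subscriptionID},
	}
}

// observe records latency and outcome for the finished request.
func (mc *metricContext) observe(err error) {
	fmt.Printf("request=%v latency=%v err=%v\n", mc.attrs, time.Since(mc.start), err)
}

// getStorageAccount mimics the wrapper shape around a single SDK call.
func getStorageAccount(resourceGroup, accountName string) error {
	fmt.Printf("azStorageAccountClient.GetProperties(%q,%q): start\n", resourceGroup, accountName)
	defer fmt.Printf("azStorageAccountClient.GetProperties(%q,%q): end\n", resourceGroup, accountName)

	mc := newMetricContext("storage_account", "get_properties", resourceGroup, "subscription-id")
	var err error // the vendored code calls az.client.GetProperties(ctx, ...) here
	mc.observe(err)
	return err
}

func main() {
	_ = getStorageAccount("my-rg", "myaccount")
}
```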
+ klog.V(10).Infof("azStorageAccountClient.Create(%q,%q): start", resourceGroupName, accountName) defer func() { - glog.V(10).Infof("azStorageAccountClient.Create(%q,%q): end", resourceGroupName, accountName) + klog.V(10).Infof("azStorageAccountClient.Create(%q,%q): end", resourceGroupName, accountName) }() mc := newMetricContext("storage_account", "create", resourceGroupName, az.client.SubscriptionID) @@ -1186,9 +1186,9 @@ func (az *azStorageAccountClient) Delete(ctx context.Context, resourceGroupName return } - glog.V(10).Infof("azStorageAccountClient.Delete(%q,%q): start", resourceGroupName, accountName) + klog.V(10).Infof("azStorageAccountClient.Delete(%q,%q): start", resourceGroupName, accountName) defer func() { - glog.V(10).Infof("azStorageAccountClient.Delete(%q,%q): end", resourceGroupName, accountName) + klog.V(10).Infof("azStorageAccountClient.Delete(%q,%q): end", resourceGroupName, accountName) }() mc := newMetricContext("storage_account", "delete", resourceGroupName, az.client.SubscriptionID) @@ -1203,9 +1203,9 @@ func (az *azStorageAccountClient) ListKeys(ctx context.Context, resourceGroupNam return } - glog.V(10).Infof("azStorageAccountClient.ListKeys(%q,%q): start", resourceGroupName, accountName) + klog.V(10).Infof("azStorageAccountClient.ListKeys(%q,%q): start", resourceGroupName, accountName) defer func() { - glog.V(10).Infof("azStorageAccountClient.ListKeys(%q,%q): end", resourceGroupName, accountName) + klog.V(10).Infof("azStorageAccountClient.ListKeys(%q,%q): end", resourceGroupName, accountName) }() mc := newMetricContext("storage_account", "list_keys", resourceGroupName, az.client.SubscriptionID) @@ -1220,9 +1220,9 @@ func (az *azStorageAccountClient) ListByResourceGroup(ctx context.Context, resou return } - glog.V(10).Infof("azStorageAccountClient.ListByResourceGroup(%q): start", resourceGroupName) + klog.V(10).Infof("azStorageAccountClient.ListByResourceGroup(%q): start", resourceGroupName) defer func() { - glog.V(10).Infof("azStorageAccountClient.ListByResourceGroup(%q): end", resourceGroupName) + klog.V(10).Infof("azStorageAccountClient.ListByResourceGroup(%q): end", resourceGroupName) }() mc := newMetricContext("storage_account", "list_by_resource_group", resourceGroupName, az.client.SubscriptionID) @@ -1237,9 +1237,9 @@ func (az *azStorageAccountClient) GetProperties(ctx context.Context, resourceGro return } - glog.V(10).Infof("azStorageAccountClient.GetProperties(%q,%q): start", resourceGroupName, accountName) + klog.V(10).Infof("azStorageAccountClient.GetProperties(%q,%q): start", resourceGroupName, accountName) defer func() { - glog.V(10).Infof("azStorageAccountClient.GetProperties(%q,%q): end", resourceGroupName, accountName) + klog.V(10).Infof("azStorageAccountClient.GetProperties(%q,%q): end", resourceGroupName, accountName) }() mc := newMetricContext("storage_account", "get_properties", resourceGroupName, az.client.SubscriptionID) @@ -1275,9 +1275,9 @@ func (az *azDisksClient) CreateOrUpdate(ctx context.Context, resourceGroupName s return } - glog.V(10).Infof("azDisksClient.CreateOrUpdate(%q,%q): start", resourceGroupName, diskName) + klog.V(10).Infof("azDisksClient.CreateOrUpdate(%q,%q): start", resourceGroupName, diskName) defer func() { - glog.V(10).Infof("azDisksClient.CreateOrUpdate(%q,%q): end", resourceGroupName, diskName) + klog.V(10).Infof("azDisksClient.CreateOrUpdate(%q,%q): end", resourceGroupName, diskName) }() mc := newMetricContext("disks", "create_or_update", resourceGroupName, az.client.SubscriptionID) @@ -1299,9 +1299,9 @@ func (az 
*azDisksClient) Delete(ctx context.Context, resourceGroupName string, d return } - glog.V(10).Infof("azDisksClient.Delete(%q,%q): start", resourceGroupName, diskName) + klog.V(10).Infof("azDisksClient.Delete(%q,%q): start", resourceGroupName, diskName) defer func() { - glog.V(10).Infof("azDisksClient.Delete(%q,%q): end", resourceGroupName, diskName) + klog.V(10).Infof("azDisksClient.Delete(%q,%q): end", resourceGroupName, diskName) }() mc := newMetricContext("disks", "delete", resourceGroupName, az.client.SubscriptionID) @@ -1322,9 +1322,9 @@ func (az *azDisksClient) Get(ctx context.Context, resourceGroupName string, disk return } - glog.V(10).Infof("azDisksClient.Get(%q,%q): start", resourceGroupName, diskName) + klog.V(10).Infof("azDisksClient.Get(%q,%q): start", resourceGroupName, diskName) defer func() { - glog.V(10).Infof("azDisksClient.Get(%q,%q): end", resourceGroupName, diskName) + klog.V(10).Infof("azDisksClient.Get(%q,%q): end", resourceGroupName, diskName) }() mc := newMetricContext("disks", "get", resourceGroupName, az.client.SubscriptionID) @@ -1360,9 +1360,9 @@ func (az *azVirtualMachineSizesClient) List(ctx context.Context, location string return } - glog.V(10).Infof("azVirtualMachineSizesClient.List(%q): start", location) + klog.V(10).Infof("azVirtualMachineSizesClient.List(%q): start", location) defer func() { - glog.V(10).Infof("azVirtualMachineSizesClient.List(%q): end", location) + klog.V(10).Infof("azVirtualMachineSizesClient.List(%q): end", location) }() mc := newMetricContext("vmsizes", "list", "", az.client.SubscriptionID) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_common.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_common.go index 94b64fc5eb6f..7109ea73bd47 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_common.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_common.go @@ -20,12 +20,12 @@ import ( "fmt" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" - "github.com/golang/glog" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" + "k8s.io/klog" "k8s.io/apimachinery/pkg/types" kwait "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" ) const ( @@ -119,7 +119,7 @@ func (c *controllerCommon) getNodeDataDisks(nodeName types.NodeName) ([]compute. 
func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) { disks, err := c.getNodeDataDisks(nodeName) if err != nil { - glog.Errorf("error of getting data disks for node %q: %v", nodeName, err) + klog.Errorf("error of getting data disks for node %q: %v", nodeName, err) return -1, err } @@ -128,7 +128,7 @@ func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.N (disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) || (disk.ManagedDisk != nil && *disk.ManagedDisk.ID == diskURI) { // found the disk - glog.V(4).Infof("azureDisk - find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI) + klog.V(2).Infof("azureDisk - find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI) return *disk.Lun, nil } } @@ -139,7 +139,7 @@ func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.N func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error) { disks, err := c.getNodeDataDisks(nodeName) if err != nil { - glog.Errorf("error of getting data disks for node %q: %v", nodeName, err) + klog.Errorf("error of getting data disks for node %q: %v", nodeName, err) return -1, err } @@ -168,7 +168,7 @@ func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.N if err != nil { if err == cloudprovider.InstanceNotFound { // if host doesn't exist, no need to detach - glog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.", + klog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.", nodeName, diskNames) return attached, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_standard.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_standard.go index 5ac04bbf4372..b21964b215ea 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_standard.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_standard.go @@ -20,8 +20,8 @@ import ( "fmt" "strings" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" - "github.com/golang/glog" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" + "k8s.io/klog" "k8s.io/apimachinery/pkg/types" ) @@ -73,28 +73,21 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri }, }, } - glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk", nodeResourceGroup, vmName) + klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s)", nodeResourceGroup, vmName, diskName) ctx, cancel := getContextWithCancel() defer cancel() - resp, err := as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM) - if as.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { - glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", nodeResourceGroup, vmName) - retryErr := as.CreateOrUpdateVMWithRetry(nodeResourceGroup, vmName, newVM) - if retryErr != nil { - err = retryErr - glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", nodeResourceGroup, vmName) - } - } + + _, err = as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM) if err != nil { - glog.Errorf("azureDisk - azure attach failed, err: %v", err) + klog.Errorf("azureDisk - attach disk(%s) failed, err: %v", 
diskName, err) detail := err.Error() if strings.Contains(detail, errLeaseFailed) || strings.Contains(detail, errDiskBlobNotFound) { // if lease cannot be acquired or disk not found, immediately detach the disk and return the original error - glog.Infof("azureDisk - err %s, try detach", detail) + klog.V(2).Infof("azureDisk - err %v, try detach disk(%s)", err, diskName) as.DetachDiskByName(diskName, diskURI, nodeName) } } else { - glog.V(4).Info("azureDisk - azure attach succeeded") + klog.V(2).Infof("azureDisk - attach disk(%s) succeeded", diskName) // Invalidate the cache right after updating as.cloud.vmCache.Delete(vmName) } @@ -107,7 +100,7 @@ func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName t vm, err := as.getVirtualMachine(nodeName) if err != nil { // if host doesn't exist, no need to detach - glog.Warningf("azureDisk - cannot find node %s, skip detaching disk %s", nodeName, diskName) + klog.Warningf("azureDisk - cannot find node %s, skip detaching disk %s", nodeName, diskName) return nil } @@ -124,7 +117,7 @@ func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName t (disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) || (disk.ManagedDisk != nil && diskURI != "" && *disk.ManagedDisk.ID == diskURI) { // found the disk - glog.V(4).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI) + klog.V(2).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI) disks = append(disks[:i], disks[i+1:]...) bFoundDisk = true break @@ -143,22 +136,14 @@ func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName t }, }, } - glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk", nodeResourceGroup, vmName) + klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s)", nodeResourceGroup, vmName, diskName) ctx, cancel := getContextWithCancel() defer cancel() - resp, err := as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM) - if as.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { - glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", nodeResourceGroup, vmName) - retryErr := as.CreateOrUpdateVMWithRetry(nodeResourceGroup, vmName, newVM) - if retryErr != nil { - err = retryErr - glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", nodeResourceGroup, vmName) - } - } + _, err = as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM) if err != nil { - glog.Errorf("azureDisk - azure disk detach failed, err: %v", err) + klog.Errorf("azureDisk - detach disk(%s) failed, err: %v", diskName, err) } else { - glog.V(4).Info("azureDisk - azure disk detach succeeded") + klog.V(2).Infof("azureDisk - detach disk(%s) succeeded", diskName) // Invalidate the cache right after updating as.cloud.vmCache.Delete(vmName) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_vmss.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_vmss.go index b5db5e37ae97..b11705fdfc06 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_vmss.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_vmss.go @@ -20,8 +20,8 @@ import ( "fmt" "strings" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" - "github.com/golang/glog" + 
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" + "k8s.io/klog" "k8s.io/apimachinery/pkg/types" ) @@ -71,25 +71,18 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod ctx, cancel := getContextWithCancel() defer cancel() - glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk", nodeResourceGroup, nodeName) - resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, vm) - if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { - glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", nodeResourceGroup, nodeName) - retryErr := ss.UpdateVmssVMWithRetry(ctx, nodeResourceGroup, ssName, instanceID, vm) - if retryErr != nil { - err = retryErr - glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", nodeResourceGroup, nodeName) - } - } + klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s)", nodeResourceGroup, nodeName, diskName) + + _, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, vm) if err != nil { detail := err.Error() if strings.Contains(detail, errLeaseFailed) || strings.Contains(detail, errDiskBlobNotFound) { // if lease cannot be acquired or disk not found, immediately detach the disk and return the original error - glog.Infof("azureDisk - err %s, try detach", detail) + klog.Infof("azureDisk - err %s, try detach disk(%s)", detail, diskName) ss.DetachDiskByName(diskName, diskURI, nodeName) } } else { - glog.V(4).Info("azureDisk - azure attach succeeded") + klog.V(2).Infof("azureDisk - attach disk(%s) succeeded", diskName) // Invalidate the cache right after updating key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID)) ss.vmssVMCache.Delete(key) @@ -121,7 +114,7 @@ func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.No (disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) || (disk.ManagedDisk != nil && diskURI != "" && *disk.ManagedDisk.ID == diskURI) { // found the disk - glog.V(4).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI) + klog.V(2).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI) disks = append(disks[:i], disks[i+1:]...) 
bFoundDisk = true break @@ -135,20 +128,12 @@ func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.No vm.StorageProfile.DataDisks = &disks ctx, cancel := getContextWithCancel() defer cancel() - glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk", nodeResourceGroup, nodeName) - resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, vm) - if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { - glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", nodeResourceGroup, nodeName) - retryErr := ss.UpdateVmssVMWithRetry(ctx, nodeResourceGroup, ssName, instanceID, vm) - if retryErr != nil { - err = retryErr - glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", nodeResourceGroup, nodeName) - } - } + klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s)", nodeResourceGroup, nodeName, diskName) + _, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, vm) if err != nil { - glog.Errorf("azureDisk - azure disk detach %q from %s failed, err: %v", diskName, nodeName, err) + klog.Errorf("azureDisk - detach disk(%s) from %s failed, err: %v", diskName, nodeName, err) } else { - glog.V(4).Info("azureDisk - azure detach succeeded") + klog.V(2).Infof("azureDisk - detach disk(%s) succeeded", diskName) // Invalidate the cache right after updating key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID)) ss.vmssVMCache.Delete(key) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_fakes.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_fakes.go index 54d9d6396595..77cc02cb64a5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_fakes.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_fakes.go @@ -27,11 +27,11 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network" - "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage" + "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/to" ) @@ -207,6 +207,13 @@ func (fAPC *fakeAzurePIPClient) List(ctx context.Context, resourceGroupName stri return value, nil } +func (fAPC *fakeAzurePIPClient) setFakeStore(store map[string]map[string]network.PublicIPAddress) { + fAPC.mutex.Lock() + defer fAPC.mutex.Unlock() + + fAPC.FakeStore = store +} + type fakeAzureInterfacesClient struct { mutex *sync.Mutex FakeStore map[string]map[string]network.Interface @@ -247,7 +254,24 @@ func (fIC *fakeAzureInterfacesClient) Get(ctx context.Context, resourceGroupName } func (fIC *fakeAzureInterfacesClient) GetVirtualMachineScaleSetNetworkInterface(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error) { - return result, nil + fIC.mutex.Lock() + defer fIC.mutex.Unlock() + if _, ok := fIC.FakeStore[resourceGroupName]; ok { + if entity, ok := 
fIC.FakeStore[resourceGroupName][networkInterfaceName]; ok { + return entity, nil + } + } + return result, autorest.DetailedError{ + StatusCode: http.StatusNotFound, + Message: "No such Interface", + } +} + +func (fIC *fakeAzureInterfacesClient) setFakeStore(store map[string]map[string]network.Interface) { + fIC.mutex.Lock() + defer fIC.mutex.Unlock() + + fIC.FakeStore = store } type fakeAzureVirtualMachinesClient struct { @@ -874,11 +898,11 @@ func (f *fakeVMSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availa return nil, fmt.Errorf("unimplemented") } -func (f *fakeVMSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error { +func (f *fakeVMSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error { return fmt.Errorf("unimplemented") } -func (f *fakeVMSet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error { +func (f *fakeVMSet) EnsureBackendPoolDeleted(service *v1.Service, poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error { return fmt.Errorf("unimplemented") } @@ -893,3 +917,7 @@ func (f *fakeVMSet) DetachDiskByName(diskName, diskURI string, nodeName types.No func (f *fakeVMSet) GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) { return nil, fmt.Errorf("unimplemented") } + +func (f *fakeVMSet) GetPowerStatusByNodeName(name string) (string, error) { + return "", fmt.Errorf("unimplemented") +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_file.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_file.go index 138dabb5765b..ab87cf3625ff 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_file.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_file.go @@ -21,7 +21,7 @@ import ( azs "github.com/Azure/azure-sdk-for-go/storage" "github.com/Azure/go-autorest/autorest/azure" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -58,22 +58,11 @@ func (f *azureFileClient) createFileShare(accountName, accountKey, name string, if err != nil { return err } - // create a file share and set quota - // Note. Per https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share, - // setting x-ms-share-quota can set quota on the new share, but in reality, setting quota in CreateShare - // receives error "The metadata specified is invalid. It has characters that are not permitted."
- // As a result,breaking into two API calls: create share and set quota share := fileClient.GetShareReference(name) + share.Properties.Quota = sizeGiB if err = share.Create(nil); err != nil { return fmt.Errorf("failed to create file share, err: %v", err) } - share.Properties.Quota = sizeGiB - if err = share.SetProperties(nil); err != nil { - if err := share.Delete(nil); err != nil { - glog.Errorf("Error deleting share: %v", err) - } - return fmt.Errorf("failed to set quota on file share %s, err: %v", name, err) - } return nil } @@ -93,7 +82,7 @@ func (f *azureFileClient) resizeFileShare(accountName, accountKey, name string, } share := fileClient.GetShareReference(name) if share.Properties.Quota >= sizeGiB { - glog.Warningf("file share size(%dGi) is already greater or equal than requested size(%dGi), accountName: %s, shareName: %s", + klog.Warningf("file share size(%dGi) is already greater or equal than requested size(%dGi), accountName: %s, shareName: %s", share.Properties.Quota, sizeGiB, accountName, name) return nil } @@ -101,7 +90,7 @@ func (f *azureFileClient) resizeFileShare(accountName, accountKey, name string, if err = share.SetProperties(nil); err != nil { return fmt.Errorf("failed to set quota on file share %s, err: %v", name, err) } - glog.V(4).Infof("resize file share completed, accountName: %s, shareName: %s, sizeGiB: %d", accountName, name, sizeGiB) + klog.V(4).Infof("resize file share completed, accountName: %s, shareName: %s, sizeGiB: %d", accountName, name, sizeGiB) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instance_metadata.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instance_metadata.go index c9cf6ce1c07e..5f803b90a304 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instance_metadata.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instance_metadata.go @@ -18,11 +18,17 @@ package azure import ( "encoding/json" + "fmt" "io/ioutil" "net/http" + "time" ) -const metadataURL = "http://169.254.169.254/metadata/" +const ( + metadataCacheTTL = time.Minute + metadataCacheKey = "InstanceMetadata" + metadataURL = "http://169.254.169.254/metadata/instance" +) // NetworkMetadata contains metadata about an instance's network type NetworkMetadata struct { @@ -54,67 +60,100 @@ type Subnet struct { Prefix string `json:"prefix"` } -// InstanceMetadata knows how to query the Azure instance metadata server. -type InstanceMetadata struct { - baseURL string -} - // ComputeMetadata represents compute information type ComputeMetadata struct { - Name string `json:"name,omitempty"` - Zone string `json:"zone,omitempty"` - VMSize string `json:"vmSize,omitempty"` + SKU string `json:"sku,omitempty"` + Name string `json:"name,omitempty"` + Zone string `json:"zone,omitempty"` + VMSize string `json:"vmSize,omitempty"` + OSType string `json:"osType,omitempty"` + Location string `json:"location,omitempty"` + FaultDomain string `json:"platformFaultDomain,omitempty"` + UpdateDomain string `json:"platformUpdateDomain,omitempty"` + ResourceGroup string `json:"resourceGroupName,omitempty"` + VMScaleSetName string `json:"vmScaleSetName,omitempty"` } -// NewInstanceMetadata creates an instance of the InstanceMetadata accessor object. -func NewInstanceMetadata() *InstanceMetadata { - return &InstanceMetadata{ - baseURL: metadataURL, - } +// InstanceMetadata represents instance information. 
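+// It mirrors the top-level JSON document returned by the Azure instance metadata endpoint, with optional compute and network sections.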
+type InstanceMetadata struct { + Compute *ComputeMetadata `json:"compute,omitempty"` + Network *NetworkMetadata `json:"network,omitempty"` } -// makeMetadataURL makes a complete metadata URL from the given path. -func (i *InstanceMetadata) makeMetadataURL(path string) string { - return i.baseURL + path +// InstanceMetadataService knows how to query the Azure instance metadata server. +type InstanceMetadataService struct { + metadataURL string + imsCache *timedCache } -// Object queries the metadata server and populates the passed in object -func (i *InstanceMetadata) Object(path string, obj interface{}) error { - data, err := i.queryMetadataBytes(path, "json") - if err != nil { - return err +// NewInstanceMetadataService creates an instance of the InstanceMetadataService accessor object. +func NewInstanceMetadataService(metadataURL string) (*InstanceMetadataService, error) { + ims := &InstanceMetadataService{ + metadataURL: metadataURL, } - return json.Unmarshal(data, obj) -} -// Text queries the metadata server and returns the corresponding text -func (i *InstanceMetadata) Text(path string) (string, error) { - data, err := i.queryMetadataBytes(path, "text") + imsCache, err := newTimedcache(metadataCacheTTL, ims.getInstanceMetadata) if err != nil { - return "", err + return nil, err } - return string(data), err -} -func (i *InstanceMetadata) queryMetadataBytes(path, format string) ([]byte, error) { - client := &http.Client{} + ims.imsCache = imsCache + return ims, nil +} - req, err := http.NewRequest("GET", i.makeMetadataURL(path), nil) +func (ims *InstanceMetadataService) getInstanceMetadata(key string) (interface{}, error) { + req, err := http.NewRequest("GET", ims.metadataURL, nil) if err != nil { return nil, err } req.Header.Add("Metadata", "True") + req.Header.Add("User-Agent", "golang/kubernetes-cloud-provider") q := req.URL.Query() - q.Add("format", format) + q.Add("format", "json") q.Add("api-version", "2017-12-01") req.URL.RawQuery = q.Encode() + client := &http.Client{} resp, err := client.Do(req) if err != nil { return nil, err } defer resp.Body.Close() - return ioutil.ReadAll(resp.Body) + if resp.StatusCode != 200 { + return nil, fmt.Errorf("failure of getting instance metadata with response %q", resp.Status) + } + + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + obj := InstanceMetadata{} + err = json.Unmarshal(data, &obj) + if err != nil { + return nil, err + } + + return &obj, nil +} + +// GetMetadata gets instance metadata from cache. +func (ims *InstanceMetadataService) GetMetadata() (*InstanceMetadata, error) { + cache, err := ims.imsCache.Get(metadataCacheKey) + if err != nil { + return nil, err + } + + // Cache shouldn't be nil, but add a check in case something goes wrong.
+ if cache == nil { + return nil, fmt.Errorf("failure of getting instance metadata") + } + + if metadata, ok := cache.(*InstanceMetadata); ok { + return metadata, nil + } + + return nil, fmt.Errorf("failure of getting instance metadata") } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instances.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instances.go index 7eb403a7b29f..158ffb976ca8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instances.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instances.go @@ -18,14 +18,21 @@ package azure import ( "context" + "fmt" "os" "strings" "k8s.io/api/core/v1" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" +) + +const ( + vmPowerStatePrefix = "PowerState/" + vmPowerStateStopped = "stopped" + vmPowerStateDeallocated = "deallocated" ) // NodeAddresses returns the addresses of the specified instance. @@ -36,14 +43,14 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N return nil, err } if unmanaged { - glog.V(4).Infof("NodeAddresses: omitting unmanaged node %q", name) + klog.V(4).Infof("NodeAddresses: omitting unmanaged node %q", name) return nil, nil } addressGetter := func(nodeName types.NodeName) ([]v1.NodeAddress, error) { ip, publicIP, err := az.GetIPForMachineWithRetry(nodeName) if err != nil { - glog.V(2).Infof("NodeAddresses(%s) abort backoff: %v", nodeName, err) + klog.V(2).Infof("NodeAddresses(%s) abort backoff: %v", nodeName, err) return nil, err } @@ -61,12 +68,16 @@ } if az.UseInstanceMetadata { - computeMetadata, err := az.getComputeMetadata() + metadata, err := az.metadata.GetMetadata() if err != nil { return nil, err } - isLocalInstance, err := az.isCurrentInstance(name, computeMetadata.Name) + if metadata.Compute == nil || metadata.Network == nil { + return nil, fmt.Errorf("failure of getting instance metadata") + } + + isLocalInstance, err := az.isCurrentInstance(name, metadata.Compute.Name) if err != nil { return nil, err } @@ -76,21 +87,38 @@ return addressGetter(name) } - ipAddress := IPAddress{} - err = az.metadata.Object("instance/network/interface/0/ipv4/ipAddress/0", &ipAddress) - if err != nil { - return nil, err + if len(metadata.Network.Interface) == 0 { + return nil, fmt.Errorf("no interface is found for the instance") } + + // Use the IP addresses obtained from instance metadata.
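+ // Only the primary (first) network interface reported by the metadata service is considered when building the node address list.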
+ ipAddress := metadata.Network.Interface[0] addresses := []v1.NodeAddress{ - {Type: v1.NodeInternalIP, Address: ipAddress.PrivateIP}, {Type: v1.NodeHostName, Address: string(name)}, } - if len(ipAddress.PublicIP) > 0 { - addr := v1.NodeAddress{ - Type: v1.NodeExternalIP, - Address: ipAddress.PublicIP, + for _, address := range ipAddress.IPV4.IPAddress { + addresses = append(addresses, v1.NodeAddress{ + Type: v1.NodeInternalIP, + Address: address.PrivateIP, + }) + if len(address.PublicIP) > 0 { + addresses = append(addresses, v1.NodeAddress{ + Type: v1.NodeExternalIP, + Address: address.PublicIP, + }) + } + } + for _, address := range ipAddress.IPV6.IPAddress { + addresses = append(addresses, v1.NodeAddress{ + Type: v1.NodeInternalIP, + Address: address.PrivateIP, + }) + if len(address.PublicIP) > 0 { + addresses = append(addresses, v1.NodeAddress{ + Type: v1.NodeExternalIP, + Address: address.PublicIP, + }) } - addresses = append(addresses, addr) } return addresses, nil } @@ -104,7 +132,7 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N func (az *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) { // Returns nil for unmanaged nodes because azure cloud provider couldn't fetch information for them. if az.IsNodeUnmanagedByProviderID(providerID) { - glog.V(4).Infof("NodeAddressesByProviderID: omitting unmanaged node %q", providerID) + klog.V(4).Infof("NodeAddressesByProviderID: omitting unmanaged node %q", providerID) return nil, nil } @@ -121,7 +149,7 @@ func (az *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID strin func (az *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) { // Returns true for unmanaged nodes because azure cloud provider always assumes them exists. if az.IsNodeUnmanagedByProviderID(providerID) { - glog.V(4).Infof("InstanceExistsByProviderID: assuming unmanaged node %q exists", providerID) + klog.V(4).Infof("InstanceExistsByProviderID: assuming unmanaged node %q exists", providerID) return true, nil } @@ -143,18 +171,18 @@ func (az *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID stri // InstanceShutdownByProviderID returns true if the instance is in safe state to detach volumes func (az *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) { - return false, cloudprovider.NotImplemented -} + nodeName, err := az.vmSet.GetNodeNameByProviderID(providerID) + if err != nil { + return false, err + } -// getComputeMetadata gets compute information from instance metadata. -func (az *Cloud) getComputeMetadata() (*ComputeMetadata, error) { - computeInfo := ComputeMetadata{} - err := az.metadata.Object(computeMetadataURI, &computeInfo) + powerStatus, err := az.vmSet.GetPowerStatusByNodeName(string(nodeName)) if err != nil { - return nil, err + return false, err } + klog.V(5).Infof("InstanceShutdownByProviderID gets power status %q for node %q", powerStatus, nodeName) - return &computeInfo, nil + return strings.ToLower(powerStatus) == vmPowerStateStopped || strings.ToLower(powerStatus) == vmPowerStateDeallocated, nil } func (az *Cloud) isCurrentInstance(name types.NodeName, metadataVMName string) (bool, error) { @@ -182,17 +210,21 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e } if unmanaged { // InstanceID is same with nodeName for unmanaged nodes. 
- glog.V(4).Infof("InstanceID: getting ID %q for unmanaged node %q", name, name) + klog.V(4).Infof("InstanceID: getting ID %q for unmanaged node %q", name, name) return nodeName, nil } if az.UseInstanceMetadata { - computeMetadata, err := az.getComputeMetadata() + metadata, err := az.metadata.GetMetadata() if err != nil { return "", err } - isLocalInstance, err := az.isCurrentInstance(name, computeMetadata.Name) + if metadata.Compute == nil { + return "", fmt.Errorf("failure of getting instance metadata") + } + + isLocalInstance, err := az.isCurrentInstance(name, metadata.Compute.Name) if err != nil { return "", err } @@ -203,10 +235,7 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e } // Get resource group name. - resourceGroup, err := az.metadata.Text("instance/compute/resourceGroupName") - if err != nil { - return "", err - } + resourceGroup := metadata.Compute.ResourceGroup // Compose instanceID based on nodeName for standard instance. if az.VMType == vmTypeStandard { @@ -214,7 +243,7 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e } // Get scale set name and instanceID from vmName for vmss. - ssName, instanceID, err := extractVmssVMName(computeMetadata.Name) + ssName, instanceID, err := extractVmssVMName(metadata.Compute.Name) if err != nil { if err == ErrorNotVmssInstance { // Compose machineID for standard Node. @@ -235,7 +264,7 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e func (az *Cloud) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) { // Returns "" for unmanaged nodes because azure cloud provider couldn't fetch information for them. if az.IsNodeUnmanagedByProviderID(providerID) { - glog.V(4).Infof("InstanceTypeByProviderID: omitting unmanaged node %q", providerID) + klog.V(4).Infof("InstanceTypeByProviderID: omitting unmanaged node %q", providerID) return "", nil } @@ -258,23 +287,27 @@ func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string, return "", err } if unmanaged { - glog.V(4).Infof("InstanceType: omitting unmanaged node %q", name) + klog.V(4).Infof("InstanceType: omitting unmanaged node %q", name) return "", nil } if az.UseInstanceMetadata { - computeMetadata, err := az.getComputeMetadata() + metadata, err := az.metadata.GetMetadata() if err != nil { return "", err } - isLocalInstance, err := az.isCurrentInstance(name, computeMetadata.Name) + if metadata.Compute == nil { + return "", fmt.Errorf("failure of getting instance metadata") + } + + isLocalInstance, err := az.isCurrentInstance(name, metadata.Compute.Name) if err != nil { return "", err } if isLocalInstance { - if computeMetadata.VMSize != "" { - return computeMetadata.VMSize, nil + if metadata.Compute.VMSize != "" { + return metadata.Compute.VMSize, nil } } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.go index 83d5f3612022..77bfdd20de26 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.go @@ -26,12 +26,12 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" + cloudprovider "k8s.io/cloud-provider" serviceapi "k8s.io/kubernetes/pkg/api/v1/service" - "k8s.io/kubernetes/pkg/cloudprovider" 
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network" "github.com/Azure/go-autorest/autorest/to" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -101,7 +101,7 @@ func (az *Cloud) GetLoadBalancer(ctx context.Context, clusterName string, servic } if !exists { serviceName := getServiceName(service) - glog.V(5).Infof("getloadbalancer (cluster:%s) (service:%s) - doesn't exist", clusterName, serviceName) + klog.V(5).Infof("getloadbalancer (cluster:%s) (service:%s) - doesn't exist", clusterName, serviceName) return nil, false, nil } return status, true, nil @@ -120,7 +120,7 @@ func (az *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, ser // the service may be switched from an internal LB to a public one, or vise versa. // Here we'll firstly ensure service do not lie in the opposite LB. serviceName := getServiceName(service) - glog.V(5).Infof("ensureloadbalancer(%s): START clusterName=%q", serviceName, clusterName) + klog.V(5).Infof("ensureloadbalancer(%s): START clusterName=%q", serviceName, clusterName) lb, err := az.reconcileLoadBalancer(clusterName, service, nodes, true /* wantLb */) if err != nil { @@ -136,7 +136,7 @@ func (az *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, ser if lbStatus != nil && len(lbStatus.Ingress) > 0 { serviceIP = &lbStatus.Ingress[0].IP } - glog.V(2).Infof("EnsureLoadBalancer: reconciling security group for service %q with IP %q, wantLb = true", serviceName, logSafe(serviceIP)) + klog.V(2).Infof("EnsureLoadBalancer: reconciling security group for service %q with IP %q, wantLb = true", serviceName, logSafe(serviceIP)) if _, err := az.reconcileSecurityGroup(clusterName, service, serviceIP, true /* wantLb */); err != nil { return nil, err } @@ -147,7 +147,7 @@ func (az *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, ser return nil, err } - if _, err := az.reconcilePublicIP(clusterName, updateService, true /* wantLb */); err != nil { + if _, err := az.reconcilePublicIP(clusterName, updateService, lb, true /* wantLb */); err != nil { return nil, err } @@ -169,14 +169,14 @@ func (az *Cloud) UpdateLoadBalancer(ctx context.Context, clusterName string, ser func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error { isInternal := requiresInternalLoadBalancer(service) serviceName := getServiceName(service) - glog.V(5).Infof("delete(%s): START clusterName=%q", serviceName, clusterName) + klog.V(5).Infof("delete(%s): START clusterName=%q", serviceName, clusterName) serviceIPToCleanup, err := az.findServiceIPAddress(ctx, clusterName, service, isInternal) if err != nil { return err } - glog.V(2).Infof("EnsureLoadBalancerDeleted: reconciling security group for service %q with IP %q, wantLb = false", serviceName, serviceIPToCleanup) + klog.V(2).Infof("EnsureLoadBalancerDeleted: reconciling security group for service %q with IP %q, wantLb = false", serviceName, serviceIPToCleanup) if _, err := az.reconcileSecurityGroup(clusterName, service, &serviceIPToCleanup, false /* wantLb */); err != nil { return err } @@ -185,11 +185,11 @@ func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName stri return err } - if _, err := az.reconcilePublicIP(clusterName, service, false /* wantLb */); err != nil { + if _, err := az.reconcilePublicIP(clusterName, service, nil, false /* wantLb */); err != nil { return err } - glog.V(2).Infof("delete(%s): FINISH", serviceName) + klog.V(2).Infof("delete(%s): FINISH", serviceName) return nil } @@ 
-210,7 +210,7 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string, primaryVMSetName := az.vmSet.GetPrimaryVMSetName() defaultLBName := az.getAzureLoadBalancerName(clusterName, primaryVMSetName, isInternal) - existingLBs, err := az.ListLBWithRetry() + existingLBs, err := az.ListLBWithRetry(service) if err != nil { return nil, nil, false, err } @@ -279,13 +279,13 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string, func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, existingLBs *[]network.LoadBalancer, nodes []*v1.Node) (selectedLB *network.LoadBalancer, existsLb bool, err error) { isInternal := requiresInternalLoadBalancer(service) serviceName := getServiceName(service) - glog.V(2).Infof("selectLoadBalancer for service (%s): isInternal(%v) - start", serviceName, isInternal) + klog.V(2).Infof("selectLoadBalancer for service (%s): isInternal(%v) - start", serviceName, isInternal) vmSetNames, err := az.vmSet.GetVMSetNames(service, nodes) if err != nil { - glog.Errorf("az.selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - az.GetVMSetNames failed, err=(%v)", clusterName, serviceName, isInternal, err) + klog.Errorf("az.selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - az.GetVMSetNames failed, err=(%v)", clusterName, serviceName, isInternal, err) return nil, false, err } - glog.Infof("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - vmSetNames %v", clusterName, serviceName, isInternal, *vmSetNames) + klog.Infof("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - vmSetNames %v", clusterName, serviceName, isInternal, *vmSetNames) mapExistingLBs := map[string]network.LoadBalancer{} for _, lb := range *existingLBs { @@ -320,13 +320,13 @@ func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, exi if selectedLB == nil { err = fmt.Errorf("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - unable to find load balancer for selected VM sets %v", clusterName, serviceName, isInternal, *vmSetNames) - glog.Error(err) + klog.Error(err) return nil, false, err } // validate if the selected LB has not exceeded the MaximumLoadBalancerRuleCount if az.Config.MaximumLoadBalancerRuleCount != 0 && selectedLBRuleCount >= az.Config.MaximumLoadBalancerRuleCount { err = fmt.Errorf("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - all available load balancers have exceeded maximum rule limit %d, vmSetNames (%v)", clusterName, serviceName, isInternal, selectedLBRuleCount, *vmSetNames) - glog.Error(err) + klog.Error(err) return selectedLB, existsLb, err } @@ -335,11 +335,11 @@ func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, exi func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.LoadBalancer) (status *v1.LoadBalancerStatus, err error) { if lb == nil { - glog.V(10).Info("getServiceLoadBalancerStatus: lb is nil") + klog.V(10).Info("getServiceLoadBalancerStatus: lb is nil") return nil, nil } if lb.FrontendIPConfigurations == nil || *lb.FrontendIPConfigurations == nil { - glog.V(10).Info("getServiceLoadBalancerStatus: lb.FrontendIPConfigurations is nil") + klog.V(10).Info("getServiceLoadBalancerStatus: lb.FrontendIPConfigurations is nil") return nil, nil } isInternal := requiresInternalLoadBalancer(service) @@ -371,8 +371,8 @@ func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.L } } - glog.V(2).Infof("getServiceLoadBalancerStatus gets ingress IP %q from frontendIPConfiguration 
%q for service %q", *lbIP, lbFrontendIPConfigName, serviceName) - return &v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: *lbIP}}}, nil + klog.V(2).Infof("getServiceLoadBalancerStatus gets ingress IP %q from frontendIPConfiguration %q for service %q", to.String(lbIP), lbFrontendIPConfigName, serviceName) + return &v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: to.String(lbIP)}}}, nil } } @@ -387,7 +387,7 @@ func (az *Cloud) determinePublicIPName(clusterName string, service *v1.Service) pipResourceGroup := az.getPublicIPAddressResourceGroup(service) - pips, err := az.ListPIPWithRetry(pipResourceGroup) + pips, err := az.ListPIPWithRetry(service, pipResourceGroup) if err != nil { return "", err } @@ -434,11 +434,11 @@ func (az *Cloud) findServiceIPAddress(ctx context.Context, clusterName string, s return "", err } if !existsLb { - glog.V(2).Infof("Expected to find an IP address for service %s but did not. Assuming it has been removed", service.Name) + klog.V(2).Infof("Expected to find an IP address for service %s but did not. Assuming it has been removed", service.Name) return "", nil } if len(lbStatus.Ingress) < 1 { - glog.V(2).Infof("Expected to find an IP address for service %s but it had no ingresses. Assuming it has been removed", service.Name) + klog.V(2).Infof("Expected to find an IP address for service %s but it had no ingresses. Assuming it has been removed", service.Name) return "", nil } @@ -473,14 +473,14 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai } } - glog.V(2).Infof("ensurePublicIPExists for service(%s): pip(%s) - creating", serviceName, *pip.Name) - glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%s, %q): start", pipResourceGroup, *pip.Name) - err = az.CreateOrUpdatePIPWithRetry(pipResourceGroup, pip) + klog.V(2).Infof("ensurePublicIPExists for service(%s): pip(%s) - creating", serviceName, *pip.Name) + klog.V(10).Infof("CreateOrUpdatePIPWithRetry(%s, %q): start", pipResourceGroup, *pip.Name) + err = az.CreateOrUpdatePIPWithRetry(service, pipResourceGroup, pip) if err != nil { - glog.V(2).Infof("ensure(%s) abort backoff: pip(%s) - creating", serviceName, *pip.Name) + klog.V(2).Infof("ensure(%s) abort backoff: pip(%s) - creating", serviceName, *pip.Name) return nil, err } - glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%s, %q): end", pipResourceGroup, *pip.Name) + klog.V(10).Infof("CreateOrUpdatePIPWithRetry(%s, %q): end", pipResourceGroup, *pip.Name) ctx, cancel := getContextWithCancel() defer cancel() @@ -488,7 +488,6 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai if err != nil { return nil, err } - return &pip, nil } @@ -571,14 +570,14 @@ func (az *Cloud) isFrontendIPChanged(clusterName string, config network.Frontend func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node, wantLb bool) (*network.LoadBalancer, error) { isInternal := requiresInternalLoadBalancer(service) serviceName := getServiceName(service) - glog.V(2).Infof("reconcileLoadBalancer for service(%s) - wantLb(%t): started", serviceName, wantLb) + klog.V(2).Infof("reconcileLoadBalancer for service(%s) - wantLb(%t): started", serviceName, wantLb) lb, _, _, err := az.getServiceLoadBalancer(service, clusterName, nodes, wantLb) if err != nil { - glog.Errorf("reconcileLoadBalancer: failed to get load balancer for service %q, error: %v", serviceName, err) + klog.Errorf("reconcileLoadBalancer: failed to get load balancer for service %q, error: %v", serviceName, err) return nil, 
err } lbName := *lb.Name - glog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) wantLb(%t) resolved load balancer name", serviceName, lbName, wantLb) + klog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) wantLb(%t) resolved load balancer name", serviceName, lbName, wantLb) lbFrontendIPConfigName := az.getFrontendIPConfigName(service, subnet(service)) lbFrontendIPConfigID := az.getFrontendIPConfigID(lbName, lbFrontendIPConfigName) lbBackendPoolName := getBackendPoolName(clusterName) @@ -601,18 +600,18 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, foundBackendPool := false for _, bp := range newBackendPools { if strings.EqualFold(*bp.Name, lbBackendPoolName) { - glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - found wanted backendpool. not adding anything", serviceName, wantLb) + klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - found wanted backendpool. not adding anything", serviceName, wantLb) foundBackendPool = true break } else { - glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - found other backendpool %s", serviceName, wantLb, *bp.Name) + klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - found other backendpool %s", serviceName, wantLb, *bp.Name) } } if !foundBackendPool { newBackendPools = append(newBackendPools, network.BackendAddressPool{ Name: to.StringPtr(lbBackendPoolName), }) - glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - adding backendpool", serviceName, wantLb) + klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - adding backendpool", serviceName, wantLb) dirtyLb = true lb.BackendAddressPools = &newBackendPools @@ -630,7 +629,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, for i := len(newConfigs) - 1; i >= 0; i-- { config := newConfigs[i] if az.serviceOwnsFrontendIP(config, service) { - glog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, lbFrontendIPConfigName) + klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, lbFrontendIPConfigName) newConfigs = append(newConfigs[:i], newConfigs[i+1:]...) dirtyConfigs = true } @@ -643,7 +642,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, return nil, err } if isFipChanged { - glog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, *config.Name) + klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, *config.Name) newConfigs = append(newConfigs[:i], newConfigs[i+1:]...) 
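+ // the changed frontend IP configuration is dropped here and re-created with the desired settings later in the wantLb path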
dirtyConfigs = true } @@ -703,10 +702,10 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, newConfigs = append(newConfigs, network.FrontendIPConfiguration{ - Name: to.StringPtr(lbFrontendIPConfigName), + Name: to.StringPtr(lbFrontendIPConfigName), FrontendIPConfigurationPropertiesFormat: fipConfigurationProperties, }) - glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - adding", serviceName, wantLb, lbFrontendIPConfigName) + klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - adding", serviceName, wantLb, lbFrontendIPConfigName) dirtyConfigs = true } } @@ -727,15 +726,15 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, for i := len(updatedProbes) - 1; i >= 0; i-- { existingProbe := updatedProbes[i] if az.serviceOwnsRule(service, *existingProbe.Name) { - glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - considering evicting", serviceName, wantLb, *existingProbe.Name) + klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - considering evicting", serviceName, wantLb, *existingProbe.Name) keepProbe := false if findProbe(expectedProbes, existingProbe) { - glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - keeping", serviceName, wantLb, *existingProbe.Name) + klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - keeping", serviceName, wantLb, *existingProbe.Name) keepProbe = true } if !keepProbe { updatedProbes = append(updatedProbes[:i], updatedProbes[i+1:]...) - glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - dropping", serviceName, wantLb, *existingProbe.Name) + klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - dropping", serviceName, wantLb, *existingProbe.Name) dirtyProbes = true } } @@ -744,11 +743,11 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, for _, expectedProbe := range expectedProbes { foundProbe := false if findProbe(updatedProbes, expectedProbe) { - glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - already exists", serviceName, wantLb, *expectedProbe.Name) + klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - already exists", serviceName, wantLb, *expectedProbe.Name) foundProbe = true } if !foundProbe { - glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - adding", serviceName, wantLb, *expectedProbe.Name) + klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - adding", serviceName, wantLb, *expectedProbe.Name) updatedProbes = append(updatedProbes, expectedProbe) dirtyProbes = true } @@ -769,13 +768,13 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, existingRule := updatedRules[i] if az.serviceOwnsRule(service, *existingRule.Name) { keepRule := false - glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name) + klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name) if findRule(expectedRules, existingRule) { - glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - keeping", serviceName, wantLb, *existingRule.Name) + klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - keeping", serviceName, wantLb, *existingRule.Name) 
keepRule = true } if !keepRule { - glog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - dropping", serviceName, wantLb, *existingRule.Name) + klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - dropping", serviceName, wantLb, *existingRule.Name) updatedRules = append(updatedRules[:i], updatedRules[i+1:]...) dirtyRules = true } @@ -785,11 +784,11 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, for _, expectedRule := range expectedRules { foundRule := false if findRule(updatedRules, expectedRule) { - glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name) + klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name) foundRule = true } if !foundRule { - glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) adding", serviceName, wantLb, *expectedRule.Name) + klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) adding", serviceName, wantLb, *expectedRule.Name) updatedRules = append(updatedRules, expectedRule) dirtyRules = true } @@ -806,31 +805,31 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, if lb.FrontendIPConfigurations == nil || len(*lb.FrontendIPConfigurations) == 0 { // When FrontendIPConfigurations is empty, we need to delete the Azure load balancer resource itself, // because an Azure load balancer cannot have an empty FrontendIPConfigurations collection - glog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) - deleting; no remaining frontendIPConfigurations", serviceName, lbName) + klog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) - deleting; no remaining frontendIPConfigurations", serviceName, lbName) // Remove backend pools from vmSets. This is required for virtual machine scale sets before removing the LB. vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName) - glog.V(10).Infof("EnsureBackendPoolDeleted(%s, %s): start", lbBackendPoolID, vmSetName) - err := az.vmSet.EnsureBackendPoolDeleted(lbBackendPoolID, vmSetName, lb.BackendAddressPools) + klog.V(10).Infof("EnsureBackendPoolDeleted(%s, %s): start", lbBackendPoolID, vmSetName) + err := az.vmSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools) if err != nil { - glog.Errorf("EnsureBackendPoolDeleted(%s, %s) failed: %v", lbBackendPoolID, vmSetName, err) + klog.Errorf("EnsureBackendPoolDeleted(%s, %s) failed: %v", lbBackendPoolID, vmSetName, err) return nil, err } - glog.V(10).Infof("EnsureBackendPoolDeleted(%s, %s): end", lbBackendPoolID, vmSetName) + klog.V(10).Infof("EnsureBackendPoolDeleted(%s, %s): end", lbBackendPoolID, vmSetName) // Remove the LB. 
- glog.V(10).Infof("reconcileLoadBalancer: az.DeleteLBWithRetry(%q): start", lbName) - err = az.DeleteLBWithRetry(lbName) + klog.V(10).Infof("reconcileLoadBalancer: az.DeleteLBWithRetry(%q): start", lbName) + err = az.DeleteLBWithRetry(service, lbName) if err != nil { - glog.V(2).Infof("reconcileLoadBalancer for service(%s) abort backoff: lb(%s) - deleting; no remaining frontendIPConfigurations", serviceName, lbName) + klog.V(2).Infof("reconcileLoadBalancer for service(%s) abort backoff: lb(%s) - deleting; no remaining frontendIPConfigurations", serviceName, lbName) return nil, err } - glog.V(10).Infof("az.DeleteLBWithRetry(%q): end", lbName) + klog.V(10).Infof("az.DeleteLBWithRetry(%q): end", lbName) } else { - glog.V(2).Infof("reconcileLoadBalancer: reconcileLoadBalancer for service(%s): lb(%s) - updating", serviceName, lbName) - err := az.CreateOrUpdateLBWithRetry(*lb) + klog.V(2).Infof("reconcileLoadBalancer: reconcileLoadBalancer for service(%s): lb(%s) - updating", serviceName, lbName) + err := az.CreateOrUpdateLBWithRetry(service, *lb) if err != nil { - glog.V(2).Infof("reconcileLoadBalancer for service(%s) abort backoff: lb(%s) - updating", serviceName, lbName) + klog.V(2).Infof("reconcileLoadBalancer for service(%s) abort backoff: lb(%s) - updating", serviceName, lbName) return nil, err } @@ -838,7 +837,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, // Refresh updated lb which will be used later in other places. newLB, exist, err := az.getAzureLoadBalancer(lbName) if err != nil { - glog.V(2).Infof("reconcileLoadBalancer for service(%s): getAzureLoadBalancer(%s) failed: %v", serviceName, lbName, err) + klog.V(2).Infof("reconcileLoadBalancer for service(%s): getAzureLoadBalancer(%s) failed: %v", serviceName, lbName, err) return nil, err } if !exist { @@ -852,13 +851,13 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, if wantLb && nodes != nil { // Add the machines to the backend pool if they're not already vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName) - err := az.vmSet.EnsureHostsInPool(serviceName, nodes, lbBackendPoolID, vmSetName, isInternal) + err := az.vmSet.EnsureHostsInPool(service, nodes, lbBackendPoolID, vmSetName, isInternal) if err != nil { return nil, err } } - glog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) finished", serviceName, lbName) + klog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) finished", serviceName, lbName) return lb, nil } @@ -882,7 +881,7 @@ func (az *Cloud) reconcileLoadBalancerRule( for _, port := range ports { lbRuleName := az.getLoadBalancerRuleName(service, port, subnet(service)) - glog.V(2).Infof("reconcileLoadBalancerRule lb name (%s) rule name (%s)", lbName, lbRuleName) + klog.V(2).Infof("reconcileLoadBalancerRule lb name (%s) rule name (%s)", lbName, lbRuleName) transportProto, _, probeProto, err := getProtocolsFromKubernetesProtocol(port.Protocol) if err != nil { @@ -957,12 +956,12 @@ func (az *Cloud) reconcileLoadBalancerRule( // This entails adding required, missing SecurityRules and removing stale rules. 
func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, lbIP *string, wantLb bool) (*network.SecurityGroup, error) { serviceName := getServiceName(service) - glog.V(5).Infof("reconcileSecurityGroup(%s): START clusterName=%q", serviceName, clusterName) + klog.V(5).Infof("reconcileSecurityGroup(%s): START clusterName=%q", serviceName, clusterName) ports := service.Spec.Ports if ports == nil { if useSharedSecurityRule(service) { - glog.V(2).Infof("Attempting to reconcile security group for service %s, but service uses shared rule and we don't know which port it's for", service.Name) + klog.V(2).Infof("Attempting to reconcile security group for service %s, but service uses shared rule and we don't know which port it's for", service.Name) return nil, fmt.Errorf("No port info for reconciling shared rule for service %s", service.Name) } ports = []v1.ServicePort{} @@ -1026,8 +1025,8 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, DestinationPortRange: to.StringPtr(strconv.Itoa(int(port.Port))), SourceAddressPrefix: to.StringPtr(sourceAddressPrefixes[j]), DestinationAddressPrefix: to.StringPtr(destinationIPAddress), - Access: network.SecurityRuleAccessAllow, - Direction: network.SecurityRuleDirectionInbound, + Access: network.SecurityRuleAccessAllow, + Direction: network.SecurityRuleDirectionInbound, }, } } @@ -1035,7 +1034,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, } for _, r := range expectedSecurityRules { - glog.V(10).Infof("Expecting security rule for %s: %s:%s -> %s:%s", service.Name, *r.SourceAddressPrefix, *r.SourcePortRange, *r.DestinationAddressPrefix, *r.DestinationPortRange) + klog.V(10).Infof("Expecting security rule for %s: %s:%s -> %s:%s", service.Name, *r.SourceAddressPrefix, *r.SourcePortRange, *r.DestinationAddressPrefix, *r.DestinationPortRange) } // update security rules @@ -1046,7 +1045,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, } for _, r := range updatedRules { - glog.V(10).Infof("Existing security rule while processing %s: %s:%s -> %s:%s", service.Name, logSafe(r.SourceAddressPrefix), logSafe(r.SourcePortRange), logSafeCollection(r.DestinationAddressPrefix, r.DestinationAddressPrefixes), logSafe(r.DestinationPortRange)) + klog.V(10).Infof("Existing security rule while processing %s: %s:%s -> %s:%s", service.Name, logSafe(r.SourceAddressPrefix), logSafe(r.SourcePortRange), logSafeCollection(r.DestinationAddressPrefix, r.DestinationAddressPrefixes), logSafe(r.DestinationPortRange)) } // update security rules: remove unwanted rules that belong privately @@ -1054,14 +1053,14 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, for i := len(updatedRules) - 1; i >= 0; i-- { existingRule := updatedRules[i] if az.serviceOwnsRule(service, *existingRule.Name) { - glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name) + klog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name) keepRule := false if findSecurityRule(expectedSecurityRules, existingRule) { - glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - keeping", serviceName, wantLb, *existingRule.Name) + klog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - keeping", serviceName, wantLb, *existingRule.Name) keepRule = true } if !keepRule { - glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - dropping", serviceName, wantLb, *existingRule.Name) + 
klog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - dropping", serviceName, wantLb, *existingRule.Name) updatedRules = append(updatedRules[:i], updatedRules[i+1:]...) dirtySg = true } @@ -1075,17 +1074,17 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, sharedRuleName := az.getSecurityRuleName(service, port, sourceAddressPrefix) sharedIndex, sharedRule, sharedRuleFound := findSecurityRuleByName(updatedRules, sharedRuleName) if !sharedRuleFound { - glog.V(4).Infof("Expected to find shared rule %s for service %s being deleted, but did not", sharedRuleName, service.Name) + klog.V(4).Infof("Expected to find shared rule %s for service %s being deleted, but did not", sharedRuleName, service.Name) return nil, fmt.Errorf("Expected to find shared rule %s for service %s being deleted, but did not", sharedRuleName, service.Name) } if sharedRule.DestinationAddressPrefixes == nil { - glog.V(4).Infof("Expected to have array of destinations in shared rule for service %s being deleted, but did not", service.Name) + klog.V(4).Infof("Expected to have array of destinations in shared rule for service %s being deleted, but did not", service.Name) return nil, fmt.Errorf("Expected to have array of destinations in shared rule for service %s being deleted, but did not", service.Name) } existingPrefixes := *sharedRule.DestinationAddressPrefixes addressIndex, found := findIndex(existingPrefixes, destinationIPAddress) if !found { - glog.V(4).Infof("Expected to find destination address %s in shared rule %s for service %s being deleted, but did not", destinationIPAddress, sharedRuleName, service.Name) + klog.V(4).Infof("Expected to find destination address %s in shared rule %s for service %s being deleted, but did not", destinationIPAddress, sharedRuleName, service.Name) return nil, fmt.Errorf("Expected to find destination address %s in shared rule %s for service %s being deleted, but did not", destinationIPAddress, sharedRuleName, service.Name) } if len(existingPrefixes) == 1 { @@ -1115,7 +1114,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, for _, expectedRule := range expectedSecurityRules { foundRule := false if findSecurityRule(updatedRules, expectedRule) { - glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name) + klog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name) foundRule = true } if foundRule && allowsConsolidation(expectedRule) { @@ -1124,7 +1123,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, dirtySg = true } if !foundRule { - glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - adding", serviceName, wantLb, *expectedRule.Name) + klog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - adding", serviceName, wantLb, *expectedRule.Name) nextAvailablePriority, err := getNextAvailablePriority(updatedRules) if err != nil { @@ -1138,16 +1137,16 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, } for _, r := range updatedRules { - glog.V(10).Infof("Updated security rule while processing %s: %s:%s -> %s:%s", service.Name, logSafe(r.SourceAddressPrefix), logSafe(r.SourcePortRange), logSafeCollection(r.DestinationAddressPrefix, r.DestinationAddressPrefixes), logSafe(r.DestinationPortRange)) + klog.V(10).Infof("Updated security rule while processing %s: %s:%s -> %s:%s", service.Name, logSafe(r.SourceAddressPrefix), logSafe(r.SourcePortRange), 
logSafeCollection(r.DestinationAddressPrefix, r.DestinationAddressPrefixes), logSafe(r.DestinationPortRange)) } if dirtySg { sg.SecurityRules = &updatedRules - glog.V(2).Infof("reconcileSecurityGroup for service(%s): sg(%s) - updating", serviceName, *sg.Name) - glog.V(10).Infof("CreateOrUpdateSGWithRetry(%q): start", *sg.Name) - err := az.CreateOrUpdateSGWithRetry(sg) + klog.V(2).Infof("reconcileSecurityGroup for service(%s): sg(%s) - updating", serviceName, *sg.Name) + klog.V(10).Infof("CreateOrUpdateSGWithRetry(%q): start", *sg.Name) + err := az.CreateOrUpdateSGWithRetry(service, sg) if err != nil { - glog.V(2).Infof("ensure(%s) abort backoff: sg(%s) - updating", serviceName, *sg.Name) + klog.V(2).Infof("ensure(%s) abort backoff: sg(%s) - updating", serviceName, *sg.Name) // TODO (Nov 2017): remove when augmented security rules are out of preview // we could try to parse the response but it's not worth it for bridging a preview errorDescription := err.Error() @@ -1158,7 +1157,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, // END TODO return nil, err } - glog.V(10).Infof("CreateOrUpdateSGWithRetry(%q): end", *sg.Name) + klog.V(10).Infof("CreateOrUpdateSGWithRetry(%q): end", *sg.Name) } return &sg, nil } @@ -1199,13 +1198,13 @@ func findIndex(strs []string, s string) (int, bool) { } func allowsConsolidation(rule network.SecurityRule) bool { - return strings.HasPrefix(*rule.Name, "shared") + return strings.HasPrefix(to.String(rule.Name), "shared") } func findConsolidationCandidate(rules []network.SecurityRule, rule network.SecurityRule) (int, bool) { for index, r := range rules { if allowsConsolidation(r) { - if strings.EqualFold(*r.Name, *rule.Name) { + if strings.EqualFold(to.String(r.Name), to.String(rule.Name)) { return index, true } } @@ -1227,8 +1226,8 @@ func makeConsolidatable(rule network.SecurityRule) network.SecurityRule { SourceAddressPrefix: rule.SourceAddressPrefix, SourceAddressPrefixes: rule.SourceAddressPrefixes, DestinationAddressPrefixes: collectionOrSingle(rule.DestinationAddressPrefixes, rule.DestinationAddressPrefix), - Access: rule.Access, - Direction: rule.Direction, + Access: rule.Access, + Direction: rule.Direction, }, } } @@ -1249,8 +1248,8 @@ func consolidate(existingRule network.SecurityRule, newRule network.SecurityRule SourceAddressPrefix: existingRule.SourceAddressPrefix, SourceAddressPrefixes: existingRule.SourceAddressPrefixes, DestinationAddressPrefixes: destinations, - Access: existingRule.Access, - Direction: existingRule.Direction, + Access: existingRule.Access, + Direction: existingRule.Direction, }, } } @@ -1302,7 +1301,7 @@ func deduplicate(collection *[]string) *[]string { } // This reconciles the PublicIP resources similar to how the LB is reconciled. 
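+// Stale public IPs tagged for this service are deleted through safeDeletePublicIP (defined below), which first strips any load balancer references to the IP.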
-func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, wantLb bool) (*network.PublicIPAddress, error) { +func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, lb *network.LoadBalancer, wantLb bool) (*network.PublicIPAddress, error) { isInternal := requiresInternalLoadBalancer(service) serviceName := getServiceName(service) var desiredPipName string @@ -1316,12 +1315,13 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, want pipResourceGroup := az.getPublicIPAddressResourceGroup(service) - pips, err := az.ListPIPWithRetry(pipResourceGroup) + pips, err := az.ListPIPWithRetry(service, pipResourceGroup) if err != nil { return nil, err } - for _, pip := range pips { + for i := range pips { + pip := pips[i] if pip.Tags != nil && (pip.Tags)["service"] != nil && *(pip.Tags)["service"] == serviceName { @@ -1331,21 +1331,13 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, want // This is the only case we should preserve the // Public ip resource with match service tag } else { - glog.V(2).Infof("reconcilePublicIP for service(%s): pip(%s) - deleting", serviceName, pipName) - glog.V(10).Infof("DeletePublicIPWithRetry(%s, %q): start", pipResourceGroup, pipName) - err = az.DeletePublicIPWithRetry(pipResourceGroup, pipName) - if err != nil { - glog.V(2).Infof("ensure(%s) abort backoff: pip(%s) - deleting", serviceName, pipName) - // We let err to pass through - // It may be ignorable - } - glog.V(10).Infof("DeletePublicIPWithRetry(%s, %q): end", pipResourceGroup, pipName) // response not read yet... - - err = ignoreStatusNotFoundFromError(err) + klog.V(2).Infof("reconcilePublicIP for service(%s): pip(%s) - deleting", serviceName, pipName) + err := az.safeDeletePublicIP(service, pipResourceGroup, &pip, lb) if err != nil { + klog.Errorf("safeDeletePublicIP(%s) failed with error: %v", pipName, err) return nil, err } - glog.V(2).Infof("reconcilePublicIP for service(%s): pip(%s) - finished", serviceName, pipName) + klog.V(2).Infof("reconcilePublicIP for service(%s): pip(%s) - finished", serviceName, pipName) } } @@ -1363,9 +1355,89 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, want return nil, nil } +// safeDeletePublicIP deletes public IP by removing its reference first. +func (az *Cloud) safeDeletePublicIP(service *v1.Service, pipResourceGroup string, pip *network.PublicIPAddress, lb *network.LoadBalancer) error { + // Remove references if pip.IPConfiguration is not nil. + if pip.PublicIPAddressPropertiesFormat != nil && + pip.PublicIPAddressPropertiesFormat.IPConfiguration != nil && + lb != nil && lb.LoadBalancerPropertiesFormat != nil && + lb.LoadBalancerPropertiesFormat.FrontendIPConfigurations != nil { + referencedLBRules := []network.SubResource{} + frontendIPConfigUpdated := false + loadBalancerRuleUpdated := false + + // Check whether there are still frontend IP configurations referring to it. 
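+ // A public IP cannot be deleted while a frontend IP configuration still references it, so the reference is removed from the load balancer first.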
+ ipConfigurationID := to.String(pip.PublicIPAddressPropertiesFormat.IPConfiguration.ID) + if ipConfigurationID != "" { + lbFrontendIPConfigs := *lb.LoadBalancerPropertiesFormat.FrontendIPConfigurations + for i := len(lbFrontendIPConfigs) - 1; i >= 0; i-- { + config := lbFrontendIPConfigs[i] + if strings.EqualFold(ipConfigurationID, to.String(config.ID)) { + if config.FrontendIPConfigurationPropertiesFormat != nil && + config.FrontendIPConfigurationPropertiesFormat.LoadBalancingRules != nil { + referencedLBRules = *config.FrontendIPConfigurationPropertiesFormat.LoadBalancingRules + } + + frontendIPConfigUpdated = true + lbFrontendIPConfigs = append(lbFrontendIPConfigs[:i], lbFrontendIPConfigs[i+1:]...) + break + } + } + + if frontendIPConfigUpdated { + lb.LoadBalancerPropertiesFormat.FrontendIPConfigurations = &lbFrontendIPConfigs + } + } + + // Check whether there are still load balancer rules referring to it. + if len(referencedLBRules) > 0 { + referencedLBRuleIDs := sets.NewString() + for _, refer := range referencedLBRules { + referencedLBRuleIDs.Insert(to.String(refer.ID)) + } + + if lb.LoadBalancerPropertiesFormat.LoadBalancingRules != nil { + lbRules := *lb.LoadBalancerPropertiesFormat.LoadBalancingRules + for i := len(lbRules) - 1; i >= 0; i-- { + ruleID := to.String(lbRules[i].ID) + if ruleID != "" && referencedLBRuleIDs.Has(ruleID) { + loadBalancerRuleUpdated = true + lbRules = append(lbRules[:i], lbRules[i+1:]...) + } + } + + if loadBalancerRuleUpdated { + lb.LoadBalancerPropertiesFormat.LoadBalancingRules = &lbRules + } + } + } + + // Update load balancer when frontendIPConfigUpdated or loadBalancerRuleUpdated. + if frontendIPConfigUpdated || loadBalancerRuleUpdated { + err := az.CreateOrUpdateLBWithRetry(service, *lb) + if err != nil { + klog.Errorf("safeDeletePublicIP for service(%s) failed with error: %v", getServiceName(service), err) + return err + } + } + } + + pipName := to.String(pip.Name) + klog.V(10).Infof("DeletePublicIPWithRetry(%s, %q): start", pipResourceGroup, pipName) + err := az.DeletePublicIPWithRetry(service, pipResourceGroup, pipName) + if err != nil { + if err = ignoreStatusNotFoundFromError(err); err != nil { + return err + } + } + klog.V(10).Infof("DeletePublicIPWithRetry(%s, %q): end", pipResourceGroup, pipName) + + return nil +} + func findProbe(probes []network.Probe, probe network.Probe) bool { for _, existingProbe := range probes { - if strings.EqualFold(*existingProbe.Name, *probe.Name) && *existingProbe.Port == *probe.Port { + if strings.EqualFold(to.String(existingProbe.Name), to.String(probe.Name)) && to.Int32(existingProbe.Port) == to.Int32(probe.Port) { return true } } @@ -1374,7 +1446,7 @@ func findProbe(probes []network.Probe, probe network.Probe) bool { func findRule(rules []network.LoadBalancingRule, rule network.LoadBalancingRule) bool { for _, existingRule := range rules { - if strings.EqualFold(*existingRule.Name, *rule.Name) && + if strings.EqualFold(to.String(existingRule.Name), to.String(rule.Name)) && equalLoadBalancingRulePropertiesFormat(existingRule.LoadBalancingRulePropertiesFormat, rule.LoadBalancingRulePropertiesFormat) { return true } @@ -1405,23 +1477,23 @@ func equalLoadBalancingRulePropertiesFormat(s, t *network.LoadBalancingRulePrope // despite different DestinationAddressPrefixes, in order to give it a chance to consolidate the two rules. 
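+// Pointer fields are compared through to.String so that a rule with a nil name or prefix cannot cause a nil-pointer dereference.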
func findSecurityRule(rules []network.SecurityRule, rule network.SecurityRule) bool { for _, existingRule := range rules { - if !strings.EqualFold(*existingRule.Name, *rule.Name) { + if !strings.EqualFold(to.String(existingRule.Name), to.String(rule.Name)) { continue } if existingRule.Protocol != rule.Protocol { continue } - if !strings.EqualFold(*existingRule.SourcePortRange, *rule.SourcePortRange) { + if !strings.EqualFold(to.String(existingRule.SourcePortRange), to.String(rule.SourcePortRange)) { continue } - if !strings.EqualFold(*existingRule.DestinationPortRange, *rule.DestinationPortRange) { + if !strings.EqualFold(to.String(existingRule.DestinationPortRange), to.String(rule.DestinationPortRange)) { continue } - if !strings.EqualFold(*existingRule.SourceAddressPrefix, *rule.SourceAddressPrefix) { + if !strings.EqualFold(to.String(existingRule.SourceAddressPrefix), to.String(rule.SourceAddressPrefix)) { continue } if !allowsConsolidation(existingRule) && !allowsConsolidation(rule) { - if !strings.EqualFold(*existingRule.DestinationAddressPrefix, *rule.DestinationAddressPrefix) { + if !strings.EqualFold(to.String(existingRule.DestinationAddressPrefix), to.String(rule.DestinationAddressPrefix)) { continue } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_managedDiskController.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_managedDiskController.go index 946314c39a34..110636531d35 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_managedDiskController.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_managedDiskController.go @@ -23,9 +23,9 @@ import ( "strconv" "strings" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" - "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage" - "github.com/golang/glog" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" + "github.com/Azure/go-autorest/autorest/to" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -35,6 +35,12 @@ import ( "k8s.io/kubernetes/pkg/volume/util" ) +const ( + // default IOPS Caps & Throughput Cap (MBps) per https://docs.microsoft.com/en-us/azure/virtual-machines/linux/disks-ultra-ssd + defaultDiskIOPSReadWrite = 500 + defaultDiskMBpsReadWrite = 100 +) + //ManagedDiskController : managed disk controller struct type ManagedDiskController struct { common *controllerCommon @@ -55,7 +61,11 @@ type ManagedDiskOptions struct { // The tags of the disk. Tags map[string]string // The SKU of storage account. 
- StorageAccountType storage.SkuName + StorageAccountType compute.DiskStorageAccountTypes + // IOPS Caps for UltraSSD disk + DiskIOPSReadWrite string + // Throughput Cap (MBps) for UltraSSD disk + DiskMBpsReadWrite string } func newManagedDiskController(common *controllerCommon) (*ManagedDiskController, error) { @@ -65,7 +75,7 @@ func newManagedDiskController(common *controllerCommon) (*ManagedDiskController, //CreateManagedDisk : create managed disk func (c *ManagedDiskController) CreateManagedDisk(options *ManagedDiskOptions) (string, error) { var err error - glog.V(4).Infof("azureDisk - creating new managed Name:%s StorageAccountType:%s Size:%v", options.DiskName, options.StorageAccountType, options.SizeGB) + klog.V(4).Infof("azureDisk - creating new managed Name:%s StorageAccountType:%s Size:%v", options.DiskName, options.StorageAccountType, options.SizeGB) var createZones *[]string if len(options.AvailabilityZone) > 0 { @@ -87,17 +97,49 @@ func (c *ManagedDiskController) CreateManagedDisk(options *ManagedDiskOptions) ( } diskSizeGB := int32(options.SizeGB) + diskSku := compute.DiskStorageAccountTypes(options.StorageAccountType) + diskProperties := compute.DiskProperties{ + DiskSizeGB: &diskSizeGB, + CreationData: &compute.CreationData{CreateOption: compute.Empty}, + } + + if diskSku == compute.UltraSSDLRS { + diskIOPSReadWrite := int64(defaultDiskIOPSReadWrite) + if options.DiskIOPSReadWrite != "" { + v, err := strconv.Atoi(options.DiskIOPSReadWrite) + if err != nil { + return "", fmt.Errorf("AzureDisk - failed to parse DiskIOPSReadWrite: %v", err) + } + diskIOPSReadWrite = int64(v) + } + diskProperties.DiskIOPSReadWrite = to.Int64Ptr(diskIOPSReadWrite) + + diskMBpsReadWrite := int32(defaultDiskMBpsReadWrite) + if options.DiskMBpsReadWrite != "" { + v, err := strconv.Atoi(options.DiskMBpsReadWrite) + if err != nil { + return "", fmt.Errorf("AzureDisk - failed to parse DiskMBpsReadWrite: %v", err) + } + diskMBpsReadWrite = int32(v) + } + diskProperties.DiskMBpsReadWrite = to.Int32Ptr(diskMBpsReadWrite) + } else { + if options.DiskIOPSReadWrite != "" { + return "", fmt.Errorf("AzureDisk - DiskIOPSReadWrite parameter is only applicable in UltraSSD_LRS disk type") + } + if options.DiskMBpsReadWrite != "" { + return "", fmt.Errorf("AzureDisk - DiskMBpsReadWrite parameter is only applicable in UltraSSD_LRS disk type") + } + } + model := compute.Disk{ Location: &c.common.location, Tags: newTags, Zones: createZones, Sku: &compute.DiskSku{ - Name: compute.StorageAccountTypes(options.StorageAccountType), - }, - DiskProperties: &compute.DiskProperties{ - DiskSizeGB: &diskSizeGB, - CreationData: &compute.CreationData{CreateOption: compute.Empty}, + Name: diskSku, }, + DiskProperties: &diskProperties, } if options.ResourceGroup == "" { @@ -129,9 +171,9 @@ func (c *ManagedDiskController) CreateManagedDisk(options *ManagedDiskOptions) ( }) if err != nil { - glog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v but was unable to confirm provisioningState in poll process", options.DiskName, options.StorageAccountType, options.SizeGB) + klog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v but was unable to confirm provisioningState in poll process", options.DiskName, options.StorageAccountType, options.SizeGB) } else { - glog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v", options.DiskName, options.StorageAccountType, options.SizeGB) + klog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s 
Size:%v", options.DiskName, options.StorageAccountType, options.SizeGB) } return diskID, nil @@ -155,7 +197,7 @@ func (c *ManagedDiskController) DeleteManagedDisk(diskURI string) error { // We don't need poll here, k8s will immediately stop referencing the disk // the disk will be eventually deleted - cleanly - by ARM - glog.V(2).Infof("azureDisk - deleted a managed disk: %s", diskURI) + klog.V(2).Infof("azureDisk - deleted a managed disk: %s", diskURI) return nil } @@ -202,7 +244,7 @@ func (c *ManagedDiskController) ResizeDisk(diskURI string, oldSize resource.Quan requestGiB := int32(util.RoundUpSize(requestBytes, 1024*1024*1024)) newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", requestGiB)) - glog.V(2).Infof("azureDisk - begin to resize disk(%s) with new size(%d), old size(%v)", diskName, requestGiB, oldSize) + klog.V(2).Infof("azureDisk - begin to resize disk(%s) with new size(%d), old size(%v)", diskName, requestGiB, oldSize) // If disk already of greater or equal size than requested we return if *result.DiskProperties.DiskSizeGB >= requestGiB { return newSizeQuant, nil @@ -216,7 +258,7 @@ func (c *ManagedDiskController) ResizeDisk(diskURI string, oldSize resource.Quan return oldSize, err } - glog.V(2).Infof("azureDisk - resize disk(%s) with new size(%d) completed", diskName, requestGiB) + klog.V(2).Infof("azureDisk - resize disk(%s) with new size(%d) completed", diskName, requestGiB) return newSizeQuant, nil } @@ -253,7 +295,7 @@ func (c *Cloud) GetAzureDiskLabels(diskURI string) (map[string]string, error) { diskName := path.Base(diskURI) resourceGroup, err := getResourceGroupFromDiskURI(diskURI) if err != nil { - glog.Errorf("Failed to get resource group for AzureDisk %q: %v", diskName, err) + klog.Errorf("Failed to get resource group for AzureDisk %q: %v", diskName, err) return nil, err } @@ -262,13 +304,13 @@ func (c *Cloud) GetAzureDiskLabels(diskURI string) (map[string]string, error) { defer cancel() disk, err := c.DisksClient.Get(ctx, resourceGroup, diskName) if err != nil { - glog.Errorf("Failed to get information for AzureDisk %q: %v", diskName, err) + klog.Errorf("Failed to get information for AzureDisk %q: %v", diskName, err) return nil, err } // Check whether availability zone is specified. 
if disk.Zones == nil || len(*disk.Zones) == 0 { - glog.V(4).Infof("Azure disk %q is not zoned", diskName) + klog.V(4).Infof("Azure disk %q is not zoned", diskName) return nil, nil } @@ -279,7 +321,7 @@ func (c *Cloud) GetAzureDiskLabels(diskURI string) (map[string]string, error) { } zone := c.makeZone(zoneID) - glog.V(4).Infof("Got zone %q for Azure disk %q", zone, diskName) + klog.V(4).Infof("Got zone %q for Azure disk %q", zone, diskName) labels := map[string]string{ kubeletapis.LabelZoneRegion: c.Location, kubeletapis.LabelZoneFailureDomain: zone, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes.go index 467d86b78393..36219b2f917a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes.go @@ -20,17 +20,17 @@ import ( "context" "fmt" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network" "github.com/Azure/go-autorest/autorest/to" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" ) // ListRoutes lists all managed routes that belong to the specified clusterName func (az *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) { - glog.V(10).Infof("ListRoutes: START clusterName=%q", clusterName) + klog.V(10).Infof("ListRoutes: START clusterName=%q", clusterName) routeTable, existsRouteTable, err := az.getRouteTable() routes, err := processRoutes(routeTable, existsRouteTable, err) if err != nil { @@ -72,7 +72,7 @@ func processRoutes(routeTable network.RouteTable, exists bool, err error) ([]*cl for i, route := range *routeTable.Routes { instance := mapRouteNameToNodeName(*route.Name) cidr := *route.AddressPrefix - glog.V(10).Infof("ListRoutes: * instance=%q, cidr=%q", instance, cidr) + klog.V(10).Infof("ListRoutes: * instance=%q, cidr=%q", instance, cidr) kubeRoutes[i] = &cloudprovider.Route{ Name: *route.Name, @@ -82,13 +82,13 @@ func processRoutes(routeTable network.RouteTable, exists bool, err error) ([]*cl } } - glog.V(10).Info("ListRoutes: FINISH") + klog.V(10).Info("ListRoutes: FINISH") return kubeRoutes, nil } func (az *Cloud) createRouteTableIfNotExists(clusterName string, kubeRoute *cloudprovider.Route) error { if _, existsRouteTable, err := az.getRouteTable(); err != nil { - glog.V(2).Infof("createRouteTableIfNotExists error: couldn't get routetable. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + klog.V(2).Infof("createRouteTableIfNotExists error: couldn't get routetable. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) return err } else if existsRouteTable { return nil @@ -103,17 +103,17 @@ func (az *Cloud) createRouteTable() error { RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{}, } - glog.V(3).Infof("createRouteTableIfNotExists: creating routetable. routeTableName=%q", az.RouteTableName) + klog.V(3).Infof("createRouteTableIfNotExists: creating routetable. 
routeTableName=%q", az.RouteTableName) ctx, cancel := getContextWithCancel() defer cancel() resp, err := az.RouteTablesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, routeTable) - glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%q): end", az.RouteTableName) + klog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%q): end", az.RouteTableName) if az.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { - glog.V(2).Infof("createRouteTableIfNotExists backing off: creating routetable. routeTableName=%q", az.RouteTableName) + klog.V(2).Infof("createRouteTableIfNotExists backing off: creating routetable. routeTableName=%q", az.RouteTableName) retryErr := az.CreateOrUpdateRouteTableWithRetry(routeTable) if retryErr != nil { err = retryErr - glog.V(2).Infof("createRouteTableIfNotExists abort backoff: creating routetable. routeTableName=%q", az.RouteTableName) + klog.V(2).Infof("createRouteTableIfNotExists abort backoff: creating routetable. routeTableName=%q", az.RouteTableName) } } if err != nil { @@ -136,14 +136,14 @@ func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint s return err } if unmanaged { - glog.V(2).Infof("CreateRoute: omitting unmanaged node %q", kubeRoute.TargetNode) + klog.V(2).Infof("CreateRoute: omitting unmanaged node %q", kubeRoute.TargetNode) az.routeCIDRsLock.Lock() defer az.routeCIDRsLock.Unlock() az.routeCIDRs[nodeName] = kubeRoute.DestinationCIDR return nil } - glog.V(2).Infof("CreateRoute: creating route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + klog.V(2).Infof("CreateRoute: creating route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) if err := az.createRouteTableIfNotExists(clusterName, kubeRoute); err != nil { return err } @@ -162,24 +162,24 @@ func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint s }, } - glog.V(3).Infof("CreateRoute: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + klog.V(3).Infof("CreateRoute: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR) ctx, cancel := getContextWithCancel() defer cancel() resp, err := az.RoutesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, *route.Name, route) - glog.V(10).Infof("RoutesClient.CreateOrUpdate(%q): end", az.RouteTableName) + klog.V(10).Infof("RoutesClient.CreateOrUpdate(%q): end", az.RouteTableName) if az.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { - glog.V(2).Infof("CreateRoute backing off: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + klog.V(2).Infof("CreateRoute backing off: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR) retryErr := az.CreateOrUpdateRouteWithRetry(route) if retryErr != nil { err = retryErr - glog.V(2).Infof("CreateRoute abort backoff: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + klog.V(2).Infof("CreateRoute abort backoff: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR) } } if err != nil { return err } - glog.V(2).Infof("CreateRoute: route created. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + klog.V(2).Infof("CreateRoute: route created. 
clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) return nil } @@ -193,34 +193,34 @@ func (az *Cloud) DeleteRoute(ctx context.Context, clusterName string, kubeRoute return err } if unmanaged { - glog.V(2).Infof("DeleteRoute: omitting unmanaged node %q", kubeRoute.TargetNode) + klog.V(2).Infof("DeleteRoute: omitting unmanaged node %q", kubeRoute.TargetNode) az.routeCIDRsLock.Lock() defer az.routeCIDRsLock.Unlock() delete(az.routeCIDRs, nodeName) return nil } - glog.V(2).Infof("DeleteRoute: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + klog.V(2).Infof("DeleteRoute: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) ctx, cancel := getContextWithCancel() defer cancel() routeName := mapNodeNameToRouteName(kubeRoute.TargetNode) resp, err := az.RoutesClient.Delete(ctx, az.ResourceGroup, az.RouteTableName, routeName) - glog.V(10).Infof("RoutesClient.Delete(%q): end", az.RouteTableName) + klog.V(10).Infof("RoutesClient.Delete(%q): end", az.RouteTableName) if az.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { - glog.V(2).Infof("DeleteRoute backing off: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + klog.V(2).Infof("DeleteRoute backing off: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) retryErr := az.DeleteRouteWithRetry(routeName) if retryErr != nil { err = retryErr - glog.V(2).Infof("DeleteRoute abort backoff: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + klog.V(2).Infof("DeleteRoute abort backoff: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) } } if err != nil { return err } - glog.V(2).Infof("DeleteRoute: route deleted. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + klog.V(2).Infof("DeleteRoute: route deleted. 
clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_standard.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_standard.go index f5b39274c227..95126f58b0f7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_standard.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_standard.go @@ -27,16 +27,16 @@ import ( "strings" "k8s.io/api/core/v1" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network" "github.com/Azure/go-autorest/autorest/to" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/klog" ) const ( @@ -333,10 +333,10 @@ func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error) } if err != nil { if as.CloudProviderBackoff { - glog.V(2).Infof("GetInstanceIDByNodeName(%s) backing off", name) + klog.V(2).Infof("GetInstanceIDByNodeName(%s) backing off", name) machine, err = as.GetVirtualMachineWithRetry(types.NodeName(name)) if err != nil { - glog.V(2).Infof("GetInstanceIDByNodeName(%s) abort backoff", name) + klog.V(2).Infof("GetInstanceIDByNodeName(%s) abort backoff", name) return "", err } } else { @@ -346,6 +346,26 @@ func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error) return *machine.ID, nil } +// GetPowerStatusByNodeName returns the power state of the specified node. +func (as *availabilitySet) GetPowerStatusByNodeName(name string) (powerState string, err error) { + vm, err := as.getVirtualMachine(types.NodeName(name)) + if err != nil { + return powerState, err + } + + if vm.InstanceView != nil && vm.InstanceView.Statuses != nil { + statuses := *vm.InstanceView.Statuses + for _, status := range statuses { + state := to.String(status.Code) + if strings.HasPrefix(state, vmPowerStatePrefix) { + return strings.TrimPrefix(state, vmPowerStatePrefix), nil + } + } + } + + return "", fmt.Errorf("failed to get power status for node %q", name) +} + // GetNodeNameByProviderID gets the node name by provider ID. func (as *availabilitySet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) { // NodeName is part of providerID for standard instances. 
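Both new GetPowerStatusByNodeName implementations (availability sets above, scale sets later in this diff) walk the VM's instance-view statuses and strip a power-state prefix from the matching status code. A dependency-free sketch of that scan; the "PowerState/" value here is an assumption, matching the conventional form of the provider's vmPowerStatePrefix constant, which is defined outside this hunk:

```go
// Sketch: extract a power state from Azure instance-view status codes.
package main

import (
	"fmt"
	"strings"
)

const vmPowerStatePrefix = "PowerState/" // assumed value of the provider's constant

// powerStateFromStatuses returns the first power state among the status
// codes, e.g. "running" from "PowerState/running".
func powerStateFromStatuses(codes []string) (string, error) {
	for _, code := range codes {
		if strings.HasPrefix(code, vmPowerStatePrefix) {
			return strings.TrimPrefix(code, vmPowerStatePrefix), nil
		}
	}
	return "", fmt.Errorf("no power state found among %d statuses", len(codes))
}

func main() {
	fmt.Println(powerStateFromStatuses([]string{"ProvisioningState/succeeded", "PowerState/running"}))
	// Output: running <nil>
}
```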
@@ -361,7 +381,7 @@ func (as *availabilitySet) GetNodeNameByProviderID(providerID string) (types.Nod func (as *availabilitySet) GetInstanceTypeByNodeName(name string) (string, error) { machine, err := as.getVirtualMachine(types.NodeName(name)) if err != nil { - glog.Errorf("as.GetInstanceTypeByNodeName(%s) failed: as.getVirtualMachine(%s) err=%v", name, name, err) + klog.Errorf("as.GetInstanceTypeByNodeName(%s) failed: as.getVirtualMachine(%s) err=%v", name, name, err) return "", err } @@ -413,7 +433,7 @@ func (as *availabilitySet) GetIPByNodeName(name string) (string, string, error) ipConfig, err := getPrimaryIPConfig(nic) if err != nil { - glog.Errorf("as.GetIPByNodeName(%s) failed: getPrimaryIPConfig(%v), err=%v", name, nic, err) + klog.Errorf("as.GetIPByNodeName(%s) failed: getPrimaryIPConfig(%v), err=%v", name, nic, err) return "", "", err } @@ -442,7 +462,7 @@ func (as *availabilitySet) GetIPByNodeName(name string) (string, string, error) func (as *availabilitySet) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentPoolAvailabilitySets *[]string, err error) { vms, err := as.VirtualMachineClientListWithRetry(as.ResourceGroup) if err != nil { - glog.Errorf("as.getNodeAvailabilitySet - VirtualMachineClientListWithRetry failed, err=%v", err) + klog.Errorf("as.getNodeAvailabilitySet - VirtualMachineClientListWithRetry failed, err=%v", err) return nil, err } vmNameToAvailabilitySetID := make(map[string]string, len(vms)) @@ -461,7 +481,7 @@ func (as *availabilitySet) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentP } asID, ok := vmNameToAvailabilitySetID[nodeName] if !ok { - glog.Errorf("as.getNodeAvailabilitySet - Node(%s) has no availability sets", nodeName) + klog.Errorf("as.getNodeAvailabilitySet - Node(%s) has no availability sets", nodeName) return nil, fmt.Errorf("Node (%s) - has no availability sets", nodeName) } if availabilitySetIDs.Has(asID) { @@ -470,7 +490,7 @@ func (as *availabilitySet) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentP } asName, err := getLastSegment(asID) if err != nil { - glog.Errorf("as.getNodeAvailabilitySet - Node (%s)- getLastSegment(%s), err=%v", nodeName, asID, err) + klog.Errorf("as.getNodeAvailabilitySet - Node (%s)- getLastSegment(%s), err=%v", nodeName, asID, err) return nil, err } // AvailabilitySet ID is currently upper cased in a indeterministic way @@ -496,11 +516,11 @@ func (as *availabilitySet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) } availabilitySetNames, err = as.getAgentPoolAvailabiliySets(nodes) if err != nil { - glog.Errorf("as.GetVMSetNames - getAgentPoolAvailabiliySets failed err=(%v)", err) + klog.Errorf("as.GetVMSetNames - getAgentPoolAvailabiliySets failed err=(%v)", err) return nil, err } if len(*availabilitySetNames) == 0 { - glog.Errorf("as.GetVMSetNames - No availability sets found for nodes in the cluster, node count(%d)", len(nodes)) + klog.Errorf("as.GetVMSetNames - No availability sets found for nodes in the cluster, node count(%d)", len(nodes)) return nil, fmt.Errorf("No availability sets found for nodes, node count(%d)", len(nodes)) } // sort the list to have deterministic selection @@ -520,7 +540,7 @@ func (as *availabilitySet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) } } if !found { - glog.Errorf("as.GetVMSetNames - Availability set (%s) in service annotation not found", serviceAvailabilitySetNames[sasx]) + klog.Errorf("as.GetVMSetNames - Availability set (%s) in service annotation not found", serviceAvailabilitySetNames[sasx]) return nil, fmt.Errorf("availability set (%s) - not 
found", serviceAvailabilitySetNames[sasx]) } } @@ -561,7 +581,7 @@ func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName stri machine, err := as.GetVirtualMachineWithRetry(types.NodeName(nodeName)) if err != nil { - glog.V(2).Infof("GetPrimaryInterface(%s, %s) abort backoff", nodeName, vmSetName) + klog.V(2).Infof("GetPrimaryInterface(%s, %s) abort backoff", nodeName, vmSetName) return network.Interface{}, err } @@ -588,7 +608,7 @@ func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName stri if vmSetName != "" && !as.useStandardLoadBalancer() { expectedAvailabilitySetName := as.getAvailabilitySetID(nodeResourceGroup, vmSetName) if machine.AvailabilitySet == nil || !strings.EqualFold(*machine.AvailabilitySet.ID, expectedAvailabilitySetName) { - glog.V(3).Infof( + klog.V(3).Infof( "GetPrimaryInterface: nic (%s) is not in the availabilitySet(%s)", nicName, vmSetName) return network.Interface{}, errNotInVMSet } @@ -611,21 +631,22 @@ func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName stri // ensureHostInPool ensures the given VM's Primary NIC's Primary IP Configuration is // participating in the specified LoadBalancer Backend Pool. -func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) error { +func (as *availabilitySet) ensureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) error { vmName := mapNodeNameToVMName(nodeName) + serviceName := getServiceName(service) nic, err := as.getPrimaryInterfaceWithVMSet(vmName, vmSetName) if err != nil { if err == errNotInVMSet { - glog.V(3).Infof("ensureHostInPool skips node %s because it is not in the vmSet %s", nodeName, vmSetName) + klog.V(3).Infof("ensureHostInPool skips node %s because it is not in the vmSet %s", nodeName, vmSetName) return nil } - glog.Errorf("error: az.ensureHostInPool(%s), az.vmSet.GetPrimaryInterface.Get(%s, %s), err=%v", nodeName, vmName, vmSetName, err) + klog.Errorf("error: az.ensureHostInPool(%s), az.vmSet.GetPrimaryInterface.Get(%s, %s), err=%v", nodeName, vmName, vmSetName, err) return err } if nic.ProvisioningState != nil && *nic.ProvisioningState == nicFailedState { - glog.V(3).Infof("ensureHostInPool skips node %s because its primary nic %s is in Failed state", nodeName, *nic.Name) + klog.V(3).Infof("ensureHostInPool skips node %s because its primary nic %s is in Failed state", nodeName, *nic.Name) return nil } @@ -658,7 +679,7 @@ func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.N if len(matches) == 2 { lbName := matches[1] if strings.HasSuffix(lbName, InternalLoadBalancerNameSuffix) == isInternal { - glog.V(4).Infof("Node %q has already been added to LB %q, omit adding it to a new one", nodeName, lbName) + klog.V(4).Infof("Node %q has already been added to LB %q, omit adding it to a new one", nodeName, lbName) return nil } } @@ -673,17 +694,17 @@ func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.N primaryIPConfig.LoadBalancerBackendAddressPools = &newBackendPools nicName := *nic.Name - glog.V(3).Infof("nicupdate(%s): nic(%s) - updating", serviceName, nicName) + klog.V(3).Infof("nicupdate(%s): nic(%s) - updating", serviceName, nicName) ctx, cancel := getContextWithCancel() defer cancel() resp, err := as.InterfacesClient.CreateOrUpdate(ctx, as.ResourceGroup, *nic.Name, nic) - glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%q): end", *nic.Name) 
+ klog.V(10).Infof("InterfacesClient.CreateOrUpdate(%q): end", *nic.Name) if as.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { - glog.V(2).Infof("nicupdate(%s) backing off: nic(%s) - updating, err=%v", serviceName, nicName, err) - retryErr := as.CreateOrUpdateInterfaceWithRetry(nic) + klog.V(2).Infof("nicupdate(%s) backing off: nic(%s) - updating, err=%v", serviceName, nicName, err) + retryErr := as.CreateOrUpdateInterfaceWithRetry(service, nic) if retryErr != nil { err = retryErr - glog.V(2).Infof("nicupdate(%s) abort backoff: nic(%s) - updating", serviceName, nicName) + klog.V(2).Infof("nicupdate(%s) abort backoff: nic(%s) - updating", serviceName, nicName) } } if err != nil { @@ -695,24 +716,24 @@ func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.N // EnsureHostsInPool ensures the given Node's primary IP configurations are // participating in the specified LoadBalancer Backend Pool. -func (as *availabilitySet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error { +func (as *availabilitySet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error { hostUpdates := make([]func() error, 0, len(nodes)) for _, node := range nodes { localNodeName := node.Name if as.useStandardLoadBalancer() && as.excludeMasterNodesFromStandardLB() && isMasterNode(node) { - glog.V(4).Infof("Excluding master node %q from load balancer backendpool %q", localNodeName, backendPoolID) + klog.V(4).Infof("Excluding master node %q from load balancer backendpool %q", localNodeName, backendPoolID) continue } if as.ShouldNodeExcludedFromLoadBalancer(node) { - glog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName) + klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName) continue } f := func() error { - err := as.ensureHostInPool(serviceName, types.NodeName(localNodeName), backendPoolID, vmSetName, isInternal) + err := as.ensureHostInPool(service, types.NodeName(localNodeName), backendPoolID, vmSetName, isInternal) if err != nil { - return fmt.Errorf("ensure(%s): backendPoolID(%s) - failed to ensure host in pool: %q", serviceName, backendPoolID, err) + return fmt.Errorf("ensure(%s): backendPoolID(%s) - failed to ensure host in pool: %q", getServiceName(service), backendPoolID, err) } return nil } @@ -728,7 +749,7 @@ func (as *availabilitySet) EnsureHostsInPool(serviceName string, nodes []*v1.Nod } // EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet. -func (as *availabilitySet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error { +func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error { // Do nothing for availability set. 
return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storage.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storage.go index b536d3e95bb3..19b91fc99bac 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storage.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storage.go @@ -19,24 +19,26 @@ package azure import ( "fmt" - "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage" - "github.com/golang/glog" + "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage" + "k8s.io/klog" ) const ( defaultStorageAccountType = string(storage.StandardLRS) + defaultStorageAccountKind = storage.StorageV2 fileShareAccountNamePrefix = "f" sharedDiskAccountNamePrefix = "ds" dedicatedDiskAccountNamePrefix = "dd" ) -// CreateFileShare creates a file share, using a matching storage account -func (az *Cloud) CreateFileShare(shareName, accountName, accountType, resourceGroup, location string, requestGiB int) (string, string, error) { +// CreateFileShare creates a file share, using a matching storage account type, account kind, etc. +// storage account will be created if specified account is not found +func (az *Cloud) CreateFileShare(shareName, accountName, accountType, accountKind, resourceGroup, location string, requestGiB int) (string, string, error) { if resourceGroup == "" { resourceGroup = az.resourceGroup } - account, key, err := az.ensureStorageAccount(accountName, accountType, resourceGroup, location, fileShareAccountNamePrefix) + account, key, err := az.ensureStorageAccount(accountName, accountType, accountKind, resourceGroup, location, fileShareAccountNamePrefix) if err != nil { return "", "", fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err) } @@ -44,7 +46,7 @@ func (az *Cloud) CreateFileShare(shareName, accountName, accountType, resourceGr if err := az.createFileShare(account, key, shareName, requestGiB); err != nil { return "", "", fmt.Errorf("failed to create share %s in account %s: %v", shareName, account, err) } - glog.V(4).Infof("created share %s in account %s", shareName, account) + klog.V(4).Infof("created share %s in account %s", shareName, account) return account, key, nil } @@ -53,7 +55,7 @@ func (az *Cloud) DeleteFileShare(accountName, accountKey, shareName string) erro if err := az.deleteFileShare(accountName, accountKey, shareName); err != nil { return err } - glog.V(4).Infof("share %s deleted", shareName) + klog.V(4).Infof("share %s deleted", shareName) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storageaccount.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storageaccount.go index 94e6f8629359..34871a111985 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storageaccount.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storageaccount.go @@ -20,9 +20,9 @@ import ( "fmt" "strings" - "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage" + "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage" "github.com/Azure/go-autorest/autorest/to" - "github.com/golang/glog" + "k8s.io/klog" ) type accountWithLocation struct { @@ -30,7 +30,7 @@ type accountWithLocation struct { } // getStorageAccounts gets 
name, type, location of all storage accounts in a resource group which matches matchingAccountType, matchingLocation -func (az *Cloud) getStorageAccounts(matchingAccountType, resourceGroup, matchingLocation string) ([]accountWithLocation, error) { +func (az *Cloud) getStorageAccounts(matchingAccountType, matchingAccountKind, resourceGroup, matchingLocation string) ([]accountWithLocation, error) { ctx, cancel := getContextWithCancel() defer cancel() result, err := az.StorageAccountClient.ListByResourceGroup(ctx, resourceGroup) @@ -49,6 +49,10 @@ func (az *Cloud) getStorageAccounts(matchingAccountType, resourceGroup, matching continue } + if matchingAccountKind != "" && !strings.EqualFold(matchingAccountKind, string(acct.Kind)) { + continue + } + location := *acct.Location if matchingLocation != "" && !strings.EqualFold(matchingLocation, location) { continue @@ -86,17 +90,17 @@ func (az *Cloud) getStorageAccesskey(account, resourceGroup string) (string, err } // ensureStorageAccount search storage account, create one storage account(with genAccountNamePrefix) if not found, return accountName, accountKey -func (az *Cloud) ensureStorageAccount(accountName, accountType, resourceGroup, location, genAccountNamePrefix string) (string, string, error) { +func (az *Cloud) ensureStorageAccount(accountName, accountType, accountKind, resourceGroup, location, genAccountNamePrefix string) (string, string, error) { if len(accountName) == 0 { // find a storage account that matches accountType - accounts, err := az.getStorageAccounts(accountType, resourceGroup, location) + accounts, err := az.getStorageAccounts(accountType, accountKind, resourceGroup, location) if err != nil { return "", "", fmt.Errorf("could not list storage accounts for account type %s: %v", accountType, err) } if len(accounts) > 0 { accountName = accounts[0].Name - glog.V(4).Infof("found a matching account %s type %s location %s", accounts[0].Name, accounts[0].StorageType, accounts[0].Location) + klog.V(4).Infof("found a matching account %s type %s location %s", accounts[0].Name, accounts[0].StorageType, accounts[0].Location) } if len(accountName) == 0 { @@ -109,15 +113,19 @@ func (az *Cloud) ensureStorageAccount(accountName, accountType, resourceGroup, l accountType = defaultStorageAccountType } - glog.V(2).Infof("azure - no matching account found, begin to create a new account %s in resource group %s, location: %s, accountType: %s", - accountName, resourceGroup, location, accountType) + // use StorageV2 by default per https://docs.microsoft.com/en-us/azure/storage/common/storage-account-options + kind := defaultStorageAccountKind + if accountKind != "" { + kind = storage.Kind(accountKind) + } + klog.V(2).Infof("azure - no matching account found, begin to create a new account %s in resource group %s, location: %s, accountType: %s, accountKind: %s", + accountName, resourceGroup, location, accountType, kind) cp := storage.AccountCreateParameters{ - Sku: &storage.Sku{Name: storage.SkuName(accountType)}, - // switch to use StorageV2 as it's recommended according to https://docs.microsoft.com/en-us/azure/storage/common/storage-account-options - Kind: storage.StorageV2, + Sku: &storage.Sku{Name: storage.SkuName(accountType)}, + Kind: kind, AccountPropertiesCreateParameters: &storage.AccountPropertiesCreateParameters{EnableHTTPSTrafficOnly: to.BoolPtr(true)}, - Tags: map[string]*string{"created-by": to.StringPtr("azure")}, - Location: &location} + Tags: map[string]*string{"created-by": to.StringPtr("azure")}, + Location: &location} ctx, cancel 
:= getContextWithCancel() defer cancel() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmsets.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmsets.go index 35f8d0cf44d7..0d37a91c0708 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmsets.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmsets.go @@ -17,12 +17,12 @@ limitations under the License. package azure import ( - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" ) // VMSet defines functions all vmsets (including scale set and availability @@ -54,9 +54,9 @@ type VMSet interface { GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error) // EnsureHostsInPool ensures the given Node's primary IP configurations are // participating in the specified LoadBalancer Backend Pool. - EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error + EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error // EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet. - EnsureBackendPoolDeleted(poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error + EnsureBackendPoolDeleted(service *v1.Service, poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error // AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI, and lun. AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error @@ -64,4 +64,7 @@ type VMSet interface { DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error // GetDataDisks gets a list of data disks attached to the node. GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) + + // GetPowerStatusByNodeName returns the power state of the specified node. 
+ GetPowerStatusByNodeName(name string) (string, error) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss.go index 2fe598e0ccf3..4d5df4046286 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss.go @@ -24,16 +24,16 @@ import ( "strconv" "strings" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network" "github.com/Azure/go-autorest/autorest/to" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" ) var ( @@ -113,7 +113,7 @@ func (ss *scaleSet) getVmssVM(nodeName string) (ssName, instanceID string, vm co return "", "", vm, err } - glog.V(4).Infof("getVmssVM gets scaleSetName (%q) and instanceID (%q) for node %q", ssName, instanceID, nodeName) + klog.V(4).Infof("getVmssVM gets scaleSetName (%q) and instanceID (%q) for node %q", ssName, instanceID, nodeName) key := buildVmssCacheKey(resourceGroup, ss.makeVmssVMName(ssName, instanceID)) cachedVM, err := ss.vmssVMCache.Get(key) if err != nil { @@ -121,13 +121,33 @@ func (ss *scaleSet) getVmssVM(nodeName string) (ssName, instanceID string, vm co } if cachedVM == nil { - glog.Errorf("Can't find node (%q) in any scale sets", nodeName) + klog.Errorf("Can't find node (%q) in any scale sets", nodeName) return ssName, instanceID, vm, cloudprovider.InstanceNotFound } return ssName, instanceID, *(cachedVM.(*compute.VirtualMachineScaleSetVM)), nil } +// GetPowerStatusByNodeName returns the power state of the specified node. +func (ss *scaleSet) GetPowerStatusByNodeName(name string) (powerState string, err error) { + _, _, vm, err := ss.getVmssVM(name) + if err != nil { + return powerState, err + } + + if vm.InstanceView != nil && vm.InstanceView.Statuses != nil { + statuses := *vm.InstanceView.Statuses + for _, status := range statuses { + state := to.String(status.Code) + if strings.HasPrefix(state, vmPowerStatePrefix) { + return strings.TrimPrefix(state, vmPowerStatePrefix), nil + } + } + } + + return "", fmt.Errorf("failed to get power status for node %q", name) +} + // getCachedVirtualMachineByInstanceID gets scaleSetVMInfo from cache. // The node must belong to one of scale sets. 
func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID string) (vm compute.VirtualMachineScaleSetVM, err error) { @@ -139,7 +159,7 @@ } if cachedVM == nil { - glog.Errorf("cound't find vmss virtual machine by scaleSetName (%q) and instanceID (%q)", scaleSetName, instanceID) + klog.Errorf("couldn't find vmss virtual machine by scaleSetName (%s) and instanceID (%s)", scaleSetName, instanceID) return vm, cloudprovider.InstanceNotFound } @@ -152,7 +172,7 @@ func (ss *scaleSet) GetInstanceIDByNodeName(name string) (string, error) { managedByAS, err := ss.isNodeManagedByAvailabilitySet(name) if err != nil { - glog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err) + klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err) return "", err } if managedByAS { @@ -173,7 +193,7 @@ func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName, // NodeName is not part of providerID for vmss instances. scaleSetName, err := extractScaleSetNameByProviderID(providerID) if err != nil { - glog.V(4).Infof("Can not extract scale set name from providerID (%s), assuming it is mananaged by availability set: %v", providerID, err) + klog.V(4).Infof("Can not extract scale set name from providerID (%s), assuming it is managed by availability set: %v", providerID, err) return ss.availabilitySet.GetNodeNameByProviderID(providerID) } @@ -184,7 +204,7 @@ instanceID, err := getLastSegment(providerID) if err != nil { - glog.V(4).Infof("Can not extract instanceID from providerID (%s), assuming it is mananaged by availability set: %v", providerID, err) + klog.V(4).Infof("Can not extract instanceID from providerID (%s), assuming it is managed by availability set: %v", providerID, err) return ss.availabilitySet.GetNodeNameByProviderID(providerID) } @@ -205,7 +225,7 @@ func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName, func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) { managedByAS, err := ss.isNodeManagedByAvailabilitySet(name) if err != nil { - glog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err) + klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err) return "", err } if managedByAS { @@ -230,7 +250,7 @@ func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) { func (ss *scaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) { managedByAS, err := ss.isNodeManagedByAvailabilitySet(name) if err != nil { - glog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err) + klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err) return cloudprovider.Zone{}, err } if managedByAS { @@ -271,23 +291,43 @@ func (ss *scaleSet) GetPrimaryVMSetName() string { } // GetIPByNodeName gets machine private IP and public IP by node name. -// TODO(feiskyer): Azure vmss doesn't support associating a public IP to single virtual machine yet, -// fix this after it is supported. 
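The rewritten GetIPByNodeName below no longer returns an empty public IP: it follows the primary IP configuration's PublicIPAddress.ID, takes the trailing segment of that ARM resource ID as the public IP name, and looks the address up in the node's resource group. A small sketch of the ID-to-name step; the provider's getLastSegment helper behaves along these lines:

```go
// Sketch: derive the public IP name from the ARM resource ID referenced by a
// VMSS instance's primary IP configuration.
package main

import (
	"fmt"
	"strings"
)

// lastSegment returns the trailing name component of an ARM resource ID.
func lastSegment(id string) (string, error) {
	parts := strings.Split(strings.TrimRight(id, "/"), "/")
	name := parts[len(parts)-1]
	if name == "" {
		return "", fmt.Errorf("resource ID %q has no name segment", id)
	}
	return name, nil
}

func main() {
	pipID := "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/publicIPAddresses/my-pip"
	fmt.Println(lastSegment(pipID)) // my-pip <nil>
}
```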
func (ss *scaleSet) GetIPByNodeName(nodeName string) (string, string, error) { nic, err := ss.GetPrimaryInterface(nodeName) if err != nil { - glog.Errorf("error: ss.GetIPByNodeName(%s), GetPrimaryInterface(%q), err=%v", nodeName, nodeName, err) + klog.Errorf("error: ss.GetIPByNodeName(%s), GetPrimaryInterface(%q), err=%v", nodeName, nodeName, err) return "", "", err } ipConfig, err := getPrimaryIPConfig(nic) if err != nil { - glog.Errorf("error: ss.GetIPByNodeName(%s), getPrimaryIPConfig(%v), err=%v", nodeName, nic, err) + klog.Errorf("error: ss.GetIPByNodeName(%s), getPrimaryIPConfig(%v), err=%v", nodeName, nic, err) return "", "", err } - targetIP := *ipConfig.PrivateIPAddress - return targetIP, "", nil + internalIP := *ipConfig.PrivateIPAddress + publicIP := "" + if ipConfig.PublicIPAddress != nil && ipConfig.PublicIPAddress.ID != nil { + pipID := *ipConfig.PublicIPAddress.ID + pipName, err := getLastSegment(pipID) + if err != nil { + return "", "", fmt.Errorf("failed to get publicIP name for node %q with pipID %q", nodeName, pipID) + } + + resourceGroup, err := ss.GetNodeResourceGroup(nodeName) + if err != nil { + return "", "", err + } + + pip, existsPip, err := ss.getPublicIPAddress(resourceGroup, pipName) + if err != nil { + return "", "", err + } + if existsPip { + publicIP = *pip.IPAddress + } + } + + return internalIP, publicIP, nil } // This returns the full identifier of the primary NIC for the given VM. @@ -350,7 +390,7 @@ func (ss *scaleSet) listScaleSets(resourceGroup string) ([]string, error) { allScaleSets, err := ss.VirtualMachineScaleSetsClient.List(ctx, resourceGroup) if err != nil { - glog.Errorf("VirtualMachineScaleSetsClient.List failed: %v", err) + klog.Errorf("VirtualMachineScaleSetsClient.List failed: %v", err) return nil, err } @@ -370,7 +410,7 @@ func (ss *scaleSet) listScaleSetVMs(scaleSetName, resourceGroup string) ([]compu allVMs, err := ss.VirtualMachineScaleSetVMsClient.List(ctx, resourceGroup, scaleSetName, "", "", string(compute.InstanceView)) if err != nil { - glog.Errorf("VirtualMachineScaleSetVMsClient.List failed: %v", err) + klog.Errorf("VirtualMachineScaleSetVMsClient.List failed: %v", err) return nil, err } @@ -397,7 +437,7 @@ func (ss *scaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) { } if ssName == "" { - glog.V(3).Infof("Node %q is not belonging to any known scale sets", nodeName) + klog.V(3).Infof("Node %q is not belonging to any known scale sets", nodeName) continue } @@ -421,11 +461,11 @@ func (ss *scaleSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (vmSetN scaleSetNames, err := ss.getAgentPoolScaleSets(nodes) if err != nil { - glog.Errorf("ss.GetVMSetNames - getAgentPoolScaleSets failed err=(%v)", err) + klog.Errorf("ss.GetVMSetNames - getAgentPoolScaleSets failed err=(%v)", err) return nil, err } if len(*scaleSetNames) == 0 { - glog.Errorf("ss.GetVMSetNames - No scale sets found for nodes in the cluster, node count(%d)", len(nodes)) + klog.Errorf("ss.GetVMSetNames - No scale sets found for nodes in the cluster, node count(%d)", len(nodes)) return nil, fmt.Errorf("No scale sets found for nodes, node count(%d)", len(nodes)) } @@ -447,7 +487,7 @@ func (ss *scaleSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (vmSetN } } if !found { - glog.Errorf("ss.GetVMSetNames - scale set (%s) in service annotation not found", serviceVMSetNames[sasx]) + klog.Errorf("ss.GetVMSetNames - scale set (%s) in service annotation not found", serviceVMSetNames[sasx]) return nil, fmt.Errorf("scale set (%s) - not found", 
serviceVMSetNames[sasx]) } } @@ -471,7 +511,7 @@ func extractResourceGroupByVMSSNicID(nicID string) (string, error) { func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, error) { managedByAS, err := ss.isNodeManagedByAvailabilitySet(nodeName) if err != nil { - glog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err) + klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err) return network.Interface{}, err } if managedByAS { @@ -486,19 +526,19 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, err return ss.availabilitySet.GetPrimaryInterface(nodeName) } - glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getVmssVM(%s), err=%v", nodeName, nodeName, err) + klog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getVmssVM(%s), err=%v", nodeName, nodeName, err) return network.Interface{}, err } primaryInterfaceID, err := ss.getPrimaryInterfaceID(vm) if err != nil { - glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getPrimaryInterfaceID(), err=%v", nodeName, err) + klog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getPrimaryInterfaceID(), err=%v", nodeName, err) return network.Interface{}, err } nicName, err := getLastSegment(primaryInterfaceID) if err != nil { - glog.Errorf("error: ss.GetPrimaryInterface(%s), getLastSegment(%s), err=%v", nodeName, primaryInterfaceID, err) + klog.Errorf("error: ss.GetPrimaryInterface(%s), getLastSegment(%s), err=%v", nodeName, primaryInterfaceID, err) return network.Interface{}, err } resourceGroup, err := extractResourceGroupByVMSSNicID(primaryInterfaceID) @@ -510,7 +550,7 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, err defer cancel() nic, err := ss.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface(ctx, resourceGroup, ssName, instanceID, nicName, "") if err != nil { - glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.GetVirtualMachineScaleSetNetworkInterface.Get(%s, %s, %s), err=%v", nodeName, resourceGroup, ssName, nicName, err) + klog.Errorf("error: ss.GetPrimaryInterface(%s), ss.GetVirtualMachineScaleSetNetworkInterface.Get(%s, %s, %s), err=%v", nodeName, resourceGroup, ssName, nicName, err) return network.Interface{}, err } @@ -524,17 +564,18 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, err } // getScaleSetWithRetry gets scale set with exponential backoff retry -func (ss *scaleSet) getScaleSetWithRetry(name string) (compute.VirtualMachineScaleSet, bool, error) { +func (ss *scaleSet) getScaleSetWithRetry(service *v1.Service, name string) (compute.VirtualMachineScaleSet, bool, error) { var result compute.VirtualMachineScaleSet var exists bool err := wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { cached, retryErr := ss.vmssCache.Get(name) if retryErr != nil { - glog.Errorf("backoff: failure for scale set %q, will retry,err=%v", name, retryErr) + ss.Event(service, v1.EventTypeWarning, "GetVirtualMachineScaleSet", retryErr.Error()) + klog.Errorf("backoff: failure for scale set %q, will retry,err=%v", name, retryErr) return false, nil } - glog.V(4).Infof("backoff: success for scale set %q", name) + klog.V(4).Infof("backoff: success for scale set %q", name) if cached != nil { exists = true @@ -581,24 +622,24 @@ func (ss *scaleSet) getPrimaryIPConfigForScaleSet(config *compute.VirtualMachine } // createOrUpdateVMSSWithRetry invokes ss.VirtualMachineScaleSetsClient.CreateOrUpdate with exponential backoff retry. 
-func (ss *scaleSet) createOrUpdateVMSSWithRetry(virtualMachineScaleSet compute.VirtualMachineScaleSet) error { +func (ss *scaleSet) createOrUpdateVMSSWithRetry(service *v1.Service, virtualMachineScaleSet compute.VirtualMachineScaleSet) error { return wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { ctx, cancel := getContextWithCancel() defer cancel() resp, err := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, *virtualMachineScaleSet.Name, virtualMachineScaleSet) - glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%s): end", *virtualMachineScaleSet.Name) - return processHTTPRetryResponse(resp, err) + klog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%s): end", *virtualMachineScaleSet.Name) + return ss.processHTTPRetryResponse(service, "CreateOrUpdateVMSS", resp, err) }) } // updateVMSSInstancesWithRetry invokes ss.VirtualMachineScaleSetsClient.UpdateInstances with exponential backoff retry. -func (ss *scaleSet) updateVMSSInstancesWithRetry(scaleSetName string, vmInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) error { +func (ss *scaleSet) updateVMSSInstancesWithRetry(service *v1.Service, scaleSetName string, vmInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) error { return wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { ctx, cancel := getContextWithCancel() defer cancel() resp, err := ss.VirtualMachineScaleSetsClient.UpdateInstances(ctx, ss.ResourceGroup, scaleSetName, vmInstanceIDs) - glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%s): end", scaleSetName) - return processHTTPRetryResponse(resp, err) + klog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%s): end", scaleSetName) + return ss.processHTTPRetryResponse(service, "CreateOrUpdateVMSSInstance", resp, err) }) } @@ -609,18 +650,18 @@ func (ss *scaleSet) getNodesScaleSets(nodes []*v1.Node) (map[string]sets.String, for _, curNode := range nodes { if ss.useStandardLoadBalancer() && ss.excludeMasterNodesFromStandardLB() && isMasterNode(curNode) { - glog.V(4).Infof("Excluding master node %q from load balancer backendpool", curNode.Name) + klog.V(4).Infof("Excluding master node %q from load balancer backendpool", curNode.Name) continue } if ss.ShouldNodeExcludedFromLoadBalancer(curNode) { - glog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", curNode.Name) + klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", curNode.Name) continue } curScaleSetName, err := extractScaleSetNameByProviderID(curNode.Spec.ProviderID) if err != nil { - glog.V(4).Infof("Node %q is not belonging to any scale sets, assuming it is belong to availability sets", curNode.Name) + klog.V(4).Infof("Node %q is not belonging to any scale sets, assuming it is belong to availability sets", curNode.Name) standardNodes = append(standardNodes, curNode) continue } @@ -631,7 +672,7 @@ func (ss *scaleSet) getNodesScaleSets(nodes []*v1.Node) (map[string]sets.String, instanceID, err := getLastSegment(curNode.Spec.ProviderID) if err != nil { - glog.Errorf("Failed to get instance ID for node %q: %v", curNode.Spec.ProviderID, err) + klog.Errorf("Failed to get instance ID for node %q: %v", curNode.Spec.ProviderID, err) return nil, nil, err } @@ -643,16 +684,17 @@ func (ss *scaleSet) getNodesScaleSets(nodes []*v1.Node) (map[string]sets.String, // ensureHostsInVMSetPool ensures the given Node's primary IP configurations are // participating in the vmSet's LoadBalancer Backend Pool. 
-func (ss *scaleSet) ensureHostsInVMSetPool(serviceName string, backendPoolID string, vmSetName string, instanceIDs []string, isInternal bool) error { - glog.V(3).Infof("ensuring hosts %q of scaleset %q in LB backendpool %q", instanceIDs, vmSetName, backendPoolID) - virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(vmSetName) +func (ss *scaleSet) ensureHostsInVMSetPool(service *v1.Service, backendPoolID string, vmSetName string, instanceIDs []string, isInternal bool) error { + klog.V(3).Infof("ensuring hosts %q of scaleset %q in LB backendpool %q", instanceIDs, vmSetName, backendPoolID) + serviceName := getServiceName(service) + virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(service, vmSetName) if err != nil { - glog.Errorf("ss.getScaleSetWithRetry(%s) for service %q failed: %v", vmSetName, serviceName, err) + klog.Errorf("ss.getScaleSetWithRetry(%s) for service %q failed: %v", vmSetName, serviceName, err) return err } if !exists { errorMessage := fmt.Errorf("Scale set %q not found", vmSetName) - glog.Errorf("%v", errorMessage) + klog.Errorf("%v", errorMessage) return errorMessage } @@ -693,7 +735,7 @@ func (ss *scaleSet) ensureHostsInVMSetPool(serviceName string, backendPoolID str if len(matches) == 2 { lbName := matches[1] if strings.HasSuffix(lbName, InternalLoadBalancerNameSuffix) == isInternal { - glog.V(4).Infof("vmss %q has already been added to LB %q, omit adding it to a new one", vmSetName, lbName) + klog.V(4).Infof("vmss %q has already been added to LB %q, omit adding it to a new one", vmSetName, lbName) return nil } } @@ -708,15 +750,15 @@ func (ss *scaleSet) ensureHostsInVMSetPool(serviceName string, backendPoolID str ctx, cancel := getContextWithCancel() defer cancel() - glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s): scale set (%s) - updating", serviceName, vmSetName) + klog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s): scale set (%s) - updating", serviceName, vmSetName) resp, err := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, vmSetName, virtualMachineScaleSet) - glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName) + klog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName) if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { - glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s): scale set (%s) - updating, err=%v", serviceName, vmSetName, err) - retryErr := ss.createOrUpdateVMSSWithRetry(virtualMachineScaleSet) + klog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s): scale set (%s) - updating, err=%v", serviceName, vmSetName, err) + retryErr := ss.createOrUpdateVMSSWithRetry(service, virtualMachineScaleSet) if retryErr != nil { err = retryErr - glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s) abort backoff: scale set (%s) - updating", serviceName, vmSetName) + klog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s) abort backoff: scale set (%s) - updating", serviceName, vmSetName) } } if err != nil { @@ -731,13 +773,13 @@ func (ss *scaleSet) ensureHostsInVMSetPool(serviceName string, backendPoolID str ctx, cancel := getContextWithCancel() defer cancel() instanceResp, err := ss.VirtualMachineScaleSetsClient.UpdateInstances(ctx, ss.ResourceGroup, vmSetName, vmInstanceIDs) - glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName) + 
klog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName) if ss.CloudProviderBackoff && shouldRetryHTTPRequest(instanceResp, err) { - glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances for service (%s): scale set (%s) - updating, err=%v", serviceName, vmSetName, err) - retryErr := ss.updateVMSSInstancesWithRetry(vmSetName, vmInstanceIDs) + klog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances for service (%s): scale set (%s) - updating, err=%v", serviceName, vmSetName, err) + retryErr := ss.updateVMSSInstancesWithRetry(service, vmSetName, vmInstanceIDs) if retryErr != nil { err = retryErr - glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances for service (%s) abort backoff: scale set (%s) - updating", serviceName, vmSetName) + klog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances for service (%s) abort backoff: scale set (%s) - updating", serviceName, vmSetName) } } if err != nil { @@ -749,10 +791,11 @@ func (ss *scaleSet) ensureHostsInVMSetPool(serviceName string, backendPoolID str // EnsureHostsInPool ensures the given Node's primary IP configurations are // participating in the specified LoadBalancer Backend Pool. -func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error { +func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error { + serviceName := getServiceName(service) scalesets, standardNodes, err := ss.getNodesScaleSets(nodes) if err != nil { - glog.Errorf("getNodesScaleSets() for service %q failed: %v", serviceName, err) + klog.Errorf("getNodesScaleSets() for service %q failed: %v", serviceName, err) return err } @@ -764,22 +807,22 @@ func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, back if instanceIDs.Len() == 0 { // This may happen when scaling a vmss capacity to 0. - glog.V(3).Infof("scale set %q has 0 nodes, adding it to load balancer anyway", ssName) + klog.V(3).Infof("scale set %q has 0 nodes, adding it to load balancer anyway", ssName) // InstanceIDs is required to update vmss, use * instead here since there are no nodes actually. instanceIDs.Insert("*") } - err := ss.ensureHostsInVMSetPool(serviceName, backendPoolID, ssName, instanceIDs.List(), isInternal) + err := ss.ensureHostsInVMSetPool(service, backendPoolID, ssName, instanceIDs.List(), isInternal) if err != nil { - glog.Errorf("ensureHostsInVMSetPool() with scaleSet %q for service %q failed: %v", ssName, serviceName, err) + klog.Errorf("ensureHostsInVMSetPool() with scaleSet %q for service %q failed: %v", ssName, serviceName, err) return err } } if ss.useStandardLoadBalancer() && len(standardNodes) > 0 { - err := ss.availabilitySet.EnsureHostsInPool(serviceName, standardNodes, backendPoolID, "", isInternal) + err := ss.availabilitySet.EnsureHostsInPool(service, standardNodes, backendPoolID, "", isInternal) if err != nil { - glog.Errorf("availabilitySet.EnsureHostsInPool() for service %q failed: %v", serviceName, err) + klog.Errorf("availabilitySet.EnsureHostsInPool() for service %q failed: %v", serviceName, err) return err } } @@ -788,15 +831,15 @@ func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, back } // ensureScaleSetBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified scaleset. 
-func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(poolID, ssName string) error {
-	glog.V(3).Infof("ensuring backend pool %q deleted from scaleset %q", poolID, ssName)
-	virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(ssName)
+func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(service *v1.Service, poolID, ssName string) error {
+	klog.V(3).Infof("ensuring backend pool %q deleted from scaleset %q", poolID, ssName)
+	virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(service, ssName)
 	if err != nil {
-		glog.Errorf("ss.ensureScaleSetBackendPoolDeleted(%s, %s) getScaleSetWithRetry(%s) failed: %v", poolID, ssName, ssName, err)
+		klog.Errorf("ss.ensureScaleSetBackendPoolDeleted(%s, %s) getScaleSetWithRetry(%s) failed: %v", poolID, ssName, ssName, err)
 		return err
 	}
 	if !exists {
-		glog.V(2).Infof("ss.ensureScaleSetBackendPoolDeleted(%s, %s), scale set %s has already been non-exist", poolID, ssName, ssName)
+		klog.V(2).Infof("ss.ensureScaleSetBackendPoolDeleted(%s, %s), scale set %s has already been non-exist", poolID, ssName, ssName)
 		return nil
 	}
@@ -823,7 +866,7 @@ func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(poolID, ssName string) erro
 	for i := len(existingBackendPools) - 1; i >= 0; i-- {
 		curPool := existingBackendPools[i]
 		if strings.EqualFold(poolID, *curPool.ID) {
-			glog.V(10).Infof("ensureScaleSetBackendPoolDeleted gets unwanted backend pool %q for scale set %q", poolID, ssName)
+			klog.V(10).Infof("ensureScaleSetBackendPoolDeleted gets unwanted backend pool %q for scale set %q", poolID, ssName)
 			foundPool = true
 			newBackendPools = append(existingBackendPools[:i], existingBackendPools[i+1:]...)
 		}
@@ -835,17 +878,17 @@ func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(poolID, ssName string) erro
 	// Update scale set with backoff.
primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools - glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating", ssName) + klog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating", ssName) ctx, cancel := getContextWithCancel() defer cancel() resp, err := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, ssName, virtualMachineScaleSet) - glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", ssName) + klog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", ssName) if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { - glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", ssName, err) - retryErr := ss.createOrUpdateVMSSWithRetry(virtualMachineScaleSet) + klog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", ssName, err) + retryErr := ss.createOrUpdateVMSSWithRetry(service, virtualMachineScaleSet) if retryErr != nil { err = retryErr - glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", ssName) + klog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", ssName) } } if err != nil { @@ -860,13 +903,13 @@ func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(poolID, ssName string) erro instanceCtx, instanceCancel := getContextWithCancel() defer instanceCancel() instanceResp, err := ss.VirtualMachineScaleSetsClient.UpdateInstances(instanceCtx, ss.ResourceGroup, ssName, vmInstanceIDs) - glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%q): end", ssName) + klog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%q): end", ssName) if ss.CloudProviderBackoff && shouldRetryHTTPRequest(instanceResp, err) { - glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances scale set (%s) - updating, err=%v", ssName, err) - retryErr := ss.updateVMSSInstancesWithRetry(ssName, vmInstanceIDs) + klog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances scale set (%s) - updating, err=%v", ssName, err) + retryErr := ss.updateVMSSInstancesWithRetry(service, ssName, vmInstanceIDs) if retryErr != nil { err = retryErr - glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances abort backoff: scale set (%s) - updating", ssName) + klog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances abort backoff: scale set (%s) - updating", ssName) } } if err != nil { @@ -878,14 +921,14 @@ func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(poolID, ssName string) erro if len(newBackendPools) == 0 { updateCtx, updateCancel := getContextWithCancel() defer updateCancel() - glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating second time", ssName) + klog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating second time", ssName) resp, err = ss.VirtualMachineScaleSetsClient.CreateOrUpdate(updateCtx, ss.ResourceGroup, ssName, virtualMachineScaleSet) - glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", ssName) + klog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", ssName) if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { - glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", ssName, err) - retryErr := ss.createOrUpdateVMSSWithRetry(virtualMachineScaleSet) + 
klog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", ssName, err) + retryErr := ss.createOrUpdateVMSSWithRetry(service, virtualMachineScaleSet) if retryErr != nil { - glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", ssName) + klog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", ssName) } } } @@ -894,7 +937,7 @@ func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(poolID, ssName string) erro } // EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet. -func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error { +func (ss *scaleSet) EnsureBackendPoolDeleted(service *v1.Service, poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error { if backendAddressPools == nil { return nil } @@ -909,7 +952,7 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAd ssName, err := extractScaleSetNameByProviderID(*ipConfigurations.ID) if err != nil { - glog.V(4).Infof("backend IP configuration %q is not belonging to any vmss, omit it", *ipConfigurations.ID) + klog.V(4).Infof("backend IP configuration %q is not belonging to any vmss, omit it", *ipConfigurations.ID) continue } @@ -925,9 +968,9 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAd continue } - err := ss.ensureScaleSetBackendPoolDeleted(poolID, ssName) + err := ss.ensureScaleSetBackendPoolDeleted(service, poolID, ssName) if err != nil { - glog.Errorf("ensureScaleSetBackendPoolDeleted() with scaleSet %q failed: %v", ssName, err) + klog.Errorf("ensureScaleSetBackendPoolDeleted() with scaleSet %q failed: %v", ssName, err) return err } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss_cache.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss_cache.go index c09f71f588fb..a9a46ba703bf 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss_cache.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss_cache.go @@ -21,7 +21,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/sets" ) @@ -50,7 +50,7 @@ func (ss *scaleSet) makeVmssVMName(scaleSetName, instanceID string) string { func extractVmssVMName(name string) (string, string, error) { split := strings.SplitAfter(name, vmssNameSeparator) if len(split) < 2 { - glog.V(3).Infof("Failed to extract vmssVMName %q", name) + klog.V(3).Infof("Failed to extract vmssVMName %q", name) return "", "", ErrorNotVmssInstance } @@ -74,7 +74,7 @@ func (ss *scaleSet) newVmssCache() (*timedCache, error) { } if !exists { - glog.V(2).Infof("Virtual machine scale set %q not found with message: %q", key, message) + klog.V(2).Infof("Virtual machine scale set %q not found with message: %q", key, message) return nil, nil } @@ -107,7 +107,7 @@ func (ss *scaleSet) newNodeNameToScaleSetMappingCache() (*timedCache, error) { for _, vm := range vms { if vm.OsProfile == nil || vm.OsProfile.ComputerName == nil { - glog.Warningf("failed to get computerName for vmssVM (%q)", ssName) + klog.Warningf("failed to get computerName for vmssVM (%q)", ssName) continue } @@ -195,10 +195,28 @@ func (ss *scaleSet) newVmssVMCache() (*timedCache, error) { } if !exists { 
- glog.V(2).Infof("Virtual machine scale set VM %q not found with message: %q", key, message) + klog.V(2).Infof("Virtual machine scale set VM %q not found with message: %q", key, message) return nil, nil } + // Get instanceView for vmssVM. + if result.InstanceView == nil { + viewCtx, viewCancel := getContextWithCancel() + defer viewCancel() + view, err := ss.VirtualMachineScaleSetVMsClient.GetInstanceView(viewCtx, resourceGroup, ssName, instanceID) + // It is possible that the vmssVM gets removed just before this call. So check whether the VM exist again. + exists, message, realErr = checkResourceExistsFromError(err) + if realErr != nil { + return nil, realErr + } + if !exists { + klog.V(2).Infof("Virtual machine scale set VM %q not found with message: %q", key, message) + return nil, nil + } + + result.InstanceView = &view + } + return &result, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap.go index 86bb87325cc7..cf3632956ab0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap.go @@ -23,13 +23,12 @@ import ( "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network" "github.com/Azure/go-autorest/autorest" - "github.com/golang/glog" - "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" ) var ( @@ -118,7 +117,7 @@ func (az *Cloud) getPublicIPAddress(pipResourceGroup string, pipName string) (pi } if !exists { - glog.V(2).Infof("Public IP %q not found with message: %q", pipName, message) + klog.V(2).Infof("Public IP %q not found with message: %q", pipName, message) return pip, false, nil } @@ -145,7 +144,7 @@ func (az *Cloud) getSubnet(virtualNetworkName string, subnetName string) (subnet } if !exists { - glog.V(2).Infof("Subnet %q not found with message: %q", subnetName, message) + klog.V(2).Infof("Subnet %q not found with message: %q", subnetName, message) return subnet, false, nil } @@ -205,7 +204,7 @@ func (az *Cloud) newVMCache() (*timedCache, error) { } if !exists { - glog.V(2).Infof("Virtual machine %q not found with message: %q", key, message) + klog.V(2).Infof("Virtual machine %q not found with message: %q", key, message) return nil, nil } @@ -227,7 +226,7 @@ func (az *Cloud) newLBCache() (*timedCache, error) { } if !exists { - glog.V(2).Infof("Load balancer %q not found with message: %q", key, message) + klog.V(2).Infof("Load balancer %q not found with message: %q", key, message) return nil, nil } @@ -248,7 +247,7 @@ func (az *Cloud) newNSGCache() (*timedCache, error) { } if !exists { - glog.V(2).Infof("Security group %q not found with message: %q", key, message) + klog.V(2).Infof("Security group %q not found with message: %q", key, message) return nil, nil } @@ -269,7 +268,7 @@ func (az *Cloud) newRouteTableCache() (*timedCache, error) { } if !exists { - glog.V(2).Infof("Route table %q not found with message: %q", key, message) + klog.V(2).Infof("Route table %q not found with message: %q", key, message) return nil, nil } @@ -302,5 +301,5 @@ func (az *Cloud) IsNodeUnmanaged(nodeName string) (bool, error) { // 
IsNodeUnmanagedByProviderID returns true if the node is not managed by Azure cloud provider. // All managed node's providerIDs are in format 'azure:///subscriptions//resourceGroups//providers/Microsoft.Compute/.*' func (az *Cloud) IsNodeUnmanagedByProviderID(providerID string) bool { - return azureNodeProviderIDRE.Match([]byte(providerID)) + return !azureNodeProviderIDRE.Match([]byte(providerID)) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_zones.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_zones.go index 08aab6d07e4f..3c2d84571af8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_zones.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_zones.go @@ -21,21 +21,12 @@ import ( "fmt" "strconv" "strings" - "sync" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" ) -const ( - faultDomainURI = "v1/InstanceInfo/FD" - computeMetadataURI = "instance/compute" -) - -var faultMutex = &sync.Mutex{} -var faultDomain *string - // makeZone returns the zone value in format of -. func (az *Cloud) makeZone(zoneID int) string { return fmt.Sprintf("%s-%d", strings.ToLower(az.Location), zoneID) @@ -58,54 +49,40 @@ func (az *Cloud) GetZoneID(zoneLabel string) string { // GetZone returns the Zone containing the current availability zone and locality region that the program is running in. // If the node is not running with availability zones, then it will fall back to fault domain. func (az *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) { - computeInfo := ComputeMetadata{} - err := az.metadata.Object(computeMetadataURI, &computeInfo) + metadata, err := az.metadata.GetMetadata() if err != nil { return cloudprovider.Zone{}, err } - if computeInfo.Zone == "" { - glog.V(3).Infof("Availability zone is not enabled for the node, falling back to fault domain") - return az.getZoneFromFaultDomain() + if metadata.Compute == nil { + return cloudprovider.Zone{}, fmt.Errorf("failure of getting compute information from instance metadata") } - zoneID, err := strconv.Atoi(computeInfo.Zone) - if err != nil { - return cloudprovider.Zone{}, fmt.Errorf("failed to parse zone ID %q: %v", computeInfo.Zone, err) + zone := "" + if metadata.Compute.Zone != "" { + zoneID, err := strconv.Atoi(metadata.Compute.Zone) + if err != nil { + return cloudprovider.Zone{}, fmt.Errorf("failed to parse zone ID %q: %v", metadata.Compute.Zone, err) + } + zone = az.makeZone(zoneID) + } else { + klog.V(3).Infof("Availability zone is not enabled for the node, falling back to fault domain") + zone = metadata.Compute.FaultDomain } return cloudprovider.Zone{ - FailureDomain: az.makeZone(zoneID), + FailureDomain: zone, Region: az.Location, }, nil } -// getZoneFromFaultDomain gets fault domain for the instance. -// Fault domain is the fallback when availability zone is not enabled for the node. 
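The single-character change to IsNodeUnmanagedByProviderID above is a genuine bug fix: azureNodeProviderIDRE matches the provider IDs of managed nodes, so returning the match directly inverted the predicate and made every managed node look unmanaged. The rewritten GetZone in the same area now prefers the availability zone from instance metadata and falls back to the fault domain only when no zone is set, replacing the removed mutex-guarded fault-domain cache. A self-contained illustration of the negation; the pattern below is approximated from the comment in the hunk, while the real regexp is defined elsewhere in the package:

```go
package main

import (
	"fmt"
	"regexp"
)

// managedRE approximates azureNodeProviderIDRE from the comment above: it
// matches the provider IDs of nodes managed by the Azure cloud provider.
var managedRE = regexp.MustCompile(`^azure:///subscriptions/.+/resourceGroups/.+/providers/Microsoft.Compute/.*$`)

func isUnmanaged(providerID string) bool {
	return !managedRE.MatchString(providerID) // the fix: negate the match
}

func main() {
	fmt.Println(isUnmanaged("azure:///subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm-0")) // false: managed
	fmt.Println(isUnmanaged("baremetal://node-1"))                                                                            // true: unmanaged
}
```

The removal of getZoneFromFaultDomain and its cached fault-domain state continues below.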
-func (az *Cloud) getZoneFromFaultDomain() (cloudprovider.Zone, error) { - faultMutex.Lock() - defer faultMutex.Unlock() - if faultDomain == nil { - var err error - faultDomain, err = az.fetchFaultDomain() - if err != nil { - return cloudprovider.Zone{}, err - } - } - zone := cloudprovider.Zone{ - FailureDomain: *faultDomain, - Region: az.Location, - } - return zone, nil -} - // GetZoneByProviderID implements Zones.GetZoneByProviderID // This is particularly useful in external cloud providers where the kubelet // does not initialize node data. func (az *Cloud) GetZoneByProviderID(ctx context.Context, providerID string) (cloudprovider.Zone, error) { // Returns nil for unmanaged nodes because azure cloud provider couldn't fetch information for them. if az.IsNodeUnmanagedByProviderID(providerID) { - glog.V(2).Infof("GetZoneByProviderID: omitting unmanaged node %q", providerID) + klog.V(2).Infof("GetZoneByProviderID: omitting unmanaged node %q", providerID) return cloudprovider.Zone{}, nil } @@ -127,18 +104,9 @@ func (az *Cloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) return cloudprovider.Zone{}, err } if unmanaged { - glog.V(2).Infof("GetZoneByNodeName: omitting unmanaged node %q", nodeName) + klog.V(2).Infof("GetZoneByNodeName: omitting unmanaged node %q", nodeName) return cloudprovider.Zone{}, nil } return az.vmSet.GetZoneByNodeName(string(nodeName)) } - -func (az *Cloud) fetchFaultDomain() (*string, error) { - faultDomain, err := az.metadata.Text(faultDomainURI) - if err != nil { - return nil, err - } - - return &faultDomain, nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/BUILD index 3f0b1d1081aa..65cd15f25721 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/BUILD @@ -18,15 +18,14 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack", deps = [ - "//pkg/cloudprovider:go_default_library", - "//pkg/controller:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/cloud-provider:go_default_library", "//vendor/github.com/d2g/dhcp4:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/kardianos/osext:go_default_library", "//vendor/github.com/xanzy/go-cloudstack/cloudstack:go_default_library", "//vendor/gopkg.in/gcfg.v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:android": [ "//vendor/github.com/d2g/dhcp4client:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/cloudstack.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/cloudstack.go index 4fdab73e58d2..4769adb647f7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/cloudstack.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/cloudstack.go @@ -24,13 +24,12 @@ import ( "os" "path/filepath" - "github.com/golang/glog" "github.com/kardianos/osext" "github.com/xanzy/go-cloudstack/cloudstack" "gopkg.in/gcfg.v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/cloudprovider" - 
"k8s.io/kubernetes/pkg/controller" + cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" ) // ProviderName is the name of this cloud provider. @@ -99,10 +98,10 @@ func newCSCloud(cfg *CSConfig) (*CSCloud, error) { // In CloudStack your metadata is always served by the DHCP server. dhcpServer, err := findDHCPServer() if err == nil { - glog.V(4).Infof("Found metadata server: %v", dhcpServer) + klog.V(4).Infof("Found metadata server: %v", dhcpServer) cs.metadata = &metadata{dhcpServer: dhcpServer, zone: cs.zone} } else { - glog.Errorf("Error searching metadata server: %v", err) + klog.Errorf("Error searching metadata server: %v", err) } } @@ -112,7 +111,7 @@ func newCSCloud(cfg *CSConfig) (*CSCloud, error) { if cs.client == nil { if cs.metadata != nil { - glog.V(2).Infof("No API URL, key and secret are provided, so only using metadata!") + klog.V(2).Infof("No API URL, key and secret are provided, so only using metadata!") } else { return nil, errors.New("no cloud provider config given") } @@ -122,7 +121,8 @@ func newCSCloud(cfg *CSConfig) (*CSCloud, error) { } // Initialize passes a Kubernetes clientBuilder interface to the cloud provider -func (cs *CSCloud) Initialize(clientBuilder controller.ControllerClientBuilder) {} +func (cs *CSCloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) { +} // LoadBalancer returns an implementation of LoadBalancer for CloudStack. func (cs *CSCloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) { @@ -208,7 +208,7 @@ func (cs *CSCloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) { cs.zone = instance.Zonename } - glog.V(2).Infof("Current zone is %v", cs.zone) + klog.V(2).Infof("Current zone is %v", cs.zone) zone.FailureDomain = cs.zone zone.Region = cs.zone @@ -230,7 +230,7 @@ func (cs *CSCloud) GetZoneByProviderID(ctx context.Context, providerID string) ( return zone, fmt.Errorf("error retrieving zone: %v", err) } - glog.V(2).Infof("Current zone is %v", cs.zone) + klog.V(2).Infof("Current zone is %v", cs.zone) zone.FailureDomain = instance.Zonename zone.Region = instance.Zonename @@ -252,7 +252,7 @@ func (cs *CSCloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeNam return zone, fmt.Errorf("error retrieving zone: %v", err) } - glog.V(2).Infof("Current zone is %v", cs.zone) + klog.V(2).Infof("Current zone is %v", cs.zone) zone.FailureDomain = instance.Zonename zone.Region = instance.Zonename diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/cloudstack_instances.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/cloudstack_instances.go index b987a8eba3e5..55a81bdaa6f7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/cloudstack_instances.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/cloudstack_instances.go @@ -21,11 +21,11 @@ import ( "errors" "fmt" - "github.com/golang/glog" "github.com/xanzy/go-cloudstack/cloudstack" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" ) // NodeAddresses returns the addresses of the specified instance. @@ -78,7 +78,7 @@ func (cs *CSCloud) nodeAddresses(instance *cloudstack.VirtualMachine) ([]v1.Node } else { // Since there is no sane way to determine the external IP if the host isn't // using static NAT, we will just fire a log message and omit the external IP. 
- glog.V(4).Infof("Could not determine the public IP of host %v (%v)", instance.Name, instance.Id) + klog.V(4).Infof("Could not determine the public IP of host %v (%v)", instance.Name, instance.Id) } return addresses, nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/cloudstack_loadbalancer.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/cloudstack_loadbalancer.go index 899124025022..64971d02cf23 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/cloudstack_loadbalancer.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/cloudstack_loadbalancer.go @@ -21,9 +21,11 @@ import ( "fmt" "strconv" - "github.com/golang/glog" "github.com/xanzy/go-cloudstack/cloudstack" + "k8s.io/klog" + "k8s.io/api/core/v1" + cloudprovider "k8s.io/cloud-provider" ) type loadBalancer struct { @@ -41,7 +43,7 @@ type loadBalancer struct { // GetLoadBalancer returns whether the specified load balancer exists, and if so, what its status is. func (cs *CSCloud) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) { - glog.V(4).Infof("GetLoadBalancer(%v, %v, %v)", clusterName, service.Namespace, service.Name) + klog.V(4).Infof("GetLoadBalancer(%v, %v, %v)", clusterName, service.Namespace, service.Name) // Get the load balancer details and existing rules. lb, err := cs.getLoadBalancer(service) @@ -54,7 +56,7 @@ func (cs *CSCloud) GetLoadBalancer(ctx context.Context, clusterName string, serv return nil, false, nil } - glog.V(4).Infof("Found a load balancer associated with IP %v", lb.ipAddr) + klog.V(4).Infof("Found a load balancer associated with IP %v", lb.ipAddr) status := &v1.LoadBalancerStatus{} status.Ingress = append(status.Ingress, v1.LoadBalancerIngress{IP: lb.ipAddr}) @@ -64,7 +66,7 @@ func (cs *CSCloud) GetLoadBalancer(ctx context.Context, clusterName string, serv // EnsureLoadBalancer creates a new load balancer, or updates the existing one. Returns the status of the balancer. func (cs *CSCloud) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (status *v1.LoadBalancerStatus, err error) { - glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v)", clusterName, service.Namespace, service.Name, service.Spec.LoadBalancerIP, service.Spec.Ports, nodes) + klog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v)", clusterName, service.Namespace, service.Name, service.Spec.LoadBalancerIP, service.Spec.Ports, nodes) if len(service.Spec.Ports) == 0 { return nil, fmt.Errorf("requested load balancer with no ports") @@ -102,14 +104,14 @@ func (cs *CSCloud) EnsureLoadBalancer(ctx context.Context, clusterName string, s defer func(lb *loadBalancer) { if err != nil { if err := lb.releaseLoadBalancerIP(); err != nil { - glog.Errorf(err.Error()) + klog.Errorf(err.Error()) } } }(lb) } } - glog.V(4).Infof("Load balancer %v is associated with IP %v", lb.name, lb.ipAddr) + klog.V(4).Infof("Load balancer %v is associated with IP %v", lb.name, lb.ipAddr) for _, port := range service.Spec.Ports { // All ports have their own load balancer rule, so add the port to lbName to keep the names unique. 
@@ -121,14 +123,14 @@ func (cs *CSCloud) EnsureLoadBalancer(ctx context.Context, clusterName string, s return nil, err } if exists && !needsUpdate { - glog.V(4).Infof("Load balancer rule %v is up-to-date", lbRuleName) + klog.V(4).Infof("Load balancer rule %v is up-to-date", lbRuleName) // Delete the rule from the map, to prevent it being deleted. delete(lb.rules, lbRuleName) continue } if needsUpdate { - glog.V(4).Infof("Updating load balancer rule: %v", lbRuleName) + klog.V(4).Infof("Updating load balancer rule: %v", lbRuleName) if err := lb.updateLoadBalancerRule(lbRuleName); err != nil { return nil, err } @@ -137,13 +139,13 @@ func (cs *CSCloud) EnsureLoadBalancer(ctx context.Context, clusterName string, s continue } - glog.V(4).Infof("Creating load balancer rule: %v", lbRuleName) + klog.V(4).Infof("Creating load balancer rule: %v", lbRuleName) lbRule, err := lb.createLoadBalancerRule(lbRuleName, port) if err != nil { return nil, err } - glog.V(4).Infof("Assigning hosts (%v) to load balancer rule: %v", lb.hostIDs, lbRuleName) + klog.V(4).Infof("Assigning hosts (%v) to load balancer rule: %v", lb.hostIDs, lbRuleName) if err = lb.assignHostsToRule(lbRule, lb.hostIDs); err != nil { return nil, err } @@ -152,7 +154,7 @@ func (cs *CSCloud) EnsureLoadBalancer(ctx context.Context, clusterName string, s // Cleanup any rules that are now still in the rules map, as they are no longer needed. for _, lbRule := range lb.rules { - glog.V(4).Infof("Deleting obsolete load balancer rule: %v", lbRule.Name) + klog.V(4).Infof("Deleting obsolete load balancer rule: %v", lbRule.Name) if err := lb.deleteLoadBalancerRule(lbRule); err != nil { return nil, err } @@ -166,7 +168,7 @@ func (cs *CSCloud) EnsureLoadBalancer(ctx context.Context, clusterName string, s // UpdateLoadBalancer updates hosts under the specified load balancer. func (cs *CSCloud) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error { - glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v, %v)", clusterName, service.Namespace, service.Name, nodes) + klog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v, %v)", clusterName, service.Namespace, service.Name, nodes) // Get the load balancer details and existing rules. lb, err := cs.getLoadBalancer(service) @@ -192,14 +194,14 @@ func (cs *CSCloud) UpdateLoadBalancer(ctx context.Context, clusterName string, s assign, remove := symmetricDifference(lb.hostIDs, l.LoadBalancerRuleInstances) if len(assign) > 0 { - glog.V(4).Infof("Assigning new hosts (%v) to load balancer rule: %v", assign, lbRule.Name) + klog.V(4).Infof("Assigning new hosts (%v) to load balancer rule: %v", assign, lbRule.Name) if err := lb.assignHostsToRule(lbRule, assign); err != nil { return err } } if len(remove) > 0 { - glog.V(4).Infof("Removing old hosts (%v) from load balancer rule: %v", assign, lbRule.Name) + klog.V(4).Infof("Removing old hosts (%v) from load balancer rule: %v", assign, lbRule.Name) if err := lb.removeHostsFromRule(lbRule, remove); err != nil { return err } @@ -212,7 +214,7 @@ func (cs *CSCloud) UpdateLoadBalancer(ctx context.Context, clusterName string, s // EnsureLoadBalancerDeleted deletes the specified load balancer if it exists, returning // nil if the load balancer specified either didn't exist or was successfully deleted. 
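The EnsureLoadBalancer hunks above implement reconciliation by map pruning: existing rules are loaded into lb.rules, each rule that is still wanted is deleted from the map as it is visited (after being updated or left alone), and whatever remains in the map afterwards is exactly the set of obsolete rules to clean up. The shape of that pattern, reduced to its essentials:

```go
package lbreconcile

// reconcile sketches the pattern used by EnsureLoadBalancer above: existing
// rules live in a map, every rule that should stay is removed from the map
// as it is visited, and the leftovers are precisely the obsolete rules.
func reconcile(existing map[string]bool, wanted []string,
	create func(string), remove func(string)) {

	for _, name := range wanted {
		if existing[name] {
			delete(existing, name) // still needed; protect from cleanup
			continue
		}
		create(name)
	}
	for name := range existing {
		remove(name) // no longer referenced by any Service port
	}
}
```

EnsureLoadBalancerDeleted, introduced by the doc comment above, follows.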
 func (cs *CSCloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error {
-	glog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v, %v)", clusterName, service.Namespace, service.Name)
+	klog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v, %v)", clusterName, service.Namespace, service.Name)
 	// Get the load balancer details and existing rules.
 	lb, err := cs.getLoadBalancer(service)
@@ -221,14 +223,14 @@
 	}
 	for _, lbRule := range lb.rules {
-		glog.V(4).Infof("Deleting load balancer rule: %v", lbRule.Name)
+		klog.V(4).Infof("Deleting load balancer rule: %v", lbRule.Name)
 		if err := lb.deleteLoadBalancerRule(lbRule); err != nil {
 			return err
 		}
 	}
 	if lb.ipAddr != "" && lb.ipAddr != service.Spec.LoadBalancerIP {
-		glog.V(4).Infof("Releasing load balancer IP: %v", lb.ipAddr)
+		klog.V(4).Infof("Releasing load balancer IP: %v", lb.ipAddr)
 		if err := lb.releaseLoadBalancerIP(); err != nil {
 			return err
 		}
@@ -239,7 +241,7 @@ func (cs *CSCloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName st
 // GetLoadBalancerName retrieves the name of the LoadBalancer.
 func (cs *CSCloud) GetLoadBalancerName(ctx context.Context, clusterName string, service *v1.Service) string {
-	return cs.GetLoadBalancerName(ctx, clusterName, service)
+	return cloudprovider.DefaultLoadBalancerName(service)
 }
 // getLoadBalancer retrieves the IP address and ID and all the existing rules it can find.
@@ -268,14 +270,14 @@ func (cs *CSCloud) getLoadBalancer(service *v1.Service) (*loadBalancer, error) {
 		lb.rules[lbRule.Name] = lbRule
 		if lb.ipAddr != "" && lb.ipAddr != lbRule.Publicip {
-			glog.Warningf("Load balancer for service %v/%v has rules associated with different IP's: %v, %v", service.Namespace, service.Name, lb.ipAddr, lbRule.Publicip)
+			klog.Warningf("Load balancer for service %v/%v has rules associated with different IP's: %v, %v", service.Namespace, service.Name, lb.ipAddr, lbRule.Publicip)
 		}
 		lb.ipAddr = lbRule.Publicip
 		lb.ipAddrID = lbRule.Publicipid
 	}
-	glog.V(4).Infof("Load balancer %v contains %d rule(s)", lb.name, len(lb.rules))
+	klog.V(4).Infof("Load balancer %v contains %d rule(s)", lb.name, len(lb.rules))
 	return lb, nil
 }
@@ -333,7 +335,7 @@ func (lb *loadBalancer) getLoadBalancerIP(loadBalancerIP string) error {
 // getPublicIPAddressID retrieves the ID of the given IP, and sets the address and it's ID.
 func (lb *loadBalancer) getPublicIPAddress(loadBalancerIP string) error {
-	glog.V(4).Infof("Retrieve load balancer IP details: %v", loadBalancerIP)
+	klog.V(4).Infof("Retrieve load balancer IP details: %v", loadBalancerIP)
 	p := lb.Address.NewListPublicIpAddressesParams()
 	p.SetIpaddress(loadBalancerIP)
@@ -360,7 +362,7 @@
 // associatePublicIPAddress associates a new IP and sets the address and it's ID.
 func (lb *loadBalancer) associatePublicIPAddress() error {
-	glog.V(4).Infof("Allocate new IP for load balancer: %v", lb.name)
+	klog.V(4).Infof("Allocate new IP for load balancer: %v", lb.name)
 	// If a network belongs to a VPC, the IP address needs to be associated with
 	// the VPC instead of with the network.
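Separately from the logging swap, the GetLoadBalancerName hunk above fixes unbounded recursion: the old body called itself, so any caller would overflow the stack; the new body delegates to cloudprovider.DefaultLoadBalancerName, which derives a stable name from the Service. A sketch of the fixed call path:

```go
package lbname

import (
	v1 "k8s.io/api/core/v1"
	cloudprovider "k8s.io/cloud-provider"
)

// loadBalancerName shows the corrected delegation: the shared helper in
// k8s.io/cloud-provider computes the name from the Service (its UID), so
// every provider that has no custom naming scheme agrees on the result.
func loadBalancerName(service *v1.Service) string {
	return cloudprovider.DefaultLoadBalancerName(service)
}
```

The VPC lookup described by the trailing comment above continues below.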
network, count, err := lb.Network.GetNetworkByID(lb.networkID, cloudstack.WithProject(lb.projectID)) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/metadata.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/metadata.go index bfde2f48e1bd..f2decccdf862 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/metadata.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/metadata.go @@ -25,10 +25,10 @@ import ( "net/http" "github.com/d2g/dhcp4" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" ) type metadata struct { @@ -143,7 +143,7 @@ func (m *metadata) GetZone(ctx context.Context) (cloudprovider.Zone, error) { m.zone = zoneName } - glog.V(2).Infof("Current zone is %v", zone) + klog.V(2).Infof("Current zone is %v", zone) zone.FailureDomain = m.zone zone.Region = m.zone diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/BUILD index 94f70388b2e4..0e5905647074 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/BUILD @@ -20,6 +20,7 @@ go_library( "gce_clusterid.go", "gce_clusters.go", "gce_disks.go", + "gce_fake.go", "gce_firewall.go", "gce_forwardingrule.go", "gce_healthchecks.go", @@ -46,16 +47,15 @@ go_library( importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce", deps = [ "//pkg/api/v1/service:go_default_library", - "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/gce/cloud:go_default_library", "//pkg/cloudprovider/providers/gce/cloud/filter:go_default_library", "//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library", + "//pkg/cloudprovider/providers/gce/cloud/mock:go_default_library", "//pkg/controller:go_default_library", "//pkg/features:go_default_library", "//pkg/kubelet/apis:go_default_library", "//pkg/master/ports:go_default_library", "//pkg/util/net/sets:go_default_library", - "//pkg/util/version:go_default_library", "//pkg/version:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", @@ -67,6 +67,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", @@ -77,8 +78,8 @@ go_library( "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", + "//staging/src/k8s.io/cloud-provider:go_default_library", "//vendor/cloud.google.com/go/compute/metadata:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/golang.org/x/oauth2:go_default_library", 
"//vendor/golang.org/x/oauth2/google:go_default_library", @@ -89,6 +90,7 @@ go_library( "//vendor/google.golang.org/api/googleapi:go_default_library", "//vendor/google.golang.org/api/tpu/v1:go_default_library", "//vendor/gopkg.in/gcfg.v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -110,7 +112,6 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/api/v1/service:go_default_library", - "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/gce/cloud:go_default_library", "//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library", "//pkg/cloudprovider/providers/gce/cloud/mock:go_default_library", @@ -120,8 +121,8 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", + "//staging/src/k8s.io/cloud-provider:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", "//vendor/golang.org/x/oauth2/google:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/OWNERS index 2941ce492b5e..f02eb9550b19 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/OWNERS @@ -3,6 +3,5 @@ approvers: - jingxu97 - bowei - freehan -- nicksardo - mrhohn -- dnardo +- cheftako diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/BUILD index de46184e547a..e3c289daa6fa 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/BUILD @@ -19,11 +19,11 @@ go_library( deps = [ "//pkg/cloudprovider/providers/gce/cloud/filter:go_default_library", "//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/google.golang.org/api/compute/v0.alpha:go_default_library", "//vendor/google.golang.org/api/compute/v0.beta:go_default_library", "//vendor/google.golang.org/api/compute/v1:go_default_library", "//vendor/google.golang.org/api/googleapi:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter/BUILD index a2e0c7941cf7..0932666c50bd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter/BUILD @@ -5,7 +5,7 @@ go_library( srcs = ["filter.go"], importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter", visibility = ["//visibility:public"], - deps = ["//vendor/github.com/golang/glog:go_default_library"], + deps = ["//vendor/k8s.io/klog:go_default_library"], ) go_test( diff --git 
a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter/filter.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter/filter.go index c08005726c87..b65ab6391a7d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter/filter.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter/filter.go @@ -34,7 +34,7 @@ import ( "regexp" "strings" - "github.com/golang/glog" + "k8s.io/klog" ) var ( @@ -221,7 +221,7 @@ func (fp *filterPredicate) String() string { func (fp *filterPredicate) match(o interface{}) bool { v, err := extractValue(fp.fieldName, o) - glog.V(6).Infof("extractValue(%q, %#v) = %v, %v", fp.fieldName, o, v, err) + klog.V(6).Infof("extractValue(%q, %#v) = %v, %v", fp.fieldName, o, v, err) if err != nil { return false } @@ -234,7 +234,7 @@ func (fp *filterPredicate) match(o interface{}) bool { } re, err := regexp.Compile(*fp.s) if err != nil { - glog.Errorf("Match regexp %q is invalid: %v", *fp.s, err) + klog.Errorf("Match regexp %q is invalid: %v", *fp.s, err) return false } match = re.Match([]byte(x)) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen.go index a25cac9909c8..a3e7cd2c657a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen.go @@ -25,8 +25,8 @@ import ( "net/http" "sync" - "github.com/golang/glog" "google.golang.org/api/googleapi" + "k8s.io/klog" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" @@ -48,7 +48,7 @@ type Cloud interface { RegionBackendServices() RegionBackendServices AlphaRegionBackendServices() AlphaRegionBackendServices Disks() Disks - BetaRegionDisks() BetaRegionDisks + RegionDisks() RegionDisks Firewalls() Firewalls ForwardingRules() ForwardingRules AlphaForwardingRules() AlphaForwardingRules @@ -89,7 +89,7 @@ func NewGCE(s *Service) *GCE { gceRegionBackendServices: &GCERegionBackendServices{s}, gceAlphaRegionBackendServices: &GCEAlphaRegionBackendServices{s}, gceDisks: &GCEDisks{s}, - gceBetaRegionDisks: &GCEBetaRegionDisks{s}, + gceRegionDisks: &GCERegionDisks{s}, gceFirewalls: &GCEFirewalls{s}, gceForwardingRules: &GCEForwardingRules{s}, gceAlphaForwardingRules: &GCEAlphaForwardingRules{s}, @@ -134,7 +134,7 @@ type GCE struct { gceRegionBackendServices *GCERegionBackendServices gceAlphaRegionBackendServices *GCEAlphaRegionBackendServices gceDisks *GCEDisks - gceBetaRegionDisks *GCEBetaRegionDisks + gceRegionDisks *GCERegionDisks gceFirewalls *GCEFirewalls gceForwardingRules *GCEForwardingRules gceAlphaForwardingRules *GCEAlphaForwardingRules @@ -212,9 +212,9 @@ func (gce *GCE) Disks() Disks { return gce.gceDisks } -// BetaRegionDisks returns the interface for the beta RegionDisks. -func (gce *GCE) BetaRegionDisks() BetaRegionDisks { - return gce.gceBetaRegionDisks +// RegionDisks returns the interface for the ga RegionDisks. +func (gce *GCE) RegionDisks() RegionDisks { + return gce.gceRegionDisks } // Firewalls returns the interface for the ga Firewalls. 
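In gen.go above, the generated composite interface promotes regional disks from beta to GA: BetaRegionDisks() becomes RegionDisks() on both Cloud and GCE, matching the compute API bump elsewhere in this vendor update. Callers would reach it roughly as below; the Get signature is an assumption inferred from the sibling services and the mocks that follow.

```go
package gcedisks

import (
	"context"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
)

// getRegionalDisk sketches the post-rename call path: regional disks are
// reached through the GA RegionDisks() accessor rather than the removed
// BetaRegionDisks(). meta.RegionalKey builds the region-scoped key the
// generated services expect.
func getRegionalDisk(ctx context.Context, gce cloud.Cloud, region, name string) error {
	key := meta.RegionalKey(name, region)
	_, err := gce.RegionDisks().Get(ctx, key) // assumed (*ga.Disk, error) shape
	return err
}
```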
@@ -381,7 +381,7 @@ func NewMockGCE(projectRouter ProjectRouter) *MockGCE { MockRegionBackendServices: NewMockRegionBackendServices(projectRouter, mockRegionBackendServicesObjs), MockAlphaRegionBackendServices: NewMockAlphaRegionBackendServices(projectRouter, mockRegionBackendServicesObjs), MockDisks: NewMockDisks(projectRouter, mockDisksObjs), - MockBetaRegionDisks: NewMockBetaRegionDisks(projectRouter, mockRegionDisksObjs), + MockRegionDisks: NewMockRegionDisks(projectRouter, mockRegionDisksObjs), MockFirewalls: NewMockFirewalls(projectRouter, mockFirewallsObjs), MockForwardingRules: NewMockForwardingRules(projectRouter, mockForwardingRulesObjs), MockAlphaForwardingRules: NewMockAlphaForwardingRules(projectRouter, mockForwardingRulesObjs), @@ -426,7 +426,7 @@ type MockGCE struct { MockRegionBackendServices *MockRegionBackendServices MockAlphaRegionBackendServices *MockAlphaRegionBackendServices MockDisks *MockDisks - MockBetaRegionDisks *MockBetaRegionDisks + MockRegionDisks *MockRegionDisks MockFirewalls *MockFirewalls MockForwardingRules *MockForwardingRules MockAlphaForwardingRules *MockAlphaForwardingRules @@ -504,9 +504,9 @@ func (mock *MockGCE) Disks() Disks { return mock.MockDisks } -// BetaRegionDisks returns the interface for the beta RegionDisks. -func (mock *MockGCE) BetaRegionDisks() BetaRegionDisks { - return mock.MockBetaRegionDisks +// RegionDisks returns the interface for the ga RegionDisks. +func (mock *MockGCE) RegionDisks() RegionDisks { + return mock.MockRegionDisks } // Firewalls returns the interface for the ga Firewalls. @@ -649,7 +649,7 @@ func (m *MockAddressesObj) ToAlpha() *alpha.Address { // Convert the object via JSON copying to the type that was requested. ret := &alpha.Address{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *alpha.Address via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *alpha.Address via JSON: %v", m.Obj, err) } return ret } @@ -662,7 +662,7 @@ func (m *MockAddressesObj) ToBeta() *beta.Address { // Convert the object via JSON copying to the type that was requested. ret := &beta.Address{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *beta.Address via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *beta.Address via JSON: %v", m.Obj, err) } return ret } @@ -675,7 +675,7 @@ func (m *MockAddressesObj) ToGA() *ga.Address { // Convert the object via JSON copying to the type that was requested. ret := &ga.Address{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.Address via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.Address via JSON: %v", m.Obj, err) } return ret } @@ -695,7 +695,7 @@ func (m *MockBackendServicesObj) ToAlpha() *alpha.BackendService { // Convert the object via JSON copying to the type that was requested. ret := &alpha.BackendService{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *alpha.BackendService via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *alpha.BackendService via JSON: %v", m.Obj, err) } return ret } @@ -708,7 +708,7 @@ func (m *MockBackendServicesObj) ToBeta() *beta.BackendService { // Convert the object via JSON copying to the type that was requested. 
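All the mock converters above and below (ToAlpha, ToBeta, ToGA) funnel through copyViaJSON, which works because the alpha, beta, and GA structs share field names: marshalling one version and unmarshalling into another is a cheap structural conversion in which fields unknown to the target version are silently dropped. Its likely shape, as a sketch:

```go
package versionconv

import "encoding/json"

// copyViaJSONSketch mirrors the converter the mocks rely on: a JSON
// round-trip copies every field whose name exists in both API versions
// and drops the rest, which is exactly the desired alpha/beta/GA coercion.
func copyViaJSONSketch(dst, src interface{}) error {
	raw, err := json.Marshal(src)
	if err != nil {
		return err
	}
	return json.Unmarshal(raw, dst)
}
```

The hunks continue below with the remaining per-type converters.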
ret := &beta.BackendService{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *beta.BackendService via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *beta.BackendService via JSON: %v", m.Obj, err) } return ret } @@ -721,7 +721,7 @@ func (m *MockBackendServicesObj) ToGA() *ga.BackendService { // Convert the object via JSON copying to the type that was requested. ret := &ga.BackendService{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.BackendService via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.BackendService via JSON: %v", m.Obj, err) } return ret } @@ -741,7 +741,7 @@ func (m *MockDisksObj) ToGA() *ga.Disk { // Convert the object via JSON copying to the type that was requested. ret := &ga.Disk{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.Disk via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.Disk via JSON: %v", m.Obj, err) } return ret } @@ -761,7 +761,7 @@ func (m *MockFirewallsObj) ToGA() *ga.Firewall { // Convert the object via JSON copying to the type that was requested. ret := &ga.Firewall{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.Firewall via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.Firewall via JSON: %v", m.Obj, err) } return ret } @@ -781,7 +781,7 @@ func (m *MockForwardingRulesObj) ToAlpha() *alpha.ForwardingRule { // Convert the object via JSON copying to the type that was requested. ret := &alpha.ForwardingRule{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *alpha.ForwardingRule via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *alpha.ForwardingRule via JSON: %v", m.Obj, err) } return ret } @@ -794,7 +794,7 @@ func (m *MockForwardingRulesObj) ToGA() *ga.ForwardingRule { // Convert the object via JSON copying to the type that was requested. ret := &ga.ForwardingRule{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.ForwardingRule via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.ForwardingRule via JSON: %v", m.Obj, err) } return ret } @@ -814,7 +814,7 @@ func (m *MockGlobalAddressesObj) ToGA() *ga.Address { // Convert the object via JSON copying to the type that was requested. ret := &ga.Address{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.Address via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.Address via JSON: %v", m.Obj, err) } return ret } @@ -834,7 +834,7 @@ func (m *MockGlobalForwardingRulesObj) ToGA() *ga.ForwardingRule { // Convert the object via JSON copying to the type that was requested. ret := &ga.ForwardingRule{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.ForwardingRule via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.ForwardingRule via JSON: %v", m.Obj, err) } return ret } @@ -854,7 +854,7 @@ func (m *MockHealthChecksObj) ToAlpha() *alpha.HealthCheck { // Convert the object via JSON copying to the type that was requested. 
	ret := &alpha.HealthCheck{}
	if err := copyViaJSON(ret, m.Obj); err != nil {
-		glog.Errorf("Could not convert %T to *alpha.HealthCheck via JSON: %v", m.Obj, err)
+		klog.Errorf("Could not convert %T to *alpha.HealthCheck via JSON: %v", m.Obj, err)
	}
	return ret
}
@@ -867,7 +867,7 @@ func (m *MockHealthChecksObj) ToBeta() *beta.HealthCheck {
	// Convert the object via JSON copying to the type that was requested.
	ret := &beta.HealthCheck{}
	if err := copyViaJSON(ret, m.Obj); err != nil {
-		glog.Errorf("Could not convert %T to *beta.HealthCheck via JSON: %v", m.Obj, err)
+		klog.Errorf("Could not convert %T to *beta.HealthCheck via JSON: %v", m.Obj, err)
	}
	return ret
}
@@ -880,7 +880,7 @@ func (m *MockHealthChecksObj) ToGA() *ga.HealthCheck {
	// Convert the object via JSON copying to the type that was requested.
	ret := &ga.HealthCheck{}
	if err := copyViaJSON(ret, m.Obj); err != nil {
-		glog.Errorf("Could not convert %T to *ga.HealthCheck via JSON: %v", m.Obj, err)
+		klog.Errorf("Could not convert %T to *ga.HealthCheck via JSON: %v", m.Obj, err)
	}
	return ret
}
@@ -900,7 +900,7 @@ func (m *MockHttpHealthChecksObj) ToGA() *ga.HttpHealthCheck {
	// Convert the object via JSON copying to the type that was requested.
	ret := &ga.HttpHealthCheck{}
	if err := copyViaJSON(ret, m.Obj); err != nil {
-		glog.Errorf("Could not convert %T to *ga.HttpHealthCheck via JSON: %v", m.Obj, err)
+		klog.Errorf("Could not convert %T to *ga.HttpHealthCheck via JSON: %v", m.Obj, err)
	}
	return ret
}
@@ -920,7 +920,7 @@ func (m *MockHttpsHealthChecksObj) ToGA() *ga.HttpsHealthCheck {
	// Convert the object via JSON copying to the type that was requested.
	ret := &ga.HttpsHealthCheck{}
	if err := copyViaJSON(ret, m.Obj); err != nil {
-		glog.Errorf("Could not convert %T to *ga.HttpsHealthCheck via JSON: %v", m.Obj, err)
+		klog.Errorf("Could not convert %T to *ga.HttpsHealthCheck via JSON: %v", m.Obj, err)
	}
	return ret
}
@@ -940,7 +940,7 @@ func (m *MockInstanceGroupsObj) ToGA() *ga.InstanceGroup {
	// Convert the object via JSON copying to the type that was requested.
	ret := &ga.InstanceGroup{}
	if err := copyViaJSON(ret, m.Obj); err != nil {
-		glog.Errorf("Could not convert %T to *ga.InstanceGroup via JSON: %v", m.Obj, err)
+		klog.Errorf("Could not convert %T to *ga.InstanceGroup via JSON: %v", m.Obj, err)
	}
	return ret
}
@@ -960,7 +960,7 @@ func (m *MockInstancesObj) ToAlpha() *alpha.Instance {
	// Convert the object via JSON copying to the type that was requested.
	ret := &alpha.Instance{}
	if err := copyViaJSON(ret, m.Obj); err != nil {
-		glog.Errorf("Could not convert %T to *alpha.Instance via JSON: %v", m.Obj, err)
+		klog.Errorf("Could not convert %T to *alpha.Instance via JSON: %v", m.Obj, err)
	}
	return ret
}
@@ -973,7 +973,7 @@ func (m *MockInstancesObj) ToBeta() *beta.Instance {
	// Convert the object via JSON copying to the type that was requested.
	ret := &beta.Instance{}
	if err := copyViaJSON(ret, m.Obj); err != nil {
-		glog.Errorf("Could not convert %T to *beta.Instance via JSON: %v", m.Obj, err)
+		klog.Errorf("Could not convert %T to *beta.Instance via JSON: %v", m.Obj, err)
	}
	return ret
}
@@ -986,7 +986,7 @@ func (m *MockInstancesObj) ToGA() *ga.Instance {
	// Convert the object via JSON copying to the type that was requested.
	ret := &ga.Instance{}
	if err := copyViaJSON(ret, m.Obj); err != nil {
-		glog.Errorf("Could not convert %T to *ga.Instance via JSON: %v", m.Obj, err)
+		klog.Errorf("Could not convert %T to *ga.Instance via JSON: %v", m.Obj, err)
	}
	return ret
}
@@ -1006,7 +1006,7 @@ func (m *MockNetworkEndpointGroupsObj) ToAlpha() *alpha.NetworkEndpointGroup {
	// Convert the object via JSON copying to the type that was requested.
	ret := &alpha.NetworkEndpointGroup{}
	if err := copyViaJSON(ret, m.Obj); err != nil {
-		glog.Errorf("Could not convert %T to *alpha.NetworkEndpointGroup via JSON: %v", m.Obj, err)
+		klog.Errorf("Could not convert %T to *alpha.NetworkEndpointGroup via JSON: %v", m.Obj, err)
	}
	return ret
}
@@ -1019,7 +1019,7 @@ func (m *MockNetworkEndpointGroupsObj) ToBeta() *beta.NetworkEndpointGroup {
	// Convert the object via JSON copying to the type that was requested.
	ret := &beta.NetworkEndpointGroup{}
	if err := copyViaJSON(ret, m.Obj); err != nil {
-		glog.Errorf("Could not convert %T to *beta.NetworkEndpointGroup via JSON: %v", m.Obj, err)
+		klog.Errorf("Could not convert %T to *beta.NetworkEndpointGroup via JSON: %v", m.Obj, err)
	}
	return ret
}
@@ -1039,7 +1039,7 @@ func (m *MockProjectsObj) ToGA() *ga.Project {
	// Convert the object via JSON copying to the type that was requested.
	ret := &ga.Project{}
	if err := copyViaJSON(ret, m.Obj); err != nil {
-		glog.Errorf("Could not convert %T to *ga.Project via JSON: %v", m.Obj, err)
+		klog.Errorf("Could not convert %T to *ga.Project via JSON: %v", m.Obj, err)
	}
	return ret
}
@@ -1059,7 +1059,7 @@ func (m *MockRegionBackendServicesObj) ToAlpha() *alpha.BackendService {
	// Convert the object via JSON copying to the type that was requested.
	ret := &alpha.BackendService{}
	if err := copyViaJSON(ret, m.Obj); err != nil {
-		glog.Errorf("Could not convert %T to *alpha.BackendService via JSON: %v", m.Obj, err)
+		klog.Errorf("Could not convert %T to *alpha.BackendService via JSON: %v", m.Obj, err)
	}
	return ret
}
@@ -1072,7 +1072,7 @@ func (m *MockRegionBackendServicesObj) ToGA() *ga.BackendService {
	// Convert the object via JSON copying to the type that was requested.
	ret := &ga.BackendService{}
	if err := copyViaJSON(ret, m.Obj); err != nil {
-		glog.Errorf("Could not convert %T to *ga.BackendService via JSON: %v", m.Obj, err)
+		klog.Errorf("Could not convert %T to *ga.BackendService via JSON: %v", m.Obj, err)
	}
	return ret
}
@@ -1084,15 +1084,15 @@ type MockRegionDisksObj struct {
	Obj interface{}
}

-// ToBeta retrieves the given version of the object.
-func (m *MockRegionDisksObj) ToBeta() *beta.Disk {
-	if ret, ok := m.Obj.(*beta.Disk); ok {
+// ToGA retrieves the given version of the object.
+func (m *MockRegionDisksObj) ToGA() *ga.Disk {
+	if ret, ok := m.Obj.(*ga.Disk); ok {
		return ret
	}
	// Convert the object via JSON copying to the type that was requested.
-	ret := &beta.Disk{}
+	ret := &ga.Disk{}
	if err := copyViaJSON(ret, m.Obj); err != nil {
-		glog.Errorf("Could not convert %T to *beta.Disk via JSON: %v", m.Obj, err)
+		klog.Errorf("Could not convert %T to *ga.Disk via JSON: %v", m.Obj, err)
	}
	return ret
}
@@ -1112,7 +1112,7 @@ func (m *MockRegionsObj) ToGA() *ga.Region {
	// Convert the object via JSON copying to the type that was requested.
	ret := &ga.Region{}
	if err := copyViaJSON(ret, m.Obj); err != nil {
-		glog.Errorf("Could not convert %T to *ga.Region via JSON: %v", m.Obj, err)
+		klog.Errorf("Could not convert %T to *ga.Region via JSON: %v", m.Obj, err)
	}
	return ret
}
@@ -1132,7 +1132,7 @@ func (m *MockRoutesObj) ToGA() *ga.Route {
	// Convert the object via JSON copying to the type that was requested.
	ret := &ga.Route{}
	if err := copyViaJSON(ret, m.Obj); err != nil {
-		glog.Errorf("Could not convert %T to *ga.Route via JSON: %v", m.Obj, err)
+		klog.Errorf("Could not convert %T to *ga.Route via JSON: %v", m.Obj, err)
	}
	return ret
}
@@ -1152,7 +1152,7 @@ func (m *MockSecurityPoliciesObj) ToBeta() *beta.SecurityPolicy {
	// Convert the object via JSON copying to the type that was requested.
	ret := &beta.SecurityPolicy{}
	if err := copyViaJSON(ret, m.Obj); err != nil {
-		glog.Errorf("Could not convert %T to *beta.SecurityPolicy via JSON: %v", m.Obj, err)
+		klog.Errorf("Could not convert %T to *beta.SecurityPolicy via JSON: %v", m.Obj, err)
	}
	return ret
}
@@ -1172,7 +1172,7 @@ func (m *MockSslCertificatesObj) ToGA() *ga.SslCertificate {
	// Convert the object via JSON copying to the type that was requested.
	ret := &ga.SslCertificate{}
	if err := copyViaJSON(ret, m.Obj); err != nil {
-		glog.Errorf("Could not convert %T to *ga.SslCertificate via JSON: %v", m.Obj, err)
+		klog.Errorf("Could not convert %T to *ga.SslCertificate via JSON: %v", m.Obj, err)
	}
	return ret
}
@@ -1192,7 +1192,7 @@ func (m *MockTargetHttpProxiesObj) ToGA() *ga.TargetHttpProxy {
	// Convert the object via JSON copying to the type that was requested.
	ret := &ga.TargetHttpProxy{}
	if err := copyViaJSON(ret, m.Obj); err != nil {
-		glog.Errorf("Could not convert %T to *ga.TargetHttpProxy via JSON: %v", m.Obj, err)
+		klog.Errorf("Could not convert %T to *ga.TargetHttpProxy via JSON: %v", m.Obj, err)
	}
	return ret
}
@@ -1212,7 +1212,7 @@ func (m *MockTargetHttpsProxiesObj) ToGA() *ga.TargetHttpsProxy {
	// Convert the object via JSON copying to the type that was requested.
	ret := &ga.TargetHttpsProxy{}
	if err := copyViaJSON(ret, m.Obj); err != nil {
-		glog.Errorf("Could not convert %T to *ga.TargetHttpsProxy via JSON: %v", m.Obj, err)
+		klog.Errorf("Could not convert %T to *ga.TargetHttpsProxy via JSON: %v", m.Obj, err)
	}
	return ret
}
@@ -1232,7 +1232,7 @@ func (m *MockTargetPoolsObj) ToGA() *ga.TargetPool {
	// Convert the object via JSON copying to the type that was requested.
	ret := &ga.TargetPool{}
	if err := copyViaJSON(ret, m.Obj); err != nil {
-		glog.Errorf("Could not convert %T to *ga.TargetPool via JSON: %v", m.Obj, err)
+		klog.Errorf("Could not convert %T to *ga.TargetPool via JSON: %v", m.Obj, err)
	}
	return ret
}
@@ -1252,7 +1252,7 @@ func (m *MockUrlMapsObj) ToGA() *ga.UrlMap {
	// Convert the object via JSON copying to the type that was requested.
	ret := &ga.UrlMap{}
	if err := copyViaJSON(ret, m.Obj); err != nil {
-		glog.Errorf("Could not convert %T to *ga.UrlMap via JSON: %v", m.Obj, err)
+		klog.Errorf("Could not convert %T to *ga.UrlMap via JSON: %v", m.Obj, err)
	}
	return ret
}
@@ -1272,7 +1272,7 @@ func (m *MockZonesObj) ToGA() *ga.Zone {
	// Convert the object via JSON copying to the type that was requested.
	ret := &ga.Zone{}
	if err := copyViaJSON(ret, m.Obj); err != nil {
-		glog.Errorf("Could not convert %T to *ga.Zone via JSON: %v", m.Obj, err)
+		klog.Errorf("Could not convert %T to *ga.Zone via JSON: %v", m.Obj, err)
	}
	return ret
}
@@ -1332,7 +1332,7 @@ type MockAddresses struct {
func (m *MockAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Address, error) {
	if m.GetHook != nil {
		if intercept, obj, err := m.GetHook(ctx, key, m); intercept {
-			glog.V(5).Infof("MockAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
+			klog.V(5).Infof("MockAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
			return obj, err
		}
	}
@@ -1344,12 +1344,12 @@ func (m *MockAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Address, er
	defer m.Lock.Unlock()

	if err, ok := m.GetError[*key]; ok {
-		glog.V(5).Infof("MockAddresses.Get(%v, %s) = nil, %v", ctx, key, err)
+		klog.V(5).Infof("MockAddresses.Get(%v, %s) = nil, %v", ctx, key, err)
		return nil, err
	}
	if obj, ok := m.Objects[*key]; ok {
		typedObj := obj.ToGA()
-		glog.V(5).Infof("MockAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
+		klog.V(5).Infof("MockAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
		return typedObj, nil
	}

@@ -1357,7 +1357,7 @@ func (m *MockAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Address, er
		Code:    http.StatusNotFound,
		Message: fmt.Sprintf("MockAddresses %v not found", key),
	}
-	glog.V(5).Infof("MockAddresses.Get(%v, %s) = nil, %v", ctx, key, err)
+	klog.V(5).Infof("MockAddresses.Get(%v, %s) = nil, %v", ctx, key, err)
	return nil, err
}

@@ -1365,7 +1365,7 @@ func (m *MockAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Address, er
func (m *MockAddresses) List(ctx context.Context, region string, fl *filter.F) ([]*ga.Address, error) {
	if m.ListHook != nil {
		if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept {
-			glog.V(5).Infof("MockAddresses.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err)
+			klog.V(5).Infof("MockAddresses.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err)
			return objs, err
		}
	}
@@ -1375,7 +1375,7 @@ func (m *MockAddresses) List(ctx context.Context, region string, fl *filter.F) (

	if m.ListError != nil {
		err := *m.ListError
-		glog.V(5).Infof("MockAddresses.List(%v, %q, %v) = nil, %v", ctx, region, fl, err)
+		klog.V(5).Infof("MockAddresses.List(%v, %q, %v) = nil, %v", ctx, region, fl, err)
		return nil, *m.ListError
	}

@@ -1391,7 +1391,7 @@ func (m *MockAddresses) List(ctx context.Context, region string, fl *filter.F) (
		objs = append(objs, obj.ToGA())
	}

-	glog.V(5).Infof("MockAddresses.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs))
+	klog.V(5).Infof("MockAddresses.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs))
	return objs, nil
}

@@ -1399,7 +1399,7 @@ func (m *MockAddresses) List(ctx context.Context, region string, fl *filter.F) (
func (m *MockAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Address) error {
	if m.InsertHook != nil {
		if intercept, err := m.InsertHook(ctx, key, obj, m); intercept {
-			glog.V(5).Infof("MockAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
+			klog.V(5).Infof("MockAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
			return err
		}
	}
@@ -1411,7 +1411,7 @@ func (m *MockAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Addre
	defer m.Lock.Unlock()

	if err, ok := m.InsertError[*key]; ok {
-		glog.V(5).Infof("MockAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
+		klog.V(5).Infof("MockAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
		return err
	}
	if _, ok := m.Objects[*key]; ok {
@@ -1419,7 +1419,7 @@ func (m *MockAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Addre
			Code:    http.StatusConflict,
			Message: fmt.Sprintf("MockAddresses %v exists", key),
		}
-		glog.V(5).Infof("MockAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
+		klog.V(5).Infof("MockAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
		return err
	}

@@ -1428,7 +1428,7 @@ func (m *MockAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Addre
	obj.SelfLink = SelfLink(meta.VersionGA, projectID, "addresses", key)

	m.Objects[*key] = &MockAddressesObj{obj}
-	glog.V(5).Infof("MockAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj)
+	klog.V(5).Infof("MockAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj)
	return nil
}

@@ -1436,7 +1436,7 @@ func (m *MockAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Addre
func (m *MockAddresses) Delete(ctx context.Context, key *meta.Key) error {
	if m.DeleteHook != nil {
		if intercept, err := m.DeleteHook(ctx, key, m); intercept {
-			glog.V(5).Infof("MockAddresses.Delete(%v, %v) = %v", ctx, key, err)
+			klog.V(5).Infof("MockAddresses.Delete(%v, %v) = %v", ctx, key, err)
			return err
		}
	}
@@ -1448,7 +1448,7 @@ func (m *MockAddresses) Delete(ctx context.Context, key *meta.Key) error {
	defer m.Lock.Unlock()

	if err, ok := m.DeleteError[*key]; ok {
-		glog.V(5).Infof("MockAddresses.Delete(%v, %v) = %v", ctx, key, err)
+		klog.V(5).Infof("MockAddresses.Delete(%v, %v) = %v", ctx, key, err)
		return err
	}
	if _, ok := m.Objects[*key]; !ok {
@@ -1456,12 +1456,12 @@ func (m *MockAddresses) Delete(ctx context.Context, key *meta.Key) error {
			Code:    http.StatusNotFound,
			Message: fmt.Sprintf("MockAddresses %v not found", key),
		}
-		glog.V(5).Infof("MockAddresses.Delete(%v, %v) = %v", ctx, key, err)
+		klog.V(5).Infof("MockAddresses.Delete(%v, %v) = %v", ctx, key, err)
		return err
	}

	delete(m.Objects, *key)
-	glog.V(5).Infof("MockAddresses.Delete(%v, %v) = nil", ctx, key)
+	klog.V(5).Infof("MockAddresses.Delete(%v, %v) = nil", ctx, key)
	return nil
}

@@ -1477,10 +1477,10 @@ type GCEAddresses struct {

// Get the Address named by key.
func (g *GCEAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Address, error) {
-	glog.V(5).Infof("GCEAddresses.Get(%v, %v): called", ctx, key)
+	klog.V(5).Infof("GCEAddresses.Get(%v, %v): called", ctx, key)

	if !key.Valid() {
-		glog.V(2).Infof("GCEAddresses.Get(%v, %v): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEAddresses.Get(%v, %v): key is invalid (%#v)", ctx, key, key)
		return nil, fmt.Errorf("invalid GCE key (%#v)", key)
	}
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Addresses")
@@ -1490,21 +1490,21 @@ func (g *GCEAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Address, err
		Version:   meta.Version("ga"),
		Service:   "Addresses",
	}
-	glog.V(5).Infof("GCEAddresses.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEAddresses.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEAddresses.Get(%v, %v): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEAddresses.Get(%v, %v): RateLimiter error: %v", ctx, key, err)
		return nil, err
	}
	call := g.s.GA.Addresses.Get(projectID, key.Region, key.Name)
	call.Context(ctx)
	v, err := call.Do()
-	glog.V(4).Infof("GCEAddresses.Get(%v, %v) = %+v, %v", ctx, key, v, err)
+	klog.V(4).Infof("GCEAddresses.Get(%v, %v) = %+v, %v", ctx, key, v, err)
	return v, err
}

// List all Address objects.
func (g *GCEAddresses) List(ctx context.Context, region string, fl *filter.F) ([]*ga.Address, error) {
-	glog.V(5).Infof("GCEAddresses.List(%v, %v, %v) called", ctx, region, fl)
+	klog.V(5).Infof("GCEAddresses.List(%v, %v, %v) called", ctx, region, fl)
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Addresses")
	rk := &RateLimitKey{
		ProjectID: projectID,
@@ -1515,30 +1515,30 @@ func (g *GCEAddresses) List(ctx context.Context, region string, fl *filter.F) ([
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return nil, err
	}
-	glog.V(5).Infof("GCEAddresses.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk)
+	klog.V(5).Infof("GCEAddresses.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk)
	call := g.s.GA.Addresses.List(projectID, region)
	if fl != filter.None {
		call.Filter(fl.String())
	}
	var all []*ga.Address
	f := func(l *ga.AddressList) error {
-		glog.V(5).Infof("GCEAddresses.List(%v, ..., %v): page %+v", ctx, fl, l)
+		klog.V(5).Infof("GCEAddresses.List(%v, ..., %v): page %+v", ctx, fl, l)
		all = append(all, l.Items...)
		return nil
	}
	if err := call.Pages(ctx, f); err != nil {
-		glog.V(4).Infof("GCEAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err)
+		klog.V(4).Infof("GCEAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err)
		return nil, err
	}
-	if glog.V(4) {
-		glog.V(4).Infof("GCEAddresses.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil)
-	} else if glog.V(5) {
+	if klog.V(4) {
+		klog.V(4).Infof("GCEAddresses.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil)
+	} else if klog.V(5) {
		var asStr []string
		for _, o := range all {
			asStr = append(asStr, fmt.Sprintf("%+v", o))
		}
-		glog.V(5).Infof("GCEAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil)
+		klog.V(5).Infof("GCEAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil)
	}

	return all, nil
@@ -1546,9 +1546,9 @@ func (g *GCEAddresses) List(ctx context.Context, region string, fl *filter.F) ([

// Insert Address with key of value obj.
func (g *GCEAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Address) error {
-	glog.V(5).Infof("GCEAddresses.Insert(%v, %v, %+v): called", ctx, key, obj)
+	klog.V(5).Infof("GCEAddresses.Insert(%v, %v, %+v): called", ctx, key, obj)
	if !key.Valid() {
-		glog.V(2).Infof("GCEAddresses.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEAddresses.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
		return fmt.Errorf("invalid GCE key (%+v)", key)
	}
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Addresses")
@@ -1558,9 +1558,9 @@ func (g *GCEAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Addres
		Version:   meta.Version("ga"),
		Service:   "Addresses",
	}
-	glog.V(5).Infof("GCEAddresses.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEAddresses.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEAddresses.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEAddresses.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
		return err
	}
	obj.Name = key.Name
@@ -1569,20 +1569,20 @@ func (g *GCEAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Addres

	op, err := call.Do()
	if err != nil {
-		glog.V(4).Infof("GCEAddresses.Insert(%v, %v, ...) = %+v", ctx, key, err)
+		klog.V(4).Infof("GCEAddresses.Insert(%v, %v, ...) = %+v", ctx, key, err)
		return err
	}

	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEAddresses.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err)
+	klog.V(4).Infof("GCEAddresses.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err)
	return err
}

// Delete the Address referenced by key.
func (g *GCEAddresses) Delete(ctx context.Context, key *meta.Key) error {
-	glog.V(5).Infof("GCEAddresses.Delete(%v, %v): called", ctx, key)
+	klog.V(5).Infof("GCEAddresses.Delete(%v, %v): called", ctx, key)
	if !key.Valid() {
-		glog.V(2).Infof("GCEAddresses.Delete(%v, %v): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEAddresses.Delete(%v, %v): key is invalid (%#v)", ctx, key, key)
		return fmt.Errorf("invalid GCE key (%+v)", key)
	}
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Addresses")
@@ -1592,9 +1592,9 @@ func (g *GCEAddresses) Delete(ctx context.Context, key *meta.Key) error {
		Version:   meta.Version("ga"),
		Service:   "Addresses",
	}
-	glog.V(5).Infof("GCEAddresses.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEAddresses.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEAddresses.Delete(%v, %v): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEAddresses.Delete(%v, %v): RateLimiter error: %v", ctx, key, err)
		return err
	}
	call := g.s.GA.Addresses.Delete(projectID, key.Region, key.Name)
@@ -1602,12 +1602,12 @@ func (g *GCEAddresses) Delete(ctx context.Context, key *meta.Key) error {

	op, err := call.Do()
	if err != nil {
-		glog.V(4).Infof("GCEAddresses.Delete(%v, %v) = %v", ctx, key, err)
+		klog.V(4).Infof("GCEAddresses.Delete(%v, %v) = %v", ctx, key, err)
		return err
	}

	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEAddresses.Delete(%v, %v) = %v", ctx, key, err)
+	klog.V(4).Infof("GCEAddresses.Delete(%v, %v) = %v", ctx, key, err)
	return err
}

@@ -1666,7 +1666,7 @@ type MockAlphaAddresses struct {
func (m *MockAlphaAddresses) Get(ctx context.Context, key *meta.Key) (*alpha.Address, error) {
	if m.GetHook != nil {
		if intercept, obj, err := m.GetHook(ctx, key, m); intercept {
-			glog.V(5).Infof("MockAlphaAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
+			klog.V(5).Infof("MockAlphaAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
			return obj, err
		}
	}
@@ -1678,12 +1678,12 @@ func (m *MockAlphaAddresses) Get(ctx context.Context, key *meta.Key) (*alpha.Add
	defer m.Lock.Unlock()

	if err, ok := m.GetError[*key]; ok {
-		glog.V(5).Infof("MockAlphaAddresses.Get(%v, %s) = nil, %v", ctx, key, err)
+		klog.V(5).Infof("MockAlphaAddresses.Get(%v, %s) = nil, %v", ctx, key, err)
		return nil, err
	}
	if obj, ok := m.Objects[*key]; ok {
		typedObj := obj.ToAlpha()
-		glog.V(5).Infof("MockAlphaAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
+		klog.V(5).Infof("MockAlphaAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
		return typedObj, nil
	}

@@ -1691,7 +1691,7 @@ func (m *MockAlphaAddresses) Get(ctx context.Context, key *meta.Key) (*alpha.Add
		Code:    http.StatusNotFound,
		Message: fmt.Sprintf("MockAlphaAddresses %v not found", key),
	}
-	glog.V(5).Infof("MockAlphaAddresses.Get(%v, %s) = nil, %v", ctx, key, err)
+	klog.V(5).Infof("MockAlphaAddresses.Get(%v, %s) = nil, %v", ctx, key, err)
	return nil, err
}

@@ -1699,7 +1699,7 @@ func (m *MockAlphaAddresses) Get(ctx context.Context, key *meta.Key) (*alpha.Add
func (m *MockAlphaAddresses) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.Address, error) {
	if m.ListHook != nil {
		if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept {
-			glog.V(5).Infof("MockAlphaAddresses.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err)
+			klog.V(5).Infof("MockAlphaAddresses.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err)
			return objs, err
		}
	}
@@ -1709,7 +1709,7 @@ func (m *MockAlphaAddresses) List(ctx context.Context, region string, fl *filter

	if m.ListError != nil {
		err := *m.ListError
-		glog.V(5).Infof("MockAlphaAddresses.List(%v, %q, %v) = nil, %v", ctx, region, fl, err)
+		klog.V(5).Infof("MockAlphaAddresses.List(%v, %q, %v) = nil, %v", ctx, region, fl, err)
		return nil, *m.ListError
	}

@@ -1725,7 +1725,7 @@ func (m *MockAlphaAddresses) List(ctx context.Context, region string, fl *filter
		objs = append(objs, obj.ToAlpha())
	}

-	glog.V(5).Infof("MockAlphaAddresses.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs))
+	klog.V(5).Infof("MockAlphaAddresses.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs))
	return objs, nil
}

@@ -1733,7 +1733,7 @@ func (m *MockAlphaAddresses) List(ctx context.Context, region string, fl *filter
func (m *MockAlphaAddresses) Insert(ctx context.Context, key *meta.Key, obj *alpha.Address) error {
	if m.InsertHook != nil {
		if intercept, err := m.InsertHook(ctx, key, obj, m); intercept {
-			glog.V(5).Infof("MockAlphaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
+			klog.V(5).Infof("MockAlphaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
			return err
		}
	}
@@ -1745,7 +1745,7 @@ func (m *MockAlphaAddresses) Insert(ctx context.Context, key *meta.Key, obj *alp
	defer m.Lock.Unlock()

	if err, ok := m.InsertError[*key]; ok {
-		glog.V(5).Infof("MockAlphaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
+		klog.V(5).Infof("MockAlphaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
		return err
	}
	if _, ok := m.Objects[*key]; ok {
@@ -1753,7 +1753,7 @@ func (m *MockAlphaAddresses) Insert(ctx context.Context, key *meta.Key, obj *alp
			Code:    http.StatusConflict,
			Message: fmt.Sprintf("MockAlphaAddresses %v exists", key),
		}
-		glog.V(5).Infof("MockAlphaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
+		klog.V(5).Infof("MockAlphaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
		return err
	}

@@ -1762,7 +1762,7 @@ func (m *MockAlphaAddresses) Insert(ctx context.Context, key *meta.Key, obj *alp
	obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "addresses", key)

	m.Objects[*key] = &MockAddressesObj{obj}
-	glog.V(5).Infof("MockAlphaAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj)
+	klog.V(5).Infof("MockAlphaAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj)
	return nil
}

@@ -1770,7 +1770,7 @@ func (m *MockAlphaAddresses) Insert(ctx context.Context, key *meta.Key, obj *alp
func (m *MockAlphaAddresses) Delete(ctx context.Context, key *meta.Key) error {
	if m.DeleteHook != nil {
		if intercept, err := m.DeleteHook(ctx, key, m); intercept {
-			glog.V(5).Infof("MockAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err)
+			klog.V(5).Infof("MockAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err)
			return err
		}
	}
@@ -1782,7 +1782,7 @@ func (m *MockAlphaAddresses) Delete(ctx context.Context, key *meta.Key) error {
	defer m.Lock.Unlock()

	if err, ok := m.DeleteError[*key]; ok {
-		glog.V(5).Infof("MockAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err)
+		klog.V(5).Infof("MockAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err)
		return err
	}
	if _, ok := m.Objects[*key]; !ok {
@@ -1790,12 +1790,12 @@ func (m *MockAlphaAddresses) Delete(ctx context.Context, key *meta.Key) error {
			Code:    http.StatusNotFound,
			Message: fmt.Sprintf("MockAlphaAddresses %v not found", key),
		}
-		glog.V(5).Infof("MockAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err)
+		klog.V(5).Infof("MockAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err)
		return err
	}

	delete(m.Objects, *key)
-	glog.V(5).Infof("MockAlphaAddresses.Delete(%v, %v) = nil", ctx, key)
+	klog.V(5).Infof("MockAlphaAddresses.Delete(%v, %v) = nil", ctx, key)
	return nil
}

@@ -1811,10 +1811,10 @@ type GCEAlphaAddresses struct {

// Get the Address named by key.
func (g *GCEAlphaAddresses) Get(ctx context.Context, key *meta.Key) (*alpha.Address, error) {
-	glog.V(5).Infof("GCEAlphaAddresses.Get(%v, %v): called", ctx, key)
+	klog.V(5).Infof("GCEAlphaAddresses.Get(%v, %v): called", ctx, key)

	if !key.Valid() {
-		glog.V(2).Infof("GCEAlphaAddresses.Get(%v, %v): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEAlphaAddresses.Get(%v, %v): key is invalid (%#v)", ctx, key, key)
		return nil, fmt.Errorf("invalid GCE key (%#v)", key)
	}
	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Addresses")
@@ -1824,21 +1824,21 @@ func (g *GCEAlphaAddresses) Get(ctx context.Context, key *meta.Key) (*alpha.Addr
		Version:   meta.Version("alpha"),
		Service:   "Addresses",
	}
-	glog.V(5).Infof("GCEAlphaAddresses.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEAlphaAddresses.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEAlphaAddresses.Get(%v, %v): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEAlphaAddresses.Get(%v, %v): RateLimiter error: %v", ctx, key, err)
		return nil, err
	}
	call := g.s.Alpha.Addresses.Get(projectID, key.Region, key.Name)
	call.Context(ctx)
	v, err := call.Do()
-	glog.V(4).Infof("GCEAlphaAddresses.Get(%v, %v) = %+v, %v", ctx, key, v, err)
+	klog.V(4).Infof("GCEAlphaAddresses.Get(%v, %v) = %+v, %v", ctx, key, v, err)
	return v, err
}

// List all Address objects.
func (g *GCEAlphaAddresses) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.Address, error) {
-	glog.V(5).Infof("GCEAlphaAddresses.List(%v, %v, %v) called", ctx, region, fl)
+	klog.V(5).Infof("GCEAlphaAddresses.List(%v, %v, %v) called", ctx, region, fl)
	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Addresses")
	rk := &RateLimitKey{
		ProjectID: projectID,
@@ -1849,30 +1849,30 @@ func (g *GCEAlphaAddresses) List(ctx context.Context, region string, fl *filter.
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return nil, err
	}
-	glog.V(5).Infof("GCEAlphaAddresses.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk)
+	klog.V(5).Infof("GCEAlphaAddresses.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk)
	call := g.s.Alpha.Addresses.List(projectID, region)
	if fl != filter.None {
		call.Filter(fl.String())
	}
	var all []*alpha.Address
	f := func(l *alpha.AddressList) error {
-		glog.V(5).Infof("GCEAlphaAddresses.List(%v, ..., %v): page %+v", ctx, fl, l)
+		klog.V(5).Infof("GCEAlphaAddresses.List(%v, ..., %v): page %+v", ctx, fl, l)
		all = append(all, l.Items...)
		return nil
	}
	if err := call.Pages(ctx, f); err != nil {
-		glog.V(4).Infof("GCEAlphaAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err)
+		klog.V(4).Infof("GCEAlphaAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err)
		return nil, err
	}
-	if glog.V(4) {
-		glog.V(4).Infof("GCEAlphaAddresses.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil)
-	} else if glog.V(5) {
+	if klog.V(4) {
+		klog.V(4).Infof("GCEAlphaAddresses.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil)
+	} else if klog.V(5) {
		var asStr []string
		for _, o := range all {
			asStr = append(asStr, fmt.Sprintf("%+v", o))
		}
-		glog.V(5).Infof("GCEAlphaAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil)
+		klog.V(5).Infof("GCEAlphaAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil)
	}

	return all, nil
@@ -1880,9 +1880,9 @@ func (g *GCEAlphaAddresses) List(ctx context.Context, region string, fl *filter.

// Insert Address with key of value obj.
func (g *GCEAlphaAddresses) Insert(ctx context.Context, key *meta.Key, obj *alpha.Address) error {
-	glog.V(5).Infof("GCEAlphaAddresses.Insert(%v, %v, %+v): called", ctx, key, obj)
+	klog.V(5).Infof("GCEAlphaAddresses.Insert(%v, %v, %+v): called", ctx, key, obj)
	if !key.Valid() {
-		glog.V(2).Infof("GCEAlphaAddresses.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEAlphaAddresses.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
		return fmt.Errorf("invalid GCE key (%+v)", key)
	}
	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Addresses")
@@ -1892,9 +1892,9 @@ func (g *GCEAlphaAddresses) Insert(ctx context.Context, key *meta.Key, obj *alph
		Version:   meta.Version("alpha"),
		Service:   "Addresses",
	}
-	glog.V(5).Infof("GCEAlphaAddresses.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEAlphaAddresses.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEAlphaAddresses.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEAlphaAddresses.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
		return err
	}
	obj.Name = key.Name
@@ -1903,20 +1903,20 @@ func (g *GCEAlphaAddresses) Insert(ctx context.Context, key *meta.Key, obj *alph

	op, err := call.Do()
	if err != nil {
-		glog.V(4).Infof("GCEAlphaAddresses.Insert(%v, %v, ...) = %+v", ctx, key, err)
+		klog.V(4).Infof("GCEAlphaAddresses.Insert(%v, %v, ...) = %+v", ctx, key, err)
		return err
	}

	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEAlphaAddresses.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err)
+	klog.V(4).Infof("GCEAlphaAddresses.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err)
	return err
}

// Delete the Address referenced by key.
func (g *GCEAlphaAddresses) Delete(ctx context.Context, key *meta.Key) error {
-	glog.V(5).Infof("GCEAlphaAddresses.Delete(%v, %v): called", ctx, key)
+	klog.V(5).Infof("GCEAlphaAddresses.Delete(%v, %v): called", ctx, key)
	if !key.Valid() {
-		glog.V(2).Infof("GCEAlphaAddresses.Delete(%v, %v): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEAlphaAddresses.Delete(%v, %v): key is invalid (%#v)", ctx, key, key)
		return fmt.Errorf("invalid GCE key (%+v)", key)
	}
	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Addresses")
@@ -1926,9 +1926,9 @@ func (g *GCEAlphaAddresses) Delete(ctx context.Context, key *meta.Key) error {
		Version:   meta.Version("alpha"),
		Service:   "Addresses",
	}
-	glog.V(5).Infof("GCEAlphaAddresses.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEAlphaAddresses.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEAlphaAddresses.Delete(%v, %v): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEAlphaAddresses.Delete(%v, %v): RateLimiter error: %v", ctx, key, err)
		return err
	}
	call := g.s.Alpha.Addresses.Delete(projectID, key.Region, key.Name)
@@ -1936,12 +1936,12 @@ func (g *GCEAlphaAddresses) Delete(ctx context.Context, key *meta.Key) error {

	op, err := call.Do()
	if err != nil {
-		glog.V(4).Infof("GCEAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err)
+		klog.V(4).Infof("GCEAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err)
		return err
	}

	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err)
+	klog.V(4).Infof("GCEAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err)
	return err
}

@@ -2000,7 +2000,7 @@ type MockBetaAddresses struct {
func (m *MockBetaAddresses) Get(ctx context.Context, key *meta.Key) (*beta.Address, error) {
	if m.GetHook != nil {
		if intercept, obj, err := m.GetHook(ctx, key, m); intercept {
-			glog.V(5).Infof("MockBetaAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
+			klog.V(5).Infof("MockBetaAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
			return obj, err
		}
	}
@@ -2012,12 +2012,12 @@ func (m *MockBetaAddresses) Get(ctx context.Context, key *meta.Key) (*beta.Addre
	defer m.Lock.Unlock()

	if err, ok := m.GetError[*key]; ok {
-		glog.V(5).Infof("MockBetaAddresses.Get(%v, %s) = nil, %v", ctx, key, err)
+		klog.V(5).Infof("MockBetaAddresses.Get(%v, %s) = nil, %v", ctx, key, err)
		return nil, err
	}
	if obj, ok := m.Objects[*key]; ok {
		typedObj := obj.ToBeta()
-		glog.V(5).Infof("MockBetaAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
+		klog.V(5).Infof("MockBetaAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
		return typedObj, nil
	}

@@ -2025,7 +2025,7 @@ func (m *MockBetaAddresses) Get(ctx context.Context, key *meta.Key) (*beta.Addre
		Code:    http.StatusNotFound,
		Message: fmt.Sprintf("MockBetaAddresses %v not found", key),
	}
-	glog.V(5).Infof("MockBetaAddresses.Get(%v, %s) = nil, %v", ctx, key, err)
+	klog.V(5).Infof("MockBetaAddresses.Get(%v, %s) = nil, %v", ctx, key, err)
	return nil, err
}

@@ -2033,7 +2033,7 @@ func (m *MockBetaAddresses) Get(ctx context.Context, key *meta.Key) (*beta.Addre
func (m *MockBetaAddresses) List(ctx context.Context, region string, fl *filter.F) ([]*beta.Address, error) {
	if m.ListHook != nil {
		if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept {
-			glog.V(5).Infof("MockBetaAddresses.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err)
+			klog.V(5).Infof("MockBetaAddresses.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err)
			return objs, err
		}
	}
@@ -2043,7 +2043,7 @@ func (m *MockBetaAddresses) List(ctx context.Context, region string, fl *filter.

	if m.ListError != nil {
		err := *m.ListError
-		glog.V(5).Infof("MockBetaAddresses.List(%v, %q, %v) = nil, %v", ctx, region, fl, err)
+		klog.V(5).Infof("MockBetaAddresses.List(%v, %q, %v) = nil, %v", ctx, region, fl, err)
		return nil, *m.ListError
	}

@@ -2059,7 +2059,7 @@ func (m *MockBetaAddresses) List(ctx context.Context, region string, fl *filter.
		objs = append(objs, obj.ToBeta())
	}

-	glog.V(5).Infof("MockBetaAddresses.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs))
+	klog.V(5).Infof("MockBetaAddresses.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs))
	return objs, nil
}

@@ -2067,7 +2067,7 @@ func (m *MockBetaAddresses) List(ctx context.Context, region string, fl *filter.
func (m *MockBetaAddresses) Insert(ctx context.Context, key *meta.Key, obj *beta.Address) error {
	if m.InsertHook != nil {
		if intercept, err := m.InsertHook(ctx, key, obj, m); intercept {
-			glog.V(5).Infof("MockBetaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
+			klog.V(5).Infof("MockBetaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
			return err
		}
	}
@@ -2079,7 +2079,7 @@ func (m *MockBetaAddresses) Insert(ctx context.Context, key *meta.Key, obj *beta
	defer m.Lock.Unlock()

	if err, ok := m.InsertError[*key]; ok {
-		glog.V(5).Infof("MockBetaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
+		klog.V(5).Infof("MockBetaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
		return err
	}
	if _, ok := m.Objects[*key]; ok {
@@ -2087,7 +2087,7 @@ func (m *MockBetaAddresses) Insert(ctx context.Context, key *meta.Key, obj *beta
			Code:    http.StatusConflict,
			Message: fmt.Sprintf("MockBetaAddresses %v exists", key),
		}
-		glog.V(5).Infof("MockBetaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
+		klog.V(5).Infof("MockBetaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
		return err
	}

@@ -2096,7 +2096,7 @@ func (m *MockBetaAddresses) Insert(ctx context.Context, key *meta.Key, obj *beta
	obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "addresses", key)

	m.Objects[*key] = &MockAddressesObj{obj}
-	glog.V(5).Infof("MockBetaAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj)
+	klog.V(5).Infof("MockBetaAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj)
	return nil
}

@@ -2104,7 +2104,7 @@ func (m *MockBetaAddresses) Insert(ctx context.Context, key *meta.Key, obj *beta
func (m *MockBetaAddresses) Delete(ctx context.Context, key *meta.Key) error {
	if m.DeleteHook != nil {
		if intercept, err := m.DeleteHook(ctx, key, m); intercept {
-			glog.V(5).Infof("MockBetaAddresses.Delete(%v, %v) = %v", ctx, key, err)
+			klog.V(5).Infof("MockBetaAddresses.Delete(%v, %v) = %v", ctx, key, err)
			return err
		}
	}
@@ -2116,7 +2116,7 @@ func (m *MockBetaAddresses) Delete(ctx context.Context, key *meta.Key) error {
	defer m.Lock.Unlock()

	if err, ok := m.DeleteError[*key]; ok {
-		glog.V(5).Infof("MockBetaAddresses.Delete(%v, %v) = %v", ctx, key, err)
+		klog.V(5).Infof("MockBetaAddresses.Delete(%v, %v) = %v", ctx, key, err)
		return err
	}
	if _, ok := m.Objects[*key]; !ok {
@@ -2124,12 +2124,12 @@ func (m *MockBetaAddresses) Delete(ctx context.Context, key *meta.Key) error {
			Code:    http.StatusNotFound,
			Message: fmt.Sprintf("MockBetaAddresses %v not found", key),
		}
-		glog.V(5).Infof("MockBetaAddresses.Delete(%v, %v) = %v", ctx, key, err)
+		klog.V(5).Infof("MockBetaAddresses.Delete(%v, %v) = %v", ctx, key, err)
		return err
	}

	delete(m.Objects, *key)
-	glog.V(5).Infof("MockBetaAddresses.Delete(%v, %v) = nil", ctx, key)
+	klog.V(5).Infof("MockBetaAddresses.Delete(%v, %v) = nil", ctx, key)
	return nil
}

@@ -2145,10 +2145,10 @@ type GCEBetaAddresses struct {

// Get the Address named by key.
func (g *GCEBetaAddresses) Get(ctx context.Context, key *meta.Key) (*beta.Address, error) {
-	glog.V(5).Infof("GCEBetaAddresses.Get(%v, %v): called", ctx, key)
+	klog.V(5).Infof("GCEBetaAddresses.Get(%v, %v): called", ctx, key)

	if !key.Valid() {
-		glog.V(2).Infof("GCEBetaAddresses.Get(%v, %v): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEBetaAddresses.Get(%v, %v): key is invalid (%#v)", ctx, key, key)
		return nil, fmt.Errorf("invalid GCE key (%#v)", key)
	}
	projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Addresses")
@@ -2158,21 +2158,21 @@ func (g *GCEBetaAddresses) Get(ctx context.Context, key *meta.Key) (*beta.Addres
		Version:   meta.Version("beta"),
		Service:   "Addresses",
	}
-	glog.V(5).Infof("GCEBetaAddresses.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEBetaAddresses.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEBetaAddresses.Get(%v, %v): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEBetaAddresses.Get(%v, %v): RateLimiter error: %v", ctx, key, err)
		return nil, err
	}
	call := g.s.Beta.Addresses.Get(projectID, key.Region, key.Name)
	call.Context(ctx)
	v, err := call.Do()
-	glog.V(4).Infof("GCEBetaAddresses.Get(%v, %v) = %+v, %v", ctx, key, v, err)
+	klog.V(4).Infof("GCEBetaAddresses.Get(%v, %v) = %+v, %v", ctx, key, v, err)
	return v, err
}

// List all Address objects.
func (g *GCEBetaAddresses) List(ctx context.Context, region string, fl *filter.F) ([]*beta.Address, error) {
-	glog.V(5).Infof("GCEBetaAddresses.List(%v, %v, %v) called", ctx, region, fl)
+	klog.V(5).Infof("GCEBetaAddresses.List(%v, %v, %v) called", ctx, region, fl)
	projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Addresses")
	rk := &RateLimitKey{
		ProjectID: projectID,
@@ -2183,30 +2183,30 @@ func (g *GCEBetaAddresses) List(ctx context.Context, region string, fl *filter.F
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return nil, err
	}
-	glog.V(5).Infof("GCEBetaAddresses.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk)
+	klog.V(5).Infof("GCEBetaAddresses.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk)
	call := g.s.Beta.Addresses.List(projectID, region)
	if fl != filter.None {
		call.Filter(fl.String())
	}
	var all []*beta.Address
	f := func(l *beta.AddressList) error {
-		glog.V(5).Infof("GCEBetaAddresses.List(%v, ..., %v): page %+v", ctx, fl, l)
+		klog.V(5).Infof("GCEBetaAddresses.List(%v, ..., %v): page %+v", ctx, fl, l)
		all = append(all, l.Items...)
		return nil
	}
	if err := call.Pages(ctx, f); err != nil {
-		glog.V(4).Infof("GCEBetaAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err)
+		klog.V(4).Infof("GCEBetaAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err)
		return nil, err
	}
-	if glog.V(4) {
-		glog.V(4).Infof("GCEBetaAddresses.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil)
-	} else if glog.V(5) {
+	if klog.V(4) {
+		klog.V(4).Infof("GCEBetaAddresses.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil)
+	} else if klog.V(5) {
		var asStr []string
		for _, o := range all {
			asStr = append(asStr, fmt.Sprintf("%+v", o))
		}
-		glog.V(5).Infof("GCEBetaAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil)
+		klog.V(5).Infof("GCEBetaAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil)
	}

	return all, nil
@@ -2214,9 +2214,9 @@ func (g *GCEBetaAddresses) List(ctx context.Context, region string, fl *filter.F

// Insert Address with key of value obj.
func (g *GCEBetaAddresses) Insert(ctx context.Context, key *meta.Key, obj *beta.Address) error {
-	glog.V(5).Infof("GCEBetaAddresses.Insert(%v, %v, %+v): called", ctx, key, obj)
+	klog.V(5).Infof("GCEBetaAddresses.Insert(%v, %v, %+v): called", ctx, key, obj)
	if !key.Valid() {
-		glog.V(2).Infof("GCEBetaAddresses.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEBetaAddresses.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
		return fmt.Errorf("invalid GCE key (%+v)", key)
	}
	projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Addresses")
@@ -2226,9 +2226,9 @@ func (g *GCEBetaAddresses) Insert(ctx context.Context, key *meta.Key, obj *beta.
		Version:   meta.Version("beta"),
		Service:   "Addresses",
	}
-	glog.V(5).Infof("GCEBetaAddresses.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEBetaAddresses.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEBetaAddresses.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEBetaAddresses.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
		return err
	}
	obj.Name = key.Name
@@ -2237,20 +2237,20 @@ func (g *GCEBetaAddresses) Insert(ctx context.Context, key *meta.Key, obj *beta.

	op, err := call.Do()
	if err != nil {
-		glog.V(4).Infof("GCEBetaAddresses.Insert(%v, %v, ...) = %+v", ctx, key, err)
+		klog.V(4).Infof("GCEBetaAddresses.Insert(%v, %v, ...) = %+v", ctx, key, err)
		return err
	}

	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEBetaAddresses.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err)
+	klog.V(4).Infof("GCEBetaAddresses.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err)
	return err
}

// Delete the Address referenced by key.
func (g *GCEBetaAddresses) Delete(ctx context.Context, key *meta.Key) error {
-	glog.V(5).Infof("GCEBetaAddresses.Delete(%v, %v): called", ctx, key)
+	klog.V(5).Infof("GCEBetaAddresses.Delete(%v, %v): called", ctx, key)
	if !key.Valid() {
-		glog.V(2).Infof("GCEBetaAddresses.Delete(%v, %v): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEBetaAddresses.Delete(%v, %v): key is invalid (%#v)", ctx, key, key)
		return fmt.Errorf("invalid GCE key (%+v)", key)
	}
	projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Addresses")
@@ -2260,9 +2260,9 @@ func (g *GCEBetaAddresses) Delete(ctx context.Context, key *meta.Key) error {
		Version:   meta.Version("beta"),
		Service:   "Addresses",
	}
-	glog.V(5).Infof("GCEBetaAddresses.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEBetaAddresses.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEBetaAddresses.Delete(%v, %v): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEBetaAddresses.Delete(%v, %v): RateLimiter error: %v", ctx, key, err)
		return err
	}
	call := g.s.Beta.Addresses.Delete(projectID, key.Region, key.Name)
@@ -2270,12 +2270,12 @@ func (g *GCEBetaAddresses) Delete(ctx context.Context, key *meta.Key) error {

	op, err := call.Do()
	if err != nil {
-		glog.V(4).Infof("GCEBetaAddresses.Delete(%v, %v) = %v", ctx, key, err)
+		klog.V(4).Infof("GCEBetaAddresses.Delete(%v, %v) = %v", ctx, key, err)
		return err
	}

	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEBetaAddresses.Delete(%v, %v) = %v", ctx, key, err)
+	klog.V(4).Infof("GCEBetaAddresses.Delete(%v, %v) = %v", ctx, key, err)
	return err
}

@@ -2334,7 +2334,7 @@ type MockGlobalAddresses struct {
func (m *MockGlobalAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Address, error) {
	if m.GetHook != nil {
		if intercept, obj, err := m.GetHook(ctx, key, m); intercept {
-			glog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
+			klog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
			return obj, err
		}
	}
@@ -2346,12 +2346,12 @@ func (m *MockGlobalAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Addre
	defer m.Lock.Unlock()

	if err, ok := m.GetError[*key]; ok {
-		glog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = nil, %v", ctx, key, err)
+		klog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = nil, %v", ctx, key, err)
		return nil, err
	}
	if obj, ok := m.Objects[*key]; ok {
		typedObj := obj.ToGA()
-		glog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
+		klog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
		return typedObj, nil
	}

@@ -2359,7 +2359,7 @@ func (m *MockGlobalAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Addre
		Code:    http.StatusNotFound,
		Message: fmt.Sprintf("MockGlobalAddresses %v not found", key),
	}
-	glog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = nil, %v", ctx, key, err)
+	klog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = nil, %v", ctx, key, err)
	return nil, err
}

@@ -2367,7 +2367,7 @@ func (m *MockGlobalAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Addre
func (m *MockGlobalAddresses) List(ctx context.Context, fl *filter.F) ([]*ga.Address, error) {
	if m.ListHook != nil {
		if intercept, objs, err := m.ListHook(ctx, fl, m); intercept {
-			glog.V(5).Infof("MockGlobalAddresses.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err)
+			klog.V(5).Infof("MockGlobalAddresses.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err)
			return objs, err
		}
	}
@@ -2377,7 +2377,7 @@ func (m *MockGlobalAddresses) List(ctx context.Context, fl *filter.F) ([]*ga.Add

	if m.ListError != nil {
		err := *m.ListError
-		glog.V(5).Infof("MockGlobalAddresses.List(%v, %v) = nil, %v", ctx, fl, err)
+		klog.V(5).Infof("MockGlobalAddresses.List(%v, %v) = nil, %v", ctx, fl, err)
		return nil, *m.ListError
	}

@@ -2390,7 +2390,7 @@ func (m *MockGlobalAddresses) List(ctx context.Context, fl *filter.F) ([]*ga.Add
		objs = append(objs, obj.ToGA())
	}

-	glog.V(5).Infof("MockGlobalAddresses.List(%v, %v) = [%v items], nil", ctx, fl, len(objs))
+	klog.V(5).Infof("MockGlobalAddresses.List(%v, %v) = [%v items], nil", ctx, fl, len(objs))
	return objs, nil
}

@@ -2398,7 +2398,7 @@ func (m *MockGlobalAddresses) List(ctx context.Context, fl *filter.F) ([]*ga.Add
func (m *MockGlobalAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Address) error {
	if m.InsertHook != nil {
		if intercept, err := m.InsertHook(ctx, key, obj, m); intercept {
-			glog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
+			klog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
			return err
		}
	}
@@ -2410,7 +2410,7 @@ func (m *MockGlobalAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga
	defer m.Lock.Unlock()

	if err, ok := m.InsertError[*key]; ok {
-		glog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
+		klog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
		return err
	}
	if _, ok := m.Objects[*key]; ok {
@@ -2418,7 +2418,7 @@ func (m *MockGlobalAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga
			Code:    http.StatusConflict,
			Message: fmt.Sprintf("MockGlobalAddresses %v exists", key),
		}
-		glog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
+		klog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
		return err
	}

@@ -2427,7 +2427,7 @@ func (m *MockGlobalAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga
	obj.SelfLink = SelfLink(meta.VersionGA, projectID, "addresses", key)

	m.Objects[*key] = &MockGlobalAddressesObj{obj}
-	glog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj)
+	klog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj)
	return nil
}

@@ -2435,7 +2435,7 @@ func (m *MockGlobalAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga
func (m *MockGlobalAddresses) Delete(ctx context.Context, key *meta.Key) error {
	if m.DeleteHook != nil {
		if intercept, err := m.DeleteHook(ctx, key, m); intercept {
-			glog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err)
+			klog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err)
			return err
		}
	}
@@ -2447,7 +2447,7 @@ func (m *MockGlobalAddresses) Delete(ctx context.Context, key *meta.Key) error {
	defer m.Lock.Unlock()

	if err, ok := m.DeleteError[*key]; ok {
-		glog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err)
+		klog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err)
		return err
	}
	if _, ok := m.Objects[*key]; !ok {
@@ -2455,12 +2455,12 @@ func (m *MockGlobalAddresses) Delete(ctx context.Context, key *meta.Key) error {
			Code:    http.StatusNotFound,
			Message: fmt.Sprintf("MockGlobalAddresses %v not found", key),
		}
-		glog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err)
+		klog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err)
		return err
	}

	delete(m.Objects, *key)
-	glog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = nil", ctx, key)
+	klog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = nil", ctx, key)
	return nil
}

@@ -2476,10 +2476,10 @@ type GCEGlobalAddresses struct {

// Get the Address named by key.
func (g *GCEGlobalAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Address, error) {
-	glog.V(5).Infof("GCEGlobalAddresses.Get(%v, %v): called", ctx, key)
+	klog.V(5).Infof("GCEGlobalAddresses.Get(%v, %v): called", ctx, key)

	if !key.Valid() {
-		glog.V(2).Infof("GCEGlobalAddresses.Get(%v, %v): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEGlobalAddresses.Get(%v, %v): key is invalid (%#v)", ctx, key, key)
		return nil, fmt.Errorf("invalid GCE key (%#v)", key)
	}
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalAddresses")
@@ -2489,21 +2489,21 @@ func (g *GCEGlobalAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Addres
		Version:   meta.Version("ga"),
		Service:   "GlobalAddresses",
	}
-	glog.V(5).Infof("GCEGlobalAddresses.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEGlobalAddresses.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEGlobalAddresses.Get(%v, %v): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEGlobalAddresses.Get(%v, %v): RateLimiter error: %v", ctx, key, err)
		return nil, err
	}
	call := g.s.GA.GlobalAddresses.Get(projectID, key.Name)
	call.Context(ctx)
	v, err := call.Do()
-	glog.V(4).Infof("GCEGlobalAddresses.Get(%v, %v) = %+v, %v", ctx, key, v, err)
+	klog.V(4).Infof("GCEGlobalAddresses.Get(%v, %v) = %+v, %v", ctx, key, v, err)
	return v, err
}

// List all Address objects.
func (g *GCEGlobalAddresses) List(ctx context.Context, fl *filter.F) ([]*ga.Address, error) {
-	glog.V(5).Infof("GCEGlobalAddresses.List(%v, %v) called", ctx, fl)
+	klog.V(5).Infof("GCEGlobalAddresses.List(%v, %v) called", ctx, fl)
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalAddresses")
	rk := &RateLimitKey{
		ProjectID: projectID,
@@ -2514,30 +2514,30 @@ func (g *GCEGlobalAddresses) List(ctx context.Context, fl *filter.F) ([]*ga.Addr
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
		return nil, err
	}
-	glog.V(5).Infof("GCEGlobalAddresses.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk)
+	klog.V(5).Infof("GCEGlobalAddresses.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk)
	call := g.s.GA.GlobalAddresses.List(projectID)
	if fl != filter.None {
		call.Filter(fl.String())
	}
	var all []*ga.Address
	f := func(l *ga.AddressList) error {
-		glog.V(5).Infof("GCEGlobalAddresses.List(%v, ..., %v): page %+v", ctx, fl, l)
+		klog.V(5).Infof("GCEGlobalAddresses.List(%v, ..., %v): page %+v", ctx, fl, l)
		all = append(all, l.Items...)
		return nil
	}
	if err := call.Pages(ctx, f); err != nil {
-		glog.V(4).Infof("GCEGlobalAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err)
+		klog.V(4).Infof("GCEGlobalAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err)
		return nil, err
	}
-	if glog.V(4) {
-		glog.V(4).Infof("GCEGlobalAddresses.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil)
-	} else if glog.V(5) {
+	if klog.V(4) {
+		klog.V(4).Infof("GCEGlobalAddresses.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil)
+	} else if klog.V(5) {
		var asStr []string
		for _, o := range all {
			asStr = append(asStr, fmt.Sprintf("%+v", o))
		}
-		glog.V(5).Infof("GCEGlobalAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil)
+		klog.V(5).Infof("GCEGlobalAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil)
	}

	return all, nil
@@ -2545,9 +2545,9 @@ func (g *GCEGlobalAddresses) List(ctx context.Context, fl *filter.F) ([]*ga.Addr

// Insert Address with key of value obj.
func (g *GCEGlobalAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Address) error {
-	glog.V(5).Infof("GCEGlobalAddresses.Insert(%v, %v, %+v): called", ctx, key, obj)
+	klog.V(5).Infof("GCEGlobalAddresses.Insert(%v, %v, %+v): called", ctx, key, obj)
	if !key.Valid() {
-		glog.V(2).Infof("GCEGlobalAddresses.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEGlobalAddresses.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
		return fmt.Errorf("invalid GCE key (%+v)", key)
	}
	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalAddresses")
@@ -2557,9 +2557,9 @@ func (g *GCEGlobalAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.
		Version:   meta.Version("ga"),
		Service:   "GlobalAddresses",
	}
-	glog.V(5).Infof("GCEGlobalAddresses.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEGlobalAddresses.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEGlobalAddresses.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEGlobalAddresses.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
		return err
	}
	obj.Name = key.Name
@@ -2568,20 +2568,20 @@ func (g *GCEGlobalAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.

	op, err := call.Do()
	if err != nil {
-		glog.V(4).Infof("GCEGlobalAddresses.Insert(%v, %v, ...) = %+v", ctx, key, err)
+		klog.V(4).Infof("GCEGlobalAddresses.Insert(%v, %v, ...) = %+v", ctx, key, err)
		return err
	}

	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEGlobalAddresses.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err)
+	klog.V(4).Infof("GCEGlobalAddresses.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err)
	return err
}

// Delete the Address referenced by key.
func (g *GCEGlobalAddresses) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEGlobalAddresses.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEGlobalAddresses.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEGlobalAddresses.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEGlobalAddresses.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalAddresses") @@ -2591,9 +2591,9 @@ func (g *GCEGlobalAddresses) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "GlobalAddresses", } - glog.V(5).Infof("GCEGlobalAddresses.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEGlobalAddresses.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEGlobalAddresses.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalAddresses.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.GlobalAddresses.Delete(projectID, key.Name) @@ -2602,12 +2602,12 @@ func (g *GCEGlobalAddresses) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } @@ -2672,7 +2672,7 @@ type MockBackendServices struct { func (m *MockBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.BackendService, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -2684,12 +2684,12 @@ func (m *MockBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.Backe defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -2697,7 +2697,7 @@ func (m *MockBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.Backe Code: http.StatusNotFound, Message: fmt.Sprintf("MockBackendServices %v not found", key), } - glog.V(5).Infof("MockBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -2705,7 +2705,7 @@ func (m *MockBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.Backe func (m *MockBackendServices) List(ctx context.Context, fl *filter.F) ([]*ga.BackendService, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockBackendServices.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + 
klog.V(5).Infof("MockBackendServices.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -2715,7 +2715,7 @@ func (m *MockBackendServices) List(ctx context.Context, fl *filter.F) ([]*ga.Bac if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockBackendServices.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockBackendServices.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -2728,7 +2728,7 @@ func (m *MockBackendServices) List(ctx context.Context, fl *filter.F) ([]*ga.Bac objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockBackendServices.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockBackendServices.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -2736,7 +2736,7 @@ func (m *MockBackendServices) List(ctx context.Context, fl *filter.F) ([]*ga.Bac func (m *MockBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga.BackendService) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -2748,7 +2748,7 @@ func (m *MockBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -2756,7 +2756,7 @@ func (m *MockBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga Code: http.StatusConflict, Message: fmt.Sprintf("MockBackendServices %v exists", key), } - glog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -2765,7 +2765,7 @@ func (m *MockBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga obj.SelfLink = SelfLink(meta.VersionGA, projectID, "backendServices", key) m.Objects[*key] = &MockBackendServicesObj{obj} - glog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -2773,7 +2773,7 @@ func (m *MockBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga func (m *MockBackendServices) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -2785,7 +2785,7 @@ func (m *MockBackendServices) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -2793,12 +2793,12 @@ func (m *MockBackendServices) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockBackendServices %v not found", key), } - glog.V(5).Infof("MockBackendServices.Delete(%v, %v) = %v", ctx, key, err) + 
klog.V(5).Infof("MockBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockBackendServices.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockBackendServices.Delete(%v, %v) = nil", ctx, key) return nil } @@ -2838,10 +2838,10 @@ type GCEBackendServices struct { // Get the BackendService named by key. func (g *GCEBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.BackendService, error) { - glog.V(5).Infof("GCEBackendServices.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBackendServices.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") @@ -2851,21 +2851,21 @@ func (g *GCEBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.Backen Version: meta.Version("ga"), Service: "BackendServices", } - glog.V(5).Infof("GCEBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.BackendServices.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all BackendService objects. func (g *GCEBackendServices) List(ctx context.Context, fl *filter.F) ([]*ga.BackendService, error) { - glog.V(5).Infof("GCEBackendServices.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEBackendServices.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") rk := &RateLimitKey{ ProjectID: projectID, @@ -2876,30 +2876,30 @@ func (g *GCEBackendServices) List(ctx context.Context, fl *filter.F) ([]*ga.Back if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEBackendServices.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEBackendServices.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.BackendServices.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.BackendService f := func(l *ga.BackendServiceList) error { - glog.V(5).Infof("GCEBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -2907,9 +2907,9 @@ func (g *GCEBackendServices) List(ctx context.Context, fl *filter.F) ([]*ga.Back // Insert BackendService with key of value obj. func (g *GCEBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga.BackendService) error { - glog.V(5).Infof("GCEBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") @@ -2919,9 +2919,9 @@ func (g *GCEBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga. Version: meta.Version("ga"), Service: "BackendServices", } - glog.V(5).Infof("GCEBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -2930,20 +2930,20 @@ func (g *GCEBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga. op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the BackendService referenced by key. 
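// [Editorial aside - a sketch, not part of the generated diff.] The change
// in this file is a mechanical glog -> klog rename, but one operational
// difference is worth flagging: unlike glog, klog does not register its
// command-line flags (-v, -logtostderr, ...) as an import side effect. A
// binary consuming this package is expected to wire them up once at
// startup, roughly like so (assuming k8s.io/klog v1):
//
//     package main
//
//     import (
//         "flag"
//
//         "k8s.io/klog"
//     )
//
//     func main() {
//         klog.InitFlags(nil) // nil registers the flags onto flag.CommandLine
//         flag.Parse()
//         klog.V(5).Infof("emitted only when running with -v=5 or higher")
//     }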
func (g *GCEBackendServices) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEBackendServices.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBackendServices.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") @@ -2953,9 +2953,9 @@ func (g *GCEBackendServices) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "BackendServices", } - glog.V(5).Infof("GCEBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.BackendServices.Delete(projectID, key.Name) @@ -2964,21 +2964,21 @@ func (g *GCEBackendServices) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } // GetHealth is a method on GCEBackendServices. func (g *GCEBackendServices) GetHealth(ctx context.Context, key *meta.Key, arg0 *ga.ResourceGroupReference) (*ga.BackendServiceGroupHealth, error) { - glog.V(5).Infof("GCEBackendServices.GetHealth(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBackendServices.GetHealth(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBackendServices.GetHealth(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBackendServices.GetHealth(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") @@ -2988,25 +2988,25 @@ func (g *GCEBackendServices) GetHealth(ctx context.Context, key *meta.Key, arg0 Version: meta.Version("ga"), Service: "BackendServices", } - glog.V(5).Infof("GCEBackendServices.GetHealth(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBackendServices.GetHealth(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBackendServices.GetHealth(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.GetHealth(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.BackendServices.GetHealth(projectID, key.Name, arg0) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEBackendServices.GetHealth(%v, %v, ...) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBackendServices.GetHealth(%v, %v, ...) = %+v, %v", ctx, key, v, err) return v, err } // Patch is a method on GCEBackendServices. 
func (g *GCEBackendServices) Patch(ctx context.Context, key *meta.Key, arg0 *ga.BackendService) error { - glog.V(5).Infof("GCEBackendServices.Patch(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBackendServices.Patch(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBackendServices.Patch(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBackendServices.Patch(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") @@ -3016,30 +3016,30 @@ func (g *GCEBackendServices) Patch(ctx context.Context, key *meta.Key, arg0 *ga. Version: meta.Version("ga"), Service: "BackendServices", } - glog.V(5).Infof("GCEBackendServices.Patch(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBackendServices.Patch(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBackendServices.Patch(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Patch(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.BackendServices.Patch(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBackendServices.Patch(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Patch(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBackendServices.Patch(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Patch(%v, %v, ...) = %+v", ctx, key, err) return err } // Update is a method on GCEBackendServices. func (g *GCEBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *ga.BackendService) error { - glog.V(5).Infof("GCEBackendServices.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBackendServices.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") @@ -3049,21 +3049,21 @@ func (g *GCEBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *ga Version: meta.Version("ga"), Service: "BackendServices", } - glog.V(5).Infof("GCEBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.BackendServices.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Update(%v, %v, ...) 
= %+v", ctx, key, err) return err } @@ -3126,7 +3126,7 @@ type MockBetaBackendServices struct { func (m *MockBetaBackendServices) Get(ctx context.Context, key *meta.Key) (*beta.BackendService, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -3138,12 +3138,12 @@ func (m *MockBetaBackendServices) Get(ctx context.Context, key *meta.Key) (*beta defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockBetaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToBeta() - glog.V(5).Infof("MockBetaBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockBetaBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -3151,7 +3151,7 @@ func (m *MockBetaBackendServices) Get(ctx context.Context, key *meta.Key) (*beta Code: http.StatusNotFound, Message: fmt.Sprintf("MockBetaBackendServices %v not found", key), } - glog.V(5).Infof("MockBetaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -3159,7 +3159,7 @@ func (m *MockBetaBackendServices) Get(ctx context.Context, key *meta.Key) (*beta func (m *MockBetaBackendServices) List(ctx context.Context, fl *filter.F) ([]*beta.BackendService, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockBetaBackendServices.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockBetaBackendServices.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -3169,7 +3169,7 @@ func (m *MockBetaBackendServices) List(ctx context.Context, fl *filter.F) ([]*be if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockBetaBackendServices.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockBetaBackendServices.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -3182,7 +3182,7 @@ func (m *MockBetaBackendServices) List(ctx context.Context, fl *filter.F) ([]*be objs = append(objs, obj.ToBeta()) } - glog.V(5).Infof("MockBetaBackendServices.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockBetaBackendServices.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -3190,7 +3190,7 @@ func (m *MockBetaBackendServices) List(ctx context.Context, fl *filter.F) ([]*be func (m *MockBetaBackendServices) Insert(ctx context.Context, key *meta.Key, obj *beta.BackendService) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockBetaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -3202,7 +3202,7 @@ func (m *MockBetaBackendServices) Insert(ctx context.Context, key *meta.Key, obj defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockBetaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := 
m.Objects[*key]; ok { @@ -3210,7 +3210,7 @@ func (m *MockBetaBackendServices) Insert(ctx context.Context, key *meta.Key, obj Code: http.StatusConflict, Message: fmt.Sprintf("MockBetaBackendServices %v exists", key), } - glog.V(5).Infof("MockBetaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -3219,7 +3219,7 @@ func (m *MockBetaBackendServices) Insert(ctx context.Context, key *meta.Key, obj obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "backendServices", key) m.Objects[*key] = &MockBackendServicesObj{obj} - glog.V(5).Infof("MockBetaBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockBetaBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -3227,7 +3227,7 @@ func (m *MockBetaBackendServices) Insert(ctx context.Context, key *meta.Key, obj func (m *MockBetaBackendServices) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -3239,7 +3239,7 @@ func (m *MockBetaBackendServices) Delete(ctx context.Context, key *meta.Key) err defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -3247,12 +3247,12 @@ func (m *MockBetaBackendServices) Delete(ctx context.Context, key *meta.Key) err Code: http.StatusNotFound, Message: fmt.Sprintf("MockBetaBackendServices %v not found", key), } - glog.V(5).Infof("MockBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockBetaBackendServices.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockBetaBackendServices.Delete(%v, %v) = nil", ctx, key) return nil } @@ -3284,10 +3284,10 @@ type GCEBetaBackendServices struct { // Get the BackendService named by key. func (g *GCEBetaBackendServices) Get(ctx context.Context, key *meta.Key) (*beta.BackendService, error) { - glog.V(5).Infof("GCEBetaBackendServices.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBetaBackendServices.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "BackendServices") @@ -3297,21 +3297,21 @@ func (g *GCEBetaBackendServices) Get(ctx context.Context, key *meta.Key) (*beta. 
Version: meta.Version("beta"), Service: "BackendServices", } - glog.V(5).Infof("GCEBetaBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Beta.BackendServices.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEBetaBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBetaBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all BackendService objects. func (g *GCEBetaBackendServices) List(ctx context.Context, fl *filter.F) ([]*beta.BackendService, error) { - glog.V(5).Infof("GCEBetaBackendServices.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEBetaBackendServices.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "BackendServices") rk := &RateLimitKey{ ProjectID: projectID, @@ -3322,30 +3322,30 @@ func (g *GCEBetaBackendServices) List(ctx context.Context, fl *filter.F) ([]*bet if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEBetaBackendServices.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEBetaBackendServices.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.Beta.BackendServices.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*beta.BackendService f := func(l *beta.BackendServiceList) error { - glog.V(5).Infof("GCEBetaBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEBetaBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEBetaBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEBetaBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEBetaBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEBetaBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEBetaBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEBetaBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -3353,9 +3353,9 @@ func (g *GCEBetaBackendServices) List(ctx context.Context, fl *filter.F) ([]*bet // Insert BackendService with key of value obj. 
func (g *GCEBetaBackendServices) Insert(ctx context.Context, key *meta.Key, obj *beta.BackendService) error { - glog.V(5).Infof("GCEBetaBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEBetaBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEBetaBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "BackendServices") @@ -3365,9 +3365,9 @@ func (g *GCEBetaBackendServices) Insert(ctx context.Context, key *meta.Key, obj Version: meta.Version("beta"), Service: "BackendServices", } - glog.V(5).Infof("GCEBetaBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -3376,20 +3376,20 @@ func (g *GCEBetaBackendServices) Insert(ctx context.Context, key *meta.Key, obj op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEBetaBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the BackendService referenced by key. 
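// [Editorial aside - a sketch, not part of the generated diff.] Each GCE*
// wrapper in this file follows the same shape: resolve the project through
// ProjectRouter, build a RateLimitKey{ProjectID, Operation, Version,
// Service}, and call RateLimiter.Accept before issuing the underlying API
// call. Accept receives the context, so an implementation may block until
// quota is available or fail fast on cancellation. A permissive limiter
// satisfying that contract could be as small as:
//
//     type acceptAll struct{}
//
//     // Accept never throttles, but still honors context cancellation.
//     func (acceptAll) Accept(ctx context.Context, key *RateLimitKey) error {
//         return ctx.Err()
//     }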
func (g *GCEBetaBackendServices) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEBetaBackendServices.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBetaBackendServices.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "BackendServices") @@ -3399,9 +3399,9 @@ func (g *GCEBetaBackendServices) Delete(ctx context.Context, key *meta.Key) erro Version: meta.Version("beta"), Service: "BackendServices", } - glog.V(5).Infof("GCEBetaBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.BackendServices.Delete(projectID, key.Name) @@ -3410,21 +3410,21 @@ func (g *GCEBetaBackendServices) Delete(ctx context.Context, key *meta.Key) erro op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } // SetSecurityPolicy is a method on GCEBetaBackendServices. func (g *GCEBetaBackendServices) SetSecurityPolicy(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicyReference) error { - glog.V(5).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "BackendServices") @@ -3434,30 +3434,30 @@ func (g *GCEBetaBackendServices) SetSecurityPolicy(ctx context.Context, key *met Version: meta.Version("beta"), Service: "BackendServices", } - glog.V(5).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.BackendServices.SetSecurityPolicy(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...) 
= %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...) = %+v", ctx, key, err) return err } // Update is a method on GCEBetaBackendServices. func (g *GCEBetaBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *beta.BackendService) error { - glog.V(5).Infof("GCEBetaBackendServices.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaBackendServices.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "BackendServices") @@ -3467,21 +3467,21 @@ func (g *GCEBetaBackendServices) Update(ctx context.Context, key *meta.Key, arg0 Version: meta.Version("beta"), Service: "BackendServices", } - glog.V(5).Infof("GCEBetaBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.BackendServices.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Update(%v, %v, ...) 
= %+v", ctx, key, err) return err } @@ -3544,7 +3544,7 @@ type MockAlphaBackendServices struct { func (m *MockAlphaBackendServices) Get(ctx context.Context, key *meta.Key) (*alpha.BackendService, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -3556,12 +3556,12 @@ func (m *MockAlphaBackendServices) Get(ctx context.Context, key *meta.Key) (*alp defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToAlpha() - glog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -3569,7 +3569,7 @@ func (m *MockAlphaBackendServices) Get(ctx context.Context, key *meta.Key) (*alp Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaBackendServices %v not found", key), } - glog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -3577,7 +3577,7 @@ func (m *MockAlphaBackendServices) Get(ctx context.Context, key *meta.Key) (*alp func (m *MockAlphaBackendServices) List(ctx context.Context, fl *filter.F) ([]*alpha.BackendService, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockAlphaBackendServices.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockAlphaBackendServices.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -3587,7 +3587,7 @@ func (m *MockAlphaBackendServices) List(ctx context.Context, fl *filter.F) ([]*a if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockAlphaBackendServices.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockAlphaBackendServices.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -3600,7 +3600,7 @@ func (m *MockAlphaBackendServices) List(ctx context.Context, fl *filter.F) ([]*a objs = append(objs, obj.ToAlpha()) } - glog.V(5).Infof("MockAlphaBackendServices.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockAlphaBackendServices.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -3608,7 +3608,7 @@ func (m *MockAlphaBackendServices) List(ctx context.Context, fl *filter.F) ([]*a func (m *MockAlphaBackendServices) Insert(ctx context.Context, key *meta.Key, obj *alpha.BackendService) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -3620,7 +3620,7 @@ func (m *MockAlphaBackendServices) Insert(ctx context.Context, key *meta.Key, ob defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return 
err } if _, ok := m.Objects[*key]; ok { @@ -3628,7 +3628,7 @@ func (m *MockAlphaBackendServices) Insert(ctx context.Context, key *meta.Key, ob Code: http.StatusConflict, Message: fmt.Sprintf("MockAlphaBackendServices %v exists", key), } - glog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -3637,7 +3637,7 @@ func (m *MockAlphaBackendServices) Insert(ctx context.Context, key *meta.Key, ob obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "backendServices", key) m.Objects[*key] = &MockBackendServicesObj{obj} - glog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -3645,7 +3645,7 @@ func (m *MockAlphaBackendServices) Insert(ctx context.Context, key *meta.Key, ob func (m *MockAlphaBackendServices) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -3657,7 +3657,7 @@ func (m *MockAlphaBackendServices) Delete(ctx context.Context, key *meta.Key) er defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -3665,12 +3665,12 @@ func (m *MockAlphaBackendServices) Delete(ctx context.Context, key *meta.Key) er Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaBackendServices %v not found", key), } - glog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = nil", ctx, key) return nil } @@ -3702,10 +3702,10 @@ type GCEAlphaBackendServices struct { // Get the BackendService named by key. 
func (g *GCEAlphaBackendServices) Get(ctx context.Context, key *meta.Key) (*alpha.BackendService, error) { - glog.V(5).Infof("GCEAlphaBackendServices.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaBackendServices.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") @@ -3715,21 +3715,21 @@ func (g *GCEAlphaBackendServices) Get(ctx context.Context, key *meta.Key) (*alph Version: meta.Version("alpha"), Service: "BackendServices", } - glog.V(5).Infof("GCEAlphaBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Alpha.BackendServices.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEAlphaBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEAlphaBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all BackendService objects. func (g *GCEAlphaBackendServices) List(ctx context.Context, fl *filter.F) ([]*alpha.BackendService, error) { - glog.V(5).Infof("GCEAlphaBackendServices.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEAlphaBackendServices.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") rk := &RateLimitKey{ ProjectID: projectID, @@ -3740,30 +3740,30 @@ func (g *GCEAlphaBackendServices) List(ctx context.Context, fl *filter.F) ([]*al if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEAlphaBackendServices.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEAlphaBackendServices.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.Alpha.BackendServices.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*alpha.BackendService f := func(l *alpha.BackendServiceList) error { - glog.V(5).Infof("GCEAlphaBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEAlphaBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEAlphaBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEAlphaBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEAlphaBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEAlphaBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEAlphaBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEAlphaBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -3771,9 +3771,9 @@ func (g *GCEAlphaBackendServices) List(ctx context.Context, fl *filter.F) ([]*al // Insert BackendService with key of value obj. func (g *GCEAlphaBackendServices) Insert(ctx context.Context, key *meta.Key, obj *alpha.BackendService) error { - glog.V(5).Infof("GCEAlphaBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEAlphaBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEAlphaBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") @@ -3783,9 +3783,9 @@ func (g *GCEAlphaBackendServices) Insert(ctx context.Context, key *meta.Key, obj Version: meta.Version("alpha"), Service: "BackendServices", } - glog.V(5).Infof("GCEAlphaBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -3794,20 +3794,20 @@ func (g *GCEAlphaBackendServices) Insert(ctx context.Context, key *meta.Key, obj op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEAlphaBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the BackendService referenced by key. 
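// [Editorial aside - a sketch, not part of the generated diff.] The
// `if klog.V(4) { ... } else if klog.V(5) { ... }` branches in the List
// methods compile because, in glog and in the klog release targeted here,
// V() returns a bool-typed Verbose value, so it can gate the O(n)
// fmt.Sprintf pass over `all` behind the verbosity check:
//
//     if klog.V(5) { // true only at verbosity >= 5
//         var asStr []string
//         for _, o := range all { // `all` as built by List above
//             asStr = append(asStr, fmt.Sprintf("%+v", o))
//         }
//         klog.V(5).Infof("List(...) = %v, nil", asStr)
//     }
//
// Two caveats: V(5) implies V(4), so as generated the else-if branch is
// unreachable; and later klog v2 changed V() to return a struct, where the
// guard is spelled klog.V(5).Enabled() instead.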
func (g *GCEAlphaBackendServices) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEAlphaBackendServices.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaBackendServices.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") @@ -3817,9 +3817,9 @@ func (g *GCEAlphaBackendServices) Delete(ctx context.Context, key *meta.Key) err Version: meta.Version("alpha"), Service: "BackendServices", } - glog.V(5).Infof("GCEAlphaBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.BackendServices.Delete(projectID, key.Name) @@ -3828,21 +3828,21 @@ func (g *GCEAlphaBackendServices) Delete(ctx context.Context, key *meta.Key) err op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } // SetSecurityPolicy is a method on GCEAlphaBackendServices. func (g *GCEAlphaBackendServices) SetSecurityPolicy(ctx context.Context, key *meta.Key, arg0 *alpha.SecurityPolicyReference) error { - glog.V(5).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") @@ -3852,30 +3852,30 @@ func (g *GCEAlphaBackendServices) SetSecurityPolicy(ctx context.Context, key *me Version: meta.Version("alpha"), Service: "BackendServices", } - glog.V(5).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.BackendServices.SetSecurityPolicy(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...) 
= %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...) = %+v", ctx, key, err) return err } // Update is a method on GCEAlphaBackendServices. func (g *GCEAlphaBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *alpha.BackendService) error { - glog.V(5).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") @@ -3885,21 +3885,21 @@ func (g *GCEAlphaBackendServices) Update(ctx context.Context, key *meta.Key, arg Version: meta.Version("alpha"), Service: "BackendServices", } - glog.V(5).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.BackendServices.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Update(%v, %v, ...) 
= %+v", ctx, key, err) return err } @@ -3962,7 +3962,7 @@ type MockRegionBackendServices struct { func (m *MockRegionBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.BackendService, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockRegionBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockRegionBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -3974,12 +3974,12 @@ func (m *MockRegionBackendServices) Get(ctx context.Context, key *meta.Key) (*ga defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockRegionBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockRegionBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -3987,7 +3987,7 @@ func (m *MockRegionBackendServices) Get(ctx context.Context, key *meta.Key) (*ga Code: http.StatusNotFound, Message: fmt.Sprintf("MockRegionBackendServices %v not found", key), } - glog.V(5).Infof("MockRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -3995,7 +3995,7 @@ func (m *MockRegionBackendServices) Get(ctx context.Context, key *meta.Key) (*ga func (m *MockRegionBackendServices) List(ctx context.Context, region string, fl *filter.F) ([]*ga.BackendService, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { - glog.V(5).Infof("MockRegionBackendServices.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + klog.V(5).Infof("MockRegionBackendServices.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -4005,7 +4005,7 @@ func (m *MockRegionBackendServices) List(ctx context.Context, region string, fl if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockRegionBackendServices.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + klog.V(5).Infof("MockRegionBackendServices.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } @@ -4021,7 +4021,7 @@ func (m *MockRegionBackendServices) List(ctx context.Context, region string, fl objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockRegionBackendServices.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + klog.V(5).Infof("MockRegionBackendServices.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } @@ -4029,7 +4029,7 @@ func (m *MockRegionBackendServices) List(ctx context.Context, region string, fl func (m *MockRegionBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga.BackendService) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -4041,7 +4041,7 @@ func (m *MockRegionBackendServices) Insert(ctx context.Context, key *meta.Key, o defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + 
klog.V(5).Infof("MockRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -4049,7 +4049,7 @@ func (m *MockRegionBackendServices) Insert(ctx context.Context, key *meta.Key, o Code: http.StatusConflict, Message: fmt.Sprintf("MockRegionBackendServices %v exists", key), } - glog.V(5).Infof("MockRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -4058,7 +4058,7 @@ func (m *MockRegionBackendServices) Insert(ctx context.Context, key *meta.Key, o obj.SelfLink = SelfLink(meta.VersionGA, projectID, "backendServices", key) m.Objects[*key] = &MockRegionBackendServicesObj{obj} - glog.V(5).Infof("MockRegionBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockRegionBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -4066,7 +4066,7 @@ func (m *MockRegionBackendServices) Insert(ctx context.Context, key *meta.Key, o func (m *MockRegionBackendServices) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -4078,7 +4078,7 @@ func (m *MockRegionBackendServices) Delete(ctx context.Context, key *meta.Key) e defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -4086,12 +4086,12 @@ func (m *MockRegionBackendServices) Delete(ctx context.Context, key *meta.Key) e Code: http.StatusNotFound, Message: fmt.Sprintf("MockRegionBackendServices %v not found", key), } - glog.V(5).Infof("MockRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockRegionBackendServices.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockRegionBackendServices.Delete(%v, %v) = nil", ctx, key) return nil } @@ -4123,10 +4123,10 @@ type GCERegionBackendServices struct { // Get the BackendService named by key. func (g *GCERegionBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.BackendService, error) { - glog.V(5).Infof("GCERegionBackendServices.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCERegionBackendServices.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCERegionBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionBackendServices") @@ -4136,21 +4136,21 @@ func (g *GCERegionBackendServices) Get(ctx context.Context, key *meta.Key) (*ga. 
Version: meta.Version("ga"), Service: "RegionBackendServices", } - glog.V(5).Infof("GCERegionBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCERegionBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.RegionBackendServices.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCERegionBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCERegionBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all BackendService objects. func (g *GCERegionBackendServices) List(ctx context.Context, region string, fl *filter.F) ([]*ga.BackendService, error) { - glog.V(5).Infof("GCERegionBackendServices.List(%v, %v, %v) called", ctx, region, fl) + klog.V(5).Infof("GCERegionBackendServices.List(%v, %v, %v) called", ctx, region, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionBackendServices") rk := &RateLimitKey{ ProjectID: projectID, @@ -4161,30 +4161,30 @@ func (g *GCERegionBackendServices) List(ctx context.Context, region string, fl * if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCERegionBackendServices.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + klog.V(5).Infof("GCERegionBackendServices.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) call := g.s.GA.RegionBackendServices.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.BackendService f := func(l *ga.BackendServiceList) error { - glog.V(5).Infof("GCERegionBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCERegionBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCERegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCERegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCERegionBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCERegionBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCERegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCERegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -4192,9 +4192,9 @@ func (g *GCERegionBackendServices) List(ctx context.Context, region string, fl * // Insert BackendService with key of value obj. 
func (g *GCERegionBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga.BackendService) error { - glog.V(5).Infof("GCERegionBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCERegionBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCERegionBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionBackendServices") @@ -4204,9 +4204,9 @@ func (g *GCERegionBackendServices) Insert(ctx context.Context, key *meta.Key, ob Version: meta.Version("ga"), Service: "RegionBackendServices", } - glog.V(5).Infof("GCERegionBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCERegionBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -4215,20 +4215,20 @@ func (g *GCERegionBackendServices) Insert(ctx context.Context, key *meta.Key, ob op, err := call.Do() if err != nil { - glog.V(4).Infof("GCERegionBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCERegionBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCERegionBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the BackendService referenced by key. 
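
The mock hunks earlier in this file fail with *googleapi.Error values carrying real HTTP status codes (409 on duplicate Insert, 404 on missing Get/Delete), so consumers can reuse one error check against both the mocks and the live API. A minimal sketch of that check (isNotFound is ours):

```go
package example

import (
	"net/http"

	"google.golang.org/api/googleapi"
)

// isNotFound reports whether err is a GCE "not found" API error. The
// generated mocks construct exactly this shape (googleapi.Error with
// Code: http.StatusNotFound), so tests exercise the same error-handling
// path as calls against the real service.
func isNotFound(err error) bool {
	apiErr, ok := err.(*googleapi.Error)
	return ok && apiErr.Code == http.StatusNotFound
}
```
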
func (g *GCERegionBackendServices) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCERegionBackendServices.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCERegionBackendServices.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCERegionBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionBackendServices") @@ -4238,9 +4238,9 @@ func (g *GCERegionBackendServices) Delete(ctx context.Context, key *meta.Key) er Version: meta.Version("ga"), Service: "RegionBackendServices", } - glog.V(5).Infof("GCERegionBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCERegionBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.RegionBackendServices.Delete(projectID, key.Region, key.Name) @@ -4248,21 +4248,21 @@ func (g *GCERegionBackendServices) Delete(ctx context.Context, key *meta.Key) er op, err := call.Do() if err != nil { - glog.V(4).Infof("GCERegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCERegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } // GetHealth is a method on GCERegionBackendServices. func (g *GCERegionBackendServices) GetHealth(ctx context.Context, key *meta.Key, arg0 *ga.ResourceGroupReference) (*ga.BackendServiceGroupHealth, error) { - glog.V(5).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionBackendServices") @@ -4272,25 +4272,25 @@ func (g *GCERegionBackendServices) GetHealth(ctx context.Context, key *meta.Key, Version: meta.Version("ga"), Service: "RegionBackendServices", } - glog.V(5).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.RegionBackendServices.GetHealth(projectID, key.Region, key.Name, arg0) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...) 
= %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...) = %+v, %v", ctx, key, v, err) return v, err } // Update is a method on GCERegionBackendServices. func (g *GCERegionBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *ga.BackendService) error { - glog.V(5).Infof("GCERegionBackendServices.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCERegionBackendServices.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCERegionBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionBackendServices") @@ -4300,21 +4300,21 @@ func (g *GCERegionBackendServices) Update(ctx context.Context, key *meta.Key, ar Version: meta.Version("ga"), Service: "RegionBackendServices", } - glog.V(5).Infof("GCERegionBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCERegionBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.RegionBackendServices.Update(projectID, key.Region, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCERegionBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCERegionBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Update(%v, %v, ...) 
= %+v", ctx, key, err) return err } @@ -4377,7 +4377,7 @@ type MockAlphaRegionBackendServices struct { func (m *MockAlphaRegionBackendServices) Get(ctx context.Context, key *meta.Key) (*alpha.BackendService, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -4389,12 +4389,12 @@ func (m *MockAlphaRegionBackendServices) Get(ctx context.Context, key *meta.Key) defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToAlpha() - glog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -4402,7 +4402,7 @@ func (m *MockAlphaRegionBackendServices) Get(ctx context.Context, key *meta.Key) Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaRegionBackendServices %v not found", key), } - glog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -4410,7 +4410,7 @@ func (m *MockAlphaRegionBackendServices) Get(ctx context.Context, key *meta.Key) func (m *MockAlphaRegionBackendServices) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.BackendService, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { - glog.V(5).Infof("MockAlphaRegionBackendServices.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + klog.V(5).Infof("MockAlphaRegionBackendServices.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -4420,7 +4420,7 @@ func (m *MockAlphaRegionBackendServices) List(ctx context.Context, region string if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockAlphaRegionBackendServices.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } @@ -4436,7 +4436,7 @@ func (m *MockAlphaRegionBackendServices) List(ctx context.Context, region string objs = append(objs, obj.ToAlpha()) } - glog.V(5).Infof("MockAlphaRegionBackendServices.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + klog.V(5).Infof("MockAlphaRegionBackendServices.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } @@ -4444,7 +4444,7 @@ func (m *MockAlphaRegionBackendServices) List(ctx context.Context, region string func (m *MockAlphaRegionBackendServices) Insert(ctx context.Context, key *meta.Key, obj *alpha.BackendService) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -4456,7 +4456,7 @@ func (m *MockAlphaRegionBackendServices) Insert(ctx context.Context, key *meta.K defer m.Lock.Unlock() if err, ok 
:= m.InsertError[*key]; ok { - glog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -4464,7 +4464,7 @@ func (m *MockAlphaRegionBackendServices) Insert(ctx context.Context, key *meta.K Code: http.StatusConflict, Message: fmt.Sprintf("MockAlphaRegionBackendServices %v exists", key), } - glog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -4473,7 +4473,7 @@ func (m *MockAlphaRegionBackendServices) Insert(ctx context.Context, key *meta.K obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "backendServices", key) m.Objects[*key] = &MockRegionBackendServicesObj{obj} - glog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -4481,7 +4481,7 @@ func (m *MockAlphaRegionBackendServices) Insert(ctx context.Context, key *meta.K func (m *MockAlphaRegionBackendServices) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -4493,7 +4493,7 @@ func (m *MockAlphaRegionBackendServices) Delete(ctx context.Context, key *meta.K defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -4501,12 +4501,12 @@ func (m *MockAlphaRegionBackendServices) Delete(ctx context.Context, key *meta.K Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaRegionBackendServices %v not found", key), } - glog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = nil", ctx, key) return nil } @@ -4538,10 +4538,10 @@ type GCEAlphaRegionBackendServices struct { // Get the BackendService named by key. 
func (g *GCEAlphaRegionBackendServices) Get(ctx context.Context, key *meta.Key) (*alpha.BackendService, error) { - glog.V(5).Infof("GCEAlphaRegionBackendServices.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaRegionBackendServices.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaRegionBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaRegionBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") @@ -4551,21 +4551,21 @@ func (g *GCEAlphaRegionBackendServices) Get(ctx context.Context, key *meta.Key) Version: meta.Version("alpha"), Service: "RegionBackendServices", } - glog.V(5).Infof("GCEAlphaRegionBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaRegionBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaRegionBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Alpha.RegionBackendServices.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEAlphaRegionBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all BackendService objects. func (g *GCEAlphaRegionBackendServices) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.BackendService, error) { - glog.V(5).Infof("GCEAlphaRegionBackendServices.List(%v, %v, %v) called", ctx, region, fl) + klog.V(5).Infof("GCEAlphaRegionBackendServices.List(%v, %v, %v) called", ctx, region, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") rk := &RateLimitKey{ ProjectID: projectID, @@ -4576,30 +4576,30 @@ func (g *GCEAlphaRegionBackendServices) List(ctx context.Context, region string, if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEAlphaRegionBackendServices.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + klog.V(5).Infof("GCEAlphaRegionBackendServices.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) call := g.s.Alpha.RegionBackendServices.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } var all []*alpha.BackendService f := func(l *alpha.BackendServiceList) error { - glog.V(5).Infof("GCEAlphaRegionBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEAlphaRegionBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEAlphaRegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEAlphaRegionBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEAlphaRegionBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEAlphaRegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEAlphaRegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -4607,9 +4607,9 @@ func (g *GCEAlphaRegionBackendServices) List(ctx context.Context, region string, // Insert BackendService with key of value obj. func (g *GCEAlphaRegionBackendServices) Insert(ctx context.Context, key *meta.Key, obj *alpha.BackendService) error { - glog.V(5).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") @@ -4619,9 +4619,9 @@ func (g *GCEAlphaRegionBackendServices) Insert(ctx context.Context, key *meta.Ke Version: meta.Version("alpha"), Service: "RegionBackendServices", } - glog.V(5).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -4630,20 +4630,20 @@ func (g *GCEAlphaRegionBackendServices) Insert(ctx context.Context, key *meta.Ke op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the BackendService referenced by key. 
func (g *GCEAlphaRegionBackendServices) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") @@ -4653,9 +4653,9 @@ func (g *GCEAlphaRegionBackendServices) Delete(ctx context.Context, key *meta.Ke Version: meta.Version("alpha"), Service: "RegionBackendServices", } - glog.V(5).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.RegionBackendServices.Delete(projectID, key.Region, key.Name) @@ -4663,21 +4663,21 @@ func (g *GCEAlphaRegionBackendServices) Delete(ctx context.Context, key *meta.Ke op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } // GetHealth is a method on GCEAlphaRegionBackendServices. 
func (g *GCEAlphaRegionBackendServices) GetHealth(ctx context.Context, key *meta.Key, arg0 *alpha.ResourceGroupReference) (*alpha.BackendServiceGroupHealth, error) { - glog.V(5).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") @@ -4687,25 +4687,25 @@ func (g *GCEAlphaRegionBackendServices) GetHealth(ctx context.Context, key *meta Version: meta.Version("alpha"), Service: "RegionBackendServices", } - glog.V(5).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Alpha.RegionBackendServices.GetHealth(projectID, key.Region, key.Name, arg0) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...) = %+v, %v", ctx, key, v, err) return v, err } // Update is a method on GCEAlphaRegionBackendServices. func (g *GCEAlphaRegionBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *alpha.BackendService) error { - glog.V(5).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") @@ -4715,21 +4715,21 @@ func (g *GCEAlphaRegionBackendServices) Update(ctx context.Context, key *meta.Ke Version: meta.Version("alpha"), Service: "RegionBackendServices", } - glog.V(5).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.RegionBackendServices.Update(projectID, key.Region, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...) 
= %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) return err } @@ -4790,7 +4790,7 @@ type MockDisks struct { func (m *MockDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockDisks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockDisks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -4802,12 +4802,12 @@ func (m *MockDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockDisks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockDisks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockDisks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockDisks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -4815,7 +4815,7 @@ func (m *MockDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { Code: http.StatusNotFound, Message: fmt.Sprintf("MockDisks %v not found", key), } - glog.V(5).Infof("MockDisks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockDisks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -4823,7 +4823,7 @@ func (m *MockDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { func (m *MockDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Disk, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { - glog.V(5).Infof("MockDisks.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + klog.V(5).Infof("MockDisks.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) return objs, err } } @@ -4833,7 +4833,7 @@ func (m *MockDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*ga. if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockDisks.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + klog.V(5).Infof("MockDisks.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) return nil, *m.ListError } @@ -4849,7 +4849,7 @@ func (m *MockDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*ga. objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockDisks.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + klog.V(5).Infof("MockDisks.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) return objs, nil } @@ -4857,7 +4857,7 @@ func (m *MockDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*ga. 
func (m *MockDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -4869,7 +4869,7 @@ func (m *MockDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) err defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -4877,7 +4877,7 @@ func (m *MockDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) err Code: http.StatusConflict, Message: fmt.Sprintf("MockDisks %v exists", key), } - glog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -4886,7 +4886,7 @@ func (m *MockDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) err obj.SelfLink = SelfLink(meta.VersionGA, projectID, "disks", key) m.Objects[*key] = &MockDisksObj{obj} - glog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -4894,7 +4894,7 @@ func (m *MockDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) err func (m *MockDisks) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockDisks.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -4906,7 +4906,7 @@ func (m *MockDisks) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockDisks.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -4914,12 +4914,12 @@ func (m *MockDisks) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockDisks %v not found", key), } - glog.V(5).Infof("MockDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockDisks.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockDisks.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockDisks.Delete(%v, %v) = nil", ctx, key) return nil } @@ -4943,10 +4943,10 @@ type GCEDisks struct { // Get the Disk named by key. 
func (g *GCEDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { - glog.V(5).Infof("GCEDisks.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEDisks.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEDisks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEDisks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks") @@ -4956,21 +4956,21 @@ func (g *GCEDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { Version: meta.Version("ga"), Service: "Disks", } - glog.V(5).Infof("GCEDisks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEDisks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEDisks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.Disks.Get(projectID, key.Zone, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEDisks.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEDisks.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Disk objects. func (g *GCEDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Disk, error) { - glog.V(5).Infof("GCEDisks.List(%v, %v, %v) called", ctx, zone, fl) + klog.V(5).Infof("GCEDisks.List(%v, %v, %v) called", ctx, zone, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks") rk := &RateLimitKey{ ProjectID: projectID, @@ -4981,30 +4981,30 @@ func (g *GCEDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.D if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEDisks.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) + klog.V(5).Infof("GCEDisks.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) call := g.s.GA.Disks.List(projectID, zone) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.Disk f := func(l *ga.DiskList) error { - glog.V(5).Infof("GCEDisks.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEDisks.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEDisks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEDisks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEDisks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEDisks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEDisks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEDisks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -5012,9 +5012,9 @@ func (g *GCEDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.D // Insert Disk with key of value obj. 
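
Every wrapper above builds a RateLimitKey (project, operation, API version, service) and gates on g.s.RateLimiter.Accept(ctx, rk) before issuing the call; an error there fails the method without ever touching GCE. A minimal token-bucket sketch of something satisfying that single-method contract, with a simplified stand-in for the package's own key type:

```go
package example

import (
	"context"

	"golang.org/x/time/rate"
)

// rateLimitKey is a simplified stand-in for the package's RateLimitKey; a
// production limiter would vary its budget by Operation and Service.
type rateLimitKey struct {
	ProjectID, Operation, Version, Service string
}

// bucketLimiter applies one shared token bucket to all calls.
type bucketLimiter struct {
	l *rate.Limiter
}

func newBucketLimiter(qps float64, burst int) *bucketLimiter {
	return &bucketLimiter{l: rate.NewLimiter(rate.Limit(qps), burst)}
}

// Accept blocks until a token is available or ctx is cancelled; any error
// returned here aborts the wrapped GCE call before it is made.
func (b *bucketLimiter) Accept(ctx context.Context, key *rateLimitKey) error {
	return b.l.Wait(ctx)
}
```
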
func (g *GCEDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) error { - glog.V(5).Infof("GCEDisks.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEDisks.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEDisks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEDisks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks") @@ -5024,9 +5024,9 @@ func (g *GCEDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) erro Version: meta.Version("ga"), Service: "Disks", } - glog.V(5).Infof("GCEDisks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEDisks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEDisks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -5035,20 +5035,20 @@ func (g *GCEDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) erro op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEDisks.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEDisks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEDisks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the Disk referenced by key. func (g *GCEDisks) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEDisks.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEDisks.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEDisks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEDisks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks") @@ -5058,9 +5058,9 @@ func (g *GCEDisks) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "Disks", } - glog.V(5).Infof("GCEDisks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEDisks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEDisks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.Disks.Delete(projectID, key.Zone, key.Name) @@ -5068,21 +5068,21 @@ func (g *GCEDisks) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Delete(%v, %v) = %v", ctx, key, err) return err } // Resize is a method on GCEDisks. 
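
Insert, Delete and Resize above share one shape: call.Do() returns a long-running compute Operation, and the wrapper then blocks in g.s.WaitForCompletion(ctx, op). That helper's body is not part of this diff; a simplified polling sketch of the idea (waitForZoneOp and the fixed one-second interval are ours):

```go
package example

import (
	"context"
	"fmt"
	"time"

	ga "google.golang.org/api/compute/v1"
)

// waitForZoneOp is a simplified stand-in for WaitForCompletion: poll the
// operation until it reports DONE, surfacing either the operation's own
// error or the context's.
func waitForZoneOp(ctx context.Context, svc *ga.Service, project, zone, opName string) error {
	for {
		op, err := svc.ZoneOperations.Get(project, zone, opName).Context(ctx).Do()
		if err != nil {
			return err
		}
		if op.Status == "DONE" {
			if op.Error != nil && len(op.Error.Errors) > 0 {
				return fmt.Errorf("operation %s failed: %+v", opName, *op.Error.Errors[0])
			}
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(time.Second): // fixed poll interval, for brevity
		}
	}
}
```
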
func (g *GCEDisks) Resize(ctx context.Context, key *meta.Key, arg0 *ga.DisksResizeRequest) error { - glog.V(5).Infof("GCEDisks.Resize(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEDisks.Resize(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEDisks.Resize(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEDisks.Resize(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks") @@ -5092,36 +5092,36 @@ func (g *GCEDisks) Resize(ctx context.Context, key *meta.Key, arg0 *ga.DisksResi Version: meta.Version("ga"), Service: "Disks", } - glog.V(5).Infof("GCEDisks.Resize(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEDisks.Resize(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEDisks.Resize(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Resize(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.Disks.Resize(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) return err } -// BetaRegionDisks is an interface that allows for mocking of RegionDisks. -type BetaRegionDisks interface { - Get(ctx context.Context, key *meta.Key) (*beta.Disk, error) - List(ctx context.Context, region string, fl *filter.F) ([]*beta.Disk, error) - Insert(ctx context.Context, key *meta.Key, obj *beta.Disk) error +// RegionDisks is an interface that allows for mocking of RegionDisks. +type RegionDisks interface { + Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) + List(ctx context.Context, region string, fl *filter.F) ([]*ga.Disk, error) + Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) error Delete(ctx context.Context, key *meta.Key) error - Resize(context.Context, *meta.Key, *beta.RegionDisksResizeRequest) error + Resize(context.Context, *meta.Key, *ga.RegionDisksResizeRequest) error } -// NewMockBetaRegionDisks returns a new mock for RegionDisks. -func NewMockBetaRegionDisks(pr ProjectRouter, objs map[meta.Key]*MockRegionDisksObj) *MockBetaRegionDisks { - mock := &MockBetaRegionDisks{ +// NewMockRegionDisks returns a new mock for RegionDisks. +func NewMockRegionDisks(pr ProjectRouter, objs map[meta.Key]*MockRegionDisksObj) *MockRegionDisks { + mock := &MockRegionDisks{ ProjectRouter: pr, Objects: objs, @@ -5132,8 +5132,8 @@ func NewMockBetaRegionDisks(pr ProjectRouter, objs map[meta.Key]*MockRegionDisks return mock } -// MockBetaRegionDisks is the mock for RegionDisks. -type MockBetaRegionDisks struct { +// MockRegionDisks is the mock for RegionDisks. +type MockRegionDisks struct { Lock sync.Mutex ProjectRouter ProjectRouter @@ -5152,11 +5152,11 @@ type MockBetaRegionDisks struct { // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. 
- GetHook func(ctx context.Context, key *meta.Key, m *MockBetaRegionDisks) (bool, *beta.Disk, error) - ListHook func(ctx context.Context, region string, fl *filter.F, m *MockBetaRegionDisks) (bool, []*beta.Disk, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *beta.Disk, m *MockBetaRegionDisks) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockBetaRegionDisks) (bool, error) - ResizeHook func(context.Context, *meta.Key, *beta.RegionDisksResizeRequest, *MockBetaRegionDisks) error + GetHook func(ctx context.Context, key *meta.Key, m *MockRegionDisks) (bool, *ga.Disk, error) + ListHook func(ctx context.Context, region string, fl *filter.F, m *MockRegionDisks) (bool, []*ga.Disk, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *ga.Disk, m *MockRegionDisks) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockRegionDisks) (bool, error) + ResizeHook func(context.Context, *meta.Key, *ga.RegionDisksResizeRequest, *MockRegionDisks) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -5164,10 +5164,10 @@ type MockBetaRegionDisks struct { } // Get returns the object from the mock. -func (m *MockBetaRegionDisks) Get(ctx context.Context, key *meta.Key) (*beta.Disk, error) { +func (m *MockRegionDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaRegionDisks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockRegionDisks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -5179,28 +5179,28 @@ func (m *MockBetaRegionDisks) Get(ctx context.Context, key *meta.Key) (*beta.Dis defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockBetaRegionDisks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockRegionDisks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { - typedObj := obj.ToBeta() - glog.V(5).Infof("MockBetaRegionDisks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + typedObj := obj.ToGA() + klog.V(5).Infof("MockRegionDisks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockBetaRegionDisks %v not found", key), + Message: fmt.Sprintf("MockRegionDisks %v not found", key), } - glog.V(5).Infof("MockBetaRegionDisks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockRegionDisks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } // List all of the objects in the mock in the given region. 
-func (m *MockBetaRegionDisks) List(ctx context.Context, region string, fl *filter.F) ([]*beta.Disk, error) { +func (m *MockRegionDisks) List(ctx context.Context, region string, fl *filter.F) ([]*ga.Disk, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { - glog.V(5).Infof("MockBetaRegionDisks.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + klog.V(5).Infof("MockRegionDisks.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -5210,31 +5210,31 @@ func (m *MockBetaRegionDisks) List(ctx context.Context, region string, fl *filte if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockBetaRegionDisks.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + klog.V(5).Infof("MockRegionDisks.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } - var objs []*beta.Disk + var objs []*ga.Disk for key, obj := range m.Objects { if key.Region != region { continue } - if !fl.Match(obj.ToBeta()) { + if !fl.Match(obj.ToGA()) { continue } - objs = append(objs, obj.ToBeta()) + objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockBetaRegionDisks.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + klog.V(5).Infof("MockRegionDisks.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } // Insert is a mock for inserting/creating a new object. -func (m *MockBetaRegionDisks) Insert(ctx context.Context, key *meta.Key, obj *beta.Disk) error { +func (m *MockRegionDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockBetaRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -5246,32 +5246,32 @@ func (m *MockBetaRegionDisks) Insert(ctx context.Context, key *meta.Key, obj *be defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockBetaRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { err := &googleapi.Error{ Code: http.StatusConflict, - Message: fmt.Sprintf("MockBetaRegionDisks %v exists", key), + Message: fmt.Sprintf("MockRegionDisks %v exists", key), } - glog.V(5).Infof("MockBetaRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } obj.Name = key.Name - projectID := m.ProjectRouter.ProjectID(ctx, "beta", "disks") - obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "disks", key) + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "disks") + obj.SelfLink = SelfLink(meta.VersionGA, projectID, "disks", key) m.Objects[*key] = &MockRegionDisksObj{obj} - glog.V(5).Infof("MockBetaRegionDisks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockRegionDisks.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } // Delete is a mock for deleting the object. 
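
The hook fields added to MockRegionDisks above (GetHook, ListHook, InsertHook, DeleteHook, ResizeHook) let a test intercept single calls: returning true short-circuits the mock, returning false falls through to the Objects map. A hedged test sketch, written as if it lived beside the generated code so the mock types resolve; fakeRouter and the key names are ours, and the import paths follow wherever this package is vendored:

```go
package cloud

import (
	"context"
	"fmt"
	"testing"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" // vendored path may differ
	ga "google.golang.org/api/compute/v1"
)

// fakeRouter satisfies ProjectRouter for tests: every call maps to one project.
type fakeRouter struct{}

func (fakeRouter) ProjectID(ctx context.Context, version meta.Version, service string) string {
	return "test-project"
}

func TestRegionDisksGetHook(t *testing.T) {
	mock := NewMockRegionDisks(fakeRouter{}, map[meta.Key]*MockRegionDisksObj{})

	// Intercept Get for one key only; everything else falls through to the
	// normal mock behavior backed by m.Objects.
	mock.GetHook = func(ctx context.Context, key *meta.Key, m *MockRegionDisks) (bool, *ga.Disk, error) {
		if key.Name == "flaky" {
			return true, nil, fmt.Errorf("injected failure for %v", key)
		}
		return false, nil, nil
	}

	if _, err := mock.Get(context.Background(), meta.RegionalKey("flaky", "us-central1")); err == nil {
		t.Fatal("expected the injected error from GetHook")
	}
}
```
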
-func (m *MockBetaRegionDisks) Delete(ctx context.Context, key *meta.Key) error { +func (m *MockRegionDisks) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaRegionDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRegionDisks.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -5283,207 +5283,207 @@ func (m *MockBetaRegionDisks) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockBetaRegionDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRegionDisks.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockBetaRegionDisks %v not found", key), + Message: fmt.Sprintf("MockRegionDisks %v not found", key), } - glog.V(5).Infof("MockBetaRegionDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRegionDisks.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockBetaRegionDisks.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockRegionDisks.Delete(%v, %v) = nil", ctx, key) return nil } // Obj wraps the object for use in the mock. -func (m *MockBetaRegionDisks) Obj(o *beta.Disk) *MockRegionDisksObj { +func (m *MockRegionDisks) Obj(o *ga.Disk) *MockRegionDisksObj { return &MockRegionDisksObj{o} } // Resize is a mock for the corresponding method. -func (m *MockBetaRegionDisks) Resize(ctx context.Context, key *meta.Key, arg0 *beta.RegionDisksResizeRequest) error { +func (m *MockRegionDisks) Resize(ctx context.Context, key *meta.Key, arg0 *ga.RegionDisksResizeRequest) error { if m.ResizeHook != nil { return m.ResizeHook(ctx, key, arg0, m) } return nil } -// GCEBetaRegionDisks is a simplifying adapter for the GCE RegionDisks. -type GCEBetaRegionDisks struct { +// GCERegionDisks is a simplifying adapter for the GCE RegionDisks. +type GCERegionDisks struct { s *Service } // Get the Disk named by key. 
-func (g *GCEBetaRegionDisks) Get(ctx context.Context, key *meta.Key) (*beta.Disk, error) { - glog.V(5).Infof("GCEBetaRegionDisks.Get(%v, %v): called", ctx, key) +func (g *GCERegionDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { + klog.V(5).Infof("GCERegionDisks.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaRegionDisks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionDisks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionDisks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionDisks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", - Version: meta.Version("beta"), + Version: meta.Version("ga"), Service: "RegionDisks", } - glog.V(5).Infof("GCEBetaRegionDisks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionDisks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaRegionDisks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.Beta.RegionDisks.Get(projectID, key.Region, key.Name) + call := g.s.GA.RegionDisks.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEBetaRegionDisks.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCERegionDisks.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Disk objects. -func (g *GCEBetaRegionDisks) List(ctx context.Context, region string, fl *filter.F) ([]*beta.Disk, error) { - glog.V(5).Infof("GCEBetaRegionDisks.List(%v, %v, %v) called", ctx, region, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionDisks") +func (g *GCERegionDisks) List(ctx context.Context, region string, fl *filter.F) ([]*ga.Disk, error) { + klog.V(5).Infof("GCERegionDisks.List(%v, %v, %v) called", ctx, region, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionDisks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "List", - Version: meta.Version("beta"), + Version: meta.Version("ga"), Service: "RegionDisks", } if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEBetaRegionDisks.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) - call := g.s.Beta.RegionDisks.List(projectID, region) + klog.V(5).Infof("GCERegionDisks.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + call := g.s.GA.RegionDisks.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } - var all []*beta.Disk - f := func(l *beta.DiskList) error { - glog.V(5).Infof("GCEBetaRegionDisks.List(%v, ..., %v): page %+v", ctx, fl, l) + var all []*ga.Disk + f := func(l *ga.DiskList) error { + klog.V(5).Infof("GCERegionDisks.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEBetaRegionDisks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCERegionDisks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEBetaRegionDisks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCERegionDisks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEBetaRegionDisks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCERegionDisks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } // Insert Disk with key of value obj. -func (g *GCEBetaRegionDisks) Insert(ctx context.Context, key *meta.Key, obj *beta.Disk) error { - glog.V(5).Infof("GCEBetaRegionDisks.Insert(%v, %v, %+v): called", ctx, key, obj) +func (g *GCERegionDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) error { + klog.V(5).Infof("GCERegionDisks.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEBetaRegionDisks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionDisks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionDisks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionDisks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Insert", - Version: meta.Version("beta"), + Version: meta.Version("ga"), Service: "RegionDisks", } - glog.V(5).Infof("GCEBetaRegionDisks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionDisks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaRegionDisks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name - call := g.s.Beta.RegionDisks.Insert(projectID, key.Region, obj) + call := g.s.GA.RegionDisks.Insert(projectID, key.Region, obj) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaRegionDisks.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaRegionDisks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCERegionDisks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the Disk referenced by key. 
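
The List wrappers above forward the caller's *filter.F to the API via fl.String() and skip the Filter parameter for filter.None. A short usage sketch, assuming the filter package's Regexp constructor; listGKEDisks and the name pattern are ours:

```go
package cloud

import (
	"context"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter" // vendored path may differ
	ga "google.golang.org/api/compute/v1"
)

// listGKEDisks lists regional disks whose names match a prefix; passing
// filter.None instead would list everything in the region.
func listGKEDisks(ctx context.Context, disks RegionDisks, region string) ([]*ga.Disk, error) {
	return disks.List(ctx, region, filter.Regexp("name", "gke-.*"))
}
```
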
-func (g *GCEBetaRegionDisks) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEBetaRegionDisks.Delete(%v, %v): called", ctx, key) +func (g *GCERegionDisks) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCERegionDisks.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaRegionDisks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionDisks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionDisks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionDisks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Delete", - Version: meta.Version("beta"), + Version: meta.Version("ga"), Service: "RegionDisks", } - glog.V(5).Infof("GCEBetaRegionDisks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionDisks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaRegionDisks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.Beta.RegionDisks.Delete(projectID, key.Region, key.Name) + call := g.s.GA.RegionDisks.Delete(projectID, key.Region, key.Name) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaRegionDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaRegionDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Delete(%v, %v) = %v", ctx, key, err) return err } -// Resize is a method on GCEBetaRegionDisks. -func (g *GCEBetaRegionDisks) Resize(ctx context.Context, key *meta.Key, arg0 *beta.RegionDisksResizeRequest) error { - glog.V(5).Infof("GCEBetaRegionDisks.Resize(%v, %v, ...): called", ctx, key) +// Resize is a method on GCERegionDisks. 
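
Beyond the log rename, this part of the diff promotes RegionDisks from the beta to the GA surface: the interface now speaks *ga.Disk and *ga.RegionDisksResizeRequest, and the wrapper dispatches through g.s.GA instead of g.s.Beta. Callers change accordingly; a sketch written as if in the generated package (resizeRegionDisk is ours):

```go
package cloud

import (
	"context"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" // vendored path may differ
	ga "google.golang.org/api/compute/v1"
)

// resizeRegionDisk grows a regional disk through the promoted GA interface.
// Before this diff the same call took a BetaRegionDisks and a
// *beta.RegionDisksResizeRequest.
func resizeRegionDisk(ctx context.Context, disks RegionDisks, key *meta.Key, sizeGb int64) error {
	return disks.Resize(ctx, key, &ga.RegionDisksResizeRequest{SizeGb: sizeGb})
}
```
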
+func (g *GCERegionDisks) Resize(ctx context.Context, key *meta.Key, arg0 *ga.RegionDisksResizeRequest) error { + klog.V(5).Infof("GCERegionDisks.Resize(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaRegionDisks.Resize(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionDisks.Resize(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionDisks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionDisks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Resize", - Version: meta.Version("beta"), + Version: meta.Version("ga"), Service: "RegionDisks", } - glog.V(5).Infof("GCEBetaRegionDisks.Resize(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionDisks.Resize(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaRegionDisks.Resize(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Resize(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.Beta.RegionDisks.Resize(projectID, key.Region, key.Name, arg0) + call := g.s.GA.RegionDisks.Resize(projectID, key.Region, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaRegionDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaRegionDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) return err } @@ -5544,7 +5544,7 @@ type MockFirewalls struct { func (m *MockFirewalls) Get(ctx context.Context, key *meta.Key) (*ga.Firewall, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockFirewalls.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockFirewalls.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -5556,12 +5556,12 @@ func (m *MockFirewalls) Get(ctx context.Context, key *meta.Key) (*ga.Firewall, e defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockFirewalls.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockFirewalls.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockFirewalls.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockFirewalls.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -5569,7 +5569,7 @@ func (m *MockFirewalls) Get(ctx context.Context, key *meta.Key) (*ga.Firewall, e Code: http.StatusNotFound, Message: fmt.Sprintf("MockFirewalls %v not found", key), } - glog.V(5).Infof("MockFirewalls.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockFirewalls.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -5577,7 +5577,7 @@ func (m *MockFirewalls) Get(ctx context.Context, key *meta.Key) (*ga.Firewall, e func (m *MockFirewalls) List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockFirewalls.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockFirewalls.List(%v, %v) = [%v items], %v", ctx, 
fl, len(objs), err) return objs, err } } @@ -5587,7 +5587,7 @@ func (m *MockFirewalls) List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockFirewalls.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockFirewalls.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -5600,7 +5600,7 @@ func (m *MockFirewalls) List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockFirewalls.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockFirewalls.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -5608,7 +5608,7 @@ func (m *MockFirewalls) List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, func (m *MockFirewalls) Insert(ctx context.Context, key *meta.Key, obj *ga.Firewall) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -5620,7 +5620,7 @@ func (m *MockFirewalls) Insert(ctx context.Context, key *meta.Key, obj *ga.Firew defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -5628,7 +5628,7 @@ func (m *MockFirewalls) Insert(ctx context.Context, key *meta.Key, obj *ga.Firew Code: http.StatusConflict, Message: fmt.Sprintf("MockFirewalls %v exists", key), } - glog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -5637,7 +5637,7 @@ func (m *MockFirewalls) Insert(ctx context.Context, key *meta.Key, obj *ga.Firew obj.SelfLink = SelfLink(meta.VersionGA, projectID, "firewalls", key) m.Objects[*key] = &MockFirewallsObj{obj} - glog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -5645,7 +5645,7 @@ func (m *MockFirewalls) Insert(ctx context.Context, key *meta.Key, obj *ga.Firew func (m *MockFirewalls) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockFirewalls.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockFirewalls.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -5657,7 +5657,7 @@ func (m *MockFirewalls) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockFirewalls.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockFirewalls.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -5665,12 +5665,12 @@ func (m *MockFirewalls) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockFirewalls %v not found", key), } - glog.V(5).Infof("MockFirewalls.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockFirewalls.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockFirewalls.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockFirewalls.Delete(%v, %v) = nil", ctx, key) return nil } @@ -5694,10 +5694,10 @@ 
type GCEFirewalls struct { // Get the Firewall named by key. func (g *GCEFirewalls) Get(ctx context.Context, key *meta.Key) (*ga.Firewall, error) { - glog.V(5).Infof("GCEFirewalls.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEFirewalls.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEFirewalls.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEFirewalls.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") @@ -5707,21 +5707,21 @@ func (g *GCEFirewalls) Get(ctx context.Context, key *meta.Key) (*ga.Firewall, er Version: meta.Version("ga"), Service: "Firewalls", } - glog.V(5).Infof("GCEFirewalls.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEFirewalls.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEFirewalls.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.Firewalls.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEFirewalls.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEFirewalls.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Firewall objects. func (g *GCEFirewalls) List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, error) { - glog.V(5).Infof("GCEFirewalls.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEFirewalls.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") rk := &RateLimitKey{ ProjectID: projectID, @@ -5732,30 +5732,30 @@ func (g *GCEFirewalls) List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEFirewalls.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEFirewalls.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.Firewalls.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.Firewall f := func(l *ga.FirewallList) error { - glog.V(5).Infof("GCEFirewalls.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEFirewalls.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEFirewalls.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEFirewalls.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEFirewalls.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEFirewalls.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEFirewalls.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEFirewalls.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -5763,9 +5763,9 @@ func (g *GCEFirewalls) List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, // Insert Firewall with key of value obj. 
func (g *GCEFirewalls) Insert(ctx context.Context, key *meta.Key, obj *ga.Firewall) error { - glog.V(5).Infof("GCEFirewalls.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEFirewalls.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEFirewalls.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEFirewalls.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") @@ -5775,9 +5775,9 @@ func (g *GCEFirewalls) Insert(ctx context.Context, key *meta.Key, obj *ga.Firewa Version: meta.Version("ga"), Service: "Firewalls", } - glog.V(5).Infof("GCEFirewalls.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEFirewalls.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEFirewalls.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -5786,20 +5786,20 @@ func (g *GCEFirewalls) Insert(ctx context.Context, key *meta.Key, obj *ga.Firewa op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEFirewalls.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEFirewalls.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEFirewalls.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the Firewall referenced by key. func (g *GCEFirewalls) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEFirewalls.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEFirewalls.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEFirewalls.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEFirewalls.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") @@ -5809,9 +5809,9 @@ func (g *GCEFirewalls) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "Firewalls", } - glog.V(5).Infof("GCEFirewalls.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEFirewalls.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEFirewalls.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.Firewalls.Delete(projectID, key.Name) @@ -5820,21 +5820,21 @@ func (g *GCEFirewalls) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEFirewalls.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEFirewalls.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Delete(%v, %v) = %v", ctx, key, err) return err } // Update is a method on GCEFirewalls. 
func (g *GCEFirewalls) Update(ctx context.Context, key *meta.Key, arg0 *ga.Firewall) error { - glog.V(5).Infof("GCEFirewalls.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEFirewalls.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEFirewalls.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEFirewalls.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") @@ -5844,21 +5844,21 @@ func (g *GCEFirewalls) Update(ctx context.Context, key *meta.Key, arg0 *ga.Firew Version: meta.Version("ga"), Service: "Firewalls", } - glog.V(5).Infof("GCEFirewalls.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEFirewalls.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEFirewalls.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.Firewalls.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEFirewalls.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEFirewalls.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Update(%v, %v, ...) = %+v", ctx, key, err) return err } @@ -5917,7 +5917,7 @@ type MockForwardingRules struct { func (m *MockForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.ForwardingRule, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -5929,12 +5929,12 @@ func (m *MockForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.Forwa defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -5942,7 +5942,7 @@ func (m *MockForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.Forwa Code: http.StatusNotFound, Message: fmt.Sprintf("MockForwardingRules %v not found", key), } - glog.V(5).Infof("MockForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -5950,7 +5950,7 @@ func (m *MockForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.Forwa func (m *MockForwardingRules) List(ctx context.Context, region string, fl *filter.F) ([]*ga.ForwardingRule, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { - glog.V(5).Infof("MockForwardingRules.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + klog.V(5).Infof("MockForwardingRules.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, 
len(objs), err) return objs, err } } @@ -5960,7 +5960,7 @@ func (m *MockForwardingRules) List(ctx context.Context, region string, fl *filte if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockForwardingRules.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + klog.V(5).Infof("MockForwardingRules.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } @@ -5976,7 +5976,7 @@ func (m *MockForwardingRules) List(ctx context.Context, region string, fl *filte objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockForwardingRules.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + klog.V(5).Infof("MockForwardingRules.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } @@ -5984,7 +5984,7 @@ func (m *MockForwardingRules) List(ctx context.Context, region string, fl *filte func (m *MockForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -5996,7 +5996,7 @@ func (m *MockForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -6004,7 +6004,7 @@ func (m *MockForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga Code: http.StatusConflict, Message: fmt.Sprintf("MockForwardingRules %v exists", key), } - glog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -6013,7 +6013,7 @@ func (m *MockForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga obj.SelfLink = SelfLink(meta.VersionGA, projectID, "forwardingRules", key) m.Objects[*key] = &MockForwardingRulesObj{obj} - glog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -6021,7 +6021,7 @@ func (m *MockForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga func (m *MockForwardingRules) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -6033,7 +6033,7 @@ func (m *MockForwardingRules) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -6041,12 +6041,12 @@ func (m *MockForwardingRules) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockForwardingRules %v not found", key), } - glog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = %v", ctx, key, err) 
return err } delete(m.Objects, *key) - glog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = nil", ctx, key) return nil } @@ -6062,10 +6062,10 @@ type GCEForwardingRules struct { // Get the ForwardingRule named by key. func (g *GCEForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.ForwardingRule, error) { - glog.V(5).Infof("GCEForwardingRules.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEForwardingRules.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEForwardingRules.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEForwardingRules.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "ForwardingRules") @@ -6075,21 +6075,21 @@ func (g *GCEForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.Forwar Version: meta.Version("ga"), Service: "ForwardingRules", } - glog.V(5).Infof("GCEForwardingRules.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEForwardingRules.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEForwardingRules.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEForwardingRules.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.ForwardingRules.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEForwardingRules.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEForwardingRules.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all ForwardingRule objects. func (g *GCEForwardingRules) List(ctx context.Context, region string, fl *filter.F) ([]*ga.ForwardingRule, error) { - glog.V(5).Infof("GCEForwardingRules.List(%v, %v, %v) called", ctx, region, fl) + klog.V(5).Infof("GCEForwardingRules.List(%v, %v, %v) called", ctx, region, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "ForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, @@ -6100,30 +6100,30 @@ func (g *GCEForwardingRules) List(ctx context.Context, region string, fl *filter if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEForwardingRules.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + klog.V(5).Infof("GCEForwardingRules.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) call := g.s.GA.ForwardingRules.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.ForwardingRule f := func(l *ga.ForwardingRuleList) error { - glog.V(5).Infof("GCEForwardingRules.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEForwardingRules.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEForwardingRules.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEForwardingRules.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -6131,9 +6131,9 @@ func (g *GCEForwardingRules) List(ctx context.Context, region string, fl *filter // Insert ForwardingRule with key of value obj. func (g *GCEForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule) error { - glog.V(5).Infof("GCEForwardingRules.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEForwardingRules.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEForwardingRules.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEForwardingRules.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "ForwardingRules") @@ -6143,9 +6143,9 @@ func (g *GCEForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga. Version: meta.Version("ga"), Service: "ForwardingRules", } - glog.V(5).Infof("GCEForwardingRules.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEForwardingRules.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEForwardingRules.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEForwardingRules.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -6154,20 +6154,20 @@ func (g *GCEForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga. op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEForwardingRules.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEForwardingRules.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEForwardingRules.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEForwardingRules.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the ForwardingRule referenced by key. 
func (g *GCEForwardingRules) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEForwardingRules.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEForwardingRules.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEForwardingRules.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEForwardingRules.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "ForwardingRules") @@ -6177,9 +6177,9 @@ func (g *GCEForwardingRules) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "ForwardingRules", } - glog.V(5).Infof("GCEForwardingRules.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEForwardingRules.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEForwardingRules.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEForwardingRules.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.ForwardingRules.Delete(projectID, key.Region, key.Name) @@ -6187,12 +6187,12 @@ func (g *GCEForwardingRules) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } @@ -6251,7 +6251,7 @@ type MockAlphaForwardingRules struct { func (m *MockAlphaForwardingRules) Get(ctx context.Context, key *meta.Key) (*alpha.ForwardingRule, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -6263,12 +6263,12 @@ func (m *MockAlphaForwardingRules) Get(ctx context.Context, key *meta.Key) (*alp defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToAlpha() - glog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -6276,7 +6276,7 @@ func (m *MockAlphaForwardingRules) Get(ctx context.Context, key *meta.Key) (*alp Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaForwardingRules %v not found", key), } - glog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -6284,7 +6284,7 @@ func (m *MockAlphaForwardingRules) Get(ctx context.Context, key *meta.Key) (*alp func (m *MockAlphaForwardingRules) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.ForwardingRule, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { - 
glog.V(5).Infof("MockAlphaForwardingRules.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + klog.V(5).Infof("MockAlphaForwardingRules.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -6294,7 +6294,7 @@ func (m *MockAlphaForwardingRules) List(ctx context.Context, region string, fl * if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockAlphaForwardingRules.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + klog.V(5).Infof("MockAlphaForwardingRules.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } @@ -6310,7 +6310,7 @@ func (m *MockAlphaForwardingRules) List(ctx context.Context, region string, fl * objs = append(objs, obj.ToAlpha()) } - glog.V(5).Infof("MockAlphaForwardingRules.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + klog.V(5).Infof("MockAlphaForwardingRules.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } @@ -6318,7 +6318,7 @@ func (m *MockAlphaForwardingRules) List(ctx context.Context, region string, fl * func (m *MockAlphaForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *alpha.ForwardingRule) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -6330,7 +6330,7 @@ func (m *MockAlphaForwardingRules) Insert(ctx context.Context, key *meta.Key, ob defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -6338,7 +6338,7 @@ func (m *MockAlphaForwardingRules) Insert(ctx context.Context, key *meta.Key, ob Code: http.StatusConflict, Message: fmt.Sprintf("MockAlphaForwardingRules %v exists", key), } - glog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -6347,7 +6347,7 @@ func (m *MockAlphaForwardingRules) Insert(ctx context.Context, key *meta.Key, ob obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "forwardingRules", key) m.Objects[*key] = &MockForwardingRulesObj{obj} - glog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -6355,7 +6355,7 @@ func (m *MockAlphaForwardingRules) Insert(ctx context.Context, key *meta.Key, ob func (m *MockAlphaForwardingRules) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -6367,7 +6367,7 @@ func (m *MockAlphaForwardingRules) Delete(ctx context.Context, key *meta.Key) er defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -6375,12 +6375,12 @@ func (m 
*MockAlphaForwardingRules) Delete(ctx context.Context, key *meta.Key) er Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaForwardingRules %v not found", key), } - glog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = nil", ctx, key) return nil } @@ -6396,10 +6396,10 @@ type GCEAlphaForwardingRules struct { // Get the ForwardingRule named by key. func (g *GCEAlphaForwardingRules) Get(ctx context.Context, key *meta.Key) (*alpha.ForwardingRule, error) { - glog.V(5).Infof("GCEAlphaForwardingRules.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaForwardingRules.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaForwardingRules.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaForwardingRules.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "ForwardingRules") @@ -6409,21 +6409,21 @@ func (g *GCEAlphaForwardingRules) Get(ctx context.Context, key *meta.Key) (*alph Version: meta.Version("alpha"), Service: "ForwardingRules", } - glog.V(5).Infof("GCEAlphaForwardingRules.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaForwardingRules.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaForwardingRules.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaForwardingRules.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Alpha.ForwardingRules.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEAlphaForwardingRules.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEAlphaForwardingRules.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all ForwardingRule objects. func (g *GCEAlphaForwardingRules) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.ForwardingRule, error) { - glog.V(5).Infof("GCEAlphaForwardingRules.List(%v, %v, %v) called", ctx, region, fl) + klog.V(5).Infof("GCEAlphaForwardingRules.List(%v, %v, %v) called", ctx, region, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "ForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, @@ -6434,30 +6434,30 @@ func (g *GCEAlphaForwardingRules) List(ctx context.Context, region string, fl *f if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEAlphaForwardingRules.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + klog.V(5).Infof("GCEAlphaForwardingRules.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) call := g.s.Alpha.ForwardingRules.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } var all []*alpha.ForwardingRule f := func(l *alpha.ForwardingRuleList) error { - glog.V(5).Infof("GCEAlphaForwardingRules.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEAlphaForwardingRules.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEAlphaForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEAlphaForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEAlphaForwardingRules.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEAlphaForwardingRules.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEAlphaForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEAlphaForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -6465,9 +6465,9 @@ func (g *GCEAlphaForwardingRules) List(ctx context.Context, region string, fl *f // Insert ForwardingRule with key of value obj. func (g *GCEAlphaForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *alpha.ForwardingRule) error { - glog.V(5).Infof("GCEAlphaForwardingRules.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEAlphaForwardingRules.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEAlphaForwardingRules.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaForwardingRules.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "ForwardingRules") @@ -6477,9 +6477,9 @@ func (g *GCEAlphaForwardingRules) Insert(ctx context.Context, key *meta.Key, obj Version: meta.Version("alpha"), Service: "ForwardingRules", } - glog.V(5).Infof("GCEAlphaForwardingRules.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaForwardingRules.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaForwardingRules.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaForwardingRules.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -6488,20 +6488,20 @@ func (g *GCEAlphaForwardingRules) Insert(ctx context.Context, key *meta.Key, obj op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaForwardingRules.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaForwardingRules.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaForwardingRules.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEAlphaForwardingRules.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the ForwardingRule referenced by key. 
func (g *GCEAlphaForwardingRules) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEAlphaForwardingRules.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaForwardingRules.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaForwardingRules.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaForwardingRules.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "ForwardingRules") @@ -6511,9 +6511,9 @@ func (g *GCEAlphaForwardingRules) Delete(ctx context.Context, key *meta.Key) err Version: meta.Version("alpha"), Service: "ForwardingRules", } - glog.V(5).Infof("GCEAlphaForwardingRules.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaForwardingRules.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaForwardingRules.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaForwardingRules.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.ForwardingRules.Delete(projectID, key.Region, key.Name) @@ -6521,12 +6521,12 @@ func (g *GCEAlphaForwardingRules) Delete(ctx context.Context, key *meta.Key) err op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } @@ -6587,7 +6587,7 @@ type MockGlobalForwardingRules struct { func (m *MockGlobalForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.ForwardingRule, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -6599,12 +6599,12 @@ func (m *MockGlobalForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -6612,7 +6612,7 @@ func (m *MockGlobalForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga Code: http.StatusNotFound, Message: fmt.Sprintf("MockGlobalForwardingRules %v not found", key), } - glog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -6620,7 +6620,7 @@ func (m *MockGlobalForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga func (m *MockGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]*ga.ForwardingRule, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, 
fl, m); intercept { - glog.V(5).Infof("MockGlobalForwardingRules.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockGlobalForwardingRules.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -6630,7 +6630,7 @@ func (m *MockGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]* if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockGlobalForwardingRules.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockGlobalForwardingRules.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -6643,7 +6643,7 @@ func (m *MockGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]* objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockGlobalForwardingRules.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockGlobalForwardingRules.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -6651,7 +6651,7 @@ func (m *MockGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]* func (m *MockGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -6663,7 +6663,7 @@ func (m *MockGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, o defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -6671,7 +6671,7 @@ func (m *MockGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, o Code: http.StatusConflict, Message: fmt.Sprintf("MockGlobalForwardingRules %v exists", key), } - glog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -6680,7 +6680,7 @@ func (m *MockGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, o obj.SelfLink = SelfLink(meta.VersionGA, projectID, "forwardingRules", key) m.Objects[*key] = &MockGlobalForwardingRulesObj{obj} - glog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -6688,7 +6688,7 @@ func (m *MockGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, o func (m *MockGlobalForwardingRules) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -6700,7 +6700,7 @@ func (m *MockGlobalForwardingRules) Delete(ctx context.Context, key *meta.Key) e defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -6708,12 +6708,12 @@ func (m *MockGlobalForwardingRules) Delete(ctx 
context.Context, key *meta.Key) e Code: http.StatusNotFound, Message: fmt.Sprintf("MockGlobalForwardingRules %v not found", key), } - glog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = nil", ctx, key) return nil } @@ -6737,10 +6737,10 @@ type GCEGlobalForwardingRules struct { // Get the ForwardingRule named by key. func (g *GCEGlobalForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.ForwardingRule, error) { - glog.V(5).Infof("GCEGlobalForwardingRules.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEGlobalForwardingRules.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEGlobalForwardingRules.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEGlobalForwardingRules.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules") @@ -6750,21 +6750,21 @@ func (g *GCEGlobalForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga. Version: meta.Version("ga"), Service: "GlobalForwardingRules", } - glog.V(5).Infof("GCEGlobalForwardingRules.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEGlobalForwardingRules.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEGlobalForwardingRules.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalForwardingRules.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.GlobalForwardingRules.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEGlobalForwardingRules.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEGlobalForwardingRules.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all ForwardingRule objects. func (g *GCEGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]*ga.ForwardingRule, error) { - glog.V(5).Infof("GCEGlobalForwardingRules.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEGlobalForwardingRules.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, @@ -6775,30 +6775,30 @@ func (g *GCEGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]*g if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEGlobalForwardingRules.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEGlobalForwardingRules.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.GlobalForwardingRules.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.ForwardingRule f := func(l *ga.ForwardingRuleList) error { - glog.V(5).Infof("GCEGlobalForwardingRules.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEGlobalForwardingRules.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEGlobalForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEGlobalForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEGlobalForwardingRules.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEGlobalForwardingRules.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEGlobalForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEGlobalForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -6806,9 +6806,9 @@ func (g *GCEGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]*g // Insert ForwardingRule with key of value obj. func (g *GCEGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule) error { - glog.V(5).Infof("GCEGlobalForwardingRules.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEGlobalForwardingRules.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEGlobalForwardingRules.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEGlobalForwardingRules.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules") @@ -6818,9 +6818,9 @@ func (g *GCEGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, ob Version: meta.Version("ga"), Service: "GlobalForwardingRules", } - glog.V(5).Infof("GCEGlobalForwardingRules.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEGlobalForwardingRules.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEGlobalForwardingRules.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalForwardingRules.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -6829,20 +6829,20 @@ func (g *GCEGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, ob op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEGlobalForwardingRules.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEGlobalForwardingRules.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEGlobalForwardingRules.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEGlobalForwardingRules.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the ForwardingRule referenced by key. 
func (g *GCEGlobalForwardingRules) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEGlobalForwardingRules.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEGlobalForwardingRules.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEGlobalForwardingRules.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEGlobalForwardingRules.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules") @@ -6852,9 +6852,9 @@ func (g *GCEGlobalForwardingRules) Delete(ctx context.Context, key *meta.Key) er Version: meta.Version("ga"), Service: "GlobalForwardingRules", } - glog.V(5).Infof("GCEGlobalForwardingRules.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEGlobalForwardingRules.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEGlobalForwardingRules.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalForwardingRules.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.GlobalForwardingRules.Delete(projectID, key.Name) @@ -6863,21 +6863,21 @@ func (g *GCEGlobalForwardingRules) Delete(ctx context.Context, key *meta.Key) er op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } // SetTarget is a method on GCEGlobalForwardingRules. func (g *GCEGlobalForwardingRules) SetTarget(ctx context.Context, key *meta.Key, arg0 *ga.TargetReference) error { - glog.V(5).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules") @@ -6887,21 +6887,21 @@ func (g *GCEGlobalForwardingRules) SetTarget(ctx context.Context, key *meta.Key, Version: meta.Version("ga"), Service: "GlobalForwardingRules", } - glog.V(5).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.GlobalForwardingRules.SetTarget(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...) 
= %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...) = %+v", ctx, key, err) return err } @@ -6962,7 +6962,7 @@ type MockHealthChecks struct { func (m *MockHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HealthCheck, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -6974,12 +6974,12 @@ func (m *MockHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HealthCh defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -6987,7 +6987,7 @@ func (m *MockHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HealthCh Code: http.StatusNotFound, Message: fmt.Sprintf("MockHealthChecks %v not found", key), } - glog.V(5).Infof("MockHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -6995,7 +6995,7 @@ func (m *MockHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HealthCh func (m *MockHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HealthCheck, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -7005,7 +7005,7 @@ func (m *MockHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.Health if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -7018,7 +7018,7 @@ func (m *MockHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.Health objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -7026,7 +7026,7 @@ func (m *MockHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.Health func (m *MockHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.HealthCheck) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -7038,7 +7038,7 @@ func (m *MockHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.He defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = %v", ctx, 
key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -7046,7 +7046,7 @@ func (m *MockHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.He Code: http.StatusConflict, Message: fmt.Sprintf("MockHealthChecks %v exists", key), } - glog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -7055,7 +7055,7 @@ func (m *MockHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.He obj.SelfLink = SelfLink(meta.VersionGA, projectID, "healthChecks", key) m.Objects[*key] = &MockHealthChecksObj{obj} - glog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -7063,7 +7063,7 @@ func (m *MockHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.He func (m *MockHealthChecks) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -7075,7 +7075,7 @@ func (m *MockHealthChecks) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -7083,12 +7083,12 @@ func (m *MockHealthChecks) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockHealthChecks %v not found", key), } - glog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = nil", ctx, key) return nil } @@ -7112,10 +7112,10 @@ type GCEHealthChecks struct { // Get the HealthCheck named by key. 
func (g *GCEHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HealthCheck, error) { - glog.V(5).Infof("GCEHealthChecks.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEHealthChecks.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks") @@ -7125,21 +7125,21 @@ func (g *GCEHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HealthChe Version: meta.Version("ga"), Service: "HealthChecks", } - glog.V(5).Infof("GCEHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.HealthChecks.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all HealthCheck objects. func (g *GCEHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HealthCheck, error) { - glog.V(5).Infof("GCEHealthChecks.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEHealthChecks.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks") rk := &RateLimitKey{ ProjectID: projectID, @@ -7150,30 +7150,30 @@ func (g *GCEHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HealthC if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.HealthChecks.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.HealthCheck f := func(l *ga.HealthCheckList) error { - glog.V(5).Infof("GCEHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -7181,9 +7181,9 @@ func (g *GCEHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HealthC // Insert HealthCheck with key of value obj. 
func (g *GCEHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.HealthCheck) error { - glog.V(5).Infof("GCEHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks") @@ -7193,9 +7193,9 @@ func (g *GCEHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.Hea Version: meta.Version("ga"), Service: "HealthChecks", } - glog.V(5).Infof("GCEHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -7204,20 +7204,20 @@ func (g *GCEHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.Hea op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the HealthCheck referenced by key. func (g *GCEHealthChecks) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEHealthChecks.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEHealthChecks.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks") @@ -7227,9 +7227,9 @@ func (g *GCEHealthChecks) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "HealthChecks", } - glog.V(5).Infof("GCEHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.HealthChecks.Delete(projectID, key.Name) @@ -7238,21 +7238,21 @@ func (g *GCEHealthChecks) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } // Update is a method on GCEHealthChecks. 
func (g *GCEHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *ga.HealthCheck) error { - glog.V(5).Infof("GCEHealthChecks.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEHealthChecks.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks") @@ -7262,21 +7262,21 @@ func (g *GCEHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *ga.He Version: meta.Version("ga"), Service: "HealthChecks", } - glog.V(5).Infof("GCEHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.HealthChecks.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) return err } @@ -7337,7 +7337,7 @@ type MockBetaHealthChecks struct { func (m *MockBetaHealthChecks) Get(ctx context.Context, key *meta.Key) (*beta.HealthCheck, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -7349,12 +7349,12 @@ func (m *MockBetaHealthChecks) Get(ctx context.Context, key *meta.Key) (*beta.He defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockBetaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToBeta() - glog.V(5).Infof("MockBetaHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockBetaHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -7362,7 +7362,7 @@ func (m *MockBetaHealthChecks) Get(ctx context.Context, key *meta.Key) (*beta.He Code: http.StatusNotFound, Message: fmt.Sprintf("MockBetaHealthChecks %v not found", key), } - glog.V(5).Infof("MockBetaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -7370,7 +7370,7 @@ func (m *MockBetaHealthChecks) Get(ctx context.Context, key *meta.Key) (*beta.He func (m *MockBetaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*beta.HealthCheck, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockBetaHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockBetaHealthChecks.List(%v, %v) = [%v items], 
%v", ctx, fl, len(objs), err) return objs, err } } @@ -7380,7 +7380,7 @@ func (m *MockBetaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*beta. if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockBetaHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockBetaHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -7393,7 +7393,7 @@ func (m *MockBetaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*beta. objs = append(objs, obj.ToBeta()) } - glog.V(5).Infof("MockBetaHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockBetaHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -7401,7 +7401,7 @@ func (m *MockBetaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*beta. func (m *MockBetaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *beta.HealthCheck) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockBetaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -7413,7 +7413,7 @@ func (m *MockBetaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *b defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockBetaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -7421,7 +7421,7 @@ func (m *MockBetaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *b Code: http.StatusConflict, Message: fmt.Sprintf("MockBetaHealthChecks %v exists", key), } - glog.V(5).Infof("MockBetaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -7430,7 +7430,7 @@ func (m *MockBetaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *b obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "healthChecks", key) m.Objects[*key] = &MockHealthChecksObj{obj} - glog.V(5).Infof("MockBetaHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockBetaHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -7438,7 +7438,7 @@ func (m *MockBetaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *b func (m *MockBetaHealthChecks) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -7450,7 +7450,7 @@ func (m *MockBetaHealthChecks) Delete(ctx context.Context, key *meta.Key) error defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -7458,12 +7458,12 @@ func (m *MockBetaHealthChecks) Delete(ctx context.Context, key *meta.Key) error Code: http.StatusNotFound, Message: fmt.Sprintf("MockBetaHealthChecks %v not found", key), } - glog.V(5).Infof("MockBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } 
delete(m.Objects, *key) - glog.V(5).Infof("MockBetaHealthChecks.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockBetaHealthChecks.Delete(%v, %v) = nil", ctx, key) return nil } @@ -7487,10 +7487,10 @@ type GCEBetaHealthChecks struct { // Get the HealthCheck named by key. func (g *GCEBetaHealthChecks) Get(ctx context.Context, key *meta.Key) (*beta.HealthCheck, error) { - glog.V(5).Infof("GCEBetaHealthChecks.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBetaHealthChecks.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "HealthChecks") @@ -7500,21 +7500,21 @@ func (g *GCEBetaHealthChecks) Get(ctx context.Context, key *meta.Key) (*beta.Hea Version: meta.Version("beta"), Service: "HealthChecks", } - glog.V(5).Infof("GCEBetaHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Beta.HealthChecks.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEBetaHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBetaHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all HealthCheck objects. func (g *GCEBetaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*beta.HealthCheck, error) { - glog.V(5).Infof("GCEBetaHealthChecks.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEBetaHealthChecks.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "HealthChecks") rk := &RateLimitKey{ ProjectID: projectID, @@ -7525,30 +7525,30 @@ func (g *GCEBetaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*beta.H if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEBetaHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEBetaHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.Beta.HealthChecks.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*beta.HealthCheck f := func(l *beta.HealthCheckList) error { - glog.V(5).Infof("GCEBetaHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEBetaHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEBetaHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEBetaHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEBetaHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEBetaHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEBetaHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEBetaHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -7556,9 +7556,9 @@ func (g *GCEBetaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*beta.H // Insert HealthCheck with key of value obj. func (g *GCEBetaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *beta.HealthCheck) error { - glog.V(5).Infof("GCEBetaHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEBetaHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEBetaHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "HealthChecks") @@ -7568,9 +7568,9 @@ func (g *GCEBetaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *be Version: meta.Version("beta"), Service: "HealthChecks", } - glog.V(5).Infof("GCEBetaHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -7579,20 +7579,20 @@ func (g *GCEBetaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *be op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEBetaHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the HealthCheck referenced by key. 
func (g *GCEBetaHealthChecks) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEBetaHealthChecks.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBetaHealthChecks.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "HealthChecks") @@ -7602,9 +7602,9 @@ func (g *GCEBetaHealthChecks) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("beta"), Service: "HealthChecks", } - glog.V(5).Infof("GCEBetaHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.HealthChecks.Delete(projectID, key.Name) @@ -7613,21 +7613,21 @@ func (g *GCEBetaHealthChecks) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } // Update is a method on GCEBetaHealthChecks. func (g *GCEBetaHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *beta.HealthCheck) error { - glog.V(5).Infof("GCEBetaHealthChecks.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaHealthChecks.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "HealthChecks") @@ -7637,21 +7637,21 @@ func (g *GCEBetaHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *b Version: meta.Version("beta"), Service: "HealthChecks", } - glog.V(5).Infof("GCEBetaHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.HealthChecks.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaHealthChecks.Update(%v, %v, ...) 
= %+v", ctx, key, err) return err } @@ -7712,7 +7712,7 @@ type MockAlphaHealthChecks struct { func (m *MockAlphaHealthChecks) Get(ctx context.Context, key *meta.Key) (*alpha.HealthCheck, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -7724,12 +7724,12 @@ func (m *MockAlphaHealthChecks) Get(ctx context.Context, key *meta.Key) (*alpha. defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToAlpha() - glog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -7737,7 +7737,7 @@ func (m *MockAlphaHealthChecks) Get(ctx context.Context, key *meta.Key) (*alpha. Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaHealthChecks %v not found", key), } - glog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -7745,7 +7745,7 @@ func (m *MockAlphaHealthChecks) Get(ctx context.Context, key *meta.Key) (*alpha. func (m *MockAlphaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*alpha.HealthCheck, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockAlphaHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockAlphaHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -7755,7 +7755,7 @@ func (m *MockAlphaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*alph if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockAlphaHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockAlphaHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -7768,7 +7768,7 @@ func (m *MockAlphaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*alph objs = append(objs, obj.ToAlpha()) } - glog.V(5).Infof("MockAlphaHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockAlphaHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -7776,7 +7776,7 @@ func (m *MockAlphaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*alph func (m *MockAlphaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *alpha.HealthCheck) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -7788,7 +7788,7 @@ func (m *MockAlphaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj * defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -7796,7 +7796,7 @@ func (m 
*MockAlphaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj * Code: http.StatusConflict, Message: fmt.Sprintf("MockAlphaHealthChecks %v exists", key), } - glog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -7805,7 +7805,7 @@ func (m *MockAlphaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj * obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "healthChecks", key) m.Objects[*key] = &MockHealthChecksObj{obj} - glog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -7813,7 +7813,7 @@ func (m *MockAlphaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj * func (m *MockAlphaHealthChecks) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -7825,7 +7825,7 @@ func (m *MockAlphaHealthChecks) Delete(ctx context.Context, key *meta.Key) error defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -7833,12 +7833,12 @@ func (m *MockAlphaHealthChecks) Delete(ctx context.Context, key *meta.Key) error Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaHealthChecks %v not found", key), } - glog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = nil", ctx, key) return nil } @@ -7862,10 +7862,10 @@ type GCEAlphaHealthChecks struct { // Get the HealthCheck named by key. 
func (g *GCEAlphaHealthChecks) Get(ctx context.Context, key *meta.Key) (*alpha.HealthCheck, error) { - glog.V(5).Infof("GCEAlphaHealthChecks.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaHealthChecks.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks") @@ -7875,21 +7875,21 @@ func (g *GCEAlphaHealthChecks) Get(ctx context.Context, key *meta.Key) (*alpha.H Version: meta.Version("alpha"), Service: "HealthChecks", } - glog.V(5).Infof("GCEAlphaHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Alpha.HealthChecks.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEAlphaHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all HealthCheck objects. func (g *GCEAlphaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*alpha.HealthCheck, error) { - glog.V(5).Infof("GCEAlphaHealthChecks.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEAlphaHealthChecks.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks") rk := &RateLimitKey{ ProjectID: projectID, @@ -7900,30 +7900,30 @@ func (g *GCEAlphaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*alpha if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEAlphaHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEAlphaHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.Alpha.HealthChecks.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*alpha.HealthCheck f := func(l *alpha.HealthCheckList) error { - glog.V(5).Infof("GCEAlphaHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEAlphaHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEAlphaHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEAlphaHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEAlphaHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEAlphaHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEAlphaHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEAlphaHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -7931,9 +7931,9 @@ func (g *GCEAlphaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*alpha // Insert HealthCheck with key of value obj. func (g *GCEAlphaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *alpha.HealthCheck) error { - glog.V(5).Infof("GCEAlphaHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEAlphaHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEAlphaHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks") @@ -7943,9 +7943,9 @@ func (g *GCEAlphaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *a Version: meta.Version("alpha"), Service: "HealthChecks", } - glog.V(5).Infof("GCEAlphaHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -7954,20 +7954,20 @@ func (g *GCEAlphaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *a op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the HealthCheck referenced by key. 
func (g *GCEAlphaHealthChecks) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEAlphaHealthChecks.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaHealthChecks.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks") @@ -7977,9 +7977,9 @@ func (g *GCEAlphaHealthChecks) Delete(ctx context.Context, key *meta.Key) error Version: meta.Version("alpha"), Service: "HealthChecks", } - glog.V(5).Infof("GCEAlphaHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.HealthChecks.Delete(projectID, key.Name) @@ -7988,21 +7988,21 @@ func (g *GCEAlphaHealthChecks) Delete(ctx context.Context, key *meta.Key) error op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } // Update is a method on GCEAlphaHealthChecks. func (g *GCEAlphaHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *alpha.HealthCheck) error { - glog.V(5).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks") @@ -8012,21 +8012,21 @@ func (g *GCEAlphaHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 * Version: meta.Version("alpha"), Service: "HealthChecks", } - glog.V(5).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.HealthChecks.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...) 
= %+v", ctx, key, err) return err } @@ -8087,7 +8087,7 @@ type MockHttpHealthChecks struct { func (m *MockHttpHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HttpHealthCheck, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -8099,12 +8099,12 @@ func (m *MockHttpHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.Http defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -8112,7 +8112,7 @@ func (m *MockHttpHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.Http Code: http.StatusNotFound, Message: fmt.Sprintf("MockHttpHealthChecks %v not found", key), } - glog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -8120,7 +8120,7 @@ func (m *MockHttpHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.Http func (m *MockHttpHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HttpHealthCheck, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockHttpHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockHttpHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -8130,7 +8130,7 @@ func (m *MockHttpHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.Ht if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockHttpHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockHttpHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -8143,7 +8143,7 @@ func (m *MockHttpHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.Ht objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockHttpHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockHttpHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -8151,7 +8151,7 @@ func (m *MockHttpHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.Ht func (m *MockHttpHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.HttpHealthCheck) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -8163,7 +8163,7 @@ func (m *MockHttpHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *g defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -8171,7 +8171,7 @@ func (m *MockHttpHealthChecks) Insert(ctx 
context.Context, key *meta.Key, obj *g Code: http.StatusConflict, Message: fmt.Sprintf("MockHttpHealthChecks %v exists", key), } - glog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -8180,7 +8180,7 @@ func (m *MockHttpHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *g obj.SelfLink = SelfLink(meta.VersionGA, projectID, "httpHealthChecks", key) m.Objects[*key] = &MockHttpHealthChecksObj{obj} - glog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -8188,7 +8188,7 @@ func (m *MockHttpHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *g func (m *MockHttpHealthChecks) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -8200,7 +8200,7 @@ func (m *MockHttpHealthChecks) Delete(ctx context.Context, key *meta.Key) error defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -8208,12 +8208,12 @@ func (m *MockHttpHealthChecks) Delete(ctx context.Context, key *meta.Key) error Code: http.StatusNotFound, Message: fmt.Sprintf("MockHttpHealthChecks %v not found", key), } - glog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = nil", ctx, key) return nil } @@ -8237,10 +8237,10 @@ type GCEHttpHealthChecks struct { // Get the HttpHealthCheck named by key. 
func (g *GCEHttpHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HttpHealthCheck, error) { - glog.V(5).Infof("GCEHttpHealthChecks.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEHttpHealthChecks.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEHttpHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHttpHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks") @@ -8250,21 +8250,21 @@ func (g *GCEHttpHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HttpH Version: meta.Version("ga"), Service: "HttpHealthChecks", } - glog.V(5).Infof("GCEHttpHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHttpHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEHttpHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHttpHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.HttpHealthChecks.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEHttpHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEHttpHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all HttpHealthCheck objects. func (g *GCEHttpHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HttpHealthCheck, error) { - glog.V(5).Infof("GCEHttpHealthChecks.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEHttpHealthChecks.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks") rk := &RateLimitKey{ ProjectID: projectID, @@ -8275,30 +8275,30 @@ func (g *GCEHttpHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.Htt if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEHttpHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEHttpHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.HttpHealthChecks.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.HttpHealthCheck f := func(l *ga.HttpHealthCheckList) error { - glog.V(5).Infof("GCEHttpHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEHttpHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEHttpHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEHttpHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEHttpHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEHttpHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEHttpHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEHttpHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -8306,9 +8306,9 @@ func (g *GCEHttpHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.Htt // Insert HttpHealthCheck with key of value obj. func (g *GCEHttpHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.HttpHealthCheck) error { - glog.V(5).Infof("GCEHttpHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEHttpHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEHttpHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHttpHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks") @@ -8318,9 +8318,9 @@ func (g *GCEHttpHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga Version: meta.Version("ga"), Service: "HttpHealthChecks", } - glog.V(5).Infof("GCEHttpHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHttpHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEHttpHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHttpHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -8329,20 +8329,20 @@ func (g *GCEHttpHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEHttpHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEHttpHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEHttpHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEHttpHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the HttpHealthCheck referenced by key. 
func (g *GCEHttpHealthChecks) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEHttpHealthChecks.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEHttpHealthChecks.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEHttpHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHttpHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks") @@ -8352,9 +8352,9 @@ func (g *GCEHttpHealthChecks) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "HttpHealthChecks", } - glog.V(5).Infof("GCEHttpHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHttpHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEHttpHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHttpHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.HttpHealthChecks.Delete(projectID, key.Name) @@ -8363,21 +8363,21 @@ func (g *GCEHttpHealthChecks) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } // Update is a method on GCEHttpHealthChecks. func (g *GCEHttpHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *ga.HttpHealthCheck) error { - glog.V(5).Infof("GCEHttpHealthChecks.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEHttpHealthChecks.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEHttpHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHttpHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks") @@ -8387,21 +8387,21 @@ func (g *GCEHttpHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *g Version: meta.Version("ga"), Service: "HttpHealthChecks", } - glog.V(5).Infof("GCEHttpHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHttpHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEHttpHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHttpHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.HttpHealthChecks.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEHttpHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEHttpHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEHttpHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEHttpHealthChecks.Update(%v, %v, ...) 
= %+v", ctx, key, err) return err } @@ -8462,7 +8462,7 @@ type MockHttpsHealthChecks struct { func (m *MockHttpsHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HttpsHealthCheck, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -8474,12 +8474,12 @@ func (m *MockHttpsHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.Htt defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -8487,7 +8487,7 @@ func (m *MockHttpsHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.Htt Code: http.StatusNotFound, Message: fmt.Sprintf("MockHttpsHealthChecks %v not found", key), } - glog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -8495,7 +8495,7 @@ func (m *MockHttpsHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.Htt func (m *MockHttpsHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HttpsHealthCheck, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockHttpsHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockHttpsHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -8505,7 +8505,7 @@ func (m *MockHttpsHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.H if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockHttpsHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockHttpsHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -8518,7 +8518,7 @@ func (m *MockHttpsHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.H objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockHttpsHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockHttpsHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -8526,7 +8526,7 @@ func (m *MockHttpsHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.H func (m *MockHttpsHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.HttpsHealthCheck) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -8538,7 +8538,7 @@ func (m *MockHttpsHealthChecks) Insert(ctx context.Context, key *meta.Key, obj * defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -8546,7 +8546,7 @@ func (m 
*MockHttpsHealthChecks) Insert(ctx context.Context, key *meta.Key, obj * Code: http.StatusConflict, Message: fmt.Sprintf("MockHttpsHealthChecks %v exists", key), } - glog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -8555,7 +8555,7 @@ func (m *MockHttpsHealthChecks) Insert(ctx context.Context, key *meta.Key, obj * obj.SelfLink = SelfLink(meta.VersionGA, projectID, "httpsHealthChecks", key) m.Objects[*key] = &MockHttpsHealthChecksObj{obj} - glog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -8563,7 +8563,7 @@ func (m *MockHttpsHealthChecks) Insert(ctx context.Context, key *meta.Key, obj * func (m *MockHttpsHealthChecks) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -8575,7 +8575,7 @@ func (m *MockHttpsHealthChecks) Delete(ctx context.Context, key *meta.Key) error defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -8583,12 +8583,12 @@ func (m *MockHttpsHealthChecks) Delete(ctx context.Context, key *meta.Key) error Code: http.StatusNotFound, Message: fmt.Sprintf("MockHttpsHealthChecks %v not found", key), } - glog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = nil", ctx, key) return nil } @@ -8612,10 +8612,10 @@ type GCEHttpsHealthChecks struct { // Get the HttpsHealthCheck named by key. 
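The stretch of diff above and below is the same mechanical change applied throughout this generated client: `github.com/golang/glog` is swapped for `k8s.io/klog`, whose v1 API deliberately mirrors glog's, so every call site keeps its exact shape and only the logger name changes. A minimal, self-contained sketch of what the swap means for a caller (assuming klog v1; the flag values are illustrative):

```go
package main

import (
	"flag"

	"k8s.io/klog" // was: "github.com/golang/glog"
)

func main() {
	// klog keeps glog's flags but requires explicit registration,
	// whereas glog registered them in its own init().
	klog.InitFlags(nil)
	flag.Set("logtostderr", "true")
	flag.Set("v", "5")
	flag.Parse()

	// Call sites are byte-for-byte identical to the glog versions above.
	klog.V(5).Infof("GCEHttpsHealthChecks.Get(%v): called", "hc-1")
	klog.Flush()
}
```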
func (g *GCEHttpsHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HttpsHealthCheck, error) { - glog.V(5).Infof("GCEHttpsHealthChecks.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEHttpsHealthChecks.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEHttpsHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHttpsHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks") @@ -8625,21 +8625,21 @@ func (g *GCEHttpsHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.Http Version: meta.Version("ga"), Service: "HttpsHealthChecks", } - glog.V(5).Infof("GCEHttpsHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHttpsHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEHttpsHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.HttpsHealthChecks.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEHttpsHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all HttpsHealthCheck objects. func (g *GCEHttpsHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HttpsHealthCheck, error) { - glog.V(5).Infof("GCEHttpsHealthChecks.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEHttpsHealthChecks.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks") rk := &RateLimitKey{ ProjectID: projectID, @@ -8650,30 +8650,30 @@ func (g *GCEHttpsHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.Ht if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEHttpsHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEHttpsHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.HttpsHealthChecks.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.HttpsHealthCheck f := func(l *ga.HttpsHealthCheckList) error { - glog.V(5).Infof("GCEHttpsHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEHttpsHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEHttpsHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEHttpsHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEHttpsHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEHttpsHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEHttpsHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEHttpsHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -8681,9 +8681,9 @@ func (g *GCEHttpsHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.Ht // Insert HttpsHealthCheck with key of value obj. func (g *GCEHttpsHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.HttpsHealthCheck) error { - glog.V(5).Infof("GCEHttpsHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEHttpsHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEHttpsHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHttpsHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks") @@ -8693,9 +8693,9 @@ func (g *GCEHttpsHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *g Version: meta.Version("ga"), Service: "HttpsHealthChecks", } - glog.V(5).Infof("GCEHttpsHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHttpsHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEHttpsHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -8704,20 +8704,20 @@ func (g *GCEHttpsHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *g op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEHttpsHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEHttpsHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the HttpsHealthCheck referenced by key. 
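The `List` bodies above gate their logging on verbosity: a cheap item count at `-v=4` and a per-item dump at `-v=5`. Note that as generated, `if klog.V(4) { ... } else if klog.V(5) { ... }` never reaches the second branch, because `V(4)` is already true at any verbosity of 4 or higher. A sketch of the ordering that makes both levels reachable (this reverses the generated order and is not what the diff ships; klog v1's `V` returns a bool-like `Verbose`, usable directly in an `if`):

```go
package main

import (
	"flag"
	"fmt"

	"k8s.io/klog"
)

type item struct{ Name string }

// logList logs a cheap summary at -v=4 and the full per-item dump at -v=5.
// Checking V(5) first keeps the detailed branch reachable, since V(4) is
// already true whenever verbosity is 5 or higher.
func logList(all []item) {
	if klog.V(5) {
		var asStr []string
		for _, o := range all {
			asStr = append(asStr, fmt.Sprintf("%+v", o))
		}
		klog.V(5).Infof("List = %v", asStr)
	} else if klog.V(4) {
		klog.V(4).Infof("List = [%v items]", len(all))
	}
}

func main() {
	klog.InitFlags(nil)
	flag.Set("logtostderr", "true")
	flag.Set("v", "5")
	flag.Parse()
	logList([]item{{"a"}, {"b"}})
	klog.Flush()
}
```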
func (g *GCEHttpsHealthChecks) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEHttpsHealthChecks.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEHttpsHealthChecks.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEHttpsHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHttpsHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks") @@ -8727,9 +8727,9 @@ func (g *GCEHttpsHealthChecks) Delete(ctx context.Context, key *meta.Key) error Version: meta.Version("ga"), Service: "HttpsHealthChecks", } - glog.V(5).Infof("GCEHttpsHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHttpsHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEHttpsHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.HttpsHealthChecks.Delete(projectID, key.Name) @@ -8738,21 +8738,21 @@ func (g *GCEHttpsHealthChecks) Delete(ctx context.Context, key *meta.Key) error op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } // Update is a method on GCEHttpsHealthChecks. func (g *GCEHttpsHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *ga.HttpsHealthCheck) error { - glog.V(5).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks") @@ -8762,21 +8762,21 @@ func (g *GCEHttpsHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 * Version: meta.Version("ga"), Service: "HttpsHealthChecks", } - glog.V(5).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.HttpsHealthChecks.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...) 
= %+v", ctx, key, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) return err } @@ -8843,7 +8843,7 @@ type MockInstanceGroups struct { func (m *MockInstanceGroups) Get(ctx context.Context, key *meta.Key) (*ga.InstanceGroup, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -8855,12 +8855,12 @@ func (m *MockInstanceGroups) Get(ctx context.Context, key *meta.Key) (*ga.Instan defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -8868,7 +8868,7 @@ func (m *MockInstanceGroups) Get(ctx context.Context, key *meta.Key) (*ga.Instan Code: http.StatusNotFound, Message: fmt.Sprintf("MockInstanceGroups %v not found", key), } - glog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -8876,7 +8876,7 @@ func (m *MockInstanceGroups) Get(ctx context.Context, key *meta.Key) (*ga.Instan func (m *MockInstanceGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.InstanceGroup, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { - glog.V(5).Infof("MockInstanceGroups.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + klog.V(5).Infof("MockInstanceGroups.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) return objs, err } } @@ -8886,7 +8886,7 @@ func (m *MockInstanceGroups) List(ctx context.Context, zone string, fl *filter.F if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockInstanceGroups.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + klog.V(5).Infof("MockInstanceGroups.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) return nil, *m.ListError } @@ -8902,7 +8902,7 @@ func (m *MockInstanceGroups) List(ctx context.Context, zone string, fl *filter.F objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockInstanceGroups.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + klog.V(5).Infof("MockInstanceGroups.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) return objs, nil } @@ -8910,7 +8910,7 @@ func (m *MockInstanceGroups) List(ctx context.Context, zone string, fl *filter.F func (m *MockInstanceGroups) Insert(ctx context.Context, key *meta.Key, obj *ga.InstanceGroup) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -8922,7 +8922,7 @@ func (m *MockInstanceGroups) Insert(ctx context.Context, key *meta.Key, obj *ga. 
defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -8930,7 +8930,7 @@ func (m *MockInstanceGroups) Insert(ctx context.Context, key *meta.Key, obj *ga. Code: http.StatusConflict, Message: fmt.Sprintf("MockInstanceGroups %v exists", key), } - glog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -8939,7 +8939,7 @@ func (m *MockInstanceGroups) Insert(ctx context.Context, key *meta.Key, obj *ga. obj.SelfLink = SelfLink(meta.VersionGA, projectID, "instanceGroups", key) m.Objects[*key] = &MockInstanceGroupsObj{obj} - glog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -8947,7 +8947,7 @@ func (m *MockInstanceGroups) Insert(ctx context.Context, key *meta.Key, obj *ga. func (m *MockInstanceGroups) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -8959,7 +8959,7 @@ func (m *MockInstanceGroups) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -8967,12 +8967,12 @@ func (m *MockInstanceGroups) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockInstanceGroups %v not found", key), } - glog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = nil", ctx, key) return nil } @@ -9020,10 +9020,10 @@ type GCEInstanceGroups struct { // Get the InstanceGroup named by key. 
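The `MockInstanceGroups` methods above all begin with the same hook check: if a test installs `GetHook`/`InsertHook`/`DeleteHook` and the hook reports `intercept == true`, the canned in-memory behavior is skipped entirely. A self-contained sketch of that pattern, using stand-in types rather than the real package:

```go
package main

import (
	"context"
	"fmt"
)

type Key struct{ Name string }
type Instance struct{ Name string }

type MockInstances struct {
	Objects map[Key]*Instance
	// GetHook mirrors the generated mocks: return intercept=true to
	// override the default in-memory lookup entirely.
	GetHook func(ctx context.Context, key *Key, m *MockInstances) (bool, *Instance, error)
}

func (m *MockInstances) Get(ctx context.Context, key *Key) (*Instance, error) {
	if m.GetHook != nil {
		if intercept, obj, err := m.GetHook(ctx, key, m); intercept {
			return obj, err
		}
	}
	if obj, ok := m.Objects[*key]; ok {
		return obj, nil
	}
	return nil, fmt.Errorf("%v not found", key)
}

func main() {
	m := &MockInstances{Objects: map[Key]*Instance{}}
	// Inject an API error for one key without touching the fake's state.
	m.GetHook = func(_ context.Context, key *Key, _ *MockInstances) (bool, *Instance, error) {
		if key.Name == "flaky" {
			return true, nil, fmt.Errorf("injected 500")
		}
		return false, nil, nil
	}
	_, err := m.Get(context.Background(), &Key{Name: "flaky"})
	fmt.Println(err) // injected 500
}
```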
func (g *GCEInstanceGroups) Get(ctx context.Context, key *meta.Key) (*ga.InstanceGroup, error) { - glog.V(5).Infof("GCEInstanceGroups.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEInstanceGroups.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEInstanceGroups.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEInstanceGroups.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") @@ -9033,21 +9033,21 @@ func (g *GCEInstanceGroups) Get(ctx context.Context, key *meta.Key) (*ga.Instanc Version: meta.Version("ga"), Service: "InstanceGroups", } - glog.V(5).Infof("GCEInstanceGroups.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEInstanceGroups.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEInstanceGroups.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.InstanceGroups.Get(projectID, key.Zone, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEInstanceGroups.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEInstanceGroups.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all InstanceGroup objects. func (g *GCEInstanceGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.InstanceGroup, error) { - glog.V(5).Infof("GCEInstanceGroups.List(%v, %v, %v) called", ctx, zone, fl) + klog.V(5).Infof("GCEInstanceGroups.List(%v, %v, %v) called", ctx, zone, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") rk := &RateLimitKey{ ProjectID: projectID, @@ -9058,30 +9058,30 @@ func (g *GCEInstanceGroups) List(ctx context.Context, zone string, fl *filter.F) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEInstanceGroups.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) + klog.V(5).Infof("GCEInstanceGroups.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) call := g.s.GA.InstanceGroups.List(projectID, zone) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.InstanceGroup f := func(l *ga.InstanceGroupList) error { - glog.V(5).Infof("GCEInstanceGroups.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEInstanceGroups.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEInstanceGroups.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEInstanceGroups.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEInstanceGroups.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEInstanceGroups.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEInstanceGroups.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEInstanceGroups.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -9089,9 +9089,9 @@ func (g *GCEInstanceGroups) List(ctx context.Context, zone string, fl *filter.F) // Insert InstanceGroup with key of value obj. func (g *GCEInstanceGroups) Insert(ctx context.Context, key *meta.Key, obj *ga.InstanceGroup) error { - glog.V(5).Infof("GCEInstanceGroups.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEInstanceGroups.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEInstanceGroups.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEInstanceGroups.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") @@ -9101,9 +9101,9 @@ func (g *GCEInstanceGroups) Insert(ctx context.Context, key *meta.Key, obj *ga.I Version: meta.Version("ga"), Service: "InstanceGroups", } - glog.V(5).Infof("GCEInstanceGroups.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEInstanceGroups.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEInstanceGroups.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -9112,20 +9112,20 @@ func (g *GCEInstanceGroups) Insert(ctx context.Context, key *meta.Key, obj *ga.I op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEInstanceGroups.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEInstanceGroups.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEInstanceGroups.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the InstanceGroup referenced by key. 
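Every `GCEInstanceGroups` method above funnels through the same seam before touching the API: build a `RateLimitKey` identifying project, operation, version, and service, then block in `RateLimiter.Accept`, which may stall or fail the call before any request is sent. A sketch of that contract with a trivial token bucket behind it; the interface mirrors the generated code, but the implementation is an illustrative assumption, not the library's:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

type RateLimitKey struct {
	ProjectID string
	Operation string
	Version   string
	Service   string
}

type RateLimiter interface {
	Accept(ctx context.Context, key *RateLimitKey) error
}

// bucketLimiter applies one shared token bucket to every call; a real
// implementation would likely key separate buckets off (Service, Operation).
type bucketLimiter struct{ l *rate.Limiter }

func (b *bucketLimiter) Accept(ctx context.Context, key *RateLimitKey) error {
	// Wait blocks until a token is available or ctx is cancelled.
	return b.l.Wait(ctx)
}

func main() {
	rl := &bucketLimiter{l: rate.NewLimiter(rate.Every(100*time.Millisecond), 1)}
	rk := &RateLimitKey{ProjectID: "my-project", Operation: "Delete", Version: "ga", Service: "InstanceGroups"}
	for i := 0; i < 3; i++ {
		if err := rl.Accept(context.Background(), rk); err != nil {
			fmt.Println("rate limiter:", err)
			return
		}
		fmt.Println("call", i, "admitted")
	}
}
```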
func (g *GCEInstanceGroups) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEInstanceGroups.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEInstanceGroups.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEInstanceGroups.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEInstanceGroups.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") @@ -9135,9 +9135,9 @@ func (g *GCEInstanceGroups) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "InstanceGroups", } - glog.V(5).Infof("GCEInstanceGroups.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEInstanceGroups.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEInstanceGroups.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.InstanceGroups.Delete(projectID, key.Zone, key.Name) @@ -9145,21 +9145,21 @@ func (g *GCEInstanceGroups) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) return err } // AddInstances is a method on GCEInstanceGroups. func (g *GCEInstanceGroups) AddInstances(ctx context.Context, key *meta.Key, arg0 *ga.InstanceGroupsAddInstancesRequest) error { - glog.V(5).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") @@ -9169,30 +9169,30 @@ func (g *GCEInstanceGroups) AddInstances(ctx context.Context, key *meta.Key, arg Version: meta.Version("ga"), Service: "InstanceGroups", } - glog.V(5).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.InstanceGroups.AddInstances(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...) 
= %+v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...) = %+v", ctx, key, err) return err } // ListInstances is a method on GCEInstanceGroups. func (g *GCEInstanceGroups) ListInstances(ctx context.Context, key *meta.Key, arg0 *ga.InstanceGroupsListInstancesRequest, fl *filter.F) ([]*ga.InstanceWithNamedPorts, error) { - glog.V(5).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") @@ -9202,41 +9202,41 @@ func (g *GCEInstanceGroups) ListInstances(ctx context.Context, key *meta.Key, ar Version: meta.Version("ga"), Service: "InstanceGroups", } - glog.V(5).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.InstanceGroups.ListInstances(projectID, key.Zone, key.Name, arg0) var all []*ga.InstanceWithNamedPorts f := func(l *ga.InstanceGroupsListInstances) error { - glog.V(5).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): page %+v", ctx, key, l) + klog.V(5).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): page %+v", ctx, key, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...) = %v, %v", ctx, key, nil, err) + klog.V(4).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...) = %v, %v", ctx, key, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...) = [%v items], %v", ctx, key, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...) = [%v items], %v", ctx, key, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...) = %v, %v", ctx, key, asStr, nil) + klog.V(5).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...) = %v, %v", ctx, key, asStr, nil) } return all, nil } // RemoveInstances is a method on GCEInstanceGroups. 
func (g *GCEInstanceGroups) RemoveInstances(ctx context.Context, key *meta.Key, arg0 *ga.InstanceGroupsRemoveInstancesRequest) error { - glog.V(5).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") @@ -9246,30 +9246,30 @@ func (g *GCEInstanceGroups) RemoveInstances(ctx context.Context, key *meta.Key, Version: meta.Version("ga"), Service: "InstanceGroups", } - glog.V(5).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.InstanceGroups.RemoveInstances(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...) = %+v", ctx, key, err) return err } // SetNamedPorts is a method on GCEInstanceGroups. func (g *GCEInstanceGroups) SetNamedPorts(ctx context.Context, key *meta.Key, arg0 *ga.InstanceGroupsSetNamedPortsRequest) error { - glog.V(5).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") @@ -9279,21 +9279,21 @@ func (g *GCEInstanceGroups) SetNamedPorts(ctx context.Context, key *meta.Key, ar Version: meta.Version("ga"), Service: "InstanceGroups", } - glog.V(5).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.InstanceGroups.SetNamedPorts(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...) 
= %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...) = %+v", ctx, key, err) return err } @@ -9356,7 +9356,7 @@ type MockInstances struct { func (m *MockInstances) Get(ctx context.Context, key *meta.Key) (*ga.Instance, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockInstances.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockInstances.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -9368,12 +9368,12 @@ func (m *MockInstances) Get(ctx context.Context, key *meta.Key) (*ga.Instance, e defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockInstances.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockInstances.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockInstances.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockInstances.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -9381,7 +9381,7 @@ func (m *MockInstances) Get(ctx context.Context, key *meta.Key) (*ga.Instance, e Code: http.StatusNotFound, Message: fmt.Sprintf("MockInstances %v not found", key), } - glog.V(5).Infof("MockInstances.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockInstances.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -9389,7 +9389,7 @@ func (m *MockInstances) Get(ctx context.Context, key *meta.Key) (*ga.Instance, e func (m *MockInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Instance, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { - glog.V(5).Infof("MockInstances.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + klog.V(5).Infof("MockInstances.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) return objs, err } } @@ -9399,7 +9399,7 @@ func (m *MockInstances) List(ctx context.Context, zone string, fl *filter.F) ([] if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockInstances.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + klog.V(5).Infof("MockInstances.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) return nil, *m.ListError } @@ -9415,7 +9415,7 @@ func (m *MockInstances) List(ctx context.Context, zone string, fl *filter.F) ([] objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockInstances.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + klog.V(5).Infof("MockInstances.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) return objs, nil } @@ -9423,7 +9423,7 @@ func (m *MockInstances) List(ctx context.Context, zone string, fl *filter.F) ([] func (m *MockInstances) Insert(ctx context.Context, key *meta.Key, obj *ga.Instance) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -9435,7 +9435,7 @@ func (m *MockInstances) Insert(ctx context.Context, key *meta.Key, obj *ga.Insta defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = %v", ctx, key, 
obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -9443,7 +9443,7 @@ func (m *MockInstances) Insert(ctx context.Context, key *meta.Key, obj *ga.Insta Code: http.StatusConflict, Message: fmt.Sprintf("MockInstances %v exists", key), } - glog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -9452,7 +9452,7 @@ func (m *MockInstances) Insert(ctx context.Context, key *meta.Key, obj *ga.Insta obj.SelfLink = SelfLink(meta.VersionGA, projectID, "instances", key) m.Objects[*key] = &MockInstancesObj{obj} - glog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -9460,7 +9460,7 @@ func (m *MockInstances) Insert(ctx context.Context, key *meta.Key, obj *ga.Insta func (m *MockInstances) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockInstances.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -9472,7 +9472,7 @@ func (m *MockInstances) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockInstances.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -9480,12 +9480,12 @@ func (m *MockInstances) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockInstances %v not found", key), } - glog.V(5).Infof("MockInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockInstances.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockInstances.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockInstances.Delete(%v, %v) = nil", ctx, key) return nil } @@ -9517,10 +9517,10 @@ type GCEInstances struct { // Get the Instance named by key. 
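Each wrapper above rejects the request up front when `key.Valid()` fails: a `meta.Key` names a global, regional, or zonal resource, and the `Instances` methods additionally read `key.Zone` when building the API call. A simplified stand-in for that validation; the real `meta` package's rules are stricter, and this sketch only captures the scope-mixing check:

```go
package main

import "fmt"

type Key struct {
	Name   string
	Zone   string
	Region string
}

func GlobalKey(name string) *Key            { return &Key{Name: name} }
func ZonalKey(name, zone string) *Key       { return &Key{Name: name, Zone: zone} }
func RegionalKey(name, region string) *Key  { return &Key{Name: name, Region: region} }

// Valid rejects keys that lack a name or mix scopes, mirroring the guard the
// generated methods run before issuing any API call.
func (k *Key) Valid() bool {
	if k.Name == "" {
		return false
	}
	return !(k.Zone != "" && k.Region != "")
}

func main() {
	fmt.Println(ZonalKey("ig-1", "us-central1-b").Valid()) // true
	fmt.Println((&Key{Zone: "us-central1-b"}).Valid())     // false: no name
}
```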
func (g *GCEInstances) Get(ctx context.Context, key *meta.Key) (*ga.Instance, error) { - glog.V(5).Infof("GCEInstances.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEInstances.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEInstances.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEInstances.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") @@ -9530,21 +9530,21 @@ func (g *GCEInstances) Get(ctx context.Context, key *meta.Key) (*ga.Instance, er Version: meta.Version("ga"), Service: "Instances", } - glog.V(5).Infof("GCEInstances.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEInstances.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEInstances.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEInstances.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.Instances.Get(projectID, key.Zone, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEInstances.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEInstances.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Instance objects. func (g *GCEInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Instance, error) { - glog.V(5).Infof("GCEInstances.List(%v, %v, %v) called", ctx, zone, fl) + klog.V(5).Infof("GCEInstances.List(%v, %v, %v) called", ctx, zone, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") rk := &RateLimitKey{ ProjectID: projectID, @@ -9555,30 +9555,30 @@ func (g *GCEInstances) List(ctx context.Context, zone string, fl *filter.F) ([]* if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEInstances.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) + klog.V(5).Infof("GCEInstances.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) call := g.s.GA.Instances.List(projectID, zone) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.Instance f := func(l *ga.InstanceList) error { - glog.V(5).Infof("GCEInstances.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEInstances.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEInstances.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEInstances.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEInstances.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEInstances.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEInstances.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEInstances.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -9586,9 +9586,9 @@ func (g *GCEInstances) List(ctx context.Context, zone string, fl *filter.F) ([]* // Insert Instance with key of value obj. 
func (g *GCEInstances) Insert(ctx context.Context, key *meta.Key, obj *ga.Instance) error { - glog.V(5).Infof("GCEInstances.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEInstances.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEInstances.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEInstances.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") @@ -9598,9 +9598,9 @@ func (g *GCEInstances) Insert(ctx context.Context, key *meta.Key, obj *ga.Instan Version: meta.Version("ga"), Service: "Instances", } - glog.V(5).Infof("GCEInstances.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEInstances.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEInstances.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEInstances.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -9609,20 +9609,20 @@ func (g *GCEInstances) Insert(ctx context.Context, key *meta.Key, obj *ga.Instan op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEInstances.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEInstances.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEInstances.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEInstances.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the Instance referenced by key. func (g *GCEInstances) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEInstances.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEInstances.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEInstances.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEInstances.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") @@ -9632,9 +9632,9 @@ func (g *GCEInstances) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "Instances", } - glog.V(5).Infof("GCEInstances.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEInstances.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEInstances.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEInstances.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.Instances.Delete(projectID, key.Zone, key.Name) @@ -9642,21 +9642,21 @@ func (g *GCEInstances) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEInstances.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEInstances.Delete(%v, %v) = %v", ctx, key, err) return err } // AttachDisk is a method on GCEInstances. 
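All mutating wrappers in this file — `Insert`, `Delete`, `AttachDisk`, and the rest — share one shape: `call.Do()` returns a long-running operation, and the method only returns after `g.s.WaitForCompletion(ctx, op)` resolves it, so callers see synchronous semantics over an asynchronous API. A sketch of a completion wait under stated assumptions; the polling loop below is illustrative, not the real implementation:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

type Operation struct{ Status string }

// pollOnce stands in for a GET on the operation resource.
func pollOnce(op *Operation) { op.Status = "DONE" }

// WaitForCompletion polls the operation until it reports DONE or the
// context is cancelled, mirroring the contract the wrappers rely on.
func WaitForCompletion(ctx context.Context, op *Operation) error {
	for op.Status != "DONE" {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(50 * time.Millisecond):
			pollOnce(op)
		}
	}
	return nil
}

func main() {
	op := &Operation{Status: "RUNNING"}
	if err := WaitForCompletion(context.Background(), op); err != nil {
		fmt.Println("wait failed:", err)
		return
	}
	fmt.Println("operation done")
}
```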
func (g *GCEInstances) AttachDisk(ctx context.Context, key *meta.Key, arg0 *ga.AttachedDisk) error { - glog.V(5).Infof("GCEInstances.AttachDisk(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEInstances.AttachDisk(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEInstances.AttachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEInstances.AttachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") @@ -9666,30 +9666,30 @@ func (g *GCEInstances) AttachDisk(ctx context.Context, key *meta.Key, arg0 *ga.A Version: meta.Version("ga"), Service: "Instances", } - glog.V(5).Infof("GCEInstances.AttachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEInstances.AttachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEInstances.AttachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEInstances.AttachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.Instances.AttachDisk(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) return err } // DetachDisk is a method on GCEInstances. func (g *GCEInstances) DetachDisk(ctx context.Context, key *meta.Key, arg0 string) error { - glog.V(5).Infof("GCEInstances.DetachDisk(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEInstances.DetachDisk(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEInstances.DetachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEInstances.DetachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") @@ -9699,21 +9699,21 @@ func (g *GCEInstances) DetachDisk(ctx context.Context, key *meta.Key, arg0 strin Version: meta.Version("ga"), Service: "Instances", } - glog.V(5).Infof("GCEInstances.DetachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEInstances.DetachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEInstances.DetachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEInstances.DetachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.Instances.DetachDisk(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEInstances.DetachDisk(%v, %v, ...) 
= %+v", ctx, key, err) return err } @@ -9778,7 +9778,7 @@ type MockBetaInstances struct { func (m *MockBetaInstances) Get(ctx context.Context, key *meta.Key) (*beta.Instance, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaInstances.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaInstances.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -9790,12 +9790,12 @@ func (m *MockBetaInstances) Get(ctx context.Context, key *meta.Key) (*beta.Insta defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockBetaInstances.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaInstances.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToBeta() - glog.V(5).Infof("MockBetaInstances.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockBetaInstances.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -9803,7 +9803,7 @@ func (m *MockBetaInstances) Get(ctx context.Context, key *meta.Key) (*beta.Insta Code: http.StatusNotFound, Message: fmt.Sprintf("MockBetaInstances %v not found", key), } - glog.V(5).Infof("MockBetaInstances.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaInstances.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -9811,7 +9811,7 @@ func (m *MockBetaInstances) Get(ctx context.Context, key *meta.Key) (*beta.Insta func (m *MockBetaInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*beta.Instance, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { - glog.V(5).Infof("MockBetaInstances.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + klog.V(5).Infof("MockBetaInstances.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) return objs, err } } @@ -9821,7 +9821,7 @@ func (m *MockBetaInstances) List(ctx context.Context, zone string, fl *filter.F) if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockBetaInstances.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + klog.V(5).Infof("MockBetaInstances.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) return nil, *m.ListError } @@ -9837,7 +9837,7 @@ func (m *MockBetaInstances) List(ctx context.Context, zone string, fl *filter.F) objs = append(objs, obj.ToBeta()) } - glog.V(5).Infof("MockBetaInstances.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + klog.V(5).Infof("MockBetaInstances.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) return objs, nil } @@ -9845,7 +9845,7 @@ func (m *MockBetaInstances) List(ctx context.Context, zone string, fl *filter.F) func (m *MockBetaInstances) Insert(ctx context.Context, key *meta.Key, obj *beta.Instance) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -9857,7 +9857,7 @@ func (m *MockBetaInstances) Insert(ctx context.Context, key *meta.Key, obj *beta defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -9865,7 +9865,7 @@ func (m *MockBetaInstances) Insert(ctx 
context.Context, key *meta.Key, obj *beta Code: http.StatusConflict, Message: fmt.Sprintf("MockBetaInstances %v exists", key), } - glog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -9874,7 +9874,7 @@ func (m *MockBetaInstances) Insert(ctx context.Context, key *meta.Key, obj *beta obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "instances", key) m.Objects[*key] = &MockInstancesObj{obj} - glog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -9882,7 +9882,7 @@ func (m *MockBetaInstances) Insert(ctx context.Context, key *meta.Key, obj *beta func (m *MockBetaInstances) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -9894,7 +9894,7 @@ func (m *MockBetaInstances) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -9902,12 +9902,12 @@ func (m *MockBetaInstances) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockBetaInstances %v not found", key), } - glog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = nil", ctx, key) return nil } @@ -9947,10 +9947,10 @@ type GCEBetaInstances struct { // Get the Instance named by key. 
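Note that `MockBetaInstances.Insert` above stores a `&MockInstancesObj{obj}` — the same wrapper the GA mock uses — so one in-memory store backs both API versions, and each service converts on read via `ToGA()`/`ToBeta()`. A simplified stand-in for that arrangement; the real conversion helpers are generated, and the field-by-field copy here is illustrative:

```go
package main

import "fmt"

type gaInstance struct{ Name string }
type betaInstance struct{ Name string }

// mockInstancesObj wraps whichever versioned object was inserted.
type mockInstancesObj struct{ obj interface{} }

func (m *mockInstancesObj) ToGA() *gaInstance {
	switch o := m.obj.(type) {
	case *gaInstance:
		return o
	case *betaInstance:
		return &gaInstance{Name: o.Name} // cross-version copy for the sketch
	}
	return nil
}

func (m *mockInstancesObj) ToBeta() *betaInstance {
	switch o := m.obj.(type) {
	case *betaInstance:
		return o
	case *gaInstance:
		return &betaInstance{Name: o.Name}
	}
	return nil
}

func main() {
	// Inserted through the beta mock, readable through either version.
	stored := &mockInstancesObj{obj: &betaInstance{Name: "vm-1"}}
	fmt.Println(stored.ToGA().Name, stored.ToBeta().Name) // vm-1 vm-1
}
```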
func (g *GCEBetaInstances) Get(ctx context.Context, key *meta.Key) (*beta.Instance, error) { - glog.V(5).Infof("GCEBetaInstances.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBetaInstances.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaInstances.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaInstances.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") @@ -9960,21 +9960,21 @@ func (g *GCEBetaInstances) Get(ctx context.Context, key *meta.Key) (*beta.Instan Version: meta.Version("beta"), Service: "Instances", } - glog.V(5).Infof("GCEBetaInstances.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaInstances.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaInstances.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaInstances.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Beta.Instances.Get(projectID, key.Zone, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEBetaInstances.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBetaInstances.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Instance objects. func (g *GCEBetaInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*beta.Instance, error) { - glog.V(5).Infof("GCEBetaInstances.List(%v, %v, %v) called", ctx, zone, fl) + klog.V(5).Infof("GCEBetaInstances.List(%v, %v, %v) called", ctx, zone, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") rk := &RateLimitKey{ ProjectID: projectID, @@ -9985,30 +9985,30 @@ func (g *GCEBetaInstances) List(ctx context.Context, zone string, fl *filter.F) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEBetaInstances.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) + klog.V(5).Infof("GCEBetaInstances.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) call := g.s.Beta.Instances.List(projectID, zone) if fl != filter.None { call.Filter(fl.String()) } var all []*beta.Instance f := func(l *beta.InstanceList) error { - glog.V(5).Infof("GCEBetaInstances.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEBetaInstances.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEBetaInstances.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEBetaInstances.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEBetaInstances.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEBetaInstances.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEBetaInstances.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEBetaInstances.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -10016,9 +10016,9 @@ func (g *GCEBetaInstances) List(ctx context.Context, zone string, fl *filter.F) // Insert Instance with key of value obj. 
func (g *GCEBetaInstances) Insert(ctx context.Context, key *meta.Key, obj *beta.Instance) error { - glog.V(5).Infof("GCEBetaInstances.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEBetaInstances.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEBetaInstances.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaInstances.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") @@ -10028,9 +10028,9 @@ func (g *GCEBetaInstances) Insert(ctx context.Context, key *meta.Key, obj *beta. Version: meta.Version("beta"), Service: "Instances", } - glog.V(5).Infof("GCEBetaInstances.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaInstances.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaInstances.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaInstances.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -10039,20 +10039,20 @@ func (g *GCEBetaInstances) Insert(ctx context.Context, key *meta.Key, obj *beta. op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaInstances.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaInstances.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaInstances.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEBetaInstances.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the Instance referenced by key. 
func (g *GCEBetaInstances) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEBetaInstances.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBetaInstances.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaInstances.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaInstances.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") @@ -10062,9 +10062,9 @@ func (g *GCEBetaInstances) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("beta"), Service: "Instances", } - glog.V(5).Infof("GCEBetaInstances.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaInstances.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaInstances.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaInstances.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.Instances.Delete(projectID, key.Zone, key.Name) @@ -10072,21 +10072,21 @@ func (g *GCEBetaInstances) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaInstances.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaInstances.Delete(%v, %v) = %v", ctx, key, err) return err }
// AttachDisk is a method on GCEBetaInstances. func (g *GCEBetaInstances) AttachDisk(ctx context.Context, key *meta.Key, arg0 *beta.AttachedDisk) error { - glog.V(5).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") @@ -10096,30 +10096,30 @@ func (g *GCEBetaInstances) AttachDisk(ctx context.Context, key *meta.Key, arg0 * Version: meta.Version("beta"), Service: "Instances", } - glog.V(5).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.Instances.AttachDisk(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) return err }
// DetachDisk is a method on GCEBetaInstances. func (g *GCEBetaInstances) DetachDisk(ctx context.Context, key *meta.Key, arg0 string) error { - glog.V(5).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") @@ -10129,30 +10129,30 @@ func (g *GCEBetaInstances) DetachDisk(ctx context.Context, key *meta.Key, arg0 s Version: meta.Version("beta"), Service: "Instances", } - glog.V(5).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.Instances.DetachDisk(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) return err }
// UpdateNetworkInterface is a method on GCEBetaInstances. func (g *GCEBetaInstances) UpdateNetworkInterface(ctx context.Context, key *meta.Key, arg0 string, arg1 *beta.NetworkInterface) error { - glog.V(5).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") @@ -10162,21 +10162,21 @@ func (g *GCEBetaInstances) UpdateNetworkInterface(ctx context.Context, key *meta Version: meta.Version("beta"), Service: "Instances", } - glog.V(5).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.Instances.UpdateNetworkInterface(projectID, key.Zone, key.Name, arg0, arg1) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...) = %+v", ctx, key, err) return err }
@@ -10241,7 +10241,7 @@ type MockAlphaInstances struct { func (m *MockAlphaInstances) Get(ctx context.Context, key *meta.Key) (*alpha.Instance, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -10253,12 +10253,12 @@ func (m *MockAlphaInstances) Get(ctx context.Context, key *meta.Key) (*alpha.Ins defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToAlpha() - glog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -10266,7 +10266,7 @@ func (m *MockAlphaInstances) Get(ctx context.Context, key *meta.Key) (*alpha.Ins Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaInstances %v not found", key), } - glog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err }
@@ -10274,7 +10274,7 @@ func (m *MockAlphaInstances) Get(ctx context.Context, key *meta.Key) (*alpha.Ins func (m *MockAlphaInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.Instance, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { - glog.V(5).Infof("MockAlphaInstances.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + klog.V(5).Infof("MockAlphaInstances.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) return objs, err } } @@ -10284,7 +10284,7 @@ func (m *MockAlphaInstances) List(ctx context.Context, zone string, fl *filter.F if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockAlphaInstances.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + klog.V(5).Infof("MockAlphaInstances.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) return nil, *m.ListError } @@ -10300,7 +10300,7 @@ func (m *MockAlphaInstances) List(ctx context.Context, zone string, fl *filter.F objs = append(objs, obj.ToAlpha()) } - glog.V(5).Infof("MockAlphaInstances.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + klog.V(5).Infof("MockAlphaInstances.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) return objs, nil }
@@ -10308,7 +10308,7 @@ func (m *MockAlphaInstances) List(ctx context.Context, zone string, fl *filter.F func (m *MockAlphaInstances) Insert(ctx context.Context, key *meta.Key, obj *alpha.Instance) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -10320,7 +10320,7 @@ func (m *MockAlphaInstances) Insert(ctx context.Context, key *meta.Key, obj *alp defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -10328,7 +10328,7 @@ func (m *MockAlphaInstances) Insert(ctx context.Context, key *meta.Key, obj *alp Code: http.StatusConflict, Message: fmt.Sprintf("MockAlphaInstances %v exists", key), } - glog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -10337,7 +10337,7 @@ func (m *MockAlphaInstances) Insert(ctx context.Context, key *meta.Key, obj *alp obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "instances", key) m.Objects[*key] = &MockInstancesObj{obj} - glog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil }
@@ -10345,7 +10345,7 @@ func (m *MockAlphaInstances) Insert(ctx context.Context, key *meta.Key, obj *alp func (m *MockAlphaInstances) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -10357,7 +10357,7 @@ func (m *MockAlphaInstances) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -10365,12 +10365,12 @@ func (m *MockAlphaInstances) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaInstances %v not found", key), } - glog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = nil", ctx, key) return nil }
@@ -10410,10 +10410,10 @@ type GCEAlphaInstances struct { // Get the Instance named by key.
func (g *GCEAlphaInstances) Get(ctx context.Context, key *meta.Key) (*alpha.Instance, error) { - glog.V(5).Infof("GCEAlphaInstances.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaInstances.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaInstances.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaInstances.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") @@ -10423,21 +10423,21 @@ func (g *GCEAlphaInstances) Get(ctx context.Context, key *meta.Key) (*alpha.Inst Version: meta.Version("alpha"), Service: "Instances", } - glog.V(5).Infof("GCEAlphaInstances.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaInstances.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaInstances.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaInstances.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Alpha.Instances.Get(projectID, key.Zone, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEAlphaInstances.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEAlphaInstances.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Instance objects. func (g *GCEAlphaInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.Instance, error) { - glog.V(5).Infof("GCEAlphaInstances.List(%v, %v, %v) called", ctx, zone, fl) + klog.V(5).Infof("GCEAlphaInstances.List(%v, %v, %v) called", ctx, zone, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") rk := &RateLimitKey{ ProjectID: projectID, @@ -10448,30 +10448,30 @@ func (g *GCEAlphaInstances) List(ctx context.Context, zone string, fl *filter.F) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEAlphaInstances.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) + klog.V(5).Infof("GCEAlphaInstances.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) call := g.s.Alpha.Instances.List(projectID, zone) if fl != filter.None { call.Filter(fl.String()) } var all []*alpha.Instance f := func(l *alpha.InstanceList) error { - glog.V(5).Infof("GCEAlphaInstances.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEAlphaInstances.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEAlphaInstances.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEAlphaInstances.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEAlphaInstances.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEAlphaInstances.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEAlphaInstances.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEAlphaInstances.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -10479,9 +10479,9 @@ func (g *GCEAlphaInstances) List(ctx context.Context, zone string, fl *filter.F) // Insert Instance with key of value obj. func (g *GCEAlphaInstances) Insert(ctx context.Context, key *meta.Key, obj *alpha.Instance) error { - glog.V(5).Infof("GCEAlphaInstances.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEAlphaInstances.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEAlphaInstances.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaInstances.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") @@ -10491,9 +10491,9 @@ func (g *GCEAlphaInstances) Insert(ctx context.Context, key *meta.Key, obj *alph Version: meta.Version("alpha"), Service: "Instances", } - glog.V(5).Infof("GCEAlphaInstances.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaInstances.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaInstances.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaInstances.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -10502,20 +10502,20 @@ func (g *GCEAlphaInstances) Insert(ctx context.Context, key *meta.Key, obj *alph op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaInstances.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaInstances.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaInstances.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEAlphaInstances.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the Instance referenced by key. 
func (g *GCEAlphaInstances) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEAlphaInstances.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaInstances.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaInstances.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaInstances.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") @@ -10525,9 +10525,9 @@ func (g *GCEAlphaInstances) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("alpha"), Service: "Instances", } - glog.V(5).Infof("GCEAlphaInstances.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaInstances.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaInstances.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaInstances.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.Instances.Delete(projectID, key.Zone, key.Name) @@ -10535,21 +10535,21 @@ func (g *GCEAlphaInstances) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) return err }
// AttachDisk is a method on GCEAlphaInstances. func (g *GCEAlphaInstances) AttachDisk(ctx context.Context, key *meta.Key, arg0 *alpha.AttachedDisk) error { - glog.V(5).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") @@ -10559,30 +10559,30 @@ func (g *GCEAlphaInstances) AttachDisk(ctx context.Context, key *meta.Key, arg0 Version: meta.Version("alpha"), Service: "Instances", } - glog.V(5).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.Instances.AttachDisk(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) return err }
// DetachDisk is a method on GCEAlphaInstances. func (g *GCEAlphaInstances) DetachDisk(ctx context.Context, key *meta.Key, arg0 string) error { - glog.V(5).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") @@ -10592,30 +10592,30 @@ func (g *GCEAlphaInstances) DetachDisk(ctx context.Context, key *meta.Key, arg0 Version: meta.Version("alpha"), Service: "Instances", } - glog.V(5).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.Instances.DetachDisk(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) return err }
// UpdateNetworkInterface is a method on GCEAlphaInstances. func (g *GCEAlphaInstances) UpdateNetworkInterface(ctx context.Context, key *meta.Key, arg0 string, arg1 *alpha.NetworkInterface) error { - glog.V(5).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") @@ -10625,21 +10625,21 @@ func (g *GCEAlphaInstances) UpdateNetworkInterface(ctx context.Context, key *met Version: meta.Version("alpha"), Service: "Instances", } - glog.V(5).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.Instances.UpdateNetworkInterface(projectID, key.Zone, key.Name, arg0, arg1) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...) = %+v", ctx, key, err) return err }
@@ -10707,7 +10707,7 @@ type MockAlphaNetworkEndpointGroups struct { func (m *MockAlphaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) (*alpha.NetworkEndpointGroup, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -10719,12 +10719,12 @@ func (m *MockAlphaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToAlpha() - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -10732,7 +10732,7 @@ func (m *MockAlphaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaNetworkEndpointGroups %v not found", key), } - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err }
@@ -10740,7 +10740,7 @@ func (m *MockAlphaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) func (m *MockAlphaNetworkEndpointGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.NetworkEndpointGroup, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) return objs, err } } @@ -10750,7 +10750,7 @@ func (m *MockAlphaNetworkEndpointGroups) List(ctx context.Context, zone string, if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) return nil, *m.ListError } @@ -10766,7 +10766,7 @@ func (m *MockAlphaNetworkEndpointGroups) List(ctx context.Context, zone string, objs = append(objs, obj.ToAlpha()) } - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) return objs, nil }
@@ -10774,7 +10774,7 @@ func (m *MockAlphaNetworkEndpointGroups) List(ctx context.Context, zone string, func (m *MockAlphaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Key, obj *alpha.NetworkEndpointGroup) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -10786,7 +10786,7 @@ func (m *MockAlphaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.K defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -10794,7 +10794,7 @@ func (m *MockAlphaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.K Code: http.StatusConflict, Message: fmt.Sprintf("MockAlphaNetworkEndpointGroups %v exists", key), } - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -10803,7 +10803,7 @@ func (m *MockAlphaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.K obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "networkEndpointGroups", key) m.Objects[*key] = &MockNetworkEndpointGroupsObj{obj} - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil }
@@ -10811,7 +10811,7 @@ func (m *MockAlphaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.K func (m *MockAlphaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -10823,7 +10823,7 @@ func (m *MockAlphaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.K defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -10831,12 +10831,12 @@ func (m *MockAlphaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.K Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaNetworkEndpointGroups %v not found", key), } - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = nil", ctx, key) return nil }
@@ -10844,7 +10844,7 @@ func (m *MockAlphaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.K func (m *MockAlphaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*alpha.NetworkEndpointGroup, error) { if m.AggregatedListHook != nil { if intercept, objs, err := m.AggregatedListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -10854,7 +10854,7 @@ func (m *MockAlphaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl if m.AggregatedListError != nil { err := *m.AggregatedListError - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) return nil, err } @@ -10863,7 +10863,7 @@ func (m *MockAlphaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl res, err := ParseResourceURL(obj.ToAlpha().SelfLink) location := res.Key.Zone if err != nil { - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) return nil, err } if !fl.Match(obj.ToAlpha()) { @@ -10871,7 +10871,7 @@ func (m *MockAlphaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl } objs[location] = append(objs[location], obj.ToAlpha()) } - glog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil }
@@ -10911,10 +10911,10 @@ type GCEAlphaNetworkEndpointGroups struct { // Get the NetworkEndpointGroup named by key. func (g *GCEAlphaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) (*alpha.NetworkEndpointGroup, error) { - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") @@ -10924,21 +10924,21 @@ func (g *GCEAlphaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) Version: meta.Version("alpha"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Alpha.NetworkEndpointGroups.Get(projectID, key.Zone, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all NetworkEndpointGroup objects.
func (g *GCEAlphaNetworkEndpointGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.NetworkEndpointGroup, error) { - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.List(%v, %v, %v) called", ctx, zone, fl) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.List(%v, %v, %v) called", ctx, zone, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") rk := &RateLimitKey{ ProjectID: projectID, @@ -10949,30 +10949,30 @@ func (g *GCEAlphaNetworkEndpointGroups) List(ctx context.Context, zone string, f if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) call := g.s.Alpha.NetworkEndpointGroups.List(projectID, zone) if fl != filter.None { call.Filter(fl.String()) } var all []*alpha.NetworkEndpointGroup f := func(l *alpha.NetworkEndpointGroupList) error { - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -10980,9 +10980,9 @@ func (g *GCEAlphaNetworkEndpointGroups) List(ctx context.Context, zone string, f // Insert NetworkEndpointGroup with key of value obj. 
func (g *GCEAlphaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Key, obj *alpha.NetworkEndpointGroup) error { - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") @@ -10992,9 +10992,9 @@ func (g *GCEAlphaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Ke Version: meta.Version("alpha"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -11003,20 +11003,20 @@ func (g *GCEAlphaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Ke op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the NetworkEndpointGroup referenced by key. 
func (g *GCEAlphaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") @@ -11026,9 +11026,9 @@ func (g *GCEAlphaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Ke Version: meta.Version("alpha"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.NetworkEndpointGroups.Delete(projectID, key.Zone, key.Name) @@ -11036,18 +11036,18 @@ func (g *GCEAlphaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Ke op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) return err } // AggregatedList lists all resources of the given type across all locations. 
func (g *GCEAlphaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*alpha.NetworkEndpointGroup, error) { - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") rk := &RateLimitKey{ @@ -11057,9 +11057,9 @@ func (g *GCEAlphaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl * Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v): RateLimiter error: %v", ctx, fl, err) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v): RateLimiter error: %v", ctx, fl, err) return nil, err } @@ -11072,33 +11072,33 @@ func (g *GCEAlphaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl * all := map[string][]*alpha.NetworkEndpointGroup{} f := func(l *alpha.NetworkEndpointGroupAggregatedList) error { for k, v := range l.Items { - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v): page[%v]%+v", ctx, fl, k, v) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v): page[%v]%+v", ctx, fl, k, v) all[k] = append(all[k], v.NetworkEndpointGroups...) } return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } // AttachNetworkEndpoints is a method on GCEAlphaNetworkEndpointGroups. 
func (g *GCEAlphaNetworkEndpointGroups) AttachNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *alpha.NetworkEndpointGroupsAttachEndpointsRequest) error { - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") @@ -11108,30 +11108,30 @@ func (g *GCEAlphaNetworkEndpointGroups) AttachNetworkEndpoints(ctx context.Conte Version: meta.Version("alpha"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.NetworkEndpointGroups.AttachNetworkEndpoints(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) return err } // DetachNetworkEndpoints is a method on GCEAlphaNetworkEndpointGroups. 
func (g *GCEAlphaNetworkEndpointGroups) DetachNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *alpha.NetworkEndpointGroupsDetachEndpointsRequest) error { - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") @@ -11141,30 +11141,30 @@ func (g *GCEAlphaNetworkEndpointGroups) DetachNetworkEndpoints(ctx context.Conte Version: meta.Version("alpha"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.NetworkEndpointGroups.DetachNetworkEndpoints(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) return err } // ListNetworkEndpoints is a method on GCEAlphaNetworkEndpointGroups. 
func (g *GCEAlphaNetworkEndpointGroups) ListNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *alpha.NetworkEndpointGroupsListEndpointsRequest, fl *filter.F) ([]*alpha.NetworkEndpointWithHealthStatus, error) { - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") @@ -11174,31 +11174,31 @@ func (g *GCEAlphaNetworkEndpointGroups) ListNetworkEndpoints(ctx context.Context Version: meta.Version("alpha"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Alpha.NetworkEndpointGroups.ListNetworkEndpoints(projectID, key.Zone, key.Name, arg0) var all []*alpha.NetworkEndpointWithHealthStatus f := func(l *alpha.NetworkEndpointGroupsListNetworkEndpoints) error { - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): page %+v", ctx, key, l) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): page %+v", ctx, key, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = %v, %v", ctx, key, nil, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = %v, %v", ctx, key, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = [%v items], %v", ctx, key, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = [%v items], %v", ctx, key, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = %v, %v", ctx, key, asStr, nil) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = %v, %v", ctx, key, asStr, nil) } return all, nil }
@@ -11267,7 +11267,7 @@ type MockBetaNetworkEndpointGroups struct { func (m *MockBetaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) (*beta.NetworkEndpointGroup, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -11279,12 +11279,12 @@ func (m *MockBetaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToBeta() - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -11292,7 +11292,7 @@ func (m *MockBetaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) Code: http.StatusNotFound, Message: fmt.Sprintf("MockBetaNetworkEndpointGroups %v not found", key), } - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err }
@@ -11300,7 +11300,7 @@ func (m *MockBetaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) func (m *MockBetaNetworkEndpointGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*beta.NetworkEndpointGroup, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { - glog.V(5).Infof("MockBetaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) return objs, err } } @@ -11310,7 +11310,7 @@ func (m *MockBetaNetworkEndpointGroups) List(ctx context.Context, zone string, f if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockBetaNetworkEndpointGroups.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) return nil, *m.ListError } @@ -11326,7 +11326,7 @@ func (m *MockBetaNetworkEndpointGroups) List(ctx context.Context, zone string, f objs = append(objs, obj.ToBeta()) } - glog.V(5).Infof("MockBetaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) return objs, nil }
@@ -11334,7 +11334,7 @@ func (m *MockBetaNetworkEndpointGroups) List(ctx context.Context, zone string, f func (m *MockBetaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Key, obj *beta.NetworkEndpointGroup) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -11346,7 +11346,7 @@ func (m *MockBetaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Ke defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -11354,7 +11354,7 @@ func (m *MockBetaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Ke Code: http.StatusConflict, Message: fmt.Sprintf("MockBetaNetworkEndpointGroups %v exists", key), } - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -11363,7 +11363,7 @@ func (m *MockBetaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Ke obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "networkEndpointGroups", key) m.Objects[*key] = &MockNetworkEndpointGroupsObj{obj} - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil }
@@ -11371,7 +11371,7 @@ func (m *MockBetaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Ke func (m *MockBetaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -11383,7 +11383,7 @@ func (m *MockBetaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Ke defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -11391,12 +11391,12 @@ func (m *MockBetaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Ke Code: http.StatusNotFound, Message: fmt.Sprintf("MockBetaNetworkEndpointGroups %v not found", key), } - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Delete(%v, %v) = nil", ctx, key) return nil }
@@ -11404,7 +11404,7 @@ func (m *MockBetaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Ke func (m *MockBetaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*beta.NetworkEndpointGroup, error) { if m.AggregatedListHook != nil { if intercept, objs, err := m.AggregatedListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockBetaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -11414,7 +11414,7 @@ func (m *MockBetaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl * if m.AggregatedListError != nil { err := *m.AggregatedListError - glog.V(5).Infof("MockBetaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) return nil,
err } @@ -11423,7 +11423,7 @@ func (m *MockBetaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl * res, err := ParseResourceURL(obj.ToBeta().SelfLink) location := res.Key.Zone if err != nil { - glog.V(5).Infof("MockBetaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) return nil, err } if !fl.Match(obj.ToBeta()) { @@ -11431,7 +11431,7 @@ func (m *MockBetaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl * } objs[location] = append(objs[location], obj.ToBeta()) } - glog.V(5).Infof("MockBetaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -11471,10 +11471,10 @@ type GCEBetaNetworkEndpointGroups struct { // Get the NetworkEndpointGroup named by key. func (g *GCEBetaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) (*beta.NetworkEndpointGroup, error) { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") @@ -11484,21 +11484,21 @@ func (g *GCEBetaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) ( Version: meta.Version("beta"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Beta.NetworkEndpointGroups.Get(projectID, key.Zone, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all NetworkEndpointGroup objects. 
func (g *GCEBetaNetworkEndpointGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*beta.NetworkEndpointGroup, error) { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.List(%v, %v, %v) called", ctx, zone, fl) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.List(%v, %v, %v) called", ctx, zone, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") rk := &RateLimitKey{ ProjectID: projectID, @@ -11509,30 +11509,30 @@ func (g *GCEBetaNetworkEndpointGroups) List(ctx context.Context, zone string, fl if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) call := g.s.Beta.NetworkEndpointGroups.List(projectID, zone) if fl != filter.None { call.Filter(fl.String()) } var all []*beta.NetworkEndpointGroup f := func(l *beta.NetworkEndpointGroupList) error { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -11540,9 +11540,9 @@ func (g *GCEBetaNetworkEndpointGroups) List(ctx context.Context, zone string, fl // Insert NetworkEndpointGroup with key of value obj. 
func (g *GCEBetaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Key, obj *beta.NetworkEndpointGroup) error { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") @@ -11552,9 +11552,9 @@ func (g *GCEBetaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Key Version: meta.Version("beta"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -11563,20 +11563,20 @@ func (g *GCEBetaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Key op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the NetworkEndpointGroup referenced by key. 
func (g *GCEBetaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") @@ -11586,9 +11586,9 @@ func (g *GCEBetaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Key Version: meta.Version("beta"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.NetworkEndpointGroups.Delete(projectID, key.Zone, key.Name) @@ -11596,18 +11596,18 @@ func (g *GCEBetaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Key op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) return err } // AggregatedList lists all resources of the given type across all locations. 
func (g *GCEBetaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*beta.NetworkEndpointGroup, error) { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") rk := &RateLimitKey{ @@ -11617,9 +11617,9 @@ func (g *GCEBetaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl *f Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v): RateLimiter error: %v", ctx, fl, err) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v): RateLimiter error: %v", ctx, fl, err) return nil, err } @@ -11632,33 +11632,33 @@ func (g *GCEBetaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl *f all := map[string][]*beta.NetworkEndpointGroup{} f := func(l *beta.NetworkEndpointGroupAggregatedList) error { for k, v := range l.Items { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v): page[%v]%+v", ctx, fl, k, v) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v): page[%v]%+v", ctx, fl, k, v) all[k] = append(all[k], v.NetworkEndpointGroups...) } return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } // AttachNetworkEndpoints is a method on GCEBetaNetworkEndpointGroups. 
func (g *GCEBetaNetworkEndpointGroups) AttachNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *beta.NetworkEndpointGroupsAttachEndpointsRequest) error { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") @@ -11668,30 +11668,30 @@ func (g *GCEBetaNetworkEndpointGroups) AttachNetworkEndpoints(ctx context.Contex Version: meta.Version("beta"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.NetworkEndpointGroups.AttachNetworkEndpoints(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) return err } // DetachNetworkEndpoints is a method on GCEBetaNetworkEndpointGroups. 
func (g *GCEBetaNetworkEndpointGroups) DetachNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *beta.NetworkEndpointGroupsDetachEndpointsRequest) error { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") @@ -11701,30 +11701,30 @@ func (g *GCEBetaNetworkEndpointGroups) DetachNetworkEndpoints(ctx context.Contex Version: meta.Version("beta"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.NetworkEndpointGroups.DetachNetworkEndpoints(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) return err } // ListNetworkEndpoints is a method on GCEBetaNetworkEndpointGroups. 
func (g *GCEBetaNetworkEndpointGroups) ListNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *beta.NetworkEndpointGroupsListEndpointsRequest, fl *filter.F) ([]*beta.NetworkEndpointWithHealthStatus, error) { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") @@ -11734,31 +11734,31 @@ func (g *GCEBetaNetworkEndpointGroups) ListNetworkEndpoints(ctx context.Context, Version: meta.Version("beta"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Beta.NetworkEndpointGroups.ListNetworkEndpoints(projectID, key.Zone, key.Name, arg0) var all []*beta.NetworkEndpointWithHealthStatus f := func(l *beta.NetworkEndpointGroupsListNetworkEndpoints) error { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): page %+v", ctx, key, l) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): page %+v", ctx, key, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = %v, %v", ctx, key, nil, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = %v, %v", ctx, key, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = [%v items], %v", ctx, key, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = [%v items], %v", ctx, key, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = %v, %v", ctx, key, asStr, nil) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = %v, %v", ctx, key, asStr, nil) } return all, nil } @@ -11859,7 +11859,7 @@ type MockRegions struct { func (m *MockRegions) Get(ctx context.Context, key *meta.Key) (*ga.Region, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockRegions.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockRegions.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -11871,12 +11871,12 @@ func (m *MockRegions) Get(ctx context.Context, key *meta.Key) (*ga.Region, error defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockRegions.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockRegions.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockRegions.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockRegions.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -11884,7 +11884,7 @@ func (m *MockRegions) Get(ctx context.Context, key *meta.Key) (*ga.Region, error Code: http.StatusNotFound, Message: fmt.Sprintf("MockRegions %v not found", key), } - glog.V(5).Infof("MockRegions.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockRegions.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -11892,7 +11892,7 @@ func (m *MockRegions) Get(ctx context.Context, key *meta.Key) (*ga.Region, error func (m *MockRegions) List(ctx context.Context, fl *filter.F) ([]*ga.Region, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockRegions.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockRegions.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -11902,7 +11902,7 @@ func (m *MockRegions) List(ctx context.Context, fl *filter.F) ([]*ga.Region, err if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockRegions.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockRegions.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -11915,7 +11915,7 @@ func (m *MockRegions) List(ctx context.Context, fl *filter.F) ([]*ga.Region, err objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockRegions.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockRegions.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -11931,10 +11931,10 @@ type GCERegions struct { // Get the Region named by key.
func (g *GCERegions) Get(ctx context.Context, key *meta.Key) (*ga.Region, error) { - glog.V(5).Infof("GCERegions.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCERegions.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCERegions.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegions.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Regions") @@ -11944,21 +11944,21 @@ func (g *GCERegions) Get(ctx context.Context, key *meta.Key) (*ga.Region, error) Version: meta.Version("ga"), Service: "Regions", } - glog.V(5).Infof("GCERegions.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegions.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCERegions.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegions.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.Regions.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCERegions.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCERegions.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Region objects. func (g *GCERegions) List(ctx context.Context, fl *filter.F) ([]*ga.Region, error) { - glog.V(5).Infof("GCERegions.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCERegions.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Regions") rk := &RateLimitKey{ ProjectID: projectID, @@ -11969,30 +11969,30 @@ func (g *GCERegions) List(ctx context.Context, fl *filter.F) ([]*ga.Region, erro if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCERegions.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCERegions.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.Regions.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.Region f := func(l *ga.RegionList) error { - glog.V(5).Infof("GCERegions.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCERegions.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCERegions.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCERegions.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCERegions.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCERegions.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCERegions.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCERegions.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -12053,7 +12053,7 @@ type MockRoutes struct { func (m *MockRoutes) Get(ctx context.Context, key *meta.Key) (*ga.Route, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockRoutes.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockRoutes.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -12065,12 +12065,12 @@ func (m *MockRoutes) Get(ctx context.Context, key *meta.Key) (*ga.Route, error) defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockRoutes.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockRoutes.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockRoutes.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockRoutes.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -12078,7 +12078,7 @@ func (m *MockRoutes) Get(ctx context.Context, key *meta.Key) (*ga.Route, error) Code: http.StatusNotFound, Message: fmt.Sprintf("MockRoutes %v not found", key), } - glog.V(5).Infof("MockRoutes.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockRoutes.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -12086,7 +12086,7 @@ func (m *MockRoutes) Get(ctx context.Context, key *meta.Key) (*ga.Route, error) func (m *MockRoutes) List(ctx context.Context, fl *filter.F) ([]*ga.Route, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockRoutes.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockRoutes.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -12096,7 +12096,7 @@ func (m *MockRoutes) List(ctx context.Context, fl *filter.F) ([]*ga.Route, error if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockRoutes.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockRoutes.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -12109,7 +12109,7 @@ func (m *MockRoutes) List(ctx context.Context, fl *filter.F) ([]*ga.Route, error objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockRoutes.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockRoutes.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -12117,7 +12117,7 @@ func (m *MockRoutes) List(ctx context.Context, fl *filter.F) ([]*ga.Route, error func (m *MockRoutes) Insert(ctx context.Context, key *meta.Key, obj *ga.Route) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -12129,7 +12129,7 @@ func (m *MockRoutes) Insert(ctx context.Context, key *meta.Key, obj *ga.Route) e defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -12137,7 +12137,7 @@ func (m *MockRoutes) Insert(ctx context.Context, key *meta.Key, obj *ga.Route) e Code: http.StatusConflict, Message: fmt.Sprintf("MockRoutes %v exists", key), } - glog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -12146,7 +12146,7 @@ func (m *MockRoutes) Insert(ctx context.Context, key *meta.Key, obj *ga.Route) e obj.SelfLink = SelfLink(meta.VersionGA, projectID, "routes", key) m.Objects[*key] = &MockRoutesObj{obj} - glog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -12154,7 +12154,7 @@ func (m *MockRoutes) Insert(ctx context.Context, key *meta.Key, obj *ga.Route) e func (m *MockRoutes) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockRoutes.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRoutes.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -12166,7 +12166,7 @@ func (m *MockRoutes) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockRoutes.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRoutes.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -12174,12 +12174,12 @@ func (m *MockRoutes) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockRoutes %v not found", key), } - glog.V(5).Infof("MockRoutes.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRoutes.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockRoutes.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockRoutes.Delete(%v, %v) = nil", ctx, key) return nil } @@ -12195,10 +12195,10 @@ type GCERoutes struct { // Get the Route named by key.
func (g *GCERoutes) Get(ctx context.Context, key *meta.Key) (*ga.Route, error) { - glog.V(5).Infof("GCERoutes.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCERoutes.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCERoutes.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERoutes.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Routes") @@ -12208,21 +12208,21 @@ func (g *GCERoutes) Get(ctx context.Context, key *meta.Key) (*ga.Route, error) { Version: meta.Version("ga"), Service: "Routes", } - glog.V(5).Infof("GCERoutes.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERoutes.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCERoutes.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERoutes.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.Routes.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCERoutes.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCERoutes.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Route objects. func (g *GCERoutes) List(ctx context.Context, fl *filter.F) ([]*ga.Route, error) { - glog.V(5).Infof("GCERoutes.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCERoutes.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Routes") rk := &RateLimitKey{ ProjectID: projectID, @@ -12233,30 +12233,30 @@ func (g *GCERoutes) List(ctx context.Context, fl *filter.F) ([]*ga.Route, error) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCERoutes.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCERoutes.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.Routes.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.Route f := func(l *ga.RouteList) error { - glog.V(5).Infof("GCERoutes.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCERoutes.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCERoutes.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCERoutes.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCERoutes.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCERoutes.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCERoutes.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCERoutes.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -12264,9 +12264,9 @@ func (g *GCERoutes) List(ctx context.Context, fl *filter.F) ([]*ga.Route, error) // Insert Route with key of value obj. 
func (g *GCERoutes) Insert(ctx context.Context, key *meta.Key, obj *ga.Route) error { - glog.V(5).Infof("GCERoutes.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCERoutes.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCERoutes.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERoutes.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Routes") @@ -12276,9 +12276,9 @@ func (g *GCERoutes) Insert(ctx context.Context, key *meta.Key, obj *ga.Route) er Version: meta.Version("ga"), Service: "Routes", } - glog.V(5).Infof("GCERoutes.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERoutes.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCERoutes.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERoutes.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -12287,20 +12287,20 @@ func (g *GCERoutes) Insert(ctx context.Context, key *meta.Key, obj *ga.Route) er op, err := call.Do() if err != nil { - glog.V(4).Infof("GCERoutes.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCERoutes.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCERoutes.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCERoutes.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the Route referenced by key. func (g *GCERoutes) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCERoutes.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCERoutes.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCERoutes.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERoutes.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Routes") @@ -12310,9 +12310,9 @@ func (g *GCERoutes) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "Routes", } - glog.V(5).Infof("GCERoutes.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERoutes.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCERoutes.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERoutes.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.Routes.Delete(projectID, key.Name) @@ -12321,12 +12321,12 @@ func (g *GCERoutes) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCERoutes.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCERoutes.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCERoutes.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCERoutes.Delete(%v, %v) = %v", ctx, key, err) return err } @@ -12395,7 +12395,7 @@ type MockBetaSecurityPolicies struct { func (m *MockBetaSecurityPolicies) Get(ctx context.Context, key *meta.Key) (*beta.SecurityPolicy, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaSecurityPolicies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaSecurityPolicies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -12407,12 +12407,12 @@ func (m *MockBetaSecurityPolicies) Get(ctx context.Context, key *meta.Key) (*bet defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockBetaSecurityPolicies.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaSecurityPolicies.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToBeta() - glog.V(5).Infof("MockBetaSecurityPolicies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockBetaSecurityPolicies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -12420,7 +12420,7 @@ func (m *MockBetaSecurityPolicies) Get(ctx context.Context, key *meta.Key) (*bet Code: http.StatusNotFound, Message: fmt.Sprintf("MockBetaSecurityPolicies %v not found", key), } - glog.V(5).Infof("MockBetaSecurityPolicies.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaSecurityPolicies.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -12428,7 +12428,7 @@ func (m *MockBetaSecurityPolicies) Get(ctx context.Context, key *meta.Key) (*bet func (m *MockBetaSecurityPolicies) List(ctx context.Context, fl *filter.F) ([]*beta.SecurityPolicy, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockBetaSecurityPolicies.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockBetaSecurityPolicies.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -12438,7 +12438,7 @@ func (m *MockBetaSecurityPolicies) List(ctx context.Context, fl *filter.F) ([]*b if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockBetaSecurityPolicies.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockBetaSecurityPolicies.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -12451,7 +12451,7 @@ func (m *MockBetaSecurityPolicies) List(ctx context.Context, fl *filter.F) ([]*b objs = append(objs, obj.ToBeta()) } - glog.V(5).Infof("MockBetaSecurityPolicies.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockBetaSecurityPolicies.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -12459,7 +12459,7 @@ func (m *MockBetaSecurityPolicies) List(ctx context.Context, fl *filter.F) ([]*b func (m *MockBetaSecurityPolicies) Insert(ctx context.Context, key *meta.Key, obj *beta.SecurityPolicy) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockBetaSecurityPolicies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaSecurityPolicies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -12471,7 +12471,7 @@ func (m *MockBetaSecurityPolicies) Insert(ctx context.Context, key *meta.Key, ob defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockBetaSecurityPolicies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaSecurityPolicies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -12479,7 +12479,7 @@ func (m *MockBetaSecurityPolicies) Insert(ctx context.Context, key *meta.Key, ob Code: http.StatusConflict, Message: fmt.Sprintf("MockBetaSecurityPolicies %v exists", key), } - glog.V(5).Infof("MockBetaSecurityPolicies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaSecurityPolicies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -12488,7 +12488,7 @@ func (m *MockBetaSecurityPolicies) Insert(ctx context.Context, key *meta.Key, ob obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "securityPolicies", key) m.Objects[*key] = &MockSecurityPoliciesObj{obj} - glog.V(5).Infof("MockBetaSecurityPolicies.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockBetaSecurityPolicies.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -12496,7 +12496,7 @@ func (m *MockBetaSecurityPolicies) Insert(ctx context.Context, key *meta.Key, ob func (m *MockBetaSecurityPolicies) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -12508,7 +12508,7 @@ func (m *MockBetaSecurityPolicies) Delete(ctx context.Context, key *meta.Key) er defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -12516,12 +12516,12 @@ func (m *MockBetaSecurityPolicies) Delete(ctx context.Context, key *meta.Key) er Code: http.StatusNotFound, Message: fmt.Sprintf("MockBetaSecurityPolicies %v not found", key), } - glog.V(5).Infof("MockBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockBetaSecurityPolicies.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockBetaSecurityPolicies.Delete(%v, %v) = nil", ctx, key) return nil } @@ -12577,10 +12577,10 @@ type GCEBetaSecurityPolicies struct { // Get the SecurityPolicy named by key.
func (g *GCEBetaSecurityPolicies) Get(ctx context.Context, key *meta.Key) (*beta.SecurityPolicy, error) { - glog.V(5).Infof("GCEBetaSecurityPolicies.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBetaSecurityPolicies.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaSecurityPolicies.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaSecurityPolicies.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") @@ -12590,21 +12590,21 @@ func (g *GCEBetaSecurityPolicies) Get(ctx context.Context, key *meta.Key) (*beta Version: meta.Version("beta"), Service: "SecurityPolicies", } - glog.V(5).Infof("GCEBetaSecurityPolicies.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaSecurityPolicies.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Beta.SecurityPolicies.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEBetaSecurityPolicies.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all SecurityPolicy objects. func (g *GCEBetaSecurityPolicies) List(ctx context.Context, fl *filter.F) ([]*beta.SecurityPolicy, error) { - glog.V(5).Infof("GCEBetaSecurityPolicies.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEBetaSecurityPolicies.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") rk := &RateLimitKey{ ProjectID: projectID, @@ -12615,30 +12615,30 @@ func (g *GCEBetaSecurityPolicies) List(ctx context.Context, fl *filter.F) ([]*be if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEBetaSecurityPolicies.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEBetaSecurityPolicies.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.Beta.SecurityPolicies.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*beta.SecurityPolicy f := func(l *beta.SecurityPolicyList) error { - glog.V(5).Infof("GCEBetaSecurityPolicies.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEBetaSecurityPolicies.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEBetaSecurityPolicies.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEBetaSecurityPolicies.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEBetaSecurityPolicies.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEBetaSecurityPolicies.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -12646,9 +12646,9 @@ func (g *GCEBetaSecurityPolicies) List(ctx context.Context, fl *filter.F) ([]*be // Insert SecurityPolicy with key of value obj. func (g *GCEBetaSecurityPolicies) Insert(ctx context.Context, key *meta.Key, obj *beta.SecurityPolicy) error { - glog.V(5).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") @@ -12658,9 +12658,9 @@ func (g *GCEBetaSecurityPolicies) Insert(ctx context.Context, key *meta.Key, obj Version: meta.Version("beta"), Service: "SecurityPolicies", } - glog.V(5).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -12669,20 +12669,20 @@ func (g *GCEBetaSecurityPolicies) Insert(ctx context.Context, key *meta.Key, obj op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the SecurityPolicy referenced by key. 
func (g *GCEBetaSecurityPolicies) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEBetaSecurityPolicies.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBetaSecurityPolicies.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaSecurityPolicies.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaSecurityPolicies.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") @@ -12692,9 +12692,9 @@ func (g *GCEBetaSecurityPolicies) Delete(ctx context.Context, key *meta.Key) err Version: meta.Version("beta"), Service: "SecurityPolicies", } - glog.V(5).Infof("GCEBetaSecurityPolicies.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaSecurityPolicies.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.SecurityPolicies.Delete(projectID, key.Name) @@ -12703,21 +12703,21 @@ func (g *GCEBetaSecurityPolicies) Delete(ctx context.Context, key *meta.Key) err op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) return err } // AddRule is a method on GCEBetaSecurityPolicies. func (g *GCEBetaSecurityPolicies) AddRule(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicyRule) error { - glog.V(5).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") @@ -12727,30 +12727,30 @@ func (g *GCEBetaSecurityPolicies) AddRule(ctx context.Context, key *meta.Key, ar Version: meta.Version("beta"), Service: "SecurityPolicies", } - glog.V(5).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.SecurityPolicies.AddRule(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...) = %+v", ctx, key, err) return err } // GetRule is a method on GCEBetaSecurityPolicies. func (g *GCEBetaSecurityPolicies) GetRule(ctx context.Context, key *meta.Key) (*beta.SecurityPolicyRule, error) { - glog.V(5).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") @@ -12760,25 +12760,25 @@ func (g *GCEBetaSecurityPolicies) GetRule(ctx context.Context, key *meta.Key) (* Version: meta.Version("beta"), Service: "SecurityPolicies", } - glog.V(5).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Beta.SecurityPolicies.GetRule(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...) = %+v, %v", ctx, key, v, err) return v, err } // Patch is a method on GCEBetaSecurityPolicies. func (g *GCEBetaSecurityPolicies) Patch(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicy) error { - glog.V(5).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") @@ -12788,30 +12788,30 @@ func (g *GCEBetaSecurityPolicies) Patch(ctx context.Context, key *meta.Key, arg0 Version: meta.Version("beta"), Service: "SecurityPolicies", } - glog.V(5).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.SecurityPolicies.Patch(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...) = %+v", ctx, key, err) return err } // PatchRule is a method on GCEBetaSecurityPolicies. func (g *GCEBetaSecurityPolicies) PatchRule(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicyRule) error { - glog.V(5).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") @@ -12821,30 +12821,30 @@ func (g *GCEBetaSecurityPolicies) PatchRule(ctx context.Context, key *meta.Key, Version: meta.Version("beta"), Service: "SecurityPolicies", } - glog.V(5).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.SecurityPolicies.PatchRule(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...) = %+v", ctx, key, err) return err } // RemoveRule is a method on GCEBetaSecurityPolicies.
func (g *GCEBetaSecurityPolicies) RemoveRule(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") @@ -12854,21 +12854,21 @@ func (g *GCEBetaSecurityPolicies) RemoveRule(ctx context.Context, key *meta.Key) Version: meta.Version("beta"), Service: "SecurityPolicies", } - glog.V(5).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.SecurityPolicies.RemoveRule(projectID, key.Name) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...) 
= %+v", ctx, key, err) return err } @@ -12927,7 +12927,7 @@ type MockSslCertificates struct { func (m *MockSslCertificates) Get(ctx context.Context, key *meta.Key) (*ga.SslCertificate, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockSslCertificates.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockSslCertificates.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -12939,12 +12939,12 @@ func (m *MockSslCertificates) Get(ctx context.Context, key *meta.Key) (*ga.SslCe defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockSslCertificates.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockSslCertificates.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockSslCertificates.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockSslCertificates.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -12952,7 +12952,7 @@ func (m *MockSslCertificates) Get(ctx context.Context, key *meta.Key) (*ga.SslCe Code: http.StatusNotFound, Message: fmt.Sprintf("MockSslCertificates %v not found", key), } - glog.V(5).Infof("MockSslCertificates.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockSslCertificates.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -12960,7 +12960,7 @@ func (m *MockSslCertificates) Get(ctx context.Context, key *meta.Key) (*ga.SslCe func (m *MockSslCertificates) List(ctx context.Context, fl *filter.F) ([]*ga.SslCertificate, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockSslCertificates.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockSslCertificates.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -12970,7 +12970,7 @@ func (m *MockSslCertificates) List(ctx context.Context, fl *filter.F) ([]*ga.Ssl if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockSslCertificates.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockSslCertificates.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -12983,7 +12983,7 @@ func (m *MockSslCertificates) List(ctx context.Context, fl *filter.F) ([]*ga.Ssl objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockSslCertificates.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockSslCertificates.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -12991,7 +12991,7 @@ func (m *MockSslCertificates) List(ctx context.Context, fl *filter.F) ([]*ga.Ssl func (m *MockSslCertificates) Insert(ctx context.Context, key *meta.Key, obj *ga.SslCertificate) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -13003,7 +13003,7 @@ func (m *MockSslCertificates) Insert(ctx context.Context, key *meta.Key, obj *ga defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -13011,7 +13011,7 @@ func (m *MockSslCertificates) Insert(ctx 
context.Context, key *meta.Key, obj *ga Code: http.StatusConflict, Message: fmt.Sprintf("MockSslCertificates %v exists", key), } - glog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -13020,7 +13020,7 @@ func (m *MockSslCertificates) Insert(ctx context.Context, key *meta.Key, obj *ga obj.SelfLink = SelfLink(meta.VersionGA, projectID, "sslCertificates", key) m.Objects[*key] = &MockSslCertificatesObj{obj} - glog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -13028,7 +13028,7 @@ func (m *MockSslCertificates) Insert(ctx context.Context, key *meta.Key, obj *ga func (m *MockSslCertificates) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -13040,7 +13040,7 @@ func (m *MockSslCertificates) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -13048,12 +13048,12 @@ func (m *MockSslCertificates) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockSslCertificates %v not found", key), } - glog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = nil", ctx, key) return nil } @@ -13069,10 +13069,10 @@ type GCESslCertificates struct { // Get the SslCertificate named by key. 
func (g *GCESslCertificates) Get(ctx context.Context, key *meta.Key) (*ga.SslCertificate, error) { - glog.V(5).Infof("GCESslCertificates.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCESslCertificates.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCESslCertificates.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCESslCertificates.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "SslCertificates") @@ -13082,21 +13082,21 @@ func (g *GCESslCertificates) Get(ctx context.Context, key *meta.Key) (*ga.SslCer Version: meta.Version("ga"), Service: "SslCertificates", } - glog.V(5).Infof("GCESslCertificates.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCESslCertificates.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCESslCertificates.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCESslCertificates.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.SslCertificates.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCESslCertificates.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCESslCertificates.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all SslCertificate objects. func (g *GCESslCertificates) List(ctx context.Context, fl *filter.F) ([]*ga.SslCertificate, error) { - glog.V(5).Infof("GCESslCertificates.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCESslCertificates.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "SslCertificates") rk := &RateLimitKey{ ProjectID: projectID, @@ -13107,30 +13107,30 @@ func (g *GCESslCertificates) List(ctx context.Context, fl *filter.F) ([]*ga.SslC if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCESslCertificates.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCESslCertificates.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.SslCertificates.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.SslCertificate f := func(l *ga.SslCertificateList) error { - glog.V(5).Infof("GCESslCertificates.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCESslCertificates.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCESslCertificates.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCESslCertificates.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCESslCertificates.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCESslCertificates.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCESslCertificates.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCESslCertificates.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -13138,9 +13138,9 @@ func (g *GCESslCertificates) List(ctx context.Context, fl *filter.F) ([]*ga.SslC // Insert SslCertificate with key of value obj. func (g *GCESslCertificates) Insert(ctx context.Context, key *meta.Key, obj *ga.SslCertificate) error { - glog.V(5).Infof("GCESslCertificates.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCESslCertificates.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCESslCertificates.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCESslCertificates.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "SslCertificates") @@ -13150,9 +13150,9 @@ func (g *GCESslCertificates) Insert(ctx context.Context, key *meta.Key, obj *ga. Version: meta.Version("ga"), Service: "SslCertificates", } - glog.V(5).Infof("GCESslCertificates.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCESslCertificates.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCESslCertificates.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCESslCertificates.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -13161,20 +13161,20 @@ func (g *GCESslCertificates) Insert(ctx context.Context, key *meta.Key, obj *ga. op, err := call.Do() if err != nil { - glog.V(4).Infof("GCESslCertificates.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCESslCertificates.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCESslCertificates.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCESslCertificates.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the SslCertificate referenced by key. 
func (g *GCESslCertificates) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCESslCertificates.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCESslCertificates.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCESslCertificates.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCESslCertificates.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "SslCertificates") @@ -13184,9 +13184,9 @@ func (g *GCESslCertificates) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "SslCertificates", } - glog.V(5).Infof("GCESslCertificates.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCESslCertificates.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCESslCertificates.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCESslCertificates.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.SslCertificates.Delete(projectID, key.Name) @@ -13195,12 +13195,12 @@ func (g *GCESslCertificates) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCESslCertificates.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCESslCertificates.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCESslCertificates.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCESslCertificates.Delete(%v, %v) = %v", ctx, key, err) return err } @@ -13261,7 +13261,7 @@ type MockTargetHttpProxies struct { func (m *MockTargetHttpProxies) Get(ctx context.Context, key *meta.Key) (*ga.TargetHttpProxy, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -13273,12 +13273,12 @@ func (m *MockTargetHttpProxies) Get(ctx context.Context, key *meta.Key) (*ga.Tar defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -13286,7 +13286,7 @@ func (m *MockTargetHttpProxies) Get(ctx context.Context, key *meta.Key) (*ga.Tar Code: http.StatusNotFound, Message: fmt.Sprintf("MockTargetHttpProxies %v not found", key), } - glog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -13294,7 +13294,7 @@ func (m *MockTargetHttpProxies) Get(ctx context.Context, key *meta.Key) (*ga.Tar func (m *MockTargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpProxy, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockTargetHttpProxies.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + 
klog.V(5).Infof("MockTargetHttpProxies.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -13304,7 +13304,7 @@ func (m *MockTargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*ga.T if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockTargetHttpProxies.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockTargetHttpProxies.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -13317,7 +13317,7 @@ func (m *MockTargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*ga.T objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockTargetHttpProxies.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockTargetHttpProxies.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -13325,7 +13325,7 @@ func (m *MockTargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*ga.T func (m *MockTargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj *ga.TargetHttpProxy) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -13337,7 +13337,7 @@ func (m *MockTargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj * defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -13345,7 +13345,7 @@ func (m *MockTargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj * Code: http.StatusConflict, Message: fmt.Sprintf("MockTargetHttpProxies %v exists", key), } - glog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -13354,7 +13354,7 @@ func (m *MockTargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj * obj.SelfLink = SelfLink(meta.VersionGA, projectID, "targetHttpProxies", key) m.Objects[*key] = &MockTargetHttpProxiesObj{obj} - glog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -13362,7 +13362,7 @@ func (m *MockTargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj * func (m *MockTargetHttpProxies) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -13374,7 +13374,7 @@ func (m *MockTargetHttpProxies) Delete(ctx context.Context, key *meta.Key) error defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -13382,12 +13382,12 @@ func (m *MockTargetHttpProxies) Delete(ctx context.Context, key *meta.Key) error Code: http.StatusNotFound, Message: fmt.Sprintf("MockTargetHttpProxies %v not found", key), } - glog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = 
%v", ctx, key, err) + klog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = nil", ctx, key) return nil } @@ -13411,10 +13411,10 @@ type GCETargetHttpProxies struct { // Get the TargetHttpProxy named by key. func (g *GCETargetHttpProxies) Get(ctx context.Context, key *meta.Key) (*ga.TargetHttpProxy, error) { - glog.V(5).Infof("GCETargetHttpProxies.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCETargetHttpProxies.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetHttpProxies.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetHttpProxies.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") @@ -13424,21 +13424,21 @@ func (g *GCETargetHttpProxies) Get(ctx context.Context, key *meta.Key) (*ga.Targ Version: meta.Version("ga"), Service: "TargetHttpProxies", } - glog.V(5).Infof("GCETargetHttpProxies.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetHttpProxies.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetHttpProxies.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpProxies.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.TargetHttpProxies.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCETargetHttpProxies.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCETargetHttpProxies.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all TargetHttpProxy objects. func (g *GCETargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpProxy, error) { - glog.V(5).Infof("GCETargetHttpProxies.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCETargetHttpProxies.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") rk := &RateLimitKey{ ProjectID: projectID, @@ -13449,30 +13449,30 @@ func (g *GCETargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*ga.Ta if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCETargetHttpProxies.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCETargetHttpProxies.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.TargetHttpProxies.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.TargetHttpProxy f := func(l *ga.TargetHttpProxyList) error { - glog.V(5).Infof("GCETargetHttpProxies.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCETargetHttpProxies.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCETargetHttpProxies.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCETargetHttpProxies.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCETargetHttpProxies.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCETargetHttpProxies.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCETargetHttpProxies.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCETargetHttpProxies.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -13480,9 +13480,9 @@ func (g *GCETargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*ga.Ta // Insert TargetHttpProxy with key of value obj. func (g *GCETargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj *ga.TargetHttpProxy) error { - glog.V(5).Infof("GCETargetHttpProxies.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCETargetHttpProxies.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCETargetHttpProxies.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetHttpProxies.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") @@ -13492,9 +13492,9 @@ func (g *GCETargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj *g Version: meta.Version("ga"), Service: "TargetHttpProxies", } - glog.V(5).Infof("GCETargetHttpProxies.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetHttpProxies.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetHttpProxies.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpProxies.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -13503,20 +13503,20 @@ func (g *GCETargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj *g op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetHttpProxies.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpProxies.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetHttpProxies.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCETargetHttpProxies.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the TargetHttpProxy referenced by key. 
func (g *GCETargetHttpProxies) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCETargetHttpProxies.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCETargetHttpProxies.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetHttpProxies.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetHttpProxies.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") @@ -13526,9 +13526,9 @@ func (g *GCETargetHttpProxies) Delete(ctx context.Context, key *meta.Key) error Version: meta.Version("ga"), Service: "TargetHttpProxies", } - glog.V(5).Infof("GCETargetHttpProxies.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetHttpProxies.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetHttpProxies.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpProxies.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.TargetHttpProxies.Delete(projectID, key.Name) @@ -13537,21 +13537,21 @@ func (g *GCETargetHttpProxies) Delete(ctx context.Context, key *meta.Key) error op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) return err } // SetUrlMap is a method on GCETargetHttpProxies. func (g *GCETargetHttpProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg0 *ga.UrlMapReference) error { - glog.V(5).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") @@ -13561,21 +13561,21 @@ func (g *GCETargetHttpProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg Version: meta.Version("ga"), Service: "TargetHttpProxies", } - glog.V(5).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.TargetHttpProxies.SetUrlMap(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...) 
= %+v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) return err } @@ -13638,7 +13638,7 @@ type MockTargetHttpsProxies struct { func (m *MockTargetHttpsProxies) Get(ctx context.Context, key *meta.Key) (*ga.TargetHttpsProxy, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -13650,12 +13650,12 @@ func (m *MockTargetHttpsProxies) Get(ctx context.Context, key *meta.Key) (*ga.Ta defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -13663,7 +13663,7 @@ func (m *MockTargetHttpsProxies) Get(ctx context.Context, key *meta.Key) (*ga.Ta Code: http.StatusNotFound, Message: fmt.Sprintf("MockTargetHttpsProxies %v not found", key), } - glog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -13671,7 +13671,7 @@ func (m *MockTargetHttpsProxies) Get(ctx context.Context, key *meta.Key) (*ga.Ta func (m *MockTargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpsProxy, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockTargetHttpsProxies.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockTargetHttpsProxies.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -13681,7 +13681,7 @@ func (m *MockTargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*ga. if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockTargetHttpsProxies.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockTargetHttpsProxies.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -13694,7 +13694,7 @@ func (m *MockTargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*ga. objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockTargetHttpsProxies.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockTargetHttpsProxies.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -13702,7 +13702,7 @@ func (m *MockTargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*ga. 
func (m *MockTargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj *ga.TargetHttpsProxy) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -13714,7 +13714,7 @@ func (m *MockTargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -13722,7 +13722,7 @@ func (m *MockTargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj Code: http.StatusConflict, Message: fmt.Sprintf("MockTargetHttpsProxies %v exists", key), } - glog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -13731,7 +13731,7 @@ func (m *MockTargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj obj.SelfLink = SelfLink(meta.VersionGA, projectID, "targetHttpsProxies", key) m.Objects[*key] = &MockTargetHttpsProxiesObj{obj} - glog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -13739,7 +13739,7 @@ func (m *MockTargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj func (m *MockTargetHttpsProxies) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -13751,7 +13751,7 @@ func (m *MockTargetHttpsProxies) Delete(ctx context.Context, key *meta.Key) erro defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -13759,12 +13759,12 @@ func (m *MockTargetHttpsProxies) Delete(ctx context.Context, key *meta.Key) erro Code: http.StatusNotFound, Message: fmt.Sprintf("MockTargetHttpsProxies %v not found", key), } - glog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = nil", ctx, key) return nil } @@ -13796,10 +13796,10 @@ type GCETargetHttpsProxies struct { // Get the TargetHttpsProxy named by key. 
func (g *GCETargetHttpsProxies) Get(ctx context.Context, key *meta.Key) (*ga.TargetHttpsProxy, error) { - glog.V(5).Infof("GCETargetHttpsProxies.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCETargetHttpsProxies.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetHttpsProxies.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetHttpsProxies.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") @@ -13809,21 +13809,21 @@ func (g *GCETargetHttpsProxies) Get(ctx context.Context, key *meta.Key) (*ga.Tar Version: meta.Version("ga"), Service: "TargetHttpsProxies", } - glog.V(5).Infof("GCETargetHttpsProxies.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetHttpsProxies.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetHttpsProxies.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.TargetHttpsProxies.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCETargetHttpsProxies.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCETargetHttpsProxies.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all TargetHttpsProxy objects. func (g *GCETargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpsProxy, error) { - glog.V(5).Infof("GCETargetHttpsProxies.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCETargetHttpsProxies.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") rk := &RateLimitKey{ ProjectID: projectID, @@ -13834,30 +13834,30 @@ func (g *GCETargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*ga.T if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCETargetHttpsProxies.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCETargetHttpsProxies.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.TargetHttpsProxies.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.TargetHttpsProxy f := func(l *ga.TargetHttpsProxyList) error { - glog.V(5).Infof("GCETargetHttpsProxies.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCETargetHttpsProxies.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCETargetHttpsProxies.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCETargetHttpsProxies.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCETargetHttpsProxies.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCETargetHttpsProxies.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCETargetHttpsProxies.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCETargetHttpsProxies.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -13865,9 +13865,9 @@ func (g *GCETargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*ga.T // Insert TargetHttpsProxy with key of value obj. func (g *GCETargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj *ga.TargetHttpsProxy) error { - glog.V(5).Infof("GCETargetHttpsProxies.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCETargetHttpsProxies.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCETargetHttpsProxies.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetHttpsProxies.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") @@ -13877,9 +13877,9 @@ func (g *GCETargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj * Version: meta.Version("ga"), Service: "TargetHttpsProxies", } - glog.V(5).Infof("GCETargetHttpsProxies.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetHttpsProxies.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetHttpsProxies.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -13888,20 +13888,20 @@ func (g *GCETargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj * op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetHttpsProxies.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetHttpsProxies.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCETargetHttpsProxies.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the TargetHttpsProxy referenced by key. 
func (g *GCETargetHttpsProxies) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCETargetHttpsProxies.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCETargetHttpsProxies.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetHttpsProxies.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetHttpsProxies.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") @@ -13911,9 +13911,9 @@ func (g *GCETargetHttpsProxies) Delete(ctx context.Context, key *meta.Key) error Version: meta.Version("ga"), Service: "TargetHttpsProxies", } - glog.V(5).Infof("GCETargetHttpsProxies.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetHttpsProxies.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetHttpsProxies.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.TargetHttpsProxies.Delete(projectID, key.Name) @@ -13922,21 +13922,21 @@ func (g *GCETargetHttpsProxies) Delete(ctx context.Context, key *meta.Key) error op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) return err } // SetSslCertificates is a method on GCETargetHttpsProxies. func (g *GCETargetHttpsProxies) SetSslCertificates(ctx context.Context, key *meta.Key, arg0 *ga.TargetHttpsProxiesSetSslCertificatesRequest) error { - glog.V(5).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") @@ -13946,30 +13946,30 @@ func (g *GCETargetHttpsProxies) SetSslCertificates(ctx context.Context, key *met Version: meta.Version("ga"), Service: "TargetHttpsProxies", } - glog.V(5).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.TargetHttpsProxies.SetSslCertificates(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...) 
= %+v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...) = %+v", ctx, key, err) return err } // SetUrlMap is a method on GCETargetHttpsProxies. func (g *GCETargetHttpsProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg0 *ga.UrlMapReference) error { - glog.V(5).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") @@ -13979,21 +13979,21 @@ func (g *GCETargetHttpsProxies) SetUrlMap(ctx context.Context, key *meta.Key, ar Version: meta.Version("ga"), Service: "TargetHttpsProxies", } - glog.V(5).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.TargetHttpsProxies.SetUrlMap(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...) 
= %+v", ctx, key, err) return err } @@ -14056,7 +14056,7 @@ type MockTargetPools struct { func (m *MockTargetPools) Get(ctx context.Context, key *meta.Key) (*ga.TargetPool, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockTargetPools.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetPools.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -14068,12 +14068,12 @@ func (m *MockTargetPools) Get(ctx context.Context, key *meta.Key) (*ga.TargetPoo defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockTargetPools.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockTargetPools.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockTargetPools.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockTargetPools.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -14081,7 +14081,7 @@ func (m *MockTargetPools) Get(ctx context.Context, key *meta.Key) (*ga.TargetPoo Code: http.StatusNotFound, Message: fmt.Sprintf("MockTargetPools %v not found", key), } - glog.V(5).Infof("MockTargetPools.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockTargetPools.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -14089,7 +14089,7 @@ func (m *MockTargetPools) Get(ctx context.Context, key *meta.Key) (*ga.TargetPoo func (m *MockTargetPools) List(ctx context.Context, region string, fl *filter.F) ([]*ga.TargetPool, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { - glog.V(5).Infof("MockTargetPools.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + klog.V(5).Infof("MockTargetPools.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -14099,7 +14099,7 @@ func (m *MockTargetPools) List(ctx context.Context, region string, fl *filter.F) if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockTargetPools.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + klog.V(5).Infof("MockTargetPools.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } @@ -14115,7 +14115,7 @@ func (m *MockTargetPools) List(ctx context.Context, region string, fl *filter.F) objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockTargetPools.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + klog.V(5).Infof("MockTargetPools.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } @@ -14123,7 +14123,7 @@ func (m *MockTargetPools) List(ctx context.Context, region string, fl *filter.F) func (m *MockTargetPools) Insert(ctx context.Context, key *meta.Key, obj *ga.TargetPool) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -14135,7 +14135,7 @@ func (m *MockTargetPools) Insert(ctx context.Context, key *meta.Key, obj *ga.Tar defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -14143,7 +14143,7 @@ func (m *MockTargetPools) Insert(ctx context.Context, key 
*meta.Key, obj *ga.Tar Code: http.StatusConflict, Message: fmt.Sprintf("MockTargetPools %v exists", key), } - glog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -14152,7 +14152,7 @@ func (m *MockTargetPools) Insert(ctx context.Context, key *meta.Key, obj *ga.Tar obj.SelfLink = SelfLink(meta.VersionGA, projectID, "targetPools", key) m.Objects[*key] = &MockTargetPoolsObj{obj} - glog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -14160,7 +14160,7 @@ func (m *MockTargetPools) Insert(ctx context.Context, key *meta.Key, obj *ga.Tar func (m *MockTargetPools) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockTargetPools.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockTargetPools.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -14172,7 +14172,7 @@ func (m *MockTargetPools) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockTargetPools.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockTargetPools.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -14180,12 +14180,12 @@ func (m *MockTargetPools) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockTargetPools %v not found", key), } - glog.V(5).Infof("MockTargetPools.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockTargetPools.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockTargetPools.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockTargetPools.Delete(%v, %v) = nil", ctx, key) return nil } @@ -14217,10 +14217,10 @@ type GCETargetPools struct { // Get the TargetPool named by key. func (g *GCETargetPools) Get(ctx context.Context, key *meta.Key) (*ga.TargetPool, error) { - glog.V(5).Infof("GCETargetPools.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCETargetPools.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetPools.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetPools.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") @@ -14230,21 +14230,21 @@ func (g *GCETargetPools) Get(ctx context.Context, key *meta.Key) (*ga.TargetPool Version: meta.Version("ga"), Service: "TargetPools", } - glog.V(5).Infof("GCETargetPools.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetPools.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetPools.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.TargetPools.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCETargetPools.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCETargetPools.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all TargetPool objects. 
func (g *GCETargetPools) List(ctx context.Context, region string, fl *filter.F) ([]*ga.TargetPool, error) { - glog.V(5).Infof("GCETargetPools.List(%v, %v, %v) called", ctx, region, fl) + klog.V(5).Infof("GCETargetPools.List(%v, %v, %v) called", ctx, region, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") rk := &RateLimitKey{ ProjectID: projectID, @@ -14255,30 +14255,30 @@ func (g *GCETargetPools) List(ctx context.Context, region string, fl *filter.F) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCETargetPools.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + klog.V(5).Infof("GCETargetPools.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) call := g.s.GA.TargetPools.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.TargetPool f := func(l *ga.TargetPoolList) error { - glog.V(5).Infof("GCETargetPools.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCETargetPools.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCETargetPools.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCETargetPools.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCETargetPools.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCETargetPools.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCETargetPools.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCETargetPools.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -14286,9 +14286,9 @@ func (g *GCETargetPools) List(ctx context.Context, region string, fl *filter.F) // Insert TargetPool with key of value obj. func (g *GCETargetPools) Insert(ctx context.Context, key *meta.Key, obj *ga.TargetPool) error { - glog.V(5).Infof("GCETargetPools.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCETargetPools.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCETargetPools.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetPools.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") @@ -14298,9 +14298,9 @@ func (g *GCETargetPools) Insert(ctx context.Context, key *meta.Key, obj *ga.Targ Version: meta.Version("ga"), Service: "TargetPools", } - glog.V(5).Infof("GCETargetPools.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetPools.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetPools.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -14309,20 +14309,20 @@ func (g *GCETargetPools) Insert(ctx context.Context, key *meta.Key, obj *ga.Targ op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetPools.Insert(%v, %v, ...) 
= %+v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetPools.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCETargetPools.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the TargetPool referenced by key. func (g *GCETargetPools) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCETargetPools.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCETargetPools.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetPools.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetPools.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") @@ -14332,9 +14332,9 @@ func (g *GCETargetPools) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "TargetPools", } - glog.V(5).Infof("GCETargetPools.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetPools.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetPools.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.TargetPools.Delete(projectID, key.Region, key.Name) @@ -14342,21 +14342,21 @@ func (g *GCETargetPools) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetPools.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetPools.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.Delete(%v, %v) = %v", ctx, key, err) return err } // AddInstance is a method on GCETargetPools. 
func (g *GCETargetPools) AddInstance(ctx context.Context, key *meta.Key, arg0 *ga.TargetPoolsAddInstanceRequest) error { - glog.V(5).Infof("GCETargetPools.AddInstance(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCETargetPools.AddInstance(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetPools.AddInstance(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetPools.AddInstance(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") @@ -14366,30 +14366,30 @@ func (g *GCETargetPools) AddInstance(ctx context.Context, key *meta.Key, arg0 *g Version: meta.Version("ga"), Service: "TargetPools", } - glog.V(5).Infof("GCETargetPools.AddInstance(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetPools.AddInstance(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetPools.AddInstance(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.AddInstance(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.TargetPools.AddInstance(projectID, key.Region, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetPools.AddInstance(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.AddInstance(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetPools.AddInstance(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.AddInstance(%v, %v, ...) = %+v", ctx, key, err) return err } // RemoveInstance is a method on GCETargetPools. func (g *GCETargetPools) RemoveInstance(ctx context.Context, key *meta.Key, arg0 *ga.TargetPoolsRemoveInstanceRequest) error { - glog.V(5).Infof("GCETargetPools.RemoveInstance(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCETargetPools.RemoveInstance(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetPools.RemoveInstance(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetPools.RemoveInstance(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") @@ -14399,21 +14399,21 @@ func (g *GCETargetPools) RemoveInstance(ctx context.Context, key *meta.Key, arg0 Version: meta.Version("ga"), Service: "TargetPools", } - glog.V(5).Infof("GCETargetPools.RemoveInstance(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetPools.RemoveInstance(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetPools.RemoveInstance(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.RemoveInstance(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.TargetPools.RemoveInstance(projectID, key.Region, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetPools.RemoveInstance(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.RemoveInstance(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetPools.RemoveInstance(%v, %v, ...) 
= %+v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.RemoveInstance(%v, %v, ...) = %+v", ctx, key, err) return err } @@ -14474,7 +14474,7 @@ type MockUrlMaps struct { func (m *MockUrlMaps) Get(ctx context.Context, key *meta.Key) (*ga.UrlMap, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockUrlMaps.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockUrlMaps.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -14486,12 +14486,12 @@ func (m *MockUrlMaps) Get(ctx context.Context, key *meta.Key) (*ga.UrlMap, error defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockUrlMaps.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockUrlMaps.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockUrlMaps.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockUrlMaps.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -14499,7 +14499,7 @@ func (m *MockUrlMaps) Get(ctx context.Context, key *meta.Key) (*ga.UrlMap, error Code: http.StatusNotFound, Message: fmt.Sprintf("MockUrlMaps %v not found", key), } - glog.V(5).Infof("MockUrlMaps.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockUrlMaps.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -14507,7 +14507,7 @@ func (m *MockUrlMaps) Get(ctx context.Context, key *meta.Key) (*ga.UrlMap, error func (m *MockUrlMaps) List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockUrlMaps.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockUrlMaps.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -14517,7 +14517,7 @@ func (m *MockUrlMaps) List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, err if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockUrlMaps.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockUrlMaps.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -14530,7 +14530,7 @@ func (m *MockUrlMaps) List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, err objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockUrlMaps.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockUrlMaps.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -14538,7 +14538,7 @@ func (m *MockUrlMaps) List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, err func (m *MockUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *ga.UrlMap) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -14550,7 +14550,7 @@ func (m *MockUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *ga.UrlMap) defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -14558,7 +14558,7 @@ func (m *MockUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *ga.UrlMap) Code: http.StatusConflict, Message: fmt.Sprintf("MockUrlMaps %v exists", key), } - 
glog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -14567,7 +14567,7 @@ func (m *MockUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *ga.UrlMap) obj.SelfLink = SelfLink(meta.VersionGA, projectID, "urlMaps", key) m.Objects[*key] = &MockUrlMapsObj{obj} - glog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -14575,7 +14575,7 @@ func (m *MockUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *ga.UrlMap) func (m *MockUrlMaps) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -14587,7 +14587,7 @@ func (m *MockUrlMaps) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -14595,12 +14595,12 @@ func (m *MockUrlMaps) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockUrlMaps %v not found", key), } - glog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = nil", ctx, key) return nil } @@ -14624,10 +14624,10 @@ type GCEUrlMaps struct { // Get the UrlMap named by key. func (g *GCEUrlMaps) Get(ctx context.Context, key *meta.Key) (*ga.UrlMap, error) { - glog.V(5).Infof("GCEUrlMaps.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEUrlMaps.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEUrlMaps.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEUrlMaps.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps") @@ -14637,21 +14637,21 @@ func (g *GCEUrlMaps) Get(ctx context.Context, key *meta.Key) (*ga.UrlMap, error) Version: meta.Version("ga"), Service: "UrlMaps", } - glog.V(5).Infof("GCEUrlMaps.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEUrlMaps.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEUrlMaps.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.UrlMaps.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEUrlMaps.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEUrlMaps.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all UrlMap objects. 
func (g *GCEUrlMaps) List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, error) { - glog.V(5).Infof("GCEUrlMaps.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEUrlMaps.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps") rk := &RateLimitKey{ ProjectID: projectID, @@ -14662,30 +14662,30 @@ func (g *GCEUrlMaps) List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, erro if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEUrlMaps.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEUrlMaps.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.UrlMaps.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.UrlMap f := func(l *ga.UrlMapList) error { - glog.V(5).Infof("GCEUrlMaps.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEUrlMaps.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEUrlMaps.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEUrlMaps.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEUrlMaps.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEUrlMaps.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEUrlMaps.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEUrlMaps.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -14693,9 +14693,9 @@ func (g *GCEUrlMaps) List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, erro // Insert UrlMap with key of value obj. func (g *GCEUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *ga.UrlMap) error { - glog.V(5).Infof("GCEUrlMaps.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEUrlMaps.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEUrlMaps.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEUrlMaps.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps") @@ -14705,9 +14705,9 @@ func (g *GCEUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *ga.UrlMap) Version: meta.Version("ga"), Service: "UrlMaps", } - glog.V(5).Infof("GCEUrlMaps.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEUrlMaps.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEUrlMaps.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -14716,20 +14716,20 @@ func (g *GCEUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *ga.UrlMap) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEUrlMaps.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Insert(%v, %v, ...) 
= %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEUrlMaps.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEUrlMaps.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the UrlMap referenced by key. func (g *GCEUrlMaps) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEUrlMaps.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEUrlMaps.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEUrlMaps.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEUrlMaps.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps") @@ -14739,9 +14739,9 @@ func (g *GCEUrlMaps) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "UrlMaps", } - glog.V(5).Infof("GCEUrlMaps.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEUrlMaps.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEUrlMaps.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.UrlMaps.Delete(projectID, key.Name) @@ -14750,21 +14750,21 @@ func (g *GCEUrlMaps) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEUrlMaps.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEUrlMaps.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } // Update is a method on GCEUrlMaps. func (g *GCEUrlMaps) Update(ctx context.Context, key *meta.Key, arg0 *ga.UrlMap) error { - glog.V(5).Infof("GCEUrlMaps.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEUrlMaps.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEUrlMaps.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEUrlMaps.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps") @@ -14774,21 +14774,21 @@ func (g *GCEUrlMaps) Update(ctx context.Context, key *meta.Key, arg0 *ga.UrlMap) Version: meta.Version("ga"), Service: "UrlMaps", } - glog.V(5).Infof("GCEUrlMaps.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEUrlMaps.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEUrlMaps.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.UrlMaps.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEUrlMaps.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEUrlMaps.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Update(%v, %v, ...) 
= %+v", ctx, key, err) return err } @@ -14839,7 +14839,7 @@ type MockZones struct { func (m *MockZones) Get(ctx context.Context, key *meta.Key) (*ga.Zone, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockZones.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockZones.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -14851,12 +14851,12 @@ func (m *MockZones) Get(ctx context.Context, key *meta.Key) (*ga.Zone, error) { defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockZones.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockZones.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockZones.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockZones.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -14864,7 +14864,7 @@ func (m *MockZones) Get(ctx context.Context, key *meta.Key) (*ga.Zone, error) { Code: http.StatusNotFound, Message: fmt.Sprintf("MockZones %v not found", key), } - glog.V(5).Infof("MockZones.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockZones.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -14872,7 +14872,7 @@ func (m *MockZones) Get(ctx context.Context, key *meta.Key) (*ga.Zone, error) { func (m *MockZones) List(ctx context.Context, fl *filter.F) ([]*ga.Zone, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockZones.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockZones.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -14882,7 +14882,7 @@ func (m *MockZones) List(ctx context.Context, fl *filter.F) ([]*ga.Zone, error) if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockZones.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockZones.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -14895,7 +14895,7 @@ func (m *MockZones) List(ctx context.Context, fl *filter.F) ([]*ga.Zone, error) objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockZones.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockZones.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -14911,10 +14911,10 @@ type GCEZones struct { // Get the Zone named by key. 
func (g *GCEZones) Get(ctx context.Context, key *meta.Key) (*ga.Zone, error) { - glog.V(5).Infof("GCEZones.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEZones.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEZones.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEZones.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Zones") @@ -14924,21 +14924,21 @@ func (g *GCEZones) Get(ctx context.Context, key *meta.Key) (*ga.Zone, error) { Version: meta.Version("ga"), Service: "Zones", } - glog.V(5).Infof("GCEZones.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEZones.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEZones.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEZones.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.Zones.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEZones.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEZones.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Zone objects. func (g *GCEZones) List(ctx context.Context, fl *filter.F) ([]*ga.Zone, error) { - glog.V(5).Infof("GCEZones.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEZones.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Zones") rk := &RateLimitKey{ ProjectID: projectID, @@ -14949,30 +14949,30 @@ func (g *GCEZones) List(ctx context.Context, fl *filter.F) ([]*ga.Zone, error) { if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEZones.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEZones.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.Zones.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.Zone f := func(l *ga.ZoneList) error { - glog.V(5).Infof("GCEZones.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEZones.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEZones.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEZones.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEZones.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEZones.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEZones.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEZones.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta/meta.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta/meta.go index 6a85207ed03b..04d13e0bfd29 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta/meta.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta/meta.go @@ -169,9 +169,9 @@ var AllServices = []*ServiceInfo{ Object: "Disk", Service: "RegionDisks", Resource: "disks", - version: VersionBeta, + version: VersionGA, keyType: Regional, - serviceType: reflect.TypeOf(&beta.RegionDisksService{}), + serviceType: reflect.TypeOf(&ga.RegionDisksService{}), additionalMethods: []string{ "Resize", }, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock/BUILD new file mode 100644 index 000000000000..e6e359278c43 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock/BUILD @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["mock.go"], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock", + visibility = ["//visibility:public"], + deps = [ + "//pkg/cloudprovider/providers/gce/cloud:go_default_library", + "//pkg/cloudprovider/providers/gce/cloud/filter:go_default_library", + "//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library", + "//vendor/google.golang.org/api/compute/v0.alpha:go_default_library", + "//vendor/google.golang.org/api/compute/v0.beta:go_default_library", + "//vendor/google.golang.org/api/compute/v1:go_default_library", + "//vendor/google.golang.org/api/googleapi:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock/mock.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock/mock.go new file mode 100644 index 000000000000..0489bf8d4a31 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock/mock.go @@ -0,0 +1,640 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package mock encapsulates mocks for testing GCE provider functionality. +// These methods are used to override the mock objects' methods in order to +// intercept the standard processing and to add custom logic for test purposes. +// +// // Example usage: +// cloud := cloud.NewMockGCE() +// cloud.MockTargetPools.AddInstanceHook = mock.AddInstanceHook +package mock + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "sync" + + alpha "google.golang.org/api/compute/v0.alpha" + beta "google.golang.org/api/compute/v0.beta" + ga "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" + cloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter" + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" +) + +var ( + // InUseError is a shared variable with error code StatusBadRequest for error verification. + InUseError = &googleapi.Error{Code: http.StatusBadRequest, Message: "It's being used by god."} + // InternalServerError is a shared variable with error code StatusInternalServerError for error verification. + InternalServerError = &googleapi.Error{Code: http.StatusInternalServerError} + // UnauthorizedErr wraps a Google API error with code StatusForbidden. + UnauthorizedErr = &googleapi.Error{Code: http.StatusForbidden} +) + +// gceObject is an abstraction of all GCE API objects in the Go client. +type gceObject interface { + MarshalJSON() ([]byte, error) +} + +// AddInstanceHook mocks adding an Instance to MockTargetPools. +func AddInstanceHook(ctx context.Context, key *meta.Key, req *ga.TargetPoolsAddInstanceRequest, m *cloud.MockTargetPools) error { + pool, err := m.Get(ctx, key) + if err != nil { + return &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("Key: %s was not found in TargetPools", key.String()), + } + } + + for _, instance := range req.Instances { + pool.Instances = append(pool.Instances, instance.Instance) + } + + return nil +} + +// RemoveInstanceHook mocks removing an Instance from MockTargetPools. +func RemoveInstanceHook(ctx context.Context, key *meta.Key, req *ga.TargetPoolsRemoveInstanceRequest, m *cloud.MockTargetPools) error { + pool, err := m.Get(ctx, key) + if err != nil { + return &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("Key: %s was not found in TargetPools", key.String()), + } + } + + for _, instanceToRemove := range req.Instances { + for i, instance := range pool.Instances { + if instanceToRemove.Instance == instance { + // Delete instance from pool.Instances without preserving order + pool.Instances[i] = pool.Instances[len(pool.Instances)-1] + pool.Instances = pool.Instances[:len(pool.Instances)-1] + break + } + } + } + + return nil +} + +func convertAndInsertAlphaForwardingRule(key *meta.Key, obj gceObject, mRules map[meta.Key]*cloud.MockForwardingRulesObj, version meta.Version, projectID string) (bool, error) { + if !key.Valid() { + return true, fmt.Errorf("invalid GCE key (%+v)", key) + } + + if _, ok := mRules[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message:
fmt.Sprintf("MockForwardingRule %v exists", key), + } + return true, err + } + + enc, err := obj.MarshalJSON() + if err != nil { + return true, err + } + var fwdRule alpha.ForwardingRule + if err := json.Unmarshal(enc, &fwdRule); err != nil { + return true, err + } + // Set the default values for the Alpha fields. + if fwdRule.NetworkTier == "" { + fwdRule.NetworkTier = cloud.NetworkTierDefault.ToGCEValue() + } + + fwdRule.Name = key.Name + if fwdRule.SelfLink == "" { + fwdRule.SelfLink = cloud.SelfLink(version, projectID, "forwardingRules", key) + } + + mRules[*key] = &cloud.MockForwardingRulesObj{Obj: fwdRule} + return true, nil +} + +// InsertFwdRuleHook mocks inserting a ForwardingRule. ForwardingRules are +// expected to default to Premium tier if no NetworkTier is specified. +func InsertFwdRuleHook(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule, m *cloud.MockForwardingRules) (bool, error) { + m.Lock.Lock() + defer m.Lock.Unlock() + + projectID := m.ProjectRouter.ProjectID(ctx, meta.VersionGA, "forwardingRules") + return convertAndInsertAlphaForwardingRule(key, obj, m.Objects, meta.VersionGA, projectID) +} + +// InsertBetaFwdRuleHook mocks inserting a BetaForwardingRule. +func InsertBetaFwdRuleHook(ctx context.Context, key *meta.Key, obj *beta.ForwardingRule, m *cloud.MockForwardingRules) (bool, error) { + m.Lock.Lock() + defer m.Lock.Unlock() + + projectID := m.ProjectRouter.ProjectID(ctx, meta.VersionBeta, "forwardingRules") + return convertAndInsertAlphaForwardingRule(key, obj, m.Objects, meta.VersionBeta, projectID) +} + +// InsertAlphaFwdRuleHook mocks inserting an AlphaForwardingRule. +func InsertAlphaFwdRuleHook(ctx context.Context, key *meta.Key, obj *alpha.ForwardingRule, m *cloud.MockForwardingRules) (bool, error) { + m.Lock.Lock() + defer m.Lock.Unlock() + + projectID := m.ProjectRouter.ProjectID(ctx, meta.VersionAlpha, "forwardingRules") + return convertAndInsertAlphaForwardingRule(key, obj, m.Objects, meta.VersionAlpha, projectID) +} + +// AddressAttributes maps from Address key to a map of Instances +type AddressAttributes struct { + IPCounter int // Used to assign Addresses with no IP a unique IP address +} + +func convertAndInsertAlphaAddress(key *meta.Key, obj gceObject, mAddrs map[meta.Key]*cloud.MockAddressesObj, version meta.Version, projectID string, addressAttrs AddressAttributes) (bool, error) { + if !key.Valid() { + return true, fmt.Errorf("invalid GCE key (%+v)", key) + } + + if _, ok := mAddrs[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockAddresses %v exists", key), + } + return true, err + } + + enc, err := obj.MarshalJSON() + if err != nil { + return true, err + } + var addr alpha.Address + if err := json.Unmarshal(enc, &addr); err != nil { + return true, err + } + + // Set default address type if not present. + if addr.AddressType == "" { + addr.AddressType = string(cloud.SchemeExternal) + } + + var existingAddresses []*ga.Address + for _, obj := range mAddrs { + existingAddresses = append(existingAddresses, obj.ToGA()) + } + + for _, existingAddr := range existingAddresses { + if addr.Address == existingAddr.Address { + msg := fmt.Sprintf("MockAddresses IP %v in use", addr.Address) + + // When the IP is already in use, this call returns a StatusBadRequest + // if the address is an external address, and StatusConflict if an + // internal address. This is to be consistent with actual GCE API. 
+ errorCode := http.StatusConflict + if addr.AddressType == string(cloud.SchemeExternal) { + errorCode = http.StatusBadRequest + } + + return true, &googleapi.Error{Code: errorCode, Message: msg} + } + } + + // Set default values used in tests + addr.Name = key.Name + if addr.SelfLink == "" { + addr.SelfLink = cloud.SelfLink(version, projectID, "addresses", key) + } + + if addr.Address == "" { + addr.Address = fmt.Sprintf("1.2.3.%d", addressAttrs.IPCounter) + addressAttrs.IPCounter++ + } + + // Set the default values for the Alpha fields. + if addr.NetworkTier == "" { + addr.NetworkTier = cloud.NetworkTierDefault.ToGCEValue() + } + + mAddrs[*key] = &cloud.MockAddressesObj{Obj: addr} + return true, nil +} + +// InsertAddressHook mocks inserting an Address. +func InsertAddressHook(ctx context.Context, key *meta.Key, obj *ga.Address, m *cloud.MockAddresses) (bool, error) { + m.Lock.Lock() + defer m.Lock.Unlock() + + projectID := m.ProjectRouter.ProjectID(ctx, meta.VersionGA, "addresses") + return convertAndInsertAlphaAddress(key, obj, m.Objects, meta.VersionGA, projectID, m.X.(AddressAttributes)) +} + +// InsertBetaAddressHook mocks inserting a BetaAddress. +func InsertBetaAddressHook(ctx context.Context, key *meta.Key, obj *beta.Address, m *cloud.MockAddresses) (bool, error) { + m.Lock.Lock() + defer m.Lock.Unlock() + + projectID := m.ProjectRouter.ProjectID(ctx, meta.VersionBeta, "addresses") + return convertAndInsertAlphaAddress(key, obj, m.Objects, meta.VersionBeta, projectID, m.X.(AddressAttributes)) +} + +// InsertAlphaAddressHook mocks inserting an Address. Addresses are expected to +// default to Premium tier if no NetworkTier is specified. +func InsertAlphaAddressHook(ctx context.Context, key *meta.Key, obj *alpha.Address, m *cloud.MockAlphaAddresses) (bool, error) { + m.Lock.Lock() + defer m.Lock.Unlock() + + projectID := m.ProjectRouter.ProjectID(ctx, meta.VersionAlpha, "addresses") + return convertAndInsertAlphaAddress(key, obj, m.Objects, meta.VersionAlpha, projectID, m.X.(AddressAttributes)) +} + +// InstanceGroupAttributes maps from InstanceGroup key to a map of Instances +type InstanceGroupAttributes struct { + InstanceMap map[meta.Key]map[string]*ga.InstanceWithNamedPorts + Lock *sync.Mutex +} + +// AddInstances adds a list of Instances passed by InstanceReference +func (igAttrs *InstanceGroupAttributes) AddInstances(key *meta.Key, instanceRefs []*ga.InstanceReference) error { + igAttrs.Lock.Lock() + defer igAttrs.Lock.Unlock() + + instancesWithNamedPorts, ok := igAttrs.InstanceMap[*key] + if !ok { + instancesWithNamedPorts = make(map[string]*ga.InstanceWithNamedPorts) + } + + for _, instance := range instanceRefs { + iWithPort := &ga.InstanceWithNamedPorts{ + Instance: instance.Instance, + } + + instancesWithNamedPorts[instance.Instance] = iWithPort + } + + igAttrs.InstanceMap[*key] = instancesWithNamedPorts + return nil +} + +// RemoveInstances removes a list of Instances passed by InstanceReference +func (igAttrs *InstanceGroupAttributes) RemoveInstances(key *meta.Key, instanceRefs []*ga.InstanceReference) error { + igAttrs.Lock.Lock() + defer igAttrs.Lock.Unlock() + + instancesWithNamedPorts, ok := igAttrs.InstanceMap[*key] + if !ok { + instancesWithNamedPorts = make(map[string]*ga.InstanceWithNamedPorts) + } + + for _, instanceToRemove := range instanceRefs { + if _, ok := instancesWithNamedPorts[instanceToRemove.Instance]; ok { + delete(instancesWithNamedPorts, instanceToRemove.Instance) + } else { + return &googleapi.Error{ + Code: http.StatusBadRequest, + Message:
fmt.Sprintf("%s is not a member of %s", instanceToRemove.Instance, key.String()), + } + } + } + + igAttrs.InstanceMap[*key] = instancesWithNamedPorts + return nil +} + +// List gets a list of InstanceWithNamedPorts +func (igAttrs *InstanceGroupAttributes) List(key *meta.Key) []*ga.InstanceWithNamedPorts { + igAttrs.Lock.Lock() + defer igAttrs.Lock.Unlock() + + instancesWithNamedPorts, ok := igAttrs.InstanceMap[*key] + if !ok { + instancesWithNamedPorts = make(map[string]*ga.InstanceWithNamedPorts) + } + + var instanceList []*ga.InstanceWithNamedPorts + for _, val := range instancesWithNamedPorts { + instanceList = append(instanceList, val) + } + + return instanceList +} + +// AddInstancesHook mocks adding instances from an InstanceGroup +func AddInstancesHook(ctx context.Context, key *meta.Key, req *ga.InstanceGroupsAddInstancesRequest, m *cloud.MockInstanceGroups) error { + _, err := m.Get(ctx, key) + if err != nil { + return &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("Key: %s was not found in InstanceGroups", key.String()), + } + } + + var attrs InstanceGroupAttributes + attrs = m.X.(InstanceGroupAttributes) + attrs.AddInstances(key, req.Instances) + m.X = attrs + return nil +} + +// ListInstancesHook mocks listing instances from an InstanceGroup +func ListInstancesHook(ctx context.Context, key *meta.Key, req *ga.InstanceGroupsListInstancesRequest, filter *filter.F, m *cloud.MockInstanceGroups) ([]*ga.InstanceWithNamedPorts, error) { + _, err := m.Get(ctx, key) + if err != nil { + return nil, &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("Key: %s was not found in InstanceGroups", key.String()), + } + } + + var attrs InstanceGroupAttributes + attrs = m.X.(InstanceGroupAttributes) + instances := attrs.List(key) + + return instances, nil +} + +// RemoveInstancesHook mocks removing instances from an InstanceGroup +func RemoveInstancesHook(ctx context.Context, key *meta.Key, req *ga.InstanceGroupsRemoveInstancesRequest, m *cloud.MockInstanceGroups) error { + _, err := m.Get(ctx, key) + if err != nil { + return &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("Key: %s was not found in InstanceGroups", key.String()), + } + } + + var attrs InstanceGroupAttributes + attrs = m.X.(InstanceGroupAttributes) + attrs.RemoveInstances(key, req.Instances) + m.X = attrs + return nil +} + +// UpdateFirewallHook defines the hook for updating a Firewall. It replaces the +// object with the same key in the mock with the updated object. +func UpdateFirewallHook(ctx context.Context, key *meta.Key, obj *ga.Firewall, m *cloud.MockFirewalls) error { + _, err := m.Get(ctx, key) + if err != nil { + return &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("Key: %s was not found in Firewalls", key.String()), + } + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "firewalls") + obj.SelfLink = cloud.SelfLink(meta.VersionGA, projectID, "firewalls", key) + + m.Objects[*key] = &cloud.MockFirewallsObj{Obj: obj} + return nil +} + +// UpdateHealthCheckHook defines the hook for updating a HealthCheck. It +// replaces the object with the same key in the mock with the updated object. 
+func UpdateHealthCheckHook(ctx context.Context, key *meta.Key, obj *ga.HealthCheck, m *cloud.MockHealthChecks) error { + _, err := m.Get(ctx, key) + if err != nil { + return &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("Key: %s was not found in HealthChecks", key.String()), + } + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "healthChecks") + obj.SelfLink = cloud.SelfLink(meta.VersionGA, projectID, "healthChecks", key) + + m.Objects[*key] = &cloud.MockHealthChecksObj{Obj: obj} + return nil +} + +// UpdateRegionBackendServiceHook defines the hook for updating a Region +// BackendService. It replaces the object with the same key in the mock with +// the updated object. +func UpdateRegionBackendServiceHook(ctx context.Context, key *meta.Key, obj *ga.BackendService, m *cloud.MockRegionBackendServices) error { + _, err := m.Get(ctx, key) + if err != nil { + return &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("Key: %s was not found in RegionBackendServices", key.String()), + } + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "backendServices") + obj.SelfLink = cloud.SelfLink(meta.VersionGA, projectID, "backendServices", key) + + m.Objects[*key] = &cloud.MockRegionBackendServicesObj{Obj: obj} + return nil +} + +// UpdateBackendServiceHook defines the hook for updating a BackendService. +// It replaces the object with the same key in the mock with the updated object. +func UpdateBackendServiceHook(ctx context.Context, key *meta.Key, obj *ga.BackendService, m *cloud.MockBackendServices) error { + _, err := m.Get(ctx, key) + if err != nil { + return &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("Key: %s was not found in BackendServices", key.String()), + } + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "backendServices") + obj.SelfLink = cloud.SelfLink(meta.VersionGA, projectID, "backendServices", key) + + m.Objects[*key] = &cloud.MockBackendServicesObj{Obj: obj} + return nil +} + +// UpdateAlphaBackendServiceHook defines the hook for updating an alpha BackendService. +// It replaces the object with the same key in the mock with the updated object. +func UpdateAlphaBackendServiceHook(ctx context.Context, key *meta.Key, obj *alpha.BackendService, m *cloud.MockAlphaBackendServices) error { + _, err := m.Get(ctx, key) + if err != nil { + return &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("Key: %s was not found in BackendServices", key.String()), + } + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "backendServices") + obj.SelfLink = cloud.SelfLink(meta.VersionAlpha, projectID, "backendServices", key) + + m.Objects[*key] = &cloud.MockBackendServicesObj{Obj: obj} + return nil +} + +// UpdateBetaBackendServiceHook defines the hook for updating a beta BackendService. +// It replaces the object with the same key in the mock with the updated object.
+func UpdateBetaBackendServiceHook(ctx context.Context, key *meta.Key, obj *beta.BackendService, m *cloud.MockBetaBackendServices) error { + _, err := m.Get(ctx, key) + if err != nil { + return &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("Key: %s was not found in BackendServices", key.String()), + } + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "beta", "backendServices") + obj.SelfLink = cloud.SelfLink(meta.VersionBeta, projectID, "backendServices", key) + + m.Objects[*key] = &cloud.MockBackendServicesObj{Obj: obj} + return nil +} + +// UpdateURLMapHook defines the hook for updating a UrlMap. +// It replaces the object with the same key in the mock with the updated object. +func UpdateURLMapHook(ctx context.Context, key *meta.Key, obj *ga.UrlMap, m *cloud.MockUrlMaps) error { + _, err := m.Get(ctx, key) + if err != nil { + return &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("Key: %s was not found in UrlMaps", key.String()), + } + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "urlMaps") + obj.SelfLink = cloud.SelfLink(meta.VersionGA, projectID, "urlMaps", key) + + m.Objects[*key] = &cloud.MockUrlMapsObj{Obj: obj} + return nil +} + +// InsertFirewallsUnauthorizedErrHook mocks firewall insertion. It returns a forbidden error. +func InsertFirewallsUnauthorizedErrHook(ctx context.Context, key *meta.Key, obj *ga.Firewall, m *cloud.MockFirewalls) (bool, error) { + return true, &googleapi.Error{Code: http.StatusForbidden} +} + +// UpdateFirewallsUnauthorizedErrHook mocks firewall updating. It returns a forbidden error. +func UpdateFirewallsUnauthorizedErrHook(ctx context.Context, key *meta.Key, obj *ga.Firewall, m *cloud.MockFirewalls) error { + return &googleapi.Error{Code: http.StatusForbidden} +} + +// DeleteFirewallsUnauthorizedErrHook mocks firewall deletion. It returns a forbidden error. +func DeleteFirewallsUnauthorizedErrHook(ctx context.Context, key *meta.Key, m *cloud.MockFirewalls) (bool, error) { + return true, &googleapi.Error{Code: http.StatusForbidden} +} + +// GetFirewallsUnauthorizedErrHook mocks firewall information retrieval. It returns a forbidden error. +func GetFirewallsUnauthorizedErrHook(ctx context.Context, key *meta.Key, m *cloud.MockFirewalls) (bool, *ga.Firewall, error) { + return true, nil, &googleapi.Error{Code: http.StatusForbidden} +} + +// GetTargetPoolInternalErrHook mocks getting a target pool. It returns an internal server error. +func GetTargetPoolInternalErrHook(ctx context.Context, key *meta.Key, m *cloud.MockTargetPools) (bool, *ga.TargetPool, error) { + return true, nil, InternalServerError +} + +// GetForwardingRulesInternalErrHook mocks getting forwarding rules and returns an internal server error. +func GetForwardingRulesInternalErrHook(ctx context.Context, key *meta.Key, m *cloud.MockForwardingRules) (bool, *ga.ForwardingRule, error) { + return true, nil, InternalServerError +} + +// GetAddressesInternalErrHook mocks getting a network address and returns an internal server error. +func GetAddressesInternalErrHook(ctx context.Context, key *meta.Key, m *cloud.MockAddresses) (bool, *ga.Address, error) { + return true, nil, InternalServerError +} + +// GetHTTPHealthChecksInternalErrHook mocks getting an HTTP health check and returns an internal server error.
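+// The leading bool in these *ErrHook return values tells the generated mock
+// that the call was intercepted, so the canned error is returned instead of
+// the default mock behavior. A sketch of forcing this error path in a test
+// (field names follow the generated mocks in the cloud package):
+//
+// mockGCE.MockHttpHealthChecks.GetHook = mock.GetHTTPHealthChecksInternalErrHook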
+func GetHTTPHealthChecksInternalErrHook(ctx context.Context, key *meta.Key, m *cloud.MockHttpHealthChecks) (bool, *ga.HttpHealthCheck, error) { + return true, nil, InternalServerError +} + +// InsertTargetPoolsInternalErrHook mocks inserting a target pool and returns an internal server error. +func InsertTargetPoolsInternalErrHook(ctx context.Context, key *meta.Key, obj *ga.TargetPool, m *cloud.MockTargetPools) (bool, error) { + return true, InternalServerError +} + +// InsertForwardingRulesInternalErrHook mocks inserting a forwarding rule and returns an internal server error. +func InsertForwardingRulesInternalErrHook(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule, m *cloud.MockForwardingRules) (bool, error) { + return true, InternalServerError +} + +// DeleteAddressesNotFoundErrHook mocks deleting a network address and returns a not found error. +func DeleteAddressesNotFoundErrHook(ctx context.Context, key *meta.Key, m *cloud.MockAddresses) (bool, error) { + return true, &googleapi.Error{Code: http.StatusNotFound} +} + +// DeleteAddressesInternalErrHook mocks deleting an address and returns an internal server error. +func DeleteAddressesInternalErrHook(ctx context.Context, key *meta.Key, m *cloud.MockAddresses) (bool, error) { + return true, InternalServerError +} + +// InsertAlphaBackendServiceUnauthorizedErrHook mocks inserting an alpha BackendService and returns a forbidden error. +func InsertAlphaBackendServiceUnauthorizedErrHook(ctx context.Context, key *meta.Key, obj *alpha.BackendService, m *cloud.MockAlphaBackendServices) (bool, error) { + return true, UnauthorizedErr +} + +// UpdateAlphaBackendServiceUnauthorizedErrHook mocks updating an alpha BackendService and returns a forbidden error. +func UpdateAlphaBackendServiceUnauthorizedErrHook(ctx context.Context, key *meta.Key, obj *alpha.BackendService, m *cloud.MockAlphaBackendServices) error { + return UnauthorizedErr +} + +// GetRegionBackendServicesErrHook mocks getting a region backend service and returns an internal server error. +func GetRegionBackendServicesErrHook(ctx context.Context, key *meta.Key, m *cloud.MockRegionBackendServices) (bool, *ga.BackendService, error) { + return true, nil, InternalServerError +} + +// UpdateRegionBackendServicesErrHook mocks updating a region backend service and returns an internal server error. +func UpdateRegionBackendServicesErrHook(ctx context.Context, key *meta.Key, svc *ga.BackendService, m *cloud.MockRegionBackendServices) error { + return InternalServerError +} + +// DeleteRegionBackendServicesErrHook mocks deleting a region backend service and returns an internal server error. +func DeleteRegionBackendServicesErrHook(ctx context.Context, key *meta.Key, m *cloud.MockRegionBackendServices) (bool, error) { + return true, InternalServerError +} + +// DeleteRegionBackendServicesInUseErrHook mocks deleting a region backend service and returns an InUseError. +func DeleteRegionBackendServicesInUseErrHook(ctx context.Context, key *meta.Key, m *cloud.MockRegionBackendServices) (bool, error) { + return true, InUseError +} + +// GetInstanceGroupInternalErrHook mocks getting an instance group and returns an internal server error. +func GetInstanceGroupInternalErrHook(ctx context.Context, key *meta.Key, m *cloud.MockInstanceGroups) (bool, *ga.InstanceGroup, error) { + return true, nil, InternalServerError +} + +// GetHealthChecksInternalErrHook mocks getting a health check and returns an internal server error.
+func GetHealthChecksInternalErrHook(ctx context.Context, key *meta.Key, m *cloud.MockHealthChecks) (bool, *ga.HealthCheck, error) { + return true, nil, InternalServerError +} + +// DeleteHealthChecksInternalErrHook mocks deleting a health check and returns an internal server error. +func DeleteHealthChecksInternalErrHook(ctx context.Context, key *meta.Key, m *cloud.MockHealthChecks) (bool, error) { + return true, InternalServerError +} + +// DeleteHealthChecksInuseErrHook mocks deleting a health check and returns an in-use error. +func DeleteHealthChecksInuseErrHook(ctx context.Context, key *meta.Key, m *cloud.MockHealthChecks) (bool, error) { + return true, InUseError +} + +// DeleteForwardingRuleErrHook mocks deleting a forwarding rule and returns an internal server error. +func DeleteForwardingRuleErrHook(ctx context.Context, key *meta.Key, m *cloud.MockForwardingRules) (bool, error) { + return true, InternalServerError +} + +// ListZonesInternalErrHook mocks listing zones and returns an internal server error. +func ListZonesInternalErrHook(ctx context.Context, fl *filter.F, m *cloud.MockZones) (bool, []*ga.Zone, error) { + return true, []*ga.Zone{}, InternalServerError +} + +// DeleteInstanceGroupInternalErrHook mocks deleting an instance group and returns an internal server error. +func DeleteInstanceGroupInternalErrHook(ctx context.Context, key *meta.Key, m *cloud.MockInstanceGroups) (bool, error) { + return true, InternalServerError +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/op.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/op.go index 2933fe223b79..eb45c769e461 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/op.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/op.go @@ -20,7 +20,7 @@ import ( "context" "fmt" - "github.com/golang/glog" + "k8s.io/klog" alpha "google.golang.org/api/compute/v0.alpha" beta "google.golang.org/api/compute/v0.beta" @@ -67,13 +67,13 @@ func (o *gaOperation) isDone(ctx context.Context) (bool, error) { switch o.key.Type() { case meta.Regional: op, err = o.s.GA.RegionOperations.Get(o.projectID, o.key.Region, o.key.Name).Context(ctx).Do() - glog.V(5).Infof("GA.RegionOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Region, o.key.Name, op, err, ctx) + klog.V(5).Infof("GA.RegionOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Region, o.key.Name, op, err, ctx) case meta.Zonal: op, err = o.s.GA.ZoneOperations.Get(o.projectID, o.key.Zone, o.key.Name).Context(ctx).Do() - glog.V(5).Infof("GA.ZoneOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Zone, o.key.Name, op, err, ctx) + klog.V(5).Infof("GA.ZoneOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Zone, o.key.Name, op, err, ctx) case meta.Global: op, err = o.s.GA.GlobalOperations.Get(o.projectID, o.key.Name).Context(ctx).Do() - glog.V(5).Infof("GA.GlobalOperations.Get(%v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Name, op, err, ctx) + klog.V(5).Infof("GA.GlobalOperations.Get(%v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Name, op, err, ctx) default: return false, fmt.Errorf("invalid key type: %#v", o.key) } @@ -124,13 +124,13 @@ func (o *alphaOperation) isDone(ctx context.Context) (bool, error) { switch o.key.Type() { case meta.Regional: op, err = o.s.Alpha.RegionOperations.Get(o.projectID, o.key.Region, o.key.Name).Context(ctx).Do() -
glog.V(5).Infof("Alpha.RegionOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Region, o.key.Name, op, err, ctx) + klog.V(5).Infof("Alpha.RegionOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Region, o.key.Name, op, err, ctx) case meta.Zonal: op, err = o.s.Alpha.ZoneOperations.Get(o.projectID, o.key.Zone, o.key.Name).Context(ctx).Do() - glog.V(5).Infof("Alpha.ZoneOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Zone, o.key.Name, op, err, ctx) + klog.V(5).Infof("Alpha.ZoneOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Zone, o.key.Name, op, err, ctx) case meta.Global: op, err = o.s.Alpha.GlobalOperations.Get(o.projectID, o.key.Name).Context(ctx).Do() - glog.V(5).Infof("Alpha.GlobalOperations.Get(%v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Name, op, err, ctx) + klog.V(5).Infof("Alpha.GlobalOperations.Get(%v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Name, op, err, ctx) default: return false, fmt.Errorf("invalid key type: %#v", o.key) } @@ -181,13 +181,13 @@ func (o *betaOperation) isDone(ctx context.Context) (bool, error) { switch o.key.Type() { case meta.Regional: op, err = o.s.Beta.RegionOperations.Get(o.projectID, o.key.Region, o.key.Name).Context(ctx).Do() - glog.V(5).Infof("Beta.RegionOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Region, o.key.Name, op, err, ctx) + klog.V(5).Infof("Beta.RegionOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Region, o.key.Name, op, err, ctx) case meta.Zonal: op, err = o.s.Beta.ZoneOperations.Get(o.projectID, o.key.Zone, o.key.Name).Context(ctx).Do() - glog.V(5).Infof("Beta.ZoneOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Zone, o.key.Name, op, err, ctx) + klog.V(5).Infof("Beta.ZoneOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Zone, o.key.Name, op, err, ctx) case meta.Global: op, err = o.s.Beta.GlobalOperations.Get(o.projectID, o.key.Name).Context(ctx).Do() - glog.V(5).Infof("Beta.GlobalOperations.Get(%v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Name, op, err, ctx) + klog.V(5).Infof("Beta.GlobalOperations.Get(%v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Name, op, err, ctx) default: return false, fmt.Errorf("invalid key type: %#v", o.key) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/service.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/service.go index 2f332dfff854..4d7b4c557f2a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/service.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/service.go @@ -20,7 +20,7 @@ import ( "context" "fmt" - "github.com/golang/glog" + "k8s.io/klog" alpha "google.golang.org/api/compute/v0.alpha" beta "google.golang.org/api/compute/v0.beta" @@ -69,7 +69,7 @@ func (s *Service) wrapOperation(anyOp interface{}) (operation, error) { func (s *Service) WaitForCompletion(ctx context.Context, genericOp interface{}) error { op, err := s.wrapOperation(genericOp) if err != nil { - glog.Errorf("wrapOperation(%+v) error: %v", genericOp, err) + klog.Errorf("wrapOperation(%+v) error: %v", genericOp, err) return err } @@ -86,18 +86,18 @@ func (s *Service) pollOperation(ctx context.Context, op operation) error { // returning ctx.Err(). 
select { case <-ctx.Done(): - glog.V(5).Infof("op.pollOperation(%v, %v) not completed, poll count = %d, ctx.Err = %v", ctx, op, pollCount, ctx.Err()) + klog.V(5).Infof("op.pollOperation(%v, %v) not completed, poll count = %d, ctx.Err = %v", ctx, op, pollCount, ctx.Err()) return ctx.Err() default: // ctx is not canceled, continue immediately } pollCount++ - glog.V(5).Infof("op.isDone(%v) waiting; op = %v, poll count = %d", ctx, op, pollCount) + klog.V(5).Infof("op.isDone(%v) waiting; op = %v, poll count = %d", ctx, op, pollCount) s.RateLimiter.Accept(ctx, op.rateLimitKey()) done, err := op.isDone(ctx) if err != nil { - glog.V(5).Infof("op.isDone(%v) error; op = %v, poll count = %d, err = %v, retrying", ctx, op, pollCount, err) + klog.V(5).Infof("op.isDone(%v) error; op = %v, poll count = %d, err = %v, retrying", ctx, op, pollCount, err) } if done { @@ -105,6 +105,6 @@ func (s *Service) pollOperation(ctx context.Context, op operation) error { } } - glog.V(5).Infof("op.isDone(%v) complete; op = %v, poll count = %d, op.err = %v", ctx, op, pollCount, op.error()) + klog.V(5).Infof("op.isDone(%v) complete; op = %v, poll count = %d, op.err = %v", ctx, op, pollCount, op.error()) return op.error() } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go index 36ccd264eabe..616fe5f54775 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go @@ -30,13 +30,13 @@ import ( gcfg "gopkg.in/gcfg.v1" "cloud.google.com/go/compute/metadata" - "github.com/golang/glog" "golang.org/x/oauth2" "golang.org/x/oauth2/google" computealpha "google.golang.org/api/compute/v0.alpha" computebeta "google.golang.org/api/compute/v0.beta" compute "google.golang.org/api/compute/v1" container "google.golang.org/api/container/v1" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" @@ -49,7 +49,7 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/client-go/util/flowcontrol" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" "k8s.io/kubernetes/pkg/controller" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" @@ -57,6 +57,7 @@ import ( ) const ( + // ProviderName is the official const representation of the Google Cloud Provider ProviderName = "gce" k8sNodeRouteTag = "k8s-node-route" @@ -80,13 +81,13 @@ const ( maxTargetPoolCreateInstances = 200 // HTTP Load Balancer parameters - // Configure 2 second period for external health checks. - gceHcCheckIntervalSeconds = int64(2) + // Configure 8 second period for external health checks. + gceHcCheckIntervalSeconds = int64(8) gceHcTimeoutSeconds = int64(1) // Start sending requests as soon as a pod is found on the node. gceHcHealthyThreshold = int64(1) - // Defaults to 5 * 2 = 10 seconds before the LB will steer traffic away - gceHcUnhealthyThreshold = int64(5) + // Defaults to 3 * 8 = 24 seconds before the LB will steer traffic away. + gceHcUnhealthyThreshold = int64(3) gceComputeAPIEndpoint = "https://www.googleapis.com/compute/v1/" gceComputeAPIEndpointBeta = "https://www.googleapis.com/compute/beta/" @@ -97,9 +98,9 @@ type gceObject interface { MarshalJSON() ([]byte, error) } -// GCECloud is an implementation of Interface, LoadBalancer and Instances for Google Compute Engine. 
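Buried in the gce.go constants above is a behavioral change, not just a rename: external HTTP health checks now probe every 8 seconds instead of 2, and a node is marked unhealthy after 3 failures instead of 5. The worst-case time before the load balancer steers traffic away is interval × threshold, so the window grows from 10s to 24s. A tiny sketch of that arithmetic (the constants are copied from the hunk; the helper is illustrative, not part of the provider):

```go
package main

import (
	"fmt"
	"time"
)

// Values as set by the hunk above.
const (
	gceHcCheckIntervalSeconds = int64(8)
	gceHcUnhealthyThreshold   = int64(3)
)

// timeToUnhealthy is illustrative: the worst-case delay before the LB
// steers traffic away from a node whose health checks started failing.
func timeToUnhealthy(intervalSec, threshold int64) time.Duration {
	return time.Duration(intervalSec*threshold) * time.Second
}

func main() {
	// 3 failed probes, 8 seconds apart => 24s (the old 2s * 5 gave 10s).
	fmt.Println(timeToUnhealthy(gceHcCheckIntervalSeconds, gceHcUnhealthyThreshold))
}
```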
-type GCECloud struct { - // ClusterID contains functionality for getting (and initializing) the ingress-uid. Call GCECloud.Initialize() +// Cloud is an implementation of Interface, LoadBalancer and Instances for Google Compute Engine. +type Cloud struct { + // ClusterID contains functionality for getting (and initializing) the ingress-uid. Call Cloud.Initialize() // for the cloudprovider to start watching the configmap. ClusterID ClusterID @@ -114,6 +115,7 @@ type GCECloud struct { eventRecorder record.EventRecorder projectID string region string + regional bool localZone string // The zone in which we are running // managedZones will be set to the 1 zone if running a single zone cluster // it will be set to ALL zones in region for any multi-zone cluster @@ -144,7 +146,7 @@ type GCECloud struct { // lock to prevent shared resources from being prematurely deleted while the operation is // in progress. sharedResourceLock sync.Mutex - // AlphaFeatureGate gates gce alpha features in GCECloud instance. + // AlphaFeatureGate gates gce alpha features in Cloud instance. // Related wrapper functions that interacts with gce alpha api should examine whether // the corresponding api is enabled. // If not enabled, it should return error. @@ -157,6 +159,7 @@ type GCECloud struct { s *cloud.Service } +// ConfigGlobal is the in memory representation of the gce.conf config data // TODO: replace gcfg with json type ConfigGlobal struct { TokenURL string `gcfg:"token-url"` @@ -174,13 +177,14 @@ type ConfigGlobal struct { SecondaryRangeName string `gcfg:"secondary-range-name"` NodeTags []string `gcfg:"node-tags"` NodeInstancePrefix string `gcfg:"node-instance-prefix"` + Regional bool `gcfg:"regional"` Multizone bool `gcfg:"multizone"` - // ApiEndpoint is the GCE compute API endpoint to use. If this is blank, + // APIEndpoint is the GCE compute API endpoint to use. If this is blank, // then the default endpoint is used. - ApiEndpoint string `gcfg:"api-endpoint"` - // ContainerApiEndpoint is the GCE container API endpoint to use. If this is blank, + APIEndpoint string `gcfg:"api-endpoint"` + // ContainerAPIEndpoint is the GCE container API endpoint to use. If this is blank, // then the default endpoint is used. - ContainerApiEndpoint string `gcfg:"container-api-endpoint"` + ContainerAPIEndpoint string `gcfg:"container-api-endpoint"` // LocalZone specifies the GCE zone that gce cloud client instance is // located in (i.e. where the controller will be running). If this is // blank, then the local zone will be discovered via the metadata server. @@ -195,13 +199,14 @@ type ConfigFile struct { Global ConfigGlobal `gcfg:"global"` } -// CloudConfig includes all the necessary configuration for creating GCECloud +// CloudConfig includes all the necessary configuration for creating Cloud type CloudConfig struct { - ApiEndpoint string - ContainerApiEndpoint string + APIEndpoint string + ContainerAPIEndpoint string ProjectID string NetworkProjectID string Region string + Regional bool Zone string ManagedZones []string NetworkName string @@ -233,22 +238,22 @@ type Services struct { } // ComputeServices returns access to the internal compute services. -func (g *GCECloud) ComputeServices() *Services { +func (g *Cloud) ComputeServices() *Services { return &Services{g.service, g.serviceAlpha, g.serviceBeta} } // Compute returns the generated stubs for the compute API. -func (g *GCECloud) Compute() cloud.Cloud { +func (g *Cloud) Compute() cloud.Cloud { return g.c } // ContainerService returns the container service. 
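The new `Regional` field on ConfigGlobal and CloudConfig means a gce.conf can now declare a regional cluster, which, like `multizone`, clears ManagedZones so all zones in the region are managed. A hedged sketch of parsing such a file with the same gcfg calls the provider itself uses (the struct is abbreviated, the file contents invented, and gopkg.in/gcfg.v1 is assumed to be on the module path):

```go
package main

import (
	"fmt"
	"strings"

	gcfg "gopkg.in/gcfg.v1"
)

// Abbreviated mirror of ConfigFile/ConfigGlobal: only the fields this
// sketch needs.
type configFile struct {
	Global struct {
		Multizone bool `gcfg:"multizone"`
		Regional  bool `gcfg:"regional"`
	} `gcfg:"global"`
}

func main() {
	// Hypothetical gce.conf contents.
	conf := "[global]\nregional = true\n"

	var cfg configFile
	if err := gcfg.FatalOnly(gcfg.ReadInto(&cfg, strings.NewReader(conf))); err != nil {
		panic(err)
	}
	// Mirrors generateCloudConfig: either flag means "manage all zones in
	// the region" (ManagedZones = nil).
	fmt.Println("all zones:", cfg.Global.Multizone || cfg.Global.Regional)
}
```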
-func (g *GCECloud) ContainerService() *container.Service { +func (g *Cloud) ContainerService() *container.Service { return g.containerService } -// newGCECloud creates a new instance of GCECloud. -func newGCECloud(config io.Reader) (gceCloud *GCECloud, err error) { +// newGCECloud creates a new instance of Cloud. +func newGCECloud(config io.Reader) (gceCloud *Cloud, err error) { var cloudConfig *CloudConfig var configFile *ConfigFile @@ -257,7 +262,7 @@ func newGCECloud(config io.Reader) (gceCloud *GCECloud, err error) { if err != nil { return nil, err } - glog.Infof("Using GCE provider config %+v", configFile) + klog.Infof("Using GCE provider config %+v", configFile) } cloudConfig, err = generateCloudConfig(configFile) @@ -270,7 +275,7 @@ func newGCECloud(config io.Reader) (gceCloud *GCECloud, err error) { func readConfig(reader io.Reader) (*ConfigFile, error) { cfg := &ConfigFile{} if err := gcfg.FatalOnly(gcfg.ReadInto(cfg, reader)); err != nil { - glog.Errorf("Couldn't read config: %v", err) + klog.Errorf("Couldn't read config: %v", err) return nil, err } return cfg, nil @@ -283,12 +288,12 @@ func generateCloudConfig(configFile *ConfigFile) (cloudConfig *CloudConfig, err cloudConfig.UseMetadataServer = true cloudConfig.AlphaFeatureGate = NewAlphaFeatureGate([]string{}) if configFile != nil { - if configFile.Global.ApiEndpoint != "" { - cloudConfig.ApiEndpoint = configFile.Global.ApiEndpoint + if configFile.Global.APIEndpoint != "" { + cloudConfig.APIEndpoint = configFile.Global.APIEndpoint } - if configFile.Global.ContainerApiEndpoint != "" { - cloudConfig.ContainerApiEndpoint = configFile.Global.ContainerApiEndpoint + if configFile.Global.ContainerAPIEndpoint != "" { + cloudConfig.ContainerAPIEndpoint = configFile.Global.ContainerAPIEndpoint } if configFile.Global.TokenURL != "" { @@ -332,9 +337,14 @@ func generateCloudConfig(configFile *ConfigFile) (cloudConfig *CloudConfig, err return nil, err } + // Determine if its a regional cluster + if configFile != nil && configFile.Global.Regional { + cloudConfig.Regional = true + } + // generate managedZones cloudConfig.ManagedZones = []string{cloudConfig.Zone} - if configFile != nil && configFile.Global.Multizone { + if configFile != nil && (configFile.Global.Multizone || configFile.Global.Regional) { cloudConfig.ManagedZones = nil // Use all zones in region } @@ -369,11 +379,11 @@ func generateCloudConfig(configFile *ConfigFile) (cloudConfig *CloudConfig, err return cloudConfig, err } -// CreateGCECloud creates a GCECloud object using the specified parameters. +// CreateGCECloud creates a Cloud object using the specified parameters. // If no networkUrl is specified, loads networkName via rest call. // If no tokenSource is specified, uses oauth2.DefaultTokenSource. // If managedZones is nil / empty all zones in the region will be managed. -func CreateGCECloud(config *CloudConfig) (*GCECloud, error) { +func CreateGCECloud(config *CloudConfig) (*Cloud, error) { // Remove any pre-release version and build metadata from the semver, // leaving only the MAJOR.MINOR.PATCH portion. See http://semver.org/. version := strings.TrimLeft(strings.Split(strings.Split(version.Get().GitVersion, "-")[0], "+")[0], "v") @@ -421,10 +431,10 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) { // Generate alpha and beta api endpoints based on override v1 api endpoint. 
// For example, // staging API endpoint: https://www.googleapis.com/compute/staging_v1/ - if config.ApiEndpoint != "" { - service.BasePath = fmt.Sprintf("%sprojects/", config.ApiEndpoint) - serviceBeta.BasePath = fmt.Sprintf("%sprojects/", strings.Replace(config.ApiEndpoint, "v1", "beta", -1)) - serviceAlpha.BasePath = fmt.Sprintf("%sprojects/", strings.Replace(config.ApiEndpoint, "v1", "alpha", -1)) + if config.APIEndpoint != "" { + service.BasePath = fmt.Sprintf("%sprojects/", config.APIEndpoint) + serviceBeta.BasePath = fmt.Sprintf("%sprojects/", strings.Replace(config.APIEndpoint, "v1", "beta", -1)) + serviceAlpha.BasePath = fmt.Sprintf("%sprojects/", strings.Replace(config.APIEndpoint, "v1", "alpha", -1)) } containerService, err := container.New(client) @@ -432,8 +442,8 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) { return nil, err } containerService.UserAgent = userAgent - if config.ContainerApiEndpoint != "" { - containerService.BasePath = config.ContainerApiEndpoint + if config.ContainerAPIEndpoint != "" { + containerService.BasePath = config.ContainerAPIEndpoint } tpuService, err := newTPUService(client) @@ -452,17 +462,17 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) { if config.NetworkURL != "" { networkURL = config.NetworkURL } else if config.NetworkName != "" { - networkURL = gceNetworkURL(config.ApiEndpoint, netProjID, config.NetworkName) + networkURL = gceNetworkURL(config.APIEndpoint, netProjID, config.NetworkName) } else { // Other consumers may use the cloudprovider without utilizing the wrapped GCE API functions // or functions requiring network/subnetwork URLs (e.g. Kubelet). - glog.Warningf("No network name or URL specified.") + klog.Warningf("No network name or URL specified.") } if config.SubnetworkURL != "" { subnetURL = config.SubnetworkURL } else if config.SubnetworkName != "" { - subnetURL = gceSubnetworkURL(config.ApiEndpoint, netProjID, config.Region, config.SubnetworkName) + subnetURL = gceSubnetworkURL(config.APIEndpoint, netProjID, config.Region, config.SubnetworkName) } else { // Determine the type of network and attempt to discover the correct subnet for AUTO mode. 
// Gracefully fail because kubelet calls CreateGCECloud without any config, and minions @@ -470,20 +480,20 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) { if networkName := lastComponent(networkURL); networkName != "" { var n *compute.Network if n, err = getNetwork(service, netProjID, networkName); err != nil { - glog.Warningf("Could not retrieve network %q; err: %v", networkName, err) + klog.Warningf("Could not retrieve network %q; err: %v", networkName, err) } else { switch typeOfNetwork(n) { case netTypeLegacy: - glog.Infof("Network %q is type legacy - no subnetwork", networkName) + klog.Infof("Network %q is type legacy - no subnetwork", networkName) isLegacyNetwork = true case netTypeCustom: - glog.Warningf("Network %q is type custom - cannot auto select a subnetwork", networkName) + klog.Warningf("Network %q is type custom - cannot auto select a subnetwork", networkName) case netTypeAuto: subnetURL, err = determineSubnetURL(service, netProjID, networkName, config.Region) if err != nil { - glog.Warningf("Could not determine subnetwork for network %q and region %v; err: %v", networkName, config.Region, err) + klog.Warningf("Could not determine subnetwork for network %q and region %v; err: %v", networkName, config.Region, err) } else { - glog.Infof("Auto selecting subnetwork %q", subnetURL) + klog.Infof("Auto selecting subnetwork %q", subnetURL) } } } @@ -497,12 +507,12 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) { } } if len(config.ManagedZones) > 1 { - glog.Infof("managing multiple zones: %v", config.ManagedZones) + klog.Infof("managing multiple zones: %v", config.ManagedZones) } operationPollRateLimiter := flowcontrol.NewTokenBucketRateLimiter(5, 5) // 5 qps, 5 burst. - gce := &GCECloud{ + gce := &Cloud{ service: service, serviceAlpha: serviceAlpha, serviceBeta: serviceBeta, @@ -512,6 +522,7 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) { networkProjectID: netProjID, onXPN: onXPN, region: config.Region, + regional: config.Regional, localZone: config.Zone, managedZones: config.ManagedZones, networkURL: networkURL, @@ -541,8 +552,8 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) { // SetRateLimiter adds a custom cloud.RateLimiter implementation. // WARNING: Calling this could have unexpected behavior if you have in-flight -// requests. It is best to use this immediately after creating a GCECloud. -func (g *GCECloud) SetRateLimiter(rl cloud.RateLimiter) { +// requests. It is best to use this immediately after creating a Cloud. +func (g *Cloud) SetRateLimiter(rl cloud.RateLimiter) { if rl != nil { g.s.RateLimiter = rl } @@ -577,7 +588,7 @@ func tryConvertToProjectNames(configProject, configNetworkProject string, servic if isProjectNumber(projID) { projName, err := getProjectID(service, projID) if err != nil { - glog.Warningf("Failed to retrieve project %v while trying to retrieve its name. err %v", projID, err) + klog.Warningf("Failed to retrieve project %v while trying to retrieve its name. err %v", projID, err) } else { projID = projName } @@ -590,7 +601,7 @@ func tryConvertToProjectNames(configProject, configNetworkProject string, servic if isProjectNumber(netProjID) { netProjName, err := getProjectID(service, netProjID) if err != nil { - glog.Warningf("Failed to retrieve network project %v while trying to retrieve its name. err %v", netProjID, err) + klog.Warningf("Failed to retrieve network project %v while trying to retrieve its name. 
err %v", netProjID, err) } else { netProjID = netProjName } @@ -601,89 +612,92 @@ func tryConvertToProjectNames(configProject, configNetworkProject string, servic // Initialize takes in a clientBuilder and spawns a goroutine for watching the clusterid configmap. // This must be called before utilizing the funcs of gce.ClusterID -func (gce *GCECloud) Initialize(clientBuilder controller.ControllerClientBuilder) { - gce.clientBuilder = clientBuilder - gce.client = clientBuilder.ClientOrDie("cloud-provider") +func (g *Cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) { + g.clientBuilder = clientBuilder + g.client = clientBuilder.ClientOrDie("cloud-provider") - if gce.OnXPN() { - gce.eventBroadcaster = record.NewBroadcaster() - gce.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: gce.client.CoreV1().Events("")}) - gce.eventRecorder = gce.eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "gce-cloudprovider"}) + if g.OnXPN() { + g.eventBroadcaster = record.NewBroadcaster() + g.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: g.client.CoreV1().Events("")}) + g.eventRecorder = g.eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "g-cloudprovider"}) } - go gce.watchClusterID() + go g.watchClusterID(stop) } // LoadBalancer returns an implementation of LoadBalancer for Google Compute Engine. -func (gce *GCECloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) { - return gce, true +func (g *Cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) { + return g, true } // Instances returns an implementation of Instances for Google Compute Engine. -func (gce *GCECloud) Instances() (cloudprovider.Instances, bool) { - return gce, true +func (g *Cloud) Instances() (cloudprovider.Instances, bool) { + return g, true } // Zones returns an implementation of Zones for Google Compute Engine. -func (gce *GCECloud) Zones() (cloudprovider.Zones, bool) { - return gce, true +func (g *Cloud) Zones() (cloudprovider.Zones, bool) { + return g, true } -func (gce *GCECloud) Clusters() (cloudprovider.Clusters, bool) { - return gce, true +// Clusters returns an implementation of Clusters for Google Compute Engine. +func (g *Cloud) Clusters() (cloudprovider.Clusters, bool) { + return g, true } // Routes returns an implementation of Routes for Google Compute Engine. -func (gce *GCECloud) Routes() (cloudprovider.Routes, bool) { - return gce, true +func (g *Cloud) Routes() (cloudprovider.Routes, bool) { + return g, true } // ProviderName returns the cloud provider ID. -func (gce *GCECloud) ProviderName() string { +func (g *Cloud) ProviderName() string { return ProviderName } // ProjectID returns the ProjectID corresponding to the project this cloud is in. -func (g *GCECloud) ProjectID() string { +func (g *Cloud) ProjectID() string { return g.projectID } // NetworkProjectID returns the ProjectID corresponding to the project this cluster's network is in. 
-func (g *GCECloud) NetworkProjectID() string { +func (g *Cloud) NetworkProjectID() string { return g.networkProjectID } // Region returns the region -func (gce *GCECloud) Region() string { - return gce.region +func (g *Cloud) Region() string { + return g.region } // OnXPN returns true if the cluster is running on a cross project network (XPN) -func (gce *GCECloud) OnXPN() bool { - return gce.onXPN +func (g *Cloud) OnXPN() bool { + return g.onXPN } // NetworkURL returns the network url -func (gce *GCECloud) NetworkURL() string { - return gce.networkURL +func (g *Cloud) NetworkURL() string { + return g.networkURL } // SubnetworkURL returns the subnetwork url -func (gce *GCECloud) SubnetworkURL() string { - return gce.subnetworkURL +func (g *Cloud) SubnetworkURL() string { + return g.subnetworkURL } -func (gce *GCECloud) IsLegacyNetwork() bool { - return gce.isLegacyNetwork +// IsLegacyNetwork returns true if the cluster is still running a legacy network configuration. +func (g *Cloud) IsLegacyNetwork() bool { + return g.isLegacyNetwork } -func (gce *GCECloud) SetInformers(informerFactory informers.SharedInformerFactory) { - glog.Infof("Setting up informers for GCECloud") +// SetInformers sets up the zone handlers we need watching for node changes. +func (g *Cloud) SetInformers(informerFactory informers.SharedInformerFactory) { + klog.Infof("Setting up informers for Cloud") nodeInformer := informerFactory.Core().V1().Nodes().Informer() nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { node := obj.(*v1.Node) - gce.updateNodeZones(nil, node) + g.updateNodeZones(nil, node) }, UpdateFunc: func(prev, obj interface{}) { prevNode := prev.(*v1.Node) @@ -692,7 +706,7 @@ func (gce *GCECloud) SetInformers(informerFactory informers.SharedInformerFactor prevNode.Labels[kubeletapis.LabelZoneFailureDomain] { return } - gce.updateNodeZones(prevNode, newNode) + g.updateNodeZones(prevNode, newNode) }, DeleteFunc: func(obj interface{}) { node, isNode := obj.(*v1.Node) @@ -701,46 +715,46 @@ func (gce *GCECloud) SetInformers(informerFactory informers.SharedInformerFactor if !isNode { deletedState, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { - glog.Errorf("Received unexpected object: %v", obj) + klog.Errorf("Received unexpected object: %v", obj) return } node, ok = deletedState.Obj.(*v1.Node) if !ok { - glog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj) + klog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj) return } } - gce.updateNodeZones(node, nil) + g.updateNodeZones(node, nil) }, }) - gce.nodeInformerSynced = nodeInformer.HasSynced + g.nodeInformerSynced = nodeInformer.HasSynced } -func (gce *GCECloud) updateNodeZones(prevNode, newNode *v1.Node) { - gce.nodeZonesLock.Lock() - defer gce.nodeZonesLock.Unlock() +func (g *Cloud) updateNodeZones(prevNode, newNode *v1.Node) { + g.nodeZonesLock.Lock() + defer g.nodeZonesLock.Unlock() if prevNode != nil { prevZone, ok := prevNode.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] if ok { - gce.nodeZones[prevZone].Delete(prevNode.ObjectMeta.Name) - if gce.nodeZones[prevZone].Len() == 0 { - gce.nodeZones[prevZone] = nil + g.nodeZones[prevZone].Delete(prevNode.ObjectMeta.Name) + if g.nodeZones[prevZone].Len() == 0 { + g.nodeZones[prevZone] = nil } } } if newNode != nil { newZone, ok := newNode.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] if ok { - if gce.nodeZones[newZone] == nil { - gce.nodeZones[newZone] = sets.NewString() + if 
g.nodeZones[newZone] == nil { + g.nodeZones[newZone] = sets.NewString() } - gce.nodeZones[newZone].Insert(newNode.ObjectMeta.Name) + g.nodeZones[newZone].Insert(newNode.ObjectMeta.Name) } } } // HasClusterID returns true if the cluster has a clusterID -func (gce *GCECloud) HasClusterID() bool { +func (g *Cloud) HasClusterID() bool { return true } @@ -751,8 +765,8 @@ func isProjectNumber(idOrNumber string) bool { return err == nil } -// GCECloud implements cloudprovider.Interface. -var _ cloudprovider.Interface = (*GCECloud)(nil) +// Cloud implements cloudprovider.Interface. +var _ cloudprovider.Interface = (*Cloud)(nil) func gceNetworkURL(apiEndpoint, project, network string) string { if apiEndpoint == "" { @@ -857,12 +871,12 @@ func newOauthClient(tokenSource oauth2.TokenSource) (*http.Client, error) { oauth2.NoContext, compute.CloudPlatformScope, compute.ComputeScope) - glog.Infof("Using DefaultTokenSource %#v", tokenSource) + klog.Infof("Using DefaultTokenSource %#v", tokenSource) if err != nil { return nil, err } } else { - glog.Infof("Using existing Token Source %#v", tokenSource) + klog.Infof("Using existing Token Source %#v", tokenSource) } backoff := wait.Backoff{ @@ -873,7 +887,7 @@ func newOauthClient(tokenSource oauth2.TokenSource) (*http.Client, error) { } if err := wait.ExponentialBackoff(backoff, func() (bool, error) { if _, err := tokenSource.Token(); err != nil { - glog.Errorf("error fetching initial token: %v", err) + klog.Errorf("error fetching initial token: %v", err) return false, nil } return true, nil @@ -885,19 +899,19 @@ func newOauthClient(tokenSource oauth2.TokenSource) (*http.Client, error) { } func (manager *gceServiceManager) getProjectsAPIEndpoint() string { - projectsApiEndpoint := gceComputeAPIEndpoint + "projects/" + projectsAPIEndpoint := gceComputeAPIEndpoint + "projects/" if manager.gce.service != nil { - projectsApiEndpoint = manager.gce.service.BasePath + projectsAPIEndpoint = manager.gce.service.BasePath } - return projectsApiEndpoint + return projectsAPIEndpoint } func (manager *gceServiceManager) getProjectsAPIEndpointBeta() string { - projectsApiEndpoint := gceComputeAPIEndpointBeta + "projects/" + projectsAPIEndpoint := gceComputeAPIEndpointBeta + "projects/" if manager.gce.service != nil { - projectsApiEndpoint = manager.gce.serviceBeta.BasePath + projectsAPIEndpoint = manager.gce.serviceBeta.BasePath } - return projectsApiEndpoint + return projectsAPIEndpoint } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_address_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_address_manager.go index 449b33a0d21c..51b9bc5e718f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_address_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_address_manager.go @@ -22,7 +22,7 @@ import ( compute "google.golang.org/api/compute/v1" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" ) @@ -62,7 +62,7 @@ func (am *addressManager) HoldAddress() (string, error) { // could be reserving another address; therefore, it would need to be deleted. In the normal // case of using a controller address, retrieving the address by name results in the fewest API // calls since it indicates whether a Delete is necessary before Reserve. 
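`updateNodeZones`, renamed above along with everything else, is simple bookkeeping: keep a zone → node-name set in sync as node labels change, freeing a zone's set once it empties. A minimal sketch of the same add/remove logic using the same sets package (the function and variable names are invented; k8s.io/apimachinery is assumed to be available):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// updateZones mirrors updateNodeZones: drop the node from its previous
// zone's set, then add it to the new one, creating sets lazily and
// freeing a set once it empties (as the real code does).
func updateZones(zones map[string]sets.String, node, prevZone, newZone string) {
	if prevZone != "" && zones[prevZone] != nil {
		zones[prevZone].Delete(node)
		if zones[prevZone].Len() == 0 {
			zones[prevZone] = nil
		}
	}
	if newZone != "" {
		if zones[newZone] == nil {
			zones[newZone] = sets.NewString()
		}
		zones[newZone].Insert(node)
	}
}

func main() {
	zones := map[string]sets.String{}
	updateZones(zones, "node-1", "", "us-central1-a")
	updateZones(zones, "node-1", "us-central1-a", "us-central1-b") // node moved zones
	fmt.Println(zones["us-central1-b"].List())                     // [node-1]
}
```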
- glog.V(4).Infof("%v: attempting hold of IP %q Type %q", am.logPrefix, am.targetIP, am.addressType) + klog.V(4).Infof("%v: attempting hold of IP %q Type %q", am.logPrefix, am.targetIP, am.addressType) // Get the address in case it was orphaned earlier addr, err := am.svc.GetRegionAddress(am.name, am.region) if err != nil && !isNotFound(err) { @@ -73,20 +73,20 @@ func (am *addressManager) HoldAddress() (string, error) { // If address exists, check if the address had the expected attributes. validationError := am.validateAddress(addr) if validationError == nil { - glog.V(4).Infof("%v: address %q already reserves IP %q Type %q. No further action required.", am.logPrefix, addr.Name, addr.Address, addr.AddressType) + klog.V(4).Infof("%v: address %q already reserves IP %q Type %q. No further action required.", am.logPrefix, addr.Name, addr.Address, addr.AddressType) return addr.Address, nil } - glog.V(2).Infof("%v: deleting existing address because %v", am.logPrefix, validationError) + klog.V(2).Infof("%v: deleting existing address because %v", am.logPrefix, validationError) err := am.svc.DeleteRegionAddress(addr.Name, am.region) if err != nil { if isNotFound(err) { - glog.V(4).Infof("%v: address %q was not found. Ignoring.", am.logPrefix, addr.Name) + klog.V(4).Infof("%v: address %q was not found. Ignoring.", am.logPrefix, addr.Name) } else { return "", err } } else { - glog.V(4).Infof("%v: successfully deleted previous address %q", am.logPrefix, addr.Name) + klog.V(4).Infof("%v: successfully deleted previous address %q", am.logPrefix, addr.Name) } } @@ -96,23 +96,23 @@ func (am *addressManager) HoldAddress() (string, error) { // ReleaseAddress will release the address if it's owned by the controller. func (am *addressManager) ReleaseAddress() error { if !am.tryRelease { - glog.V(4).Infof("%v: not attempting release of address %q.", am.logPrefix, am.targetIP) + klog.V(4).Infof("%v: not attempting release of address %q.", am.logPrefix, am.targetIP) return nil } - glog.V(4).Infof("%v: releasing address %q named %q", am.logPrefix, am.targetIP, am.name) + klog.V(4).Infof("%v: releasing address %q named %q", am.logPrefix, am.targetIP, am.name) // Controller only ever tries to unreserve the address named with the load balancer's name. err := am.svc.DeleteRegionAddress(am.name, am.region) if err != nil { if isNotFound(err) { - glog.Warningf("%v: address %q was not found. Ignoring.", am.logPrefix, am.name) + klog.Warningf("%v: address %q was not found. 
Ignoring.", am.logPrefix, am.name) return nil } return err } - glog.V(4).Infof("%v: successfully released IP %q named %q", am.logPrefix, am.targetIP, am.name) + klog.V(4).Infof("%v: successfully released IP %q named %q", am.logPrefix, am.targetIP, am.name) return nil } @@ -130,7 +130,7 @@ func (am *addressManager) ensureAddressReservation() (string, error) { reserveErr := am.svc.ReserveRegionAddress(newAddr, am.region) if reserveErr == nil { if newAddr.Address != "" { - glog.V(4).Infof("%v: successfully reserved IP %q with name %q", am.logPrefix, newAddr.Address, newAddr.Name) + klog.V(4).Infof("%v: successfully reserved IP %q with name %q", am.logPrefix, newAddr.Address, newAddr.Name) return newAddr.Address, nil } @@ -139,7 +139,7 @@ func (am *addressManager) ensureAddressReservation() (string, error) { return "", err } - glog.V(4).Infof("%v: successfully created address %q which reserved IP %q", am.logPrefix, addr.Name, addr.Address) + klog.V(4).Infof("%v: successfully created address %q which reserved IP %q", am.logPrefix, addr.Name, addr.Address) return addr.Address, nil } else if !isHTTPErrorCode(reserveErr, http.StatusConflict) && !isHTTPErrorCode(reserveErr, http.StatusBadRequest) { // If the IP is already reserved: @@ -169,10 +169,10 @@ func (am *addressManager) ensureAddressReservation() (string, error) { if am.isManagedAddress(addr) { // The address with this name is checked at the beginning of 'HoldAddress()', but for some reason // it was re-created by this point. May be possible that two controllers are running. - glog.Warningf("%v: address %q unexpectedly existed with IP %q.", am.logPrefix, addr.Name, am.targetIP) + klog.Warningf("%v: address %q unexpectedly existed with IP %q.", am.logPrefix, addr.Name, am.targetIP) } else { // If the retrieved address is not named with the loadbalancer name, then the controller does not own it, but will allow use of it. - glog.V(4).Infof("%v: address %q was already reserved with name: %q, description: %q", am.logPrefix, am.targetIP, addr.Name, addr.Description) + klog.V(4).Infof("%v: address %q was already reserved with name: %q, description: %q", am.logPrefix, am.targetIP, addr.Name, addr.Description) am.tryRelease = false } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_addresses.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_addresses.go index 2bf3e20b051b..044258f1b4e3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_addresses.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_addresses.go @@ -19,7 +19,7 @@ package gce import ( "fmt" - "github.com/golang/glog" + "k8s.io/klog" computealpha "google.golang.org/api/compute/v0.alpha" computebeta "google.golang.org/api/compute/v0.beta" @@ -42,106 +42,106 @@ func newAddressMetricContextWithVersion(request, region, version string) *metric // Caller is allocated a random IP if they do not specify an ipAddress. If an // ipAddress is specified, it must belong to the current project, eg: an // ephemeral IP associated with a global forwarding rule. 
-func (gce *GCECloud) ReserveGlobalAddress(addr *compute.Address) error { +func (g *Cloud) ReserveGlobalAddress(addr *compute.Address) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newAddressMetricContext("reserve", "") - return mc.Observe(gce.c.GlobalAddresses().Insert(ctx, meta.GlobalKey(addr.Name), addr)) + return mc.Observe(g.c.GlobalAddresses().Insert(ctx, meta.GlobalKey(addr.Name), addr)) } // DeleteGlobalAddress deletes a global address by name. -func (gce *GCECloud) DeleteGlobalAddress(name string) error { +func (g *Cloud) DeleteGlobalAddress(name string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newAddressMetricContext("delete", "") - return mc.Observe(gce.c.GlobalAddresses().Delete(ctx, meta.GlobalKey(name))) + return mc.Observe(g.c.GlobalAddresses().Delete(ctx, meta.GlobalKey(name))) } // GetGlobalAddress returns the global address by name. -func (gce *GCECloud) GetGlobalAddress(name string) (*compute.Address, error) { +func (g *Cloud) GetGlobalAddress(name string) (*compute.Address, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newAddressMetricContext("get", "") - v, err := gce.c.GlobalAddresses().Get(ctx, meta.GlobalKey(name)) + v, err := g.c.GlobalAddresses().Get(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } // ReserveRegionAddress creates a region address -func (gce *GCECloud) ReserveRegionAddress(addr *compute.Address, region string) error { +func (g *Cloud) ReserveRegionAddress(addr *compute.Address, region string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newAddressMetricContext("reserve", region) - return mc.Observe(gce.c.Addresses().Insert(ctx, meta.RegionalKey(addr.Name, region), addr)) + return mc.Observe(g.c.Addresses().Insert(ctx, meta.RegionalKey(addr.Name, region), addr)) } // ReserveAlphaRegionAddress creates an Alpha, regional address. -func (gce *GCECloud) ReserveAlphaRegionAddress(addr *computealpha.Address, region string) error { +func (g *Cloud) ReserveAlphaRegionAddress(addr *computealpha.Address, region string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newAddressMetricContext("reserve", region) - return mc.Observe(gce.c.AlphaAddresses().Insert(ctx, meta.RegionalKey(addr.Name, region), addr)) + return mc.Observe(g.c.AlphaAddresses().Insert(ctx, meta.RegionalKey(addr.Name, region), addr)) } // ReserveBetaRegionAddress creates a beta region address -func (gce *GCECloud) ReserveBetaRegionAddress(addr *computebeta.Address, region string) error { +func (g *Cloud) ReserveBetaRegionAddress(addr *computebeta.Address, region string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newAddressMetricContext("reserve", region) - return mc.Observe(gce.c.BetaAddresses().Insert(ctx, meta.RegionalKey(addr.Name, region), addr)) + return mc.Observe(g.c.BetaAddresses().Insert(ctx, meta.RegionalKey(addr.Name, region), addr)) } // DeleteRegionAddress deletes a region address by name. 
-func (gce *GCECloud) DeleteRegionAddress(name, region string) error { +func (g *Cloud) DeleteRegionAddress(name, region string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newAddressMetricContext("delete", region) - return mc.Observe(gce.c.Addresses().Delete(ctx, meta.RegionalKey(name, region))) + return mc.Observe(g.c.Addresses().Delete(ctx, meta.RegionalKey(name, region))) } // GetRegionAddress returns the region address by name -func (gce *GCECloud) GetRegionAddress(name, region string) (*compute.Address, error) { +func (g *Cloud) GetRegionAddress(name, region string) (*compute.Address, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newAddressMetricContext("get", region) - v, err := gce.c.Addresses().Get(ctx, meta.RegionalKey(name, region)) + v, err := g.c.Addresses().Get(ctx, meta.RegionalKey(name, region)) return v, mc.Observe(err) } // GetAlphaRegionAddress returns the Alpha, regional address by name. -func (gce *GCECloud) GetAlphaRegionAddress(name, region string) (*computealpha.Address, error) { +func (g *Cloud) GetAlphaRegionAddress(name, region string) (*computealpha.Address, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newAddressMetricContext("get", region) - v, err := gce.c.AlphaAddresses().Get(ctx, meta.RegionalKey(name, region)) + v, err := g.c.AlphaAddresses().Get(ctx, meta.RegionalKey(name, region)) return v, mc.Observe(err) } // GetBetaRegionAddress returns the beta region address by name -func (gce *GCECloud) GetBetaRegionAddress(name, region string) (*computebeta.Address, error) { +func (g *Cloud) GetBetaRegionAddress(name, region string) (*computebeta.Address, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newAddressMetricContext("get", region) - v, err := gce.c.BetaAddresses().Get(ctx, meta.RegionalKey(name, region)) + v, err := g.c.BetaAddresses().Get(ctx, meta.RegionalKey(name, region)) return v, mc.Observe(err) } // GetRegionAddressByIP returns the regional address matching the given IP address. -func (gce *GCECloud) GetRegionAddressByIP(region, ipAddress string) (*compute.Address, error) { +func (g *Cloud) GetRegionAddressByIP(region, ipAddress string) (*compute.Address, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newAddressMetricContext("list", region) - addrs, err := gce.c.Addresses().List(ctx, region, filter.Regexp("address", ipAddress)) + addrs, err := g.c.Addresses().List(ctx, region, filter.Regexp("address", ipAddress)) mc.Observe(err) if err != nil { @@ -149,7 +149,7 @@ func (gce *GCECloud) GetRegionAddressByIP(region, ipAddress string) (*compute.Ad } if len(addrs) > 1 { - glog.Warningf("More than one addresses matching the IP %q: %v", ipAddress, addrNames(addrs)) + klog.Warningf("More than one addresses matching the IP %q: %v", ipAddress, addrNames(addrs)) } for _, addr := range addrs { if addr.Address == ipAddress { @@ -160,12 +160,12 @@ func (gce *GCECloud) GetRegionAddressByIP(region, ipAddress string) (*compute.Ad } // GetBetaRegionAddressByIP returns the beta regional address matching the given IP address. 
-func (gce *GCECloud) GetBetaRegionAddressByIP(region, ipAddress string) (*computebeta.Address, error) { +func (g *Cloud) GetBetaRegionAddressByIP(region, ipAddress string) (*computebeta.Address, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newAddressMetricContext("list", region) - addrs, err := gce.c.BetaAddresses().List(ctx, region, filter.Regexp("address", ipAddress)) + addrs, err := g.c.BetaAddresses().List(ctx, region, filter.Regexp("address", ipAddress)) mc.Observe(err) if err != nil { @@ -173,7 +173,7 @@ func (gce *GCECloud) GetBetaRegionAddressByIP(region, ipAddress string) (*comput } if len(addrs) > 1 { - glog.Warningf("More than one addresses matching the IP %q: %v", ipAddress, addrNames(addrs)) + klog.Warningf("More than one addresses matching the IP %q: %v", ipAddress, addrNames(addrs)) } for _, addr := range addrs { if addr.Address == ipAddress { @@ -184,11 +184,11 @@ func (gce *GCECloud) GetBetaRegionAddressByIP(region, ipAddress string) (*comput } // TODO(#51665): retire this function once Network Tiers becomes Beta in GCP. -func (gce *GCECloud) getNetworkTierFromAddress(name, region string) (string, error) { - if !gce.AlphaFeatureGate.Enabled(AlphaFeatureNetworkTiers) { +func (g *Cloud) getNetworkTierFromAddress(name, region string) (string, error) { + if !g.AlphaFeatureGate.Enabled(AlphaFeatureNetworkTiers) { return cloud.NetworkTierDefault.ToGCEValue(), nil } - addr, err := gce.GetAlphaRegionAddress(name, region) + addr, err := g.GetAlphaRegionAddress(name, region) if err != nil { return handleAlphaNetworkTierGetError(err) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_alpha.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_alpha.go index 0ce698aa98e9..598f1e336d3f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_alpha.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_alpha.go @@ -21,21 +21,24 @@ import ( ) const ( - // alpha: v1.8 (for Services) + // AlphaFeatureNetworkTiers allows Services backed by a GCP load balancer to choose + // what network tier to use. Currently supports "Standard" and "Premium" (default). // - // Allows Services backed by a GCP load balancer to choose what network - // tier to use. Currently supports "Standard" and "Premium" (default). 
+ // alpha: v1.8 (for Services) AlphaFeatureNetworkTiers = "NetworkTiers" ) +// AlphaFeatureGate contains a mapping of alpha features to whether they are enabled type AlphaFeatureGate struct { features map[string]bool } +// Enabled returns true if the provided alpha feature is enabled func (af *AlphaFeatureGate) Enabled(key string) bool { return af.features[key] } +// NewAlphaFeatureGate marks the provided alpha features as enabled func NewAlphaFeatureGate(features []string) *AlphaFeatureGate { featureMap := make(map[string]bool) for _, name := range features { @@ -44,9 +47,9 @@ func NewAlphaFeatureGate(features []string) *AlphaFeatureGate { return &AlphaFeatureGate{featureMap} } -func (gce *GCECloud) alphaFeatureEnabled(feature string) error { - if !gce.AlphaFeatureGate.Enabled(feature) { - return fmt.Errorf("alpha feature %q is not enabled.", feature) +func (g *Cloud) alphaFeatureEnabled(feature string) error { + if !g.AlphaFeatureGate.Enabled(feature) { + return fmt.Errorf("alpha feature %q is not enabled", feature) } return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_annotations.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_annotations.go index ccc0f4441181..39e632e0795f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_annotations.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_annotations.go @@ -19,12 +19,13 @@ package gce import ( "fmt" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" ) +// LoadBalancerType defines a specific type for holding load balancer types (eg. Internal) type LoadBalancerType string const ( @@ -33,23 +34,30 @@ const ( // Currently, only "internal" is supported. ServiceAnnotationLoadBalancerType = "cloud.google.com/load-balancer-type" + // LBTypeInternal is the constant for the official internal type. LBTypeInternal LoadBalancerType = "Internal" + // Deprecating the lowercase spelling of Internal. deprecatedTypeInternalLowerCase LoadBalancerType = "internal" - // ServiceAnnotationInternalBackendShare is annotated on a service with "true" when users + // ServiceAnnotationILBBackendShare is annotated on a service with "true" when users // want to share GCP Backend Services for a set of internal load balancers. // ALPHA feature - this may be removed in a future release. ServiceAnnotationILBBackendShare = "alpha.cloud.google.com/load-balancer-backend-share" + // This annotation did not correctly specify "alpha", so both annotations will be checked. deprecatedServiceAnnotationILBBackendShare = "cloud.google.com/load-balancer-backend-share" // NetworkTierAnnotationKey is annotated on a Service object to indicate which // network tier a GCP LB should use. The valid values are "Standard" and // "Premium" (default). - NetworkTierAnnotationKey = "cloud.google.com/network-tier" + NetworkTierAnnotationKey = "cloud.google.com/network-tier" + + // NetworkTierAnnotationStandard is an annotation to indicate the Service is on the Standard network tier NetworkTierAnnotationStandard = cloud.NetworkTierStandard - NetworkTierAnnotationPremium = cloud.NetworkTierPremium + + // NetworkTierAnnotationPremium is an annotation to indicate the Service is on the Premium network tier + NetworkTierAnnotationPremium = cloud.NetworkTierPremium ) // GetLoadBalancerAnnotationType returns the type of GCP load balancer which should be assembled. 
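The gce_alpha.go hunk above is documentation plus error-string hygiene (lowercase, no trailing period, per Go convention). The gate itself is a plain string-set lookup; here is a self-contained restatement with an invented call site, using the NetworkTiers feature name from the hunk:

```go
package main

import "fmt"

// Minimal restatement of the gate from the hunk above.
type AlphaFeatureGate struct{ features map[string]bool }

// Enabled reports whether the named alpha feature was switched on.
func (af *AlphaFeatureGate) Enabled(key string) bool { return af.features[key] }

// NewAlphaFeatureGate marks the provided alpha features as enabled.
func NewAlphaFeatureGate(features []string) *AlphaFeatureGate {
	m := make(map[string]bool, len(features))
	for _, name := range features {
		m[name] = true
	}
	return &AlphaFeatureGate{m}
}

func main() {
	gate := NewAlphaFeatureGate([]string{"NetworkTiers"})
	for _, f := range []string{"NetworkTiers", "SomethingElse"} {
		if gate.Enabled(f) {
			fmt.Printf("alpha feature %q is enabled\n", f)
		} else {
			// mirrors the now-lowercase error string from the hunk
			fmt.Printf("alpha feature %q is not enabled\n", f)
		}
	}
}
```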
@@ -82,7 +90,7 @@ func GetLoadBalancerAnnotationBackendShare(service *v1.Service) bool { // Check for deprecated annotation key if l, exists := service.Annotations[deprecatedServiceAnnotationILBBackendShare]; exists && l == "true" { - glog.Warningf("Annotation %q is deprecated and replaced with an alpha-specific key: %q", deprecatedServiceAnnotationILBBackendShare, ServiceAnnotationILBBackendShare) + klog.Warningf("Annotation %q is deprecated and replaced with an alpha-specific key: %q", deprecatedServiceAnnotationILBBackendShare, ServiceAnnotationILBBackendShare) return true } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_backendservice.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_backendservice.go index 23dc3bf1cb3c..6560c9126068 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_backendservice.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_backendservice.go @@ -35,201 +35,201 @@ func newBackendServiceMetricContextWithVersion(request, region, version string) } // GetGlobalBackendService retrieves a backend by name. -func (gce *GCECloud) GetGlobalBackendService(name string) (*compute.BackendService, error) { +func (g *Cloud) GetGlobalBackendService(name string) (*compute.BackendService, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContext("get", "") - v, err := gce.c.BackendServices().Get(ctx, meta.GlobalKey(name)) + v, err := g.c.BackendServices().Get(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } // GetBetaGlobalBackendService retrieves beta backend by name. -func (gce *GCECloud) GetBetaGlobalBackendService(name string) (*computebeta.BackendService, error) { +func (g *Cloud) GetBetaGlobalBackendService(name string) (*computebeta.BackendService, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContextWithVersion("get", "", computeBetaVersion) - v, err := gce.c.BetaBackendServices().Get(ctx, meta.GlobalKey(name)) + v, err := g.c.BetaBackendServices().Get(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } // GetAlphaGlobalBackendService retrieves alpha backend by name. -func (gce *GCECloud) GetAlphaGlobalBackendService(name string) (*computealpha.BackendService, error) { +func (g *Cloud) GetAlphaGlobalBackendService(name string) (*computealpha.BackendService, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContextWithVersion("get", "", computeAlphaVersion) - v, err := gce.c.AlphaBackendServices().Get(ctx, meta.GlobalKey(name)) + v, err := g.c.AlphaBackendServices().Get(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } // UpdateGlobalBackendService applies the given BackendService as an update to // an existing service. -func (gce *GCECloud) UpdateGlobalBackendService(bg *compute.BackendService) error { +func (g *Cloud) UpdateGlobalBackendService(bg *compute.BackendService) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContext("update", "") - return mc.Observe(gce.c.BackendServices().Update(ctx, meta.GlobalKey(bg.Name), bg)) + return mc.Observe(g.c.BackendServices().Update(ctx, meta.GlobalKey(bg.Name), bg)) } // UpdateBetaGlobalBackendService applies the given beta BackendService as an // update to an existing service. 
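The annotations hunk just above keeps honoring the deprecated backend-share key while warning about it, a common migration pattern for user-facing annotations. A sketch of the same fallback using the two keys from the file (the function name is invented, and the real code takes a *v1.Service rather than a bare map):

```go
package main

import "fmt"

// Keys from gce_annotations.go above.
const (
	ilbBackendShareKey           = "alpha.cloud.google.com/load-balancer-backend-share"
	deprecatedILBBackendShareKey = "cloud.google.com/load-balancer-backend-share"
)

// backendShare mirrors GetLoadBalancerAnnotationBackendShare: honor the
// current key first, then warn about (but still accept) the deprecated
// spelling.
func backendShare(annotations map[string]string) bool {
	if annotations[ilbBackendShareKey] == "true" {
		return true
	}
	if annotations[deprecatedILBBackendShareKey] == "true" {
		fmt.Printf("annotation %q is deprecated; use %q\n",
			deprecatedILBBackendShareKey, ilbBackendShareKey)
		return true
	}
	return false
}

func main() {
	fmt.Println(backendShare(map[string]string{deprecatedILBBackendShareKey: "true"}))
}
```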
-func (gce *GCECloud) UpdateBetaGlobalBackendService(bg *computebeta.BackendService) error { +func (g *Cloud) UpdateBetaGlobalBackendService(bg *computebeta.BackendService) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContextWithVersion("update", "", computeBetaVersion) - return mc.Observe(gce.c.BetaBackendServices().Update(ctx, meta.GlobalKey(bg.Name), bg)) + return mc.Observe(g.c.BetaBackendServices().Update(ctx, meta.GlobalKey(bg.Name), bg)) } // UpdateAlphaGlobalBackendService applies the given alpha BackendService as an // update to an existing service. -func (gce *GCECloud) UpdateAlphaGlobalBackendService(bg *computealpha.BackendService) error { +func (g *Cloud) UpdateAlphaGlobalBackendService(bg *computealpha.BackendService) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContextWithVersion("update", "", computeAlphaVersion) - return mc.Observe(gce.c.AlphaBackendServices().Update(ctx, meta.GlobalKey(bg.Name), bg)) + return mc.Observe(g.c.AlphaBackendServices().Update(ctx, meta.GlobalKey(bg.Name), bg)) } // DeleteGlobalBackendService deletes the given BackendService by name. -func (gce *GCECloud) DeleteGlobalBackendService(name string) error { +func (g *Cloud) DeleteGlobalBackendService(name string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContext("delete", "") - return mc.Observe(gce.c.BackendServices().Delete(ctx, meta.GlobalKey(name))) + return mc.Observe(g.c.BackendServices().Delete(ctx, meta.GlobalKey(name))) } // CreateGlobalBackendService creates the given BackendService. -func (gce *GCECloud) CreateGlobalBackendService(bg *compute.BackendService) error { +func (g *Cloud) CreateGlobalBackendService(bg *compute.BackendService) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContext("create", "") - return mc.Observe(gce.c.BackendServices().Insert(ctx, meta.GlobalKey(bg.Name), bg)) + return mc.Observe(g.c.BackendServices().Insert(ctx, meta.GlobalKey(bg.Name), bg)) } // CreateBetaGlobalBackendService creates the given beta BackendService. -func (gce *GCECloud) CreateBetaGlobalBackendService(bg *computebeta.BackendService) error { +func (g *Cloud) CreateBetaGlobalBackendService(bg *computebeta.BackendService) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContextWithVersion("create", "", computeBetaVersion) - return mc.Observe(gce.c.BetaBackendServices().Insert(ctx, meta.GlobalKey(bg.Name), bg)) + return mc.Observe(g.c.BetaBackendServices().Insert(ctx, meta.GlobalKey(bg.Name), bg)) } // CreateAlphaGlobalBackendService creates the given alpha BackendService. -func (gce *GCECloud) CreateAlphaGlobalBackendService(bg *computealpha.BackendService) error { +func (g *Cloud) CreateAlphaGlobalBackendService(bg *computealpha.BackendService) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContextWithVersion("create", "", computeAlphaVersion) - return mc.Observe(gce.c.AlphaBackendServices().Insert(ctx, meta.GlobalKey(bg.Name), bg)) + return mc.Observe(g.c.AlphaBackendServices().Insert(ctx, meta.GlobalKey(bg.Name), bg)) } // ListGlobalBackendServices lists all backend services in the project. 
-func (gce *GCECloud) ListGlobalBackendServices() ([]*compute.BackendService, error) { +func (g *Cloud) ListGlobalBackendServices() ([]*compute.BackendService, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContext("list", "") - v, err := gce.c.BackendServices().List(ctx, filter.None) + v, err := g.c.BackendServices().List(ctx, filter.None) return v, mc.Observe(err) } // GetGlobalBackendServiceHealth returns the health of the BackendService // identified by the given name, in the given instanceGroup. The // instanceGroupLink is the fully qualified self link of an instance group. -func (gce *GCECloud) GetGlobalBackendServiceHealth(name string, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error) { +func (g *Cloud) GetGlobalBackendServiceHealth(name string, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContext("get_health", "") groupRef := &compute.ResourceGroupReference{Group: instanceGroupLink} - v, err := gce.c.BackendServices().GetHealth(ctx, meta.GlobalKey(name), groupRef) + v, err := g.c.BackendServices().GetHealth(ctx, meta.GlobalKey(name), groupRef) return v, mc.Observe(err) } // GetRegionBackendService retrieves a backend by name. -func (gce *GCECloud) GetRegionBackendService(name, region string) (*compute.BackendService, error) { +func (g *Cloud) GetRegionBackendService(name, region string) (*compute.BackendService, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContext("get", region) - v, err := gce.c.RegionBackendServices().Get(ctx, meta.RegionalKey(name, region)) + v, err := g.c.RegionBackendServices().Get(ctx, meta.RegionalKey(name, region)) return v, mc.Observe(err) } // UpdateRegionBackendService applies the given BackendService as an update to // an existing service. -func (gce *GCECloud) UpdateRegionBackendService(bg *compute.BackendService, region string) error { +func (g *Cloud) UpdateRegionBackendService(bg *compute.BackendService, region string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContext("update", region) - return mc.Observe(gce.c.RegionBackendServices().Update(ctx, meta.RegionalKey(bg.Name, region), bg)) + return mc.Observe(g.c.RegionBackendServices().Update(ctx, meta.RegionalKey(bg.Name, region), bg)) } // DeleteRegionBackendService deletes the given BackendService by name. -func (gce *GCECloud) DeleteRegionBackendService(name, region string) error { +func (g *Cloud) DeleteRegionBackendService(name, region string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContext("delete", region) - return mc.Observe(gce.c.RegionBackendServices().Delete(ctx, meta.RegionalKey(name, region))) + return mc.Observe(g.c.RegionBackendServices().Delete(ctx, meta.RegionalKey(name, region))) } // CreateRegionBackendService creates the given BackendService. 
-func (gce *GCECloud) CreateRegionBackendService(bg *compute.BackendService, region string) error { +func (g *Cloud) CreateRegionBackendService(bg *compute.BackendService, region string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContext("create", region) - return mc.Observe(gce.c.RegionBackendServices().Insert(ctx, meta.RegionalKey(bg.Name, region), bg)) + return mc.Observe(g.c.RegionBackendServices().Insert(ctx, meta.RegionalKey(bg.Name, region), bg)) } // ListRegionBackendServices lists all backend services in the project. -func (gce *GCECloud) ListRegionBackendServices(region string) ([]*compute.BackendService, error) { +func (g *Cloud) ListRegionBackendServices(region string) ([]*compute.BackendService, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContext("list", region) - v, err := gce.c.RegionBackendServices().List(ctx, region, filter.None) + v, err := g.c.RegionBackendServices().List(ctx, region, filter.None) return v, mc.Observe(err) } // GetRegionalBackendServiceHealth returns the health of the BackendService // identified by the given name, in the given instanceGroup. The // instanceGroupLink is the fully qualified self link of an instance group. -func (gce *GCECloud) GetRegionalBackendServiceHealth(name, region string, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error) { +func (g *Cloud) GetRegionalBackendServiceHealth(name, region string, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContext("get_health", region) ref := &compute.ResourceGroupReference{Group: instanceGroupLink} - v, err := gce.c.RegionBackendServices().GetHealth(ctx, meta.RegionalKey(name, region), ref) + v, err := g.c.RegionBackendServices().GetHealth(ctx, meta.RegionalKey(name, region), ref) return v, mc.Observe(err) } // SetSecurityPolicyForBetaGlobalBackendService sets the given // SecurityPolicyReference for the BackendService identified by the given name. -func (gce *GCECloud) SetSecurityPolicyForBetaGlobalBackendService(backendServiceName string, securityPolicyReference *computebeta.SecurityPolicyReference) error { +func (g *Cloud) SetSecurityPolicyForBetaGlobalBackendService(backendServiceName string, securityPolicyReference *computebeta.SecurityPolicyReference) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContextWithVersion("set_security_policy", "", computeBetaVersion) - return mc.Observe(gce.c.BetaBackendServices().SetSecurityPolicy(ctx, meta.GlobalKey(backendServiceName), securityPolicyReference)) + return mc.Observe(g.c.BetaBackendServices().SetSecurityPolicy(ctx, meta.GlobalKey(backendServiceName), securityPolicyReference)) } // SetSecurityPolicyForAlphaGlobalBackendService sets the given // SecurityPolicyReference for the BackendService identified by the given name. 
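Every wrapper in gce_backendservice.go (and in gce_cert.go below) repeats one idiom: build a metric context, perform the call, and return `mc.Observe(err)` so latency and outcome are recorded in a single line. A toy version of that idiom; the struct here only prints, where the real one feeds the provider's metrics:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// metricContext is a toy version of the idiom used by every wrapper in
// these files: stamp a start time, then let Observe record latency and
// outcome while passing the error straight through.
type metricContext struct {
	request string
	start   time.Time
}

func newMetricContext(request string) *metricContext {
	return &metricContext{request: request, start: time.Now()}
}

// Observe records the call and returns err unchanged, which is what lets
// the wrappers end with a single `return mc.Observe(apiCall(...))` line.
func (mc *metricContext) Observe(err error) error {
	fmt.Printf("gce op %q took %v, err=%v\n", mc.request, time.Since(mc.start), err)
	return err
}

func deleteBackendService(name string) error {
	mc := newMetricContext("delete " + name)
	return mc.Observe(errors.New("stub: not wired to GCE")) // stand-in for the API call
}

func main() {
	_ = deleteBackendService("be-1")
}
```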
-func (gce *GCECloud) SetSecurityPolicyForAlphaGlobalBackendService(backendServiceName string, securityPolicyReference *computealpha.SecurityPolicyReference) error { +func (g *Cloud) SetSecurityPolicyForAlphaGlobalBackendService(backendServiceName string, securityPolicyReference *computealpha.SecurityPolicyReference) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newBackendServiceMetricContextWithVersion("set_security_policy", "", computeAlphaVersion) - return mc.Observe(gce.c.AlphaBackendServices().SetSecurityPolicy(ctx, meta.GlobalKey(backendServiceName), securityPolicyReference)) + return mc.Observe(g.c.AlphaBackendServices().SetSecurityPolicy(ctx, meta.GlobalKey(backendServiceName), securityPolicyReference)) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_cert.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_cert.go index 3b6614f816f3..5153f067e8a1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_cert.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_cert.go @@ -29,43 +29,43 @@ func newCertMetricContext(request string) *metricContext { } // GetSslCertificate returns the SslCertificate by name. -func (gce *GCECloud) GetSslCertificate(name string) (*compute.SslCertificate, error) { +func (g *Cloud) GetSslCertificate(name string) (*compute.SslCertificate, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newCertMetricContext("get") - v, err := gce.c.SslCertificates().Get(ctx, meta.GlobalKey(name)) + v, err := g.c.SslCertificates().Get(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } // CreateSslCertificate creates and returns a SslCertificate. -func (gce *GCECloud) CreateSslCertificate(sslCerts *compute.SslCertificate) (*compute.SslCertificate, error) { +func (g *Cloud) CreateSslCertificate(sslCerts *compute.SslCertificate) (*compute.SslCertificate, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newCertMetricContext("create") - err := gce.c.SslCertificates().Insert(ctx, meta.GlobalKey(sslCerts.Name), sslCerts) + err := g.c.SslCertificates().Insert(ctx, meta.GlobalKey(sslCerts.Name), sslCerts) if err != nil { return nil, mc.Observe(err) } - return gce.GetSslCertificate(sslCerts.Name) + return g.GetSslCertificate(sslCerts.Name) } // DeleteSslCertificate deletes the SslCertificate by name. -func (gce *GCECloud) DeleteSslCertificate(name string) error { +func (g *Cloud) DeleteSslCertificate(name string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newCertMetricContext("delete") - return mc.Observe(gce.c.SslCertificates().Delete(ctx, meta.GlobalKey(name))) + return mc.Observe(g.c.SslCertificates().Delete(ctx, meta.GlobalKey(name))) } // ListSslCertificates lists all SslCertificates in the project. 
-func (gce *GCECloud) ListSslCertificates() ([]*compute.SslCertificate, error) { +func (g *Cloud) ListSslCertificates() ([]*compute.SslCertificate, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newCertMetricContext("list") - v, err := gce.c.SslCertificates().List(ctx, filter.None) + v, err := g.c.SslCertificates().List(ctx, filter.None) return v, mc.Observe(err) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_clusterid.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_clusterid.go index 46b4ff4f6a0c..2f40167788d8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_clusterid.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_clusterid.go @@ -25,7 +25,6 @@ import ( "sync" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" @@ -33,22 +32,31 @@ import ( "k8s.io/apimachinery/pkg/watch" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" + "k8s.io/klog" ) const ( - // Key used to persist UIDs to configmaps. + // UIDConfigMapName is the Key used to persist UIDs to configmaps. UIDConfigMapName = "ingress-uid" - // Namespace which contains the above config map + + // UIDNamespace is the namespace which contains the above config map UIDNamespace = metav1.NamespaceSystem - // Data keys for the specific ids - UIDCluster = "uid" - UIDProvider = "provider-uid" + + // UIDCluster is the data keys for looking up the clusters UID + UIDCluster = "uid" + + // UIDProvider is the data keys for looking up the providers UID + UIDProvider = "provider-uid" + + // UIDLengthBytes is the length of a UID UIDLengthBytes = 8 + // Frequency of the updateFunc event handler being called // This does not actually query the apiserver for current state - the local cache value is used. 
updateFuncFrequency = 10 * time.Minute ) +// ClusterID is the struct for maintaining information about this cluster's ID type ClusterID struct { idLock sync.RWMutex client clientset.Interface @@ -59,17 +67,17 @@ type ClusterID struct { } // Continually watches for changes to the cluster id config map -func (gce *GCECloud) watchClusterID() { - gce.ClusterID = ClusterID{ +func (g *Cloud) watchClusterID(stop <-chan struct{}) { + g.ClusterID = ClusterID{ cfgMapKey: fmt.Sprintf("%v/%v", UIDNamespace, UIDConfigMapName), - client: gce.client, + client: g.client, } mapEventHandler := cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { m, ok := obj.(*v1.ConfigMap) if !ok || m == nil { - glog.Errorf("Expected v1.ConfigMap, item=%+v, typeIsOk=%v", obj, ok) + klog.Errorf("Expected v1.ConfigMap, item=%+v, typeIsOk=%v", obj, ok) return } if m.Namespace != UIDNamespace || @@ -77,13 +85,13 @@ func (gce *GCECloud) watchClusterID() { return } - glog.V(4).Infof("Observed new configmap for clusteriD: %v, %v; setting local values", m.Name, m.Data) - gce.ClusterID.update(m) + klog.V(4).Infof("Observed new configmap for clusteriD: %v, %v; setting local values", m.Name, m.Data) + g.ClusterID.update(m) }, UpdateFunc: func(old, cur interface{}) { m, ok := cur.(*v1.ConfigMap) if !ok || m == nil { - glog.Errorf("Expected v1.ConfigMap, item=%+v, typeIsOk=%v", cur, ok) + klog.Errorf("Expected v1.ConfigMap, item=%+v, typeIsOk=%v", cur, ok) return } @@ -96,16 +104,16 @@ func (gce *GCECloud) watchClusterID() { return } - glog.V(4).Infof("Observed updated configmap for clusteriD %v, %v; setting local values", m.Name, m.Data) - gce.ClusterID.update(m) + klog.V(4).Infof("Observed updated configmap for clusteriD %v, %v; setting local values", m.Name, m.Data) + g.ClusterID.update(m) }, } - listerWatcher := cache.NewListWatchFromClient(gce.ClusterID.client.CoreV1().RESTClient(), "configmaps", UIDNamespace, fields.Everything()) + listerWatcher := cache.NewListWatchFromClient(g.ClusterID.client.CoreV1().RESTClient(), "configmaps", UIDNamespace, fields.Everything()) var controller cache.Controller - gce.ClusterID.store, controller = cache.NewInformer(newSingleObjectListerWatcher(listerWatcher, UIDConfigMapName), &v1.ConfigMap{}, updateFuncFrequency, mapEventHandler) + g.ClusterID.store, controller = cache.NewInformer(newSingleObjectListerWatcher(listerWatcher, UIDConfigMapName), &v1.ConfigMap{}, updateFuncFrequency, mapEventHandler) - controller.Run(nil) + controller.Run(stop) } // GetID returns the id which is unique to this cluster @@ -131,9 +139,9 @@ func (ci *ClusterID) GetID() (string, error) { return *ci.clusterID, nil } -// GetFederationId returns the id which could represent the entire Federation +// GetFederationID returns the id which could represent the entire Federation // or just the cluster if not federated. -func (ci *ClusterID) GetFederationId() (string, bool, error) { +func (ci *ClusterID) GetFederationID() (string, bool, error) { if err := ci.getOrInitialize(); err != nil { return "", false, err } @@ -141,7 +149,7 @@ func (ci *ClusterID) GetFederationId() (string, bool, error) { ci.idLock.RLock() defer ci.idLock.RUnlock() if ci.clusterID == nil { - return "", false, errors.New("Could not retrieve cluster id") + return "", false, errors.New("could not retrieve cluster id") } // If provider ID is not set, return false @@ -157,7 +165,7 @@ func (ci *ClusterID) GetFederationId() (string, bool, error) { // before the watch has begun. 
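Two functional details hide in the otherwise mechanical gce_clusterid.go hunks: the `glog`→`klog` switch is a drop-in rename (klog is the Kubernetes fork of glog with the same logging API), and `watchClusterID` now takes a stop channel instead of handing `nil` to `controller.Run`, so the config-map informer can actually be shut down. A hedged wiring sketch — where and when the channel is closed is illustrative, not taken from this patch:

```go
// Start the cluster-ID config-map watch with an explicit stop channel.
stop := make(chan struct{})
go g.watchClusterID(stop)

// ...later, on shutdown:
close(stop) // controller.Run(stop) now returns instead of running forever
```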
func (ci *ClusterID) getOrInitialize() error { if ci.store == nil { - return errors.New("GCECloud.ClusterID is not ready. Call Initialize() before using.") + return errors.New("Cloud.ClusterID is not ready. Call Initialize() before using") } if ci.clusterID != nil { @@ -172,12 +180,12 @@ func (ci *ClusterID) getOrInitialize() error { } // The configmap does not exist - let's try creating one. - newId, err := makeUID() + newID, err := makeUID() if err != nil { return err } - glog.V(4).Infof("Creating clusteriD: %v", newId) + klog.V(4).Infof("Creating clusteriD: %v", newID) cfg := &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: UIDConfigMapName, @@ -185,16 +193,16 @@ func (ci *ClusterID) getOrInitialize() error { }, } cfg.Data = map[string]string{ - UIDCluster: newId, - UIDProvider: newId, + UIDCluster: newID, + UIDProvider: newID, } if _, err := ci.client.CoreV1().ConfigMaps(UIDNamespace).Create(cfg); err != nil { - glog.Errorf("GCE cloud provider failed to create %v config map to store cluster id: %v", ci.cfgMapKey, err) + klog.Errorf("GCE cloud provider failed to create %v config map to store cluster id: %v", ci.cfgMapKey, err) return err } - glog.V(2).Infof("Created a config map containing clusteriD: %v", newId) + klog.V(2).Infof("Created a config map containing clusteriD: %v", newID) ci.update(cfg) return nil } @@ -211,7 +219,7 @@ func (ci *ClusterID) getConfigMap() (bool, error) { m, ok := item.(*v1.ConfigMap) if !ok || m == nil { err = fmt.Errorf("Expected v1.ConfigMap, item=%+v, typeIsOk=%v", item, ok) - glog.Error(err) + klog.Error(err) return false, err } ci.update(m) @@ -224,8 +232,8 @@ func (ci *ClusterID) update(m *v1.ConfigMap) { if clusterID, exists := m.Data[UIDCluster]; exists { ci.clusterID = &clusterID } - if provId, exists := m.Data[UIDProvider]; exists { - ci.providerID = &provId + if provID, exists := m.Data[UIDProvider]; exists { + ci.providerID = &provID } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_clusters.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_clusters.go index ecd935716a7d..379f5396a253 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_clusters.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_clusters.go @@ -18,18 +18,22 @@ package gce import ( "context" - container "google.golang.org/api/container/v1" + "fmt" + + "google.golang.org/api/container/v1" + "k8s.io/klog" ) func newClustersMetricContext(request, zone string) *metricContext { return newGenericMetricContext("clusters", request, unusedMetricLabel, zone, computeV1Version) } -func (gce *GCECloud) ListClusters(ctx context.Context) ([]string, error) { +// ListClusters will return a list of cluster names for the associated project +func (g *Cloud) ListClusters(ctx context.Context) ([]string, error) { allClusters := []string{} - for _, zone := range gce.managedZones { - clusters, err := gce.listClustersInZone(zone) + for _, zone := range g.managedZones { + clusters, err := g.listClustersInZone(zone) if err != nil { return nil, err } @@ -40,25 +44,38 @@ func (gce *GCECloud) ListClusters(ctx context.Context) ([]string, error) { return allClusters, nil } -func (gce *GCECloud) GetManagedClusters(ctx context.Context) ([]*container.Cluster, error) { +// GetManagedClusters will return the cluster objects associated to this project +func (g *Cloud) GetManagedClusters(ctx context.Context) ([]*container.Cluster, error) { managedClusters := 
[]*container.Cluster{} - for _, zone := range gce.managedZones { - clusters, err := gce.getClustersInZone(zone) + + if g.regional { + var err error + managedClusters, err = g.getClustersInLocation(g.region) if err != nil { return nil, err } - managedClusters = append(managedClusters, clusters...) + } else if len(g.managedZones) >= 1 { + for _, zone := range g.managedZones { + clusters, err := g.getClustersInLocation(zone) + if err != nil { + return nil, err + } + managedClusters = append(managedClusters, clusters...) + } + } else { + return nil, fmt.Errorf("no zones associated with this cluster(%s)", g.ProjectID()) } return managedClusters, nil } -func (gce *GCECloud) Master(ctx context.Context, clusterName string) (string, error) { +// Master returned the dns address of the master +func (g *Cloud) Master(ctx context.Context, clusterName string) (string, error) { return "k8s-" + clusterName + "-master.internal", nil } -func (gce *GCECloud) listClustersInZone(zone string) ([]string, error) { - clusters, err := gce.getClustersInZone(zone) +func (g *Cloud) listClustersInZone(zone string) ([]string, error) { + clusters, err := g.getClustersInLocation(zone) if err != nil { return nil, err } @@ -70,13 +87,18 @@ func (gce *GCECloud) listClustersInZone(zone string) ([]string, error) { return result, nil } -func (gce *GCECloud) getClustersInZone(zone string) ([]*container.Cluster, error) { - mc := newClustersMetricContext("list_zone", zone) +func (g *Cloud) getClustersInLocation(zoneOrRegion string) ([]*container.Cluster, error) { + // TODO: Issue/68913 migrate metric to list_location instead of list_zone. + mc := newClustersMetricContext("list_zone", zoneOrRegion) // TODO: use PageToken to list all not just the first 500 - list, err := gce.containerService.Projects.Zones.Clusters.List(gce.projectID, zone).Do() + location := getLocationName(g.projectID, zoneOrRegion) + list, err := g.containerService.Projects.Locations.Clusters.List(location).Do() if err != nil { return nil, mc.Observe(err) } + if list.Header.Get("nextPageToken") != "" { + klog.Errorf("Failed to get all clusters for request, received next page token %s", list.Header.Get("nextPageToken")) + } return list.Clusters, mc.Observe(nil) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_disks.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_disks.go index 3687b1e7f281..7e10e50c32bb 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_disks.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_disks.go @@ -28,25 +28,28 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" - "github.com/golang/glog" - computebeta "google.golang.org/api/compute/v0.beta" compute "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" "k8s.io/kubernetes/pkg/features" ) +// DiskType defines a specific type for holding disk types (eg. 
pd-ssd) type DiskType string const ( - DiskTypeSSD = "pd-ssd" + // DiskTypeSSD the type for persistent SSD storage + DiskTypeSSD = "pd-ssd" + + // DiskTypeStandard the type for standard persistent storage DiskTypeStandard = "pd-standard" diskTypeDefault = DiskTypeStandard @@ -85,7 +88,7 @@ type diskServiceManager interface { // Attach a persistent disk on GCE with the given disk spec to the specified instance. AttachDiskOnCloudProvider( - disk *GCEDisk, + disk *Disk, readWrite string, instanceZone string, instanceName string) error @@ -96,18 +99,18 @@ type diskServiceManager interface { instanceName string, devicePath string) error - ResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64, zone string) error - RegionalResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64) error + ResizeDiskOnCloudProvider(disk *Disk, sizeGb int64, zone string) error + RegionalResizeDiskOnCloudProvider(disk *Disk, sizeGb int64) error // Gets the persistent disk from GCE with the given diskName. - GetDiskFromCloudProvider(zone string, diskName string) (*GCEDisk, error) + GetDiskFromCloudProvider(zone string, diskName string) (*Disk, error) // Gets the regional persistent disk from GCE with the given diskName. - GetRegionalDiskFromCloudProvider(diskName string) (*GCEDisk, error) + GetRegionalDiskFromCloudProvider(diskName string) (*Disk, error) } type gceServiceManager struct { - gce *GCECloud + gce *Cloud } var _ diskServiceManager = &gceServiceManager{} @@ -119,7 +122,7 @@ func (manager *gceServiceManager) CreateDiskOnCloudProvider( diskType string, zone string) error { diskTypeURI, err := manager.getDiskTypeURI( - manager.gce.region /* diskRegion */, singleZone{zone}, diskType, false /* useBetaAPI */) + manager.gce.region /* diskRegion */, singleZone{zone}, diskType) if err != nil { return err } @@ -148,17 +151,17 @@ func (manager *gceServiceManager) CreateRegionalDiskOnCloudProvider( } diskTypeURI, err := manager.getDiskTypeURI( - manager.gce.region /* diskRegion */, multiZone{replicaZones}, diskType, true /* useBetaAPI */) + manager.gce.region /* diskRegion */, multiZone{replicaZones}, diskType) if err != nil { return err } fullyQualifiedReplicaZones := []string{} for _, replicaZone := range replicaZones.UnsortedList() { fullyQualifiedReplicaZones = append( - fullyQualifiedReplicaZones, manager.getReplicaZoneURI(replicaZone, true)) + fullyQualifiedReplicaZones, manager.getReplicaZoneURI(replicaZone)) } - diskToCreateBeta := &computebeta.Disk{ + diskToCreate := &compute.Disk{ Name: name, SizeGb: sizeGb, Description: tagsStr, @@ -168,11 +171,11 @@ func (manager *gceServiceManager) CreateRegionalDiskOnCloudProvider( ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() - return manager.gce.c.BetaRegionDisks().Insert(ctx, meta.RegionalKey(name, manager.gce.region), diskToCreateBeta) + return manager.gce.c.RegionDisks().Insert(ctx, meta.RegionalKey(name, manager.gce.region), diskToCreate) } func (manager *gceServiceManager) AttachDiskOnCloudProvider( - disk *GCEDisk, + disk *Disk, readWrite string, instanceZone string, instanceName string) error { @@ -205,13 +208,13 @@ func (manager *gceServiceManager) DetachDiskOnCloudProvider( func (manager *gceServiceManager) GetDiskFromCloudProvider( zone string, - diskName string) (*GCEDisk, error) { + diskName string) (*Disk, error) { if zone == "" { - return nil, fmt.Errorf("Can not fetch disk %q. Zone is empty.", diskName) + return nil, fmt.Errorf("can not fetch disk %q, zone is empty", diskName) } if diskName == "" { - return nil, fmt.Errorf("Can not fetch disk. 
Zone is specified (%q). But disk name is empty.", zone) + return nil, fmt.Errorf("can not fetch disk, zone is specified (%q), but disk name is empty", zone) } ctx, cancel := cloud.ContextWithCallTimeout() @@ -231,7 +234,7 @@ func (manager *gceServiceManager) GetDiskFromCloudProvider( return nil, fmt.Errorf("failed to extract region from zone for %q/%q err=%v", zone, diskName, err) } - return &GCEDisk{ + return &Disk{ ZoneInfo: zoneInfo, Region: region, Name: diskStable.Name, @@ -242,7 +245,7 @@ func (manager *gceServiceManager) GetDiskFromCloudProvider( } func (manager *gceServiceManager) GetRegionalDiskFromCloudProvider( - diskName string) (*GCEDisk, error) { + diskName string) (*Disk, error) { if !utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) { return nil, fmt.Errorf("the regional PD feature is only available with the %s Kubernetes feature gate enabled", features.GCERegionalPersistentDisk) @@ -250,7 +253,7 @@ func (manager *gceServiceManager) GetRegionalDiskFromCloudProvider( ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() - diskBeta, err := manager.gce.c.BetaRegionDisks().Get(ctx, meta.RegionalKey(diskName, manager.gce.region)) + diskBeta, err := manager.gce.c.RegionDisks().Get(ctx, meta.RegionalKey(diskName, manager.gce.region)) if err != nil { return nil, err } @@ -260,7 +263,7 @@ func (manager *gceServiceManager) GetRegionalDiskFromCloudProvider( zones.Insert(lastComponent(zoneURI)) } - return &GCEDisk{ + return &Disk{ ZoneInfo: multiZone{zones}, Region: lastComponent(diskBeta.Region), Name: diskBeta.Name, @@ -287,10 +290,10 @@ func (manager *gceServiceManager) DeleteRegionalDiskOnCloudProvider( ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() - return manager.gce.c.BetaRegionDisks().Delete(ctx, meta.RegionalKey(diskName, manager.gce.region)) + return manager.gce.c.RegionDisks().Delete(ctx, meta.RegionalKey(diskName, manager.gce.region)) } -func (manager *gceServiceManager) getDiskSourceURI(disk *GCEDisk) (string, error) { +func (manager *gceServiceManager) getDiskSourceURI(disk *Disk) (string, error) { getProjectsAPIEndpoint := manager.getProjectsAPIEndpoint() switch zoneInfo := disk.ZoneInfo.(type) { @@ -325,14 +328,9 @@ func (manager *gceServiceManager) getDiskSourceURI(disk *GCEDisk) (string, error } func (manager *gceServiceManager) getDiskTypeURI( - diskRegion string, diskZoneInfo zoneType, diskType string, useBetaAPI bool) (string, error) { + diskRegion string, diskZoneInfo zoneType, diskType string) (string, error) { - var getProjectsAPIEndpoint string - if useBetaAPI { - getProjectsAPIEndpoint = manager.getProjectsAPIEndpointBeta() - } else { - getProjectsAPIEndpoint = manager.getProjectsAPIEndpoint() - } + getProjectsAPIEndpoint := manager.getProjectsAPIEndpoint() switch zoneInfo := diskZoneInfo.(type) { case singleZone: @@ -361,15 +359,8 @@ func (manager *gceServiceManager) getDiskTypeURI( } } -func (manager *gceServiceManager) getReplicaZoneURI(zone string, useBetaAPI bool) string { - var getProjectsAPIEndpoint string - if useBetaAPI { - getProjectsAPIEndpoint = manager.getProjectsAPIEndpointBeta() - } else { - getProjectsAPIEndpoint = manager.getProjectsAPIEndpoint() - } - - return getProjectsAPIEndpoint + fmt.Sprintf( +func (manager *gceServiceManager) getReplicaZoneURI(zone string) string { + return manager.getProjectsAPIEndpoint() + fmt.Sprintf( replicaZoneURITemplateSingleZone, manager.gce.projectID, zone) @@ -402,14 +393,14 @@ func (manager *gceServiceManager) getRegionFromZone(zoneInfo zoneType) (string, region, 
err := GetGCERegion(zone) if err != nil { - glog.Warningf("failed to parse GCE region from zone %q: %v", zone, err) + klog.Warningf("failed to parse GCE region from zone %q: %v", zone, err) region = manager.gce.region } return region, nil } -func (manager *gceServiceManager) ResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64, zone string) error { +func (manager *gceServiceManager) ResizeDiskOnCloudProvider(disk *Disk, sizeGb int64, zone string) error { resizeServiceRequest := &compute.DisksResizeRequest{ SizeGb: sizeGb, } @@ -419,18 +410,18 @@ func (manager *gceServiceManager) ResizeDiskOnCloudProvider(disk *GCEDisk, sizeG return manager.gce.c.Disks().Resize(ctx, meta.ZonalKey(disk.Name, zone), resizeServiceRequest) } -func (manager *gceServiceManager) RegionalResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64) error { +func (manager *gceServiceManager) RegionalResizeDiskOnCloudProvider(disk *Disk, sizeGb int64) error { if !utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) { return fmt.Errorf("the regional PD feature is only available with the %s Kubernetes feature gate enabled", features.GCERegionalPersistentDisk) } - resizeServiceRequest := &computebeta.RegionDisksResizeRequest{ + resizeServiceRequest := &compute.RegionDisksResizeRequest{ SizeGb: sizeGb, } ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() - return manager.gce.c.BetaRegionDisks().Resize(ctx, meta.RegionalKey(disk.Name, disk.Region), resizeServiceRequest) + return manager.gce.c.RegionDisks().Resize(ctx, meta.RegionalKey(disk.Name, disk.Region), resizeServiceRequest) } // Disks is interface for manipulation with GCE PDs. @@ -472,13 +463,14 @@ type Disks interface { GetAutoLabelsForPD(name string, zone string) (map[string]string, error) } -// GCECloud implements Disks. -var _ Disks = (*GCECloud)(nil) +// Cloud implements Disks. +var _ Disks = (*Cloud)(nil) -// GCECloud implements PVLabeler. -var _ cloudprovider.PVLabeler = (*GCECloud)(nil) +// Cloud implements PVLabeler. +var _ cloudprovider.PVLabeler = (*Cloud)(nil) -type GCEDisk struct { +// Disk holds all relevant data about an instance of GCE storage +type Disk struct { ZoneInfo zoneType Region string Name string @@ -510,7 +502,8 @@ func newDiskMetricContextRegional(request, region string) *metricContext { return newGenericMetricContext("disk", request, region, unusedMetricLabel, computeV1Version) } -func (gce *GCECloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error) { +// GetLabelsForVolume retrieved the label info for the provided volume +func (g *Cloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error) { // Ignore any volumes that are being provisioned if pv.Spec.GCEPersistentDisk.PDName == volume.ProvisionedVolumeName { return nil, nil @@ -519,7 +512,7 @@ func (gce *GCECloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVo // If the zone is already labeled, honor the hint zone := pv.Labels[kubeletapis.LabelZoneFailureDomain] - labels, err := gce.GetAutoLabelsForPD(pv.Spec.GCEPersistentDisk.PDName, zone) + labels, err := g.GetAutoLabelsForPD(pv.Spec.GCEPersistentDisk.PDName, zone) if err != nil { return nil, err } @@ -527,28 +520,30 @@ func (gce *GCECloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVo return labels, nil } -func (gce *GCECloud) AttachDisk(diskName string, nodeName types.NodeName, readOnly bool, regional bool) error { +// AttachDisk attaches given disk to the node with the specified NodeName. 
+// Current instance is used when instanceID is empty string. +func (g *Cloud) AttachDisk(diskName string, nodeName types.NodeName, readOnly bool, regional bool) error { instanceName := mapNodeNameToInstanceName(nodeName) - instance, err := gce.getInstanceByName(instanceName) + instance, err := g.getInstanceByName(instanceName) if err != nil { return fmt.Errorf("error getting instance %q", instanceName) } // Try fetching as regional PD - var disk *GCEDisk + var disk *Disk var mc *metricContext if regional && utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) { - disk, err = gce.getRegionalDiskByName(diskName) + disk, err = g.getRegionalDiskByName(diskName) if err != nil { return err } - mc = newDiskMetricContextRegional("attach", gce.region) + mc = newDiskMetricContextRegional("attach", g.region) } else { - disk, err = gce.getDiskByName(diskName, instance.Zone) + disk, err = g.getDiskByName(diskName, instance.Zone) if err != nil { return err } - mc = newDiskMetricContextZonal("attach", gce.region, instance.Zone) + mc = newDiskMetricContextZonal("attach", g.region, instance.Zone) } readWrite := "READ_WRITE" @@ -556,16 +551,18 @@ func (gce *GCECloud) AttachDisk(diskName string, nodeName types.NodeName, readOn readWrite = "READ_ONLY" } - return mc.Observe(gce.manager.AttachDiskOnCloudProvider(disk, readWrite, instance.Zone, instance.Name)) + return mc.Observe(g.manager.AttachDiskOnCloudProvider(disk, readWrite, instance.Zone, instance.Name)) } -func (gce *GCECloud) DetachDisk(devicePath string, nodeName types.NodeName) error { +// DetachDisk detaches given disk to the node with the specified NodeName. +// Current instance is used when nodeName is empty string. +func (g *Cloud) DetachDisk(devicePath string, nodeName types.NodeName) error { instanceName := mapNodeNameToInstanceName(nodeName) - inst, err := gce.getInstanceByName(instanceName) + inst, err := g.getInstanceByName(instanceName) if err != nil { if err == cloudprovider.InstanceNotFound { // If instance no longer exists, safe to assume volume is not attached. - glog.Warningf( + klog.Warningf( "Instance %q does not exist. DetachDisk will assume PD %q is not attached to it.", instanceName, devicePath) @@ -575,17 +572,18 @@ func (gce *GCECloud) DetachDisk(devicePath string, nodeName types.NodeName) erro return fmt.Errorf("error getting instance %q", instanceName) } - mc := newDiskMetricContextZonal("detach", gce.region, inst.Zone) - return mc.Observe(gce.manager.DetachDiskOnCloudProvider(inst.Zone, inst.Name, devicePath)) + mc := newDiskMetricContextZonal("detach", g.region, inst.Zone) + return mc.Observe(g.manager.DetachDiskOnCloudProvider(inst.Zone, inst.Name, devicePath)) } -func (gce *GCECloud) DiskIsAttached(diskName string, nodeName types.NodeName) (bool, error) { +// DiskIsAttached checks if a disk is attached to the node with the specified NodeName. +func (g *Cloud) DiskIsAttached(diskName string, nodeName types.NodeName) (bool, error) { instanceName := mapNodeNameToInstanceName(nodeName) - instance, err := gce.getInstanceByName(instanceName) + instance, err := g.getInstanceByName(instanceName) if err != nil { if err == cloudprovider.InstanceNotFound { // If instance no longer exists, safe to assume volume is not attached. - glog.Warningf( + klog.Warningf( "Instance %q does not exist. 
DiskIsAttached will assume PD %q is not attached to it.", instanceName, diskName) @@ -605,17 +603,19 @@ func (gce *GCECloud) DiskIsAttached(diskName string, nodeName types.NodeName) (b return false, nil } -func (gce *GCECloud) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) { +// DisksAreAttached is a batch function to check if a list of disks are attached +// to the node with the specified NodeName. +func (g *Cloud) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) { attached := make(map[string]bool) for _, diskName := range diskNames { attached[diskName] = false } instanceName := mapNodeNameToInstanceName(nodeName) - instance, err := gce.getInstanceByName(instanceName) + instance, err := g.getInstanceByName(instanceName) if err != nil { if err == cloudprovider.InstanceNotFound { // If instance no longer exists, safe to assume volume is not attached. - glog.Warningf( + klog.Warningf( "Instance %q does not exist. DisksAreAttached will assume PD %v are not attached to it.", instanceName, diskNames) @@ -640,11 +640,11 @@ func (gce *GCECloud) DisksAreAttached(diskNames []string, nodeName types.NodeNam // CreateDisk creates a new Persistent Disk, with the specified name & // size, in the specified zone. It stores specified tags encoded in // JSON in Description field. -func (gce *GCECloud) CreateDisk( +func (g *Cloud) CreateDisk( name string, diskType string, zone string, sizeGb int64, tags map[string]string) error { // Do not allow creation of PDs in zones that are do not have nodes. Such PDs // are not currently usable. - curZones, err := gce.GetAllCurrentZones() + curZones, err := g.GetAllCurrentZones() if err != nil { return err } @@ -652,7 +652,7 @@ func (gce *GCECloud) CreateDisk( return fmt.Errorf("kubernetes does not have a node in zone %q", zone) } - tagsStr, err := gce.encodeDiskTags(tags) + tagsStr, err := g.encodeDiskTags(tags) if err != nil { return err } @@ -662,14 +662,14 @@ func (gce *GCECloud) CreateDisk( return err } - mc := newDiskMetricContextZonal("create", gce.region, zone) + mc := newDiskMetricContextZonal("create", g.region, zone) - err = gce.manager.CreateDiskOnCloudProvider( + err = g.manager.CreateDiskOnCloudProvider( name, sizeGb, tagsStr, diskType, zone) mc.Observe(err) if isGCEError(err, "alreadyExists") { - glog.Warningf("GCE PD %q already exists, reusing", name) + klog.Warningf("GCE PD %q already exists, reusing", name) return nil } return err @@ -678,14 +678,14 @@ func (gce *GCECloud) CreateDisk( // CreateRegionalDisk creates a new Regional Persistent Disk, with the specified // name & size, replicated to the specified zones. It stores specified tags // encoded in JSON in Description field. -func (gce *GCECloud) CreateRegionalDisk( +func (g *Cloud) CreateRegionalDisk( name string, diskType string, replicaZones sets.String, sizeGb int64, tags map[string]string) error { // Do not allow creation of PDs in zones that are do not have nodes. Such PDs // are not currently usable. This functionality should be reverted to checking // against managed zones if we want users to be able to create RegionalDisks // in zones where there are no nodes - curZones, err := gce.GetAllCurrentZones() + curZones, err := g.GetAllCurrentZones() if err != nil { return err } @@ -693,7 +693,7 @@ func (gce *GCECloud) CreateRegionalDisk( return fmt.Errorf("kubernetes does not have nodes in specified zones: %q. 
Zones that contain nodes: %q", replicaZones.Difference(curZones), curZones) } - tagsStr, err := gce.encodeDiskTags(tags) + tagsStr, err := g.encodeDiskTags(tags) if err != nil { return err } @@ -703,14 +703,14 @@ func (gce *GCECloud) CreateRegionalDisk( return err } - mc := newDiskMetricContextRegional("create", gce.region) + mc := newDiskMetricContextRegional("create", g.region) - err = gce.manager.CreateRegionalDiskOnCloudProvider( + err = g.manager.CreateRegionalDiskOnCloudProvider( name, sizeGb, tagsStr, diskType, replicaZones) mc.Observe(err) if isGCEError(err, "alreadyExists") { - glog.Warningf("GCE PD %q already exists, reusing", name) + klog.Warningf("GCE PD %q already exists, reusing", name) return nil } return err @@ -727,8 +727,9 @@ func getDiskType(diskType string) (string, error) { } } -func (gce *GCECloud) DeleteDisk(diskToDelete string) error { - err := gce.doDeleteDisk(diskToDelete) +// DeleteDisk deletes rgw referenced persistent disk. +func (g *Cloud) DeleteDisk(diskToDelete string) error { + err := g.doDeleteDisk(diskToDelete) if isGCEError(err, "resourceInUseByAnotherResource") { return volume.NewDeletedVolumeInUseError(err.Error()) } @@ -740,8 +741,8 @@ func (gce *GCECloud) DeleteDisk(diskToDelete string) error { } // ResizeDisk expands given disk and returns new disk size -func (gce *GCECloud) ResizeDisk(diskToResize string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) { - disk, err := gce.GetDiskByNameUnknownZone(diskToResize) +func (g *Cloud) ResizeDisk(diskToResize string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) { + disk, err := g.GetDiskByNameUnknownZone(diskToResize) if err != nil { return oldSize, err } @@ -760,26 +761,24 @@ func (gce *GCECloud) ResizeDisk(diskToResize string, oldSize resource.Quantity, switch zoneInfo := disk.ZoneInfo.(type) { case singleZone: mc = newDiskMetricContextZonal("resize", disk.Region, zoneInfo.zone) - err := gce.manager.ResizeDiskOnCloudProvider(disk, requestGIB, zoneInfo.zone) + err := g.manager.ResizeDiskOnCloudProvider(disk, requestGIB, zoneInfo.zone) if err != nil { return oldSize, mc.Observe(err) - } else { - return newSizeQuant, mc.Observe(err) } + return newSizeQuant, mc.Observe(err) case multiZone: if !utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) { return oldSize, fmt.Errorf("disk.ZoneInfo has unexpected type %T", zoneInfo) } mc = newDiskMetricContextRegional("resize", disk.Region) - err := gce.manager.RegionalResizeDiskOnCloudProvider(disk, requestGIB) + err := g.manager.RegionalResizeDiskOnCloudProvider(disk, requestGIB) if err != nil { return oldSize, mc.Observe(err) - } else { - return newSizeQuant, mc.Observe(err) } + return newSizeQuant, mc.Observe(err) case nil: return oldSize, fmt.Errorf("PD has nil ZoneInfo: %v", disk) default: @@ -787,13 +786,13 @@ func (gce *GCECloud) ResizeDisk(diskToResize string, oldSize resource.Quantity, } } -// Builds the labels that should be automatically added to a PersistentVolume backed by a GCE PD +// GetAutoLabelsForPD builds the labels that should be automatically added to a PersistentVolume backed by a GCE PD // Specifically, this builds FailureDomain (zone) and Region labels. // The PersistentVolumeLabel admission controller calls this and adds the labels when a PV is created. // If zone is specified, the volume will only be found in the specified zone, // otherwise all managed zones will be searched. 
-func (gce *GCECloud) GetAutoLabelsForPD(name string, zone string) (map[string]string, error) { - var disk *GCEDisk +func (g *Cloud) GetAutoLabelsForPD(name string, zone string) (map[string]string, error) { + var disk *Disk var err error if zone == "" { // For regional PDs this is fine, but for zonal PDs we would like as far @@ -804,7 +803,7 @@ func (gce *GCECloud) GetAutoLabelsForPD(name string, zone string) (map[string]st // However, wherever possible the zone should be passed (and it is // passed for most cases that we can control, e.g. dynamic volume // provisioning). - disk, err = gce.GetDiskByNameUnknownZone(name) + disk, err = g.GetDiskByNameUnknownZone(name) if err != nil { return nil, err } @@ -815,24 +814,24 @@ func (gce *GCECloud) GetAutoLabelsForPD(name string, zone string) (map[string]st if utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) { zoneSet, err := volumeutil.LabelZonesToSet(zone) if err != nil { - glog.Warningf("Failed to parse zone field: %q. Will use raw field.", zone) + klog.Warningf("Failed to parse zone field: %q. Will use raw field.", zone) } if len(zoneSet) > 1 { // Regional PD - disk, err = gce.getRegionalDiskByName(name) + disk, err = g.getRegionalDiskByName(name) if err != nil { return nil, err } } else { // Zonal PD - disk, err = gce.getDiskByName(name, zone) + disk, err = g.getDiskByName(name, zone) if err != nil { return nil, err } } } else { - disk, err = gce.getDiskByName(name, zone) + disk, err = g.getDiskByName(name, zone) if err != nil { return nil, err } @@ -867,11 +866,11 @@ func (gce *GCECloud) GetAutoLabelsForPD(name string, zone string) (map[string]st return labels, nil } -// Returns a GCEDisk for the disk, if it is found in the specified zone. +// Returns a Disk for the disk, if it is found in the specified zone. // If not found, returns (nil, nil) -func (gce *GCECloud) findDiskByName(diskName string, zone string) (*GCEDisk, error) { - mc := newDiskMetricContextZonal("get", gce.region, zone) - disk, err := gce.manager.GetDiskFromCloudProvider(zone, diskName) +func (g *Cloud) findDiskByName(diskName string, zone string) (*Disk, error) { + mc := newDiskMetricContextZonal("get", g.region, zone) + disk, err := g.manager.GetDiskFromCloudProvider(zone, diskName) if err == nil { return disk, mc.Observe(nil) } @@ -882,19 +881,19 @@ func (gce *GCECloud) findDiskByName(diskName string, zone string) (*GCEDisk, err } // Like findDiskByName, but returns an error if the disk is not found -func (gce *GCECloud) getDiskByName(diskName string, zone string) (*GCEDisk, error) { - disk, err := gce.findDiskByName(diskName, zone) +func (g *Cloud) getDiskByName(diskName string, zone string) (*Disk, error) { + disk, err := g.findDiskByName(diskName, zone) if disk == nil && err == nil { return nil, fmt.Errorf("GCE persistent disk not found: diskName=%q zone=%q", diskName, zone) } return disk, err } -// Returns a GCEDisk for the regional disk, if it is found. +// Returns a Disk for the regional disk, if it is found. 
// If not found, returns (nil, nil) -func (gce *GCECloud) findRegionalDiskByName(diskName string) (*GCEDisk, error) { - mc := newDiskMetricContextRegional("get", gce.region) - disk, err := gce.manager.GetRegionalDiskFromCloudProvider(diskName) +func (g *Cloud) findRegionalDiskByName(diskName string) (*Disk, error) { + mc := newDiskMetricContextRegional("get", g.region) + disk, err := g.manager.GetRegionalDiskFromCloudProvider(diskName) if err == nil { return disk, mc.Observe(nil) } @@ -905,20 +904,20 @@ func (gce *GCECloud) findRegionalDiskByName(diskName string) (*GCEDisk, error) { } // Like findRegionalDiskByName, but returns an error if the disk is not found -func (gce *GCECloud) getRegionalDiskByName(diskName string) (*GCEDisk, error) { - disk, err := gce.findRegionalDiskByName(diskName) +func (g *Cloud) getRegionalDiskByName(diskName string) (*Disk, error) { + disk, err := g.findRegionalDiskByName(diskName) if disk == nil && err == nil { return nil, fmt.Errorf("GCE regional persistent disk not found: diskName=%q", diskName) } return disk, err } -// Scans all managed zones to return the GCE PD +// GetDiskByNameUnknownZone scans all managed zones to return the GCE PD // Prefer getDiskByName, if the zone can be established // Return cloudprovider.DiskNotFound if the given disk cannot be found in any zone -func (gce *GCECloud) GetDiskByNameUnknownZone(diskName string) (*GCEDisk, error) { +func (g *Cloud) GetDiskByNameUnknownZone(diskName string) (*Disk, error) { if utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) { - regionalDisk, err := gce.getRegionalDiskByName(diskName) + regionalDisk, err := g.getRegionalDiskByName(diskName) if err == nil { return regionalDisk, err } @@ -934,9 +933,9 @@ func (gce *GCECloud) GetDiskByNameUnknownZone(diskName string) (*GCEDisk, error) // admission control, but that might be a little weird (values changing // on create) - var found *GCEDisk - for _, zone := range gce.managedZones { - disk, err := gce.findDiskByName(diskName, zone) + var found *Disk + for _, zone := range g.managedZones { + disk, err := g.findDiskByName(diskName, zone) if err != nil { return nil, err } @@ -949,7 +948,7 @@ func (gce *GCECloud) GetDiskByNameUnknownZone(diskName string) (*GCEDisk, error) switch zoneInfo := disk.ZoneInfo.(type) { case multiZone: if zoneInfo.replicaZones.Has(zone) { - glog.Warningf("GCE PD name (%q) was found in multiple zones (%q), but ok because it is a RegionalDisk.", + klog.Warningf("GCE PD name (%q) was found in multiple zones (%q), but ok because it is a RegionalDisk.", diskName, zoneInfo.replicaZones) continue } @@ -963,15 +962,15 @@ func (gce *GCECloud) GetDiskByNameUnknownZone(diskName string) (*GCEDisk, error) if found != nil { return found, nil } - glog.Warningf("GCE persistent disk %q not found in managed zones (%s)", - diskName, strings.Join(gce.managedZones, ",")) + klog.Warningf("GCE persistent disk %q not found in managed zones (%s)", + diskName, strings.Join(g.managedZones, ",")) return nil, cloudprovider.DiskNotFound } // encodeDiskTags encodes requested volume tags into JSON string, as GCE does // not support tags on GCE PDs and we use Description field as fallback. 
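The gce_disks.go hunks above retire the v0.beta compute client for regional PDs: create, get, delete, and resize all move to the GA compute/v1 `RegionDisks` surface, and the `useBetaAPI` plumbing in the disk-type and replica-zone URI helpers disappears. A minimal sketch of a regional resize after this change (disk name, region, and size are illustrative):

```go
// Regional-disk resize via the GA compute/v1 types; previously this went
// through c.BetaRegionDisks() with the v0.beta request type.
func resizeRegionalExample(g *Cloud) error {
	ctx, cancel := cloud.ContextWithCallTimeout()
	defer cancel()

	req := &compute.RegionDisksResizeRequest{SizeGb: 200}
	return g.c.RegionDisks().Resize(ctx, meta.RegionalKey("my-regional-pd", "us-central1"), req)
}
```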
-func (gce *GCECloud) encodeDiskTags(tags map[string]string) (string, error) { +func (g *Cloud) encodeDiskTags(tags map[string]string) (string, error) { if len(tags) == 0 { // No tags -> empty JSON return "", nil @@ -984,8 +983,8 @@ func (gce *GCECloud) encodeDiskTags(tags map[string]string) (string, error) { return string(enc), nil } -func (gce *GCECloud) doDeleteDisk(diskToDelete string) error { - disk, err := gce.GetDiskByNameUnknownZone(diskToDelete) +func (g *Cloud) doDeleteDisk(diskToDelete string) error { + disk, err := g.GetDiskByNameUnknownZone(diskToDelete) if err != nil { return err } @@ -995,14 +994,14 @@ func (gce *GCECloud) doDeleteDisk(diskToDelete string) error { switch zoneInfo := disk.ZoneInfo.(type) { case singleZone: mc = newDiskMetricContextZonal("delete", disk.Region, zoneInfo.zone) - return mc.Observe(gce.manager.DeleteDiskOnCloudProvider(zoneInfo.zone, disk.Name)) + return mc.Observe(g.manager.DeleteDiskOnCloudProvider(zoneInfo.zone, disk.Name)) case multiZone: if !utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) { return fmt.Errorf("disk.ZoneInfo has unexpected type %T", zoneInfo) } mc = newDiskMetricContextRegional("delete", disk.Region) - return mc.Observe(gce.manager.DeleteRegionalDiskOnCloudProvider(disk.Name)) + return mc.Observe(g.manager.DeleteRegionalDiskOnCloudProvider(disk.Name)) case nil: return fmt.Errorf("PD has nil ZoneInfo: %v", disk) default: diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_fake.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_fake.go new file mode 100644 index 000000000000..73a724d74ae0 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_fake.go @@ -0,0 +1,83 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gce + +import ( + "fmt" + "net/http" + + compute "google.golang.org/api/compute/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" +) + +// TestClusterValues holds the config values for the fake/test gce cloud object. +type TestClusterValues struct { + ProjectID string + Region string + ZoneName string + SecondaryZoneName string + ClusterID string + ClusterName string +} + +// DefaultTestClusterValues Creates a reasonable set of default cluster values +// for generating a new test fake GCE cloud instance. 
+func DefaultTestClusterValues() TestClusterValues { + return TestClusterValues{ + ProjectID: "test-project", + Region: "us-central1", + ZoneName: "us-central1-b", + SecondaryZoneName: "us-central1-c", + ClusterID: "test-cluster-id", + ClusterName: "Test Cluster Name", + } +} + +type fakeRoundTripper struct{} + +func (*fakeRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { + return nil, fmt.Errorf("err: test used fake http client") +} + +// Stubs ClusterID so that ClusterID.getOrInitialize() does not require calling +// gce.Initialize() +func fakeClusterID(clusterID string) ClusterID { + return ClusterID{ + clusterID: &clusterID, + store: cache.NewStore(func(obj interface{}) (string, error) { + return "", nil + }), + } +} + +// NewFakeGCECloud constructs a fake GCE Cloud from the cluster values. +func NewFakeGCECloud(vals TestClusterValues) *Cloud { + client := &http.Client{Transport: &fakeRoundTripper{}} + service, _ := compute.New(client) + gce := &Cloud{ + region: vals.Region, + service: service, + managedZones: []string{vals.ZoneName}, + projectID: vals.ProjectID, + networkProjectID: vals.ProjectID, + ClusterID: fakeClusterID(vals.ClusterID), + } + c := cloud.NewMockGCE(&gceProjectRouter{gce}) + gce.c = c + return gce +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_firewall.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_firewall.go index e138df87471b..4aea49709562 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_firewall.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_firewall.go @@ -28,38 +28,38 @@ func newFirewallMetricContext(request string) *metricContext { } // GetFirewall returns the Firewall by name. -func (gce *GCECloud) GetFirewall(name string) (*compute.Firewall, error) { +func (g *Cloud) GetFirewall(name string) (*compute.Firewall, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newFirewallMetricContext("get") - v, err := gce.c.Firewalls().Get(ctx, meta.GlobalKey(name)) + v, err := g.c.Firewalls().Get(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } // CreateFirewall creates the passed firewall -func (gce *GCECloud) CreateFirewall(f *compute.Firewall) error { +func (g *Cloud) CreateFirewall(f *compute.Firewall) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newFirewallMetricContext("create") - return mc.Observe(gce.c.Firewalls().Insert(ctx, meta.GlobalKey(f.Name), f)) + return mc.Observe(g.c.Firewalls().Insert(ctx, meta.GlobalKey(f.Name), f)) } // DeleteFirewall deletes the given firewall rule. -func (gce *GCECloud) DeleteFirewall(name string) error { +func (g *Cloud) DeleteFirewall(name string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newFirewallMetricContext("delete") - return mc.Observe(gce.c.Firewalls().Delete(ctx, meta.GlobalKey(name))) + return mc.Observe(g.c.Firewalls().Delete(ctx, meta.GlobalKey(name))) } // UpdateFirewall applies the given firewall as an update to an existing service. 
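The new gce_fake.go above gives tests a `Cloud` whose `c` field is a `cloud.NewMockGCE` mock and whose HTTP transport always returns an error, so any code path that escapes the mock layer fails loudly instead of reaching real GCE. A sketch of how a test might use it:

```go
// Build a fake cloud from the canned test values.
vals := DefaultTestClusterValues() // test-project / us-central1 / us-central1-b
fakeGCE := NewFakeGCECloud(vals)

// ClusterID is pre-stubbed by fakeClusterID, so no Initialize()/informer
// machinery is needed before reading it:
id, err := fakeGCE.ClusterID.GetID() // id == "test-cluster-id", err == nil
```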
-func (gce *GCECloud) UpdateFirewall(f *compute.Firewall) error { +func (g *Cloud) UpdateFirewall(f *compute.Firewall) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newFirewallMetricContext("update") - return mc.Observe(gce.c.Firewalls().Update(ctx, meta.GlobalKey(f.Name), f)) + return mc.Observe(g.c.Firewalls().Update(ctx, meta.GlobalKey(f.Name), f)) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_forwardingrule.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_forwardingrule.go index b40652c98e32..5689cfd32eaa 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_forwardingrule.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_forwardingrule.go @@ -32,129 +32,129 @@ func newForwardingRuleMetricContextWithVersion(request, region, version string) } // CreateGlobalForwardingRule creates the passed GlobalForwardingRule -func (gce *GCECloud) CreateGlobalForwardingRule(rule *compute.ForwardingRule) error { +func (g *Cloud) CreateGlobalForwardingRule(rule *compute.ForwardingRule) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newForwardingRuleMetricContext("create", "") - return mc.Observe(gce.c.GlobalForwardingRules().Insert(ctx, meta.GlobalKey(rule.Name), rule)) + return mc.Observe(g.c.GlobalForwardingRules().Insert(ctx, meta.GlobalKey(rule.Name), rule)) } // SetProxyForGlobalForwardingRule links the given TargetHttp(s)Proxy with the given GlobalForwardingRule. // targetProxyLink is the SelfLink of a TargetHttp(s)Proxy. -func (gce *GCECloud) SetProxyForGlobalForwardingRule(forwardingRuleName, targetProxyLink string) error { +func (g *Cloud) SetProxyForGlobalForwardingRule(forwardingRuleName, targetProxyLink string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newForwardingRuleMetricContext("set_proxy", "") target := &compute.TargetReference{Target: targetProxyLink} - return mc.Observe(gce.c.GlobalForwardingRules().SetTarget(ctx, meta.GlobalKey(forwardingRuleName), target)) + return mc.Observe(g.c.GlobalForwardingRules().SetTarget(ctx, meta.GlobalKey(forwardingRuleName), target)) } // DeleteGlobalForwardingRule deletes the GlobalForwardingRule by name. -func (gce *GCECloud) DeleteGlobalForwardingRule(name string) error { +func (g *Cloud) DeleteGlobalForwardingRule(name string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newForwardingRuleMetricContext("delete", "") - return mc.Observe(gce.c.GlobalForwardingRules().Delete(ctx, meta.GlobalKey(name))) + return mc.Observe(g.c.GlobalForwardingRules().Delete(ctx, meta.GlobalKey(name))) } // GetGlobalForwardingRule returns the GlobalForwardingRule by name. -func (gce *GCECloud) GetGlobalForwardingRule(name string) (*compute.ForwardingRule, error) { +func (g *Cloud) GetGlobalForwardingRule(name string) (*compute.ForwardingRule, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newForwardingRuleMetricContext("get", "") - v, err := gce.c.GlobalForwardingRules().Get(ctx, meta.GlobalKey(name)) + v, err := g.c.GlobalForwardingRules().Get(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } // ListGlobalForwardingRules lists all GlobalForwardingRules in the project. 
-func (gce *GCECloud) ListGlobalForwardingRules() ([]*compute.ForwardingRule, error) { +func (g *Cloud) ListGlobalForwardingRules() ([]*compute.ForwardingRule, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newForwardingRuleMetricContext("list", "") - v, err := gce.c.GlobalForwardingRules().List(ctx, filter.None) + v, err := g.c.GlobalForwardingRules().List(ctx, filter.None) return v, mc.Observe(err) } // GetRegionForwardingRule returns the RegionalForwardingRule by name & region. -func (gce *GCECloud) GetRegionForwardingRule(name, region string) (*compute.ForwardingRule, error) { +func (g *Cloud) GetRegionForwardingRule(name, region string) (*compute.ForwardingRule, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newForwardingRuleMetricContext("get", region) - v, err := gce.c.ForwardingRules().Get(ctx, meta.RegionalKey(name, region)) + v, err := g.c.ForwardingRules().Get(ctx, meta.RegionalKey(name, region)) return v, mc.Observe(err) } // GetAlphaRegionForwardingRule returns the Alpha forwarding rule by name & region. -func (gce *GCECloud) GetAlphaRegionForwardingRule(name, region string) (*computealpha.ForwardingRule, error) { +func (g *Cloud) GetAlphaRegionForwardingRule(name, region string) (*computealpha.ForwardingRule, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newForwardingRuleMetricContextWithVersion("get", region, computeAlphaVersion) - v, err := gce.c.AlphaForwardingRules().Get(ctx, meta.RegionalKey(name, region)) + v, err := g.c.AlphaForwardingRules().Get(ctx, meta.RegionalKey(name, region)) return v, mc.Observe(err) } // ListRegionForwardingRules lists all RegionalForwardingRules in the project & region. -func (gce *GCECloud) ListRegionForwardingRules(region string) ([]*compute.ForwardingRule, error) { +func (g *Cloud) ListRegionForwardingRules(region string) ([]*compute.ForwardingRule, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newForwardingRuleMetricContext("list", region) - v, err := gce.c.ForwardingRules().List(ctx, region, filter.None) + v, err := g.c.ForwardingRules().List(ctx, region, filter.None) return v, mc.Observe(err) } // ListAlphaRegionForwardingRules lists all RegionalForwardingRules in the project & region. 
-func (gce *GCECloud) ListAlphaRegionForwardingRules(region string) ([]*computealpha.ForwardingRule, error) { +func (g *Cloud) ListAlphaRegionForwardingRules(region string) ([]*computealpha.ForwardingRule, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newForwardingRuleMetricContextWithVersion("list", region, computeAlphaVersion) - v, err := gce.c.AlphaForwardingRules().List(ctx, region, filter.None) + v, err := g.c.AlphaForwardingRules().List(ctx, region, filter.None) return v, mc.Observe(err) } // CreateRegionForwardingRule creates and returns a // RegionalForwardingRule that points to the given BackendService -func (gce *GCECloud) CreateRegionForwardingRule(rule *compute.ForwardingRule, region string) error { +func (g *Cloud) CreateRegionForwardingRule(rule *compute.ForwardingRule, region string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newForwardingRuleMetricContext("create", region) - return mc.Observe(gce.c.ForwardingRules().Insert(ctx, meta.RegionalKey(rule.Name, region), rule)) + return mc.Observe(g.c.ForwardingRules().Insert(ctx, meta.RegionalKey(rule.Name, region), rule)) } // CreateAlphaRegionForwardingRule creates and returns an Alpha // forwarding rule in the given region. -func (gce *GCECloud) CreateAlphaRegionForwardingRule(rule *computealpha.ForwardingRule, region string) error { +func (g *Cloud) CreateAlphaRegionForwardingRule(rule *computealpha.ForwardingRule, region string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newForwardingRuleMetricContextWithVersion("create", region, computeAlphaVersion) - return mc.Observe(gce.c.AlphaForwardingRules().Insert(ctx, meta.RegionalKey(rule.Name, region), rule)) + return mc.Observe(g.c.AlphaForwardingRules().Insert(ctx, meta.RegionalKey(rule.Name, region), rule)) } // DeleteRegionForwardingRule deletes the RegionalForwardingRule by name & region. -func (gce *GCECloud) DeleteRegionForwardingRule(name, region string) error { +func (g *Cloud) DeleteRegionForwardingRule(name, region string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newForwardingRuleMetricContext("delete", region) - return mc.Observe(gce.c.ForwardingRules().Delete(ctx, meta.RegionalKey(name, region))) + return mc.Observe(g.c.ForwardingRules().Delete(ctx, meta.RegionalKey(name, region))) } // TODO(#51665): retire this function once Network Tiers becomes Beta in GCP. -func (gce *GCECloud) getNetworkTierFromForwardingRule(name, region string) (string, error) { - if !gce.AlphaFeatureGate.Enabled(AlphaFeatureNetworkTiers) { +func (g *Cloud) getNetworkTierFromForwardingRule(name, region string) (string, error) { + if !g.AlphaFeatureGate.Enabled(AlphaFeatureNetworkTiers) { return cloud.NetworkTierDefault.ToGCEValue(), nil } - fwdRule, err := gce.GetAlphaRegionForwardingRule(name, region) + fwdRule, err := g.GetAlphaRegionForwardingRule(name, region) if err != nil { return handleAlphaNetworkTierGetError(err) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_healthchecks.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_healthchecks.go index 8dc913a6089d..d314376db121 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_healthchecks.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_healthchecks.go @@ -17,18 +17,18 @@ limitations under the License. 
package gce import ( - "github.com/golang/glog" + "k8s.io/klog" computealpha "google.golang.org/api/compute/v0.alpha" computebeta "google.golang.org/api/compute/v0.beta" compute "google.golang.org/api/compute/v1" "k8s.io/api/core/v1" + utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" "k8s.io/kubernetes/pkg/master/ports" - utilversion "k8s.io/kubernetes/pkg/util/version" ) const ( @@ -42,7 +42,7 @@ var ( func init() { if v, err := utilversion.ParseGeneric("1.7.2"); err != nil { - glog.Fatalf("Failed to parse version for minNodesHealthCheckVersion: %v", err) + klog.Fatalf("Failed to parse version for minNodesHealthCheckVersion: %v", err) } else { minNodesHealthCheckVersion = v } @@ -56,204 +56,204 @@ func newHealthcheckMetricContextWithVersion(request, version string) *metricCont return newGenericMetricContext("healthcheck", request, unusedMetricLabel, unusedMetricLabel, version) } -// GetHttpHealthCheck returns the given HttpHealthCheck by name. -func (gce *GCECloud) GetHttpHealthCheck(name string) (*compute.HttpHealthCheck, error) { +// GetHTTPHealthCheck returns the given HttpHealthCheck by name. +func (g *Cloud) GetHTTPHealthCheck(name string) (*compute.HttpHealthCheck, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContext("get_legacy") - v, err := gce.c.HttpHealthChecks().Get(ctx, meta.GlobalKey(name)) + v, err := g.c.HttpHealthChecks().Get(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } -// UpdateHttpHealthCheck applies the given HttpHealthCheck as an update. -func (gce *GCECloud) UpdateHttpHealthCheck(hc *compute.HttpHealthCheck) error { +// UpdateHTTPHealthCheck applies the given HttpHealthCheck as an update. +func (g *Cloud) UpdateHTTPHealthCheck(hc *compute.HttpHealthCheck) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContext("update_legacy") - return mc.Observe(gce.c.HttpHealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc)) + return mc.Observe(g.c.HttpHealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc)) } -// DeleteHttpHealthCheck deletes the given HttpHealthCheck by name. -func (gce *GCECloud) DeleteHttpHealthCheck(name string) error { +// DeleteHTTPHealthCheck deletes the given HttpHealthCheck by name. +func (g *Cloud) DeleteHTTPHealthCheck(name string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContext("delete_legacy") - return mc.Observe(gce.c.HttpHealthChecks().Delete(ctx, meta.GlobalKey(name))) + return mc.Observe(g.c.HttpHealthChecks().Delete(ctx, meta.GlobalKey(name))) } -// CreateHttpHealthCheck creates the given HttpHealthCheck. -func (gce *GCECloud) CreateHttpHealthCheck(hc *compute.HttpHealthCheck) error { +// CreateHTTPHealthCheck creates the given HttpHealthCheck. +func (g *Cloud) CreateHTTPHealthCheck(hc *compute.HttpHealthCheck) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContext("create_legacy") - return mc.Observe(gce.c.HttpHealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc)) + return mc.Observe(g.c.HttpHealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc)) } -// ListHttpHealthChecks lists all HttpHealthChecks in the project. 
-func (gce *GCECloud) ListHttpHealthChecks() ([]*compute.HttpHealthCheck, error) { +// ListHTTPHealthChecks lists all HttpHealthChecks in the project. +func (g *Cloud) ListHTTPHealthChecks() ([]*compute.HttpHealthCheck, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContext("list_legacy") - v, err := gce.c.HttpHealthChecks().List(ctx, filter.None) + v, err := g.c.HttpHealthChecks().List(ctx, filter.None) return v, mc.Observe(err) } // Legacy HTTPS Health Checks -// GetHttpsHealthCheck returns the given HttpsHealthCheck by name. -func (gce *GCECloud) GetHttpsHealthCheck(name string) (*compute.HttpsHealthCheck, error) { +// GetHTTPSHealthCheck returns the given HttpsHealthCheck by name. +func (g *Cloud) GetHTTPSHealthCheck(name string) (*compute.HttpsHealthCheck, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContext("get_legacy") - v, err := gce.c.HttpsHealthChecks().Get(ctx, meta.GlobalKey(name)) + v, err := g.c.HttpsHealthChecks().Get(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } -// UpdateHttpsHealthCheck applies the given HttpsHealthCheck as an update. -func (gce *GCECloud) UpdateHttpsHealthCheck(hc *compute.HttpsHealthCheck) error { +// UpdateHTTPSHealthCheck applies the given HttpsHealthCheck as an update. +func (g *Cloud) UpdateHTTPSHealthCheck(hc *compute.HttpsHealthCheck) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContext("update_legacy") - return mc.Observe(gce.c.HttpsHealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc)) + return mc.Observe(g.c.HttpsHealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc)) } -// DeleteHttpsHealthCheck deletes the given HttpsHealthCheck by name. -func (gce *GCECloud) DeleteHttpsHealthCheck(name string) error { +// DeleteHTTPSHealthCheck deletes the given HttpsHealthCheck by name. +func (g *Cloud) DeleteHTTPSHealthCheck(name string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContext("delete_legacy") - return mc.Observe(gce.c.HttpsHealthChecks().Delete(ctx, meta.GlobalKey(name))) + return mc.Observe(g.c.HttpsHealthChecks().Delete(ctx, meta.GlobalKey(name))) } -// CreateHttpsHealthCheck creates the given HttpsHealthCheck. -func (gce *GCECloud) CreateHttpsHealthCheck(hc *compute.HttpsHealthCheck) error { +// CreateHTTPSHealthCheck creates the given HttpsHealthCheck. +func (g *Cloud) CreateHTTPSHealthCheck(hc *compute.HttpsHealthCheck) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContext("create_legacy") - return mc.Observe(gce.c.HttpsHealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc)) + return mc.Observe(g.c.HttpsHealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc)) } -// ListHttpsHealthChecks lists all HttpsHealthChecks in the project. -func (gce *GCECloud) ListHttpsHealthChecks() ([]*compute.HttpsHealthCheck, error) { +// ListHTTPSHealthChecks lists all HttpsHealthChecks in the project. +func (g *Cloud) ListHTTPSHealthChecks() ([]*compute.HttpsHealthCheck, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContext("list_legacy") - v, err := gce.c.HttpsHealthChecks().List(ctx, filter.None) + v, err := g.c.HttpsHealthChecks().List(ctx, filter.None) return v, mc.Observe(err) } // Generic HealthCheck // GetHealthCheck returns the given HealthCheck by name. 
-func (gce *GCECloud) GetHealthCheck(name string) (*compute.HealthCheck, error) { +func (g *Cloud) GetHealthCheck(name string) (*compute.HealthCheck, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContext("get") - v, err := gce.c.HealthChecks().Get(ctx, meta.GlobalKey(name)) + v, err := g.c.HealthChecks().Get(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } // GetAlphaHealthCheck returns the given alpha HealthCheck by name. -func (gce *GCECloud) GetAlphaHealthCheck(name string) (*computealpha.HealthCheck, error) { +func (g *Cloud) GetAlphaHealthCheck(name string) (*computealpha.HealthCheck, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContextWithVersion("get", computeAlphaVersion) - v, err := gce.c.AlphaHealthChecks().Get(ctx, meta.GlobalKey(name)) + v, err := g.c.AlphaHealthChecks().Get(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } // GetBetaHealthCheck returns the given beta HealthCheck by name. -func (gce *GCECloud) GetBetaHealthCheck(name string) (*computebeta.HealthCheck, error) { +func (g *Cloud) GetBetaHealthCheck(name string) (*computebeta.HealthCheck, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContextWithVersion("get", computeBetaVersion) - v, err := gce.c.BetaHealthChecks().Get(ctx, meta.GlobalKey(name)) + v, err := g.c.BetaHealthChecks().Get(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } // UpdateHealthCheck applies the given HealthCheck as an update. -func (gce *GCECloud) UpdateHealthCheck(hc *compute.HealthCheck) error { +func (g *Cloud) UpdateHealthCheck(hc *compute.HealthCheck) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContext("update") - return mc.Observe(gce.c.HealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc)) + return mc.Observe(g.c.HealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc)) } // UpdateAlphaHealthCheck applies the given alpha HealthCheck as an update. -func (gce *GCECloud) UpdateAlphaHealthCheck(hc *computealpha.HealthCheck) error { +func (g *Cloud) UpdateAlphaHealthCheck(hc *computealpha.HealthCheck) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContextWithVersion("update", computeAlphaVersion) - return mc.Observe(gce.c.AlphaHealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc)) + return mc.Observe(g.c.AlphaHealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc)) } // UpdateBetaHealthCheck applies the given beta HealthCheck as an update. -func (gce *GCECloud) UpdateBetaHealthCheck(hc *computebeta.HealthCheck) error { +func (g *Cloud) UpdateBetaHealthCheck(hc *computebeta.HealthCheck) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContextWithVersion("update", computeBetaVersion) - return mc.Observe(gce.c.BetaHealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc)) + return mc.Observe(g.c.BetaHealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc)) } // DeleteHealthCheck deletes the given HealthCheck by name. -func (gce *GCECloud) DeleteHealthCheck(name string) error { +func (g *Cloud) DeleteHealthCheck(name string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContext("delete") - return mc.Observe(gce.c.HealthChecks().Delete(ctx, meta.GlobalKey(name))) + return mc.Observe(g.c.HealthChecks().Delete(ctx, meta.GlobalKey(name))) } // CreateHealthCheck creates the given HealthCheck. 
-func (gce *GCECloud) CreateHealthCheck(hc *compute.HealthCheck) error { +func (g *Cloud) CreateHealthCheck(hc *compute.HealthCheck) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContext("create") - return mc.Observe(gce.c.HealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc)) + return mc.Observe(g.c.HealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc)) } // CreateAlphaHealthCheck creates the given alpha HealthCheck. -func (gce *GCECloud) CreateAlphaHealthCheck(hc *computealpha.HealthCheck) error { +func (g *Cloud) CreateAlphaHealthCheck(hc *computealpha.HealthCheck) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContextWithVersion("create", computeAlphaVersion) - return mc.Observe(gce.c.AlphaHealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc)) + return mc.Observe(g.c.AlphaHealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc)) } // CreateBetaHealthCheck creates the given beta HealthCheck. -func (gce *GCECloud) CreateBetaHealthCheck(hc *computebeta.HealthCheck) error { +func (g *Cloud) CreateBetaHealthCheck(hc *computebeta.HealthCheck) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContextWithVersion("create", computeBetaVersion) - return mc.Observe(gce.c.BetaHealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc)) + return mc.Observe(g.c.BetaHealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc)) } // ListHealthChecks lists all HealthCheck in the project. -func (gce *GCECloud) ListHealthChecks() ([]*compute.HealthCheck, error) { +func (g *Cloud) ListHealthChecks() ([]*compute.HealthCheck, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newHealthcheckMetricContext("list") - v, err := gce.c.HealthChecks().List(ctx, filter.None) + v, err := g.c.HealthChecks().List(ctx, filter.None) return v, mc.Observe(err) } @@ -274,7 +274,7 @@ func GetNodesHealthCheckPath() string { func isAtLeastMinNodesHealthCheckVersion(vstring string) bool { version, err := utilversion.ParseGeneric(vstring) if err != nil { - glog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err) + klog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err) return false } return version.AtLeast(minNodesHealthCheckVersion) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_instancegroup.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_instancegroup.go index 13b2c51e503b..edc5f093339b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_instancegroup.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_instancegroup.go @@ -30,49 +30,49 @@ func newInstanceGroupMetricContext(request string, zone string) *metricContext { // CreateInstanceGroup creates an instance group with the given // instances. It is the callers responsibility to add named ports. -func (gce *GCECloud) CreateInstanceGroup(ig *compute.InstanceGroup, zone string) error { +func (g *Cloud) CreateInstanceGroup(ig *compute.InstanceGroup, zone string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newInstanceGroupMetricContext("create", zone) - return mc.Observe(gce.c.InstanceGroups().Insert(ctx, meta.ZonalKey(ig.Name, zone), ig)) + return mc.Observe(g.c.InstanceGroups().Insert(ctx, meta.ZonalKey(ig.Name, zone), ig)) } // DeleteInstanceGroup deletes an instance group. 
-func (gce *GCECloud) DeleteInstanceGroup(name string, zone string) error { +func (g *Cloud) DeleteInstanceGroup(name string, zone string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newInstanceGroupMetricContext("delete", zone) - return mc.Observe(gce.c.InstanceGroups().Delete(ctx, meta.ZonalKey(name, zone))) + return mc.Observe(g.c.InstanceGroups().Delete(ctx, meta.ZonalKey(name, zone))) } // ListInstanceGroups lists all InstanceGroups in the project and // zone. -func (gce *GCECloud) ListInstanceGroups(zone string) ([]*compute.InstanceGroup, error) { +func (g *Cloud) ListInstanceGroups(zone string) ([]*compute.InstanceGroup, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newInstanceGroupMetricContext("list", zone) - v, err := gce.c.InstanceGroups().List(ctx, zone, filter.None) + v, err := g.c.InstanceGroups().List(ctx, zone, filter.None) return v, mc.Observe(err) } // ListInstancesInInstanceGroup lists all the instances in a given // instance group and state. -func (gce *GCECloud) ListInstancesInInstanceGroup(name string, zone string, state string) ([]*compute.InstanceWithNamedPorts, error) { +func (g *Cloud) ListInstancesInInstanceGroup(name string, zone string, state string) ([]*compute.InstanceWithNamedPorts, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newInstanceGroupMetricContext("list_instances", zone) req := &compute.InstanceGroupsListInstancesRequest{InstanceState: state} - v, err := gce.c.InstanceGroups().ListInstances(ctx, meta.ZonalKey(name, zone), req, filter.None) + v, err := g.c.InstanceGroups().ListInstances(ctx, meta.ZonalKey(name, zone), req, filter.None) return v, mc.Observe(err) } // AddInstancesToInstanceGroup adds the given instances to the given // instance group. -func (gce *GCECloud) AddInstancesToInstanceGroup(name string, zone string, instanceRefs []*compute.InstanceReference) error { +func (g *Cloud) AddInstancesToInstanceGroup(name string, zone string, instanceRefs []*compute.InstanceReference) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() @@ -84,12 +84,12 @@ func (gce *GCECloud) AddInstancesToInstanceGroup(name string, zone string, insta req := &compute.InstanceGroupsAddInstancesRequest{ Instances: instanceRefs, } - return mc.Observe(gce.c.InstanceGroups().AddInstances(ctx, meta.ZonalKey(name, zone), req)) + return mc.Observe(g.c.InstanceGroups().AddInstances(ctx, meta.ZonalKey(name, zone), req)) } // RemoveInstancesFromInstanceGroup removes the given instances from // the instance group. 
-func (gce *GCECloud) RemoveInstancesFromInstanceGroup(name string, zone string, instanceRefs []*compute.InstanceReference) error { +func (g *Cloud) RemoveInstancesFromInstanceGroup(name string, zone string, instanceRefs []*compute.InstanceReference) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() @@ -101,25 +101,25 @@ func (gce *GCECloud) RemoveInstancesFromInstanceGroup(name string, zone string, req := &compute.InstanceGroupsRemoveInstancesRequest{ Instances: instanceRefs, } - return mc.Observe(gce.c.InstanceGroups().RemoveInstances(ctx, meta.ZonalKey(name, zone), req)) + return mc.Observe(g.c.InstanceGroups().RemoveInstances(ctx, meta.ZonalKey(name, zone), req)) } // SetNamedPortsOfInstanceGroup sets the list of named ports on a given instance group -func (gce *GCECloud) SetNamedPortsOfInstanceGroup(igName, zone string, namedPorts []*compute.NamedPort) error { +func (g *Cloud) SetNamedPortsOfInstanceGroup(igName, zone string, namedPorts []*compute.NamedPort) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newInstanceGroupMetricContext("set_namedports", zone) req := &compute.InstanceGroupsSetNamedPortsRequest{NamedPorts: namedPorts} - return mc.Observe(gce.c.InstanceGroups().SetNamedPorts(ctx, meta.ZonalKey(igName, zone), req)) + return mc.Observe(g.c.InstanceGroups().SetNamedPorts(ctx, meta.ZonalKey(igName, zone), req)) } // GetInstanceGroup returns an instance group by name. -func (gce *GCECloud) GetInstanceGroup(name string, zone string) (*compute.InstanceGroup, error) { +func (g *Cloud) GetInstanceGroup(name string, zone string) (*compute.InstanceGroup, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newInstanceGroupMetricContext("get", zone) - v, err := gce.c.InstanceGroups().Get(ctx, meta.ZonalKey(name, zone)) + v, err := g.c.InstanceGroups().Get(ctx, meta.ZonalKey(name, zone)) return v, mc.Observe(err) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_instances.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_instances.go index 4e791accd420..e8345abfae56 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_instances.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_instances.go @@ -25,15 +25,15 @@ import ( "time" "cloud.google.com/go/compute/metadata" - "github.com/golang/glog" computebeta "google.golang.org/api/compute/v0.beta" compute "google.golang.org/api/compute/v1" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" @@ -67,22 +67,22 @@ func getZone(n *v1.Node) string { return zone } -func makeHostURL(projectsApiEndpoint, projectID, zone, host string) string { +func makeHostURL(projectsAPIEndpoint, projectID, zone, host string) string { host = canonicalizeInstanceName(host) - return projectsApiEndpoint + strings.Join([]string{projectID, "zones", zone, "instances", host}, "/") + return projectsAPIEndpoint + strings.Join([]string{projectID, "zones", zone, "instances", host}, "/") } // ToInstanceReferences returns instance references by links -func (gce *GCECloud) ToInstanceReferences(zone string, 
instanceNames []string) (refs []*compute.InstanceReference) { +func (g *Cloud) ToInstanceReferences(zone string, instanceNames []string) (refs []*compute.InstanceReference) { for _, ins := range instanceNames { - instanceLink := makeHostURL(gce.service.BasePath, gce.projectID, zone, ins) + instanceLink := makeHostURL(g.service.BasePath, g.projectID, zone, ins) refs = append(refs, &compute.InstanceReference{Instance: instanceLink}) } return refs } // NodeAddresses is an implementation of Instances.NodeAddresses. -func (gce *GCECloud) NodeAddresses(_ context.Context, _ types.NodeName) ([]v1.NodeAddress, error) { +func (g *Cloud) NodeAddresses(_ context.Context, _ types.NodeName) ([]v1.NodeAddress, error) { internalIP, err := metadata.Get("instance/network-interfaces/0/ip") if err != nil { return nil, fmt.Errorf("couldn't get internal IP: %v", err) @@ -97,7 +97,7 @@ func (gce *GCECloud) NodeAddresses(_ context.Context, _ types.NodeName) ([]v1.No } if internalDNSFull, err := metadata.Get("instance/hostname"); err != nil { - glog.Warningf("couldn't get full internal DNS name: %v", err) + klog.Warningf("couldn't get full internal DNS name: %v", err) } else { addresses = append(addresses, v1.NodeAddress{Type: v1.NodeInternalDNS, Address: internalDNSFull}, @@ -109,7 +109,7 @@ func (gce *GCECloud) NodeAddresses(_ context.Context, _ types.NodeName) ([]v1.No // NodeAddressesByProviderID will not be called from the node that is requesting this ID. // i.e. metadata service and other local methods cannot be used here -func (gce *GCECloud) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) { +func (g *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() @@ -118,7 +118,7 @@ func (gce *GCECloud) NodeAddressesByProviderID(ctx context.Context, providerID s return []v1.NodeAddress{}, err } - instance, err := gce.c.Instances().Get(ctx, meta.ZonalKey(canonicalizeInstanceName(name), zone)) + instance, err := g.c.Instances().Get(ctx, meta.ZonalKey(canonicalizeInstanceName(name), zone)) if err != nil { return []v1.NodeAddress{}, fmt.Errorf("error while querying for providerID %q: %v", providerID, err) } @@ -138,13 +138,13 @@ func (gce *GCECloud) NodeAddressesByProviderID(ctx context.Context, providerID s // instanceByProviderID returns the cloudprovider instance of the node // with the specified unique providerID -func (gce *GCECloud) instanceByProviderID(providerID string) (*gceInstance, error) { +func (g *Cloud) instanceByProviderID(providerID string) (*gceInstance, error) { project, zone, name, err := splitProviderID(providerID) if err != nil { return nil, err } - instance, err := gce.getInstanceFromProjectInZoneByName(project, zone, name) + instance, err := g.getInstanceFromProjectInZoneByName(project, zone, name) if err != nil { if isHTTPErrorCode(err, http.StatusNotFound) { return nil, cloudprovider.InstanceNotFound @@ -156,7 +156,7 @@ func (gce *GCECloud) instanceByProviderID(providerID string) (*gceInstance, erro } // InstanceShutdownByProviderID returns true if the instance is in safe state to detach volumes -func (gce *GCECloud) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) { +func (g *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) { return false, cloudprovider.NotImplemented } @@ -164,8 +164,8 @@ func (gce *GCECloud) InstanceShutdownByProviderID(ctx context.Context, providerI 
// with the specified unique providerID This method will not be called from the // node that is requesting this ID. i.e. metadata service and other local // methods cannot be used here -func (gce *GCECloud) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) { - instance, err := gce.instanceByProviderID(providerID) +func (g *Cloud) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) { + instance, err := g.instanceByProviderID(providerID) if err != nil { return "", err } @@ -175,8 +175,8 @@ func (gce *GCECloud) InstanceTypeByProviderID(ctx context.Context, providerID st // InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running. // If false is returned with no error, the instance will be immediately deleted by the cloud controller manager. -func (gce *GCECloud) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) { - _, err := gce.instanceByProviderID(providerID) +func (g *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) { + _, err := g.instanceByProviderID(providerID) if err != nil { if err == cloudprovider.InstanceNotFound { return false, nil @@ -188,51 +188,53 @@ func (gce *GCECloud) InstanceExistsByProviderID(ctx context.Context, providerID } // InstanceID returns the cloud provider ID of the node with the specified NodeName. -func (gce *GCECloud) InstanceID(ctx context.Context, nodeName types.NodeName) (string, error) { +func (g *Cloud) InstanceID(ctx context.Context, nodeName types.NodeName) (string, error) { instanceName := mapNodeNameToInstanceName(nodeName) - if gce.useMetadataServer { + if g.useMetadataServer { // Use metadata, if possible, to fetch ID. See issue #12000 - if gce.isCurrentInstance(instanceName) { + if g.isCurrentInstance(instanceName) { projectID, zone, err := getProjectAndZone() if err == nil { return projectID + "/" + zone + "/" + canonicalizeInstanceName(instanceName), nil } } } - instance, err := gce.getInstanceByName(instanceName) + instance, err := g.getInstanceByName(instanceName) if err != nil { return "", err } - return gce.projectID + "/" + instance.Zone + "/" + instance.Name, nil + return g.projectID + "/" + instance.Zone + "/" + instance.Name, nil } // InstanceType returns the type of the specified node with the specified NodeName. -func (gce *GCECloud) InstanceType(ctx context.Context, nodeName types.NodeName) (string, error) { +func (g *Cloud) InstanceType(ctx context.Context, nodeName types.NodeName) (string, error) { instanceName := mapNodeNameToInstanceName(nodeName) - if gce.useMetadataServer { + if g.useMetadataServer { // Use metadata, if possible, to fetch ID. 
See issue #12000
-		if gce.isCurrentInstance(instanceName) {
+		if g.isCurrentInstance(instanceName) {
 			mType, err := getCurrentMachineTypeViaMetadata()
 			if err == nil {
 				return mType, nil
 			}
 		}
 	}
-	instance, err := gce.getInstanceByName(instanceName)
+	instance, err := g.getInstanceByName(instanceName)
 	if err != nil {
 		return "", err
 	}
 	return instance.Type, nil
 }

-func (gce *GCECloud) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error {
+// AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances
+// expected format for the key is standard ssh-keygen format: <protocol> <blob>
+func (g *Cloud) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error {
 	ctx, cancel := cloud.ContextWithCallTimeout()
 	defer cancel()

 	return wait.Poll(2*time.Second, 30*time.Second, func() (bool, error) {
-		project, err := gce.c.Projects().Get(ctx, gce.projectID)
+		project, err := g.c.Projects().Get(ctx, g.projectID)
 		if err != nil {
-			glog.Errorf("Could not get project: %v", err)
+			klog.Errorf("Could not get project: %v", err)
 			return false, nil
 		}
 		keyString := fmt.Sprintf("%s:%s %s@%s", user, strings.TrimSpace(string(keyData)), user, user)
@@ -241,7 +243,7 @@ func (gce *GCECloud) AddSSHKeyToAllInstances(ctx context.Context, user string, k
 			if item.Key == "sshKeys" {
 				if strings.Contains(*item.Value, keyString) {
 					// We've already added the key
-					glog.Info("SSHKey already in project metadata")
+					klog.Info("SSHKey already in project metadata")
 					return true, nil
 				}
 				value := *item.Value + "\n" + keyString
@@ -252,7 +254,7 @@ func (gce *GCECloud) AddSSHKeyToAllInstances(ctx context.Context, user string, k
 		}
 		if !found {
 			// This is super unlikely, so log.
-			glog.Infof("Failed to find sshKeys metadata, creating a new item")
+			klog.Infof("Failed to find sshKeys metadata, creating a new item")
 			project.CommonInstanceMetadata.Items = append(project.CommonInstanceMetadata.Items,
 				&compute.MetadataItems{
 					Key: "sshKeys",
@@ -261,31 +263,31 @@ func (gce *GCECloud) AddSSHKeyToAllInstances(ctx context.Context, user string, k
 		}
 		mc := newInstancesMetricContext("add_ssh_key", "")
-		err = gce.c.Projects().SetCommonInstanceMetadata(ctx, gce.projectID, project.CommonInstanceMetadata)
+		err = g.c.Projects().SetCommonInstanceMetadata(ctx, g.projectID, project.CommonInstanceMetadata)
 		mc.Observe(err)

 		if err != nil {
-			glog.Errorf("Could not Set Metadata: %v", err)
+			klog.Errorf("Could not Set Metadata: %v", err)
 			return false, nil
 		}
-		glog.Infof("Successfully added sshKey to project metadata")
+		klog.Infof("Successfully added sshKey to project metadata")
 		return true, nil
 	})
 }

 // GetAllCurrentZones returns all the zones in which k8s nodes are currently running
-func (gce *GCECloud) GetAllCurrentZones() (sets.String, error) {
-	if gce.nodeInformerSynced == nil {
-		glog.Warningf("GCECloud object does not have informers set, should only happen in E2E binary.")
-		return gce.GetAllZonesFromCloudProvider()
-	}
-	gce.nodeZonesLock.Lock()
-	defer gce.nodeZonesLock.Unlock()
-	if !gce.nodeInformerSynced() {
+func (g *Cloud) GetAllCurrentZones() (sets.String, error) {
+	if g.nodeInformerSynced == nil {
+		klog.Warningf("Cloud object does not have informers set, should only happen in E2E binary.")
+		return g.GetAllZonesFromCloudProvider()
+	}
+	g.nodeZonesLock.Lock()
+	defer g.nodeZonesLock.Unlock()
+	if !g.nodeInformerSynced() {
 		return nil, fmt.Errorf("node informer is not synced when trying to GetAllCurrentZones")
 	}
 	zones := sets.NewString()
-	for zone, nodes := range gce.nodeZones {
+	for zone, nodes := range g.nodeZones {
 		if len(nodes) > 0 {
 			zones.Insert(zone)
 		}
@@ -300,13 +302,13 @@ func (gce *GCECloud) GetAllCurrentZones() (sets.String, error) {
 // a non-k8s compute in us-central1-a. This func will return a, b, and c.
 //
 // TODO: this should be removed from the cloud provider.
-func (gce *GCECloud) GetAllZonesFromCloudProvider() (sets.String, error) {
+func (g *Cloud) GetAllZonesFromCloudProvider() (sets.String, error) {
 	ctx, cancel := cloud.ContextWithCallTimeout()
 	defer cancel()

 	zones := sets.NewString()
-	for _, zone := range gce.managedZones {
-		instances, err := gce.c.Instances().List(ctx, zone, filter.None)
+	for _, zone := range g.managedZones {
+		instances, err := g.c.Instances().List(ctx, zone, filter.None)
 		if err != nil {
 			return sets.NewString(), err
 		}
@@ -318,22 +320,22 @@ func (gce *GCECloud) GetAllZonesFromCloudProvider() (sets.String, error) {
 }

 // InsertInstance creates a new instance on GCP
-func (gce *GCECloud) InsertInstance(project string, zone string, i *compute.Instance) error {
+func (g *Cloud) InsertInstance(project string, zone string, i *compute.Instance) error {
 	ctx, cancel := cloud.ContextWithCallTimeout()
 	defer cancel()

 	mc := newInstancesMetricContext("create", zone)
-	return mc.Observe(gce.c.Instances().Insert(ctx, meta.ZonalKey(i.Name, zone), i))
+	return mc.Observe(g.c.Instances().Insert(ctx, meta.ZonalKey(i.Name, zone), i))
 }

 // ListInstanceNames returns a string of instance names separated by spaces.
 // This method should only be used for e2e testing.
 // TODO: remove this method.
-func (gce *GCECloud) ListInstanceNames(project, zone string) (string, error) {
+func (g *Cloud) ListInstanceNames(project, zone string) (string, error) {
 	ctx, cancel := cloud.ContextWithCallTimeout()
 	defer cancel()

-	l, err := gce.c.Instances().List(ctx, zone, filter.None)
+	l, err := g.c.Instances().List(ctx, zone, filter.None)
 	if err != nil {
 		return "", err
 	}
@@ -345,33 +347,34 @@ func (gce *GCECloud) ListInstanceNames(project, zone string) (string, error) {
 }

 // DeleteInstance deletes an instance specified by project, zone, and name
-func (gce *GCECloud) DeleteInstance(project, zone, name string) error {
+func (g *Cloud) DeleteInstance(project, zone, name string) error {
 	ctx, cancel := cloud.ContextWithCallTimeout()
 	defer cancel()

-	return gce.c.Instances().Delete(ctx, meta.ZonalKey(name, zone))
+	return g.c.Instances().Delete(ctx, meta.ZonalKey(name, zone))
 }

-// Implementation of Instances.CurrentNodeName
-func (gce *GCECloud) CurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error) {
+// CurrentNodeName returns the name of the node we are currently running on
+// On most clouds (e.g. GCE) this is the hostname, so we provide the hostname
+func (g *Cloud) CurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error) {
 	return types.NodeName(hostname), nil
 }

 // AliasRanges returns a list of CIDR ranges that are assigned to the
 // `node` for allocation to pods. Returns a list of the form
 // "<ip>/<subnet>".
-func (gce *GCECloud) AliasRanges(nodeName types.NodeName) (cidrs []string, err error) { +func (g *Cloud) AliasRanges(nodeName types.NodeName) (cidrs []string, err error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() var instance *gceInstance - instance, err = gce.getInstanceByName(mapNodeNameToInstanceName(nodeName)) + instance, err = g.getInstanceByName(mapNodeNameToInstanceName(nodeName)) if err != nil { return } var res *computebeta.Instance - res, err = gce.c.BetaInstances().Get(ctx, meta.ZonalKey(instance.Name, lastComponent(instance.Zone))) + res, err = g.c.BetaInstances().Get(ctx, meta.ZonalKey(instance.Name, lastComponent(instance.Zone))) if err != nil { return } @@ -386,15 +389,15 @@ func (gce *GCECloud) AliasRanges(nodeName types.NodeName) (cidrs []string, err e // AddAliasToInstance adds an alias to the given instance from the named // secondary range. -func (gce *GCECloud) AddAliasToInstance(nodeName types.NodeName, alias *net.IPNet) error { +func (g *Cloud) AddAliasToInstance(nodeName types.NodeName, alias *net.IPNet) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() - v1instance, err := gce.getInstanceByName(mapNodeNameToInstanceName(nodeName)) + v1instance, err := g.getInstanceByName(mapNodeNameToInstanceName(nodeName)) if err != nil { return err } - instance, err := gce.c.BetaInstances().Get(ctx, meta.ZonalKey(v1instance.Name, lastComponent(v1instance.Zone))) + instance, err := g.c.BetaInstances().Get(ctx, meta.ZonalKey(v1instance.Name, lastComponent(v1instance.Zone))) if err != nil { return err } @@ -404,7 +407,7 @@ func (gce *GCECloud) AddAliasToInstance(nodeName types.NodeName, alias *net.IPNe return fmt.Errorf("instance %q has no network interfaces", nodeName) case 1: default: - glog.Warningf("Instance %q has more than one network interface, using only the first (%v)", + klog.Warningf("Instance %q has more than one network interface, using only the first (%v)", nodeName, instance.NetworkInterfaces) } @@ -413,38 +416,38 @@ func (gce *GCECloud) AddAliasToInstance(nodeName types.NodeName, alias *net.IPNe iface.Fingerprint = instance.NetworkInterfaces[0].Fingerprint iface.AliasIpRanges = append(iface.AliasIpRanges, &computebeta.AliasIpRange{ IpCidrRange: alias.String(), - SubnetworkRangeName: gce.secondaryRangeName, + SubnetworkRangeName: g.secondaryRangeName, }) mc := newInstancesMetricContext("add_alias", v1instance.Zone) - err = gce.c.BetaInstances().UpdateNetworkInterface(ctx, meta.ZonalKey(instance.Name, lastComponent(instance.Zone)), iface.Name, iface) + err = g.c.BetaInstances().UpdateNetworkInterface(ctx, meta.ZonalKey(instance.Name, lastComponent(instance.Zone)), iface.Name, iface) return mc.Observe(err) } // Gets the named instances, returning cloudprovider.InstanceNotFound if any // instance is not found -func (gce *GCECloud) getInstancesByNames(names []string) ([]*gceInstance, error) { +func (g *Cloud) getInstancesByNames(names []string) ([]*gceInstance, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() found := map[string]*gceInstance{} remaining := len(names) - nodeInstancePrefix := gce.nodeInstancePrefix + nodeInstancePrefix := g.nodeInstancePrefix for _, name := range names { name = canonicalizeInstanceName(name) - if !strings.HasPrefix(name, gce.nodeInstancePrefix) { - glog.Warningf("Instance %q does not conform to prefix %q, removing filter", name, gce.nodeInstancePrefix) + if !strings.HasPrefix(name, g.nodeInstancePrefix) { + klog.Warningf("Instance %q does not conform to prefix %q, removing 
filter", name, g.nodeInstancePrefix) nodeInstancePrefix = "" } found[name] = nil } - for _, zone := range gce.managedZones { + for _, zone := range g.managedZones { if remaining == 0 { break } - instances, err := gce.c.Instances().List(ctx, zone, filter.Regexp("name", nodeInstancePrefix+".*")) + instances, err := g.c.Instances().List(ctx, zone, filter.Regexp("name", nodeInstancePrefix+".*")) if err != nil { return nil, err } @@ -456,7 +459,7 @@ func (gce *GCECloud) getInstancesByNames(names []string) ([]*gceInstance, error) continue } if found[inst.Name] != nil { - glog.Errorf("Instance name %q was duplicated (in zone %q and %q)", inst.Name, zone, found[inst.Name].Zone) + klog.Errorf("Instance name %q was duplicated (in zone %q and %q)", inst.Name, zone, found[inst.Name].Zone) continue } found[inst.Name] = &gceInstance{ @@ -477,7 +480,7 @@ func (gce *GCECloud) getInstancesByNames(names []string) ([]*gceInstance, error) failed = append(failed, k) } } - glog.Errorf("Failed to retrieve instances: %v", failed) + klog.Errorf("Failed to retrieve instances: %v", failed) return nil, cloudprovider.InstanceNotFound } @@ -490,15 +493,15 @@ func (gce *GCECloud) getInstancesByNames(names []string) ([]*gceInstance, error) } // Gets the named instance, returning cloudprovider.InstanceNotFound if the instance is not found -func (gce *GCECloud) getInstanceByName(name string) (*gceInstance, error) { +func (g *Cloud) getInstanceByName(name string) (*gceInstance, error) { // Avoid changing behaviour when not managing multiple zones - for _, zone := range gce.managedZones { - instance, err := gce.getInstanceFromProjectInZoneByName(gce.projectID, zone, name) + for _, zone := range g.managedZones { + instance, err := g.getInstanceFromProjectInZoneByName(g.projectID, zone, name) if err != nil { if isHTTPErrorCode(err, http.StatusNotFound) { continue } - glog.Errorf("getInstanceByName: failed to get instance %s in zone %s; err: %v", name, zone, err) + klog.Errorf("getInstanceByName: failed to get instance %s in zone %s; err: %v", name, zone, err) return nil, err } return instance, nil @@ -507,13 +510,13 @@ func (gce *GCECloud) getInstanceByName(name string) (*gceInstance, error) { return nil, cloudprovider.InstanceNotFound } -func (gce *GCECloud) getInstanceFromProjectInZoneByName(project, zone, name string) (*gceInstance, error) { +func (g *Cloud) getInstanceFromProjectInZoneByName(project, zone, name string) (*gceInstance, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() name = canonicalizeInstanceName(name) mc := newInstancesMetricContext("get", zone) - res, err := gce.c.Instances().Get(ctx, meta.ZonalKey(name, zone)) + res, err := g.c.Instances().Get(ctx, meta.ZonalKey(name, zone)) mc.Observe(err) if err != nil { return nil, err @@ -554,11 +557,11 @@ func getCurrentMachineTypeViaMetadata() (string, error) { // isCurrentInstance uses metadata server to check if specified // instanceID matches current machine's instanceID -func (gce *GCECloud) isCurrentInstance(instanceID string) bool { +func (g *Cloud) isCurrentInstance(instanceID string) bool { currentInstanceID, err := getInstanceIDViaMetadata() if err != nil { // Log and swallow error - glog.Errorf("Failed to fetch instanceID via Metadata: %v", err) + klog.Errorf("Failed to fetch instanceID via Metadata: %v", err) return false } @@ -571,16 +574,16 @@ func (gce *GCECloud) isCurrentInstance(instanceID string) bool { // Invoking this method to get host tags is risky since it depends on the // format of the host names in the cluster. 
Only use it as a fallback if // gce.nodeTags is unspecified -func (gce *GCECloud) computeHostTags(hosts []*gceInstance) ([]string, error) { +func (g *Cloud) computeHostTags(hosts []*gceInstance) ([]string, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() // TODO: We could store the tags in gceInstance, so we could have already fetched it hostNamesByZone := make(map[string]map[string]bool) // map of zones -> map of names -> bool (for easy lookup) - nodeInstancePrefix := gce.nodeInstancePrefix + nodeInstancePrefix := g.nodeInstancePrefix for _, host := range hosts { - if !strings.HasPrefix(host.Name, gce.nodeInstancePrefix) { - glog.Warningf("instance %v does not conform to prefix '%s', ignoring filter", host, gce.nodeInstancePrefix) + if !strings.HasPrefix(host.Name, g.nodeInstancePrefix) { + klog.Warningf("instance %v does not conform to prefix '%s', ignoring filter", host, g.nodeInstancePrefix) nodeInstancePrefix = "" } @@ -599,7 +602,7 @@ func (gce *GCECloud) computeHostTags(hosts []*gceInstance) ([]string, error) { filt = filter.Regexp("name", nodeInstancePrefix+".*") } for zone, hostNames := range hostNamesByZone { - instances, err := gce.c.Instances().List(ctx, zone, filt) + instances, err := g.c.Instances().List(ctx, zone, filt) if err != nil { return nil, err } @@ -607,14 +610,14 @@ func (gce *GCECloud) computeHostTags(hosts []*gceInstance) ([]string, error) { if !hostNames[instance.Name] { continue } - longest_tag := "" + longestTag := "" for _, tag := range instance.Tags.Items { - if strings.HasPrefix(instance.Name, tag) && len(tag) > len(longest_tag) { - longest_tag = tag + if strings.HasPrefix(instance.Name, tag) && len(tag) > len(longestTag) { + longestTag = tag } } - if len(longest_tag) > 0 { - tags.Insert(longest_tag) + if len(longestTag) > 0 { + tags.Insert(longestTag) } else { return nil, fmt.Errorf("could not find any tag that is a prefix of instance name for instance %s", instance.Name) } @@ -629,35 +632,35 @@ func (gce *GCECloud) computeHostTags(hosts []*gceInstance) ([]string, error) { // GetNodeTags will first try returning the list of tags specified in GCE cloud Configuration. // If they weren't provided, it'll compute the host tags with the given hostnames. If the list // of hostnames has not changed, a cached set of nodetags are returned. -func (gce *GCECloud) GetNodeTags(nodeNames []string) ([]string, error) { +func (g *Cloud) GetNodeTags(nodeNames []string) ([]string, error) { // If nodeTags were specified through configuration, use them - if len(gce.nodeTags) > 0 { - return gce.nodeTags, nil + if len(g.nodeTags) > 0 { + return g.nodeTags, nil } - gce.computeNodeTagLock.Lock() - defer gce.computeNodeTagLock.Unlock() + g.computeNodeTagLock.Lock() + defer g.computeNodeTagLock.Unlock() // Early return if hosts have not changed hosts := sets.NewString(nodeNames...) 
- if hosts.Equal(gce.lastKnownNodeNames) { - return gce.lastComputedNodeTags, nil + if hosts.Equal(g.lastKnownNodeNames) { + return g.lastComputedNodeTags, nil } // Get GCE instance data by hostname - instances, err := gce.getInstancesByNames(nodeNames) + instances, err := g.getInstancesByNames(nodeNames) if err != nil { return nil, err } // Determine list of host tags - tags, err := gce.computeHostTags(instances) + tags, err := g.computeHostTags(instances) if err != nil { return nil, err } // Save the list of tags - gce.lastKnownNodeNames = hosts - gce.lastComputedNodeTags = tags + g.lastKnownNodeNames = hosts + g.lastComputedNodeTags = tags return tags, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer.go index 86eea9f87d4c..35a2c6952f4a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer.go @@ -24,10 +24,10 @@ import ( "sort" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" netsets "k8s.io/kubernetes/pkg/util/net/sets" ) @@ -41,10 +41,6 @@ var ( lbSrcRngsFlag cidrs ) -func newLoadBalancerMetricContext(request, region string) *metricContext { - return newGenericMetricContext("loadbalancer", request, region, unusedMetricLabel, computeV1Version) -} - func init() { var err error // LB L7 proxies and all L3/4/7 health checkers have client addresses within these known CIDRs. @@ -91,9 +87,9 @@ func LoadBalancerSrcRanges() []string { } // GetLoadBalancer is an implementation of LoadBalancer.GetLoadBalancer -func (gce *GCECloud) GetLoadBalancer(ctx context.Context, clusterName string, svc *v1.Service) (*v1.LoadBalancerStatus, bool, error) { - loadBalancerName := gce.GetLoadBalancerName(ctx, clusterName, svc) - fwd, err := gce.GetRegionForwardingRule(loadBalancerName, gce.region) +func (g *Cloud) GetLoadBalancer(ctx context.Context, clusterName string, svc *v1.Service) (*v1.LoadBalancerStatus, bool, error) { + loadBalancerName := g.GetLoadBalancerName(ctx, clusterName, svc) + fwd, err := g.GetRegionForwardingRule(loadBalancerName, g.region) if err == nil { status := &v1.LoadBalancerStatus{} status.Ingress = []v1.LoadBalancerIngress{{IP: fwd.IPAddress}} @@ -104,23 +100,23 @@ func (gce *GCECloud) GetLoadBalancer(ctx context.Context, clusterName string, sv } // GetLoadBalancerName is an implementation of LoadBalancer.GetLoadBalancerName. -func (gce *GCECloud) GetLoadBalancerName(ctx context.Context, clusterName string, svc *v1.Service) string { +func (g *Cloud) GetLoadBalancerName(ctx context.Context, clusterName string, svc *v1.Service) string { // TODO: replace DefaultLoadBalancerName to generate more meaningful loadbalancer names. return cloudprovider.DefaultLoadBalancerName(svc) } // EnsureLoadBalancer is an implementation of LoadBalancer.EnsureLoadBalancer. 
-func (gce *GCECloud) EnsureLoadBalancer(ctx context.Context, clusterName string, svc *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { - loadBalancerName := gce.GetLoadBalancerName(ctx, clusterName, svc) +func (g *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, svc *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { + loadBalancerName := g.GetLoadBalancerName(ctx, clusterName, svc) desiredScheme := getSvcScheme(svc) - clusterID, err := gce.ClusterID.GetID() + clusterID, err := g.ClusterID.GetID() if err != nil { return nil, err } - glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v): ensure %v loadbalancer", clusterName, svc.Namespace, svc.Name, loadBalancerName, gce.region, desiredScheme) + klog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v): ensure %v loadbalancer", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region, desiredScheme) - existingFwdRule, err := gce.GetRegionForwardingRule(loadBalancerName, gce.region) + existingFwdRule, err := g.GetRegionForwardingRule(loadBalancerName, g.region) if err != nil && !isNotFound(err) { return nil, err } @@ -130,14 +126,14 @@ func (gce *GCECloud) EnsureLoadBalancer(ctx context.Context, clusterName string, // If the loadbalancer type changes between INTERNAL and EXTERNAL, the old load balancer should be deleted. if existingScheme != desiredScheme { - glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v): deleting existing %v loadbalancer", clusterName, svc.Namespace, svc.Name, loadBalancerName, gce.region, existingScheme) + klog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v): deleting existing %v loadbalancer", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region, existingScheme) switch existingScheme { case cloud.SchemeInternal: - err = gce.ensureInternalLoadBalancerDeleted(clusterName, clusterID, svc) + err = g.ensureInternalLoadBalancerDeleted(clusterName, clusterID, svc) default: - err = gce.ensureExternalLoadBalancerDeleted(clusterName, clusterID, svc) + err = g.ensureExternalLoadBalancerDeleted(clusterName, clusterID, svc) } - glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v): done deleting existing %v loadbalancer. err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, gce.region, existingScheme, err) + klog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v): done deleting existing %v loadbalancer. err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region, existingScheme, err) if err != nil { return nil, err } @@ -150,53 +146,53 @@ func (gce *GCECloud) EnsureLoadBalancer(ctx context.Context, clusterName string, var status *v1.LoadBalancerStatus switch desiredScheme { case cloud.SchemeInternal: - status, err = gce.ensureInternalLoadBalancer(clusterName, clusterID, svc, existingFwdRule, nodes) + status, err = g.ensureInternalLoadBalancer(clusterName, clusterID, svc, existingFwdRule, nodes) default: - status, err = gce.ensureExternalLoadBalancer(clusterName, clusterID, svc, existingFwdRule, nodes) + status, err = g.ensureExternalLoadBalancer(clusterName, clusterID, svc, existingFwdRule, nodes) } - glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v): done ensuring loadbalancer. err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, gce.region, err) + klog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v): done ensuring loadbalancer. 
err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region, err) return status, err } // UpdateLoadBalancer is an implementation of LoadBalancer.UpdateLoadBalancer. -func (gce *GCECloud) UpdateLoadBalancer(ctx context.Context, clusterName string, svc *v1.Service, nodes []*v1.Node) error { - loadBalancerName := gce.GetLoadBalancerName(ctx, clusterName, svc) +func (g *Cloud) UpdateLoadBalancer(ctx context.Context, clusterName string, svc *v1.Service, nodes []*v1.Node) error { + loadBalancerName := g.GetLoadBalancerName(ctx, clusterName, svc) scheme := getSvcScheme(svc) - clusterID, err := gce.ClusterID.GetID() + clusterID, err := g.ClusterID.GetID() if err != nil { return err } - glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v, %v, %v): updating with %d nodes", clusterName, svc.Namespace, svc.Name, loadBalancerName, gce.region, len(nodes)) + klog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v, %v, %v): updating with %d nodes", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region, len(nodes)) switch scheme { case cloud.SchemeInternal: - err = gce.updateInternalLoadBalancer(clusterName, clusterID, svc, nodes) + err = g.updateInternalLoadBalancer(clusterName, clusterID, svc, nodes) default: - err = gce.updateExternalLoadBalancer(clusterName, svc, nodes) + err = g.updateExternalLoadBalancer(clusterName, svc, nodes) } - glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v, %v, %v): done updating. err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, gce.region, err) + klog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v, %v, %v): done updating. err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region, err) return err } // EnsureLoadBalancerDeleted is an implementation of LoadBalancer.EnsureLoadBalancerDeleted. -func (gce *GCECloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, svc *v1.Service) error { - loadBalancerName := gce.GetLoadBalancerName(ctx, clusterName, svc) +func (g *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, svc *v1.Service) error { + loadBalancerName := g.GetLoadBalancerName(ctx, clusterName, svc) scheme := getSvcScheme(svc) - clusterID, err := gce.ClusterID.GetID() + clusterID, err := g.ClusterID.GetID() if err != nil { return err } - glog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v, %v, %v, %v): deleting loadbalancer", clusterName, svc.Namespace, svc.Name, loadBalancerName, gce.region) + klog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v, %v, %v, %v): deleting loadbalancer", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region) switch scheme { case cloud.SchemeInternal: - err = gce.ensureInternalLoadBalancerDeleted(clusterName, clusterID, svc) + err = g.ensureInternalLoadBalancerDeleted(clusterName, clusterID, svc) default: - err = gce.ensureExternalLoadBalancerDeleted(clusterName, clusterID, svc) + err = g.ensureExternalLoadBalancerDeleted(clusterName, clusterID, svc) } - glog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v, %v, %v, %v): done deleting loadbalancer. err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, gce.region, err) + klog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v, %v, %v, %v): done deleting loadbalancer. 
err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region, err) return err } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_external.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_external.go index 9687393b4533..6b92e71ef77f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_external.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_external.go @@ -31,9 +31,9 @@ import ( "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" netsets "k8s.io/kubernetes/pkg/util/net/sets" - "github.com/golang/glog" computealpha "google.golang.org/api/compute/v0.alpha" compute "google.golang.org/api/compute/v1" + "k8s.io/klog" ) // ensureExternalLoadBalancer is the external implementation of LoadBalancer.EnsureLoadBalancer. @@ -44,19 +44,19 @@ import ( // Due to an interesting series of design decisions, this handles both creating // new load balancers and updating existing load balancers, recognizing when // each is needed. -func (gce *GCECloud) ensureExternalLoadBalancer(clusterName string, clusterID string, apiService *v1.Service, existingFwdRule *compute.ForwardingRule, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { +func (g *Cloud) ensureExternalLoadBalancer(clusterName string, clusterID string, apiService *v1.Service, existingFwdRule *compute.ForwardingRule, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { if len(nodes) == 0 { return nil, fmt.Errorf("Cannot EnsureLoadBalancer() with no hosts") } hostNames := nodeNames(nodes) supportsNodesHealthCheck := supportsNodesHealthCheck(nodes) - hosts, err := gce.getInstancesByNames(hostNames) + hosts, err := g.getInstancesByNames(hostNames) if err != nil { return nil, err } - loadBalancerName := gce.GetLoadBalancerName(context.TODO(), clusterName, apiService) + loadBalancerName := g.GetLoadBalancerName(context.TODO(), clusterName, apiService) requestedIP := apiService.Spec.LoadBalancerIP ports := apiService.Spec.Ports portStr := []string{} @@ -66,27 +66,27 @@ func (gce *GCECloud) ensureExternalLoadBalancer(clusterName string, clusterID st serviceName := types.NamespacedName{Namespace: apiService.Namespace, Name: apiService.Name} lbRefStr := fmt.Sprintf("%v(%v)", loadBalancerName, serviceName) - glog.V(2).Infof("ensureExternalLoadBalancer(%s, %v, %v, %v, %v, %v)", lbRefStr, gce.region, requestedIP, portStr, hostNames, apiService.Annotations) + klog.V(2).Infof("ensureExternalLoadBalancer(%s, %v, %v, %v, %v, %v)", lbRefStr, g.region, requestedIP, portStr, hostNames, apiService.Annotations) // Check the current and the desired network tiers. If they do not match, // tear down the existing resources with the wrong tier. 
-	netTier, err := gce.getServiceNetworkTier(apiService)
+	netTier, err := g.getServiceNetworkTier(apiService)
 	if err != nil {
-		glog.Errorf("ensureExternalLoadBalancer(%s): Failed to get the desired network tier: %v.", lbRefStr, err)
+		klog.Errorf("ensureExternalLoadBalancer(%s): Failed to get the desired network tier: %v.", lbRefStr, err)
 		return nil, err
 	}
-	glog.V(4).Infof("ensureExternalLoadBalancer(%s): Desired network tier %q.", lbRefStr, netTier)
-	if gce.AlphaFeatureGate.Enabled(AlphaFeatureNetworkTiers) {
-		gce.deleteWrongNetworkTieredResources(loadBalancerName, lbRefStr, netTier)
+	klog.V(4).Infof("ensureExternalLoadBalancer(%s): Desired network tier %q.", lbRefStr, netTier)
+	if g.AlphaFeatureGate.Enabled(AlphaFeatureNetworkTiers) {
+		g.deleteWrongNetworkTieredResources(loadBalancerName, lbRefStr, netTier)
 	}

 	// Check if the forwarding rule exists, and if so, what its IP is.
-	fwdRuleExists, fwdRuleNeedsUpdate, fwdRuleIP, err := gce.forwardingRuleNeedsUpdate(loadBalancerName, gce.region, requestedIP, ports)
+	fwdRuleExists, fwdRuleNeedsUpdate, fwdRuleIP, err := g.forwardingRuleNeedsUpdate(loadBalancerName, g.region, requestedIP, ports)
 	if err != nil {
 		return nil, err
 	}
 	if !fwdRuleExists {
-		glog.V(2).Infof("ensureExternalLoadBalancer(%s): Forwarding rule %v doesn't exist.", lbRefStr, loadBalancerName)
+		klog.V(2).Infof("ensureExternalLoadBalancer(%s): Forwarding rule %v doesn't exist.", lbRefStr, loadBalancerName)
 	}

 	// Make sure we know which IP address will be used and have properly reserved
@@ -120,22 +120,22 @@ func (gce *GCECloud) ensureExternalLoadBalancer(clusterName string, clusterID st
 			return
 		}
 		if isSafeToReleaseIP {
-			if err := gce.DeleteRegionAddress(loadBalancerName, gce.region); err != nil && !isNotFound(err) {
-				glog.Errorf("ensureExternalLoadBalancer(%s): Failed to release static IP %s in region %v: %v.", lbRefStr, ipAddressToUse, gce.region, err)
+			if err := g.DeleteRegionAddress(loadBalancerName, g.region); err != nil && !isNotFound(err) {
+				klog.Errorf("ensureExternalLoadBalancer(%s): Failed to release static IP %s in region %v: %v.", lbRefStr, ipAddressToUse, g.region, err)
 			} else if isNotFound(err) {
-				glog.V(2).Infof("ensureExternalLoadBalancer(%s): IP address %s is not reserved.", lbRefStr, ipAddressToUse)
+				klog.V(2).Infof("ensureExternalLoadBalancer(%s): IP address %s is not reserved.", lbRefStr, ipAddressToUse)
 			} else {
-				glog.Infof("ensureExternalLoadBalancer(%s): Released static IP %s.", lbRefStr, ipAddressToUse)
+				klog.Infof("ensureExternalLoadBalancer(%s): Released static IP %s.", lbRefStr, ipAddressToUse)
 			}
 		} else {
-			glog.Warningf("ensureExternalLoadBalancer(%s): Orphaning static IP %s in region %v: %v.", lbRefStr, ipAddressToUse, gce.region, err)
+			klog.Warningf("ensureExternalLoadBalancer(%s): Orphaning static IP %s in region %v: %v.", lbRefStr, ipAddressToUse, g.region, err)
 		}
 	}()

 	if requestedIP != "" {
 		// If user requests a specific IP address, verify first. No mutation to
 		// the GCE resources will be performed in the verification process.
-		isUserOwnedIP, err = verifyUserRequestedIP(gce, gce.region, requestedIP, fwdRuleIP, lbRefStr, netTier)
+		isUserOwnedIP, err = verifyUserRequestedIP(g, g.region, requestedIP, fwdRuleIP, lbRefStr, netTier)
 		if err != nil {
 			return nil, err
 		}
@@ -145,11 +145,11 @@ func (gce *GCECloud) ensureExternalLoadBalancer(clusterName string, clusterID st
 	if !isUserOwnedIP {
 		// If we are not using the user-owned IP, either promote the
 		// ephemeral IP used by the fwd rule, or create a new static IP.
-		ipAddr, existed, err := ensureStaticIP(gce, loadBalancerName, serviceName.String(), gce.region, fwdRuleIP, netTier)
+		ipAddr, existed, err := ensureStaticIP(g, loadBalancerName, serviceName.String(), g.region, fwdRuleIP, netTier)
 		if err != nil {
 			return nil, fmt.Errorf("failed to ensure a static IP for load balancer (%s): %v", lbRefStr, err)
 		}
-		glog.Infof("ensureExternalLoadBalancer(%s): Ensured IP address %s (tier: %s).", lbRefStr, ipAddr, netTier)
+		klog.Infof("ensureExternalLoadBalancer(%s): Ensured IP address %s (tier: %s).", lbRefStr, ipAddr, netTier)
 		// If the IP was not owned by the user, but it already existed, it
 		// could indicate that the previous update cycle failed. We can use
 		// this IP and try to run through the process again, but we should
@@ -167,7 +167,7 @@ func (gce *GCECloud) ensureExternalLoadBalancer(clusterName string, clusterID st
 		return nil, err
 	}

-	firewallExists, firewallNeedsUpdate, err := gce.firewallNeedsUpdate(loadBalancerName, serviceName.String(), gce.region, ipAddressToUse, ports, sourceRanges)
+	firewallExists, firewallNeedsUpdate, err := g.firewallNeedsUpdate(loadBalancerName, serviceName.String(), g.region, ipAddressToUse, ports, sourceRanges)
 	if err != nil {
 		return nil, err
 	}
@@ -177,60 +177,60 @@ func (gce *GCECloud) ensureExternalLoadBalancer(clusterName string, clusterID st
 		// Unlike forwarding rules and target pools, firewalls can be updated
 		// without needing to be deleted and recreated.
 		if firewallExists {
-			glog.Infof("ensureExternalLoadBalancer(%s): Updating firewall.", lbRefStr)
-			if err := gce.updateFirewall(apiService, MakeFirewallName(loadBalancerName), gce.region, desc, sourceRanges, ports, hosts); err != nil {
+			klog.Infof("ensureExternalLoadBalancer(%s): Updating firewall.", lbRefStr)
+			if err := g.updateFirewall(apiService, MakeFirewallName(loadBalancerName), g.region, desc, sourceRanges, ports, hosts); err != nil {
 				return nil, err
 			}
-			glog.Infof("ensureExternalLoadBalancer(%s): Updated firewall.", lbRefStr)
+			klog.Infof("ensureExternalLoadBalancer(%s): Updated firewall.", lbRefStr)
 		} else {
-			glog.Infof("ensureExternalLoadBalancer(%s): Creating firewall.", lbRefStr)
-			if err := gce.createFirewall(apiService, MakeFirewallName(loadBalancerName), gce.region, desc, sourceRanges, ports, hosts); err != nil {
+			klog.Infof("ensureExternalLoadBalancer(%s): Creating firewall.", lbRefStr)
+			if err := g.createFirewall(apiService, MakeFirewallName(loadBalancerName), g.region, desc, sourceRanges, ports, hosts); err != nil {
 				return nil, err
 			}
-			glog.Infof("ensureExternalLoadBalancer(%s): Created firewall.", lbRefStr)
+			klog.Infof("ensureExternalLoadBalancer(%s): Created firewall.", lbRefStr)
 		}
 	}

-	tpExists, tpNeedsRecreation, err := gce.targetPoolNeedsRecreation(loadBalancerName, gce.region, apiService.Spec.SessionAffinity)
+	tpExists, tpNeedsRecreation, err := g.targetPoolNeedsRecreation(loadBalancerName, g.region, apiService.Spec.SessionAffinity)
 	if err != nil {
 		return nil, err
 	}
 	if !tpExists {
-		glog.Infof("ensureExternalLoadBalancer(%s): Target pool for service doesn't exist.", lbRefStr)
+		klog.Infof("ensureExternalLoadBalancer(%s): Target pool for service doesn't exist.", lbRefStr)
 	}

 	// Check which health check needs to be created and which needs to be deleted.
 	// Health check management is coupled with target pool operation to prevent leaking.
var hcToCreate, hcToDelete *compute.HttpHealthCheck - hcLocalTrafficExisting, err := gce.GetHttpHealthCheck(loadBalancerName) + hcLocalTrafficExisting, err := g.GetHTTPHealthCheck(loadBalancerName) if err != nil && !isHTTPErrorCode(err, http.StatusNotFound) { return nil, fmt.Errorf("error checking HTTP health check for load balancer (%s): %v", lbRefStr, err) } if path, healthCheckNodePort := apiservice.GetServiceHealthCheckPathPort(apiService); path != "" { - glog.V(4).Infof("ensureExternalLoadBalancer(%s): Service needs local traffic health checks on: %d%s.", lbRefStr, healthCheckNodePort, path) + klog.V(4).Infof("ensureExternalLoadBalancer(%s): Service needs local traffic health checks on: %d%s.", lbRefStr, healthCheckNodePort, path) if hcLocalTrafficExisting == nil { // This logic exists to detect a transition for non-OnlyLocal to OnlyLocal service // turn on the tpNeedsRecreation flag to delete/recreate fwdrule/tpool updating the // target pool to use local traffic health check. - glog.V(2).Infof("ensureExternalLoadBalancer(%s): Updating from nodes health checks to local traffic health checks.", lbRefStr) + klog.V(2).Infof("ensureExternalLoadBalancer(%s): Updating from nodes health checks to local traffic health checks.", lbRefStr) if supportsNodesHealthCheck { - hcToDelete = makeHttpHealthCheck(MakeNodesHealthCheckName(clusterID), GetNodesHealthCheckPath(), GetNodesHealthCheckPort()) + hcToDelete = makeHTTPHealthCheck(MakeNodesHealthCheckName(clusterID), GetNodesHealthCheckPath(), GetNodesHealthCheckPort()) } tpNeedsRecreation = true } - hcToCreate = makeHttpHealthCheck(loadBalancerName, path, healthCheckNodePort) + hcToCreate = makeHTTPHealthCheck(loadBalancerName, path, healthCheckNodePort) } else { - glog.V(4).Infof("ensureExternalLoadBalancer(%s): Service needs nodes health checks.", lbRefStr) + klog.V(4).Infof("ensureExternalLoadBalancer(%s): Service needs nodes health checks.", lbRefStr) if hcLocalTrafficExisting != nil { // This logic exists to detect a transition from OnlyLocal to non-OnlyLocal service // and turn on the tpNeedsRecreation flag to delete/recreate fwdrule/tpool updating the // target pool to use nodes health check. - glog.V(2).Infof("ensureExternalLoadBalancer(%s): Updating from local traffic health checks to nodes health checks.", lbRefStr) + klog.V(2).Infof("ensureExternalLoadBalancer(%s): Updating from local traffic health checks to nodes health checks.", lbRefStr) hcToDelete = hcLocalTrafficExisting tpNeedsRecreation = true } if supportsNodesHealthCheck { - hcToCreate = makeHttpHealthCheck(MakeNodesHealthCheckName(clusterID), GetNodesHealthCheckPath(), GetNodesHealthCheckPort()) + hcToCreate = makeHTTPHealthCheck(MakeNodesHealthCheckName(clusterID), GetNodesHealthCheckPath(), GetNodesHealthCheckPort()) } } // Now we get to some slightly more interesting logic. @@ -245,19 +245,19 @@ func (gce *GCECloud) ensureExternalLoadBalancer(clusterName string, clusterID st // and something should fail before we recreate it, don't release the // IP. That way we can come back to it later. 
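Before the code below tears the forwarding rule down, note the `isSafeToReleaseIP` bookkeeping: reserve the address early, schedule its release in a defer, and flip the flag only once a longer-lived resource owns it. A stripped-down sketch of that shape (names invented):

```go
package main

import "fmt"

func release(ip string) { fmt.Println("released", ip) }

func ensure() (err error) {
	ip := "203.0.113.10" // reserved up front
	committed := false
	defer func() {
		// Only give the IP back if nothing longer-lived (a forwarding rule,
		// or a user's own reservation) ever took ownership of it.
		if !committed {
			release(ip)
		}
	}()

	// ... create firewall, target pool, forwarding rule ...

	committed = true // end of critical section: the rule now holds the IP
	return nil
}

func main() { _ = ensure() }
```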
isSafeToReleaseIP = false - if err := gce.DeleteRegionForwardingRule(loadBalancerName, gce.region); err != nil && !isNotFound(err) { + if err := g.DeleteRegionForwardingRule(loadBalancerName, g.region); err != nil && !isNotFound(err) { return nil, fmt.Errorf("failed to delete existing forwarding rule for load balancer (%s) update: %v", lbRefStr, err) } - glog.Infof("ensureExternalLoadBalancer(%s): Deleted forwarding rule.", lbRefStr) + klog.Infof("ensureExternalLoadBalancer(%s): Deleted forwarding rule.", lbRefStr) } - if err := gce.ensureTargetPoolAndHealthCheck(tpExists, tpNeedsRecreation, apiService, loadBalancerName, clusterID, ipAddressToUse, hosts, hcToCreate, hcToDelete); err != nil { + if err := g.ensureTargetPoolAndHealthCheck(tpExists, tpNeedsRecreation, apiService, loadBalancerName, clusterID, ipAddressToUse, hosts, hcToCreate, hcToDelete); err != nil { return nil, err } if tpNeedsRecreation || fwdRuleNeedsUpdate { - glog.Infof("ensureExternalLoadBalancer(%s): Creating forwarding rule, IP %s (tier: %s).", lbRefStr, ipAddressToUse, netTier) - if err := createForwardingRule(gce, loadBalancerName, serviceName.String(), gce.region, ipAddressToUse, gce.targetPoolURL(loadBalancerName), ports, netTier); err != nil { + klog.Infof("ensureExternalLoadBalancer(%s): Creating forwarding rule, IP %s (tier: %s).", lbRefStr, ipAddressToUse, netTier) + if err := createForwardingRule(g, loadBalancerName, serviceName.String(), g.region, ipAddressToUse, g.targetPoolURL(loadBalancerName), ports, netTier); err != nil { return nil, fmt.Errorf("failed to create forwarding rule for load balancer (%s): %v", lbRefStr, err) } // End critical section. It is safe to release the static IP (which @@ -265,7 +265,7 @@ func (gce *GCECloud) ensureExternalLoadBalancer(clusterName string, clusterID st // of a user-requested IP, the "is user-owned" flag will be set, // preventing it from actually being released. isSafeToReleaseIP = true - glog.Infof("ensureExternalLoadBalancer(%s): Created forwarding rule, IP %s.", lbRefStr, ipAddressToUse) + klog.Infof("ensureExternalLoadBalancer(%s): Created forwarding rule, IP %s.", lbRefStr, ipAddressToUse) } status := &v1.LoadBalancerStatus{} @@ -275,27 +275,27 @@ func (gce *GCECloud) ensureExternalLoadBalancer(clusterName string, clusterID st } // updateExternalLoadBalancer is the external implementation of LoadBalancer.UpdateLoadBalancer. 
-func (gce *GCECloud) updateExternalLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) error { - hosts, err := gce.getInstancesByNames(nodeNames(nodes)) +func (g *Cloud) updateExternalLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) error { + hosts, err := g.getInstancesByNames(nodeNames(nodes)) if err != nil { return err } - loadBalancerName := gce.GetLoadBalancerName(context.TODO(), clusterName, service) - return gce.updateTargetPool(loadBalancerName, hosts) + loadBalancerName := g.GetLoadBalancerName(context.TODO(), clusterName, service) + return g.updateTargetPool(loadBalancerName, hosts) } // ensureExternalLoadBalancerDeleted is the external implementation of LoadBalancer.EnsureLoadBalancerDeleted -func (gce *GCECloud) ensureExternalLoadBalancerDeleted(clusterName, clusterID string, service *v1.Service) error { - loadBalancerName := gce.GetLoadBalancerName(context.TODO(), clusterName, service) +func (g *Cloud) ensureExternalLoadBalancerDeleted(clusterName, clusterID string, service *v1.Service) error { + loadBalancerName := g.GetLoadBalancerName(context.TODO(), clusterName, service) serviceName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} lbRefStr := fmt.Sprintf("%v(%v)", loadBalancerName, serviceName) var hcNames []string if path, _ := apiservice.GetServiceHealthCheckPathPort(service); path != "" { - hcToDelete, err := gce.GetHttpHealthCheck(loadBalancerName) + hcToDelete, err := g.GetHTTPHealthCheck(loadBalancerName) if err != nil && !isHTTPErrorCode(err, http.StatusNotFound) { - glog.Infof("ensureExternalLoadBalancerDeleted(%s): Failed to retrieve health check:%v.", lbRefStr, err) + klog.Infof("ensureExternalLoadBalancerDeleted(%s): Failed to retrieve health check:%v.", lbRefStr, err) return err } // If we got 'StatusNotFound' LB was already deleted and it's safe to ignore. @@ -313,12 +313,12 @@ func (gce *GCECloud) ensureExternalLoadBalancerDeleted(clusterName, clusterID st errs := utilerrors.AggregateGoroutines( func() error { - glog.Infof("ensureExternalLoadBalancerDeleted(%s): Deleting firewall rule.", lbRefStr) + klog.Infof("ensureExternalLoadBalancerDeleted(%s): Deleting firewall rule.", lbRefStr) fwName := MakeFirewallName(loadBalancerName) - err := ignoreNotFound(gce.DeleteFirewall(fwName)) - if isForbidden(err) && gce.OnXPN() { - glog.V(4).Infof("ensureExternalLoadBalancerDeleted(%s): Do not have permission to delete firewall rule %v (on XPN). Raising event.", lbRefStr, fwName) - gce.raiseFirewallChangeNeededEvent(service, FirewallToGCloudDeleteCmd(fwName, gce.NetworkProjectID())) + err := ignoreNotFound(g.DeleteFirewall(fwName)) + if isForbidden(err) && g.OnXPN() { + klog.V(4).Infof("ensureExternalLoadBalancerDeleted(%s): Do not have permission to delete firewall rule %v (on XPN). Raising event.", lbRefStr, fwName) + g.raiseFirewallChangeNeededEvent(service, FirewallToGCloudDeleteCmd(fwName, g.NetworkProjectID())) return nil } return err @@ -327,18 +327,18 @@ func (gce *GCECloud) ensureExternalLoadBalancerDeleted(clusterName, clusterID st // possible that EnsureLoadBalancer left one around in a failed // creation/update attempt, so make sure we clean it up here just in case. 
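The deletion path above fans its independent teardown steps out through `utilerrors.AggregateGoroutines`, which runs each function in its own goroutine and folds the non-nil results into a single aggregate error instead of stopping at the first failure. A self-contained example of the same helper:

```go
package main

import (
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	// Each function models one independent teardown step; only the
	// forwarding-rule/target-pool pair must stay serial, which is why it
	// shares a single function in the code above as well.
	errs := utilerrors.AggregateGoroutines(
		func() error { return nil },                     // delete firewall
		func() error { return nil },                     // delete static IP
		func() error { return fmt.Errorf("rule busy") }, // delete rule, then pool
	)
	if errs != nil {
		fmt.Println(errs.Error()) // rule busy
	}
}
```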
		func() error {
-			glog.Infof("ensureExternalLoadBalancerDeleted(%s): Deleting IP address.", lbRefStr)
-			return ignoreNotFound(gce.DeleteRegionAddress(loadBalancerName, gce.region))
+			klog.Infof("ensureExternalLoadBalancerDeleted(%s): Deleting IP address.", lbRefStr)
+			return ignoreNotFound(g.DeleteRegionAddress(loadBalancerName, g.region))
		},
		func() error {
-			glog.Infof("ensureExternalLoadBalancerDeleted(%s): Deleting forwarding rule.", lbRefStr)
+			klog.Infof("ensureExternalLoadBalancerDeleted(%s): Deleting forwarding rule.", lbRefStr)
			// The forwarding rule must be deleted before either the target pool can,
			// unfortunately, so we have to do these two serially.
-			if err := ignoreNotFound(gce.DeleteRegionForwardingRule(loadBalancerName, gce.region)); err != nil {
+			if err := ignoreNotFound(g.DeleteRegionForwardingRule(loadBalancerName, g.region)); err != nil {
				return err
			}
-			glog.Infof("ensureExternalLoadBalancerDeleted(%s): Deleting target pool.", lbRefStr)
-			if err := gce.DeleteExternalTargetPoolAndChecks(service, loadBalancerName, gce.region, clusterID, hcNames...); err != nil {
+			klog.Infof("ensureExternalLoadBalancerDeleted(%s): Deleting target pool.", lbRefStr)
+			if err := g.DeleteExternalTargetPoolAndChecks(service, loadBalancerName, g.region, clusterID, hcNames...); err != nil {
				return err
			}
			return nil
@@ -350,14 +350,15 @@ func (gce *GCECloud) ensureExternalLoadBalancerDeleted(clusterName, clusterID st
	return nil
}

-func (gce *GCECloud) DeleteExternalTargetPoolAndChecks(service *v1.Service, name, region, clusterID string, hcNames ...string) error {
+// DeleteExternalTargetPoolAndChecks deletes an external load balancer's target pool and any health checks it used.
+func (g *Cloud) DeleteExternalTargetPoolAndChecks(service *v1.Service, name, region, clusterID string, hcNames ...string) error {
	serviceName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
	lbRefStr := fmt.Sprintf("%v(%v)", name, serviceName)

-	if err := gce.DeleteTargetPool(name, region); err != nil && isHTTPErrorCode(err, http.StatusNotFound) {
-		glog.Infof("DeleteExternalTargetPoolAndChecks(%v): Target pool already deleted. Continuing to delete other resources.", lbRefStr)
+	if err := g.DeleteTargetPool(name, region); err != nil && isHTTPErrorCode(err, http.StatusNotFound) {
+		klog.Infof("DeleteExternalTargetPoolAndChecks(%v): Target pool already deleted. Continuing to delete other resources.", lbRefStr)
	} else if err != nil {
-		glog.Warningf("DeleteExternalTargetPoolAndChecks(%v): Failed to delete target pool, got error %s.", lbRefStr, err.Error())
+		klog.Warningf("DeleteExternalTargetPoolAndChecks(%v): Failed to delete target pool, got error %s.", lbRefStr, err.Error())
		return err
	}

@@ -369,17 +370,17 @@ func (gce *GCECloud) DeleteExternalTargetPoolAndChecks(service *v1.Service, name
		if isNodesHealthCheck {
			// Lock to prevent deleting necessary nodes health check before it gets attached
			// to target pool.
-			gce.sharedResourceLock.Lock()
-			defer gce.sharedResourceLock.Unlock()
+			g.sharedResourceLock.Lock()
+			defer g.sharedResourceLock.Unlock()
		}
-		glog.Infof("DeleteExternalTargetPoolAndChecks(%v): Deleting health check %v.", lbRefStr, hcName)
-		if err := gce.DeleteHttpHealthCheck(hcName); err != nil {
+		klog.Infof("DeleteExternalTargetPoolAndChecks(%v): Deleting health check %v.", lbRefStr, hcName)
+		if err := g.DeleteHTTPHealthCheck(hcName); err != nil {
			// Delete nodes health checks will fail if any other target pool is using it.
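The comment above flags the crux of shared-check teardown: deleting a nodes health check can legitimately fail because another target pool still references it. The branch that follows treats both "in use" and NotFound as success, which is what keeps retries of a half-finished teardown idempotent. Boiled down (the error values and predicates here are invented stand-ins):

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-ins for the provider's HTTP-status-based error predicates.
var errNotFound = errors.New("not found")
var errInUse = errors.New("in use")

// deleteSharedCheck mirrors the tolerance below: a missing check means a
// previous pass already removed it; an in-use check means another target
// pool still needs it. Neither should fail the teardown.
func deleteSharedCheck(del func() error) error {
	switch err := del(); err {
	case nil, errNotFound:
		return nil // safe to go on and delete the check's firewall
	case errInUse:
		return nil // leave the shared check for its remaining users
	default:
		return fmt.Errorf("deleting health check: %v", err)
	}
}

func main() {
	fmt.Println(deleteSharedCheck(func() error { return errInUse })) // <nil>
}
```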
			if isInUsedByError(err) {
-				glog.V(4).Infof("DeleteExternalTargetPoolAndChecks(%v): Health check %v is in used: %v.", lbRefStr, hcName, err)
+				klog.V(4).Infof("DeleteExternalTargetPoolAndChecks(%v): Health check %v is in use: %v.", lbRefStr, hcName, err)
				return nil
			} else if !isHTTPErrorCode(err, http.StatusNotFound) {
-				glog.Warningf("DeleteExternalTargetPoolAndChecks(%v): Failed to delete health check %v: %v.", lbRefStr, hcName, err)
+				klog.Warningf("DeleteExternalTargetPoolAndChecks(%v): Failed to delete health check %v: %v.", lbRefStr, hcName, err)
				return err
			}
			// StatusNotFound could happen when:
@@ -389,16 +390,16 @@ func (gce *GCECloud) DeleteExternalTargetPoolAndChecks(service *v1.Service, name
			// - This is a retry and in previous round we failed to delete the healthcheck firewall
			//   after deleted the healthcheck.
			// We continue to delete the healthcheck firewall to prevent leaking.
-			glog.V(4).Infof("DeleteExternalTargetPoolAndChecks(%v): Health check %v is already deleted.", lbRefStr, hcName)
+			klog.V(4).Infof("DeleteExternalTargetPoolAndChecks(%v): Health check %v is already deleted.", lbRefStr, hcName)
		}
		// If health check is deleted without error, it means no load-balancer is using it.
		// So we should delete the health check firewall as well.
		fwName := MakeHealthCheckFirewallName(clusterID, hcName, isNodesHealthCheck)
-		glog.Infof("DeleteExternalTargetPoolAndChecks(%v): Deleting health check firewall %v.", lbRefStr, fwName)
-		if err := ignoreNotFound(gce.DeleteFirewall(fwName)); err != nil {
-			if isForbidden(err) && gce.OnXPN() {
-				glog.V(4).Infof("DeleteExternalTargetPoolAndChecks(%v): Do not have permission to delete firewall rule %v (on XPN). Raising event.", lbRefStr, fwName)
-				gce.raiseFirewallChangeNeededEvent(service, FirewallToGCloudDeleteCmd(fwName, gce.NetworkProjectID()))
+		klog.Infof("DeleteExternalTargetPoolAndChecks(%v): Deleting health check firewall %v.", lbRefStr, fwName)
+		if err := ignoreNotFound(g.DeleteFirewall(fwName)); err != nil {
+			if isForbidden(err) && g.OnXPN() {
+				klog.V(4).Infof("DeleteExternalTargetPoolAndChecks(%v): Do not have permission to delete firewall rule %v (on XPN). Raising event.", lbRefStr, fwName)
+				g.raiseFirewallChangeNeededEvent(service, FirewallToGCloudDeleteCmd(fwName, g.NetworkProjectID()))
				return nil
			}
			return err
@@ -428,7 +429,7 @@ func verifyUserRequestedIP(s CloudAddressService, region, requestedIP, fwdRuleIP
	// case we shouldn't delete it anyway.
	existingAddress, err := s.GetRegionAddressByIP(region, requestedIP)
	if err != nil && !isNotFound(err) {
-		glog.Errorf("verifyUserRequestedIP: failed to check whether the requested IP %q for LB %s exists: %v", requestedIP, lbRef, err)
+		klog.Errorf("verifyUserRequestedIP: failed to check whether the requested IP %q for LB %s exists: %v", requestedIP, lbRef, err)
		return false, err
	}
	if err == nil {
@@ -442,27 +443,27 @@ func verifyUserRequestedIP(s CloudAddressService, region, requestedIP, fwdRuleIP
		}
		netTier := cloud.NetworkTierGCEValueToType(netTierStr)
		if netTier != desiredNetTier {
-			glog.Errorf("verifyUserRequestedIP: requested static IP %q (name: %s) for LB %s has network tier %s, need %s.", requestedIP, existingAddress.Name, lbRef, netTier, desiredNetTier)
+			klog.Errorf("verifyUserRequestedIP: requested static IP %q (name: %s) for LB %s has network tier %s, need %s.", requestedIP, existingAddress.Name, lbRef, netTier, desiredNetTier)
			return false, fmt.Errorf("requrested IP %q belongs to the %s network tier; expected %s", requestedIP, netTier, desiredNetTier)
		}
-		glog.V(4).Infof("verifyUserRequestedIP: the requested static IP %q (name: %s, tier: %s) for LB %s exists.", requestedIP, existingAddress.Name, netTier, lbRef)
+		klog.V(4).Infof("verifyUserRequestedIP: the requested static IP %q (name: %s, tier: %s) for LB %s exists.", requestedIP, existingAddress.Name, netTier, lbRef)
		return true, nil
	}

	if requestedIP == fwdRuleIP {
		// The requested IP is not a static IP, but is currently assigned
		// to this forwarding rule, so we can just use it.
-		glog.V(4).Infof("verifyUserRequestedIP: the requested IP %q is not static, but is currently in use by for LB %s", requestedIP, lbRef)
+		klog.V(4).Infof("verifyUserRequestedIP: the requested IP %q is not static, but is currently in use by LB %s", requestedIP, lbRef)
		return false, nil
	}

	// The requested IP is not static and it is not assigned to the
	// current forwarding rule. It might be attached to a different
	// rule or it might not be part of this project at all. Either
	// way, we can't use it.
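So `verifyUserRequestedIP` resolves to exactly three outcomes, and the `isUserOwnedIP` flag it returns is what later stops the deferred release from touching a user's own reservation. In miniature (the real function additionally rejects a user-reserved IP on the wrong network tier):

```go
package main

import (
	"errors"
	"fmt"
)

// ownership reduces the three verifyUserRequestedIP outcomes to a table.
func ownership(isStatic, assignedToRule bool) (userOwned bool, err error) {
	switch {
	case isStatic:
		return true, nil // user manages the address; never release it
	case assignedToRule:
		return false, nil // ephemeral IP on our rule; usable and promotable
	default:
		return false, errors.New("ip is neither static nor assigned to the LB")
	}
}

func main() {
	fmt.Println(ownership(false, true)) // false <nil>
}
```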
- glog.Errorf("verifyUserRequestedIP: requested IP %q for LB %s is neither static nor assigned to the LB", requestedIP, lbRef) + klog.Errorf("verifyUserRequestedIP: requested IP %q for LB %s is neither static nor assigned to the LB", requestedIP, lbRef) return false, fmt.Errorf("requested ip %q is neither static nor assigned to the LB", requestedIP) } -func (gce *GCECloud) ensureTargetPoolAndHealthCheck(tpExists, tpNeedsRecreation bool, svc *v1.Service, loadBalancerName, clusterID, ipAddressToUse string, hosts []*gceInstance, hcToCreate, hcToDelete *compute.HttpHealthCheck) error { +func (g *Cloud) ensureTargetPoolAndHealthCheck(tpExists, tpNeedsRecreation bool, svc *v1.Service, loadBalancerName, clusterID, ipAddressToUse string, hosts []*gceInstance, hcToCreate, hcToDelete *compute.HttpHealthCheck) error { serviceName := types.NamespacedName{Namespace: svc.Namespace, Name: svc.Name} lbRefStr := fmt.Sprintf("%v(%v)", loadBalancerName, serviceName) @@ -472,10 +473,10 @@ func (gce *GCECloud) ensureTargetPoolAndHealthCheck(tpExists, tpNeedsRecreation if hcToDelete != nil { hcNames = append(hcNames, hcToDelete.Name) } - if err := gce.DeleteExternalTargetPoolAndChecks(svc, loadBalancerName, gce.region, clusterID, hcNames...); err != nil { + if err := g.DeleteExternalTargetPoolAndChecks(svc, loadBalancerName, g.region, clusterID, hcNames...); err != nil { return fmt.Errorf("failed to delete existing target pool for load balancer (%s) update: %v", lbRefStr, err) } - glog.Infof("ensureTargetPoolAndHealthCheck(%s): Deleted target pool.", lbRefStr) + klog.Infof("ensureTargetPoolAndHealthCheck(%s): Deleted target pool.", lbRefStr) } // Once we've deleted the resources (if necessary), build them back up (or for // the first time if they're new). @@ -484,32 +485,40 @@ func (gce *GCECloud) ensureTargetPoolAndHealthCheck(tpExists, tpNeedsRecreation if len(hosts) > maxTargetPoolCreateInstances { createInstances = createInstances[:maxTargetPoolCreateInstances] } - if err := gce.createTargetPoolAndHealthCheck(svc, loadBalancerName, serviceName.String(), ipAddressToUse, gce.region, clusterID, createInstances, hcToCreate); err != nil { + if err := g.createTargetPoolAndHealthCheck(svc, loadBalancerName, serviceName.String(), ipAddressToUse, g.region, clusterID, createInstances, hcToCreate); err != nil { return fmt.Errorf("failed to create target pool for load balancer (%s): %v", lbRefStr, err) } if hcToCreate != nil { - glog.Infof("ensureTargetPoolAndHealthCheck(%s): Created health checks %v.", lbRefStr, hcToCreate.Name) + klog.Infof("ensureTargetPoolAndHealthCheck(%s): Created health checks %v.", lbRefStr, hcToCreate.Name) } if len(hosts) <= maxTargetPoolCreateInstances { - glog.Infof("ensureTargetPoolAndHealthCheck(%s): Created target pool.", lbRefStr) + klog.Infof("ensureTargetPoolAndHealthCheck(%s): Created target pool.", lbRefStr) } else { - glog.Infof("ensureTargetPoolAndHealthCheck(%s): Created initial target pool (now updating the remaining %d hosts).", lbRefStr, len(hosts)-maxTargetPoolCreateInstances) - if err := gce.updateTargetPool(loadBalancerName, hosts); err != nil { + klog.Infof("ensureTargetPoolAndHealthCheck(%s): Created initial target pool (now updating the remaining %d hosts).", lbRefStr, len(hosts)-maxTargetPoolCreateInstances) + if err := g.updateTargetPool(loadBalancerName, hosts); err != nil { return fmt.Errorf("failed to update target pool for load balancer (%s): %v", lbRefStr, err) } - glog.Infof("ensureTargetPoolAndHealthCheck(%s): Updated target pool (with %d hosts).", lbRefStr, 
len(hosts)-maxTargetPoolCreateInstances)
+		klog.Infof("ensureTargetPoolAndHealthCheck(%s): Updated target pool (with %d hosts).", lbRefStr, len(hosts)-maxTargetPoolCreateInstances)
		}
	} else if tpExists {
		// Ensure hosts are updated even if there is no other changes required on target pool.
-		if err := gce.updateTargetPool(loadBalancerName, hosts); err != nil {
+		if err := g.updateTargetPool(loadBalancerName, hosts); err != nil {
			return fmt.Errorf("failed to update target pool for load balancer (%s): %v", lbRefStr, err)
		}
-		glog.Infof("ensureTargetPoolAndHealthCheck(%s): Updated target pool (with %d hosts).", lbRefStr, len(hosts))
+		klog.Infof("ensureTargetPoolAndHealthCheck(%s): Updated target pool (with %d hosts).", lbRefStr, len(hosts))
+		if hcToCreate != nil {
+			if hc, err := g.ensureHTTPHealthCheck(hcToCreate.Name, hcToCreate.RequestPath, int32(hcToCreate.Port)); err != nil || hc == nil {
+				return fmt.Errorf("Failed to ensure health check for %v port %d path %v: %v", loadBalancerName, hcToCreate.Port, hcToCreate.RequestPath, err)
+			}
+		}
+	} else {
+		// Panic worthy.
+		klog.Errorf("ensureTargetPoolAndHealthCheck(%s): target pool does not exist, yet no creation was requested.", lbRefStr)
	}
	return nil
}

-func (gce *GCECloud) createTargetPoolAndHealthCheck(svc *v1.Service, name, serviceName, ipAddress, region, clusterID string, hosts []*gceInstance, hc *compute.HttpHealthCheck) error {
+func (g *Cloud) createTargetPoolAndHealthCheck(svc *v1.Service, name, serviceName, ipAddress, region, clusterID string, hosts []*gceInstance, hc *compute.HttpHealthCheck) error {
	// health check management is coupled with targetPools to prevent leaks. A
	// target pool is the only thing that requires a health check, so we delete
	// associated checks on teardown, and ensure checks on setup.
@@ -519,16 +528,16 @@ func (gce *GCECloud) createTargetPoolAndHealthCheck(svc *v1.Service, name, servi
	isNodesHealthCheck := hc.Name != name
	if isNodesHealthCheck {
		// Lock to prevent necessary nodes health check / firewall gets deleted.
- gce.sharedResourceLock.Lock() - defer gce.sharedResourceLock.Unlock() + g.sharedResourceLock.Lock() + defer g.sharedResourceLock.Unlock() } - if err := gce.ensureHttpHealthCheckFirewall(svc, serviceName, ipAddress, region, clusterID, hosts, hc.Name, int32(hc.Port), isNodesHealthCheck); err != nil { + if err := g.ensureHTTPHealthCheckFirewall(svc, serviceName, ipAddress, region, clusterID, hosts, hc.Name, int32(hc.Port), isNodesHealthCheck); err != nil { return err } var err error hcRequestPath, hcPort := hc.RequestPath, hc.Port - if hc, err = gce.ensureHttpHealthCheck(hc.Name, hc.RequestPath, int32(hc.Port)); err != nil || hc == nil { + if hc, err = g.ensureHTTPHealthCheck(hc.Name, hc.RequestPath, int32(hc.Port)); err != nil || hc == nil { return fmt.Errorf("Failed to ensure health check for %v port %d path %v: %v", name, hcPort, hcRequestPath, err) } hcLinks = append(hcLinks, hc.SelfLink) @@ -538,7 +547,7 @@ func (gce *GCECloud) createTargetPoolAndHealthCheck(svc *v1.Service, name, servi for _, host := range hosts { instances = append(instances, host.makeComparableHostPath()) } - glog.Infof("Creating targetpool %v with %d healthchecks", name, len(hcLinks)) + klog.Infof("Creating targetpool %v with %d healthchecks", name, len(hcLinks)) pool := &compute.TargetPool{ Name: name, Description: fmt.Sprintf(`{"kubernetes.io/service-name":"%s"}`, serviceName), @@ -547,14 +556,14 @@ func (gce *GCECloud) createTargetPoolAndHealthCheck(svc *v1.Service, name, servi HealthChecks: hcLinks, } - if err := gce.CreateTargetPool(pool, region); err != nil && !isHTTPErrorCode(err, http.StatusConflict) { + if err := g.CreateTargetPool(pool, region); err != nil && !isHTTPErrorCode(err, http.StatusConflict) { return err } return nil } -func (gce *GCECloud) updateTargetPool(loadBalancerName string, hosts []*gceInstance) error { - pool, err := gce.GetTargetPool(loadBalancerName, gce.region) +func (g *Cloud) updateTargetPool(loadBalancerName string, hosts []*gceInstance) error { + pool, err := g.GetTargetPool(loadBalancerName, g.region) if err != nil { return err } @@ -577,13 +586,13 @@ func (gce *GCECloud) updateTargetPool(loadBalancerName string, hosts []*gceInsta } if len(toAdd) > 0 { - if err := gce.AddInstancesToTargetPool(loadBalancerName, gce.region, toAdd); err != nil { + if err := g.AddInstancesToTargetPool(loadBalancerName, g.region, toAdd); err != nil { return err } } if len(toRemove) > 0 { - if err := gce.RemoveInstancesFromTargetPool(loadBalancerName, gce.region, toRemove); err != nil { + if err := g.RemoveInstancesFromTargetPool(loadBalancerName, g.region, toRemove); err != nil { return err } } @@ -591,23 +600,23 @@ func (gce *GCECloud) updateTargetPool(loadBalancerName string, hosts []*gceInsta // Try to verify that the correct number of nodes are now in the target pool. // We've been bitten by a bug here before (#11327) where all nodes were // accidentally removed and want to make similar problems easier to notice. - updatedPool, err := gce.GetTargetPool(loadBalancerName, gce.region) + updatedPool, err := g.GetTargetPool(loadBalancerName, g.region) if err != nil { return err } if len(updatedPool.Instances) != len(hosts) { - glog.Errorf("Unexpected number of instances (%d) in target pool %s after updating (expected %d). Instances in updated pool: %s", + klog.Errorf("Unexpected number of instances (%d) in target pool %s after updating (expected %d). 
Instances in updated pool: %s",
			len(updatedPool.Instances), loadBalancerName, len(hosts), strings.Join(updatedPool.Instances, ","))
		return fmt.Errorf("Unexpected number of instances (%d) in target pool %s after update (expected %d)", len(updatedPool.Instances), loadBalancerName, len(hosts))
	}
	return nil
}

-func (gce *GCECloud) targetPoolURL(name string) string {
-	return gce.service.BasePath + strings.Join([]string{gce.projectID, "regions", gce.region, "targetPools", name}, "/")
+func (g *Cloud) targetPoolURL(name string) string {
+	return g.service.BasePath + strings.Join([]string{g.projectID, "regions", g.region, "targetPools", name}, "/")
}

-func makeHttpHealthCheck(name, path string, port int32) *compute.HttpHealthCheck {
+func makeHTTPHealthCheck(name, path string, port int32) *compute.HttpHealthCheck {
	return &compute.HttpHealthCheck{
		Name: name,
		Port: int64(port),
@@ -621,34 +630,67 @@ func makeHttpHealthCheck(name, path string, port int32) *compute.HttpHealthCheck
	}
}

-func (gce *GCECloud) ensureHttpHealthCheck(name, path string, port int32) (hc *compute.HttpHealthCheck, err error) {
-	newHC := makeHttpHealthCheck(name, path, port)
-	hc, err = gce.GetHttpHealthCheck(name)
+// mergeHTTPHealthChecks reconciles the HttpHealthCheck configuration so that
+// its values are no smaller than the default values.
+// E.g. if the old health check interval is 2s and the new default is 8s,
+// the HC interval will be reconciled to 8 seconds.
+// If the existing health check value is larger than the default,
+// the existing configuration will be kept.
+func mergeHTTPHealthChecks(hc, newHC *compute.HttpHealthCheck) *compute.HttpHealthCheck {
+	if hc.CheckIntervalSec > newHC.CheckIntervalSec {
+		newHC.CheckIntervalSec = hc.CheckIntervalSec
+	}
+	if hc.TimeoutSec > newHC.TimeoutSec {
+		newHC.TimeoutSec = hc.TimeoutSec
+	}
+	if hc.UnhealthyThreshold > newHC.UnhealthyThreshold {
+		newHC.UnhealthyThreshold = hc.UnhealthyThreshold
+	}
+	if hc.HealthyThreshold > newHC.HealthyThreshold {
+		newHC.HealthyThreshold = hc.HealthyThreshold
+	}
+	return newHC
+}
+
+// needToUpdateHTTPHealthChecks checks whether the http healthcheck needs to be
+// updated.
+func needToUpdateHTTPHealthChecks(hc, newHC *compute.HttpHealthCheck) bool { + changed := hc.Port != newHC.Port || hc.RequestPath != newHC.RequestPath || hc.Description != newHC.Description + changed = changed || hc.CheckIntervalSec < newHC.CheckIntervalSec || hc.TimeoutSec < newHC.TimeoutSec + changed = changed || hc.UnhealthyThreshold < newHC.UnhealthyThreshold || hc.HealthyThreshold < newHC.HealthyThreshold + return changed +} + +func (g *Cloud) ensureHTTPHealthCheck(name, path string, port int32) (hc *compute.HttpHealthCheck, err error) { + newHC := makeHTTPHealthCheck(name, path, port) + hc, err = g.GetHTTPHealthCheck(name) if hc == nil || err != nil && isHTTPErrorCode(err, http.StatusNotFound) { - glog.Infof("Did not find health check %v, creating port %v path %v", name, port, path) - if err = gce.CreateHttpHealthCheck(newHC); err != nil { + klog.Infof("Did not find health check %v, creating port %v path %v", name, port, path) + if err = g.CreateHTTPHealthCheck(newHC); err != nil { return nil, err } - hc, err = gce.GetHttpHealthCheck(name) + hc, err = g.GetHTTPHealthCheck(name) if err != nil { - glog.Errorf("Failed to get http health check %v", err) + klog.Errorf("Failed to get http health check %v", err) return nil, err } - glog.Infof("Created HTTP health check %v healthCheckNodePort: %d", name, port) + klog.Infof("Created HTTP health check %v healthCheckNodePort: %d", name, port) return hc, nil } // Validate health check fields - glog.V(4).Infof("Checking http health check params %s", name) - drift := hc.Port != int64(port) || hc.RequestPath != path || hc.Description != makeHealthCheckDescription(name) - drift = drift || hc.CheckIntervalSec != gceHcCheckIntervalSeconds || hc.TimeoutSec != gceHcTimeoutSeconds - drift = drift || hc.UnhealthyThreshold != gceHcUnhealthyThreshold || hc.HealthyThreshold != gceHcHealthyThreshold - if drift { - glog.Warningf("Health check %v exists but parameters have drifted - updating...", name) - if err := gce.UpdateHttpHealthCheck(newHC); err != nil { - glog.Warningf("Failed to reconcile http health check %v parameters", name) + klog.V(4).Infof("Checking http health check params %s", name) + if needToUpdateHTTPHealthChecks(hc, newHC) { + klog.Warningf("Health check %v exists but parameters have drifted - updating...", name) + newHC = mergeHTTPHealthChecks(hc, newHC) + if err := g.UpdateHTTPHealthCheck(newHC); err != nil { + klog.Warningf("Failed to reconcile http health check %v parameters", name) + return nil, err + } + klog.V(4).Infof("Corrected health check %v parameters successful", name) + hc, err = g.GetHTTPHealthCheck(name) + if err != nil { return nil, err } - glog.V(4).Infof("Corrected health check %v parameters successful", name) } return hc, nil } @@ -657,14 +699,14 @@ func (gce *GCECloud) ensureHttpHealthCheck(name, path string, port int32) (hc *c // IP is being requested. // Returns whether the forwarding rule exists, whether it needs to be updated, // what its IP address is (if it exists), and any error we encountered. 
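Stepping out of the diff briefly: the `mergeHTTPHealthChecks` / `needToUpdateHTTPHealthChecks` pair added above only ever ratchets timing and threshold fields upward. An update is triggered when identity fields (port, path, description) differ or when a value has dropped below the controller's defaults; the merge then preserves anything an operator has raised by hand. A self-contained rerun of that rule, with a local struct standing in for compute.HttpHealthCheck:

```go
package main

import "fmt"

type hc struct{ Interval, Timeout, Unhealthy, Healthy int64 }

// merge mirrors mergeHTTPHealthChecks above: every field ratchets up to at
// least the new default, but operator-raised values are preserved.
func merge(old, def hc) hc {
	if old.Interval > def.Interval {
		def.Interval = old.Interval
	}
	if old.Timeout > def.Timeout {
		def.Timeout = old.Timeout
	}
	if old.Unhealthy > def.Unhealthy {
		def.Unhealthy = old.Unhealthy
	}
	if old.Healthy > def.Healthy {
		def.Healthy = old.Healthy
	}
	return def
}

func main() {
	tuned := hc{Interval: 30, Timeout: 5, Unhealthy: 5, Healthy: 1}
	defaults := hc{Interval: 8, Timeout: 1, Unhealthy: 3, Healthy: 2}
	// The 30s interval survives, while the below-default Healthy threshold
	// is pulled back up to the default of 2.
	fmt.Printf("%+v\n", merge(tuned, defaults))
	// {Interval:30 Timeout:5 Unhealthy:5 Healthy:2}
}
```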
-func (gce *GCECloud) forwardingRuleNeedsUpdate(name, region string, loadBalancerIP string, ports []v1.ServicePort) (exists bool, needsUpdate bool, ipAddress string, err error) {
-	fwd, err := gce.GetRegionForwardingRule(name, region)
+func (g *Cloud) forwardingRuleNeedsUpdate(name, region string, loadBalancerIP string, ports []v1.ServicePort) (exists bool, needsUpdate bool, ipAddress string, err error) {
+	fwd, err := g.GetRegionForwardingRule(name, region)
	if err != nil {
		if isHTTPErrorCode(err, http.StatusNotFound) {
			return false, true, "", nil
		}
		// Err on the side of caution in case of errors. Caller should notice the error and retry.
-		// We never want to end up recreating resources because gce api flaked.
+		// We never want to end up recreating resources because the GCE API flaked.
		return true, false, "", fmt.Errorf("error getting load balancer's forwarding rule: %v", err)
	}
	// If the user asks for a specific static ip through the Service spec,
@@ -672,22 +714,22 @@ func (gce *GCECloud) forwardingRuleNeedsUpdate(name, region string, loadBalancer
	// TODO: we report loadbalancer IP through status, so we want to verify if
	// that matches the forwarding rule as well.
	if loadBalancerIP != "" && loadBalancerIP != fwd.IPAddress {
-		glog.Infof("LoadBalancer ip for forwarding rule %v was expected to be %v, but was actually %v", fwd.Name, fwd.IPAddress, loadBalancerIP)
+		klog.Infof("LoadBalancer ip for forwarding rule %v was expected to be %v, but was actually %v", fwd.Name, loadBalancerIP, fwd.IPAddress)
		return true, true, fwd.IPAddress, nil
	}
	portRange, err := loadBalancerPortRange(ports)
	if err != nil {
		// Err on the side of caution in case of errors. Caller should notice the error and retry.
-		// We never want to end up recreating resources because gce api flaked.
+		// We never want to end up recreating resources because the GCE API flaked.
		return true, false, "", err
	}
	if portRange != fwd.PortRange {
-		glog.Infof("LoadBalancer port range for forwarding rule %v was expected to be %v, but was actually %v", fwd.Name, fwd.PortRange, portRange)
+		klog.Infof("LoadBalancer port range for forwarding rule %v was expected to be %v, but was actually %v", fwd.Name, portRange, fwd.PortRange)
		return true, true, fwd.IPAddress, nil
	}
	// The service controller verified all the protocols match on the ports, just check the first one
	if string(ports[0].Protocol) != fwd.IPProtocol {
-		glog.Infof("LoadBalancer protocol for forwarding rule %v was expected to be %v, but was actually %v", fwd.Name, fwd.IPProtocol, string(ports[0].Protocol))
+		klog.Infof("LoadBalancer protocol for forwarding rule %v was expected to be %v, but was actually %v", fwd.Name, string(ports[0].Protocol), fwd.IPProtocol)
		return true, true, fwd.IPAddress, nil
	}
@@ -696,14 +738,14 @@ func (gce *GCECloud) forwardingRuleNeedsUpdate(name, region string, loadBalancer
// Doesn't check whether the hosts have changed, since host updating is handled
// separately.
-func (gce *GCECloud) targetPoolNeedsRecreation(name, region string, affinityType v1.ServiceAffinity) (exists bool, needsRecreation bool, err error) {
-	tp, err := gce.GetTargetPool(name, region)
+func (g *Cloud) targetPoolNeedsRecreation(name, region string, affinityType v1.ServiceAffinity) (exists bool, needsRecreation bool, err error) {
+	tp, err := g.GetTargetPool(name, region)
	if err != nil {
		if isHTTPErrorCode(err, http.StatusNotFound) {
			return false, true, nil
		}
		// Err on the side of caution in case of errors. Caller should notice the error and retry.
-		// We never want to end up recreating resources because gce api flaked.
+		// We never want to end up recreating resources because the GCE API flaked.
		return true, false, fmt.Errorf("error getting load balancer's target pool: %v", err)
	}
	// TODO: If the user modifies their Service's session affinity, it *should*
@@ -715,7 +757,7 @@ func (gce *GCECloud) targetPoolNeedsRecreation(name, region string, affinityType
	// target pool (which results in downtime). Fix this when we have formally
	// defined the defaults on either side.
	if tp.SessionAffinity != "" && translateAffinityType(affinityType) != tp.SessionAffinity {
-		glog.Infof("LoadBalancer target pool %v changed affinity from %v to %v", name, tp.SessionAffinity, affinityType)
+		klog.Infof("LoadBalancer target pool %v changed affinity from %v to %v", name, tp.SessionAffinity, affinityType)
		return true, true, nil
	}
	return true, false, nil
@@ -772,13 +814,13 @@ func translateAffinityType(affinityType v1.ServiceAffinity) string {
	case v1.ServiceAffinityNone:
		return gceAffinityTypeNone
	default:
-		glog.Errorf("Unexpected affinity type: %v", affinityType)
+		klog.Errorf("Unexpected affinity type: %v", affinityType)
		return gceAffinityTypeNone
	}
}

-func (gce *GCECloud) firewallNeedsUpdate(name, serviceName, region, ipAddress string, ports []v1.ServicePort, sourceRanges netsets.IPNet) (exists bool, needsUpdate bool, err error) {
-	fw, err := gce.GetFirewall(MakeFirewallName(name))
+func (g *Cloud) firewallNeedsUpdate(name, serviceName, region, ipAddress string, ports []v1.ServicePort, sourceRanges netsets.IPNet) (exists bool, needsUpdate bool, err error) {
+	fw, err := g.GetFirewall(MakeFirewallName(name))
	if err != nil {
		if isHTTPErrorCode(err, http.StatusNotFound) {
			return false, true, nil
@@ -804,7 +846,7 @@ func (gce *GCECloud) firewallNeedsUpdate(name, serviceName, region, ipAddress st
	actualSourceRanges, err := netsets.ParseIPNets(fw.SourceRanges...)
	if err != nil {
		// This really shouldn't happen... GCE has returned something unexpected
-		glog.Warningf("Error parsing firewall SourceRanges: %v", fw.SourceRanges)
+		klog.Warningf("Error parsing firewall SourceRanges: %v", fw.SourceRanges)
		// We don't return the error, because we can hopefully recover from this by reconfiguring the firewall
		return true, true, nil
	}
@@ -815,7 +857,7 @@ func (gce *GCECloud) firewallNeedsUpdate(name, serviceName, region, ipAddress st
	return true, false, nil
}

-func (gce *GCECloud) ensureHttpHealthCheckFirewall(svc *v1.Service, serviceName, ipAddress, region, clusterID string, hosts []*gceInstance, hcName string, hcPort int32, isNodesHealthCheck bool) error {
+func (g *Cloud) ensureHTTPHealthCheckFirewall(svc *v1.Service, serviceName, ipAddress, region, clusterID string, hosts []*gceInstance, hcName string, hcPort int32, isNodesHealthCheck bool) error {
	// Prepare the firewall params for creating / checking.
desc := fmt.Sprintf(`{"kubernetes.io/cluster-id":"%s"}`, clusterID) if !isNodesHealthCheck { @@ -825,16 +867,16 @@ func (gce *GCECloud) ensureHttpHealthCheckFirewall(svc *v1.Service, serviceName, ports := []v1.ServicePort{{Protocol: "tcp", Port: hcPort}} fwName := MakeHealthCheckFirewallName(clusterID, hcName, isNodesHealthCheck) - fw, err := gce.GetFirewall(fwName) + fw, err := g.GetFirewall(fwName) if err != nil { if !isHTTPErrorCode(err, http.StatusNotFound) { return fmt.Errorf("error getting firewall for health checks: %v", err) } - glog.Infof("Creating firewall %v for health checks.", fwName) - if err := gce.createFirewall(svc, fwName, region, desc, sourceRanges, ports, hosts); err != nil { + klog.Infof("Creating firewall %v for health checks.", fwName) + if err := g.createFirewall(svc, fwName, region, desc, sourceRanges, ports, hosts); err != nil { return err } - glog.Infof("Created firewall %v for health checks.", fwName) + klog.Infof("Created firewall %v for health checks.", fwName) return nil } // Validate firewall fields. @@ -843,12 +885,12 @@ func (gce *GCECloud) ensureHttpHealthCheckFirewall(svc *v1.Service, serviceName, fw.Allowed[0].IPProtocol != string(ports[0].Protocol) || !equalStringSets(fw.Allowed[0].Ports, []string{strconv.Itoa(int(ports[0].Port))}) || !equalStringSets(fw.SourceRanges, sourceRanges.StringSlice()) { - glog.Warningf("Firewall %v exists but parameters have drifted - updating...", fwName) - if err := gce.updateFirewall(svc, fwName, region, desc, sourceRanges, ports, hosts); err != nil { - glog.Warningf("Failed to reconcile firewall %v parameters.", fwName) + klog.Warningf("Firewall %v exists but parameters have drifted - updating...", fwName) + if err := g.updateFirewall(svc, fwName, region, desc, sourceRanges, ports, hosts); err != nil { + klog.Warningf("Failed to reconcile firewall %v parameters.", fwName) return err } - glog.V(4).Infof("Corrected firewall %v parameters successful", fwName) + klog.V(4).Infof("Corrected firewall %v parameters successful", fwName) } return nil } @@ -892,17 +934,17 @@ func createForwardingRule(s CloudForwardingRuleService, name, serviceName, regio return nil } -func (gce *GCECloud) createFirewall(svc *v1.Service, name, region, desc string, sourceRanges netsets.IPNet, ports []v1.ServicePort, hosts []*gceInstance) error { - firewall, err := gce.firewallObject(name, region, desc, sourceRanges, ports, hosts) +func (g *Cloud) createFirewall(svc *v1.Service, name, region, desc string, sourceRanges netsets.IPNet, ports []v1.ServicePort, hosts []*gceInstance) error { + firewall, err := g.firewallObject(name, region, desc, sourceRanges, ports, hosts) if err != nil { return err } - if err = gce.CreateFirewall(firewall); err != nil { + if err = g.CreateFirewall(firewall); err != nil { if isHTTPErrorCode(err, http.StatusConflict) { return nil - } else if isForbidden(err) && gce.OnXPN() { - glog.V(4).Infof("createFirewall(%v): do not have permission to create firewall rule (on XPN). Raising event.", firewall.Name) - gce.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudCreateCmd(firewall, gce.NetworkProjectID())) + } else if isForbidden(err) && g.OnXPN() { + klog.V(4).Infof("createFirewall(%v): do not have permission to create firewall rule (on XPN). 
Raising event.", firewall.Name) + g.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudCreateCmd(firewall, g.NetworkProjectID())) return nil } return err @@ -910,18 +952,18 @@ func (gce *GCECloud) createFirewall(svc *v1.Service, name, region, desc string, return nil } -func (gce *GCECloud) updateFirewall(svc *v1.Service, name, region, desc string, sourceRanges netsets.IPNet, ports []v1.ServicePort, hosts []*gceInstance) error { - firewall, err := gce.firewallObject(name, region, desc, sourceRanges, ports, hosts) +func (g *Cloud) updateFirewall(svc *v1.Service, name, region, desc string, sourceRanges netsets.IPNet, ports []v1.ServicePort, hosts []*gceInstance) error { + firewall, err := g.firewallObject(name, region, desc, sourceRanges, ports, hosts) if err != nil { return err } - if err = gce.UpdateFirewall(firewall); err != nil { + if err = g.UpdateFirewall(firewall); err != nil { if isHTTPErrorCode(err, http.StatusConflict) { return nil - } else if isForbidden(err) && gce.OnXPN() { - glog.V(4).Infof("updateFirewall(%v): do not have permission to update firewall rule (on XPN). Raising event.", firewall.Name) - gce.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudUpdateCmd(firewall, gce.NetworkProjectID())) + } else if isForbidden(err) && g.OnXPN() { + klog.V(4).Infof("updateFirewall(%v): do not have permission to update firewall rule (on XPN). Raising event.", firewall.Name) + g.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudUpdateCmd(firewall, g.NetworkProjectID())) return nil } return err @@ -929,25 +971,25 @@ func (gce *GCECloud) updateFirewall(svc *v1.Service, name, region, desc string, return nil } -func (gce *GCECloud) firewallObject(name, region, desc string, sourceRanges netsets.IPNet, ports []v1.ServicePort, hosts []*gceInstance) (*compute.Firewall, error) { +func (g *Cloud) firewallObject(name, region, desc string, sourceRanges netsets.IPNet, ports []v1.ServicePort, hosts []*gceInstance) (*compute.Firewall, error) { allowedPorts := make([]string, len(ports)) for ix := range ports { allowedPorts[ix] = strconv.Itoa(int(ports[ix].Port)) } // If the node tags to be used for this cluster have been predefined in the // provider config, just use them. Otherwise, invoke computeHostTags method to get the tags. - hostTags := gce.nodeTags + hostTags := g.nodeTags if len(hostTags) == 0 { var err error - if hostTags, err = gce.computeHostTags(hosts); err != nil { - return nil, fmt.Errorf("No node tags supplied and also failed to parse the given lists of hosts for tags. Abort creating firewall rule.") + if hostTags, err = g.computeHostTags(hosts); err != nil { + return nil, fmt.Errorf("no node tags supplied and also failed to parse the given lists of hosts for tags. 
Abort creating firewall rule") } } firewall := &compute.Firewall{ Name: name, Description: desc, - Network: gce.networkURL, + Network: g.networkURL, SourceRanges: sourceRanges.StringSlice(), TargetTags: hostTags, Allowed: []*compute.FirewallAllowed{ @@ -1013,8 +1055,8 @@ func ensureStaticIP(s CloudAddressService, name, serviceName, region, existingIP return addr.Address, existed, nil } -func (gce *GCECloud) getServiceNetworkTier(svc *v1.Service) (cloud.NetworkTier, error) { - if !gce.AlphaFeatureGate.Enabled(AlphaFeatureNetworkTiers) { +func (g *Cloud) getServiceNetworkTier(svc *v1.Service) (cloud.NetworkTier, error) { + if !g.AlphaFeatureGate.Enabled(AlphaFeatureNetworkTiers) { return cloud.NetworkTierDefault, nil } tier, err := GetServiceNetworkTier(svc) @@ -1025,12 +1067,12 @@ func (gce *GCECloud) getServiceNetworkTier(svc *v1.Service) (cloud.NetworkTier, return tier, nil } -func (gce *GCECloud) deleteWrongNetworkTieredResources(lbName, lbRef string, desiredNetTier cloud.NetworkTier) error { +func (g *Cloud) deleteWrongNetworkTieredResources(lbName, lbRef string, desiredNetTier cloud.NetworkTier) error { logPrefix := fmt.Sprintf("deleteWrongNetworkTieredResources:(%s)", lbRef) - if err := deleteFWDRuleWithWrongTier(gce, gce.region, lbName, logPrefix, desiredNetTier); err != nil { + if err := deleteFWDRuleWithWrongTier(g, g.region, lbName, logPrefix, desiredNetTier); err != nil { return err } - if err := deleteAddressWithWrongTier(gce, gce.region, lbName, logPrefix, desiredNetTier); err != nil { + if err := deleteAddressWithWrongTier(g, g.region, lbName, logPrefix, desiredNetTier); err != nil { return err } return nil @@ -1049,7 +1091,7 @@ func deleteFWDRuleWithWrongTier(s CloudForwardingRuleService, region, name, logP if existingTier == desiredNetTier { return nil } - glog.V(2).Infof("%s: Network tiers do not match; existing forwarding rule: %q, desired: %q. Deleting the forwarding rule", + klog.V(2).Infof("%s: Network tiers do not match; existing forwarding rule: %q, desired: %q. Deleting the forwarding rule", logPrefix, existingTier, desiredNetTier) err = s.DeleteRegionForwardingRule(name, region) return ignoreNotFound(err) @@ -1077,7 +1119,7 @@ func deleteAddressWithWrongTier(s CloudAddressService, region, name, logPrefix s if existingTier == desiredNetTier { return nil } - glog.V(2).Infof("%s: Network tiers do not match; existing address: %q, desired: %q. Deleting the address", + klog.V(2).Infof("%s: Network tiers do not match; existing address: %q, desired: %q. 
Deleting the address", logPrefix, existingTier, desiredNetTier) err = s.DeleteRegionAddress(name, region) return ignoreNotFound(err) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go index 4542fa1232be..87d1be64b49e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go @@ -22,11 +22,11 @@ import ( "strconv" "strings" - "github.com/golang/glog" compute "google.golang.org/api/compute/v1" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog" v1_service "k8s.io/kubernetes/pkg/api/v1/service" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" ) @@ -35,21 +35,21 @@ const ( allInstances = "ALL" ) -func (gce *GCECloud) ensureInternalLoadBalancer(clusterName, clusterID string, svc *v1.Service, existingFwdRule *compute.ForwardingRule, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { +func (g *Cloud) ensureInternalLoadBalancer(clusterName, clusterID string, svc *v1.Service, existingFwdRule *compute.ForwardingRule, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { nm := types.NamespacedName{Name: svc.Name, Namespace: svc.Namespace} ports, protocol := getPortsAndProtocol(svc.Spec.Ports) if protocol != v1.ProtocolTCP && protocol != v1.ProtocolUDP { return nil, fmt.Errorf("Invalid protocol %s, only TCP and UDP are supported", string(protocol)) } scheme := cloud.SchemeInternal - loadBalancerName := gce.GetLoadBalancerName(context.TODO(), clusterName, svc) + loadBalancerName := g.GetLoadBalancerName(context.TODO(), clusterName, svc) sharedBackend := shareBackendService(svc) backendServiceName := makeBackendServiceName(loadBalancerName, clusterID, sharedBackend, scheme, protocol, svc.Spec.SessionAffinity) - backendServiceLink := gce.getBackendServiceLink(backendServiceName) + backendServiceLink := g.getBackendServiceLink(backendServiceName) // Ensure instance groups exist and nodes are assigned to groups igName := makeInstanceGroupName(clusterID) - igLinks, err := gce.ensureInternalInstanceGroups(igName, nodes) + igLinks, err := g.ensureInternalInstanceGroups(igName, nodes) if err != nil { return nil, err } @@ -58,14 +58,14 @@ func (gce *GCECloud) ensureInternalLoadBalancer(clusterName, clusterID string, s var existingBackendService *compute.BackendService if existingFwdRule != nil && existingFwdRule.BackendService != "" { existingBSName := getNameFromLink(existingFwdRule.BackendService) - if existingBackendService, err = gce.GetRegionBackendService(existingBSName, gce.region); err != nil && !isNotFound(err) { + if existingBackendService, err = g.GetRegionBackendService(existingBSName, g.region); err != nil && !isNotFound(err) { return nil, err } } // Lock the sharedResourceLock to prevent any deletions of shared resources while assembling shared resources here - gce.sharedResourceLock.Lock() - defer gce.sharedResourceLock.Unlock() + g.sharedResourceLock.Lock() + defer g.sharedResourceLock.Unlock() // Ensure health check exists before creating the backend service. The health check is shared // if externalTrafficPolicy=Cluster. 
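The comment this hunk ends on is the internal-LB version of the earlier external-LB split: a service with externalTrafficPolicy=Local gets a dedicated health check against its own healthCheckNodePort, while everything else shares one cluster-wide check against kube-proxy's health endpoint. A sketch of that selection; the shared name and port here are illustrative stand-ins, not the exact output of the real naming helpers:

```go
package main

import "fmt"

// pickHealthCheck sketches the shared-vs-local decision described above.
func pickHealthCheck(clusterID, lbName string, onlyLocal bool, nodePort int32) (name, path string, port int32) {
	if onlyLocal {
		// Per-service check against the service's own healthCheckNodePort.
		return lbName, "/healthz", nodePort
	}
	// One cluster-wide check shared by every Cluster-policy service.
	return "k8s-" + clusterID + "-node", "/healthz", 10256
}

func main() {
	fmt.Println(pickHealthCheck("cid", "a1b2c3", true, 30061))
}
```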
@@ -76,7 +76,7 @@ func (gce *GCECloud) ensureInternalLoadBalancer(clusterName, clusterID string, s // Service requires a special health check, retrieve the OnlyLocal port & path hcPath, hcPort = v1_service.GetServiceHealthCheckPathPort(svc) } - hc, err := gce.ensureInternalHealthCheck(hcName, nm, sharedHealthCheck, hcPath, hcPort) + hc, err := g.ensureInternalHealthCheck(hcName, nm, sharedHealthCheck, hcPath, hcPort) if err != nil { return nil, err } @@ -88,7 +88,7 @@ func (gce *GCECloud) ensureInternalLoadBalancer(clusterName, clusterID string, s // If the ILB already exists, continue using the subnet that it's already using. // This is to support existing ILBs that were setup using the wrong subnet. - subnetworkURL := gce.SubnetworkURL() + subnetworkURL := g.SubnetworkURL() if existingFwdRule != nil && existingFwdRule.Subnetwork != "" { // external LBs have an empty Subnetwork field. subnetworkURL = existingFwdRule.Subnetwork @@ -96,17 +96,17 @@ func (gce *GCECloud) ensureInternalLoadBalancer(clusterName, clusterID string, s var addrMgr *addressManager // If the network is not a legacy network, use the address manager - if !gce.IsLegacyNetwork() { - addrMgr = newAddressManager(gce, nm.String(), gce.Region(), subnetworkURL, loadBalancerName, requestedIP, cloud.SchemeInternal) + if !g.IsLegacyNetwork() { + addrMgr = newAddressManager(g, nm.String(), g.Region(), subnetworkURL, loadBalancerName, requestedIP, cloud.SchemeInternal) ipToUse, err = addrMgr.HoldAddress() if err != nil { return nil, err } - glog.V(2).Infof("ensureInternalLoadBalancer(%v): reserved IP %q for the forwarding rule", loadBalancerName, ipToUse) + klog.V(2).Infof("ensureInternalLoadBalancer(%v): reserved IP %q for the forwarding rule", loadBalancerName, ipToUse) } // Ensure firewall rules if necessary - if err = gce.ensureInternalFirewalls(loadBalancerName, ipToUse, clusterID, nm, svc, strconv.Itoa(int(hcPort)), sharedHealthCheck, nodes); err != nil { + if err = g.ensureInternalFirewalls(loadBalancerName, ipToUse, clusterID, nm, svc, strconv.Itoa(int(hcPort)), sharedHealthCheck, nodes); err != nil { return nil, err } @@ -125,47 +125,47 @@ func (gce *GCECloud) ensureInternalLoadBalancer(clusterName, clusterID string, s if subnetworkURL != "" { expectedFwdRule.Subnetwork = subnetworkURL } else { - expectedFwdRule.Network = gce.networkURL + expectedFwdRule.Network = g.networkURL } fwdRuleDeleted := false if existingFwdRule != nil && !fwdRuleEqual(existingFwdRule, expectedFwdRule) { - glog.V(2).Infof("ensureInternalLoadBalancer(%v): deleting existing forwarding rule with IP address %v", loadBalancerName, existingFwdRule.IPAddress) - if err = ignoreNotFound(gce.DeleteRegionForwardingRule(loadBalancerName, gce.region)); err != nil { + klog.V(2).Infof("ensureInternalLoadBalancer(%v): deleting existing forwarding rule with IP address %v", loadBalancerName, existingFwdRule.IPAddress) + if err = ignoreNotFound(g.DeleteRegionForwardingRule(loadBalancerName, g.region)); err != nil { return nil, err } fwdRuleDeleted = true } bsDescription := makeBackendServiceDescription(nm, sharedBackend) - err = gce.ensureInternalBackendService(backendServiceName, bsDescription, svc.Spec.SessionAffinity, scheme, protocol, igLinks, hc.SelfLink) + err = g.ensureInternalBackendService(backendServiceName, bsDescription, svc.Spec.SessionAffinity, scheme, protocol, igLinks, hc.SelfLink) if err != nil { return nil, err } // If we previously deleted the forwarding rule or it never existed, finally create it. 
if fwdRuleDeleted || existingFwdRule == nil { - glog.V(2).Infof("ensureInternalLoadBalancer(%v): creating forwarding rule", loadBalancerName) - if err = gce.CreateRegionForwardingRule(expectedFwdRule, gce.region); err != nil { + klog.V(2).Infof("ensureInternalLoadBalancer(%v): creating forwarding rule", loadBalancerName) + if err = g.CreateRegionForwardingRule(expectedFwdRule, g.region); err != nil { return nil, err } - glog.V(2).Infof("ensureInternalLoadBalancer(%v): created forwarding rule", loadBalancerName) + klog.V(2).Infof("ensureInternalLoadBalancer(%v): created forwarding rule", loadBalancerName) } // Delete the previous internal load balancer resources if necessary if existingBackendService != nil { - gce.clearPreviousInternalResources(svc, loadBalancerName, existingBackendService, backendServiceName, hcName) + g.clearPreviousInternalResources(svc, loadBalancerName, existingBackendService, backendServiceName, hcName) } if addrMgr != nil { // Now that the controller knows the forwarding rule exists, we can release the address. if err := addrMgr.ReleaseAddress(); err != nil { - glog.Errorf("ensureInternalLoadBalancer: failed to release address reservation, possibly causing an orphan: %v", err) + klog.Errorf("ensureInternalLoadBalancer: failed to release address reservation, possibly causing an orphan: %v", err) } } // Get the most recent forwarding rule for the address. - updatedFwdRule, err := gce.GetRegionForwardingRule(loadBalancerName, gce.region) + updatedFwdRule, err := g.GetRegionForwardingRule(loadBalancerName, g.region) if err != nil { return nil, err } @@ -175,12 +175,12 @@ func (gce *GCECloud) ensureInternalLoadBalancer(clusterName, clusterID string, s return status, nil } -func (gce *GCECloud) clearPreviousInternalResources(svc *v1.Service, loadBalancerName string, existingBackendService *compute.BackendService, expectedBSName, expectedHCName string) { +func (g *Cloud) clearPreviousInternalResources(svc *v1.Service, loadBalancerName string, existingBackendService *compute.BackendService, expectedBSName, expectedHCName string) { // If a new backend service was created, delete the old one. 
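clearPreviousInternalResources, just below, can rely on plain name comparison because backend-service and health-check names encode the parameters they were built from: change the protocol, session affinity, or shared-ness of a Service and the expected name changes with it, exposing the stale object. A toy version of that detection (the real makeBackendServiceName format differs):

```go
package main

import "fmt"

// bsName is a toy stand-in for makeBackendServiceName: the parameters are
// baked into the name, so changing any of them yields a different name.
func bsName(clusterID, protocol, affinity string, shared bool) string {
	if shared {
		return fmt.Sprintf("k8s-%s-%s-%s-shared", clusterID, protocol, affinity)
	}
	return fmt.Sprintf("k8s-%s-%s-%s", clusterID, protocol, affinity)
}

func main() {
	have := bsName("cid", "TCP", "None", true)
	want := bsName("cid", "UDP", "None", true) // protocol changed on the Service
	if have != want {
		fmt.Println("tear down superseded backend service:", have)
	}
}
```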
if existingBackendService.Name != expectedBSName { - glog.V(2).Infof("clearPreviousInternalResources(%v): expected backend service %q does not match previous %q - deleting backend service", loadBalancerName, expectedBSName, existingBackendService.Name) - if err := gce.teardownInternalBackendService(existingBackendService.Name); err != nil && !isNotFound(err) { - glog.Warningf("clearPreviousInternalResources: could not delete old backend service: %v, err: %v", existingBackendService.Name, err) + klog.V(2).Infof("clearPreviousInternalResources(%v): expected backend service %q does not match previous %q - deleting backend service", loadBalancerName, expectedBSName, existingBackendService.Name) + if err := g.teardownInternalBackendService(existingBackendService.Name); err != nil && !isNotFound(err) { + klog.Warningf("clearPreviousInternalResources: could not delete old backend service: %v, err: %v", existingBackendService.Name, err) } } @@ -188,24 +188,24 @@ func (gce *GCECloud) clearPreviousInternalResources(svc *v1.Service, loadBalance if len(existingBackendService.HealthChecks) == 1 { existingHCName := getNameFromLink(existingBackendService.HealthChecks[0]) if existingHCName != expectedHCName { - glog.V(2).Infof("clearPreviousInternalResources(%v): expected health check %q does not match previous %q - deleting health check", loadBalancerName, expectedHCName, existingHCName) - if err := gce.teardownInternalHealthCheckAndFirewall(svc, existingHCName); err != nil { - glog.Warningf("clearPreviousInternalResources: could not delete existing healthcheck: %v, err: %v", existingHCName, err) + klog.V(2).Infof("clearPreviousInternalResources(%v): expected health check %q does not match previous %q - deleting health check", loadBalancerName, expectedHCName, existingHCName) + if err := g.teardownInternalHealthCheckAndFirewall(svc, existingHCName); err != nil { + klog.Warningf("clearPreviousInternalResources: could not delete existing healthcheck: %v, err: %v", existingHCName, err) } } } else if len(existingBackendService.HealthChecks) > 1 { - glog.Warningf("clearPreviousInternalResources(%v): more than one health check on the backend service %v, %v", loadBalancerName, existingBackendService.Name, existingBackendService.HealthChecks) + klog.Warningf("clearPreviousInternalResources(%v): more than one health check on the backend service %v, %v", loadBalancerName, existingBackendService.Name, existingBackendService.HealthChecks) } } // updateInternalLoadBalancer is called when the list of nodes has changed. Therefore, only the instance groups // and possibly the backend service need to be updated. 
-func (gce *GCECloud) updateInternalLoadBalancer(clusterName, clusterID string, svc *v1.Service, nodes []*v1.Node) error { - gce.sharedResourceLock.Lock() - defer gce.sharedResourceLock.Unlock() +func (g *Cloud) updateInternalLoadBalancer(clusterName, clusterID string, svc *v1.Service, nodes []*v1.Node) error { + g.sharedResourceLock.Lock() + defer g.sharedResourceLock.Unlock() igName := makeInstanceGroupName(clusterID) - igLinks, err := gce.ensureInternalInstanceGroups(igName, nodes) + igLinks, err := g.ensureInternalInstanceGroups(igName, nodes) if err != nil { return err } @@ -213,113 +213,113 @@ func (gce *GCECloud) updateInternalLoadBalancer(clusterName, clusterID string, s // Generate the backend service name _, protocol := getPortsAndProtocol(svc.Spec.Ports) scheme := cloud.SchemeInternal - loadBalancerName := gce.GetLoadBalancerName(context.TODO(), clusterName, svc) + loadBalancerName := g.GetLoadBalancerName(context.TODO(), clusterName, svc) backendServiceName := makeBackendServiceName(loadBalancerName, clusterID, shareBackendService(svc), scheme, protocol, svc.Spec.SessionAffinity) // Ensure the backend service has the proper backend/instance-group links - return gce.ensureInternalBackendServiceGroups(backendServiceName, igLinks) + return g.ensureInternalBackendServiceGroups(backendServiceName, igLinks) } -func (gce *GCECloud) ensureInternalLoadBalancerDeleted(clusterName, clusterID string, svc *v1.Service) error { - loadBalancerName := gce.GetLoadBalancerName(context.TODO(), clusterName, svc) +func (g *Cloud) ensureInternalLoadBalancerDeleted(clusterName, clusterID string, svc *v1.Service) error { + loadBalancerName := g.GetLoadBalancerName(context.TODO(), clusterName, svc) _, protocol := getPortsAndProtocol(svc.Spec.Ports) scheme := cloud.SchemeInternal sharedBackend := shareBackendService(svc) sharedHealthCheck := !v1_service.RequestsOnlyLocalTraffic(svc) - gce.sharedResourceLock.Lock() - defer gce.sharedResourceLock.Unlock() + g.sharedResourceLock.Lock() + defer g.sharedResourceLock.Unlock() - glog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): attempting delete of region internal address", loadBalancerName) - ensureAddressDeleted(gce, loadBalancerName, gce.region) + klog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): attempting delete of region internal address", loadBalancerName) + ensureAddressDeleted(g, loadBalancerName, g.region) - glog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting region internal forwarding rule", loadBalancerName) - if err := ignoreNotFound(gce.DeleteRegionForwardingRule(loadBalancerName, gce.region)); err != nil { + klog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting region internal forwarding rule", loadBalancerName) + if err := ignoreNotFound(g.DeleteRegionForwardingRule(loadBalancerName, g.region)); err != nil { return err } backendServiceName := makeBackendServiceName(loadBalancerName, clusterID, sharedBackend, scheme, protocol, svc.Spec.SessionAffinity) - glog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting region backend service %v", loadBalancerName, backendServiceName) - if err := gce.teardownInternalBackendService(backendServiceName); err != nil { + klog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting region backend service %v", loadBalancerName, backendServiceName) + if err := g.teardownInternalBackendService(backendServiceName); err != nil { return err } - glog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting firewall for traffic", loadBalancerName) - if err := 
ignoreNotFound(gce.DeleteFirewall(loadBalancerName)); err != nil { - if isForbidden(err) && gce.OnXPN() { - glog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): could not delete traffic firewall on XPN cluster. Raising event.", loadBalancerName) - gce.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudDeleteCmd(loadBalancerName, gce.NetworkProjectID())) + klog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting firewall for traffic", loadBalancerName) + if err := ignoreNotFound(g.DeleteFirewall(loadBalancerName)); err != nil { + if isForbidden(err) && g.OnXPN() { + klog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): could not delete traffic firewall on XPN cluster. Raising event.", loadBalancerName) + g.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudDeleteCmd(loadBalancerName, g.NetworkProjectID())) } else { return err } } hcName := makeHealthCheckName(loadBalancerName, clusterID, sharedHealthCheck) - glog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting health check %v and its firewall", loadBalancerName, hcName) - if err := gce.teardownInternalHealthCheckAndFirewall(svc, hcName); err != nil { + klog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting health check %v and its firewall", loadBalancerName, hcName) + if err := g.teardownInternalHealthCheckAndFirewall(svc, hcName); err != nil { return err } // Try deleting instance groups - expect ResourceInuse error if needed by other LBs igName := makeInstanceGroupName(clusterID) - if err := gce.ensureInternalInstanceGroupsDeleted(igName); err != nil && !isInUsedByError(err) { + if err := g.ensureInternalInstanceGroupsDeleted(igName); err != nil && !isInUsedByError(err) { return err } return nil } -func (gce *GCECloud) teardownInternalBackendService(bsName string) error { - if err := gce.DeleteRegionBackendService(bsName, gce.region); err != nil { +func (g *Cloud) teardownInternalBackendService(bsName string) error { + if err := g.DeleteRegionBackendService(bsName, g.region); err != nil { if isNotFound(err) { - glog.V(2).Infof("teardownInternalBackendService(%v): backend service already deleted. err: %v", bsName, err) + klog.V(2).Infof("teardownInternalBackendService(%v): backend service already deleted. 
err: %v", bsName, err) return nil } else if isInUsedByError(err) { - glog.V(2).Infof("teardownInternalBackendService(%v): backend service in use.", bsName) + klog.V(2).Infof("teardownInternalBackendService(%v): backend service in use.", bsName) return nil } else { return fmt.Errorf("failed to delete backend service: %v, err: %v", bsName, err) } } - glog.V(2).Infof("teardownInternalBackendService(%v): backend service deleted", bsName) + klog.V(2).Infof("teardownInternalBackendService(%v): backend service deleted", bsName) return nil } -func (gce *GCECloud) teardownInternalHealthCheckAndFirewall(svc *v1.Service, hcName string) error { - if err := gce.DeleteHealthCheck(hcName); err != nil { +func (g *Cloud) teardownInternalHealthCheckAndFirewall(svc *v1.Service, hcName string) error { + if err := g.DeleteHealthCheck(hcName); err != nil { if isNotFound(err) { - glog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): health check does not exist.", hcName) + klog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): health check does not exist.", hcName) // Purposely do not early return - double check the firewall does not exist } else if isInUsedByError(err) { - glog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): health check in use.", hcName) + klog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): health check in use.", hcName) return nil } else { return fmt.Errorf("failed to delete health check: %v, err: %v", hcName, err) } } - glog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): health check deleted", hcName) + klog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): health check deleted", hcName) hcFirewallName := makeHealthCheckFirewallNameFromHC(hcName) - if err := ignoreNotFound(gce.DeleteFirewall(hcFirewallName)); err != nil { - if isForbidden(err) && gce.OnXPN() { - glog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): could not delete health check traffic firewall on XPN cluster. Raising Event.", hcName) - gce.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudDeleteCmd(hcFirewallName, gce.NetworkProjectID())) + if err := ignoreNotFound(g.DeleteFirewall(hcFirewallName)); err != nil { + if isForbidden(err) && g.OnXPN() { + klog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): could not delete health check traffic firewall on XPN cluster. 
Raising Event.", hcName) + g.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudDeleteCmd(hcFirewallName, g.NetworkProjectID())) return nil } return fmt.Errorf("failed to delete health check firewall: %v, err: %v", hcFirewallName, err) } - glog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): health check firewall deleted", hcFirewallName) + klog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): health check firewall deleted", hcFirewallName) return nil } -func (gce *GCECloud) ensureInternalFirewall(svc *v1.Service, fwName, fwDesc string, sourceRanges []string, ports []string, protocol v1.Protocol, nodes []*v1.Node) error { - glog.V(2).Infof("ensureInternalFirewall(%v): checking existing firewall", fwName) - targetTags, err := gce.GetNodeTags(nodeNames(nodes)) +func (g *Cloud) ensureInternalFirewall(svc *v1.Service, fwName, fwDesc string, sourceRanges []string, ports []string, protocol v1.Protocol, nodes []*v1.Node) error { + klog.V(2).Infof("ensureInternalFirewall(%v): checking existing firewall", fwName) + targetTags, err := g.GetNodeTags(nodeNames(nodes)) if err != nil { return err } - existingFirewall, err := gce.GetFirewall(fwName) + existingFirewall, err := g.GetFirewall(fwName) if err != nil && !isNotFound(err) { return err } @@ -327,7 +327,7 @@ func (gce *GCECloud) ensureInternalFirewall(svc *v1.Service, fwName, fwDesc stri expectedFirewall := &compute.Firewall{ Name: fwName, Description: fwDesc, - Network: gce.networkURL, + Network: g.networkURL, SourceRanges: sourceRanges, TargetTags: targetTags, Allowed: []*compute.FirewallAllowed{ @@ -339,11 +339,11 @@ func (gce *GCECloud) ensureInternalFirewall(svc *v1.Service, fwName, fwDesc stri } if existingFirewall == nil { - glog.V(2).Infof("ensureInternalFirewall(%v): creating firewall", fwName) - err = gce.CreateFirewall(expectedFirewall) - if err != nil && isForbidden(err) && gce.OnXPN() { - glog.V(2).Infof("ensureInternalFirewall(%v): do not have permission to create firewall rule (on XPN). Raising event.", fwName) - gce.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudCreateCmd(expectedFirewall, gce.NetworkProjectID())) + klog.V(2).Infof("ensureInternalFirewall(%v): creating firewall", fwName) + err = g.CreateFirewall(expectedFirewall) + if err != nil && isForbidden(err) && g.OnXPN() { + klog.V(2).Infof("ensureInternalFirewall(%v): do not have permission to create firewall rule (on XPN). Raising event.", fwName) + g.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudCreateCmd(expectedFirewall, g.NetworkProjectID())) return nil } return err @@ -353,17 +353,17 @@ func (gce *GCECloud) ensureInternalFirewall(svc *v1.Service, fwName, fwDesc stri return nil } - glog.V(2).Infof("ensureInternalFirewall(%v): updating firewall", fwName) - err = gce.UpdateFirewall(expectedFirewall) - if err != nil && isForbidden(err) && gce.OnXPN() { - glog.V(2).Infof("ensureInternalFirewall(%v): do not have permission to update firewall rule (on XPN). Raising event.", fwName) - gce.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudUpdateCmd(expectedFirewall, gce.NetworkProjectID())) + klog.V(2).Infof("ensureInternalFirewall(%v): updating firewall", fwName) + err = g.UpdateFirewall(expectedFirewall) + if err != nil && isForbidden(err) && g.OnXPN() { + klog.V(2).Infof("ensureInternalFirewall(%v): do not have permission to update firewall rule (on XPN). 
Raising event.", fwName) + g.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudUpdateCmd(expectedFirewall, g.NetworkProjectID())) return nil } return err } -func (gce *GCECloud) ensureInternalFirewalls(loadBalancerName, ipAddress, clusterID string, nm types.NamespacedName, svc *v1.Service, healthCheckPort string, sharedHealthCheck bool, nodes []*v1.Node) error { +func (g *Cloud) ensureInternalFirewalls(loadBalancerName, ipAddress, clusterID string, nm types.NamespacedName, svc *v1.Service, healthCheckPort string, sharedHealthCheck bool, nodes []*v1.Node) error { // First firewall is for ingress traffic fwDesc := makeFirewallDescription(nm.String(), ipAddress) ports, protocol := getPortsAndProtocol(svc.Spec.Ports) @@ -371,7 +371,7 @@ func (gce *GCECloud) ensureInternalFirewalls(loadBalancerName, ipAddress, cluste if err != nil { return err } - err = gce.ensureInternalFirewall(svc, loadBalancerName, fwDesc, sourceRanges.StringSlice(), ports, protocol, nodes) + err = g.ensureInternalFirewall(svc, loadBalancerName, fwDesc, sourceRanges.StringSlice(), ports, protocol, nodes) if err != nil { return err } @@ -379,48 +379,51 @@ func (gce *GCECloud) ensureInternalFirewalls(loadBalancerName, ipAddress, cluste // Second firewall is for health checking nodes / services fwHCName := makeHealthCheckFirewallName(loadBalancerName, clusterID, sharedHealthCheck) hcSrcRanges := LoadBalancerSrcRanges() - return gce.ensureInternalFirewall(svc, fwHCName, "", hcSrcRanges, []string{healthCheckPort}, v1.ProtocolTCP, nodes) + return g.ensureInternalFirewall(svc, fwHCName, "", hcSrcRanges, []string{healthCheckPort}, v1.ProtocolTCP, nodes) } -func (gce *GCECloud) ensureInternalHealthCheck(name string, svcName types.NamespacedName, shared bool, path string, port int32) (*compute.HealthCheck, error) { - glog.V(2).Infof("ensureInternalHealthCheck(%v, %v, %v): checking existing health check", name, path, port) +func (g *Cloud) ensureInternalHealthCheck(name string, svcName types.NamespacedName, shared bool, path string, port int32) (*compute.HealthCheck, error) { + klog.V(2).Infof("ensureInternalHealthCheck(%v, %v, %v): checking existing health check", name, path, port) expectedHC := newInternalLBHealthCheck(name, svcName, shared, path, port) - hc, err := gce.GetHealthCheck(name) + hc, err := g.GetHealthCheck(name) if err != nil && !isNotFound(err) { return nil, err } if hc == nil { - glog.V(2).Infof("ensureInternalHealthCheck: did not find health check %v, creating one with port %v path %v", name, port, path) - if err = gce.CreateHealthCheck(expectedHC); err != nil { + klog.V(2).Infof("ensureInternalHealthCheck: did not find health check %v, creating one with port %v path %v", name, port, path) + if err = g.CreateHealthCheck(expectedHC); err != nil { return nil, err } - hc, err = gce.GetHealthCheck(name) + hc, err = g.GetHealthCheck(name) if err != nil { - glog.Errorf("Failed to get http health check %v", err) + klog.Errorf("Failed to get http health check %v", err) return nil, err } - glog.V(2).Infof("ensureInternalHealthCheck: created health check %v", name) + klog.V(2).Infof("ensureInternalHealthCheck: created health check %v", name) return hc, nil } - if healthChecksEqual(expectedHC, hc) { - return hc, nil - } - - glog.V(2).Infof("ensureInternalHealthCheck: health check %v exists but parameters have drifted - updating...", name) - if err := gce.UpdateHealthCheck(expectedHC); err != nil { - glog.Warningf("Failed to reconcile http health check %v parameters", name) - return nil, err + if needToUpdateHealthChecks(hc, 
expectedHC) { + klog.V(2).Infof("ensureInternalHealthCheck: health check %v exists but parameters have drifted - updating...", name) + expectedHC = mergeHealthChecks(hc, expectedHC) + if err := g.UpdateHealthCheck(expectedHC); err != nil { + klog.Warningf("Failed to reconcile http health check %v parameters", name) + return nil, err + } + klog.V(2).Infof("ensureInternalHealthCheck: corrected health check %v parameters successful", name) + hc, err = g.GetHealthCheck(name) + if err != nil { + return nil, err + } } - glog.V(2).Infof("ensureInternalHealthCheck: corrected health check %v parameters successful", name) return hc, nil } -func (gce *GCECloud) ensureInternalInstanceGroup(name, zone string, nodes []*v1.Node) (string, error) { - glog.V(2).Infof("ensureInternalInstanceGroup(%v, %v): checking group that it contains %v nodes", name, zone, len(nodes)) - ig, err := gce.GetInstanceGroup(name, zone) +func (g *Cloud) ensureInternalInstanceGroup(name, zone string, nodes []*v1.Node) (string, error) { + klog.V(2).Infof("ensureInternalInstanceGroup(%v, %v): checking group that it contains %v nodes", name, zone, len(nodes)) + ig, err := g.GetInstanceGroup(name, zone) if err != nil && !isNotFound(err) { return "", err } @@ -432,18 +435,18 @@ func (gce *GCECloud) ensureInternalInstanceGroup(name, zone string, nodes []*v1. gceNodes := sets.NewString() if ig == nil { - glog.V(2).Infof("ensureInternalInstanceGroup(%v, %v): creating instance group", name, zone) + klog.V(2).Infof("ensureInternalInstanceGroup(%v, %v): creating instance group", name, zone) newIG := &compute.InstanceGroup{Name: name} - if err = gce.CreateInstanceGroup(newIG, zone); err != nil { + if err = g.CreateInstanceGroup(newIG, zone); err != nil { return "", err } - ig, err = gce.GetInstanceGroup(name, zone) + ig, err = g.GetInstanceGroup(name, zone) if err != nil { return "", err } } else { - instances, err := gce.ListInstancesInInstanceGroup(name, zone, allInstances) + instances, err := g.ListInstancesInInstanceGroup(name, zone, allInstances) if err != nil { return "", err } @@ -458,18 +461,18 @@ func (gce *GCECloud) ensureInternalInstanceGroup(name, zone string, nodes []*v1. addNodes := kubeNodes.Difference(gceNodes).List() if len(removeNodes) != 0 { - glog.V(2).Infof("ensureInternalInstanceGroup(%v, %v): removing nodes: %v", name, zone, removeNodes) - instanceRefs := gce.ToInstanceReferences(zone, removeNodes) + klog.V(2).Infof("ensureInternalInstanceGroup(%v, %v): removing nodes: %v", name, zone, removeNodes) + instanceRefs := g.ToInstanceReferences(zone, removeNodes) // Possible we'll receive 404's here if the instance was deleted before getting to this point. - if err = gce.RemoveInstancesFromInstanceGroup(name, zone, instanceRefs); err != nil && !isNotFound(err) { + if err = g.RemoveInstancesFromInstanceGroup(name, zone, instanceRefs); err != nil && !isNotFound(err) { return "", err } } if len(addNodes) != 0 { - glog.V(2).Infof("ensureInternalInstanceGroup(%v, %v): adding nodes: %v", name, zone, addNodes) - instanceRefs := gce.ToInstanceReferences(zone, addNodes) - if err = gce.AddInstancesToInstanceGroup(name, zone, instanceRefs); err != nil { + klog.V(2).Infof("ensureInternalInstanceGroup(%v, %v): adding nodes: %v", name, zone, addNodes) + instanceRefs := g.ToInstanceReferences(zone, addNodes) + if err = g.AddInstancesToInstanceGroup(name, zone, instanceRefs); err != nil { return "", err } } @@ -479,12 +482,12 @@ func (gce *GCECloud) ensureInternalInstanceGroup(name, zone string, nodes []*v1. 
// ensureInternalInstanceGroups generates an unmanaged instance group for every zone // where a K8s node exists. It also ensures that each node belongs to an instance group -func (gce *GCECloud) ensureInternalInstanceGroups(name string, nodes []*v1.Node) ([]string, error) { +func (g *Cloud) ensureInternalInstanceGroups(name string, nodes []*v1.Node) ([]string, error) { zonedNodes := splitNodesByZone(nodes) - glog.V(2).Infof("ensureInternalInstanceGroups(%v): %d nodes over %d zones in region %v", name, len(nodes), len(zonedNodes), gce.region) + klog.V(2).Infof("ensureInternalInstanceGroups(%v): %d nodes over %d zones in region %v", name, len(nodes), len(zonedNodes), g.region) var igLinks []string for zone, nodes := range zonedNodes { - igLink, err := gce.ensureInternalInstanceGroup(name, zone, nodes) + igLink, err := g.ensureInternalInstanceGroup(name, zone, nodes) if err != nil { return []string{}, err } @@ -494,25 +497,25 @@ func (gce *GCECloud) ensureInternalInstanceGroups(name string, nodes []*v1.Node) return igLinks, nil } -func (gce *GCECloud) ensureInternalInstanceGroupsDeleted(name string) error { +func (g *Cloud) ensureInternalInstanceGroupsDeleted(name string) error { // List of nodes isn't available here - fetch all zones in region and try deleting this cluster's ig - zones, err := gce.ListZonesInRegion(gce.region) + zones, err := g.ListZonesInRegion(g.region) if err != nil { return err } - glog.V(2).Infof("ensureInternalInstanceGroupsDeleted(%v): attempting delete instance group in all %d zones", name, len(zones)) + klog.V(2).Infof("ensureInternalInstanceGroupsDeleted(%v): attempting delete instance group in all %d zones", name, len(zones)) for _, z := range zones { - if err := gce.DeleteInstanceGroup(name, z.Name); err != nil && !isNotFoundOrInUse(err) { + if err := g.DeleteInstanceGroup(name, z.Name); err != nil && !isNotFoundOrInUse(err) { return err } } return nil } -func (gce *GCECloud) ensureInternalBackendService(name, description string, affinityType v1.ServiceAffinity, scheme cloud.LbScheme, protocol v1.Protocol, igLinks []string, hcLink string) error { - glog.V(2).Infof("ensureInternalBackendService(%v, %v, %v): checking existing backend service with %d groups", name, scheme, protocol, len(igLinks)) - bs, err := gce.GetRegionBackendService(name, gce.region) +func (g *Cloud) ensureInternalBackendService(name, description string, affinityType v1.ServiceAffinity, scheme cloud.LbScheme, protocol v1.Protocol, igLinks []string, hcLink string) error { + klog.V(2).Infof("ensureInternalBackendService(%v, %v, %v): checking existing backend service with %d groups", name, scheme, protocol, len(igLinks)) + bs, err := g.GetRegionBackendService(name, g.region) if err != nil && !isNotFound(err) { return err } @@ -530,12 +533,12 @@ func (gce *GCECloud) ensureInternalBackendService(name, description string, affi // Create backend service if none was found if bs == nil { - glog.V(2).Infof("ensureInternalBackendService: creating backend service %v", name) - err := gce.CreateRegionBackendService(expectedBS, gce.region) + klog.V(2).Infof("ensureInternalBackendService: creating backend service %v", name) + err := g.CreateRegionBackendService(expectedBS, g.region) if err != nil { return err } - glog.V(2).Infof("ensureInternalBackendService: created backend service %v successfully", name) + klog.V(2).Infof("ensureInternalBackendService: created backend service %v successfully", name) return nil } @@ -543,20 +546,20 @@ func (gce *GCECloud) ensureInternalBackendService(name, description string, 
affi return nil } - glog.V(2).Infof("ensureInternalBackendService: updating backend service %v", name) + klog.V(2).Infof("ensureInternalBackendService: updating backend service %v", name) // Set fingerprint for optimistic locking expectedBS.Fingerprint = bs.Fingerprint - if err := gce.UpdateRegionBackendService(expectedBS, gce.region); err != nil { + if err := g.UpdateRegionBackendService(expectedBS, g.region); err != nil { return err } - glog.V(2).Infof("ensureInternalBackendService: updated backend service %v successfully", name) + klog.V(2).Infof("ensureInternalBackendService: updated backend service %v successfully", name) return nil } // ensureInternalBackendServiceGroups updates backend services if their list of backend instance groups is incorrect. -func (gce *GCECloud) ensureInternalBackendServiceGroups(name string, igLinks []string) error { - glog.V(2).Infof("ensureInternalBackendServiceGroups(%v): checking existing backend service's groups", name) - bs, err := gce.GetRegionBackendService(name, gce.region) +func (g *Cloud) ensureInternalBackendServiceGroups(name string, igLinks []string) error { + klog.V(2).Infof("ensureInternalBackendServiceGroups(%v): checking existing backend service's groups", name) + bs, err := g.GetRegionBackendService(name, g.region) if err != nil { return err } @@ -569,11 +572,11 @@ func (gce *GCECloud) ensureInternalBackendServiceGroups(name string, igLinks []s // Set the backend service's backends to the updated list. bs.Backends = backends - glog.V(2).Infof("ensureInternalBackendServiceGroups: updating backend service %v", name) - if err := gce.UpdateRegionBackendService(bs, gce.region); err != nil { + klog.V(2).Infof("ensureInternalBackendServiceGroups: updating backend service %v", name) + if err := g.UpdateRegionBackendService(bs, g.region); err != nil { return err } - glog.V(2).Infof("ensureInternalBackendServiceGroups: updated backend service %v successfully", name) + klog.V(2).Infof("ensureInternalBackendServiceGroups: updated backend service %v successfully", name) return nil } @@ -620,15 +623,37 @@ func firewallRuleEqual(a, b *compute.Firewall) bool { equalStringSets(a.TargetTags, b.TargetTags) } -func healthChecksEqual(a, b *compute.HealthCheck) bool { - return a.HttpHealthCheck != nil && b.HttpHealthCheck != nil && - a.HttpHealthCheck.Port == b.HttpHealthCheck.Port && - a.HttpHealthCheck.RequestPath == b.HttpHealthCheck.RequestPath && - a.Description == b.Description && - a.CheckIntervalSec == b.CheckIntervalSec && - a.TimeoutSec == b.TimeoutSec && - a.UnhealthyThreshold == b.UnhealthyThreshold && - a.HealthyThreshold == b.HealthyThreshold +// mergeHealthChecks reconciles HealthCheck configuration to be no smaller than +// the default values. +// E.g. if the old health check interval is 2s and the new default is 8s, +// the interval will be reconciled to 8s. +// If an existing value is larger than the default, +// the existing configuration is kept. +func mergeHealthChecks(hc, newHC *compute.HealthCheck) *compute.HealthCheck { + if hc.CheckIntervalSec > newHC.CheckIntervalSec { + newHC.CheckIntervalSec = hc.CheckIntervalSec + } + if hc.TimeoutSec > newHC.TimeoutSec { + newHC.TimeoutSec = hc.TimeoutSec + } + if hc.UnhealthyThreshold > newHC.UnhealthyThreshold { + newHC.UnhealthyThreshold = hc.UnhealthyThreshold + } + if hc.HealthyThreshold > newHC.HealthyThreshold { + newHC.HealthyThreshold = hc.HealthyThreshold + } + return newHC +} + +// needToUpdateHealthChecks checks whether the health check needs to be updated.
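The replacement of `healthChecksEqual` above changes the reconcile semantics: rather than flagging any drift and rewriting the health check back to controller defaults, `mergeHealthChecks` keeps whichever of the existing and default values is larger, so intervals an operator deliberately raised survive reconciliation. A test-style sketch of the merge (assuming the `compute` and `testing` imports; values illustrative):

```go
func TestMergeHealthChecksKeepsLargerValues(t *testing.T) {
	existing := &compute.HealthCheck{CheckIntervalSec: 60, TimeoutSec: 1}
	defaults := &compute.HealthCheck{CheckIntervalSec: 8, TimeoutSec: 5}

	merged := mergeHealthChecks(existing, defaults)

	// The operator-raised interval is kept...
	if merged.CheckIntervalSec != 60 {
		t.Errorf("CheckIntervalSec = %d, want 60", merged.CheckIntervalSec)
	}
	// ...while a value below the default is raised back up to it.
	if merged.TimeoutSec != 5 {
		t.Errorf("TimeoutSec = %d, want 5", merged.TimeoutSec)
	}
}
```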
+func needToUpdateHealthChecks(hc, newHC *compute.HealthCheck) bool { + if hc.HttpHealthCheck == nil || newHC.HttpHealthCheck == nil { + return true + } + changed := hc.HttpHealthCheck.Port != newHC.HttpHealthCheck.Port || hc.HttpHealthCheck.RequestPath != newHC.HttpHealthCheck.RequestPath || hc.Description != newHC.Description + changed = changed || hc.CheckIntervalSec < newHC.CheckIntervalSec || hc.TimeoutSec < newHC.TimeoutSec + changed = changed || hc.UnhealthyThreshold < newHC.UnhealthyThreshold || hc.HealthyThreshold < newHC.HealthyThreshold + return changed } // backendsListEqual asserts that backend lists are equal by instance group link only @@ -682,8 +707,8 @@ func getPortsAndProtocol(svcPorts []v1.ServicePort) (ports []string, protocol v1 return ports, protocol } -func (gce *GCECloud) getBackendServiceLink(name string) string { - return gce.service.BasePath + strings.Join([]string{gce.projectID, "regions", gce.region, "backendServices", name}, "/") +func (g *Cloud) getBackendServiceLink(name string) string { + return g.service.BasePath + strings.Join([]string{g.projectID, "regions", g.region, "backendServices", name}, "/") } func getNameFromLink(link string) string { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_networkendpointgroup.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_networkendpointgroup.go index f82781e08d9b..b7aa998cc34e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_networkendpointgroup.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_networkendpointgroup.go @@ -27,40 +27,38 @@ import ( "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" ) -const ( - NEGIPPortNetworkEndpointType = "GCE_VM_IP_PORT" -) - func newNetworkEndpointGroupMetricContext(request string, zone string) *metricContext { return newGenericMetricContext("networkendpointgroup_", request, unusedMetricLabel, zone, computeBetaVersion) } -func (gce *GCECloud) GetNetworkEndpointGroup(name string, zone string) (*computebeta.NetworkEndpointGroup, error) { +// GetNetworkEndpointGroup returns the collection of network endpoints for the name in zone +func (g *Cloud) GetNetworkEndpointGroup(name string, zone string) (*computebeta.NetworkEndpointGroup, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newNetworkEndpointGroupMetricContext("get", zone) - v, err := gce.c.BetaNetworkEndpointGroups().Get(ctx, meta.ZonalKey(name, zone)) + v, err := g.c.BetaNetworkEndpointGroups().Get(ctx, meta.ZonalKey(name, zone)) return v, mc.Observe(err) } -func (gce *GCECloud) ListNetworkEndpointGroup(zone string) ([]*computebeta.NetworkEndpointGroup, error) { +// ListNetworkEndpointGroup returns the collection of network endpoints for the zone +func (g *Cloud) ListNetworkEndpointGroup(zone string) ([]*computebeta.NetworkEndpointGroup, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newNetworkEndpointGroupMetricContext("list", zone) - negs, err := gce.c.BetaNetworkEndpointGroups().List(ctx, zone, filter.None) + negs, err := g.c.BetaNetworkEndpointGroups().List(ctx, zone, filter.None) return negs, mc.Observe(err) } // AggregatedListNetworkEndpointGroup returns a map of zone -> endpoint group. 
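The `needToUpdateHealthChecks` helper introduced above is deliberately one-directional, pairing with the merge: identity fields (port, request path, description) trigger an update on any difference, but the numeric thresholds only do so when the existing value is *smaller* than the desired default. A fragment illustrating both directions (assuming the compute client's nested `HTTPHealthCheck` type; values illustrative):

```go
tuned := &compute.HealthCheck{
	HttpHealthCheck:  &compute.HTTPHealthCheck{Port: 30000, RequestPath: "/healthz"},
	CheckIntervalSec: 60, // raised above the default by an operator
}
def := &compute.HealthCheck{
	HttpHealthCheck:  &compute.HTTPHealthCheck{Port: 30000, RequestPath: "/healthz"},
	CheckIntervalSec: 8, // controller default
}

_ = needToUpdateHealthChecks(tuned, def) // false: no field fell below the default
_ = needToUpdateHealthChecks(def, tuned) // true: interval is smaller than desired
```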
-func (gce *GCECloud) AggregatedListNetworkEndpointGroup() (map[string][]*computebeta.NetworkEndpointGroup, error) { +func (g *Cloud) AggregatedListNetworkEndpointGroup() (map[string][]*computebeta.NetworkEndpointGroup, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newNetworkEndpointGroupMetricContext("aggregated_list", "") // TODO: filter for the region the cluster is in. - all, err := gce.c.BetaNetworkEndpointGroups().AggregatedList(ctx, filter.None) + all, err := g.c.BetaNetworkEndpointGroups().AggregatedList(ctx, filter.None) if err != nil { return nil, mc.Observe(err) } @@ -77,23 +75,26 @@ func (gce *GCECloud) AggregatedListNetworkEndpointGroup() (map[string][]*compute return ret, mc.Observe(nil) } -func (gce *GCECloud) CreateNetworkEndpointGroup(neg *computebeta.NetworkEndpointGroup, zone string) error { +// CreateNetworkEndpointGroup creates an endpoint group in the zone +func (g *Cloud) CreateNetworkEndpointGroup(neg *computebeta.NetworkEndpointGroup, zone string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newNetworkEndpointGroupMetricContext("create", zone) - return mc.Observe(gce.c.BetaNetworkEndpointGroups().Insert(ctx, meta.ZonalKey(neg.Name, zone), neg)) + return mc.Observe(g.c.BetaNetworkEndpointGroups().Insert(ctx, meta.ZonalKey(neg.Name, zone), neg)) } -func (gce *GCECloud) DeleteNetworkEndpointGroup(name string, zone string) error { +// DeleteNetworkEndpointGroup deletes the name endpoint group from the zone +func (g *Cloud) DeleteNetworkEndpointGroup(name string, zone string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newNetworkEndpointGroupMetricContext("delete", zone) - return mc.Observe(gce.c.BetaNetworkEndpointGroups().Delete(ctx, meta.ZonalKey(name, zone))) + return mc.Observe(g.c.BetaNetworkEndpointGroups().Delete(ctx, meta.ZonalKey(name, zone))) } -func (gce *GCECloud) AttachNetworkEndpoints(name, zone string, endpoints []*computebeta.NetworkEndpoint) error { +// AttachNetworkEndpoints associates the referenced endpoints with the named endpoint group in the zone +func (g *Cloud) AttachNetworkEndpoints(name, zone string, endpoints []*computebeta.NetworkEndpoint) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() @@ -101,10 +102,11 @@ func (gce *GCECloud) AttachNetworkEndpoints(name, zone string, endpoints []*comp req := &computebeta.NetworkEndpointGroupsAttachEndpointsRequest{ NetworkEndpoints: endpoints, } - return mc.Observe(gce.c.BetaNetworkEndpointGroups().AttachNetworkEndpoints(ctx, meta.ZonalKey(name, zone), req)) + return mc.Observe(g.c.BetaNetworkEndpointGroups().AttachNetworkEndpoints(ctx, meta.ZonalKey(name, zone), req)) } -func (gce *GCECloud) DetachNetworkEndpoints(name, zone string, endpoints []*computebeta.NetworkEndpoint) error { +// DetachNetworkEndpoints breaks the association between the referenced endpoints and the named endpoint group in the zone +func (g *Cloud) DetachNetworkEndpoints(name, zone string, endpoints []*computebeta.NetworkEndpoint) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() @@ -112,10 +114,11 @@ func (gce *GCECloud) DetachNetworkEndpoints(name, zone string, endpoints []*comp req := &computebeta.NetworkEndpointGroupsDetachEndpointsRequest{ NetworkEndpoints: endpoints, } - return mc.Observe(gce.c.BetaNetworkEndpointGroups().DetachNetworkEndpoints(ctx, meta.ZonalKey(name, zone), req)) + return mc.Observe(g.c.BetaNetworkEndpointGroups().DetachNetworkEndpoints(ctx, meta.ZonalKey(name, zone), req)) } 
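Every method in this file follows the accounting idiom visible above: build a metric context for the request, make the API call, and return the error through `mc.Observe` so latency and outcome are recorded exactly once on every return path. A self-contained sketch of that shape (the metrics sink is hypothetical):

```go
type metricCtx struct {
	start   time.Time
	request string
}

func newMetricCtx(request string) *metricCtx {
	return &metricCtx{start: time.Now(), request: request}
}

// observe records duration and outcome, then passes the error through so it
// can be returned inline: `return mc.observe(apiCall())`.
func (mc *metricCtx) observe(err error) error {
	recordLatency(mc.request, time.Since(mc.start), err) // hypothetical sink
	return err
}
```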
-func (gce *GCECloud) ListNetworkEndpoints(name, zone string, showHealthStatus bool) ([]*computebeta.NetworkEndpointWithHealthStatus, error) { +// ListNetworkEndpoints returns all the endpoints associated with the endpoint group in zone and optionally their status. +func (g *Cloud) ListNetworkEndpoints(name, zone string, showHealthStatus bool) ([]*computebeta.NetworkEndpointWithHealthStatus, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() @@ -127,6 +130,6 @@ func (gce *GCECloud) ListNetworkEndpoints(name, zone string, showHealthStatus bo req := &computebeta.NetworkEndpointGroupsListEndpointsRequest{ HealthStatus: healthStatus, } - l, err := gce.c.BetaNetworkEndpointGroups().ListNetworkEndpoints(ctx, meta.ZonalKey(name, zone), req, filter.None) + l, err := g.c.BetaNetworkEndpointGroups().ListNetworkEndpoints(ctx, meta.ZonalKey(name, zone), req, filter.None) return l, mc.Observe(err) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_routes.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_routes.go index 624b581cbd94..cc3cbfc53a04 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_routes.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_routes.go @@ -22,11 +22,11 @@ import ( "net/http" "path" - "github.com/golang/glog" compute "google.golang.org/api/compute/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" @@ -37,14 +37,14 @@ func newRoutesMetricContext(request string) *metricContext { } // ListRoutes in the cloud environment. -func (gce *GCECloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) { +func (g *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newRoutesMetricContext("list") prefix := truncateClusterName(clusterName) - f := filter.Regexp("name", prefix+"-.*").AndRegexp("network", gce.NetworkURL()).AndRegexp("description", k8sNodeRouteTag) - routes, err := gce.c.Routes().List(ctx, f) + f := filter.Regexp("name", prefix+"-.*").AndRegexp("network", g.NetworkURL()).AndRegexp("description", k8sNodeRouteTag) + routes, err := g.c.Routes().List(ctx, f) if err != nil { return nil, mc.Observe(err) } @@ -63,13 +63,13 @@ func (gce *GCECloud) ListRoutes(ctx context.Context, clusterName string) ([]*clo } // CreateRoute in the cloud environment. 
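`ListRoutes` above scopes the listing server-side with a chained regexp filter rather than filtering client-side: the route name must carry the (truncated) cluster prefix, the route must sit on the cluster's network, and the description must carry the Kubernetes node-route tag. A sketch with illustrative values in place of the runtime ones:

```go
// Matches only this cluster's node routes; all three conditions must hold.
f := filter.Regexp("name", "mycluster-.*").
	AndRegexp("network", "projects/my-project/global/networks/default").
	AndRegexp("description", "k8s-node-route")
routes, err := g.c.Routes().List(ctx, f) // one API call, already narrowed
```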
-func (gce *GCECloud) CreateRoute(ctx context.Context, clusterName string, nameHint string, route *cloudprovider.Route) error { +func (g *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint string, route *cloudprovider.Route) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newRoutesMetricContext("create") - targetInstance, err := gce.getInstanceByName(mapNodeNameToInstanceName(route.TargetNode)) + targetInstance, err := g.getInstanceByName(mapNodeNameToInstanceName(route.TargetNode)) if err != nil { return mc.Observe(err) } @@ -77,25 +77,25 @@ func (gce *GCECloud) CreateRoute(ctx context.Context, clusterName string, nameHi Name: truncateClusterName(clusterName) + "-" + nameHint, DestRange: route.DestinationCIDR, NextHopInstance: fmt.Sprintf("zones/%s/instances/%s", targetInstance.Zone, targetInstance.Name), - Network: gce.NetworkURL(), + Network: g.NetworkURL(), Priority: 1000, Description: k8sNodeRouteTag, } - err = gce.c.Routes().Insert(ctx, meta.GlobalKey(cr.Name), cr) + err = g.c.Routes().Insert(ctx, meta.GlobalKey(cr.Name), cr) if isHTTPErrorCode(err, http.StatusConflict) { - glog.Infof("Route %q already exists.", cr.Name) + klog.Infof("Route %q already exists.", cr.Name) err = nil } return mc.Observe(err) } // DeleteRoute from the cloud environment. -func (gce *GCECloud) DeleteRoute(ctx context.Context, clusterName string, route *cloudprovider.Route) error { +func (g *Cloud) DeleteRoute(ctx context.Context, clusterName string, route *cloudprovider.Route) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newRoutesMetricContext("delete") - return mc.Observe(gce.c.Routes().Delete(ctx, meta.GlobalKey(route.Name))) + return mc.Observe(g.c.Routes().Delete(ctx, meta.GlobalKey(route.Name))) } func truncateClusterName(clusterName string) string { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_securitypolicy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_securitypolicy.go index 293946590b3d..3ce2f71fa540 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_securitypolicy.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_securitypolicy.go @@ -29,88 +29,88 @@ func newSecurityPolicyMetricContextWithVersion(request, version string) *metricC } // GetBetaSecurityPolicy retrieves a security policy. -func (gce *GCECloud) GetBetaSecurityPolicy(name string) (*computebeta.SecurityPolicy, error) { +func (g *Cloud) GetBetaSecurityPolicy(name string) (*computebeta.SecurityPolicy, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newSecurityPolicyMetricContextWithVersion("get", computeBetaVersion) - v, err := gce.c.BetaSecurityPolicies().Get(ctx, meta.GlobalKey(name)) + v, err := g.c.BetaSecurityPolicies().Get(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } // ListBetaSecurityPolicy lists all security policies in the project. -func (gce *GCECloud) ListBetaSecurityPolicy() ([]*computebeta.SecurityPolicy, error) { +func (g *Cloud) ListBetaSecurityPolicy() ([]*computebeta.SecurityPolicy, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newSecurityPolicyMetricContextWithVersion("list", computeBetaVersion) - v, err := gce.c.BetaSecurityPolicies().List(ctx, filter.None) + v, err := g.c.BetaSecurityPolicies().List(ctx, filter.None) return v, mc.Observe(err) } // CreateBetaSecurityPolicy creates the given security policy. 
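`CreateRoute` above treats an HTTP 409 Conflict as success: if another controller loop already inserted the route, the desired state holds and the error is swallowed. A generic sketch of that idempotent-create check, using the same type assertion `isHTTPErrorCode` relies on (helper name hypothetical):

```go
// ignoreConflict turns "already exists" into success for create-as-upsert calls.
func ignoreConflict(err error) error {
	if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == http.StatusConflict {
		return nil // resource is already in the desired state
	}
	return err
}
```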
-func (gce *GCECloud) CreateBetaSecurityPolicy(sp *computebeta.SecurityPolicy) error { +func (g *Cloud) CreateBetaSecurityPolicy(sp *computebeta.SecurityPolicy) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newSecurityPolicyMetricContextWithVersion("create", computeBetaVersion) - return mc.Observe(gce.c.BetaSecurityPolicies().Insert(ctx, meta.GlobalKey(sp.Name), sp)) + return mc.Observe(g.c.BetaSecurityPolicies().Insert(ctx, meta.GlobalKey(sp.Name), sp)) } // DeleteBetaSecurityPolicy deletes the given security policy. -func (gce *GCECloud) DeleteBetaSecurityPolicy(name string) error { +func (g *Cloud) DeleteBetaSecurityPolicy(name string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newSecurityPolicyMetricContextWithVersion("delete", computeBetaVersion) - return mc.Observe(gce.c.BetaSecurityPolicies().Delete(ctx, meta.GlobalKey(name))) + return mc.Observe(g.c.BetaSecurityPolicies().Delete(ctx, meta.GlobalKey(name))) } // PatchBetaSecurityPolicy applies the given security policy as a // patch to an existing security policy. -func (gce *GCECloud) PatchBetaSecurityPolicy(sp *computebeta.SecurityPolicy) error { +func (g *Cloud) PatchBetaSecurityPolicy(sp *computebeta.SecurityPolicy) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newSecurityPolicyMetricContextWithVersion("patch", computeBetaVersion) - return mc.Observe(gce.c.BetaSecurityPolicies().Patch(ctx, meta.GlobalKey(sp.Name), sp)) + return mc.Observe(g.c.BetaSecurityPolicies().Patch(ctx, meta.GlobalKey(sp.Name), sp)) } // GetRuleForBetaSecurityPolicy gets rule from a security policy. -func (gce *GCECloud) GetRuleForBetaSecurityPolicy(name string) (*computebeta.SecurityPolicyRule, error) { +func (g *Cloud) GetRuleForBetaSecurityPolicy(name string) (*computebeta.SecurityPolicyRule, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newSecurityPolicyMetricContextWithVersion("get_rule", computeBetaVersion) - v, err := gce.c.BetaSecurityPolicies().GetRule(ctx, meta.GlobalKey(name)) + v, err := g.c.BetaSecurityPolicies().GetRule(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } // AddRuletoBetaSecurityPolicy adds the given security policy rule to // a security policy. -func (gce *GCECloud) AddRuletoBetaSecurityPolicy(name string, spr *computebeta.SecurityPolicyRule) error { +func (g *Cloud) AddRuletoBetaSecurityPolicy(name string, spr *computebeta.SecurityPolicyRule) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newSecurityPolicyMetricContextWithVersion("add_rule", computeBetaVersion) - return mc.Observe(gce.c.BetaSecurityPolicies().AddRule(ctx, meta.GlobalKey(name), spr)) + return mc.Observe(g.c.BetaSecurityPolicies().AddRule(ctx, meta.GlobalKey(name), spr)) } // PatchRuleForBetaSecurityPolicy patches the given security policy // rule to a security policy. -func (gce *GCECloud) PatchRuleForBetaSecurityPolicy(name string, spr *computebeta.SecurityPolicyRule) error { +func (g *Cloud) PatchRuleForBetaSecurityPolicy(name string, spr *computebeta.SecurityPolicyRule) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newSecurityPolicyMetricContextWithVersion("patch_rule", computeBetaVersion) - return mc.Observe(gce.c.BetaSecurityPolicies().PatchRule(ctx, meta.GlobalKey(name), spr)) + return mc.Observe(g.c.BetaSecurityPolicies().PatchRule(ctx, meta.GlobalKey(name), spr)) } // RemoveRuleFromBetaSecurityPolicy removes rule from a security policy. 
-func (gce *GCECloud) RemoveRuleFromBetaSecurityPolicy(name string) error { +func (g *Cloud) RemoveRuleFromBetaSecurityPolicy(name string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newSecurityPolicyMetricContextWithVersion("remove_rule", computeBetaVersion) - return mc.Observe(gce.c.BetaSecurityPolicies().RemoveRule(ctx, meta.GlobalKey(name))) + return mc.Observe(g.c.BetaSecurityPolicies().RemoveRule(ctx, meta.GlobalKey(name))) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_targetpool.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_targetpool.go index 8c1127e749e6..65388b44980a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_targetpool.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_targetpool.go @@ -28,35 +28,35 @@ func newTargetPoolMetricContext(request, region string) *metricContext { } // GetTargetPool returns the TargetPool by name. -func (gce *GCECloud) GetTargetPool(name, region string) (*compute.TargetPool, error) { +func (g *Cloud) GetTargetPool(name, region string) (*compute.TargetPool, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newTargetPoolMetricContext("get", region) - v, err := gce.c.TargetPools().Get(ctx, meta.RegionalKey(name, region)) + v, err := g.c.TargetPools().Get(ctx, meta.RegionalKey(name, region)) return v, mc.Observe(err) } // CreateTargetPool creates the passed TargetPool -func (gce *GCECloud) CreateTargetPool(tp *compute.TargetPool, region string) error { +func (g *Cloud) CreateTargetPool(tp *compute.TargetPool, region string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newTargetPoolMetricContext("create", region) - return mc.Observe(gce.c.TargetPools().Insert(ctx, meta.RegionalKey(tp.Name, region), tp)) + return mc.Observe(g.c.TargetPools().Insert(ctx, meta.RegionalKey(tp.Name, region), tp)) } // DeleteTargetPool deletes TargetPool by name. 
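Each target-pool method below repeats the per-call timeout idiom: every outbound GCE request gets its own bounded context from `cloud.ContextWithCallTimeout`, with `cancel` deferred so the timer is released on every return path. A generic sketch against the raw compute client (the one-hour bound is illustrative, not the vendored package's actual value):

```go
func contextWithCallTimeout() (context.Context, context.CancelFunc) {
	return context.WithTimeout(context.Background(), 1*time.Hour)
}

func getTargetPoolExample(c *compute.Service, project, region, name string) (*compute.TargetPool, error) {
	ctx, cancel := contextWithCallTimeout()
	defer cancel() // always release the timer, even on early returns
	return c.TargetPools.Get(project, region, name).Context(ctx).Do()
}
```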
-func (gce *GCECloud) DeleteTargetPool(name, region string) error { +func (g *Cloud) DeleteTargetPool(name, region string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newTargetPoolMetricContext("delete", region) - return mc.Observe(gce.c.TargetPools().Delete(ctx, meta.RegionalKey(name, region))) + return mc.Observe(g.c.TargetPools().Delete(ctx, meta.RegionalKey(name, region))) } // AddInstancesToTargetPool adds instances by link to the TargetPool -func (gce *GCECloud) AddInstancesToTargetPool(name, region string, instanceRefs []*compute.InstanceReference) error { +func (g *Cloud) AddInstancesToTargetPool(name, region string, instanceRefs []*compute.InstanceReference) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() @@ -64,11 +64,11 @@ func (gce *GCECloud) AddInstancesToTargetPool(name, region string, instanceRefs Instances: instanceRefs, } mc := newTargetPoolMetricContext("add_instances", region) - return mc.Observe(gce.c.TargetPools().AddInstance(ctx, meta.RegionalKey(name, region), req)) + return mc.Observe(g.c.TargetPools().AddInstance(ctx, meta.RegionalKey(name, region), req)) } // RemoveInstancesFromTargetPool removes instances by link to the TargetPool -func (gce *GCECloud) RemoveInstancesFromTargetPool(name, region string, instanceRefs []*compute.InstanceReference) error { +func (g *Cloud) RemoveInstancesFromTargetPool(name, region string, instanceRefs []*compute.InstanceReference) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() @@ -76,5 +76,5 @@ func (gce *GCECloud) RemoveInstancesFromTargetPool(name, region string, instance Instances: instanceRefs, } mc := newTargetPoolMetricContext("remove_instances", region) - return mc.Observe(gce.c.TargetPools().RemoveInstance(ctx, meta.RegionalKey(name, region), req)) + return mc.Observe(g.c.TargetPools().RemoveInstance(ctx, meta.RegionalKey(name, region), req)) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_targetproxy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_targetproxy.go index c5bd21aaedf5..64a4190628ea 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_targetproxy.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_targetproxy.go @@ -28,87 +28,87 @@ func newTargetProxyMetricContext(request string) *metricContext { return newGenericMetricContext("targetproxy", request, unusedMetricLabel, unusedMetricLabel, computeV1Version) } -// GetTargetHttpProxy returns the UrlMap by name. -func (gce *GCECloud) GetTargetHttpProxy(name string) (*compute.TargetHttpProxy, error) { +// GetTargetHTTPProxy returns the UrlMap by name. 
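The remaining churn in this file is a purely golint-driven rename: exported identifiers spell acronyms as uniform-case initialisms, while the GCE client's own generated type names (`TargetHttpProxy`, `UrlMap`) are left untouched. In miniature:

```go
// Before: acronyms in mixed case, flagged by golint.
func GetTargetHttpProxy(name string) {}
func SetUrlMapForTargetHttpsProxy(urlMapLink string) {}

// After: initialisms (HTTP, HTTPS, URL, and ID as in providerIDRE) keep a
// uniform case inside identifiers.
func GetTargetHTTPProxy(name string) {}
func SetURLMapForTargetHTTPSProxy(urlMapLink string) {}
```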
+func (g *Cloud) GetTargetHTTPProxy(name string) (*compute.TargetHttpProxy, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newTargetProxyMetricContext("get") - v, err := gce.c.TargetHttpProxies().Get(ctx, meta.GlobalKey(name)) + v, err := g.c.TargetHttpProxies().Get(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } -// CreateTargetHttpProxy creates a TargetHttpProxy -func (gce *GCECloud) CreateTargetHttpProxy(proxy *compute.TargetHttpProxy) error { +// CreateTargetHTTPProxy creates a TargetHttpProxy +func (g *Cloud) CreateTargetHTTPProxy(proxy *compute.TargetHttpProxy) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newTargetProxyMetricContext("create") - return mc.Observe(gce.c.TargetHttpProxies().Insert(ctx, meta.GlobalKey(proxy.Name), proxy)) + return mc.Observe(g.c.TargetHttpProxies().Insert(ctx, meta.GlobalKey(proxy.Name), proxy)) } -// SetUrlMapForTargetHttpProxy sets the given UrlMap for the given TargetHttpProxy. -func (gce *GCECloud) SetUrlMapForTargetHttpProxy(proxy *compute.TargetHttpProxy, urlMapLink string) error { +// SetURLMapForTargetHTTPProxy sets the given UrlMap for the given TargetHttpProxy. +func (g *Cloud) SetURLMapForTargetHTTPProxy(proxy *compute.TargetHttpProxy, urlMapLink string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() ref := &compute.UrlMapReference{UrlMap: urlMapLink} mc := newTargetProxyMetricContext("set_url_map") - return mc.Observe(gce.c.TargetHttpProxies().SetUrlMap(ctx, meta.GlobalKey(proxy.Name), ref)) + return mc.Observe(g.c.TargetHttpProxies().SetUrlMap(ctx, meta.GlobalKey(proxy.Name), ref)) } -// DeleteTargetHttpProxy deletes the TargetHttpProxy by name. -func (gce *GCECloud) DeleteTargetHttpProxy(name string) error { +// DeleteTargetHTTPProxy deletes the TargetHttpProxy by name. +func (g *Cloud) DeleteTargetHTTPProxy(name string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newTargetProxyMetricContext("delete") - return mc.Observe(gce.c.TargetHttpProxies().Delete(ctx, meta.GlobalKey(name))) + return mc.Observe(g.c.TargetHttpProxies().Delete(ctx, meta.GlobalKey(name))) } -// ListTargetHttpProxies lists all TargetHttpProxies in the project. -func (gce *GCECloud) ListTargetHttpProxies() ([]*compute.TargetHttpProxy, error) { +// ListTargetHTTPProxies lists all TargetHttpProxies in the project. +func (g *Cloud) ListTargetHTTPProxies() ([]*compute.TargetHttpProxy, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newTargetProxyMetricContext("list") - v, err := gce.c.TargetHttpProxies().List(ctx, filter.None) + v, err := g.c.TargetHttpProxies().List(ctx, filter.None) return v, mc.Observe(err) } // TargetHttpsProxy management -// GetTargetHttpsProxy returns the UrlMap by name. -func (gce *GCECloud) GetTargetHttpsProxy(name string) (*compute.TargetHttpsProxy, error) { +// GetTargetHTTPSProxy returns the UrlMap by name. 
+func (g *Cloud) GetTargetHTTPSProxy(name string) (*compute.TargetHttpsProxy, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newTargetProxyMetricContext("get") - v, err := gce.c.TargetHttpsProxies().Get(ctx, meta.GlobalKey(name)) + v, err := g.c.TargetHttpsProxies().Get(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } -// CreateTargetHttpsProxy creates a TargetHttpsProxy -func (gce *GCECloud) CreateTargetHttpsProxy(proxy *compute.TargetHttpsProxy) error { +// CreateTargetHTTPSProxy creates a TargetHttpsProxy +func (g *Cloud) CreateTargetHTTPSProxy(proxy *compute.TargetHttpsProxy) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newTargetProxyMetricContext("create") - return mc.Observe(gce.c.TargetHttpsProxies().Insert(ctx, meta.GlobalKey(proxy.Name), proxy)) + return mc.Observe(g.c.TargetHttpsProxies().Insert(ctx, meta.GlobalKey(proxy.Name), proxy)) } -// SetUrlMapForTargetHttpsProxy sets the given UrlMap for the given TargetHttpsProxy. -func (gce *GCECloud) SetUrlMapForTargetHttpsProxy(proxy *compute.TargetHttpsProxy, urlMapLink string) error { +// SetURLMapForTargetHTTPSProxy sets the given UrlMap for the given TargetHttpsProxy. +func (g *Cloud) SetURLMapForTargetHTTPSProxy(proxy *compute.TargetHttpsProxy, urlMapLink string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newTargetProxyMetricContext("set_url_map") ref := &compute.UrlMapReference{UrlMap: urlMapLink} - return mc.Observe(gce.c.TargetHttpsProxies().SetUrlMap(ctx, meta.GlobalKey(proxy.Name), ref)) + return mc.Observe(g.c.TargetHttpsProxies().SetUrlMap(ctx, meta.GlobalKey(proxy.Name), ref)) } -// SetSslCertificateForTargetHttpsProxy sets the given SslCertificate for the given TargetHttpsProxy. -func (gce *GCECloud) SetSslCertificateForTargetHttpsProxy(proxy *compute.TargetHttpsProxy, sslCertURLs []string) error { +// SetSslCertificateForTargetHTTPSProxy sets the given SslCertificate for the given TargetHttpsProxy. +func (g *Cloud) SetSslCertificateForTargetHTTPSProxy(proxy *compute.TargetHttpsProxy, sslCertURLs []string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() @@ -116,24 +116,24 @@ func (gce *GCECloud) SetSslCertificateForTargetHttpsProxy(proxy *compute.TargetH req := &compute.TargetHttpsProxiesSetSslCertificatesRequest{ SslCertificates: sslCertURLs, } - return mc.Observe(gce.c.TargetHttpsProxies().SetSslCertificates(ctx, meta.GlobalKey(proxy.Name), req)) + return mc.Observe(g.c.TargetHttpsProxies().SetSslCertificates(ctx, meta.GlobalKey(proxy.Name), req)) } -// DeleteTargetHttpsProxy deletes the TargetHttpsProxy by name. -func (gce *GCECloud) DeleteTargetHttpsProxy(name string) error { +// DeleteTargetHTTPSProxy deletes the TargetHttpsProxy by name. +func (g *Cloud) DeleteTargetHTTPSProxy(name string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newTargetProxyMetricContext("delete") - return mc.Observe(gce.c.TargetHttpsProxies().Delete(ctx, meta.GlobalKey(name))) + return mc.Observe(g.c.TargetHttpsProxies().Delete(ctx, meta.GlobalKey(name))) } -// ListTargetHttpsProxies lists all TargetHttpsProxies in the project. -func (gce *GCECloud) ListTargetHttpsProxies() ([]*compute.TargetHttpsProxy, error) { +// ListTargetHTTPSProxies lists all TargetHttpsProxies in the project. 
+func (g *Cloud) ListTargetHTTPSProxies() ([]*compute.TargetHttpsProxy, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newTargetProxyMetricContext("list") - v, err := gce.c.TargetHttpsProxies().List(ctx, filter.None) + v, err := g.c.TargetHttpsProxies().List(ctx, filter.None) return v, mc.Observe(err) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_tpu.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_tpu.go index 0a78f62cb3ad..b9fae1da2fd6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_tpu.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_tpu.go @@ -23,9 +23,9 @@ import ( "net/http" "time" - "github.com/golang/glog" "google.golang.org/api/googleapi" tpuapi "google.golang.org/api/tpu/v1" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/wait" ) @@ -50,20 +50,20 @@ type tpuService struct { // CreateTPU creates the Cloud TPU node with the specified name in the // specified zone. -func (gce *GCECloud) CreateTPU(ctx context.Context, name, zone string, node *tpuapi.Node) (*tpuapi.Node, error) { +func (g *Cloud) CreateTPU(ctx context.Context, name, zone string, node *tpuapi.Node) (*tpuapi.Node, error) { var err error mc := newTPUMetricContext("create", zone) defer mc.Observe(err) var op *tpuapi.Operation - parent := getTPUParentName(gce.projectID, zone) - op, err = gce.tpuService.projects.Locations.Nodes.Create(parent, node).NodeId(name).Do() + parent := getTPUParentName(g.projectID, zone) + op, err = g.tpuService.projects.Locations.Nodes.Create(parent, node).NodeId(name).Do() if err != nil { return nil, err } - glog.V(2).Infof("Creating Cloud TPU %q in zone %q with operation %q", name, zone, op.Name) + klog.V(2).Infof("Creating Cloud TPU %q in zone %q with operation %q", name, zone, op.Name) - op, err = gce.waitForTPUOp(ctx, op) + op, err = g.waitForTPUOp(ctx, op) if err != nil { return nil, err } @@ -83,20 +83,20 @@ func (gce *GCECloud) CreateTPU(ctx context.Context, name, zone string, node *tpu // DeleteTPU deletes the Cloud TPU with the specified name in the specified // zone. -func (gce *GCECloud) DeleteTPU(ctx context.Context, name, zone string) error { +func (g *Cloud) DeleteTPU(ctx context.Context, name, zone string) error { var err error mc := newTPUMetricContext("delete", zone) defer mc.Observe(err) var op *tpuapi.Operation - name = getTPUName(gce.projectID, zone, name) - op, err = gce.tpuService.projects.Locations.Nodes.Delete(name).Do() + name = getTPUName(g.projectID, zone, name) + op, err = g.tpuService.projects.Locations.Nodes.Delete(name).Do() if err != nil { return err } - glog.V(2).Infof("Deleting Cloud TPU %q in zone %q with operation %q", name, zone, op.Name) + klog.V(2).Infof("Deleting Cloud TPU %q in zone %q with operation %q", name, zone, op.Name) - op, err = gce.waitForTPUOp(ctx, op) + op, err = g.waitForTPUOp(ctx, op) if err != nil { return err } @@ -108,11 +108,11 @@ func (gce *GCECloud) DeleteTPU(ctx context.Context, name, zone string) error { } // GetTPU returns the Cloud TPU with the specified name in the specified zone. 
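`CreateTPU` and `DeleteTPU` above share the long-running-operation lifecycle: kick off the call, poll the returned operation until it reports done, then surface any operation-level error separately from the transport error. A fragment sketching that flow (`svc` and `waitForOp` are hypothetical stand-ins for the wrapped TPU service and `waitForTPUOp`):

```go
op, err := svc.Projects.Locations.Nodes.Create(parent, node).NodeId(name).Do()
if err != nil {
	return nil, err // the insert itself failed
}
op, err = waitForOp(ctx, op) // poll Operations.Get until op.Done or ctx is cancelled
if err != nil {
	return nil, err
}
if op.Error != nil { // operation finished but reported a server-side failure
	return nil, fmt.Errorf("operation %q failed: %v", op.Name, op.Error)
}
```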
-func (gce *GCECloud) GetTPU(ctx context.Context, name, zone string) (*tpuapi.Node, error) { +func (g *Cloud) GetTPU(ctx context.Context, name, zone string) (*tpuapi.Node, error) { mc := newTPUMetricContext("get", zone) - name = getTPUName(gce.projectID, zone, name) - node, err := gce.tpuService.projects.Locations.Nodes.Get(name).Do() + name = getTPUName(g.projectID, zone, name) + node, err := g.tpuService.projects.Locations.Nodes.Get(name).Do() if err != nil { return nil, mc.Observe(err) } @@ -120,11 +120,11 @@ func (gce *GCECloud) GetTPU(ctx context.Context, name, zone string) (*tpuapi.Nod } // ListTPUs returns Cloud TPUs in the specified zone. -func (gce *GCECloud) ListTPUs(ctx context.Context, zone string) ([]*tpuapi.Node, error) { +func (g *Cloud) ListTPUs(ctx context.Context, zone string) ([]*tpuapi.Node, error) { mc := newTPUMetricContext("list", zone) - parent := getTPUParentName(gce.projectID, zone) - response, err := gce.tpuService.projects.Locations.Nodes.List(parent).Do() + parent := getTPUParentName(g.projectID, zone) + response, err := g.tpuService.projects.Locations.Nodes.List(parent).Do() if err != nil { return nil, mc.Observe(err) } @@ -132,10 +132,10 @@ func (gce *GCECloud) ListTPUs(ctx context.Context, zone string) ([]*tpuapi.Node, } // ListLocations returns the zones where Cloud TPUs are available. -func (gce *GCECloud) ListLocations(ctx context.Context) ([]*tpuapi.Location, error) { +func (g *Cloud) ListLocations(ctx context.Context) ([]*tpuapi.Location, error) { mc := newTPUMetricContext("list_locations", "") - parent := getTPUProjectURL(gce.projectID) - response, err := gce.tpuService.projects.Locations.List(parent).Do() + parent := getTPUProjectURL(g.projectID) + response, err := g.tpuService.projects.Locations.List(parent).Do() if err != nil { return nil, mc.Observe(err) } @@ -144,32 +144,32 @@ func (gce *GCECloud) ListLocations(ctx context.Context) ([]*tpuapi.Location, err // waitForTPUOp checks whether the op is done every 30 seconds before the ctx // is cancelled. -func (gce *GCECloud) waitForTPUOp(ctx context.Context, op *tpuapi.Operation) (*tpuapi.Operation, error) { +func (g *Cloud) waitForTPUOp(ctx context.Context, op *tpuapi.Operation) (*tpuapi.Operation, error) { if err := wait.PollInfinite(30*time.Second, func() (bool, error) { // Check if context has been cancelled. 
select { case <-ctx.Done(): - glog.V(3).Infof("Context for operation %q has been cancelled: %s", op.Name, ctx.Err()) + klog.V(3).Infof("Context for operation %q has been cancelled: %s", op.Name, ctx.Err()) return true, ctx.Err() default: } - glog.V(3).Infof("Waiting for operation %q to complete...", op.Name) + klog.V(3).Infof("Waiting for operation %q to complete...", op.Name) start := time.Now() - gce.operationPollRateLimiter.Accept() + g.operationPollRateLimiter.Accept() duration := time.Now().Sub(start) if duration > 5*time.Second { - glog.V(2).Infof("Getting operation %q throttled for %v", op.Name, duration) + klog.V(2).Infof("Getting operation %q throttled for %v", op.Name, duration) } var err error - op, err = gce.tpuService.projects.Locations.Operations.Get(op.Name).Do() + op, err = g.tpuService.projects.Locations.Operations.Get(op.Name).Do() if err != nil { return true, err } if op.Done { - glog.V(3).Infof("Operation %q has completed", op.Name) + klog.V(3).Infof("Operation %q has completed", op.Name) return true, nil } return false, nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_urlmap.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_urlmap.go index b0e60093c15b..7ee8b8213271 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_urlmap.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_urlmap.go @@ -24,53 +24,53 @@ import ( "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" ) -func newUrlMapMetricContext(request string) *metricContext { +func newURLMapMetricContext(request string) *metricContext { return newGenericMetricContext("urlmap", request, unusedMetricLabel, unusedMetricLabel, computeV1Version) } -// GetUrlMap returns the UrlMap by name. -func (gce *GCECloud) GetUrlMap(name string) (*compute.UrlMap, error) { +// GetURLMap returns the UrlMap by name. +func (g *Cloud) GetURLMap(name string) (*compute.UrlMap, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() - mc := newUrlMapMetricContext("get") - v, err := gce.c.UrlMaps().Get(ctx, meta.GlobalKey(name)) + mc := newURLMapMetricContext("get") + v, err := g.c.UrlMaps().Get(ctx, meta.GlobalKey(name)) return v, mc.Observe(err) } -// CreateUrlMap creates a url map -func (gce *GCECloud) CreateUrlMap(urlMap *compute.UrlMap) error { +// CreateURLMap creates a url map +func (g *Cloud) CreateURLMap(urlMap *compute.UrlMap) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() - mc := newUrlMapMetricContext("create") - return mc.Observe(gce.c.UrlMaps().Insert(ctx, meta.GlobalKey(urlMap.Name), urlMap)) + mc := newURLMapMetricContext("create") + return mc.Observe(g.c.UrlMaps().Insert(ctx, meta.GlobalKey(urlMap.Name), urlMap)) } -// UpdateUrlMap applies the given UrlMap as an update -func (gce *GCECloud) UpdateUrlMap(urlMap *compute.UrlMap) error { +// UpdateURLMap applies the given UrlMap as an update +func (g *Cloud) UpdateURLMap(urlMap *compute.UrlMap) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() - mc := newUrlMapMetricContext("update") - return mc.Observe(gce.c.UrlMaps().Update(ctx, meta.GlobalKey(urlMap.Name), urlMap)) + mc := newURLMapMetricContext("update") + return mc.Observe(g.c.UrlMaps().Update(ctx, meta.GlobalKey(urlMap.Name), urlMap)) } -// DeleteUrlMap deletes a url map by name. -func (gce *GCECloud) DeleteUrlMap(name string) error { +// DeleteURLMap deletes a url map by name. 
+func (g *Cloud) DeleteURLMap(name string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() - mc := newUrlMapMetricContext("delete") - return mc.Observe(gce.c.UrlMaps().Delete(ctx, meta.GlobalKey(name))) + mc := newURLMapMetricContext("delete") + return mc.Observe(g.c.UrlMaps().Delete(ctx, meta.GlobalKey(name))) } -// ListUrlMaps lists all UrlMaps in the project. -func (gce *GCECloud) ListUrlMaps() ([]*compute.UrlMap, error) { +// ListURLMaps lists all UrlMaps in the project. +func (g *Cloud) ListURLMaps() ([]*compute.UrlMap, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() - mc := newUrlMapMetricContext("list") - v, err := gce.c.UrlMaps().List(ctx, filter.None) + mc := newURLMapMetricContext("list") + v, err := g.c.UrlMaps().List(ctx, filter.None) return v, mc.Observe(err) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_util.go index 363b5173abee..d66029a1df6c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_util.go @@ -24,17 +24,55 @@ import ( "regexp" "sort" "strings" + "sync" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock" "cloud.google.com/go/compute/metadata" compute "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" ) +func fakeGCECloud(vals TestClusterValues) (*Cloud, error) { + gce := NewFakeGCECloud(vals) + + gce.AlphaFeatureGate = NewAlphaFeatureGate([]string{}) + gce.nodeInformerSynced = func() bool { return true } + + mockGCE := gce.c.(*cloud.MockGCE) + mockGCE.MockTargetPools.AddInstanceHook = mock.AddInstanceHook + mockGCE.MockTargetPools.RemoveInstanceHook = mock.RemoveInstanceHook + mockGCE.MockForwardingRules.InsertHook = mock.InsertFwdRuleHook + mockGCE.MockAddresses.InsertHook = mock.InsertAddressHook + mockGCE.MockAlphaAddresses.InsertHook = mock.InsertAlphaAddressHook + mockGCE.MockAlphaAddresses.X = mock.AddressAttributes{} + mockGCE.MockAddresses.X = mock.AddressAttributes{} + + mockGCE.MockInstanceGroups.X = mock.InstanceGroupAttributes{ + InstanceMap: make(map[meta.Key]map[string]*compute.InstanceWithNamedPorts), + Lock: &sync.Mutex{}, + } + mockGCE.MockInstanceGroups.AddInstancesHook = mock.AddInstancesHook + mockGCE.MockInstanceGroups.RemoveInstancesHook = mock.RemoveInstancesHook + mockGCE.MockInstanceGroups.ListInstancesHook = mock.ListInstancesHook + + mockGCE.MockRegionBackendServices.UpdateHook = mock.UpdateRegionBackendServiceHook + mockGCE.MockHealthChecks.UpdateHook = mock.UpdateHealthCheckHook + mockGCE.MockFirewalls.UpdateHook = mock.UpdateFirewallHook + + keyGA := meta.GlobalKey("key-ga") + mockGCE.MockZones.Objects[*keyGA] = &cloud.MockZonesObj{ + Obj: &compute.Zone{Name: vals.ZoneName, Region: gce.getRegionLink(vals.Region)}, + } + + return gce, nil +} + type gceInstance struct { Zone string Name string @@ -50,7 +88,7 @@ var ( } ) -var providerIdRE = regexp.MustCompile(`^` + ProviderName + `://([^/]+)/([^/]+)/([^/]+)$`) +var providerIDRE = regexp.MustCompile(`^` + ProviderName + `://([^/]+)/([^/]+)/([^/]+)$`) func getProjectAndZone() (string, string, error) { result, err := 
metadata.Get("instance/zone") @@ -69,10 +107,10 @@ func getProjectAndZone() (string, string, error) { return projectID, zone, nil } -func (gce *GCECloud) raiseFirewallChangeNeededEvent(svc *v1.Service, cmd string) { +func (g *Cloud) raiseFirewallChangeNeededEvent(svc *v1.Service, cmd string) { msg := fmt.Sprintf("Firewall change required by network admin: `%v`", cmd) - if gce.eventRecorder != nil && svc != nil { - gce.eventRecorder.Event(svc, v1.EventTypeNormal, "LoadBalancerManualChange", msg) + if g.eventRecorder != nil && svc != nil { + g.eventRecorder.Event(svc, v1.EventTypeNormal, "LoadBalancerManualChange", msg) } } @@ -82,13 +120,13 @@ func FirewallToGCloudCreateCmd(fw *compute.Firewall, projectID string) string { return fmt.Sprintf("gcloud compute firewall-rules create %v --network %v %v", fw.Name, getNameFromLink(fw.Network), args) } -// FirewallToGCloudCreateCmd generates a gcloud command to update a firewall to specified params +// FirewallToGCloudUpdateCmd generates a gcloud command to update a firewall to specified params func FirewallToGCloudUpdateCmd(fw *compute.Firewall, projectID string) string { args := firewallToGcloudArgs(fw, projectID) return fmt.Sprintf("gcloud compute firewall-rules update %v %v", fw.Name, args) } -// FirewallToGCloudCreateCmd generates a gcloud command to delete a firewall to specified params +// FirewallToGCloudDeleteCmd generates a gcloud command to delete a firewall to specified params func FirewallToGCloudDeleteCmd(fwName, projectID string) string { return fmt.Sprintf("gcloud compute firewall-rules delete %v --project %v", fwName, projectID) } @@ -138,11 +176,6 @@ func mapNodeNameToInstanceName(nodeName types.NodeName) string { return string(nodeName) } -// mapInstanceToNodeName maps a GCE Instance to a k8s NodeName -func mapInstanceToNodeName(instance *compute.Instance) types.NodeName { - return types.NodeName(instance.Name) -} - // GetGCERegion returns region of the gce zone. Zone names // are of the form: ${region-name}-${ix}. // For example, "us-central1-b" has a region of "us-central1". @@ -172,7 +205,7 @@ func isInUsedByError(err error) bool { // A providerID is build out of '${ProviderName}://${project-id}/${zone}/${instance-name}' // See cloudprovider.GetInstanceProviderID. 
func splitProviderID(providerID string) (project, zone, instance string, err error) { - matches := providerIdRE.FindStringSubmatch(providerID) + matches := providerIDRE.FindStringSubmatch(providerID) if len(matches) != 4 { return "", "", "", errors.New("error splitting providerID") } @@ -281,3 +314,7 @@ func typeOfNetwork(network *compute.Network) netType { return netTypeCustom } + +func getLocationName(project, zoneOrRegion string) string { + return fmt.Sprintf("projects/%s/locations/%s", project, zoneOrRegion) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_zones.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_zones.go index 503ca348f484..3e4bb059bd64 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_zones.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_zones.go @@ -23,7 +23,7 @@ import ( compute "google.golang.org/api/compute/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter" ) @@ -33,17 +33,17 @@ func newZonesMetricContext(request, region string) *metricContext { } // GetZone creates a cloudprovider.Zone of the current zone and region -func (gce *GCECloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) { +func (g *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) { return cloudprovider.Zone{ - FailureDomain: gce.localZone, - Region: gce.region, + FailureDomain: g.localZone, + Region: g.region, }, nil } // GetZoneByProviderID implements Zones.GetZoneByProviderID // This is particularly useful in external cloud providers where the kubelet // does not initialize node data. -func (gce *GCECloud) GetZoneByProviderID(ctx context.Context, providerID string) (cloudprovider.Zone, error) { +func (g *Cloud) GetZoneByProviderID(ctx context.Context, providerID string) (cloudprovider.Zone, error) { _, zone, _, err := splitProviderID(providerID) if err != nil { return cloudprovider.Zone{}, err @@ -58,9 +58,9 @@ func (gce *GCECloud) GetZoneByProviderID(ctx context.Context, providerID string) // GetZoneByNodeName implements Zones.GetZoneByNodeName // This is particularly useful in external cloud providers where the kubelet // does not initialize node data. 
-func (gce *GCECloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) (cloudprovider.Zone, error) { +func (g *Cloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) (cloudprovider.Zone, error) { instanceName := mapNodeNameToInstanceName(nodeName) - instance, err := gce.getInstanceByName(instanceName) + instance, err := g.getInstanceByName(instanceName) if err != nil { return cloudprovider.Zone{}, err } @@ -72,18 +72,18 @@ func (gce *GCECloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeN } // ListZonesInRegion returns all zones in a GCP region -func (gce *GCECloud) ListZonesInRegion(region string) ([]*compute.Zone, error) { +func (g *Cloud) ListZonesInRegion(region string) ([]*compute.Zone, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newZonesMetricContext("list", region) - list, err := gce.c.Zones().List(ctx, filter.Regexp("region", gce.getRegionLink(region))) + list, err := g.c.Zones().List(ctx, filter.Regexp("region", g.getRegionLink(region))) if err != nil { return nil, mc.Observe(err) } return list, mc.Observe(err) } -func (gce *GCECloud) getRegionLink(region string) string { - return gce.service.BasePath + strings.Join([]string{gce.projectID, "regions", region}, "/") +func (g *Cloud) getRegionLink(region string) string { + return g.service.BasePath + strings.Join([]string{g.projectID, "regions", region}, "/") } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/support.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/support.go index 7861e08acbc7..e6c471855fe6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/support.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/support.go @@ -25,7 +25,7 @@ import ( // gceProjectRouter sends requests to the appropriate project ID. type gceProjectRouter struct { - gce *GCECloud + gce *Cloud } // ProjectID returns the project ID to be used for the given operation. @@ -40,7 +40,7 @@ func (r *gceProjectRouter) ProjectID(ctx context.Context, version meta.Version, // gceRateLimiter implements cloud.RateLimiter. type gceRateLimiter struct { - gce *GCECloud + gce *Cloud } // Accept blocks until the operation can be performed. @@ -63,10 +63,10 @@ func (l *gceRateLimiter) Accept(ctx context.Context, key *cloud.RateLimitKey) er return nil } -// CreateGCECloudWithCloud is a helper function to create an instance of GCECloud with the +// CreateGCECloudWithCloud is a helper function to create an instance of Cloud with the // given Cloud interface implementation. Typical usage is to use cloud.NewMockGCE to get a // handle to a mock Cloud instance and then use that for testing. 
-func CreateGCECloudWithCloud(config *CloudConfig, c cloud.Cloud) (*GCECloud, error) { +func CreateGCECloudWithCloud(config *CloudConfig, c cloud.Cloud) (*Cloud, error) { gceCloud, err := CreateGCECloud(config) if err == nil { gceCloud.c = c diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/token_source.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/token_source.go index 2f4bb09119ab..e8434bd2a0c7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/token_source.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/token_source.go @@ -57,6 +57,7 @@ func init() { prometheus.MustRegister(getTokenFailCounter) } +// AltTokenSource is the structure holding the data for the functionality needed to generates tokens type AltTokenSource struct { oauthClient *http.Client tokenURL string @@ -64,6 +65,7 @@ type AltTokenSource struct { throttle flowcontrol.RateLimiter } +// Token returns a token which may be used for authentication func (a *AltTokenSource) Token() (*oauth2.Token, error) { a.throttle.Accept() getTokenCounter.Inc() @@ -100,6 +102,7 @@ func (a *AltTokenSource) token() (*oauth2.Token, error) { }, nil } +// NewAltTokenSource constructs a new alternate token source for generating tokens. func NewAltTokenSource(tokenURL, tokenBody string) oauth2.TokenSource { client := oauth2.NewClient(oauth2.NoContext, google.ComputeTokenSource("")) a := &AltTokenSource{ diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/BUILD index 039d113728f7..6e45765123f2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/BUILD @@ -22,8 +22,6 @@ go_library( deps = [ "//pkg/api/v1/service:go_default_library", "//pkg/apis/core/v1/helper:go_default_library", - "//pkg/cloudprovider:go_default_library", - "//pkg/controller:go_default_library", "//pkg/kubelet/apis:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", @@ -35,7 +33,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/util/cert:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//staging/src/k8s.io/cloud-provider:go_default_library", "//vendor/github.com/gophercloud/gophercloud:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions:go_default_library", @@ -63,6 +61,7 @@ go_library( "//vendor/github.com/mitchellh/mapstructure:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/gopkg.in/gcfg.v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) @@ -76,13 +75,13 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//pkg/cloudprovider:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//staging/src/k8s.io/cloud-provider:go_default_library", "//vendor/github.com/gophercloud/gophercloud:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/metadata.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/metadata.go index 6ef8eb98d853..2ba32f59d598 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/metadata.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/metadata.go @@ -27,7 +27,7 @@ import ( "path/filepath" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/utils/exec" ) @@ -121,7 +121,7 @@ func getMetadataFromConfigDrive(metadataVersion string) (*Metadata, error) { } defer os.Remove(mntdir) - glog.V(4).Infof("Attempting to mount configdrive %s on %s", dev, mntdir) + klog.V(4).Infof("Attempting to mount configdrive %s on %s", dev, mntdir) mounter := mount.New("" /* default mount path */) err = mounter.Mount(dev, mntdir, "iso9660", []string{"ro"}) @@ -133,7 +133,7 @@ func getMetadataFromConfigDrive(metadataVersion string) (*Metadata, error) { } defer mounter.Unmount(mntdir) - glog.V(4).Infof("Configdrive mounted on %s", mntdir) + klog.V(4).Infof("Configdrive mounted on %s", mntdir) configDrivePath := getConfigDrivePath(metadataVersion) f, err := os.Open( @@ -149,7 +149,7 @@ func getMetadataFromConfigDrive(metadataVersion string) (*Metadata, error) { func getMetadataFromMetadataService(metadataVersion string) (*Metadata, error) { // Try to get JSON from metadata server. 
metadataURL := getMetadataURL(metadataVersion) - glog.V(4).Infof("Attempting to fetch metadata from %s", metadataURL) + klog.V(4).Infof("Attempting to fetch metadata from %s", metadataURL) resp, err := http.Get(metadataURL) if err != nil { return nil, fmt.Errorf("error fetching %s: %v", metadataURL, err) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack.go index ce08a277f583..9f13db944e19 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack.go @@ -41,14 +41,13 @@ import ( "github.com/mitchellh/mapstructure" "gopkg.in/gcfg.v1" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" netutil "k8s.io/apimachinery/pkg/util/net" certutil "k8s.io/client-go/util/cert" + cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" - "k8s.io/kubernetes/pkg/cloudprovider" - "k8s.io/kubernetes/pkg/controller" ) const ( @@ -278,7 +277,7 @@ func readInstanceID(searchOrder string) (string, error) { if err == nil { instanceID := string(idBytes) instanceID = strings.TrimSpace(instanceID) - glog.V(3).Infof("Got instance id from %s: %s", instanceIDFile, instanceID) + klog.V(3).Infof("Got instance id from %s: %s", instanceIDFile, instanceID) if instanceID != "" { return instanceID, nil } @@ -368,7 +367,8 @@ func newOpenStack(cfg Config) (*OpenStack, error) { } // Initialize passes a Kubernetes clientBuilder interface to the cloud provider -func (os *OpenStack) Initialize(clientBuilder controller.ControllerClientBuilder) {} +func (os *OpenStack) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) { +} // mapNodeNameToServerName maps a k8s NodeName to an OpenStack Server Name // This is a simple string cast. @@ -584,10 +584,10 @@ func (os *OpenStack) HasClusterID() bool { // LoadBalancer initializes a LbaasV2 object func (os *OpenStack) LoadBalancer() (cloudprovider.LoadBalancer, bool) { - glog.V(4).Info("openstack.LoadBalancer() called") + klog.V(4).Info("openstack.LoadBalancer() called") if reflect.DeepEqual(os.lbOpts, LoadBalancerOpts{}) { - glog.V(4).Info("LoadBalancer section is empty/not defined in cloud-config") + klog.V(4).Info("LoadBalancer section is empty/not defined in cloud-config") return nil, false } @@ -610,11 +610,11 @@ func (os *OpenStack) LoadBalancer() (cloudprovider.LoadBalancer, bool) { // Currently kubernetes OpenStack cloud provider just support LBaaS v2. 
lbVersion := os.lbOpts.LBVersion if lbVersion != "" && lbVersion != "v2" { - glog.Warningf("Config error: currently only support LBaaS v2, unrecognised lb-version \"%v\"", lbVersion) + klog.Warningf("Config error: currently only support LBaaS v2, unrecognised lb-version \"%v\"", lbVersion) return nil, false } - glog.V(1).Info("Claiming to support LoadBalancer") + klog.V(1).Info("Claiming to support LoadBalancer") return &LbaasV2{LoadBalancer{network, compute, lb, os.lbOpts}}, true } @@ -635,7 +635,7 @@ func isNotFound(err error) bool { // Zones indicates that we support zones func (os *OpenStack) Zones() (cloudprovider.Zones, bool) { - glog.V(1).Info("Claiming to support Zones") + klog.V(1).Info("Claiming to support Zones") return os, true } @@ -650,7 +650,7 @@ func (os *OpenStack) GetZone(ctx context.Context) (cloudprovider.Zone, error) { FailureDomain: md.AvailabilityZone, Region: os.region, } - glog.V(4).Infof("Current zone is %v", zone) + klog.V(4).Infof("Current zone is %v", zone) return zone, nil } @@ -677,7 +677,7 @@ func (os *OpenStack) GetZoneByProviderID(ctx context.Context, providerID string) FailureDomain: srv.Metadata[availabilityZone], Region: os.region, } - glog.V(4).Infof("The instance %s in zone %v", srv.Name, zone) + klog.V(4).Infof("The instance %s in zone %v", srv.Name, zone) return zone, nil } @@ -702,13 +702,13 @@ func (os *OpenStack) GetZoneByNodeName(ctx context.Context, nodeName types.NodeN FailureDomain: srv.Metadata[availabilityZone], Region: os.region, } - glog.V(4).Infof("The instance %s in zone %v", srv.Name, zone) + klog.V(4).Infof("The instance %s in zone %v", srv.Name, zone) return zone, nil } // Routes initializes routes support func (os *OpenStack) Routes() (cloudprovider.Routes, bool) { - glog.V(4).Info("openstack.Routes() called") + klog.V(4).Info("openstack.Routes() called") network, err := os.NewNetworkV2() if err != nil { @@ -717,12 +717,12 @@ func (os *OpenStack) Routes() (cloudprovider.Routes, bool) { netExts, err := networkExtensions(network) if err != nil { - glog.Warningf("Failed to list neutron extensions: %v", err) + klog.Warningf("Failed to list neutron extensions: %v", err) return nil, false } if !netExts["extraroute"] { - glog.V(3).Info("Neutron extraroute extension not found, required for Routes support") + klog.V(3).Info("Neutron extraroute extension not found, required for Routes support") return nil, false } @@ -733,11 +733,11 @@ func (os *OpenStack) Routes() (cloudprovider.Routes, bool) { r, err := NewRoutes(compute, network, os.routeOpts) if err != nil { - glog.Warningf("Error initialising Routes support: %v", err) + klog.Warningf("Error initialising Routes support: %v", err) return nil, false } - glog.V(1).Info("Claiming to support Routes") + klog.V(1).Info("Claiming to support Routes") return r, true } @@ -755,21 +755,21 @@ func (os *OpenStack) volumeService(forceVersion string) (volumeService, error) { if err != nil { return nil, err } - glog.V(3).Info("Using Blockstorage API V1") + klog.V(3).Info("Using Blockstorage API V1") return &VolumesV1{sClient, os.bsOpts}, nil case "v2": sClient, err := os.NewBlockStorageV2() if err != nil { return nil, err } - glog.V(3).Info("Using Blockstorage API V2") + klog.V(3).Info("Using Blockstorage API V2") return &VolumesV2{sClient, os.bsOpts}, nil case "v3": sClient, err := os.NewBlockStorageV3() if err != nil { return nil, err } - glog.V(3).Info("Using Blockstorage API V3") + klog.V(3).Info("Using Blockstorage API V3") return &VolumesV3{sClient, os.bsOpts}, nil case "auto": // Currently 
kubernetes support Cinder v1 / Cinder v2 / Cinder v3. @@ -777,17 +777,17 @@ func (os *OpenStack) volumeService(forceVersion string) (volumeService, error) { // If kubernetes can't initialize cinder v2 client, try to initialize cinder v1 client. // Return appropriate message when kubernetes can't initialize them. if sClient, err := os.NewBlockStorageV3(); err == nil { - glog.V(3).Info("Using Blockstorage API V3") + klog.V(3).Info("Using Blockstorage API V3") return &VolumesV3{sClient, os.bsOpts}, nil } if sClient, err := os.NewBlockStorageV2(); err == nil { - glog.V(3).Info("Using Blockstorage API V2") + klog.V(3).Info("Using Blockstorage API V2") return &VolumesV2{sClient, os.bsOpts}, nil } if sClient, err := os.NewBlockStorageV1(); err == nil { - glog.V(3).Info("Using Blockstorage API V1") + klog.V(3).Info("Using Blockstorage API V1") return &VolumesV1{sClient, os.bsOpts}, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_instances.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_instances.go index c98786661a80..c52ce21998e0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_instances.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_instances.go @@ -21,13 +21,13 @@ import ( "fmt" "regexp" - "github.com/golang/glog" "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" ) // Instances encapsulates an implementation of Instances for OpenStack. @@ -42,15 +42,15 @@ const ( // Instances returns an implementation of Instances for OpenStack. 
func (os *OpenStack) Instances() (cloudprovider.Instances, bool) { - glog.V(4).Info("openstack.Instances() called") + klog.V(4).Info("openstack.Instances() called") compute, err := os.NewComputeV2() if err != nil { - glog.Errorf("unable to access compute v2 API : %v", err) + klog.Errorf("unable to access compute v2 API : %v", err) return nil, false } - glog.V(4).Info("Claiming to support Instances") + klog.V(4).Info("Claiming to support Instances") return &Instances{ compute: compute, @@ -75,14 +75,14 @@ func (i *Instances) AddSSHKeyToAllInstances(ctx context.Context, user string, ke // NodeAddresses implements Instances.NodeAddresses func (i *Instances) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.NodeAddress, error) { - glog.V(4).Infof("NodeAddresses(%v) called", name) + klog.V(4).Infof("NodeAddresses(%v) called", name) addrs, err := getAddressesByName(i.compute, name) if err != nil { return nil, err } - glog.V(4).Infof("NodeAddresses(%v) => %v", name, addrs) + klog.V(4).Infof("NodeAddresses(%v) => %v", name, addrs) return addrs, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go index b4f3dee54388..26e6b095a2c1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go @@ -24,7 +24,6 @@ import ( "strings" "time" - "github.com/golang/glog" "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions" "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external" @@ -38,13 +37,14 @@ import ( "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" neutronports "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" "github.com/gophercloud/gophercloud/pagination" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" + cloudprovider "k8s.io/cloud-provider" "k8s.io/kubernetes/pkg/api/v1/service" - "k8s.io/kubernetes/pkg/cloudprovider" ) // Note: when creating a new Loadbalancer (VM), it can take some time before it is ready for use, @@ -521,16 +521,16 @@ func nodeAddressForLB(node *v1.Node) (string, error) { //getStringFromServiceAnnotation searches a given v1.Service for a specific annotationKey and either returns the annotation's value or a specified defaultSetting func getStringFromServiceAnnotation(service *v1.Service, annotationKey string, defaultSetting string) string { - glog.V(4).Infof("getStringFromServiceAnnotation(%v, %v, %v)", service, annotationKey, defaultSetting) + klog.V(4).Infof("getStringFromServiceAnnotation(%v, %v, %v)", service, annotationKey, defaultSetting) if annotationValue, ok := service.Annotations[annotationKey]; ok { //if there is an annotation for this setting, set the "setting" var to it // annotationValue can be empty, it is working as designed // it makes possible for instance provisioning loadbalancer without floatingip - glog.V(4).Infof("Found a Service Annotation: %v = %v", annotationKey, annotationValue) + klog.V(4).Infof("Found a Service Annotation: %v = %v", annotationKey, annotationValue) return annotationValue } //if there is no annotation, set "settings" var to the value from cloud config - 
glog.V(4).Infof("Could not find a Service Annotation; falling back on cloud-config setting: %v = %v", annotationKey, defaultSetting) + klog.V(4).Infof("Could not find a Service Annotation; falling back on cloud-config setting: %v = %v", annotationKey, defaultSetting) return defaultSetting } @@ -641,7 +641,7 @@ func getFloatingNetworkIDForLB(client *gophercloud.ServiceClient) (string, error } if err == ErrMultipleResults { - glog.V(4).Infof("find multiple external networks, pick the first one when there are no explicit configuration.") + klog.V(4).Infof("find multiple external networks, pick the first one when there are no explicit configuration.") return floatingNetworkIds[0], nil } return "", err @@ -661,7 +661,7 @@ func getFloatingNetworkIDForLB(client *gophercloud.ServiceClient) (string, error // EnsureLoadBalancer creates a new load balancer 'name', or updates the existing one. func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string, apiService *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { - glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)", clusterName, apiService.Namespace, apiService.Name, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, nodes, apiService.Annotations) + klog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)", clusterName, apiService.Namespace, apiService.Name, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, nodes, apiService.Annotations) if len(nodes) == 0 { return nil, fmt.Errorf("there are no available nodes for LoadBalancer service %s/%s", apiService.Namespace, apiService.Name) @@ -673,7 +673,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string // The LB needs to be configured with instance addresses on the same subnet, so get SubnetID by one node. 
subnetID, err := getSubnetIDForLB(lbaas.compute, *nodes[0]) if err != nil { - glog.Warningf("Failed to find subnet-id for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err) + klog.Warningf("Failed to find subnet-id for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err) return nil, fmt.Errorf("no subnet-id for service %s/%s : subnet-id not set in cloud provider config, "+ "and failed to find subnet-id from OpenStack: %v", apiService.Namespace, apiService.Name, err) } @@ -690,7 +690,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string var err error floatingPool, err = getFloatingNetworkIDForLB(lbaas.network) if err != nil { - glog.Warningf("Failed to find floating-network-id for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err) + klog.Warningf("Failed to find floating-network-id for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err) } } @@ -698,11 +698,11 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string internal := getStringFromServiceAnnotation(apiService, ServiceAnnotationLoadBalancerInternal, "false") switch internal { case "true": - glog.V(4).Infof("Ensure an internal loadbalancer service.") + klog.V(4).Infof("Ensure an internal loadbalancer service.") internalAnnotation = true case "false": if len(floatingPool) != 0 { - glog.V(4).Infof("Ensure an external loadbalancer service, using floatingPool: %v", floatingPool) + klog.V(4).Infof("Ensure an external loadbalancer service, using floatingPool: %v", floatingPool) internalAnnotation = false } else { return nil, fmt.Errorf("floating-network-id or loadbalancer.openstack.org/floating-network-id should be specified when ensuring an external loadbalancer service") @@ -746,14 +746,14 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string if err != ErrNotFound { return nil, fmt.Errorf("error getting loadbalancer %s: %v", name, err) } - glog.V(2).Infof("Creating loadbalancer %s", name) + klog.V(2).Infof("Creating loadbalancer %s", name) loadbalancer, err = lbaas.createLoadBalancer(apiService, name, internalAnnotation) if err != nil { // Unknown error, retry later return nil, fmt.Errorf("error creating loadbalancer %s: %v", name, err) } } else { - glog.V(2).Infof("LoadBalancer %s already exists", name) + klog.V(2).Infof("LoadBalancer %s already exists", name) } provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) @@ -773,7 +773,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string for portIndex, port := range ports { listener := getListenerForPort(oldListeners, port) if listener == nil { - glog.V(4).Infof("Creating listener for port %d", int(port.Port)) + klog.V(4).Infof("Creating listener for port %d", int(port.Port)) listener, err = listeners.Create(lbaas.lb, listeners.CreateOpts{ Name: fmt.Sprintf("listener_%s_%d", name, portIndex), Protocol: listeners.Protocol(port.Protocol), @@ -790,7 +790,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string } } - glog.V(4).Infof("Listener for %s port %d: %s", string(port.Protocol), int(port.Port), listener.ID) + klog.V(4).Infof("Listener for %s port %d: %s", string(port.Protocol), int(port.Port), listener.ID) // After all ports have been processed, remaining listeners are removed as obsolete. // Pop valid listeners. 
@@ -801,7 +801,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string return nil, fmt.Errorf("error getting pool for listener %s: %v", listener.ID, err) } if pool == nil { - glog.V(4).Infof("Creating pool for listener %s", listener.ID) + klog.V(4).Infof("Creating pool for listener %s", listener.ID) pool, err = v2pools.Create(lbaas.lb, v2pools.CreateOpts{ Name: fmt.Sprintf("pool_%s_%d", name, portIndex), Protocol: v2pools.Protocol(port.Protocol), @@ -820,7 +820,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string } - glog.V(4).Infof("Pool for listener %s: %s", listener.ID, pool.ID) + klog.V(4).Infof("Pool for listener %s: %s", listener.ID, pool.ID) members, err := getMembersByPoolID(lbaas.lb, pool.ID) if err != nil && !isNotFound(err) { return nil, fmt.Errorf("error getting pool members %s: %v", pool.ID, err) @@ -830,7 +830,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string if err != nil { if err == ErrNotFound { // Node failure, do not create member - glog.Warningf("Failed to create LB pool member for node %s: %v", node.Name, err) + klog.Warningf("Failed to create LB pool member for node %s: %v", node.Name, err) continue } else { return nil, fmt.Errorf("error getting address for node %s: %v", node.Name, err) @@ -838,7 +838,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string } if !memberExists(members, addr, int(port.NodePort)) { - glog.V(4).Infof("Creating member for pool %s", pool.ID) + klog.V(4).Infof("Creating member for pool %s", pool.ID) _, err := v2pools.CreateMember(lbaas.lb, pool.ID, v2pools.CreateMemberOpts{ Name: fmt.Sprintf("member_%s_%d_%s", name, portIndex, node.Name), ProtocolPort: int(port.NodePort), @@ -858,12 +858,12 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string members = popMember(members, addr, int(port.NodePort)) } - glog.V(4).Infof("Ensured pool %s has member for %s at %s", pool.ID, node.Name, addr) + klog.V(4).Infof("Ensured pool %s has member for %s at %s", pool.ID, node.Name, addr) } // Delete obsolete members for this pool for _, member := range members { - glog.V(4).Infof("Deleting obsolete member %s for pool %s address %s", member.ID, pool.ID, member.Address) + klog.V(4).Infof("Deleting obsolete member %s for pool %s address %s", member.ID, pool.ID, member.Address) err := v2pools.DeleteMember(lbaas.lb, pool.ID, member.ID).ExtractErr() if err != nil && !isNotFound(err) { return nil, fmt.Errorf("error deleting obsolete member %s for pool %s address %s: %v", member.ID, pool.ID, member.Address, err) @@ -876,7 +876,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string monitorID := pool.MonitorID if monitorID == "" && lbaas.opts.CreateMonitor { - glog.V(4).Infof("Creating monitor for pool %s", pool.ID) + klog.V(4).Infof("Creating monitor for pool %s", pool.ID) monitor, err := v2monitors.Create(lbaas.lb, v2monitors.CreateOpts{ Name: fmt.Sprintf("monitor_%s_%d", name, portIndex), PoolID: pool.ID, @@ -894,17 +894,17 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string } monitorID = monitor.ID } else if lbaas.opts.CreateMonitor == false { - glog.V(4).Infof("Do not create monitor for pool %s when create-monitor is false", pool.ID) + klog.V(4).Infof("Do not create monitor for pool %s when create-monitor is false", pool.ID) } if monitorID != "" { - glog.V(4).Infof("Monitor for pool %s: %s", pool.ID, monitorID) + klog.V(4).Infof("Monitor for pool 
%s: %s", pool.ID, monitorID) } } // All remaining listeners are obsolete, delete for _, listener := range oldListeners { - glog.V(4).Infof("Deleting obsolete listener %s:", listener.ID) + klog.V(4).Infof("Deleting obsolete listener %s:", listener.ID) // get pool for listener pool, err := getPoolByListenerID(lbaas.lb, loadbalancer.ID, listener.ID) if err != nil && err != ErrNotFound { @@ -914,7 +914,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string // get and delete monitor monitorID := pool.MonitorID if monitorID != "" { - glog.V(4).Infof("Deleting obsolete monitor %s for pool %s", monitorID, pool.ID) + klog.V(4).Infof("Deleting obsolete monitor %s for pool %s", monitorID, pool.ID) err = v2monitors.Delete(lbaas.lb, monitorID).ExtractErr() if err != nil && !isNotFound(err) { return nil, fmt.Errorf("error deleting obsolete monitor %s for pool %s: %v", monitorID, pool.ID, err) @@ -931,7 +931,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string } if members != nil { for _, member := range members { - glog.V(4).Infof("Deleting obsolete member %s for pool %s address %s", member.ID, pool.ID, member.Address) + klog.V(4).Infof("Deleting obsolete member %s for pool %s address %s", member.ID, pool.ID, member.Address) err := v2pools.DeleteMember(lbaas.lb, pool.ID, member.ID).ExtractErr() if err != nil && !isNotFound(err) { return nil, fmt.Errorf("error deleting obsolete member %s for pool %s address %s: %v", member.ID, pool.ID, member.Address, err) @@ -942,7 +942,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string } } } - glog.V(4).Infof("Deleting obsolete pool %s for listener %s", pool.ID, listener.ID) + klog.V(4).Infof("Deleting obsolete pool %s for listener %s", pool.ID, listener.ID) // delete pool err = v2pools.Delete(lbaas.lb, pool.ID).ExtractErr() if err != nil && !isNotFound(err) { @@ -962,7 +962,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string if err != nil { return nil, fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err) } - glog.V(2).Infof("Deleted obsolete listener: %s", listener.ID) + klog.V(2).Infof("Deleted obsolete listener: %s", listener.ID) } portID := loadbalancer.VipPortID @@ -971,7 +971,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string return nil, fmt.Errorf("error getting floating ip for port %s: %v", portID, err) } if floatIP == nil && floatingPool != "" && !internalAnnotation { - glog.V(4).Infof("Creating floating ip for loadbalancer %s port %s", loadbalancer.ID, portID) + klog.V(4).Infof("Creating floating ip for loadbalancer %s port %s", loadbalancer.ID, portID) floatIPOpts := floatingips.CreateOpts{ FloatingNetworkID: floatingPool, PortID: portID, @@ -1019,7 +1019,7 @@ func (lbaas *LbaasV2) ensureSecurityGroup(clusterName string, apiService *v1.Ser return fmt.Errorf("failed to find node-security-group for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err) } } - glog.V(4).Infof("find node-security-group %v for loadbalancer service %s/%s", lbaas.opts.NodeSecurityGroupIDs, apiService.Namespace, apiService.Name) + klog.V(4).Infof("find node-security-group %v for loadbalancer service %s/%s", lbaas.opts.NodeSecurityGroupIDs, apiService.Namespace, apiService.Name) // get service ports ports := apiService.Spec.Ports @@ -1183,7 +1183,7 @@ func (lbaas *LbaasV2) ensureSecurityGroup(clusterName string, apiService *v1.Ser // UpdateLoadBalancer 
updates hosts under the specified load balancer. func (lbaas *LbaasV2) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error { loadBalancerName := lbaas.GetLoadBalancerName(ctx, clusterName, service) - glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v)", clusterName, loadBalancerName, nodes) + klog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v)", clusterName, loadBalancerName, nodes) lbaas.opts.SubnetID = getStringFromServiceAnnotation(service, ServiceAnnotationLoadBalancerSubnetID, lbaas.opts.SubnetID) if len(lbaas.opts.SubnetID) == 0 && len(nodes) > 0 { @@ -1191,7 +1191,7 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(ctx context.Context, clusterName string // The LB needs to be configured with instance addresses on the same subnet, so get SubnetID by one node. subnetID, err := getSubnetIDForLB(lbaas.compute, *nodes[0]) if err != nil { - glog.Warningf("Failed to find subnet-id for loadbalancer service %s/%s: %v", service.Namespace, service.Name, err) + klog.Warningf("Failed to find subnet-id for loadbalancer service %s/%s: %v", service.Namespace, service.Name, err) return fmt.Errorf("no subnet-id for service %s/%s : subnet-id not set in cloud provider config, "+ "and failed to find subnet-id from OpenStack: %v", service.Namespace, service.Name, err) } @@ -1332,7 +1332,7 @@ func (lbaas *LbaasV2) updateSecurityGroup(clusterName string, apiService *v1.Ser if err != nil { return fmt.Errorf("failed to find node-security-group for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err) } - glog.V(4).Infof("find node-security-group %v for loadbalancer service %s/%s", lbaas.opts.NodeSecurityGroupIDs, apiService.Namespace, apiService.Name) + klog.V(4).Infof("find node-security-group %v for loadbalancer service %s/%s", lbaas.opts.NodeSecurityGroupIDs, apiService.Namespace, apiService.Name) original := sets.NewString(originalNodeSecurityGroupIDs...) current := sets.NewString(lbaas.opts.NodeSecurityGroupIDs...) @@ -1406,7 +1406,7 @@ func (lbaas *LbaasV2) updateSecurityGroup(clusterName string, apiService *v1.Ser // EnsureLoadBalancerDeleted deletes the specified load balancer func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error { loadBalancerName := lbaas.GetLoadBalancerName(ctx, clusterName, service) - glog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v)", clusterName, loadBalancerName) + klog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v)", clusterName, loadBalancerName) loadbalancer, err := getLoadbalancerByName(lbaas.lb, loadBalancerName) if err != nil && err != ErrNotFound { @@ -1550,7 +1550,7 @@ func (lbaas *LbaasV2) EnsureSecurityGroupDeleted(clusterName string, service *v1 // Just happen when nodes have not Security Group, or should not happen // UpdateLoadBalancer and EnsureLoadBalancer can set lbaas.opts.NodeSecurityGroupIDs when it is empty // And service controller call UpdateLoadBalancer to set lbaas.opts.NodeSecurityGroupIDs when controller manager service is restarted. 
- glog.Warningf("Can not find node-security-group from all the nodes of this cluster when delete loadbalancer service %s/%s", + klog.Warningf("Can not find node-security-group from all the nodes of this cluster when delete loadbalancer service %s/%s", service.Namespace, service.Name) } else { // Delete the rules in the Node Security Group diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_routes.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_routes.go index 3f41c7339d9d..0cb31a435a74 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_routes.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_routes.go @@ -26,9 +26,9 @@ import ( "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers" neutronports "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" ) var errNoRouterID = errors.New("router-id not set in cloud provider config") @@ -55,7 +55,7 @@ func NewRoutes(compute *gophercloud.ServiceClient, network *gophercloud.ServiceC // ListRoutes lists all managed routes that belong to the specified clusterName func (r *Routes) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) { - glog.V(4).Infof("ListRoutes(%v)", clusterName) + klog.V(4).Infof("ListRoutes(%v)", clusterName) nodeNamesByAddr := make(map[string]types.NodeName) err := foreachServer(r.compute, servers.ListOpts{}, func(srv *servers.Server) (bool, error) { @@ -109,12 +109,12 @@ func updateRoutes(network *gophercloud.ServiceClient, router *routers.Router, ne } unwinder := func() { - glog.V(4).Info("Reverting routes change to router ", router.ID) + klog.V(4).Info("Reverting routes change to router ", router.ID) _, err := routers.Update(network, router.ID, routers.UpdateOpts{ Routes: origRoutes, }).Extract() if err != nil { - glog.Warning("Unable to reset routes during error unwind: ", err) + klog.Warning("Unable to reset routes during error unwind: ", err) } } @@ -132,12 +132,12 @@ func updateAllowedAddressPairs(network *gophercloud.ServiceClient, port *neutron } unwinder := func() { - glog.V(4).Info("Reverting allowed-address-pairs change to port ", port.ID) + klog.V(4).Info("Reverting allowed-address-pairs change to port ", port.ID) _, err := neutronports.Update(network, port.ID, neutronports.UpdateOpts{ AllowedAddressPairs: &origPairs, }).Extract() if err != nil { - glog.Warning("Unable to reset allowed-address-pairs during error unwind: ", err) + klog.Warning("Unable to reset allowed-address-pairs during error unwind: ", err) } } @@ -146,7 +146,7 @@ func updateAllowedAddressPairs(network *gophercloud.ServiceClient, port *neutron // CreateRoute creates the described managed route func (r *Routes) CreateRoute(ctx context.Context, clusterName string, nameHint string, route *cloudprovider.Route) error { - glog.V(4).Infof("CreateRoute(%v, %v, %v)", clusterName, nameHint, route) + klog.V(4).Infof("CreateRoute(%v, %v, %v)", clusterName, nameHint, route) onFailure := newCaller() @@ -158,7 +158,7 @@ func (r *Routes) CreateRoute(ctx context.Context, clusterName string, nameHint s return err } - glog.V(4).Infof("Using nexthop %v for node %v", addr, route.TargetNode) + klog.V(4).Infof("Using nexthop %v for node %v", 
addr, route.TargetNode) router, err := routers.Get(r.network, r.opts.RouterID).Extract() if err != nil { @@ -169,7 +169,7 @@ func (r *Routes) CreateRoute(ctx context.Context, clusterName string, nameHint s for _, item := range routes { if item.DestinationCIDR == route.DestinationCIDR && item.NextHop == addr { - glog.V(4).Infof("Skipping existing route: %v", route) + klog.V(4).Infof("Skipping existing route: %v", route) return nil } } @@ -198,7 +198,7 @@ func (r *Routes) CreateRoute(ctx context.Context, clusterName string, nameHint s found := false for _, item := range port.AllowedAddressPairs { if item.IPAddress == route.DestinationCIDR { - glog.V(4).Info("Found existing allowed-address-pair: ", item) + klog.V(4).Info("Found existing allowed-address-pair: ", item) found = true break } @@ -215,14 +215,14 @@ func (r *Routes) CreateRoute(ctx context.Context, clusterName string, nameHint s defer onFailure.call(unwind) } - glog.V(4).Infof("Route created: %v", route) + klog.V(4).Infof("Route created: %v", route) onFailure.disarm() return nil } // DeleteRoute deletes the specified managed route func (r *Routes) DeleteRoute(ctx context.Context, clusterName string, route *cloudprovider.Route) error { - glog.V(4).Infof("DeleteRoute(%v, %v)", clusterName, route) + klog.V(4).Infof("DeleteRoute(%v, %v)", clusterName, route) onFailure := newCaller() @@ -255,7 +255,7 @@ func (r *Routes) DeleteRoute(ctx context.Context, clusterName string, route *clo } if index == -1 { - glog.V(4).Infof("Skipping non-existent route: %v", route) + klog.V(4).Infof("Skipping non-existent route: %v", route) return nil } @@ -301,7 +301,7 @@ func (r *Routes) DeleteRoute(ctx context.Context, clusterName string, route *clo defer onFailure.call(unwind) } - glog.V(4).Infof("Route deleted: %v", route) + klog.V(4).Infof("Route deleted: %v", route) onFailure.disarm() return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_volumes.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_volumes.go index b269d2469e88..68b8f92a8123 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_volumes.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_volumes.go @@ -29,7 +29,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" k8s_volume "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" @@ -42,7 +42,7 @@ import ( "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach" "github.com/prometheus/client_golang/prometheus" - "github.com/golang/glog" + "k8s.io/klog" ) type volumeService interface { @@ -334,19 +334,19 @@ func (os *OpenStack) AttachDisk(instanceID, volumeID string) (string, error) { if volume.AttachedServerID != "" { if instanceID == volume.AttachedServerID { - glog.V(4).Infof("Disk %s is already attached to instance %s", volumeID, instanceID) + klog.V(4).Infof("Disk %s is already attached to instance %s", volumeID, instanceID) return volume.ID, nil } nodeName, err := os.GetNodeNameByID(volume.AttachedServerID) attachErr := fmt.Sprintf("disk %s path %s is attached to a different instance (%s)", volumeID, volume.AttachedDevice, volume.AttachedServerID) if err != nil { - glog.Error(attachErr) + 
klog.Error(attachErr) return "", errors.New(attachErr) } // using volume.AttachedDevice may cause problems because cinder does not report device path correctly see issue #33128 devicePath := volume.AttachedDevice danglingErr := volumeutil.NewDanglingError(attachErr, nodeName, devicePath) - glog.V(2).Infof("Found dangling volume %s attached to node %s", volumeID, nodeName) + klog.V(2).Infof("Found dangling volume %s attached to node %s", volumeID, nodeName) return "", danglingErr } @@ -360,7 +360,7 @@ func (os *OpenStack) AttachDisk(instanceID, volumeID string) (string, error) { if err != nil { return "", fmt.Errorf("failed to attach %s volume to %s compute: %v", volumeID, instanceID, err) } - glog.V(2).Infof("Successfully attached %s volume to %s compute", volumeID, instanceID) + klog.V(2).Infof("Successfully attached %s volume to %s compute", volumeID, instanceID) return volume.ID, nil } @@ -372,7 +372,7 @@ func (os *OpenStack) DetachDisk(instanceID, volumeID string) error { } if volume.Status == volumeAvailableStatus { // "available" is fine since that means the volume is detached from instance already. - glog.V(2).Infof("volume: %s has been detached from compute: %s ", volume.ID, instanceID) + klog.V(2).Infof("volume: %s has been detached from compute: %s ", volume.ID, instanceID) return nil } @@ -396,7 +396,7 @@ func (os *OpenStack) DetachDisk(instanceID, volumeID string) error { if err != nil { return fmt.Errorf("failed to delete volume %s from compute %s attached %v", volume.ID, instanceID, err) } - glog.V(2).Infof("Successfully detached volume: %s from compute: %s", volume.ID, instanceID) + klog.V(2).Infof("Successfully detached volume: %s from compute: %s", volume.ID, instanceID) return nil } @@ -468,7 +468,7 @@ func (os *OpenStack) CreateVolume(name string, size int, vtype, availability str return "", "", "", os.bsOpts.IgnoreVolumeAZ, fmt.Errorf("failed to create a %d GB volume: %v", size, err) } - glog.Infof("Created volume %v in Availability Zone: %v Region: %v Ignore volume AZ: %v", volumeID, volumeAZ, os.region, os.bsOpts.IgnoreVolumeAZ) + klog.Infof("Created volume %v in Availability Zone: %v Region: %v Ignore volume AZ: %v", volumeID, volumeAZ, os.region, os.bsOpts.IgnoreVolumeAZ) return volumeID, volumeAZ, os.region, os.bsOpts.IgnoreVolumeAZ, nil } @@ -490,13 +490,13 @@ func (os *OpenStack) GetDevicePathBySerialID(volumeID string) string { for _, f := range files { for _, c := range candidateDeviceNodes { if c == f.Name() { - glog.V(4).Infof("Found disk attached as %q; full devicepath: %s\n", f.Name(), path.Join("/dev/disk/by-id/", f.Name())) + klog.V(4).Infof("Found disk attached as %q; full devicepath: %s\n", f.Name(), path.Join("/dev/disk/by-id/", f.Name())) return path.Join("/dev/disk/by-id/", f.Name()) } } } - glog.V(4).Infof("Failed to find device for the volumeID: %q by serial ID", volumeID) + klog.V(4).Infof("Failed to find device for the volumeID: %q by serial ID", volumeID) return "" } @@ -511,14 +511,14 @@ func (os *OpenStack) getDevicePathFromInstanceMetadata(volumeID string) string { newtonMetadataVersion) if err != nil { - glog.V(4).Infof( + klog.V(4).Infof( "Could not retrieve instance metadata. Error: %v", err) return "" } for _, device := range instanceMetadata.Devices { if device.Type == "disk" && device.Serial == volumeID { - glog.V(4).Infof( + klog.V(4).Infof( "Found disk metadata for volumeID %q. 
Bus: %q, Address: %q", volumeID, device.Bus, device.Address) @@ -527,7 +527,7 @@ func (os *OpenStack) getDevicePathFromInstanceMetadata(volumeID string) string { device.Bus, device.Address) diskPaths, err := filepath.Glob(diskPattern) if err != nil { - glog.Errorf( + klog.Errorf( "could not retrieve disk path for volumeID: %q. Error filepath.Glob(%q): %v", volumeID, diskPattern, err) return "" @@ -537,14 +537,14 @@ func (os *OpenStack) getDevicePathFromInstanceMetadata(volumeID string) string { return diskPaths[0] } - glog.Errorf( + klog.Errorf( "expecting to find one disk path for volumeID %q, found %d: %v", volumeID, len(diskPaths), diskPaths) return "" } } - glog.V(4).Infof( + klog.V(4).Infof( "Could not retrieve device metadata for volumeID: %q", volumeID) return "" } @@ -558,7 +558,7 @@ func (os *OpenStack) GetDevicePath(volumeID string) string { } if devicePath == "" { - glog.Warningf("Failed to find device for the volumeID: %q", volumeID) + klog.Warningf("Failed to find device for the volumeID: %q", volumeID) } return devicePath @@ -610,7 +610,7 @@ func (os *OpenStack) GetAttachmentDiskPath(instanceID, volumeID string) (string, // DiskIsAttached queries if a volume is attached to a compute instance func (os *OpenStack) DiskIsAttached(instanceID, volumeID string) (bool, error) { if instanceID == "" { - glog.Warningf("calling DiskIsAttached with empty instanceid: %s %s", instanceID, volumeID) + klog.Warningf("calling DiskIsAttached with empty instanceid: %s %s", instanceID, volumeID) } volume, err := os.getVolume(volumeID) if err != nil { @@ -697,6 +697,11 @@ func (os *OpenStack) ShouldTrustDevicePath() bool { // GetLabelsForVolume implements PVLabeler.GetLabelsForVolume func (os *OpenStack) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error) { + // Ignore if not Cinder. 
+ if pv.Spec.Cinder == nil { + return nil, nil + } + // Ignore any volumes that are being provisioned if pv.Spec.Cinder.VolumeID == k8s_volume.ProvisionedVolumeName { return nil, nil @@ -712,7 +717,7 @@ func (os *OpenStack) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVo labels := make(map[string]string) labels[kubeletapis.LabelZoneFailureDomain] = volume.AvailabilityZone labels[kubeletapis.LabelZoneRegion] = os.region - glog.V(4).Infof("The Volume %s has labels %v", pv.Spec.Cinder.VolumeID, labels) + klog.V(4).Infof("The Volume %s has labels %v", pv.Spec.Cinder.VolumeID, labels) return labels, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt/BUILD index bcd0e508ee48..4cdbb098800d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt/BUILD @@ -11,10 +11,9 @@ go_library( srcs = ["ovirt.go"], importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt", deps = [ - "//pkg/cloudprovider:go_default_library", - "//pkg/controller:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/cloud-provider:go_default_library", "//vendor/gopkg.in/gcfg.v1:go_default_library", ], ) @@ -23,7 +22,7 @@ go_test( name = "go_default_test", srcs = ["ovirt_test.go"], embed = [":go_default_library"], - deps = ["//pkg/cloudprovider:go_default_library"], + deps = ["//staging/src/k8s.io/cloud-provider:go_default_library"], ) filegroup( diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt/ovirt.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt/ovirt.go index 686ba8e25eea..9937919ed16c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt/ovirt.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt/ovirt.go @@ -33,8 +33,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/cloudprovider" - "k8s.io/kubernetes/pkg/controller" + cloudprovider "k8s.io/cloud-provider" ) const ProviderName = "ovirt" @@ -118,7 +117,8 @@ func newOVirtCloud(config io.Reader) (*OVirtCloud, error) { } // Initialize passes a Kubernetes clientBuilder interface to the cloud provider -func (v *OVirtCloud) Initialize(clientBuilder controller.ControllerClientBuilder) {} +func (v *OVirtCloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) { +} func (v *OVirtCloud) Clusters() (cloudprovider.Clusters, bool) { return nil, false diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/photon/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/photon/BUILD index 73394f1baff6..57a17c416f6d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/photon/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/photon/BUILD @@ -12,13 +12,12 @@ go_library( importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/photon", deps = [ "//pkg/apis/core/v1/helper:go_default_library", - "//pkg/cloudprovider:go_default_library", - "//pkg/controller:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//staging/src/k8s.io/cloud-provider:go_default_library", "//vendor/github.com/vmware/photon-controller-go-sdk/photon:go_default_library", "//vendor/gopkg.in/gcfg.v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -27,9 +26,9 @@ go_test( srcs = ["photon_test.go"], embed = [":go_default_library"], deps = [ - "//pkg/cloudprovider:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library", + "//staging/src/k8s.io/cloud-provider:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/photon/photon.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/photon/photon.go index 9411c15f2d5a..24f8d2b8dd8d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/photon/photon.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/photon/photon.go @@ -34,14 +34,13 @@ import ( "os" "strings" - "github.com/golang/glog" "github.com/vmware/photon-controller-go-sdk/photon" "gopkg.in/gcfg.v1" "k8s.io/api/core/v1" k8stypes "k8s.io/apimachinery/pkg/types" + cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" - "k8s.io/kubernetes/pkg/cloudprovider" - "k8s.io/kubernetes/pkg/controller" ) const ( @@ -136,7 +135,7 @@ func init() { cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) { cfg, err := readConfig(config) if err != nil { - glog.Errorf("Photon Cloud Provider: failed to read in cloud provider config file. Error[%v]", err) + klog.Errorf("Photon Cloud Provider: failed to read in cloud provider config file. Error[%v]", err) return nil, err } return newPCCloud(cfg) @@ -147,13 +146,13 @@ func init() { func getVMIDbyNodename(pc *PCCloud, nodeName string) (string, error) { photonClient, err := getPhotonClient(pc) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to get photon client for getVMIDbyNodename, error: [%v]", err) + klog.Errorf("Photon Cloud Provider: Failed to get photon client for getVMIDbyNodename, error: [%v]", err) return "", err } vmList, err := photonClient.Projects.GetVMs(pc.projID, nil) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to GetVMs from project %s with nodeName %s, error: [%v]", pc.projID, nodeName, err) + klog.Errorf("Photon Cloud Provider: Failed to GetVMs from project %s with nodeName %s, error: [%v]", pc.projID, nodeName, err) return "", err } @@ -170,24 +169,24 @@ func getVMIDbyNodename(pc *PCCloud, nodeName string) (string, error) { func getVMIDbyIP(pc *PCCloud, IPAddress string) (string, error) { photonClient, err := getPhotonClient(pc) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to get photon client for getVMIDbyNodename, error: [%v]", err) + klog.Errorf("Photon Cloud Provider: Failed to get photon client for getVMIDbyNodename, error: [%v]", err) return "", err } vmList, err := photonClient.Projects.GetVMs(pc.projID, nil) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to GetVMs for project %s. error: [%v]", pc.projID, err) + klog.Errorf("Photon Cloud Provider: Failed to GetVMs for project %s. 
error: [%v]", pc.projID, err) return "", err } for _, vm := range vmList.Items { task, err := photonClient.VMs.GetNetworks(vm.ID) if err != nil { - glog.Warningf("Photon Cloud Provider: GetNetworks failed for vm.ID %s, error [%v]", vm.ID, err) + klog.Warningf("Photon Cloud Provider: GetNetworks failed for vm.ID %s, error [%v]", vm.ID, err) } else { task, err = photonClient.Tasks.Wait(task.ID) if err != nil { - glog.Warningf("Photon Cloud Provider: Wait task for GetNetworks failed for vm.ID %s, error [%v]", vm.ID, err) + klog.Warningf("Photon Cloud Provider: Wait task for GetNetworks failed for vm.ID %s, error [%v]", vm.ID, err) } else { networkConnections := task.ResourceProperties.(map[string]interface{}) networks := networkConnections["networkConnections"].([]interface{}) @@ -222,25 +221,25 @@ func getPhotonClient(pc *PCCloud) (*photon.Client, error) { // work around before metadata is available file, err := os.Open("/etc/kubernetes/pc_login_info") if err != nil { - glog.Errorf("Photon Cloud Provider: Authentication is enabled but found no username/password at /etc/kubernetes/pc_login_info. Error[%v]", err) + klog.Errorf("Photon Cloud Provider: Authentication is enabled but found no username/password at /etc/kubernetes/pc_login_info. Error[%v]", err) return nil, err } defer file.Close() scanner := bufio.NewScanner(file) if !scanner.Scan() { - glog.Errorf("Photon Cloud Provider: Empty username inside /etc/kubernetes/pc_login_info.") + klog.Error("Photon Cloud Provider: Empty username inside /etc/kubernetes/pc_login_info.") return nil, fmt.Errorf("Failed to create authentication enabled client with invalid username") } username := scanner.Text() if !scanner.Scan() { - glog.Errorf("Photon Cloud Provider: Empty password set inside /etc/kubernetes/pc_login_info.") + klog.Error("Photon Cloud Provider: Empty password set inside /etc/kubernetes/pc_login_info.") return nil, fmt.Errorf("Failed to create authentication enabled client with invalid password") } password := scanner.Text() token_options, err := pc.photonClient.Auth.GetTokensByPassword(username, password) if err != nil { - glog.Errorf("Photon Cloud Provider: failed to get tokens by password") + klog.Error("Photon Cloud Provider: failed to get tokens by password") return nil, err } @@ -255,10 +254,10 @@ func getPhotonClient(pc *PCCloud) (*photon.Client, error) { status, err := pc.photonClient.Status.Get() if err != nil { - glog.Errorf("Photon Cloud Provider: new client creation failed. Error[%v]", err) + klog.Errorf("Photon Cloud Provider: new client creation failed. Error[%v]", err) return nil, err } - glog.V(2).Infof("Photon Cloud Provider: Status of the new photon controller client: %v", status) + klog.V(2).Infof("Photon Cloud Provider: Status of the new photon controller client: %v", status) return pc.photonClient, nil } @@ -270,7 +269,7 @@ func newPCCloud(cfg PCConfig) (*PCCloud, error) { // Get local hostname hostname, err := os.Hostname() if err != nil { - glog.Errorf("Photon Cloud Provider: get hostname failed. Error[%v]", err) + klog.Errorf("Photon Cloud Provider: get hostname failed. 
Error[%v]", err) return nil, err } pc := PCCloud{ @@ -287,7 +286,8 @@ func newPCCloud(cfg PCConfig) (*PCCloud, error) { } // Initialize passes a Kubernetes clientBuilder interface to the cloud provider -func (pc *PCCloud) Initialize(clientBuilder controller.ControllerClientBuilder) {} +func (pc *PCCloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) { +} // Instances returns an implementation of Instances for Photon Controller. func (pc *PCCloud) Instances() (cloudprovider.Instances, bool) { @@ -307,14 +307,14 @@ func (pc *PCCloud) NodeAddresses(ctx context.Context, nodeName k8stypes.NodeName if name == pc.localK8sHostname { ifaces, err := net.Interfaces() if err != nil { - glog.Errorf("Photon Cloud Provider: net.Interfaces() failed for NodeAddresses. Error[%v]", err) + klog.Errorf("Photon Cloud Provider: net.Interfaces() failed for NodeAddresses. Error[%v]", err) return nodeAddrs, err } for _, i := range ifaces { addrs, err := i.Addrs() if err != nil { - glog.Warningf("Photon Cloud Provider: Failed to extract addresses for NodeAddresses. Error[%v]", err) + klog.Warningf("Photon Cloud Provider: Failed to extract addresses for NodeAddresses. Error[%v]", err) } else { for _, addr := range addrs { if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() { @@ -348,20 +348,20 @@ func (pc *PCCloud) NodeAddresses(ctx context.Context, nodeName k8stypes.NodeName // This is assumed to be done by master only. vmID, err := getInstanceID(pc, name) if err != nil { - glog.Errorf("Photon Cloud Provider: getInstanceID failed for NodeAddresses. Error[%v]", err) + klog.Errorf("Photon Cloud Provider: getInstanceID failed for NodeAddresses. Error[%v]", err) return nodeAddrs, err } photonClient, err := getPhotonClient(pc) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to get photon client for NodeAddresses, error: [%v]", err) + klog.Errorf("Photon Cloud Provider: Failed to get photon client for NodeAddresses, error: [%v]", err) return nodeAddrs, err } // Retrieve the Photon VM's IP addresses from the Photon Controller endpoint based on the VM ID vmList, err := photonClient.Projects.GetVMs(pc.projID, nil) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to GetVMs for project %s. Error[%v]", pc.projID, err) + klog.Errorf("Photon Cloud Provider: Failed to GetVMs for project %s. Error[%v]", pc.projID, err) return nodeAddrs, err } @@ -369,12 +369,12 @@ func (pc *PCCloud) NodeAddresses(ctx context.Context, nodeName k8stypes.NodeName if vm.ID == vmID { task, err := photonClient.VMs.GetNetworks(vm.ID) if err != nil { - glog.Errorf("Photon Cloud Provider: GetNetworks failed for node %s with vm.ID %s. Error[%v]", name, vm.ID, err) + klog.Errorf("Photon Cloud Provider: GetNetworks failed for node %s with vm.ID %s. Error[%v]", name, vm.ID, err) return nodeAddrs, err } else { task, err = photonClient.Tasks.Wait(task.ID) if err != nil { - glog.Errorf("Photon Cloud Provider: Wait task for GetNetworks failed for node %s with vm.ID %s. Error[%v]", name, vm.ID, err) + klog.Errorf("Photon Cloud Provider: Wait task for GetNetworks failed for node %s with vm.ID %s. 
Error[%v]", name, vm.ID, err) return nodeAddrs, err } else { networkConnections := task.ResourceProperties.(map[string]interface{}) @@ -414,7 +414,7 @@ func (pc *PCCloud) NodeAddresses(ctx context.Context, nodeName k8stypes.NodeName } } - glog.Errorf("Failed to find the node %s from Photon Controller endpoint", name) + klog.Errorf("Failed to find the node %s from Photon Controller endpoint", name) return nodeAddrs, fmt.Errorf("Failed to find the node %s from Photon Controller endpoint", name) } @@ -474,7 +474,7 @@ func (pc *PCCloud) InstanceID(ctx context.Context, nodeName k8stypes.NodeName) ( // We assume only master need to get InstanceID of a node other than itself ID, err := getInstanceID(pc, name) if err != nil { - glog.Errorf("Photon Cloud Provider: getInstanceID failed for InstanceID. Error[%v]", err) + klog.Errorf("Photon Cloud Provider: getInstanceID failed for InstanceID. Error[%v]", err) return ID, err } else { return ID, nil @@ -544,7 +544,7 @@ func (pc *PCCloud) HasClusterID() bool { func (pc *PCCloud) AttachDisk(ctx context.Context, pdID string, nodeName k8stypes.NodeName) error { photonClient, err := getPhotonClient(pc) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to get photon client for AttachDisk, error: [%v]", err) + klog.Errorf("Photon Cloud Provider: Failed to get photon client for AttachDisk, error: [%v]", err) return err } @@ -554,19 +554,19 @@ func (pc *PCCloud) AttachDisk(ctx context.Context, pdID string, nodeName k8stype vmID, err := pc.InstanceID(ctx, nodeName) if err != nil { - glog.Errorf("Photon Cloud Provider: pc.InstanceID failed for AttachDisk. Error[%v]", err) + klog.Errorf("Photon Cloud Provider: pc.InstanceID failed for AttachDisk. Error[%v]", err) return err } task, err := photonClient.VMs.AttachDisk(vmID, operation) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to attach disk with pdID %s. Error[%v]", pdID, err) + klog.Errorf("Photon Cloud Provider: Failed to attach disk with pdID %s. Error[%v]", pdID, err) return err } _, err = photonClient.Tasks.Wait(task.ID) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to wait for task to attach disk with pdID %s. Error[%v]", pdID, err) + klog.Errorf("Photon Cloud Provider: Failed to wait for task to attach disk with pdID %s. Error[%v]", pdID, err) return err } @@ -577,7 +577,7 @@ func (pc *PCCloud) AttachDisk(ctx context.Context, pdID string, nodeName k8stype func (pc *PCCloud) DetachDisk(ctx context.Context, pdID string, nodeName k8stypes.NodeName) error { photonClient, err := getPhotonClient(pc) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to get photon client for DetachDisk, error: [%v]", err) + klog.Errorf("Photon Cloud Provider: Failed to get photon client for DetachDisk, error: [%v]", err) return err } @@ -587,19 +587,19 @@ func (pc *PCCloud) DetachDisk(ctx context.Context, pdID string, nodeName k8stype vmID, err := pc.InstanceID(ctx, nodeName) if err != nil { - glog.Errorf("Photon Cloud Provider: pc.InstanceID failed for DetachDisk. Error[%v]", err) + klog.Errorf("Photon Cloud Provider: pc.InstanceID failed for DetachDisk. Error[%v]", err) return err } task, err := photonClient.VMs.DetachDisk(vmID, operation) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to detach disk with pdID %s. Error[%v]", pdID, err) + klog.Errorf("Photon Cloud Provider: Failed to detach disk with pdID %s. 
Error[%v]", pdID, err) return err } _, err = photonClient.Tasks.Wait(task.ID) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to wait for task to detach disk with pdID %s. Error[%v]", pdID, err) + klog.Errorf("Photon Cloud Provider: Failed to wait for task to detach disk with pdID %s. Error[%v]", pdID, err) return err } @@ -610,23 +610,23 @@ func (pc *PCCloud) DetachDisk(ctx context.Context, pdID string, nodeName k8stype func (pc *PCCloud) DiskIsAttached(ctx context.Context, pdID string, nodeName k8stypes.NodeName) (bool, error) { photonClient, err := getPhotonClient(pc) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to get photon client for DiskIsAttached, error: [%v]", err) + klog.Errorf("Photon Cloud Provider: Failed to get photon client for DiskIsAttached, error: [%v]", err) return false, err } disk, err := photonClient.Disks.Get(pdID) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to Get disk with pdID %s. Error[%v]", pdID, err) + klog.Errorf("Photon Cloud Provider: Failed to Get disk with pdID %s. Error[%v]", pdID, err) return false, err } vmID, err := pc.InstanceID(ctx, nodeName) if err == cloudprovider.InstanceNotFound { - glog.Infof("Instance %q does not exist, disk %s will be detached automatically.", nodeName, pdID) + klog.Infof("Instance %q does not exist, disk %s will be detached automatically.", nodeName, pdID) return false, nil } if err != nil { - glog.Errorf("Photon Cloud Provider: pc.InstanceID failed for DiskIsAttached. Error[%v]", err) + klog.Errorf("Photon Cloud Provider: pc.InstanceID failed for DiskIsAttached. Error[%v]", err) return false, err } @@ -644,7 +644,7 @@ func (pc *PCCloud) DisksAreAttached(ctx context.Context, pdIDs []string, nodeNam attached := make(map[string]bool) photonClient, err := getPhotonClient(pc) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to get photon client for DisksAreAttached, error: [%v]", err) + klog.Errorf("Photon Cloud Provider: Failed to get photon client for DisksAreAttached, error: [%v]", err) return attached, err } @@ -654,19 +654,19 @@ func (pc *PCCloud) DisksAreAttached(ctx context.Context, pdIDs []string, nodeNam vmID, err := pc.InstanceID(ctx, nodeName) if err == cloudprovider.InstanceNotFound { - glog.Infof("Instance %q does not exist, its disks will be detached automatically.", nodeName) + klog.Infof("Instance %q does not exist, its disks will be detached automatically.", nodeName) // make all the disks as detached. return attached, nil } if err != nil { - glog.Errorf("Photon Cloud Provider: pc.InstanceID failed for DiskIsAttached. Error[%v]", err) + klog.Errorf("Photon Cloud Provider: pc.InstanceID failed for DiskIsAttached. 
Error[%v]", err) return attached, err } for _, pdID := range pdIDs { disk, err := photonClient.Disks.Get(pdID) if err != nil { - glog.Warningf("Photon Cloud Provider: failed to get VMs for persistent disk %s, err [%v]", pdID, err) + klog.Warningf("Photon Cloud Provider: failed to get VMs for persistent disk %s, err [%v]", pdID, err) } else { for _, vm := range disk.VMs { if vm == vmID { @@ -683,7 +683,7 @@ func (pc *PCCloud) DisksAreAttached(ctx context.Context, pdIDs []string, nodeNam func (pc *PCCloud) CreateDisk(volumeOptions *VolumeOptions) (pdID string, err error) { photonClient, err := getPhotonClient(pc) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to get photon client for CreateDisk, error: [%v]", err) + klog.Errorf("Photon Cloud Provider: Failed to get photon client for CreateDisk, error: [%v]", err) return "", err } @@ -695,13 +695,13 @@ func (pc *PCCloud) CreateDisk(volumeOptions *VolumeOptions) (pdID string, err er task, err := photonClient.Projects.CreateDisk(pc.projID, &diskSpec) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to CreateDisk. Error[%v]", err) + klog.Errorf("Photon Cloud Provider: Failed to CreateDisk. Error[%v]", err) return "", err } waitTask, err := photonClient.Tasks.Wait(task.ID) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to wait for task to CreateDisk. Error[%v]", err) + klog.Errorf("Photon Cloud Provider: Failed to wait for task to CreateDisk. Error[%v]", err) return "", err } @@ -712,19 +712,19 @@ func (pc *PCCloud) CreateDisk(volumeOptions *VolumeOptions) (pdID string, err er func (pc *PCCloud) DeleteDisk(pdID string) error { photonClient, err := getPhotonClient(pc) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to get photon client for DeleteDisk, error: [%v]", err) + klog.Errorf("Photon Cloud Provider: Failed to get photon client for DeleteDisk, error: [%v]", err) return err } task, err := photonClient.Disks.Delete(pdID) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to DeleteDisk. Error[%v]", err) + klog.Errorf("Photon Cloud Provider: Failed to DeleteDisk. Error[%v]", err) return err } _, err = photonClient.Tasks.Wait(task.ID) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to wait for task to DeleteDisk. Error[%v]", err) + klog.Errorf("Photon Cloud Provider: Failed to wait for task to DeleteDisk. 
Error[%v]", err) return err } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/BUILD index 4f1e053f0abb..6e3034f86d0c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/BUILD @@ -17,23 +17,22 @@ go_library( importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere", deps = [ "//pkg/apis/core/v1/helper:go_default_library", - "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/vsphere/vclib:go_default_library", "//pkg/cloudprovider/providers/vsphere/vclib/diskmanagers:go_default_library", - "//pkg/controller:go_default_library", - "//pkg/util/version:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//staging/src/k8s.io/cloud-provider:go_default_library", "//vendor/github.com/vmware/govmomi/vapi/rest:go_default_library", "//vendor/github.com/vmware/govmomi/vapi/tags:go_default_library", "//vendor/github.com/vmware/govmomi/vim25:go_default_library", "//vendor/github.com/vmware/govmomi/vim25/mo:go_default_library", "//vendor/gopkg.in/gcfg.v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -45,7 +44,6 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/vsphere/vclib:go_default_library", "//pkg/cloudprovider/providers/vsphere/vclib/fixtures:go_default_library", "//pkg/controller:go_default_library", @@ -56,6 +54,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", + "//staging/src/k8s.io/cloud-provider:go_default_library", "//vendor/github.com/vmware/govmomi/lookup/simulator:go_default_library", "//vendor/github.com/vmware/govmomi/property:go_default_library", "//vendor/github.com/vmware/govmomi/simulator:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/OWNERS index 376ca6b4ae4e..001c17509c56 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/OWNERS @@ -3,7 +3,6 @@ approvers: - baludontu - divyenpatel - imkin -- kerneltime -- luomiao - frapposelli - dougm +- SandeepPissay diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/credentialmanager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/credentialmanager.go index 95862a8a5aa2..a3b651495f61 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/credentialmanager.go +++ 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/credentialmanager.go @@ -19,10 +19,10 @@ package vsphere import ( "errors" "fmt" - "github.com/golang/glog" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/client-go/listers/core/v1" + "k8s.io/klog" "net/http" "strings" "sync" @@ -71,12 +71,12 @@ func (secretCredentialManager *SecretCredentialManager) GetCredential(server str return nil, err } // Handle secrets deletion by finding credentials from cache - glog.Warningf("secret %q not found in namespace %q", secretCredentialManager.SecretName, secretCredentialManager.SecretNamespace) + klog.Warningf("secret %q not found in namespace %q", secretCredentialManager.SecretName, secretCredentialManager.SecretNamespace) } credential, found := secretCredentialManager.Cache.GetCredential(server) if !found { - glog.Errorf("credentials not found for server %q", server) + klog.Errorf("credentials not found for server %q", server) return nil, ErrCredentialsNotFound } return &credential, nil @@ -88,13 +88,13 @@ func (secretCredentialManager *SecretCredentialManager) updateCredentialsMap() e } secret, err := secretCredentialManager.SecretLister.Secrets(secretCredentialManager.SecretNamespace).Get(secretCredentialManager.SecretName) if err != nil { - glog.Errorf("Cannot get secret %s in namespace %s. error: %q", secretCredentialManager.SecretName, secretCredentialManager.SecretNamespace, err) + klog.Errorf("Cannot get secret %s in namespace %s. error: %q", secretCredentialManager.SecretName, secretCredentialManager.SecretNamespace, err) return err } cacheSecret := secretCredentialManager.Cache.GetSecret() if cacheSecret != nil && cacheSecret.GetResourceVersion() == secret.GetResourceVersion() { - glog.V(4).Infof("VCP SecretCredentialManager: Secret %q will not be updated in cache. Since, secrets have same resource version %q", secretCredentialManager.SecretName, cacheSecret.GetResourceVersion()) + klog.V(4).Infof("VCP SecretCredentialManager: Secret %q will not be updated in cache. 
Since, secrets have same resource version %q", secretCredentialManager.SecretName, cacheSecret.GetResourceVersion()) return nil } secretCredentialManager.Cache.UpdateSecret(secret) @@ -150,13 +150,13 @@ func parseConfig(data map[string][]byte, config map[string]*Credential) error { } config[vcServer].User = string(credentialValue) } else { - glog.Errorf("Unknown secret key %s", credentialKey) + klog.Errorf("Unknown secret key %s", credentialKey) return ErrUnknownSecretKey } } for vcServer, credential := range config { if credential.User == "" || credential.Password == "" { - glog.Errorf("Username/Password is missing for server %s", vcServer) + klog.Errorf("Username/Password is missing for server %s", vcServer) return ErrCredentialMissing } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/nodemanager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/nodemanager.go index 16c1953fc337..92f4c55d5e0a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/nodemanager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/nodemanager.go @@ -22,9 +22,9 @@ import ( "strings" "sync" - "github.com/golang/glog" "k8s.io/api/core/v1" k8stypes "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib" ) @@ -81,11 +81,11 @@ func (nm *NodeManager) DiscoverNode(node *v1.Node) error { queueChannel = make(chan *VmSearch, QUEUE_SIZE) nodeUUID, err := GetNodeUUID(node) if err != nil { - glog.Errorf("Node Discovery failed to get node uuid for node %s with error: %v", node.Name, err) + klog.Errorf("Node Discovery failed to get node uuid for node %s with error: %v", node.Name, err) return err } - glog.V(4).Infof("Discovering node %s with uuid %s", node.ObjectMeta.Name, nodeUUID) + klog.V(4).Infof("Discovering node %s with uuid %s", node.ObjectMeta.Name, nodeUUID) vmFound := false globalErr = nil @@ -124,7 +124,7 @@ func (nm *NodeManager) DiscoverNode(node *v1.Node) error { err := nm.vcConnect(ctx, vsi) if err != nil { - glog.V(4).Info("Discovering node error vc:", err) + klog.V(4).Info("Discovering node error vc:", err) setGlobalErr(err) continue } @@ -132,7 +132,7 @@ func (nm *NodeManager) DiscoverNode(node *v1.Node) error { if vsi.cfg.Datacenters == "" { datacenterObjs, err = vclib.GetAllDatacenter(ctx, vsi.conn) if err != nil { - glog.V(4).Info("Discovering node error dc:", err) + klog.V(4).Info("Discovering node error dc:", err) setGlobalErr(err) continue } @@ -145,7 +145,7 @@ func (nm *NodeManager) DiscoverNode(node *v1.Node) error { } datacenterObj, err := vclib.GetDatacenter(ctx, vsi.conn, dc) if err != nil { - glog.V(4).Info("Discovering node error dc:", err) + klog.V(4).Info("Discovering node error dc:", err) setGlobalErr(err) continue } @@ -159,7 +159,7 @@ func (nm *NodeManager) DiscoverNode(node *v1.Node) error { break } - glog.V(4).Infof("Finding node %s in vc=%s and datacenter=%s", node.Name, vc, datacenterObj.Name()) + klog.V(4).Infof("Finding node %s in vc=%s and datacenter=%s", node.Name, vc, datacenterObj.Name()) queueChannel <- &VmSearch{ vc: vc, datacenter: datacenterObj, @@ -176,18 +176,18 @@ func (nm *NodeManager) DiscoverNode(node *v1.Node) error { defer cancel() vm, err := res.datacenter.GetVMByUUID(ctx, nodeUUID) if err != nil { - glog.V(4).Infof("Error while looking for vm=%+v in vc=%s and datacenter=%s: %v", + klog.V(4).Infof("Error while looking for vm=%+v in vc=%s and datacenter=%s: 
%v", vm, res.vc, res.datacenter.Name(), err) if err != vclib.ErrNoVMFound { setGlobalErr(err) } else { - glog.V(4).Infof("Did not find node %s in vc=%s and datacenter=%s", + klog.V(4).Infof("Did not find node %s in vc=%s and datacenter=%s", node.Name, res.vc, res.datacenter.Name()) } continue } if vm != nil { - glog.V(4).Infof("Found node %s as vm=%+v in vc=%s and datacenter=%s", + klog.V(4).Infof("Found node %s as vm=%+v in vc=%s and datacenter=%s", node.Name, vm, res.vc, res.datacenter.Name()) nodeInfo := &NodeInfo{dataCenter: res.datacenter, vm: vm, vcServer: res.vc, vmUUID: nodeUUID} @@ -210,7 +210,7 @@ func (nm *NodeManager) DiscoverNode(node *v1.Node) error { return *globalErr } - glog.V(4).Infof("Discovery Node: %q vm not found", node.Name) + klog.V(4).Infof("Discovery Node: %q vm not found", node.Name) return vclib.ErrNoVMFound } @@ -276,19 +276,19 @@ func (nm *NodeManager) GetNodeInfo(nodeName k8stypes.NodeName) (NodeInfo, error) var err error if nodeInfo == nil { // Rediscover node if no NodeInfo found. - glog.V(4).Infof("No VM found for node %q. Initiating rediscovery.", convertToString(nodeName)) + klog.V(4).Infof("No VM found for node %q. Initiating rediscovery.", convertToString(nodeName)) err = nm.RediscoverNode(nodeName) if err != nil { - glog.Errorf("Error %q node info for node %q not found", err, convertToString(nodeName)) + klog.Errorf("Error %q node info for node %q not found", err, convertToString(nodeName)) return NodeInfo{}, err } nodeInfo = getNodeInfo(nodeName) } else { // Renew the found NodeInfo to avoid stale vSphere connection. - glog.V(4).Infof("Renewing NodeInfo %+v for node %q", nodeInfo, convertToString(nodeName)) + klog.V(4).Infof("Renewing NodeInfo %+v for node %q", nodeInfo, convertToString(nodeName)) nodeInfo, err = nm.renewNodeInfo(nodeInfo, true) if err != nil { - glog.Errorf("Error %q occurred while renewing NodeInfo for %q", err, convertToString(nodeName)) + klog.Errorf("Error %q occurred while renewing NodeInfo for %q", err, convertToString(nodeName)) return NodeInfo{}, err } nm.addNodeInfo(convertToString(nodeName), nodeInfo) @@ -309,7 +309,7 @@ func (nm *NodeManager) GetNodeDetails() ([]NodeDetails, error) { if err != nil { return nil, err } - glog.V(4).Infof("Updated NodeInfo %v for node %q.", nodeInfo, nodeName) + klog.V(4).Infof("Updated NodeInfo %v for node %q.", nodeInfo, nodeName) nodeDetails = append(nodeDetails, NodeDetails{nodeName, nodeInfo.vm, nodeInfo.vmUUID}) } return nodeDetails, nil @@ -324,7 +324,7 @@ func (nm *NodeManager) addNodeInfo(nodeName string, nodeInfo *NodeInfo) { func (nm *NodeManager) GetVSphereInstance(nodeName k8stypes.NodeName) (VSphereInstance, error) { nodeInfo, err := nm.GetNodeInfo(nodeName) if err != nil { - glog.V(4).Infof("node info for node %q not found", convertToString(nodeName)) + klog.V(4).Infof("node info for node %q not found", convertToString(nodeName)) return VSphereInstance{}, err } vsphereInstance := nm.vsphereInstanceMap[nodeInfo.vcServer] @@ -379,17 +379,16 @@ func (nm *NodeManager) vcConnect(ctx context.Context, vsphereInstance *VSphereIn credentialManager := nm.CredentialManager() if !vclib.IsInvalidCredentialsError(err) || credentialManager == nil { - glog.Errorf("Cannot connect to vCenter with err: %v", err) + klog.Errorf("Cannot connect to vCenter with err: %v", err) return err } - glog.V(4).Infof("Invalid credentials. Cannot connect to server %q. "+ - "Fetching credentials from secrets.", vsphereInstance.conn.Hostname) + klog.V(4).Infof("Invalid credentials. Cannot connect to server %q. 
Fetching credentials from secrets.", vsphereInstance.conn.Hostname) // Get latest credentials from SecretCredentialManager credentials, err := credentialManager.GetCredential(vsphereInstance.conn.Hostname) if err != nil { - glog.Errorf("Failed to get credentials from Secret Credential Manager with err: %v", err) + klog.Errorf("Failed to get credentials from Secret Credential Manager with err: %v", err) return err } vsphereInstance.conn.UpdateCredentials(credentials.User, credentials.Password) @@ -413,19 +412,19 @@ func (nm *NodeManager) GetNodeInfoWithNodeObject(node *v1.Node) (NodeInfo, error var err error if nodeInfo == nil { // Rediscover node if no NodeInfo found. - glog.V(4).Infof("No VM found for node %q. Initiating rediscovery.", nodeName) + klog.V(4).Infof("No VM found for node %q. Initiating rediscovery.", nodeName) err = nm.DiscoverNode(node) if err != nil { - glog.Errorf("Error %q node info for node %q not found", err, nodeName) + klog.Errorf("Error %q node info for node %q not found", err, nodeName) return NodeInfo{}, err } nodeInfo = getNodeInfo(nodeName) } else { // Renew the found NodeInfo to avoid stale vSphere connection. - glog.V(4).Infof("Renewing NodeInfo %+v for node %q", nodeInfo, nodeName) + klog.V(4).Infof("Renewing NodeInfo %+v for node %q", nodeInfo, nodeName) nodeInfo, err = nm.renewNodeInfo(nodeInfo, true) if err != nil { - glog.Errorf("Error %q occurred while renewing NodeInfo for %q", err, nodeName) + klog.Errorf("Error %q occurred while renewing NodeInfo for %q", err, nodeName) return NodeInfo{}, err } nm.addNodeInfo(nodeName, nodeInfo) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/BUILD index 934054d1ec02..a81f8385a16f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/BUILD @@ -25,7 +25,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib", deps = [ "//pkg/version:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/github.com/vmware/govmomi/find:go_default_library", "//vendor/github.com/vmware/govmomi/object:go_default_library", @@ -38,6 +37,7 @@ go_library( "//vendor/github.com/vmware/govmomi/vim25/mo:go_default_library", "//vendor/github.com/vmware/govmomi/vim25/soap:go_default_library", "//vendor/github.com/vmware/govmomi/vim25/types:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/connection.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/connection.go index be4c5e3f5384..28f2228d6567 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/connection.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/connection.go @@ -25,11 +25,11 @@ import ( neturl "net/url" "sync" - "github.com/golang/glog" "github.com/vmware/govmomi/session" "github.com/vmware/govmomi/sts" "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/soap" + "k8s.io/klog" "k8s.io/kubernetes/pkg/version" ) @@ -62,7 +62,7 @@ func (connection *VSphereConnection) Connect(ctx context.Context) error { 
if connection.Client == nil { connection.Client, err = connection.NewClient(ctx) if err != nil { - glog.Errorf("Failed to create govmomi client. err: %+v", err) + klog.Errorf("Failed to create govmomi client. err: %+v", err) return err } return nil @@ -70,17 +70,17 @@ func (connection *VSphereConnection) Connect(ctx context.Context) error { m := session.NewManager(connection.Client) userSession, err := m.UserSession(ctx) if err != nil { - glog.Errorf("Error while obtaining user session. err: %+v", err) + klog.Errorf("Error while obtaining user session. err: %+v", err) return err } if userSession != nil { return nil } - glog.Warningf("Creating new client session since the existing session is not valid or not authenticated") + klog.Warningf("Creating new client session since the existing session is not valid or not authenticated") connection.Client, err = connection.NewClient(ctx) if err != nil { - glog.Errorf("Failed to create govmomi client. err: %+v", err) + klog.Errorf("Failed to create govmomi client. err: %+v", err) return err } return nil @@ -98,21 +98,21 @@ func (connection *VSphereConnection) login(ctx context.Context, client *vim25.Cl // decide to use LoginByToken if the username value is PEM encoded. b, _ := pem.Decode([]byte(connection.Username)) if b == nil { - glog.V(3).Infof("SessionManager.Login with username '%s'", connection.Username) + klog.V(3).Infof("SessionManager.Login with username '%s'", connection.Username) return m.Login(ctx, neturl.UserPassword(connection.Username, connection.Password)) } - glog.V(3).Infof("SessionManager.LoginByToken with certificate '%s'", connection.Username) + klog.V(3).Infof("SessionManager.LoginByToken with certificate '%s'", connection.Username) cert, err := tls.X509KeyPair([]byte(connection.Username), []byte(connection.Password)) if err != nil { - glog.Errorf("Failed to load X509 key pair. err: %+v", err) + klog.Errorf("Failed to load X509 key pair. err: %+v", err) return err } tokens, err := sts.NewClient(ctx, client) if err != nil { - glog.Errorf("Failed to create STS client. err: %+v", err) + klog.Errorf("Failed to create STS client. err: %+v", err) return err } @@ -122,7 +122,7 @@ func (connection *VSphereConnection) login(ctx context.Context, client *vim25.Cl signer, err := tokens.Issue(ctx, req) if err != nil { - glog.Errorf("Failed to issue SAML token. err: %+v", err) + klog.Errorf("Failed to issue SAML token. err: %+v", err) return err } @@ -144,15 +144,15 @@ func (connection *VSphereConnection) Logout(ctx context.Context) { hasActiveSession, err := m.SessionIsActive(ctx) if err != nil { - glog.Errorf("Logout failed: %s", err) + klog.Errorf("Logout failed: %s", err) return } if !hasActiveSession { - glog.Errorf("No active session, cannot logout") + klog.Errorf("No active session, cannot logout") return } if err := m.Logout(ctx); err != nil { - glog.Errorf("Logout failed: %s", err) + klog.Errorf("Logout failed: %s", err) } } @@ -160,7 +160,7 @@ func (connection *VSphereConnection) Logout(ctx context.Context) { func (connection *VSphereConnection) NewClient(ctx context.Context) (*vim25.Client, error) { url, err := soap.ParseURL(net.JoinHostPort(connection.Hostname, connection.Port)) if err != nil { - glog.Errorf("Failed to parse URL: %s. err: %+v", url, err) + klog.Errorf("Failed to parse URL: %s. err: %+v", url, err) return nil, err } @@ -177,7 +177,7 @@ func (connection *VSphereConnection) NewClient(ctx context.Context) (*vim25.Clie client, err := vim25.NewClient(ctx, sc) if err != nil { - glog.Errorf("Failed to create new client. 
err: %+v", err) + klog.Errorf("Failed to create new client. err: %+v", err) return nil, err } @@ -188,10 +188,10 @@ func (connection *VSphereConnection) NewClient(ctx context.Context) (*vim25.Clie if err != nil { return nil, err } - if glog.V(3) { + if klog.V(3) { s, err := session.NewManager(client).UserSession(ctx) if err == nil { - glog.Infof("New session ID for '%s' = %s", s.UserName, s.Key) + klog.Infof("New session ID for '%s' = %s", s.UserName, s.Key) } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/datacenter.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/datacenter.go index 9a4eddd074d9..778e0f682900 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/datacenter.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/datacenter.go @@ -23,12 +23,12 @@ import ( "path/filepath" "strings" - "github.com/golang/glog" "github.com/vmware/govmomi/find" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/property" "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" + "k8s.io/klog" ) // Datacenter extends the govmomi Datacenter object @@ -42,7 +42,7 @@ func GetDatacenter(ctx context.Context, connection *VSphereConnection, datacente finder := find.NewFinder(connection.Client, false) datacenter, err := finder.Datacenter(ctx, datacenterPath) if err != nil { - glog.Errorf("Failed to find the datacenter: %s. err: %+v", datacenterPath, err) + klog.Errorf("Failed to find the datacenter: %s. err: %+v", datacenterPath, err) return nil, err } dc := Datacenter{datacenter} @@ -55,7 +55,7 @@ func GetAllDatacenter(ctx context.Context, connection *VSphereConnection) ([]*Da finder := find.NewFinder(connection.Client, false) datacenters, err := finder.DatacenterList(ctx, "*") if err != nil { - glog.Errorf("Failed to find the datacenter. err: %+v", err) + klog.Errorf("Failed to find the datacenter. err: %+v", err) return nil, err } for _, datacenter := range datacenters { @@ -71,11 +71,11 @@ func (dc *Datacenter) GetVMByUUID(ctx context.Context, vmUUID string) (*VirtualM vmUUID = strings.ToLower(strings.TrimSpace(vmUUID)) svm, err := s.FindByUuid(ctx, dc.Datacenter, vmUUID, true, nil) if err != nil { - glog.Errorf("Failed to find VM by UUID. VM UUID: %s, err: %+v", vmUUID, err) + klog.Errorf("Failed to find VM by UUID. VM UUID: %s, err: %+v", vmUUID, err) return nil, err } if svm == nil { - glog.Errorf("Unable to find VM by UUID. VM UUID: %s", vmUUID) + klog.Errorf("Unable to find VM by UUID. 
VM UUID: %s", vmUUID) return nil, ErrNoVMFound } virtualMachine := VirtualMachine{object.NewVirtualMachine(dc.Client(), svm.Reference()), dc} @@ -89,11 +89,11 @@ func (dc *Datacenter) GetHostByVMUUID(ctx context.Context, vmUUID string) (*type pc := property.DefaultCollector(virtualMachine.Client()) err = pc.RetrieveOne(ctx, virtualMachine.Reference(), []string{"summary.runtime.host"}, &vmMo) if err != nil { - glog.Errorf("Failed to retrive VM runtime host, err: %v", err) + klog.Errorf("Failed to retrive VM runtime host, err: %v", err) return nil, err } host := vmMo.Summary.Runtime.Host - glog.Infof("%s host is %s", virtualMachine.Reference(), host) + klog.Infof("%s host is %s", virtualMachine.Reference(), host) return host, nil } @@ -103,7 +103,7 @@ func (dc *Datacenter) GetVMByPath(ctx context.Context, vmPath string) (*VirtualM finder := getFinder(dc) vm, err := finder.VirtualMachine(ctx, vmPath) if err != nil { - glog.Errorf("Failed to find VM by Path. VM Path: %s, err: %+v", vmPath, err) + klog.Errorf("Failed to find VM by Path. VM Path: %s, err: %+v", vmPath, err) return nil, err } virtualMachine := VirtualMachine{vm, dc} @@ -116,7 +116,7 @@ func (dc *Datacenter) GetAllDatastores(ctx context.Context) (map[string]*Datasto finder := getFinder(dc) datastores, err := finder.DatastoreList(ctx, "*") if err != nil { - glog.Errorf("Failed to get all the datastores. err: %+v", err) + klog.Errorf("Failed to get all the datastores. err: %+v", err) return nil, err } var dsList []types.ManagedObjectReference @@ -129,7 +129,7 @@ func (dc *Datacenter) GetAllDatastores(ctx context.Context) (map[string]*Datasto properties := []string{DatastoreInfoProperty} err = pc.Retrieve(ctx, dsList, properties, &dsMoList) if err != nil { - glog.Errorf("Failed to get Datastore managed objects from datastore objects."+ + klog.Errorf("Failed to get Datastore managed objects from datastore objects."+ " dsObjList: %+v, properties: %+v, err: %v", dsList, properties, err) return nil, err } @@ -141,7 +141,7 @@ func (dc *Datacenter) GetAllDatastores(ctx context.Context) (map[string]*Datasto dc}, dsMo.Info.GetDatastoreInfo()} } - glog.V(9).Infof("dsURLInfoMap : %+v", dsURLInfoMap) + klog.V(9).Infof("dsURLInfoMap : %+v", dsURLInfoMap) return dsURLInfoMap, nil } @@ -150,7 +150,7 @@ func (dc *Datacenter) GetDatastoreByPath(ctx context.Context, vmDiskPath string) datastorePathObj := new(object.DatastorePath) isSuccess := datastorePathObj.FromString(vmDiskPath) if !isSuccess { - glog.Errorf("Failed to parse vmDiskPath: %s", vmDiskPath) + klog.Errorf("Failed to parse vmDiskPath: %s", vmDiskPath) return nil, errors.New("Failed to parse vmDiskPath") } @@ -162,7 +162,7 @@ func (dc *Datacenter) GetDatastoreByName(ctx context.Context, name string) (*Dat finder := getFinder(dc) ds, err := finder.Datastore(ctx, name) if err != nil { - glog.Errorf("Failed while searching for datastore: %s. err: %+v", name, err) + klog.Errorf("Failed while searching for datastore: %s. err: %+v", name, err) return nil, err } datastore := Datastore{ds, dc} @@ -176,7 +176,7 @@ func (dc *Datacenter) GetResourcePool(ctx context.Context, resourcePoolPath stri var err error resourcePool, err = finder.ResourcePoolOrDefault(ctx, resourcePoolPath) if err != nil { - glog.Errorf("Failed to get the ResourcePool for path '%s'. err: %+v", resourcePoolPath, err) + klog.Errorf("Failed to get the ResourcePool for path '%s'. 
err: %+v", resourcePoolPath, err) return nil, err } return resourcePool, nil @@ -188,7 +188,7 @@ func (dc *Datacenter) GetFolderByPath(ctx context.Context, folderPath string) (* finder := getFinder(dc) vmFolder, err := finder.Folder(ctx, folderPath) if err != nil { - glog.Errorf("Failed to get the folder reference for %s. err: %+v", folderPath, err) + klog.Errorf("Failed to get the folder reference for %s. err: %+v", folderPath, err) return nil, err } folder := Folder{vmFolder, dc} @@ -200,7 +200,7 @@ func (dc *Datacenter) GetVMMoList(ctx context.Context, vmObjList []*VirtualMachi var vmMoList []mo.VirtualMachine var vmRefs []types.ManagedObjectReference if len(vmObjList) < 1 { - glog.Errorf("VirtualMachine Object list is empty") + klog.Errorf("VirtualMachine Object list is empty") return nil, fmt.Errorf("VirtualMachine Object list is empty") } @@ -210,7 +210,7 @@ func (dc *Datacenter) GetVMMoList(ctx context.Context, vmObjList []*VirtualMachi pc := property.DefaultCollector(dc.Client()) err := pc.Retrieve(ctx, vmRefs, properties, &vmMoList) if err != nil { - glog.Errorf("Failed to get VM managed objects from VM objects. vmObjList: %+v, properties: %+v, err: %v", vmObjList, properties, err) + klog.Errorf("Failed to get VM managed objects from VM objects. vmObjList: %+v, properties: %+v, err: %v", vmObjList, properties, err) return nil, err } return vmMoList, nil @@ -226,7 +226,7 @@ func (dc *Datacenter) GetVirtualDiskPage83Data(ctx context.Context, diskPath str diskUUID, err := vdm.QueryVirtualDiskUuid(ctx, diskPath, dc.Datacenter) if err != nil { - glog.Warningf("QueryVirtualDiskUuid failed for diskPath: %q. err: %+v", diskPath, err) + klog.Warningf("QueryVirtualDiskUuid failed for diskPath: %q. err: %+v", diskPath, err) return "", err } diskUUID = formatVirtualDiskUUID(diskUUID) @@ -238,7 +238,7 @@ func (dc *Datacenter) GetDatastoreMoList(ctx context.Context, dsObjList []*Datas var dsMoList []mo.Datastore var dsRefs []types.ManagedObjectReference if len(dsObjList) < 1 { - glog.Errorf("Datastore Object list is empty") + klog.Errorf("Datastore Object list is empty") return nil, fmt.Errorf("Datastore Object list is empty") } @@ -248,7 +248,7 @@ func (dc *Datacenter) GetDatastoreMoList(ctx context.Context, dsObjList []*Datas pc := property.DefaultCollector(dc.Client()) err := pc.Retrieve(ctx, dsRefs, properties, &dsMoList) if err != nil { - glog.Errorf("Failed to get Datastore managed objects from datastore objects. dsObjList: %+v, properties: %+v, err: %v", dsObjList, properties, err) + klog.Errorf("Failed to get Datastore managed objects from datastore objects. 
dsObjList: %+v, properties: %+v, err: %v", dsObjList, properties, err) return nil, err } return dsMoList, nil @@ -266,27 +266,27 @@ func (dc *Datacenter) CheckDisksAttached(ctx context.Context, nodeVolumes map[st vm, err := dc.GetVMByPath(ctx, nodeName) if err != nil { if IsNotFound(err) { - glog.Warningf("Node %q does not exist, vSphere CP will assume disks %v are not attached to it.", nodeName, volPaths) + klog.Warningf("Node %q does not exist, vSphere CP will assume disks %v are not attached to it.", nodeName, volPaths) } continue } vmList = append(vmList, vm) } if len(vmList) == 0 { - glog.V(2).Infof("vSphere CP will assume no disks are attached to any node.") + klog.V(2).Infof("vSphere CP will assume no disks are attached to any node.") return attached, nil } vmMoList, err := dc.GetVMMoList(ctx, vmList, []string{"config.hardware.device", "name"}) if err != nil { // When there is an error fetching instance information // it is safer to return nil and let volume information not be touched. - glog.Errorf("Failed to get VM Managed object for nodes: %+v. err: +%v", vmList, err) + klog.Errorf("Failed to get VM Managed object for nodes: %+v. err: %+v", vmList, err) return nil, err } for _, vmMo := range vmMoList { if vmMo.Config == nil { - glog.Errorf("Config is not available for VM: %q", vmMo.Name) + klog.Errorf("Config is not available for VM: %q", vmMo.Name) continue } for nodeName, volPaths := range nodeVolumes { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/datastore.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/datastore.go index 8d21789f9ce9..a57685bc76cb 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/datastore.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/datastore.go @@ -20,12 +20,12 @@ import ( "context" "fmt" - "github.com/golang/glog" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/property" "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/soap" "github.com/vmware/govmomi/vim25/types" + "k8s.io/klog" ) // Datastore extends the govmomi Datastore object @@ -59,7 +59,7 @@ func (ds *Datastore) CreateDirectory(ctx context.Context, directoryPath string, } return err } - glog.V(LogLevel).Infof("Created dir with path as %+q", directoryPath) + klog.V(LogLevel).Infof("Created dir with path as %+q", directoryPath) return nil } @@ -69,7 +69,7 @@ func (ds *Datastore) GetType(ctx context.Context) (string, error) { pc := property.DefaultCollector(ds.Client()) err := pc.RetrieveOne(ctx, ds.Datastore.Reference(), []string{"summary"}, &dsMo) if err != nil { - glog.Errorf("Failed to retrieve datastore summary property. err: %v", err) + klog.Errorf("Failed to retrieve datastore summary property. err: %v", err) return "", err } return dsMo.Summary.Type, nil @@ -80,7 +80,7 @@ func (ds *Datastore) GetType(ctx context.Context) (string, error) { func (ds *Datastore) IsCompatibleWithStoragePolicy(ctx context.Context, storagePolicyID string) (bool, string, error) { pbmClient, err := NewPbmClient(ctx, ds.Client()) if err != nil { - glog.Errorf("Failed to get new PbmClient Object. 
err: %v", err) return false, "", err } return pbmClient.IsDatastoreCompatible(ctx, storagePolicyID, ds) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/BUILD index 377202f4cd6b..4273c972f2c4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/BUILD @@ -15,9 +15,9 @@ go_library( importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers", deps = [ "//pkg/cloudprovider/providers/vsphere/vclib:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/vmware/govmomi/object:go_default_library", "//vendor/github.com/vmware/govmomi/vim25/types:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/vdm.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/vdm.go index a643241bdb6f..9a1c4715cf25 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/vdm.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/vdm.go @@ -20,9 +20,9 @@ import ( "context" "time" - "github.com/golang/glog" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/vim25/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib" ) @@ -55,13 +55,13 @@ func (diskManager virtualDiskManager) Create(ctx context.Context, datastore *vcl task, err := vdm.CreateVirtualDisk(ctx, diskManager.diskPath, datastore.Datacenter.Datacenter, vmDiskSpec) if err != nil { vclib.RecordvSphereMetric(vclib.APICreateVolume, requestTime, err) - glog.Errorf("Failed to create virtual disk: %s. err: %+v", diskManager.diskPath, err) + klog.Errorf("Failed to create virtual disk: %s. err: %+v", diskManager.diskPath, err) return "", err } taskInfo, err := task.WaitForResult(ctx, nil) vclib.RecordvSphereMetric(vclib.APICreateVolume, requestTime, err) if err != nil { - glog.Errorf("Failed to complete virtual disk creation: %s. err: %+v", diskManager.diskPath, err) + klog.Errorf("Failed to complete virtual disk creation: %s. err: %+v", diskManager.diskPath, err) return "", err } canonicalDiskPath = taskInfo.Result.(string) @@ -77,14 +77,14 @@ func (diskManager virtualDiskManager) Delete(ctx context.Context, datacenter *vc // Delete virtual disk task, err := virtualDiskManager.DeleteVirtualDisk(ctx, diskPath, datacenter.Datacenter) if err != nil { - glog.Errorf("Failed to delete virtual disk. err: %v", err) + klog.Errorf("Failed to delete virtual disk. err: %v", err) vclib.RecordvSphereMetric(vclib.APIDeleteVolume, requestTime, err) return err } err = task.Wait(ctx) vclib.RecordvSphereMetric(vclib.APIDeleteVolume, requestTime, err) if err != nil { - glog.Errorf("Failed to delete virtual disk. err: %v", err) + klog.Errorf("Failed to delete virtual disk. 
err: %v", err) return err } return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/virtualdisk.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/virtualdisk.go index fe905cc79f72..6d7f7f5fe905 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/virtualdisk.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/virtualdisk.go @@ -20,7 +20,7 @@ import ( "context" "fmt" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib" ) @@ -65,7 +65,7 @@ func (virtualDisk *VirtualDisk) Create(ctx context.Context, datastore *vclib.Dat virtualDisk.VolumeOptions.DiskFormat = vclib.ThinDiskType } if !virtualDisk.VolumeOptions.VerifyVolumeOptions() { - glog.Error("VolumeOptions verification failed. volumeOptions: ", virtualDisk.VolumeOptions) + klog.Error("VolumeOptions verification failed. volumeOptions: ", virtualDisk.VolumeOptions) return "", vclib.ErrInvalidVolumeOptions } if virtualDisk.VolumeOptions.StoragePolicyID != "" && virtualDisk.VolumeOptions.StoragePolicyName != "" { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/vmdm.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/vmdm.go index 12fba7f70245..637ac514bfd6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/vmdm.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/vmdm.go @@ -22,9 +22,9 @@ import ( "hash/fnv" "strings" - "github.com/golang/glog" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/vim25/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib" ) @@ -43,26 +43,26 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto } pbmClient, err := vclib.NewPbmClient(ctx, datastore.Client()) if err != nil { - glog.Errorf("Error occurred while creating new pbmClient, err: %+v", err) + klog.Errorf("Error occurred while creating new pbmClient, err: %+v", err) return "", err } if vmdisk.volumeOptions.StoragePolicyID == "" && vmdisk.volumeOptions.StoragePolicyName != "" { vmdisk.volumeOptions.StoragePolicyID, err = pbmClient.ProfileIDByName(ctx, vmdisk.volumeOptions.StoragePolicyName) if err != nil { - glog.Errorf("Error occurred while getting Profile Id from Profile Name: %s, err: %+v", vmdisk.volumeOptions.StoragePolicyName, err) + klog.Errorf("Error occurred while getting Profile Id from Profile Name: %s, err: %+v", vmdisk.volumeOptions.StoragePolicyName, err) return "", err } } if vmdisk.volumeOptions.StoragePolicyID != "" { compatible, faultMessage, err := datastore.IsCompatibleWithStoragePolicy(ctx, vmdisk.volumeOptions.StoragePolicyID) if err != nil { - glog.Errorf("Error occurred while checking datastore compatibility with storage policy id: %s, err: %+v", vmdisk.volumeOptions.StoragePolicyID, err) + klog.Errorf("Error occurred while checking datastore compatibility with storage policy id: %s, err: %+v", vmdisk.volumeOptions.StoragePolicyID, err) return "", err } if !compatible { - glog.Errorf("Datastore: %s is not compatible with Policy: %s", datastore.Name(), vmdisk.volumeOptions.StoragePolicyName) + klog.Errorf("Datastore: %s is not compatible with Policy: %s", 
datastore.Name(), vmdisk.volumeOptions.StoragePolicyName) return "", fmt.Errorf("User specified datastore is not compatible with the storagePolicy: %q. Failed with faults: %+q", vmdisk.volumeOptions.StoragePolicyName, faultMessage) } } @@ -79,7 +79,7 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto return "", err } if dsType != vclib.VSANDatastoreType { - glog.Errorf("The specified datastore: %q is not a VSAN datastore", datastore.Name()) + klog.Errorf("The specified datastore: %q is not a VSAN datastore", datastore.Name()) return "", fmt.Errorf("The specified datastore: %q is not a VSAN datastore."+ " The policy parameters will work only with VSAN Datastore."+ " So, please specify a valid VSAN datastore in Storage class definition.", datastore.Name()) @@ -90,7 +90,7 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto ObjectData: vmdisk.volumeOptions.VSANStorageProfileData, } } else { - glog.Errorf("Both volumeOptions.StoragePolicyID and volumeOptions.VSANStorageProfileData are not set. One of them should be set") + klog.Errorf("Both volumeOptions.StoragePolicyID and volumeOptions.VSANStorageProfileData are not set. One of them should be set") return "", fmt.Errorf("Both volumeOptions.StoragePolicyID and volumeOptions.VSANStorageProfileData are not set. One of them should be set") } var dummyVM *vclib.VirtualMachine @@ -102,10 +102,10 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto dummyVM, err = datastore.Datacenter.GetVMByPath(ctx, vmdisk.vmOptions.VMFolder.InventoryPath+"/"+dummyVMFullName) if err != nil { // Create a dummy VM - glog.V(1).Infof("Creating Dummy VM: %q", dummyVMFullName) + klog.V(1).Infof("Creating Dummy VM: %q", dummyVMFullName) dummyVM, err = vmdisk.createDummyVM(ctx, datastore.Datacenter, dummyVMFullName) if err != nil { - glog.Errorf("Failed to create Dummy VM. err: %v", err) + klog.Errorf("Failed to create Dummy VM. err: %v", err) return "", err } } @@ -114,7 +114,7 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto virtualMachineConfigSpec := types.VirtualMachineConfigSpec{} disk, _, err := dummyVM.CreateDiskSpec(ctx, vmdisk.diskPath, datastore, vmdisk.volumeOptions) if err != nil { - glog.Errorf("Failed to create Disk Spec. err: %v", err) + klog.Errorf("Failed to create Disk Spec. err: %v", err) return "", err } deviceConfigSpec := &types.VirtualDeviceConfigSpec{ @@ -128,7 +128,7 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto fileAlreadyExist := false task, err := dummyVM.Reconfigure(ctx, virtualMachineConfigSpec) if err != nil { - glog.Errorf("Failed to reconfig. err: %v", err) + klog.Errorf("Failed to reconfig. err: %v", err) return "", err } err = task.Wait(ctx) @@ -136,9 +136,9 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto fileAlreadyExist = isAlreadyExists(vmdisk.diskPath, err) if fileAlreadyExist { //Skip error and continue to detach the disk as the disk was already created on the datastore. 
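The vclib hunks above and below all apply the same mechanical substitution: drop the `github.com/golang/glog` import, add `k8s.io/klog`, and rewrite each `glog.` call site to `klog.`. Because klog began as an API-compatible fork of glog, the format strings and the severity/verbosity surface carry over unchanged; a minimal sketch of the pattern (the function below is hypothetical, not part of this diff):

```go
package vclib

import (
	// Before the migration this import was "github.com/golang/glog".
	"k8s.io/klog"
)

// logDiskResult is a hypothetical call site showing that only the
// package identifier changes: Errorf, Warningf, Infof and V(level)
// all exist on klog with the same signatures as glog.
func logDiskResult(diskPath string, err error) {
	if err != nil {
		klog.Errorf("Failed to create virtual disk: %s. err: %+v", diskPath, err)
		return
	}
	klog.V(4).Infof("Created virtual disk: %s", diskPath) // emitted only at -v=4 or above
}
```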
- glog.V(vclib.LogLevel).Infof("File: %v already exists", vmdisk.diskPath) + klog.V(vclib.LogLevel).Infof("File: %v already exists", vmdisk.diskPath) } else { - glog.Errorf("Failed to attach the disk to VM: %q with err: %+v", dummyVMFullName, err) + klog.Errorf("Failed to attach the disk to VM: %q with err: %+v", dummyVMFullName, err) return "", err } } @@ -147,16 +147,16 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto if err != nil { if vclib.DiskNotFoundErrMsg == err.Error() && fileAlreadyExist { // Skip error if disk was already detached from the dummy VM but still present on the datastore. - glog.V(vclib.LogLevel).Infof("File: %v is already detached", vmdisk.diskPath) + klog.V(vclib.LogLevel).Infof("File: %v is already detached", vmdisk.diskPath) } else { - glog.Errorf("Failed to detach the disk: %q from VM: %q with err: %+v", vmdisk.diskPath, dummyVMFullName, err) + klog.Errorf("Failed to detach the disk: %q from VM: %q with err: %+v", vmdisk.diskPath, dummyVMFullName, err) return "", err } } // Delete the dummy VM err = dummyVM.DeleteVM(ctx) if err != nil { - glog.Errorf("Failed to destroy the vm: %q with err: %+v", dummyVMFullName, err) + klog.Errorf("Failed to destroy the vm: %q with err: %+v", dummyVMFullName, err) } return vmdisk.diskPath, nil } @@ -195,13 +195,13 @@ func (vmdisk vmDiskManager) createDummyVM(ctx context.Context, datacenter *vclib task, err := vmdisk.vmOptions.VMFolder.CreateVM(ctx, virtualMachineConfigSpec, vmdisk.vmOptions.VMResourcePool, nil) if err != nil { - glog.Errorf("Failed to create VM. err: %+v", err) + klog.Errorf("Failed to create VM. err: %+v", err) return nil, err } dummyVMTaskInfo, err := task.WaitForResult(ctx, nil) if err != nil { - glog.Errorf("Error occurred while waiting for create VM task result. err: %+v", err) + klog.Errorf("Error occurred while waiting for create VM task result. 
err: %+v", err) return nil, err } @@ -214,11 +214,11 @@ func (vmdisk vmDiskManager) createDummyVM(ctx context.Context, datacenter *vclib func CleanUpDummyVMs(ctx context.Context, folder *vclib.Folder, dc *vclib.Datacenter) error { vmList, err := folder.GetVirtualMachines(ctx) if err != nil { - glog.V(4).Infof("Failed to get virtual machines in the kubernetes cluster: %s, err: %+v", folder.InventoryPath, err) + klog.V(4).Infof("Failed to get virtual machines in the kubernetes cluster: %s, err: %+v", folder.InventoryPath, err) return err } if vmList == nil || len(vmList) == 0 { - glog.Errorf("No virtual machines found in the kubernetes cluster: %s", folder.InventoryPath) + klog.Errorf("No virtual machines found in the kubernetes cluster: %s", folder.InventoryPath) return fmt.Errorf("No virtual machines found in the kubernetes cluster: %s", folder.InventoryPath) } var dummyVMList []*vclib.VirtualMachine @@ -226,7 +226,7 @@ func CleanUpDummyVMs(ctx context.Context, folder *vclib.Folder, dc *vclib.Datace for _, vm := range vmList { vmName, err := vm.ObjectName(ctx) if err != nil { - glog.V(4).Infof("Unable to get name from VM with err: %+v", err) + klog.V(4).Infof("Unable to get name from VM with err: %+v", err) continue } if strings.HasPrefix(vmName, vclib.DummyVMPrefixName) { @@ -237,7 +237,7 @@ func CleanUpDummyVMs(ctx context.Context, folder *vclib.Folder, dc *vclib.Datace for _, vm := range dummyVMList { err = vm.DeleteVM(ctx) if err != nil { - glog.V(4).Infof("Unable to delete dummy VM with err: %+v", err) + klog.V(4).Infof("Unable to delete dummy VM with err: %+v", err) continue } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/folder.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/folder.go index 4e66de88dbb1..1e4f8e4e8889 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/folder.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/folder.go @@ -19,8 +19,8 @@ package vclib import ( "context" - "github.com/golang/glog" "github.com/vmware/govmomi/object" + "k8s.io/klog" ) // Folder extends the govmomi Folder object @@ -33,7 +33,7 @@ type Folder struct { func (folder *Folder) GetVirtualMachines(ctx context.Context) ([]*VirtualMachine, error) { vmFolders, err := folder.Children(ctx) if err != nil { - glog.Errorf("Failed to get children from Folder: %s. err: %+v", folder.InventoryPath, err) + klog.Errorf("Failed to get children from Folder: %s. err: %+v", folder.InventoryPath, err) return nil, err } var vmObjList []*VirtualMachine diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/pbm.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/pbm.go index 0a494f7d7313..8070f2004239 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/pbm.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/pbm.go @@ -20,8 +20,8 @@ import ( "context" "fmt" - "github.com/golang/glog" "github.com/vmware/govmomi/pbm" + "k8s.io/klog" pbmtypes "github.com/vmware/govmomi/pbm/types" "github.com/vmware/govmomi/vim25" @@ -36,7 +36,7 @@ type PbmClient struct { func NewPbmClient(ctx context.Context, client *vim25.Client) (*PbmClient, error) { pbmClient, err := pbm.NewClient(ctx, client) if err != nil { - glog.Errorf("Failed to create new Pbm Client. 
err: %+v", err) + klog.Errorf("Failed to create new Pbm Client. err: %+v", err) return nil, err } return &PbmClient{pbmClient}, nil @@ -60,7 +60,7 @@ func (pbmClient *PbmClient) IsDatastoreCompatible(ctx context.Context, storagePo } compatibilityResult, err := pbmClient.CheckRequirements(ctx, hubs, nil, req) if err != nil { - glog.Errorf("Error occurred for CheckRequirements call. err %+v", err) + klog.Errorf("Error occurred for CheckRequirements call. err %+v", err) return false, "", err } if compatibilityResult != nil && len(compatibilityResult) > 0 { @@ -70,7 +70,7 @@ func (pbmClient *PbmClient) IsDatastoreCompatible(ctx context.Context, storagePo } dsName, err := datastore.ObjectName(ctx) if err != nil { - glog.Errorf("Failed to get datastore ObjectName") + klog.Errorf("Failed to get datastore ObjectName") return false, "", err } if compatibilityResult[0].Error[0].LocalizedMessage == "" { @@ -92,7 +92,7 @@ func (pbmClient *PbmClient) GetCompatibleDatastores(ctx context.Context, dc *Dat ) compatibilityResult, err := pbmClient.GetPlacementCompatibilityResult(ctx, storagePolicyID, datastores) if err != nil { - glog.Errorf("Error occurred while retrieving placement compatibility result for datastores: %+v with storagePolicyID: %s. err: %+v", datastores, storagePolicyID, err) + klog.Errorf("Error occurred while retrieving placement compatibility result for datastores: %+v with storagePolicyID: %s. err: %+v", datastores, storagePolicyID, err) return nil, "", err } compatibleHubs := compatibilityResult.CompatibleDatastores() @@ -114,7 +114,7 @@ func (pbmClient *PbmClient) GetCompatibleDatastores(ctx context.Context, dc *Dat } // Return an error if there are no compatible datastores. if len(compatibleHubs) < 1 { - glog.Errorf("No compatible datastores found that satisfy the storage policy requirements: %s", storagePolicyID) + klog.Errorf("No compatible datastores found that satisfy the storage policy requirements: %s", storagePolicyID) return nil, localizedMessagesForNotCompatibleDatastores, fmt.Errorf("No compatible datastores found that satisfy the storage policy requirements") } return compatibleDatastoreList, localizedMessagesForNotCompatibleDatastores, nil @@ -138,7 +138,7 @@ func (pbmClient *PbmClient) GetPlacementCompatibilityResult(ctx context.Context, } res, err := pbmClient.CheckRequirements(ctx, hubs, nil, req) if err != nil { - glog.Errorf("Error occurred for CheckRequirements call. err: %+v", err) + klog.Errorf("Error occurred for CheckRequirements call. err: %+v", err) return nil, err } return res, nil @@ -162,7 +162,7 @@ func getDsMorNameMap(ctx context.Context, datastores []*DatastoreInfo) map[strin if err == nil { dsMorNameMap[ds.Reference().Value] = dsObjectName } else { - glog.Errorf("Error occurred while getting datastore object name. err: %+v", err) + klog.Errorf("Error occurred while getting datastore object name. 
err: %+v", err) } } return dsMorNameMap diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/utils.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/utils.go index 36ea8d6c6ef1..161e0ebfe726 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/utils.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/utils.go @@ -22,12 +22,12 @@ import ( "regexp" "strings" - "github.com/golang/glog" "github.com/vmware/govmomi/find" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/soap" "github.com/vmware/govmomi/vim25/types" + "k8s.io/klog" ) // IsNotFound return true if err is NotFoundError or DefaultNotFoundError @@ -140,7 +140,7 @@ func GetPathFromVMDiskPath(vmDiskPath string) string { datastorePathObj := new(object.DatastorePath) isSuccess := datastorePathObj.FromString(vmDiskPath) if !isSuccess { - glog.Errorf("Failed to parse vmDiskPath: %s", vmDiskPath) + klog.Errorf("Failed to parse vmDiskPath: %s", vmDiskPath) return "" } return datastorePathObj.Path @@ -151,7 +151,7 @@ func GetDatastorePathObjFromVMDiskPath(vmDiskPath string) (*object.DatastorePath datastorePathObj := new(object.DatastorePath) isSuccess := datastorePathObj.FromString(vmDiskPath) if !isSuccess { - glog.Errorf("Failed to parse volPath: %s", vmDiskPath) + klog.Errorf("Failed to parse volPath: %s", vmDiskPath) return nil, fmt.Errorf("Failed to parse volPath: %s", vmDiskPath) } return datastorePathObj, nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/virtualmachine.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/virtualmachine.go index 01654b3d1ef7..f6e28cb1036c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/virtualmachine.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/virtualmachine.go @@ -22,12 +22,12 @@ import ( "strings" "time" - "github.com/golang/glog" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/property" "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" + "k8s.io/klog" ) // VirtualMachine extends the govmomi VirtualMachine object @@ -52,7 +52,7 @@ func (vm *VirtualMachine) IsDiskAttached(ctx context.Context, diskPath string) ( func (vm *VirtualMachine) DeleteVM(ctx context.Context) error { destroyTask, err := vm.Destroy(ctx) if err != nil { - glog.Errorf("Failed to delete the VM: %q. err: %+v", vm.InventoryPath, err) + klog.Errorf("Failed to delete the VM: %q. err: %+v", vm.InventoryPath, err) return err } return destroyTask.Wait(ctx) @@ -69,7 +69,7 @@ func (vm *VirtualMachine) AttachDisk(ctx context.Context, vmDiskPath string, vol vmDiskPath = RemoveStorageClusterORFolderNameFromVDiskPath(vmDiskPath) attached, err := vm.IsDiskAttached(ctx, vmDiskPath) if err != nil { - glog.Errorf("Error occurred while checking if disk is attached on VM: %q. vmDiskPath: %q, err: %+v", vm.InventoryPath, vmDiskPath, err) + klog.Errorf("Error occurred while checking if disk is attached on VM: %q. 
vmDiskPath: %q, err: %+v", vm.InventoryPath, vmDiskPath, err) return "", err } // If disk is already attached, return the disk UUID @@ -81,31 +81,31 @@ func (vm *VirtualMachine) AttachDisk(ctx context.Context, vmDiskPath string, vol if volumeOptions.StoragePolicyName != "" { pbmClient, err := NewPbmClient(ctx, vm.Client()) if err != nil { - glog.Errorf("Error occurred while creating new pbmClient. err: %+v", err) + klog.Errorf("Error occurred while creating new pbmClient. err: %+v", err) return "", err } volumeOptions.StoragePolicyID, err = pbmClient.ProfileIDByName(ctx, volumeOptions.StoragePolicyName) if err != nil { - glog.Errorf("Failed to get Profile ID by name: %s. err: %+v", volumeOptions.StoragePolicyName, err) + klog.Errorf("Failed to get Profile ID by name: %s. err: %+v", volumeOptions.StoragePolicyName, err) return "", err } } dsObj, err := vm.Datacenter.GetDatastoreByPath(ctx, vmDiskPathCopy) if err != nil { - glog.Errorf("Failed to get datastore from vmDiskPath: %q. err: %+v", vmDiskPath, err) + klog.Errorf("Failed to get datastore from vmDiskPath: %q. err: %+v", vmDiskPath, err) return "", err } // If disk is not attached, create a disk spec for disk to be attached to the VM. disk, newSCSIController, err := vm.CreateDiskSpec(ctx, vmDiskPath, dsObj, volumeOptions) if err != nil { - glog.Errorf("Error occurred while creating disk spec. err: %+v", err) + klog.Errorf("Error occurred while creating disk spec. err: %+v", err) return "", err } vmDevices, err := vm.Device(ctx) if err != nil { - glog.Errorf("Failed to retrieve VM devices for VM: %q. err: %+v", vm.InventoryPath, err) + klog.Errorf("Failed to retrieve VM devices for VM: %q. err: %+v", vm.InventoryPath, err) return "", err } virtualMachineConfigSpec := types.VirtualMachineConfigSpec{} @@ -125,7 +125,7 @@ func (vm *VirtualMachine) AttachDisk(ctx context.Context, vmDiskPath string, vol task, err := vm.Reconfigure(ctx, virtualMachineConfigSpec) if err != nil { RecordvSphereMetric(APIAttachVolume, requestTime, err) - glog.Errorf("Failed to attach the disk with storagePolicy: %q on VM: %q. err - %+v", volumeOptions.StoragePolicyID, vm.InventoryPath, err) + klog.Errorf("Failed to attach the disk with storagePolicy: %q on VM: %q. err - %+v", volumeOptions.StoragePolicyID, vm.InventoryPath, err) if newSCSIController != nil { vm.deleteController(ctx, newSCSIController, vmDevices) } @@ -134,7 +134,7 @@ func (vm *VirtualMachine) AttachDisk(ctx context.Context, vmDiskPath string, vol err = task.Wait(ctx) RecordvSphereMetric(APIAttachVolume, requestTime, err) if err != nil { - glog.Errorf("Failed to attach the disk with storagePolicy: %+q on VM: %q. err - %+v", volumeOptions.StoragePolicyID, vm.InventoryPath, err) + klog.Errorf("Failed to attach the disk with storagePolicy: %+q on VM: %q. err - %+v", volumeOptions.StoragePolicyID, vm.InventoryPath, err) if newSCSIController != nil { vm.deleteController(ctx, newSCSIController, vmDevices) } @@ -144,7 +144,7 @@ func (vm *VirtualMachine) AttachDisk(ctx context.Context, vmDiskPath string, vol // Once disk is attached, get the disk UUID. diskUUID, err := vm.Datacenter.GetVirtualDiskPage83Data(ctx, vmDiskPath) if err != nil { - glog.Errorf("Error occurred while getting Disk Info from VM: %q. err: %v", vm.InventoryPath, err) + klog.Errorf("Error occurred while getting Disk Info from VM: %q. 
err: %v", vm.InventoryPath, err) vm.DetachDisk(ctx, vmDiskPath) if newSCSIController != nil { vm.deleteController(ctx, newSCSIController, vmDevices) @@ -159,11 +159,11 @@ func (vm *VirtualMachine) DetachDisk(ctx context.Context, vmDiskPath string) err vmDiskPath = RemoveStorageClusterORFolderNameFromVDiskPath(vmDiskPath) device, err := vm.getVirtualDeviceByPath(ctx, vmDiskPath) if err != nil { - glog.Errorf("Disk ID not found for VM: %q with diskPath: %q", vm.InventoryPath, vmDiskPath) + klog.Errorf("Disk ID not found for VM: %q with diskPath: %q", vm.InventoryPath, vmDiskPath) return err } if device == nil { - glog.Errorf("No virtual device found with diskPath: %q on VM: %q", vmDiskPath, vm.InventoryPath) + klog.Errorf("No virtual device found with diskPath: %q on VM: %q", vmDiskPath, vm.InventoryPath) return fmt.Errorf("No virtual device found with diskPath: %q on VM: %q", vmDiskPath, vm.InventoryPath) } // Detach disk from VM @@ -171,7 +171,7 @@ func (vm *VirtualMachine) DetachDisk(ctx context.Context, vmDiskPath string) err err = vm.RemoveDevice(ctx, true, device) RecordvSphereMetric(APIDetachVolume, requestTime, err) if err != nil { - glog.Errorf("Error occurred while removing disk device for VM: %q. err: %v", vm.InventoryPath, err) + klog.Errorf("Error occurred while removing disk device for VM: %q. err: %v", vm.InventoryPath, err) return err } return nil @@ -181,7 +181,7 @@ func (vm *VirtualMachine) DetachDisk(ctx context.Context, vmDiskPath string) err func (vm *VirtualMachine) GetResourcePool(ctx context.Context) (*object.ResourcePool, error) { vmMoList, err := vm.Datacenter.GetVMMoList(ctx, []*VirtualMachine{vm}, []string{"resourcePool"}) if err != nil { - glog.Errorf("Failed to get resource pool from VM: %q. err: %+v", vm.InventoryPath, err) + klog.Errorf("Failed to get resource pool from VM: %q. err: %+v", vm.InventoryPath, err) return nil, err } return object.NewResourcePool(vm.Client(), vmMoList[0].ResourcePool.Reference()), nil @@ -192,7 +192,7 @@ func (vm *VirtualMachine) GetResourcePool(ctx context.Context) (*object.Resource func (vm *VirtualMachine) IsActive(ctx context.Context) (bool, error) { vmMoList, err := vm.Datacenter.GetVMMoList(ctx, []*VirtualMachine{vm}, []string{"summary"}) if err != nil { - glog.Errorf("Failed to get VM Managed object with property summary. err: +%v", err) + klog.Errorf("Failed to get VM Managed object with property summary. err: +%v", err) return false, err } if vmMoList[0].Summary.Runtime.PowerState == ActivePowerState { @@ -206,14 +206,14 @@ func (vm *VirtualMachine) IsActive(ctx context.Context) (bool, error) { func (vm *VirtualMachine) GetAllAccessibleDatastores(ctx context.Context) ([]*DatastoreInfo, error) { host, err := vm.HostSystem(ctx) if err != nil { - glog.Errorf("Failed to get host system for VM: %q. err: %+v", vm.InventoryPath, err) + klog.Errorf("Failed to get host system for VM: %q. err: %+v", vm.InventoryPath, err) return nil, err } var hostSystemMo mo.HostSystem s := object.NewSearchIndex(vm.Client()) err = s.Properties(ctx, host.Reference(), []string{DatastoreProperty}, &hostSystemMo) if err != nil { - glog.Errorf("Failed to retrieve datastores for host: %+v. err: %+v", host, err) + klog.Errorf("Failed to retrieve datastores for host: %+v. 
err: %+v", host, err) return nil, err } var dsRefList []types.ManagedObjectReference @@ -226,11 +226,11 @@ func (vm *VirtualMachine) GetAllAccessibleDatastores(ctx context.Context) ([]*Da properties := []string{DatastoreInfoProperty} err = pc.Retrieve(ctx, dsRefList, properties, &dsMoList) if err != nil { - glog.Errorf("Failed to get Datastore managed objects from datastore objects."+ + klog.Errorf("Failed to get Datastore managed objects from datastore objects."+ " dsObjList: %+v, properties: %+v, err: %v", dsRefList, properties, err) return nil, err } - glog.V(9).Infof("Result dsMoList: %+v", dsMoList) + klog.V(9).Infof("Result dsMoList: %+v", dsMoList) var dsObjList []*DatastoreInfo for _, dsMo := range dsMoList { dsObjList = append(dsObjList, @@ -247,7 +247,7 @@ func (vm *VirtualMachine) CreateDiskSpec(ctx context.Context, diskPath string, d var newSCSIController types.BaseVirtualDevice vmDevices, err := vm.Device(ctx) if err != nil { - glog.Errorf("Failed to retrieve VM devices. err: %+v", err) + klog.Errorf("Failed to retrieve VM devices. err: %+v", err) return nil, nil, err } // find SCSI controller of particular type from VM devices @@ -256,20 +256,20 @@ func (vm *VirtualMachine) CreateDiskSpec(ctx context.Context, diskPath string, d if scsiController == nil { newSCSIController, err = vm.createAndAttachSCSIController(ctx, volumeOptions.SCSIControllerType) if err != nil { - glog.Errorf("Failed to create SCSI controller for VM :%q with err: %+v", vm.InventoryPath, err) + klog.Errorf("Failed to create SCSI controller for VM :%q with err: %+v", vm.InventoryPath, err) return nil, nil, err } // Get VM device list vmDevices, err := vm.Device(ctx) if err != nil { - glog.Errorf("Failed to retrieve VM devices. err: %v", err) + klog.Errorf("Failed to retrieve VM devices. err: %v", err) return nil, nil, err } // verify scsi controller in virtual machine scsiControllersOfRequiredType := getSCSIControllersOfType(vmDevices, volumeOptions.SCSIControllerType) scsiController = getAvailableSCSIController(scsiControllersOfRequiredType) if scsiController == nil { - glog.Errorf("Cannot find SCSI controller of type: %q in VM", volumeOptions.SCSIControllerType) + klog.Errorf("Cannot find SCSI controller of type: %q in VM", volumeOptions.SCSIControllerType) // attempt clean up of scsi controller vm.deleteController(ctx, newSCSIController, vmDevices) return nil, nil, fmt.Errorf("Cannot find SCSI controller of type: %q in VM", volumeOptions.SCSIControllerType) @@ -278,7 +278,7 @@ func (vm *VirtualMachine) CreateDiskSpec(ctx context.Context, diskPath string, d disk := vmDevices.CreateDisk(scsiController, dsObj.Reference(), diskPath) unitNumber, err := getNextUnitNumber(vmDevices, scsiController) if err != nil { - glog.Errorf("Cannot attach disk to VM, unitNumber limit reached - %+v.", err) + klog.Errorf("Cannot attach disk to VM, unitNumber limit reached - %+v.", err) return nil, nil, err } *disk.UnitNumber = unitNumber @@ -307,7 +307,7 @@ func (vm *VirtualMachine) CreateDiskSpec(ctx context.Context, diskPath string, d func (vm *VirtualMachine) GetVirtualDiskPath(ctx context.Context) (string, error) { vmDevices, err := vm.Device(ctx) if err != nil { - glog.Errorf("Failed to get the devices for VM: %q. err: %+v", vm.InventoryPath, err) + klog.Errorf("Failed to get the devices for VM: %q. 
err: %+v", vm.InventoryPath, err) return "", err } // filter vm devices to retrieve device for the given vmdk file identified by disk path @@ -327,18 +327,18 @@ func (vm *VirtualMachine) createAndAttachSCSIController(ctx context.Context, dis // Get VM device list vmDevices, err := vm.Device(ctx) if err != nil { - glog.Errorf("Failed to retrieve VM devices for VM: %q. err: %+v", vm.InventoryPath, err) + klog.Errorf("Failed to retrieve VM devices for VM: %q. err: %+v", vm.InventoryPath, err) return nil, err } allSCSIControllers := getSCSIControllers(vmDevices) if len(allSCSIControllers) >= SCSIControllerLimit { // we reached the maximum number of controllers we can attach - glog.Errorf("SCSI Controller Limit of %d has been reached, cannot create another SCSI controller", SCSIControllerLimit) + klog.Errorf("SCSI Controller Limit of %d has been reached, cannot create another SCSI controller", SCSIControllerLimit) return nil, fmt.Errorf("SCSI Controller Limit of %d has been reached, cannot create another SCSI controller", SCSIControllerLimit) } newSCSIController, err := vmDevices.CreateSCSIController(diskControllerType) if err != nil { - glog.Errorf("Failed to create new SCSI controller on VM: %q. err: %+v", vm.InventoryPath, err) + klog.Errorf("Failed to create new SCSI controller on VM: %q. err: %+v", vm.InventoryPath, err) return nil, err } configNewSCSIController := newSCSIController.(types.BaseVirtualSCSIController).GetVirtualSCSIController() @@ -349,7 +349,7 @@ func (vm *VirtualMachine) createAndAttachSCSIController(ctx context.Context, dis // add the scsi controller to virtual machine err = vm.AddDevice(context.TODO(), newSCSIController) if err != nil { - glog.V(LogLevel).Infof("Cannot add SCSI controller to VM: %q. err: %+v", vm.InventoryPath, err) + klog.V(LogLevel).Infof("Cannot add SCSI controller to VM: %q. err: %+v", vm.InventoryPath, err) // attempt clean up of scsi controller vm.deleteController(ctx, newSCSIController, vmDevices) return nil, err @@ -361,7 +361,7 @@ func (vm *VirtualMachine) createAndAttachSCSIController(ctx context.Context, dis func (vm *VirtualMachine) getVirtualDeviceByPath(ctx context.Context, diskPath string) (types.BaseVirtualDevice, error) { vmDevices, err := vm.Device(ctx) if err != nil { - glog.Errorf("Failed to get the devices for VM: %q. err: %+v", vm.InventoryPath, err) + klog.Errorf("Failed to get the devices for VM: %q. err: %+v", vm.InventoryPath, err) return nil, err } @@ -371,7 +371,7 @@ func (vm *VirtualMachine) getVirtualDeviceByPath(ctx context.Context, diskPath s virtualDevice := device.GetVirtualDevice() if backing, ok := virtualDevice.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok { if matchVirtualDiskAndVolPath(backing.FileName, diskPath) { - glog.V(LogLevel).Infof("Found VirtualDisk backing with filename %q for diskPath %q", backing.FileName, diskPath) + klog.V(LogLevel).Infof("Found VirtualDisk backing with filename %q for diskPath %q", backing.FileName, diskPath) return device, nil } } @@ -396,7 +396,7 @@ func (vm *VirtualMachine) deleteController(ctx context.Context, controllerDevice device := controllerDeviceList[len(controllerDeviceList)-1] err := vm.RemoveDevice(ctx, true, device) if err != nil { - glog.Errorf("Error occurred while removing device on VM: %q. err: %+v", vm.InventoryPath, err) + klog.Errorf("Error occurred while removing device on VM: %q. 
err: %+v", vm.InventoryPath, err) return err } return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/volumeoptions.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/volumeoptions.go index 75dadb437eeb..989ed4468158 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/volumeoptions.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/volumeoptions.go @@ -19,7 +19,7 @@ package vclib import ( "strings" - "github.com/golang/glog" + "k8s.io/klog" ) // VolumeOptions specifies various options for a volume. @@ -38,7 +38,7 @@ type VolumeOptions struct { var ( // DiskFormatValidType specifies the valid disk formats DiskFormatValidType = map[string]string{ - ThinDiskType: ThinDiskType, + ThinDiskType: ThinDiskType, strings.ToLower(EagerZeroedThickDiskType): EagerZeroedThickDiskType, strings.ToLower(ZeroedThickDiskType): PreallocatedDiskType, } @@ -59,7 +59,7 @@ func DiskformatValidOptions() string { // CheckDiskFormatSupported checks if the diskFormat is valid func CheckDiskFormatSupported(diskFormat string) bool { if DiskFormatValidType[diskFormat] == "" { - glog.Errorf("Not a valid Disk Format. Valid options are %+q", DiskformatValidOptions()) + klog.Errorf("Not a valid Disk Format. Valid options are %+q", DiskformatValidOptions()) return false } return true @@ -82,7 +82,7 @@ func CheckControllerSupported(ctrlType string) bool { return true } } - glog.Errorf("Not a valid SCSI Controller Type. Valid options are %q", SCSIControllerTypeValidOptions()) + klog.Errorf("Not a valid SCSI Controller Type. Valid options are %q", SCSIControllerTypeValidOptions()) return false } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere.go index abbe5d3081da..26180e6aff92 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere.go @@ -33,7 +33,6 @@ import ( "gopkg.in/gcfg.v1" - "github.com/golang/glog" "github.com/vmware/govmomi/vapi/rest" "github.com/vmware/govmomi/vapi/tags" "github.com/vmware/govmomi/vim25/mo" @@ -41,11 +40,11 @@ import ( k8stypes "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/informers" "k8s.io/client-go/tools/cache" + cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" - "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers" - "k8s.io/kubernetes/pkg/controller" ) // VSphere Cloud Provider constants @@ -242,7 +241,7 @@ func init() { } // Initialize passes a Kubernetes clientBuilder interface to the cloud provider -func (vs *VSphere) Initialize(clientBuilder controller.ControllerClientBuilder) { +func (vs *VSphere) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) { } // Initialize Node Informers @@ -265,13 +264,13 @@ func (vs *VSphere) SetInformers(informerFactory informers.SharedInformerFactory) // Only on controller node it is required to register listeners. 
// Register callbacks for node updates - glog.V(4).Infof("Setting up node informers for vSphere Cloud Provider") + klog.V(4).Infof("Setting up node informers for vSphere Cloud Provider") nodeInformer := informerFactory.Core().V1().Nodes().Informer() nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: vs.NodeAdded, DeleteFunc: vs.NodeDeleted, }) - glog.V(4).Infof("Node informers in vSphere cloud provider initialized") + klog.V(4).Infof("Node informers in vSphere cloud provider initialized") } @@ -281,12 +280,12 @@ func newWorkerNode() (*VSphere, error) { vs := VSphere{} vs.hostName, err = os.Hostname() if err != nil { - glog.Errorf("Failed to get hostname. err: %+v", err) + klog.Errorf("Failed to get hostname. err: %+v", err) return nil, err } vs.vmUUID, err = GetVMUUID() if err != nil { - glog.Errorf("Failed to get uuid. err: %+v", err) + klog.Errorf("Failed to get uuid. err: %+v", err) return nil, err } return &vs, nil @@ -297,18 +296,18 @@ func populateVsphereInstanceMap(cfg *VSphereConfig) (map[string]*VSphereInstance isSecretInfoProvided := true if cfg.Global.SecretName == "" || cfg.Global.SecretNamespace == "" { - glog.Warningf("SecretName and/or SecretNamespace is not provided. " + + klog.Warningf("SecretName and/or SecretNamespace is not provided. " + "VCP will use username and password from config file") isSecretInfoProvided = false } if isSecretInfoProvided { if cfg.Global.User != "" { - glog.Warning("Global.User and Secret info provided. VCP will use secret to get credentials") + klog.Warning("Global.User and Secret info provided. VCP will use secret to get credentials") cfg.Global.User = "" } if cfg.Global.Password != "" { - glog.Warning("Global.Password and Secret info provided. VCP will use secret to get credentials") + klog.Warning("Global.Password and Secret info provided. VCP will use secret to get credentials") cfg.Global.Password = "" } } @@ -316,28 +315,28 @@ func populateVsphereInstanceMap(cfg *VSphereConfig) (map[string]*VSphereInstance // Check if the vsphere.conf is in old format. In this // format the cfg.VirtualCenter will be nil or empty. if cfg.VirtualCenter == nil || len(cfg.VirtualCenter) == 0 { - glog.V(4).Infof("Config is not per virtual center and is in old format.") + klog.V(4).Infof("Config is not per virtual center and is in old format.") if !isSecretInfoProvided { if cfg.Global.User == "" { - glog.Error("Global.User is empty!") + klog.Error("Global.User is empty!") return nil, ErrUsernameMissing } if cfg.Global.Password == "" { - glog.Error("Global.Password is empty!") + klog.Error("Global.Password is empty!") return nil, ErrPasswordMissing } } if cfg.Global.WorkingDir == "" { - glog.Error("Global.WorkingDir is empty!") + klog.Error("Global.WorkingDir is empty!") return nil, errors.New("Global.WorkingDir is empty!") } if cfg.Global.VCenterIP == "" { - glog.Error("Global.VCenterIP is empty!") + klog.Error("Global.VCenterIP is empty!") return nil, errors.New("Global.VCenterIP is empty!") } if cfg.Global.Datacenter == "" { - glog.Error("Global.Datacenter is empty!") + klog.Error("Global.Datacenter is empty!") return nil, errors.New("Global.Datacenter is empty!") } cfg.Workspace.VCenterIP = cfg.Global.VCenterIP @@ -376,14 +375,14 @@ func populateVsphereInstanceMap(cfg *VSphereConfig) (map[string]*VSphereInstance if cfg.Workspace.VCenterIP == "" || cfg.Workspace.Folder == "" || cfg.Workspace.Datacenter == "" { msg := fmt.Sprintf("All fields in workspace are mandatory."+ " vsphere.conf does not have the workspace specified correctly. 
cfg.Workspace: %+v", cfg.Workspace) - glog.Error(msg) + klog.Error(msg) return nil, errors.New(msg) } for vcServer, vcConfig := range cfg.VirtualCenter { - glog.V(4).Infof("Initializing vc server %s", vcServer) + klog.V(4).Infof("Initializing vc server %s", vcServer) if vcServer == "" { - glog.Error("vsphere.conf does not have the VirtualCenter IP address specified") + klog.Error("vsphere.conf does not have the VirtualCenter IP address specified") return nil, errors.New("vsphere.conf does not have the VirtualCenter IP address specified") } @@ -391,24 +390,24 @@ func populateVsphereInstanceMap(cfg *VSphereConfig) (map[string]*VSphereInstance if vcConfig.User == "" { vcConfig.User = cfg.Global.User if vcConfig.User == "" { - glog.Errorf("vcConfig.User is empty for vc %s!", vcServer) + klog.Errorf("vcConfig.User is empty for vc %s!", vcServer) return nil, ErrUsernameMissing } } if vcConfig.Password == "" { vcConfig.Password = cfg.Global.Password if vcConfig.Password == "" { - glog.Errorf("vcConfig.Password is empty for vc %s!", vcServer) + klog.Errorf("vcConfig.Password is empty for vc %s!", vcServer) return nil, ErrPasswordMissing } } } else { if vcConfig.User != "" { - glog.Warningf("vcConfig.User for server %s and Secret info provided. VCP will use secret to get credentials", vcServer) + klog.Warningf("vcConfig.User for server %s and Secret info provided. VCP will use secret to get credentials", vcServer) vcConfig.User = "" } if vcConfig.Password != "" { - glog.Warningf("vcConfig.Password for server %s and Secret info provided. VCP will use secret to get credentials", vcServer) + klog.Warningf("vcConfig.Password for server %s and Secret info provided. VCP will use secret to get credentials", vcServer) vcConfig.Password = "" } } @@ -462,7 +461,7 @@ func newControllerNode(cfg VSphereConfig) (*VSphere, error) { } vs.hostName, err = os.Hostname() if err != nil { - glog.Errorf("Failed to get hostname. err: %+v", err) + klog.Errorf("Failed to get hostname. err: %+v", err) return nil, err } if cfg.Global.VMUUID != "" { @@ -470,7 +469,7 @@ func newControllerNode(cfg VSphereConfig) (*VSphere, error) { } else { vs.vmUUID, err = getVMUUID() if err != nil { - glog.Errorf("Failed to get uuid. err: %+v", err) + klog.Errorf("Failed to get uuid. err: %+v", err) return nil, err } } @@ -488,7 +487,7 @@ func buildVSphereFromConfig(cfg VSphereConfig) (*VSphere, error) { if cfg.Disk.SCSIControllerType == "" { cfg.Disk.SCSIControllerType = vclib.PVSCSIControllerType } else if !vclib.CheckControllerSupported(cfg.Disk.SCSIControllerType) { - glog.Errorf("%v is not a supported SCSI Controller type. Please configure 'lsilogic-sas' OR 'pvscsi'", cfg.Disk.SCSIControllerType) + klog.Errorf("%v is not a supported SCSI Controller type. Please configure 'lsilogic-sas' OR 'pvscsi'", cfg.Disk.SCSIControllerType) return nil, errors.New("Controller type not supported. 
Please configure 'lsilogic-sas' OR 'pvscsi'") } if cfg.Global.WorkingDir != "" { @@ -533,13 +532,13 @@ func getLocalIP() ([]v1.NodeAddress, error) { addrs := []v1.NodeAddress{} ifaces, err := net.Interfaces() if err != nil { - glog.Errorf("net.Interfaces() failed for NodeAddresses - %v", err) + klog.Errorf("net.Interfaces() failed for NodeAddresses - %v", err) return nil, err } for _, i := range ifaces { localAddrs, err := i.Addrs() if err != nil { - glog.Warningf("Failed to extract addresses for NodeAddresses - %v", err) + klog.Warningf("Failed to extract addresses for NodeAddresses - %v", err) } else { for _, addr := range localAddrs { if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() { @@ -559,7 +558,7 @@ func getLocalIP() ([]v1.NodeAddress, error) { }, ) } - glog.V(4).Infof("Find local IP address %v and set type to %v", ipnet.IP.String(), addressType) + klog.V(4).Infof("Find local IP address %v and set type to %v", ipnet.IP.String(), addressType) } } } @@ -571,7 +570,7 @@ func getLocalIP() ([]v1.NodeAddress, error) { func (vs *VSphere) getVSphereInstance(nodeName k8stypes.NodeName) (*VSphereInstance, error) { vsphereIns, err := vs.nodeManager.GetVSphereInstance(nodeName) if err != nil { - glog.Errorf("Cannot find node %q in cache. Node not found!!!", nodeName) + klog.Errorf("Cannot find node %q in cache. Node not found!!!", nodeName) return nil, err } return &vsphereIns, nil @@ -580,13 +579,13 @@ func (vs *VSphere) getVSphereInstance(nodeName k8stypes.NodeName) (*VSphereInsta func (vs *VSphere) getVSphereInstanceForServer(vcServer string, ctx context.Context) (*VSphereInstance, error) { vsphereIns, ok := vs.vsphereInstanceMap[vcServer] if !ok { - glog.Errorf("cannot find vcServer %q in cache. VC not found!!!", vcServer) + klog.Errorf("cannot find vcServer %q in cache. VC not found!!!", vcServer) return nil, errors.New(fmt.Sprintf("Cannot find node %q in vsphere configuration map", vcServer)) } // Ensure client is logged in and session is valid err := vs.nodeManager.vcConnect(ctx, vsphereIns) if err != nil { - glog.Errorf("failed connecting to vcServer %q with error %+v", vcServer, err) + klog.Errorf("failed connecting to vcServer %q with error %+v", vcServer, err) return nil, err } @@ -636,12 +635,12 @@ func (vs *VSphere) NodeAddresses(ctx context.Context, nodeName k8stypes.NodeName vm, err := vs.getVMFromNodeName(ctx, nodeName) if err != nil { - glog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err) + klog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err) return nil, err } vmMoList, err := vm.Datacenter.GetVMMoList(ctx, []*vclib.VirtualMachine{vm}, []string{"guest.net"}) if err != nil { - glog.Errorf("Failed to get VM Managed object with property guest.net for node: %q. err: +%v", convertToString(nodeName), err) + klog.Errorf("Failed to get VM Managed object with property guest.net for node: %q. 
err: +%v", convertToString(nodeName), err) return nil, err } // retrieve VM's ip(s) @@ -695,7 +694,7 @@ func convertToK8sType(vmName string) k8stypes.NodeName { func (vs *VSphere) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) { nodeName, err := vs.GetNodeNameFromProviderID(providerID) if err != nil { - glog.Errorf("Error while getting nodename for providerID %s", providerID) + klog.Errorf("Error while getting nodename for providerID %s", providerID) return false, err } _, err = vs.InstanceID(ctx, convertToK8sType(nodeName)) @@ -710,7 +709,7 @@ func (vs *VSphere) InstanceExistsByProviderID(ctx context.Context, providerID st func (vs *VSphere) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) { nodeName, err := vs.GetNodeNameFromProviderID(providerID) if err != nil { - glog.Errorf("Error while getting nodename for providerID %s", providerID) + klog.Errorf("Error while getting nodename for providerID %s", providerID) return false, err } @@ -724,12 +723,12 @@ func (vs *VSphere) InstanceShutdownByProviderID(ctx context.Context, providerID } vm, err := vs.getVMFromNodeName(ctx, convertToK8sType(nodeName)) if err != nil { - glog.Errorf("Failed to get VM object for node: %q. err: +%v", nodeName, err) + klog.Errorf("Failed to get VM object for node: %q. err: +%v", nodeName, err) return false, err } isActive, err := vm.IsActive(ctx) if err != nil { - glog.Errorf("Failed to check whether node %q is active. err: %+v.", nodeName, err) + klog.Errorf("Failed to check whether node %q is active. err: %+v.", nodeName, err) return false, err } return !isActive, nil @@ -762,18 +761,18 @@ func (vs *VSphere) InstanceID(ctx context.Context, nodeName k8stypes.NodeName) ( } vm, err := vs.getVMFromNodeName(ctx, nodeName) if err != nil { - glog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err) + klog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err) return "", err } isActive, err := vm.IsActive(ctx) if err != nil { - glog.Errorf("Failed to check whether node %q is active. err: %+v.", convertToString(nodeName), err) + klog.Errorf("Failed to check whether node %q is active. err: %+v.", convertToString(nodeName), err) return "", err } if isActive { return vs.vmUUID, nil } - glog.Warningf("The VM: %s is not in %s state", convertToString(nodeName), vclib.ActivePowerState) + klog.Warningf("The VM: %s is not in %s state", convertToString(nodeName), vclib.ActivePowerState) return "", cloudprovider.InstanceNotFound } @@ -782,7 +781,7 @@ func (vs *VSphere) InstanceID(ctx context.Context, nodeName k8stypes.NodeName) ( if vclib.IsManagedObjectNotFoundError(err) { err = vs.nodeManager.RediscoverNode(nodeName) if err == nil { - glog.V(4).Infof("InstanceID: Found node %q", convertToString(nodeName)) + klog.V(4).Infof("InstanceID: Found node %q", convertToString(nodeName)) instanceID, err = instanceIDInternal() } else if err == vclib.ErrNoVMFound { return "", cloudprovider.InstanceNotFound @@ -821,7 +820,7 @@ func (vs *VSphere) LoadBalancer() (cloudprovider.LoadBalancer, bool) { // Zones returns an implementation of Zones for vSphere. 
func (vs *VSphere) Zones() (cloudprovider.Zones, bool) { if vs.cfg == nil { - glog.V(1).Info("The vSphere cloud provider does not support zones") + klog.V(1).Info("The vSphere cloud provider does not support zones") return nil, false } return vs, true @@ -853,13 +852,13 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, storagePolicyName string, nodeN vm, err := vs.getVMFromNodeName(ctx, nodeName) if err != nil { - glog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err) + klog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err) return "", err } diskUUID, err = vm.AttachDisk(ctx, vmDiskPath, &vclib.VolumeOptions{SCSIControllerType: vclib.PVSCSIControllerType, StoragePolicyName: storagePolicyName}) if err != nil { - glog.Errorf("Failed to attach disk: %s for node: %s. err: +%v", vmDiskPath, convertToString(nodeName), err) + klog.Errorf("Failed to attach disk: %s for node: %s. err: +%v", vmDiskPath, convertToString(nodeName), err) return "", err } return diskUUID, nil @@ -870,13 +869,13 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, storagePolicyName string, nodeN if vclib.IsManagedObjectNotFoundError(err) { err = vs.nodeManager.RediscoverNode(nodeName) if err == nil { - glog.V(4).Infof("AttachDisk: Found node %q", convertToString(nodeName)) + klog.V(4).Infof("AttachDisk: Found node %q", convertToString(nodeName)) diskUUID, err = attachDiskInternal(vmDiskPath, storagePolicyName, nodeName) - glog.V(4).Infof("AttachDisk: Retry: diskUUID %s, err +%v", diskUUID, err) + klog.V(4).Infof("AttachDisk: Retry: diskUUID %s, err +%v", diskUUID, err) } } } - glog.V(4).Infof("AttachDisk executed for node %s and volume %s with diskUUID %s. Err: %s", convertToString(nodeName), vmDiskPath, diskUUID, err) + klog.V(4).Infof("AttachDisk executed for node %s and volume %s with diskUUID %s. Err: %s", convertToString(nodeName), vmDiskPath, diskUUID, err) vclib.RecordvSphereMetric(vclib.OperationAttachVolume, requestTime, err) return diskUUID, err } @@ -894,7 +893,7 @@ func (vs *VSphere) DetachDisk(volPath string, nodeName k8stypes.NodeName) error if err != nil { // If node doesn't exist, disk is already detached from node. if err == vclib.ErrNoVMFound { - glog.Infof("Node %q does not exist, disk %s is already detached from node.", convertToString(nodeName), volPath) + klog.Infof("Node %q does not exist, disk %s is already detached from node.", convertToString(nodeName), volPath) return nil } return err @@ -908,16 +907,16 @@ func (vs *VSphere) DetachDisk(volPath string, nodeName k8stypes.NodeName) error if err != nil { // If node doesn't exist, disk is already detached from node. if err == vclib.ErrNoVMFound { - glog.Infof("Node %q does not exist, disk %s is already detached from node.", convertToString(nodeName), volPath) + klog.Infof("Node %q does not exist, disk %s is already detached from node.", convertToString(nodeName), volPath) return nil } - glog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err) + klog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err) return err } err = vm.DetachDisk(ctx, volPath) if err != nil { - glog.Errorf("Failed to detach disk: %s for node: %s. err: +%v", volPath, convertToString(nodeName), err) + klog.Errorf("Failed to detach disk: %s for node: %s. 
err: +%v", volPath, convertToString(nodeName), err) return err } return nil @@ -961,22 +960,22 @@ func (vs *VSphere) DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (b vm, err := vs.getVMFromNodeName(ctx, nodeName) if err != nil { if err == vclib.ErrNoVMFound { - glog.Warningf("Node %q does not exist, vsphere CP will assume disk %v is not attached to it.", nodeName, volPath) + klog.Warningf("Node %q does not exist, vsphere CP will assume disk %v is not attached to it.", nodeName, volPath) // make the disk as detached and return false without error. return false, nil } - glog.Errorf("Failed to get VM object for node: %q. err: +%v", vSphereInstance, err) + klog.Errorf("Failed to get VM object for node: %q. err: +%v", vSphereInstance, err) return false, err } volPath = vclib.RemoveStorageClusterORFolderNameFromVDiskPath(volPath) attached, err := vm.IsDiskAttached(ctx, volPath) if err != nil { - glog.Errorf("DiskIsAttached failed to determine whether disk %q is still attached on node %q", + klog.Errorf("DiskIsAttached failed to determine whether disk %q is still attached on node %q", volPath, vSphereInstance) } - glog.V(4).Infof("DiskIsAttached result: %v and error: %q, for volume: %q", attached, err, volPath) + klog.V(4).Infof("DiskIsAttached result: %v and error: %q, for volume: %q", attached, err, volPath) return attached, err } requestTime := time.Now() @@ -1025,7 +1024,7 @@ func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string) for nodeName := range nodeVolumes { nodeInfo, err := vs.nodeManager.GetNodeInfo(nodeName) if err != nil { - glog.Errorf("Failed to get node info: %+v. err: %+v", nodeInfo.vm, err) + klog.Errorf("Failed to get node info: %+v. err: %+v", nodeInfo.vm, err) return nodesToRetry, err } VC_DC := nodeInfo.vcServer + nodeInfo.dataCenter.String() @@ -1043,7 +1042,7 @@ func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string) globalErrMutex.Lock() globalErr = err globalErrMutex.Unlock() - glog.Errorf("Failed to check disk attached for nodes: %+v. err: %+v", nodes, err) + klog.Errorf("Failed to check disk attached for nodes: %+v. err: %+v", nodes, err) } } nodesToRetryMutex.Lock() @@ -1066,7 +1065,7 @@ func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string) return nodesToRetry, nil } - glog.V(4).Infof("Starting DisksAreAttached API for vSphere with nodeVolumes: %+v", nodeVolumes) + klog.V(4).Infof("Starting DisksAreAttached API for vSphere with nodeVolumes: %+v", nodeVolumes) // Create context ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1079,7 +1078,7 @@ func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string) // Convert VolPaths into canonical form so that it can be compared with the VM device path. vmVolumes, err := vs.convertVolPathsToDevicePaths(ctx, nodeVolumes) if err != nil { - glog.Errorf("Failed to convert volPaths to devicePaths: %+v. err: %+v", nodeVolumes, err) + klog.Errorf("Failed to convert volPaths to devicePaths: %+v. err: %+v", nodeVolumes, err) return nil, err } attached := make(map[string]map[string]bool) @@ -1095,10 +1094,10 @@ func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string) err = vs.nodeManager.RediscoverNode(nodeName) if err != nil { if err == vclib.ErrNoVMFound { - glog.V(4).Infof("node %s not found. err: %+v", nodeName, err) + klog.V(4).Infof("node %s not found. err: %+v", nodeName, err) continue } - glog.Errorf("Failed to rediscover node %s. 
err: %+v", nodeName, err) + klog.Errorf("Failed to rediscover node %s. err: %+v", nodeName, err) return nil, err } remainingNodesVolumes[nodeName] = nodeVolumes[nodeName] @@ -1108,7 +1107,7 @@ func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string) if len(remainingNodesVolumes) != 0 { nodesToRetry, err = disksAreAttach(ctx, remainingNodesVolumes, attached, true) if err != nil || len(nodesToRetry) != 0 { - glog.Errorf("Failed to retry disksAreAttach for nodes %+v. err: %+v", remainingNodesVolumes, err) + klog.Errorf("Failed to retry disksAreAttach for nodes %+v. err: %+v", remainingNodesVolumes, err) return nil, err } } @@ -1117,7 +1116,7 @@ func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string) disksAttached[convertToK8sType(nodeName)] = volPaths } } - glog.V(4).Infof("DisksAreAttach successfully executed. result: %+v", attached) + klog.V(4).Infof("DisksAreAttach successfully executed. result: %+v", attached) return disksAttached, nil } requestTime := time.Now() @@ -1131,7 +1130,7 @@ func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string) // return value will be [DatastoreCluster/sharedVmfs-0] kubevols/.vmdk // else return value will be [sharedVmfs-0] kubevols/.vmdk func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVolumePath string, err error) { - glog.V(1).Infof("Starting to create a vSphere volume with volumeOptions: %+v", volumeOptions) + klog.V(1).Infof("Starting to create a vSphere volume with volumeOptions: %+v", volumeOptions) createVolumeInternal := func(volumeOptions *vclib.VolumeOptions) (canonicalVolumePath string, err error) { var datastore string // If datastore not specified, then use default datastore @@ -1161,21 +1160,21 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo // This routine will get executed for every 5 minutes and gets initiated only once in its entire lifetime. cleanUpRoutineInitLock.Lock() if !cleanUpRoutineInitialized { - glog.V(1).Infof("Starting a clean up routine to remove stale dummy VM's") + klog.V(1).Infof("Starting a clean up routine to remove stale dummy VM's") go vs.cleanUpDummyVMs(DummyVMPrefixName) cleanUpRoutineInitialized = true } cleanUpRoutineInitLock.Unlock() vmOptions, err = vs.setVMOptions(ctx, dc, vs.cfg.Workspace.ResourcePoolPath) if err != nil { - glog.Errorf("Failed to set VM options requires to create a vsphere volume. err: %+v", err) + klog.Errorf("Failed to set VM options requires to create a vsphere volume. err: %+v", err) return "", err } } if volumeOptions.StoragePolicyName != "" && volumeOptions.Datastore == "" { datastore, err = getPbmCompatibleDatastore(ctx, dc, volumeOptions.StoragePolicyName, vs.nodeManager) if err != nil { - glog.Errorf("Failed to get pbm compatible datastore with storagePolicy: %s. err: %+v", volumeOptions.StoragePolicyName, err) + klog.Errorf("Failed to get pbm compatible datastore with storagePolicy: %s. err: %+v", volumeOptions.StoragePolicyName, err) return "", err } } else { @@ -1183,7 +1182,7 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo // if the given datastore is a shared datastore across all node VMs. 
sharedDsList, err := getSharedDatastoresInK8SCluster(ctx, dc, vs.nodeManager) if err != nil { - glog.Errorf("Failed to get shared datastore: %+v", err) + klog.Errorf("Failed to get shared datastore: %+v", err) return "", err } found := false @@ -1206,7 +1205,7 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo kubeVolsPath := filepath.Clean(ds.Path(VolDir)) + "/" err = ds.CreateDirectory(ctx, kubeVolsPath, false) if err != nil && err != vclib.ErrFileAlreadyExist { - glog.Errorf("Cannot create dir %#v. err %s", kubeVolsPath, err) + klog.Errorf("Cannot create dir %#v. err %s", kubeVolsPath, err) return "", err } volumePath := kubeVolsPath + volumeOptions.Name + ".vmdk" @@ -1217,13 +1216,13 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo } volumePath, err = disk.Create(ctx, ds) if err != nil { - glog.Errorf("Failed to create a vsphere volume with volumeOptions: %+v on datastore: %s. err: %+v", volumeOptions, datastore, err) + klog.Errorf("Failed to create a vsphere volume with volumeOptions: %+v on datastore: %s. err: %+v", volumeOptions, datastore, err) return "", err } // Get the canonical path for the volume path. canonicalVolumePath, err = getcanonicalVolumePath(ctx, dc, volumePath) if err != nil { - glog.Errorf("Failed to get canonical vsphere volume path for volume: %s with volumeOptions: %+v on datastore: %s. err: %+v", volumePath, volumeOptions, datastore, err) + klog.Errorf("Failed to get canonical vsphere volume path for volume: %s with volumeOptions: %+v on datastore: %s. err: %+v", volumePath, volumeOptions, datastore, err) return "", err } if filepath.Base(datastore) != datastore { @@ -1235,13 +1234,13 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo requestTime := time.Now() canonicalVolumePath, err = createVolumeInternal(volumeOptions) vclib.RecordCreateVolumeMetric(volumeOptions, requestTime, err) - glog.V(4).Infof("The canonical volume path for the newly created vSphere volume is %q", canonicalVolumePath) + klog.V(4).Infof("The canonical volume path for the newly created vSphere volume is %q", canonicalVolumePath) return canonicalVolumePath, err } // DeleteVolume deletes a volume given volume name. func (vs *VSphere) DeleteVolume(vmDiskPath string) error { - glog.V(1).Infof("Starting to delete vSphere volume with vmDiskPath: %s", vmDiskPath) + klog.V(1).Infof("Starting to delete vSphere volume with vmDiskPath: %s", vmDiskPath) deleteVolumeInternal := func(vmDiskPath string) error { // Create context ctx, cancel := context.WithCancel(context.Background()) @@ -1261,7 +1260,7 @@ func (vs *VSphere) DeleteVolume(vmDiskPath string) error { } err = disk.Delete(ctx, dc) if err != nil { - glog.Errorf("Failed to delete vsphere volume with vmDiskPath: %s. err: %+v", vmDiskPath, err) + klog.Errorf("Failed to delete vsphere volume with vmDiskPath: %s. 
err: %+v", vmDiskPath, err) } return err } @@ -1280,11 +1279,11 @@ func (vs *VSphere) HasClusterID() bool { func (vs *VSphere) NodeAdded(obj interface{}) { node, ok := obj.(*v1.Node) if node == nil || !ok { - glog.Warningf("NodeAdded: unrecognized object %+v", obj) + klog.Warningf("NodeAdded: unrecognized object %+v", obj) return } - glog.V(4).Infof("Node added: %+v", node) + klog.V(4).Infof("Node added: %+v", node) vs.nodeManager.RegisterNode(node) } @@ -1292,11 +1291,11 @@ func (vs *VSphere) NodeAdded(obj interface{}) { func (vs *VSphere) NodeDeleted(obj interface{}) { node, ok := obj.(*v1.Node) if node == nil || !ok { - glog.Warningf("NodeDeleted: unrecognized object %+v", obj) + klog.Warningf("NodeDeleted: unrecognized object %+v", obj) return } - glog.V(4).Infof("Node deleted: %+v", node) + klog.V(4).Infof("Node deleted: %+v", node) vs.nodeManager.UnRegisterNode(node) } @@ -1321,23 +1320,23 @@ func withTagsClient(ctx context.Context, connection *vclib.VSphereConnection, f func (vs *VSphere) GetZone(ctx context.Context) (cloudprovider.Zone, error) { nodeName, err := vs.CurrentNodeName(ctx, vs.hostName) if err != nil { - glog.Errorf("Cannot get node name.") + klog.Errorf("Cannot get node name.") return cloudprovider.Zone{}, err } zone := cloudprovider.Zone{} vsi, err := vs.getVSphereInstanceForServer(vs.cfg.Workspace.VCenterIP, ctx) if err != nil { - glog.Errorf("Cannot connent to vsphere. Get zone for node %s error", nodeName) + klog.Errorf("Cannot connent to vsphere. Get zone for node %s error", nodeName) return cloudprovider.Zone{}, err } dc, err := vclib.GetDatacenter(ctx, vsi.conn, vs.cfg.Workspace.Datacenter) if err != nil { - glog.Errorf("Cannot connent to datacenter. Get zone for node %s error", nodeName) + klog.Errorf("Cannot connent to datacenter. Get zone for node %s error", nodeName) return cloudprovider.Zone{}, err } vmHost, err := dc.GetHostByVMUUID(ctx, vs.vmUUID) if err != nil { - glog.Errorf("Cannot find VM runtime host. Get zone for node %s error", nodeName) + klog.Errorf("Cannot find VM runtime host. Get zone for node %s error", nodeName) return cloudprovider.Zone{}, err } @@ -1355,23 +1354,23 @@ func (vs *VSphere) GetZone(ctx context.Context) (cloudprovider.Zone, error) { obj := objects[len(objects)-1-i] tags, err := client.ListAttachedTags(ctx, obj) if err != nil { - glog.Errorf("Cannot list attached tags. Get zone for node %s: %s", nodeName, err) + klog.Errorf("Cannot list attached tags. 
Get zone for node %s: %s", nodeName, err) return err } for _, value := range tags { tag, err := client.GetTag(ctx, value) if err != nil { - glog.Errorf("Get tag %s: %s", value, err) + klog.Errorf("Get tag %s: %s", value, err) return err } category, err := client.GetCategory(ctx, tag.CategoryID) if err != nil { - glog.Errorf("Get category %s error", value) + klog.Errorf("Get category %s error", value) return err } found := func() { - glog.Errorf("Found %q tag (%s) for %s attached to %s", category.Name, tag.Name, vs.vmUUID, obj.Reference()) + klog.Errorf("Found %q tag (%s) for %s attached to %s", category.Name, tag.Name, vs.vmUUID, obj.Reference()) } switch { case category.Name == vs.cfg.Labels.Zone: @@ -1402,7 +1401,7 @@ func (vs *VSphere) GetZone(ctx context.Context) (cloudprovider.Zone, error) { return nil }) if err != nil { - glog.Errorf("Get zone for node %s: %s", nodeName, err) + klog.Errorf("Get zone for node %s: %s", nodeName, err) return cloudprovider.Zone{}, err } return zone, nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere_util.go index 2601032457ae..04f241a89d39 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere_util.go @@ -19,24 +19,23 @@ package vsphere import ( "context" "errors" + "fmt" + "io/ioutil" "os" + "path/filepath" "regexp" "strings" "time" - "github.com/golang/glog" "github.com/vmware/govmomi/vim25" - - "fmt" - "github.com/vmware/govmomi/vim25/mo" - "io/ioutil" + "k8s.io/klog" + "k8s.io/api/core/v1" k8stypes "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/version" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers" - "k8s.io/kubernetes/pkg/util/version" - "path/filepath" ) const ( @@ -81,18 +80,6 @@ func getVSphereConfig() (*VSphereConfig, error) { return &cfg, nil } -func getVSphereConn(cfg *VSphereConfig) *vclib.VSphereConnection { - vSphereConn := &vclib.VSphereConnection{ - Username: cfg.Global.User, - Password: cfg.Global.Password, - Hostname: cfg.Global.VCenterIP, - Insecure: cfg.Global.InsecureFlag, - RoundTripperCount: cfg.Global.RoundTripperCount, - Port: cfg.Global.VCenterPort, - } - return vSphereConn -} - // Returns the accessible datastores for the given node VM. func getAccessibleDatastores(ctx context.Context, nodeVmDetail *NodeDetails, nodeManager *NodeManager) ([]*vclib.DatastoreInfo, error) { accessibleDatastores, err := nodeVmDetail.vm.GetAllAccessibleDatastores(ctx) @@ -100,27 +87,27 @@ func getAccessibleDatastores(ctx context.Context, nodeVmDetail *NodeDetails, nod // Check if the node VM is not found which indicates that the node info in the node manager is stale. // If so, rediscover the node and retry. if vclib.IsManagedObjectNotFoundError(err) { - glog.V(4).Infof("error %q ManagedObjectNotFound for node %q. Rediscovering...", err, nodeVmDetail.NodeName) + klog.V(4).Infof("error %q ManagedObjectNotFound for node %q. 
Rediscovering...", err, nodeVmDetail.NodeName) err = nodeManager.RediscoverNode(convertToK8sType(nodeVmDetail.NodeName)) if err == nil { - glog.V(4).Infof("Discovered node %s successfully", nodeVmDetail.NodeName) + klog.V(4).Infof("Discovered node %s successfully", nodeVmDetail.NodeName) nodeInfo, err := nodeManager.GetNodeInfo(convertToK8sType(nodeVmDetail.NodeName)) if err != nil { - glog.V(4).Infof("error %q getting node info for node %+v", err, nodeVmDetail) + klog.V(4).Infof("error %q getting node info for node %+v", err, nodeVmDetail) return nil, err } accessibleDatastores, err = nodeInfo.vm.GetAllAccessibleDatastores(ctx) if err != nil { - glog.V(4).Infof("error %q getting accessible datastores for node %+v", err, nodeVmDetail) + klog.V(4).Infof("error %q getting accessible datastores for node %+v", err, nodeVmDetail) return nil, err } } else { - glog.V(4).Infof("error %q rediscovering node %+v", err, nodeVmDetail) + klog.V(4).Infof("error %q rediscovering node %+v", err, nodeVmDetail) return nil, err } } else { - glog.V(4).Infof("error %q getting accessible datastores for node %+v", err, nodeVmDetail) + klog.V(4).Infof("error %q getting accessible datastores for node %+v", err, nodeVmDetail) return nil, err } } @@ -131,22 +118,22 @@ func getAccessibleDatastores(ctx context.Context, nodeVmDetail *NodeDetails, nod func getSharedDatastoresInK8SCluster(ctx context.Context, dc *vclib.Datacenter, nodeManager *NodeManager) ([]*vclib.DatastoreInfo, error) { nodeVmDetails, err := nodeManager.GetNodeDetails() if err != nil { - glog.Errorf("Error while obtaining Kubernetes node nodeVmDetail details. error : %+v", err) + klog.Errorf("Error while obtaining Kubernetes node nodeVmDetail details. error : %+v", err) return nil, err } if len(nodeVmDetails) == 0 { msg := fmt.Sprintf("Kubernetes node nodeVmDetail details is empty. nodeVmDetails : %+v", nodeVmDetails) - glog.Error(msg) + klog.Error(msg) return nil, fmt.Errorf(msg) } var sharedDatastores []*vclib.DatastoreInfo for _, nodeVmDetail := range nodeVmDetails { - glog.V(9).Infof("Getting accessible datastores for node %s", nodeVmDetail.NodeName) + klog.V(9).Infof("Getting accessible datastores for node %s", nodeVmDetail.NodeName) accessibleDatastores, err := getAccessibleDatastores(ctx, &nodeVmDetail, nodeManager) if err != nil { if err == vclib.ErrNoVMFound { - glog.V(9).Infof("Got NoVMFound error for node %s", nodeVmDetail.NodeName) + klog.V(9).Infof("Got NoVMFound error for node %s", nodeVmDetail.NodeName) continue } return nil, err @@ -161,19 +148,19 @@ func getSharedDatastoresInK8SCluster(ctx context.Context, dc *vclib.Datacenter, } } } - glog.V(9).Infof("sharedDatastores : %+v", sharedDatastores) + klog.V(9).Infof("sharedDatastores : %+v", sharedDatastores) sharedDatastores, err = getDatastoresForEndpointVC(ctx, dc, sharedDatastores) if err != nil { - glog.Errorf("Failed to get shared datastores from endpoint VC. err: %+v", err) + klog.Errorf("Failed to get shared datastores from endpoint VC. 
err: %+v", err) return nil, err } - glog.V(9).Infof("sharedDatastores at endpoint VC: %+v", sharedDatastores) + klog.V(9).Infof("sharedDatastores at endpoint VC: %+v", sharedDatastores) return sharedDatastores, nil } func intersect(list1 []*vclib.DatastoreInfo, list2 []*vclib.DatastoreInfo) []*vclib.DatastoreInfo { - glog.V(9).Infof("list1: %+v", list1) - glog.V(9).Infof("list2: %+v", list2) + klog.V(9).Infof("list1: %+v", list1) + klog.V(9).Infof("list2: %+v", list2) var sharedDs []*vclib.DatastoreInfo for _, val1 := range list1 { // Check if val1 is found in list2 @@ -215,10 +202,10 @@ func getDatastoresForEndpointVC(ctx context.Context, dc *vclib.Datacenter, share if ok { datastores = append(datastores, dsInfo) } else { - glog.V(4).Infof("Warning: Shared datastore with URL %s does not exist in endpoint VC", sharedDsInfo.Info.Url) + klog.V(4).Infof("Warning: Shared datastore with URL %s does not exist in endpoint VC", sharedDsInfo.Info.Url) } } - glog.V(9).Infof("Datastore from endpoint VC: %+v", datastores) + klog.V(9).Infof("Datastore from endpoint VC: %+v", datastores) return datastores, nil } @@ -229,32 +216,32 @@ func getPbmCompatibleDatastore(ctx context.Context, dc *vclib.Datacenter, storag } storagePolicyID, err := pbmClient.ProfileIDByName(ctx, storagePolicyName) if err != nil { - glog.Errorf("Failed to get Profile ID by name: %s. err: %+v", storagePolicyName, err) + klog.Errorf("Failed to get Profile ID by name: %s. err: %+v", storagePolicyName, err) return "", err } sharedDs, err := getSharedDatastoresInK8SCluster(ctx, dc, nodeManager) if err != nil { - glog.Errorf("Failed to get shared datastores. err: %+v", err) + klog.Errorf("Failed to get shared datastores. err: %+v", err) return "", err } if len(sharedDs) == 0 { msg := "No shared datastores found in the endpoint virtual center" - glog.Errorf(msg) + klog.Errorf(msg) return "", errors.New(msg) } compatibleDatastores, _, err := pbmClient.GetCompatibleDatastores(ctx, dc, storagePolicyID, sharedDs) if err != nil { - glog.Errorf("Failed to get compatible datastores from datastores : %+v with storagePolicy: %s. err: %+v", + klog.Errorf("Failed to get compatible datastores from datastores : %+v with storagePolicy: %s. err: %+v", sharedDs, storagePolicyID, err) return "", err } - glog.V(9).Infof("compatibleDatastores : %+v", compatibleDatastores) + klog.V(9).Infof("compatibleDatastores : %+v", compatibleDatastores) datastore, err := getMostFreeDatastoreName(ctx, dc.Client(), compatibleDatastores) if err != nil { - glog.Errorf("Failed to get most free datastore from compatible datastores: %+v. err: %+v", compatibleDatastores, err) + klog.Errorf("Failed to get most free datastore from compatible datastores: %+v. 
err: %+v", compatibleDatastores, err) return "", err } - glog.V(4).Infof("Most free datastore : %+s", datastore) + klog.V(4).Infof("Most free datastore : %+s", datastore) return datastore, err } @@ -264,7 +251,7 @@ func (vs *VSphere) setVMOptions(ctx context.Context, dc *vclib.Datacenter, resou if err != nil { return nil, err } - glog.V(9).Infof("Resource pool path %s, resourcePool %+v", resourcePoolPath, resourcePool) + klog.V(9).Infof("Resource pool path %s, resourcePool %+v", resourcePoolPath, resourcePool) folder, err := dc.GetFolderByPath(ctx, vs.cfg.Workspace.Folder) if err != nil { return nil, err @@ -283,26 +270,30 @@ func (vs *VSphere) cleanUpDummyVMs(dummyVMPrefix string) { time.Sleep(CleanUpDummyVMRoutineInterval * time.Minute) vsi, err := vs.getVSphereInstanceForServer(vs.cfg.Workspace.VCenterIP, ctx) if err != nil { - glog.V(4).Infof("Failed to get VSphere instance with err: %+v. Retrying again...", err) + klog.V(4).Infof("Failed to get VSphere instance with err: %+v. Retrying again...", err) continue } dc, err := vclib.GetDatacenter(ctx, vsi.conn, vs.cfg.Workspace.Datacenter) if err != nil { - glog.V(4).Infof("Failed to get the datacenter: %s from VC. err: %+v", vs.cfg.Workspace.Datacenter, err) + klog.V(4).Infof("Failed to get the datacenter: %s from VC. err: %+v", vs.cfg.Workspace.Datacenter, err) continue } // Get the folder reference for global working directory where the dummy VM needs to be created. vmFolder, err := dc.GetFolderByPath(ctx, vs.cfg.Workspace.Folder) if err != nil { - glog.V(4).Infof("Unable to get the kubernetes folder: %q reference. err: %+v", vs.cfg.Workspace.Folder, err) + klog.V(4).Infof("Unable to get the kubernetes folder: %q reference. err: %+v", vs.cfg.Workspace.Folder, err) continue } // A write lock is acquired to make sure the cleanUp routine doesn't delete any VM's created by ongoing PVC requests. - defer cleanUpDummyVMLock.Lock() - err = diskmanagers.CleanUpDummyVMs(ctx, vmFolder, dc) - if err != nil { - glog.V(4).Infof("Unable to clean up dummy VM's in the kubernetes cluster: %q. err: %+v", vs.cfg.Workspace.Folder, err) + cleanUpDummyVMs := func() { + cleanUpDummyVMLock.Lock() + defer cleanUpDummyVMLock.Unlock() + err = diskmanagers.CleanUpDummyVMs(ctx, vmFolder, dc) + if err != nil { + klog.V(4).Infof("Unable to clean up dummy VM's in the kubernetes cluster: %q. err: %+v", vs.cfg.Workspace.Folder, err) + } } + cleanUpDummyVMs() } } @@ -369,7 +360,7 @@ func convertVolPathToDevicePath(ctx context.Context, dc *vclib.Datacenter, volPa // Get the canonical volume path for volPath. canonicalVolumePath, err := getcanonicalVolumePath(ctx, dc, volPath) if err != nil { - glog.Errorf("Failed to get canonical vsphere volume path for volume: %s. err: %+v", volPath, err) + klog.Errorf("Failed to get canonical vsphere volume path for volume: %s. err: %+v", volPath, err) return "", err } // Check if the volume path contains .vmdk extension. If not, add the extension and update the nodeVolumes Map @@ -396,7 +387,7 @@ func (vs *VSphere) convertVolPathsToDevicePaths(ctx context.Context, nodeVolumes for i, volPath := range volPaths { deviceVolPath, err := convertVolPathToDevicePath(ctx, nodeInfo.dataCenter, volPath) if err != nil { - glog.Errorf("Failed to convert vsphere volume path %s to device path for volume %s. err: %+v", volPath, deviceVolPath, err) + klog.Errorf("Failed to convert vsphere volume path %s to device path for volume %s. 
err: %+v", volPath, deviceVolPath, err) return nil, err } volPaths[i] = deviceVolPath @@ -432,7 +423,7 @@ func (vs *VSphere) checkDiskAttached(ctx context.Context, nodes []k8stypes.NodeN vmMoList, err := nodeInfo.dataCenter.GetVMMoList(ctx, vmList, []string{"config.hardware.device", "name", "config.uuid"}) if err != nil { if vclib.IsManagedObjectNotFoundError(err) && !retry { - glog.V(4).Infof("checkDiskAttached: ManagedObjectNotFound for property collector query for nodes: %+v vms: %+v", nodes, vmList) + klog.V(4).Infof("checkDiskAttached: ManagedObjectNotFound for property collector query for nodes: %+v vms: %+v", nodes, vmList) // Property Collector Query failed // VerifyVolumePaths per VM for _, nodeName := range nodes { @@ -443,13 +434,13 @@ func (vs *VSphere) checkDiskAttached(ctx context.Context, nodes []k8stypes.NodeN devices, err := nodeInfo.vm.VirtualMachine.Device(ctx) if err != nil { if vclib.IsManagedObjectNotFoundError(err) { - glog.V(4).Infof("checkDiskAttached: ManagedObjectNotFound for Kubernetes node: %s with vSphere Virtual Machine reference: %v", nodeName, nodeInfo.vm) + klog.V(4).Infof("checkDiskAttached: ManagedObjectNotFound for Kubernetes node: %s with vSphere Virtual Machine reference: %v", nodeName, nodeInfo.vm) nodesToRetry = append(nodesToRetry, nodeName) continue } return nodesToRetry, err } - glog.V(4).Infof("Verifying Volume Paths by devices for node %s and VM %s", nodeName, nodeInfo.vm) + klog.V(4).Infof("Verifying Volume Paths by devices for node %s and VM %s", nodeName, nodeInfo.vm) vclib.VerifyVolumePathsForVMDevices(devices, nodeVolumes[nodeName], convertToString(nodeName), attached) } } @@ -459,14 +450,14 @@ func (vs *VSphere) checkDiskAttached(ctx context.Context, nodes []k8stypes.NodeN vmMoMap := make(map[string]mo.VirtualMachine) for _, vmMo := range vmMoList { if vmMo.Config == nil { - glog.Errorf("Config is not available for VM: %q", vmMo.Name) + klog.Errorf("Config is not available for VM: %q", vmMo.Name) continue } - glog.V(9).Infof("vmMoMap vmname: %q vmuuid: %s", vmMo.Name, strings.ToLower(vmMo.Config.Uuid)) + klog.V(9).Infof("vmMoMap vmname: %q vmuuid: %s", vmMo.Name, strings.ToLower(vmMo.Config.Uuid)) vmMoMap[strings.ToLower(vmMo.Config.Uuid)] = vmMo } - glog.V(9).Infof("vmMoMap: +%v", vmMoMap) + klog.V(9).Infof("vmMoMap: +%v", vmMoMap) for _, nodeName := range nodes { node, err := vs.nodeManager.GetNode(nodeName) @@ -475,11 +466,11 @@ func (vs *VSphere) checkDiskAttached(ctx context.Context, nodes []k8stypes.NodeN } nodeUUID, err := GetNodeUUID(&node) if err != nil { - glog.Errorf("Node Discovery failed to get node uuid for node %s with error: %v", node.Name, err) + klog.Errorf("Node Discovery failed to get node uuid for node %s with error: %v", node.Name, err) return nodesToRetry, err } nodeUUID = strings.ToLower(nodeUUID) - glog.V(9).Infof("Verifying volume for node %s with nodeuuid %q: %v", nodeName, nodeUUID, vmMoMap) + klog.V(9).Infof("Verifying volume for node %s with nodeuuid %q: %v", nodeName, nodeUUID, vmMoMap) vclib.VerifyVolumePathsForVM(vmMoMap[nodeUUID], nodeVolumes[nodeName], convertToString(nodeName), attached) } return nodesToRetry, nil @@ -526,7 +517,7 @@ func (vs *VSphere) GetNodeNameFromProviderID(providerID string) (string, error) var nodeName string nodes, err := vs.nodeManager.GetNodeDetails() if err != nil { - glog.Errorf("Error while obtaining Kubernetes node nodeVmDetail details. error : %+v", err) + klog.Errorf("Error while obtaining Kubernetes node nodeVmDetail details. 
error : %+v", err) return "", err } for _, node := range nodes { @@ -573,12 +564,12 @@ func GetUUIDFromProviderID(providerID string) string { func IsUUIDSupportedNode(node *v1.Node) (bool, error) { newVersion, err := version.ParseSemantic("v1.9.4") if err != nil { - glog.Errorf("Failed to determine whether node %+v is old with error %v", node, err) + klog.Errorf("Failed to determine whether node %+v is old with error %v", node, err) return false, err } nodeVersion, err := version.ParseSemantic(node.Status.NodeInfo.KubeletVersion) if err != nil { - glog.Errorf("Failed to determine whether node %+v is old with error %v", node, err) + klog.Errorf("Failed to determine whether node %+v is old with error %v", node, err) return false, err } if nodeVersion.LessThan(newVersion) { @@ -590,7 +581,7 @@ func IsUUIDSupportedNode(node *v1.Node) (bool, error) { func GetNodeUUID(node *v1.Node) (string, error) { oldNode, err := IsUUIDSupportedNode(node) if err != nil { - glog.Errorf("Failed to get node UUID for node %+v with error %v", node, err) + klog.Errorf("Failed to get node UUID for node %+v with error %v", node, err) return "", err } if oldNode { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/.import-restrictions b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/.import-restrictions index c45988ec9b88..31aa4d0b5044 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/.import-restrictions +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/.import-restrictions @@ -11,8 +11,356 @@ "ForbiddenPrefixes": [ "k8s.io/kubernetes/pkg/client/unversioned/testclient" ] + }, + { + "SelectorRegexp": "k8s[.]io/(api/|apimachinery/|apiextensions-apiserver/|apiserver/)", + "AllowedPrefixes": [ + "k8s.io/api/apps/v1", + "k8s.io/api/apps/v1beta1", + "k8s.io/api/authentication/v1", + "k8s.io/api/authorization/v1beta1", + "k8s.io/api/autoscaling/v1", + "k8s.io/api/autoscaling/v2beta1", + "k8s.io/api/autoscaling/v2beta2", + "k8s.io/api/batch/v1", + "k8s.io/api/batch/v1beta1", + "k8s.io/api/certificates/v1beta1", + "k8s.io/api/core/v1", + "k8s.io/api/coordination/v1beta1", + "k8s.io/api/extensions/v1beta1", + "k8s.io/api/policy/v1beta1", + "k8s.io/api/rbac/v1", + "k8s.io/api/storage/v1", + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1", + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset", + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake", + "k8s.io/apimachinery/pkg/api/equality", + "k8s.io/apimachinery/pkg/api/errors", + "k8s.io/apimachinery/pkg/api/meta", + "k8s.io/apimachinery/pkg/api/meta/testrestmapper", + "k8s.io/apimachinery/pkg/api/resource", + "k8s.io/apimachinery/pkg/apis/config", + "k8s.io/apimachinery/pkg/apis/config/v1alpha1", + "k8s.io/apimachinery/pkg/apis/meta/v1", + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", + "k8s.io/apimachinery/pkg/conversion", + "k8s.io/apimachinery/pkg/fields", + "k8s.io/apimachinery/pkg/labels", + "k8s.io/apimachinery/pkg/runtime", + "k8s.io/apimachinery/pkg/runtime/schema", + "k8s.io/apimachinery/pkg/runtime/serializer", + "k8s.io/apimachinery/pkg/types", + "k8s.io/apimachinery/pkg/util/clock", + "k8s.io/apimachinery/pkg/util/diff", + "k8s.io/apimachinery/pkg/util/errors", + "k8s.io/apimachinery/pkg/util/intstr", + "k8s.io/apimachinery/pkg/util/json", + "k8s.io/apimachinery/pkg/util/rand", + "k8s.io/apimachinery/pkg/util/runtime", + "k8s.io/apimachinery/pkg/util/sets", + "k8s.io/apimachinery/pkg/util/strategicpatch", + "k8s.io/apimachinery/pkg/util/uuid", + 
"k8s.io/apimachinery/pkg/util/wait", + "k8s.io/apimachinery/pkg/util/version", + "k8s.io/apimachinery/pkg/watch", + "k8s.io/apiserver/pkg/apis/config", + "k8s.io/apiserver/pkg/apis/config/v1alpha1", + "k8s.io/apiserver/pkg/authentication/serviceaccount", + "k8s.io/apiserver/pkg/storage/names", + "k8s.io/apiserver/pkg/util/feature", + "k8s.io/apiextensions-apiserver/pkg/features", + "k8s.io/apimachinery/pkg/api/validation", + "k8s.io/apimachinery/pkg/apis/meta/internalversion", + "k8s.io/apimachinery/pkg/selection", + "k8s.io/apimachinery/pkg/util/validation", + "k8s.io/apimachinery/pkg/util/validation/field", + "k8s.io/apiserver/pkg/authentication/authenticator", + "k8s.io/apiserver/pkg/authentication/user", + "k8s.io/apiserver/pkg/features", + "k8s.io/apiserver/pkg/registry/generic", + "k8s.io/apimachinery/pkg/version", + "k8s.io/api/imagepolicy/v1alpha1", + "k8s.io/apiserver/pkg/admission", + "k8s.io/apiserver/pkg/storage", + "k8s.io/api/batch/v2alpha1", + "k8s.io/apiserver/pkg/registry/rest", + "k8s.io/apimachinery/pkg/util/initialization", + "k8s.io/api/scheduling/v1alpha1", + "k8s.io/api/admissionregistration/v1beta1", + "k8s.io/api/authorization/v1", + "k8s.io/api/settings/v1alpha1", + "k8s.io/api/admission/v1beta1", + "k8s.io/api/networking/v1", + "k8s.io/api/admissionregistration/v1alpha1" + ] + }, + { + "SelectorRegexp": "github[.]com/", + "AllowedPrefixes": [ + "github.com/Azure/go-autorest/autorest/to", + "github.com/cloudflare/cfssl/config", + "github.com/cloudflare/cfssl/helpers", + "github.com/cloudflare/cfssl/signer", + "github.com/cloudflare/cfssl/signer/local", + "github.com/davecgh/go-spew/spew", + "github.com/evanphx/json-patch", + "github.com/golang/groupcache/lru", + "github.com/prometheus/client_golang/prometheus", + "github.com/robfig/cron", + "github.com/spf13/pflag", + "github.com/stretchr/testify/assert", + "github.com/stretchr/testify/require", + "github.com/docker/distribution/reference", + "github.com/google/gofuzz" + ] + }, + { + "SelectorRegexp": "k8s[.]io/client-go/", + "AllowedPrefixes": [ + "k8s.io/client-go/discovery", + "k8s.io/client-go/dynamic", + "k8s.io/client-go/informers", + "k8s.io/client-go/informers/apps/v1", + "k8s.io/client-go/informers/apps/v1beta1", + "k8s.io/client-go/informers/autoscaling/v1", + "k8s.io/client-go/informers/batch/v1", + "k8s.io/client-go/informers/certificates/v1beta1", + "k8s.io/client-go/informers/core/v1", + "k8s.io/client-go/informers/extensions/v1beta1", + "k8s.io/client-go/informers/policy/v1beta1", + "k8s.io/client-go/informers/rbac/v1", + "k8s.io/client-go/informers/storage/v1", + "k8s.io/client-go/kubernetes", + "k8s.io/client-go/kubernetes/fake", + "k8s.io/client-go/kubernetes/scheme", + "k8s.io/client-go/kubernetes/typed/apps/v1", + "k8s.io/client-go/kubernetes/typed/authentication/v1", + "k8s.io/client-go/kubernetes/typed/autoscaling/v1", + "k8s.io/client-go/kubernetes/typed/certificates/v1beta1", + "k8s.io/client-go/kubernetes/typed/core/v1", + "k8s.io/client-go/kubernetes/typed/policy/v1beta1", + "k8s.io/client-go/kubernetes/typed/rbac/v1", + "k8s.io/client-go/listers/apps/v1", + "k8s.io/client-go/listers/apps/v1beta1", + "k8s.io/client-go/listers/autoscaling/v1", + "k8s.io/client-go/listers/batch/v1", + "k8s.io/client-go/listers/certificates/v1beta1", + "k8s.io/client-go/listers/core/v1", + "k8s.io/client-go/listers/coordination/v1beta1", + "k8s.io/client-go/listers/extensions/v1beta1", + "k8s.io/client-go/listers/policy/v1beta1", + "k8s.io/client-go/listers/rbac/v1", + "k8s.io/client-go/listers/storage/v1", 
+ "k8s.io/client-go/rest", + "k8s.io/client-go/scale", + "k8s.io/client-go/scale/fake", + "k8s.io/client-go/testing", + "k8s.io/client-go/tools/bootstrap/token/api", + "k8s.io/client-go/tools/cache", + "k8s.io/client-go/tools/leaderelection/resourcelock", + "k8s.io/client-go/tools/record", + "k8s.io/client-go/tools/reference", + "k8s.io/client-go/tools/watch", + "k8s.io/client-go/util/cert", + "k8s.io/client-go/util/flowcontrol", + "k8s.io/client-go/util/integer", + "k8s.io/client-go/util/retry", + "k8s.io/client-go/util/testing", + "k8s.io/client-go/util/workqueue" + ] + }, + { + "SelectorRegexp": "k8s[.]io/kubernetes/pkg", + "AllowedPrefixes": [ + "k8s.io/kubernetes/pkg/api/legacyscheme", + "k8s.io/kubernetes/pkg/api/testapi", + "k8s.io/kubernetes/pkg/api/v1/endpoints", + "k8s.io/kubernetes/pkg/api/v1/node", + "k8s.io/kubernetes/pkg/api/v1/pod", + "k8s.io/kubernetes/pkg/apis/apps/install", + "k8s.io/kubernetes/pkg/apis/apps/v1", + "k8s.io/kubernetes/pkg/apis/authentication/install", + "k8s.io/kubernetes/pkg/apis/authorization/install", + "k8s.io/kubernetes/pkg/apis/autoscaling", + "k8s.io/kubernetes/pkg/apis/autoscaling/install", + "k8s.io/kubernetes/pkg/apis/batch/install", + "k8s.io/kubernetes/pkg/apis/certificates/install", + "k8s.io/kubernetes/pkg/apis/certificates/v1beta1", + "k8s.io/kubernetes/pkg/apis/core", + "k8s.io/kubernetes/pkg/apis/core/helper", + "k8s.io/kubernetes/pkg/apis/core/install", + "k8s.io/kubernetes/pkg/apis/core/v1", + "k8s.io/kubernetes/pkg/apis/core/v1/helper", + "k8s.io/kubernetes/pkg/apis/core/validation", + "k8s.io/kubernetes/pkg/apis/extensions", + "k8s.io/kubernetes/pkg/apis/extensions/install", + "k8s.io/kubernetes/pkg/apis/policy/install", + "k8s.io/kubernetes/pkg/apis/rbac/install", + "k8s.io/kubernetes/pkg/apis/settings/install", + "k8s.io/kubernetes/pkg/apis/storage/install", + "k8s.io/kubernetes/pkg/client/unversioned", + "k8s.io/kubernetes/pkg/client/unversioned/testclient", + "k8s.io/kubernetes/pkg/cloudprovider", + "k8s.io/kubernetes/pkg/cloudprovider/providers/fake", + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce", + "k8s.io/kubernetes/pkg/controller", + "k8s.io/kubernetes/pkg/controller/apis/config", + "k8s.io/kubernetes/pkg/controller/apis/config/scheme", + "k8s.io/kubernetes/pkg/controller/apis/config/v1alpha1", + "k8s.io/kubernetes/pkg/controller/bootstrap", + "k8s.io/kubernetes/pkg/controller/certificates", + "k8s.io/kubernetes/pkg/controller/certificates/approver", + "k8s.io/kubernetes/pkg/controller/certificates/cleaner", + "k8s.io/kubernetes/pkg/controller/certificates/signer", + "k8s.io/kubernetes/pkg/controller/cloud", + "k8s.io/kubernetes/pkg/controller/clusterroleaggregation", + "k8s.io/kubernetes/pkg/controller/cronjob", + "k8s.io/kubernetes/pkg/controller/daemon", + "k8s.io/kubernetes/pkg/controller/daemon/util", + "k8s.io/kubernetes/pkg/controller/deployment", + "k8s.io/kubernetes/pkg/controller/deployment/util", + "k8s.io/kubernetes/pkg/controller/disruption", + "k8s.io/kubernetes/pkg/controller/endpoint", + "k8s.io/kubernetes/pkg/controller/garbagecollector", + "k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly", + "k8s.io/kubernetes/pkg/controller/history", + "k8s.io/kubernetes/pkg/controller/job", + "k8s.io/kubernetes/pkg/controller/namespace", + "k8s.io/kubernetes/pkg/controller/namespace/deletion", + "k8s.io/kubernetes/pkg/controller/nodeipam", + "k8s.io/kubernetes/pkg/controller/nodeipam/ipam", + "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset", + "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/sync", 
+ "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test", + "k8s.io/kubernetes/pkg/controller/nodelifecycle", + "k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler", + "k8s.io/kubernetes/pkg/controller/podautoscaler", + "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics", + "k8s.io/kubernetes/pkg/controller/podgc", + "k8s.io/kubernetes/pkg/controller/replicaset", + "k8s.io/kubernetes/pkg/controller/replicaset/options", + "k8s.io/kubernetes/pkg/controller/replication", + "k8s.io/kubernetes/pkg/controller/resourcequota", + "k8s.io/kubernetes/pkg/controller/route", + "k8s.io/kubernetes/pkg/controller/service", + "k8s.io/kubernetes/pkg/controller/serviceaccount", + "k8s.io/kubernetes/pkg/controller/statefulset", + "k8s.io/kubernetes/pkg/controller/testutil", + "k8s.io/kubernetes/pkg/controller/ttl", + "k8s.io/kubernetes/pkg/controller/ttlafterfinished", + "k8s.io/kubernetes/pkg/controller/util/node", + "k8s.io/kubernetes/pkg/controller/volume/attachdetach", + "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache", + "k8s.io/kubernetes/pkg/controller/volume/attachdetach/metrics", + "k8s.io/kubernetes/pkg/controller/volume/attachdetach/populator", + "k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler", + "k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater", + "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing", + "k8s.io/kubernetes/pkg/controller/volume/attachdetach/util", + "k8s.io/kubernetes/pkg/controller/volume/events", + "k8s.io/kubernetes/pkg/controller/volume/expand", + "k8s.io/kubernetes/pkg/controller/volume/expand/cache", + "k8s.io/kubernetes/pkg/controller/volume/persistentvolume", + "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics", + "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/options", + "k8s.io/kubernetes/pkg/controller/volume/pvcprotection", + "k8s.io/kubernetes/pkg/controller/volume/pvprotection", + "k8s.io/kubernetes/pkg/features", + "k8s.io/kubernetes/pkg/kubectl/scheme", + "k8s.io/kubernetes/pkg/kubelet/apis", + "k8s.io/kubernetes/pkg/kubelet/events", + "k8s.io/kubernetes/pkg/kubelet/types", + "k8s.io/kubernetes/pkg/kubelet/util/format", + "k8s.io/kubernetes/pkg/quota", + "k8s.io/kubernetes/pkg/quota/evaluator/core", + "k8s.io/kubernetes/pkg/quota/generic", + "k8s.io/kubernetes/pkg/quota/install", + "k8s.io/kubernetes/pkg/registry/core/secret", + "k8s.io/kubernetes/pkg/scheduler/algorithm", + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates", + "k8s.io/kubernetes/pkg/scheduler/cache", + "k8s.io/kubernetes/pkg/securitycontext", + "k8s.io/kubernetes/pkg/serviceaccount", + "k8s.io/kubernetes/pkg/util/goroutinemap", + "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff", + "k8s.io/kubernetes/pkg/util/hash", + "k8s.io/kubernetes/pkg/util/labels", + "k8s.io/kubernetes/pkg/util/metrics", + "k8s.io/kubernetes/pkg/util/mount", + "k8s.io/kubernetes/pkg/util/node", + "k8s.io/kubernetes/pkg/util/reflector/prometheus", + "k8s.io/kubernetes/pkg/util/slice", + "k8s.io/kubernetes/pkg/util/strings", + "k8s.io/kubernetes/pkg/util/system", + "k8s.io/kubernetes/pkg/util/taints", + "k8s.io/kubernetes/pkg/util/workqueue/prometheus", + "k8s.io/kubernetes/pkg/volume", + "k8s.io/kubernetes/pkg/volume/testing", + "k8s.io/kubernetes/pkg/volume/util", + "k8s.io/kubernetes/pkg/volume/util/operationexecutor", + "k8s.io/kubernetes/pkg/volume/util/recyclerclient", + "k8s.io/kubernetes/pkg/volume/util/types", + "k8s.io/kubernetes/pkg/volume/util/volumepathhandler", + "k8s.io/kubernetes/pkg/api/service", + 
"k8s.io/kubernetes/pkg/api/v1/service", + "k8s.io/kubernetes/pkg/apis/networking", + "k8s.io/kubernetes/pkg/apis/policy", + "k8s.io/kubernetes/pkg/apis/scheduling", + "k8s.io/kubernetes/pkg/capabilities", + "k8s.io/kubernetes/pkg/master/ports", + "k8s.io/kubernetes/pkg/scheduler/api", + "k8s.io/kubernetes/pkg/scheduler/util", + "k8s.io/kubernetes/pkg/security/apparmor", + "k8s.io/kubernetes/pkg/util/file", + "k8s.io/kubernetes/pkg/util/net/sets", + "k8s.io/kubernetes/pkg/util/parsers", + "k8s.io/kubernetes/pkg/fieldpath", + "k8s.io/kubernetes/pkg/kubeapiserver/admission/util", + "k8s.io/kubernetes/pkg/scheduler/volumebinder", + "k8s.io/kubernetes/pkg/scheduler/internal/cache", + "k8s.io/kubernetes/pkg/util/nsenter", + "k8s.io/kubernetes/pkg/util/resizefs", + "k8s.io/kubernetes/pkg/util/version", + "k8s.io/kubernetes/pkg/apis/apps", + "k8s.io/kubernetes/pkg/version", + "k8s.io/kubernetes/pkg/util/io" + ] + }, + { + "SelectorRegexp": "k8s[.]io/(metrics/|utils/|csi-api/|heapster/|kube-controller-manager/)", + "AllowedPrefixes": [ + "k8s.io/csi-api/pkg/apis/csi/v1alpha1", + "k8s.io/csi-api/pkg/client/clientset/versioned", + "k8s.io/heapster/metrics/api/v1/types", + "k8s.io/kube-controller-manager/config/v1alpha1", + "k8s.io/metrics/pkg/apis/custom_metrics/v1beta2", + "k8s.io/metrics/pkg/apis/external_metrics/v1beta1", + "k8s.io/metrics/pkg/apis/metrics/v1alpha1", + "k8s.io/metrics/pkg/apis/metrics/v1beta1", + "k8s.io/metrics/pkg/client/clientset/versioned/fake", + "k8s.io/metrics/pkg/client/clientset/versioned/scheme", + "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1", + "k8s.io/metrics/pkg/client/custom_metrics", + "k8s.io/metrics/pkg/client/custom_metrics/fake", + "k8s.io/metrics/pkg/client/external_metrics", + "k8s.io/metrics/pkg/client/external_metrics/fake", + "k8s.io/utils/pointer", + "k8s.io/utils/exec" + ] + }, + { + "SelectorRegexp": "golang[.]org/", + "AllowedPrefixes": [ + "golang.org/x/time/rate", + "golang.org/x/sys/unix", + "golang.org/x/oauth2", + "google.golang.org/api/compute/v1", + "google.golang.org/api/googleapi", + "google.golang.org/api/compute/v0.alpha", + "google.golang.org/api/container/v1", + "google.golang.org/api/compute/v0.beta", + "google.golang.org/api/tpu/v1" + ] } - ] } - diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/BUILD index e5c8dc7695f5..b4915ac53656 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/BUILD @@ -55,7 +55,7 @@ go_library( "//pkg/apis/core:go_default_library", "//pkg/apis/core/install:go_default_library", "//pkg/apis/core/validation:go_default_library", - "//pkg/scheduler/algorithm:go_default_library", + "//pkg/scheduler/api:go_default_library", "//pkg/serviceaccount:go_default_library", "//pkg/util/hash:go_default_library", "//pkg/util/taints:go_default_library", @@ -88,8 +88,8 @@ go_library( "//staging/src/k8s.io/client-go/tools/watch:go_default_library", "//staging/src/k8s.io/client-go/util/integer:go_default_library", "//staging/src/k8s.io/client-go/util/retry:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/golang/groupcache/lru:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go index 
bcf2182d0905..caac5649d2f7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go @@ -38,10 +38,12 @@ import ( api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/serviceaccount" - "github.com/golang/glog" + "k8s.io/klog" ) // ControllerClientBuilder allows you to get clients and configs for controllers +// Please note a copy also exists in staging/src/k8s.io/cloud-provider/cloud.go +// TODO: Extract this into a separate controller utilities repo (issues/68947) type ControllerClientBuilder interface { Config(name string) (*restclient.Config, error) ConfigOrDie(name string) *restclient.Config @@ -63,7 +65,7 @@ func (b SimpleControllerClientBuilder) Config(name string) (*restclient.Config, func (b SimpleControllerClientBuilder) ConfigOrDie(name string) *restclient.Config { clientConfig, err := b.Config(name) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } return clientConfig } @@ -79,7 +81,7 @@ func (b SimpleControllerClientBuilder) Client(name string) (clientset.Interface, func (b SimpleControllerClientBuilder) ClientOrDie(name string) clientset.Interface { client, err := b.Client(name) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } return client } @@ -144,15 +146,15 @@ func (b SAControllerClientBuilder) Config(name string) (*restclient.Config, erro } validConfig, valid, err := b.getAuthenticatedConfig(sa, string(secret.Data[v1.ServiceAccountTokenKey])) if err != nil { - glog.Warningf("error validating API token for %s/%s in secret %s: %v", sa.Name, sa.Namespace, secret.Name, err) + klog.Warningf("error validating API token for %s/%s in secret %s: %v", sa.Name, sa.Namespace, secret.Name, err) // continue watching for good tokens return false, nil } if !valid { - glog.Warningf("secret %s contained an invalid API token for %s/%s", secret.Name, sa.Name, sa.Namespace) + klog.Warningf("secret %s contained an invalid API token for %s/%s", secret.Name, sa.Name, sa.Namespace) // try to delete the secret containing the invalid token if err := b.CoreClient.Secrets(secret.Namespace).Delete(secret.Name, &metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { - glog.Warningf("error deleting secret %s containing invalid API token for %s/%s: %v", secret.Name, sa.Name, sa.Namespace, err) + klog.Warningf("error deleting secret %s containing invalid API token for %s/%s: %v", secret.Name, sa.Name, sa.Namespace, err) } // continue watching for good tokens return false, nil @@ -206,14 +208,14 @@ func (b SAControllerClientBuilder) getAuthenticatedConfig(sa *v1.ServiceAccount, tokenReview := &v1authenticationapi.TokenReview{Spec: v1authenticationapi.TokenReviewSpec{Token: token}} if tokenResult, err := b.AuthenticationClient.TokenReviews().Create(tokenReview); err == nil { if !tokenResult.Status.Authenticated { - glog.Warningf("Token for %s/%s did not authenticate correctly", sa.Name, sa.Namespace) + klog.Warningf("Token for %s/%s did not authenticate correctly", sa.Name, sa.Namespace) return nil, false, nil } if tokenResult.Status.User.Username != username { - glog.Warningf("Token for %s/%s authenticated as unexpected username: %s", sa.Name, sa.Namespace, tokenResult.Status.User.Username) + klog.Warningf("Token for %s/%s authenticated as unexpected username: %s", sa.Name, sa.Namespace, tokenResult.Status.User.Username) return nil, false, nil } - glog.V(4).Infof("Verified credential for %s/%s", sa.Name, sa.Namespace) + klog.V(4).Infof("Verified credential for 
%s/%s", sa.Name, sa.Namespace) return clientConfig, true, nil } @@ -227,7 +229,7 @@ func (b SAControllerClientBuilder) getAuthenticatedConfig(sa *v1.ServiceAccount, } err = client.Get().AbsPath("/apis").Do().Error() if apierrors.IsUnauthorized(err) { - glog.Warningf("Token for %s/%s did not authenticate correctly: %v", sa.Name, sa.Namespace, err) + klog.Warningf("Token for %s/%s did not authenticate correctly: %v", sa.Name, sa.Namespace, err) return nil, false, nil } @@ -237,7 +239,7 @@ func (b SAControllerClientBuilder) getAuthenticatedConfig(sa *v1.ServiceAccount, func (b SAControllerClientBuilder) ConfigOrDie(name string) *restclient.Config { clientConfig, err := b.Config(name) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } return clientConfig } @@ -253,7 +255,7 @@ func (b SAControllerClientBuilder) Client(name string) (clientset.Interface, err func (b SAControllerClientBuilder) ClientOrDie(name string) clientset.Interface { client, err := b.Client(name) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } return client } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go index 6cf2ac189464..f63afaca6f35 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go @@ -20,7 +20,6 @@ import ( "fmt" "sync" - "github.com/golang/glog" apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -28,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog" ) type BaseControllerRefManager struct { @@ -223,7 +223,7 @@ func (m *PodControllerRefManager) AdoptPod(pod *v1.Pod) error { // ReleasePod sends a patch to free the pod from the control of the controller. // It returns the error if the patching fails. 404 and 422 errors are ignored. func (m *PodControllerRefManager) ReleasePod(pod *v1.Pod) error { - glog.V(2).Infof("patching pod %s_%s to remove its controllerRef to %s/%s:%s", + klog.V(2).Infof("patching pod %s_%s to remove its controllerRef to %s/%s:%s", pod.Namespace, pod.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName()) deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), pod.UID) err := m.podControl.PatchPod(pod.Namespace, pod.Name, []byte(deleteOwnerRefPatch)) @@ -345,7 +345,7 @@ func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(rs *apps.ReplicaSet) er // ReleaseReplicaSet sends a patch to free the ReplicaSet from the control of the Deployment controller. // It returns the error if the patching fails. 404 and 422 errors are ignored. 
func (m *ReplicaSetControllerRefManager) ReleaseReplicaSet(replicaSet *apps.ReplicaSet) error { - glog.V(2).Infof("patching ReplicaSet %s_%s to remove its controllerRef to %s/%s:%s", + klog.V(2).Infof("patching ReplicaSet %s_%s to remove its controllerRef to %s/%s:%s", replicaSet.Namespace, replicaSet.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName()) deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), replicaSet.UID) err := m.rsControl.PatchReplicaSet(replicaSet.Namespace, replicaSet.Name, []byte(deleteOwnerRefPatch)) @@ -480,7 +480,7 @@ func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history // ReleaseControllerRevision sends a patch to free the ControllerRevision from the control of its controller. // It returns the error if the patching fails. 404 and 422 errors are ignored. func (m *ControllerRevisionControllerRefManager) ReleaseControllerRevision(history *apps.ControllerRevision) error { - glog.V(2).Infof("patching ControllerRevision %s_%s to remove its controllerRef to %s/%s:%s", + klog.V(2).Infof("patching ControllerRevision %s_%s to remove its controllerRef to %s/%s:%s", history.Namespace, history.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName()) deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), history.UID) err := m.crControl.PatchControllerRevision(history.Namespace, history.Name, []byte(deleteOwnerRefPatch)) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go index 394407b28b54..6ccc32aed0f8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go @@ -47,11 +47,11 @@ import ( podutil "k8s.io/kubernetes/pkg/api/v1/pod" _ "k8s.io/kubernetes/pkg/apis/core/install" "k8s.io/kubernetes/pkg/apis/core/validation" - "k8s.io/kubernetes/pkg/scheduler/algorithm" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" hashutil "k8s.io/kubernetes/pkg/util/hash" taintutils "k8s.io/kubernetes/pkg/util/taints" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -90,7 +90,7 @@ var UpdateTaintBackoff = wait.Backoff{ } var ShutdownTaint = &v1.Taint{ - Key: algorithm.TaintNodeShutdown, + Key: schedulerapi.TaintNodeShutdown, Effect: v1.TaintEffectNoSchedule, } @@ -170,7 +170,7 @@ func (r *ControllerExpectations) GetExpectations(controllerKey string) (*Control func (r *ControllerExpectations) DeleteExpectations(controllerKey string) { if exp, exists, err := r.GetByKey(controllerKey); err == nil && exists { if err := r.Delete(exp); err != nil { - glog.V(2).Infof("Error deleting expectations for controller %v: %v", controllerKey, err) + klog.V(2).Infof("Error deleting expectations for controller %v: %v", controllerKey, err) } } } @@ -181,24 +181,24 @@ func (r *ControllerExpectations) DeleteExpectations(controllerKey string) { func (r *ControllerExpectations) SatisfiedExpectations(controllerKey string) bool { if exp, exists, err := r.GetExpectations(controllerKey); exists { if exp.Fulfilled() { - glog.V(4).Infof("Controller expectations fulfilled %#v", exp) + klog.V(4).Infof("Controller expectations fulfilled %#v", exp) return true } else if exp.isExpired() { - glog.V(4).Infof("Controller expectations 
expired %#v", exp) + klog.V(4).Infof("Controller expectations expired %#v", exp) return true } else { - glog.V(4).Infof("Controller still waiting on expectations %#v", exp) + klog.V(4).Infof("Controller still waiting on expectations %#v", exp) return false } } else if err != nil { - glog.V(2).Infof("Error encountered while checking expectations %#v, forcing sync", err) + klog.V(2).Infof("Error encountered while checking expectations %#v, forcing sync", err) } else { // When a new controller is created, it doesn't have expectations. // When it doesn't see expected watch events for > TTL, the expectations expire. // - In this case it wakes up, creates/deletes controllees, and sets expectations again. // When it has satisfied expectations and no controllees need to be created/destroyed > TTL, the expectations expire. // - In this case it continues without setting expectations till it needs to create/delete controllees. - glog.V(4).Infof("Controller %v either never recorded expectations, or the ttl expired.", controllerKey) + klog.V(4).Infof("Controller %v either never recorded expectations, or the ttl expired.", controllerKey) } // Trigger a sync if we either encountered and error (which shouldn't happen since we're // getting from local store) or this controller hasn't established expectations. @@ -215,7 +215,7 @@ func (exp *ControlleeExpectations) isExpired() bool { // SetExpectations registers new expectations for the given controller. Forgets existing expectations. func (r *ControllerExpectations) SetExpectations(controllerKey string, add, del int) error { exp := &ControlleeExpectations{add: int64(add), del: int64(del), key: controllerKey, timestamp: clock.RealClock{}.Now()} - glog.V(4).Infof("Setting expectations %#v", exp) + klog.V(4).Infof("Setting expectations %#v", exp) return r.Add(exp) } @@ -232,7 +232,7 @@ func (r *ControllerExpectations) LowerExpectations(controllerKey string, add, de if exp, exists, err := r.GetExpectations(controllerKey); err == nil && exists { exp.Add(int64(-add), int64(-del)) // The expectations might've been modified since the update on the previous line. - glog.V(4).Infof("Lowered expectations %#v", exp) + klog.V(4).Infof("Lowered expectations %#v", exp) } } @@ -241,7 +241,7 @@ func (r *ControllerExpectations) RaiseExpectations(controllerKey string, add, de if exp, exists, err := r.GetExpectations(controllerKey); err == nil && exists { exp.Add(int64(add), int64(del)) // The expectations might've been modified since the update on the previous line. 
- glog.V(4).Infof("Raised expectations %#v", exp) + klog.V(4).Infof("Raised expectations %#v", exp) } } @@ -340,13 +340,13 @@ func (u *UIDTrackingControllerExpectations) ExpectDeletions(rcKey string, delete defer u.uidStoreLock.Unlock() if existing := u.GetUIDs(rcKey); existing != nil && existing.Len() != 0 { - glog.Errorf("Clobbering existing delete keys: %+v", existing) + klog.Errorf("Clobbering existing delete keys: %+v", existing) } expectedUIDs := sets.NewString() for _, k := range deletedKeys { expectedUIDs.Insert(k) } - glog.V(4).Infof("Controller %v waiting on deletions for: %+v", rcKey, deletedKeys) + klog.V(4).Infof("Controller %v waiting on deletions for: %+v", rcKey, deletedKeys) if err := u.uidStore.Add(&UIDSet{expectedUIDs, rcKey}); err != nil { return err } @@ -360,7 +360,7 @@ func (u *UIDTrackingControllerExpectations) DeletionObserved(rcKey, deleteKey st uids := u.GetUIDs(rcKey) if uids != nil && uids.Has(deleteKey) { - glog.V(4).Infof("Controller %v received delete for pod %v", rcKey, deleteKey) + klog.V(4).Infof("Controller %v received delete for pod %v", rcKey, deleteKey) u.ControllerExpectationsInterface.DeletionObserved(rcKey) uids.Delete(deleteKey) } @@ -375,7 +375,7 @@ func (u *UIDTrackingControllerExpectations) DeleteExpectations(rcKey string) { u.ControllerExpectationsInterface.DeleteExpectations(rcKey) if uidExp, exists, err := u.uidStore.GetByKey(rcKey); err == nil && exists { if err := u.uidStore.Delete(uidExp); err != nil { - glog.V(2).Infof("Error deleting uid expectations for controller %v: %v", rcKey, err) + klog.V(2).Infof("Error deleting uid expectations for controller %v: %v", rcKey, err) } } } @@ -581,10 +581,10 @@ func (r RealPodControl) createPods(nodeName, namespace string, template *v1.PodT } else { accessor, err := meta.Accessor(object) if err != nil { - glog.Errorf("parentObject does not have ObjectMeta, %v", err) + klog.Errorf("parentObject does not have ObjectMeta, %v", err) return nil } - glog.V(4).Infof("Controller %v created pod %v", accessor.GetName(), newPod.Name) + klog.V(4).Infof("Controller %v created pod %v", accessor.GetName(), newPod.Name) r.Recorder.Eventf(object, v1.EventTypeNormal, SuccessfulCreatePodReason, "Created pod: %v", newPod.Name) } return nil @@ -595,7 +595,7 @@ func (r RealPodControl) DeletePod(namespace string, podID string, object runtime if err != nil { return fmt.Errorf("object does not have ObjectMeta, %v", err) } - glog.V(2).Infof("Controller %v deleting pod %v/%v", accessor.GetName(), namespace, podID) + klog.V(2).Infof("Controller %v deleting pod %v/%v", accessor.GetName(), namespace, podID) if err := r.KubeClient.CoreV1().Pods(namespace).Delete(podID, nil); err != nil && !apierrors.IsNotFound(err) { r.Recorder.Eventf(object, v1.EventTypeWarning, FailedDeletePodReason, "Error deleting: %v", err) return fmt.Errorf("unable to delete pods: %v", err) @@ -806,7 +806,7 @@ func FilterActivePods(pods []*v1.Pod) []*v1.Pod { if IsPodActive(p) { result = append(result, p) } else { - glog.V(4).Infof("Ignoring inactive pod %v/%v in state %v, deletion time %v", + klog.V(4).Infof("Ignoring inactive pod %v/%v in state %v, deletion time %v", p.Namespace, p.Name, p.Status.Phase, p.DeletionTimestamp) } } @@ -1024,14 +1024,14 @@ func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, n // indicating that the controller identified by controllerName is waiting for syncs, followed by // either a successful or failed sync. 
func WaitForCacheSync(controllerName string, stopCh <-chan struct{}, cacheSyncs ...cache.InformerSynced) bool { - glog.Infof("Waiting for caches to sync for %s controller", controllerName) + klog.Infof("Waiting for caches to sync for %s controller", controllerName) if !cache.WaitForCacheSync(stopCh, cacheSyncs...) { utilruntime.HandleError(fmt.Errorf("Unable to sync caches for %s controller", controllerName)) return false } - glog.Infof("Caches are synced for %s controller", controllerName) + klog.Infof("Caches are synced for %s controller", controllerName) return true } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/BUILD index faba8b5e1a60..2b563e00ae27 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/BUILD @@ -24,7 +24,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library", "//staging/src/k8s.io/client-go/util/integer:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go index 42640c46a9f3..00e5c4c36e61 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go @@ -24,7 +24,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" @@ -186,7 +186,7 @@ func MaxRevision(allRSs []*apps.ReplicaSet) int64 { for _, rs := range allRSs { if v, err := Revision(rs); err != nil { // Skip the replica sets when it failed to parse their revision information - glog.V(4).Infof("Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions.", err, rs) + klog.V(4).Infof("Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions.", err, rs) } else if v > max { max = v } @@ -200,7 +200,7 @@ func LastRevision(allRSs []*apps.ReplicaSet) int64 { for _, rs := range allRSs { if v, err := Revision(rs); err != nil { // Skip the replica sets when it failed to parse their revision information - glog.V(4).Infof("Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions.", err, rs) + klog.V(4).Infof("Error: %v. 
Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions.", err, rs) } else if v >= max { secMax = max max = v @@ -241,7 +241,7 @@ func SetNewReplicaSetAnnotations(deployment *apps.Deployment, newRS *apps.Replic oldRevisionInt, err := strconv.ParseInt(oldRevision, 10, 64) if err != nil { if oldRevision != "" { - glog.Warningf("Updating replica set revision OldRevision not int %s", err) + klog.Warningf("Updating replica set revision OldRevision not int %s", err) return false } //If the RS annotation is empty then initialise it to 0 @@ -249,13 +249,13 @@ func SetNewReplicaSetAnnotations(deployment *apps.Deployment, newRS *apps.Replic } newRevisionInt, err := strconv.ParseInt(newRevision, 10, 64) if err != nil { - glog.Warningf("Updating replica set revision NewRevision not int %s", err) + klog.Warningf("Updating replica set revision NewRevision not int %s", err) return false } if oldRevisionInt < newRevisionInt { newRS.Annotations[RevisionAnnotation] = newRevision annotationChanged = true - glog.V(4).Infof("Updating replica set %q revision to %s", newRS.Name, newRevision) + klog.V(4).Infof("Updating replica set %q revision to %s", newRS.Name, newRevision) } // If a revision annotation already existed and this replica set was updated with a new revision // then that means we are rolling back to this replica set. We need to preserve the old revisions @@ -376,7 +376,7 @@ func getIntFromAnnotation(rs *apps.ReplicaSet, annotationKey string) (int32, boo } intValue, err := strconv.Atoi(annotationValue) if err != nil { - glog.V(2).Infof("Cannot convert the value %q with annotation key %q for the replica set %q", annotationValue, annotationKey, rs.Name) + klog.V(2).Infof("Cannot convert the value %q with annotation key %q for the replica set %q", annotationValue, annotationKey, rs.Name) return int32(0), false } return int32(intValue), true @@ -787,7 +787,7 @@ func DeploymentTimedOut(deployment *apps.Deployment, newStatus *apps.DeploymentS delta := time.Duration(*deployment.Spec.ProgressDeadlineSeconds) * time.Second timedOut := from.Add(delta).Before(now) - glog.V(4).Infof("Deployment %q timed out (%t) [last progress check: %v - now: %v]", deployment.Name, timedOut, from, now) + klog.V(4).Infof("Deployment %q timed out (%t) [last progress check: %v - now: %v]", deployment.Name, timedOut, from, now) return timedOut } @@ -886,3 +886,6 @@ func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired func HasProgressDeadline(d *apps.Deployment) bool { return d.Spec.ProgressDeadlineSeconds != nil && *d.Spec.ProgressDeadlineSeconds != math.MaxInt32 } +func HasRevisionHistoryLimit(d *apps.Deployment) bool { + return d.Spec.RevisionHistoryLimit != nil && *d.Spec.RevisionHistoryLimit != math.MaxInt32 +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/cache/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/cache/BUILD index e39803a409ce..a416cc859c5e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/cache/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/cache/BUILD @@ -19,7 +19,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) 
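The bulk of this vendor refresh is a mechanical glog-to-klog migration: each vendored package swaps the `github.com/golang/glog` import, and its Bazel dependency (as in the BUILD hunk above), for `k8s.io/klog`, while every call site keeps the same function name and signature. As a minimal sketch (not part of this patch) of what a migrated call site looks like, the one practical difference from glog is that klog registers its flags explicitly via `InitFlags` rather than in an `init()`:

```go
// Minimal sketch of a migrated call site; klog is API-compatible with glog,
// so only the import path changes in the vendored code above.
package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil) // register -v, -logtostderr, ... on the default FlagSet
	flag.Parse()

	klog.Infof("always logged")
	klog.V(4).Infof("logged only at -v=4 or higher")
	klog.Warningf("warning-level message")
	klog.Flush() // klog buffers; flush before exit
}
```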
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/cache/volume_resize_map.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/cache/volume_resize_map.go index d5392c1b65ae..76d85ec4d927 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/cache/volume_resize_map.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/cache/volume_resize_map.go @@ -21,13 +21,13 @@ import ( "fmt" "sync" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" commontypes "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/strategicpatch" clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog" "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/types" ) @@ -97,7 +97,7 @@ func NewVolumeResizeMap(kubeClient clientset.Interface) VolumeResizeMap { // the PVC and hopefully after a no-op resize in volume plugin, PVC will be updated with right values as well. func (resizeMap *volumeResizeMap) AddPVCUpdate(pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) { if pv.Spec.ClaimRef == nil || pvc.Namespace != pv.Spec.ClaimRef.Namespace || pvc.Name != pv.Spec.ClaimRef.Name { - glog.V(4).Infof("Persistent Volume is not bound to PVC being updated : %s", util.ClaimToClaimKey(pvc)) + klog.V(4).Infof("Persistent Volume is not bound to PVC being updated : %s", util.ClaimToClaimKey(pvc)) return } @@ -112,7 +112,7 @@ func (resizeMap *volumeResizeMap) AddPVCUpdate(pvc *v1.PersistentVolumeClaim, pv return } - glog.V(4).Infof("Adding pvc %s with Size %s/%s for resizing", util.ClaimToClaimKey(pvc), pvcSize.String(), pvcStatusSize.String()) + klog.V(4).Infof("Adding pvc %s with Size %s/%s for resizing", util.ClaimToClaimKey(pvc), pvcSize.String(), pvcStatusSize.String()) pvcRequest := &PVCWithResizeRequest{ PVC: pvc, @@ -144,7 +144,7 @@ func (resizeMap *volumeResizeMap) GetPVCsWithResizeRequest() []*PVCWithResizeReq // deleting a pvc in this map doesn't affect operations that are already inflight. 
func (resizeMap *volumeResizeMap) DeletePVC(pvc *v1.PersistentVolumeClaim) { pvcUniqueName := types.UniquePVCName(pvc.UID) - glog.V(5).Infof("Removing PVC %v from resize map", pvcUniqueName) + klog.V(5).Infof("Removing PVC %v from resize map", pvcUniqueName) resizeMap.Lock() defer resizeMap.Unlock() delete(resizeMap.pvcrs, pvcUniqueName) @@ -156,7 +156,7 @@ func (resizeMap *volumeResizeMap) MarkAsResized(pvcr *PVCWithResizeRequest, newS err := resizeMap.updatePVCCapacityAndConditions(pvcr, newSize, emptyCondition) if err != nil { - glog.V(4).Infof("Error updating PV spec capacity for volume %q with : %v", pvcr.QualifiedName(), err) + klog.V(4).Infof("Error updating PV spec capacity for volume %q with : %v", pvcr.QualifiedName(), err) return err } return nil @@ -205,7 +205,7 @@ func (resizeMap *volumeResizeMap) UpdatePVSize(pvcr *PVCWithResizeRequest, newSi _, updateErr := resizeMap.kubeClient.CoreV1().PersistentVolumes().Patch(pvClone.Name, commontypes.StrategicMergePatchType, patchBytes) if updateErr != nil { - glog.V(4).Infof("Error updating pv %q with error : %v", pvClone.Name, updateErr) + klog.V(4).Infof("Error updating pv %q with error : %v", pvClone.Name, updateErr) return updateErr } return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/BUILD index 6936a128ef07..ae16c23f1135 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/BUILD @@ -13,6 +13,7 @@ go_library( "pv_controller.go", "pv_controller_base.go", "scheduler_assume_cache.go", + "scheduler_bind_cache_metrics.go", "scheduler_binder.go", "scheduler_binder_cache.go", "scheduler_binder_fake.go", @@ -21,7 +22,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/controller/volume/persistentvolume", deps = [ "//pkg/apis/core/v1/helper:go_default_library", - "//pkg/cloudprovider:go_default_library", "//pkg/controller:go_default_library", "//pkg/controller/volume/events:go_default_library", "//pkg/controller/volume/persistentvolume/metrics:go_default_library", @@ -56,8 +56,10 @@ go_library( "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/client-go/tools/reference:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", + "//staging/src/k8s.io/cloud-provider:go_default_library", "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -68,6 +70,7 @@ go_test( "delete_test.go", "framework_test.go", "index_test.go", + "main_test.go", "provision_test.go", "pv_controller_test.go", "recycle_test.go", @@ -80,6 +83,7 @@ go_test( "//pkg/api/testapi:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/controller:go_default_library", + "//pkg/features:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//pkg/volume/util/recyclerclient:go_default_library", @@ -94,6 +98,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + 
"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", @@ -104,7 +109,7 @@ go_test( "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/client-go/tools/reference:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/index.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/index.go index a23cd6faa0b6..19195e815236 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/index.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/index.go @@ -158,13 +158,13 @@ func findMatchingVolume( volumeQty := volume.Spec.Capacity[v1.ResourceStorage] - // check if volumeModes do not match (Alpha and feature gate protected) - isMisMatch, err := checkVolumeModeMisMatches(&claim.Spec, &volume.Spec) + // check if volumeModes do not match (feature gate protected) + isMismatch, err := checkVolumeModeMismatches(&claim.Spec, &volume.Spec) if err != nil { return nil, fmt.Errorf("error checking if volumeMode was a mismatch: %v", err) } // filter out mismatching volumeModes - if isMisMatch { + if isMismatch { continue } @@ -211,11 +211,18 @@ func findMatchingVolume( } // filter out: + // - volumes in non-available phase // - volumes bound to another claim // - volumes whose labels don't match the claim's selector, if specified // - volumes in Class that is not requested // - volumes whose NodeAffinity does not match the node - if volume.Spec.ClaimRef != nil { + if volume.Status.Phase != v1.VolumeAvailable { + // We ignore volumes in non-available phase, because volumes that + // satisfies matching criteria will be updated to available, binding + // them now has high chance of encountering unnecessary failures + // due to API conflicts. + continue + } else if volume.Spec.ClaimRef != nil { continue } else if selector != nil && !selector.Matches(labels.Set(volume.Labels)) { continue @@ -251,25 +258,24 @@ func findMatchingVolume( return nil, nil } -// checkVolumeModeMatches is a convenience method that checks volumeMode for PersistentVolume -// and PersistentVolumeClaims along with making sure that the Alpha feature gate BlockVolume is -// enabled. -// This is Alpha and could change in the future. -func checkVolumeModeMisMatches(pvcSpec *v1.PersistentVolumeClaimSpec, pvSpec *v1.PersistentVolumeSpec) (bool, error) { - if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { - if pvSpec.VolumeMode != nil && pvcSpec.VolumeMode != nil { - requestedVolumeMode := *pvcSpec.VolumeMode - pvVolumeMode := *pvSpec.VolumeMode - return requestedVolumeMode != pvVolumeMode, nil - } else { - // This also should retrun an error, this means that - // the defaulting has failed. 
- return true, fmt.Errorf("api defaulting for volumeMode failed") - } - } else { - // feature gate is disabled +// checkVolumeModeMismatches is a convenience method that checks volumeMode for PersistentVolume +// and PersistentVolumeClaims +func checkVolumeModeMismatches(pvcSpec *v1.PersistentVolumeClaimSpec, pvSpec *v1.PersistentVolumeSpec) (bool, error) { + if !utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { return false, nil } + + // In HA upgrades, we cannot guarantee that the apiserver is on a version >= controller-manager. + // So we default a nil volumeMode to filesystem + requestedVolumeMode := v1.PersistentVolumeFilesystem + if pvcSpec.VolumeMode != nil { + requestedVolumeMode = *pvcSpec.VolumeMode + } + pvVolumeMode := v1.PersistentVolumeFilesystem + if pvSpec.VolumeMode != nil { + pvVolumeMode = *pvSpec.VolumeMode + } + return requestedVolumeMode != pvVolumeMode, nil } // findBestMatchForClaim is a convenience method that finds a volume by the claim's AccessModes and requests for Storage diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics/BUILD index e231fc8d5c4a..d686e4df42fe 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics/BUILD @@ -1,18 +1,14 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = ["metrics.go"], importpath = "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics", + visibility = ["//visibility:public"], deps = [ "//staging/src/k8s.io/api/core/v1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -27,4 +23,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics/metrics.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics/metrics.go index 2c26935b3a4c..1184378777c1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics/metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics/metrics.go @@ -21,8 +21,8 @@ import ( "k8s.io/api/core/v1" - "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" + "k8s.io/klog" ) const ( @@ -139,7 +139,7 @@ func (collector *pvAndPVCCountCollector) pvCollect(ch chan<- prometheus.Metric) float64(number), storageClassName) if err != nil { - glog.Warningf("Create bound pv number metric failed: %v", err) + klog.Warningf("Create bound pv number metric failed: %v", err) continue } ch <- metric @@ -151,7 +151,7 @@ func (collector *pvAndPVCCountCollector) pvCollect(ch chan<- prometheus.Metric) float64(number), storageClassName) if err != nil { - glog.Warningf("Create unbound pv number metric failed: %v", err) + klog.Warningf("Create unbound pv number metric failed: %v", err) continue } ch <- metric @@ -179,7 +179,7 @@ func (collector *pvAndPVCCountCollector) 
pvcCollect(ch chan<- prometheus.Metric) float64(number), namespace) if err != nil { - glog.Warningf("Create bound pvc number metric failed: %v", err) + klog.Warningf("Create bound pvc number metric failed: %v", err) continue } ch <- metric @@ -191,7 +191,7 @@ func (collector *pvAndPVCCountCollector) pvcCollect(ch chan<- prometheus.Metric) float64(number), namespace) if err != nil { - glog.Warningf("Create unbound pvc number metric failed: %v", err) + klog.Warningf("Create unbound pvc number metric failed: %v", err) continue } ch <- metric diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller.go index a65ffb1b2b0b..e50da0ad4fae 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller.go @@ -37,8 +37,8 @@ import ( "k8s.io/client-go/tools/record" ref "k8s.io/client-go/tools/reference" "k8s.io/client-go/util/workqueue" + cloudprovider "k8s.io/cloud-provider" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" - "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/controller/volume/events" "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics" "k8s.io/kubernetes/pkg/features" @@ -48,7 +48,7 @@ import ( "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/recyclerclient" - "github.com/golang/glog" + "k8s.io/klog" ) // ================================================================== @@ -238,7 +238,7 @@ type PersistentVolumeController struct { // For easier readability, it was split into syncUnboundClaim and syncBoundClaim // methods. 
func (ctrl *PersistentVolumeController) syncClaim(claim *v1.PersistentVolumeClaim) error { - glog.V(4).Infof("synchronizing PersistentVolumeClaim[%s]: %s", claimToClaimKey(claim), getClaimStatusForLogging(claim)) + klog.V(4).Infof("synchronizing PersistentVolumeClaim[%s]: %s", claimToClaimKey(claim), getClaimStatusForLogging(claim)) if !metav1.HasAnnotation(claim.ObjectMeta, annBindCompleted) { return ctrl.syncUnboundClaim(claim) @@ -270,11 +270,11 @@ func checkVolumeSatisfyClaim(volume *v1.PersistentVolume, claim *v1.PersistentVo return fmt.Errorf("storageClassName does not match") } - isMisMatch, err := checkVolumeModeMisMatches(&claim.Spec, &volume.Spec) + isMismatch, err := checkVolumeModeMismatches(&claim.Spec, &volume.Spec) if err != nil { return fmt.Errorf("error checking volumeMode: %v", err) } - if isMisMatch { + if isMismatch { return fmt.Errorf("incompatible volumeMode") } @@ -330,11 +330,11 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol // [Unit test set 1] volume, err := ctrl.volumes.findBestMatchForClaim(claim, delayBinding) if err != nil { - glog.V(2).Infof("synchronizing unbound PersistentVolumeClaim[%s]: Error finding PV for claim: %v", claimToClaimKey(claim), err) + klog.V(2).Infof("synchronizing unbound PersistentVolumeClaim[%s]: Error finding PV for claim: %v", claimToClaimKey(claim), err) return fmt.Errorf("Error finding PV for claim %q: %v", claimToClaimKey(claim), err) } if volume == nil { - glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: no volume found", claimToClaimKey(claim)) + klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: no volume found", claimToClaimKey(claim)) // No PV could be found // OBSERVATION: pvc is "Pending", will retry switch { @@ -358,7 +358,7 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol } else /* pv != nil */ { // Found a PV for this claim // OBSERVATION: pvc is "Pending", pv is "Available" - glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q found: %s", claimToClaimKey(claim), volume.Name, getVolumeStatusForLogging(volume)) + klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q found: %s", claimToClaimKey(claim), volume.Name, getVolumeStatusForLogging(volume)) if err = ctrl.bind(volume, claim); err != nil { // On any error saving the volume or the claim, subsequent // syncClaim will finish the binding. @@ -370,7 +370,7 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol } else /* pvc.Spec.VolumeName != nil */ { // [Unit test set 2] // User asked for a specific PV. - glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q requested", claimToClaimKey(claim), claim.Spec.VolumeName) + klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q requested", claimToClaimKey(claim), claim.Spec.VolumeName) obj, found, err := ctrl.volumes.store.GetByKey(claim.Spec.VolumeName) if err != nil { return err @@ -379,7 +379,7 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol // User asked for a PV that does not exist. // OBSERVATION: pvc is "Pending" // Retry later. 
- glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q requested and not found, will try again next time", claimToClaimKey(claim), claim.Spec.VolumeName) + klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q requested and not found, will try again next time", claimToClaimKey(claim), claim.Spec.VolumeName) if _, err = ctrl.updateClaimStatus(claim, v1.ClaimPending, nil); err != nil { return err } @@ -389,13 +389,13 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol if !ok { return fmt.Errorf("Cannot convert object from volume cache to volume %q!?: %+v", claim.Spec.VolumeName, obj) } - glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q requested and found: %s", claimToClaimKey(claim), claim.Spec.VolumeName, getVolumeStatusForLogging(volume)) + klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q requested and found: %s", claimToClaimKey(claim), claim.Spec.VolumeName, getVolumeStatusForLogging(volume)) if volume.Spec.ClaimRef == nil { // User asked for a PV that is not claimed // OBSERVATION: pvc is "Pending", pv is "Available" - glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume is unbound, binding", claimToClaimKey(claim)) + klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume is unbound, binding", claimToClaimKey(claim)) if err = checkVolumeSatisfyClaim(volume, claim); err != nil { - glog.V(4).Infof("Can't bind the claim to volume %q: %v", volume.Name, err) + klog.V(4).Infof("Can't bind the claim to volume %q: %v", volume.Name, err) //send an event msg := fmt.Sprintf("Cannot bind to requested volume %q: %s", volume.Name, err) ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.VolumeMismatch, msg) @@ -413,7 +413,7 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol } else if isVolumeBoundToClaim(volume, claim) { // User asked for a PV that is claimed by this PVC // OBSERVATION: pvc is "Pending", pv is "Bound" - glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume already bound, finishing the binding", claimToClaimKey(claim)) + klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume already bound, finishing the binding", claimToClaimKey(claim)) // Finish the volume binding by adding claim UID. if err = ctrl.bind(volume, claim); err != nil { @@ -425,7 +425,7 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol // User asked for a PV that is claimed by someone else // OBSERVATION: pvc is "Pending", pv is "Bound" if !metav1.HasAnnotation(claim.ObjectMeta, annBoundByController) { - glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume already bound to different claim by user, will retry later", claimToClaimKey(claim)) + klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume already bound to different claim by user, will retry later", claimToClaimKey(claim)) // User asked for a specific PV, retry later if _, err = ctrl.updateClaimStatus(claim, v1.ClaimPending, nil); err != nil { return err @@ -434,7 +434,7 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol } else { // This should never happen because someone had to remove // annBindCompleted annotation on the claim. 
- glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume already bound to different claim %q by controller, THIS SHOULD NEVER HAPPEN", claimToClaimKey(claim), claimrefToClaimKey(volume.Spec.ClaimRef)) + klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume already bound to different claim %q by controller, THIS SHOULD NEVER HAPPEN", claimToClaimKey(claim), claimrefToClaimKey(volume.Spec.ClaimRef)) return fmt.Errorf("Invalid binding of claim %q to volume %q: volume already claimed by %q", claimToClaimKey(claim), claim.Spec.VolumeName, claimrefToClaimKey(volume.Spec.ClaimRef)) } } @@ -472,13 +472,13 @@ func (ctrl *PersistentVolumeController) syncBoundClaim(claim *v1.PersistentVolum return fmt.Errorf("Cannot convert object from volume cache to volume %q!?: %#v", claim.Spec.VolumeName, obj) } - glog.V(4).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume %q found: %s", claimToClaimKey(claim), claim.Spec.VolumeName, getVolumeStatusForLogging(volume)) + klog.V(4).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume %q found: %s", claimToClaimKey(claim), claim.Spec.VolumeName, getVolumeStatusForLogging(volume)) if volume.Spec.ClaimRef == nil { // Claim is bound but volume has come unbound. // Or, a claim was bound and the controller has not received updated // volume yet. We can't distinguish these cases. // Bind the volume again and set all states to Bound. - glog.V(4).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume is unbound, fixing", claimToClaimKey(claim)) + klog.V(4).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume is unbound, fixing", claimToClaimKey(claim)) if err = ctrl.bind(volume, claim); err != nil { // Objects not saved, next syncPV or syncClaim will try again return err @@ -489,7 +489,7 @@ func (ctrl *PersistentVolumeController) syncBoundClaim(claim *v1.PersistentVolum // NOTE: syncPV can handle this so it can be left out. // NOTE: bind() call here will do nothing in most cases as // everything should be already set. - glog.V(4).Infof("synchronizing bound PersistentVolumeClaim[%s]: claim is already correctly bound", claimToClaimKey(claim)) + klog.V(4).Infof("synchronizing bound PersistentVolumeClaim[%s]: claim is already correctly bound", claimToClaimKey(claim)) if err = ctrl.bind(volume, claim); err != nil { // Objects not saved, next syncPV or syncClaim will try again return err @@ -512,12 +512,12 @@ func (ctrl *PersistentVolumeController) syncBoundClaim(claim *v1.PersistentVolum // created, updated or periodically synced. We do not differentiate between // these events. 
func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume) error { - glog.V(4).Infof("synchronizing PersistentVolume[%s]: %s", volume.Name, getVolumeStatusForLogging(volume)) + klog.V(4).Infof("synchronizing PersistentVolume[%s]: %s", volume.Name, getVolumeStatusForLogging(volume)) // [Unit test set 4] if volume.Spec.ClaimRef == nil { // Volume is unused - glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is unused", volume.Name) + klog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is unused", volume.Name) if _, err := ctrl.updateVolumePhase(volume, v1.VolumeAvailable, ""); err != nil { // Nothing was saved; we will fall back into the same // condition in the next call to this method @@ -529,7 +529,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume) if volume.Spec.ClaimRef.UID == "" { // The PV is reserved for a PVC; that PVC has not yet been // bound to this PV; the PVC sync will handle it. - glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is pre-bound to claim %s", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) + klog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is pre-bound to claim %s", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) if _, err := ctrl.updateVolumePhase(volume, v1.VolumeAvailable, ""); err != nil { // Nothing was saved; we will fall back into the same // condition in the next call to this method @@ -537,7 +537,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume) } return nil } - glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is bound to claim %s", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) + klog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is bound to claim %s", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) // Get the PVC by _name_ var claim *v1.PersistentVolumeClaim claimName := claimrefToClaimKey(volume.Spec.ClaimRef) @@ -570,7 +570,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume) } } if !found { - glog.V(4).Infof("synchronizing PersistentVolume[%s]: claim %s not found", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) + klog.V(4).Infof("synchronizing PersistentVolume[%s]: claim %s not found", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) // Fall through with claim = nil } else { var ok bool @@ -578,12 +578,12 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume) if !ok { return fmt.Errorf("Cannot convert object from volume cache to volume %q!?: %#v", claim.Spec.VolumeName, obj) } - glog.V(4).Infof("synchronizing PersistentVolume[%s]: claim %s found: %s", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef), getClaimStatusForLogging(claim)) + klog.V(4).Infof("synchronizing PersistentVolume[%s]: claim %s found: %s", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef), getClaimStatusForLogging(claim)) } if claim != nil && claim.UID != volume.Spec.ClaimRef.UID { // The claim that the PV was pointing to was deleted, and another // with the same name created. - glog.V(4).Infof("synchronizing PersistentVolume[%s]: claim %s has different UID, the old one must have been deleted", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) + klog.V(4).Infof("synchronizing PersistentVolume[%s]: claim %s has different UID, the old one must have been deleted", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) // Treat the volume as bound to a missing claim. 
claim = nil } @@ -598,7 +598,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume) // volume. if volume.Status.Phase != v1.VolumeReleased && volume.Status.Phase != v1.VolumeFailed { // Also, log this only once: - glog.V(2).Infof("volume %q is released and reclaim policy %q will be executed", volume.Name, volume.Spec.PersistentVolumeReclaimPolicy) + klog.V(2).Infof("volume %q is released and reclaim policy %q will be executed", volume.Name, volume.Spec.PersistentVolumeReclaimPolicy) if volume, err = ctrl.updateVolumePhase(volume, v1.VolumeReleased, ""); err != nil { // Nothing was saved; we will fall back into the same condition // in the next call to this method @@ -613,7 +613,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume) } return nil } else if claim.Spec.VolumeName == "" { - if isMisMatch, err := checkVolumeModeMisMatches(&claim.Spec, &volume.Spec); err != nil || isMisMatch { + if isMismatch, err := checkVolumeModeMismatches(&claim.Spec, &volume.Spec); err != nil || isMismatch { // Binding for the volume won't be called in syncUnboundClaim, // because findBestMatchForClaim won't return the volume due to volumeMode mismatch. volumeMsg := fmt.Sprintf("Cannot bind PersistentVolume to requested PersistentVolumeClaim %q due to incompatible volumeMode.", claim.Name) @@ -626,10 +626,10 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume) if metav1.HasAnnotation(volume.ObjectMeta, annBoundByController) { // The binding is not completed; let PVC sync handle it - glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume not bound yet, waiting for syncClaim to fix it", volume.Name) + klog.V(4).Infof("synchronizing PersistentVolume[%s]: volume not bound yet, waiting for syncClaim to fix it", volume.Name) } else { // Dangling PV; try to re-establish the link in the PVC sync - glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume was bound and got unbound (by user?), waiting for syncClaim to fix it", volume.Name) + klog.V(4).Infof("synchronizing PersistentVolume[%s]: volume was bound and got unbound (by user?), waiting for syncClaim to fix it", volume.Name) } // In both cases, the volume is Bound and the claim is Pending. // Next syncClaim will fix it. To speed it up, we enqueue the claim @@ -642,7 +642,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume) return nil } else if claim.Spec.VolumeName == volume.Name { // Volume is bound to a claim properly, update status if necessary - glog.V(4).Infof("synchronizing PersistentVolume[%s]: all is bound", volume.Name) + klog.V(4).Infof("synchronizing PersistentVolume[%s]: all is bound", volume.Name) if _, err = ctrl.updateVolumePhase(volume, v1.VolumeBound, ""); err != nil { // Nothing was saved; we will fall back into the same // condition in the next call to this method @@ -659,7 +659,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume) // the user know. Don't overwrite existing Failed status! 
if volume.Status.Phase != v1.VolumeReleased && volume.Status.Phase != v1.VolumeFailed { // Also, log this only once: - glog.V(2).Infof("dynamically volume %q is released and it will be deleted", volume.Name) + klog.V(2).Infof("dynamically volume %q is released and it will be deleted", volume.Name) if volume, err = ctrl.updateVolumePhase(volume, v1.VolumeReleased, ""); err != nil { // Nothing was saved; we will fall back into the same condition // in the next call to this method @@ -679,14 +679,14 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume) // This is part of the normal operation of the controller; the // controller tried to use this volume for a claim but the claim // was fulfilled by another volume. We did this; fix it. - glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is bound by controller to a claim that is bound to another volume, unbinding", volume.Name) + klog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is bound by controller to a claim that is bound to another volume, unbinding", volume.Name) if err = ctrl.unbindVolume(volume); err != nil { return err } return nil } else { // The PV must have been created with this ptr; leave it alone. - glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is bound by user to a claim that is bound to another volume, waiting for the claim to get unbound", volume.Name) + klog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is bound by user to a claim that is bound to another volume, waiting for the claim to get unbound", volume.Name) // This just updates the volume phase and clears // volume.Spec.ClaimRef.UID. It leaves the volume pre-bound // to the claim. @@ -706,7 +706,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume) // phase - phase to set // volume - volume which Capacity is set into claim.Status.Capacity func (ctrl *PersistentVolumeController) updateClaimStatus(claim *v1.PersistentVolumeClaim, phase v1.PersistentVolumeClaimPhase, volume *v1.PersistentVolume) (*v1.PersistentVolumeClaim, error) { - glog.V(4).Infof("updating PersistentVolumeClaim[%s] status: set phase %s", claimToClaimKey(claim), phase) + klog.V(4).Infof("updating PersistentVolumeClaim[%s] status: set phase %s", claimToClaimKey(claim), phase) dirty := false @@ -751,21 +751,21 @@ func (ctrl *PersistentVolumeController) updateClaimStatus(claim *v1.PersistentVo if !dirty { // Nothing to do. 
- glog.V(4).Infof("updating PersistentVolumeClaim[%s] status: phase %s already set", claimToClaimKey(claim), phase) + klog.V(4).Infof("updating PersistentVolumeClaim[%s] status: phase %s already set", claimToClaimKey(claim), phase) return claim, nil } newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).UpdateStatus(claimClone) if err != nil { - glog.V(4).Infof("updating PersistentVolumeClaim[%s] status: set phase %s failed: %v", claimToClaimKey(claim), phase, err) + klog.V(4).Infof("updating PersistentVolumeClaim[%s] status: set phase %s failed: %v", claimToClaimKey(claim), phase, err) return newClaim, err } _, err = ctrl.storeClaimUpdate(newClaim) if err != nil { - glog.V(4).Infof("updating PersistentVolumeClaim[%s] status: cannot update internal cache: %v", claimToClaimKey(claim), err) + klog.V(4).Infof("updating PersistentVolumeClaim[%s] status: cannot update internal cache: %v", claimToClaimKey(claim), err) return newClaim, err } - glog.V(2).Infof("claim %q entered phase %q", claimToClaimKey(claim), phase) + klog.V(2).Infof("claim %q entered phase %q", claimToClaimKey(claim), phase) return newClaim, nil } @@ -778,10 +778,10 @@ func (ctrl *PersistentVolumeController) updateClaimStatus(claim *v1.PersistentVo // volume - volume which Capacity is set into claim.Status.Capacity // eventtype, reason, message - event to send, see EventRecorder.Event() func (ctrl *PersistentVolumeController) updateClaimStatusWithEvent(claim *v1.PersistentVolumeClaim, phase v1.PersistentVolumeClaimPhase, volume *v1.PersistentVolume, eventtype, reason, message string) (*v1.PersistentVolumeClaim, error) { - glog.V(4).Infof("updating updateClaimStatusWithEvent[%s]: set phase %s", claimToClaimKey(claim), phase) + klog.V(4).Infof("updating updateClaimStatusWithEvent[%s]: set phase %s", claimToClaimKey(claim), phase) if claim.Status.Phase == phase { // Nothing to do. - glog.V(4).Infof("updating updateClaimStatusWithEvent[%s]: phase %s already set", claimToClaimKey(claim), phase) + klog.V(4).Infof("updating updateClaimStatusWithEvent[%s]: phase %s already set", claimToClaimKey(claim), phase) return claim, nil } @@ -792,7 +792,7 @@ func (ctrl *PersistentVolumeController) updateClaimStatusWithEvent(claim *v1.Per // Emit the event only when the status change happens, not every time // syncClaim is called. - glog.V(3).Infof("claim %q changed status to %q: %s", claimToClaimKey(claim), phase, message) + klog.V(3).Infof("claim %q changed status to %q: %s", claimToClaimKey(claim), phase, message) ctrl.eventRecorder.Event(newClaim, eventtype, reason, message) return newClaim, nil @@ -800,10 +800,10 @@ func (ctrl *PersistentVolumeController) updateClaimStatusWithEvent(claim *v1.Per // updateVolumePhase saves new volume phase to API server. func (ctrl *PersistentVolumeController) updateVolumePhase(volume *v1.PersistentVolume, phase v1.PersistentVolumePhase, message string) (*v1.PersistentVolume, error) { - glog.V(4).Infof("updating PersistentVolume[%s]: set phase %s", volume.Name, phase) + klog.V(4).Infof("updating PersistentVolume[%s]: set phase %s", volume.Name, phase) if volume.Status.Phase == phase { // Nothing to do. 
- glog.V(4).Infof("updating PersistentVolume[%s]: phase %s already set", volume.Name, phase) + klog.V(4).Infof("updating PersistentVolume[%s]: phase %s already set", volume.Name, phase) return volume, nil } @@ -813,15 +813,15 @@ func (ctrl *PersistentVolumeController) updateVolumePhase(volume *v1.PersistentV newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().UpdateStatus(volumeClone) if err != nil { - glog.V(4).Infof("updating PersistentVolume[%s]: set phase %s failed: %v", volume.Name, phase, err) + klog.V(4).Infof("updating PersistentVolume[%s]: set phase %s failed: %v", volume.Name, phase, err) return newVol, err } _, err = ctrl.storeVolumeUpdate(newVol) if err != nil { - glog.V(4).Infof("updating PersistentVolume[%s]: cannot update internal cache: %v", volume.Name, err) + klog.V(4).Infof("updating PersistentVolume[%s]: cannot update internal cache: %v", volume.Name, err) return newVol, err } - glog.V(2).Infof("volume %q entered phase %q", volume.Name, phase) + klog.V(2).Infof("volume %q entered phase %q", volume.Name, phase) return newVol, err } @@ -829,10 +829,10 @@ func (ctrl *PersistentVolumeController) updateVolumePhase(volume *v1.PersistentV // given event on the volume. It saves the phase and emits the event only when // the phase has actually changed from the version saved in API server. func (ctrl *PersistentVolumeController) updateVolumePhaseWithEvent(volume *v1.PersistentVolume, phase v1.PersistentVolumePhase, eventtype, reason, message string) (*v1.PersistentVolume, error) { - glog.V(4).Infof("updating updateVolumePhaseWithEvent[%s]: set phase %s", volume.Name, phase) + klog.V(4).Infof("updating updateVolumePhaseWithEvent[%s]: set phase %s", volume.Name, phase) if volume.Status.Phase == phase { // Nothing to do. - glog.V(4).Infof("updating updateVolumePhaseWithEvent[%s]: phase %s already set", volume.Name, phase) + klog.V(4).Infof("updating updateVolumePhaseWithEvent[%s]: phase %s already set", volume.Name, phase) return volume, nil } @@ -843,7 +843,7 @@ func (ctrl *PersistentVolumeController) updateVolumePhaseWithEvent(volume *v1.Pe // Emit the event only when the status change happens, not every time // syncClaim is called. - glog.V(3).Infof("volume %q changed status to %q: %s", volume.Name, phase, message) + klog.V(3).Infof("volume %q changed status to %q: %s", volume.Name, phase, message) ctrl.eventRecorder.Event(newVol, eventtype, reason, message) return newVol, nil @@ -852,7 +852,7 @@ func (ctrl *PersistentVolumeController) updateVolumePhaseWithEvent(volume *v1.Pe // bindVolumeToClaim modifies given volume to be bound to a claim and saves it to // API server. The claim is not modified in this method! 
func (ctrl *PersistentVolumeController) bindVolumeToClaim(volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) { - glog.V(4).Infof("updating PersistentVolume[%s]: binding to %q", volume.Name, claimToClaimKey(claim)) + klog.V(4).Infof("updating PersistentVolume[%s]: binding to %q", volume.Name, claimToClaimKey(claim)) volumeClone, dirty, err := ctrl.getBindVolumeToClaim(volume, claim) if err != nil { @@ -864,27 +864,27 @@ func (ctrl *PersistentVolumeController) bindVolumeToClaim(volume *v1.PersistentV return ctrl.updateBindVolumeToClaim(volumeClone, claim, true) } - glog.V(4).Infof("updating PersistentVolume[%s]: already bound to %q", volume.Name, claimToClaimKey(claim)) + klog.V(4).Infof("updating PersistentVolume[%s]: already bound to %q", volume.Name, claimToClaimKey(claim)) return volume, nil } // bindVolumeToClaim modifies given volume to be bound to a claim and saves it to // API server. The claim is not modified in this method! func (ctrl *PersistentVolumeController) updateBindVolumeToClaim(volumeClone *v1.PersistentVolume, claim *v1.PersistentVolumeClaim, updateCache bool) (*v1.PersistentVolume, error) { - glog.V(2).Infof("claim %q bound to volume %q", claimToClaimKey(claim), volumeClone.Name) + klog.V(2).Infof("claim %q bound to volume %q", claimToClaimKey(claim), volumeClone.Name) newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(volumeClone) if err != nil { - glog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", volumeClone.Name, claimToClaimKey(claim), err) + klog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", volumeClone.Name, claimToClaimKey(claim), err) return newVol, err } if updateCache { _, err = ctrl.storeVolumeUpdate(newVol) if err != nil { - glog.V(4).Infof("updating PersistentVolume[%s]: cannot update internal cache: %v", volumeClone.Name, err) + klog.V(4).Infof("updating PersistentVolume[%s]: cannot update internal cache: %v", volumeClone.Name, err) return newVol, err } } - glog.V(4).Infof("updating PersistentVolume[%s]: bound to %q", newVol.Name, claimToClaimKey(claim)) + klog.V(4).Infof("updating PersistentVolume[%s]: bound to %q", newVol.Name, claimToClaimKey(claim)) return newVol, nil } @@ -928,7 +928,7 @@ func (ctrl *PersistentVolumeController) getBindVolumeToClaim(volume *v1.Persiste // bindClaimToVolume modifies the given claim to be bound to a volume and // saves it to API server. The volume is not modified in this method! 
func (ctrl *PersistentVolumeController) bindClaimToVolume(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) (*v1.PersistentVolumeClaim, error) { - glog.V(4).Infof("updating PersistentVolumeClaim[%s]: binding to %q", claimToClaimKey(claim), volume.Name) + klog.V(4).Infof("updating PersistentVolumeClaim[%s]: binding to %q", claimToClaimKey(claim), volume.Name) dirty := false @@ -960,22 +960,22 @@ func (ctrl *PersistentVolumeController) bindClaimToVolume(claim *v1.PersistentVo } if dirty { - glog.V(2).Infof("volume %q bound to claim %q", volume.Name, claimToClaimKey(claim)) + klog.V(2).Infof("volume %q bound to claim %q", volume.Name, claimToClaimKey(claim)) newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claimClone) if err != nil { - glog.V(4).Infof("updating PersistentVolumeClaim[%s]: binding to %q failed: %v", claimToClaimKey(claim), volume.Name, err) + klog.V(4).Infof("updating PersistentVolumeClaim[%s]: binding to %q failed: %v", claimToClaimKey(claim), volume.Name, err) return newClaim, err } _, err = ctrl.storeClaimUpdate(newClaim) if err != nil { - glog.V(4).Infof("updating PersistentVolumeClaim[%s]: cannot update internal cache: %v", claimToClaimKey(claim), err) + klog.V(4).Infof("updating PersistentVolumeClaim[%s]: cannot update internal cache: %v", claimToClaimKey(claim), err) return newClaim, err } - glog.V(4).Infof("updating PersistentVolumeClaim[%s]: bound to %q", claimToClaimKey(claim), volume.Name) + klog.V(4).Infof("updating PersistentVolumeClaim[%s]: bound to %q", claimToClaimKey(claim), volume.Name) return newClaim, nil } - glog.V(4).Infof("updating PersistentVolumeClaim[%s]: already bound to %q", claimToClaimKey(claim), volume.Name) + klog.V(4).Infof("updating PersistentVolumeClaim[%s]: already bound to %q", claimToClaimKey(claim), volume.Name) return claim, nil } @@ -990,35 +990,35 @@ func (ctrl *PersistentVolumeController) bind(volume *v1.PersistentVolume, claim var updatedClaim *v1.PersistentVolumeClaim var updatedVolume *v1.PersistentVolume - glog.V(4).Infof("binding volume %q to claim %q", volume.Name, claimToClaimKey(claim)) + klog.V(4).Infof("binding volume %q to claim %q", volume.Name, claimToClaimKey(claim)) if updatedVolume, err = ctrl.bindVolumeToClaim(volume, claim); err != nil { - glog.V(3).Infof("error binding volume %q to claim %q: failed saving the volume: %v", volume.Name, claimToClaimKey(claim), err) + klog.V(3).Infof("error binding volume %q to claim %q: failed saving the volume: %v", volume.Name, claimToClaimKey(claim), err) return err } volume = updatedVolume if updatedVolume, err = ctrl.updateVolumePhase(volume, v1.VolumeBound, ""); err != nil { - glog.V(3).Infof("error binding volume %q to claim %q: failed saving the volume status: %v", volume.Name, claimToClaimKey(claim), err) + klog.V(3).Infof("error binding volume %q to claim %q: failed saving the volume status: %v", volume.Name, claimToClaimKey(claim), err) return err } volume = updatedVolume if updatedClaim, err = ctrl.bindClaimToVolume(claim, volume); err != nil { - glog.V(3).Infof("error binding volume %q to claim %q: failed saving the claim: %v", volume.Name, claimToClaimKey(claim), err) + klog.V(3).Infof("error binding volume %q to claim %q: failed saving the claim: %v", volume.Name, claimToClaimKey(claim), err) return err } claim = updatedClaim if updatedClaim, err = ctrl.updateClaimStatus(claim, v1.ClaimBound, volume); err != nil { - glog.V(3).Infof("error binding volume %q to claim %q: failed saving the claim status: %v", volume.Name, 
claimToClaimKey(claim), err) + klog.V(3).Infof("error binding volume %q to claim %q: failed saving the claim status: %v", volume.Name, claimToClaimKey(claim), err) return err } claim = updatedClaim - glog.V(4).Infof("volume %q bound to claim %q", volume.Name, claimToClaimKey(claim)) - glog.V(4).Infof("volume %q status after binding: %s", volume.Name, getVolumeStatusForLogging(volume)) - glog.V(4).Infof("claim %q status after binding: %s", claimToClaimKey(claim), getClaimStatusForLogging(claim)) + klog.V(4).Infof("volume %q bound to claim %q", volume.Name, claimToClaimKey(claim)) + klog.V(4).Infof("volume %q status after binding: %s", volume.Name, getVolumeStatusForLogging(volume)) + klog.V(4).Infof("claim %q status after binding: %s", claimToClaimKey(claim), getClaimStatusForLogging(claim)) return nil } @@ -1029,7 +1029,7 @@ func (ctrl *PersistentVolumeController) bind(volume *v1.PersistentVolume, claim // It returns on first error, it's up to the caller to implement some retry // mechanism. func (ctrl *PersistentVolumeController) unbindVolume(volume *v1.PersistentVolume) error { - glog.V(4).Infof("updating PersistentVolume[%s]: rolling back binding from %q", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) + klog.V(4).Infof("updating PersistentVolume[%s]: rolling back binding from %q", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) // Save the PV only when any modification is necessary. volumeClone := volume.DeepCopy() @@ -1050,15 +1050,15 @@ func (ctrl *PersistentVolumeController) unbindVolume(volume *v1.PersistentVolume newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(volumeClone) if err != nil { - glog.V(4).Infof("updating PersistentVolume[%s]: rollback failed: %v", volume.Name, err) + klog.V(4).Infof("updating PersistentVolume[%s]: rollback failed: %v", volume.Name, err) return err } _, err = ctrl.storeVolumeUpdate(newVol) if err != nil { - glog.V(4).Infof("updating PersistentVolume[%s]: cannot update internal cache: %v", volume.Name, err) + klog.V(4).Infof("updating PersistentVolume[%s]: cannot update internal cache: %v", volume.Name, err) return err } - glog.V(4).Infof("updating PersistentVolume[%s]: rolled back", newVol.Name) + klog.V(4).Infof("updating PersistentVolume[%s]: rolled back", newVol.Name) // Update the status _, err = ctrl.updateVolumePhase(newVol, v1.VolumeAvailable, "") @@ -1070,10 +1070,10 @@ func (ctrl *PersistentVolumeController) unbindVolume(volume *v1.PersistentVolume func (ctrl *PersistentVolumeController) reclaimVolume(volume *v1.PersistentVolume) error { switch volume.Spec.PersistentVolumeReclaimPolicy { case v1.PersistentVolumeReclaimRetain: - glog.V(4).Infof("reclaimVolume[%s]: policy is Retain, nothing to do", volume.Name) + klog.V(4).Infof("reclaimVolume[%s]: policy is Retain, nothing to do", volume.Name) case v1.PersistentVolumeReclaimRecycle: - glog.V(4).Infof("reclaimVolume[%s]: policy is Recycle", volume.Name) + klog.V(4).Infof("reclaimVolume[%s]: policy is Recycle", volume.Name) opName := fmt.Sprintf("recycle-%s[%s]", volume.Name, string(volume.UID)) ctrl.scheduleOperation(opName, func() error { ctrl.recycleVolumeOperation(volume) @@ -1081,7 +1081,7 @@ func (ctrl *PersistentVolumeController) reclaimVolume(volume *v1.PersistentVolum }) case v1.PersistentVolumeReclaimDelete: - glog.V(4).Infof("reclaimVolume[%s]: policy is Delete", volume.Name) + klog.V(4).Infof("reclaimVolume[%s]: policy is Delete", volume.Name) opName := fmt.Sprintf("delete-%s[%s]", volume.Name, string(volume.UID)) startTime := time.Now() 
ctrl.scheduleOperation(opName, func() error { @@ -1103,33 +1103,45 @@ func (ctrl *PersistentVolumeController) reclaimVolume(volume *v1.PersistentVolum // doRerecycleVolumeOperationcycleVolume recycles a volume. This method is // running in standalone goroutine and already has all necessary locks. func (ctrl *PersistentVolumeController) recycleVolumeOperation(volume *v1.PersistentVolume) { - glog.V(4).Infof("recycleVolumeOperation [%s] started", volume.Name) + klog.V(4).Infof("recycleVolumeOperation [%s] started", volume.Name) // This method may have been waiting for a volume lock for some time. // Previous recycleVolumeOperation might just have saved an updated version, // so read current volume state now. newVolume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(volume.Name, metav1.GetOptions{}) if err != nil { - glog.V(3).Infof("error reading persistent volume %q: %v", volume.Name, err) + klog.V(3).Infof("error reading persistent volume %q: %v", volume.Name, err) return } needsReclaim, err := ctrl.isVolumeReleased(newVolume) if err != nil { - glog.V(3).Infof("error reading claim for volume %q: %v", volume.Name, err) + klog.V(3).Infof("error reading claim for volume %q: %v", volume.Name, err) return } if !needsReclaim { - glog.V(3).Infof("volume %q no longer needs recycling, skipping", volume.Name) + klog.V(3).Infof("volume %q no longer needs recycling, skipping", volume.Name) return } pods, used, err := ctrl.isVolumeUsed(newVolume) if err != nil { - glog.V(3).Infof("can't recycle volume %q: %v", volume.Name, err) + klog.V(3).Infof("can't recycle volume %q: %v", volume.Name, err) return } - if used { + + // Verify the claim is in cache: if so, then it is a different PVC with the same name + // since the volume is known to be released at this moment. Ths new (cached) PVC must use + // a different PV -- we checked that the PV is unused in isVolumeReleased. + // So the old PV is safe to be recycled. + claimName := claimrefToClaimKey(volume.Spec.ClaimRef) + _, claimCached, err := ctrl.claims.GetByKey(claimName) + if err != nil { + klog.V(3).Infof("error getting the claim %s from cache", claimName) + return + } + + if used && !claimCached { msg := fmt.Sprintf("Volume is used by pods: %s", strings.Join(pods, ",")) - glog.V(3).Infof("can't recycle volume %q: %s", volume.Name, msg) + klog.V(3).Infof("can't recycle volume %q: %s", volume.Name, msg) ctrl.eventRecorder.Event(volume, v1.EventTypeNormal, events.VolumeFailedRecycle, msg) return } @@ -1144,7 +1156,7 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(volume *v1.Persis if err != nil { // No recycler found. Emit an event and mark the volume Failed. 
if _, err = ctrl.updateVolumePhaseWithEvent(volume, v1.VolumeFailed, v1.EventTypeWarning, events.VolumeFailedRecycle, "No recycler plugin found for the volume!"); err != nil { - glog.V(4).Infof("recycleVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err) + klog.V(4).Infof("recycleVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err) // Save failed, retry on the next deletion attempt return } @@ -1160,7 +1172,7 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(volume *v1.Persis // Recycler failed strerr := fmt.Sprintf("Recycle failed: %s", err) if _, err = ctrl.updateVolumePhaseWithEvent(volume, v1.VolumeFailed, v1.EventTypeWarning, events.VolumeFailedRecycle, strerr); err != nil { - glog.V(4).Infof("recycleVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err) + klog.V(4).Infof("recycleVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err) // Save failed, retry on the next deletion attempt return } @@ -1169,7 +1181,7 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(volume *v1.Persis return } - glog.V(2).Infof("volume %q recycled", volume.Name) + klog.V(2).Infof("volume %q recycled", volume.Name) // Send an event ctrl.eventRecorder.Event(volume, v1.EventTypeNormal, events.VolumeRecycled, "Volume recycled") // Make the volume available again @@ -1178,7 +1190,7 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(volume *v1.Persis // recycle the volume again on next update. We _could_ maintain a cache // of "recently recycled volumes" and avoid unnecessary recycling, this // is left out as future optimization. - glog.V(3).Infof("recycleVolumeOperation [%s]: failed to make recycled volume 'Available' (%v), we will recycle the volume again", volume.Name, err) + klog.V(3).Infof("recycleVolumeOperation [%s]: failed to make recycled volume 'Available' (%v), we will recycle the volume again", volume.Name, err) return } return @@ -1187,30 +1199,30 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(volume *v1.Persis // deleteVolumeOperation deletes a volume. This method is running in standalone // goroutine and already has all necessary locks. func (ctrl *PersistentVolumeController) deleteVolumeOperation(volume *v1.PersistentVolume) (string, error) { - glog.V(4).Infof("deleteVolumeOperation [%s] started", volume.Name) + klog.V(4).Infof("deleteVolumeOperation [%s] started", volume.Name) // This method may have been waiting for a volume lock for some time. // Previous deleteVolumeOperation might just have saved an updated version, so // read current volume state now. newVolume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(volume.Name, metav1.GetOptions{}) if err != nil { - glog.V(3).Infof("error reading persistent volume %q: %v", volume.Name, err) + klog.V(3).Infof("error reading persistent volume %q: %v", volume.Name, err) return "", nil } needsReclaim, err := ctrl.isVolumeReleased(newVolume) if err != nil { - glog.V(3).Infof("error reading claim for volume %q: %v", volume.Name, err) + klog.V(3).Infof("error reading claim for volume %q: %v", volume.Name, err) return "", nil } if !needsReclaim { - glog.V(3).Infof("volume %q no longer needs deletion, skipping", volume.Name) + klog.V(3).Infof("volume %q no longer needs deletion, skipping", volume.Name) return "", nil } pluginName, deleted, err := ctrl.doDeleteVolume(volume) if err != nil { // Delete failed, update the volume and emit an event. 
- glog.V(3).Infof("deletion of volume %q failed: %v", volume.Name, err) + klog.V(3).Infof("deletion of volume %q failed: %v", volume.Name, err) if vol.IsDeletedVolumeInUse(err) { // The plugin needs more time, don't mark the volume as Failed // and send Normal event only @@ -1219,7 +1231,7 @@ func (ctrl *PersistentVolumeController) deleteVolumeOperation(volume *v1.Persist // The plugin failed, mark the volume as Failed and send Warning // event if _, err := ctrl.updateVolumePhaseWithEvent(volume, v1.VolumeFailed, v1.EventTypeWarning, events.VolumeFailedDelete, err.Error()); err != nil { - glog.V(4).Infof("deleteVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err) + klog.V(4).Infof("deleteVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err) // Save failed, retry on the next deletion attempt return pluginName, err } @@ -1234,14 +1246,14 @@ func (ctrl *PersistentVolumeController) deleteVolumeOperation(volume *v1.Persist return pluginName, nil } - glog.V(4).Infof("deleteVolumeOperation [%s]: success", volume.Name) + klog.V(4).Infof("deleteVolumeOperation [%s]: success", volume.Name) // Delete the volume if err = ctrl.kubeClient.CoreV1().PersistentVolumes().Delete(volume.Name, nil); err != nil { // Oops, could not delete the volume and therefore the controller will // try to delete the volume again on next update. We _could_ maintain a // cache of "recently deleted volumes" and avoid unnecessary deletion, // this is left out as future optimization. - glog.V(3).Infof("failed to delete volume %q from database: %v", volume.Name, err) + klog.V(3).Infof("failed to delete volume %q from database: %v", volume.Name, err) return pluginName, nil } return pluginName, nil @@ -1254,13 +1266,13 @@ func (ctrl *PersistentVolumeController) isVolumeReleased(volume *v1.PersistentVo // A volume needs reclaim if it has ClaimRef and appropriate claim does not // exist. if volume.Spec.ClaimRef == nil { - glog.V(4).Infof("isVolumeReleased[%s]: ClaimRef is nil", volume.Name) + klog.V(4).Infof("isVolumeReleased[%s]: ClaimRef is nil", volume.Name) return false, nil } if volume.Spec.ClaimRef.UID == "" { // This is a volume bound by user and the controller has not finished // binding to the real claim yet. - glog.V(4).Infof("isVolumeReleased[%s]: ClaimRef is not bound", volume.Name) + klog.V(4).Infof("isVolumeReleased[%s]: ClaimRef is not bound", volume.Name) return false, nil } @@ -1287,11 +1299,11 @@ func (ctrl *PersistentVolumeController) isVolumeReleased(volume *v1.PersistentVo return true, nil } - glog.V(4).Infof("isVolumeReleased[%s]: ClaimRef is still valid, volume is not released", volume.Name) + klog.V(4).Infof("isVolumeReleased[%s]: ClaimRef is still valid, volume is not released", volume.Name) return false, nil } - glog.V(2).Infof("isVolumeReleased[%s]: volume is released", volume.Name) + klog.V(2).Infof("isVolumeReleased[%s]: volume is released", volume.Name) return true, nil } @@ -1326,7 +1338,7 @@ func (ctrl *PersistentVolumeController) isVolumeUsed(pv *v1.PersistentVolume) ([ // 'false' when the volume cannot be deleted because of the deleter is external. No // error should be reported in this case. 
func (ctrl *PersistentVolumeController) doDeleteVolume(volume *v1.PersistentVolume) (string, bool, error) { - glog.V(4).Infof("doDeleteVolume [%s]", volume.Name) + klog.V(4).Infof("doDeleteVolume [%s]", volume.Name) var err error plugin, err := ctrl.findDeletablePlugin(volume) @@ -1335,13 +1347,13 @@ func (ctrl *PersistentVolumeController) doDeleteVolume(volume *v1.PersistentVolu } if plugin == nil { // External deleter is requested, do nothing - glog.V(3).Infof("external deleter for volume %q requested, ignoring", volume.Name) + klog.V(3).Infof("external deleter for volume %q requested, ignoring", volume.Name) return "", false, nil } // Plugin found pluginName := plugin.GetPluginName() - glog.V(5).Infof("found a deleter plugin %q for volume %q", pluginName, volume.Name) + klog.V(5).Infof("found a deleter plugin %q for volume %q", pluginName, volume.Name) spec := vol.NewSpecFromPersistentVolume(volume, false) deleter, err := plugin.NewDeleter(spec) if err != nil { @@ -1357,7 +1369,7 @@ func (ctrl *PersistentVolumeController) doDeleteVolume(volume *v1.PersistentVolu return pluginName, false, err } - glog.V(2).Infof("volume %q deleted", volume.Name) + klog.V(2).Infof("volume %q deleted", volume.Name) return pluginName, true, nil } @@ -1367,7 +1379,7 @@ func (ctrl *PersistentVolumeController) provisionClaim(claim *v1.PersistentVolum if !ctrl.enableDynamicProvisioning { return nil } - glog.V(4).Infof("provisionClaim[%s]: started", claimToClaimKey(claim)) + klog.V(4).Infof("provisionClaim[%s]: started", claimToClaimKey(claim)) opName := fmt.Sprintf("provision-%s[%s]", claimToClaimKey(claim), string(claim.UID)) startTime := time.Now() ctrl.scheduleOperation(opName, func() error { @@ -1383,12 +1395,12 @@ func (ctrl *PersistentVolumeController) provisionClaim(claim *v1.PersistentVolum // standalone goroutine and already has all necessary locks. func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.PersistentVolumeClaim) (string, error) { claimClass := v1helper.GetPersistentVolumeClaimClass(claim) - glog.V(4).Infof("provisionClaimOperation [%s] started, class: %q", claimToClaimKey(claim), claimClass) + klog.V(4).Infof("provisionClaimOperation [%s] started, class: %q", claimToClaimKey(claim), claimClass) plugin, storageClass, err := ctrl.findProvisionablePlugin(claim) if err != nil { ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, err.Error()) - glog.V(2).Infof("error finding provisioning plugin for claim %s: %v", claimToClaimKey(claim), err) + klog.V(2).Infof("error finding provisioning plugin for claim %s: %v", claimToClaimKey(claim), err) // The controller will retry provisioning the volume in every // syncVolume() call. 
return "", err @@ -1403,7 +1415,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis newClaim, err := ctrl.setClaimProvisioner(claim, storageClass) if err != nil { // Save failed, the controller will retry in the next sync - glog.V(2).Infof("error saving claim %s: %v", claimToClaimKey(claim), err) + klog.V(2).Infof("error saving claim %s: %v", claimToClaimKey(claim), err) return pluginName, err } claim = newClaim @@ -1414,7 +1426,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis // and wait for the external provisioner msg := fmt.Sprintf("waiting for a volume to be created, either by external provisioner %q or manually created by system administrator", storageClass.Provisioner) ctrl.eventRecorder.Event(claim, v1.EventTypeNormal, events.ExternalProvisioning, msg) - glog.V(3).Infof("provisioning claim %q: %s", claimToClaimKey(claim), msg) + klog.V(3).Infof("provisioning claim %q: %s", claimToClaimKey(claim), msg) return pluginName, nil } @@ -1428,7 +1440,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis volume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}) if err == nil && volume != nil { // Volume has been already provisioned, nothing to do. - glog.V(4).Infof("provisionClaimOperation [%s]: volume already exists, skipping", claimToClaimKey(claim)) + klog.V(4).Infof("provisionClaimOperation [%s]: volume already exists, skipping", claimToClaimKey(claim)) return pluginName, err } @@ -1436,7 +1448,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis // provisioned) claimRef, err := ref.GetReference(scheme.Scheme, claim) if err != nil { - glog.V(3).Infof("unexpected error getting claim reference: %v", err) + klog.V(3).Infof("unexpected error getting claim reference: %v", err) return pluginName, err } @@ -1460,7 +1472,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis // of PV would be rejected by validation anyway if !plugin.SupportsMountOption() && len(options.MountOptions) > 0 { strerr := fmt.Sprintf("Mount options are not supported by the provisioner but StorageClass %q has mount options %v", storageClass.Name, options.MountOptions) - glog.V(2).Infof("Mount options are not supported by the provisioner but claim %q's StorageClass %q has mount options %v", claimToClaimKey(claim), storageClass.Name, options.MountOptions) + klog.V(2).Infof("Mount options are not supported by the provisioner but claim %q's StorageClass %q has mount options %v", claimToClaimKey(claim), storageClass.Name, options.MountOptions) ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr) return pluginName, fmt.Errorf("provisioner %q doesn't support mount options", plugin.GetPluginName()) } @@ -1469,7 +1481,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis provisioner, err := plugin.NewProvisioner(options) if err != nil { strerr := fmt.Sprintf("Failed to create provisioner: %v", err) - glog.V(2).Infof("failed to create provisioner for claim %q with StorageClass %q: %v", claimToClaimKey(claim), storageClass.Name, err) + klog.V(2).Infof("failed to create provisioner for claim %q with StorageClass %q: %v", claimToClaimKey(claim), storageClass.Name, err) ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr) return pluginName, err } @@ -1479,7 +1491,7 @@ func (ctrl *PersistentVolumeController) 
provisionClaimOperation(claim *v1.Persis selectedNode, err = ctrl.NodeLister.Get(nodeName) if err != nil { strerr := fmt.Sprintf("Failed to get target node: %v", err) - glog.V(3).Infof("unexpected error getting target node %q for claim %q: %v", nodeName, claimToClaimKey(claim), err) + klog.V(3).Infof("unexpected error getting target node %q for claim %q: %v", nodeName, claimToClaimKey(claim), err) ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr) return pluginName, err } @@ -1496,12 +1508,12 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis ctrl.rescheduleProvisioning(claim) strerr := fmt.Sprintf("Failed to provision volume with StorageClass %q: %v", storageClass.Name, err) - glog.V(2).Infof("failed to provision volume for claim %q with StorageClass %q: %v", claimToClaimKey(claim), storageClass.Name, err) + klog.V(2).Infof("failed to provision volume for claim %q with StorageClass %q: %v", claimToClaimKey(claim), storageClass.Name, err) ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr) return pluginName, err } - glog.V(3).Infof("volume %q for claim %q created", volume.Name, claimToClaimKey(claim)) + klog.V(3).Infof("volume %q for claim %q created", volume.Name, claimToClaimKey(claim)) // Create Kubernetes PV object for the volume. if volume.Name == "" { @@ -1518,26 +1530,26 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis // Try to create the PV object several times for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ { - glog.V(4).Infof("provisionClaimOperation [%s]: trying to save volume %s", claimToClaimKey(claim), volume.Name) + klog.V(4).Infof("provisionClaimOperation [%s]: trying to save volume %s", claimToClaimKey(claim), volume.Name) var newVol *v1.PersistentVolume if newVol, err = ctrl.kubeClient.CoreV1().PersistentVolumes().Create(volume); err == nil || apierrs.IsAlreadyExists(err) { // Save succeeded. if err != nil { - glog.V(3).Infof("volume %q for claim %q already exists, reusing", volume.Name, claimToClaimKey(claim)) + klog.V(3).Infof("volume %q for claim %q already exists, reusing", volume.Name, claimToClaimKey(claim)) err = nil } else { - glog.V(3).Infof("volume %q for claim %q saved", volume.Name, claimToClaimKey(claim)) + klog.V(3).Infof("volume %q for claim %q saved", volume.Name, claimToClaimKey(claim)) _, updateErr := ctrl.storeVolumeUpdate(newVol) if updateErr != nil { // We will get a "volume added" event soon, this is not a big error - glog.V(4).Infof("provisionClaimOperation [%s]: cannot update internal cache: %v", volume.Name, updateErr) + klog.V(4).Infof("provisionClaimOperation [%s]: cannot update internal cache: %v", volume.Name, updateErr) } } break } // Save failed, try again after a while. - glog.V(3).Infof("failed to save volume %q for claim %q: %v", volume.Name, claimToClaimKey(claim), err) + klog.V(3).Infof("failed to save volume %q for claim %q: %v", volume.Name, claimToClaimKey(claim), err) time.Sleep(ctrl.createProvisionedPVInterval) } @@ -1547,7 +1559,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis // Emit some event here and try to delete the storage asset several // times. strerr := fmt.Sprintf("Error creating provisioned PV object for claim %s: %v.
Deleting the volume.", claimToClaimKey(claim), err) - glog.V(3).Info(strerr) + klog.V(3).Info(strerr) ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr) var deleteErr error @@ -1556,18 +1568,18 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis _, deleted, deleteErr = ctrl.doDeleteVolume(volume) if deleteErr == nil && deleted { // Delete succeeded - glog.V(4).Infof("provisionClaimOperation [%s]: cleaning volume %s succeeded", claimToClaimKey(claim), volume.Name) + klog.V(4).Infof("provisionClaimOperation [%s]: cleaning volume %s succeeded", claimToClaimKey(claim), volume.Name) break } if !deleted { // This is unreachable code, the volume was provisioned by an // internal plugin and therefore there MUST be an internal // plugin that deletes it. - glog.Errorf("Error finding internal deleter for volume plugin %q", plugin.GetPluginName()) + klog.Errorf("Error finding internal deleter for volume plugin %q", plugin.GetPluginName()) break } // Delete failed, try again after a while. - glog.V(3).Infof("failed to delete volume %q: %v", volume.Name, deleteErr) + klog.V(3).Infof("failed to delete volume %q: %v", volume.Name, deleteErr) time.Sleep(ctrl.createProvisionedPVInterval) } @@ -1575,11 +1587,11 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis // Delete failed several times. There is an orphaned volume and there // is nothing we can do about it. strerr := fmt.Sprintf("Error cleaning provisioned volume for claim %s: %v. Please delete manually.", claimToClaimKey(claim), deleteErr) - glog.V(2).Info(strerr) + klog.V(2).Info(strerr) ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningCleanupFailed, strerr) } } else { - glog.V(2).Infof("volume %q provisioned for claim %q", volume.Name, claimToClaimKey(claim)) + klog.V(2).Infof("volume %q provisioned for claim %q", volume.Name, claimToClaimKey(claim)) msg := fmt.Sprintf("Successfully provisioned volume %s using %s", volume.Name, plugin.GetPluginName()) ctrl.eventRecorder.Event(claim, v1.EventTypeNormal, events.ProvisioningSucceeded, msg) } @@ -1600,12 +1612,12 @@ func (ctrl *PersistentVolumeController) rescheduleProvisioning(claim *v1.Persist delete(newClaim.Annotations, annSelectedNode) // Try to update the PVC object if _, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(newClaim.Namespace).Update(newClaim); err != nil { - glog.V(4).Infof("Failed to delete annotation 'annSelectedNode' for PersistentVolumeClaim %q: %v", claimToClaimKey(newClaim), err) + klog.V(4).Infof("Failed to delete annotation 'annSelectedNode' for PersistentVolumeClaim %q: %v", claimToClaimKey(newClaim), err) return } if _, err := ctrl.storeClaimUpdate(newClaim); err != nil { // We will get an "claim updated" event soon, this is not a big error - glog.V(4).Infof("Updating PersistentVolumeClaim %q: cannot update internal cache: %v", claimToClaimKey(newClaim), err) + klog.V(4).Infof("Updating PersistentVolumeClaim %q: cannot update internal cache: %v", claimToClaimKey(newClaim), err) } } @@ -1618,7 +1630,7 @@ func (ctrl *PersistentVolumeController) getProvisionedVolumeNameForClaim(claim * // scheduleOperation starts given asynchronous operation on given volume. It // makes sure the operation is already not running. 
func (ctrl *PersistentVolumeController) scheduleOperation(operationName string, operation func() error) { - glog.V(4).Infof("scheduleOperation[%s]", operationName) + klog.V(4).Infof("scheduleOperation[%s]", operationName) // Poke test code that an operation is just about to get started. if ctrl.preOperationHook != nil { @@ -1629,11 +1641,11 @@ func (ctrl *PersistentVolumeController) scheduleOperation(operationName string, if err != nil { switch { case goroutinemap.IsAlreadyExists(err): - glog.V(4).Infof("operation %q is already running, skipping", operationName) + klog.V(4).Infof("operation %q is already running, skipping", operationName) case exponentialbackoff.IsExponentialBackoff(err): - glog.V(4).Infof("operation %q postponed due to exponential backoff", operationName) + klog.V(4).Infof("operation %q postponed due to exponential backoff", operationName) default: - glog.Errorf("error scheduling operation %q: %v", operationName, err) + klog.Errorf("error scheduling operation %q: %v", operationName, err) } } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller_base.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller_base.go index 40e788248438..e83f975da34a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller_base.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller_base.go @@ -38,13 +38,13 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics" "k8s.io/kubernetes/pkg/util/goroutinemap" vol "k8s.io/kubernetes/pkg/volume" - "github.com/golang/glog" + "k8s.io/klog" ) // This file contains the controller base functionality, i.e. 
framework to @@ -73,18 +73,18 @@ func NewController(p ControllerParameters) (*PersistentVolumeController, error) eventRecorder := p.EventRecorder if eventRecorder == nil { broadcaster := record.NewBroadcaster() - broadcaster.StartLogging(glog.Infof) + broadcaster.StartLogging(klog.Infof) broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: p.KubeClient.CoreV1().Events("")}) eventRecorder = broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "persistentvolume-controller"}) } controller := &PersistentVolumeController{ - volumes: newPersistentVolumeOrderedIndex(), - claims: cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc), - kubeClient: p.KubeClient, - eventRecorder: eventRecorder, - runningOperations: goroutinemap.NewGoRoutineMap(true /* exponentialBackOffOnError */), - cloud: p.Cloud, + volumes: newPersistentVolumeOrderedIndex(), + claims: cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc), + kubeClient: p.KubeClient, + eventRecorder: eventRecorder, + runningOperations: goroutinemap.NewGoRoutineMap(true /* exponentialBackOffOnError */), + cloud: p.Cloud, enableDynamicProvisioning: p.EnableDynamicProvisioning, clusterName: p.ClusterName, createProvisionedPVRetryCount: createProvisionedPVRetryCount, @@ -134,27 +134,27 @@ func NewController(p ControllerParameters) (*PersistentVolumeController, error) func (ctrl *PersistentVolumeController) initializeCaches(volumeLister corelisters.PersistentVolumeLister, claimLister corelisters.PersistentVolumeClaimLister) { volumeList, err := volumeLister.List(labels.Everything()) if err != nil { - glog.Errorf("PersistentVolumeController can't initialize caches: %v", err) + klog.Errorf("PersistentVolumeController can't initialize caches: %v", err) return } for _, volume := range volumeList { volumeClone := volume.DeepCopy() if _, err = ctrl.storeVolumeUpdate(volumeClone); err != nil { - glog.Errorf("error updating volume cache: %v", err) + klog.Errorf("error updating volume cache: %v", err) } } claimList, err := claimLister.List(labels.Everything()) if err != nil { - glog.Errorf("PersistentVolumeController can't initialize caches: %v", err) + klog.Errorf("PersistentVolumeController can't initialize caches: %v", err) return } for _, claim := range claimList { if _, err = ctrl.storeClaimUpdate(claim.DeepCopy()); err != nil { - glog.Errorf("error updating claim cache: %v", err) + klog.Errorf("error updating claim cache: %v", err) } } - glog.V(4).Infof("controller initialized") + klog.V(4).Infof("controller initialized") } // enqueueWork adds volume or claim to given work queue. @@ -165,10 +165,10 @@ func (ctrl *PersistentVolumeController) enqueueWork(queue workqueue.Interface, o } objName, err := controller.KeyFunc(obj) if err != nil { - glog.Errorf("failed to get key from object: %v", err) + klog.Errorf("failed to get key from object: %v", err) return } - glog.V(5).Infof("enqueued %q for sync", objName) + klog.V(5).Infof("enqueued %q for sync", objName) queue.Add(objName) } @@ -187,7 +187,7 @@ func (ctrl *PersistentVolumeController) updateVolume(volume *v1.PersistentVolume // is an old version. new, err := ctrl.storeVolumeUpdate(volume) if err != nil { - glog.Errorf("%v", err) + klog.Errorf("%v", err) } if !new { return @@ -198,9 +198,9 @@ func (ctrl *PersistentVolumeController) updateVolume(volume *v1.PersistentVolume if errors.IsConflict(err) { // Version conflict error happens quite often and the controller // recovers from it easily. 
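`enqueueWork` above pushes only namespace/name keys onto the work queues; workers later resolve those keys against the informer cache. A minimal sketch of that key-based pattern follows, with a buffered channel standing in for client-go's deduplicating workqueue; the types and names are illustrative only.

```go
// Events are reduced to namespace/name keys, and the worker re-reads the
// latest object state when it dequeues a key, so repeated events for the
// same object coalesce instead of queueing stale copies.
package main

import "fmt"

type claim struct {
	namespace, name string
}

func key(c claim) string {
	return c.namespace + "/" + c.name
}

func main() {
	queue := make(chan string, 8)
	store := map[string]claim{} // stands in for the informer cache

	c := claim{"default", "data-pvc"}
	store[key(c)] = c
	queue <- key(c) // enqueueWork: only the key travels through the queue

	close(queue)
	for k := range queue {
		// The worker resolves the key back to the freshest cached object.
		obj, ok := store[k]
		if !ok {
			fmt.Printf("%s deleted before processing, nothing to sync\n", k)
			continue
		}
		fmt.Printf("syncing claim %s/%s\n", obj.namespace, obj.name)
	}
}
```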
- glog.V(3).Infof("could not sync volume %q: %+v", volume.Name, err) + klog.V(3).Infof("could not sync volume %q: %+v", volume.Name, err) } else { - glog.Errorf("could not sync volume %q: %+v", volume.Name, err) + klog.Errorf("could not sync volume %q: %+v", volume.Name, err) } } } @@ -208,7 +208,7 @@ func (ctrl *PersistentVolumeController) updateVolume(volume *v1.PersistentVolume // deleteVolume runs in worker thread and handles "volume deleted" event. func (ctrl *PersistentVolumeController) deleteVolume(volume *v1.PersistentVolume) { _ = ctrl.volumes.store.Delete(volume) - glog.V(4).Infof("volume %q deleted", volume.Name) + klog.V(4).Infof("volume %q deleted", volume.Name) if volume.Spec.ClaimRef == nil { return @@ -217,7 +217,7 @@ func (ctrl *PersistentVolumeController) deleteVolume(volume *v1.PersistentVolume // claim here in response to volume deletion prevents the claim from // waiting until the next sync period for its Lost status. claimKey := claimrefToClaimKey(volume.Spec.ClaimRef) - glog.V(5).Infof("deleteVolume[%s]: scheduling sync of claim %q", volume.Name, claimKey) + klog.V(5).Infof("deleteVolume[%s]: scheduling sync of claim %q", volume.Name, claimKey) ctrl.claimQueue.Add(claimKey) } @@ -228,7 +228,7 @@ func (ctrl *PersistentVolumeController) updateClaim(claim *v1.PersistentVolumeCl // an old version. new, err := ctrl.storeClaimUpdate(claim) if err != nil { - glog.Errorf("%v", err) + klog.Errorf("%v", err) } if !new { return @@ -238,9 +238,9 @@ func (ctrl *PersistentVolumeController) updateClaim(claim *v1.PersistentVolumeCl if errors.IsConflict(err) { // Version conflict error happens quite often and the controller // recovers from it easily. - glog.V(3).Infof("could not sync claim %q: %+v", claimToClaimKey(claim), err) + klog.V(3).Infof("could not sync claim %q: %+v", claimToClaimKey(claim), err) } else { - glog.Errorf("could not sync volume %q: %+v", claimToClaimKey(claim), err) + klog.Errorf("could not sync volume %q: %+v", claimToClaimKey(claim), err) } } } @@ -248,17 +248,17 @@ func (ctrl *PersistentVolumeController) updateClaim(claim *v1.PersistentVolumeCl // deleteClaim runs in worker thread and handles "claim deleted" event. func (ctrl *PersistentVolumeController) deleteClaim(claim *v1.PersistentVolumeClaim) { _ = ctrl.claims.Delete(claim) - glog.V(4).Infof("claim %q deleted", claimToClaimKey(claim)) + klog.V(4).Infof("claim %q deleted", claimToClaimKey(claim)) volumeName := claim.Spec.VolumeName if volumeName == "" { - glog.V(5).Infof("deleteClaim[%q]: volume not bound", claimToClaimKey(claim)) + klog.V(5).Infof("deleteClaim[%q]: volume not bound", claimToClaimKey(claim)) return } // sync the volume when its claim is deleted. Explicitly sync'ing the // volume here in response to claim deletion prevents the volume from // waiting until the next sync period for its Release. 
- glog.V(5).Infof("deleteClaim[%q]: scheduling sync of volume %s", claimToClaimKey(claim), volumeName) + klog.V(5).Infof("deleteClaim[%q]: scheduling sync of volume %s", claimToClaimKey(claim), volumeName) ctrl.volumeQueue.Add(volumeName) } @@ -268,8 +268,8 @@ func (ctrl *PersistentVolumeController) Run(stopCh <-chan struct{}) { defer ctrl.claimQueue.ShutDown() defer ctrl.volumeQueue.ShutDown() - glog.Infof("Starting persistent volume controller") - defer glog.Infof("Shutting down persistent volume controller") + klog.Infof("Starting persistent volume controller") + defer klog.Infof("Shutting down persistent volume controller") if !controller.WaitForCacheSync("persistent volume", stopCh, ctrl.volumeListerSynced, ctrl.claimListerSynced, ctrl.classListerSynced, ctrl.podListerSynced, ctrl.NodeListerSynced) { return @@ -296,11 +296,11 @@ func (ctrl *PersistentVolumeController) volumeWorker() { } defer ctrl.volumeQueue.Done(keyObj) key := keyObj.(string) - glog.V(5).Infof("volumeWorker[%s]", key) + klog.V(5).Infof("volumeWorker[%s]", key) _, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { - glog.V(4).Infof("error getting name of volume %q to get volume from informer: %v", key, err) + klog.V(4).Infof("error getting name of volume %q to get volume from informer: %v", key, err) return false } volume, err := ctrl.volumeLister.Get(name) @@ -311,7 +311,7 @@ func (ctrl *PersistentVolumeController) volumeWorker() { return false } if !errors.IsNotFound(err) { - glog.V(2).Infof("error getting volume %q from informer: %v", key, err) + klog.V(2).Infof("error getting volume %q from informer: %v", key, err) return false } @@ -319,18 +319,18 @@ func (ctrl *PersistentVolumeController) volumeWorker() { // "delete" volumeObj, found, err := ctrl.volumes.store.GetByKey(key) if err != nil { - glog.V(2).Infof("error getting volume %q from cache: %v", key, err) + klog.V(2).Infof("error getting volume %q from cache: %v", key, err) return false } if !found { // The controller has already processed the delete event and // deleted the volume from its cache - glog.V(2).Infof("deletion of volume %q was already processed", key) + klog.V(2).Infof("deletion of volume %q was already processed", key) return false } volume, ok := volumeObj.(*v1.PersistentVolume) if !ok { - glog.Errorf("expected volume, got %+v", volumeObj) + klog.Errorf("expected volume, got %+v", volumeObj) return false } ctrl.deleteVolume(volume) @@ -338,7 +338,7 @@ func (ctrl *PersistentVolumeController) volumeWorker() { } for { if quit := workFunc(); quit { - glog.Infof("volume worker queue shutting down") + klog.Infof("volume worker queue shutting down") return } } @@ -354,11 +354,11 @@ func (ctrl *PersistentVolumeController) claimWorker() { } defer ctrl.claimQueue.Done(keyObj) key := keyObj.(string) - glog.V(5).Infof("claimWorker[%s]", key) + klog.V(5).Infof("claimWorker[%s]", key) namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { - glog.V(4).Infof("error getting namespace & name of claim %q to get claim from informer: %v", key, err) + klog.V(4).Infof("error getting namespace & name of claim %q to get claim from informer: %v", key, err) return false } claim, err := ctrl.claimLister.PersistentVolumeClaims(namespace).Get(name) @@ -369,25 +369,25 @@ func (ctrl *PersistentVolumeController) claimWorker() { return false } if !errors.IsNotFound(err) { - glog.V(2).Infof("error getting claim %q from informer: %v", key, err) + klog.V(2).Infof("error getting claim %q from informer: %v", key, err) return false } // The claim is not 
in informer cache, the event must have been "delete" claimObj, found, err := ctrl.claims.GetByKey(key) if err != nil { - glog.V(2).Infof("error getting claim %q from cache: %v", key, err) + klog.V(2).Infof("error getting claim %q from cache: %v", key, err) return false } if !found { // The controller has already processed the delete event and // deleted the claim from its cache - glog.V(2).Infof("deletion of claim %q was already processed", key) + klog.V(2).Infof("deletion of claim %q was already processed", key) return false } claim, ok := claimObj.(*v1.PersistentVolumeClaim) if !ok { - glog.Errorf("expected claim, got %+v", claimObj) + klog.Errorf("expected claim, got %+v", claimObj) return false } ctrl.deleteClaim(claim) @@ -395,7 +395,7 @@ func (ctrl *PersistentVolumeController) claimWorker() { } for { if quit := workFunc(); quit { - glog.Infof("claim worker queue shutting down") + klog.Infof("claim worker queue shutting down") return } } @@ -405,11 +405,11 @@ func (ctrl *PersistentVolumeController) claimWorker() { // all consumers of PV/PVC shared informer to have a short resync period, // therefore we do our own. func (ctrl *PersistentVolumeController) resync() { - glog.V(4).Infof("resyncing PV controller") + klog.V(4).Infof("resyncing PV controller") pvcs, err := ctrl.claimLister.List(labels.NewSelector()) if err != nil { - glog.Warningf("cannot list claims: %s", err) + klog.Warningf("cannot list claims: %s", err) return } for _, pvc := range pvcs { @@ -418,7 +418,7 @@ func (ctrl *PersistentVolumeController) resync() { pvs, err := ctrl.volumeLister.List(labels.NewSelector()) if err != nil { - glog.Warningf("cannot list persistent volumes: %s", err) + klog.Warningf("cannot list persistent volumes: %s", err) return } for _, pv := range pvs { @@ -504,7 +504,7 @@ func storeObjectUpdate(store cache.Store, obj interface{}, className string) (bo if !found { // This is a new object - glog.V(4).Infof("storeObjectUpdate: adding %s %q, version %s", className, objName, objAccessor.GetResourceVersion()) + klog.V(4).Infof("storeObjectUpdate: adding %s %q, version %s", className, objName, objAccessor.GetResourceVersion()) if err = store.Add(obj); err != nil { return false, fmt.Errorf("Error adding %s %q to controller cache: %v", className, objName, err) } @@ -528,11 +528,11 @@ func storeObjectUpdate(store cache.Store, obj interface{}, className string) (bo // Throw away only older version, let the same version pass - we do want to // get periodic sync events. 
if oldObjResourceVersion > objResourceVersion { - glog.V(4).Infof("storeObjectUpdate: ignoring %s %q version %s", className, objName, objAccessor.GetResourceVersion()) + klog.V(4).Infof("storeObjectUpdate: ignoring %s %q version %s", className, objName, objAccessor.GetResourceVersion()) return false, nil } - glog.V(4).Infof("storeObjectUpdate updating %s %q with version %s", className, objName, objAccessor.GetResourceVersion()) + klog.V(4).Infof("storeObjectUpdate updating %s %q with version %s", className, objName, objAccessor.GetResourceVersion()) if err = store.Update(obj); err != nil { return false, fmt.Errorf("Error updating %s %q in controller cache: %v", className, objName, err) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_assume_cache.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_assume_cache.go index 0e4ade0faf1a..292ee8f6ff2c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_assume_cache.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_assume_cache.go @@ -21,7 +21,7 @@ import ( "strconv" "sync" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" @@ -152,7 +152,7 @@ func (c *assumeCache) add(obj interface{}) { name, err := cache.MetaNamespaceKeyFunc(obj) if err != nil { - glog.Errorf("add failed: %v", &errObjectName{err}) + klog.Errorf("add failed: %v", &errObjectName{err}) return } @@ -162,27 +162,27 @@ func (c *assumeCache) add(obj interface{}) { if objInfo, _ := c.getObjInfo(name); objInfo != nil { newVersion, err := c.getObjVersion(name, obj) if err != nil { - glog.Errorf("add: couldn't get object version: %v", err) + klog.Errorf("add: couldn't get object version: %v", err) return } storedVersion, err := c.getObjVersion(name, objInfo.latestObj) if err != nil { - glog.Errorf("add: couldn't get stored object version: %v", err) + klog.Errorf("add: couldn't get stored object version: %v", err) return } // Only update object if version is newer. // This is so we don't override assumed objects due to informer resync. 
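The `storeObjectUpdate` check above (and the similar newer-version test in the assume cache that follows) hinges on numeric comparison of resource versions, which the controller parses with `strconv.ParseInt`. A standalone sketch of the rule is below; `shouldStore` is an illustrative name, not the vendored function.

```go
// Cache updates are rejected only when strictly older than the cached copy,
// so an incoming object at the same version still passes and periodic
// resyncs keep flowing.
package main

import (
	"fmt"
	"strconv"
)

func shouldStore(cachedRV, incomingRV string) (bool, error) {
	cached, err := strconv.ParseInt(cachedRV, 10, 64)
	if err != nil {
		return false, fmt.Errorf("parsing cached resource version %q: %v", cachedRV, err)
	}
	incoming, err := strconv.ParseInt(incomingRV, 10, 64)
	if err != nil {
		return false, fmt.Errorf("parsing incoming resource version %q: %v", incomingRV, err)
	}
	// Only a strictly older incoming version is ignored.
	return cached <= incoming, nil
}

func main() {
	for _, rv := range []string{"41", "42", "43"} {
		ok, _ := shouldStore("42", rv)
		fmt.Printf("cached=42 incoming=%s store=%v\n", rv, ok)
	}
}
```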
if newVersion <= storedVersion { - glog.V(10).Infof("Skip adding %v %v to assume cache because version %v is not newer than %v", c.description, name, newVersion, storedVersion) + klog.V(10).Infof("Skip adding %v %v to assume cache because version %v is not newer than %v", c.description, name, newVersion, storedVersion) return } } objInfo := &objInfo{name: name, latestObj: obj, apiObj: obj} c.store.Update(objInfo) - glog.V(10).Infof("Adding %v %v to assume cache: %+v ", c.description, name, obj) + klog.V(10).Infof("Adding %v %v to assume cache: %+v ", c.description, name, obj) } func (c *assumeCache) update(oldObj interface{}, newObj interface{}) { @@ -196,7 +196,7 @@ func (c *assumeCache) delete(obj interface{}) { name, err := cache.MetaNamespaceKeyFunc(obj) if err != nil { - glog.Errorf("delete failed: %v", &errObjectName{err}) + klog.Errorf("delete failed: %v", &errObjectName{err}) return } @@ -206,7 +206,7 @@ func (c *assumeCache) delete(obj interface{}) { objInfo := &objInfo{name: name} err = c.store.Delete(objInfo) if err != nil { - glog.Errorf("delete: failed to delete %v %v: %v", c.description, name, err) + klog.Errorf("delete: failed to delete %v %v: %v", c.description, name, err) } } @@ -257,14 +257,14 @@ func (c *assumeCache) List(indexObj interface{}) []interface{} { allObjs := []interface{}{} objs, err := c.store.Index(c.indexName, &objInfo{latestObj: indexObj}) if err != nil { - glog.Errorf("list index error: %v", err) + klog.Errorf("list index error: %v", err) return nil } for _, obj := range objs { objInfo, ok := obj.(*objInfo) if !ok { - glog.Errorf("list error: %v", &errWrongType{"objInfo", obj}) + klog.Errorf("list error: %v", &errWrongType{"objInfo", obj}) continue } allObjs = append(allObjs, objInfo.latestObj) @@ -302,7 +302,7 @@ func (c *assumeCache) Assume(obj interface{}) error { // Only update the cached object objInfo.latestObj = obj - glog.V(4).Infof("Assumed %v %q, version %v", c.description, name, newVersion) + klog.V(4).Infof("Assumed %v %q, version %v", c.description, name, newVersion) return nil } @@ -313,10 +313,10 @@ func (c *assumeCache) Restore(objName string) { objInfo, err := c.getObjInfo(objName) if err != nil { // This could be expected if object got deleted - glog.V(5).Infof("Restore %v %q warning: %v", c.description, objName, err) + klog.V(5).Infof("Restore %v %q warning: %v", c.description, objName, err) } else { objInfo.latestObj = objInfo.apiObj - glog.V(4).Infof("Restored %v %q", c.description, objName) + klog.V(4).Infof("Restored %v %q", c.description, objName) } } @@ -366,7 +366,7 @@ func (c *pvAssumeCache) ListPVs(storageClassName string) []*v1.PersistentVolume for _, obj := range objs { pv, ok := obj.(*v1.PersistentVolume) if !ok { - glog.Errorf("ListPVs: %v", &errWrongType{"v1.PersistentVolume", obj}) + klog.Errorf("ListPVs: %v", &errWrongType{"v1.PersistentVolume", obj}) } pvs = append(pvs, pv) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_bind_cache_metrics.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_bind_cache_metrics.go new file mode 100644 index 000000000000..9a56c2422b78 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_bind_cache_metrics.go @@ -0,0 +1,60 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package persistentvolume + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +// VolumeSchedulerSubsystem - subsystem name used by scheduler +const VolumeSchedulerSubsystem = "scheduler_volume" + +var ( + VolumeBindingRequestSchedulerBinderCache = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: VolumeSchedulerSubsystem, + Name: "binder_cache_requests_total", + Help: "Total number of volume binding cache requests", + }, + []string{"operation"}, + ) + VolumeSchedulingStageLatency = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Subsystem: VolumeSchedulerSubsystem, + Name: "scheduling_duration_seconds", + Help: "Volume scheduling stage latency", + Buckets: prometheus.ExponentialBuckets(1000, 2, 15), + }, + []string{"operation"}, + ) + VolumeSchedulingStageFailed = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: VolumeSchedulerSubsystem, + Name: "scheduling_stage_error_total", + Help: "Volume scheduling stage error count", + }, + []string{"operation"}, + ) +) + +// RegisterVolumeSchedulingMetrics is used by the scheduler, because the volume binding cache is a library +// used by the scheduler process. +func RegisterVolumeSchedulingMetrics() { + prometheus.MustRegister(VolumeBindingRequestSchedulerBinderCache) + prometheus.MustRegister(VolumeSchedulingStageLatency) + prometheus.MustRegister(VolumeSchedulingStageFailed) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_binder.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_binder.go index 5375c78a6713..85fd71cf4ebe 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_binder.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_binder.go @@ -21,7 +21,7 @@ import ( "sort" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -144,11 +144,18 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume podName := getPodName(pod) // Warning: Below log needs high verbosity as it can be printed several times (#60933). - glog.V(5).Infof("FindPodVolumes for pod %q, node %q", podName, node.Name) + klog.V(5).Infof("FindPodVolumes for pod %q, node %q", podName, node.Name) // Initialize to true for pods that don't have volumes unboundVolumesSatisfied = true boundVolumesSatisfied = true + start := time.Now() + defer func() { + VolumeSchedulingStageLatency.WithLabelValues("predicate").Observe(time.Since(start).Seconds()) + if err != nil { + VolumeSchedulingStageFailed.WithLabelValues("predicate").Inc() + } + }() // The pod's volumes need to be processed in one call to avoid the race condition where // volumes can get bound/provisioned in between calls.
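A hedged usage sketch of the metric pattern this new file enables, mirroring the `start := time.Now()` / `defer` instrumentation the diff adds to `FindPodVolumes`, `AssumePodVolumes`, and `BindPodVolumes`. The subsystem and metric names below are illustrative stand-ins, not the vendored ones.

```go
// Time a stage and count failures with the same Prometheus primitives used
// in the vendored file: a HistogramVec for latency and a CounterVec for
// errors, both labeled by operation.
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var (
	stageLatency = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Subsystem: "example_volume",
			Name:      "scheduling_duration_seconds",
			Help:      "Volume scheduling stage latency",
		},
		[]string{"operation"},
	)
	stageFailed = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Subsystem: "example_volume",
			Name:      "scheduling_stage_error_total",
			Help:      "Volume scheduling stage error count",
		},
		[]string{"operation"},
	)
)

func bindStage() (err error) {
	start := time.Now()
	// The deferred closure observes latency and counts failures exactly once;
	// the named return lets it see whatever error the function ends with.
	defer func() {
		stageLatency.WithLabelValues("bind").Observe(time.Since(start).Seconds())
		if err != nil {
			stageFailed.WithLabelValues("bind").Inc()
		}
	}()
	return errors.New("simulated bind failure")
}

func main() {
	prometheus.MustRegister(stageLatency, stageFailed)
	fmt.Println(bindStage())
}
```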
@@ -197,10 +204,17 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (allFullyBound bool, err error) { podName := getPodName(assumedPod) - glog.V(4).Infof("AssumePodVolumes for pod %q, node %q", podName, nodeName) + klog.V(4).Infof("AssumePodVolumes for pod %q, node %q", podName, nodeName) + start := time.Now() + defer func() { + VolumeSchedulingStageLatency.WithLabelValues("assume").Observe(time.Since(start).Seconds()) + if err != nil { + VolumeSchedulingStageFailed.WithLabelValues("assume").Inc() + } + }() if allBound := b.arePodVolumesBound(assumedPod); allBound { - glog.V(4).Infof("AssumePodVolumes for pod %q, node %q: all PVCs bound and nothing to do", podName, nodeName) + klog.V(4).Infof("AssumePodVolumes for pod %q, node %q: all PVCs bound and nothing to do", podName, nodeName) return true, nil } @@ -213,7 +227,7 @@ func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (al newBindings := []*bindingInfo{} for _, binding := range claimsToBind { newPV, dirty, err := b.ctrl.getBindVolumeToClaim(binding.pv, binding.pvc) - glog.V(5).Infof("AssumePodVolumes: getBindVolumeToClaim for pod %q, PV %q, PVC %q. newPV %p, dirty %v, err: %v", + klog.V(5).Infof("AssumePodVolumes: getBindVolumeToClaim for pod %q, PV %q, PVC %q. newPV %p, dirty %v, err: %v", podName, binding.pv.Name, binding.pvc.Name, @@ -264,15 +278,23 @@ func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (al // BindPodVolumes gets the cached bindings and PVCs to provision in podBindingCache, // makes the API update for those PVs/PVCs, and waits for the PVCs to be completely bound // by the PV controller. -func (b *volumeBinder) BindPodVolumes(assumedPod *v1.Pod) error { +func (b *volumeBinder) BindPodVolumes(assumedPod *v1.Pod) (err error) { podName := getPodName(assumedPod) - glog.V(4).Infof("BindPodVolumes for pod %q, node %q", podName, assumedPod.Spec.NodeName) + klog.V(4).Infof("BindPodVolumes for pod %q, node %q", podName, assumedPod.Spec.NodeName) + + start := time.Now() + defer func() { + VolumeSchedulingStageLatency.WithLabelValues("bind").Observe(time.Since(start).Seconds()) + if err != nil { + VolumeSchedulingStageFailed.WithLabelValues("bind").Inc() + } + }() bindings := b.podBindingCache.GetBindings(assumedPod, assumedPod.Spec.NodeName) claimsToProvision := b.podBindingCache.GetProvisionedPVCs(assumedPod, assumedPod.Spec.NodeName) // Start API operations - err := b.bindAPIUpdate(podName, bindings, claimsToProvision) + err = b.bindAPIUpdate(podName, bindings, claimsToProvision) if err != nil { return err } @@ -324,7 +346,7 @@ func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*bindingInfo, cl // Do the actual prebinding. Let the PV controller take care of the rest // There is no API rollback if the actual binding fails for _, binding = range bindings { - glog.V(5).Infof("bindAPIUpdate: Pod %q, binding PV %q to PVC %q", podName, binding.pv.Name, binding.pvc.Name) + klog.V(5).Infof("bindAPIUpdate: Pod %q, binding PV %q to PVC %q", podName, binding.pv.Name, binding.pvc.Name) // TODO: does it hurt if we make an api call and nothing needs to be updated? if _, err := b.ctrl.updateBindVolumeToClaim(binding.pv, binding.pvc, false); err != nil { return err @@ -335,7 +357,7 @@ func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*bindingInfo, cl // Update claims objects to trigger volume provisioning. 
Let the PV controller take care of the rest // PV controller is expected to signal back by removing related annotations if actual provisioning fails for _, claim = range claimsToProvision { - glog.V(5).Infof("bindAPIUpdate: Pod %q, PVC %q", podName, getPVCName(claim)) + klog.V(5).Infof("bindAPIUpdate: Pod %q, PVC %q", podName, getPVCName(claim)) if _, err := b.ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claim); err != nil { return err } @@ -404,7 +426,7 @@ func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*bindingInfo, claim } // All pvs and pvcs that we operated on are bound - glog.V(4).Infof("All PVCs for pod %q are bound", podName) + klog.V(4).Infof("All PVCs for pod %q are bound", podName) return true, nil } @@ -424,23 +446,24 @@ func (b *volumeBinder) isPVCBound(namespace, pvcName string) (bool, *v1.Persiste Namespace: namespace, }, } - pvc, err := b.pvcCache.GetPVC(getPVCName(claim)) + pvcKey := getPVCName(claim) + pvc, err := b.pvcCache.GetPVC(pvcKey) if err != nil || pvc == nil { - return false, nil, fmt.Errorf("error getting PVC %q: %v", pvcName, err) + return false, nil, fmt.Errorf("error getting PVC %q: %v", pvcKey, err) } pvName := pvc.Spec.VolumeName if pvName != "" { if metav1.HasAnnotation(pvc.ObjectMeta, annBindCompleted) { - glog.V(5).Infof("PVC %q is fully bound to PV %q", getPVCName(pvc), pvName) + klog.V(5).Infof("PVC %q is fully bound to PV %q", pvcKey, pvName) return true, pvc, nil } else { - glog.V(5).Infof("PVC %q is not fully bound to PV %q", getPVCName(pvc), pvName) + klog.V(5).Infof("PVC %q is not fully bound to PV %q", pvcKey, pvName) return false, pvc, nil } } - glog.V(5).Infof("PVC %q is not bound", getPVCName(pvc)) + klog.V(5).Infof("PVC %q is not bound", pvcKey) return false, pvc, nil } @@ -500,13 +523,13 @@ func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node err = volumeutil.CheckNodeAffinity(pv, node.Labels) if err != nil { - glog.V(4).Infof("PersistentVolume %q, Node %q mismatch for Pod %q: %v", pvName, node.Name, err.Error(), podName) + klog.V(4).Infof("PersistentVolume %q, Node %q mismatch for Pod %q: %v", pvName, node.Name, podName, err) return false, nil } - glog.V(5).Infof("PersistentVolume %q, Node %q matches for Pod %q", pvName, node.Name, podName) + klog.V(5).Infof("PersistentVolume %q, Node %q matches for Pod %q", pvName, node.Name, podName) } - glog.V(4).Infof("All bound volumes for Pod %q match with Node %q", podName, node.Name) + klog.V(4).Infof("All bound volumes for Pod %q match with Node %q", podName, node.Name) return true, nil } @@ -530,6 +553,7 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*bindingI storageClassName = *storageClass } allPVs := b.pvCache.ListPVs(storageClassName) + pvcName := getPVCName(bindingInfo.pvc) // Find a matching PV bindingInfo.pv, err = findMatchingVolume(bindingInfo.pvc, allPVs, node, chosenPVs, true) @@ -537,7 +561,7 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*bindingI return false, nil, err } if bindingInfo.pv == nil { - glog.V(4).Infof("No matching volumes for Pod %q, PVC %q on node %q", podName, getPVCName(bindingInfo.pvc), node.Name) + klog.V(4).Infof("No matching volumes for Pod %q, PVC %q on node %q", podName, pvcName, node.Name) unboundClaims = append(unboundClaims, bindingInfo.pvc) foundMatches = false continue @@ -546,7 +570,7 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*bindingI // matching PV needs to be excluded so we don't select it again
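A standalone sketch of the matching loop in `findMatchingVolumes` above: each PVC is matched against the PV list with already-chosen PVs excluded, so two claims from one pod never grab the same volume. The real `findMatchingVolume` also filters on storage class, access modes, and node affinity; this toy matcher uses capacity only, and its smallest-fit preference is an assumption of the sketch.

```go
// Greedy claim-to-volume matching with a "chosen" set excluding PVs already
// handed to earlier claims in the same pass.
package main

import "fmt"

type pv struct {
	name     string
	capacity int
}

func findMatching(request int, pvs []pv, chosen map[string]bool) *pv {
	var best *pv
	for i := range pvs {
		candidate := &pvs[i]
		if chosen[candidate.name] || candidate.capacity < request {
			continue
		}
		// Prefer the smallest PV that still fits the request.
		if best == nil || candidate.capacity < best.capacity {
			best = candidate
		}
	}
	return best
}

func main() {
	pvs := []pv{{"pv-a", 10}, {"pv-b", 5}, {"pv-c", 5}}
	chosen := map[string]bool{}
	for _, request := range []int{5, 5, 5, 5} {
		m := findMatching(request, pvs, chosen)
		if m == nil {
			fmt.Println("no matching volume, claim stays unbound")
			continue
		}
		chosen[m.name] = true
		fmt.Printf("claim (%dGi) matched to %s\n", request, m.name)
	}
}
```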
chosenPVs[bindingInfo.pv.Name] = bindingInfo.pv matchedClaims = append(matchedClaims, bindingInfo) - glog.V(5).Infof("Found matching PV %q for PVC %q on node %q for pod %q", bindingInfo.pv.Name, getPVCName(bindingInfo.pvc), node.Name, podName) + klog.V(5).Infof("Found matching PV %q for PVC %q on node %q for pod %q", bindingInfo.pv.Name, pvcName, node.Name, podName) } // Mark cache with all the matches for each PVC for this node @@ -555,7 +579,7 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*bindingI } if foundMatches { - glog.V(4).Infof("Found matching volumes for pod %q on node %q", podName, node.Name) + klog.V(4).Infof("Found matching volumes for pod %q on node %q", podName, node.Name) } return @@ -569,9 +593,10 @@ func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v provisionedClaims := []*v1.PersistentVolumeClaim{} for _, claim := range claimsToProvision { + pvcName := getPVCName(claim) className := v1helper.GetPersistentVolumeClaimClass(claim) if className == "" { - return false, fmt.Errorf("no class for claim %q", getPVCName(claim)) + return false, fmt.Errorf("no class for claim %q", pvcName) } class, err := b.ctrl.classLister.Get(className) @@ -580,13 +605,13 @@ func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v } provisioner := class.Provisioner if provisioner == "" || provisioner == notSupportedProvisioner { - glog.V(4).Infof("storage class %q of claim %q does not support dynamic provisioning", className, getPVCName(claim)) + klog.V(4).Infof("storage class %q of claim %q does not support dynamic provisioning", className, pvcName) return false, nil } // Check if the node can satisfy the topology requirement in the class if !v1helper.MatchTopologySelectorTerms(class.AllowedTopologies, labels.Set(node.Labels)) { - glog.V(4).Infof("Node %q cannot satisfy provisioning topology requirements of claim %q", node.Name, getPVCName(claim)) + klog.V(4).Infof("Node %q cannot satisfy provisioning topology requirements of claim %q", node.Name, pvcName) return false, nil } @@ -596,7 +621,7 @@ func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v provisionedClaims = append(provisionedClaims, claim) } - glog.V(4).Infof("Provisioning for claims of pod %q that has no matching volumes on node %q ...", podName, node.Name) + klog.V(4).Infof("Provisioning for claims of pod %q that has no matching volumes on node %q ...", podName, node.Name) // Mark cache with all the PVCs that need provisioning for this node b.podBindingCache.UpdateProvisionedPVCs(pod, node.Name, provisionedClaims) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_binder_cache.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_binder_cache.go index e3acc35163c4..b95538304ad9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_binder_cache.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_binder_cache.go @@ -77,7 +77,11 @@ func (c *podBindingCache) DeleteBindings(pod *v1.Pod) { defer c.rwMutex.Unlock() podName := getPodName(pod) - delete(c.bindingDecisions, podName) + + if _, ok := c.bindingDecisions[podName]; ok { + delete(c.bindingDecisions, podName) + VolumeBindingRequestSchedulerBinderCache.WithLabelValues("delete").Inc() + } } func (c *podBindingCache) UpdateBindings(pod *v1.Pod, node string, bindings []*bindingInfo) 
{ @@ -95,6 +99,7 @@ func (c *podBindingCache) UpdateBindings(pod *v1.Pod, node string, bindings []*b decision = nodeDecision{ bindings: bindings, } + VolumeBindingRequestSchedulerBinderCache.WithLabelValues("add").Inc() } else { decision.bindings = bindings } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/volume_host.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/volume_host.go index fe54b06c6248..6e4fc3fdd38b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/volume_host.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/volume_host.go @@ -25,8 +25,9 @@ import ( "k8s.io/apimachinery/pkg/types" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/record" + cloudprovider "k8s.io/cloud-provider" csiclientset "k8s.io/csi-api/pkg/client/clientset/versioned" - "k8s.io/kubernetes/pkg/cloudprovider" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" vol "k8s.io/kubernetes/pkg/volume" ) @@ -109,6 +110,12 @@ func (ctrl *PersistentVolumeController) GetServiceAccountTokenFunc() func(_, _ s } } +func (ctrl *PersistentVolumeController) DeleteServiceAccountTokenFunc() func(types.UID) { + return func(types.UID) { + klog.Errorf("DeleteServiceAccountToken unsupported in PersistentVolumeController") + } +} + func (adc *PersistentVolumeController) GetExec(pluginName string) mount.Exec { return mount.NewOsExec() } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/BUILD index 351420ae9262..0dfa2dfdc59c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/BUILD @@ -18,8 +18,7 @@ go_library( importpath = "k8s.io/kubernetes/pkg/credentialprovider", deps = [ "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/github.com/docker/docker/api/types:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -31,7 +30,6 @@ go_test( "provider_test.go", ], embed = [":go_default_library"], - deps = ["//vendor/github.com/docker/docker/api/types:go_default_library"], ) filegroup( diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/BUILD index 198572435588..5161307a7123 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/BUILD @@ -16,7 +16,7 @@ go_library( "//vendor/github.com/aws/aws-sdk-go/aws/request:go_default_library", "//vendor/github.com/aws/aws-sdk-go/aws/session:go_default_library", "//vendor/github.com/aws/aws-sdk-go/service/ecr:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/aws_credentials.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/aws_credentials.go index d889cbf1fa8b..89869e76eaa6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/aws_credentials.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/aws_credentials.go @@ -26,7 +26,8 @@ import ( 
"github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ecr" - "github.com/golang/glog" + "k8s.io/klog" + "k8s.io/kubernetes/pkg/credentialprovider" ) @@ -46,7 +47,7 @@ func awsHandlerLogger(req *request.Request) { name = req.Operation.Name } - glog.V(3).Infof("AWS request: %s:%s in %s", service, name, *region) + klog.V(3).Infof("AWS request: %s:%s in %s", service, name, *region) } // An interface for testing purposes. @@ -100,7 +101,7 @@ func registryURL(region string) string { // This should be called only if using the AWS cloud provider. // This way, we avoid timeouts waiting for a non-existent provider. func RegisterCredentialsProvider(region string) { - glog.V(4).Infof("registering credentials provider for AWS region %q", region) + klog.V(4).Infof("registering credentials provider for AWS region %q", region) credentialprovider.RegisterCredentialProvider("aws-ecr-"+region, &lazyEcrProvider{ @@ -121,7 +122,7 @@ func (p *lazyEcrProvider) Enabled() bool { // provider only when we actually need it the first time. func (p *lazyEcrProvider) LazyProvide() *credentialprovider.DockerConfigEntry { if p.actualProvider == nil { - glog.V(2).Infof("Creating ecrProvider for %s", p.region) + klog.V(2).Infof("Creating ecrProvider for %s", p.region) p.actualProvider = &credentialprovider.CachingDockerConfigProvider{ Provider: newEcrProvider(p.region, nil), // Refresh credentials a little earlier than expiration time @@ -160,7 +161,7 @@ func newEcrProvider(region string, getter tokenGetter) *ecrProvider { // use ECR somehow? func (p *ecrProvider) Enabled() bool { if p.region == "" { - glog.Errorf("Called ecrProvider.Enabled() with no region set") + klog.Errorf("Called ecrProvider.Enabled() with no region set") return false } @@ -190,11 +191,11 @@ func (p *ecrProvider) Provide() credentialprovider.DockerConfig { params := &ecr.GetAuthorizationTokenInput{} output, err := p.getter.GetAuthorizationToken(params) if err != nil { - glog.Errorf("while requesting ECR authorization token %v", err) + klog.Errorf("while requesting ECR authorization token %v", err) return cfg } if output == nil { - glog.Errorf("Got back no ECR token") + klog.Errorf("Got back no ECR token") return cfg } @@ -203,7 +204,7 @@ func (p *ecrProvider) Provide() credentialprovider.DockerConfig { data.AuthorizationToken != nil { decodedToken, err := base64.StdEncoding.DecodeString(aws.StringValue(data.AuthorizationToken)) if err != nil { - glog.Errorf("while decoding token for endpoint %v %v", data.ProxyEndpoint, err) + klog.Errorf("while decoding token for endpoint %v %v", data.ProxyEndpoint, err) return cfg } parts := strings.SplitN(string(decodedToken), ":", 2) @@ -216,7 +217,7 @@ func (p *ecrProvider) Provide() credentialprovider.DockerConfig { Email: "not@val.id", } - glog.V(3).Infof("Adding credentials for user %s in %s", user, p.region) + klog.V(3).Infof("Adding credentials for user %s in %s", user, p.region) // Add our config entry for this region's registry URLs cfg[p.regionURL] = entry diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/BUILD index 5d1c3800d7f4..aa1b29b2c787 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/BUILD @@ -21,9 +21,9 @@ go_library( "//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library", 
"//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library", "//vendor/github.com/dgrijalva/jwt-go:go_default_library", - "//vendor/github.com/ghodss/yaml:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/sigs.k8s.io/yaml:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/azure_credentials.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/azure_credentials.go index ffeab5ea5ab6..28e43048d229 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/azure_credentials.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/azure_credentials.go @@ -27,9 +27,9 @@ import ( "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/adal" "github.com/Azure/go-autorest/autorest/azure" - "github.com/ghodss/yaml" - "github.com/golang/glog" "github.com/spf13/pflag" + "k8s.io/klog" + "sigs.k8s.io/yaml" "k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth" "k8s.io/kubernetes/pkg/credentialprovider" @@ -133,7 +133,7 @@ func (a *acrProvider) loadConfig(rdr io.Reader) error { var err error a.config, err = parseConfig(rdr) if err != nil { - glog.Errorf("Failed to load azure credential file: %v", err) + klog.Errorf("Failed to load azure credential file: %v", err) } a.environment, err = auth.ParseAzureEnvironment(a.config.Cloud) @@ -146,26 +146,26 @@ func (a *acrProvider) loadConfig(rdr io.Reader) error { func (a *acrProvider) Enabled() bool { if a.file == nil || len(*a.file) == 0 { - glog.V(5).Infof("Azure config unspecified, disabling") + klog.V(5).Infof("Azure config unspecified, disabling") return false } f, err := os.Open(*a.file) if err != nil { - glog.Errorf("Failed to load config from file: %s", *a.file) + klog.Errorf("Failed to load config from file: %s", *a.file) return false } defer f.Close() err = a.loadConfig(f) if err != nil { - glog.Errorf("Failed to load config from file: %s", *a.file) + klog.Errorf("Failed to load config from file: %s", *a.file) return false } a.servicePrincipalToken, err = auth.GetServicePrincipalToken(a.config, a.environment) if err != nil { - glog.Errorf("Failed to create service principal token: %v", err) + klog.Errorf("Failed to create service principal token: %v", err) return false } @@ -179,16 +179,16 @@ func (a *acrProvider) Provide() credentialprovider.DockerConfig { defer cancel() if a.config.UseManagedIdentityExtension { - glog.V(4).Infof("listing registries") + klog.V(4).Infof("listing registries") result, err := a.registryClient.List(ctx) if err != nil { - glog.Errorf("Failed to list registries: %v", err) + klog.Errorf("Failed to list registries: %v", err) return cfg } for ix := range result { loginServer := getLoginServer(result[ix]) - glog.V(2).Infof("loginServer: %s", loginServer) + klog.V(2).Infof("loginServer: %s", loginServer) cred, err := getACRDockerEntryFromARMToken(a, loginServer) if err != nil { continue @@ -216,22 +216,22 @@ func getLoginServer(registry containerregistry.Registry) string { func getACRDockerEntryFromARMToken(a *acrProvider, loginServer string) (*credentialprovider.DockerConfigEntry, error) { armAccessToken := a.servicePrincipalToken.OAuthToken() - glog.V(4).Infof("discovering auth redirects for: %s", loginServer) + klog.V(4).Infof("discovering auth redirects for: %s", loginServer) directive, err := 
receiveChallengeFromLoginServer(loginServer) if err != nil { - glog.Errorf("failed to receive challenge: %s", err) + klog.Errorf("failed to receive challenge: %s", err) return nil, err } - glog.V(4).Infof("exchanging an acr refresh_token") + klog.V(4).Infof("exchanging an acr refresh_token") registryRefreshToken, err := performTokenExchange( loginServer, directive, a.config.TenantID, armAccessToken) if err != nil { - glog.Errorf("failed to perform token exchange: %s", err) + klog.Errorf("failed to perform token exchange: %s", err) return nil, err } - glog.V(4).Infof("adding ACR docker config entry for: %s", loginServer) + klog.V(4).Infof("adding ACR docker config entry for: %s", loginServer) return &credentialprovider.DockerConfigEntry{ Username: dockerTokenLoginUsernameGUID, Password: registryRefreshToken, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go index 433f28b15448..a43e8c2b15d4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go @@ -27,7 +27,7 @@ import ( "strings" "sync" - "github.com/golang/glog" + "k8s.io/klog" ) // DockerConfigJson represents ~/.docker/config.json file info @@ -95,21 +95,21 @@ func ReadDockercfgFile(searchPaths []string) (cfg DockerConfig, err error) { for _, configPath := range searchPaths { absDockerConfigFileLocation, err := filepath.Abs(filepath.Join(configPath, configFileName)) if err != nil { - glog.Errorf("while trying to canonicalize %s: %v", configPath, err) + klog.Errorf("while trying to canonicalize %s: %v", configPath, err) continue } - glog.V(4).Infof("looking for .dockercfg at %s", absDockerConfigFileLocation) + klog.V(4).Infof("looking for .dockercfg at %s", absDockerConfigFileLocation) contents, err := ioutil.ReadFile(absDockerConfigFileLocation) if os.IsNotExist(err) { continue } if err != nil { - glog.V(4).Infof("while trying to read %s: %v", absDockerConfigFileLocation, err) + klog.V(4).Infof("while trying to read %s: %v", absDockerConfigFileLocation, err) continue } cfg, err := readDockerConfigFileFromBytes(contents) if err == nil { - glog.V(4).Infof("found .dockercfg at %s", absDockerConfigFileLocation) + klog.V(4).Infof("found .dockercfg at %s", absDockerConfigFileLocation) return cfg, nil } } @@ -125,18 +125,18 @@ func ReadDockerConfigJSONFile(searchPaths []string) (cfg DockerConfig, err error for _, configPath := range searchPaths { absDockerConfigFileLocation, err := filepath.Abs(filepath.Join(configPath, configJsonFileName)) if err != nil { - glog.Errorf("while trying to canonicalize %s: %v", configPath, err) + klog.Errorf("while trying to canonicalize %s: %v", configPath, err) continue } - glog.V(4).Infof("looking for %s at %s", configJsonFileName, absDockerConfigFileLocation) + klog.V(4).Infof("looking for %s at %s", configJsonFileName, absDockerConfigFileLocation) cfg, err = ReadSpecificDockerConfigJsonFile(absDockerConfigFileLocation) if err != nil { if !os.IsNotExist(err) { - glog.V(4).Infof("while trying to read %s: %v", absDockerConfigFileLocation, err) + klog.V(4).Infof("while trying to read %s: %v", absDockerConfigFileLocation, err) } continue } - glog.V(4).Infof("found valid %s at %s", configJsonFileName, absDockerConfigFileLocation) + klog.V(4).Infof("found valid %s at %s", configJsonFileName, absDockerConfigFileLocation) return cfg, nil } return nil, fmt.Errorf("couldn't find valid %s 
after checking in %v", configJsonFileName, searchPaths) @@ -188,7 +188,7 @@ func ReadUrl(url string, client *http.Client, header *http.Header) (body []byte, defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - glog.V(2).Infof("body of failing http response: %v", resp.Body) + klog.V(2).Infof("body of failing http response: %v", resp.Body) return nil, &HttpError{ StatusCode: resp.StatusCode, Url: url, @@ -213,7 +213,7 @@ func ReadDockerConfigFileFromUrl(url string, client *http.Client, header *http.H func readDockerConfigFileFromBytes(contents []byte) (cfg DockerConfig, err error) { if err = json.Unmarshal(contents, &cfg); err != nil { - glog.Errorf("while trying to parse blob %q: %v", contents, err) + klog.Errorf("while trying to parse blob %q: %v", contents, err) return nil, err } return @@ -222,7 +222,7 @@ func readDockerConfigFileFromBytes(contents []byte) (cfg DockerConfig, err error func readDockerConfigJsonFileFromBytes(contents []byte) (cfg DockerConfig, err error) { var cfgJson DockerConfigJson if err = json.Unmarshal(contents, &cfgJson); err != nil { - glog.Errorf("while trying to parse blob %q: %v", contents, err) + klog.Errorf("while trying to parse blob %q: %v", contents, err) return nil, err } cfg = cfgJson.Auths diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/gcp/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/gcp/BUILD index e5d2704fb86c..1d61eaaef33c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/gcp/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/gcp/BUILD @@ -10,27 +10,19 @@ go_library( name = "go_default_library", srcs = [ "doc.go", - "jwt.go", "metadata.go", ], importpath = "k8s.io/kubernetes/pkg/credentialprovider/gcp", deps = [ "//pkg/credentialprovider:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/github.com/spf13/pflag:go_default_library", - "//vendor/golang.org/x/oauth2:go_default_library", - "//vendor/golang.org/x/oauth2/google:go_default_library", - "//vendor/golang.org/x/oauth2/jwt:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) go_test( name = "go_default_test", - srcs = [ - "jwt_test.go", - "metadata_test.go", - ], + srcs = ["metadata_test.go"], embed = [":go_default_library"], deps = [ "//pkg/credentialprovider:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/gcp/jwt.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/gcp/jwt.go deleted file mode 100644 index d187560a3513..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/gcp/jwt.go +++ /dev/null @@ -1,120 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package gcp_credentials - -import ( - "io/ioutil" - "time" - - "github.com/golang/glog" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - "golang.org/x/oauth2/jwt" - "k8s.io/kubernetes/pkg/credentialprovider" - - "github.com/spf13/pflag" -) - -const ( - storageReadOnlyScope = "https://www.googleapis.com/auth/devstorage.read_only" - jwtFileFlagName = "google-json-key" -) - -var ( - flagJwtFile = pflag.String(jwtFileFlagName, "", - "The Google Cloud Platform Service Account JSON Key to use for authentication.") -) - -// A DockerConfigProvider that reads its configuration from Google -// Compute Engine metadata. -type jwtProvider struct { - path *string - config *jwt.Config - tokenUrl string -} - -// init registers the various means by which credentials may -// be resolved on GCP. -func init() { - pflag.CommandLine.MarkDeprecated(jwtFileFlagName, "Will be removed in a future version. "+ - "To maintain node-level authentication, credentials should instead be included in a docker "+ - "config.json file, located inside the Kubelet's --root-dir.") - credentialprovider.RegisterCredentialProvider("google-jwt-key", - &credentialprovider.CachingDockerConfigProvider{ - Provider: &jwtProvider{ - path: flagJwtFile, - }, - Lifetime: 30 * time.Minute, - }) -} - -// Enabled implements DockerConfigProvider for the JSON Key based implementation. -func (j *jwtProvider) Enabled() bool { - if *j.path == "" { - return false - } - - data, err := ioutil.ReadFile(*j.path) - if err != nil { - glog.Errorf("while reading file %s got %v", *j.path, err) - return false - } - config, err := google.JWTConfigFromJSON(data, storageReadOnlyScope) - if err != nil { - glog.Errorf("while parsing %s data got %v", *j.path, err) - return false - } - - j.config = config - if j.tokenUrl != "" { - j.config.TokenURL = j.tokenUrl - } - return true -} - -// LazyProvide implements DockerConfigProvider. Should never be called. 
-func (j *jwtProvider) LazyProvide() *credentialprovider.DockerConfigEntry { - return nil -} - -// Provide implements DockerConfigProvider -func (j *jwtProvider) Provide() credentialprovider.DockerConfig { - cfg := credentialprovider.DockerConfig{} - - ts := j.config.TokenSource(oauth2.NoContext) - token, err := ts.Token() - if err != nil { - glog.Errorf("while exchanging json key %s for access token %v", *j.path, err) - return cfg - } - if !token.Valid() { - glog.Errorf("Got back invalid token: %v", token) - return cfg - } - - entry := credentialprovider.DockerConfigEntry{ - Username: "_token", - Password: token.AccessToken, - Email: j.config.Email, - } - - // Add our entry for each of the supported container registry URLs - for _, k := range containerRegistryUrls { - cfg[k] = entry - } - return cfg -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/gcp/metadata.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/gcp/metadata.go index 4c668d0b3847..7620f0c040fd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/gcp/metadata.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/gcp/metadata.go @@ -23,8 +23,8 @@ import ( "strings" "time" - "github.com/golang/glog" utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/klog" "k8s.io/kubernetes/pkg/credentialprovider" ) @@ -117,7 +117,7 @@ func init() { func onGCEVM() bool { data, err := ioutil.ReadFile(gceProductNameFile) if err != nil { - glog.V(2).Infof("Error while reading product_name: %v", err) + klog.V(2).Infof("Error while reading product_name: %v", err) return false } name := strings.TrimSpace(string(data)) @@ -139,7 +139,7 @@ func (g *dockerConfigKeyProvider) Provide() credentialprovider.DockerConfig { // Read the contents of the google-dockercfg metadata key and // parse them as an alternate .dockercfg if cfg, err := credentialprovider.ReadDockerConfigFileFromUrl(dockerConfigKey, g.Client, metadataHeader); err != nil { - glog.Errorf("while reading 'google-dockercfg' metadata: %v", err) + klog.Errorf("while reading 'google-dockercfg' metadata: %v", err) } else { return cfg } @@ -156,17 +156,17 @@ func (g *dockerConfigUrlKeyProvider) LazyProvide() *credentialprovider.DockerCon func (g *dockerConfigUrlKeyProvider) Provide() credentialprovider.DockerConfig { // Read the contents of the google-dockercfg-url key and load a .dockercfg from there if url, err := credentialprovider.ReadUrl(dockerConfigUrlKey, g.Client, metadataHeader); err != nil { - glog.Errorf("while reading 'google-dockercfg-url' metadata: %v", err) + klog.Errorf("while reading 'google-dockercfg-url' metadata: %v", err) } else { if strings.HasPrefix(string(url), "http") { if cfg, err := credentialprovider.ReadDockerConfigFileFromUrl(string(url), g.Client, nil); err != nil { - glog.Errorf("while reading 'google-dockercfg-url'-specified url: %s, %v", string(url), err) + klog.Errorf("while reading 'google-dockercfg-url'-specified url: %s, %v", string(url), err) } else { return cfg } } else { // TODO(mattmoor): support reading alternate scheme URLs (e.g. 
gs:// or s3://) - glog.Errorf("Unsupported URL scheme: %s", string(url)) + klog.Errorf("Unsupported URL scheme: %s", string(url)) } } @@ -209,7 +209,7 @@ func (g *containerRegistryProvider) Enabled() bool { value := runWithBackoff(func() ([]byte, error) { value, err := credentialprovider.ReadUrl(serviceAccounts, g.Client, metadataHeader) if err != nil { - glog.V(2).Infof("Failed to Get service accounts from gce metadata server: %v", err) + klog.V(2).Infof("Failed to Get service accounts from gce metadata server: %v", err) } return value, err }) @@ -225,20 +225,20 @@ func (g *containerRegistryProvider) Enabled() bool { } } if !defaultServiceAccountExists { - glog.V(2).Infof("'default' service account does not exist. Found following service accounts: %q", string(value)) + klog.V(2).Infof("'default' service account does not exist. Found following service accounts: %q", string(value)) return false } url := metadataScopes + "?alt=json" value = runWithBackoff(func() ([]byte, error) { value, err := credentialprovider.ReadUrl(url, g.Client, metadataHeader) if err != nil { - glog.V(2).Infof("Failed to Get scopes in default service account from gce metadata server: %v", err) + klog.V(2).Infof("Failed to Get scopes in default service account from gce metadata server: %v", err) } return value, err }) var scopes []string if err := json.Unmarshal(value, &scopes); err != nil { - glog.Errorf("Failed to unmarshal scopes: %v", err) + klog.Errorf("Failed to unmarshal scopes: %v", err) return false } for _, v := range scopes { @@ -247,7 +247,7 @@ func (g *containerRegistryProvider) Enabled() bool { return true } } - glog.Warningf("Google container registry is disabled, no storage scope is available: %s", value) + klog.Warningf("Google container registry is disabled, no storage scope is available: %s", value) return false } @@ -268,19 +268,19 @@ func (g *containerRegistryProvider) Provide() credentialprovider.DockerConfig { tokenJsonBlob, err := credentialprovider.ReadUrl(metadataToken, g.Client, metadataHeader) if err != nil { - glog.Errorf("while reading access token endpoint: %v", err) + klog.Errorf("while reading access token endpoint: %v", err) return cfg } email, err := credentialprovider.ReadUrl(metadataEmail, g.Client, metadataHeader) if err != nil { - glog.Errorf("while reading email endpoint: %v", err) + klog.Errorf("while reading email endpoint: %v", err) return cfg } var parsedBlob tokenBlob if err := json.Unmarshal([]byte(tokenJsonBlob), &parsedBlob); err != nil { - glog.Errorf("while parsing json blob %s: %v", tokenJsonBlob, err) + klog.Errorf("while parsing json blob %s: %v", tokenJsonBlob, err) return cfg } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/keyring.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/keyring.go index b269f474600f..6f5fad5fc478 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/keyring.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/keyring.go @@ -23,9 +23,8 @@ import ( "sort" "strings" - "github.com/golang/glog" + "k8s.io/klog" - dockertypes "github.com/docker/docker/api/types" "k8s.io/apimachinery/pkg/util/sets" ) @@ -52,17 +51,39 @@ type lazyDockerKeyring struct { Providers []DockerConfigProvider } +// AuthConfig contains authorization information for connecting to a Registry +// This type mirrors "github.com/docker/docker/api/types.AuthConfig" +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string 
`json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + // Email is an optional value associated with the username. + // This field is deprecated and will be removed in a later + // version of docker. + Email string `json:"email,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. + IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} + // LazyAuthConfiguration wraps dockertypes.AuthConfig, potentially deferring its // binding. If Provider is non-nil, it will be used to obtain new credentials // by calling LazyProvide() on it. type LazyAuthConfiguration struct { - dockertypes.AuthConfig + AuthConfig Provider DockerConfigProvider } func DockerConfigEntryToLazyAuthConfiguration(ident DockerConfigEntry) LazyAuthConfiguration { return LazyAuthConfiguration{ - AuthConfig: dockertypes.AuthConfig{ + AuthConfig: AuthConfig{ Username: ident.Username, Password: ident.Password, Email: ident.Email, @@ -92,7 +113,7 @@ func (dk *BasicDockerKeyring) Add(cfg DockerConfig) { } parsed, err := url.Parse(value) if err != nil { - glog.Errorf("Entry %q in dockercfg invalid (%v), ignoring", loc, err) + klog.Errorf("Entry %q in dockercfg invalid (%v), ignoring", loc, err) continue } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/plugins.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/plugins.go index c817fefa2b7e..5ea3a000e8bc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/plugins.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/plugins.go @@ -21,7 +21,7 @@ import ( "sort" "sync" - "github.com/golang/glog" + "k8s.io/klog" ) // All registered credential providers. 
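Nearly every hunk in this credentialprovider section is the same mechanical migration: the vendored code drops `github.com/golang/glog` in favor of `k8s.io/klog`, leaving call sites untouched because klog is an API-compatible fork of glog. A minimal, self-contained sketch of the pattern — the flag wiring here is illustrative, not taken from this diff:

```go
package main

import (
	"flag"

	"k8s.io/klog" // drop-in replacement for github.com/golang/glog
)

func main() {
	// klog registers its flags (-v, -logtostderr, ...) on an explicit
	// FlagSet; passing nil attaches them to flag.CommandLine.
	klog.InitFlags(nil)
	_ = flag.Set("v", "4")
	flag.Parse()

	// Call sites are identical to glog, which is why the hunks above only
	// touch the import path and the glog./klog. prefix.
	klog.V(4).Infof("verbose message, emitted because -v=4")
	klog.Errorf("error message: %v", "example")
	klog.Flush() // flush buffered log I/O before exit
}
```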
@@ -38,9 +38,9 @@ func RegisterCredentialProvider(name string, provider DockerConfigProvider) { defer providersMutex.Unlock() _, found := providers[name] if found { - glog.Fatalf("Credential provider %q was registered twice", name) + klog.Fatalf("Credential provider %q was registered twice", name) } - glog.V(4).Infof("Registered credential provider %q", name) + klog.V(4).Infof("Registered credential provider %q", name) providers[name] = provider } @@ -61,7 +61,7 @@ func NewDockerKeyring() DockerKeyring { for _, key := range stringKeys { provider := providers[key] if provider.Enabled() { - glog.V(4).Infof("Registering credential provider: %v", key) + klog.V(4).Infof("Registering credential provider: %v", key) keyring.Providers = append(keyring.Providers, provider) } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/provider.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/provider.go index 419dc43e5dfd..16b4e601a10a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/provider.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/provider.go @@ -22,8 +22,7 @@ import ( "sync" "time" - dockertypes "github.com/docker/docker/api/types" - "github.com/golang/glog" + "k8s.io/klog" ) // DockerConfigProvider is the interface that registered extensions implement @@ -40,14 +39,12 @@ type DockerConfigProvider interface { LazyProvide() *DockerConfigEntry } -func LazyProvide(creds LazyAuthConfiguration) dockertypes.AuthConfig { +func LazyProvide(creds LazyAuthConfiguration) AuthConfig { if creds.Provider != nil { entry := *creds.Provider.LazyProvide() return DockerConfigEntryToLazyAuthConfiguration(entry).AuthConfig - } else { - return creds.AuthConfig } - + return creds.AuthConfig } // A DockerConfigProvider that simply reads the .dockercfg file @@ -86,7 +83,7 @@ func (d *defaultDockerConfigProvider) Provide() DockerConfig { if cfg, err := ReadDockerConfigFile(); err == nil { return cfg } else if !os.IsNotExist(err) { - glog.V(4).Infof("Unable to parse Docker config file: %v", err) + klog.V(4).Infof("Unable to parse Docker config file: %v", err) } return DockerConfig{} } @@ -116,7 +113,7 @@ func (d *CachingDockerConfigProvider) Provide() DockerConfig { return d.cacheDockerConfig } - glog.V(2).Infof("Refreshing cache for provider: %v", reflect.TypeOf(d.Provider).String()) + klog.V(2).Infof("Refreshing cache for provider: %v", reflect.TypeOf(d.Provider).String()) d.cacheDockerConfig = d.Provider.Provide() d.expiration = time.Now().Add(d.Lifetime) return d.cacheDockerConfig diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/rancher/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/rancher/BUILD index af1f490b1038..02ab95c7fce8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/rancher/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/rancher/BUILD @@ -25,8 +25,8 @@ go_library( importpath = "k8s.io/kubernetes/pkg/credentialprovider/rancher", deps = [ "//pkg/credentialprovider:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/rancher/go-rancher/client:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/rancher/rancher_registry_credentials.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/rancher/rancher_registry_credentials.go 
index 3143560dfe95..d122308ed82b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/rancher/rancher_registry_credentials.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/credentialprovider/rancher/rancher_registry_credentials.go @@ -20,8 +20,8 @@ import ( "os" "time" - "github.com/golang/glog" "github.com/rancher/go-rancher/client" + "k8s.io/klog" "k8s.io/kubernetes/pkg/credentialprovider" ) @@ -102,13 +102,13 @@ func (g *rancherCredentialsGetter) getCredentials() []registryCredential { var registryCreds []registryCredential credColl, err := g.client.RegistryCredential.List(client.NewListOpts()) if err != nil { - glog.Errorf("Failed to pull registry credentials from rancher %v", err) + klog.Errorf("Failed to pull registry credentials from rancher %v", err) return registryCreds } for _, cred := range credColl.Data { registry := &client.Registry{} if err = g.client.GetLink(cred.Resource, "registry", registry); err != nil { - glog.Errorf("Failed to pull registry from rancher %v", err) + klog.Errorf("Failed to pull registry from rancher %v", err) return registryCreds } registryCred := registryCredential{ diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/features/kube_features.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/features/kube_features.go index b121e91fbdaa..cd35ecb70446 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/features/kube_features.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/features/kube_features.go @@ -49,6 +49,7 @@ const ( // owner: @vishh // alpha: v1.5 // + // DEPRECATED - This feature is deprecated by Pod Priority and Preemption as of Kubernetes 1.13. // Ensures guaranteed scheduling of pods marked with a special pod annotation `scheduler.alpha.kubernetes.io/critical-pod` // and also prevents them from being evicted from a node. // Note: This feature is not supported for `BestEffort` pods. @@ -60,8 +61,8 @@ const ( // Enables support for Device Plugins DevicePlugins utilfeature.Feature = "DevicePlugins" - // owner: @gmarek - // alpha: v1.6 + // owner: @Huang-Wei + // beta: v1.13 // // Changes the logic behind evicting Pods from not ready Nodes // to take advantage of NoExecute Taints and Tolerations. @@ -139,6 +140,7 @@ const ( // owner: @jsafrane // GA: v1.12 // + // Note: This feature gate is unconditionally enabled in v1.13 and will be removed in v1.14. // Enable mount propagation of volumes. MountPropagation utilfeature.Feature = "MountPropagation" @@ -179,13 +181,6 @@ const ( // Enable nodes to exclude themselves from service load balancers ServiceNodeExclusion utilfeature.Feature = "ServiceNodeExclusion" - // owner @brendandburns - // deprecated: v1.10 - // - // Enable the service proxy to contact external IP addresses. Note this feature is present - // only for backward compatibility, it will be removed in the 1.11 release. 
- ServiceProxyAllowExternalIPs utilfeature.Feature = "ServiceProxyAllowExternalIPs" - // owner: @jsafrane // alpha: v1.9 // @@ -193,22 +188,26 @@ const ( MountContainers utilfeature.Feature = "MountContainers" // owner: @msau42 - // alpha: v1.9 + // GA: v1.13 // // Extend the default scheduler to be aware of PV topology and handle PV binding - // Before moving to beta, resolve Kubernetes issue #56180 VolumeScheduling utilfeature.Feature = "VolumeScheduling" // owner: @vladimirvivien - // beta: v1.10 + // GA: v1.13 // // Enable mount/attachment of Container Storage Interface (CSI) backed PVs CSIPersistentVolume utilfeature.Feature = "CSIPersistentVolume" // owner: @saad-ali // alpha: v1.12 - // Enable automatic installation of CRD for csi.storage.k8s.io API objects. - CSICRDAutoInstall utilfeature.Feature = "CSICRDAutoInstall" + // Enable all logic related to the CSIDriver API object in csi.storage.k8s.io + CSIDriverRegistry utilfeature.Feature = "CSIDriverRegistry" + + // owner: @verult + // alpha: v1.12 + // Enable all logic related to the CSINodeInfo API object in csi.storage.k8s.io + CSINodeInfo utilfeature.Feature = "CSINodeInfo" // owner @MrHohn // beta: v1.10 @@ -218,6 +217,7 @@ const ( // owner: @screeley44 // alpha: v1.9 + // beta: v1.13 // // Enable Block volume support in containers. BlockVolume utilfeature.Feature = "BlockVolume" @@ -252,15 +252,8 @@ const ( // Enable Hyper-V containers on Windows HyperVContainer utilfeature.Feature = "HyperVContainer" - // owner: @joelsmith - // deprecated: v1.10 - // - // Mount secret, configMap, downwardAPI and projected volumes ReadOnly. Note: this feature - // gate is present only for backward compatibility, it will be removed in the 1.11 release. - ReadOnlyAPIDataVolumes utilfeature.Feature = "ReadOnlyAPIDataVolumes" - // owner: @k82cn - // alpha: v1.10 + // beta: v1.12 // // Schedule DaemonSet Pods by default scheduler instead of DaemonSet controller ScheduleDaemonSetPods utilfeature.Feature = "ScheduleDaemonSetPods" @@ -277,6 +270,14 @@ const ( // Enable ServiceAccountTokenVolumeProjection support in ProjectedVolumes. TokenRequestProjection utilfeature.Feature = "TokenRequestProjection" + // owner: @mikedanese + // alpha: v1.13 + // + // Migrate ServiceAccount volumes to use a projected volume consisting of a + // ServiceAccountTokenVolumeProjection. This feature adds new required flags + // to the API server. + BoundServiceAccountTokenVolume utilfeature.Feature = "BoundServiceAccountTokenVolume" + // owner: @Random-Liu // beta: v1.11 // @@ -284,7 +285,7 @@ const ( CRIContainerLogRotation utilfeature.Feature = "CRIContainerLogRotation" // owner: @verult - // beta: v1.10 + // GA: v1.13 // // Enables the regional PD feature on GCE. GCERegionalPersistentDisk utilfeature.Feature = "GCERegionalPersistentDisk" @@ -331,7 +332,7 @@ const ( VolumeSubpathEnvExpansion utilfeature.Feature = "VolumeSubpathEnvExpansion" // owner: @vikaschoudhary16 - // alpha: v1.11 + // GA: v1.13 // // // Enable probe based plugin watcher utility for discovering Kubelet plugins @@ -387,15 +388,11 @@ const ( // Allow TTL controller to clean up Pods and Jobs after they finish. TTLAfterFinished utilfeature.Feature = "TTLAfterFinished" - // owner: @jsafrane - // Kubernetes skips attaching CSI volumes that don't require attachment. - // - CSISkipAttach utilfeature.Feature = "CSISkipAttach" - - // owner: @jsafrane + // owner: @dashpole + // alpha: v1.13 // - // Kubelet sends pod information in NodePublish CSI call when a CSI driver wants so. 
- CSIPodInfo utilfeature.Feature = "CSIPodInfo" + // Enables the kubelet's pod resources grpc endpoint + KubeletPodResources utilfeature.Feature = "KubeletPodResources" ) func init() { @@ -406,12 +403,12 @@ func init() { // To add a new feature, define a key for it above and add it here. The features will be // available throughout Kubernetes binaries. var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureSpec{ - AppArmor: {Default: true, PreRelease: utilfeature.Beta}, - DynamicKubeletConfig: {Default: true, PreRelease: utilfeature.Beta}, + AppArmor: {Default: true, PreRelease: utilfeature.Beta}, + DynamicKubeletConfig: {Default: true, PreRelease: utilfeature.Beta}, ExperimentalHostUserNamespaceDefaultingGate: {Default: false, PreRelease: utilfeature.Beta}, ExperimentalCriticalPodAnnotation: {Default: false, PreRelease: utilfeature.Alpha}, DevicePlugins: {Default: true, PreRelease: utilfeature.Beta}, - TaintBasedEvictions: {Default: false, PreRelease: utilfeature.Alpha}, + TaintBasedEvictions: {Default: true, PreRelease: utilfeature.Beta}, RotateKubeletServerCertificate: {Default: true, PreRelease: utilfeature.Beta}, RotateKubeletClientCertificate: {Default: true, PreRelease: utilfeature.Beta}, PersistentLocalVolumes: {Default: true, PreRelease: utilfeature.Beta}, @@ -427,32 +424,34 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS QOSReserved: {Default: false, PreRelease: utilfeature.Alpha}, ExpandPersistentVolumes: {Default: true, PreRelease: utilfeature.Beta}, ExpandInUsePersistentVolumes: {Default: false, PreRelease: utilfeature.Alpha}, - AttachVolumeLimit: {Default: false, PreRelease: utilfeature.Beta}, + AttachVolumeLimit: {Default: true, PreRelease: utilfeature.Beta}, CPUManager: {Default: true, PreRelease: utilfeature.Beta}, CPUCFSQuotaPeriod: {Default: false, PreRelease: utilfeature.Alpha}, ServiceNodeExclusion: {Default: false, PreRelease: utilfeature.Alpha}, MountContainers: {Default: false, PreRelease: utilfeature.Alpha}, - VolumeScheduling: {Default: true, PreRelease: utilfeature.Beta}, - CSIPersistentVolume: {Default: true, PreRelease: utilfeature.Beta}, - CSICRDAutoInstall: {Default: false, PreRelease: utilfeature.Alpha}, + VolumeScheduling: {Default: true, PreRelease: utilfeature.GA}, + CSIPersistentVolume: {Default: true, PreRelease: utilfeature.GA}, + CSIDriverRegistry: {Default: false, PreRelease: utilfeature.Alpha}, + CSINodeInfo: {Default: false, PreRelease: utilfeature.Alpha}, CustomPodDNS: {Default: true, PreRelease: utilfeature.Beta}, - BlockVolume: {Default: false, PreRelease: utilfeature.Alpha}, + BlockVolume: {Default: true, PreRelease: utilfeature.Beta}, StorageObjectInUseProtection: {Default: true, PreRelease: utilfeature.GA}, ResourceLimitsPriorityFunction: {Default: false, PreRelease: utilfeature.Alpha}, SupportIPVSProxyMode: {Default: true, PreRelease: utilfeature.GA}, SupportPodPidsLimit: {Default: false, PreRelease: utilfeature.Alpha}, HyperVContainer: {Default: false, PreRelease: utilfeature.Alpha}, - ScheduleDaemonSetPods: {Default: false, PreRelease: utilfeature.Alpha}, + ScheduleDaemonSetPods: {Default: true, PreRelease: utilfeature.Beta}, TokenRequest: {Default: true, PreRelease: utilfeature.Beta}, TokenRequestProjection: {Default: true, PreRelease: utilfeature.Beta}, + BoundServiceAccountTokenVolume: {Default: false, PreRelease: utilfeature.Alpha}, CRIContainerLogRotation: {Default: true, PreRelease: utilfeature.Beta}, - GCERegionalPersistentDisk: {Default: true, PreRelease: 
utilfeature.Beta}, + GCERegionalPersistentDisk: {Default: true, PreRelease: utilfeature.GA}, RunAsGroup: {Default: false, PreRelease: utilfeature.Alpha}, VolumeSubpath: {Default: true, PreRelease: utilfeature.GA}, BalanceAttachedNodeVolumes: {Default: false, PreRelease: utilfeature.Alpha}, PodReadinessGates: {Default: true, PreRelease: utilfeature.Beta}, VolumeSubpathEnvExpansion: {Default: false, PreRelease: utilfeature.Alpha}, - KubeletPluginsWatcher: {Default: true, PreRelease: utilfeature.Beta}, + KubeletPluginsWatcher: {Default: true, PreRelease: utilfeature.GA}, ResourceQuotaScopeSelectors: {Default: true, PreRelease: utilfeature.Beta}, CSIBlockVolume: {Default: false, PreRelease: utilfeature.Alpha}, RuntimeClass: {Default: false, PreRelease: utilfeature.Alpha}, @@ -461,24 +460,24 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS VolumeSnapshotDataSource: {Default: false, PreRelease: utilfeature.Alpha}, ProcMountType: {Default: false, PreRelease: utilfeature.Alpha}, TTLAfterFinished: {Default: false, PreRelease: utilfeature.Alpha}, - CSISkipAttach: {Default: false, PreRelease: utilfeature.Alpha}, - CSIPodInfo: {Default: false, PreRelease: utilfeature.Alpha}, + KubeletPodResources: {Default: false, PreRelease: utilfeature.Alpha}, // inherited features from generic apiserver, relisted here to get a conflict if it is changed // unintentionally on either side: genericfeatures.StreamingProxyRedirects: {Default: true, PreRelease: utilfeature.Beta}, genericfeatures.AdvancedAuditing: {Default: true, PreRelease: utilfeature.GA}, + genericfeatures.DynamicAuditing: {Default: false, PreRelease: utilfeature.Alpha}, genericfeatures.APIResponseCompression: {Default: false, PreRelease: utilfeature.Alpha}, genericfeatures.Initializers: {Default: false, PreRelease: utilfeature.Alpha}, genericfeatures.APIListChunking: {Default: true, PreRelease: utilfeature.Beta}, - genericfeatures.DryRun: {Default: false, PreRelease: utilfeature.Alpha}, + genericfeatures.DryRun: {Default: true, PreRelease: utilfeature.Beta}, // inherited features from apiextensions-apiserver, relisted here to get a conflict if it is changed // unintentionally on either side: - apiextensionsfeatures.CustomResourceValidation: {Default: true, PreRelease: utilfeature.Beta}, - apiextensionsfeatures.CustomResourceSubresources: {Default: true, PreRelease: utilfeature.Beta}, + apiextensionsfeatures.CustomResourceValidation: {Default: true, PreRelease: utilfeature.Beta}, + apiextensionsfeatures.CustomResourceSubresources: {Default: true, PreRelease: utilfeature.Beta}, + apiextensionsfeatures.CustomResourceWebhookConversion: {Default: false, PreRelease: utilfeature.Alpha}, // features that enable backwards compatibility but are scheduled to be removed - ServiceProxyAllowExternalIPs: {Default: false, PreRelease: utilfeature.Deprecated}, - ReadOnlyAPIDataVolumes: {Default: true, PreRelease: utilfeature.Deprecated}, + // ... 
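For context on how entries in `defaultKubernetesFeatureGates` behave at run time: each `FeatureSpec` carries a default and a maturity stage, and call sites query the gate by name. Below is a rough, self-contained sketch against the `utilfeature` API this file imports; `ExampleGate` and the standalone gate are ours for illustration, not part of the diff, and the exact constructor name assumes the v1.12/v1.13-era `k8s.io/apiserver/pkg/util/feature` package:

```go
package main

import (
	"fmt"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
)

// ExampleGate is a hypothetical feature used only for this sketch; it does
// not appear in the vendored kube_features.go above.
const ExampleGate utilfeature.Feature = "ExampleGate"

func main() {
	// A standalone gate, populated the same way kube_features.go registers
	// defaultKubernetesFeatureGates in its init().
	fg := utilfeature.NewFeatureGate()
	if err := fg.Add(map[utilfeature.Feature]utilfeature.FeatureSpec{
		ExampleGate: {Default: true, PreRelease: utilfeature.Beta},
	}); err != nil {
		panic(err)
	}

	// Call sites guard optional behavior on Enabled(); --feature-gates
	// style overrides arrive through Set().
	fmt.Println(fg.Enabled(ExampleGate)) // true (the declared Default)

	if err := fg.Set("ExampleGate=false"); err != nil {
		panic(err)
	}
	fmt.Println(fg.Enabled(ExampleGate)) // false after the override
}
```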
} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/.import-restrictions b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/.import-restrictions index 79f6b5a9384d..6cc204640d31 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/.import-restrictions +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/.import-restrictions @@ -122,6 +122,7 @@ "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util", "k8s.io/kubernetes/pkg/scheduler/api", "k8s.io/kubernetes/pkg/scheduler/cache", + "k8s.io/kubernetes/pkg/scheduler/internal/cache", "k8s.io/kubernetes/pkg/scheduler/util", "k8s.io/kubernetes/pkg/scheduler/volumebinder", "k8s.io/kubernetes/pkg/security/apparmor", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/BUILD index d9cb4e0774bd..cfb06a76450c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/BUILD @@ -9,59 +9,27 @@ load( go_test( name = "go_default_test", srcs = [ - "autoscale_test.go", - "clusterrolebinding_test.go", - "configmap_test.go", - "deployment_test.go", - "env_file_test.go", - "generate_test.go", "history_test.go", - "namespace_test.go", - "pdb_test.go", - "priorityclass_test.go", - "quota_test.go", - "rolebinding_test.go", "rollback_test.go", "rolling_updater_test.go", "rollout_status_test.go", - "run_test.go", "scale_test.go", - "secret_for_docker_registry_test.go", - "secret_for_tls_test.go", - "secret_test.go", - "service_basic_test.go", - "service_test.go", - "serviceaccount_test.go", - "sorter_test.go", ], embed = [":go_default_library"], deps = [ - "//pkg/api/legacyscheme:go_default_library", - "//pkg/api/testapi:go_default_library", - "//pkg/api/testing:go_default_library", "//pkg/kubectl/scheme:go_default_library", "//pkg/kubectl/util:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", - "//staging/src/k8s.io/api/apps/v1beta1:go_default_library", "//staging/src/k8s.io/api/autoscaling/v1:go_default_library", - "//staging/src/k8s.io/api/batch/v1:go_default_library", - "//staging/src/k8s.io/api/batch/v1beta1:go_default_library", - "//staging/src/k8s.io/api/batch/v2alpha1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library", - "//staging/src/k8s.io/api/policy/v1beta1:go_default_library", - "//staging/src/k8s.io/api/rbac/v1:go_default_library", - "//staging/src/k8s.io/api/rbac/v1beta1:go_default_library", - "//staging/src/k8s.io/api/scheduling/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", @@ -72,9 +40,6 @@ go_test( "//staging/src/k8s.io/client-go/scale:go_default_library", "//staging/src/k8s.io/client-go/scale/fake:go_default_library", "//staging/src/k8s.io/client-go/testing:go_default_library", - "//staging/src/k8s.io/client-go/util/testing:go_default_library", - "//vendor/github.com/spf13/cobra:go_default_library", - "//vendor/k8s.io/utils/pointer:go_default_library", ], ) @@ -82,80 +47,39 @@ go_library( name = "go_default_library", srcs = [ "apply.go", - "autoscale.go", - "clusterrolebinding.go", "conditions.go", - "configmap.go", - "deployment.go", "doc.go", - "env_file.go", - "generate.go", "history.go", "interfaces.go", - "namespace.go", - "pdb.go", - "priorityclass.go", - "quota.go", - "rolebinding.go", "rollback.go", "rolling_updater.go", "rollout_status.go", - "run.go", "scale.go", - "secret.go", - "secret_for_docker_registry.go", - "secret_for_tls.go", - "service.go", - "service_basic.go", - "serviceaccount.go", - "sorter.go", ], importpath = "k8s.io/kubernetes/pkg/kubectl", deps = [ - "//pkg/api/legacyscheme:go_default_library", - "//pkg/api/pod:go_default_library", - "//pkg/api/v1/pod:go_default_library", - "//pkg/apis/apps:go_default_library", - "//pkg/apis/core:go_default_library", - "//pkg/apis/core/v1:go_default_library", - "//pkg/apis/extensions:go_default_library", - "//pkg/controller/deployment/util:go_default_library", - "//pkg/credentialprovider:go_default_library", "//pkg/kubectl/apps:go_default_library", + "//pkg/kubectl/describe/versioned:go_default_library", "//pkg/kubectl/scheme:go_default_library", "//pkg/kubectl/util:go_default_library", - "//pkg/kubectl/util/hash:go_default_library", + "//pkg/kubectl/util/deployment:go_default_library", + "//pkg/kubectl/util/podutils:go_default_library", "//pkg/kubectl/util/slice:go_default_library", - "//pkg/printers:go_default_library", - "//pkg/printers/internalversion:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", - "//staging/src/k8s.io/api/apps/v1beta1:go_default_library", "//staging/src/k8s.io/api/autoscaling/v1:go_default_library", - "//staging/src/k8s.io/api/batch/v1:go_default_library", - "//staging/src/k8s.io/api/batch/v1beta1:go_default_library", - "//staging/src/k8s.io/api/batch/v2alpha1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library", - "//staging/src/k8s.io/api/policy/v1beta1:go_default_library", - "//staging/src/k8s.io/api/rbac/v1:go_default_library", - "//staging/src/k8s.io/api/rbac/v1beta1:go_default_library", - "//staging/src/k8s.io/api/scheduling/v1beta1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - 
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/json:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", @@ -164,12 +88,7 @@ go_library( "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/scale:go_default_library", "//staging/src/k8s.io/client-go/util/integer:go_default_library", - "//staging/src/k8s.io/client-go/util/jsonpath:go_default_library", "//staging/src/k8s.io/client-go/util/retry:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/github.com/spf13/cobra:go_default_library", - "//vendor/github.com/spf13/pflag:go_default_library", - "//vendor/vbom.ml/util/sortorder:go_default_library", ], ) @@ -187,7 +106,10 @@ filegroup( "//pkg/kubectl/apply:all-srcs", "//pkg/kubectl/apps:all-srcs", "//pkg/kubectl/cmd:all-srcs", + "//pkg/kubectl/describe:all-srcs", "//pkg/kubectl/explain:all-srcs", + "//pkg/kubectl/generate:all-srcs", + "//pkg/kubectl/generated:all-srcs", "//pkg/kubectl/metricsutil:all-srcs", "//pkg/kubectl/polymorphichelpers:all-srcs", "//pkg/kubectl/proxy:all-srcs", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/apply.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/apply.go index 3c82aee808e6..78447d498cf0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/apply.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/apply.go @@ -135,7 +135,8 @@ func CreateApplyAnnotation(obj runtime.Object, codec runtime.Encoder) error { return setOriginalConfiguration(obj, modified) } -// Create the annotation used by kubectl apply only when createAnnotation is true +// CreateOrUpdateAnnotation creates the annotation used by +// kubectl apply only when createAnnotation is true // Otherwise, only update the annotation when it already exists func CreateOrUpdateAnnotation(createAnnotation bool, obj runtime.Object, codec runtime.Encoder) error { if createAnnotation { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/autoscale.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/autoscale.go deleted file mode 100644 index 39c78ca31df2..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/autoscale.go +++ /dev/null @@ -1,85 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubectl - -import ( - "fmt" - - autoscalingv1 "k8s.io/api/autoscaling/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -// HorizontalPodAutoscalerV1Generator supports stable generation of a horizontal pod autoscaler. -type HorizontalPodAutoscalerGeneratorV1 struct { - Name string - ScaleRefKind string - ScaleRefName string - ScaleRefApiVersion string - MinReplicas int32 - MaxReplicas int32 - CPUPercent int32 -} - -// Ensure it supports the generator pattern that uses parameters specified during construction. -var _ StructuredGenerator = &HorizontalPodAutoscalerGeneratorV1{} - -// StructuredGenerate outputs a horizontal pod autoscaler object using the configured fields. -func (s *HorizontalPodAutoscalerGeneratorV1) StructuredGenerate() (runtime.Object, error) { - if err := s.validate(); err != nil { - return nil, err - } - - scaler := autoscalingv1.HorizontalPodAutoscaler{ - ObjectMeta: metav1.ObjectMeta{ - Name: s.Name, - }, - Spec: autoscalingv1.HorizontalPodAutoscalerSpec{ - ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{ - Kind: s.ScaleRefKind, - Name: s.ScaleRefName, - APIVersion: s.ScaleRefApiVersion, - }, - MaxReplicas: s.MaxReplicas, - }, - } - - if s.MinReplicas > 0 { - v := int32(s.MinReplicas) - scaler.Spec.MinReplicas = &v - } - if s.CPUPercent >= 0 { - c := int32(s.CPUPercent) - scaler.Spec.TargetCPUUtilizationPercentage = &c - } - - return &scaler, nil -} - -// validate check if the caller has set the right fields. -func (s HorizontalPodAutoscalerGeneratorV1) validate() error { - if len(s.Name) == 0 { - return fmt.Errorf("name must be specified") - } - if s.MaxReplicas < 1 { - return fmt.Errorf("'max' is a required parameter and must be at least 1") - } - if s.MinReplicas > s.MaxReplicas { - return fmt.Errorf("'max' must be greater than or equal to 'min'") - } - return nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/clusterrolebinding.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/clusterrolebinding.go deleted file mode 100644 index d94656069117..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/clusterrolebinding.go +++ /dev/null @@ -1,158 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - - "strings" - - rbacv1beta1 "k8s.io/api/rbac/v1beta1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/sets" -) - -// ClusterRoleBindingGeneratorV1 supports stable generation of a clusterRoleBinding. 
-type ClusterRoleBindingGeneratorV1 struct { - // Name of clusterRoleBinding (required) - Name string - // ClusterRole for the clusterRoleBinding (required) - ClusterRole string - // Users to derive the clusterRoleBinding from (optional) - Users []string - // Groups to derive the clusterRoleBinding from (optional) - Groups []string - // ServiceAccounts to derive the clusterRoleBinding from in namespace:name format(optional) - ServiceAccounts []string -} - -// Ensure it supports the generator pattern that uses parameter injection. -var _ Generator = &ClusterRoleBindingGeneratorV1{} - -// Ensure it supports the generator pattern that uses parameters specified during construction. -var _ StructuredGenerator = &ClusterRoleBindingGeneratorV1{} - -// Generate returns a clusterRoleBinding using the specified parameters. -func (s ClusterRoleBindingGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), genericParams) - if err != nil { - return nil, err - } - delegate := &ClusterRoleBindingGeneratorV1{} - userStrings, found := genericParams["user"] - if found { - fromFileArray, isArray := userStrings.([]string) - if !isArray { - return nil, fmt.Errorf("expected []string, found :%v", userStrings) - } - delegate.Users = fromFileArray - delete(genericParams, "user") - } - groupStrings, found := genericParams["group"] - if found { - fromLiteralArray, isArray := groupStrings.([]string) - if !isArray { - return nil, fmt.Errorf("expected []string, found :%v", groupStrings) - } - delegate.Groups = fromLiteralArray - delete(genericParams, "group") - } - saStrings, found := genericParams["serviceaccount"] - if found { - fromLiteralArray, isArray := saStrings.([]string) - if !isArray { - return nil, fmt.Errorf("expected []string, found :%v", saStrings) - } - delegate.ServiceAccounts = fromLiteralArray - delete(genericParams, "serviceaccount") - } - params := map[string]string{} - for key, value := range genericParams { - strVal, isString := value.(string) - if !isString { - return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) - } - params[key] = strVal - } - delegate.Name = params["name"] - delegate.ClusterRole = params["clusterrole"] - return delegate.StructuredGenerate() -} - -// ParamNames returns the set of supported input parameters when using the parameter injection generator pattern. -func (s ClusterRoleBindingGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"clusterrole", false}, - {"user", false}, - {"group", false}, - {"serviceaccount", false}, - } -} - -// StructuredGenerate outputs a clusterRoleBinding object using the configured fields. 
-func (s ClusterRoleBindingGeneratorV1) StructuredGenerate() (runtime.Object, error) { - if err := s.validate(); err != nil { - return nil, err - } - clusterRoleBinding := &rbacv1beta1.ClusterRoleBinding{} - clusterRoleBinding.Name = s.Name - clusterRoleBinding.RoleRef = rbacv1beta1.RoleRef{ - APIGroup: rbacv1beta1.GroupName, - Kind: "ClusterRole", - Name: s.ClusterRole, - } - for _, user := range sets.NewString(s.Users...).List() { - clusterRoleBinding.Subjects = append(clusterRoleBinding.Subjects, rbacv1beta1.Subject{ - Kind: rbacv1beta1.UserKind, - APIGroup: rbacv1beta1.GroupName, - Name: user, - }) - } - for _, group := range sets.NewString(s.Groups...).List() { - clusterRoleBinding.Subjects = append(clusterRoleBinding.Subjects, rbacv1beta1.Subject{ - Kind: rbacv1beta1.GroupKind, - APIGroup: rbacv1beta1.GroupName, - Name: group, - }) - } - for _, sa := range sets.NewString(s.ServiceAccounts...).List() { - tokens := strings.Split(sa, ":") - if len(tokens) != 2 || tokens[0] == "" || tokens[1] == "" { - return nil, fmt.Errorf("serviceaccount must be <namespace>:<name>") - } - clusterRoleBinding.Subjects = append(clusterRoleBinding.Subjects, rbacv1beta1.Subject{ - Kind: rbacv1beta1.ServiceAccountKind, - APIGroup: "", - Namespace: tokens[0], - Name: tokens[1], - }) - } - - return clusterRoleBinding, nil -} - -// validate validates required fields are set to support structured generation. -func (s ClusterRoleBindingGeneratorV1) validate() error { - if len(s.Name) == 0 { - return fmt.Errorf("name must be specified") - } - if len(s.ClusterRole) == 0 { - return fmt.Errorf("clusterrole must be specified") - } - return nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/conditions.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/conditions.go index 36a752c9ad2d..405bc6d4634b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/conditions.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/conditions.go @@ -26,9 +26,6 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/kubernetes/pkg/api/pod" - podv1 "k8s.io/kubernetes/pkg/api/v1/pod" - api "k8s.io/kubernetes/pkg/apis/core" ) // ControllerHasDesiredReplicas returns a condition that will be true if and only if @@ -56,36 +53,6 @@ func ControllerHasDesiredReplicas(rcClient corev1client.ReplicationControllersGe // the pod has already reached completed state. var ErrPodCompleted = fmt.Errorf("pod ran to completion") -// ErrContainerTerminated is returned by PodContainerRunning in the intermediate -// state where the pod indicates it's still running, but its container is already terminated -var ErrContainerTerminated = fmt.Errorf("container terminated") - -// PodRunning returns true if the pod is running, false if the pod has not yet reached running state, -// returns ErrPodCompleted if the pod has run to completion, or an error in any other case. 
-func PodRunning(event watch.Event) (bool, error) { - switch event.Type { - case watch.Deleted: - return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, "") - } - switch t := event.Object.(type) { - case *api.Pod: - switch t.Status.Phase { - case api.PodRunning: - return true, nil - case api.PodFailed, api.PodSucceeded: - return false, ErrPodCompleted - } - case *corev1.Pod: - switch t.Status.Phase { - case corev1.PodRunning: - return true, nil - case corev1.PodFailed, corev1.PodSucceeded: - return false, ErrPodCompleted - } - } - return false, nil -} - // PodCompleted returns true if the pod has run to completion, false if the pod has not yet // reached running state, or an error in any other case. func PodCompleted(event watch.Event) (bool, error) { @@ -94,11 +61,6 @@ func PodCompleted(event watch.Event) (bool, error) { return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, "") } switch t := event.Object.(type) { - case *api.Pod: - switch t.Status.Phase { - case api.PodFailed, api.PodSucceeded: - return true, nil - } case *corev1.Pod: switch t.Status.Phase { case corev1.PodFailed, corev1.PodSucceeded: @@ -117,38 +79,21 @@ func PodRunningAndReady(event watch.Event) (bool, error) { return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, "") } switch t := event.Object.(type) { - case *api.Pod: - switch t.Status.Phase { - case api.PodFailed, api.PodSucceeded: - return false, ErrPodCompleted - case api.PodRunning: - return pod.IsPodReady(t), nil - } case *corev1.Pod: switch t.Status.Phase { case corev1.PodFailed, corev1.PodSucceeded: return false, ErrPodCompleted case corev1.PodRunning: - return podv1.IsPodReady(t), nil - } - } - return false, nil -} - -// PodNotPending returns true if the pod has left the pending state, false if it has not, -// or an error in any other case (such as if the pod was deleted). -func PodNotPending(event watch.Event) (bool, error) { - switch event.Type { - case watch.Deleted: - return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, "") - } - switch t := event.Object.(type) { - case *api.Pod: - switch t.Status.Phase { - case api.PodPending: - return false, nil - default: - return true, nil + conditions := t.Status.Conditions + if conditions == nil { + return false, nil + } + for i := range conditions { + if conditions[i].Type == corev1.PodReady && + conditions[i].Status == corev1.ConditionTrue { + return true, nil + } + } } } return false, nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/configmap.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/configmap.go deleted file mode 100644 index ba4ac1ee2026..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/configmap.go +++ /dev/null @@ -1,297 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubectl - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "strings" - "unicode/utf8" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/validation" - "k8s.io/kubernetes/pkg/kubectl/util" - "k8s.io/kubernetes/pkg/kubectl/util/hash" -) - -// ConfigMapGeneratorV1 supports stable generation of a configMap. -type ConfigMapGeneratorV1 struct { - // Name of configMap (required) - Name string - // Type of configMap (optional) - Type string - // FileSources to derive the configMap from (optional) - FileSources []string - // LiteralSources to derive the configMap from (optional) - LiteralSources []string - // EnvFileSource to derive the configMap from (optional) - EnvFileSource string - // AppendHash; if true, derive a hash from the ConfigMap and append it to the name - AppendHash bool -} - -// Ensure it supports the generator pattern that uses parameter injection. -var _ Generator = &ConfigMapGeneratorV1{} - -// Ensure it supports the generator pattern that uses parameters specified during construction. -var _ StructuredGenerator = &ConfigMapGeneratorV1{} - -// Generate returns a configMap using the specified parameters. -func (s ConfigMapGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), genericParams) - if err != nil { - return nil, err - } - delegate := &ConfigMapGeneratorV1{} - fromFileStrings, found := genericParams["from-file"] - if found { - fromFileArray, isArray := fromFileStrings.([]string) - if !isArray { - return nil, fmt.Errorf("expected []string, found :%v", fromFileStrings) - } - delegate.FileSources = fromFileArray - delete(genericParams, "from-file") - } - fromLiteralStrings, found := genericParams["from-literal"] - if found { - fromLiteralArray, isArray := fromLiteralStrings.([]string) - if !isArray { - return nil, fmt.Errorf("expected []string, found :%v", fromLiteralStrings) - } - delegate.LiteralSources = fromLiteralArray - delete(genericParams, "from-literal") - } - fromEnvFileString, found := genericParams["from-env-file"] - if found { - fromEnvFile, isString := fromEnvFileString.(string) - if !isString { - return nil, fmt.Errorf("expected string, found :%v", fromEnvFileString) - } - delegate.EnvFileSource = fromEnvFile - delete(genericParams, "from-env-file") - } - hashParam, found := genericParams["append-hash"] - if found { - hashBool, isBool := hashParam.(bool) - if !isBool { - return nil, fmt.Errorf("expected bool, found :%v", hashParam) - } - delegate.AppendHash = hashBool - delete(genericParams, "append-hash") - } - params := map[string]string{} - for key, value := range genericParams { - strVal, isString := value.(string) - if !isString { - return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) - } - params[key] = strVal - } - delegate.Name = params["name"] - delegate.Type = params["type"] - - return delegate.StructuredGenerate() -} - -// ParamNames returns the set of supported input parameters when using the parameter injection generator pattern. -func (s ConfigMapGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"type", false}, - {"from-file", false}, - {"from-literal", false}, - {"from-env-file", false}, - {"force", false}, - {"hash", false}, - } -} - -// StructuredGenerate outputs a configMap object using the configured fields. 
-func (s ConfigMapGeneratorV1) StructuredGenerate() (runtime.Object, error) { - if err := s.validate(); err != nil { - return nil, err - } - configMap := &v1.ConfigMap{} - configMap.Name = s.Name - configMap.Data = map[string]string{} - configMap.BinaryData = map[string][]byte{} - if len(s.FileSources) > 0 { - if err := handleConfigMapFromFileSources(configMap, s.FileSources); err != nil { - return nil, err - } - } - if len(s.LiteralSources) > 0 { - if err := handleConfigMapFromLiteralSources(configMap, s.LiteralSources); err != nil { - return nil, err - } - } - if len(s.EnvFileSource) > 0 { - if err := handleConfigMapFromEnvFileSource(configMap, s.EnvFileSource); err != nil { - return nil, err - } - } - if s.AppendHash { - h, err := hash.ConfigMapHash(configMap) - if err != nil { - return nil, err - } - configMap.Name = fmt.Sprintf("%s-%s", configMap.Name, h) - } - return configMap, nil -} - -// validate validates required fields are set to support structured generation. -func (s ConfigMapGeneratorV1) validate() error { - if len(s.Name) == 0 { - return fmt.Errorf("name must be specified") - } - if len(s.EnvFileSource) > 0 && (len(s.FileSources) > 0 || len(s.LiteralSources) > 0) { - return fmt.Errorf("from-env-file cannot be combined with from-file or from-literal") - } - return nil -} - -// handleConfigMapFromLiteralSources adds the specified literal source -// information into the provided configMap. -func handleConfigMapFromLiteralSources(configMap *v1.ConfigMap, literalSources []string) error { - for _, literalSource := range literalSources { - keyName, value, err := util.ParseLiteralSource(literalSource) - if err != nil { - return err - } - err = addKeyFromLiteralToConfigMap(configMap, keyName, value) - if err != nil { - return err - } - } - return nil -} - -// handleConfigMapFromFileSources adds the specified file source information -// into the provided configMap -func handleConfigMapFromFileSources(configMap *v1.ConfigMap, fileSources []string) error { - for _, fileSource := range fileSources { - keyName, filePath, err := util.ParseFileSource(fileSource) - if err != nil { - return err - } - info, err := os.Stat(filePath) - if err != nil { - switch err := err.(type) { - case *os.PathError: - return fmt.Errorf("error reading %s: %v", filePath, err.Err) - default: - return fmt.Errorf("error reading %s: %v", filePath, err) - } - } - if info.IsDir() { - if strings.Contains(fileSource, "=") { - return fmt.Errorf("cannot give a key name for a directory path.") - } - fileList, err := ioutil.ReadDir(filePath) - if err != nil { - return fmt.Errorf("error listing files in %s: %v", filePath, err) - } - for _, item := range fileList { - itemPath := path.Join(filePath, item.Name()) - if item.Mode().IsRegular() { - keyName = item.Name() - err = addKeyFromFileToConfigMap(configMap, keyName, itemPath) - if err != nil { - return err - } - } - } - } else { - if err := addKeyFromFileToConfigMap(configMap, keyName, filePath); err != nil { - return err - } - } - } - - return nil -} - -// handleConfigMapFromEnvFileSource adds the specified env file source information -// into the provided configMap -func handleConfigMapFromEnvFileSource(configMap *v1.ConfigMap, envFileSource string) error { - info, err := os.Stat(envFileSource) - if err != nil { - switch err := err.(type) { - case *os.PathError: - return fmt.Errorf("error reading %s: %v", envFileSource, err.Err) - default: - return fmt.Errorf("error reading %s: %v", envFileSource, err) - } - } - if info.IsDir() { - return fmt.Errorf("env config file 
cannot be a directory") - } - - return addFromEnvFile(envFileSource, func(key, value string) error { - return addKeyFromLiteralToConfigMap(configMap, key, value) - }) -} - -// addKeyFromFileToConfigMap adds a key with the given name to a ConfigMap, populating -// the value with the content of the given file path, or returns an error. -func addKeyFromFileToConfigMap(configMap *v1.ConfigMap, keyName, filePath string) error { - data, err := ioutil.ReadFile(filePath) - if err != nil { - return err - } - - if utf8.Valid(data) { - return addKeyFromLiteralToConfigMap(configMap, keyName, string(data)) - } - - err = validateNewConfigMap(configMap, keyName) - if err != nil { - return err - } - configMap.BinaryData[keyName] = data - return nil -} - -// addKeyFromLiteralToConfigMap adds the given key and data to the given config map, -// returning an error if the key is not valid or if the key already exists. -func addKeyFromLiteralToConfigMap(configMap *v1.ConfigMap, keyName, data string) error { - err := validateNewConfigMap(configMap, keyName) - if err != nil { - return err - } - configMap.Data[keyName] = data - return nil -} - -func validateNewConfigMap(configMap *v1.ConfigMap, keyName string) error { - // Note, the rules for ConfigMap keys are the exact same as the ones for SecretKeys. - if errs := validation.IsConfigMapKey(keyName); len(errs) != 0 { - return fmt.Errorf("%q is not a valid key name for a ConfigMap: %s", keyName, strings.Join(errs, ";")) - } - - if _, exists := configMap.Data[keyName]; exists { - return fmt.Errorf("cannot add key %s, another key by that name already exists in data: %v", keyName, configMap.Data) - } - if _, exists := configMap.BinaryData[keyName]; exists { - return fmt.Errorf("cannot add key %s, another key by that name already exists in binaryData: %v", keyName, configMap.BinaryData) - } - return nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/deployment.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/deployment.go deleted file mode 100644 index 9686d200d54e..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/deployment.go +++ /dev/null @@ -1,179 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - "strings" - - appsv1 "k8s.io/api/apps/v1" - appsv1beta1 "k8s.io/api/apps/v1beta1" - "k8s.io/api/core/v1" - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -// BaseDeploymentGenerator: implement the common functionality of -// DeploymentBasicGeneratorV1, DeploymentBasicAppsGeneratorV1Beta1 and DeploymentBasicAppsGeneratorV1. To reduce -// confusion, it's best to keep this struct in the same file as those -// generators. -type BaseDeploymentGenerator struct { - Name string - Images []string -} - -// validate: check if the caller has forgotten to set one of our fields. 
-func (b BaseDeploymentGenerator) validate() error { - if len(b.Name) == 0 { - return fmt.Errorf("name must be specified") - } - if len(b.Images) == 0 { - return fmt.Errorf("at least one image must be specified") - } - return nil -} - -// structuredGenerate: determine the fields of a deployment. The struct that -// embeds BaseDeploymentGenerator should assemble these pieces into a -// runtime.Object. -func (b BaseDeploymentGenerator) structuredGenerate() ( - podSpec v1.PodSpec, - labels map[string]string, - selector metav1.LabelSelector, - err error, -) { - err = b.validate() - if err != nil { - return - } - podSpec = buildPodSpec(b.Images) - labels = map[string]string{} - labels["app"] = b.Name - selector = metav1.LabelSelector{MatchLabels: labels} - return -} - -// buildPodSpec: parse the image strings and assemble them into the Containers -// of a PodSpec. This is all you need to create the PodSpec for a deployment. -func buildPodSpec(images []string) v1.PodSpec { - podSpec := v1.PodSpec{Containers: []v1.Container{}} - for _, imageString := range images { - // Retain just the image name - imageSplit := strings.Split(imageString, "/") - name := imageSplit[len(imageSplit)-1] - // Remove any tag or hash - if strings.Contains(name, ":") { - name = strings.Split(name, ":")[0] - } - if strings.Contains(name, "@") { - name = strings.Split(name, "@")[0] - } - podSpec.Containers = append(podSpec.Containers, v1.Container{Name: name, Image: imageString}) - } - return podSpec -} - -// DeploymentBasicGeneratorV1 supports stable generation of a deployment -type DeploymentBasicGeneratorV1 struct { - BaseDeploymentGenerator -} - -// Ensure it supports the generator pattern that uses parameters specified during construction -var _ StructuredGenerator = &DeploymentBasicGeneratorV1{} - -// StructuredGenerate outputs a deployment object using the configured fields -func (s *DeploymentBasicGeneratorV1) StructuredGenerate() (runtime.Object, error) { - podSpec, labels, selector, err := s.structuredGenerate() - one := int32(1) - return &extensionsv1beta1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: s.Name, - Labels: labels, - }, - Spec: extensionsv1beta1.DeploymentSpec{ - Replicas: &one, - Selector: &selector, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: labels, - }, - Spec: podSpec, - }, - }, - }, err -} - -// DeploymentBasicAppsGeneratorV1Beta1 supports stable generation of a deployment under apps/v1beta1 endpoint -type DeploymentBasicAppsGeneratorV1Beta1 struct { - BaseDeploymentGenerator -} - -// Ensure it supports the generator pattern that uses parameters specified during construction -var _ StructuredGenerator = &DeploymentBasicAppsGeneratorV1Beta1{} - -// StructuredGenerate outputs a deployment object using the configured fields -func (s *DeploymentBasicAppsGeneratorV1Beta1) StructuredGenerate() (runtime.Object, error) { - podSpec, labels, selector, err := s.structuredGenerate() - one := int32(1) - return &appsv1beta1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: s.Name, - Labels: labels, - }, - Spec: appsv1beta1.DeploymentSpec{ - Replicas: &one, - Selector: &selector, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: labels, - }, - Spec: podSpec, - }, - }, - }, err -} - -// DeploymentBasicAppsGeneratorV1 supports stable generation of a deployment under apps/v1 endpoint -type DeploymentBasicAppsGeneratorV1 struct { - BaseDeploymentGenerator -} - -// Ensure it supports the generator pattern that uses parameters specified during 
construction -var _ StructuredGenerator = &DeploymentBasicAppsGeneratorV1{} - -// StructuredGenerate outputs a deployment object using the configured fields -func (s *DeploymentBasicAppsGeneratorV1) StructuredGenerate() (runtime.Object, error) { - podSpec, labels, selector, err := s.structuredGenerate() - one := int32(1) - return &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: s.Name, - Labels: labels, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: &one, - Selector: &selector, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: labels, - }, - Spec: podSpec, - }, - }, - }, err -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/describe/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/describe/BUILD new file mode 100644 index 000000000000..c120954a8d2f --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/describe/BUILD @@ -0,0 +1,29 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["interface.go"], + importpath = "k8s.io/kubernetes/pkg/kubectl/describe", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/kubectl/describe/versioned:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/describe/interface.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/describe/interface.go new file mode 100644 index 000000000000..58429918787e --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/describe/interface.go @@ -0,0 +1,71 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package describe + +import ( + "fmt" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/cli-runtime/pkg/genericclioptions" +) + +const ( + // LoadBalancerWidth is the width how we describe load balancer + LoadBalancerWidth = 16 + + // LabelNodeRolePrefix is a label prefix for node roles + // It's copied over to here until it's merged in core: https://github.com/kubernetes/kubernetes/pull/39112 + LabelNodeRolePrefix = "node-role.kubernetes.io/" + + // NodeLabelRole specifies the role of a node + NodeLabelRole = "kubernetes.io/role" +) + +// DescriberFunc gives a way to display the specified RESTMapping type +type DescriberFunc func(restClientGetter genericclioptions.RESTClientGetter, mapping *meta.RESTMapping) (Describer, error) + +// Describer generates output for the named resource or an error +// if the output could not be generated. Implementers typically +// abstract the retrieval of the named object from a remote server. 
+type Describer interface { + Describe(namespace, name string, describerSettings DescriberSettings) (output string, err error) +} + +// DescriberSettings holds display configuration for each object +// describer to control what is printed. +type DescriberSettings struct { + ShowEvents bool +} + +// ObjectDescriber is an interface for displaying arbitrary objects with extra +// information. Use when an object is in hand (on disk, or already retrieved). +// Implementers may ignore the additional information passed on extra, or use it +// by default. ObjectDescribers may return ErrNoDescriber if no suitable describer +// is found. +type ObjectDescriber interface { + DescribeObject(object interface{}, extra ...interface{}) (output string, err error) +} + +// ErrNoDescriber is a structured error indicating the provided object or objects +// cannot be described. +type ErrNoDescriber struct { + Types []string +} + +// Error implements the error interface. +func (e ErrNoDescriber) Error() string { + return fmt.Sprintf("no describer has been defined for %v", e.Types) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/describe/versioned/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/describe/versioned/BUILD new file mode 100644 index 000000000000..21c1a0b78c7b --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/describe/versioned/BUILD @@ -0,0 +1,89 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["describe.go"], + importpath = "k8s.io/kubernetes/pkg/kubectl/describe/versioned", + visibility = ["//visibility:public"], + deps = [ + "//pkg/kubectl/describe:go_default_library", + "//pkg/kubectl/scheme:go_default_library", + "//pkg/kubectl/util/certificate:go_default_library", + "//pkg/kubectl/util/deployment:go_default_library", + "//pkg/kubectl/util/event:go_default_library", + "//pkg/kubectl/util/fieldpath:go_default_library", + "//pkg/kubectl/util/qos:go_default_library", + "//pkg/kubectl/util/rbac:go_default_library", + "//pkg/kubectl/util/resource:go_default_library", + "//pkg/kubectl/util/slice:go_default_library", + "//pkg/kubectl/util/storage:go_default_library", + "//staging/src/k8s.io/api/apps/v1:go_default_library", + "//staging/src/k8s.io/api/autoscaling/v2beta2:go_default_library", + "//staging/src/k8s.io/api/batch/v1:go_default_library", + "//staging/src/k8s.io/api/batch/v1beta1:go_default_library", + "//staging/src/k8s.io/api/certificates/v1beta1:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library", + "//staging/src/k8s.io/api/networking/v1:go_default_library", + "//staging/src/k8s.io/api/policy/v1beta1:go_default_library", + "//staging/src/k8s.io/api/rbac/v1:go_default_library", + "//staging/src/k8s.io/api/scheduling/v1beta1:go_default_library", + "//staging/src/k8s.io/api/storage/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + 
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/duration:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", + "//staging/src/k8s.io/client-go/dynamic:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", + "//staging/src/k8s.io/client-go/rest:go_default_library", + "//staging/src/k8s.io/client-go/tools/reference:go_default_library", + "//vendor/github.com/fatih/camelcase:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["describe_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/kubectl/describe:go_default_library", + "//staging/src/k8s.io/api/apps/v1:go_default_library", + "//staging/src/k8s.io/api/autoscaling/v2beta2:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/api/networking/v1:go_default_library", + "//staging/src/k8s.io/api/policy/v1beta1:go_default_library", + "//staging/src/k8s.io/api/storage/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", + "//vendor/k8s.io/utils/pointer:go_default_library", + ], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/internalversion/describe.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/describe/versioned/describe.go similarity index 78% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/internalversion/describe.go rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/describe/versioned/describe.go index 1fe2b0ffd1a7..6f6dba6f0bcb 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/internalversion/describe.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/describe/versioned/describe.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package internalversion +package versioned import ( "bytes" @@ -30,12 +30,20 @@ import ( "text/tabwriter" "time" - "github.com/golang/glog" - "github.com/fatih/camelcase" appsv1 "k8s.io/api/apps/v1" + autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" + batchv1 "k8s.io/api/batch/v1" + batchv1beta1 "k8s.io/api/batch/v1beta1" + certificatesv1beta1 "k8s.io/api/certificates/v1beta1" + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + networkingv1 "k8s.io/api/networking/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" rbacv1 "k8s.io/api/rbac/v1" + schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" + storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/api/resource" @@ -47,35 +55,24 @@ import ( "k8s.io/apimachinery/pkg/util/duration" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/client-go/dynamic" - externalclient "k8s.io/client-go/kubernetes" + clientset "k8s.io/client-go/kubernetes" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/api/events" - "k8s.io/kubernetes/pkg/api/legacyscheme" - "k8s.io/kubernetes/pkg/api/ref" - resourcehelper "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/apis/autoscaling" - "k8s.io/kubernetes/pkg/apis/batch" - "k8s.io/kubernetes/pkg/apis/certificates" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/core/helper" - "k8s.io/kubernetes/pkg/apis/core/helper/qos" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/apis/networking" - "k8s.io/kubernetes/pkg/apis/policy" - "k8s.io/kubernetes/pkg/apis/rbac" - rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1" - "k8s.io/kubernetes/pkg/apis/scheduling" - "k8s.io/kubernetes/pkg/apis/storage" - storageutil "k8s.io/kubernetes/pkg/apis/storage/util" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" - deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" - "k8s.io/kubernetes/pkg/fieldpath" - "k8s.io/kubernetes/pkg/printers" - "k8s.io/kubernetes/pkg/registry/rbac/validation" - "k8s.io/kubernetes/pkg/util/slice" + "k8s.io/client-go/tools/reference" + "k8s.io/klog" + "k8s.io/kubernetes/pkg/kubectl/describe" + "k8s.io/kubernetes/pkg/kubectl/scheme" + "k8s.io/kubernetes/pkg/kubectl/util/certificate" + deploymentutil "k8s.io/kubernetes/pkg/kubectl/util/deployment" + "k8s.io/kubernetes/pkg/kubectl/util/event" + "k8s.io/kubernetes/pkg/kubectl/util/fieldpath" + "k8s.io/kubernetes/pkg/kubectl/util/qos" + "k8s.io/kubernetes/pkg/kubectl/util/rbac" + resourcehelper "k8s.io/kubernetes/pkg/kubectl/util/resource" + "k8s.io/kubernetes/pkg/kubectl/util/slice" + storageutil "k8s.io/kubernetes/pkg/kubectl/util/storage" ) // Each level has 2 spaces for PrefixWriter @@ -86,6 +83,27 @@ const ( LEVEL_3 ) +// DescriberFn gives a way to easily override the function for unit testing if needed +var DescriberFn describe.DescriberFunc = Describer + +// Describer returns a Describer for displaying the specified RESTMapping type or an error. 
+func Describer(restClientGetter genericclioptions.RESTClientGetter, mapping *meta.RESTMapping) (describe.Describer, error) { + clientConfig, err := restClientGetter.ToRESTConfig() + if err != nil { + return nil, err + } + // try to get a describer + if describer, ok := DescriberFor(mapping.GroupVersionKind.GroupKind(), clientConfig); ok { + return describer, nil + } + // if this is a kind we don't have a describer for yet, go generic if possible + if genericDescriber, ok := GenericDescriberFor(mapping, clientConfig); ok { + return genericDescriber, nil + } + // otherwise return an unregistered error + return nil, fmt.Errorf("no description has been implemented for %s", mapping.GroupVersionKind.String()) +} + // PrefixWriter can write text at various indentation levels. type PrefixWriter interface { // Write writes text with the specified indentation level. @@ -127,54 +145,49 @@ func (pw *prefixWriter) Flush() { } } -func describerMap(clientConfig *rest.Config) (map[schema.GroupKind]printers.Describer, error) { +func describerMap(clientConfig *rest.Config) (map[schema.GroupKind]describe.Describer, error) { c, err := clientset.NewForConfig(clientConfig) if err != nil { return nil, err } - externalclient, err := externalclient.NewForConfig(clientConfig) - if err != nil { - return nil, err - } - m := map[schema.GroupKind]printers.Describer{ - api.Kind("Pod"): &PodDescriber{c}, - api.Kind("ReplicationController"): &ReplicationControllerDescriber{c}, - api.Kind("Secret"): &SecretDescriber{c}, - api.Kind("Service"): &ServiceDescriber{c}, - api.Kind("ServiceAccount"): &ServiceAccountDescriber{c}, - api.Kind("Node"): &NodeDescriber{c}, - api.Kind("LimitRange"): &LimitRangeDescriber{c}, - api.Kind("ResourceQuota"): &ResourceQuotaDescriber{c}, - api.Kind("PersistentVolume"): &PersistentVolumeDescriber{c}, - api.Kind("PersistentVolumeClaim"): &PersistentVolumeClaimDescriber{c}, - api.Kind("Namespace"): &NamespaceDescriber{c}, - api.Kind("Endpoints"): &EndpointsDescriber{c}, - api.Kind("ConfigMap"): &ConfigMapDescriber{c}, - api.Kind("PriorityClass"): &PriorityClassDescriber{c}, - - extensions.Kind("ReplicaSet"): &ReplicaSetDescriber{c}, - extensions.Kind("NetworkPolicy"): &NetworkPolicyDescriber{c}, - extensions.Kind("PodSecurityPolicy"): &PodSecurityPolicyDescriber{c}, - autoscaling.Kind("HorizontalPodAutoscaler"): &HorizontalPodAutoscalerDescriber{c}, - extensions.Kind("DaemonSet"): &DaemonSetDescriber{c}, - extensions.Kind("Deployment"): &DeploymentDescriber{c, externalclient}, - extensions.Kind("Ingress"): &IngressDescriber{c}, - batch.Kind("Job"): &JobDescriber{c}, - batch.Kind("CronJob"): &CronJobDescriber{c, externalclient}, - apps.Kind("StatefulSet"): &StatefulSetDescriber{c}, - apps.Kind("Deployment"): &DeploymentDescriber{c, externalclient}, - apps.Kind("DaemonSet"): &DaemonSetDescriber{c}, - apps.Kind("ReplicaSet"): &ReplicaSetDescriber{c}, - certificates.Kind("CertificateSigningRequest"): &CertificateSigningRequestDescriber{c}, - storage.Kind("StorageClass"): &StorageClassDescriber{c}, - policy.Kind("PodDisruptionBudget"): &PodDisruptionBudgetDescriber{c}, - rbac.Kind("Role"): &RoleDescriber{externalclient}, - rbac.Kind("ClusterRole"): &ClusterRoleDescriber{externalclient}, - rbac.Kind("RoleBinding"): &RoleBindingDescriber{externalclient}, - rbac.Kind("ClusterRoleBinding"): &ClusterRoleBindingDescriber{externalclient}, - networking.Kind("NetworkPolicy"): &NetworkPolicyDescriber{c}, - scheduling.Kind("PriorityClass"): &PriorityClassDescriber{c}, + m := 
map[schema.GroupKind]describe.Describer{ + {Group: corev1.GroupName, Kind: "Pod"}: &PodDescriber{c}, + {Group: corev1.GroupName, Kind: "ReplicationController"}: &ReplicationControllerDescriber{c}, + {Group: corev1.GroupName, Kind: "Secret"}: &SecretDescriber{c}, + {Group: corev1.GroupName, Kind: "Service"}: &ServiceDescriber{c}, + {Group: corev1.GroupName, Kind: "ServiceAccount"}: &ServiceAccountDescriber{c}, + {Group: corev1.GroupName, Kind: "Node"}: &NodeDescriber{c}, + {Group: corev1.GroupName, Kind: "LimitRange"}: &LimitRangeDescriber{c}, + {Group: corev1.GroupName, Kind: "ResourceQuota"}: &ResourceQuotaDescriber{c}, + {Group: corev1.GroupName, Kind: "PersistentVolume"}: &PersistentVolumeDescriber{c}, + {Group: corev1.GroupName, Kind: "PersistentVolumeClaim"}: &PersistentVolumeClaimDescriber{c}, + {Group: corev1.GroupName, Kind: "Namespace"}: &NamespaceDescriber{c}, + {Group: corev1.GroupName, Kind: "Endpoints"}: &EndpointsDescriber{c}, + {Group: corev1.GroupName, Kind: "ConfigMap"}: &ConfigMapDescriber{c}, + {Group: corev1.GroupName, Kind: "PriorityClass"}: &PriorityClassDescriber{c}, + {Group: extensionsv1beta1.GroupName, Kind: "ReplicaSet"}: &ReplicaSetDescriber{c}, + {Group: extensionsv1beta1.GroupName, Kind: "NetworkPolicy"}: &NetworkPolicyDescriber{c}, + {Group: extensionsv1beta1.GroupName, Kind: "PodSecurityPolicy"}: &PodSecurityPolicyDescriber{c}, + {Group: autoscalingv2beta2.GroupName, Kind: "HorizontalPodAutoscaler"}: &HorizontalPodAutoscalerDescriber{c}, + {Group: extensionsv1beta1.GroupName, Kind: "DaemonSet"}: &DaemonSetDescriber{c}, + {Group: extensionsv1beta1.GroupName, Kind: "Deployment"}: &DeploymentDescriber{c}, + {Group: extensionsv1beta1.GroupName, Kind: "Ingress"}: &IngressDescriber{c}, + {Group: batchv1.GroupName, Kind: "Job"}: &JobDescriber{c}, + {Group: batchv1.GroupName, Kind: "CronJob"}: &CronJobDescriber{c}, + {Group: appsv1.GroupName, Kind: "StatefulSet"}: &StatefulSetDescriber{c}, + {Group: appsv1.GroupName, Kind: "Deployment"}: &DeploymentDescriber{c}, + {Group: appsv1.GroupName, Kind: "DaemonSet"}: &DaemonSetDescriber{c}, + {Group: appsv1.GroupName, Kind: "ReplicaSet"}: &ReplicaSetDescriber{c}, + {Group: certificatesv1beta1.GroupName, Kind: "CertificateSigningRequest"}: &CertificateSigningRequestDescriber{c}, + {Group: storagev1.GroupName, Kind: "StorageClass"}: &StorageClassDescriber{c}, + {Group: policyv1beta1.GroupName, Kind: "PodDisruptionBudget"}: &PodDisruptionBudgetDescriber{c}, + {Group: rbacv1.GroupName, Kind: "Role"}: &RoleDescriber{c}, + {Group: rbacv1.GroupName, Kind: "ClusterRole"}: &ClusterRoleDescriber{c}, + {Group: rbacv1.GroupName, Kind: "RoleBinding"}: &RoleBindingDescriber{c}, + {Group: rbacv1.GroupName, Kind: "ClusterRoleBinding"}: &ClusterRoleBindingDescriber{c}, + {Group: networkingv1.GroupName, Kind: "NetworkPolicy"}: &NetworkPolicyDescriber{c}, + {Group: schedulingv1beta1.GroupName, Kind: "PriorityClass"}: &PriorityClassDescriber{c}, } return m, nil @@ -182,10 +195,10 @@ func describerMap(clientConfig *rest.Config) (map[schema.GroupKind]printers.Desc // DescriberFor returns the default describe functions for each of the standard // Kubernetes types. 
-func DescriberFor(kind schema.GroupKind, clientConfig *rest.Config) (printers.Describer, bool) { +func DescriberFor(kind schema.GroupKind, clientConfig *rest.Config) (describe.Describer, bool) { describers, err := describerMap(clientConfig) if err != nil { - glog.V(1).Info(err) + klog.V(1).Info(err) return nil, false } @@ -195,25 +208,38 @@ func DescriberFor(kind schema.GroupKind, clientConfig *rest.Config) (printers.De // GenericDescriberFor returns a generic describer for the specified mapping // that uses only information available from runtime.Unstructured -func GenericDescriberFor(mapping *meta.RESTMapping, dynamic dynamic.Interface, events coreclient.EventsGetter) printers.Describer { - return &genericDescriber{mapping, dynamic, events} +func GenericDescriberFor(mapping *meta.RESTMapping, clientConfig *rest.Config) (describe.Describer, bool) { + // used to fetch the resource + dynamicClient, err := dynamic.NewForConfig(clientConfig) + if err != nil { + return nil, false + } + + // used to get events for the resource + clientSet, err := clientset.NewForConfig(clientConfig) + if err != nil { + return nil, false + } + eventsClient := clientSet.Core() + + return &genericDescriber{mapping, dynamicClient, eventsClient}, true } type genericDescriber struct { mapping *meta.RESTMapping dynamic dynamic.Interface - events coreclient.EventsGetter + events corev1client.EventsGetter } -func (g *genericDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (output string, err error) { +func (g *genericDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (output string, err error) { obj, err := g.dynamic.Resource(g.mapping.Resource).Namespace(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", err } - var events *api.EventList + var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = g.events.Events(namespace).Search(legacyscheme.Scheme, obj) + events, _ = g.events.Events(namespace).Search(scheme.Scheme, obj) } return tabbedString(func(out io.Writer) error { @@ -295,7 +321,7 @@ func smartLabelFor(field string) string { } // DefaultObjectDescriber can describe the default Kubernetes objects. 
-var DefaultObjectDescriber printers.ObjectDescriber +var DefaultObjectDescriber describe.ObjectDescriber func init() { d := &Describers{} @@ -310,7 +336,7 @@ func init() { describeNamespace, ) if err != nil { - glog.Fatalf("Cannot register describers: %v", err) + klog.Fatalf("Cannot register describers: %v", err) } DefaultObjectDescriber = d } @@ -320,7 +346,7 @@ type NamespaceDescriber struct { clientset.Interface } -func (d *NamespaceDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *NamespaceDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { ns, err := d.Core().Namespaces().Get(name, metav1.GetOptions{}) if err != nil { return "", err @@ -348,7 +374,7 @@ func (d *NamespaceDescriber) Describe(namespace, name string, describerSettings return describeNamespace(ns, resourceQuotaList, limitRangeList) } -func describeNamespace(namespace *api.Namespace, resourceQuotaList *api.ResourceQuotaList, limitRangeList *api.LimitRangeList) (string, error) { +func describeNamespace(namespace *corev1.Namespace, resourceQuotaList *corev1.ResourceQuotaList, limitRangeList *corev1.LimitRangeList) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", namespace.Name) @@ -367,7 +393,7 @@ func describeNamespace(namespace *api.Namespace, resourceQuotaList *api.Resource }) } -func describeLimitRangeSpec(spec api.LimitRangeSpec, prefix string, w PrefixWriter) { +func describeLimitRangeSpec(spec corev1.LimitRangeSpec, prefix string, w PrefixWriter) { for i := range spec.Limits { item := spec.Limits[i] maxResources := item.Max @@ -376,7 +402,7 @@ func describeLimitRangeSpec(spec api.LimitRangeSpec, prefix string, w PrefixWrit defaultRequestResources := item.DefaultRequest ratio := item.MaxLimitRequestRatio - set := map[api.ResourceName]bool{} + set := map[corev1.ResourceName]bool{} for k := range maxResources { set[k] = true } @@ -433,7 +459,7 @@ func describeLimitRangeSpec(spec api.LimitRangeSpec, prefix string, w PrefixWrit } // DescribeLimitRanges merges a set of limit range items into a single tabular description -func DescribeLimitRanges(limitRanges *api.LimitRangeList, w PrefixWriter) { +func DescribeLimitRanges(limitRanges *corev1.LimitRangeList, w PrefixWriter) { if len(limitRanges.Items) == 0 { w.Write(LEVEL_0, "No resource limits.\n") return @@ -446,7 +472,7 @@ func DescribeLimitRanges(limitRanges *api.LimitRangeList, w PrefixWriter) { } // DescribeResourceQuotas merges a set of quota items into a single tabular description of all quotas -func DescribeResourceQuotas(quotas *api.ResourceQuotaList, w PrefixWriter) { +func DescribeResourceQuotas(quotas *corev1.ResourceQuotaList, w PrefixWriter) { if len(quotas.Items) == 0 { w.Write(LEVEL_0, "No resource quota.\n") return @@ -464,7 +490,7 @@ func DescribeResourceQuotas(quotas *api.ResourceQuotaList, w PrefixWriter) { sort.Strings(scopes) w.Write(LEVEL_0, " Scopes:\t%s\n", strings.Join(scopes, ", ")) for _, scope := range scopes { - helpText := helpTextForResourceQuotaScope(api.ResourceQuotaScope(scope)) + helpText := helpTextForResourceQuotaScope(corev1.ResourceQuotaScope(scope)) if len(helpText) > 0 { w.Write(LEVEL_0, " * %s\n", helpText) } @@ -474,7 +500,7 @@ func DescribeResourceQuotas(quotas *api.ResourceQuotaList, w PrefixWriter) { w.Write(LEVEL_0, " Resource\tUsed\tHard\n") w.Write(LEVEL_0, " --------\t---\t---\n") - resources := make([]api.ResourceName, 0, 
len(q.Status.Hard)) + resources := make([]corev1.ResourceName, 0, len(q.Status.Hard)) for resource := range q.Status.Hard { resources = append(resources, resource) } @@ -493,7 +519,7 @@ type LimitRangeDescriber struct { clientset.Interface } -func (d *LimitRangeDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *LimitRangeDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { lr := d.Core().LimitRanges(namespace) limitRange, err := lr.Get(name, metav1.GetOptions{}) @@ -503,7 +529,7 @@ func (d *LimitRangeDescriber) Describe(namespace, name string, describerSettings return describeLimitRange(limitRange) } -func describeLimitRange(limitRange *api.LimitRange) (string, error) { +func describeLimitRange(limitRange *corev1.LimitRange) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", limitRange.Name) @@ -520,7 +546,7 @@ type ResourceQuotaDescriber struct { clientset.Interface } -func (d *ResourceQuotaDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *ResourceQuotaDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { rq := d.Core().ResourceQuotas(namespace) resourceQuota, err := rq.Get(name, metav1.GetOptions{}) @@ -531,21 +557,21 @@ func (d *ResourceQuotaDescriber) Describe(namespace, name string, describerSetti return describeQuota(resourceQuota) } -func helpTextForResourceQuotaScope(scope api.ResourceQuotaScope) string { +func helpTextForResourceQuotaScope(scope corev1.ResourceQuotaScope) string { switch scope { - case api.ResourceQuotaScopeTerminating: + case corev1.ResourceQuotaScopeTerminating: return "Matches all pods that have an active deadline. These pods have a limited lifespan on a node before being actively terminated by the system." - case api.ResourceQuotaScopeNotTerminating: + case corev1.ResourceQuotaScopeNotTerminating: return "Matches all pods that do not have an active deadline. These pods usually include long running pods whose container command is not expected to terminate." - case api.ResourceQuotaScopeBestEffort: + case corev1.ResourceQuotaScopeBestEffort: return "Matches all pods that do not have resource requirements set. These pods have a best effort quality of service." - case api.ResourceQuotaScopeNotBestEffort: + case corev1.ResourceQuotaScopeNotBestEffort: return "Matches all pods that have at least one resource requirement set. These pods have a burstable or guaranteed quality of service." 
default: return "" } } -func describeQuota(resourceQuota *api.ResourceQuota) (string, error) { +func describeQuota(resourceQuota *corev1.ResourceQuota) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", resourceQuota.Name) @@ -558,7 +584,7 @@ func describeQuota(resourceQuota *api.ResourceQuota) (string, error) { sort.Strings(scopes) w.Write(LEVEL_0, "Scopes:\t%s\n", strings.Join(scopes, ", ")) for _, scope := range scopes { - helpText := helpTextForResourceQuotaScope(api.ResourceQuotaScope(scope)) + helpText := helpTextForResourceQuotaScope(corev1.ResourceQuotaScope(scope)) if len(helpText) > 0 { w.Write(LEVEL_0, " * %s\n", helpText) } @@ -567,7 +593,7 @@ func describeQuota(resourceQuota *api.ResourceQuota) (string, error) { w.Write(LEVEL_0, "Resource\tUsed\tHard\n") w.Write(LEVEL_0, "--------\t----\t----\n") - resources := make([]api.ResourceName, 0, len(resourceQuota.Status.Hard)) + resources := make([]corev1.ResourceName, 0, len(resourceQuota.Status.Hard)) for resource := range resourceQuota.Status.Hard { resources = append(resources, resource) } @@ -590,7 +616,7 @@ type PodDescriber struct { clientset.Interface } -func (d *PodDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *PodDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { pod, err := d.Core().Pods(namespace).Get(name, metav1.GetOptions{}) if err != nil { if describerSettings.ShowEvents { @@ -610,20 +636,20 @@ func (d *PodDescriber) Describe(namespace, name string, describerSettings printe return "", err } - var events *api.EventList + var events *corev1.EventList if describerSettings.ShowEvents { - if ref, err := ref.GetReference(legacyscheme.Scheme, pod); err != nil { - glog.Errorf("Unable to construct reference to '%#v': %v", pod, err) + if ref, err := reference.GetReference(scheme.Scheme, pod); err != nil { + klog.Errorf("Unable to construct reference to '%#v': %v", pod, err) } else { ref.Kind = "" - events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, ref) + events, _ = d.Core().Events(namespace).Search(scheme.Scheme, ref) } } return describePod(pod, events) } -func describePod(pod *api.Pod, events *api.EventList) (string, error) { +func describePod(pod *corev1.Pod, events *corev1.EventList) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", pod.Name) @@ -666,6 +692,21 @@ func describePod(pod *api.Pod, events *api.EventList) (string, error) { describeContainers("Init Containers", pod.Spec.InitContainers, pod.Status.InitContainerStatuses, EnvValueRetriever(pod), w, "") } describeContainers("Containers", pod.Spec.Containers, pod.Status.ContainerStatuses, EnvValueRetriever(pod), w, "") + if len(pod.Spec.ReadinessGates) > 0 { + w.Write(LEVEL_0, "Readiness Gates:\n Type\tStatus\n") + for _, g := range pod.Spec.ReadinessGates { + status := "" + for _, c := range pod.Status.Conditions { + if c.Type == g.ConditionType { + status = fmt.Sprintf("%v", c.Status) + break + } + } + w.Write(LEVEL_1, "%v \t%v \n", + g.ConditionType, + status) + } + } if len(pod.Status.Conditions) > 0 { w.Write(LEVEL_0, "Conditions:\n Type\tStatus\n") for _, c := range pod.Status.Conditions { @@ -696,7 +737,7 @@ func printController(controllee metav1.Object) string { return "" } -func describeVolumes(volumes []api.Volume, w PrefixWriter, space string) { +func 
describeVolumes(volumes []corev1.Volume, w PrefixWriter, space string) { if volumes == nil || len(volumes) == 0 { w.Write(LEVEL_0, "%sVolumes:\t\n", space) return @@ -761,13 +802,15 @@ func describeVolumes(volumes []api.Volume, w PrefixWriter, space string) { printFlexVolumeSource(volume.VolumeSource.FlexVolume, w) case volume.VolumeSource.Flocker != nil: printFlockerVolumeSource(volume.VolumeSource.Flocker, w) + case volume.VolumeSource.Projected != nil: + printProjectedVolumeSource(volume.VolumeSource.Projected, w) default: w.Write(LEVEL_1, "\n") } } } -func printHostPathVolumeSource(hostPath *api.HostPathVolumeSource, w PrefixWriter) { +func printHostPathVolumeSource(hostPath *corev1.HostPathVolumeSource, w PrefixWriter) { hostPathType := "" if hostPath.Type != nil { hostPathType = string(*hostPath.Type) @@ -778,12 +821,12 @@ func printHostPathVolumeSource(hostPath *api.HostPathVolumeSource, w PrefixWrite hostPath.Path, hostPathType) } -func printEmptyDirVolumeSource(emptyDir *api.EmptyDirVolumeSource, w PrefixWriter) { +func printEmptyDirVolumeSource(emptyDir *corev1.EmptyDirVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tEmptyDir (a temporary directory that shares a pod's lifetime)\n"+ " Medium:\t%v\n", emptyDir.Medium) } -func printGCEPersistentDiskVolumeSource(gce *api.GCEPersistentDiskVolumeSource, w PrefixWriter) { +func printGCEPersistentDiskVolumeSource(gce *corev1.GCEPersistentDiskVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tGCEPersistentDisk (a Persistent Disk resource in Google Compute Engine)\n"+ " PDName:\t%v\n"+ " FSType:\t%v\n"+ @@ -792,7 +835,7 @@ func printGCEPersistentDiskVolumeSource(gce *api.GCEPersistentDiskVolumeSource, gce.PDName, gce.FSType, gce.Partition, gce.ReadOnly) } -func printAWSElasticBlockStoreVolumeSource(aws *api.AWSElasticBlockStoreVolumeSource, w PrefixWriter) { +func printAWSElasticBlockStoreVolumeSource(aws *corev1.AWSElasticBlockStoreVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tAWSElasticBlockStore (a Persistent Disk resource in AWS)\n"+ " VolumeID:\t%v\n"+ " FSType:\t%v\n"+ @@ -801,14 +844,14 @@ func printAWSElasticBlockStoreVolumeSource(aws *api.AWSElasticBlockStoreVolumeSo aws.VolumeID, aws.FSType, aws.Partition, aws.ReadOnly) } -func printGitRepoVolumeSource(git *api.GitRepoVolumeSource, w PrefixWriter) { +func printGitRepoVolumeSource(git *corev1.GitRepoVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tGitRepo (a volume that is pulled from git when the pod is created)\n"+ " Repository:\t%v\n"+ " Revision:\t%v\n", git.Repository, git.Revision) } -func printSecretVolumeSource(secret *api.SecretVolumeSource, w PrefixWriter) { +func printSecretVolumeSource(secret *corev1.SecretVolumeSource, w PrefixWriter) { optional := secret.Optional != nil && *secret.Optional w.Write(LEVEL_2, "Type:\tSecret (a volume populated by a Secret)\n"+ " SecretName:\t%v\n"+ @@ -816,7 +859,7 @@ func printSecretVolumeSource(secret *api.SecretVolumeSource, w PrefixWriter) { secret.SecretName, optional) } -func printConfigMapVolumeSource(configMap *api.ConfigMapVolumeSource, w PrefixWriter) { +func printConfigMapVolumeSource(configMap *corev1.ConfigMapVolumeSource, w PrefixWriter) { optional := configMap.Optional != nil && *configMap.Optional w.Write(LEVEL_2, "Type:\tConfigMap (a volume populated by a ConfigMap)\n"+ " Name:\t%v\n"+ @@ -824,7 +867,27 @@ func printConfigMapVolumeSource(configMap *api.ConfigMapVolumeSource, w PrefixWr configMap.Name, optional) } -func printNFSVolumeSource(nfs *api.NFSVolumeSource, w PrefixWriter) { 
+func printProjectedVolumeSource(projected *corev1.ProjectedVolumeSource, w PrefixWriter) { + w.Write(LEVEL_2, "Type:\tProjected (a volume that contains injected data from multiple sources)\n") + for _, source := range projected.Sources { + if source.Secret != nil { + w.Write(LEVEL_2, "SecretName:\t%v\n"+ + " SecretOptionalName:\t%v\n", + source.Secret.Name, source.Secret.Optional) + } else if source.DownwardAPI != nil { + w.Write(LEVEL_2, "DownwardAPI:\ttrue\n") + } else if source.ConfigMap != nil { + w.Write(LEVEL_2, "ConfigMapName:\t%v\n"+ + " ConfigMapOptional:\t%v\n", + source.ConfigMap.Name, source.ConfigMap.Optional) + } else if source.ServiceAccountToken != nil { + w.Write(LEVEL_2, "TokenExpirationSeconds:\t%v\n", + source.ServiceAccountToken.ExpirationSeconds) + } + } +} + +func printNFSVolumeSource(nfs *corev1.NFSVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tNFS (an NFS mount that lasts the lifetime of a pod)\n"+ " Server:\t%v\n"+ " Path:\t%v\n"+ @@ -832,7 +895,7 @@ func printNFSVolumeSource(nfs *api.NFSVolumeSource, w PrefixWriter) { nfs.Server, nfs.Path, nfs.ReadOnly) } -func printQuobyteVolumeSource(quobyte *api.QuobyteVolumeSource, w PrefixWriter) { +func printQuobyteVolumeSource(quobyte *corev1.QuobyteVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tQuobyte (a Quobyte mount on the host that shares a pod's lifetime)\n"+ " Registry:\t%v\n"+ " Volume:\t%v\n"+ @@ -840,13 +903,13 @@ func printQuobyteVolumeSource(quobyte *api.QuobyteVolumeSource, w PrefixWriter) quobyte.Registry, quobyte.Volume, quobyte.ReadOnly) } -func printPortworxVolumeSource(pwxVolume *api.PortworxVolumeSource, w PrefixWriter) { +func printPortworxVolumeSource(pwxVolume *corev1.PortworxVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tPortworxVolume (a Portworx Volume resource)\n"+ " VolumeID:\t%v\n", pwxVolume.VolumeID) } -func printISCSIVolumeSource(iscsi *api.ISCSIVolumeSource, w PrefixWriter) { +func printISCSIVolumeSource(iscsi *corev1.ISCSIVolumeSource, w PrefixWriter) { initiator := "" if iscsi.InitiatorName != nil { initiator = *iscsi.InitiatorName @@ -866,7 +929,7 @@ func printISCSIVolumeSource(iscsi *api.ISCSIVolumeSource, w PrefixWriter) { iscsi.TargetPortal, iscsi.IQN, iscsi.Lun, iscsi.ISCSIInterface, iscsi.FSType, iscsi.ReadOnly, iscsi.Portals, iscsi.DiscoveryCHAPAuth, iscsi.SessionCHAPAuth, iscsi.SecretRef, initiator) } -func printISCSIPersistentVolumeSource(iscsi *api.ISCSIPersistentVolumeSource, w PrefixWriter) { +func printISCSIPersistentVolumeSource(iscsi *corev1.ISCSIPersistentVolumeSource, w PrefixWriter) { initiatorName := "" if iscsi.InitiatorName != nil { initiatorName = *iscsi.InitiatorName @@ -886,7 +949,7 @@ func printISCSIPersistentVolumeSource(iscsi *api.ISCSIPersistentVolumeSource, w iscsi.TargetPortal, iscsi.IQN, iscsi.Lun, iscsi.ISCSIInterface, iscsi.FSType, iscsi.ReadOnly, iscsi.Portals, iscsi.DiscoveryCHAPAuth, iscsi.SessionCHAPAuth, iscsi.SecretRef, initiatorName) } -func printGlusterfsVolumeSource(glusterfs *api.GlusterfsVolumeSource, w PrefixWriter) { +func printGlusterfsVolumeSource(glusterfs *corev1.GlusterfsVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tGlusterfs (a Glusterfs mount on the host that shares a pod's lifetime)\n"+ " EndpointsName:\t%v\n"+ " Path:\t%v\n"+ @@ -894,14 +957,23 @@ func printGlusterfsVolumeSource(glusterfs *api.GlusterfsVolumeSource, w PrefixWr glusterfs.EndpointsName, glusterfs.Path, glusterfs.ReadOnly) } -func printPersistentVolumeClaimVolumeSource(claim *api.PersistentVolumeClaimVolumeSource, w PrefixWriter) 
{ +func printGlusterfsPersistentVolumeSource(glusterfs *corev1.GlusterfsPersistentVolumeSource, w PrefixWriter) { + w.Write(LEVEL_2, "Type:\tGlusterfs (a Glusterfs mount on the host that shares a pod's lifetime)\n"+ + " EndpointsName:\t%v\n"+ + " EndpointsNamespace:\t%v\n"+ + " Path:\t%v\n"+ + " ReadOnly:\t%v\n", + glusterfs.EndpointsName, glusterfs.EndpointsNamespace, glusterfs.Path, glusterfs.ReadOnly) +} + +func printPersistentVolumeClaimVolumeSource(claim *corev1.PersistentVolumeClaimVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tPersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)\n"+ " ClaimName:\t%v\n"+ " ReadOnly:\t%v\n", claim.ClaimName, claim.ReadOnly) } -func printRBDVolumeSource(rbd *api.RBDVolumeSource, w PrefixWriter) { +func printRBDVolumeSource(rbd *corev1.RBDVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tRBD (a Rados Block Device mount on the host that shares a pod's lifetime)\n"+ " CephMonitors:\t%v\n"+ " RBDImage:\t%v\n"+ @@ -914,7 +986,7 @@ func printRBDVolumeSource(rbd *api.RBDVolumeSource, w PrefixWriter) { rbd.CephMonitors, rbd.RBDImage, rbd.FSType, rbd.RBDPool, rbd.RadosUser, rbd.Keyring, rbd.SecretRef, rbd.ReadOnly) } -func printRBDPersistentVolumeSource(rbd *api.RBDPersistentVolumeSource, w PrefixWriter) { +func printRBDPersistentVolumeSource(rbd *corev1.RBDPersistentVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tRBD (a Rados Block Device mount on the host that shares a pod's lifetime)\n"+ " CephMonitors:\t%v\n"+ " RBDImage:\t%v\n"+ @@ -927,7 +999,7 @@ func printRBDPersistentVolumeSource(rbd *api.RBDPersistentVolumeSource, w Prefix rbd.CephMonitors, rbd.RBDImage, rbd.FSType, rbd.RBDPool, rbd.RadosUser, rbd.Keyring, rbd.SecretRef, rbd.ReadOnly) } -func printDownwardAPIVolumeSource(d *api.DownwardAPIVolumeSource, w PrefixWriter) { +func printDownwardAPIVolumeSource(d *corev1.DownwardAPIVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tDownwardAPI (a volume populated by information about the pod)\n Items:\n") for _, mapping := range d.Items { if mapping.FieldRef != nil { @@ -939,7 +1011,7 @@ func printDownwardAPIVolumeSource(d *api.DownwardAPIVolumeSource, w PrefixWriter } } -func printAzureDiskVolumeSource(d *api.AzureDiskVolumeSource, w PrefixWriter) { +func printAzureDiskVolumeSource(d *corev1.AzureDiskVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tAzureDisk (an Azure Data Disk mount on the host and bind mount to the pod)\n"+ " DiskName:\t%v\n"+ " DiskURI:\t%v\n"+ @@ -950,7 +1022,7 @@ func printAzureDiskVolumeSource(d *api.AzureDiskVolumeSource, w PrefixWriter) { d.DiskName, d.DataDiskURI, *d.Kind, *d.FSType, *d.CachingMode, *d.ReadOnly) } -func printVsphereVolumeSource(vsphere *api.VsphereVirtualDiskVolumeSource, w PrefixWriter) { +func printVsphereVolumeSource(vsphere *corev1.VsphereVirtualDiskVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tvSphereVolume (a Persistent Disk resource in vSphere)\n"+ " VolumePath:\t%v\n"+ " FSType:\t%v\n"+ @@ -958,14 +1030,14 @@ func printVsphereVolumeSource(vsphere *api.VsphereVirtualDiskVolumeSource, w Pre vsphere.VolumePath, vsphere.FSType, vsphere.StoragePolicyName) } -func printPhotonPersistentDiskVolumeSource(photon *api.PhotonPersistentDiskVolumeSource, w PrefixWriter) { +func printPhotonPersistentDiskVolumeSource(photon *corev1.PhotonPersistentDiskVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tPhotonPersistentDisk (a Persistent Disk resource in photon platform)\n"+ " PdID:\t%v\n"+ " FSType:\t%v\n", photon.PdID, photon.FSType) } 
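// ---------------------------------------------------------------------------
// Editor's sketch, not part of the patch: a describe_test.go-style exercise of
// the projected-volume printer added above. It assumes placement in package
// versioned so the unexported printProjectedVolumeSource and the PrefixWriter
// plumbing (both introduced in this diff) are reachable; the sample secret
// projection and buffer are illustrative only.
package versioned

import (
	"bytes"
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// demoProjectedVolumePrinting drives the printer the same way describeVolumes
// does for a Projected source and returns the tab-formatted text it emits.
func demoProjectedVolumePrinting() string {
	var buf bytes.Buffer
	w := NewPrefixWriter(&buf) // defined earlier in describe.go
	optional := false
	printProjectedVolumeSource(&corev1.ProjectedVolumeSource{
		Sources: []corev1.VolumeProjection{{
			Secret: &corev1.SecretProjection{
				LocalObjectReference: corev1.LocalObjectReference{Name: "tls-certs"},
				Optional:             &optional,
			},
		}},
	}, w)
	w.Flush()
	// One "Type:" header line plus SecretName/SecretOptionalName lines,
	// matching the w.Write calls in printProjectedVolumeSource above.
	return buf.String()
}

var _ = fmt.Sprint // keep fmt available for ad-hoc debugging of the output
// ---------------------------------------------------------------------------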
-func printCinderVolumeSource(cinder *api.CinderVolumeSource, w PrefixWriter) { +func printCinderVolumeSource(cinder *corev1.CinderVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tCinder (a Persistent Disk resource in OpenStack)\n"+ " VolumeID:\t%v\n"+ " FSType:\t%v\n"+ @@ -974,7 +1046,7 @@ func printCinderVolumeSource(cinder *api.CinderVolumeSource, w PrefixWriter) { cinder.VolumeID, cinder.FSType, cinder.ReadOnly, cinder.SecretRef) } -func printCinderPersistentVolumeSource(cinder *api.CinderPersistentVolumeSource, w PrefixWriter) { +func printCinderPersistentVolumeSource(cinder *corev1.CinderPersistentVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tCinder (a Persistent Disk resource in OpenStack)\n"+ " VolumeID:\t%v\n"+ " FSType:\t%v\n"+ @@ -983,7 +1055,7 @@ func printCinderPersistentVolumeSource(cinder *api.CinderPersistentVolumeSource, cinder.VolumeID, cinder.SecretRef, cinder.FSType, cinder.ReadOnly, cinder.SecretRef) } -func printScaleIOVolumeSource(sio *api.ScaleIOVolumeSource, w PrefixWriter) { +func printScaleIOVolumeSource(sio *corev1.ScaleIOVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tScaleIO (a persistent volume backed by a block device in ScaleIO)\n"+ " Gateway:\t%v\n"+ " System:\t%v\n"+ @@ -996,7 +1068,7 @@ func printScaleIOVolumeSource(sio *api.ScaleIOVolumeSource, w PrefixWriter) { sio.Gateway, sio.System, sio.ProtectionDomain, sio.StoragePool, sio.StorageMode, sio.VolumeName, sio.FSType, sio.ReadOnly) } -func printScaleIOPersistentVolumeSource(sio *api.ScaleIOPersistentVolumeSource, w PrefixWriter) { +func printScaleIOPersistentVolumeSource(sio *corev1.ScaleIOPersistentVolumeSource, w PrefixWriter) { var secretNS, secretName string if sio.SecretRef != nil { secretName = sio.SecretRef.Name @@ -1016,13 +1088,13 @@ func printScaleIOPersistentVolumeSource(sio *api.ScaleIOPersistentVolumeSource, sio.Gateway, sio.System, sio.ProtectionDomain, sio.StoragePool, sio.StorageMode, sio.VolumeName, secretName, secretNS, sio.FSType, sio.ReadOnly) } -func printLocalVolumeSource(ls *api.LocalVolumeSource, w PrefixWriter) { +func printLocalVolumeSource(ls *corev1.LocalVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tLocalVolume (a persistent volume backed by local storage on a node)\n"+ " Path:\t%v\n", ls.Path) } -func printCephFSVolumeSource(cephfs *api.CephFSVolumeSource, w PrefixWriter) { +func printCephFSVolumeSource(cephfs *corev1.CephFSVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tCephFS (a CephFS mount on the host that shares a pod's lifetime)\n"+ " Monitors:\t%v\n"+ " Path:\t%v\n"+ @@ -1033,7 +1105,7 @@ func printCephFSVolumeSource(cephfs *api.CephFSVolumeSource, w PrefixWriter) { cephfs.Monitors, cephfs.Path, cephfs.User, cephfs.SecretFile, cephfs.SecretRef, cephfs.ReadOnly) } -func printCephFSPersistentVolumeSource(cephfs *api.CephFSPersistentVolumeSource, w PrefixWriter) { +func printCephFSPersistentVolumeSource(cephfs *corev1.CephFSPersistentVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tCephFS (a CephFS mount on the host that shares a pod's lifetime)\n"+ " Monitors:\t%v\n"+ " Path:\t%v\n"+ @@ -1044,7 +1116,7 @@ func printCephFSPersistentVolumeSource(cephfs *api.CephFSPersistentVolumeSource, cephfs.Monitors, cephfs.Path, cephfs.User, cephfs.SecretFile, cephfs.SecretRef, cephfs.ReadOnly) } -func printStorageOSVolumeSource(storageos *api.StorageOSVolumeSource, w PrefixWriter) { +func printStorageOSVolumeSource(storageos *corev1.StorageOSVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tStorageOS (a StorageOS 
Persistent Disk resource)\n"+ " VolumeName:\t%v\n"+ " VolumeNamespace:\t%v\n"+ @@ -1053,7 +1125,7 @@ func printStorageOSVolumeSource(storageos *api.StorageOSVolumeSource, w PrefixWr storageos.VolumeName, storageos.VolumeNamespace, storageos.FSType, storageos.ReadOnly) } -func printStorageOSPersistentVolumeSource(storageos *api.StorageOSPersistentVolumeSource, w PrefixWriter) { +func printStorageOSPersistentVolumeSource(storageos *corev1.StorageOSPersistentVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tStorageOS (a StorageOS Persistent Disk resource)\n"+ " VolumeName:\t%v\n"+ " VolumeNamespace:\t%v\n"+ @@ -1062,7 +1134,7 @@ func printStorageOSPersistentVolumeSource(storageos *api.StorageOSPersistentVolu storageos.VolumeName, storageos.VolumeNamespace, storageos.FSType, storageos.ReadOnly) } -func printFCVolumeSource(fc *api.FCVolumeSource, w PrefixWriter) { +func printFCVolumeSource(fc *corev1.FCVolumeSource, w PrefixWriter) { lun := "" if fc.Lun != nil { lun = strconv.Itoa(int(*fc.Lun)) @@ -1075,7 +1147,7 @@ func printFCVolumeSource(fc *api.FCVolumeSource, w PrefixWriter) { strings.Join(fc.TargetWWNs, ", "), lun, fc.FSType, fc.ReadOnly) } -func printAzureFileVolumeSource(azureFile *api.AzureFileVolumeSource, w PrefixWriter) { +func printAzureFileVolumeSource(azureFile *corev1.AzureFileVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tAzureFile (an Azure File Service mount on the host and bind mount to the pod)\n"+ " SecretName:\t%v\n"+ " ShareName:\t%v\n"+ @@ -1083,7 +1155,7 @@ func printAzureFileVolumeSource(azureFile *api.AzureFileVolumeSource, w PrefixWr azureFile.SecretName, azureFile.ShareName, azureFile.ReadOnly) } -func printAzureFilePersistentVolumeSource(azureFile *api.AzureFilePersistentVolumeSource, w PrefixWriter) { +func printAzureFilePersistentVolumeSource(azureFile *corev1.AzureFilePersistentVolumeSource, w PrefixWriter) { ns := "" if azureFile.SecretNamespace != nil { ns = *azureFile.SecretNamespace @@ -1096,7 +1168,7 @@ func printAzureFilePersistentVolumeSource(azureFile *api.AzureFilePersistentVolu azureFile.SecretName, ns, azureFile.ShareName, azureFile.ReadOnly) } -func printFlexPersistentVolumeSource(flex *api.FlexPersistentVolumeSource, w PrefixWriter) { +func printFlexPersistentVolumeSource(flex *corev1.FlexPersistentVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tFlexVolume (a generic volume resource that is provisioned/attached using an exec based plugin)\n"+ " Driver:\t%v\n"+ " FSType:\t%v\n"+ @@ -1106,7 +1178,7 @@ func printFlexPersistentVolumeSource(flex *api.FlexPersistentVolumeSource, w Pre flex.Driver, flex.FSType, flex.SecretRef, flex.ReadOnly, flex.Options) } -func printFlexVolumeSource(flex *api.FlexVolumeSource, w PrefixWriter) { +func printFlexVolumeSource(flex *corev1.FlexVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tFlexVolume (a generic volume resource that is provisioned/attached using an exec based plugin)\n"+ " Driver:\t%v\n"+ " FSType:\t%v\n"+ @@ -1116,14 +1188,14 @@ func printFlexVolumeSource(flex *api.FlexVolumeSource, w PrefixWriter) { flex.Driver, flex.FSType, flex.SecretRef, flex.ReadOnly, flex.Options) } -func printFlockerVolumeSource(flocker *api.FlockerVolumeSource, w PrefixWriter) { +func printFlockerVolumeSource(flocker *corev1.FlockerVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tFlocker (a Flocker volume mounted by the Flocker agent)\n"+ " DatasetName:\t%v\n"+ " DatasetUUID:\t%v\n", flocker.DatasetName, flocker.DatasetUUID) } -func printCSIPersistentVolumeSource(csi 
*api.CSIPersistentVolumeSource, w PrefixWriter) {
+func printCSIPersistentVolumeSource(csi *corev1.CSIPersistentVolumeSource, w PrefixWriter) {
 	w.Write(LEVEL_2, "Type:\tCSI (a Container Storage Interface (CSI) volume source)\n"+
 		"    Driver:\t%v\n"+
 		"    VolumeHandle:\t%v\n"+
@@ -1177,7 +1249,7 @@ type PersistentVolumeDescriber struct {
 	clientset.Interface
 }
 
-func (d *PersistentVolumeDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
+func (d *PersistentVolumeDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
 	c := d.Core().PersistentVolumes()
 
 	pv, err := c.Get(name, metav1.GetOptions{})
@@ -1185,15 +1257,15 @@ func (d *PersistentVolumeDescriber) Describe(namespace, name string, describerSe
 		return "", err
 	}
 
-	var events *api.EventList
+	var events *corev1.EventList
 	if describerSettings.ShowEvents {
-		events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, pv)
+		events, _ = d.Core().Events(namespace).Search(scheme.Scheme, pv)
 	}
 
 	return describePersistentVolume(pv, events)
 }
 
-func printVolumeNodeAffinity(w PrefixWriter, affinity *api.VolumeNodeAffinity) {
+func printVolumeNodeAffinity(w PrefixWriter, affinity *corev1.VolumeNodeAffinity) {
 	w.Write(LEVEL_0, "Node Affinity:\t")
 	if affinity == nil || affinity.Required == nil {
 		w.WriteLine("<none>")
@@ -1215,7 +1287,7 @@ func printVolumeNodeAffinity(w PrefixWriter, affinity *api.VolumeNodeAffinity) {
 }
 
 // printNodeSelectorTermsMultilineWithIndent prints node selector requirements with a user-defined alignment.
-func printNodeSelectorTermsMultilineWithIndent(w PrefixWriter, indentLevel int, title, innerIndent string, reqs []api.NodeSelectorRequirement) {
+func printNodeSelectorTermsMultilineWithIndent(w PrefixWriter, indentLevel int, title, innerIndent string, reqs []corev1.NodeSelectorRequirement) {
 	w.Write(indentLevel, "%s:%s", title, innerIndent)
 
 	if len(reqs) == 0 {
@@ -1235,14 +1307,14 @@ func printNodeSelectorTermsMultilineWithIndent(w PrefixWriter, indentLevel int,
 	}
 }
 
-func describePersistentVolume(pv *api.PersistentVolume, events *api.EventList) (string, error) {
+func describePersistentVolume(pv *corev1.PersistentVolume, events *corev1.EventList) (string, error) {
 	return tabbedString(func(out io.Writer) error {
 		w := NewPrefixWriter(out)
 		w.Write(LEVEL_0, "Name:\t%s\n", pv.Name)
 		printLabelsMultiline(w, "Labels", pv.ObjectMeta.Labels)
 		printAnnotationsMultiline(w, "Annotations", pv.ObjectMeta.Annotations)
 		w.Write(LEVEL_0, "Finalizers:\t%v\n", pv.ObjectMeta.Finalizers)
-		w.Write(LEVEL_0, "StorageClass:\t%s\n", helper.GetPersistentVolumeClass(pv))
+		w.Write(LEVEL_0, "StorageClass:\t%s\n", storageutil.GetPersistentVolumeClass(pv))
 		if pv.ObjectMeta.DeletionTimestamp != nil {
 			w.Write(LEVEL_0, "Status:\tTerminating (lasts %s)\n", translateTimestampUntil(*pv.ObjectMeta.DeletionTimestamp))
 		} else {
@@ -1254,11 +1326,11 @@ func describePersistentVolume(pv *api.PersistentVolume, events *api.EventList) (
 			w.Write(LEVEL_0, "Claim:\t%s\n", "")
 		}
 		w.Write(LEVEL_0, "Reclaim Policy:\t%v\n", pv.Spec.PersistentVolumeReclaimPolicy)
-		w.Write(LEVEL_0, "Access Modes:\t%s\n", helper.GetAccessModesAsString(pv.Spec.AccessModes))
+		w.Write(LEVEL_0, "Access Modes:\t%s\n", storageutil.GetAccessModesAsString(pv.Spec.AccessModes))
 		if pv.Spec.VolumeMode != nil {
 			w.Write(LEVEL_0, "VolumeMode:\t%v\n", *pv.Spec.VolumeMode)
 		}
-		storage := pv.Spec.Capacity[api.ResourceStorage]
+		storage := pv.Spec.Capacity[corev1.ResourceStorage]
 		w.Write(LEVEL_0, "Capacity:\t%s\n", storage.String())
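// Note on the Capacity line above: pv.Spec.Capacity is a corev1.ResourceList
// (a map[corev1.ResourceName]resource.Quantity), so indexing it with
// corev1.ResourceStorage returns a resource.Quantity by value (a zero
// Quantity if the key is absent), and Quantity.String() renders the canonical
// form, e.g. "10Gi", that `kubectl describe` shows.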
printVolumeNodeAffinity(w, pv.Spec.NodeAffinity) w.Write(LEVEL_0, "Message:\t%s\n", pv.Status.Message) @@ -1276,7 +1348,7 @@ func describePersistentVolume(pv *api.PersistentVolume, events *api.EventList) ( case pv.Spec.ISCSI != nil: printISCSIPersistentVolumeSource(pv.Spec.ISCSI, w) case pv.Spec.Glusterfs != nil: - printGlusterfsVolumeSource(pv.Spec.Glusterfs, w) + printGlusterfsPersistentVolumeSource(pv.Spec.Glusterfs, w) case pv.Spec.RBD != nil: printRBDPersistentVolumeSource(pv.Spec.RBD, w) case pv.Spec.Quobyte != nil: @@ -1325,7 +1397,7 @@ type PersistentVolumeClaimDescriber struct { clientset.Interface } -func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { c := d.Core().PersistentVolumeClaims(namespace) pvc, err := c.Get(name, metav1.GetOptions{}) @@ -1340,18 +1412,18 @@ func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string, descri return "", err } - events, _ := d.Core().Events(namespace).Search(legacyscheme.Scheme, pvc) + events, _ := d.Core().Events(namespace).Search(scheme.Scheme, pvc) return describePersistentVolumeClaim(pvc, events, mountPods) } -func getMountPods(c coreclient.PodInterface, pvcName string) ([]api.Pod, error) { +func getMountPods(c corev1client.PodInterface, pvcName string) ([]corev1.Pod, error) { nsPods, err := c.List(metav1.ListOptions{}) if err != nil { - return []api.Pod{}, err + return []corev1.Pod{}, err } - var pods []api.Pod + var pods []corev1.Pod for _, pod := range nsPods.Items { pvcs := getPvcs(pod.Spec.Volumes) @@ -1366,8 +1438,8 @@ func getMountPods(c coreclient.PodInterface, pvcName string) ([]api.Pod, error) return pods, nil } -func getPvcs(volumes []api.Volume) []api.Volume { - var pvcs []api.Volume +func getPvcs(volumes []corev1.Volume) []corev1.Volume { + var pvcs []corev1.Volume for _, volume := range volumes { if volume.VolumeSource.PersistentVolumeClaim != nil { @@ -1378,12 +1450,12 @@ func getPvcs(volumes []api.Volume) []api.Volume { return pvcs } -func describePersistentVolumeClaim(pvc *api.PersistentVolumeClaim, events *api.EventList, mountPods []api.Pod) (string, error) { +func describePersistentVolumeClaim(pvc *corev1.PersistentVolumeClaim, events *corev1.EventList, mountPods []corev1.Pod) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", pvc.Name) w.Write(LEVEL_0, "Namespace:\t%s\n", pvc.Namespace) - w.Write(LEVEL_0, "StorageClass:\t%s\n", helper.GetPersistentVolumeClaimClass(pvc)) + w.Write(LEVEL_0, "StorageClass:\t%s\n", storageutil.GetPersistentVolumeClaimClass(pvc)) if pvc.ObjectMeta.DeletionTimestamp != nil { w.Write(LEVEL_0, "Status:\tTerminating (lasts %s)\n", translateTimestampUntil(*pvc.ObjectMeta.DeletionTimestamp)) } else { @@ -1393,12 +1465,12 @@ func describePersistentVolumeClaim(pvc *api.PersistentVolumeClaim, events *api.E printLabelsMultiline(w, "Labels", pvc.Labels) printAnnotationsMultiline(w, "Annotations", pvc.Annotations) w.Write(LEVEL_0, "Finalizers:\t%v\n", pvc.ObjectMeta.Finalizers) - storage := pvc.Spec.Resources.Requests[api.ResourceStorage] + storage := pvc.Spec.Resources.Requests[corev1.ResourceStorage] capacity := "" accessModes := "" if pvc.Spec.VolumeName != "" { - accessModes = helper.GetAccessModesAsString(pvc.Status.AccessModes) - storage = pvc.Status.Capacity[api.ResourceStorage] + 
accessModes = storageutil.GetAccessModesAsString(pvc.Status.AccessModes) + storage = pvc.Status.Capacity[corev1.ResourceStorage] capacity = storage.String() } w.Write(LEVEL_0, "Capacity:\t%s\n", capacity) @@ -1430,9 +1502,9 @@ func describePersistentVolumeClaim(pvc *api.PersistentVolumeClaim, events *api.E }) } -func describeContainers(label string, containers []api.Container, containerStatuses []api.ContainerStatus, +func describeContainers(label string, containers []corev1.Container, containerStatuses []corev1.ContainerStatus, resolverFn EnvVarResolverFunc, w PrefixWriter, space string) { - statuses := map[string]api.ContainerStatus{} + statuses := map[string]corev1.ContainerStatus{} for _, status := range containerStatuses { statuses[status.Name] = status } @@ -1456,7 +1528,7 @@ func describeContainers(label string, containers []api.Container, containerStatu } } -func describeContainersLabel(containers []api.Container, label, space string, w PrefixWriter) { +func describeContainersLabel(containers []corev1.Container, label, space string, w PrefixWriter) { none := "" if len(containers) == 0 { none = " " @@ -1464,7 +1536,7 @@ func describeContainersLabel(containers []api.Container, label, space string, w w.Write(LEVEL_0, "%s%s:%s\n", space, label, none) } -func describeContainerBasicInfo(container api.Container, status api.ContainerStatus, ok bool, space string, w PrefixWriter) { +func describeContainerBasicInfo(container corev1.Container, status corev1.ContainerStatus, ok bool, space string, w PrefixWriter) { nameIndent := "" if len(space) > 0 { nameIndent = " " @@ -1491,7 +1563,7 @@ func describeContainerBasicInfo(container api.Container, status api.ContainerSta } } -func describeContainerPorts(cPorts []api.ContainerPort) string { +func describeContainerPorts(cPorts []corev1.ContainerPort) string { ports := make([]string, 0, len(cPorts)) for _, cPort := range cPorts { ports = append(ports, fmt.Sprintf("%d/%s", cPort.ContainerPort, cPort.Protocol)) @@ -1499,7 +1571,7 @@ func describeContainerPorts(cPorts []api.ContainerPort) string { return strings.Join(ports, ", ") } -func describeContainerHostPorts(cPorts []api.ContainerPort) string { +func describeContainerHostPorts(cPorts []corev1.ContainerPort) string { ports := make([]string, 0, len(cPorts)) for _, cPort := range cPorts { ports = append(ports, fmt.Sprintf("%d/%s", cPort.HostPort, cPort.Protocol)) @@ -1507,7 +1579,7 @@ func describeContainerHostPorts(cPorts []api.ContainerPort) string { return strings.Join(ports, ", ") } -func describeContainerCommand(container api.Container, w PrefixWriter) { +func describeContainerCommand(container corev1.Container, w PrefixWriter) { if len(container.Command) > 0 { w.Write(LEVEL_2, "Command:\n") for _, c := range container.Command { @@ -1526,7 +1598,7 @@ func describeContainerCommand(container api.Container, w PrefixWriter) { } } -func describeContainerResource(container api.Container, w PrefixWriter) { +func describeContainerResource(container corev1.Container, w PrefixWriter) { resources := container.Resources if len(resources.Limits) > 0 { w.Write(LEVEL_2, "Limits:\n") @@ -1545,7 +1617,7 @@ func describeContainerResource(container api.Container, w PrefixWriter) { } } -func describeContainerState(status api.ContainerStatus, w PrefixWriter) { +func describeContainerState(status corev1.ContainerStatus, w PrefixWriter) { describeStatus("State", status.State, w) if status.LastTerminationState.Terminated != nil { describeStatus("Last State", status.LastTerminationState, w) @@ -1554,7 +1626,7 @@ func 
describeContainerState(status api.ContainerStatus, w PrefixWriter) {
 	w.Write(LEVEL_2, "Restart Count:\t%d\n", status.RestartCount)
 }
 
-func describeContainerProbe(container api.Container, w PrefixWriter) {
+func describeContainerProbe(container corev1.Container, w PrefixWriter) {
 	if container.LivenessProbe != nil {
 		probe := DescribeProbe(container.LivenessProbe)
 		w.Write(LEVEL_2, "Liveness:\t%s\n", probe)
@@ -1565,7 +1637,7 @@
 	}
 }
 
-func describeContainerVolumes(container api.Container, w PrefixWriter) {
+func describeContainerVolumes(container corev1.Container, w PrefixWriter) {
 	// Show volumeMounts
 	none := ""
 	if len(container.VolumeMounts) == 0 {
@@ -1595,7 +1667,7 @@
 	}
 }
 
-func describeContainerEnvVars(container api.Container, resolverFn EnvVarResolverFunc, w PrefixWriter) {
+func describeContainerEnvVars(container corev1.Container, resolverFn EnvVarResolverFunc, w PrefixWriter) {
 	none := ""
 	if len(container.Env) == 0 {
 		none = "\t<none>"
@@ -1641,7 +1713,7 @@
 	}
 }
 
-func describeContainerEnvFrom(container api.Container, resolverFn EnvVarResolverFunc, w PrefixWriter) {
+func describeContainerEnvFrom(container corev1.Container, resolverFn EnvVarResolverFunc, w PrefixWriter) {
 	none := ""
 	if len(container.EnvFrom) == 0 {
 		none = "\t<none>"
@@ -1670,7 +1742,7 @@
 }
 
 // DescribeProbe is exported for consumers in other API groups that have probes
-func DescribeProbe(probe *api.Probe) string {
+func DescribeProbe(probe *corev1.Probe) string {
 	attrs := fmt.Sprintf("delay=%ds timeout=%ds period=%ds #success=%d #failure=%d", probe.InitialDelaySeconds, probe.TimeoutSeconds, probe.PeriodSeconds, probe.SuccessThreshold, probe.FailureThreshold)
 	switch {
 	case probe.Exec != nil:
@@ -1691,17 +1763,17 @@
 	return fmt.Sprintf("unknown %s", attrs)
 }
 
-type EnvVarResolverFunc func(e api.EnvVar) string
+type EnvVarResolverFunc func(e corev1.EnvVar) string
 
 // EnvValueRetriever is exported for use by describers in other packages
-func EnvValueRetriever(pod *api.Pod) EnvVarResolverFunc {
-	return func(e api.EnvVar) string {
+func EnvValueRetriever(pod *corev1.Pod) EnvVarResolverFunc {
+	return func(e corev1.EnvVar) string {
 		gv, err := schema.ParseGroupVersion(e.ValueFrom.FieldRef.APIVersion)
 		if err != nil {
 			return ""
 		}
 		gvk := gv.WithKind("Pod")
-		internalFieldPath, _, err := legacyscheme.Scheme.ConvertFieldLabel(gvk, e.ValueFrom.FieldRef.FieldPath, "")
+		internalFieldPath, _, err := scheme.Scheme.ConvertFieldLabel(gvk, e.ValueFrom.FieldRef.FieldPath, "")
 		if err != nil {
 			return "" // pod validation should catch this on create
 		}
@@ -1715,7 +1787,7 @@ func EnvValueRetriever(pod *api.Pod) EnvVarResolverFunc {
 	}
 }
 
-func describeStatus(stateName string, state api.ContainerState, w PrefixWriter) {
+func describeStatus(stateName string, state corev1.ContainerState, w PrefixWriter) {
 	switch {
 	case state.Running != nil:
 		w.Write(LEVEL_2, "%s:\tRunning\n", stateName)
@@ -1744,7 +1816,7 @@ func describeStatus(stateName string, state api.ContainerState, w PrefixWriter)
 	}
 }
 
-func describeVolumeClaimTemplates(templates []api.PersistentVolumeClaim, w PrefixWriter) {
+func describeVolumeClaimTemplates(templates []corev1.PersistentVolumeClaim, w PrefixWriter) {
 	if len(templates) == 0 {
 		w.Write(LEVEL_0, "Volume
Claims:\t\n") return @@ -1752,10 +1824,10 @@ func describeVolumeClaimTemplates(templates []api.PersistentVolumeClaim, w Prefi w.Write(LEVEL_0, "Volume Claims:\n") for _, pvc := range templates { w.Write(LEVEL_1, "Name:\t%s\n", pvc.Name) - w.Write(LEVEL_1, "StorageClass:\t%s\n", helper.GetPersistentVolumeClaimClass(&pvc)) + w.Write(LEVEL_1, "StorageClass:\t%s\n", storageutil.GetPersistentVolumeClaimClass(&pvc)) printLabelsMultilineWithIndent(w, " ", "Labels", "\t", pvc.Labels, sets.NewString()) printLabelsMultilineWithIndent(w, " ", "Annotations", "\t", pvc.Annotations, sets.NewString()) - if capacity, ok := pvc.Spec.Resources.Requests[api.ResourceStorage]; ok { + if capacity, ok := pvc.Spec.Resources.Requests[corev1.ResourceStorage]; ok { w.Write(LEVEL_1, "Capacity:\t%s\n", capacity.String()) } else { w.Write(LEVEL_1, "Capacity:\t%s\n", "") @@ -1786,7 +1858,7 @@ type ReplicationControllerDescriber struct { clientset.Interface } -func (d *ReplicationControllerDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *ReplicationControllerDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { rc := d.Core().ReplicationControllers(namespace) pc := d.Core().Pods(namespace) @@ -1800,15 +1872,15 @@ func (d *ReplicationControllerDescriber) Describe(namespace, name string, descri return "", err } - var events *api.EventList + var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, controller) + events, _ = d.Core().Events(namespace).Search(scheme.Scheme, controller) } return describeReplicationController(controller, events, running, waiting, succeeded, failed) } -func describeReplicationController(controller *api.ReplicationController, events *api.EventList, running, waiting, succeeded, failed int) (string, error) { +func describeReplicationController(controller *corev1.ReplicationController, events *corev1.EventList, running, waiting, succeeded, failed int) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", controller.Name) @@ -1816,7 +1888,7 @@ func describeReplicationController(controller *api.ReplicationController, events w.Write(LEVEL_0, "Selector:\t%s\n", labels.FormatLabels(controller.Spec.Selector)) printLabelsMultiline(w, "Labels", controller.Labels) printAnnotationsMultiline(w, "Annotations", controller.Annotations) - w.Write(LEVEL_0, "Replicas:\t%d current / %d desired\n", controller.Status.Replicas, controller.Spec.Replicas) + w.Write(LEVEL_0, "Replicas:\t%d current / %d desired\n", controller.Status.Replicas, *controller.Spec.Replicas) w.Write(LEVEL_0, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed) DescribePodTemplate(controller.Spec.Template, w) if len(controller.Status.Conditions) > 0 { @@ -1833,7 +1905,7 @@ func describeReplicationController(controller *api.ReplicationController, events }) } -func DescribePodTemplate(template *api.PodTemplateSpec, w PrefixWriter) { +func DescribePodTemplate(template *corev1.PodTemplateSpec, w PrefixWriter) { w.Write(LEVEL_0, "Pod Template:\n") if template == nil { w.Write(LEVEL_1, "") @@ -1858,8 +1930,8 @@ type ReplicaSetDescriber struct { clientset.Interface } -func (d *ReplicaSetDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { - rsc := d.Extensions().ReplicaSets(namespace) 
+func (d *ReplicaSetDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { + rsc := d.Apps().ReplicaSets(namespace) pc := d.Core().Pods(namespace) rs, err := rsc.Get(name, metav1.GetOptions{}) @@ -1874,15 +1946,15 @@ func (d *ReplicaSetDescriber) Describe(namespace, name string, describerSettings running, waiting, succeeded, failed, getPodErr := getPodStatusForController(pc, selector, rs.UID) - var events *api.EventList + var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, rs) + events, _ = d.Core().Events(namespace).Search(scheme.Scheme, rs) } return describeReplicaSet(rs, events, running, waiting, succeeded, failed, getPodErr) } -func describeReplicaSet(rs *extensions.ReplicaSet, events *api.EventList, running, waiting, succeeded, failed int, getPodErr error) (string, error) { +func describeReplicaSet(rs *appsv1.ReplicaSet, events *corev1.EventList, running, waiting, succeeded, failed int, getPodErr error) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", rs.Name) @@ -1893,7 +1965,7 @@ func describeReplicaSet(rs *extensions.ReplicaSet, events *api.EventList, runnin if controlledBy := printController(rs); len(controlledBy) > 0 { w.Write(LEVEL_0, "Controlled By:\t%s\n", controlledBy) } - w.Write(LEVEL_0, "Replicas:\t%d current / %d desired\n", rs.Status.Replicas, rs.Spec.Replicas) + w.Write(LEVEL_0, "Replicas:\t%d current / %d desired\n", rs.Status.Replicas, *rs.Spec.Replicas) w.Write(LEVEL_0, "Pods Status:\t") if getPodErr != nil { w.Write(LEVEL_0, "error in fetching pods: %s\n", getPodErr) @@ -1920,21 +1992,21 @@ type JobDescriber struct { clientset.Interface } -func (d *JobDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *JobDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { job, err := d.Batch().Jobs(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", err } - var events *api.EventList + var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, job) + events, _ = d.Core().Events(namespace).Search(scheme.Scheme, job) } return describeJob(job, events) } -func describeJob(job *batch.Job, events *api.EventList) (string, error) { +func describeJob(job *batchv1.Job, events *corev1.EventList) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", job.Name) @@ -1975,30 +2047,23 @@ func describeJob(job *batch.Job, events *api.EventList) (string, error) { // CronJobDescriber generates information about a cron job and the jobs it has created. 
type CronJobDescriber struct { - clientset.Interface - external externalclient.Interface + client clientset.Interface } -func (d *CronJobDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { - cronJob, err := d.external.BatchV1beta1().CronJobs(namespace).Get(name, metav1.GetOptions{}) +func (d *CronJobDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { + cronJob, err := d.client.BatchV1beta1().CronJobs(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", err } - var events *api.EventList + var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, cronJob) - } - - internalCronJob := &batch.CronJob{} - if err := legacyscheme.Scheme.Convert(cronJob, internalCronJob, nil); err != nil { - return "", err + events, _ = d.client.CoreV1().Events(namespace).Search(scheme.Scheme, cronJob) } - - return describeCronJob(internalCronJob, events) + return describeCronJob(cronJob, events) } -func describeCronJob(cronJob *batch.CronJob, events *api.EventList) (string, error) { +func describeCronJob(cronJob *batchv1beta1.CronJob, events *corev1.EventList) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", cronJob.Name) @@ -2027,7 +2092,7 @@ func describeCronJob(cronJob *batch.CronJob, events *api.EventList) (string, err }) } -func describeJobTemplate(jobTemplate batch.JobTemplateSpec, w PrefixWriter) { +func describeJobTemplate(jobTemplate batchv1beta1.JobTemplateSpec, w PrefixWriter) { if jobTemplate.Spec.Selector != nil { selector, _ := metav1.LabelSelectorAsSelector(jobTemplate.Spec.Selector) w.Write(LEVEL_0, "Selector:\t%s\n", selector) @@ -2050,7 +2115,7 @@ func describeJobTemplate(jobTemplate batch.JobTemplateSpec, w PrefixWriter) { DescribePodTemplate(&jobTemplate.Spec.Template, w) } -func printActiveJobs(w PrefixWriter, title string, jobs []api.ObjectReference) { +func printActiveJobs(w PrefixWriter, title string, jobs []corev1.ObjectReference) { w.Write(LEVEL_0, "%s:\t", title) if len(jobs) == 0 { w.WriteLine("") @@ -2071,8 +2136,8 @@ type DaemonSetDescriber struct { clientset.Interface } -func (d *DaemonSetDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { - dc := d.Extensions().DaemonSets(namespace) +func (d *DaemonSetDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { + dc := d.Apps().DaemonSets(namespace) pc := d.Core().Pods(namespace) daemon, err := dc.Get(name, metav1.GetOptions{}) @@ -2089,15 +2154,15 @@ func (d *DaemonSetDescriber) Describe(namespace, name string, describerSettings return "", err } - var events *api.EventList + var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, daemon) + events, _ = d.Core().Events(namespace).Search(scheme.Scheme, daemon) } return describeDaemonSet(daemon, events, running, waiting, succeeded, failed) } -func describeDaemonSet(daemon *extensions.DaemonSet, events *api.EventList, running, waiting, succeeded, failed int) (string, error) { +func describeDaemonSet(daemon *appsv1.DaemonSet, events *corev1.EventList, running, waiting, succeeded, failed int) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", daemon.Name) @@ -2129,7 
+2194,7 @@ type SecretDescriber struct { clientset.Interface } -func (d *SecretDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *SecretDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { c := d.Core().Secrets(namespace) secret, err := c.Get(name, metav1.GetOptions{}) @@ -2140,13 +2205,13 @@ func (d *SecretDescriber) Describe(namespace, name string, describerSettings pri return describeSecret(secret) } -func describeSecret(secret *api.Secret) (string, error) { +func describeSecret(secret *corev1.Secret) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", secret.Name) w.Write(LEVEL_0, "Namespace:\t%s\n", secret.Namespace) printLabelsMultiline(w, "Labels", secret.Labels) - skipAnnotations := sets.NewString(api.LastAppliedConfigAnnotation) + skipAnnotations := sets.NewString(corev1.LastAppliedConfigAnnotation) printAnnotationsMultilineWithFilter(w, "Annotations", secret.Annotations, skipAnnotations) w.Write(LEVEL_0, "\nType:\t%s\n", secret.Type) @@ -2154,7 +2219,7 @@ func describeSecret(secret *api.Secret) (string, error) { w.Write(LEVEL_0, "\nData\n====\n") for k, v := range secret.Data { switch { - case k == api.ServiceAccountTokenKey && secret.Type == api.SecretTypeServiceAccountToken: + case k == corev1.ServiceAccountTokenKey && secret.Type == corev1.SecretTypeServiceAccountToken: w.Write(LEVEL_0, "%s:\t%s\n", k, string(v)) default: w.Write(LEVEL_0, "%s:\t%d bytes\n", k, len(v)) @@ -2169,7 +2234,7 @@ type IngressDescriber struct { clientset.Interface } -func (i *IngressDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (i *IngressDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { c := i.Extensions().Ingresses(namespace) ing, err := c.Get(name, metav1.GetOptions{}) if err != nil { @@ -2178,7 +2243,7 @@ func (i *IngressDescriber) Describe(namespace, name string, describerSettings pr return i.describeIngress(ing, describerSettings) } -func (i *IngressDescriber) describeBackend(ns string, backend *extensions.IngressBackend) string { +func (i *IngressDescriber) describeBackend(ns string, backend *extensionsv1beta1.IngressBackend) string { endpoints, _ := i.Core().Endpoints(ns).Get(backend.ServiceName, metav1.GetOptions{}) service, _ := i.Core().Services(ns).Get(backend.ServiceName, metav1.GetOptions{}) spName := "" @@ -2198,7 +2263,7 @@ func (i *IngressDescriber) describeBackend(ns string, backend *extensions.Ingres return formatEndpoints(endpoints, sets.NewString(spName)) } -func (i *IngressDescriber) describeIngress(ing *extensions.Ingress, describerSettings printers.DescriberSettings) (string, error) { +func (i *IngressDescriber) describeIngress(ing *extensionsv1beta1.Ingress, describerSettings describe.DescriberSettings) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%v\n", ing.Name) @@ -2209,7 +2274,7 @@ func (i *IngressDescriber) describeIngress(ing *extensions.Ingress, describerSet if def == nil { // Ingresses that don't specify a default backend inherit the // default backend in the kube-system namespace. 
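// The replacement literal below can be read as the v1beta1 spelling of "send
// unmatched traffic to default-http-backend:80". With the intstr helpers from
// k8s.io/apimachinery/pkg/util/intstr (an assumption here; this file spells
// the IntOrString out explicitly), the same value would be:
//
//	def := &extensionsv1beta1.IngressBackend{
//		ServiceName: "default-http-backend",
//		ServicePort: intstr.FromInt(80),
//	}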
- def = &extensions.IngressBackend{ + def = &extensionsv1beta1.IngressBackend{ ServiceName: "default-http-backend", ServicePort: intstr.IntOrString{Type: intstr.Int, IntVal: 80}, } @@ -2242,7 +2307,7 @@ func (i *IngressDescriber) describeIngress(ing *extensions.Ingress, describerSet describeIngressAnnotations(w, ing.Annotations) if describerSettings.ShowEvents { - events, _ := i.Core().Events(ing.Namespace).Search(legacyscheme.Scheme, ing) + events, _ := i.Core().Events(ing.Namespace).Search(scheme.Scheme, ing) if events != nil { DescribeEvents(events, w) } @@ -2251,7 +2316,7 @@ func (i *IngressDescriber) describeIngress(ing *extensions.Ingress, describerSet }) } -func describeIngressTLS(w PrefixWriter, ingTLS []extensions.IngressTLS) { +func describeIngressTLS(w PrefixWriter, ingTLS []extensionsv1beta1.IngressTLS) { w.Write(LEVEL_0, "TLS:\n") for _, t := range ingTLS { if t.SecretName == "" { @@ -2277,7 +2342,7 @@ type ServiceDescriber struct { clientset.Interface } -func (d *ServiceDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *ServiceDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { c := d.Core().Services(namespace) service, err := c.Get(name, metav1.GetOptions{}) @@ -2286,14 +2351,14 @@ func (d *ServiceDescriber) Describe(namespace, name string, describerSettings pr } endpoints, _ := d.Core().Endpoints(namespace).Get(name, metav1.GetOptions{}) - var events *api.EventList + var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, service) + events, _ = d.Core().Events(namespace).Search(scheme.Scheme, service) } return describeService(service, endpoints, events) } -func buildIngressString(ingress []api.LoadBalancerIngress) string { +func buildIngressString(ingress []corev1.LoadBalancerIngress) string { var buffer bytes.Buffer for i := range ingress { @@ -2309,9 +2374,9 @@ func buildIngressString(ingress []api.LoadBalancerIngress) string { return buffer.String() } -func describeService(service *api.Service, endpoints *api.Endpoints, events *api.EventList) (string, error) { +func describeService(service *corev1.Service, endpoints *corev1.Endpoints, events *corev1.EventList) (string, error) { if endpoints == nil { - endpoints = &api.Endpoints{} + endpoints = &corev1.Endpoints{} } return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) @@ -2375,7 +2440,7 @@ type EndpointsDescriber struct { clientset.Interface } -func (d *EndpointsDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *EndpointsDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { c := d.Core().Endpoints(namespace) ep, err := c.Get(name, metav1.GetOptions{}) @@ -2383,15 +2448,15 @@ func (d *EndpointsDescriber) Describe(namespace, name string, describerSettings return "", err } - var events *api.EventList + var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, ep) + events, _ = d.Core().Events(namespace).Search(scheme.Scheme, ep) } return describeEndpoints(ep, events) } -func describeEndpoints(ep *api.Endpoints, events *api.EventList) (string, error) { +func describeEndpoints(ep *corev1.Endpoints, events *corev1.EventList) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) 
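// Layout note: every describer in this file follows the pattern visible here.
// tabbedString(func(out io.Writer) error { ... }) runs the closure against a
// tab-aware buffer and returns the accumulated text, and NewPrefixWriter(out)
// wraps it so that LEVEL_0/LEVEL_1/LEVEL_2 writes get increasing indentation,
// which is how nested fields line up in `kubectl describe` output.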
 		w.Write(LEVEL_0, "Name:\t%s\n", ep.Name)
@@ -2450,7 +2515,7 @@ type ServiceAccountDescriber struct {
 	clientset.Interface
 }
 
-func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
+func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
 	c := d.Core().ServiceAccounts(namespace)
 
 	serviceAccount, err := c.Get(name, metav1.GetOptions{})
@@ -2458,7 +2523,7 @@ func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSett
 		return "", err
 	}
 
-	tokens := []api.Secret{}
+	tokens := []corev1.Secret{}
 
 	// missingSecrets is the set of all secrets present in the
 	// serviceAccount but not present in the set of existing secrets.
@@ -2473,9 +2538,9 @@ func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSett
 		existingSecrets := sets.NewString()
 
 		for _, s := range secrets.Items {
-			if s.Type == api.SecretTypeServiceAccountToken {
-				name, _ := s.Annotations[api.ServiceAccountNameKey]
-				uid, _ := s.Annotations[api.ServiceAccountUIDKey]
+			if s.Type == corev1.SecretTypeServiceAccountToken {
+				name, _ := s.Annotations[corev1.ServiceAccountNameKey]
+				uid, _ := s.Annotations[corev1.ServiceAccountUIDKey]
 				if name == serviceAccount.Name && uid == string(serviceAccount.UID) {
 					tokens = append(tokens, s)
 				}
@@ -2495,15 +2560,15 @@ func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSett
 		}
 	}
 
-	var events *api.EventList
+	var events *corev1.EventList
 	if describerSettings.ShowEvents {
-		events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, serviceAccount)
+		events, _ = d.Core().Events(namespace).Search(scheme.Scheme, serviceAccount)
 	}
 
 	return describeServiceAccount(serviceAccount, tokens, missingSecrets, events)
 }
 
-func describeServiceAccount(serviceAccount *api.ServiceAccount, tokens []api.Secret, missingSecrets sets.String, events *api.EventList) (string, error) {
+func describeServiceAccount(serviceAccount *corev1.ServiceAccount, tokens []corev1.Secret, missingSecrets sets.String, events *corev1.EventList) (string, error) {
 	return tabbedString(func(out io.Writer) error {
 		w := NewPrefixWriter(out)
 		w.Write(LEVEL_0, "Name:\t%s\n", serviceAccount.Name)
@@ -2564,10 +2629,10 @@ func describeServiceAccount(serviceAccount *api.Sec
 
 // RoleDescriber generates information about a role.
 type RoleDescriber struct {
-	externalclient.Interface
+	clientset.Interface
 }
 
-func (d *RoleDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
+func (d *RoleDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
 	role, err := d.Rbac().Roles(namespace).Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
@@ -2575,14 +2640,14 @@ func (d *RoleDescriber) Describe(namespace, name string, describerSettings print
 
 	breakdownRules := []rbacv1.PolicyRule{}
 	for _, rule := range role.Rules {
-		breakdownRules = append(breakdownRules, validation.BreakdownRule(rule)...)
+		breakdownRules = append(breakdownRules, rbac.BreakdownRule(rule)...)
 	}
 
-	compactRules, err := validation.CompactRules(breakdownRules)
+	compactRules, err := rbac.CompactRules(breakdownRules)
 	if err != nil {
 		return "", err
 	}
-	sort.Stable(rbacv1helpers.SortableRuleSlice(compactRules))
+	sort.Stable(rbac.SortableRuleSlice(compactRules))
 
 	return tabbedString(func(out io.Writer) error {
 		w := NewPrefixWriter(out)
@@ -2603,10 +2668,10 @@ func (d *RoleDescriber) Describe(namespace, name string, describerSettings print
 
 // ClusterRoleDescriber generates information about a cluster role.
 type ClusterRoleDescriber struct {
-	externalclient.Interface
+	clientset.Interface
 }
 
-func (d *ClusterRoleDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
+func (d *ClusterRoleDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
 	role, err := d.Rbac().ClusterRoles().Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
@@ -2614,14 +2679,14 @@ func (d *ClusterRoleDescriber) Describe(namespace, name string, describerSetting
 
 	breakdownRules := []rbacv1.PolicyRule{}
 	for _, rule := range role.Rules {
-		breakdownRules = append(breakdownRules, validation.BreakdownRule(rule)...)
+		breakdownRules = append(breakdownRules, rbac.BreakdownRule(rule)...)
 	}
 
-	compactRules, err := validation.CompactRules(breakdownRules)
+	compactRules, err := rbac.CompactRules(breakdownRules)
 	if err != nil {
 		return "", err
 	}
-	sort.Stable(rbacv1helpers.SortableRuleSlice(compactRules))
+	sort.Stable(rbac.SortableRuleSlice(compactRules))
 
 	return tabbedString(func(out io.Writer) error {
 		w := NewPrefixWriter(out)
@@ -2659,10 +2724,10 @@ func combineResourceGroup(resource, group []string) string {
 
 // RoleBindingDescriber generates information about a role binding.
 type RoleBindingDescriber struct {
-	externalclient.Interface
+	clientset.Interface
 }
 
-func (d *RoleBindingDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
+func (d *RoleBindingDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
 	binding, err := d.Rbac().RoleBindings(namespace).Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
@@ -2691,10 +2756,10 @@ func (d *RoleBindingDescriber) Describe(namespace, name string, describerSetting
 
 // ClusterRoleBindingDescriber generates information about a cluster role binding.
type ClusterRoleBindingDescriber struct { - externalclient.Interface + clientset.Interface } -func (d *ClusterRoleBindingDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *ClusterRoleBindingDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { binding, err := d.Rbac().ClusterRoleBindings().Get(name, metav1.GetOptions{}) if err != nil { return "", err @@ -2726,14 +2791,14 @@ type NodeDescriber struct { clientset.Interface } -func (d *NodeDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *NodeDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { mc := d.Core().Nodes() node, err := mc.Get(name, metav1.GetOptions{}) if err != nil { return "", err } - fieldSelector, err := fields.ParseSelector("spec.nodeName=" + name + ",status.phase!=" + string(api.PodSucceeded) + ",status.phase!=" + string(api.PodFailed)) + fieldSelector, err := fields.ParseSelector("spec.nodeName=" + name + ",status.phase!=" + string(corev1.PodSucceeded) + ",status.phase!=" + string(corev1.PodFailed)) if err != nil { return "", err } @@ -2748,21 +2813,21 @@ func (d *NodeDescriber) Describe(namespace, name string, describerSettings print canViewPods = false } - var events *api.EventList + var events *corev1.EventList if describerSettings.ShowEvents { - if ref, err := ref.GetReference(legacyscheme.Scheme, node); err != nil { - glog.Errorf("Unable to construct reference to '%#v': %v", node, err) + if ref, err := reference.GetReference(scheme.Scheme, node); err != nil { + klog.Errorf("Unable to construct reference to '%#v': %v", node, err) } else { // TODO: We haven't decided the namespace for Node object yet. 
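// The assignment below appears to work around node events being recorded with
// the node's name in their involvedObject.uid field: overwriting ref.UID with
// the name lets the cross-namespace event search that follows match those
// events.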
 			ref.UID = types.UID(ref.Name)
-			events, _ = d.Core().Events("").Search(legacyscheme.Scheme, ref)
+			events, _ = d.Core().Events("").Search(scheme.Scheme, ref)
 		}
 	}
 
 	return describeNode(node, nodeNonTerminatedPodsList, events, canViewPods)
 }
 
-func describeNode(node *api.Node, nodeNonTerminatedPodsList *api.PodList, events *api.EventList, canViewPods bool) (string, error) {
+func describeNode(node *corev1.Node, nodeNonTerminatedPodsList *corev1.PodList, events *corev1.EventList, canViewPods bool) (string, error) {
 	return tabbedString(func(out io.Writer) error {
 		w := NewPrefixWriter(out)
 		w.Write(LEVEL_0, "Name:\t%s\n", node.Name)
@@ -2795,8 +2860,8 @@ func describeNode(node *api.Node, nodeNonTerminatedPodsList *api.PodList, events
 			w.Write(LEVEL_1, "%s:\t%s\n", address.Type, address.Address)
 		}
 
-		printResourceList := func(resourceList api.ResourceList) {
-			resources := make([]api.ResourceName, 0, len(resourceList))
+		printResourceList := func(resourceList corev1.ResourceList) {
+			resources := make([]corev1.ResourceName, 0, len(resourceList))
 			for resource := range resourceList {
 				resources = append(resources, resource)
 			}
@@ -2850,7 +2915,7 @@ type StatefulSetDescriber struct {
 	client clientset.Interface
 }
 
-func (p *StatefulSetDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
+func (p *StatefulSetDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
 	ps, err := p.client.Apps().StatefulSets(namespace).Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
@@ -2867,15 +2932,15 @@ func (p *StatefulSetDescriber) Describe(namespace, name string, describerSetting
 		return "", err
 	}
 
-	var events *api.EventList
+	var events *corev1.EventList
 	if describerSettings.ShowEvents {
-		events, _ = p.client.Core().Events(namespace).Search(legacyscheme.Scheme, ps)
+		events, _ = p.client.Core().Events(namespace).Search(scheme.Scheme, ps)
 	}
 
 	return describeStatefulSet(ps, selector, events, running, waiting, succeeded, failed)
 }
 
-func describeStatefulSet(ps *apps.StatefulSet, selector labels.Selector, events *api.EventList, running, waiting, succeeded, failed int) (string, error) {
+func describeStatefulSet(ps *appsv1.StatefulSet, selector labels.Selector, events *corev1.EventList, running, waiting, succeeded, failed int) (string, error) {
 	return tabbedString(func(out io.Writer) error {
 		w := NewPrefixWriter(out)
 		w.Write(LEVEL_0, "Name:\t%s\n", ps.ObjectMeta.Name)
@@ -2888,7 +2953,7 @@ func describeStatefulSet(ps *apps.StatefulSet, selector labels.Selector, events
 		w.Write(LEVEL_0, "Update Strategy:\t%s\n", ps.Spec.UpdateStrategy.Type)
 		if ps.Spec.UpdateStrategy.RollingUpdate != nil {
 			ru := ps.Spec.UpdateStrategy.RollingUpdate
-			if ru.Partition != 0 {
-				w.Write(LEVEL_1, "Partition:\t%d\n", ru.Partition)
+			if ru.Partition != nil {
+				w.Write(LEVEL_1, "Partition:\t%d\n", *ru.Partition)
 			}
 		}
@@ -2908,13 +2973,13 @@ type CertificateSigningRequestDescriber struct {
 	client clientset.Interface
 }
 
-func (p *CertificateSigningRequestDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
+func (p *CertificateSigningRequestDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
 	csr, err := p.client.Certificates().CertificateSigningRequests().Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
 
-	cr, err := certificates.ParseCSR(csr)
+	cr, err := certificate.ParseCSR(csr)
 	if err != nil {
 		return "", fmt.Errorf("Error parsing CSR: %v", err)
 	}
@@ -2923,15 +2988,15 @@
func (p *CertificateSigningRequestDescriber) Describe(namespace, name string, de return "", err } - var events *api.EventList + var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = p.client.Core().Events(namespace).Search(legacyscheme.Scheme, csr) + events, _ = p.client.Core().Events(namespace).Search(scheme.Scheme, csr) } return describeCertificateSigningRequest(csr, cr, status, events) } -func describeCertificateSigningRequest(csr *certificates.CertificateSigningRequest, cr *x509.CertificateRequest, status string, events *api.EventList) (string, error) { +func describeCertificateSigningRequest(csr *certificatesv1beta1.CertificateSigningRequest, cr *x509.CertificateRequest, status string, events *corev1.EventList) (string, error) { printListHelper := func(w PrefixWriter, prefix, name string, values []string) { if len(values) == 0 { return @@ -2985,21 +3050,21 @@ type HorizontalPodAutoscalerDescriber struct { client clientset.Interface } -func (d *HorizontalPodAutoscalerDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { - hpa, err := d.client.Autoscaling().HorizontalPodAutoscalers(namespace).Get(name, metav1.GetOptions{}) +func (d *HorizontalPodAutoscalerDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { + hpa, err := d.client.AutoscalingV2beta2().HorizontalPodAutoscalers(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", err } - var events *api.EventList + var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = d.client.Core().Events(namespace).Search(legacyscheme.Scheme, hpa) + events, _ = d.client.Core().Events(namespace).Search(scheme.Scheme, hpa) } return describeHorizontalPodAutoscaler(hpa, events, d) } -func describeHorizontalPodAutoscaler(hpa *autoscaling.HorizontalPodAutoscaler, events *api.EventList, d *HorizontalPodAutoscalerDescriber) (string, error) { +func describeHorizontalPodAutoscaler(hpa *autoscalingv2beta2.HorizontalPodAutoscaler, events *corev1.EventList, d *HorizontalPodAutoscalerDescriber) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", hpa.Name) @@ -3013,7 +3078,7 @@ func describeHorizontalPodAutoscaler(hpa *autoscaling.HorizontalPodAutoscaler, e w.Write(LEVEL_0, "Metrics:\t( current / target )\n") for i, metric := range hpa.Spec.Metrics { switch metric.Type { - case autoscaling.ExternalMetricSourceType: + case autoscalingv2beta2.ExternalMetricSourceType: if metric.External.Target.AverageValue != nil { current := "" if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].External != nil && @@ -3029,19 +3094,19 @@ func describeHorizontalPodAutoscaler(hpa *autoscaling.HorizontalPodAutoscaler, e w.Write(LEVEL_1, "%q (target value):\t%s / %s\n", metric.External.Metric.Name, current, metric.External.Target.Value.String()) } - case autoscaling.PodsMetricSourceType: + case autoscalingv2beta2.PodsMetricSourceType: current := "" if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].Pods != nil { current = hpa.Status.CurrentMetrics[i].Pods.Current.AverageValue.String() } w.Write(LEVEL_1, "%q on pods:\t%s / %s\n", metric.Pods.Metric.Name, current, metric.Pods.Target.AverageValue.String()) - case autoscaling.ObjectMetricSourceType: + case autoscalingv2beta2.ObjectMetricSourceType: current := "" if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].Object != nil { current = 
hpa.Status.CurrentMetrics[i].Object.Current.Value.String() } w.Write(LEVEL_1, "%q on %s/%s:\t%s / %s\n", metric.Object.Metric.Name, metric.Object.DescribedObject.Kind, metric.Object.DescribedObject.Name, current, metric.Object.Target.Value.String()) - case autoscaling.ResourceMetricSourceType: + case autoscalingv2beta2.ResourceMetricSourceType: w.Write(LEVEL_1, "resource %s on pods", string(metric.Resource.Name)) if metric.Resource.Target.AverageValue != nil { current := "" @@ -3091,10 +3156,10 @@ func describeHorizontalPodAutoscaler(hpa *autoscaling.HorizontalPodAutoscaler, e }) } -func describeNodeResource(nodeNonTerminatedPodsList *api.PodList, node *api.Node, w PrefixWriter) { +func describeNodeResource(nodeNonTerminatedPodsList *corev1.PodList, node *corev1.Node, w PrefixWriter) { w.Write(LEVEL_0, "Non-terminated Pods:\t(%d in total)\n", len(nodeNonTerminatedPodsList.Items)) - w.Write(LEVEL_1, "Namespace\tName\t\tCPU Requests\tCPU Limits\tMemory Requests\tMemory Limits\n") - w.Write(LEVEL_1, "---------\t----\t\t------------\t----------\t---------------\t-------------\n") + w.Write(LEVEL_1, "Namespace\tName\t\tCPU Requests\tCPU Limits\tMemory Requests\tMemory Limits\tAGE\n") + w.Write(LEVEL_1, "---------\t----\t\t------------\t----------\t---------------\t-------------\t---\n") allocatable := node.Status.Capacity if len(node.Status.Allocatable) > 0 { allocatable = node.Status.Allocatable @@ -3102,21 +3167,22 @@ func describeNodeResource(nodeNonTerminatedPodsList *api.PodList, node *api.Node for _, pod := range nodeNonTerminatedPodsList.Items { req, limit := resourcehelper.PodRequestsAndLimits(&pod) - cpuReq, cpuLimit, memoryReq, memoryLimit := req[api.ResourceCPU], limit[api.ResourceCPU], req[api.ResourceMemory], limit[api.ResourceMemory] + cpuReq, cpuLimit, memoryReq, memoryLimit := req[corev1.ResourceCPU], limit[corev1.ResourceCPU], req[corev1.ResourceMemory], limit[corev1.ResourceMemory] fractionCpuReq := float64(cpuReq.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100 fractionCpuLimit := float64(cpuLimit.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100 fractionMemoryReq := float64(memoryReq.Value()) / float64(allocatable.Memory().Value()) * 100 fractionMemoryLimit := float64(memoryLimit.Value()) / float64(allocatable.Memory().Value()) * 100 - w.Write(LEVEL_1, "%s\t%s\t\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\n", pod.Namespace, pod.Name, + w.Write(LEVEL_1, "%s\t%s\t\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\t%s\n", pod.Namespace, pod.Name, cpuReq.String(), int64(fractionCpuReq), cpuLimit.String(), int64(fractionCpuLimit), - memoryReq.String(), int64(fractionMemoryReq), memoryLimit.String(), int64(fractionMemoryLimit)) + memoryReq.String(), int64(fractionMemoryReq), memoryLimit.String(), int64(fractionMemoryLimit), translateTimestampSince(pod.CreationTimestamp)) } w.Write(LEVEL_0, "Allocated resources:\n (Total limits may be over 100 percent, i.e., overcommitted.)\n") w.Write(LEVEL_1, "Resource\tRequests\tLimits\n") w.Write(LEVEL_1, "--------\t--------\t------\n") reqs, limits := getPodsTotalRequestsAndLimits(nodeNonTerminatedPodsList) - cpuReqs, cpuLimits, memoryReqs, memoryLimits := reqs[api.ResourceCPU], limits[api.ResourceCPU], reqs[api.ResourceMemory], limits[api.ResourceMemory] + cpuReqs, cpuLimits, memoryReqs, memoryLimits, ephemeralstorageReqs, ephemeralstorageLimits := + reqs[corev1.ResourceCPU], limits[corev1.ResourceCPU], reqs[corev1.ResourceMemory], limits[corev1.ResourceMemory], reqs[corev1.ResourceEphemeralStorage], 
limits[corev1.ResourceEphemeralStorage] fractionCpuReqs := float64(0) fractionCpuLimits := float64(0) if allocatable.Cpu().MilliValue() != 0 { @@ -3129,25 +3195,33 @@ func describeNodeResource(nodeNonTerminatedPodsList *api.PodList, node *api.Node fractionMemoryReqs = float64(memoryReqs.Value()) / float64(allocatable.Memory().Value()) * 100 fractionMemoryLimits = float64(memoryLimits.Value()) / float64(allocatable.Memory().Value()) * 100 } + fractionEphemeralStorageReqs := float64(0) + fractionEphemeralStorageLimits := float64(0) + if allocatable.StorageEphemeral().Value() != 0 { + fractionEphemeralStorageReqs = float64(ephemeralstorageReqs.Value()) / float64(allocatable.StorageEphemeral().Value()) * 100 + fractionEphemeralStorageLimits = float64(ephemeralstorageLimits.Value()) / float64(allocatable.StorageEphemeral().Value()) * 100 + } w.Write(LEVEL_1, "%s\t%s (%d%%)\t%s (%d%%)\n", - api.ResourceCPU, cpuReqs.String(), int64(fractionCpuReqs), cpuLimits.String(), int64(fractionCpuLimits)) + corev1.ResourceCPU, cpuReqs.String(), int64(fractionCpuReqs), cpuLimits.String(), int64(fractionCpuLimits)) w.Write(LEVEL_1, "%s\t%s (%d%%)\t%s (%d%%)\n", - api.ResourceMemory, memoryReqs.String(), int64(fractionMemoryReqs), memoryLimits.String(), int64(fractionMemoryLimits)) + corev1.ResourceMemory, memoryReqs.String(), int64(fractionMemoryReqs), memoryLimits.String(), int64(fractionMemoryLimits)) + w.Write(LEVEL_1, "%s\t%s (%d%%)\t%s (%d%%)\n", + corev1.ResourceEphemeralStorage, ephemeralstorageReqs.String(), int64(fractionEphemeralStorageReqs), ephemeralstorageLimits.String(), int64(fractionEphemeralStorageLimits)) extResources := make([]string, 0, len(allocatable)) for resource := range allocatable { - if !helper.IsStandardContainerResourceName(string(resource)) && resource != api.ResourcePods { + if !resourcehelper.IsStandardContainerResourceName(string(resource)) && resource != corev1.ResourcePods { extResources = append(extResources, string(resource)) } } sort.Strings(extResources) for _, ext := range extResources { - extRequests, extLimits := reqs[api.ResourceName(ext)], limits[api.ResourceName(ext)] + extRequests, extLimits := reqs[corev1.ResourceName(ext)], limits[corev1.ResourceName(ext)] w.Write(LEVEL_1, "%s\t%s\t%s\n", ext, extRequests.String(), extLimits.String()) } } -func getPodsTotalRequestsAndLimits(podList *api.PodList) (reqs map[api.ResourceName]resource.Quantity, limits map[api.ResourceName]resource.Quantity) { - reqs, limits = map[api.ResourceName]resource.Quantity{}, map[api.ResourceName]resource.Quantity{} +func getPodsTotalRequestsAndLimits(podList *corev1.PodList) (reqs map[corev1.ResourceName]resource.Quantity, limits map[corev1.ResourceName]resource.Quantity) { + reqs, limits = map[corev1.ResourceName]resource.Quantity{}, map[corev1.ResourceName]resource.Quantity{} for _, pod := range podList.Items { podReqs, podLimits := resourcehelper.PodRequestsAndLimits(&pod) for podReqName, podReqValue := range podReqs { @@ -3170,13 +3244,13 @@ func getPodsTotalRequestsAndLimits(podList *api.PodList) (reqs map[api.ResourceN return } -func DescribeEvents(el *api.EventList, w PrefixWriter) { +func DescribeEvents(el *corev1.EventList, w PrefixWriter) { if len(el.Items) == 0 { w.Write(LEVEL_0, "Events:\t\n") return } w.Flush() - sort.Sort(events.SortableEvents(el.Items)) + sort.Sort(event.SortableEvents(el.Items)) w.Write(LEVEL_0, "Events:\n Type\tReason\tAge\tFrom\tMessage\n") w.Write(LEVEL_1, "----\t------\t----\t----\t-------\n") for _, e := range el.Items { @@ -3198,12 +3272,11 @@ func 
DescribeEvents(el *api.EventList, w PrefixWriter) { // DeploymentDescriber generates information about a deployment. type DeploymentDescriber struct { - clientset.Interface - external externalclient.Interface + client clientset.Interface } -func (dd *DeploymentDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { - d, err := dd.external.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{}) +func (dd *DeploymentDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { + d, err := dd.client.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", err } @@ -3211,20 +3284,15 @@ func (dd *DeploymentDescriber) Describe(namespace, name string, describerSetting if err != nil { return "", err } - internalDeployment := &extensions.Deployment{} - if err := legacyscheme.Scheme.Convert(d, internalDeployment, extensions.SchemeGroupVersion); err != nil { - return "", err - } - - var events *api.EventList + var events *corev1.EventList if describerSettings.ShowEvents { - events, _ = dd.Core().Events(namespace).Search(legacyscheme.Scheme, d) + events, _ = dd.client.CoreV1().Events(namespace).Search(scheme.Scheme, d) } - return describeDeployment(d, selector, internalDeployment, events, dd) + return describeDeployment(d, selector, d, events, dd) } -func describeDeployment(d *appsv1.Deployment, selector labels.Selector, internalDeployment *extensions.Deployment, events *api.EventList, dd *DeploymentDescriber) (string, error) { +func describeDeployment(d *appsv1.Deployment, selector labels.Selector, internalDeployment *appsv1.Deployment, events *corev1.EventList, dd *DeploymentDescriber) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", d.ObjectMeta.Name) @@ -3248,7 +3316,7 @@ func describeDeployment(d *appsv1.Deployment, selector labels.Selector, internal w.Write(LEVEL_1, "%v \t%v\t%v\n", c.Type, c.Status, c.Reason) } } - oldRSs, _, newRS, err := deploymentutil.GetAllReplicaSets(d, dd.external.AppsV1()) + oldRSs, _, newRS, err := deploymentutil.GetAllReplicaSets(d, dd.client.AppsV1()) if err == nil { w.Write(LEVEL_0, "OldReplicaSets:\t%s\n", printReplicaSetsByLabels(oldRSs)) var newRSs []*appsv1.ReplicaSet @@ -3279,7 +3347,7 @@ func printReplicaSetsByLabels(matchingRSs []*appsv1.ReplicaSet) string { return list } -func getPodStatusForController(c coreclient.PodInterface, selector labels.Selector, uid types.UID) (running, waiting, succeeded, failed int, err error) { +func getPodStatusForController(c corev1client.PodInterface, selector labels.Selector, uid types.UID) (running, waiting, succeeded, failed int, err error) { options := metav1.ListOptions{LabelSelector: selector.String()} rcPods, err := c.List(options) if err != nil { @@ -3292,13 +3360,13 @@ func getPodStatusForController(c coreclient.PodInterface, selector labels.Select continue } switch pod.Status.Phase { - case api.PodRunning: + case corev1.PodRunning: running++ - case api.PodPending: + case corev1.PodPending: waiting++ - case api.PodSucceeded: + case corev1.PodSucceeded: succeeded++ - case api.PodFailed: + case corev1.PodFailed: failed++ } } @@ -3310,7 +3378,7 @@ type ConfigMapDescriber struct { clientset.Interface } -func (d *ConfigMapDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { +func (d *ConfigMapDescriber) Describe(namespace, name string, describerSettings 
describe.DescriberSettings) (string, error) {
 	c := d.Core().ConfigMaps(namespace)
 
 	configMap, err := c.Get(name, metav1.GetOptions{})
@@ -3331,7 +3399,7 @@ func (d *ConfigMapDescriber) Describe(namespace, name string, describerSettings
 		w.Write(LEVEL_0, "%s\n", string(v))
 	}
 	if describerSettings.ShowEvents {
-		events, err := d.Core().Events(namespace).Search(legacyscheme.Scheme, configMap)
+		events, err := d.Core().Events(namespace).Search(scheme.Scheme, configMap)
 		if err != nil {
 			return err
 		}
@@ -3343,12 +3411,12 @@ func (d *ConfigMapDescriber) Describe(namespace, name string, describerSettings
 	})
 }
 
-// NetworkPolicyDescriber generates information about a networking.NetworkPolicy
+// NetworkPolicyDescriber generates information about a networkingv1.NetworkPolicy
 type NetworkPolicyDescriber struct {
 	clientset.Interface
 }
 
-func (d *NetworkPolicyDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
+func (d *NetworkPolicyDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
 	c := d.Networking().NetworkPolicies(namespace)
 
 	networkPolicy, err := c.Get(name, metav1.GetOptions{})
@@ -3359,7 +3427,7 @@ func (d *NetworkPolicyDescriber) Describe(namespace, name string, describerSetti
 	return describeNetworkPolicy(networkPolicy)
 }
 
-func describeNetworkPolicy(networkPolicy *networking.NetworkPolicy) (string, error) {
+func describeNetworkPolicy(networkPolicy *networkingv1.NetworkPolicy) (string, error) {
 	return tabbedString(func(out io.Writer) error {
 		w := NewPrefixWriter(out)
 		w.Write(LEVEL_0, "Name:\t%s\n", networkPolicy.Name)
@@ -3372,7 +3440,7 @@ func describeNetworkPolicy(networkPolicy *networking.NetworkPolicy) (string, err
 	})
 }
 
-func describeNetworkPolicySpec(nps networking.NetworkPolicySpec, w PrefixWriter) {
+func describeNetworkPolicySpec(nps networkingv1.NetworkPolicySpec, w PrefixWriter) {
 	w.Write(LEVEL_0, "Spec:\n")
 	w.Write(LEVEL_1, "PodSelector: ")
 	if len(nps.PodSelector.MatchLabels) == 0 && len(nps.PodSelector.MatchExpressions) == 0 {
@@ -3387,7 +3455,7 @@ func describeNetworkPolicySpec(nps networking.NetworkPolicySpec, w PrefixWriter)
 	w.Write(LEVEL_1, "Policy Types: %v\n", policyTypesToString(nps.PolicyTypes))
 }
 
-func printNetworkPolicySpecIngressFrom(npirs []networking.NetworkPolicyIngressRule, initialIndent string, w PrefixWriter) {
+func printNetworkPolicySpecIngressFrom(npirs []networkingv1.NetworkPolicyIngressRule, initialIndent string, w PrefixWriter) {
 	if len(npirs) == 0 {
 		w.Write(LEVEL_0, "%s%s\n", initialIndent, "<none> (Selected pods are isolated for ingress connectivity)")
 		return
@@ -3397,11 +3465,11 @@ func printNetworkPolicySpecIngressFrom(npirs []networking.NetworkPolicyIngressRu
 			w.Write(LEVEL_0, "%s%s\n", initialIndent, "To Port: <any> (traffic allowed to all ports)")
 		} else {
 			for _, port := range npir.Ports {
-				var proto api.Protocol
+				var proto corev1.Protocol
 				if port.Protocol != nil {
 					proto = *port.Protocol
 				} else {
-					proto = api.ProtocolTCP
+					proto = corev1.ProtocolTCP
 				}
 				w.Write(LEVEL_0, "%s%s: %s/%s\n", initialIndent, "To Port", port.Port, proto)
 			}
@@ -3431,7 +3499,7 @@ func printNetworkPolicySpecIngressFrom(npirs []networking.NetworkPolicyIngressRu
 	}
 }
 
-func printNetworkPolicySpecEgressTo(npers []networking.NetworkPolicyEgressRule, initialIndent string, w PrefixWriter) {
+func printNetworkPolicySpecEgressTo(npers []networkingv1.NetworkPolicyEgressRule, initialIndent string, w PrefixWriter) {
 	if len(npers) == 0 {
 		w.Write(LEVEL_0, "%s%s\n", initialIndent, "<none> (Selected pods are isolated for egress connectivity)")
 		return
@@ -3441,11 +3509,11 @@ func printNetworkPolicySpecEgressTo(npers []networking.NetworkPolicyEgressRule,
 			w.Write(LEVEL_0, "%s%s\n", initialIndent, "To Port: <any> (traffic allowed to all ports)")
 		} else {
 			for _, port := range nper.Ports {
-				var proto api.Protocol
+				var proto corev1.Protocol
 				if port.Protocol != nil {
 					proto = *port.Protocol
 				} else {
-					proto = api.ProtocolTCP
+					proto = corev1.ProtocolTCP
 				}
 				w.Write(LEVEL_0, "%s%s: %s/%s\n", initialIndent, "To Port", port.Port, proto)
 			}
@@ -3479,21 +3547,21 @@
 type StorageClassDescriber struct {
 	clientset.Interface
 }
 
-func (s *StorageClassDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
+func (s *StorageClassDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
 	sc, err := s.Storage().StorageClasses().Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
 
-	var events *api.EventList
+	var events *corev1.EventList
 	if describerSettings.ShowEvents {
-		events, _ = s.Core().Events(namespace).Search(legacyscheme.Scheme, sc)
+		events, _ = s.Core().Events(namespace).Search(scheme.Scheme, sc)
 	}
 
 	return describeStorageClass(sc, events)
 }
 
-func describeStorageClass(sc *storage.StorageClass, events *api.EventList) (string, error) {
+func describeStorageClass(sc *storagev1.StorageClass, events *corev1.EventList) (string, error) {
 	return tabbedString(func(out io.Writer) error {
 		w := NewPrefixWriter(out)
 		w.Write(LEVEL_0, "Name:\t%s\n", sc.Name)
@@ -3527,7 +3595,7 @@ func describeStorageClass(sc *storage.StorageClass, events *api.EventList) (stri
 	})
 }
 
-func printAllowedTopologies(w PrefixWriter, topologies []api.TopologySelectorTerm) {
+func printAllowedTopologies(w PrefixWriter, topologies []corev1.TopologySelectorTerm) {
 	w.Write(LEVEL_0, "AllowedTopologies:\t")
 	if len(topologies) == 0 {
 		w.WriteLine("<none>")
@@ -3539,7 +3607,7 @@ func printAllowedTopologies(w PrefixWriter, topologies []api.TopologySelectorTer
 	}
 }
 
-func printTopologySelectorTermsMultilineWithIndent(w PrefixWriter, indentLevel int, title, innerIndent string, reqs []api.TopologySelectorLabelRequirement) {
+func printTopologySelectorTermsMultilineWithIndent(w PrefixWriter, indentLevel int, title, innerIndent string, reqs []corev1.TopologySelectorLabelRequirement) {
 	w.Write(indentLevel, "%s:%s", title, innerIndent)
 
 	if len(reqs) == 0 {
@@ -3563,21 +3631,21 @@
 type PodDisruptionBudgetDescriber struct {
 	clientset.Interface
 }
 
-func (p *PodDisruptionBudgetDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
+func (p *PodDisruptionBudgetDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
 	pdb, err := p.Policy().PodDisruptionBudgets(namespace).Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
 
-	var events *api.EventList
+	var events *corev1.EventList
 	if describerSettings.ShowEvents {
-		events, _ = p.Core().Events(namespace).Search(legacyscheme.Scheme, pdb)
+		events, _ = p.Core().Events(namespace).Search(scheme.Scheme, pdb)
 	}
 
 	return describePodDisruptionBudget(pdb, events)
 }
 
-func describePodDisruptionBudget(pdb *policy.PodDisruptionBudget, events *api.EventList) (string, error) {
+func describePodDisruptionBudget(pdb *policyv1beta1.PodDisruptionBudget, events *corev1.EventList) (string, error) {
 	return tabbedString(func(out io.Writer) error {
 		w := NewPrefixWriter(out)
 		w.Write(LEVEL_0, "Name:\t%s\n", pdb.Name)
@@ -3612,21 +3680,21 @@
 type PriorityClassDescriber struct {
 	clientset.Interface
 }
 
-func (s *PriorityClassDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
+func (s *PriorityClassDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
 	pc, err := s.Scheduling().PriorityClasses().Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
 
-	var events *api.EventList
+	var events *corev1.EventList
 	if describerSettings.ShowEvents {
-		events, _ = s.Core().Events(namespace).Search(legacyscheme.Scheme, pc)
+		events, _ = s.Core().Events(namespace).Search(scheme.Scheme, pc)
 	}
 
 	return describePriorityClass(pc, events)
 }
 
-func describePriorityClass(pc *scheduling.PriorityClass, events *api.EventList) (string, error) {
+func describePriorityClass(pc *schedulingv1beta1.PriorityClass, events *corev1.EventList) (string, error) {
 	return tabbedString(func(out io.Writer) error {
 		w := NewPrefixWriter(out)
 		w.Write(LEVEL_0, "Name:\t%s\n", pc.Name)
@@ -3643,12 +3711,12 @@ func describePriorityClass(pc *scheduling.PriorityClass, events *api.EventList)
 	})
 }
 
-// PodSecurityPolicyDescriber generates information about a PodSecurityPolicy.
+// PodSecurityPolicyDescriber generates information about a policyv1beta1.PodSecurityPolicy.
 type PodSecurityPolicyDescriber struct {
 	clientset.Interface
 }
 
-func (d *PodSecurityPolicyDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) {
+func (d *PodSecurityPolicyDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) {
 	psp, err := d.Policy().PodSecurityPolicies().Get(name, metav1.GetOptions{})
 	if err != nil {
 		return "", err
@@ -3657,7 +3725,7 @@ func (d *PodSecurityPolicyDescriber) Describe(namespace, name string, describerS
 	return describePodSecurityPolicy(psp)
 }
 
-func describePodSecurityPolicy(psp *policy.PodSecurityPolicy) (string, error) {
+func describePodSecurityPolicy(psp *policyv1beta1.PodSecurityPolicy) (string, error) {
 	return tabbedString(func(out io.Writer) error {
 		w := NewPrefixWriter(out)
 		w.Write(LEVEL_0, "Name:\t%s\n", psp.Name)
@@ -3723,7 +3791,7 @@ func stringOrDefaultValue(s, defaultValue string) string {
 	return defaultValue
 }
 
-func fsTypeToString(volumes []policy.FSType) string {
+func fsTypeToString(volumes []policyv1beta1.FSType) string {
 	strVolumes := []string{}
 	for _, v := range volumes {
 		strVolumes = append(strVolumes, string(v))
@@ -3731,7 +3799,7 @@ func fsTypeToString(volumes []policy.FSType) string {
 	return stringOrNone(strings.Join(strVolumes, ","))
 }
 
-func flexVolumesToString(flexVolumes []policy.AllowedFlexVolume) string {
+func flexVolumesToString(flexVolumes []policyv1beta1.AllowedFlexVolume) string {
 	volumes := []string{}
 	for _, flexVolume := range flexVolumes {
 		volumes = append(volumes, "driver="+flexVolume.Driver)
@@ -3743,7 +3811,7 @@ func sysctlsToString(sysctls []string) string {
 	return stringOrNone(strings.Join(sysctls, ","))
 }
 
-func hostPortRangeToString(ranges []policy.HostPortRange) string {
+func hostPortRangeToString(ranges []policyv1beta1.HostPortRange) string {
 	formattedString := ""
 	if ranges != nil {
 		strRanges := []string{}
@@ -3755,7 +3823,7 @@ func hostPortRangeToString(ranges []policy.HostPortRange) string {
 	return stringOrNone(formattedString)
 }
 
-func idRangeToString(ranges []policy.IDRange) string {
+func idRangeToString(ranges []policyv1beta1.IDRange) string {
 	formattedString := ""
 	if ranges != nil {
 		strRanges := []string{}
@@ -3767,7 +3835,7
@@ func idRangeToString(ranges []policy.IDRange) string { return stringOrNone(formattedString) } -func capsToString(caps []api.Capability) string { +func capsToString(caps []corev1.Capability) string { formattedString := "" if caps != nil { strCaps := []string{} @@ -3779,7 +3847,7 @@ func capsToString(caps []api.Capability) string { return stringOrNone(formattedString) } -func policyTypesToString(pts []networking.PolicyType) string { +func policyTypesToString(pts []networkingv1.PolicyType) string { formattedString := "" if pts != nil { strPts := []string{} @@ -3797,7 +3865,7 @@ func newErrNoDescriber(types ...reflect.Type) error { for _, t := range types { names = append(names, t.String()) } - return printers.ErrNoDescriber{Types: names} + return describe.ErrNoDescriber{Types: names} } // Describers implements ObjectDescriber against functions registered via Add. Those functions can @@ -3844,7 +3912,7 @@ func (d *Describers) DescribeObject(exact interface{}, extra ...interface{}) (st return "", newErrNoDescriber(append([]reflect.Type{exactType}, types...)...) } -// Add adds one or more describer functions to the printers.Describer. The passed function must +// Add adds one or more describer functions to the describe.Describer. The passed function must // match the signature: // // func(...) (string, error) @@ -3971,12 +4039,12 @@ func printLabelsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerI } // printTaintsMultiline prints multiple taints with a proper alignment. -func printNodeTaintsMultiline(w PrefixWriter, title string, taints []api.Taint) { +func printNodeTaintsMultiline(w PrefixWriter, title string, taints []corev1.Taint) { printTaintsMultilineWithIndent(w, "", title, "\t", taints) } // printTaintsMultilineWithIndent prints multiple taints with a user-defined alignment. -func printTaintsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, taints []api.Taint) { +func printTaintsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, taints []corev1.Taint) { w.Write(LEVEL_0, "%s%s:%s", initialIndent, title, innerIndent) if taints == nil || len(taints) == 0 { @@ -3986,7 +4054,7 @@ func printTaintsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerI // to print taints in the sorted order sort.Slice(taints, func(i, j int) bool { - cmpKey := func(taint api.Taint) string { + cmpKey := func(taint corev1.Taint) string { return string(taint.Effect) + "," + taint.Key } return cmpKey(taints[i]) < cmpKey(taints[j]) @@ -4002,12 +4070,12 @@ func printTaintsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerI } // printPodsMultiline prints multiple pods with a proper alignment. -func printPodsMultiline(w PrefixWriter, title string, pods []api.Pod) { +func printPodsMultiline(w PrefixWriter, title string, pods []corev1.Pod) { printPodsMultilineWithIndent(w, "", title, "\t", pods) } // printPodsMultilineWithIndent prints multiple pods with a user-defined alignment. 
-func printPodsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, pods []api.Pod) { +func printPodsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, pods []corev1.Pod) { w.Write(LEVEL_0, "%s%s:%s", initialIndent, title, innerIndent) if pods == nil || len(pods) == 0 { @@ -4017,7 +4085,7 @@ func printPodsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerInd // to print pods in the sorted order sort.Slice(pods, func(i, j int) bool { - cmpKey := func(pod api.Pod) string { + cmpKey := func(pod corev1.Pod) string { return pod.Name } return cmpKey(pods[i]) < cmpKey(pods[j]) @@ -4033,12 +4101,12 @@ func printPodsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerInd } // printPodTolerationsMultiline prints multiple tolerations with a proper alignment. -func printPodTolerationsMultiline(w PrefixWriter, title string, tolerations []api.Toleration) { +func printPodTolerationsMultiline(w PrefixWriter, title string, tolerations []corev1.Toleration) { printTolerationsMultilineWithIndent(w, "", title, "\t", tolerations) } // printTolerationsMultilineWithIndent prints multiple tolerations with a user-defined alignment. -func printTolerationsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, tolerations []api.Toleration) { +func printTolerationsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, tolerations []corev1.Toleration) { w.Write(LEVEL_0, "%s%s:%s", initialIndent, title, innerIndent) if tolerations == nil || len(tolerations) == 0 { @@ -4089,7 +4157,7 @@ func tabbedString(f func(io.Writer) error) (string, error) { return str, nil } -type SortableResourceNames []api.ResourceName +type SortableResourceNames []corev1.ResourceName func (list SortableResourceNames) Len() int { return len(list) @@ -4104,8 +4172,8 @@ func (list SortableResourceNames) Less(i, j int) bool { } // SortedResourceNames returns the sorted resource names of a resource list. -func SortedResourceNames(list api.ResourceList) []api.ResourceName { - resources := make([]api.ResourceName, 0, len(list)) +func SortedResourceNames(list corev1.ResourceList) []corev1.ResourceName { + resources := make([]corev1.ResourceName, 0, len(list)) for res := range list { resources = append(resources, res) } @@ -4113,7 +4181,7 @@ func SortedResourceNames(list api.ResourceList) []api.ResourceName { return resources } -type SortableResourceQuotas []api.ResourceQuota +type SortableResourceQuotas []corev1.ResourceQuota func (list SortableResourceQuotas) Len() int { return len(list) @@ -4127,7 +4195,7 @@ func (list SortableResourceQuotas) Less(i, j int) bool { return list[i].Name < list[j].Name } -type SortableVolumeMounts []api.VolumeMount +type SortableVolumeMounts []corev1.VolumeMount func (list SortableVolumeMounts) Len() int { return len(list) @@ -4141,7 +4209,7 @@ func (list SortableVolumeMounts) Less(i, j int) bool { return list[i].MountPath < list[j].MountPath } -type SortableVolumeDevices []api.VolumeDevice +type SortableVolumeDevices []corev1.VolumeDevice func (list SortableVolumeDevices) Len() int { return len(list) @@ -4215,3 +4283,156 @@ func shorten(s string, maxLength int) string { } return s } + +// translateTimestampUntil returns the elapsed time until timestamp in +// human-readable approximation. 
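+//
+// For illustration only (editor's sketch, not part of the vendored source;
+// the inputs below are hypothetical):
+//
+//	translateTimestampUntil(metav1.NewTime(time.Now().Add(90 * time.Second)))  // roughly "90s"
+//	translateTimestampSince(metav1.NewTime(time.Now().Add(-3 * time.Hour)))    // roughly "3h"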
+func translateTimestampUntil(timestamp metav1.Time) string {
+	if timestamp.IsZero() {
+		return "<unknown>"
+	}
+
+	return duration.HumanDuration(time.Until(timestamp.Time))
+}
+
+// translateTimestampSince returns the elapsed time since timestamp in
+// human-readable approximation.
+func translateTimestampSince(timestamp metav1.Time) string {
+	if timestamp.IsZero() {
+		return "<unknown>"
+	}
+
+	return duration.HumanDuration(time.Since(timestamp.Time))
+}
+
+// formatEventSource formats EventSource as a comma separated string excluding Host when empty
+func formatEventSource(es corev1.EventSource) string {
+	EventSourceString := []string{es.Component}
+	if len(es.Host) > 0 {
+		EventSourceString = append(EventSourceString, es.Host)
+	}
+	return strings.Join(EventSourceString, ", ")
+}
+
+// Pass ports=nil for all ports.
+func formatEndpoints(endpoints *corev1.Endpoints, ports sets.String) string {
+	if len(endpoints.Subsets) == 0 {
+		return "<none>"
+	}
+	list := []string{}
+	max := 3
+	more := false
+	count := 0
+	for i := range endpoints.Subsets {
+		ss := &endpoints.Subsets[i]
+		if len(ss.Ports) == 0 {
+			// It's possible to have headless services with no ports.
+			for i := range ss.Addresses {
+				if len(list) == max {
+					more = true
+				}
+				if !more {
+					list = append(list, ss.Addresses[i].IP)
+				}
+				count++
+			}
+		} else {
+			// "Normal" services with ports defined.
+			for i := range ss.Ports {
+				port := &ss.Ports[i]
+				if ports == nil || ports.Has(port.Name) {
+					for i := range ss.Addresses {
+						if len(list) == max {
+							more = true
+						}
+						addr := &ss.Addresses[i]
+						if !more {
+							hostPort := net.JoinHostPort(addr.IP, strconv.Itoa(int(port.Port)))
+							list = append(list, hostPort)
+						}
+						count++
+					}
+				}
+			}
+		}
+	}
+	ret := strings.Join(list, ",")
+	if more {
+		return fmt.Sprintf("%s + %d more...", ret, count-max)
+	}
+	return ret
+}
+
+func extractCSRStatus(csr *certificatesv1beta1.CertificateSigningRequest) (string, error) {
+	var approved, denied bool
+	for _, c := range csr.Status.Conditions {
+		switch c.Type {
+		case certificatesv1beta1.CertificateApproved:
+			approved = true
+		case certificatesv1beta1.CertificateDenied:
+			denied = true
+		default:
+			return "", fmt.Errorf("unknown csr condition %q", c)
+		}
+	}
+	var status string
+	// must be in order of precedence
+	if denied {
+		status += "Denied"
+	} else if approved {
+		status += "Approved"
+	} else {
+		status += "Pending"
+	}
+	if len(csr.Status.Certificate) > 0 {
+		status += ",Issued"
+	}
+	return status, nil
+}
+
+// backendStringer behaves just like a string interface and converts the given backend to a string.
+func backendStringer(backend *extensionsv1beta1.IngressBackend) string {
+	if backend == nil {
+		return ""
+	}
+	return fmt.Sprintf("%v:%v", backend.ServiceName, backend.ServicePort.String())
+}
+
+// findNodeRoles returns the roles of a given node.
+// The roles are determined by looking for:
+// * a node-role.kubernetes.io/<role>="" label
+// * a kubernetes.io/role="<role>" label
+func findNodeRoles(node *corev1.Node) []string {
+	roles := sets.NewString()
+	for k, v := range node.Labels {
+		switch {
+		case strings.HasPrefix(k, describe.LabelNodeRolePrefix):
+			if role := strings.TrimPrefix(k, describe.LabelNodeRolePrefix); len(role) > 0 {
+				roles.Insert(role)
+			}
+
+		case k == describe.NodeLabelRole && v != "":
+			roles.Insert(v)
+		}
+	}
+	return roles.List()
+}
+
+// loadBalancerStatusStringer behaves mostly like a string interface and converts the given status to a string.
+// `wide` indicates whether the returned value is meant for --o=wide output.
If not, it's clipped to 16 bytes. +func loadBalancerStatusStringer(s corev1.LoadBalancerStatus, wide bool) string { + ingress := s.Ingress + result := sets.NewString() + for i := range ingress { + if ingress[i].IP != "" { + result.Insert(ingress[i].IP) + } else if ingress[i].Hostname != "" { + result.Insert(ingress[i].Hostname) + } + } + + r := strings.Join(result.List(), ",") + if !wide && len(r) > describe.LoadBalancerWidth { + r = r[0:(describe.LoadBalancerWidth-3)] + "..." + } + return r +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/env_file.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/env_file.go deleted file mode 100644 index 8221d5936cd1..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/env_file.go +++ /dev/null @@ -1,103 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "bufio" - "bytes" - "fmt" - "os" - "strings" - "unicode" - "unicode/utf8" - - "k8s.io/apimachinery/pkg/util/validation" -) - -var utf8bom = []byte{0xEF, 0xBB, 0xBF} - -// proccessEnvFileLine returns a blank key if the line is empty or a comment. -// The value will be retrieved from the environment if necessary. -func proccessEnvFileLine(line []byte, filePath string, - currentLine int) (key, value string, err error) { - - if !utf8.Valid(line) { - return ``, ``, fmt.Errorf("env file %s contains invalid utf8 bytes at line %d: %v", - filePath, currentLine+1, line) - } - - // We trim UTF8 BOM from the first line of the file but no others - if currentLine == 0 { - line = bytes.TrimPrefix(line, utf8bom) - } - - // trim the line from all leading whitespace first - line = bytes.TrimLeftFunc(line, unicode.IsSpace) - - // If the line is empty or a comment, we return a blank key/value pair. - if len(line) == 0 || line[0] == '#' { - return ``, ``, nil - } - - data := strings.SplitN(string(line), "=", 2) - key = data[0] - if errs := validation.IsEnvVarName(key); len(errs) != 0 { - return ``, ``, fmt.Errorf("%q is not a valid key name: %s", key, strings.Join(errs, ";")) - } - - if len(data) == 2 { - value = data[1] - } else { - // No value (no `=` in the line) is a signal to obtain the value - // from the environment. - value = os.Getenv(key) - } - return -} - -// addFromEnvFile processes an env file allows a generic addTo to handle the -// collection of key value pairs or returns an error. -func addFromEnvFile(filePath string, addTo func(key, value string) error) error { - f, err := os.Open(filePath) - if err != nil { - return err - } - defer f.Close() - - scanner := bufio.NewScanner(f) - currentLine := 0 - for scanner.Scan() { - // Process the current line, retrieving a key/value pair if - // possible. 
- scannedBytes := scanner.Bytes() - key, value, err := proccessEnvFileLine(scannedBytes, filePath, currentLine) - if err != nil { - return err - } - currentLine++ - - if len(key) == 0 { - // no key means line was empty or a comment - continue - } - - if err = addTo(key, value); err != nil { - return err - } - } - return nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/generate.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/generate.go deleted file mode 100644 index 07d8bb893207..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/generate.go +++ /dev/null @@ -1,211 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - "reflect" - "strconv" - "strings" - - "github.com/spf13/cobra" - "github.com/spf13/pflag" - "k8s.io/apimachinery/pkg/runtime" - utilerrors "k8s.io/apimachinery/pkg/util/errors" -) - -// GeneratorParam is a parameter for a generator -// TODO: facilitate structured json generator input schemes -type GeneratorParam struct { - Name string - Required bool -} - -// Generator is an interface for things that can generate API objects from input -// parameters. One example is the "expose" generator that is capable of exposing -// new replication controllers and services, among other things. -type Generator interface { - // Generate creates an API object given a set of parameters - Generate(params map[string]interface{}) (runtime.Object, error) - // ParamNames returns the list of parameters that this generator uses - ParamNames() []GeneratorParam -} - -// StructuredGenerator is an interface for things that can generate API objects not using parameter injection -type StructuredGenerator interface { - // StructuredGenerator creates an API object using pre-configured parameters - StructuredGenerate() (runtime.Object, error) -} - -func IsZero(i interface{}) bool { - if i == nil { - return true - } - return reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) -} - -// ValidateParams ensures that all required params are present in the params map -func ValidateParams(paramSpec []GeneratorParam, params map[string]interface{}) error { - allErrs := []error{} - for ix := range paramSpec { - if paramSpec[ix].Required { - value, found := params[paramSpec[ix].Name] - if !found || IsZero(value) { - allErrs = append(allErrs, fmt.Errorf("Parameter: %s is required", paramSpec[ix].Name)) - } - } - } - return utilerrors.NewAggregate(allErrs) -} - -// AnnotateFlags annotates all flags that are used by generators. -func AnnotateFlags(cmd *cobra.Command, generators map[string]Generator) { - // Iterate over all generators and mark any flags used by them. 
- for name, generator := range generators { - generatorParams := map[string]struct{}{} - for _, param := range generator.ParamNames() { - generatorParams[param.Name] = struct{}{} - } - - cmd.Flags().VisitAll(func(flag *pflag.Flag) { - if _, found := generatorParams[flag.Name]; !found { - // This flag is not used by the current generator - // so skip it. - return - } - if flag.Annotations == nil { - flag.Annotations = map[string][]string{} - } - if annotations := flag.Annotations["generator"]; annotations == nil { - flag.Annotations["generator"] = []string{} - } - flag.Annotations["generator"] = append(flag.Annotations["generator"], name) - }) - } -} - -// EnsureFlagsValid ensures that no invalid flags are being used against a generator. -func EnsureFlagsValid(cmd *cobra.Command, generators map[string]Generator, generatorInUse string) error { - AnnotateFlags(cmd, generators) - - allErrs := []error{} - cmd.Flags().VisitAll(func(flag *pflag.Flag) { - // If the flag hasn't changed, don't validate it. - if !flag.Changed { - return - } - // Look into the flag annotations for the generators that can use it. - if annotations := flag.Annotations["generator"]; len(annotations) > 0 { - annotationMap := map[string]struct{}{} - for _, ann := range annotations { - annotationMap[ann] = struct{}{} - } - // If the current generator is not annotated, then this flag shouldn't - // be used with it. - if _, found := annotationMap[generatorInUse]; !found { - allErrs = append(allErrs, fmt.Errorf("cannot use --%s with --generator=%s", flag.Name, generatorInUse)) - } - } - }) - return utilerrors.NewAggregate(allErrs) -} - -// MakeParams is a utility that creates generator parameters from a command line -func MakeParams(cmd *cobra.Command, params []GeneratorParam) map[string]interface{} { - result := map[string]interface{}{} - for ix := range params { - f := cmd.Flags().Lookup(params[ix].Name) - if f != nil { - result[params[ix].Name] = f.Value.String() - } - } - return result -} - -func MakeProtocols(protocols map[string]string) string { - out := []string{} - for key, value := range protocols { - out = append(out, fmt.Sprintf("%s/%s", key, value)) - } - return strings.Join(out, ",") -} - -func ParseProtocols(protocols interface{}) (map[string]string, error) { - protocolsString, isString := protocols.(string) - if !isString { - return nil, fmt.Errorf("expected string, found %v", protocols) - } - if len(protocolsString) == 0 { - return nil, fmt.Errorf("no protocols passed") - } - portProtocolMap := map[string]string{} - protocolsSlice := strings.Split(protocolsString, ",") - for ix := range protocolsSlice { - portProtocol := strings.Split(protocolsSlice[ix], "/") - if len(portProtocol) != 2 { - return nil, fmt.Errorf("unexpected port protocol mapping: %s", protocolsSlice[ix]) - } - if len(portProtocol[0]) == 0 { - return nil, fmt.Errorf("unexpected empty port") - } - if len(portProtocol[1]) == 0 { - return nil, fmt.Errorf("unexpected empty protocol") - } - portProtocolMap[portProtocol[0]] = portProtocol[1] - } - return portProtocolMap, nil -} - -func MakeLabels(labels map[string]string) string { - out := []string{} - for key, value := range labels { - out = append(out, fmt.Sprintf("%s=%s", key, value)) - } - return strings.Join(out, ",") -} - -// ParseLabels turns a string representation of a label set into a map[string]string -func ParseLabels(labelSpec interface{}) (map[string]string, error) { - labelString, isString := labelSpec.(string) - if !isString { - return nil, fmt.Errorf("expected string, found %v", 
labelSpec) - } - if len(labelString) == 0 { - return nil, fmt.Errorf("no label spec passed") - } - labels := map[string]string{} - labelSpecs := strings.Split(labelString, ",") - for ix := range labelSpecs { - labelSpec := strings.Split(labelSpecs[ix], "=") - if len(labelSpec) != 2 { - return nil, fmt.Errorf("unexpected label spec: %s", labelSpecs[ix]) - } - if len(labelSpec[0]) == 0 { - return nil, fmt.Errorf("unexpected empty label key") - } - labels[labelSpec[0]] = labelSpec[1] - } - return labels, nil -} - -func GetBool(params map[string]string, key string, defValue bool) (bool, error) { - if val, found := params[key]; !found { - return defValue, nil - } else { - return strconv.ParseBool(val) - } -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/history.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/history.go index 4fcd4e526d7e..a1a4829aeb6a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/history.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/history.go @@ -23,7 +23,7 @@ import ( "text/tabwriter" appsv1 "k8s.io/api/apps/v1" - "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -34,12 +34,10 @@ import ( "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/client-go/kubernetes" clientappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" - api "k8s.io/kubernetes/pkg/apis/core" - apiv1 "k8s.io/kubernetes/pkg/apis/core/v1" - deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" kapps "k8s.io/kubernetes/pkg/kubectl/apps" + describe "k8s.io/kubernetes/pkg/kubectl/describe/versioned" + deploymentutil "k8s.io/kubernetes/pkg/kubectl/util/deployment" sliceutil "k8s.io/kubernetes/pkg/kubectl/util/slice" - printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" ) const ( @@ -116,7 +114,7 @@ func (h *DeploymentHistoryViewer) ViewHistory(namespace, name string, revision i allRSs = append(allRSs, newRS) } - historyInfo := make(map[int64]*v1.PodTemplateSpec) + historyInfo := make(map[int64]*corev1.PodTemplateSpec) for _, rs := range allRSs { v, err := deploymentutil.Revision(rs) if err != nil { @@ -166,14 +164,10 @@ func (h *DeploymentHistoryViewer) ViewHistory(namespace, name string, revision i }) } -func printTemplate(template *v1.PodTemplateSpec) (string, error) { +func printTemplate(template *corev1.PodTemplateSpec) (string, error) { buf := bytes.NewBuffer([]byte{}) - internalTemplate := &api.PodTemplateSpec{} - if err := apiv1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(template, internalTemplate, nil); err != nil { - return "", fmt.Errorf("failed to convert podtemplate, %v", err) - } - w := printersinternal.NewPrefixWriter(buf) - printersinternal.DescribePodTemplate(internalTemplate, w) + w := describe.NewPrefixWriter(buf) + describe.DescribePodTemplate(template, w) return buf.String(), nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/namespace.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/namespace.go deleted file mode 100644 index 03723cdaa84f..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/namespace.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -// NamespaceGeneratorV1 supports stable generation of a namespace -type NamespaceGeneratorV1 struct { - // Name of namespace - Name string -} - -// Ensure it supports the generator pattern that uses parameter injection -var _ Generator = &NamespaceGeneratorV1{} - -// Ensure it supports the generator pattern that uses parameters specified during construction -var _ StructuredGenerator = &NamespaceGeneratorV1{} - -// Generate returns a namespace using the specified parameters -func (g NamespaceGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(g.ParamNames(), genericParams) - if err != nil { - return nil, err - } - params := map[string]string{} - for key, value := range genericParams { - strVal, isString := value.(string) - if !isString { - return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) - } - params[key] = strVal - } - delegate := &NamespaceGeneratorV1{Name: params["name"]} - return delegate.StructuredGenerate() -} - -// ParamNames returns the set of supported input parameters when using the parameter injection generator pattern -func (g NamespaceGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - } -} - -// StructuredGenerate outputs a namespace object using the configured fields -func (g *NamespaceGeneratorV1) StructuredGenerate() (runtime.Object, error) { - if err := g.validate(); err != nil { - return nil, err - } - namespace := &v1.Namespace{} - namespace.Name = g.Name - return namespace, nil -} - -// validate validates required fields are set to support structured generation -func (g *NamespaceGeneratorV1) validate() error { - if len(g.Name) == 0 { - return fmt.Errorf("name must be specified") - } - return nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/pdb.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/pdb.go deleted file mode 100644 index df2cc8f53b33..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/pdb.go +++ /dev/null @@ -1,213 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - - policy "k8s.io/api/policy/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/intstr" -) - -// PodDisruptionBudgetV1Generator supports stable generation of a pod disruption budget. 
-type PodDisruptionBudgetV1Generator struct { - Name string - MinAvailable string - Selector string -} - -// Ensure it supports the generator pattern that uses parameters specified during construction. -var _ StructuredGenerator = &PodDisruptionBudgetV1Generator{} - -func (PodDisruptionBudgetV1Generator) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"min-available", false}, - {"selector", true}, - } -} - -func (s PodDisruptionBudgetV1Generator) Generate(params map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), params) - if err != nil { - return nil, err - } - name, isString := params["name"].(string) - if !isString { - return nil, fmt.Errorf("expected string, found %T for 'name'", params["name"]) - } - minAvailable, isString := params["min-available"].(string) - if !isString { - return nil, fmt.Errorf("expected string, found %T for 'min-available'", params["min-available"]) - } - selector, isString := params["selector"].(string) - if !isString { - return nil, fmt.Errorf("expected string, found %T for 'selector'", params["selector"]) - } - delegate := &PodDisruptionBudgetV1Generator{Name: name, MinAvailable: minAvailable, Selector: selector} - return delegate.StructuredGenerate() -} - -// StructuredGenerate outputs a pod disruption budget object using the configured fields. -func (s *PodDisruptionBudgetV1Generator) StructuredGenerate() (runtime.Object, error) { - if len(s.MinAvailable) == 0 { - // defaulting behavior seen in Kubernetes 1.6 and below. - s.MinAvailable = "1" - } - - if err := s.validate(); err != nil { - return nil, err - } - - selector, err := metav1.ParseToLabelSelector(s.Selector) - if err != nil { - return nil, err - } - - minAvailable := intstr.Parse(s.MinAvailable) - return &policy.PodDisruptionBudget{ - ObjectMeta: metav1.ObjectMeta{ - Name: s.Name, - }, - Spec: policy.PodDisruptionBudgetSpec{ - MinAvailable: &minAvailable, - Selector: selector, - }, - }, nil -} - -// validate validates required fields are set to support structured generation. -func (s *PodDisruptionBudgetV1Generator) validate() error { - if len(s.Name) == 0 { - return fmt.Errorf("name must be specified") - } - if len(s.Selector) == 0 { - return fmt.Errorf("a selector must be specified") - } - if len(s.MinAvailable) == 0 { - return fmt.Errorf("the minimum number of available pods required must be specified") - } - return nil -} - -// PodDisruptionBudgetV2Generator supports stable generation of a pod disruption budget. -type PodDisruptionBudgetV2Generator struct { - Name string - MinAvailable string - MaxUnavailable string - Selector string -} - -// Ensure it supports the generator pattern that uses parameters specified during construction. 
-var _ StructuredGenerator = &PodDisruptionBudgetV2Generator{} - -func (PodDisruptionBudgetV2Generator) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"min-available", false}, - {"max-unavailable", false}, - {"selector", true}, - } -} - -func (s PodDisruptionBudgetV2Generator) Generate(params map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), params) - if err != nil { - return nil, err - } - - name, isString := params["name"].(string) - if !isString { - return nil, fmt.Errorf("expected string, found %T for 'name'", params["name"]) - } - - minAvailable, isString := params["min-available"].(string) - if !isString { - return nil, fmt.Errorf("expected string, found %T for 'min-available'", params["min-available"]) - } - - maxUnavailable, isString := params["max-unavailable"].(string) - if !isString { - return nil, fmt.Errorf("expected string, found %T for 'max-unavailable'", params["max-unavailable"]) - } - - selector, isString := params["selector"].(string) - if !isString { - return nil, fmt.Errorf("expected string, found %T for 'selector'", params["selector"]) - } - delegate := &PodDisruptionBudgetV2Generator{Name: name, MinAvailable: minAvailable, MaxUnavailable: maxUnavailable, Selector: selector} - return delegate.StructuredGenerate() -} - -// StructuredGenerate outputs a pod disruption budget object using the configured fields. -func (s *PodDisruptionBudgetV2Generator) StructuredGenerate() (runtime.Object, error) { - if err := s.validate(); err != nil { - return nil, err - } - - selector, err := metav1.ParseToLabelSelector(s.Selector) - if err != nil { - return nil, err - } - - if len(s.MaxUnavailable) > 0 { - maxUnavailable := intstr.Parse(s.MaxUnavailable) - return &policy.PodDisruptionBudget{ - ObjectMeta: metav1.ObjectMeta{ - Name: s.Name, - }, - Spec: policy.PodDisruptionBudgetSpec{ - MaxUnavailable: &maxUnavailable, - Selector: selector, - }, - }, nil - } - - if len(s.MinAvailable) > 0 { - minAvailable := intstr.Parse(s.MinAvailable) - return &policy.PodDisruptionBudget{ - ObjectMeta: metav1.ObjectMeta{ - Name: s.Name, - }, - Spec: policy.PodDisruptionBudgetSpec{ - MinAvailable: &minAvailable, - Selector: selector, - }, - }, nil - } - - return nil, err -} - -// validate validates required fields are set to support structured generation. -func (s *PodDisruptionBudgetV2Generator) validate() error { - if len(s.Name) == 0 { - return fmt.Errorf("name must be specified") - } - if len(s.Selector) == 0 { - return fmt.Errorf("a selector must be specified") - } - if len(s.MaxUnavailable) == 0 && len(s.MinAvailable) == 0 { - return fmt.Errorf("one of min-available or max-unavailable must be specified") - } - if len(s.MaxUnavailable) > 0 && len(s.MinAvailable) > 0 { - return fmt.Errorf("min-available and max-unavailable cannot be both specified") - } - return nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/priorityclass.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/priorityclass.go deleted file mode 100644 index 51c71a570189..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/priorityclass.go +++ /dev/null @@ -1,85 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - - scheduling "k8s.io/api/scheduling/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -// PriorityClassV1Generator supports stable generation of a priorityClass. -type PriorityClassV1Generator struct { - Name string - Value int32 - GlobalDefault bool - Description string -} - -// Ensure it supports the generator pattern that uses parameters specified during construction. -var _ StructuredGenerator = &PriorityClassV1Generator{} - -func (PriorityClassV1Generator) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"value", true}, - {"global-default", false}, - {"description", false}, - } -} - -func (s PriorityClassV1Generator) Generate(params map[string]interface{}) (runtime.Object, error) { - if err := ValidateParams(s.ParamNames(), params); err != nil { - return nil, err - } - - name, found := params["name"].(string) - if !found { - return nil, fmt.Errorf("expected string, saw %v for 'name'", name) - } - - value, found := params["value"].(int32) - if !found { - return nil, fmt.Errorf("expected int32, found %v", value) - } - - globalDefault, found := params["global-default"].(bool) - if !found { - return nil, fmt.Errorf("expected bool, found %v", globalDefault) - } - - description, found := params["description"].(string) - if !found { - return nil, fmt.Errorf("expected string, found %v", description) - } - delegate := &PriorityClassV1Generator{Name: name, Value: value, GlobalDefault: globalDefault, Description: description} - return delegate.StructuredGenerate() -} - -// StructuredGenerate outputs a priorityClass object using the configured fields. -func (s *PriorityClassV1Generator) StructuredGenerate() (runtime.Object, error) { - return &scheduling.PriorityClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: s.Name, - }, - Value: s.Value, - GlobalDefault: s.GlobalDefault, - Description: s.Description, - }, nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/quota.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/quota.go deleted file mode 100644 index d51b74af951c..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/quota.go +++ /dev/null @@ -1,125 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - "strings" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -// ResourceQuotaGeneratorV1 supports stable generation of a resource quota -type ResourceQuotaGeneratorV1 struct { - // The name of a quota object. 
- Name string - - // The hard resource limit string before parsing. - Hard string - - // The scopes of a quota object before parsing. - Scopes string -} - -// ParamNames returns the set of supported input parameters when using the parameter injection generator pattern -func (g ResourceQuotaGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"hard", true}, - {"scopes", false}, - } -} - -// Ensure it supports the generator pattern that uses parameter injection -var _ Generator = &ResourceQuotaGeneratorV1{} - -// Ensure it supports the generator pattern that uses parameters specified during construction -var _ StructuredGenerator = &ResourceQuotaGeneratorV1{} - -func (g ResourceQuotaGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(g.ParamNames(), genericParams) - if err != nil { - return nil, err - } - - params := map[string]string{} - for key, value := range genericParams { - strVal, isString := value.(string) - if !isString { - return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) - } - params[key] = strVal - } - - delegate := &ResourceQuotaGeneratorV1{} - delegate.Name = params["name"] - delegate.Hard = params["hard"] - delegate.Scopes = params["scopes"] - return delegate.StructuredGenerate() -} - -// StructuredGenerate outputs a ResourceQuota object using the configured fields -func (g *ResourceQuotaGeneratorV1) StructuredGenerate() (runtime.Object, error) { - if err := g.validate(); err != nil { - return nil, err - } - - resourceList, err := populateResourceListV1(g.Hard) - if err != nil { - return nil, err - } - - scopes, err := parseScopes(g.Scopes) - if err != nil { - return nil, err - } - - resourceQuota := &v1.ResourceQuota{} - resourceQuota.Name = g.Name - resourceQuota.Spec.Hard = resourceList - resourceQuota.Spec.Scopes = scopes - return resourceQuota, nil -} - -// validate validates required fields are set to support structured generation -func (r *ResourceQuotaGeneratorV1) validate() error { - if len(r.Name) == 0 { - return fmt.Errorf("name must be specified") - } - return nil -} - -func parseScopes(spec string) ([]v1.ResourceQuotaScope, error) { - // empty input gets a nil response to preserve generator test expected behaviors - if spec == "" { - return nil, nil - } - - scopes := strings.Split(spec, ",") - result := make([]v1.ResourceQuotaScope, 0, len(scopes)) - for _, scope := range scopes { - // intentionally do not verify the scope against the valid scope list. This is done by the apiserver anyway. - - if scope == "" { - return nil, fmt.Errorf("invalid resource quota scope \"\"") - } - - result = append(result, v1.ResourceQuotaScope(scope)) - } - return result, nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/rolebinding.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/rolebinding.go deleted file mode 100644 index 0eddb1e0342c..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/rolebinding.go +++ /dev/null @@ -1,173 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - - "strings" - - rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/sets" -) - -// RoleBindingGeneratorV1 supports stable generation of a roleBinding. -type RoleBindingGeneratorV1 struct { - // Name of roleBinding (required) - Name string - // ClusterRole for the roleBinding - ClusterRole string - // Role for the roleBinding - Role string - // Users to derive the roleBinding from (optional) - Users []string - // Groups to derive the roleBinding from (optional) - Groups []string - // ServiceAccounts to derive the roleBinding from in namespace:name format(optional) - ServiceAccounts []string -} - -// Ensure it supports the generator pattern that uses parameter injection. -var _ Generator = &RoleBindingGeneratorV1{} - -// Ensure it supports the generator pattern that uses parameters specified during construction. -var _ StructuredGenerator = &RoleBindingGeneratorV1{} - -// Generate returns a roleBinding using the specified parameters. -func (s RoleBindingGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), genericParams) - if err != nil { - return nil, err - } - delegate := &RoleBindingGeneratorV1{} - userStrings, found := genericParams["user"] - if found { - fromFileArray, isArray := userStrings.([]string) - if !isArray { - return nil, fmt.Errorf("expected []string, found :%v", userStrings) - } - delegate.Users = fromFileArray - delete(genericParams, "user") - } - groupStrings, found := genericParams["group"] - if found { - fromLiteralArray, isArray := groupStrings.([]string) - if !isArray { - return nil, fmt.Errorf("expected []string, found :%v", groupStrings) - } - delegate.Groups = fromLiteralArray - delete(genericParams, "group") - } - saStrings, found := genericParams["serviceaccount"] - if found { - fromLiteralArray, isArray := saStrings.([]string) - if !isArray { - return nil, fmt.Errorf("expected []string, found :%v", saStrings) - } - delegate.ServiceAccounts = fromLiteralArray - delete(genericParams, "serviceaccount") - } - params := map[string]string{} - for key, value := range genericParams { - strVal, isString := value.(string) - if !isString { - return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) - } - params[key] = strVal - } - delegate.Name = params["name"] - delegate.ClusterRole = params["clusterrole"] - delegate.Role = params["role"] - return delegate.StructuredGenerate() -} - -// ParamNames returns the set of supported input parameters when using the parameter injection generator pattern. -func (s RoleBindingGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"clusterrole", false}, - {"role", false}, - {"user", false}, - {"group", false}, - {"serviceaccount", false}, - } -} - -// StructuredGenerate outputs a roleBinding object using the configured fields. 
-func (s RoleBindingGeneratorV1) StructuredGenerate() (runtime.Object, error) {
-	if err := s.validate(); err != nil {
-		return nil, err
-	}
-	roleBinding := &rbacv1.RoleBinding{}
-	roleBinding.Name = s.Name
-
-	switch {
-	case len(s.Role) > 0:
-		roleBinding.RoleRef = rbacv1.RoleRef{
-			APIGroup: rbacv1.GroupName,
-			Kind:     "Role",
-			Name:     s.Role,
-		}
-	case len(s.ClusterRole) > 0:
-		roleBinding.RoleRef = rbacv1.RoleRef{
-			APIGroup: rbacv1.GroupName,
-			Kind:     "ClusterRole",
-			Name:     s.ClusterRole,
-		}
-	}
-
-	for _, user := range sets.NewString(s.Users...).List() {
-		roleBinding.Subjects = append(roleBinding.Subjects, rbacv1.Subject{
-			Kind:     rbacv1.UserKind,
-			APIGroup: rbacv1.GroupName,
-			Name:     user,
-		})
-	}
-	for _, group := range sets.NewString(s.Groups...).List() {
-		roleBinding.Subjects = append(roleBinding.Subjects, rbacv1.Subject{
-			Kind:     rbacv1.GroupKind,
-			APIGroup: rbacv1.GroupName,
-			Name:     group,
-		})
-	}
-	for _, sa := range sets.NewString(s.ServiceAccounts...).List() {
-		tokens := strings.Split(sa, ":")
-		if len(tokens) != 2 || tokens[1] == "" {
-			return nil, fmt.Errorf("serviceaccount must be <namespace>:<name>")
-		}
-		roleBinding.Subjects = append(roleBinding.Subjects, rbacv1.Subject{
-			Kind:      rbacv1.ServiceAccountKind,
-			APIGroup:  "",
-			Namespace: tokens[0],
-			Name:      tokens[1],
-		})
-	}
-
-	return roleBinding, nil
-}
-
-// validate validates required fields are set to support structured generation.
-func (s RoleBindingGeneratorV1) validate() error {
-	if len(s.Name) == 0 {
-		return fmt.Errorf("name must be specified")
-	}
-	if (len(s.ClusterRole) == 0) == (len(s.Role) == 0) {
-		return fmt.Errorf("exactly one of clusterrole or role must be specified")
-	}
-	return nil
-}
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/rollback.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/rollback.go
index 027abadc4f46..8851d5741051 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/rollback.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/rollback.go
@@ -19,14 +19,11 @@ package kubectl
 import (
 	"bytes"
 	"fmt"
-	"os"
-	"os/signal"
 	"sort"
-	"syscall"
 
 	appsv1 "k8s.io/api/apps/v1"
-	"k8s.io/api/core/v1"
-	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
+	corev1 "k8s.io/api/core/v1"
+	apiequality "k8s.io/apimachinery/pkg/api/equality"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -34,17 +31,10 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/json"
 	"k8s.io/apimachinery/pkg/util/strategicpatch"
-	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/client-go/kubernetes"
-	"k8s.io/kubernetes/pkg/api/legacyscheme"
-	api "k8s.io/kubernetes/pkg/apis/core"
-	apiv1 "k8s.io/kubernetes/pkg/apis/core/v1"
-	"k8s.io/kubernetes/pkg/apis/extensions"
 	kapps "k8s.io/kubernetes/pkg/kubectl/apps"
-	sliceutil "k8s.io/kubernetes/pkg/kubectl/util/slice"
-	printersinternal "k8s.io/kubernetes/pkg/printers/internalversion"
-	// kubectl should not be taking dependencies on logic in the controllers
-	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
+	"k8s.io/kubernetes/pkg/kubectl/scheme"
+	deploymentutil "k8s.io/kubernetes/pkg/kubectl/util/deployment"
 )
 
 const (
@@ -105,144 +95,159 @@ type DeploymentRollbacker struct {
 }
 
 func (r *DeploymentRollbacker) Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64, dryRun bool) (string, error) {
-	d, ok := obj.(*extensions.Deployment)
-	if !ok {
-		return "", fmt.Errorf("passed object is not a Deployment:
%#v", obj) + if toRevision < 0 { + return "", revisionNotFoundErr(toRevision) + } + accessor, err := meta.Accessor(obj) + if err != nil { + return "", fmt.Errorf("failed to create accessor for kind %v: %s", obj.GetObjectKind(), err.Error()) + } + name := accessor.GetName() + namespace := accessor.GetNamespace() + + // TODO: Fix this after kubectl has been removed from core. It is not possible to convert the runtime.Object + // to the external appsv1 Deployment without round-tripping through an internal version of Deployment. We're + // currently getting rid of all internal versions of resources. So we specifically request the appsv1 version + // here. This follows the same pattern as for DaemonSet and StatefulSet. + deployment, err := r.c.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return "", fmt.Errorf("failed to retrieve Deployment %s: %v", name, err) + } + + rsForRevision, err := deploymentRevision(deployment, r.c, toRevision) + if err != nil { + return "", err } if dryRun { - return simpleDryRun(d, r.c, toRevision) + return printTemplate(&rsForRevision.Spec.Template) } - if d.Spec.Paused { - return "", fmt.Errorf("you cannot rollback a paused deployment; resume it first with 'kubectl rollout resume deployment/%s' and try again", d.Name) + if deployment.Spec.Paused { + return "", fmt.Errorf("you cannot rollback a paused deployment; resume it first with 'kubectl rollout resume deployment/%s' and try again", name) } - deploymentRollback := &extensionsv1beta1.DeploymentRollback{ - Name: d.Name, - UpdatedAnnotations: updatedAnnotations, - RollbackTo: extensionsv1beta1.RollbackConfig{ - Revision: toRevision, - }, + + // Skip if the revision already matches current Deployment + if equalIgnoreHash(&rsForRevision.Spec.Template, &deployment.Spec.Template) { + return fmt.Sprintf("%s (current template already matches revision %d)", rollbackSkipped, toRevision), nil } - result := "" - // Get current events - events, err := r.c.CoreV1().Events(d.Namespace).List(metav1.ListOptions{}) - if err != nil { - return result, err + // remove hash label before patching back into the deployment + delete(rsForRevision.Spec.Template.Labels, appsv1.DefaultDeploymentUniqueLabelKey) + + // compute deployment annotations + annotations := map[string]string{} + for k := range annotationsToSkip { + if v, ok := deployment.Annotations[k]; ok { + annotations[k] = v + } } - // Do the rollback - if err := r.c.ExtensionsV1beta1().Deployments(d.Namespace).Rollback(deploymentRollback); err != nil { - return result, err + for k, v := range rsForRevision.Annotations { + if !annotationsToSkip[k] { + annotations[k] = v + } } - // Watch for the changes of events - watch, err := r.c.CoreV1().Events(d.Namespace).Watch(metav1.ListOptions{Watch: true, ResourceVersion: events.ResourceVersion}) + + // make patch to restore + patchType, patch, err := getDeploymentPatch(&rsForRevision.Spec.Template, annotations) if err != nil { - return result, err + return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err) } - result = watchRollbackEvent(watch) - return result, err -} -// watchRollbackEvent watches for rollback events and returns rollback result -func watchRollbackEvent(w watch.Interface) string { - signals := make(chan os.Signal, 1) - signal.Notify(signals, os.Interrupt, os.Kill, syscall.SIGTERM) - for { - select { - case event, ok := <-w.ResultChan(): - if !ok { - return "" - } - obj, ok := event.Object.(*api.Event) - if !ok { - w.Stop() - return "" - } - isRollback, result := 
isRollbackEvent(obj) - if isRollback { - w.Stop() - return result - } - case <-signals: - w.Stop() - } + // Restore revision + if _, err = r.c.AppsV1().Deployments(namespace).Patch(name, patchType, patch); err != nil { + return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err) } + return rollbackSuccess, nil } -// isRollbackEvent checks if the input event is about rollback, and returns true and -// related result string back if it is. -func isRollbackEvent(e *api.Event) (bool, string) { - rollbackEventReasons := []string{deploymentutil.RollbackRevisionNotFound, deploymentutil.RollbackTemplateUnchanged, deploymentutil.RollbackDone} - for _, reason := range rollbackEventReasons { - if e.Reason == reason { - if reason == deploymentutil.RollbackDone { - return true, rollbackSuccess - } - return true, fmt.Sprintf("%s (%s: %s)", rollbackSkipped, e.Reason, e.Message) - } - } - return false, "" +// equalIgnoreHash returns true if two given podTemplateSpec are equal, ignoring the diff in value of Labels[pod-template-hash] +// We ignore pod-template-hash because: +// 1. The hash result would be different upon podTemplateSpec API changes +// (e.g. the addition of a new field will cause the hash code to change) +// 2. The deployment template won't have hash labels +func equalIgnoreHash(template1, template2 *corev1.PodTemplateSpec) bool { + t1Copy := template1.DeepCopy() + t2Copy := template2.DeepCopy() + // Remove hash labels from template.Labels before comparing + delete(t1Copy.Labels, appsv1.DefaultDeploymentUniqueLabelKey) + delete(t2Copy.Labels, appsv1.DefaultDeploymentUniqueLabelKey) + return apiequality.Semantic.DeepEqual(t1Copy, t2Copy) } -func simpleDryRun(deployment *extensions.Deployment, c kubernetes.Interface, toRevision int64) (string, error) { - externalDeployment := &appsv1.Deployment{} - if err := legacyscheme.Scheme.Convert(deployment, externalDeployment, nil); err != nil { - return "", fmt.Errorf("failed to convert deployment, %v", err) - } +// annotationsToSkip lists the annotations that should be preserved from the deployment and not +// copied from the replicaset when rolling a deployment back +var annotationsToSkip = map[string]bool{ + corev1.LastAppliedConfigAnnotation: true, + deploymentutil.RevisionAnnotation: true, + deploymentutil.RevisionHistoryAnnotation: true, + deploymentutil.DesiredReplicasAnnotation: true, + deploymentutil.MaxReplicasAnnotation: true, + appsv1.DeprecatedRollbackTo: true, +} + +// getDeploymentPatch returns a patch that can be applied to restore a Deployment to a +// previous version. If the returned error is nil the patch is valid.
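The new `equalIgnoreHash` guard is what lets the patch-based rollback skip no-op restores. A minimal sketch of its contract, assuming the same package and its imports (`corev1`, `metav1`, `appsv1`) and an illustrative hash value:

```go
// Two templates that differ only in the pod-template-hash label compare
// as equal, which is exactly the check that makes Rollback return
// rollbackSkipped for an identical revision.
a := corev1.PodTemplateSpec{
	ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{
		"app": "web",
		appsv1.DefaultDeploymentUniqueLabelKey: "6d4cf56db6", // illustrative hash
	}},
}
b := corev1.PodTemplateSpec{
	ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "web"}},
}
fmt.Println(equalIgnoreHash(&a, &b)) // true
```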
+func getDeploymentPatch(podTemplate *corev1.PodTemplateSpec, annotations map[string]string) (types.PatchType, []byte, error) { + // Create a patch of the Deployment that replaces spec.template + patch, err := json.Marshal([]interface{}{ + map[string]interface{}{ + "op": "replace", + "path": "/spec/template", + "value": podTemplate, + }, + map[string]interface{}{ + "op": "replace", + "path": "/metadata/annotations", + "value": annotations, + }, + }) + return types.JSONPatchType, patch, err +} - _, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(externalDeployment, c.AppsV1()) +func deploymentRevision(deployment *appsv1.Deployment, c kubernetes.Interface, toRevision int64) (revision *appsv1.ReplicaSet, err error) { + + _, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c.AppsV1()) if err != nil { - return "", fmt.Errorf("failed to retrieve replica sets from deployment %s: %v", deployment.Name, err) + return nil, fmt.Errorf("failed to retrieve replica sets from deployment %s: %v", deployment.Name, err) } allRSs := allOldRSs if newRS != nil { allRSs = append(allRSs, newRS) } - revisionToSpec := make(map[int64]*v1.PodTemplateSpec) + var ( + latestReplicaSet *appsv1.ReplicaSet + latestRevision = int64(-1) + previousReplicaSet *appsv1.ReplicaSet + previousRevision = int64(-1) + ) for _, rs := range allRSs { - v, err := deploymentutil.Revision(rs) - if err != nil { - continue + if v, err := deploymentutil.Revision(rs); err == nil { + if toRevision == 0 { + if latestRevision < v { + // newest one we've seen so far + previousRevision = latestRevision + previousReplicaSet = latestReplicaSet + latestRevision = v + latestReplicaSet = rs + } else if previousRevision < v { + // second newest one we've seen so far + previousRevision = v + previousReplicaSet = rs + } + } else if toRevision == v { + return rs, nil + } } - revisionToSpec[v] = &rs.Spec.Template - } - - if len(revisionToSpec) < 2 { - return "", fmt.Errorf("no rollout history found for deployment %q", deployment.Name) } if toRevision > 0 { - template, ok := revisionToSpec[toRevision] - if !ok { - return "", revisionNotFoundErr(toRevision) - } - buf := bytes.NewBuffer([]byte{}) - internalTemplate := &api.PodTemplateSpec{} - if err := apiv1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(template, internalTemplate, nil); err != nil { - return "", fmt.Errorf("failed to convert podtemplate, %v", err) - } - w := printersinternal.NewPrefixWriter(buf) - printersinternal.DescribePodTemplate(internalTemplate, w) - return buf.String(), nil - } - - // Sort the revisionToSpec map by revision - revisions := make([]int64, 0, len(revisionToSpec)) - for r := range revisionToSpec { - revisions = append(revisions, r) + return nil, revisionNotFoundErr(toRevision) } - sliceutil.SortInts64(revisions) - template, _ := revisionToSpec[revisions[len(revisions)-2]] - buf := bytes.NewBuffer([]byte{}) - buf.WriteString("\n") - internalTemplate := &api.PodTemplateSpec{} - if err := apiv1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(template, internalTemplate, nil); err != nil { - return "", fmt.Errorf("failed to convert podtemplate, %v", err) + if previousReplicaSet == nil { + return nil, fmt.Errorf("no rollout history found for deployment %q", deployment.Name) } - w := printersinternal.NewPrefixWriter(buf) - printersinternal.DescribePodTemplate(internalTemplate, w) - return buf.String(), nil + return previousReplicaSet, nil } type DaemonSetRollbacker struct { @@ -382,7 +387,7 @@ func (r *StatefulSetRollbacker) Rollback(obj 
runtime.Object, updatedAnnotations return rollbackSuccess, nil } -var appsCodec = legacyscheme.Codecs.LegacyCodec(appsv1.SchemeGroupVersion) +var appsCodec = scheme.Codecs.LegacyCodec(appsv1.SchemeGroupVersion) // applyRevision returns a new StatefulSet constructed by restoring the state in revision to set. If the returned error // is nil, the returned StatefulSet is valid. @@ -459,15 +464,12 @@ func findHistory(toRevision int64, allHistory []*appsv1.ControllerRevision) *app } // printPodTemplate converts a given pod template into a human-readable string. -func printPodTemplate(specTemplate *v1.PodTemplateSpec) (string, error) { - content := bytes.NewBuffer([]byte{}) - w := printersinternal.NewPrefixWriter(content) - internalTemplate := &api.PodTemplateSpec{} - if err := apiv1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(specTemplate, internalTemplate, nil); err != nil { - return "", fmt.Errorf("failed to convert podtemplate while printing: %v", err) - } - printersinternal.DescribePodTemplate(internalTemplate, w) - return fmt.Sprintf("will roll back to %s", content.String()), nil +func printPodTemplate(specTemplate *corev1.PodTemplateSpec) (string, error) { + podSpec, err := printTemplate(specTemplate) + if err != nil { + return "", err + } + return fmt.Sprintf("will roll back to %s", podSpec), nil } func revisionNotFoundErr(r int64) error { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go index 1c750614bba7..b6df08c41207 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go @@ -35,9 +35,9 @@ import ( scaleclient "k8s.io/client-go/scale" "k8s.io/client-go/util/integer" "k8s.io/client-go/util/retry" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" - deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" "k8s.io/kubernetes/pkg/kubectl/util" + deploymentutil "k8s.io/kubernetes/pkg/kubectl/util/deployment" + "k8s.io/kubernetes/pkg/kubectl/util/podutils" ) func newInt32Ptr(val int) *int32 { @@ -54,7 +54,7 @@ func valOrZero(val *int32) int32 { const ( kubectlAnnotationPrefix = "kubectl.kubernetes.io/" - sourceIdAnnotation = kubectlAnnotationPrefix + "update-source-id" + sourceIDAnnotation = kubectlAnnotationPrefix + "update-source-id" desiredReplicasAnnotation = kubectlAnnotationPrefix + "desired-replicas" originalReplicasAnnotation = kubectlAnnotationPrefix + "original-replicas" nextControllerAnnotation = kubectlAnnotationPrefix + "next-controller-id" @@ -135,7 +135,7 @@ type RollingUpdater struct { scaleAndWait func(rc *corev1.ReplicationController, retry *RetryParams, wait *RetryParams) (*corev1.ReplicationController, error) //getOrCreateTargetController gets and validates an existing controller or //makes a new one. - getOrCreateTargetController func(controller *corev1.ReplicationController, sourceId string) (*corev1.ReplicationController, bool, error) + getOrCreateTargetController func(controller *corev1.ReplicationController, sourceID string) (*corev1.ReplicationController, bool, error) // cleanup performs post deployment cleanup tasks for newRc and oldRc. cleanup func(oldRc, newRc *corev1.ReplicationController, config *RollingUpdaterConfig) error // getReadyPods returns the amount of old and new ready pods. 
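The `sourceId` to `sourceID` change is a mechanical rename for Go initialism style, but the annotation itself carries real protocol: the updater stamps the new controller with the old controller's identity so an interrupted rolling update can be located and resumed. A sketch, assuming `oldRc` and `newRc` are populated `*corev1.ReplicationController` values in the same package:

```go
// The continuation key is "<old name>:<old UID>". getOrCreateTargetController
// compares it against sourceIDAnnotation on any existing target controller
// and errors on a mismatch rather than resuming someone else's update.
sourceID := fmt.Sprintf("%s:%s", oldRc.Name, oldRc.UID)
if newRc.Annotations == nil {
	newRc.Annotations = map[string]string{}
}
newRc.Annotations[sourceIDAnnotation] = sourceID
```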
@@ -188,8 +188,8 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error { // Find an existing controller (for continuing an interrupted update) or // create a new one if necessary. - sourceId := fmt.Sprintf("%s:%s", oldRc.Name, oldRc.UID) - newRc, existed, err := r.getOrCreateTargetController(config.NewRc, sourceId) + sourceID := fmt.Sprintf("%s:%s", oldRc.Name, oldRc.UID) + newRc, existed, err := r.getOrCreateTargetController(config.NewRc, sourceID) if err != nil { return err } @@ -443,7 +443,7 @@ func (r *RollingUpdater) readyPods(oldRc, newRc *corev1.ReplicationController, m if v1Pod.DeletionTimestamp != nil { continue } - if !podutil.IsPodAvailable(&v1Pod, minReadySeconds, r.nowFn()) { + if !podutils.IsPodAvailable(&v1Pod, minReadySeconds, r.nowFn()) { continue } switch controller.Name { @@ -458,14 +458,14 @@ func (r *RollingUpdater) readyPods(oldRc, newRc *corev1.ReplicationController, m } // getOrCreateTargetControllerWithClient looks for an existing controller with -// sourceId. If found, the existing controller is returned with true +// sourceID. If found, the existing controller is returned with true // indicating that the controller already exists. If the controller isn't // found, a new one is created and returned along with false indicating the // controller was created. // -// Existing controllers are validated to ensure their sourceIdAnnotation -// matches sourceId; if there's a mismatch, an error is returned. -func (r *RollingUpdater) getOrCreateTargetControllerWithClient(controller *corev1.ReplicationController, sourceId string) (*corev1.ReplicationController, bool, error) { +// Existing controllers are validated to ensure their sourceIDAnnotation +// matches sourceID; if there's a mismatch, an error is returned. +func (r *RollingUpdater) getOrCreateTargetControllerWithClient(controller *corev1.ReplicationController, sourceID string) (*corev1.ReplicationController, bool, error) { existingRc, err := r.existingController(controller) if err != nil { if !errors.IsNotFound(err) { @@ -474,24 +474,24 @@ func (r *RollingUpdater) getOrCreateTargetControllerWithClient(controller *corev return nil, false, err } if valOrZero(controller.Spec.Replicas) <= 0 { - return nil, false, fmt.Errorf("Invalid controller spec for %s; required: > 0 replicas, actual: %d\n", controller.Name, valOrZero(controller.Spec.Replicas)) + return nil, false, fmt.Errorf("Invalid controller spec for %s; required: > 0 replicas, actual: %d", controller.Name, valOrZero(controller.Spec.Replicas)) } // The controller wasn't found, so create it. if controller.Annotations == nil { controller.Annotations = map[string]string{} } controller.Annotations[desiredReplicasAnnotation] = fmt.Sprintf("%d", valOrZero(controller.Spec.Replicas)) - controller.Annotations[sourceIdAnnotation] = sourceId + controller.Annotations[sourceIDAnnotation] = sourceID controller.Spec.Replicas = newInt32Ptr(0) newRc, err := r.rcClient.ReplicationControllers(r.ns).Create(controller) return newRc, false, err } // Validate and use the existing controller. 
annotations := existingRc.Annotations - source := annotations[sourceIdAnnotation] + source := annotations[sourceIDAnnotation] _, ok := annotations[desiredReplicasAnnotation] - if source != sourceId || !ok { - return nil, false, fmt.Errorf("Missing/unexpected annotations for controller %s, expected %s : %s", controller.Name, sourceId, annotations) + if source != sourceID || !ok { + return nil, false, fmt.Errorf("Missing/unexpected annotations for controller %s, expected %s : %s", controller.Name, sourceID, annotations) } return existingRc, true, nil } @@ -517,7 +517,7 @@ func (r *RollingUpdater) cleanupWithClients(oldRc, newRc *corev1.ReplicationCont return err } applyUpdate := func(rc *corev1.ReplicationController) { - delete(rc.Annotations, sourceIdAnnotation) + delete(rc.Annotations, sourceIDAnnotation) delete(rc.Annotations, desiredReplicasAnnotation) } if newRc, err = updateRcWithRetries(r.rcClient, r.ns, newRc, applyUpdate); err != nil { @@ -662,7 +662,7 @@ func AbortRollingUpdate(c *RollingUpdaterConfig) error { if c.NewRc.Annotations == nil { c.NewRc.Annotations = map[string]string{} } - c.NewRc.Annotations[sourceIdAnnotation] = fmt.Sprintf("%s:%s", c.OldRc.Name, c.OldRc.UID) + c.NewRc.Annotations[sourceIDAnnotation] = fmt.Sprintf("%s:%s", c.OldRc.Name, c.OldRc.UID) // Use the original value since the replica count change from old to new // could be asymmetric. If we don't know the original count, we can't safely @@ -841,7 +841,7 @@ func FindSourceController(r corev1client.ReplicationControllersGetter, namespace } for ix := range list.Items { rc := &list.Items[ix] - if rc.Annotations != nil && strings.HasPrefix(rc.Annotations[sourceIdAnnotation], name) { + if rc.Annotations != nil && strings.HasPrefix(rc.Annotations[sourceIDAnnotation], name) { return rc, nil } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/rollout_status.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/rollout_status.go index 184cb892494a..932e905175ef 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/rollout_status.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/rollout_status.go @@ -23,11 +23,8 @@ import ( extensionsv1beta1 "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/kubernetes" - clientappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" - "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/controller/deployment/util" "k8s.io/kubernetes/pkg/kubectl/scheme" + deploymentutil "k8s.io/kubernetes/pkg/kubectl/util/deployment" ) // StatusViewer provides an interface for resources that have rollout status. @@ -36,32 +33,28 @@ type StatusViewer interface { } // StatusViewerFor returns a StatusViewer for the resource specified by kind. 
-func StatusViewerFor(kind schema.GroupKind, c kubernetes.Interface) (StatusViewer, error) { +func StatusViewerFor(kind schema.GroupKind) (StatusViewer, error) { switch kind { - case extensionsv1beta1.SchemeGroupVersion.WithKind("Deployment").GroupKind(), apps.Kind("Deployment"): - return &DeploymentStatusViewer{c.AppsV1()}, nil - case extensionsv1beta1.SchemeGroupVersion.WithKind("DaemonSet").GroupKind(), apps.Kind("DaemonSet"): - return &DaemonSetStatusViewer{c.AppsV1()}, nil - case apps.Kind("StatefulSet"): - return &StatefulSetStatusViewer{c.AppsV1()}, nil + case extensionsv1beta1.SchemeGroupVersion.WithKind("Deployment").GroupKind(), + appsv1.SchemeGroupVersion.WithKind("Deployment").GroupKind(): + return &DeploymentStatusViewer{}, nil + case extensionsv1beta1.SchemeGroupVersion.WithKind("DaemonSet").GroupKind(), + appsv1.SchemeGroupVersion.WithKind("DaemonSet").GroupKind(): + return &DaemonSetStatusViewer{}, nil + case appsv1.SchemeGroupVersion.WithKind("StatefulSet").GroupKind(): + return &StatefulSetStatusViewer{}, nil } return nil, fmt.Errorf("no status viewer has been implemented for %v", kind) } // DeploymentStatusViewer implements the StatusViewer interface. -type DeploymentStatusViewer struct { - c clientappsv1.DeploymentsGetter -} +type DeploymentStatusViewer struct{} // DaemonSetStatusViewer implements the StatusViewer interface. -type DaemonSetStatusViewer struct { - c clientappsv1.DaemonSetsGetter -} +type DaemonSetStatusViewer struct{} // StatefulSetStatusViewer implements the StatusViewer interface. -type StatefulSetStatusViewer struct { - c clientappsv1.StatefulSetsGetter -} +type StatefulSetStatusViewer struct{} // Status returns a message describing deployment status, and a bool value indicating if the status is considered done. 
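With the typed clients removed from the viewers, a caller now resolves a viewer from the GroupKind alone and passes the live object in as unstructured data. A hedged sketch of the new call pattern; fetching `u` (for example through a dynamic client) is assumed to happen elsewhere:

```go
// u must satisfy runtime.Unstructured; revision 0 means "don't pin a revision".
func printRolloutStatus(u runtime.Unstructured) error {
	viewer, err := kubectl.StatusViewerFor(appsv1.SchemeGroupVersion.WithKind("Deployment").GroupKind())
	if err != nil {
		return err
	}
	msg, done, err := viewer.Status(u, 0)
	if err != nil {
		return err
	}
	fmt.Print(msg)
	if !done {
		fmt.Println("rollout still in progress")
	}
	return nil
}
```

Dropping the stored clients also removes the package's typed client-go dependency, as the import hunk above shows.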
func (s *DeploymentStatusViewer) Status(obj runtime.Unstructured, revision int64) (string, bool, error) { @@ -72,7 +65,7 @@ func (s *DeploymentStatusViewer) Status(obj runtime.Unstructured, revision int64 } if revision > 0 { - deploymentRev, err := util.Revision(deployment) + deploymentRev, err := deploymentutil.Revision(deployment) if err != nil { return "", false, fmt.Errorf("cannot get the revision of deployment %q: %v", deployment.Name, err) } @@ -81,8 +74,8 @@ func (s *DeploymentStatusViewer) Status(obj runtime.Unstructured, revision int64 } } if deployment.Generation <= deployment.Status.ObservedGeneration { - cond := util.GetDeploymentCondition(deployment.Status, appsv1.DeploymentProgressing) - if cond != nil && cond.Reason == util.TimedOutReason { + cond := deploymentutil.GetDeploymentCondition(deployment.Status, appsv1.DeploymentProgressing) + if cond != nil && cond.Reason == deploymentutil.TimedOutReason { return "", false, fmt.Errorf("deployment %q exceeded its progress deadline", deployment.Name) } if deployment.Spec.Replicas != nil && deployment.Status.UpdatedReplicas < *deployment.Spec.Replicas { @@ -141,7 +134,7 @@ func (s *StatefulSetStatusViewer) Status(obj runtime.Unstructured, revision int6 if sts.Spec.Replicas != nil && sts.Status.ReadyReplicas < *sts.Spec.Replicas { return fmt.Sprintf("Waiting for %d pods to be ready...\n", *sts.Spec.Replicas-sts.Status.ReadyReplicas), false, nil } - if sts.Spec.UpdateStrategy.Type == apps.RollingUpdateStatefulSetStrategyType && sts.Spec.UpdateStrategy.RollingUpdate != nil { + if sts.Spec.UpdateStrategy.Type == appsv1.RollingUpdateStatefulSetStrategyType && sts.Spec.UpdateStrategy.RollingUpdate != nil { if sts.Spec.Replicas != nil && sts.Spec.UpdateStrategy.RollingUpdate.Partition != nil { if sts.Status.UpdatedReplicas < (*sts.Spec.Replicas - *sts.Spec.UpdateStrategy.RollingUpdate.Partition) { return fmt.Sprintf("Waiting for partitioned roll out to finish: %d out of %d new pods have been updated...\n", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/run.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/run.go deleted file mode 100644 index 7fe1f2fe2b43..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/run.go +++ /dev/null @@ -1,932 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubectl - -import ( - "fmt" - "strconv" - "strings" - - appsv1beta1 "k8s.io/api/apps/v1beta1" - batchv1 "k8s.io/api/batch/v1" - batchv1beta1 "k8s.io/api/batch/v1beta1" - batchv2alpha1 "k8s.io/api/batch/v2alpha1" - "k8s.io/api/core/v1" - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/validation" -) - -type DeploymentV1Beta1 struct{} - -func (DeploymentV1Beta1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"labels", false}, - {"default-name", false}, - {"name", true}, - {"replicas", true}, - {"image", true}, - {"image-pull-policy", false}, - {"port", false}, - {"hostport", false}, - {"stdin", false}, - {"tty", false}, - {"command", false}, - {"args", false}, - {"env", false}, - {"requests", false}, - {"limits", false}, - {"serviceaccount", false}, - } -} - -func (DeploymentV1Beta1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - args, err := getArgs(genericParams) - if err != nil { - return nil, err - } - - envs, err := getEnvs(genericParams) - if err != nil { - return nil, err - } - - params, err := getParams(genericParams) - if err != nil { - return nil, err - } - - name, err := getName(params) - if err != nil { - return nil, err - } - - labels, err := getLabels(params, name) - if err != nil { - return nil, err - } - - count, err := strconv.Atoi(params["replicas"]) - if err != nil { - return nil, err - } - - podSpec, err := makePodSpec(params, name) - if err != nil { - return nil, err - } - - imagePullPolicy := v1.PullPolicy(params["image-pull-policy"]) - if err = updatePodContainers(params, args, envs, imagePullPolicy, podSpec); err != nil { - return nil, err - } - - if err := updatePodPorts(params, podSpec); err != nil { - return nil, err - } - - count32 := int32(count) - deployment := extensionsv1beta1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: labels, - }, - Spec: extensionsv1beta1.DeploymentSpec{ - Replicas: &count32, - Selector: &metav1.LabelSelector{MatchLabels: labels}, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: labels, - }, - Spec: *podSpec, - }, - }, - } - return &deployment, nil -} - -type DeploymentAppsV1Beta1 struct{} - -func (DeploymentAppsV1Beta1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"labels", false}, - {"default-name", false}, - {"name", true}, - {"replicas", true}, - {"image", true}, - {"image-pull-policy", false}, - {"port", false}, - {"hostport", false}, - {"stdin", false}, - {"tty", false}, - {"command", false}, - {"args", false}, - {"env", false}, - {"requests", false}, - {"limits", false}, - {"serviceaccount", false}, - } -} - -func (DeploymentAppsV1Beta1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - args, err := getArgs(genericParams) - if err != nil { - return nil, err - } - - envs, err := getEnvs(genericParams) - if err != nil { - return nil, err - } - - params, err := getParams(genericParams) - if err != nil { - return nil, err - } - - name, err := getName(params) - if err != nil { - return nil, err - } - - labels, err := getLabels(params, name) - if err != nil { - return nil, err - } - - count, err := strconv.Atoi(params["replicas"]) - if err != nil { - return nil, err - } - - podSpec, err := makePodSpec(params, name) - if err != nil { - return nil, err - } - - imagePullPolicy := v1.PullPolicy(params["image-pull-policy"]) - if err = 
updatePodContainers(params, args, envs, imagePullPolicy, podSpec); err != nil { - return nil, err - } - - if err := updatePodPorts(params, podSpec); err != nil { - return nil, err - } - - count32 := int32(count) - deployment := appsv1beta1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: labels, - }, - Spec: appsv1beta1.DeploymentSpec{ - Replicas: &count32, - Selector: &metav1.LabelSelector{MatchLabels: labels}, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: labels, - }, - Spec: *podSpec, - }, - }, - } - return &deployment, nil -} - -// getLabels returns map of labels. -func getLabels(params map[string]string, name string) (map[string]string, error) { - labelString, found := params["labels"] - var labels map[string]string - var err error - if found && len(labelString) > 0 { - labels, err = ParseLabels(labelString) - if err != nil { - return nil, err - } - } else { - labels = map[string]string{ - "run": name, - } - } - return labels, nil -} - -// getName returns the name of newly created resource. -func getName(params map[string]string) (string, error) { - name, found := params["name"] - if !found || len(name) == 0 { - name, found = params["default-name"] - if !found || len(name) == 0 { - return "", fmt.Errorf("'name' is a required parameter") - } - } - return name, nil -} - -// getParams returns map of generic parameters. -func getParams(genericParams map[string]interface{}) (map[string]string, error) { - params := map[string]string{} - for key, value := range genericParams { - strVal, isString := value.(string) - if !isString { - return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) - } - params[key] = strVal - } - return params, nil -} - -// getArgs returns arguments for the container command. -func getArgs(genericParams map[string]interface{}) ([]string, error) { - args := []string{} - val, found := genericParams["args"] - if found { - var isArray bool - args, isArray = val.([]string) - if !isArray { - return nil, fmt.Errorf("expected []string, found: %v", val) - } - delete(genericParams, "args") - } - return args, nil -} - -// getEnvs returns environment variables. 
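All of these helpers (`getName`, `getLabels`, `getParams`, `getArgs`, `getEnvs`) feed one flow: `kubectl run` handed the generator a loosely typed parameter map, and the generator validated it and assembled a typed object. A sketch against the removed `DeploymentAppsV1Beta1`; the parameter values are illustrative, and the `labels` format assumes `ParseLabels` accepts comma-separated `key=value` pairs:

```go
// Imports assumed: fmt, log, kubectl, appsv1beta1 (k8s.io/api/apps/v1beta1).
gen := kubectl.DeploymentAppsV1Beta1{}
obj, err := gen.Generate(map[string]interface{}{
	"name":     "web",
	"replicas": "3", // arrives as a string; parsed via strconv.Atoi
	"image":    "nginx:1.15",
	"labels":   "run=web", // defaults to {"run": <name>} when omitted
})
if err != nil {
	log.Fatal(err)
}
deployment := obj.(*appsv1beta1.Deployment)
fmt.Println(deployment.Name, *deployment.Spec.Replicas) // web 3
```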
-func getEnvs(genericParams map[string]interface{}) ([]v1.EnvVar, error) { - var envs []v1.EnvVar - envStrings, found := genericParams["env"] - if found { - if envStringArray, isArray := envStrings.([]string); isArray { - var err error - envs, err = parseEnvs(envStringArray) - if err != nil { - return nil, err - } - delete(genericParams, "env") - } else { - return nil, fmt.Errorf("expected []string, found: %v", envStrings) - } - } - return envs, nil -} - -type JobV1 struct{} - -func (JobV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"labels", false}, - {"default-name", false}, - {"name", true}, - {"image", true}, - {"image-pull-policy", false}, - {"port", false}, - {"hostport", false}, - {"stdin", false}, - {"leave-stdin-open", false}, - {"tty", false}, - {"command", false}, - {"args", false}, - {"env", false}, - {"requests", false}, - {"limits", false}, - {"restart", false}, - {"serviceaccount", false}, - } -} - -func (JobV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - args, err := getArgs(genericParams) - if err != nil { - return nil, err - } - - envs, err := getEnvs(genericParams) - if err != nil { - return nil, err - } - - params, err := getParams(genericParams) - if err != nil { - return nil, err - } - - name, err := getName(params) - if err != nil { - return nil, err - } - - labels, err := getLabels(params, name) - if err != nil { - return nil, err - } - - podSpec, err := makePodSpec(params, name) - if err != nil { - return nil, err - } - - imagePullPolicy := v1.PullPolicy(params["image-pull-policy"]) - if err = updatePodContainers(params, args, envs, imagePullPolicy, podSpec); err != nil { - return nil, err - } - - leaveStdinOpen, err := GetBool(params, "leave-stdin-open", false) - if err != nil { - return nil, err - } - podSpec.Containers[0].StdinOnce = !leaveStdinOpen && podSpec.Containers[0].Stdin - - if err := updatePodPorts(params, podSpec); err != nil { - return nil, err - } - - restartPolicy := v1.RestartPolicy(params["restart"]) - if len(restartPolicy) == 0 { - restartPolicy = v1.RestartPolicyNever - } - podSpec.RestartPolicy = restartPolicy - - job := batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: labels, - }, - Spec: batchv1.JobSpec{ - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: labels, - }, - Spec: *podSpec, - }, - }, - } - - return &job, nil -} - -type CronJobV2Alpha1 struct{} - -func (CronJobV2Alpha1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"labels", false}, - {"default-name", false}, - {"name", true}, - {"image", true}, - {"image-pull-policy", false}, - {"port", false}, - {"hostport", false}, - {"stdin", false}, - {"leave-stdin-open", false}, - {"tty", false}, - {"command", false}, - {"args", false}, - {"env", false}, - {"requests", false}, - {"limits", false}, - {"restart", false}, - {"schedule", true}, - {"serviceaccount", false}, - } -} - -func (CronJobV2Alpha1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - args, err := getArgs(genericParams) - if err != nil { - return nil, err - } - - envs, err := getEnvs(genericParams) - if err != nil { - return nil, err - } - - params, err := getParams(genericParams) - if err != nil { - return nil, err - } - - name, err := getName(params) - if err != nil { - return nil, err - } - - labels, err := getLabels(params, name) - if err != nil { - return nil, err - } - - podSpec, err := makePodSpec(params, name) - if err != nil { - return nil, err - } - - imagePullPolicy := 
v1.PullPolicy(params["image-pull-policy"]) - if err = updatePodContainers(params, args, envs, imagePullPolicy, podSpec); err != nil { - return nil, err - } - - leaveStdinOpen, err := GetBool(params, "leave-stdin-open", false) - if err != nil { - return nil, err - } - podSpec.Containers[0].StdinOnce = !leaveStdinOpen && podSpec.Containers[0].Stdin - - if err := updatePodPorts(params, podSpec); err != nil { - return nil, err - } - - restartPolicy := v1.RestartPolicy(params["restart"]) - if len(restartPolicy) == 0 { - restartPolicy = v1.RestartPolicyNever - } - podSpec.RestartPolicy = restartPolicy - - cronJob := batchv2alpha1.CronJob{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: labels, - }, - Spec: batchv2alpha1.CronJobSpec{ - Schedule: params["schedule"], - ConcurrencyPolicy: batchv2alpha1.AllowConcurrent, - JobTemplate: batchv2alpha1.JobTemplateSpec{ - Spec: batchv1.JobSpec{ - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: labels, - }, - Spec: *podSpec, - }, - }, - }, - }, - } - - return &cronJob, nil -} - -type CronJobV1Beta1 struct{} - -func (CronJobV1Beta1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"labels", false}, - {"default-name", false}, - {"name", true}, - {"image", true}, - {"image-pull-policy", false}, - {"port", false}, - {"hostport", false}, - {"stdin", false}, - {"leave-stdin-open", false}, - {"tty", false}, - {"command", false}, - {"args", false}, - {"env", false}, - {"requests", false}, - {"limits", false}, - {"restart", false}, - {"schedule", true}, - {"serviceaccount", false}, - } -} - -func (CronJobV1Beta1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - args, err := getArgs(genericParams) - if err != nil { - return nil, err - } - - envs, err := getEnvs(genericParams) - if err != nil { - return nil, err - } - - params, err := getParams(genericParams) - if err != nil { - return nil, err - } - - name, err := getName(params) - if err != nil { - return nil, err - } - - labels, err := getLabels(params, name) - if err != nil { - return nil, err - } - - podSpec, err := makePodSpec(params, name) - if err != nil { - return nil, err - } - - imagePullPolicy := v1.PullPolicy(params["image-pull-policy"]) - if err = updatePodContainers(params, args, envs, imagePullPolicy, podSpec); err != nil { - return nil, err - } - - leaveStdinOpen, err := GetBool(params, "leave-stdin-open", false) - if err != nil { - return nil, err - } - podSpec.Containers[0].StdinOnce = !leaveStdinOpen && podSpec.Containers[0].Stdin - - if err := updatePodPorts(params, podSpec); err != nil { - return nil, err - } - - restartPolicy := v1.RestartPolicy(params["restart"]) - if len(restartPolicy) == 0 { - restartPolicy = v1.RestartPolicyNever - } - podSpec.RestartPolicy = restartPolicy - - cronJob := batchv1beta1.CronJob{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: labels, - }, - Spec: batchv1beta1.CronJobSpec{ - Schedule: params["schedule"], - ConcurrencyPolicy: batchv1beta1.AllowConcurrent, - JobTemplate: batchv1beta1.JobTemplateSpec{ - Spec: batchv1.JobSpec{ - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: labels, - }, - Spec: *podSpec, - }, - }, - }, - }, - } - - return &cronJob, nil -} - -type BasicReplicationController struct{} - -func (BasicReplicationController) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"labels", false}, - {"default-name", false}, - {"name", true}, - {"replicas", true}, - {"image", true}, - {"image-pull-policy", false}, - {"port", false}, - 
{"hostport", false}, - {"stdin", false}, - {"tty", false}, - {"command", false}, - {"args", false}, - {"env", false}, - {"requests", false}, - {"limits", false}, - {"serviceaccount", false}, - } -} - -// populateResourceListV1 takes strings of form =,= -// and returns ResourceList. -func populateResourceListV1(spec string) (v1.ResourceList, error) { - // empty input gets a nil response to preserve generator test expected behaviors - if spec == "" { - return nil, nil - } - - result := v1.ResourceList{} - resourceStatements := strings.Split(spec, ",") - for _, resourceStatement := range resourceStatements { - parts := strings.Split(resourceStatement, "=") - if len(parts) != 2 { - return nil, fmt.Errorf("Invalid argument syntax %v, expected =", resourceStatement) - } - resourceName := v1.ResourceName(parts[0]) - resourceQuantity, err := resource.ParseQuantity(parts[1]) - if err != nil { - return nil, err - } - result[resourceName] = resourceQuantity - } - return result, nil -} - -// HandleResourceRequirementsV1 parses the limits and requests parameters if specified -// and returns ResourceRequirements. -func HandleResourceRequirementsV1(params map[string]string) (v1.ResourceRequirements, error) { - result := v1.ResourceRequirements{} - limits, err := populateResourceListV1(params["limits"]) - if err != nil { - return result, err - } - result.Limits = limits - requests, err := populateResourceListV1(params["requests"]) - if err != nil { - return result, err - } - result.Requests = requests - return result, nil -} - -// makePodSpec returns PodSpec filled with passed parameters. -func makePodSpec(params map[string]string, name string) (*v1.PodSpec, error) { - stdin, err := GetBool(params, "stdin", false) - if err != nil { - return nil, err - } - - tty, err := GetBool(params, "tty", false) - if err != nil { - return nil, err - } - - resourceRequirements, err := HandleResourceRequirementsV1(params) - if err != nil { - return nil, err - } - - spec := v1.PodSpec{ - ServiceAccountName: params["serviceaccount"], - Containers: []v1.Container{ - { - Name: name, - Image: params["image"], - Stdin: stdin, - TTY: tty, - Resources: resourceRequirements, - }, - }, - } - return &spec, nil -} - -func (BasicReplicationController) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - args, err := getArgs(genericParams) - if err != nil { - return nil, err - } - - envs, err := getEnvs(genericParams) - if err != nil { - return nil, err - } - - params, err := getParams(genericParams) - if err != nil { - return nil, err - } - - name, err := getName(params) - if err != nil { - return nil, err - } - - labels, err := getLabels(params, name) - if err != nil { - return nil, err - } - - count, err := strconv.Atoi(params["replicas"]) - if err != nil { - return nil, err - } - - podSpec, err := makePodSpec(params, name) - if err != nil { - return nil, err - } - - imagePullPolicy := v1.PullPolicy(params["image-pull-policy"]) - if err = updatePodContainers(params, args, envs, imagePullPolicy, podSpec); err != nil { - return nil, err - } - - if err := updatePodPorts(params, podSpec); err != nil { - return nil, err - } - - count32 := int32(count) - controller := v1.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: labels, - }, - Spec: v1.ReplicationControllerSpec{ - Replicas: &count32, - Selector: labels, - Template: &v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: labels, - }, - Spec: *podSpec, - }, - }, - } - return &controller, nil -} - -// updatePodContainers 
updates PodSpec.Containers with passed parameters. -func updatePodContainers(params map[string]string, args []string, envs []v1.EnvVar, imagePullPolicy v1.PullPolicy, podSpec *v1.PodSpec) error { - if len(args) > 0 { - command, err := GetBool(params, "command", false) - if err != nil { - return err - } - if command { - podSpec.Containers[0].Command = args - } else { - podSpec.Containers[0].Args = args - } - } - - if len(envs) > 0 { - podSpec.Containers[0].Env = envs - } - - if len(imagePullPolicy) > 0 { - // imagePullPolicy should be valid here since we have verified it before. - podSpec.Containers[0].ImagePullPolicy = imagePullPolicy - } - return nil -} - -// updatePodContainers updates PodSpec.Containers.Ports with passed parameters. -func updatePodPorts(params map[string]string, podSpec *v1.PodSpec) (err error) { - port := -1 - hostPort := -1 - if len(params["port"]) > 0 { - port, err = strconv.Atoi(params["port"]) - if err != nil { - return err - } - } - - if len(params["hostport"]) > 0 { - hostPort, err = strconv.Atoi(params["hostport"]) - if err != nil { - return err - } - if hostPort > 0 && port < 0 { - return fmt.Errorf("--hostport requires --port to be specified") - } - } - - // Don't include the port if it was not specified. - if len(params["port"]) > 0 { - podSpec.Containers[0].Ports = []v1.ContainerPort{ - { - ContainerPort: int32(port), - }, - } - if hostPort > 0 { - podSpec.Containers[0].Ports[0].HostPort = int32(hostPort) - } - } - return nil -} - -type BasicPod struct{} - -func (BasicPod) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"labels", false}, - {"default-name", false}, - {"name", true}, - {"image", true}, - {"image-pull-policy", false}, - {"port", false}, - {"hostport", false}, - {"stdin", false}, - {"leave-stdin-open", false}, - {"tty", false}, - {"restart", false}, - {"command", false}, - {"args", false}, - {"env", false}, - {"requests", false}, - {"limits", false}, - {"serviceaccount", false}, - } -} - -func (BasicPod) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - args, err := getArgs(genericParams) - if err != nil { - return nil, err - } - - envs, err := getEnvs(genericParams) - if err != nil { - return nil, err - } - - params, err := getParams(genericParams) - if err != nil { - return nil, err - } - - name, err := getName(params) - if err != nil { - return nil, err - } - - labels, err := getLabels(params, name) - if err != nil { - return nil, err - } - - stdin, err := GetBool(params, "stdin", false) - if err != nil { - return nil, err - } - leaveStdinOpen, err := GetBool(params, "leave-stdin-open", false) - if err != nil { - return nil, err - } - - tty, err := GetBool(params, "tty", false) - if err != nil { - return nil, err - } - - resourceRequirements, err := HandleResourceRequirementsV1(params) - if err != nil { - return nil, err - } - - restartPolicy := v1.RestartPolicy(params["restart"]) - if len(restartPolicy) == 0 { - restartPolicy = v1.RestartPolicyAlways - } - pod := v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: labels, - }, - Spec: v1.PodSpec{ - ServiceAccountName: params["serviceaccount"], - Containers: []v1.Container{ - { - Name: name, - Image: params["image"], - Stdin: stdin, - StdinOnce: !leaveStdinOpen && stdin, - TTY: tty, - Resources: resourceRequirements, - }, - }, - DNSPolicy: v1.DNSClusterFirst, - RestartPolicy: restartPolicy, - }, - } - imagePullPolicy := v1.PullPolicy(params["image-pull-policy"]) - if err = updatePodContainers(params, args, envs, imagePullPolicy, &pod.Spec); 
err != nil { - return nil, err - } - - if err := updatePodPorts(params, &pod.Spec); err != nil { - return nil, err - } - return &pod, nil -} - -// parseEnvs converts string into EnvVar objects. -func parseEnvs(envArray []string) ([]v1.EnvVar, error) { - envs := make([]v1.EnvVar, 0, len(envArray)) - for _, env := range envArray { - pos := strings.Index(env, "=") - if pos == -1 { - return nil, fmt.Errorf("invalid env: %v", env) - } - name := env[:pos] - value := env[pos+1:] - if len(name) == 0 { - return nil, fmt.Errorf("invalid env: %v", env) - } - if len(validation.IsEnvVarName(name)) != 0 { - return nil, fmt.Errorf("invalid env: %v", env) - } - envVar := v1.EnvVar{Name: name, Value: value} - envs = append(envs, envVar) - } - return envs, nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go index f1d46b7a110b..54f96fd2c340 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go @@ -175,7 +175,7 @@ func scaleHasDesiredReplicas(sClient scaleclient.ScalesGetter, gr schema.GroupRe // or returns error when timeout happens func WaitForScaleHasDesiredReplicas(sClient scaleclient.ScalesGetter, gr schema.GroupResource, resourceName string, namespace string, newSize uint, waitForReplicas *RetryParams) error { if waitForReplicas == nil { - return fmt.Errorf("waitForReplicas parameter cannot be nil!") + return fmt.Errorf("waitForReplicas parameter cannot be nil") } err := wait.PollImmediate( waitForReplicas.Interval, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/scheme/scheme.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/scheme/scheme.go index 880b115b178a..a7b0833f6d14 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/scheme/scheme.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/scheme/scheme.go @@ -30,6 +30,9 @@ var Scheme = runtime.NewScheme() // Codecs provides access to encoding and decoding for the scheme var Codecs = serializer.NewCodecFactory(Scheme) +// ParameterCodec handles versioning of objects that are converted to query parameters. +var ParameterCodec = runtime.NewParameterCodec(Scheme) + // DefaultJSONEncoder returns a default encoder for our scheme func DefaultJSONEncoder() runtime.Encoder { return unstructured.JSONFallbackEncoder{Encoder: Codecs.LegacyCodec(Scheme.PrioritizedVersionsAllGroups()...)} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/secret.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/secret.go deleted file mode 100644 index 9a0ff5281d41..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/secret.go +++ /dev/null @@ -1,271 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubectl - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "strings" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/validation" - "k8s.io/kubernetes/pkg/kubectl/util" - "k8s.io/kubernetes/pkg/kubectl/util/hash" -) - -// SecretGeneratorV1 supports stable generation of an opaque secret -type SecretGeneratorV1 struct { - // Name of secret (required) - Name string - // Type of secret (optional) - Type string - // FileSources to derive the secret from (optional) - FileSources []string - // LiteralSources to derive the secret from (optional) - LiteralSources []string - // EnvFileSource to derive the secret from (optional) - EnvFileSource string - // AppendHash; if true, derive a hash from the Secret data and type and append it to the name - AppendHash bool -} - -// Ensure it supports the generator pattern that uses parameter injection -var _ Generator = &SecretGeneratorV1{} - -// Ensure it supports the generator pattern that uses parameters specified during construction -var _ StructuredGenerator = &SecretGeneratorV1{} - -// Generate returns a secret using the specified parameters -func (s SecretGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), genericParams) - if err != nil { - return nil, err - } - delegate := &SecretGeneratorV1{} - fromFileStrings, found := genericParams["from-file"] - if found { - fromFileArray, isArray := fromFileStrings.([]string) - if !isArray { - return nil, fmt.Errorf("expected []string, found :%v", fromFileStrings) - } - delegate.FileSources = fromFileArray - delete(genericParams, "from-file") - } - fromLiteralStrings, found := genericParams["from-literal"] - if found { - fromLiteralArray, isArray := fromLiteralStrings.([]string) - if !isArray { - return nil, fmt.Errorf("expected []string, found :%v", fromLiteralStrings) - } - delegate.LiteralSources = fromLiteralArray - delete(genericParams, "from-literal") - } - fromEnvFileString, found := genericParams["from-env-file"] - if found { - fromEnvFile, isString := fromEnvFileString.(string) - if !isString { - return nil, fmt.Errorf("expected string, found :%v", fromEnvFileString) - } - delegate.EnvFileSource = fromEnvFile - delete(genericParams, "from-env-file") - } - - hashParam, found := genericParams["append-hash"] - if found { - hashBool, isBool := hashParam.(bool) - if !isBool { - return nil, fmt.Errorf("expected bool, found :%v", hashParam) - } - delegate.AppendHash = hashBool - delete(genericParams, "append-hash") - } - - params := map[string]string{} - for key, value := range genericParams { - strVal, isString := value.(string) - if !isString { - return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) - } - params[key] = strVal - } - delegate.Name = params["name"] - delegate.Type = params["type"] - - return delegate.StructuredGenerate() -} - -// ParamNames returns the set of supported input parameters when using the parameter injection generator pattern -func (s SecretGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"type", false}, - {"from-file", false}, - {"from-literal", false}, - {"from-env-file", false}, - {"force", false}, - {"append-hash", false}, - } -} - -// StructuredGenerate outputs a secret object using the configured fields -func (s SecretGeneratorV1) StructuredGenerate() (runtime.Object, error) { - if err := s.validate(); err != nil { - return nil, err - } - secret := &v1.Secret{} - 
secret.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("Secret")) - secret.Name = s.Name - secret.Data = map[string][]byte{} - if len(s.Type) > 0 { - secret.Type = v1.SecretType(s.Type) - } - if len(s.FileSources) > 0 { - if err := handleFromFileSources(secret, s.FileSources); err != nil { - return nil, err - } - } - if len(s.LiteralSources) > 0 { - if err := handleFromLiteralSources(secret, s.LiteralSources); err != nil { - return nil, err - } - } - if len(s.EnvFileSource) > 0 { - if err := handleFromEnvFileSource(secret, s.EnvFileSource); err != nil { - return nil, err - } - } - if s.AppendHash { - h, err := hash.SecretHash(secret) - if err != nil { - return nil, err - } - secret.Name = fmt.Sprintf("%s-%s", secret.Name, h) - } - return secret, nil -} - -// validate validates required fields are set to support structured generation -func (s SecretGeneratorV1) validate() error { - if len(s.Name) == 0 { - return fmt.Errorf("name must be specified") - } - if len(s.EnvFileSource) > 0 && (len(s.FileSources) > 0 || len(s.LiteralSources) > 0) { - return fmt.Errorf("from-env-file cannot be combined with from-file or from-literal") - } - return nil -} - -// handleFromLiteralSources adds the specified literal source information into the provided secret -func handleFromLiteralSources(secret *v1.Secret, literalSources []string) error { - for _, literalSource := range literalSources { - keyName, value, err := util.ParseLiteralSource(literalSource) - if err != nil { - return err - } - if err = addKeyFromLiteralToSecret(secret, keyName, []byte(value)); err != nil { - return err - } - } - return nil -} - -// handleFromFileSources adds the specified file source information into the provided secret -func handleFromFileSources(secret *v1.Secret, fileSources []string) error { - for _, fileSource := range fileSources { - keyName, filePath, err := util.ParseFileSource(fileSource) - if err != nil { - return err - } - info, err := os.Stat(filePath) - if err != nil { - switch err := err.(type) { - case *os.PathError: - return fmt.Errorf("error reading %s: %v", filePath, err.Err) - default: - return fmt.Errorf("error reading %s: %v", filePath, err) - } - } - if info.IsDir() { - if strings.Contains(fileSource, "=") { - return fmt.Errorf("cannot give a key name for a directory path.") - } - fileList, err := ioutil.ReadDir(filePath) - if err != nil { - return fmt.Errorf("error listing files in %s: %v", filePath, err) - } - for _, item := range fileList { - itemPath := path.Join(filePath, item.Name()) - if item.Mode().IsRegular() { - keyName = item.Name() - if err = addKeyFromFileToSecret(secret, keyName, itemPath); err != nil { - return err - } - } - } - } else { - if err := addKeyFromFileToSecret(secret, keyName, filePath); err != nil { - return err - } - } - } - - return nil -} - -// handleFromEnvFileSource adds the specified env file source information -// into the provided secret -func handleFromEnvFileSource(secret *v1.Secret, envFileSource string) error { - info, err := os.Stat(envFileSource) - if err != nil { - switch err := err.(type) { - case *os.PathError: - return fmt.Errorf("error reading %s: %v", envFileSource, err.Err) - default: - return fmt.Errorf("error reading %s: %v", envFileSource, err) - } - } - if info.IsDir() { - return fmt.Errorf("env secret file cannot be a directory") - } - - return addFromEnvFile(envFileSource, func(key, value string) error { - return addKeyFromLiteralToSecret(secret, key, []byte(value)) - }) -} - -func addKeyFromFileToSecret(secret *v1.Secret, keyName, filePath string) 
error { - data, err := ioutil.ReadFile(filePath) - if err != nil { - return err - } - return addKeyFromLiteralToSecret(secret, keyName, data) -} - -func addKeyFromLiteralToSecret(secret *v1.Secret, keyName string, data []byte) error { - if errs := validation.IsConfigMapKey(keyName); len(errs) != 0 { - return fmt.Errorf("%q is not a valid key name for a Secret: %s", keyName, strings.Join(errs, ";")) - } - - if _, entryExists := secret.Data[keyName]; entryExists { - return fmt.Errorf("cannot add key %s, another key by that name already exists: %v.", keyName, secret.Data) - } - secret.Data[keyName] = data - return nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_docker_registry.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_docker_registry.go deleted file mode 100644 index da95e6d45886..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_docker_registry.go +++ /dev/null @@ -1,162 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "encoding/json" - "fmt" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/credentialprovider" - "k8s.io/kubernetes/pkg/kubectl/util/hash" -) - -// SecretForDockerRegistryGeneratorV1 supports stable generation of a docker registry secret -type SecretForDockerRegistryGeneratorV1 struct { - // Name of secret (required) - Name string - // FileSources to derive the secret from (optional) - FileSources []string - // Username for registry (required) - Username string - // Email for registry (optional) - Email string - // Password for registry (required) - Password string - // Server for registry (required) - Server string - // AppendHash; if true, derive a hash from the Secret and append it to the name - AppendHash bool -} - -// Ensure it supports the generator pattern that uses parameter injection -var _ Generator = &SecretForDockerRegistryGeneratorV1{} - -// Ensure it supports the generator pattern that uses parameters specified during construction -var _ StructuredGenerator = &SecretForDockerRegistryGeneratorV1{} - -// Generate returns a secret using the specified parameters -func (s SecretForDockerRegistryGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), genericParams) - if err != nil { - return nil, err - } - delegate := &SecretForDockerRegistryGeneratorV1{} - hashParam, found := genericParams["append-hash"] - if found { - hashBool, isBool := hashParam.(bool) - if !isBool { - return nil, fmt.Errorf("expected bool, found :%v", hashParam) - } - delegate.AppendHash = hashBool - delete(genericParams, "append-hash") - } - params := map[string]string{} - for key, value := range genericParams { - strVal, isString := value.(string) - if !isString { - return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) - } - params[key] = strVal - } - delegate.Name = params["name"] - delegate.Username = 
params["docker-username"] - delegate.Email = params["docker-email"] - delegate.Password = params["docker-password"] - delegate.Server = params["docker-server"] - return delegate.StructuredGenerate() -} - -// StructuredGenerate outputs a secret object using the configured fields -func (s SecretForDockerRegistryGeneratorV1) StructuredGenerate() (runtime.Object, error) { - if err := s.validate(); err != nil { - return nil, err - } - secret := &v1.Secret{} - secret.Name = s.Name - secret.Type = v1.SecretTypeDockerConfigJson - secret.Data = map[string][]byte{} - if len(s.FileSources) > 0 { - if err := handleFromFileSources(secret, s.FileSources); err != nil { - return nil, err - } - } - if len(s.FileSources) == 0 { - dockercfgJsonContent, err := handleDockerCfgJsonContent(s.Username, s.Password, s.Email, s.Server) - if err != nil { - return nil, err - } - secret.Data[v1.DockerConfigJsonKey] = dockercfgJsonContent - } - if s.AppendHash { - h, err := hash.SecretHash(secret) - if err != nil { - return nil, err - } - secret.Name = fmt.Sprintf("%s-%s", secret.Name, h) - } - return secret, nil -} - -// ParamNames returns the set of supported input parameters when using the parameter injection generator pattern -func (s SecretForDockerRegistryGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"from-file", false}, - {"docker-username", true}, - {"docker-email", false}, - {"docker-password", true}, - {"docker-server", true}, - {"append-hash", false}, - } -} - -// validate validates required fields are set to support structured generation -func (s SecretForDockerRegistryGeneratorV1) validate() error { - if len(s.Name) == 0 { - return fmt.Errorf("name must be specified") - } - - if len(s.FileSources) == 0 { - if len(s.Username) == 0 { - return fmt.Errorf("username must be specified") - } - if len(s.Password) == 0 { - return fmt.Errorf("password must be specified") - } - if len(s.Server) == 0 { - return fmt.Errorf("server must be specified") - } - } - return nil -} - -// handleDockerCfgJsonContent serializes a ~/.docker/config.json file -func handleDockerCfgJsonContent(username, password, email, server string) ([]byte, error) { - dockercfgAuth := credentialprovider.DockerConfigEntry{ - Username: username, - Password: password, - Email: email, - } - - dockerCfgJson := credentialprovider.DockerConfigJson{ - Auths: map[string]credentialprovider.DockerConfigEntry{server: dockercfgAuth}, - } - - return json.Marshal(dockerCfgJson) -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_tls.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_tls.go deleted file mode 100644 index 825e759086f5..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_tls.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubectl - -import ( - "crypto/tls" - "fmt" - "io/ioutil" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/kubectl/util/hash" -) - -// SecretForTLSGeneratorV1 supports stable generation of a TLS secret. -type SecretForTLSGeneratorV1 struct { - // Name is the name of this TLS secret. - Name string - // Key is the path to the user's private key. - Key string - // Cert is the path to the user's public key certificate. - Cert string - // AppendHash; if true, derive a hash from the Secret and append it to the name - AppendHash bool -} - -// Ensure it supports the generator pattern that uses parameter injection -var _ Generator = &SecretForTLSGeneratorV1{} - -// Ensure it supports the generator pattern that uses parameters specified during construction -var _ StructuredGenerator = &SecretForTLSGeneratorV1{} - -// Generate returns a secret using the specified parameters -func (s SecretForTLSGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), genericParams) - if err != nil { - return nil, err - } - delegate := &SecretForTLSGeneratorV1{} - hashParam, found := genericParams["append-hash"] - if found { - hashBool, isBool := hashParam.(bool) - if !isBool { - return nil, fmt.Errorf("expected bool, found :%v", hashParam) - } - delegate.AppendHash = hashBool - delete(genericParams, "append-hash") - } - params := map[string]string{} - for key, value := range genericParams { - strVal, isString := value.(string) - if !isString { - return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) - } - params[key] = strVal - } - delegate.Name = params["name"] - delegate.Key = params["key"] - delegate.Cert = params["cert"] - return delegate.StructuredGenerate() -} - -// StructuredGenerate outputs a secret object using the configured fields -func (s SecretForTLSGeneratorV1) StructuredGenerate() (runtime.Object, error) { - if err := s.validate(); err != nil { - return nil, err - } - tlsCrt, err := readFile(s.Cert) - if err != nil { - return nil, err - } - tlsKey, err := readFile(s.Key) - if err != nil { - return nil, err - } - - if _, err := tls.X509KeyPair(tlsCrt, tlsKey); err != nil { - return nil, fmt.Errorf("failed to load key pair %v", err) - } - // TODO: Add more validation. - // 1. If the certificate contains intermediates, it is a valid chain. - // 2. Format etc. - - secret := &v1.Secret{} - secret.Name = s.Name - secret.Type = v1.SecretTypeTLS - secret.Data = map[string][]byte{} - secret.Data[v1.TLSCertKey] = []byte(tlsCrt) - secret.Data[v1.TLSPrivateKeyKey] = []byte(tlsKey) - if s.AppendHash { - h, err := hash.SecretHash(secret) - if err != nil { - return nil, err - } - secret.Name = fmt.Sprintf("%s-%s", secret.Name, h) - } - return secret, nil -} - -// readFile just reads a file into a byte array. -func readFile(file string) ([]byte, error) { - b, err := ioutil.ReadFile(file) - if err != nil { - return []byte{}, fmt.Errorf("Cannot read file %v, %v", file, err) - } - return b, nil -} - -// ParamNames returns the set of supported input parameters when using the parameter injection generator pattern -func (s SecretForTLSGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"key", true}, - {"cert", true}, - {"append-hash", false}, - } -} - -// validate validates required fields are set to support structured generation -func (s SecretForTLSGeneratorV1) validate() error { - // TODO: This is not strictly necessary. 
We can generate a self signed cert - // if no key/cert is given. The only requirement is that we either get both - // or none. See test/e2e/ingress_utils for self signed cert generation. - if len(s.Key) == 0 { - return fmt.Errorf("key must be specified") - } - if len(s.Cert) == 0 { - return fmt.Errorf("certificate must be specified") - } - return nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/service.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/service.go deleted file mode 100644 index 7f0e0d869f72..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/service.go +++ /dev/null @@ -1,239 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - "strconv" - "strings" - - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/intstr" -) - -// The only difference between ServiceGeneratorV1 and V2 is that the service port is named "default" in V1, while it is left unnamed in V2. -type ServiceGeneratorV1 struct{} - -func (ServiceGeneratorV1) ParamNames() []GeneratorParam { - return paramNames() -} - -func (ServiceGeneratorV1) Generate(params map[string]interface{}) (runtime.Object, error) { - params["port-name"] = "default" - return generate(params) -} - -type ServiceGeneratorV2 struct{} - -func (ServiceGeneratorV2) ParamNames() []GeneratorParam { - return paramNames() -} - -func (ServiceGeneratorV2) Generate(params map[string]interface{}) (runtime.Object, error) { - return generate(params) -} - -func paramNames() []GeneratorParam { - return []GeneratorParam{ - {"default-name", true}, - {"name", false}, - {"selector", true}, - // port will be used if a user specifies --port OR the exposed object - // has one port - {"port", false}, - // ports will be used iff a user doesn't specify --port AND the - // exposed object has multiple ports - {"ports", false}, - {"labels", false}, - {"external-ip", false}, - {"load-balancer-ip", false}, - {"type", false}, - {"protocol", false}, - // protocols will be used to keep port-protocol mapping derived from - // exposed object - {"protocols", false}, - {"container-port", false}, // alias of target-port - {"target-port", false}, - {"port-name", false}, - {"session-affinity", false}, - {"cluster-ip", false}, - } -} - -func generate(genericParams map[string]interface{}) (runtime.Object, error) { - params := map[string]string{} - for key, value := range genericParams { - strVal, isString := value.(string) - if !isString { - return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) - } - params[key] = strVal - } - selectorString, found := params["selector"] - if !found || len(selectorString) == 0 { - return nil, fmt.Errorf("'selector' is a required parameter.") - } - selector, err := ParseLabels(selectorString) - if err != nil { - return nil, err - } - - labelsString, found := params["labels"] - var labels map[string]string - if found && 
len(labelsString) > 0 { - labels, err = ParseLabels(labelsString) - if err != nil { - return nil, err - } - } - - name, found := params["name"] - if !found || len(name) == 0 { - name, found = params["default-name"] - if !found || len(name) == 0 { - return nil, fmt.Errorf("'name' is a required parameter.") - } - } - - isHeadlessService := params["cluster-ip"] == "None" - - ports := []v1.ServicePort{} - servicePortName, found := params["port-name"] - if !found { - // Leave the port unnamed. - servicePortName = "" - } - - protocolsString, found := params["protocols"] - var portProtocolMap map[string]string - if found && len(protocolsString) > 0 { - portProtocolMap, err = ParseProtocols(protocolsString) - if err != nil { - return nil, err - } - } - // ports takes precedence over port since it will be - // specified only when the user hasn't specified a port - // via --port and the exposed object has multiple ports. - var portString string - if portString, found = params["ports"]; !found { - portString, found = params["port"] - if !found && !isHeadlessService { - return nil, fmt.Errorf("'ports' or 'port' is a required parameter.") - } - } - - if portString != "" { - portStringSlice := strings.Split(portString, ",") - for i, stillPortString := range portStringSlice { - port, err := strconv.Atoi(stillPortString) - if err != nil { - return nil, err - } - name := servicePortName - // If we are going to assign multiple ports to a service, we need to - // generate a different name for each one. - if len(portStringSlice) > 1 { - name = fmt.Sprintf("port-%d", i+1) - } - protocol := params["protocol"] - - switch { - case len(protocol) == 0 && len(portProtocolMap) == 0: - // Default to TCP, what the flag was doing previously. - protocol = "TCP" - case len(protocol) > 0 && len(portProtocolMap) > 0: - // User has specified the --protocol while exposing a multiprotocol resource - // We should stomp multiple protocols with the one specified ie. 
do nothing - case len(protocol) == 0 && len(portProtocolMap) > 0: - // no --protocol and we expose a multiprotocol resource - protocol = "TCP" // have the default so we can stay sane - if exposeProtocol, found := portProtocolMap[stillPortString]; found { - protocol = exposeProtocol - } - } - ports = append(ports, v1.ServicePort{ - Name: name, - Port: int32(port), - Protocol: v1.Protocol(protocol), - }) - } - } - - service := v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: labels, - }, - Spec: v1.ServiceSpec{ - Selector: selector, - Ports: ports, - }, - } - targetPortString := params["target-port"] - if len(targetPortString) == 0 { - targetPortString = params["container-port"] - } - if len(targetPortString) > 0 { - var targetPort intstr.IntOrString - if portNum, err := strconv.Atoi(targetPortString); err != nil { - targetPort = intstr.FromString(targetPortString) - } else { - targetPort = intstr.FromInt(portNum) - } - // Use the same target-port for every port - for i := range service.Spec.Ports { - service.Spec.Ports[i].TargetPort = targetPort - } - } else { - // If --target-port or --container-port haven't been specified, this - // should be the same as Port - for i := range service.Spec.Ports { - port := service.Spec.Ports[i].Port - service.Spec.Ports[i].TargetPort = intstr.FromInt(int(port)) - } - } - if len(params["external-ip"]) > 0 { - service.Spec.ExternalIPs = []string{params["external-ip"]} - } - if len(params["type"]) != 0 { - service.Spec.Type = v1.ServiceType(params["type"]) - } - if service.Spec.Type == v1.ServiceTypeLoadBalancer { - service.Spec.LoadBalancerIP = params["load-balancer-ip"] - } - if len(params["session-affinity"]) != 0 { - switch v1.ServiceAffinity(params["session-affinity"]) { - case v1.ServiceAffinityNone: - service.Spec.SessionAffinity = v1.ServiceAffinityNone - case v1.ServiceAffinityClientIP: - service.Spec.SessionAffinity = v1.ServiceAffinityClientIP - default: - return nil, fmt.Errorf("unknown session affinity: %s", params["session-affinity"]) - } - } - if len(params["cluster-ip"]) != 0 { - if params["cluster-ip"] == "None" { - service.Spec.ClusterIP = v1.ClusterIPNone - } else { - service.Spec.ClusterIP = params["cluster-ip"] - } - } - return &service, nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/service_basic.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/service_basic.go deleted file mode 100644 index 1f31bd09a2a3..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/service_basic.go +++ /dev/null @@ -1,259 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubectl - -import ( - "fmt" - "strconv" - "strings" - - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/apimachinery/pkg/util/validation" -) - -type ServiceCommonGeneratorV1 struct { - Name string - TCP []string - Type v1.ServiceType - ClusterIP string - NodePort int - ExternalName string -} - -type ServiceClusterIPGeneratorV1 struct { - ServiceCommonGeneratorV1 -} - -type ServiceNodePortGeneratorV1 struct { - ServiceCommonGeneratorV1 -} - -type ServiceLoadBalancerGeneratorV1 struct { - ServiceCommonGeneratorV1 -} - -// TODO: is this really necessary? -type ServiceExternalNameGeneratorV1 struct { - ServiceCommonGeneratorV1 -} - -func (ServiceClusterIPGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"tcp", true}, - {"clusterip", false}, - } -} -func (ServiceNodePortGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"tcp", true}, - {"nodeport", true}, - } -} -func (ServiceLoadBalancerGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"tcp", true}, - } -} - -func (ServiceExternalNameGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"externalname", true}, - } -} - -func parsePorts(portString string) (int32, intstr.IntOrString, error) { - portStringSlice := strings.Split(portString, ":") - - port, err := strconv.Atoi(portStringSlice[0]) - if err != nil { - return 0, intstr.FromInt(0), err - } - - if errs := validation.IsValidPortNum(port); len(errs) != 0 { - return 0, intstr.FromInt(0), fmt.Errorf(strings.Join(errs, ",")) - } - - if len(portStringSlice) == 1 { - return int32(port), intstr.FromInt(int(port)), nil - } - - var targetPort intstr.IntOrString - if portNum, err := strconv.Atoi(portStringSlice[1]); err != nil { - if errs := validation.IsValidPortName(portStringSlice[1]); len(errs) != 0 { - return 0, intstr.FromInt(0), fmt.Errorf(strings.Join(errs, ",")) - } - targetPort = intstr.FromString(portStringSlice[1]) - } else { - if errs := validation.IsValidPortNum(portNum); len(errs) != 0 { - return 0, intstr.FromInt(0), fmt.Errorf(strings.Join(errs, ",")) - } - targetPort = intstr.FromInt(portNum) - } - return int32(port), targetPort, nil -} - -func (s ServiceCommonGeneratorV1) GenerateCommon(params map[string]interface{}) error { - name, isString := params["name"].(string) - if !isString { - return fmt.Errorf("expected string, saw %v for 'name'", name) - } - tcpStrings, isArray := params["tcp"].([]string) - if !isArray { - return fmt.Errorf("expected []string, found :%v", tcpStrings) - } - clusterip, isString := params["clusterip"].(string) - if !isString { - return fmt.Errorf("expected string, saw %v for 'clusterip'", clusterip) - } - externalname, isString := params["externalname"].(string) - if !isString { - return fmt.Errorf("expected string, saw %v for 'externalname'", externalname) - } - s.Name = name - s.TCP = tcpStrings - s.ClusterIP = clusterip - s.ExternalName = externalname - return nil -} - -func (s ServiceLoadBalancerGeneratorV1) Generate(params map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), params) - if err != nil { - return nil, err - } - delegate := &ServiceCommonGeneratorV1{Type: v1.ServiceTypeLoadBalancer, ClusterIP: ""} - err = delegate.GenerateCommon(params) - if err != nil { - return nil, err - } - return delegate.StructuredGenerate() -} - 
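All of the deleted basic service generators share one delegation pattern: Generate validates the injected parameters, copies them into a ServiceCommonGeneratorV1 with the service type fixed, and defers to StructuredGenerate. A minimal sketch of driving the typed path directly, assuming the pre-removal vendored import path and hypothetical values:

```go
package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/kubectl" // import path as vendored before this removal (assumption)
)

func main() {
	gen := kubectl.ServiceClusterIPGeneratorV1{
		ServiceCommonGeneratorV1: kubectl.ServiceCommonGeneratorV1{
			Name: "demo",              // hypothetical service name
			TCP:  []string{"80:8080"}, // "port:targetPort" pairs parsed by parsePorts
			Type: v1.ServiceTypeClusterIP,
		},
	}
	// StructuredGenerate validates the fields, then builds a v1.Service whose
	// selector and "app" label both default to the service name.
	obj, err := gen.StructuredGenerate()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", obj)
}
```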
-func (s ServiceNodePortGeneratorV1) Generate(params map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), params) - if err != nil { - return nil, err - } - delegate := &ServiceCommonGeneratorV1{Type: v1.ServiceTypeNodePort, ClusterIP: ""} - err = delegate.GenerateCommon(params) - if err != nil { - return nil, err - } - return delegate.StructuredGenerate() -} - -func (s ServiceClusterIPGeneratorV1) Generate(params map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), params) - if err != nil { - return nil, err - } - delegate := &ServiceCommonGeneratorV1{Type: v1.ServiceTypeClusterIP, ClusterIP: ""} - err = delegate.GenerateCommon(params) - if err != nil { - return nil, err - } - return delegate.StructuredGenerate() -} - -func (s ServiceExternalNameGeneratorV1) Generate(params map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), params) - if err != nil { - return nil, err - } - delegate := &ServiceCommonGeneratorV1{Type: v1.ServiceTypeExternalName, ClusterIP: ""} - err = delegate.GenerateCommon(params) - if err != nil { - return nil, err - } - return delegate.StructuredGenerate() -} - -// validate validates required fields are set to support structured generation -// TODO(xiangpengzhao): validate ports are identity mapped for headless service when we enforce that in validation.validateServicePort. -func (s ServiceCommonGeneratorV1) validate() error { - if len(s.Name) == 0 { - return fmt.Errorf("name must be specified") - } - if len(s.Type) == 0 { - return fmt.Errorf("type must be specified") - } - if s.ClusterIP == v1.ClusterIPNone && s.Type != v1.ServiceTypeClusterIP { - return fmt.Errorf("ClusterIP=None can only be used with ClusterIP service type") - } - if s.ClusterIP != v1.ClusterIPNone && len(s.TCP) == 0 && s.Type != v1.ServiceTypeExternalName { - return fmt.Errorf("at least one tcp port specifier must be provided") - } - if s.Type == v1.ServiceTypeExternalName { - if errs := validation.IsDNS1123Subdomain(s.ExternalName); len(errs) != 0 { - return fmt.Errorf("invalid service external name %s", s.ExternalName) - } - } - return nil -} - -func (s ServiceCommonGeneratorV1) StructuredGenerate() (runtime.Object, error) { - err := s.validate() - if err != nil { - return nil, err - } - ports := []v1.ServicePort{} - for _, tcpString := range s.TCP { - port, targetPort, err := parsePorts(tcpString) - if err != nil { - return nil, err - } - - portName := strings.Replace(tcpString, ":", "-", -1) - ports = append(ports, v1.ServicePort{ - Name: portName, - Port: port, - TargetPort: targetPort, - Protocol: v1.Protocol("TCP"), - NodePort: int32(s.NodePort), - }) - } - - // setup default label and selector - labels := map[string]string{} - labels["app"] = s.Name - selector := map[string]string{} - selector["app"] = s.Name - - service := v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: s.Name, - Labels: labels, - }, - Spec: v1.ServiceSpec{ - Type: v1.ServiceType(s.Type), - Selector: selector, - Ports: ports, - ExternalName: s.ExternalName, - }, - } - if len(s.ClusterIP) > 0 { - service.Spec.ClusterIP = s.ClusterIP - } - return &service, nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/serviceaccount.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/serviceaccount.go deleted file mode 100644 index fa701e140a24..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/serviceaccount.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2016 
The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -// ServiceAccountGeneratorV1 supports stable generation of a service account -type ServiceAccountGeneratorV1 struct { - // Name of service account - Name string -} - -// Ensure it supports the generator pattern that uses parameters specified during construction -var _ StructuredGenerator = &ServiceAccountGeneratorV1{} - -// StructuredGenerate outputs a service account object using the configured fields -func (g *ServiceAccountGeneratorV1) StructuredGenerate() (runtime.Object, error) { - if err := g.validate(); err != nil { - return nil, err - } - serviceAccount := &v1.ServiceAccount{} - serviceAccount.Name = g.Name - return serviceAccount, nil -} - -// validate validates required fields are set to support structured generation -func (g *ServiceAccountGeneratorV1) validate() error { - if len(g.Name) == 0 { - return fmt.Errorf("name must be specified") - } - return nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/sorter.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/sorter.go deleted file mode 100644 index b7c339dfb2bb..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/sorter.go +++ /dev/null @@ -1,380 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - "io" - "reflect" - "sort" - - "github.com/golang/glog" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/util/integer" - "k8s.io/client-go/util/jsonpath" - "k8s.io/kubernetes/pkg/printers" - - "vbom.ml/util/sortorder" -) - -// Sorting printer sorts list types before delegating to another printer. 
-// Non-list types are simply passed through -type SortingPrinter struct { - SortField string - Delegate printers.ResourcePrinter - Decoder runtime.Decoder -} - -func (s *SortingPrinter) PrintObj(obj runtime.Object, out io.Writer) error { - if !meta.IsListType(obj) { - return s.Delegate.PrintObj(obj, out) - } - - if err := s.sortObj(obj); err != nil { - return err - } - return s.Delegate.PrintObj(obj, out) -} - -func (s *SortingPrinter) sortObj(obj runtime.Object) error { - objs, err := meta.ExtractList(obj) - if err != nil { - return err - } - if len(objs) == 0 { - return nil - } - - sorter, err := SortObjects(s.Decoder, objs, s.SortField) - if err != nil { - return err - } - - switch list := obj.(type) { - case *v1.List: - outputList := make([]runtime.RawExtension, len(objs)) - for ix := range objs { - outputList[ix] = list.Items[sorter.OriginalPosition(ix)] - } - list.Items = outputList - return nil - } - return meta.SetList(obj, objs) -} - -func SortObjects(decoder runtime.Decoder, objs []runtime.Object, fieldInput string) (*RuntimeSort, error) { - for ix := range objs { - item := objs[ix] - switch u := item.(type) { - case *runtime.Unknown: - var err error - // decode runtime.Unknown to runtime.Unstructured for sorting. - // we don't actually want the internal versions of known types. - if objs[ix], _, err = decoder.Decode(u.Raw, nil, &unstructured.Unstructured{}); err != nil { - return nil, err - } - } - } - - field, err := printers.RelaxedJSONPathExpression(fieldInput) - if err != nil { - return nil, err - } - - parser := jsonpath.New("sorting").AllowMissingKeys(true) - if err := parser.Parse(field); err != nil { - return nil, err - } - - // We don't do any model validation here, so we traverse all objects to be sorted - // and, if the field is valid to at least one of them, we consider it to be a - // valid field; otherwise error out. - // Note that this requires empty fields to be considered later, when sorting. 
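- // Objects that lack the field entirely still sort: RuntimeSort.Less treats an
- // empty i-side value as "less", so such objects end up first in the output.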
- var fieldFoundOnce bool - for _, obj := range objs { - values, err := findJSONPathResults(parser, obj) - if err != nil { - return nil, err - } - if len(values) > 0 && len(values[0]) > 0 { - fieldFoundOnce = true - break - } - } - if !fieldFoundOnce { - return nil, fmt.Errorf("couldn't find any field with path %q in the list of objects", field) - } - - sorter := NewRuntimeSort(field, objs) - sort.Sort(sorter) - return sorter, nil -} - -// RuntimeSort is an implementation of the golang sort interface that knows how to sort -// lists of runtime.Object -type RuntimeSort struct { - field string - objs []runtime.Object - origPosition []int -} - -func NewRuntimeSort(field string, objs []runtime.Object) *RuntimeSort { - sorter := &RuntimeSort{field: field, objs: objs, origPosition: make([]int, len(objs))} - for ix := range objs { - sorter.origPosition[ix] = ix - } - return sorter -} - -func (r *RuntimeSort) Len() int { - return len(r.objs) -} - -func (r *RuntimeSort) Swap(i, j int) { - r.objs[i], r.objs[j] = r.objs[j], r.objs[i] - r.origPosition[i], r.origPosition[j] = r.origPosition[j], r.origPosition[i] -} - -func isLess(i, j reflect.Value) (bool, error) { - switch i.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return i.Int() < j.Int(), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return i.Uint() < j.Uint(), nil - case reflect.Float32, reflect.Float64: - return i.Float() < j.Float(), nil - case reflect.String: - return sortorder.NaturalLess(i.String(), j.String()), nil - case reflect.Ptr: - return isLess(i.Elem(), j.Elem()) - case reflect.Struct: - // sort metav1.Time - in := i.Interface() - if t, ok := in.(metav1.Time); ok { - time := j.Interface().(metav1.Time) - return t.Before(&time), nil - } - // fallback to the fields comparison - for idx := 0; idx < i.NumField(); idx++ { - less, err := isLess(i.Field(idx), j.Field(idx)) - if err != nil || !less { - return less, err - } - } - return true, nil - case reflect.Array, reflect.Slice: - // note: the length of i and j may be different - for idx := 0; idx < integer.IntMin(i.Len(), j.Len()); idx++ { - less, err := isLess(i.Index(idx), j.Index(idx)) - if err != nil || !less { - return less, err - } - } - return true, nil - - case reflect.Interface: - switch itype := i.Interface().(type) { - case uint8: - if jtype, ok := j.Interface().(uint8); ok { - return itype < jtype, nil - } - case uint16: - if jtype, ok := j.Interface().(uint16); ok { - return itype < jtype, nil - } - case uint32: - if jtype, ok := j.Interface().(uint32); ok { - return itype < jtype, nil - } - case uint64: - if jtype, ok := j.Interface().(uint64); ok { - return itype < jtype, nil - } - case int8: - if jtype, ok := j.Interface().(int8); ok { - return itype < jtype, nil - } - case int16: - if jtype, ok := j.Interface().(int16); ok { - return itype < jtype, nil - } - case int32: - if jtype, ok := j.Interface().(int32); ok { - return itype < jtype, nil - } - case int64: - if jtype, ok := j.Interface().(int64); ok { - return itype < jtype, nil - } - case uint: - if jtype, ok := j.Interface().(uint); ok { - return itype < jtype, nil - } - case int: - if jtype, ok := j.Interface().(int); ok { - return itype < jtype, nil - } - case float32: - if jtype, ok := j.Interface().(float32); ok { - return itype < jtype, nil - } - case float64: - if jtype, ok := j.Interface().(float64); ok { - return itype < jtype, nil - } - case string: - if jtype, ok := j.Interface().(string); ok { - return 
sortorder.NaturalLess(itype, jtype), nil - } - default: - return false, fmt.Errorf("unsortable type: %T", itype) - } - return false, fmt.Errorf("unsortable interface: %v", i.Kind()) - - default: - return false, fmt.Errorf("unsortable type: %v", i.Kind()) - } -} - -func (r *RuntimeSort) Less(i, j int) bool { - iObj := r.objs[i] - jObj := r.objs[j] - - var iValues [][]reflect.Value - var jValues [][]reflect.Value - var err error - - parser := jsonpath.New("sorting").AllowMissingKeys(true) - err = parser.Parse(r.field) - if err != nil { - panic(err) - } - - iValues, err = findJSONPathResults(parser, iObj) - if err != nil { - glog.Fatalf("Failed to get i values for %#v using %s (%#v)", iObj, r.field, err) - } - - jValues, err = findJSONPathResults(parser, jObj) - if err != nil { - glog.Fatalf("Failed to get j values for %#v using %s (%v)", jObj, r.field, err) - } - - if len(iValues) == 0 || len(iValues[0]) == 0 { - return true - } - if len(jValues) == 0 || len(jValues[0]) == 0 { - return false - } - iField := iValues[0][0] - jField := jValues[0][0] - - less, err := isLess(iField, jField) - if err != nil { - glog.Fatalf("Field %s in %T is an unsortable type: %s, err: %v", r.field, iObj, iField.Kind().String(), err) - } - return less -} - -// Returns the starting (original) position of a particular index. e.g. If OriginalPosition(0) returns 5 than the -// the item currently at position 0 was at position 5 in the original unsorted array. -func (r *RuntimeSort) OriginalPosition(ix int) int { - if ix < 0 || ix > len(r.origPosition) { - return -1 - } - return r.origPosition[ix] -} - -type TableSorter struct { - field string - obj *metav1beta1.Table -} - -func (t *TableSorter) Len() int { - return len(t.obj.Rows) -} - -func (t *TableSorter) Swap(i, j int) { - t.obj.Rows[i], t.obj.Rows[j] = t.obj.Rows[j], t.obj.Rows[i] -} - -func (t *TableSorter) Less(i, j int) bool { - iObj := t.obj.Rows[i].Object.Object - jObj := t.obj.Rows[j].Object.Object - - var iValues [][]reflect.Value - var jValues [][]reflect.Value - var err error - - parser := jsonpath.New("sorting").AllowMissingKeys(true) - err = parser.Parse(t.field) - if err != nil { - glog.Fatalf("sorting error: %v\n", err) - } - - // TODO(juanvallejo): this is expensive for very large sets. - // To improve runtime complexity, build an array which contains all - // resolved fields, and sort that instead. 
- iValues, err = findJSONPathResults(parser, iObj) - if err != nil { - glog.Fatalf("Failed to get i values for %#v using %s (%#v)", iObj, t.field, err) - } - - jValues, err = findJSONPathResults(parser, jObj) - if err != nil { - glog.Fatalf("Failed to get j values for %#v using %s (%v)", jObj, t.field, err) - } - - if len(iValues) == 0 || len(iValues[0]) == 0 || len(jValues) == 0 || len(jValues[0]) == 0 { - glog.Fatalf("couldn't find any field with path %q in the list of objects", t.field) - } - - iField := iValues[0][0] - jField := jValues[0][0] - - less, err := isLess(iField, jField) - if err != nil { - glog.Fatalf("Field %s in %T is an unsortable type: %s, err: %v", t.field, iObj, iField.Kind().String(), err) - } - return less -} - -func (t *TableSorter) Sort() error { - sort.Sort(t) - return nil -} - -func NewTableSorter(table *metav1beta1.Table, field string) *TableSorter { - return &TableSorter{ - obj: table, - field: field, - } -} - -func findJSONPathResults(parser *jsonpath.JSONPath, from runtime.Object) ([][]reflect.Value, error) { - if unstructuredObj, ok := from.(*unstructured.Unstructured); ok { - return parser.FindResults(unstructuredObj.Object) - } - - return parser.FindResults(reflect.ValueOf(from).Elem().Interface()) -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/BUILD index 8eea40ed1027..fc010ff59979 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/BUILD @@ -7,6 +7,7 @@ load( go_library( name = "go_default_library", srcs = [ + "pod_port.go", "service_port.go", "umask.go", "umask_windows.go", @@ -65,10 +66,21 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//pkg/kubectl/util/certificate:all-srcs", + "//pkg/kubectl/util/deployment:all-srcs", + "//pkg/kubectl/util/event:all-srcs", + "//pkg/kubectl/util/fieldpath:all-srcs", "//pkg/kubectl/util/hash:all-srcs", "//pkg/kubectl/util/i18n:all-srcs", "//pkg/kubectl/util/logs:all-srcs", + "//pkg/kubectl/util/podutils:all-srcs", + "//pkg/kubectl/util/printers:all-srcs", + "//pkg/kubectl/util/qos:all-srcs", + "//pkg/kubectl/util/rbac:all-srcs", + "//pkg/kubectl/util/resource:all-srcs", "//pkg/kubectl/util/slice:all-srcs", + "//pkg/kubectl/util/storage:all-srcs", + "//pkg/kubectl/util/templates:all-srcs", "//pkg/kubectl/util/term:all-srcs", ], tags = ["automanaged"], @@ -78,6 +90,7 @@ filegroup( go_test( name = "go_default_test", srcs = [ + "pod_port_test.go", "service_port_test.go", "util_test.go", ], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/certificate/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/certificate/BUILD new file mode 100644 index 000000000000..cac97d417052 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/certificate/BUILD @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["certificate.go"], + importpath = "k8s.io/kubernetes/pkg/kubectl/util/certificate", + visibility = ["//visibility:public"], + deps = ["//staging/src/k8s.io/api/certificates/v1beta1:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git 
a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/certificate/certificate.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/certificate/certificate.go new file mode 100644 index 000000000000..201958c6fca7 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/certificate/certificate.go @@ -0,0 +1,42 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package certificate + +import ( + "crypto/x509" + "encoding/pem" + "errors" + + certificatesv1beta1 "k8s.io/api/certificates/v1beta1" +) + +// TODO(yue9944882): Remove this helper package once it's copied to k/api + +// ParseCSR extracts the CSR from the API object and decodes it. +func ParseCSR(obj *certificatesv1beta1.CertificateSigningRequest) (*x509.CertificateRequest, error) { + // extract PEM from request object + pemBytes := obj.Spec.Request + block, _ := pem.Decode(pemBytes) + if block == nil || block.Type != "CERTIFICATE REQUEST" { + return nil, errors.New("PEM block type must be CERTIFICATE REQUEST") + } + csr, err := x509.ParseCertificateRequest(block.Bytes) + if err != nil { + return nil, err + } + return csr, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/deployment/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/deployment/BUILD new file mode 100644 index 000000000000..5fffa4803e11 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/deployment/BUILD @@ -0,0 +1,32 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["deployment.go"], + importpath = "k8s.io/kubernetes/pkg/kubectl/util/deployment", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/api/apps/v1:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/deployment/deployment.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/deployment/deployment.go new file mode 100644 index 000000000000..72f99f7f2ee4 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/deployment/deployment.go @@ -0,0 +1,230 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package deployment + +import ( + "sort" + "strconv" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + intstrutil "k8s.io/apimachinery/pkg/util/intstr" + appsclient "k8s.io/client-go/kubernetes/typed/apps/v1" +) + +const ( + // RevisionAnnotation is the revision annotation of a deployment's replica sets which records its rollout sequence + RevisionAnnotation = "deployment.kubernetes.io/revision" + // RevisionHistoryAnnotation maintains the history of all old revisions that a replica set has served for a deployment. + RevisionHistoryAnnotation = "deployment.kubernetes.io/revision-history" + // DesiredReplicasAnnotation is the desired replicas for a deployment recorded as an annotation + // in its replica sets. Helps in separating scaling events from the rollout process and for + // determining if the new replica set for a deployment is really saturated. + DesiredReplicasAnnotation = "deployment.kubernetes.io/desired-replicas" + // MaxReplicasAnnotation is the maximum replicas a deployment can have at a given point, which + // is deployment.spec.replicas + maxSurge. Used by the underlying replica sets to estimate their + // proportions in case the deployment has surge replicas. + MaxReplicasAnnotation = "deployment.kubernetes.io/max-replicas" + // RollbackRevisionNotFound is not found rollback event reason + RollbackRevisionNotFound = "DeploymentRollbackRevisionNotFound" + // RollbackTemplateUnchanged is the template unchanged rollback event reason + RollbackTemplateUnchanged = "DeploymentRollbackTemplateUnchanged" + // RollbackDone is the done rollback event reason + RollbackDone = "DeploymentRollback" + // TimedOutReason is added in a deployment when its newest replica set fails to show any progress + // within the given deadline (progressDeadlineSeconds). + TimedOutReason = "ProgressDeadlineExceeded" +) + +// GetDeploymentCondition returns the condition with the provided type. +func GetDeploymentCondition(status appsv1.DeploymentStatus, condType appsv1.DeploymentConditionType) *appsv1.DeploymentCondition { + for i := range status.Conditions { + c := status.Conditions[i] + if c.Type == condType { + return &c + } + } + return nil +} + +// Revision returns the revision number of the input object. +func Revision(obj runtime.Object) (int64, error) { + acc, err := meta.Accessor(obj) + if err != nil { + return 0, err + } + v, ok := acc.GetAnnotations()[RevisionAnnotation] + if !ok { + return 0, nil + } + return strconv.ParseInt(v, 10, 64) +} + +// GetAllReplicaSets returns the old and new replica sets targeted by the given Deployment. It gets PodList and +// ReplicaSetList from client interface. Note that the first set of old replica sets doesn't include the ones +// with no pods, and the second set of old replica sets include all old replica sets. 
The third returned value +// is the new replica set, and it may be nil if it doesn't exist yet. +func GetAllReplicaSets(deployment *appsv1.Deployment, c appsclient.AppsV1Interface) ([]*appsv1.ReplicaSet, []*appsv1.ReplicaSet, *appsv1.ReplicaSet, error) { + rsList, err := listReplicaSets(deployment, rsListFromClient(c)) + if err != nil { + return nil, nil, nil, err + } + oldRSes, allOldRSes := findOldReplicaSets(deployment, rsList) + newRS := findNewReplicaSet(deployment, rsList) + return oldRSes, allOldRSes, newRS, nil +} + +// RsListFromClient returns an rsListFunc that wraps the given client. +func rsListFromClient(c appsclient.AppsV1Interface) rsListFunc { + return func(namespace string, options metav1.ListOptions) ([]*appsv1.ReplicaSet, error) { + rsList, err := c.ReplicaSets(namespace).List(options) + if err != nil { + return nil, err + } + var ret []*appsv1.ReplicaSet + for i := range rsList.Items { + ret = append(ret, &rsList.Items[i]) + } + return ret, err + } +} + +// TODO: switch this to full namespacers +type rsListFunc func(string, metav1.ListOptions) ([]*appsv1.ReplicaSet, error) +type podListFunc func(string, metav1.ListOptions) (*corev1.PodList, error) + +// listReplicaSets returns a slice of RSes the given deployment targets. +// Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan), +// because only the controller itself should do that. +// However, it does filter out anything whose ControllerRef doesn't match. +func listReplicaSets(deployment *appsv1.Deployment, getRSList rsListFunc) ([]*appsv1.ReplicaSet, error) { + // TODO: Right now we list replica sets by their labels. We should list them by selector, i.e. the replica set's selector + // should be a superset of the deployment's selector, see https://github.com/kubernetes/kubernetes/issues/19830. + namespace := deployment.Namespace + selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) + if err != nil { + return nil, err + } + options := metav1.ListOptions{LabelSelector: selector.String()} + all, err := getRSList(namespace, options) + if err != nil { + return nil, err + } + // Only include those whose ControllerRef matches the Deployment. + owned := make([]*appsv1.ReplicaSet, 0, len(all)) + for _, rs := range all { + if metav1.IsControlledBy(rs, deployment) { + owned = append(owned, rs) + } + } + return owned, nil +} + +// EqualIgnoreHash returns true if two given podTemplateSpec are equal, ignoring the diff in value of Labels[pod-template-hash] +// We ignore pod-template-hash because: +// 1. The hash result would be different upon podTemplateSpec API changes +// (e.g. the addition of a new field will cause the hash code to change) +// 2. The deployment template won't have hash labels +func equalIgnoreHash(template1, template2 *corev1.PodTemplateSpec) bool { + t1Copy := template1.DeepCopy() + t2Copy := template2.DeepCopy() + // Remove hash labels from template.Labels before comparing + delete(t1Copy.Labels, appsv1.DefaultDeploymentUniqueLabelKey) + delete(t2Copy.Labels, appsv1.DefaultDeploymentUniqueLabelKey) + return apiequality.Semantic.DeepEqual(t1Copy, t2Copy) +} + +// FindNewReplicaSet returns the new RS this given deployment targets (the one with the same pod template). 
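+// ReplicaSets are compared to the Deployment's template ignoring the
+// pod-template-hash label, and the candidate list is sorted oldest-first, so
+// when several ReplicaSets match, the oldest one is returned deterministically.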
+func findNewReplicaSet(deployment *appsv1.Deployment, rsList []*appsv1.ReplicaSet) *appsv1.ReplicaSet { + sort.Sort(replicaSetsByCreationTimestamp(rsList)) + for i := range rsList { + if equalIgnoreHash(&rsList[i].Spec.Template, &deployment.Spec.Template) { + // In rare cases, such as after cluster upgrades, Deployment may end up with + // having more than one new ReplicaSets that have the same template as its template, + // see https://github.com/kubernetes/kubernetes/issues/40415 + // We deterministically choose the oldest new ReplicaSet. + return rsList[i] + } + } + // new ReplicaSet does not exist. + return nil +} + +// replicaSetsByCreationTimestamp sorts a list of ReplicaSet by creation timestamp, using their names as a tie breaker. +type replicaSetsByCreationTimestamp []*appsv1.ReplicaSet + +func (o replicaSetsByCreationTimestamp) Len() int { return len(o) } +func (o replicaSetsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] } +func (o replicaSetsByCreationTimestamp) Less(i, j int) bool { + if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) { + return o[i].Name < o[j].Name + } + return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp) +} + +// // FindOldReplicaSets returns the old replica sets targeted by the given Deployment, with the given slice of RSes. +// // Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets. +func findOldReplicaSets(deployment *appsv1.Deployment, rsList []*appsv1.ReplicaSet) ([]*appsv1.ReplicaSet, []*appsv1.ReplicaSet) { + var requiredRSs []*appsv1.ReplicaSet + var allRSs []*appsv1.ReplicaSet + newRS := findNewReplicaSet(deployment, rsList) + for _, rs := range rsList { + // Filter out new replica set + if newRS != nil && rs.UID == newRS.UID { + continue + } + allRSs = append(allRSs, rs) + if *(rs.Spec.Replicas) != 0 { + requiredRSs = append(requiredRSs, rs) + } + } + return requiredRSs, allRSs +} + +// ResolveFenceposts resolves both maxSurge and maxUnavailable. This needs to happen in one +// step. For example: +// +// 2 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1), then old(-1), then new(+1) +// 1 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1) +// 2 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1) +// 1 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1) +// 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1) +// 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1) +func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) { + surge, err := intstrutil.GetValueFromIntOrPercent(intstrutil.ValueOrDefault(maxSurge, intstrutil.FromInt(0)), int(desired), true) + if err != nil { + return 0, 0, err + } + unavailable, err := intstrutil.GetValueFromIntOrPercent(intstrutil.ValueOrDefault(maxUnavailable, intstrutil.FromInt(0)), int(desired), false) + if err != nil { + return 0, 0, err + } + + if surge == 0 && unavailable == 0 { + // Validation should never allow the user to explicitly use zero values for both maxSurge + // maxUnavailable. Due to rounding down maxUnavailable though, it may resolve to zero. + // If both fenceposts resolve to zero, then we should set maxUnavailable to 1 on the + // theory that surge might not work due to quota. 
+ unavailable = 1 + } + + return int32(surge), int32(unavailable), nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/event/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/event/BUILD new file mode 100644 index 000000000000..1c0fe6300d36 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/event/BUILD @@ -0,0 +1,37 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = ["sorted_event_list.go"], + importpath = "k8s.io/kubernetes/pkg/kubectl/util/event", + deps = ["//staging/src/k8s.io/api/core/v1:go_default_library"], +) + +go_test( + name = "go_default_test", + srcs = ["sorted_event_list_test.go"], + embed = [":go_default_library"], + deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/events/sorted_event_list.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/event/sorted_event_list.go similarity index 91% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/events/sorted_event_list.go rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/event/sorted_event_list.go index 9976c10ce734..9967f953e68f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/api/events/sorted_event_list.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/event/sorted_event_list.go @@ -14,14 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package events +package event import ( - api "k8s.io/kubernetes/pkg/apis/core" + corev1 "k8s.io/api/core/v1" ) // SortableEvents implements sort.Interface for []api.Event based on the Timestamp field -type SortableEvents []api.Event +type SortableEvents []corev1.Event func (list SortableEvents) Len() int { return len(list) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/fieldpath/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/fieldpath/BUILD new file mode 100644 index 000000000000..188a07d00d42 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/fieldpath/BUILD @@ -0,0 +1,27 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["fieldpath.go"], + importpath = "k8s.io/kubernetes/pkg/kubectl/util/fieldpath", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/fieldpath/fieldpath.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/fieldpath/fieldpath.go new file mode 100644 index 000000000000..efd08cde0f53 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/fieldpath/fieldpath.go @@ -0,0 +1,111 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldpath + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" +) + +// TODO(yue9944882): Remove this helper package once it's copied to k/apimachinery + +// FormatMap formats map[string]string to a string. +func FormatMap(m map[string]string) (fmtStr string) { + // output with keys in sorted order to provide stable output + keys := sets.NewString() + for key := range m { + keys.Insert(key) + } + for _, key := range keys.List() { + fmtStr += fmt.Sprintf("%v=%q\n", key, m[key]) + } + fmtStr = strings.TrimSuffix(fmtStr, "\n") + + return +} + +// ExtractFieldPathAsString extracts the field from the given object +// and returns it as a string. The object must be a pointer to an +// API type. 
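+// For example, "metadata.name" yields the object's name, while a subscripted
+// path such as "metadata.labels['app']" yields the value of the "app" label
+// (see SplitMaybeSubscriptedPath below); the label key here is hypothetical.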
+func ExtractFieldPathAsString(obj interface{}, fieldPath string) (string, error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return "", nil + } + + if path, subscript, ok := SplitMaybeSubscriptedPath(fieldPath); ok { + switch path { + case "metadata.annotations": + if errs := validation.IsQualifiedName(strings.ToLower(subscript)); len(errs) != 0 { + return "", fmt.Errorf("invalid key subscript in %s: %s", fieldPath, strings.Join(errs, ";")) + } + return accessor.GetAnnotations()[subscript], nil + case "metadata.labels": + if errs := validation.IsQualifiedName(subscript); len(errs) != 0 { + return "", fmt.Errorf("invalid key subscript in %s: %s", fieldPath, strings.Join(errs, ";")) + } + return accessor.GetLabels()[subscript], nil + default: + return "", fmt.Errorf("fieldPath %q does not support subscript", fieldPath) + } + } + + switch fieldPath { + case "metadata.annotations": + return FormatMap(accessor.GetAnnotations()), nil + case "metadata.labels": + return FormatMap(accessor.GetLabels()), nil + case "metadata.name": + return accessor.GetName(), nil + case "metadata.namespace": + return accessor.GetNamespace(), nil + case "metadata.uid": + return string(accessor.GetUID()), nil + } + + return "", fmt.Errorf("unsupported fieldPath: %v", fieldPath) +} + +// SplitMaybeSubscriptedPath checks whether the specified fieldPath is +// subscripted, and +// - if yes, this function splits the fieldPath into path and subscript, and +// returns (path, subscript, true). +// - if no, this function returns (fieldPath, "", false). +// +// Example inputs and outputs: +// - "metadata.annotations['myKey']" --> ("metadata.annotations", "myKey", true) +// - "metadata.annotations['a[b]c']" --> ("metadata.annotations", "a[b]c", true) +// - "metadata.labels['']" --> ("metadata.labels", "", true) +// - "metadata.labels" --> ("metadata.labels", "", false) +func SplitMaybeSubscriptedPath(fieldPath string) (string, string, bool) { + if !strings.HasSuffix(fieldPath, "']") { + return fieldPath, "", false + } + s := strings.TrimSuffix(fieldPath, "']") + parts := strings.SplitN(s, "['", 2) + if len(parts) < 2 { + return fieldPath, "", false + } + if len(parts[0]) == 0 { + return fieldPath, "", false + } + return parts[0], parts[1], true +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/hash/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/hash/BUILD deleted file mode 100644 index d930302bb9dc..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/hash/BUILD +++ /dev/null @@ -1,34 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_test( - name = "go_default_test", - srcs = ["hash_test.go"], - embed = [":go_default_library"], - deps = ["//staging/src/k8s.io/api/core/v1:go_default_library"], -) - -go_library( - name = "go_default_library", - srcs = ["hash.go"], - importpath = "k8s.io/kubernetes/pkg/kubectl/util/hash", - deps = ["//staging/src/k8s.io/api/core/v1:go_default_library"], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/hash/hash.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/hash/hash.go deleted file mode 100644 index de0036245d2f..000000000000 --- 
a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/hash/hash.go +++ /dev/null @@ -1,114 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package hash - -import ( - "crypto/sha256" - "encoding/json" - "fmt" - - "k8s.io/api/core/v1" -) - -// ConfigMapHash returns a hash of the ConfigMap. -// The Data, Kind, and Name are taken into account. -func ConfigMapHash(cm *v1.ConfigMap) (string, error) { - encoded, err := encodeConfigMap(cm) - if err != nil { - return "", err - } - h, err := encodeHash(hash(encoded)) - if err != nil { - return "", err - } - return h, nil -} - -// SecretHash returns a hash of the Secret. -// The Data, Kind, Name, and Type are taken into account. -func SecretHash(sec *v1.Secret) (string, error) { - encoded, err := encodeSecret(sec) - if err != nil { - return "", err - } - h, err := encodeHash(hash(encoded)) - if err != nil { - return "", err - } - return h, nil -} - -// encodeConfigMap encodes a ConfigMap. -// Data, Kind, and Name are taken into account. -func encodeConfigMap(cm *v1.ConfigMap) (string, error) { - // json.Marshal sorts the keys in a stable order in the encoding - m := map[string]interface{}{"kind": "ConfigMap", "name": cm.Name, "data": cm.Data} - if len(cm.BinaryData) > 0 { - m["binaryData"] = cm.BinaryData - } - data, err := json.Marshal(m) - if err != nil { - return "", err - } - return string(data), nil -} - -// encodeSecret encodes a Secret. -// Data, Kind, Name, and Type are taken into account. -func encodeSecret(sec *v1.Secret) (string, error) { - // json.Marshal sorts the keys in a stable order in the encoding - data, err := json.Marshal(map[string]interface{}{"kind": "Secret", "type": sec.Type, "name": sec.Name, "data": sec.Data}) - if err != nil { - return "", err - } - return string(data), nil -} - -// encodeHash extracts the first 40 bits of the hash from the hex string -// (1 hex char represents 4 bits), and then maps vowels and vowel-like hex -// characters to consonants to prevent bad words from being formed (the theory -// is that no vowels makes it really hard to make bad words). Since the string -// is hex, the only vowels it can contain are 'a' and 'e'. -// We picked some arbitrary consonants to map to from the same character set as GenerateName. -// See: https://github.com/kubernetes/apimachinery/blob/dc1f89aff9a7509782bde3b68824c8043a3e58cc/pkg/util/rand/rand.go#L75 -// If the hex string contains fewer than ten characters, returns an error. 
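The hash-suffix scheme this (now deleted) helper implemented is worth illustrating, since kubectl still generates such suffixes elsewhere. A self-contained sketch of the same transformation; `encodeHashDemo` and its JSON input are illustrative only, not the exact encoding `ConfigMapHash` used:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// encodeHashDemo reproduces the scheme described above: take the first ten
// hex characters of a SHA-256 digest and remap '0','1','3','a','e' to
// consonants so no recognizable words can appear in generated names.
func encodeHashDemo(encoded string) string {
	hex := fmt.Sprintf("%x", sha256.Sum256([]byte(encoded)))
	enc := []rune(hex[:10])
	for i, r := range enc {
		switch r {
		case '0':
			enc[i] = 'g'
		case '1':
			enc[i] = 'h'
		case '3':
			enc[i] = 'k'
		case 'a':
			enc[i] = 'm'
		case 'e':
			enc[i] = 't'
		}
	}
	return string(enc)
}

func main() {
	fmt.Println(encodeHashDemo(`{"data":{"key":"value"},"kind":"ConfigMap","name":"example"}`))
}
```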
-func encodeHash(hex string) (string, error) { - if len(hex) < 10 { - return "", fmt.Errorf("the hex string must contain at least 10 characters") - } - enc := []rune(hex[:10]) - for i := range enc { - switch enc[i] { - case '0': - enc[i] = 'g' - case '1': - enc[i] = 'h' - case '3': - enc[i] = 'k' - case 'a': - enc[i] = 'm' - case 'e': - enc[i] = 't' - } - } - return string(enc), nil -} - -// hash hashes `data` with sha256 and returns the hex string -func hash(data string) string { - return fmt.Sprintf("%x", sha256.Sum256([]byte(data))) -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/pod_port.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/pod_port.go new file mode 100644 index 000000000000..6d78501a89a9 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/pod_port.go @@ -0,0 +1,36 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + + "k8s.io/api/core/v1" +) + +// LookupContainerPortNumberByName find containerPort number by its named port name +func LookupContainerPortNumberByName(pod v1.Pod, name string) (int32, error) { + for _, ctr := range pod.Spec.Containers { + for _, ctrportspec := range ctr.Ports { + if ctrportspec.Name == name { + return ctrportspec.ContainerPort, nil + } + } + } + + return int32(-1), fmt.Errorf("Pod '%s' does not have a named port '%s'", pod.Name, name) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/podutils/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/podutils/BUILD new file mode 100644 index 000000000000..51522abecabe --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/podutils/BUILD @@ -0,0 +1,27 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["podutils.go"], + importpath = "k8s.io/kubernetes/pkg/kubectl/util/podutils", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/client-go/util/integer:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/podutils/podutils.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/podutils/podutils.go new file mode 100644 index 000000000000..2da66ef9c3f6 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/podutils/podutils.go @@ -0,0 +1,190 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package podutils + +import ( + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/integer" +) + +// IsPodAvailable returns true if a pod is available; false otherwise. +// Precondition for an available pod is that it must be ready. On top +// of that, there are two cases when a pod can be considered available: +// 1. minReadySeconds == 0, or +// 2. LastTransitionTime (is set) + minReadySeconds < current time +func IsPodAvailable(pod *corev1.Pod, minReadySeconds int32, now metav1.Time) bool { + if !IsPodReady(pod) { + return false + } + + c := getPodReadyCondition(pod.Status) + minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second + if minReadySeconds == 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time) { + return true + } + return false +} + +// IsPodReady returns true if a pod is ready; false otherwise. +func IsPodReady(pod *corev1.Pod) bool { + return isPodReadyConditionTrue(pod.Status) +} + +// IsPodReadyConditionTrue returns true if a pod is ready; false otherwise. +func isPodReadyConditionTrue(status corev1.PodStatus) bool { + condition := getPodReadyCondition(status) + return condition != nil && condition.Status == corev1.ConditionTrue +} + +// GetPodReadyCondition extracts the pod ready condition from the given status and returns that. +// Returns nil if the condition is not present. +func getPodReadyCondition(status corev1.PodStatus) *corev1.PodCondition { + _, condition := getPodCondition(&status, corev1.PodReady) + return condition +} + +// GetPodCondition extracts the provided condition from the given status and returns that. +// Returns nil and -1 if the condition is not present, and the index of the located condition. +func getPodCondition(status *corev1.PodStatus, conditionType corev1.PodConditionType) (int, *corev1.PodCondition) { + if status == nil { + return -1, nil + } + return getPodConditionFromList(status.Conditions, conditionType) +} + +// GetPodConditionFromList extracts the provided condition from the given list of condition and +// returns the index of the condition and the condition. Returns -1 and nil if the condition is not present. +func getPodConditionFromList(conditions []corev1.PodCondition, conditionType corev1.PodConditionType) (int, *corev1.PodCondition) { + if conditions == nil { + return -1, nil + } + for i := range conditions { + if conditions[i].Type == conditionType { + return i, &conditions[i] + } + } + return -1, nil +} + +// ByLogging allows custom sorting of pods so the best one can be picked for getting its logs. +type ByLogging []*corev1.Pod + +func (s ByLogging) Len() int { return len(s) } +func (s ByLogging) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func (s ByLogging) Less(i, j int) bool { + // 1. assigned < unassigned + if s[i].Spec.NodeName != s[j].Spec.NodeName && (len(s[i].Spec.NodeName) == 0 || len(s[j].Spec.NodeName) == 0) { + return len(s[i].Spec.NodeName) > 0 + } + // 2. 
PodRunning < PodUnknown < PodPending + m := map[corev1.PodPhase]int{corev1.PodRunning: 0, corev1.PodUnknown: 1, corev1.PodPending: 2} + if m[s[i].Status.Phase] != m[s[j].Status.Phase] { + return m[s[i].Status.Phase] < m[s[j].Status.Phase] + } + // 3. ready < not ready + if IsPodReady(s[i]) != IsPodReady(s[j]) { + return IsPodReady(s[i]) + } + // TODO: take availability into account when we push minReadySeconds information from deployment into pods, + // see https://github.com/kubernetes/kubernetes/issues/22065 + // 4. Been ready for more time < less time < empty time + if IsPodReady(s[i]) && IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) { + return afterOrZero(podReadyTime(s[j]), podReadyTime(s[i])) + } + // 5. Pods with containers with higher restart counts < lower restart counts + if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) { + return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j]) + } + // 6. older pods < newer pods < empty timestamp pods + if !s[i].CreationTimestamp.Equal(&s[j].CreationTimestamp) { + return afterOrZero(&s[j].CreationTimestamp, &s[i].CreationTimestamp) + } + return false +} + +// ActivePods type allows custom sorting of pods so a controller can pick the best ones to delete. +type ActivePods []*corev1.Pod + +func (s ActivePods) Len() int { return len(s) } +func (s ActivePods) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func (s ActivePods) Less(i, j int) bool { + // 1. Unassigned < assigned + // If only one of the pods is unassigned, the unassigned one is smaller + if s[i].Spec.NodeName != s[j].Spec.NodeName && (len(s[i].Spec.NodeName) == 0 || len(s[j].Spec.NodeName) == 0) { + return len(s[i].Spec.NodeName) == 0 + } + // 2. PodPending < PodUnknown < PodRunning + m := map[corev1.PodPhase]int{corev1.PodPending: 0, corev1.PodUnknown: 1, corev1.PodRunning: 2} + if m[s[i].Status.Phase] != m[s[j].Status.Phase] { + return m[s[i].Status.Phase] < m[s[j].Status.Phase] + } + // 3. Not ready < ready + // If only one of the pods is not ready, the not ready one is smaller + if IsPodReady(s[i]) != IsPodReady(s[j]) { + return !IsPodReady(s[i]) + } + // TODO: take availability into account when we push minReadySeconds information from deployment into pods, + // see https://github.com/kubernetes/kubernetes/issues/22065 + // 4. Been ready for empty time < less time < more time + // If both pods are ready, the latest ready one is smaller + if IsPodReady(s[i]) && IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) { + return afterOrZero(podReadyTime(s[i]), podReadyTime(s[j])) + } + // 5. Pods with containers with higher restart counts < lower restart counts + if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) { + return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j]) + } + // 6. Empty creation time pods < newer pods < older pods + if !s[i].CreationTimestamp.Equal(&s[j].CreationTimestamp) { + return afterOrZero(&s[i].CreationTimestamp, &s[j].CreationTimestamp) + } + return false +} + +// afterOrZero checks if time t1 is after time t2; if one of them +// is zero, the zero time is seen as after non-zero time. 
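The zero-time rule documented here drives both orderings above: a pod with no recorded timestamp sorts as the youngest. A small demonstration, with the helper reimplemented locally so the snippet runs on its own:

```go
package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// afterOrZero mirrors the vendored helper: a zero time is treated as being
// "after" any non-zero time, so unset timestamps rank as newest.
func afterOrZero(t1, t2 *metav1.Time) bool {
	if t1.Time.IsZero() || t2.Time.IsZero() {
		return t1.Time.IsZero()
	}
	return t1.After(t2.Time)
}

func main() {
	now := metav1.Now()
	hourAgo := metav1.NewTime(now.Add(-time.Hour))
	zero := &metav1.Time{}

	fmt.Println(afterOrZero(&now, &hourAgo)) // true: a later time is "after"
	fmt.Println(afterOrZero(zero, &now))     // true: zero beats any real time
	fmt.Println(afterOrZero(&now, zero))     // false
}
```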
+func afterOrZero(t1, t2 *metav1.Time) bool { + if t1.Time.IsZero() || t2.Time.IsZero() { + return t1.Time.IsZero() + } + return t1.After(t2.Time) +} + +func podReadyTime(pod *corev1.Pod) *metav1.Time { + if IsPodReady(pod) { + for _, c := range pod.Status.Conditions { + // we only care about pod ready conditions + if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue { + return &c.LastTransitionTime + } + } + } + return &metav1.Time{} +} + +func maxContainerRestarts(pod *corev1.Pod) int { + maxRestarts := 0 + for _, c := range pod.Status.ContainerStatuses { + maxRestarts = integer.IntMax(maxRestarts, int(c.RestartCount)) + } + return maxRestarts +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/qos/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/qos/BUILD new file mode 100644 index 000000000000..bed95210fc25 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/qos/BUILD @@ -0,0 +1,27 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["qos.go"], + importpath = "k8s.io/kubernetes/pkg/kubectl/util/qos", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/helper/qos/qos.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/qos/qos.go similarity index 78% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/helper/qos/qos.go rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/qos/qos.go index fad6fb240745..73b25acac2d6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/helper/qos/qos.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/qos/qos.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,19 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ -// NOTE: DO NOT use those helper functions through client-go, the -// package path will be changed in the future. package qos import ( + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/apis/core" ) -var supportedQoSComputeResources = sets.NewString(string(core.ResourceCPU), string(core.ResourceMemory)) +var supportedQoSComputeResources = sets.NewString(string(corev1.ResourceCPU), string(corev1.ResourceMemory)) -func isSupportedQoSComputeResource(name core.ResourceName) bool { +func isSupportedQoSComputeResource(name corev1.ResourceName) bool { return supportedQoSComputeResources.Has(string(name)) } @@ -34,9 +32,9 @@ func isSupportedQoSComputeResource(name core.ResourceName) bool { // A pod is besteffort if none of its containers have specified any requests or limits. 
// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal. // A pod is burstable if limits and requests do not match across all containers. -func GetPodQOS(pod *core.Pod) core.PodQOSClass { - requests := core.ResourceList{} - limits := core.ResourceList{} +func GetPodQOS(pod *corev1.Pod) corev1.PodQOSClass { + requests := corev1.ResourceList{} + limits := corev1.ResourceList{} zeroQuantity := resource.MustParse("0") isGuaranteed := true for _, container := range pod.Spec.Containers { @@ -73,12 +71,12 @@ func GetPodQOS(pod *core.Pod) core.PodQOSClass { } } - if !qosLimitsFound.HasAll(string(core.ResourceMemory), string(core.ResourceCPU)) { + if !qosLimitsFound.HasAll(string(corev1.ResourceMemory), string(corev1.ResourceCPU)) { isGuaranteed = false } } if len(requests) == 0 && len(limits) == 0 { - return core.PodQOSBestEffort + return corev1.PodQOSBestEffort } // Check is requests match limits for all resources. if isGuaranteed { @@ -91,7 +89,7 @@ func GetPodQOS(pod *core.Pod) core.PodQOSClass { } if isGuaranteed && len(requests) == len(limits) { - return core.PodQOSGuaranteed + return corev1.PodQOSGuaranteed } - return core.PodQOSBurstable + return corev1.PodQOSBurstable } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/rbac/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/rbac/BUILD new file mode 100644 index 000000000000..6a398bc854c4 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/rbac/BUILD @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["rbac.go"], + importpath = "k8s.io/kubernetes/pkg/kubectl/util/rbac", + visibility = ["//visibility:public"], + deps = ["//staging/src/k8s.io/api/rbac/v1:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/rbac/rbac.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/rbac/rbac.go new file mode 100644 index 000000000000..a149a51afb08 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/rbac/rbac.go @@ -0,0 +1,128 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rbac + +import ( + rbacv1 "k8s.io/api/rbac/v1" + "reflect" + "strings" +) + +type simpleResource struct { + Group string + Resource string + ResourceNameExist bool + ResourceName string +} + +// CompactRules combines rules that contain a single APIGroup/Resource, differ only by verb, and contain no other attributes. +// this is a fast check, and works well with the decomposed "missing rules" list from a Covers check. 
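A usage sketch for `CompactRules`, defined just below; it assumes the vendored import path resolves in this tree. Rules that differ only in verbs collapse into one, while anything non-simple passes through unchanged:

```go
package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
	"k8s.io/kubernetes/pkg/kubectl/util/rbac"
)

func main() {
	rules := []rbacv1.PolicyRule{
		{APIGroups: []string{""}, Resources: []string{"pods"}, Verbs: []string{"get"}},
		{APIGroups: []string{""}, Resources: []string{"pods"}, Verbs: []string{"list", "watch"}},
		// A rule with a non-resource URL is not "simple" and passes through untouched.
		{NonResourceURLs: []string{"/healthz"}, Verbs: []string{"get"}},
	}
	compacted, err := rbac.CompactRules(rules)
	if err != nil {
		panic(err)
	}
	for _, r := range compacted {
		fmt.Printf("%+v\n", r)
	}
	// Expect two rules: pods with verbs [get list watch], plus the /healthz rule.
}
```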
+func CompactRules(rules []rbacv1.PolicyRule) ([]rbacv1.PolicyRule, error) { + compacted := make([]rbacv1.PolicyRule, 0, len(rules)) + + simpleRules := map[simpleResource]*rbacv1.PolicyRule{} + for _, rule := range rules { + if resource, isSimple := isSimpleResourceRule(&rule); isSimple { + if existingRule, ok := simpleRules[resource]; ok { + // Add the new verbs to the existing simple resource rule + if existingRule.Verbs == nil { + existingRule.Verbs = []string{} + } + existingRule.Verbs = append(existingRule.Verbs, rule.Verbs...) + } else { + // Copy the rule to accumulate matching simple resource rules into + simpleRules[resource] = rule.DeepCopy() + } + } else { + compacted = append(compacted, rule) + } + } + + // Once we've consolidated the simple resource rules, add them to the compacted list + for _, simpleRule := range simpleRules { + compacted = append(compacted, *simpleRule) + } + + return compacted, nil +} + +// isSimpleResourceRule returns true if the given rule contains verbs, a single resource, a single API group, at most one Resource Name, and no other values +func isSimpleResourceRule(rule *rbacv1.PolicyRule) (simpleResource, bool) { + resource := simpleResource{} + + // If we have "complex" rule attributes, return early without allocations or expensive comparisons + if len(rule.ResourceNames) > 1 || len(rule.NonResourceURLs) > 0 { + return resource, false + } + // If we have multiple api groups or resources, return early + if len(rule.APIGroups) != 1 || len(rule.Resources) != 1 { + return resource, false + } + + // Test if this rule only contains APIGroups/Resources/Verbs/ResourceNames + simpleRule := &rbacv1.PolicyRule{APIGroups: rule.APIGroups, Resources: rule.Resources, Verbs: rule.Verbs, ResourceNames: rule.ResourceNames} + if !reflect.DeepEqual(simpleRule, rule) { + return resource, false + } + + if len(rule.ResourceNames) == 0 { + resource = simpleResource{Group: rule.APIGroups[0], Resource: rule.Resources[0], ResourceNameExist: false} + } else { + resource = simpleResource{Group: rule.APIGroups[0], Resource: rule.Resources[0], ResourceNameExist: true, ResourceName: rule.ResourceNames[0]} + } + + return resource, true +} + +// BreakdownRule takes a rule and builds an equivalent list of rules that each have at most one verb, one +// resource, and one resource name +func BreakdownRule(rule rbacv1.PolicyRule) []rbacv1.PolicyRule { + subrules := []rbacv1.PolicyRule{} + for _, group := range rule.APIGroups { + for _, resource := range rule.Resources { + for _, verb := range rule.Verbs { + if len(rule.ResourceNames) > 0 { + for _, resourceName := range rule.ResourceNames { + subrules = append(subrules, rbacv1.PolicyRule{APIGroups: []string{group}, Resources: []string{resource}, Verbs: []string{verb}, ResourceNames: []string{resourceName}}) + } + + } else { + subrules = append(subrules, rbacv1.PolicyRule{APIGroups: []string{group}, Resources: []string{resource}, Verbs: []string{verb}}) + } + + } + } + } + + // Non-resource URLs are unique because they only combine with verbs. 
+ for _, nonResourceURL := range rule.NonResourceURLs { + for _, verb := range rule.Verbs { + subrules = append(subrules, rbacv1.PolicyRule{NonResourceURLs: []string{nonResourceURL}, Verbs: []string{verb}}) + } + } + + return subrules +} + +// SortableRuleSlice is used to sort rule slice +type SortableRuleSlice []rbacv1.PolicyRule + +func (s SortableRuleSlice) Len() int { return len(s) } +func (s SortableRuleSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s SortableRuleSlice) Less(i, j int) bool { + return strings.Compare(s[i].String(), s[j].String()) < 0 +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/resource/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/resource/BUILD new file mode 100644 index 000000000000..bbbe532b7108 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/resource/BUILD @@ -0,0 +1,27 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["resource.go"], + importpath = "k8s.io/kubernetes/pkg/kubectl/util/resource", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/resource/resource.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/resource/resource.go new file mode 100644 index 000000000000..0a6f177a6471 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/resource/resource.go @@ -0,0 +1,138 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "fmt" + "math" + "strconv" + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/util/sets" +) + +// PodRequestsAndLimits returns a dictionary of all defined resources summed up for all +// containers of the pod. 
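To make the effective-request rule implemented just below concrete: app container requests are summed, and each init container then acts as a floor. A hedged example, assuming the import path from the BUILD file above:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	kresource "k8s.io/kubernetes/pkg/kubectl/util/resource"
)

func cpuContainer(cpu string) corev1.Container {
	return corev1.Container{Resources: corev1.ResourceRequirements{
		Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse(cpu)},
	}}
}

func main() {
	pod := &corev1.Pod{Spec: corev1.PodSpec{
		// Two app containers sum to 200m...
		Containers: []corev1.Container{cpuContainer("100m"), cpuContainer("100m")},
		// ...but a 500m init container raises the effective request.
		InitContainers: []corev1.Container{cpuContainer("500m")},
	}}
	reqs, _ := kresource.PodRequestsAndLimits(pod)
	cpu := reqs[corev1.ResourceCPU]
	fmt.Println(cpu.String()) // 500m
}
```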
+func PodRequestsAndLimits(pod *corev1.Pod) (reqs, limits corev1.ResourceList) { + reqs, limits = corev1.ResourceList{}, corev1.ResourceList{} + for _, container := range pod.Spec.Containers { + addResourceList(reqs, container.Resources.Requests) + addResourceList(limits, container.Resources.Limits) + } + // init containers define the minimum (floor) of each resource + for _, container := range pod.Spec.InitContainers { + maxResourceList(reqs, container.Resources.Requests) + maxResourceList(limits, container.Resources.Limits) + } + return +} + +// addResourceList adds the resources in newList to list +func addResourceList(list, new corev1.ResourceList) { + for name, quantity := range new { + if value, ok := list[name]; !ok { + list[name] = *quantity.Copy() + } else { + value.Add(quantity) + list[name] = value + } + } +} + +// maxResourceList sets list to the greater of list/newList for every resource in +// either list +func maxResourceList(list, new corev1.ResourceList) { + for name, quantity := range new { + if value, ok := list[name]; !ok { + list[name] = *quantity.Copy() + continue + } else { + if quantity.Cmp(value) > 0 { + list[name] = *quantity.Copy() + } + } + } +} + +// ExtractContainerResourceValue extracts the value of a resource +// in an already known container +func ExtractContainerResourceValue(fs *corev1.ResourceFieldSelector, container *corev1.Container) (string, error) { + divisor := resource.Quantity{} + if divisor.Cmp(fs.Divisor) == 0 { + divisor = resource.MustParse("1") + } else { + divisor = fs.Divisor + } + + switch fs.Resource { + case "limits.cpu": + return convertResourceCPUToString(container.Resources.Limits.Cpu(), divisor) + case "limits.memory": + return convertResourceMemoryToString(container.Resources.Limits.Memory(), divisor) + case "limits.ephemeral-storage": + return convertResourceEphemeralStorageToString(container.Resources.Limits.StorageEphemeral(), divisor) + case "requests.cpu": + return convertResourceCPUToString(container.Resources.Requests.Cpu(), divisor) + case "requests.memory": + return convertResourceMemoryToString(container.Resources.Requests.Memory(), divisor) + case "requests.ephemeral-storage": + return convertResourceEphemeralStorageToString(container.Resources.Requests.StorageEphemeral(), divisor) + } + + return "", fmt.Errorf("Unsupported container resource: %v", fs.Resource) +} + +// convertResourceCPUToString converts cpu value to the format of divisor and returns +// ceiling of the value. +func convertResourceCPUToString(cpu *resource.Quantity, divisor resource.Quantity) (string, error) { + c := int64(math.Ceil(float64(cpu.MilliValue()) / float64(divisor.MilliValue()))) + return strconv.FormatInt(c, 10), nil +} + +// convertResourceMemoryToString converts memory value to the format of divisor and returns +// ceiling of the value. +func convertResourceMemoryToString(memory *resource.Quantity, divisor resource.Quantity) (string, error) { + m := int64(math.Ceil(float64(memory.Value()) / float64(divisor.Value()))) + return strconv.FormatInt(m, 10), nil +} + +// convertResourceEphemeralStorageToString converts ephemeral storage value to the format of divisor and returns +// ceiling of the value. 
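The ceiling behaviour of these converters is visible with a divisor that does not evenly divide the limit. A sketch, again assuming the vendored import path:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	kresource "k8s.io/kubernetes/pkg/kubectl/util/resource"
)

func main() {
	ctr := &corev1.Container{Resources: corev1.ResourceRequirements{
		Limits: corev1.ResourceList{corev1.ResourceMemory: resource.MustParse("100Mi")},
	}}
	// 100Mi = 104857600 bytes; divided by a 1M (10^6) divisor and rounded up.
	fs := &corev1.ResourceFieldSelector{Resource: "limits.memory", Divisor: resource.MustParse("1M")}
	v, err := kresource.ExtractContainerResourceValue(fs, ctr)
	fmt.Println(v, err) // "105" <nil>
}
```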
+func convertResourceEphemeralStorageToString(ephemeralStorage *resource.Quantity, divisor resource.Quantity) (string, error) { + m := int64(math.Ceil(float64(ephemeralStorage.Value()) / float64(divisor.Value()))) + return strconv.FormatInt(m, 10), nil +} + +var standardContainerResources = sets.NewString( + string(corev1.ResourceCPU), + string(corev1.ResourceMemory), + string(corev1.ResourceEphemeralStorage), +) + +// IsStandardContainerResourceName returns true if the container can make a resource request +// for the specified resource +func IsStandardContainerResourceName(str string) bool { + return standardContainerResources.Has(str) || IsHugePageResourceName(corev1.ResourceName(str)) +} + +// IsHugePageResourceName returns true if the resource name has the huge page +// resource prefix. +func IsHugePageResourceName(name corev1.ResourceName) bool { + return strings.HasPrefix(string(name), corev1.ResourceHugePagesPrefix) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/service_port.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/service_port.go index 8c9caf91d892..bc56ab7d6a90 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/service_port.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/service_port.go @@ -23,21 +23,8 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" ) -// Lookup containerPort number by its named port name -func lookupContainerPortNumberByName(pod v1.Pod, name string) (int32, error) { - for _, ctr := range pod.Spec.Containers { - for _, ctrportspec := range ctr.Ports { - if ctrportspec.Name == name { - return ctrportspec.ContainerPort, nil - } - } - } - - return int32(-1), fmt.Errorf("Pod '%s' does not have a named port '%s'", pod.Name, name) -} - -// Lookup containerPort number from Service port number -// It implements the handling of resolving container named port, as well as ignoring targetPort when clusterIP=None +// LookupContainerPortNumberByServicePort implements +// the handling of resolving container named port, as well as ignoring targetPort when clusterIP=None // It returns an error when a named port can't find a match (with -1 returned), or when the service does not // declare such port (with the input port number returned). 
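Before the function body below, a sketch of how named-port resolution plays out, assuming the vendored `util` import path:

```go
package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/kubernetes/pkg/kubectl/util"
)

func main() {
	svc := v1.Service{Spec: v1.ServiceSpec{Ports: []v1.ServicePort{
		{Port: 80, TargetPort: intstr.FromString("http")},
	}}}
	pod := v1.Pod{Spec: v1.PodSpec{Containers: []v1.Container{
		{Ports: []v1.ContainerPort{{Name: "http", ContainerPort: 8080}}},
	}}}
	// Service port 80 targets the named port "http", which resolves to 8080.
	n, err := util.LookupContainerPortNumberByServicePort(svc, pod, 80)
	fmt.Println(n, err) // 8080 <nil>
}
```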
func LookupContainerPortNumberByServicePort(svc v1.Service, pod v1.Pod, port int32) (int32, error) { @@ -52,12 +39,21 @@ func LookupContainerPortNumberByServicePort(svc v1.Service, pod v1.Pod, port int if svcportspec.TargetPort.IntValue() == 0 { // targetPort is omitted, and the IntValue() would be zero return svcportspec.Port, nil - } else { - return int32(svcportspec.TargetPort.IntValue()), nil } - } else { - return lookupContainerPortNumberByName(pod, svcportspec.TargetPort.String()) + return int32(svcportspec.TargetPort.IntValue()), nil } + return LookupContainerPortNumberByName(pod, svcportspec.TargetPort.String()) } return port, fmt.Errorf("Service %s does not have a service port %d", svc.Name, port) } + +// LookupServicePortNumberByName finds the service port number by its named port name +func LookupServicePortNumberByName(svc v1.Service, name string) (int32, error) { + for _, svcportspec := range svc.Spec.Ports { + if svcportspec.Name == name { + return svcportspec.Port, nil + } + } + + return int32(-1), fmt.Errorf("Service '%s' does not have a named port '%s'", svc.Name, name) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/slice/slice.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/slice/slice.go index 8130753c300f..f997d5cb4059 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/slice/slice.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/slice/slice.go @@ -20,5 +20,19 @@ import ( "sort" ) -// Sorts []int64 in increasing order +// SortInts64 sorts []int64 in increasing order func SortInts64(a []int64) { sort.Slice(a, func(i, j int) bool { return a[i] < a[j] }) } + +// ContainsString checks if a given slice of strings contains the provided string. +// If a modifier func is provided, it is called with the slice item before the comparison. +func ContainsString(slice []string, s string, modifier func(s string) string) bool { + for _, item := range slice { + if item == s { + return true + } + if modifier != nil && modifier(item) == s { + return true + } + } + return false +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/storage/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/storage/BUILD new file mode 100644 index 000000000000..cd1ed24f7004 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/storage/BUILD @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/kubectl/util/storage", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/storage/storage.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/storage/storage.go new file mode 100644 index 000000000000..c62cc4ea1270 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/storage/storage.go @@ -0,0 +1,107 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "strings" +) + +// TODO(yue9944882): Remove this helper package once it's copied to k/api + +// IsDefaultStorageClassAnnotation represents a StorageClass annotation that +// marks a class as the default StorageClass +const IsDefaultStorageClassAnnotation = "storageclass.kubernetes.io/is-default-class" + +// BetaIsDefaultStorageClassAnnotation is the beta version of IsDefaultStorageClassAnnotation. +const BetaIsDefaultStorageClassAnnotation = "storageclass.beta.kubernetes.io/is-default-class" + +// IsDefaultAnnotationText returns a pretty Yes/No string if +// the annotation is set +func IsDefaultAnnotationText(obj metav1.ObjectMeta) string { + if obj.Annotations[IsDefaultStorageClassAnnotation] == "true" { + return "Yes" + } + if obj.Annotations[BetaIsDefaultStorageClassAnnotation] == "true" { + return "Yes" + } + + return "No" +} + +// GetAccessModesAsString returns a string representation of an array of access modes. +// modes, when present, are always in the same order: RWO,ROX,RWX. +func GetAccessModesAsString(modes []v1.PersistentVolumeAccessMode) string { + modes = removeDuplicateAccessModes(modes) + modesStr := []string{} + if containsAccessMode(modes, v1.ReadWriteOnce) { + modesStr = append(modesStr, "RWO") + } + if containsAccessMode(modes, v1.ReadOnlyMany) { + modesStr = append(modesStr, "ROX") + } + if containsAccessMode(modes, v1.ReadWriteMany) { + modesStr = append(modesStr, "RWX") + } + return strings.Join(modesStr, ",") +} + +// removeDuplicateAccessModes returns an array of access modes without any duplicates +func removeDuplicateAccessModes(modes []v1.PersistentVolumeAccessMode) []v1.PersistentVolumeAccessMode { + accessModes := []v1.PersistentVolumeAccessMode{} + for _, m := range modes { + if !containsAccessMode(accessModes, m) { + accessModes = append(accessModes, m) + } + } + return accessModes +} + +func containsAccessMode(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool { + for _, m := range modes { + if m == mode { + return true + } + } + return false +} + +// GetPersistentVolumeClass returns StorageClassName. +func GetPersistentVolumeClass(volume *v1.PersistentVolume) string { + // Use beta annotation first + if class, found := volume.Annotations[v1.BetaStorageClassAnnotation]; found { + return class + } + + return volume.Spec.StorageClassName +} + +// GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was +// requested, it returns "". 
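One subtlety in the claim helper below: the beta annotation takes precedence over `spec.storageClassName`. A small demonstration, assuming the vendored import path:

```go
package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/kubectl/util/storage"
)

func main() {
	scName := "fast-ssd"
	claim := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{
			v1.BetaStorageClassAnnotation: "legacy-class",
		}},
		Spec: v1.PersistentVolumeClaimSpec{StorageClassName: &scName},
	}
	// The beta annotation wins even though the spec field is set.
	fmt.Println(storage.GetPersistentVolumeClaimClass(claim)) // legacy-class
}
```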
+func GetPersistentVolumeClaimClass(claim *v1.PersistentVolumeClaim) string { + // Use beta annotation first + if class, found := claim.Annotations[v1.BetaStorageClassAnnotation]; found { + return class + } + + if claim.Spec.StorageClassName != nil { + return *claim.Spec.StorageClassName + } + + return "" +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/umask.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/umask.go index 93e14473c225..67add4e9140a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/umask.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/umask.go @@ -22,6 +22,7 @@ import ( "golang.org/x/sys/unix" ) +// Umask is a wrapper for `unix.Umask()` on non-Windows platforms func Umask(mask int) (old int, err error) { return unix.Umask(mask), nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/umask_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/umask_windows.go index 7a1ba15386f8..5b4f54bb795b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/umask_windows.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/umask_windows.go @@ -22,6 +22,7 @@ import ( "errors" ) +// Umask returns an error on Windows func Umask(mask int) (int, error) { return 0, errors.New("platform and architecture is not supported") } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/util.go index 41427780c71d..0c1973dfb8ed 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubectl/util/util.go @@ -41,6 +41,7 @@ func ParseRFC3339(s string, nowFn func() metav1.Time) (metav1.Time, error) { return metav1.Time{Time: t}, nil } +// HashObject returns the hash of a Object hash by a Codec func HashObject(obj runtime.Object, codec runtime.Codec) (string, error) { data, err := runtime.Encode(codec, obj) if err != nil { @@ -63,11 +64,11 @@ func ParseFileSource(source string) (keyName, filePath string, err error) { case numSeparators == 0: return path.Base(filepath.ToSlash(source)), source, nil case numSeparators == 1 && strings.HasPrefix(source, "="): - return "", "", fmt.Errorf("key name for file path %v missing.", strings.TrimPrefix(source, "=")) + return "", "", fmt.Errorf("key name for file path %v missing", strings.TrimPrefix(source, "=")) case numSeparators == 1 && strings.HasSuffix(source, "="): - return "", "", fmt.Errorf("file path for key name %v missing.", strings.TrimSuffix(source, "=")) + return "", "", fmt.Errorf("file path for key name %v missing", strings.TrimSuffix(source, "=")) case numSeparators > 1: - return "", "", errors.New("Key names or file paths cannot contain '='.") + return "", "", errors.New("Key names or file paths cannot contain '='") default: components := strings.Split(source, "=") return components[0], components[1], nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/BUILD index 62e4602f9291..fd5e6391c517 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/BUILD @@ -11,6 +11,7 @@ go_library( srcs = [ "active_deadline.go", "doc.go", + "errors.go", "kubelet.go", "kubelet_getters.go", "kubelet_network.go", @@ -39,14 +40,14 @@ go_library( 
"//pkg/apis/core/v1/helper:go_default_library", "//pkg/apis/core/v1/helper/qos:go_default_library", "//pkg/capabilities:go_default_library", - "//pkg/cloudprovider:go_default_library", "//pkg/features:go_default_library", "//pkg/fieldpath:go_default_library", "//pkg/kubelet/apis:go_default_library", "//pkg/kubelet/apis/config:go_default_library", "//pkg/kubelet/apis/cri:go_default_library", "//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library", - "//pkg/kubelet/apis/pluginregistration/v1alpha1:go_default_library", + "//pkg/kubelet/apis/pluginregistration/v1:go_default_library", + "//pkg/kubelet/apis/podresources:go_default_library", "//pkg/kubelet/cadvisor:go_default_library", "//pkg/kubelet/certificate:go_default_library", "//pkg/kubelet/checkpointmanager:go_default_library", @@ -96,8 +97,8 @@ go_library( "//pkg/kubelet/util/queue:go_default_library", "//pkg/kubelet/util/sliceutils:go_default_library", "//pkg/kubelet/volumemanager:go_default_library", - "//pkg/scheduler/algorithm:go_default_library", "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/api:go_default_library", "//pkg/security/apparmor:go_default_library", "//pkg/security/podsecuritypolicy/sysctl:go_default_library", "//pkg/securitycontext:go_default_library", @@ -117,6 +118,7 @@ go_library( "//pkg/volume/validation:go_default_library", "//staging/src/k8s.io/api/authentication/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -139,13 +141,14 @@ go_library( "//staging/src/k8s.io/client-go/util/certificate:go_default_library", "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", "//staging/src/k8s.io/client-go/util/integer:go_default_library", + "//staging/src/k8s.io/cloud-provider:go_default_library", "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library", "//third_party/forked/golang/expansion:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/golang/groupcache/lru:go_default_library", "//vendor/github.com/google/cadvisor/events:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/github.com/google/cadvisor/info/v2:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) @@ -163,6 +166,7 @@ go_test( "kubelet_resources_test.go", "kubelet_test.go", "kubelet_volumes_test.go", + "main_test.go", "oom_watcher_test.go", "pod_container_deletor_test.go", "pod_workers_test.go", @@ -204,15 +208,15 @@ go_test( "//pkg/kubelet/util/queue:go_default_library", "//pkg/kubelet/util/sliceutils:go_default_library", "//pkg/kubelet/volumemanager:go_default_library", - "//pkg/scheduler/algorithm:go_default_library", + "//pkg/scheduler/api:go_default_library", "//pkg/scheduler/cache:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/util/taints:go_default_library", "//pkg/version:go_default_library", "//pkg/volume:go_default_library", - "//pkg/volume/aws_ebs:go_default_library", + "//pkg/volume/awsebs:go_default_library", "//pkg/volume/azure_dd:go_default_library", - "//pkg/volume/gce_pd:go_default_library", + "//pkg/volume/gcepd:go_default_library", 
"//pkg/volume/host_path:go_default_library", "//pkg/volume/testing:go_default_library", "//pkg/volume/util:go_default_library", @@ -233,6 +237,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/BUILD index c4a2fb508722..926c8add6ca4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/BUILD @@ -13,7 +13,9 @@ go_library( "well_known_labels.go", ], importpath = "k8s.io/kubernetes/pkg/kubelet/apis", - deps = select({ + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + ] + select({ "@io_bazel_rules_go//go/platform:windows": [ "//pkg/features:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", @@ -37,8 +39,10 @@ filegroup( "//pkg/kubelet/apis/cri:all-srcs", "//pkg/kubelet/apis/deviceplugin/v1alpha:all-srcs", "//pkg/kubelet/apis/deviceplugin/v1beta1:all-srcs", + "//pkg/kubelet/apis/pluginregistration/v1:all-srcs", "//pkg/kubelet/apis/pluginregistration/v1alpha1:all-srcs", "//pkg/kubelet/apis/pluginregistration/v1beta1:all-srcs", + "//pkg/kubelet/apis/podresources:all-srcs", "//pkg/kubelet/apis/stats/v1alpha1:all-srcs", ], tags = ["automanaged"], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/doc.go index 9e59d5772bb7..78af11794764 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/doc.go @@ -15,6 +15,6 @@ limitations under the License. 
*/ // +k8s:deepcopy-gen=package -// TODO: This file should also enable openapi-gen like the other packages +// +groupName=kubelet.config.k8s.io package config diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/register.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/register.go index 450e5802fbf4..cbebd990ab97 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/register.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/register.go @@ -21,29 +21,21 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" ) -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -// GroupName is the group name use in this package +// GroupName is the group name used in this package const GroupName = "kubelet.config.k8s.io" // SchemeGroupVersion is group version used to register these objects var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a global function that registers this API group & version to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) +// addKnownTypes registers known types to the given scheme func addKnownTypes(scheme *runtime.Scheme) error { - // TODO this will get cleaned up with the scheme types are fixed scheme.AddKnownTypes(SchemeGroupVersion, &KubeletConfiguration{}, &SerializedNodeConfigSource{}, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go index ab2321574934..941d83748669 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go @@ -151,10 +151,16 @@ type KubeletConfiguration struct { // streamingConnectionIdleTimeout is the maximum time a streaming connection // can be idle before the connection is automatically closed. StreamingConnectionIdleTimeout metav1.Duration - // nodeStatusUpdateFrequency is the frequency that kubelet posts node - // status to master. Note: be cautious when changing the constant, it - // must work with nodeMonitorGracePeriod in nodecontroller. + // nodeStatusUpdateFrequency is the frequency that kubelet computes node + // status. If node lease feature is not enabled, it is also the frequency that + // kubelet posts node status to master. In that case, be cautious when + // changing the constant, it must work with nodeMonitorGracePeriod in nodecontroller. NodeStatusUpdateFrequency metav1.Duration + // nodeStatusReportFrequency is the frequency that kubelet posts node + // status to master if node status does not change. Kubelet will ignore this + // frequency and post node status immediately if any change is detected. It is + // only used when node lease feature is enabled. 
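The defaulting rule that pairs with this new field (applied in v1beta1/defaults.go further down in this diff) condenses to the logic below; `defaultReportFrequency` is an illustrative stand-in, not the vendored API:

```go
package main

import (
	"fmt"
	"time"
)

// defaultReportFrequency restates the backward-compatibility rule: an
// explicitly set NodeStatusUpdateFrequency used to control how often status
// was posted, so an unset report frequency inherits it; only when both are
// unset does the new one-minute default apply.
func defaultReportFrequency(update, report time.Duration) time.Duration {
	if report != 0 {
		return report // explicitly configured
	}
	if update != 0 {
		return update // inherit the legacy behaviour
	}
	return time.Minute
}

func main() {
	fmt.Println(defaultReportFrequency(0, 0))              // 1m0s
	fmt.Println(defaultReportFrequency(10*time.Second, 0)) // 10s
	fmt.Println(defaultReportFrequency(0, 5*time.Minute))  // 5m0s
}
```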
+ NodeStatusReportFrequency metav1.Duration // nodeLeaseDurationSeconds is the duration the Kubelet will set on its corresponding Lease. NodeLeaseDurationSeconds int32 // imageMinimumGCAge is the minimum age for an unused image before it is diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1/BUILD index 8ac93dc4d6f3..c747871adbc0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1/BUILD @@ -19,7 +19,6 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1", deps = [ - "//pkg/features:go_default_library", "//pkg/kubelet/apis/config:go_default_library", "//pkg/kubelet/qos:go_default_library", "//pkg/kubelet/types:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1/defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1/defaults.go index ca23a0b492c1..0aebb8447b84 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1/defaults.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1/defaults.go @@ -23,7 +23,6 @@ import ( kruntime "k8s.io/apimachinery/pkg/runtime" kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1" // TODO: Cut references to k8s.io/kubernetes, eventually there should be none from this package - "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/qos" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/master/ports" @@ -108,6 +107,16 @@ func SetDefaults_KubeletConfiguration(obj *kubeletconfigv1beta1.KubeletConfigura if obj.StreamingConnectionIdleTimeout == zeroDuration { obj.StreamingConnectionIdleTimeout = metav1.Duration{Duration: 4 * time.Hour} } + if obj.NodeStatusReportFrequency == zeroDuration { + // For backward compatibility, NodeStatusReportFrequency's default value is + // set to NodeStatusUpdateFrequency if NodeStatusUpdateFrequency is set + // explicitly. 
+ if obj.NodeStatusUpdateFrequency == zeroDuration { + obj.NodeStatusReportFrequency = metav1.Duration{Duration: time.Minute} + } else { + obj.NodeStatusReportFrequency = obj.NodeStatusUpdateFrequency + } + } if obj.NodeStatusUpdateFrequency == zeroDuration { obj.NodeStatusUpdateFrequency = metav1.Duration{Duration: 10 * time.Second} } @@ -159,7 +168,7 @@ func SetDefaults_KubeletConfiguration(obj *kubeletconfigv1beta1.KubeletConfigura if obj.CPUCFSQuota == nil { obj.CPUCFSQuota = utilpointer.BoolPtr(true) } - if obj.CPUCFSQuotaPeriod == nil && obj.FeatureGates[string(features.CPUCFSQuotaPeriod)] { + if obj.CPUCFSQuotaPeriod == nil { obj.CPUCFSQuotaPeriod = &metav1.Duration{Duration: 100 * time.Millisecond} } if obj.MaxOpenFiles == 0 { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1/register.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1/register.go index b458bb4d97fe..1b981b5791db 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1/register.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1/register.go @@ -21,15 +21,18 @@ import ( kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1" ) -// GroupName is the group name use in this package +// GroupName is the group name used in this package const GroupName = "kubelet.config.k8s.io" // SchemeGroupVersion is group version used to register these objects var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} var ( + // localSchemeBuilder extends the SchemeBuilder instance with the external types. In this package, + // defaulting and conversion init funcs are registered as well. localSchemeBuilder = &kubeletconfigv1beta1.SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme + // AddToScheme is a global function that registers this API group & version to a scheme + AddToScheme = localSchemeBuilder.AddToScheme ) func init() { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1/zz_generated.conversion.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1/zz_generated.conversion.go index 9e67bf9e9a99..b264f2423b25 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1/zz_generated.conversion.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1/zz_generated.conversion.go @@ -251,6 +251,7 @@ func autoConvert_v1beta1_KubeletConfiguration_To_config_KubeletConfiguration(in out.ClusterDNS = *(*[]string)(unsafe.Pointer(&in.ClusterDNS)) out.StreamingConnectionIdleTimeout = in.StreamingConnectionIdleTimeout out.NodeStatusUpdateFrequency = in.NodeStatusUpdateFrequency + out.NodeStatusReportFrequency = in.NodeStatusReportFrequency out.NodeLeaseDurationSeconds = in.NodeLeaseDurationSeconds out.ImageMinimumGCAge = in.ImageMinimumGCAge if err := v1.Convert_Pointer_int32_To_int32(&in.ImageGCHighThresholdPercent, &out.ImageGCHighThresholdPercent, s); err != nil { @@ -380,6 +381,7 @@ func autoConvert_config_KubeletConfiguration_To_v1beta1_KubeletConfiguration(in out.ClusterDNS = *(*[]string)(unsafe.Pointer(&in.ClusterDNS)) out.StreamingConnectionIdleTimeout = in.StreamingConnectionIdleTimeout out.NodeStatusUpdateFrequency = in.NodeStatusUpdateFrequency + out.NodeStatusReportFrequency = in.NodeStatusReportFrequency out.NodeLeaseDurationSeconds = in.NodeLeaseDurationSeconds out.ImageMinimumGCAge = in.ImageMinimumGCAge if err := 
v1.Convert_int32_To_Pointer_int32(&in.ImageGCHighThresholdPercent, &out.ImageGCHighThresholdPercent, s); err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/zz_generated.deepcopy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/zz_generated.deepcopy.go index 266a862943fc..1644f968c56b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/zz_generated.deepcopy.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/zz_generated.deepcopy.go @@ -112,6 +112,7 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) { } out.StreamingConnectionIdleTimeout = in.StreamingConnectionIdleTimeout out.NodeStatusUpdateFrequency = in.NodeStatusUpdateFrequency + out.NodeStatusReportFrequency = in.NodeStatusReportFrequency out.ImageMinimumGCAge = in.ImageMinimumGCAge out.VolumeStatsAggPeriod = in.VolumeStatsAggPeriod out.CPUManagerReconcilePeriod = in.CPUManagerReconcilePeriod diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2/api.pb.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2/api.pb.go index ca0cb6a918d6..d65bf34d2e17 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2/api.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2/api.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: api.proto -// DO NOT EDIT! /* Package v1alpha2 is a generated protocol buffer package. @@ -9046,24 +9045,6 @@ func (m *ReopenContainerLogResponse) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Api(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Api(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintApi(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -13542,51 +13523,14 @@ func (m *LinuxPodSandboxConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Sysctls == nil { 
m.Sysctls = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi @@ -13596,41 +13540,80 @@ func (m *LinuxPodSandboxConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Sysctls[mapkey] = mapvalue - } else { - var mapvalue string - m.Sysctls[mapkey] = mapvalue } + m.Sysctls[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -14019,51 +14002,14 @@ func (m *PodSandboxConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthApi - } - 
postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi @@ -14073,41 +14019,80 @@ func (m *PodSandboxConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex case 7: if wireType != 2 { @@ -14135,51 +14120,14 @@ func (m *PodSandboxConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Annotations == nil { m.Annotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi @@ -14189,41 +14137,80 @@ func (m *PodSandboxConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Annotations[mapkey] = mapvalue - } else { - var mapvalue string - m.Annotations[mapkey] = mapvalue } + m.Annotations[mapkey] = mapvalue iNdEx = postIndex case 8: if wireType != 2 { @@ -15293,51 +15280,14 @@ func (m *PodSandboxStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= 
(uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + if m.Labels == nil { + m.Labels = make(map[string]string) } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Labels == nil { - m.Labels = make(map[string]string) - } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi @@ -15347,41 +15297,80 @@ func (m *PodSandboxStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex case 8: if wireType != 2 { @@ -15409,51 +15398,14 @@ func (m *PodSandboxStatus) 
Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Annotations == nil { m.Annotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi @@ -15463,41 +15415,80 @@ func (m *PodSandboxStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - 
m.Annotations[mapkey] = mapvalue - } else { - var mapvalue string - m.Annotations[mapkey] = mapvalue } + m.Annotations[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -15608,51 +15599,14 @@ func (m *PodSandboxStatusResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Info == nil { m.Info = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi @@ -15662,41 +15616,80 @@ func (m *PodSandboxStatusResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return 
ErrInvalidLengthApi - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Info[mapkey] = mapvalue - } else { - var mapvalue string - m.Info[mapkey] = mapvalue } + m.Info[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -15905,51 +15898,14 @@ func (m *PodSandboxFilter) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.LabelSelector == nil { m.LabelSelector = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi @@ -15959,41 +15915,80 @@ func (m *PodSandboxFilter) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 
0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.LabelSelector[mapkey] = mapvalue - } else { - var mapvalue string - m.LabelSelector[mapkey] = mapvalue } + m.LabelSelector[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -16254,51 +16249,14 @@ func (m *PodSandbox) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi @@ -16308,41 +16266,80 @@ func (m *PodSandbox) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + 
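
Every rewritten map case ends in the same `else` branch seen here: rewind `iNdEx` to `entryPreIndex`, ask `skipApi` how many bytes the unknown field occupies, and jump over it while staying bounded by `postIndex`. Below is a hedged sketch of what such a skip helper computes; all names are hypothetical, and the deprecated group wire types (3 and 4), which the real `skipApi` also handles, are omitted for brevity.

```go
// Sketch of a skipApi-style helper: given a buffer positioned at a field
// tag, return the byte length of the whole field (tag + payload) so the
// caller can step over a field it does not recognize.
package main

import (
	"errors"
	"fmt"
)

func skipField(data []byte) (int, error) {
	tag, n, err := uvarint(data)
	if err != nil {
		return 0, err
	}
	switch tag & 7 { // the low three tag bits are the wire type
	case 0: // varint payload
		_, m, err := uvarint(data[n:])
		return n + m, err
	case 1: // fixed64
		return n + 8, boundsCheck(data, n+8)
	case 2: // length-delimited: varint length, then that many bytes
		l, m, err := uvarint(data[n:])
		if err != nil {
			return 0, err
		}
		return n + m + int(l), boundsCheck(data, n+m+int(l))
	case 5: // fixed32
		return n + 4, boundsCheck(data, n+4)
	default:
		return 0, errors.New("unsupported wire type")
	}
}

func uvarint(data []byte) (uint64, int, error) {
	var v uint64
	for i, shift := 0, uint(0); i < len(data); i, shift = i+1, shift+7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflow")
		}
		v |= uint64(data[i]&0x7f) << shift
		if data[i] < 0x80 {
			return v, i + 1, nil
		}
	}
	return 0, 0, errors.New("truncated varint")
}

func boundsCheck(data []byte, end int) error {
	if end > len(data) {
		return errors.New("truncated field")
	}
	return nil
}

func main() {
	// Field 3, wire type 0: tag 0x18, varint value 150 = 0x96 0x01.
	n, err := skipField([]byte{0x18, 0x96, 0x01, 0xff})
	fmt.Println(n, err) // 3 <nil>
}
```

Skipping by wire type rather than erroring keeps the decoder tolerant of encoders that emit fields it does not know about inside a map entry, which is what the bounded `iNdEx += skippy` loop in the generated code achieves.
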
} + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex case 6: if wireType != 2 { @@ -16370,51 +16367,14 @@ func (m *PodSandbox) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Annotations == nil { m.Annotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi @@ -16424,41 +16384,80 @@ func (m *PodSandbox) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := 
int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Annotations[mapkey] = mapvalue - } else { - var mapvalue string - m.Annotations[mapkey] = mapvalue } + m.Annotations[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -18644,59 +18643,22 @@ func (m *ContainerConfig) Unmarshal(dAtA []byte) error { } } if msglen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Devices = append(m.Devices, &Device{}) - if err := m.Devices[len(m.Devices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Devices = append(m.Devices, &Device{}) + if err := m.Devices[len(m.Devices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) } - var stringLenmapkey uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi @@ -18706,26 +18668,26 @@ func (m *ContainerConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if msglen < 0 { return ErrInvalidLengthApi } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + postIndex := iNdEx + msglen + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for 
shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi @@ -18735,41 +18697,80 @@ func (m *ContainerConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex case 10: if wireType != 2 { @@ -18797,51 +18798,14 @@ func (m *ContainerConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if 
m.Annotations == nil { m.Annotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi @@ -18851,41 +18815,80 @@ func (m *ContainerConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Annotations[mapkey] = mapvalue - } else { - var mapvalue string - m.Annotations[mapkey] = mapvalue } + m.Annotations[mapkey] = mapvalue iNdEx = postIndex case 11: if wireType != 2 { @@ -19908,51 +19911,14 @@ func (m *ContainerFilter) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return 
ErrInvalidLengthApi - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.LabelSelector == nil { m.LabelSelector = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi @@ -19962,41 +19928,80 @@ func (m *ContainerFilter) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.LabelSelector[mapkey] = mapvalue - } else { - var mapvalue string - m.LabelSelector[mapkey] = mapvalue } + m.LabelSelector[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -20283,50 +20288,12 @@ func (m *Container) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } m.ImageRef = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.State |= (ContainerState(b) & 0x7F) << shift - 
if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - m.CreatedAt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CreatedAt |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) } - var msglen int + m.State = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi @@ -20336,19 +20303,16 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + m.State |= (ContainerState(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) } - var keykey uint64 + m.CreatedAt = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi @@ -20358,12 +20322,16 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift + m.CreatedAt |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapkey uint64 + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi @@ -20373,26 +20341,26 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if msglen < 0 { return ErrInvalidLengthApi } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + postIndex := iNdEx + msglen + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi @@ -20402,41 +20370,80 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return 
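
The large hunk in `Container.Unmarshal` (`@@ -20283`) looks like a behavioral change, but reading the removed and re-added lines side by side shows the `State` (case 6) and `CreatedAt` (case 7) varint cases come back unchanged; the diff merely re-interleaves them around the rewritten `Labels` (case 8) and `Annotations` (case 9) map cases, which follow the same loop-based pattern as every other map field in this file.
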
io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex case 9: if wireType != 2 { @@ -20464,51 +20471,14 @@ func (m *Container) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Annotations == nil { m.Annotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi @@ -20518,41 +20488,80 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l 
{ + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Annotations[mapkey] = mapvalue - } else { - var mapvalue string - m.Annotations[mapkey] = mapvalue } + m.Annotations[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -21087,51 +21096,14 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi @@ -21141,41 +21113,80 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex case 13: if wireType != 2 { @@ -21196,58 +21207,21 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { break } } - if msglen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if msglen < 0 { return ErrInvalidLengthApi } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + postIndex := iNdEx + msglen + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Annotations == nil { m.Annotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi @@ -21257,41 +21231,80 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) 
error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Annotations[mapkey] = mapvalue - } else { - var mapvalue string - m.Annotations[mapkey] = mapvalue } + m.Annotations[mapkey] = mapvalue iNdEx = postIndex case 14: if wireType != 2 { @@ -21462,51 +21475,14 @@ func (m *ContainerStatusResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Info == nil { m.Info = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue 
string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi @@ -21516,41 +21492,80 @@ func (m *ContainerStatusResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Info[mapkey] = mapvalue - } else { - var mapvalue string - m.Info[mapkey] = mapvalue } + m.Info[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -23374,51 +23389,14 @@ func (m *ImageStatusResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := 
string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Info == nil { m.Info = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi @@ -23428,41 +23406,80 @@ func (m *ImageStatusResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Info[mapkey] = mapvalue - } else { - var mapvalue string - m.Info[mapkey] = mapvalue } + m.Info[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -24761,95 +24778,97 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := 
int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Info == nil { m.Info = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } - if iNdEx >= l { + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi + if skippy < 0 { + return ErrInvalidLengthApi } - if iNdEx >= l { + if (iNdEx + skippy) > postIndex { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthApi + iNdEx += skippy } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Info[mapkey] = mapvalue - } else { - var mapvalue string - m.Info[mapkey] = mapvalue } + m.Info[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -25677,51 +25696,14 @@ func (m *ContainerStatsFilter) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.LabelSelector == nil { m.LabelSelector = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi @@ -25731,41 +25713,80 @@ func (m *ContainerStatsFilter) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.LabelSelector[mapkey] = mapvalue - } else { - var mapvalue string - m.LabelSelector[mapkey] = mapvalue } + m.LabelSelector[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -25986,51 +26007,14 @@ func (m *ContainerAttributes) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 
7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi @@ -26040,41 +26024,80 @@ func (m *ContainerAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex 
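Every hunk in this generated file applies the same mechanical rewrite to map decoding: the old code read a key tag (`keykey`), then the key, then at most one optional value tag (`valuekey`), whereas the regenerated code loops over the whole map entry, dispatches on the decoded field number, and skips anything it does not recognize, so entries with a missing key, a missing value, or extra fields still decode. The following is a minimal standalone sketch of that pattern, not the generated code itself; `readUvarint` and `decodeMapEntry` are illustrative names, and the real code skips unknown fields of any wire type via `skipApi`.

```go
package main

import (
	"errors"
	"fmt"
)

var (
	errOverflow = errors.New("varint overflow")
	errEOF      = errors.New("unexpected EOF")
)

// readUvarint decodes a protobuf base-128 varint starting at dAtA[i],
// mirroring the shift/0x7F loops in the generated code.
func readUvarint(dAtA []byte, i int) (v uint64, next int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errOverflow
		}
		if i >= len(dAtA) {
			return 0, 0, errEOF
		}
		b := dAtA[i]
		i++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, i, nil
		}
	}
}

// decodeMapEntry decodes one map<string,string> entry: the key is field 1,
// the value is field 2, both length-delimited (wire type 2).
func decodeMapEntry(entry []byte) (key, value string, err error) {
	i := 0
	for i < len(entry) {
		wire, next, err := readUvarint(entry, i)
		if err != nil {
			return "", "", err
		}
		i = next
		if wt := int(wire & 0x7); wt != 2 {
			// The generated code handles this via skipApi for any wire type;
			// this sketch only supports length-delimited fields.
			return "", "", fmt.Errorf("unsupported wire type %d", wt)
		}
		strLen, next, err := readUvarint(entry, i)
		if err != nil {
			return "", "", err
		}
		i = next
		end := i + int(strLen)
		if end > len(entry) {
			return "", "", errEOF
		}
		switch int32(wire >> 3) {
		case 1:
			key = string(entry[i:end])
		case 2:
			value = string(entry[i:end])
		default:
			// Unknown field: skip its payload, as the regenerated code does.
		}
		i = end
	}
	return key, value, nil
}

func main() {
	// 0x0a = field 1 (key), wire type 2; 0x12 = field 2 (value), wire type 2.
	entry := []byte{0x0a, 0x03, 'f', 'o', 'o', 0x12, 0x03, 'b', 'a', 'r'}
	k, v, err := decodeMapEntry(entry)
	fmt.Println(k, v, err) // foo bar <nil>
}
```

The payoff of the rewritten loop is robustness: a serializer that emits value-before-key, omits an empty value, or appends extension fields no longer derails the parser, which is why the diff repeats this exact transformation for every `map[string]string` field (Labels, Annotations, Info, Envs, LabelSelector).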
case 4: if wireType != 2 { @@ -26102,51 +26125,14 @@ func (m *ContainerAttributes) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Annotations == nil { m.Annotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi @@ -26156,41 +26142,80 @@ func (m *ContainerAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue 
:= string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Annotations[mapkey] = mapvalue - } else { - var mapvalue string - m.Annotations[mapkey] = mapvalue } + m.Annotations[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1/api.pb.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1/api.pb.go index 2aeb99f3bab6..839a37b0d568 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1/api.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1/api.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: api.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. @@ -1137,24 +1136,6 @@ func (m *DeviceSpec) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Api(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Api(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintApi(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -2434,51 +2415,14 @@ func (m *ContainerAllocateResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Envs == nil { m.Envs = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi @@ -2488,41 +2432,80 @@ func (m *ContainerAllocateResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Envs[mapkey] = mapvalue - } else { - var mapvalue string - m.Envs[mapkey] = mapvalue } + m.Envs[mapkey] = mapvalue iNdEx = postIndex case 2: if wireType != 2 { @@ -2612,51 +2595,14 @@ func (m *ContainerAllocateResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Annotations == nil { m.Annotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi @@ -2666,41 +2612,80 @@ func (m *ContainerAllocateResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for 
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthApi - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Annotations[mapkey] = mapvalue - } else { - var mapvalue string - m.Annotations[mapkey] = mapvalue } + m.Annotations[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1/BUILD new file mode 100644 index 000000000000..644c05e1be24 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1/BUILD @@ -0,0 +1,34 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "api.pb.go", + "constants.go", + ], + importpath = "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1", + deps = [ + "//vendor/github.com/gogo/protobuf/gogoproto:go_default_library", + "//vendor/github.com/gogo/protobuf/proto:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1/api.pb.go 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1/api.pb.go new file mode 100644 index 000000000000..d478726ab25e --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1/api.pb.go @@ -0,0 +1,1008 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: api.proto + +/* + Package pluginregistration is a generated protocol buffer package. + + It is generated from these files: + api.proto + + It has these top-level messages: + PluginInfo + RegistrationStatus + RegistrationStatusResponse + InfoRequest +*/ +package pluginregistration + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// PluginInfo is the message sent from a plugin to the Kubelet pluginwatcher for plugin registration +type PluginInfo struct { + // Type of the Plugin. CSIPlugin or DevicePlugin + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Plugin name that uniquely identifies the plugin for the given plugin type. + // For DevicePlugin, this is the resource name that the plugin manages and + // should follow the extended resource name convention. + // For CSI, this is the CSI driver registrar name. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Optional endpoint location. If found set by Kubelet component, + // Kubelet component will use this endpoint for specific requests. + // This allows the plugin to register using one endpoint and possibly use + // a different socket for control operations. CSI uses this model to delegate + // its registration external from the plugin. + Endpoint string `protobuf:"bytes,3,opt,name=endpoint,proto3" json:"endpoint,omitempty"` + // Plugin service API versions the plugin supports. + // For DevicePlugin, this maps to the deviceplugin API versions the + // plugin supports at the given socket. + // The Kubelet component communicating with the plugin should be able + // to choose any preferred version from this list, or returns an error + // if none of the listed versions is supported. 
+ SupportedVersions []string `protobuf:"bytes,4,rep,name=supported_versions,json=supportedVersions" json:"supported_versions,omitempty"` +} + +func (m *PluginInfo) Reset() { *m = PluginInfo{} } +func (*PluginInfo) ProtoMessage() {} +func (*PluginInfo) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{0} } + +func (m *PluginInfo) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *PluginInfo) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PluginInfo) GetEndpoint() string { + if m != nil { + return m.Endpoint + } + return "" +} + +func (m *PluginInfo) GetSupportedVersions() []string { + if m != nil { + return m.SupportedVersions + } + return nil +} + +// RegistrationStatus is the message sent from Kubelet pluginwatcher to the plugin for notification on registration status +type RegistrationStatus struct { + // True if plugin gets registered successfully at Kubelet + PluginRegistered bool `protobuf:"varint,1,opt,name=plugin_registered,json=pluginRegistered,proto3" json:"plugin_registered,omitempty"` + // Error message in case plugin fails to register, empty string otherwise + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *RegistrationStatus) Reset() { *m = RegistrationStatus{} } +func (*RegistrationStatus) ProtoMessage() {} +func (*RegistrationStatus) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{1} } + +func (m *RegistrationStatus) GetPluginRegistered() bool { + if m != nil { + return m.PluginRegistered + } + return false +} + +func (m *RegistrationStatus) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +// RegistrationStatusResponse is sent by plugin to kubelet in response to RegistrationStatus RPC +type RegistrationStatusResponse struct { +} + +func (m *RegistrationStatusResponse) Reset() { *m = RegistrationStatusResponse{} } +func (*RegistrationStatusResponse) ProtoMessage() {} +func (*RegistrationStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{2} } + +// InfoRequest is the empty request message from Kubelet +type InfoRequest struct { +} + +func (m *InfoRequest) Reset() { *m = InfoRequest{} } +func (*InfoRequest) ProtoMessage() {} +func (*InfoRequest) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{3} } + +func init() { + proto.RegisterType((*PluginInfo)(nil), "pluginregistration.PluginInfo") + proto.RegisterType((*RegistrationStatus)(nil), "pluginregistration.RegistrationStatus") + proto.RegisterType((*RegistrationStatusResponse)(nil), "pluginregistration.RegistrationStatusResponse") + proto.RegisterType((*InfoRequest)(nil), "pluginregistration.InfoRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for Registration service + +type RegistrationClient interface { + GetInfo(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*PluginInfo, error) + NotifyRegistrationStatus(ctx context.Context, in *RegistrationStatus, opts ...grpc.CallOption) (*RegistrationStatusResponse, error) +} + +type registrationClient struct { + cc *grpc.ClientConn +} + +func NewRegistrationClient(cc *grpc.ClientConn) RegistrationClient { + return &registrationClient{cc} +} + +func (c *registrationClient) GetInfo(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*PluginInfo, error) { + out := new(PluginInfo) + err := grpc.Invoke(ctx, "/pluginregistration.Registration/GetInfo", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *registrationClient) NotifyRegistrationStatus(ctx context.Context, in *RegistrationStatus, opts ...grpc.CallOption) (*RegistrationStatusResponse, error) { + out := new(RegistrationStatusResponse) + err := grpc.Invoke(ctx, "/pluginregistration.Registration/NotifyRegistrationStatus", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Registration service + +type RegistrationServer interface { + GetInfo(context.Context, *InfoRequest) (*PluginInfo, error) + NotifyRegistrationStatus(context.Context, *RegistrationStatus) (*RegistrationStatusResponse, error) +} + +func RegisterRegistrationServer(s *grpc.Server, srv RegistrationServer) { + s.RegisterService(&_Registration_serviceDesc, srv) +} + +func _Registration_GetInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RegistrationServer).GetInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pluginregistration.Registration/GetInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RegistrationServer).GetInfo(ctx, req.(*InfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Registration_NotifyRegistrationStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RegistrationStatus) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RegistrationServer).NotifyRegistrationStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pluginregistration.Registration/NotifyRegistrationStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RegistrationServer).NotifyRegistrationStatus(ctx, req.(*RegistrationStatus)) + } + return interceptor(ctx, in, info, handler) +} + +var _Registration_serviceDesc = grpc.ServiceDesc{ + ServiceName: "pluginregistration.Registration", + HandlerType: (*RegistrationServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetInfo", + Handler: _Registration_GetInfo_Handler, + }, + { + MethodName: "NotifyRegistrationStatus", + Handler: _Registration_NotifyRegistrationStatus_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "api.proto", +} + +func (m *PluginInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + 
return dAtA[:n], nil +} + +func (m *PluginInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Type) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + } + if len(m.Name) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Endpoint) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.Endpoint))) + i += copy(dAtA[i:], m.Endpoint) + } + if len(m.SupportedVersions) > 0 { + for _, s := range m.SupportedVersions { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *RegistrationStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RegistrationStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.PluginRegistered { + dAtA[i] = 0x8 + i++ + if m.PluginRegistered { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Error) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.Error))) + i += copy(dAtA[i:], m.Error) + } + return i, nil +} + +func (m *RegistrationStatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RegistrationStatusResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *InfoRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InfoRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func encodeVarintApi(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *PluginInfo) Size() (n int) { + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.Endpoint) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if len(m.SupportedVersions) > 0 { + for _, s := range m.SupportedVersions { + l = len(s) + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *RegistrationStatus) Size() (n int) { + var l int + _ = l + if m.PluginRegistered { + n += 2 + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *RegistrationStatusResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *InfoRequest) Size() (n int) { + var l int + _ = l + return n +} + +func sovApi(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozApi(x uint64) (n int) { + return sovApi(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *PluginInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PluginInfo{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Endpoint:` + fmt.Sprintf("%v", this.Endpoint) + `,`, + 
`SupportedVersions:` + fmt.Sprintf("%v", this.SupportedVersions) + `,`, + `}`, + }, "") + return s +} +func (this *RegistrationStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RegistrationStatus{`, + `PluginRegistered:` + fmt.Sprintf("%v", this.PluginRegistered) + `,`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `}`, + }, "") + return s +} +func (this *RegistrationStatusResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RegistrationStatusResponse{`, + `}`, + }, "") + return s +} +func (this *InfoRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&InfoRequest{`, + `}`, + }, "") + return s +} +func valueToStringApi(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *PluginInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Endpoint", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Endpoint = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
SupportedVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SupportedVersions = append(m.SupportedVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RegistrationStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RegistrationStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RegistrationStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PluginRegistered", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.PluginRegistered = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RegistrationStatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RegistrationStatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
RegistrationStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InfoRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InfoRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipApi(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthApi + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipApi(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowApi = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("api.proto", fileDescriptorApi) } + +var fileDescriptorApi = []byte{ + // 337 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x52, 0x41, 0x4b, 0x33, 0x31, + 0x14, 
0xdc, 0x7c, 0xed, 0xa7, 0xed, 0x53, 0xc1, 0x06, 0x0f, 0xcb, 0x52, 0x62, 0xd9, 0x83, 0x14, + 0xa4, 0x5b, 0xd0, 0x7f, 0xe0, 0x45, 0x04, 0x11, 0x89, 0xa0, 0xc7, 0xb2, 0xb5, 0xaf, 0x6b, 0xc0, + 0x26, 0x31, 0xc9, 0x0a, 0x3d, 0xe9, 0x4f, 0xf0, 0x67, 0xf5, 0x28, 0x9e, 0x3c, 0xda, 0xf5, 0x8f, + 0x48, 0xb3, 0x65, 0x2d, 0xb4, 0x07, 0x6f, 0x6f, 0xe6, 0x4d, 0x1e, 0x33, 0x43, 0xa0, 0x99, 0x6a, + 0x91, 0x68, 0xa3, 0x9c, 0xa2, 0x54, 0x3f, 0xe6, 0x99, 0x90, 0x06, 0x33, 0x61, 0x9d, 0x49, 0x9d, + 0x50, 0x32, 0xea, 0x65, 0xc2, 0x3d, 0xe4, 0xc3, 0xe4, 0x5e, 0x4d, 0xfa, 0x99, 0xca, 0x54, 0xdf, + 0x4b, 0x87, 0xf9, 0xd8, 0x23, 0x0f, 0xfc, 0x54, 0x9e, 0x88, 0x5f, 0x00, 0xae, 0xfd, 0x91, 0x0b, + 0x39, 0x56, 0x94, 0x42, 0xdd, 0x4d, 0x35, 0x86, 0xa4, 0x43, 0xba, 0x4d, 0xee, 0xe7, 0x05, 0x27, + 0xd3, 0x09, 0x86, 0xff, 0x4a, 0x6e, 0x31, 0xd3, 0x08, 0x1a, 0x28, 0x47, 0x5a, 0x09, 0xe9, 0xc2, + 0x9a, 0xe7, 0x2b, 0x4c, 0x7b, 0x40, 0x6d, 0xae, 0xb5, 0x32, 0x0e, 0x47, 0x83, 0x67, 0x34, 0x56, + 0x28, 0x69, 0xc3, 0x7a, 0xa7, 0xd6, 0x6d, 0xf2, 0x56, 0xb5, 0xb9, 0x5d, 0x2e, 0xe2, 0x3b, 0xa0, + 0x7c, 0xc5, 0xff, 0x8d, 0x4b, 0x5d, 0x6e, 0xe9, 0x31, 0xb4, 0xca, 0x6c, 0x83, 0x32, 0x1c, 0x1a, + 0x1c, 0x79, 0x57, 0x0d, 0xbe, 0x5f, 0x2e, 0x78, 0xc5, 0xd3, 0x03, 0xf8, 0x8f, 0xc6, 0x28, 0xb3, + 0xb4, 0x58, 0x82, 0xb8, 0x0d, 0xd1, 0xfa, 0x61, 0x8e, 0x56, 0x2b, 0x69, 0x31, 0xde, 0x83, 0x9d, + 0x45, 0x62, 0x8e, 0x4f, 0x39, 0x5a, 0x77, 0xf2, 0x41, 0x60, 0x77, 0x55, 0x4d, 0x2f, 0x61, 0xfb, + 0x1c, 0x9d, 0x2f, 0xe5, 0x30, 0x59, 0xaf, 0x39, 0x59, 0x79, 0x1c, 0xb1, 0x4d, 0x82, 0xdf, 0x56, + 0xe3, 0x80, 0x3a, 0x08, 0xaf, 0x94, 0x13, 0xe3, 0xe9, 0x86, 0xa8, 0x47, 0x9b, 0x5e, 0xaf, 0xeb, + 0xa2, 0xe4, 0x6f, 0xba, 0x2a, 0x61, 0x70, 0xd6, 0x9e, 0xcd, 0x19, 0xf9, 0x9c, 0xb3, 0xe0, 0xb5, + 0x60, 0x64, 0x56, 0x30, 0xf2, 0x5e, 0x30, 0xf2, 0x55, 0x30, 0xf2, 0xf6, 0xcd, 0x82, 0xe1, 0x96, + 0xff, 0x00, 0xa7, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xe4, 0xc0, 0xe3, 0x42, 0x50, 0x02, 0x00, + 0x00, +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1alpha1/api.proto b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1/api.proto similarity index 100% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1alpha1/api.proto rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1/api.proto diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1/constants.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1/constants.go new file mode 100644 index 000000000000..7708f758fa2b --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1/constants.go @@ -0,0 +1,24 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package pluginregistration + +const ( + // CSIPlugin identifier for registered CSI plugins + CSIPlugin = "CSIPlugin" + // DevicePlugin identifier for registered device plugins + DevicePlugin = "DevicePlugin" +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1alpha1/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1alpha1/BUILD deleted file mode 100644 index 8939e14df198..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1alpha1/BUILD +++ /dev/null @@ -1,34 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "api.pb.go", - "constants.go", - ], - importpath = "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1alpha1", - deps = [ - "//vendor/github.com/gogo/protobuf/gogoproto:go_default_library", - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/google.golang.org/grpc:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1alpha1/api.pb.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1alpha1/api.pb.go deleted file mode 100644 index 96e1d571dba0..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1alpha1/api.pb.go +++ /dev/null @@ -1,1027 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: api.proto -// DO NOT EDIT! - -/* - Package pluginregistration is a generated protocol buffer package. - - It is generated from these files: - api.proto - - It has these top-level messages: - PluginInfo - RegistrationStatus - RegistrationStatusResponse - InfoRequest -*/ -package pluginregistration - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "github.com/gogo/protobuf/gogoproto" - -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -// PluginInfo is the message sent from a plugin to the Kubelet pluginwatcher for plugin registration -type PluginInfo struct { - // Type of the Plugin. CSIPlugin or DevicePlugin - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - // Plugin name that uniquely identifies the plugin for the given plugin type. - // For DevicePlugin, this is the resource name that the plugin manages and - // should follow the extended resource name convention. - // For CSI, this is the CSI driver registrar name. - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - // Optional endpoint location. If found set by Kubelet component, - // Kubelet component will use this endpoint for specific requests. - // This allows the plugin to register using one endpoint and possibly use - // a different socket for control operations. CSI uses this model to delegate - // its registration external from the plugin. - Endpoint string `protobuf:"bytes,3,opt,name=endpoint,proto3" json:"endpoint,omitempty"` - // Plugin service API versions the plugin supports. - // For DevicePlugin, this maps to the deviceplugin API versions the - // plugin supports at the given socket. - // The Kubelet component communicating with the plugin should be able - // to choose any preferred version from this list, or returns an error - // if none of the listed versions is supported. - SupportedVersions []string `protobuf:"bytes,4,rep,name=supported_versions,json=supportedVersions" json:"supported_versions,omitempty"` -} - -func (m *PluginInfo) Reset() { *m = PluginInfo{} } -func (*PluginInfo) ProtoMessage() {} -func (*PluginInfo) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{0} } - -func (m *PluginInfo) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *PluginInfo) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PluginInfo) GetEndpoint() string { - if m != nil { - return m.Endpoint - } - return "" -} - -func (m *PluginInfo) GetSupportedVersions() []string { - if m != nil { - return m.SupportedVersions - } - return nil -} - -// RegistrationStatus is the message sent from Kubelet pluginwatcher to the plugin for notification on registration status -type RegistrationStatus struct { - // True if plugin gets registered successfully at Kubelet - PluginRegistered bool `protobuf:"varint,1,opt,name=plugin_registered,json=pluginRegistered,proto3" json:"plugin_registered,omitempty"` - // Error message in case plugin fails to register, empty string otherwise - Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` -} - -func (m *RegistrationStatus) Reset() { *m = RegistrationStatus{} } -func (*RegistrationStatus) ProtoMessage() {} -func (*RegistrationStatus) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{1} } - -func (m *RegistrationStatus) GetPluginRegistered() bool { - if m != nil { - return m.PluginRegistered - } - return false -} - -func (m *RegistrationStatus) GetError() string { - if m != nil { - return m.Error - } - return "" -} - -// RegistrationStatusResponse is sent by plugin to kubelet in response to RegistrationStatus RPC -type RegistrationStatusResponse struct { -} - -func (m *RegistrationStatusResponse) Reset() { *m = RegistrationStatusResponse{} } -func (*RegistrationStatusResponse) ProtoMessage() {} -func (*RegistrationStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorApi, 
[]int{2} } - -// InfoRequest is the empty request message from Kubelet -type InfoRequest struct { -} - -func (m *InfoRequest) Reset() { *m = InfoRequest{} } -func (*InfoRequest) ProtoMessage() {} -func (*InfoRequest) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{3} } - -func init() { - proto.RegisterType((*PluginInfo)(nil), "pluginregistration.PluginInfo") - proto.RegisterType((*RegistrationStatus)(nil), "pluginregistration.RegistrationStatus") - proto.RegisterType((*RegistrationStatusResponse)(nil), "pluginregistration.RegistrationStatusResponse") - proto.RegisterType((*InfoRequest)(nil), "pluginregistration.InfoRequest") -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for Registration service - -type RegistrationClient interface { - GetInfo(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*PluginInfo, error) - NotifyRegistrationStatus(ctx context.Context, in *RegistrationStatus, opts ...grpc.CallOption) (*RegistrationStatusResponse, error) -} - -type registrationClient struct { - cc *grpc.ClientConn -} - -func NewRegistrationClient(cc *grpc.ClientConn) RegistrationClient { - return &registrationClient{cc} -} - -func (c *registrationClient) GetInfo(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*PluginInfo, error) { - out := new(PluginInfo) - err := grpc.Invoke(ctx, "/pluginregistration.Registration/GetInfo", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *registrationClient) NotifyRegistrationStatus(ctx context.Context, in *RegistrationStatus, opts ...grpc.CallOption) (*RegistrationStatusResponse, error) { - out := new(RegistrationStatusResponse) - err := grpc.Invoke(ctx, "/pluginregistration.Registration/NotifyRegistrationStatus", in, out, c.cc, opts...)
- if err != nil { - return nil, err - } - return out, nil -} - -// Server API for Registration service - -type RegistrationServer interface { - GetInfo(context.Context, *InfoRequest) (*PluginInfo, error) - NotifyRegistrationStatus(context.Context, *RegistrationStatus) (*RegistrationStatusResponse, error) -} - -func RegisterRegistrationServer(s *grpc.Server, srv RegistrationServer) { - s.RegisterService(&_Registration_serviceDesc, srv) -} - -func _Registration_GetInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(InfoRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(RegistrationServer).GetInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/pluginregistration.Registration/GetInfo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RegistrationServer).GetInfo(ctx, req.(*InfoRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Registration_NotifyRegistrationStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RegistrationStatus) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(RegistrationServer).NotifyRegistrationStatus(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/pluginregistration.Registration/NotifyRegistrationStatus", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RegistrationServer).NotifyRegistrationStatus(ctx, req.(*RegistrationStatus)) - } - return interceptor(ctx, in, info, handler) -} - -var _Registration_serviceDesc = grpc.ServiceDesc{ - ServiceName: "pluginregistration.Registration", - HandlerType: (*RegistrationServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetInfo", - Handler: _Registration_GetInfo_Handler, - }, - { - MethodName: "NotifyRegistrationStatus", - Handler: _Registration_NotifyRegistrationStatus_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "api.proto", -} - -func (m *PluginInfo) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PluginInfo) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Type) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintApi(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - } - if len(m.Name) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Endpoint) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintApi(dAtA, i, uint64(len(m.Endpoint))) - i += copy(dAtA[i:], m.Endpoint) - } - if len(m.SupportedVersions) > 0 { - for _, s := range m.SupportedVersions { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *RegistrationStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RegistrationStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if 
m.PluginRegistered { - dAtA[i] = 0x8 - i++ - if m.PluginRegistered { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if len(m.Error) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintApi(dAtA, i, uint64(len(m.Error))) - i += copy(dAtA[i:], m.Error) - } - return i, nil -} - -func (m *RegistrationStatusResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RegistrationStatusResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func (m *InfoRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *InfoRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func encodeFixed64Api(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Api(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintApi(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *PluginInfo) Size() (n int) { - var l int - _ = l - l = len(m.Type) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - l = len(m.Endpoint) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - if len(m.SupportedVersions) > 0 { - for _, s := range m.SupportedVersions { - l = len(s) - n += 1 + l + sovApi(uint64(l)) - } - } - return n -} - -func (m *RegistrationStatus) Size() (n int) { - var l int - _ = l - if m.PluginRegistered { - n += 2 - } - l = len(m.Error) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - return n -} - -func (m *RegistrationStatusResponse) Size() (n int) { - var l int - _ = l - return n -} - -func (m *InfoRequest) Size() (n int) { - var l int - _ = l - return n -} - -func sovApi(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozApi(x uint64) (n int) { - return sovApi(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *PluginInfo) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PluginInfo{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Endpoint:` + fmt.Sprintf("%v", this.Endpoint) + `,`, - `SupportedVersions:` + fmt.Sprintf("%v", this.SupportedVersions) + `,`, - `}`, - }, "") - return s -} -func (this *RegistrationStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RegistrationStatus{`, - `PluginRegistered:` + fmt.Sprintf("%v", this.PluginRegistered) + `,`, - `Error:` + fmt.Sprintf("%v", this.Error) + `,`, - `}`, - }, "") - return s -} -func (this *RegistrationStatusResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RegistrationStatusResponse{`, - `}`, - }, "") - return s -} 
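Aside from the import path, the Registration service deleted here in v1alpha1 is identical to the v1 copy added above: the kubelet pluginwatcher dials the plugin's socket, calls `GetInfo` to obtain a `PluginInfo`, and reports the outcome through `NotifyRegistrationStatus`. A minimal sketch of a plugin serving this API against the new v1 package follows; the socket path and driver name are hypothetical placeholders, not values from this diff:

```go
package main

import (
	"fmt"
	"net"

	"golang.org/x/net/context"
	"google.golang.org/grpc"

	registerapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1"
)

// registrationServer implements the generated RegistrationServer interface.
type registrationServer struct{}

// GetInfo answers the kubelet pluginwatcher's identity probe.
func (s *registrationServer) GetInfo(ctx context.Context, req *registerapi.InfoRequest) (*registerapi.PluginInfo, error) {
	return &registerapi.PluginInfo{
		Type:              registerapi.CSIPlugin, // or registerapi.DevicePlugin
		Name:              "example.csi.driver",  // hypothetical driver name
		SupportedVersions: []string{"v1.0.0"},
	}, nil
}

// NotifyRegistrationStatus receives the kubelet's verdict on the registration.
func (s *registrationServer) NotifyRegistrationStatus(ctx context.Context, status *registerapi.RegistrationStatus) (*registerapi.RegistrationStatusResponse, error) {
	if !status.PluginRegistered {
		return nil, fmt.Errorf("registration failed: %s", status.Error)
	}
	return &registerapi.RegistrationStatusResponse{}, nil
}

func main() {
	// The kubelet pluginwatcher discovers plugins by watching a socket
	// directory; the exact path is a deployment detail (placeholder here).
	lis, err := net.Listen("unix", "/var/lib/kubelet/plugins/example.sock")
	if err != nil {
		panic(err)
	}
	srv := grpc.NewServer()
	registerapi.RegisterRegistrationServer(srv, &registrationServer{})
	srv.Serve(lis)
}
```

Note the direction of the handshake: the plugin never dials the kubelet. It only exposes the socket, and success or failure arrives asynchronously via `NotifyRegistrationStatus`.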
-func (this *InfoRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&InfoRequest{`, - `}`, - }, "") - return s -} -func valueToStringApi(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *PluginInfo) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PluginInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PluginInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Endpoint", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Endpoint = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SupportedVersions", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SupportedVersions = append(m.SupportedVersions, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - 
default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RegistrationStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RegistrationStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RegistrationStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PluginRegistered", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.PluginRegistered = bool(v != 0) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RegistrationStatusResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RegistrationStatusResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RegistrationStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *InfoRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if 
shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: InfoRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: InfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipApi(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthApi - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipApi(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowApi = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("api.proto", fileDescriptorApi) } - -var fileDescriptorApi = []byte{ - // 337 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x52, 0x41, 0x4b, 0x33, 0x31, - 0x14, 0xdc, 0x7c, 0xed, 0xa7, 0xed, 0x53, 0xc1, 0x06, 0x0f, 0xcb, 0x52, 0x62, 0xd9, 0x83, 0x14, - 0xa4, 0x5b, 0xd0, 0x7f, 0xe0, 0x45, 0x04, 0x11, 0x89, 0xa0, 0xc7, 0xb2, 0xb5, 0xaf, 0x6b, 0xc0, - 0x26, 0x31, 0xc9, 0x0a, 0x3d, 0xe9, 0x4f, 0xf0, 0x67, 0xf5, 0x28, 0x9e, 0x3c, 0xda, 0xf5, 0x8f, - 0x48, 0xb3, 0x65, 0x2d, 0xb4, 0x07, 0x6f, 0x6f, 0xe6, 0x4d, 0x1e, 0x33, 0x43, 0xa0, 0x99, 0x6a, - 0x91, 0x68, 0xa3, 0x9c, 0xa2, 0x54, 0x3f, 0xe6, 0x99, 0x90, 0x06, 0x33, 0x61, 0x9d, 0x49, 0x9d, - 0x50, 0x32, 0xea, 0x65, 0xc2, 0x3d, 0xe4, 0xc3, 0xe4, 0x5e, 0x4d, 0xfa, 0x99, 0xca, 0x54, 0xdf, - 0x4b, 0x87, 
0xf9, 0xd8, 0x23, 0x0f, 0xfc, 0x54, 0x9e, 0x88, 0x5f, 0x00, 0xae, 0xfd, 0x91, 0x0b, - 0x39, 0x56, 0x94, 0x42, 0xdd, 0x4d, 0x35, 0x86, 0xa4, 0x43, 0xba, 0x4d, 0xee, 0xe7, 0x05, 0x27, - 0xd3, 0x09, 0x86, 0xff, 0x4a, 0x6e, 0x31, 0xd3, 0x08, 0x1a, 0x28, 0x47, 0x5a, 0x09, 0xe9, 0xc2, - 0x9a, 0xe7, 0x2b, 0x4c, 0x7b, 0x40, 0x6d, 0xae, 0xb5, 0x32, 0x0e, 0x47, 0x83, 0x67, 0x34, 0x56, - 0x28, 0x69, 0xc3, 0x7a, 0xa7, 0xd6, 0x6d, 0xf2, 0x56, 0xb5, 0xb9, 0x5d, 0x2e, 0xe2, 0x3b, 0xa0, - 0x7c, 0xc5, 0xff, 0x8d, 0x4b, 0x5d, 0x6e, 0xe9, 0x31, 0xb4, 0xca, 0x6c, 0x83, 0x32, 0x1c, 0x1a, - 0x1c, 0x79, 0x57, 0x0d, 0xbe, 0x5f, 0x2e, 0x78, 0xc5, 0xd3, 0x03, 0xf8, 0x8f, 0xc6, 0x28, 0xb3, - 0xb4, 0x58, 0x82, 0xb8, 0x0d, 0xd1, 0xfa, 0x61, 0x8e, 0x56, 0x2b, 0x69, 0x31, 0xde, 0x83, 0x9d, - 0x45, 0x62, 0x8e, 0x4f, 0x39, 0x5a, 0x77, 0xf2, 0x41, 0x60, 0x77, 0x55, 0x4d, 0x2f, 0x61, 0xfb, - 0x1c, 0x9d, 0x2f, 0xe5, 0x30, 0x59, 0xaf, 0x39, 0x59, 0x79, 0x1c, 0xb1, 0x4d, 0x82, 0xdf, 0x56, - 0xe3, 0x80, 0x3a, 0x08, 0xaf, 0x94, 0x13, 0xe3, 0xe9, 0x86, 0xa8, 0x47, 0x9b, 0x5e, 0xaf, 0xeb, - 0xa2, 0xe4, 0x6f, 0xba, 0x2a, 0x61, 0x70, 0xd6, 0x9e, 0xcd, 0x19, 0xf9, 0x9c, 0xb3, 0xe0, 0xb5, - 0x60, 0x64, 0x56, 0x30, 0xf2, 0x5e, 0x30, 0xf2, 0x55, 0x30, 0xf2, 0xf6, 0xcd, 0x82, 0xe1, 0x96, - 0xff, 0x00, 0xa7, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xe4, 0xc0, 0xe3, 0x42, 0x50, 0x02, 0x00, - 0x00, -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1alpha1/constants.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1alpha1/constants.go deleted file mode 100644 index cfc1b7c6d7c9..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1alpha1/constants.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package pluginregistration - -const ( - CSIPlugin = "CSIPlugin" - DevicePlugin = "DevicePlugin" -) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/BUILD new file mode 100644 index 000000000000..8bca4b897022 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/BUILD @@ -0,0 +1,48 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "client.go", + "constants.go", + "server.go", + ], + importpath = "k8s.io/kubernetes/pkg/kubelet/apis/podresources", + visibility = ["//visibility:public"], + deps = [ + "//pkg/kubelet/apis/podresources/v1alpha1:go_default_library", + "//pkg/kubelet/util:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["server_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/kubelet/apis/podresources/v1alpha1:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/github.com/stretchr/testify/mock:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/kubelet/apis/podresources/v1alpha1:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/client.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/client.go new file mode 100644 index 000000000000..cb3ca222ed3e --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/client.go @@ -0,0 +1,44 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package podresources + +import ( + "context" + "fmt" + "time" + + "google.golang.org/grpc" + + podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1" + "k8s.io/kubernetes/pkg/kubelet/util" +) + +// GetClient returns a client for the PodResourcesLister grpc service +func GetClient(socket string, connectionTimeout time.Duration, maxMsgSize int) (podresourcesapi.PodResourcesListerClient, *grpc.ClientConn, error) { + addr, dialer, err := util.GetAddressAndDialer(socket) + if err != nil { + return nil, nil, err + } + ctx, cancel := context.WithTimeout(context.Background(), connectionTimeout) + defer cancel() + + conn, err := grpc.DialContext(ctx, addr, grpc.WithInsecure(), grpc.WithDialer(dialer), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize))) + if err != nil { + return nil, nil, fmt.Errorf("Error dialing socket %s: %v", socket, err) + } + return podresourcesapi.NewPodResourcesListerClient(conn), conn, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/constants.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/constants.go new file mode 100644 index 000000000000..6cc4c6a261a3 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/constants.go @@ -0,0 +1,22 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package podresources + +const ( + // Socket is the name of the podresources server socket + Socket = "kubelet" +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/server.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/server.go new file mode 100644 index 000000000000..f39e2b26ce0c --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/server.go @@ -0,0 +1,75 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package podresources + +import ( + "context" + + "k8s.io/api/core/v1" + "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1" +) + +// DevicesProvider knows how to provide the devices used by the given container +type DevicesProvider interface { + GetDevices(podUID, containerName string) []*v1alpha1.ContainerDevices +} + +// PodsProvider knows how to provide the pods admitted by the node +type PodsProvider interface { + GetPods() []*v1.Pod +} + +// podResourcesServer implements PodResourcesListerServer +type podResourcesServer struct { + podsProvider PodsProvider + devicesProvider DevicesProvider +} + +// NewPodResourcesServer returns a PodResourcesListerServer which lists pods provided by the PodsProvider +// with device information provided by the DevicesProvider +func NewPodResourcesServer(podsProvider PodsProvider, devicesProvider DevicesProvider) v1alpha1.PodResourcesListerServer { + return &podResourcesServer{ + podsProvider: podsProvider, + devicesProvider: devicesProvider, + } +} + +// List returns information about the resources assigned to pods on the node +func (p *podResourcesServer) List(ctx context.Context, req *v1alpha1.ListPodResourcesRequest) (*v1alpha1.ListPodResourcesResponse, error) { + pods := p.podsProvider.GetPods() + podResources := make([]*v1alpha1.PodResources, len(pods)) + + for i, pod := range pods { + pRes := v1alpha1.PodResources{ + Name: pod.Name, + Namespace: pod.Namespace, + Containers: make([]*v1alpha1.ContainerResources, len(pod.Spec.Containers)), + } + + for j, container := range pod.Spec.Containers { + pRes.Containers[j] = &v1alpha1.ContainerResources{ + Name: container.Name, + Devices: p.devicesProvider.GetDevices(string(pod.UID), container.Name), + } + } + podResources[i] = &pRes + } + + return &v1alpha1.ListPodResourcesResponse{ + PodResources: podResources, + }, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1/BUILD new file mode 100644 index 000000000000..a56deb71467c --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1/BUILD @@ -0,0 +1,28 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["api.pb.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/gogo/protobuf/gogoproto:go_default_library", + "//vendor/github.com/gogo/protobuf/proto:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1/api.pb.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1/api.pb.go new file mode 100644 index 000000000000..df4d4300f21a --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1/api.pb.go @@ -0,0 +1,1182 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: api.proto + +/* + Package v1alpha1 is a generated protocol buffer package. + + It is generated from these files: + api.proto + + It has these top-level messages: + ListPodResourcesRequest + ListPodResourcesResponse + PodResources + ContainerResources + ContainerDevices +*/ +package v1alpha1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// ListPodResourcesRequest is the request made to the PodResourcesLister service +type ListPodResourcesRequest struct { +} + +func (m *ListPodResourcesRequest) Reset() { *m = ListPodResourcesRequest{} } +func (*ListPodResourcesRequest) ProtoMessage() {} +func (*ListPodResourcesRequest) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{0} } + +// ListPodResourcesResponse is the response returned by List function +type ListPodResourcesResponse struct { + PodResources []*PodResources `protobuf:"bytes,1,rep,name=pod_resources,json=podResources" json:"pod_resources,omitempty"` +} + +func (m *ListPodResourcesResponse) Reset() { *m = ListPodResourcesResponse{} } +func (*ListPodResourcesResponse) ProtoMessage() {} +func (*ListPodResourcesResponse) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{1} } + +func (m *ListPodResourcesResponse) GetPodResources() []*PodResources { + if m != nil { + return m.PodResources + } + return nil +} + +// PodResources contains information about the node resources assigned to a pod +type PodResources struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + Containers []*ContainerResources `protobuf:"bytes,3,rep,name=containers" json:"containers,omitempty"` +} + +func (m *PodResources) Reset() { *m = PodResources{} } +func (*PodResources) ProtoMessage() {} +func (*PodResources) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{2} } + +func (m *PodResources) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PodResources) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *PodResources) GetContainers() []*ContainerResources { + if m != nil { + return m.Containers + } + return nil +} + +// ContainerResources contains information about the resources assigned to a container +type ContainerResources struct { + Name string 
`protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Devices []*ContainerDevices `protobuf:"bytes,2,rep,name=devices" json:"devices,omitempty"` +} + +func (m *ContainerResources) Reset() { *m = ContainerResources{} } +func (*ContainerResources) ProtoMessage() {} +func (*ContainerResources) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{3} } + +func (m *ContainerResources) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ContainerResources) GetDevices() []*ContainerDevices { + if m != nil { + return m.Devices + } + return nil +} + +// ContainerDevices contains information about the devices assigned to a container +type ContainerDevices struct { + ResourceName string `protobuf:"bytes,1,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` + DeviceIds []string `protobuf:"bytes,2,rep,name=device_ids,json=deviceIds" json:"device_ids,omitempty"` +} + +func (m *ContainerDevices) Reset() { *m = ContainerDevices{} } +func (*ContainerDevices) ProtoMessage() {} +func (*ContainerDevices) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{4} } + +func (m *ContainerDevices) GetResourceName() string { + if m != nil { + return m.ResourceName + } + return "" +} + +func (m *ContainerDevices) GetDeviceIds() []string { + if m != nil { + return m.DeviceIds + } + return nil +} + +func init() { + proto.RegisterType((*ListPodResourcesRequest)(nil), "v1alpha1.ListPodResourcesRequest") + proto.RegisterType((*ListPodResourcesResponse)(nil), "v1alpha1.ListPodResourcesResponse") + proto.RegisterType((*PodResources)(nil), "v1alpha1.PodResources") + proto.RegisterType((*ContainerResources)(nil), "v1alpha1.ContainerResources") + proto.RegisterType((*ContainerDevices)(nil), "v1alpha1.ContainerDevices") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for PodResourcesLister service + +type PodResourcesListerClient interface { + List(ctx context.Context, in *ListPodResourcesRequest, opts ...grpc.CallOption) (*ListPodResourcesResponse, error) +} + +type podResourcesListerClient struct { + cc *grpc.ClientConn +} + +func NewPodResourcesListerClient(cc *grpc.ClientConn) PodResourcesListerClient { + return &podResourcesListerClient{cc} +} + +func (c *podResourcesListerClient) List(ctx context.Context, in *ListPodResourcesRequest, opts ...grpc.CallOption) (*ListPodResourcesResponse, error) { + out := new(ListPodResourcesResponse) + err := grpc.Invoke(ctx, "/v1alpha1.PodResourcesLister/List", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for PodResourcesLister service + +type PodResourcesListerServer interface { + List(context.Context, *ListPodResourcesRequest) (*ListPodResourcesResponse, error) +} + +func RegisterPodResourcesListerServer(s *grpc.Server, srv PodResourcesListerServer) { + s.RegisterService(&_PodResourcesLister_serviceDesc, srv) +} + +func _PodResourcesLister_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListPodResourcesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PodResourcesListerServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v1alpha1.PodResourcesLister/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PodResourcesListerServer).List(ctx, req.(*ListPodResourcesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _PodResourcesLister_serviceDesc = grpc.ServiceDesc{ + ServiceName: "v1alpha1.PodResourcesLister", + HandlerType: (*PodResourcesListerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "List", + Handler: _PodResourcesLister_List_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "api.proto", +} + +func (m *ListPodResourcesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListPodResourcesRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *ListPodResourcesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListPodResourcesResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.PodResources) > 0 { + for _, msg := range m.PodResources { + dAtA[i] = 0xa + i++ + i = encodeVarintApi(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodResources) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Namespace) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + } + if len(m.Containers) > 0 { + for _, msg := range m.Containers { + dAtA[i] = 0x1a + i++ + i = encodeVarintApi(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ContainerResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerResources) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintApi(dAtA, 
i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Devices) > 0 { + for _, msg := range m.Devices { + dAtA[i] = 0x12 + i++ + i = encodeVarintApi(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ContainerDevices) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerDevices) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ResourceName) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.ResourceName))) + i += copy(dAtA[i:], m.ResourceName) + } + if len(m.DeviceIds) > 0 { + for _, s := range m.DeviceIds { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func encodeVarintApi(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *ListPodResourcesRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *ListPodResourcesResponse) Size() (n int) { + var l int + _ = l + if len(m.PodResources) > 0 { + for _, e := range m.PodResources { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *PodResources) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Containers) > 0 { + for _, e := range m.Containers { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *ContainerResources) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Devices) > 0 { + for _, e := range m.Devices { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *ContainerDevices) Size() (n int) { + var l int + _ = l + l = len(m.ResourceName) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if len(m.DeviceIds) > 0 { + for _, s := range m.DeviceIds { + l = len(s) + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func sovApi(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozApi(x uint64) (n int) { + return sovApi(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ListPodResourcesRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListPodResourcesRequest{`, + `}`, + }, "") + return s +} +func (this *ListPodResourcesResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListPodResourcesResponse{`, + `PodResources:` + strings.Replace(fmt.Sprintf("%v", this.PodResources), "PodResources", "PodResources", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodResources) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodResources{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Containers:` + strings.Replace(fmt.Sprintf("%v", this.Containers), "ContainerResources", "ContainerResources", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerResources) String() string { + if this == nil { + return 
"nil" + } + s := strings.Join([]string{`&ContainerResources{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Devices:` + strings.Replace(fmt.Sprintf("%v", this.Devices), "ContainerDevices", "ContainerDevices", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerDevices) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerDevices{`, + `ResourceName:` + fmt.Sprintf("%v", this.ResourceName) + `,`, + `DeviceIds:` + fmt.Sprintf("%v", this.DeviceIds) + `,`, + `}`, + }, "") + return s +} +func valueToStringApi(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ListPodResourcesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListPodResourcesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListPodResourcesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListPodResourcesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListPodResourcesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListPodResourcesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodResources = append(m.PodResources, &PodResources{}) + if err := m.PodResources[len(m.PodResources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + 
iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Containers = append(m.Containers, &ContainerResources{}) + if err := m.Containers[len(m.Containers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + 
case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Devices = append(m.Devices, &ContainerDevices{}) + if err := m.Devices[len(m.Devices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerDevices) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerDevices: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerDevices: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeviceIds", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeviceIds = append(m.DeviceIds, 
string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipApi(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthApi + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipApi(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowApi = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("api.proto", fileDescriptorApi) } + +var fileDescriptorApi = []byte{ + // 343 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x52, 0xb1, 0x4e, 0xc3, 0x30, + 0x10, 0xad, 0xdb, 0x0a, 0xc8, 0xd1, 0x4a, 0xc8, 0x03, 0x84, 0xaa, 0x58, 0xc5, 0x2c, 0x5d, 0x48, + 0xd5, 0xc2, 0x06, 0x13, 0xb0, 0x20, 0x21, 0x40, 0x19, 0x60, 0xa3, 0x4a, 0x13, 0xd3, 0x46, 0xa2, + 0xb1, 0x89, 0x93, 0x8e, 0x88, 0x4f, 0xe0, 0xb3, 0x3a, 0x32, 0x32, 0xd2, 0xf0, 0x23, 0x28, 0xb6, + 0xac, 0x04, 0x5a, 0x98, 0x7c, 0x77, 0xef, 0x9d, 0xdf, 0xf3, 0x9d, 0xc1, 0xf2, 0x44, 0xe8, 0x88, + 0x98, 0x27, 0x1c, 0x6f, 0xcc, 0xfa, 0xde, 0x93, 0x98, 0x78, 0xfd, 0xd6, 0xe1, 0x38, 0x4c, 0x26, + 0xe9, 0xc8, 0xf1, 0xf9, 0xb4, 0x37, 0xe6, 0x63, 0xde, 0x53, 0x84, 0x51, 0xfa, 0xa8, 0x32, 0x95, + 0xa8, 0x48, 0x37, 0xd2, 0x5d, 0xd8, 0xb9, 0x0a, 0x65, 0x72, 0xcb, 0x03, 0x97, 0x49, 0x9e, 0xc6, + 0x3e, 0x93, 0x2e, 0x7b, 0x4e, 0x99, 0x4c, 0xe8, 0x3d, 0xd8, 0xcb, 0x90, 0x14, 0x3c, 0x92, 0x0c, + 0x9f, 0x40, 0x53, 0xf0, 0x60, 0x18, 0x1b, 0xc0, 0x46, 0x9d, 0x5a, 0x77, 0x73, 0xb0, 0xed, 0x18, + 0x1f, 0xce, 0x8f, 0xb6, 0x86, 0x28, 0x65, 0xf4, 0x05, 0x1a, 0x65, 0x14, 0x63, 0xa8, 0x47, 0xde, + 0x94, 0xd9, 0xa8, 0x83, 0xba, 0x96, 0xab, 0x62, 0xdc, 0x06, 
0x2b, 0x3f, 0xa5, 0xf0, 0x7c, 0x66, + 0x57, 0x15, 0x50, 0x14, 0xf0, 0x29, 0x80, 0xcf, 0xa3, 0xc4, 0x0b, 0x23, 0x16, 0x4b, 0xbb, 0xa6, + 0xb4, 0xdb, 0x85, 0xf6, 0xb9, 0xc1, 0x0a, 0x07, 0x25, 0x3e, 0x7d, 0x00, 0xbc, 0xcc, 0x58, 0xe9, + 0xe2, 0x18, 0xd6, 0x03, 0x36, 0x0b, 0xf3, 0x07, 0x56, 0x95, 0x48, 0x6b, 0x85, 0xc8, 0x85, 0x66, + 0xb8, 0x86, 0x4a, 0xef, 0x60, 0xeb, 0x37, 0x88, 0x0f, 0xa0, 0x69, 0x86, 0x35, 0x2c, 0xc9, 0x34, + 0x4c, 0xf1, 0x3a, 0x97, 0xdb, 0x03, 0xd0, 0x77, 0x0c, 0xc3, 0x40, 0x2b, 0x5a, 0xae, 0xa5, 0x2b, + 0x97, 0x81, 0x1c, 0x30, 0xc0, 0xe5, 0xb9, 0xe5, 0xcb, 0x61, 0x31, 0xbe, 0x81, 0x7a, 0x1e, 0xe1, + 0xfd, 0xc2, 0xda, 0x1f, 0x1b, 0x6d, 0xd1, 0xff, 0x28, 0x7a, 0xb3, 0xb4, 0x72, 0xd6, 0x9e, 0x2f, + 0x08, 0xfa, 0x58, 0x90, 0xca, 0x6b, 0x46, 0xd0, 0x3c, 0x23, 0xe8, 0x3d, 0x23, 0xe8, 0x33, 0x23, + 0xe8, 0xed, 0x8b, 0x54, 0x46, 0x6b, 0xea, 0xdf, 0x1c, 0x7d, 0x07, 0x00, 0x00, 0xff, 0xff, 0xc0, + 0xce, 0xf2, 0x80, 0x7d, 0x02, 0x00, 0x00, +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1/api.proto b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1/api.proto new file mode 100644 index 000000000000..eedcd59d7370 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1/api.proto @@ -0,0 +1,48 @@ +// To regenerate api.pb.go run hack/update-generated-pod-resources.sh +syntax = 'proto3'; + +package v1alpha1; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option (gogoproto.goproto_stringer_all) = false; +option (gogoproto.stringer_all) = true; +option (gogoproto.goproto_getters_all) = true; +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_unrecognized_all) = false; + + +// PodResourcesLister is a service provided by the kubelet that provides information about the +// node resources consumed by pods and containers on the node +service PodResourcesLister { + rpc List(ListPodResourcesRequest) returns (ListPodResourcesResponse) {} +} + +// ListPodResourcesRequest is the request made to the PodResourcesLister service +message ListPodResourcesRequest {} + +// ListPodResourcesResponse is the response returned by List function +message ListPodResourcesResponse { + repeated PodResources pod_resources = 1; +} + +// PodResources contains information about the node resources assigned to a pod +message PodResources { + string name = 1; + string namespace = 2; + repeated ContainerResources containers = 3; +} + +// ContainerResources contains information about the resources assigned to a container +message ContainerResources { + string name = 1; + repeated ContainerDevices devices = 2; +} + +// ContainerDevices contains information about the devices assigned to a container +message ContainerDevices { + string resource_name = 1; + repeated string device_ids = 2; +} \ No newline at end of file diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/well_known_labels.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/well_known_labels.go index 5a0db552c82b..869952556dde 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/well_known_labels.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/apis/well_known_labels.go @@ -16,6 +16,12 @@ limitations under the License. 
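The api.proto above defines PodResourcesLister as a single unary RPC, so it can be exercised end to end with the bindings generated into api.pb.go. A minimal client sketch, assuming the kubelet serves this API on a unix socket; the socket path and timeout below are illustrative assumptions, not values set anywhere in this diff:

```go
package main

import (
	"context"
	"fmt"
	"net"
	"time"

	"google.golang.org/grpc"

	podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
)

func main() {
	// Hypothetical socket location; the kubelet's configuration decides the real path.
	const socket = "/var/lib/kubelet/pod-resources/kubelet.sock"

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Dial over the unix socket; grpc.WithDialer matches the gRPC vintage vendored here.
	conn, err := grpc.DialContext(ctx, socket,
		grpc.WithInsecure(),
		grpc.WithBlock(),
		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout("unix", addr, timeout)
		}))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client := podresourcesapi.NewPodResourcesListerClient(conn)
	resp, err := client.List(ctx, &podresourcesapi.ListPodResourcesRequest{})
	if err != nil {
		panic(err)
	}

	// Walk the pod -> container -> device hierarchy from the response.
	for _, pod := range resp.PodResources {
		for _, container := range pod.Containers {
			for _, dev := range container.Devices {
				fmt.Printf("%s/%s %s: %s -> %v\n",
					pod.Namespace, pod.Name, container.Name, dev.ResourceName, dev.DeviceIds)
			}
		}
	}
}
```

The hand-rolled Marshal/Unmarshal methods above emit ordinary proto3 wire format, so a stock protoc-generated client would interoperate with this service just as well.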
package apis +import ( + "strings" + + "k8s.io/apimachinery/pkg/util/sets" +) + const ( LabelHostname = "kubernetes.io/hostname" LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone" @@ -26,8 +32,78 @@ const ( LabelOS = "beta.kubernetes.io/os" LabelArch = "beta.kubernetes.io/arch" + + // GA versions of the legacy beta labels. + // TODO: update kubelet and controllers to set both beta and GA labels, then export these constants + labelZoneFailureDomainGA = "failure-domain.kubernetes.io/zone" + labelZoneRegionGA = "failure-domain.kubernetes.io/region" + labelInstanceTypeGA = "kubernetes.io/instance-type" + labelOSGA = "kubernetes.io/os" + labelArchGA = "kubernetes.io/arch" + + // LabelNamespaceSuffixKubelet is an allowed label namespace suffix kubelets can self-set ([*.]kubelet.kubernetes.io/*) + LabelNamespaceSuffixKubelet = "kubelet.kubernetes.io" + // LabelNamespaceSuffixNode is an allowed label namespace suffix kubelets can self-set ([*.]node.kubernetes.io/*) + LabelNamespaceSuffixNode = "node.kubernetes.io" + + // LabelNamespaceNodeRestriction is a forbidden label namespace that kubelets may not self-set when the NodeRestriction admission plugin is enabled + LabelNamespaceNodeRestriction = "node-restriction.kubernetes.io" ) // When the --failure-domains scheduler flag is not specified, // DefaultFailureDomains defines the set of label keys used when TopologyKey is empty in PreferredDuringScheduling anti-affinity. var DefaultFailureDomains string = LabelHostname + "," + LabelZoneFailureDomain + "," + LabelZoneRegion + +var kubeletLabels = sets.NewString( + LabelHostname, + LabelZoneFailureDomain, + LabelZoneRegion, + LabelInstanceType, + LabelOS, + LabelArch, + + labelZoneFailureDomainGA, + labelZoneRegionGA, + labelInstanceTypeGA, + labelOSGA, + labelArchGA, +) + +var kubeletLabelNamespaces = sets.NewString( + LabelNamespaceSuffixKubelet, + LabelNamespaceSuffixNode, +) + +// KubeletLabels returns the list of label keys kubelets are allowed to set on their own Node objects +func KubeletLabels() []string { + return kubeletLabels.List() +} + +// KubeletLabelNamespaces returns the list of label key namespaces kubelets are allowed to set on their own Node objects +func KubeletLabelNamespaces() []string { + return kubeletLabelNamespaces.List() +} + +// IsKubeletLabel returns true if the label key is one that kubelets are allowed to set on their own Node object. +// This checks if the key is in the KubeletLabels() list, or has a namespace in the KubeletLabelNamespaces() list. 
+func IsKubeletLabel(key string) bool { + if kubeletLabels.Has(key) { + return true + } + + namespace := getLabelNamespace(key) + for allowedNamespace := range kubeletLabelNamespaces { + if namespace == allowedNamespace || strings.HasSuffix(namespace, "."+allowedNamespace) { + return true + } + } + + return false +} + +func getLabelNamespace(key string) string { + if parts := strings.SplitN(key, "/", 2); len(parts) == 2 { + return parts[0] + } + return "" +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/BUILD index 7199ed3dfd71..d3370f13cc81 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/BUILD @@ -31,13 +31,13 @@ go_library( "//vendor/github.com/google/cadvisor/info/v2:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/cadvisor/cache/memory:go_default_library", "//vendor/github.com/google/cadvisor/container:go_default_library", "//vendor/github.com/google/cadvisor/fs:go_default_library", "//vendor/github.com/google/cadvisor/manager:go_default_library", "//vendor/github.com/google/cadvisor/metrics:go_default_library", "//vendor/github.com/google/cadvisor/utils/sysfs:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "@io_bazel_rules_go//go/platform:windows": [ "//pkg/kubelet/winstats:go_default_library", @@ -50,16 +50,19 @@ go_test( name = "go_default_test", srcs = [ "cadvisor_linux_test.go", + "main_test.go", "util_test.go", ], embed = [":go_default_library"], - deps = select({ + deps = [ + "//pkg/features:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library", + ] + select({ "@io_bazel_rules_go//go/platform:linux": [ "//pkg/apis/core/v1/helper:go_default_library", - "//pkg/features:go_default_library", "//pkg/kubelet/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/github.com/google/cadvisor/metrics:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_linux.go index 795408b6d416..d15907fe8694 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_linux.go @@ -26,7 +26,6 @@ import ( "path" "time" - "github.com/golang/glog" "github.com/google/cadvisor/cache/memory" cadvisormetrics "github.com/google/cadvisor/container" "github.com/google/cadvisor/events" @@ -35,6 +34,7 @@ import ( "github.com/google/cadvisor/manager" "github.com/google/cadvisor/metrics" "github.com/google/cadvisor/utils/sysfs" + "k8s.io/klog" "k8s.io/kubernetes/pkg/kubelet/types" ) @@ -67,7 +67,7 @@ func init() { f.DefValue = defaultValue f.Value.Set(defaultValue) } else { - glog.Errorf("Expected cAdvisor flag %q not found", name) + klog.Errorf("Expected cAdvisor flag %q not found", name) } } } @@ -104,18 +104,23 @@ func containerLabels(c 
*cadvisorapi.ContainerInfo) map[string]string { func New(imageFsInfoProvider ImageFsInfoProvider, rootPath string, usingLegacyStats bool) (Interface, error) { sysFs := sysfs.NewRealSysFs() - ignoreMetrics := cadvisormetrics.MetricSet{ - cadvisormetrics.NetworkTcpUsageMetrics: struct{}{}, - cadvisormetrics.NetworkUdpUsageMetrics: struct{}{}, - cadvisormetrics.PerCpuUsageMetrics: struct{}{}, - cadvisormetrics.ProcessSchedulerMetrics: struct{}{}, + includedMetrics := cadvisormetrics.MetricSet{ + cadvisormetrics.CpuUsageMetrics: struct{}{}, + cadvisormetrics.MemoryUsageMetrics: struct{}{}, + cadvisormetrics.CpuLoadMetrics: struct{}{}, + cadvisormetrics.DiskIOMetrics: struct{}{}, + cadvisormetrics.NetworkUsageMetrics: struct{}{}, + cadvisormetrics.AcceleratorUsageMetrics: struct{}{}, + cadvisormetrics.AppMetrics: struct{}{}, } - if !usingLegacyStats { - ignoreMetrics[cadvisormetrics.DiskUsageMetrics] = struct{}{} + if usingLegacyStats { + includedMetrics[cadvisormetrics.DiskUsageMetrics] = struct{}{} } + // collect metrics for all cgroups + rawContainerCgroupPathPrefixWhiteList := []string{"/"} // Create and start the cAdvisor container manager. - m, err := manager.New(memory.New(statsCacheDuration, nil), sysFs, maxHousekeepingInterval, allowDynamicHousekeeping, ignoreMetrics, http.DefaultClient) + m, err := manager.New(memory.New(statsCacheDuration, nil), sysFs, maxHousekeepingInterval, allowDynamicHousekeeping, includedMetrics, http.DefaultClient, rawContainerCgroupPathPrefixWhiteList) if err != nil { return nil, err } @@ -194,7 +199,7 @@ func (cc *cadvisorClient) getFsInfo(label string) (cadvisorapiv2.FsInfo, error) } // TODO(vmarmol): Handle this better when a label has more than one image filesystem. if len(res) > 1 { - glog.Warningf("More than one filesystem labeled %q: %#v. Only using the first one", label, res) + klog.Warningf("More than one filesystem labeled %q: %#v. 
Only using the first one", label, res) } return res[0], nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/util.go index 16596daa9d9a..9a7e8746436b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/util.go @@ -76,5 +76,5 @@ func EphemeralStorageCapacityFromFsInfo(info cadvisorapi2.FsInfo) v1.ResourceLis // UsingLegacyCadvisorStats returns true if container stats are provided by cadvisor instead of through the CRI func UsingLegacyCadvisorStats(runtime, runtimeEndpoint string) bool { return (runtime == kubetypes.DockerContainerRuntime && goruntime.GOOS == "linux") || - runtimeEndpoint == CrioSocket + runtimeEndpoint == CrioSocket || runtimeEndpoint == "unix://"+CrioSocket } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/BUILD index fe6599840285..5c85202a4b77 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/BUILD @@ -26,8 +26,8 @@ go_library( "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/util/certificate:go_default_library", "//staging/src/k8s.io/client-go/util/connrotation:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/OWNERS index dcb32d2b8c87..470b7a1c92d1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/OWNERS @@ -1,7 +1,7 @@ -reviewers: -- mikedanese -- liggitt -- awly approvers: -- mikedanese -- liggitt +- sig-auth-certificates-approvers +reviewers: +- sig-auth-certificates-reviewers +labels: +- sig/auth + diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap/BUILD index b0f21d503013..257ec672e180 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap/BUILD @@ -11,8 +11,13 @@ go_test( srcs = ["bootstrap_test.go"], embed = [":go_default_library"], deps = [ + "//staging/src/k8s.io/api/certificates/v1beta1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", + "//staging/src/k8s.io/client-go/util/cert:go_default_library", ], ) @@ -21,6 +26,7 @@ go_library( srcs = ["bootstrap.go"], importpath = "k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap", deps = [ + "//staging/src/k8s.io/api/certificates/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", @@ -34,7 +40,7 @@ go_library( "//staging/src/k8s.io/client-go/util/cert:go_default_library", "//staging/src/k8s.io/client-go/util/certificate:go_default_library", "//staging/src/k8s.io/client-go/util/certificate/csr:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap/bootstrap.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap/bootstrap.go index e8e87dcdc0fa..efe34ef15107 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap/bootstrap.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap/bootstrap.go @@ -18,20 +18,24 @@ package bootstrap import ( "context" + "crypto/sha512" + "crypto/x509/pkix" + "encoding/base64" "errors" "fmt" "os" "path/filepath" "time" - "github.com/golang/glog" + "k8s.io/klog" + certificates "k8s.io/api/certificates/v1beta1" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/types" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes/scheme" - certificates "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" + certificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" @@ -54,18 +58,18 @@ func LoadClientCert(kubeconfigPath string, bootstrapPath string, certDir string, return err } if ok { - glog.V(2).Infof("Kubeconfig %s exists and is valid, skipping bootstrap", kubeconfigPath) + klog.V(2).Infof("Kubeconfig %s exists and is valid, skipping bootstrap", kubeconfigPath) return nil } - glog.V(2).Info("Using bootstrap kubeconfig to generate TLS client cert, key and kubeconfig file") + klog.V(2).Info("Using bootstrap kubeconfig to generate TLS client cert, key and kubeconfig file") bootstrapClientConfig, err := loadRESTClientConfig(bootstrapPath) if err != nil { return fmt.Errorf("unable to load bootstrap kubeconfig: %v", err) } - bootstrapClient, err := certificates.NewForConfig(bootstrapClientConfig) + bootstrapClient, err := certificatesv1beta1.NewForConfig(bootstrapClientConfig) if err != nil { return fmt.Errorf("unable to create certificates signing request client: %v", err) } @@ -89,7 +93,7 @@ func LoadClientCert(kubeconfigPath string, bootstrapPath string, certDir string, // managed by the store. privKeyPath := filepath.Join(certDir, tmpPrivateKeyFile) if !verifyKeyData(keyData) { - glog.V(2).Infof("No valid private key and/or certificate found, reusing existing private key or creating a new one") + klog.V(2).Infof("No valid private key and/or certificate found, reusing existing private key or creating a new one") // Note: always call LoadOrGenerateKeyFile so that private key is // reused on next startup if CSR request fails. 
keyData, _, err = certutil.LoadOrGenerateKeyFile(privKeyPath) @@ -99,10 +103,10 @@ func LoadClientCert(kubeconfigPath string, bootstrapPath string, certDir string, } if err := waitForServer(*bootstrapClientConfig, 1*time.Minute); err != nil { - glog.Warningf("Error waiting for apiserver to come up: %v", err) + klog.Warningf("Error waiting for apiserver to come up: %v", err) } - certData, err := csr.RequestNodeCertificate(bootstrapClient.CertificateSigningRequests(), keyData, nodeName) + certData, err := requestNodeCertificate(bootstrapClient.CertificateSigningRequests(), keyData, nodeName) if err != nil { return err } @@ -110,7 +114,7 @@ func LoadClientCert(kubeconfigPath string, bootstrapPath string, certDir string, return err } if err := os.Remove(privKeyPath); err != nil && !os.IsNotExist(err) { - glog.V(2).Infof("failed cleaning up private key file %q: %v", privKeyPath, err) + klog.V(2).Infof("failed cleaning up private key file %q: %v", privKeyPath, err) } pemPath := store.CurrentPath() @@ -232,7 +236,7 @@ func waitForServer(cfg restclient.Config, deadline time.Duration) error { var connected bool wait.JitterUntil(func() { if _, err := cli.Get().AbsPath("/healthz").Do().Raw(); err != nil { - glog.Infof("Failed to connect to apiserver: %v", err) + klog.Infof("Failed to connect to apiserver: %v", err) return } cancel() @@ -244,3 +248,70 @@ func waitForServer(cfg restclient.Config, deadline time.Duration) error { } return nil } + +// requestNodeCertificate will create a certificate signing request for a node +// (Organization and CommonName for the CSR will be set as expected for node +// certificates) and send it to API server, then it will watch the object's +// status, once approved by API server, it will return the API server's issued +// certificate (pem-encoded). If there is any errors, or the watch timeouts, it +// will return an error. This is intended for use on nodes (kubelet and +// kubeadm). +func requestNodeCertificate(client certificatesv1beta1.CertificateSigningRequestInterface, privateKeyData []byte, nodeName types.NodeName) (certData []byte, err error) { + subject := &pkix.Name{ + Organization: []string{"system:nodes"}, + CommonName: "system:node:" + string(nodeName), + } + + privateKey, err := certutil.ParsePrivateKeyPEM(privateKeyData) + if err != nil { + return nil, fmt.Errorf("invalid private key for certificate request: %v", err) + } + csrData, err := certutil.MakeCSR(privateKey, subject, nil, nil) + if err != nil { + return nil, fmt.Errorf("unable to generate certificate request: %v", err) + } + + usages := []certificates.KeyUsage{ + certificates.UsageDigitalSignature, + certificates.UsageKeyEncipherment, + certificates.UsageClientAuth, + } + name := digestedName(privateKeyData, subject, usages) + req, err := csr.RequestCertificate(client, csrData, name, usages, privateKey) + if err != nil { + return nil, err + } + return csr.WaitForCertificate(client, req, 3600*time.Second) +} + +// This digest should include all the relevant pieces of the CSR we care about. +// We can't direcly hash the serialized CSR because of random padding that we +// regenerate every loop and we include usages which are not contained in the +// CSR. This needs to be kept up to date as we add new fields to the node +// certificates and with ensureCompatible. +func digestedName(privateKeyData []byte, subject *pkix.Name, usages []certificates.KeyUsage) string { + hash := sha512.New512_256() + + // Here we make sure two different inputs can't write the same stream + // to the hash. 
This delimiter is not in the base64.URLEncoding + // alphabet so there is no way to have spill over collisions. Without + // it 'CN:foo,ORG:bar' hashes to the same value as 'CN:foob,ORG:ar' + const delimiter = '|' + encode := base64.RawURLEncoding.EncodeToString + + write := func(data []byte) { + hash.Write([]byte(encode(data))) + hash.Write([]byte{delimiter}) + } + + write(privateKeyData) + write([]byte(subject.CommonName)) + for _, v := range subject.Organization { + write([]byte(v)) + } + for _, v := range usages { + write([]byte(v)) + } + + return "node-csr-" + encode(hash.Sum(nil)) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/transport.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/transport.go index 436bb8b4c968..442f4a8a379c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/transport.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/transport.go @@ -23,7 +23,7 @@ import ( "net/http" "time" - "github.com/golang/glog" + "k8s.io/klog" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/wait" @@ -105,18 +105,18 @@ func addCertRotation(stopCh <-chan struct{}, period time.Duration, clientConfig // the certificate has been deleted from disk or is otherwise corrupt if now.After(lastCertAvailable.Add(exitAfter)) { if clientCertificateManager.ServerHealthy() { - glog.Fatalf("It has been %s since a valid client cert was found and the server is responsive, exiting.", exitAfter) + klog.Fatalf("It has been %s since a valid client cert was found and the server is responsive, exiting.", exitAfter) } else { - glog.Errorf("It has been %s since a valid client cert was found, but the server is not responsive. A restart may be necessary to retrieve new initial credentials.", exitAfter) + klog.Errorf("It has been %s since a valid client cert was found, but the server is not responsive. A restart may be necessary to retrieve new initial credentials.", exitAfter) } } } else { // the certificate is expired if now.After(curr.Leaf.NotAfter) { if clientCertificateManager.ServerHealthy() { - glog.Fatalf("The currently active client certificate has expired and the server is responsive, exiting.") + klog.Fatalf("The currently active client certificate has expired and the server is responsive, exiting.") } else { - glog.Errorf("The currently active client certificate has expired, but the server is not responsive. A restart may be necessary to retrieve new initial credentials.") + klog.Errorf("The currently active client certificate has expired, but the server is not responsive. A restart may be necessary to retrieve new initial credentials.") } } lastCertAvailable = now @@ -129,7 +129,7 @@ func addCertRotation(stopCh <-chan struct{}, period time.Duration, clientConfig } lastCert = curr - glog.Infof("certificate rotation detected, shutting down client connections to start using new credentials") + klog.Infof("certificate rotation detected, shutting down client connections to start using new credentials") // The cert has been rotated. Close all existing connections to force the client // to reperform its TLS handshake with new cert. 
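The digestedName logic above relies on one small but load-bearing detail: every field is base64url-encoded before being written to the hash, and the '|' delimiter is not in that alphabet, so field boundaries can never shift between two different inputs. A standalone sketch of that property, using hypothetical field values rather than real CSR inputs:

```go
package main

import (
	"crypto/sha512"
	"encoding/base64"
	"fmt"
)

// digest hashes each field base64url-encoded and '|'-terminated, mirroring
// the delimiter scheme used by digestedName.
func digest(fields ...string) string {
	h := sha512.New512_256()
	enc := base64.RawURLEncoding.EncodeToString
	for _, f := range fields {
		h.Write([]byte(enc([]byte(f)))) // '|' cannot appear in the encoded bytes
		h.Write([]byte{'|'})
	}
	return enc(h.Sum(nil))
}

func main() {
	// With the delimiter, shifting bytes between fields changes the digest.
	fmt.Println(digest("foo", "bar") == digest("foob", "ar")) // false

	// Naive concatenation would not distinguish the two splits at all.
	fmt.Println("foo"+"bar" == "foob"+"ar") // true: same concatenated bytes
}
```

The same reasoning explains why the private key, subject fields, and usages are each written separately: the resulting node-csr-&lt;digest&gt; name is deterministic for an identical request, while distinct requests can never alias into the same hashed stream.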
// diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/checkpoint/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/checkpoint/BUILD index a57fff4cdbda..d8d1fe7fd30d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/checkpoint/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/checkpoint/BUILD @@ -10,7 +10,7 @@ go_library( "//pkg/kubelet/checkpointmanager:go_default_library", "//pkg/kubelet/checkpointmanager/checksum:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/checkpoint/checkpoint.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/checkpoint/checkpoint.go index e5801312d6a4..f1fa9bdb7ae6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/checkpoint/checkpoint.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/checkpoint/checkpoint.go @@ -20,7 +20,7 @@ import ( "encoding/json" "fmt" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/apis/core" @@ -82,7 +82,7 @@ func checkAnnotations(pod *v1.Pod) bool { //getPodKey returns the full qualified path for the pod checkpoint func getPodKey(pod *v1.Pod) string { - return fmt.Sprintf("Pod%v%v.yaml", delimiter, pod.GetUID()) + return fmt.Sprintf("%s%s%v.yaml", podPrefix, delimiter, pod.GetUID()) } // LoadPods Loads All Checkpoints from disk @@ -93,14 +93,14 @@ func LoadPods(cpm checkpointmanager.CheckpointManager) ([]*v1.Pod, error) { checkpointKeys := []string{} checkpointKeys, err = cpm.ListCheckpoints() if err != nil { - glog.Errorf("Failed to list checkpoints: %v", err) + klog.Errorf("Failed to list checkpoints: %v", err) } for _, key := range checkpointKeys { checkpoint := NewPodCheckpoint(nil) err := cpm.GetCheckpoint(key, checkpoint) if err != nil { - glog.Errorf("Failed to retrieve checkpoint for pod %q: %v", key, err) + klog.Errorf("Failed to retrieve checkpoint for pod %q: %v", key, err) continue } pods = append(pods, checkpoint.GetPod()) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cloudresource/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cloudresource/BUILD index ec93978d78bf..1ab5c46dec23 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cloudresource/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cloudresource/BUILD @@ -6,11 +6,11 @@ go_library( importpath = "k8s.io/kubernetes/pkg/kubelet/cloudresource", visibility = ["//visibility:public"], deps = [ - "//pkg/cloudprovider:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//staging/src/k8s.io/cloud-provider:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cloudresource/cloud_request_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cloudresource/cloud_request_manager.go index cea1affde7a9..dbc05f094dff 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cloudresource/cloud_request_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cloudresource/cloud_request_manager.go @@ -25,9 +25,9 @@ import ( 
"k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" - "github.com/golang/glog" + "k8s.io/klog" ) var nodeAddressesRetryPeriod = 5 * time.Second @@ -85,7 +85,7 @@ func (manager *cloudResourceSyncManager) NodeAddresses() ([]v1.NodeAddress, erro for { nodeAddresses, err := manager.getNodeAddressSafe() if len(nodeAddresses) == 0 && err == nil { - glog.V(5).Infof("Waiting for %v for cloud provider to provide node addresses", nodeAddressesRetryPeriod) + klog.V(5).Infof("Waiting for %v for cloud provider to provide node addresses", nodeAddressesRetryPeriod) time.Sleep(nodeAddressesRetryPeriod) continue } @@ -94,7 +94,7 @@ func (manager *cloudResourceSyncManager) NodeAddresses() ([]v1.NodeAddress, erro } func (manager *cloudResourceSyncManager) collectNodeAddresses(ctx context.Context, nodeName types.NodeName) { - glog.V(5).Infof("Requesting node addresses from cloud provider for node %q", nodeName) + klog.V(5).Infof("Requesting node addresses from cloud provider for node %q", nodeName) instances, ok := manager.cloud.Instances() if !ok { @@ -110,10 +110,10 @@ func (manager *cloudResourceSyncManager) collectNodeAddresses(ctx context.Contex nodeAddresses, err := instances.NodeAddresses(ctx, nodeName) if err != nil { manager.setNodeAddressSafe(nil, fmt.Errorf("failed to get node address from cloud provider: %v", err)) - glog.V(2).Infof("Node addresses from cloud provider for node %q not collected", nodeName) + klog.V(2).Infof("Node addresses from cloud provider for node %q not collected", nodeName) } else { manager.setNodeAddressSafe(nodeAddresses, nil) - glog.V(5).Infof("Node addresses from cloud provider for node %q collected", nodeName) + klog.V(5).Infof("Node addresses from cloud provider for node %q collected", nodeName) } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/BUILD index 6800caf05fdd..6ead9ea8c3dd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/BUILD @@ -11,10 +11,11 @@ go_library( "container_manager_unsupported.go", "container_manager_windows.go", "fake_internal_container_lifecycle.go", + "helpers.go", "helpers_linux.go", "helpers_unsupported.go", "internal_container_lifecycle.go", - "node_container_manager.go", + "node_container_manager_linux.go", "pod_container_manager_linux.go", "pod_container_manager_stub.go", "pod_container_manager_unsupported.go", @@ -26,6 +27,7 @@ go_library( deps = [ "//pkg/features:go_default_library", "//pkg/kubelet/apis/cri:go_default_library", + "//pkg/kubelet/apis/podresources/v1alpha1:go_default_library", "//pkg/kubelet/cm/cpumanager:go_default_library", "//pkg/kubelet/config:go_default_library", "//pkg/kubelet/container:go_default_library", @@ -39,7 +41,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:android": [ "//pkg/kubelet/cadvisor:go_default_library", @@ -77,8 +79,8 @@ go_library( "//pkg/util/oom:go_default_library", "//pkg/util/procfs:go_default_library", "//pkg/util/sysctl:go_default_library", - 
"//pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//vendor/github.com/docker/go-units:go_default_library", @@ -128,7 +130,7 @@ go_test( "cgroup_manager_test.go", "container_manager_linux_test.go", "helpers_linux_test.go", - "node_container_manager_test.go", + "node_container_manager_linux_test.go", "pod_container_manager_linux_test.go", ], embed = [":go_default_library"], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go index 6a4d65cd8588..f1c81369367f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go @@ -25,11 +25,11 @@ import ( "time" units "github.com/docker/go-units" - "github.com/golang/glog" libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups" cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs" cgroupsystemd "github.com/opencontainers/runc/libcontainer/cgroups/systemd" libcontainerconfigs "github.com/opencontainers/runc/libcontainer/configs" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/sets" utilfeature "k8s.io/apiserver/pkg/util/feature" @@ -67,7 +67,10 @@ func NewCgroupName(base CgroupName, components ...string) CgroupName { panic(fmt.Errorf("invalid character in component [%q] of CgroupName", component)) } } - return CgroupName(append(base, components...)) + // copy data from the base cgroup to eliminate cases where CgroupNames share underlying slices. See #68416 + baseCopy := make([]string, len(base)) + copy(baseCopy, base) + return CgroupName(append(baseCopy, components...)) } func escapeSystemdCgroupName(part string) string { @@ -204,18 +207,16 @@ func NewCgroupManager(cs *CgroupSubsystems, cgroupDriver string) CgroupManager { func (m *cgroupManagerImpl) Name(name CgroupName) string { if m.adapter.cgroupManagerType == libcontainerSystemd { return name.ToSystemd() - } else { - return name.ToCgroupfs() } + return name.ToCgroupfs() } // CgroupName converts the literal cgroupfs name on the host to an internal identifier. func (m *cgroupManagerImpl) CgroupName(name string) CgroupName { if m.adapter.cgroupManagerType == libcontainerSystemd { return ParseSystemdToCgroupName(name) - } else { - return ParseCgroupfsToCgroupName(name) } + return ParseCgroupfsToCgroupName(name) } // buildCgroupPaths builds a path to each cgroup subsystem for the specified name. @@ -257,6 +258,7 @@ func (m *cgroupManagerImpl) Exists(name CgroupName) bool { // once resolved, we can remove this code. whitelistControllers := sets.NewString("cpu", "cpuacct", "cpuset", "memory", "systemd") + var missingPaths []string // If even one cgroup path doesn't exist, then the cgroup doesn't exist. 
for controller, path := range cgroupPaths { // ignore mounts we don't care about @@ -264,10 +266,15 @@ func (m *cgroupManagerImpl) Exists(name CgroupName) bool { continue } if !libcontainercgroups.PathExists(path) { - return false + missingPaths = append(missingPaths, path) } } + if len(missingPaths) > 0 { + klog.V(4).Infof("The Cgroup %v has some missing paths: %v", name, missingPaths) + return false + } + return true } @@ -343,7 +350,7 @@ func setSupportedSubsystems(cgroupConfig *libcontainerconfigs.Cgroup) error { return fmt.Errorf("Failed to find subsystem mount for required subsystem: %v", sys.Name()) } // the cgroup is not mounted, but its not required so continue... - glog.V(6).Infof("Unable to find subsystem mount for optional subsystem: %v", sys.Name()) + klog.V(6).Infof("Unable to find subsystem mount for optional subsystem: %v", sys.Name()) continue } if err := sys.Set(cgroupConfig.Paths[sys.Name()], cgroupConfig); err != nil { @@ -505,7 +512,7 @@ func (m *cgroupManagerImpl) Pids(name CgroupName) []int { // WalkFunc which is called for each file and directory in the pod cgroup dir visitor := func(path string, info os.FileInfo, err error) error { if err != nil { - glog.V(4).Infof("cgroup manager encountered error scanning cgroup path %q: %v", path, err) + klog.V(4).Infof("cgroup manager encountered error scanning cgroup path %q: %v", path, err) return filepath.SkipDir } if !info.IsDir() { @@ -513,7 +520,7 @@ func (m *cgroupManagerImpl) Pids(name CgroupName) []int { } pids, err = getCgroupProcs(path) if err != nil { - glog.V(4).Infof("cgroup manager encountered error getting procs for cgroup path %q: %v", path, err) + klog.V(4).Infof("cgroup manager encountered error getting procs for cgroup path %q: %v", path, err) return filepath.SkipDir } pidsToKill.Insert(pids...) @@ -523,7 +530,7 @@ func (m *cgroupManagerImpl) Pids(name CgroupName) []int { // container cgroups haven't been GCed yet. Get attached processes to // all such unwanted containers under the pod cgroup if err = filepath.Walk(dir, visitor); err != nil { - glog.V(4).Infof("cgroup manager encountered error scanning pids for directory: %q: %v", dir, err) + klog.V(4).Infof("cgroup manager encountered error scanning pids for directory: %q: %v", dir, err) } } return pidsToKill.List() @@ -551,7 +558,7 @@ func getStatsSupportedSubsystems(cgroupPaths map[string]string) (*libcontainercg return nil, fmt.Errorf("Failed to find subsystem mount for required subsystem: %v", sys.Name()) } // the cgroup is not mounted, but its not required so continue... - glog.V(6).Infof("Unable to find subsystem mount for optional subsystem: %v", sys.Name()) + klog.V(6).Infof("Unable to find subsystem mount for optional subsystem: %v", sys.Name()) continue } if err := sys.GetStats(cgroupPaths[sys.Name()], stats); err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go index 8d467bf2054e..a9fe926ee045 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go @@ -23,6 +23,7 @@ import ( // TODO: Migrate kubelet to either use its own internal objects or client library. 
"k8s.io/api/core/v1" internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" + podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1" "k8s.io/kubernetes/pkg/kubelet/config" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api" @@ -95,7 +96,14 @@ type ContainerManager interface { // GetPodCgroupRoot returns the cgroup which contains all pods. GetPodCgroupRoot() string - GetPluginRegistrationHandlerCallback() pluginwatcher.RegisterCallbackFn + + // GetPluginRegistrationHandler returns a plugin registration handler + // The pluginwatcher's Handlers allow to have a single module for handling + // registration. + GetPluginRegistrationHandler() pluginwatcher.PluginHandler + + // GetDevices returns information about the devices assigned to pods and containers + GetDevices(podUID, containerName string) []*podresourcesapi.ContainerDevices } type NodeConfig struct { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go index 2f460ba8cbab..9432fc71adc4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go @@ -29,20 +29,22 @@ import ( "sync" "time" - "github.com/golang/glog" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/cgroups/fs" "github.com/opencontainers/runc/libcontainer/configs" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" + utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/record" kubefeatures "k8s.io/kubernetes/pkg/features" internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" + podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1" "k8s.io/kubernetes/pkg/kubelet/cadvisor" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager" "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager" @@ -59,7 +61,6 @@ import ( "k8s.io/kubernetes/pkg/util/oom" "k8s.io/kubernetes/pkg/util/procfs" utilsysctl "k8s.io/kubernetes/pkg/util/sysctl" - utilversion "k8s.io/kubernetes/pkg/util/version" ) const ( @@ -180,11 +181,11 @@ func validateSystemRequirements(mountUtil mount.Interface) (features, error) { // CPU cgroup is required and so it expected to be mounted at this point. 
periodExists, err := utilfile.FileExists(path.Join(cpuMountPoint, "cpu.cfs_period_us")) if err != nil { - glog.Errorf("failed to detect if CPU cgroup cpu.cfs_period_us is available - %v", err) + klog.Errorf("failed to detect if CPU cgroup cpu.cfs_period_us is available - %v", err) } quotaExists, err := utilfile.FileExists(path.Join(cpuMountPoint, "cpu.cfs_quota_us")) if err != nil { - glog.Errorf("failed to detect if CPU cgroup cpu.cfs_quota_us is available - %v", err) + klog.Errorf("failed to detect if CPU cgroup cpu.cfs_quota_us is available - %v", err) } if quotaExists && periodExists { f.cpuHardcapping = true @@ -244,12 +245,12 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I if !cgroupManager.Exists(cgroupRoot) { return nil, fmt.Errorf("invalid configuration: cgroup-root %q doesn't exist: %v", cgroupRoot, err) } - glog.Infof("container manager verified user specified cgroup-root exists: %v", cgroupRoot) + klog.Infof("container manager verified user specified cgroup-root exists: %v", cgroupRoot) // Include the top level cgroup for enforcing node allocatable into cgroup-root. // This way, all sub modules can avoid having to understand the concept of node allocatable. cgroupRoot = NewCgroupName(cgroupRoot, defaultNodeAllocatableCgroupName) } - glog.Infof("Creating Container Manager object based on Node Config: %+v", nodeConfig) + klog.Infof("Creating Container Manager object based on Node Config: %+v", nodeConfig) qosContainerManager, err := NewQOSContainerManager(subsystems, cgroupRoot, nodeConfig, cgroupManager) if err != nil { @@ -268,7 +269,7 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I qosContainerManager: qosContainerManager, } - glog.Infof("Creating device plugin manager: %t", devicePluginEnabled) + klog.Infof("Creating device plugin manager: %t", devicePluginEnabled) if devicePluginEnabled { cm.deviceManager, err = devicemanager.NewManagerImpl() } else { @@ -288,7 +289,7 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I nodeConfig.KubeletRootDir, ) if err != nil { - glog.Errorf("failed to initialize cpu manager: %v", err) + klog.Errorf("failed to initialize cpu manager: %v", err) return nil, err } } @@ -370,9 +371,9 @@ func setupKernelTunables(option KernelTunableBehavior) error { case KernelTunableError: errList = append(errList, fmt.Errorf("Invalid kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val)) case KernelTunableWarn: - glog.V(2).Infof("Invalid kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val) + klog.V(2).Infof("Invalid kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val) case KernelTunableModify: - glog.V(2).Infof("Updating kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val) + klog.V(2).Infof("Updating kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val) err = sysctl.SetSysctl(flag, expectedValue) if err != nil { errList = append(errList, err) @@ -424,13 +425,13 @@ func (cm *containerManagerImpl) setupNode(activePods ActivePodsFunc) error { // the cgroup for docker and serve stats for the runtime. // TODO(#27097): Fix this after NodeSpec is clearly defined. 
cm.periodicTasks = append(cm.periodicTasks, func() { - glog.V(4).Infof("[ContainerManager]: Adding periodic tasks for docker CRI integration") + klog.V(4).Infof("[ContainerManager]: Adding periodic tasks for docker CRI integration") cont, err := getContainerNameForProcess(dockerProcessName, dockerPidFile) if err != nil { - glog.Error(err) + klog.Error(err) return } - glog.V(2).Infof("[ContainerManager]: Discovered runtime cgroups name: %s", cont) + klog.V(2).Infof("[ContainerManager]: Discovered runtime cgroups name: %s", cont) cm.Lock() defer cm.Unlock() cm.RuntimeCgroupsName = cont @@ -467,12 +468,12 @@ func (cm *containerManagerImpl) setupNode(activePods ActivePodsFunc) error { } else { cm.periodicTasks = append(cm.periodicTasks, func() { if err := ensureProcessInContainerWithOOMScore(os.Getpid(), qos.KubeletOOMScoreAdj, nil); err != nil { - glog.Error(err) + klog.Error(err) return } cont, err := getContainer(os.Getpid()) if err != nil { - glog.Errorf("failed to find cgroups of kubelet - %v", err) + klog.Errorf("failed to find cgroups of kubelet - %v", err) return } cm.Lock() @@ -579,7 +580,7 @@ func (cm *containerManagerImpl) Start(node *v1.Node, for _, cont := range cm.systemContainers { if cont.ensureStateFunc != nil { if err := cont.ensureStateFunc(cont.manager); err != nil { - glog.Warningf("[ContainerManager] Failed to ensure state of %q: %v", cont.name, err) + klog.Warningf("[ContainerManager] Failed to ensure state of %q: %v", cont.name, err) } } } @@ -605,8 +606,8 @@ func (cm *containerManagerImpl) Start(node *v1.Node, return nil } -func (cm *containerManagerImpl) GetPluginRegistrationHandlerCallback() pluginwatcher.RegisterCallbackFn { - return cm.deviceManager.GetWatcherCallback() +func (cm *containerManagerImpl) GetPluginRegistrationHandler() pluginwatcher.PluginHandler { + return cm.deviceManager.GetWatcherHandler() } // TODO: move the GetResources logic to PodContainerManager. @@ -652,12 +653,12 @@ func isProcessRunningInHost(pid int) (bool, error) { if err != nil { return false, fmt.Errorf("failed to find pid namespace of init process") } - glog.V(10).Infof("init pid ns is %q", initPidNs) + klog.V(10).Infof("init pid ns is %q", initPidNs) processPidNs, err := os.Readlink(fmt.Sprintf("/proc/%d/ns/pid", pid)) if err != nil { return false, fmt.Errorf("failed to find pid namespace of process %q", pid) } - glog.V(10).Infof("Pid %d pid ns is %q", pid, processPidNs) + klog.V(10).Infof("Pid %d pid ns is %q", pid, processPidNs) return initPidNs == processPidNs, nil } @@ -699,7 +700,7 @@ func getPidsForProcess(name, pidFile string) ([]int, error) { // Return error from getPidFromPidFile since that should have worked // and is the real source of the problem. - glog.V(4).Infof("unable to get pid from %s: %v", pidFile, err) + klog.V(4).Infof("unable to get pid from %s: %v", pidFile, err) return []int{}, err } @@ -737,7 +738,7 @@ func ensureProcessInContainerWithOOMScore(pid int, oomScoreAdj int, manager *fs. return err } else if !runningInHost { // Process is running inside a container. Don't touch that. - glog.V(2).Infof("pid %d is not running in the host namespaces", pid) + klog.V(2).Infof("pid %d is not running in the host namespaces", pid) return nil } @@ -758,9 +759,9 @@ func ensureProcessInContainerWithOOMScore(pid int, oomScoreAdj int, manager *fs. 
// Also apply oom-score-adj to processes oomAdjuster := oom.NewOOMAdjuster() - glog.V(5).Infof("attempting to apply oom_score_adj of %d to pid %d", oomScoreAdj, pid) + klog.V(5).Infof("attempting to apply oom_score_adj of %d to pid %d", oomScoreAdj, pid) if err := oomAdjuster.ApplyOOMScoreAdj(pid, oomScoreAdj); err != nil { - glog.V(3).Infof("Failed to apply oom_score_adj %d for pid %d: %v", oomScoreAdj, pid, err) + klog.V(3).Infof("Failed to apply oom_score_adj %d for pid %d: %v", oomScoreAdj, pid, err) errs = append(errs, fmt.Errorf("failed to apply oom score %d to PID %d: %v", oomScoreAdj, pid, err)) } return utilerrors.NewAggregate(errs) @@ -800,10 +801,10 @@ func getContainer(pid int) (string, error) { // in addition, you would not get memory or cpu accounting for the runtime unless accounting was enabled on its unit (or globally). if systemd, found := cgs["name=systemd"]; found { if systemd != cpu { - glog.Warningf("CPUAccounting not enabled for pid: %d", pid) + klog.Warningf("CPUAccounting not enabled for pid: %d", pid) } if systemd != memory { - glog.Warningf("MemoryAccounting not enabled for pid: %d", pid) + klog.Warningf("MemoryAccounting not enabled for pid: %d", pid) } return systemd, nil } @@ -841,14 +842,14 @@ func ensureSystemCgroups(rootCgroupPath string, manager *fs.Manager) error { pids = append(pids, pid) } - glog.Infof("Found %d PIDs in root, %d of them are not to be moved", len(allPids), len(allPids)-len(pids)) + klog.Infof("Found %d PIDs in root, %d of them are not to be moved", len(allPids), len(allPids)-len(pids)) // Check if we have moved all the non-kernel PIDs. if len(pids) == 0 { break } - glog.Infof("Moving non-kernel processes: %v", pids) + klog.Infof("Moving non-kernel processes: %v", pids) for _, pid := range pids { err := manager.Apply(pid) if err != nil { @@ -878,3 +879,7 @@ func (cm *containerManagerImpl) GetCapacity() v1.ResourceList { func (cm *containerManagerImpl) GetDevicePluginResourceCapacity() (v1.ResourceList, v1.ResourceList, []string) { return cm.deviceManager.GetCapacity() } + +func (cm *containerManagerImpl) GetDevices(podUID, containerName string) []*podresourcesapi.ContainerDevices { + return cm.deviceManager.GetDevices(podUID, containerName) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_stub.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_stub.go index ed2198084929..4563dc530489 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_stub.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_stub.go @@ -17,11 +17,12 @@ limitations under the License. 
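The `GetPluginRegistrationHandlerCallback` → `GetPluginRegistrationHandler` change just above replaces the plugin watcher's single registration callback with a `pluginwatcher.PluginHandler`. The pluginwatcher package itself is not part of this diff; judging from the methods the device manager gains later in it (`ValidatePlugin`, `RegisterPlugin`, `DeRegisterPlugin`), the contract looks roughly like this sketch — an inference from the call sites here, not a copy of the pluginwatcher source:

```go
package sketch

// PluginHandler as implied by the device-manager methods added later in
// this diff; the authoritative definition lives in
// pkg/kubelet/util/pluginwatcher and may differ in detail.
type PluginHandler interface {
	// Reject incompatible plugins before registration.
	ValidatePlugin(pluginName string, endpoint string, versions []string, foundInDeprecatedDir bool) error
	// Start serving a validated plugin socket.
	RegisterPlugin(pluginName string, endpoint string, versions []string) error
	// Tear down state when the plugin's socket goes away.
	DeRegisterPlugin(pluginName string)
}
```

Splitting validate/register/deregister also lets the stub managers return `nil` instead of a do-nothing closure, which is exactly what the stub hunks below do.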
package cm import ( - "github.com/golang/glog" "k8s.io/api/core/v1" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/resource" internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" + podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager" "k8s.io/kubernetes/pkg/kubelet/config" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -36,7 +37,7 @@ type containerManagerStub struct{} var _ ContainerManager = &containerManagerStub{} func (cm *containerManagerStub) Start(_ *v1.Node, _ ActivePodsFunc, _ config.SourcesReady, _ status.PodStatusProvider, _ internalapi.RuntimeService) error { - glog.V(2).Infof("Starting stub container manager") + klog.V(2).Infof("Starting stub container manager") return nil } @@ -77,10 +78,8 @@ func (cm *containerManagerStub) GetCapacity() v1.ResourceList { return c } -func (cm *containerManagerStub) GetPluginRegistrationHandlerCallback() pluginwatcher.RegisterCallbackFn { - return func(name string, endpoint string, versions []string, sockPath string) (chan bool, error) { - return nil, nil - } +func (cm *containerManagerStub) GetPluginRegistrationHandler() pluginwatcher.PluginHandler { + return nil } func (cm *containerManagerStub) GetDevicePluginResourceCapacity() (v1.ResourceList, v1.ResourceList, []string) { @@ -107,6 +106,10 @@ func (cm *containerManagerStub) GetPodCgroupRoot() string { return "" } +func (cm *containerManagerStub) GetDevices(_, _ string) []*podresourcesapi.ContainerDevices { + return nil +} + func NewStubContainerManager() ContainerManager { return &containerManagerStub{} } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_windows.go index 56bdbb9d3636..a8a84d5f22de 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_windows.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_windows.go @@ -16,31 +16,158 @@ See the License for the specific language governing permissions and limitations under the License. */ +// containerManagerImpl implements container manager on Windows. +// Only GetNodeAllocatableReservation() and GetCapacity() are implemented now. + package cm import ( - "github.com/golang/glog" + "fmt" "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/record" + "k8s.io/klog" + kubefeatures "k8s.io/kubernetes/pkg/features" internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" + podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1" "k8s.io/kubernetes/pkg/kubelet/cadvisor" + "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager" "k8s.io/kubernetes/pkg/kubelet/config" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/lifecycle" "k8s.io/kubernetes/pkg/kubelet/status" + "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher" + schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" "k8s.io/kubernetes/pkg/util/mount" ) type containerManagerImpl struct { - containerManagerStub + // Capacity of this node. + capacity v1.ResourceList + // Interface for cadvisor. + cadvisorInterface cadvisor.Interface + // Config of this node. 
+ nodeConfig NodeConfig } -var _ ContainerManager = &containerManagerImpl{} +func (cm *containerManagerImpl) Start(node *v1.Node, + activePods ActivePodsFunc, + sourcesReady config.SourcesReady, + podStatusProvider status.PodStatusProvider, + runtimeService internalapi.RuntimeService) error { + klog.V(2).Infof("Starting Windows container manager") + + if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.LocalStorageCapacityIsolation) { + rootfs, err := cm.cadvisorInterface.RootFsInfo() + if err != nil { + return fmt.Errorf("failed to get rootfs info: %v", err) + } + for rName, rCap := range cadvisor.EphemeralStorageCapacityFromFsInfo(rootfs) { + cm.capacity[rName] = rCap + } + } -func (cm *containerManagerImpl) Start(_ *v1.Node, _ ActivePodsFunc, _ config.SourcesReady, _ status.PodStatusProvider, _ internalapi.RuntimeService) error { - glog.V(2).Infof("Starting Windows stub container manager") return nil } +// NewContainerManager creates windows container manager. func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.Interface, nodeConfig NodeConfig, failSwapOn bool, devicePluginEnabled bool, recorder record.EventRecorder) (ContainerManager, error) { - return &containerManagerImpl{}, nil + var capacity = v1.ResourceList{} + // It is safe to invoke `MachineInfo` on cAdvisor before logically initializing cAdvisor here because + // machine info is computed and cached once as part of cAdvisor object creation. + // But `RootFsInfo` and `ImagesFsInfo` are not available at this moment so they will be called later during manager starts + machineInfo, err := cadvisorInterface.MachineInfo() + if err != nil { + return nil, err + } + capacity = cadvisor.CapacityFromMachineInfo(machineInfo) + + return &containerManagerImpl{ + capacity: capacity, + nodeConfig: nodeConfig, + cadvisorInterface: cadvisorInterface, + }, nil +} + +func (cm *containerManagerImpl) SystemCgroupsLimit() v1.ResourceList { + return v1.ResourceList{} +} + +func (cm *containerManagerImpl) GetNodeConfig() NodeConfig { + return NodeConfig{} +} + +func (cm *containerManagerImpl) GetMountedSubsystems() *CgroupSubsystems { + return &CgroupSubsystems{} +} + +func (cm *containerManagerImpl) GetQOSContainersInfo() QOSContainersInfo { + return QOSContainersInfo{} +} + +func (cm *containerManagerImpl) UpdateQOSCgroups() error { + return nil +} + +func (cm *containerManagerImpl) Status() Status { + return Status{} +} + +func (cm *containerManagerImpl) GetNodeAllocatableReservation() v1.ResourceList { + evictionReservation := hardEvictionReservation(cm.nodeConfig.HardEvictionThresholds, cm.capacity) + result := make(v1.ResourceList) + for k := range cm.capacity { + value := resource.NewQuantity(0, resource.DecimalSI) + if cm.nodeConfig.SystemReserved != nil { + value.Add(cm.nodeConfig.SystemReserved[k]) + } + if cm.nodeConfig.KubeReserved != nil { + value.Add(cm.nodeConfig.KubeReserved[k]) + } + if evictionReservation != nil { + value.Add(evictionReservation[k]) + } + if !value.IsZero() { + result[k] = *value + } + } + return result +} + +func (cm *containerManagerImpl) GetCapacity() v1.ResourceList { + return cm.capacity +} + +func (cm *containerManagerImpl) GetPluginRegistrationHandler() pluginwatcher.PluginHandler { + return nil +} + +func (cm *containerManagerImpl) GetDevicePluginResourceCapacity() (v1.ResourceList, v1.ResourceList, []string) { + return nil, nil, []string{} +} + +func (cm *containerManagerImpl) NewPodContainerManager() PodContainerManager { + return &podContainerManagerStub{} +} + +func (cm 
*containerManagerImpl) GetResources(pod *v1.Pod, container *v1.Container) (*kubecontainer.RunContainerOptions, error) { + return &kubecontainer.RunContainerOptions{}, nil +} + +func (cm *containerManagerImpl) UpdatePluginResources(*schedulercache.NodeInfo, *lifecycle.PodAdmitAttributes) error { + return nil +} + +func (cm *containerManagerImpl) InternalContainerLifecycle() InternalContainerLifecycle { + return &internalContainerLifecycleImpl{cpumanager.NewFakeManager()} +} + +func (cm *containerManagerImpl) GetPodCgroupRoot() string { + return "" +} + +func (cm *containerManagerImpl) GetDevices(_, _ string) []*podresourcesapi.ContainerDevices { + return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/BUILD index 4b3baa3184ec..4e597c9b3882 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/BUILD @@ -22,8 +22,8 @@ go_library( "//pkg/kubelet/status:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_assignment.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_assignment.go index 3f5f33bafeda..be6babab14c5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_assignment.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_assignment.go @@ -20,7 +20,7 @@ import ( "fmt" "sort" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology" "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" @@ -160,7 +160,7 @@ func takeByTopology(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, num // least a socket's-worth of CPUs. for _, s := range acc.freeSockets() { if acc.needs(acc.topo.CPUsPerSocket()) { - glog.V(4).Infof("[cpumanager] takeByTopology: claiming socket [%d]", s) + klog.V(4).Infof("[cpumanager] takeByTopology: claiming socket [%d]", s) acc.take(acc.details.CPUsInSocket(s)) if acc.isSatisfied() { return acc.result, nil @@ -172,7 +172,7 @@ func takeByTopology(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, num // a core's-worth of CPUs. for _, c := range acc.freeCores() { if acc.needs(acc.topo.CPUsPerCore()) { - glog.V(4).Infof("[cpumanager] takeByTopology: claiming core [%d]", c) + klog.V(4).Infof("[cpumanager] takeByTopology: claiming core [%d]", c) acc.take(acc.details.CPUsInCore(c)) if acc.isSatisfied() { return acc.result, nil @@ -184,7 +184,7 @@ func takeByTopology(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, num // on the same sockets as the whole cores we have already taken in this // allocation. 
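`takeByTopology`, touched above, is a three-pass greedy fit: claim whole free sockets while at least a socket's worth of CPUs is still needed, then whole free cores, then (in the hunk that continues below) individual CPUs. A simplified standalone sketch of the strategy over plain int slices — the real code works on `cpuset.CPUSet` through a topology accumulator:

```go
package sketch

// takeGreedy mirrors takeByTopology's packing order on a simplified
// model: sockets and cores are given as slices of CPU ids. Larger units
// are claimed first so exclusively assigned CPUs stay topology-aligned.
func takeGreedy(sockets, cores [][]int, need int) []int {
	taken := map[int]bool{}
	var result []int
	free := func(unit []int) []int { // CPUs of this unit not yet taken
		var out []int
		for _, c := range unit {
			if !taken[c] {
				out = append(out, c)
			}
		}
		return out
	}
	take := func(cpus []int) {
		for _, c := range cpus {
			taken[c] = true
			result = append(result, c)
			need--
		}
	}
	// Pass 1: whole sockets, only while a full socket is still needed.
	for _, s := range sockets {
		if f := free(s); len(f) > 0 && need >= len(f) {
			take(f)
		}
	}
	// Pass 2: whole cores.
	for _, c := range cores {
		if f := free(c); len(f) > 0 && need >= len(f) {
			take(f)
		}
	}
	// Pass 3: individual CPUs until the request is satisfied.
	for _, s := range sockets {
		for _, c := range free(s) {
			if need > 0 {
				take([]int{c})
			}
		}
	}
	return result
}
```

On a 2-socket, 4-cores-per-socket, 2-threads-per-core machine, a request for 10 CPUs claims one whole socket (8 CPUs) plus one whole core (2), keeping the exclusive set topology-aligned.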
for _, c := range acc.freeCPUs() { - glog.V(4).Infof("[cpumanager] takeByTopology: claiming CPU [%d]", c) + klog.V(4).Infof("[cpumanager] takeByTopology: claiming CPU [%d]", c) if acc.needs(1) { acc.take(cpuset.NewCPUSet(c)) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager.go index 0754a4da0925..4ccddd554da5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager.go @@ -22,10 +22,10 @@ import ( "sync" "time" - "github.com/golang/glog" cadvisorapi "github.com/google/cadvisor/info/v1" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state" @@ -110,7 +110,7 @@ func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo if err != nil { return nil, err } - glog.Infof("[cpumanager] detected CPU topology: %v", topo) + klog.Infof("[cpumanager] detected CPU topology: %v", topo) reservedCPUs, ok := nodeAllocatableReservation[v1.ResourceCPU] if !ok { // The static policy cannot initialize without this information. @@ -132,7 +132,7 @@ func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo policy = NewStaticPolicy(topo, numReservedCPUs) default: - glog.Errorf("[cpumanager] Unknown policy \"%s\", falling back to default policy \"%s\"", cpuPolicyName, PolicyNone) + klog.Errorf("[cpumanager] Unknown policy \"%s\", falling back to default policy \"%s\"", cpuPolicyName, PolicyNone) policy = NewNonePolicy() } @@ -152,8 +152,8 @@ func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo } func (m *manager) Start(activePods ActivePodsFunc, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService) { - glog.Infof("[cpumanager] starting with %s policy", m.policy.Name()) - glog.Infof("[cpumanager] reconciling every %v", m.reconcilePeriod) + klog.Infof("[cpumanager] starting with %s policy", m.policy.Name()) + klog.Infof("[cpumanager] reconciling every %v", m.reconcilePeriod) m.activePods = activePods m.podStatusProvider = podStatusProvider @@ -170,7 +170,7 @@ func (m *manager) AddContainer(p *v1.Pod, c *v1.Container, containerID string) e m.Lock() err := m.policy.AddContainer(m.state, p, c, containerID) if err != nil { - glog.Errorf("[cpumanager] AddContainer error: %v", err) + klog.Errorf("[cpumanager] AddContainer error: %v", err) m.Unlock() return err } @@ -180,17 +180,17 @@ func (m *manager) AddContainer(p *v1.Pod, c *v1.Container, containerID string) e if !cpus.IsEmpty() { err = m.updateContainerCPUSet(containerID, cpus) if err != nil { - glog.Errorf("[cpumanager] AddContainer error: %v", err) + klog.Errorf("[cpumanager] AddContainer error: %v", err) m.Lock() err := m.policy.RemoveContainer(m.state, containerID) if err != nil { - glog.Errorf("[cpumanager] AddContainer rollback state error: %v", err) + klog.Errorf("[cpumanager] AddContainer rollback state error: %v", err) } m.Unlock() } return err } - glog.V(5).Infof("[cpumanager] update container resources is skipped due to cpu set is empty") + klog.V(5).Infof("[cpumanager] update container resources is skipped due to cpu set is empty") return nil } @@ -200,7 +200,7 @@ func (m *manager) RemoveContainer(containerID string) error { err := m.policy.RemoveContainer(m.state, 
containerID) if err != nil { - glog.Errorf("[cpumanager] RemoveContainer error: %v", err) + klog.Errorf("[cpumanager] RemoveContainer error: %v", err) return err } return nil @@ -226,14 +226,14 @@ func (m *manager) reconcileState() (success []reconciledContainer, failure []rec for _, container := range allContainers { status, ok := m.podStatusProvider.GetPodStatus(pod.UID) if !ok { - glog.Warningf("[cpumanager] reconcileState: skipping pod; status not found (pod: %s, container: %s)", pod.Name, container.Name) + klog.Warningf("[cpumanager] reconcileState: skipping pod; status not found (pod: %s, container: %s)", pod.Name, container.Name) failure = append(failure, reconciledContainer{pod.Name, container.Name, ""}) break } containerID, err := findContainerIDByName(&status, container.Name) if err != nil { - glog.Warningf("[cpumanager] reconcileState: skipping container; ID not found in status (pod: %s, container: %s, error: %v)", pod.Name, container.Name, err) + klog.Warningf("[cpumanager] reconcileState: skipping container; ID not found in status (pod: %s, container: %s, error: %v)", pod.Name, container.Name, err) failure = append(failure, reconciledContainer{pod.Name, container.Name, ""}) continue } @@ -244,10 +244,10 @@ func (m *manager) reconcileState() (success []reconciledContainer, failure []rec // - container has been removed from state by RemoveContainer call (DeletionTimestamp is set) if _, ok := m.state.GetCPUSet(containerID); !ok { if status.Phase == v1.PodRunning && pod.DeletionTimestamp == nil { - glog.V(4).Infof("[cpumanager] reconcileState: container is not present in state - trying to add (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID) + klog.V(4).Infof("[cpumanager] reconcileState: container is not present in state - trying to add (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID) err := m.AddContainer(pod, &container, containerID) if err != nil { - glog.Errorf("[cpumanager] reconcileState: failed to add container (pod: %s, container: %s, container id: %s, error: %v)", pod.Name, container.Name, containerID, err) + klog.Errorf("[cpumanager] reconcileState: failed to add container (pod: %s, container: %s, container id: %s, error: %v)", pod.Name, container.Name, containerID, err) failure = append(failure, reconciledContainer{pod.Name, container.Name, containerID}) continue } @@ -261,15 +261,15 @@ func (m *manager) reconcileState() (success []reconciledContainer, failure []rec cset := m.state.GetCPUSetOrDefault(containerID) if cset.IsEmpty() { // NOTE: This should not happen outside of tests. 
- glog.Infof("[cpumanager] reconcileState: skipping container; assigned cpuset is empty (pod: %s, container: %s)", pod.Name, container.Name) + klog.Infof("[cpumanager] reconcileState: skipping container; assigned cpuset is empty (pod: %s, container: %s)", pod.Name, container.Name) failure = append(failure, reconciledContainer{pod.Name, container.Name, containerID}) continue } - glog.V(4).Infof("[cpumanager] reconcileState: updating container (pod: %s, container: %s, container id: %s, cpuset: \"%v\")", pod.Name, container.Name, containerID, cset) + klog.V(4).Infof("[cpumanager] reconcileState: updating container (pod: %s, container: %s, container id: %s, cpuset: \"%v\")", pod.Name, container.Name, containerID, cset) err = m.updateContainerCPUSet(containerID, cset) if err != nil { - glog.Errorf("[cpumanager] reconcileState: failed to update container (pod: %s, container: %s, container id: %s, cpuset: \"%v\", error: %v)", pod.Name, container.Name, containerID, cset, err) + klog.Errorf("[cpumanager] reconcileState: failed to update container (pod: %s, container: %s, container id: %s, cpuset: \"%v\", error: %v)", pod.Name, container.Name, containerID, cset, err) failure = append(failure, reconciledContainer{pod.Name, container.Name, containerID}) continue } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go index f4af5667ea68..8240bbd497d9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go @@ -17,8 +17,8 @@ limitations under the License. package cpumanager import ( - "github.com/golang/glog" "k8s.io/api/core/v1" + "k8s.io/klog" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state" "k8s.io/kubernetes/pkg/kubelet/status" ) @@ -28,21 +28,21 @@ type fakeManager struct { } func (m *fakeManager) Start(activePods ActivePodsFunc, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService) { - glog.Info("[fake cpumanager] Start()") + klog.Info("[fake cpumanager] Start()") } func (m *fakeManager) Policy() Policy { - glog.Info("[fake cpumanager] Policy()") + klog.Info("[fake cpumanager] Policy()") return NewNonePolicy() } func (m *fakeManager) AddContainer(pod *v1.Pod, container *v1.Container, containerID string) error { - glog.Infof("[fake cpumanager] AddContainer (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID) + klog.Infof("[fake cpumanager] AddContainer (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID) return nil } func (m *fakeManager) RemoveContainer(containerID string) error { - glog.Infof("[fake cpumanager] RemoveContainer (container id: %s)", containerID) + klog.Infof("[fake cpumanager] RemoveContainer (container id: %s)", containerID) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_none.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_none.go index 26e3335f73af..294edc6bf31d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_none.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_none.go @@ -17,8 +17,8 @@ limitations under the License. 
package cpumanager import ( - "github.com/golang/glog" "k8s.io/api/core/v1" + "k8s.io/klog" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state" ) @@ -39,7 +39,7 @@ func (p *nonePolicy) Name() string { } func (p *nonePolicy) Start(s state.State) { - glog.Info("[cpumanager] none policy: Start") + klog.Info("[cpumanager] none policy: Start") } func (p *nonePolicy) AddContainer(s state.State, pod *v1.Pod, container *v1.Container, containerID string) error { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go index 651e5fe79c9b..d72a44ad0e9c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go @@ -19,8 +19,8 @@ package cpumanager import ( "fmt" - "github.com/golang/glog" "k8s.io/api/core/v1" + "k8s.io/klog" v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology" @@ -94,7 +94,7 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int) Policy panic(fmt.Sprintf("[cpumanager] unable to reserve the required amount of CPUs (size of %s did not equal %d)", reserved, numReservedCPUs)) } - glog.Infof("[cpumanager] reserved %d CPUs (\"%s\") not available for exclusive assignment", reserved.Size(), reserved) + klog.Infof("[cpumanager] reserved %d CPUs (\"%s\") not available for exclusive assignment", reserved.Size(), reserved) return &staticPolicy{ topology: topology, @@ -108,7 +108,7 @@ func (p *staticPolicy) Name() string { func (p *staticPolicy) Start(s state.State) { if err := p.validateState(s); err != nil { - glog.Errorf("[cpumanager] static policy invalid state: %s\n", err.Error()) + klog.Errorf("[cpumanager] static policy invalid state: %s\n", err.Error()) panic("[cpumanager] - please drain node and remove policy state file") } } @@ -172,17 +172,17 @@ func (p *staticPolicy) assignableCPUs(s state.State) cpuset.CPUSet { func (p *staticPolicy) AddContainer(s state.State, pod *v1.Pod, container *v1.Container, containerID string) error { if numCPUs := guaranteedCPUs(pod, container); numCPUs != 0 { - glog.Infof("[cpumanager] static policy: AddContainer (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID) + klog.Infof("[cpumanager] static policy: AddContainer (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID) // container belongs in an exclusively allocated pool if _, ok := s.GetCPUSet(containerID); ok { - glog.Infof("[cpumanager] static policy: container already present in state, skipping (container: %s, container id: %s)", container.Name, containerID) + klog.Infof("[cpumanager] static policy: container already present in state, skipping (container: %s, container id: %s)", container.Name, containerID) return nil } cpuset, err := p.allocateCPUs(s, numCPUs) if err != nil { - glog.Errorf("[cpumanager] unable to allocate %d CPUs (container id: %s, error: %v)", numCPUs, containerID, err) + klog.Errorf("[cpumanager] unable to allocate %d CPUs (container id: %s, error: %v)", numCPUs, containerID, err) return err } s.SetCPUSet(containerID, cpuset) @@ -192,7 +192,7 @@ func (p *staticPolicy) AddContainer(s state.State, pod *v1.Pod, container *v1.Co } func (p *staticPolicy) RemoveContainer(s state.State, containerID string) error { - glog.Infof("[cpumanager] 
static policy: RemoveContainer (container id: %s)", containerID) + klog.Infof("[cpumanager] static policy: RemoveContainer (container id: %s)", containerID) if toRelease, ok := s.GetCPUSet(containerID); ok { s.Delete(containerID) // Mutate the shared pool, adding released cpus. @@ -202,7 +202,7 @@ func (p *staticPolicy) RemoveContainer(s state.State, containerID string) error } func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int) (cpuset.CPUSet, error) { - glog.Infof("[cpumanager] allocateCpus: (numCPUs: %d)", numCPUs) + klog.Infof("[cpumanager] allocateCpus: (numCPUs: %d)", numCPUs) result, err := takeByTopology(p.topology, p.assignableCPUs(s), numCPUs) if err != nil { return cpuset.NewCPUSet(), err @@ -210,7 +210,7 @@ func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int) (cpuset.CPUSet, // Remove allocated CPUs from the shared CPUSet. s.SetDefaultCPUSet(s.GetDefaultCPUSet().Difference(result)) - glog.Infof("[cpumanager] allocateCPUs: returning \"%v\"", result) + klog.Infof("[cpumanager] allocateCPUs: returning \"%v\"", result) return result, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/BUILD index d39211962e8d..6a95d1af163e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/BUILD @@ -16,7 +16,7 @@ go_library( "//pkg/kubelet/checkpointmanager/checksum:go_default_library", "//pkg/kubelet/checkpointmanager/errors:go_default_library", "//pkg/kubelet/cm/cpuset:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_checkpoint.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_checkpoint.go index 6d92573b8665..badafab43d81 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_checkpoint.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_checkpoint.go @@ -21,7 +21,7 @@ import ( "path" "sync" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/kubelet/checkpointmanager" "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors" "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" @@ -97,8 +97,8 @@ func (sc *stateCheckpoint) restoreState() error { sc.cache.SetDefaultCPUSet(tmpDefaultCPUSet) sc.cache.SetCPUAssignments(tmpAssignments) - glog.V(2).Info("[cpumanager] state checkpoint: restored state from checkpoint") - glog.V(2).Infof("[cpumanager] state checkpoint: defaultCPUSet: %s", tmpDefaultCPUSet.String()) + klog.V(2).Info("[cpumanager] state checkpoint: restored state from checkpoint") + klog.V(2).Infof("[cpumanager] state checkpoint: defaultCPUSet: %s", tmpDefaultCPUSet.String()) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_file.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_file.go index 6c2353cf10f5..90d16693dc3d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_file.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_file.go @@ -19,8 +19,8 @@ package state import ( "encoding/json" "fmt" - "github.com/golang/glog" "io/ioutil" + "k8s.io/klog" 
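For orientation before the `tryRestoreState` hunks below: the state file they parse is JSON holding one cpuset string for the shared pool plus one per exclusively assigned container. The struct below is inferred from the fields the code reads (`DefaultCPUSet`, `Entries`) and from the `defaultCpuSet` key appearing in its error message; the `PolicyName` field is an assumption, since these hunks never touch it:

```go
package sketch

// Inferred shape of the cpumanager state file; only DefaultCPUSet and
// Entries are visible in the hunks below.
type stateFileData struct {
	PolicyName    string            `json:"policyName"`    // assumption
	DefaultCPUSet string            `json:"defaultCpuSet"` // shared pool, e.g. "0-7"
	Entries       map[string]string `json:"entries"`       // containerID -> "2,3"
}
```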
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset" "os" "sync" @@ -79,7 +79,7 @@ func (sf *stateFile) tryRestoreState() error { // If the state file does not exist or has zero length, write a new file. if os.IsNotExist(err) || len(content) == 0 { sf.storeState() - glog.Infof("[cpumanager] state file: created new state file \"%s\"", sf.stateFilePath) + klog.Infof("[cpumanager] state file: created new state file \"%s\"", sf.stateFilePath) return nil } @@ -92,7 +92,7 @@ func (sf *stateFile) tryRestoreState() error { var readState stateFileData if err = json.Unmarshal(content, &readState); err != nil { - glog.Errorf("[cpumanager] state file: could not unmarshal, corrupted state file - \"%s\"", sf.stateFilePath) + klog.Errorf("[cpumanager] state file: could not unmarshal, corrupted state file - \"%s\"", sf.stateFilePath) return err } @@ -101,13 +101,13 @@ func (sf *stateFile) tryRestoreState() error { } if tmpDefaultCPUSet, err = cpuset.Parse(readState.DefaultCPUSet); err != nil { - glog.Errorf("[cpumanager] state file: could not parse state file - [defaultCpuSet:\"%s\"]", readState.DefaultCPUSet) + klog.Errorf("[cpumanager] state file: could not parse state file - [defaultCpuSet:\"%s\"]", readState.DefaultCPUSet) return err } for containerID, cpuString := range readState.Entries { if tmpContainerCPUSet, err = cpuset.Parse(cpuString); err != nil { - glog.Errorf("[cpumanager] state file: could not parse state file - container id: %s, cpuset: \"%s\"", containerID, cpuString) + klog.Errorf("[cpumanager] state file: could not parse state file - container id: %s, cpuset: \"%s\"", containerID, cpuString) return err } tmpAssignments[containerID] = tmpContainerCPUSet @@ -116,8 +116,8 @@ func (sf *stateFile) tryRestoreState() error { sf.cache.SetDefaultCPUSet(tmpDefaultCPUSet) sf.cache.SetCPUAssignments(tmpAssignments) - glog.V(2).Infof("[cpumanager] state file: restored state from state file \"%s\"", sf.stateFilePath) - glog.V(2).Infof("[cpumanager] state file: defaultCPUSet: %s", tmpDefaultCPUSet.String()) + klog.V(2).Infof("[cpumanager] state file: restored state from state file \"%s\"", sf.stateFilePath) + klog.V(2).Infof("[cpumanager] state file: defaultCPUSet: %s", tmpDefaultCPUSet.String()) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_mem.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_mem.go index 816c89868e58..77c5f4a525cc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_mem.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_mem.go @@ -19,7 +19,7 @@ package state import ( "sync" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" ) @@ -33,7 +33,7 @@ var _ State = &stateMemory{} // NewMemoryState creates new State for keeping track of cpu/pod assignment func NewMemoryState() State { - glog.Infof("[cpumanager] initializing new in-memory state store") + klog.Infof("[cpumanager] initializing new in-memory state store") return &stateMemory{ assignments: ContainerCPUAssignments{}, defaultCPUSet: cpuset.NewCPUSet(), @@ -73,7 +73,7 @@ func (s *stateMemory) SetCPUSet(containerID string, cset cpuset.CPUSet) { defer s.Unlock() s.assignments[containerID] = cset - glog.Infof("[cpumanager] updated desired cpuset (container id: %s, cpuset: \"%s\")", containerID, cset) + klog.Infof("[cpumanager] updated desired cpuset (container id: %s, cpuset: \"%s\")", containerID, cset) } func (s *stateMemory) 
SetDefaultCPUSet(cset cpuset.CPUSet) { @@ -81,7 +81,7 @@ func (s *stateMemory) SetDefaultCPUSet(cset cpuset.CPUSet) { defer s.Unlock() s.defaultCPUSet = cset - glog.Infof("[cpumanager] updated default cpuset: \"%s\"", cset) + klog.Infof("[cpumanager] updated default cpuset: \"%s\"", cset) } func (s *stateMemory) SetCPUAssignments(a ContainerCPUAssignments) { @@ -89,7 +89,7 @@ func (s *stateMemory) SetCPUAssignments(a ContainerCPUAssignments) { defer s.Unlock() s.assignments = a.Clone() - glog.Infof("[cpumanager] updated cpuset assignments: \"%v\"", a) + klog.Infof("[cpumanager] updated cpuset assignments: \"%v\"", a) } func (s *stateMemory) Delete(containerID string) { @@ -97,7 +97,7 @@ func (s *stateMemory) Delete(containerID string) { defer s.Unlock() delete(s.assignments, containerID) - glog.V(2).Infof("[cpumanager] deleted cpuset assignment (container id: %s)", containerID) + klog.V(2).Infof("[cpumanager] deleted cpuset assignment (container id: %s)", containerID) } func (s *stateMemory) ClearState() { @@ -106,5 +106,5 @@ func (s *stateMemory) ClearState() { s.defaultCPUSet = cpuset.CPUSet{} s.assignments = make(ContainerCPUAssignments) - glog.V(2).Infof("[cpumanager] cleared state") + klog.V(2).Infof("[cpumanager] cleared state") } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/BUILD index e9f640785548..18ab41a33f4c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/BUILD @@ -10,8 +10,8 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/kubelet/cm/cpuset:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/topology.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/topology.go index 2491abe6c06a..adb0aa7fc70b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/topology.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/topology.go @@ -20,8 +20,8 @@ import ( "fmt" "sort" - "github.com/golang/glog" cadvisorapi "github.com/google/cadvisor/info/v1" + "k8s.io/klog" "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" ) @@ -156,7 +156,7 @@ func Discover(machineInfo *cadvisorapi.MachineInfo) (*CPUTopology, error) { numPhysicalCores += len(socket.Cores) for _, core := range socket.Cores { if coreID, err = getUniqueCoreID(core.Threads); err != nil { - glog.Errorf("could not get unique coreID for socket: %d core %d threads: %v", + klog.Errorf("could not get unique coreID for socket: %d core %d threads: %v", socket.Id, core.Id, core.Threads) return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpuset/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpuset/BUILD index 89126d680e32..9b81edc1961f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpuset/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpuset/BUILD @@ -5,7 +5,7 @@ go_library( srcs = ["cpuset.go"], importpath = "k8s.io/kubernetes/pkg/kubelet/cm/cpuset", visibility = ["//visibility:public"], - deps = 
["//vendor/github.com/golang/glog:go_default_library"], + deps = ["//vendor/k8s.io/klog:go_default_library"], ) go_test( diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpuset/cpuset.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpuset/cpuset.go index ccf577d819cb..d87efc785938 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpuset/cpuset.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpuset/cpuset.go @@ -19,7 +19,7 @@ package cpuset import ( "bytes" "fmt" - "github.com/golang/glog" + "k8s.io/klog" "reflect" "sort" "strconv" @@ -221,7 +221,7 @@ func (s CPUSet) String() string { func MustParse(s string) CPUSet { res, err := Parse(s) if err != nil { - glog.Fatalf("unable to parse [%s] as CPUSet: %v", s, err) + klog.Fatalf("unable to parse [%s] as CPUSet: %v", s, err) } return res } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/BUILD index 719dbbd65e17..885f783db79a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/BUILD @@ -15,7 +15,8 @@ go_library( deps = [ "//pkg/apis/core/v1/helper:go_default_library", "//pkg/kubelet/apis/deviceplugin/v1beta1:go_default_library", - "//pkg/kubelet/apis/pluginregistration/v1alpha1:go_default_library", + "//pkg/kubelet/apis/pluginregistration/v1:go_default_library", + "//pkg/kubelet/apis/podresources/v1alpha1:go_default_library", "//pkg/kubelet/checkpointmanager:go_default_library", "//pkg/kubelet/checkpointmanager/errors:go_default_library", "//pkg/kubelet/cm/devicemanager/checkpoint:go_default_library", @@ -28,8 +29,8 @@ go_library( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -42,7 +43,7 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/kubelet/apis/deviceplugin/v1beta1:go_default_library", - "//pkg/kubelet/apis/pluginregistration/v1alpha1:go_default_library", + "//pkg/kubelet/apis/pluginregistration/v1:go_default_library", "//pkg/kubelet/checkpointmanager:go_default_library", "//pkg/kubelet/lifecycle:go_default_library", "//pkg/kubelet/util/pluginwatcher:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/device_plugin_stub.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/device_plugin_stub.go index d4040fd1ec93..f0c7bc2641cc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/device_plugin_stub.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/device_plugin_stub.go @@ -28,7 +28,7 @@ import ( "google.golang.org/grpc" pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1" - watcherapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1alpha1" + watcherapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1" ) // Stub implementation for DevicePlugin. 
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/endpoint.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/endpoint.go index eed50de6561a..624643e5efb0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/endpoint.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/endpoint.go @@ -23,8 +23,8 @@ import ( "sync" "time" - "github.com/golang/glog" "google.golang.org/grpc" + "k8s.io/klog" pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1" ) @@ -59,7 +59,7 @@ type endpointImpl struct { func newEndpointImpl(socketPath, resourceName string, callback monitorCallback) (*endpointImpl, error) { client, c, err := dial(socketPath) if err != nil { - glog.Errorf("Can't create new endpoint with path %s err %v", socketPath, err) + klog.Errorf("Can't create new endpoint with path %s err %v", socketPath, err) return nil, err } @@ -95,7 +95,7 @@ func (e *endpointImpl) callback(resourceName string, devices []pluginapi.Device) func (e *endpointImpl) run() { stream, err := e.client.ListAndWatch(context.Background(), &pluginapi.Empty{}) if err != nil { - glog.Errorf(errListAndWatch, e.resourceName, err) + klog.Errorf(errListAndWatch, e.resourceName, err) return } @@ -103,12 +103,12 @@ func (e *endpointImpl) run() { for { response, err := stream.Recv() if err != nil { - glog.Errorf(errListAndWatch, e.resourceName, err) + klog.Errorf(errListAndWatch, e.resourceName, err) return } devs := response.Devices - glog.V(2).Infof("State pushed for device plugin %s", e.resourceName) + klog.V(2).Infof("State pushed for device plugin %s", e.resourceName) var newDevs []pluginapi.Device for _, d := range devs { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager.go index 9aacd08af156..fde7f52b075c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager.go @@ -25,14 +25,15 @@ import ( "sync" "time" - "github.com/golang/glog" "google.golang.org/grpc" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/sets" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1" + podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1" "k8s.io/kubernetes/pkg/kubelet/checkpointmanager" "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors" "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint" @@ -56,7 +57,7 @@ type ManagerImpl struct { socketname string socketdir string - endpoints map[string]endpoint // Key is ResourceName + endpoints map[string]endpointInfo // Key is ResourceName mutex sync.Mutex server *grpc.Server @@ -86,10 +87,14 @@ type ManagerImpl struct { // podDevices contains pod to allocated device mapping. 
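The endpoint `run` hunks above are where device health enters the manager: the loop holds the plugin's ListAndWatch stream open and forwards every full device snapshot through the callback; when `Recv` fails (plugin crash, socket removed) the loop returns, and `runEndpoint` — later in this diff — marks the resource unhealthy. A compressed sketch using the same client calls the hunks show:

```go
package sketch

import (
	"context"

	"k8s.io/klog"
	pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1"
)

// consumeListAndWatch mirrors endpointImpl.run: consume the stream until
// it breaks, pushing each device snapshot to the manager's callback.
func consumeListAndWatch(client pluginapi.DevicePluginClient, resource string,
	callback func(resourceName string, devices []pluginapi.Device)) {
	stream, err := client.ListAndWatch(context.Background(), &pluginapi.Empty{})
	if err != nil {
		klog.Errorf("ListAndWatch failed for %s: %v", resource, err)
		return
	}
	for {
		resp, err := stream.Recv()
		if err != nil {
			// Stream broken: the caller treats this as the plugin going
			// away and marks the resource unhealthy.
			klog.Errorf("ListAndWatch stream broken for %s: %v", resource, err)
			return
		}
		devs := make([]pluginapi.Device, 0, len(resp.Devices))
		for _, d := range resp.Devices {
			devs = append(devs, *d)
		}
		callback(resource, devs)
	}
}
```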
podDevices podDevices - pluginOpts map[string]*pluginapi.DevicePluginOptions checkpointManager checkpointmanager.CheckpointManager } +type endpointInfo struct { + e endpoint + opts *pluginapi.DevicePluginOptions +} + type sourcesReadyStub struct{} func (s *sourcesReadyStub) AddSource(source string) {} @@ -101,7 +106,7 @@ func NewManagerImpl() (*ManagerImpl, error) { } func newManagerImpl(socketPath string) (*ManagerImpl, error) { - glog.V(2).Infof("Creating Device Plugin manager at %s", socketPath) + klog.V(2).Infof("Creating Device Plugin manager at %s", socketPath) if socketPath == "" || !filepath.IsAbs(socketPath) { return nil, fmt.Errorf(errBadSocket+" %s", socketPath) @@ -109,13 +114,13 @@ func newManagerImpl(socketPath string) (*ManagerImpl, error) { dir, file := filepath.Split(socketPath) manager := &ManagerImpl{ - endpoints: make(map[string]endpoint), + endpoints: make(map[string]endpointInfo), + socketname: file, socketdir: dir, healthyDevices: make(map[string]sets.String), unhealthyDevices: make(map[string]sets.String), allocatedDevices: make(map[string]sets.String), - pluginOpts: make(map[string]*pluginapi.DevicePluginOptions), podDevices: make(podDevices), } manager.callback = manager.genericDeviceUpdateCallback @@ -165,7 +170,7 @@ func (m *ManagerImpl) removeContents(dir string) error { } stat, err := os.Stat(filePath) if err != nil { - glog.Errorf("Failed to stat file %s: %v", filePath, err) + klog.Errorf("Failed to stat file %s: %v", filePath, err) continue } if stat.IsDir() { @@ -188,7 +193,7 @@ func (m *ManagerImpl) checkpointFile() string { // podDevices and allocatedDevices information from checkpoint-ed state and // starts device plugin registration service. func (m *ManagerImpl) Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady) error { - glog.V(2).Infof("Starting Device Plugin manager") + klog.V(2).Infof("Starting Device Plugin manager") m.activePods = activePods m.sourcesReady = sourcesReady @@ -196,7 +201,7 @@ func (m *ManagerImpl) Start(activePods ActivePodsFunc, sourcesReady config.Sourc // Loads in allocatedDevices information from disk. err := m.readCheckpoint() if err != nil { - glog.Warningf("Continue after failing to read checkpoint file. Device allocation info may NOT be up-to-date. Err: %v", err) + klog.Warningf("Continue after failing to read checkpoint file. Device allocation info may NOT be up-to-date. Err: %v", err) } socketPath := filepath.Join(m.socketdir, m.socketname) @@ -205,12 +210,12 @@ func (m *ManagerImpl) Start(activePods ActivePodsFunc, sourcesReady config.Sourc // Removes all stale sockets in m.socketdir. Device plugins can monitor // this and use it as a signal to re-register with the new Kubelet. 
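The struct change above — folding the separate `endpoints` and `pluginOpts` maps into one `map[string]endpointInfo` — means an endpoint and its plugin options can no longer drift apart between lookups. Callers now take the mutex once and get a consistent pair; a fragment (not a full function) in the shape `callPreStartContainerIfNeeded` takes later in this diff:

```go
// Single mutex-guarded lookup yields a consistent (endpoint, options)
// pair; previously two maps could disagree between two lookups.
m.mutex.Lock()
eI, ok := m.endpoints[resource]
m.mutex.Unlock()
if !ok {
	return fmt.Errorf("endpoint not found in cache for a registered resource: %s", resource)
}
if eI.opts == nil || !eI.opts.PreStartRequired {
	return nil // plugin did not opt in to PreStartContainer
}
```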
if err := m.removeContents(m.socketdir); err != nil { - glog.Errorf("Fail to clean up stale contents under %s: %v", m.socketdir, err) + klog.Errorf("Fail to clean up stale contents under %s: %v", m.socketdir, err) } s, err := net.Listen("unix", socketPath) if err != nil { - glog.Errorf(errListenSocket+" %v", err) + klog.Errorf(errListenSocket+" %v", err) return err } @@ -223,30 +228,71 @@ func (m *ManagerImpl) Start(activePods ActivePodsFunc, sourcesReady config.Sourc m.server.Serve(s) }() - glog.V(2).Infof("Serving device plugin registration server on %q", socketPath) + klog.V(2).Infof("Serving device plugin registration server on %q", socketPath) return nil } -// GetWatcherCallback returns callback function to be registered with plugin watcher -func (m *ManagerImpl) GetWatcherCallback() watcher.RegisterCallbackFn { +// GetWatcherHandler returns the plugin handler +func (m *ManagerImpl) GetWatcherHandler() watcher.PluginHandler { if f, err := os.Create(m.socketdir + "DEPRECATION"); err != nil { - glog.Errorf("Failed to create deprecation file at %s", m.socketdir) + klog.Errorf("Failed to create deprecation file at %s", m.socketdir) } else { f.Close() - glog.V(4).Infof("created deprecation file %s", f.Name()) + klog.V(4).Infof("created deprecation file %s", f.Name()) } - return func(name string, endpoint string, versions []string, sockPath string) (chan bool, error) { - if !m.isVersionCompatibleWithPlugin(versions) { - return nil, fmt.Errorf("manager version, %s, is not among plugin supported versions %v", pluginapi.Version, versions) - } + return watcher.PluginHandler(m) +} - if !v1helper.IsExtendedResourceName(v1.ResourceName(name)) { - return nil, fmt.Errorf("invalid name of device plugin socket: %s", fmt.Sprintf(errInvalidResourceName, name)) - } +// ValidatePlugin validates a plugin if the version is correct and the name has the format of an extended resource +func (m *ManagerImpl) ValidatePlugin(pluginName string, endpoint string, versions []string, foundInDeprecatedDir bool) error { + klog.V(2).Infof("Got Plugin %s at endpoint %s with versions %v", pluginName, endpoint, versions) + + if !m.isVersionCompatibleWithPlugin(versions) { + return fmt.Errorf("manager version, %s, is not among plugin supported versions %v", pluginapi.Version, versions) + } + + if !v1helper.IsExtendedResourceName(v1.ResourceName(pluginName)) { + return fmt.Errorf("invalid name of device plugin socket: %s", fmt.Sprintf(errInvalidResourceName, pluginName)) + } + + return nil +} + +// RegisterPlugin starts the endpoint and registers it +// TODO: Start the endpoint and wait for the First ListAndWatch call +// before registering the plugin +func (m *ManagerImpl) RegisterPlugin(pluginName string, endpoint string, versions []string) error { + klog.V(2).Infof("Registering Plugin %s at endpoint %s", pluginName, endpoint) - return m.addEndpointProbeMode(name, sockPath) + e, err := newEndpointImpl(endpoint, pluginName, m.callback) + if err != nil { + return fmt.Errorf("Failed to dial device plugin with socketPath %s: %v", endpoint, err) + } + + options, err := e.client.GetDevicePluginOptions(context.Background(), &pluginapi.Empty{}) + if err != nil { + return fmt.Errorf("Failed to get device plugin options: %v", err) + } + + m.registerEndpoint(pluginName, options, e) + go m.runEndpoint(pluginName, e) + + return nil +} + +// DeRegisterPlugin deregisters the plugin +// TODO work on the behavior for deregistering plugins +// e.g: Should we delete the resource +func (m *ManagerImpl) DeRegisterPlugin(pluginName string) { + 
m.mutex.Lock() + defer m.mutex.Unlock() + + // Note: This will mark the resource unhealthy as per the behavior + // in runEndpoint + if eI, ok := m.endpoints[pluginName]; ok { + eI.e.stop() } } @@ -297,7 +343,7 @@ func (m *ManagerImpl) Allocate(node *schedulercache.NodeInfo, attrs *lifecycle.P // Register registers a device plugin. func (m *ManagerImpl) Register(ctx context.Context, r *pluginapi.RegisterRequest) (*pluginapi.Empty, error) { - glog.Infof("Got registration request from device plugin with resource name %q", r.ResourceName) + klog.Infof("Got registration request from device plugin with resource name %q", r.ResourceName) metrics.DevicePluginRegistrationCount.WithLabelValues(r.ResourceName).Inc() var versionCompatible bool for _, v := range pluginapi.SupportedVersions { @@ -308,13 +354,13 @@ func (m *ManagerImpl) Register(ctx context.Context, r *pluginapi.RegisterRequest } if !versionCompatible { errorString := fmt.Sprintf(errUnsupportedVersion, r.Version, pluginapi.SupportedVersions) - glog.Infof("Bad registration request from device plugin with resource name %q: %s", r.ResourceName, errorString) + klog.Infof("Bad registration request from device plugin with resource name %q: %s", r.ResourceName, errorString) return &pluginapi.Empty{}, fmt.Errorf(errorString) } if !v1helper.IsExtendedResourceName(v1.ResourceName(r.ResourceName)) { errorString := fmt.Sprintf(errInvalidResourceName, r.ResourceName) - glog.Infof("Bad registration request from device plugin: %s", errorString) + klog.Infof("Bad registration request from device plugin: %s", errorString) return &pluginapi.Empty{}, fmt.Errorf(errorString) } @@ -333,8 +379,8 @@ func (m *ManagerImpl) Register(ctx context.Context, r *pluginapi.RegisterRequest func (m *ManagerImpl) Stop() error { m.mutex.Lock() defer m.mutex.Unlock() - for _, e := range m.endpoints { - e.stop() + for _, eI := range m.endpoints { + eI.e.stop() } if m.server == nil { @@ -346,57 +392,32 @@ func (m *ManagerImpl) Stop() error { return nil } -func (m *ManagerImpl) addEndpointProbeMode(resourceName string, socketPath string) (chan bool, error) { - chanForAckOfNotification := make(chan bool) - - new, err := newEndpointImpl(socketPath, resourceName, m.callback) - if err != nil { - glog.Errorf("Failed to dial device plugin with socketPath %s: %v", socketPath, err) - return nil, fmt.Errorf("Failed to dial device plugin with socketPath %s: %v", socketPath, err) - } - - options, err := new.client.GetDevicePluginOptions(context.Background(), &pluginapi.Empty{}) - if err != nil { - glog.Errorf("Failed to get device plugin options: %v", err) - return nil, fmt.Errorf("Failed to get device plugin options: %v", err) - } - m.registerEndpoint(resourceName, options, new) - - go func() { - select { - case <-chanForAckOfNotification: - close(chanForAckOfNotification) - m.runEndpoint(resourceName, new) - case <-time.After(time.Second): - glog.Errorf("Timed out while waiting for notification ack from plugin") - } - }() - return chanForAckOfNotification, nil -} - -func (m *ManagerImpl) registerEndpoint(resourceName string, options *pluginapi.DevicePluginOptions, e *endpointImpl) { +func (m *ManagerImpl) registerEndpoint(resourceName string, options *pluginapi.DevicePluginOptions, e endpoint) { m.mutex.Lock() defer m.mutex.Unlock() - m.pluginOpts[resourceName] = options - m.endpoints[resourceName] = e - glog.V(2).Infof("Registered endpoint %v", e) + + m.endpoints[resourceName] = endpointInfo{e: e, opts: options} + klog.V(2).Infof("Registered endpoint %v", e) } -func (m *ManagerImpl) 
runEndpoint(resourceName string, e *endpointImpl) { +func (m *ManagerImpl) runEndpoint(resourceName string, e endpoint) { e.run() e.stop() + m.mutex.Lock() defer m.mutex.Unlock() - if old, ok := m.endpoints[resourceName]; ok && old == e { + + if old, ok := m.endpoints[resourceName]; ok && old.e == e { m.markResourceUnhealthy(resourceName) } - glog.V(2).Infof("Unregistered endpoint %v", e) + + klog.V(2).Infof("Endpoint (%s, %v) became unhealthy", resourceName, e) } func (m *ManagerImpl) addEndpoint(r *pluginapi.RegisterRequest) { new, err := newEndpointImpl(filepath.Join(m.socketdir, r.Endpoint), r.ResourceName, m.callback) if err != nil { - glog.Errorf("Failed to dial device plugin with request %v: %v", r, err) + klog.Errorf("Failed to dial device plugin with request %v: %v", r, err) return } m.registerEndpoint(r.ResourceName, r.Options, new) @@ -406,7 +427,7 @@ func (m *ManagerImpl) addEndpoint(r *pluginapi.RegisterRequest) { } func (m *ManagerImpl) markResourceUnhealthy(resourceName string) { - glog.V(2).Infof("Mark all resources Unhealthy for resource %s", resourceName) + klog.V(2).Infof("Mark all resources Unhealthy for resource %s", resourceName) healthyDevices := sets.NewString() if _, ok := m.healthyDevices[resourceName]; ok { healthyDevices = m.healthyDevices[resourceName] @@ -437,13 +458,13 @@ func (m *ManagerImpl) GetCapacity() (v1.ResourceList, v1.ResourceList, []string) deletedResources := sets.NewString() m.mutex.Lock() for resourceName, devices := range m.healthyDevices { - e, ok := m.endpoints[resourceName] - if (ok && e.stopGracePeriodExpired()) || !ok { + eI, ok := m.endpoints[resourceName] + if (ok && eI.e.stopGracePeriodExpired()) || !ok { // The resources contained in endpoints and (un)healthyDevices // should always be consistent. Otherwise, we run with the risk // of failing to garbage collect non-existing resources or devices. if !ok { - glog.Errorf("unexpected: healthyDevices and endpoints are out of sync") + klog.Errorf("unexpected: healthyDevices and endpoints are out of sync") } delete(m.endpoints, resourceName) delete(m.healthyDevices, resourceName) @@ -455,10 +476,10 @@ func (m *ManagerImpl) GetCapacity() (v1.ResourceList, v1.ResourceList, []string) } } for resourceName, devices := range m.unhealthyDevices { - e, ok := m.endpoints[resourceName] - if (ok && e.stopGracePeriodExpired()) || !ok { + eI, ok := m.endpoints[resourceName] + if (ok && eI.e.stopGracePeriodExpired()) || !ok { if !ok { - glog.Errorf("unexpected: unhealthyDevices and endpoints are out of sync") + klog.Errorf("unexpected: unhealthyDevices and endpoints are out of sync") } delete(m.endpoints, resourceName) delete(m.unhealthyDevices, resourceName) @@ -504,7 +525,7 @@ func (m *ManagerImpl) readCheckpoint() error { err := m.checkpointManager.GetCheckpoint(kubeletDeviceManagerCheckpoint, cp) if err != nil { if err == errors.ErrCheckpointNotFound { - glog.Warningf("Failed to retrieve checkpoint for %q: %v", kubeletDeviceManagerCheckpoint, err) + klog.Warningf("Failed to retrieve checkpoint for %q: %v", kubeletDeviceManagerCheckpoint, err) return nil } return err @@ -519,7 +540,7 @@ func (m *ManagerImpl) readCheckpoint() error { // will stay zero till the corresponding device plugin re-registers. 
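`GetCapacity`, reworked above, does double duty: it garbage-collects resources whose endpoint has been stopped past its grace period and reports per-resource device counts. A sketch of the counting half — capacity including unhealthy devices while allocatable counts only healthy ones matches how the healthy/unhealthy sets are consumed here, though that exact split is an inference:

```go
package sketch

import (
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/util/sets"
)

// capacityFromDevices turns per-resource device sets into the
// (capacity, allocatable) lists GetCapacity returns -- counting only;
// the real method also garbage-collects expired endpoints.
func capacityFromDevices(healthy, unhealthy map[string]sets.String) (capacity, allocatable v1.ResourceList) {
	capacity = v1.ResourceList{}
	allocatable = v1.ResourceList{}
	for name, devs := range healthy {
		n := *resource.NewQuantity(int64(devs.Len()), resource.DecimalSI)
		capacity[v1.ResourceName(name)] = n
		allocatable[v1.ResourceName(name)] = n
	}
	for name, devs := range unhealthy {
		q := capacity[v1.ResourceName(name)]
		q.Add(*resource.NewQuantity(int64(devs.Len()), resource.DecimalSI))
		capacity[v1.ResourceName(name)] = q
	}
	return capacity, allocatable
}
```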
m.healthyDevices[resource] = sets.NewString() m.unhealthyDevices[resource] = sets.NewString() - m.endpoints[resource] = newStoppedEndpointImpl(resource) + m.endpoints[resource] = endpointInfo{e: newStoppedEndpointImpl(resource), opts: nil} } return nil } @@ -541,7 +562,7 @@ func (m *ManagerImpl) updateAllocatedDevices(activePods []*v1.Pod) { if len(podsToBeRemoved) <= 0 { return } - glog.V(3).Infof("pods to be removed: %v", podsToBeRemoved.List()) + klog.V(3).Infof("pods to be removed: %v", podsToBeRemoved.List()) m.podDevices.delete(podsToBeRemoved.List()) // Regenerated allocatedDevices after we update pod allocation information. m.allocatedDevices = m.podDevices.devices() @@ -557,7 +578,7 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi // This can happen if a container restarts for example. devices := m.podDevices.containerDevices(podUID, contName, resource) if devices != nil { - glog.V(3).Infof("Found pre-allocated devices for resource %s container %q in Pod %q: %v", resource, contName, podUID, devices.List()) + klog.V(3).Infof("Found pre-allocated devices for resource %s container %q in Pod %q: %v", resource, contName, podUID, devices.List()) needed = needed - devices.Len() // A pod's resource is not expected to change once admitted by the API server, // so just fail loudly here. We can revisit this part if this no longer holds. @@ -569,7 +590,7 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi // No change, no work. return nil, nil } - glog.V(3).Infof("Needs to allocate %d %q for pod %q container %q", needed, resource, podUID, contName) + klog.V(3).Infof("Needs to allocate %d %q for pod %q container %q", needed, resource, podUID, contName) // Needs to allocate additional devices. if _, ok := m.healthyDevices[resource]; !ok { return nil, fmt.Errorf("can't allocate unregistered device %s", resource) @@ -620,7 +641,7 @@ func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Cont for k, v := range container.Resources.Limits { resource := string(k) needed := int(v.Value()) - glog.V(3).Infof("needs %d %s", needed, resource) + klog.V(3).Infof("needs %d %s", needed, resource) if !m.isDevicePluginResource(resource) { continue } @@ -652,7 +673,7 @@ func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Cont // plugin Allocate grpc calls if it becomes common that a container may require // resources from multiple device plugins. m.mutex.Lock() - e, ok := m.endpoints[resource] + eI, ok := m.endpoints[resource] m.mutex.Unlock() if !ok { m.mutex.Lock() @@ -664,8 +685,8 @@ func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Cont devs := allocDevices.UnsortedList() // TODO: refactor this part of code to just append a ContainerAllocationRequest // in a passed in AllocateRequest pointer, and issues a single Allocate call per pod. - glog.V(3).Infof("Making allocation request for devices %v for device plugin %s", devs, resource) - resp, err := e.allocate(devs) + klog.V(3).Infof("Making allocation request for devices %v for device plugin %s", devs, resource) + resp, err := eI.e.allocate(devs) metrics.DevicePluginAllocationLatency.WithLabelValues(resource).Observe(metrics.SinceInMicroseconds(startRPCTime)) if err != nil { // In case of allocation failure, we want to restore m.allocatedDevices @@ -715,13 +736,15 @@ func (m *ManagerImpl) GetDeviceRunContainerOptions(pod *v1.Pod, container *v1.Co // with PreStartRequired option set. 
func (m *ManagerImpl) callPreStartContainerIfNeeded(podUID, contName, resource string) error { m.mutex.Lock() - opts, ok := m.pluginOpts[resource] + eI, ok := m.endpoints[resource] if !ok { m.mutex.Unlock() - return fmt.Errorf("Plugin options not found in cache for resource: %s", resource) - } else if opts == nil || !opts.PreStartRequired { + return fmt.Errorf("endpoint not found in cache for a registered resource: %s", resource) + } + + if eI.opts == nil || !eI.opts.PreStartRequired { m.mutex.Unlock() - glog.V(4).Infof("Plugin options indicate to skip PreStartContainer for resource: %s", resource) + klog.V(4).Infof("Plugin options indicate to skip PreStartContainer for resource: %s", resource) return nil } @@ -731,16 +754,10 @@ func (m *ManagerImpl) callPreStartContainerIfNeeded(podUID, contName, resource s return fmt.Errorf("no devices found allocated in local cache for pod %s, container %s, resource %s", podUID, contName, resource) } - e, ok := m.endpoints[resource] - if !ok { - m.mutex.Unlock() - return fmt.Errorf("endpoint not found in cache for a registered resource: %s", resource) - } - m.mutex.Unlock() devs := devices.UnsortedList() - glog.V(4).Infof("Issuing an PreStartContainer call for container, %s, of pod %s", contName, podUID) - _, err := e.preStartContainer(devs) + klog.V(4).Infof("Issuing a PreStartContainer call for container %s of pod %s", contName, podUID) + _, err := eI.e.preStartContainer(devs) if err != nil { return fmt.Errorf("device plugin PreStartContainer rpc failed with err: %v", err) } @@ -786,3 +803,10 @@ func (m *ManagerImpl) isDevicePluginResource(resource string) bool { } return false } + +// GetDevices returns the devices used by the specified container +func (m *ManagerImpl) GetDevices(podUID, containerName string) []*podresourcesapi.ContainerDevices { + m.mutex.Lock() + defer m.mutex.Unlock() + return m.podDevices.getContainerDevices(podUID, containerName) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager_stub.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager_stub.go index 66f8d1004cd0..e32b671ffb20 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager_stub.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager_stub.go @@ -18,6 +18,7 @@ package devicemanager import ( "k8s.io/api/core/v1" + podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1" "k8s.io/kubernetes/pkg/kubelet/config" "k8s.io/kubernetes/pkg/kubelet/lifecycle" "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher" @@ -57,9 +58,12 @@ func (h *ManagerStub) GetCapacity() (v1.ResourceList, v1.ResourceList, []string) return nil, nil, []string{} } -// GetWatcherCallback returns plugin watcher callback -func (h *ManagerStub) GetWatcherCallback() pluginwatcher.RegisterCallbackFn { - return func(name string, endpoint string, versions []string, sockPath string) (chan bool, error) { - return nil, nil - } +// GetWatcherHandler returns plugin watcher interface +func (h *ManagerStub) GetWatcherHandler() pluginwatcher.PluginHandler { + return nil +} + +// GetDevices returns nil +func (h *ManagerStub) GetDevices(_, _ string) []*podresourcesapi.ContainerDevices { + return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/pod_devices.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/pod_devices.go index 7df0e201c711..4fd4b196c4ed 100644 ---
a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/pod_devices.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/pod_devices.go @@ -17,10 +17,11 @@ limitations under the License. package devicemanager import ( - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/sets" pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1" + podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1" "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" ) @@ -135,13 +136,13 @@ func (pdev podDevices) toCheckpointData() []checkpoint.PodDevicesEntry { for resource, devices := range resources { devIds := devices.deviceIds.UnsortedList() if devices.allocResp == nil { - glog.Errorf("Can't marshal allocResp for %v %v %v: allocation response is missing", podUID, conName, resource) + klog.Errorf("Can't marshal allocResp for %v %v %v: allocation response is missing", podUID, conName, resource) continue } allocResp, err := devices.allocResp.Marshal() if err != nil { - glog.Errorf("Can't marshal allocResp for %v %v %v: %v", podUID, conName, resource, err) + klog.Errorf("Can't marshal allocResp for %v %v %v: %v", podUID, conName, resource, err) continue } data = append(data, checkpoint.PodDevicesEntry{ @@ -159,7 +160,7 @@ func (pdev podDevices) toCheckpointData() []checkpoint.PodDevicesEntry { // Populates podDevices from the passed in checkpointData. func (pdev podDevices) fromCheckpointData(data []checkpoint.PodDevicesEntry) { for _, entry := range data { - glog.V(2).Infof("Get checkpoint entry: %v %v %v %v %v\n", + klog.V(2).Infof("Get checkpoint entry: %v %v %v %v %v\n", entry.PodUID, entry.ContainerName, entry.ResourceName, entry.DeviceIDs, entry.AllocResp) devIDs := sets.NewString() for _, devID := range entry.DeviceIDs { @@ -168,7 +169,7 @@ func (pdev podDevices) fromCheckpointData(data []checkpoint.PodDevicesEntry) { allocResp := &pluginapi.ContainerAllocateResponse{} err := allocResp.Unmarshal(entry.AllocResp) if err != nil { - glog.Errorf("Can't unmarshal allocResp for %v %v %v: %v", entry.PodUID, entry.ContainerName, entry.ResourceName, err) + klog.Errorf("Can't unmarshal allocResp for %v %v %v: %v", entry.PodUID, entry.ContainerName, entry.ResourceName, err) continue } pdev.insert(entry.PodUID, entry.ContainerName, entry.ResourceName, devIDs, allocResp) @@ -203,13 +204,13 @@ func (pdev podDevices) deviceRunContainerOptions(podUID, contName string) *Devic // Updates RunContainerOptions.Envs. for k, v := range resp.Envs { if e, ok := envsMap[k]; ok { - glog.V(4).Infof("Skip existing env %s %s", k, v) + klog.V(4).Infof("Skip existing env %s %s", k, v) if e != v { - glog.Errorf("Environment variable %s has conflicting setting: %s and %s", k, e, v) + klog.Errorf("Environment variable %s has conflicting setting: %s and %s", k, e, v) } continue } - glog.V(4).Infof("Add env %s %s", k, v) + klog.V(4).Infof("Add env %s %s", k, v) envsMap[k] = v opts.Envs = append(opts.Envs, kubecontainer.EnvVar{Name: k, Value: v}) } @@ -217,14 +218,14 @@ func (pdev podDevices) deviceRunContainerOptions(podUID, contName string) *Devic // Updates RunContainerOptions.Devices. 
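The env, device, mount, and annotation loops in deviceRunContainerOptions all apply the same first-writer-wins merge, logging any disagreement between allocation responses instead of silently overriding. Reduced to a standalone toy (names and values invented):

```go
package main

import "fmt"

// mergeEnvs keeps the first value seen for each key and reports conflicts,
// matching the merge pattern in the hunk above.
func mergeEnvs(dst, src map[string]string) {
	for k, v := range src {
		if e, ok := dst[k]; ok {
			if e != v {
				fmt.Printf("env %s has conflicting setting: %s and %s\n", k, e, v)
			}
			continue // existing value wins
		}
		dst[k] = v
	}
}

func main() {
	opts := map[string]string{"NVIDIA_VISIBLE_DEVICES": "0"}
	mergeEnvs(opts, map[string]string{
		"NVIDIA_VISIBLE_DEVICES": "1", // conflict: logged, first value kept
		"LD_LIBRARY_PATH":        "/usr/local/nvidia/lib64",
	})
	fmt.Println(opts["NVIDIA_VISIBLE_DEVICES"]) // 0
}
```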
for _, dev := range resp.Devices { if d, ok := devsMap[dev.ContainerPath]; ok { - glog.V(4).Infof("Skip existing device %s %s", dev.ContainerPath, dev.HostPath) + klog.V(4).Infof("Skip existing device %s %s", dev.ContainerPath, dev.HostPath) if d != dev.HostPath { - glog.Errorf("Container device %s has conflicting mapping host devices: %s and %s", + klog.Errorf("Container device %s has conflicting mapping host devices: %s and %s", dev.ContainerPath, d, dev.HostPath) } continue } - glog.V(4).Infof("Add device %s %s", dev.ContainerPath, dev.HostPath) + klog.V(4).Infof("Add device %s %s", dev.ContainerPath, dev.HostPath) devsMap[dev.ContainerPath] = dev.HostPath opts.Devices = append(opts.Devices, kubecontainer.DeviceInfo{ PathOnHost: dev.HostPath, @@ -236,14 +237,14 @@ func (pdev podDevices) deviceRunContainerOptions(podUID, contName string) *Devic // Updates RunContainerOptions.Mounts. for _, mount := range resp.Mounts { if m, ok := mountsMap[mount.ContainerPath]; ok { - glog.V(4).Infof("Skip existing mount %s %s", mount.ContainerPath, mount.HostPath) + klog.V(4).Infof("Skip existing mount %s %s", mount.ContainerPath, mount.HostPath) if m != mount.HostPath { - glog.Errorf("Container mount %s has conflicting mapping host mounts: %s and %s", + klog.Errorf("Container mount %s has conflicting mapping host mounts: %s and %s", mount.ContainerPath, m, mount.HostPath) } continue } - glog.V(4).Infof("Add mount %s %s", mount.ContainerPath, mount.HostPath) + klog.V(4).Infof("Add mount %s %s", mount.ContainerPath, mount.HostPath) mountsMap[mount.ContainerPath] = mount.HostPath opts.Mounts = append(opts.Mounts, kubecontainer.Mount{ Name: mount.ContainerPath, @@ -258,16 +259,34 @@ func (pdev podDevices) deviceRunContainerOptions(podUID, contName string) *Devic // Updates for Annotations for k, v := range resp.Annotations { if e, ok := annotationsMap[k]; ok { - glog.V(4).Infof("Skip existing annotation %s %s", k, v) + klog.V(4).Infof("Skip existing annotation %s %s", k, v) if e != v { - glog.Errorf("Annotation %s has conflicting setting: %s and %s", k, e, v) + klog.Errorf("Annotation %s has conflicting setting: %s and %s", k, e, v) } continue } - glog.V(4).Infof("Add annotation %s %s", k, v) + klog.V(4).Infof("Add annotation %s %s", k, v) annotationsMap[k] = v opts.Annotations = append(opts.Annotations, kubecontainer.Annotation{Name: k, Value: v}) } } return opts } + +// getContainerDevices returns the devices assigned to the provided container for all ResourceNames +func (pdev podDevices) getContainerDevices(podUID, contName string) []*podresourcesapi.ContainerDevices { + if _, podExists := pdev[podUID]; !podExists { + return nil + } + if _, contExists := pdev[podUID][contName]; !contExists { + return nil + } + cDev := []*podresourcesapi.ContainerDevices{} + for resource, allocateInfo := range pdev[podUID][contName] { + cDev = append(cDev, &podresourcesapi.ContainerDevices{ + ResourceName: resource, + DeviceIds: allocateInfo.deviceIds.UnsortedList(), + }) + } + return cDev +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/types.go index 52176dec71a9..8396378b4074 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/types.go @@ -20,6 +20,7 @@ import ( "time" "k8s.io/api/core/v1" + podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1" 
"k8s.io/kubernetes/pkg/kubelet/config" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/lifecycle" @@ -53,7 +54,10 @@ type Manager interface { // GetCapacity returns the amount of available device plugin resource capacity, resource allocatable // and inactive device plugin resources previously registered on the node. GetCapacity() (v1.ResourceList, v1.ResourceList, []string) - GetWatcherCallback() watcher.RegisterCallbackFn + GetWatcherHandler() watcher.PluginHandler + + // GetDevices returns information about the devices assigned to pods and containers + GetDevices(podUID, containerName string) []*podresourcesapi.ContainerDevices } // DeviceRunContainerOptions contains the combined container runtime settings to consume its allocated devices. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers.go new file mode 100644 index 000000000000..dbbea4a80404 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers.go @@ -0,0 +1,46 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cm + +import ( + "k8s.io/api/core/v1" + evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api" +) + +// hardEvictionReservation returns a resourcelist that includes reservation of resources based on hard eviction thresholds. +func hardEvictionReservation(thresholds []evictionapi.Threshold, capacity v1.ResourceList) v1.ResourceList { + if len(thresholds) == 0 { + return nil + } + ret := v1.ResourceList{} + for _, threshold := range thresholds { + if threshold.Operator != evictionapi.OpLessThan { + continue + } + switch threshold.Signal { + case evictionapi.SignalMemoryAvailable: + memoryCapacity := capacity[v1.ResourceMemory] + value := evictionapi.GetThresholdQuantity(threshold.Value, &memoryCapacity) + ret[v1.ResourceMemory] = *value + case evictionapi.SignalNodeFsAvailable: + storageCapacity := capacity[v1.ResourceEphemeralStorage] + value := evictionapi.GetThresholdQuantity(threshold.Value, &storageCapacity) + ret[v1.ResourceEphemeralStorage] = *value + } + } + return ret +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/node_container_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/node_container_manager.go deleted file mode 100644 index 1267b3f7d6e4..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/node_container_manager.go +++ /dev/null @@ -1,258 +0,0 @@ -// +build linux - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cm - -import ( - "fmt" - "strings" - "time" - - "github.com/golang/glog" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/types" - utilfeature "k8s.io/apiserver/pkg/util/feature" - kubefeatures "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/kubelet/events" - evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api" - kubetypes "k8s.io/kubernetes/pkg/kubelet/types" -) - -const ( - defaultNodeAllocatableCgroupName = "kubepods" -) - -//createNodeAllocatableCgroups creates Node Allocatable Cgroup when CgroupsPerQOS flag is specified as true -func (cm *containerManagerImpl) createNodeAllocatableCgroups() error { - cgroupConfig := &CgroupConfig{ - Name: cm.cgroupRoot, - // The default limits for cpu shares can be very low which can lead to CPU starvation for pods. - ResourceParameters: getCgroupConfig(cm.capacity), - } - if cm.cgroupManager.Exists(cgroupConfig.Name) { - return nil - } - if err := cm.cgroupManager.Create(cgroupConfig); err != nil { - glog.Errorf("Failed to create %q cgroup", cm.cgroupRoot) - return err - } - return nil -} - -// enforceNodeAllocatableCgroups enforce Node Allocatable Cgroup settings. -func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error { - nc := cm.NodeConfig.NodeAllocatableConfig - - // We need to update limits on node allocatable cgroup no matter what because - // default cpu shares on cgroups are low and can cause cpu starvation. - nodeAllocatable := cm.capacity - // Use Node Allocatable limits instead of capacity if the user requested enforcing node allocatable. - if cm.CgroupsPerQOS && nc.EnforceNodeAllocatable.Has(kubetypes.NodeAllocatableEnforcementKey) { - nodeAllocatable = cm.getNodeAllocatableAbsolute() - } - - glog.V(4).Infof("Attempting to enforce Node Allocatable with config: %+v", nc) - - cgroupConfig := &CgroupConfig{ - Name: cm.cgroupRoot, - ResourceParameters: getCgroupConfig(nodeAllocatable), - } - - // Using ObjectReference for events as the node maybe not cached; refer to #42701 for detail. - nodeRef := &v1.ObjectReference{ - Kind: "Node", - Name: cm.nodeInfo.Name, - UID: types.UID(cm.nodeInfo.Name), - Namespace: "", - } - - // If Node Allocatable is enforced on a node that has not been drained or is updated on an existing node to a lower value, - // existing memory usage across pods might be higher than current Node Allocatable Memory Limits. - // Pod Evictions are expected to bring down memory usage to below Node Allocatable limits. - // Until evictions happen retry cgroup updates. - // Update limits on non root cgroup-root to be safe since the default limits for CPU can be too low. 
- // Check if cgroupRoot is set to a non-empty value (empty would be the root container) - if len(cm.cgroupRoot) > 0 { - go func() { - for { - err := cm.cgroupManager.Update(cgroupConfig) - if err == nil { - cm.recorder.Event(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated Node Allocatable limit across pods") - return - } - message := fmt.Sprintf("Failed to update Node Allocatable Limits %q: %v", cm.cgroupRoot, err) - cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message) - time.Sleep(time.Minute) - } - }() - } - // Now apply kube reserved and system reserved limits if required. - if nc.EnforceNodeAllocatable.Has(kubetypes.SystemReservedEnforcementKey) { - glog.V(2).Infof("Enforcing System reserved on cgroup %q with limits: %+v", nc.SystemReservedCgroupName, nc.SystemReserved) - if err := enforceExistingCgroup(cm.cgroupManager, ParseCgroupfsToCgroupName(nc.SystemReservedCgroupName), nc.SystemReserved); err != nil { - message := fmt.Sprintf("Failed to enforce System Reserved Cgroup Limits on %q: %v", nc.SystemReservedCgroupName, err) - cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message) - return fmt.Errorf(message) - } - cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on system reserved cgroup %v", nc.SystemReservedCgroupName) - } - if nc.EnforceNodeAllocatable.Has(kubetypes.KubeReservedEnforcementKey) { - glog.V(2).Infof("Enforcing kube reserved on cgroup %q with limits: %+v", nc.KubeReservedCgroupName, nc.KubeReserved) - if err := enforceExistingCgroup(cm.cgroupManager, ParseCgroupfsToCgroupName(nc.KubeReservedCgroupName), nc.KubeReserved); err != nil { - message := fmt.Sprintf("Failed to enforce Kube Reserved Cgroup Limits on %q: %v", nc.KubeReservedCgroupName, err) - cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message) - return fmt.Errorf(message) - } - cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on kube reserved cgroup %v", nc.KubeReservedCgroupName) - } - return nil -} - -// enforceExistingCgroup updates the limits `rl` on existing cgroup `cName` using `cgroupManager` interface. -func enforceExistingCgroup(cgroupManager CgroupManager, cName CgroupName, rl v1.ResourceList) error { - cgroupConfig := &CgroupConfig{ - Name: cName, - ResourceParameters: getCgroupConfig(rl), - } - if cgroupConfig.ResourceParameters == nil { - return fmt.Errorf("%q cgroup is not config properly", cgroupConfig.Name) - } - glog.V(4).Infof("Enforcing limits on cgroup %q with %d cpu shares and %d bytes of memory", cName, cgroupConfig.ResourceParameters.CpuShares, cgroupConfig.ResourceParameters.Memory) - if !cgroupManager.Exists(cgroupConfig.Name) { - return fmt.Errorf("%q cgroup does not exist", cgroupConfig.Name) - } - if err := cgroupManager.Update(cgroupConfig); err != nil { - return err - } - return nil -} - -// getCgroupConfig returns a ResourceConfig object that can be used to create or update cgroups via CgroupManager interface. -func getCgroupConfig(rl v1.ResourceList) *ResourceConfig { - // TODO(vishh): Set CPU Quota if necessary. - if rl == nil { - return nil - } - var rc ResourceConfig - if q, exists := rl[v1.ResourceMemory]; exists { - // Memory is defined in bytes. - val := q.Value() - rc.Memory = &val - } - if q, exists := rl[v1.ResourceCPU]; exists { - // CPU is defined in milli-cores. 
- val := MilliCPUToShares(q.MilliValue()) - rc.CpuShares = &val - } - if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.HugePages) { - rc.HugePageLimit = HugePageLimits(rl) - } - - return &rc -} - -// getNodeAllocatableAbsolute returns the absolute value of Node Allocatable which is primarily useful for enforcement. -// Note that not all resources that are available on the node are included in the returned list of resources. -// Returns a ResourceList. -func (cm *containerManagerImpl) getNodeAllocatableAbsolute() v1.ResourceList { - result := make(v1.ResourceList) - for k, v := range cm.capacity { - value := *(v.Copy()) - if cm.NodeConfig.SystemReserved != nil { - value.Sub(cm.NodeConfig.SystemReserved[k]) - } - if cm.NodeConfig.KubeReserved != nil { - value.Sub(cm.NodeConfig.KubeReserved[k]) - } - if value.Sign() < 0 { - // Negative Allocatable resources don't make sense. - value.Set(0) - } - result[k] = value - } - return result - -} - -// GetNodeAllocatableReservation returns amount of compute or storage resource that have to be reserved on this node from scheduling. -func (cm *containerManagerImpl) GetNodeAllocatableReservation() v1.ResourceList { - evictionReservation := hardEvictionReservation(cm.HardEvictionThresholds, cm.capacity) - result := make(v1.ResourceList) - for k := range cm.capacity { - value := resource.NewQuantity(0, resource.DecimalSI) - if cm.NodeConfig.SystemReserved != nil { - value.Add(cm.NodeConfig.SystemReserved[k]) - } - if cm.NodeConfig.KubeReserved != nil { - value.Add(cm.NodeConfig.KubeReserved[k]) - } - if evictionReservation != nil { - value.Add(evictionReservation[k]) - } - if !value.IsZero() { - result[k] = *value - } - } - return result -} - -// hardEvictionReservation returns a resourcelist that includes reservation of resources based on hard eviction thresholds. -func hardEvictionReservation(thresholds []evictionapi.Threshold, capacity v1.ResourceList) v1.ResourceList { - if len(thresholds) == 0 { - return nil - } - ret := v1.ResourceList{} - for _, threshold := range thresholds { - if threshold.Operator != evictionapi.OpLessThan { - continue - } - switch threshold.Signal { - case evictionapi.SignalMemoryAvailable: - memoryCapacity := capacity[v1.ResourceMemory] - value := evictionapi.GetThresholdQuantity(threshold.Value, &memoryCapacity) - ret[v1.ResourceMemory] = *value - case evictionapi.SignalNodeFsAvailable: - storageCapacity := capacity[v1.ResourceEphemeralStorage] - value := evictionapi.GetThresholdQuantity(threshold.Value, &storageCapacity) - ret[v1.ResourceEphemeralStorage] = *value - } - } - return ret -} - -// validateNodeAllocatable ensures that the user specified Node Allocatable Configuration doesn't reserve more than the node capacity. -// Returns error if the configuration is invalid, nil otherwise. -func (cm *containerManagerImpl) validateNodeAllocatable() error { - var errors []string - nar := cm.GetNodeAllocatableReservation() - for k, v := range nar { - value := cm.capacity[k].DeepCopy() - value.Sub(v) - - if value.Sign() < 0 { - errors = append(errors, fmt.Sprintf("Resource %q has an allocatable of %v, capacity of %v", k, v, value)) - } - } - - if len(errors) > 0 { - return fmt.Errorf("Invalid Node Allocatable configuration. 
%s", strings.Join(errors, " ")) - } - return nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/node_container_manager_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/node_container_manager_linux.go new file mode 100644 index 000000000000..8d7f16f1d190 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/node_container_manager_linux.go @@ -0,0 +1,232 @@ +// +build linux + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cm + +import ( + "fmt" + "strings" + "time" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/types" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" + kubefeatures "k8s.io/kubernetes/pkg/features" + "k8s.io/kubernetes/pkg/kubelet/events" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" +) + +const ( + defaultNodeAllocatableCgroupName = "kubepods" +) + +//createNodeAllocatableCgroups creates Node Allocatable Cgroup when CgroupsPerQOS flag is specified as true +func (cm *containerManagerImpl) createNodeAllocatableCgroups() error { + cgroupConfig := &CgroupConfig{ + Name: cm.cgroupRoot, + // The default limits for cpu shares can be very low which can lead to CPU starvation for pods. + ResourceParameters: getCgroupConfig(cm.capacity), + } + if cm.cgroupManager.Exists(cgroupConfig.Name) { + return nil + } + if err := cm.cgroupManager.Create(cgroupConfig); err != nil { + klog.Errorf("Failed to create %q cgroup", cm.cgroupRoot) + return err + } + return nil +} + +// enforceNodeAllocatableCgroups enforce Node Allocatable Cgroup settings. +func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error { + nc := cm.NodeConfig.NodeAllocatableConfig + + // We need to update limits on node allocatable cgroup no matter what because + // default cpu shares on cgroups are low and can cause cpu starvation. + nodeAllocatable := cm.capacity + // Use Node Allocatable limits instead of capacity if the user requested enforcing node allocatable. + if cm.CgroupsPerQOS && nc.EnforceNodeAllocatable.Has(kubetypes.NodeAllocatableEnforcementKey) { + nodeAllocatable = cm.getNodeAllocatableAbsolute() + } + + klog.V(4).Infof("Attempting to enforce Node Allocatable with config: %+v", nc) + + cgroupConfig := &CgroupConfig{ + Name: cm.cgroupRoot, + ResourceParameters: getCgroupConfig(nodeAllocatable), + } + + // Using ObjectReference for events as the node maybe not cached; refer to #42701 for detail. + nodeRef := &v1.ObjectReference{ + Kind: "Node", + Name: cm.nodeInfo.Name, + UID: types.UID(cm.nodeInfo.Name), + Namespace: "", + } + + // If Node Allocatable is enforced on a node that has not been drained or is updated on an existing node to a lower value, + // existing memory usage across pods might be higher than current Node Allocatable Memory Limits. + // Pod Evictions are expected to bring down memory usage to below Node Allocatable limits. + // Until evictions happen retry cgroup updates. 
+ // Update limits on non root cgroup-root to be safe since the default limits for CPU can be too low. + // Check if cgroupRoot is set to a non-empty value (empty would be the root container) + if len(cm.cgroupRoot) > 0 { + go func() { + for { + err := cm.cgroupManager.Update(cgroupConfig) + if err == nil { + cm.recorder.Event(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated Node Allocatable limit across pods") + return + } + message := fmt.Sprintf("Failed to update Node Allocatable Limits %q: %v", cm.cgroupRoot, err) + cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message) + time.Sleep(time.Minute) + } + }() + } + // Now apply kube reserved and system reserved limits if required. + if nc.EnforceNodeAllocatable.Has(kubetypes.SystemReservedEnforcementKey) { + klog.V(2).Infof("Enforcing System reserved on cgroup %q with limits: %+v", nc.SystemReservedCgroupName, nc.SystemReserved) + if err := enforceExistingCgroup(cm.cgroupManager, ParseCgroupfsToCgroupName(nc.SystemReservedCgroupName), nc.SystemReserved); err != nil { + message := fmt.Sprintf("Failed to enforce System Reserved Cgroup Limits on %q: %v", nc.SystemReservedCgroupName, err) + cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message) + return fmt.Errorf(message) + } + cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on system reserved cgroup %v", nc.SystemReservedCgroupName) + } + if nc.EnforceNodeAllocatable.Has(kubetypes.KubeReservedEnforcementKey) { + klog.V(2).Infof("Enforcing kube reserved on cgroup %q with limits: %+v", nc.KubeReservedCgroupName, nc.KubeReserved) + if err := enforceExistingCgroup(cm.cgroupManager, ParseCgroupfsToCgroupName(nc.KubeReservedCgroupName), nc.KubeReserved); err != nil { + message := fmt.Sprintf("Failed to enforce Kube Reserved Cgroup Limits on %q: %v", nc.KubeReservedCgroupName, err) + cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message) + return fmt.Errorf(message) + } + cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on kube reserved cgroup %v", nc.KubeReservedCgroupName) + } + return nil +} + +// enforceExistingCgroup updates the limits `rl` on existing cgroup `cName` using `cgroupManager` interface. +func enforceExistingCgroup(cgroupManager CgroupManager, cName CgroupName, rl v1.ResourceList) error { + cgroupConfig := &CgroupConfig{ + Name: cName, + ResourceParameters: getCgroupConfig(rl), + } + if cgroupConfig.ResourceParameters == nil { + return fmt.Errorf("%q cgroup is not configured properly", cgroupConfig.Name) + } + klog.V(4).Infof("Enforcing limits on cgroup %q with %d cpu shares and %d bytes of memory", cName, cgroupConfig.ResourceParameters.CpuShares, cgroupConfig.ResourceParameters.Memory) + if !cgroupManager.Exists(cgroupConfig.Name) { + return fmt.Errorf("%q cgroup does not exist", cgroupConfig.Name) + } + if err := cgroupManager.Update(cgroupConfig); err != nil { + return err + } + return nil +} + +// getCgroupConfig returns a ResourceConfig object that can be used to create or update cgroups via CgroupManager interface. +func getCgroupConfig(rl v1.ResourceList) *ResourceConfig { + // TODO(vishh): Set CPU Quota if necessary. + if rl == nil { + return nil + } + var rc ResourceConfig + if q, exists := rl[v1.ResourceMemory]; exists { + // Memory is defined in bytes.
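getCgroupConfig leans on MilliCPUToShares for the CPU branch. The conversion is restated below under the usual cgroup-v1 constants (1024 shares per full core, kernel minimum of 2); treat the constants and signature as assumptions mirroring the kubelet helper, not the vendored code itself:

```go
package main

import "fmt"

const (
	minShares     = 2    // kernel-imposed minimum for cpu.shares
	sharesPerCPU  = 1024 // cgroup shares corresponding to one full core
	milliCPUToCPU = 1000 // milli-cores per core
)

// milliCPUToShares converts a CPU request in milli-cores to cpu.shares.
func milliCPUToShares(milliCPU int64) uint64 {
	if milliCPU == 0 {
		// Zero request: return the minimum so the cgroup is not starved entirely.
		return minShares
	}
	shares := (milliCPU * sharesPerCPU) / milliCPUToCPU
	if shares < minShares {
		return minShares
	}
	return uint64(shares)
}

func main() {
	fmt.Println(milliCPUToShares(0))    // 2
	fmt.Println(milliCPUToShares(500))  // 512
	fmt.Println(milliCPUToShares(2000)) // 2048
}
```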
+ val := q.Value() + rc.Memory = &val + } + if q, exists := rl[v1.ResourceCPU]; exists { + // CPU is defined in milli-cores. + val := MilliCPUToShares(q.MilliValue()) + rc.CpuShares = &val + } + if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.HugePages) { + rc.HugePageLimit = HugePageLimits(rl) + } + + return &rc +} + +// getNodeAllocatableAbsolute returns the absolute value of Node Allocatable which is primarily useful for enforcement. +// Note that not all resources that are available on the node are included in the returned list of resources. +// Returns a ResourceList. +func (cm *containerManagerImpl) getNodeAllocatableAbsolute() v1.ResourceList { + result := make(v1.ResourceList) + for k, v := range cm.capacity { + value := *(v.Copy()) + if cm.NodeConfig.SystemReserved != nil { + value.Sub(cm.NodeConfig.SystemReserved[k]) + } + if cm.NodeConfig.KubeReserved != nil { + value.Sub(cm.NodeConfig.KubeReserved[k]) + } + if value.Sign() < 0 { + // Negative Allocatable resources don't make sense. + value.Set(0) + } + result[k] = value + } + return result + +} + +// GetNodeAllocatableReservation returns amount of compute or storage resource that have to be reserved on this node from scheduling. +func (cm *containerManagerImpl) GetNodeAllocatableReservation() v1.ResourceList { + evictionReservation := hardEvictionReservation(cm.HardEvictionThresholds, cm.capacity) + result := make(v1.ResourceList) + for k := range cm.capacity { + value := resource.NewQuantity(0, resource.DecimalSI) + if cm.NodeConfig.SystemReserved != nil { + value.Add(cm.NodeConfig.SystemReserved[k]) + } + if cm.NodeConfig.KubeReserved != nil { + value.Add(cm.NodeConfig.KubeReserved[k]) + } + if evictionReservation != nil { + value.Add(evictionReservation[k]) + } + if !value.IsZero() { + result[k] = *value + } + } + return result +} + +// validateNodeAllocatable ensures that the user specified Node Allocatable Configuration doesn't reserve more than the node capacity. +// Returns error if the configuration is invalid, nil otherwise. +func (cm *containerManagerImpl) validateNodeAllocatable() error { + var errors []string + nar := cm.GetNodeAllocatableReservation() + for k, v := range nar { + value := cm.capacity[k].DeepCopy() + value.Sub(v) + + if value.Sign() < 0 { + errors = append(errors, fmt.Sprintf("Resource %q has an allocatable of %v, capacity of %v", k, v, value)) + } + } + + if len(errors) > 0 { + return fmt.Errorf("Invalid Node Allocatable configuration. %s", strings.Join(errors, " ")) + } + return nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/pod_container_manager_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/pod_container_manager_linux.go index d0c3a829947c..b91c5babe92c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/pod_container_manager_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/pod_container_manager_linux.go @@ -23,11 +23,11 @@ import ( "path" "strings" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" kubefeatures "k8s.io/kubernetes/pkg/features" ) @@ -137,11 +137,10 @@ func (m *podContainerManagerImpl) killOnePid(pid int) error { // Hate parsing strings, but // vendor/github.com/opencontainers/runc/libcontainer/ // also does this. 
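The killOnePid hunk above treats a process that has already exited as success, using the string match the "Hate parsing strings" comment apologizes for. A standalone sketch of that idiom; the errors.Is check is an addition for newer Go versions (os.ErrProcessDone, Go 1.16+), since the exact error text varies:

```go
package main

import (
	"errors"
	"fmt"
	"os"
	"strings"
)

// killOnePid kills pid, treating "already gone" as success rather than failure.
func killOnePid(pid int) error {
	p, _ := os.FindProcess(pid) // never fails on Unix
	if err := p.Kill(); err != nil {
		// The process exiting on its own is fine; the error shape differs by
		// Go version, hence the belt-and-braces check.
		if errors.Is(err, os.ErrProcessDone) ||
			strings.Contains(err.Error(), "process already finished") {
			return nil
		}
		return err
	}
	return nil
}

func main() {
	// A pid far above our own is almost certainly not running.
	fmt.Println(killOnePid(os.Getpid() + 100000))
}
```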
- glog.V(3).Infof("process with pid %v no longer exists", pid) + klog.V(3).Infof("process with pid %v no longer exists", pid) return nil - } else { - return err } + return err } return nil } @@ -160,18 +159,18 @@ func (m *podContainerManagerImpl) tryKillingCgroupProcesses(podCgroup CgroupName // We try killing all the pids multiple times for i := 0; i < 5; i++ { if i != 0 { - glog.V(3).Infof("Attempt %v failed to kill all unwanted process. Retyring", i) + klog.V(3).Infof("Attempt %v failed to kill all unwanted process. Retyring", i) } errlist = []error{} for _, pid := range pidsToKill { - glog.V(3).Infof("Attempt to kill process with pid: %v", pid) + klog.V(3).Infof("Attempt to kill process with pid: %v", pid) if err := m.killOnePid(pid); err != nil { - glog.V(3).Infof("failed to kill process with pid: %v", pid) + klog.V(3).Infof("failed to kill process with pid: %v", pid) errlist = append(errlist, err) } } if len(errlist) == 0 { - glog.V(3).Infof("successfully killed all unwanted processes.") + klog.V(3).Infof("successfully killed all unwanted processes.") return nil } } @@ -182,7 +181,7 @@ func (m *podContainerManagerImpl) tryKillingCgroupProcesses(podCgroup CgroupName func (m *podContainerManagerImpl) Destroy(podCgroup CgroupName) error { // Try killing all the processes attached to the pod cgroup if err := m.tryKillingCgroupProcesses(podCgroup); err != nil { - glog.V(3).Infof("failed to kill all the processes attached to the %v cgroups", podCgroup) + klog.V(3).Infof("failed to kill all the processes attached to the %v cgroups", podCgroup) return fmt.Errorf("failed to kill all the processes attached to the %v cgroups : %v", podCgroup, err) } @@ -270,7 +269,7 @@ func (m *podContainerManagerImpl) GetAllPodsFromCgroups() (map[types.UID]CgroupN parts := strings.Split(basePath, podCgroupNamePrefix) // the uid is missing, so we log the unexpected cgroup not of form pod if len(parts) != 2 { - glog.Errorf("pod cgroup manager ignoring unexpected cgroup %v because it is not a pod", cgroupfsPath) + klog.Errorf("pod cgroup manager ignoring unexpected cgroup %v because it is not a pod", cgroupfsPath) continue } podUID := parts[1] diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/qos_container_manager_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/qos_container_manager_linux.go index 2cfc198c3a5b..cebc9756e438 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/qos_container_manager_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/qos_container_manager_linux.go @@ -22,7 +22,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/wait" @@ -138,7 +138,7 @@ func (m *qosContainerManagerImpl) Start(getNodeAllocatable func() v1.ResourceLis go wait.Until(func() { err := m.UpdateCgroups() if err != nil { - glog.Warningf("[ContainerManager] Failed to reserve QoS requests: %v", err) + klog.Warningf("[ContainerManager] Failed to reserve QoS requests: %v", err) } }, periodicQOSCgroupUpdateInterval, wait.NeverStop) @@ -222,17 +222,17 @@ func (m *qosContainerManagerImpl) setMemoryReserve(configs map[v1.PodQOSClass]*C resources := m.getNodeAllocatable() allocatableResource, ok := resources[v1.ResourceMemory] if !ok { - glog.V(2).Infof("[Container Manager] Allocatable memory value could not be determined. Not setting QOS memory limts.") + klog.V(2).Infof("[Container Manager] Allocatable memory value could not be determined. 
Not setting QOS memory limits.") return } allocatable := allocatableResource.Value() if allocatable == 0 { - glog.V(2).Infof("[Container Manager] Memory allocatable reported as 0, might be in standalone mode. Not setting QOS memory limts.") + klog.V(2).Infof("[Container Manager] Memory allocatable reported as 0, might be in standalone mode. Not setting QOS memory limits.") return } for qos, limits := range qosMemoryRequests { - glog.V(2).Infof("[Container Manager] %s pod requests total %d bytes (reserve %d%%)", qos, limits, percentReserve) + klog.V(2).Infof("[Container Manager] %s pod requests total %d bytes (reserve %d%%)", qos, limits, percentReserve) } // Calculate QOS memory limits @@ -252,7 +252,7 @@ func (m *qosContainerManagerImpl) retrySetMemoryReserve(configs map[v1.PodQOSCla for qos, config := range configs { stats, err := m.cgroupManager.GetResourceStats(config.Name) if err != nil { - glog.V(2).Infof("[Container Manager] %v", err) + klog.V(2).Infof("[Container Manager] %v", err) return } usage := stats.MemoryStats.Usage @@ -312,7 +312,7 @@ func (m *qosContainerManagerImpl) UpdateCgroups() error { } } if updateSuccess { - glog.V(4).Infof("[ContainerManager]: Updated QoS cgroup configuration") + klog.V(4).Infof("[ContainerManager]: Updated QoS cgroup configuration") return nil } @@ -330,12 +330,12 @@ func (m *qosContainerManagerImpl) UpdateCgroups() error { for _, config := range qosConfigs { err := m.cgroupManager.Update(config) if err != nil { - glog.Errorf("[ContainerManager]: Failed to update QoS cgroup configuration") + klog.Errorf("[ContainerManager]: Failed to update QoS cgroup configuration") return err } } - glog.V(4).Infof("[ContainerManager]: Updated QoS cgroup configuration") + klog.V(4).Infof("[ContainerManager]: Updated QoS cgroup configuration") return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/BUILD index 82cb3a86ecde..5e89141c02a7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/BUILD @@ -43,12 +43,12 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:linux": [ "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", - "//vendor/golang.org/x/exp/inotify:go_default_library", + "//vendor/github.com/sigma/go-inotify:go_default_library", ], "//conditions:default": [], }), diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/common.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/common.go index 8fb4199f831a..933ee29e7863 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/common.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/common.go @@ -40,7 +40,7 @@ import ( kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/util/hash" - "github.com/golang/glog" + "k8s.io/klog" ) // Generate a pod name that is unique among nodes by appending the nodeName.
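The applyDefaults hunk above is where static pods get their identity: the name is made unique across nodes by appending the node name, and the UID comes from hashing the pod. A rough sketch; the hash choice and helper names here are illustrative, not the vendored helpers:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// generatePodName appends the node name so two nodes running the same
// manifest do not produce colliding pod names.
func generatePodName(name, nodeName string) string {
	return fmt.Sprintf("%s-%s", name, nodeName)
}

// generateUID derives a stable pseudo-UID from the manifest bytes plus the
// source, so re-reading the same file yields the same identity.
func generateUID(manifest []byte, source string) string {
	sum := sha256.Sum256(append(manifest, source...))
	return fmt.Sprintf("%x", sum[:16])
}

func main() {
	fmt.Println(generatePodName("kube-apiserver", "node-1")) // kube-apiserver-node-1
	fmt.Println(generateUID([]byte("apiVersion: v1\nkind: Pod\n"), "file"))
}
```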
@@ -59,16 +59,16 @@ func applyDefaults(pod *api.Pod, source string, isFile bool, nodeName types.Node } hash.DeepHashObject(hasher, pod) pod.UID = types.UID(hex.EncodeToString(hasher.Sum(nil)[0:])) - glog.V(5).Infof("Generated UID %q pod %q from %s", pod.UID, pod.Name, source) + klog.V(5).Infof("Generated UID %q pod %q from %s", pod.UID, pod.Name, source) } pod.Name = generatePodName(pod.Name, nodeName) - glog.V(5).Infof("Generated Name %q for UID %q from URL %s", pod.Name, pod.UID, source) + klog.V(5).Infof("Generated Name %q for UID %q from URL %s", pod.Name, pod.UID, source) if pod.Namespace == "" { pod.Namespace = metav1.NamespaceDefault } - glog.V(5).Infof("Using namespace %q for pod %q from %s", pod.Namespace, pod.Name, source) + klog.V(5).Infof("Using namespace %q for pod %q from %s", pod.Namespace, pod.Name, source) // Set the Host field to indicate this pod is scheduled on the current node. pod.Spec.NodeName = string(nodeName) @@ -132,7 +132,7 @@ func tryDecodeSinglePod(data []byte, defaultFn defaultFunc) (parsed bool, pod *v } v1Pod := &v1.Pod{} if err := k8s_api_v1.Convert_core_Pod_To_v1_Pod(newPod, v1Pod, nil); err != nil { - glog.Errorf("Pod %q failed to convert to v1", newPod.Name) + klog.Errorf("Pod %q failed to convert to v1", newPod.Name) return true, nil, err } return true, v1Pod, nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/config.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/config.go index 51c2e29c5462..75d42e0e7313 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/config.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/config.go @@ -21,11 +21,11 @@ import ( "reflect" "sync" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/tools/record" + "k8s.io/klog" "k8s.io/kubernetes/pkg/kubelet/checkpoint" "k8s.io/kubernetes/pkg/kubelet/checkpointmanager" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -97,7 +97,7 @@ func (c *PodConfig) SeenAllSources(seenSources sets.String) bool { if c.pods == nil { return false } - glog.V(5).Infof("Looking for %v, have seen %v", c.sources.List(), seenSources) + klog.V(5).Infof("Looking for %v, have seen %v", c.sources.List(), seenSources) return seenSources.HasAll(c.sources.List()...) && c.pods.seenSources(c.sources.List()...) 
} @@ -279,16 +279,16 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de switch update.Op { case kubetypes.ADD, kubetypes.UPDATE, kubetypes.DELETE: if update.Op == kubetypes.ADD { - glog.V(4).Infof("Adding new pods from source %s : %v", source, update.Pods) + klog.V(4).Infof("Adding new pods from source %s : %v", source, update.Pods) } else if update.Op == kubetypes.DELETE { - glog.V(4).Infof("Graceful deleting pods from source %s : %v", source, update.Pods) + klog.V(4).Infof("Graceful deleting pods from source %s : %v", source, update.Pods) } else { - glog.V(4).Infof("Updating pods from source %s : %v", source, update.Pods) + klog.V(4).Infof("Updating pods from source %s : %v", source, update.Pods) } updatePodsFunc(update.Pods, pods, pods) case kubetypes.REMOVE: - glog.V(4).Infof("Removing pods from source %s : %v", source, update.Pods) + klog.V(4).Infof("Removing pods from source %s : %v", source, update.Pods) for _, value := range update.Pods { if existing, found := pods[value.UID]; found { // this is a delete @@ -300,7 +300,7 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de } case kubetypes.SET: - glog.V(4).Infof("Setting pods for source %s", source) + klog.V(4).Infof("Setting pods for source %s", source) s.markSourceSet(source) // Clear the old map entries by just creating a new map oldPods := pods @@ -313,13 +313,13 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de } } case kubetypes.RESTORE: - glog.V(4).Infof("Restoring pods for source %s", source) + klog.V(4).Infof("Restoring pods for source %s", source) for _, value := range update.Pods { restorePods = append(restorePods, value) } default: - glog.Warningf("Received invalid update type: %v", update) + klog.Warningf("Received invalid update type: %v", update) } @@ -354,7 +354,7 @@ func filterInvalidPods(pods []*v1.Pod, source string, recorder record.EventRecor // This function only checks if there is any naming conflict. name := kubecontainer.GetPodFullName(pod) if names.Has(name) { - glog.Warningf("Pod[%d] (%s) from %s failed validation due to duplicate pod name %q, ignoring", i+1, format.Pod(pod), source, pod.Name) + klog.Warningf("Pod[%d] (%s) from %s failed validation due to duplicate pod name %q, ignoring", i+1, format.Pod(pod), source, pod.Name) recorder.Eventf(pod, v1.EventTypeWarning, events.FailedValidation, "Error validating pod %s from %s due to duplicate pod name %q, ignoring", format.Pod(pod), source, pod.Name) continue } else { @@ -411,7 +411,7 @@ func isAnnotationMapEqual(existingMap, candidateMap map[string]string) bool { // recordFirstSeenTime records the first seen time of this pod. func recordFirstSeenTime(pod *v1.Pod) { - glog.V(4).Infof("Receiving a new pod %q", format.Pod(pod)) + klog.V(4).Infof("Receiving a new pod %q", format.Pod(pod)) pod.Annotations[kubetypes.ConfigFirstSeenAnnotationKey] = kubetypes.NewTimestamp().GetString() } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/defaults.go index b90306968f1f..96ab9cebac8f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/defaults.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/defaults.go @@ -17,10 +17,12 @@ limitations under the License. 
package config const ( - DefaultKubeletPodsDirName = "pods" - DefaultKubeletVolumesDirName = "volumes" - DefaultKubeletVolumeDevicesDirName = "volumeDevices" - DefaultKubeletPluginsDirName = "plugins" - DefaultKubeletContainersDirName = "containers" - DefaultKubeletPluginContainersDirName = "plugin-containers" + DefaultKubeletPodsDirName = "pods" + DefaultKubeletVolumesDirName = "volumes" + DefaultKubeletVolumeDevicesDirName = "volumeDevices" + DefaultKubeletPluginsDirName = "plugins" + DefaultKubeletPluginsRegistrationDirName = "plugins_registry" + DefaultKubeletContainersDirName = "containers" + DefaultKubeletPluginContainersDirName = "plugin-containers" + DefaultKubeletPodResourcesDirName = "pod-resources" ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/file.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/file.go index 683dc9a3c94e..a08a987795e2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/file.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/file.go @@ -26,7 +26,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -61,11 +61,11 @@ type sourceFile struct { } func NewSourceFile(path string, nodeName types.NodeName, period time.Duration, updates chan<- interface{}) { - // "golang.org/x/exp/inotify" requires a path without trailing "/" + // "github.com/sigma/go-inotify" requires a path without trailing "/" path = strings.TrimRight(path, string(os.PathSeparator)) config := newSourceFile(path, nodeName, period, updates) - glog.V(1).Infof("Watching path %q", path) + klog.V(1).Infof("Watching path %q", path) config.run() } @@ -95,17 +95,17 @@ func (s *sourceFile) run() { go func() { // Read path immediately to speed up startup. 
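The sourceFile.run hunk above combines a periodic full re-list (a safety net against missed events) with low-latency inotify events in one select loop. Its shape, reduced to a standalone sketch with invented channel and callback types:

```go
package main

import (
	"fmt"
	"time"
)

// run re-lists on a ticker and consumes watch events as they arrive,
// reading the path once up front so startup does not wait a full interval.
func run(listInterval time.Duration, watchEvents <-chan string, list func() error, consume func(string) error) {
	ticker := time.NewTicker(listInterval)
	defer ticker.Stop()
	if err := list(); err != nil {
		fmt.Println("unable to read config path:", err)
	}
	for {
		select {
		case <-ticker.C:
			if err := list(); err != nil {
				fmt.Println("unable to read config path:", err)
			}
		case e := <-watchEvents:
			if err := consume(e); err != nil {
				fmt.Println("unable to process watch event:", err)
			}
		}
	}
}

func main() {
	events := make(chan string, 1)
	events <- "/etc/kubernetes/manifests/pod.yaml CREATE"
	go run(20*time.Second, events,
		func() error { fmt.Println("re-listing manifests"); return nil },
		func(e string) error { fmt.Println("event:", e); return nil })
	time.Sleep(100 * time.Millisecond) // let the loop drain the example event
}
```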
if err := s.listConfig(); err != nil { - glog.Errorf("Unable to read config path %q: %v", s.path, err) + klog.Errorf("Unable to read config path %q: %v", s.path, err) } for { select { case <-listTicker.C: if err := s.listConfig(); err != nil { - glog.Errorf("Unable to read config path %q: %v", s.path, err) + klog.Errorf("Unable to read config path %q: %v", s.path, err) } case e := <-s.watchEvents: if err := s.consumeWatchEvent(e); err != nil { - glog.Errorf("Unable to process watch event: %v", err) + klog.Errorf("Unable to process watch event: %v", err) } } } @@ -173,31 +173,31 @@ func (s *sourceFile) extractFromDir(name string) ([]*v1.Pod, error) { for _, path := range dirents { statInfo, err := os.Stat(path) if err != nil { - glog.Errorf("Can't get metadata for %q: %v", path, err) + klog.Errorf("Can't get metadata for %q: %v", path, err) continue } switch { case statInfo.Mode().IsDir(): - glog.Errorf("Not recursing into manifest path %q", path) + klog.Errorf("Not recursing into manifest path %q", path) case statInfo.Mode().IsRegular(): pod, err := s.extractFromFile(path) if err != nil { if !os.IsNotExist(err) { - glog.Errorf("Can't process manifest file %q: %v", path, err) + klog.Errorf("Can't process manifest file %q: %v", path, err) } } else { pods = append(pods, pod) } default: - glog.Errorf("Manifest path %q is not a directory or file: %v", path, statInfo.Mode()) + klog.Errorf("Manifest path %q is not a directory or file: %v", path, statInfo.Mode()) } } return pods, nil } func (s *sourceFile) extractFromFile(filename string) (pod *v1.Pod, err error) { - glog.V(3).Infof("Reading config file %q", filename) + klog.V(3).Infof("Reading config file %q", filename) defer func() { if err == nil && pod != nil { objKey, keyErr := cache.MetaNamespaceKeyFunc(pod) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/file_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/file_linux.go index 4e6b1a5a2376..85ed24569000 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/file_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/file_linux.go @@ -26,8 +26,8 @@ import ( "strings" "time" - "github.com/golang/glog" - "golang.org/x/exp/inotify" + "github.com/sigma/go-inotify" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/wait" @@ -58,7 +58,7 @@ func (s *sourceFile) startWatch() { } if err := s.doWatch(); err != nil { - glog.Errorf("Unable to read config path %q: %v", s.path, err) + klog.Errorf("Unable to read config path %q: %v", s.path, err) if _, retryable := err.(*retryableError); !retryable { backOff.Next(backOffId, time.Now()) } @@ -103,13 +103,13 @@ func (s *sourceFile) doWatch() error { func (s *sourceFile) produceWatchEvent(e *inotify.Event) error { // Ignore file start with dots if strings.HasPrefix(filepath.Base(e.Name), ".") { - glog.V(4).Infof("Ignored pod manifest: %s, because it starts with dots", e.Name) + klog.V(4).Infof("Ignored pod manifest: %s, because it starts with dots", e.Name) return nil } var eventType podEventType switch { case (e.Mask & inotify.IN_ISDIR) > 0: - glog.Errorf("Not recursing into manifest path %q", s.path) + klog.Errorf("Not recursing into manifest path %q", s.path) return nil case (e.Mask & inotify.IN_CREATE) > 0: eventType = podAdd diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/file_unsupported.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/file_unsupported.go index 4bee74f544df..d46b5f361deb 100644 
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/file_unsupported.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/file_unsupported.go @@ -22,11 +22,11 @@ package config import ( "fmt" - "github.com/golang/glog" + "k8s.io/klog" ) func (s *sourceFile) startWatch() { - glog.Errorf("Watching source file is unsupported in this build") + klog.Errorf("Watching source file is unsupported in this build") } func (s *sourceFile) consumeWatchEvent(e *watchEvent) error { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/http.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/http.go index 0ef43e062a54..1eb8ad042a92 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/http.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/config/http.go @@ -29,8 +29,8 @@ import ( api "k8s.io/kubernetes/pkg/apis/core" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" ) type sourceURL struct { @@ -54,7 +54,7 @@ func NewSourceURL(url string, header http.Header, nodeName types.NodeName, perio // read the manifest URL passed to kubelet. client: &http.Client{Timeout: 10 * time.Second}, } - glog.V(1).Infof("Watching URL %s", url) + klog.V(1).Infof("Watching URL %s", url) go wait.Until(config.run, period, wait.NeverStop) } @@ -63,16 +63,16 @@ func (s *sourceURL) run() { // Don't log this multiple times per minute. The first few entries should be // enough to get the point across. if s.failureLogs < 3 { - glog.Warningf("Failed to read pods from URL: %v", err) + klog.Warningf("Failed to read pods from URL: %v", err) } else if s.failureLogs == 3 { - glog.Warningf("Failed to read pods from URL. Dropping verbosity of this message to V(4): %v", err) + klog.Warningf("Failed to read pods from URL. Dropping verbosity of this message to V(4): %v", err) } else { - glog.V(4).Infof("Failed to read pods from URL: %v", err) + klog.V(4).Infof("Failed to read pods from URL: %v", err) } s.failureLogs++ } else { if s.failureLogs > 0 { - glog.Info("Successfully read pods from URL.") + klog.Info("Successfully read pods from URL.") s.failureLogs = 0 } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/BUILD index d7b267083338..3995b79805a8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/BUILD @@ -34,7 +34,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/remotecommand:go_default_library", "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", "//third_party/forked/golang/expansion:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/container_gc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/container_gc.go index 72fa4bd722ee..790596b2289e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/container_gc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/container_gc.go @@ -20,7 +20,7 @@ import ( "fmt" "time" - "github.com/golang/glog" + "k8s.io/klog" ) // Specified a policy for garbage collecting containers. 
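For orientation, the policy that GarbageCollect and DeleteAllUnusedContainers enforce looks roughly like the struct below. The field set is paraphrased from upstream kubelet; treat it as an assumption, not the vendored definition:

```go
package main

import (
	"fmt"
	"time"
)

// containerGCPolicy sketches the knobs a container GC pass honors.
type containerGCPolicy struct {
	MinAge             time.Duration // how long a dead container is kept before it may be removed
	MaxPerPodContainer int           // max dead containers per (pod, container) pair; <0 means no limit
	MaxContainers      int           // global cap on dead containers; <0 means no limit
}

func main() {
	p := containerGCPolicy{MinAge: time.Minute, MaxPerPodContainer: 1, MaxContainers: -1}
	fmt.Printf("%+v\n", p)
}
```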
@@ -82,6 +82,6 @@ func (cgc *realContainerGC) GarbageCollect() error { } func (cgc *realContainerGC) DeleteAllUnusedContainers() error { - glog.Infof("attempting to delete unused containers") + klog.Infof("attempting to delete unused containers") return cgc.runtime.GarbageCollect(cgc.policy, cgc.sourcesReadyProvider.AllReady(), true) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/helpers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/helpers.go index 01fe7129d38f..10d848a2d871 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/helpers.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/helpers.go @@ -21,7 +21,7 @@ import ( "hash/fnv" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -75,13 +75,13 @@ func ShouldContainerBeRestarted(container *v1.Container, pod *v1.Pod, podStatus } // Check RestartPolicy for dead container if pod.Spec.RestartPolicy == v1.RestartPolicyNever { - glog.V(4).Infof("Already ran container %q of pod %q, do nothing", container.Name, format.Pod(pod)) + klog.V(4).Infof("Already ran container %q of pod %q, do nothing", container.Name, format.Pod(pod)) return false } if pod.Spec.RestartPolicy == v1.RestartPolicyOnFailure { // Check the exit code. if status.ExitCode == 0 { - glog.V(4).Infof("Already successfully ran container %q of pod %q, do nothing", container.Name, format.Pod(pod)) + klog.V(4).Infof("Already successfully ran container %q of pod %q, do nothing", container.Name, format.Pod(pod)) return false } } @@ -311,7 +311,7 @@ func MakePortMappings(container *v1.Container) (ports []PortMapping) { // Protect against exposing the same protocol-port more than once in a container. 
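The MakePortMappings hunk above guards against exposing the same protocol-port pair more than once per container, skipping duplicates with a warning. A toy version; the dedup key format below is an assumption for illustration, since the vendored code keys on the mapping name:

```go
package main

import "fmt"

type portMapping struct {
	Protocol      string
	ContainerPort int
	HostPort      int
}

// makePortMappings keeps the first mapping per protocol-port pair and logs
// any duplicate it drops.
func makePortMappings(ports []portMapping) []portMapping {
	seen := map[string]bool{}
	var out []portMapping
	for _, pm := range ports {
		key := fmt.Sprintf("%s:%d", pm.Protocol, pm.ContainerPort)
		if seen[key] {
			fmt.Printf("port %q is defined more than once, skipping\n", key)
			continue
		}
		seen[key] = true
		out = append(out, pm)
	}
	return out
}

func main() {
	out := makePortMappings([]portMapping{
		{"TCP", 8080, 0},
		{"TCP", 8080, 30080}, // dropped as a duplicate
		{"UDP", 8080, 0},     // different protocol, kept
	})
	fmt.Println(len(out)) // 2
}
```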
if _, ok := names[pm.Name]; ok { - glog.Warningf("Port name conflicted, %q is defined more than once", pm.Name) + klog.Warningf("Port name conflicted, %q is defined more than once", pm.Name) continue } ports = append(ports, pm) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go index 4859342abcbc..2d9fcd33fd99 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go @@ -25,11 +25,11 @@ import ( "strings" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/remotecommand" "k8s.io/client-go/util/flowcontrol" + "k8s.io/klog" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" "k8s.io/kubernetes/pkg/volume" ) @@ -203,7 +203,7 @@ func BuildContainerID(typ, ID string) ContainerID { func ParseContainerID(containerID string) ContainerID { var id ContainerID if err := id.ParseString(containerID); err != nil { - glog.Error(err) + klog.Error(err) } return id } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/BUILD index d835f2570609..5915ed021245 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/BUILD @@ -67,7 +67,7 @@ go_library( "//vendor/github.com/docker/docker/api/types/strslice:go_default_library", "//vendor/github.com/docker/docker/pkg/jsonmessage:go_default_library", "//vendor/github.com/docker/go-connections/nat:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:windows": [ diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/BUILD index 5ca2b2afc634..80736ce24105 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/BUILD @@ -31,11 +31,11 @@ go_library( "//pkg/kubelet/cm:go_default_library", "//pkg/kubelet/dockershim/libdocker:go_default_library", "//pkg/kubelet/qos:go_default_library", - "//pkg/util/version:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "@io_bazel_rules_go//go/platform:nacl": [ "//pkg/kubelet/dockershim/libdocker:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/container_manager_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/container_manager_linux.go index 17baaee11ef6..b59c6a08364c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/container_manager_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/container_manager_linux.go @@ -25,13 +25,13 
@@ import ( "strconv" "time" - "github.com/golang/glog" "github.com/opencontainers/runc/libcontainer/cgroups/fs" "github.com/opencontainers/runc/libcontainer/configs" + utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog" kubecm "k8s.io/kubernetes/pkg/kubelet/cm" "k8s.io/kubernetes/pkg/kubelet/qos" - utilversion "k8s.io/kubernetes/pkg/util/version" "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker" ) @@ -83,29 +83,36 @@ func (m *containerManager) Start() error { func (m *containerManager) doWork() { v, err := m.client.Version() if err != nil { - glog.Errorf("Unable to get docker version: %v", err) + klog.Errorf("Unable to get docker version: %v", err) return } version, err := utilversion.ParseGeneric(v.APIVersion) if err != nil { - glog.Errorf("Unable to parse docker version %q: %v", v.APIVersion, err) + klog.Errorf("Unable to parse docker version %q: %v", v.APIVersion, err) return } // EnsureDockerInContainer does two things. // 1. Ensure processes run in the cgroups if m.cgroupsManager is not nil. // 2. Ensure processes have the OOM score applied. if err := kubecm.EnsureDockerInContainer(version, dockerOOMScoreAdj, m.cgroupsManager); err != nil { - glog.Errorf("Unable to ensure the docker processes run in the desired containers: %v", err) + klog.Errorf("Unable to ensure the docker processes run in the desired containers: %v", err) } } func createCgroupManager(name string) (*fs.Manager, error) { var memoryLimit uint64 + memoryCapacity, err := getMemoryCapacity() - if err != nil || memoryCapacity*dockerMemoryLimitThresholdPercent/100 < minDockerMemoryLimit { + if err != nil { + klog.Errorf("Failed to get the memory capacity on machine: %v", err) + } else { + memoryLimit = memoryCapacity * dockerMemoryLimitThresholdPercent / 100 + } + + if err != nil || memoryLimit < minDockerMemoryLimit { memoryLimit = minDockerMemoryLimit } - glog.V(2).Infof("Configure resource-only container %q with memory limit: %d", name, memoryLimit) + klog.V(2).Infof("Configure resource-only container %q with memory limit: %d", name, memoryLimit) allowAllDevices := true cm := &fs.Manager{ diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_container.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_container.go index 6343e9be1975..3c6b9b484972 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_container.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_container.go @@ -27,7 +27,7 @@ import ( dockercontainer "github.com/docker/docker/api/types/container" dockerfilters "github.com/docker/docker/api/types/filters" dockerstrslice "github.com/docker/docker/api/types/strslice" - "github.com/golang/glog" + "k8s.io/klog" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker" @@ -71,7 +71,7 @@ func (ds *dockerService) ListContainers(_ context.Context, r *runtimeapi.ListCon converted, err := toRuntimeAPIContainer(&c) if err != nil { - glog.V(4).Infof("Unable to convert docker to runtime API container: %v", err) + klog.V(4).Infof("Unable to convert docker to runtime API container: %v", err) continue } @@ -191,7 +191,7 @@ func (ds *dockerService) createContainerLogSymlink(containerID string) error { } if path == "" { - glog.V(5).Infof("Container %s log path isn't specified, will not create the symlink", containerID) + klog.V(5).Infof("Container %s log path isn't specified, will not 
create the symlink", containerID) return nil } @@ -199,7 +199,7 @@ func (ds *dockerService) createContainerLogSymlink(containerID string) error { // Only create the symlink when container log path is specified and log file exists. // Delete possibly existing file first if err = ds.os.Remove(path); err == nil { - glog.Warningf("Deleted previously existing symlink file: %q", path) + klog.Warningf("Deleted previously existing symlink file: %q", path) } if err = ds.os.Symlink(realPath, path); err != nil { return fmt.Errorf("failed to create symbolic link %q to the container log file %q for container %q: %v", @@ -208,14 +208,14 @@ func (ds *dockerService) createContainerLogSymlink(containerID string) error { } else { supported, err := ds.IsCRISupportedLogDriver() if err != nil { - glog.Warningf("Failed to check supported logging driver by CRI: %v", err) + klog.Warningf("Failed to check supported logging driver by CRI: %v", err) return nil } if supported { - glog.Warningf("Cannot create symbolic link because container log file doesn't exist!") + klog.Warningf("Cannot create symbolic link because container log file doesn't exist!") } else { - glog.V(5).Infof("Unsupported logging driver by CRI") + klog.V(5).Infof("Unsupported logging driver by CRI") } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image.go index e4c450bc8b0c..c1089a037a3d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image.go @@ -25,7 +25,7 @@ import ( dockerfilters "github.com/docker/docker/api/types/filters" "github.com/docker/docker/pkg/jsonmessage" - "github.com/golang/glog" + "k8s.io/klog" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker" ) @@ -52,7 +52,7 @@ func (ds *dockerService) ListImages(_ context.Context, r *runtimeapi.ListImagesR for _, i := range images { apiImage, err := imageToRuntimeAPIImage(&i) if err != nil { - glog.V(5).Infof("Failed to convert docker API image %+v to runtime API image: %v", i, err) + klog.V(5).Infof("Failed to convert docker API image %+v to runtime API image: %v", i, err) continue } result = append(result, apiImage) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image_windows.go index 8fd6d2c869e0..e9df2f663fa6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image_windows.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image_windows.go @@ -22,7 +22,7 @@ import ( "context" "time" - "github.com/golang/glog" + "k8s.io/klog" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" "k8s.io/kubernetes/pkg/kubelet/winstats" @@ -32,14 +32,14 @@ import ( func (ds *dockerService) ImageFsInfo(_ context.Context, _ *runtimeapi.ImageFsInfoRequest) (*runtimeapi.ImageFsInfoResponse, error) { info, err := ds.client.Info() if err != nil { - glog.Errorf("Failed to get docker info: %v", err) + klog.Errorf("Failed to get docker info: %v", err) return nil, err } statsClient := &winstats.StatsClient{} fsinfo, err := statsClient.GetDirFsInfo(info.DockerRootDir) if err != nil { - glog.Errorf("Failed to get dir fsInfo for %q: %v", info.DockerRootDir, err) + klog.Errorf("Failed to 
get dir fsInfo for %q: %v", info.DockerRootDir, err) return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_sandbox.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_sandbox.go index b114bf052b46..0443793d8db1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_sandbox.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_sandbox.go @@ -18,6 +18,7 @@ package dockershim import ( "context" + "encoding/json" "fmt" "os" "strings" @@ -26,9 +27,8 @@ import ( dockertypes "github.com/docker/docker/api/types" dockercontainer "github.com/docker/docker/api/types/container" dockerfilters "github.com/docker/docker/api/types/filters" - "github.com/golang/glog" - utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" "k8s.io/kubernetes/pkg/kubelet/checkpointmanager" "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors" @@ -165,7 +165,16 @@ func (ds *dockerService) RunPodSandbox(ctx context.Context, r *runtimeapi.RunPod // on the host as well, to satisfy parts of the pod spec that aren't // recognized by the CNI standard yet. cID := kubecontainer.BuildContainerID(runtimeName, createResp.ID) - err = ds.network.SetUpPod(config.GetMetadata().Namespace, config.GetMetadata().Name, cID, config.Annotations) + networkOptions := make(map[string]string) + if dnsConfig := config.GetDnsConfig(); dnsConfig != nil { + // Build DNS options. + dnsOption, err := json.Marshal(dnsConfig) + if err != nil { + return nil, fmt.Errorf("failed to marshal dns config for pod %q: %v", config.Metadata.Name, err) + } + networkOptions["dns"] = string(dnsOption) + } + err = ds.network.SetUpPod(config.GetMetadata().Namespace, config.GetMetadata().Name, cID, config.Annotations, networkOptions) if err != nil { errList := []error{fmt.Errorf("failed to set up sandbox container %q network for pod %q: %v", createResp.ID, config.Metadata.Name, err)} @@ -216,11 +225,11 @@ func (ds *dockerService) StopPodSandbox(ctx context.Context, r *runtimeapi.StopP if checkpointErr != errors.ErrCheckpointNotFound { err := ds.checkpointManager.RemoveCheckpoint(podSandboxID) if err != nil { - glog.Errorf("Failed to delete corrupt checkpoint for sandbox %q: %v", podSandboxID, err) + klog.Errorf("Failed to delete corrupt checkpoint for sandbox %q: %v", podSandboxID, err) } } if libdocker.IsContainerNotFoundError(statusErr) { - glog.Warningf("Both sandbox container and checkpoint for id %q could not be found. "+ + klog.Warningf("Both sandbox container and checkpoint for id %q could not be found. 
"+ "Proceed without further sandbox information.", podSandboxID) } else { return nil, utilerrors.NewAggregate([]error{ @@ -255,7 +264,7 @@ func (ds *dockerService) StopPodSandbox(ctx context.Context, r *runtimeapi.StopP if err := ds.client.StopContainer(podSandboxID, defaultSandboxGracePeriod); err != nil { // Do not return error if the container does not exist if !libdocker.IsContainerNotFoundError(err) { - glog.Errorf("Failed to stop sandbox %q: %v", podSandboxID, err) + klog.Errorf("Failed to stop sandbox %q: %v", podSandboxID, err) errList = append(errList, err) } else { // remove the checkpoint for any sandbox that is not found in the runtime @@ -372,7 +381,7 @@ func (ds *dockerService) getIP(podSandboxID string, sandbox *dockertypes.Contain // If all else fails, warn but don't return an error, as pod status // should generally not return anything except fatal errors // FIXME: handle network errors by restarting the pod somehow? - glog.Warningf("failed to read pod IP from plugin/docker: %v", err) + klog.Warningf("failed to read pod IP from plugin/docker: %v", err) return "" } @@ -489,7 +498,7 @@ func (ds *dockerService) ListPodSandbox(_ context.Context, r *runtimeapi.ListPod if filter == nil { checkpoints, err = ds.checkpointManager.ListCheckpoints() if err != nil { - glog.Errorf("Failed to list checkpoints: %v", err) + klog.Errorf("Failed to list checkpoints: %v", err) } } @@ -506,7 +515,7 @@ func (ds *dockerService) ListPodSandbox(_ context.Context, r *runtimeapi.ListPod c := containers[i] converted, err := containerToRuntimeAPISandbox(&c) if err != nil { - glog.V(4).Infof("Unable to convert docker to runtime API sandbox %+v: %v", c, err) + klog.V(4).Infof("Unable to convert docker to runtime API sandbox %+v: %v", c, err) continue } if filterOutReadySandboxes && converted.State == runtimeapi.PodSandboxState_SANDBOX_READY { @@ -526,11 +535,11 @@ func (ds *dockerService) ListPodSandbox(_ context.Context, r *runtimeapi.ListPod checkpoint := NewPodSandboxCheckpoint("", "", &CheckpointData{}) err := ds.checkpointManager.GetCheckpoint(id, checkpoint) if err != nil { - glog.Errorf("Failed to retrieve checkpoint for sandbox %q: %v", id, err) + klog.Errorf("Failed to retrieve checkpoint for sandbox %q: %v", id, err) if err == errors.ErrCorruptCheckpoint { err = ds.checkpointManager.RemoveCheckpoint(id) if err != nil { - glog.Errorf("Failed to delete corrupt checkpoint for sandbox %q: %v", id, err) + klog.Errorf("Failed to delete corrupt checkpoint for sandbox %q: %v", id, err) } } continue @@ -678,14 +687,14 @@ func toCheckpointProtocol(protocol runtimeapi.Protocol) Protocol { case runtimeapi.Protocol_SCTP: return protocolSCTP } - glog.Warningf("Unknown protocol %q: defaulting to TCP", protocol) + klog.Warningf("Unknown protocol %q: defaulting to TCP", protocol) return protocolTCP } // rewriteResolvFile rewrites resolv.conf file generated by docker. 
func rewriteResolvFile(resolvFilePath string, dns []string, dnsSearch []string, dnsOptions []string) error { if len(resolvFilePath) == 0 { - glog.Errorf("ResolvConfPath is empty.") + klog.Errorf("ResolvConfPath is empty.") return nil } @@ -710,9 +719,9 @@ func rewriteResolvFile(resolvFilePath string, dns []string, dnsSearch []string, resolvFileContentStr := strings.Join(resolvFileContent, "\n") resolvFileContentStr += "\n" - glog.V(4).Infof("Will attempt to re-write config file %s with: \n%s", resolvFilePath, resolvFileContent) + klog.V(4).Infof("Will attempt to re-write config file %s with: \n%s", resolvFilePath, resolvFileContent) if err := rewriteFile(resolvFilePath, resolvFileContentStr); err != nil { - glog.Errorf("resolv.conf could not be updated: %v", err) + klog.Errorf("resolv.conf could not be updated: %v", err) return err } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_service.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_service.go index ae1f70f5218d..97f6543c4bb8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_service.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_service.go @@ -27,7 +27,7 @@ import ( "github.com/blang/semver" dockertypes "github.com/docker/docker/api/types" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" @@ -233,7 +233,7 @@ func NewDockerService(config *ClientConfig, podSandboxImage string, streamingCon // lead to retries of the same failure, so just fail hard. return nil, err } - glog.Infof("Hairpin mode set to %q", pluginSettings.HairpinMode) + klog.Infof("Hairpin mode set to %q", pluginSettings.HairpinMode) // dockershim currently only supports CNI plugins. 
pluginSettings.PluginBinDirs = cni.SplitDirs(pluginSettings.PluginBinDirString) @@ -248,25 +248,25 @@ func NewDockerService(config *ClientConfig, podSandboxImage string, streamingCon return nil, fmt.Errorf("didn't find compatible CNI plugin with given settings %+v: %v", pluginSettings, err) } ds.network = network.NewPluginManager(plug) - glog.Infof("Docker cri networking managed by %v", plug.Name()) + klog.Infof("Docker cri networking managed by %v", plug.Name()) // NOTE: cgroup driver is only detectable in docker 1.11+ cgroupDriver := defaultCgroupDriver dockerInfo, err := ds.client.Info() - glog.Infof("Docker Info: %+v", dockerInfo) + klog.Infof("Docker Info: %+v", dockerInfo) if err != nil { - glog.Errorf("Failed to execute Info() call to the Docker client: %v", err) - glog.Warningf("Falling back to use the default driver: %q", cgroupDriver) + klog.Errorf("Failed to execute Info() call to the Docker client: %v", err) + klog.Warningf("Falling back to use the default driver: %q", cgroupDriver) } else if len(dockerInfo.CgroupDriver) == 0 { - glog.Warningf("No cgroup driver is set in Docker") - glog.Warningf("Falling back to use the default driver: %q", cgroupDriver) + klog.Warningf("No cgroup driver is set in Docker") + klog.Warningf("Falling back to use the default driver: %q", cgroupDriver) } else { cgroupDriver = dockerInfo.CgroupDriver } if len(kubeCgroupDriver) != 0 && kubeCgroupDriver != cgroupDriver { return nil, fmt.Errorf("misconfiguration: kubelet cgroup driver: %q is different from docker cgroup driver: %q", kubeCgroupDriver, cgroupDriver) } - glog.Infof("Setting cgroupDriver to %s", cgroupDriver) + klog.Infof("Setting cgroupDriver to %s", cgroupDriver) ds.cgroupDriver = cgroupDriver ds.versionCache = cache.NewObjectCache( func() (interface{}, error) { @@ -342,7 +342,7 @@ func (ds *dockerService) UpdateRuntimeConfig(_ context.Context, r *runtimeapi.Up return &runtimeapi.UpdateRuntimeConfigResponse{}, nil } - glog.Infof("docker cri received runtime config %+v", runtimeConfig) + klog.Infof("docker cri received runtime config %+v", runtimeConfig) if ds.network != nil && runtimeConfig.NetworkConfig.PodCidr != "" { event := make(map[string]interface{}) event[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR] = runtimeConfig.NetworkConfig.PodCidr @@ -375,7 +375,7 @@ func (ds *dockerService) GetPodPortMappings(podSandboxID string) ([]*hostport.Po } errRem := ds.checkpointManager.RemoveCheckpoint(podSandboxID) if errRem != nil { - glog.Errorf("Failed to delete corrupt checkpoint for sandbox %q: %v", podSandboxID, errRem) + klog.Errorf("Failed to delete corrupt checkpoint for sandbox %q: %v", podSandboxID, errRem) } return nil, err } @@ -398,7 +398,7 @@ func (ds *dockerService) Start() error { if ds.startLocalStreamingServer { go func() { if err := ds.streamingServer.Start(true); err != nil { - glog.Fatalf("Streaming server stopped unexpectedly: %v", err) + klog.Fatalf("Streaming server stopped unexpectedly: %v", err) } }() } @@ -450,7 +450,7 @@ func (ds *dockerService) GenerateExpectedCgroupParent(cgroupParent string) (stri cgroupParent = path.Base(cgroupParent) } } - glog.V(3).Infof("Setting cgroup parent to: %q", cgroupParent) + klog.V(3).Infof("Setting cgroup parent to: %q", cgroupParent) return cgroupParent, nil } @@ -518,7 +518,7 @@ func toAPIProtocol(protocol Protocol) v1.Protocol { case protocolSCTP: return v1.ProtocolSCTP } - glog.Warningf("Unknown protocol %q: defaulting to TCP", protocol) + klog.Warningf("Unknown protocol %q: defaulting to TCP", protocol) return 
v1.ProtocolTCP } @@ -537,7 +537,7 @@ func effectiveHairpinMode(s *NetworkPluginSettings) error { // This is not a valid combination, since promiscuous-bridge only works on kubenet. Users might be using the // default values (from before the hairpin-mode flag existed) and we // should keep the old behavior. - glog.Warningf("Hairpin mode set to %q but kubenet is not enabled, falling back to %q", s.HairpinMode, kubeletconfig.HairpinVeth) + klog.Warningf("Hairpin mode set to %q but kubenet is not enabled, falling back to %q", s.HairpinMode, kubeletconfig.HairpinVeth) s.HairpinMode = kubeletconfig.HairpinVeth return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_streaming.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_streaming.go index d65b970041ac..1c4dc813b082 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_streaming.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_streaming.go @@ -27,7 +27,7 @@ import ( "time" dockertypes "github.com/docker/docker/api/types" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/client-go/tools/remotecommand" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" @@ -199,7 +199,7 @@ func portForward(client libdocker.Interface, podSandboxID string, port int32, st } commandString := fmt.Sprintf("%s %s", nsenterPath, strings.Join(args, " ")) - glog.V(4).Infof("executing port forwarding command: %s", commandString) + klog.V(4).Infof("executing port forwarding command: %s", commandString) command := exec.Command(nsenterPath, args...) command.Stdout = stream diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/exec.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/exec.go index aaaff8487d34..4b0d085b5a17 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/exec.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/exec.go @@ -22,7 +22,7 @@ import ( "time" dockertypes "github.com/docker/docker/api/types" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/client-go/tools/remotecommand" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -124,7 +124,7 @@ func (*NativeExecHandler) ExecInContainer(client libdocker.Interface, container count++ if count == 5 { - glog.Errorf("Exec session %s in container %s terminated but process still running!", execObj.ID, container.ID) + klog.Errorf("Exec session %s in container %s terminated but process still running!", execObj.ID, container.ID) break } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers.go index 05d23c75b29a..21166b82d1f9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers.go @@ -26,7 +26,7 @@ import ( dockercontainer "github.com/docker/docker/api/types/container" dockerfilters "github.com/docker/docker/api/types/filters" dockernat "github.com/docker/go-connections/nat" - "github.com/golang/glog" + "k8s.io/klog" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/kubernetes/pkg/credentialprovider" @@ -142,7 +142,7 @@ func generateMountBindings(mounts []*runtimeapi.Mount) []string { case runtimeapi.MountPropagation_PROPAGATION_HOST_TO_CONTAINER: attrs = append(attrs, "rslave") default: - glog.Warningf("unknown 
propagation mode for hostPath %q", m.HostPath) + klog.Warningf("unknown propagation mode for hostPath %q", m.HostPath) // Falls back to "private" } @@ -175,7 +175,7 @@ func makePortsAndBindings(pm []*runtimeapi.PortMapping) (dockernat.PortSet, map[ case runtimeapi.Protocol_SCTP: protocol = "/sctp" default: - glog.Warningf("Unknown protocol %q: defaulting to TCP", port.Protocol) + klog.Warningf("Unknown protocol %q: defaulting to TCP", port.Protocol) protocol = "/tcp" } @@ -283,12 +283,12 @@ func recoverFromCreationConflictIfNeeded(client libdocker.Interface, createConfi } id := matches[1] - glog.Warningf("Unable to create pod sandbox due to conflict. Attempting to remove sandbox %q", id) + klog.Warningf("Unable to create pod sandbox due to conflict. Attempting to remove sandbox %q", id) if rmErr := client.RemoveContainer(id, dockertypes.ContainerRemoveOptions{RemoveVolumes: true}); rmErr == nil { - glog.V(2).Infof("Successfully removed conflicting container %q", id) + klog.V(2).Infof("Successfully removed conflicting container %q", id) return nil, err } else { - glog.Errorf("Failed to remove the conflicting container %q: %v", id, rmErr) + klog.Errorf("Failed to remove the conflicting container %q: %v", id, rmErr) // Return if the error is not container not found error. if !libdocker.IsContainerNotFoundError(rmErr) { return nil, err @@ -297,7 +297,7 @@ func recoverFromCreationConflictIfNeeded(client libdocker.Interface, createConfi // randomize the name to avoid conflict. createConfig.Name = randomizeName(createConfig.Name) - glog.V(2).Infof("Create the container with randomized name %s", createConfig.Name) + klog.V(2).Infof("Create the container with randomized name %s", createConfig.Name) return client.CreateContainer(createConfig) } @@ -332,7 +332,7 @@ func ensureSandboxImageExists(client libdocker.Interface, image string) error { keyring := credentialprovider.NewDockerKeyring() creds, withCredentials := keyring.Lookup(repoToPull) if !withCredentials { - glog.V(3).Infof("Pulling image %q without credentials", image) + klog.V(3).Infof("Pulling image %q without credentials", image) err := client.PullImage(image, dockertypes.AuthConfig{}, dockertypes.ImagePullOptions{}) if err != nil { @@ -344,7 +344,7 @@ func ensureSandboxImageExists(client libdocker.Interface, image string) error { var pullErrs []error for _, currentCreds := range creds { - authConfig := credentialprovider.LazyProvide(currentCreds) + authConfig := dockertypes.AuthConfig(credentialprovider.LazyProvide(currentCreds)) err := client.PullImage(image, authConfig, dockertypes.ImagePullOptions{}) // If there was no error, return success if err == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_unsupported.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_unsupported.go index 2867898f301f..d78a7eb7b539 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_unsupported.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_unsupported.go @@ -23,7 +23,7 @@ import ( "github.com/blang/semver" dockertypes "github.com/docker/docker/api/types" - "github.com/golang/glog" + "k8s.io/klog" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" ) @@ -32,7 +32,7 @@ func DefaultMemorySwap() int64 { } func (ds *dockerService) getSecurityOpts(seccompProfile string, separator rune) ([]string, error) { - glog.Warningf("getSecurityOpts is unsupported in this build") + klog.Warningf("getSecurityOpts 
is unsupported in this build") return nil, nil } @@ -41,12 +41,12 @@ func (ds *dockerService) updateCreateConfig( config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig, podSandboxID string, securityOptSep rune, apiVersion *semver.Version) error { - glog.Warningf("updateCreateConfig is unsupported in this build") + klog.Warningf("updateCreateConfig is unsupported in this build") return nil } func (ds *dockerService) determinePodIPBySandboxID(uid string) string { - glog.Warningf("determinePodIPBySandboxID is unsupported in this build") + klog.Warningf("determinePodIPBySandboxID is unsupported in this build") return "" } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_windows.go index 436701546c58..d6c8ebabb96d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_windows.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_windows.go @@ -25,7 +25,7 @@ import ( dockertypes "github.com/docker/docker/api/types" dockercontainer "github.com/docker/docker/api/types/container" dockerfilters "github.com/docker/docker/api/types/filters" - "github.com/golang/glog" + "k8s.io/klog" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" @@ -37,7 +37,7 @@ func DefaultMemorySwap() int64 { func (ds *dockerService) getSecurityOpts(seccompProfile string, separator rune) ([]string, error) { if seccompProfile != "" { - glog.Warningf("seccomp annotations are not supported on windows") + klog.Warningf("seccomp annotations are not supported on windows") } return nil, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/BUILD index 25c1a0e2d381..f706d66e3e88 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/BUILD @@ -40,8 +40,8 @@ go_library( "//vendor/github.com/docker/docker/client:go_default_library", "//vendor/github.com/docker/docker/pkg/jsonmessage:go_default_library", "//vendor/github.com/docker/docker/pkg/stdcopy:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/opencontainers/go-digest:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/client.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/client.go index 73f0a4dc7d73..d43da1bd22e3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/client.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/client.go @@ -23,7 +23,7 @@ import ( dockercontainer "github.com/docker/docker/api/types/container" dockerimagetypes "github.com/docker/docker/api/types/image" dockerapi "github.com/docker/docker/client" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -72,7 +72,7 @@ type Interface interface { // DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT path per their spec func getDockerClient(dockerEndpoint string) (*dockerapi.Client, error) { if len(dockerEndpoint) > 0 { - glog.Infof("Connecting to docker on %s", dockerEndpoint) + klog.Infof("Connecting to docker on %s", 
dockerEndpoint) return dockerapi.NewClient(dockerEndpoint, "", nil, nil) } return dockerapi.NewEnvClient() @@ -99,8 +99,8 @@ func ConnectToDockerOrDie(dockerEndpoint string, requestTimeout, imagePullProgre } client, err := getDockerClient(dockerEndpoint) if err != nil { - glog.Fatalf("Couldn't connect to docker: %v", err) + klog.Fatalf("Couldn't connect to docker: %v", err) } - glog.Infof("Start docker client with request timeout=%v", requestTimeout) + klog.Infof("Start docker client with request timeout=%v", requestTimeout) return newKubeDockerClient(client, requestTimeout, imagePullProgressDeadline) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/helpers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/helpers.go index f8a785cbd0a4..a26ca48a36e2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/helpers.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/helpers.go @@ -22,8 +22,8 @@ import ( dockerref "github.com/docker/distribution/reference" dockertypes "github.com/docker/docker/api/types" - "github.com/golang/glog" godigest "github.com/opencontainers/go-digest" + "k8s.io/klog" ) // ParseDockerTimestamp parses the timestamp returned by Interface from string to time.Time @@ -42,7 +42,7 @@ func matchImageTagOrSHA(inspected dockertypes.ImageInspect, image string) bool { // https://github.com/docker/distribution/blob/master/reference/reference.go#L4 named, err := dockerref.ParseNormalizedNamed(image) if err != nil { - glog.V(4).Infof("couldn't parse image reference %q: %v", image, err) + klog.V(4).Infof("couldn't parse image reference %q: %v", image, err) return false } _, isTagged := named.(dockerref.Tagged) @@ -100,7 +100,7 @@ func matchImageTagOrSHA(inspected dockertypes.ImageInspect, image string) bool { for _, repoDigest := range inspected.RepoDigests { named, err := dockerref.ParseNormalizedNamed(repoDigest) if err != nil { - glog.V(4).Infof("couldn't parse image RepoDigest reference %q: %v", repoDigest, err) + klog.V(4).Infof("couldn't parse image RepoDigest reference %q: %v", repoDigest, err) continue } if d, isDigested := named.(dockerref.Digested); isDigested { @@ -114,14 +114,14 @@ func matchImageTagOrSHA(inspected dockertypes.ImageInspect, image string) bool { // process the ID as a digest id, err := godigest.Parse(inspected.ID) if err != nil { - glog.V(4).Infof("couldn't parse image ID reference %q: %v", id, err) + klog.V(4).Infof("couldn't parse image ID reference %q: %v", id, err) return false } if digest.Digest().Algorithm().String() == id.Algorithm().String() && digest.Digest().Hex() == id.Hex() { return true } } - glog.V(4).Infof("Inspected image (%q) does not match %s", inspected.ID, image) + klog.V(4).Infof("Inspected image (%q) does not match %s", inspected.ID, image) return false } @@ -138,19 +138,19 @@ func matchImageIDOnly(inspected dockertypes.ImageInspect, image string) bool { // Otherwise, we should try actual parsing to be more correct ref, err := dockerref.Parse(image) if err != nil { - glog.V(4).Infof("couldn't parse image reference %q: %v", image, err) + klog.V(4).Infof("couldn't parse image reference %q: %v", image, err) return false } digest, isDigested := ref.(dockerref.Digested) if !isDigested { - glog.V(4).Infof("the image reference %q was not a digest reference", image) + klog.V(4).Infof("the image reference %q was not a digest reference", image) return false } id, err := godigest.Parse(inspected.ID) if 
err != nil { - glog.V(4).Infof("couldn't parse image ID reference %q: %v", id, err) + klog.V(4).Infof("couldn't parse image ID reference %q: %v", id, err) return false } @@ -158,7 +158,7 @@ func matchImageIDOnly(inspected dockertypes.ImageInspect, image string) bool { return true } - glog.V(4).Infof("The reference %s does not directly refer to the given image's ID (%q)", image, inspected.ID) + klog.V(4).Infof("The reference %s does not directly refer to the given image's ID (%q)", image, inspected.ID) return false } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/kube_docker_client.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/kube_docker_client.go index 13a1eab55592..0cdb9955e3d7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/kube_docker_client.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/kube_docker_client.go @@ -28,7 +28,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" dockertypes "github.com/docker/docker/api/types" dockercontainer "github.com/docker/docker/api/types/container" @@ -88,8 +88,8 @@ func newKubeDockerClient(dockerClient *dockerapi.Client, requestTimeout, imagePu // Notice that this assumes that docker is running before kubelet is started. v, err := k.Version() if err != nil { - glog.Errorf("failed to retrieve docker version: %v", err) - glog.Warningf("Using empty version for docker client, this may sometimes cause compatibility issue.") + klog.Errorf("failed to retrieve docker version: %v", err) + klog.Warningf("Using empty version for docker client, this may sometimes cause compatibility issue.") } else { // Update client version with real api version. dockerClient.NegotiateAPIVersionPing(dockertypes.Ping{APIVersion: v.APIVersion}) @@ -318,10 +318,10 @@ type progressReporter struct { // newProgressReporter creates a new progressReporter for specific image with specified reporting interval func newProgressReporter(image string, cancel context.CancelFunc, imagePullProgressDeadline time.Duration) *progressReporter { return &progressReporter{ - progress: newProgress(), - image: image, - cancel: cancel, - stopCh: make(chan struct{}), + progress: newProgress(), + image: image, + cancel: cancel, + stopCh: make(chan struct{}), imagePullProgressDeadline: imagePullProgressDeadline, } } @@ -338,14 +338,14 @@ func (p *progressReporter) start() { progress, timestamp := p.progress.get() // If there is no progress for p.imagePullProgressDeadline, cancel the operation. 
if time.Since(timestamp) > p.imagePullProgressDeadline { - glog.Errorf("Cancel pulling image %q because of no progress for %v, latest progress: %q", p.image, p.imagePullProgressDeadline, progress) + klog.Errorf("Cancel pulling image %q because of no progress for %v, latest progress: %q", p.image, p.imagePullProgressDeadline, progress) p.cancel() return } - glog.V(2).Infof("Pulling image %q: %q", p.image, progress) + klog.V(2).Infof("Pulling image %q: %q", p.image, progress) case <-p.stopCh: progress, _ := p.progress.get() - glog.V(2).Infof("Stop pulling image %q: %q", p.image, progress) + klog.V(2).Infof("Stop pulling image %q: %q", p.image, progress) return } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/metrics/metrics.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/metrics/metrics.go index 2ce548f76a63..907647970d5f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/metrics/metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/metrics/metrics.go @@ -30,7 +30,7 @@ const ( DockerOperationsLatencyKey = "docker_operations_latency_microseconds" // DockerOperationsErrorsKey is the key for the operation error metrics. DockerOperationsErrorsKey = "docker_operations_errors" - // DockerOperationsTimeoutKey is the key for the operation timoeut metrics. + // DockerOperationsTimeoutKey is the key for the operation timeout metrics. DockerOperationsTimeoutKey = "docker_operations_timeout" // Keep the "kubelet" subsystem for backward compatibility. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/BUILD index 13ab4c3d8530..330764a3c087 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/BUILD @@ -18,7 +18,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/BUILD index 9396ba84e034..8691c7344f2a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/BUILD @@ -16,12 +16,13 @@ go_library( importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni", deps = [ "//pkg/kubelet/apis/config:go_default_library", + "//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/dockershim/network:go_default_library", "//pkg/util/bandwidth:go_default_library", "//vendor/github.com/containernetworking/cni/libcni:go_default_library", "//vendor/github.com/containernetworking/cni/pkg/types:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:windows": [ diff --git 
a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/cni.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/cni.go index e0bbf97c1e85..77546a0974c6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/cni.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/cni.go @@ -17,16 +17,19 @@ limitations under the License. package cni import ( + "encoding/json" "errors" "fmt" + "math" "sort" "strings" "sync" "github.com/containernetworking/cni/libcni" cnitypes "github.com/containernetworking/cni/pkg/types" - "github.com/golang/glog" + "k8s.io/klog" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" + runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/dockershim/network" "k8s.io/kubernetes/pkg/util/bandwidth" @@ -89,6 +92,18 @@ type cniIpRange struct { Subnet string `json:"subnet"` } +// cniDNSConfig maps to the windows CNI dns Capability. +// see: https://github.com/containernetworking/cni/blob/master/CONVENTIONS.md +// Note that dns capability is only used for Windows containers. +type cniDNSConfig struct { + // List of DNS servers of the cluster. + Servers []string `json:"servers,omitempty"` + // List of DNS search domains of the cluster. + Searches []string `json:"searches,omitempty"` + // List of DNS options. + Options []string `json:"options,omitempty"` +} + func SplitDirs(dirs string) []string { // Use comma rather than colon to work better with Windows too return strings.Split(dirs, ",") @@ -131,34 +146,34 @@ func getDefaultCNINetwork(confDir string, binDirs []string) (*cniNetwork, error) if strings.HasSuffix(confFile, ".conflist") { confList, err = libcni.ConfListFromFile(confFile) if err != nil { - glog.Warningf("Error loading CNI config list file %s: %v", confFile, err) + klog.Warningf("Error loading CNI config list file %s: %v", confFile, err) continue } } else { conf, err := libcni.ConfFromFile(confFile) if err != nil { - glog.Warningf("Error loading CNI config file %s: %v", confFile, err) + klog.Warningf("Error loading CNI config file %s: %v", confFile, err) continue } // Ensure the config has a "type" so we know what plugin to run. // Also catches the case where somebody put a conflist into a conf file. 
if conf.Network.Type == "" { - glog.Warningf("Error loading CNI config file %s: no 'type'; perhaps this is a .conflist?", confFile) + klog.Warningf("Error loading CNI config file %s: no 'type'; perhaps this is a .conflist?", confFile) continue } confList, err = libcni.ConfListFromConf(conf) if err != nil { - glog.Warningf("Error converting CNI config file %s to list: %v", confFile, err) + klog.Warningf("Error converting CNI config file %s to list: %v", confFile, err) continue } } if len(confList.Plugins) == 0 { - glog.Warningf("CNI config list %s has no networks, skipping", confFile) + klog.Warningf("CNI config list %s has no networks, skipping", confFile) continue } - glog.V(4).Infof("Using CNI configuration file %s", confFile) + klog.V(4).Infof("Using CNI configuration file %s", confFile) network := &cniNetwork{ name: confList.Name, @@ -185,7 +200,7 @@ func (plugin *cniNetworkPlugin) Init(host network.Host, hairpinMode kubeletconfi func (plugin *cniNetworkPlugin) syncNetworkConfig() { network, err := getDefaultCNINetwork(plugin.confDir, plugin.binDirs) if err != nil { - glog.Warningf("Unable to update cni config: %s", err) + klog.Warningf("Unable to update cni config: %s", err) return } plugin.setDefaultNetwork(network) @@ -232,12 +247,12 @@ func (plugin *cniNetworkPlugin) Event(name string, details map[string]interface{ podCIDR, ok := details[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR].(string) if !ok { - glog.Warningf("%s event didn't contain pod CIDR", network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE) + klog.Warningf("%s event didn't contain pod CIDR", network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE) return } if plugin.podCidr != "" { - glog.Warningf("Ignoring subsequent pod CIDR update to %s", podCIDR) + klog.Warningf("Ignoring subsequent pod CIDR update to %s", podCIDR) return } @@ -256,7 +271,7 @@ func (plugin *cniNetworkPlugin) Status() error { return plugin.checkInitialized() } -func (plugin *cniNetworkPlugin) SetUpPod(namespace string, name string, id kubecontainer.ContainerID, annotations map[string]string) error { +func (plugin *cniNetworkPlugin) SetUpPod(namespace string, name string, id kubecontainer.ContainerID, annotations, options map[string]string) error { if err := plugin.checkInitialized(); err != nil { return err } @@ -267,18 +282,12 @@ func (plugin *cniNetworkPlugin) SetUpPod(namespace string, name string, id kubec // Windows doesn't have loNetwork. 
It comes only with Linux if plugin.loNetwork != nil { - if _, err = plugin.addToNetwork(plugin.loNetwork, name, namespace, id, netnsPath, annotations); err != nil { - glog.Errorf("Error while adding to cni lo network: %s", err) + if _, err = plugin.addToNetwork(plugin.loNetwork, name, namespace, id, netnsPath, annotations, options); err != nil { return err } } - _, err = plugin.addToNetwork(plugin.getDefaultNetwork(), name, namespace, id, netnsPath, annotations) - if err != nil { - glog.Errorf("Error while adding to cni network: %s", err) - return err - } - + _, err = plugin.addToNetwork(plugin.getDefaultNetwork(), name, namespace, id, netnsPath, annotations, options) return err } @@ -290,53 +299,57 @@ func (plugin *cniNetworkPlugin) TearDownPod(namespace string, name string, id ku // Lack of namespace should not be fatal on teardown netnsPath, err := plugin.host.GetNetNS(id.ID) if err != nil { - glog.Warningf("CNI failed to retrieve network namespace path: %v", err) + klog.Warningf("CNI failed to retrieve network namespace path: %v", err) } return plugin.deleteFromNetwork(plugin.getDefaultNetwork(), name, namespace, id, netnsPath, nil) } -func (plugin *cniNetworkPlugin) addToNetwork(network *cniNetwork, podName string, podNamespace string, podSandboxID kubecontainer.ContainerID, podNetnsPath string, annotations map[string]string) (cnitypes.Result, error) { - rt, err := plugin.buildCNIRuntimeConf(podName, podNamespace, podSandboxID, podNetnsPath, annotations) +func podDesc(namespace, name string, id kubecontainer.ContainerID) string { + return fmt.Sprintf("%s_%s/%s", namespace, name, id.ID) +} + +func (plugin *cniNetworkPlugin) addToNetwork(network *cniNetwork, podName string, podNamespace string, podSandboxID kubecontainer.ContainerID, podNetnsPath string, annotations, options map[string]string) (cnitypes.Result, error) { + rt, err := plugin.buildCNIRuntimeConf(podName, podNamespace, podSandboxID, podNetnsPath, annotations, options) if err != nil { - glog.Errorf("Error adding network when building cni runtime conf: %v", err) + klog.Errorf("Error adding network when building cni runtime conf: %v", err) return nil, err } + pdesc := podDesc(podNamespace, podName, podSandboxID) netConf, cniNet := network.NetworkConfig, network.CNIConfig - glog.V(4).Infof("About to add CNI network %v (type=%v)", netConf.Name, netConf.Plugins[0].Network.Type) + klog.V(4).Infof("Adding %s to network %s/%s netns %q", pdesc, netConf.Plugins[0].Network.Type, netConf.Name, podNetnsPath) res, err := cniNet.AddNetworkList(netConf, rt) if err != nil { - glog.Errorf("Error adding network: %v", err) + klog.Errorf("Error adding %s to network %s/%s: %v", pdesc, netConf.Plugins[0].Network.Type, netConf.Name, err) return nil, err } - + klog.V(4).Infof("Added %s to network %s: %v", pdesc, netConf.Name, res) return res, nil } func (plugin *cniNetworkPlugin) deleteFromNetwork(network *cniNetwork, podName string, podNamespace string, podSandboxID kubecontainer.ContainerID, podNetnsPath string, annotations map[string]string) error { - rt, err := plugin.buildCNIRuntimeConf(podName, podNamespace, podSandboxID, podNetnsPath, annotations) + rt, err := plugin.buildCNIRuntimeConf(podName, podNamespace, podSandboxID, podNetnsPath, annotations, nil) if err != nil { - glog.Errorf("Error deleting network when building cni runtime conf: %v", err) + klog.Errorf("Error deleting network when building cni runtime conf: %v", err) return err } + pdesc := podDesc(podNamespace, podName, podSandboxID) netConf, cniNet := network.NetworkConfig, 
network.CNIConfig - glog.V(4).Infof("About to del CNI network %v (type=%v)", netConf.Name, netConf.Plugins[0].Network.Type) + klog.V(4).Infof("Deleting %s from network %s/%s netns %q", pdesc, netConf.Plugins[0].Network.Type, netConf.Name, podNetnsPath) err = cniNet.DelNetworkList(netConf, rt) // The pod may not get deleted successfully at the first time. // Ignore "no such file or directory" error in case the network has already been deleted in previous attempts. if err != nil && !strings.Contains(err.Error(), "no such file or directory") { - glog.Errorf("Error deleting network: %v", err) + klog.Errorf("Error deleting %s from network %s/%s: %v", pdesc, netConf.Plugins[0].Network.Type, netConf.Name, err) return err } + klog.V(4).Infof("Deleted %s from network %s/%s", pdesc, netConf.Plugins[0].Network.Type, netConf.Name) return nil } -func (plugin *cniNetworkPlugin) buildCNIRuntimeConf(podName string, podNs string, podSandboxID kubecontainer.ContainerID, podNetnsPath string, annotations map[string]string) (*libcni.RuntimeConf, error) { - glog.V(4).Infof("Got netns path %v", podNetnsPath) - glog.V(4).Infof("Using podns path %v", podNs) - +func (plugin *cniNetworkPlugin) buildCNIRuntimeConf(podName string, podNs string, podSandboxID kubecontainer.ContainerID, podNetnsPath string, annotations, options map[string]string) (*libcni.RuntimeConf, error) { rt := &libcni.RuntimeConf{ ContainerID: podSandboxID.ID, NetNS: podNetnsPath, @@ -379,11 +392,11 @@ func (plugin *cniNetworkPlugin) buildCNIRuntimeConf(podName string, podNs string bandwidthParam := cniBandwidthEntry{} if ingress != nil { bandwidthParam.IngressRate = int(ingress.Value() / 1000) - bandwidthParam.IngressBurst = 0 // default to no limit + bandwidthParam.IngressBurst = math.MaxInt32 // no limit } if egress != nil { bandwidthParam.EgressRate = int(egress.Value() / 1000) - bandwidthParam.EgressBurst = 0 // default to no limit + bandwidthParam.EgressBurst = math.MaxInt32 // no limit } rt.CapabilityArgs["bandwidth"] = bandwidthParam } @@ -391,5 +404,17 @@ func (plugin *cniNetworkPlugin) buildCNIRuntimeConf(podName string, podNs string // Set the PodCIDR rt.CapabilityArgs["ipRanges"] = [][]cniIpRange{{{Subnet: plugin.podCidr}}} + // Set dns capability args. + if dnsOptions, ok := options["dns"]; ok { + dnsConfig := runtimeapi.DNSConfig{} + err := json.Unmarshal([]byte(dnsOptions), &dnsConfig) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal dns config %q: %v", dnsOptions, err) + } + if dnsParam := buildDNSCapabilities(&dnsConfig); dnsParam != nil { + rt.CapabilityArgs["dns"] = *dnsParam + } + } + return rt, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/cni_others.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/cni_others.go index 56f75ca3a960..1712f8abe3c4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/cni_others.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/cni_others.go @@ -22,6 +22,7 @@ import ( "fmt" "github.com/containernetworking/cni/libcni" + runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/dockershim/network" ) @@ -75,3 +76,8 @@ func (plugin *cniNetworkPlugin) GetPodNetworkStatus(namespace string, name strin return &network.PodNetworkStatus{IP: ip}, nil } + +// buildDNSCapabilities builds cniDNSConfig from runtimeapi.DNSConfig. 
+func buildDNSCapabilities(dnsConfig *runtimeapi.DNSConfig) *cniDNSConfig { + return nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/cni_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/cni_windows.go index 0c9044455018..76b78e143cba 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/cni_windows.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/cni_windows.go @@ -22,7 +22,8 @@ import ( "fmt" cniTypes020 "github.com/containernetworking/cni/pkg/types/020" - "github.com/golang/glog" + "k8s.io/klog" + runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/dockershim/network" ) @@ -42,11 +43,11 @@ func (plugin *cniNetworkPlugin) GetPodNetworkStatus(namespace string, name strin return nil, fmt.Errorf("CNI failed to retrieve network namespace path: %v", err) } - result, err := plugin.addToNetwork(plugin.getDefaultNetwork(), name, namespace, id, netnsPath, nil) + result, err := plugin.addToNetwork(plugin.getDefaultNetwork(), name, namespace, id, netnsPath, nil, nil) - glog.V(5).Infof("GetPodNetworkStatus result %+v", result) + klog.V(5).Infof("GetPodNetworkStatus result %+v", result) if err != nil { - glog.Errorf("error while adding to cni network: %s", err) + klog.Errorf("error while adding to cni network: %s", err) return nil, err } @@ -54,8 +55,21 @@ func (plugin *cniNetworkPlugin) GetPodNetworkStatus(namespace string, name strin var result020 *cniTypes020.Result result020, err = cniTypes020.GetResult(result) if err != nil { - glog.Errorf("error while cni parsing result: %s", err) + klog.Errorf("error while cni parsing result: %s", err) return nil, err } return &network.PodNetworkStatus{IP: result020.IP4.IP.IP}, nil } + +// buildDNSCapabilities builds cniDNSConfig from runtimeapi.DNSConfig. 
+func buildDNSCapabilities(dnsConfig *runtimeapi.DNSConfig) *cniDNSConfig { + if dnsConfig != nil { + return &cniDNSConfig{ + Servers: dnsConfig.Servers, + Searches: dnsConfig.Searches, + Options: dnsConfig.Options, + } + } + + return nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/BUILD index d1f438e0ff8a..cceadb121aed 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/BUILD @@ -23,7 +23,7 @@ go_library( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/hostport.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/hostport.go index e04107fa3899..4f9f7751b3f0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/hostport.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/hostport.go @@ -21,7 +21,7 @@ import ( "net" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" utiliptables "k8s.io/kubernetes/pkg/util/iptables" @@ -97,7 +97,7 @@ func openLocalPort(hp *hostport) (closeable, error) { default: return nil, fmt.Errorf("unknown protocol %q", hp.protocol) } - glog.V(3).Infof("Opened local port %s", hp.String()) + klog.V(3).Infof("Opened local port %s", hp.String()) return socket, nil } @@ -111,7 +111,7 @@ func portMappingToHostport(portMapping *PortMapping) hostport { // ensureKubeHostportChains ensures the KUBE-HOSTPORTS chain is setup correctly func ensureKubeHostportChains(iptables utiliptables.Interface, natInterfaceName string) error { - glog.V(4).Info("Ensuring kubelet hostport chains") + klog.V(4).Info("Ensuring kubelet hostport chains") // Ensure kubeHostportChain if _, err := iptables.EnsureChain(utiliptables.TableNAT, kubeHostportsChain); err != nil { return fmt.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, kubeHostportsChain, err) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/hostport_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/hostport_manager.go index 70bfd16dab7b..ce5108fd318a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/hostport_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/hostport_manager.go @@ -25,9 +25,9 @@ import ( "strings" "sync" - "github.com/golang/glog" "k8s.io/api/core/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog" iptablesproxy "k8s.io/kubernetes/pkg/proxy/iptables" "k8s.io/kubernetes/pkg/util/conntrack" utiliptables "k8s.io/kubernetes/pkg/util/iptables" @@ -65,7 +65,7 @@ func NewHostportManager(iptables utiliptables.Interface) HostPortManager { } h.conntrackFound = conntrack.Exists(h.execer) if !h.conntrackFound { - glog.Warningf("The binary conntrack is not installed, this can cause 
failures in network connection cleanup.") + klog.Warningf("The binary conntrack is not installed, this can cause failures in network connection cleanup.") } return h } @@ -173,11 +173,11 @@ func (hm *hostportManager) Add(id string, podPortMapping *PodPortMapping, natInt // create a new conntrack entry without any DNAT. That will result in blackhole of the traffic even after correct // iptables rules have been added back. if hm.execer != nil && hm.conntrackFound { - glog.Infof("Starting to delete udp conntrack entries: %v, isIPv6 - %v", conntrackPortsToRemove, isIpv6) + klog.Infof("Starting to delete udp conntrack entries: %v, isIPv6 - %v", conntrackPortsToRemove, isIpv6) for _, port := range conntrackPortsToRemove { err = conntrack.ClearEntriesForPort(hm.execer, port, isIpv6, v1.ProtocolUDP) if err != nil { - glog.Errorf("Failed to clear udp conntrack for port %d, error: %v", port, err) + klog.Errorf("Failed to clear udp conntrack for port %d, error: %v", port, err) } } } @@ -246,7 +246,7 @@ func (hm *hostportManager) Remove(id string, podPortMapping *PodPortMapping) (er // syncIPTables executes iptables-restore with given lines func (hm *hostportManager) syncIPTables(lines []byte) error { - glog.V(3).Infof("Restoring iptables rules: %s", lines) + klog.V(3).Infof("Restoring iptables rules: %s", lines) err := hm.iptables.RestoreAll(lines, utiliptables.NoFlushTables, utiliptables.RestoreCounters) if err != nil { return fmt.Errorf("Failed to execute iptables-restore: %v", err) @@ -283,7 +283,7 @@ func (hm *hostportManager) openHostports(podPortMapping *PodPortMapping) (map[ho if retErr != nil { for hp, socket := range ports { if err := socket.Close(); err != nil { - glog.Errorf("Cannot clean up hostport %d for pod %s: %v", hp.port, getPodFullName(podPortMapping), err) + klog.Errorf("Cannot clean up hostport %d for pod %s: %v", hp.port, getPodFullName(podPortMapping), err) } } return nil, retErr @@ -297,7 +297,7 @@ func (hm *hostportManager) closeHostports(hostportMappings []*PortMapping) error for _, pm := range hostportMappings { hp := portMappingToHostport(pm) if socket, ok := hm.hostPortMap[hp]; ok { - glog.V(2).Infof("Closing host port %s", hp.String()) + klog.V(2).Infof("Closing host port %s", hp.String()) if err := socket.Close(); err != nil { errList = append(errList, fmt.Errorf("failed to close host port %s: %v", hp.String(), err)) continue diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/hostport_syncer.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/hostport_syncer.go index 1f9df7e9b988..5274f890dff1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/hostport_syncer.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/hostport_syncer.go @@ -25,7 +25,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" iptablesproxy "k8s.io/kubernetes/pkg/proxy/iptables" @@ -97,7 +97,7 @@ func (h *hostportSyncer) openHostports(podHostportMapping *PodPortMapping) error if retErr != nil { for hp, socket := range ports { if err := socket.Close(); err != nil { - glog.Errorf("Cannot clean up hostport %d for pod %s: %v", hp.port, getPodFullName(podHostportMapping), err) + klog.Errorf("Cannot clean up hostport %d for pod %s: %v", hp.port, getPodFullName(podHostportMapping), err) } } return retErr @@ -188,7 +188,7 @@ func (h *hostportSyncer) OpenPodHostportsAndSync(newPortMapping 
*PodPortMapping, func (h *hostportSyncer) SyncHostports(natInterfaceName string, activePodPortMappings []*PodPortMapping) error { start := time.Now() defer func() { - glog.V(4).Infof("syncHostportsRules took %v", time.Since(start)) + klog.V(4).Infof("syncHostportsRules took %v", time.Since(start)) }() hostportPodMap, err := gatherAllHostports(activePodPortMappings) @@ -205,7 +205,7 @@ func (h *hostportSyncer) SyncHostports(natInterfaceName string, activePodPortMap iptablesData := bytes.NewBuffer(nil) err = h.iptables.SaveInto(utiliptables.TableNAT, iptablesData) if err != nil { // if we failed to get any rules - glog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err) + klog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err) } else { // otherwise parse the output existingNATChains = utiliptables.GetChainLines(utiliptables.TableNAT, iptablesData.Bytes()) } @@ -283,7 +283,7 @@ func (h *hostportSyncer) SyncHostports(natInterfaceName string, activePodPortMap writeLine(natRules, "COMMIT") natLines := append(natChains.Bytes(), natRules.Bytes()...) - glog.V(3).Infof("Restoring iptables rules: %s", natLines) + klog.V(3).Infof("Restoring iptables rules: %s", natLines) err = h.iptables.RestoreAll(natLines, utiliptables.NoFlushTables, utiliptables.RestoreCounters) if err != nil { return fmt.Errorf("Failed to execute iptables-restore: %v", err) @@ -309,7 +309,7 @@ func (h *hostportSyncer) cleanupHostportMap(containerPortMap map[*PortMapping]ta for hp, socket := range h.hostPortMap { if _, ok := currentHostports[hp]; !ok { socket.Close() - glog.V(3).Infof("Closed local port %s", hp.String()) + klog.V(3).Infof("Closed local port %s", hp.String()) delete(h.hostPortMap, hp) } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet/BUILD index 92613205c5bb..0b4c606aebe7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet/BUILD @@ -51,9 +51,9 @@ go_library( "//vendor/github.com/containernetworking/cni/libcni:go_default_library", "//vendor/github.com/containernetworking/cni/pkg/types:go_default_library", "//vendor/github.com/containernetworking/cni/pkg/types/020:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/vishvananda/netlink:go_default_library", "//vendor/golang.org/x/sys/unix:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], "@io_bazel_rules_go//go/platform:nacl": [ diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go index 0aec8c38cf76..c3909b1c7f41 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go @@ -29,12 +29,12 @@ import ( "github.com/containernetworking/cni/libcni" cnitypes "github.com/containernetworking/cni/pkg/types" cnitypes020 "github.com/containernetworking/cni/pkg/types/020" - "github.com/golang/glog" "github.com/vishvananda/netlink" "golang.org/x/sys/unix" utilerrors "k8s.io/apimachinery/pkg/util/errors" utilnet "k8s.io/apimachinery/pkg/util/net" 
utilsets "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/dockershim/network" @@ -124,10 +124,10 @@ func (plugin *kubenetNetworkPlugin) Init(host network.Host, hairpinMode kubeletc if mtu == network.UseDefaultMTU { if link, err := findMinMTU(); err == nil { plugin.mtu = link.MTU - glog.V(5).Infof("Using interface %s MTU %d as bridge MTU", link.Name, link.MTU) + klog.V(5).Infof("Using interface %s MTU %d as bridge MTU", link.Name, link.MTU) } else { plugin.mtu = fallbackMTU - glog.Warningf("Failed to find default bridge MTU, using %d: %v", fallbackMTU, err) + klog.Warningf("Failed to find default bridge MTU, using %d: %v", fallbackMTU, err) } } else { plugin.mtu = mtu @@ -142,7 +142,7 @@ func (plugin *kubenetNetworkPlugin) Init(host network.Host, hairpinMode kubeletc plugin.execer.Command("modprobe", "br-netfilter").CombinedOutput() err := plugin.sysctl.SetSysctl(sysctlBridgeCallIPTables, 1) if err != nil { - glog.Warningf("can't set sysctl %s: %v", sysctlBridgeCallIPTables, err) + klog.Warningf("can't set sysctl %s: %v", sysctlBridgeCallIPTables, err) } plugin.loConfig, err = libcni.ConfFromBytes([]byte(`{ @@ -234,16 +234,16 @@ func (plugin *kubenetNetworkPlugin) Event(name string, details map[string]interf podCIDR, ok := details[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR].(string) if !ok { - glog.Warningf("%s event didn't contain pod CIDR", network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE) + klog.Warningf("%s event didn't contain pod CIDR", network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE) return } if plugin.netConfig != nil { - glog.Warningf("Ignoring subsequent pod CIDR update to %s", podCIDR) + klog.Warningf("Ignoring subsequent pod CIDR update to %s", podCIDR) return } - glog.V(5).Infof("PodCIDR is set to %q", podCIDR) + klog.V(5).Infof("PodCIDR is set to %q", podCIDR) _, cidr, err := net.ParseCIDR(podCIDR) if err == nil { setHairpin := plugin.hairpinMode == kubeletconfig.HairpinVeth @@ -251,10 +251,10 @@ func (plugin *kubenetNetworkPlugin) Event(name string, details map[string]interf cidr.IP[len(cidr.IP)-1] += 1 json := fmt.Sprintf(NET_CONFIG_TEMPLATE, BridgeName, plugin.mtu, network.DefaultInterfaceName, setHairpin, podCIDR, cidr.IP.String()) - glog.V(2).Infof("CNI network config set to %v", json) + klog.V(2).Infof("CNI network config set to %v", json) plugin.netConfig, err = libcni.ConfFromBytes([]byte(json)) if err == nil { - glog.V(5).Infof("CNI network config:\n%s", json) + klog.V(5).Infof("CNI network config:\n%s", json) // Ensure cbr0 has no conflicting addresses; CNI's 'bridge' // plugin will bail out if the bridge has an unexpected one @@ -265,7 +265,7 @@ func (plugin *kubenetNetworkPlugin) Event(name string, details map[string]interf } if err != nil { - glog.Warningf("Failed to generate CNI network config: %v", err) + klog.Warningf("Failed to generate CNI network config: %v", err) } } @@ -282,7 +282,7 @@ func (plugin *kubenetNetworkPlugin) clearBridgeAddressesExcept(keep *net.IPNet) for _, addr := range addrs { if !utilnet.IPNetEqual(addr.IPNet, keep) { - glog.V(2).Infof("Removing old address %s from %s", addr.IPNet.String(), BridgeName) + klog.V(2).Infof("Removing old address %s from %s", addr.IPNet.String(), BridgeName) netlink.AddrDel(bridge, &addr) } } @@ -300,7 +300,7 @@ func (plugin *kubenetNetworkPlugin) Capabilities() utilsets.Int { func (plugin *kubenetNetworkPlugin) setup(namespace string, name string, id 
kubecontainer.ContainerID, annotations map[string]string) error { // Disable DAD so we skip the kernel delay on bringing up new interfaces. if err := plugin.disableContainerDAD(id); err != nil { - glog.V(3).Infof("Failed to disable DAD in container: %v", err) + klog.V(3).Infof("Failed to disable DAD in container: %v", err) } // Bring up container loopback interface @@ -379,13 +379,13 @@ func (plugin *kubenetNetworkPlugin) setup(namespace string, name string, id kube return nil } -func (plugin *kubenetNetworkPlugin) SetUpPod(namespace string, name string, id kubecontainer.ContainerID, annotations map[string]string) error { +func (plugin *kubenetNetworkPlugin) SetUpPod(namespace string, name string, id kubecontainer.ContainerID, annotations, options map[string]string) error { plugin.mu.Lock() defer plugin.mu.Unlock() start := time.Now() defer func() { - glog.V(4).Infof("SetUpPod took %v for %s/%s", time.Since(start), namespace, name) + klog.V(4).Infof("SetUpPod took %v for %s/%s", time.Since(start), namespace, name) }() if err := plugin.Status(); err != nil { @@ -397,14 +397,14 @@ func (plugin *kubenetNetworkPlugin) SetUpPod(namespace string, name string, id k podIP, _ := plugin.podIPs[id] if err := plugin.teardown(namespace, name, id, podIP); err != nil { // Not a hard error or warning - glog.V(4).Infof("Failed to clean up %s/%s after SetUpPod failure: %v", namespace, name, err) + klog.V(4).Infof("Failed to clean up %s/%s after SetUpPod failure: %v", namespace, name, err) } return err } // Need to SNAT outbound traffic from cluster if err := plugin.ensureMasqRule(); err != nil { - glog.Errorf("Failed to ensure MASQ rule: %v", err) + klog.Errorf("Failed to ensure MASQ rule: %v", err) } return nil @@ -416,11 +416,11 @@ func (plugin *kubenetNetworkPlugin) teardown(namespace string, name string, id k errList := []error{} if podIP != "" { - glog.V(5).Infof("Removing pod IP %s from shaper", podIP) + klog.V(5).Infof("Removing pod IP %s from shaper", podIP) // shaper wants /32 if err := plugin.shaper().Reset(fmt.Sprintf("%s/32", podIP)); err != nil { // Possible bandwidth shaping wasn't enabled for this pod anyways - glog.V(4).Infof("Failed to remove pod IP %s from shaper: %v", podIP, err) + klog.V(4).Infof("Failed to remove pod IP %s from shaper: %v", podIP, err) } delete(plugin.podIPs, id) @@ -429,7 +429,7 @@ func (plugin *kubenetNetworkPlugin) teardown(namespace string, name string, id k if err := plugin.delContainerFromNetwork(plugin.netConfig, network.DefaultInterfaceName, namespace, name, id); err != nil { // This is to prevent returning error when TearDownPod is called twice on the same pod. This helps to reduce event pollution. 
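// Concretely: while a podIP is still recorded for this sandbox the delete
// error is only logged as a warning; once no IP is known the error is
// propagated through errList instead.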
if podIP != "" { - glog.Warningf("Failed to delete container from kubenet: %v", err) + klog.Warningf("Failed to delete container from kubenet: %v", err) } else { errList = append(errList, err) } @@ -457,7 +457,7 @@ func (plugin *kubenetNetworkPlugin) TearDownPod(namespace string, name string, i start := time.Now() defer func() { - glog.V(4).Infof("TearDownPod took %v for %s/%s", time.Since(start), namespace, name) + klog.V(4).Infof("TearDownPod took %v for %s/%s", time.Since(start), namespace, name) }() if plugin.netConfig == nil { @@ -472,7 +472,7 @@ func (plugin *kubenetNetworkPlugin) TearDownPod(namespace string, name string, i // Need to SNAT outbound traffic from cluster if err := plugin.ensureMasqRule(); err != nil { - glog.Errorf("Failed to ensure MASQ rule: %v", err) + klog.Errorf("Failed to ensure MASQ rule: %v", err) } return nil @@ -550,7 +550,7 @@ func (plugin *kubenetNetworkPlugin) checkRequiredCNIPluginsInOneDir(dir string) func (plugin *kubenetNetworkPlugin) buildCNIRuntimeConf(ifName string, id kubecontainer.ContainerID, needNetNs bool) (*libcni.RuntimeConf, error) { netnsPath, err := plugin.host.GetNetNS(id.ID) if needNetNs && err != nil { - glog.Errorf("Kubenet failed to retrieve network namespace path: %v", err) + klog.Errorf("Kubenet failed to retrieve network namespace path: %v", err) } return &libcni.RuntimeConf{ @@ -566,7 +566,7 @@ func (plugin *kubenetNetworkPlugin) addContainerToNetwork(config *libcni.Network return nil, fmt.Errorf("Error building CNI config: %v", err) } - glog.V(3).Infof("Adding %s/%s to '%s' with CNI '%s' plugin and runtime: %+v", namespace, name, config.Network.Name, config.Network.Type, rt) + klog.V(3).Infof("Adding %s/%s to '%s' with CNI '%s' plugin and runtime: %+v", namespace, name, config.Network.Name, config.Network.Type, rt) // The network plugin can take up to 3 seconds to execute, // so yield the lock while it runs. plugin.mu.Unlock() @@ -584,7 +584,7 @@ func (plugin *kubenetNetworkPlugin) delContainerFromNetwork(config *libcni.Netwo return fmt.Errorf("Error building CNI config: %v", err) } - glog.V(3).Infof("Removing %s/%s from '%s' with CNI '%s' plugin and runtime: %+v", namespace, name, config.Network.Name, config.Network.Type, rt) + klog.V(3).Infof("Removing %s/%s from '%s' with CNI '%s' plugin and runtime: %+v", namespace, name, config.Network.Name, config.Network.Type, rt) err = plugin.cniConfig.DelNetwork(config, rt) // The pod may not get deleted successfully at the first time. // Ignore "no such file or directory" error in case the network has already been deleted in previous attempts. @@ -609,40 +609,40 @@ func (plugin *kubenetNetworkPlugin) shaper() bandwidth.BandwidthShaper { func (plugin *kubenetNetworkPlugin) syncEbtablesDedupRules(macAddr net.HardwareAddr) { if plugin.ebtables == nil { plugin.ebtables = utilebtables.New(plugin.execer) - glog.V(3).Infof("Flushing dedup chain") + klog.V(3).Infof("Flushing dedup chain") if err := plugin.ebtables.FlushChain(utilebtables.TableFilter, dedupChain); err != nil { - glog.Errorf("Failed to flush dedup chain: %v", err) + klog.Errorf("Failed to flush dedup chain: %v", err) } } _, err := plugin.ebtables.GetVersion() if err != nil { - glog.Warningf("Failed to get ebtables version. Skip syncing ebtables dedup rules: %v", err) + klog.Warningf("Failed to get ebtables version. 
Skip syncing ebtables dedup rules: %v", err) return } - glog.V(3).Infof("Filtering packets with ebtables on mac address: %v, gateway: %v, pod CIDR: %v", macAddr.String(), plugin.gateway.String(), plugin.podCidr) + klog.V(3).Infof("Filtering packets with ebtables on mac address: %v, gateway: %v, pod CIDR: %v", macAddr.String(), plugin.gateway.String(), plugin.podCidr) _, err = plugin.ebtables.EnsureChain(utilebtables.TableFilter, dedupChain) if err != nil { - glog.Errorf("Failed to ensure %v chain %v", utilebtables.TableFilter, dedupChain) + klog.Errorf("Failed to ensure %v chain %v", utilebtables.TableFilter, dedupChain) return } _, err = plugin.ebtables.EnsureRule(utilebtables.Append, utilebtables.TableFilter, utilebtables.ChainOutput, "-j", string(dedupChain)) if err != nil { - glog.Errorf("Failed to ensure %v chain %v jump to %v chain: %v", utilebtables.TableFilter, utilebtables.ChainOutput, dedupChain, err) + klog.Errorf("Failed to ensure %v chain %v jump to %v chain: %v", utilebtables.TableFilter, utilebtables.ChainOutput, dedupChain, err) return } commonArgs := []string{"-p", "IPv4", "-s", macAddr.String(), "-o", "veth+"} _, err = plugin.ebtables.EnsureRule(utilebtables.Prepend, utilebtables.TableFilter, dedupChain, append(commonArgs, "--ip-src", plugin.gateway.String(), "-j", "ACCEPT")...) if err != nil { - glog.Errorf("Failed to ensure packets from cbr0 gateway to be accepted") + klog.Errorf("Failed to ensure packets from cbr0 gateway to be accepted") return } _, err = plugin.ebtables.EnsureRule(utilebtables.Append, utilebtables.TableFilter, dedupChain, append(commonArgs, "--ip-src", plugin.podCidr, "-j", "DROP")...) if err != nil { - glog.Errorf("Failed to ensure packets from podCidr but has mac address of cbr0 to get dropped.") + klog.Errorf("Failed to ensure packets from podCidr but has mac address of cbr0 to get dropped.") return } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet/kubenet_unsupported.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet/kubenet_unsupported.go index ebb56e40ec3a..d2a59126b3e8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet/kubenet_unsupported.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet/kubenet_unsupported.go @@ -42,7 +42,7 @@ func (plugin *kubenetNetworkPlugin) Name() string { return "kubenet" } -func (plugin *kubenetNetworkPlugin) SetUpPod(namespace string, name string, id kubecontainer.ContainerID, annotations map[string]string) error { +func (plugin *kubenetNetworkPlugin) SetUpPod(namespace string, name string, id kubecontainer.ContainerID, annotations, options map[string]string) error { return fmt.Errorf("Kubenet is not supported in this build") } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/plugins.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/plugins.go index bcef49f77c58..c67c1a355b6f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/plugins.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/plugins.go @@ -23,11 +23,11 @@ import ( "sync" "time" - "github.com/golang/glog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" utilsets "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/klog" kubeletconfig 
"k8s.io/kubernetes/pkg/kubelet/apis/config" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport" @@ -63,7 +63,7 @@ type NetworkPlugin interface { // SetUpPod is the method called after the infra container of // the pod has been created but before the other containers of the // pod are launched. - SetUpPod(namespace string, name string, podSandboxID kubecontainer.ContainerID, annotations map[string]string) error + SetUpPod(namespace string, name string, podSandboxID kubecontainer.ContainerID, annotations, options map[string]string) error // TearDownPod is the method called before a pod's infra container will be deleted TearDownPod(namespace string, name string, podSandboxID kubecontainer.ContainerID) error @@ -156,7 +156,7 @@ func InitNetworkPlugin(plugins []NetworkPlugin, networkPluginName string, host H if err != nil { allErrs = append(allErrs, fmt.Errorf("Network plugin %q failed init: %v", networkPluginName, err)) } else { - glog.V(1).Infof("Loaded network plugin %q", networkPluginName) + klog.V(1).Infof("Loaded network plugin %q", networkPluginName) } } else { allErrs = append(allErrs, fmt.Errorf("Network plugin %q not found.", networkPluginName)) @@ -183,12 +183,12 @@ func (plugin *NoopNetworkPlugin) Init(host Host, hairpinMode kubeletconfig.Hairp // it was built-in. utilexec.New().Command("modprobe", "br-netfilter").CombinedOutput() if err := plugin.Sysctl.SetSysctl(sysctlBridgeCallIPTables, 1); err != nil { - glog.Warningf("can't set sysctl %s: %v", sysctlBridgeCallIPTables, err) + klog.Warningf("can't set sysctl %s: %v", sysctlBridgeCallIPTables, err) } if val, err := plugin.Sysctl.GetSysctl(sysctlBridgeCallIP6Tables); err == nil { if val != 1 { if err = plugin.Sysctl.SetSysctl(sysctlBridgeCallIP6Tables, 1); err != nil { - glog.Warningf("can't set sysctl %s: %v", sysctlBridgeCallIP6Tables, err) + klog.Warningf("can't set sysctl %s: %v", sysctlBridgeCallIP6Tables, err) } } } @@ -207,7 +207,7 @@ func (plugin *NoopNetworkPlugin) Capabilities() utilsets.Int { return utilsets.NewInt() } -func (plugin *NoopNetworkPlugin) SetUpPod(namespace string, name string, id kubecontainer.ContainerID, annotations map[string]string) error { +func (plugin *NoopNetworkPlugin) SetUpPod(namespace string, name string, id kubecontainer.ContainerID, annotations, options map[string]string) error { return nil } @@ -334,12 +334,12 @@ func (pm *PluginManager) podUnlock(fullPodName string) { lock, ok := pm.pods[fullPodName] if !ok { - glog.Warningf("Unbalanced pod lock unref for %s", fullPodName) + klog.Warningf("Unbalanced pod lock unref for %s", fullPodName) return } else if lock.refcount == 0 { // This should never ever happen, but handle it anyway delete(pm.pods, fullPodName) - glog.Warningf("Pod lock for %s still in map with zero refcount", fullPodName) + klog.Warningf("Pod lock for %s still in map with zero refcount", fullPodName) return } lock.refcount-- @@ -368,14 +368,14 @@ func (pm *PluginManager) GetPodNetworkStatus(podNamespace, podName string, id ku return netStatus, nil } -func (pm *PluginManager) SetUpPod(podNamespace, podName string, id kubecontainer.ContainerID, annotations map[string]string) error { +func (pm *PluginManager) SetUpPod(podNamespace, podName string, id kubecontainer.ContainerID, annotations, options map[string]string) error { defer recordOperation("set_up_pod", time.Now()) fullPodName := kubecontainer.BuildPodFullName(podName, podNamespace) pm.podLock(fullPodName).Lock() defer pm.podUnlock(fullPodName) - 
glog.V(3).Infof("Calling network plugin %s to set up pod %q", pm.plugin.Name(), fullPodName) - if err := pm.plugin.SetUpPod(podNamespace, podName, id, annotations); err != nil { + klog.V(3).Infof("Calling network plugin %s to set up pod %q", pm.plugin.Name(), fullPodName) + if err := pm.plugin.SetUpPod(podNamespace, podName, id, annotations, options); err != nil { return fmt.Errorf("NetworkPlugin %s failed to set up pod %q network: %v", pm.plugin.Name(), fullPodName, err) } @@ -388,7 +388,7 @@ func (pm *PluginManager) TearDownPod(podNamespace, podName string, id kubecontai pm.podLock(fullPodName).Lock() defer pm.podUnlock(fullPodName) - glog.V(3).Infof("Calling network plugin %s to tear down pod %q", pm.plugin.Name(), fullPodName) + klog.V(3).Infof("Calling network plugin %s to tear down pod %q", pm.plugin.Name(), fullPodName) if err := pm.plugin.TearDownPod(podNamespace, podName, id); err != nil { return fmt.Errorf("NetworkPlugin %s failed to teardown pod %q network: %v", pm.plugin.Name(), fullPodName, err) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/remote/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/remote/BUILD index a40ce09eaaea..06740b1f1603 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/remote/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/remote/BUILD @@ -13,8 +13,8 @@ go_library( "//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library", "//pkg/kubelet/dockershim:go_default_library", "//pkg/kubelet/util:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/remote/docker_server.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/remote/docker_server.go index 546c3b6c4abb..734f61cca04b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/remote/docker_server.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/remote/docker_server.go @@ -19,8 +19,8 @@ package remote import ( "fmt" - "github.com/golang/glog" "google.golang.org/grpc" + "k8s.io/klog" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" "k8s.io/kubernetes/pkg/kubelet/dockershim" "k8s.io/kubernetes/pkg/kubelet/util" @@ -52,11 +52,11 @@ func NewDockerServer(endpoint string, s dockershim.CRIService) *DockerServer { func (s *DockerServer) Start() error { // Start the internal service. 
if err := s.service.Start(); err != nil { - glog.Errorf("Unable to start docker service") + klog.Errorf("Unable to start docker service") return err } - glog.V(2).Infof("Start dockershim grpc server") + klog.V(2).Infof("Start dockershim grpc server") l, err := util.CreateListener(s.endpoint) if err != nil { return fmt.Errorf("failed to listen on %q: %v", s.endpoint, err) @@ -70,7 +70,7 @@ func (s *DockerServer) Start() error { runtimeapi.RegisterImageServiceServer(s.server, s.service) go func() { if err := s.server.Serve(l); err != nil { - glog.Fatalf("Failed to serve connections: %v", err) + klog.Fatalf("Failed to serve connections: %v", err) } }() return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/security_context.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/security_context.go index e2724357136f..17969a1172fd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/security_context.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/security_context.go @@ -137,8 +137,10 @@ func modifyHostConfig(sc *runtimeapi.LinuxContainerSecurityContext, hostConfig * hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, "no-new-privileges") } - hostConfig.MaskedPaths = sc.MaskedPaths - hostConfig.ReadonlyPaths = sc.ReadonlyPaths + if !hostConfig.Privileged { + hostConfig.MaskedPaths = sc.MaskedPaths + hostConfig.ReadonlyPaths = sc.ReadonlyPaths + } return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/errors.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/errors.go new file mode 100644 index 000000000000..eae36e5ee595 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/errors.go @@ -0,0 +1,21 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubelet + +const ( + NetworkNotReadyErrorMsg = "network is not ready" +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/BUILD index 164b3ed034dd..65968751a8d2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/BUILD @@ -11,6 +11,7 @@ go_test( srcs = [ "eviction_manager_test.go", "helpers_test.go", + "main_test.go", "memory_threshold_notifier_test.go", "mock_threshold_notifier_test.go", ], @@ -28,6 +29,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//vendor/github.com/stretchr/testify/mock:go_default_library", ], @@ -59,7 +61,7 @@ go_library( "//pkg/kubelet/server/stats:go_default_library", "//pkg/kubelet/types:go_default_library", "//pkg/kubelet/util/format:go_default_library", - "//pkg/scheduler/algorithm:go_default_library", + "//pkg/scheduler/api:go_default_library", "//pkg/scheduler/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", @@ -67,7 +69,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:linux": [ "//vendor/golang.org/x/sys/unix:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager.go index 2b6fe919b261..7e1ede9eeb75 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager.go @@ -22,7 +22,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -41,7 +41,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/server/stats" kubelettypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/kubelet/util/format" - "k8s.io/kubernetes/pkg/scheduler/algorithm" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" ) const ( @@ -106,14 +106,14 @@ func NewManager( clock clock.Clock, ) (Manager, lifecycle.PodAdmitHandler) { manager := &managerImpl{ - clock: clock, - killPodFunc: killPodFunc, - imageGC: imageGC, - containerGC: containerGC, - config: config, - recorder: recorder, - summaryProvider: summaryProvider, - nodeRef: nodeRef, + clock: clock, + killPodFunc: killPodFunc, + imageGC: imageGC, + containerGC: containerGC, + config: config, + recorder: recorder, + summaryProvider: summaryProvider, + nodeRef: nodeRef, nodeConditionsLastObservedAt: nodeConditionsObservedAt{}, thresholdsFirstObservedAt: thresholdsObservedAt{}, dedicatedImageFs: nil, @@ -145,7 +145,7 @@ func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAd // admit it if tolerates memory pressure taint, 
fail for other tolerations, e.g. OutOfDisk. if utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition) && v1helper.TolerationsTolerateTaint(attrs.Pod.Spec.Tolerations, &v1.Taint{ - Key: algorithm.TaintNodeMemoryPressure, + Key: schedulerapi.TaintNodeMemoryPressure, Effect: v1.TaintEffectNoSchedule, }) { return lifecycle.PodAdmitResult{Admit: true} @@ -153,7 +153,7 @@ func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAd } // reject pods when under memory pressure (if pod is best effort), or if under disk pressure. - glog.Warningf("Failed to admit pod %s - node has conditions: %v", format.Pod(attrs.Pod), m.nodeConditions) + klog.Warningf("Failed to admit pod %s - node has conditions: %v", format.Pod(attrs.Pod), m.nodeConditions) return lifecycle.PodAdmitResult{ Admit: false, Reason: Reason, @@ -164,7 +164,7 @@ func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAd // Start starts the control loop to observe and response to low compute resources. func (m *managerImpl) Start(diskInfoProvider DiskInfoProvider, podFunc ActivePodsFunc, podCleanedUpFunc PodCleanedUpFunc, monitoringInterval time.Duration) { thresholdHandler := func(message string) { - glog.Infof(message) + klog.Infof(message) m.synchronize(diskInfoProvider, podFunc) } if m.config.KernelMemcgNotification { @@ -172,7 +172,7 @@ func (m *managerImpl) Start(diskInfoProvider DiskInfoProvider, podFunc ActivePod if threshold.Signal == evictionapi.SignalMemoryAvailable || threshold.Signal == evictionapi.SignalAllocatableMemoryAvailable { notifier, err := NewMemoryThresholdNotifier(threshold, m.config.PodCgroupRoot, &CgroupNotifierFactory{}, thresholdHandler) if err != nil { - glog.Warningf("eviction manager: failed to create memory threshold notifier: %v", err) + klog.Warningf("eviction manager: failed to create memory threshold notifier: %v", err) } else { go notifier.Start() m.thresholdNotifiers = append(m.thresholdNotifiers, notifier) @@ -184,7 +184,7 @@ func (m *managerImpl) Start(diskInfoProvider DiskInfoProvider, podFunc ActivePod go func() { for { if evictedPods := m.synchronize(diskInfoProvider, podFunc); evictedPods != nil { - glog.Infof("eviction manager: pods %s evicted, waiting for pod to be cleaned up", format.Pods(evictedPods)) + klog.Infof("eviction manager: pods %s evicted, waiting for pod to be cleaned up", format.Pods(evictedPods)) m.waitForPodsCleanup(podCleanedUpFunc, evictedPods) } else { time.Sleep(monitoringInterval) @@ -223,7 +223,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act return nil } - glog.V(3).Infof("eviction manager: synchronize housekeeping") + klog.V(3).Infof("eviction manager: synchronize housekeeping") // build the ranking functions (if not yet known) // TODO: have a function in cadvisor that lets us know if global housekeeping has completed if m.dedicatedImageFs == nil { @@ -240,7 +240,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act updateStats := true summary, err := m.summaryProvider.Get(updateStats) if err != nil { - glog.Errorf("eviction manager: failed to get get summary stats: %v", err) + klog.Errorf("eviction manager: failed to get summary stats: %v", err) return nil } @@ -248,7 +248,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act m.thresholdsLastUpdated = m.clock.Now() for _, notifier := range m.thresholdNotifiers { if err := notifier.UpdateThreshold(summary); err != nil { - glog.Warningf("eviction manager: failed 
to update %s: %v", notifier.Description(), err) + klog.Warningf("eviction manager: failed to update %s: %v", notifier.Description(), err) } } } @@ -275,7 +275,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act // the set of node conditions that are triggered by currently observed thresholds nodeConditions := nodeConditions(thresholds) if len(nodeConditions) > 0 { - glog.V(3).Infof("eviction manager: node conditions - observed: %v", nodeConditions) + klog.V(3).Infof("eviction manager: node conditions - observed: %v", nodeConditions) } // track when a node condition was last observed @@ -284,7 +284,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act // node conditions report true if it has been observed within the transition period window nodeConditions = nodeConditionsObservedSince(nodeConditionsLastObservedAt, m.config.PressureTransitionPeriod, now) if len(nodeConditions) > 0 { - glog.V(3).Infof("eviction manager: node conditions - transition period not met: %v", nodeConditions) + klog.V(3).Infof("eviction manager: node conditions - transition period not met: %v", nodeConditions) } // determine the set of thresholds we need to drive eviction behavior (i.e. all grace periods are met) @@ -314,7 +314,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act } if len(thresholds) == 0 { - glog.V(3).Infof("eviction manager: no resources are starved") + klog.V(3).Infof("eviction manager: no resources are starved") return nil } @@ -323,39 +323,39 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act thresholdToReclaim := thresholds[0] resourceToReclaim, found := signalToResource[thresholdToReclaim.Signal] if !found { - glog.V(3).Infof("eviction manager: threshold %s was crossed, but reclaim is not implemented for this threshold.", thresholdToReclaim.Signal) + klog.V(3).Infof("eviction manager: threshold %s was crossed, but reclaim is not implemented for this threshold.", thresholdToReclaim.Signal) return nil } - glog.Warningf("eviction manager: attempting to reclaim %v", resourceToReclaim) + klog.Warningf("eviction manager: attempting to reclaim %v", resourceToReclaim) // record an event about the resources we are now attempting to reclaim via eviction m.recorder.Eventf(m.nodeRef, v1.EventTypeWarning, "EvictionThresholdMet", "Attempting to reclaim %s", resourceToReclaim) // check if there are node-level resources we can reclaim to reduce pressure before evicting end-user pods. if m.reclaimNodeLevelResources(thresholdToReclaim.Signal, resourceToReclaim) { - glog.Infof("eviction manager: able to reduce %v pressure without evicting pods.", resourceToReclaim) + klog.Infof("eviction manager: able to reduce %v pressure without evicting pods.", resourceToReclaim) return nil } - glog.Infof("eviction manager: must evict pod(s) to reclaim %v", resourceToReclaim) + klog.Infof("eviction manager: must evict pod(s) to reclaim %v", resourceToReclaim) // rank the pods for eviction rank, ok := m.signalToRankFunc[thresholdToReclaim.Signal] if !ok { - glog.Errorf("eviction manager: no ranking function for signal %s", thresholdToReclaim.Signal) + klog.Errorf("eviction manager: no ranking function for signal %s", thresholdToReclaim.Signal) return nil } // the only candidates viable for eviction are those pods that had anything running. 
if len(activePods) == 0 { - glog.Errorf("eviction manager: eviction thresholds have been met, but no pods are active to evict") + klog.Errorf("eviction manager: eviction thresholds have been met, but no pods are active to evict") return nil } // rank the running pods for eviction for the specified resource rank(activePods, statsFunc) - glog.Infof("eviction manager: pods ranked for eviction: %s", format.Pods(activePods)) + klog.Infof("eviction manager: pods ranked for eviction: %s", format.Pods(activePods)) //record age of metrics for met thresholds that we are using for evictions. for _, t := range thresholds { @@ -377,7 +377,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act return []*v1.Pod{pod} } } - glog.Infof("eviction manager: unable to evict any pods from the node") + klog.Infof("eviction manager: unable to evict any pods from the node") return nil } @@ -389,7 +389,7 @@ func (m *managerImpl) waitForPodsCleanup(podCleanedUpFunc PodCleanedUpFunc, pods for { select { case <-timeout.C(): - glog.Warningf("eviction manager: timed out waiting for pods %s to be cleaned up", format.Pods(pods)) + klog.Warningf("eviction manager: timed out waiting for pods %s to be cleaned up", format.Pods(pods)) return case <-ticker.C(): for i, pod := range pods { @@ -397,7 +397,7 @@ func (m *managerImpl) waitForPodsCleanup(podCleanedUpFunc PodCleanedUpFunc, pods break } if i == len(pods)-1 { - glog.Infof("eviction manager: pods %s successfully cleaned up", format.Pods(pods)) + klog.Infof("eviction manager: pods %s successfully cleaned up", format.Pods(pods)) return } } @@ -411,14 +411,14 @@ func (m *managerImpl) reclaimNodeLevelResources(signalToReclaim evictionapi.Sign for _, nodeReclaimFunc := range nodeReclaimFuncs { // attempt to reclaim the pressured resource. if err := nodeReclaimFunc(); err != nil { - glog.Warningf("eviction manager: unexpected error when attempting to reduce %v pressure: %v", resourceToReclaim, err) + klog.Warningf("eviction manager: unexpected error when attempting to reduce %v pressure: %v", resourceToReclaim, err) } } if len(nodeReclaimFuncs) > 0 { summary, err := m.summaryProvider.Get(true) if err != nil { - glog.Errorf("eviction manager: failed to get get summary stats after resource reclaim: %v", err) + klog.Errorf("eviction manager: failed to get summary stats after resource reclaim: %v", err) return false } @@ -502,7 +502,7 @@ func (m *managerImpl) podEphemeralStorageLimitEviction(podStats statsapi.PodStat } podEphemeralUsage, err := podLocalEphemeralStorageUsage(podStats, pod, fsStatsSet) if err != nil { - glog.Errorf("eviction manager: error getting pod disk usage %v", err) + klog.Errorf("eviction manager: error getting pod disk usage %v", err) return false } @@ -545,7 +545,7 @@ func (m *managerImpl) evictPod(pod *v1.Pod, gracePeriodOverride int64, evictMsg // do not evict such pods. Static pods are not re-admitted after evictions. // https://github.com/kubernetes/kubernetes/issues/40573 has more details. if kubelettypes.IsCriticalPod(pod) && kubepod.IsStaticPod(pod) { - glog.Errorf("eviction manager: cannot evict a critical static pod %s", format.Pod(pod)) + klog.Errorf("eviction manager: cannot evict a critical static pod %s", format.Pod(pod)) return false } status := v1.PodStatus{ @@ -558,9 +558,9 @@ func (m *managerImpl) evictPod(pod *v1.Pod, gracePeriodOverride int64, evictMsg // this is a blocking call and should only return when the pod and its containers are killed. 
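// killPodFunc is the hook injected through NewManager above, which keeps the
// eviction manager decoupled from the container runtime. A sketch of a stub
// with the same shape, e.g. for tests (hypothetical):
//
//	killPod := func(pod *v1.Pod, status v1.PodStatus, grace *int64) error {
//		klog.Infof("would kill %s/%s", pod.Namespace, pod.Name)
//		return nil
//	}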
err := m.killPodFunc(pod, status, &gracePeriodOverride) if err != nil { - glog.Errorf("eviction manager: pod %s failed to evict %v", format.Pod(pod), err) + klog.Errorf("eviction manager: pod %s failed to evict %v", format.Pod(pod), err) } else { - glog.Infof("eviction manager: pod %s is evicted successfully", format.Pod(pod)) + klog.Infof("eviction manager: pod %s is evicted successfully", format.Pod(pod)) } return true } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/helpers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/helpers.go index c078ce93d00d..ef34b97a0ba4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/helpers.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/helpers.go @@ -23,10 +23,10 @@ import ( "strings" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" "k8s.io/kubernetes/pkg/features" statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api" @@ -732,7 +732,7 @@ func makeSignalObservations(summary *statsapi.Summary) (signalObservations, stat } } if allocatableContainer, err := getSysContainer(summary.Node.SystemContainers, statsapi.SystemContainerPods); err != nil { - glog.Errorf("eviction manager: failed to construct signal: %q error: %v", evictionapi.SignalAllocatableMemoryAvailable, err) + klog.Errorf("eviction manager: failed to construct signal: %q error: %v", evictionapi.SignalAllocatableMemoryAvailable, err) } else { if memory := allocatableContainer.Memory; memory != nil && memory.AvailableBytes != nil && memory.WorkingSetBytes != nil { result[evictionapi.SignalAllocatableMemoryAvailable] = signalObservation{ @@ -805,7 +805,7 @@ func thresholdsMet(thresholds []evictionapi.Threshold, observations signalObserv threshold := thresholds[i] observed, found := observations[threshold.Signal] if !found { - glog.Warningf("eviction manager: no observation found for eviction signal %v", threshold.Signal) + klog.Warningf("eviction manager: no observation found for eviction signal %v", threshold.Signal) continue } // determine if we have met the specified threshold @@ -828,20 +828,20 @@ func thresholdsMet(thresholds []evictionapi.Threshold, observations signalObserv } func debugLogObservations(logPrefix string, observations signalObservations) { - if !glog.V(3) { + if !klog.V(3) { return } for k, v := range observations { if !v.time.IsZero() { - glog.Infof("eviction manager: %v: signal=%v, available: %v, capacity: %v, time: %v", logPrefix, k, v.available, v.capacity, v.time) + klog.Infof("eviction manager: %v: signal=%v, available: %v, capacity: %v, time: %v", logPrefix, k, v.available, v.capacity, v.time) } else { - glog.Infof("eviction manager: %v: signal=%v, available: %v, capacity: %v", logPrefix, k, v.available, v.capacity) + klog.Infof("eviction manager: %v: signal=%v, available: %v, capacity: %v", logPrefix, k, v.available, v.capacity) } } } func debugLogThresholdsWithObservation(logPrefix string, thresholds []evictionapi.Threshold, observations signalObservations) { - if !glog.V(3) { + if !klog.V(3) { return } for i := range thresholds { @@ -849,9 +849,9 @@ func debugLogThresholdsWithObservation(logPrefix string, thresholds []evictionap observed, found := observations[threshold.Signal] if found { quantity := evictionapi.GetThresholdQuantity(threshold.Value, observed.capacity) - glog.Infof("eviction 
manager: %v: threshold [signal=%v, quantity=%v] observed %v", logPrefix, threshold.Signal, quantity, observed.available) + klog.Infof("eviction manager: %v: threshold [signal=%v, quantity=%v] observed %v", logPrefix, threshold.Signal, quantity, observed.available) } else { - glog.Infof("eviction manager: %v: threshold [signal=%v] had no observation", logPrefix, threshold.Signal) + klog.Infof("eviction manager: %v: threshold [signal=%v] had no observation", logPrefix, threshold.Signal) } } } @@ -862,7 +862,7 @@ func thresholdsUpdatedStats(thresholds []evictionapi.Threshold, observations, la threshold := thresholds[i] observed, found := observations[threshold.Signal] if !found { - glog.Warningf("eviction manager: no observation found for eviction signal %v", threshold.Signal) + klog.Warningf("eviction manager: no observation found for eviction signal %v", threshold.Signal) continue } last, found := lastObservations[threshold.Signal] @@ -892,7 +892,7 @@ func thresholdsMetGracePeriod(observedAt thresholdsObservedAt, now time.Time) [] for threshold, at := range observedAt { duration := now.Sub(at) if duration < threshold.GracePeriod { - glog.V(2).Infof("eviction manager: eviction criteria not yet met for %v, duration: %v", formatThreshold(threshold), duration) + klog.V(2).Infof("eviction manager: eviction criteria not yet met for %v, duration: %v", formatThreshold(threshold), duration) continue } results = append(results, threshold) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/memory_threshold_notifier.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/memory_threshold_notifier.go index 8d86944f39d0..b60393e8c64e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/memory_threshold_notifier.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/memory_threshold_notifier.go @@ -20,7 +20,7 @@ import ( "fmt" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/resource" statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" @@ -71,7 +71,7 @@ func NewMemoryThresholdNotifier(threshold evictionapi.Threshold, cgroupRoot stri } func (m *memoryThresholdNotifier) Start() { - glog.Infof("eviction manager: created %s", m.Description()) + klog.Infof("eviction manager: created %s", m.Description()) for range m.events { m.handler(fmt.Sprintf("eviction manager: %s crossed", m.Description())) } @@ -98,7 +98,7 @@ func (m *memoryThresholdNotifier) UpdateThreshold(summary *statsapi.Summary) err memcgThreshold.Sub(*evictionThresholdQuantity) memcgThreshold.Add(*inactiveFile) - glog.V(3).Infof("eviction manager: setting %s to %s\n", m.Description(), memcgThreshold.String()) + klog.V(3).Infof("eviction manager: setting %s to %s\n", m.Description(), memcgThreshold.String()) if m.notifier != nil { m.notifier.Stop() } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/threshold_notifier_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/threshold_notifier_linux.go index 4fabd7345a5c..1d097fd293fd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/threshold_notifier_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/threshold_notifier_linux.go @@ -21,8 +21,8 @@ import ( "sync" "time" - "github.com/golang/glog" "golang.org/x/sys/unix" + "k8s.io/klog" ) const ( @@ -104,7 +104,7 @@ func (n *linuxCgroupNotifier) Start(eventCh chan<- struct{}) { Events: unix.EPOLLIN, }) if err != nil { - 
glog.Warningf("eviction manager: error adding epoll eventfd: %v", err) + klog.Warningf("eviction manager: error adding epoll eventfd: %v", err) return } for { @@ -115,7 +115,7 @@ func (n *linuxCgroupNotifier) Start(eventCh chan<- struct{}) { } event, err := wait(n.epfd, n.eventfd, notifierRefreshInterval) if err != nil { - glog.Warningf("eviction manager: error while waiting for memcg events: %v", err) + klog.Warningf("eviction manager: error while waiting for memcg events: %v", err) return } else if !event { // Timeout on wait. This is expected if the threshold was not crossed @@ -125,7 +125,7 @@ func (n *linuxCgroupNotifier) Start(eventCh chan<- struct{}) { buf := make([]byte, eventSize) _, err = unix.Read(n.eventfd, buf) if err != nil { - glog.Warningf("eviction manager: error reading memcg events: %v", err) + klog.Warningf("eviction manager: error reading memcg events: %v", err) return } eventCh <- struct{}{} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/threshold_notifier_unsupported.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/threshold_notifier_unsupported.go index 7078c7865a91..afa92fe5fcea 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/threshold_notifier_unsupported.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/threshold_notifier_unsupported.go @@ -18,11 +18,11 @@ limitations under the License. package eviction -import "github.com/golang/glog" +import "k8s.io/klog" // NewCgroupNotifier creates a cgroup notifier that does nothing because cgroups do not exist on non-linux systems. func NewCgroupNotifier(path, attribute string, threshold int64) (CgroupNotifier, error) { - glog.V(5).Infof("cgroup notifications not supported") + klog.V(5).Infof("cgroup notifications not supported") return &unsupportedThresholdNotifier{}, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/images/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/images/BUILD index 5f5594b3f3c8..cbb23f945e1d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/images/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/images/BUILD @@ -30,7 +30,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", "//vendor/github.com/docker/distribution/reference:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/images/helpers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/images/helpers.go index b8b5bd3824fc..231ba730ddfe 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/images/helpers.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/images/helpers.go @@ -46,5 +46,5 @@ func (ts throttledImageService) PullImage(image kubecontainer.ImageSpec, secrets if ts.limiter.TryAccept() { return ts.ImageService.PullImage(image, secrets) } - return "", fmt.Errorf("pull QPS exceeded.") + return "", fmt.Errorf("pull QPS exceeded") } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/images/image_gc_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/images/image_gc_manager.go index 2c58cb1cfbeb..4c6feabb17b4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/images/image_gc_manager.go +++ 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/images/image_gc_manager.go @@ -24,7 +24,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/errors" @@ -44,8 +44,7 @@ type StatsProvider interface { ImageFsStats() (*statsapi.FsStats, error) } -// Manages lifecycle of all images. -// +// ImageGCManager is an interface for managing lifecycle of all images. // Implementation is thread-safe. type ImageGCManager interface { // Applies the garbage collection policy. Errors include being unable to free @@ -61,7 +60,7 @@ type ImageGCManager interface { DeleteUnusedImages() error } -// A policy for garbage collecting images. Policy defines an allowed band in +// ImageGCPolicy is a policy for garbage collecting images. Policy defines an allowed band in // which garbage collection will be run. type ImageGCPolicy struct { // Any usage above this threshold will always trigger garbage collection. @@ -144,6 +143,7 @@ type imageRecord struct { size int64 } +// NewImageGCManager instantiates a new ImageGCManager object. func NewImageGCManager(runtime container.Runtime, statsProvider StatsProvider, recorder record.EventRecorder, nodeRef *v1.ObjectReference, policy ImageGCPolicy, sandboxImage string) (ImageGCManager, error) { // Validate policy. if policy.HighThresholdPercent < 0 || policy.HighThresholdPercent > 100 { @@ -178,7 +178,7 @@ func (im *realImageGCManager) Start() { } _, err := im.detectImages(ts) if err != nil { - glog.Warningf("[imageGCManager] Failed to monitor images: %v", err) + klog.Warningf("[imageGCManager] Failed to monitor images: %v", err) } else { im.initialized = true } @@ -189,7 +189,7 @@ func (im *realImageGCManager) Start() { go wait.Until(func() { images, err := im.runtime.ListImages() if err != nil { - glog.Warningf("[imageGCManager] Failed to update image list: %v", err) + klog.Warningf("[imageGCManager] Failed to update image list: %v", err) } else { im.imageCache.set(images) } @@ -223,7 +223,7 @@ func (im *realImageGCManager) detectImages(detectTime time.Time) (sets.String, e // Make a set of images in use by containers. for _, pod := range pods { for _, container := range pod.Containers { - glog.V(5).Infof("Pod %s/%s, container %s uses image %s(%s)", pod.Namespace, pod.Name, container.Name, container.Image, container.ImageID) + klog.V(5).Infof("Pod %s/%s, container %s uses image %s(%s)", pod.Namespace, pod.Name, container.Name, container.Image, container.ImageID) imagesInUse.Insert(container.ImageID) } } @@ -234,12 +234,12 @@ func (im *realImageGCManager) detectImages(detectTime time.Time) (sets.String, e im.imageRecordsLock.Lock() defer im.imageRecordsLock.Unlock() for _, image := range images { - glog.V(5).Infof("Adding image ID %s to currentImages", image.ID) + klog.V(5).Infof("Adding image ID %s to currentImages", image.ID) currentImages.Insert(image.ID) // New image, set it as detected now. if _, ok := im.imageRecords[image.ID]; !ok { - glog.V(5).Infof("Image ID %s is new", image.ID) + klog.V(5).Infof("Image ID %s is new", image.ID) im.imageRecords[image.ID] = &imageRecord{ firstDetected: detectTime, } @@ -247,18 +247,18 @@ func (im *realImageGCManager) detectImages(detectTime time.Time) (sets.String, e // Set last used time to now if the image is being used. 
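// Note the asymmetry: firstDetected is written once, when an image first
// appears, while lastUsed is refreshed on every detectImages pass in which
// some container still references the image; freeSpace below consumes both.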
if isImageUsed(image.ID, imagesInUse) { - glog.V(5).Infof("Setting Image ID %s lastUsed to %v", image.ID, now) + klog.V(5).Infof("Setting Image ID %s lastUsed to %v", image.ID, now) im.imageRecords[image.ID].lastUsed = now } - glog.V(5).Infof("Image ID %s has size %d", image.ID, image.Size) + klog.V(5).Infof("Image ID %s has size %d", image.ID, image.Size) im.imageRecords[image.ID].size = image.Size } // Remove old images from our records. for image := range im.imageRecords { if !currentImages.Has(image) { - glog.V(5).Infof("Image ID %s is no longer present; removing from imageRecords", image) + klog.V(5).Infof("Image ID %s is no longer present; removing from imageRecords", image) delete(im.imageRecords, image) } } @@ -282,7 +282,7 @@ func (im *realImageGCManager) GarbageCollect() error { } if available > capacity { - glog.Warningf("available %d is larger than capacity %d", available, capacity) + klog.Warningf("available %d is larger than capacity %d", available, capacity) available = capacity } @@ -297,7 +297,7 @@ func (im *realImageGCManager) GarbageCollect() error { usagePercent := 100 - int(available*100/capacity) if usagePercent >= im.policy.HighThresholdPercent { amountToFree := capacity*int64(100-im.policy.LowThresholdPercent)/100 - available - glog.Infof("[imageGCManager]: Disk usage on image filesystem is at %d%% which is over the high threshold (%d%%). Trying to free %d bytes", usagePercent, im.policy.HighThresholdPercent, amountToFree) + klog.Infof("[imageGCManager]: Disk usage on image filesystem is at %d%% which is over the high threshold (%d%%). Trying to free %d bytes down to the low threshold (%d%%).", usagePercent, im.policy.HighThresholdPercent, amountToFree, im.policy.LowThresholdPercent) freed, err := im.freeSpace(amountToFree, time.Now()) if err != nil { return err @@ -314,7 +314,7 @@ func (im *realImageGCManager) GarbageCollect() error { } func (im *realImageGCManager) DeleteUnusedImages() error { - glog.Infof("attempting to delete unused images") + klog.Infof("attempting to delete unused images") _, err := im.freeSpace(math.MaxInt64, time.Now()) return err } @@ -338,7 +338,7 @@ func (im *realImageGCManager) freeSpace(bytesToFree int64, freeTime time.Time) ( images := make([]evictionInfo, 0, len(im.imageRecords)) for image, record := range im.imageRecords { if isImageUsed(image, imagesInUse) { - glog.V(5).Infof("Image ID %s is being used", image) + klog.V(5).Infof("Image ID %s is being used", image) continue } images = append(images, evictionInfo{ @@ -352,10 +352,10 @@ func (im *realImageGCManager) freeSpace(bytesToFree int64, freeTime time.Time) ( var deletionErrors []error spaceFreed := int64(0) for _, image := range images { - glog.V(5).Infof("Evaluating image ID %s for possible garbage collection", image.id) + klog.V(5).Infof("Evaluating image ID %s for possible garbage collection", image.id) // Images that are currently in used were given a newer lastUsed. if image.lastUsed.Equal(freeTime) || image.lastUsed.After(freeTime) { - glog.V(5).Infof("Image ID %s has lastUsed=%v which is >= freeTime=%v, not eligible for garbage collection", image.id, image.lastUsed, freeTime) + klog.V(5).Infof("Image ID %s has lastUsed=%v which is >= freeTime=%v, not eligible for garbage collection", image.id, image.lastUsed, freeTime) continue } @@ -363,12 +363,12 @@ func (im *realImageGCManager) freeSpace(bytesToFree int64, freeTime time.Time) ( // In such a case, the image may have just been pulled down, and will be used by a container right away. 
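// Worked example for the surrounding thresholds (illustrative numbers): with
// capacity=100GiB, available=10GiB, HighThresholdPercent=90 and
// LowThresholdPercent=80, usagePercent is 90, GC triggers, and
// amountToFree = 100*(100-80)/100 - 10 = 10GiB, i.e. free back down to the
// low threshold; images younger than MinAge are skipped even if unused.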
if freeTime.Sub(image.firstDetected) < im.policy.MinAge { - glog.V(5).Infof("Image ID %s has age %v which is less than the policy's minAge of %v, not eligible for garbage collection", image.id, freeTime.Sub(image.firstDetected), im.policy.MinAge) + klog.V(5).Infof("Image ID %s has age %v which is less than the policy's minAge of %v, not eligible for garbage collection", image.id, freeTime.Sub(image.firstDetected), im.policy.MinAge) continue } // Remove image. Continue despite errors. - glog.Infof("[imageGCManager]: Removing image %q to free %d bytes", image.id, image.size) + klog.Infof("[imageGCManager]: Removing image %q to free %d bytes", image.id, image.size) err := im.runtime.RemoveImage(container.ImageSpec{Image: image.id}) if err != nil { deletionErrors = append(deletionErrors, err) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/images/image_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/images/image_manager.go index 413df5a727d6..36b36a9e9916 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/images/image_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/images/image_manager.go @@ -20,10 +20,10 @@ import ( "fmt" dockerref "github.com/docker/distribution/reference" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/flowcontrol" + "k8s.io/klog" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/pkg/util/parsers" @@ -40,6 +40,7 @@ type imageManager struct { var _ ImageManager = &imageManager{} +// NewImageManager instantiates a new ImageManager object. func NewImageManager(recorder record.EventRecorder, imageService kubecontainer.ImageService, imageBackOff *flowcontrol.Backoff, serialized bool, qps float32, burst int) ImageManager { imageService = throttleImagePulling(imageService, qps, burst) @@ -87,14 +88,14 @@ func (m *imageManager) EnsureImageExists(pod *v1.Pod, container *v1.Container, p logPrefix := fmt.Sprintf("%s/%s", pod.Name, container.Image) ref, err := kubecontainer.GenerateContainerRef(pod, container) if err != nil { - glog.Errorf("Couldn't make a ref to pod %v, container %v: '%v'", pod.Name, container.Name, err) + klog.Errorf("Couldn't make a ref to pod %v, container %v: '%v'", pod.Name, container.Name, err) } // If the image contains no tag or digest, a default tag should be applied. 
image, err := applyDefaultImageTag(container.Image) if err != nil { msg := fmt.Sprintf("Failed to apply default image tag %q: %v", container.Image, err) - m.logIt(ref, v1.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, glog.Warning) + m.logIt(ref, v1.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, klog.Warning) return "", msg, ErrInvalidImageName } @@ -102,7 +103,7 @@ func (m *imageManager) EnsureImageExists(pod *v1.Pod, container *v1.Container, p imageRef, err := m.imageService.GetImageRef(spec) if err != nil { msg := fmt.Sprintf("Failed to inspect image %q: %v", container.Image, err) - m.logIt(ref, v1.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, glog.Warning) + m.logIt(ref, v1.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, klog.Warning) return "", msg, ErrImageInspect } @@ -110,36 +111,35 @@ func (m *imageManager) EnsureImageExists(pod *v1.Pod, container *v1.Container, p if !shouldPullImage(container, present) { if present { msg := fmt.Sprintf("Container image %q already present on machine", container.Image) - m.logIt(ref, v1.EventTypeNormal, events.PulledImage, logPrefix, msg, glog.Info) + m.logIt(ref, v1.EventTypeNormal, events.PulledImage, logPrefix, msg, klog.Info) return imageRef, "", nil - } else { - msg := fmt.Sprintf("Container image %q is not present with pull policy of Never", container.Image) - m.logIt(ref, v1.EventTypeWarning, events.ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning) - return "", msg, ErrImageNeverPull } + msg := fmt.Sprintf("Container image %q is not present with pull policy of Never", container.Image) + m.logIt(ref, v1.EventTypeWarning, events.ErrImageNeverPullPolicy, logPrefix, msg, klog.Warning) + return "", msg, ErrImageNeverPull } backOffKey := fmt.Sprintf("%s_%s", pod.UID, container.Image) if m.backOff.IsInBackOffSinceUpdate(backOffKey, m.backOff.Clock.Now()) { msg := fmt.Sprintf("Back-off pulling image %q", container.Image) - m.logIt(ref, v1.EventTypeNormal, events.BackOffPullImage, logPrefix, msg, glog.Info) + m.logIt(ref, v1.EventTypeNormal, events.BackOffPullImage, logPrefix, msg, klog.Info) return "", msg, ErrImagePullBackOff } - m.logIt(ref, v1.EventTypeNormal, events.PullingImage, logPrefix, fmt.Sprintf("pulling image %q", container.Image), glog.Info) + m.logIt(ref, v1.EventTypeNormal, events.PullingImage, logPrefix, fmt.Sprintf("pulling image %q", container.Image), klog.Info) pullChan := make(chan pullResult) m.puller.pullImage(spec, pullSecrets, pullChan) imagePullResult := <-pullChan if imagePullResult.err != nil { - m.logIt(ref, v1.EventTypeWarning, events.FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, imagePullResult.err), glog.Warning) + m.logIt(ref, v1.EventTypeWarning, events.FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, imagePullResult.err), klog.Warning) m.backOff.Next(backOffKey, m.backOff.Clock.Now()) - if imagePullResult.err == RegistryUnavailable { + if imagePullResult.err == ErrRegistryUnavailable { msg := fmt.Sprintf("image pull failed for %s because the registry is unavailable.", container.Image) return "", msg, imagePullResult.err } return "", imagePullResult.err.Error(), ErrImagePull } - m.logIt(ref, v1.EventTypeNormal, events.PulledImage, logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), glog.Info) + m.logIt(ref, v1.EventTypeNormal, events.PulledImage, logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), klog.Info) m.backOff.GC() 
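`EnsureImageExists` above keys its pull backoff on pod UID plus image name through client-go's `flowcontrol.Backoff`. A minimal standalone sketch of that pattern, using only the calls visible in the hunk plus `flowcontrol.NewBackOff`; the key and durations are illustrative, not the kubelet's actual defaults:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	// One shared Backoff, keyed per (pod UID, image) just like the hunk's
	// backOffKey.
	backOff := flowcontrol.NewBackOff(10*time.Second, 300*time.Second)
	key := fmt.Sprintf("%s_%s", "pod-uid-1234", "nginx:latest") // hypothetical key

	for attempt := 1; attempt <= 3; attempt++ {
		if backOff.IsInBackOffSinceUpdate(key, backOff.Clock.Now()) {
			fmt.Printf("attempt %d: in backoff window, skipping pull\n", attempt)
			continue
		}
		// Pretend the pull failed: recording the failure grows the window.
		backOff.Next(key, backOff.Clock.Now())
		fmt.Printf("attempt %d: pull failed, backoff recorded\n", attempt)
	}
	backOff.GC() // prune entries whose windows expired long ago
}
```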
return imagePullResult.imageRef, "", nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/images/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/images/types.go index 105906b5ccf2..897655c66f1b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/images/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/images/types.go @@ -23,22 +23,22 @@ import ( ) var ( - // Container image pull failed, kubelet is backing off image pull + // ErrImagePullBackOff - Container image pull failed, kubelet is backing off image pull ErrImagePullBackOff = errors.New("ImagePullBackOff") - // Unable to inspect image + // ErrImageInspect - Unable to inspect image ErrImageInspect = errors.New("ImageInspectError") - // General image pull error + // ErrImagePull - General image pull error ErrImagePull = errors.New("ErrImagePull") - // Required Image is absent on host and PullPolicy is NeverPullImage + // ErrImageNeverPull - Required Image is absent on host and PullPolicy is NeverPullImage ErrImageNeverPull = errors.New("ErrImageNeverPull") - // Get http error when pulling image from registry - RegistryUnavailable = errors.New("RegistryUnavailable") + // ErrRegistryUnavailable - Get http error when pulling image from registry + ErrRegistryUnavailable = errors.New("RegistryUnavailable") - // Unable to parse the image name. + // ErrInvalidImageName - Unable to parse the image name. ErrInvalidImageName = errors.New("InvalidImageName") ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go index 506f409f7274..f9993aca2e3d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go @@ -31,8 +31,6 @@ import ( "sync/atomic" "time" - "github.com/golang/glog" - cadvisorapi "github.com/google/cadvisor/info/v1" cadvisorapiv2 "github.com/google/cadvisor/info/v2" "k8s.io/api/core/v1" @@ -54,13 +52,15 @@ import ( "k8s.io/client-go/util/certificate" "k8s.io/client-go/util/flowcontrol" "k8s.io/client-go/util/integer" + cloudprovider "k8s.io/cloud-provider" csiclientset "k8s.io/csi-api/pkg/client/clientset/versioned" + "k8s.io/klog" api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/features" kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/config" internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" - pluginwatcherapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1alpha1" + pluginwatcherapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1" + "k8s.io/kubernetes/pkg/kubelet/apis/podresources" "k8s.io/kubernetes/pkg/kubelet/cadvisor" kubeletcertificate "k8s.io/kubernetes/pkg/kubelet/certificate" "k8s.io/kubernetes/pkg/kubelet/checkpointmanager" @@ -98,6 +98,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/sysctl" "k8s.io/kubernetes/pkg/kubelet/token" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" + "k8s.io/kubernetes/pkg/kubelet/util" "k8s.io/kubernetes/pkg/kubelet/util/format" "k8s.io/kubernetes/pkg/kubelet/util/manager" "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher" @@ -193,6 +194,7 @@ type Bootstrap interface { StartGarbageCollection() ListenAndServe(address net.IP, port uint, tlsOptions *server.TLSOptions, auth server.AuthInterface, enableDebuggingHandlers, enableContentionProfiling bool) ListenAndServeReadOnly(address net.IP, port uint) + ListenAndServePodResources() Run(<-chan 
kubetypes.PodUpdate) RunOnce(<-chan kubetypes.PodUpdate) ([]RunPodResult, error) } @@ -276,13 +278,13 @@ func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, ku // define file config source if kubeCfg.StaticPodPath != "" { - glog.Infof("Adding pod path: %v", kubeCfg.StaticPodPath) + klog.Infof("Adding pod path: %v", kubeCfg.StaticPodPath) config.NewSourceFile(kubeCfg.StaticPodPath, nodeName, kubeCfg.FileCheckFrequency.Duration, cfg.Channel(kubetypes.FileSource)) } // define url config source if kubeCfg.StaticPodURL != "" { - glog.Infof("Adding pod url %q with HTTP header %v", kubeCfg.StaticPodURL, manifestURLHeader) + klog.Infof("Adding pod url %q with HTTP header %v", kubeCfg.StaticPodURL, manifestURLHeader) config.NewSourceURL(kubeCfg.StaticPodURL, manifestURLHeader, nodeName, kubeCfg.HTTPCheckFrequency.Duration, cfg.Channel(kubetypes.HTTPSource)) } @@ -292,7 +294,7 @@ func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, ku var updatechannel chan<- interface{} if bootstrapCheckpointPath != "" { - glog.Infof("Adding checkpoint path: %v", bootstrapCheckpointPath) + klog.Infof("Adding checkpoint path: %v", bootstrapCheckpointPath) updatechannel = cfg.Channel(kubetypes.ApiserverSource) err := cfg.Restore(bootstrapCheckpointPath, updatechannel) if err != nil { @@ -301,7 +303,7 @@ func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, ku } if kubeDeps.KubeClient != nil { - glog.Infof("Watching apiserver") + klog.Infof("Watching apiserver") if updatechannel == nil { updatechannel = cfg.Channel(kubetypes.ApiserverSource) } @@ -392,7 +394,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, return nil, fmt.Errorf("error fetching current instance name from cloud provider: %v", err) } - glog.V(2).Infof("cloud provider determined current node name to be %s", nodeName) + klog.V(2).Infof("cloud provider determined current node name to be %s", nodeName) } if kubeDeps.PodConfig == nil { @@ -471,7 +473,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, for _, ipEntry := range kubeCfg.ClusterDNS { ip := net.ParseIP(ipEntry) if ip == nil { - glog.Warningf("Invalid clusterDNS ip '%q'", ipEntry) + klog.Warningf("Invalid clusterDNS ip '%q'", ipEntry) } else { clusterDNS = append(clusterDNS, ip) } @@ -480,52 +482,53 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, parsedNodeIP := net.ParseIP(nodeIP) protocol := utilipt.ProtocolIpv4 if parsedNodeIP != nil && parsedNodeIP.To4() == nil { - glog.V(0).Infof("IPv6 node IP (%s), assume IPv6 operation", nodeIP) + klog.V(0).Infof("IPv6 node IP (%s), assume IPv6 operation", nodeIP) protocol = utilipt.ProtocolIpv6 } klet := &Kubelet{ - hostname: hostname, - hostnameOverridden: len(hostnameOverride) > 0, - nodeName: nodeName, - kubeClient: kubeDeps.KubeClient, - csiClient: kubeDeps.CSIClient, - heartbeatClient: kubeDeps.HeartbeatClient, - onRepeatedHeartbeatFailure: kubeDeps.OnHeartbeatFailure, - rootDirectory: rootDirectory, - resyncInterval: kubeCfg.SyncFrequency.Duration, - sourcesReady: config.NewSourcesReady(kubeDeps.PodConfig.SeenAllSources), - registerNode: registerNode, - registerWithTaints: registerWithTaints, - registerSchedulable: registerSchedulable, - dnsConfigurer: dns.NewConfigurer(kubeDeps.Recorder, nodeRef, parsedNodeIP, clusterDNS, kubeCfg.ClusterDomain, kubeCfg.ResolverConfig), - serviceLister: serviceLister, - nodeInfo: nodeInfo, - masterServiceNamespace: masterServiceNamespace, - 
streamingConnectionIdleTimeout: kubeCfg.StreamingConnectionIdleTimeout.Duration, - recorder: kubeDeps.Recorder, - cadvisor: kubeDeps.CAdvisorInterface, - cloud: kubeDeps.Cloud, - externalCloudProvider: cloudprovider.IsExternal(cloudProvider), - providerID: providerID, - nodeRef: nodeRef, - nodeLabels: nodeLabels, - nodeStatusUpdateFrequency: kubeCfg.NodeStatusUpdateFrequency.Duration, - os: kubeDeps.OSInterface, - oomWatcher: oomWatcher, - cgroupsPerQOS: kubeCfg.CgroupsPerQOS, - cgroupRoot: kubeCfg.CgroupRoot, - mounter: kubeDeps.Mounter, - maxPods: int(kubeCfg.MaxPods), - podsPerCore: int(kubeCfg.PodsPerCore), - syncLoopMonitor: atomic.Value{}, - daemonEndpoints: daemonEndpoints, - containerManager: kubeDeps.ContainerManager, - containerRuntimeName: containerRuntime, - redirectContainerStreaming: crOptions.RedirectContainerStreaming, - nodeIP: parsedNodeIP, - nodeIPValidator: validateNodeIP, - clock: clock.RealClock{}, + hostname: hostname, + hostnameOverridden: len(hostnameOverride) > 0, + nodeName: nodeName, + kubeClient: kubeDeps.KubeClient, + csiClient: kubeDeps.CSIClient, + heartbeatClient: kubeDeps.HeartbeatClient, + onRepeatedHeartbeatFailure: kubeDeps.OnHeartbeatFailure, + rootDirectory: rootDirectory, + resyncInterval: kubeCfg.SyncFrequency.Duration, + sourcesReady: config.NewSourcesReady(kubeDeps.PodConfig.SeenAllSources), + registerNode: registerNode, + registerWithTaints: registerWithTaints, + registerSchedulable: registerSchedulable, + dnsConfigurer: dns.NewConfigurer(kubeDeps.Recorder, nodeRef, parsedNodeIP, clusterDNS, kubeCfg.ClusterDomain, kubeCfg.ResolverConfig), + serviceLister: serviceLister, + nodeInfo: nodeInfo, + masterServiceNamespace: masterServiceNamespace, + streamingConnectionIdleTimeout: kubeCfg.StreamingConnectionIdleTimeout.Duration, + recorder: kubeDeps.Recorder, + cadvisor: kubeDeps.CAdvisorInterface, + cloud: kubeDeps.Cloud, + externalCloudProvider: cloudprovider.IsExternal(cloudProvider), + providerID: providerID, + nodeRef: nodeRef, + nodeLabels: nodeLabels, + nodeStatusUpdateFrequency: kubeCfg.NodeStatusUpdateFrequency.Duration, + nodeStatusReportFrequency: kubeCfg.NodeStatusReportFrequency.Duration, + os: kubeDeps.OSInterface, + oomWatcher: oomWatcher, + cgroupsPerQOS: kubeCfg.CgroupsPerQOS, + cgroupRoot: kubeCfg.CgroupRoot, + mounter: kubeDeps.Mounter, + maxPods: int(kubeCfg.MaxPods), + podsPerCore: int(kubeCfg.PodsPerCore), + syncLoopMonitor: atomic.Value{}, + daemonEndpoints: daemonEndpoints, + containerManager: kubeDeps.ContainerManager, + containerRuntimeName: containerRuntime, + redirectContainerStreaming: crOptions.RedirectContainerStreaming, + nodeIP: parsedNodeIP, + nodeIPValidator: validateNodeIP, + clock: clock.RealClock{}, enableControllerAttachDetach: kubeCfg.EnableControllerAttachDetach, iptClient: utilipt.New(utilexec.New(), utildbus.New(), protocol), makeIPTablesUtilChains: kubeCfg.MakeIPTablesUtilChains, @@ -563,7 +566,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, klet.configMapManager = configMapManager if klet.experimentalHostUserNamespaceDefaulting { - glog.Infof("Experimental host user namespace defaulting is enabled.") + klog.Infof("Experimental host user namespace defaulting is enabled.") } machineInfo, err := klet.cadvisor.MachineInfo() @@ -607,7 +610,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, klet.resourceAnalyzer = serverstats.NewResourceAnalyzer(klet, kubeCfg.VolumeStatsAggPeriod.Duration) if containerRuntime == "rkt" { - glog.Fatalln("rktnetes has been 
deprecated in favor of rktlet. Please see https://github.com/kubernetes-incubator/rktlet for more information.") + klog.Fatalln("rktnetes has been deprecated in favor of rktlet. Please see https://github.com/kubernetes-incubator/rktlet for more information.") } // if left at nil, that means it is unneeded @@ -627,10 +630,10 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, } // The unix socket for kubelet <-> dockershim communication. - glog.V(5).Infof("RemoteRuntimeEndpoint: %q, RemoteImageEndpoint: %q", + klog.V(5).Infof("RemoteRuntimeEndpoint: %q, RemoteImageEndpoint: %q", remoteRuntimeEndpoint, remoteImageEndpoint) - glog.V(2).Infof("Starting the GRPC server for the docker CRI shim.") + klog.V(2).Infof("Starting the GRPC server for the docker CRI shim.") server := dockerremote.NewDockerServer(remoteRuntimeEndpoint, ds) if err := server.Start(); err != nil { return nil, err @@ -657,7 +660,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, } klet.runtimeService = runtimeService - if utilfeature.DefaultFeatureGate.Enabled(features.RuntimeClass) { + if utilfeature.DefaultFeatureGate.Enabled(features.RuntimeClass) && kubeDeps.DynamicKubeClient != nil { klet.runtimeClassManager = runtimeclass.NewManager(kubeDeps.DynamicKubeClient) } @@ -690,6 +693,12 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, klet.streamingRuntime = runtime klet.runner = runtime + runtimeCache, err := kubecontainer.NewRuntimeCache(klet.containerRuntime) + if err != nil { + return nil, err + } + klet.runtimeCache = runtimeCache + if cadvisor.UsingLegacyCadvisorStats(containerRuntime, remoteRuntimeEndpoint) { klet.StatsProvider = stats.NewCadvisorStatsProvider( klet.cadvisor, @@ -711,8 +720,8 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, klet.pleg = pleg.NewGenericPLEG(klet.containerRuntime, plegChannelCapacity, plegRelistPeriod, klet.podCache, clock.RealClock{}) klet.runtimeState = newRuntimeState(maxWaitForContainerRuntime) klet.runtimeState.addHealthCheck("PLEG", klet.pleg.Healthy) - if err := klet.updatePodCIDR(kubeCfg.PodCIDR); err != nil { - glog.Errorf("Pod CIDR update failed %v", err) + if _, err := klet.updatePodCIDR(kubeCfg.PodCIDR); err != nil { + klog.Errorf("Pod CIDR update failed %v", err) } // setup containerGC @@ -771,7 +780,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, tokenManager := token.NewManager(kubeDeps.KubeClient) if !utilfeature.DefaultFeatureGate.Enabled(features.MountPropagation) { - glog.Warning("Mount propagation feature gate has been deprecated and will be removed in the next release") + return nil, fmt.Errorf("mount propagation feature gate has been deprecated and will be removed in 1.14") } klet.volumePluginMgr, err = @@ -780,7 +789,10 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, return nil, err } if klet.enablePluginsWatcher { - klet.pluginWatcher = pluginwatcher.NewWatcher(klet.getPluginsDir()) + klet.pluginWatcher = pluginwatcher.NewWatcher( + klet.getPluginsRegistrationDir(), /* sockDir */ + klet.getPluginsDir(), /* deprecatedSockDir */ + ) } // If the experimentalMounterPathFlag is set, we do not want to @@ -807,11 +819,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, experimentalCheckNodeCapabilitiesBeforeMount, keepTerminatedPodVolumes) - runtimeCache, err := kubecontainer.NewRuntimeCache(klet.containerRuntime) - if err != nil { - return nil, err - } - klet.runtimeCache = 
runtimeCache klet.reasonCache = NewReasonCache() klet.workQueue = queue.NewBasicWorkQueue(klet.clock) klet.podWorkers = newPodWorkers(klet.syncPod, kubeDeps.Recorder, klet.workQueue, klet.resyncInterval, backOffPeriod, klet.podCache) @@ -1035,8 +1042,9 @@ type Kubelet struct { // used for generating ContainerStatus. reasonCache *ReasonCache - // nodeStatusUpdateFrequency specifies how often kubelet posts node status to master. - // Note: be cautious when changing the constant, it must work with nodeMonitorGracePeriod + // nodeStatusUpdateFrequency specifies how often kubelet computes node status. If node lease + // feature is not enabled, it is also the frequency that kubelet posts node status to master. + // In that case, be cautious when changing the constant, it must work with nodeMonitorGracePeriod // in nodecontroller. There are several constraints: // 1. nodeMonitorGracePeriod must be N times more than nodeStatusUpdateFrequency, where // N means number of retries allowed for kubelet to post node status. It is pointless @@ -1048,6 +1056,13 @@ type Kubelet struct { // as it takes time to gather all necessary node information. nodeStatusUpdateFrequency time.Duration + // nodeStatusReportFrequency is the frequency that kubelet posts node + // status to master. It is only used when node lease feature is enabled. + nodeStatusReportFrequency time.Duration + + // lastStatusReportTime is the time when node status was last reported. + lastStatusReportTime time.Time + // syncNodeStatusMux is a lock on updating the node status, because this path is not thread-safe. // This lock is used by Kubelet.syncNodeStatus function and shouldn't be used anywhere else. syncNodeStatusMux sync.Mutex @@ -1194,7 +1209,7 @@ type Kubelet struct { // pluginwatcher is a utility for Kubelet to register different types of node-level plugins // such as device plugins or CSI plugins. It discovers plugins by monitoring inotify events under the // directory returned by kubelet.getPluginsDir() - pluginWatcher pluginwatcher.Watcher + pluginWatcher *pluginwatcher.Watcher // This flag sets a maximum number of images to report in the node status. nodeStatusMaxImages int32 @@ -1233,6 +1248,7 @@ func allGlobalUnicastIPs() ([]net.IP, error) { // 1. the root directory // 2. the pods directory // 3. the plugins directory +// 4.
the pod-resources directory func (kl *Kubelet) setupDataDirs() error { kl.rootDirectory = path.Clean(kl.rootDirectory) if err := os.MkdirAll(kl.getRootDir(), 0750); err != nil { @@ -1247,6 +1263,12 @@ func (kl *Kubelet) setupDataDirs() error { if err := os.MkdirAll(kl.getPluginsDir(), 0750); err != nil { return fmt.Errorf("error creating plugins directory: %v", err) } + if err := os.MkdirAll(kl.getPluginsRegistrationDir(), 0750); err != nil { + return fmt.Errorf("error creating plugins registry directory: %v", err) + } + if err := os.MkdirAll(kl.getPodResourcesDir(), 0750); err != nil { + return fmt.Errorf("error creating podresources directory: %v", err) + } return nil } @@ -1255,23 +1277,23 @@ func (kl *Kubelet) StartGarbageCollection() { loggedContainerGCFailure := false go wait.Until(func() { if err := kl.containerGC.GarbageCollect(); err != nil { - glog.Errorf("Container garbage collection failed: %v", err) + klog.Errorf("Container garbage collection failed: %v", err) kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.ContainerGCFailed, err.Error()) loggedContainerGCFailure = true } else { - var vLevel glog.Level = 4 + var vLevel klog.Level = 4 if loggedContainerGCFailure { vLevel = 1 loggedContainerGCFailure = false } - glog.V(vLevel).Infof("Container garbage collection succeeded") + klog.V(vLevel).Infof("Container garbage collection succeeded") } }, ContainerGCPeriod, wait.NeverStop) // when the high threshold is set to 100, stub the image GC manager if kl.kubeletConfiguration.ImageGCHighThresholdPercent == 100 { - glog.V(2).Infof("ImageGCHighThresholdPercent is set 100, Disable image GC") + klog.V(2).Infof("ImageGCHighThresholdPercent is set 100, Disable image GC") return } @@ -1279,21 +1301,21 @@ func (kl *Kubelet) StartGarbageCollection() { go wait.Until(func() { if err := kl.imageManager.GarbageCollect(); err != nil { if prevImageGCFailed { - glog.Errorf("Image garbage collection failed multiple times in a row: %v", err) + klog.Errorf("Image garbage collection failed multiple times in a row: %v", err) // Only create an event for repeated failures kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.ImageGCFailed, err.Error()) } else { - glog.Errorf("Image garbage collection failed once. Stats initialization may not have completed yet: %v", err) + klog.Errorf("Image garbage collection failed once. Stats initialization may not have completed yet: %v", err) } prevImageGCFailed = true } else { - var vLevel glog.Level = 4 + var vLevel klog.Level = 4 if prevImageGCFailed { vLevel = 1 prevImageGCFailed = false } - glog.V(vLevel).Infof("Image garbage collection succeeded") + klog.V(vLevel).Infof("Image garbage collection succeeded") } }, ImageGCPeriod, wait.NeverStop) } @@ -1302,7 +1324,11 @@ func (kl *Kubelet) StartGarbageCollection() { // Note that the modules here must not depend on modules that are not initialized here. func (kl *Kubelet) initializeModules() error { // Prometheus metrics. - metrics.Register(kl.runtimeCache, collectors.NewVolumeStatsCollector(kl)) + metrics.Register( + kl.runtimeCache, + collectors.NewVolumeStatsCollector(kl), + collectors.NewLogMetricsCollector(kl.StatsProvider.ListPodStats), + ) // Setup filesystem directories. if err := kl.setupDataDirs(); err != nil { @@ -1312,7 +1338,7 @@ func (kl *Kubelet) initializeModules() error { // If the container logs directory does not exist, create it. 
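`StartGarbageCollection` above uses a logging idiom worth calling out: the periodic `wait.Until` loop logs success at `V(4)` normally, but promotes it to `V(1)` for the first success after a failure, so the recovery is visible at lower verbosity. A condensed sketch of that shape, with a stubbed collector standing in for the real GC call:

```go
package main

import (
	"errors"
	"flag"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil)
	flag.Parse()

	// Stub standing in for containerGC.GarbageCollect().
	collect := func() error { return errors.New("hypothetical GC failure") }
	failedLastTime := false

	go wait.Until(func() {
		if err := collect(); err != nil {
			klog.Errorf("garbage collection failed: %v", err)
			failedLastTime = true
			return
		}
		// Success is normally V(4) noise, but the first success after a
		// failure is promoted to V(1).
		var vLevel klog.Level = 4
		if failedLastTime {
			vLevel = 1
			failedLastTime = false
		}
		klog.V(vLevel).Infof("garbage collection succeeded")
	}, time.Minute, wait.NeverStop)

	time.Sleep(2 * time.Second) // let the loop tick once for the demo
	klog.Flush()
}
```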
if _, err := os.Stat(ContainerLogsDir); err != nil { if err := kl.os.MkdirAll(ContainerLogsDir, 0755); err != nil { - glog.Errorf("Failed to create directory %q: %v", ContainerLogsDir, err) + klog.Errorf("Failed to create directory %q: %v", ContainerLogsDir, err) } } @@ -1340,7 +1366,7 @@ func (kl *Kubelet) initializeRuntimeDependentModules() { if err := kl.cadvisor.Start(); err != nil { // Fail kubelet and rely on the babysitter to retry starting kubelet. // TODO(random-liu): Add backoff logic in the babysitter - glog.Fatalf("Failed to start cAdvisor %v", err) + klog.Fatalf("Failed to start cAdvisor %v", err) } // trigger on-demand stats collection once so that we have capacity information for ephemeral storage. @@ -1350,12 +1376,12 @@ func (kl *Kubelet) initializeRuntimeDependentModules() { node, err := kl.getNodeAnyWay() if err != nil { // Fail kubelet and rely on the babysitter to retry starting kubelet. - glog.Fatalf("Kubelet failed to get node info: %v", err) + klog.Fatalf("Kubelet failed to get node info: %v", err) } // containerManager must start after cAdvisor because it needs filesystem capacity information if err := kl.containerManager.Start(node, kl.GetActivePods, kl.sourcesReady, kl.statusManager, kl.runtimeService); err != nil { // Fail kubelet and rely on the babysitter to retry starting kubelet. - glog.Fatalf("Failed to start ContainerManager %v", err) + klog.Fatalf("Failed to start ContainerManager %v", err) } // eviction manager must start after cadvisor because it needs to know if the container runtime has a dedicated imagefs kl.evictionManager.Start(kl.StatsProvider, kl.GetActivePods, kl.podResourcesAreReclaimed, evictionMonitoringPeriod) @@ -1365,14 +1391,14 @@ func (kl *Kubelet) initializeRuntimeDependentModules() { kl.containerLogManager.Start() if kl.enablePluginsWatcher { // Adding Registration Callback function for CSI Driver - kl.pluginWatcher.AddHandler("CSIPlugin", csi.RegistrationCallback) + kl.pluginWatcher.AddHandler(pluginwatcherapi.CSIPlugin, pluginwatcher.PluginHandler(csi.PluginHandler)) // Adding Registration Callback function for Device Manager - kl.pluginWatcher.AddHandler(pluginwatcherapi.DevicePlugin, kl.containerManager.GetPluginRegistrationHandlerCallback()) + kl.pluginWatcher.AddHandler(pluginwatcherapi.DevicePlugin, kl.containerManager.GetPluginRegistrationHandler()) // Start the plugin watcher - glog.V(4).Infof("starting watcher") + klog.V(4).Infof("starting watcher") if err := kl.pluginWatcher.Start(); err != nil { kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.KubeletSetupFailed, err.Error()) - glog.Fatalf("failed to start Plugin Watcher. err: %v", err) + klog.Fatalf("failed to start Plugin Watcher. err: %v", err) } } } @@ -1383,7 +1409,7 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) { kl.logServer = http.StripPrefix("/logs/", http.FileServer(http.Dir("/var/log/"))) } if kl.kubeClient == nil { - glog.Warning("No api server defined - no node status update will be sent.") + klog.Warning("No api server defined - no node status update will be sent.") } // Start the cloud provider sync manager @@ -1393,7 +1419,7 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) { if err := kl.initializeModules(); err != nil { kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.KubeletSetupFailed, err.Error()) - glog.Fatal(err) + klog.Fatal(err) } // Start volume manager @@ -1501,7 +1527,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { // since kubelet first saw the pod if firstSeenTime is set. 
metrics.PodWorkerStartLatency.Observe(metrics.SinceInMicroseconds(firstSeenTime)) } else { - glog.V(3).Infof("First seen time not recorded for pod %q", pod.UID) + klog.V(3).Infof("First seen time not recorded for pod %q", pod.UID) } } @@ -1560,8 +1586,8 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { // If the network plugin is not ready, only start the pod if it uses the host network if rs := kl.runtimeState.networkErrors(); len(rs) != 0 && !kubecontainer.IsHostNetworkPod(pod) { - kl.recorder.Eventf(pod, v1.EventTypeWarning, events.NetworkNotReady, "network is not ready: %v", rs) - return fmt.Errorf("network is not ready: %v", rs) + kl.recorder.Eventf(pod, v1.EventTypeWarning, events.NetworkNotReady, "%s: %v", NetworkNotReadyErrorMsg, rs) + return fmt.Errorf("%s: %v", NetworkNotReadyErrorMsg, rs) } // Create Cgroups for the pod and apply resource parameters @@ -1600,7 +1626,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { if !(podKilled && pod.Spec.RestartPolicy == v1.RestartPolicyNever) { if !pcm.Exists(pod) { if err := kl.containerManager.UpdateQOSCgroups(); err != nil { - glog.V(2).Infof("Failed to update QoS cgroups while syncing pod: %v", err) + klog.V(2).Infof("Failed to update QoS cgroups while syncing pod: %v", err) } if err := pcm.EnsureExists(pod); err != nil { kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToCreatePodContainer, "unable to ensure pod container exists: %v", err) @@ -1618,9 +1644,9 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { if mirrorPod.DeletionTimestamp != nil || !kl.podManager.IsMirrorPodOf(mirrorPod, pod) { // The mirror pod is semantically different from the static pod. Remove // it. The mirror pod will get recreated later. - glog.Warningf("Deleting mirror pod %q because it is outdated", format.Pod(mirrorPod)) + klog.Warningf("Deleting mirror pod %q because it is outdated", format.Pod(mirrorPod)) if err := kl.podManager.DeleteMirrorPod(podFullName); err != nil { - glog.Errorf("Failed deleting mirror pod %q: %v", format.Pod(mirrorPod), err) + klog.Errorf("Failed deleting mirror pod %q: %v", format.Pod(mirrorPod), err) } else { deleted = true } @@ -1629,11 +1655,11 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { if mirrorPod == nil || deleted { node, err := kl.GetNode() if err != nil || node.DeletionTimestamp != nil { - glog.V(4).Infof("No need to create a mirror pod, since node %q has been removed from the cluster", kl.nodeName) + klog.V(4).Infof("No need to create a mirror pod, since node %q has been removed from the cluster", kl.nodeName) } else { - glog.V(4).Infof("Creating a mirror pod for static pod %q", format.Pod(pod)) + klog.V(4).Infof("Creating a mirror pod for static pod %q", format.Pod(pod)) if err := kl.podManager.CreateMirrorPod(pod); err != nil { - glog.Errorf("Failed creating a mirror pod for %q: %v", format.Pod(pod), err) + klog.Errorf("Failed creating a mirror pod for %q: %v", format.Pod(pod), err) } } } @@ -1642,7 +1668,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { // Make data directories for the pod if err := kl.makePodDataDirs(pod); err != nil { kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToMakePodDataDirectories, "error making pod data directories: %v", err) - glog.Errorf("Unable to make pod data directories for pod %q: %v", format.Pod(pod), err) + klog.Errorf("Unable to make pod data directories for pod %q: %v", format.Pod(pod), err) return err } @@ -1651,7 +1677,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { // Wait for volumes to attach/mount if err := 
kl.volumeManager.WaitForAttachAndMount(pod); err != nil { kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedMountVolume, "Unable to mount volumes for pod %q: %v", format.Pod(pod), err) - glog.Errorf("Unable to mount volumes for pod %q: %v; skipping pod", format.Pod(pod), err) + klog.Errorf("Unable to mount volumes for pod %q: %v; skipping pod", format.Pod(pod), err) return err } } @@ -1800,8 +1826,8 @@ func (kl *Kubelet) canRunPod(pod *v1.Pod) lifecycle.PodAdmitResult { // no changes are seen to the configuration, will synchronize the last known desired // state every sync-frequency seconds. Never returns. func (kl *Kubelet) syncLoop(updates <-chan kubetypes.PodUpdate, handler SyncHandler) { - glog.Info("Starting kubelet main sync loop.") - // The resyncTicker wakes up kubelet to checks if there are any pod workers + klog.Info("Starting kubelet main sync loop.") + // The syncTicker wakes up kubelet to checks if there are any pod workers // that need to be sync'd. A one-second period is sufficient because the // sync interval is defaulted to 10s. syncTicker := time.NewTicker(time.Second) @@ -1817,7 +1843,7 @@ func (kl *Kubelet) syncLoop(updates <-chan kubetypes.PodUpdate, handler SyncHand duration := base for { if rs := kl.runtimeState.runtimeErrors(); len(rs) != 0 { - glog.Infof("skipping pod synchronization - %v", rs) + klog.Infof("skipping pod synchronization - %v", rs) // exponential backoff time.Sleep(duration) duration = time.Duration(math.Min(float64(max), factor*float64(duration))) @@ -1873,39 +1899,39 @@ func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handle // Update from a config source; dispatch it to the right handler // callback. if !open { - glog.Errorf("Update channel is closed. Exiting the sync loop.") + klog.Errorf("Update channel is closed. Exiting the sync loop.") return false } switch u.Op { case kubetypes.ADD: - glog.V(2).Infof("SyncLoop (ADD, %q): %q", u.Source, format.Pods(u.Pods)) + klog.V(2).Infof("SyncLoop (ADD, %q): %q", u.Source, format.Pods(u.Pods)) // After restarting, kubelet will get all existing pods through // ADD as if they are new pods. These pods will then go through the // admission process and *may* be rejected. This can be resolved // once we have checkpointing. handler.HandlePodAdditions(u.Pods) case kubetypes.UPDATE: - glog.V(2).Infof("SyncLoop (UPDATE, %q): %q", u.Source, format.PodsWithDeletionTimestamps(u.Pods)) + klog.V(2).Infof("SyncLoop (UPDATE, %q): %q", u.Source, format.PodsWithDeletionTimestamps(u.Pods)) handler.HandlePodUpdates(u.Pods) case kubetypes.REMOVE: - glog.V(2).Infof("SyncLoop (REMOVE, %q): %q", u.Source, format.Pods(u.Pods)) + klog.V(2).Infof("SyncLoop (REMOVE, %q): %q", u.Source, format.Pods(u.Pods)) handler.HandlePodRemoves(u.Pods) case kubetypes.RECONCILE: - glog.V(4).Infof("SyncLoop (RECONCILE, %q): %q", u.Source, format.Pods(u.Pods)) + klog.V(4).Infof("SyncLoop (RECONCILE, %q): %q", u.Source, format.Pods(u.Pods)) handler.HandlePodReconcile(u.Pods) case kubetypes.DELETE: - glog.V(2).Infof("SyncLoop (DELETE, %q): %q", u.Source, format.Pods(u.Pods)) + klog.V(2).Infof("SyncLoop (DELETE, %q): %q", u.Source, format.Pods(u.Pods)) // DELETE is treated as a UPDATE because of graceful deletion. handler.HandlePodUpdates(u.Pods) case kubetypes.RESTORE: - glog.V(2).Infof("SyncLoop (RESTORE, %q): %q", u.Source, format.Pods(u.Pods)) + klog.V(2).Infof("SyncLoop (RESTORE, %q): %q", u.Source, format.Pods(u.Pods)) // These are pods restored from the checkpoint. Treat them as new // pods. 
handler.HandlePodAdditions(u.Pods) case kubetypes.SET: // TODO: Do we want to support this? - glog.Errorf("Kubelet does not support snapshot update") + klog.Errorf("Kubelet does not support snapshot update") } if u.Op != kubetypes.RESTORE { @@ -1924,11 +1950,11 @@ func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handle if isSyncPodWorthy(e) { // PLEG event for a pod; sync it. if pod, ok := kl.podManager.GetPodByUID(e.ID); ok { - glog.V(2).Infof("SyncLoop (PLEG): %q, event: %#v", format.Pod(pod), e) + klog.V(2).Infof("SyncLoop (PLEG): %q, event: %#v", format.Pod(pod), e) handler.HandlePodSyncs([]*v1.Pod{pod}) } else { // If the pod no longer exists, ignore the event. - glog.V(4).Infof("SyncLoop (PLEG): ignore irrelevant event: %#v", e) + klog.V(4).Infof("SyncLoop (PLEG): ignore irrelevant event: %#v", e) } } @@ -1943,7 +1969,7 @@ func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handle if len(podsToSync) == 0 { break } - glog.V(4).Infof("SyncLoop (SYNC): %d pods; %s", len(podsToSync), format.Pods(podsToSync)) + klog.V(4).Infof("SyncLoop (SYNC): %d pods; %s", len(podsToSync), format.Pods(podsToSync)) handler.HandlePodSyncs(podsToSync) case update := <-kl.livenessManager.Updates(): if update.Result == proberesults.Failure { @@ -1954,21 +1980,21 @@ func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handle pod, ok := kl.podManager.GetPodByUID(update.PodUID) if !ok { // If the pod no longer exists, ignore the update. - glog.V(4).Infof("SyncLoop (container unhealthy): ignore irrelevant update: %#v", update) + klog.V(4).Infof("SyncLoop (container unhealthy): ignore irrelevant update: %#v", update) break } - glog.V(1).Infof("SyncLoop (container unhealthy): %q", format.Pod(pod)) + klog.V(1).Infof("SyncLoop (container unhealthy): %q", format.Pod(pod)) handler.HandlePodSyncs([]*v1.Pod{pod}) } case <-housekeepingCh: if !kl.sourcesReady.AllReady() { // If the sources aren't ready or volume manager has not yet synced the states, // skip housekeeping, as we may accidentally delete pods from unready sources. - glog.V(4).Infof("SyncLoop (housekeeping, skipped): sources aren't ready yet.") + klog.V(4).Infof("SyncLoop (housekeeping, skipped): sources aren't ready yet.") } else { - glog.V(4).Infof("SyncLoop (housekeeping)") + klog.V(4).Infof("SyncLoop (housekeeping)") if err := handler.HandlePodCleanups(); err != nil { - glog.Errorf("Failed cleaning pods: %v", err) + klog.Errorf("Failed cleaning pods: %v", err) } } } @@ -2091,7 +2117,7 @@ func (kl *Kubelet) HandlePodRemoves(pods []*v1.Pod) { // Deletion is allowed to fail because the periodic cleanup routine // will trigger deletion again. if err := kl.deletePod(pod); err != nil { - glog.V(2).Infof("Failed to delete pod %q, err: %v", format.Pod(pod), err) + klog.V(2).Infof("Failed to delete pod %q, err: %v", format.Pod(pod), err) } kl.probeManager.RemovePod(pod) } @@ -2150,20 +2176,20 @@ func (kl *Kubelet) updateRuntimeUp() { s, err := kl.containerRuntime.Status() if err != nil { - glog.Errorf("Container runtime sanity check failed: %v", err) + klog.Errorf("Container runtime sanity check failed: %v", err) return } if s == nil { - glog.Errorf("Container runtime status is nil") + klog.Errorf("Container runtime status is nil") return } // Periodically log the whole runtime status for debugging. // TODO(random-liu): Consider to send node event when optional // condition is unmet. 
- glog.V(4).Infof("Container runtime status: %v", s) + klog.V(4).Infof("Container runtime status: %v", s) networkReady := s.GetRuntimeCondition(kubecontainer.NetworkReady) if networkReady == nil || !networkReady.Status { - glog.Errorf("Container runtime network not ready: %v", networkReady) + klog.Errorf("Container runtime network not ready: %v", networkReady) kl.runtimeState.setNetworkState(fmt.Errorf("runtime network not ready: %v", networkReady)) } else { // Set nil if the container runtime network is ready. @@ -2175,7 +2201,7 @@ func (kl *Kubelet) updateRuntimeUp() { runtimeReady := s.GetRuntimeCondition(kubecontainer.RuntimeReady) // If RuntimeReady is not set or is false, report an error. if runtimeReady == nil || !runtimeReady.Status { - glog.Errorf("Container runtime not ready: %v", runtimeReady) + klog.Errorf("Container runtime not ready: %v", runtimeReady) return } kl.oneTimeInitializer.Do(kl.initializeRuntimeDependentModules) @@ -2208,6 +2234,11 @@ func (kl *Kubelet) ListenAndServeReadOnly(address net.IP, port uint) { server.ListenAndServeKubeletReadOnlyServer(kl, kl.resourceAnalyzer, address, port) } +// ListenAndServePodResources runs the kubelet podresources grpc service +func (kl *Kubelet) ListenAndServePodResources() { + server.ListenAndServePodResources(util.LocalEndpoint(kl.getPodResourcesDir(), podresources.Socket), kl.podManager, kl.containerManager) +} + // Delete the eligible dead container instances in a pod. Depending on the configuration, the latest dead containers may be kept around. func (kl *Kubelet) cleanUpContainersInPod(podID types.UID, exitedContainerID string) { if podStatus, err := kl.podCache.Get(podID); err == nil { @@ -2232,12 +2263,12 @@ func (kl *Kubelet) fastStatusUpdateOnce() { time.Sleep(100 * time.Millisecond) node, err := kl.GetNode() if err != nil { - glog.Errorf(err.Error()) + klog.Errorf(err.Error()) continue } if node.Spec.PodCIDR != "" { - if err := kl.updatePodCIDR(node.Spec.PodCIDR); err != nil { - glog.Errorf("Pod CIDR update failed %v", err) + if _, err := kl.updatePodCIDR(node.Spec.PodCIDR); err != nil { + klog.Errorf("Pod CIDR update failed %v", err) continue } kl.updateRuntimeUp() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_getters.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_getters.go index 89077eb7f73c..eb05a578c077 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_getters.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_getters.go @@ -22,8 +22,8 @@ import ( "net" "path/filepath" - "github.com/golang/glog" cadvisorapiv1 "github.com/google/cadvisor/info/v1" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -57,6 +57,14 @@ func (kl *Kubelet) getPluginsDir() string { return filepath.Join(kl.getRootDir(), config.DefaultKubeletPluginsDirName) } +// getPluginsRegistrationDir returns the full path to the directory under which +// plugins socket should be placed to be registered. +// More information is available about plugin registration in the pluginwatcher +// module +func (kl *Kubelet) getPluginsRegistrationDir() string { + return filepath.Join(kl.getRootDir(), config.DefaultKubeletPluginsRegistrationDirName) +} + // getPluginDir returns a data directory name for a given plugin name. // Plugins can use these directories to store data that they need to persist. // For per-pod plugin data, see getPodPluginDir. 
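This file and the `setupDataDirs` change earlier add two new subdirectories under the kubelet root: a registration directory for the rewritten plugin watcher and a pod-resources directory hosting the new gRPC socket. Below is a sketch of the getter-plus-`MkdirAll` pattern those hunks follow; the constant values are assumptions standing in for the `config.DefaultKubelet*DirName` constants, which are not shown in this diff:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// Hypothetical stand-ins for the config.DefaultKubelet*DirName constants
// referenced by the getters in this file.
const (
	pluginsDirName             = "plugins"
	pluginsRegistrationDirName = "plugins_registry"
	podResourcesDirName        = "pod-resources"
)

func main() {
	root := filepath.Clean("/var/lib/kubelet")

	// Every per-feature directory hangs off the kubelet root; setupDataDirs
	// creates them up front with 0750 so later components can assume they exist.
	for _, dir := range []string{
		filepath.Join(root, pluginsDirName),
		filepath.Join(root, pluginsRegistrationDirName),
		filepath.Join(root, podResourcesDirName),
	} {
		if err := os.MkdirAll(dir, 0750); err != nil {
			fmt.Fprintf(os.Stderr, "error creating %s: %v\n", dir, err)
			os.Exit(1)
		}
		fmt.Println("ensured", dir)
	}
}
```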
@@ -139,6 +147,11 @@ func (kl *Kubelet) getPodContainerDir(podUID types.UID, ctrName string) string { return filepath.Join(kl.getPodDir(podUID), config.DefaultKubeletContainersDirName, ctrName) } +// getPodResourcesDir returns the full path to the directory containing the pod resources socket +func (kl *Kubelet) getPodResourcesDir() string { + return filepath.Join(kl.getRootDir(), config.DefaultKubeletPodResourcesDirName) +} + // GetPods returns all pods bound to the kubelet and their spec, and the mirror // pods. func (kl *Kubelet) GetPods() []*v1.Pod { @@ -261,13 +274,13 @@ func (kl *Kubelet) getPodVolumePathListFromDisk(podUID types.UID) ([]string, err if pathExists, pathErr := volumeutil.PathExists(podVolDir); pathErr != nil { return volumes, fmt.Errorf("Error checking if path %q exists: %v", podVolDir, pathErr) } else if !pathExists { - glog.Warningf("Path %q does not exist", podVolDir) + klog.Warningf("Path %q does not exist", podVolDir) return volumes, nil } volumePluginDirs, err := ioutil.ReadDir(podVolDir) if err != nil { - glog.Errorf("Could not read directory %s: %v", podVolDir, err) + klog.Errorf("Could not read directory %s: %v", podVolDir, err) return volumes, err } for _, volumePluginDir := range volumePluginDirs { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_network.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_network.go index 0528d2c1e57d..c604eddb4c10 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_network.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_network.go @@ -19,8 +19,8 @@ package kubelet import ( "fmt" - "github.com/golang/glog" "k8s.io/api/core/v1" + "k8s.io/klog" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" utiliptables "k8s.io/kubernetes/pkg/util/iptables" ) @@ -55,26 +55,28 @@ func (kl *Kubelet) providerRequiresNetworkingConfiguration() bool { } // updatePodCIDR updates the pod CIDR in the runtime state if it is different -// from the current CIDR. -func (kl *Kubelet) updatePodCIDR(cidr string) error { +// from the current CIDR. Returns true if the pod CIDR actually changed. +func (kl *Kubelet) updatePodCIDR(cidr string) (bool, error) { kl.updatePodCIDRMux.Lock() defer kl.updatePodCIDRMux.Unlock() podCIDR := kl.runtimeState.podCIDR() if podCIDR == cidr { - return nil + return false, nil } // kubelet -> generic runtime -> runtime shim -> network plugin // docker/non-cri implementations have a passthrough UpdatePodCIDR if err := kl.getRuntime().UpdatePodCIDR(cidr); err != nil { - return fmt.Errorf("failed to update pod CIDR: %v", err) + // If updatePodCIDR fails, the pod CIDR should in theory be unchanged, + // but it is safer to still report a change here. + return true, fmt.Errorf("failed to update pod CIDR: %v", err) } - glog.Infof("Setting Pod CIDR: %v -> %v", podCIDR, cidr) + klog.Infof("Setting Pod CIDR: %v -> %v", podCIDR, cidr) kl.runtimeState.setPodCIDR(cidr) - return nil + return true, nil } // GetPodDNS returns DNS settings for the pod.
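The signature change to `updatePodCIDR` above lets callers distinguish "already current" from "actually changed" instead of treating every call as a change. A stripped-down sketch of the new contract with a toy state holder; none of this is kubelet code:

```go
package main

import (
	"fmt"
	"sync"
)

// nodeNet is a toy stand-in for the kubelet state touched by updatePodCIDR.
type nodeNet struct {
	mu      sync.Mutex
	podCIDR string
}

// updatePodCIDR mirrors the new contract in the hunk above: report
// (changed, err) so callers can skip follow-up work when the CIDR was
// already current.
func (n *nodeNet) updatePodCIDR(cidr string) (bool, error) {
	n.mu.Lock()
	defer n.mu.Unlock()
	if n.podCIDR == cidr {
		return false, nil
	}
	n.podCIDR = cidr
	return true, nil
}

func main() {
	n := &nodeNet{}
	for _, cidr := range []string{"10.244.1.0/24", "10.244.1.0/24"} {
		changed, err := n.updatePodCIDR(cidr)
		if err != nil {
			fmt.Println("update failed:", err)
			continue
		}
		fmt.Printf("cidr=%s changed=%v\n", cidr, changed)
	}
	// The first call reports changed=true; the repeat reports changed=false.
}
```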
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_network_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_network_linux.go index 002b226b1907..ec7d41d9557e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_network_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_network_linux.go @@ -21,7 +21,7 @@ package kubelet import ( "fmt" - "github.com/golang/glog" + "k8s.io/klog" utiliptables "k8s.io/kubernetes/pkg/util/iptables" ) @@ -33,73 +33,73 @@ import ( // Marked connection will get SNAT on POSTROUTING Chain in nat table func (kl *Kubelet) syncNetworkUtil() { if kl.iptablesMasqueradeBit < 0 || kl.iptablesMasqueradeBit > 31 { - glog.Errorf("invalid iptables-masquerade-bit %v not in [0, 31]", kl.iptablesMasqueradeBit) + klog.Errorf("invalid iptables-masquerade-bit %v not in [0, 31]", kl.iptablesMasqueradeBit) return } if kl.iptablesDropBit < 0 || kl.iptablesDropBit > 31 { - glog.Errorf("invalid iptables-drop-bit %v not in [0, 31]", kl.iptablesDropBit) + klog.Errorf("invalid iptables-drop-bit %v not in [0, 31]", kl.iptablesDropBit) return } if kl.iptablesDropBit == kl.iptablesMasqueradeBit { - glog.Errorf("iptables-masquerade-bit %v and iptables-drop-bit %v must be different", kl.iptablesMasqueradeBit, kl.iptablesDropBit) + klog.Errorf("iptables-masquerade-bit %v and iptables-drop-bit %v must be different", kl.iptablesMasqueradeBit, kl.iptablesDropBit) return } // Setup KUBE-MARK-DROP rules dropMark := getIPTablesMark(kl.iptablesDropBit) if _, err := kl.iptClient.EnsureChain(utiliptables.TableNAT, KubeMarkDropChain); err != nil { - glog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, KubeMarkDropChain, err) + klog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, KubeMarkDropChain, err) return } if _, err := kl.iptClient.EnsureRule(utiliptables.Append, utiliptables.TableNAT, KubeMarkDropChain, "-j", "MARK", "--set-xmark", dropMark); err != nil { - glog.Errorf("Failed to ensure marking rule for %v: %v", KubeMarkDropChain, err) + klog.Errorf("Failed to ensure marking rule for %v: %v", KubeMarkDropChain, err) return } if _, err := kl.iptClient.EnsureChain(utiliptables.TableFilter, KubeFirewallChain); err != nil { - glog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableFilter, KubeFirewallChain, err) + klog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableFilter, KubeFirewallChain, err) return } if _, err := kl.iptClient.EnsureRule(utiliptables.Append, utiliptables.TableFilter, KubeFirewallChain, "-m", "comment", "--comment", "kubernetes firewall for dropping marked packets", "-m", "mark", "--mark", dropMark, "-j", "DROP"); err != nil { - glog.Errorf("Failed to ensure rule to drop packet marked by %v in %v chain %v: %v", KubeMarkDropChain, utiliptables.TableFilter, KubeFirewallChain, err) + klog.Errorf("Failed to ensure rule to drop packet marked by %v in %v chain %v: %v", KubeMarkDropChain, utiliptables.TableFilter, KubeFirewallChain, err) return } if _, err := kl.iptClient.EnsureRule(utiliptables.Prepend, utiliptables.TableFilter, utiliptables.ChainOutput, "-j", string(KubeFirewallChain)); err != nil { - glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableFilter, utiliptables.ChainOutput, KubeFirewallChain, err) + klog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableFilter, utiliptables.ChainOutput, KubeFirewallChain, err) 
return } if _, err := kl.iptClient.EnsureRule(utiliptables.Prepend, utiliptables.TableFilter, utiliptables.ChainInput, "-j", string(KubeFirewallChain)); err != nil { - glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableFilter, utiliptables.ChainInput, KubeFirewallChain, err) + klog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableFilter, utiliptables.ChainInput, KubeFirewallChain, err) return } // Setup KUBE-MARK-MASQ rules masqueradeMark := getIPTablesMark(kl.iptablesMasqueradeBit) if _, err := kl.iptClient.EnsureChain(utiliptables.TableNAT, KubeMarkMasqChain); err != nil { - glog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, KubeMarkMasqChain, err) + klog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, KubeMarkMasqChain, err) return } if _, err := kl.iptClient.EnsureChain(utiliptables.TableNAT, KubePostroutingChain); err != nil { - glog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, KubePostroutingChain, err) + klog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, KubePostroutingChain, err) return } if _, err := kl.iptClient.EnsureRule(utiliptables.Append, utiliptables.TableNAT, KubeMarkMasqChain, "-j", "MARK", "--set-xmark", masqueradeMark); err != nil { - glog.Errorf("Failed to ensure marking rule for %v: %v", KubeMarkMasqChain, err) + klog.Errorf("Failed to ensure marking rule for %v: %v", KubeMarkMasqChain, err) return } if _, err := kl.iptClient.EnsureRule(utiliptables.Prepend, utiliptables.TableNAT, utiliptables.ChainPostrouting, "-m", "comment", "--comment", "kubernetes postrouting rules", "-j", string(KubePostroutingChain)); err != nil { - glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableNAT, utiliptables.ChainPostrouting, KubePostroutingChain, err) + klog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableNAT, utiliptables.ChainPostrouting, KubePostroutingChain, err) return } if _, err := kl.iptClient.EnsureRule(utiliptables.Append, utiliptables.TableNAT, KubePostroutingChain, "-m", "comment", "--comment", "kubernetes service traffic requiring SNAT", "-m", "mark", "--mark", masqueradeMark, "-j", "MASQUERADE"); err != nil { - glog.Errorf("Failed to ensure SNAT rule for packets marked by %v in %v chain %v: %v", KubeMarkMasqChain, utiliptables.TableNAT, KubePostroutingChain, err) + klog.Errorf("Failed to ensure SNAT rule for packets marked by %v in %v chain %v: %v", KubeMarkMasqChain, utiliptables.TableNAT, KubePostroutingChain, err) return } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status.go index 54f54f149aee..d15e4167e38d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status.go @@ -21,25 +21,27 @@ import ( "fmt" "net" goruntime "runtime" + "sort" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" + cloudprovider "k8s.io/cloud-provider" k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" v1helper 
"k8s.io/kubernetes/pkg/apis/core/v1/helper" - "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/features" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/pkg/kubelet/nodestatus" "k8s.io/kubernetes/pkg/kubelet/util" - "k8s.io/kubernetes/pkg/scheduler/algorithm" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" nodeutil "k8s.io/kubernetes/pkg/util/node" taintutil "k8s.io/kubernetes/pkg/util/taints" volutil "k8s.io/kubernetes/pkg/volume/util" @@ -63,14 +65,14 @@ func (kl *Kubelet) registerWithAPIServer() { node, err := kl.initialNode() if err != nil { - glog.Errorf("Unable to construct v1.Node object for kubelet: %v", err) + klog.Errorf("Unable to construct v1.Node object for kubelet: %v", err) continue } - glog.Infof("Attempting to register node %s", node.Name) + klog.Infof("Attempting to register node %s", node.Name) registered := kl.tryRegisterWithAPIServer(node) if registered { - glog.Infof("Successfully registered node %s", node.Name) + klog.Infof("Successfully registered node %s", node.Name) kl.registrationCompleted = true return } @@ -89,27 +91,27 @@ func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool { } if !apierrors.IsAlreadyExists(err) { - glog.Errorf("Unable to register node %q with API server: %v", kl.nodeName, err) + klog.Errorf("Unable to register node %q with API server: %v", kl.nodeName, err) return false } existingNode, err := kl.kubeClient.CoreV1().Nodes().Get(string(kl.nodeName), metav1.GetOptions{}) if err != nil { - glog.Errorf("Unable to register node %q with API server: error getting existing node: %v", kl.nodeName, err) + klog.Errorf("Unable to register node %q with API server: error getting existing node: %v", kl.nodeName, err) return false } if existingNode == nil { - glog.Errorf("Unable to register node %q with API server: no node instance returned", kl.nodeName) + klog.Errorf("Unable to register node %q with API server: no node instance returned", kl.nodeName) return false } originalNode := existingNode.DeepCopy() if originalNode == nil { - glog.Errorf("Nil %q node object", kl.nodeName) + klog.Errorf("Nil %q node object", kl.nodeName) return false } - glog.Infof("Node %s was previously registered", kl.nodeName) + klog.Infof("Node %s was previously registered", kl.nodeName) // Edge case: the node was previously registered; reconcile // the value of the controller-managed attach-detach @@ -119,7 +121,7 @@ func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool { requiresUpdate = kl.reconcileExtendedResource(node, existingNode) || requiresUpdate if requiresUpdate { if _, _, err := nodeutil.PatchNodeStatus(kl.kubeClient.CoreV1(), types.NodeName(kl.nodeName), originalNode, existingNode); err != nil { - glog.Errorf("Unable to reconcile node %q with API server: error updating node: %v", kl.nodeName, err) + klog.Errorf("Unable to reconcile node %q with API server: error updating node: %v", kl.nodeName, err) return false } } @@ -151,7 +153,7 @@ func (kl *Kubelet) updateDefaultLabels(initialNode, existingNode *v1.Node) bool kubeletapis.LabelArch, } - var needsUpdate bool = false + needsUpdate := false if existingNode.Labels == nil { existingNode.Labels = make(map[string]string) } @@ -191,10 +193,10 @@ func (kl *Kubelet) reconcileCMADAnnotationWithExistingNode(node, existingNode *v // not have the same value, update the existing node with // the correct value of the annotation. 
if !newSet { - glog.Info("Controller attach-detach setting changed to false; updating existing Node") + klog.Info("Controller attach-detach setting changed to false; updating existing Node") delete(existingNode.Annotations, volutil.ControllerManagedAttachAnnotation) } else { - glog.Info("Controller attach-detach setting changed to true; updating existing Node") + klog.Info("Controller attach-detach setting changed to true; updating existing Node") if existingNode.Annotations == nil { existingNode.Annotations = make(map[string]string) } @@ -232,7 +234,7 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) { } unschedulableTaint := v1.Taint{ - Key: algorithm.TaintNodeUnschedulable, + Key: schedulerapi.TaintNodeUnschedulable, Effect: v1.TaintEffectNoSchedule, } @@ -247,7 +249,7 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) { if kl.externalCloudProvider { taint := v1.Taint{ - Key: algorithm.TaintExternalCloudProvider, + Key: schedulerapi.TaintExternalCloudProvider, Value: "true", Effect: v1.TaintEffectNoSchedule, } @@ -273,24 +275,24 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) { node.Annotations = make(map[string]string) } - glog.Infof("Setting node annotation to enable volume controller attach/detach") + klog.Infof("Setting node annotation to enable volume controller attach/detach") node.Annotations[volutil.ControllerManagedAttachAnnotation] = "true" } else { - glog.Infof("Controller attach/detach is disabled for this node; Kubelet will attach and detach volumes") + klog.Infof("Controller attach/detach is disabled for this node; Kubelet will attach and detach volumes") } if kl.keepTerminatedPodVolumes { if node.Annotations == nil { node.Annotations = make(map[string]string) } - glog.Infof("Setting node annotation to keep pod volumes of terminated pods attached to the node") + klog.Infof("Setting node annotation to keep pod volumes of terminated pods attached to the node") node.Annotations[volutil.KeepTerminatedPodVolumesAnnotation] = "true" } // @question: should this be placed after the call to the cloud provider?
which also applies labels for k, v := range kl.nodeLabels { if cv, found := node.ObjectMeta.Labels[k]; found { - glog.Warningf("the node label %s=%s will overwrite default setting %s", k, v, cv) + klog.Warningf("the node label %s=%s will overwrite default setting %s", k, v, cv) } node.ObjectMeta.Labels[k] = v } @@ -321,7 +323,7 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) { return nil, err } if instanceType != "" { - glog.Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelInstanceType, instanceType) + klog.Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelInstanceType, instanceType) node.ObjectMeta.Labels[kubeletapis.LabelInstanceType] = instanceType } // If the cloud has zone information, label the node with the zone information @@ -332,11 +334,11 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) { return nil, fmt.Errorf("failed to get zone from cloud provider: %v", err) } if zone.FailureDomain != "" { - glog.Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneFailureDomain, zone.FailureDomain) + klog.Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneFailureDomain, zone.FailureDomain) node.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] = zone.FailureDomain } if zone.Region != "" { - glog.Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneRegion, zone.Region) + klog.Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneRegion, zone.Region) node.ObjectMeta.Labels[kubeletapis.LabelZoneRegion] = zone.Region } } @@ -348,8 +350,8 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) { } // syncNodeStatus should be called periodically from a goroutine. -// It synchronizes node status to master, registering the kubelet first if -// necessary. +// It synchronizes node status to master if there is any change or enough time +// passed from the last sync, registering the kubelet first if necessary. func (kl *Kubelet) syncNodeStatus() { kl.syncNodeStatusMux.Lock() defer kl.syncNodeStatusMux.Unlock() @@ -362,19 +364,20 @@ func (kl *Kubelet) syncNodeStatus() { kl.registerWithAPIServer() } if err := kl.updateNodeStatus(); err != nil { - glog.Errorf("Unable to update node status: %v", err) + klog.Errorf("Unable to update node status: %v", err) } } -// updateNodeStatus updates node status to master with retries. +// updateNodeStatus updates node status to master with retries if there is any +// change or enough time passed from the last sync. func (kl *Kubelet) updateNodeStatus() error { - glog.V(5).Infof("Updating node status") + klog.V(5).Infof("Updating node status") for i := 0; i < nodeStatusUpdateRetry; i++ { if err := kl.tryUpdateNodeStatus(i); err != nil { if i > 0 && kl.onRepeatedHeartbeatFailure != nil { kl.onRepeatedHeartbeatFailure() } - glog.Errorf("Error updating node status, will retry: %v", err) + klog.Errorf("Error updating node status, will retry: %v", err) } else { return nil } @@ -382,7 +385,8 @@ func (kl *Kubelet) updateNodeStatus() error { return fmt.Errorf("update node status exceeds retry count") } -// tryUpdateNodeStatus tries to update node status to master. +// tryUpdateNodeStatus tries to update node status to master if there is any +// change or enough time passed from the last sync. func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error { // In large clusters, GET and PUT operations on Node objects coming // from here are the majority of load on apiserver and etcd. 
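The hunk below changes tryUpdateNodeStatus so that, when the NodeLease feature gate is enabled, the status PATCH is skipped unless the pod CIDR or the node status actually changed, or nodeStatusReportFrequency has elapsed since lastStatusReportTime. A minimal sketch of that gating logic, under the assumption of simplified types (reporter and shouldPatch are illustrative names, not kubelet identifiers):

package main

import (
	"fmt"
	"time"
)

// reporter stands in for the kubelet fields the new check reads: the
// time of the last status PATCH and how often a report must go out
// even when nothing changed. Illustrative only.
type reporter struct {
	lastStatusReportTime      time.Time
	nodeStatusReportFrequency time.Duration
}

// shouldPatch mirrors the gating in the hunk below: inside the report
// window, only a real change (pod CIDR or node status) triggers a
// PATCH; once the window has elapsed, a PATCH is sent unconditionally.
func (r *reporter) shouldPatch(now time.Time, podCIDRChanged, statusChanged bool) bool {
	if now.Before(r.lastStatusReportTime.Add(r.nodeStatusReportFrequency)) {
		return podCIDRChanged || statusChanged
	}
	return true
}

func main() {
	r := &reporter{lastStatusReportTime: time.Now(), nodeStatusReportFrequency: 5 * time.Minute}
	fmt.Println(r.shouldPatch(time.Now(), false, false)) // false: no change, window not elapsed
	fmt.Println(r.shouldPatch(time.Now(), true, false))  // true: pod CIDR changed
}

This is also why heartbeat timestamps are stripped before comparison in nodeStatusHasChanged further down: otherwise every heartbeat would count as a change and defeat the gate.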
@@ -404,18 +408,31 @@ func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error { return fmt.Errorf("nil %q node object", kl.nodeName) } + podCIDRChanged := false if node.Spec.PodCIDR != "" { - if err := kl.updatePodCIDR(node.Spec.PodCIDR); err != nil { - glog.Errorf(err.Error()) + // Pod CIDR could have been updated before, so we cannot rely on + // node.Spec.PodCIDR being non-empty. We also need to know if pod CIDR is + // actually changed. + if podCIDRChanged, err = kl.updatePodCIDR(node.Spec.PodCIDR); err != nil { + klog.Errorf(err.Error()) } } kl.setNodeStatus(node) + + now := kl.clock.Now() + if utilfeature.DefaultFeatureGate.Enabled(features.NodeLease) && now.Before(kl.lastStatusReportTime.Add(kl.nodeStatusReportFrequency)) { + if !podCIDRChanged && !nodeStatusHasChanged(&originalNode.Status, &node.Status) { + return nil + } + } + // Patch the current status on the API server updatedNode, _, err := nodeutil.PatchNodeStatus(kl.heartbeatClient.CoreV1(), types.NodeName(kl.nodeName), originalNode, node) if err != nil { return err } + kl.lastStatusReportTime = now kl.setLastObservedNodeAddresses(updatedNode.Status.Addresses) // If update finishes successfully, mark the volumeInUse as reportedInUse to indicate // those volumes are already updated in the node's status @@ -426,7 +443,7 @@ func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error { // recordNodeStatusEvent records an event of the given type with the given // message for the node. func (kl *Kubelet) recordNodeStatusEvent(eventType, event string) { - glog.V(2).Infof("Recording %s event message for node %s", event, kl.nodeName) + klog.V(2).Infof("Recording %s event message for node %s", event, kl.nodeName) // TODO: This requires a transaction, either both node status is updated // and event is recorded or neither should happen, see issue #6055. kl.recorder.Eventf(kl.nodeRef, eventType, event, "Node %s status is now: %s", kl.nodeName, event) @@ -458,9 +475,9 @@ func (kl *Kubelet) recordNodeSchedulableEvent(node *v1.Node) error { // refactor the node status condition code out to a different file. func (kl *Kubelet) setNodeStatus(node *v1.Node) { for i, f := range kl.setNodeStatusFuncs { - glog.V(5).Infof("Setting node status at position %v", i) + klog.V(5).Infof("Setting node status at position %v", i) if err := f(node); err != nil { - glog.Warningf("Failed to set some node status fields: %s", err) + klog.Warningf("Failed to set some node status fields: %s", err) } } } @@ -502,7 +519,6 @@ func (kl *Kubelet) defaultNodeStatusFuncs() []func(*v1.Node) error { setters = append(setters, nodestatus.VolumeLimits(kl.volumePluginMgr.ListVolumePluginWithLimits)) } setters = append(setters, - nodestatus.OutOfDiskCondition(kl.clock.Now, kl.recordNodeStatusEvent), nodestatus.MemoryPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderMemoryPressure, kl.recordNodeStatusEvent), nodestatus.DiskPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderDiskPressure, kl.recordNodeStatusEvent), nodestatus.PIDPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderPIDPressure, kl.recordNodeStatusEvent), @@ -554,3 +570,53 @@ func validateNodeIP(nodeIP net.IP) error { } return fmt.Errorf("Node IP: %q not found in the host's network interfaces", nodeIP.String()) } + +// nodeStatusHasChanged compares the original node and current node's status and +// returns true if any change happens. The heartbeat timestamp is ignored. 
+func nodeStatusHasChanged(originalStatus *v1.NodeStatus, status *v1.NodeStatus) bool { + if originalStatus == nil && status == nil { + return false + } + if originalStatus == nil || status == nil { + return true + } + + // Compare node conditions here because we need to ignore the heartbeat timestamp. + if nodeConditionsHaveChanged(originalStatus.Conditions, status.Conditions) { + return true + } + + // Compare other fields of NodeStatus. + originalStatusCopy := originalStatus.DeepCopy() + statusCopy := status.DeepCopy() + originalStatusCopy.Conditions = nil + statusCopy.Conditions = nil + return !apiequality.Semantic.DeepEqual(originalStatusCopy, statusCopy) +} + +// nodeConditionsHaveChanged compares the original node and current node's +// conditions and returns true if any change happens. The heartbeat timestamp is +// ignored. +func nodeConditionsHaveChanged(originalConditions []v1.NodeCondition, conditions []v1.NodeCondition) bool { + if len(originalConditions) != len(conditions) { + return true + } + + originalConditionsCopy := make([]v1.NodeCondition, 0, len(originalConditions)) + originalConditionsCopy = append(originalConditionsCopy, originalConditions...) + conditionsCopy := make([]v1.NodeCondition, 0, len(conditions)) + conditionsCopy = append(conditionsCopy, conditions...) + + sort.SliceStable(originalConditionsCopy, func(i, j int) bool { return originalConditionsCopy[i].Type < originalConditionsCopy[j].Type }) + sort.SliceStable(conditionsCopy, func(i, j int) bool { return conditionsCopy[i].Type < conditionsCopy[j].Type }) + + replacedheartbeatTime := metav1.Time{} + for i := range conditionsCopy { + originalConditionsCopy[i].LastHeartbeatTime = replacedheartbeatTime + conditionsCopy[i].LastHeartbeatTime = replacedheartbeatTime + if !apiequality.Semantic.DeepEqual(&originalConditionsCopy[i], &conditionsCopy[i]) { + return true + } + } + return false +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go index 1fed05dfb528..59a2fd60bb49 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go @@ -32,7 +32,6 @@ import ( "strings" "sync" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -41,6 +40,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" utilvalidation "k8s.io/apimachinery/pkg/util/validation" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/api/v1/resource" podshelper "k8s.io/kubernetes/pkg/apis/core/pods" @@ -104,7 +104,7 @@ func (kl *Kubelet) makeBlockVolumes(pod *v1.Pod, container *v1.Container, podVol } vol, ok := podVolumes[device.Name] if !ok || vol.BlockVolumeMapper == nil { - glog.Errorf("Block volume cannot be satisfied for container %q, because the volume is missing or the volume mapper is nil: %+v", container.Name, device) + klog.Errorf("Block volume cannot be satisfied for container %q, because the volume is missing or the volume mapper is nil: %+v", container.Name, device) return nil, fmt.Errorf("cannot find volume %q to pass into container %q", device.Name, container.Name) } // Get a symbolic link associated to a block device under pod device path @@ -118,7 +118,7 @@ func (kl *Kubelet) makeBlockVolumes(pod *v1.Pod, container *v1.Container, podVol if vol.ReadOnly { permission = "r" } - 
glog.V(4).Infof("Device will be attached to container %q. Path on host: %v", container.Name, symlinkPath) + klog.V(4).Infof("Device will be attached to container %q. Path on host: %v", container.Name, symlinkPath) devices = append(devices, kubecontainer.DeviceInfo{PathOnHost: symlinkPath, PathInContainer: device.DevicePath, Permissions: permission}) } } @@ -128,7 +128,6 @@ func (kl *Kubelet) makeBlockVolumes(pod *v1.Pod, container *v1.Container, podVol // makeMounts determines the mount points for the given container. func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, hostDomain, podIP string, podVolumes kubecontainer.VolumeMap, mounter mountutil.Interface, expandEnvs []kubecontainer.EnvVar) ([]kubecontainer.Mount, func(), error) { - // Kubernetes only mounts on /etc/hosts if: // - container is not an infrastructure (pause) container // - container is not already mounting on /etc/hosts @@ -136,15 +135,15 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h // Kubernetes will not mount /etc/hosts if: // - when the Pod sandbox is being created, its IP is still unknown. Hence, PodIP will not have been set. mountEtcHostsFile := len(podIP) > 0 && runtime.GOOS != "windows" - glog.V(3).Infof("container: %v/%v/%v podIP: %q creating hosts mount: %v", pod.Namespace, pod.Name, container.Name, podIP, mountEtcHostsFile) + klog.V(3).Infof("container: %v/%v/%v podIP: %q creating hosts mount: %v", pod.Namespace, pod.Name, container.Name, podIP, mountEtcHostsFile) mounts := []kubecontainer.Mount{} - var cleanupAction func() = nil + var cleanupAction func() for i, mount := range container.VolumeMounts { // do not mount /etc/hosts if container is already mounting on the path mountEtcHostsFile = mountEtcHostsFile && (mount.MountPath != etcHostsPath) vol, ok := podVolumes[mount.Name] if !ok || vol.Mounter == nil { - glog.Errorf("Mount cannot be satisfied for container %q, because the volume is missing or the volume mounter is nil: %+v", container.Name, mount) + klog.Errorf("Mount cannot be satisfied for container %q, because the volume is missing or the volume mounter is nil: %+v", container.Name, mount) return nil, cleanupAction, fmt.Errorf("cannot find volume %q to mount into container %q", mount.Name, container.Name) } @@ -183,7 +182,7 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h hostPath = filepath.Join(volumePath, mount.SubPath) if subPathExists, err := mounter.ExistsPath(hostPath); err != nil { - glog.Errorf("Could not determine if subPath %s exists; will not attempt to change its permissions", hostPath) + klog.Errorf("Could not determine if subPath %s exists; will not attempt to change its permissions", hostPath) } else if !subPathExists { // Create the sub path now because if it's auto-created later when referenced, it may have an // incorrect ownership and mode. 
For example, the sub path directory must have at least g+rwx @@ -196,7 +195,7 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h } if err := mounter.SafeMakeDir(mount.SubPath, volumePath, perm); err != nil { // Don't pass detailed error back to the user because it could give information about host filesystem - glog.Errorf("failed to create subPath directory for volumeMount %q of container %q: %v", mount.Name, container.Name, err) + klog.Errorf("failed to create subPath directory for volumeMount %q of container %q: %v", mount.Name, container.Name, err) return nil, cleanupAction, fmt.Errorf("failed to create subPath directory for volumeMount %q of container %q", mount.Name, container.Name) } } @@ -210,19 +209,19 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h }) if err != nil { // Don't pass detailed error back to the user because it could give information about host filesystem - glog.Errorf("failed to prepare subPath for volumeMount %q of container %q: %v", mount.Name, container.Name, err) + klog.Errorf("failed to prepare subPath for volumeMount %q of container %q: %v", mount.Name, container.Name, err) return nil, cleanupAction, fmt.Errorf("failed to prepare subPath for volumeMount %q of container %q", mount.Name, container.Name) } } // Docker Volume Mounts fail on Windows if it is not of the form C:/ - containerPath := mount.MountPath - if runtime.GOOS == "windows" { - if (strings.HasPrefix(hostPath, "/") || strings.HasPrefix(hostPath, "\\")) && !strings.Contains(hostPath, ":") { - hostPath = "c:" + hostPath - } + if volumeutil.IsWindowsLocalPath(runtime.GOOS, hostPath) { + hostPath = volumeutil.MakeAbsolutePath(runtime.GOOS, hostPath) } - if !filepath.IsAbs(containerPath) { + + containerPath := mount.MountPath + // IsAbs returns false for UNC path/SMB shares/named pipes in Windows. 
So check for those specifically and skip MakeAbsolutePath + if !volumeutil.IsWindowsUNCPath(runtime.GOOS, containerPath) && !filepath.IsAbs(containerPath) { containerPath = volumeutil.MakeAbsolutePath(runtime.GOOS, containerPath) } @@ -230,9 +229,9 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h if err != nil { return nil, cleanupAction, err } - glog.V(5).Infof("Pod %q container %q mount %q has propagation %q", format.Pod(pod), container.Name, mount.Name, propagation) + klog.V(5).Infof("Pod %q container %q mount %q has propagation %q", format.Pod(pod), container.Name, mount.Name, propagation) - mustMountRO := vol.Mounter.GetAttributes().ReadOnly && utilfeature.DefaultFeatureGate.Enabled(features.ReadOnlyAPIDataVolumes) + mustMountRO := vol.Mounter.GetAttributes().ReadOnly mounts = append(mounts, kubecontainer.Mount{ Name: mount.Name, @@ -263,10 +262,6 @@ func translateMountPropagation(mountMode *v1.MountPropagationMode) (runtimeapi.M return runtimeapi.MountPropagation_PROPAGATION_PRIVATE, nil } - if !utilfeature.DefaultFeatureGate.Enabled(features.MountPropagation) { - // mount propagation is disabled, use private as in the old versions - return runtimeapi.MountPropagation_PROPAGATION_PRIVATE, nil - } switch { case mountMode == nil: // PRIVATE is the default @@ -361,11 +356,9 @@ func hostsEntriesFromHostAliases(hostAliases []v1.HostAlias) []byte { var buffer bytes.Buffer buffer.WriteString("\n") buffer.WriteString("# Entries added by HostAliases.\n") - // write each IP/hostname pair as an entry into hosts file + // for each IP, write all aliases onto single line in hosts file for _, hostAlias := range hostAliases { - for _, hostname := range hostAlias.Hostnames { - buffer.WriteString(fmt.Sprintf("%s\t%s\n", hostAlias.IP, hostname)) - } + buffer.WriteString(fmt.Sprintf("%s\t%s\n", hostAlias.IP, strings.Join(hostAlias.Hostnames, "\t"))) } return buffer.Bytes() } @@ -378,7 +371,7 @@ func truncatePodHostnameIfNeeded(podName, hostname string) (string, error) { return hostname, nil } truncated := hostname[:hostnameMaxLen] - glog.Errorf("hostname for pod:%q was longer than %d. Truncated hostname to :%q", podName, hostnameMaxLen, truncated) + klog.Errorf("hostname for pod:%q was longer than %d. Truncated hostname to :%q", podName, hostnameMaxLen, truncated) // hostname should not end with '-' or '.' truncated = strings.TrimRight(truncated, "-.") if len(truncated) == 0 { @@ -470,7 +463,7 @@ func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Contai if len(container.TerminationMessagePath) != 0 && runtime.GOOS != "windows" { p := kl.getPodContainerDir(pod.UID, container.Name) if err := os.MkdirAll(p, 0750); err != nil { - glog.Errorf("Error on creating %q: %v", p, err) + klog.Errorf("Error on creating %q: %v", p, err) } else { opts.PodContainerDir = p } @@ -488,7 +481,7 @@ var masterServices = sets.NewString("kubernetes") // getServiceEnvVarMap makes a map[string]string of env vars for services a // pod in namespace ns should see. 
-func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) { +func (kl *Kubelet) getServiceEnvVarMap(ns string, enableServiceLinks bool) (map[string]string, error) { var ( serviceMap = make(map[string]*v1.Service) m = make(map[string]string) @@ -514,19 +507,16 @@ } serviceName := service.Name - switch service.Namespace { - // for the case whether the master service namespace is the namespace the pod - // is in, the pod should receive all the services in the namespace. - // - // ordering of the case clauses below enforces this - case ns: - serviceMap[serviceName] = service - case kl.masterServiceNamespace: - if masterServices.Has(serviceName) { - if _, exists := serviceMap[serviceName]; !exists { - serviceMap[serviceName] = service - } + // We always want to add environment variables for master services + // from the master service namespace, even if enableServiceLinks is false. + // We also add environment variables for other services in the same + // namespace, if enableServiceLinks is true. + if service.Namespace == kl.masterServiceNamespace && masterServices.Has(serviceName) { + if _, exists := serviceMap[serviceName]; !exists { + serviceMap[serviceName] = service } + } else if service.Namespace == ns && enableServiceLinks { + serviceMap[serviceName] = service } } @@ -553,7 +543,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container // To avoid this users can: (1) wait between starting a service and starting; or (2) detect // missing service env var and exit and be restarted; or (3) use DNS instead of env vars // and keep trying to resolve the DNS name of the service (recommended). - serviceEnv, err := kl.getServiceEnvVarMap(pod.Namespace) + serviceEnv, err := kl.getServiceEnvVarMap(pod.Namespace, *pod.Spec.EnableServiceLinks) if err != nil { return result, err } @@ -811,7 +801,7 @@ func (kl *Kubelet) killPod(pod *v1.Pod, runningPod *kubecontainer.Pod, status *k return err } if err := kl.containerManager.UpdateQOSCgroups(); err != nil { - glog.V(2).Infof("Failed to update QoS cgroups while killing pod: %v", err) + klog.V(2).Infof("Failed to update QoS cgroups while killing pod: %v", err) } return nil } @@ -839,7 +829,7 @@ func (kl *Kubelet) getPullSecretsForPod(pod *v1.Pod) []v1.Secret { for _, secretRef := range pod.Spec.ImagePullSecrets { secret, err := kl.secretManager.GetSecret(pod.Namespace, secretRef.Name) if err != nil { - glog.Warningf("Unable to retrieve pull secret %s/%s for %s/%s due to %v. The image pull may not succeed.", pod.Namespace, secretRef.Name, pod.Namespace, pod.Name, err) + klog.Warningf("Unable to retrieve pull secret %s/%s for %s/%s due to %v.
The image pull may not succeed.", pod.Namespace, secretRef.Name, pod.Namespace, pod.Name, err) continue } @@ -893,13 +883,13 @@ func (kl *Kubelet) IsPodDeleted(uid types.UID) bool { func (kl *Kubelet) PodResourcesAreReclaimed(pod *v1.Pod, status v1.PodStatus) bool { if !notRunning(status.ContainerStatuses) { // We shouldn't delete pods that still have running containers - glog.V(3).Infof("Pod %q is terminated, but some containers are still running", format.Pod(pod)) + klog.V(3).Infof("Pod %q is terminated, but some containers are still running", format.Pod(pod)) return false } // pod's containers should be deleted runtimeStatus, err := kl.podCache.Get(pod.UID) if err != nil { - glog.V(3).Infof("Pod %q is terminated, Error getting runtimeStatus from the podCache: %s", format.Pod(pod), err) + klog.V(3).Infof("Pod %q is terminated, Error getting runtimeStatus from the podCache: %s", format.Pod(pod), err) return false } if len(runtimeStatus.ContainerStatuses) > 0 { @@ -907,18 +897,18 @@ func (kl *Kubelet) PodResourcesAreReclaimed(pod *v1.Pod, status v1.PodStatus) bo for _, status := range runtimeStatus.ContainerStatuses { statusStr += fmt.Sprintf("%+v ", *status) } - glog.V(3).Infof("Pod %q is terminated, but some containers have not been cleaned up: %s", format.Pod(pod), statusStr) + klog.V(3).Infof("Pod %q is terminated, but some containers have not been cleaned up: %s", format.Pod(pod), statusStr) return false } if kl.podVolumesExist(pod.UID) && !kl.keepTerminatedPodVolumes { // We shouldn't delete pods whose volumes have not been cleaned up if we are not keeping terminated pod volumes - glog.V(3).Infof("Pod %q is terminated, but some volumes have not been cleaned up", format.Pod(pod)) + klog.V(3).Infof("Pod %q is terminated, but some volumes have not been cleaned up", format.Pod(pod)) return false } if kl.kubeletConfiguration.CgroupsPerQOS { pcm := kl.containerManager.NewPodContainerManager() if pcm.Exists(pod) { - glog.V(3).Infof("Pod %q is terminated, but pod cgroup sandbox has not been cleaned up", format.Pod(pod)) + klog.V(3).Infof("Pod %q is terminated, but pod cgroup sandbox has not been cleaned up", format.Pod(pod)) return false } } @@ -1017,7 +1007,7 @@ func (kl *Kubelet) HandlePodCleanups() error { runningPods, err := kl.runtimeCache.GetPods() if err != nil { - glog.Errorf("Error listing containers: %#v", err) + klog.Errorf("Error listing containers: %#v", err) return err } for _, pod := range runningPods { @@ -1033,7 +1023,7 @@ func (kl *Kubelet) HandlePodCleanups() error { // TODO: Evaluate the performance impact of bypassing the runtime cache. runningPods, err = kl.containerRuntime.GetPods(false) if err != nil { - glog.Errorf("Error listing containers: %#v", err) + klog.Errorf("Error listing containers: %#v", err) return err } @@ -1046,7 +1036,7 @@ func (kl *Kubelet) HandlePodCleanups() error { // We want all cleanup tasks to be run even if one of them failed. So // we just log an error here and continue other cleanup tasks. // This also applies to the other clean up tasks. - glog.Errorf("Failed cleaning up orphaned pod directories: %v", err) + klog.Errorf("Failed cleaning up orphaned pod directories: %v", err) } // Remove any orphaned mirror pods.
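The podKiller hunk below keeps a mutex-guarded set of pod IDs with a kill already in flight, so the loop never spawns two killer goroutines for the same pod; each goroutine removes its ID once the kill returns. A minimal sketch of that guard pattern, with illustrative names (killer and kill are not kubelet identifiers):

package main

import (
	"fmt"
	"sync"
)

// killer tracks pods whose kill is in flight. Illustrative stand-in
// for the lock plus killing set used by podKiller.
type killer struct {
	mu      sync.Mutex
	killing map[string]bool
}

// kill runs do in a goroutine unless a kill for id is already in
// flight; the guard entry is removed when do returns.
func (k *killer) kill(id string, do func()) {
	k.mu.Lock()
	if k.killing[id] {
		k.mu.Unlock()
		return // duplicate request while a kill is in flight
	}
	k.killing[id] = true
	k.mu.Unlock()
	go func() {
		defer func() {
			k.mu.Lock()
			delete(k.killing, id)
			k.mu.Unlock()
		}()
		do()
	}()
}

func main() {
	k := &killer{killing: map[string]bool{}}
	block := make(chan struct{})
	done := make(chan struct{})
	k.kill("pod-1", func() { <-block; close(done) })                              // first kill, held in flight
	k.kill("pod-1", func() { fmt.Println("never printed: kill already in flight") }) // dropped by the guard
	close(block) // let the first kill finish
	<-done
	fmt.Println("pod-1 killed exactly once")
}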
@@ -1080,10 +1070,10 @@ func (kl *Kubelet) podKiller() { if !exists { go func(apiPod *v1.Pod, runningPod *kubecontainer.Pod) { - glog.V(2).Infof("Killing unwanted pod %q", runningPod.Name) + klog.V(2).Infof("Killing unwanted pod %q", runningPod.Name) err := kl.killPod(apiPod, runningPod, nil, nil) if err != nil { - glog.Errorf("Failed killing the pod %q: %v", runningPod.Name, err) + klog.Errorf("Failed killing the pod %q: %v", runningPod.Name, err) } lock.Lock() killing.Delete(string(runningPod.ID)) @@ -1289,7 +1279,7 @@ func getPhase(spec *v1.PodSpec, info []v1.ContainerStatus) v1.PodPhase { case pendingInitialization > 0: fallthrough case waiting > 0: - glog.V(5).Infof("pod waiting > 0, pending") + klog.V(5).Infof("pod waiting > 0, pending") // One or more containers has not been started return v1.PodPending case running > 0 && unknown == 0: @@ -1316,7 +1306,7 @@ func getPhase(spec *v1.PodSpec, info []v1.ContainerStatus) v1.PodPhase { // and in the process of restarting return v1.PodRunning default: - glog.V(5).Infof("pod default case, pending") + klog.V(5).Infof("pod default case, pending") return v1.PodPending } } @@ -1324,21 +1314,20 @@ func getPhase(spec *v1.PodSpec, info []v1.ContainerStatus) v1.PodPhase { // generateAPIPodStatus creates the final API pod status for a pod, given the // internal pod status. func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus) v1.PodStatus { - glog.V(3).Infof("Generating status for %q", format.Pod(pod)) + klog.V(3).Infof("Generating status for %q", format.Pod(pod)) + + s := kl.convertStatusToAPIStatus(pod, podStatus) // check if an internal module has requested the pod is evicted. for _, podSyncHandler := range kl.PodSyncHandlers { if result := podSyncHandler.ShouldEvict(pod); result.Evict { - return v1.PodStatus{ - Phase: v1.PodFailed, - Reason: result.Reason, - Message: result.Message, - } + s.Phase = v1.PodFailed + s.Reason = result.Reason + s.Message = result.Message + return *s } } - s := kl.convertStatusToAPIStatus(pod, podStatus) - // Assume info is ready to process spec := &pod.Spec allStatus := append(append([]v1.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...) @@ -1347,7 +1336,7 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po if pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded { // API server shows terminal phase; transitions are not allowed if s.Phase != pod.Status.Phase { - glog.Errorf("Pod attempted illegal phase transition from %s to %s: %v", pod.Status.Phase, s.Phase, s) + klog.Errorf("Pod attempted illegal phase transition from %s to %s: %v", pod.Status.Phase, s.Phase, s) // Force back to phase from the API server s.Phase = pod.Status.Phase } @@ -1367,7 +1356,7 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po if kl.kubeClient != nil { hostIP, err := kl.getHostIPAnyWay() if err != nil { - glog.V(4).Infof("Cannot get host IP: %v", err) + klog.V(4).Infof("Cannot get host IP: %v", err) } else { s.HostIP = hostIP.String() if kubecontainer.IsHostNetworkPod(pod) && s.PodIP == "" { @@ -1674,13 +1663,13 @@ func (kl *Kubelet) cleanupOrphanedPodCgroups(cgroupPods map[types.UID]cm.CgroupN // process in the cgroup to the minimum value while we wait. if the kubelet // is configured to keep terminated volumes, we will delete the cgroup and not block. 
if podVolumesExist := kl.podVolumesExist(uid); podVolumesExist && !kl.keepTerminatedPodVolumes { - glog.V(3).Infof("Orphaned pod %q found, but volumes not yet removed. Reducing cpu to minimum", uid) + klog.V(3).Infof("Orphaned pod %q found, but volumes not yet removed. Reducing cpu to minimum", uid) if err := pcm.ReduceCPULimits(val); err != nil { - glog.Warningf("Failed to reduce cpu time for pod %q pending volume cleanup due to %v", uid, err) + klog.Warningf("Failed to reduce cpu time for pod %q pending volume cleanup due to %v", uid, err) } continue } - glog.V(3).Infof("Orphaned pod %q found, removing pod cgroups", uid) + klog.V(3).Infof("Orphaned pod %q found, removing pod cgroups", uid) // Destroy all cgroups of pod that should not be running, // by first killing all the attached processes to these cgroups. // We ignore errors thrown by the method, as the housekeeping loop would @@ -1743,13 +1732,13 @@ func (kl *Kubelet) hasHostMountPVC(pod *v1.Pod) bool { if volume.PersistentVolumeClaim != nil { pvc, err := kl.kubeClient.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(volume.PersistentVolumeClaim.ClaimName, metav1.GetOptions{}) if err != nil { - glog.Warningf("unable to retrieve pvc %s:%s - %v", pod.Namespace, volume.PersistentVolumeClaim.ClaimName, err) + klog.Warningf("unable to retrieve pvc %s:%s - %v", pod.Namespace, volume.PersistentVolumeClaim.ClaimName, err) continue } if pvc != nil { referencedVolume, err := kl.kubeClient.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{}) if err != nil { - glog.Warningf("unable to retrieve pv %s - %v", pvc.Spec.VolumeName, err) + klog.Warningf("unable to retrieve pv %s - %v", pvc.Spec.VolumeName, err) continue } if referencedVolume != nil && referencedVolume.Spec.HostPath != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_resources.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_resources.go index dc47a85880e3..be6d29738bac 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_resources.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_resources.go @@ -19,7 +19,7 @@ package kubelet import ( "fmt" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/api/v1/resource" @@ -42,7 +42,7 @@ func (kl *Kubelet) defaultPodLimitsForDownwardAPI(pod *v1.Pod, container *v1.Con return nil, nil, fmt.Errorf("failed to find node object, expected a node") } allocatable := node.Status.Allocatable - glog.Infof("allocatable: %v", allocatable) + klog.Infof("allocatable: %v", allocatable) outputPod := pod.DeepCopy() for idx := range outputPod.Spec.Containers { resource.MergeContainerResourceLimits(&outputPod.Spec.Containers[idx], allocatable) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes.go index 09179fec8041..7681ee6529e9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes.go @@ -19,11 +19,11 @@ package kubelet import ( "fmt" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/util/removeall" "k8s.io/kubernetes/pkg/volume" @@ -62,11 +62,11 @@ func (kl *Kubelet) 
podVolumesExist(podUID types.UID) bool { // There are some volume plugins such as flexvolume might not have mounts. See issue #61229 volumePaths, err := kl.getMountedVolumePathListFromDisk(podUID) if err != nil { - glog.Errorf("pod %q found, but error %v occurred during checking mounted volumes from disk", podUID, err) + klog.Errorf("pod %q found, but error %v occurred during checking mounted volumes from disk", podUID, err) return true } if len(volumePaths) > 0 { - glog.V(4).Infof("pod %q found, but volumes are still mounted on disk %v", podUID, volumePaths) + klog.V(4).Infof("pod %q found, but volumes are still mounted on disk %v", podUID, volumePaths) return true } @@ -85,7 +85,7 @@ func (kl *Kubelet) newVolumeMounterFromPlugins(spec *volume.Spec, pod *v1.Pod, o if err != nil { return nil, fmt.Errorf("failed to instantiate mounter for volume: %s using plugin: %s with a root cause: %v", spec.Name(), plugin.GetPluginName(), err) } - glog.V(10).Infof("Using volume plugin %q to mount %s", plugin.GetPluginName(), spec.Name()) + klog.V(10).Infof("Using volume plugin %q to mount %s", plugin.GetPluginName(), spec.Name()) return physicalMounter, nil } @@ -115,7 +115,7 @@ func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*v1.Pod, runningPods []*kubecon // If volumes have not been unmounted/detached, do not delete directory. // Doing so may result in corruption of data. if podVolumesExist := kl.podVolumesExist(uid); podVolumesExist { - glog.V(3).Infof("Orphaned pod %q found, but volumes are not cleaned up", uid) + klog.V(3).Infof("Orphaned pod %q found, but volumes are not cleaned up", uid) continue } // If there are still volume directories, do not delete directory @@ -128,18 +128,18 @@ func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*v1.Pod, runningPods []*kubecon orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("Orphaned pod %q found, but volume paths are still present on disk", uid)) continue } - glog.V(3).Infof("Orphaned pod %q found, removing", uid) + klog.V(3).Infof("Orphaned pod %q found, removing", uid) if err := removeall.RemoveAllOneFilesystem(kl.mounter, kl.getPodDir(uid)); err != nil { - glog.Errorf("Failed to remove orphaned pod %q dir; err: %v", uid, err) + klog.Errorf("Failed to remove orphaned pod %q dir; err: %v", uid, err) orphanRemovalErrors = append(orphanRemovalErrors, err) } } logSpew := func(errs []error) { if len(errs) > 0 { - glog.Errorf("%v : There were a total of %v errors similar to this. Turn up verbosity to see them.", errs[0], len(errs)) + klog.Errorf("%v : There were a total of %v errors similar to this. 
Turn up verbosity to see them.", errs[0], len(errs)) for _, err := range errs { - glog.V(5).Infof("Orphan pod: %v", err) + klog.V(5).Infof("Orphan pod: %v", err) } } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/BUILD index 03e386f8086a..55383942ff0d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/BUILD @@ -1,9 +1,6 @@ package(default_visibility = ["//visibility:public"]) -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -33,7 +30,7 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -60,3 +57,15 @@ filegroup( ], tags = ["automanaged"], ) + +go_test( + name = "go_default_test", + srcs = ["controller_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/kubelet/kubeletconfig/checkpoint:go_default_library", + "//pkg/kubelet/kubeletconfig/checkpoint/store:go_default_library", + "//pkg/kubelet/kubeletconfig/status:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + ], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store/fakestore.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store/fakestore.go index 6e442cf2cc20..854294806b0d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store/fakestore.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store/fakestore.go @@ -32,6 +32,11 @@ type fakeStore struct { var _ Store = (*fakeStore)(nil) +// NewFakeStore constructs a fake Store +func NewFakeStore() Store { + return &fakeStore{} +} + func (s *fakeStore) Initialize() error { return fmt.Errorf("Initialize method not supported") } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configsync.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configsync.go index cb92fb7e6545..7a897f3badf0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configsync.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configsync.go @@ -21,7 +21,7 @@ import ( "os" "time" - "github.com/golang/glog" + "k8s.io/klog" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -197,7 +197,7 @@ func restartForNewConfig(eventClient v1core.EventsGetter, nodeName string, sourc // we directly log and send the event, instead of using the event recorder, // because the event recorder won't flush its queue before we exit (we'd lose the event) event := makeEvent(nodeName, apiv1.EventTypeNormal, KubeletConfigChangedEventReason, message) - glog.V(3).Infof("Event(%#v): type: '%v' reason: '%v' %v", event.InvolvedObject, event.Type, event.Reason, event.Message) + klog.V(3).Infof("Event(%#v): type: '%v' reason: '%v' %v", event.InvolvedObject, event.Type, event.Reason, event.Message) if _, err := eventClient.Events(apiv1.NamespaceDefault).Create(event); err != nil { 
utillog.Errorf("failed to send event, error: %v", err) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/controller.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/controller.go index 0dcc36c75231..50b65a67a3ae 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/controller.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/controller.go @@ -309,7 +309,7 @@ func (cc *Controller) graduateAssignedToLastKnownGood() error { } // if the sources are equal, no need to change if assigned == lastKnownGood || - assigned != nil && lastKnownGood != nil && apiequality.Semantic.DeepEqual(assigned, lastKnownGood) { + assigned != nil && lastKnownGood != nil && apiequality.Semantic.DeepEqual(assigned.NodeConfigSource(), lastKnownGood.NodeConfigSource()) { return nil } // update last-known-good diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log/BUILD index 34b20a27e345..f47e2eead47e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log/BUILD @@ -9,7 +9,7 @@ go_library( name = "go_default_library", srcs = ["log.go"], importpath = "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log", - deps = ["//vendor/github.com/golang/glog:go_default_library"], + deps = ["//vendor/k8s.io/klog:go_default_library"], ) filegroup( diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log/log.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log/log.go index b4ecfe4dc996..6e68b46a0fe6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log/log.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log/log.go @@ -19,7 +19,7 @@ package log import ( "fmt" - "github.com/golang/glog" + "k8s.io/klog" ) const logFmt = "kubelet config controller: %s" @@ -33,7 +33,7 @@ func Errorf(format string, args ...interface{}) { } else { s = format } - glog.ErrorDepth(1, fmt.Sprintf(logFmt, s)) + klog.ErrorDepth(1, fmt.Sprintf(logFmt, s)) } // Infof shim that inserts "kubelet config controller" at the beginning of the log message, @@ -45,5 +45,5 @@ func Infof(format string, args ...interface{}) { } else { s = format } - glog.InfoDepth(1, fmt.Sprintf(logFmt, s)) + klog.InfoDepth(1, fmt.Sprintf(logFmt, s)) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/BUILD index d1045f5a4afd..361c34871156 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/BUILD @@ -54,21 +54,21 @@ go_library( "//pkg/util/parsers:go_default_library", "//pkg/util/selinux:go_default_library", "//pkg/util/tail:go_default_library", - "//pkg/util/version:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/client-go/tools/reference:go_default_library", "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", "//vendor/github.com/armon/circbuf:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:linux": [ "//pkg/kubelet/qos:go_default_library", @@ -95,6 +95,7 @@ go_test( "kuberuntime_sandbox_test.go", "labels_test.go", "legacy_test.go", + "main_test.go", "security_context_test.go", ], embed = [":go_default_library"], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go index 6eb64ee7b020..683f13043173 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go @@ -70,7 +70,7 @@ func (f *fakePodStateProvider) IsPodTerminated(uid types.UID) bool { return !found } -func NewFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageService internalapi.ImageManagerService, machineInfo *cadvisorapi.MachineInfo, osInterface kubecontainer.OSInterface, runtimeHelper kubecontainer.RuntimeHelper, keyring credentialprovider.DockerKeyring) (*kubeGenericRuntimeManager, error) { +func newFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageService internalapi.ImageManagerService, machineInfo *cadvisorapi.MachineInfo, osInterface kubecontainer.OSInterface, runtimeHelper kubecontainer.RuntimeHelper, keyring credentialprovider.DockerKeyring) (*kubeGenericRuntimeManager, error) { recorder := &record.FakeRecorder{} kubeRuntimeManager := &kubeGenericRuntimeManager{ recorder: recorder, @@ -93,7 +93,7 @@ func NewFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageS return nil, err } - kubeRuntimeManager.containerGC = NewContainerGC(runtimeService, newFakePodStateProvider(), kubeRuntimeManager) + kubeRuntimeManager.containerGC = newContainerGC(runtimeService, newFakePodStateProvider(), kubeRuntimeManager) kubeRuntimeManager.runtimeName = typedVersion.RuntimeName kubeRuntimeManager.imagePuller = images.NewImageManager( kubecontainer.FilterEventRecorder(recorder), diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/helpers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/helpers.go index fd977d5a16e2..056e4d0ae75f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/helpers.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/helpers.go @@ -22,10 +22,10 @@ import ( "strconv" "strings" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" "k8s.io/kubernetes/pkg/features" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -83,7 +83,7 @@ func 
toRuntimeProtocol(protocol v1.Protocol) runtimeapi.Protocol { return runtimeapi.Protocol_SCTP } - glog.Warningf("Unknown protocol %q: defaulting to TCP", protocol) + klog.Warningf("Unknown protocol %q: defaulting to TCP", protocol) return runtimeapi.Protocol_TCP } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go index 5575c45181e9..9407a03c63a1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go @@ -33,7 +33,7 @@ import ( "google.golang.org/grpc" "github.com/armon/circbuf" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -50,10 +50,14 @@ import ( ) var ( + // ErrCreateContainerConfig - failed to create container config ErrCreateContainerConfig = errors.New("CreateContainerConfigError") - ErrCreateContainer = errors.New("CreateContainerError") - ErrPreStartHook = errors.New("PreStartHookError") - ErrPostStartHook = errors.New("PostStartHookError") + // ErrCreateContainer - failed to create container + ErrCreateContainer = errors.New("CreateContainerError") + // ErrPreStartHook - failed to execute PreStartHook + ErrPreStartHook = errors.New("PreStartHookError") + // ErrPostStartHook - failed to execute PostStartHook + ErrPostStartHook = errors.New("PostStartHookError") ) // recordContainerEvent should be used by the runtime manager for all container related events. @@ -64,7 +68,7 @@ var ( func (m *kubeGenericRuntimeManager) recordContainerEvent(pod *v1.Pod, container *v1.Container, containerID, eventType, reason, message string, args ...interface{}) { ref, err := kubecontainer.GenerateContainerRef(pod, container) if err != nil { - glog.Errorf("Can't make a ref to pod %q, container %v: %v", format.Pod(pod), container.Name, err) + klog.Errorf("Can't make a ref to pod %q, container %v: %v", format.Pod(pod), container.Name, err) return } eventMessage := message @@ -97,9 +101,9 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb // Step 2: create the container. ref, err := kubecontainer.GenerateContainerRef(pod, container) if err != nil { - glog.Errorf("Can't make a ref to pod %q, container %v: %v", format.Pod(pod), container.Name, err) + klog.Errorf("Can't make a ref to pod %q, container %v: %v", format.Pod(pod), container.Name, err) } - glog.V(4).Infof("Generating ref for container %s: %#v", container.Name, ref) + klog.V(4).Infof("Generating ref for container %s: %#v", container.Name, ref) // For a new container, the RestartCount should be 0 restartCount := 0 @@ -158,7 +162,7 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb // to create it in the first place. it happens when journald logging driver is used with docker. 
if _, err := m.osInterface.Stat(containerLog); !os.IsNotExist(err) { if err := m.osInterface.Symlink(containerLog, legacySymlink); err != nil { - glog.Errorf("Failed to create legacy symbolic link %q to container %q log %q: %v", + klog.Errorf("Failed to create legacy symbolic link %q to container %q log %q: %v", legacySymlink, containerID, containerLog, err) } } @@ -173,7 +177,7 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb if handlerErr != nil { m.recordContainerEvent(pod, container, kubeContainerID.ID, v1.EventTypeWarning, events.FailedPostStartHook, msg) if err := m.killContainer(pod, kubeContainerID, container.Name, "FailedPostStartHook", nil); err != nil { - glog.Errorf("Failed to kill container %q(id=%q) in pod %q: %v, %v", + klog.Errorf("Failed to kill container %q(id=%q) in pod %q: %v, %v", container.Name, kubeContainerID.String(), format.Pod(pod), ErrPostStartHook, err) } return msg, fmt.Errorf("%s: %v", ErrPostStartHook, handlerErr) @@ -328,7 +332,7 @@ func (m *kubeGenericRuntimeManager) getKubeletContainers(allContainers bool) ([] containers, err := m.runtimeService.ListContainers(filter) if err != nil { - glog.Errorf("getKubeletContainers failed: %v", err) + klog.Errorf("getKubeletContainers failed: %v", err) return nil, err } @@ -381,7 +385,7 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n LabelSelector: map[string]string{types.KubernetesPodUIDLabel: string(uid)}, }) if err != nil { - glog.Errorf("ListContainers error: %v", err) + klog.Errorf("ListContainers error: %v", err) return nil, err } @@ -390,7 +394,7 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n for i, c := range containers { status, err := m.runtimeService.ContainerStatus(c.Id) if err != nil { - glog.Errorf("ContainerStatus for %s error: %v", c.Id, err) + klog.Errorf("ContainerStatus for %s error: %v", c.Id, err) return nil, err } cStatus := toKubeContainerStatus(status, m.runtimeName) @@ -457,7 +461,7 @@ func toKubeContainerStatus(status *runtimeapi.ContainerStatus, runtimeName strin // executePreStopHook runs the pre-stop lifecycle hooks if applicable and returns the duration it takes. 
func (m *kubeGenericRuntimeManager) executePreStopHook(pod *v1.Pod, containerID kubecontainer.ContainerID, containerSpec *v1.Container, gracePeriod int64) int64 { - glog.V(3).Infof("Running preStop hook for container %q", containerID.String()) + klog.V(3).Infof("Running preStop hook for container %q", containerID.String()) start := metav1.Now() done := make(chan struct{}) @@ -465,16 +469,16 @@ func (m *kubeGenericRuntimeManager) executePreStopHook(pod *v1.Pod, containerID defer close(done) defer utilruntime.HandleCrash() if msg, err := m.runner.Run(containerID, pod, containerSpec, containerSpec.Lifecycle.PreStop); err != nil { - glog.Errorf("preStop hook for container %q failed: %v", containerSpec.Name, err) + klog.Errorf("preStop hook for container %q failed: %v", containerSpec.Name, err) m.recordContainerEvent(pod, containerSpec, containerID.ID, v1.EventTypeWarning, events.FailedPreStopHook, msg) } }() select { case <-time.After(time.Duration(gracePeriod) * time.Second): - glog.V(2).Infof("preStop hook for container %q did not complete in %d seconds", containerID, gracePeriod) + klog.V(2).Infof("preStop hook for container %q did not complete in %d seconds", containerID, gracePeriod) case <-done: - glog.V(3).Infof("preStop hook for container %q completed", containerID) + klog.V(3).Infof("preStop hook for container %q completed", containerID) } return int64(metav1.Now().Sub(start.Time).Seconds()) @@ -512,8 +516,8 @@ func (m *kubeGenericRuntimeManager) restoreSpecsFromContainerLabels(containerID }, } container = &v1.Container{ - Name: l.ContainerName, - Ports: a.ContainerPorts, + Name: l.ContainerName, + Ports: a.ContainerPorts, TerminationMessagePath: a.TerminationMessagePath, } if a.PreStopHandler != nil { @@ -552,7 +556,7 @@ func (m *kubeGenericRuntimeManager) killContainer(pod *v1.Pod, containerID kubec gracePeriod = *pod.Spec.TerminationGracePeriodSeconds } - glog.V(2).Infof("Killing container %q with %d second grace period", containerID.String(), gracePeriod) + klog.V(2).Infof("Killing container %q with %d second grace period", containerID.String(), gracePeriod) // Run internal pre-stop lifecycle hook if err := m.internalLifecycle.PreStopContainer(containerID.ID); err != nil { @@ -569,14 +573,14 @@ func (m *kubeGenericRuntimeManager) killContainer(pod *v1.Pod, containerID kubec } if gracePeriodOverride != nil { gracePeriod = *gracePeriodOverride - glog.V(3).Infof("Killing container %q, but using %d second grace period override", containerID, gracePeriod) + klog.V(3).Infof("Killing container %q, but using %d second grace period override", containerID, gracePeriod) } err := m.runtimeService.StopContainer(containerID.ID, gracePeriod) if err != nil { - glog.Errorf("Container %q termination failed with gracePeriod %d: %v", containerID.String(), gracePeriod, err) + klog.Errorf("Container %q termination failed with gracePeriod %d: %v", containerID.String(), gracePeriod, err) } else { - glog.V(3).Infof("Container %q exited normally", containerID.String()) + klog.V(3).Infof("Container %q exited normally", containerID.String()) } message := fmt.Sprintf("Killing container with id %s", containerID.String()) @@ -639,7 +643,7 @@ func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(pod *v1.Pod, continue } // prune all other init containers that match this container name - glog.V(4).Infof("Removing init container %q instance %q %d", status.Name, status.ID.ID, count) + klog.V(4).Infof("Removing init container %q instance %q %d", status.Name, status.ID.ID, count) if err := 
m.removeContainer(status.ID.ID); err != nil { utilruntime.HandleError(fmt.Errorf("failed to remove pod init container %q: %v; Skipping pod %q", status.Name, err, format.Pod(pod))) continue @@ -649,7 +653,7 @@ func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(pod *v1.Pod, if _, ok := m.containerRefManager.GetRef(status.ID); ok { m.containerRefManager.ClearRef(status.ID) } else { - glog.Warningf("No ref for container %q", status.ID) + klog.Warningf("No ref for container %q", status.ID) } } } @@ -671,7 +675,7 @@ func (m *kubeGenericRuntimeManager) purgeInitContainers(pod *v1.Pod, podStatus * } count++ // Purge all init containers that match this container name - glog.V(4).Infof("Removing init container %q instance %q %d", status.Name, status.ID.ID, count) + klog.V(4).Infof("Removing init container %q instance %q %d", status.Name, status.ID.ID, count) if err := m.removeContainer(status.ID.ID); err != nil { utilruntime.HandleError(fmt.Errorf("failed to remove pod init container %q: %v; Skipping pod %q", status.Name, err, format.Pod(pod))) continue @@ -680,7 +684,7 @@ func (m *kubeGenericRuntimeManager) purgeInitContainers(pod *v1.Pod, podStatus * if _, ok := m.containerRefManager.GetRef(status.ID); ok { m.containerRefManager.ClearRef(status.ID) } else { - glog.Warningf("No ref for container %q", status.ID) + klog.Warningf("No ref for container %q", status.ID) } } } @@ -735,7 +739,7 @@ func findNextInitContainerToRun(pod *v1.Pod, podStatus *kubecontainer.PodStatus) func (m *kubeGenericRuntimeManager) GetContainerLogs(ctx context.Context, pod *v1.Pod, containerID kubecontainer.ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) { status, err := m.runtimeService.ContainerStatus(containerID.ID) if err != nil { - glog.V(4).Infof("failed to get container status for %v: %v", containerID.String(), err) + klog.V(4).Infof("failed to get container status for %v: %v", containerID.String(), err) return fmt.Errorf("Unable to retrieve container logs for %v", containerID.String()) } return m.ReadLogs(ctx, status.GetLogPath(), containerID.ID, logOptions, stdout, stderr) @@ -791,7 +795,7 @@ func (m *kubeGenericRuntimeManager) RunInContainer(id kubecontainer.ContainerID, // Notice that we assume that the container should only be removed in non-running state, and // it will not write container logs anymore in that state. func (m *kubeGenericRuntimeManager) removeContainer(containerID string) error { - glog.V(4).Infof("Removing container %q", containerID) + klog.V(4).Infof("Removing container %q", containerID) // Call internal container post-stop lifecycle hook. if err := m.internalLifecycle.PostStopContainer(containerID); err != nil { return err diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_gc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_gc.go index a66a07370cbb..e2ba00d0645a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_gc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_gc.go @@ -23,9 +23,9 @@ import ( "sort" "time" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog" internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -39,7 +39,7 @@ type containerGC struct { } // NewContainerGC creates a new containerGC. 
-func NewContainerGC(client internalapi.RuntimeService, podStateProvider podStateProvider, manager *kubeGenericRuntimeManager) *containerGC { +func newContainerGC(client internalapi.RuntimeService, podStateProvider podStateProvider, manager *kubeGenericRuntimeManager) *containerGC { return &containerGC{ client: client, manager: manager, @@ -123,7 +123,7 @@ func (cgc *containerGC) removeOldestN(containers []containerGCInfo, toRemove int numToKeep := len(containers) - toRemove for i := len(containers) - 1; i >= numToKeep; i-- { if err := cgc.manager.removeContainer(containers[i].id); err != nil { - glog.Errorf("Failed to remove container %q: %v", containers[i].id, err) + klog.Errorf("Failed to remove container %q: %v", containers[i].id, err) } } @@ -145,16 +145,16 @@ func (cgc *containerGC) removeOldestNSandboxes(sandboxes []sandboxGCInfo, toRemo // removeSandbox removes the sandbox by sandboxID. func (cgc *containerGC) removeSandbox(sandboxID string) { - glog.V(4).Infof("Removing sandbox %q", sandboxID) + klog.V(4).Infof("Removing sandbox %q", sandboxID) // In normal cases, kubelet should've already called StopPodSandbox before // GC kicks in. To guard against the rare cases where this is not true, try // stopping the sandbox before removing it. if err := cgc.client.StopPodSandbox(sandboxID); err != nil { - glog.Errorf("Failed to stop sandbox %q before removing: %v", sandboxID, err) + klog.Errorf("Failed to stop sandbox %q before removing: %v", sandboxID, err) return } if err := cgc.client.RemovePodSandbox(sandboxID); err != nil { - glog.Errorf("Failed to remove sandbox %q: %v", sandboxID, err) + klog.Errorf("Failed to remove sandbox %q: %v", sandboxID, err) } } @@ -328,7 +328,7 @@ func (cgc *containerGC) evictPodLogsDirectories(allSourcesReady bool) error { } err := osInterface.RemoveAll(filepath.Join(podLogsRootDirectory, name)) if err != nil { - glog.Errorf("Failed to remove pod logs directory %q: %v", name, err) + klog.Errorf("Failed to remove pod logs directory %q: %v", name, err) } } } @@ -340,7 +340,7 @@ func (cgc *containerGC) evictPodLogsDirectories(allSourcesReady bool) error { if _, err := osInterface.Stat(logSymlink); os.IsNotExist(err) { err := osInterface.Remove(logSymlink) if err != nil { - glog.Errorf("Failed to remove container log dead symlink %q: %v", logSymlink, err) + klog.Errorf("Failed to remove container log dead symlink %q: %v", logSymlink, err) } } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_image.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_image.go index a8f2c1c060be..60fc6201c018 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_image.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_image.go @@ -17,9 +17,9 @@ limitations under the License. 
package kuberuntime import ( - "github.com/golang/glog" "k8s.io/api/core/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog" "k8s.io/kubernetes/pkg/credentialprovider" credentialprovidersecrets "k8s.io/kubernetes/pkg/credentialprovider/secrets" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" @@ -44,11 +44,11 @@ func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pul imgSpec := &runtimeapi.ImageSpec{Image: img} creds, withCredentials := keyring.Lookup(repoToPull) if !withCredentials { - glog.V(3).Infof("Pulling image %q without credentials", img) + klog.V(3).Infof("Pulling image %q without credentials", img) imageRef, err := m.imageService.PullImage(imgSpec, nil) if err != nil { - glog.Errorf("Pull image %q failed: %v", img, err) + klog.Errorf("Pull image %q failed: %v", img, err) return "", err } @@ -84,7 +84,7 @@ func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pul func (m *kubeGenericRuntimeManager) GetImageRef(image kubecontainer.ImageSpec) (string, error) { status, err := m.imageService.ImageStatus(&runtimeapi.ImageSpec{Image: image.Image}) if err != nil { - glog.Errorf("ImageStatus for image %q failed: %v", image, err) + klog.Errorf("ImageStatus for image %q failed: %v", image, err) return "", err } if status == nil { @@ -99,7 +99,7 @@ func (m *kubeGenericRuntimeManager) ListImages() ([]kubecontainer.Image, error) allImages, err := m.imageService.ListImages(nil) if err != nil { - glog.Errorf("ListImages failed: %v", err) + klog.Errorf("ListImages failed: %v", err) return nil, err } @@ -119,7 +119,7 @@ func (m *kubeGenericRuntimeManager) ListImages() ([]kubecontainer.Image, error) func (m *kubeGenericRuntimeManager) RemoveImage(image kubecontainer.ImageSpec) error { err := m.imageService.RemoveImage(&runtimeapi.ImageSpec{Image: image.Image}) if err != nil { - glog.Errorf("Remove image %q failed: %v", image.Image, err) + klog.Errorf("Remove image %q failed: %v", image.Image, err) return err } @@ -133,7 +133,7 @@ func (m *kubeGenericRuntimeManager) RemoveImage(image kubecontainer.ImageSpec) e func (m *kubeGenericRuntimeManager) ImageStats() (*kubecontainer.ImageStats, error) { allImages, err := m.imageService.ListImages(nil) if err != nil { - glog.Errorf("ListImages failed: %v", err) + klog.Errorf("ListImages failed: %v", err) return nil, err } stats := &kubecontainer.ImageStats{} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager.go index 2818990b11bf..90039beda094 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager.go @@ -22,13 +22,14 @@ import ( "os" "time" - "github.com/golang/glog" cadvisorapi "github.com/google/cadvisor/info/v1" + "k8s.io/klog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kubetypes "k8s.io/apimachinery/pkg/types" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/client-go/tools/record" ref "k8s.io/client-go/tools/reference" "k8s.io/client-go/util/flowcontrol" @@ -46,7 +47,6 @@ import ( "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/kubelet/util/cache" "k8s.io/kubernetes/pkg/kubelet/util/format" - utilversion "k8s.io/kubernetes/pkg/util/version" ) const ( @@ -125,6 +125,7 @@ type 
kubeGenericRuntimeManager struct { runtimeClassManager *runtimeclass.Manager } +// KubeGenericRuntime is an interface that contains interfaces for container runtime and command. type KubeGenericRuntime interface { kubecontainer.Runtime kubecontainer.StreamingRuntime @@ -180,21 +181,21 @@ func NewKubeGenericRuntimeManager( typedVersion, err := kubeRuntimeManager.runtimeService.Version(kubeRuntimeAPIVersion) if err != nil { - glog.Errorf("Get runtime version failed: %v", err) + klog.Errorf("Get runtime version failed: %v", err) return nil, err } // Only matching kubeRuntimeAPIVersion is supported now // TODO: Runtime API machinery is under discussion at https://github.com/kubernetes/kubernetes/issues/28642 if typedVersion.Version != kubeRuntimeAPIVersion { - glog.Errorf("Runtime api version %s is not supported, only %s is supported now", + klog.Errorf("Runtime api version %s is not supported, only %s is supported now", typedVersion.Version, kubeRuntimeAPIVersion) return nil, ErrVersionNotSupported } kubeRuntimeManager.runtimeName = typedVersion.RuntimeName - glog.Infof("Container runtime %s initialized, version: %s, apiVersion: %s", + klog.Infof("Container runtime %s initialized, version: %s, apiVersion: %s", typedVersion.RuntimeName, typedVersion.RuntimeVersion, typedVersion.RuntimeApiVersion) @@ -204,7 +205,7 @@ func NewKubeGenericRuntimeManager( // new runtime interface if _, err := osInterface.Stat(podLogsRootDirectory); os.IsNotExist(err) { if err := osInterface.MkdirAll(podLogsRootDirectory, 0755); err != nil { - glog.Errorf("Failed to create directory %q: %v", podLogsRootDirectory, err) + klog.Errorf("Failed to create directory %q: %v", podLogsRootDirectory, err) } } @@ -216,7 +217,7 @@ func NewKubeGenericRuntimeManager( imagePullQPS, imagePullBurst) kubeRuntimeManager.runner = lifecycle.NewHandlerRunner(httpClient, kubeRuntimeManager, kubeRuntimeManager) - kubeRuntimeManager.containerGC = NewContainerGC(runtimeService, podStateProvider, kubeRuntimeManager) + kubeRuntimeManager.containerGC = newContainerGC(runtimeService, podStateProvider, kubeRuntimeManager) kubeRuntimeManager.versionCache = cache.NewObjectCache( func() (interface{}, error) { @@ -243,7 +244,7 @@ func newRuntimeVersion(version string) (*utilversion.Version, error) { func (m *kubeGenericRuntimeManager) getTypedVersion() (*runtimeapi.VersionResponse, error) { typedVersion, err := m.runtimeService.Version(kubeRuntimeAPIVersion) if err != nil { - glog.Errorf("Get remote runtime typed version failed: %v", err) + klog.Errorf("Get remote runtime typed version failed: %v", err) return nil, err } return typedVersion, nil @@ -253,7 +254,7 @@ func (m *kubeGenericRuntimeManager) getTypedVersion() (*runtimeapi.VersionRespon func (m *kubeGenericRuntimeManager) Version() (kubecontainer.Version, error) { typedVersion, err := m.runtimeService.Version(kubeRuntimeAPIVersion) if err != nil { - glog.Errorf("Get remote runtime version failed: %v", err) + klog.Errorf("Get remote runtime version failed: %v", err) return nil, err } @@ -295,7 +296,7 @@ func (m *kubeGenericRuntimeManager) GetPods(all bool) ([]*kubecontainer.Pod, err for i := range sandboxes { s := sandboxes[i] if s.Metadata == nil { - glog.V(4).Infof("Sandbox does not have metadata: %+v", s) + klog.V(4).Infof("Sandbox does not have metadata: %+v", s) continue } podUID := kubetypes.UID(s.Metadata.Uid) @@ -309,7 +310,7 @@ func (m *kubeGenericRuntimeManager) GetPods(all bool) ([]*kubecontainer.Pod, err p := pods[podUID] converted, err := m.sandboxToKubeContainer(s) if err != nil { - 
glog.V(4).Infof("Convert %q sandbox %v of pod %q failed: %v", m.runtimeName, s, podUID, err) + klog.V(4).Infof("Convert %q sandbox %v of pod %q failed: %v", m.runtimeName, s, podUID, err) continue } p.Sandboxes = append(p.Sandboxes, converted) @@ -322,7 +323,7 @@ func (m *kubeGenericRuntimeManager) GetPods(all bool) ([]*kubecontainer.Pod, err for i := range containers { c := containers[i] if c.Metadata == nil { - glog.V(4).Infof("Container does not have metadata: %+v", c) + klog.V(4).Infof("Container does not have metadata: %+v", c) continue } @@ -339,7 +340,7 @@ func (m *kubeGenericRuntimeManager) GetPods(all bool) ([]*kubecontainer.Pod, err converted, err := m.toKubeContainer(c) if err != nil { - glog.V(4).Infof("Convert %s container %v of pod %q failed: %v", m.runtimeName, c, labelledInfo.PodUID, err) + klog.V(4).Infof("Convert %s container %v of pod %q failed: %v", m.runtimeName, c, labelledInfo.PodUID, err) continue } @@ -393,7 +394,7 @@ type podActions struct { // (changed, new attempt, original sandboxID if exist). func (m *kubeGenericRuntimeManager) podSandboxChanged(pod *v1.Pod, podStatus *kubecontainer.PodStatus) (bool, uint32, string) { if len(podStatus.SandboxStatuses) == 0 { - glog.V(2).Infof("No sandbox for pod %q can be found. Need to start a new one", format.Pod(pod)) + klog.V(2).Infof("No sandbox for pod %q can be found. Need to start a new one", format.Pod(pod)) return true, 0, "" } @@ -407,23 +408,23 @@ func (m *kubeGenericRuntimeManager) podSandboxChanged(pod *v1.Pod, podStatus *ku // Needs to create a new sandbox when readySandboxCount > 1 or the ready sandbox is not the latest one. sandboxStatus := podStatus.SandboxStatuses[0] if readySandboxCount > 1 { - glog.V(2).Infof("More than 1 sandboxes for pod %q are ready. Need to reconcile them", format.Pod(pod)) + klog.V(2).Infof("More than 1 sandboxes for pod %q are ready. Need to reconcile them", format.Pod(pod)) return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id } if sandboxStatus.State != runtimeapi.PodSandboxState_SANDBOX_READY { - glog.V(2).Infof("No ready sandbox for pod %q can be found. Need to start a new one", format.Pod(pod)) + klog.V(2).Infof("No ready sandbox for pod %q can be found. Need to start a new one", format.Pod(pod)) return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id } // Needs to create a new sandbox when network namespace changed. if sandboxStatus.GetLinux().GetNamespaces().GetOptions().GetNetwork() != networkNamespaceForPod(pod) { - glog.V(2).Infof("Sandbox for pod %q has changed. Need to start a new one", format.Pod(pod)) + klog.V(2).Infof("Sandbox for pod %q has changed. Need to start a new one", format.Pod(pod)) return true, sandboxStatus.Metadata.Attempt + 1, "" } // Needs to create a new sandbox when the sandbox does not have an IP address. if !kubecontainer.IsHostNetworkPod(pod) && sandboxStatus.Network.Ip == "" { - glog.V(2).Infof("Sandbox for pod %q has no IP address. Need to start a new one", format.Pod(pod)) + klog.V(2).Infof("Sandbox for pod %q has no IP address. Need to start a new one", format.Pod(pod)) return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id } @@ -449,7 +450,7 @@ func containerSucceeded(c *v1.Container, podStatus *kubecontainer.PodStatus) boo // computePodActions checks whether the pod spec has changed and returns the changes if true. 
func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *kubecontainer.PodStatus) podActions { - glog.V(5).Infof("Syncing Pod %q: %+v", format.Pod(pod), pod) + klog.V(5).Infof("Syncing Pod %q: %+v", format.Pod(pod), pod) createPodSandbox, attempt, sandboxID := m.podSandboxChanged(pod, podStatus) changes := podActions{ @@ -466,6 +467,10 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku if createPodSandbox { if !shouldRestartOnFailure(pod) && attempt != 0 { // Should not restart the pod, just return. + // we should not create a sandbox for a pod if it is already done. + // if all containers are done and should not be started, there is no need to create a new sandbox. + // this stops confusing logs on pods whose containers all have exit codes, but we recreate a sandbox before terminating it. + changes.CreateSandbox = false return changes } if len(pod.Spec.InitContainers) != 0 { @@ -511,7 +516,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku // to it. if containerStatus != nil && containerStatus.State != kubecontainer.ContainerStateRunning { if err := m.internalLifecycle.PostStopContainer(containerStatus.ID.ID); err != nil { - glog.Errorf("internal container post-stop lifecycle hook failed for container %v in pod %v with error %v", + klog.Errorf("internal container post-stop lifecycle hook failed for container %v in pod %v with error %v", container.Name, pod.Name, err) } } @@ -521,7 +526,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku if containerStatus == nil || containerStatus.State != kubecontainer.ContainerStateRunning { if kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) { message := fmt.Sprintf("Container %+v is dead, but RestartPolicy says that we should restart it.", container) - glog.V(3).Infof(message) + klog.V(3).Infof(message) changes.ContainersToStart = append(changes.ContainersToStart, idx) } continue @@ -539,7 +544,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku reason = "Container failed liveness probe." } else { // Keep the container. - keepCount += 1 + keepCount++ continue } @@ -557,7 +562,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku container: &pod.Spec.Containers[idx], message: message, } - glog.V(2).Infof("Container %q (%q) of pod %s: %s", container.Name, containerStatus.ID, format.Pod(pod), message) + klog.V(2).Infof("Container %q (%q) of pod %s: %s", container.Name, containerStatus.ID, format.Pod(pod), message) } if keepCount == 0 && len(changes.ContainersToStart) == 0 { @@ -578,31 +583,31 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) { // Step 1: Compute sandbox and container changes. 
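The behavioral fix in this hunk is the new early-return guard: a finished pod whose restart policy forbids restarts no longer gets a throwaway replacement sandbox that would immediately be torn down again. A minimal sketch of that guard under the same assumptions (the signature is ours):

```go
// guardSandboxCreation mirrors the fix above: when a sandbox would be
// recreated for a pod that is already done (restart is not allowed and
// this is not the first attempt), suppress sandbox creation instead of
// building one just to kill it.
func guardSandboxCreation(createSandbox, restartAllowed bool, attempt uint32) bool {
	if createSandbox && !restartAllowed && attempt != 0 {
		return false // pod is finished; no new sandbox
	}
	return createSandbox
}
```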
podContainerChanges := m.computePodActions(pod, podStatus) - glog.V(3).Infof("computePodActions got %+v for pod %q", podContainerChanges, format.Pod(pod)) + klog.V(3).Infof("computePodActions got %+v for pod %q", podContainerChanges, format.Pod(pod)) if podContainerChanges.CreateSandbox { ref, err := ref.GetReference(legacyscheme.Scheme, pod) if err != nil { - glog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), err) + klog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), err) } if podContainerChanges.SandboxID != "" { m.recorder.Eventf(ref, v1.EventTypeNormal, events.SandboxChanged, "Pod sandbox changed, it will be killed and re-created.") } else { - glog.V(4).Infof("SyncPod received new pod %q, will create a sandbox for it", format.Pod(pod)) + klog.V(4).Infof("SyncPod received new pod %q, will create a sandbox for it", format.Pod(pod)) } } // Step 2: Kill the pod if the sandbox has changed. if podContainerChanges.KillPod { if !podContainerChanges.CreateSandbox { - glog.V(4).Infof("Stopping PodSandbox for %q because all other containers are dead.", format.Pod(pod)) + klog.V(4).Infof("Stopping PodSandbox for %q because all other containers are dead.", format.Pod(pod)) } else { - glog.V(4).Infof("Stopping PodSandbox for %q, will start new one", format.Pod(pod)) + klog.V(4).Infof("Stopping PodSandbox for %q, will start new one", format.Pod(pod)) } killResult := m.killPodWithSyncResult(pod, kubecontainer.ConvertPodStatusToRunningPod(m.runtimeName, podStatus), nil) result.AddPodSyncResult(killResult) if killResult.Error() != nil { - glog.Errorf("killPodWithSyncResult failed: %v", killResult.Error()) + klog.Errorf("killPodWithSyncResult failed: %v", killResult.Error()) return } @@ -612,12 +617,12 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat } else { // Step 3: kill any running containers in this pod which are not to keep. 
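Each numbered step in `SyncPod` reports into a shared `kubecontainer.PodSyncResult`, so a failing step can abort the sync without discarding the outcomes of earlier steps. A stripped-down sketch of that accumulation pattern (types simplified; not the real PodSyncResult API):

```go
package main

import "fmt"

// resultSet sketches the PodSyncResult idea: sub-actions append their
// outcomes, and Error() folds them into a single go/no-go answer.
type resultSet struct{ errs []error }

func (r *resultSet) add(err error) {
	if err != nil {
		r.errs = append(r.errs, err)
	}
}

func (r *resultSet) Error() error {
	if len(r.errs) == 0 {
		return nil
	}
	return fmt.Errorf("sync failed: %v", r.errs)
}

func main() {
	var result resultSet
	result.add(nil)                          // e.g. the kill step succeeded
	result.add(fmt.Errorf("sandbox failed")) // e.g. the create step failed
	fmt.Println(result.Error())
}
```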
for containerID, containerInfo := range podContainerChanges.ContainersToKill { - glog.V(3).Infof("Killing unwanted container %q(id=%q) for pod %q", containerInfo.name, containerID, format.Pod(pod)) + klog.V(3).Infof("Killing unwanted container %q(id=%q) for pod %q", containerInfo.name, containerID, format.Pod(pod)) killContainerResult := kubecontainer.NewSyncResult(kubecontainer.KillContainer, containerInfo.name) result.AddSyncResult(killContainerResult) if err := m.killContainer(pod, containerID, containerInfo.name, containerInfo.message, nil); err != nil { killContainerResult.Fail(kubecontainer.ErrKillContainer, err.Error()) - glog.Errorf("killContainer %q(id=%q) for pod %q failed: %v", containerInfo.name, containerID, format.Pod(pod), err) + klog.Errorf("killContainer %q(id=%q) for pod %q failed: %v", containerInfo.name, containerID, format.Pod(pod), err) return } } @@ -648,30 +653,30 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat var msg string var err error - glog.V(4).Infof("Creating sandbox for pod %q", format.Pod(pod)) + klog.V(4).Infof("Creating sandbox for pod %q", format.Pod(pod)) createSandboxResult := kubecontainer.NewSyncResult(kubecontainer.CreatePodSandbox, format.Pod(pod)) result.AddSyncResult(createSandboxResult) podSandboxID, msg, err = m.createPodSandbox(pod, podContainerChanges.Attempt) if err != nil { createSandboxResult.Fail(kubecontainer.ErrCreatePodSandbox, msg) - glog.Errorf("createPodSandbox for pod %q failed: %v", format.Pod(pod), err) + klog.Errorf("createPodSandbox for pod %q failed: %v", format.Pod(pod), err) ref, referr := ref.GetReference(legacyscheme.Scheme, pod) if referr != nil { - glog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), referr) + klog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), referr) } m.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedCreatePodSandBox, "Failed create pod sandbox: %v", err) return } - glog.V(4).Infof("Created PodSandbox %q for pod %q", podSandboxID, format.Pod(pod)) + klog.V(4).Infof("Created PodSandbox %q for pod %q", podSandboxID, format.Pod(pod)) podSandboxStatus, err := m.runtimeService.PodSandboxStatus(podSandboxID) if err != nil { ref, referr := ref.GetReference(legacyscheme.Scheme, pod) if referr != nil { - glog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), referr) + klog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), referr) } m.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedStatusPodSandBox, "Unable to get pod sandbox status: %v", err) - glog.Errorf("Failed to get pod sandbox status: %v; Skipping pod %q", err, format.Pod(pod)) + klog.Errorf("Failed to get pod sandbox status: %v; Skipping pod %q", err, format.Pod(pod)) result.Fail(err) return } @@ -681,7 +686,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat if !kubecontainer.IsHostNetworkPod(pod) { // Overwrite the podIP passed in the pod status, since we just started the pod sandbox. 
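Every failure path in the sandbox-creation step pairs its log line with a Kubernetes event, so the error also surfaces in `kubectl describe pod`. That recorder pattern is easy to exercise in isolation with client-go's `FakeRecorder` (the pod object and error below are illustrative):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/record"
)

func main() {
	// FakeRecorder buffers formatted events in a channel, which makes
	// the log-plus-event failure pattern above easy to assert on in tests.
	rec := record.NewFakeRecorder(1)
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}}

	rec.Eventf(pod, v1.EventTypeWarning, "FailedCreatePodSandBox",
		"Failed create pod sandbox: %v", fmt.Errorf("boom"))

	// Prints: Warning FailedCreatePodSandBox Failed create pod sandbox: boom
	fmt.Println(<-rec.Events)
}
```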
podIP = m.determinePodSandboxIP(pod.Namespace, pod.Name, podSandboxStatus) - glog.V(4).Infof("Determined the ip %q for pod %q after sandbox changed", podIP, format.Pod(pod)) + klog.V(4).Infof("Determined the ip %q for pod %q after sandbox changed", podIP, format.Pod(pod)) } } @@ -691,7 +696,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat podSandboxConfig, err := m.generatePodSandboxConfig(pod, podContainerChanges.Attempt) if err != nil { message := fmt.Sprintf("GeneratePodSandboxConfig for pod %q failed: %v", format.Pod(pod), err) - glog.Error(message) + klog.Error(message) configPodSandboxResult.Fail(kubecontainer.ErrConfigPodSandbox, message) return } @@ -704,11 +709,11 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat isInBackOff, msg, err := m.doBackOff(pod, container, podStatus, backOff) if isInBackOff { startContainerResult.Fail(err, msg) - glog.V(4).Infof("Backing Off restarting init container %+v in pod %v", container, format.Pod(pod)) + klog.V(4).Infof("Backing Off restarting init container %+v in pod %v", container, format.Pod(pod)) return } - glog.V(4).Infof("Creating init container %+v in pod %v", container, format.Pod(pod)) + klog.V(4).Infof("Creating init container %+v in pod %v", container, format.Pod(pod)) if msg, err := m.startContainer(podSandboxID, podSandboxConfig, container, pod, podStatus, pullSecrets, podIP, kubecontainer.ContainerTypeInit); err != nil { startContainerResult.Fail(err, msg) utilruntime.HandleError(fmt.Errorf("init container start failed: %v: %s", err, msg)) @@ -716,7 +721,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat } // Successfully started the container; clear the entry in the failure - glog.V(4).Infof("Completed init container %q for pod %q", container.Name, format.Pod(pod)) + klog.V(4).Infof("Completed init container %q for pod %q", container.Name, format.Pod(pod)) } // Step 6: start containers in podContainerChanges.ContainersToStart. 
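Step 5 above gates every init-container (re)start through `m.doBackOff`, which is built on client-go's `flowcontrol.Backoff`. A self-contained sketch of that primitive (the key format is ours; kubelet derives its key from pod and container identity):

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	// Exponential per-key backoff: 1s initial delay, capped at 10s —
	// the same primitive SyncPod consults before restarting a container.
	backoff := flowcontrol.NewBackOff(1*time.Second, 10*time.Second)
	key := "default_mypod_mycontainer" // illustrative per-container key

	now := time.Now()
	backoff.Next(key, now) // record a failure; the next delay doubles
	if backoff.IsInBackOffSince(key, now) {
		fmt.Printf("in backoff for %v\n", backoff.Get(key))
	}
	backoff.Reset(key) // cleared once the container starts successfully
}
```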
@@ -728,18 +733,18 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat isInBackOff, msg, err := m.doBackOff(pod, container, podStatus, backOff) if isInBackOff { startContainerResult.Fail(err, msg) - glog.V(4).Infof("Backing Off restarting container %+v in pod %v", container, format.Pod(pod)) + klog.V(4).Infof("Backing Off restarting container %+v in pod %v", container, format.Pod(pod)) continue } - glog.V(4).Infof("Creating container %+v in pod %v", container, format.Pod(pod)) + klog.V(4).Infof("Creating container %+v in pod %v", container, format.Pod(pod)) if msg, err := m.startContainer(podSandboxID, podSandboxConfig, container, pod, podStatus, pullSecrets, podIP, kubecontainer.ContainerTypeRegular); err != nil { startContainerResult.Fail(err, msg) // known errors that are logged in other places are logged at higher levels here to avoid // repetitive log spam switch { case err == images.ErrImagePullBackOff: - glog.V(3).Infof("container start failed: %v: %s", err, msg) + klog.V(3).Infof("container start failed: %v: %s", err, msg) default: utilruntime.HandleError(fmt.Errorf("container start failed: %v: %s", err, msg)) } @@ -765,7 +770,7 @@ func (m *kubeGenericRuntimeManager) doBackOff(pod *v1.Pod, container *v1.Contain return false, "", nil } - glog.V(3).Infof("checking backoff for container %q in pod %q", container.Name, format.Pod(pod)) + klog.V(3).Infof("checking backoff for container %q in pod %q", container.Name, format.Pod(pod)) // Use the finished time of the latest exited container as the start point to calculate whether to do back-off. ts := cStatus.FinishedAt // backOff requires a unique key to identify the container. @@ -775,7 +780,7 @@ func (m *kubeGenericRuntimeManager) doBackOff(pod *v1.Pod, container *v1.Contain m.recorder.Eventf(ref, v1.EventTypeWarning, events.BackOffStartContainer, "Back-off restarting failed container") } err := fmt.Errorf("Back-off %s restarting failed container=%s pod=%s", backOff.Get(key), container.Name, format.Pod(pod)) - glog.V(3).Infof("%s", err.Error()) + klog.V(3).Infof("%s", err.Error()) return true, err.Error(), kubecontainer.ErrCrashLoopBackOff } @@ -807,7 +812,7 @@ func (m *kubeGenericRuntimeManager) killPodWithSyncResult(pod *v1.Pod, runningPo for _, podSandbox := range runningPod.Sandboxes { if err := m.runtimeService.StopPodSandbox(podSandbox.ID.ID); err != nil { killSandboxResult.Fail(kubecontainer.ErrKillPodSandbox, err.Error()) - glog.Errorf("Failed to stop sandbox %q", podSandbox.ID) + klog.Errorf("Failed to stop sandbox %q", podSandbox.ID) } } @@ -842,14 +847,14 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namesp UID: uid, }, }) - glog.V(4).Infof("getSandboxIDByPodUID got sandbox IDs %q for pod %q", podSandboxIDs, podFullName) + klog.V(4).Infof("getSandboxIDByPodUID got sandbox IDs %q for pod %q", podSandboxIDs, podFullName) sandboxStatuses := make([]*runtimeapi.PodSandboxStatus, len(podSandboxIDs)) podIP := "" for idx, podSandboxID := range podSandboxIDs { podSandboxStatus, err := m.runtimeService.PodSandboxStatus(podSandboxID) if err != nil { - glog.Errorf("PodSandboxStatus of sandbox %q for pod %q error: %v", podSandboxID, podFullName, err) + klog.Errorf("PodSandboxStatus of sandbox %q for pod %q error: %v", podSandboxID, podFullName, err) return nil, err } sandboxStatuses[idx] = podSandboxStatus @@ -863,7 +868,7 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namesp // Get statuses of all containers visible in the pod. 
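The `switch` on `images.ErrImagePullBackOff` in Step 6 above demotes an expected, already-reported failure to a verbose-only log line, while unexpected errors still go through `utilruntime.HandleError`. The same shape with a stand-in sentinel error:

```go
package main

import (
	"errors"
	"fmt"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/klog"
)

// errPullBackOff stands in for images.ErrImagePullBackOff.
var errPullBackOff = errors.New("image pull backoff")

func reportStartFailure(err error, msg string) {
	switch {
	case err == errPullBackOff:
		// Expected and already logged at the pull site; keep it quiet
		// here to avoid repetitive log spam.
		klog.V(3).Infof("container start failed: %v: %s", err, msg)
	default:
		utilruntime.HandleError(fmt.Errorf("container start failed: %v: %s", err, msg))
	}
}

func main() {
	reportStartFailure(errPullBackOff, "back-off 40s restarting failed container")
}
```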
containerStatuses, err := m.getPodContainerStatuses(uid, name, namespace) if err != nil { - glog.Errorf("getPodContainerStatuses for pod %q failed: %v", podFullName, err) + klog.Errorf("getPodContainerStatuses for pod %q failed: %v", podFullName, err) return nil, err } @@ -894,7 +899,7 @@ func (m *kubeGenericRuntimeManager) GarbageCollect(gcPolicy kubecontainer.Contai func (m *kubeGenericRuntimeManager) GetPodContainerID(pod *kubecontainer.Pod) (kubecontainer.ContainerID, error) { formattedPod := kubecontainer.FormatPod(pod) if len(pod.Sandboxes) == 0 { - glog.Errorf("No sandboxes are found for pod %q", formattedPod) + klog.Errorf("No sandboxes are found for pod %q", formattedPod) return kubecontainer.ContainerID{}, fmt.Errorf("sandboxes for pod %q not found", formattedPod) } @@ -907,7 +912,7 @@ func (m *kubeGenericRuntimeManager) GetPodContainerID(pod *kubecontainer.Pod) (k func (m *kubeGenericRuntimeManager) UpdatePodCIDR(podCIDR string) error { // TODO(#35531): do we really want to write a method on this manager for each // field of the config? - glog.Infof("updating runtime config through cri with podcidr %v", podCIDR) + klog.Infof("updating runtime config through cri with podcidr %v", podCIDR) return m.runtimeService.UpdateRuntimeConfig( &runtimeapi.RuntimeConfig{ NetworkConfig: &runtimeapi.NetworkConfig{ diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go index 15f9b4b59c98..dd51bda32ca9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go @@ -22,10 +22,10 @@ import ( "net/url" "sort" - "github.com/golang/glog" "k8s.io/api/core/v1" kubetypes "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" "k8s.io/kubernetes/pkg/features" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -38,7 +38,7 @@ func (m *kubeGenericRuntimeManager) createPodSandbox(pod *v1.Pod, attempt uint32 podSandboxConfig, err := m.generatePodSandboxConfig(pod, attempt) if err != nil { message := fmt.Sprintf("GeneratePodSandboxConfig for pod %q failed: %v", format.Pod(pod), err) - glog.Error(message) + klog.Error(message) return "", message, err } @@ -46,12 +46,12 @@ func (m *kubeGenericRuntimeManager) createPodSandbox(pod *v1.Pod, attempt uint32 err = m.osInterface.MkdirAll(podSandboxConfig.LogDirectory, 0755) if err != nil { message := fmt.Sprintf("Create pod log directory for pod %q failed: %v", format.Pod(pod), err) - glog.Errorf(message) + klog.Errorf(message) return "", message, err } runtimeHandler := "" - if utilfeature.DefaultFeatureGate.Enabled(features.RuntimeClass) { + if utilfeature.DefaultFeatureGate.Enabled(features.RuntimeClass) && m.runtimeClassManager != nil { runtimeHandler, err = m.runtimeClassManager.LookupRuntimeHandler(pod.Spec.RuntimeClassName) if err != nil { message := fmt.Sprintf("CreatePodSandbox for pod %q failed: %v", format.Pod(pod), err) @@ -62,7 +62,7 @@ func (m *kubeGenericRuntimeManager) createPodSandbox(pod *v1.Pod, attempt uint32 podSandBoxID, err := m.runtimeService.RunPodSandbox(podSandboxConfig, runtimeHandler) if err != nil { message := fmt.Sprintf("CreatePodSandbox for pod %q failed: %v", format.Pod(pod), err) - glog.Error(message) + klog.Error(message) return "", message, 
err } @@ -204,7 +204,7 @@ func (m *kubeGenericRuntimeManager) getKubeletSandboxes(all bool) ([]*runtimeapi resp, err := m.runtimeService.ListPodSandbox(filter) if err != nil { - glog.Errorf("ListPodSandbox failed: %v", err) + klog.Errorf("ListPodSandbox failed: %v", err) return nil, err } @@ -214,14 +214,14 @@ func (m *kubeGenericRuntimeManager) getKubeletSandboxes(all bool) ([]*runtimeapi // determinePodSandboxIP determines the IP address of the given pod sandbox. func (m *kubeGenericRuntimeManager) determinePodSandboxIP(podNamespace, podName string, podSandbox *runtimeapi.PodSandboxStatus) string { if podSandbox.Network == nil { - glog.Warningf("Pod Sandbox status doesn't have network information, cannot report IP") + klog.Warningf("Pod Sandbox status doesn't have network information, cannot report IP") return "" } ip := podSandbox.Network.Ip if len(ip) != 0 && net.ParseIP(ip) == nil { // ip could be an empty string if runtime is not responsible for the // IP (e.g., host networking). - glog.Warningf("Pod Sandbox reported an unparseable IP %v", ip) + klog.Warningf("Pod Sandbox reported an unparseable IP %v", ip) return "" } return ip @@ -240,7 +240,7 @@ func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(podUID kubetypes.UID, s } sandboxes, err := m.runtimeService.ListPodSandbox(filter) if err != nil { - glog.Errorf("ListPodSandbox with pod UID %q failed: %v", podUID, err) + klog.Errorf("ListPodSandbox with pod UID %q failed: %v", podUID, err) return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/labels.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/labels.go index 9adc0205a26d..5f36e0b5d984 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/labels.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/labels.go @@ -20,10 +20,10 @@ import ( "encoding/json" "strconv" - "github.com/golang/glog" "k8s.io/api/core/v1" kubetypes "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" "k8s.io/kubernetes/pkg/features" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/types" @@ -135,7 +135,7 @@ func newContainerAnnotations(container *v1.Container, pod *v1.Pod, restartCount // Using json encoding so that the PreStop handler object is readable after writing as a label rawPreStop, err := json.Marshal(container.Lifecycle.PreStop) if err != nil { - glog.Errorf("Unable to marshal lifecycle PreStop handler for container %q of pod %q: %v", container.Name, format.Pod(pod), err) + klog.Errorf("Unable to marshal lifecycle PreStop handler for container %q of pod %q: %v", container.Name, format.Pod(pod), err) } else { annotations[containerPreStopHandlerLabel] = string(rawPreStop) } @@ -144,7 +144,7 @@ func newContainerAnnotations(container *v1.Container, pod *v1.Pod, restartCount if len(container.Ports) > 0 { rawContainerPorts, err := json.Marshal(container.Ports) if err != nil { - glog.Errorf("Unable to marshal container ports for container %q for pod %q: %v", container.Name, format.Pod(pod), err) + klog.Errorf("Unable to marshal container ports for container %q for pod %q: %v", container.Name, format.Pod(pod), err) } else { annotations[containerPortsLabel] = string(rawContainerPorts) } @@ -203,28 +203,28 @@ func getContainerInfoFromAnnotations(annotations map[string]string) *annotatedCo } if containerInfo.Hash, err = getUint64ValueFromLabel(annotations, containerHashLabel); err != nil { - 
glog.Errorf("Unable to get %q from annotations %q: %v", containerHashLabel, annotations, err) + klog.Errorf("Unable to get %q from annotations %q: %v", containerHashLabel, annotations, err) } if containerInfo.RestartCount, err = getIntValueFromLabel(annotations, containerRestartCountLabel); err != nil { - glog.Errorf("Unable to get %q from annotations %q: %v", containerRestartCountLabel, annotations, err) + klog.Errorf("Unable to get %q from annotations %q: %v", containerRestartCountLabel, annotations, err) } if containerInfo.PodDeletionGracePeriod, err = getInt64PointerFromLabel(annotations, podDeletionGracePeriodLabel); err != nil { - glog.Errorf("Unable to get %q from annotations %q: %v", podDeletionGracePeriodLabel, annotations, err) + klog.Errorf("Unable to get %q from annotations %q: %v", podDeletionGracePeriodLabel, annotations, err) } if containerInfo.PodTerminationGracePeriod, err = getInt64PointerFromLabel(annotations, podTerminationGracePeriodLabel); err != nil { - glog.Errorf("Unable to get %q from annotations %q: %v", podTerminationGracePeriodLabel, annotations, err) + klog.Errorf("Unable to get %q from annotations %q: %v", podTerminationGracePeriodLabel, annotations, err) } preStopHandler := &v1.Handler{} if found, err := getJSONObjectFromLabel(annotations, containerPreStopHandlerLabel, preStopHandler); err != nil { - glog.Errorf("Unable to get %q from annotations %q: %v", containerPreStopHandlerLabel, annotations, err) + klog.Errorf("Unable to get %q from annotations %q: %v", containerPreStopHandlerLabel, annotations, err) } else if found { containerInfo.PreStopHandler = preStopHandler } containerPorts := []v1.ContainerPort{} if found, err := getJSONObjectFromLabel(annotations, containerPortsLabel, &containerPorts); err != nil { - glog.Errorf("Unable to get %q from annotations %q: %v", containerPortsLabel, annotations, err) + klog.Errorf("Unable to get %q from annotations %q: %v", containerPortsLabel, annotations, err) } else if found { containerInfo.ContainerPorts = containerPorts } @@ -237,7 +237,7 @@ func getStringValueFromLabel(labels map[string]string, label string) string { return value } // Do not report error, because there should be many old containers without label now. - glog.V(3).Infof("Container doesn't have label %s, it may be an old or invalid container", label) + klog.V(3).Infof("Container doesn't have label %s, it may be an old or invalid container", label) // Return empty string "" for these containers, the caller will get value by other ways. return "" } @@ -252,7 +252,7 @@ func getIntValueFromLabel(labels map[string]string, label string) (int, error) { return intValue, nil } // Do not report error, because there should be many old containers without label now. - glog.V(3).Infof("Container doesn't have label %s, it may be an old or invalid container", label) + klog.V(3).Infof("Container doesn't have label %s, it may be an old or invalid container", label) // Just set the value to 0 return 0, nil } @@ -267,7 +267,7 @@ func getUint64ValueFromLabel(labels map[string]string, label string) (uint64, er return intValue, nil } // Do not report error, because there should be many old containers without label now. 
- glog.V(3).Infof("Container doesn't have label %s, it may be an old or invalid container", label) + klog.V(3).Infof("Container doesn't have label %s, it may be an old or invalid container", label) // Just set the value to 0 return 0, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/legacy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/legacy.go index 2a00ad012f1d..1a77e6f0c8dd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/legacy.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/legacy.go @@ -44,9 +44,9 @@ func legacyLogSymlink(containerID string, containerName, podName, podNamespace s containerName, containerID) } -func logSymlink(containerLogsDir, podFullName, containerName, dockerId string) string { +func logSymlink(containerLogsDir, podFullName, containerName, dockerID string) string { suffix := fmt.Sprintf(".%s", legacyLogSuffix) - logPath := fmt.Sprintf("%s_%s-%s", podFullName, containerName, dockerId) + logPath := fmt.Sprintf("%s_%s-%s", podFullName, containerName, dockerID) // Length of a filename cannot exceed 255 characters in ext4 on Linux. if len(logPath) > ext4MaxFileNameLen-len(suffix) { logPath = logPath[:ext4MaxFileNameLen-len(suffix)] diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs/BUILD index ba0dc3f49e9a..61eeead77f3b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs/BUILD @@ -12,7 +12,7 @@ go_library( "//staging/src/k8s.io/api/core/v1:go_default_library", "//vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog:go_default_library", "//vendor/github.com/fsnotify/fsnotify:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs/logs.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs/logs.go index c81194d1e638..e679cee84b6c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs/logs.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs/logs.go @@ -30,7 +30,7 @@ import ( "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" "github.com/fsnotify/fsnotify" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" @@ -293,7 +293,7 @@ func ReadLogs(ctx context.Context, path, containerID string, opts *LogOptions, r msg := &logMessage{} for { if stop { - glog.V(2).Infof("Finish parsing log file %q", path) + klog.V(2).Infof("Finish parsing log file %q", path) return nil } l, err := r.ReadBytes(eol[0]) @@ -328,7 +328,7 @@ func ReadLogs(ctx context.Context, path, containerID string, opts *LogOptions, r if len(l) == 0 { continue } - glog.Warningf("Incomplete line in log file %q: %q", path, l) + klog.Warningf("Incomplete line in log file %q: %q", path, l) } if parse == nil { // Initialize the log parsing function. @@ -340,16 +340,16 @@ func ReadLogs(ctx context.Context, path, containerID string, opts *LogOptions, r // Parse the log line. 
msg.reset() if err := parse(l, msg); err != nil { - glog.Errorf("Failed with err %v when parsing log for log file %q: %q", err, path, l) + klog.Errorf("Failed with err %v when parsing log for log file %q: %q", err, path, l) continue } // Write the log line into the stream. if err := writer.write(msg); err != nil { if err == errMaximumWrite { - glog.V(2).Infof("Finish parsing log file %q, hit bytes limit %d(bytes)", path, opts.bytes) + klog.V(2).Infof("Finish parsing log file %q, hit bytes limit %d(bytes)", path, opts.bytes) return nil } - glog.Errorf("Failed with err %v when writing log for log file %q: %+v", err, path, msg) + klog.Errorf("Failed with err %v when writing log for log file %q: %+v", err, path, msg) return err } } @@ -362,7 +362,7 @@ func isContainerRunning(id string, r internalapi.RuntimeService) (bool, error) { } // Only keep following container log when it is running. if s.State != runtimeapi.ContainerState_CONTAINER_RUNNING { - glog.V(5).Infof("Container %q is not running (state=%q)", id, s.State) + klog.V(5).Infof("Container %q is not running (state=%q)", id, s.State) // Do not return error because it's normal that the container stops // during waiting. return false, nil @@ -387,10 +387,10 @@ func waitLogs(ctx context.Context, id string, w *fsnotify.Watcher, runtimeServic case fsnotify.Write: return true, nil default: - glog.Errorf("Unexpected fsnotify event: %v, retrying...", e) + klog.Errorf("Unexpected fsnotify event: %v, retrying...", e) } case err := <-w.Errors: - glog.Errorf("Fsnotify watch error: %v, %d error retries remaining", err, errRetry) + klog.Errorf("Fsnotify watch error: %v, %d error retries remaining", err, errRetry) if errRetry == 0 { return false, err } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/leaky/leaky.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/leaky/leaky.go index 4e3e1e1f27ca..7c75002c47ee 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/leaky/leaky.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/leaky/leaky.go @@ -19,7 +19,7 @@ limitations under the License. package leaky const ( - // This is used in a few places outside of Kubelet, such as indexing + // PodInfraContainerName is used in a few places outside of Kubelet, such as indexing // into the container info. 
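`waitLogs` above parks on an fsnotify watcher and treats only `Write` events as a signal that the log file has new bytes; other events are retried and watcher errors are counted down. A standalone version of that follow loop (the watched path is illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Watch the container log file; a Write event means new bytes are
	// available to read, mirroring waitLogs above.
	if err := w.Add("/var/log/containers/demo.log"); err != nil {
		log.Fatal(err)
	}
	for {
		select {
		case e := <-w.Events:
			if e.Op&fsnotify.Write == fsnotify.Write {
				fmt.Println("log grew; read more bytes")
			}
		case err := <-w.Errors:
			log.Printf("watch error: %v", err)
		}
	}
}
```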
PodInfraContainerName = "POD" ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/BUILD index e46f993837b5..ff518ddfdf1f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/BUILD @@ -28,7 +28,7 @@ go_library( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/handlers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/handlers.go index c0630ebb2aa1..8b5a8b8d4067 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/handlers.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/handlers.go @@ -23,10 +23,10 @@ import ( "net/http" "strconv" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/klog" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/kubelet/util/format" @@ -59,20 +59,20 @@ func (hr *HandlerRunner) Run(containerID kubecontainer.ContainerID, pod *v1.Pod, output, err := hr.commandRunner.RunInContainer(containerID, handler.Exec.Command, 0) if err != nil { msg = fmt.Sprintf("Exec lifecycle hook (%v) for Container %q in Pod %q failed - error: %v, message: %q", handler.Exec.Command, container.Name, format.Pod(pod), err, string(output)) - glog.V(1).Infof(msg) + klog.V(1).Infof(msg) } return msg, err case handler.HTTPGet != nil: msg, err := hr.runHTTPHandler(pod, container, handler) if err != nil { msg = fmt.Sprintf("Http lifecycle hook (%s) for Container %q in Pod %q failed - error: %v, message: %q", handler.HTTPGet.Path, container.Name, format.Pod(pod), err, msg) - glog.V(1).Infof(msg) + klog.V(1).Infof(msg) } return msg, err default: err := fmt.Errorf("Invalid handler: %v", handler) msg := fmt.Sprintf("Cannot run handler: %v", err) - glog.Errorf(msg) + klog.Errorf(msg) return msg, err } } @@ -105,7 +105,7 @@ func (hr *HandlerRunner) runHTTPHandler(pod *v1.Pod, container *v1.Container, ha if len(host) == 0 { status, err := hr.containerManager.GetPodStatus(pod.UID, pod.Name, pod.Namespace) if err != nil { - glog.Errorf("Unable to get pod info, event handlers may be invalid.") + klog.Errorf("Unable to get pod info, event handlers may be invalid.") return "", err } if status.IP == "" { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/predicate.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/predicate.go index ba6d25b584c9..df4a32d1addd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/predicate.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/predicate.go @@ -19,7 +19,7 @@ package lifecycle import ( "fmt" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" @@ -58,7 +58,7 @@ func NewPredicateAdmitHandler(getNodeAnyWayFunc getNodeAnyWayFuncType, admission func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult { node, err := w.getNodeAnyWayFunc() if err != 
nil { - glog.Errorf("Cannot get Node info: %v", err) + klog.Errorf("Cannot get Node info: %v", err) return PodAdmitResult{ Admit: false, Reason: "InvalidNodeInfo", @@ -72,7 +72,7 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult // ensure the node has enough plugin resources for that required in pods if err = w.pluginResourceUpdateFunc(nodeInfo, attrs); err != nil { message := fmt.Sprintf("Update plugin resources failed due to %v, which is unexpected.", err) - glog.Warningf("Failed to admit pod %v - %s", format.Pod(admitPod), message) + klog.Warningf("Failed to admit pod %v - %s", format.Pod(admitPod), message) return PodAdmitResult{ Admit: false, Reason: "UnexpectedAdmissionError", @@ -93,7 +93,7 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult fit, reasons, err := predicates.GeneralPredicates(podWithoutMissingExtendedResources, nil, nodeInfo) if err != nil { message := fmt.Sprintf("GeneralPredicates failed due to %v, which is unexpected.", err) - glog.Warningf("Failed to admit pod %v - %s", format.Pod(admitPod), message) + klog.Warningf("Failed to admit pod %v - %s", format.Pod(admitPod), message) return PodAdmitResult{ Admit: fit, Reason: "UnexpectedAdmissionError", @@ -104,7 +104,7 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult fit, reasons, err = w.admissionFailureHandler.HandleAdmissionFailure(admitPod, reasons) if err != nil { message := fmt.Sprintf("Unexpected error while attempting to recover from admission failure: %v", err) - glog.Warningf("Failed to admit pod %v - %s", format.Pod(admitPod), message) + klog.Warningf("Failed to admit pod %v - %s", format.Pod(admitPod), message) return PodAdmitResult{ Admit: fit, Reason: "UnexpectedAdmissionError", @@ -117,7 +117,7 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult var message string if len(reasons) == 0 { message = fmt.Sprint("GeneralPredicates failed due to unknown reason, which is unexpected.") - glog.Warningf("Failed to admit pod %v - %s", format.Pod(admitPod), message) + klog.Warningf("Failed to admit pod %v - %s", format.Pod(admitPod), message) return PodAdmitResult{ Admit: fit, Reason: "UnknownReason", @@ -130,19 +130,19 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult case *predicates.PredicateFailureError: reason = re.PredicateName message = re.Error() - glog.V(2).Infof("Predicate failed on Pod: %v, for reason: %v", format.Pod(admitPod), message) + klog.V(2).Infof("Predicate failed on Pod: %v, for reason: %v", format.Pod(admitPod), message) case *predicates.InsufficientResourceError: reason = fmt.Sprintf("OutOf%s", re.ResourceName) message = re.Error() - glog.V(2).Infof("Predicate failed on Pod: %v, for reason: %v", format.Pod(admitPod), message) + klog.V(2).Infof("Predicate failed on Pod: %v, for reason: %v", format.Pod(admitPod), message) case *predicates.FailureReason: reason = re.GetReason() message = fmt.Sprintf("Failure: %s", re.GetReason()) - glog.V(2).Infof("Predicate failed on Pod: %v, for reason: %v", format.Pod(admitPod), message) + klog.V(2).Infof("Predicate failed on Pod: %v, for reason: %v", format.Pod(admitPod), message) default: reason = "UnexpectedPredicateFailureType" message = fmt.Sprintf("GeneralPredicates failed due to %v, which is unexpected.", r) - glog.Warningf("Failed to admit pod %v - %s", format.Pod(admitPod), message) + klog.Warningf("Failed to admit pod %v - %s", format.Pod(admitPod), message) } return PodAdmitResult{ Admit: 
fit, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/logs/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/logs/BUILD index 41e4281dbaa2..588106814349 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/logs/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/logs/BUILD @@ -14,7 +14,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/logs/container_log_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/logs/container_log_manager.go index cae78993d1b0..50cfcf495347 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/logs/container_log_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/logs/container_log_manager.go @@ -26,7 +26,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/clock" @@ -171,7 +171,7 @@ func (c *containerLogManager) Start() { // Start a goroutine periodically does container log rotation. go wait.Forever(func() { if err := c.rotateLogs(); err != nil { - glog.Errorf("Failed to rotate container logs: %v", err) + klog.Errorf("Failed to rotate container logs: %v", err) } }, logMonitorPeriod) } @@ -193,27 +193,27 @@ func (c *containerLogManager) rotateLogs() error { // Note that we should not block log rotate for an error of a single container. status, err := c.runtimeService.ContainerStatus(id) if err != nil { - glog.Errorf("Failed to get container status for %q: %v", id, err) + klog.Errorf("Failed to get container status for %q: %v", id, err) continue } path := status.GetLogPath() info, err := os.Stat(path) if err != nil { if !os.IsNotExist(err) { - glog.Errorf("Failed to stat container log %q: %v", path, err) + klog.Errorf("Failed to stat container log %q: %v", path, err) continue } // In rotateLatestLog, there are several cases that we may // lose original container log after ReopenContainerLog fails. // We try to recover it by reopening container log. if err := c.runtimeService.ReopenContainerLog(id); err != nil { - glog.Errorf("Container %q log %q doesn't exist, reopen container log failed: %v", id, path, err) + klog.Errorf("Container %q log %q doesn't exist, reopen container log failed: %v", id, path, err) continue } // The container log should be recovered. info, err = os.Stat(path) if err != nil { - glog.Errorf("Failed to stat container log %q after reopen: %v", path, err) + klog.Errorf("Failed to stat container log %q after reopen: %v", path, err) continue } } @@ -222,7 +222,7 @@ func (c *containerLogManager) rotateLogs() error { } // Perform log rotation. if err := c.rotateLog(id, path); err != nil { - glog.Errorf("Failed to rotate log %q for container %q: %v", path, id, err) + klog.Errorf("Failed to rotate log %q for container %q: %v", path, id, err) continue } } @@ -379,7 +379,7 @@ func (c *containerLogManager) rotateLatestLog(id, log string) error { // This shouldn't happen. // Report an error if this happens, because we will lose original // log. 
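`rotateLogs` above is a stat-then-act loop: reopen the log if it vanished, rotate it once it crosses the size threshold, and never let one container's failure block the others. A minimal sketch of the size gate (the timestamped naming is ours, not kubelet's exact scheme):

```go
package main

import (
	"fmt"
	"log"
	"os"
	"time"
)

// rotateIfLarge sketches the size check in rotateLogs above: stat the
// live log and move it aside once it crosses the configured threshold.
func rotateIfLarge(path string, maxBytes int64) error {
	info, err := os.Stat(path)
	if err != nil {
		return err
	}
	if info.Size() < maxBytes {
		return nil // under the limit; nothing to do
	}
	rotated := fmt.Sprintf("%s.%s", path, time.Now().Format("20060102-150405"))
	return os.Rename(path, rotated)
}

func main() {
	if err := rotateIfLarge("/tmp/demo.log", 10*1024*1024); err != nil {
		log.Printf("rotate failed: %v", err)
	}
}
```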
- glog.Errorf("Failed to rename rotated log %q back to %q: %v, reopen container log error: %v", rotated, log, renameErr, err) + klog.Errorf("Failed to rename rotated log %q back to %q: %v, reopen container log error: %v", rotated, log, renameErr, err) } return fmt.Errorf("failed to reopen container log %q: %v", id, err) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/BUILD index 4587b6b79002..e104574eba83 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/BUILD @@ -14,8 +14,8 @@ go_library( "//pkg/kubelet/container:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/collectors/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/collectors/BUILD index 4b41a1a32272..64022f366761 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/collectors/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/collectors/BUILD @@ -2,7 +2,10 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", - srcs = ["volume_stats.go"], + srcs = [ + "log_metrics.go", + "volume_stats.go", + ], importpath = "k8s.io/kubernetes/pkg/kubelet/metrics/collectors", visibility = ["//visibility:public"], deps = [ @@ -10,8 +13,8 @@ go_library( "//pkg/kubelet/metrics:go_default_library", "//pkg/kubelet/server/stats:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -19,6 +22,7 @@ go_test( name = "go_default_test", srcs = [ "helper_test.go", + "log_metrics_test.go", "volume_stats_test.go", ], embed = [":go_default_library"], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/collectors/log_metrics.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/collectors/log_metrics.go new file mode 100644 index 000000000000..c43915e0708d --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/collectors/log_metrics.go @@ -0,0 +1,77 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package collectors + +import ( + "github.com/prometheus/client_golang/prometheus" + "k8s.io/klog" + + statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" +) + +var ( + descLogSize = prometheus.NewDesc( + "kubelet_container_log_filesystem_used_bytes", + "Bytes used by the container's logs on the filesystem.", + []string{ + "namespace", + "pod", + "container", + }, nil, + ) +) + +type logMetricsCollector struct { + podStats func() ([]statsapi.PodStats, error) +} + +// NewLogMetricsCollector implements the prometheus.Collector interface and +// exposes metrics about container's log volume size. +func NewLogMetricsCollector(podStats func() ([]statsapi.PodStats, error)) prometheus.Collector { + return &logMetricsCollector{ + podStats: podStats, + } +} + +// Describe implements the prometheus.Collector interface. +func (c *logMetricsCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- descLogSize +} + +// Collect implements the prometheus.Collector interface. +func (c *logMetricsCollector) Collect(ch chan<- prometheus.Metric) { + podStats, err := c.podStats() + if err != nil { + klog.Errorf("failed to get pod stats: %v", err) + return + } + + for _, ps := range podStats { + for _, c := range ps.Containers { + if c.Logs != nil && c.Logs.UsedBytes != nil { + ch <- prometheus.MustNewConstMetric( + descLogSize, + prometheus.GaugeValue, + float64(*c.Logs.UsedBytes), + ps.PodRef.Namespace, + ps.PodRef.Name, + c.Name, + ) + } + } + } +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/collectors/volume_stats.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/collectors/volume_stats.go index e6f1cf36da86..c281aaea57fa 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/collectors/volume_stats.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/collectors/volume_stats.go @@ -17,9 +17,9 @@ limitations under the License. package collectors import ( - "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog" stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" "k8s.io/kubernetes/pkg/kubelet/metrics" serverstats "k8s.io/kubernetes/pkg/kubelet/server/stats" @@ -87,7 +87,7 @@ func (collector *volumeStatsCollector) Collect(ch chan<- prometheus.Metric) { lv = append([]string{pvcRef.Namespace, pvcRef.Name}, lv...) metric, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, v, lv...) 
if err != nil { - glog.Warningf("Failed to generate metric: %v", err) + klog.Warningf("Failed to generate metric: %v", err) return } ch <- metric diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go index 8a2e027a2851..ec1a7166c306 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go @@ -21,10 +21,10 @@ import ( "sync" "time" - "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" corev1 "k8s.io/api/core/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" "k8s.io/kubernetes/pkg/features" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" ) @@ -270,7 +270,7 @@ func (pc *podAndContainerCollector) Describe(ch chan<- *prometheus.Desc) { func (pc *podAndContainerCollector) Collect(ch chan<- prometheus.Metric) { runningPods, err := pc.containerCache.GetPods() if err != nil { - glog.Warningf("Failed to get running container information while collecting metrics: %v", err) + klog.Warningf("Failed to get running container information while collecting metrics: %v", err) return } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/mountpod/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/mountpod/BUILD index f7d56e3b1f72..411da640f203 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/mountpod/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/mountpod/BUILD @@ -25,7 +25,7 @@ go_test( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/BUILD index acc66b182e96..f7d550391f38 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/BUILD @@ -12,23 +12,29 @@ go_library( "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/util/format:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) go_test( name = "go_default_test", - srcs = ["dns_test.go"], + srcs = [ + "dns_test.go", + "main_test.go", + ], embed = [":go_default_library"], deps = [ + "//pkg/features:go_default_library", "//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", 
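
Editor's note: most hunks in this patch are the mechanical glog → klog import swap. One behavioral difference worth calling out: klog, unlike glog, does not register its flags on the global FlagSet in an `init()`, so binaries that relied on glog's implicit registration need an explicit `klog.InitFlags` call. A minimal sketch (flag values illustrative):

```go
package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// klog requires explicit flag registration; nil means flag.CommandLine.
	klog.InitFlags(nil)
	flag.Set("v", "2")
	flag.Parse()
	defer klog.Flush()

	klog.Infof("Infof/Warningf/Errorf keep their glog signatures")
	klog.V(2).Infof("verbosity-gated logging still works")
}
```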
"//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/dns.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/dns.go index dd414f6029dc..aa3a8e01a5f4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/dns.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/dns.go @@ -26,6 +26,7 @@ import ( "strings" "k8s.io/api/core/v1" + utilerrors "k8s.io/apimachinery/pkg/util/errors" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/record" "k8s.io/kubernetes/pkg/apis/core/validation" @@ -34,7 +35,7 @@ import ( kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/util/format" - "github.com/golang/glog" + "k8s.io/klog" ) var ( @@ -118,7 +119,7 @@ func (c *Configurer) formDNSSearchFitsLimits(composedSearch []string, pod *v1.Po if limitsExceeded { log := fmt.Sprintf("Search Line limits were exceeded, some search paths have been omitted, the applied search line is: %s", strings.Join(composedSearch, " ")) c.recorder.Event(pod, v1.EventTypeWarning, "DNSConfigForming", log) - glog.Error(log) + klog.Error(log) } return composedSearch } @@ -128,7 +129,7 @@ func (c *Configurer) formDNSNameserversFitsLimits(nameservers []string, pod *v1. nameservers = nameservers[0:validation.MaxDNSNameservers] log := fmt.Sprintf("Nameserver limits were exceeded, some nameservers have been omitted, the applied nameserver line is: %s", strings.Join(nameservers, " ")) c.recorder.Event(pod, v1.EventTypeWarning, "DNSConfigForming", log) - glog.Error(log) + klog.Error(log) } return nameservers } @@ -156,7 +157,7 @@ func (c *Configurer) CheckLimitsForResolvConf() { f, err := os.Open(c.ResolverConfig) if err != nil { c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", err.Error()) - glog.V(4).Infof("CheckLimitsForResolvConf: " + err.Error()) + klog.V(4).Infof("CheckLimitsForResolvConf: " + err.Error()) return } defer f.Close() @@ -164,7 +165,7 @@ func (c *Configurer) CheckLimitsForResolvConf() { _, hostSearch, _, err := parseResolvConf(f) if err != nil { c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", err.Error()) - glog.V(4).Infof("CheckLimitsForResolvConf: " + err.Error()) + klog.V(4).Infof("CheckLimitsForResolvConf: " + err.Error()) return } @@ -177,14 +178,14 @@ func (c *Configurer) CheckLimitsForResolvConf() { if len(hostSearch) > domainCountLimit { log := fmt.Sprintf("Resolv.conf file '%s' contains search line consisting of more than %d domains!", c.ResolverConfig, domainCountLimit) c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", log) - glog.V(4).Infof("CheckLimitsForResolvConf: " + log) + klog.V(4).Infof("CheckLimitsForResolvConf: " + log) return } if len(strings.Join(hostSearch, " ")) > validation.MaxDNSSearchListChars { log := fmt.Sprintf("Resolv.conf file '%s' contains search line which length is more than allowed %d chars!", c.ResolverConfig, validation.MaxDNSSearchListChars) c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", log) - glog.V(4).Infof("CheckLimitsForResolvConf: " + log) + klog.V(4).Infof("CheckLimitsForResolvConf: " + log) return } @@ -209,6 +210,7 @@ func parseResolvConf(reader io.Reader) (nameservers []string, searches []string, // Each option is recorded as an element in the array. 
options = []string{}
+	var allErrors []error
 	lines := strings.Split(string(file), "\n")
 	for l := range lines {
 		trimmed := strings.TrimSpace(lines[l])
@@ -219,8 +221,12 @@ func parseResolvConf(reader io.Reader) (nameservers []string, searches []string,
 		if len(fields) == 0 {
 			continue
 		}
-		if fields[0] == "nameserver" && len(fields) >= 2 {
-			nameservers = append(nameservers, fields[1])
+		if fields[0] == "nameserver" {
+			if len(fields) >= 2 {
+				nameservers = append(nameservers, fields[1])
+			} else {
+				allErrors = append(allErrors, fmt.Errorf("nameserver list is empty"))
+			}
 		}
 		if fields[0] == "search" {
 			searches = fields[1:]
@@ -230,10 +236,10 @@ func parseResolvConf(reader io.Reader) (nameservers []string, searches []string,
 		}
 	}
-	return nameservers, searches, options, nil
+	return nameservers, searches, options, utilerrors.NewAggregate(allErrors)
 }

-func (c *Configurer) getHostDNSConfig(pod *v1.Pod) (*runtimeapi.DNSConfig, error) {
+func (c *Configurer) getHostDNSConfig() (*runtimeapi.DNSConfig, error) {
 	var hostDNS, hostSearch, hostOptions []string
 	// Get host DNS settings
 	if c.ResolverConfig != "" {
@@ -323,14 +329,14 @@ func appendDNSConfig(existingDNSConfig *runtimeapi.DNSConfig, dnsConfig *v1.PodD

 // GetPodDNS returns DNS settings for the pod.
 func (c *Configurer) GetPodDNS(pod *v1.Pod) (*runtimeapi.DNSConfig, error) {
-	dnsConfig, err := c.getHostDNSConfig(pod)
+	dnsConfig, err := c.getHostDNSConfig()
 	if err != nil {
 		return nil, err
 	}

 	dnsType, err := getPodDNSType(pod)
 	if err != nil {
-		glog.Errorf("Failed to get DNS type for pod %q: %v. Falling back to DNSClusterFirst policy.", format.Pod(pod), err)
+		klog.Errorf("Failed to get DNS type for pod %q: %v. Falling back to DNSClusterFirst policy.", format.Pod(pod), err)
 		dnsType = podDNSCluster
 	}
 	switch dnsType {
@@ -394,11 +400,11 @@ func (c *Configurer) SetupDNSinContainerizedMounter(mounterPath string) {
 		f, err := os.Open(c.ResolverConfig)
 		defer f.Close()
 		if err != nil {
-			glog.Error("Could not open resolverConf file")
+			klog.Error("Could not open resolverConf file")
 		} else {
 			_, hostSearch, _, err := parseResolvConf(f)
 			if err != nil {
-				glog.Errorf("Error for parsing the reslov.conf file: %v", err)
+				klog.Errorf("Error parsing the resolv.conf file: %v", err)
 			} else {
 				dnsString = dnsString + "search"
 				for _, search := range hostSearch {
@@ -409,6 +415,6 @@ func (c *Configurer) SetupDNSinContainerizedMounter(mounterPath string) {
 		}
 	}
 	if err := ioutil.WriteFile(resolvePath, []byte(dnsString), 0600); err != nil {
-		glog.Errorf("Could not write dns nameserver in file %s, with error %v", resolvePath, err)
+		klog.Errorf("Could not write dns nameserver in file %s, with error %v", resolvePath, err)
 	}
 }
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/nodelease/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/nodelease/BUILD
index 2fe0520311b8..579a2ddbf609 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/nodelease/BUILD
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/nodelease/BUILD
@@ -14,7 +14,7 @@ go_library(
 		"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
 		"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
 		"//staging/src/k8s.io/client-go/kubernetes/typed/coordination/v1beta1:go_default_library",
-		"//vendor/github.com/golang/glog:go_default_library",
+		"//vendor/k8s.io/klog:go_default_library",
 		"//vendor/k8s.io/utils/pointer:go_default_library",
 	],
 )
@@ -25,10 +25,13 @@ go_test(
 	embed = [":go_default_library"],
 	deps = [
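
Editor's note on the `parseResolvConf` hunk above: it switches from fail-fast to collect-and-continue error handling. Malformed `nameserver` entries no longer silently disappear; they are accumulated and returned as one aggregate via `utilerrors.NewAggregate`, which returns nil for an empty slice, so the happy path is unchanged. A trimmed, self-contained sketch of that shape:

```go
package main

import (
	"fmt"
	"strings"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

// parse mimics the patched parseResolvConf: keep scanning past bad entries,
// then report them all at once as a single aggregate error.
func parse(conf string) (nameservers []string, err error) {
	var allErrors []error
	for _, line := range strings.Split(conf, "\n") {
		fields := strings.Fields(strings.TrimSpace(line))
		if len(fields) == 0 {
			continue
		}
		if fields[0] == "nameserver" {
			if len(fields) >= 2 {
				nameservers = append(nameservers, fields[1])
			} else {
				allErrors = append(allErrors, fmt.Errorf("nameserver list is empty"))
			}
		}
	}
	// NewAggregate returns nil when allErrors is empty.
	return nameservers, utilerrors.NewAggregate(allErrors)
}

func main() {
	ns, err := parse("nameserver 10.0.0.10\nnameserver\nnameserver 10.0.0.11")
	fmt.Println(ns, err) // both valid entries survive; the bad line is reported
}
```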
"//staging/src/k8s.io/api/coordination/v1beta1:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//vendor/k8s.io/utils/pointer:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/nodelease/controller.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/nodelease/controller.go index 533adaf71713..c614368517b3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/nodelease/controller.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/nodelease/controller.go @@ -29,7 +29,7 @@ import ( coordclientset "k8s.io/client-go/kubernetes/typed/coordination/v1beta1" "k8s.io/utils/pointer" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -52,7 +52,8 @@ type Controller interface { } type controller struct { - client coordclientset.LeaseInterface + client clientset.Interface + leaseClient coordclientset.LeaseInterface holderIdentity string leaseDurationSeconds int32 renewInterval time.Duration @@ -67,19 +68,20 @@ func NewController(clock clock.Clock, client clientset.Interface, holderIdentity leaseClient = client.CoordinationV1beta1().Leases(corev1.NamespaceNodeLease) } return &controller{ - client: leaseClient, - holderIdentity: holderIdentity, - leaseDurationSeconds: leaseDurationSeconds, - renewInterval: renewInterval, - clock: clock, + client: client, + leaseClient: leaseClient, + holderIdentity: holderIdentity, + leaseDurationSeconds: leaseDurationSeconds, + renewInterval: renewInterval, + clock: clock, onRepeatedHeartbeatFailure: onRepeatedHeartbeatFailure, } } // Run runs the controller func (c *controller) Run(stopCh <-chan struct{}) { - if c.client == nil { - glog.Infof("node lease controller has nil client, will not claim or renew leases") + if c.leaseClient == nil { + klog.Infof("node lease controller has nil lease client, will not claim or renew leases") return } wait.Until(c.sync, c.renewInterval, stopCh) @@ -110,7 +112,7 @@ func (c *controller) backoffEnsureLease() (*coordv1beta1.Lease, bool) { break } sleep = minDuration(2*sleep, maxBackoff) - glog.Errorf("failed to ensure node lease exists, will retry in %v, error: %v", sleep, err) + klog.Errorf("failed to ensure node lease exists, will retry in %v, error: %v", sleep, err) // backoff wait c.clock.Sleep(sleep) } @@ -120,10 +122,10 @@ func (c *controller) backoffEnsureLease() (*coordv1beta1.Lease, bool) { // ensureLease creates the lease if it does not exist. Returns the lease and // a bool (true if this call created the lease), or any error that occurs. 
func (c *controller) ensureLease() (*coordv1beta1.Lease, bool, error) { - lease, err := c.client.Get(c.holderIdentity, metav1.GetOptions{}) + lease, err := c.leaseClient.Get(c.holderIdentity, metav1.GetOptions{}) if apierrors.IsNotFound(err) { // lease does not exist, create it - lease, err := c.client.Create(c.newLease(nil)) + lease, err := c.leaseClient.Create(c.newLease(nil)) if err != nil { return nil, false, err } @@ -140,33 +142,59 @@ func (c *controller) ensureLease() (*coordv1beta1.Lease, bool, error) { // call this once you're sure the lease has been created func (c *controller) retryUpdateLease(base *coordv1beta1.Lease) { for i := 0; i < maxUpdateRetries; i++ { - _, err := c.client.Update(c.newLease(base)) + _, err := c.leaseClient.Update(c.newLease(base)) if err == nil { return } - glog.Errorf("failed to update node lease, error: %v", err) + klog.Errorf("failed to update node lease, error: %v", err) if i > 0 && c.onRepeatedHeartbeatFailure != nil { c.onRepeatedHeartbeatFailure() } } - glog.Errorf("failed %d attempts to update node lease, will retry after %v", maxUpdateRetries, c.renewInterval) + klog.Errorf("failed %d attempts to update node lease, will retry after %v", maxUpdateRetries, c.renewInterval) } // newLease constructs a new lease if base is nil, or returns a copy of base // with desired state asserted on the copy. func (c *controller) newLease(base *coordv1beta1.Lease) *coordv1beta1.Lease { + // Use the bare minimum set of fields; other fields exist for debugging/legacy, + // but we don't need to make node heartbeats more complicated by using them. var lease *coordv1beta1.Lease if base == nil { - lease = &coordv1beta1.Lease{} + lease = &coordv1beta1.Lease{ + ObjectMeta: metav1.ObjectMeta{ + Name: c.holderIdentity, + Namespace: corev1.NamespaceNodeLease, + }, + Spec: coordv1beta1.LeaseSpec{ + HolderIdentity: pointer.StringPtr(c.holderIdentity), + LeaseDurationSeconds: pointer.Int32Ptr(c.leaseDurationSeconds), + }, + } } else { lease = base.DeepCopy() } - // Use the bare minimum set of fields; other fields exist for debugging/legacy, - // but we don't need to make node heartbeats more complicated by using them. - lease.Name = c.holderIdentity - lease.Spec.HolderIdentity = pointer.StringPtr(c.holderIdentity) - lease.Spec.LeaseDurationSeconds = pointer.Int32Ptr(c.leaseDurationSeconds) lease.Spec.RenewTime = &metav1.MicroTime{Time: c.clock.Now()} + + // Setting owner reference needs node's UID. Note that it is different from + // kubelet.nodeRef.UID. When lease is initially created, it is possible that + // the connection between master and node is not ready yet. So try to set + // owner reference every time when renewing the lease, until successful. 
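
Editor's note: the comment above motivates the next hunk, which attaches an OwnerReference pointing at the Node so the lease is garbage-collected with it. Constructing that reference in isolation looks roughly like this (the UID is a placeholder; the controller reads it from the live Node object):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

// nodeOwnerRef builds the OwnerReference the lease controller attaches to a
// Lease so that deleting the Node garbage-collects its lease.
func nodeOwnerRef(nodeName string, uid types.UID) metav1.OwnerReference {
	gvk := corev1.SchemeGroupVersion.WithKind("Node")
	return metav1.OwnerReference{
		APIVersion: gvk.GroupVersion().String(), // "v1" for the core group
		Kind:       gvk.Kind,
		Name:       nodeName,
		UID:        uid,
	}
}

func main() {
	ref := nodeOwnerRef("node-1", types.UID("00000000-0000-0000-0000-000000000000"))
	fmt.Printf("%+v\n", ref)
}
```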
+ if lease.OwnerReferences == nil || len(lease.OwnerReferences) == 0 { + if node, err := c.client.CoreV1().Nodes().Get(c.holderIdentity, metav1.GetOptions{}); err == nil { + lease.OwnerReferences = []metav1.OwnerReference{ + { + APIVersion: corev1.SchemeGroupVersion.WithKind("Node").Version, + Kind: corev1.SchemeGroupVersion.WithKind("Node").Kind, + Name: c.holderIdentity, + UID: node.UID, + }, + } + } else { + klog.Errorf("failed to get node %q when trying to set owner ref to the node lease: %v", c.holderIdentity, err) + } + } + return lease } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/nodestatus/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/nodestatus/BUILD index d8c5f632d8e8..a626ce19866a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/nodestatus/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/nodestatus/BUILD @@ -7,7 +7,6 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/apis/core/v1/helper:go_default_library", - "//pkg/cloudprovider:go_default_library", "//pkg/features:go_default_library", "//pkg/kubelet/apis:go_default_library", "//pkg/kubelet/cadvisor:go_default_library", @@ -21,8 +20,9 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//staging/src/k8s.io/cloud-provider:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/nodestatus/setters.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/nodestatus/setters.go index 8718dac16201..d16bcec29499 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/nodestatus/setters.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/nodestatus/setters.go @@ -31,8 +31,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilnet "k8s.io/apimachinery/pkg/util/net" utilfeature "k8s.io/apiserver/pkg/util/feature" + cloudprovider "k8s.io/cloud-provider" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" - "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/features" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/kubelet/cadvisor" @@ -42,7 +42,7 @@ import ( "k8s.io/kubernetes/pkg/version" "k8s.io/kubernetes/pkg/volume" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -69,7 +69,7 @@ func NodeAddress(nodeIP net.IP, // typically Kubelet.nodeIP if err := validateNodeIPFunc(nodeIP); err != nil { return fmt.Errorf("failed to validate nodeIP: %v", err) } - glog.V(2).Infof("Using node IP: %q", nodeIP.String()) + klog.V(2).Infof("Using node IP: %q", nodeIP.String()) } if externalCloudProvider { @@ -137,11 +137,11 @@ func NodeAddress(nodeIP net.IP, // typically Kubelet.nodeIP if existingHostnameAddress == nil { // no existing Hostname address found, add it - glog.Warningf("adding overridden hostname of %v to cloudprovider-reported addresses", hostname) + klog.Warningf("adding overridden hostname of %v to cloudprovider-reported addresses", hostname) nodeAddresses = append(nodeAddresses, v1.NodeAddress{Type: v1.NodeHostName, Address: hostname}) } else { // override the Hostname address reported by the cloud provider - glog.Warningf("replacing cloudprovider-reported hostname of %v with overridden 
hostname of %v", existingHostnameAddress.Address, hostname) + klog.Warningf("replacing cloudprovider-reported hostname of %v with overridden hostname of %v", existingHostnameAddress.Address, hostname) existingHostnameAddress.Address = hostname } } @@ -239,7 +239,7 @@ func MachineInfo(nodeName string, node.Status.Capacity[v1.ResourceCPU] = *resource.NewMilliQuantity(0, resource.DecimalSI) node.Status.Capacity[v1.ResourceMemory] = resource.MustParse("0Gi") node.Status.Capacity[v1.ResourcePods] = *resource.NewQuantity(int64(maxPods), resource.DecimalSI) - glog.Errorf("Error getting machine info: %v", err) + klog.Errorf("Error getting machine info: %v", err) } else { node.Status.NodeInfo.MachineID = info.MachineID node.Status.NodeInfo.SystemUUID = info.SystemUUID @@ -278,14 +278,14 @@ func MachineInfo(nodeName string, if devicePluginCapacity != nil { for k, v := range devicePluginCapacity { if old, ok := node.Status.Capacity[k]; !ok || old.Value() != v.Value() { - glog.V(2).Infof("Update capacity for %s to %d", k, v.Value()) + klog.V(2).Infof("Update capacity for %s to %d", k, v.Value()) } node.Status.Capacity[k] = v } } for _, removedResource := range removedDevicePlugins { - glog.V(2).Infof("Set capacity for %s to 0 on device removal", removedResource) + klog.V(2).Infof("Set capacity for %s to 0 on device removal", removedResource) // Set the capacity of the removed resource to 0 instead of // removing the resource from the node status. This is to indicate // that the resource is managed by device plugin and had been @@ -326,7 +326,7 @@ func MachineInfo(nodeName string, if devicePluginAllocatable != nil { for k, v := range devicePluginAllocatable { if old, ok := node.Status.Allocatable[k]; !ok || old.Value() != v.Value() { - glog.V(2).Infof("Update allocatable for %s to %d", k, v.Value()) + klog.V(2).Infof("Update allocatable for %s to %d", k, v.Value()) } node.Status.Allocatable[k] = v } @@ -357,7 +357,7 @@ func VersionInfo(versionInfoFunc func() (*cadvisorapiv1.VersionInfo, error), // verinfo, err := versionInfoFunc() if err != nil { // TODO(mtaufen): consider removing this log line, since returned error will be logged - glog.Errorf("Error getting version info: %v", err) + klog.Errorf("Error getting version info: %v", err) return fmt.Errorf("error getting version info: %v", err) } @@ -397,7 +397,7 @@ func Images(nodeStatusMaxImages int32, containerImages, err := imageListFunc() if err != nil { // TODO(mtaufen): consider removing this log line, since returned error will be logged - glog.Errorf("Error getting image list: %v", err) + klog.Errorf("Error getting image list: %v", err) node.Status.Images = imagesOnNode return fmt.Errorf("error getting image list: %v", err) } @@ -515,7 +515,7 @@ func ReadyCondition( recordEventFunc(v1.EventTypeNormal, events.NodeReady) } else { recordEventFunc(v1.EventTypeNormal, events.NodeNotReady) - glog.Infof("Node became not ready: %+v", newNodeReadyCondition) + klog.Infof("Node became not ready: %+v", newNodeReadyCondition) } } return nil @@ -705,45 +705,6 @@ func DiskPressureCondition(nowFunc func() time.Time, // typically Kubelet.clock. } } -// OutOfDiskCondition returns a Setter that updates the v1.NodeOutOfDisk condition on the node. 
-// TODO(#65658): remove this condition -func OutOfDiskCondition(nowFunc func() time.Time, // typically Kubelet.clock.Now - recordEventFunc func(eventType, event string), // typically Kubelet.recordNodeStatusEvent -) Setter { - return func(node *v1.Node) error { - currentTime := metav1.NewTime(nowFunc()) - var nodeOODCondition *v1.NodeCondition - - // Check if NodeOutOfDisk condition already exists and if it does, just pick it up for update. - for i := range node.Status.Conditions { - if node.Status.Conditions[i].Type == v1.NodeOutOfDisk { - nodeOODCondition = &node.Status.Conditions[i] - } - } - - newOODCondition := nodeOODCondition == nil - if newOODCondition { - nodeOODCondition = &v1.NodeCondition{} - } - if nodeOODCondition.Status != v1.ConditionFalse { - nodeOODCondition.Type = v1.NodeOutOfDisk - nodeOODCondition.Status = v1.ConditionFalse - nodeOODCondition.Reason = "KubeletHasSufficientDisk" - nodeOODCondition.Message = "kubelet has sufficient disk space available" - nodeOODCondition.LastTransitionTime = currentTime - recordEventFunc(v1.EventTypeNormal, "NodeHasSufficientDisk") - } - - // Update the heartbeat time irrespective of all the conditions. - nodeOODCondition.LastHeartbeatTime = currentTime - - if newOODCondition { - node.Status.Conditions = append(node.Status.Conditions, *nodeOODCondition) - } - return nil - } -} - // VolumesInUse returns a Setter that updates the volumes in use on the node. func VolumesInUse(syncedFunc func() bool, // typically Kubelet.volumeManager.ReconcilerStatesHasBeenSynced volumesInUseFunc func() []v1.UniqueVolumeName, // typically Kubelet.volumeManager.GetVolumesInUse @@ -772,7 +733,7 @@ func VolumeLimits(volumePluginListFunc func() []volume.VolumePluginWithAttachLim for _, volumePlugin := range pluginWithLimits { attachLimits, err := volumePlugin.GetVolumeLimits() if err != nil { - glog.V(4).Infof("Error getting volume limit for plugin %s", volumePlugin.GetPluginName()) + klog.V(4).Infof("Error getting volume limit for plugin %s", volumePlugin.GetPluginName()) continue } for limitKey, value := range attachLimits { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/oom_watcher.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/oom_watcher.go index 448082b05f1d..1ca014b4babf 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/oom_watcher.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/oom_watcher.go @@ -17,13 +17,13 @@ limitations under the License. 
package kubelet

import (
-	"github.com/golang/glog"
 	"github.com/google/cadvisor/events"
 	cadvisorapi "github.com/google/cadvisor/info/v1"
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/client-go/tools/record"
+	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/kubelet/cadvisor"
 )
@@ -65,10 +65,10 @@ func (ow *realOOMWatcher) Start(ref *v1.ObjectReference) error {
 		defer runtime.HandleCrash()

 		for event := range eventChannel.GetChannel() {
-			glog.V(2).Infof("Got sys oom event from cadvisor: %v", event)
+			klog.V(2).Infof("Got sys oom event from cadvisor: %v", event)
 			ow.recorder.PastEventf(ref, metav1.Time{Time: event.Timestamp}, v1.EventTypeWarning, systemOOMEvent, "System OOM encountered")
 		}
-		glog.Errorf("Unexpectedly stopped receiving OOM notifications from cAdvisor")
+		klog.Errorf("Unexpectedly stopped receiving OOM notifications from cAdvisor")
 	}()
 	return nil
 }
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/BUILD
index f3d4073e340a..2d54aee08868 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/BUILD
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/BUILD
@@ -22,7 +22,7 @@ go_library(
 		"//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",
 		"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
 		"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-		"//vendor/github.com/golang/glog:go_default_library",
+		"//vendor/k8s.io/klog:go_default_library",
 	],
 )
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/generic.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/generic.go
index d9286c96c629..4de9c721130f 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/generic.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/generic.go
@@ -21,11 +21,11 @@ import (
 	"sync/atomic"
 	"time"

-	"github.com/golang/glog"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/klog"
 	runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/metrics"
@@ -106,6 +106,7 @@ type podRecord struct {

type podRecords map[types.UID]*podRecord

+// NewGenericPLEG instantiates a new GenericPLEG object and returns it.
 func NewGenericPLEG(runtime kubecontainer.Runtime, channelCapacity int,
 	relistPeriod time.Duration, cache kubecontainer.Cache, clock clock.Clock) PodLifecycleEventGenerator {
 	return &GenericPLEG{
@@ -118,7 +119,7 @@ func NewGenericPLEG(runtime kubecontainer.Runtime, channelCapacity int,
 	}
 }

-// Returns a channel from which the subscriber can receive PodLifecycleEvent
+// Watch returns a channel from which the subscriber can receive PodLifecycleEvent
 // events.
 // TODO: support multiple subscribers.
 func (g *GenericPLEG) Watch() chan *PodLifecycleEvent {
@@ -130,8 +131,13 @@ func (g *GenericPLEG) Start() {
 	go wait.Until(g.relist, g.relistPeriod, wait.NeverStop)
 }

+// Healthy checks if the PLEG works properly.
+// relistThreshold is the maximum interval between two relists.
func (g *GenericPLEG) Healthy() (bool, error) { relistTime := g.getRelistTime() + if relistTime.IsZero() { + return false, fmt.Errorf("pleg has yet to be successful") + } elapsed := g.clock.Since(relistTime) if elapsed > relistThreshold { return false, fmt.Errorf("pleg was last seen active %v ago; threshold is %v", elapsed, relistThreshold) @@ -144,7 +150,7 @@ func generateEvents(podID types.UID, cid string, oldState, newState plegContaine return nil } - glog.V(4).Infof("GenericPLEG: %v/%v: %v -> %v", podID, cid, oldState, newState) + klog.V(4).Infof("GenericPLEG: %v/%v: %v -> %v", podID, cid, oldState, newState) switch newState { case plegContainerRunning: return []*PodLifecycleEvent{{ID: podID, Type: ContainerStarted, Data: cid}} @@ -180,7 +186,7 @@ func (g *GenericPLEG) updateRelistTime(timestamp time.Time) { // relist queries the container runtime for list of pods/containers, compare // with the internal pods/containers, and generates events accordingly. func (g *GenericPLEG) relist() { - glog.V(5).Infof("GenericPLEG: Relisting") + klog.V(5).Infof("GenericPLEG: Relisting") if lastRelistTime := g.getRelistTime(); !lastRelistTime.IsZero() { metrics.PLEGRelistInterval.Observe(metrics.SinceInMicroseconds(lastRelistTime)) @@ -194,7 +200,7 @@ func (g *GenericPLEG) relist() { // Get all the pods. podList, err := g.runtime.GetPods(true) if err != nil { - glog.Errorf("GenericPLEG: Unable to retrieve pods: %v", err) + klog.Errorf("GenericPLEG: Unable to retrieve pods: %v", err) return } @@ -238,7 +244,7 @@ func (g *GenericPLEG) relist() { // serially may take a while. We should be aware of this and // parallelize if needed. if err := g.updateCache(pod, pid); err != nil { - glog.Errorf("PLEG: Ignoring events for pod %s/%s: %v", pod.Name, pod.Namespace, err) + klog.Errorf("PLEG: Ignoring events for pod %s/%s: %v", pod.Name, pod.Namespace, err) // make sure we try to reinspect the pod during the next relisting needsReinspection[pid] = pod @@ -265,10 +271,10 @@ func (g *GenericPLEG) relist() { if g.cacheEnabled() { // reinspect any pods that failed inspection during the previous relist if len(g.podsToReinspect) > 0 { - glog.V(5).Infof("GenericPLEG: Reinspecting pods that previously failed inspection") + klog.V(5).Infof("GenericPLEG: Reinspecting pods that previously failed inspection") for pid, pod := range g.podsToReinspect { if err := g.updateCache(pod, pid); err != nil { - glog.Errorf("PLEG: pod %s/%s failed reinspection: %v", pod.Name, pod.Namespace, err) + klog.Errorf("PLEG: pod %s/%s failed reinspection: %v", pod.Name, pod.Namespace, err) needsReinspection[pid] = pod } } @@ -368,7 +374,7 @@ func (g *GenericPLEG) updateCache(pod *kubecontainer.Pod, pid types.UID) error { if pod == nil { // The pod is missing in the current relist. This means that // the pod has no visible (active or inactive) containers. - glog.V(4).Infof("PLEG: Delete status for pod %q", string(pid)) + klog.V(4).Infof("PLEG: Delete status for pod %q", string(pid)) g.cache.Delete(pid) return nil } @@ -377,7 +383,7 @@ func (g *GenericPLEG) updateCache(pod *kubecontainer.Pod, pid types.UID) error { // GetPodStatus(pod *kubecontainer.Pod) so that Docker can avoid listing // all containers again. 
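
Editor's note: the new zero-time guard in `Healthy` means a PLEG that has never completed a relist now reports unhealthy immediately instead of comparing against a zero timestamp. The check in isolation (a sketch; the real method reads `relistTime` atomically from the GenericPLEG):

```go
package main

import (
	"fmt"
	"time"
)

// healthy mirrors the patched GenericPLEG.Healthy logic: never-relisted is
// unhealthy, and so is a relist older than the threshold.
func healthy(lastRelist time.Time, threshold time.Duration, now func() time.Time) (bool, error) {
	if lastRelist.IsZero() {
		return false, fmt.Errorf("pleg has yet to be successful")
	}
	if elapsed := now().Sub(lastRelist); elapsed > threshold {
		return false, fmt.Errorf("pleg was last seen active %v ago; threshold is %v", elapsed, threshold)
	}
	return true, nil
}

func main() {
	ok, err := healthy(time.Time{}, 3*time.Minute, time.Now)
	fmt.Println(ok, err) // false: no successful relist yet

	ok, err = healthy(time.Now().Add(-time.Minute), 3*time.Minute, time.Now)
	fmt.Println(ok, err) // true
}
```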
status, err := g.runtime.GetPodStatus(pod.ID, pod.Name, pod.Namespace)
-	glog.V(4).Infof("PLEG: Write status for %s/%s: %#v (err: %v)", pod.Name, pod.Namespace, status, err)
+	klog.V(4).Infof("PLEG: Write status for %s/%s: %#v (err: %v)", pod.Name, pod.Namespace, status, err)
 	if err == nil {
 		// Preserve the pod IP across cache updates if the new IP is empty.
 		// When a pod is torn down, kubelet may race with PLEG and retrieve
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/pleg.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/pleg.go
index aa0e46e465da..86b48c7ef135 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/pleg.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/pleg.go
@@ -20,16 +20,20 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 )

+// PodLifeCycleEventType defines the event type of pod life cycle events.
type PodLifeCycleEventType string

const (
+	// ContainerStarted - event type when the new state of container is running.
 	ContainerStarted PodLifeCycleEventType = "ContainerStarted"
-	ContainerDied PodLifeCycleEventType = "ContainerDied"
+	// ContainerDied - event type when the new state of container is exited.
+	ContainerDied PodLifeCycleEventType = "ContainerDied"
+	// ContainerRemoved - event type when the old state of container is exited.
 	ContainerRemoved PodLifeCycleEventType = "ContainerRemoved"
 	// PodSync is used to trigger syncing of a pod when the observed change of
 	// the state of the pod cannot be captured by any single event above.
 	PodSync PodLifeCycleEventType = "PodSync"
-	// Do not use the events below because they are disabled in GenericPLEG.
+	// ContainerChanged - event type when the new state of container is unknown.
 	ContainerChanged PodLifeCycleEventType = "ContainerChanged"
 )
@@ -45,6 +49,7 @@ type PodLifecycleEvent struct {
 	Data interface{}
 }

+// PodLifecycleEventGenerator contains functions for generating pod life cycle events.
type PodLifecycleEventGenerator interface {
 	Start()
 	Watch() chan *PodLifecycleEvent
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/BUILD
index 4096c025864e..8d4811ab4599 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/BUILD
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/BUILD
@@ -25,7 +25,7 @@ go_library(
 		"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
 		"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
 		"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
-		"//vendor/github.com/golang/glog:go_default_library",
+		"//vendor/k8s.io/klog:go_default_library",
 	],
 )
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/mirror_client.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/mirror_client.go
index 9b8add5bc5a0..b4b6abe61ce8 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/mirror_client.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/mirror_client.go
@@ -17,11 +17,11 @@ limitations under the License.
package pod import ( - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" ) @@ -79,13 +79,13 @@ func (mc *basicMirrorClient) DeleteMirrorPod(podFullName string) error { } name, namespace, err := kubecontainer.ParsePodFullName(podFullName) if err != nil { - glog.Errorf("Failed to parse a pod full name %q", podFullName) + klog.Errorf("Failed to parse a pod full name %q", podFullName) return err } - glog.V(2).Infof("Deleting a mirror pod %q", podFullName) + klog.V(2).Infof("Deleting a mirror pod %q", podFullName) // TODO(random-liu): Delete the mirror pod with uid precondition in mirror pod manager if err := mc.apiserverClient.CoreV1().Pods(namespace).Delete(name, metav1.NewDeleteOptions(0)); err != nil && !errors.IsNotFound(err) { - glog.Errorf("Failed deleting a mirror pod %q: %v", podFullName, err) + klog.Errorf("Failed deleting a mirror pod %q: %v", podFullName, err) } return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/pod_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/pod_manager.go index 6033ae8d50a8..ce5c1c30c62d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/pod_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod/pod_manager.go @@ -19,7 +19,7 @@ package pod import ( "sync" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -163,7 +163,7 @@ func (pm *basicManager) UpdatePod(pod *v1.Pod) { pm.updatePodsInternal(pod) if pm.checkpointManager != nil { if err := checkpoint.WritePod(pm.checkpointManager, pod); err != nil { - glog.Errorf("Error writing checkpoint for pod: %v", pod.GetName()) + klog.Errorf("Error writing checkpoint for pod: %v", pod.GetName()) } } } @@ -226,7 +226,7 @@ func (pm *basicManager) DeletePod(pod *v1.Pod) { } if pm.checkpointManager != nil { if err := checkpoint.DeletePod(pm.checkpointManager, pod); err != nil { - glog.Errorf("Error deleting checkpoint for pod: %v", pod.GetName()) + klog.Errorf("Error deleting checkpoint for pod: %v", pod.GetName()) } } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod_container_deletor.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod_container_deletor.go index 48a85958db34..0a00ac90698c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod_container_deletor.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod_container_deletor.go @@ -19,8 +19,8 @@ package kubelet import ( "sort" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" ) @@ -72,7 +72,7 @@ func getContainersToDeleteInPod(filterContainerID string, podStatus *kubecontain }(filterContainerID, podStatus) if filterContainerID != "" && matchedContainer == nil { - glog.Warningf("Container %q not found in pod's containers", filterContainerID) + klog.Warningf("Container %q not found in pod's containers", filterContainerID) return containerStatusbyCreatedList{} } @@ -106,7 +106,7 @@ func (p *podContainerDeletor) deleteContainersInPod(filterContainerID string, po select { case p.worker <- candidate.ID: default: - glog.Warningf("Failed to issue the request to remove container %v", candidate.ID) + klog.Warningf("Failed to issue the request 
to remove container %v", candidate.ID)
 		}
 	}
 }
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod_workers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod_workers.go
index 5a1fb9927251..6b7c2244f34c 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod_workers.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod_workers.go
@@ -18,15 +18,16 @@ package kubelet

import (
 	"fmt"
+	"strings"
 	"sync"
 	"time"

-	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/tools/record"
+	"k8s.io/klog"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/events"
 	"k8s.io/kubernetes/pkg/kubelet/eviction"
@@ -96,8 +97,11 @@ const (
 	// jitter factor for resyncInterval
 	workerResyncIntervalJitterFactor = 0.5

-	// jitter factor for backOffPeriod
+	// jitter factor for backOffPeriod and backOffOnTransientErrorPeriod
 	workerBackOffPeriodJitterFactor = 0.5
+
+	// backoff period when a transient error occurs.
+	backOffOnTransientErrorPeriod = time.Second
 )

type podWorkers struct {
@@ -183,7 +187,7 @@ func (p *podWorkers) managePodLoop(podUpdates <-chan UpdatePodOptions) {
 			}
 			if err != nil {
 				// IMPORTANT: we do not log errors here, the syncPodFn is responsible for logging errors
-				glog.Errorf("Error syncing pod %s (%q), skipping: %v", update.Pod.UID, format.Pod(update.Pod), err)
+				klog.Errorf("Error syncing pod %s (%q), skipping: %v", update.Pod.UID, format.Pod(update.Pod), err)
 			}
 			p.wrapUp(update.Pod.UID, err)
 		}
@@ -263,6 +267,9 @@ func (p *podWorkers) wrapUp(uid types.UID, syncErr error) {
 	case syncErr == nil:
 		// No error; requeue at the regular resync interval.
 		p.workQueue.Enqueue(uid, wait.Jitter(p.resyncInterval, workerResyncIntervalJitterFactor))
+	case strings.Contains(syncErr.Error(), NetworkNotReadyErrorMsg):
+		// Network is not ready; back off for a short period of time and retry as network might be ready soon.
+		p.workQueue.Enqueue(uid, wait.Jitter(backOffOnTransientErrorPeriod, workerBackOffPeriodJitterFactor))
 	default:
 		// Error occurred during the sync; back off and then retry.
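
Editor's note: the new `wrapUp` case above gives transient network-not-ready sync errors a roughly one-second jittered retry instead of the regular backoff. A sketch of that dispatch using apimachinery's `wait.Jitter` (the message constant below is a stand-in for the kubelet's `NetworkNotReadyErrorMsg`):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// networkNotReadyMsg stands in for kubelet's NetworkNotReadyErrorMsg.
const networkNotReadyMsg = "network is not ready"

// requeueDelay mirrors the wrapUp dispatch: nil error -> slow resync,
// transient network error -> ~1s retry, anything else -> regular backoff.
func requeueDelay(syncErr error, resync, backoff time.Duration) time.Duration {
	const jitterFactor = 0.5
	switch {
	case syncErr == nil:
		return wait.Jitter(resync, jitterFactor)
	case strings.Contains(syncErr.Error(), networkNotReadyMsg):
		return wait.Jitter(time.Second, jitterFactor)
	default:
		return wait.Jitter(backoff, jitterFactor)
	}
}

func main() {
	fmt.Println(requeueDelay(nil, time.Minute, 10*time.Second))
	fmt.Println(requeueDelay(errors.New("network is not ready: cni not initialized"), time.Minute, 10*time.Second))
	fmt.Println(requeueDelay(errors.New("image pull failed"), time.Minute, 10*time.Second))
}
```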
p.workQueue.Enqueue(uid, wait.Jitter(p.backOffPeriod, workerBackOffPeriodJitterFactor)) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/preemption/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/preemption/BUILD index 9e8e44331b8c..d1a06818df38 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/preemption/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/preemption/BUILD @@ -22,7 +22,7 @@ go_library( "//pkg/scheduler/algorithm/predicates:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -41,16 +41,21 @@ filegroup( go_test( name = "go_default_test", - srcs = ["preemption_test.go"], + srcs = [ + "main_test.go", + "preemption_test.go", + ], embed = [":go_default_library"], deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/scheduling:go_default_library", + "//pkg/features:go_default_library", "//pkg/kubelet/types:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/preemption/preemption.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/preemption/preemption.go index c7236e2c5f71..94a3afad6b5c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/preemption/preemption.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/preemption/preemption.go @@ -20,9 +20,9 @@ import ( "fmt" "math" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/client-go/tools/record" + "k8s.io/klog" "k8s.io/kubernetes/pkg/api/v1/resource" v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" "k8s.io/kubernetes/pkg/kubelet/events" @@ -96,7 +96,7 @@ func (c *CriticalPodAdmissionHandler) evictPodsToFreeRequests(admitPod *v1.Pod, if err != nil { return fmt.Errorf("preemption: error finding a set of pods to preempt: %v", err) } - glog.Infof("preemption: attempting to evict pods %v, in order to free up resources: %s", podsToPreempt, insufficientResources.toString()) + klog.Infof("preemption: attempting to evict pods %v, in order to free up resources: %s", podsToPreempt, insufficientResources.toString()) for _, pod := range podsToPreempt { status := v1.PodStatus{ Phase: v1.PodFailed, @@ -110,7 +110,7 @@ func (c *CriticalPodAdmissionHandler) evictPodsToFreeRequests(admitPod *v1.Pod, if err != nil { return fmt.Errorf("preemption: pod %s failed to evict %v", format.Pod(pod), err) } - glog.Infof("preemption: pod %s evicted successfully", format.Pod(pod)) + klog.Infof("preemption: pod %s evicted successfully", format.Pod(pod)) } return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/BUILD index 78394bf72894..18922e59e333 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/BUILD @@ -32,8 +32,8 @@ go_library( 
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) @@ -64,7 +64,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober.go index d81ee8f377ef..efec60c98d2a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober.go @@ -39,20 +39,20 @@ import ( tcprobe "k8s.io/kubernetes/pkg/probe/tcp" "k8s.io/utils/exec" - "github.com/golang/glog" + "k8s.io/klog" ) const maxProbeRetries = 3 // Prober helps to check the liveness/readiness of a container. type prober struct { - exec execprobe.ExecProber + exec execprobe.Prober // probe types needs different httprobe instances so they don't // share a connection pool which can cause collsions to the // same host:port and transient failures. See #49740. - readinessHttp httprobe.HTTPProber - livenessHttp httprobe.HTTPProber - tcp tcprobe.TCPProber + readinessHttp httprobe.Prober + livenessHttp httprobe.Prober + tcp tcprobe.Prober runner kubecontainer.ContainerCommandRunner refManager *kubecontainer.RefManager @@ -91,7 +91,7 @@ func (pb *prober) probe(probeType probeType, pod *v1.Pod, status v1.PodStatus, c ctrName := fmt.Sprintf("%s:%s", format.Pod(pod), container.Name) if probeSpec == nil { - glog.Warningf("%s probe for %s is nil", probeType, ctrName) + klog.Warningf("%s probe for %s is nil", probeType, ctrName) return results.Success, nil } @@ -100,22 +100,22 @@ func (pb *prober) probe(probeType probeType, pod *v1.Pod, status v1.PodStatus, c // Probe failed in one way or another. 
ref, hasRef := pb.refManager.GetRef(containerID) if !hasRef { - glog.Warningf("No ref for container %q (%s)", containerID.String(), ctrName) + klog.Warningf("No ref for container %q (%s)", containerID.String(), ctrName) } if err != nil { - glog.V(1).Infof("%s probe for %q errored: %v", probeType, ctrName, err) + klog.V(1).Infof("%s probe for %q errored: %v", probeType, ctrName, err) if hasRef { pb.recorder.Eventf(ref, v1.EventTypeWarning, events.ContainerUnhealthy, "%s probe errored: %v", probeType, err) } } else { // result != probe.Success - glog.V(1).Infof("%s probe for %q failed (%v): %s", probeType, ctrName, result, output) + klog.V(1).Infof("%s probe for %q failed (%v): %s", probeType, ctrName, result, output) if hasRef { pb.recorder.Eventf(ref, v1.EventTypeWarning, events.ContainerUnhealthy, "%s probe failed: %s", probeType, output) } } return results.Failure, err } - glog.V(3).Infof("%s probe for %q succeeded", probeType, ctrName) + klog.V(3).Infof("%s probe for %q succeeded", probeType, ctrName) return results.Success, nil } @@ -147,7 +147,7 @@ func buildHeader(headerList []v1.HTTPHeader) http.Header { func (pb *prober) runProbe(probeType probeType, p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID) (probe.Result, string, error) { timeout := time.Duration(p.TimeoutSeconds) * time.Second if p.Exec != nil { - glog.V(4).Infof("Exec-Probe Pod: %v, Container: %v, Command: %v", pod, container, p.Exec.Command) + klog.V(4).Infof("Exec-Probe Pod: %v, Container: %v, Command: %v", pod, container, p.Exec.Command) command := kubecontainer.ExpandContainerCommandOnlyStatic(p.Exec.Command, container.Env) return pb.exec.Probe(pb.newExecInContainer(container, containerID, command, timeout)) } @@ -162,10 +162,10 @@ func (pb *prober) runProbe(probeType probeType, p *v1.Probe, pod *v1.Pod, status return probe.Unknown, "", err } path := p.HTTPGet.Path - glog.V(4).Infof("HTTP-Probe Host: %v://%v, Port: %v, Path: %v", scheme, host, port, path) + klog.V(4).Infof("HTTP-Probe Host: %v://%v, Port: %v, Path: %v", scheme, host, port, path) url := formatURL(scheme, host, port, path) headers := buildHeader(p.HTTPGet.HTTPHeaders) - glog.V(4).Infof("HTTP-Probe Headers: %v", headers) + klog.V(4).Infof("HTTP-Probe Headers: %v", headers) if probeType == liveness { return pb.livenessHttp.Probe(url, headers, timeout) } else { // readiness @@ -181,10 +181,10 @@ func (pb *prober) runProbe(probeType probeType, p *v1.Probe, pod *v1.Pod, status if host == "" { host = status.PodIP } - glog.V(4).Infof("TCP-Probe Host: %v, Port: %v, Timeout: %v", host, port, timeout) + klog.V(4).Infof("TCP-Probe Host: %v, Port: %v, Timeout: %v", host, port, timeout) return pb.tcp.Probe(host, port, timeout) } - glog.Warningf("Failed to find probe builder for container: %v", container) + klog.Warningf("Failed to find probe builder for container: %v", container) return probe.Unknown, "", fmt.Errorf("Missing probe handler for %s:%s", format.Pod(pod), container.Name) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober_manager.go index 0e53e094076f..a913598ef699 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober_manager.go @@ -19,13 +19,13 @@ package prober import ( "sync" - "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" 
"k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/record" + "k8s.io/klog" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/prober/results" "k8s.io/kubernetes/pkg/kubelet/status" @@ -149,7 +149,7 @@ func (m *manager) AddPod(pod *v1.Pod) { if c.ReadinessProbe != nil { key.probeType = readiness if _, ok := m.workers[key]; ok { - glog.Errorf("Readiness probe already exists! %v - %v", + klog.Errorf("Readiness probe already exists! %v - %v", format.Pod(pod), c.Name) return } @@ -161,7 +161,7 @@ func (m *manager) AddPod(pod *v1.Pod) { if c.LivenessProbe != nil { key.probeType = liveness if _, ok := m.workers[key]; ok { - glog.Errorf("Liveness probe already exists! %v - %v", + klog.Errorf("Liveness probe already exists! %v - %v", format.Pod(pod), c.Name) return } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/worker.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/worker.go index cdefc1da2c67..0602419d7b6d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/worker.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/prober/worker.go @@ -20,10 +20,10 @@ import ( "math/rand" "time" - "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/klog" podutil "k8s.io/kubernetes/pkg/api/v1/pod" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/prober/results" @@ -160,13 +160,13 @@ func (w *worker) doProbe() (keepGoing bool) { status, ok := w.probeManager.statusManager.GetPodStatus(w.pod.UID) if !ok { // Either the pod has not been created yet, or it was already deleted. - glog.V(3).Infof("No status for pod: %v", format.Pod(w.pod)) + klog.V(3).Infof("No status for pod: %v", format.Pod(w.pod)) return true } // Worker should terminate if pod is terminated. if status.Phase == v1.PodFailed || status.Phase == v1.PodSucceeded { - glog.V(3).Infof("Pod %v %v, exiting probe worker", + klog.V(3).Infof("Pod %v %v, exiting probe worker", format.Pod(w.pod), status.Phase) return false } @@ -174,7 +174,7 @@ func (w *worker) doProbe() (keepGoing bool) { c, ok := podutil.GetContainerStatus(status.ContainerStatuses, w.container.Name) if !ok || len(c.ContainerID) == 0 { // Either the container has not been created yet, or it was deleted. - glog.V(3).Infof("Probe target container not found: %v - %v", + klog.V(3).Infof("Probe target container not found: %v - %v", format.Pod(w.pod), w.container.Name) return true // Wait for more information. 
} @@ -195,7 +195,7 @@ func (w *worker) doProbe() (keepGoing bool) { } if c.State.Running == nil { - glog.V(3).Infof("Non-running container probed: %v - %v", + klog.V(3).Infof("Non-running container probed: %v - %v", format.Pod(w.pod), w.container.Name) if !w.containerID.IsEmpty() { w.resultsManager.Set(w.containerID, results.Failure, w.pod) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/remote/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/remote/BUILD index 05c6efd271af..cf19206ce2bd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/remote/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/remote/BUILD @@ -19,8 +19,8 @@ go_library( "//pkg/kubelet/apis/cri:go_default_library", "//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library", "//pkg/kubelet/util:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/remote/remote_image.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/remote/remote_image.go index d6eae5e38f9d..17b90574dc05 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/remote/remote_image.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/remote/remote_image.go @@ -17,12 +17,13 @@ limitations under the License. package remote import ( + "context" "errors" "fmt" "time" - "github.com/golang/glog" "google.golang.org/grpc" + "k8s.io/klog" internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" @@ -37,15 +38,18 @@ type RemoteImageService struct { // NewRemoteImageService creates a new internalapi.ImageManagerService. 
func NewRemoteImageService(endpoint string, connectionTimeout time.Duration) (internalapi.ImageManagerService, error) { - glog.V(3).Infof("Connecting to image service %s", endpoint) + klog.V(3).Infof("Connecting to image service %s", endpoint) addr, dailer, err := util.GetAddressAndDialer(endpoint) if err != nil { return nil, err } - conn, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithTimeout(connectionTimeout), grpc.WithDialer(dailer), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize))) + ctx, cancel := context.WithTimeout(context.Background(), connectionTimeout) + defer cancel() + + conn, err := grpc.DialContext(ctx, addr, grpc.WithInsecure(), grpc.WithDialer(dailer), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize))) if err != nil { - glog.Errorf("Connect remote image service %s failed: %v", addr, err) + klog.Errorf("Connect remote image service %s failed: %v", addr, err) return nil, err } @@ -64,7 +68,7 @@ func (r *RemoteImageService) ListImages(filter *runtimeapi.ImageFilter) ([]*runt Filter: filter, }) if err != nil { - glog.Errorf("ListImages with filter %+v from image service failed: %v", filter, err) + klog.Errorf("ListImages with filter %+v from image service failed: %v", filter, err) return nil, err } @@ -80,14 +84,14 @@ func (r *RemoteImageService) ImageStatus(image *runtimeapi.ImageSpec) (*runtimea Image: image, }) if err != nil { - glog.Errorf("ImageStatus %q from image service failed: %v", image.Image, err) + klog.Errorf("ImageStatus %q from image service failed: %v", image.Image, err) return nil, err } if resp.Image != nil { if resp.Image.Id == "" || resp.Image.Size_ == 0 { errorMessage := fmt.Sprintf("Id or size of image %q is not set", image.Image) - glog.Errorf("ImageStatus failed: %s", errorMessage) + klog.Errorf("ImageStatus failed: %s", errorMessage) return nil, errors.New(errorMessage) } } @@ -105,13 +109,13 @@ func (r *RemoteImageService) PullImage(image *runtimeapi.ImageSpec, auth *runtim Auth: auth, }) if err != nil { - glog.Errorf("PullImage %q from image service failed: %v", image.Image, err) + klog.Errorf("PullImage %q from image service failed: %v", image.Image, err) return "", err } if resp.ImageRef == "" { errorMessage := fmt.Sprintf("imageRef of image %q is not set", image.Image) - glog.Errorf("PullImage failed: %s", errorMessage) + klog.Errorf("PullImage failed: %s", errorMessage) return "", errors.New(errorMessage) } @@ -127,7 +131,7 @@ func (r *RemoteImageService) RemoveImage(image *runtimeapi.ImageSpec) error { Image: image, }) if err != nil { - glog.Errorf("RemoveImage %q from image service failed: %v", image.Image, err) + klog.Errorf("RemoveImage %q from image service failed: %v", image.Image, err) return err } @@ -143,7 +147,7 @@ func (r *RemoteImageService) ImageFsInfo() ([]*runtimeapi.FilesystemUsage, error resp, err := r.imageClient.ImageFsInfo(ctx, &runtimeapi.ImageFsInfoRequest{}) if err != nil { - glog.Errorf("ImageFsInfo from image service failed: %v", err) + klog.Errorf("ImageFsInfo from image service failed: %v", err) return nil, err } return resp.GetImageFilesystems(), nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/remote/remote_runtime.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/remote/remote_runtime.go index 01d59ee4c29b..16e16daff84b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/remote/remote_runtime.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/remote/remote_runtime.go @@ -23,8 +23,8 @@ import ( "strings" "time" - 
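
Editor's note: both remote clients replace the deprecated `grpc.WithTimeout` option with a context-bounded `grpc.DialContext`. One subtlety: without `grpc.WithBlock`, `DialContext` returns immediately and the deadline only bounds initial setup, so the sketch below adds it to make the timeout observable. The target address is a placeholder; the kubelet keeps its custom dialer for unix sockets:

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	// A context deadline replaces the deprecated grpc.WithTimeout option.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// WithBlock makes DialContext wait for the connection to be established,
	// so the deadline actually bounds connection setup.
	conn, err := grpc.DialContext(ctx, "127.0.0.1:50051",
		grpc.WithInsecure(),
		grpc.WithBlock(),
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
	log.Printf("connected, state: %v", conn.GetState())
}
```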
"github.com/golang/glog" "google.golang.org/grpc" + "k8s.io/klog" internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" @@ -40,14 +40,17 @@ type RemoteRuntimeService struct { // NewRemoteRuntimeService creates a new internalapi.RuntimeService. func NewRemoteRuntimeService(endpoint string, connectionTimeout time.Duration) (internalapi.RuntimeService, error) { - glog.V(3).Infof("Connecting to runtime service %s", endpoint) + klog.V(3).Infof("Connecting to runtime service %s", endpoint) addr, dailer, err := util.GetAddressAndDialer(endpoint) if err != nil { return nil, err } - conn, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithTimeout(connectionTimeout), grpc.WithDialer(dailer), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize))) + ctx, cancel := context.WithTimeout(context.Background(), connectionTimeout) + defer cancel() + + conn, err := grpc.DialContext(ctx, addr, grpc.WithInsecure(), grpc.WithDialer(dailer), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize))) if err != nil { - glog.Errorf("Connect remote runtime %s failed: %v", addr, err) + klog.Errorf("Connect remote runtime %s failed: %v", addr, err) return nil, err } @@ -66,7 +69,7 @@ func (r *RemoteRuntimeService) Version(apiVersion string) (*runtimeapi.VersionRe Version: apiVersion, }) if err != nil { - glog.Errorf("Version from runtime service failed: %v", err) + klog.Errorf("Version from runtime service failed: %v", err) return nil, err } @@ -90,13 +93,13 @@ func (r *RemoteRuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig RuntimeHandler: runtimeHandler, }) if err != nil { - glog.Errorf("RunPodSandbox from runtime service failed: %v", err) + klog.Errorf("RunPodSandbox from runtime service failed: %v", err) return "", err } if resp.PodSandboxId == "" { errorMessage := fmt.Sprintf("PodSandboxId is not set for sandbox %q", config.GetMetadata()) - glog.Errorf("RunPodSandbox failed: %s", errorMessage) + klog.Errorf("RunPodSandbox failed: %s", errorMessage) return "", errors.New(errorMessage) } @@ -113,7 +116,7 @@ func (r *RemoteRuntimeService) StopPodSandbox(podSandBoxID string) error { PodSandboxId: podSandBoxID, }) if err != nil { - glog.Errorf("StopPodSandbox %q from runtime service failed: %v", podSandBoxID, err) + klog.Errorf("StopPodSandbox %q from runtime service failed: %v", podSandBoxID, err) return err } @@ -130,7 +133,7 @@ func (r *RemoteRuntimeService) RemovePodSandbox(podSandBoxID string) error { PodSandboxId: podSandBoxID, }) if err != nil { - glog.Errorf("RemovePodSandbox %q from runtime service failed: %v", podSandBoxID, err) + klog.Errorf("RemovePodSandbox %q from runtime service failed: %v", podSandBoxID, err) return err } @@ -167,7 +170,7 @@ func (r *RemoteRuntimeService) ListPodSandbox(filter *runtimeapi.PodSandboxFilte Filter: filter, }) if err != nil { - glog.Errorf("ListPodSandbox with filter %+v from runtime service failed: %v", filter, err) + klog.Errorf("ListPodSandbox with filter %+v from runtime service failed: %v", filter, err) return nil, err } @@ -185,13 +188,13 @@ func (r *RemoteRuntimeService) CreateContainer(podSandBoxID string, config *runt SandboxConfig: sandboxConfig, }) if err != nil { - glog.Errorf("CreateContainer in sandbox %q from runtime service failed: %v", podSandBoxID, err) + klog.Errorf("CreateContainer in sandbox %q from runtime service failed: %v", podSandBoxID, err) return "", err } if resp.ContainerId == "" { errorMessage := fmt.Sprintf("ContainerId is not set for 
container %q", config.GetMetadata()) - glog.Errorf("CreateContainer failed: %s", errorMessage) + klog.Errorf("CreateContainer failed: %s", errorMessage) return "", errors.New(errorMessage) } @@ -207,7 +210,7 @@ func (r *RemoteRuntimeService) StartContainer(containerID string) error { ContainerId: containerID, }) if err != nil { - glog.Errorf("StartContainer %q from runtime service failed: %v", containerID, err) + klog.Errorf("StartContainer %q from runtime service failed: %v", containerID, err) return err } @@ -227,7 +230,7 @@ func (r *RemoteRuntimeService) StopContainer(containerID string, timeout int64) Timeout: timeout, }) if err != nil { - glog.Errorf("StopContainer %q from runtime service failed: %v", containerID, err) + klog.Errorf("StopContainer %q from runtime service failed: %v", containerID, err) return err } @@ -244,7 +247,7 @@ func (r *RemoteRuntimeService) RemoveContainer(containerID string) error { ContainerId: containerID, }) if err != nil { - glog.Errorf("RemoveContainer %q from runtime service failed: %v", containerID, err) + klog.Errorf("RemoveContainer %q from runtime service failed: %v", containerID, err) return err } @@ -260,7 +263,7 @@ func (r *RemoteRuntimeService) ListContainers(filter *runtimeapi.ContainerFilter Filter: filter, }) if err != nil { - glog.Errorf("ListContainers with filter %+v from runtime service failed: %v", filter, err) + klog.Errorf("ListContainers with filter %+v from runtime service failed: %v", filter, err) return nil, err } @@ -276,13 +279,13 @@ func (r *RemoteRuntimeService) ContainerStatus(containerID string) (*runtimeapi. ContainerId: containerID, }) if err != nil { - glog.Errorf("ContainerStatus %q from runtime service failed: %v", containerID, err) + klog.Errorf("ContainerStatus %q from runtime service failed: %v", containerID, err) return nil, err } if resp.Status != nil { if err := verifyContainerStatus(resp.Status); err != nil { - glog.Errorf("ContainerStatus of %q failed: %v", containerID, err) + klog.Errorf("ContainerStatus of %q failed: %v", containerID, err) return nil, err } } @@ -300,7 +303,7 @@ func (r *RemoteRuntimeService) UpdateContainerResources(containerID string, reso Linux: resources, }) if err != nil { - glog.Errorf("UpdateContainerResources %q from runtime service failed: %v", containerID, err) + klog.Errorf("UpdateContainerResources %q from runtime service failed: %v", containerID, err) return err } @@ -330,7 +333,7 @@ func (r *RemoteRuntimeService) ExecSync(containerID string, cmd []string, timeou } resp, err := r.runtimeClient.ExecSync(ctx, req) if err != nil { - glog.Errorf("ExecSync %s '%s' from runtime service failed: %v", containerID, strings.Join(cmd, " "), err) + klog.Errorf("ExecSync %s '%s' from runtime service failed: %v", containerID, strings.Join(cmd, " "), err) return nil, nil, err } @@ -352,13 +355,13 @@ func (r *RemoteRuntimeService) Exec(req *runtimeapi.ExecRequest) (*runtimeapi.Ex resp, err := r.runtimeClient.Exec(ctx, req) if err != nil { - glog.Errorf("Exec %s '%s' from runtime service failed: %v", req.ContainerId, strings.Join(req.Cmd, " "), err) + klog.Errorf("Exec %s '%s' from runtime service failed: %v", req.ContainerId, strings.Join(req.Cmd, " "), err) return nil, err } if resp.Url == "" { errorMessage := "URL is not set" - glog.Errorf("Exec failed: %s", errorMessage) + klog.Errorf("Exec failed: %s", errorMessage) return nil, errors.New(errorMessage) } @@ -372,13 +375,13 @@ func (r *RemoteRuntimeService) Attach(req *runtimeapi.AttachRequest) (*runtimeap resp, err := r.runtimeClient.Attach(ctx, 
req) if err != nil { - glog.Errorf("Attach %s from runtime service failed: %v", req.ContainerId, err) + klog.Errorf("Attach %s from runtime service failed: %v", req.ContainerId, err) return nil, err } if resp.Url == "" { errorMessage := "URL is not set" - glog.Errorf("Exec failed: %s", errorMessage) + klog.Errorf("Exec failed: %s", errorMessage) return nil, errors.New(errorMessage) } return resp, nil @@ -391,13 +394,13 @@ func (r *RemoteRuntimeService) PortForward(req *runtimeapi.PortForwardRequest) ( resp, err := r.runtimeClient.PortForward(ctx, req) if err != nil { - glog.Errorf("PortForward %s from runtime service failed: %v", req.PodSandboxId, err) + klog.Errorf("PortForward %s from runtime service failed: %v", req.PodSandboxId, err) return nil, err } if resp.Url == "" { errorMessage := "URL is not set" - glog.Errorf("Exec failed: %s", errorMessage) + klog.Errorf("Exec failed: %s", errorMessage) return nil, errors.New(errorMessage) } @@ -432,13 +435,13 @@ func (r *RemoteRuntimeService) Status() (*runtimeapi.RuntimeStatus, error) { resp, err := r.runtimeClient.Status(ctx, &runtimeapi.StatusRequest{}) if err != nil { - glog.Errorf("Status from runtime service failed: %v", err) + klog.Errorf("Status from runtime service failed: %v", err) return nil, err } if resp.Status == nil || len(resp.Status.Conditions) < 2 { errorMessage := "RuntimeReady or NetworkReady condition are not set" - glog.Errorf("Status failed: %s", errorMessage) + klog.Errorf("Status failed: %s", errorMessage) return nil, errors.New(errorMessage) } @@ -454,7 +457,7 @@ func (r *RemoteRuntimeService) ContainerStats(containerID string) (*runtimeapi.C ContainerId: containerID, }) if err != nil { - glog.Errorf("ContainerStatus %q from runtime service failed: %v", containerID, err) + klog.Errorf("ContainerStatus %q from runtime service failed: %v", containerID, err) return nil, err } @@ -471,7 +474,7 @@ func (r *RemoteRuntimeService) ListContainerStats(filter *runtimeapi.ContainerSt Filter: filter, }) if err != nil { - glog.Errorf("ListContainerStats with filter %+v from runtime service failed: %v", filter, err) + klog.Errorf("ListContainerStats with filter %+v from runtime service failed: %v", filter, err) return nil, err } @@ -484,7 +487,7 @@ func (r *RemoteRuntimeService) ReopenContainerLog(containerID string) error { _, err := r.runtimeClient.ReopenContainerLog(ctx, &runtimeapi.ReopenContainerLogRequest{ContainerId: containerID}) if err != nil { - glog.Errorf("ReopenContainerLog %q from runtime service failed: %v", containerID, err) + klog.Errorf("ReopenContainerLog %q from runtime service failed: %v", containerID, err) return err } return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/runonce.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/runonce.go index 294580e7a093..86c1f36f0c73 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/runonce.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/runonce.go @@ -21,8 +21,8 @@ import ( "os" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" + "k8s.io/klog" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/kubelet/util/format" @@ -51,15 +51,15 @@ func (kl *Kubelet) RunOnce(updates <-chan kubetypes.PodUpdate) ([]RunPodResult, // If the container logs directory does not exist, create it. 
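Both remote CRI constructors above (NewRemoteImageService and NewRemoteRuntimeService) drop the deprecated `grpc.WithTimeout` dial option in favor of a caller-owned context passed to `grpc.DialContext`. Below is a reduced, self-contained sketch of that pattern, not the vendored code itself: the unix-socket dialer is an illustrative stand-in for `util.GetAddressAndDialer`, and `grpc.WithBlock` is added here only so the timeout is observable in isolation (the vendored constructors omit it).

```go
package main

import (
	"context"
	"net"
	"time"

	"google.golang.org/grpc"
)

// dialWithTimeout mirrors the DialContext pattern used by the remote CRI clients.
func dialWithTimeout(addr string, timeout time.Duration) (*grpc.ClientConn, error) {
	// The context, not a dial option, now bounds connection establishment.
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	// Illustrative dialer for a local unix-socket endpoint.
	dialer := func(addr string, t time.Duration) (net.Conn, error) {
		return net.DialTimeout("unix", addr, t)
	}

	return grpc.DialContext(ctx, addr,
		grpc.WithInsecure(), // CRI endpoints are local sockets, no TLS
		grpc.WithDialer(dialer),
		grpc.WithBlock(), // block so the ctx deadline actually caps the dial
	)
}
```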
if _, err := os.Stat(ContainerLogsDir); err != nil { if err := kl.os.MkdirAll(ContainerLogsDir, 0755); err != nil { - glog.Errorf("Failed to create directory %q: %v", ContainerLogsDir, err) + klog.Errorf("Failed to create directory %q: %v", ContainerLogsDir, err) } } select { case u := <-updates: - glog.Infof("processing manifest with %d pods", len(u.Pods)) + klog.Infof("processing manifest with %d pods", len(u.Pods)) result, err := kl.runOnce(u.Pods, runOnceRetryDelay) - glog.Infof("finished processing %d pods", len(u.Pods)) + klog.Infof("finished processing %d pods", len(u.Pods)) return result, err case <-time.After(runOnceManifestDelay): return nil, fmt.Errorf("no pod manifest update after %v", runOnceManifestDelay) @@ -85,7 +85,7 @@ func (kl *Kubelet) runOnce(pods []*v1.Pod, retryDelay time.Duration) (results [] }(pod) } - glog.Infof("Waiting for %d pods", len(admitted)) + klog.Infof("Waiting for %d pods", len(admitted)) failedPods := []string{} for i := 0; i < len(admitted); i++ { res := <-ch @@ -93,19 +93,19 @@ func (kl *Kubelet) runOnce(pods []*v1.Pod, retryDelay time.Duration) (results [] if res.Err != nil { faliedContainerName, err := kl.getFailedContainers(res.Pod) if err != nil { - glog.Infof("unable to get failed containers' names for pod %q, error:%v", format.Pod(res.Pod), err) + klog.Infof("unable to get failed containers' names for pod %q, error:%v", format.Pod(res.Pod), err) } else { - glog.Infof("unable to start pod %q because container:%v failed", format.Pod(res.Pod), faliedContainerName) + klog.Infof("unable to start pod %q because container:%v failed", format.Pod(res.Pod), faliedContainerName) } failedPods = append(failedPods, format.Pod(res.Pod)) } else { - glog.Infof("started pod %q", format.Pod(res.Pod)) + klog.Infof("started pod %q", format.Pod(res.Pod)) } } if len(failedPods) > 0 { return results, fmt.Errorf("error running pods: %v", failedPods) } - glog.Infof("%d pods started", len(pods)) + klog.Infof("%d pods started", len(pods)) return results, err } @@ -120,14 +120,14 @@ func (kl *Kubelet) runPod(pod *v1.Pod, retryDelay time.Duration) error { } if kl.isPodRunning(pod, status) { - glog.Infof("pod %q containers running", format.Pod(pod)) + klog.Infof("pod %q containers running", format.Pod(pod)) return nil } - glog.Infof("pod %q containers not running: syncing", format.Pod(pod)) + klog.Infof("pod %q containers not running: syncing", format.Pod(pod)) - glog.Infof("Creating a mirror pod for static pod %q", format.Pod(pod)) + klog.Infof("Creating a mirror pod for static pod %q", format.Pod(pod)) if err := kl.podManager.CreateMirrorPod(pod); err != nil { - glog.Errorf("Failed creating a mirror pod %q: %v", format.Pod(pod), err) + klog.Errorf("Failed creating a mirror pod %q: %v", format.Pod(pod), err) } mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod) if err = kl.syncPod(syncPodOptions{ @@ -142,7 +142,7 @@ func (kl *Kubelet) runPod(pod *v1.Pod, retryDelay time.Duration) error { return fmt.Errorf("timeout error: pod %q containers not running after %d retries", format.Pod(pod), runOnceMaxRetries) } // TODO(proppy): health checking would be better than waiting + checking the state at the next iteration. 
- glog.Infof("pod %q containers synced, waiting for %v", format.Pod(pod), delay) + klog.Infof("pod %q containers synced, waiting for %v", format.Pod(pod), delay) time.Sleep(delay) retry++ delay *= runOnceRetryDelayBackoff @@ -154,7 +154,7 @@ func (kl *Kubelet) isPodRunning(pod *v1.Pod, status *kubecontainer.PodStatus) bo for _, c := range pod.Spec.Containers { cs := status.FindContainerStatusByName(c.Name) if cs == nil || cs.State != kubecontainer.ContainerStateRunning { - glog.Infof("Container %q for pod %q not running", c.Name, format.Pod(pod)) + klog.Infof("Container %q for pod %q not running", c.Name, format.Pod(pod)) return false } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/runtime.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/runtime.go index 2ee89b3891bd..7a739460be06 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/runtime.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/runtime.go @@ -27,7 +27,6 @@ type runtimeState struct { lastBaseRuntimeSync time.Time baseRuntimeSyncThreshold time.Duration networkError error - internalError error cidr string healthChecks []*healthCheck } @@ -75,12 +74,11 @@ func (s *runtimeState) runtimeErrors() []string { s.RLock() defer s.RUnlock() var ret []string - if !s.lastBaseRuntimeSync.Add(s.baseRuntimeSyncThreshold).After(time.Now()) { + if s.lastBaseRuntimeSync.IsZero() { + ret = append(ret, "container runtime status check may not have completed yet") + } else if !s.lastBaseRuntimeSync.Add(s.baseRuntimeSyncThreshold).After(time.Now()) { ret = append(ret, "container runtime is down") } - if s.internalError != nil { - ret = append(ret, s.internalError.Error()) - } for _, hc := range s.healthChecks { if ok, err := hc.fn(); !ok { ret = append(ret, fmt.Sprintf("%s is not healthy: %v", hc.name, err)) @@ -107,6 +105,5 @@ func newRuntimeState( lastBaseRuntimeSync: time.Time{}, baseRuntimeSyncThreshold: runtimeSyncThreshold, networkError: fmt.Errorf("network state unknown"), - internalError: nil, } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/BUILD index affcfad56741..595bca6af35e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/BUILD @@ -18,6 +18,8 @@ go_library( "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/apis/core/v1/validation:go_default_library", + "//pkg/kubelet/apis/podresources:go_default_library", + "//pkg/kubelet/apis/podresources/v1alpha1:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/prober:go_default_library", "//pkg/kubelet/server/portforward:go_default_library", @@ -25,6 +27,7 @@ go_library( "//pkg/kubelet/server/stats:go_default_library", "//pkg/kubelet/server/streaming:go_default_library", "//pkg/kubelet/types:go_default_library", + "//pkg/kubelet/util:go_default_library", "//pkg/util/configz:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -42,11 +45,13 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/util/flushwriter:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/logs:go_default_library", "//vendor/github.com/emicklei/go-restful:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + 
"//vendor/github.com/google/cadvisor/container:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/github.com/google/cadvisor/metrics:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus/promhttp:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -76,6 +81,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/util/httpstream:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", "//staging/src/k8s.io/client-go/tools/remotecommand:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/auth.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/auth.go index 94a4d30915d3..8b8805cf5cf4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/auth.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/auth.go @@ -20,11 +20,11 @@ import ( "net/http" "strings" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/klog" ) // KubeletAuth implements AuthInterface @@ -108,7 +108,7 @@ func (n nodeAuthorizerAttributesGetter) GetRequestAttributes(u user.Info, r *htt attrs.Subresource = "spec" } - glog.V(5).Infof("Node request attributes: attrs=%#v", attrs) + klog.V(5).Infof("Node request attributes: user=%#v attrs=%#v", attrs.GetUser(), attrs) return attrs } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/BUILD index 07541328a3ad..46c57de81bf6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/BUILD @@ -23,7 +23,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apiserver/pkg/server/httplog:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/wsstream:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/httpstream.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/httpstream.go index be2426eaf42c..43393bd57ae7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/httpstream.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/httpstream.go @@ -30,7 +30,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" api "k8s.io/kubernetes/pkg/apis/core" - "github.com/golang/glog" + "k8s.io/klog" ) func handleHttpStreams(req *http.Request, w http.ResponseWriter, portForwarder PortForwarder, podName string, uid types.UID, supportedPortForwardProtocols []string, idleTimeout, streamCreationTimeout time.Duration) error { 
@@ -42,7 +42,7 @@ func handleHttpStreams(req *http.Request, w http.ResponseWriter, portForwarder P } streamChan := make(chan httpstream.Stream, 1) - glog.V(5).Infof("Upgrading port forward response") + klog.V(5).Infof("Upgrading port forward response") upgrader := spdy.NewResponseUpgrader() conn := upgrader.UpgradeResponse(w, req, httpStreamReceived(streamChan)) if conn == nil { @@ -50,7 +50,7 @@ func handleHttpStreams(req *http.Request, w http.ResponseWriter, portForwarder P } defer conn.Close() - glog.V(5).Infof("(conn=%p) setting port forwarding streaming connection idle timeout to %v", conn, idleTimeout) + klog.V(5).Infof("(conn=%p) setting port forwarding streaming connection idle timeout to %v", conn, idleTimeout) conn.SetIdleTimeout(idleTimeout) h := &httpStreamHandler{ @@ -58,9 +58,9 @@ func handleHttpStreams(req *http.Request, w http.ResponseWriter, portForwarder P streamChan: streamChan, streamPairs: make(map[string]*httpStreamPair), streamCreationTimeout: streamCreationTimeout, - pod: podName, - uid: uid, - forwarder: portForwarder, + pod: podName, + uid: uid, + forwarder: portForwarder, } h.run() @@ -121,11 +121,11 @@ func (h *httpStreamHandler) getStreamPair(requestID string) (*httpStreamPair, bo defer h.streamPairsLock.Unlock() if p, ok := h.streamPairs[requestID]; ok { - glog.V(5).Infof("(conn=%p, request=%s) found existing stream pair", h.conn, requestID) + klog.V(5).Infof("(conn=%p, request=%s) found existing stream pair", h.conn, requestID) return p, false } - glog.V(5).Infof("(conn=%p, request=%s) creating new stream pair", h.conn, requestID) + klog.V(5).Infof("(conn=%p, request=%s) creating new stream pair", h.conn, requestID) p := newPortForwardPair(requestID) h.streamPairs[requestID] = p @@ -143,7 +143,7 @@ func (h *httpStreamHandler) monitorStreamPair(p *httpStreamPair, timeout <-chan utilruntime.HandleError(err) p.printError(err.Error()) case <-p.complete: - glog.V(5).Infof("(conn=%v, request=%s) successfully received error and data streams", h.conn, p.requestID) + klog.V(5).Infof("(conn=%v, request=%s) successfully received error and data streams", h.conn, p.requestID) } h.removeStreamPair(p.requestID) } @@ -170,7 +170,7 @@ func (h *httpStreamHandler) removeStreamPair(requestID string) { func (h *httpStreamHandler) requestID(stream httpstream.Stream) string { requestID := stream.Headers().Get(api.PortForwardRequestIDHeader) if len(requestID) == 0 { - glog.V(5).Infof("(conn=%p) stream received without %s header", h.conn, api.PortForwardRequestIDHeader) + klog.V(5).Infof("(conn=%p) stream received without %s header", h.conn, api.PortForwardRequestIDHeader) // If we get here, it's because the connection came from an older client // that isn't generating the request id header // (https://github.com/kubernetes/kubernetes/blob/843134885e7e0b360eb5441e85b1410a8b1a7a0c/pkg/client/unversioned/portforward/portforward.go#L258-L287) @@ -197,7 +197,7 @@ func (h *httpStreamHandler) requestID(stream httpstream.Stream) string { requestID = strconv.Itoa(int(stream.Identifier()) - 2) } - glog.V(5).Infof("(conn=%p) automatically assigning request ID=%q from stream type=%s, stream ID=%d", h.conn, requestID, streamType, stream.Identifier()) + klog.V(5).Infof("(conn=%p) automatically assigning request ID=%q from stream type=%s, stream ID=%d", h.conn, requestID, streamType, stream.Identifier()) } return requestID } @@ -206,17 +206,17 @@ func (h *httpStreamHandler) requestID(stream httpstream.Stream) string { // streams, invoking portForward for each complete stream pair. 
The loop exits // when the httpstream.Connection is closed. func (h *httpStreamHandler) run() { - glog.V(5).Infof("(conn=%p) waiting for port forward streams", h.conn) + klog.V(5).Infof("(conn=%p) waiting for port forward streams", h.conn) Loop: for { select { case <-h.conn.CloseChan(): - glog.V(5).Infof("(conn=%p) upgraded connection closed", h.conn) + klog.V(5).Infof("(conn=%p) upgraded connection closed", h.conn) break Loop case stream := <-h.streamChan: requestID := h.requestID(stream) streamType := stream.Headers().Get(api.StreamType) - glog.V(5).Infof("(conn=%p, request=%s) received new stream of type %s", h.conn, requestID, streamType) + klog.V(5).Infof("(conn=%p, request=%s) received new stream of type %s", h.conn, requestID, streamType) p, created := h.getStreamPair(requestID) if created { @@ -242,9 +242,9 @@ func (h *httpStreamHandler) portForward(p *httpStreamPair) { portString := p.dataStream.Headers().Get(api.PortHeader) port, _ := strconv.ParseInt(portString, 10, 32) - glog.V(5).Infof("(conn=%p, request=%s) invoking forwarder.PortForward for port %s", h.conn, p.requestID, portString) + klog.V(5).Infof("(conn=%p, request=%s) invoking forwarder.PortForward for port %s", h.conn, p.requestID, portString) err := h.forwarder.PortForward(h.pod, h.uid, int32(port), p.dataStream) - glog.V(5).Infof("(conn=%p, request=%s) done invoking forwarder.PortForward for port %s", h.conn, p.requestID, portString) + klog.V(5).Infof("(conn=%p, request=%s) done invoking forwarder.PortForward for port %s", h.conn, p.requestID, portString) if err != nil { msg := fmt.Errorf("error forwarding port %d to pod %s, uid %v: %v", port, h.pod, h.uid, err) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/websocket.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/websocket.go index 8bd6eb709803..1b23d74b519b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/websocket.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/websocket.go @@ -26,7 +26,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/runtime" @@ -185,9 +185,9 @@ func (h *websocketStreamHandler) portForward(p *websocketStreamPair) { defer p.dataStream.Close() defer p.errorStream.Close() - glog.V(5).Infof("(conn=%p) invoking forwarder.PortForward for port %d", h.conn, p.port) + klog.V(5).Infof("(conn=%p) invoking forwarder.PortForward for port %d", h.conn, p.port) err := h.forwarder.PortForward(h.pod, h.uid, p.port, p.dataStream) - glog.V(5).Infof("(conn=%p) done invoking forwarder.PortForward for port %d", h.conn, p.port) + klog.V(5).Infof("(conn=%p) done invoking forwarder.PortForward for port %d", h.conn, p.port) if err != nil { msg := fmt.Errorf("error forwarding port %d to pod %s, uid %v: %v", p.port, h.pod, h.uid, err) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/BUILD index 776abc764c9e..fef6fec9c2cc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/BUILD @@ -27,7 +27,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/server/httplog:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/wsstream:go_default_library", 
"//staging/src/k8s.io/client-go/tools/remotecommand:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/httpstream.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/httpstream.go index 387ad3d5a76e..8bff323ec7a9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/httpstream.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/httpstream.go @@ -34,7 +34,7 @@ import ( "k8s.io/client-go/tools/remotecommand" api "k8s.io/kubernetes/pkg/apis/core" - "github.com/golang/glog" + "k8s.io/klog" ) // Options contains details about which streams are required for @@ -54,7 +54,7 @@ func NewOptions(req *http.Request) (*Options, error) { stderr := req.FormValue(api.ExecStderrParam) == "1" if tty && stderr { // TODO: make this an error before we reach this method - glog.V(4).Infof("Access to exec with tty and stderr is not supported, bypassing stderr") + klog.V(4).Infof("Access to exec with tty and stderr is not supported, bypassing stderr") stderr = false } @@ -125,8 +125,7 @@ func createStreams(req *http.Request, w http.ResponseWriter, opts *Options, supp func createHttpStreamStreams(req *http.Request, w http.ResponseWriter, opts *Options, supportedStreamProtocols []string, idleTimeout, streamCreationTimeout time.Duration) (*context, bool) { protocol, err := httpstream.Handshake(req, w, supportedStreamProtocols) if err != nil { - w.WriteHeader(http.StatusBadRequest) - fmt.Fprint(w, err.Error()) + http.Error(w, err.Error(), http.StatusBadRequest) return nil, false } @@ -156,7 +155,7 @@ func createHttpStreamStreams(req *http.Request, w http.ResponseWriter, opts *Opt case remotecommandconsts.StreamProtocolV2Name: handler = &v2ProtocolHandler{} case "": - glog.V(4).Infof("Client did not request protocol negotiation. Falling back to %q", remotecommandconsts.StreamProtocolV1Name) + klog.V(4).Infof("Client did not request protocol negotiation. 
Falling back to %q", remotecommandconsts.StreamProtocolV1Name) fallthrough case remotecommandconsts.StreamProtocolV1Name: handler = &v1ProtocolHandler{} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/server.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/server.go index 118719931084..66bc7f54d958 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/server.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/server.go @@ -31,12 +31,14 @@ import ( "strings" "time" - restful "github.com/emicklei/go-restful" - "github.com/golang/glog" + "github.com/emicklei/go-restful" + cadvisormetrics "github.com/google/cadvisor/container" cadvisorapi "github.com/google/cadvisor/info/v1" "github.com/google/cadvisor/metrics" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" + "google.golang.org/grpc" + "k8s.io/klog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -55,6 +57,8 @@ import ( "k8s.io/kubernetes/pkg/api/legacyscheme" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/core/v1/validation" + "k8s.io/kubernetes/pkg/kubelet/apis/podresources" + podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/prober" "k8s.io/kubernetes/pkg/kubelet/server/portforward" @@ -62,6 +66,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/server/stats" "k8s.io/kubernetes/pkg/kubelet/server/streaming" kubelettypes "k8s.io/kubernetes/pkg/kubelet/types" + "k8s.io/kubernetes/pkg/kubelet/util" "k8s.io/kubernetes/pkg/util/configz" ) @@ -129,7 +134,7 @@ func ListenAndServeKubeletServer( enableContentionProfiling, redirectContainerStreaming bool, criHandler http.Handler) { - glog.Infof("Starting to listen on %s:%d", address, port) + klog.Infof("Starting to listen on %s:%d", address, port) handler := NewServer(host, resourceAnalyzer, auth, enableDebuggingHandlers, enableContentionProfiling, redirectContainerStreaming, criHandler) s := &http.Server{ Addr: net.JoinHostPort(address.String(), strconv.FormatUint(uint64(port), 10)), @@ -141,15 +146,15 @@ func ListenAndServeKubeletServer( // Passing empty strings as the cert and key files means no // cert/keys are specified and GetCertificate in the TLSConfig // should be called instead. - glog.Fatal(s.ListenAndServeTLS(tlsOptions.CertFile, tlsOptions.KeyFile)) + klog.Fatal(s.ListenAndServeTLS(tlsOptions.CertFile, tlsOptions.KeyFile)) } else { - glog.Fatal(s.ListenAndServe()) + klog.Fatal(s.ListenAndServe()) } } // ListenAndServeKubeletReadOnlyServer initializes a server to respond to HTTP network requests on the Kubelet. 
func ListenAndServeKubeletReadOnlyServer(host HostInterface, resourceAnalyzer stats.ResourceAnalyzer, address net.IP, port uint) { - glog.V(1).Infof("Starting to listen read-only on %s:%d", address, port) + klog.V(1).Infof("Starting to listen read-only on %s:%d", address, port) s := NewServer(host, resourceAnalyzer, nil, false, false, false, nil) server := &http.Server{ @@ -157,7 +162,18 @@ func ListenAndServeKubeletReadOnlyServer(host HostInterface, resourceAnalyzer st Handler: &s, MaxHeaderBytes: 1 << 20, } - glog.Fatal(server.ListenAndServe()) + klog.Fatal(server.ListenAndServe()) +} + +// ListenAndServePodResources initializes a grpc server to serve the PodResources service +func ListenAndServePodResources(socket string, podsProvider podresources.PodsProvider, devicesProvider podresources.DevicesProvider) { + server := grpc.NewServer() + podresourcesapi.RegisterPodResourcesListerServer(server, podresources.NewPodResourcesServer(podsProvider, devicesProvider)) + l, err := util.CreateListener(socket) + if err != nil { + klog.Fatalf("Failed to create listener for podResources endpoint: %v", err) + } + klog.Fatal(server.Serve(l)) } // AuthInterface contains all methods required by the auth filters @@ -220,9 +236,9 @@ func NewServer( func (s *Server) InstallAuthFilter() { s.restfulCont.Filter(func(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) { // Authenticate - u, ok, err := s.auth.AuthenticateRequest(req.Request) + info, ok, err := s.auth.AuthenticateRequest(req.Request) if err != nil { - glog.Errorf("Unable to authenticate the request due to an error: %v", err) + klog.Errorf("Unable to authenticate the request due to an error: %v", err) resp.WriteErrorString(http.StatusUnauthorized, "Unauthorized") return } @@ -232,19 +248,19 @@ func (s *Server) InstallAuthFilter() { } // Get authorization attributes - attrs := s.auth.GetRequestAttributes(u, req.Request) + attrs := s.auth.GetRequestAttributes(info.User, req.Request) // Authorize decision, _, err := s.auth.Authorize(attrs) if err != nil { - msg := fmt.Sprintf("Authorization error (user=%s, verb=%s, resource=%s, subresource=%s)", u.GetName(), attrs.GetVerb(), attrs.GetResource(), attrs.GetSubresource()) - glog.Errorf(msg, err) + msg := fmt.Sprintf("Authorization error (user=%s, verb=%s, resource=%s, subresource=%s)", attrs.GetUser().GetName(), attrs.GetVerb(), attrs.GetResource(), attrs.GetSubresource()) + klog.Errorf(msg, err) resp.WriteErrorString(http.StatusInternalServerError, msg) return } if decision != authorizer.DecisionAllow { - msg := fmt.Sprintf("Forbidden (user=%s, verb=%s, resource=%s, subresource=%s)", u.GetName(), attrs.GetVerb(), attrs.GetResource(), attrs.GetSubresource()) - glog.V(2).Info(msg) + msg := fmt.Sprintf("Forbidden (user=%s, verb=%s, resource=%s, subresource=%s)", attrs.GetUser().GetName(), attrs.GetVerb(), attrs.GetResource(), attrs.GetSubresource()) + klog.V(2).Info(msg) resp.WriteErrorString(http.StatusForbidden, msg) return } @@ -276,7 +292,18 @@ func (s *Server) InstallDefaultHandlers() { // cAdvisor metrics are exposed under the secured handler as well r := prometheus.NewRegistry() - r.MustRegister(metrics.NewPrometheusCollector(prometheusHostAdapter{s.host}, containerPrometheusLabelsFunc(s.host))) + + includedMetrics := cadvisormetrics.MetricSet{ + cadvisormetrics.CpuUsageMetrics: struct{}{}, + cadvisormetrics.MemoryUsageMetrics: struct{}{}, + cadvisormetrics.CpuLoadMetrics: struct{}{}, + cadvisormetrics.DiskIOMetrics: struct{}{}, + cadvisormetrics.DiskUsageMetrics: struct{}{}, 
+ cadvisormetrics.NetworkUsageMetrics: struct{}{}, + cadvisormetrics.AcceleratorUsageMetrics: struct{}{}, + cadvisormetrics.AppMetrics: struct{}{}, + } + r.MustRegister(metrics.NewPrometheusCollector(prometheusHostAdapter{s.host}, containerPrometheusLabelsFunc(s.host), includedMetrics)) s.restfulCont.Handle(cadvisorMetricsPath, promhttp.HandlerFor(r, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError}), ) @@ -303,7 +330,7 @@ const pprofBasePath = "/debug/pprof/" // InstallDebuggingHandlers registers the HTTP request patterns that serve logs or run commands/containers func (s *Server) InstallDebuggingHandlers(criHandler http.Handler) { - glog.Infof("Adding debug handlers to kubelet server.") + klog.Infof("Adding debug handlers to kubelet server.") ws := new(restful.WebService) ws. @@ -636,7 +663,7 @@ type responder struct { } func (r *responder) Error(w http.ResponseWriter, req *http.Request, err error) { - glog.Errorf("Error while proxying request: %v", err) + klog.Errorf("Error while proxying request: %v", err) http.Error(w, err.Error(), http.StatusInternalServerError) } @@ -733,7 +760,7 @@ func writeJsonResponse(response *restful.Response, data []byte) { response.Header().Set(restful.HEADER_ContentType, restful.MIME_JSON) response.WriteHeader(http.StatusOK) if _, err := response.Write(data); err != nil { - glog.Errorf("Error writing response: %v", err) + klog.Errorf("Error writing response: %v", err) } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/BUILD index c3623fd98d1f..6fec6f2eb998 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/BUILD @@ -25,8 +25,8 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/github.com/emicklei/go-restful:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/fs_resource_analyzer.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/fs_resource_analyzer.go index 5f72134c4f55..331a0b27ba6d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/fs_resource_analyzer.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/fs_resource_analyzer.go @@ -24,7 +24,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" - "github.com/golang/glog" + "k8s.io/klog" ) // Map to PodVolumeStats pointers since the addresses for map values are not constant and can cause pain @@ -60,10 +60,10 @@ func newFsResourceAnalyzer(statsProvider StatsProvider, calcVolumePeriod time.Du func (s *fsResourceAnalyzer) Start() { s.startOnce.Do(func() { if s.calcPeriod <= 0 { - glog.Info("Volume stats collection disabled.") + klog.Info("Volume stats collection disabled.") return } - glog.Info("Starting FS ResourceAnalyzer") + klog.Info("Starting FS ResourceAnalyzer") go wait.Forever(func() { s.updateCachedPodVolumeStats() }, s.calcPeriod) }) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/handler.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/handler.go index 
f069cd898cb2..f87e5c820c5c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/handler.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/handler.go @@ -25,8 +25,8 @@ import ( "time" restful "github.com/emicklei/go-restful" - "github.com/golang/glog" cadvisorapi "github.com/google/cadvisor/info/v1" + "k8s.io/klog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -43,6 +43,8 @@ type StatsProvider interface { // // ListPodStats returns the stats of all the containers managed by pods. ListPodStats() ([]statsapi.PodStats, error) + // ListPodCPUAndMemoryStats returns the CPU and memory stats of all the containers managed by pods. + ListPodCPUAndMemoryStats() ([]statsapi.PodStats, error) // ImageFsStats returns the stats of the image filesystem. ImageFsStats() (*statsapi.FsStats, error) @@ -51,6 +53,9 @@ type StatsProvider interface { // GetCgroupStats returns the stats and the networking usage of the cgroup // with the specified cgroupName. GetCgroupStats(cgroupName string, updateStats bool) (*statsapi.ContainerStats, *statsapi.NetworkStats, error) + // GetCgroupCPUAndMemoryStats returns the CPU and memory stats of the cgroup with the specified cgroupName. + GetCgroupCPUAndMemoryStats(cgroupName string, updateStats bool) (*statsapi.ContainerStats, error) + // RootFsStats returns the stats of the node root filesystem. RootFsStats() (*statsapi.FsStats, error) @@ -192,10 +197,23 @@ func (h *handler) handleStats(request *restful.Request, response *restful.Respon } // Handles stats summary requests to /stats/summary +// If "only_cpu_and_memory" GET param is true then only cpu and memory is returned in response. func (h *handler) handleSummary(request *restful.Request, response *restful.Response) { - // external calls to the summary API use cached stats - forceStatsUpdate := false - summary, err := h.summaryProvider.Get(forceStatsUpdate) + onlyCPUAndMemory := false + request.Request.ParseForm() + if onlyCluAndMemoryParam, found := request.Request.Form["only_cpu_and_memory"]; found && + len(onlyCluAndMemoryParam) == 1 && onlyCluAndMemoryParam[0] == "true" { + onlyCPUAndMemory = true + } + var summary *statsapi.Summary + var err error + if onlyCPUAndMemory { + summary, err = h.summaryProvider.GetCPUAndMemoryStats() + } else { + // external calls to the summary API use cached stats + forceStatsUpdate := false + summary, err = h.summaryProvider.Get(forceStatsUpdate) + } if err != nil { handleError(response, "/stats/summary", err) } else { @@ -218,7 +236,7 @@ func (h *handler) handleSystemContainer(request *restful.Request, response *rest if err != nil { if _, ok := stats[containerName]; ok { // If the failure is partial, log it and return a best-effort response. 
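The handleSummary change above makes the kubelet recognize an `only_cpu_and_memory` GET parameter on `/stats/summary`: the parameter must appear exactly once with the literal value `true` to select the lighter GetCPUAndMemoryStats path, otherwise the cached full summary is served. A hedged client sketch; the read-only kubelet port 10255 is an assumption and deployment-specific:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Assumption: a kubelet read-only port on localhost:10255.
	// only_cpu_and_memory=true selects the CPU/memory-only summary path.
	resp, err := http.Get("http://127.0.0.1:10255/stats/summary?only_cpu_and_memory=true")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", body) // JSON statsapi.Summary with CPU/memory fields only
}
```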
- glog.Errorf("Partial failure issuing GetRawContainerInfo(%v): %v", query, err) + klog.Errorf("Partial failure issuing GetRawContainerInfo(%v): %v", query, err) } else { handleError(response, fmt.Sprintf("/stats/container %v", query), err) return @@ -254,7 +272,7 @@ func (h *handler) handlePodContainer(request *restful.Request, response *restful pod, ok := h.provider.GetPodByName(params["namespace"], params["podName"]) if !ok { - glog.V(4).Infof("Container not found: %v", params) + klog.V(4).Infof("Container not found: %v", params) response.WriteError(http.StatusNotFound, kubecontainer.ErrContainerNotFound) return } @@ -273,7 +291,7 @@ func (h *handler) handlePodContainer(request *restful.Request, response *restful func writeResponse(response *restful.Response, stats interface{}) { if err := response.WriteAsJson(stats); err != nil { - glog.Errorf("Error writing response: %v", err) + klog.Errorf("Error writing response: %v", err) } } @@ -285,7 +303,7 @@ func handleError(response *restful.Response, request string, err error) { response.WriteError(http.StatusNotFound, err) default: msg := fmt.Sprintf("Internal Error: %v", err) - glog.Errorf("HTTP InternalServerError serving %s: %s", request, msg) + klog.Errorf("HTTP InternalServerError serving %s: %s", request, msg) response.WriteErrorString(http.StatusInternalServerError, msg) } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/summary.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/summary.go index 900a8ad97d7a..fb646c5d2f31 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/summary.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/summary.go @@ -26,6 +26,8 @@ type SummaryProvider interface { // Get provides a new Summary with the stats from Kubelet, // and will update some stats if updateStats is true Get(updateStats bool) (*statsapi.Summary, error) + // GetCPUAndMemoryStats provides a new Summary with the CPU and memory stats from Kubelet, + GetCPUAndMemoryStats() (*statsapi.Summary, error) } // summaryProviderImpl implements the SummaryProvider interface. @@ -87,3 +89,35 @@ func (sp *summaryProviderImpl) Get(updateStats bool) (*statsapi.Summary, error) } return &summary, nil } + +func (sp *summaryProviderImpl) GetCPUAndMemoryStats() (*statsapi.Summary, error) { + // TODO(timstclair): Consider returning a best-effort response if any of + // the following errors occur. 
+ node, err := sp.provider.GetNode() + if err != nil { + return nil, fmt.Errorf("failed to get node info: %v", err) + } + nodeConfig := sp.provider.GetNodeConfig() + rootStats, err := sp.provider.GetCgroupCPUAndMemoryStats("/", false) + if err != nil { + return nil, fmt.Errorf("failed to get root cgroup stats: %v", err) + } + + podStats, err := sp.provider.ListPodCPUAndMemoryStats() + if err != nil { + return nil, fmt.Errorf("failed to list pod stats: %v", err) + } + + nodeStats := statsapi.NodeStats{ + NodeName: node.Name, + CPU: rootStats.CPU, + Memory: rootStats.Memory, + StartTime: rootStats.StartTime, + SystemContainers: sp.GetSystemContainersCPUAndMemoryStats(nodeConfig, podStats, false), + } + summary := statsapi.Summary{ + Node: nodeStats, + Pods: podStats, + } + return &summary, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/summary_sys_containers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/summary_sys_containers.go index 7c4205c93b3f..baaff0ab1bd6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/summary_sys_containers.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/summary_sys_containers.go @@ -19,7 +19,7 @@ limitations under the License. package stats import ( - "github.com/golang/glog" + "k8s.io/klog" statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" "k8s.io/kubernetes/pkg/kubelet/cm" @@ -42,7 +42,7 @@ func (sp *summaryProviderImpl) GetSystemContainersStats(nodeConfig cm.NodeConfig } s, _, err := sp.provider.GetCgroupStats(cont.name, cont.forceStatsUpdate) if err != nil { - glog.Errorf("Failed to get system container stats for %q: %v", cont.name, err) + klog.Errorf("Failed to get system container stats for %q: %v", cont.name, err) continue } // System containers don't have a filesystem associated with them. 
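GetCPUAndMemoryStats above assembles a Summary containing only node, system-container, and pod CPU/memory data, skipping the filesystem, network, and volume collection that Get performs. A short caller-side sketch of choosing between the two SummaryProvider paths; `summarize` and the `light` flag are illustrative names, not part of the vendored API:

```go
package main

import (
	statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
	"k8s.io/kubernetes/pkg/kubelet/server/stats"
)

// summarize picks the CPU/memory-only path when light is set.
func summarize(sp stats.SummaryProvider, light bool) (*statsapi.Summary, error) {
	if light {
		// Backed by GetCgroupCPUAndMemoryStats and ListPodCPUAndMemoryStats.
		return sp.GetCPUAndMemoryStats()
	}
	// updateStats=false: serve cached stats, as the HTTP handler does.
	return sp.Get(false)
}
```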
@@ -53,3 +53,30 @@ func (sp *summaryProviderImpl) GetSystemContainersStats(nodeConfig cm.NodeConfig return stats } + +func (sp *summaryProviderImpl) GetSystemContainersCPUAndMemoryStats(nodeConfig cm.NodeConfig, podStats []statsapi.PodStats, updateStats bool) (stats []statsapi.ContainerStats) { + systemContainers := map[string]struct { + name string + forceStatsUpdate bool + }{ + statsapi.SystemContainerKubelet: {nodeConfig.KubeletCgroupsName, false}, + statsapi.SystemContainerRuntime: {nodeConfig.RuntimeCgroupsName, false}, + statsapi.SystemContainerMisc: {nodeConfig.SystemCgroupsName, false}, + statsapi.SystemContainerPods: {sp.provider.GetPodCgroupRoot(), updateStats}, + } + for sys, cont := range systemContainers { + // skip if cgroup name is undefined (not all system containers are required) + if cont.name == "" { + continue + } + s, err := sp.provider.GetCgroupCPUAndMemoryStats(cont.name, cont.forceStatsUpdate) + if err != nil { + klog.Errorf("Failed to get system container stats for %q: %v", cont.name, err) + continue + } + s.Name = sys + stats = append(stats, *s) + } + + return stats +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/summary_sys_containers_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/summary_sys_containers_windows.go index b5e3affb7b30..cb8dcb3c8984 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/summary_sys_containers_windows.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/summary_sys_containers_windows.go @@ -27,11 +27,16 @@ import ( ) func (sp *summaryProviderImpl) GetSystemContainersStats(nodeConfig cm.NodeConfig, podStats []statsapi.PodStats, updateStats bool) (stats []statsapi.ContainerStats) { - stats = append(stats, sp.getSystemPodsStats(nodeConfig, podStats, updateStats)) + stats = append(stats, sp.getSystemPodsCPUAndMemoryStats(nodeConfig, podStats, updateStats)) return stats } -func (sp *summaryProviderImpl) getSystemPodsStats(nodeConfig cm.NodeConfig, podStats []statsapi.PodStats, updateStats bool) statsapi.ContainerStats { +func (sp *summaryProviderImpl) GetSystemContainersCPUAndMemoryStats(nodeConfig cm.NodeConfig, podStats []statsapi.PodStats, updateStats bool) (stats []statsapi.ContainerStats) { + stats = append(stats, sp.getSystemPodsCPUAndMemoryStats(nodeConfig, podStats, updateStats)) + return stats +} + +func (sp *summaryProviderImpl) getSystemPodsCPUAndMemoryStats(nodeConfig cm.NodeConfig, podStats []statsapi.PodStats, updateStats bool) statsapi.ContainerStats { now := metav1.NewTime(time.Now()) podsSummary := statsapi.ContainerStats{ StartTime: now, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/volume_stat_calculator.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/volume_stat_calculator.go index 2c535f56241a..220d8785f534 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/volume_stat_calculator.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/volume_stat_calculator.go @@ -27,7 +27,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/util/format" "k8s.io/kubernetes/pkg/volume" - "github.com/golang/glog" + "k8s.io/klog" ) // volumeStatCalculator calculates volume metrics for a given pod periodically in the background and caches the result @@ -109,7 +109,7 @@ func (s *volumeStatCalculator) calcAndStoreStats() { if err != nil { // Expected for Volumes that don't support Metrics if 
!volume.IsNotSupported(err) { - glog.V(4).Infof("Failed to calculate volume metrics for pod %s volume %s: %+v", format.Pod(s.pod), name, err) + klog.V(4).Infof("Failed to calculate volume metrics for pod %s volume %s: %+v", format.Pod(s.pod), name, err) } continue } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/BUILD index ddd6639ea26a..07ca1b545405 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/BUILD @@ -28,10 +28,10 @@ go_library( "//pkg/volume:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/cadvisor/fs:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/github.com/google/cadvisor/info/v2:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/cadvisor_stats_provider.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/cadvisor_stats_provider.go index 8529872cd506..9549c12a1a66 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/cadvisor_stats_provider.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/cadvisor_stats_provider.go @@ -22,8 +22,8 @@ import ( "sort" "strings" - "github.com/golang/glog" cadvisorapiv2 "github.com/google/cadvisor/info/v2" + "k8s.io/klog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -145,6 +145,68 @@ func (p *cadvisorStatsProvider) ListPodStats() ([]statsapi.PodStats, error) { return result, nil } +// ListPodCPUAndMemoryStats returns the cpu and memory stats of all the pod-managed containers. +func (p *cadvisorStatsProvider) ListPodCPUAndMemoryStats() ([]statsapi.PodStats, error) { + infos, err := getCadvisorContainerInfo(p.cadvisor) + if err != nil { + return nil, fmt.Errorf("failed to get container info from cadvisor: %v", err) + } + // removeTerminatedContainerInfo will also remove pod level cgroups, so save the infos into allInfos first + allInfos := infos + infos = removeTerminatedContainerInfo(infos) + // Map each container to a pod and update the PodStats with container data. + podToStats := map[statsapi.PodReference]*statsapi.PodStats{} + for key, cinfo := range infos { + // On systemd using devicemapper each mount into the container has an + // associated cgroup. We ignore them to ensure we do not get duplicate + // entries in our summary. For details on .mount units: + // http://man7.org/linux/man-pages/man5/systemd.mount.5.html + if strings.HasSuffix(key, ".mount") { + continue + } + // Build the Pod key if this container is managed by a Pod + if !isPodManagedContainer(&cinfo) { + continue + } + ref := buildPodRef(cinfo.Spec.Labels) + + // Lookup the PodStats for the pod using the PodRef. If none exists, + // initialize a new entry. + podStats, found := podToStats[ref] + if !found { + podStats = &statsapi.PodStats{PodRef: ref} + podToStats[ref] = podStats + } + + // Update the PodStats entry with the stats from the container by + // adding it to podStats.Containers. 
+ containerName := kubetypes.GetContainerName(cinfo.Spec.Labels) + if containerName == leaky.PodInfraContainerName { + // Special case for infrastructure container which is hidden from + // the user and has network stats. + podStats.StartTime = metav1.NewTime(cinfo.Spec.CreationTime) + } else { + podStats.Containers = append(podStats.Containers, *cadvisorInfoToContainerCPUAndMemoryStats(containerName, &cinfo)) + } + } + + // Add each PodStats to the result. + result := make([]statsapi.PodStats, 0, len(podToStats)) + for _, podStats := range podToStats { + podUID := types.UID(podStats.PodRef.UID) + // Lookup the pod-level cgroup's CPU and memory stats + podInfo := getCadvisorPodInfoFromPodUID(podUID, allInfos) + if podInfo != nil { + cpu, memory := cadvisorInfoToCPUandMemoryStats(podInfo) + podStats.CPU = cpu + podStats.Memory = memory + } + result = append(result, *podStats) + } + + return result, nil +} + func calcEphemeralStorage(containers []statsapi.ContainerStats, volumes []statsapi.VolumeStats, rootFsInfo *cadvisorapiv2.FsInfo) *statsapi.FsStats { result := &statsapi.FsStats{ Time: metav1.NewTime(rootFsInfo.Timestamp), @@ -244,7 +306,7 @@ func isPodManagedContainer(cinfo *cadvisorapiv2.ContainerInfo) bool { podNamespace := kubetypes.GetPodNamespace(cinfo.Spec.Labels) managed := podName != "" && podNamespace != "" if !managed && podName != podNamespace { - glog.Warningf( + klog.Warningf( "Expect container to have either both podName (%s) and podNamespace (%s) labels, or neither.", podName, podNamespace) } @@ -367,7 +429,7 @@ func getCadvisorContainerInfo(ca cadvisor.Interface) (map[string]cadvisorapiv2.C if _, ok := infos["/"]; ok { // If the failure is partial, log it and return a best-effort // response. - glog.Errorf("Partial failure issuing cadvisor.ContainerInfoV2: %v", err) + klog.Errorf("Partial failure issuing cadvisor.ContainerInfoV2: %v", err) } else { return nil, fmt.Errorf("failed to get root cgroup stats: %v", err) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/cri_stats_provider.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/cri_stats_provider.go index 6580f46ead00..bafd24d65c4f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/cri_stats_provider.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/cri_stats_provider.go @@ -24,8 +24,8 @@ import ( "strings" "time" - "github.com/golang/glog" cadvisorfs "github.com/google/cadvisor/fs" + "k8s.io/klog" cadvisorapiv2 "github.com/google/cadvisor/info/v2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -154,7 +154,7 @@ func (p *criStatsProvider) ListPodStats() ([]statsapi.PodStats, error) { // container stats caStats, caFound := caInfos[containerID] if !caFound { - glog.V(4).Infof("Unable to find cadvisor stats for %q", containerID) + klog.V(4).Infof("Unable to find cadvisor stats for %q", containerID) } else { p.addCadvisorContainerStats(cs, &caStats) } @@ -169,6 +169,87 @@ func (p *criStatsProvider) ListPodStats() ([]statsapi.PodStats, error) { return result, nil } +// ListPodCPUAndMemoryStats returns the CPU and Memory stats of all the pod-managed containers. +func (p *criStatsProvider) ListPodCPUAndMemoryStats() ([]statsapi.PodStats, error) { + containers, err := p.runtimeService.ListContainers(&runtimeapi.ContainerFilter{}) + if err != nil { + return nil, fmt.Errorf("failed to list all containers: %v", err) + } + + // Creates pod sandbox map. 
+ podSandboxMap := make(map[string]*runtimeapi.PodSandbox) + podSandboxes, err := p.runtimeService.ListPodSandbox(&runtimeapi.PodSandboxFilter{}) + if err != nil { + return nil, fmt.Errorf("failed to list all pod sandboxes: %v", err) + } + for _, s := range podSandboxes { + podSandboxMap[s.Id] = s + } + + // sandboxIDToPodStats is a temporary map from sandbox ID to its pod stats. + sandboxIDToPodStats := make(map[string]*statsapi.PodStats) + + resp, err := p.runtimeService.ListContainerStats(&runtimeapi.ContainerStatsFilter{}) + if err != nil { + return nil, fmt.Errorf("failed to list all container stats: %v", err) + } + + containers = removeTerminatedContainer(containers) + // Creates container map. + containerMap := make(map[string]*runtimeapi.Container) + for _, c := range containers { + containerMap[c.Id] = c + } + + allInfos, err := getCadvisorContainerInfo(p.cadvisor) + if err != nil { + return nil, fmt.Errorf("failed to fetch cadvisor stats: %v", err) + } + caInfos := getCRICadvisorStats(allInfos) + + for _, stats := range resp { + containerID := stats.Attributes.Id + container, found := containerMap[containerID] + if !found { + continue + } + + podSandboxID := container.PodSandboxId + podSandbox, found := podSandboxMap[podSandboxID] + if !found { + continue + } + + // Creates the stats of the pod (if not created yet) which the + // container belongs to. + ps, found := sandboxIDToPodStats[podSandboxID] + if !found { + ps = buildPodStats(podSandbox) + sandboxIDToPodStats[podSandboxID] = ps + } + + // Fill available CPU and memory stats for full set of required pod stats + cs := p.makeContainerCPUAndMemoryStats(stats, container) + p.addPodCPUMemoryStats(ps, types.UID(podSandbox.Metadata.Uid), allInfos, cs) + + // If cadvisor stats is available for the container, use it to populate + // container stats + caStats, caFound := caInfos[containerID] + if !caFound { + klog.V(4).Infof("Unable to find cadvisor stats for %q", containerID) + } else { + p.addCadvisorContainerStats(cs, &caStats) + } + ps.Containers = append(ps.Containers, *cs) + } + + result := make([]statsapi.PodStats, 0, len(sandboxIDToPodStats)) + for _, s := range sandboxIDToPodStats { + result = append(result, *s) + } + return result, nil +} + // ImageFsStats returns the stats of the image filesystem. func (p *criStatsProvider) ImageFsStats() (*statsapi.FsStats, error) { resp, err := p.imageService.ImageFsInfo() @@ -226,7 +307,7 @@ func (p *criStatsProvider) ImageFsDevice() (string, error) { // nil. func (p *criStatsProvider) getFsInfo(fsID *runtimeapi.FilesystemIdentifier) *cadvisorapiv2.FsInfo { if fsID == nil { - glog.V(2).Infof("Failed to get filesystem info: fsID is nil.") + klog.V(2).Infof("Failed to get filesystem info: fsID is nil.") return nil } mountpoint := fsID.GetMountpoint() @@ -234,9 +315,9 @@ func (p *criStatsProvider) getFsInfo(fsID *runtimeapi.FilesystemIdentifier) *cad if err != nil { msg := fmt.Sprintf("Failed to get the info of the filesystem with mountpoint %q: %v.", mountpoint, err) if err == cadvisorfs.ErrNoSuchDevice { - glog.V(2).Info(msg) + klog.V(2).Info(msg) } else { - glog.Error(msg) + klog.Error(msg) } return nil } @@ -281,7 +362,7 @@ func (p *criStatsProvider) addPodNetworkStats( } // TODO: sum Pod network stats from container stats.
- glog.V(4).Infof("Unable to find cadvisor stats for sandbox %q", podSandboxID) + klog.V(4).Infof("Unable to find cadvisor stats for sandbox %q", podSandboxID) } func (p *criStatsProvider) addPodCPUMemoryStats( @@ -393,6 +474,33 @@ func (p *criStatsProvider) makeContainerStats( return result } +func (p *criStatsProvider) makeContainerCPUAndMemoryStats( + stats *runtimeapi.ContainerStats, + container *runtimeapi.Container, +) *statsapi.ContainerStats { + result := &statsapi.ContainerStats{ + Name: stats.Attributes.Metadata.Name, + // The StartTime in the summary API is the container creation time. + StartTime: metav1.NewTime(time.Unix(0, container.CreatedAt)), + CPU: &statsapi.CPUStats{}, + Memory: &statsapi.MemoryStats{}, + // UserDefinedMetrics is not supported by CRI. + } + if stats.Cpu != nil { + result.CPU.Time = metav1.NewTime(time.Unix(0, stats.Cpu.Timestamp)) + if stats.Cpu.UsageCoreNanoSeconds != nil { + result.CPU.UsageCoreNanoSeconds = &stats.Cpu.UsageCoreNanoSeconds.Value + } + } + if stats.Memory != nil { + result.Memory.Time = metav1.NewTime(time.Unix(0, stats.Memory.Timestamp)) + if stats.Memory.WorkingSetBytes != nil { + result.Memory.WorkingSetBytes = &stats.Memory.WorkingSetBytes.Value + } + } + return result +} + // removeTerminatedContainer returns the specified container but with // the stats of the terminated containers removed. func removeTerminatedContainer(containers []*runtimeapi.Container) []*runtimeapi.Container { @@ -471,7 +579,7 @@ func (p *criStatsProvider) getContainerLogStats(path string, rootFsInfo *cadviso m := p.logMetricsService.createLogMetricsProvider(path) logMetrics, err := m.GetMetrics() if err != nil { - glog.Errorf("Unable to fetch container log stats for path %s: %v ", path, err) + klog.Errorf("Unable to fetch container log stats for path %s: %v ", path, err) return nil } result := &statsapi.FsStats{ diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/helper.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/helper.go index a195b9c634a2..54f3093e5530 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/helper.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/helper.go @@ -20,7 +20,7 @@ import ( "fmt" "time" - "github.com/golang/glog" + "k8s.io/klog" cadvisorapiv1 "github.com/google/cadvisor/info/v1" cadvisorapiv2 "github.com/google/cadvisor/info/v2" @@ -132,6 +132,21 @@ func cadvisorInfoToContainerStats(name string, info *cadvisorapiv2.ContainerInfo return result } +// cadvisorInfoToContainerCPUAndMemoryStats returns the statsapi.ContainerStats converted +// from the container and filesystem info. +func cadvisorInfoToContainerCPUAndMemoryStats(name string, info *cadvisorapiv2.ContainerInfo) *statsapi.ContainerStats { + result := &statsapi.ContainerStats{ + StartTime: metav1.NewTime(info.Spec.CreationTime), + Name: name, + } + + cpu, memory := cadvisorInfoToCPUandMemoryStats(info) + result.CPU = cpu + result.Memory = memory + + return result +} + // cadvisorInfoToNetworkStats returns the statsapi.NetworkStats converted from // the container info from cadvisor. func cadvisorInfoToNetworkStats(name string, info *cadvisorapiv2.ContainerInfo) *statsapi.NetworkStats { @@ -191,7 +206,7 @@ func cadvisorInfoToUserDefinedMetrics(info *cadvisorapiv2.ContainerInfo) []stats for name, values := range stat.CustomMetrics { specVal, ok := udmMap[name] if !ok { - glog.Warningf("spec for custom metric %q is missing from cAdvisor output. 
Spec: %+v, Metrics: %+v", name, info.Spec, stat.CustomMetrics) + klog.Warningf("spec for custom metric %q is missing from cAdvisor output. Spec: %+v, Metrics: %+v", name, info.Spec, stat.CustomMetrics) continue } for _, value := range values { @@ -211,8 +226,8 @@ func cadvisorInfoToUserDefinedMetrics(info *cadvisorapiv2.ContainerInfo) []stats for _, specVal := range udmMap { udm = append(udm, statsapi.UserDefinedMetric{ UserDefinedMetricDescriptor: specVal.ref, - Time: metav1.NewTime(specVal.time), - Value: specVal.value, + Time: metav1.NewTime(specVal.time), + Value: specVal.value, }) } return udm diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/stats_provider.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/stats_provider.go index 29a24a1b3c29..903f8678a649 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/stats_provider.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/stats/stats_provider.go @@ -85,6 +85,7 @@ type StatsProvider struct { // containers managed by pods. type containerStatsProvider interface { ListPodStats() ([]statsapi.PodStats, error) + ListPodCPUAndMemoryStats() ([]statsapi.PodStats, error) ImageFsStats() (*statsapi.FsStats, error) ImageFsDevice() (string, error) } @@ -106,6 +107,18 @@ func (p *StatsProvider) GetCgroupStats(cgroupName string, updateStats bool) (*st return s, n, nil } +// GetCgroupCPUAndMemoryStats returns the CPU and memory stats of the cgroup with the cgroupName. Note that +// this function doesn't generate filesystem stats. +func (p *StatsProvider) GetCgroupCPUAndMemoryStats(cgroupName string, updateStats bool) (*statsapi.ContainerStats, error) { + info, err := getCgroupInfo(p.cadvisor, cgroupName, updateStats) + if err != nil { + return nil, fmt.Errorf("failed to get cgroup stats for %q: %v", cgroupName, err) + } + // Rootfs and imagefs doesn't make sense for raw cgroup. + s := cadvisorInfoToContainerCPUAndMemoryStats(cgroupName, info) + return s, nil +} + // RootFsStats returns the stats of the node root filesystem. 
func (p *StatsProvider) RootFsStats() (*statsapi.FsStats, error) { rootFsInfo, err := p.cadvisor.RootFsInfo() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/status/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/status/BUILD index 6e341e9b4983..0b590440e585 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/status/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/status/BUILD @@ -28,7 +28,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager.go index 1dbfc6aa5e7b..37d5bfa03545 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager.go @@ -24,7 +24,6 @@ import ( clientset "k8s.io/client-go/kubernetes" - "github.com/golang/glog" "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" @@ -32,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/diff" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog" podutil "k8s.io/kubernetes/pkg/api/v1/pod" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubepod "k8s.io/kubernetes/pkg/kubelet/pod" @@ -145,17 +145,17 @@ func (m *manager) Start() { // on the master, where the kubelet is responsible for bootstrapping the pods // of the master components. if m.kubeClient == nil { - glog.Infof("Kubernetes client is nil, not starting status manager.") + klog.Infof("Kubernetes client is nil, not starting status manager.") return } - glog.Info("Starting to sync pod status with apiserver") + klog.Info("Starting to sync pod status with apiserver") syncTicker := time.Tick(syncPeriod) // syncPod and syncBatch share the same go routine to avoid sync races. go wait.Forever(func() { select { case syncRequest := <-m.podStatusChannel: - glog.V(5).Infof("Status Manager: syncing pod: %q, with status: (%d, %v) from podStatusChannel", + klog.V(5).Infof("Status Manager: syncing pod: %q, with status: (%d, %v) from podStatusChannel", syncRequest.podUID, syncRequest.status.version, syncRequest.status.status) m.syncPod(syncRequest.podUID, syncRequest.status) case <-syncTicker: @@ -177,7 +177,7 @@ func (m *manager) SetPodStatus(pod *v1.Pod, status v1.PodStatus) { for _, c := range pod.Status.Conditions { if !kubetypes.PodConditionByKubelet(c.Type) { - glog.Errorf("Kubelet is trying to update pod condition %q for pod %q. "+ + klog.Errorf("Kubelet is trying to update pod condition %q for pod %q. 
"+ "But it is not owned by kubelet.", string(c.Type), format.Pod(pod)) } } @@ -196,13 +196,13 @@ func (m *manager) SetContainerReadiness(podUID types.UID, containerID kubecontai pod, ok := m.podManager.GetPodByUID(podUID) if !ok { - glog.V(4).Infof("Pod %q has been deleted, no need to update readiness", string(podUID)) + klog.V(4).Infof("Pod %q has been deleted, no need to update readiness", string(podUID)) return } oldStatus, found := m.podStatuses[pod.UID] if !found { - glog.Warningf("Container readiness changed before pod has synced: %q - %q", + klog.Warningf("Container readiness changed before pod has synced: %q - %q", format.Pod(pod), containerID.String()) return } @@ -210,13 +210,13 @@ func (m *manager) SetContainerReadiness(podUID types.UID, containerID kubecontai // Find the container to update. containerStatus, _, ok := findContainerStatus(&oldStatus.status, containerID.String()) if !ok { - glog.Warningf("Container readiness changed for unknown container: %q - %q", + klog.Warningf("Container readiness changed for unknown container: %q - %q", format.Pod(pod), containerID.String()) return } if containerStatus.Ready == ready { - glog.V(4).Infof("Container readiness unchanged (%v): %q - %q", ready, + klog.V(4).Infof("Container readiness unchanged (%v): %q - %q", ready, format.Pod(pod), containerID.String()) return } @@ -238,7 +238,7 @@ func (m *manager) SetContainerReadiness(podUID types.UID, containerID kubecontai if conditionIndex != -1 { status.Conditions[conditionIndex] = condition } else { - glog.Warningf("PodStatus missing %s type condition: %+v", conditionType, status) + klog.Warningf("PodStatus missing %s type condition: %+v", conditionType, status) status.Conditions = append(status.Conditions, condition) } } @@ -328,14 +328,17 @@ func (m *manager) updateStatusInternal(pod *v1.Pod, status v1.PodStatus, forceUp // Check for illegal state transition in containers if err := checkContainerStateTransition(oldStatus.ContainerStatuses, status.ContainerStatuses, pod.Spec.RestartPolicy); err != nil { - glog.Errorf("Status update on pod %v/%v aborted: %v", pod.Namespace, pod.Name, err) + klog.Errorf("Status update on pod %v/%v aborted: %v", pod.Namespace, pod.Name, err) return false } if err := checkContainerStateTransition(oldStatus.InitContainerStatuses, status.InitContainerStatuses, pod.Spec.RestartPolicy); err != nil { - glog.Errorf("Status update on pod %v/%v aborted: %v", pod.Namespace, pod.Name, err) + klog.Errorf("Status update on pod %v/%v aborted: %v", pod.Namespace, pod.Name, err) return false } + // Set ContainersReadyCondition.LastTransitionTime. + updateLastTransitionTime(&status, &oldStatus, v1.ContainersReady) + // Set ReadyCondition.LastTransitionTime. updateLastTransitionTime(&status, &oldStatus, v1.PodReady) @@ -358,7 +361,7 @@ func (m *manager) updateStatusInternal(pod *v1.Pod, status v1.PodStatus, forceUp // The intent here is to prevent concurrent updates to a pod's status from // clobbering each other so the phase of a pod progresses monotonically. if isCached && isPodStatusByKubeletEqual(&cachedStatus.status, &status) && !forceUpdate { - glog.V(3).Infof("Ignoring same status for pod %q, status: %+v", format.Pod(pod), status) + klog.V(3).Infof("Ignoring same status for pod %q, status: %+v", format.Pod(pod), status) return false // No new status. 
} @@ -372,13 +375,13 @@ func (m *manager) updateStatusInternal(pod *v1.Pod, status v1.PodStatus, forceUp select { case m.podStatusChannel <- podStatusSyncRequest{pod.UID, newStatus}: - glog.V(5).Infof("Status Manager: adding pod: %q, with status: (%q, %v) to podStatusChannel", + klog.V(5).Infof("Status Manager: adding pod: %q, with status: (%q, %v) to podStatusChannel", pod.UID, newStatus.version, newStatus.status) return true default: // Let the periodic syncBatch handle the update if the channel is full. // We can't block, since we hold the mutex lock. - glog.V(4).Infof("Skipping the status update for pod %q for now because the channel is full; status: %+v", + klog.V(4).Infof("Skipping the status update for pod %q for now because the channel is full; status: %+v", format.Pod(pod), status) return false } @@ -412,7 +415,7 @@ func (m *manager) RemoveOrphanedStatuses(podUIDs map[types.UID]bool) { defer m.podStatusesLock.Unlock() for key := range m.podStatuses { if _, ok := podUIDs[key]; !ok { - glog.V(5).Infof("Removing %q from status map.", key) + klog.V(5).Infof("Removing %q from status map.", key) delete(m.podStatuses, key) } } @@ -439,7 +442,7 @@ func (m *manager) syncBatch() { syncedUID := kubetypes.MirrorPodUID(uid) if mirrorUID, ok := podToMirror[kubetypes.ResolvedPodUID(uid)]; ok { if mirrorUID == "" { - glog.V(5).Infof("Static pod %q (%s/%s) does not have a corresponding mirror pod; skipping", uid, status.podName, status.podNamespace) + klog.V(5).Infof("Static pod %q (%s/%s) does not have a corresponding mirror pod; skipping", uid, status.podName, status.podNamespace) continue } syncedUID = mirrorUID @@ -458,7 +461,7 @@ func (m *manager) syncBatch() { }() for _, update := range updatedStatuses { - glog.V(5).Infof("Status Manager: syncPod in syncbatch. pod UID: %q", update.podUID) + klog.V(5).Infof("Status Manager: syncPod in syncbatch. pod UID: %q", update.podUID) m.syncPod(update.podUID, update.status) } } @@ -466,41 +469,41 @@ func (m *manager) syncBatch() { // syncPod syncs the given status with the API server. The caller must not hold the lock. func (m *manager) syncPod(uid types.UID, status versionedPodStatus) { if !m.needsUpdate(uid, status) { - glog.V(1).Infof("Status for pod %q is up-to-date; skipping", uid) + klog.V(1).Infof("Status for pod %q is up-to-date; skipping", uid) return } // TODO: make me easier to express from client code pod, err := m.kubeClient.CoreV1().Pods(status.podNamespace).Get(status.podName, metav1.GetOptions{}) if errors.IsNotFound(err) { - glog.V(3).Infof("Pod %q (%s) does not exist on the server", status.podName, uid) + klog.V(3).Infof("Pod %q (%s) does not exist on the server", status.podName, uid) // If the Pod is deleted the status will be cleared in // RemoveOrphanedStatuses, so we just ignore the update here. return } if err != nil { - glog.Warningf("Failed to get status for pod %q: %v", format.PodDesc(status.podName, status.podNamespace, uid), err) + klog.Warningf("Failed to get status for pod %q: %v", format.PodDesc(status.podName, status.podNamespace, uid), err) return } translatedUID := m.podManager.TranslatePodUID(pod.UID) // Type convert original uid just for the purpose of comparison. 
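// For a static pod, TranslatePodUID maps the mirror pod's UID back to the static pod's UID; a mismatch with the cached UID below means the API pod was deleted and recreated, so the stale status entry is dropped rather than patched.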
if len(translatedUID) > 0 && translatedUID != kubetypes.ResolvedPodUID(uid) { - glog.V(2).Infof("Pod %q was deleted and then recreated, skipping status update; old UID %q, new UID %q", format.Pod(pod), uid, translatedUID) + klog.V(2).Infof("Pod %q was deleted and then recreated, skipping status update; old UID %q, new UID %q", format.Pod(pod), uid, translatedUID) m.deletePodStatus(uid) return } oldStatus := pod.Status.DeepCopy() newPod, patchBytes, err := statusutil.PatchPodStatus(m.kubeClient, pod.Namespace, pod.Name, *oldStatus, mergePodStatus(*oldStatus, status.status)) - glog.V(3).Infof("Patch status for pod %q with %q", format.Pod(pod), patchBytes) + klog.V(3).Infof("Patch status for pod %q with %q", format.Pod(pod), patchBytes) if err != nil { - glog.Warningf("Failed to update status for pod %q: %v", format.Pod(pod), err) + klog.Warningf("Failed to update status for pod %q: %v", format.Pod(pod), err) return } pod = newPod - glog.V(3).Infof("Status for pod %q updated successfully: (%d, %+v)", format.Pod(pod), status.version, status.status) + klog.V(3).Infof("Status for pod %q updated successfully: (%d, %+v)", format.Pod(pod), status.version, status.status) m.apiStatusVersions[kubetypes.MirrorPodUID(pod.UID)] = status.version // We don't handle graceful deletion of mirror pods. @@ -510,10 +513,10 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) { deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(pod.UID)) err = m.kubeClient.CoreV1().Pods(pod.Namespace).Delete(pod.Name, deleteOptions) if err != nil { - glog.Warningf("Failed to delete status for pod %q: %v", format.Pod(pod), err) + klog.Warningf("Failed to delete status for pod %q: %v", format.Pod(pod), err) return } - glog.V(3).Infof("Pod %q fully terminated and removed from etcd", format.Pod(pod)) + klog.V(3).Infof("Pod %q fully terminated and removed from etcd", format.Pod(pod)) m.deletePodStatus(uid) } } @@ -552,14 +555,14 @@ func (m *manager) needsReconcile(uid types.UID, status v1.PodStatus) bool { // The pod could be a static pod, so we should translate first. pod, ok := m.podManager.GetPodByUID(uid) if !ok { - glog.V(4).Infof("Pod %q has been deleted, no need to reconcile", string(uid)) + klog.V(4).Infof("Pod %q has been deleted, no need to reconcile", string(uid)) return false } // If the pod is a static pod, we should check its mirror pod, because only status in mirror pod is meaningful to us. if kubepod.IsStaticPod(pod) { mirrorPod, ok := m.podManager.GetMirrorPodByPod(pod) if !ok { - glog.V(4).Infof("Static pod %q has no corresponding mirror pod, no need to reconcile", format.Pod(pod)) + klog.V(4).Infof("Static pod %q has no corresponding mirror pod, no need to reconcile", format.Pod(pod)) return false } pod = mirrorPod @@ -573,7 +576,7 @@ func (m *manager) needsReconcile(uid types.UID, status v1.PodStatus) bool { // reconcile is not needed. Just return. 
return false } - glog.V(3).Infof("Pod status is inconsistent with cached status for pod %q, a reconciliation should be triggered:\n %+v", format.Pod(pod), + klog.V(3).Infof("Pod status is inconsistent with cached status for pod %q, a reconciliation should be triggered:\n %+v", format.Pod(pod), diff.ObjectDiff(podStatus, status)) return true diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/token/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/token/BUILD index df60c72f5246..9f7d4db7caa9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/token/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/token/BUILD @@ -21,10 +21,11 @@ go_library( visibility = ["//visibility:public"], deps = [ "//staging/src/k8s.io/api/authentication/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -35,6 +36,7 @@ go_test( deps = [ "//staging/src/k8s.io/api/authentication/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/token/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/token/OWNERS index 33904f77888e..d914c0d7195a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/token/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/token/OWNERS @@ -1,6 +1,7 @@ approvers: -- mikedanese +- sig-auth-serviceaccounts-approvers reviewers: -- mikedanese -- awly -- tallclair +- sig-auth-serviceaccounts-reviewers +labels: +- sig/auth + diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/token/token_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/token/token_manager.go index d081bdd149d6..76819ac72df9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/token/token_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/token/token_manager.go @@ -24,11 +24,12 @@ import ( "sync" "time" - "github.com/golang/glog" authenticationv1 "k8s.io/api/authentication/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog" ) const ( @@ -74,6 +75,7 @@ type Manager struct { // * If refresh fails and the old token is no longer valid, return an error func (m *Manager) GetServiceAccountToken(namespace, name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) { key := keyFunc(name, namespace, tr) + ctr, ok := m.get(key) if ok && !m.requiresRefresh(ctr) { @@ -88,7 +90,7 @@ func (m *Manager) GetServiceAccountToken(namespace, name string, tr *authenticat case m.expired(ctr): return nil, fmt.Errorf("token %s expired and refresh failed: %v", key, err) default: - glog.Errorf("couldn't update token %s: %v", key, err) + klog.Errorf("couldn't update token %s: %v", key, err) return ctr, nil } } @@ -97,6 +99,18 @@ func (m *Manager) GetServiceAccountToken(namespace, name string, tr *authenticat return tr, nil } +// 
DeleteServiceAccountToken should be invoked when a pod is deleted. It simply +// cleans the token manager cache. +func (m *Manager) DeleteServiceAccountToken(podUID types.UID) { + m.cacheMutex.Lock() + defer m.cacheMutex.Unlock() + for k, tr := range m.cache { + if tr.Spec.BoundObjectRef.UID == podUID { + delete(m.cache, k) + } + } +} + func (m *Manager) cleanup() { m.cacheMutex.Lock() defer m.cacheMutex.Unlock() @@ -128,7 +142,7 @@ func (m *Manager) expired(t *authenticationv1.TokenRequest) bool { // ttl, or if the token is older than 24 hours. func (m *Manager) requiresRefresh(tr *authenticationv1.TokenRequest) bool { if tr.Spec.ExpirationSeconds == nil { - glog.Errorf("expiration seconds was nil for tr: %#v", tr) + klog.Errorf("expiration seconds was nil for tr: %#v", tr) return false } now := m.clock.Now() @@ -147,5 +161,15 @@ func (m *Manager) requiresRefresh(tr *authenticationv1.TokenRequest) bool { // keys should be nonconfidential and safe to log func keyFunc(name, namespace string, tr *authenticationv1.TokenRequest) string { - return fmt.Sprintf("%q/%q/%#v", name, namespace, tr.Spec) + var exp int64 + if tr.Spec.ExpirationSeconds != nil { + exp = *tr.Spec.ExpirationSeconds + } + + var ref authenticationv1.BoundObjectReference + if tr.Spec.BoundObjectRef != nil { + ref = *tr.Spec.BoundObjectRef + } + + return fmt.Sprintf("%q/%q/%#v/%#v/%#v", name, namespace, tr.Spec.Audiences, exp, ref) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/types/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/types/BUILD index bc62f58d673e..b0fceedbbbd8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/types/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/types/BUILD @@ -32,15 +32,18 @@ go_test( name = "go_default_test", srcs = [ "labels_test.go", + "main_test.go", "pod_status_test.go", "pod_update_test.go", "types_test.go", ], embed = [":go_default_library"], deps = [ + "//pkg/features:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go index 062d10e1230e..bdc5ee7944e5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go @@ -159,13 +159,12 @@ func IsCriticalPod(pod *v1.Pod) bool { return false } -// Preemptable returns true if preemptor pod can preempt preemptee pod: -// - If preemptor's is greater than preemptee's priority, it's preemptable (return true) -// - If preemptor (or its priority) is nil and preemptee bears the critical pod annotation key, -// preemptee can not be preempted (return false) -// - If preemptor (or its priority) is nil and preemptee's priority is greater than or equal to -// SystemCriticalPriority, preemptee can not be preempted (return false) +// Preemptable returns true if preemptor pod can preempt preemptee pod: +// either preemptor is critical and preemptee is not, or (when pod priority is +// enabled) both priorities are set and preemptor's is greater than preemptee's. func Preemptable(preemptor, preemptee
*v1.Pod) bool { + if IsCriticalPod(preemptor) && !IsCriticalPod(preemptee) { + return true + } if utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) { if (preemptor != nil && preemptor.Spec.Priority != nil) && (preemptee != nil && preemptee.Spec.Priority != nil) { @@ -173,7 +172,7 @@ func Preemptable(preemptor, preemptee *v1.Pod) bool { } } - return !IsCriticalPod(preemptee) + return false } // IsCritical returns true if parameters bear the critical pod annotation diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/BUILD index d25898d22c2c..4165864e0a30 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/BUILD @@ -8,11 +8,27 @@ load( go_test( name = "go_default_test", - srcs = ["util_test.go"], - embed = [":go_default_library"], - deps = [ - "//vendor/github.com/stretchr/testify/assert:go_default_library", + srcs = [ + "util_unix_test.go", + "util_windows_test.go", ], + embed = [":go_default_library"], + deps = select({ + "@io_bazel_rules_go//go/platform:darwin": [ + "//vendor/github.com/stretchr/testify/assert:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/github.com/stretchr/testify/assert:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/github.com/stretchr/testify/assert:go_default_library", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/github.com/stretchr/testify/require:go_default_library", + ], + "//conditions:default": [], + }), ) go_library( @@ -29,16 +45,19 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:darwin": [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/sys/unix:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "@io_bazel_rules_go//go/platform:freebsd": [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/sys/unix:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/sys/unix:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "//vendor/github.com/Microsoft/go-winio:go_default_library", ], "//conditions:default": [], }), diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/BUILD index 7b887b444f56..5f4e61e5e54a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/BUILD @@ -1,10 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -12,18 +6,31 @@ go_library( "example_handler.go", "example_plugin.go", "plugin_watcher.go", + "types.go", ], importpath = "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher", + visibility = ["//visibility:public"], deps = [ - "//pkg/kubelet/apis/pluginregistration/v1alpha1:go_default_library", + 
"//pkg/kubelet/apis/pluginregistration/v1:go_default_library", "//pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1:go_default_library", "//pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2:go_default_library", "//pkg/util/filesystem:go_default_library", "//vendor/github.com/fsnotify/fsnotify:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["plugin_watcher_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/kubelet/apis/pluginregistration/v1:go_default_library", + "//vendor/github.com/stretchr/testify/require:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -44,14 +51,3 @@ filegroup( tags = ["automanaged"], visibility = ["//visibility:public"], ) - -go_test( - name = "go_default_test", - srcs = ["plugin_watcher_test.go"], - embed = [":go_default_library"], - deps = [ - "//pkg/kubelet/apis/pluginregistration/v1alpha1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/github.com/stretchr/testify/require:go_default_library", - ], -) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/README b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/README deleted file mode 100644 index c8b6cc284401..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/README +++ /dev/null @@ -1,34 +0,0 @@ -This folder contains a utility, pluginwatcher, for Kubelet to register -different types of node-level plugins such as device plugins or CSI plugins. -It discovers plugins by monitoring inotify events under the directory returned by -kubelet.getPluginsDir(). Lets refer this directory as PluginsSockDir. -For any discovered plugin, pluginwatcher issues Registration.GetInfo grpc call -to get plugin type, name and supported service API versions. For any registered plugin type, -pluginwatcher calls the registered callback function with the received plugin -name, supported service API versions, and the full socket path. The Kubelet -component that receives this callback can acknowledge or reject the plugin -according to its own logic, and use the socket path to establish its service -communication with any API version supported by the plugin. - -Here are the general rules that Kubelet plugin developers should follow: -- Run as 'root' user. Currently creating socket under PluginsSockDir, a root owned directory, requires - plugin process to be running as 'root'. - -- Implements the Registration service specified in - pkg/kubelet/apis/pluginregistration/v*/api.proto. - -- The plugin name sent during Registration.GetInfo grpc should be unique - for the given plugin type (CSIPlugin or DevicePlugin). - -- The socket path needs to be unique within one directory, in normal case, - each plugin type has its own sub directory, but the design does support socket file - under any sub directory of PluginSockDir. - -- A plugin should clean up its own socket upon exiting or when a new instance - comes up. A plugin should NOT remove any sockets belonging to other plugins. - -- A plugin should make sure it has service ready for any supported service API - version listed in the PluginInfo. 
- -For an example plugin implementation, take a look at example_plugin.go -included in this directory. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/README.md b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/README.md new file mode 100644 index 000000000000..1b68c9c50799 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/README.md @@ -0,0 +1,79 @@ +# Plugin Registration Service + +This folder contains a utility, pluginwatcher, for Kubelet to register +different types of node-level plugins such as device plugins or CSI plugins. +It discovers plugins by monitoring inotify events under the directory returned by +kubelet.getPluginsDir(). We will refer to this directory as PluginsDir. + +Plugins are expected to implement the gRPC registration service specified in +pkg/kubelet/apis/pluginregistration/v*/api.proto. + +## Plugin Discovery + +The pluginwatcher service will discover plugins in the PluginsDir when they +place a socket in that directory or, at Kubelet start, if the socket is already +there. + +The socket filename should not start with a '.', as such files are ignored. + + +## gRPC Service Lifecycle + +For any discovered plugin, kubelet will issue a Registration.GetInfo gRPC call +to get plugin type, name, endpoint and supported service API versions. + +Kubelet will then go through a plugin initialization phase where it will issue +plugin-specific calls (e.g. DevicePlugin::GetDevicePluginOptions). + +Once Kubelet determines that it is ready to use your plugin, it will issue a +Registration.NotifyRegistrationStatus gRPC call. + +If the plugin removes its socket from the PluginsDir, this will be interpreted +as a plugin deregistration. + + +## gRPC Service Overview + +Here are the general rules that Kubelet plugin developers should follow: +- Run the plugin as the 'root' user. Currently, creating a socket under PluginsDir, a root-owned + directory, requires the plugin process to be running as 'root'. + +- The plugin name sent during the Registration.GetInfo gRPC call should be unique + for the given plugin type (CSIPlugin or DevicePlugin). + +- The socket path needs to be unique within one directory; in the normal case, + each plugin type has its own sub-directory, but the design does support socket files + under any sub-directory of PluginsDir. + +- A plugin should clean up its own socket upon exiting or when a new instance + comes up. A plugin should NOT remove any sockets belonging to other plugins. + +- A plugin should make sure it has service ready for any supported service API + version listed in the PluginInfo. + +- For an example plugin implementation, take a look at example_plugin.go + included in this directory. + + +# Kubelet Interface + +For any kubelet components using the pluginwatcher module, you will need to +implement the PluginHandler interface defined in the types.go file. + +The interface is documented and the implementations are registered with the +pluginwatcher module in kubelet.go by calling AddHandler(pluginType, handler). + + +The lifecycle follows a simple state machine: + + Validate -> Register -> DeRegister + ^ + + | | + +--------- + + +The pluginwatcher calls the functions with the received plugin name, supported +service API versions and the endpoint to call the plugin on, as illustrated +by the handler sketch below.
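+
+As a rough, hypothetical illustration (not part of this package), a minimal
+handler implementing these callbacks could look like the sketch below. The
+method signatures follow the example handler shipped in this directory; the
+logOnlyHandler name and its accept-everything policy are invented for the
+example:
+
+```go
+package handlerexample
+
+import (
+	"fmt"
+
+	"k8s.io/klog"
+)
+
+// logOnlyHandler is a hypothetical PluginHandler: it accepts any plugin that
+// advertises at least one version and merely logs registration events.
+type logOnlyHandler struct{}
+
+func (h *logOnlyHandler) ValidatePlugin(pluginName string, endpoint string, versions []string, foundInDeprecatedDir bool) error {
+	// Reject plugins that advertise no supported versions; accept the rest.
+	if len(versions) == 0 {
+		return fmt.Errorf("plugin %s advertised no supported versions", pluginName)
+	}
+	return nil
+}
+
+func (h *logOnlyHandler) RegisterPlugin(pluginName, endpoint string, versions []string) error {
+	klog.Infof("registered plugin %s at %s (versions: %v)", pluginName, endpoint, versions)
+	return nil
+}
+
+func (h *logOnlyHandler) DeRegisterPlugin(pluginName string) {
+	klog.Infof("deregistered plugin %s", pluginName)
+}
+```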
+ +The Kubelet component that receives this callback can acknowledge or reject +the plugin according to its own logic, and use the socket path to establish +its service communication with any API version supported by the plugin. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_handler.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_handler.go index 4eae4188d694..d4fa2f5a0299 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_handler.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_handler.go @@ -24,47 +24,75 @@ import ( "time" "golang.org/x/net/context" + "k8s.io/klog" v1beta1 "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1" v1beta2 "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2" ) type exampleHandler struct { - registeredPlugins map[string]struct{} - mutex sync.Mutex - chanForHandlerAckErrors chan error // for testing + SupportedVersions []string + ExpectedNames map[string]int + + eventChans map[string]chan examplePluginEvent // map[pluginName]eventChan + + m sync.Mutex + count int + + permitDeprecatedDir bool } +type examplePluginEvent int + +const ( + exampleEventValidate examplePluginEvent = 0 + exampleEventRegister examplePluginEvent = 1 + exampleEventDeRegister examplePluginEvent = 2 + exampleEventError examplePluginEvent = 3 +) + // NewExampleHandler provide a example handler -func NewExampleHandler() *exampleHandler { +func NewExampleHandler(supportedVersions []string, permitDeprecatedDir bool) *exampleHandler { return &exampleHandler{ - chanForHandlerAckErrors: make(chan error), - registeredPlugins: make(map[string]struct{}), + SupportedVersions: supportedVersions, + ExpectedNames: make(map[string]int), + + eventChans: make(map[string]chan examplePluginEvent), + permitDeprecatedDir: permitDeprecatedDir, } } -func (h *exampleHandler) Cleanup() error { - h.mutex.Lock() - defer h.mutex.Unlock() - h.registeredPlugins = make(map[string]struct{}) - return nil -} +func (p *exampleHandler) ValidatePlugin(pluginName string, endpoint string, versions []string, foundInDeprecatedDir bool) error { + if foundInDeprecatedDir && !p.permitDeprecatedDir { + return fmt.Errorf("device plugin socket was found in a directory that is no longer supported and this test does not permit plugins from deprecated dir") + } + + p.SendEvent(pluginName, exampleEventValidate) -func (h *exampleHandler) Handler(pluginName string, endpoint string, versions []string, sockPath string) (chan bool, error) { + n, ok := p.DecreasePluginCount(pluginName) + if !ok && n > 0 { + return fmt.Errorf("pluginName('%s') wasn't expected (count is %d)", pluginName, n) + } - // check for supported versions - if !reflect.DeepEqual([]string{"v1beta1", "v1beta2"}, versions) { - return nil, fmt.Errorf("not the supported versions: %s", versions) + if !reflect.DeepEqual(versions, p.SupportedVersions) { + return fmt.Errorf("versions('%v') != supported versions('%v')", versions, p.SupportedVersions) } // this handler expects non-empty endpoint as an example if len(endpoint) == 0 { - return nil, errors.New("expecting non empty endpoint") + return errors.New("expecting non empty endpoint") } - _, conn, err := dial(sockPath) + return nil +} + +func (p *exampleHandler) RegisterPlugin(pluginName, endpoint string, versions []string) error { + p.SendEvent(pluginName, exampleEventRegister) + + // Verifies the grpcServer 
is ready to serve services. + _, conn, err := dial(endpoint, time.Second) if err != nil { - return nil, err + return fmt.Errorf("Failed dialing endpoint (%s): %v", endpoint, err) } defer conn.Close() @@ -73,33 +101,54 @@ func (h *exampleHandler) Handler(pluginName string, endpoint string, versions [] v1beta2Client := v1beta2.NewExampleClient(conn) // Tests v1beta1 GetExampleInfo - if _, err = v1beta1Client.GetExampleInfo(context.Background(), &v1beta1.ExampleRequest{}); err != nil { - return nil, err + _, err = v1beta1Client.GetExampleInfo(context.Background(), &v1beta1.ExampleRequest{}) + if err != nil { + return fmt.Errorf("Failed GetExampleInfo for v1beta1Client(%s): %v", endpoint, err) } - // Tests v1beta2 GetExampleInfo - if _, err = v1beta2Client.GetExampleInfo(context.Background(), &v1beta2.ExampleRequest{}); err != nil { - return nil, err + // Tests v1beta2 GetExampleInfo + _, err = v1beta2Client.GetExampleInfo(context.Background(), &v1beta2.ExampleRequest{}) + if err != nil { + return fmt.Errorf("Failed GetExampleInfo for v1beta2Client(%s): %v", endpoint, err) } - // handle registered plugin - h.mutex.Lock() - if _, exist := h.registeredPlugins[pluginName]; exist { - h.mutex.Unlock() - return nil, fmt.Errorf("plugin %s already registered", pluginName) + return nil +} + +func (p *exampleHandler) DeRegisterPlugin(pluginName string) { + p.SendEvent(pluginName, exampleEventDeRegister) +} + +func (p *exampleHandler) EventChan(pluginName string) chan examplePluginEvent { + return p.eventChans[pluginName] +} + +func (p *exampleHandler) SendEvent(pluginName string, event examplePluginEvent) { + klog.V(2).Infof("Sending %v for plugin %s over chan %v", event, pluginName, p.eventChans[pluginName]) + p.eventChans[pluginName] <- event +} + +func (p *exampleHandler) AddPluginName(pluginName string) { + p.m.Lock() + defer p.m.Unlock() + + v, ok := p.ExpectedNames[pluginName] + if !ok { + p.eventChans[pluginName] = make(chan examplePluginEvent) + v = 1 + } + + p.ExpectedNames[pluginName] = v +} + +func (p *exampleHandler) DecreasePluginCount(pluginName string) (old int, ok bool) { + p.m.Lock() + defer p.m.Unlock() + + v, ok := p.ExpectedNames[pluginName] + if !ok { + v = -1 } - h.registeredPlugins[pluginName] = struct{}{} - h.mutex.Unlock() - - chanForAckOfNotification := make(chan bool) - go func() { - select { - case <-chanForAckOfNotification: - // TODO: handle the negative scenario - close(chanForAckOfNotification) - case <-time.After(time.Second): - h.chanForHandlerAckErrors <- errors.New("Timed out while waiting for notification ack") - } - }() - return chanForAckOfNotification, nil + + return v, ok } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_plugin.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_plugin.go index 5c2dd966ba4b..d55130ef0bcb 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_plugin.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_plugin.go @@ -18,15 +18,17 @@ package pluginwatcher import ( "errors" + "fmt" "net" + "os" "sync" "time" - "github.com/golang/glog" "golang.org/x/net/context" "google.golang.org/grpc" + "k8s.io/klog" - registerapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1alpha1" + registerapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1" v1beta1 "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1" v1beta2
"k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2" ) @@ -39,6 +41,7 @@ type examplePlugin struct { endpoint string // for testing pluginName string pluginType string + versions []string } type pluginServiceV1Beta1 struct { @@ -46,7 +49,7 @@ type pluginServiceV1Beta1 struct { } func (s *pluginServiceV1Beta1) GetExampleInfo(ctx context.Context, rqt *v1beta1.ExampleRequest) (*v1beta1.ExampleResponse, error) { - glog.Infof("GetExampleInfo v1beta1field: %s", rqt.V1Beta1Field) + klog.Infof("GetExampleInfo v1beta1field: %s", rqt.V1Beta1Field) return &v1beta1.ExampleResponse{}, nil } @@ -59,7 +62,7 @@ type pluginServiceV1Beta2 struct { } func (s *pluginServiceV1Beta2) GetExampleInfo(ctx context.Context, rqt *v1beta2.ExampleRequest) (*v1beta2.ExampleResponse, error) { - glog.Infof("GetExampleInfo v1beta2_field: %s", rqt.V1Beta2Field) + klog.Infof("GetExampleInfo v1beta2_field: %s", rqt.V1Beta2Field) return &v1beta2.ExampleResponse{}, nil } @@ -73,12 +76,13 @@ func NewExamplePlugin() *examplePlugin { } // NewTestExamplePlugin returns an initialized examplePlugin instance for testing -func NewTestExamplePlugin(pluginName string, pluginType string, endpoint string) *examplePlugin { +func NewTestExamplePlugin(pluginName string, pluginType string, endpoint string, advertisedVersions ...string) *examplePlugin { return &examplePlugin{ pluginName: pluginName, pluginType: pluginType, - registrationStatus: make(chan registerapi.RegistrationStatus), endpoint: endpoint, + versions: advertisedVersions, + registrationStatus: make(chan registerapi.RegistrationStatus), } } @@ -88,36 +92,48 @@ func (e *examplePlugin) GetInfo(ctx context.Context, req *registerapi.InfoReques Type: e.pluginType, Name: e.pluginName, Endpoint: e.endpoint, - SupportedVersions: []string{"v1beta1", "v1beta2"}, + SupportedVersions: e.versions, }, nil } func (e *examplePlugin) NotifyRegistrationStatus(ctx context.Context, status *registerapi.RegistrationStatus) (*registerapi.RegistrationStatusResponse, error) { + klog.Errorf("Registration is: %v\n", status) + if e.registrationStatus != nil { e.registrationStatus <- *status } - if !status.PluginRegistered { - glog.Errorf("Registration failed: %s\n", status.Error) - } + return ®isterapi.RegistrationStatusResponse{}, nil } -// Serve starts example plugin grpc server -func (e *examplePlugin) Serve(socketPath string) error { - glog.Infof("starting example server at: %s\n", socketPath) - lis, err := net.Listen("unix", socketPath) +// Serve starts a pluginwatcher server and one or more of the plugin services +func (e *examplePlugin) Serve(services ...string) error { + klog.Infof("starting example server at: %s\n", e.endpoint) + lis, err := net.Listen("unix", e.endpoint) if err != nil { return err } - glog.Infof("example server started at: %s\n", socketPath) + + klog.Infof("example server started at: %s\n", e.endpoint) e.grpcServer = grpc.NewServer() + // Registers kubelet plugin watcher api. registerapi.RegisterRegistrationServer(e.grpcServer, e) - // Registers services for both v1beta1 and v1beta2 versions. 
- v1beta1 := &pluginServiceV1Beta1{server: e} - v1beta1.RegisterService() - v1beta2 := &pluginServiceV1Beta2{server: e} - v1beta2.RegisterService() + + for _, service := range services { + switch service { + case "v1beta1": + v1beta1 := &pluginServiceV1Beta1{server: e} + v1beta1.RegisterService() + break + case "v1beta2": + v1beta2 := &pluginServiceV1Beta2{server: e} + v1beta2.RegisterService() + break + default: + return fmt.Errorf("Unsupported service: '%s'", service) + } + } // Starts service e.wg.Add(1) @@ -125,25 +141,33 @@ func (e *examplePlugin) Serve(socketPath string) error { defer e.wg.Done() // Blocking call to accept incoming connections. if err := e.grpcServer.Serve(lis); err != nil { - glog.Errorf("example server stopped serving: %v", err) + klog.Errorf("example server stopped serving: %v", err) } }() + return nil } func (e *examplePlugin) Stop() error { - glog.Infof("Stopping example server\n") + klog.Infof("Stopping example server at: %s\n", e.endpoint) + e.grpcServer.Stop() c := make(chan struct{}) go func() { defer close(c) e.wg.Wait() }() + select { case <-c: - return nil + break case <-time.After(time.Second): - glog.Errorf("Timed out on waiting for stop completion") return errors.New("Timed out on waiting for stop completion") } + + if err := os.Remove(e.endpoint); err != nil && !os.IsNotExist(err) { + return err + } + + return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1/api.pb.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1/api.pb.go index 671e3df493b5..296dc91fc461 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1/api.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1/api.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: api.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. 
@@ -225,24 +224,6 @@ func (m *ExampleResponse) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Api(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Api(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintApi(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2/api.pb.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2/api.pb.go index 0c63b31429f2..73f36b7bef02 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2/api.pb.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2/api.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: api.proto -// DO NOT EDIT! /* Package v1beta2 is a generated protocol buffer package. @@ -226,24 +225,6 @@ func (m *ExampleResponse) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Api(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Api(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintApi(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/plugin_watcher.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/plugin_watcher.go index 6db743dd4fbe..9116222caeb8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/plugin_watcher.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/plugin_watcher.go @@ -20,51 +20,166 @@ import ( "fmt" "net" "os" + "path/filepath" + "strings" "sync" "time" "github.com/fsnotify/fsnotify" - "github.com/golang/glog" "github.com/pkg/errors" "golang.org/x/net/context" "google.golang.org/grpc" - registerapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1alpha1" + "k8s.io/klog" + + registerapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1" utilfs "k8s.io/kubernetes/pkg/util/filesystem" ) -// RegisterCallbackFn is the type of the callback function that handlers will provide -type RegisterCallbackFn func(pluginName string, endpoint string, versions []string, socketPath string) (chan bool, error) - // Watcher is the plugin watcher type Watcher struct { - path string - 
handlers map[string]RegisterCallbackFn - stopCh chan interface{} - fs utilfs.Filesystem - fsWatcher *fsnotify.Watcher - wg sync.WaitGroup - mutex sync.Mutex + path string + deprecatedPath string + stopCh chan interface{} + fs utilfs.Filesystem + fsWatcher *fsnotify.Watcher + wg sync.WaitGroup + + mutex sync.Mutex + handlers map[string]PluginHandler + plugins map[string]pathInfo + pluginsPool map[string]map[string]*sync.Mutex // map[pluginType][pluginName] +} + +type pathInfo struct { + pluginType string + pluginName string } // NewWatcher provides a new watcher -func NewWatcher(sockDir string) Watcher { - return Watcher{ - path: sockDir, - handlers: make(map[string]RegisterCallbackFn), - fs: &utilfs.DefaultFs{}, +// deprecatedSockDir refers to a pre-GA directory that was used by older plugins +// for socket registration. New plugins should not use this directory. +func NewWatcher(sockDir string, deprecatedSockDir string) *Watcher { + return &Watcher{ + path: sockDir, + deprecatedPath: deprecatedSockDir, + fs: &utilfs.DefaultFs{}, + + handlers: make(map[string]PluginHandler), + plugins: make(map[string]pathInfo), + pluginsPool: make(map[string]map[string]*sync.Mutex), } } -// AddHandler registers a callback to be invoked for a particular type of plugin -func (w *Watcher) AddHandler(pluginType string, handlerCbkFn RegisterCallbackFn) { +func (w *Watcher) AddHandler(pluginType string, handler PluginHandler) { + w.mutex.Lock() + defer w.mutex.Unlock() + + w.handlers[pluginType] = handler +} + +func (w *Watcher) getHandler(pluginType string) (PluginHandler, bool) { w.mutex.Lock() defer w.mutex.Unlock() - w.handlers[pluginType] = handlerCbkFn + + h, ok := w.handlers[pluginType] + return h, ok } -// Creates the plugin directory, if it doesn't already exist. -func (w *Watcher) createPluginDir() error { - glog.V(4).Infof("Ensuring Plugin directory at %s ", w.path) +// Start watches for the creation of plugin sockets at the path +func (w *Watcher) Start() error { + klog.V(2).Infof("Plugin Watcher Start at %s", w.path) + w.stopCh = make(chan interface{}) + + // Creating the directory to be watched if it doesn't exist yet, + // and walks through the directory to discover the existing plugins. + if err := w.init(); err != nil { + return err + } + + fsWatcher, err := fsnotify.NewWatcher() + if err != nil { + return fmt.Errorf("failed to start plugin fsWatcher, err: %v", err) + } + w.fsWatcher = fsWatcher + + w.wg.Add(1) + go func(fsWatcher *fsnotify.Watcher) { + defer w.wg.Done() + for { + select { + case event := <-fsWatcher.Events: + //TODO: Handle errors by taking corrective measures + + w.wg.Add(1) + go func() { + defer w.wg.Done() + + if event.Op&fsnotify.Create == fsnotify.Create { + err := w.handleCreateEvent(event) + if err != nil { + klog.Errorf("error %v when handling create event: %s", err, event) + } + } else if event.Op&fsnotify.Remove == fsnotify.Remove { + err := w.handleDeleteEvent(event) + if err != nil { + klog.Errorf("error %v when handling delete event: %s", err, event) + } + } + return + }() + continue + case err := <-fsWatcher.Errors: + if err != nil { + klog.Errorf("fsWatcher received error: %v", err) + } + continue + case <-w.stopCh: + return + } + } + }(fsWatcher) + + // Traverse plugin dir after starting the plugin processing goroutine + if err := w.traversePluginDir(w.path); err != nil { + w.Stop() + return fmt.Errorf("failed to traverse plugin socket path %q, err: %v", w.path, err) + } + + // Traverse deprecated plugin dir, if specified. 
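+ // Sockets found under the deprecated path are later reported to handlers with foundInDeprecatedDir=true, so that ValidatePlugin can reject them.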
+ if len(w.deprecatedPath) != 0 { + if err := w.traversePluginDir(w.deprecatedPath); err != nil { + w.Stop() + return fmt.Errorf("failed to traverse deprecated plugin socket path %q, err: %v", w.deprecatedPath, err) + } + } + + return nil +} + +// Stop stops probing the creation of plugin sockets at the path +func (w *Watcher) Stop() error { + close(w.stopCh) + + c := make(chan struct{}) + go func() { + defer close(c) + w.wg.Wait() + }() + + select { + case <-c: + case <-time.After(11 * time.Second): + return fmt.Errorf("timeout on stopping watcher") + } + + w.fsWatcher.Close() + + return nil +} + +func (w *Watcher) init() error { + klog.V(4).Infof("Ensuring Plugin directory at %s ", w.path) + if err := w.fs.MkdirAll(w.path, 0755); err != nil { return fmt.Errorf("error (re-)creating root %s: %v", w.path, err) } @@ -73,40 +188,80 @@ func (w *Watcher) createPluginDir() error { } // Walks through the plugin directory discover any existing plugin sockets. +// Goroutines started here will be waited for in Stop() before cleaning up. +// Ignore all errors except root dir not being walkable func (w *Watcher) traversePluginDir(dir string) error { return w.fs.Walk(dir, func(path string, info os.FileInfo, err error) error { if err != nil { - return fmt.Errorf("error accessing path: %s error: %v", path, err) + if path == dir { + return fmt.Errorf("error accessing path: %s error: %v", path, err) + } + + klog.Errorf("error accessing path: %s error: %v", path, err) + return nil } switch mode := info.Mode(); { case mode.IsDir(): + if w.containsBlacklistedDir(path) { + return filepath.SkipDir + } + if err := w.fsWatcher.Add(path); err != nil { return fmt.Errorf("failed to watch %s, err: %v", path, err) } case mode&os.ModeSocket != 0: + w.wg.Add(1) go func() { + defer w.wg.Done() w.fsWatcher.Events <- fsnotify.Event{ Name: path, Op: fsnotify.Create, } }() + default: + klog.V(5).Infof("Ignoring file %s with mode %v", path, mode) } return nil }) } -func (w *Watcher) init() error { - if err := w.createPluginDir(); err != nil { - return err +// Handle filesystem notify event. +// Files names: +// - MUST NOT start with a '.' +func (w *Watcher) handleCreateEvent(event fsnotify.Event) error { + klog.V(6).Infof("Handling create event: %v", event) + + if w.containsBlacklistedDir(event.Name) { + return nil } - return nil + + fi, err := os.Stat(event.Name) + if err != nil { + return fmt.Errorf("stat file %s failed: %v", event.Name, err) + } + + if strings.HasPrefix(fi.Name(), ".") { + klog.V(5).Infof("Ignoring file (starts with '.'): %s", fi.Name()) + return nil + } + + if !fi.IsDir() { + if fi.Mode()&os.ModeSocket == 0 { + klog.V(5).Infof("Ignoring non socket file %s", fi.Name()) + return nil + } + + return w.handlePluginRegistration(event.Name) + } + + return w.traversePluginDir(event.Name) } -func (w *Watcher) registerPlugin(socketPath string) error { +func (w *Watcher) handlePluginRegistration(socketPath string) error { //TODO: Implement rate limiting to mitigate any DOS kind of attacks. 
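`Stop` above bounds shutdown by racing the `WaitGroup` against an 11-second timer, presumably chosen to outlast the 10-second dial timeout used during registration so in-flight handshakes can finish. The pattern in isolation, as a minimal sketch with hypothetical names:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// waitTimeout waits for wg with an upper bound, returning an error on timeout.
func waitTimeout(wg *sync.WaitGroup, d time.Duration) error {
	done := make(chan struct{})
	go func() {
		defer close(done)
		wg.Wait()
	}()

	select {
	case <-done:
		return nil
	case <-time.After(d):
		return fmt.Errorf("timeout after %v", d)
	}
}

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		time.Sleep(100 * time.Millisecond) // simulated in-flight work
	}()
	fmt.Println(waitTimeout(&wg, time.Second)) // <nil>
}
```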
- client, conn, err := dial(socketPath) + client, conn, err := dial(socketPath, 10*time.Second) if err != nil { return fmt.Errorf("dial failed at socket %s, err: %v", socketPath, err) } @@ -114,154 +269,166 @@ func (w *Watcher) registerPlugin(socketPath string) error { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() + infoResp, err := client.GetInfo(ctx, ®isterapi.InfoRequest{}) if err != nil { return fmt.Errorf("failed to get plugin info using RPC GetInfo at socket %s, err: %v", socketPath, err) } - return w.invokeRegistrationCallbackAtHandler(ctx, client, infoResp, socketPath) -} - -func (w *Watcher) invokeRegistrationCallbackAtHandler(ctx context.Context, client registerapi.RegistrationClient, infoResp *registerapi.PluginInfo, socketPath string) error { - var handlerCbkFn RegisterCallbackFn - var ok bool - handlerCbkFn, ok = w.handlers[infoResp.Type] + handler, ok := w.handlers[infoResp.Type] if !ok { - errStr := fmt.Sprintf("no handler registered for plugin type: %s at socket %s", infoResp.Type, socketPath) - if _, err := client.NotifyRegistrationStatus(ctx, ®isterapi.RegistrationStatus{ - PluginRegistered: false, - Error: errStr, - }); err != nil { - return errors.Wrap(err, errStr) - } - return errors.New(errStr) + return w.notifyPlugin(client, false, fmt.Sprintf("no handler registered for plugin type: %s at socket %s", infoResp.Type, socketPath)) } - var versions []string - for _, version := range infoResp.SupportedVersions { - versions = append(versions, version) + // ReRegistration: We want to handle multiple plugins registering at the same time with the same name sequentially. + // See the state machine for more information. + // This is done by using a Lock for each plugin with the same name and type + pool := w.getPluginPool(infoResp.Type, infoResp.Name) + + pool.Lock() + defer pool.Unlock() + + if infoResp.Endpoint == "" { + infoResp.Endpoint = socketPath } + + foundInDeprecatedDir := w.foundInDeprecatedDir(socketPath) + // calls handler callback to verify registration request - chanForAckOfNotification, err := handlerCbkFn(infoResp.Name, infoResp.Endpoint, versions, socketPath) - if err != nil { - errStr := fmt.Sprintf("plugin registration failed with err: %v", err) - if _, err := client.NotifyRegistrationStatus(ctx, ®isterapi.RegistrationStatus{ - PluginRegistered: false, - Error: errStr, - }); err != nil { - return errors.Wrap(err, errStr) - } - return errors.New(errStr) + if err := handler.ValidatePlugin(infoResp.Name, infoResp.Endpoint, infoResp.SupportedVersions, foundInDeprecatedDir); err != nil { + return w.notifyPlugin(client, false, fmt.Sprintf("plugin validation failed with err: %v", err)) } - if _, err := client.NotifyRegistrationStatus(ctx, ®isterapi.RegistrationStatus{ - PluginRegistered: true, - }); err != nil { - chanForAckOfNotification <- false + // We add the plugin to the pluginwatcher's map before calling a plugin consumer's Register handle + // so that if we receive a delete event during Register Plugin, we can process it as a DeRegister call. 
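`handlePluginRegistration` above takes a per-plugin lock before validating, so re-registrations of the same (type, name) pair are processed strictly in sequence while unrelated plugins proceed concurrently. A condensed sketch of such a lazily built lock pool (names are illustrative; the diff's own version appears further below as `getPluginPool`):

```go
package main

import (
	"fmt"
	"sync"
)

// lockPool hands out one mutex per (pluginType, pluginName) pair so that
// events for the same plugin are serialized while distinct plugins
// proceed in parallel.
type lockPool struct {
	mu    sync.Mutex
	locks map[string]map[string]*sync.Mutex
}

func newLockPool() *lockPool {
	return &lockPool{locks: make(map[string]map[string]*sync.Mutex)}
}

func (p *lockPool) get(pluginType, pluginName string) *sync.Mutex {
	p.mu.Lock()
	defer p.mu.Unlock()

	if _, ok := p.locks[pluginType]; !ok {
		p.locks[pluginType] = make(map[string]*sync.Mutex)
	}
	if _, ok := p.locks[pluginType][pluginName]; !ok {
		p.locks[pluginType][pluginName] = &sync.Mutex{}
	}
	return p.locks[pluginType][pluginName]
}

func main() {
	pool := newLockPool()
	l := pool.get("CSIPlugin", "driver.example.com")
	l.Lock()
	// ... register or deregister the plugin under the lock ...
	l.Unlock()
	fmt.Println("done")
}
```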
+ w.registerPlugin(socketPath, infoResp.Type, infoResp.Name) + + if err := handler.RegisterPlugin(infoResp.Name, infoResp.Endpoint, infoResp.SupportedVersions); err != nil { + return w.notifyPlugin(client, false, fmt.Sprintf("plugin registration failed with err: %v", err)) + } + + // Notify is called after register to guarantee that even if notify throws an error Register will always be called after validate + if err := w.notifyPlugin(client, true, ""); err != nil { return fmt.Errorf("failed to send registration status at socket %s, err: %v", socketPath, err) } - chanForAckOfNotification <- true return nil } -// Handle filesystem notify event. -func (w *Watcher) handleFsNotifyEvent(event fsnotify.Event) error { - if event.Op&fsnotify.Create != fsnotify.Create { +func (w *Watcher) handleDeleteEvent(event fsnotify.Event) error { + klog.V(6).Infof("Handling delete event: %v", event) + + plugin, ok := w.getPlugin(event.Name) + if !ok { + klog.V(5).Infof("could not find plugin for deleted file %s", event.Name) return nil } - fi, err := os.Stat(event.Name) - if err != nil { - return fmt.Errorf("stat file %s failed: %v", event.Name, err) - } + // You should not get a Deregister call while registering a plugin + pool := w.getPluginPool(plugin.pluginType, plugin.pluginName) - if !fi.IsDir() { - return w.registerPlugin(event.Name) + pool.Lock() + defer pool.Unlock() + + // ReRegisteration: When waiting for the lock a plugin with the same name (not socketPath) could have registered + // In that case, we don't want to issue a DeRegister call for that plugin + // When ReRegistering, the new plugin will have removed the current mapping (map[socketPath] = plugin) and replaced + // it with it's own socketPath. + if _, ok = w.getPlugin(event.Name); !ok { + klog.V(2).Infof("A newer plugin watcher has been registered for plugin %v, dropping DeRegister call", plugin) + return nil } - if err := w.traversePluginDir(event.Name); err != nil { - return fmt.Errorf("failed to traverse plugin path %s, err: %v", event.Name, err) + h, ok := w.getHandler(plugin.pluginType) + if !ok { + return fmt.Errorf("could not find handler %s for plugin %s at path %s", plugin.pluginType, plugin.pluginName, event.Name) } + klog.V(2).Infof("DeRegistering plugin %v at path %s", plugin, event.Name) + w.deRegisterPlugin(event.Name, plugin.pluginType, plugin.pluginName) + h.DeRegisterPlugin(plugin.pluginName) + return nil } -// Start watches for the creation of plugin sockets at the path -func (w *Watcher) Start() error { - glog.V(2).Infof("Plugin Watcher Start at %s", w.path) - w.stopCh = make(chan interface{}) +func (w *Watcher) registerPlugin(socketPath, pluginType, pluginName string) { + w.mutex.Lock() + defer w.mutex.Unlock() - // Creating the directory to be watched if it doesn't exist yet, - // and walks through the directory to discover the existing plugins. 
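The comment above explains why the socketPath-to-pathInfo map is updated before `RegisterPlugin` runs: a delete arriving mid-registration must resolve to a DeRegister, while a stale delete for an old socket after a re-registration must be dropped. The bookkeeping in isolation, as a sketch with illustrative type and method names:

```go
package main

import (
	"fmt"
	"sync"
)

type pathInfo struct{ pluginType, pluginName string }

type registry struct {
	mu      sync.Mutex
	plugins map[string]pathInfo // keyed by socket path
}

// register replaces any entry with the same (type, name) so that a later
// delete of the *old* socket no longer maps to a live plugin.
func (r *registry) register(socketPath string, info pathInfo) {
	r.mu.Lock()
	defer r.mu.Unlock()
	for path, existing := range r.plugins {
		if existing == info {
			delete(r.plugins, path)
			break
		}
	}
	r.plugins[socketPath] = info
}

// shouldDeregister reports whether a delete event for socketPath still
// refers to a registered plugin.
func (r *registry) shouldDeregister(socketPath string) bool {
	r.mu.Lock()
	defer r.mu.Unlock()
	_, ok := r.plugins[socketPath]
	return ok
}

func main() {
	r := &registry{plugins: make(map[string]pathInfo)}
	r.register("/plugins/foo-1.9.7.sock", pathInfo{"CSIPlugin", "foo"})
	r.register("/plugins/foo-1.9.9.sock", pathInfo{"CSIPlugin", "foo"}) // re-registration
	fmt.Println(r.shouldDeregister("/plugins/foo-1.9.7.sock"))          // false: stale delete dropped
	fmt.Println(r.shouldDeregister("/plugins/foo-1.9.9.sock"))          // true
}
```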
- if err := w.init(); err != nil { - return err - } + // Reregistration case, if this plugin is already in the map, remove it + // This will prevent handleDeleteEvent to issue a DeRegister call + for path, info := range w.plugins { + if info.pluginType != pluginType || info.pluginName != pluginName { + continue + } - fsWatcher, err := fsnotify.NewWatcher() - if err != nil { - return fmt.Errorf("failed to start plugin fsWatcher, err: %v", err) + delete(w.plugins, path) + break } - w.fsWatcher = fsWatcher - if err := w.traversePluginDir(w.path); err != nil { - fsWatcher.Close() - return fmt.Errorf("failed to traverse plugin socket path, err: %v", err) + w.plugins[socketPath] = pathInfo{ + pluginType: pluginType, + pluginName: pluginName, } +} - w.wg.Add(1) - go func(fsWatcher *fsnotify.Watcher) { - defer w.wg.Done() - for { - select { - case event := <-fsWatcher.Events: - //TODO: Handle errors by taking corrective measures - go func() { - err := w.handleFsNotifyEvent(event) - if err != nil { - glog.Errorf("error %v when handle event: %s", err, event) - } - }() - continue - case err := <-fsWatcher.Errors: - if err != nil { - glog.Errorf("fsWatcher received error: %v", err) - } - continue - case <-w.stopCh: - fsWatcher.Close() - return - } - } - }(fsWatcher) - return nil +func (w *Watcher) deRegisterPlugin(socketPath, pluginType, pluginName string) { + w.mutex.Lock() + defer w.mutex.Unlock() + + delete(w.plugins, socketPath) + delete(w.pluginsPool[pluginType], pluginName) } -// Stop stops probing the creation of plugin sockets at the path -func (w *Watcher) Stop() error { - close(w.stopCh) - c := make(chan struct{}) - go func() { - defer close(c) - w.wg.Wait() - }() - select { - case <-c: - case <-time.After(10 * time.Second): - return fmt.Errorf("timeout on stopping watcher") +func (w *Watcher) getPlugin(socketPath string) (pathInfo, bool) { + w.mutex.Lock() + defer w.mutex.Unlock() + + plugin, ok := w.plugins[socketPath] + return plugin, ok +} + +func (w *Watcher) getPluginPool(pluginType, pluginName string) *sync.Mutex { + w.mutex.Lock() + defer w.mutex.Unlock() + + if _, ok := w.pluginsPool[pluginType]; !ok { + w.pluginsPool[pluginType] = make(map[string]*sync.Mutex) } - return nil + + if _, ok := w.pluginsPool[pluginType][pluginName]; !ok { + w.pluginsPool[pluginType][pluginName] = &sync.Mutex{} + } + + return w.pluginsPool[pluginType][pluginName] } -// Cleanup cleans the path by removing sockets -func (w *Watcher) Cleanup() error { - return os.RemoveAll(w.path) +func (w *Watcher) notifyPlugin(client registerapi.RegistrationClient, registered bool, errStr string) error { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + status := ®isterapi.RegistrationStatus{ + PluginRegistered: registered, + Error: errStr, + } + + if _, err := client.NotifyRegistrationStatus(ctx, status); err != nil { + return errors.Wrap(err, errStr) + } + + if errStr != "" { + return errors.New(errStr) + } + + return nil } // Dial establishes the gRPC communication with the picked up plugin socket. 
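The `dial` rewrite just below drops the deprecated `grpc.WithTimeout` in favor of a caller-supplied timeout enforced through `context.WithTimeout` and `grpc.DialContext`. A self-contained version of that pattern for a unix socket (the socket path is hypothetical):

```go
package main

import (
	"context"
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
)

// dialSocket connects to a unix socket with an overall deadline enforced
// via context, mirroring the grpc.DialContext pattern used in the diff.
func dialSocket(socketPath string, timeout time.Duration) (*grpc.ClientConn, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	return grpc.DialContext(ctx, socketPath,
		grpc.WithInsecure(), // plugin sockets are local; no TLS
		grpc.WithBlock(),    // fail fast if the socket is not serving
		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout("unix", addr, timeout)
		}),
	)
}

func main() {
	conn, err := dialSocket("/tmp/plugins/example.sock", 10*time.Second)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}
```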
https://godoc.org/google.golang.org/grpc#Dial -func dial(unixSocketPath string) (registerapi.RegistrationClient, *grpc.ClientConn, error) { - c, err := grpc.Dial(unixSocketPath, grpc.WithInsecure(), grpc.WithBlock(), - grpc.WithTimeout(10*time.Second), +func dial(unixSocketPath string, timeout time.Duration) (registerapi.RegistrationClient, *grpc.ClientConn, error) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + c, err := grpc.DialContext(ctx, unixSocketPath, grpc.WithInsecure(), grpc.WithBlock(), grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { return net.DialTimeout("unix", addr, timeout) }), @@ -273,3 +440,27 @@ func dial(unixSocketPath string) (registerapi.RegistrationClient, *grpc.ClientCo return registerapi.NewRegistrationClient(c), c, nil } + +// While deprecated dir is supported, to add extra protection around #69015 +// we will explicitly blacklist kubernetes.io directory. +func (w *Watcher) containsBlacklistedDir(path string) bool { + return strings.HasPrefix(path, w.deprecatedPath+"/kubernetes.io/") || + path == w.deprecatedPath+"/kubernetes.io" +} + +func (w *Watcher) foundInDeprecatedDir(socketPath string) bool { + if len(w.deprecatedPath) != 0 { + if socketPath == w.deprecatedPath { + return true + } + + deprecatedPath := w.deprecatedPath + if !strings.HasSuffix(deprecatedPath, "/") { + deprecatedPath = deprecatedPath + "/" + } + if strings.HasPrefix(socketPath, deprecatedPath) { + return true + } + } + return false +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/types.go new file mode 100644 index 000000000000..83b96b1bc867 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/types.go @@ -0,0 +1,59 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pluginwatcher + +// PluginHandler is an interface a client of the pluginwatcher API needs to implement in +// order to consume plugins +// The PluginHandler follows the simple following state machine: +// +// +--------------------------------------+ +// | ReRegistration | +// | Socket created with same plugin name | +// | | +// | | +// Socket Created v + Socket Deleted +// +------------------> Validate +---------------------------> Register +------------------> DeRegister +// + + + +// | | | +// | Error | Error | +// | | | +// v v v +// Out Out Out +// +// The pluginwatcher module follows strictly and sequentially this state machine for each *plugin name*. +// e.g: If you are Registering a plugin foo, you cannot get a DeRegister call for plugin foo +// until the Register("foo") call returns. Nor will you get a Validate("foo", "Different endpoint", ...) +// call until the Register("foo") call returns. 
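Concretely, a consumer of this state machine only has to implement the three callbacks of the `PluginHandler` interface defined at the end of this file. A toy handler that accepts every plugin and logs each transition (a sketch, not part of the diff; the watcher wiring is omitted):

```go
package main

import (
	"fmt"
	"log"
)

// loggingHandler satisfies the PluginHandler method set: it validates,
// registers, and deregisters in the order the state machine guarantees.
type loggingHandler struct{}

func (h loggingHandler) ValidatePlugin(pluginName string, endpoint string, versions []string, foundInDeprecatedDir bool) error {
	if len(versions) == 0 {
		return fmt.Errorf("plugin %s advertises no supported versions", pluginName)
	}
	log.Printf("validated %s at %s (deprecated dir: %v)", pluginName, endpoint, foundInDeprecatedDir)
	return nil
}

func (h loggingHandler) RegisterPlugin(pluginName, endpoint string, versions []string) error {
	log.Printf("registered %s at %s, versions %v", pluginName, endpoint, versions)
	return nil
}

func (h loggingHandler) DeRegisterPlugin(pluginName string) {
	log.Printf("deregistered %s", pluginName)
}

func main() {
	var h loggingHandler
	_ = h.ValidatePlugin("foo", "/plugins/foo.sock", []string{"v1beta1"}, false)
	_ = h.RegisterPlugin("foo", "/plugins/foo.sock", []string{"v1beta1"})
	h.DeRegisterPlugin("foo")
}
```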
+// +// ReRegistration: Socket created with same plugin name, usually for a plugin update +// e.g: plugin with name foo registers at foo.com/foo-1.9.7 later a plugin with name foo +// registers at foo.com/foo-1.9.9 +// +// DeRegistration: When ReRegistration happens only the deletion of the new socket will trigger a DeRegister call + +type PluginHandler interface { + // Validate returns an error if the information provided by + // the potential plugin is erroneous (unsupported version, ...) + ValidatePlugin(pluginName string, endpoint string, versions []string, foundInDeprecatedDir bool) error + // RegisterPlugin is called so that the plugin can be register by any + // plugin consumer + // Error encountered here can still be Notified to the plugin. + RegisterPlugin(pluginName, endpoint string, versions []string) error + // DeRegister is called once the pluginwatcher observes that the socket has + // been deleted. + DeRegisterPlugin(pluginName string) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/util.go index eb7cf142754b..ba52058a10ba 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/util.go @@ -17,9 +17,6 @@ limitations under the License. package util import ( - "fmt" - "net/url" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -28,20 +25,3 @@ import ( func FromApiserverCache(opts *metav1.GetOptions) { opts.ResourceVersion = "0" } - -func parseEndpoint(endpoint string) (string, string, error) { - u, err := url.Parse(endpoint) - if err != nil { - return "", "", err - } - - if u.Scheme == "tcp" { - return "tcp", u.Host, nil - } else if u.Scheme == "unix" { - return "unix", u.Path, nil - } else if u.Scheme == "" { - return "", "", fmt.Errorf("Using %q as endpoint is deprecated, please consider using full url format", endpoint) - } else { - return u.Scheme, "", fmt.Errorf("protocol %q not supported", u.Scheme) - } -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unix.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unix.go index 0b8fea9c84ac..0ddc63843914 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unix.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unix.go @@ -21,11 +21,13 @@ package util import ( "fmt" "net" + "net/url" "os" + "path/filepath" "time" - "github.com/golang/glog" "golang.org/x/sys/unix" + "k8s.io/klog" ) const ( @@ -72,8 +74,38 @@ func parseEndpointWithFallbackProtocol(endpoint string, fallbackProtocol string) fallbackEndpoint := fallbackProtocol + "://" + endpoint protocol, addr, err = parseEndpoint(fallbackEndpoint) if err == nil { - glog.Warningf("Using %q as endpoint is deprecated, please consider using full url format %q.", endpoint, fallbackEndpoint) + klog.Warningf("Using %q as endpoint is deprecated, please consider using full url format %q.", endpoint, fallbackEndpoint) } } return } + +func parseEndpoint(endpoint string) (string, string, error) { + u, err := url.Parse(endpoint) + if err != nil { + return "", "", err + } + + switch u.Scheme { + case "tcp": + return "tcp", u.Host, nil + + case "unix": + return "unix", u.Path, nil + + case "": + return "", "", fmt.Errorf("Using %q as endpoint is deprecated, please consider using full url format", endpoint) + + default: + return u.Scheme, "", fmt.Errorf("protocol %q not supported", u.Scheme) + } +} + +// 
LocalEndpoint returns the full path to a unix socket at the given endpoint +func LocalEndpoint(path, file string) string { + u := url.URL{ + Scheme: unixProtocol, + Path: path, + } + return filepath.Join(u.String(), file+".sock") +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unsupported.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unsupported.go index 77f14ea5255a..6661678acedf 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unsupported.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unsupported.go @@ -40,3 +40,8 @@ func LockAndCheckSubPath(volumePath, subPath string) ([]uintptr, error) { // UnlockPath empty implementation func UnlockPath(fileHandles []uintptr) { } + +// LocalEndpoint empty implementation +func LocalEndpoint(path, file string) string { + return "" +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_windows.go index 108f4eb914f0..7123728ff94a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_windows.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_windows.go @@ -21,11 +21,16 @@ package util import ( "fmt" "net" + "net/url" + "strings" "time" + + "github.com/Microsoft/go-winio" ) const ( - tcpProtocol = "tcp" + tcpProtocol = "tcp" + npipeProtocol = "npipe" ) func CreateListener(endpoint string) (net.Listener, error) { @@ -33,11 +38,17 @@ func CreateListener(endpoint string) (net.Listener, error) { if err != nil { return nil, err } - if protocol != tcpProtocol { - return nil, fmt.Errorf("only support tcp endpoint") - } - return net.Listen(protocol, addr) + switch protocol { + case tcpProtocol: + return net.Listen(tcpProtocol, addr) + + case npipeProtocol: + return winio.ListenPipe(addr, nil) + + default: + return nil, fmt.Errorf("only support tcp and npipe endpoint") + } } func GetAddressAndDialer(endpoint string) (string, func(addr string, timeout time.Duration) (net.Conn, error), error) { @@ -45,13 +56,59 @@ func GetAddressAndDialer(endpoint string) (string, func(addr string, timeout tim if err != nil { return "", nil, err } - if protocol != tcpProtocol { - return "", nil, fmt.Errorf("only support tcp endpoint") + + if protocol == tcpProtocol { + return addr, tcpDial, nil + } + + if protocol == npipeProtocol { + return addr, npipeDial, nil } - return addr, dial, nil + return "", nil, fmt.Errorf("only support tcp and npipe endpoint") } -func dial(addr string, timeout time.Duration) (net.Conn, error) { +func tcpDial(addr string, timeout time.Duration) (net.Conn, error) { return net.DialTimeout(tcpProtocol, addr, timeout) } + +func npipeDial(addr string, timeout time.Duration) (net.Conn, error) { + return winio.DialPipe(addr, &timeout) +} + +func parseEndpoint(endpoint string) (string, string, error) { + // url.Parse doesn't recognize \, so replace with / first. + endpoint = strings.Replace(endpoint, "\\", "/", -1) + u, err := url.Parse(endpoint) + if err != nil { + return "", "", err + } + + if u.Scheme == "tcp" { + return "tcp", u.Host, nil + } else if u.Scheme == "npipe" { + if strings.HasPrefix(u.Path, "//./pipe") { + return "npipe", u.Path, nil + } + + // fallback host if not provided. + host := u.Host + if host == "" { + host = "." 
+ } + return "npipe", fmt.Sprintf("//%s%s", host, u.Path), nil + } else if u.Scheme == "" { + return "", "", fmt.Errorf("Using %q as endpoint is deprecated, please consider using full url format", endpoint) + } else { + return u.Scheme, "", fmt.Errorf("protocol %q not supported", u.Scheme) + } +} + +// LocalEndpoint returns the full path to a windows named pipe +func LocalEndpoint(path, file string) string { + u := url.URL{ + Scheme: npipeProtocol, + Path: path, + } + return u.String() + "//./pipe/" + file +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volume_host.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volume_host.go index fda1f356c90a..425afebdbf22 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volume_host.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volume_host.go @@ -21,7 +21,7 @@ import ( "net" "runtime" - "github.com/golang/glog" + "k8s.io/klog" authenticationv1 "k8s.io/api/authentication/v1" "k8s.io/api/core/v1" @@ -29,8 +29,8 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/record" + cloudprovider "k8s.io/cloud-provider" csiclientset "k8s.io/csi-api/pkg/client/clientset/versioned" - "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/configmap" "k8s.io/kubernetes/pkg/kubelet/container" @@ -162,7 +162,7 @@ func (kvh *kubeletVolumeHost) GetCloudProvider() cloudprovider.Interface { func (kvh *kubeletVolumeHost) GetMounter(pluginName string) mount.Interface { exec, err := kvh.getMountExec(pluginName) if err != nil { - glog.V(2).Infof("Error finding mount pod for plugin %s: %s", pluginName, err.Error()) + klog.V(2).Infof("Error finding mount pod for plugin %s: %s", pluginName, err.Error()) // Use the default mounter exec = nil } @@ -200,6 +200,10 @@ func (kvh *kubeletVolumeHost) GetServiceAccountTokenFunc() func(namespace, name return kvh.tokenManager.GetServiceAccountToken } +func (kvh *kubeletVolumeHost) DeleteServiceAccountTokenFunc() func(podUID types.UID) { + return kvh.tokenManager.DeleteServiceAccountToken +} + func (kvh *kubeletVolumeHost) GetNodeLabels() (map[string]string, error) { node, err := kvh.kubelet.GetNode() if err != nil { @@ -219,7 +223,7 @@ func (kvh *kubeletVolumeHost) GetEventRecorder() record.EventRecorder { func (kvh *kubeletVolumeHost) GetExec(pluginName string) mount.Exec { exec, err := kvh.getMountExec(pluginName) if err != nil { - glog.V(2).Infof("Error finding mount pod for plugin %s: %s", pluginName, err.Error()) + klog.V(2).Infof("Error finding mount pod for plugin %s: %s", pluginName, err.Error()) // Use the default exec exec = nil } @@ -234,7 +238,7 @@ func (kvh *kubeletVolumeHost) GetExec(pluginName string) mount.Exec { // os.Exec should be used. 
func (kvh *kubeletVolumeHost) getMountExec(pluginName string) (mount.Exec, error) { if !utilfeature.DefaultFeatureGate.Enabled(features.MountContainers) { - glog.V(5).Infof("using default mounter/exec for %s", pluginName) + klog.V(5).Infof("using default mounter/exec for %s", pluginName) return nil, nil } @@ -244,10 +248,10 @@ func (kvh *kubeletVolumeHost) getMountExec(pluginName string) (mount.Exec, error } if pod == nil { // Use default mounter/exec for this plugin - glog.V(5).Infof("using default mounter/exec for %s", pluginName) + klog.V(5).Infof("using default mounter/exec for %s", pluginName) return nil, nil } - glog.V(5).Infof("using container %s/%s/%s to execute mount utilities for %s", pod.Namespace, pod.Name, container, pluginName) + klog.V(5).Infof("using container %s/%s/%s to execute mount utilities for %s", pod.Namespace, pod.Name, container, pluginName) return &containerExec{ pod: pod, containerName: container, @@ -267,6 +271,6 @@ var _ mount.Exec = &containerExec{} func (e *containerExec) Run(cmd string, args ...string) ([]byte, error) { cmdline := append([]string{cmd}, args...) - glog.V(5).Infof("Exec mounter running in pod %s/%s/%s: %v", e.pod.Namespace, e.pod.Name, e.containerName, cmdline) + klog.V(5).Infof("Exec mounter running in pod %s/%s/%s: %v", e.pod.Namespace, e.pod.Name, e.containerName, cmdline) return e.kl.RunInContainer(container.GetPodFullName(e.pod), e.pod.UID, e.containerName, cmdline) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/BUILD index 855d1b65700d..269eb189192d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/BUILD @@ -33,7 +33,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/BUILD index e9861638ab64..3bc1a3886bbe 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/BUILD @@ -22,7 +22,7 @@ go_library( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go index 82aef86c7202..43a9b0f8cdf1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go @@ -24,11 +24,10 @@ import ( "fmt" "sync" - "github.com/golang/glog" - "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" 
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" @@ -155,6 +154,11 @@ type ActualStateOfWorld interface { // mounted for the specified pod as requiring file system resize (if the plugin for the // volume indicates it requires file system resize). MarkFSResizeRequired(volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName) + + // GetAttachedVolumes returns a list of volumes that is known to be attached + // to the node. This list can be used to determine volumes that are either in-use + // or have a mount/unmount operation pending. + GetAttachedVolumes() []AttachedVolume } // MountedVolume represents a volume that has successfully been mounted to a pod. @@ -407,7 +411,7 @@ func (asw *actualStateOfWorld) addVolume( } else { // If volume object already exists, update the fields such as device path volumeObj.devicePath = devicePath - glog.V(2).Infof("Volume %q is already added to attachedVolume list, update device path %q", + klog.V(2).Infof("Volume %q is already added to attachedVolume list, update device path %q", volumeName, devicePath) } @@ -476,7 +480,7 @@ func (asw *actualStateOfWorld) MarkVolumeAsResized( volumeName) } - glog.V(5).Infof("Volume %s(OuterVolumeSpecName %s) of pod %s has been resized", + klog.V(5).Infof("Volume %s(OuterVolumeSpecName %s) of pod %s has been resized", volumeName, podObj.outerVolumeSpecName, podName) podObj.fsResizeRequired = false asw.attachedVolumes[volumeName].mountedPods[podName] = podObj @@ -497,7 +501,7 @@ func (asw *actualStateOfWorld) MarkRemountRequired( asw.volumePluginMgr.FindPluginBySpec(podObj.volumeSpec) if err != nil || volumePlugin == nil { // Log and continue processing - glog.Errorf( + klog.Errorf( "MarkRemountRequired failed to FindPluginBySpec for pod %q (podUid %q) volume: %q (volSpecName: %q)", podObj.podName, podObj.podUID, @@ -521,13 +525,13 @@ func (asw *actualStateOfWorld) MarkFSResizeRequired( defer asw.Unlock() volumeObj, exist := asw.attachedVolumes[volumeName] if !exist { - glog.Warningf("MarkFSResizeRequired for volume %s failed as volume not exist", volumeName) + klog.Warningf("MarkFSResizeRequired for volume %s failed as volume not exist", volumeName) return } podObj, exist := volumeObj.mountedPods[podName] if !exist { - glog.Warningf("MarkFSResizeRequired for volume %s failed "+ + klog.Warningf("MarkFSResizeRequired for volume %s failed "+ "as pod(%s) not exist", volumeName, podName) return } @@ -536,7 +540,7 @@ func (asw *actualStateOfWorld) MarkFSResizeRequired( asw.volumePluginMgr.FindExpandablePluginBySpec(podObj.volumeSpec) if err != nil || volumePlugin == nil { // Log and continue processing - glog.Errorf( + klog.Errorf( "MarkFSResizeRequired failed to find expandable plugin for pod %q volume: %q (volSpecName: %q)", podObj.podName, volumeObj.volumeName, @@ -546,7 +550,7 @@ func (asw *actualStateOfWorld) MarkFSResizeRequired( if volumePlugin.RequiresFSResize() { if !podObj.fsResizeRequired { - glog.V(3).Infof("PVC volume %s(OuterVolumeSpecName %s) of pod %s requires file system resize", + klog.V(3).Infof("PVC volume %s(OuterVolumeSpecName %s) of pod %s requires file system resize", volumeName, podObj.outerVolumeSpecName, podName) podObj.fsResizeRequired = true } @@ -568,7 +572,9 @@ func (asw *actualStateOfWorld) SetVolumeGloballyMounted( volumeObj.globallyMounted = globallyMounted volumeObj.deviceMountPath = deviceMountPath - volumeObj.devicePath = devicePath + if devicePath != "" { + volumeObj.devicePath = devicePath + } asw.attachedVolumes[volumeName] = 
volumeObj return nil } @@ -709,6 +715,20 @@ func (asw *actualStateOfWorld) GetGloballyMountedVolumes() []AttachedVolume { return globallyMountedVolumes } +func (asw *actualStateOfWorld) GetAttachedVolumes() []AttachedVolume { + asw.RLock() + defer asw.RUnlock() + allAttachedVolumes := make( + []AttachedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */) + for _, volumeObj := range asw.attachedVolumes { + allAttachedVolumes = append( + allAttachedVolumes, + asw.newAttachedVolume(&volumeObj)) + } + + return allAttachedVolumes +} + func (asw *actualStateOfWorld) GetUnmountedVolumes() []AttachedVolume { asw.RLock() defer asw.RUnlock() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics/BUILD index 6d44fabf88b3..522eb010844b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics/BUILD @@ -8,8 +8,9 @@ go_library( deps = [ "//pkg/kubelet/volumemanager/cache:go_default_library", "//pkg/volume:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//pkg/volume/util:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics/metrics.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics/metrics.go index c428401e08ff..2536eb559170 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics/metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics/metrics.go @@ -19,10 +19,11 @@ package metrics import ( "sync" - "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" + "k8s.io/klog" "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache" "k8s.io/kubernetes/pkg/volume" + volumeutil "k8s.io/kubernetes/pkg/volume/util" ) const ( @@ -85,7 +86,7 @@ func (c *totalVolumesCollector) Collect(ch chan<- prometheus.Metric) { pluginName, stateName) if err != nil { - glog.Warningf("Failed to create metric : %v", err) + klog.Warningf("Failed to create metric : %v", err) } ch <- metric } @@ -95,7 +96,7 @@ func (c *totalVolumesCollector) Collect(ch chan<- prometheus.Metric) { func (c *totalVolumesCollector) getVolumeCount() volumeCount { counter := make(volumeCount) for _, mountedVolume := range c.asw.GetMountedVolumes() { - pluginName := mountedVolume.PluginName + pluginName := volumeutil.GetFullQualifiedPluginNameForVolume(mountedVolume.PluginName, mountedVolume.VolumeSpec) if pluginName == "" { pluginName = pluginNameNotAvailable } @@ -105,7 +106,7 @@ func (c *totalVolumesCollector) getVolumeCount() volumeCount { for _, volumeToMount := range c.dsw.GetVolumesToMount() { pluginName := pluginNameNotAvailable if plugin, err := c.pluginMgr.FindPluginBySpec(volumeToMount.VolumeSpec); err == nil { - pluginName = plugin.GetPluginName() + pluginName = volumeutil.GetFullQualifiedPluginNameForVolume(plugin.GetPluginName(), volumeToMount.VolumeSpec) } counter.add("desired_state_of_world", pluginName) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/BUILD index 6a370b5124fe..a41a69e0cf5f 100644 --- 
a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/BUILD @@ -28,7 +28,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -47,7 +47,10 @@ filegroup( go_test( name = "go_default_test", - srcs = ["desired_state_of_world_populator_test.go"], + srcs = [ + "desired_state_of_world_populator_test.go", + "main_test.go", + ], embed = [":go_default_library"], deps = [ "//pkg/features:go_default_library", @@ -67,6 +70,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/testing:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go index 3f1b71a05ef8..81bf6e60e91a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go @@ -25,7 +25,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -127,7 +127,7 @@ type processedPods struct { func (dswp *desiredStateOfWorldPopulator) Run(sourcesReady config.SourcesReady, stopCh <-chan struct{}) { // Wait for the completion of a loop that started after sources are all ready, then set hasAddedPods accordingly - glog.Infof("Desired state populator starts to run") + klog.Infof("Desired state populator starts to run") wait.PollUntil(dswp.loopSleepDuration, func() (bool, error) { done := sourcesReady.AllReady() dswp.populatorLoop() @@ -159,7 +159,7 @@ func (dswp *desiredStateOfWorldPopulator) populatorLoop() { // findAndRemoveDeletedPods() is called independently of the main // populator loop. if time.Since(dswp.timeOfLastGetPodStatus) < dswp.getPodStatusRetryDuration { - glog.V(5).Infof( + klog.V(5).Infof( "Skipping findAndRemoveDeletedPods(). Not permitted until %v (getPodStatusRetryDuration %v).", dswp.timeOfLastGetPodStatus.Add(dswp.getPodStatusRetryDuration), dswp.getPodStatusRetryDuration) @@ -230,7 +230,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() { var getPodsErr error runningPods, getPodsErr = dswp.kubeContainerRuntime.GetPods(false) if getPodsErr != nil { - glog.Errorf( + klog.Errorf( "kubeContainerRuntime.findAndRemoveDeletedPods returned error %v.", getPodsErr) continue @@ -252,17 +252,17 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() { } if runningContainers { - glog.V(4).Infof( + klog.V(4).Infof( "Pod %q has been removed from pod manager. However, it still has one or more containers in the non-exited state. 
Therefore, it will not be removed from volume manager.", format.Pod(volumeToMount.Pod)) continue } if !dswp.actualStateOfWorld.VolumeExists(volumeToMount.VolumeName) && podExists { - glog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Actual state has not yet has this information skip removing volume from desired state", "")) + klog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Actual state has not yet has this information skip removing volume from desired state", "")) continue } - glog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Removing volume from desired state", "")) + klog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Removing volume from desired state", "")) dswp.desiredStateOfWorld.DeletePodFromVolume( volumeToMount.PodName, volumeToMount.VolumeName) @@ -293,7 +293,7 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes( pvc, volumeSpec, volumeGidValue, err := dswp.createVolumeSpec(podVolume, pod.Name, pod.Namespace, mountsMap, devicesMap) if err != nil { - glog.Errorf( + klog.Errorf( "Error processing volume %q for pod %q: %v", podVolume.Name, format.Pod(pod), @@ -306,7 +306,7 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes( _, err = dswp.desiredStateOfWorld.AddPodToVolume( uniquePodName, pod, volumeSpec, podVolume.Name, volumeGidValue) if err != nil { - glog.Errorf( + klog.Errorf( "Failed to add volume %q (specName: %q) for pod %q to desiredStateOfWorld. err=%v", podVolume.Name, volumeSpec.Name(), @@ -315,7 +315,7 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes( allVolumesAdded = false } - glog.V(4).Infof( + klog.V(4).Infof( "Added volume %q (volSpec=%q) for pod %q to desired state.", podVolume.Name, volumeSpec.Name(), @@ -365,12 +365,12 @@ func (dswp *desiredStateOfWorldPopulator) checkVolumeFSResize( } fsVolume, err := util.CheckVolumeModeFilesystem(volumeSpec) if err != nil { - glog.Errorf("Check volume mode failed for volume %s(OuterVolumeSpecName %s): %v", + klog.Errorf("Check volume mode failed for volume %s(OuterVolumeSpecName %s): %v", uniqueVolumeName, podVolume.Name, err) return } if !fsVolume { - glog.V(5).Infof("Block mode volume needn't to check file system resize request") + klog.V(5).Infof("Block mode volume needn't to check file system resize request") return } if processedVolumesForFSResize.Has(string(uniqueVolumeName)) { @@ -380,7 +380,7 @@ func (dswp *desiredStateOfWorldPopulator) checkVolumeFSResize( } if mountedReadOnlyByPod(podVolume, pod) { // This volume is used as read only by this pod, we don't perform resize for read only volumes. 
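Per the comment above, resize is skipped only when the pod mounts the volume read-only. A sketch of a predicate consistent with that check, written over the core/v1 types (the helper name and standalone shape are illustrative; the diff's version is `mountedReadOnlyByPod`):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// mountedReadOnly reports whether every container that mounts the named
// volume mounts it read-only; only then is the resize check skipped.
func mountedReadOnly(volumeName string, pod *v1.Pod) bool {
	mounted := false
	for _, c := range pod.Spec.Containers {
		for _, m := range c.VolumeMounts {
			if m.Name != volumeName {
				continue
			}
			mounted = true
			if !m.ReadOnly {
				return false
			}
		}
	}
	return mounted
}

func main() {
	pod := &v1.Pod{Spec: v1.PodSpec{Containers: []v1.Container{{
		Name:         "app",
		VolumeMounts: []v1.VolumeMount{{Name: "data", ReadOnly: true}},
	}}}}
	fmt.Println(mountedReadOnly("data", pod)) // true
}
```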
- glog.V(5).Infof("Skip file system resize check for volume %s in pod %s/%s "+ + klog.V(5).Infof("Skip file system resize check for volume %s in pod %s/%s "+ "as the volume is mounted as readonly", podVolume.Name, pod.Namespace, pod.Name) return } @@ -474,7 +474,7 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec( podVolume v1.Volume, podName string, podNamespace string, mountsMap map[string]bool, devicesMap map[string]bool) (*v1.PersistentVolumeClaim, *volume.Spec, string, error) { if pvcSource := podVolume.VolumeSource.PersistentVolumeClaim; pvcSource != nil { - glog.V(5).Infof( + klog.V(5).Infof( "Found PVC, ClaimName: %q/%q", podNamespace, pvcSource.ClaimName) @@ -491,7 +491,7 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec( } pvName, pvcUID := pvc.Spec.VolumeName, pvc.UID - glog.V(5).Infof( + klog.V(5).Infof( "Found bound PV for PVC (ClaimName %q/%q pvcUID %v): pvName=%q", podNamespace, pvcSource.ClaimName, @@ -509,7 +509,7 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec( err) } - glog.V(5).Infof( + klog.V(5).Infof( "Extracted volumeSpec (%v) from bound PV (pvName %q) and PVC (ClaimName %q/%q pvcUID %v)", volumeSpec.Name(), pvName, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/BUILD index 586067da32e2..53508804484a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/BUILD @@ -29,13 +29,16 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) go_test( name = "go_default_test", - srcs = ["reconciler_test.go"], + srcs = [ + "main_test.go", + "reconciler_test.go", + ], embed = [":go_default_library"], deps = [ "//pkg/features:go_default_library", @@ -52,10 +55,12 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/testing:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler.go index 2ffb5c99fa71..1b6bbbfbe778 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler.go @@ -26,13 +26,13 @@ import ( "path" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog" 
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/config" "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache" @@ -151,7 +151,7 @@ func (rc *reconciler) reconciliationLoopFunc() func() { // Otherwise, the reconstruct process may clean up pods' volumes that are still in use because // desired state of world does not contain a complete list of pods. if rc.populatorHasAddedPods() && !rc.StatesHasBeenSynced() { - glog.Infof("Reconciler: start to sync state") + klog.Infof("Reconciler: start to sync state") rc.sync() } } @@ -167,7 +167,7 @@ func (rc *reconciler) reconcile() { for _, mountedVolume := range rc.actualStateOfWorld.GetMountedVolumes() { if !rc.desiredStateOfWorld.PodExistsInVolume(mountedVolume.PodName, mountedVolume.VolumeName) { // Volume is mounted, unmount it - glog.V(5).Infof(mountedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmountVolume", "")) + klog.V(5).Infof(mountedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmountVolume", "")) err := rc.operationExecutor.UnmountVolume( mountedVolume.MountedVolume, rc.actualStateOfWorld, rc.kubeletPodsDir) if err != nil && @@ -175,10 +175,10 @@ func (rc *reconciler) reconcile() { !exponentialbackoff.IsExponentialBackoff(err) { // Ignore nestedpendingoperations.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors, they are expected. // Log all other errors. - glog.Errorf(mountedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.UnmountVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error()) + klog.Errorf(mountedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.UnmountVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error()) } if err == nil { - glog.Infof(mountedVolume.GenerateMsgDetailed("operationExecutor.UnmountVolume started", "")) + klog.Infof(mountedVolume.GenerateMsgDetailed("operationExecutor.UnmountVolume started", "")) } } } @@ -191,7 +191,7 @@ func (rc *reconciler) reconcile() { if rc.controllerAttachDetachEnabled || !volumeToMount.PluginIsAttachable { // Volume is not attached (or doesn't implement attacher), kubelet attach is disabled, wait // for controller to finish attaching volume. - glog.V(5).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.VerifyControllerAttachedVolume", "")) + klog.V(5).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.VerifyControllerAttachedVolume", "")) err := rc.operationExecutor.VerifyControllerAttachedVolume( volumeToMount.VolumeToMount, rc.nodeName, @@ -201,10 +201,10 @@ func (rc *reconciler) reconcile() { !exponentialbackoff.IsExponentialBackoff(err) { // Ignore nestedpendingoperations.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors, they are expected. // Log all other errors. 
- glog.Errorf(volumeToMount.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.VerifyControllerAttachedVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error()) + klog.Errorf(volumeToMount.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.VerifyControllerAttachedVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error()) } if err == nil { - glog.Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.VerifyControllerAttachedVolume started", "")) + klog.Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.VerifyControllerAttachedVolume started", "")) } } else { // Volume is not attached to node, kubelet attach is enabled, volume implements an attacher, @@ -214,17 +214,17 @@ func (rc *reconciler) reconcile() { VolumeSpec: volumeToMount.VolumeSpec, NodeName: rc.nodeName, } - glog.V(5).Infof(volumeToAttach.GenerateMsgDetailed("Starting operationExecutor.AttachVolume", "")) + klog.V(5).Infof(volumeToAttach.GenerateMsgDetailed("Starting operationExecutor.AttachVolume", "")) err := rc.operationExecutor.AttachVolume(volumeToAttach, rc.actualStateOfWorld) if err != nil && !nestedpendingoperations.IsAlreadyExists(err) && !exponentialbackoff.IsExponentialBackoff(err) { // Ignore nestedpendingoperations.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors, they are expected. // Log all other errors. - glog.Errorf(volumeToMount.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.AttachVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error()) + klog.Errorf(volumeToMount.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.AttachVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error()) } if err == nil { - glog.Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.AttachVolume started", "")) + klog.Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.AttachVolume started", "")) } } } else if !volMounted || cache.IsRemountRequiredError(err) { @@ -234,7 +234,7 @@ func (rc *reconciler) reconcile() { if isRemount { remountingLogStr = "Volume is already mounted to pod, but remount was requested." } - glog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.MountVolume", remountingLogStr)) + klog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.MountVolume", remountingLogStr)) err := rc.operationExecutor.MountVolume( rc.waitForAttachTimeout, volumeToMount.VolumeToMount, @@ -245,18 +245,18 @@ func (rc *reconciler) reconcile() { !exponentialbackoff.IsExponentialBackoff(err) { // Ignore nestedpendingoperations.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors, they are expected. // Log all other errors. 
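The same error-handling discipline repeats here and at every other operation the reconciler starts: already-pending and exponential-backoff errors are expected retries and stay silent, anything else is logged, and a clean start is logged once. The shape of that pattern as a sketch, with sentinel errors standing in for the real `IsAlreadyExists` and `IsExponentialBackoff` predicates:

```go
package main

import (
	"errors"
	"log"
)

// Illustrative stand-ins for the expected error classes.
var (
	errAlreadyExists = errors.New("operation already exists")
	errBackoff       = errors.New("operation in exponential backoff")
)

func isExpected(err error) bool {
	return err == errAlreadyExists || err == errBackoff
}

// runOp applies the reconciler's logging discipline to one operation:
// expected errors are swallowed, everything else is logged, and a
// successful start is logged exactly once.
func runOp(name string, op func() error) {
	err := op()
	if err != nil && !isExpected(err) {
		log.Printf("%s failed: %v", name, err)
	}
	if err == nil {
		log.Printf("%s started", name)
	}
}

func main() {
	runOp("MountVolume", func() error { return errBackoff })       // silent
	runOp("UnmountVolume", func() error { return nil })            // "started"
	runOp("AttachVolume", func() error { return errors.New("x") }) // logged
}
```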
- glog.Errorf(volumeToMount.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.MountVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error()) + klog.Errorf(volumeToMount.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.MountVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error()) } if err == nil { if remountingLogStr == "" { - glog.V(1).Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.MountVolume started", remountingLogStr)) + klog.V(1).Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.MountVolume started", remountingLogStr)) } else { - glog.V(5).Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.MountVolume started", remountingLogStr)) + klog.V(5).Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.MountVolume started", remountingLogStr)) } } } else if cache.IsFSResizeRequiredError(err) && utilfeature.DefaultFeatureGate.Enabled(features.ExpandInUsePersistentVolumes) { - glog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.ExpandVolumeFSWithoutUnmounting", "")) + klog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.ExpandVolumeFSWithoutUnmounting", "")) err := rc.operationExecutor.ExpandVolumeFSWithoutUnmounting( volumeToMount.VolumeToMount, rc.actualStateOfWorld) @@ -265,10 +265,10 @@ func (rc *reconciler) reconcile() { !exponentialbackoff.IsExponentialBackoff(err) { // Ignore nestedpendingoperations.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors, they are expected. // Log all other errors. - glog.Errorf(volumeToMount.GenerateErrorDetailed("operationExecutor.ExpandVolumeFSWithoutUnmounting failed", err).Error()) + klog.Errorf(volumeToMount.GenerateErrorDetailed("operationExecutor.ExpandVolumeFSWithoutUnmounting failed", err).Error()) } if err == nil { - glog.V(4).Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.ExpandVolumeFSWithoutUnmounting started", "")) + klog.V(4).Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.ExpandVolumeFSWithoutUnmounting started", "")) } } } @@ -280,7 +280,7 @@ func (rc *reconciler) reconcile() { !rc.operationExecutor.IsOperationPending(attachedVolume.VolumeName, nestedpendingoperations.EmptyUniquePodName) { if attachedVolume.GloballyMounted { // Volume is globally mounted to device, unmount it - glog.V(5).Infof(attachedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmountDevice", "")) + klog.V(5).Infof(attachedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmountDevice", "")) err := rc.operationExecutor.UnmountDevice( attachedVolume.AttachedVolume, rc.actualStateOfWorld, rc.mounter) if err != nil && @@ -288,20 +288,20 @@ func (rc *reconciler) reconcile() { !exponentialbackoff.IsExponentialBackoff(err) { // Ignore nestedpendingoperations.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors, they are expected. // Log all other errors. 
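The mechanical glog-to-klog substitutions throughout this file keep call sites identical; the practical difference is initialization, since klog does not register its flags automatically the way glog does on import. A minimal usage sketch (flag values are illustrative):

```go
package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// klog is flag-compatible with glog, but its flags must be registered
	// explicitly; this sets the verbosity exactly as -v=4 would.
	klog.InitFlags(nil)
	_ = flag.Set("v", "4")
	flag.Parse()
	defer klog.Flush()

	klog.V(4).Infof("visible at -v=4: %s", "detail")
	klog.V(5).Infof("suppressed at -v=4")
	klog.Warningf("warnings always print")
}
```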
- glog.Errorf(attachedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.UnmountDevice failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error()) + klog.Errorf(attachedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.UnmountDevice failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error()) } if err == nil { - glog.Infof(attachedVolume.GenerateMsgDetailed("operationExecutor.UnmountDevice started", "")) + klog.Infof(attachedVolume.GenerateMsgDetailed("operationExecutor.UnmountDevice started", "")) } } else { // Volume is attached to node, detach it // Kubelet not responsible for detaching or this volume has a non-attachable volume plugin. if rc.controllerAttachDetachEnabled || !attachedVolume.PluginIsAttachable { rc.actualStateOfWorld.MarkVolumeAsDetached(attachedVolume.VolumeName, attachedVolume.NodeName) - glog.Infof(attachedVolume.GenerateMsgDetailed("Volume detached", fmt.Sprintf("DevicePath %q", attachedVolume.DevicePath))) + klog.Infof(attachedVolume.GenerateMsgDetailed("Volume detached", fmt.Sprintf("DevicePath %q", attachedVolume.DevicePath))) } else { // Only detach if kubelet detach is enabled - glog.V(5).Infof(attachedVolume.GenerateMsgDetailed("Starting operationExecutor.DetachVolume", "")) + klog.V(5).Infof(attachedVolume.GenerateMsgDetailed("Starting operationExecutor.DetachVolume", "")) err := rc.operationExecutor.DetachVolume( attachedVolume.AttachedVolume, false /* verifySafeToDetach */, rc.actualStateOfWorld) if err != nil && @@ -309,10 +309,10 @@ func (rc *reconciler) reconcile() { !exponentialbackoff.IsExponentialBackoff(err) { // Ignore nestedpendingoperations.IsAlreadyExists && exponentialbackoff.IsExponentialBackoff errors, they are expected. // Log all other errors. - glog.Errorf(attachedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.DetachVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error()) + klog.Errorf(attachedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.DetachVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error()) } if err == nil { - glog.Infof(attachedVolume.GenerateMsgDetailed("operationExecutor.DetachVolume started", "")) + klog.Infof(attachedVolume.GenerateMsgDetailed("operationExecutor.DetachVolume started", "")) } } } @@ -369,14 +369,14 @@ func (rc *reconciler) syncStates() { // Get volumes information by reading the pod's directory podVolumes, err := getVolumesFromPodDir(rc.kubeletPodsDir) if err != nil { - glog.Errorf("Cannot get volumes from disk %v", err) + klog.Errorf("Cannot get volumes from disk %v", err) return } volumesNeedUpdate := make(map[v1.UniqueVolumeName]*reconstructedVolume) volumeNeedReport := []v1.UniqueVolumeName{} for _, volume := range podVolumes { if rc.actualStateOfWorld.VolumeExistsWithSpecName(volume.podName, volume.volumeSpecName) { - glog.V(4).Infof("Volume exists in actual state (volume.SpecName %s, pod.UID %s), skip cleaning up mounts", volume.volumeSpecName, volume.podName) + klog.V(4).Infof("Volume exists in actual state (volume.SpecName %s, pod.UID %s), skip cleaning up mounts", volume.volumeSpecName, volume.podName) // There is nothing to reconstruct continue } @@ -387,11 +387,11 @@ func (rc *reconciler) syncStates() { if volumeInDSW { // Some pod needs the volume, don't clean it up and hope that // reconcile() calls SetUp and reconstructs the volume in ASW. 
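`syncStates` above seeds reconstruction from whatever it finds on disk under the kubelet pods directory, whose layout is `{podsDir}/{podUID}/volumes/{pluginName}/{volumeName}` (plugin directory names are escaped in practice). A simplified sketch of that scan; the diff's full `getVolumesFromPodDir` appears at the end of this file, and error handling here is abbreviated:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"path/filepath"
)

// podVolume mirrors the tuple syncStates reconstructs from disk.
type podVolume struct {
	podUID, pluginName, volumeSpecName string
}

// volumesFromPodDir walks podsDir two levels deep, collecting one entry
// per (pod, plugin, volume) directory found on disk.
func volumesFromPodDir(podsDir string) ([]podVolume, error) {
	var out []podVolume
	pods, err := ioutil.ReadDir(podsDir)
	if err != nil {
		return nil, err
	}
	for _, pod := range pods {
		volumesDir := filepath.Join(podsDir, pod.Name(), "volumes")
		plugins, err := ioutil.ReadDir(volumesDir)
		if err != nil {
			continue // pod has no volumes directory
		}
		for _, plugin := range plugins {
			vols, err := ioutil.ReadDir(filepath.Join(volumesDir, plugin.Name()))
			if err != nil {
				continue
			}
			for _, vol := range vols {
				out = append(out, podVolume{pod.Name(), plugin.Name(), vol.Name()})
			}
		}
	}
	return out, nil
}

func main() {
	vols, err := volumesFromPodDir("/var/lib/kubelet/pods")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("reconstructed %d volumes\n", len(vols))
}
```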
- glog.V(4).Infof("Volume exists in desired state (volume.SpecName %s, pod.UID %s), skip cleaning up mounts", volume.volumeSpecName, volume.podName) + klog.V(4).Infof("Volume exists in desired state (volume.SpecName %s, pod.UID %s), skip cleaning up mounts", volume.volumeSpecName, volume.podName) continue } // No pod needs the volume. - glog.Warningf("Could not construct volume information, cleanup the mounts. (pod.UID %s, volume.SpecName %s): %v", volume.podName, volume.volumeSpecName, err) + klog.Warningf("Could not construct volume information, cleanup the mounts. (pod.UID %s, volume.SpecName %s): %v", volume.podName, volume.volumeSpecName, err) rc.cleanupMounts(volume) continue } @@ -402,14 +402,14 @@ func (rc *reconciler) syncStates() { // this new kubelet so reconcile() calls SetUp and re-mounts the // volume if it's necessary. volumeNeedReport = append(volumeNeedReport, reconstructedVolume.volumeName) - glog.V(4).Infof("Volume exists in desired state (volume.SpecName %s, pod.UID %s), marking as InUse", volume.volumeSpecName, volume.podName) + klog.V(4).Infof("Volume exists in desired state (volume.SpecName %s, pod.UID %s), marking as InUse", volume.volumeSpecName, volume.podName) continue } // There is no pod that uses the volume. if rc.operationExecutor.IsOperationPending(reconstructedVolume.volumeName, nestedpendingoperations.EmptyUniquePodName) { - glog.Warning("Volume is in pending operation, skip cleaning up mounts") + klog.Warning("Volume is in pending operation, skip cleaning up mounts") } - glog.V(2).Infof( + klog.V(2).Infof( "Reconciler sync states: could not find pod information in desired state, update it in actual state: %+v", reconstructedVolume) volumesNeedUpdate[reconstructedVolume.volumeName] = reconstructedVolume @@ -417,7 +417,7 @@ func (rc *reconciler) syncStates() { if len(volumesNeedUpdate) > 0 { if err = rc.updateStates(volumesNeedUpdate); err != nil { - glog.Errorf("Error occurred during reconstruct volume from disk: %v", err) + klog.Errorf("Error occurred during reconstruct volume from disk: %v", err) } } if len(volumeNeedReport) > 0 { @@ -426,7 +426,7 @@ func (rc *reconciler) syncStates() { } func (rc *reconciler) cleanupMounts(volume podVolume) { - glog.V(2).Infof("Reconciler sync states: could not find information (PID: %s) (Volume SpecName: %s) in desired state, clean up the mount points", + klog.V(2).Infof("Reconciler sync states: could not find information (PID: %s) (Volume SpecName: %s) in desired state, clean up the mount points", volume.podName, volume.volumeSpecName) mountedVolume := operationexecutor.MountedVolume{ PodName: volume.podName, @@ -439,7 +439,7 @@ func (rc *reconciler) cleanupMounts(volume podVolume) { // to unmount both volume and device in the same routine. 
err := rc.operationExecutor.UnmountVolume(mountedVolume, rc.actualStateOfWorld, rc.kubeletPodsDir) if err != nil { - glog.Errorf(mountedVolume.GenerateErrorDetailed(fmt.Sprintf("volumeHandler.UnmountVolumeHandler for UnmountVolume failed"), err).Error()) + klog.Errorf(mountedVolume.GenerateErrorDetailed(fmt.Sprintf("volumeHandler.UnmountVolumeHandler for UnmountVolume failed"), err).Error()) return } } @@ -557,13 +557,13 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume, func (rc *reconciler) updateDevicePath(volumesNeedUpdate map[v1.UniqueVolumeName]*reconstructedVolume) { node, fetchErr := rc.kubeClient.CoreV1().Nodes().Get(string(rc.nodeName), metav1.GetOptions{}) if fetchErr != nil { - glog.Errorf("updateStates in reconciler: could not get node status with error %v", fetchErr) + klog.Errorf("updateStates in reconciler: could not get node status with error %v", fetchErr) } else { for _, attachedVolume := range node.Status.VolumesAttached { if volume, exists := volumesNeedUpdate[attachedVolume.Name]; exists { volume.devicePath = attachedVolume.DevicePath volumesNeedUpdate[attachedVolume.Name] = volume - glog.V(4).Infof("Update devicePath from node status for volume (%q): %q", attachedVolume.Name, volume.devicePath) + klog.V(4).Infof("Update devicePath from node status for volume (%q): %q", attachedVolume.Name, volume.devicePath) } } } @@ -599,7 +599,7 @@ func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*re //TODO: the devicePath might not be correct for some volume plugins: see issue #54108 volume.volumeName, volume.volumeSpec, "" /* nodeName */, volume.devicePath) if err != nil { - glog.Errorf("Could not add volume information to actual state of world: %v", err) + klog.Errorf("Could not add volume information to actual state of world: %v", err) continue } err = rc.actualStateOfWorld.MarkVolumeAsMounted( @@ -612,22 +612,22 @@ func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*re volume.volumeGidValue, volume.volumeSpec) if err != nil { - glog.Errorf("Could not add pod to volume information to actual state of world: %v", err) + klog.Errorf("Could not add pod to volume information to actual state of world: %v", err) continue } - glog.V(4).Infof("Volume: %s (pod UID %s) is marked as mounted and added into the actual state", volume.volumeName, volume.podName) + klog.V(4).Infof("Volume: %s (pod UID %s) is marked as mounted and added into the actual state", volume.volumeName, volume.podName) if volume.attachablePlugin != nil { deviceMountPath, err := getDeviceMountPath(volume) if err != nil { - glog.Errorf("Could not find device mount path for volume %s", volume.volumeName) + klog.Errorf("Could not find device mount path for volume %s", volume.volumeName) continue } err = rc.actualStateOfWorld.MarkDeviceAsMounted(volume.volumeName, volume.devicePath, deviceMountPath) if err != nil { - glog.Errorf("Could not mark device is mounted to actual state of world: %v", err) + klog.Errorf("Could not mark device is mounted to actual state of world: %v", err) continue } - glog.V(4).Infof("Volume: %s (pod UID %s) is marked device as mounted and added into the actual state", volume.volumeName, volume.podName) + klog.V(4).Infof("Volume: %s (pod UID %s) is marked device as mounted and added into the actual state", volume.volumeName, volume.podName) } } return nil @@ -671,13 +671,13 @@ func getVolumesFromPodDir(podDir string) ([]podVolume, error) { volumePluginPath := path.Join(volumesDir, pluginName) volumePluginDirs, err 
:= utilfile.ReadDirNoStat(volumePluginPath) if err != nil { - glog.Errorf("Could not read volume plugin directory %q: %v", volumePluginPath, err) + klog.Errorf("Could not read volume plugin directory %q: %v", volumePluginPath, err) continue } unescapePluginName := utilstrings.UnescapeQualifiedNameForDisk(pluginName) for _, volumeName := range volumePluginDirs { mountPath := path.Join(volumePluginPath, volumeName) - glog.V(5).Infof("podName: %v, mount path from volume plugin directory: %v, ", podName, mountPath) + klog.V(5).Infof("podName: %v, mount path from volume plugin directory: %v, ", podName, mountPath) volumes = append(volumes, podVolume{ podName: volumetypes.UniquePodName(podName), volumeSpecName: volumeName, @@ -689,6 +689,6 @@ func getVolumesFromPodDir(podDir string) ([]podVolume, error) { } } } - glog.V(4).Infof("Get volumes from pod directory %q %+v", podDir, volumes) + klog.V(4).Infof("Get volumes from pod directory %q %+v", podDir, volumes) return volumes, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager.go index 9e04b89b9b3f..561733d592c9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager.go @@ -22,7 +22,6 @@ import ( "strconv" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" k8stypes "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/runtime" @@ -30,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/record" + "k8s.io/klog" "k8s.io/kubernetes/pkg/kubelet/config" "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/pod" @@ -50,21 +50,21 @@ import ( const ( // reconcilerLoopSleepPeriod is the amount of time the reconciler loop waits // between successive executions - reconcilerLoopSleepPeriod time.Duration = 100 * time.Millisecond + reconcilerLoopSleepPeriod = 100 * time.Millisecond // reconcilerSyncStatesSleepPeriod is the amount of time the reconciler reconstruct process // waits between successive executions - reconcilerSyncStatesSleepPeriod time.Duration = 3 * time.Minute + reconcilerSyncStatesSleepPeriod = 3 * time.Minute // desiredStateOfWorldPopulatorLoopSleepPeriod is the amount of time the // DesiredStateOfWorldPopulator loop waits between successive executions - desiredStateOfWorldPopulatorLoopSleepPeriod time.Duration = 100 * time.Millisecond + desiredStateOfWorldPopulatorLoopSleepPeriod = 100 * time.Millisecond // desiredStateOfWorldPopulatorGetPodStatusRetryDuration is the amount of // time the DesiredStateOfWorldPopulator loop waits between successive pod // cleanup calls (to prevent calling containerruntime.GetPodStatus too // frequently). - desiredStateOfWorldPopulatorGetPodStatusRetryDuration time.Duration = 2 * time.Second + desiredStateOfWorldPopulatorGetPodStatusRetryDuration = 2 * time.Second // podAttachAndMountTimeout is the maximum amount of time the // WaitForAttachAndMount call will wait for all volumes in the specified pod @@ -75,11 +75,11 @@ const ( // request to the pod). // Value is slightly offset from 2 minutes to make timeouts due to this // constant recognizable. 
- podAttachAndMountTimeout time.Duration = 2*time.Minute + 3*time.Second + podAttachAndMountTimeout = 2*time.Minute + 3*time.Second // podAttachAndMountRetryInterval is the amount of time the GetVolumesForPod // call waits before retrying - podAttachAndMountRetryInterval time.Duration = 300 * time.Millisecond + podAttachAndMountRetryInterval = 300 * time.Millisecond // waitForAttachTimeout is the maximum amount of time a // operationexecutor.Mount call will wait for a volume to be attached. @@ -87,7 +87,7 @@ const ( // minutes to complete for some volume plugins in some cases. While this // operation is waiting it only blocks other operations on the same device, // other devices are not affected. - waitForAttachTimeout time.Duration = 10 * time.Minute + waitForAttachTimeout = 10 * time.Minute ) // VolumeManager runs a set of asynchronous loops that figure out which volumes @@ -243,15 +243,15 @@ func (vm *volumeManager) Run(sourcesReady config.SourcesReady, stopCh <-chan str defer runtime.HandleCrash() go vm.desiredStateOfWorldPopulator.Run(sourcesReady, stopCh) - glog.V(2).Infof("The desired_state_of_world populator starts") + klog.V(2).Infof("The desired_state_of_world populator starts") - glog.Infof("Starting Kubelet Volume Manager") + klog.Infof("Starting Kubelet Volume Manager") go vm.reconciler.Run(stopCh) metrics.Register(vm.actualStateOfWorld, vm.desiredStateOfWorld, vm.volumePluginMgr) <-stopCh - glog.Infof("Shutting down Kubelet Volume Manager") + klog.Infof("Shutting down Kubelet Volume Manager") } func (vm *volumeManager) GetMountedVolumesForPod(podName types.UniquePodName) container.VolumeMap { @@ -295,9 +295,9 @@ func (vm *volumeManager) GetVolumesInUse() []v1.UniqueVolumeName { // that volumes are marked in use as soon as the decision is made that the // volume *should* be attached to this node until it is safely unmounted. desiredVolumes := vm.desiredStateOfWorld.GetVolumesToMount() - mountedVolumes := vm.actualStateOfWorld.GetGloballyMountedVolumes() - volumesToReportInUse := make([]v1.UniqueVolumeName, 0, len(desiredVolumes)+len(mountedVolumes)) - desiredVolumesMap := make(map[v1.UniqueVolumeName]bool, len(desiredVolumes)+len(mountedVolumes)) + allAttachedVolumes := vm.actualStateOfWorld.GetAttachedVolumes() + volumesToReportInUse := make([]v1.UniqueVolumeName, 0, len(desiredVolumes)+len(allAttachedVolumes)) + desiredVolumesMap := make(map[v1.UniqueVolumeName]bool, len(desiredVolumes)+len(allAttachedVolumes)) for _, volume := range desiredVolumes { if volume.PluginIsAttachable { @@ -308,7 +308,7 @@ func (vm *volumeManager) GetVolumesInUse() []v1.UniqueVolumeName { } } - for _, volume := range mountedVolumes { + for _, volume := range allAttachedVolumes { if volume.PluginIsAttachable { if _, exists := desiredVolumesMap[volume.VolumeName]; !exists { volumesToReportInUse = append(volumesToReportInUse, volume.VolumeName) @@ -347,7 +347,7 @@ func (vm *volumeManager) WaitForAttachAndMount(pod *v1.Pod) error { return nil } - glog.V(3).Infof("Waiting for volumes to attach and mount for pod %q", format.Pod(pod)) + klog.V(3).Infof("Waiting for volumes to attach and mount for pod %q", format.Pod(pod)) uniquePodName := util.GetUniquePodName(pod) // Some pods expect to have Setup called over and over again to update. 
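The hunks above are the mechanical half of the glog-to-klog migration: the import path changes while every call site (`Infof`, `Errorf`, `V(n).Infof`, `Fatalf`) keeps its signature. The one behavioural difference callers have to absorb is flag registration, since klog, unlike glog, does not register its flags in an `init()` function. A minimal sketch of the pattern, assuming a binary that parses its own flags; the messages below are illustrative, not taken from this diff:

```go
package main

import (
	"flag"

	"k8s.io/klog" // drop-in successor to github.com/golang/glog
)

func main() {
	// glog registered -v, -logtostderr, etc. automatically in init();
	// klog needs an explicit InitFlags call before flag.Parse.
	klog.InitFlags(nil) // nil registers the flags on flag.CommandLine
	flag.Parse()
	defer klog.Flush()

	klog.Infof("starting up")         // same call shape as glog.Infof
	klog.V(2).Infof("verbose detail") // emitted when run with -v=2
	klog.Errorf("failed: %v", "example error")
}
```

The constant cleanup in the same file is likewise behaviour-preserving: `100 * time.Millisecond` is already a `time.Duration` expression, so the explicit type on each constant declaration was redundant.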
@@ -380,7 +380,7 @@ func (vm *volumeManager) WaitForAttachAndMount(pod *v1.Pod) error { unattachedVolumes) } - glog.V(3).Infof("All volumes are attached and mounted for pod %q", format.Pod(pod)) + klog.V(3).Infof("All volumes are attached and mounted for pod %q", format.Pod(pod)) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/BUILD index 731b86303c2f..25afdd4cfc04 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/BUILD @@ -15,10 +15,10 @@ go_library( "@io_bazel_rules_go//go/platform:windows": [ "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/github.com/JeffAshton/win_pdh:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/github.com/google/cadvisor/info/v2:go_default_library", "//vendor/golang.org/x/sys/windows:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "//conditions:default": [], }), diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/perfcounter_nodestats.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/perfcounter_nodestats.go index 9777d88745a8..51fb15ae93da 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/perfcounter_nodestats.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/perfcounter_nodestats.go @@ -26,10 +26,10 @@ import ( "time" "unsafe" - "github.com/golang/glog" cadvisorapi "github.com/google/cadvisor/info/v1" "golang.org/x/sys/windows" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog" ) // MemoryStatusEx is the same as Windows structure MEMORYSTATUSEX @@ -141,19 +141,19 @@ func (p *perfCounterNodeStatsClient) getNodeInfo() nodeInfo { func (p *perfCounterNodeStatsClient) collectMetricsData(cpuCounter, memWorkingSetCounter, memCommittedBytesCounter *perfCounter) { cpuValue, err := cpuCounter.getData() if err != nil { - glog.Errorf("Unable to get cpu perf counter data; err: %v", err) + klog.Errorf("Unable to get cpu perf counter data; err: %v", err) return } memWorkingSetValue, err := memWorkingSetCounter.getData() if err != nil { - glog.Errorf("Unable to get memWorkingSet perf counter data; err: %v", err) + klog.Errorf("Unable to get memWorkingSet perf counter data; err: %v", err) return } memCommittedBytesValue, err := memCommittedBytesCounter.getData() if err != nil { - glog.Errorf("Unable to get memCommittedBytes perf counter data; err: %v", err) + klog.Errorf("Unable to get memCommittedBytes perf counter data; err: %v", err) return } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/BUILD index 4460f8736889..0eea6adcfe60 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/BUILD @@ -33,7 +33,8 @@ go_library( "//pkg/util/node:go_default_library", "//pkg/util/oom:go_default_library", "//pkg/util/sysctl:go_default_library", - "//pkg/volume/empty_dir:go_default_library", + "//pkg/volume/emptydir:go_default_library", + "//pkg/volume/projected:go_default_library", "//pkg/volume/secret:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ 
-48,7 +49,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//test/utils:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", "//vendor/k8s.io/utils/pointer:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/controller.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/controller.go index aa7482ea3bef..3ae38b2f5ef7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/controller.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/controller.go @@ -33,7 +33,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/kubernetes/pkg/controller" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -99,7 +99,7 @@ func NewKubemarkController(externalClient kubeclient.Interface, externalInformer nodesToDelete: make(map[string]bool), nodesToDeleteLock: sync.Mutex{}, }, - rand: rand.New(rand.NewSource(time.Now().UTC().UnixNano())), + rand: rand.New(rand.NewSource(time.Now().UnixNano())), createNodeQueue: make(chan string, 1000), nodeGroupQueueSize: make(map[string]int), nodeGroupQueueSizeLock: sync.Mutex{}, @@ -125,7 +125,7 @@ func (kubemarkController *KubemarkController) WaitForCacheSync(stopCh chan struc func (kubemarkController *KubemarkController) Run(stopCh chan struct{}) { nodeTemplate, err := kubemarkController.getNodeTemplate() if err != nil { - glog.Fatalf("failed to get node template: %s", err) + klog.Fatalf("failed to get node template: %s", err) } kubemarkController.nodeTemplate = nodeTemplate @@ -239,7 +239,7 @@ func (kubemarkController *KubemarkController) addNodeToNodeGroup(nodeGroup strin func (kubemarkController *KubemarkController) RemoveNodeFromNodeGroup(nodeGroup string, node string) error { pod := kubemarkController.getPodByName(node) if pod == nil { - glog.Warningf("Can't delete node %s from nodegroup %s. Node does not exist.", node, nodeGroup) + klog.Warningf("Can't delete node %s from nodegroup %s. Node does not exist.", node, nodeGroup) return nil } if pod.ObjectMeta.Labels[nodeGroupLabel] != nodeGroup { @@ -252,7 +252,7 @@ func (kubemarkController *KubemarkController) RemoveNodeFromNodeGroup(nodeGroup pod.ObjectMeta.Labels["name"], &metav1.DeleteOptions{PropagationPolicy: &policy}) if err == nil { - glog.Infof("marking node %s for deletion", node) + klog.Infof("marking node %s for deletion", node) // Mark node for deletion from kubemark cluster. // Once it becomes unready after replication controller // deletion has been noticed, we will delete it explicitly. 
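One change in `controller.go` above is easy to misread as behavioural: the seed for `rand.New` drops its `.UTC()` call. Nothing actually changes, because Unix time identifies an absolute instant and is independent of the `time.Location` attached for rendering. A standalone check (not part of this diff) that the two seeds are always equal:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	now := time.Now()
	// UTC() only swaps the Location used when formatting the Time;
	// the underlying instant, and hence UnixNano, is unchanged.
	fmt.Println(now.UnixNano() == now.UTC().UnixNano()) // prints: true
}
```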
@@ -340,7 +340,7 @@ func (kubemarkController *KubemarkController) runNodeCreation(stop <-chan struct kubemarkController.nodeGroupQueueSizeLock.Lock() err := kubemarkController.addNodeToNodeGroup(nodeGroup) if err != nil { - glog.Errorf("failed to add node to node group %s: %v", nodeGroup, err) + klog.Errorf("failed to add node to node group %s: %v", nodeGroup, err) } else { kubemarkController.nodeGroupQueueSize[nodeGroup]-- } @@ -376,7 +376,7 @@ func (kubemarkCluster *kubemarkCluster) removeUnneededNodes(oldObj interface{}, if kubemarkCluster.nodesToDelete[node.Name] { kubemarkCluster.nodesToDelete[node.Name] = false if err := kubemarkCluster.client.CoreV1().Nodes().Delete(node.Name, &metav1.DeleteOptions{}); err != nil { - glog.Errorf("failed to delete node %s from kubemark cluster, err: %v", node.Name, err) + klog.Errorf("failed to delete node %s from kubemark cluster, err: %v", node.Name, err) } } return diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/hollow_kubelet.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/hollow_kubelet.go index bd83d93d2590..875d84c30033 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/hollow_kubelet.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/hollow_kubelet.go @@ -32,11 +32,12 @@ import ( kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/oom" - "k8s.io/kubernetes/pkg/volume/empty_dir" + "k8s.io/kubernetes/pkg/volume/emptydir" + "k8s.io/kubernetes/pkg/volume/projected" "k8s.io/kubernetes/pkg/volume/secret" "k8s.io/kubernetes/test/utils" - "github.com/golang/glog" + "k8s.io/klog" ) type HollowKubelet struct { @@ -62,8 +63,9 @@ func NewHollowKubelet( // ----------------- // Injected objects // ----------------- - volumePlugins := empty_dir.ProbeVolumePlugins() + volumePlugins := emptydir.ProbeVolumePlugins() volumePlugins = append(volumePlugins, secret.ProbeVolumePlugins()...) + volumePlugins = append(volumePlugins, projected.ProbeVolumePlugins()...) d := &kubelet.Dependencies{ KubeClient: client, HeartbeatClient: client, @@ -91,7 +93,7 @@ func (hk *HollowKubelet) Run() { KubeletFlags: *hk.KubeletFlags, KubeletConfiguration: *hk.KubeletConfiguration, }, hk.KubeletDeps, false); err != nil { - glog.Fatalf("Failed to run HollowKubelet: %v. Exiting.", err) + klog.Fatalf("Failed to run HollowKubelet: %v. 
Exiting.", err) } select {} } @@ -107,7 +109,7 @@ func GetHollowKubeletConfig( testRootDir := utils.MakeTempDirOrDie("hollow-kubelet.", "") podFilePath := utils.MakeTempDirOrDie("static-pods", testRootDir) - glog.Infof("Using %s as root dir for hollow-kubelet", testRootDir) + klog.Infof("Using %s as root dir for hollow-kubelet", testRootDir) // Flags struct f := options.NewKubeletFlags() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/hollow_proxy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/hollow_proxy.go index 8af54ae17639..5e4a7ec8897f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/hollow_proxy.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubemark/hollow_proxy.go @@ -18,11 +18,9 @@ package kubemark import ( "fmt" - "net" "time" "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" clientset "k8s.io/client-go/kubernetes" v1core "k8s.io/client-go/kubernetes/typed/core/v1" @@ -37,7 +35,7 @@ import ( utilexec "k8s.io/utils/exec" utilpointer "k8s.io/utils/pointer" - "github.com/golang/glog" + "k8s.io/klog" ) type HollowProxy struct { @@ -90,7 +88,7 @@ func NewHollowProxyOrDie( 0, "10.0.0.0/8", nodeName, - getNodeIP(client, nodeName), + utilnode.GetNodeIP(client, nodeName), recorder, nil, []string{}, @@ -135,21 +133,6 @@ func NewHollowProxyOrDie( func (hp *HollowProxy) Run() { if err := hp.ProxyServer.Run(); err != nil { - glog.Fatalf("Error while running proxy: %v\n", err) + klog.Fatalf("Error while running proxy: %v\n", err) } } - -func getNodeIP(client clientset.Interface, hostname string) net.IP { - var nodeIP net.IP - node, err := client.CoreV1().Nodes().Get(hostname, metav1.GetOptions{}) - if err != nil { - glog.Warningf("Failed to retrieve node info: %v", err) - return nil - } - nodeIP, err = utilnode.GetNodeHostIP(node) - if err != nil { - glog.Warningf("Failed to retrieve node IP: %v", err) - return nil - } - return nodeIP -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/master/ports/ports.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/master/ports/ports.go index 19207a1012b9..23faba1d3ec5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/master/ports/ports.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/master/ports/ports.go @@ -23,9 +23,10 @@ const ( // KubeletPort is the default port for the kubelet server on each host machine. // May be overridden by a flag at startup. KubeletPort = 10250 - // SchedulerPort is the default port for the scheduler status server. + // InsecureSchedulerPort is the default port for the scheduler status server. // May be overridden by a flag at startup. - SchedulerPort = 10251 + // Deprecated: use the secure KubeSchedulerPort instead. + InsecureSchedulerPort = 10251 // InsecureKubeControllerManagerPort is the default port for the controller manager status server. // May be overridden by a flag at startup. // Deprecated: use the secure KubeControllerManagerPort instead. @@ -49,4 +50,8 @@ const ( // CloudControllerManagerPort is the default port for the cloud controller manager server. // This value may be overridden by a flag at startup. CloudControllerManagerPort = 10258 + + // KubeSchedulerPort is the default port for the scheduler status server. + // May be overridden by a flag at startup. 
+ KubeSchedulerPort = 10259 ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/.import-restrictions b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/.import-restrictions deleted file mode 100644 index 5ec789450231..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/.import-restrictions +++ /dev/null @@ -1,10 +0,0 @@ -{ - "Rules": [ - { - "SelectorRegexp": "k8s[.]io/kubernetes/pkg/(api$|apis/)", - "ForbiddenPrefixes": [ - "k8s.io/kubernetes/pkg/printers" - ] - } - ] -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/BUILD deleted file mode 100644 index 4d4ab061e8e6..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/BUILD +++ /dev/null @@ -1,70 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = [ - "customcolumn.go", - "customcolumn_flags.go", - "humanreadable.go", - "interface.go", - "tabwriter.go", - ], - importpath = "k8s.io/kubernetes/pkg/printers", - deps = [ - "//pkg/kubectl/scheme:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", - "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", - "//staging/src/k8s.io/client-go/util/jsonpath:go_default_library", - "//vendor/github.com/spf13/cobra:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//pkg/printers/internalversion:all-srcs", - "//pkg/printers/storage:all-srcs", - ], - tags = ["automanaged"], -) - -go_test( - name = "go_default_test", - srcs = [ - "customcolumn_flags_test.go", - "customcolumn_test.go", - "humanreadable_test.go", - ], - embed = [":go_default_library"], - deps = [ - "//pkg/api/legacyscheme:go_default_library", - "//pkg/apis/core:go_default_library", - "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", - ], -) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/OWNERS deleted file mode 100644 index a8ab5736b889..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -approvers: -- smarterclayton -- sig-cli-maintainers -reviewers: -- smarterclayton -- sig-cli diff --git 
a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/customcolumn.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/customcolumn.go deleted file mode 100644 index b0ab6bc239cb..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/customcolumn.go +++ /dev/null @@ -1,242 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package printers - -import ( - "bufio" - "bytes" - "fmt" - "io" - "reflect" - "regexp" - "strings" - "text/tabwriter" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/cli-runtime/pkg/genericclioptions/printers" - "k8s.io/client-go/util/jsonpath" -) - -var jsonRegexp = regexp.MustCompile("^\\{\\.?([^{}]+)\\}$|^\\.?([^{}]+)$") - -// RelaxedJSONPathExpression attempts to be flexible with JSONPath expressions, it accepts: -// * metadata.name (no leading '.' or curly braces '{...}' -// * {metadata.name} (no leading '.') -// * .metadata.name (no curly braces '{...}') -// * {.metadata.name} (complete expression) -// And transforms them all into a valid jsonpath expression: -// {.metadata.name} -func RelaxedJSONPathExpression(pathExpression string) (string, error) { - if len(pathExpression) == 0 { - return pathExpression, nil - } - submatches := jsonRegexp.FindStringSubmatch(pathExpression) - if submatches == nil { - return "", fmt.Errorf("unexpected path string, expected a 'name1.name2' or '.name1.name2' or '{name1.name2}' or '{.name1.name2}'") - } - if len(submatches) != 3 { - return "", fmt.Errorf("unexpected submatch list: %v", submatches) - } - var fieldSpec string - if len(submatches[1]) != 0 { - fieldSpec = submatches[1] - } else { - fieldSpec = submatches[2] - } - return fmt.Sprintf("{.%s}", fieldSpec), nil -} - -// NewCustomColumnsPrinterFromSpec creates a custom columns printer from a comma separated list of
<header>:<json-path-expr> pairs. -// e.g. NAME:metadata.name,API_VERSION:apiVersion creates a printer that prints: -// -// NAME API_VERSION -// foo bar -func NewCustomColumnsPrinterFromSpec(spec string, decoder runtime.Decoder, noHeaders bool) (*CustomColumnsPrinter, error) { - if len(spec) == 0 { - return nil, fmt.Errorf("custom-columns format specified but no custom columns given") - } - parts := strings.Split(spec, ",") - columns := make([]Column, len(parts)) - for ix := range parts { - colSpec := strings.Split(parts[ix], ":") - if len(colSpec) != 2 { - return nil, fmt.Errorf("unexpected custom-columns spec: %s, expected
<header>:<json-path-expr>", parts[ix]) - } - spec, err := RelaxedJSONPathExpression(colSpec[1]) - if err != nil { - return nil, err - } - columns[ix] = Column{Header: colSpec[0], FieldSpec: spec} - } - return &CustomColumnsPrinter{Columns: columns, Decoder: decoder, NoHeaders: noHeaders}, nil -} - -func splitOnWhitespace(line string) []string { - lineScanner := bufio.NewScanner(bytes.NewBufferString(line)) - lineScanner.Split(bufio.ScanWords) - result := []string{} - for lineScanner.Scan() { - result = append(result, lineScanner.Text()) - } - return result -} - -// NewCustomColumnsPrinterFromTemplate creates a custom columns printer from a template stream. The template is expected -// to consist of two lines, whitespace separated. The first line is the header line, the second line is the jsonpath field spec -// For example, the template below: -// NAME API_VERSION -// {metadata.name} {apiVersion} -func NewCustomColumnsPrinterFromTemplate(templateReader io.Reader, decoder runtime.Decoder) (*CustomColumnsPrinter, error) { - scanner := bufio.NewScanner(templateReader) - if !scanner.Scan() { - return nil, fmt.Errorf("invalid template, missing header line. Expected format is one line of space separated headers, one line of space separated column specs.") - } - headers := splitOnWhitespace(scanner.Text()) - - if !scanner.Scan() { - return nil, fmt.Errorf("invalid template, missing spec line. Expected format is one line of space separated headers, one line of space separated column specs.") - } - specs := splitOnWhitespace(scanner.Text()) - - if len(headers) != len(specs) { - return nil, fmt.Errorf("number of headers (%d) and field specifications (%d) don't match", len(headers), len(specs)) - } - - columns := make([]Column, len(headers)) - for ix := range headers { - spec, err := RelaxedJSONPathExpression(specs[ix]) - if err != nil { - return nil, err - } - columns[ix] = Column{ - Header: headers[ix], - FieldSpec: spec, - } - } - return &CustomColumnsPrinter{Columns: columns, Decoder: decoder, NoHeaders: false}, nil -} - -// Column represents a user specified column -type Column struct { - // The header to print above the column, general style is ALL_CAPS - Header string - // The pointer to the field in the object to print in JSONPath form - // e.g. {.ObjectMeta.Name}, see pkg/util/jsonpath for more details. - FieldSpec string -} - -// CustomColumnPrinter is a printer that knows how to print arbitrary columns -// of data from templates specified in the `Columns` array -type CustomColumnsPrinter struct { - Columns []Column - Decoder runtime.Decoder - NoHeaders bool - // lastType records type of resource printed last so that we don't repeat - // header while printing same type of resources. - lastType reflect.Type -} - -func (s *CustomColumnsPrinter) PrintObj(obj runtime.Object, out io.Writer) error { - // we use reflect.Indirect here in order to obtain the actual value from a pointer. - // we need an actual value in order to retrieve the package path for an object. - // using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers.
- if printers.InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { - return fmt.Errorf(printers.InternalObjectPrinterErr) - } - - if w, found := out.(*tabwriter.Writer); !found { - w = GetNewTabWriter(out) - out = w - defer w.Flush() - } - - t := reflect.TypeOf(obj) - if !s.NoHeaders && t != s.lastType { - headers := make([]string, len(s.Columns)) - for ix := range s.Columns { - headers[ix] = s.Columns[ix].Header - } - fmt.Fprintln(out, strings.Join(headers, "\t")) - s.lastType = t - } - parsers := make([]*jsonpath.JSONPath, len(s.Columns)) - for ix := range s.Columns { - parsers[ix] = jsonpath.New(fmt.Sprintf("column%d", ix)).AllowMissingKeys(true) - if err := parsers[ix].Parse(s.Columns[ix].FieldSpec); err != nil { - return err - } - } - - if meta.IsListType(obj) { - objs, err := meta.ExtractList(obj) - if err != nil { - return err - } - for ix := range objs { - if err := s.printOneObject(objs[ix], parsers, out); err != nil { - return err - } - } - } else { - if err := s.printOneObject(obj, parsers, out); err != nil { - return err - } - } - return nil -} - -func (s *CustomColumnsPrinter) printOneObject(obj runtime.Object, parsers []*jsonpath.JSONPath, out io.Writer) error { - columns := make([]string, len(parsers)) - switch u := obj.(type) { - case *runtime.Unknown: - if len(u.Raw) > 0 { - var err error - if obj, err = runtime.Decode(s.Decoder, u.Raw); err != nil { - return fmt.Errorf("can't decode object for printing: %v (%s)", err, u.Raw) - } - } - } - - for ix := range parsers { - parser := parsers[ix] - - var values [][]reflect.Value - var err error - if unstructured, ok := obj.(runtime.Unstructured); ok { - values, err = parser.FindResults(unstructured.UnstructuredContent()) - } else { - values, err = parser.FindResults(reflect.ValueOf(obj).Elem().Interface()) - } - - if err != nil { - return err - } - valueStrings := []string{} - if len(values) == 0 || len(values[0]) == 0 { - valueStrings = append(valueStrings, "") - } - for arrIx := range values { - for valIx := range values[arrIx] { - valueStrings = append(valueStrings, fmt.Sprintf("%v", values[arrIx][valIx].Interface())) - } - } - columns[ix] = strings.Join(valueStrings, ",") - } - fmt.Fprintln(out, strings.Join(columns, "\t")) - return nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/customcolumn_flags.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/customcolumn_flags.go deleted file mode 100644 index 599d91c2728f..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/customcolumn_flags.go +++ /dev/null @@ -1,109 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package printers - -import ( - "fmt" - "os" - "strings" - - "github.com/spf13/cobra" - - "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/kubernetes/pkg/kubectl/scheme" -) - -var columnsFormats = map[string]bool{ - "custom-columns-file": true, - "custom-columns": true, -} - -// CustomColumnsPrintFlags provides default flags necessary for printing -// custom resource columns from an inline-template or file. -type CustomColumnsPrintFlags struct { - NoHeaders bool - TemplateArgument string -} - -func (f *CustomColumnsPrintFlags) AllowedFormats() []string { - formats := make([]string, 0, len(columnsFormats)) - for format := range columnsFormats { - formats = append(formats, format) - } - return formats -} - -// ToPrinter receives an templateFormat and returns a printer capable of -// handling custom-column printing. -// Returns false if the specified templateFormat does not match a supported format. -// Supported format types can be found in pkg/printers/printers.go -func (f *CustomColumnsPrintFlags) ToPrinter(templateFormat string) (ResourcePrinter, error) { - if len(templateFormat) == 0 { - return nil, genericclioptions.NoCompatiblePrinterError{} - } - - templateValue := "" - - if len(f.TemplateArgument) == 0 { - for format := range columnsFormats { - format = format + "=" - if strings.HasPrefix(templateFormat, format) { - templateValue = templateFormat[len(format):] - templateFormat = format[:len(format)-1] - break - } - } - } else { - templateValue = f.TemplateArgument - } - - if _, supportedFormat := columnsFormats[templateFormat]; !supportedFormat { - return nil, genericclioptions.NoCompatiblePrinterError{OutputFormat: &templateFormat, AllowedFormats: f.AllowedFormats()} - } - - if len(templateValue) == 0 { - return nil, fmt.Errorf("custom-columns format specified but no custom columns given") - } - - decoder := scheme.Codecs.UniversalDecoder() - - if templateFormat == "custom-columns-file" { - file, err := os.Open(templateValue) - if err != nil { - return nil, fmt.Errorf("error reading template %s, %v\n", templateValue, err) - } - defer file.Close() - p, err := NewCustomColumnsPrinterFromTemplate(file, decoder) - return p, err - } - - return NewCustomColumnsPrinterFromSpec(templateValue, decoder, f.NoHeaders) -} - -// AddFlags receives a *cobra.Command reference and binds -// flags related to custom-columns printing -func (f *CustomColumnsPrintFlags) AddFlags(c *cobra.Command) {} - -// NewCustomColumnsPrintFlags returns flags associated with -// custom-column printing, with default values set. -// NoHeaders and TemplateArgument should be set by callers. -func NewCustomColumnsPrintFlags() *CustomColumnsPrintFlags { - return &CustomColumnsPrintFlags{ - NoHeaders: false, - TemplateArgument: "", - } -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/humanreadable.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/humanreadable.go deleted file mode 100644 index bf4a761a4fb3..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/humanreadable.go +++ /dev/null @@ -1,872 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package printers - -import ( - "bytes" - "fmt" - "io" - "reflect" - "strings" - "text/tabwriter" - - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" -) - -type TablePrinter interface { - PrintTable(obj runtime.Object, options PrintOptions) (*metav1beta1.Table, error) -} - -type PrintHandler interface { - Handler(columns, columnsWithWide []string, printFunc interface{}) error - TableHandler(columns []metav1beta1.TableColumnDefinition, printFunc interface{}) error - DefaultTableHandler(columns []metav1beta1.TableColumnDefinition, printFunc interface{}) error -} - -var withNamespacePrefixColumns = []string{"NAMESPACE"} // TODO(erictune): print cluster name too. - -type handlerEntry struct { - columnDefinitions []metav1beta1.TableColumnDefinition - printRows bool - printFunc reflect.Value - args []reflect.Value -} - -// HumanReadablePrinter is an implementation of ResourcePrinter which attempts to provide -// more elegant output. It is not threadsafe, but you may call PrintObj repeatedly; headers -// will only be printed if the object type changes. This makes it useful for printing items -// received from watches. -type HumanReadablePrinter struct { - handlerMap map[reflect.Type]*handlerEntry - defaultHandler *handlerEntry - options PrintOptions - lastType interface{} - skipTabWriter bool - encoder runtime.Encoder - decoder runtime.Decoder -} - -var _ PrintHandler = &HumanReadablePrinter{} - -// NewHumanReadablePrinter creates a HumanReadablePrinter. -// If encoder and decoder are provided, an attempt to convert unstructured types to internal types is made. -func NewHumanReadablePrinter(decoder runtime.Decoder, options PrintOptions) *HumanReadablePrinter { - printer := &HumanReadablePrinter{ - handlerMap: make(map[reflect.Type]*handlerEntry), - options: options, - decoder: decoder, - } - return printer -} - -// NewTablePrinter creates a HumanReadablePrinter suitable for calling PrintTable(). -func NewTablePrinter() *HumanReadablePrinter { - return &HumanReadablePrinter{ - handlerMap: make(map[reflect.Type]*handlerEntry), - } -} - -// AddTabWriter sets whether the PrintObj function will format with tabwriter (true -// by default). -func (a *HumanReadablePrinter) AddTabWriter(t bool) *HumanReadablePrinter { - a.skipTabWriter = !t - return a -} - -func (a *HumanReadablePrinter) With(fns ...func(PrintHandler)) *HumanReadablePrinter { - for _, fn := range fns { - fn(a) - } - return a -} - -// EnsurePrintHeaders sets the HumanReadablePrinter option "NoHeaders" to false -// and removes the .lastType that was printed, which forces headers to be -// printed in cases where multiple lists of the same resource are printed -// consecutively, but are separated by non-printer related information. 
-func (h *HumanReadablePrinter) EnsurePrintHeaders() { - h.options.NoHeaders = false - h.lastType = nil -} - -// Handler adds a print handler with a given set of columns to HumanReadablePrinter instance. -// See ValidatePrintHandlerFunc for required method signature. -func (h *HumanReadablePrinter) Handler(columns, columnsWithWide []string, printFunc interface{}) error { - var columnDefinitions []metav1beta1.TableColumnDefinition - for i, column := range columns { - format := "" - if i == 0 && strings.EqualFold(column, "name") { - format = "name" - } - - columnDefinitions = append(columnDefinitions, metav1beta1.TableColumnDefinition{ - Name: column, - Description: column, - Type: "string", - Format: format, - }) - } - for _, column := range columnsWithWide { - columnDefinitions = append(columnDefinitions, metav1beta1.TableColumnDefinition{ - Name: column, - Description: column, - Type: "string", - Priority: 1, - }) - } - - printFuncValue := reflect.ValueOf(printFunc) - if err := ValidatePrintHandlerFunc(printFuncValue); err != nil { - utilruntime.HandleError(fmt.Errorf("unable to register print function: %v", err)) - return err - } - - entry := &handlerEntry{ - columnDefinitions: columnDefinitions, - printFunc: printFuncValue, - } - - objType := printFuncValue.Type().In(0) - if _, ok := h.handlerMap[objType]; ok { - err := fmt.Errorf("registered duplicate printer for %v", objType) - utilruntime.HandleError(err) - return err - } - h.handlerMap[objType] = entry - return nil -} - -// TableHandler adds a print handler with a given set of columns to HumanReadablePrinter instance. -// See ValidateRowPrintHandlerFunc for required method signature. -func (h *HumanReadablePrinter) TableHandler(columnDefinitions []metav1beta1.TableColumnDefinition, printFunc interface{}) error { - printFuncValue := reflect.ValueOf(printFunc) - if err := ValidateRowPrintHandlerFunc(printFuncValue); err != nil { - utilruntime.HandleError(fmt.Errorf("unable to register print function: %v", err)) - return err - } - entry := &handlerEntry{ - columnDefinitions: columnDefinitions, - printRows: true, - printFunc: printFuncValue, - } - - objType := printFuncValue.Type().In(0) - if _, ok := h.handlerMap[objType]; ok { - err := fmt.Errorf("registered duplicate printer for %v", objType) - utilruntime.HandleError(err) - return err - } - h.handlerMap[objType] = entry - return nil -} - -// DefaultTableHandler registers a set of columns and a print func that is given a chance to process -// any object without an explicit handler. Only the most recently set print handler is used. -// See ValidateRowPrintHandlerFunc for required method signature. -func (h *HumanReadablePrinter) DefaultTableHandler(columnDefinitions []metav1beta1.TableColumnDefinition, printFunc interface{}) error { - printFuncValue := reflect.ValueOf(printFunc) - if err := ValidateRowPrintHandlerFunc(printFuncValue); err != nil { - utilruntime.HandleError(fmt.Errorf("unable to register print function: %v", err)) - return err - } - entry := &handlerEntry{ - columnDefinitions: columnDefinitions, - printRows: true, - printFunc: printFuncValue, - } - - h.defaultHandler = entry - return nil -} - -// ValidateRowPrintHandlerFunc validates print handler signature. -// printFunc is the function that will be called to print an object. 
-// It must be of the following type: -// func printFunc(object ObjectType, options PrintOptions) ([]metav1beta1.TableRow, error) -// where ObjectType is the type of the object that will be printed, and the first -// return value is an array of rows, with each row containing a number of cells that -// match the number of columns defined for that printer function. -func ValidateRowPrintHandlerFunc(printFunc reflect.Value) error { - if printFunc.Kind() != reflect.Func { - return fmt.Errorf("invalid print handler. %#v is not a function", printFunc) - } - funcType := printFunc.Type() - if funcType.NumIn() != 2 || funcType.NumOut() != 2 { - return fmt.Errorf("invalid print handler." + - "Must accept 2 parameters and return 2 value.") - } - if funcType.In(1) != reflect.TypeOf((*PrintOptions)(nil)).Elem() || - funcType.Out(0) != reflect.TypeOf((*[]metav1beta1.TableRow)(nil)).Elem() || - funcType.Out(1) != reflect.TypeOf((*error)(nil)).Elem() { - return fmt.Errorf("invalid print handler. The expected signature is: "+ - "func handler(obj %v, options PrintOptions) ([]metav1beta1.TableRow, error)", funcType.In(0)) - } - return nil -} - -// ValidatePrintHandlerFunc validates print handler signature. -// printFunc is the function that will be called to print an object. -// It must be of the following type: -// func printFunc(object ObjectType, w io.Writer, options PrintOptions) error -// where ObjectType is the type of the object that will be printed. -// DEPRECATED: will be replaced with ValidateRowPrintHandlerFunc -func ValidatePrintHandlerFunc(printFunc reflect.Value) error { - if printFunc.Kind() != reflect.Func { - return fmt.Errorf("invalid print handler. %#v is not a function", printFunc) - } - funcType := printFunc.Type() - if funcType.NumIn() != 3 || funcType.NumOut() != 1 { - return fmt.Errorf("invalid print handler." + - "Must accept 3 parameters and return 1 value.") - } - if funcType.In(1) != reflect.TypeOf((*io.Writer)(nil)).Elem() || - funcType.In(2) != reflect.TypeOf((*PrintOptions)(nil)).Elem() || - funcType.Out(0) != reflect.TypeOf((*error)(nil)).Elem() { - return fmt.Errorf("invalid print handler. The expected signature is: "+ - "func handler(obj %v, w io.Writer, options PrintOptions) error", funcType.In(0)) - } - return nil -} - -func (h *HumanReadablePrinter) HandledResources() []string { - keys := make([]string, 0) - - for k := range h.handlerMap { - // k.String looks like "*api.PodList" and we want just "pod" - api := strings.Split(k.String(), ".") - resource := api[len(api)-1] - if strings.HasSuffix(resource, "List") { - continue - } - resource = strings.ToLower(resource) - keys = append(keys, resource) - } - return keys -} - -func (h *HumanReadablePrinter) unknown(data []byte, w io.Writer) error { - _, err := fmt.Fprintf(w, "Unknown object: %s", string(data)) - return err -} - -func printHeader(columnNames []string, w io.Writer) error { - if _, err := fmt.Fprintf(w, "%s\n", strings.Join(columnNames, "\t")); err != nil { - return err - } - return nil -} - -// PrintObj prints the obj in a human-friendly format according to the type of the obj. 
-func (h *HumanReadablePrinter) PrintObj(obj runtime.Object, output io.Writer) error { - w, found := output.(*tabwriter.Writer) - if !found && !h.skipTabWriter { - w = GetNewTabWriter(output) - output = w - defer w.Flush() - } - - // display tables following the rules of options - if table, ok := obj.(*metav1beta1.Table); ok { - if err := DecorateTable(table, h.options); err != nil { - return err - } - return PrintTable(table, output, h.options) - } - - // check if the object is unstructured. If so, let's attempt to convert it to a type we can understand before - // trying to print, since the printers are keyed by type. This is extremely expensive. - if h.decoder != nil { - obj, _ = decodeUnknownObject(obj, h.decoder) - } - - // print with a registered handler - t := reflect.TypeOf(obj) - if handler := h.handlerMap[t]; handler != nil { - includeHeaders := h.lastType != t && !h.options.NoHeaders - - if h.lastType != nil && h.lastType != t && !h.options.NoHeaders { - fmt.Fprintln(output) - } - - if err := printRowsForHandlerEntry(output, handler, obj, h.options, includeHeaders); err != nil { - return err - } - h.lastType = t - return nil - } - - // print with the default handler if set, and use the columns from the last time - if h.defaultHandler != nil { - includeHeaders := h.lastType != h.defaultHandler && !h.options.NoHeaders - - if h.lastType != nil && h.lastType != h.defaultHandler && !h.options.NoHeaders { - fmt.Fprintln(output) - } - - if err := printRowsForHandlerEntry(output, h.defaultHandler, obj, h.options, includeHeaders); err != nil { - return err - } - h.lastType = h.defaultHandler - return nil - } - - // we failed all reasonable printing efforts, report failure - return fmt.Errorf("error: unknown type %#v", obj) -} - -func hasCondition(conditions []metav1beta1.TableRowCondition, t metav1beta1.RowConditionType) bool { - for _, condition := range conditions { - if condition.Type == t { - return condition.Status == metav1beta1.ConditionTrue - } - } - return false -} - -// PrintTable prints a table to the provided output respecting the filtering rules for options -// for wide columns and filtered rows. It filters out rows that are Completed. You should call -// DecorateTable if you receive a table from a remote server before calling PrintTable. -func PrintTable(table *metav1beta1.Table, output io.Writer, options PrintOptions) error { - if !options.NoHeaders { - // avoid printing headers if we have no rows to display - if len(table.Rows) == 0 { - return nil - } - - first := true - for _, column := range table.ColumnDefinitions { - if !options.Wide && column.Priority != 0 { - continue - } - if first { - first = false - } else { - fmt.Fprint(output, "\t") - } - fmt.Fprint(output, strings.ToUpper(column.Name)) - } - fmt.Fprintln(output) - } - for _, row := range table.Rows { - first := true - for i, cell := range row.Cells { - if i >= len(table.ColumnDefinitions) { - // https://issue.k8s.io/66379 - // don't panic in case of bad output from the server, with more cells than column definitions - break - } - column := table.ColumnDefinitions[i] - if !options.Wide && column.Priority != 0 { - continue - } - if first { - first = false - } else { - fmt.Fprint(output, "\t") - } - if cell != nil { - fmt.Fprint(output, cell) - } - } - fmt.Fprintln(output) - } - return nil -} - -// DecorateTable takes a table and attempts to add label columns and the -// namespace column. It will fill empty columns with nil (if the object -// does not expose metadata). 
It returns an error if the table cannot -// be decorated. -func DecorateTable(table *metav1beta1.Table, options PrintOptions) error { - width := len(table.ColumnDefinitions) + len(options.ColumnLabels) - if options.WithNamespace { - width++ - } - if options.ShowLabels { - width++ - } - - columns := table.ColumnDefinitions - - nameColumn := -1 - if options.WithKind && !options.Kind.Empty() { - for i := range columns { - if columns[i].Format == "name" && columns[i].Type == "string" { - nameColumn = i - break - } - } - } - - if width != len(table.ColumnDefinitions) { - columns = make([]metav1beta1.TableColumnDefinition, 0, width) - if options.WithNamespace { - columns = append(columns, metav1beta1.TableColumnDefinition{ - Name: "Namespace", - Type: "string", - }) - } - columns = append(columns, table.ColumnDefinitions...) - for _, label := range formatLabelHeaders(options.ColumnLabels) { - columns = append(columns, metav1beta1.TableColumnDefinition{ - Name: label, - Type: "string", - }) - } - if options.ShowLabels { - columns = append(columns, metav1beta1.TableColumnDefinition{ - Name: "Labels", - Type: "string", - }) - } - } - - rows := table.Rows - - includeLabels := len(options.ColumnLabels) > 0 || options.ShowLabels - if includeLabels || options.WithNamespace || nameColumn != -1 { - for i := range rows { - row := rows[i] - - if nameColumn != -1 { - row.Cells[nameColumn] = fmt.Sprintf("%s/%s", strings.ToLower(options.Kind.String()), row.Cells[nameColumn]) - } - - var m metav1.Object - if obj := row.Object.Object; obj != nil { - if acc, err := meta.Accessor(obj); err == nil { - m = acc - } - } - // if we can't get an accessor, fill out the appropriate columns with empty spaces - if m == nil { - if options.WithNamespace { - r := make([]interface{}, 1, width) - row.Cells = append(r, row.Cells...) - } - for j := 0; j < width-len(row.Cells); j++ { - row.Cells = append(row.Cells, nil) - } - rows[i] = row - continue - } - - if options.WithNamespace { - r := make([]interface{}, 1, width) - r[0] = m.GetNamespace() - row.Cells = append(r, row.Cells...) - } - if includeLabels { - row.Cells = appendLabelCells(row.Cells, m.GetLabels(), options) - } - rows[i] = row - } - } - - table.ColumnDefinitions = columns - table.Rows = rows - return nil -} - -// PrintTable returns a table for the provided object, using the printer registered for that type. It returns -// a table that includes all of the information requested by options, but will not remove rows or columns. The -// caller is responsible for applying rules related to filtering rows or columns. 
-func (h *HumanReadablePrinter) PrintTable(obj runtime.Object, options PrintOptions) (*metav1beta1.Table, error) { - t := reflect.TypeOf(obj) - handler, ok := h.handlerMap[t] - if !ok { - return nil, fmt.Errorf("no table handler registered for this type %v", t) - } - if !handler.printRows { - return h.legacyPrinterToTable(obj, handler) - } - - args := []reflect.Value{reflect.ValueOf(obj), reflect.ValueOf(options)} - results := handler.printFunc.Call(args) - if !results[1].IsNil() { - return nil, results[1].Interface().(error) - } - - columns := handler.columnDefinitions - if !options.Wide { - columns = make([]metav1beta1.TableColumnDefinition, 0, len(handler.columnDefinitions)) - for i := range handler.columnDefinitions { - if handler.columnDefinitions[i].Priority != 0 { - continue - } - columns = append(columns, handler.columnDefinitions[i]) - } - } - table := &metav1beta1.Table{ - ListMeta: metav1.ListMeta{ - ResourceVersion: "", - }, - ColumnDefinitions: columns, - Rows: results[0].Interface().([]metav1beta1.TableRow), - } - if m, err := meta.ListAccessor(obj); err == nil { - table.ResourceVersion = m.GetResourceVersion() - table.SelfLink = m.GetSelfLink() - table.Continue = m.GetContinue() - } else { - if m, err := meta.CommonAccessor(obj); err == nil { - table.ResourceVersion = m.GetResourceVersion() - table.SelfLink = m.GetSelfLink() - } - } - if err := DecorateTable(table, options); err != nil { - return nil, err - } - return table, nil -} - -// printRowsForHandlerEntry prints the incremental table output (headers if the current type is -// different from lastType) including all the rows in the object. It returns the current type -// or an error, if any. -func printRowsForHandlerEntry(output io.Writer, handler *handlerEntry, obj runtime.Object, options PrintOptions, includeHeaders bool) error { - var results []reflect.Value - if handler.printRows { - args := []reflect.Value{reflect.ValueOf(obj), reflect.ValueOf(options)} - results = handler.printFunc.Call(args) - if !results[1].IsNil() { - return results[1].Interface().(error) - } - } - - if includeHeaders { - var headers []string - for _, column := range handler.columnDefinitions { - if column.Priority != 0 && !options.Wide { - continue - } - headers = append(headers, strings.ToUpper(column.Name)) - } - headers = append(headers, formatLabelHeaders(options.ColumnLabels)...) - // LABELS is always the last column. - headers = append(headers, formatShowLabelsHeader(options.ShowLabels)...) - if options.WithNamespace { - headers = append(withNamespacePrefixColumns, headers...) - } - printHeader(headers, output) - } - - if !handler.printRows { - // TODO: this code path is deprecated and will be removed when all handlers are row printers - args := []reflect.Value{reflect.ValueOf(obj), reflect.ValueOf(output), reflect.ValueOf(options)} - resultValue := handler.printFunc.Call(args)[0] - if resultValue.IsNil() { - return nil - } - return resultValue.Interface().(error) - } - - if results[1].IsNil() { - rows := results[0].Interface().([]metav1beta1.TableRow) - printRows(output, rows, options) - return nil - } - return results[1].Interface().(error) -} - -// printRows writes the provided rows to output. 
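Both PrintTable above and printRowsForHandlerEntry drive handlers through reflection: the handler function is stored as a reflect.Value keyed by the object's type and invoked with Call, with results[1] carrying the error. A self-contained sketch of that dispatch pattern, with toy Pod and PrintOptions types standing in for the real ones:

```go
package main

import (
	"fmt"
	"reflect"
)

type Pod struct{ Name string }
type PrintOptions struct{ Wide bool }

// printPod has the shape the printer machinery expects:
// func(object, options) (rows, error).
func printPod(p *Pod, o PrintOptions) ([]string, error) {
	return []string{p.Name}, nil
}

func main() {
	// Handlers are stored as reflect.Values keyed by input type and
	// invoked with Call, as printRowsForHandlerEntry does.
	handlers := map[reflect.Type]reflect.Value{
		reflect.TypeOf(&Pod{}): reflect.ValueOf(printPod),
	}

	obj := &Pod{Name: "pod-a"}
	fn := handlers[reflect.TypeOf(obj)]
	results := fn.Call([]reflect.Value{reflect.ValueOf(obj), reflect.ValueOf(PrintOptions{})})
	if !results[1].IsNil() {
		panic(results[1].Interface().(error))
	}
	fmt.Println(results[0].Interface().([]string)) // [pod-a]
}
```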
-func printRows(output io.Writer, rows []metav1beta1.TableRow, options PrintOptions) { - for _, row := range rows { - if options.WithNamespace { - if obj := row.Object.Object; obj != nil { - if m, err := meta.Accessor(obj); err == nil { - fmt.Fprint(output, m.GetNamespace()) - } - } - fmt.Fprint(output, "\t") - } - - for i, cell := range row.Cells { - if i != 0 { - fmt.Fprint(output, "\t") - } else { - // TODO: remove this once we drop the legacy printers - if options.WithKind && !options.Kind.Empty() { - fmt.Fprintf(output, "%s/%s", strings.ToLower(options.Kind.String()), cell) - continue - } - } - fmt.Fprint(output, cell) - } - - hasLabels := len(options.ColumnLabels) > 0 - if obj := row.Object.Object; obj != nil && (hasLabels || options.ShowLabels) { - if m, err := meta.Accessor(obj); err == nil { - for _, value := range labelValues(m.GetLabels(), options) { - output.Write([]byte("\t")) - output.Write([]byte(value)) - } - } - } - - output.Write([]byte("\n")) - } -} - -// legacyPrinterToTable uses the old printFunc with tabbed writer to generate a table. -// TODO: remove when all legacy printers are removed. -func (h *HumanReadablePrinter) legacyPrinterToTable(obj runtime.Object, handler *handlerEntry) (*metav1beta1.Table, error) { - printFunc := handler.printFunc - table := &metav1beta1.Table{ - ColumnDefinitions: handler.columnDefinitions, - } - - options := PrintOptions{ - NoHeaders: true, - Wide: true, - } - buf := &bytes.Buffer{} - args := []reflect.Value{reflect.ValueOf(obj), reflect.ValueOf(buf), reflect.ValueOf(options)} - - if meta.IsListType(obj) { - listInterface, ok := obj.(metav1.ListInterface) - if ok { - table.ListMeta.SelfLink = listInterface.GetSelfLink() - table.ListMeta.ResourceVersion = listInterface.GetResourceVersion() - table.ListMeta.Continue = listInterface.GetContinue() - } - - // TODO: this uses more memory than it has to, as we refactor printers we should remove the need - // for this. 
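The first-cell special case in printRows is the same rule FormatResourceName (further down in this file) implements: with --show-kind in effect, the name cell becomes "kind/name". A tiny sketch of that rule, assuming a plain string kind rather than schema.GroupKind:

```go
package main

import (
	"fmt"
	"strings"
)

// formatWithKind mirrors the first-cell branch in printRows: prefix the
// name with the lowercased kind when requested, otherwise leave it alone.
func formatWithKind(kind, name string, withKind bool) string {
	if !withKind || kind == "" {
		return name
	}
	return strings.ToLower(kind) + "/" + name
}

func main() {
	fmt.Println(formatWithKind("Pod", "nginx-abc", true))  // pod/nginx-abc
	fmt.Println(formatWithKind("Pod", "nginx-abc", false)) // nginx-abc
}
```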
- args[0] = reflect.ValueOf(obj) - resultValue := printFunc.Call(args)[0] - if !resultValue.IsNil() { - return nil, resultValue.Interface().(error) - } - data := buf.Bytes() - i := 0 - items, err := meta.ExtractList(obj) - if err != nil { - return nil, err - } - for len(data) > 0 { - cells, remainder := tabbedLineToCells(data, len(table.ColumnDefinitions)) - table.Rows = append(table.Rows, metav1beta1.TableRow{ - Cells: cells, - Object: runtime.RawExtension{Object: items[i]}, - }) - data = remainder - i++ - } - } else { - args[0] = reflect.ValueOf(obj) - resultValue := printFunc.Call(args)[0] - if !resultValue.IsNil() { - return nil, resultValue.Interface().(error) - } - data := buf.Bytes() - cells, _ := tabbedLineToCells(data, len(table.ColumnDefinitions)) - table.Rows = append(table.Rows, metav1beta1.TableRow{ - Cells: cells, - Object: runtime.RawExtension{Object: obj}, - }) - } - return table, nil -} - -// TODO: this method assumes the meta/v1 server API, so should be refactored out of this package -func printUnstructured(unstructured runtime.Unstructured, w io.Writer, additionalFields []string, options PrintOptions) error { - metadata, err := meta.Accessor(unstructured) - if err != nil { - return err - } - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", metadata.GetNamespace()); err != nil { - return err - } - } - - content := unstructured.UnstructuredContent() - kind := "" - if objKind, ok := content["kind"]; ok { - if str, ok := objKind.(string); ok { - kind = str - } - } - if objAPIVersion, ok := content["apiVersion"]; ok { - if str, ok := objAPIVersion.(string); ok { - version, err := schema.ParseGroupVersion(str) - if err != nil { - return err - } - kind = kind + "." + version.Version + "." + version.Group - } - } - - name := FormatResourceName(options.Kind, metadata.GetName(), options.WithKind) - - if _, err := fmt.Fprintf(w, "%s\t%s", name, kind); err != nil { - return err - } - for _, field := range additionalFields { - if value, ok := content[field]; ok { - var formattedValue string - switch typedValue := value.(type) { - case []interface{}: - formattedValue = fmt.Sprintf("%d item(s)", len(typedValue)) - default: - formattedValue = fmt.Sprintf("%v", value) - } - if _, err := fmt.Fprintf(w, "\t%s", formattedValue); err != nil { - return err - } - } - } - if _, err := fmt.Fprint(w, AppendLabels(metadata.GetLabels(), options.ColumnLabels)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, metadata.GetLabels())); err != nil { - return err - } - - return nil -} - -func formatLabelHeaders(columnLabels []string) []string { - formHead := make([]string, len(columnLabels)) - for i, l := range columnLabels { - p := strings.Split(l, "/") - formHead[i] = strings.ToUpper((p[len(p)-1])) - } - return formHead -} - -// headers for --show-labels=true -func formatShowLabelsHeader(showLabels bool) []string { - if showLabels { - return []string{"LABELS"} - } - return nil -} - -// labelValues returns a slice of value columns matching the requested print options. -func labelValues(itemLabels map[string]string, opts PrintOptions) []string { - var values []string - for _, key := range opts.ColumnLabels { - values = append(values, itemLabels[key]) - } - if opts.ShowLabels { - values = append(values, labels.FormatLabels(itemLabels)) - } - return values -} - -// appendLabelCells returns a slice of value columns matching the requested print options. -// Intended for use with tables. 
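legacyPrinterToTable above works by capturing a legacy printer's tab- and newline-delimited output in a buffer and re-parsing it into cells with tabbedLineToCells (defined near the end of this file). A runnable sketch of that capture-then-reparse loop, with a toy legacyPrint standing in for a real legacy printFunc and string cells instead of interface{}:

```go
package main

import (
	"bytes"
	"fmt"
)

// legacyPrint stands in for a legacy printFunc: it knows nothing about
// tables and just writes tab-separated lines to a buffer.
func legacyPrint(buf *bytes.Buffer) {
	fmt.Fprintf(buf, "nginx\tRunning\t2m\n")
	fmt.Fprintf(buf, "redis\tPending\t5s\n")
}

// tabbedLineToCells mirrors the helper below: consume one newline-terminated
// record and split it into exactly `expected` tab-separated cells; a short
// line leaves the remaining cells empty.
func tabbedLineToCells(data []byte, expected int) ([]string, []byte) {
	var remainder []byte
	if nl := bytes.IndexByte(data, '\n'); nl != -1 {
		remainder = data[nl+1:]
		data = data[:nl]
	}
	cells := make([]string, expected)
	for i := 0; i < expected; i++ {
		tab := bytes.IndexByte(data, '\t')
		if tab == -1 {
			cells[i] = string(data)
			break
		}
		cells[i] = string(data[:tab])
		data = data[tab+1:]
	}
	return cells, remainder
}

func main() {
	// The same capture-then-reparse loop legacyPrinterToTable runs.
	buf := &bytes.Buffer{}
	legacyPrint(buf)
	data := buf.Bytes()
	for len(data) > 0 {
		var cells []string
		cells, data = tabbedLineToCells(data, 3)
		fmt.Println(cells)
	}
}
```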
-func appendLabelCells(values []interface{}, itemLabels map[string]string, opts PrintOptions) []interface{} { - for _, key := range opts.ColumnLabels { - values = append(values, itemLabels[key]) - } - if opts.ShowLabels { - values = append(values, labels.FormatLabels(itemLabels)) - } - return values -} - -// FormatResourceName receives a resource kind, name, and boolean specifying -// whether or not to update the current name to "kind/name" -func FormatResourceName(kind schema.GroupKind, name string, withKind bool) string { - if !withKind || kind.Empty() { - return name - } - - return strings.ToLower(kind.String()) + "/" + name -} - -func AppendLabels(itemLabels map[string]string, columnLabels []string) string { - var buffer bytes.Buffer - - for _, cl := range columnLabels { - buffer.WriteString(fmt.Sprint("\t")) - if il, ok := itemLabels[cl]; ok { - buffer.WriteString(fmt.Sprint(il)) - } else { - buffer.WriteString("") - } - } - - return buffer.String() -} - -// Append all labels to a single column. We need this even when show-labels flag* is -// false, since this adds newline delimiter to the end of each row. -func AppendAllLabels(showLabels bool, itemLabels map[string]string) string { - var buffer bytes.Buffer - - if showLabels { - buffer.WriteString(fmt.Sprint("\t")) - buffer.WriteString(labels.FormatLabels(itemLabels)) - } - buffer.WriteString("\n") - - return buffer.String() -} - -// check if the object is unstructured. If so, attempt to convert it to a type we can understand. -func decodeUnknownObject(obj runtime.Object, decoder runtime.Decoder) (runtime.Object, error) { - var err error - switch t := obj.(type) { - case runtime.Unstructured: - if objBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj); err == nil { - if decodedObj, err := runtime.Decode(decoder, objBytes); err == nil { - obj = decodedObj - } - } - case *runtime.Unknown: - if decodedObj, err := runtime.Decode(decoder, t.Raw); err == nil { - obj = decodedObj - } - } - - return obj, err -} - -func tabbedLineToCells(data []byte, expected int) ([]interface{}, []byte) { - var remainder []byte - max := bytes.Index(data, []byte("\n")) - if max != -1 { - remainder = data[max+1:] - data = data[:max] - } - cells := make([]interface{}, expected) - for i := 0; i < expected; i++ { - next := bytes.Index(data, []byte("\t")) - if next == -1 { - cells[i] = string(data) - // fill the remainder with empty strings, this indicates a printer bug - for j := i + 1; j < expected; j++ { - cells[j] = "" - } - break - } - cells[i] = string(data[:next]) - data = data[next+1:] - } - return cells, remainder -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/interface.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/interface.go deleted file mode 100644 index f528de5caa61..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/interface.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package printers - -import ( - "fmt" - "io" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// ResourcePrinter is an interface that knows how to print runtime objects. -type ResourcePrinter interface { - // Print receives a runtime object, formats it and prints it to a writer. - PrintObj(runtime.Object, io.Writer) error -} - -// ResourcePrinterFunc is a function that can print objects -type ResourcePrinterFunc func(runtime.Object, io.Writer) error - -// PrintObj implements ResourcePrinter -func (fn ResourcePrinterFunc) PrintObj(obj runtime.Object, w io.Writer) error { - return fn(obj, w) -} - -type PrintOptions struct { - // supported Format types can be found in pkg/printers/printers.go - OutputFormatType string - OutputFormatArgument string - - NoHeaders bool - WithNamespace bool - WithKind bool - Wide bool - ShowAll bool - ShowLabels bool - AbsoluteTimestamps bool - Kind schema.GroupKind - ColumnLabels []string - - SortBy string - - // indicates if it is OK to ignore missing keys for rendering an output template. - AllowMissingKeys bool -} - -// Describer generates output for the named resource or an error -// if the output could not be generated. Implementers typically -// abstract the retrieval of the named object from a remote server. -type Describer interface { - Describe(namespace, name string, describerSettings DescriberSettings) (output string, err error) -} - -// DescriberSettings holds display configuration for each object -// describer to control what is printed. -type DescriberSettings struct { - ShowEvents bool -} - -// ObjectDescriber is an interface for displaying arbitrary objects with extra -// information. Use when an object is in hand (on disk, or already retrieved). -// Implementers may ignore the additional information passed on extra, or use it -// by default. ObjectDescribers may return ErrNoDescriber if no suitable describer -// is found. -type ObjectDescriber interface { - DescribeObject(object interface{}, extra ...interface{}) (output string, err error) -} - -// ErrNoDescriber is a structured error indicating the provided object or objects -// cannot be described. -type ErrNoDescriber struct { - Types []string -} - -// Error implements the error interface. 
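ResourcePrinterFunc above is the classic function-adapter pattern (compare net/http's HandlerFunc): any function with the right signature satisfies the interface without a wrapper struct. A minimal sketch with interface{} standing in for runtime.Object:

```go
package main

import (
	"fmt"
	"io"
	"os"
)

// Printer mirrors ResourcePrinter from the deleted interface.go.
type Printer interface {
	PrintObj(obj interface{}, w io.Writer) error
}

// PrinterFunc is the adapter: the method simply calls the function itself,
// exactly as ResourcePrinterFunc.PrintObj does.
type PrinterFunc func(obj interface{}, w io.Writer) error

func (fn PrinterFunc) PrintObj(obj interface{}, w io.Writer) error {
	return fn(obj, w)
}

func main() {
	var p Printer = PrinterFunc(func(obj interface{}, w io.Writer) error {
		_, err := fmt.Fprintf(w, "%v\n", obj)
		return err
	})
	_ = p.PrintObj("nginx", os.Stdout)
}
```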
-func (e ErrNoDescriber) Error() string { - return fmt.Sprintf("no describer has been defined for %v", e.Types) -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/internalversion/.import-restrictions b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/internalversion/.import-restrictions deleted file mode 100644 index ce172cfbd9d5..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/internalversion/.import-restrictions +++ /dev/null @@ -1,4 +0,0 @@ -{ - "Rules": [ - ] -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/internalversion/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/internalversion/BUILD deleted file mode 100644 index e27e9f9ec15a..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/internalversion/BUILD +++ /dev/null @@ -1,133 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_test( - name = "go_default_test", - srcs = [ - "describe_test.go", - "printers_test.go", - "sorted_resource_name_list_test.go", - ], - embed = [":go_default_library"], - deps = [ - "//pkg/api/legacyscheme:go_default_library", - "//pkg/api/testapi:go_default_library", - "//pkg/apis/apps:go_default_library", - "//pkg/apis/autoscaling:go_default_library", - "//pkg/apis/batch:go_default_library", - "//pkg/apis/coordination:go_default_library", - "//pkg/apis/core:go_default_library", - "//pkg/apis/extensions:go_default_library", - "//pkg/apis/networking:go_default_library", - "//pkg/apis/policy:go_default_library", - "//pkg/apis/storage:go_default_library", - "//pkg/client/clientset_generated/internalclientset:go_default_library", - "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", - "//pkg/printers:go_default_library", - "//staging/src/k8s.io/api/apps/v1:go_default_library", - "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer/yaml:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", - "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", - "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", - "//vendor/github.com/ghodss/yaml:go_default_library", - "//vendor/k8s.io/utils/pointer:go_default_library", - ], -) - -go_library( - name = "go_default_library", - srcs = [ - "describe.go", - "printers.go", - ], - importpath = "k8s.io/kubernetes/pkg/printers/internalversion", - deps = [ - "//pkg/api/events:go_default_library", - "//pkg/api/legacyscheme:go_default_library", - "//pkg/api/ref:go_default_library", - "//pkg/api/resource:go_default_library", - 
"//pkg/apis/apps:go_default_library", - "//pkg/apis/autoscaling:go_default_library", - "//pkg/apis/batch:go_default_library", - "//pkg/apis/certificates:go_default_library", - "//pkg/apis/coordination:go_default_library", - "//pkg/apis/core:go_default_library", - "//pkg/apis/core/helper:go_default_library", - "//pkg/apis/core/helper/qos:go_default_library", - "//pkg/apis/extensions:go_default_library", - "//pkg/apis/networking:go_default_library", - "//pkg/apis/policy:go_default_library", - "//pkg/apis/rbac:go_default_library", - "//pkg/apis/rbac/v1:go_default_library", - "//pkg/apis/scheduling:go_default_library", - "//pkg/apis/storage:go_default_library", - "//pkg/apis/storage/util:go_default_library", - "//pkg/client/clientset_generated/internalclientset:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", - "//pkg/controller/deployment/util:go_default_library", - "//pkg/fieldpath:go_default_library", - "//pkg/printers:go_default_library", - "//pkg/registry/rbac/validation:go_default_library", - "//pkg/util/node:go_default_library", - "//pkg/util/slice:go_default_library", - "//staging/src/k8s.io/api/apps/v1:go_default_library", - "//staging/src/k8s.io/api/apps/v1beta1:go_default_library", - "//staging/src/k8s.io/api/autoscaling/v2beta1:go_default_library", - "//staging/src/k8s.io/api/batch/v1:go_default_library", - "//staging/src/k8s.io/api/batch/v1beta1:go_default_library", - "//staging/src/k8s.io/api/certificates/v1beta1:go_default_library", - "//staging/src/k8s.io/api/coordination/v1beta1:go_default_library", - "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library", - "//staging/src/k8s.io/api/rbac/v1:go_default_library", - "//staging/src/k8s.io/api/rbac/v1beta1:go_default_library", - "//staging/src/k8s.io/api/storage/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/duration:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//staging/src/k8s.io/client-go/dynamic:go_default_library", - "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//staging/src/k8s.io/client-go/rest:go_default_library", - "//vendor/github.com/fatih/camelcase:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/internalversion/printers.go 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/internalversion/printers.go deleted file mode 100644 index cb4a3e07e97c..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/internalversion/printers.go +++ /dev/null @@ -1,1886 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - "bytes" - "fmt" - "io" - "net" - "strconv" - "strings" - "time" - - appsv1beta1 "k8s.io/api/apps/v1beta1" - autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" - batchv1 "k8s.io/api/batch/v1" - batchv1beta1 "k8s.io/api/batch/v1beta1" - certificatesv1beta1 "k8s.io/api/certificates/v1beta1" - coordinationv1beta1 "k8s.io/api/coordination/v1beta1" - apiv1 "k8s.io/api/core/v1" - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" - rbacv1beta1 "k8s.io/api/rbac/v1beta1" - storagev1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/duration" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/apis/autoscaling" - "k8s.io/kubernetes/pkg/apis/batch" - "k8s.io/kubernetes/pkg/apis/certificates" - "k8s.io/kubernetes/pkg/apis/coordination" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/core/helper" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/apis/networking" - "k8s.io/kubernetes/pkg/apis/policy" - "k8s.io/kubernetes/pkg/apis/rbac" - "k8s.io/kubernetes/pkg/apis/storage" - storageutil "k8s.io/kubernetes/pkg/apis/storage/util" - "k8s.io/kubernetes/pkg/printers" - "k8s.io/kubernetes/pkg/util/node" -) - -const ( - loadBalancerWidth = 16 - - // labelNodeRolePrefix is a label prefix for node roles - // It's copied over to here until it's merged in core: https://github.com/kubernetes/kubernetes/pull/39112 - labelNodeRolePrefix = "node-role.kubernetes.io/" - - // nodeLabelRole specifies the role of a node - nodeLabelRole = "kubernetes.io/role" -) - -// AddHandlers adds print handlers for default Kubernetes types dealing with internal versions. 
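The two role-label constants above feed the ROLES column of the node printer. The helper that consumes them is not part of this hunk, so the sketch below reconstructs the documented intent only: collect the suffix of every node-role.kubernetes.io/<role> label plus the value of the legacy kubernetes.io/role label (nodeRoles is an illustrative name):

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

const (
	labelNodeRolePrefix = "node-role.kubernetes.io/"
	nodeLabelRole       = "kubernetes.io/role"
)

// nodeRoles derives a ROLES cell from node labels: role-prefix suffixes
// plus the legacy role label's value, deduplicated and sorted.
func nodeRoles(labels map[string]string) string {
	roles := map[string]bool{}
	for k, v := range labels {
		switch {
		case strings.HasPrefix(k, labelNodeRolePrefix):
			if role := strings.TrimPrefix(k, labelNodeRolePrefix); role != "" {
				roles[role] = true
			}
		case k == nodeLabelRole && v != "":
			roles[v] = true
		}
	}
	if len(roles) == 0 {
		return "<none>"
	}
	out := make([]string, 0, len(roles))
	for r := range roles {
		out = append(out, r)
	}
	sort.Strings(out)
	return strings.Join(out, ",")
}

func main() {
	fmt.Println(nodeRoles(map[string]string{"node-role.kubernetes.io/master": ""})) // master
	fmt.Println(nodeRoles(map[string]string{}))                                     // <none>
}
```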
-// TODO: handle errors from Handler -func AddHandlers(h printers.PrintHandler) { - podColumnDefinitions := []metav1beta1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Ready", Type: "string", Description: "The aggregate readiness state of this pod for accepting traffic."}, - {Name: "Status", Type: "string", Description: "The aggregate status of the containers in this pod."}, - {Name: "Restarts", Type: "integer", Description: "The number of times the containers in this pod have been restarted."}, - {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - {Name: "IP", Type: "string", Priority: 1, Description: apiv1.PodStatus{}.SwaggerDoc()["podIP"]}, - {Name: "Node", Type: "string", Priority: 1, Description: apiv1.PodSpec{}.SwaggerDoc()["nodeName"]}, - {Name: "Nominated Node", Type: "string", Priority: 1, Description: apiv1.PodStatus{}.SwaggerDoc()["nominatedNodeName"]}, - } - h.TableHandler(podColumnDefinitions, printPodList) - h.TableHandler(podColumnDefinitions, printPod) - - podTemplateColumnDefinitions := []metav1beta1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Containers", Type: "string", Description: "Names of each container in the template."}, - {Name: "Images", Type: "string", Description: "Images referenced by each container in the template."}, - {Name: "Pod Labels", Type: "string", Description: "The labels for the pod template."}, - } - h.TableHandler(podTemplateColumnDefinitions, printPodTemplate) - h.TableHandler(podTemplateColumnDefinitions, printPodTemplateList) - - podDisruptionBudgetColumnDefinitions := []metav1beta1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Min Available", Type: "string", Description: "The minimum number of pods that must be available."}, - {Name: "Max Unavailable", Type: "string", Description: "The maximum number of pods that may be unavailable."}, - {Name: "Allowed Disruptions", Type: "integer", Description: "Calculated number of pods that may be disrupted at this time."}, - {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - } - h.TableHandler(podDisruptionBudgetColumnDefinitions, printPodDisruptionBudget) - h.TableHandler(podDisruptionBudgetColumnDefinitions, printPodDisruptionBudgetList) - - replicationControllerColumnDefinitions := []metav1beta1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Desired", Type: "integer", Description: apiv1.ReplicationControllerSpec{}.SwaggerDoc()["replicas"]}, - {Name: "Current", Type: "integer", Description: apiv1.ReplicationControllerStatus{}.SwaggerDoc()["replicas"]}, - {Name: "Ready", Type: "integer", Description: apiv1.ReplicationControllerStatus{}.SwaggerDoc()["readyReplicas"]}, - {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - {Name: "Containers", Type: "string", Priority: 1, Description: "Names of each container in the template."}, - {Name: "Images", Type: "string", Priority: 1, Description: "Images referenced by each container in the template."}, - {Name: "Selector", Type: "string", Priority: 1, Description: apiv1.ReplicationControllerSpec{}.SwaggerDoc()["selector"]}, - } - 
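Each registration above pairs a column-definition slice with a print function whose returned cells must line up positionally with those columns; that is the whole TableHandler contract. A simplified sketch under that assumption, with toy columnDefinition and widget types in place of the real ones:

```go
package main

import "fmt"

// columnDefinition carries the same fields the handlers register:
// wide-only columns get Priority 1, everything else 0.
type columnDefinition struct {
	Name        string
	Type        string
	Format      string
	Priority    int
	Description string
}

type widget struct{ Name, Phase string }

// printWidget follows the contract: one cell per column, in column order.
func printWidget(w widget) []interface{} {
	return []interface{}{w.Name, w.Phase}
}

func main() {
	cols := []columnDefinition{
		{Name: "Name", Type: "string", Format: "name"},
		{Name: "Status", Type: "string"},
	}
	row := printWidget(widget{Name: "w1", Phase: "Ready"})
	for i, c := range cols {
		fmt.Printf("%s=%v\n", c.Name, row[i])
	}
}
```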
h.TableHandler(replicationControllerColumnDefinitions, printReplicationController) - h.TableHandler(replicationControllerColumnDefinitions, printReplicationControllerList) - - replicaSetColumnDefinitions := []metav1beta1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Desired", Type: "integer", Description: extensionsv1beta1.ReplicaSetSpec{}.SwaggerDoc()["replicas"]}, - {Name: "Current", Type: "integer", Description: extensionsv1beta1.ReplicaSetStatus{}.SwaggerDoc()["replicas"]}, - {Name: "Ready", Type: "integer", Description: extensionsv1beta1.ReplicaSetStatus{}.SwaggerDoc()["readyReplicas"]}, - {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - {Name: "Containers", Type: "string", Priority: 1, Description: "Names of each container in the template."}, - {Name: "Images", Type: "string", Priority: 1, Description: "Images referenced by each container in the template."}, - {Name: "Selector", Type: "string", Priority: 1, Description: extensionsv1beta1.ReplicaSetSpec{}.SwaggerDoc()["selector"]}, - } - h.TableHandler(replicaSetColumnDefinitions, printReplicaSet) - h.TableHandler(replicaSetColumnDefinitions, printReplicaSetList) - - daemonSetColumnDefinitions := []metav1beta1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Desired", Type: "integer", Description: extensionsv1beta1.DaemonSetStatus{}.SwaggerDoc()["desiredNumberScheduled"]}, - {Name: "Current", Type: "integer", Description: extensionsv1beta1.DaemonSetStatus{}.SwaggerDoc()["currentNumberScheduled"]}, - {Name: "Ready", Type: "integer", Description: extensionsv1beta1.DaemonSetStatus{}.SwaggerDoc()["numberReady"]}, - {Name: "Up-to-date", Type: "integer", Description: extensionsv1beta1.DaemonSetStatus{}.SwaggerDoc()["updatedNumberScheduled"]}, - {Name: "Available", Type: "integer", Description: extensionsv1beta1.DaemonSetStatus{}.SwaggerDoc()["numberAvailable"]}, - {Name: "Node Selector", Type: "string", Description: apiv1.PodSpec{}.SwaggerDoc()["nodeSelector"]}, - {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - {Name: "Containers", Type: "string", Priority: 1, Description: "Names of each container in the template."}, - {Name: "Images", Type: "string", Priority: 1, Description: "Images referenced by each container in the template."}, - {Name: "Selector", Type: "string", Priority: 1, Description: extensionsv1beta1.DaemonSetSpec{}.SwaggerDoc()["selector"]}, - } - h.TableHandler(daemonSetColumnDefinitions, printDaemonSet) - h.TableHandler(daemonSetColumnDefinitions, printDaemonSetList) - - jobColumnDefinitions := []metav1beta1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Completions", Type: "string", Description: batchv1.JobStatus{}.SwaggerDoc()["succeeded"]}, - {Name: "Duration", Type: "string", Description: "Time required to complete the job."}, - {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - {Name: "Containers", Type: "string", Priority: 1, Description: "Names of each container in the template."}, - {Name: "Images", Type: "string", Priority: 1, Description: "Images referenced by each container in the template."}, - {Name: "Selector", Type: "string", Priority: 1, Description: 
batchv1.JobSpec{}.SwaggerDoc()["selector"]}, - } - h.TableHandler(jobColumnDefinitions, printJob) - h.TableHandler(jobColumnDefinitions, printJobList) - - cronJobColumnDefinitions := []metav1beta1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Schedule", Type: "string", Description: batchv1beta1.CronJobSpec{}.SwaggerDoc()["schedule"]}, - {Name: "Suspend", Type: "boolean", Description: batchv1beta1.CronJobSpec{}.SwaggerDoc()["suspend"]}, - {Name: "Active", Type: "integer", Description: batchv1beta1.CronJobStatus{}.SwaggerDoc()["active"]}, - {Name: "Last Schedule", Type: "string", Description: batchv1beta1.CronJobStatus{}.SwaggerDoc()["lastScheduleTime"]}, - {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - {Name: "Containers", Type: "string", Priority: 1, Description: "Names of each container in the template."}, - {Name: "Images", Type: "string", Priority: 1, Description: "Images referenced by each container in the template."}, - {Name: "Selector", Type: "string", Priority: 1, Description: batchv1.JobSpec{}.SwaggerDoc()["selector"]}, - } - h.TableHandler(cronJobColumnDefinitions, printCronJob) - h.TableHandler(cronJobColumnDefinitions, printCronJobList) - - serviceColumnDefinitions := []metav1beta1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Type", Type: "string", Description: apiv1.ServiceSpec{}.SwaggerDoc()["type"]}, - {Name: "Cluster-IP", Type: "string", Description: apiv1.ServiceSpec{}.SwaggerDoc()["clusterIP"]}, - {Name: "External-IP", Type: "string", Description: apiv1.ServiceSpec{}.SwaggerDoc()["externalIPs"]}, - {Name: "Port(s)", Type: "string", Description: apiv1.ServiceSpec{}.SwaggerDoc()["ports"]}, - {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - {Name: "Selector", Type: "string", Priority: 1, Description: apiv1.ServiceSpec{}.SwaggerDoc()["selector"]}, - } - - h.TableHandler(serviceColumnDefinitions, printService) - h.TableHandler(serviceColumnDefinitions, printServiceList) - - ingressColumnDefinitions := []metav1beta1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Hosts", Type: "string", Description: "Hosts that incoming requests are matched against before the ingress rule"}, - {Name: "Address", Type: "string", Description: "Address is a list containing ingress points for the load-balancer"}, - {Name: "Ports", Type: "string", Description: "Ports of TLS configurations that open"}, - {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - } - h.TableHandler(ingressColumnDefinitions, printIngress) - h.TableHandler(ingressColumnDefinitions, printIngressList) - - statefulSetColumnDefinitions := []metav1beta1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Desired", Type: "string", Description: appsv1beta1.StatefulSetSpec{}.SwaggerDoc()["replicas"]}, - {Name: "Current", Type: "string", Description: appsv1beta1.StatefulSetStatus{}.SwaggerDoc()["replicas"]}, - {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - {Name: "Containers", Type: "string", Priority: 1, Description: "Names of each container in the template."}, - {Name: 
"Images", Type: "string", Priority: 1, Description: "Images referenced by each container in the template."}, - } - h.TableHandler(statefulSetColumnDefinitions, printStatefulSet) - h.TableHandler(statefulSetColumnDefinitions, printStatefulSetList) - - endpointColumnDefinitions := []metav1beta1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Endpoints", Type: "string", Description: apiv1.Endpoints{}.SwaggerDoc()["subsets"]}, - {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - } - h.TableHandler(endpointColumnDefinitions, printEndpoints) - h.TableHandler(endpointColumnDefinitions, printEndpointsList) - - nodeColumnDefinitions := []metav1beta1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Status", Type: "string", Description: "The status of the node"}, - {Name: "Roles", Type: "string", Description: "The roles of the node"}, - {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - {Name: "Version", Type: "string", Description: apiv1.NodeSystemInfo{}.SwaggerDoc()["kubeletVersion"]}, - {Name: "Internal-IP", Type: "string", Priority: 1, Description: apiv1.NodeStatus{}.SwaggerDoc()["addresses"]}, - {Name: "External-IP", Type: "string", Priority: 1, Description: apiv1.NodeStatus{}.SwaggerDoc()["addresses"]}, - {Name: "OS-Image", Type: "string", Priority: 1, Description: apiv1.NodeSystemInfo{}.SwaggerDoc()["osImage"]}, - {Name: "Kernel-Version", Type: "string", Priority: 1, Description: apiv1.NodeSystemInfo{}.SwaggerDoc()["kernelVersion"]}, - {Name: "Container-Runtime", Type: "string", Priority: 1, Description: apiv1.NodeSystemInfo{}.SwaggerDoc()["containerRuntimeVersion"]}, - } - - h.TableHandler(nodeColumnDefinitions, printNode) - h.TableHandler(nodeColumnDefinitions, printNodeList) - - eventColumnDefinitions := []metav1beta1.TableColumnDefinition{ - {Name: "Last Seen", Type: "string", Description: apiv1.Event{}.SwaggerDoc()["lastTimestamp"]}, - {Name: "Type", Type: "string", Description: apiv1.Event{}.SwaggerDoc()["type"]}, - {Name: "Reason", Type: "string", Description: apiv1.Event{}.SwaggerDoc()["reason"]}, - {Name: "Kind", Type: "string", Description: apiv1.Event{}.InvolvedObject.SwaggerDoc()["kind"]}, - {Name: "Source", Type: "string", Priority: 1, Description: apiv1.Event{}.SwaggerDoc()["source"]}, - {Name: "Message", Type: "string", Description: apiv1.Event{}.SwaggerDoc()["message"]}, - {Name: "Subobject", Type: "string", Priority: 1, Description: apiv1.Event{}.InvolvedObject.SwaggerDoc()["fieldPath"]}, - {Name: "First Seen", Type: "string", Priority: 1, Description: apiv1.Event{}.SwaggerDoc()["firstTimestamp"]}, - {Name: "Count", Type: "string", Priority: 1, Description: apiv1.Event{}.SwaggerDoc()["count"]}, - {Name: "Name", Type: "string", Priority: 1, Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - } - h.TableHandler(eventColumnDefinitions, printEvent) - h.TableHandler(eventColumnDefinitions, printEventList) - - namespaceColumnDefinitions := []metav1beta1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Status", Type: "string", Description: "The status of the namespace"}, - {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - } - 
h.TableHandler(namespaceColumnDefinitions, printNamespace) - h.TableHandler(namespaceColumnDefinitions, printNamespaceList) - - secretColumnDefinitions := []metav1beta1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Type", Type: "string", Description: apiv1.Secret{}.SwaggerDoc()["type"]}, - {Name: "Data", Type: "string", Description: apiv1.Secret{}.SwaggerDoc()["data"]}, - {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - } - h.TableHandler(secretColumnDefinitions, printSecret) - h.TableHandler(secretColumnDefinitions, printSecretList) - - serviceAccountColumnDefinitions := []metav1beta1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Secrets", Type: "string", Description: apiv1.ServiceAccount{}.SwaggerDoc()["secrets"]}, - {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - } - h.TableHandler(serviceAccountColumnDefinitions, printServiceAccount) - h.TableHandler(serviceAccountColumnDefinitions, printServiceAccountList) - - persistentVolumeColumnDefinitions := []metav1beta1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Capacity", Type: "string", Description: apiv1.PersistentVolumeSpec{}.SwaggerDoc()["capacity"]}, - {Name: "Access Modes", Type: "string", Description: apiv1.PersistentVolumeSpec{}.SwaggerDoc()["accessModes"]}, - {Name: "Reclaim Policy", Type: "string", Description: apiv1.PersistentVolumeSpec{}.SwaggerDoc()["persistentVolumeReclaimPolicy"]}, - {Name: "Status", Type: "string", Description: apiv1.PersistentVolumeStatus{}.SwaggerDoc()["phase"]}, - {Name: "Claim", Type: "string", Description: apiv1.PersistentVolumeSpec{}.SwaggerDoc()["claimRef"]}, - {Name: "StorageClass", Type: "string", Description: "StorageClass of the pv"}, - {Name: "Reason", Type: "string", Description: apiv1.PersistentVolumeStatus{}.SwaggerDoc()["reason"]}, - {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - } - h.TableHandler(persistentVolumeColumnDefinitions, printPersistentVolume) - h.TableHandler(persistentVolumeColumnDefinitions, printPersistentVolumeList) - - persistentVolumeClaimColumnDefinitions := []metav1beta1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Status", Type: "string", Description: apiv1.PersistentVolumeClaimStatus{}.SwaggerDoc()["phase"]}, - {Name: "Volume", Type: "string", Description: apiv1.PersistentVolumeClaimSpec{}.SwaggerDoc()["volumeName"]}, - {Name: "Capacity", Type: "string", Description: apiv1.PersistentVolumeClaimStatus{}.SwaggerDoc()["capacity"]}, - {Name: "Access Modes", Type: "string", Description: apiv1.PersistentVolumeClaimStatus{}.SwaggerDoc()["accessModes"]}, - {Name: "StorageClass", Type: "string", Description: "StorageClass of the pvc"}, - {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - } - h.TableHandler(persistentVolumeClaimColumnDefinitions, printPersistentVolumeClaim) - h.TableHandler(persistentVolumeClaimColumnDefinitions, printPersistentVolumeClaimList) - - componentStatusColumnDefinitions := []metav1beta1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: 
metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Status", Type: "string", Description: "Status of the component conditions"}, - {Name: "Message", Type: "string", Description: "Message of the component conditions"}, - {Name: "Error", Type: "string", Description: "Error of the component conditions"}, - } - h.TableHandler(componentStatusColumnDefinitions, printComponentStatus) - h.TableHandler(componentStatusColumnDefinitions, printComponentStatusList) - - deploymentColumnDefinitions := []metav1beta1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Desired", Type: "string", Description: extensionsv1beta1.DeploymentSpec{}.SwaggerDoc()["replicas"]}, - {Name: "Current", Type: "string", Description: extensionsv1beta1.DeploymentStatus{}.SwaggerDoc()["replicas"]}, - {Name: "Up-to-date", Type: "string", Description: extensionsv1beta1.DeploymentStatus{}.SwaggerDoc()["updatedReplicas"]}, - {Name: "Available", Type: "string", Description: extensionsv1beta1.DeploymentStatus{}.SwaggerDoc()["availableReplicas"]}, - {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - {Name: "Containers", Type: "string", Priority: 1, Description: "Names of each container in the template."}, - {Name: "Images", Type: "string", Priority: 1, Description: "Images referenced by each container in the template."}, - {Name: "Selector", Type: "string", Priority: 1, Description: extensionsv1beta1.DeploymentSpec{}.SwaggerDoc()["selector"]}, - } - h.TableHandler(deploymentColumnDefinitions, printDeployment) - h.TableHandler(deploymentColumnDefinitions, printDeploymentList) - - horizontalPodAutoscalerColumnDefinitions := []metav1beta1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Reference", Type: "string", Description: autoscalingv2beta1.HorizontalPodAutoscalerSpec{}.SwaggerDoc()["scaleTargetRef"]}, - {Name: "Targets", Type: "string", Description: autoscalingv2beta1.HorizontalPodAutoscalerSpec{}.SwaggerDoc()["metrics"]}, - {Name: "MinPods", Type: "string", Description: autoscalingv2beta1.HorizontalPodAutoscalerSpec{}.SwaggerDoc()["minReplicas"]}, - {Name: "MaxPods", Type: "string", Description: autoscalingv2beta1.HorizontalPodAutoscalerSpec{}.SwaggerDoc()["maxReplicas"]}, - {Name: "Replicas", Type: "string", Description: autoscalingv2beta1.HorizontalPodAutoscalerStatus{}.SwaggerDoc()["currentReplicas"]}, - {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - } - h.TableHandler(horizontalPodAutoscalerColumnDefinitions, printHorizontalPodAutoscaler) - h.TableHandler(horizontalPodAutoscalerColumnDefinitions, printHorizontalPodAutoscalerList) - - configMapColumnDefinitions := []metav1beta1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Data", Type: "string", Description: apiv1.ConfigMap{}.SwaggerDoc()["data"]}, - {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - } - h.TableHandler(configMapColumnDefinitions, printConfigMap) - h.TableHandler(configMapColumnDefinitions, printConfigMapList) - - podSecurityPolicyColumnDefinitions := []metav1beta1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Priv", Type: "string", Description: 
extensionsv1beta1.PodSecurityPolicySpec{}.SwaggerDoc()["privileged"]}, - {Name: "Caps", Type: "string", Description: extensionsv1beta1.PodSecurityPolicySpec{}.SwaggerDoc()["allowedCapabilities"]}, - {Name: "SELinux", Type: "string", Description: extensionsv1beta1.PodSecurityPolicySpec{}.SwaggerDoc()["seLinux"]}, - {Name: "RunAsUser", Type: "string", Description: extensionsv1beta1.PodSecurityPolicySpec{}.SwaggerDoc()["runAsUser"]}, - {Name: "FsGroup", Type: "string", Description: extensionsv1beta1.PodSecurityPolicySpec{}.SwaggerDoc()["fsGroup"]}, - {Name: "SupGroup", Type: "string", Description: extensionsv1beta1.PodSecurityPolicySpec{}.SwaggerDoc()["supplementalGroups"]}, - {Name: "ReadOnlyRootFs", Type: "string", Description: extensionsv1beta1.PodSecurityPolicySpec{}.SwaggerDoc()["readOnlyRootFilesystem"]}, - {Name: "Volumes", Type: "string", Description: extensionsv1beta1.PodSecurityPolicySpec{}.SwaggerDoc()["volumes"]}, - } - h.TableHandler(podSecurityPolicyColumnDefinitions, printPodSecurityPolicy) - h.TableHandler(podSecurityPolicyColumnDefinitions, printPodSecurityPolicyList) - - networkPolicyColumnDefinitions := []metav1beta1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Pod-Selector", Type: "string", Description: extensionsv1beta1.NetworkPolicySpec{}.SwaggerDoc()["podSelector"]}, - {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - } - h.TableHandler(networkPolicyColumnDefinitions, printNetworkPolicy) - h.TableHandler(networkPolicyColumnDefinitions, printNetworkPolicyList) - - roleBindingsColumnDefinitions := []metav1beta1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - {Name: "Role", Type: "string", Priority: 1, Description: rbacv1beta1.RoleBinding{}.SwaggerDoc()["roleRef"]}, - {Name: "Users", Type: "string", Priority: 1, Description: "Users in the roleBinding"}, - {Name: "Groups", Type: "string", Priority: 1, Description: "Groups in the roleBinding"}, - {Name: "ServiceAccounts", Type: "string", Priority: 1, Description: "ServiceAccounts in the roleBinding"}, - } - h.TableHandler(roleBindingsColumnDefinitions, printRoleBinding) - h.TableHandler(roleBindingsColumnDefinitions, printRoleBindingList) - - clusterRoleBindingsColumnDefinitions := []metav1beta1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - {Name: "Role", Type: "string", Priority: 1, Description: rbacv1beta1.ClusterRoleBinding{}.SwaggerDoc()["roleRef"]}, - {Name: "Users", Type: "string", Priority: 1, Description: "Users in the roleBinding"}, - {Name: "Groups", Type: "string", Priority: 1, Description: "Groups in the roleBinding"}, - {Name: "ServiceAccounts", Type: "string", Priority: 1, Description: "ServiceAccounts in the roleBinding"}, - } - h.TableHandler(clusterRoleBindingsColumnDefinitions, printClusterRoleBinding) - h.TableHandler(clusterRoleBindingsColumnDefinitions, printClusterRoleBindingList) - - certificateSigningRequestColumnDefinitions := []metav1beta1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Age", Type: 
"string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - {Name: "Requestor", Type: "string", Description: certificatesv1beta1.CertificateSigningRequestSpec{}.SwaggerDoc()["request"]}, - {Name: "Condition", Type: "string", Description: certificatesv1beta1.CertificateSigningRequestStatus{}.SwaggerDoc()["conditions"]}, - } - h.TableHandler(certificateSigningRequestColumnDefinitions, printCertificateSigningRequest) - h.TableHandler(certificateSigningRequestColumnDefinitions, printCertificateSigningRequestList) - - leaseColumnDefinitions := []metav1beta1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Holder", Type: "string", Description: coordinationv1beta1.LeaseSpec{}.SwaggerDoc()["holderIdentity"]}, - {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - } - h.TableHandler(leaseColumnDefinitions, printLease) - h.TableHandler(leaseColumnDefinitions, printLeaseList) - - storageClassColumnDefinitions := []metav1beta1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Provisioner", Type: "string", Description: storagev1.StorageClass{}.SwaggerDoc()["provisioner"]}, - {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - } - - h.TableHandler(storageClassColumnDefinitions, printStorageClass) - h.TableHandler(storageClassColumnDefinitions, printStorageClassList) - - statusColumnDefinitions := []metav1beta1.TableColumnDefinition{ - {Name: "Status", Type: "string", Description: metav1.Status{}.SwaggerDoc()["status"]}, - {Name: "Reason", Type: "string", Description: metav1.Status{}.SwaggerDoc()["reason"]}, - {Name: "Message", Type: "string", Description: metav1.Status{}.SwaggerDoc()["Message"]}, - } - - h.TableHandler(statusColumnDefinitions, printStatus) - - controllerRevisionColumnDefinition := []metav1beta1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Controller", Type: "string", Description: "Controller of the object"}, - {Name: "Revision", Type: "string", Description: appsv1beta1.ControllerRevision{}.SwaggerDoc()["revision"]}, - {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - } - h.TableHandler(controllerRevisionColumnDefinition, printControllerRevision) - h.TableHandler(controllerRevisionColumnDefinition, printControllerRevisionList) - - AddDefaultHandlers(h) -} - -// AddDefaultHandlers adds handlers that can work with most Kubernetes objects. -func AddDefaultHandlers(h printers.PrintHandler) { - // types without defined columns - objectMetaColumnDefinitions := []metav1beta1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - } - h.DefaultTableHandler(objectMetaColumnDefinitions, printObjectMeta) -} - -func printObjectMeta(obj runtime.Object, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - if meta.IsListType(obj) { - rows := make([]metav1beta1.TableRow, 0, 16) - err := meta.EachListItem(obj, func(obj runtime.Object) error { - nestedRows, err := printObjectMeta(obj, options) - if err != nil { - return err - } - rows = append(rows, nestedRows...) 
- return nil - }) - if err != nil { - return nil, err - } - return rows, nil - } - - rows := make([]metav1beta1.TableRow, 0, 1) - m, err := meta.Accessor(obj) - if err != nil { - return nil, err - } - row := metav1beta1.TableRow{ - Object: runtime.RawExtension{Object: obj}, - } - row.Cells = append(row.Cells, m.GetName(), translateTimestampSince(m.GetCreationTimestamp())) - rows = append(rows, row) - return rows, nil -} - -// Pass ports=nil for all ports. -func formatEndpoints(endpoints *api.Endpoints, ports sets.String) string { - if len(endpoints.Subsets) == 0 { - return "" - } - list := []string{} - max := 3 - more := false - count := 0 - for i := range endpoints.Subsets { - ss := &endpoints.Subsets[i] - if len(ss.Ports) == 0 { - // It's possible to have headless services with no ports. - for i := range ss.Addresses { - if len(list) == max { - more = true - } - if !more { - list = append(list, ss.Addresses[i].IP) - } - count++ - } - } else { - // "Normal" services with ports defined. - for i := range ss.Ports { - port := &ss.Ports[i] - if ports == nil || ports.Has(port.Name) { - for i := range ss.Addresses { - if len(list) == max { - more = true - } - addr := &ss.Addresses[i] - if !more { - hostPort := net.JoinHostPort(addr.IP, strconv.Itoa(int(port.Port))) - list = append(list, hostPort) - } - count++ - } - } - } - } - } - ret := strings.Join(list, ",") - if more { - return fmt.Sprintf("%s + %d more...", ret, count-max) - } - return ret -} - -// translateTimestampSince returns the elapsed time since timestamp in -// human-readable approximation. -func translateTimestampSince(timestamp metav1.Time) string { - if timestamp.IsZero() { - return "" - } - - return duration.HumanDuration(time.Since(timestamp.Time)) -} - -// translateTimestampUntil returns the elapsed time until timestamp in -// human-readable approximation. -func translateTimestampUntil(timestamp metav1.Time) string { - if timestamp.IsZero() { - return "" - } - - return duration.HumanDuration(time.Until(timestamp.Time)) -} - -var ( - podSuccessConditions = []metav1beta1.TableRowCondition{{Type: metav1beta1.RowCompleted, Status: metav1beta1.ConditionTrue, Reason: string(api.PodSucceeded), Message: "The pod has completed successfully."}} - podFailedConditions = []metav1beta1.TableRowCondition{{Type: metav1beta1.RowCompleted, Status: metav1beta1.ConditionTrue, Reason: string(api.PodFailed), Message: "The pod failed."}} -) - -func printPodList(podList *api.PodList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - rows := make([]metav1beta1.TableRow, 0, len(podList.Items)) - for i := range podList.Items { - r, err := printPod(&podList.Items[i], options) - if err != nil { - return nil, err - } - rows = append(rows, r...) 
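formatEndpoints above caps the ENDPOINTS cell at three host:port pairs and summarizes the rest as "+ N more..."; note that it keeps counting after the cap so the overflow count is exact. The truncation rule in isolation (formatAddrs is an illustrative name, reduced to a flat address list):

```go
package main

import (
	"fmt"
	"net"
	"strconv"
	"strings"
)

// formatAddrs mirrors formatEndpoints' truncation: list at most three
// host:port pairs, but keep counting so the summary is accurate.
func formatAddrs(ips []string, port int) string {
	const max = 3
	list := []string{}
	more := false
	count := 0
	for _, ip := range ips {
		if len(list) == max {
			more = true
		}
		if !more {
			list = append(list, net.JoinHostPort(ip, strconv.Itoa(port)))
		}
		count++
	}
	ret := strings.Join(list, ",")
	if more {
		return fmt.Sprintf("%s + %d more...", ret, count-max)
	}
	return ret
}

func main() {
	fmt.Println(formatAddrs([]string{"10.0.0.1", "10.0.0.2", "10.0.0.3", "10.0.0.4", "10.0.0.5"}, 80))
	// 10.0.0.1:80,10.0.0.2:80,10.0.0.3:80 + 2 more...
}
```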
- } - return rows, nil -} - -func printPod(pod *api.Pod, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - restarts := 0 - totalContainers := len(pod.Spec.Containers) - readyContainers := 0 - - reason := string(pod.Status.Phase) - if pod.Status.Reason != "" { - reason = pod.Status.Reason - } - - row := metav1beta1.TableRow{ - Object: runtime.RawExtension{Object: pod}, - } - - switch pod.Status.Phase { - case api.PodSucceeded: - row.Conditions = podSuccessConditions - case api.PodFailed: - row.Conditions = podFailedConditions - } - - initializing := false - for i := range pod.Status.InitContainerStatuses { - container := pod.Status.InitContainerStatuses[i] - restarts += int(container.RestartCount) - switch { - case container.State.Terminated != nil && container.State.Terminated.ExitCode == 0: - continue - case container.State.Terminated != nil: - // initialization is failed - if len(container.State.Terminated.Reason) == 0 { - if container.State.Terminated.Signal != 0 { - reason = fmt.Sprintf("Init:Signal:%d", container.State.Terminated.Signal) - } else { - reason = fmt.Sprintf("Init:ExitCode:%d", container.State.Terminated.ExitCode) - } - } else { - reason = "Init:" + container.State.Terminated.Reason - } - initializing = true - case container.State.Waiting != nil && len(container.State.Waiting.Reason) > 0 && container.State.Waiting.Reason != "PodInitializing": - reason = "Init:" + container.State.Waiting.Reason - initializing = true - default: - reason = fmt.Sprintf("Init:%d/%d", i, len(pod.Spec.InitContainers)) - initializing = true - } - break - } - if !initializing { - restarts = 0 - hasRunning := false - for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- { - container := pod.Status.ContainerStatuses[i] - - restarts += int(container.RestartCount) - if container.State.Waiting != nil && container.State.Waiting.Reason != "" { - reason = container.State.Waiting.Reason - } else if container.State.Terminated != nil && container.State.Terminated.Reason != "" { - reason = container.State.Terminated.Reason - } else if container.State.Terminated != nil && container.State.Terminated.Reason == "" { - if container.State.Terminated.Signal != 0 { - reason = fmt.Sprintf("Signal:%d", container.State.Terminated.Signal) - } else { - reason = fmt.Sprintf("ExitCode:%d", container.State.Terminated.ExitCode) - } - } else if container.Ready && container.State.Running != nil { - hasRunning = true - readyContainers++ - } - } - - // change pod status back to "Running" if there is at least one container still reporting as "Running" status - if reason == "Completed" && hasRunning { - reason = "Running" - } - } - - if pod.DeletionTimestamp != nil && pod.Status.Reason == node.NodeUnreachablePodReason { - reason = "Unknown" - } else if pod.DeletionTimestamp != nil { - reason = "Terminating" - } - - row.Cells = append(row.Cells, pod.Name, fmt.Sprintf("%d/%d", readyContainers, totalContainers), reason, int64(restarts), translateTimestampSince(pod.CreationTimestamp)) - - if options.Wide { - nodeName := pod.Spec.NodeName - nominatedNodeName := pod.Status.NominatedNodeName - podIP := pod.Status.PodIP - if podIP == "" { - podIP = "" - } - if nodeName == "" { - nodeName = "" - } - if nominatedNodeName == "" { - nominatedNodeName = "" - } - row.Cells = append(row.Cells, podIP, nodeName, nominatedNodeName) - } - - return []metav1beta1.TableRow{row}, nil -} - -func printPodTemplate(obj *api.PodTemplate, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - row := metav1beta1.TableRow{ - 
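printPod's non-init loop walks container statuses in reverse, lets the last informative Waiting/Terminated reason win, counts ready running containers for the READY column, and flips "Completed" back to "Running" when something is still running. A compressed approximation of just that loop; the real function also handles init containers, termination signals, and deletion, which this sketch omits:

```go
package main

import "fmt"

// containerState is a reduced stand-in for the parts of ContainerStatus
// that printPod inspects when picking the STATUS column.
type containerState struct {
	ready         bool
	running       bool
	waitingReason string // e.g. "CrashLoopBackOff"
	doneReason    string // terminated reason, e.g. "Completed"
}

// podStatus condenses printPod's non-init loop into READY and STATUS cells.
func podStatus(phase string, containers []containerState) (string, string) {
	reason := phase
	ready, hasRunning := 0, false
	for i := len(containers) - 1; i >= 0; i-- {
		c := containers[i]
		switch {
		case c.waitingReason != "":
			reason = c.waitingReason
		case c.doneReason != "":
			reason = c.doneReason
		case c.ready && c.running:
			hasRunning = true
			ready++
		}
	}
	// At least one container still running: the pod is not really done.
	if reason == "Completed" && hasRunning {
		reason = "Running"
	}
	return fmt.Sprintf("%d/%d", ready, len(containers)), reason
}

func main() {
	readyCol, status := podStatus("Running", []containerState{
		{ready: true, running: true},
		{waitingReason: "CrashLoopBackOff"},
	})
	fmt.Println(readyCol, status) // 1/2 CrashLoopBackOff
}
```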
Object: runtime.RawExtension{Object: obj}, - } - names, images := layoutContainerCells(obj.Template.Spec.Containers) - row.Cells = append(row.Cells, obj.Name, names, images, labels.FormatLabels(obj.Template.Labels)) - return []metav1beta1.TableRow{row}, nil -} - -func printPodTemplateList(list *api.PodTemplateList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - rows := make([]metav1beta1.TableRow, 0, len(list.Items)) - for i := range list.Items { - r, err := printPodTemplate(&list.Items[i], options) - if err != nil { - return nil, err - } - rows = append(rows, r...) - } - return rows, nil -} - -func printPodDisruptionBudget(obj *policy.PodDisruptionBudget, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - row := metav1beta1.TableRow{ - Object: runtime.RawExtension{Object: obj}, - } - - var minAvailable string - var maxUnavailable string - if obj.Spec.MinAvailable != nil { - minAvailable = obj.Spec.MinAvailable.String() - } else { - minAvailable = "N/A" - } - - if obj.Spec.MaxUnavailable != nil { - maxUnavailable = obj.Spec.MaxUnavailable.String() - } else { - maxUnavailable = "N/A" - } - - row.Cells = append(row.Cells, obj.Name, minAvailable, maxUnavailable, int64(obj.Status.PodDisruptionsAllowed), translateTimestampSince(obj.CreationTimestamp)) - return []metav1beta1.TableRow{row}, nil -} - -func printPodDisruptionBudgetList(list *policy.PodDisruptionBudgetList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - rows := make([]metav1beta1.TableRow, 0, len(list.Items)) - for i := range list.Items { - r, err := printPodDisruptionBudget(&list.Items[i], options) - if err != nil { - return nil, err - } - rows = append(rows, r...) - } - return rows, nil -} - -// TODO(AdoHe): try to put wide output in a single method -func printReplicationController(obj *api.ReplicationController, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - row := metav1beta1.TableRow{ - Object: runtime.RawExtension{Object: obj}, - } - - desiredReplicas := obj.Spec.Replicas - currentReplicas := obj.Status.Replicas - readyReplicas := obj.Status.ReadyReplicas - - row.Cells = append(row.Cells, obj.Name, int64(desiredReplicas), int64(currentReplicas), int64(readyReplicas), translateTimestampSince(obj.CreationTimestamp)) - if options.Wide { - names, images := layoutContainerCells(obj.Spec.Template.Spec.Containers) - row.Cells = append(row.Cells, names, images, labels.FormatLabels(obj.Spec.Selector)) - } - return []metav1beta1.TableRow{row}, nil -} - -func printReplicationControllerList(list *api.ReplicationControllerList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - rows := make([]metav1beta1.TableRow, 0, len(list.Items)) - for i := range list.Items { - r, err := printReplicationController(&list.Items[i], options) - if err != nil { - return nil, err - } - rows = append(rows, r...) 
- } - return rows, nil -} - -func printReplicaSet(obj *extensions.ReplicaSet, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - row := metav1beta1.TableRow{ - Object: runtime.RawExtension{Object: obj}, - } - - desiredReplicas := obj.Spec.Replicas - currentReplicas := obj.Status.Replicas - readyReplicas := obj.Status.ReadyReplicas - - row.Cells = append(row.Cells, obj.Name, int64(desiredReplicas), int64(currentReplicas), int64(readyReplicas), translateTimestampSince(obj.CreationTimestamp)) - if options.Wide { - names, images := layoutContainerCells(obj.Spec.Template.Spec.Containers) - row.Cells = append(row.Cells, names, images, metav1.FormatLabelSelector(obj.Spec.Selector)) - } - return []metav1beta1.TableRow{row}, nil -} - -func printReplicaSetList(list *extensions.ReplicaSetList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - rows := make([]metav1beta1.TableRow, 0, len(list.Items)) - for i := range list.Items { - r, err := printReplicaSet(&list.Items[i], options) - if err != nil { - return nil, err - } - rows = append(rows, r...) - } - return rows, nil -} - -func printJob(obj *batch.Job, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - row := metav1beta1.TableRow{ - Object: runtime.RawExtension{Object: obj}, - } - - var completions string - if obj.Spec.Completions != nil { - completions = fmt.Sprintf("%d/%d", obj.Status.Succeeded, *obj.Spec.Completions) - } else { - parallelism := int32(0) - if obj.Spec.Parallelism != nil { - parallelism = *obj.Spec.Parallelism - } - if parallelism > 1 { - completions = fmt.Sprintf("%d/1 of %d", obj.Status.Succeeded, parallelism) - } else { - completions = fmt.Sprintf("%d/1", obj.Status.Succeeded) - } - } - var jobDuration string - switch { - case obj.Status.StartTime == nil: - case obj.Status.CompletionTime == nil: - jobDuration = duration.HumanDuration(time.Now().Sub(obj.Status.StartTime.Time)) - default: - jobDuration = duration.HumanDuration(obj.Status.CompletionTime.Sub(obj.Status.StartTime.Time)) - } - - row.Cells = append(row.Cells, obj.Name, completions, jobDuration, translateTimestampSince(obj.CreationTimestamp)) - if options.Wide { - names, images := layoutContainerCells(obj.Spec.Template.Spec.Containers) - row.Cells = append(row.Cells, names, images, metav1.FormatLabelSelector(obj.Spec.Selector)) - } - return []metav1beta1.TableRow{row}, nil -} - -func printJobList(list *batch.JobList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - rows := make([]metav1beta1.TableRow, 0, len(list.Items)) - for i := range list.Items { - r, err := printJob(&list.Items[i], options) - if err != nil { - return nil, err - } - rows = append(rows, r...) 
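The COMPLETIONS cell logic in the deleted printJob above distinguishes fixed-completion-count Jobs from work-queue Jobs (nil completions). A minimal, hypothetical sketch of just that formatting decision; completionsCell is an illustrative helper, not part of the vendored file:

```go
package main

import "fmt"

// completionsCell mirrors printJob's COMPLETIONS column: a fixed completion
// count renders as "succeeded/completions"; a work-queue Job (nil
// completions) renders against 1, annotated with its parallelism when that
// is greater than 1.
func completionsCell(succeeded int32, completions *int32, parallelism int32) string {
	if completions != nil {
		return fmt.Sprintf("%d/%d", succeeded, *completions)
	}
	if parallelism > 1 {
		return fmt.Sprintf("%d/1 of %d", succeeded, parallelism)
	}
	return fmt.Sprintf("%d/1", succeeded)
}

func main() {
	five := int32(5)
	fmt.Println(completionsCell(3, &five, 0)) // 3/5
	fmt.Println(completionsCell(1, nil, 4))   // 1/1 of 4
}
```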
- }
- return rows, nil
-}
-
-func printCronJob(obj *batch.CronJob, options printers.PrintOptions) ([]metav1beta1.TableRow, error) {
- row := metav1beta1.TableRow{
- Object: runtime.RawExtension{Object: obj},
- }
-
- lastScheduleTime := "<none>"
- if obj.Status.LastScheduleTime != nil {
- lastScheduleTime = translateTimestampSince(*obj.Status.LastScheduleTime)
- }
-
- row.Cells = append(row.Cells, obj.Name, obj.Spec.Schedule, printBoolPtr(obj.Spec.Suspend), int64(len(obj.Status.Active)), lastScheduleTime, translateTimestampSince(obj.CreationTimestamp))
- if options.Wide {
- names, images := layoutContainerCells(obj.Spec.JobTemplate.Spec.Template.Spec.Containers)
- row.Cells = append(row.Cells, names, images, metav1.FormatLabelSelector(obj.Spec.JobTemplate.Spec.Selector))
- }
- return []metav1beta1.TableRow{row}, nil
-}
-
-func printCronJobList(list *batch.CronJobList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) {
- rows := make([]metav1beta1.TableRow, 0, len(list.Items))
- for i := range list.Items {
- r, err := printCronJob(&list.Items[i], options)
- if err != nil {
- return nil, err
- }
- rows = append(rows, r...)
- }
- return rows, nil
-}
-
-// loadBalancerStatusStringer behaves mostly like a string interface and converts the given status to a string.
-// `wide` indicates whether the returned value is meant for --o=wide output. If not, it's clipped to 16 bytes.
-func loadBalancerStatusStringer(s api.LoadBalancerStatus, wide bool) string {
- ingress := s.Ingress
- result := sets.NewString()
- for i := range ingress {
- if ingress[i].IP != "" {
- result.Insert(ingress[i].IP)
- } else if ingress[i].Hostname != "" {
- result.Insert(ingress[i].Hostname)
- }
- }
-
- r := strings.Join(result.List(), ",")
- if !wide && len(r) > loadBalancerWidth {
- r = r[0:(loadBalancerWidth-3)] + "..."
- }
- return r
-}
-
-func getServiceExternalIP(svc *api.Service, wide bool) string {
- switch svc.Spec.Type {
- case api.ServiceTypeClusterIP:
- if len(svc.Spec.ExternalIPs) > 0 {
- return strings.Join(svc.Spec.ExternalIPs, ",")
- }
- return "<none>"
- case api.ServiceTypeNodePort:
- if len(svc.Spec.ExternalIPs) > 0 {
- return strings.Join(svc.Spec.ExternalIPs, ",")
- }
- return "<none>"
- case api.ServiceTypeLoadBalancer:
- lbIps := loadBalancerStatusStringer(svc.Status.LoadBalancer, wide)
- if len(svc.Spec.ExternalIPs) > 0 {
- results := []string{}
- if len(lbIps) > 0 {
- results = append(results, strings.Split(lbIps, ",")...)
- }
- results = append(results, svc.Spec.ExternalIPs...)
- return strings.Join(results, ",")
- }
- if len(lbIps) > 0 {
- return lbIps
- }
- return "<pending>"
- case api.ServiceTypeExternalName:
- return svc.Spec.ExternalName
- }
- return "<unknown>"
-}
-
-func makePortString(ports []api.ServicePort) string {
- pieces := make([]string, len(ports))
- for ix := range ports {
- port := &ports[ix]
- pieces[ix] = fmt.Sprintf("%d/%s", port.Port, port.Protocol)
- if port.NodePort > 0 {
- pieces[ix] = fmt.Sprintf("%d:%d/%s", port.Port, port.NodePort, port.Protocol)
- }
- }
- return strings.Join(pieces, ",")
-}
-
-func printService(obj *api.Service, options printers.PrintOptions) ([]metav1beta1.TableRow, error) {
- row := metav1beta1.TableRow{
- Object: runtime.RawExtension{Object: obj},
- }
- svcType := obj.Spec.Type
- internalIP := obj.Spec.ClusterIP
- if len(internalIP) == 0 {
- internalIP = "<none>"
- }
- externalIP := getServiceExternalIP(obj, options.Wide)
- svcPorts := makePortString(obj.Spec.Ports)
- if len(svcPorts) == 0 {
- svcPorts = "<none>"
- }
-
- row.Cells = append(row.Cells, obj.Name, string(svcType), internalIP, externalIP, svcPorts, translateTimestampSince(obj.CreationTimestamp))
- if options.Wide {
- row.Cells = append(row.Cells, labels.FormatLabels(obj.Spec.Selector))
- }
-
- return []metav1beta1.TableRow{row}, nil
-}
-
-func printServiceList(list *api.ServiceList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) {
- rows := make([]metav1beta1.TableRow, 0, len(list.Items))
- for i := range list.Items {
- r, err := printService(&list.Items[i], options)
- if err != nil {
- return nil, err
- }
- rows = append(rows, r...)
- }
- return rows, nil
-}
-
-// backendStringer behaves just like a string interface and converts the given backend to a string.
-func backendStringer(backend *extensions.IngressBackend) string {
- if backend == nil {
- return ""
- }
- return fmt.Sprintf("%v:%v", backend.ServiceName, backend.ServicePort.String())
-}
-
-func formatHosts(rules []extensions.IngressRule) string {
- list := []string{}
- max := 3
- more := false
- for _, rule := range rules {
- if len(list) == max {
- more = true
- }
- if !more && len(rule.Host) != 0 {
- list = append(list, rule.Host)
- }
- }
- if len(list) == 0 {
- return "*"
- }
- ret := strings.Join(list, ",")
- if more {
- return fmt.Sprintf("%s + %d more...", ret, len(rules)-max)
- }
- return ret
-}
-
-func formatPorts(tls []extensions.IngressTLS) string {
- if len(tls) != 0 {
- return "80, 443"
- }
- return "80"
-}
-
-func printIngress(obj *extensions.Ingress, options printers.PrintOptions) ([]metav1beta1.TableRow, error) {
- row := metav1beta1.TableRow{
- Object: runtime.RawExtension{Object: obj},
- }
- hosts := formatHosts(obj.Spec.Rules)
- address := loadBalancerStatusStringer(obj.Status.LoadBalancer, options.Wide)
- ports := formatPorts(obj.Spec.TLS)
- createTime := translateTimestampSince(obj.CreationTimestamp)
- row.Cells = append(row.Cells, obj.Name, hosts, address, ports, createTime)
- return []metav1beta1.TableRow{row}, nil
-}
-
-func printIngressList(list *extensions.IngressList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) {
- rows := make([]metav1beta1.TableRow, 0, len(list.Items))
- for i := range list.Items {
- r, err := printIngress(&list.Items[i], options)
- if err != nil {
- return nil, err
- }
- rows = append(rows, r...)
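The PORT(S) cell produced by the deleted makePortString above uses "port/protocol", switching to "port:nodePort/protocol" once a NodePort is allocated. A minimal, hypothetical sketch with simplified types (servicePort and portString are illustrative, not the vendored API types):

```go
package main

import (
	"fmt"
	"strings"
)

// servicePort is a pared-down stand-in for api.ServicePort.
type servicePort struct {
	Port     int32
	NodePort int32
	Protocol string
}

// portString mirrors makePortString: one piece per port, comma-joined,
// with the NodePort spliced in when it is set.
func portString(ports []servicePort) string {
	pieces := make([]string, len(ports))
	for i, p := range ports {
		pieces[i] = fmt.Sprintf("%d/%s", p.Port, p.Protocol)
		if p.NodePort > 0 {
			pieces[i] = fmt.Sprintf("%d:%d/%s", p.Port, p.NodePort, p.Protocol)
		}
	}
	return strings.Join(pieces, ",")
}

func main() {
	fmt.Println(portString([]servicePort{
		{Port: 80, Protocol: "TCP"},
		{Port: 443, NodePort: 30443, Protocol: "TCP"},
	})) // 80/TCP,443:30443/TCP
}
```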
- } - return rows, nil -} - -func printStatefulSet(obj *apps.StatefulSet, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - row := metav1beta1.TableRow{ - Object: runtime.RawExtension{Object: obj}, - } - desiredReplicas := obj.Spec.Replicas - currentReplicas := obj.Status.Replicas - createTime := translateTimestampSince(obj.CreationTimestamp) - row.Cells = append(row.Cells, obj.Name, int64(desiredReplicas), int64(currentReplicas), createTime) - if options.Wide { - names, images := layoutContainerCells(obj.Spec.Template.Spec.Containers) - row.Cells = append(row.Cells, names, images) - } - return []metav1beta1.TableRow{row}, nil -} - -func printStatefulSetList(list *apps.StatefulSetList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - rows := make([]metav1beta1.TableRow, 0, len(list.Items)) - for i := range list.Items { - r, err := printStatefulSet(&list.Items[i], options) - if err != nil { - return nil, err - } - rows = append(rows, r...) - } - return rows, nil -} - -func printDaemonSet(obj *extensions.DaemonSet, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - row := metav1beta1.TableRow{ - Object: runtime.RawExtension{Object: obj}, - } - - desiredScheduled := obj.Status.DesiredNumberScheduled - currentScheduled := obj.Status.CurrentNumberScheduled - numberReady := obj.Status.NumberReady - numberUpdated := obj.Status.UpdatedNumberScheduled - numberAvailable := obj.Status.NumberAvailable - - row.Cells = append(row.Cells, obj.Name, int64(desiredScheduled), int64(currentScheduled), int64(numberReady), int64(numberUpdated), int64(numberAvailable), labels.FormatLabels(obj.Spec.Template.Spec.NodeSelector), translateTimestampSince(obj.CreationTimestamp)) - if options.Wide { - names, images := layoutContainerCells(obj.Spec.Template.Spec.Containers) - row.Cells = append(row.Cells, names, images, metav1.FormatLabelSelector(obj.Spec.Selector)) - } - return []metav1beta1.TableRow{row}, nil -} - -func printDaemonSetList(list *extensions.DaemonSetList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - rows := make([]metav1beta1.TableRow, 0, len(list.Items)) - for i := range list.Items { - r, err := printDaemonSet(&list.Items[i], options) - if err != nil { - return nil, err - } - rows = append(rows, r...) - } - return rows, nil -} - -func printEndpoints(obj *api.Endpoints, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - row := metav1beta1.TableRow{ - Object: runtime.RawExtension{Object: obj}, - } - row.Cells = append(row.Cells, obj.Name, formatEndpoints(obj, nil), translateTimestampSince(obj.CreationTimestamp)) - return []metav1beta1.TableRow{row}, nil -} - -func printEndpointsList(list *api.EndpointsList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - rows := make([]metav1beta1.TableRow, 0, len(list.Items)) - for i := range list.Items { - r, err := printEndpoints(&list.Items[i], options) - if err != nil { - return nil, err - } - rows = append(rows, r...) 
- } - return rows, nil -} - -func printNamespace(obj *api.Namespace, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - row := metav1beta1.TableRow{ - Object: runtime.RawExtension{Object: obj}, - } - row.Cells = append(row.Cells, obj.Name, string(obj.Status.Phase), translateTimestampSince(obj.CreationTimestamp)) - return []metav1beta1.TableRow{row}, nil -} - -func printNamespaceList(list *api.NamespaceList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - rows := make([]metav1beta1.TableRow, 0, len(list.Items)) - for i := range list.Items { - r, err := printNamespace(&list.Items[i], options) - if err != nil { - return nil, err - } - rows = append(rows, r...) - } - return rows, nil -} - -func printSecret(obj *api.Secret, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - row := metav1beta1.TableRow{ - Object: runtime.RawExtension{Object: obj}, - } - row.Cells = append(row.Cells, obj.Name, string(obj.Type), int64(len(obj.Data)), translateTimestampSince(obj.CreationTimestamp)) - return []metav1beta1.TableRow{row}, nil -} - -func printSecretList(list *api.SecretList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - rows := make([]metav1beta1.TableRow, 0, len(list.Items)) - for i := range list.Items { - r, err := printSecret(&list.Items[i], options) - if err != nil { - return nil, err - } - rows = append(rows, r...) - } - return rows, nil -} - -func printServiceAccount(obj *api.ServiceAccount, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - row := metav1beta1.TableRow{ - Object: runtime.RawExtension{Object: obj}, - } - row.Cells = append(row.Cells, obj.Name, int64(len(obj.Secrets)), translateTimestampSince(obj.CreationTimestamp)) - return []metav1beta1.TableRow{row}, nil -} - -func printServiceAccountList(list *api.ServiceAccountList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - rows := make([]metav1beta1.TableRow, 0, len(list.Items)) - for i := range list.Items { - r, err := printServiceAccount(&list.Items[i], options) - if err != nil { - return nil, err - } - rows = append(rows, r...) 
- }
- return rows, nil
-}
-
-func printNode(obj *api.Node, options printers.PrintOptions) ([]metav1beta1.TableRow, error) {
- row := metav1beta1.TableRow{
- Object: runtime.RawExtension{Object: obj},
- }
-
- conditionMap := make(map[api.NodeConditionType]*api.NodeCondition)
- NodeAllConditions := []api.NodeConditionType{api.NodeReady}
- for i := range obj.Status.Conditions {
- cond := obj.Status.Conditions[i]
- conditionMap[cond.Type] = &cond
- }
- var status []string
- for _, validCondition := range NodeAllConditions {
- if condition, ok := conditionMap[validCondition]; ok {
- if condition.Status == api.ConditionTrue {
- status = append(status, string(condition.Type))
- } else {
- status = append(status, "Not"+string(condition.Type))
- }
- }
- }
- if len(status) == 0 {
- status = append(status, "Unknown")
- }
- if obj.Spec.Unschedulable {
- status = append(status, "SchedulingDisabled")
- }
-
- roles := strings.Join(findNodeRoles(obj), ",")
- if len(roles) == 0 {
- roles = "<none>"
- }
-
- row.Cells = append(row.Cells, obj.Name, strings.Join(status, ","), roles, translateTimestampSince(obj.CreationTimestamp), obj.Status.NodeInfo.KubeletVersion)
- if options.Wide {
- osImage, kernelVersion, crVersion := obj.Status.NodeInfo.OSImage, obj.Status.NodeInfo.KernelVersion, obj.Status.NodeInfo.ContainerRuntimeVersion
- if osImage == "" {
- osImage = "<unknown>"
- }
- if kernelVersion == "" {
- kernelVersion = "<unknown>"
- }
- if crVersion == "" {
- crVersion = "<unknown>"
- }
- row.Cells = append(row.Cells, getNodeInternalIP(obj), getNodeExternalIP(obj), osImage, kernelVersion, crVersion)
- }
-
- return []metav1beta1.TableRow{row}, nil
-}
-
-// Returns first external ip of the node or "" if none is found.
-func getNodeExternalIP(node *api.Node) string {
- for _, address := range node.Status.Addresses {
- if address.Type == api.NodeExternalIP {
- return address.Address
- }
- }
-
- return ""
-}
-
-// Returns the internal IP of the node or "" if none is found.
-func getNodeInternalIP(node *api.Node) string {
- for _, address := range node.Status.Addresses {
- if address.Type == api.NodeInternalIP {
- return address.Address
- }
- }
-
- return ""
-}
-
-// findNodeRoles returns the roles of a given node.
-// The roles are determined by looking for:
-// * a node-role.kubernetes.io/<role>="" label
-// * a kubernetes.io/role="<role>" label
-func findNodeRoles(node *api.Node) []string {
- roles := sets.NewString()
- for k, v := range node.Labels {
- switch {
- case strings.HasPrefix(k, labelNodeRolePrefix):
- if role := strings.TrimPrefix(k, labelNodeRolePrefix); len(role) > 0 {
- roles.Insert(role)
- }
-
- case k == nodeLabelRole && v != "":
- roles.Insert(v)
- }
- }
- return roles.List()
-}
-
-func printNodeList(list *api.NodeList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) {
- rows := make([]metav1beta1.TableRow, 0, len(list.Items))
- for i := range list.Items {
- r, err := printNode(&list.Items[i], options)
- if err != nil {
- return nil, err
- }
- rows = append(rows, r...)
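The two label conventions that the deleted findNodeRoles above recognizes can be exercised with a small standalone sketch, assuming only the standard library; nodeRoles and the sample labels are hypothetical, not the vendored helpers:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// nodeRoles mirrors findNodeRoles: a role comes either from the suffix of a
// "node-role.kubernetes.io/<role>" label key or from the value of the
// legacy "kubernetes.io/role" label; duplicates collapse into a sorted set.
func nodeRoles(labels map[string]string) []string {
	const prefix = "node-role.kubernetes.io/"
	set := map[string]struct{}{}
	for k, v := range labels {
		switch {
		case strings.HasPrefix(k, prefix):
			if role := strings.TrimPrefix(k, prefix); role != "" {
				set[role] = struct{}{}
			}
		case k == "kubernetes.io/role" && v != "":
			set[v] = struct{}{}
		}
	}
	roles := make([]string, 0, len(set))
	for r := range set {
		roles = append(roles, r)
	}
	sort.Strings(roles)
	return roles
}

func main() {
	fmt.Println(nodeRoles(map[string]string{
		"node-role.kubernetes.io/master": "",
		"kubernetes.io/role":             "worker",
	})) // [master worker]
}
```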
- } - return rows, nil -} - -func printPersistentVolume(obj *api.PersistentVolume, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - row := metav1beta1.TableRow{ - Object: runtime.RawExtension{Object: obj}, - } - - claimRefUID := "" - if obj.Spec.ClaimRef != nil { - claimRefUID += obj.Spec.ClaimRef.Namespace - claimRefUID += "/" - claimRefUID += obj.Spec.ClaimRef.Name - } - - modesStr := helper.GetAccessModesAsString(obj.Spec.AccessModes) - reclaimPolicyStr := string(obj.Spec.PersistentVolumeReclaimPolicy) - - aQty := obj.Spec.Capacity[api.ResourceStorage] - aSize := aQty.String() - - phase := obj.Status.Phase - if obj.ObjectMeta.DeletionTimestamp != nil { - phase = "Terminating" - } - - row.Cells = append(row.Cells, obj.Name, aSize, modesStr, reclaimPolicyStr, - string(phase), claimRefUID, helper.GetPersistentVolumeClass(obj), - obj.Status.Reason, - translateTimestampSince(obj.CreationTimestamp)) - return []metav1beta1.TableRow{row}, nil -} - -func printPersistentVolumeList(list *api.PersistentVolumeList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - rows := make([]metav1beta1.TableRow, 0, len(list.Items)) - for i := range list.Items { - r, err := printPersistentVolume(&list.Items[i], options) - if err != nil { - return nil, err - } - rows = append(rows, r...) - } - return rows, nil -} - -func printPersistentVolumeClaim(obj *api.PersistentVolumeClaim, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - row := metav1beta1.TableRow{ - Object: runtime.RawExtension{Object: obj}, - } - - phase := obj.Status.Phase - if obj.ObjectMeta.DeletionTimestamp != nil { - phase = "Terminating" - } - - storage := obj.Spec.Resources.Requests[api.ResourceStorage] - capacity := "" - accessModes := "" - if obj.Spec.VolumeName != "" { - accessModes = helper.GetAccessModesAsString(obj.Status.AccessModes) - storage = obj.Status.Capacity[api.ResourceStorage] - capacity = storage.String() - } - - row.Cells = append(row.Cells, obj.Name, string(phase), obj.Spec.VolumeName, capacity, accessModes, helper.GetPersistentVolumeClaimClass(obj), translateTimestampSince(obj.CreationTimestamp)) - return []metav1beta1.TableRow{row}, nil -} - -func printPersistentVolumeClaimList(list *api.PersistentVolumeClaimList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - rows := make([]metav1beta1.TableRow, 0, len(list.Items)) - for i := range list.Items { - r, err := printPersistentVolumeClaim(&list.Items[i], options) - if err != nil { - return nil, err - } - rows = append(rows, r...) - } - return rows, nil -} - -func printEvent(obj *api.Event, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - row := metav1beta1.TableRow{ - Object: runtime.RawExtension{Object: obj}, - } - // While watching event, we should print absolute time. 
- var firstTimestamp, lastTimestamp string - if options.AbsoluteTimestamps { - firstTimestamp = obj.FirstTimestamp.String() - lastTimestamp = obj.LastTimestamp.String() - } else { - firstTimestamp = translateTimestampSince(obj.FirstTimestamp) - lastTimestamp = translateTimestampSince(obj.LastTimestamp) - } - if options.Wide { - row.Cells = append(row.Cells, - lastTimestamp, - obj.Type, - obj.Reason, - obj.InvolvedObject.Kind, - formatEventSource(obj.Source), - strings.TrimSpace(obj.Message), - obj.InvolvedObject.FieldPath, - firstTimestamp, - int64(obj.Count), - obj.Name, - ) - } else { - row.Cells = append(row.Cells, - lastTimestamp, - obj.Type, - obj.Reason, - obj.InvolvedObject.Kind, - strings.TrimSpace(obj.Message), - ) - } - - return []metav1beta1.TableRow{row}, nil -} - -// Sorts and prints the EventList in a human-friendly format. -func printEventList(list *api.EventList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - rows := make([]metav1beta1.TableRow, 0, len(list.Items)) - for i := range list.Items { - r, err := printEvent(&list.Items[i], options) - if err != nil { - return nil, err - } - rows = append(rows, r...) - } - return rows, nil -} - -func printRoleBinding(obj *rbac.RoleBinding, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - row := metav1beta1.TableRow{ - Object: runtime.RawExtension{Object: obj}, - } - - row.Cells = append(row.Cells, obj.Name, translateTimestampSince(obj.CreationTimestamp)) - if options.Wide { - roleRef := fmt.Sprintf("%s/%s", obj.RoleRef.Kind, obj.RoleRef.Name) - users, groups, sas, _ := rbac.SubjectsStrings(obj.Subjects) - row.Cells = append(row.Cells, roleRef, strings.Join(users, ", "), strings.Join(groups, ", "), strings.Join(sas, ", ")) - } - return []metav1beta1.TableRow{row}, nil -} - -// Prints the RoleBinding in a human-friendly format. -func printRoleBindingList(list *rbac.RoleBindingList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - rows := make([]metav1beta1.TableRow, 0, len(list.Items)) - for i := range list.Items { - r, err := printRoleBinding(&list.Items[i], options) - if err != nil { - return nil, err - } - rows = append(rows, r...) - } - return rows, nil -} - -func printClusterRoleBinding(obj *rbac.ClusterRoleBinding, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - row := metav1beta1.TableRow{ - Object: runtime.RawExtension{Object: obj}, - } - - row.Cells = append(row.Cells, obj.Name, translateTimestampSince(obj.CreationTimestamp)) - if options.Wide { - roleRef := fmt.Sprintf("%s/%s", obj.RoleRef.Kind, obj.RoleRef.Name) - users, groups, sas, _ := rbac.SubjectsStrings(obj.Subjects) - row.Cells = append(row.Cells, roleRef, strings.Join(users, ", "), strings.Join(groups, ", "), strings.Join(sas, ", ")) - } - return []metav1beta1.TableRow{row}, nil -} - -// Prints the ClusterRoleBinding in a human-friendly format. -func printClusterRoleBindingList(list *rbac.ClusterRoleBindingList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - rows := make([]metav1beta1.TableRow, 0, len(list.Items)) - for i := range list.Items { - r, err := printClusterRoleBinding(&list.Items[i], options) - if err != nil { - return nil, err - } - rows = append(rows, r...) 
- } - return rows, nil -} - -func printCertificateSigningRequest(obj *certificates.CertificateSigningRequest, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - row := metav1beta1.TableRow{ - Object: runtime.RawExtension{Object: obj}, - } - status, err := extractCSRStatus(obj) - if err != nil { - return nil, err - } - row.Cells = append(row.Cells, obj.Name, translateTimestampSince(obj.CreationTimestamp), obj.Spec.Username, status) - return []metav1beta1.TableRow{row}, nil -} - -func extractCSRStatus(csr *certificates.CertificateSigningRequest) (string, error) { - var approved, denied bool - for _, c := range csr.Status.Conditions { - switch c.Type { - case certificates.CertificateApproved: - approved = true - case certificates.CertificateDenied: - denied = true - default: - return "", fmt.Errorf("unknown csr condition %q", c) - } - } - var status string - // must be in order of presidence - if denied { - status += "Denied" - } else if approved { - status += "Approved" - } else { - status += "Pending" - } - if len(csr.Status.Certificate) > 0 { - status += ",Issued" - } - return status, nil -} - -func printCertificateSigningRequestList(list *certificates.CertificateSigningRequestList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - rows := make([]metav1beta1.TableRow, 0, len(list.Items)) - for i := range list.Items { - r, err := printCertificateSigningRequest(&list.Items[i], options) - if err != nil { - return nil, err - } - rows = append(rows, r...) - } - return rows, nil -} - -func printComponentStatus(obj *api.ComponentStatus, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - row := metav1beta1.TableRow{ - Object: runtime.RawExtension{Object: obj}, - } - status := "Unknown" - message := "" - error := "" - for _, condition := range obj.Conditions { - if condition.Type == api.ComponentHealthy { - if condition.Status == api.ConditionTrue { - status = "Healthy" - } else { - status = "Unhealthy" - } - message = condition.Message - error = condition.Error - break - } - } - row.Cells = append(row.Cells, obj.Name, status, message, error) - return []metav1beta1.TableRow{row}, nil -} - -func printComponentStatusList(list *api.ComponentStatusList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - rows := make([]metav1beta1.TableRow, 0, len(list.Items)) - for i := range list.Items { - r, err := printComponentStatus(&list.Items[i], options) - if err != nil { - return nil, err - } - rows = append(rows, r...) - } - return rows, nil -} - -func truncate(str string, maxLen int) string { - if len(str) > maxLen { - return str[0:maxLen] + "..." 
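The precedence encoded in the deleted extractCSRStatus above (Denied over Approved, both over Pending, with ",Issued" appended once a certificate exists) is easy to see in a reduced, hypothetical sketch; csrStatus is illustrative, not the vendored function:

```go
package main

import "fmt"

// csrStatus mirrors extractCSRStatus with the condition scan pre-folded
// into three booleans: Denied wins over Approved, the default is Pending,
// and an issued certificate adds the ",Issued" suffix.
func csrStatus(approved, denied, issued bool) string {
	status := "Pending"
	if denied {
		status = "Denied"
	} else if approved {
		status = "Approved"
	}
	if issued {
		status += ",Issued"
	}
	return status
}

func main() {
	fmt.Println(csrStatus(true, false, true)) // Approved,Issued
	fmt.Println(csrStatus(true, true, false)) // Denied
}
```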
- }
- return str
-}
-
-func printDeployment(obj *extensions.Deployment, options printers.PrintOptions) ([]metav1beta1.TableRow, error) {
- row := metav1beta1.TableRow{
- Object: runtime.RawExtension{Object: obj},
- }
- desiredReplicas := obj.Spec.Replicas
- currentReplicas := obj.Status.Replicas
- updatedReplicas := obj.Status.UpdatedReplicas
- availableReplicas := obj.Status.AvailableReplicas
- age := translateTimestampSince(obj.CreationTimestamp)
- containers := obj.Spec.Template.Spec.Containers
- selector, err := metav1.LabelSelectorAsSelector(obj.Spec.Selector)
- if err != nil {
- // this shouldn't happen if LabelSelector passed validation
- return nil, err
- }
- row.Cells = append(row.Cells, obj.Name, int64(desiredReplicas), int64(currentReplicas), int64(updatedReplicas), int64(availableReplicas), age)
- if options.Wide {
- containers, images := layoutContainerCells(containers)
- row.Cells = append(row.Cells, containers, images, selector.String())
- }
- return []metav1beta1.TableRow{row}, nil
-}
-
-func printDeploymentList(list *extensions.DeploymentList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) {
- rows := make([]metav1beta1.TableRow, 0, len(list.Items))
- for i := range list.Items {
- r, err := printDeployment(&list.Items[i], options)
- if err != nil {
- return nil, err
- }
- rows = append(rows, r...)
- }
- return rows, nil
-}
-
-func formatHPAMetrics(specs []autoscaling.MetricSpec, statuses []autoscaling.MetricStatus) string {
- if len(specs) == 0 {
- return "<none>"
- }
- list := []string{}
- max := 2
- more := false
- count := 0
- for i, spec := range specs {
- switch spec.Type {
- case autoscaling.ExternalMetricSourceType:
- if spec.External.Target.AverageValue != nil {
- current := "<unknown>"
- if len(statuses) > i && statuses[i].External != nil && &statuses[i].External.Current.AverageValue != nil {
- current = statuses[i].External.Current.AverageValue.String()
- }
- list = append(list, fmt.Sprintf("%s/%s (avg)", current, spec.External.Target.AverageValue.String()))
- } else {
- current := "<unknown>"
- if len(statuses) > i && statuses[i].External != nil {
- current = statuses[i].External.Current.Value.String()
- }
- list = append(list, fmt.Sprintf("%s/%s", current, spec.External.Target.Value.String()))
- }
- case autoscaling.PodsMetricSourceType:
- current := "<unknown>"
- if len(statuses) > i && statuses[i].Pods != nil {
- current = statuses[i].Pods.Current.AverageValue.String()
- }
- list = append(list, fmt.Sprintf("%s/%s", current, spec.Pods.Target.AverageValue.String()))
- case autoscaling.ObjectMetricSourceType:
- current := "<unknown>"
- if len(statuses) > i && statuses[i].Object != nil {
- current = statuses[i].Object.Current.Value.String()
- }
- list = append(list, fmt.Sprintf("%s/%s", current, spec.Object.Target.Value.String()))
- case autoscaling.ResourceMetricSourceType:
- if spec.Resource.Target.AverageValue != nil {
- current := "<unknown>"
- if len(statuses) > i && statuses[i].Resource != nil {
- current = statuses[i].Resource.Current.AverageValue.String()
- }
- list = append(list, fmt.Sprintf("%s/%s", current, spec.Resource.Target.AverageValue.String()))
- } else {
- current := "<unknown>"
- if len(statuses) > i && statuses[i].Resource != nil && statuses[i].Resource.Current.AverageUtilization != nil {
- current = fmt.Sprintf("%d%%", *statuses[i].Resource.Current.AverageUtilization)
- }
-
- target := "<auto>"
- if spec.Resource.Target.AverageUtilization != nil {
- target = fmt.Sprintf("%d%%", *spec.Resource.Target.AverageUtilization)
- }
- list = append(list, fmt.Sprintf("%s/%s", current, target))
- }
- 
default:
- list = append(list, "<unknown type>")
- }
-
- count++
- }
-
- if count > max {
- list = list[:max]
- more = true
- }
-
- ret := strings.Join(list, ", ")
- if more {
- return fmt.Sprintf("%s + %d more...", ret, count-max)
- }
- return ret
-}
-
-func printHorizontalPodAutoscaler(obj *autoscaling.HorizontalPodAutoscaler, options printers.PrintOptions) ([]metav1beta1.TableRow, error) {
- row := metav1beta1.TableRow{
- Object: runtime.RawExtension{Object: obj},
- }
-
- reference := fmt.Sprintf("%s/%s",
- obj.Spec.ScaleTargetRef.Kind,
- obj.Spec.ScaleTargetRef.Name)
- minPods := "<unset>"
- metrics := formatHPAMetrics(obj.Spec.Metrics, obj.Status.CurrentMetrics)
- if obj.Spec.MinReplicas != nil {
- minPods = fmt.Sprintf("%d", *obj.Spec.MinReplicas)
- }
- maxPods := obj.Spec.MaxReplicas
- currentReplicas := obj.Status.CurrentReplicas
- row.Cells = append(row.Cells, obj.Name, reference, metrics, minPods, int64(maxPods), int64(currentReplicas), translateTimestampSince(obj.CreationTimestamp))
- return []metav1beta1.TableRow{row}, nil
-}
-
-func printHorizontalPodAutoscalerList(list *autoscaling.HorizontalPodAutoscalerList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) {
- rows := make([]metav1beta1.TableRow, 0, len(list.Items))
- for i := range list.Items {
- r, err := printHorizontalPodAutoscaler(&list.Items[i], options)
- if err != nil {
- return nil, err
- }
- rows = append(rows, r...)
- }
- return rows, nil
-}
-
-func printConfigMap(obj *api.ConfigMap, options printers.PrintOptions) ([]metav1beta1.TableRow, error) {
- row := metav1beta1.TableRow{
- Object: runtime.RawExtension{Object: obj},
- }
- row.Cells = append(row.Cells, obj.Name, int64(len(obj.Data)), translateTimestampSince(obj.CreationTimestamp))
- return []metav1beta1.TableRow{row}, nil
-}
-
-func printConfigMapList(list *api.ConfigMapList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) {
- rows := make([]metav1beta1.TableRow, 0, len(list.Items))
- for i := range list.Items {
- r, err := printConfigMap(&list.Items[i], options)
- if err != nil {
- return nil, err
- }
- rows = append(rows, r...)
- }
- return rows, nil
-}
-
-func printPodSecurityPolicy(obj *policy.PodSecurityPolicy, options printers.PrintOptions) ([]metav1beta1.TableRow, error) {
- row := metav1beta1.TableRow{
- Object: runtime.RawExtension{Object: obj},
- }
-
- capabilities := make([]string, len(obj.Spec.AllowedCapabilities))
- for i, c := range obj.Spec.AllowedCapabilities {
- capabilities[i] = string(c)
- }
- volumes := make([]string, len(obj.Spec.Volumes))
- for i, v := range obj.Spec.Volumes {
- volumes[i] = string(v)
- }
- row.Cells = append(row.Cells, obj.Name, fmt.Sprintf("%v", obj.Spec.Privileged),
- strings.Join(capabilities, ","), string(obj.Spec.SELinux.Rule),
- string(obj.Spec.RunAsUser.Rule), string(obj.Spec.FSGroup.Rule),
- string(obj.Spec.SupplementalGroups.Rule), obj.Spec.ReadOnlyRootFilesystem,
- strings.Join(volumes, ","))
- return []metav1beta1.TableRow{row}, nil
-}
-
-func printPodSecurityPolicyList(list *policy.PodSecurityPolicyList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) {
- rows := make([]metav1beta1.TableRow, 0, len(list.Items))
- for i := range list.Items {
- r, err := printPodSecurityPolicy(&list.Items[i], options)
- if err != nil {
- return nil, err
- }
- rows = append(rows, r...)
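The TARGETS column produced by the deleted formatHPAMetrics above caps the display at two "current/target" pairs before summarizing. A minimal, hypothetical sketch over pre-formatted pairs (hpaTargets and the sample values are illustrative, not the vendored function):

```go
package main

import (
	"fmt"
	"strings"
)

// hpaTargets mirrors the tail of formatHPAMetrics: at most two pairs are
// shown, joined with ", ", and the overflow is reported as a count.
func hpaTargets(pairs []string) string {
	if len(pairs) == 0 {
		return "<none>"
	}
	const max = 2
	if len(pairs) <= max {
		return strings.Join(pairs, ", ")
	}
	return fmt.Sprintf("%s + %d more...", strings.Join(pairs[:max], ", "), len(pairs)-max)
}

func main() {
	fmt.Println(hpaTargets([]string{"45%/80%", "102400/51200", "3/5"}))
	// 45%/80%, 102400/51200 + 1 more...
}
```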
- } - return rows, nil -} - -func printNetworkPolicy(obj *networking.NetworkPolicy, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - row := metav1beta1.TableRow{ - Object: runtime.RawExtension{Object: obj}, - } - row.Cells = append(row.Cells, obj.Name, metav1.FormatLabelSelector(&obj.Spec.PodSelector), translateTimestampSince(obj.CreationTimestamp)) - return []metav1beta1.TableRow{row}, nil -} - -func printNetworkPolicyList(list *networking.NetworkPolicyList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - rows := make([]metav1beta1.TableRow, 0, len(list.Items)) - for i := range list.Items { - r, err := printNetworkPolicy(&list.Items[i], options) - if err != nil { - return nil, err - } - rows = append(rows, r...) - } - return rows, nil -} - -func printStorageClass(obj *storage.StorageClass, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - row := metav1beta1.TableRow{ - Object: runtime.RawExtension{Object: obj}, - } - - name := obj.Name - if storageutil.IsDefaultAnnotation(obj.ObjectMeta) { - name += " (default)" - } - provtype := obj.Provisioner - row.Cells = append(row.Cells, name, provtype, translateTimestampSince(obj.CreationTimestamp)) - - return []metav1beta1.TableRow{row}, nil -} - -func printStorageClassList(list *storage.StorageClassList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - rows := make([]metav1beta1.TableRow, 0, len(list.Items)) - for i := range list.Items { - r, err := printStorageClass(&list.Items[i], options) - if err != nil { - return nil, err - } - rows = append(rows, r...) - } - return rows, nil -} - -func printLease(obj *coordination.Lease, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - row := metav1beta1.TableRow{ - Object: runtime.RawExtension{Object: obj}, - } - - var holderIdentity string - if obj.Spec.HolderIdentity != nil { - holderIdentity = *obj.Spec.HolderIdentity - } - row.Cells = append(row.Cells, obj.Name, holderIdentity, translateTimestampSince(obj.CreationTimestamp)) - return []metav1beta1.TableRow{row}, nil -} - -func printLeaseList(list *coordination.LeaseList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - rows := make([]metav1beta1.TableRow, 0, len(list.Items)) - for i := range list.Items { - r, err := printLease(&list.Items[i], options) - if err != nil { - return nil, err - } - rows = append(rows, r...) - } - return rows, nil -} - -func printStatus(obj *metav1.Status, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - row := metav1beta1.TableRow{ - Object: runtime.RawExtension{Object: obj}, - } - row.Cells = append(row.Cells, obj.Status, obj.Reason, obj.Message) - - return []metav1beta1.TableRow{row}, nil -} - -// Lay out all the containers on one line if use wide output. -// DEPRECATED: convert to TableHandler and use layoutContainerCells -func layoutContainers(containers []api.Container, w io.Writer) error { - var namesBuffer bytes.Buffer - var imagesBuffer bytes.Buffer - - for i, container := range containers { - namesBuffer.WriteString(container.Name) - imagesBuffer.WriteString(container.Image) - if i != len(containers)-1 { - namesBuffer.WriteString(",") - imagesBuffer.WriteString(",") - } - } - _, err := fmt.Fprintf(w, "\t%s\t%s", namesBuffer.String(), imagesBuffer.String()) - return err -} - -// Lay out all the containers on one line if use wide output. 
-func layoutContainerCells(containers []api.Container) (names string, images string) { - var namesBuffer bytes.Buffer - var imagesBuffer bytes.Buffer - - for i, container := range containers { - namesBuffer.WriteString(container.Name) - imagesBuffer.WriteString(container.Image) - if i != len(containers)-1 { - namesBuffer.WriteString(",") - imagesBuffer.WriteString(",") - } - } - return namesBuffer.String(), imagesBuffer.String() -} - -// formatEventSource formats EventSource as a comma separated string excluding Host when empty -func formatEventSource(es api.EventSource) string { - EventSourceString := []string{es.Component} - if len(es.Host) > 0 { - EventSourceString = append(EventSourceString, es.Host) - } - return strings.Join(EventSourceString, ", ") -} - -func printControllerRevision(obj *apps.ControllerRevision, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - row := metav1beta1.TableRow{ - Object: runtime.RawExtension{Object: obj}, - } - - controllerRef := metav1.GetControllerOf(obj) - controllerName := "" - if controllerRef != nil { - withKind := true - gv, err := schema.ParseGroupVersion(controllerRef.APIVersion) - if err != nil { - return nil, err - } - gvk := gv.WithKind(controllerRef.Kind) - controllerName = printers.FormatResourceName(gvk.GroupKind(), controllerRef.Name, withKind) - } - revision := obj.Revision - age := translateTimestampSince(obj.CreationTimestamp) - row.Cells = append(row.Cells, obj.Name, controllerName, revision, age) - return []metav1beta1.TableRow{row}, nil -} - -func printControllerRevisionList(list *apps.ControllerRevisionList, options printers.PrintOptions) ([]metav1beta1.TableRow, error) { - rows := make([]metav1beta1.TableRow, 0, len(list.Items)) - for i := range list.Items { - r, err := printControllerRevision(&list.Items[i], options) - if err != nil { - return nil, err - } - rows = append(rows, r...) - } - return rows, nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/tabwriter.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/tabwriter.go deleted file mode 100644 index d1d4cb53fc5b..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/printers/tabwriter.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package printers - -import ( - "io" - "text/tabwriter" -) - -const ( - tabwriterMinWidth = 6 - tabwriterWidth = 4 - tabwriterPadding = 3 - tabwriterPadChar = ' ' - tabwriterFlags = 0 -) - -// GetNewTabWriter returns a tabwriter that translates tabbed columns in input into properly aligned text. 
-func GetNewTabWriter(output io.Writer) *tabwriter.Writer {
- return tabwriter.NewWriter(output, tabwriterMinWidth, tabwriterWidth, tabwriterPadding, tabwriterPadChar, tabwriterFlags)
-}
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/OWNERS
new file mode 100644
index 000000000000..1d2bef03480c
--- /dev/null
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/OWNERS
@@ -0,0 +1,9 @@
+approvers:
+ - Random-Liu
+ - dchen1107
+ - derekwaynecarr
+ - tallclair
+ - vishh
+ - yujuhong
+reviewers:
+ - sig-node-reviewers
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/exec/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/exec/BUILD
index 42b68b510b69..317dc0fc0cda 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/exec/BUILD
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/exec/BUILD
@@ -12,7 +12,7 @@ go_library(
 importpath = "k8s.io/kubernetes/pkg/probe/exec",
 deps = [
 "//pkg/probe:go_default_library",
- "//vendor/github.com/golang/glog:go_default_library",
+ "//vendor/k8s.io/klog:go_default_library",
 "//vendor/k8s.io/utils/exec:go_default_library",
 ],
)
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/exec/exec.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/exec/exec.go
index 817492a609f5..a6ae523aa629 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/exec/exec.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/exec/exec.go
@@ -20,30 +20,34 @@ import (
 "k8s.io/kubernetes/pkg/probe"
 "k8s.io/utils/exec"
- "github.com/golang/glog"
+ "k8s.io/klog"
)
-func New() ExecProber {
+// New creates a Prober.
+func New() Prober {
 return execProber{}
}
-type ExecProber interface {
+// Prober is an interface defining the Probe object for container readiness/liveness checks.
+type Prober interface {
 Probe(e exec.Cmd) (probe.Result, string, error)
}
type execProber struct{}
+// Probe executes a command to check the liveness/readiness of a container
+// by running that command. Returns the Result status, command output, and
+// errors if any.
func (pr execProber) Probe(e exec.Cmd) (probe.Result, string, error) {
 data, err := e.CombinedOutput()
- glog.V(4).Infof("Exec probe response: %q", string(data))
+ klog.V(4).Infof("Exec probe response: %q", string(data))
 if err != nil {
 exit, ok := err.(exec.ExitError)
 if ok {
 if exit.ExitStatus() == 0 {
 return probe.Success, string(data), nil
- } else {
- return probe.Failure, string(data), nil
 }
+ return probe.Failure, string(data), nil
 }
 return probe.Unknown, "", err
}
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/http/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/http/BUILD
index 5c788fc3ab9f..98387df305b2 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/http/BUILD
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/http/BUILD
@@ -14,7 +14,7 @@ go_library(
 "//pkg/probe:go_default_library",
 "//pkg/version:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
- "//vendor/github.com/golang/glog:go_default_library",
+ "//vendor/k8s.io/klog:go_default_library",
 ],
)
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/http/http.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/http/http.go
index b9821be05fc7..e9bcc0f3e1bb 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/http/http.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/http/http.go
@@ -28,21 +28,23 @@ import (
 "k8s.io/kubernetes/pkg/probe"
 "k8s.io/kubernetes/pkg/version"
- "github.com/golang/glog"
+ "k8s.io/klog"
)
-func New() HTTPProber {
+// New creates a Prober that will skip TLS verification while probing.
+func New() Prober {
 tlsConfig := &tls.Config{InsecureSkipVerify: true}
 return NewWithTLSConfig(tlsConfig)
}
// NewWithTLSConfig takes tls config as parameter.
-func NewWithTLSConfig(config *tls.Config) HTTPProber {
+func NewWithTLSConfig(config *tls.Config) Prober {
 transport := utilnet.SetTransportDefaults(&http.Transport{TLSClientConfig: config, DisableKeepAlives: true})
 return httpProber{transport}
}
-type HTTPProber interface {
+// Prober is an interface that defines the Probe function for doing HTTP readiness/liveness checks.
+type Prober interface {
 Probe(url *url.URL, headers http.Header, timeout time.Duration) (probe.Result, string, error)
}
@@ -50,12 +52,13 @@ type httpProber struct {
 transport *http.Transport
}
-// Probe returns a ProbeRunner capable of running an http check.
+// Probe returns a ProbeRunner capable of running an HTTP check.
func (pr httpProber) Probe(url *url.URL, headers http.Header, timeout time.Duration) (probe.Result, string, error) {
 return DoHTTPProbe(url, headers, &http.Client{Timeout: timeout, Transport: pr.transport})
}
-type HTTPGetInterface interface {
+// GetHTTPInterface is an interface for making HTTP requests that returns a response and an error.
+type GetHTTPInterface interface {
 Do(req *http.Request) (*http.Response, error)
}
@@ -63,7 +66,7 @@ type HTTPGetInterface interface {
// If the HTTP response code is successful (i.e. 400 > code >= 200), it returns Success.
// If the HTTP response code is unsuccessful or HTTP communication fails, it returns Failure.
// This is exported because some other packages may want to do direct HTTP probes.
-func DoHTTPProbe(url *url.URL, headers http.Header, client HTTPGetInterface) (probe.Result, string, error) {
+func DoHTTPProbe(url *url.URL, headers http.Header, client GetHTTPInterface) (probe.Result, string, error) {
 req, err := http.NewRequest("GET", url.String(), nil)
 if err != nil {
 // Convert errors into failures to catch timeouts.
@@ -93,9 +96,9 @@ func DoHTTPProbe(url *url.URL, headers http.Header, client HTTPGetInterface) (pr
 }
 body := string(b)
 if res.StatusCode >= http.StatusOK && res.StatusCode < http.StatusBadRequest {
- glog.V(4).Infof("Probe succeeded for %s, Response: %v", url.String(), *res)
+ klog.V(4).Infof("Probe succeeded for %s, Response: %v", url.String(), *res)
 return probe.Success, body, nil
 }
- glog.V(4).Infof("Probe failed for %s with request headers %v, response body: %v", url.String(), headers, body)
+ klog.V(4).Infof("Probe failed for %s with request headers %v, response body: %v", url.String(), headers, body)
 return probe.Failure, fmt.Sprintf("HTTP probe failed with statuscode: %d", res.StatusCode), nil
}
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/probe.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/probe.go
index ebbb607c46df..d57bf43874e5 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/probe.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/probe.go
@@ -16,10 +16,14 @@ limitations under the License.
 package probe
+// Result is a string used to handle the results for probing container readiness/liveness
type Result string
const (
+ // Success Result
 Success Result = "success"
+ // Failure Result
 Failure Result = "failure"
+ // Unknown Result
 Unknown Result = "unknown"
)
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/tcp/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/tcp/BUILD
index b14c197e2557..929f1e4fad37 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/tcp/BUILD
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/tcp/BUILD
@@ -12,7 +12,7 @@ go_library(
 importpath = "k8s.io/kubernetes/pkg/probe/tcp",
 deps = [
 "//pkg/probe:go_default_library",
- "//vendor/github.com/golang/glog:go_default_library",
+ "//vendor/k8s.io/klog:go_default_library",
 ],
)
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/tcp/tcp.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/tcp/tcp.go
index cce3be9935e9..7b183e0b45a7 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/tcp/tcp.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/tcp/tcp.go
@@ -23,19 +23,22 @@ import (
 "k8s.io/kubernetes/pkg/probe"
- "github.com/golang/glog"
+ "k8s.io/klog"
)
-func New() TCPProber {
+// New creates a Prober.
+func New() Prober {
 return tcpProber{}
}
-type TCPProber interface {
+// Prober is an interface that defines the Probe function for doing TCP readiness/liveness checks.
+type Prober interface {
 Probe(host string, port int, timeout time.Duration) (probe.Result, string, error)
}
type tcpProber struct{}
+// Probe returns a ProbeRunner capable of running a TCP check.
func (pr tcpProber) Probe(host string, port int, timeout time.Duration) (probe.Result, string, error) { return DoTCPProbe(net.JoinHostPort(host, strconv.Itoa(port)), timeout) } @@ -52,7 +55,7 @@ func DoTCPProbe(addr string, timeout time.Duration) (probe.Result, string, error } err = conn.Close() if err != nil { - glog.Errorf("Unexpected error closing TCP probe socket: %v (%#v)", err, err) + klog.Errorf("Unexpected error closing TCP probe socket: %v (%#v)", err, err) } return probe.Success, "", nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/BUILD index 9920c511765c..bb290126fea2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/BUILD @@ -23,7 +23,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/doc.go index 2fdbebc49aa8..6760677bf804 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/doc.go @@ -15,5 +15,6 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +groupName=kubeproxy.config.k8s.io package config diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/register.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/register.go index c4032da71151..fda569221cbd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/register.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/register.go @@ -21,29 +21,21 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" ) -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -// GroupName is the group name use in this package +// GroupName is the group name used in this package const GroupName = "kubeproxy.config.k8s.io" // SchemeGroupVersion is group version used to register these objects var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a global function that registers this API group & version to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) +// addKnownTypes registers known types to the given scheme func addKnownTypes(scheme *runtime.Scheme) error { - // TODO this will get cleaned up with the scheme types are fixed scheme.AddKnownTypes(SchemeGroupVersion, &KubeProxyConfiguration{}, ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/types.go 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/types.go
index 7df427a7f806..dacee32bb3a6 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/types.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/types.go
@@ -172,7 +172,7 @@ type IPVSSchedulerMethod string
const (
 // RoundRobin distributes jobs equally amongst the available real servers.
 RoundRobin IPVSSchedulerMethod = "rr"
- // WeightedRoundRobin assigns jobs to real servers proportionally to there real servers' weight.
+ // WeightedRoundRobin assigns jobs to real servers proportionally to the real servers' weights.
 // Servers with higher weights receive new jobs first and get more jobs than servers with lower weights.
 // Servers with equal weights get an equal distribution of new jobs.
 WeightedRoundRobin IPVSSchedulerMethod = "wrr"
@@ -199,7 +199,7 @@ const (
 DestinationHashing IPVSSchedulerMethod = "dh"
 // ShortestExpectedDelay assigns an incoming job to the server with the shortest expected delay.
 // The expected delay that the job will experience is (Ci + 1) / Ui if sent to the ith server, in which
- // Ci is the number of jobs on the the ith server and Ui is the fixed service rate (weight) of the ith server.
+ // Ci is the number of jobs on the ith server and Ui is the fixed service rate (weight) of the ith server.
 ShortestExpectedDelay IPVSSchedulerMethod = "sed"
 // NeverQueue assigns an incoming job to an idle server if there is, instead of waiting for a fast one;
 // if all the servers are busy, it adopts the ShortestExpectedDelay policy to assign the job.
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1/doc.go
index 89619167e204..7cadbf80e99d 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1/doc.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1/doc.go
@@ -20,4 +20,5 @@ limitations under the License.
 // +k8s:defaulter-gen=TypeMeta
 // +k8s:defaulter-gen-input=../../../../../vendor/k8s.io/kube-proxy/config/v1alpha1
 // +groupName=kubeproxy.config.k8s.io
+
package v1alpha1
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1/register.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1/register.go
index e21fa22982c6..a053b1e77839 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1/register.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1/register.go
@@ -21,16 +21,18 @@ import (
 kubeproxyconfigv1alpha1 "k8s.io/kube-proxy/config/v1alpha1"
)
-// GroupName is the group name use in this package
+// GroupName is the group name used in this package
const GroupName = "kubeproxy.config.k8s.io"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
var (
- // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+ // localSchemeBuilder extends the SchemeBuilder instance with the external types. In this package,
+ // defaulting and conversion init funcs are registered as well.
localSchemeBuilder = &kubeproxyconfigv1alpha1.SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme + // AddToScheme is a global function that registers this API group & version to a scheme + AddToScheme = localSchemeBuilder.AddToScheme ) func init() { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/config/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/config/BUILD index e45009ff4a8f..b68b6f036e2a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/config/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/config/BUILD @@ -20,7 +20,7 @@ go_library( "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/config/config.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/config/config.go index 0a2bd65f3e57..2b22e643b97d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/config/config.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/config/config.go @@ -20,12 +20,12 @@ import ( "fmt" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" utilruntime "k8s.io/apimachinery/pkg/util/runtime" coreinformers "k8s.io/client-go/informers/core/v1" listers "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" + "k8s.io/klog" "k8s.io/kubernetes/pkg/controller" ) @@ -99,15 +99,15 @@ func (c *EndpointsConfig) RegisterEventHandler(handler EndpointsHandler) { func (c *EndpointsConfig) Run(stopCh <-chan struct{}) { defer utilruntime.HandleCrash() - glog.Info("Starting endpoints config controller") - defer glog.Info("Shutting down endpoints config controller") + klog.Info("Starting endpoints config controller") + defer klog.Info("Shutting down endpoints config controller") if !controller.WaitForCacheSync("endpoints config", stopCh, c.listerSynced) { return } for i := range c.eventHandlers { - glog.V(3).Infof("Calling handler.OnEndpointsSynced()") + klog.V(3).Infof("Calling handler.OnEndpointsSynced()") c.eventHandlers[i].OnEndpointsSynced() } @@ -121,7 +121,7 @@ func (c *EndpointsConfig) handleAddEndpoints(obj interface{}) { return } for i := range c.eventHandlers { - glog.V(4).Infof("Calling handler.OnEndpointsAdd") + klog.V(4).Infof("Calling handler.OnEndpointsAdd") c.eventHandlers[i].OnEndpointsAdd(endpoints) } } @@ -138,7 +138,7 @@ func (c *EndpointsConfig) handleUpdateEndpoints(oldObj, newObj interface{}) { return } for i := range c.eventHandlers { - glog.V(4).Infof("Calling handler.OnEndpointsUpdate") + klog.V(4).Infof("Calling handler.OnEndpointsUpdate") c.eventHandlers[i].OnEndpointsUpdate(oldEndpoints, endpoints) } } @@ -157,7 +157,7 @@ func (c *EndpointsConfig) handleDeleteEndpoints(obj interface{}) { } } for i := range c.eventHandlers { - glog.V(4).Infof("Calling handler.OnEndpointsDelete") + klog.V(4).Infof("Calling handler.OnEndpointsDelete") c.eventHandlers[i].OnEndpointsDelete(endpoints) } } @@ -199,15 +199,15 @@ func (c *ServiceConfig) RegisterEventHandler(handler ServiceHandler) { func (c *ServiceConfig) Run(stopCh <-chan struct{}) { defer utilruntime.HandleCrash() - glog.Info("Starting service config controller") - defer glog.Info("Shutting down service config controller") + klog.Info("Starting service config controller") + defer klog.Info("Shutting down 
service config controller") if !controller.WaitForCacheSync("service config", stopCh, c.listerSynced) { return } for i := range c.eventHandlers { - glog.V(3).Infof("Calling handler.OnServiceSynced()") + klog.V(3).Info("Calling handler.OnServiceSynced()") c.eventHandlers[i].OnServiceSynced() } @@ -221,7 +221,7 @@ func (c *ServiceConfig) handleAddService(obj interface{}) { return } for i := range c.eventHandlers { - glog.V(4).Infof("Calling handler.OnServiceAdd") + klog.V(4).Info("Calling handler.OnServiceAdd") c.eventHandlers[i].OnServiceAdd(service) } } @@ -238,7 +238,7 @@ func (c *ServiceConfig) handleUpdateService(oldObj, newObj interface{}) { return } for i := range c.eventHandlers { - glog.V(4).Infof("Calling handler.OnServiceUpdate") + klog.V(4).Info("Calling handler.OnServiceUpdate") c.eventHandlers[i].OnServiceUpdate(oldService, service) } } @@ -257,7 +257,7 @@ func (c *ServiceConfig) handleDeleteService(obj interface{}) { } } for i := range c.eventHandlers { - glog.V(4).Infof("Calling handler.OnServiceDelete") + klog.V(4).Info("Calling handler.OnServiceDelete") c.eventHandlers[i].OnServiceDelete(service) } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/endpoints.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/endpoints.go index 06c7d3ad5ab6..067be71d79b4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/endpoints.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/endpoints.go @@ -22,7 +22,7 @@ import ( "strconv" "sync" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -197,7 +197,7 @@ func (ect *EndpointChangeTracker) endpointsToEndpointsMap(endpoints *v1.Endpoint for i := range ss.Ports { port := &ss.Ports[i] if port.Port == 0 { - glog.Warningf("ignoring invalid endpoint port %s", port.Name) + klog.Warningf("ignoring invalid endpoint port %s", port.Name) continue } svcPortName := ServicePortName{ @@ -207,7 +207,7 @@ func (ect *EndpointChangeTracker) endpointsToEndpointsMap(endpoints *v1.Endpoint for i := range ss.Addresses { addr := &ss.Addresses[i] if addr.IP == "" { - glog.Warningf("ignoring invalid endpoint port %s with empty host", port.Name) + klog.Warningf("ignoring invalid endpoint port %s with empty host", port.Name) continue } // Filter out the incorrect IP version case. 
@@ -226,12 +226,12 @@ func (ect *EndpointChangeTracker) endpointsToEndpointsMap(endpoints *v1.Endpoint endpointsMap[svcPortName] = append(endpointsMap[svcPortName], baseEndpointInfo) } } - if glog.V(3) { + if klog.V(3) { newEPList := []string{} for _, ep := range endpointsMap[svcPortName] { newEPList = append(newEPList, ep.String()) } - glog.Infof("Setting endpoints for %q to %+v", svcPortName, newEPList) + klog.Infof("Setting endpoints for %q to %+v", svcPortName, newEPList) } } } @@ -299,7 +299,7 @@ func detectStaleConnections(oldEndpointsMap, newEndpointsMap EndpointsMap, stale } } if stale { - glog.V(4).Infof("Stale endpoint %v -> %v", svcPortName, ep.String()) + klog.V(4).Infof("Stale endpoint %v -> %v", svcPortName, ep.String()) *staleEndpoints = append(*staleEndpoints, ServiceEndpoint{Endpoint: ep.String(), ServicePortName: svcPortName}) } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/BUILD index b711fcdb07d9..f94de593ce05 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/BUILD @@ -20,8 +20,8 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/renstrom/dedent:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck.go index 93e5edf6112f..5dc3f009ecb6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck.go @@ -25,8 +25,8 @@ import ( "sync/atomic" "time" - "github.com/golang/glog" "github.com/renstrom/dedent" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -133,9 +133,9 @@ func (hcs *server) SyncServices(newServices map[types.NamespacedName]uint16) err // Remove any that are not needed any more. for nsn, svc := range hcs.services { if port, found := newServices[nsn]; !found || port != svc.port { - glog.V(2).Infof("Closing healthcheck %q on port %d", nsn.String(), svc.port) + klog.V(2).Infof("Closing healthcheck %q on port %d", nsn.String(), svc.port) if err := svc.listener.Close(); err != nil { - glog.Errorf("Close(%v): %v", svc.listener.Addr(), err) + klog.Errorf("Close(%v): %v", svc.listener.Addr(), err) } delete(hcs.services, nsn) } @@ -144,11 +144,11 @@ func (hcs *server) SyncServices(newServices map[types.NamespacedName]uint16) err // Add any that are needed. 
for nsn, port := range newServices { if hcs.services[nsn] != nil { - glog.V(3).Infof("Existing healthcheck %q on port %d", nsn.String(), port) + klog.V(3).Infof("Existing healthcheck %q on port %d", nsn.String(), port) continue } - glog.V(2).Infof("Opening healthcheck %q on port %d", nsn.String(), port) + klog.V(2).Infof("Opening healthcheck %q on port %d", nsn.String(), port) svc := &hcInstance{port: port} addr := fmt.Sprintf(":%d", port) svc.server = hcs.httpFactory.New(addr, hcHandler{name: nsn, hcs: hcs}) @@ -166,19 +166,19 @@ func (hcs *server) SyncServices(newServices map[types.NamespacedName]uint16) err UID: types.UID(nsn.String()), }, api.EventTypeWarning, "FailedToStartServiceHealthcheck", msg) } - glog.Error(msg) + klog.Error(msg) continue } hcs.services[nsn] = svc go func(nsn types.NamespacedName, svc *hcInstance) { // Serve() will exit when the listener is closed. - glog.V(3).Infof("Starting goroutine for healthcheck %q on port %d", nsn.String(), svc.port) + klog.V(3).Infof("Starting goroutine for healthcheck %q on port %d", nsn.String(), svc.port) if err := svc.server.Serve(svc.listener); err != nil { - glog.V(3).Infof("Healthcheck %q closed: %v", nsn.String(), err) + klog.V(3).Infof("Healthcheck %q closed: %v", nsn.String(), err) return } - glog.V(3).Infof("Healthcheck %q closed", nsn.String()) + klog.V(3).Infof("Healthcheck %q closed", nsn.String()) }(nsn, svc) } return nil @@ -203,7 +203,7 @@ func (h hcHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { svc, ok := h.hcs.services[h.name] if !ok || svc == nil { h.hcs.lock.Unlock() - glog.Errorf("Received request for closed healthcheck %q", h.name.String()) + klog.Errorf("Received request for closed healthcheck %q", h.name.String()) return } count := svc.endpoints @@ -232,10 +232,10 @@ func (hcs *server) SyncEndpoints(newEndpoints map[types.NamespacedName]int) erro for nsn, count := range newEndpoints { if hcs.services[nsn] == nil { - glog.V(3).Infof("Not saving endpoints for unknown healthcheck %q", nsn.String()) + klog.V(3).Infof("Not saving endpoints for unknown healthcheck %q", nsn.String()) continue } - glog.V(3).Infof("Reporting %d endpoints for healthcheck %q", count, nsn.String()) + klog.V(3).Infof("Reporting %d endpoints for healthcheck %q", count, nsn.String()) hcs.services[nsn].endpoints = count } for nsn, hci := range hcs.services { @@ -306,7 +306,7 @@ func (hs *HealthzServer) Run() { server := hs.httpFactory.New(hs.addr, serveMux) go wait.Until(func() { - glog.V(3).Infof("Starting goroutine for healthz on %s", hs.addr) + klog.V(3).Infof("Starting goroutine for healthz on %s", hs.addr) listener, err := hs.listener.Listen(hs.addr) if err != nil { @@ -314,15 +314,15 @@ func (hs *HealthzServer) Run() { if hs.recorder != nil { hs.recorder.Eventf(hs.nodeRef, api.EventTypeWarning, "FailedToStartNodeHealthcheck", msg) } - glog.Error(msg) + klog.Error(msg) return } if err := server.Serve(listener); err != nil { - glog.Errorf("Healthz closed with error: %v", err) + klog.Errorf("Healthz closed with error: %v", err) return } - glog.Errorf("Unexpected healthz closed.") + klog.Error("Unexpected healthz closed.") }, nodeHealthzRetryInterval, wait.NeverStop) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/iptables/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/iptables/BUILD index 1fb1875316be..2d1794529cd3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/iptables/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/iptables/BUILD @@ -20,12 +20,12 @@ 
go_library( "//pkg/util/iptables:go_default_library", "//pkg/util/net:go_default_library", "//pkg/util/sysctl:go_default_library", - "//pkg/util/version:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) @@ -46,7 +46,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", "//vendor/k8s.io/utils/exec/testing:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go index 41f03d49cde0..1fb994be32f7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go @@ -32,10 +32,11 @@ import ( "sync/atomic" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/record" "k8s.io/kubernetes/pkg/proxy" @@ -47,7 +48,6 @@ import ( utiliptables "k8s.io/kubernetes/pkg/util/iptables" utilnet "k8s.io/kubernetes/pkg/util/net" utilsysctl "k8s.io/kubernetes/pkg/util/sysctl" - utilversion "k8s.io/kubernetes/pkg/util/version" utilexec "k8s.io/utils/exec" ) @@ -182,7 +182,7 @@ func newEndpointInfo(baseInfo *proxy.BaseEndpointInfo) proxy.Endpoint { func (e *endpointsInfo) Equal(other proxy.Endpoint) bool { o, ok := other.(*endpointsInfo) if !ok { - glog.Errorf("Failed to cast endpointsInfo") + klog.Error("Failed to cast endpointsInfo") return false } return e.Endpoint == o.Endpoint && @@ -293,15 +293,17 @@ func NewProxier(ipt utiliptables.Interface, nodePortAddresses []string, ) (*Proxier, error) { // Set the route_localnet sysctl we need for - if err := sysctl.SetSysctl(sysctlRouteLocalnet, 1); err != nil { - return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlRouteLocalnet, err) + if val, _ := sysctl.GetSysctl(sysctlRouteLocalnet); val != 1 { + if err := sysctl.SetSysctl(sysctlRouteLocalnet, 1); err != nil { + return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlRouteLocalnet, err) + } } // Proxy needs br_netfilter and bridge-nf-call-iptables=1 when containers // are connected to a Linux bridge (but not SDN bridges). Until most // plugins handle this, log when config is missing if val, err := sysctl.GetSysctl(sysctlBridgeCallIPTables); err == nil && val != 1 { - glog.Warningf("missing br-netfilter module or unset sysctl br-nf-call-iptables; proxy may not work as intended") + klog.Warning("missing br-netfilter module or unset sysctl br-nf-call-iptables; proxy may not work as intended") } // Generate the masquerade mark to use for SNAT rules. 
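The NewProxier hunk above swaps an unconditional SetSysctl for a read-then-write: route_localnet is only written when it is not already 1, which keeps kube-proxy from failing in environments (for example, containers with a read-only /proc/sys) where the value is already correct. The same shape is repeated for the IPVS proxier's sysctls later in this diff. A minimal sketch of the pattern factored into a helper; the name ensureSysctl is hypothetical, not part of this change:

```go
// ensureSysctl captures the get-before-set pattern introduced above: write
// the sysctl only when the current value differs, so an already-correct
// value never triggers a write (or a write failure on read-only /proc/sys).
// utilsysctl is k8s.io/kubernetes/pkg/util/sysctl.
func ensureSysctl(sysctl utilsysctl.Interface, name string, newVal int) error {
	if oldVal, _ := sysctl.GetSysctl(name); oldVal != newVal {
		if err := sysctl.SetSysctl(name, newVal); err != nil {
			return fmt.Errorf("can't set sysctl %s to %d: %v", name, newVal, err)
		}
	}
	return nil
}
```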
@@ -309,12 +311,12 @@ func NewProxier(ipt utiliptables.Interface, masqueradeMark := fmt.Sprintf("%#08x/%#08x", masqueradeValue, masqueradeValue) if nodeIP == nil { - glog.Warningf("invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP") + klog.Warning("invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP") nodeIP = net.ParseIP("127.0.0.1") } if len(clusterCIDR) == 0 { - glog.Warningf("clusterCIDR not specified, unable to distinguish between internal and external traffic") + klog.Warning("clusterCIDR not specified, unable to distinguish between internal and external traffic") } else if utilnet.IsIPv6CIDR(clusterCIDR) != ipt.IsIpv6() { return nil, fmt.Errorf("clusterCIDR %s has incorrect IP version: expect isIPv6=%t", clusterCIDR, ipt.IsIpv6()) } @@ -350,7 +352,7 @@ func NewProxier(ipt utiliptables.Interface, networkInterfacer: utilproxy.RealNetwork{}, } burstSyncs := 2 - glog.V(3).Infof("minSyncPeriod: %v, syncPeriod: %v, burstSyncs: %d", minSyncPeriod, syncPeriod, burstSyncs) + klog.V(3).Infof("minSyncPeriod: %v, syncPeriod: %v, burstSyncs: %d", minSyncPeriod, syncPeriod, burstSyncs) proxier.syncRunner = async.NewBoundedFrequencyRunner("sync-runner", proxier.syncProxyRules, minSyncPeriod, syncPeriod, burstSyncs) return proxier, nil } @@ -390,7 +392,7 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) { ) if err := ipt.DeleteRule(chain.table, chain.sourceChain, args...); err != nil { if !utiliptables.IsNotFoundError(err) { - glog.Errorf("Error removing pure-iptables proxy rule: %v", err) + klog.Errorf("Error removing pure-iptables proxy rule: %v", err) encounteredError = true } } @@ -399,7 +401,7 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) { // Flush and remove all of our "-t nat" chains. iptablesData := bytes.NewBuffer(nil) if err := ipt.SaveInto(utiliptables.TableNAT, iptablesData); err != nil { - glog.Errorf("Failed to execute iptables-save for %s: %v", utiliptables.TableNAT, err) + klog.Errorf("Failed to execute iptables-save for %s: %v", utiliptables.TableNAT, err) encounteredError = true } else { existingNATChains := utiliptables.GetChainLines(utiliptables.TableNAT, iptablesData.Bytes()) @@ -427,7 +429,7 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) { // Write it. err = ipt.Restore(utiliptables.TableNAT, natLines, utiliptables.NoFlushTables, utiliptables.RestoreCounters) if err != nil { - glog.Errorf("Failed to execute iptables-restore for %s: %v", utiliptables.TableNAT, err) + klog.Errorf("Failed to execute iptables-restore for %s: %v", utiliptables.TableNAT, err) encounteredError = true } } @@ -435,7 +437,7 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) { // Flush and remove all of our "-t filter" chains. iptablesData.Reset() if err := ipt.SaveInto(utiliptables.TableFilter, iptablesData); err != nil { - glog.Errorf("Failed to execute iptables-save for %s: %v", utiliptables.TableFilter, err) + klog.Errorf("Failed to execute iptables-save for %s: %v", utiliptables.TableFilter, err) encounteredError = true } else { existingFilterChains := utiliptables.GetChainLines(utiliptables.TableFilter, iptablesData.Bytes()) @@ -453,7 +455,7 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) { filterLines := append(filterChains.Bytes(), filterRules.Bytes()...) // Write it. 
if err := ipt.Restore(utiliptables.TableFilter, filterLines, utiliptables.NoFlushTables, utiliptables.RestoreCounters); err != nil { - glog.Errorf("Failed to execute iptables-restore for %s: %v", utiliptables.TableFilter, err) + klog.Errorf("Failed to execute iptables-restore for %s: %v", utiliptables.TableFilter, err) encounteredError = true } } @@ -607,7 +609,7 @@ func (proxier *Proxier) deleteEndpointConnections(connectionMap []proxy.ServiceE endpointIP := utilproxy.IPPart(epSvcPair.Endpoint) err := conntrack.ClearEntriesForNAT(proxier.exec, svcInfo.ClusterIPString(), endpointIP, v1.ProtocolUDP) if err != nil { - glog.Errorf("Failed to delete %s endpoint connections, error: %v", epSvcPair.ServicePortName.String(), err) + klog.Errorf("Failed to delete %s endpoint connections, error: %v", epSvcPair.ServicePortName.String(), err) } } } @@ -636,11 +638,11 @@ func (proxier *Proxier) syncProxyRules() { start := time.Now() defer func() { metrics.SyncProxyRulesLatency.Observe(metrics.SinceInMicroseconds(start)) - glog.V(4).Infof("syncProxyRules took %v", time.Since(start)) + klog.V(4).Infof("syncProxyRules took %v", time.Since(start)) }() // don't sync rules till we've received services and endpoints if !proxier.endpointsSynced || !proxier.servicesSynced { - glog.V(2).Info("Not syncing iptables until Services and Endpoints have been received from master") + klog.V(2).Info("Not syncing iptables until Services and Endpoints have been received from master") return } @@ -654,17 +656,17 @@ func (proxier *Proxier) syncProxyRules() { // merge stale services gathered from updateEndpointsMap for _, svcPortName := range endpointUpdateResult.StaleServiceNames { if svcInfo, ok := proxier.serviceMap[svcPortName]; ok && svcInfo != nil && svcInfo.GetProtocol() == v1.ProtocolUDP { - glog.V(2).Infof("Stale udp service %v -> %s", svcPortName, svcInfo.ClusterIPString()) + klog.V(2).Infof("Stale udp service %v -> %s", svcPortName, svcInfo.ClusterIPString()) staleServices.Insert(svcInfo.ClusterIPString()) } } - glog.V(3).Infof("Syncing iptables rules") + klog.V(3).Info("Syncing iptables rules") // Create and link the kube chains. 
for _, chain := range iptablesJumpChains { if _, err := proxier.iptables.EnsureChain(chain.table, chain.chain); err != nil { - glog.Errorf("Failed to ensure that %s chain %s exists: %v", chain.table, kubeServicesChain, err) + klog.Errorf("Failed to ensure that %s chain %s exists: %v", chain.table, kubeServicesChain, err) return } args := append(chain.extraArgs, @@ -672,7 +674,7 @@ func (proxier *Proxier) syncProxyRules() { "-j", string(chain.chain), ) if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, chain.table, chain.sourceChain, args...); err != nil { - glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", chain.table, chain.sourceChain, chain.chain, err) + klog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", chain.table, chain.sourceChain, chain.chain, err) return } } @@ -687,7 +689,7 @@ func (proxier *Proxier) syncProxyRules() { proxier.existingFilterChainsData.Reset() err := proxier.iptables.SaveInto(utiliptables.TableFilter, proxier.existingFilterChainsData) if err != nil { // if we failed to get any rules - glog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err) + klog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err) } else { // otherwise parse the output existingFilterChains = utiliptables.GetChainLines(utiliptables.TableFilter, proxier.existingFilterChainsData.Bytes()) } @@ -697,7 +699,7 @@ func (proxier *Proxier) syncProxyRules() { proxier.iptablesData.Reset() err = proxier.iptables.SaveInto(utiliptables.TableNAT, proxier.iptablesData) if err != nil { // if we failed to get any rules - glog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err) + klog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err) } else { // otherwise parse the output existingNATChains = utiliptables.GetChainLines(utiliptables.TableNAT, proxier.iptablesData.Bytes()) } @@ -778,7 +780,7 @@ func (proxier *Proxier) syncProxyRules() { for svcName, svc := range proxier.serviceMap { svcInfo, ok := svc.(*serviceInfo) if !ok { - glog.Errorf("Failed to cast serviceInfo %q", svcName.String()) + klog.Errorf("Failed to cast serviceInfo %q", svcName.String()) continue } isIPv6 := utilnet.IsIPv6(svcInfo.ClusterIP) @@ -846,7 +848,7 @@ func (proxier *Proxier) syncProxyRules() { // machine, hold the local port open so no other process can open it // (because the socket might open but it would never work). if local, err := utilproxy.IsLocalIP(externalIP); err != nil { - glog.Errorf("can't determine if IP is local, assuming not: %v", err) + klog.Errorf("can't determine if IP is local, assuming not: %v", err) } else if local && (svcInfo.GetProtocol() != v1.ProtocolSCTP) { lp := utilproxy.LocalPort{ Description: "externalIP for " + svcNameString, @@ -855,7 +857,7 @@ func (proxier *Proxier) syncProxyRules() { Protocol: protocol, } if proxier.portsMap[lp] != nil { - glog.V(4).Infof("Port %s was open before and is still needed", lp.String()) + klog.V(4).Infof("Port %s was open before and is still needed", lp.String()) replacementPortsMap[lp] = proxier.portsMap[lp] } else { socket, err := proxier.portMapper.OpenLocalPort(&lp) @@ -869,7 +871,7 @@ func (proxier *Proxier) syncProxyRules() { UID: types.UID(proxier.hostname), Namespace: "", }, v1.EventTypeWarning, err.Error(), msg) - glog.Error(msg) + klog.Error(msg) continue } replacementPortsMap[lp] = socket @@ -989,7 +991,7 @@ func (proxier *Proxier) syncProxyRules() { // (because the socket might open but it would never work). 
addresses, err := utilproxy.GetNodeAddresses(proxier.nodePortAddresses, proxier.networkInterfacer) if err != nil { - glog.Errorf("Failed to get node ip address matching nodeport cidr: %v", err) + klog.Errorf("Failed to get node ip address matching nodeport cidr: %v", err) continue } @@ -1014,12 +1016,12 @@ func (proxier *Proxier) syncProxyRules() { // For ports on node IPs, open the actual port and hold it. for _, lp := range lps { if proxier.portsMap[lp] != nil { - glog.V(4).Infof("Port %s was open before and is still needed", lp.String()) + klog.V(4).Infof("Port %s was open before and is still needed", lp.String()) replacementPortsMap[lp] = proxier.portsMap[lp] } else if svcInfo.GetProtocol() != v1.ProtocolSCTP { socket, err := proxier.portMapper.OpenLocalPort(&lp) if err != nil { - glog.Errorf("can't open %s, skipping this nodePort: %v", lp.String(), err) + klog.Errorf("can't open %s, skipping this nodePort: %v", lp.String(), err) continue } if lp.Protocol == "udp" { @@ -1029,7 +1031,7 @@ func (proxier *Proxier) syncProxyRules() { // See issue: https://github.com/kubernetes/kubernetes/issues/49881 err := conntrack.ClearEntriesForPort(proxier.exec, lp.Port, isIPv6, v1.ProtocolUDP) if err != nil { - glog.Errorf("Failed to clear udp conntrack for port %d, error: %v", lp.Port, err) + klog.Errorf("Failed to clear udp conntrack for port %d, error: %v", lp.Port, err) } } replacementPortsMap[lp] = socket @@ -1085,7 +1087,7 @@ func (proxier *Proxier) syncProxyRules() { for _, ep := range proxier.endpointsMap[svcName] { epInfo, ok := ep.(*endpointsInfo) if !ok { - glog.Errorf("Failed to cast endpointsInfo %q", ep.String()) + klog.Errorf("Failed to cast endpointsInfo %q", ep.String()) continue } endpoints = append(endpoints, epInfo) @@ -1251,7 +1253,7 @@ func (proxier *Proxier) syncProxyRules() { // other service portal rules. addresses, err := utilproxy.GetNodeAddresses(proxier.nodePortAddresses, proxier.networkInterfacer) if err != nil { - glog.Errorf("Failed to get node ip address matching nodeport cidr") + klog.Errorf("Failed to get node ip address matching nodeport cidr") } else { isIPv6 := proxier.iptables.IsIpv6() for address := range addresses { @@ -1268,7 +1270,7 @@ func (proxier *Proxier) syncProxyRules() { } // Ignore IP addresses with incorrect version if isIPv6 && !utilnet.IsIPv6String(address) || !isIPv6 && utilnet.IsIPv6String(address) { - glog.Errorf("IP address %s has incorrect IP version", address) + klog.Errorf("IP address %s has incorrect IP version", address) continue } // create nodeport rules for each IP one by one @@ -1327,12 +1329,12 @@ func (proxier *Proxier) syncProxyRules() { proxier.iptablesData.Write(proxier.natChains.Bytes()) proxier.iptablesData.Write(proxier.natRules.Bytes()) - glog.V(5).Infof("Restoring iptables rules: %s", proxier.iptablesData.Bytes()) + klog.V(5).Infof("Restoring iptables rules: %s", proxier.iptablesData.Bytes()) err = proxier.iptables.RestoreAll(proxier.iptablesData.Bytes(), utiliptables.NoFlushTables, utiliptables.RestoreCounters) if err != nil { - glog.Errorf("Failed to execute iptables-restore: %v", err) + klog.Errorf("Failed to execute iptables-restore: %v", err) // Revert new local ports. 
- glog.V(2).Infof("Closing local ports after iptables-restore failure") + klog.V(2).Infof("Closing local ports after iptables-restore failure") utilproxy.RevertPorts(replacementPortsMap, proxier.portsMap) return } @@ -1354,17 +1356,17 @@ func (proxier *Proxier) syncProxyRules() { // not "OnlyLocal", but the services list will not, and the healthChecker // will just drop those endpoints. if err := proxier.healthChecker.SyncServices(serviceUpdateResult.HCServiceNodePorts); err != nil { - glog.Errorf("Error syncing healtcheck services: %v", err) + klog.Errorf("Error syncing healthcheck services: %v", err) } if err := proxier.healthChecker.SyncEndpoints(endpointUpdateResult.HCEndpointsLocalIPSize); err != nil { - glog.Errorf("Error syncing healthcheck endpoints: %v", err) + klog.Errorf("Error syncing healthcheck endpoints: %v", err) } // Finish housekeeping. // TODO: these could be made more consistent. for _, svcIP := range staleServices.UnsortedList() { if err := conntrack.ClearEntriesForIP(proxier.exec, svcIP, v1.ProtocolUDP); err != nil { - glog.Errorf("Failed to delete stale service IP %s connections, error: %v", svcIP, err) + klog.Errorf("Failed to delete stale service IP %s connections, error: %v", svcIP, err) } } proxier.deleteEndpointConnections(endpointUpdateResult.StaleEndpoints) @@ -1422,6 +1424,6 @@ func openLocalPort(lp *utilproxy.LocalPort) (utilproxy.Closeable, error) { default: return nil, fmt.Errorf("unknown protocol %q", lp.Protocol) } - glog.V(2).Infof("Opened local port %s", lp.String()) + klog.V(2).Infof("Opened local port %s", lp.String()) return socket, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/BUILD index 6b1141aadf1e..002b616f3f84 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/BUILD @@ -37,6 +37,7 @@ go_test( go_library( name = "go_default_library", srcs = [ + "graceful_termination.go", "ipset.go", "netlink.go", "netlink_linux.go", @@ -56,13 +57,13 @@ go_library( "//pkg/util/ipvs:go_default_library", "//pkg/util/net:go_default_library", "//pkg/util/sysctl:go_default_library", - "//pkg/util/version:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:linux": [ diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/README.md b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/README.md index fcdca8074492..eb0f456f690a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/README.md +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/README.md @@ -38,7 +38,7 @@ Differences between IPVS mode and IPTABLES mode are as follows: ### When ipvs falls back to iptables IPVS proxier will employ iptables in doing packet filtering, SNAT or masquerade. 
-Specifically, ipvs proxier will use ipset to store source or destination address of traffics that need DROP or do masquared, to make sure the number of iptables rules be constant, no metter how many services we have. +Specifically, ipvs proxier will use ipset to store the source or destination addresses of traffic that needs DROP or masquerade, to make sure the number of iptables rules stays constant, no matter how many services we have. Here is the table of ipset sets that ipvs proxier used. @@ -259,7 +259,7 @@ ACCEPT all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-EXT Currently, local-up scripts, GCE scripts and kubeadm support switching IPVS proxy mode via exporting environment variables or specifying flags. ### Prerequisite -Ensure IPVS required kernel modules +Ensure IPVS required kernel modules are loaded (**Note**: use `nf_conntrack` instead of `nf_conntrack_ipv4` for Linux kernel 4.19 and later) ```shell ip_vs ip_vs_rr diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/graceful_termination.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/graceful_termination.go new file mode 100644 index 000000000000..d9357d2c6d8c --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/graceful_termination.go @@ -0,0 +1,220 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ipvs + +import ( + "fmt" + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog" + utilipvs "k8s.io/kubernetes/pkg/util/ipvs" +) + +const ( + rsGracefulDeletePeriod = 15 * time.Minute + rsCheckDeleteInterval = 1 * time.Minute +) + +// listItem stores real server information and the process time. +// If nothing special happens, the real server will be deleted after the process time. 
+type listItem struct { + VirtualServer *utilipvs.VirtualServer + RealServer *utilipvs.RealServer +} + +// String returns the unique real server name (with virtual server information) +func (g *listItem) String() string { + return GetUniqueRSName(g.VirtualServer, g.RealServer) +} + +// GetUniqueRSName returns a unique rs name qualified with the vs information +func GetUniqueRSName(vs *utilipvs.VirtualServer, rs *utilipvs.RealServer) string { + return vs.String() + "/" + rs.String() +} + +type graceTerminateRSList struct { + lock sync.Mutex + list map[string]*listItem +} + +// add pushes a new element onto the rsList +func (q *graceTerminateRSList) add(rs *listItem) bool { + q.lock.Lock() + defer q.lock.Unlock() + + uniqueRS := rs.String() + if _, ok := q.list[uniqueRS]; ok { + return false + } + + klog.V(5).Infof("Adding rs %v to graceful delete rsList", rs) + q.list[uniqueRS] = rs + return true +} + +// remove removes an element from the rsList +func (q *graceTerminateRSList) remove(rs *listItem) bool { + q.lock.Lock() + defer q.lock.Unlock() + + uniqueRS := rs.String() + if _, ok := q.list[uniqueRS]; ok { + delete(q.list, uniqueRS) + return true + } + return false +} + +func (q *graceTerminateRSList) flushList(handler func(rsToDelete *listItem) (bool, error)) bool { + success := true + for name, rs := range q.list { + deleted, err := handler(rs) + if err != nil { + klog.Errorf("Failed to delete rs %q: %v", name, err) + success = false + } + if deleted { + klog.Infof("Removed rs %q from the graceful delete rsList", name) + q.remove(rs) + } + } + return success +} + +// exist checks whether the specified unique RS is in the rsList +func (q *graceTerminateRSList) exist(uniqueRS string) (*listItem, bool) { + q.lock.Lock() + defer q.lock.Unlock() + + if rs, ok := q.list[uniqueRS]; ok { + return rs, true + } + return nil, false +} + +// GracefulTerminationManager manages rs graceful termination information and does the graceful termination work +// rsList is the list of rs pending graceful termination, ipvs is the ipvs interface used to do the rs delete/update work +type GracefulTerminationManager struct { + rsList graceTerminateRSList + ipvs utilipvs.Interface +} + +// NewGracefulTerminationManager creates a GracefulTerminationManager to manage ipvs rs graceful termination work +func NewGracefulTerminationManager(ipvs utilipvs.Interface) *GracefulTerminationManager { + l := make(map[string]*listItem) + return &GracefulTerminationManager{ + rsList: graceTerminateRSList{ + list: l, + }, + ipvs: ipvs, + } +} + +// InTerminationList checks whether the specified unique rs name is in the graceful termination list +func (m *GracefulTerminationManager) InTerminationList(uniqueRS string) bool { + _, exist := m.rsList.exist(uniqueRS) + return exist +} + +// GracefulDeleteRS updates the rs weight to 0 and adds the rs to the graceful termination list +func (m *GracefulTerminationManager) GracefulDeleteRS(vs *utilipvs.VirtualServer, rs *utilipvs.RealServer) error { + // Try to delete the rs before adding it to the graceful delete list + ele := &listItem{ + VirtualServer: vs, + RealServer: rs, + } + deleted, err := m.deleteRsFunc(ele) + if err != nil { + klog.Errorf("Failed to delete rs %q: %v", ele.String(), err) + } + if deleted { + return nil + } + rs.Weight = 0 + err = m.ipvs.UpdateRealServer(vs, rs) + if err != nil { + return err + } + klog.V(5).Infof("Adding an element to graceful delete rsList: %+v", ele) + m.rsList.add(ele) + return nil +} + +func (m *GracefulTerminationManager) deleteRsFunc(rsToDelete *listItem) (bool, error) { + klog.Infof("Trying to delete rs: %s", 
rsToDelete.String()) + rss, err := m.ipvs.GetRealServers(rsToDelete.VirtualServer) + if err != nil { + return false, err + } + for _, rs := range rss { + if rsToDelete.RealServer.Equal(rs) { + if rs.ActiveConn != 0 { + return false, nil + } + klog.Infof("Deleting rs: %s", rsToDelete.String()) + err := m.ipvs.DeleteRealServer(rsToDelete.VirtualServer, rs) + if err != nil { + return false, fmt.Errorf("Delete destination %q err: %v", rs.String(), err) + } + return true, nil + } + } + return true, fmt.Errorf("Failed to delete rs %q, can't find the real server", rsToDelete.String()) +} + +func (m *GracefulTerminationManager) tryDeleteRs() { + if !m.rsList.flushList(m.deleteRsFunc) { + klog.Errorf("Failed to flush the graceful termination list") + } +} + +// MoveRSOutofGracefulDeleteList deletes an rs and removes it from the rsList immediately +func (m *GracefulTerminationManager) MoveRSOutofGracefulDeleteList(uniqueRS string) error { + rsToDelete, find := m.rsList.exist(uniqueRS) + if !find || rsToDelete == nil { + return fmt.Errorf("failed to find rs: %q", uniqueRS) + } + err := m.ipvs.DeleteRealServer(rsToDelete.VirtualServer, rsToDelete.RealServer) + if err != nil { + return err + } + m.rsList.remove(rsToDelete) + return nil +} + +// Run starts a goroutine that tries to delete rs in the graceful delete rsList once per minute +func (m *GracefulTerminationManager) Run() { + // Before starting, add any leftover rs to the graceful delete rsList + vss, err := m.ipvs.GetVirtualServers() + if err != nil { + klog.Errorf("IPVS graceful delete manager failed to get IPVS virtualserver") + } + for _, vs := range vss { + rss, err := m.ipvs.GetRealServers(vs) + if err != nil { + klog.Errorf("IPVS graceful delete manager failed to get %v realserver", vs) + continue + } + for _, rs := range rss { + m.GracefulDeleteRS(vs, rs) + } + } + + go wait.Until(m.tryDeleteRs, rsCheckDeleteInterval, wait.NeverStop) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/ipset.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/ipset.go index 99bdc6d1542d..e449cc1ee3ff 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/ipset.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/ipset.go @@ -18,11 +18,11 @@ package ipvs import ( "k8s.io/apimachinery/pkg/util/sets" + utilversion "k8s.io/apimachinery/pkg/util/version" utilipset "k8s.io/kubernetes/pkg/util/ipset" - utilversion "k8s.io/kubernetes/pkg/util/version" "fmt" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -125,7 +125,7 @@ func (set *IPSet) resetEntries() { func (set *IPSet) syncIPSetEntries() { appliedEntries, err := set.handle.ListEntries(set.Name) if err != nil { - glog.Errorf("Failed to list ip set entries, error: %v", err) + klog.Errorf("Failed to list ip set entries, error: %v", err) return } @@ -140,18 +140,18 @@ for _, entry := range currentIPSetEntries.Difference(set.activeEntries).List() { if err := set.handle.DelEntry(entry, set.Name); err != nil { if !utilipset.IsNotFoundError(err) { - glog.Errorf("Failed to delete ip set entry: %s from ip set: %s, error: %v", entry, set.Name, err) + klog.Errorf("Failed to delete ip set entry: %s from ip set: %s, error: %v", entry, set.Name, err) } } else { - glog.V(3).Infof("Successfully delete legacy ip set entry: %s from ip set: %s", entry, set.Name) + klog.V(3).Infof("Successfully delete legacy ip set entry: %s from ip set: %s", entry, set.Name) } } // Create active entries for _, entry := range 
set.activeEntries.Difference(currentIPSetEntries).List() { if err := set.handle.AddEntry(entry, &set.IPSet, true); err != nil { - glog.Errorf("Failed to add entry: %v to ip set: %s, error: %v", entry, set.Name, err) + klog.Errorf("Failed to add entry: %v to ip set: %s, error: %v", entry, set.Name, err) } else { - glog.V(3).Infof("Successfully add entry: %v to ip set: %s", entry, set.Name) + klog.V(3).Infof("Successfully add entry: %v to ip set: %s", entry, set.Name) } } } @@ -159,7 +159,7 @@ func (set *IPSet) syncIPSetEntries() { func ensureIPSet(set *IPSet) error { if err := set.handle.CreateSet(&set.IPSet, true); err != nil { - glog.Errorf("Failed to make sure ip set: %v exist, error: %v", set, err) + klog.Errorf("Failed to make sure ip set: %v exist, error: %v", set, err) return err } return nil @@ -169,13 +169,13 @@ func ensureIPSet(set *IPSet) error { func checkMinVersion(vstring string) bool { version, err := utilversion.ParseGeneric(vstring) if err != nil { - glog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err) + klog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err) return false } minVersion, err := utilversion.ParseGeneric(MinIPSetCheckVersion) if err != nil { - glog.Errorf("MinCheckVersion (%s) is not a valid version string: %v", MinIPSetCheckVersion, err) + klog.Errorf("MinCheckVersion (%s) is not a valid version string: %v", MinIPSetCheckVersion, err) return false } return !version.LessThan(minVersion) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink_linux.go index 95c4d17f3384..0c671200f035 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink_linux.go @@ -115,7 +115,7 @@ func (h *netlinkHandle) ListBindAddress(devName string) ([]string, error) { if err != nil { return nil, fmt.Errorf("error list bound address of interface: %s, err: %v", devName, err) } - ips := make([]string, 0) + var ips []string for _, addr := range addrs { ips = append(ips, addr.IP.String()) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go index 0fce48eb7269..242a6025ca62 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go @@ -28,7 +28,7 @@ import ( "sync/atomic" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -113,23 +113,22 @@ var iptablesChains = []struct { var ipsetInfo = []struct { name string setType utilipset.Type - isIPv6 bool comment string }{ - {kubeLoopBackIPSet, utilipset.HashIPPortIP, true, kubeLoopBackIPSetComment}, - {kubeClusterIPSet, utilipset.HashIPPort, true, kubeClusterIPSetComment}, - {kubeExternalIPSet, utilipset.HashIPPort, true, kubeExternalIPSetComment}, - {kubeLoadBalancerSet, utilipset.HashIPPort, true, kubeLoadBalancerSetComment}, - {kubeLoadbalancerFWSet, utilipset.HashIPPort, true, kubeLoadbalancerFWSetComment}, - {kubeLoadBalancerLocalSet, utilipset.HashIPPort, true, kubeLoadBalancerLocalSetComment}, - {kubeLoadBalancerSourceIPSet, utilipset.HashIPPortIP, true, kubeLoadBalancerSourceIPSetComment}, - {kubeLoadBalancerSourceCIDRSet, utilipset.HashIPPortNet, true, kubeLoadBalancerSourceCIDRSetComment}, - {kubeNodePortSetTCP, utilipset.BitmapPort, false, 
kubeNodePortSetTCPComment}, - {kubeNodePortLocalSetTCP, utilipset.BitmapPort, false, kubeNodePortLocalSetTCPComment}, - {kubeNodePortSetUDP, utilipset.BitmapPort, false, kubeNodePortSetUDPComment}, - {kubeNodePortLocalSetUDP, utilipset.BitmapPort, false, kubeNodePortLocalSetUDPComment}, - {kubeNodePortSetSCTP, utilipset.BitmapPort, false, kubeNodePortSetSCTPComment}, - {kubeNodePortLocalSetSCTP, utilipset.BitmapPort, false, kubeNodePortLocalSetSCTPComment}, + {kubeLoopBackIPSet, utilipset.HashIPPortIP, kubeLoopBackIPSetComment}, + {kubeClusterIPSet, utilipset.HashIPPort, kubeClusterIPSetComment}, + {kubeExternalIPSet, utilipset.HashIPPort, kubeExternalIPSetComment}, + {kubeLoadBalancerSet, utilipset.HashIPPort, kubeLoadBalancerSetComment}, + {kubeLoadbalancerFWSet, utilipset.HashIPPort, kubeLoadbalancerFWSetComment}, + {kubeLoadBalancerLocalSet, utilipset.HashIPPort, kubeLoadBalancerLocalSetComment}, + {kubeLoadBalancerSourceIPSet, utilipset.HashIPPortIP, kubeLoadBalancerSourceIPSetComment}, + {kubeLoadBalancerSourceCIDRSet, utilipset.HashIPPortNet, kubeLoadBalancerSourceCIDRSetComment}, + {kubeNodePortSetTCP, utilipset.BitmapPort, kubeNodePortSetTCPComment}, + {kubeNodePortLocalSetTCP, utilipset.BitmapPort, kubeNodePortLocalSetTCPComment}, + {kubeNodePortSetUDP, utilipset.BitmapPort, kubeNodePortSetUDPComment}, + {kubeNodePortLocalSetUDP, utilipset.BitmapPort, kubeNodePortLocalSetUDPComment}, + {kubeNodePortSetSCTP, utilipset.BitmapPort, kubeNodePortSetSCTPComment}, + {kubeNodePortLocalSetSCTP, utilipset.BitmapPort, kubeNodePortLocalSetSCTPComment}, } // ipsetWithIptablesChain is the ipsets list with iptables source chain and the chain jump to @@ -158,19 +157,14 @@ var ipsetWithIptablesChain = []struct { {kubeNodePortLocalSetSCTP, string(KubeNodePortChain), "RETURN", "dst", "sctp"}, } -var ipvsModules = []string{ - "ip_vs", - "ip_vs_rr", - "ip_vs_wrr", - "ip_vs_sh", - "nf_conntrack_ipv4", -} - // In IPVS proxy mode, the following flags need to be set const sysctlRouteLocalnet = "net/ipv4/conf/all/route_localnet" const sysctlBridgeCallIPTables = "net/bridge/bridge-nf-call-iptables" const sysctlVSConnTrack = "net/ipv4/vs/conntrack" +const sysctlConnReuse = "net/ipv4/vs/conn_reuse_mode" const sysctlForward = "net/ipv4/ip_forward" +const sysctlArpIgnore = "net/ipv4/conf/all/arp_ignore" +const sysctlArpAnnounce = "net/ipv4/conf/all/arp_announce" // Proxier is an ipvs based proxy for connections between a localhost:lport // and services that provide the actual backends. @@ -231,7 +225,8 @@ type Proxier struct { nodePortAddresses []string // networkInterfacer defines an interface for several net library functions. // Inject for test purpose. - networkInterfacer utilproxy.NetworkInterfacer + networkInterfacer utilproxy.NetworkInterfacer + gracefuldeleteManager *GracefulTerminationManager } // IPGetter helps get node network interface IP @@ -299,25 +294,52 @@ func NewProxier(ipt utiliptables.Interface, nodePortAddresses []string, ) (*Proxier, error) { // Set the route_localnet sysctl we need for - if err := sysctl.SetSysctl(sysctlRouteLocalnet, 1); err != nil { - return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlRouteLocalnet, err) + if val, _ := sysctl.GetSysctl(sysctlRouteLocalnet); val != 1 { + if err := sysctl.SetSysctl(sysctlRouteLocalnet, 1); err != nil { + return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlRouteLocalnet, err) + } } // Proxy needs br_netfilter and bridge-nf-call-iptables=1 when containers // are connected to a Linux bridge (but not SDN bridges). 
Until most // plugins handle this, log when config is missing if val, err := sysctl.GetSysctl(sysctlBridgeCallIPTables); err == nil && val != 1 { - glog.Infof("missing br-netfilter module or unset sysctl br-nf-call-iptables; proxy may not work as intended") + klog.Infof("missing br-netfilter module or unset sysctl br-nf-call-iptables; proxy may not work as intended") } // Set the conntrack sysctl we need for - if err := sysctl.SetSysctl(sysctlVSConnTrack, 1); err != nil { - return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlVSConnTrack, err) + if val, _ := sysctl.GetSysctl(sysctlVSConnTrack); val != 1 { + if err := sysctl.SetSysctl(sysctlVSConnTrack, 1); err != nil { + return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlVSConnTrack, err) + } + } + + // Set the connection reuse mode + if val, _ := sysctl.GetSysctl(sysctlConnReuse); val != 0 { + if err := sysctl.SetSysctl(sysctlConnReuse, 0); err != nil { + return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlConnReuse, err) + } } // Set the ip_forward sysctl we need for - if err := sysctl.SetSysctl(sysctlForward, 1); err != nil { - return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlForward, err) + if val, _ := sysctl.GetSysctl(sysctlForward); val != 1 { + if err := sysctl.SetSysctl(sysctlForward, 1); err != nil { + return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlForward, err) + } + } + + // Set arp_ignore so the node only answers ARP for addresses configured on the receiving interface (service VIPs are bound to the IPVS dummy interface) + if val, _ := sysctl.GetSysctl(sysctlArpIgnore); val != 1 { + if err := sysctl.SetSysctl(sysctlArpIgnore, 1); err != nil { + return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlArpIgnore, err) + } + } + + // Set arp_announce so the node avoids advertising the dummy interface's service VIPs as ARP source addresses + if val, _ := sysctl.GetSysctl(sysctlArpAnnounce); val != 2 { + if err := sysctl.SetSysctl(sysctlArpAnnounce, 2); err != nil { + return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlArpAnnounce, err) + } } // Generate the masquerade mark to use for SNAT rules. 
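With the hunk above, the IPVS proxier manages six sysctls through the same get-before-set shape: route_localnet=1, vs/conntrack=1, conn_reuse_mode=0, ip_forward=1, arp_ignore=1 and arp_announce=2 (bridge-nf-call-iptables is only read and warned about). Under the hypothetical ensureSysctl helper sketched after the iptables proxier hunks above, the repetition could collapse into a table-driven loop; this is a sketch, not what the patch itself does:

```go
// Table-driven variant of the sysctl setup above; the sysctl* constants are
// the ones defined earlier in this file, and ensureSysctl is the
// hypothetical helper sketched earlier, not part of this change.
for _, s := range []struct {
	name string
	val  int
}{
	{sysctlRouteLocalnet, 1},
	{sysctlVSConnTrack, 1},
	{sysctlConnReuse, 0},
	{sysctlForward, 1},
	{sysctlArpIgnore, 1},
	{sysctlArpAnnounce, 2},
} {
	if err := ensureSysctl(sysctl, s.name, s.val); err != nil {
		return nil, err
	}
}
```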
@@ -325,72 +347,71 @@ func NewProxier(ipt utiliptables.Interface, masqueradeMark := fmt.Sprintf("%#08x/%#08x", masqueradeValue, masqueradeValue) if nodeIP == nil { - glog.Warningf("invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP") + klog.Warningf("invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP") nodeIP = net.ParseIP("127.0.0.1") } isIPv6 := utilnet.IsIPv6(nodeIP) - glog.V(2).Infof("nodeIP: %v, isIPv6: %v", nodeIP, isIPv6) + klog.V(2).Infof("nodeIP: %v, isIPv6: %v", nodeIP, isIPv6) if len(clusterCIDR) == 0 { - glog.Warningf("clusterCIDR not specified, unable to distinguish between internal and external traffic") + klog.Warningf("clusterCIDR not specified, unable to distinguish between internal and external traffic") } else if utilnet.IsIPv6CIDR(clusterCIDR) != isIPv6 { return nil, fmt.Errorf("clusterCIDR %s has incorrect IP version: expect isIPv6=%t", clusterCIDR, isIPv6) } if len(scheduler) == 0 { - glog.Warningf("IPVS scheduler not specified, use %s by default", DefaultScheduler) + klog.Warningf("IPVS scheduler not specified, use %s by default", DefaultScheduler) scheduler = DefaultScheduler } healthChecker := healthcheck.NewServer(hostname, recorder, nil, nil) // use default implementations of deps proxier := &Proxier{ - portsMap: make(map[utilproxy.LocalPort]utilproxy.Closeable), - serviceMap: make(proxy.ServiceMap), - serviceChanges: proxy.NewServiceChangeTracker(newServiceInfo, &isIPv6, recorder), - endpointsMap: make(proxy.EndpointsMap), - endpointsChanges: proxy.NewEndpointChangeTracker(hostname, nil, &isIPv6, recorder), - syncPeriod: syncPeriod, - minSyncPeriod: minSyncPeriod, - excludeCIDRs: excludeCIDRs, - iptables: ipt, - masqueradeAll: masqueradeAll, - masqueradeMark: masqueradeMark, - exec: exec, - clusterCIDR: clusterCIDR, - hostname: hostname, - nodeIP: nodeIP, - portMapper: &listenPortOpener{}, - recorder: recorder, - healthChecker: healthChecker, - healthzServer: healthzServer, - ipvs: ipvs, - ipvsScheduler: scheduler, - ipGetter: &realIPGetter{nl: NewNetLinkHandle()}, - iptablesData: bytes.NewBuffer(nil), - filterChainsData: bytes.NewBuffer(nil), - natChains: bytes.NewBuffer(nil), - natRules: bytes.NewBuffer(nil), - filterChains: bytes.NewBuffer(nil), - filterRules: bytes.NewBuffer(nil), - netlinkHandle: NewNetLinkHandle(), - ipset: ipset, - nodePortAddresses: nodePortAddresses, - networkInterfacer: utilproxy.RealNetwork{}, + portsMap: make(map[utilproxy.LocalPort]utilproxy.Closeable), + serviceMap: make(proxy.ServiceMap), + serviceChanges: proxy.NewServiceChangeTracker(newServiceInfo, &isIPv6, recorder), + endpointsMap: make(proxy.EndpointsMap), + endpointsChanges: proxy.NewEndpointChangeTracker(hostname, nil, &isIPv6, recorder), + syncPeriod: syncPeriod, + minSyncPeriod: minSyncPeriod, + excludeCIDRs: excludeCIDRs, + iptables: ipt, + masqueradeAll: masqueradeAll, + masqueradeMark: masqueradeMark, + exec: exec, + clusterCIDR: clusterCIDR, + hostname: hostname, + nodeIP: nodeIP, + portMapper: &listenPortOpener{}, + recorder: recorder, + healthChecker: healthChecker, + healthzServer: healthzServer, + ipvs: ipvs, + ipvsScheduler: scheduler, + ipGetter: &realIPGetter{nl: NewNetLinkHandle()}, + iptablesData: bytes.NewBuffer(nil), + filterChainsData: bytes.NewBuffer(nil), + natChains: bytes.NewBuffer(nil), + natRules: bytes.NewBuffer(nil), + filterChains: bytes.NewBuffer(nil), + filterRules: bytes.NewBuffer(nil), + netlinkHandle: NewNetLinkHandle(), + ipset: ipset, + nodePortAddresses: nodePortAddresses, + networkInterfacer: 
utilproxy.RealNetwork{}, + gracefuldeleteManager: NewGracefulTerminationManager(ipvs), } // initialize ipsetList with all sets we needed proxier.ipsetList = make(map[string]*IPSet) for _, is := range ipsetInfo { - if is.isIPv6 { - proxier.ipsetList[is.name] = NewIPSet(ipset, is.name, is.setType, isIPv6, is.comment) - } - proxier.ipsetList[is.name] = NewIPSet(ipset, is.name, is.setType, false, is.comment) + proxier.ipsetList[is.name] = NewIPSet(ipset, is.name, is.setType, isIPv6, is.comment) } burstSyncs := 2 - glog.V(3).Infof("minSyncPeriod: %v, syncPeriod: %v, burstSyncs: %d", minSyncPeriod, syncPeriod, burstSyncs) + klog.V(3).Infof("minSyncPeriod: %v, syncPeriod: %v, burstSyncs: %d", minSyncPeriod, syncPeriod, burstSyncs) proxier.syncRunner = async.NewBoundedFrequencyRunner("sync-runner", proxier.syncProxyRules, minSyncPeriod, syncPeriod, burstSyncs) + proxier.gracefuldeleteManager.Run() return proxier, nil } @@ -433,16 +454,14 @@ func NewLinuxKernelHandler() *LinuxKernelHandler { // GetModules returns all installed kernel modules. func (handle *LinuxKernelHandler) GetModules() ([]string, error) { // Check whether IPVS required kernel modules are built-in - kernelVersionFile := "/proc/sys/kernel/osrelease" - b, err := ioutil.ReadFile(kernelVersionFile) + kernelVersion, ipvsModules, err := utilipvs.GetKernelVersionAndIPVSMods(handle.executor) if err != nil { - glog.Errorf("Failed to read file %s with error %v", kernelVersionFile, err) + return nil, err } - kernelVersion := strings.TrimSpace(string(b)) builtinModsFilePath := fmt.Sprintf("/lib/modules/%s/modules.builtin", kernelVersion) - b, err = ioutil.ReadFile(builtinModsFilePath) + b, err := ioutil.ReadFile(builtinModsFilePath) if err != nil { - glog.Errorf("Failed to read file %s with error %v", builtinModsFilePath, err) + klog.Warningf("Failed to read file %s with error %v. You can ignore this message when kube-proxy is running inside container without mounting /lib/modules", builtinModsFilePath, err) } var bmods []string for _, module := range ipvsModules { @@ -455,7 +474,7 @@ func (handle *LinuxKernelHandler) GetModules() ([]string, error) { for _, kmod := range ipvsModules { err := handle.executor.Command("modprobe", "--", kmod).Run() if err != nil { - glog.Warningf("Failed to load kernel module %v with modprobe. "+ + klog.Warningf("Failed to load kernel module %v with modprobe. "+ "You can ignore this message when kube-proxy is running inside container without mounting /lib/modules", kmod) } } @@ -481,11 +500,26 @@ func CanUseIPVSProxier(handle KernelHandler, ipsetver IPSetVersioner) (bool, err } wantModules := sets.NewString() loadModules := sets.NewString() + linuxKernelHandler := NewLinuxKernelHandler() + _, ipvsModules, _ := utilipvs.GetKernelVersionAndIPVSMods(linuxKernelHandler.executor) wantModules.Insert(ipvsModules...) loadModules.Insert(mods...) 
modules := wantModules.Difference(loadModules).UnsortedList() - if len(modules) != 0 { - return false, fmt.Errorf("IPVS proxier will not be used because the following required kernel modules are not loaded: %v", modules) + var missingMods []string + conntrackMissingCounter := 0 + for _, mod := range modules { + if strings.Contains(mod, "nf_conntrack") { + conntrackMissingCounter++ + } else { + missingMods = append(missingMods, mod) + } + } + if conntrackMissingCounter == 2 { + missingMods = append(missingMods, "nf_conntrack_ipv4 (or nf_conntrack for Linux kernel 4.19 and later)") + } + + if len(missingMods) != 0 { + return false, fmt.Errorf("IPVS proxier will not be used because the following required kernel modules are not loaded: %v", missingMods) } // Check ipset version @@ -510,7 +544,7 @@ func cleanupIptablesLeftovers(ipt utiliptables.Interface) (encounteredError bool } if err := ipt.DeleteRule(jc.table, jc.from, args...); err != nil { if !utiliptables.IsNotFoundError(err) { - glog.Errorf("Error removing iptables rules in ipvs proxier: %v", err) + klog.Errorf("Error removing iptables rules in ipvs proxier: %v", err) encounteredError = true } } @@ -520,13 +554,13 @@ for _, ch := range iptablesChains { if err := ipt.FlushChain(ch.table, ch.chain); err != nil { if !utiliptables.IsNotFoundError(err) { - glog.Errorf("Error removing iptables rules in ipvs proxier: %v", err) + klog.Errorf("Error removing iptables rules in ipvs proxier: %v", err) encounteredError = true } } if err := ipt.DeleteChain(ch.table, ch.chain); err != nil { if !utiliptables.IsNotFoundError(err) { - glog.Errorf("Error removing iptables rules in ipvs proxier: %v", err) + klog.Errorf("Error removing iptables rules in ipvs proxier: %v", err) encounteredError = true } } @@ -545,7 +579,7 @@ func CleanupLeftovers(ipvs utilipvs.Interface, ipt utiliptables.Interface, ipset encounteredError = false err := ipvs.Flush() if err != nil { - glog.Errorf("Error flushing IPVS rules: %v", err) + klog.Errorf("Error flushing IPVS rules: %v", err) encounteredError = true } } @@ -553,7 +587,7 @@ nl := NewNetLinkHandle() err := nl.DeleteDummyDevice(DefaultDummyDevice) if err != nil { - glog.Errorf("Error deleting dummy device %s created by IPVS proxier: %v", DefaultDummyDevice, err) + klog.Errorf("Error deleting dummy device %s created by IPVS proxier: %v", DefaultDummyDevice, err) encounteredError = true } // Clear iptables created by ipvs Proxier. 
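The CanUseIPVSProxier change above stops treating nf_conntrack_ipv4 as unconditionally required: missing modules whose names contain nf_conntrack are tallied separately, and the combined entry is reported only when the conntrack requirement is genuinely unmet, because the module was renamed in newer kernels. The version rule it leans on (via GetKernelVersionAndIPVSMods) matches the README note earlier in this diff; a sketch of that rule, assuming only the 4.19 cutoff stated there:

```go
// conntrackModuleFor sketches the kernel-version rule used by
// GetKernelVersionAndIPVSMods (k8s.io/kubernetes/pkg/util/ipvs):
// nf_conntrack_ipv4 was folded into nf_conntrack in Linux 4.19, so the
// required module name depends on the running kernel. Illustrative only;
// the real lookup lives outside this diff.
func conntrackModuleFor(major, minor int) string {
	if major > 4 || (major == 4 && minor >= 19) {
		return "nf_conntrack"
	}
	return "nf_conntrack_ipv4"
}
```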
@@ -564,7 +598,7 @@ func CleanupLeftovers(ipvs utilipvs.Interface, ipt utiliptables.Interface, ipset err = ipset.DestroySet(set.name) if err != nil { if !utilipset.IsNotFoundError(err) { - glog.Errorf("Error removing ipset %s, error: %v", set.name, err) + klog.Errorf("Error removing ipset %s, error: %v", set.name, err) encounteredError = true } } @@ -666,11 +700,11 @@ func (proxier *Proxier) syncProxyRules() { start := time.Now() defer func() { metrics.SyncProxyRulesLatency.Observe(metrics.SinceInMicroseconds(start)) - glog.V(4).Infof("syncProxyRules took %v", time.Since(start)) + klog.V(4).Infof("syncProxyRules took %v", time.Since(start)) }() // don't sync rules till we've received services and endpoints if !proxier.endpointsSynced || !proxier.servicesSynced { - glog.V(2).Info("Not syncing ipvs rules until Services and Endpoints have been received from master") + klog.V(2).Info("Not syncing ipvs rules until Services and Endpoints have been received from master") return } @@ -684,12 +718,12 @@ func (proxier *Proxier) syncProxyRules() { // merge stale services gathered from updateEndpointsMap for _, svcPortName := range endpointUpdateResult.StaleServiceNames { if svcInfo, ok := proxier.serviceMap[svcPortName]; ok && svcInfo != nil && svcInfo.GetProtocol() == v1.ProtocolUDP { - glog.V(2).Infof("Stale udp service %v -> %s", svcPortName, svcInfo.ClusterIPString()) + klog.V(2).Infof("Stale udp service %v -> %s", svcPortName, svcInfo.ClusterIPString()) staleServices.Insert(svcInfo.ClusterIPString()) } } - glog.V(3).Infof("Syncing ipvs Proxier rules") + klog.V(3).Infof("Syncing ipvs Proxier rules") // Begin install iptables @@ -697,6 +731,8 @@ func (proxier *Proxier) syncProxyRules() { // This is to avoid memory reallocations and thus improve performance. proxier.natChains.Reset() proxier.natRules.Reset() + proxier.filterChains.Reset() + proxier.filterRules.Reset() // Write table headers. 
writeLine(proxier.filterChains, "*filter") @@ -707,7 +743,7 @@ func (proxier *Proxier) syncProxyRules() { // make sure dummy interface exists in the system where ipvs Proxier will bind service address on it _, err := proxier.netlinkHandle.EnsureDummyDevice(DefaultDummyDevice) if err != nil { - glog.Errorf("Failed to create dummy interface: %s, error: %v", DefaultDummyDevice, err) + klog.Errorf("Failed to create dummy interface: %s, error: %v", DefaultDummyDevice, err) return } @@ -732,7 +768,7 @@ func (proxier *Proxier) syncProxyRules() { for svcName, svc := range proxier.serviceMap { svcInfo, ok := svc.(*serviceInfo) if !ok { - glog.Errorf("Failed to cast serviceInfo %q", svcName.String()) + klog.Errorf("Failed to cast serviceInfo %q", svcName.String()) continue } protocol := strings.ToLower(string(svcInfo.Protocol)) @@ -744,7 +780,10 @@ func (proxier *Proxier) syncProxyRules() { for _, e := range proxier.endpointsMap[svcName] { ep, ok := e.(*proxy.BaseEndpointInfo) if !ok { - glog.Errorf("Failed to cast BaseEndpointInfo %q", e.String()) + klog.Errorf("Failed to cast BaseEndpointInfo %q", e.String()) + continue + } + if !ep.IsLocal { continue } epIP := ep.IP() @@ -761,7 +800,7 @@ func (proxier *Proxier) syncProxyRules() { SetType: utilipset.HashIPPortIP, } if valid := proxier.ipsetList[kubeLoopBackIPSet].validateEntry(entry); !valid { - glog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeLoopBackIPSet].Name)) + klog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeLoopBackIPSet].Name)) continue } proxier.ipsetList[kubeLoopBackIPSet].activeEntries.Insert(entry.String()) @@ -778,7 +817,7 @@ func (proxier *Proxier) syncProxyRules() { // add service Cluster IP:Port to kubeServiceAccess ip set for the purpose of solving hairpin. // proxier.kubeServiceAccessSet.activeEntries.Insert(entry.String()) if valid := proxier.ipsetList[kubeClusterIPSet].validateEntry(entry); !valid { - glog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeClusterIPSet].Name)) + klog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeClusterIPSet].Name)) continue } proxier.ipsetList[kubeClusterIPSet].activeEntries.Insert(entry.String()) @@ -801,16 +840,16 @@ func (proxier *Proxier) syncProxyRules() { // ExternalTrafficPolicy only works for NodePort and external LB traffic, does not affect ClusterIP // So we still need clusterIP rules in onlyNodeLocalEndpoints mode. if err := proxier.syncEndpoint(svcName, false, serv); err != nil { - glog.Errorf("Failed to sync endpoint for service: %v, err: %v", serv, err) + klog.Errorf("Failed to sync endpoint for service: %v, err: %v", serv, err) } } else { - glog.Errorf("Failed to sync service: %v, err: %v", serv, err) + klog.Errorf("Failed to sync service: %v, err: %v", serv, err) } // Capture externalIPs. 
for _, externalIP := range svcInfo.ExternalIPs { if local, err := utilproxy.IsLocalIP(externalIP); err != nil { - glog.Errorf("can't determine if IP is local, assuming not: %v", err) + klog.Errorf("can't determine if IP is local, assuming not: %v", err) // We do not start listening on SCTP ports, according to our agreement in the // SCTP support KEP } else if local && (svcInfo.GetProtocol() != v1.ProtocolSCTP) { @@ -821,7 +860,7 @@ func (proxier *Proxier) syncProxyRules() { Protocol: protocol, } if proxier.portsMap[lp] != nil { - glog.V(4).Infof("Port %s was open before and is still needed", lp.String()) + klog.V(4).Infof("Port %s was open before and is still needed", lp.String()) replacementPortsMap[lp] = proxier.portsMap[lp] } else { socket, err := proxier.portMapper.OpenLocalPort(&lp) @@ -835,7 +874,7 @@ func (proxier *Proxier) syncProxyRules() { UID: types.UID(proxier.hostname), Namespace: "", }, v1.EventTypeWarning, err.Error(), msg) - glog.Error(msg) + klog.Error(msg) continue } replacementPortsMap[lp] = socket @@ -851,7 +890,7 @@ func (proxier *Proxier) syncProxyRules() { } // We have to SNAT packets to external IPs. if valid := proxier.ipsetList[kubeExternalIPSet].validateEntry(entry); !valid { - glog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeExternalIPSet].Name)) + klog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeExternalIPSet].Name)) continue } proxier.ipsetList[kubeExternalIPSet].activeEntries.Insert(entry.String()) @@ -871,10 +910,10 @@ func (proxier *Proxier) syncProxyRules() { activeIPVSServices[serv.String()] = true activeBindAddrs[serv.Address.String()] = true if err := proxier.syncEndpoint(svcName, false, serv); err != nil { - glog.Errorf("Failed to sync endpoint for service: %v, err: %v", serv, err) + klog.Errorf("Failed to sync endpoint for service: %v, err: %v", serv, err) } } else { - glog.Errorf("Failed to sync service: %v, err: %v", serv, err) + klog.Errorf("Failed to sync service: %v, err: %v", serv, err) } } @@ -893,14 +932,14 @@ func (proxier *Proxier) syncProxyRules() { // If we are proxying globally, we need to masquerade in case we cross nodes. // If we are proxying only locally, we can retain the source IP. if valid := proxier.ipsetList[kubeLoadBalancerSet].validateEntry(entry); !valid { - glog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeLoadBalancerSet].Name)) + klog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeLoadBalancerSet].Name)) continue } proxier.ipsetList[kubeLoadBalancerSet].activeEntries.Insert(entry.String()) // insert loadbalancer entry to lbIngressLocalSet if service externaltrafficpolicy=local if svcInfo.OnlyNodeLocalEndpoints { if valid := proxier.ipsetList[kubeLoadBalancerLocalSet].validateEntry(entry); !valid { - glog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeLoadBalancerLocalSet].Name)) + klog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeLoadBalancerLocalSet].Name)) continue } proxier.ipsetList[kubeLoadBalancerLocalSet].activeEntries.Insert(entry.String()) @@ -910,7 +949,7 @@ func (proxier *Proxier) syncProxyRules() { // This currently works for loadbalancers that preserves source ips. // For loadbalancers which direct traffic to service NodePort, the firewall rules will not apply. 
if valid := proxier.ipsetList[kubeLoadbalancerFWSet].validateEntry(entry); !valid { - glog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeLoadbalancerFWSet].Name)) + klog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeLoadbalancerFWSet].Name)) continue } proxier.ipsetList[kubeLoadbalancerFWSet].activeEntries.Insert(entry.String()) @@ -926,7 +965,7 @@ func (proxier *Proxier) syncProxyRules() { } // enumerate all white list source cidr if valid := proxier.ipsetList[kubeLoadBalancerSourceCIDRSet].validateEntry(entry); !valid { - glog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeLoadBalancerSourceCIDRSet].Name)) + klog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeLoadBalancerSourceCIDRSet].Name)) continue } proxier.ipsetList[kubeLoadBalancerSourceCIDRSet].activeEntries.Insert(entry.String()) @@ -950,7 +989,7 @@ func (proxier *Proxier) syncProxyRules() { } // enumerate all white list source ip if valid := proxier.ipsetList[kubeLoadBalancerSourceIPSet].validateEntry(entry); !valid { - glog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeLoadBalancerSourceIPSet].Name)) + klog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeLoadBalancerSourceIPSet].Name)) continue } proxier.ipsetList[kubeLoadBalancerSourceIPSet].activeEntries.Insert(entry.String()) @@ -974,10 +1013,10 @@ func (proxier *Proxier) syncProxyRules() { activeIPVSServices[serv.String()] = true activeBindAddrs[serv.Address.String()] = true if err := proxier.syncEndpoint(svcName, onlyLocal, serv); err != nil { - glog.Errorf("Failed to sync endpoint for service: %v, err: %v", serv, err) + klog.Errorf("Failed to sync endpoint for service: %v, err: %v", serv, err) } } else { - glog.Errorf("Failed to sync service: %v, err: %v", serv, err) + klog.Errorf("Failed to sync service: %v, err: %v", serv, err) } } } @@ -985,11 +1024,11 @@ func (proxier *Proxier) syncProxyRules() { if svcInfo.NodePort != 0 { addresses, err := utilproxy.GetNodeAddresses(proxier.nodePortAddresses, proxier.networkInterfacer) if err != nil { - glog.Errorf("Failed to get node ip address matching nodeport cidr: %v", err) + klog.Errorf("Failed to get node ip address matching nodeport cidr: %v", err) continue } - lps := make([]utilproxy.LocalPort, 0) + var lps []utilproxy.LocalPort for address := range addresses { lp := utilproxy.LocalPort{ Description: "nodePort for " + svcNameString, @@ -1010,14 +1049,14 @@ func (proxier *Proxier) syncProxyRules() { // For ports on node IPs, open the actual port and hold it. 
for _, lp := range lps { if proxier.portsMap[lp] != nil { - glog.V(4).Infof("Port %s was open before and is still needed", lp.String()) + klog.V(4).Infof("Port %s was open before and is still needed", lp.String()) replacementPortsMap[lp] = proxier.portsMap[lp] // We do not start listening on SCTP ports, according to our agreement in the // SCTP support KEP } else if svcInfo.GetProtocol() != v1.ProtocolSCTP { socket, err := proxier.portMapper.OpenLocalPort(&lp) if err != nil { - glog.Errorf("can't open %s, skipping this nodePort: %v", lp.String(), err) + klog.Errorf("can't open %s, skipping this nodePort: %v", lp.String(), err) continue } if lp.Protocol == "udp" { @@ -1046,11 +1085,11 @@ func (proxier *Proxier) syncProxyRules() { nodePortSet = proxier.ipsetList[kubeNodePortSetSCTP] default: // It should never hit - glog.Errorf("Unsupported protocol type: %s", protocol) + klog.Errorf("Unsupported protocol type: %s", protocol) } if nodePortSet != nil { if valid := nodePortSet.validateEntry(entry); !valid { - glog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, nodePortSet.Name)) + klog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, nodePortSet.Name)) continue } nodePortSet.activeEntries.Insert(entry.String()) @@ -1068,11 +1107,11 @@ func (proxier *Proxier) syncProxyRules() { nodePortLocalSet = proxier.ipsetList[kubeNodePortLocalSetSCTP] default: // It should never hit - glog.Errorf("Unsupported protocol type: %s", protocol) + klog.Errorf("Unsupported protocol type: %s", protocol) } if nodePortLocalSet != nil { if valid := nodePortLocalSet.validateEntry(entry); !valid { - glog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, nodePortLocalSet.Name)) + klog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, nodePortLocalSet.Name)) continue } nodePortLocalSet.activeEntries.Insert(entry.String()) @@ -1080,7 +1119,7 @@ func (proxier *Proxier) syncProxyRules() { } // Build ipvs kernel routes for each node ip address - nodeIPs := make([]net.IP, 0) + var nodeIPs []net.IP for address := range addresses { if !utilproxy.IsZeroCIDR(address) { nodeIPs = append(nodeIPs, net.ParseIP(address)) @@ -1089,7 +1128,7 @@ func (proxier *Proxier) syncProxyRules() { // zero cidr nodeIPs, err = proxier.ipGetter.NodeIPs() if err != nil { - glog.Errorf("Failed to list all node IPs from host, err: %v", err) + klog.Errorf("Failed to list all node IPs from host, err: %v", err) } } for _, nodeIP := range nodeIPs { @@ -1108,10 +1147,10 @@ func (proxier *Proxier) syncProxyRules() { if err := proxier.syncService(svcNameString, serv, false); err == nil { activeIPVSServices[serv.String()] = true if err := proxier.syncEndpoint(svcName, svcInfo.OnlyNodeLocalEndpoints, serv); err != nil { - glog.Errorf("Failed to sync endpoint for service: %v, err: %v", serv, err) + klog.Errorf("Failed to sync endpoint for service: %v, err: %v", serv, err) } } else { - glog.Errorf("Failed to sync service: %v, err: %v", serv, err) + klog.Errorf("Failed to sync service: %v, err: %v", serv, err) } } } @@ -1131,11 +1170,13 @@ func (proxier *Proxier) syncProxyRules() { proxier.iptablesData.Reset() proxier.iptablesData.Write(proxier.natChains.Bytes()) proxier.iptablesData.Write(proxier.natRules.Bytes()) + proxier.iptablesData.Write(proxier.filterChains.Bytes()) + proxier.iptablesData.Write(proxier.filterRules.Bytes()) - glog.V(5).Infof("Restoring iptables rules: %s", proxier.iptablesData.Bytes()) + klog.V(5).Infof("Restoring iptables rules: %s", proxier.iptablesData.Bytes()) err = proxier.iptables.RestoreAll(proxier.iptablesData.Bytes(), 
utiliptables.NoFlushTables, utiliptables.RestoreCounters) if err != nil { - glog.Errorf("Failed to execute iptables-restore: %v\nRules:\n%s", err, proxier.iptablesData.Bytes()) + klog.Errorf("Failed to execute iptables-restore: %v\nRules:\n%s", err, proxier.iptablesData.Bytes()) // Revert new local ports. utilproxy.RevertPorts(replacementPortsMap, proxier.portsMap) return @@ -1156,7 +1197,7 @@ func (proxier *Proxier) syncProxyRules() { currentIPVSServices[appliedSvc.String()] = appliedSvc } } else { - glog.Errorf("Failed to get ipvs service, err: %v", err) + klog.Errorf("Failed to get ipvs service, err: %v", err) } proxier.cleanLegacyService(activeIPVSServices, currentIPVSServices) @@ -1164,7 +1205,7 @@ func (proxier *Proxier) syncProxyRules() { // currentBindAddrs represents ip addresses bind to DefaultDummyDevice from the system currentBindAddrs, err := proxier.netlinkHandle.ListBindAddress(DefaultDummyDevice) if err != nil { - glog.Errorf("Failed to get bind address, err: %v", err) + klog.Errorf("Failed to get bind address, err: %v", err) } proxier.cleanLegacyBindAddr(activeBindAddrs, currentBindAddrs) @@ -1177,17 +1218,17 @@ func (proxier *Proxier) syncProxyRules() { // not "OnlyLocal", but the services list will not, and the healthChecker // will just drop those endpoints. if err := proxier.healthChecker.SyncServices(serviceUpdateResult.HCServiceNodePorts); err != nil { - glog.Errorf("Error syncing healtcheck services: %v", err) + klog.Errorf("Error syncing healthcheck services: %v", err) } if err := proxier.healthChecker.SyncEndpoints(endpointUpdateResult.HCEndpointsLocalIPSize); err != nil { - glog.Errorf("Error syncing healthcheck endpoints: %v", err) + klog.Errorf("Error syncing healthcheck endpoints: %v", err) } // Finish housekeeping. // TODO: these could be made more consistent. 
for _, svcIP := range staleServices.UnsortedList() { if err := conntrack.ClearEntriesForIP(proxier.exec, svcIP, v1.ProtocolUDP); err != nil { - glog.Errorf("Failed to delete stale service IP %s connections, error: %v", svcIP, err) + klog.Errorf("Failed to delete stale service IP %s connections, error: %v", svcIP, err) } } proxier.deleteEndpointConnections(endpointUpdateResult.StaleEndpoints) @@ -1365,7 +1406,7 @@ func (proxier *Proxier) createAndLinkeKubeChain() { // Make sure we keep stats for the top-level chains for _, ch := range iptablesChains { if _, err := proxier.iptables.EnsureChain(ch.table, ch.chain); err != nil { - glog.Errorf("Failed to ensure that %s chain %s exists: %v", ch.table, ch.chain, err) + klog.Errorf("Failed to ensure that %s chain %s exists: %v", ch.table, ch.chain, err) return } if ch.table == utiliptables.TableNAT { @@ -1386,7 +1427,7 @@ func (proxier *Proxier) createAndLinkeKubeChain() { for _, jc := range iptablesJumpChain { args := []string{"-m", "comment", "--comment", jc.comment, "-j", string(jc.to)} if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, jc.table, jc.from, args...); err != nil { - glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", jc.table, jc.from, jc.to, err) + klog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", jc.table, jc.from, jc.to, err) } } @@ -1416,7 +1457,7 @@ func (proxier *Proxier) getExistingChains(buffer *bytes.Buffer, table utiliptabl buffer.Reset() err := proxier.iptables.SaveInto(table, buffer) if err != nil { // if we failed to get any rules - glog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err) + klog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err) } else { // otherwise parse the output return utiliptables.GetChainLines(table, buffer.Bytes()) } @@ -1432,7 +1473,7 @@ func (proxier *Proxier) deleteEndpointConnections(connectionMap []proxy.ServiceE endpointIP := utilproxy.IPPart(epSvcPair.Endpoint) err := conntrack.ClearEntriesForNAT(proxier.exec, svcInfo.ClusterIPString(), endpointIP, v1.ProtocolUDP) if err != nil { - glog.Errorf("Failed to delete %s endpoint connections, error: %v", epSvcPair.ServicePortName.String(), err) + klog.Errorf("Failed to delete %s endpoint connections, error: %v", epSvcPair.ServicePortName.String(), err) } } } @@ -1443,17 +1484,17 @@ func (proxier *Proxier) syncService(svcName string, vs *utilipvs.VirtualServer, if appliedVirtualServer == nil || !appliedVirtualServer.Equal(vs) { if appliedVirtualServer == nil { // IPVS service is not found, create a new service - glog.V(3).Infof("Adding new service %q %s:%d/%s", svcName, vs.Address, vs.Port, vs.Protocol) + klog.V(3).Infof("Adding new service %q %s:%d/%s", svcName, vs.Address, vs.Port, vs.Protocol) if err := proxier.ipvs.AddVirtualServer(vs); err != nil { - glog.Errorf("Failed to add IPVS service %q: %v", svcName, err) + klog.Errorf("Failed to add IPVS service %q: %v", svcName, err) return err } } else { // IPVS service was changed, update the existing one // During updates, service VIP will not go down - glog.V(3).Infof("IPVS service %s was changed", svcName) + klog.V(3).Infof("IPVS service %s was changed", svcName) if err := proxier.ipvs.UpdateVirtualServer(vs); err != nil { - glog.Errorf("Failed to update IPVS service, err:%v", err) + klog.Errorf("Failed to update IPVS service, err:%v", err) return err } } @@ -1462,10 +1503,10 @@ func (proxier *Proxier) syncService(svcName string, vs *utilipvs.VirtualServer, // bind service address to dummy interface even if service 
not changed, // in case that service IP was removed by other processes if bindAddr { - glog.V(4).Infof("Bind addr %s", vs.Address.String()) + klog.V(4).Infof("Bind addr %s", vs.Address.String()) _, err := proxier.netlinkHandle.EnsureAddressBind(vs.Address.String(), DefaultDummyDevice) if err != nil { - glog.Errorf("Failed to bind service address to dummy device %q: %v", svcName, err) + klog.Errorf("Failed to bind service address to dummy device %q: %v", svcName, err) return err } } @@ -1475,7 +1516,7 @@ func (proxier *Proxier) syncService(svcName string, vs *utilipvs.VirtualServer, func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNodeLocalEndpoints bool, vs *utilipvs.VirtualServer) error { appliedVirtualServer, err := proxier.ipvs.GetVirtualServer(vs) if err != nil || appliedVirtualServer == nil { - glog.Errorf("Failed to get IPVS service, error: %v", err) + klog.Errorf("Failed to get IPVS service, error: %v", err) return err } @@ -1486,7 +1527,7 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode curDests, err := proxier.ipvs.GetRealServers(appliedVirtualServer) if err != nil { - glog.Errorf("Failed to list IPVS destinations, error: %v", err) + klog.Errorf("Failed to list IPVS destinations, error: %v", err) return err } for _, des := range curDests { @@ -1500,53 +1541,72 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode newEndpoints.Insert(epInfo.String()) } - if !curEndpoints.Equal(newEndpoints) { - // Create new endpoints - for _, ep := range newEndpoints.Difference(curEndpoints).UnsortedList() { - ip, port, err := net.SplitHostPort(ep) - if err != nil { - glog.Errorf("Failed to parse endpoint: %v, error: %v", ep, err) - continue - } - portNum, err := strconv.Atoi(port) - if err != nil { - glog.Errorf("Failed to parse endpoint port %s, error: %v", port, err) - continue - } + // Create new endpoints + for _, ep := range newEndpoints.List() { + ip, port, err := net.SplitHostPort(ep) + if err != nil { + klog.Errorf("Failed to parse endpoint: %v, error: %v", ep, err) + continue + } + portNum, err := strconv.Atoi(port) + if err != nil { + klog.Errorf("Failed to parse endpoint port %s, error: %v", port, err) + continue + } - newDest := &utilipvs.RealServer{ - Address: net.ParseIP(ip), - Port: uint16(portNum), - Weight: 1, - } - err = proxier.ipvs.AddRealServer(appliedVirtualServer, newDest) - if err != nil { - glog.Errorf("Failed to add destination: %v, error: %v", newDest, err) - continue - } + newDest := &utilipvs.RealServer{ + Address: net.ParseIP(ip), + Port: uint16(portNum), + Weight: 1, } - // Delete old endpoints - for _, ep := range curEndpoints.Difference(newEndpoints).UnsortedList() { - ip, port, err := net.SplitHostPort(ep) - if err != nil { - glog.Errorf("Failed to parse endpoint: %v, error: %v", ep, err) + + if curEndpoints.Has(ep) { + // check if newEndpoint is in gracefulDelete list, if true, delete this ep immediately + uniqueRS := GetUniqueRSName(vs, newDest) + if !proxier.gracefuldeleteManager.InTerminationList(uniqueRS) { continue } - portNum, err := strconv.Atoi(port) + klog.V(5).Infof("new ep %q is in graceful delete list", uniqueRS) + err := proxier.gracefuldeleteManager.MoveRSOutofGracefulDeleteList(uniqueRS) if err != nil { - glog.Errorf("Failed to parse endpoint port %s, error: %v", port, err) + klog.Errorf("Failed to delete endpoint: %v in gracefulDeleteQueue, error: %v", ep, err) continue } + } + err = proxier.ipvs.AddRealServer(appliedVirtualServer, newDest) + if err != nil 
{ + klog.Errorf("Failed to add destination: %v, error: %v", newDest, err) + continue + } + } + // Delete old endpoints + for _, ep := range curEndpoints.Difference(newEndpoints).UnsortedList() { + // skip curEndpoint if it is already scheduled for graceful deletion + uniqueRS := vs.String() + "/" + ep + if proxier.gracefuldeleteManager.InTerminationList(uniqueRS) { + continue + } + ip, port, err := net.SplitHostPort(ep) + if err != nil { + klog.Errorf("Failed to parse endpoint: %v, error: %v", ep, err) + continue + } + portNum, err := strconv.Atoi(port) + if err != nil { + klog.Errorf("Failed to parse endpoint port %s, error: %v", port, err) + continue + } - delDest := &utilipvs.RealServer{ - Address: net.ParseIP(ip), - Port: uint16(portNum), - } - err = proxier.ipvs.DeleteRealServer(appliedVirtualServer, delDest) - if err != nil { - glog.Errorf("Failed to delete destination: %v, error: %v", delDest, err) - continue - } + delDest := &utilipvs.RealServer{ + Address: net.ParseIP(ip), + Port: uint16(portNum), + } + + klog.V(5).Infof("Using graceful delete to delete: %v", delDest) + err = proxier.gracefuldeleteManager.GracefulDeleteRS(appliedVirtualServer, delDest) + if err != nil { + klog.Errorf("Failed to delete destination: %v, error: %v", delDest, err) + continue } } return nil @@ -1559,6 +1619,15 @@ func (proxier *Proxier) cleanLegacyService(activeServices map[string]bool, curre // This service was not processed in the latest sync loop so before deleting it, // make sure it does not fall within an excluded CIDR range. okayToDelete := true + rsList, _ := proxier.ipvs.GetRealServers(svc) + for _, rs := range rsList { + uniqueRS := GetUniqueRSName(svc, rs) + // if any real server of this service is still in graceful termination, handle the service later + if proxier.gracefuldeleteManager.InTerminationList(uniqueRS) { + okayToDelete = false + break + } + } for _, excludedCIDR := range proxier.excludeCIDRs { // Any validation of this CIDR already should have occurred. 
_, n, _ := net.ParseCIDR(excludedCIDR) @@ -1569,7 +1638,7 @@ func (proxier *Proxier) cleanLegacyService(activeServices map[string]bool, curre } if okayToDelete { if err := proxier.ipvs.DeleteVirtualServer(svc); err != nil { - glog.Errorf("Failed to delete service, error: %v", err) + klog.Errorf("Failed to delete service, error: %v", err) } } } @@ -1580,11 +1649,11 @@ func (proxier *Proxier) cleanLegacyBindAddr(activeBindAddrs map[string]bool, cur for _, addr := range currentBindAddrs { if _, ok := activeBindAddrs[addr]; !ok { // This address was not processed in the latest sync loop - glog.V(4).Infof("Unbind addr %s", addr) + klog.V(4).Infof("Unbind addr %s", addr) err := proxier.netlinkHandle.UnbindAddress(addr, DefaultDummyDevice) // Ignore no such address error when try to unbind address if err != nil { - glog.Errorf("Failed to unbind service addr %s from dummy interface %s: %v", addr, DefaultDummyDevice, err) + klog.Errorf("Failed to unbind service addr %s from dummy interface %s: %v", addr, DefaultDummyDevice, err) } } } @@ -1650,7 +1719,7 @@ func openLocalPort(lp *utilproxy.LocalPort) (utilproxy.Closeable, error) { default: return nil, fmt.Errorf("unknown protocol %q", lp.Protocol) } - glog.V(2).Infof("Opened local port %s", lp.String()) + klog.V(2).Infof("Opened local port %s", lp.String()) return socket, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/service.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/service.go index 5853ef9ca00c..8386e62c0e26 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/service.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/service.go @@ -23,7 +23,7 @@ import ( "strings" "sync" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -119,7 +119,7 @@ func (sct *ServiceChangeTracker) newBaseServiceInfo(port *v1.ServicePort, servic if apiservice.NeedsHealthCheck(service) { p := service.Spec.HealthCheckNodePort if p == 0 { - glog.Errorf("Service %s/%s has no healthcheck nodeport", service.Namespace, service.Name) + klog.Errorf("Service %s/%s has no healthcheck nodeport", service.Namespace, service.Name) } else { info.HealthCheckNodePort = int(p) } @@ -306,9 +306,9 @@ func (sm *ServiceMap) merge(other ServiceMap) sets.String { existingPorts.Insert(svcPortName.String()) _, exists := (*sm)[svcPortName] if !exists { - glog.V(1).Infof("Adding new service port %q at %s", svcPortName, info.String()) + klog.V(1).Infof("Adding new service port %q at %s", svcPortName, info.String()) } else { - glog.V(1).Infof("Updating existing service port %q at %s", svcPortName, info.String()) + klog.V(1).Infof("Updating existing service port %q at %s", svcPortName, info.String()) } (*sm)[svcPortName] = info } @@ -331,13 +331,13 @@ func (sm *ServiceMap) unmerge(other ServiceMap, UDPStaleClusterIP sets.String) { for svcPortName := range other { info, exists := (*sm)[svcPortName] if exists { - glog.V(1).Infof("Removing service port %q", svcPortName) + klog.V(1).Infof("Removing service port %q", svcPortName) if info.GetProtocol() == v1.ProtocolUDP { UDPStaleClusterIP.Insert(info.ClusterIPString()) } delete(*sm, svcPortName) } else { - glog.Errorf("Service port %q doesn't exists", svcPortName) + klog.Errorf("Service port %q doesn't exists", svcPortName) } } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/BUILD index 0d800fc66d38..b7887c85b5d0 100644 --- 
a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/BUILD @@ -32,7 +32,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:android": [ diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxier.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxier.go index 38ee0c2ce6a9..661092b1b2b4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxier.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxier.go @@ -25,13 +25,13 @@ import ( "sync/atomic" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog" "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/proxy" utilproxy "k8s.io/kubernetes/pkg/proxy/util" @@ -81,7 +81,7 @@ func (info *ServiceInfo) IsAlive() bool { func logTimeout(err error) bool { if e, ok := err.(net.Error); ok { if e.Timeout() { - glog.V(3).Infof("connection to endpoint closed due to inactivity") + klog.V(3).Infof("connection to endpoint closed due to inactivity") return true } } @@ -184,7 +184,7 @@ func NewCustomProxier(loadBalancer LoadBalancer, listenIP net.IP, iptables iptab proxyPorts := newPortAllocator(pr) - glog.V(2).Infof("Setting proxy IP to %v and initializing iptables", hostIP) + klog.V(2).Infof("Setting proxy IP to %v and initializing iptables", hostIP) return createProxier(loadBalancer, listenIP, iptables, exec, hostIP, proxyPorts, syncPeriod, minSyncPeriod, udpIdleTimeout, makeProxySocket) } @@ -229,13 +229,13 @@ func CleanupLeftovers(ipt iptables.Interface) (encounteredError bool) { args := []string{"-m", "comment", "--comment", "handle ClusterIPs; NOTE: this must be before the NodePort rules"} if err := ipt.DeleteRule(iptables.TableNAT, iptables.ChainOutput, append(args, "-j", string(iptablesHostPortalChain))...); err != nil { if !iptables.IsNotFoundError(err) { - glog.Errorf("Error removing userspace rule: %v", err) + klog.Errorf("Error removing userspace rule: %v", err) encounteredError = true } } if err := ipt.DeleteRule(iptables.TableNAT, iptables.ChainPrerouting, append(args, "-j", string(iptablesContainerPortalChain))...); err != nil { if !iptables.IsNotFoundError(err) { - glog.Errorf("Error removing userspace rule: %v", err) + klog.Errorf("Error removing userspace rule: %v", err) encounteredError = true } } @@ -243,20 +243,20 @@ func CleanupLeftovers(ipt iptables.Interface) (encounteredError bool) { args = append(args, "-m", "comment", "--comment", "handle service NodePorts; NOTE: this must be the last rule in the chain") if err := ipt.DeleteRule(iptables.TableNAT, iptables.ChainOutput, append(args, "-j", string(iptablesHostNodePortChain))...); err != nil { if !iptables.IsNotFoundError(err) { - glog.Errorf("Error removing userspace rule: %v", err) + klog.Errorf("Error removing userspace rule: %v", err) encounteredError = true } } if err := ipt.DeleteRule(iptables.TableNAT, 
iptables.ChainPrerouting, append(args, "-j", string(iptablesContainerNodePortChain))...); err != nil { if !iptables.IsNotFoundError(err) { - glog.Errorf("Error removing userspace rule: %v", err) + klog.Errorf("Error removing userspace rule: %v", err) encounteredError = true } } args = []string{"-m", "comment", "--comment", "Ensure that non-local NodePort traffic can flow"} if err := ipt.DeleteRule(iptables.TableFilter, iptables.ChainInput, append(args, "-j", string(iptablesNonLocalNodePortChain))...); err != nil { if !iptables.IsNotFoundError(err) { - glog.Errorf("Error removing userspace rule: %v", err) + klog.Errorf("Error removing userspace rule: %v", err) encounteredError = true } } @@ -271,13 +271,13 @@ func CleanupLeftovers(ipt iptables.Interface) (encounteredError bool) { // flush chain, then if successful delete, delete will fail if flush fails. if err := ipt.FlushChain(table, c); err != nil { if !iptables.IsNotFoundError(err) { - glog.Errorf("Error flushing userspace chain: %v", err) + klog.Errorf("Error flushing userspace chain: %v", err) encounteredError = true } } else { if err = ipt.DeleteChain(table, c); err != nil { if !iptables.IsNotFoundError(err) { - glog.Errorf("Error deleting userspace chain: %v", err) + klog.Errorf("Error deleting userspace chain: %v", err) encounteredError = true } } @@ -290,7 +290,7 @@ func CleanupLeftovers(ipt iptables.Interface) (encounteredError bool) { // Sync is called to immediately synchronize the proxier state to iptables func (proxier *Proxier) Sync() { if err := iptablesInit(proxier.iptables); err != nil { - glog.Errorf("Failed to ensure iptables: %v", err) + klog.Errorf("Failed to ensure iptables: %v", err) } proxier.ensurePortals() proxier.cleanupStaleStickySessions() @@ -302,7 +302,7 @@ func (proxier *Proxier) SyncLoop() { defer t.Stop() for { <-t.C - glog.V(6).Infof("Periodic sync") + klog.V(6).Infof("Periodic sync") proxier.Sync() } } @@ -315,7 +315,7 @@ func (proxier *Proxier) ensurePortals() { for name, info := range proxier.serviceMap { err := proxier.openPortal(name, info) if err != nil { - glog.Errorf("Failed to ensure portal for %q: %v", name, err) + klog.Errorf("Failed to ensure portal for %q: %v", name, err) } } } @@ -388,7 +388,7 @@ func (proxier *Proxier) addServiceOnPort(service proxy.ServicePortName, protocol } proxier.setServiceInfo(service, si) - glog.V(2).Infof("Proxying for service %q on %s port %d", service, protocol, portNum) + klog.V(2).Infof("Proxying for service %q on %s port %d", service, protocol, portNum) go func(service proxy.ServicePortName, proxier *Proxier) { defer runtime.HandleCrash() atomic.AddInt32(&proxier.numProxyLoops, 1) @@ -405,7 +405,7 @@ func (proxier *Proxier) mergeService(service *v1.Service) sets.String { } svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} if !helper.IsServiceIPSet(service) { - glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP) + klog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP) return nil } existingPorts := sets.NewString() @@ -420,25 +420,25 @@ func (proxier *Proxier) mergeService(service *v1.Service) sets.String { continue } if exists { - glog.V(4).Infof("Something changed for service %q: stopping it", serviceName) + klog.V(4).Infof("Something changed for service %q: stopping it", serviceName) if err := proxier.closePortal(serviceName, info); err != nil { - glog.Errorf("Failed to close portal for %q: %v", serviceName, err) + klog.Errorf("Failed to close 
portal for %q: %v", serviceName, err) } if err := proxier.stopProxy(serviceName, info); err != nil { - glog.Errorf("Failed to stop service %q: %v", serviceName, err) + klog.Errorf("Failed to stop service %q: %v", serviceName, err) } } proxyPort, err := proxier.proxyPorts.AllocateNext() if err != nil { - glog.Errorf("failed to allocate proxy port for service %q: %v", serviceName, err) + klog.Errorf("failed to allocate proxy port for service %q: %v", serviceName, err) continue } serviceIP := net.ParseIP(service.Spec.ClusterIP) - glog.V(1).Infof("Adding new service %q at %s/%s", serviceName, net.JoinHostPort(serviceIP.String(), strconv.Itoa(int(servicePort.Port))), servicePort.Protocol) + klog.V(1).Infof("Adding new service %q at %s/%s", serviceName, net.JoinHostPort(serviceIP.String(), strconv.Itoa(int(servicePort.Port))), servicePort.Protocol) info, err = proxier.addServiceOnPort(serviceName, servicePort.Protocol, proxyPort, proxier.udpIdleTimeout) if err != nil { - glog.Errorf("Failed to start proxy for %q: %v", serviceName, err) + klog.Errorf("Failed to start proxy for %q: %v", serviceName, err) continue } info.portal.ip = serviceIP @@ -453,10 +453,10 @@ func (proxier *Proxier) mergeService(service *v1.Service) sets.String { info.stickyMaxAgeSeconds = int(*service.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds) } - glog.V(4).Infof("info: %#v", info) + klog.V(4).Infof("info: %#v", info) if err := proxier.openPortal(serviceName, info); err != nil { - glog.Errorf("Failed to open portal for %q: %v", serviceName, err) + klog.Errorf("Failed to open portal for %q: %v", serviceName, err) } proxier.loadBalancer.NewService(serviceName, info.sessionAffinityType, info.stickyMaxAgeSeconds) } @@ -470,7 +470,7 @@ func (proxier *Proxier) unmergeService(service *v1.Service, existingPorts sets.S } svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} if !helper.IsServiceIPSet(service) { - glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP) + klog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP) return } @@ -484,10 +484,10 @@ func (proxier *Proxier) unmergeService(service *v1.Service, existingPorts sets.S } serviceName := proxy.ServicePortName{NamespacedName: svcName, Port: servicePort.Name} - glog.V(1).Infof("Stopping service %q", serviceName) + klog.V(1).Infof("Stopping service %q", serviceName) info, exists := proxier.serviceMap[serviceName] if !exists { - glog.Errorf("Service %q is being removed but doesn't exist", serviceName) + klog.Errorf("Service %q is being removed but doesn't exist", serviceName) continue } @@ -496,16 +496,16 @@ func (proxier *Proxier) unmergeService(service *v1.Service, existingPorts sets.S } if err := proxier.closePortal(serviceName, info); err != nil { - glog.Errorf("Failed to close portal for %q: %v", serviceName, err) + klog.Errorf("Failed to close portal for %q: %v", serviceName, err) } if err := proxier.stopProxyInternal(serviceName, info); err != nil { - glog.Errorf("Failed to stop service %q: %v", serviceName, err) + klog.Errorf("Failed to stop service %q: %v", serviceName, err) } proxier.loadBalancer.DeleteService(serviceName) } for _, svcIP := range staleUDPServices.UnsortedList() { if err := conntrack.ClearEntriesForIP(proxier.exec, svcIP, v1.ProtocolUDP); err != nil { - glog.Errorf("Failed to delete stale service IP %s connections, error: %v", svcIP, err) + klog.Errorf("Failed to delete stale service IP %s connections, error: %v", svcIP, err) } } } @@ 
-600,31 +600,31 @@ func (proxier *Proxier) openOnePortal(portal portal, protocol v1.Protocol, proxy portalAddress := net.JoinHostPort(portal.ip.String(), strconv.Itoa(portal.port)) existed, err := proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesContainerPortalChain, args...) if err != nil { - glog.Errorf("Failed to install iptables %s rule for service %q, args:%v", iptablesContainerPortalChain, name, args) + klog.Errorf("Failed to install iptables %s rule for service %q, args:%v", iptablesContainerPortalChain, name, args) return err } if !existed { - glog.V(3).Infof("Opened iptables from-containers portal for service %q on %s %s", name, protocol, portalAddress) + klog.V(3).Infof("Opened iptables from-containers portal for service %q on %s %s", name, protocol, portalAddress) } if portal.isExternal { args := proxier.iptablesContainerPortalArgs(portal.ip, false, true, portal.port, protocol, proxyIP, proxyPort, name) existed, err := proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesContainerPortalChain, args...) if err != nil { - glog.Errorf("Failed to install iptables %s rule that opens service %q for local traffic, args:%v", iptablesContainerPortalChain, name, args) + klog.Errorf("Failed to install iptables %s rule that opens service %q for local traffic, args:%v", iptablesContainerPortalChain, name, args) return err } if !existed { - glog.V(3).Infof("Opened iptables from-containers portal for service %q on %s %s for local traffic", name, protocol, portalAddress) + klog.V(3).Infof("Opened iptables from-containers portal for service %q on %s %s for local traffic", name, protocol, portalAddress) } args = proxier.iptablesHostPortalArgs(portal.ip, true, portal.port, protocol, proxyIP, proxyPort, name) existed, err = proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesHostPortalChain, args...) if err != nil { - glog.Errorf("Failed to install iptables %s rule for service %q for dst-local traffic", iptablesHostPortalChain, name) + klog.Errorf("Failed to install iptables %s rule for service %q for dst-local traffic", iptablesHostPortalChain, name) return err } if !existed { - glog.V(3).Infof("Opened iptables from-host portal for service %q on %s %s for dst-local traffic", name, protocol, portalAddress) + klog.V(3).Infof("Opened iptables from-host portal for service %q on %s %s for dst-local traffic", name, protocol, portalAddress) } return nil } @@ -633,11 +633,11 @@ func (proxier *Proxier) openOnePortal(portal portal, protocol v1.Protocol, proxy args = proxier.iptablesHostPortalArgs(portal.ip, false, portal.port, protocol, proxyIP, proxyPort, name) existed, err = proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesHostPortalChain, args...) 
if err != nil { - glog.Errorf("Failed to install iptables %s rule for service %q", iptablesHostPortalChain, name) + klog.Errorf("Failed to install iptables %s rule for service %q", iptablesHostPortalChain, name) return err } if !existed { - glog.V(3).Infof("Opened iptables from-host portal for service %q on %s %s", name, protocol, portalAddress) + klog.V(3).Infof("Opened iptables from-host portal for service %q on %s %s", name, protocol, portalAddress) } return nil } @@ -665,7 +665,7 @@ func (proxier *Proxier) claimNodePort(ip net.IP, port int, protocol v1.Protocol, return fmt.Errorf("can't open node port for %s: %v", key.String(), err) } proxier.portMap[key] = &portMapValue{owner: owner, socket: socket} - glog.V(2).Infof("Claimed local port %s", key.String()) + klog.V(2).Infof("Claimed local port %s", key.String()) return nil } if existing.owner == owner { @@ -685,7 +685,7 @@ func (proxier *Proxier) releaseNodePort(ip net.IP, port int, protocol v1.Protoco existing, found := proxier.portMap[key] if !found { // We tolerate this, it happens if we are cleaning up a failed allocation - glog.Infof("Ignoring release on unowned port: %v", key) + klog.Infof("Ignoring release on unowned port: %v", key) return nil } if existing.owner != owner { @@ -709,32 +709,32 @@ func (proxier *Proxier) openNodePort(nodePort int, protocol v1.Protocol, proxyIP args := proxier.iptablesContainerPortalArgs(nil, false, false, nodePort, protocol, proxyIP, proxyPort, name) existed, err := proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesContainerNodePortChain, args...) if err != nil { - glog.Errorf("Failed to install iptables %s rule for service %q", iptablesContainerNodePortChain, name) + klog.Errorf("Failed to install iptables %s rule for service %q", iptablesContainerNodePortChain, name) return err } if !existed { - glog.Infof("Opened iptables from-containers public port for service %q on %s port %d", name, protocol, nodePort) + klog.Infof("Opened iptables from-containers public port for service %q on %s port %d", name, protocol, nodePort) } // Handle traffic from the host. args = proxier.iptablesHostNodePortArgs(nodePort, protocol, proxyIP, proxyPort, name) existed, err = proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesHostNodePortChain, args...) if err != nil { - glog.Errorf("Failed to install iptables %s rule for service %q", iptablesHostNodePortChain, name) + klog.Errorf("Failed to install iptables %s rule for service %q", iptablesHostNodePortChain, name) return err } if !existed { - glog.Infof("Opened iptables from-host public port for service %q on %s port %d", name, protocol, nodePort) + klog.Infof("Opened iptables from-host public port for service %q on %s port %d", name, protocol, nodePort) } args = proxier.iptablesNonLocalNodePortArgs(nodePort, protocol, proxyIP, proxyPort, name) existed, err = proxier.iptables.EnsureRule(iptables.Append, iptables.TableFilter, iptablesNonLocalNodePortChain, args...) 
if err != nil { - glog.Errorf("Failed to install iptables %s rule for service %q", iptablesNonLocalNodePortChain, name) + klog.Errorf("Failed to install iptables %s rule for service %q", iptablesNonLocalNodePortChain, name) return err } if !existed { - glog.Infof("Opened iptables from-non-local public port for service %q on %s port %d", name, protocol, nodePort) + klog.Infof("Opened iptables from-non-local public port for service %q on %s port %d", name, protocol, nodePort) } return nil @@ -755,9 +755,9 @@ func (proxier *Proxier) closePortal(service proxy.ServicePortName, info *Service el = append(el, proxier.closeNodePort(info.nodePort, info.protocol, proxier.listenIP, info.proxyPort, service)...) } if len(el) == 0 { - glog.V(3).Infof("Closed iptables portals for service %q", service) + klog.V(3).Infof("Closed iptables portals for service %q", service) } else { - glog.Errorf("Some errors closing iptables portals for service %q", service) + klog.Errorf("Some errors closing iptables portals for service %q", service) } return utilerrors.NewAggregate(el) } @@ -776,20 +776,20 @@ func (proxier *Proxier) closeOnePortal(portal portal, protocol v1.Protocol, prox // Handle traffic from containers. args := proxier.iptablesContainerPortalArgs(portal.ip, portal.isExternal, false, portal.port, protocol, proxyIP, proxyPort, name) if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesContainerPortalChain, args...); err != nil { - glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesContainerPortalChain, name) + klog.Errorf("Failed to delete iptables %s rule for service %q", iptablesContainerPortalChain, name) el = append(el, err) } if portal.isExternal { args := proxier.iptablesContainerPortalArgs(portal.ip, false, true, portal.port, protocol, proxyIP, proxyPort, name) if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesContainerPortalChain, args...); err != nil { - glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesContainerPortalChain, name) + klog.Errorf("Failed to delete iptables %s rule for service %q", iptablesContainerPortalChain, name) el = append(el, err) } args = proxier.iptablesHostPortalArgs(portal.ip, true, portal.port, protocol, proxyIP, proxyPort, name) if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesHostPortalChain, args...); err != nil { - glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesHostPortalChain, name) + klog.Errorf("Failed to delete iptables %s rule for service %q", iptablesHostPortalChain, name) el = append(el, err) } return el @@ -798,7 +798,7 @@ func (proxier *Proxier) closeOnePortal(portal portal, protocol v1.Protocol, prox // Handle traffic from the host (portalIP is not external). args = proxier.iptablesHostPortalArgs(portal.ip, false, portal.port, protocol, proxyIP, proxyPort, name) if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesHostPortalChain, args...); err != nil { - glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesHostPortalChain, name) + klog.Errorf("Failed to delete iptables %s rule for service %q", iptablesHostPortalChain, name) el = append(el, err) } @@ -811,21 +811,21 @@ func (proxier *Proxier) closeNodePort(nodePort int, protocol v1.Protocol, proxyI // Handle traffic from containers. 
args := proxier.iptablesContainerPortalArgs(nil, false, false, nodePort, protocol, proxyIP, proxyPort, name) if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesContainerNodePortChain, args...); err != nil { - glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesContainerNodePortChain, name) + klog.Errorf("Failed to delete iptables %s rule for service %q", iptablesContainerNodePortChain, name) el = append(el, err) } // Handle traffic from the host. args = proxier.iptablesHostNodePortArgs(nodePort, protocol, proxyIP, proxyPort, name) if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesHostNodePortChain, args...); err != nil { - glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesHostNodePortChain, name) + klog.Errorf("Failed to delete iptables %s rule for service %q", iptablesHostNodePortChain, name) el = append(el, err) } // Handle traffic not local to the host args = proxier.iptablesNonLocalNodePortArgs(nodePort, protocol, proxyIP, proxyPort, name) if err := proxier.iptables.DeleteRule(iptables.TableFilter, iptablesNonLocalNodePortChain, args...); err != nil { - glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesNonLocalNodePortChain, name) + klog.Errorf("Failed to delete iptables %s rule for service %q", iptablesNonLocalNodePortChain, name) el = append(el, err) } @@ -934,7 +934,7 @@ func iptablesFlush(ipt iptables.Interface) error { el = append(el, err) } if len(el) != 0 { - glog.Errorf("Some errors flushing old iptables portals: %v", el) + klog.Errorf("Some errors flushing old iptables portals: %v", el) } return utilerrors.NewAggregate(el) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxysocket.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxysocket.go index 098f68c15aa2..2ff498054652 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxysocket.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxysocket.go @@ -25,9 +25,9 @@ import ( "sync" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/klog" "k8s.io/kubernetes/pkg/proxy" ) @@ -95,10 +95,10 @@ func TryConnectEndpoints(service proxy.ServicePortName, srcAddr net.Addr, protoc for _, dialTimeout := range EndpointDialTimeouts { endpoint, err := loadBalancer.NextEndpoint(service, srcAddr, sessionAffinityReset) if err != nil { - glog.Errorf("Couldn't find an endpoint for %s: %v", service, err) + klog.Errorf("Couldn't find an endpoint for %s: %v", service, err) return nil, err } - glog.V(3).Infof("Mapped service %q to endpoint %s", service, endpoint) + klog.V(3).Infof("Mapped service %q to endpoint %s", service, endpoint) // TODO: This could spin up a new goroutine to make the outbound connection, // and keep accepting inbound traffic. outConn, err := net.DialTimeout(protocol, endpoint, dialTimeout) @@ -106,7 +106,7 @@ func TryConnectEndpoints(service proxy.ServicePortName, srcAddr net.Addr, protoc if isTooManyFDsError(err) { panic("Dial failed: " + err.Error()) } - glog.Errorf("Dial failed: %v", err) + klog.Errorf("Dial failed: %v", err) sessionAffinityReset = true continue } @@ -135,13 +135,13 @@ func (tcp *tcpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *Serv // Then the service port was just closed so the accept failure is to be expected. 
return } - glog.Errorf("Accept failed: %v", err) + klog.Errorf("Accept failed: %v", err) continue } - glog.V(3).Infof("Accepted TCP connection from %v to %v", inConn.RemoteAddr(), inConn.LocalAddr()) + klog.V(3).Infof("Accepted TCP connection from %v to %v", inConn.RemoteAddr(), inConn.LocalAddr()) outConn, err := TryConnectEndpoints(service, inConn.(*net.TCPConn).RemoteAddr(), "tcp", loadBalancer) if err != nil { - glog.Errorf("Failed to connect to balancer: %v", err) + klog.Errorf("Failed to connect to balancer: %v", err) inConn.Close() continue } @@ -154,7 +154,7 @@ func (tcp *tcpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *Serv func ProxyTCP(in, out *net.TCPConn) { var wg sync.WaitGroup wg.Add(2) - glog.V(4).Infof("Creating proxy between %v <-> %v <-> %v <-> %v", + klog.V(4).Infof("Creating proxy between %v <-> %v <-> %v <-> %v", in.RemoteAddr(), in.LocalAddr(), out.LocalAddr(), out.RemoteAddr()) go copyBytes("from backend", in, out, &wg) go copyBytes("to backend", out, in, &wg) @@ -163,14 +163,14 @@ func ProxyTCP(in, out *net.TCPConn) { func copyBytes(direction string, dest, src *net.TCPConn, wg *sync.WaitGroup) { defer wg.Done() - glog.V(4).Infof("Copying %s: %s -> %s", direction, src.RemoteAddr(), dest.RemoteAddr()) + klog.V(4).Infof("Copying %s: %s -> %s", direction, src.RemoteAddr(), dest.RemoteAddr()) n, err := io.Copy(dest, src) if err != nil { if !isClosedError(err) { - glog.Errorf("I/O error: %v", err) + klog.Errorf("I/O error: %v", err) } } - glog.V(4).Infof("Copied %d bytes %s: %s -> %s", n, direction, src.RemoteAddr(), dest.RemoteAddr()) + klog.V(4).Infof("Copied %d bytes %s: %s -> %s", n, direction, src.RemoteAddr(), dest.RemoteAddr()) dest.Close() src.Close() } @@ -215,11 +215,11 @@ func (udp *udpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *Serv if err != nil { if e, ok := err.(net.Error); ok { if e.Temporary() { - glog.V(1).Infof("ReadFrom had a temporary failure: %v", err) + klog.V(1).Infof("ReadFrom had a temporary failure: %v", err) continue } } - glog.Errorf("ReadFrom failed, exiting ProxyLoop: %v", err) + klog.Errorf("ReadFrom failed, exiting ProxyLoop: %v", err) break } // If this is a client we know already, reuse the connection and goroutine. @@ -232,14 +232,14 @@ func (udp *udpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *Serv _, err = svrConn.Write(buffer[0:n]) if err != nil { if !logTimeout(err) { - glog.Errorf("Write failed: %v", err) + klog.Errorf("Write failed: %v", err) // TODO: Maybe tear down the goroutine for this client/server pair? } continue } err = svrConn.SetDeadline(time.Now().Add(myInfo.Timeout)) if err != nil { - glog.Errorf("SetDeadline failed: %v", err) + klog.Errorf("SetDeadline failed: %v", err) continue } } @@ -253,14 +253,14 @@ func (udp *udpProxySocket) getBackendConn(activeClients *ClientCache, cliAddr ne if !found { // TODO: This could spin up a new goroutine to make the outbound connection, // and keep accepting inbound traffic. 
- glog.V(3).Infof("New UDP connection from %s", cliAddr) + klog.V(3).Infof("New UDP connection from %s", cliAddr) var err error svrConn, err = TryConnectEndpoints(service, cliAddr, "udp", loadBalancer) if err != nil { return nil, err } if err = svrConn.SetDeadline(time.Now().Add(timeout)); err != nil { - glog.Errorf("SetDeadline failed: %v", err) + klog.Errorf("SetDeadline failed: %v", err) return nil, err } activeClients.Clients[cliAddr.String()] = svrConn @@ -281,19 +281,19 @@ func (udp *udpProxySocket) proxyClient(cliAddr net.Addr, svrConn net.Conn, activ n, err := svrConn.Read(buffer[0:]) if err != nil { if !logTimeout(err) { - glog.Errorf("Read failed: %v", err) + klog.Errorf("Read failed: %v", err) } break } err = svrConn.SetDeadline(time.Now().Add(timeout)) if err != nil { - glog.Errorf("SetDeadline failed: %v", err) + klog.Errorf("SetDeadline failed: %v", err) break } n, err = udp.WriteTo(buffer[0:n], cliAddr) if err != nil { if !logTimeout(err) { - glog.Errorf("WriteTo failed: %v", err) + klog.Errorf("WriteTo failed: %v", err) } break } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/roundrobin.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/roundrobin.go index 5e84e8c27c95..6bbc558eae80 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/roundrobin.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/userspace/roundrobin.go @@ -25,9 +25,9 @@ import ( "sync" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/proxy" "k8s.io/kubernetes/pkg/util/slice" ) @@ -82,7 +82,7 @@ func NewLoadBalancerRR() *LoadBalancerRR { } func (lb *LoadBalancerRR) NewService(svcPort proxy.ServicePortName, affinityType v1.ServiceAffinity, ttlSeconds int) error { - glog.V(4).Infof("LoadBalancerRR NewService %q", svcPort) + klog.V(4).Infof("LoadBalancerRR NewService %q", svcPort) lb.lock.Lock() defer lb.lock.Unlock() lb.newServiceInternal(svcPort, affinityType, ttlSeconds) @@ -97,7 +97,7 @@ func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affi if _, exists := lb.services[svcPort]; !exists { lb.services[svcPort] = &balancerState{affinity: *newAffinityPolicy(affinityType, ttlSeconds)} - glog.V(4).Infof("LoadBalancerRR service %q did not exist, created", svcPort) + klog.V(4).Infof("LoadBalancerRR service %q did not exist, created", svcPort) } else if affinityType != "" { lb.services[svcPort].affinity.affinityType = affinityType } @@ -105,7 +105,7 @@ func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affi } func (lb *LoadBalancerRR) DeleteService(svcPort proxy.ServicePortName) { - glog.V(4).Infof("LoadBalancerRR DeleteService %q", svcPort) + klog.V(4).Infof("LoadBalancerRR DeleteService %q", svcPort) lb.lock.Lock() defer lb.lock.Unlock() delete(lb.services, svcPort) @@ -145,7 +145,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne if len(state.endpoints) == 0 { return "", ErrMissingEndpoints } - glog.V(4).Infof("NextEndpoint for service %q, srcAddr=%v: endpoints: %+v", svcPort, srcAddr, state.endpoints) + klog.V(4).Infof("NextEndpoint for service %q, srcAddr=%v: endpoints: %+v", svcPort, srcAddr, state.endpoints) sessionAffinityEnabled := isSessionAffinity(&state.affinity) @@ -163,7 +163,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne // Affinity wins. 
endpoint := sessionAffinity.endpoint sessionAffinity.lastUsed = time.Now() - glog.V(4).Infof("NextEndpoint for service %q from IP %s with sessionAffinity %#v: %s", svcPort, ipaddr, sessionAffinity, endpoint) + klog.V(4).Infof("NextEndpoint for service %q from IP %s with sessionAffinity %#v: %s", svcPort, ipaddr, sessionAffinity, endpoint) return endpoint, nil } } @@ -182,7 +182,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne affinity.lastUsed = time.Now() affinity.endpoint = endpoint affinity.clientIP = ipaddr - glog.V(4).Infof("Updated affinity key %s: %#v", ipaddr, state.affinity.affinityMap[ipaddr]) + klog.V(4).Infof("Updated affinity key %s: %#v", ipaddr, state.affinity.affinityMap[ipaddr]) } return endpoint, nil @@ -214,7 +214,7 @@ func flattenValidEndpoints(endpoints []hostPortPair) []string { func removeSessionAffinityByEndpoint(state *balancerState, svcPort proxy.ServicePortName, endpoint string) { for _, affinity := range state.affinity.affinityMap { if affinity.endpoint == endpoint { - glog.V(4).Infof("Removing client: %s from affinityMap for service %q", affinity.endpoint, svcPort) + klog.V(4).Infof("Removing client: %s from affinityMap for service %q", affinity.endpoint, svcPort) delete(state.affinity.affinityMap, affinity.clientIP) } } @@ -237,7 +237,7 @@ func (lb *LoadBalancerRR) updateAffinityMap(svcPort proxy.ServicePortName, newEn } for mKey, mVal := range allEndpoints { if mVal == 1 { - glog.V(2).Infof("Delete endpoint %s for service %q", mKey, svcPort) + klog.V(2).Infof("Delete endpoint %s for service %q", mKey, svcPort) removeSessionAffinityByEndpoint(state, svcPort, mKey) } } @@ -273,7 +273,7 @@ func (lb *LoadBalancerRR) OnEndpointsAdd(endpoints *v1.Endpoints) { state, exists := lb.services[svcPort] if !exists || state == nil || len(newEndpoints) > 0 { - glog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints) + klog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints) lb.updateAffinityMap(svcPort, newEndpoints) // OnEndpointsAdd can be called without NewService being called externally. // To be safe we will call it here. A new service will only be created @@ -307,7 +307,7 @@ func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *v1.Endpoint } if !exists || state == nil || len(curEndpoints) != len(newEndpoints) || !slicesEquiv(slice.CopyStrings(curEndpoints), newEndpoints) { - glog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints) + klog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints) lb.updateAffinityMap(svcPort, newEndpoints) // OnEndpointsUpdate can be called without NewService being called externally. // To be safe we will call it here. A new service will only be created @@ -335,7 +335,7 @@ func (lb *LoadBalancerRR) resetService(svcPort proxy.ServicePortName) { // If the service is still around, reset but don't delete. 
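The roundrobin.go changes above are logger-only, but the NextEndpoint logic they touch is worth a compact illustration: plain round-robin rotation, short-circuited by a client-IP affinity map when session affinity is enabled. A reduced sketch, with no locking, TTL expiry, or affinity-reset handling, and all names illustrative:

```go
package main

import "fmt"

// rrBalancer is a reduction of LoadBalancerRR above: round-robin over the
// endpoint list, plus a client-IP -> endpoint map that wins over rotation.
type rrBalancer struct {
	endpoints []string
	index     int
	affinity  map[string]string // client IP -> sticky endpoint
}

func (b *rrBalancer) next(clientIP string) string {
	if ep, ok := b.affinity[clientIP]; ok {
		return ep // affinity wins, as in the hunk above
	}
	ep := b.endpoints[b.index]
	b.index = (b.index + 1) % len(b.endpoints)
	b.affinity[clientIP] = ep // remember the choice for this client
	return ep
}

func main() {
	b := &rrBalancer{
		endpoints: []string{"10.0.0.1:80", "10.0.0.2:80"},
		affinity:  map[string]string{},
	}
	fmt.Println(b.next("192.168.1.5")) // 10.0.0.1:80
	fmt.Println(b.next("192.168.1.6")) // 10.0.0.2:80
	fmt.Println(b.next("192.168.1.5")) // 10.0.0.1:80 again (sticky)
}
```

In the real balancer the affinity record also carries `lastUsed`, and CleanupStaleStickySessions reaps entries once they outlive the service's TTL.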
if state, ok := lb.services[svcPort]; ok { if len(state.endpoints) > 0 { - glog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort) + klog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort) state.endpoints = []string{} } state.index = 0 @@ -379,7 +379,7 @@ func (lb *LoadBalancerRR) CleanupStaleStickySessions(svcPort proxy.ServicePortNa } for ip, affinity := range state.affinity.affinityMap { if int(time.Since(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds { - glog.V(4).Infof("Removing client %s from affinityMap for service %q", affinity.clientIP, svcPort) + klog.V(4).Infof("Removing client %s from affinityMap for service %q", affinity.clientIP, svcPort) delete(state.affinity.affinityMap, ip) } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/BUILD index 999dcf88daeb..fbbb01b18e76 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/BUILD @@ -17,7 +17,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/endpoints.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/endpoints.go index 7c91b9cd6a10..716491cd25c6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/endpoints.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/endpoints.go @@ -21,7 +21,7 @@ import ( "net" "strconv" - "github.com/golang/glog" + "k8s.io/klog" ) // IPPart returns just the IP part of an IP or IP:port or endpoint string. 
If the IP @@ -35,14 +35,14 @@ func IPPart(s string) string { // Must be IP:port host, _, err := net.SplitHostPort(s) if err != nil { - glog.Errorf("Error parsing '%s': %v", s, err) + klog.Errorf("Error parsing '%s': %v", s, err) return "" } // Check if host string is a valid IP address if ip := net.ParseIP(host); ip != nil { return ip.String() } else { - glog.Errorf("invalid IP part '%s'", host) + klog.Errorf("invalid IP part '%s'", host) } return "" } @@ -52,12 +52,12 @@ func PortPart(s string) (int, error) { // Must be IP:port _, port, err := net.SplitHostPort(s) if err != nil { - glog.Errorf("Error parsing '%s': %v", s, err) + klog.Errorf("Error parsing '%s': %v", s, err) return -1, err } portNumber, err := strconv.Atoi(port) if err != nil { - glog.Errorf("Error parsing '%s': %v", port, err) + klog.Errorf("Error parsing '%s': %v", port, err) return -1, err } return portNumber, nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/port.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/port.go index 96317b1dc820..35924e05e871 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/port.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/port.go @@ -21,7 +21,7 @@ import ( "net" "strconv" - "github.com/golang/glog" + "k8s.io/klog" ) // LocalPort describes a port on specific IP address and protocol @@ -60,7 +60,7 @@ func RevertPorts(replacementPortsMap, originalPortsMap map[LocalPort]Closeable) for k, v := range replacementPortsMap { // Only close newly opened local ports - leave ones that were open before this update if originalPortsMap[k] == nil { - glog.V(2).Infof("Closing local port %s", k.String()) + klog.V(2).Infof("Closing local port %s", k.String()) v.Close() } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go index ca1e6c8fc997..f1db309a941e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go @@ -27,7 +27,7 @@ import ( helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" utilnet "k8s.io/kubernetes/pkg/util/net" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -62,12 +62,12 @@ func IsLocalIP(ip string) (bool, error) { func ShouldSkipService(svcName types.NamespacedName, service *v1.Service) bool { // if ClusterIP is "None" or empty, skip proxying if !helper.IsServiceIPSet(service) { - glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP) + klog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP) return true } // Even if ClusterIP is set, ServiceTypeExternalName services don't get proxied if service.Spec.Type == v1.ServiceTypeExternalName { - glog.V(3).Infof("Skipping service %s due to Type=ExternalName", svcName) + klog.V(3).Infof("Skipping service %s due to Type=ExternalName", svcName) return true } return false @@ -134,7 +134,7 @@ func GetNodeAddresses(cidrs []string, nw NetworkInterfacer) (sets.String, error) // LogAndEmitIncorrectIPVersionEvent logs and emits incorrect IP version event. 
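IPPart and PortPart above lean entirely on the standard library: `net.SplitHostPort` does the splitting, and `net.ParseIP` plus `strconv.Atoi` validate the halves. A self-contained sketch of the same flow (`splitEndpoint` is an illustrative name, not a function from this tree); the LogAndEmitIncorrectIPVersionEvent hunk continues below:

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

// splitEndpoint mirrors IPPart/PortPart above: split first, then validate
// each half separately so a bad IP and a bad port fail distinctly.
func splitEndpoint(s string) (string, int, error) {
	host, port, err := net.SplitHostPort(s)
	if err != nil {
		return "", -1, err
	}
	ip := net.ParseIP(host)
	if ip == nil {
		return "", -1, fmt.Errorf("invalid IP part %q", host)
	}
	p, err := strconv.Atoi(port)
	if err != nil {
		return "", -1, err
	}
	return ip.String(), p, nil
}

func main() {
	fmt.Println(splitEndpoint("10.2.3.4:8080"))   // 10.2.3.4 8080 <nil>
	fmt.Println(splitEndpoint("[fd00::1]:53"))    // fd00::1 53 <nil>
	fmt.Println(splitEndpoint("not-an-endpoint")) // error from SplitHostPort
}
```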
func LogAndEmitIncorrectIPVersionEvent(recorder record.EventRecorder, fieldName, fieldValue, svcNamespace, svcName string, svcUID types.UID) { errMsg := fmt.Sprintf("%s in %s has incorrect IP version", fieldValue, fieldName) - glog.Errorf("%s (service %s/%s).", errMsg, svcNamespace, svcName) + klog.Errorf("%s (service %s/%s).", errMsg, svcNamespace, svcName) if recorder != nil { recorder.Eventf( &v1.ObjectReference{ diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/BUILD index 0e3636f1990c..4d6b5e0f8814 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/BUILD @@ -24,7 +24,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//vendor/github.com/Microsoft/hcsshim:go_default_library", "//vendor/github.com/davecgh/go-spew/spew:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "//conditions:default": [], }), diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/OWNERS index 605001e0a112..557cda002be1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/OWNERS @@ -1,3 +1,8 @@ +approvers: +- dineshgovindasamy +- madhanrm +- feiskyer reviewers: - dineshgovindasamy - madhanrm +- feiskyer diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/proxier.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/proxier.go index e9d7f72edbbf..b0289ffe4d7f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/proxier.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/proxier.go @@ -30,7 +30,7 @@ import ( "github.com/Microsoft/hcsshim" "github.com/davecgh/go-spew/spew" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -107,14 +107,14 @@ type hnsNetworkInfo struct { id string } -func Log(v interface{}, message string, level glog.Level) { - glog.V(level).Infof("%s, %s", message, spew.Sdump(v)) +func Log(v interface{}, message string, level klog.Level) { + klog.V(level).Infof("%s, %s", message, spew.Sdump(v)) } -func LogJson(v interface{}, message string, level glog.Level) { +func LogJson(v interface{}, message string, level klog.Level) { jsonString, err := json.Marshal(v) if err == nil { - glog.V(level).Infof("%s, %s", message, string(jsonString)) + klog.V(level).Infof("%s, %s", message, string(jsonString)) } } @@ -128,12 +128,23 @@ type endpointsInfo struct { refCount uint16 } +//Uses mac prefix and IPv4 address to return a mac address +//This ensures mac addresses are unique for proper load balancing +//Does not support IPv6 and returns a dummy mac +func conjureMac(macPrefix string, ip net.IP) string { + if ip4 := ip.To4(); ip4 != nil { + a, b, c, d := ip4[0], ip4[1], ip4[2], ip4[3] + return fmt.Sprintf("%v-%02x-%02x-%02x-%02x", macPrefix, a, b, c, d) + } + return "02-11-22-33-44-55" +} + func newEndpointInfo(ip string, port uint16, isLocal bool) *endpointsInfo { info := &endpointsInfo{ ip: ip, port: port, isLocal: isLocal, - macAddress: "00:11:22:33:44:55", // Hardcoding to some Random Mac + macAddress: conjureMac("02-11", net.ParseIP(ip)), refCount: 0, hnsID: "", } @@ -148,7 +159,7 @@ 
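The conjureMac helper introduced in the winkernel proxier above replaces the previous hardcoded MAC ("00:11:22:33:44:55") for remote HNS endpoints: embedding the IPv4 octets makes each endpoint's MAC unique and deterministic, which the comment notes is required for proper load balancing. Lifted into a runnable form (the endpointsInfo Cleanup hunk resumes below):

```go
package main

import (
	"fmt"
	"net"
)

// conjureMac, as added in the hunk above: embed the IPv4 octets in the MAC
// so every remote endpoint gets a unique, deterministic address; IPv6 (and
// anything unparsable) falls back to a fixed dummy MAC.
func conjureMac(macPrefix string, ip net.IP) string {
	if ip4 := ip.To4(); ip4 != nil {
		a, b, c, d := ip4[0], ip4[1], ip4[2], ip4[3]
		return fmt.Sprintf("%v-%02x-%02x-%02x-%02x", macPrefix, a, b, c, d)
	}
	return "02-11-22-33-44-55"
}

func main() {
	fmt.Println(conjureMac("02-11", net.ParseIP("10.240.0.5"))) // 02-11-0a-f0-00-05
	fmt.Println(conjureMac("02-11", net.ParseIP("fd00::5")))    // 02-11-22-33-44-55
}
```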
func (ep *endpointsInfo) Cleanup() { // Never delete a Local Endpoint. Local Endpoints are already created by other entities. // Remove only remote endpoints created by this service if ep.refCount <= 0 && !ep.isLocal { - glog.V(4).Infof("Removing endpoints for %v, since no one is referencing it", ep) + klog.V(4).Infof("Removing endpoints for %v, since no one is referencing it", ep) deleteHnsEndpoint(ep.hnsID) ep.hnsID = "" } @@ -169,10 +180,12 @@ func newServiceInfo(svcPortName proxy.ServicePortName, port *v1.ServicePort, ser stickyMaxAgeSeconds = int(*service.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds) } info := &serviceInfo{ - clusterIP: net.ParseIP(service.Spec.ClusterIP), - port: int(port.Port), - protocol: port.Protocol, - nodePort: int(port.NodePort), + clusterIP: net.ParseIP(service.Spec.ClusterIP), + port: int(port.Port), + protocol: port.Protocol, + nodePort: int(port.NodePort), + // targetPort is zero if it is specified as a name in port.TargetPort. + // Its real value would be got later from endpoints. targetPort: port.TargetPort.IntValue(), // Deep-copy in case the service instance changes loadBalancerStatus: *service.Status.LoadBalancer.DeepCopy(), @@ -193,7 +206,7 @@ func newServiceInfo(svcPortName proxy.ServicePortName, port *v1.ServicePort, ser if apiservice.NeedsHealthCheck(service) { p := service.Spec.HealthCheckNodePort if p == 0 { - glog.Errorf("Service %q has no healthcheck nodeport", svcPortName.NamespacedName.String()) + klog.Errorf("Service %q has no healthcheck nodeport", svcPortName.NamespacedName.String()) } else { info.healthCheckNodePort = int(p) } @@ -290,9 +303,9 @@ func (sm *proxyServiceMap) merge(other proxyServiceMap, curEndpoints proxyEndpoi existingPorts.Insert(svcPortName.Port) svcInfo, exists := (*sm)[svcPortName] if !exists { - glog.V(1).Infof("Adding new service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol) + klog.V(1).Infof("Adding new service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol) } else { - glog.V(1).Infof("Updating existing service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol) + klog.V(1).Infof("Updating existing service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol) svcInfo.cleanupAllPolicies(curEndpoints[svcPortName]) delete(*sm, svcPortName) } @@ -308,14 +321,14 @@ func (sm *proxyServiceMap) unmerge(other proxyServiceMap, existingPorts, staleSe } info, exists := (*sm)[svcPortName] if exists { - glog.V(1).Infof("Removing service port %q", svcPortName) + klog.V(1).Infof("Removing service port %q", svcPortName) if info.protocol == v1.ProtocolUDP { staleServices.Insert(info.clusterIP.String()) } info.cleanupAllPolicies(curEndpoints[svcPortName]) delete(*sm, svcPortName) } else { - glog.Errorf("Service port %q removed, but doesn't exists", svcPortName) + klog.Errorf("Service port %q removed, but doesn't exists", svcPortName) } } } @@ -327,13 +340,13 @@ func (em proxyEndpointsMap) merge(other proxyEndpointsMap, curServices proxyServ if exists { // info, exists := curServices[svcPortName] - glog.V(1).Infof("Updating existing service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol) + klog.V(1).Infof("Updating existing service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol) if exists { - glog.V(2).Infof("Endpoints are modified. Service [%v] is stale", svcPortName) + klog.V(2).Infof("Endpoints are modified. 
Service [%v] is stale", svcPortName) info.cleanupAllPolicies(epInfos) } else { // If no service exists, just cleanup the remote endpoints - glog.V(2).Infof("Endpoints are orphaned. Cleaning up") + klog.V(2).Infof("Endpoints are orphaned. Cleaning up") // Cleanup Endpoints references for _, ep := range epInfos { ep.Cleanup() @@ -352,11 +365,11 @@ func (em proxyEndpointsMap) unmerge(other proxyEndpointsMap, curServices proxySe for svcPortName := range other { info, exists := curServices[svcPortName] if exists { - glog.V(2).Infof("Service [%v] is stale", info) + klog.V(2).Infof("Service [%v] is stale", info) info.cleanupAllPolicies(em[svcPortName]) } else { // If no service exists, just cleanup the remote endpoints - glog.V(2).Infof("Endpoints are orphaned. Cleaning up") + klog.V(2).Infof("Endpoints are orphaned. Cleaning up") // Cleanup Endpoints references epInfos, exists := em[svcPortName] if exists { @@ -457,12 +470,12 @@ func NewProxier( masqueradeMark := fmt.Sprintf("%#08x/%#08x", masqueradeValue, masqueradeValue) if nodeIP == nil { - glog.Warningf("invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP") + klog.Warningf("invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP") nodeIP = net.ParseIP("127.0.0.1") } if len(clusterCIDR) == 0 { - glog.Warningf("clusterCIDR not specified, unable to distinguish between internal and external traffic") + klog.Warningf("clusterCIDR not specified, unable to distinguish between internal and external traffic") } healthChecker := healthcheck.NewServer(hostname, recorder, nil, nil) // use default implementations of deps @@ -474,11 +487,11 @@ func NewProxier( } hnsNetwork, err := getHnsNetworkInfo(hnsNetworkName) if err != nil { - glog.Fatalf("Unable to find Hns Network specified by %s. Please check environment variable KUBE_NETWORK", hnsNetworkName) + klog.Fatalf("Unable to find Hns Network specified by %s. 
Please check environment variable KUBE_NETWORK", hnsNetworkName) return nil, err } - glog.V(1).Infof("Hns Network loaded with info = %v", hnsNetwork) + klog.V(1).Infof("Hns Network loaded with info = %v", hnsNetwork) proxier := &Proxier{ portsMap: make(map[localPort]closeable), @@ -498,7 +511,7 @@ func NewProxier( } burstSyncs := 2 - glog.V(3).Infof("minSyncPeriod: %v, syncPeriod: %v, burstSyncs: %d", minSyncPeriod, syncPeriod, burstSyncs) + klog.V(3).Infof("minSyncPeriod: %v, syncPeriod: %v, burstSyncs: %d", minSyncPeriod, syncPeriod, burstSyncs) proxier.syncRunner = async.NewBoundedFrequencyRunner("sync-runner", proxier.syncProxyRules, minSyncPeriod, syncPeriod, burstSyncs) return proxier, nil @@ -517,15 +530,14 @@ func CleanupLeftovers() (encounteredError bool) { func (svcInfo *serviceInfo) cleanupAllPolicies(endpoints []*endpointsInfo) { Log(svcInfo, "Service Cleanup", 3) - if svcInfo.policyApplied { - svcInfo.deleteAllHnsLoadBalancerPolicy() - // Cleanup Endpoints references - for _, ep := range endpoints { - ep.Cleanup() - } - - svcInfo.policyApplied = false + // Skip the svcInfo.policyApplied check to remove all the policies + svcInfo.deleteAllHnsLoadBalancerPolicy() + // Cleanup Endpoints references + for _, ep := range endpoints { + ep.Cleanup() } + + svcInfo.policyApplied = false } func (svcInfo *serviceInfo) deleteAllHnsLoadBalancerPolicy() { @@ -556,7 +568,7 @@ func deleteAllHnsLoadBalancerPolicy() { LogJson(plist, "Remove Policy", 3) _, err = plist.Delete() if err != nil { - glog.Errorf("%v", err) + klog.Errorf("%v", err) } } @@ -617,36 +629,36 @@ func deleteHnsLoadBalancerPolicy(hnsID string) { // Cleanup HNS policies hnsloadBalancer, err := hcsshim.GetPolicyListByID(hnsID) if err != nil { - glog.Errorf("%v", err) + klog.Errorf("%v", err) return } LogJson(hnsloadBalancer, "Removing Policy", 2) _, err = hnsloadBalancer.Delete() if err != nil { - glog.Errorf("%v", err) + klog.Errorf("%v", err) } } func deleteHnsEndpoint(hnsID string) { hnsendpoint, err := hcsshim.GetHNSEndpointByID(hnsID) if err != nil { - glog.Errorf("%v", err) + klog.Errorf("%v", err) return } _, err = hnsendpoint.Delete() if err != nil { - glog.Errorf("%v", err) + klog.Errorf("%v", err) } - glog.V(3).Infof("Remote endpoint resource deleted id %s", hnsID) + klog.V(3).Infof("Remote endpoint resource deleted id %s", hnsID) } func getHnsNetworkInfo(hnsNetworkName string) (*hnsNetworkInfo, error) { hnsnetwork, err := hcsshim.GetHNSNetworkByName(hnsNetworkName) if err != nil { - glog.Errorf("%v", err) + klog.Errorf("%v", err) return nil, err } @@ -659,7 +671,7 @@ func getHnsNetworkInfo(hnsNetworkName string) (*hnsNetworkInfo, error) { func getHnsEndpointByIpAddress(ip net.IP, networkName string) (*hcsshim.HNSEndpoint, error) { hnsnetwork, err := hcsshim.GetHNSNetworkByName(networkName) if err != nil { - glog.Errorf("%v", err) + klog.Errorf("%v", err) return nil, err } @@ -734,12 +746,12 @@ func (proxier *Proxier) OnServiceSynced() { func shouldSkipService(svcName types.NamespacedName, service *v1.Service) bool { // if ClusterIP is "None" or empty, skip proxying if !helper.IsServiceIPSet(service) { - glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP) + klog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP) return true } // Even if ClusterIP is set, ServiceTypeExternalName services don't get proxied if service.Spec.Type == v1.ServiceTypeExternalName { - glog.V(3).Infof("Skipping service %s due to Type=ExternalName", svcName) + 
klog.V(3).Infof("Skipping service %s due to Type=ExternalName", svcName) return true } return false @@ -868,7 +880,7 @@ func endpointsToEndpointsMap(endpoints *v1.Endpoints, hostname string) proxyEndp for i := range ss.Ports { port := &ss.Ports[i] if port.Port == 0 { - glog.Warningf("ignoring invalid endpoint port %s", port.Name) + klog.Warningf("ignoring invalid endpoint port %s", port.Name) continue } svcPortName := proxy.ServicePortName{ @@ -878,19 +890,19 @@ func endpointsToEndpointsMap(endpoints *v1.Endpoints, hostname string) proxyEndp for i := range ss.Addresses { addr := &ss.Addresses[i] if addr.IP == "" { - glog.Warningf("ignoring invalid endpoint port %s with empty host", port.Name) + klog.Warningf("ignoring invalid endpoint port %s with empty host", port.Name) continue } isLocal := addr.NodeName != nil && *addr.NodeName == hostname epInfo := newEndpointInfo(addr.IP, uint16(port.Port), isLocal) endpointsMap[svcPortName] = append(endpointsMap[svcPortName], epInfo) } - if glog.V(3) { + if klog.V(3) { newEPList := []*endpointsInfo{} for _, ep := range endpointsMap[svcPortName] { newEPList = append(newEPList, ep) } - glog.Infof("Setting endpoints for %q to %+v", svcPortName, newEPList) + klog.Infof("Setting endpoints for %q to %+v", svcPortName, newEPList) } } } @@ -927,11 +939,11 @@ func (proxier *Proxier) syncProxyRules() { start := time.Now() defer func() { SyncProxyRulesLatency.Observe(sinceInMicroseconds(start)) - glog.V(4).Infof("syncProxyRules took %v", time.Since(start)) + klog.V(4).Infof("syncProxyRules took %v", time.Since(start)) }() // don't sync rules till we've received services and endpoints if !proxier.endpointsSynced || !proxier.servicesSynced { - glog.V(2).Info("Not syncing hns until Services and Endpoints have been received from master") + klog.V(2).Info("Not syncing hns until Services and Endpoints have been received from master") return } @@ -945,28 +957,36 @@ func (proxier *Proxier) syncProxyRules() { // merge stale services gathered from updateEndpointsMap for svcPortName := range endpointUpdateResult.staleServiceNames { if svcInfo, ok := proxier.serviceMap[svcPortName]; ok && svcInfo != nil && svcInfo.protocol == v1.ProtocolUDP { - glog.V(2).Infof("Stale udp service %v -> %s", svcPortName, svcInfo.clusterIP.String()) + klog.V(2).Infof("Stale udp service %v -> %s", svcPortName, svcInfo.clusterIP.String()) staleServices.Insert(svcInfo.clusterIP.String()) } } - glog.V(3).Infof("Syncing Policies") + klog.V(3).Infof("Syncing Policies") // Program HNS by adding corresponding policies for each service. for svcName, svcInfo := range proxier.serviceMap { if svcInfo.policyApplied { - glog.V(4).Infof("Policy already applied for %s", spew.Sdump(svcInfo)) + klog.V(4).Infof("Policy already applied for %s", spew.Sdump(svcInfo)) continue } var hnsEndpoints []hcsshim.HNSEndpoint - glog.V(4).Infof("====Applying Policy for %s====", svcName) + klog.V(4).Infof("====Applying Policy for %s====", svcName) // Create Remote endpoints for every endpoint, corresponding to the service for _, ep := range proxier.endpointsMap[svcName] { var newHnsEndpoint *hcsshim.HNSEndpoint hnsNetworkName := proxier.network.name var err error + + // targetPort is zero if it is specified as a name in port.TargetPort, so the real port should be got from endpoints. + // Note that hcsshim.AddLoadBalancer() doesn't support endpoints with different ports, so only port from first endpoint is used. + // TODO(feiskyer): add support of different endpoint ports after hcsshim.AddLoadBalancer() add that. 
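The targetPort fallback that this comment describes appears in the hunk just below. The reason it is needed: when a Service names its target port (`targetPort: http`) instead of numbering it, `IntOrString.IntValue()` yields 0, and the concrete port is only knowable from the endpoints. A small sketch of that behavior using `k8s.io/apimachinery`'s intstr as vendored here (`endpointPorts` is illustrative):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	named := intstr.FromString("http") // targetPort given by name
	numeric := intstr.FromInt(8080)    // targetPort given by number
	fmt.Println(named.IntValue(), numeric.IntValue()) // 0 8080

	// The fallback applied in the hunk below: take the first endpoint's
	// port when the packed value is zero.
	endpointPorts := []uint16{8080, 8080}
	target := named.IntValue()
	if target == 0 && len(endpointPorts) > 0 {
		target = int(endpointPorts[0])
	}
	fmt.Println(target) // 8080
}
```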
+ if svcInfo.targetPort == 0 { + svcInfo.targetPort = int(ep.port) + } + if len(ep.hnsID) > 0 { newHnsEndpoint, err = hcsshim.GetHNSEndpointByID(ep.hnsID) } @@ -980,13 +1000,13 @@ func (proxier *Proxier) syncProxyRules() { if newHnsEndpoint == nil { if ep.isLocal { - glog.Errorf("Local endpoint not found for %v: err: %v on network %s", ep.ip, err, hnsNetworkName) + klog.Errorf("Local endpoint not found for %v: err: %v on network %s", ep.ip, err, hnsNetworkName) continue } // hns Endpoint resource was not found, create one hnsnetwork, err := hcsshim.GetHNSNetworkByName(hnsNetworkName) if err != nil { - glog.Errorf("%v", err) + klog.Errorf("%v", err) continue } @@ -997,7 +1017,7 @@ func (proxier *Proxier) syncProxyRules() { newHnsEndpoint, err = hnsnetwork.CreateRemoteEndpoint(hnsEndpoint) if err != nil { - glog.Errorf("Remote endpoint creation failed: %v", err) + klog.Errorf("Remote endpoint creation failed: %v", err) continue } } @@ -1010,19 +1030,19 @@ func (proxier *Proxier) syncProxyRules() { Log(ep, "Endpoint resource found", 3) } - glog.V(3).Infof("Associated endpoints [%s] for service [%s]", spew.Sdump(hnsEndpoints), svcName) + klog.V(3).Infof("Associated endpoints [%s] for service [%s]", spew.Sdump(hnsEndpoints), svcName) if len(svcInfo.hnsID) > 0 { // This should not happen - glog.Warningf("Load Balancer already exists %s -- Debug ", svcInfo.hnsID) + klog.Warningf("Load Balancer already exists %s -- Debug ", svcInfo.hnsID) } if len(hnsEndpoints) == 0 { - glog.Errorf("Endpoint information not available for service %s. Not applying any policy", svcName) + klog.Errorf("Endpoint information not available for service %s. Not applying any policy", svcName) continue } - glog.V(4).Infof("Trying to Apply Policies for service %s", spew.Sdump(svcInfo)) + klog.V(4).Infof("Trying to Apply Policies for service %s", spew.Sdump(svcInfo)) var hnsLoadBalancer *hcsshim.PolicyList hnsLoadBalancer, err := getHnsLoadBalancer( @@ -1034,12 +1054,12 @@ func (proxier *Proxier) syncProxyRules() { uint16(svcInfo.port), ) if err != nil { - glog.Errorf("Policy creation failed: %v", err) + klog.Errorf("Policy creation failed: %v", err) continue } svcInfo.hnsID = hnsLoadBalancer.ID - glog.V(3).Infof("Hns LoadBalancer resource created for cluster ip resources %v, Id [%s]", svcInfo.clusterIP, hnsLoadBalancer.ID) + klog.V(3).Infof("Hns LoadBalancer resource created for cluster ip resources %v, Id [%s]", svcInfo.clusterIP, hnsLoadBalancer.ID) // If nodePort is specified, user should be able to use nodeIP:nodePort to reach the backend endpoints if svcInfo.nodePort > 0 { @@ -1052,12 +1072,12 @@ func (proxier *Proxier) syncProxyRules() { uint16(svcInfo.nodePort), ) if err != nil { - glog.Errorf("Policy creation failed: %v", err) + klog.Errorf("Policy creation failed: %v", err) continue } svcInfo.nodePorthnsID = hnsLoadBalancer.ID - glog.V(3).Infof("Hns LoadBalancer resource created for nodePort resources %v, Id [%s]", svcInfo.clusterIP, hnsLoadBalancer.ID) + klog.V(3).Infof("Hns LoadBalancer resource created for nodePort resources %v, Id [%s]", svcInfo.clusterIP, hnsLoadBalancer.ID) } // Create a Load Balancer Policy for each external IP @@ -1072,11 +1092,11 @@ func (proxier *Proxier) syncProxyRules() { uint16(svcInfo.port), ) if err != nil { - glog.Errorf("Policy creation failed: %v", err) + klog.Errorf("Policy creation failed: %v", err) continue } externalIp.hnsID = hnsLoadBalancer.ID - glog.V(3).Infof("Hns LoadBalancer resource created for externalIp resources %v, Id[%s]", externalIp, hnsLoadBalancer.ID) + 
klog.V(3).Infof("Hns LoadBalancer resource created for externalIp resources %v, Id[%s]", externalIp, hnsLoadBalancer.ID) } // Create a Load Balancer Policy for each loadbalancer ingress for _, lbIngressIp := range svcInfo.loadBalancerIngressIPs { @@ -1090,11 +1110,11 @@ func (proxier *Proxier) syncProxyRules() { uint16(svcInfo.port), ) if err != nil { - glog.Errorf("Policy creation failed: %v", err) + klog.Errorf("Policy creation failed: %v", err) continue } lbIngressIp.hnsID = hnsLoadBalancer.ID - glog.V(3).Infof("Hns LoadBalancer resource created for loadBalancer Ingress resources %v", lbIngressIp) + klog.V(3).Infof("Hns LoadBalancer resource created for loadBalancer Ingress resources %v", lbIngressIp) } svcInfo.policyApplied = true Log(svcInfo, "+++Policy Successfully applied for service +++", 2) @@ -1109,17 +1129,17 @@ func (proxier *Proxier) syncProxyRules() { // not "OnlyLocal", but the services list will not, and the healthChecker // will just drop those endpoints. if err := proxier.healthChecker.SyncServices(serviceUpdateResult.hcServices); err != nil { - glog.Errorf("Error syncing healtcheck services: %v", err) + klog.Errorf("Error syncing healthcheck services: %v", err) } if err := proxier.healthChecker.SyncEndpoints(endpointUpdateResult.hcEndpoints); err != nil { - glog.Errorf("Error syncing healthcheck endpoints: %v", err) + klog.Errorf("Error syncing healthcheck endpoints: %v", err) } // Finish housekeeping. // TODO: these could be made more consistent. for _, svcIP := range staleServices.UnsortedList() { // TODO : Check if this is required to cleanup stale services here - glog.V(5).Infof("Pending delete stale service IP %s connections", svcIP) + klog.V(5).Infof("Pending delete stale service IP %s connections", svcIP) } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/BUILD index 2c2e4d4e5d97..3925dc96d72c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/BUILD @@ -26,8 +26,8 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/miekg/dns:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxier.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxier.go index 9770451e7b85..4b5a218cfc0b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxier.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxier.go @@ -25,7 +25,7 @@ import ( "sync/atomic" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -70,7 +70,7 @@ func (info *serviceInfo) isAlive() bool { func logTimeout(err error) bool { if e, ok := err.(net.Error); ok { if e.Timeout() { - glog.V(3).Infof("connection to endpoint closed due to inactivity") + klog.V(3).Infof("connection to endpoint closed due to inactivity") return true } } @@ -140,7 +140,7 @@ func NewProxier(loadBalancer LoadBalancer, listenIP net.IP, netsh netsh.Interfac return nil, fmt.Errorf("failed to select a host interface: 
%v", err) } - glog.V(2).Infof("Setting proxy IP to %v", hostIP) + klog.V(2).Infof("Setting proxy IP to %v", hostIP) return createProxier(loadBalancer, listenIP, netsh, hostIP, syncPeriod, udpIdleTimeout) } @@ -167,7 +167,7 @@ func (proxier *Proxier) SyncLoop() { defer t.Stop() for { <-t.C - glog.V(6).Infof("Periodic sync") + klog.V(6).Infof("Periodic sync") proxier.Sync() } } @@ -234,7 +234,7 @@ func (proxier *Proxier) addServicePortPortal(servicePortPortalName ServicePortPo if existed, err := proxier.netsh.EnsureIPAddress(args, serviceIP); err != nil { return nil, err } else if !existed { - glog.V(3).Infof("Added ip address to fowarder interface for service %q at %s/%s", servicePortPortalName, net.JoinHostPort(listenIP, strconv.Itoa(port)), protocol) + klog.V(3).Infof("Added ip address to fowarder interface for service %q at %s/%s", servicePortPortalName, net.JoinHostPort(listenIP, strconv.Itoa(port)), protocol) } } @@ -259,7 +259,7 @@ func (proxier *Proxier) addServicePortPortal(servicePortPortalName ServicePortPo } proxier.setServiceInfo(servicePortPortalName, si) - glog.V(2).Infof("Proxying for service %q at %s/%s", servicePortPortalName, net.JoinHostPort(listenIP, strconv.Itoa(port)), protocol) + klog.V(2).Infof("Proxying for service %q at %s/%s", servicePortPortalName, net.JoinHostPort(listenIP, strconv.Itoa(port)), protocol) go func(service ServicePortPortalName, proxier *Proxier) { defer runtime.HandleCrash() atomic.AddInt32(&proxier.numProxyLoops, 1) @@ -313,7 +313,7 @@ func (proxier *Proxier) mergeService(service *v1.Service) map[ServicePortPortalN } svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} if !helper.IsServiceIPSet(service) { - glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP) + klog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP) return nil } existingPortPortals := make(map[ServicePortPortalName]bool) @@ -337,19 +337,19 @@ func (proxier *Proxier) mergeService(service *v1.Service) map[ServicePortPortalN continue } if exists { - glog.V(4).Infof("Something changed for service %q: stopping it", servicePortPortalName) + klog.V(4).Infof("Something changed for service %q: stopping it", servicePortPortalName) if err := proxier.closeServicePortPortal(servicePortPortalName, info); err != nil { - glog.Errorf("Failed to close service port portal %q: %v", servicePortPortalName, err) + klog.Errorf("Failed to close service port portal %q: %v", servicePortPortalName, err) } } - glog.V(1).Infof("Adding new service %q at %s/%s", servicePortPortalName, net.JoinHostPort(listenIP, strconv.Itoa(listenPort)), protocol) + klog.V(1).Infof("Adding new service %q at %s/%s", servicePortPortalName, net.JoinHostPort(listenIP, strconv.Itoa(listenPort)), protocol) info, err := proxier.addServicePortPortal(servicePortPortalName, protocol, listenIP, listenPort, proxier.udpIdleTimeout) if err != nil { - glog.Errorf("Failed to start proxy for %q: %v", servicePortPortalName, err) + klog.Errorf("Failed to start proxy for %q: %v", servicePortPortalName, err) continue } info.sessionAffinityType = service.Spec.SessionAffinity - glog.V(10).Infof("info: %#v", info) + klog.V(10).Infof("info: %#v", info) } if len(listenIPPortMap) > 0 { // only one loadbalancer per service port portal @@ -377,7 +377,7 @@ func (proxier *Proxier) unmergeService(service *v1.Service, existingPortPortals } svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} if !helper.IsServiceIPSet(service) 
{ - glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP) + klog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP) return } @@ -409,15 +409,15 @@ func (proxier *Proxier) unmergeService(service *v1.Service, existingPortPortals continue } - glog.V(1).Infof("Stopping service %q", servicePortPortalName) + klog.V(1).Infof("Stopping service %q", servicePortPortalName) info, exists := proxier.getServiceInfo(servicePortPortalName) if !exists { - glog.Errorf("Service %q is being removed but doesn't exist", servicePortPortalName) + klog.Errorf("Service %q is being removed but doesn't exist", servicePortPortalName) continue } if err := proxier.closeServicePortPortal(servicePortPortalName, info); err != nil { - glog.Errorf("Failed to close service port portal %q: %v", servicePortPortalName, err) + klog.Errorf("Failed to close service port portal %q: %v", servicePortPortalName, err) } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxysocket.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxysocket.go index df7d334d948e..c7c5691e3b35 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxysocket.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxysocket.go @@ -26,11 +26,11 @@ import ( "sync/atomic" "time" - "github.com/golang/glog" "github.com/miekg/dns" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/klog" "k8s.io/kubernetes/pkg/proxy" "k8s.io/kubernetes/pkg/util/ipconfig" "k8s.io/utils/exec" @@ -133,10 +133,10 @@ func tryConnect(service ServicePortPortalName, srcAddr net.Addr, protocol string } endpoint, err := proxier.loadBalancer.NextEndpoint(servicePortName, srcAddr, sessionAffinityReset) if err != nil { - glog.Errorf("Couldn't find an endpoint for %s: %v", service, err) + klog.Errorf("Couldn't find an endpoint for %s: %v", service, err) return nil, err } - glog.V(3).Infof("Mapped service %q to endpoint %s", service, endpoint) + klog.V(3).Infof("Mapped service %q to endpoint %s", service, endpoint) // TODO: This could spin up a new goroutine to make the outbound connection, // and keep accepting inbound traffic. outConn, err := net.DialTimeout(protocol, endpoint, dialTimeout) @@ -144,7 +144,7 @@ func tryConnect(service ServicePortPortalName, srcAddr net.Addr, protocol string if isTooManyFDsError(err) { panic("Dial failed: " + err.Error()) } - glog.Errorf("Dial failed: %v", err) + klog.Errorf("Dial failed: %v", err) sessionAffinityReset = true continue } @@ -173,13 +173,13 @@ func (tcp *tcpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serv // Then the service port was just closed so the accept failure is to be expected. 
return } - glog.Errorf("Accept failed: %v", err) + klog.Errorf("Accept failed: %v", err) continue } - glog.V(3).Infof("Accepted TCP connection from %v to %v", inConn.RemoteAddr(), inConn.LocalAddr()) + klog.V(3).Infof("Accepted TCP connection from %v to %v", inConn.RemoteAddr(), inConn.LocalAddr()) outConn, err := tryConnect(service, inConn.(*net.TCPConn).RemoteAddr(), "tcp", proxier) if err != nil { - glog.Errorf("Failed to connect to balancer: %v", err) + klog.Errorf("Failed to connect to balancer: %v", err) inConn.Close() continue } @@ -192,7 +192,7 @@ func (tcp *tcpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serv func proxyTCP(in, out *net.TCPConn) { var wg sync.WaitGroup wg.Add(2) - glog.V(4).Infof("Creating proxy between %v <-> %v <-> %v <-> %v", + klog.V(4).Infof("Creating proxy between %v <-> %v <-> %v <-> %v", in.RemoteAddr(), in.LocalAddr(), out.LocalAddr(), out.RemoteAddr()) go copyBytes("from backend", in, out, &wg) go copyBytes("to backend", out, in, &wg) @@ -201,14 +201,14 @@ func proxyTCP(in, out *net.TCPConn) { func copyBytes(direction string, dest, src *net.TCPConn, wg *sync.WaitGroup) { defer wg.Done() - glog.V(4).Infof("Copying %s: %s -> %s", direction, src.RemoteAddr(), dest.RemoteAddr()) + klog.V(4).Infof("Copying %s: %s -> %s", direction, src.RemoteAddr(), dest.RemoteAddr()) n, err := io.Copy(dest, src) if err != nil { if !isClosedError(err) { - glog.Errorf("I/O error: %v", err) + klog.Errorf("I/O error: %v", err) } } - glog.V(4).Infof("Copied %d bytes %s: %s -> %s", n, direction, src.RemoteAddr(), dest.RemoteAddr()) + klog.V(4).Infof("Copied %d bytes %s: %s -> %s", n, direction, src.RemoteAddr(), dest.RemoteAddr()) dest.Close() src.Close() } @@ -283,7 +283,7 @@ func appendDNSSuffix(msg *dns.Msg, buffer []byte, length int, dnsSuffix string) msg.Question[0].Name = origName if err != nil { - glog.Warningf("Unable to pack DNS packet. Error is: %v", err) + klog.Warningf("Unable to pack DNS packet. Error is: %v", err) return length, err } @@ -310,7 +310,7 @@ func recoverDNSQuestion(origName string, msg *dns.Msg, buffer []byte, length int mbuf, err := msg.PackBuffer(buffer) if err != nil { - glog.Warningf("Unable to pack DNS packet. Error is: %v", err) + klog.Warningf("Unable to pack DNS packet. 
Error is: %v", err) return length, err } @@ -330,7 +330,7 @@ func processUnpackedDNSQueryPacket( length int, dnsSearch []string) int { if dnsSearch == nil || len(dnsSearch) == 0 { - glog.V(1).Infof("DNS search list is not initialized and is empty.") + klog.V(1).Infof("DNS search list is not initialized and is empty.") return length } @@ -348,13 +348,13 @@ func processUnpackedDNSQueryPacket( state.msg.MsgHdr.Id = msg.MsgHdr.Id if index < 0 || index >= int32(len(dnsSearch)) { - glog.V(1).Infof("Search index %d is out of range.", index) + klog.V(1).Infof("Search index %d is out of range.", index) return length } length, err := appendDNSSuffix(msg, buffer, length, dnsSearch[index]) if err != nil { - glog.Errorf("Append DNS suffix failed: %v", err) + klog.Errorf("Append DNS suffix failed: %v", err) } return length @@ -373,7 +373,7 @@ func processUnpackedDNSResponsePacket( var drop bool var err error if dnsSearch == nil || len(dnsSearch) == 0 { - glog.V(1).Infof("DNS search list is not initialized and is empty.") + klog.V(1).Infof("DNS search list is not initialized and is empty.") return drop, length } @@ -389,19 +389,19 @@ func processUnpackedDNSResponsePacket( drop = true length, err = appendDNSSuffix(state.msg, buffer, length, dnsSearch[index]) if err != nil { - glog.Errorf("Append DNS suffix failed: %v", err) + klog.Errorf("Append DNS suffix failed: %v", err) } _, err = svrConn.Write(buffer[0:length]) if err != nil { if !logTimeout(err) { - glog.Errorf("Write failed: %v", err) + klog.Errorf("Write failed: %v", err) } } } else { length, err = recoverDNSQuestion(state.msg.Question[0].Name, msg, buffer, length) if err != nil { - glog.Errorf("Recover DNS question failed: %v", err) + klog.Errorf("Recover DNS question failed: %v", err) } dnsClients.mu.Lock() @@ -421,7 +421,7 @@ func processDNSQueryPacket( dnsSearch []string) (int, error) { msg := &dns.Msg{} if err := msg.Unpack(buffer[:length]); err != nil { - glog.Warningf("Unable to unpack DNS packet. Error is: %v", err) + klog.Warningf("Unable to unpack DNS packet. Error is: %v", err) return length, err } @@ -432,14 +432,14 @@ func processDNSQueryPacket( // QDCOUNT if len(msg.Question) != 1 { - glog.V(1).Infof("Number of entries in the question section of the DNS packet is: %d", len(msg.Question)) - glog.V(1).Infof("DNS suffix appending does not support more than one question.") + klog.V(1).Infof("Number of entries in the question section of the DNS packet is: %d", len(msg.Question)) + klog.V(1).Infof("DNS suffix appending does not support more than one question.") return length, nil } // ANCOUNT, NSCOUNT, ARCOUNT if len(msg.Answer) != 0 || len(msg.Ns) != 0 || len(msg.Extra) != 0 { - glog.V(1).Infof("DNS packet contains more than question section.") + klog.V(1).Infof("DNS packet contains more than question section.") return length, nil } @@ -448,7 +448,7 @@ func processDNSQueryPacket( if packetRequiresDNSSuffix(dnsQType, dnsQClass) { host, _, err := net.SplitHostPort(cliAddr.String()) if err != nil { - glog.V(1).Infof("Failed to get host from client address: %v", err) + klog.V(1).Infof("Failed to get host from client address: %v", err) host = cliAddr.String() } @@ -468,7 +468,7 @@ func processDNSResponsePacket( var drop bool msg := &dns.Msg{} if err := msg.Unpack(buffer[:length]); err != nil { - glog.Warningf("Unable to unpack DNS packet. Error is: %v", err) + klog.Warningf("Unable to unpack DNS packet. 
Error is: %v", err) return drop, length, err } @@ -479,7 +479,7 @@ func processDNSResponsePacket( // QDCOUNT if len(msg.Question) != 1 { - glog.V(1).Infof("Number of entries in the response section of the DNS packet is: %d", len(msg.Answer)) + klog.V(1).Infof("Number of entries in the response section of the DNS packet is: %d", len(msg.Answer)) return drop, length, nil } @@ -488,7 +488,7 @@ func processDNSResponsePacket( if packetRequiresDNSSuffix(dnsQType, dnsQClass) { host, _, err := net.SplitHostPort(cliAddr.String()) if err != nil { - glog.V(1).Infof("Failed to get host from client address: %v", err) + klog.V(1).Infof("Failed to get host from client address: %v", err) host = cliAddr.String() } @@ -505,7 +505,7 @@ func (udp *udpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serv dnsSearch = []string{"", namespaceServiceDomain, serviceDomain, clusterDomain} execer := exec.New() ipconfigInterface := ipconfig.New(execer) - suffixList, err := ipconfigInterface.GetDnsSuffixSearchList() + suffixList, err := ipconfigInterface.GetDNSSuffixSearchList() if err == nil { for _, suffix := range suffixList { dnsSearch = append(dnsSearch, suffix) @@ -525,11 +525,11 @@ func (udp *udpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serv if err != nil { if e, ok := err.(net.Error); ok { if e.Temporary() { - glog.V(1).Infof("ReadFrom had a temporary failure: %v", err) + klog.V(1).Infof("ReadFrom had a temporary failure: %v", err) continue } } - glog.Errorf("ReadFrom failed, exiting ProxyLoop: %v", err) + klog.Errorf("ReadFrom failed, exiting ProxyLoop: %v", err) break } @@ -537,7 +537,7 @@ func (udp *udpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serv if isDNSService(service.Port) { n, err = processDNSQueryPacket(myInfo.dnsClients, cliAddr, buffer[:], n, dnsSearch) if err != nil { - glog.Errorf("Process DNS query packet failed: %v", err) + klog.Errorf("Process DNS query packet failed: %v", err) } } @@ -551,14 +551,14 @@ func (udp *udpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serv _, err = svrConn.Write(buffer[0:n]) if err != nil { if !logTimeout(err) { - glog.Errorf("Write failed: %v", err) + klog.Errorf("Write failed: %v", err) // TODO: Maybe tear down the goroutine for this client/server pair? } continue } err = svrConn.SetDeadline(time.Now().Add(myInfo.timeout)) if err != nil { - glog.Errorf("SetDeadline failed: %v", err) + klog.Errorf("SetDeadline failed: %v", err) continue } } @@ -572,14 +572,14 @@ func (udp *udpProxySocket) getBackendConn(activeClients *clientCache, dnsClients if !found { // TODO: This could spin up a new goroutine to make the outbound connection, // and keep accepting inbound traffic. 
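A few hunks above, the winuserspace DNS interception path (appendDNSSuffix, processUnpackedDNSQueryPacket) rewrites single-question queries by gluing a search-list suffix onto the already-fully-qualified name before re-packing. A minimal sketch of that core step with `github.com/miekg/dns`, the same library the proxier imports (`appendSuffix` is an illustrative name):

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

// appendSuffix sketches the core of appendDNSSuffix: the question name is
// already fully qualified ("kubernetes."), so suffix plus a trailing dot
// yields "kubernetes.default.svc.cluster.local.".
func appendSuffix(msg *dns.Msg, suffix string) ([]byte, error) {
	// Suffix appending supports exactly one question, mirroring the
	// QDCOUNT check in processDNSQueryPacket above.
	if len(msg.Question) != 1 {
		return nil, fmt.Errorf("expected one question, got %d", len(msg.Question))
	}
	msg.Question[0].Name += suffix + "."
	return msg.Pack()
}

func main() {
	query := new(dns.Msg)
	query.SetQuestion("kubernetes.", dns.TypeA)
	packed, err := appendSuffix(query, "default.svc.cluster.local")
	fmt.Println(query.Question[0].Name, len(packed) > 0, err)
	// kubernetes.default.svc.cluster.local. true <nil>
}
```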
- glog.V(3).Infof("New UDP connection from %s", cliAddr) + klog.V(3).Infof("New UDP connection from %s", cliAddr) var err error svrConn, err = tryConnect(service, cliAddr, "udp", proxier) if err != nil { return nil, err } if err = svrConn.SetDeadline(time.Now().Add(timeout)); err != nil { - glog.Errorf("SetDeadline failed: %v", err) + klog.Errorf("SetDeadline failed: %v", err) return nil, err } activeClients.clients[cliAddr.String()] = svrConn @@ -600,7 +600,7 @@ func (udp *udpProxySocket) proxyClient(cliAddr net.Addr, svrConn net.Conn, activ n, err := svrConn.Read(buffer[0:]) if err != nil { if !logTimeout(err) { - glog.Errorf("Read failed: %v", err) + klog.Errorf("Read failed: %v", err) } break } @@ -609,20 +609,20 @@ func (udp *udpProxySocket) proxyClient(cliAddr net.Addr, svrConn net.Conn, activ if isDNSService(service.Port) { drop, n, err = processDNSResponsePacket(svrConn, dnsClients, cliAddr, buffer[:], n, dnsSearch) if err != nil { - glog.Errorf("Process DNS response packet failed: %v", err) + klog.Errorf("Process DNS response packet failed: %v", err) } } if !drop { err = svrConn.SetDeadline(time.Now().Add(timeout)) if err != nil { - glog.Errorf("SetDeadline failed: %v", err) + klog.Errorf("SetDeadline failed: %v", err) break } n, err = udp.WriteTo(buffer[0:n], cliAddr) if err != nil { if !logTimeout(err) { - glog.Errorf("WriteTo failed: %v", err) + klog.Errorf("WriteTo failed: %v", err) } break } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/roundrobin.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/roundrobin.go index d3135a038afa..a712ed60bd12 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/roundrobin.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/roundrobin.go @@ -25,9 +25,9 @@ import ( "sync" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/proxy" "k8s.io/kubernetes/pkg/util/slice" ) @@ -82,7 +82,7 @@ func NewLoadBalancerRR() *LoadBalancerRR { } func (lb *LoadBalancerRR) NewService(svcPort proxy.ServicePortName, affinityType v1.ServiceAffinity, ttlSeconds int) error { - glog.V(4).Infof("LoadBalancerRR NewService %q", svcPort) + klog.V(4).Infof("LoadBalancerRR NewService %q", svcPort) lb.lock.Lock() defer lb.lock.Unlock() lb.newServiceInternal(svcPort, affinityType, ttlSeconds) @@ -97,7 +97,7 @@ func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affi if _, exists := lb.services[svcPort]; !exists { lb.services[svcPort] = &balancerState{affinity: *newAffinityPolicy(affinityType, ttlSeconds)} - glog.V(4).Infof("LoadBalancerRR service %q did not exist, created", svcPort) + klog.V(4).Infof("LoadBalancerRR service %q did not exist, created", svcPort) } else if affinityType != "" { lb.services[svcPort].affinity.affinityType = affinityType } @@ -105,7 +105,7 @@ func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affi } func (lb *LoadBalancerRR) DeleteService(svcPort proxy.ServicePortName) { - glog.V(4).Infof("LoadBalancerRR DeleteService %q", svcPort) + klog.V(4).Infof("LoadBalancerRR DeleteService %q", svcPort) lb.lock.Lock() defer lb.lock.Unlock() delete(lb.services, svcPort) @@ -135,7 +135,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne if len(state.endpoints) == 0 { return "", ErrMissingEndpoints } - glog.V(4).Infof("NextEndpoint for service %q, srcAddr=%v: endpoints: %+v", svcPort, srcAddr, 
state.endpoints) + klog.V(4).Infof("NextEndpoint for service %q, srcAddr=%v: endpoints: %+v", svcPort, srcAddr, state.endpoints) sessionAffinityEnabled := isSessionAffinity(&state.affinity) @@ -153,7 +153,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne // Affinity wins. endpoint := sessionAffinity.endpoint sessionAffinity.lastUsed = time.Now() - glog.V(4).Infof("NextEndpoint for service %q from IP %s with sessionAffinity %#v: %s", svcPort, ipaddr, sessionAffinity, endpoint) + klog.V(4).Infof("NextEndpoint for service %q from IP %s with sessionAffinity %#v: %s", svcPort, ipaddr, sessionAffinity, endpoint) return endpoint, nil } } @@ -172,7 +172,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne affinity.lastUsed = time.Now() affinity.endpoint = endpoint affinity.clientIP = ipaddr - glog.V(4).Infof("Updated affinity key %s: %#v", ipaddr, state.affinity.affinityMap[ipaddr]) + klog.V(4).Infof("Updated affinity key %s: %#v", ipaddr, state.affinity.affinityMap[ipaddr]) } return endpoint, nil @@ -204,7 +204,7 @@ func flattenValidEndpoints(endpoints []hostPortPair) []string { func removeSessionAffinityByEndpoint(state *balancerState, svcPort proxy.ServicePortName, endpoint string) { for _, affinity := range state.affinity.affinityMap { if affinity.endpoint == endpoint { - glog.V(4).Infof("Removing client: %s from affinityMap for service %q", affinity.endpoint, svcPort) + klog.V(4).Infof("Removing client: %s from affinityMap for service %q", affinity.endpoint, svcPort) delete(state.affinity.affinityMap, affinity.clientIP) } } @@ -227,7 +227,7 @@ func (lb *LoadBalancerRR) updateAffinityMap(svcPort proxy.ServicePortName, newEn } for mKey, mVal := range allEndpoints { if mVal == 1 { - glog.V(2).Infof("Delete endpoint %s for service %q", mKey, svcPort) + klog.V(2).Infof("Delete endpoint %s for service %q", mKey, svcPort) removeSessionAffinityByEndpoint(state, svcPort, mKey) } } @@ -263,7 +263,7 @@ func (lb *LoadBalancerRR) OnEndpointsAdd(endpoints *v1.Endpoints) { state, exists := lb.services[svcPort] if !exists || state == nil || len(newEndpoints) > 0 { - glog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints) + klog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints) lb.updateAffinityMap(svcPort, newEndpoints) // OnEndpointsAdd can be called without NewService being called externally. // To be safe we will call it here. A new service will only be created @@ -297,7 +297,7 @@ func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *v1.Endpoint } if !exists || state == nil || len(curEndpoints) != len(newEndpoints) || !slicesEquiv(slice.CopyStrings(curEndpoints), newEndpoints) { - glog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints) + klog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints) lb.updateAffinityMap(svcPort, newEndpoints) // OnEndpointsUpdate can be called without NewService being called externally. // To be safe we will call it here. 
A new service will only be created @@ -315,7 +315,7 @@ func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *v1.Endpoint for portname := range oldPortsToEndpoints { svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname} if _, exists := registeredEndpoints[svcPort]; !exists { - glog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort) + klog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort) // Reset but don't delete. state := lb.services[svcPort] state.endpoints = []string{} @@ -333,7 +333,7 @@ func (lb *LoadBalancerRR) OnEndpointsDelete(endpoints *v1.Endpoints) { for portname := range portsToEndpoints { svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname} - glog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort) + klog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort) // If the service is still around, reset but don't delete. if state, ok := lb.services[svcPort]; ok { state.endpoints = []string{} @@ -367,7 +367,7 @@ func (lb *LoadBalancerRR) CleanupStaleStickySessions(svcPort proxy.ServicePortNa } for ip, affinity := range state.affinity.affinityMap { if int(time.Since(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds { - glog.V(4).Infof("Removing client %s from affinityMap for service %q", affinity.clientIP, svcPort) + klog.V(4).Infof("Removing client %s from affinityMap for service %q", affinity.clientIP, svcPort) delete(state.affinity.affinityMap, ip) } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/quota/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/quota/BUILD deleted file mode 100644 index 1fb064349934..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/quota/BUILD +++ /dev/null @@ -1,54 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = [ - "interfaces.go", - "resources.go", - ], - importpath = "k8s.io/kubernetes/pkg/quota", - deps = [ - "//pkg/apis/core:go_default_library", - "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", - "//staging/src/k8s.io/client-go/tools/cache:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["resources_test.go"], - embed = [":go_default_library"], - deps = [ - "//pkg/apis/core:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//pkg/quota/evaluator/core:all-srcs", - "//pkg/quota/generic:all-srcs", - "//pkg/quota/install:all-srcs", - ], - tags = ["automanaged"], -) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/quota/resources.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/quota/resources.go deleted file mode 100644 index b261aedef5f0..000000000000 --- 
a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/quota/resources.go +++ /dev/null @@ -1,283 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package quota - -import ( - "strings" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/util/sets" - api "k8s.io/kubernetes/pkg/apis/core" -) - -// Equals returns true if the two lists are equivalent -func Equals(a api.ResourceList, b api.ResourceList) bool { - if len(a) != len(b) { - return false - } - - for key, value1 := range a { - value2, found := b[key] - if !found { - return false - } - if value1.Cmp(value2) != 0 { - return false - } - } - - return true -} - -// V1Equals returns true if the two lists are equivalent -func V1Equals(a v1.ResourceList, b v1.ResourceList) bool { - if len(a) != len(b) { - return false - } - - for key, value1 := range a { - value2, found := b[key] - if !found { - return false - } - if value1.Cmp(value2) != 0 { - return false - } - } - - return true -} - -// LessThanOrEqual returns true if a < b for each key in b -// If false, it returns the keys in a that exceeded b -func LessThanOrEqual(a api.ResourceList, b api.ResourceList) (bool, []api.ResourceName) { - result := true - resourceNames := []api.ResourceName{} - for key, value := range b { - if other, found := a[key]; found { - if other.Cmp(value) > 0 { - result = false - resourceNames = append(resourceNames, key) - } - } - } - return result, resourceNames -} - -// Max returns the result of Max(a, b) for each named resource -func Max(a api.ResourceList, b api.ResourceList) api.ResourceList { - result := api.ResourceList{} - for key, value := range a { - if other, found := b[key]; found { - if value.Cmp(other) <= 0 { - result[key] = *other.Copy() - continue - } - } - result[key] = *value.Copy() - } - for key, value := range b { - if _, found := result[key]; !found { - result[key] = *value.Copy() - } - } - return result -} - -// Add returns the result of a + b for each named resource -func Add(a api.ResourceList, b api.ResourceList) api.ResourceList { - result := api.ResourceList{} - for key, value := range a { - quantity := *value.Copy() - if other, found := b[key]; found { - quantity.Add(other) - } - result[key] = quantity - } - for key, value := range b { - if _, found := result[key]; !found { - quantity := *value.Copy() - result[key] = quantity - } - } - return result -} - -// SubtractWithNonNegativeResult - subtracts and returns result of a - b but -// makes sure we don't return negative values to prevent negative resource usage. 
-func SubtractWithNonNegativeResult(a api.ResourceList, b api.ResourceList) api.ResourceList { - zero := resource.MustParse("0") - - result := api.ResourceList{} - for key, value := range a { - quantity := *value.Copy() - if other, found := b[key]; found { - quantity.Sub(other) - } - if quantity.Cmp(zero) > 0 { - result[key] = quantity - } else { - result[key] = zero - } - } - - for key := range b { - if _, found := result[key]; !found { - result[key] = zero - } - } - return result -} - -// Subtract returns the result of a - b for each named resource -func Subtract(a api.ResourceList, b api.ResourceList) api.ResourceList { - result := api.ResourceList{} - for key, value := range a { - quantity := *value.Copy() - if other, found := b[key]; found { - quantity.Sub(other) - } - result[key] = quantity - } - for key, value := range b { - if _, found := result[key]; !found { - quantity := *value.Copy() - quantity.Neg() - result[key] = quantity - } - } - return result -} - -// Mask returns a new resource list that only has the values with the specified names -func Mask(resources api.ResourceList, names []api.ResourceName) api.ResourceList { - nameSet := ToSet(names) - result := api.ResourceList{} - for key, value := range resources { - if nameSet.Has(string(key)) { - result[key] = *value.Copy() - } - } - return result -} - -// ResourceNames returns a list of all resource names in the ResourceList -func ResourceNames(resources api.ResourceList) []api.ResourceName { - result := []api.ResourceName{} - for resourceName := range resources { - result = append(result, resourceName) - } - return result -} - -// Contains returns true if the specified item is in the list of items -func Contains(items []api.ResourceName, item api.ResourceName) bool { - return ToSet(items).Has(string(item)) -} - -// ContainsPrefix returns true if the specified item has a prefix that contained in given prefix Set -func ContainsPrefix(prefixSet []string, item api.ResourceName) bool { - for _, prefix := range prefixSet { - if strings.HasPrefix(string(item), prefix) { - return true - } - } - return false -} - -// Intersection returns the intersection of both list of resources -func Intersection(a []api.ResourceName, b []api.ResourceName) []api.ResourceName { - setA := ToSet(a) - setB := ToSet(b) - setC := setA.Intersection(setB) - result := []api.ResourceName{} - for _, resourceName := range setC.List() { - result = append(result, api.ResourceName(resourceName)) - } - return result -} - -// IsZero returns true if each key maps to the quantity value 0 -func IsZero(a api.ResourceList) bool { - zero := resource.MustParse("0") - for _, v := range a { - if v.Cmp(zero) != 0 { - return false - } - } - return true -} - -// IsNegative returns the set of resource names that have a negative value. 
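
The deletions above are one half of a package move rather than a removal: the same quota helpers reappear later in this diff under `pkg/quota/v1`, rewritten from the internal `pkg/apis/core` types to the public core/v1 types. A comment-only sketch of how every helper signature changes across the move:

```go
// Before the move (k8s.io/kubernetes/pkg/quota, internal types):
//
//     import api "k8s.io/kubernetes/pkg/apis/core"
//     func Equals(a api.ResourceList, b api.ResourceList) bool
//
// After the move (k8s.io/kubernetes/pkg/quota/v1, public types):
//
//     import corev1 "k8s.io/api/core/v1"
//     func Equals(a corev1.ResourceList, b corev1.ResourceList) bool
```
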
-func IsNegative(a api.ResourceList) []api.ResourceName { - results := []api.ResourceName{} - zero := resource.MustParse("0") - for k, v := range a { - if v.Cmp(zero) < 0 { - results = append(results, k) - } - } - return results -} - -// ToSet takes a list of resource names and converts to a string set -func ToSet(resourceNames []api.ResourceName) sets.String { - result := sets.NewString() - for _, resourceName := range resourceNames { - result.Insert(string(resourceName)) - } - return result -} - -// CalculateUsage calculates and returns the requested ResourceList usage -func CalculateUsage(namespaceName string, scopes []api.ResourceQuotaScope, hardLimits api.ResourceList, registry Registry, scopeSelector *api.ScopeSelector) (api.ResourceList, error) { - // find the intersection between the hard resources on the quota - // and the resources this controller can track to know what we can - // look to measure updated usage stats for - hardResources := ResourceNames(hardLimits) - potentialResources := []api.ResourceName{} - evaluators := registry.List() - for _, evaluator := range evaluators { - potentialResources = append(potentialResources, evaluator.MatchingResources(hardResources)...) - } - // NOTE: the intersection just removes duplicates since the evaluator match intersects with hard - matchedResources := Intersection(hardResources, potentialResources) - - // sum the observed usage from each evaluator - newUsage := api.ResourceList{} - for _, evaluator := range evaluators { - // only trigger the evaluator if it matches a resource in the quota, otherwise, skip calculating anything - intersection := evaluator.MatchingResources(matchedResources) - if len(intersection) == 0 { - continue - } - - usageStatsOptions := UsageStatsOptions{Namespace: namespaceName, Scopes: scopes, Resources: intersection, ScopeSelector: scopeSelector} - stats, err := evaluator.UsageStats(usageStatsOptions) - if err != nil { - return nil, err - } - newUsage = Add(newUsage, stats.Used) - } - - // mask the observed usage to only the set of resources tracked by this quota - // merge our observed usage with the quota usage status - // if the new usage is different than the last usage, we will need to do an update - newUsage = Mask(newUsage, matchedResources) - return newUsage, nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/quota/v1/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/quota/v1/BUILD new file mode 100644 index 000000000000..7b3cb195b78f --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/quota/v1/BUILD @@ -0,0 +1,53 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = [ + "interfaces.go", + "resources.go", + ], + importpath = "k8s.io/kubernetes/pkg/quota/v1", + deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", + "//staging/src/k8s.io/client-go/tools/cache:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["resources_test.go"], + embed = [":go_default_library"], + deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", + 
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/quota/v1/evaluator/core:all-srcs", + "//pkg/quota/v1/generic:all-srcs", + "//pkg/quota/v1/install:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/quota/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/quota/v1/OWNERS similarity index 100% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/quota/OWNERS rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/quota/v1/OWNERS diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/quota/interfaces.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/quota/v1/interfaces.go similarity index 80% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/quota/interfaces.go rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/quota/v1/interfaces.go index e6723b8aef78..d71b66418309 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/quota/interfaces.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/quota/v1/interfaces.go @@ -17,11 +17,11 @@ limitations under the License. package quota import ( + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/admission" "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/apis/core" ) // UsageStatsOptions is an options structs that describes how stats should be calculated @@ -29,37 +29,37 @@ type UsageStatsOptions struct { // Namespace where stats should be calculate Namespace string // Scopes that must match counted objects - Scopes []api.ResourceQuotaScope + Scopes []corev1.ResourceQuotaScope // Resources are the set of resources to include in the measurement - Resources []api.ResourceName - ScopeSelector *api.ScopeSelector + Resources []corev1.ResourceName + ScopeSelector *corev1.ScopeSelector } // UsageStats is result of measuring observed resource use in the system type UsageStats struct { // Used maps resource to quantity used - Used api.ResourceList + Used corev1.ResourceList } // Evaluator knows how to evaluate quota usage for a particular group resource type Evaluator interface { // Constraints ensures that each required resource is present on item - Constraints(required []api.ResourceName, item runtime.Object) error + Constraints(required []corev1.ResourceName, item runtime.Object) error // GroupResource returns the groupResource that this object knows how to evaluate GroupResource() schema.GroupResource // Handles determines if quota could be impacted by the specified attribute. // If true, admission control must perform quota processing for the operation, otherwise it is safe to ignore quota. Handles(operation admission.Attributes) bool // Matches returns true if the specified quota matches the input item - Matches(resourceQuota *api.ResourceQuota, item runtime.Object) (bool, error) + Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error) // MatchingScopes takes the input specified list of scopes and input object and returns the set of scopes that matches input object. 
- MatchingScopes(item runtime.Object, scopes []api.ScopedResourceSelectorRequirement) ([]api.ScopedResourceSelectorRequirement, error) + MatchingScopes(item runtime.Object, scopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) // UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes. It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope - UncoveredQuotaScopes(limitedScopes []api.ScopedResourceSelectorRequirement, matchedQuotaScopes []api.ScopedResourceSelectorRequirement) ([]api.ScopedResourceSelectorRequirement, error) + UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) // MatchingResources takes the input specified list of resources and returns the set of resources evaluator matches. - MatchingResources(input []api.ResourceName) []api.ResourceName + MatchingResources(input []corev1.ResourceName) []corev1.ResourceName // Usage returns the resource usage for the specified object - Usage(item runtime.Object) (api.ResourceList, error) + Usage(item runtime.Object) (corev1.ResourceList, error) // UsageStats calculates latest observed usage stats for all objects UsageStats(options UsageStatsOptions) (UsageStats, error) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/quota/v1/resources.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/quota/v1/resources.go new file mode 100644 index 000000000000..b6aa3210d4b8 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/quota/v1/resources.go @@ -0,0 +1,282 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package quota + +import ( + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/util/sets" +) + +// Equals returns true if the two lists are equivalent +func Equals(a corev1.ResourceList, b corev1.ResourceList) bool { + if len(a) != len(b) { + return false + } + + for key, value1 := range a { + value2, found := b[key] + if !found { + return false + } + if value1.Cmp(value2) != 0 { + return false + } + } + + return true +} + +// V1Equals returns true if the two lists are equivalent +func V1Equals(a corev1.ResourceList, b corev1.ResourceList) bool { + if len(a) != len(b) { + return false + } + + for key, value1 := range a { + value2, found := b[key] + if !found { + return false + } + if value1.Cmp(value2) != 0 { + return false + } + } + + return true +} + +// LessThanOrEqual returns true if a < b for each key in b +// If false, it returns the keys in a that exceeded b +func LessThanOrEqual(a corev1.ResourceList, b corev1.ResourceList) (bool, []corev1.ResourceName) { + result := true + resourceNames := []corev1.ResourceName{} + for key, value := range b { + if other, found := a[key]; found { + if other.Cmp(value) > 0 { + result = false + resourceNames = append(resourceNames, key) + } + } + } + return result, resourceNames +} + +// Max returns the result of Max(a, b) for each named resource +func Max(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList { + result := corev1.ResourceList{} + for key, value := range a { + if other, found := b[key]; found { + if value.Cmp(other) <= 0 { + result[key] = *other.Copy() + continue + } + } + result[key] = *value.Copy() + } + for key, value := range b { + if _, found := result[key]; !found { + result[key] = *value.Copy() + } + } + return result +} + +// Add returns the result of a + b for each named resource +func Add(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList { + result := corev1.ResourceList{} + for key, value := range a { + quantity := *value.Copy() + if other, found := b[key]; found { + quantity.Add(other) + } + result[key] = quantity + } + for key, value := range b { + if _, found := result[key]; !found { + quantity := *value.Copy() + result[key] = quantity + } + } + return result +} + +// SubtractWithNonNegativeResult - subtracts and returns result of a - b but +// makes sure we don't return negative values to prevent negative resource usage. 
+func SubtractWithNonNegativeResult(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList { + zero := resource.MustParse("0") + + result := corev1.ResourceList{} + for key, value := range a { + quantity := *value.Copy() + if other, found := b[key]; found { + quantity.Sub(other) + } + if quantity.Cmp(zero) > 0 { + result[key] = quantity + } else { + result[key] = zero + } + } + + for key := range b { + if _, found := result[key]; !found { + result[key] = zero + } + } + return result +} + +// Subtract returns the result of a - b for each named resource +func Subtract(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList { + result := corev1.ResourceList{} + for key, value := range a { + quantity := *value.Copy() + if other, found := b[key]; found { + quantity.Sub(other) + } + result[key] = quantity + } + for key, value := range b { + if _, found := result[key]; !found { + quantity := *value.Copy() + quantity.Neg() + result[key] = quantity + } + } + return result +} + +// Mask returns a new resource list that only has the values with the specified names +func Mask(resources corev1.ResourceList, names []corev1.ResourceName) corev1.ResourceList { + nameSet := ToSet(names) + result := corev1.ResourceList{} + for key, value := range resources { + if nameSet.Has(string(key)) { + result[key] = *value.Copy() + } + } + return result +} + +// ResourceNames returns a list of all resource names in the ResourceList +func ResourceNames(resources corev1.ResourceList) []corev1.ResourceName { + result := []corev1.ResourceName{} + for resourceName := range resources { + result = append(result, resourceName) + } + return result +} + +// Contains returns true if the specified item is in the list of items +func Contains(items []corev1.ResourceName, item corev1.ResourceName) bool { + return ToSet(items).Has(string(item)) +} + +// ContainsPrefix returns true if the specified item has a prefix that contained in given prefix Set +func ContainsPrefix(prefixSet []string, item corev1.ResourceName) bool { + for _, prefix := range prefixSet { + if strings.HasPrefix(string(item), prefix) { + return true + } + } + return false +} + +// Intersection returns the intersection of both list of resources +func Intersection(a []corev1.ResourceName, b []corev1.ResourceName) []corev1.ResourceName { + setA := ToSet(a) + setB := ToSet(b) + setC := setA.Intersection(setB) + result := []corev1.ResourceName{} + for _, resourceName := range setC.List() { + result = append(result, corev1.ResourceName(resourceName)) + } + return result +} + +// IsZero returns true if each key maps to the quantity value 0 +func IsZero(a corev1.ResourceList) bool { + zero := resource.MustParse("0") + for _, v := range a { + if v.Cmp(zero) != 0 { + return false + } + } + return true +} + +// IsNegative returns the set of resource names that have a negative value. 
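
Taken together, `Add`, `Subtract`, `SubtractWithNonNegativeResult`, and `LessThanOrEqual` above implement element-wise arithmetic over `ResourceList` maps. A minimal usage sketch, assuming the new `k8s.io/kubernetes/pkg/quota/v1` import path this file introduces:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	quota "k8s.io/kubernetes/pkg/quota/v1"
)

func main() {
	a := corev1.ResourceList{
		corev1.ResourceCPU:    resource.MustParse("500m"),
		corev1.ResourceMemory: resource.MustParse("1Gi"),
	}
	b := corev1.ResourceList{
		corev1.ResourceCPU:    resource.MustParse("750m"),
		corev1.ResourceMemory: resource.MustParse("512Mi"),
	}

	sum := quota.Add(a, b)                            // cpu 1250m, memory 1536Mi
	diff := quota.SubtractWithNonNegativeResult(a, b) // cpu clamps to 0, memory 512Mi
	ok, exceeded := quota.LessThanOrEqual(a, b)       // false, [memory]: a's memory exceeds b's
	fmt.Println(sum, diff, ok, exceeded)
}
```
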
+func IsNegative(a corev1.ResourceList) []corev1.ResourceName { + results := []corev1.ResourceName{} + zero := resource.MustParse("0") + for k, v := range a { + if v.Cmp(zero) < 0 { + results = append(results, k) + } + } + return results +} + +// ToSet takes a list of resource names and converts to a string set +func ToSet(resourceNames []corev1.ResourceName) sets.String { + result := sets.NewString() + for _, resourceName := range resourceNames { + result.Insert(string(resourceName)) + } + return result +} + +// CalculateUsage calculates and returns the requested ResourceList usage +func CalculateUsage(namespaceName string, scopes []corev1.ResourceQuotaScope, hardLimits corev1.ResourceList, registry Registry, scopeSelector *corev1.ScopeSelector) (corev1.ResourceList, error) { + // find the intersection between the hard resources on the quota + // and the resources this controller can track to know what we can + // look to measure updated usage stats for + hardResources := ResourceNames(hardLimits) + potentialResources := []corev1.ResourceName{} + evaluators := registry.List() + for _, evaluator := range evaluators { + potentialResources = append(potentialResources, evaluator.MatchingResources(hardResources)...) + } + // NOTE: the intersection just removes duplicates since the evaluator match intersects with hard + matchedResources := Intersection(hardResources, potentialResources) + + // sum the observed usage from each evaluator + newUsage := corev1.ResourceList{} + for _, evaluator := range evaluators { + // only trigger the evaluator if it matches a resource in the quota, otherwise, skip calculating anything + intersection := evaluator.MatchingResources(matchedResources) + if len(intersection) == 0 { + continue + } + + usageStatsOptions := UsageStatsOptions{Namespace: namespaceName, Scopes: scopes, Resources: intersection, ScopeSelector: scopeSelector} + stats, err := evaluator.UsageStats(usageStatsOptions) + if err != nil { + return nil, err + } + newUsage = Add(newUsage, stats.Used) + } + + // mask the observed usage to only the set of resources tracked by this quota + // merge our observed usage with the quota usage status + // if the new usage is different than the last usage, we will need to do an update + newUsage = Mask(newUsage, matchedResources) + return newUsage, nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/BUILD deleted file mode 100644 index ad664075455b..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/BUILD +++ /dev/null @@ -1,59 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_test( - name = "go_default_test", - srcs = [ - "policy_compact_test.go", - "policy_comparator_test.go", - "rule_test.go", - ], - embed = [":go_default_library"], - deps = [ - "//pkg/apis/rbac/v1:go_default_library", - "//staging/src/k8s.io/api/rbac/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", - ], -) - -go_library( - name = "go_default_library", - srcs = [ - "internal_version_adapter.go", - "policy_compact.go", - "policy_comparator.go", - "rule.go", - ], - importpath = "k8s.io/kubernetes/pkg/registry/rbac/validation", - deps = [ - 
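
`CalculateUsage` above is the driver that ties these helpers together: it intersects the quota's hard limits with what the registered evaluators can measure, sums the evaluators' observed usage, and masks the result back down to the tracked resources. A hypothetical call, assuming `registry` is a `quota.Registry` built elsewhere (for example by the `generic`/`install` packages listed in the BUILD file):

```go
hard := corev1.ResourceList{
	corev1.ResourcePods: resource.MustParse("10"),
	corev1.ResourceCPU:  resource.MustParse("4"),
}

used, err := quota.CalculateUsage("team-a", nil, hard, registry, nil)
if err != nil {
	return err // hypothetical caller propagates the error
}
// `used` contains only resources that are both named in `hard` and
// supported by at least one evaluator in `registry`.
```
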
"//pkg/apis/rbac:go_default_library", - "//pkg/apis/rbac/v1:go_default_library", - "//staging/src/k8s.io/api/rbac/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/internal_version_adapter.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/internal_version_adapter.go deleted file mode 100644 index bfb57242df97..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/internal_version_adapter.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package validation - -import ( - "context" - - rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/kubernetes/pkg/apis/rbac" - rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1" -) - -func ConfirmNoEscalationInternal(ctx context.Context, ruleResolver AuthorizationRuleResolver, inRules []rbac.PolicyRule) error { - rules := []rbacv1.PolicyRule{} - for i := range inRules { - v1Rule := rbacv1.PolicyRule{} - err := rbacv1helpers.Convert_rbac_PolicyRule_To_v1_PolicyRule(&inRules[i], &v1Rule, nil) - if err != nil { - return err - } - rules = append(rules, v1Rule) - } - - return ConfirmNoEscalation(ctx, ruleResolver, rules) -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/policy_compact.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/policy_compact.go deleted file mode 100644 index 182657b1ca9e..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/policy_compact.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package validation - -import ( - "reflect" - - rbacv1 "k8s.io/api/rbac/v1" -) - -type simpleResource struct { - Group string - Resource string - ResourceNameExist bool - ResourceName string -} - -// CompactRules combines rules that contain a single APIGroup/Resource, differ only by verb, and contain no other attributes. -// this is a fast check, and works well with the decomposed "missing rules" list from a Covers check. -func CompactRules(rules []rbacv1.PolicyRule) ([]rbacv1.PolicyRule, error) { - compacted := make([]rbacv1.PolicyRule, 0, len(rules)) - - simpleRules := map[simpleResource]*rbacv1.PolicyRule{} - for _, rule := range rules { - if resource, isSimple := isSimpleResourceRule(&rule); isSimple { - if existingRule, ok := simpleRules[resource]; ok { - // Add the new verbs to the existing simple resource rule - if existingRule.Verbs == nil { - existingRule.Verbs = []string{} - } - existingRule.Verbs = append(existingRule.Verbs, rule.Verbs...) - } else { - // Copy the rule to accumulate matching simple resource rules into - simpleRules[resource] = rule.DeepCopy() - } - } else { - compacted = append(compacted, rule) - } - } - - // Once we've consolidated the simple resource rules, add them to the compacted list - for _, simpleRule := range simpleRules { - compacted = append(compacted, *simpleRule) - } - - return compacted, nil -} - -// isSimpleResourceRule returns true if the given rule contains verbs, a single resource, a single API group, at most one Resource Name, and no other values -func isSimpleResourceRule(rule *rbacv1.PolicyRule) (simpleResource, bool) { - resource := simpleResource{} - - // If we have "complex" rule attributes, return early without allocations or expensive comparisons - if len(rule.ResourceNames) > 1 || len(rule.NonResourceURLs) > 0 { - return resource, false - } - // If we have multiple api groups or resources, return early - if len(rule.APIGroups) != 1 || len(rule.Resources) != 1 { - return resource, false - } - - // Test if this rule only contains APIGroups/Resources/Verbs/ResourceNames - simpleRule := &rbacv1.PolicyRule{APIGroups: rule.APIGroups, Resources: rule.Resources, Verbs: rule.Verbs, ResourceNames: rule.ResourceNames} - if !reflect.DeepEqual(simpleRule, rule) { - return resource, false - } - - if len(rule.ResourceNames) == 0 { - resource = simpleResource{Group: rule.APIGroups[0], Resource: rule.Resources[0], ResourceNameExist: false} - } else { - resource = simpleResource{Group: rule.APIGroups[0], Resource: rule.Resources[0], ResourceNameExist: true, ResourceName: rule.ResourceNames[0]} - } - - return resource, true -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/policy_comparator.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/policy_comparator.go deleted file mode 100644 index 7a0268b5e9ee..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/policy_comparator.go +++ /dev/null @@ -1,173 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
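
Before its removal from the vendored tree here, `CompactRules` (above) merged RBAC rules that differ only by verb while leaving rules with resource names or non-resource URLs alone. A sketch of its effect, assuming `rbacv1` is `k8s.io/api/rbac/v1`:

```go
in := []rbacv1.PolicyRule{
	{APIGroups: []string{""}, Resources: []string{"pods"}, Verbs: []string{"get"}},
	{APIGroups: []string{""}, Resources: []string{"pods"}, Verbs: []string{"list", "watch"}},
	{APIGroups: []string{""}, Resources: []string{"pods"}, Verbs: []string{"get"}, ResourceNames: []string{"my-pod"}},
}

out, _ := CompactRules(in)
// The first two rules share a simpleResource key and collapse into one
// rule with Verbs [get list watch]; the third keys differently
// (ResourceNameExist=true), so it survives unmerged.
```
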
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package validation - -import ( - "strings" - - rbacv1 "k8s.io/api/rbac/v1" -) - -// Covers determines whether or not the ownerRules cover the servantRules in terms of allowed actions. -// It returns whether or not the ownerRules cover and a list of the rules that the ownerRules do not cover. -func Covers(ownerRules, servantRules []rbacv1.PolicyRule) (bool, []rbacv1.PolicyRule) { - // 1. Break every servantRule into individual rule tuples: group, verb, resource, resourceName - // 2. Compare the mini-rules against each owner rule. Because the breakdown is down to the most atomic level, we're guaranteed that each mini-servant rule will be either fully covered or not covered by a single owner rule - // 3. Any left over mini-rules means that we are not covered and we have a nice list of them. - // TODO: it might be nice to collapse the list down into something more human readable - - subrules := []rbacv1.PolicyRule{} - for _, servantRule := range servantRules { - subrules = append(subrules, BreakdownRule(servantRule)...) - } - - uncoveredRules := []rbacv1.PolicyRule{} - for _, subrule := range subrules { - covered := false - for _, ownerRule := range ownerRules { - if ruleCovers(ownerRule, subrule) { - covered = true - break - } - } - - if !covered { - uncoveredRules = append(uncoveredRules, subrule) - } - } - - return (len(uncoveredRules) == 0), uncoveredRules -} - -// BreadownRule takes a rule and builds an equivalent list of rules that each have at most one verb, one -// resource, and one resource name -func BreakdownRule(rule rbacv1.PolicyRule) []rbacv1.PolicyRule { - subrules := []rbacv1.PolicyRule{} - for _, group := range rule.APIGroups { - for _, resource := range rule.Resources { - for _, verb := range rule.Verbs { - if len(rule.ResourceNames) > 0 { - for _, resourceName := range rule.ResourceNames { - subrules = append(subrules, rbacv1.PolicyRule{APIGroups: []string{group}, Resources: []string{resource}, Verbs: []string{verb}, ResourceNames: []string{resourceName}}) - } - - } else { - subrules = append(subrules, rbacv1.PolicyRule{APIGroups: []string{group}, Resources: []string{resource}, Verbs: []string{verb}}) - } - - } - } - } - - // Non-resource URLs are unique because they only combine with verbs. - for _, nonResourceURL := range rule.NonResourceURLs { - for _, verb := range rule.Verbs { - subrules = append(subrules, rbacv1.PolicyRule{NonResourceURLs: []string{nonResourceURL}, Verbs: []string{verb}}) - } - } - - return subrules -} - -func has(set []string, ele string) bool { - for _, s := range set { - if s == ele { - return true - } - } - return false -} - -func hasAll(set, contains []string) bool { - owning := make(map[string]struct{}, len(set)) - for _, ele := range set { - owning[ele] = struct{}{} - } - for _, ele := range contains { - if _, ok := owning[ele]; !ok { - return false - } - } - return true -} - -func resourceCoversAll(setResources, coversResources []string) bool { - // if we have a star or an exact match on all resources, then we match - if has(setResources, rbacv1.ResourceAll) || hasAll(setResources, coversResources) { - return true - } - - for _, path := range coversResources { - // if we have an exact match, then we match. - if has(setResources, path) { - continue - } - // if we're not a subresource, then we definitely don't match. fail. 
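
`Covers` works by decomposition: `BreakdownRule` (above) expands each servant rule into atomic (group, resource, verb, resourceName) tuples, and each tuple is then either fully covered by a single owner rule or reported back as missing. For example:

```go
rule := rbacv1.PolicyRule{
	APIGroups:     []string{"apps"},
	Resources:     []string{"deployments"},
	Verbs:         []string{"get", "update"},
	ResourceNames: []string{"web"},
}

subrules := BreakdownRule(rule)
// Two atomic tuples:
//   {APIGroups: [apps], Resources: [deployments], Verbs: [get],    ResourceNames: [web]}
//   {APIGroups: [apps], Resources: [deployments], Verbs: [update], ResourceNames: [web]}
// Covers(ownerRules, []rbacv1.PolicyRule{rule}) checks each tuple and
// returns the ones no owner rule covers.
```
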
- if !strings.Contains(path, "/") { - return false - } - tokens := strings.SplitN(path, "/", 2) - resourceToCheck := "*/" + tokens[1] - if !has(setResources, resourceToCheck) { - return false - } - } - - return true -} - -func nonResourceURLsCoversAll(set, covers []string) bool { - for _, path := range covers { - covered := false - for _, owner := range set { - if nonResourceURLCovers(owner, path) { - covered = true - break - } - } - if !covered { - return false - } - } - return true -} - -func nonResourceURLCovers(ownerPath, subPath string) bool { - if ownerPath == subPath { - return true - } - return strings.HasSuffix(ownerPath, "*") && strings.HasPrefix(subPath, strings.TrimRight(ownerPath, "*")) -} - -// ruleCovers determines whether the ownerRule (which may have multiple verbs, resources, and resourceNames) covers -// the subrule (which may only contain at most one verb, resource, and resourceName) -func ruleCovers(ownerRule, subRule rbacv1.PolicyRule) bool { - verbMatches := has(ownerRule.Verbs, rbacv1.VerbAll) || hasAll(ownerRule.Verbs, subRule.Verbs) - groupMatches := has(ownerRule.APIGroups, rbacv1.APIGroupAll) || hasAll(ownerRule.APIGroups, subRule.APIGroups) - resourceMatches := resourceCoversAll(ownerRule.Resources, subRule.Resources) - nonResourceURLMatches := nonResourceURLsCoversAll(ownerRule.NonResourceURLs, subRule.NonResourceURLs) - - resourceNameMatches := false - - if len(subRule.ResourceNames) == 0 { - resourceNameMatches = (len(ownerRule.ResourceNames) == 0) - } else { - resourceNameMatches = (len(ownerRule.ResourceNames) == 0) || hasAll(ownerRule.ResourceNames, subRule.ResourceNames) - } - - return verbMatches && groupMatches && resourceMatches && resourceNameMatches && nonResourceURLMatches -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/rule.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/rule.go deleted file mode 100644 index 833ffc1e6c1e..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/rule.go +++ /dev/null @@ -1,357 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package validation - -import ( - "context" - "errors" - "fmt" - "strings" - - "github.com/golang/glog" - - rbacv1 "k8s.io/api/rbac/v1" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apiserver/pkg/authentication/serviceaccount" - "k8s.io/apiserver/pkg/authentication/user" - genericapirequest "k8s.io/apiserver/pkg/endpoints/request" - rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1" -) - -type AuthorizationRuleResolver interface { - // GetRoleReferenceRules attempts to resolve the role reference of a RoleBinding or ClusterRoleBinding. The passed namespace should be the namepsace - // of the role binding, the empty string if a cluster role binding. 
- GetRoleReferenceRules(roleRef rbacv1.RoleRef, namespace string) ([]rbacv1.PolicyRule, error) - - // RulesFor returns the list of rules that apply to a given user in a given namespace and error. If an error is returned, the slice of - // PolicyRules may not be complete, but it contains all retrievable rules. This is done because policy rules are purely additive and policy determinations - // can be made on the basis of those rules that are found. - RulesFor(user user.Info, namespace string) ([]rbacv1.PolicyRule, error) - - // VisitRulesFor invokes visitor() with each rule that applies to a given user in a given namespace, and each error encountered resolving those rules. - // If visitor() returns false, visiting is short-circuited. - VisitRulesFor(user user.Info, namespace string, visitor func(source fmt.Stringer, rule *rbacv1.PolicyRule, err error) bool) -} - -// ConfirmNoEscalation determines if the roles for a given user in a given namespace encompass the provided role. -func ConfirmNoEscalation(ctx context.Context, ruleResolver AuthorizationRuleResolver, rules []rbacv1.PolicyRule) error { - ruleResolutionErrors := []error{} - - user, ok := genericapirequest.UserFrom(ctx) - if !ok { - return fmt.Errorf("no user on context") - } - namespace, _ := genericapirequest.NamespaceFrom(ctx) - - ownerRules, err := ruleResolver.RulesFor(user, namespace) - if err != nil { - // As per AuthorizationRuleResolver contract, this may return a non fatal error with an incomplete list of policies. Log the error and continue. - glog.V(1).Infof("non-fatal error getting local rules for %v: %v", user, err) - ruleResolutionErrors = append(ruleResolutionErrors, err) - } - - ownerRightsCover, missingRights := Covers(ownerRules, rules) - if !ownerRightsCover { - compactMissingRights := missingRights - if compact, err := CompactRules(missingRights); err == nil { - compactMissingRights = compact - } - - missingDescriptions := sets.NewString() - for _, missing := range compactMissingRights { - missingDescriptions.Insert(rbacv1helpers.CompactString(missing)) - } - - msg := fmt.Sprintf("user %q (groups=%q) is attempting to grant RBAC permissions not currently held:\n%s", user.GetName(), user.GetGroups(), strings.Join(missingDescriptions.List(), "\n")) - if len(ruleResolutionErrors) > 0 { - msg = msg + fmt.Sprintf("; resolution errors: %v", ruleResolutionErrors) - } - - return errors.New(msg) - } - return nil -} - -type DefaultRuleResolver struct { - roleGetter RoleGetter - roleBindingLister RoleBindingLister - clusterRoleGetter ClusterRoleGetter - clusterRoleBindingLister ClusterRoleBindingLister -} - -func NewDefaultRuleResolver(roleGetter RoleGetter, roleBindingLister RoleBindingLister, clusterRoleGetter ClusterRoleGetter, clusterRoleBindingLister ClusterRoleBindingLister) *DefaultRuleResolver { - return &DefaultRuleResolver{roleGetter, roleBindingLister, clusterRoleGetter, clusterRoleBindingLister} -} - -type RoleGetter interface { - GetRole(namespace, name string) (*rbacv1.Role, error) -} - -type RoleBindingLister interface { - ListRoleBindings(namespace string) ([]*rbacv1.RoleBinding, error) -} - -type ClusterRoleGetter interface { - GetClusterRole(name string) (*rbacv1.ClusterRole, error) -} - -type ClusterRoleBindingLister interface { - ListClusterRoleBindings() ([]*rbacv1.ClusterRoleBinding, error) -} - -func (r *DefaultRuleResolver) RulesFor(user user.Info, namespace string) ([]rbacv1.PolicyRule, error) { - visitor := &ruleAccumulator{} - r.VisitRulesFor(user, namespace, visitor.visit) - return visitor.rules, 
utilerrors.NewAggregate(visitor.errors) -} - -type ruleAccumulator struct { - rules []rbacv1.PolicyRule - errors []error -} - -func (r *ruleAccumulator) visit(source fmt.Stringer, rule *rbacv1.PolicyRule, err error) bool { - if rule != nil { - r.rules = append(r.rules, *rule) - } - if err != nil { - r.errors = append(r.errors, err) - } - return true -} - -func describeSubject(s *rbacv1.Subject, bindingNamespace string) string { - switch s.Kind { - case rbacv1.ServiceAccountKind: - if len(s.Namespace) > 0 { - return fmt.Sprintf("%s %q", s.Kind, s.Name+"/"+s.Namespace) - } - return fmt.Sprintf("%s %q", s.Kind, s.Name+"/"+bindingNamespace) - default: - return fmt.Sprintf("%s %q", s.Kind, s.Name) - } -} - -type clusterRoleBindingDescriber struct { - binding *rbacv1.ClusterRoleBinding - subject *rbacv1.Subject -} - -func (d *clusterRoleBindingDescriber) String() string { - return fmt.Sprintf("ClusterRoleBinding %q of %s %q to %s", - d.binding.Name, - d.binding.RoleRef.Kind, - d.binding.RoleRef.Name, - describeSubject(d.subject, ""), - ) -} - -type roleBindingDescriber struct { - binding *rbacv1.RoleBinding - subject *rbacv1.Subject -} - -func (d *roleBindingDescriber) String() string { - return fmt.Sprintf("RoleBinding %q of %s %q to %s", - d.binding.Name+"/"+d.binding.Namespace, - d.binding.RoleRef.Kind, - d.binding.RoleRef.Name, - describeSubject(d.subject, d.binding.Namespace), - ) -} - -func (r *DefaultRuleResolver) VisitRulesFor(user user.Info, namespace string, visitor func(source fmt.Stringer, rule *rbacv1.PolicyRule, err error) bool) { - if clusterRoleBindings, err := r.clusterRoleBindingLister.ListClusterRoleBindings(); err != nil { - if !visitor(nil, nil, err) { - return - } - } else { - sourceDescriber := &clusterRoleBindingDescriber{} - for _, clusterRoleBinding := range clusterRoleBindings { - subjectIndex, applies := appliesTo(user, clusterRoleBinding.Subjects, "") - if !applies { - continue - } - rules, err := r.GetRoleReferenceRules(clusterRoleBinding.RoleRef, "") - if err != nil { - if !visitor(nil, nil, err) { - return - } - continue - } - sourceDescriber.binding = clusterRoleBinding - sourceDescriber.subject = &clusterRoleBinding.Subjects[subjectIndex] - for i := range rules { - if !visitor(sourceDescriber, &rules[i], nil) { - return - } - } - } - } - - if len(namespace) > 0 { - if roleBindings, err := r.roleBindingLister.ListRoleBindings(namespace); err != nil { - if !visitor(nil, nil, err) { - return - } - } else { - sourceDescriber := &roleBindingDescriber{} - for _, roleBinding := range roleBindings { - subjectIndex, applies := appliesTo(user, roleBinding.Subjects, namespace) - if !applies { - continue - } - rules, err := r.GetRoleReferenceRules(roleBinding.RoleRef, namespace) - if err != nil { - if !visitor(nil, nil, err) { - return - } - continue - } - sourceDescriber.binding = roleBinding - sourceDescriber.subject = &roleBinding.Subjects[subjectIndex] - for i := range rules { - if !visitor(sourceDescriber, &rules[i], nil) { - return - } - } - } - } - } -} - -// GetRoleReferenceRules attempts to resolve the RoleBinding or ClusterRoleBinding. 
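
`VisitRulesFor` above streams every resolved rule (and every resolution error) to a visitor callback; returning `false` short-circuits the walk. A hypothetical caller, assuming `resolver` implements `AuthorizationRuleResolver` and `u` is a `user.Info`:

```go
resolver.VisitRulesFor(u, "default", func(source fmt.Stringer, rule *rbacv1.PolicyRule, err error) bool {
	switch {
	case err != nil:
		// Errors are non-fatal: rule resolution is purely additive,
		// so keep visiting whatever rules are retrievable.
		fmt.Printf("resolution error: %v\n", err)
	case rule != nil:
		fmt.Printf("%v grants %v on %v\n", source, rule.Verbs, rule.Resources)
	}
	return true // false would stop the walk early
})
```
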
-func (r *DefaultRuleResolver) GetRoleReferenceRules(roleRef rbacv1.RoleRef, bindingNamespace string) ([]rbacv1.PolicyRule, error) { - switch roleRef.Kind { - case "Role": - role, err := r.roleGetter.GetRole(bindingNamespace, roleRef.Name) - if err != nil { - return nil, err - } - return role.Rules, nil - - case "ClusterRole": - clusterRole, err := r.clusterRoleGetter.GetClusterRole(roleRef.Name) - if err != nil { - return nil, err - } - return clusterRole.Rules, nil - - default: - return nil, fmt.Errorf("unsupported role reference kind: %q", roleRef.Kind) - } -} - -// appliesTo returns whether any of the bindingSubjects applies to the specified subject, -// and if true, the index of the first subject that applies -func appliesTo(user user.Info, bindingSubjects []rbacv1.Subject, namespace string) (int, bool) { - for i, bindingSubject := range bindingSubjects { - if appliesToUser(user, bindingSubject, namespace) { - return i, true - } - } - return 0, false -} - -func appliesToUser(user user.Info, subject rbacv1.Subject, namespace string) bool { - switch subject.Kind { - case rbacv1.UserKind: - return user.GetName() == subject.Name - - case rbacv1.GroupKind: - return has(user.GetGroups(), subject.Name) - - case rbacv1.ServiceAccountKind: - // default the namespace to namespace we're working in if its available. This allows rolebindings that reference - // SAs in th local namespace to avoid having to qualify them. - saNamespace := namespace - if len(subject.Namespace) > 0 { - saNamespace = subject.Namespace - } - if len(saNamespace) == 0 { - return false - } - return serviceaccount.MakeUsername(saNamespace, subject.Name) == user.GetName() - default: - return false - } -} - -// NewTestRuleResolver returns a rule resolver from lists of role objects. -func NewTestRuleResolver(roles []*rbacv1.Role, roleBindings []*rbacv1.RoleBinding, clusterRoles []*rbacv1.ClusterRole, clusterRoleBindings []*rbacv1.ClusterRoleBinding) (AuthorizationRuleResolver, *StaticRoles) { - r := StaticRoles{ - roles: roles, - roleBindings: roleBindings, - clusterRoles: clusterRoles, - clusterRoleBindings: clusterRoleBindings, - } - return newMockRuleResolver(&r), &r -} - -func newMockRuleResolver(r *StaticRoles) AuthorizationRuleResolver { - return NewDefaultRuleResolver(r, r, r, r) -} - -// StaticRoles is a rule resolver that resolves from lists of role objects. -type StaticRoles struct { - roles []*rbacv1.Role - roleBindings []*rbacv1.RoleBinding - clusterRoles []*rbacv1.ClusterRole - clusterRoleBindings []*rbacv1.ClusterRoleBinding -} - -func (r *StaticRoles) GetRole(namespace, name string) (*rbacv1.Role, error) { - if len(namespace) == 0 { - return nil, errors.New("must provide namespace when getting role") - } - for _, role := range r.roles { - if role.Namespace == namespace && role.Name == name { - return role, nil - } - } - return nil, errors.New("role not found") -} - -func (r *StaticRoles) GetClusterRole(name string) (*rbacv1.ClusterRole, error) { - for _, clusterRole := range r.clusterRoles { - if clusterRole.Name == name { - return clusterRole, nil - } - } - return nil, errors.New("clusterrole not found") -} - -func (r *StaticRoles) ListRoleBindings(namespace string) ([]*rbacv1.RoleBinding, error) { - if len(namespace) == 0 { - return nil, errors.New("must provide namespace when listing role bindings") - } - - roleBindingList := []*rbacv1.RoleBinding{} - for _, roleBinding := range r.roleBindings { - if roleBinding.Namespace != namespace { - continue - } - // TODO(ericchiang): need to implement label selectors? 
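
`appliesToUser` above matches ServiceAccount subjects by their synthesized username rather than by object reference, defaulting the namespace to the binding's own when the subject omits it. Roughly:

```go
subject := rbacv1.Subject{
	Kind:      rbacv1.ServiceAccountKind,
	Name:      "builder",
	Namespace: "ci",
}

// serviceaccount.MakeUsername("ci", "builder") produces
// "system:serviceaccount:ci:builder", so this subject applies exactly
// when user.GetName() returns that string.
```
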
- roleBindingList = append(roleBindingList, roleBinding) - } - return roleBindingList, nil -} - -func (r *StaticRoles) ListClusterRoleBindings() ([]*rbacv1.ClusterRoleBinding, error) { - return r.clusterRoleBindings, nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/BUILD deleted file mode 100644 index db13be0d1ac9..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/BUILD +++ /dev/null @@ -1,90 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_test( - name = "go_default_test", - srcs = ["scheduler_test.go"], - embed = [":go_default_library"], - deps = [ - "//pkg/api/legacyscheme:go_default_library", - "//pkg/controller/volume/persistentvolume:go_default_library", - "//pkg/scheduler/algorithm:go_default_library", - "//pkg/scheduler/algorithm/predicates:go_default_library", - "//pkg/scheduler/api:go_default_library", - "//pkg/scheduler/cache:go_default_library", - "//pkg/scheduler/core:go_default_library", - "//pkg/scheduler/testing:go_default_library", - "//pkg/scheduler/volumebinder:go_default_library", - "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//staging/src/k8s.io/client-go/tools/cache:go_default_library", - "//staging/src/k8s.io/client-go/tools/record:go_default_library", - ], -) - -go_library( - name = "go_default_library", - srcs = [ - "scheduler.go", - "testutil.go", - ], - importpath = "k8s.io/kubernetes/pkg/scheduler", - deps = [ - "//pkg/features:go_default_library", - "//pkg/scheduler/algorithm:go_default_library", - "//pkg/scheduler/algorithm/predicates:go_default_library", - "//pkg/scheduler/api:go_default_library", - "//pkg/scheduler/cache:go_default_library", - "//pkg/scheduler/core:go_default_library", - "//pkg/scheduler/core/equivalence:go_default_library", - "//pkg/scheduler/metrics:go_default_library", - "//pkg/scheduler/util:go_default_library", - "//pkg/scheduler/volumebinder:go_default_library", - "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", - "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//pkg/scheduler/algorithm:all-srcs", - "//pkg/scheduler/algorithmprovider:all-srcs", - "//pkg/scheduler/api:all-srcs", - 
"//pkg/scheduler/apis/config:all-srcs", - "//pkg/scheduler/cache:all-srcs", - "//pkg/scheduler/core:all-srcs", - "//pkg/scheduler/factory:all-srcs", - "//pkg/scheduler/metrics:all-srcs", - "//pkg/scheduler/testing:all-srcs", - "//pkg/scheduler/util:all-srcs", - "//pkg/scheduler/volumebinder:all-srcs", - ], - tags = ["automanaged"], -) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/OWNERS deleted file mode 100644 index fe768eadb546..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -approvers: -- sig-scheduling-maintainers -reviewers: -- sig-scheduling -labels: -- sig/scheduling diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/BUILD index 254199dd6ff3..0b19967bafa4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/BUILD @@ -12,15 +12,15 @@ go_library( "doc.go", "scheduler_interface.go", "types.go", - "well_known_labels.go", ], importpath = "k8s.io/kubernetes/pkg/scheduler/algorithm", deps = [ - "//pkg/apis/core:go_default_library", "//pkg/scheduler/api:go_default_library", "//pkg/scheduler/cache:go_default_library", + "//pkg/scheduler/internal/cache:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/api/policy/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/BUILD index e4de1bb63f98..58ee7b487cc5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/BUILD @@ -24,6 +24,7 @@ go_library( "//pkg/kubelet/apis:go_default_library", "//pkg/scheduler/algorithm:go_default_library", "//pkg/scheduler/algorithm/priorities/util:go_default_library", + "//pkg/scheduler/api:go_default_library", "//pkg/scheduler/cache:go_default_library", "//pkg/scheduler/util:go_default_library", "//pkg/scheduler/volumebinder:go_default_library", @@ -40,7 +41,7 @@ go_library( "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -48,6 +49,7 @@ go_test( name = "go_default_test", srcs = [ "csi_volume_predicate_test.go", + "main_test.go", "max_attachable_volume_predicate_test.go", "metadata_test.go", "predicates_test.go", @@ -59,6 +61,7 @@ go_test( "//pkg/features:go_default_library", "//pkg/kubelet/apis:go_default_library", "//pkg/scheduler/algorithm:go_default_library", + "//pkg/scheduler/api:go_default_library", "//pkg/scheduler/cache:go_default_library", "//pkg/scheduler/testing:go_default_library", "//pkg/volume/util:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/csi_volume_predicate.go 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/csi_volume_predicate.go index 8e155ea2f18a..ff2215eb28da 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/csi_volume_predicate.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/csi_volume_predicate.go @@ -19,9 +19,9 @@ package predicates import ( "fmt" - "github.com/golang/glog" "k8s.io/api/core/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/scheduler/algorithm" schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" @@ -126,26 +126,26 @@ func (c *CSIMaxVolumeLimitChecker) filterAttachableVolumes( pvc, err := c.pvcInfo.GetPersistentVolumeClaimInfo(namespace, pvcName) if err != nil { - glog.V(4).Infof("Unable to look up PVC info for %s/%s", namespace, pvcName) + klog.V(4).Infof("Unable to look up PVC info for %s/%s", namespace, pvcName) continue } pvName := pvc.Spec.VolumeName // TODO - the actual handling of unbound PVCs will be fixed by late binding design. if pvName == "" { - glog.V(4).Infof("Persistent volume had no name for claim %s/%s", namespace, pvcName) + klog.V(4).Infof("Persistent volume had no name for claim %s/%s", namespace, pvcName) continue } pv, err := c.pvInfo.GetPersistentVolumeInfo(pvName) if err != nil { - glog.V(4).Infof("Unable to look up PV info for PVC %s/%s and PV %s", namespace, pvcName, pvName) + klog.V(4).Infof("Unable to look up PV info for PVC %s/%s and PV %s", namespace, pvcName, pvName) continue } csiSource := pv.Spec.PersistentVolumeSource.CSI if csiSource == nil { - glog.V(4).Infof("Not considering non-CSI volume %s/%s", namespace, pvcName) + klog.V(4).Infof("Not considering non-CSI volume %s/%s", namespace, pvcName) continue } driverName := csiSource.Driver diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/metadata.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/metadata.go index f3dd8c895d18..9284cda23818 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/metadata.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/metadata.go @@ -17,10 +17,11 @@ limitations under the License. package predicates import ( + "context" "fmt" "sync" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -72,10 +73,10 @@ type predicateMetadata struct { topologyPairsAntiAffinityPodsMap *topologyPairsMaps // A map of topology pairs to a list of Pods that can potentially match - // the affinity rules of the "pod" and its inverse. + // the affinity terms of the "pod" and its inverse. topologyPairsPotentialAffinityPods *topologyPairsMaps // A map of topology pairs to a list of Pods that can potentially match - // the anti-affinity rules of the "pod" and its inverse. + // the anti-affinity terms of the "pod" and its inverse. 
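
The `glog` to `klog` hunks in this file are part of a mechanical, vendor-wide logging migration; `klog` is the Kubernetes fork of `glog` with the same leveled-logging surface, so only the import path changes at call sites:

```go
import "k8s.io/klog"

// Identical call shape to the old glog API:
klog.V(4).Infof("Unable to look up PV info for PVC %s/%s and PV %s",
	namespace, pvcName, pvName)
```
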
topologyPairsPotentialAntiAffinityPods *topologyPairsMaps serviceAffinityInUse bool serviceAffinityMatchingPodList []*v1.Pod @@ -130,26 +131,29 @@ func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInf if pod == nil { return nil } - topologyPairsMaps, err := getMatchingTopologyPairs(pod, nodeNameToInfoMap) + // existingPodAntiAffinityMap will be used later for efficient check on existing pods' anti-affinity + existingPodAntiAffinityMap, err := getTPMapMatchingExistingAntiAffinity(pod, nodeNameToInfoMap) if err != nil { return nil } - topologyPairsAffinityPodsMaps, topologyPairsAntiAffinityPodsMaps, err := getPodsMatchingAffinity(pod, nodeNameToInfoMap) + // incomingPodAffinityMap will be used later for efficient check on incoming pod's affinity + // incomingPodAntiAffinityMap will be used later for efficient check on incoming pod's anti-affinity + incomingPodAffinityMap, incomingPodAntiAffinityMap, err := getTPMapMatchingIncomingAffinityAntiAffinity(pod, nodeNameToInfoMap) if err != nil { - glog.Errorf("[predicate meta data generation] error finding pods that match affinity terms: %v", err) + klog.Errorf("[predicate meta data generation] error finding pods that match affinity terms: %v", err) return nil } predicateMetadata := &predicateMetadata{ - pod: pod, - podBestEffort: isPodBestEffort(pod), - podRequest: GetResourceRequest(pod), - podPorts: schedutil.GetContainerPorts(pod), - topologyPairsPotentialAffinityPods: topologyPairsAffinityPodsMaps, - topologyPairsPotentialAntiAffinityPods: topologyPairsAntiAffinityPodsMaps, - topologyPairsAntiAffinityPodsMap: topologyPairsMaps, + pod: pod, + podBestEffort: isPodBestEffort(pod), + podRequest: GetResourceRequest(pod), + podPorts: schedutil.GetContainerPorts(pod), + topologyPairsPotentialAffinityPods: incomingPodAffinityMap, + topologyPairsPotentialAntiAffinityPods: incomingPodAntiAffinityMap, + topologyPairsAntiAffinityPodsMap: existingPodAntiAffinityMap, } for predicateName, precomputeFunc := range predicateMetadataProducers { - glog.V(10).Infof("Precompute: %v", predicateName) + klog.V(10).Infof("Precompute: %v", predicateName) precomputeFunc(predicateMetadata) } return predicateMetadata @@ -185,6 +189,9 @@ func (topologyPairsMaps *topologyPairsMaps) removePod(deletedPod *v1.Pod) { } func (topologyPairsMaps *topologyPairsMaps) appendMaps(toAppend *topologyPairsMaps) { + if toAppend == nil { + return + } for pair := range toAppend.topologyPairToPods { for pod := range toAppend.topologyPairToPods[pair] { topologyPairsMaps.addTopologyPair(pair, pod) @@ -232,13 +239,11 @@ func (meta *predicateMetadata) AddPod(addedPod *v1.Pod, nodeInfo *schedulercache return fmt.Errorf("invalid node in nodeInfo") } // Add matching anti-affinity terms of the addedPod to the map. - topologyPairsMaps, err := getMatchingTopologyPairsOfExistingPod(meta.pod, addedPod, nodeInfo.Node()) + topologyPairsMaps, err := getMatchingAntiAffinityTopologyPairsOfPod(meta.pod, addedPod, nodeInfo.Node()) if err != nil { return err } - if len(topologyPairsMaps.podToTopologyPairs) > 0 { - meta.topologyPairsAntiAffinityPodsMap.appendMaps(topologyPairsMaps) - } + meta.topologyPairsAntiAffinityPodsMap.appendMaps(topologyPairsMaps) // Add the pod to nodeNameToMatchingAffinityPods and nodeNameToMatchingAntiAffinityPods if needed. 
affinity := meta.pod.Spec.Affinity podNodeName := addedPod.Spec.NodeName @@ -325,8 +330,8 @@ func getAffinityTermProperties(pod *v1.Pod, terms []v1.PodAffinityTerm) (propert return properties, nil } -// podMatchesAffinityTermProperties return true IFF the given pod matches all the given properties. -func podMatchesAffinityTermProperties(pod *v1.Pod, properties []*affinityTermProperties) bool { +// podMatchesAllAffinityTermProperties returns true IFF the given pod matches all the given properties. +func podMatchesAllAffinityTermProperties(pod *v1.Pod, properties []*affinityTermProperties) bool { if len(properties) == 0 { return false } @@ -338,18 +343,77 @@ func podMatchesAffinityTermProperties(pod *v1.Pod, properties []*affinityTermPro return true } -// getPodsMatchingAffinity finds existing Pods that match affinity terms of the given "pod". -// It ignores topology. It returns a set of Pods that are checked later by the affinity -// predicate. With this set of pods available, the affinity predicate does not -// need to check all the pods in the cluster. -func getPodsMatchingAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulercache.NodeInfo) (topologyPairsAffinityPodsMaps *topologyPairsMaps, topologyPairsAntiAffinityPodsMaps *topologyPairsMaps, err error) { +// podMatchesAnyAffinityTermProperties returns true if the given pod matches any given property. +func podMatchesAnyAffinityTermProperties(pod *v1.Pod, properties []*affinityTermProperties) bool { + if len(properties) == 0 { + return false + } + for _, property := range properties { + if priorityutil.PodMatchesTermsNamespaceAndSelector(pod, property.namespaces, property.selector) { + return true + } + } + return false +} + +// getTPMapMatchingExistingAntiAffinity calculates the following for each existing pod on each node: +// (1) Whether it has PodAntiAffinity +// (2) Whether any AffinityTerm matches the incoming pod +func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulercache.NodeInfo) (*topologyPairsMaps, error) { allNodeNames := make([]string, 0, len(nodeInfoMap)) + for name := range nodeInfoMap { + allNodeNames = append(allNodeNames, name) + } + + var lock sync.Mutex + var firstError error + + topologyMaps := newTopologyPairsMaps() + + appendTopologyPairsMaps := func(toAppend *topologyPairsMaps) { + lock.Lock() + defer lock.Unlock() + topologyMaps.appendMaps(toAppend) + } + catchError := func(err error) { + lock.Lock() + defer lock.Unlock() + if firstError == nil { + firstError = err + } + } + + processNode := func(i int) { + nodeInfo := nodeInfoMap[allNodeNames[i]] + node := nodeInfo.Node() + if node == nil { + catchError(fmt.Errorf("node not found")) + return + } + for _, existingPod := range nodeInfo.PodsWithAffinity() { + existingPodTopologyMaps, err := getMatchingAntiAffinityTopologyPairsOfPod(pod, existingPod, node) + if err != nil { + catchError(err) + return + } + appendTopologyPairsMaps(existingPodTopologyMaps) + } + } + workqueue.ParallelizeUntil(context.TODO(), 16, len(allNodeNames), processNode) + return topologyMaps, firstError +} +// getTPMapMatchingIncomingAffinityAntiAffinity finds existing Pods that match affinity terms of the given "pod". +// It returns a topologyPairsMaps that are checked later by the affinity +// predicate. With this topologyPairsMaps available, the affinity predicate does not +// need to check all the pods in the cluster. 
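Both `getTPMapMatchingExistingAntiAffinity` above and `getTPMapMatchingIncomingAffinityAntiAffinity` below fan the per-node work out through `workqueue.ParallelizeUntil`, with one mutex serializing both the map merge and the capture of the first error. A self-contained sketch of that pattern, assuming only that `k8s.io/client-go/util/workqueue` is importable; the boolean map stands in for `*topologyPairsMaps`:

```go
package main

import (
	"context"
	"fmt"
	"sync"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	nodeNames := []string{"node-a", "node-b", "node-c"}
	merged := map[string]bool{} // stand-in for the shared *topologyPairsMaps

	var lock sync.Mutex
	var firstError error
	catchError := func(err error) {
		lock.Lock()
		defer lock.Unlock()
		if firstError == nil { // only the earliest failure is kept
			firstError = err
		}
	}

	processNode := func(i int) {
		name := nodeNames[i]
		if name == "" {
			catchError(fmt.Errorf("node not found"))
			return
		}
		lock.Lock()
		defer lock.Unlock()
		merged[name] = true // stand-in for topologyMaps.appendMaps(...)
	}

	// The scheduler runs 16 workers; 3 is plenty for this toy input.
	workqueue.ParallelizeUntil(context.TODO(), 3, len(nodeNames), processNode)
	fmt.Println(merged, firstError)
}
```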
+func getTPMapMatchingIncomingAffinityAntiAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulercache.NodeInfo) (topologyPairsAffinityPodsMaps *topologyPairsMaps, topologyPairsAntiAffinityPodsMaps *topologyPairsMaps, err error) { affinity := pod.Spec.Affinity if affinity == nil || (affinity.PodAffinity == nil && affinity.PodAntiAffinity == nil) { return newTopologyPairsMaps(), newTopologyPairsMaps(), nil } + allNodeNames := make([]string, 0, len(nodeInfoMap)) for name := range nodeInfoMap { allNodeNames = append(allNodeNames, name) } @@ -377,17 +441,13 @@ func getPodsMatchingAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulercache } } - affinityProperties, err := getAffinityTermProperties(pod, GetPodAffinityTerms(affinity.PodAffinity)) - if err != nil { - return nil, nil, err - } - antiAffinityProperties, err := getAffinityTermProperties(pod, GetPodAntiAffinityTerms(affinity.PodAntiAffinity)) + affinityTerms := GetPodAffinityTerms(affinity.PodAffinity) + affinityProperties, err := getAffinityTermProperties(pod, affinityTerms) if err != nil { return nil, nil, err } - - affinityTerms := GetPodAffinityTerms(affinity.PodAffinity) antiAffinityTerms := GetPodAntiAffinityTerms(affinity.PodAntiAffinity) + processNode := func(i int) { nodeInfo := nodeInfoMap[allNodeNames[i]] node := nodeInfo.Node() @@ -399,7 +459,7 @@ func getPodsMatchingAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulercache nodeTopologyPairsAntiAffinityPodsMaps := newTopologyPairsMaps() for _, existingPod := range nodeInfo.Pods() { // Check affinity properties. - if podMatchesAffinityTermProperties(existingPod, affinityProperties) { + if podMatchesAllAffinityTermProperties(existingPod, affinityProperties) { for _, term := range affinityTerms { if topologyValue, ok := node.Labels[term.TopologyKey]; ok { pair := topologyPair{key: term.TopologyKey, value: topologyValue} @@ -408,8 +468,14 @@ func getPodsMatchingAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulercache } } // Check anti-affinity properties. - if podMatchesAffinityTermProperties(existingPod, antiAffinityProperties) { - for _, term := range antiAffinityTerms { + for _, term := range antiAffinityTerms { + namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(pod, &term) + selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector) + if err != nil { + catchError(err) + return + } + if priorityutil.PodMatchesTermsNamespaceAndSelector(existingPod, namespaces, selector) { if topologyValue, ok := node.Labels[term.TopologyKey]; ok { pair := topologyPair{key: term.TopologyKey, value: topologyValue} nodeTopologyPairsAntiAffinityPodsMaps.addTopologyPair(pair, existingPod) @@ -421,12 +487,12 @@ func getPodsMatchingAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulercache appendResult(node.Name, nodeTopologyPairsAffinityPodsMaps, nodeTopologyPairsAntiAffinityPodsMaps) } } - workqueue.Parallelize(16, len(allNodeNames), processNode) + workqueue.ParallelizeUntil(context.TODO(), 16, len(allNodeNames), processNode) return topologyPairsAffinityPodsMaps, topologyPairsAntiAffinityPodsMaps, firstError } -// podMatchesAffinity returns true if "targetPod" matches any affinity rule of -// "pod". Similar to getPodsMatchingAffinity, this function does not check topology. +// targetPodMatchesAffinityOfPod returns true if "targetPod" matches ALL affinity terms of +// "pod". This function does not check topology. // So, whether the targetPod actually matches or not needs further checks for a specific // node. 
func targetPodMatchesAffinityOfPod(pod, targetPod *v1.Pod) bool { @@ -436,14 +502,14 @@ func targetPodMatchesAffinityOfPod(pod, targetPod *v1.Pod) bool { } affinityProperties, err := getAffinityTermProperties(pod, GetPodAffinityTerms(affinity.PodAffinity)) if err != nil { - glog.Errorf("error in getting affinity properties of Pod %v", pod.Name) + klog.Errorf("error in getting affinity properties of Pod %v", pod.Name) return false } - return podMatchesAffinityTermProperties(targetPod, affinityProperties) + return podMatchesAllAffinityTermProperties(targetPod, affinityProperties) } -// targetPodMatchesAntiAffinityOfPod returns true if "targetPod" matches any anti-affinity -// rule of "pod". Similar to getPodsMatchingAffinity, this function does not check topology. +// targetPodMatchesAntiAffinityOfPod returns true if "targetPod" matches ANY anti-affinity +// term of "pod". This function does not check topology. // So, whether the targetPod actually matches or not needs further checks for a specific // node. func targetPodMatchesAntiAffinityOfPod(pod, targetPod *v1.Pod) bool { @@ -453,8 +519,8 @@ func targetPodMatchesAntiAffinityOfPod(pod, targetPod *v1.Pod) bool { } properties, err := getAffinityTermProperties(pod, GetPodAntiAffinityTerms(affinity.PodAntiAffinity)) if err != nil { - glog.Errorf("error in getting anti-affinity properties of Pod %v", pod.Name) + klog.Errorf("error in getting anti-affinity properties of Pod %v", pod.Name) return false } - return podMatchesAffinityTermProperties(targetPod, properties) + return podMatchesAnyAffinityTermProperties(targetPod, properties) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/predicates.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/predicates.go index 61e7fb5343b9..7594c76405d6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/predicates.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/predicates.go @@ -22,9 +22,8 @@ import ( "os" "regexp" "strconv" - "sync" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" @@ -37,13 +36,13 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" corelisters "k8s.io/client-go/listers/core/v1" storagelisters "k8s.io/client-go/listers/storage/v1" - "k8s.io/client-go/util/workqueue" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" "k8s.io/kubernetes/pkg/features" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/scheduler/algorithm" priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedutil "k8s.io/kubernetes/pkg/scheduler/util" "k8s.io/kubernetes/pkg/scheduler/volumebinder" @@ -96,12 +95,6 @@ const ( // CheckNodePIDPressurePred defines the name of predicate CheckNodePIDPressure. CheckNodePIDPressurePred = "CheckNodePIDPressure" - // DefaultMaxEBSVolumes is the limit for volumes attached to an instance. - // Amazon recommends no more than 40; the system root volume uses at least one. 
- // See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/volume_limits.html#linux-specific-volume-limits - DefaultMaxEBSVolumes = 39 - // DefaultMaxEBSM5VolumeLimit is default EBS volume limit on m5 and c5 instances - DefaultMaxEBSM5VolumeLimit = 25 // DefaultMaxGCEPDVolumes defines the maximum number of PD Volumes for GCE // GCE instances can have up to 16 PD volumes attached. DefaultMaxGCEPDVolumes = 16 @@ -336,7 +329,7 @@ func NewMaxPDVolumeCountPredicate( filter = AzureDiskVolumeFilter volumeLimitKey = v1.ResourceName(volumeutil.AzureVolumeLimitKey) default: - glog.Fatalf("Wrong filterName, Only Support %v %v %v ", EBSVolumeFilterType, + klog.Fatalf("Wrong filterName, Only Support %v %v %v ", EBSVolumeFilterType, GCEPDVolumeFilterType, AzureDiskVolumeFilterType) return nil @@ -380,19 +373,19 @@ func getMaxVolumeFunc(filterName string) func(node *v1.Node) int { } func getMaxEBSVolume(nodeInstanceType string) int { - if ok, _ := regexp.MatchString("^[cm]5.*", nodeInstanceType); ok { - return DefaultMaxEBSM5VolumeLimit + if ok, _ := regexp.MatchString(volumeutil.EBSNitroLimitRegex, nodeInstanceType); ok { + return volumeutil.DefaultMaxEBSNitroVolumeLimit } - return DefaultMaxEBSVolumes + return volumeutil.DefaultMaxEBSVolumes } // getMaxVolLimitFromEnv checks the max PD volumes environment variable, otherwise returning a default value func getMaxVolLimitFromEnv() int { if rawMaxVols := os.Getenv(KubeMaxPDVols); rawMaxVols != "" { if parsedMaxVols, err := strconv.Atoi(rawMaxVols); err != nil { - glog.Errorf("Unable to parse maximum PD volumes value, using default: %v", err) + klog.Errorf("Unable to parse maximum PD volumes value, using default: %v", err) } else if parsedMaxVols <= 0 { - glog.Errorf("Maximum PD volumes must be a positive value, using default ") + klog.Errorf("Maximum PD volumes must be a positive value, using default ") } else { return parsedMaxVols } @@ -420,7 +413,7 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace s pvc, err := c.pvcInfo.GetPersistentVolumeClaimInfo(namespace, pvcName) if err != nil || pvc == nil { // if the PVC is not found, log the error and count the PV towards the PV limit - glog.V(4).Infof("Unable to look up PVC info for %s/%s, assuming PVC matches predicate when counting limits: %v", namespace, pvcName, err) + klog.V(4).Infof("Unable to look up PVC info for %s/%s, assuming PVC matches predicate when counting limits: %v", namespace, pvcName, err) filteredVolumes[pvID] = true continue } @@ -431,7 +424,7 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace s // it was forcefully unbound by admin. 
The pod can still use the // original PV where it was bound to -> log the error and count // the PV towards the PV limit - glog.V(4).Infof("PVC %s/%s is not bound, assuming PVC matches predicate when counting limits", namespace, pvcName) + klog.V(4).Infof("PVC %s/%s is not bound, assuming PVC matches predicate when counting limits", namespace, pvcName) filteredVolumes[pvID] = true continue } @@ -440,7 +433,7 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace s if err != nil || pv == nil { // if the PV is not found, log the error // and count the PV towards the PV limit - glog.V(4).Infof("Unable to look up PV info for %s/%s/%s, assuming PV matches predicate when counting limits: %v", namespace, pvcName, pvName, err) + klog.V(4).Infof("Unable to look up PV info for %s/%s/%s, assuming PV matches predicate when counting limits: %v", namespace, pvcName, pvName, err) filteredVolumes[pvID] = true continue } @@ -672,12 +665,12 @@ func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetad nodeV, _ := nodeConstraints[k] volumeVSet, err := volumeutil.LabelZonesToSet(v) if err != nil { - glog.Warningf("Failed to parse label for %q: %q. Ignoring the label. err=%v. ", k, v, err) + klog.Warningf("Failed to parse label for %q: %q. Ignoring the label. err=%v. ", k, v, err) continue } if !volumeVSet.Has(nodeV) { - glog.V(10).Infof("Won't schedule pod %q onto node %q due to volume %q (mismatch on %q)", pod.Name, node.Name, pvName, k) + klog.V(10).Infof("Won't schedule pod %q onto node %q due to volume %q (mismatch on %q)", pod.Name, node.Name, pvName, k) return false, []algorithm.PredicateFailureReason{ErrVolumeZoneConflict}, nil } } @@ -788,11 +781,11 @@ func PodFitsResources(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *s } } - if glog.V(10) { + if klog.V(10) { if len(predicateFails) == 0 { - // We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is + // We explicitly don't do klog.V(10).Infof() to avoid computing all the parameters if this is // not logged. There is visible performance gain from it. - glog.Infof("Schedule Pod %+v on Node %+v is allowed, Node is running only %v out of %v Pods.", + klog.Infof("Schedule Pod %+v on Node %+v is allowed, Node is running only %v out of %v Pods.", podName(pod), node.Name, len(nodeInfo.Pods()), allowedPodNumber) } } @@ -841,14 +834,14 @@ func podMatchesNodeSelectorAndAffinityTerms(pod *v1.Pod, node *v1.Node) bool { // TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution. // if nodeAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil { // nodeSelectorTerms := nodeAffinity.RequiredDuringSchedulingRequiredDuringExecution.NodeSelectorTerms - // glog.V(10).Infof("Match for RequiredDuringSchedulingRequiredDuringExecution node selector terms %+v", nodeSelectorTerms) + // klog.V(10).Infof("Match for RequiredDuringSchedulingRequiredDuringExecution node selector terms %+v", nodeSelectorTerms) // nodeAffinityMatches = nodeMatchesNodeSelectorTerms(node, nodeSelectorTerms) // } // Match node selector for requiredDuringSchedulingIgnoredDuringExecution. 
if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil { nodeSelectorTerms := nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms - glog.V(10).Infof("Match for RequiredDuringSchedulingIgnoredDuringExecution node selector terms %+v", nodeSelectorTerms) + klog.V(10).Infof("Match for RequiredDuringSchedulingIgnoredDuringExecution node selector terms %+v", nodeSelectorTerms) nodeAffinityMatches = nodeAffinityMatches && nodeMatchesNodeSelectorTerms(node, nodeSelectorTerms) } @@ -940,7 +933,7 @@ type ServiceAffinity struct { // only should be referenced by NewServiceAffinityPredicate. func (s *ServiceAffinity) serviceAffinityMetadataProducer(pm *predicateMetadata) { if pm.pod == nil { - glog.Errorf("Cannot precompute service affinity, a pod is required to calculate service affinity.") + klog.Errorf("Cannot precompute service affinity, a pod is required to calculate service affinity.") return } pm.serviceAffinityInUse = true @@ -952,7 +945,7 @@ func (s *ServiceAffinity) serviceAffinityMetadataProducer(pm *predicateMetadata) // In the future maybe we will return them as part of the function. if errSvc != nil || errList != nil { - glog.Errorf("Some Error were found while precomputing svc affinity: \nservices:%v , \npods:%v", errSvc, errList) + klog.Errorf("Some Error were found while precomputing svc affinity: \nservices:%v , \npods:%v", errSvc, errList) } // consider only the pods that belong to the same namespace pm.serviceAffinityMatchingPodList = FilterPodsByNamespace(allMatches, pm.pod.Namespace) @@ -1179,10 +1172,10 @@ func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta algorithm return false, failedPredicates, error } - if glog.V(10) { - // We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is + if klog.V(10) { + // We explicitly don't do klog.V(10).Infof() to avoid computing all the parameters if this is // not logged. There is visible performance gain from it. - glog.Infof("Schedule Pod %+v on Node %+v is allowed, pod (anti)affinity constraints satisfied", + klog.Infof("Schedule Pod %+v on Node %+v is allowed, pod (anti)affinity constraints satisfied", podName(pod), node.Name) } return true, nil, nil @@ -1193,7 +1186,7 @@ func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta algorithm // targetPod matches all the terms and their topologies, 2) whether targetPod // matches all the terms label selector and namespaces (AKA term properties), // 3) any error. -func (c *PodAffinityChecker) podMatchesPodAffinityTerms(pod *v1.Pod, targetPod *v1.Pod, nodeInfo *schedulercache.NodeInfo, terms []v1.PodAffinityTerm) (bool, bool, error) { +func (c *PodAffinityChecker) podMatchesPodAffinityTerms(pod, targetPod *v1.Pod, nodeInfo *schedulercache.NodeInfo, terms []v1.PodAffinityTerm) (bool, bool, error) { if len(terms) == 0 { return false, false, fmt.Errorf("terms array is empty") } @@ -1201,7 +1194,7 @@ func (c *PodAffinityChecker) podMatchesPodAffinityTerms(pod *v1.Pod, targetPod * if err != nil { return false, false, err } - if !podMatchesAffinityTermProperties(targetPod, props) { + if !podMatchesAllAffinityTermProperties(targetPod, props) { return false, false, nil } // Namespace and selector of the terms have matched. Now we check topology of the terms. 
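The "properties" matched just above are simply each term's namespace set plus its compiled label selector. A simplified sketch of the check that `priorityutil.PodMatchesTermsNamespaceAndSelector` performs for one term; `matchesTermProperties` is our name, not the vendored helper:

```go
package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/sets"
)

// matchesTermProperties: the target pod's namespace must be in the term's
// namespace set, and the term's selector must match the target pod's labels.
// Topology is deliberately not checked here; that happens against node labels.
func matchesTermProperties(target *v1.Pod, namespaces sets.String, selector labels.Selector) bool {
	return namespaces.Has(target.Namespace) && selector.Matches(labels.Set(target.Labels))
}

func main() {
	term := v1.PodAffinityTerm{
		LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
		TopologyKey:   "kubernetes.io/hostname",
	}
	selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
	if err != nil {
		panic(err)
	}

	target := &v1.Pod{}
	target.Namespace = "default"
	target.Labels = map[string]string{"app": "web"}

	fmt.Println(matchesTermProperties(target, sets.NewString("default"), selector)) // true
}
```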
@@ -1248,112 +1241,55 @@ func GetPodAntiAffinityTerms(podAntiAffinity *v1.PodAntiAffinity) (terms []v1.Po return terms } -func getMatchingTopologyPairs(pod *v1.Pod, nodeInfoMap map[string]*schedulercache.NodeInfo) (*topologyPairsMaps, error) { - allNodeNames := make([]string, 0, len(nodeInfoMap)) - for name := range nodeInfoMap { - allNodeNames = append(allNodeNames, name) +// getMatchingAntiAffinityTopologyPairsOfPod calculates the following for "existingPod" on given node: +// (1) Whether it has PodAntiAffinity +// (2) Whether ANY AffinityTerm matches the incoming pod +func getMatchingAntiAffinityTopologyPairsOfPod(newPod *v1.Pod, existingPod *v1.Pod, node *v1.Node) (*topologyPairsMaps, error) { + affinity := existingPod.Spec.Affinity + if affinity == nil || affinity.PodAntiAffinity == nil { + return nil, nil } - var lock sync.Mutex - var firstError error - topologyMaps := newTopologyPairsMaps() - - appendTopologyPairsMaps := func(toAppend *topologyPairsMaps) { - lock.Lock() - defer lock.Unlock() - topologyMaps.appendMaps(toAppend) - } - catchError := func(err error) { - lock.Lock() - defer lock.Unlock() - if firstError == nil { - firstError = err - } - } - - processNode := func(i int) { - nodeInfo := nodeInfoMap[allNodeNames[i]] - node := nodeInfo.Node() - if node == nil { - catchError(fmt.Errorf("node not found")) - return - } - nodeTopologyMaps := newTopologyPairsMaps() - for _, existingPod := range nodeInfo.PodsWithAffinity() { - affinity := existingPod.Spec.Affinity - if affinity == nil { - continue - } - for _, term := range GetPodAntiAffinityTerms(affinity.PodAntiAffinity) { - namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(existingPod, &term) - selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector) - if err != nil { - catchError(err) - return - } - if priorityutil.PodMatchesTermsNamespaceAndSelector(pod, namespaces, selector) { - if topologyValue, ok := node.Labels[term.TopologyKey]; ok { - pair := topologyPair{key: term.TopologyKey, value: topologyValue} - nodeTopologyMaps.addTopologyPair(pair, existingPod) - } - } - } - if len(nodeTopologyMaps.podToTopologyPairs) > 0 { - appendTopologyPairsMaps(nodeTopologyMaps) - } + for _, term := range GetPodAntiAffinityTerms(affinity.PodAntiAffinity) { + namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(existingPod, &term) + selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector) + if err != nil { + return nil, err } - } - workqueue.Parallelize(16, len(allNodeNames), processNode) - return topologyMaps, firstError -} - -func getMatchingTopologyPairsOfExistingPod(newPod *v1.Pod, existingPod *v1.Pod, node *v1.Node) (*topologyPairsMaps, error) { - topologyMaps := newTopologyPairsMaps() - affinity := existingPod.Spec.Affinity - if affinity != nil && affinity.PodAntiAffinity != nil { - for _, term := range GetPodAntiAffinityTerms(affinity.PodAntiAffinity) { - namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(existingPod, &term) - selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector) - if err != nil { - return nil, err - } - if priorityutil.PodMatchesTermsNamespaceAndSelector(newPod, namespaces, selector) { - if topologyValue, ok := node.Labels[term.TopologyKey]; ok { - pair := topologyPair{key: term.TopologyKey, value: topologyValue} - topologyMaps.addTopologyPair(pair, existingPod) - } + if priorityutil.PodMatchesTermsNamespaceAndSelector(newPod, namespaces, selector) { + if topologyValue, ok := node.Labels[term.TopologyKey]; ok { + pair := topologyPair{key: term.TopologyKey,
value: topologyValue} + topologyMaps.addTopologyPair(pair, existingPod) } } } return topologyMaps, nil } -func (c *PodAffinityChecker) getMatchingAntiAffinityTopologyPairs(pod *v1.Pod, allPods []*v1.Pod) (*topologyPairsMaps, error) { + +func (c *PodAffinityChecker) getMatchingAntiAffinityTopologyPairsOfPods(pod *v1.Pod, existingPods []*v1.Pod) (*topologyPairsMaps, error) { topologyMaps := newTopologyPairsMaps() - for _, existingPod := range allPods { - affinity := existingPod.Spec.Affinity - if affinity != nil && affinity.PodAntiAffinity != nil { - existingPodNode, err := c.info.GetNodeInfo(existingPod.Spec.NodeName) - if err != nil { - if apierrors.IsNotFound(err) { - glog.Errorf("Node not found, %v", existingPod.Spec.NodeName) - continue - } - return nil, err - } - existingPodsTopologyMaps, err := getMatchingTopologyPairsOfExistingPod(pod, existingPod, existingPodNode) - if err != nil { - return nil, err + for _, existingPod := range existingPods { + existingPodNode, err := c.info.GetNodeInfo(existingPod.Spec.NodeName) + if err != nil { + if apierrors.IsNotFound(err) { + klog.Errorf("Node not found, %v", existingPod.Spec.NodeName) + continue } - topologyMaps.appendMaps(existingPodsTopologyMaps) + return nil, err } + existingPodTopologyMaps, err := getMatchingAntiAffinityTopologyPairsOfPod(pod, existingPod, existingPodNode) + if err != nil { + return nil, err + } + topologyMaps.appendMaps(existingPodTopologyMaps) } return topologyMaps, nil } // Checks if scheduling the pod onto this node would break any anti-affinity -// rules indicated by the existing pods. +// terms indicated by the existing pods. func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (algorithm.PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { @@ -1368,36 +1304,36 @@ func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta filteredPods, err := c.podLister.FilteredList(nodeInfo.Filter, labels.Everything()) if err != nil { errMessage := fmt.Sprintf("Failed to get all pods, %+v", err) - glog.Error(errMessage) + klog.Error(errMessage) return ErrExistingPodsAntiAffinityRulesNotMatch, errors.New(errMessage) } - if topologyMaps, err = c.getMatchingAntiAffinityTopologyPairs(pod, filteredPods); err != nil { + if topologyMaps, err = c.getMatchingAntiAffinityTopologyPairsOfPods(pod, filteredPods); err != nil { errMessage := fmt.Sprintf("Failed to get all terms that pod %+v matches, err: %+v", podName(pod), err) - glog.Error(errMessage) + klog.Error(errMessage) return ErrExistingPodsAntiAffinityRulesNotMatch, errors.New(errMessage) } } // Iterate over topology pairs to get any of the pods being affected by - // the scheduled pod anti-affinity rules + // the scheduled pod anti-affinity terms for topologyKey, topologyValue := range node.Labels { if topologyMaps.topologyPairToPods[topologyPair{key: topologyKey, value: topologyValue}] != nil { - glog.V(10).Infof("Cannot schedule pod %+v onto node %v", podName(pod), node.Name) + klog.V(10).Infof("Cannot schedule pod %+v onto node %v", podName(pod), node.Name) return ErrExistingPodsAntiAffinityRulesNotMatch, nil } } - if glog.V(10) { - // We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is + if klog.V(10) { + // We explicitly don't do klog.V(10).Infof() to avoid computing all the parameters if this is // not logged. There is visible performance gain from it. 
- glog.Infof("Schedule Pod %+v on Node %+v is allowed, existing pods anti-affinity rules satisfied.", + klog.Infof("Schedule Pod %+v on Node %+v is allowed, existing pods anti-affinity terms satisfied.", podName(pod), node.Name) } return nil, nil } -// nodeMatchesTopologyTerms checks whether "nodeInfo" matches +// nodeMatchesAllTopologyTerms checks whether "nodeInfo" matches // topology of all the "terms" for the given "pod". -func (c *PodAffinityChecker) nodeMatchesTopologyTerms(pod *v1.Pod, topologyPairs *topologyPairsMaps, nodeInfo *schedulercache.NodeInfo, terms []v1.PodAffinityTerm) bool { +func (c *PodAffinityChecker) nodeMatchesAllTopologyTerms(pod *v1.Pod, topologyPairs *topologyPairsMaps, nodeInfo *schedulercache.NodeInfo, terms []v1.PodAffinityTerm) bool { node := nodeInfo.Node() for _, term := range terms { if topologyValue, ok := node.Labels[term.TopologyKey]; ok { @@ -1412,7 +1348,22 @@ func (c *PodAffinityChecker) nodeMatchesTopologyTerms(pod *v1.Pod, topologyPairs return true } -// Checks if scheduling the pod onto this node would break any rules of this pod. +// nodeMatchesAnyTopologyTerm checks whether "nodeInfo" matches +// topology of any "term" for the given "pod". +func (c *PodAffinityChecker) nodeMatchesAnyTopologyTerm(pod *v1.Pod, topologyPairs *topologyPairsMaps, nodeInfo *schedulercache.NodeInfo, terms []v1.PodAffinityTerm) bool { + node := nodeInfo.Node() + for _, term := range terms { + if topologyValue, ok := node.Labels[term.TopologyKey]; ok { + pair := topologyPair{key: term.TopologyKey, value: topologyValue} + if _, ok := topologyPairs.topologyPairToPods[pair]; ok { + return true + } + } + } + return false +} + +// Checks if scheduling the pod onto this node would break any term of this pod. func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo, affinity *v1.Affinity) (algorithm.PredicateFailureReason, error) { @@ -1424,14 +1375,14 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, // Check all affinity terms. topologyPairsPotentialAffinityPods := predicateMeta.topologyPairsPotentialAffinityPods if affinityTerms := GetPodAffinityTerms(affinity.PodAffinity); len(affinityTerms) > 0 { - matchExists := c.nodeMatchesTopologyTerms(pod, topologyPairsPotentialAffinityPods, nodeInfo, affinityTerms) + matchExists := c.nodeMatchesAllTopologyTerms(pod, topologyPairsPotentialAffinityPods, nodeInfo, affinityTerms) if !matchExists { // This pod may the first pod in a series that have affinity to themselves. In order // to not leave such pods in pending state forever, we check that if no other pod // in the cluster matches the namespace and selector of this pod and the pod matches // its own terms, then we allow the pod to pass the affinity check. if !(len(topologyPairsPotentialAffinityPods.topologyPairToPods) == 0 && targetPodMatchesAffinityOfPod(pod, pod)) { - glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinity", + klog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinity", podName(pod), node.Name) return ErrPodAffinityRulesNotMatch, nil } @@ -1441,14 +1392,14 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, // Check all anti-affinity terms. 
topologyPairsPotentialAntiAffinityPods := predicateMeta.topologyPairsPotentialAntiAffinityPods if antiAffinityTerms := GetPodAntiAffinityTerms(affinity.PodAntiAffinity); len(antiAffinityTerms) > 0 { - matchExists := c.nodeMatchesTopologyTerms(pod, topologyPairsPotentialAntiAffinityPods, nodeInfo, antiAffinityTerms) + matchExists := c.nodeMatchesAnyTopologyTerm(pod, topologyPairsPotentialAntiAffinityPods, nodeInfo, antiAffinityTerms) if matchExists { - glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAntiAffinity", + klog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAntiAffinity", podName(pod), node.Name) return ErrPodAntiAffinityRulesNotMatch, nil } } - } else { // We don't have precomputed metadata. We have to follow a slow path to check affinity rules. + } else { // We don't have precomputed metadata. We have to follow a slow path to check affinity terms. filteredPods, err := c.podLister.FilteredList(nodeInfo.Filter, labels.Everything()) if err != nil { return ErrPodAffinityRulesNotMatch, err @@ -1463,7 +1414,7 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, affTermsMatch, termsSelectorMatch, err := c.podMatchesPodAffinityTerms(pod, targetPod, nodeInfo, affinityTerms) if err != nil { errMessage := fmt.Sprintf("Cannot schedule pod %+v onto node %v, because of PodAffinity, err: %v", podName(pod), node.Name, err) - glog.Error(errMessage) + klog.Error(errMessage) return ErrPodAffinityRulesNotMatch, errors.New(errMessage) } if termsSelectorMatch { @@ -1478,7 +1429,7 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, if len(antiAffinityTerms) > 0 { antiAffTermsMatch, _, err := c.podMatchesPodAffinityTerms(pod, targetPod, nodeInfo, antiAffinityTerms) if err != nil || antiAffTermsMatch { - glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAntiAffinityTerm, err: %v", + klog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAntiAffinityTerm, err: %v", podName(pod), node.Name, err) return ErrPodAntiAffinityRulesNotMatch, nil } @@ -1486,29 +1437,29 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, } if !matchFound && len(affinityTerms) > 0 { - // We have not been able to find any matches for the pod's affinity rules. + // We have not been able to find any matches for the pod's affinity terms. // This pod may be the first pod in a series that have affinity to themselves. In order // to not leave such pods in pending state forever, we check that if no other pod // in the cluster matches the namespace and selector of this pod and the pod matches // its own terms, then we allow the pod to pass the affinity check. if termsSelectorMatchFound { - glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinity", + klog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinity", podName(pod), node.Name) return ErrPodAffinityRulesNotMatch, nil } // Check if pod matches its own affinity properties (namespace and label selector). 
if !targetPodMatchesAffinityOfPod(pod, pod) { - glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinity", + klog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinity", podName(pod), node.Name) return ErrPodAffinityRulesNotMatch, nil } } } - if glog.V(10) { - // We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is + if klog.V(10) { + // We explicitly don't do klog.V(10).Infof() to avoid computing all the parameters if this is // not logged. There is visible performance gain from it. - glog.Infof("Schedule Pod %+v on Node %+v is allowed, pod affinity/anti-affinity constraints satisfied.", + klog.Infof("Schedule Pod %+v on Node %+v is allowed, pod affinity/anti-affinity constraints satisfied.", podName(pod), node.Name) } return nil, nil @@ -1520,7 +1471,14 @@ func CheckNodeUnschedulablePredicate(pod *v1.Pod, meta algorithm.PredicateMetada return false, []algorithm.PredicateFailureReason{ErrNodeUnknownCondition}, nil } - if nodeInfo.Node().Spec.Unschedulable { + // If pod tolerate unschedulable taint, it's also tolerate `node.Spec.Unschedulable`. + podToleratesUnschedulable := v1helper.TolerationsTolerateTaint(pod.Spec.Tolerations, &v1.Taint{ + Key: schedulerapi.TaintNodeUnschedulable, + Effect: v1.TaintEffectNoSchedule, + }) + + // TODO (k82cn): deprecates `node.Spec.Unschedulable` in 1.13. + if nodeInfo.Node().Spec.Unschedulable && !podToleratesUnschedulable { return false, []algorithm.PredicateFailureReason{ErrNodeUnschedulable}, nil } @@ -1676,12 +1634,12 @@ func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMe failReasons := []algorithm.PredicateFailureReason{} if !boundSatisfied { - glog.V(5).Infof("Bound PVs not satisfied for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name) + klog.V(5).Infof("Bound PVs not satisfied for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name) failReasons = append(failReasons, ErrVolumeNodeConflict) } if !unboundSatisfied { - glog.V(5).Infof("Couldn't find matching PVs for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name) + klog.V(5).Infof("Couldn't find matching PVs for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name) failReasons = append(failReasons, ErrVolumeBindConflict) } @@ -1690,6 +1648,6 @@ func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMe } // All volumes bound or matching PVs found for all unbound PVCs - glog.V(5).Infof("All PVCs found matches for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name) + klog.V(5).Infof("All PVCs found matches for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name) return true, nil, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/utils.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/utils.go index 2b9c94f9dc83..de2826a4df49 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/utils.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/utils.go @@ -19,7 +19,7 @@ package predicates import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" - schedutil "k8s.io/kubernetes/pkg/scheduler/util" + schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" ) // FindLabelsInSet gets as many key/value pairs as possible out of a label set. 
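The reworked `CheckNodeUnschedulablePredicate` above makes `node.Spec.Unschedulable` behave like the `node.kubernetes.io/unschedulable:NoSchedule` taint: a pod that tolerates the taint is no longer filtered out. A small sketch of the toleration test, using `Toleration.ToleratesTaint` from `k8s.io/api` as a stand-in for the vendored `v1helper.TolerationsTolerateTaint`:

```go
package main

import (
	"fmt"

	"k8s.io/api/core/v1"
)

// tolerationsTolerateTaint mirrors the helper the predicate calls: true if
// any toleration on the pod tolerates the given taint.
func tolerationsTolerateTaint(tolerations []v1.Toleration, taint *v1.Taint) bool {
	for i := range tolerations {
		if tolerations[i].ToleratesTaint(taint) {
			return true
		}
	}
	return false
}

func main() {
	// schedulerapi.TaintNodeUnschedulable resolves to this key.
	unschedulable := &v1.Taint{
		Key:    "node.kubernetes.io/unschedulable",
		Effect: v1.TaintEffectNoSchedule,
	}

	pod := v1.Pod{}
	pod.Spec.Tolerations = []v1.Toleration{{
		Key:      "node.kubernetes.io/unschedulable",
		Operator: v1.TolerationOpExists,
		Effect:   v1.TaintEffectNoSchedule,
	}}

	// With the predicate change above, this pod can land on a cordoned node.
	fmt.Println(tolerationsTolerateTaint(pod.Spec.Tolerations, unschedulable)) // true
}
```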
@@ -68,7 +68,7 @@ func CreateSelectorFromLabels(aL map[string]string) labels.Selector { // portsConflict check whether existingPorts and wantPorts conflict with each other // return true if we have a conflict -func portsConflict(existingPorts schedutil.HostPortInfo, wantPorts []*v1.ContainerPort) bool { +func portsConflict(existingPorts schedulercache.HostPortInfo, wantPorts []*v1.ContainerPort) bool { for _, cp := range wantPorts { if existingPorts.CheckConflict(cp.HostIP, string(cp.Protocol), cp.HostPort) { return true diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/BUILD index 2dfe92abdcee..e8a08011f351 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/BUILD @@ -44,7 +44,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -55,6 +55,7 @@ go_test( "image_locality_test.go", "interpod_affinity_test.go", "least_requested_test.go", + "main_test.go", "metadata_test.go", "most_requested_test.go", "node_affinity_test.go", @@ -79,6 +80,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/interpod_affinity.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/interpod_affinity.go index 6aa5a3896ddc..32cf27c83bfc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/interpod_affinity.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/interpod_affinity.go @@ -17,6 +17,7 @@ limitations under the License. package priorities import ( + "context" "sync" "k8s.io/api/core/v1" @@ -29,7 +30,7 @@ import ( schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" - "github.com/golang/glog" + "k8s.io/klog" ) // InterPodAffinity contains information to calculate inter pod affinity. 
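`portsConflict` above now takes the `HostPortInfo` defined later in this diff (in the new `pkg/scheduler/cache/host_ports.go`), whose main feature is that `0.0.0.0` acts as a wildcard IP during conflict checks. A usage sketch, assuming the vendored package path is importable as written:

```go
package main

import (
	"fmt"

	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
)

func main() {
	info := schedulercache.HostPortInfo{}
	info.Add("", "", 8080)           // sanitized to 0.0.0.0 / TCP / 8080
	info.Add("127.0.0.1", "UDP", 53) // a concrete IP entry

	// 0.0.0.0 conflicts with the same (protocol, port) on any concrete IP.
	fmt.Println(info.CheckConflict("10.0.0.1", "TCP", 8080)) // true
	fmt.Println(info.CheckConflict("10.0.0.1", "UDP", 53))   // false: different concrete IP
	fmt.Println(info.Len())                                  // 2

	info.Remove("", "TCP", 8080) // sanitized back to 0.0.0.0
	fmt.Println(info.Len())      // 1
}
```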
@@ -136,7 +137,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node existingPodNode, err := ipa.info.GetNodeInfo(existingPod.Spec.NodeName) if err != nil { if apierrors.IsNotFound(err) { - glog.Errorf("Node not found, %v", existingPod.Spec.NodeName) + klog.Errorf("Node not found, %v", existingPod.Spec.NodeName) return nil } return err @@ -210,7 +211,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node } } } - workqueue.Parallelize(16, len(allNodeNames), processNode) + workqueue.ParallelizeUntil(context.TODO(), 16, len(allNodeNames), processNode) if pm.firstError != nil { return nil, pm.firstError } @@ -232,8 +233,8 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node fScore = float64(schedulerapi.MaxPriority) * ((pm.counts[node.Name] - minCount) / (maxCount - minCount)) } result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int(fScore)}) - if glog.V(10) { - glog.Infof("%v -> %v: InterPodAffinityPriority, Score: (%d)", pod.Name, node.Name, int(fScore)) + if klog.V(10) { + klog.Infof("%v -> %v: InterPodAffinityPriority, Score: (%d)", pod.Name, node.Name, int(fScore)) } } return result, nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/resource_allocation.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/resource_allocation.go index 8d709dcb44e1..027eabae5ff9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/resource_allocation.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/resource_allocation.go @@ -19,9 +19,9 @@ package priorities import ( "fmt" - "github.com/golang/glog" "k8s.io/api/core/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" "k8s.io/kubernetes/pkg/features" priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" @@ -64,9 +64,9 @@ func (r *ResourceAllocationPriority) PriorityMap( score = r.scorer(&requested, &allocatable, false, 0, 0) } - if glog.V(10) { + if klog.V(10) { if len(pod.Spec.Volumes) >= 0 && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) && nodeInfo.TransientInfo != nil { - glog.Infof( + klog.Infof( "%v -> %v: %v, capacity %d millicores %d memory bytes, %d volumes, total request %d millicores %d memory bytes %d volumes, score %d", pod.Name, node.Name, r.Name, allocatable.MilliCPU, allocatable.Memory, nodeInfo.TransientInfo.TransNodeInfo.AllocatableVolumesCount, @@ -75,7 +75,7 @@ func (r *ResourceAllocationPriority) PriorityMap( score, ) } else { - glog.Infof( + klog.Infof( "%v -> %v: %v, capacity %d millicores %d memory bytes, total request %d millicores %d memory bytes, score %d", pod.Name, node.Name, r.Name, allocatable.MilliCPU, allocatable.Memory, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/resource_limits.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/resource_limits.go index 816b423f588c..82b803cbf721 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/resource_limits.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/resource_limits.go @@ -23,7 +23,7 @@ import ( schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" - "github.com/golang/glog" + "k8s.io/klog" ) 
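Two recurring idioms in the priorities code above deserve a standalone illustration: the min-max scaling that maps raw per-node counts onto `[0, schedulerapi.MaxPriority]`, and the `if klog.V(10)` guard the comments keep explaining, which avoids evaluating log arguments at lower verbosity. A sketch assuming `MaxPriority` is 10, as in the scheduler API of this vintage:

```go
package main

import (
	"fmt"

	"k8s.io/klog"
)

const maxPriority = 10 // stand-in for schedulerapi.MaxPriority

// normalizeScores applies the same scaling CalculateInterPodAffinityPriority
// uses: fScore = MaxPriority * (count - min) / (max - min).
func normalizeScores(counts map[string]float64) map[string]int {
	var minCount, maxCount float64
	first := true
	for _, c := range counts {
		if first || c < minCount {
			minCount = c
		}
		if first || c > maxCount {
			maxCount = c
		}
		first = false
	}
	scores := make(map[string]int, len(counts))
	for node, c := range counts {
		fScore := float64(0)
		if maxCount-minCount > 0 {
			fScore = maxPriority * ((c - minCount) / (maxCount - minCount))
		}
		scores[node] = int(fScore)
		if klog.V(10) {
			// Guarded so the arguments are never computed at verbosity < 10,
			// the same reasoning the in-tree comments give for this pattern.
			klog.Infof("%v: interpod-affinity-style score: %d", node, int(fScore))
		}
	}
	return scores
}

func main() {
	fmt.Println(normalizeScores(map[string]float64{"node-a": 0, "node-b": 5, "node-c": 10}))
	// map[node-a:0 node-b:5 node-c:10]
}
```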
// ResourceLimitsPriorityMap is a priority function that increases score of input node by 1 if the node satisfies @@ -52,10 +52,10 @@ func ResourceLimitsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedule score = 1 } - if glog.V(10) { - // We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is + if klog.V(10) { + // We explicitly don't do klog.V(10).Infof() to avoid computing all the parameters if this is // not logged. There is visible performance gain from it. - glog.Infof( + klog.Infof( "%v -> %v: Resource Limits Priority, allocatable %d millicores %d memory bytes, pod limits %d millicores %d memory bytes, score %d", pod.Name, node.Name, allocatableResources.MilliCPU, allocatableResources.Memory, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/selector_spreading.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/selector_spreading.go index 52bb98044127..1371d765a53a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/selector_spreading.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/selector_spreading.go @@ -26,7 +26,7 @@ import ( schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" utilnode "k8s.io/kubernetes/pkg/util/node" - "github.com/golang/glog" + "k8s.io/klog" ) // When zone information is present, give 2/3 of the weighting to zone spreading, 1/3 to node spreading @@ -94,7 +94,7 @@ func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{ // Ignore the previous deleted version for spreading purposes // (it can still be considered for resource restrictions etc.) if nodePod.DeletionTimestamp != nil { - glog.V(4).Infof("skipping pending-deleted pod: %s/%s", nodePod.Namespace, nodePod.Name) + klog.V(4).Infof("skipping pending-deleted pod: %s/%s", nodePod.Namespace, nodePod.Name) continue } for _, selector := range selectors { @@ -160,8 +160,8 @@ func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interfa } } result[i].Score = int(fScore) - if glog.V(10) { - glog.Infof( + if klog.V(10) { + klog.Infof( "%v -> %v: SelectorSpreadPriority, Score: (%d)", pod.Name, result[i].Host, int(fScore), ) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/test_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/test_util.go index 74ffef34cfb2..da85c6b391bc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/test_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/test_util.go @@ -41,18 +41,18 @@ func makeNode(node string, milliCPU, memory int64) *v1.Node { } } -func priorityFunction(mapFn algorithm.PriorityMapFunction, reduceFn algorithm.PriorityReduceFunction, mataData interface{}) algorithm.PriorityFunction { +func priorityFunction(mapFn algorithm.PriorityMapFunction, reduceFn algorithm.PriorityReduceFunction, metaData interface{}) algorithm.PriorityFunction { return func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { result := make(schedulerapi.HostPriorityList, 0, len(nodes)) for i := range nodes { - hostResult, err := mapFn(pod, mataData, nodeNameToInfo[nodes[i].Name]) + hostResult, err := mapFn(pod, metaData, nodeNameToInfo[nodes[i].Name]) if err != nil { return nil, err } result = append(result, hostResult) } if 
reduceFn != nil { - if err := reduceFn(pod, mataData, nodeNameToInfo, result); err != nil { + if err := reduceFn(pod, metaData, nodeNameToInfo, result); err != nil { return nil, err } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/scheduler_interface.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/scheduler_interface.go index ffa275d45f03..d74af089d351 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/scheduler_interface.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/scheduler_interface.go @@ -26,6 +26,9 @@ import ( // decisions made by Kubernetes. This is typically needed for resources not directly // managed by Kubernetes. type SchedulerExtender interface { + // Name returns a unique name that identifies the extender. + Name() string + // Filter based on extender-implemented predicate functions. The filtered list is // expected to be a subset of the supplied list. failedNodesMap optionally contains // the list of failed nodes and failure reasons. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/types.go index cd1535cb5588..835a5a0bfb8a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/types.go @@ -19,15 +19,17 @@ package algorithm import ( apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" "k8s.io/apimachinery/pkg/labels" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" ) // NodeFieldSelectorKeys is a map that: the keys are node field selector keys; the values are // the functions to get the value of the node field. var NodeFieldSelectorKeys = map[string]func(*v1.Node) string{ - NodeFieldSelectorKeyNodeName: func(n *v1.Node) string { return n.Name }, + schedulerapi.NodeFieldSelectorKeyNodeName: func(n *v1.Node) string { return n.Name }, } // FitPredicate is a function that indicates if a pod fits into an existing node. @@ -97,7 +99,7 @@ type PodLister interface { List(labels.Selector) ([]*v1.Pod, error) // This is similar to "List()", but the returned slice does not // contain pods that don't pass `podFilter`. - FilteredList(podFilter schedulercache.PodFilter, selector labels.Selector) ([]*v1.Pod, error) + FilteredList(podFilter schedulerinternalcache.PodFilter, selector labels.Selector) ([]*v1.Pod, error) } // ServiceLister interface represents anything that can produce a list of services; the list is consumed by a scheduler. @@ -122,6 +124,12 @@ type ReplicaSetLister interface { GetPodReplicaSets(*v1.Pod) ([]*apps.ReplicaSet, error) } +// PDBLister interface represents anything that can list PodDisruptionBudget objects. +type PDBLister interface { + // List() returns a list of PodDisruptionBudgets matching the selector.
+ List(labels.Selector) ([]*policyv1beta1.PodDisruptionBudget, error) +} + var _ ControllerLister = &EmptyControllerLister{} // EmptyControllerLister implements ControllerLister on []v1.ReplicationController returning empty data diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/BUILD index a4afc8fa4756..1e3a118c9f20 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/BUILD @@ -15,11 +15,16 @@ go_library( go_test( name = "go_default_test", - srcs = ["plugins_test.go"], + srcs = [ + "main_test.go", + "plugins_test.go", + ], embed = [":go_default_library"], deps = [ + "//pkg/features:go_default_library", "//pkg/scheduler/factory:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults/BUILD index 7e624f3737f5..ae77ea8609c4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults/BUILD @@ -19,7 +19,7 @@ go_library( "//pkg/scheduler/factory:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults/defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults/defaults.go index 6b18c4fa9fd3..74467fe17a53 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults/defaults.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults/defaults.go @@ -17,7 +17,7 @@ limitations under the License. 
package defaults import ( - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/sets" utilfeature "k8s.io/apiserver/pkg/util/feature" @@ -199,19 +199,24 @@ func ApplyFeatureGates() { // Fit is determined based on whether a pod can tolerate all of the node's taints factory.RegisterMandatoryFitPredicate(predicates.PodToleratesNodeTaintsPred, predicates.PodToleratesNodeTaints) + // Fit is determined based on whether a pod can tolerate unschedulable of node + factory.RegisterMandatoryFitPredicate(predicates.CheckNodeUnschedulablePred, predicates.CheckNodeUnschedulablePredicate) // Insert Key "PodToleratesNodeTaints" and "CheckNodeUnschedulable" To All Algorithm Provider // The key will insert to all providers which in algorithmProviderMap[] // if you just want insert to specific provider, call func InsertPredicateKeyToAlgoProvider() factory.InsertPredicateKeyToAlgorithmProviderMap(predicates.PodToleratesNodeTaintsPred) + factory.InsertPredicateKeyToAlgorithmProviderMap(predicates.CheckNodeUnschedulablePred) - glog.Warningf("TaintNodesByCondition is enabled, PodToleratesNodeTaints predicate is mandatory") + klog.Infof("TaintNodesByCondition is enabled, PodToleratesNodeTaints predicate is mandatory") } // Prioritizes nodes that satisfy pod's resource limits if utilfeature.DefaultFeatureGate.Enabled(features.ResourceLimitsPriorityFunction) { + klog.Infof("Registering resourcelimits priority function") factory.RegisterPriorityFunction2("ResourceLimitsPriority", priorities.ResourceLimitsPriorityMap, nil, 1) + // Register the priority function to specific provider too. + factory.InsertPriorityKeyToAlgorithmProviderMap(factory.RegisterPriorityFunction2("ResourceLimitsPriority", priorities.ResourceLimitsPriorityMap, nil, 1)) } - } func registerAlgorithmProvider(predSet, priSet sets.String) { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/BUILD index a160d036fa9d..0f6361b6a626 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/BUILD @@ -11,10 +11,12 @@ go_library( "doc.go", "register.go", "types.go", + "well_known_labels.go", "zz_generated.deepcopy.go", ], importpath = "k8s.io/kubernetes/pkg/scheduler/api", deps = [ + "//pkg/apis/core:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/well_known_labels.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/well_known_labels.go similarity index 99% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/well_known_labels.go rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/well_known_labels.go index 3482649b4160..afe64dd50f7f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/well_known_labels.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/api/well_known_labels.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package algorithm +package api import ( api "k8s.io/kubernetes/pkg/apis/core" diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/cache/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/cache/BUILD index 98d474beba4f..ba0ac4da9a7b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/cache/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/cache/BUILD @@ -3,55 +3,36 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = [ - "cache.go", - "interface.go", + "host_ports.go", "node_info.go", - "node_tree.go", "util.go", ], importpath = "k8s.io/kubernetes/pkg/scheduler/cache", visibility = ["//visibility:public"], deps = [ "//pkg/apis/core/v1/helper:go_default_library", - "//pkg/features:go_default_library", "//pkg/scheduler/algorithm/priorities/util:go_default_library", - "//pkg/scheduler/util:go_default_library", - "//pkg/util/node:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/policy/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) go_test( name = "go_default_test", srcs = [ - "cache_test.go", + "host_ports_test.go", "node_info_test.go", - "node_tree_test.go", "util_test.go", ], embed = [":go_default_library"], deps = [ - "//pkg/features:go_default_library", - "//pkg/kubelet/apis:go_default_library", - "//pkg/scheduler/algorithm/priorities/util:go_default_library", - "//pkg/scheduler/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/policy/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/cache/host_ports.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/cache/host_ports.go new file mode 100644 index 000000000000..e96c6be374ab --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/cache/host_ports.go @@ -0,0 +1,135 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "k8s.io/api/core/v1" +) + +// DefaultBindAllHostIP defines the default ip address used to bind to all host. +const DefaultBindAllHostIP = "0.0.0.0" + +// ProtocolPort represents a protocol port pair, e.g. tcp:80. +type ProtocolPort struct { + Protocol string + Port int32 +} + +// NewProtocolPort creates a ProtocolPort instance. +func NewProtocolPort(protocol string, port int32) *ProtocolPort { + pp := &ProtocolPort{ + Protocol: protocol, + Port: port, + } + + if len(pp.Protocol) == 0 { + pp.Protocol = string(v1.ProtocolTCP) + } + + return pp +} + +// HostPortInfo stores mapping from ip to a set of ProtocolPort +type HostPortInfo map[string]map[ProtocolPort]struct{} + +// Add adds (ip, protocol, port) to HostPortInfo +func (h HostPortInfo) Add(ip, protocol string, port int32) { + if port <= 0 { + return + } + + h.sanitize(&ip, &protocol) + + pp := NewProtocolPort(protocol, port) + if _, ok := h[ip]; !ok { + h[ip] = map[ProtocolPort]struct{}{ + *pp: {}, + } + return + } + + h[ip][*pp] = struct{}{} +} + +// Remove removes (ip, protocol, port) from HostPortInfo +func (h HostPortInfo) Remove(ip, protocol string, port int32) { + if port <= 0 { + return + } + + h.sanitize(&ip, &protocol) + + pp := NewProtocolPort(protocol, port) + if m, ok := h[ip]; ok { + delete(m, *pp) + if len(h[ip]) == 0 { + delete(h, ip) + } + } +} + +// Len returns the total number of (ip, protocol, port) tuple in HostPortInfo +func (h HostPortInfo) Len() int { + length := 0 + for _, m := range h { + length += len(m) + } + return length +} + +// CheckConflict checks if the input (ip, protocol, port) conflicts with the existing +// ones in HostPortInfo. +func (h HostPortInfo) CheckConflict(ip, protocol string, port int32) bool { + if port <= 0 { + return false + } + + h.sanitize(&ip, &protocol) + + pp := NewProtocolPort(protocol, port) + + // If ip is 0.0.0.0 check all IP's (protocol, port) pair + if ip == DefaultBindAllHostIP { + for _, m := range h { + if _, ok := m[*pp]; ok { + return true + } + } + return false + } + + // If ip isn't 0.0.0.0, only check IP and 0.0.0.0's (protocol, port) pair + for _, key := range []string{DefaultBindAllHostIP, ip} { + if m, ok := h[key]; ok { + if _, ok2 := m[*pp]; ok2 { + return true + } + } + } + + return false +} + +// sanitize the parameters +func (h HostPortInfo) sanitize(ip, protocol *string) { + if len(*ip) == 0 { + *ip = DefaultBindAllHostIP + } + if len(*protocol) == 0 { + *protocol = string(v1.ProtocolTCP) + } +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/cache/interface.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/cache/interface.go deleted file mode 100644 index 21eba905ef1d..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/cache/interface.go +++ /dev/null @@ -1,138 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
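Editorial aside: the new `host_ports.go` above centralizes host-port conflict checking in a single `HostPortInfo` type. A minimal sketch of its semantics, assuming the vendored import path `k8s.io/kubernetes/pkg/scheduler/cache` resolves as in this tree; all addresses and ports are illustrative:

```go
package main

import (
	"fmt"

	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
)

func main() {
	used := make(schedulercache.HostPortInfo)

	used.Add("127.0.0.1", "TCP", 8080) // concrete IP
	used.Add("", "", 9090)             // empty ip/protocol sanitize to 0.0.0.0/TCP

	// A concrete IP only conflicts with itself or with the 0.0.0.0 wildcard.
	fmt.Println(used.CheckConflict("192.168.0.1", "TCP", 8080)) // false
	// The wildcard address checks the (protocol, port) pair on every IP.
	fmt.Println(used.CheckConflict("0.0.0.0", "TCP", 8080)) // true
	// A 0.0.0.0 entry covers every concrete IP.
	fmt.Println(used.CheckConflict("10.0.0.1", "TCP", 9090)) // true
	fmt.Println(used.Len())                                  // 2
}
```

The wildcard handling is the key design point: a pair registered on 0.0.0.0 conflicts with the same protocol/port on any IP, while two distinct concrete IPs never conflict with each other.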
-*/ - -package cache - -import ( - "k8s.io/api/core/v1" - policy "k8s.io/api/policy/v1beta1" - "k8s.io/apimachinery/pkg/labels" -) - -// PodFilter is a function to filter a pod. If pod passed return true else return false. -type PodFilter func(*v1.Pod) bool - -// Cache collects pods' information and provides node-level aggregated information. -// It's intended for generic scheduler to do efficient lookup. -// Cache's operations are pod centric. It does incremental updates based on pod events. -// Pod events are sent via network. We don't have guaranteed delivery of all events: -// We use Reflector to list and watch from remote. -// Reflector might be slow and do a relist, which would lead to missing events. -// -// State Machine of a pod's events in scheduler's cache: -// -// -// +-------------------------------------------+ +----+ -// | Add | | | -// | | | | Update -// + Assume Add v v | -//Initial +--------> Assumed +------------+---> Added <--+ -// ^ + + | + -// | | | | | -// | | | Add | | Remove -// | | | | | -// | | | + | -// +----------------+ +-----------> Expired +----> Deleted -// Forget Expire -// -// -// Note that an assumed pod can expire, because if we haven't received Add event notifying us -// for a while, there might be some problems and we shouldn't keep the pod in cache anymore. -// -// Note that "Initial", "Expired", and "Deleted" pods do not actually exist in cache. -// Based on existing use cases, we are making the following assumptions: -// - No pod would be assumed twice -// - A pod could be added without going through scheduler. In this case, we will see Add but not Assume event. -// - If a pod wasn't added, it wouldn't be removed or updated. -// - Both "Expired" and "Deleted" are valid end states. In case of some problems, e.g. network issue, -// a pod might have changed its state (e.g. added and deleted) without delivering notification to the cache. -type Cache interface { - // AssumePod assumes a pod scheduled and aggregates the pod's information into its node. - // The implementation also decides the policy to expire pod before being confirmed (receiving Add event). - // After expiration, its information would be subtracted. - AssumePod(pod *v1.Pod) error - - // FinishBinding signals that cache for assumed pod can be expired - FinishBinding(pod *v1.Pod) error - - // ForgetPod removes an assumed pod from cache. - ForgetPod(pod *v1.Pod) error - - // AddPod either confirms a pod if it's assumed, or adds it back if it's expired. - // If added back, the pod's information would be added again. - AddPod(pod *v1.Pod) error - - // UpdatePod removes oldPod's information and adds newPod's information. - UpdatePod(oldPod, newPod *v1.Pod) error - - // RemovePod removes a pod. The pod's information would be subtracted from assigned node. - RemovePod(pod *v1.Pod) error - - // GetPod returns the pod from the cache with the same namespace and the - // same name of the specified pod. - GetPod(pod *v1.Pod) (*v1.Pod, error) - - // IsAssumedPod returns true if the pod is assumed and not expired. - IsAssumedPod(pod *v1.Pod) (bool, error) - - // AddNode adds overall information about node. - AddNode(node *v1.Node) error - - // UpdateNode updates overall information about node. - UpdateNode(oldNode, newNode *v1.Node) error - - // RemoveNode removes overall information about node. - RemoveNode(node *v1.Node) error - - // AddPDB adds a PodDisruptionBudget object to the cache. 
- AddPDB(pdb *policy.PodDisruptionBudget) error - - // UpdatePDB updates a PodDisruptionBudget object in the cache. - UpdatePDB(oldPDB, newPDB *policy.PodDisruptionBudget) error - - // RemovePDB removes a PodDisruptionBudget object from the cache. - RemovePDB(pdb *policy.PodDisruptionBudget) error - - // List lists all cached PDBs matching the selector. - ListPDBs(selector labels.Selector) ([]*policy.PodDisruptionBudget, error) - - // UpdateNodeNameToInfoMap updates the passed infoMap to the current contents of Cache. - // The node info contains aggregated information of pods scheduled (including assumed to be) - // on this node. - UpdateNodeNameToInfoMap(infoMap map[string]*NodeInfo) error - - // List lists all cached pods (including assumed ones). - List(labels.Selector) ([]*v1.Pod, error) - - // FilteredList returns all cached pods that pass the filter. - FilteredList(filter PodFilter, selector labels.Selector) ([]*v1.Pod, error) - - // Snapshot takes a snapshot on current cache - Snapshot() *Snapshot - - // IsUpToDate returns true if the given NodeInfo matches the current data in the cache. - IsUpToDate(n *NodeInfo) bool - - // NodeTree returns a node tree structure - NodeTree() *NodeTree -} - -// Snapshot is a snapshot of cache state -type Snapshot struct { - AssumedPods map[string]bool - Nodes map[string]*NodeInfo - Pdbs map[string]*policy.PodDisruptionBudget -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/cache/node_info.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/cache/node_info.go index c6b5f96cd881..8b623c72ca3a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/cache/node_info.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/cache/node_info.go @@ -22,13 +22,12 @@ import ( "sync" "sync/atomic" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" - "k8s.io/kubernetes/pkg/scheduler/util" ) var ( @@ -36,6 +35,14 @@ var ( generation int64 ) +// ImageStateSummary provides summarized information about the state of an image. +type ImageStateSummary struct { + // Size of the image + Size int64 + // Used to track how many nodes have this image + NumNodes int +} + // NodeInfo is node level aggregated information. type NodeInfo struct { // Overall node information. @@ -43,7 +50,7 @@ type NodeInfo struct { pods []*v1.Pod podsWithAffinity []*v1.Pod - usedPorts util.HostPortInfo + usedPorts HostPortInfo // Total requested resource of all pods on this node. // It includes assumed pods which scheduler sends binding to apiserver but @@ -66,7 +73,7 @@ type NodeInfo struct { // TransientInfo holds the information pertaining to a scheduling cycle. This will be destructed at the end of // scheduling cycle. // TODO: @ravig. Remove this once we have a clear approach for message passing across predicates and priorities. - TransientInfo *transientSchedulerInfo + TransientInfo *TransientSchedulerInfo // Cached conditions of node for faster lookup. memoryPressureCondition v1.ConditionStatus @@ -99,28 +106,28 @@ type nodeTransientInfo struct { RequestedVolumes int } -// transientSchedulerInfo is a transient structure which is destructed at the end of each scheduling cycle. +// TransientSchedulerInfo is a transient structure which is destructed at the end of each scheduling cycle. 
// It consists of items that are valid for a scheduling cycle and is used for message passing across predicates and // priorities. Some examples which could be used as fields are number of volumes being used on node, current utilization // on node etc. // IMPORTANT NOTE: Make sure that each field in this structure is documented along with usage. Expand this structure // only when absolutely needed as this data structure will be created and destroyed during every scheduling cycle. -type transientSchedulerInfo struct { +type TransientSchedulerInfo struct { TransientLock sync.Mutex // NodeTransInfo holds the information related to nodeTransientInformation. NodeName is the key here. TransNodeInfo nodeTransientInfo } -// newTransientSchedulerInfo returns a new scheduler transient structure with initialized values. -func newTransientSchedulerInfo() *transientSchedulerInfo { - tsi := &transientSchedulerInfo{ +// NewTransientSchedulerInfo returns a new scheduler transient structure with initialized values. +func NewTransientSchedulerInfo() *TransientSchedulerInfo { + tsi := &TransientSchedulerInfo{ TransNodeInfo: initializeNodeTransientInfo(), } return tsi } -// resetTransientSchedulerInfo resets the transientSchedulerInfo. -func (transientSchedInfo *transientSchedulerInfo) resetTransientSchedulerInfo() { +// ResetTransientSchedulerInfo resets the TransientSchedulerInfo. +func (transientSchedInfo *TransientSchedulerInfo) ResetTransientSchedulerInfo() { transientSchedInfo.TransientLock.Lock() defer transientSchedInfo.TransientLock.Unlock() // Reset TransientNodeInfo. @@ -259,9 +266,9 @@ func NewNodeInfo(pods ...*v1.Pod) *NodeInfo { requestedResource: &Resource{}, nonzeroRequest: &Resource{}, allocatableResource: &Resource{}, - TransientInfo: newTransientSchedulerInfo(), + TransientInfo: NewTransientSchedulerInfo(), generation: nextGeneration(), - usedPorts: make(util.HostPortInfo), + usedPorts: make(HostPortInfo), imageStates: make(map[string]*ImageStateSummary), } for _, pod := range pods { @@ -286,14 +293,24 @@ func (n *NodeInfo) Pods() []*v1.Pod { return n.pods } +// SetPods sets all pods scheduled (including assumed to be) on this node. +func (n *NodeInfo) SetPods(pods []*v1.Pod) { + n.pods = pods +} + // UsedPorts returns used ports on this node. -func (n *NodeInfo) UsedPorts() util.HostPortInfo { +func (n *NodeInfo) UsedPorts() HostPortInfo { if n == nil { return nil } return n.usedPorts } +// SetUsedPorts sets the used ports on this node. +func (n *NodeInfo) SetUsedPorts(newUsedPorts HostPortInfo) { + n.usedPorts = newUsedPorts +} + // ImageStates returns the state information of all images. func (n *NodeInfo) ImageStates() map[string]*ImageStateSummary { if n == nil { @@ -302,6 +319,11 @@ func (n *NodeInfo) ImageStates() map[string]*ImageStateSummary { return n.imageStates } +// SetImageStates sets the state information of all images. +func (n *NodeInfo) SetImageStates(newImageStates map[string]*ImageStateSummary) { + n.imageStates = newImageStates +} + // PodsWithAffinity return all pods with (anti)affinity constraints on this node. func (n *NodeInfo) PodsWithAffinity() []*v1.Pod { if n == nil { @@ -326,6 +348,11 @@ func (n *NodeInfo) Taints() ([]v1.Taint, error) { return n.taints, n.taintsErr } +// SetTaints sets the taints list on this node. +func (n *NodeInfo) SetTaints(newTaints []v1.Taint) { + n.taints = newTaints +} + // MemoryPressureCondition returns the memory pressure condition status on this node. 
func (n *NodeInfo) MemoryPressureCondition() v1.ConditionStatus { if n == nil { @@ -358,6 +385,11 @@ func (n *NodeInfo) RequestedResource() Resource { return *n.requestedResource } +// SetRequestedResource sets the aggregated resource request of pods on this node. +func (n *NodeInfo) SetRequestedResource(newResource *Resource) { + n.requestedResource = newResource +} + // NonZeroRequest returns aggregated nonzero resource request of pods on this node. func (n *NodeInfo) NonZeroRequest() Resource { if n == nil { @@ -366,6 +398,11 @@ func (n *NodeInfo) NonZeroRequest() Resource { return *n.nonzeroRequest } +// SetNonZeroRequest sets the aggregated nonzero resource request of pods on this node. +func (n *NodeInfo) SetNonZeroRequest(newResource *Resource) { + n.nonzeroRequest = newResource +} + // AllocatableResource returns allocatable resources on a given node. func (n *NodeInfo) AllocatableResource() Resource { if n == nil { @@ -380,6 +417,19 @@ func (n *NodeInfo) SetAllocatableResource(allocatableResource *Resource) { n.generation = nextGeneration() } +// GetGeneration returns the generation on this node. +func (n *NodeInfo) GetGeneration() int64 { + if n == nil { + return 0 + } + return n.generation +} + +// SetGeneration sets the generation on this node. This is for testing only. +func (n *NodeInfo) SetGeneration(newGeneration int64) { + n.generation = newGeneration +} + // Clone returns a copy of this node. func (n *NodeInfo) Clone() *NodeInfo { clone := &NodeInfo{ @@ -392,7 +442,7 @@ func (n *NodeInfo) Clone() *NodeInfo { memoryPressureCondition: n.memoryPressureCondition, diskPressureCondition: n.diskPressureCondition, pidPressureCondition: n.pidPressureCondition, - usedPorts: make(util.HostPortInfo), + usedPorts: make(HostPortInfo), imageStates: n.imageStates, generation: n.generation, } @@ -400,10 +450,10 @@ func (n *NodeInfo) Clone() *NodeInfo { clone.pods = append([]*v1.Pod(nil), n.pods...) } if len(n.usedPorts) > 0 { - // util.HostPortInfo is a map-in-map struct + // HostPortInfo is a map-in-map struct // make sure it's deep copied for ip, portMap := range n.usedPorts { - clone.usedPorts[ip] = make(map[util.ProtocolPort]struct{}) + clone.usedPorts[ip] = make(map[ProtocolPort]struct{}) for protocolPort, v := range portMap { clone.usedPorts[ip][protocolPort] = v } @@ -464,22 +514,22 @@ func (n *NodeInfo) AddPod(pod *v1.Pod) { } // Consume ports when pods added. - n.updateUsedPorts(pod, true) + n.UpdateUsedPorts(pod, true) n.generation = nextGeneration() } // RemovePod subtracts pod information from this NodeInfo. func (n *NodeInfo) RemovePod(pod *v1.Pod) error { - k1, err := getPodKey(pod) + k1, err := GetPodKey(pod) if err != nil { return err } for i := range n.podsWithAffinity { - k2, err := getPodKey(n.podsWithAffinity[i]) + k2, err := GetPodKey(n.podsWithAffinity[i]) if err != nil { - glog.Errorf("Cannot get pod key, err: %v", err) + klog.Errorf("Cannot get pod key, err: %v", err) continue } if k1 == k2 { @@ -490,9 +540,9 @@ func (n *NodeInfo) RemovePod(pod *v1.Pod) error { } } for i := range n.pods { - k2, err := getPodKey(n.pods[i]) + k2, err := GetPodKey(n.pods[i]) if err != nil { - glog.Errorf("Cannot get pod key, err: %v", err) + klog.Errorf("Cannot get pod key, err: %v", err) continue } if k1 == k2 { @@ -515,7 +565,7 @@ func (n *NodeInfo) RemovePod(pod *v1.Pod) error { n.nonzeroRequest.Memory -= non0Mem // Release ports when remove Pods. 
- n.updateUsedPorts(pod, false) + n.UpdateUsedPorts(pod, false) n.generation = nextGeneration() @@ -539,7 +589,8 @@ func calculateResource(pod *v1.Pod) (res Resource, non0CPU int64, non0Mem int64) return } -func (n *NodeInfo) updateUsedPorts(pod *v1.Pod, add bool) { +// UpdateUsedPorts updates the UsedPorts of NodeInfo. +func (n *NodeInfo) UpdateUsedPorts(pod *v1.Pod, add bool) { for j := range pod.Spec.Containers { container := &pod.Spec.Containers[j] for k := range container.Ports { @@ -573,7 +624,7 @@ func (n *NodeInfo) SetNode(node *v1.Node) error { // We ignore other conditions. } } - n.TransientInfo = newTransientSchedulerInfo() + n.TransientInfo = NewTransientSchedulerInfo() n.generation = nextGeneration() return nil } @@ -614,9 +665,9 @@ func (n *NodeInfo) FilterOutPods(pods []*v1.Pod) []*v1.Pod { continue } // If pod is on the given node, add it to 'filtered' only if it is present in nodeInfo. - podKey, _ := getPodKey(p) + podKey, _ := GetPodKey(p) for _, np := range n.Pods() { - npodkey, _ := getPodKey(np) + npodkey, _ := GetPodKey(np) if npodkey == podKey { filtered = append(filtered, p) break @@ -626,8 +677,8 @@ func (n *NodeInfo) FilterOutPods(pods []*v1.Pod) []*v1.Pod { return filtered } -// getPodKey returns the string key of a pod. -func getPodKey(pod *v1.Pod) (string, error) { +// GetPodKey returns the string key of a pod. +func GetPodKey(pod *v1.Pod) (string, error) { uid := string(pod.UID) if len(uid) == 0 { return "", errors.New("Cannot get cache key for pod with empty UID") diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/BUILD index 873c0150cf7b..7df943d3f7e3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/BUILD @@ -1,71 +1,66 @@ -package(default_visibility = ["//visibility:public"]) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_test( - name = "go_default_test", +go_library( + name = "go_default_library", srcs = [ - "extender_test.go", - "generic_scheduler_test.go", - "scheduling_queue_test.go", + "extender.go", + "generic_scheduler.go", ], - embed = [":go_default_library"], + importpath = "k8s.io/kubernetes/pkg/scheduler/core", + visibility = ["//visibility:public"], deps = [ "//pkg/scheduler/algorithm:go_default_library", "//pkg/scheduler/algorithm/predicates:go_default_library", - "//pkg/scheduler/algorithm/priorities:go_default_library", - "//pkg/scheduler/algorithm/priorities/util:go_default_library", "//pkg/scheduler/api:go_default_library", "//pkg/scheduler/cache:go_default_library", "//pkg/scheduler/core/equivalence:go_default_library", - "//pkg/scheduler/testing:go_default_library", + "//pkg/scheduler/internal/cache:go_default_library", + "//pkg/scheduler/internal/queue:go_default_library", + "//pkg/scheduler/metrics:go_default_library", "//pkg/scheduler/util:go_default_library", - "//staging/src/k8s.io/api/apps/v1:go_default_library", + "//pkg/scheduler/volumebinder:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/api/policy/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + 
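The `node_info.go` hunks above are largely an export pass: `getPodKey` and `updateUsedPorts` gain exported names, the transient-info type becomes `TransientSchedulerInfo`, and setters are added so out-of-tree callers (such as the cluster autoscaler itself) can assemble and mutate `NodeInfo` snapshots. A sketch under those assumptions; the pod fields are made up:

```go
package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
)

func main() {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default", UID: "uid-1"},
		Spec: v1.PodSpec{Containers: []v1.Container{{
			Ports: []v1.ContainerPort{{Protocol: v1.ProtocolTCP, HostPort: 8080}},
		}}},
	}

	info := schedulercache.NewNodeInfo(pod) // AddPod consumes the host port

	// Clone deep-copies usedPorts, so mutations on the copy stay isolated,
	// which is what scheduling simulations rely on.
	sim := info.Clone()
	if err := sim.RemovePod(pod); err != nil {
		panic(err)
	}

	fmt.Println(info.UsedPorts().Len(), sim.UsedPorts().Len()) // 1 0

	key, _ := schedulercache.GetPodKey(pod) // exported by this patch
	fmt.Println(key)                        // uid-1
}
```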
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/trace:go_default_library", + "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", + "//staging/src/k8s.io/client-go/rest:go_default_library", + "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) -go_library( - name = "go_default_library", +go_test( + name = "go_default_test", srcs = [ - "extender.go", - "generic_scheduler.go", - "scheduling_queue.go", + "extender_test.go", + "generic_scheduler_test.go", ], - importpath = "k8s.io/kubernetes/pkg/scheduler/core", + embed = [":go_default_library"], deps = [ - "//pkg/api/v1/pod:go_default_library", "//pkg/scheduler/algorithm:go_default_library", "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/algorithm/priorities:go_default_library", "//pkg/scheduler/algorithm/priorities/util:go_default_library", "//pkg/scheduler/api:go_default_library", "//pkg/scheduler/cache:go_default_library", "//pkg/scheduler/core/equivalence:go_default_library", - "//pkg/scheduler/metrics:go_default_library", + "//pkg/scheduler/internal/cache:go_default_library", + "//pkg/scheduler/internal/queue:go_default_library", + "//pkg/scheduler/testing:go_default_library", "//pkg/scheduler/util:go_default_library", - "//pkg/scheduler/volumebinder:go_default_library", + "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/policy/v1beta1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/util/trace:go_default_library", - "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", - "//staging/src/k8s.io/client-go/rest:go_default_library", - "//staging/src/k8s.io/client-go/tools/cache:go_default_library", - "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", ], ) @@ -83,4 +78,5 @@ filegroup( "//pkg/scheduler/core/equivalence:all-srcs", ], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/equivalence/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/equivalence/BUILD index 2c56de7ebca8..a6ad12ff724b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/equivalence/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/equivalence/BUILD @@ -15,7 +15,7 @@ go_library( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", 
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -27,7 +27,6 @@ go_test( "//pkg/scheduler/algorithm:go_default_library", "//pkg/scheduler/algorithm/predicates:go_default_library", "//pkg/scheduler/cache:go_default_library", - "//pkg/scheduler/testing:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/equivalence/eqivalence.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/equivalence/eqivalence.go index 8101809ad21f..d776981999bd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/equivalence/eqivalence.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/equivalence/eqivalence.go @@ -23,10 +23,10 @@ import ( "hash/fnv" "sync" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/scheduler/algorithm" "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" @@ -35,6 +35,9 @@ import ( hashutil "k8s.io/kubernetes/pkg/util/hash" ) +// nodeMap stores a *NodeCache for each node. +type nodeMap map[string]*NodeCache + // Cache is a thread safe map saves and reuses the output of predicate functions, // it uses node name as key to access those cached results. // @@ -42,13 +45,23 @@ import ( // class". (Equivalence class is defined in the `Class` type.) Saved results // will be reused until an appropriate invalidation function is called. type Cache struct { - // i.e. map[string]*NodeCache - sync.Map + // NOTE(harry): Theoretically sync.Map has better performance in machine with 8+ CPUs, while + // the reality is lock contention in first level cache is rare. + mu sync.RWMutex + nodeToCache nodeMap + predicateIDMap map[string]int } // NewCache create an empty equiv class cache. -func NewCache() *Cache { - return new(Cache) +func NewCache(predicates []string) *Cache { + predicateIDMap := make(map[string]int, len(predicates)) + for id, predicate := range predicates { + predicateIDMap[predicate] = id + } + return &Cache{ + nodeToCache: make(nodeMap), + predicateIDMap: predicateIDMap, + } } // NodeCache saves and reuses the output of predicate functions. Use RunPredicate to @@ -63,35 +76,91 @@ func NewCache() *Cache { type NodeCache struct { mu sync.RWMutex cache predicateMap + // generation is current generation of node cache, incremented on node + // invalidation. + generation uint64 + // snapshotGeneration saves snapshot of generation of node cache. + snapshotGeneration uint64 + // predicateGenerations stores generation numbers for predicates, incremented on + // predicate invalidation. Created on first update. Use 0 if does not + // exist. + predicateGenerations []uint64 + // snapshotPredicateGenerations saves snapshot of generation numbers for predicates. + snapshotPredicateGenerations []uint64 } // newNodeCache returns an empty NodeCache. 
-func newNodeCache() *NodeCache { +func newNodeCache(n int) *NodeCache { return &NodeCache{ - cache: make(predicateMap), + cache: make(predicateMap, n), + predicateGenerations: make([]uint64, n), + snapshotPredicateGenerations: make([]uint64, n), + } +} + +// Snapshot snapshots current generations of cache. +// NOTE: We snapshot generations of all node caches before using it and these +// operations are serialized, we can save snapshot as member of node cache +// itself. +func (c *Cache) Snapshot() { + c.mu.RLock() + defer c.mu.RUnlock() + for _, n := range c.nodeToCache { + n.mu.Lock() + // snapshot predicate generations + copy(n.snapshotPredicateGenerations, n.predicateGenerations) + // snapshot node generation + n.snapshotGeneration = n.generation + n.mu.Unlock() } + return } // GetNodeCache returns the existing NodeCache for given node if present. Otherwise, // it creates the NodeCache and returns it. // The boolean flag is true if the value was loaded, false if created. func (c *Cache) GetNodeCache(name string) (nodeCache *NodeCache, exists bool) { - v, exists := c.LoadOrStore(name, newNodeCache()) - nodeCache = v.(*NodeCache) + c.mu.Lock() + defer c.mu.Unlock() + if nodeCache, exists = c.nodeToCache[name]; !exists { + nodeCache = newNodeCache(len(c.predicateIDMap)) + c.nodeToCache[name] = nodeCache + } return } +// LoadNodeCache returns the existing NodeCache for given node, nil if not +// present. +func (c *Cache) LoadNodeCache(node string) *NodeCache { + c.mu.RLock() + defer c.mu.RUnlock() + return c.nodeToCache[node] +} + +func (c *Cache) predicateKeysToIDs(predicateKeys sets.String) []int { + predicateIDs := make([]int, 0, len(predicateKeys)) + for predicateKey := range predicateKeys { + if id, ok := c.predicateIDMap[predicateKey]; ok { + predicateIDs = append(predicateIDs, id) + } else { + klog.Errorf("predicate key %q not found", predicateKey) + } + } + return predicateIDs +} + // InvalidatePredicates clears all cached results for the given predicates. func (c *Cache) InvalidatePredicates(predicateKeys sets.String) { if len(predicateKeys) == 0 { return } - c.Range(func(k, v interface{}) bool { - n := v.(*NodeCache) - n.invalidatePreds(predicateKeys) - return true - }) - glog.V(5).Infof("Cache invalidation: node=*,predicates=%v", predicateKeys) + c.mu.RLock() + defer c.mu.RUnlock() + predicateIDs := c.predicateKeysToIDs(predicateKeys) + for _, n := range c.nodeToCache { + n.invalidatePreds(predicateIDs) + } + klog.V(5).Infof("Cache invalidation: node=*,predicates=%v", predicateKeys) } @@ -100,17 +169,23 @@ func (c *Cache) InvalidatePredicatesOnNode(nodeName string, predicateKeys sets.S if len(predicateKeys) == 0 { return } - if v, ok := c.Load(nodeName); ok { - n := v.(*NodeCache) - n.invalidatePreds(predicateKeys) + c.mu.RLock() + defer c.mu.RUnlock() + predicateIDs := c.predicateKeysToIDs(predicateKeys) + if n, ok := c.nodeToCache[nodeName]; ok { + n.invalidatePreds(predicateIDs) } - glog.V(5).Infof("Cache invalidation: node=%s,predicates=%v", nodeName, predicateKeys) + klog.V(5).Infof("Cache invalidation: node=%s,predicates=%v", nodeName, predicateKeys) } // InvalidateAllPredicatesOnNode clears all cached results for one node. 
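`NewCache` now takes the canonical predicate ordering so keys can be translated once into dense slice indices. A short sketch of constructing and invalidating the reworked cache, assuming the vendored packages shown above; the node name is illustrative:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	"k8s.io/kubernetes/pkg/scheduler/core/equivalence"
)

func main() {
	// Seed the cache with the canonical predicate ordering; keys are mapped
	// to integer IDs once, so per-node results can live in a slice.
	ecache := equivalence.NewCache(predicates.Ordering())

	// First access creates the per-node cache, later accesses load it.
	nodeCache, loaded := ecache.GetNodeCache("node-1")
	fmt.Println(loaded, nodeCache != nil) // false true

	// Invalidation resolves keys to IDs and bumps generation counters
	// instead of deleting map entries.
	ecache.InvalidatePredicatesOnNode("node-1", sets.NewString(predicates.GeneralPred))
	ecache.InvalidateAllPredicatesOnNode("node-1")
}
```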
func (c *Cache) InvalidateAllPredicatesOnNode(nodeName string) {
- c.Delete(nodeName)
- glog.V(5).Infof("Cache invalidation: node=%s,predicates=*", nodeName)
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ if node, ok := c.nodeToCache[nodeName]; ok {
+ node.invalidate()
+ }
+ klog.V(5).Infof("Cache invalidation: node=%s,predicates=*", nodeName)
}

// InvalidateCachedPredicateItemForPodAdd is a wrapper of
@@ -186,8 +261,8 @@ func NewClass(pod *v1.Pod) *Class {
 return nil
}

-// predicateMap stores resultMaps with predicate name as the key.
-type predicateMap map[string]resultMap
+// predicateMap stores resultMaps with predicate ID as the key.
+type predicateMap []resultMap

// resultMap stores PredicateResult with pod equivalence hash as the key.
type resultMap map[uint64]predicateResult
@@ -201,22 +276,22 @@ type predicateResult struct {

// RunPredicate returns a cached predicate result. In case of a cache miss, the predicate will be
// run and its results cached for the next call.
//
-// NOTE: RunPredicate will not update the equivalence cache if the given NodeInfo is stale.
+// NOTE: RunPredicate will not update the equivalence cache if the generation does not match the live version.
func (n *NodeCache) RunPredicate(
 pred algorithm.FitPredicate,
 predicateKey string,
+ predicateID int,
 pod *v1.Pod,
 meta algorithm.PredicateMetadata,
 nodeInfo *schedulercache.NodeInfo,
 equivClass *Class,
- cache schedulercache.Cache,
) (bool, []algorithm.PredicateFailureReason, error) {
 if nodeInfo == nil || nodeInfo.Node() == nil {
 // This may happen during tests.
 return false, []algorithm.PredicateFailureReason{}, fmt.Errorf("nodeInfo is nil or node is invalid")
 }

- result, ok := n.lookupResult(pod.GetName(), nodeInfo.Node().GetName(), predicateKey, equivClass.hash)
+ result, ok := n.lookupResult(pod.GetName(), nodeInfo.Node().GetName(), predicateKey, predicateID, equivClass.hash)
 if ok {
 return result.Fit, result.FailReasons, nil
 }
@@ -224,19 +299,17 @@ func (n *NodeCache) RunPredicate(
 if err != nil {
 return fit, reasons, err
 }
- if cache != nil {
- n.updateResult(pod.GetName(), predicateKey, fit, reasons, equivClass.hash, cache, nodeInfo)
- }
+ n.updateResult(pod.GetName(), predicateKey, predicateID, fit, reasons, equivClass.hash, nodeInfo)
 return fit, reasons, nil
}

// updateResult updates the cached result of a predicate.
func (n *NodeCache) updateResult(
 podName, predicateKey string,
+ predicateID int,
 fit bool,
 reasons []algorithm.PredicateFailureReason,
 equivalenceHash uint64,
- cache schedulercache.Cache,
 nodeInfo *schedulercache.NodeInfo,
) {
 if nodeInfo == nil || nodeInfo.Node() == nil {
@@ -244,11 +317,6 @@ func (n *NodeCache) updateResult(
 metrics.EquivalenceCacheWrites.WithLabelValues("discarded_bad_node").Inc()
 return
 }
- // Skip update if NodeInfo is stale.
- if !cache.IsUpToDate(nodeInfo) {
- metrics.EquivalenceCacheWrites.WithLabelValues("discarded_stale").Inc()
- return
- }

 predicateItem := predicateResult{
 Fit: fit,
@@ -257,18 +325,26 @@ func (n *NodeCache) updateResult(
 n.mu.Lock()
 defer n.mu.Unlock()
+ if (n.snapshotGeneration != n.generation) || (n.snapshotPredicateGenerations[predicateID] != n.predicateGenerations[predicateID]) {
+ // Generation of node or predicate has been updated since we last took
+ // a snapshot; this indicates that we received an invalidation request
+ // during this time. Cache may be stale, skip update.
+ metrics.EquivalenceCacheWrites.WithLabelValues("discarded_stale").Inc()
+ return
+ }
 // If cached predicate map already exists, just update the predicate by key
- if predicates, ok := n.cache[predicateKey]; ok {
+ if predicates := n.cache[predicateID]; predicates != nil {
 // maps in golang are references, no need to add them back
 predicates[equivalenceHash] = predicateItem
 } else {
- n.cache[predicateKey] =
+ n.cache[predicateID] =
 resultMap{
 equivalenceHash: predicateItem,
 }
 }
+ n.predicateGenerations[predicateID]++

- glog.V(5).Infof("Cache update: node=%s, predicate=%s,pod=%s,value=%v",
+ klog.V(5).Infof("Cache update: node=%s, predicate=%s,pod=%s,value=%v",
 nodeInfo.Node().Name, predicateKey, podName, predicateItem)
}

@@ -276,11 +352,12 @@ func (n *NodeCache) updateResult(
// cache entry was found.
func (n *NodeCache) lookupResult(
 podName, nodeName, predicateKey string,
+ predicateID int,
 equivalenceHash uint64,
) (value predicateResult, ok bool) {
 n.mu.RLock()
 defer n.mu.RUnlock()
- value, ok = n.cache[predicateKey][equivalenceHash]
+ value, ok = n.cache[predicateID][equivalenceHash]
 if ok {
 metrics.EquivalenceCacheHits.Inc()
 } else {
@@ -289,15 +366,24 @@ func (n *NodeCache) lookupResult(
 return value, ok
}

-// invalidatePreds deletes cached predicates by given keys.
-func (n *NodeCache) invalidatePreds(predicateKeys sets.String) {
+// invalidatePreds deletes cached predicates by given IDs.
+func (n *NodeCache) invalidatePreds(predicateIDs []int) {
 n.mu.Lock()
 defer n.mu.Unlock()
- for predicateKey := range predicateKeys {
- delete(n.cache, predicateKey)
+ for _, predicateID := range predicateIDs {
+ n.cache[predicateID] = nil
+ n.predicateGenerations[predicateID]++
 }
}

+// invalidate invalidates the node cache.
+func (n *NodeCache) invalidate() {
+ n.mu.Lock()
+ defer n.mu.Unlock()
+ n.cache = make(predicateMap, len(n.cache))
+ n.generation++
+}
+
// equivalencePod is the set of pod attributes which must match for two pods to
// be considered equivalent for scheduling purposes. For correctness, this must
// include any Pod field which is used by a FitPredicate.
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/extender.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/extender.go
index 94d143493656..9053a2e4ea21 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/extender.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/extender.go
@@ -107,6 +107,11 @@ func NewHTTPExtender(config *schedulerapi.ExtenderConfig) (algorithm.SchedulerEx
 }, nil
}

+// Name returns extenderURL to identify the extender.
+func (h *HTTPExtender) Name() string {
+ return h.extenderURL
+}
+
// IsIgnorable returns true indicates scheduling should not fail when this extender
// is unavailable
func (h *HTTPExtender) IsIgnorable() bool {
@@ -138,7 +143,7 @@ func (h *HTTPExtender) ProcessPreemption(
 // If extender has cached node info, pass NodeNameToMetaVictims in args.
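The generation counters above replace the removed `cache.IsUpToDate` check. What follows is an editorial, stripped-down reimplementation of that protocol (not the vendored types) to make the race protection concrete:

```go
package main

import "fmt"

// genCache is a single-generation reduction of NodeCache's
// snapshot/generation protocol, for illustration only.
type genCache struct {
	generation         uint64
	snapshotGeneration uint64
	results            map[uint64]bool
}

func (c *genCache) snapshot()   { c.snapshotGeneration = c.generation }
func (c *genCache) invalidate() { c.generation++ }

// tryCommit mirrors updateResult: a result computed against the snapshot is
// only stored if no invalidation happened since the snapshot was taken.
func (c *genCache) tryCommit(hash uint64, fit bool) bool {
	if c.snapshotGeneration != c.generation {
		return false // "discarded_stale" in the real metrics
	}
	c.results[hash] = fit
	return true
}

func main() {
	c := &genCache{results: map[uint64]bool{}}

	c.snapshot()
	fmt.Println(c.tryCommit(42, true)) // true: nothing changed in between

	c.snapshot()
	c.invalidate()                      // e.g. a pod event arrives mid-cycle
	fmt.Println(c.tryCommit(42, false)) // false: stale write dropped
}
```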
nodeNameToMetaVictims := convertToNodeNameToMetaVictims(nodeToVictims)
 args = &schedulerapi.ExtenderPreemptionArgs{
- Pod: pod,
+ Pod: pod,
 NodeNameToMetaVictims: nodeNameToMetaVictims,
 }
 } else {
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/generic_scheduler.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/generic_scheduler.go
index 1ab9e576a32b..5f163889c48f 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/generic_scheduler.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/generic_scheduler.go
@@ -26,7 +26,7 @@ import (
 "sync/atomic"
 "time"

- "github.com/golang/glog"
+ "k8s.io/klog"

 "k8s.io/api/core/v1"
 policy "k8s.io/api/policy/v1beta1"
@@ -41,6 +41,8 @@ import (
 schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
 "k8s.io/kubernetes/pkg/scheduler/core/equivalence"
+ schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
+ internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
 "k8s.io/kubernetes/pkg/scheduler/metrics"
 "k8s.io/kubernetes/pkg/scheduler/util"
 "k8s.io/kubernetes/pkg/scheduler/volumebinder"
@@ -94,9 +96,9 @@ func (f *FitError) Error() string {
}

type genericScheduler struct {
- cache schedulercache.Cache
+ cache schedulerinternalcache.Cache
 equivalenceCache *equivalence.Cache
- schedulingQueue SchedulingQueue
+ schedulingQueue internalqueue.SchedulingQueue
 predicates map[string]algorithm.FitPredicate
 priorityMetaProducer algorithm.PriorityMetadataProducer
 predicateMetaProducer algorithm.PredicateMetadataProducer
@@ -107,10 +109,30 @@ type genericScheduler struct {
 cachedNodeInfoMap map[string]*schedulercache.NodeInfo
 volumeBinder *volumebinder.VolumeBinder
 pvcLister corelisters.PersistentVolumeClaimLister
+ pdbLister algorithm.PDBLister
 disablePreemption bool
 percentageOfNodesToScore int32
}

+// snapshot snapshots the equivalence cache and node infos for all fit and priority
+// functions.
+func (g *genericScheduler) snapshot() error {
+ // IMPORTANT NOTE: We must snapshot equivalence cache before snapshotting
+ // scheduler cache, otherwise stale data may be written into equivalence
+ // cache, e.g.
+ // 1. snapshot cache
+ // 2. event arrives, updating cache and invalidating predicates or whole node cache
+ // 3. snapshot ecache
+ // 4. evaluate predicates
+ // 5. stale result will be written to ecache
+ if g.equivalenceCache != nil {
+ g.equivalenceCache.Snapshot()
+ }
+
+ // Used for all fit and priority funcs.
+ return g.cache.UpdateNodeNameToInfoMap(g.cachedNodeInfoMap)
+}
+
// Schedule tries to schedule the given pod to one of the nodes in the node list.
// If it succeeds, it will return the name of the node.
// If it fails, it will return a FitError error with reasons.
@@ -130,8 +152,7 @@ func (g *genericScheduler) Schedule(pod *v1.Pod, nodeLister algorithm.NodeLister
 return "", ErrNoNodesAvailable
 }

- // Used for all fit and priority funcs.
- err = g.cache.UpdateNodeNameToInfoMap(g.cachedNodeInfoMap) + err = g.snapshot() if err != nil { return "", err } @@ -227,12 +248,12 @@ func (g *genericScheduler) Preempt(pod *v1.Pod, nodeLister algorithm.NodeLister, if !ok || fitError == nil { return nil, nil, nil, nil } - err := g.cache.UpdateNodeNameToInfoMap(g.cachedNodeInfoMap) + err := g.snapshot() if err != nil { return nil, nil, nil, err } if !podEligibleToPreemptOthers(pod, g.cachedNodeInfoMap) { - glog.V(5).Infof("Pod %v/%v is not eligible for more preemption.", pod.Namespace, pod.Name) + klog.V(5).Infof("Pod %v/%v is not eligible for more preemption.", pod.Namespace, pod.Name) return nil, nil, nil, nil } allNodes, err := nodeLister.List() @@ -244,11 +265,11 @@ func (g *genericScheduler) Preempt(pod *v1.Pod, nodeLister algorithm.NodeLister, } potentialNodes := nodesWherePreemptionMightHelp(allNodes, fitError.FailedPredicates) if len(potentialNodes) == 0 { - glog.V(3).Infof("Preemption will not help schedule pod %v/%v on any node.", pod.Namespace, pod.Name) + klog.V(3).Infof("Preemption will not help schedule pod %v/%v on any node.", pod.Namespace, pod.Name) // In this case, we should clean-up any existing nominated node name of the pod. return nil, nil, []*v1.Pod{pod}, nil } - pdbs, err := g.cache.ListPDBs(labels.Everything()) + pdbs, err := g.pdbLister.List(labels.Everything()) if err != nil { return nil, nil, nil, err } @@ -300,7 +321,7 @@ func (g *genericScheduler) processPreemptionWithExtenders( ) if err != nil { if extender.IsIgnorable() { - glog.Warningf("Skipping extender %v as it returned error %v and has ignorable flag set", + klog.Warningf("Skipping extender %v as it returned error %v and has ignorable flag set", extender, err) continue } @@ -396,14 +417,13 @@ func (g *genericScheduler) findNodesThatFit(pod *v1.Pod, nodes []*v1.Node) ([]*v var nodeCache *equivalence.NodeCache nodeName := g.cache.NodeTree().Next() if g.equivalenceCache != nil { - nodeCache, _ = g.equivalenceCache.GetNodeCache(nodeName) + nodeCache = g.equivalenceCache.LoadNodeCache(nodeName) } fits, failedPredicates, err := podFitsOnNode( pod, meta, g.cachedNodeInfoMap[nodeName], g.predicates, - g.cache, nodeCache, g.schedulingQueue, g.alwaysCheckAllPredicates, @@ -448,7 +468,7 @@ func (g *genericScheduler) findNodesThatFit(pod *v1.Pod, nodes []*v1.Node) ([]*v filteredList, failedMap, err := extender.Filter(pod, filtered, g.cachedNodeInfoMap) if err != nil { if extender.IsIgnorable() { - glog.Warningf("Skipping extender %v as it returned error %v and has ignorable flag set", + klog.Warningf("Skipping extender %v as it returned error %v and has ignorable flag set", extender, err) continue } else { @@ -474,8 +494,8 @@ func (g *genericScheduler) findNodesThatFit(pod *v1.Pod, nodes []*v1.Node) ([]*v // addNominatedPods adds pods with equal or greater priority which are nominated // to run on the node given in nodeInfo to meta and nodeInfo. It returns 1) whether // any pod was found, 2) augmented meta data, 3) augmented nodeInfo. -func addNominatedPods(podPriority int32, meta algorithm.PredicateMetadata, - nodeInfo *schedulercache.NodeInfo, queue SchedulingQueue) (bool, algorithm.PredicateMetadata, +func addNominatedPods(pod *v1.Pod, meta algorithm.PredicateMetadata, + nodeInfo *schedulercache.NodeInfo, queue internalqueue.SchedulingQueue) (bool, algorithm.PredicateMetadata, *schedulercache.NodeInfo) { if queue == nil || nodeInfo == nil || nodeInfo.Node() == nil { // This may happen only in tests. 
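`Preempt` now reads PodDisruptionBudgets from a lister rather than from the scheduler cache (the PDB methods were dropped from the `Cache` interface earlier in this diff). A sketch of wiring such a lister from a shared informer factory, assuming standard client-go of this vintage; the in-cluster config and resync period are illustrative:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // any rest.Config would do
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	factory := informers.NewSharedInformerFactory(client, 30*time.Second)
	pdbLister := factory.Policy().V1beta1().PodDisruptionBudgets().Lister()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	// The same style of call Preempt now makes via g.pdbLister.
	pdbs, err := pdbLister.List(labels.Everything())
	if err != nil {
		panic(err)
	}
	fmt.Printf("observed %d PodDisruptionBudgets\n", len(pdbs))
}
```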
@@ -491,7 +511,7 @@ func addNominatedPods(podPriority int32, meta algorithm.PredicateMetadata, } nodeInfoOut := nodeInfo.Clone() for _, p := range nominatedPods { - if util.GetPodPriority(p) >= podPriority { + if util.GetPodPriority(p) >= util.GetPodPriority(pod) && p.UID != pod.UID { nodeInfoOut.AddPod(p) if metaOut != nil { metaOut.AddPod(p, nodeInfoOut) @@ -516,9 +536,8 @@ func podFitsOnNode( meta algorithm.PredicateMetadata, info *schedulercache.NodeInfo, predicateFuncs map[string]algorithm.FitPredicate, - cache schedulercache.Cache, nodeCache *equivalence.NodeCache, - queue SchedulingQueue, + queue internalqueue.SchedulingQueue, alwaysCheckAllPredicates bool, equivClass *equivalence.Class, ) (bool, []algorithm.PredicateFailureReason, error) { @@ -550,7 +569,7 @@ func podFitsOnNode( metaToUse := meta nodeInfoToUse := info if i == 0 { - podsAdded, metaToUse, nodeInfoToUse = addNominatedPods(util.GetPodPriority(pod), meta, info, queue) + podsAdded, metaToUse, nodeInfoToUse = addNominatedPods(pod, meta, info, queue) } else if !podsAdded || len(failedPredicates) != 0 { break } @@ -558,7 +577,7 @@ func podFitsOnNode( // TODO(bsalamat): consider using eCache and adding proper eCache invalidations // when pods are nominated or their nominations change. eCacheAvailable = equivClass != nil && nodeCache != nil && !podsAdded - for _, predicateKey := range predicates.Ordering() { + for predicateID, predicateKey := range predicates.Ordering() { var ( fit bool reasons []algorithm.PredicateFailureReason @@ -567,7 +586,7 @@ func podFitsOnNode( //TODO (yastij) : compute average predicate restrictiveness to export it as Prometheus metric if predicate, exist := predicateFuncs[predicateKey]; exist { if eCacheAvailable { - fit, reasons, err = nodeCache.RunPredicate(predicate, predicateKey, pod, metaToUse, nodeInfoToUse, equivClass, cache) + fit, reasons, err = nodeCache.RunPredicate(predicate, predicateKey, predicateID, pod, metaToUse, nodeInfoToUse, equivClass) } else { fit, reasons, err = predicate(pod, metaToUse, nodeInfoToUse) } @@ -580,7 +599,7 @@ func podFitsOnNode( failedPredicates = append(failedPredicates, reasons...) // if alwaysCheckAllPredicates is false, short circuit all predicates when one predicate fails. if !alwaysCheckAllPredicates { - glog.V(5).Infoln("since alwaysCheckAllPredicates has not been set, the predicate " + + klog.V(5).Infoln("since alwaysCheckAllPredicates has not been set, the predicate " + "evaluation is short circuited and there are chances " + "of other predicates failing as well.") break @@ -634,37 +653,38 @@ func PrioritizeNodes( results := make([]schedulerapi.HostPriorityList, len(priorityConfigs), len(priorityConfigs)) - for i, priorityConfig := range priorityConfigs { - if priorityConfig.Function != nil { - // DEPRECATED - wg.Add(1) - go func(index int, config algorithm.PriorityConfig) { - defer wg.Done() - var err error - results[index], err = config.Function(pod, nodeNameToInfo, nodes) - if err != nil { - appendError(err) - } - }(i, priorityConfig) - } else { + // DEPRECATED: we can remove this when all priorityConfigs implement the + // Map-Reduce pattern. 
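The `addNominatedPods` change above does two things: the priority threshold is now derived from the pod itself, and the pod being scheduled is excluded by UID so it is never counted against its own nominated node. A small sketch of that filter in isolation; the helper, priorities, and UIDs are made up:

```go
package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/scheduler/util"
)

// nominatedPodsToAdd isolates the condition used by addNominatedPods above.
func nominatedPodsToAdd(pod *v1.Pod, nominated []*v1.Pod) []*v1.Pod {
	var out []*v1.Pod
	for _, p := range nominated {
		if util.GetPodPriority(p) >= util.GetPodPriority(pod) && p.UID != pod.UID {
			out = append(out, p)
		}
	}
	return out
}

func prio(v int32) *int32 { return &v }

func main() {
	self := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: "self"}, Spec: v1.PodSpec{Priority: prio(10)}}
	nominated := []*v1.Pod{
		self, // the pod being scheduled: excluded by the UID check
		{ObjectMeta: metav1.ObjectMeta{UID: "hi"}, Spec: v1.PodSpec{Priority: prio(20)}},
		{ObjectMeta: metav1.ObjectMeta{UID: "lo"}, Spec: v1.PodSpec{Priority: prio(1)}},
	}
	fmt.Println(len(nominatedPodsToAdd(self, nominated))) // 1: only the higher-priority pod
}
```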
+ workqueue.ParallelizeUntil(context.TODO(), 16, len(priorityConfigs), func(i int) { + priorityConfig := priorityConfigs[i] + if priorityConfig.Function == nil { results[i] = make(schedulerapi.HostPriorityList, len(nodes)) + return } - } - processNode := func(index int) { - nodeInfo := nodeNameToInfo[nodes[index].Name] + var err error - for i := range priorityConfigs { - if priorityConfigs[i].Function != nil { + results[i], err = priorityConfig.Function(pod, nodeNameToInfo, nodes) + if err != nil { + appendError(err) + } + }) + + workqueue.ParallelizeUntil(context.TODO(), 16, len(nodes), func(index int) { + nodeInfo := nodeNameToInfo[nodes[index].Name] + for i, priorityConfig := range priorityConfigs { + if priorityConfig.Function != nil { continue } + + var err error results[i][index], err = priorityConfigs[i].Map(pod, meta, nodeInfo) if err != nil { appendError(err) - return + results[i][index].Host = nodes[index].Name } } - } - workqueue.Parallelize(16, len(nodes), processNode) + }) + for i, priorityConfig := range priorityConfigs { if priorityConfig.Reduce == nil { continue @@ -675,9 +695,9 @@ func PrioritizeNodes( if err := config.Reduce(pod, meta, nodeNameToInfo, results[index]); err != nil { appendError(err) } - if glog.V(10) { + if klog.V(10) { for _, hostPriority := range results[index] { - glog.Infof("%v -> %v: %v, Score: (%d)", pod.Name, hostPriority.Host, config.Name, hostPriority.Score) + klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), hostPriority.Host, config.Name, hostPriority.Score) } } }(i, priorityConfig) @@ -715,6 +735,9 @@ func PrioritizeNodes( mu.Lock() for i := range *prioritizedList { host, score := (*prioritizedList)[i].Host, (*prioritizedList)[i].Score + if klog.V(10) { + klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), host, ext.Name(), score) + } combinedScores[host] += score * weight } mu.Unlock() @@ -727,9 +750,9 @@ func PrioritizeNodes( } } - if glog.V(10) { + if klog.V(10) { for i := range result { - glog.V(10).Infof("Host %s => Score %d", result[i].Host, result[i].Score) + klog.Infof("Host %s => Score %d", result[i].Host, result[i].Score) } } return result, nil @@ -858,7 +881,7 @@ func pickOneNodeForPreemption(nodesToVictims map[*v1.Node]*schedulerapi.Victims) if lenNodes2 > 0 { return minNodes2[0] } - glog.Errorf("Error in logic of node scoring for preemption. We should never reach here!") + klog.Errorf("Error in logic of node scoring for preemption. 
We should never reach here!") return nil } @@ -869,10 +892,9 @@ func selectNodesForPreemption(pod *v1.Pod, potentialNodes []*v1.Node, predicates map[string]algorithm.FitPredicate, metadataProducer algorithm.PredicateMetadataProducer, - queue SchedulingQueue, + queue internalqueue.SchedulingQueue, pdbs []*policy.PodDisruptionBudget, ) (map[*v1.Node]*schedulerapi.Victims, error) { - nodeToVictims := map[*v1.Node]*schedulerapi.Victims{} var resultLock sync.Mutex @@ -895,7 +917,7 @@ func selectNodesForPreemption(pod *v1.Pod, resultLock.Unlock() } } - workqueue.Parallelize(16, len(potentialNodes), checkNode) + workqueue.ParallelizeUntil(context.TODO(), 16, len(potentialNodes), checkNode) return nodeToVictims, nil } @@ -958,9 +980,12 @@ func selectVictimsOnNode( meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo, fitPredicates map[string]algorithm.FitPredicate, - queue SchedulingQueue, + queue internalqueue.SchedulingQueue, pdbs []*policy.PodDisruptionBudget, ) ([]*v1.Pod, int, bool) { + if nodeInfo == nil { + return nil, 0, false + } potentialVictims := util.SortableList{CompFunc: util.HigherPriorityPod} nodeInfoCopy := nodeInfo.Clone() @@ -991,9 +1016,9 @@ func selectVictimsOnNode( // that we should check is if the "pod" is failing to schedule due to pod affinity // failure. // TODO(bsalamat): Consider checking affinity to lower priority pods if feasible with reasonable performance. - if fits, _, err := podFitsOnNode(pod, meta, nodeInfoCopy, fitPredicates, nil, nil, queue, false, nil); !fits { + if fits, _, err := podFitsOnNode(pod, meta, nodeInfoCopy, fitPredicates, nil, queue, false, nil); !fits { if err != nil { - glog.Warningf("Encountered error while selecting victims on node %v: %v", nodeInfo.Node().Name, err) + klog.Warningf("Encountered error while selecting victims on node %v: %v", nodeInfo.Node().Name, err) } return nil, 0, false } @@ -1005,11 +1030,11 @@ func selectVictimsOnNode( violatingVictims, nonViolatingVictims := filterPodsWithPDBViolation(potentialVictims.Items, pdbs) reprievePod := func(p *v1.Pod) bool { addPod(p) - fits, _, _ := podFitsOnNode(pod, meta, nodeInfoCopy, fitPredicates, nil, nil, queue, false, nil) + fits, _, _ := podFitsOnNode(pod, meta, nodeInfoCopy, fitPredicates, nil, queue, false, nil) if !fits { removePod(p) victims = append(victims, p) - glog.V(5).Infof("Pod %v is a potential preemption victim on node %v.", p.Name, nodeInfo.Node().Name) + klog.V(5).Infof("Pod %v is a potential preemption victim on node %v.", p.Name, nodeInfo.Node().Name) } return fits } @@ -1064,7 +1089,7 @@ func nodesWherePreemptionMightHelp(nodes []*v1.Node, failedPredicatesMap FailedP } } if !found || !unresolvableReasonExist { - glog.V(3).Infof("Node %v is a potential node for preemption.", node.Name) + klog.V(3).Infof("Node %v is a potential node for preemption.", node.Name) potentialNodes = append(potentialNodes, node) } } @@ -1120,9 +1145,9 @@ func podPassesBasicChecks(pod *v1.Pod, pvcLister corelisters.PersistentVolumeCla // NewGenericScheduler creates a genericScheduler object. 
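Throughout this file, `workqueue.Parallelize(workers, pieces, fn)` becomes `workqueue.ParallelizeUntil(context.TODO(), workers, pieces, fn)`: the same fan-out, but cancellable through a context. A minimal sketch; the counting body merely stands in for `checkNode` or the per-node scoring closures:

```go
package main

import (
	"context"
	"fmt"
	"sync/atomic"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	var processed int64
	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()

	// 16 workers over N pieces, as in findNodesThatFit, PrioritizeNodes and
	// selectNodesForPreemption.
	workqueue.ParallelizeUntil(ctx, 16, 1000, func(i int) {
		atomic.AddInt64(&processed, 1)
	})

	fmt.Println(atomic.LoadInt64(&processed)) // 1000
}
```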
func NewGenericScheduler( - cache schedulercache.Cache, + cache schedulerinternalcache.Cache, eCache *equivalence.Cache, - podQueue SchedulingQueue, + podQueue internalqueue.SchedulingQueue, predicates map[string]algorithm.FitPredicate, predicateMetaProducer algorithm.PredicateMetadataProducer, prioritizers []algorithm.PriorityConfig, @@ -1130,6 +1155,7 @@ func NewGenericScheduler( extenders []algorithm.SchedulerExtender, volumeBinder *volumebinder.VolumeBinder, pvcLister corelisters.PersistentVolumeClaimLister, + pdbLister algorithm.PDBLister, alwaysCheckAllPredicates bool, disablePreemption bool, percentageOfNodesToScore int32, @@ -1146,6 +1172,7 @@ func NewGenericScheduler( cachedNodeInfoMap: make(map[string]*schedulercache.NodeInfo), volumeBinder: volumeBinder, pvcLister: pvcLister, + pdbLister: pdbLister, alwaysCheckAllPredicates: alwaysCheckAllPredicates, disablePreemption: disablePreemption, percentageOfNodesToScore: percentageOfNodesToScore, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/factory/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/factory/BUILD index 6565076c9b91..7be893b06067 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/factory/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/factory/BUILD @@ -1,39 +1,33 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = [ - "cache_comparer.go", "factory.go", "plugins.go", "signal.go", "signal_windows.go", ], importpath = "k8s.io/kubernetes/pkg/scheduler/factory", + visibility = ["//visibility:public"], deps = [ "//pkg/api/v1/pod:go_default_library", "//pkg/apis/core/helper:go_default_library", "//pkg/features:go_default_library", "//pkg/kubelet/apis:go_default_library", - "//pkg/scheduler:go_default_library", "//pkg/scheduler/algorithm:go_default_library", "//pkg/scheduler/algorithm/predicates:go_default_library", "//pkg/scheduler/algorithm/priorities:go_default_library", "//pkg/scheduler/api:go_default_library", "//pkg/scheduler/api/validation:go_default_library", - "//pkg/scheduler/cache:go_default_library", "//pkg/scheduler/core:go_default_library", "//pkg/scheduler/core/equivalence:go_default_library", + "//pkg/scheduler/internal/cache:go_default_library", + "//pkg/scheduler/internal/cache/debugger:go_default_library", + "//pkg/scheduler/internal/queue:go_default_library", "//pkg/scheduler/util:go_default_library", "//pkg/scheduler/volumebinder:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/policy/v1beta1:go_default_library", "//staging/src/k8s.io/api/storage/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -43,6 +37,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/informers/apps/v1:go_default_library", "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library", @@ -54,41 +49,39 @@ go_library( 
"//staging/src/k8s.io/client-go/listers/policy/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//staging/src/k8s.io/client-go/tools/record:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) go_test( name = "go_default_test", srcs = [ - "cache_comparer_test.go", "factory_test.go", "plugins_test.go", ], embed = [":go_default_library"], deps = [ "//pkg/api/testing:go_default_library", - "//pkg/scheduler:go_default_library", "//pkg/scheduler/algorithm:go_default_library", "//pkg/scheduler/algorithm/priorities:go_default_library", "//pkg/scheduler/api:go_default_library", "//pkg/scheduler/api/latest:go_default_library", "//pkg/scheduler/cache:go_default_library", - "//pkg/scheduler/core:go_default_library", + "//pkg/scheduler/internal/cache/fake:go_default_library", + "//pkg/scheduler/internal/queue:go_default_library", "//pkg/scheduler/testing:go_default_library", "//pkg/scheduler/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/policy/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//staging/src/k8s.io/client-go/rest:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake:go_default_library", + "//staging/src/k8s.io/client-go/testing:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", - "//staging/src/k8s.io/client-go/util/testing:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", ], ) @@ -104,4 +97,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/factory/cache_comparer.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/factory/cache_comparer.go deleted file mode 100644 index fadd7be2c991..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/factory/cache_comparer.go +++ /dev/null @@ -1,161 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package factory - -import ( - "sort" - "strings" - - "github.com/golang/glog" - "k8s.io/api/core/v1" - policy "k8s.io/api/policy/v1beta1" - "k8s.io/apimachinery/pkg/labels" - corelisters "k8s.io/client-go/listers/core/v1" - v1beta1 "k8s.io/client-go/listers/policy/v1beta1" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" - "k8s.io/kubernetes/pkg/scheduler/core" -) - -type cacheComparer struct { - nodeLister corelisters.NodeLister - podLister corelisters.PodLister - pdbLister v1beta1.PodDisruptionBudgetLister - cache schedulercache.Cache - podQueue core.SchedulingQueue - - compareStrategy -} - -func (c *cacheComparer) Compare() error { - glog.V(3).Info("cache comparer started") - defer glog.V(3).Info("cache comparer finished") - - nodes, err := c.nodeLister.List(labels.Everything()) - if err != nil { - return err - } - - pods, err := c.podLister.List(labels.Everything()) - if err != nil { - return err - } - - pdbs, err := c.pdbLister.List(labels.Everything()) - if err != nil { - return err - } - - snapshot := c.cache.Snapshot() - - waitingPods := c.podQueue.WaitingPods() - - if missed, redundant := c.CompareNodes(nodes, snapshot.Nodes); len(missed)+len(redundant) != 0 { - glog.Warningf("cache mismatch: missed nodes: %s; redundant nodes: %s", missed, redundant) - } - - if missed, redundant := c.ComparePods(pods, waitingPods, snapshot.Nodes); len(missed)+len(redundant) != 0 { - glog.Warningf("cache mismatch: missed pods: %s; redundant pods: %s", missed, redundant) - } - - if missed, redundant := c.ComparePdbs(pdbs, snapshot.Pdbs); len(missed)+len(redundant) != 0 { - glog.Warningf("cache mismatch: missed pdbs: %s; redundant pdbs: %s", missed, redundant) - } - - return nil -} - -type compareStrategy struct { -} - -func (c compareStrategy) CompareNodes(nodes []*v1.Node, nodeinfos map[string]*schedulercache.NodeInfo) (missed, redundant []string) { - actual := []string{} - for _, node := range nodes { - actual = append(actual, node.Name) - } - - cached := []string{} - for nodeName := range nodeinfos { - cached = append(cached, nodeName) - } - - return compareStrings(actual, cached) -} - -func (c compareStrategy) ComparePods(pods, waitingPods []*v1.Pod, nodeinfos map[string]*schedulercache.NodeInfo) (missed, redundant []string) { - actual := []string{} - for _, pod := range pods { - actual = append(actual, string(pod.UID)) - } - - cached := []string{} - for _, nodeinfo := range nodeinfos { - for _, pod := range nodeinfo.Pods() { - cached = append(cached, string(pod.UID)) - } - } - for _, pod := range waitingPods { - cached = append(cached, string(pod.UID)) - } - - return compareStrings(actual, cached) -} - -func (c compareStrategy) ComparePdbs(pdbs []*policy.PodDisruptionBudget, pdbCache map[string]*policy.PodDisruptionBudget) (missed, redundant []string) { - actual := []string{} - for _, pdb := range pdbs { - actual = append(actual, string(pdb.UID)) - } - - cached := []string{} - for pdbUID := range pdbCache { - cached = append(cached, pdbUID) - } - - return compareStrings(actual, cached) -} - -func compareStrings(actual, cached []string) (missed, redundant []string) { - missed, redundant = []string{}, []string{} - - sort.Strings(actual) - sort.Strings(cached) - - compare := func(i, j int) int { - if i == len(actual) { - return 1 - } else if j == len(cached) { - return -1 - } - return strings.Compare(actual[i], cached[j]) - } - - for i, j := 0, 0; i < len(actual) || j < len(cached); { - switch compare(i, j) { - case 0: - i++ - j++ - case -1: - missed = append(missed, actual[i]) - 
i++ - case 1: - redundant = append(redundant, cached[j]) - j++ - } - } - - return -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/factory/factory.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/factory/factory.go index dfffc75e055c..aa420bcb173b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/factory/factory.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/factory/factory.go @@ -25,10 +25,9 @@ import ( "reflect" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" - "k8s.io/api/policy/v1beta1" storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -38,6 +37,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" appsinformers "k8s.io/client-go/informers/apps/v1" coreinformers "k8s.io/client-go/informers/core/v1" @@ -49,18 +49,20 @@ import ( policylisters "k8s.io/client-go/listers/policy/v1beta1" storagelisters "k8s.io/client-go/listers/storage/v1" "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/apis/core/helper" "k8s.io/kubernetes/pkg/features" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" - "k8s.io/kubernetes/pkg/scheduler" "k8s.io/kubernetes/pkg/scheduler/algorithm" "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" "k8s.io/kubernetes/pkg/scheduler/api/validation" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" "k8s.io/kubernetes/pkg/scheduler/core" "k8s.io/kubernetes/pkg/scheduler/core/equivalence" + schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" + cachedebugger "k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger" + internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" "k8s.io/kubernetes/pkg/scheduler/util" "k8s.io/kubernetes/pkg/scheduler/volumebinder" ) @@ -78,11 +80,106 @@ var ( maxPDVolumeCountPredicateKeys = []string{predicates.MaxGCEPDVolumeCountPred, predicates.MaxAzureDiskVolumeCountPred, predicates.MaxEBSVolumeCountPred} ) +// Binder knows how to write a binding. +type Binder interface { + Bind(binding *v1.Binding) error +} + +// PodConditionUpdater updates the condition of a pod based on the passed +// PodCondition +type PodConditionUpdater interface { + Update(pod *v1.Pod, podCondition *v1.PodCondition) error +} + +// Config is an implementation of the Scheduler's configured input data. +// TODO over time we should make this struct a hidden implementation detail of the scheduler. +type Config struct { + // It is expected that changes made via SchedulerCache will be observed + // by NodeLister and Algorithm. + SchedulerCache schedulerinternalcache.Cache + // Ecache is used for optimistically invalid affected cache items after + // successfully binding a pod + Ecache *equivalence.Cache + NodeLister algorithm.NodeLister + Algorithm algorithm.ScheduleAlgorithm + GetBinder func(pod *v1.Pod) Binder + // PodConditionUpdater is used only in case of scheduling errors. If we succeed + // with scheduling, PodScheduled condition will be updated in apiserver in /bind + // handler so that binding and setting PodCondition it is atomic. + PodConditionUpdater PodConditionUpdater + // PodPreemptor is used to evict pods and update pod annotations. 
+ PodPreemptor PodPreemptor + + // NextPod should be a function that blocks until the next pod + // is available. We don't use a channel for this, because scheduling + // a pod may take some amount of time and we don't want pods to get + // stale while they sit in a channel. + NextPod func() *v1.Pod + + // WaitForCacheSync waits for scheduler cache to populate. + // It returns true if it was successful, false if the controller should shutdown. + WaitForCacheSync func() bool + + // Error is called if there is an error. It is passed the pod in + // question, and the error + Error func(*v1.Pod, error) + + // Recorder is the EventRecorder to use + Recorder record.EventRecorder + + // Close this to shut down the scheduler. + StopEverything <-chan struct{} + + // VolumeBinder handles PVC/PV binding for the pod. + VolumeBinder *volumebinder.VolumeBinder + + // Disable pod preemption or not. + DisablePreemption bool + + // SchedulingQueue holds pods to be scheduled + SchedulingQueue internalqueue.SchedulingQueue +} + +// PodPreemptor has methods needed to delete a pod and to update +// annotations of the preemptor pod. +type PodPreemptor interface { + GetUpdatedPod(pod *v1.Pod) (*v1.Pod, error) + DeletePod(pod *v1.Pod) error + SetNominatedNodeName(pod *v1.Pod, nominatedNode string) error + RemoveNominatedNodeName(pod *v1.Pod) error +} + +// Configurator defines I/O, caching, and other functionality needed to +// construct a new scheduler. An implementation of this can be seen in +// factory.go. +type Configurator interface { + // Exposed for testing + GetHardPodAffinitySymmetricWeight() int32 + // Exposed for testing + MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue internalqueue.SchedulingQueue) func(pod *v1.Pod, err error) + + // Predicate related accessors to be exposed for use by k8s.io/autoscaler/cluster-autoscaler + GetPredicateMetadataProducer() (algorithm.PredicateMetadataProducer, error) + GetPredicates(predicateKeys sets.String) (map[string]algorithm.FitPredicate, error) + + // Needs to be exposed for things like integration tests where we want to make fake nodes. + GetNodeLister() corelisters.NodeLister + // Exposed for testing + GetClient() clientset.Interface + // Exposed for testing + GetScheduledPodLister() corelisters.PodLister + + Create() (*Config, error) + CreateFromProvider(providerName string) (*Config, error) + CreateFromConfig(policy schedulerapi.Policy) (*Config, error) + CreateFromKeys(predicateKeys, priorityKeys sets.String, extenders []algorithm.SchedulerExtender) (*Config, error) +} + // configFactory is the default implementation of the scheduler.Configurator interface. type configFactory struct { client clientset.Interface // queue for pods that need scheduling - podQueue core.SchedulingQueue + podQueue internalqueue.SchedulingQueue // a means to list all known scheduled pods. scheduledPodLister corelisters.PodLister // a means to list all known scheduled pods and pods assumed to have been scheduled. @@ -107,11 +204,11 @@ type configFactory struct { storageClassLister storagelisters.StorageClassLister // Close this to stop all reflectors - StopEverything chan struct{} + StopEverything <-chan struct{} scheduledPodsHasSynced cache.InformerSynced - schedulerCache schedulercache.Cache + schedulerCache schedulerinternalcache.Cache // SchedulerName of a scheduler is used to select which pods will be // processed by this scheduler, based on pods's "spec.schedulerName". 
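The Config struct added in the hunk above is the contract that drives the scheduler: NextPod blocks until work is available, Error handles scheduling failures, WaitForCacheSync gates startup, and StopEverything signals shutdown. A minimal, dependency-free sketch of a loop driving that contract (`Pod`, `run`, and all names here are illustrative stand-ins, not scheduler code; the real Config works with `*v1.Pod` and carries more fields):

```go
package main

import "fmt"

// Pod is a stand-in for *v1.Pod.
type Pod struct{ Namespace, Name string }

// Config mirrors the shape of the factory.Config contract described above.
type Config struct {
	WaitForCacheSync func() bool
	NextPod          func() *Pod
	Error            func(*Pod, error)
	StopEverything   <-chan struct{}
}

func run(c Config, scheduleOne func(*Pod) error) {
	if !c.WaitForCacheSync() {
		return // controller is shutting down
	}
	for {
		select {
		case <-c.StopEverything:
			return
		default:
		}
		pod := c.NextPod() // blocks until a pod is available
		if err := scheduleOne(pod); err != nil {
			c.Error(pod, err) // real implementations requeue with backoff
		}
	}
}

func main() {
	stop := make(chan struct{})
	pods := []*Pod{{"default", "a"}, {"default", "b"}}
	cfg := Config{
		WaitForCacheSync: func() bool { return true },
		NextPod: func() *Pod {
			p := pods[0]
			pods = pods[1:]
			if len(pods) == 0 {
				close(stop) // pretend a shutdown signal arrives after the last pod
			}
			return p
		},
		Error:          func(p *Pod, err error) { fmt.Println("failed:", p.Name, err) },
		StopEverything: stop,
	}
	run(cfg, func(p *Pod) error {
		fmt.Println("scheduled", p.Namespace+"/"+p.Name)
		return nil
	})
}
```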
@@ -131,7 +228,7 @@ type configFactory struct { // Handles volume binding decisions volumeBinder *volumebinder.VolumeBinder - // always check all predicates even if the middle of one predicate fails. + // Always check all predicates even if the middle of one predicate fails. alwaysCheckAllPredicates bool // Disable pod preemption or not. @@ -160,13 +257,17 @@ type ConfigFactoryArgs struct { DisablePreemption bool PercentageOfNodesToScore int32 BindTimeoutSeconds int64 + StopCh <-chan struct{} } -// NewConfigFactory initializes the default implementation of a Configurator To encourage eventual privatization of the struct type, we only +// NewConfigFactory initializes the default implementation of a Configurator. To encourage eventual privatization of the struct type, we only // return the interface. -func NewConfigFactory(args *ConfigFactoryArgs) scheduler.Configurator { - stopEverything := make(chan struct{}) - schedulerCache := schedulercache.New(30*time.Second, stopEverything) +func NewConfigFactory(args *ConfigFactoryArgs) Configurator { + stopEverything := args.StopCh + if stopEverything == nil { + stopEverything = wait.NeverStop + } + schedulerCache := schedulerinternalcache.New(30*time.Second, stopEverything) // storageClassInformer is only enabled through VolumeScheduling feature gate var storageClassLister storagelisters.StorageClassLister @@ -176,7 +277,8 @@ func NewConfigFactory(args *ConfigFactoryArgs) scheduler.Configurator { c := &configFactory{ client: args.Client, podLister: schedulerCache, - podQueue: core.NewSchedulingQueue(), + podQueue: internalqueue.NewSchedulingQueue(), + nodeLister: args.NodeInformer.Lister(), pVLister: args.PvInformer.Lister(), pVCLister: args.PvcInformer.Lister(), serviceLister: args.ServiceInformer.Lister(), @@ -201,10 +303,10 @@ func NewConfigFactory(args *ConfigFactoryArgs) scheduler.Configurator { FilterFunc: func(obj interface{}) bool { switch t := obj.(type) { case *v1.Pod: - return assignedNonTerminatedPod(t) + return assignedPod(t) case cache.DeletedFinalStateUnknown: if pod, ok := t.Obj.(*v1.Pod); ok { - return assignedNonTerminatedPod(pod) + return assignedPod(pod) } runtime.HandleError(fmt.Errorf("unable to convert object %T to *v1.Pod in %T", obj, c)) return false @@ -226,10 +328,10 @@ func NewConfigFactory(args *ConfigFactoryArgs) scheduler.Configurator { FilterFunc: func(obj interface{}) bool { switch t := obj.(type) { case *v1.Pod: - return unassignedNonTerminatedPod(t) && responsibleForPod(t, args.SchedulerName) + return !assignedPod(t) && responsibleForPod(t, args.SchedulerName) case cache.DeletedFinalStateUnknown: if pod, ok := t.Obj.(*v1.Pod); ok { - return unassignedNonTerminatedPod(pod) && responsibleForPod(pod, args.SchedulerName) + return !assignedPod(pod) && responsibleForPod(pod, args.SchedulerName) } runtime.HandleError(fmt.Errorf("unable to convert object %T to *v1.Pod in %T", obj, c)) return false @@ -256,16 +358,6 @@ func NewConfigFactory(args *ConfigFactoryArgs) scheduler.Configurator { DeleteFunc: c.deleteNodeFromCache, }, ) - c.nodeLister = args.NodeInformer.Lister() - - args.PdbInformer.Informer().AddEventHandler( - cache.ResourceEventHandlerFuncs{ - AddFunc: c.addPDBToCache, - UpdateFunc: c.updatePDBInCache, - DeleteFunc: c.deletePDBFromCache, - }, - ) - c.pdbLister = args.PdbInformer.Lister() // On add and delete of PVs, it will affect equivalence cache items // related to persistent volume @@ -277,7 +369,6 @@ func NewConfigFactory(args *ConfigFactoryArgs) scheduler.Configurator { DeleteFunc: c.onPvDelete, }, ) - 
c.pVLister = args.PvInformer.Lister() // This is for MaxPDVolumeCountPredicate: add/delete PVC will affect counts of PV when it is bound. args.PvcInformer.Informer().AddEventHandler( @@ -287,7 +378,6 @@ func NewConfigFactory(args *ConfigFactoryArgs) scheduler.Configurator { DeleteFunc: c.onPvcDelete, }, ) - c.pVCLister = args.PvcInformer.Lister() // This is for ServiceAffinity: affected by the selector of the service is updated. // Also, if new service is added, equivalence cache will also become invalid since @@ -299,7 +389,6 @@ func NewConfigFactory(args *ConfigFactoryArgs) scheduler.Configurator { DeleteFunc: c.onServiceDelete, }, ) - c.serviceLister = args.ServiceInformer.Lister() // Existing equivalence cache should not be affected by add/delete RC/Deployment etc, // it only make sense when pod is scheduled or deleted @@ -317,13 +406,12 @@ func NewConfigFactory(args *ConfigFactoryArgs) scheduler.Configurator { } // Setup cache comparer - comparer := &cacheComparer{ - podLister: args.PodInformer.Lister(), - nodeLister: args.NodeInformer.Lister(), - pdbLister: args.PdbInformer.Lister(), - cache: c.schedulerCache, - podQueue: c.podQueue, - } + debugger := cachedebugger.New( + args.NodeInformer.Lister(), + args.PodInformer.Lister(), + c.schedulerCache, + c.podQueue, + ) ch := make(chan os.Signal, 1) signal.Notify(ch, compareSignal) @@ -332,9 +420,11 @@ func NewConfigFactory(args *ConfigFactoryArgs) scheduler.Configurator { for { select { case <-c.StopEverything: + c.podQueue.Close() return case <-ch: - comparer.Compare() + debugger.Comparer.Compare() + debugger.Dumper.DumpAll() } } }() @@ -385,7 +475,7 @@ func (c *configFactory) skipPodUpdate(pod *v1.Pod) bool { if !reflect.DeepEqual(assumedPodCopy, podCopy) { return false } - glog.V(3).Infof("Skipping pod %s/%s update", pod.Namespace, pod.Name) + klog.V(3).Infof("Skipping pod %s/%s update", pod.Namespace, pod.Name) return true } @@ -393,7 +483,7 @@ func (c *configFactory) onPvAdd(obj interface{}) { if c.enableEquivalenceClassCache { pv, ok := obj.(*v1.PersistentVolume) if !ok { - glog.Errorf("cannot convert to *v1.PersistentVolume: %v", obj) + klog.Errorf("cannot convert to *v1.PersistentVolume: %v", obj) return } c.invalidatePredicatesForPv(pv) @@ -411,12 +501,12 @@ func (c *configFactory) onPvUpdate(old, new interface{}) { if c.enableEquivalenceClassCache { newPV, ok := new.(*v1.PersistentVolume) if !ok { - glog.Errorf("cannot convert to *v1.PersistentVolume: %v", new) + klog.Errorf("cannot convert to *v1.PersistentVolume: %v", new) return } oldPV, ok := old.(*v1.PersistentVolume) if !ok { - glog.Errorf("cannot convert to *v1.PersistentVolume: %v", old) + klog.Errorf("cannot convert to *v1.PersistentVolume: %v", old) return } c.invalidatePredicatesForPvUpdate(oldPV, newPV) @@ -462,11 +552,11 @@ func (c *configFactory) onPvDelete(obj interface{}) { var ok bool pv, ok = t.Obj.(*v1.PersistentVolume) if !ok { - glog.Errorf("cannot convert to *v1.PersistentVolume: %v", t.Obj) + klog.Errorf("cannot convert to *v1.PersistentVolume: %v", t.Obj) return } default: - glog.Errorf("cannot convert to *v1.PersistentVolume: %v", t) + klog.Errorf("cannot convert to *v1.PersistentVolume: %v", t) return } c.invalidatePredicatesForPv(pv) @@ -513,7 +603,7 @@ func (c *configFactory) onPvcAdd(obj interface{}) { if c.enableEquivalenceClassCache { pvc, ok := obj.(*v1.PersistentVolumeClaim) if !ok { - glog.Errorf("cannot convert to *v1.PersistentVolumeClaim: %v", obj) + klog.Errorf("cannot convert to *v1.PersistentVolumeClaim: %v", obj) return } 
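The debugger wiring above replaces the old cacheComparer setup: a goroutine parks on a signal channel and, on each hit, runs both the cache comparer and the state dumper. A self-contained sketch of the same pattern (SIGUSR2 is an illustrative choice; the scheduler's `compareSignal` is platform-dependent):

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGUSR2)

	done := make(chan struct{})
	go func() {
		// Park until a signal arrives, then run the debug pass once.
		<-ch
		fmt.Println("comparing cached state against informers and dumping it...")
		close(done)
	}()

	// Normally you would run `kill -USR2 <pid>`; send the signal by hand here.
	ch <- syscall.SIGUSR2
	<-done
}
```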
c.invalidatePredicatesForPvc(pvc) @@ -529,12 +619,12 @@ func (c *configFactory) onPvcUpdate(old, new interface{}) { if c.enableEquivalenceClassCache { newPVC, ok := new.(*v1.PersistentVolumeClaim) if !ok { - glog.Errorf("cannot convert to *v1.PersistentVolumeClaim: %v", new) + klog.Errorf("cannot convert to *v1.PersistentVolumeClaim: %v", new) return } oldPVC, ok := old.(*v1.PersistentVolumeClaim) if !ok { - glog.Errorf("cannot convert to *v1.PersistentVolumeClaim: %v", old) + klog.Errorf("cannot convert to *v1.PersistentVolumeClaim: %v", old) return } c.invalidatePredicatesForPvcUpdate(oldPVC, newPVC) @@ -552,11 +642,11 @@ func (c *configFactory) onPvcDelete(obj interface{}) { var ok bool pvc, ok = t.Obj.(*v1.PersistentVolumeClaim) if !ok { - glog.Errorf("cannot convert to *v1.PersistentVolumeClaim: %v", t.Obj) + klog.Errorf("cannot convert to *v1.PersistentVolumeClaim: %v", t.Obj) return } default: - glog.Errorf("cannot convert to *v1.PersistentVolumeClaim: %v", t) + klog.Errorf("cannot convert to *v1.PersistentVolumeClaim: %v", t) return } c.invalidatePredicatesForPvc(pvc) @@ -605,7 +695,7 @@ func (c *configFactory) invalidatePredicatesForPvcUpdate(old, new *v1.Persistent func (c *configFactory) onStorageClassAdd(obj interface{}) { sc, ok := obj.(*storagev1.StorageClass) if !ok { - glog.Errorf("cannot convert to *storagev1.StorageClass: %v", obj) + klog.Errorf("cannot convert to *storagev1.StorageClass: %v", obj) return } @@ -630,11 +720,11 @@ func (c *configFactory) onStorageClassDelete(obj interface{}) { var ok bool sc, ok = t.Obj.(*storagev1.StorageClass) if !ok { - glog.Errorf("cannot convert to *storagev1.StorageClass: %v", t.Obj) + klog.Errorf("cannot convert to *storagev1.StorageClass: %v", t.Obj) return } default: - glog.Errorf("cannot convert to *storagev1.StorageClass: %v", t) + klog.Errorf("cannot convert to *storagev1.StorageClass: %v", t) return } c.invalidatePredicatesForStorageClass(sc) @@ -707,12 +797,12 @@ func (c *configFactory) GetScheduledPodLister() corelisters.PodLister { func (c *configFactory) addPodToCache(obj interface{}) { pod, ok := obj.(*v1.Pod) if !ok { - glog.Errorf("cannot convert to *v1.Pod: %v", obj) + klog.Errorf("cannot convert to *v1.Pod: %v", obj) return } if err := c.schedulerCache.AddPod(pod); err != nil { - glog.Errorf("scheduler cache AddPod failed: %v", err) + klog.Errorf("scheduler cache AddPod failed: %v", err) } c.podQueue.AssignedPodAdded(pod) @@ -724,20 +814,22 @@ func (c *configFactory) addPodToCache(obj interface{}) { func (c *configFactory) updatePodInCache(oldObj, newObj interface{}) { oldPod, ok := oldObj.(*v1.Pod) if !ok { - glog.Errorf("cannot convert oldObj to *v1.Pod: %v", oldObj) + klog.Errorf("cannot convert oldObj to *v1.Pod: %v", oldObj) return } newPod, ok := newObj.(*v1.Pod) if !ok { - glog.Errorf("cannot convert newObj to *v1.Pod: %v", newObj) + klog.Errorf("cannot convert newObj to *v1.Pod: %v", newObj) return } - // NOTE: Because the scheduler uses snapshots of schedulerCache and the live - // version of equivalencePodCache, updates must be written to schedulerCache - // before invalidating equivalencePodCache. + // NOTE: Updates must be written to scheduler cache before invalidating + // equivalence cache, because we could snapshot equivalence cache after the + // invalidation and then snapshot the cache itself. If the cache is + // snapshotted before updates are written, we would update equivalence + // cache with stale information which is based on snapshot of old cache. 
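The rewritten NOTE spells out an ordering invariant: the authoritative scheduler cache must be updated before the derived equivalence cache is invalidated, or a rebuild could be seeded from stale data. A toy, single-threaded sketch of that invariant (hypothetical names; the real code adds locking and snapshotting on top of this):

```go
package main

import "fmt"

type caches struct {
	main    map[string]string // authoritative scheduler cache
	derived map[string]string // lazily rebuilt equivalence results
}

func (c *caches) update(key, val string) {
	c.main[key] = val      // 1. write the authoritative cache first...
	delete(c.derived, key) // 2. ...then invalidate the derived entry
}

func (c *caches) lookup(key string) string {
	if v, ok := c.derived[key]; ok {
		return v
	}
	v := c.main[key] // rebuild from the now up-to-date main cache
	c.derived[key] = v
	return v
}

func main() {
	c := &caches{
		main:    map[string]string{"pod-a": "fits node-1"},
		derived: map[string]string{"pod-a": "fits node-1"},
	}
	c.update("pod-a", "does not fit node-1")
	fmt.Println(c.lookup("pod-a")) // the fresh result, never the stale one
}
```

If the two steps ran in the other order, a concurrent reader could repopulate the derived entry from the not-yet-updated main cache and the stale value would stick until the next invalidation.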
if err := c.schedulerCache.UpdatePod(oldPod, newPod); err != nil { - glog.Errorf("scheduler cache UpdatePod failed: %v", err) + klog.Errorf("scheduler cache UpdatePod failed: %v", err) } c.invalidateCachedPredicatesOnUpdatePod(newPod, oldPod) @@ -815,18 +907,20 @@ func (c *configFactory) deletePodFromCache(obj interface{}) { var ok bool pod, ok = t.Obj.(*v1.Pod) if !ok { - glog.Errorf("cannot convert to *v1.Pod: %v", t.Obj) + klog.Errorf("cannot convert to *v1.Pod: %v", t.Obj) return } default: - glog.Errorf("cannot convert to *v1.Pod: %v", t) + klog.Errorf("cannot convert to *v1.Pod: %v", t) return } - // NOTE: Because the scheduler uses snapshots of schedulerCache and the live - // version of equivalencePodCache, updates must be written to schedulerCache - // before invalidating equivalencePodCache. + // NOTE: Updates must be written to scheduler cache before invalidating + // equivalence cache, because we could snapshot equivalence cache after the + // invalidation and then snapshot the cache itself. If the cache is + // snapshotted before updates are written, we would update equivalence + // cache with stale information which is based on snapshot of old cache. if err := c.schedulerCache.RemovePod(pod); err != nil { - glog.Errorf("scheduler cache RemovePod failed: %v", err) + klog.Errorf("scheduler cache RemovePod failed: %v", err) } c.invalidateCachedPredicatesOnDeletePod(pod) @@ -857,19 +951,21 @@ func (c *configFactory) invalidateCachedPredicatesOnDeletePod(pod *v1.Pod) { func (c *configFactory) addNodeToCache(obj interface{}) { node, ok := obj.(*v1.Node) if !ok { - glog.Errorf("cannot convert to *v1.Node: %v", obj) + klog.Errorf("cannot convert to *v1.Node: %v", obj) return } - if err := c.schedulerCache.AddNode(node); err != nil { - glog.Errorf("scheduler cache AddNode failed: %v", err) - } - + // NOTE: Because the scheduler uses equivalence cache for nodes, we need + // to create it before adding node into scheduler cache. if c.enableEquivalenceClassCache { // GetNodeCache() will lazily create NodeCache for given node if it does not exist. c.equivalencePodCache.GetNodeCache(node.GetName()) } + if err := c.schedulerCache.AddNode(node); err != nil { + klog.Errorf("scheduler cache AddNode failed: %v", err) + } + c.podQueue.MoveAllToActiveQueue() // NOTE: add a new node does not affect existing predicates in equivalence cache } @@ -877,20 +973,22 @@ func (c *configFactory) addNodeToCache(obj interface{}) { func (c *configFactory) updateNodeInCache(oldObj, newObj interface{}) { oldNode, ok := oldObj.(*v1.Node) if !ok { - glog.Errorf("cannot convert oldObj to *v1.Node: %v", oldObj) + klog.Errorf("cannot convert oldObj to *v1.Node: %v", oldObj) return } newNode, ok := newObj.(*v1.Node) if !ok { - glog.Errorf("cannot convert newObj to *v1.Node: %v", newObj) + klog.Errorf("cannot convert newObj to *v1.Node: %v", newObj) return } - // NOTE: Because the scheduler uses snapshots of schedulerCache and the live - // version of equivalencePodCache, updates must be written to schedulerCache - // before invalidating equivalencePodCache. + // NOTE: Updates must be written to scheduler cache before invalidating + // equivalence cache, because we could snapshot equivalence cache after the + // invalidation and then snapshot the cache itself. If the cache is + // snapshotted before updates are written, we would update equivalence + // cache with stale information which is based on snapshot of old cache. 
if err := c.schedulerCache.UpdateNode(oldNode, newNode); err != nil { - glog.Errorf("scheduler cache UpdateNode failed: %v", err) + klog.Errorf("scheduler cache UpdateNode failed: %v", err) } c.invalidateCachedPredicatesOnNodeUpdate(newNode, oldNode) @@ -924,11 +1022,11 @@ func (c *configFactory) invalidateCachedPredicatesOnNodeUpdate(newNode *v1.Node, oldTaints, oldErr := helper.GetTaintsFromNodeAnnotations(oldNode.GetAnnotations()) if oldErr != nil { - glog.Errorf("Failed to get taints from old node annotation for equivalence cache") + klog.Errorf("Failed to get taints from old node annotation for equivalence cache") } newTaints, newErr := helper.GetTaintsFromNodeAnnotations(newNode.GetAnnotations()) if newErr != nil { - glog.Errorf("Failed to get taints from new node annotation for equivalence cache") + klog.Errorf("Failed to get taints from new node annotation for equivalence cache") } if !reflect.DeepEqual(oldTaints, newTaints) || !reflect.DeepEqual(oldNode.Spec.Taints, newNode.Spec.Taints) { @@ -975,93 +1073,44 @@ func (c *configFactory) deleteNodeFromCache(obj interface{}) { var ok bool node, ok = t.Obj.(*v1.Node) if !ok { - glog.Errorf("cannot convert to *v1.Node: %v", t.Obj) + klog.Errorf("cannot convert to *v1.Node: %v", t.Obj) return } default: - glog.Errorf("cannot convert to *v1.Node: %v", t) + klog.Errorf("cannot convert to *v1.Node: %v", t) return } - // NOTE: Because the scheduler uses snapshots of schedulerCache and the live - // version of equivalencePodCache, updates must be written to schedulerCache - // before invalidating equivalencePodCache. + // NOTE: Updates must be written to scheduler cache before invalidating + // equivalence cache, because we could snapshot equivalence cache after the + // invalidation and then snapshot the cache itself. If the cache is + // snapshotted before updates are written, we would update equivalence + // cache with stale information which is based on snapshot of old cache. 
if err := c.schedulerCache.RemoveNode(node); err != nil { - glog.Errorf("scheduler cache RemoveNode failed: %v", err) + klog.Errorf("scheduler cache RemoveNode failed: %v", err) } if c.enableEquivalenceClassCache { c.equivalencePodCache.InvalidateAllPredicatesOnNode(node.GetName()) } } -func (c *configFactory) addPDBToCache(obj interface{}) { - pdb, ok := obj.(*v1beta1.PodDisruptionBudget) - if !ok { - glog.Errorf("cannot convert to *v1beta1.PodDisruptionBudget: %v", obj) - return - } - - if err := c.schedulerCache.AddPDB(pdb); err != nil { - glog.Errorf("scheduler cache AddPDB failed: %v", err) - } -} - -func (c *configFactory) updatePDBInCache(oldObj, newObj interface{}) { - oldPDB, ok := oldObj.(*v1beta1.PodDisruptionBudget) - if !ok { - glog.Errorf("cannot convert oldObj to *v1beta1.PodDisruptionBudget: %v", oldObj) - return - } - newPDB, ok := newObj.(*v1beta1.PodDisruptionBudget) - if !ok { - glog.Errorf("cannot convert newObj to *v1beta1.PodDisruptionBudget: %v", newObj) - return - } - - if err := c.schedulerCache.UpdatePDB(oldPDB, newPDB); err != nil { - glog.Errorf("scheduler cache UpdatePDB failed: %v", err) - } -} - -func (c *configFactory) deletePDBFromCache(obj interface{}) { - var pdb *v1beta1.PodDisruptionBudget - switch t := obj.(type) { - case *v1beta1.PodDisruptionBudget: - pdb = t - case cache.DeletedFinalStateUnknown: - var ok bool - pdb, ok = t.Obj.(*v1beta1.PodDisruptionBudget) - if !ok { - glog.Errorf("cannot convert to *v1beta1.PodDisruptionBudget: %v", t.Obj) - return - } - default: - glog.Errorf("cannot convert to *v1beta1.PodDisruptionBudget: %v", t) - return - } - if err := c.schedulerCache.RemovePDB(pdb); err != nil { - glog.Errorf("scheduler cache RemovePDB failed: %v", err) - } -} - // Create creates a scheduler with the default algorithm provider. -func (c *configFactory) Create() (*scheduler.Config, error) { +func (c *configFactory) Create() (*Config, error) { return c.CreateFromProvider(DefaultProvider) } // Creates a scheduler from the name of a registered algorithm provider. 
-func (c *configFactory) CreateFromProvider(providerName string) (*scheduler.Config, error) { - glog.V(2).Infof("Creating scheduler from algorithm provider '%v'", providerName) +func (c *configFactory) CreateFromProvider(providerName string) (*Config, error) { + klog.V(2).Infof("Creating scheduler from algorithm provider '%v'", providerName) provider, err := GetAlgorithmProvider(providerName) if err != nil { return nil, err } - return c.CreateFromKeys(provider.FitPredicateKeys, provider.PriorityFunctionKeys, []algorithm.SchedulerExtender{}) } // Creates a scheduler from the configuration file -func (c *configFactory) CreateFromConfig(policy schedulerapi.Policy) (*scheduler.Config, error) { - glog.V(2).Infof("Creating scheduler from configuration: %v", policy) +func (c *configFactory) CreateFromConfig(policy schedulerapi.Policy) (*Config, error) { + klog.V(2).Infof("Creating scheduler from configuration: %v", policy) // validate the policy configuration if err := validation.ValidatePolicy(policy); err != nil { @@ -1070,7 +1119,7 @@ func (c *configFactory) CreateFromConfig(policy schedulerapi.Policy) (*scheduler predicateKeys := sets.NewString() if policy.Predicates == nil { - glog.V(2).Infof("Using predicates from algorithm provider '%v'", DefaultProvider) + klog.V(2).Infof("Using predicates from algorithm provider '%v'", DefaultProvider) provider, err := GetAlgorithmProvider(DefaultProvider) if err != nil { return nil, err @@ -1078,14 +1127,14 @@ func (c *configFactory) CreateFromConfig(policy schedulerapi.Policy) (*scheduler predicateKeys = provider.FitPredicateKeys } else { for _, predicate := range policy.Predicates { - glog.V(2).Infof("Registering predicate: %s", predicate.Name) + klog.V(2).Infof("Registering predicate: %s", predicate.Name) predicateKeys.Insert(RegisterCustomFitPredicate(predicate)) } } priorityKeys := sets.NewString() if policy.Priorities == nil { - glog.V(2).Infof("Using priorities from algorithm provider '%v'", DefaultProvider) + klog.V(2).Infof("Using priorities from algorithm provider '%v'", DefaultProvider) provider, err := GetAlgorithmProvider(DefaultProvider) if err != nil { return nil, err @@ -1093,7 +1142,7 @@ func (c *configFactory) CreateFromConfig(policy schedulerapi.Policy) (*scheduler priorityKeys = provider.PriorityFunctionKeys } else { for _, priority := range policy.Priorities { - glog.V(2).Infof("Registering priority: %s", priority.Name) + klog.V(2).Infof("Registering priority: %s", priority.Name) priorityKeys.Insert(RegisterCustomPriorityFunction(priority)) } } @@ -1102,7 +1151,7 @@ func (c *configFactory) CreateFromConfig(policy schedulerapi.Policy) (*scheduler if len(policy.ExtenderConfigs) != 0 { ignoredExtendedResources := sets.NewString() for ii := range policy.ExtenderConfigs { - glog.V(2).Infof("Creating extender with config %+v", policy.ExtenderConfigs[ii]) + klog.V(2).Infof("Creating extender with config %+v", policy.ExtenderConfigs[ii]) extender, err := core.NewHTTPExtender(&policy.ExtenderConfigs[ii]) if err != nil { return nil, err @@ -1131,7 +1180,7 @@ func (c *configFactory) CreateFromConfig(policy schedulerapi.Policy) (*scheduler } // getBinderFunc returns an func which returns an extender that supports bind or a default binder based on the given pod. 
-func (c *configFactory) getBinderFunc(extenders []algorithm.SchedulerExtender) func(pod *v1.Pod) scheduler.Binder { +func (c *configFactory) getBinderFunc(extenders []algorithm.SchedulerExtender) func(pod *v1.Pod) Binder { var extenderBinder algorithm.SchedulerExtender for i := range extenders { if extenders[i].IsBinder() { @@ -1140,7 +1189,7 @@ func (c *configFactory) getBinderFunc(extenders []algorithm.SchedulerExtender) f } } defaultBinder := &binder{c.client} - return func(pod *v1.Pod) scheduler.Binder { + return func(pod *v1.Pod) Binder { if extenderBinder != nil && extenderBinder.IsInterested(pod) { return extenderBinder } @@ -1149,8 +1198,8 @@ func (c *configFactory) getBinderFunc(extenders []algorithm.SchedulerExtender) f } // Creates a scheduler from a set of registered fit predicate keys and priority keys. -func (c *configFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String, extenders []algorithm.SchedulerExtender) (*scheduler.Config, error) { - glog.V(2).Infof("Creating scheduler with fit predicates '%v' and priority functions '%v'", predicateKeys, priorityKeys) +func (c *configFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String, extenders []algorithm.SchedulerExtender) (*Config, error) { + klog.V(2).Infof("Creating scheduler with fit predicates '%v' and priority functions '%v'", predicateKeys, priorityKeys) if c.GetHardPodAffinitySymmetricWeight() < 1 || c.GetHardPodAffinitySymmetricWeight() > 100 { return nil, fmt.Errorf("invalid hardPodAffinitySymmetricWeight: %d, must be in the range 1-100", c.GetHardPodAffinitySymmetricWeight()) @@ -1178,8 +1227,8 @@ func (c *configFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String, // Init equivalence class cache if c.enableEquivalenceClassCache { - c.equivalencePodCache = equivalence.NewCache() - glog.Info("Created equivalence class cache") + c.equivalencePodCache = equivalence.NewCache(predicates.Ordering()) + klog.Info("Created equivalence class cache") } algo := core.NewGenericScheduler( @@ -1193,13 +1242,14 @@ func (c *configFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String, extenders, c.volumeBinder, c.pVCLister, + c.pdbLister, c.alwaysCheckAllPredicates, c.disablePreemption, c.percentageOfNodesToScore, ) podBackoff := util.CreateDefaultPodBackoff() - return &scheduler.Config{ + return &Config{ SchedulerCache: c.schedulerCache, Ecache: c.equivalencePodCache, // The scheduler only needs to consider schedulable nodes. 
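getBinderFunc, earlier in this hunk, picks a binder per pod: an extender that both supports bind and is interested in the pod wins, otherwise the default API-server binder is used. A toy sketch of that selection logic (all types here are stand-ins for the real ones):

```go
package main

import "fmt"

type Pod struct{ Name string }

type Binder interface {
	Bind(p *Pod) error
}

type extenderBinder struct {
	interested func(*Pod) bool
}

func (e *extenderBinder) Bind(p *Pod) error {
	fmt.Println("extender binds", p.Name)
	return nil
}

type defaultBinder struct{}

func (defaultBinder) Bind(p *Pod) error {
	fmt.Println("default binder binds", p.Name)
	return nil
}

// getBinderFunc mirrors the selection above: an interested extender wins,
// otherwise fall back to the default binder.
func getBinderFunc(ext *extenderBinder) func(*Pod) Binder {
	return func(p *Pod) Binder {
		if ext != nil && ext.interested(p) {
			return ext
		}
		return defaultBinder{}
	}
}

func main() {
	getBinder := getBinderFunc(&extenderBinder{
		interested: func(p *Pod) bool { return p.Name == "gpu-pod" },
	})
	for _, p := range []*Pod{{Name: "gpu-pod"}, {Name: "web"}} {
		getBinder(p).Bind(p)
	}
}
```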
@@ -1214,9 +1264,10 @@ func (c *configFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String,
 		NextPod: func() *v1.Pod {
 			return c.getNextPod()
 		},
-		Error:          c.MakeDefaultErrorFunc(podBackoff, c.podQueue),
-		StopEverything: c.StopEverything,
-		VolumeBinder:   c.volumeBinder,
+		Error:           c.MakeDefaultErrorFunc(podBackoff, c.podQueue),
+		StopEverything:  c.StopEverything,
+		VolumeBinder:    c.volumeBinder,
+		SchedulingQueue: c.podQueue,
 	}, nil
 }
 
@@ -1271,6 +1322,7 @@ func (c *configFactory) getPluginArgs() (*PluginFactoryArgs, error) {
 		ReplicaSetLister:  c.replicaSetLister,
 		StatefulSetLister: c.statefulSetLister,
 		NodeLister:        &nodeLister{c.nodeLister},
+		PDBLister:         c.pdbLister,
 		NodeInfo:          &predicates.CachedNodeInfo{NodeLister: c.nodeLister},
 		PVInfo:            &predicates.CachedPersistentVolumeInfo{PersistentVolumeLister: c.pVLister},
 		PVCInfo:           &predicates.CachedPersistentVolumeClaimInfo{PersistentVolumeClaimLister: c.pVCLister},
@@ -1283,33 +1335,16 @@ func (c *configFactory) getNextPod() *v1.Pod {
 	pod, err := c.podQueue.Pop()
 	if err == nil {
-		glog.V(4).Infof("About to try and schedule pod %v/%v", pod.Namespace, pod.Name)
+		klog.V(4).Infof("About to try and schedule pod %v/%v", pod.Namespace, pod.Name)
 		return pod
 	}
-	glog.Errorf("Error while retrieving next pod from scheduling queue: %v", err)
+	klog.Errorf("Error while retrieving next pod from scheduling queue: %v", err)
 	return nil
 }
 
-// unassignedNonTerminatedPod selects pods that are unassigned and non-terminal.
-func unassignedNonTerminatedPod(pod *v1.Pod) bool {
-	if len(pod.Spec.NodeName) != 0 {
-		return false
-	}
-	if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
-		return false
-	}
-	return true
-}
-
-// assignedNonTerminatedPod selects pods that are assigned and non-terminal (scheduled and running).
-func assignedNonTerminatedPod(pod *v1.Pod) bool {
-	if len(pod.Spec.NodeName) == 0 {
-		return false
-	}
-	if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
-		return false
-	}
-	return true
+// assignedPod selects pods that are assigned (scheduled and running).
+func assignedPod(pod *v1.Pod) bool {
+	return len(pod.Spec.NodeName) != 0
 }
 
 // responsibleForPod returns true if the pod has asked to be scheduled by the given scheduler.
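With the PDB bookkeeping removed from the scheduler cache, PodDisruptionBudgets now reach the scheduler through a plain client-go lister (the pdbLister plumbed into PluginFactoryArgs above). A runnable sketch of that consumer path using a fake clientset, so it needs no cluster:

```go
package main

import (
	"fmt"

	policyv1beta1 "k8s.io/api/policy/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// Seed a fake API server with one PDB.
	client := fake.NewSimpleClientset(&policyv1beta1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{Name: "my-pdb", Namespace: "default"},
	})

	// Build the lister from a shared informer factory, as the factory does.
	factory := informers.NewSharedInformerFactory(client, 0)
	pdbLister := factory.Policy().V1beta1().PodDisruptionBudgets().Lister()

	stopCh := make(chan struct{})
	defer close(stopCh)
	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)

	pdbs, err := pdbLister.List(labels.Everything())
	if err != nil {
		panic(err)
	}
	for _, pdb := range pdbs {
		fmt.Println(pdb.Namespace + "/" + pdb.Name)
	}
}
```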
@@ -1399,13 +1434,13 @@ func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration) core } } -func (c *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue core.SchedulingQueue) func(pod *v1.Pod, err error) { +func (c *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue internalqueue.SchedulingQueue) func(pod *v1.Pod, err error) { return func(pod *v1.Pod, err error) { if err == core.ErrNoNodesAvailable { - glog.V(4).Infof("Unable to schedule %v/%v: no nodes are registered to the cluster; waiting", pod.Namespace, pod.Name) + klog.V(4).Infof("Unable to schedule %v/%v: no nodes are registered to the cluster; waiting", pod.Namespace, pod.Name) } else { if _, ok := err.(*core.FitError); ok { - glog.V(4).Infof("Unable to schedule %v/%v: no fit: %v; waiting", pod.Namespace, pod.Name, err) + klog.V(4).Infof("Unable to schedule %v/%v: no fit: %v; waiting", pod.Namespace, pod.Name, err) } else if errors.IsNotFound(err) { if errStatus, ok := err.(errors.APIStatus); ok && errStatus.Status().Details.Kind == "node" { nodeName := errStatus.Status().Details.Name @@ -1414,9 +1449,11 @@ func (c *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue _, err := c.client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) if err != nil && errors.IsNotFound(err) { node := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}} - // NOTE: Because the scheduler uses snapshots of schedulerCache and the live - // version of equivalencePodCache, updates must be written to schedulerCache - // before invalidating equivalencePodCache. + // NOTE: Updates must be written to scheduler cache before invalidating + // equivalence cache, because we could snapshot equivalence cache after the + // invalidation and then snapshot the cache itself. If the cache is + // snapshotted before updates are written, we would update equivalence + // cache with stale information which is based on snapshot of old cache. c.schedulerCache.RemoveNode(&node) // invalidate cached predicate for the node if c.enableEquivalenceClassCache { @@ -1425,7 +1462,7 @@ func (c *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue } } } else { - glog.Errorf("Error scheduling %v/%v: %v; retrying", pod.Namespace, pod.Name, err) + klog.Errorf("Error scheduling %v/%v: %v; retrying", pod.Namespace, pod.Name, err) } } @@ -1447,7 +1484,7 @@ func (c *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue if !util.PodPriorityEnabled() { entry := backoff.GetEntry(podID) if !entry.TryWait(backoff.MaxDuration()) { - glog.Warningf("Request for pod %v already in flight, abandoning", podID) + klog.Warningf("Request for pod %v already in flight, abandoning", podID) return } } @@ -1467,7 +1504,7 @@ func (c *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue break } if errors.IsNotFound(err) { - glog.Warningf("A pod %v no longer exists", podID) + klog.Warningf("A pod %v no longer exists", podID) if c.volumeBinder != nil { // Volume binder only wants to keep unassigned pods @@ -1475,7 +1512,7 @@ func (c *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue } return } - glog.Errorf("Error getting pod %v for retry: %v; retrying...", podID, err) + klog.Errorf("Error getting pod %v for retry: %v; retrying...", podID, err) if getBackoff = getBackoff * 2; getBackoff > maximalGetBackoff { getBackoff = maximalGetBackoff } @@ -1509,7 +1546,7 @@ type binder struct { // Bind just does a POST binding RPC. 
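The retry path in MakeDefaultErrorFunc above doubles getBackoff after each failed Get and clamps it at maximalGetBackoff. The same capped doubling in isolation (the two durations here are illustrative, not the scheduler's constants):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const initialGetBackoff = 100 * time.Millisecond
	const maximalGetBackoff = 5 * time.Second

	getBackoff := initialGetBackoff
	for attempt := 1; attempt <= 8; attempt++ {
		fmt.Printf("attempt %d: would wait %v before retrying the Get\n", attempt, getBackoff)
		// Double the wait, but never exceed the cap.
		if getBackoff = getBackoff * 2; getBackoff > maximalGetBackoff {
			getBackoff = maximalGetBackoff
		}
	}
}
```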
func (b *binder) Bind(binding *v1.Binding) error { - glog.V(3).Infof("Attempting to bind %v to %v", binding.Name, binding.Target.Name) + klog.V(3).Infof("Attempting to bind %v to %v", binding.Name, binding.Target.Name) return b.Client.CoreV1().Pods(binding.Namespace).Bind(binding) } @@ -1518,7 +1555,7 @@ type podConditionUpdater struct { } func (p *podConditionUpdater) Update(pod *v1.Pod, condition *v1.PodCondition) error { - glog.V(3).Infof("Updating pod condition for %s/%s to (%s==%s)", pod.Namespace, pod.Name, condition.Type, condition.Status) + klog.V(3).Infof("Updating pod condition for %s/%s to (%s==%s)", pod.Namespace, pod.Name, condition.Type, condition.Status) if podutil.UpdatePodCondition(&pod.Status, condition) { _, err := p.Client.CoreV1().Pods(pod.Namespace).UpdateStatus(pod) return err diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/factory/plugins.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/factory/plugins.go index f9a668b4a9ba..216a93f7e60a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/factory/plugins.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/factory/plugins.go @@ -30,7 +30,7 @@ import ( schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" "k8s.io/kubernetes/pkg/scheduler/volumebinder" - "github.com/golang/glog" + "k8s.io/klog" ) // PluginFactoryArgs are passed to all plugin factory functions. @@ -41,6 +41,7 @@ type PluginFactoryArgs struct { ReplicaSetLister algorithm.ReplicaSetLister StatefulSetLister algorithm.StatefulSetLister NodeLister algorithm.NodeLister + PDBLister algorithm.PDBLister NodeInfo predicates.NodeInfo PVInfo predicates.PersistentVolumeInfo PVCInfo predicates.PersistentVolumeClaimInfo @@ -166,6 +167,17 @@ func InsertPredicateKeyToAlgorithmProviderMap(key string) { return } +// InsertPriorityKeyToAlgorithmProviderMap inserts a priority function to all algorithmProviders which are in algorithmProviderMap. +func InsertPriorityKeyToAlgorithmProviderMap(key string) { + schedulerFactoryMutex.Lock() + defer schedulerFactoryMutex.Unlock() + + for _, provider := range algorithmProviderMap { + provider.PriorityFunctionKeys.Insert(key) + } + return +} + // RegisterMandatoryFitPredicate registers a fit predicate with the algorithm registry, the predicate is used by // kubelet, DaemonSet; it is always included in configuration. Returns the name with which the predicate was // registered. 
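InsertPriorityKeyToAlgorithmProviderMap, added above, walks every registered provider under the factory mutex and inserts the new priority key into its key set. A reduced sketch of that registry pattern (the provider contents are invented):

```go
package main

import (
	"fmt"
	"sync"

	"k8s.io/apimachinery/pkg/util/sets"
)

// Every provider keeps a set of predicate keys and priority keys.
type algorithmProviderConfig struct {
	FitPredicateKeys     sets.String
	PriorityFunctionKeys sets.String
}

var (
	schedulerFactoryMutex sync.Mutex
	algorithmProviderMap  = map[string]*algorithmProviderConfig{
		"DefaultProvider": {
			FitPredicateKeys:     sets.NewString("PodFitsResources"),
			PriorityFunctionKeys: sets.NewString("LeastRequestedPriority"),
		},
	}
)

// insertPriorityKeyToAlgorithmProviderMap pushes one key into every
// registered provider while holding the shared lock, as the new helper does.
func insertPriorityKeyToAlgorithmProviderMap(key string) {
	schedulerFactoryMutex.Lock()
	defer schedulerFactoryMutex.Unlock()
	for _, provider := range algorithmProviderMap {
		provider.PriorityFunctionKeys.Insert(key)
	}
}

func main() {
	insertPriorityKeyToAlgorithmProviderMap("RequestedToCapacityRatioPriority")
	fmt.Println(algorithmProviderMap["DefaultProvider"].PriorityFunctionKeys.List())
}
```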
@@ -221,12 +233,12 @@ func RegisterCustomFitPredicate(policy schedulerapi.PredicatePolicy) string { } } else if predicateFactory, ok = fitPredicateMap[policy.Name]; ok { // checking to see if a pre-defined predicate is requested - glog.V(2).Infof("Predicate type %s already registered, reusing.", policy.Name) + klog.V(2).Infof("Predicate type %s already registered, reusing.", policy.Name) return policy.Name } if predicateFactory == nil { - glog.Fatalf("Invalid configuration: Predicate type not found for %s", policy.Name) + klog.Fatalf("Invalid configuration: Predicate type not found for %s", policy.Name) } return RegisterFitPredicateFactory(policy.Name, predicateFactory) @@ -333,7 +345,7 @@ func RegisterCustomPriorityFunction(policy schedulerapi.PriorityPolicy) string { } } } else if existingPcf, ok := priorityFunctionMap[policy.Name]; ok { - glog.V(2).Infof("Priority type %s already registered, reusing.", policy.Name) + klog.V(2).Infof("Priority type %s already registered, reusing.", policy.Name) // set/update the weight based on the policy pcf = &PriorityConfigFactory{ Function: existingPcf.Function, @@ -343,7 +355,7 @@ func RegisterCustomPriorityFunction(policy schedulerapi.PriorityPolicy) string { } if pcf == nil { - glog.Fatalf("Invalid configuration: Priority type not found for %s", policy.Name) + klog.Fatalf("Invalid configuration: Priority type not found for %s", policy.Name) } return RegisterPriorityConfigFactory(policy.Name, *pcf) @@ -357,7 +369,7 @@ func buildScoringFunctionShapeFromRequestedToCapacityRatioArguments(arguments *s } shape, err := priorities.NewFunctionShape(points) if err != nil { - glog.Fatalf("invalid RequestedToCapacityRatioPriority arguments: %s", err.Error()) + klog.Fatalf("invalid RequestedToCapacityRatioPriority arguments: %s", err.Error()) } return shape } @@ -488,7 +500,7 @@ var validName = regexp.MustCompile("^[a-zA-Z0-9]([-a-zA-Z0-9]*[a-zA-Z0-9])$") func validateAlgorithmNameOrDie(name string) { if !validName.MatchString(name) { - glog.Fatalf("Algorithm name %v does not match the name validation regexp \"%v\".", name, validName) + klog.Fatalf("Algorithm name %v does not match the name validation regexp \"%v\".", name, validName) } } @@ -502,7 +514,7 @@ func validatePredicateOrDie(predicate schedulerapi.PredicatePolicy) { numArgs++ } if numArgs != 1 { - glog.Fatalf("Exactly 1 predicate argument is required, numArgs: %v, Predicate: %s", numArgs, predicate.Name) + klog.Fatalf("Exactly 1 predicate argument is required, numArgs: %v, Predicate: %s", numArgs, predicate.Name) } } } @@ -520,7 +532,7 @@ func validatePriorityOrDie(priority schedulerapi.PriorityPolicy) { numArgs++ } if numArgs != 1 { - glog.Fatalf("Exactly 1 priority argument is required, numArgs: %v, Priority: %s", numArgs, priority.Name) + klog.Fatalf("Exactly 1 priority argument is required, numArgs: %v, Priority: %s", numArgs, priority.Name) } } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/BUILD new file mode 100644 index 000000000000..61202c8c2864 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/BUILD @@ -0,0 +1,64 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "cache.go", + "interface.go", + "node_tree.go", + ], + importpath = "k8s.io/kubernetes/pkg/scheduler/internal/cache", + visibility = ["//visibility:public"], + deps = [ + 
"//pkg/features:go_default_library", + "//pkg/scheduler/cache:go_default_library", + "//pkg/util/node:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "cache_test.go", + "main_test.go", + "node_tree_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//pkg/features:go_default_library", + "//pkg/kubelet/apis:go_default_library", + "//pkg/scheduler/algorithm/priorities/util:go_default_library", + "//pkg/scheduler/cache:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/scheduler/internal/cache/debugger:all-srcs", + "//pkg/scheduler/internal/cache/fake:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/cache/cache.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/cache.go similarity index 81% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/cache/cache.go rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/cache.go index 5fc398049220..535236e5c1fb 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/cache/cache.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/cache.go @@ -27,9 +27,9 @@ import ( "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/features" + schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" - "github.com/golang/glog" - policy "k8s.io/api/policy/v1beta1" + "k8s.io/klog" ) var ( @@ -58,9 +58,8 @@ type schedulerCache struct { assumedPods map[string]bool // a map from pod key to podState. podStates map[string]*podState - nodes map[string]*NodeInfo + nodes map[string]*schedulercache.NodeInfo nodeTree *NodeTree - pdbs map[string]*policy.PodDisruptionBudget // A map from image name to its imageState. imageStates map[string]*imageState } @@ -80,17 +79,9 @@ type imageState struct { nodes sets.String } -// ImageStateSummary provides summarized information about the state of an image. -type ImageStateSummary struct { - // Size of the image - Size int64 - // Used to track how many nodes have this image - NumNodes int -} - // createImageStateSummary returns a summarizing snapshot of the given image's state. 
-func (cache *schedulerCache) createImageStateSummary(state *imageState) *ImageStateSummary { - return &ImageStateSummary{ +func (cache *schedulerCache) createImageStateSummary(state *imageState) *schedulercache.ImageStateSummary { + return &schedulercache.ImageStateSummary{ Size: state.size, NumNodes: len(state.nodes), } @@ -102,22 +93,21 @@ func newSchedulerCache(ttl, period time.Duration, stop <-chan struct{}) *schedul period: period, stop: stop, - nodes: make(map[string]*NodeInfo), + nodes: make(map[string]*schedulercache.NodeInfo), nodeTree: newNodeTree(nil), assumedPods: make(map[string]bool), podStates: make(map[string]*podState), - pdbs: make(map[string]*policy.PodDisruptionBudget), imageStates: make(map[string]*imageState), } } -// Snapshot takes a snapshot of the current schedulerCache. The method has performance impact, +// Snapshot takes a snapshot of the current schedulerinternalcache. The method has performance impact, // and should be only used in non-critical path. func (cache *schedulerCache) Snapshot() *Snapshot { cache.mu.RLock() defer cache.mu.RUnlock() - nodes := make(map[string]*NodeInfo) + nodes := make(map[string]*schedulercache.NodeInfo) for k, v := range cache.nodes { nodes[k] = v.Clone() } @@ -127,28 +117,22 @@ func (cache *schedulerCache) Snapshot() *Snapshot { assumedPods[k] = v } - pdbs := make(map[string]*policy.PodDisruptionBudget) - for k, v := range cache.pdbs { - pdbs[k] = v.DeepCopy() - } - return &Snapshot{ Nodes: nodes, AssumedPods: assumedPods, - Pdbs: pdbs, } } -func (cache *schedulerCache) UpdateNodeNameToInfoMap(nodeNameToInfo map[string]*NodeInfo) error { +func (cache *schedulerCache) UpdateNodeNameToInfoMap(nodeNameToInfo map[string]*schedulercache.NodeInfo) error { cache.mu.Lock() defer cache.mu.Unlock() for name, info := range cache.nodes { if utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) && info.TransientInfo != nil { // Transient scheduler info is reset here. - info.TransientInfo.resetTransientSchedulerInfo() + info.TransientInfo.ResetTransientSchedulerInfo() } - if current, ok := nodeNameToInfo[name]; !ok || current.generation != info.generation { + if current, ok := nodeNameToInfo[name]; !ok || current.GetGeneration() != info.GetGeneration() { nodeNameToInfo[name] = info.Clone() } } @@ -173,11 +157,11 @@ func (cache *schedulerCache) FilteredList(podFilter PodFilter, selector labels.S // pre-allocating capacity. 
maxSize := 0 for _, info := range cache.nodes { - maxSize += len(info.pods) + maxSize += len(info.Pods()) } pods := make([]*v1.Pod, 0, maxSize) for _, info := range cache.nodes { - for _, pod := range info.pods { + for _, pod := range info.Pods() { if podFilter(pod) && selector.Matches(labels.Set(pod.Labels)) { pods = append(pods, pod) } @@ -187,7 +171,7 @@ func (cache *schedulerCache) FilteredList(podFilter PodFilter, selector labels.S } func (cache *schedulerCache) AssumePod(pod *v1.Pod) error { - key, err := getPodKey(pod) + key, err := schedulercache.GetPodKey(pod) if err != nil { return err } @@ -213,7 +197,7 @@ func (cache *schedulerCache) FinishBinding(pod *v1.Pod) error { // finishBinding exists to make tests determinitistic by injecting now as an argument func (cache *schedulerCache) finishBinding(pod *v1.Pod, now time.Time) error { - key, err := getPodKey(pod) + key, err := schedulercache.GetPodKey(pod) if err != nil { return err } @@ -221,7 +205,7 @@ func (cache *schedulerCache) finishBinding(pod *v1.Pod, now time.Time) error { cache.mu.RLock() defer cache.mu.RUnlock() - glog.V(5).Infof("Finished binding for pod %v. Can be expired.", key) + klog.V(5).Infof("Finished binding for pod %v. Can be expired.", key) currState, ok := cache.podStates[key] if ok && cache.assumedPods[key] { dl := now.Add(cache.ttl) @@ -232,7 +216,7 @@ func (cache *schedulerCache) finishBinding(pod *v1.Pod, now time.Time) error { } func (cache *schedulerCache) ForgetPod(pod *v1.Pod) error { - key, err := getPodKey(pod) + key, err := schedulercache.GetPodKey(pod) if err != nil { return err } @@ -264,7 +248,7 @@ func (cache *schedulerCache) ForgetPod(pod *v1.Pod) error { func (cache *schedulerCache) addPod(pod *v1.Pod) { n, ok := cache.nodes[pod.Spec.NodeName] if !ok { - n = NewNodeInfo() + n = schedulercache.NewNodeInfo() cache.nodes[pod.Spec.NodeName] = n } n.AddPod(pod) @@ -285,14 +269,14 @@ func (cache *schedulerCache) removePod(pod *v1.Pod) error { if err := n.RemovePod(pod); err != nil { return err } - if len(n.pods) == 0 && n.node == nil { + if len(n.Pods()) == 0 && n.Node() == nil { delete(cache.nodes, pod.Spec.NodeName) } return nil } func (cache *schedulerCache) AddPod(pod *v1.Pod) error { - key, err := getPodKey(pod) + key, err := schedulercache.GetPodKey(pod) if err != nil { return err } @@ -305,7 +289,7 @@ func (cache *schedulerCache) AddPod(pod *v1.Pod) error { case ok && cache.assumedPods[key]: if currState.pod.Spec.NodeName != pod.Spec.NodeName { // The pod was added to a different node than it was assumed to. - glog.Warningf("Pod %v was assumed to be on %v but got added to %v", key, pod.Spec.NodeName, currState.pod.Spec.NodeName) + klog.Warningf("Pod %v was assumed to be on %v but got added to %v", key, pod.Spec.NodeName, currState.pod.Spec.NodeName) // Clean this up. cache.removePod(currState.pod) cache.addPod(pod) @@ -327,7 +311,7 @@ func (cache *schedulerCache) AddPod(pod *v1.Pod) error { } func (cache *schedulerCache) UpdatePod(oldPod, newPod *v1.Pod) error { - key, err := getPodKey(oldPod) + key, err := schedulercache.GetPodKey(oldPod) if err != nil { return err } @@ -341,8 +325,8 @@ func (cache *schedulerCache) UpdatePod(oldPod, newPod *v1.Pod) error { // before Update event, in which case the state would change from Assumed to Added. 
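This part of the cache implements the assumed-pod lifecycle: AssumePod records a pod optimistically at scheduling time, finishBinding stamps a TTL deadline, and the cleanup pass expires assumptions whose confirming Add event never arrived. A toy model of that lifecycle (hypothetical names, no locking):

```go
package main

import (
	"fmt"
	"time"
)

type podState struct {
	bindingFinished bool
	deadline        time.Time
}

type toyCache struct {
	ttl     time.Duration
	assumed map[string]*podState
}

// assume records the pod optimistically, before the API server confirms it.
func (c *toyCache) assume(key string) {
	c.assumed[key] = &podState{}
}

// finishBinding stamps the deadline after which the assumption may expire.
func (c *toyCache) finishBinding(key string, now time.Time) {
	if ps, ok := c.assumed[key]; ok {
		ps.bindingFinished = true
		ps.deadline = now.Add(c.ttl)
	}
}

// add is the informer's confirmation; the pod stops being "assumed".
func (c *toyCache) add(key string) {
	delete(c.assumed, key)
}

// cleanup drops assumed pods whose confirmation never arrived in time.
func (c *toyCache) cleanup(now time.Time) {
	for key, ps := range c.assumed {
		if ps.bindingFinished && now.After(ps.deadline) {
			fmt.Println("expiring assumed pod", key)
			delete(c.assumed, key)
		}
	}
}

func main() {
	c := &toyCache{ttl: 30 * time.Second, assumed: map[string]*podState{}}
	now := time.Now()

	c.assume("default/a")
	c.finishBinding("default/a", now)
	c.cleanup(now.Add(time.Minute)) // Add never arrived within the TTL: expires
}
```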
case ok && !cache.assumedPods[key]: if currState.pod.Spec.NodeName != newPod.Spec.NodeName { - glog.Errorf("Pod %v updated on a different node than previously added to.", key) - glog.Fatalf("Schedulercache is corrupted and can badly affect scheduling decisions") + klog.Errorf("Pod %v updated on a different node than previously added to.", key) + klog.Fatalf("Schedulercache is corrupted and can badly affect scheduling decisions") } if err := cache.updatePod(oldPod, newPod); err != nil { return err @@ -355,7 +339,7 @@ func (cache *schedulerCache) UpdatePod(oldPod, newPod *v1.Pod) error { } func (cache *schedulerCache) RemovePod(pod *v1.Pod) error { - key, err := getPodKey(pod) + key, err := schedulercache.GetPodKey(pod) if err != nil { return err } @@ -369,8 +353,8 @@ func (cache *schedulerCache) RemovePod(pod *v1.Pod) error { // before Remove event, in which case the state would change from Assumed to Added. case ok && !cache.assumedPods[key]: if currState.pod.Spec.NodeName != pod.Spec.NodeName { - glog.Errorf("Pod %v was assumed to be on %v but got added to %v", key, pod.Spec.NodeName, currState.pod.Spec.NodeName) - glog.Fatalf("Schedulercache is corrupted and can badly affect scheduling decisions") + klog.Errorf("Pod %v was assumed to be on %v but got added to %v", key, pod.Spec.NodeName, currState.pod.Spec.NodeName) + klog.Fatalf("Schedulercache is corrupted and can badly affect scheduling decisions") } err := cache.removePod(currState.pod) if err != nil { @@ -384,7 +368,7 @@ func (cache *schedulerCache) RemovePod(pod *v1.Pod) error { } func (cache *schedulerCache) IsAssumedPod(pod *v1.Pod) (bool, error) { - key, err := getPodKey(pod) + key, err := schedulercache.GetPodKey(pod) if err != nil { return false, err } @@ -400,7 +384,7 @@ func (cache *schedulerCache) IsAssumedPod(pod *v1.Pod) (bool, error) { } func (cache *schedulerCache) GetPod(pod *v1.Pod) (*v1.Pod, error) { - key, err := getPodKey(pod) + key, err := schedulercache.GetPodKey(pod) if err != nil { return nil, err } @@ -422,10 +406,10 @@ func (cache *schedulerCache) AddNode(node *v1.Node) error { n, ok := cache.nodes[node.Name] if !ok { - n = NewNodeInfo() + n = schedulercache.NewNodeInfo() cache.nodes[node.Name] = n } else { - cache.removeNodeImageStates(n.node) + cache.removeNodeImageStates(n.Node()) } cache.nodeTree.AddNode(node) @@ -439,10 +423,10 @@ func (cache *schedulerCache) UpdateNode(oldNode, newNode *v1.Node) error { n, ok := cache.nodes[newNode.Name] if !ok { - n = NewNodeInfo() + n = schedulercache.NewNodeInfo() cache.nodes[newNode.Name] = n } else { - cache.removeNodeImageStates(n.node) + cache.removeNodeImageStates(n.Node()) } cache.nodeTree.UpdateNode(oldNode, newNode) @@ -462,7 +446,7 @@ func (cache *schedulerCache) RemoveNode(node *v1.Node) error { // We can't do it unconditionally, because notifications about pods are delivered // in a different watch, and thus can potentially be observed later, even though // they happened before node removal. - if len(n.pods) == 0 && n.node == nil { + if len(n.Pods()) == 0 && n.Node() == nil { delete(cache.nodes, node.Name) } @@ -473,8 +457,8 @@ func (cache *schedulerCache) RemoveNode(node *v1.Node) error { // addNodeImageStates adds states of the images on given node to the given nodeInfo and update the imageStates in // scheduler cache. This function assumes the lock to scheduler cache has been acquired. 
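addNodeImageStates and removeNodeImageStates, below, maintain one imageState entry per image name recording its size and the set of nodes holding it; the per-node ImageStateSummary is derived from that shared entry. A toy version of the bookkeeping (names mirror the source, values are invented):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

type imageState struct {
	size  int64       // size of the image
	nodes sets.String // which nodes have this image
}

type imageStateSummary struct {
	Size     int64
	NumNodes int
}

func main() {
	imageStates := map[string]*imageState{}

	recordImageOnNode := func(node, image string, size int64) {
		s, ok := imageStates[image]
		if !ok {
			s = &imageState{size: size, nodes: sets.NewString()}
			imageStates[image] = s
		}
		s.nodes.Insert(node)
	}

	recordImageOnNode("node-1", "nginx:1.15", 109000000)
	recordImageOnNode("node-2", "nginx:1.15", 109000000)

	// Derive the summary the way createImageStateSummary does.
	s := imageStates["nginx:1.15"]
	fmt.Printf("%+v\n", imageStateSummary{Size: s.size, NumNodes: len(s.nodes)})
}
```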
-func (cache *schedulerCache) addNodeImageStates(node *v1.Node, nodeInfo *NodeInfo) { - newSum := make(map[string]*ImageStateSummary) +func (cache *schedulerCache) addNodeImageStates(node *v1.Node, nodeInfo *schedulercache.NodeInfo) { + newSum := make(map[string]*schedulercache.ImageStateSummary) for _, image := range node.Status.Images { for _, name := range image.Names { @@ -495,7 +479,7 @@ func (cache *schedulerCache) addNodeImageStates(node *v1.Node, nodeInfo *NodeInf } } } - nodeInfo.imageStates = newSum + nodeInfo.SetImageStates(newSum) } // removeNodeImageStates removes the given node record from image entries having the node @@ -522,46 +506,6 @@ func (cache *schedulerCache) removeNodeImageStates(node *v1.Node) { } } -func (cache *schedulerCache) AddPDB(pdb *policy.PodDisruptionBudget) error { - cache.mu.Lock() - defer cache.mu.Unlock() - - // Unconditionally update cache. - cache.pdbs[string(pdb.UID)] = pdb - return nil -} - -func (cache *schedulerCache) UpdatePDB(oldPDB, newPDB *policy.PodDisruptionBudget) error { - return cache.AddPDB(newPDB) -} - -func (cache *schedulerCache) RemovePDB(pdb *policy.PodDisruptionBudget) error { - cache.mu.Lock() - defer cache.mu.Unlock() - - delete(cache.pdbs, string(pdb.UID)) - return nil -} - -func (cache *schedulerCache) ListPDBs(selector labels.Selector) ([]*policy.PodDisruptionBudget, error) { - cache.mu.RLock() - defer cache.mu.RUnlock() - var pdbs []*policy.PodDisruptionBudget - for _, pdb := range cache.pdbs { - if selector.Matches(labels.Set(pdb.Labels)) { - pdbs = append(pdbs, pdb) - } - } - return pdbs, nil -} - -func (cache *schedulerCache) IsUpToDate(n *NodeInfo) bool { - cache.mu.RLock() - defer cache.mu.RUnlock() - node, ok := cache.nodes[n.Node().Name] - return ok && n.generation == node.generation -} - func (cache *schedulerCache) run() { go wait.Until(cache.cleanupExpiredAssumedPods, cache.period, cache.stop) } @@ -582,14 +526,14 @@ func (cache *schedulerCache) cleanupAssumedPods(now time.Time) { panic("Key found in assumed set but not in podStates. Potentially a logical error.") } if !ps.bindingFinished { - glog.V(3).Infof("Couldn't expire cache for pod %v/%v. Binding is still in progress.", + klog.V(3).Infof("Couldn't expire cache for pod %v/%v. 
Binding is still in progress.", ps.pod.Namespace, ps.pod.Name) continue } if now.After(*ps.deadline) { - glog.Warningf("Pod %s/%s expired", ps.pod.Namespace, ps.pod.Name) + klog.Warningf("Pod %s/%s expired", ps.pod.Namespace, ps.pod.Name) if err := cache.expirePod(key, ps); err != nil { - glog.Errorf("ExpirePod failed for %s: %v", key, err) + klog.Errorf("ExpirePod failed for %s: %v", key, err) } } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger/BUILD new file mode 100644 index 000000000000..320c9734fba3 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger/BUILD @@ -0,0 +1,46 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "comparer.go", + "debugger.go", + "dumper.go", + ], + importpath = "k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger", + visibility = ["//pkg/scheduler:__subpackages__"], + deps = [ + "//pkg/scheduler/cache:go_default_library", + "//pkg/scheduler/internal/cache:go_default_library", + "//pkg/scheduler/internal/queue:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", + "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["comparer_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/scheduler/cache:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger/comparer.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger/comparer.go new file mode 100644 index 000000000000..e78df11184a5 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger/comparer.go @@ -0,0 +1,135 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package debugger + +import ( + "sort" + "strings" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + corelisters "k8s.io/client-go/listers/core/v1" + "k8s.io/klog" + schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" + schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" + internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" +) + +// CacheComparer is an implementation of the Scheduler's cache comparer. 
+type CacheComparer struct { + NodeLister corelisters.NodeLister + PodLister corelisters.PodLister + Cache schedulerinternalcache.Cache + PodQueue internalqueue.SchedulingQueue +} + +// Compare compares the nodes and pods of NodeLister with Cache.Snapshot. +func (c *CacheComparer) Compare() error { + klog.V(3).Info("cache comparer started") + defer klog.V(3).Info("cache comparer finished") + + nodes, err := c.NodeLister.List(labels.Everything()) + if err != nil { + return err + } + + pods, err := c.PodLister.List(labels.Everything()) + if err != nil { + return err + } + + snapshot := c.Cache.Snapshot() + + waitingPods := c.PodQueue.WaitingPods() + + if missed, redundant := c.CompareNodes(nodes, snapshot.Nodes); len(missed)+len(redundant) != 0 { + klog.Warningf("cache mismatch: missed nodes: %s; redundant nodes: %s", missed, redundant) + } + + if missed, redundant := c.ComparePods(pods, waitingPods, snapshot.Nodes); len(missed)+len(redundant) != 0 { + klog.Warningf("cache mismatch: missed pods: %s; redundant pods: %s", missed, redundant) + } + + return nil +} + +// CompareNodes compares actual nodes with cached nodes. +func (c *CacheComparer) CompareNodes(nodes []*v1.Node, nodeinfos map[string]*schedulercache.NodeInfo) (missed, redundant []string) { + actual := []string{} + for _, node := range nodes { + actual = append(actual, node.Name) + } + + cached := []string{} + for nodeName := range nodeinfos { + cached = append(cached, nodeName) + } + + return compareStrings(actual, cached) +} + +// ComparePods compares actual pods with cached pods. +func (c *CacheComparer) ComparePods(pods, waitingPods []*v1.Pod, nodeinfos map[string]*schedulercache.NodeInfo) (missed, redundant []string) { + actual := []string{} + for _, pod := range pods { + actual = append(actual, string(pod.UID)) + } + + cached := []string{} + for _, nodeinfo := range nodeinfos { + for _, pod := range nodeinfo.Pods() { + cached = append(cached, string(pod.UID)) + } + } + for _, pod := range waitingPods { + cached = append(cached, string(pod.UID)) + } + + return compareStrings(actual, cached) +} + +func compareStrings(actual, cached []string) (missed, redundant []string) { + missed, redundant = []string{}, []string{} + + sort.Strings(actual) + sort.Strings(cached) + + compare := func(i, j int) int { + if i == len(actual) { + return 1 + } else if j == len(cached) { + return -1 + } + return strings.Compare(actual[i], cached[j]) + } + + for i, j := 0, 0; i < len(actual) || j < len(cached); { + switch compare(i, j) { + case 0: + i++ + j++ + case -1: + missed = append(missed, actual[i]) + i++ + case 1: + redundant = append(redundant, cached[j]) + j++ + } + } + + return +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger/debugger.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger/debugger.go new file mode 100644 index 000000000000..64428d5693ef --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger/debugger.go @@ -0,0 +1,50 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
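The `compareStrings` helper added above reduces cache validation to a diff of two sorted string slices: anything present only in the live API listing is reported as missed from the cache, and anything present only in the cache as redundant. As a minimal standalone sketch of the same two-pointer sweep (the function and variable names below are illustrative, not part of this diff):

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// diffSorted reports entries present only in actual (missed from the cache)
// and entries present only in cached (redundant in the cache). Both slices
// are sorted first, so one linear pass suffices.
func diffSorted(actual, cached []string) (missed, redundant []string) {
	sort.Strings(actual)
	sort.Strings(cached)
	i, j := 0, 0
	for i < len(actual) || j < len(cached) {
		switch {
		case j == len(cached): // cached is exhausted; the rest of actual is missed
			missed = append(missed, actual[i])
			i++
		case i == len(actual): // actual is exhausted; the rest of cached is redundant
			redundant = append(redundant, cached[j])
			j++
		default:
			switch strings.Compare(actual[i], cached[j]) {
			case 0: // present in both
				i++
				j++
			case -1:
				missed = append(missed, actual[i])
				i++
			case 1:
				redundant = append(redundant, cached[j])
				j++
			}
		}
	}
	return missed, redundant
}

func main() {
	missed, redundant := diffSorted(
		[]string{"node-a", "node-b", "node-c"},
		[]string{"node-b", "node-d"},
	)
	fmt.Println(missed)    // [node-a node-c]
	fmt.Println(redundant) // [node-d]
}
```

Sorting up front is what makes the single pass work: the comparer pays O(n log n) once rather than O(n·m) membership checks across the two listings.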
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package debugger + +import ( + corelisters "k8s.io/client-go/listers/core/v1" + internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" + internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" +) + +// CacheDebugger provides ways to check and write cache information for debugging. +type CacheDebugger struct { + Comparer CacheComparer + Dumper CacheDumper +} + +// New creates a CacheDebugger. +func New( + nodeLister corelisters.NodeLister, + podLister corelisters.PodLister, + cache internalcache.Cache, + podQueue internalqueue.SchedulingQueue, +) *CacheDebugger { + return &CacheDebugger{ + Comparer: CacheComparer{ + NodeLister: nodeLister, + PodLister: podLister, + Cache: cache, + PodQueue: podQueue, + }, + Dumper: CacheDumper{ + cache: cache, + podQueue: podQueue, + }, + } +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger/dumper.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger/dumper.go new file mode 100644 index 000000000000..b9084d377d96 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger/dumper.go @@ -0,0 +1,78 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package debugger + +import ( + "fmt" + "strings" + + "k8s.io/klog" + + "k8s.io/api/core/v1" + "k8s.io/kubernetes/pkg/scheduler/cache" + internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" + "k8s.io/kubernetes/pkg/scheduler/internal/queue" +) + +// CacheDumper writes some information from the scheduler cache and the scheduling queue to the +// scheduler logs for debugging purposes. +type CacheDumper struct { + cache internalcache.Cache + podQueue queue.SchedulingQueue +} + +// DumpAll writes cached nodes and scheduling queue information to the scheduler logs. +func (d *CacheDumper) DumpAll() { + d.dumpNodes() + d.dumpSchedulingQueue() +} + +// dumpNodes writes NodeInfo to the scheduler logs. +func (d *CacheDumper) dumpNodes() { + snapshot := d.cache.Snapshot() + klog.Info("Dump of cached NodeInfo") + for _, nodeInfo := range snapshot.Nodes { + klog.Info(printNodeInfo(nodeInfo)) + } +} + +// dumpSchedulingQueue writes pods in the scheduling queue to the scheduler logs. +func (d *CacheDumper) dumpSchedulingQueue() { + waitingPods := d.podQueue.WaitingPods() + var podData strings.Builder + for _, p := range waitingPods { + podData.WriteString(printPod(p)) + } + klog.Infof("Dump of scheduling queue:\n%s", podData.String()) +} + +// printNodeInfo writes parts of NodeInfo to a string. 
+func printNodeInfo(n *cache.NodeInfo) string { + var nodeData strings.Builder + nodeData.WriteString(fmt.Sprintf("\nNode name: %+v\nRequested Resources: %+v\nAllocatable Resources:%+v\nNumber of Pods: %v\nPods:\n", + n.Node().Name, n.RequestedResource(), n.AllocatableResource(), len(n.Pods()))) + // Dumping Pod Info + for _, p := range n.Pods() { + nodeData.WriteString(printPod(p)) + } + return nodeData.String() +} + +// printPod writes parts of a Pod object to a string. +func printPod(p *v1.Pod) string { + return fmt.Sprintf("name: %v, namespace: %v, uid: %v, phase: %v, nominated node: %v\n", p.Name, p.Namespace, p.UID, p.Status.Phase, p.Status.NominatedNodeName) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/interface.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/interface.go new file mode 100644 index 000000000000..878c2aa07411 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/interface.go @@ -0,0 +1,122 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" +) + +// PodFilter is a function to filter a pod. If pod passed return true else return false. +type PodFilter func(*v1.Pod) bool + +// Cache collects pods' information and provides node-level aggregated information. +// It's intended for generic scheduler to do efficient lookup. +// Cache's operations are pod centric. It does incremental updates based on pod events. +// Pod events are sent via network. We don't have guaranteed delivery of all events: +// We use Reflector to list and watch from remote. +// Reflector might be slow and do a relist, which would lead to missing events. +// +// State Machine of a pod's events in scheduler's cache: +// +// +// +-------------------------------------------+ +----+ +// | Add | | | +// | | | | Update +// + Assume Add v v | +//Initial +--------> Assumed +------------+---> Added <--+ +// ^ + + | + +// | | | | | +// | | | Add | | Remove +// | | | | | +// | | | + | +// +----------------+ +-----------> Expired +----> Deleted +// Forget Expire +// +// +// Note that an assumed pod can expire, because if we haven't received Add event notifying us +// for a while, there might be some problems and we shouldn't keep the pod in cache anymore. +// +// Note that "Initial", "Expired", and "Deleted" pods do not actually exist in cache. +// Based on existing use cases, we are making the following assumptions: +// - No pod would be assumed twice +// - A pod could be added without going through scheduler. In this case, we will see Add but not Assume event. +// - If a pod wasn't added, it wouldn't be removed or updated. +// - Both "Expired" and "Deleted" are valid end states. In case of some problems, e.g. network issue, +// a pod might have changed its state (e.g. added and deleted) without delivering notification to the cache. 
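The state machine in the comment above is easiest to grasp in miniature. The following is a hedged, self-contained toy of the assume/confirm/expire lifecycle only; the type and method names are invented for this sketch, and the real cache additionally aggregates pod resources into per-node NodeInfo:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type podState struct {
	bindingFinished bool
	deadline        time.Time
}

// assumeCache is a toy version of the scheduler cache's assumed-pod
// bookkeeping: Assume records a pod optimistically, FinishBinding arms a
// TTL, Confirm models the Add event, and cleanup expires pods whose Add
// event never arrived.
type assumeCache struct {
	mu      sync.Mutex
	ttl     time.Duration
	assumed map[string]*podState
}

func newAssumeCache(ttl time.Duration) *assumeCache {
	return &assumeCache{ttl: ttl, assumed: map[string]*podState{}}
}

func (c *assumeCache) Assume(key string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.assumed[key] = &podState{}
}

func (c *assumeCache) FinishBinding(key string, now time.Time) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if ps, ok := c.assumed[key]; ok {
		ps.bindingFinished = true
		ps.deadline = now.Add(c.ttl)
	}
}

// Confirm corresponds to the Add event: the pod leaves the assumed set.
func (c *assumeCache) Confirm(key string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	delete(c.assumed, key)
}

// cleanup expires assumed pods whose binding finished and whose TTL passed,
// mirroring cleanupAssumedPods above.
func (c *assumeCache) cleanup(now time.Time) (expired []string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	for key, ps := range c.assumed {
		if ps.bindingFinished && now.After(ps.deadline) {
			delete(c.assumed, key)
			expired = append(expired, key)
		}
	}
	return expired
}

func main() {
	c := newAssumeCache(30 * time.Second)
	c.Assume("default/nginx")
	c.FinishBinding("default/nginx", time.Now())
	fmt.Println(c.cleanup(time.Now().Add(time.Minute))) // [default/nginx]
}
```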
+type Cache interface { + // AssumePod assumes a pod scheduled and aggregates the pod's information into its node. + // The implementation also decides the policy to expire pod before being confirmed (receiving Add event). + // After expiration, its information would be subtracted. + AssumePod(pod *v1.Pod) error + + // FinishBinding signals that cache for assumed pod can be expired + FinishBinding(pod *v1.Pod) error + + // ForgetPod removes an assumed pod from cache. + ForgetPod(pod *v1.Pod) error + + // AddPod either confirms a pod if it's assumed, or adds it back if it's expired. + // If added back, the pod's information would be added again. + AddPod(pod *v1.Pod) error + + // UpdatePod removes oldPod's information and adds newPod's information. + UpdatePod(oldPod, newPod *v1.Pod) error + + // RemovePod removes a pod. The pod's information would be subtracted from assigned node. + RemovePod(pod *v1.Pod) error + + // GetPod returns the pod from the cache with the same namespace and the + // same name of the specified pod. + GetPod(pod *v1.Pod) (*v1.Pod, error) + + // IsAssumedPod returns true if the pod is assumed and not expired. + IsAssumedPod(pod *v1.Pod) (bool, error) + + // AddNode adds overall information about node. + AddNode(node *v1.Node) error + + // UpdateNode updates overall information about node. + UpdateNode(oldNode, newNode *v1.Node) error + + // RemoveNode removes overall information about node. + RemoveNode(node *v1.Node) error + + // UpdateNodeNameToInfoMap updates the passed infoMap to the current contents of Cache. + // The node info contains aggregated information of pods scheduled (including assumed to be) + // on this node. + UpdateNodeNameToInfoMap(infoMap map[string]*schedulercache.NodeInfo) error + + // List lists all cached pods (including assumed ones). + List(labels.Selector) ([]*v1.Pod, error) + + // FilteredList returns all cached pods that pass the filter. + FilteredList(filter PodFilter, selector labels.Selector) ([]*v1.Pod, error) + + // Snapshot takes a snapshot on current cache + Snapshot() *Snapshot + + // NodeTree returns a node tree structure + NodeTree() *NodeTree +} + +// Snapshot is a snapshot of cache state +type Snapshot struct { + AssumedPods map[string]bool + Nodes map[string]*schedulercache.NodeInfo +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/cache/node_tree.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/node_tree.go similarity index 79% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/cache/node_tree.go rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/node_tree.go index 4a8e08d22abf..80ce6d195fa8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/cache/node_tree.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/node_tree.go @@ -21,21 +21,19 @@ import ( "sync" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/sets" utilnode "k8s.io/kubernetes/pkg/util/node" - "github.com/golang/glog" + "k8s.io/klog" ) // NodeTree is a tree-like data structure that holds node names in each zone. Zone names are // keys to "NodeTree.tree" and values of "NodeTree.tree" are arrays of node names. type NodeTree struct { - tree map[string]*nodeArray // a map from zone (region-zone) to an array of nodes in the zone. 
- zones []string // a list of all the zones in the tree (keys) - zoneIndex int - exhaustedZones sets.String // set of zones that all of their nodes are returned by next() - NumNodes int - mu sync.RWMutex + tree map[string]*nodeArray // a map from zone (region-zone) to an array of nodes in the zone. + zones []string // a list of all the zones in the tree (keys) + zoneIndex int + NumNodes int + mu sync.RWMutex } // nodeArray is a struct that has nodes that are in a zone. @@ -48,7 +46,7 @@ type nodeArray struct { func (na *nodeArray) next() (nodeName string, exhausted bool) { if len(na.nodes) == 0 { - glog.Error("The nodeArray is empty. It should have been deleted from NodeTree.") + klog.Error("The nodeArray is empty. It should have been deleted from NodeTree.") return "", false } if na.lastIndex >= len(na.nodes) { @@ -59,10 +57,10 @@ func (na *nodeArray) next() (nodeName string, exhausted bool) { return nodeName, false } +// newNodeTree creates a NodeTree from nodes. func newNodeTree(nodes []*v1.Node) *NodeTree { nt := &NodeTree{ - tree: make(map[string]*nodeArray), - exhaustedZones: sets.NewString(), + tree: make(map[string]*nodeArray), } for _, n := range nodes { nt.AddNode(n) @@ -83,7 +81,7 @@ func (nt *NodeTree) addNode(n *v1.Node) { if na, ok := nt.tree[zone]; ok { for _, nodeName := range na.nodes { if nodeName == n.Name { - glog.Warningf("node %v already exist in the NodeTree", n.Name) + klog.Warningf("node %v already exist in the NodeTree", n.Name) return } } @@ -92,7 +90,7 @@ func (nt *NodeTree) addNode(n *v1.Node) { nt.zones = append(nt.zones, zone) nt.tree[zone] = &nodeArray{nodes: []string{n.Name}, lastIndex: 0} } - glog.V(5).Infof("Added node %v in group %v to NodeTree", n.Name, zone) + klog.V(5).Infof("Added node %v in group %v to NodeTree", n.Name, zone) nt.NumNodes++ } @@ -112,13 +110,13 @@ func (nt *NodeTree) removeNode(n *v1.Node) error { if len(na.nodes) == 0 { nt.removeZone(zone) } - glog.V(5).Infof("Removed node %v in group %v from NodeTree", n.Name, zone) + klog.V(5).Infof("Removed node %v in group %v from NodeTree", n.Name, zone) nt.NumNodes-- return nil } } } - glog.Errorf("Node %v in group %v was not found", n.Name, zone) + klog.Errorf("Node %v in group %v was not found", n.Name, zone) return fmt.Errorf("node %v in group %v was not found", n.Name, zone) } @@ -155,7 +153,7 @@ func (nt *NodeTree) resetExhausted() { for _, na := range nt.tree { na.lastIndex = 0 } - nt.exhaustedZones = sets.NewString() + nt.zoneIndex = 0 } // Next returns the name of the next node. NodeTree iterates over zones and in each zone iterates @@ -166,18 +164,19 @@ func (nt *NodeTree) Next() string { if len(nt.zones) == 0 { return "" } + numExhaustedZones := 0 for { if nt.zoneIndex >= len(nt.zones) { nt.zoneIndex = 0 } zone := nt.zones[nt.zoneIndex] nt.zoneIndex++ - // We do not check the set of exhausted zones before calling next() on the zone. This ensures + // We do not check the exhausted zones before calling next() on the zone. This ensures // that if more nodes are added to a zone after it is exhausted, we iterate over the new nodes. nodeName, exhausted := nt.tree[zone].next() if exhausted { - nt.exhaustedZones.Insert(zone) - if len(nt.exhaustedZones) == len(nt.zones) { // all zones are exhausted. we should reset. + numExhaustedZones++ + if numExhaustedZones >= len(nt.zones) { // all zones are exhausted. we should reset. 
nt.resetExhausted() } } else { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/queue/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/queue/BUILD new file mode 100644 index 000000000000..c675f8592482 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/queue/BUILD @@ -0,0 +1,43 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["scheduling_queue.go"], + importpath = "k8s.io/kubernetes/pkg/scheduler/internal/queue", + visibility = ["//pkg/scheduler:__subpackages__"], + deps = [ + "//pkg/api/v1/pod:go_default_library", + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/algorithm/priorities/util:go_default_library", + "//pkg/scheduler/util:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/client-go/tools/cache:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["scheduling_queue_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/scheduler/util:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/scheduling_queue.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/queue/scheduling_queue.go similarity index 90% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/scheduling_queue.go rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/queue/scheduling_queue.go index 3c87aa246e04..6f5aa682c98f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/core/scheduling_queue.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/internal/queue/scheduling_queue.go @@ -24,7 +24,7 @@ limitations under the License. // FIFO is here for flag-gating purposes and allows us to use the traditional // scheduling queue when util.PodPriorityEnabled() returns false. -package core +package queue import ( "container/heap" @@ -32,7 +32,7 @@ import ( "reflect" "sync" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -43,6 +43,10 @@ import ( "k8s.io/kubernetes/pkg/scheduler/util" ) +var ( + queueClosed = "scheduling queue is closed" +) + // SchedulingQueue is an interface for a queue to store pods waiting to be scheduled. // The interface follows a pattern similar to cache.FIFO and cache.Heap and // makes it easy to use those data structures as a SchedulingQueue. @@ -50,6 +54,8 @@ type SchedulingQueue interface { Add(pod *v1.Pod) error AddIfNotPresent(pod *v1.Pod) error AddUnschedulableIfNotPresent(pod *v1.Pod) error + // Pop removes the head of the queue and returns it. It blocks if the + // queue is empty and waits until a new item is added to the queue. 
Pop() (*v1.Pod, error) Update(oldPod, newPod *v1.Pod) error Delete(pod *v1.Pod) error @@ -58,6 +64,11 @@ type SchedulingQueue interface { AssignedPodUpdated(pod *v1.Pod) WaitingPodsForNode(nodeName string) []*v1.Pod WaitingPods() []*v1.Pod + // Close closes the SchedulingQueue so that the goroutine which is + // waiting to pop items can exit gracefully. + Close() + // DeleteNominatedPodIfExists deletes nominatedPod from internal cache + DeleteNominatedPodIfExists(pod *v1.Pod) } // NewSchedulingQueue initializes a new scheduling queue. If pod priority is @@ -109,12 +120,11 @@ func (f *FIFO) Delete(pod *v1.Pod) error { // shouldn't be used in production code, but scheduler has always been using it. // This function does minimal error checking. func (f *FIFO) Pop() (*v1.Pod, error) { - var result interface{} - f.FIFO.Pop(func(obj interface{}) error { - result = obj - return nil - }) - return result.(*v1.Pod), nil + result, err := f.FIFO.Pop(func(obj interface{}) error { return nil }) + if err == cache.FIFOClosedError { + return nil, fmt.Errorf(queueClosed) + } + return result.(*v1.Pod), err } // WaitingPods returns all the waiting pods in the queue. @@ -144,6 +154,14 @@ func (f *FIFO) WaitingPodsForNode(nodeName string) []*v1.Pod { return nil } +// Close closes the FIFO queue. +func (f *FIFO) Close() { + f.FIFO.Close() +} + +// DeleteNominatedPodIfExists does nothing in FIFO. +func (f *FIFO) DeleteNominatedPodIfExists(pod *v1.Pod) {} + // NewFIFO creates a FIFO object. func NewFIFO() *FIFO { return &FIFO{FIFO: cache.NewFIFO(cache.MetaNamespaceKeyFunc)} @@ -179,6 +197,10 @@ type PriorityQueue struct { // pod was in flight (we were trying to schedule it). In such a case, we put // the pod back into the activeQ if it is determined unschedulable. receivedMoveRequest bool + + // closed indicates that the queue is closed. + // It is mainly used to let Pop() exit its control loop while waiting for an item. + closed bool } // Making sure that PriorityQueue implements SchedulingQueue. @@ -202,7 +224,7 @@ func (p *PriorityQueue) addNominatedPodIfNeeded(pod *v1.Pod) { if len(nnn) > 0 { for _, np := range p.nominatedPods[nnn] { if np.UID == pod.UID { - glog.Errorf("Pod %v/%v already exists in the nominated map!", pod.Namespace, pod.Name) + klog.V(4).Infof("Pod %v/%v already exists in the nominated map!", pod.Namespace, pod.Name) return } } @@ -211,6 +233,7 @@ func (p *PriorityQueue) addNominatedPodIfNeeded(pod *v1.Pod) { } // deleteNominatedPodIfExists deletes a pod from the nominatedPods. +// NOTE: this function assumes lock has been acquired in caller. 
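The nominatedPods bookkeeping touched above is, at its core, a map from node name to the pods nominated to run there, deduplicated by UID and pruned when a node's slice empties. A standalone sketch of that pattern, with types deliberately simplified and names invented for illustration:

```go
package main

import "fmt"

type pod struct {
	UID           string
	NominatedNode string
}

// nominator tracks pods by the node they were nominated for: adds are
// deduplicated by UID, and deletes drop the node's entry entirely once
// its slice becomes empty.
type nominator map[string][]pod

func (n nominator) add(p pod) {
	if p.NominatedNode == "" {
		return
	}
	for _, np := range n[p.NominatedNode] {
		if np.UID == p.UID {
			return // already recorded for this node
		}
	}
	n[p.NominatedNode] = append(n[p.NominatedNode], p)
}

func (n nominator) delete(p pod) {
	if p.NominatedNode == "" {
		return
	}
	pods := n[p.NominatedNode]
	for i, np := range pods {
		if np.UID == p.UID {
			n[p.NominatedNode] = append(pods[:i], pods[i+1:]...)
			break
		}
	}
	if len(n[p.NominatedNode]) == 0 {
		delete(n, p.NominatedNode)
	}
}

func main() {
	n := nominator{}
	p := pod{UID: "u1", NominatedNode: "node-a"}
	n.add(p)
	n.add(p)                      // deduplicated by UID
	fmt.Println(len(n["node-a"])) // 1
	n.delete(p)
	fmt.Println(len(n)) // 0
}
```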
func (p *PriorityQueue) deleteNominatedPodIfExists(pod *v1.Pod) { nnn := NominatedNodeName(pod) if len(nnn) > 0 { @@ -241,10 +264,10 @@ func (p *PriorityQueue) Add(pod *v1.Pod) error { defer p.lock.Unlock() err := p.activeQ.Add(pod) if err != nil { - glog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err) + klog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err) } else { if p.unschedulableQ.get(pod) != nil { - glog.Errorf("Error: pod %v/%v is already in the unschedulable queue.", pod.Namespace, pod.Name) + klog.Errorf("Error: pod %v/%v is already in the unschedulable queue.", pod.Namespace, pod.Name) p.deleteNominatedPodIfExists(pod) p.unschedulableQ.delete(pod) } @@ -267,7 +290,7 @@ func (p *PriorityQueue) AddIfNotPresent(pod *v1.Pod) error { } err := p.activeQ.Add(pod) if err != nil { - glog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err) + klog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err) } else { p.addNominatedPodIfNeeded(pod) p.cond.Broadcast() @@ -312,6 +335,12 @@ func (p *PriorityQueue) Pop() (*v1.Pod, error) { p.lock.Lock() defer p.lock.Unlock() for len(p.activeQ.data.queue) == 0 { + // When the queue is empty, invocation of Pop() is blocked until new item is enqueued. + // When Close() is called, the p.closed is set and the condition is broadcast, + // which causes this loop to continue and return from the Pop(). + if p.closed { + return nil, fmt.Errorf(queueClosed) + } p.cond.Wait() } obj, err := p.activeQ.Pop() @@ -319,7 +348,6 @@ func (p *PriorityQueue) Pop() (*v1.Pod, error) { return nil, err } pod := obj.(*v1.Pod) - p.deleteNominatedPodIfExists(pod) p.receivedMoveRequest = false return pod, err } @@ -388,13 +416,17 @@ func (p *PriorityQueue) Delete(pod *v1.Pod) error { // AssignedPodAdded is called when a bound pod is added. Creation of this pod // may make pending pods with matching affinity terms schedulable. func (p *PriorityQueue) AssignedPodAdded(pod *v1.Pod) { + p.lock.Lock() p.movePodsToActiveQueue(p.getUnschedulablePodsWithMatchingAffinityTerm(pod)) + p.lock.Unlock() } // AssignedPodUpdated is called when a bound pod is updated. Change of labels // may make pending pods with matching affinity terms schedulable. func (p *PriorityQueue) AssignedPodUpdated(pod *v1.Pod) { + p.lock.Lock() p.movePodsToActiveQueue(p.getUnschedulablePodsWithMatchingAffinityTerm(pod)) + p.lock.Unlock() } // MoveAllToActiveQueue moves all pods from unschedulableQ to activeQ. 
This @@ -410,7 +442,7 @@ func (p *PriorityQueue) MoveAllToActiveQueue() { defer p.lock.Unlock() for _, pod := range p.unschedulableQ.pods { if err := p.activeQ.Add(pod); err != nil { - glog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err) + klog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err) } } p.unschedulableQ.clear() @@ -418,14 +450,13 @@ func (p *PriorityQueue) MoveAllToActiveQueue() { p.cond.Broadcast() } +// NOTE: this function assumes lock has been acquired in caller func (p *PriorityQueue) movePodsToActiveQueue(pods []*v1.Pod) { - p.lock.Lock() - defer p.lock.Unlock() for _, pod := range pods { if err := p.activeQ.Add(pod); err == nil { p.unschedulableQ.delete(pod) } else { - glog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err) + klog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err) } } p.receivedMoveRequest = true @@ -434,9 +465,8 @@ func (p *PriorityQueue) movePodsToActiveQueue(pods []*v1.Pod) { // getUnschedulablePodsWithMatchingAffinityTerm returns unschedulable pods which have // any affinity term that matches "pod". +// NOTE: this function assumes lock has been acquired in caller. func (p *PriorityQueue) getUnschedulablePodsWithMatchingAffinityTerm(pod *v1.Pod) []*v1.Pod { - p.lock.RLock() - defer p.lock.RUnlock() var podsToMove []*v1.Pod for _, up := range p.unschedulableQ.pods { affinity := up.Spec.Affinity @@ -446,7 +476,7 @@ func (p *PriorityQueue) getUnschedulablePodsWithMatchingAffinityTerm(pod *v1.Pod namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(up, &term) selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector) if err != nil { - glog.Errorf("Error getting label selectors for pod: %v.", up.Name) + klog.Errorf("Error getting label selectors for pod: %v.", up.Name) } if priorityutil.PodMatchesTermsNamespaceAndSelector(pod, namespaces, selector) { podsToMove = append(podsToMove, up) @@ -485,6 +515,21 @@ func (p *PriorityQueue) WaitingPods() []*v1.Pod { return result } +// Close closes the priority queue. +func (p *PriorityQueue) Close() { + p.lock.Lock() + defer p.lock.Unlock() + p.closed = true + p.cond.Broadcast() +} + +// DeleteNominatedPodIfExists deletes pod from internal cache if it's a nominatedPod +func (p *PriorityQueue) DeleteNominatedPodIfExists(pod *v1.Pod) { + p.lock.Lock() + p.deleteNominatedPodIfExists(pod) + p.lock.Unlock() +} + // UnschedulablePodsMap holds pods that cannot be scheduled. This data structure // is used to implement unschedulableQ. 
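The new Close method plus the closed flag implement a common shutdown handshake: Pop waits on a condition variable until either an item arrives or the queue is closed, and Close broadcasts so that blocked goroutines can exit gracefully. A minimal sketch of that pattern, independent of the scheduler's types (names here are illustrative):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

var errClosed = errors.New("scheduling queue is closed")

// condQueue blocks in Pop until an item is pushed or Close is called,
// mirroring PriorityQueue's closed-flag-plus-Broadcast handshake.
type condQueue struct {
	mu     sync.Mutex
	cond   *sync.Cond
	items  []string
	closed bool
}

func newCondQueue() *condQueue {
	q := &condQueue{}
	q.cond = sync.NewCond(&q.mu)
	return q
}

func (q *condQueue) Add(item string) {
	q.mu.Lock()
	defer q.mu.Unlock()
	q.items = append(q.items, item)
	q.cond.Broadcast()
}

func (q *condQueue) Pop() (string, error) {
	q.mu.Lock()
	defer q.mu.Unlock()
	for len(q.items) == 0 {
		// Checked inside the wait loop: a Broadcast from Close wakes us
		// here and lets the caller return instead of blocking forever.
		if q.closed {
			return "", errClosed
		}
		q.cond.Wait()
	}
	item := q.items[0]
	q.items = q.items[1:]
	return item, nil
}

func (q *condQueue) Close() {
	q.mu.Lock()
	defer q.mu.Unlock()
	q.closed = true
	q.cond.Broadcast() // wake every goroutine blocked in Pop
}

func main() {
	q := newCondQueue()
	done := make(chan struct{})
	go func() {
		defer close(done)
		for {
			item, err := q.Pop()
			if err != nil {
				fmt.Println("worker exiting:", err)
				return
			}
			fmt.Println("popped:", item)
		}
	}()
	q.Add("pod-1")
	q.Close()
	<-done
}
```

Note that remaining items are still drained before the closed flag takes effect, since the flag is only consulted when the queue is empty.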
type UnschedulablePodsMap struct { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/BUILD index 81d6d19d5876..9b2bce0ff958 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/BUILD @@ -9,7 +9,10 @@ go_library( name = "go_default_library", srcs = ["metrics.go"], importpath = "k8s.io/kubernetes/pkg/scheduler/metrics", - deps = ["//vendor/github.com/prometheus/client_golang/prometheus:go_default_library"], + deps = [ + "//pkg/controller/volume/persistentvolume:go_default_library", + "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + ], ) filegroup( diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/metrics.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/metrics.go index bd02b23fb4a2..0ebbd4f8ef65 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/metrics/metrics.go @@ -21,6 +21,7 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" + "k8s.io/kubernetes/pkg/controller/volume/persistentvolume" ) const ( @@ -46,6 +47,18 @@ const ( // All the histogram based metrics have 1ms as size for the smallest bucket. var ( + scheduleAttempts = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: SchedulerSubsystem, + Name: "schedule_attempts_total", + Help: "Number of attempts to schedule pods, by the result. 'unschedulable' means a pod could not be scheduled, while 'error' means an internal scheduler problem.", + }, []string{"result"}) + // PodScheduleSuccesses counts how many pods were scheduled. + PodScheduleSuccesses = scheduleAttempts.With(prometheus.Labels{"result": "scheduled"}) + // PodScheduleFailures counts how many pods could not be scheduled. + PodScheduleFailures = scheduleAttempts.With(prometheus.Labels{"result": "unschedulable"}) + // PodScheduleErrors counts how many pods could not be scheduled due to a scheduler error. + PodScheduleErrors = scheduleAttempts.With(prometheus.Labels{"result": "error"}) SchedulingLatency = prometheus.NewSummaryVec( prometheus.SummaryOpts{ Subsystem: SchedulerSubsystem, @@ -135,6 +148,7 @@ var ( }, []string{"result"}) metricsList = []prometheus.Collector{ + scheduleAttempts, SchedulingLatency, E2eSchedulingLatency, SchedulingAlgorithmLatency, @@ -158,6 +172,8 @@ func Register() { for _, metric := range metricsList { prometheus.MustRegister(metric) } + + persistentvolume.RegisterVolumeSchedulingMetrics() }) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/scheduler.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/scheduler.go deleted file mode 100644 index 3d836b6ee57d..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/scheduler.go +++ /dev/null @@ -1,466 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package scheduler - -import ( - "time" - - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" - utilfeature "k8s.io/apiserver/pkg/util/feature" - clientset "k8s.io/client-go/kubernetes" - corelisters "k8s.io/client-go/listers/core/v1" - "k8s.io/client-go/tools/record" - "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/scheduler/algorithm" - "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" - "k8s.io/kubernetes/pkg/scheduler/core" - "k8s.io/kubernetes/pkg/scheduler/core/equivalence" - "k8s.io/kubernetes/pkg/scheduler/metrics" - "k8s.io/kubernetes/pkg/scheduler/util" - "k8s.io/kubernetes/pkg/scheduler/volumebinder" - - "github.com/golang/glog" -) - -// Binder knows how to write a binding. -type Binder interface { - Bind(binding *v1.Binding) error -} - -// PodConditionUpdater updates the condition of a pod based on the passed -// PodCondition -type PodConditionUpdater interface { - Update(pod *v1.Pod, podCondition *v1.PodCondition) error -} - -// PodPreemptor has methods needed to delete a pod and to update -// annotations of the preemptor pod. -type PodPreemptor interface { - GetUpdatedPod(pod *v1.Pod) (*v1.Pod, error) - DeletePod(pod *v1.Pod) error - SetNominatedNodeName(pod *v1.Pod, nominatedNode string) error - RemoveNominatedNodeName(pod *v1.Pod) error -} - -// Scheduler watches for new unscheduled pods. It attempts to find -// nodes that they fit on and writes bindings back to the api server. -type Scheduler struct { - config *Config -} - -// StopEverything closes the scheduler config's StopEverything channel, to shut -// down the Scheduler. -func (sched *Scheduler) StopEverything() { - close(sched.config.StopEverything) -} - -// Cache returns the cache in scheduler for test to check the data in scheduler. -func (sched *Scheduler) Cache() schedulercache.Cache { - return sched.config.SchedulerCache -} - -// Configurator defines I/O, caching, and other functionality needed to -// construct a new scheduler. An implementation of this can be seen in -// factory.go. -type Configurator interface { - // Exposed for testing - GetHardPodAffinitySymmetricWeight() int32 - // Exposed for testing - MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue core.SchedulingQueue) func(pod *v1.Pod, err error) - - // Predicate related accessors to be exposed for use by k8s.io/autoscaler/cluster-autoscaler - GetPredicateMetadataProducer() (algorithm.PredicateMetadataProducer, error) - GetPredicates(predicateKeys sets.String) (map[string]algorithm.FitPredicate, error) - - // Needs to be exposed for things like integration tests where we want to make fake nodes. - GetNodeLister() corelisters.NodeLister - // Exposed for testing - GetClient() clientset.Interface - // Exposed for testing - GetScheduledPodLister() corelisters.PodLister - - Create() (*Config, error) - CreateFromProvider(providerName string) (*Config, error) - CreateFromConfig(policy schedulerapi.Policy) (*Config, error) - CreateFromKeys(predicateKeys, priorityKeys sets.String, extenders []algorithm.SchedulerExtender) (*Config, error) -} - -// Config is an implementation of the Scheduler's configured input data. -// TODO over time we should make this struct a hidden implementation detail of the scheduler. 
-type Config struct { - // It is expected that changes made via SchedulerCache will be observed - // by NodeLister and Algorithm. - SchedulerCache schedulercache.Cache - // Ecache is used for optimistically invalid affected cache items after - // successfully binding a pod - Ecache *equivalence.Cache - NodeLister algorithm.NodeLister - Algorithm algorithm.ScheduleAlgorithm - GetBinder func(pod *v1.Pod) Binder - // PodConditionUpdater is used only in case of scheduling errors. If we succeed - // with scheduling, PodScheduled condition will be updated in apiserver in /bind - // handler so that binding and setting PodCondition it is atomic. - PodConditionUpdater PodConditionUpdater - // PodPreemptor is used to evict pods and update pod annotations. - PodPreemptor PodPreemptor - - // NextPod should be a function that blocks until the next pod - // is available. We don't use a channel for this, because scheduling - // a pod may take some amount of time and we don't want pods to get - // stale while they sit in a channel. - NextPod func() *v1.Pod - - // WaitForCacheSync waits for scheduler cache to populate. - // It returns true if it was successful, false if the controller should shutdown. - WaitForCacheSync func() bool - - // Error is called if there is an error. It is passed the pod in - // question, and the error - Error func(*v1.Pod, error) - - // Recorder is the EventRecorder to use - Recorder record.EventRecorder - - // Close this to shut down the scheduler. - StopEverything chan struct{} - - // VolumeBinder handles PVC/PV binding for the pod. - VolumeBinder *volumebinder.VolumeBinder - - // Disable pod preemption or not. - DisablePreemption bool -} - -// NewFromConfigurator returns a new scheduler that is created entirely by the Configurator. Assumes Create() is implemented. -// Supports intermediate Config mutation for now if you provide modifier functions which will run after Config is created. -func NewFromConfigurator(c Configurator, modifiers ...func(c *Config)) (*Scheduler, error) { - cfg, err := c.Create() - if err != nil { - return nil, err - } - // Mutate it if any functions were provided, changes might be required for certain types of tests (i.e. change the recorder). - for _, modifier := range modifiers { - modifier(cfg) - } - // From this point on the config is immutable to the outside. - s := &Scheduler{ - config: cfg, - } - metrics.Register() - return s, nil -} - -// NewFromConfig returns a new scheduler using the provided Config. -func NewFromConfig(config *Config) *Scheduler { - metrics.Register() - return &Scheduler{ - config: config, - } -} - -// Run begins watching and scheduling. It waits for cache to be synced, then starts a goroutine and returns immediately. -func (sched *Scheduler) Run() { - if !sched.config.WaitForCacheSync() { - return - } - - go wait.Until(sched.scheduleOne, 0, sched.config.StopEverything) -} - -// Config return scheduler's config pointer. It is exposed for testing purposes. -func (sched *Scheduler) Config() *Config { - return sched.config -} - -// schedule implements the scheduling algorithm and returns the suggested host. 
-func (sched *Scheduler) schedule(pod *v1.Pod) (string, error) { - host, err := sched.config.Algorithm.Schedule(pod, sched.config.NodeLister) - if err != nil { - pod = pod.DeepCopy() - sched.config.Error(pod, err) - sched.config.Recorder.Eventf(pod, v1.EventTypeWarning, "FailedScheduling", "%v", err) - sched.config.PodConditionUpdater.Update(pod, &v1.PodCondition{ - Type: v1.PodScheduled, - Status: v1.ConditionFalse, - Reason: v1.PodReasonUnschedulable, - Message: err.Error(), - }) - return "", err - } - return host, err -} - -// preempt tries to create room for a pod that has failed to schedule, by preempting lower priority pods if possible. -// If it succeeds, it adds the name of the node where preemption has happened to the pod annotations. -// It returns the node name and an error if any. -func (sched *Scheduler) preempt(preemptor *v1.Pod, scheduleErr error) (string, error) { - if !util.PodPriorityEnabled() || sched.config.DisablePreemption { - glog.V(3).Infof("Pod priority feature is not enabled or preemption is disabled by scheduler configuration." + - " No preemption is performed.") - return "", nil - } - preemptor, err := sched.config.PodPreemptor.GetUpdatedPod(preemptor) - if err != nil { - glog.Errorf("Error getting the updated preemptor pod object: %v", err) - return "", err - } - - node, victims, nominatedPodsToClear, err := sched.config.Algorithm.Preempt(preemptor, sched.config.NodeLister, scheduleErr) - metrics.PreemptionVictims.Set(float64(len(victims))) - if err != nil { - glog.Errorf("Error preempting victims to make room for %v/%v.", preemptor.Namespace, preemptor.Name) - return "", err - } - var nodeName = "" - if node != nil { - nodeName = node.Name - err = sched.config.PodPreemptor.SetNominatedNodeName(preemptor, nodeName) - if err != nil { - glog.Errorf("Error in preemption process. Cannot update pod %v/%v annotations: %v", preemptor.Namespace, preemptor.Name, err) - return "", err - } - for _, victim := range victims { - if err := sched.config.PodPreemptor.DeletePod(victim); err != nil { - glog.Errorf("Error preempting pod %v/%v: %v", victim.Namespace, victim.Name, err) - return "", err - } - sched.config.Recorder.Eventf(victim, v1.EventTypeNormal, "Preempted", "by %v/%v on node %v", preemptor.Namespace, preemptor.Name, nodeName) - } - } - // Clearing nominated pods should happen outside of "if node != nil". Node could - // be nil when a pod with nominated node name is eligible to preempt again, - // but preemption logic does not find any node for it. In that case Preempt() - // function of generic_scheduler.go returns the pod itself for removal of the annotation. - for _, p := range nominatedPodsToClear { - rErr := sched.config.PodPreemptor.RemoveNominatedNodeName(p) - if rErr != nil { - glog.Errorf("Cannot remove nominated node annotation of pod: %v", rErr) - // We do not return as this error is not critical. - } - } - return nodeName, err -} - -// assumeVolumes will update the volume cache with the chosen bindings -// -// This function modifies assumed if volume binding is required. 
-func (sched *Scheduler) assumeVolumes(assumed *v1.Pod, host string) (allBound bool, err error) { - if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { - allBound, err = sched.config.VolumeBinder.Binder.AssumePodVolumes(assumed, host) - if err != nil { - sched.config.Error(assumed, err) - sched.config.Recorder.Eventf(assumed, v1.EventTypeWarning, "FailedScheduling", "AssumePodVolumes failed: %v", err) - sched.config.PodConditionUpdater.Update(assumed, &v1.PodCondition{ - Type: v1.PodScheduled, - Status: v1.ConditionFalse, - Reason: "SchedulerError", - Message: err.Error(), - }) - } - // Invalidate ecache because assumed volumes could have affected the cached - // pvs for other pods - if sched.config.Ecache != nil { - invalidPredicates := sets.NewString(predicates.CheckVolumeBindingPred) - sched.config.Ecache.InvalidatePredicates(invalidPredicates) - } - } - return -} - -// bindVolumes will make the API update with the assumed bindings and wait until -// the PV controller has completely finished the binding operation. -// -// If binding errors, times out or gets undone, then an error will be returned to -// retry scheduling. -func (sched *Scheduler) bindVolumes(assumed *v1.Pod) error { - var reason string - var eventType string - - glog.V(5).Infof("Trying to bind volumes for pod \"%v/%v\"", assumed.Namespace, assumed.Name) - err := sched.config.VolumeBinder.Binder.BindPodVolumes(assumed) - if err != nil { - glog.V(1).Infof("Failed to bind volumes for pod \"%v/%v\": %v", assumed.Namespace, assumed.Name, err) - - // Unassume the Pod and retry scheduling - if forgetErr := sched.config.SchedulerCache.ForgetPod(assumed); forgetErr != nil { - glog.Errorf("scheduler cache ForgetPod failed: %v", forgetErr) - } - - reason = "VolumeBindingFailed" - eventType = v1.EventTypeWarning - sched.config.Error(assumed, err) - sched.config.Recorder.Eventf(assumed, eventType, "FailedScheduling", "%v", err) - sched.config.PodConditionUpdater.Update(assumed, &v1.PodCondition{ - Type: v1.PodScheduled, - Status: v1.ConditionFalse, - Reason: reason, - }) - return err - } - - glog.V(5).Infof("Success binding volumes for pod \"%v/%v\"", assumed.Namespace, assumed.Name) - return nil -} - -// assume signals to the cache that a pod is already in the cache, so that binding can be asynchronous. -// assume modifies `assumed`. -func (sched *Scheduler) assume(assumed *v1.Pod, host string) error { - // Optimistically assume that the binding will succeed and send it to apiserver - // in the background. - // If the binding fails, scheduler will release resources allocated to assumed pod - // immediately. - assumed.Spec.NodeName = host - // NOTE: Because the scheduler uses snapshots of SchedulerCache and the live - // version of Ecache, updates must be written to SchedulerCache before - // invalidating Ecache. - if err := sched.config.SchedulerCache.AssumePod(assumed); err != nil { - glog.Errorf("scheduler cache AssumePod failed: %v", err) - - // This is most probably result of a BUG in retrying logic. - // We report an error here so that pod scheduling can be retried. - // This relies on the fact that Error will check if the pod has been bound - // to a node and if so will not add it back to the unscheduled pods queue - // (otherwise this would cause an infinite loop). 
- sched.config.Error(assumed, err) - sched.config.Recorder.Eventf(assumed, v1.EventTypeWarning, "FailedScheduling", "AssumePod failed: %v", err) - sched.config.PodConditionUpdater.Update(assumed, &v1.PodCondition{ - Type: v1.PodScheduled, - Status: v1.ConditionFalse, - Reason: "SchedulerError", - Message: err.Error(), - }) - return err - } - - // Optimistically assume that the binding will succeed, so we need to invalidate affected - // predicates in equivalence cache. - // If the binding fails, these invalidated item will not break anything. - if sched.config.Ecache != nil { - sched.config.Ecache.InvalidateCachedPredicateItemForPodAdd(assumed, host) - } - return nil -} - -// bind binds a pod to a given node defined in a binding object. We expect this to run asynchronously, so we -// handle binding metrics internally. -func (sched *Scheduler) bind(assumed *v1.Pod, b *v1.Binding) error { - bindingStart := time.Now() - // If binding succeeded then PodScheduled condition will be updated in apiserver so that - // it's atomic with setting host. - err := sched.config.GetBinder(assumed).Bind(b) - if err := sched.config.SchedulerCache.FinishBinding(assumed); err != nil { - glog.Errorf("scheduler cache FinishBinding failed: %v", err) - } - if err != nil { - glog.V(1).Infof("Failed to bind pod: %v/%v", assumed.Namespace, assumed.Name) - if err := sched.config.SchedulerCache.ForgetPod(assumed); err != nil { - glog.Errorf("scheduler cache ForgetPod failed: %v", err) - } - sched.config.Error(assumed, err) - sched.config.Recorder.Eventf(assumed, v1.EventTypeWarning, "FailedScheduling", "Binding rejected: %v", err) - sched.config.PodConditionUpdater.Update(assumed, &v1.PodCondition{ - Type: v1.PodScheduled, - Status: v1.ConditionFalse, - Reason: "BindingRejected", - }) - return err - } - - metrics.BindingLatency.Observe(metrics.SinceInMicroseconds(bindingStart)) - metrics.SchedulingLatency.WithLabelValues(metrics.Binding).Observe(metrics.SinceInSeconds(bindingStart)) - sched.config.Recorder.Eventf(assumed, v1.EventTypeNormal, "Scheduled", "Successfully assigned %v/%v to %v", assumed.Namespace, assumed.Name, b.Target.Name) - return nil -} - -// scheduleOne does the entire scheduling workflow for a single pod. It is serialized on the scheduling algorithm's host fitting. -func (sched *Scheduler) scheduleOne() { - pod := sched.config.NextPod() - if pod.DeletionTimestamp != nil { - sched.config.Recorder.Eventf(pod, v1.EventTypeWarning, "FailedScheduling", "skip schedule deleting pod: %v/%v", pod.Namespace, pod.Name) - glog.V(3).Infof("Skip schedule deleting pod: %v/%v", pod.Namespace, pod.Name) - return - } - - glog.V(3).Infof("Attempting to schedule pod: %v/%v", pod.Namespace, pod.Name) - - // Synchronously attempt to find a fit for the pod. - start := time.Now() - suggestedHost, err := sched.schedule(pod) - if err != nil { - // schedule() may have failed because the pod would not fit on any host, so we try to - // preempt, with the expectation that the next time the pod is tried for scheduling it - // will fit due to the preemption. It is also possible that a different pod will schedule - // into the resources that were preempted, but this is harmless. 
- if fitError, ok := err.(*core.FitError); ok { - preemptionStartTime := time.Now() - sched.preempt(pod, fitError) - metrics.PreemptionAttempts.Inc() - metrics.SchedulingAlgorithmPremptionEvaluationDuration.Observe(metrics.SinceInMicroseconds(preemptionStartTime)) - metrics.SchedulingLatency.WithLabelValues(metrics.PreemptionEvaluation).Observe(metrics.SinceInSeconds(preemptionStartTime)) - } - return - } - metrics.SchedulingAlgorithmLatency.Observe(metrics.SinceInMicroseconds(start)) - // Tell the cache to assume that a pod now is running on a given node, even though it hasn't been bound yet. - // This allows us to keep scheduling without waiting on binding to occur. - assumedPod := pod.DeepCopy() - - // Assume volumes first before assuming the pod. - // - // If all volumes are completely bound, then allBound is true and binding will be skipped. - // - // Otherwise, binding of volumes is started after the pod is assumed, but before pod binding. - // - // This function modifies 'assumedPod' if volume binding is required. - allBound, err := sched.assumeVolumes(assumedPod, suggestedHost) - if err != nil { - return - } - - // assume modifies `assumedPod` by setting NodeName=suggestedHost - err = sched.assume(assumedPod, suggestedHost) - if err != nil { - return - } - // bind the pod to its host asynchronously (we can do this b/c of the assumption step above). - go func() { - // Bind volumes first before Pod - if !allBound { - err = sched.bindVolumes(assumedPod) - if err != nil { - return - } - } - - err := sched.bind(assumedPod, &v1.Binding{ - ObjectMeta: metav1.ObjectMeta{Namespace: assumedPod.Namespace, Name: assumedPod.Name, UID: assumedPod.UID}, - Target: v1.ObjectReference{ - Kind: "Node", - Name: suggestedHost, - }, - }) - metrics.E2eSchedulingLatency.Observe(metrics.SinceInMicroseconds(start)) - if err != nil { - glog.Errorf("Internal error binding pod: (%v)", err) - } - }() -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/testutil.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/testutil.go deleted file mode 100644 index ded22c0efdd4..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/testutil.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package scheduler - -import ( - "fmt" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/sets" - clientset "k8s.io/client-go/kubernetes" - corelisters "k8s.io/client-go/listers/core/v1" - "k8s.io/kubernetes/pkg/scheduler/algorithm" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - "k8s.io/kubernetes/pkg/scheduler/core" - "k8s.io/kubernetes/pkg/scheduler/util" -) - -// FakeConfigurator is an implementation for test. -type FakeConfigurator struct { - Config *Config -} - -// GetPredicateMetadataProducer is not implemented yet. 
-func (fc *FakeConfigurator) GetPredicateMetadataProducer() (algorithm.PredicateMetadataProducer, error) { - return nil, fmt.Errorf("not implemented") -} - -// GetPredicates is not implemented yet. -func (fc *FakeConfigurator) GetPredicates(predicateKeys sets.String) (map[string]algorithm.FitPredicate, error) { - return nil, fmt.Errorf("not implemented") -} - -// GetHardPodAffinitySymmetricWeight is not implemented yet. -func (fc *FakeConfigurator) GetHardPodAffinitySymmetricWeight() int32 { - panic("not implemented") -} - -// MakeDefaultErrorFunc is not implemented yet. -func (fc *FakeConfigurator) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue core.SchedulingQueue) func(pod *v1.Pod, err error) { - return nil -} - -// GetNodeLister is not implemented yet. -func (fc *FakeConfigurator) GetNodeLister() corelisters.NodeLister { - return nil -} - -// GetClient is not implemented yet. -func (fc *FakeConfigurator) GetClient() clientset.Interface { - return nil -} - -// GetScheduledPodLister is not implemented yet. -func (fc *FakeConfigurator) GetScheduledPodLister() corelisters.PodLister { - return nil -} - -// Create returns FakeConfigurator.Config -func (fc *FakeConfigurator) Create() (*Config, error) { - return fc.Config, nil -} - -// CreateFromProvider returns FakeConfigurator.Config -func (fc *FakeConfigurator) CreateFromProvider(providerName string) (*Config, error) { - return fc.Config, nil -} - -// CreateFromConfig returns FakeConfigurator.Config -func (fc *FakeConfigurator) CreateFromConfig(policy schedulerapi.Policy) (*Config, error) { - return fc.Config, nil -} - -// CreateFromKeys returns FakeConfigurator.Config -func (fc *FakeConfigurator) CreateFromKeys(predicateKeys, priorityKeys sets.String, extenders []algorithm.SchedulerExtender) (*Config, error) { - return fc.Config, nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/util/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/util/BUILD index 57839b6708ea..810d2c5cb0a9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/util/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/util/BUILD @@ -34,7 +34,7 @@ go_library( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/util/backoff_utils.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/util/backoff_utils.go index 50920ae86c83..506cd1270acc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/util/backoff_utils.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/util/backoff_utils.go @@ -24,7 +24,7 @@ import ( ktypes "k8s.io/apimachinery/pkg/types" - "github.com/golang/glog" + "k8s.io/klog" ) type clock interface { @@ -76,7 +76,7 @@ func (b *BackoffEntry) getBackoff(maxDuration time.Duration) time.Duration { newDuration = maxDuration } b.backoff = newDuration - glog.V(4).Infof("Backing off %s", duration.String()) + klog.V(4).Infof("Backing off %s", duration.String()) return duration } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/util/utils.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/util/utils.go index 731ad7a01455..d6fe01b37610 100644 --- 
a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/util/utils.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/util/utils.go @@ -25,120 +25,6 @@ import ( "k8s.io/kubernetes/pkg/features" ) -// DefaultBindAllHostIP defines the default ip address used to bind to all host. -const DefaultBindAllHostIP = "0.0.0.0" - -// ProtocolPort represents a protocol port pair, e.g. tcp:80. -type ProtocolPort struct { - Protocol string - Port int32 -} - -// NewProtocolPort creates a ProtocolPort instance. -func NewProtocolPort(protocol string, port int32) *ProtocolPort { - pp := &ProtocolPort{ - Protocol: protocol, - Port: port, - } - - if len(pp.Protocol) == 0 { - pp.Protocol = string(v1.ProtocolTCP) - } - - return pp -} - -// HostPortInfo stores mapping from ip to a set of ProtocolPort -type HostPortInfo map[string]map[ProtocolPort]struct{} - -// Add adds (ip, protocol, port) to HostPortInfo -func (h HostPortInfo) Add(ip, protocol string, port int32) { - if port <= 0 { - return - } - - h.sanitize(&ip, &protocol) - - pp := NewProtocolPort(protocol, port) - if _, ok := h[ip]; !ok { - h[ip] = map[ProtocolPort]struct{}{ - *pp: {}, - } - return - } - - h[ip][*pp] = struct{}{} -} - -// Remove removes (ip, protocol, port) from HostPortInfo -func (h HostPortInfo) Remove(ip, protocol string, port int32) { - if port <= 0 { - return - } - - h.sanitize(&ip, &protocol) - - pp := NewProtocolPort(protocol, port) - if m, ok := h[ip]; ok { - delete(m, *pp) - if len(h[ip]) == 0 { - delete(h, ip) - } - } -} - -// Len returns the total number of (ip, protocol, port) tuple in HostPortInfo -func (h HostPortInfo) Len() int { - length := 0 - for _, m := range h { - length += len(m) - } - return length -} - -// CheckConflict checks if the input (ip, protocol, port) conflicts with the existing -// ones in HostPortInfo. -func (h HostPortInfo) CheckConflict(ip, protocol string, port int32) bool { - if port <= 0 { - return false - } - - h.sanitize(&ip, &protocol) - - pp := NewProtocolPort(protocol, port) - - // If ip is 0.0.0.0 check all IP's (protocol, port) pair - if ip == DefaultBindAllHostIP { - for _, m := range h { - if _, ok := m[*pp]; ok { - return true - } - } - return false - } - - // If ip isn't 0.0.0.0, only check IP and 0.0.0.0's (protocol, port) pair - for _, key := range []string{DefaultBindAllHostIP, ip} { - if m, ok := h[key]; ok { - if _, ok2 := m[*pp]; ok2 { - return true - } - } - } - - return false -} - -// sanitize the parameters -func (h HostPortInfo) sanitize(ip, protocol *string) { - if len(*ip) == 0 { - *ip = DefaultBindAllHostIP - } - if len(*protocol) == 0 { - *protocol = string(v1.ProtocolTCP) - } -} - // GetContainerPorts returns the used host ports of Pods: if 'port' was used, a 'port:true' pair // will be in the result; but it does not resolve port conflict. func GetContainerPorts(pods ...*v1.Pod) []*v1.ContainerPort { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate.go index 740698f205c4..25ea591fca08 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate.go @@ -36,7 +36,7 @@ import ( // Set to true if the wrong build tags are set (see validate_disabled.go). var isDisabledBuild bool -// Interface for validating that a pod with with an AppArmor profile can be run by a Node. 
+// Interface for validating that a pod with an AppArmor profile can be run by a Node. type Validator interface { Validate(pod *v1.Pod) error ValidateHost() error diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/BUILD index e1f87a5d3ae4..7e4100e0095a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/BUILD @@ -15,7 +15,7 @@ go_library( importpath = "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util", deps = [ "//pkg/apis/core:go_default_library", - "//pkg/apis/policy:go_default_library", + "//staging/src/k8s.io/api/policy/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", ], ) @@ -26,7 +26,7 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/apis/core:go_default_library", - "//pkg/apis/policy:go_default_library", + "//staging/src/k8s.io/api/policy/v1beta1:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/util.go index 3f29f6e7a443..c0a25da17523 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/util.go @@ -20,9 +20,9 @@ import ( "fmt" "strings" + policy "k8s.io/api/policy/v1beta1" "k8s.io/apimachinery/pkg/util/sets" api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/policy" ) const ( diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/securitycontext/accessors.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/securitycontext/accessors.go index cf372f28662f..739ca126f62d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/securitycontext/accessors.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/securitycontext/accessors.go @@ -29,6 +29,7 @@ type PodSecurityContextAccessor interface { HostIPC() bool SELinuxOptions() *api.SELinuxOptions RunAsUser() *int64 + RunAsGroup() *int64 RunAsNonRoot() *bool SupplementalGroups() []int64 FSGroup() *int64 @@ -43,6 +44,7 @@ type PodSecurityContextMutator interface { SetHostIPC(bool) SetSELinuxOptions(*api.SELinuxOptions) SetRunAsUser(*int64) + SetRunAsGroup(*int64) SetRunAsNonRoot(*bool) SetSupplementalGroups([]int64) SetFSGroup(*int64) @@ -142,6 +144,20 @@ func (w *podSecurityContextWrapper) SetRunAsUser(v *int64) { w.ensurePodSC() w.podSC.RunAsUser = v } +func (w *podSecurityContextWrapper) RunAsGroup() *int64 { + if w.podSC == nil { + return nil + } + return w.podSC.RunAsGroup +} +func (w *podSecurityContextWrapper) SetRunAsGroup(v *int64) { + if w.podSC == nil && v == nil { + return + } + w.ensurePodSC() + w.podSC.RunAsGroup = v +} + func (w *podSecurityContextWrapper) RunAsNonRoot() *bool { if w.podSC == nil { return nil @@ -191,6 +207,7 @@ type ContainerSecurityContextAccessor interface { ProcMount() api.ProcMountType SELinuxOptions() *api.SELinuxOptions RunAsUser() *int64 + RunAsGroup() *int64 RunAsNonRoot() *bool ReadOnlyRootFilesystem() *bool AllowPrivilegeEscalation() *bool @@ -205,6 +222,7 @@ type ContainerSecurityContextMutator interface { SetPrivileged(*bool) SetSELinuxOptions(*api.SELinuxOptions) SetRunAsUser(*int64) + SetRunAsGroup(*int64) SetRunAsNonRoot(*bool) 
SetReadOnlyRootFilesystem(*bool) SetAllowPrivilegeEscalation(*bool) @@ -293,6 +311,20 @@ func (w *containerSecurityContextWrapper) SetRunAsUser(v *int64) { w.ensureContainerSC() w.containerSC.RunAsUser = v } +func (w *containerSecurityContextWrapper) RunAsGroup() *int64 { + if w.containerSC == nil { + return nil + } + return w.containerSC.RunAsGroup +} +func (w *containerSecurityContextWrapper) SetRunAsGroup(v *int64) { + if w.containerSC == nil && v == nil { + return + } + w.ensureContainerSC() + w.containerSC.RunAsGroup = v +} + func (w *containerSecurityContextWrapper) RunAsNonRoot() *bool { if w.containerSC == nil { return nil @@ -391,6 +423,18 @@ func (w *effectiveContainerSecurityContextWrapper) SetRunAsUser(v *int64) { w.containerSC.SetRunAsUser(v) } } +func (w *effectiveContainerSecurityContextWrapper) RunAsGroup() *int64 { + if v := w.containerSC.RunAsGroup(); v != nil { + return v + } + return w.podSC.RunAsGroup() +} +func (w *effectiveContainerSecurityContextWrapper) SetRunAsGroup(v *int64) { + if !reflect.DeepEqual(w.RunAsGroup(), v) { + w.containerSC.SetRunAsGroup(v) + } +} + func (w *effectiveContainerSecurityContextWrapper) RunAsNonRoot() *bool { if v := w.containerSC.RunAsNonRoot(); v != nil { return v diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/BUILD index 71a9836dce4b..d0ad133b6261 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/BUILD @@ -22,9 +22,9 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/gopkg.in/square/go-jose.v2:go_default_library", "//vendor/gopkg.in/square/go-jose.v2/jwt:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -54,6 +54,7 @@ go_test( "//pkg/controller/serviceaccount:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/util/cert:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/OWNERS index b2ae9a65995d..d914c0d7195a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/OWNERS @@ -1,10 +1,7 @@ approvers: -- liggitt -- deads2k -- mikedanese +- sig-auth-serviceaccounts-approvers reviewers: -- liggitt -- deads2k -- mikedanese -- ericchiang -- enj +- sig-auth-serviceaccounts-reviewers +labels: +- sig/auth + diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/claims.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/claims.go index 070418ffc8cf..3d48b6f2dce4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/claims.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/claims.go @@ -21,11 +21,11 @@ import ( "fmt" 
"time" - "github.com/golang/glog" + "gopkg.in/square/go-jose.v2/jwt" + "k8s.io/klog" + apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount" "k8s.io/kubernetes/pkg/apis/core" - - "gopkg.in/square/go-jose.v2/jwt" ) // time.Now stubbed out to allow testing @@ -80,15 +80,13 @@ func Claims(sa core.ServiceAccount, pod *core.Pod, secret *core.Secret, expirati return sc, pc } -func NewValidator(audiences []string, getter ServiceAccountTokenGetter) Validator { +func NewValidator(getter ServiceAccountTokenGetter) Validator { return &validator{ - auds: audiences, getter: getter, } } type validator struct { - auds []string getter ServiceAccountTokenGetter } @@ -97,7 +95,7 @@ var _ = Validator(&validator{}) func (v *validator) Validate(_ string, public *jwt.Claims, privateObj interface{}) (*ServiceAccountInfo, error) { private, ok := privateObj.(*privateClaims) if !ok { - glog.Errorf("jwt validator expected private claim of type *privateClaims but got: %T", privateObj) + klog.Errorf("jwt validator expected private claim of type *privateClaims but got: %T", privateObj) return nil, errors.New("Token could not be validated.") } err := public.Validate(jwt.Expected{ @@ -108,23 +106,10 @@ func (v *validator) Validate(_ string, public *jwt.Claims, privateObj interface{ case err == jwt.ErrExpired: return nil, errors.New("Token has expired.") default: - glog.Errorf("unexpected validation error: %T", err) + klog.Errorf("unexpected validation error: %T", err) return nil, errors.New("Token could not be validated.") } - var audValid bool - - for _, aud := range v.auds { - audValid = public.Audience.Contains(aud) - if audValid { - break - } - } - - if !audValid { - return nil, errors.New("Token is invalid for this audience.") - } - namespace := private.Kubernetes.Namespace saref := private.Kubernetes.Svcacct podref := private.Kubernetes.Pod @@ -132,15 +117,15 @@ func (v *validator) Validate(_ string, public *jwt.Claims, privateObj interface{ // Make sure service account still exists (name and UID) serviceAccount, err := v.getter.GetServiceAccount(namespace, saref.Name) if err != nil { - glog.V(4).Infof("Could not retrieve service account %s/%s: %v", namespace, saref.Name, err) + klog.V(4).Infof("Could not retrieve service account %s/%s: %v", namespace, saref.Name, err) return nil, err } if serviceAccount.DeletionTimestamp != nil { - glog.V(4).Infof("Service account has been deleted %s/%s", namespace, saref.Name) + klog.V(4).Infof("Service account has been deleted %s/%s", namespace, saref.Name) return nil, fmt.Errorf("ServiceAccount %s/%s has been deleted", namespace, saref.Name) } if string(serviceAccount.UID) != saref.UID { - glog.V(4).Infof("Service account UID no longer matches %s/%s: %q != %q", namespace, saref.Name, string(serviceAccount.UID), saref.UID) + klog.V(4).Infof("Service account UID no longer matches %s/%s: %q != %q", namespace, saref.Name, string(serviceAccount.UID), saref.UID) return nil, fmt.Errorf("ServiceAccount UID (%s) does not match claim (%s)", serviceAccount.UID, saref.UID) } @@ -148,15 +133,15 @@ func (v *validator) Validate(_ string, public *jwt.Claims, privateObj interface{ // Make sure token hasn't been invalidated by deletion of the secret secret, err := v.getter.GetSecret(namespace, secref.Name) if err != nil { - glog.V(4).Infof("Could not retrieve bound secret %s/%s for service account %s/%s: %v", namespace, secref.Name, namespace, saref.Name, err) + klog.V(4).Infof("Could not retrieve bound secret %s/%s for service account %s/%s: %v", namespace, secref.Name, 
namespace, saref.Name, err) return nil, errors.New("Token has been invalidated") } if secret.DeletionTimestamp != nil { - glog.V(4).Infof("Bound secret is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, secref.Name, namespace, saref.Name) + klog.V(4).Infof("Bound secret is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, secref.Name, namespace, saref.Name) return nil, errors.New("Token has been invalidated") } if secref.UID != string(secret.UID) { - glog.V(4).Infof("Secret UID no longer matches %s/%s: %q != %q", namespace, secref.Name, string(secret.UID), secref.UID) + klog.V(4).Infof("Secret UID no longer matches %s/%s: %q != %q", namespace, secref.Name, string(secret.UID), secref.UID) return nil, fmt.Errorf("Secret UID (%s) does not match claim (%s)", secret.UID, secref.UID) } } @@ -166,15 +151,15 @@ func (v *validator) Validate(_ string, public *jwt.Claims, privateObj interface{ // Make sure token hasn't been invalidated by deletion of the pod pod, err := v.getter.GetPod(namespace, podref.Name) if err != nil { - glog.V(4).Infof("Could not retrieve bound pod %s/%s for service account %s/%s: %v", namespace, podref.Name, namespace, saref.Name, err) + klog.V(4).Infof("Could not retrieve bound pod %s/%s for service account %s/%s: %v", namespace, podref.Name, namespace, saref.Name, err) return nil, errors.New("Token has been invalidated") } if pod.DeletionTimestamp != nil { - glog.V(4).Infof("Bound pod is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, podref.Name, namespace, saref.Name) + klog.V(4).Infof("Bound pod is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, podref.Name, namespace, saref.Name) return nil, errors.New("Token has been invalidated") } if podref.UID != string(pod.UID) { - glog.V(4).Infof("Pod UID no longer matches %s/%s: %q != %q", namespace, podref.Name, string(pod.UID), podref.UID) + klog.V(4).Infof("Pod UID no longer matches %s/%s: %q != %q", namespace, podref.Name, string(pod.UID), podref.UID) return nil, fmt.Errorf("Pod UID (%s) does not match claim (%s)", pod.UID, podref.UID) } podName = podref.Name diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/jwt.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/jwt.go index fe4dc5b70359..233fdee2d68e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/jwt.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/jwt.go @@ -17,6 +17,7 @@ limitations under the License. 
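Most of the surrounding hunks in this vendor drop are a mechanical migration from `github.com/golang/glog` to `k8s.io/klog`: the call surface (`V(level).Infof`, `Errorf`, `Warningf`) is unchanged and only the import path and BUILD dependency move. A sketch of what each migrated file amounts to, assuming klog is vendored as the BUILD updates above indicate:

```go
package main

import "k8s.io/klog" // was: "github.com/golang/glog"

func main() {
	// Call sites are identical before and after the migration.
	klog.V(4).Infof("verbose message at level %d", 4)
	klog.Errorf("error message: %v", "example")
	klog.Flush()
}
```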
package serviceaccount import ( + "context" "crypto/ecdsa" "crypto/elliptic" "crypto/rsa" @@ -25,13 +26,12 @@ import ( "fmt" "strings" + jose "gopkg.in/square/go-jose.v2" + "gopkg.in/square/go-jose.v2/jwt" + "k8s.io/api/core/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apiserver/pkg/authentication/authenticator" - "k8s.io/apiserver/pkg/authentication/user" - - jose "gopkg.in/square/go-jose.v2" - "gopkg.in/square/go-jose.v2/jwt" ) // ServiceAccountTokenGetter defines functions to retrieve a named service account and secret @@ -111,21 +111,23 @@ func (j *jwtTokenGenerator) GenerateToken(claims *jwt.Claims, privateClaims inte // JWTTokenAuthenticator authenticates tokens as JWT tokens produced by JWTTokenGenerator // Token signatures are verified using each of the given public keys until one works (allowing key rotation) // If lookup is true, the service account and secret referenced as claims inside the token are retrieved and verified with the provided ServiceAccountTokenGetter -func JWTTokenAuthenticator(iss string, keys []interface{}, validator Validator) authenticator.Token { +func JWTTokenAuthenticator(iss string, keys []interface{}, implicitAuds authenticator.Audiences, validator Validator) authenticator.Token { return &jwtTokenAuthenticator{ - iss: iss, - keys: keys, - validator: validator, + iss: iss, + keys: keys, + implicitAuds: implicitAuds, + validator: validator, } } type jwtTokenAuthenticator struct { - iss string - keys []interface{} - validator Validator + iss string + keys []interface{} + validator Validator + implicitAuds authenticator.Audiences } -// Validator is called by the JWT token authentictaor to apply domain specific +// Validator is called by the JWT token authenticator to apply domain specific // validation to a token and extract user information. type Validator interface { // Validate validates a token and returns user information or an error. @@ -140,7 +142,7 @@ type Validator interface { NewPrivateClaims() interface{} } -func (j *jwtTokenAuthenticator) AuthenticateToken(tokenData string) (user.Info, bool, error) { +func (j *jwtTokenAuthenticator) AuthenticateToken(ctx context.Context, tokenData string) (*authenticator.Response, bool, error) { if !j.hasCorrectIssuer(tokenData) { return nil, false, nil } @@ -170,6 +172,23 @@ func (j *jwtTokenAuthenticator) AuthenticateToken(tokenData string) (user.Info, return nil, false, utilerrors.NewAggregate(errlist) } + tokenAudiences := authenticator.Audiences(public.Audience) + if len(tokenAudiences) == 0 { + // only apiserver audiences are allowed for legacy tokens + tokenAudiences = j.implicitAuds + } + + requestedAudiences, ok := authenticator.AudiencesFrom(ctx) + if !ok { + // default to apiserver audiences + requestedAudiences = j.implicitAuds + } + + auds := authenticator.Audiences(tokenAudiences).Intersect(requestedAudiences) + if len(auds) == 0 && len(j.implicitAuds) != 0 { + return nil, false, fmt.Errorf("token audiences %q is invalid for the target audiences %q", tokenAudiences, requestedAudiences) + } + // If we get here, we have a token with a recognized signature and // issuer string. 
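The jwt.go hunk above moves audience validation out of the serviceaccount validator (hence `NewValidator` losing its `audiences` parameter in claims.go) and into `AuthenticateToken` itself: a token with no audience claim falls back to the API server's implicit audiences, the requested audiences default the same way, and authentication fails when the intersection is empty. A minimal sketch of that check with plain slices rather than the vendored `authenticator.Audiences` type:

```go
package main

import "fmt"

// intersect returns the audiences present in both lists.
func intersect(a, b []string) []string {
	set := make(map[string]bool, len(b))
	for _, s := range b {
		set[s] = true
	}
	var out []string
	for _, s := range a {
		if set[s] {
			out = append(out, s)
		}
	}
	return out
}

// checkAudiences mirrors the logic added to AuthenticateToken above.
func checkAudiences(tokenAuds, requestedAuds, implicitAuds []string) ([]string, error) {
	if len(tokenAuds) == 0 {
		tokenAuds = implicitAuds // legacy tokens are valid only for the API server itself
	}
	if len(requestedAuds) == 0 {
		requestedAuds = implicitAuds // no request context: default to the API server audiences
	}
	auds := intersect(tokenAuds, requestedAuds)
	if len(auds) == 0 && len(implicitAuds) != 0 {
		return nil, fmt.Errorf("token audiences %q are invalid for the target audiences %q", tokenAuds, requestedAuds)
	}
	return auds, nil
}

func main() {
	fmt.Println(checkAudiences(nil, nil, []string{"https://kubernetes.default.svc"}))
}
```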
sa, err := j.validator.Validate(tokenData, public, private) @@ -177,7 +196,10 @@ func (j *jwtTokenAuthenticator) AuthenticateToken(tokenData string) (user.Info, return nil, false, err } - return sa.UserInfo(), true, nil + return &authenticator.Response{ + User: sa.UserInfo(), + Audiences: auds, + }, true, nil } // hasCorrectIssuer returns true if tokenData is a valid JWT in compact diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/legacy.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/legacy.go index 79ca8f1b851d..57c482f0ba62 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/legacy.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/serviceaccount/legacy.go @@ -21,11 +21,11 @@ import ( "errors" "fmt" + "gopkg.in/square/go-jose.v2/jwt" + "k8s.io/klog" + "k8s.io/api/core/v1" apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount" - - "github.com/golang/glog" - "gopkg.in/square/go-jose.v2/jwt" ) func LegacyClaims(serviceAccount v1.ServiceAccount, secret v1.Secret) (*jwt.Claims, interface{}) { @@ -65,7 +65,7 @@ var _ = Validator(&legacyValidator{}) func (v *legacyValidator) Validate(tokenData string, public *jwt.Claims, privateObj interface{}) (*ServiceAccountInfo, error) { private, ok := privateObj.(*legacyPrivateClaims) if !ok { - glog.Errorf("jwt validator expected private claim of type *legacyPrivateClaims but got: %T", privateObj) + klog.Errorf("jwt validator expected private claim of type *legacyPrivateClaims but got: %T", privateObj) return nil, errors.New("Token could not be validated.") } @@ -99,30 +99,30 @@ func (v *legacyValidator) Validate(tokenData string, public *jwt.Claims, private // Make sure token hasn't been invalidated by deletion of the secret secret, err := v.getter.GetSecret(namespace, secretName) if err != nil { - glog.V(4).Infof("Could not retrieve token %s/%s for service account %s/%s: %v", namespace, secretName, namespace, serviceAccountName, err) + klog.V(4).Infof("Could not retrieve token %s/%s for service account %s/%s: %v", namespace, secretName, namespace, serviceAccountName, err) return nil, errors.New("Token has been invalidated") } if secret.DeletionTimestamp != nil { - glog.V(4).Infof("Token is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, secretName, namespace, serviceAccountName) + klog.V(4).Infof("Token is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, secretName, namespace, serviceAccountName) return nil, errors.New("Token has been invalidated") } if bytes.Compare(secret.Data[v1.ServiceAccountTokenKey], []byte(tokenData)) != 0 { - glog.V(4).Infof("Token contents no longer matches %s/%s for service account %s/%s", namespace, secretName, namespace, serviceAccountName) + klog.V(4).Infof("Token contents no longer matches %s/%s for service account %s/%s", namespace, secretName, namespace, serviceAccountName) return nil, errors.New("Token does not match server's copy") } // Make sure service account still exists (name and UID) serviceAccount, err := v.getter.GetServiceAccount(namespace, serviceAccountName) if err != nil { - glog.V(4).Infof("Could not retrieve service account %s/%s: %v", namespace, serviceAccountName, err) + klog.V(4).Infof("Could not retrieve service account %s/%s: %v", namespace, serviceAccountName, err) return nil, err } if serviceAccount.DeletionTimestamp != nil { - glog.V(4).Infof("Service account has been deleted %s/%s", namespace, serviceAccountName) + klog.V(4).Infof("Service account 
has been deleted %s/%s", namespace, serviceAccountName) return nil, fmt.Errorf("ServiceAccount %s/%s has been deleted", namespace, serviceAccountName) } if string(serviceAccount.UID) != serviceAccountUID { - glog.V(4).Infof("Service account UID no longer matches %s/%s: %q != %q", namespace, serviceAccountName, string(serviceAccount.UID), serviceAccountUID) + klog.V(4).Infof("Service account UID no longer matches %s/%s: %q != %q", namespace, serviceAccountName, string(serviceAccount.UID), serviceAccountUID) return nil, fmt.Errorf("ServiceAccount UID (%s) does not match claim (%s)", serviceAccount.UID, serviceAccountUID) } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/async/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/async/BUILD index 57830ca887a7..c201c307be9e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/async/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/async/BUILD @@ -15,7 +15,7 @@ go_library( importpath = "k8s.io/kubernetes/pkg/util/async", deps = [ "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/async/bounded_frequency_runner.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/async/bounded_frequency_runner.go index f03a7ec2d5d2..20a06ad668a8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/async/bounded_frequency_runner.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/async/bounded_frequency_runner.go @@ -23,7 +23,7 @@ import ( "k8s.io/client-go/util/flowcontrol" - "github.com/golang/glog" + "k8s.io/klog" ) // BoundedFrequencyRunner manages runs of a user-provided function. @@ -167,13 +167,13 @@ func construct(name string, fn func(), minInterval, maxInterval time.Duration, b // Loop handles the periodic timer and run requests. This is expected to be // called as a goroutine. func (bfr *BoundedFrequencyRunner) Loop(stop <-chan struct{}) { - glog.V(3).Infof("%s Loop running", bfr.name) + klog.V(3).Infof("%s Loop running", bfr.name) bfr.timer.Reset(bfr.maxInterval) for { select { case <-stop: bfr.stop() - glog.V(3).Infof("%s Loop stopping", bfr.name) + klog.V(3).Infof("%s Loop stopping", bfr.name) return case <-bfr.timer.C(): bfr.tryRun() @@ -218,7 +218,7 @@ func (bfr *BoundedFrequencyRunner) tryRun() { bfr.lastRun = bfr.timer.Now() bfr.timer.Stop() bfr.timer.Reset(bfr.maxInterval) - glog.V(3).Infof("%s: ran, next possible in %v, periodic in %v", bfr.name, bfr.minInterval, bfr.maxInterval) + klog.V(3).Infof("%s: ran, next possible in %v, periodic in %v", bfr.name, bfr.minInterval, bfr.maxInterval) return } @@ -227,13 +227,13 @@ func (bfr *BoundedFrequencyRunner) tryRun() { elapsed := bfr.timer.Since(bfr.lastRun) // how long since last run nextPossible := bfr.minInterval - elapsed // time to next possible run nextScheduled := bfr.maxInterval - elapsed // time to next periodic run - glog.V(4).Infof("%s: %v since last run, possible in %v, scheduled in %v", bfr.name, elapsed, nextPossible, nextScheduled) + klog.V(4).Infof("%s: %v since last run, possible in %v, scheduled in %v", bfr.name, elapsed, nextPossible, nextScheduled) if nextPossible < nextScheduled { // Set the timer for ASAP, but don't drain here. Assuming Loop is running, // it might get a delivery in the mean time, but that is OK. 
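The `tryRun` arithmetic above throttles the wrapped function to at most one run per `minInterval` while still guaranteeing a periodic run every `maxInterval`. A small sketch of the timer re-arm decision (a standalone helper for illustration, not the vendored type):

```go
package main

import (
	"fmt"
	"time"
)

// nextDelay reproduces the re-arm decision in tryRun: given how long ago
// the function last ran, return how long to wait before the next attempt.
func nextDelay(elapsed, minInterval, maxInterval time.Duration) time.Duration {
	nextPossible := minInterval - elapsed  // earliest run the throttle allows
	nextScheduled := maxInterval - elapsed // next unconditional periodic run
	if nextPossible < nextScheduled {
		return nextPossible // throttled: re-arm for the earliest allowed run
	}
	return nextScheduled
}

func main() {
	// Last ran 2s ago with a 10s minimum interval: next run in 8s.
	fmt.Println(nextDelay(2*time.Second, 10*time.Second, time.Minute))
}
```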
bfr.timer.Stop() bfr.timer.Reset(nextPossible) - glog.V(3).Infof("%s: throttled, scheduling run in %v", bfr.name, nextPossible) + klog.V(3).Infof("%s: throttled, scheduling run in %v", bfr.name, nextPossible) } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/bandwidth/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/bandwidth/BUILD index 2854892376d7..bc590bbfa7c2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/bandwidth/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/bandwidth/BUILD @@ -22,7 +22,7 @@ go_library( ] + select({ "@io_bazel_rules_go//go/platform:linux": [ "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], "//conditions:default": [], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/bandwidth/linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/bandwidth/linux.go index b8936a34f014..7050b4f763ca 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/bandwidth/linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/bandwidth/linux.go @@ -30,7 +30,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/utils/exec" - "github.com/golang/glog" + "k8s.io/klog" ) // tcShaper provides an implementation of the BandwidthShaper interface on Linux using the 'tc' tool. @@ -53,10 +53,10 @@ func NewTCShaper(iface string) BandwidthShaper { } func (t *tcShaper) execAndLog(cmdStr string, args ...string) error { - glog.V(6).Infof("Running: %s %s", cmdStr, strings.Join(args, " ")) + klog.V(6).Infof("Running: %s %s", cmdStr, strings.Join(args, " ")) cmd := t.e.Command(cmdStr, args...) out, err := cmd.CombinedOutput() - glog.V(6).Infof("Output from tc: %s", string(out)) + klog.V(6).Infof("Output from tc: %s", string(out)) return err } @@ -259,7 +259,7 @@ func (t *tcShaper) ReconcileInterface() error { return err } if !exists { - glog.V(4).Info("Didn't find bandwidth interface, creating") + klog.V(4).Info("Didn't find bandwidth interface, creating") return t.initializeInterface() } fields := strings.Split(output, " ") diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/dbus/fake_dbus.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/dbus/fake_dbus.go index 28cde82fb2f0..cceec4ca7722 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/dbus/fake_dbus.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/dbus/fake_dbus.go @@ -23,25 +23,25 @@ import ( godbus "github.com/godbus/dbus" ) -// DBusFake is a simple fake Interface type. -type DBusFake struct { - systemBus *DBusFakeConnection - sessionBus *DBusFakeConnection +// Fake is a simple fake Interface type. 
+type Fake struct { + systemBus *FakeConnection + sessionBus *FakeConnection } -// DBusFakeConnection represents a fake D-Bus connection -type DBusFakeConnection struct { +// FakeConnection represents a fake D-Bus connection +type FakeConnection struct { lock sync.Mutex busObject *fakeObject objects map[string]*fakeObject signalHandlers []chan<- *godbus.Signal } -// DBusFakeHandler is used to handle fake D-Bus method calls -type DBusFakeHandler func(method string, args ...interface{}) ([]interface{}, error) +// FakeHandler is used to handle fake D-Bus method calls +type FakeHandler func(method string, args ...interface{}) ([]interface{}, error) type fakeObject struct { - handler DBusFakeHandler + handler FakeHandler } type fakeCall struct { @@ -50,46 +50,45 @@ type fakeCall struct { } // NewFake returns a new Interface which will fake talking to D-Bus -func NewFake(systemBus *DBusFakeConnection, sessionBus *DBusFakeConnection) *DBusFake { - return &DBusFake{systemBus, sessionBus} +func NewFake(systemBus *FakeConnection, sessionBus *FakeConnection) *Fake { + return &Fake{systemBus, sessionBus} } -func NewFakeConnection() *DBusFakeConnection { - return &DBusFakeConnection{ +// NewFakeConnection returns a FakeConnection Interface +func NewFakeConnection() *FakeConnection { + return &FakeConnection{ objects: make(map[string]*fakeObject), } } // SystemBus is part of Interface -func (db *DBusFake) SystemBus() (Connection, error) { +func (db *Fake) SystemBus() (Connection, error) { if db.systemBus != nil { return db.systemBus, nil - } else { - return nil, fmt.Errorf("DBus is not running") } + return nil, fmt.Errorf("DBus is not running") } // SessionBus is part of Interface -func (db *DBusFake) SessionBus() (Connection, error) { +func (db *Fake) SessionBus() (Connection, error) { if db.sessionBus != nil { return db.sessionBus, nil - } else { - return nil, fmt.Errorf("DBus is not running") } + return nil, fmt.Errorf("DBus is not running") } // BusObject is part of the Connection interface -func (conn *DBusFakeConnection) BusObject() Object { +func (conn *FakeConnection) BusObject() Object { return conn.busObject } // Object is part of the Connection interface -func (conn *DBusFakeConnection) Object(name, path string) Object { +func (conn *FakeConnection) Object(name, path string) Object { return conn.objects[name+path] } // Signal is part of the Connection interface -func (conn *DBusFakeConnection) Signal(ch chan<- *godbus.Signal) { +func (conn *FakeConnection) Signal(ch chan<- *godbus.Signal) { conn.lock.Lock() defer conn.lock.Unlock() for i := range conn.signalHandlers { @@ -102,17 +101,17 @@ func (conn *DBusFakeConnection) Signal(ch chan<- *godbus.Signal) { } // SetBusObject sets the handler for the BusObject of conn -func (conn *DBusFakeConnection) SetBusObject(handler DBusFakeHandler) { +func (conn *FakeConnection) SetBusObject(handler FakeHandler) { conn.busObject = &fakeObject{handler} } // AddObject adds a handler for the Object at name and path -func (conn *DBusFakeConnection) AddObject(name, path string, handler DBusFakeHandler) { +func (conn *FakeConnection) AddObject(name, path string, handler FakeHandler) { conn.objects[name+path] = &fakeObject{handler} } // EmitSignal emits a signal on conn -func (conn *DBusFakeConnection) EmitSignal(name, path, iface, signal string, args ...interface{}) { +func (conn *FakeConnection) EmitSignal(name, path, iface, signal string, args ...interface{}) { conn.lock.Lock() defer conn.lock.Unlock() sig := &godbus.Signal{ diff --git 
a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/flag/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/flag/BUILD index ac88251b33c0..976dd85000b2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/flag/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/flag/BUILD @@ -12,8 +12,8 @@ go_library( importpath = "k8s.io/kubernetes/pkg/util/flag", deps = [ "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/flag/flags.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/flag/flags.go index b58a52cc0e7f..1d57c3e8d6b3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/flag/flags.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/flag/flags.go @@ -21,8 +21,8 @@ import ( "net" "strconv" - "github.com/golang/glog" "github.com/spf13/pflag" + "k8s.io/klog" utilnet "k8s.io/apimachinery/pkg/util/net" ) @@ -30,7 +30,7 @@ import ( // PrintFlags logs the flags in the flagset func PrintFlags(flags *pflag.FlagSet) { flags.VisitAll(func(flag *pflag.Flag) { - glog.V(1).Infof("FLAG: --%s=%q", flag.Name, flag.Value) + klog.V(1).Infof("FLAG: --%s=%q", flag.Name, flag.Value) }) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/goroutinemap/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/goroutinemap/BUILD index f1d77a033547..92688fde6868 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/goroutinemap/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/goroutinemap/BUILD @@ -13,7 +13,7 @@ go_library( deps = [ "//pkg/util/goroutinemap/exponentialbackoff:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/goroutinemap/goroutinemap.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/goroutinemap/goroutinemap.go index 474e1215cc38..9b6eb7319270 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/goroutinemap/goroutinemap.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/goroutinemap/goroutinemap.go @@ -25,8 +25,8 @@ import ( "fmt" "sync" - "github.com/golang/glog" k8sRuntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff" ) @@ -135,7 +135,7 @@ func (grm *goRoutineMap) operationComplete( delete(grm.operations, operationName) if *err != nil { // Log error - glog.Errorf("operation for %q failed with: %v", + klog.Errorf("operation for %q failed with: %v", operationName, *err) } @@ -147,7 +147,7 @@ func (grm *goRoutineMap) operationComplete( grm.operations[operationName] = existingOp // Log error - glog.Errorf("%v", + klog.Errorf("%v", existingOp.expBackoff.GenerateNoRetriesPermittedMsg(operationName)) } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipconfig/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipconfig/BUILD index ffcecee10316..39504f89bbbb 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipconfig/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipconfig/BUILD @@ -14,7 +14,7 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/util/ipconfig", deps = [ - 
"//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipconfig/ipconfig.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipconfig/ipconfig.go index 907c087f8eaf..5764511602b9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipconfig/ipconfig.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipconfig/ipconfig.go @@ -21,15 +21,15 @@ import ( "strings" "sync" - "github.com/golang/glog" + "k8s.io/klog" utilexec "k8s.io/utils/exec" ) // Interface is an injectable interface for running ipconfig commands. Implementations must be goroutine-safe. type Interface interface { - // GetDnsSuffixSearchList returns the list of DNS suffix to search - GetDnsSuffixSearchList() ([]string, error) + // GetDNSSuffixSearchList returns the list of DNS suffix to search + GetDNSSuffixSearchList() ([]string, error) } const ( @@ -54,8 +54,8 @@ func New(exec utilexec.Interface) Interface { return runner } -// GetDnsSuffixSearchList returns the list of DNS suffix to search -func (runner *runner) GetDnsSuffixSearchList() ([]string, error) { +// GetDNSSuffixSearchList returns the list of DNS suffix to search +func (runner *runner) GetDNSSuffixSearchList() ([]string, error) { // Parse the DNS suffix search list from ipconfig output // ipconfig /all on Windows displays the entry of DNS suffix search list // An example output contains: @@ -66,7 +66,7 @@ func (runner *runner) GetDnsSuffixSearchList() ([]string, error) { // TODO: this does not work when the label is localized suffixList := []string{} if runtime.GOOS != "windows" { - glog.V(1).Infof("ipconfig not supported on GOOS=%s", runtime.GOOS) + klog.V(1).Infof("ipconfig not supported on GOOS=%s", runtime.GOOS) return suffixList, nil } @@ -92,7 +92,7 @@ func (runner *runner) GetDnsSuffixSearchList() ([]string, error) { } } } else { - glog.V(1).Infof("Running %s %s failed: %v", cmdIpconfig, cmdDefaultArgs, err) + klog.V(1).Infof("Running %s %s failed: %v", cmdIpconfig, cmdDefaultArgs, err) } return suffixList, err diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipset/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipset/BUILD index 8be0d8d63d9d..186fa432043a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipset/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipset/BUILD @@ -9,7 +9,7 @@ go_library( importpath = "k8s.io/kubernetes/pkg/util/ipset", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipset/ipset.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipset/ipset.go index 4bc4a739d386..2d400a6244e7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipset/ipset.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipset/ipset.go @@ -24,7 +24,7 @@ import ( "strconv" "strings" - "github.com/golang/glog" + "k8s.io/klog" utilexec "k8s.io/utils/exec" ) @@ -111,12 +111,12 @@ func (set *IPSet) Validate() bool { } // check hash size value of ipset if set.HashSize <= 0 { - glog.Errorf("Invalid hashsize value %d, should be >0", set.HashSize) + klog.Errorf("Invalid hashsize value %d, should be >0", set.HashSize) return false } // check max elem value of ipset if 
set.MaxElem <= 0 { - glog.Errorf("Invalid maxelem value %d, should be >0", set.MaxElem) + klog.Errorf("Invalid maxelem value %d, should be >0", set.MaxElem) return false } @@ -167,7 +167,7 @@ type Entry struct { // Validate checks if a given ipset entry is valid or not. The set parameter is the ipset that entry belongs to. func (e *Entry) Validate(set *IPSet) bool { if e.Port < 0 { - glog.Errorf("Entry %v port number %d should be >=0 for ipset %v", e, e.Port, set) + klog.Errorf("Entry %v port number %d should be >=0 for ipset %v", e, e.Port, set) return false } switch e.SetType { @@ -184,7 +184,7 @@ func (e *Entry) Validate(set *IPSet) bool { // IP2 can not be empty for `hash:ip,port,ip` type ip set if net.ParseIP(e.IP2) == nil { - glog.Errorf("Error parsing entry %v second ip address %v for ipset %v", e, e.IP2, set) + klog.Errorf("Error parsing entry %v second ip address %v for ipset %v", e, e.IP2, set) return false } case HashIPPortNet: @@ -195,22 +195,22 @@ func (e *Entry) Validate(set *IPSet) bool { // Net can not be empty for `hash:ip,port,net` type ip set if _, ipNet, err := net.ParseCIDR(e.Net); ipNet == nil { - glog.Errorf("Error parsing entry %v ip net %v for ipset %v, error: %v", e, e.Net, set, err) + klog.Errorf("Error parsing entry %v ip net %v for ipset %v, error: %v", e, e.Net, set, err) return false } case BitmapPort: // check if port number satisfies its ipset's requirement of port range if set == nil { - glog.Errorf("Unable to reference ip set where the entry %v exists", e) + klog.Errorf("Unable to reference ip set where the entry %v exists", e) return false } begin, end, err := parsePortRange(set.PortRange) if err != nil { - glog.Errorf("Failed to parse set %v port range %s for ipset %v, error: %v", set, set.PortRange, set, err) + klog.Errorf("Failed to parse set %v port range %s for ipset %v, error: %v", set, set.PortRange, set, err) return false } if e.Port < begin || e.Port > end { - glog.Errorf("Entry %v port number %d is not in the port range %s of its ipset %v", e, e.Port, set.PortRange, set) + klog.Errorf("Entry %v port number %d is not in the port range %s of its ipset %v", e, e.Port, set.PortRange, set) return false } } @@ -251,7 +251,7 @@ func (e *Entry) checkIPandProtocol(set *IPSet) bool { } if net.ParseIP(e.IP) == nil { - glog.Errorf("Error parsing entry %v ip address %v for ipset %v", e, e.IP, set) + klog.Errorf("Error parsing entry %v ip address %v for ipset %v", e, e.IP, set) return false } @@ -286,7 +286,7 @@ func (runner *runner) CreateSet(set *IPSet, ignoreExistErr bool) error { // otherwise raised when the same set (setname and create parameters are identical) already exists. 
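The `BitmapPort` branch above validates an entry's port against its set's `a-b` port range via `parsePortRange`. A sketch of what such a parser looks like (a hypothetical standalone helper; the vendored one is not shown in full in this hunk), matching the `validatePortRange` rules further down:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parsePortRange parses "a-b" where both bounds are non-negative integers.
func parsePortRange(portRange string) (begin, end int, err error) {
	parts := strings.Split(portRange, "-")
	if len(parts) != 2 {
		return 0, 0, fmt.Errorf("port range should be in the format of `a-b`")
	}
	bounds := make([]int, 2)
	for i, s := range parts {
		n, err := strconv.Atoi(s)
		if err != nil || n < 0 {
			return 0, 0, fmt.Errorf("invalid port %q in range %q", s, portRange)
		}
		bounds[i] = n
	}
	return bounds[0], bounds[1], nil
}

func main() {
	fmt.Println(parsePortRange("0-1024"))
}
```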
func (runner *runner) createSet(set *IPSet, ignoreExistErr bool) error { args := []string{"create", set.Name, string(set.SetType)} - if set.SetType == HashIPPortIP || set.SetType == HashIPPort { + if set.SetType == HashIPPortIP || set.SetType == HashIPPort || set.SetType == HashIPPortNet { args = append(args, "family", set.HashFamily, "hashsize", strconv.Itoa(set.HashSize), @@ -424,17 +424,17 @@ func getIPSetVersionString(exec utilexec.Interface) (string, error) { func validatePortRange(portRange string) bool { strs := strings.Split(portRange, "-") if len(strs) != 2 { - glog.Errorf("port range should be in the format of `a-b`") + klog.Errorf("port range should be in the format of `a-b`") return false } for i := range strs { num, err := strconv.Atoi(strs[i]) if err != nil { - glog.Errorf("Failed to parse %s, error: %v", strs[i], err) + klog.Errorf("Failed to parse %s, error: %v", strs[i], err) return false } if num < 0 { - glog.Errorf("port number %d should be >=0", num) + klog.Errorf("port number %d should be >=0", num) return false } } @@ -448,7 +448,7 @@ func validateIPSetType(set Type) bool { return true } } - glog.Errorf("Currently supported ipset types are: %v, %s is not supported", ValidIPSetTypes, set) + klog.Errorf("Currently supported ipset types are: %v, %s is not supported", ValidIPSetTypes, set) return false } @@ -457,7 +457,7 @@ func validateHashFamily(family string) bool { if family == ProtocolFamilyIPV4 || family == ProtocolFamilyIPV6 { return true } - glog.Errorf("Currently supported ip set hash families are: [%s, %s], %s is not supported", ProtocolFamilyIPV4, ProtocolFamilyIPV6, family) + klog.Errorf("Currently supported ip set hash families are: [%s, %s], %s is not supported", ProtocolFamilyIPV4, ProtocolFamilyIPV6, family) return false } @@ -485,7 +485,7 @@ func validateProtocol(protocol string) bool { if protocol == ProtocolTCP || protocol == ProtocolUDP || protocol == ProtocolSCTP { return true } - glog.Errorf("Invalid entry's protocol: %s, supported protocols are [%s, %s]", protocol, ProtocolTCP, ProtocolUDP, ProtocolSCTP) + klog.Errorf("Invalid entry's protocol: %s, supported protocols are [%s, %s, %s]", protocol, ProtocolTCP, ProtocolUDP, ProtocolSCTP) return false } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/iptables/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/iptables/BUILD index 3e58721fe55f..9b289951f520 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/iptables/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/iptables/BUILD @@ -18,11 +18,11 @@ go_library( importpath = "k8s.io/kubernetes/pkg/util/iptables", deps = [ "//pkg/util/dbus:go_default_library", - "//pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/trace:go_default_library", "//vendor/github.com/godbus/dbus:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:linux": [ diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/iptables/iptables.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/iptables/iptables.go index 8c9b6722aff2..c9f3d9860d1c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/iptables/iptables.go +++ 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/iptables/iptables.go @@ -26,11 +26,11 @@ import ( "time" godbus "github.com/godbus/dbus" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/util/sets" + utilversion "k8s.io/apimachinery/pkg/util/version" utiltrace "k8s.io/apiserver/pkg/util/trace" + "k8s.io/klog" utildbus "k8s.io/kubernetes/pkg/util/dbus" - utilversion "k8s.io/kubernetes/pkg/util/version" utilexec "k8s.io/utils/exec" ) @@ -152,7 +152,7 @@ type runner struct { func newInternal(exec utilexec.Interface, dbus utildbus.Interface, protocol Protocol, lockfilePath string) Interface { vstring, err := getIPTablesVersionString(exec, protocol) if err != nil { - glog.Warningf("Error checking iptables version, assuming version at least %s: %v", MinCheckVersion, err) + klog.Warningf("Error checking iptables version, assuming version at least %s: %v", MinCheckVersion, err) vstring = MinCheckVersion } @@ -197,7 +197,7 @@ const ( func (runner *runner) connectToFirewallD() { bus, err := runner.dbus.SystemBus() if err != nil { - glog.V(1).Infof("Could not connect to D-Bus system bus: %s", err) + klog.V(1).Infof("Could not connect to D-Bus system bus: %s", err) return } runner.hasListener = true @@ -324,7 +324,7 @@ func (runner *runner) SaveInto(table Table, buffer *bytes.Buffer) error { // run and return iptablesSaveCmd := iptablesSaveCommand(runner.protocol) args := []string{"-t", string(table)} - glog.V(4).Infof("running %s %v", iptablesSaveCmd, args) + klog.V(4).Infof("running %s %v", iptablesSaveCmd, args) cmd := runner.exec.Command(iptablesSaveCmd, args...) // Since CombinedOutput() doesn't support redirecting it to a buffer, // we need to workaround it by redirecting stdout and stderr to buffer @@ -380,7 +380,7 @@ func (runner *runner) restoreInternal(args []string, data []byte, flush FlushFla trace.Step("Locks grabbed") defer func(locker iptablesLocker) { if err := locker.Close(); err != nil { - glog.Errorf("Failed to close iptables locks: %v", err) + klog.Errorf("Failed to close iptables locks: %v", err) } }(locker) } @@ -388,7 +388,7 @@ func (runner *runner) restoreInternal(args []string, data []byte, flush FlushFla // run the command and return the output or an error including the output and error fullArgs := append(runner.restoreWaitFlag, args...) iptablesRestoreCmd := iptablesRestoreCommand(runner.protocol) - glog.V(4).Infof("running %s %v", iptablesRestoreCmd, fullArgs) + klog.V(4).Infof("running %s %v", iptablesRestoreCmd, fullArgs) cmd := runner.exec.Command(iptablesRestoreCmd, fullArgs...) cmd.SetStdin(bytes.NewBuffer(data)) b, err := cmd.CombinedOutput() @@ -430,7 +430,7 @@ func (runner *runner) runContext(ctx context.Context, op operation, args []strin iptablesCmd := iptablesCommand(runner.protocol) fullArgs := append(runner.waitFlag, string(op)) fullArgs = append(fullArgs, args...) - glog.V(5).Infof("running iptables %s %v", string(op), args) + klog.V(5).Infof("running iptables %s %v", string(op), args) if ctx == nil { return runner.exec.Command(iptablesCmd, fullArgs...).CombinedOutput() } @@ -458,7 +458,7 @@ func trimhex(s string) string { // of hack and half-measures. We should nix this ASAP. 
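On iptables versions without `-C`, `checkRuleWithoutCheck` below falls back to scanning `iptables-save` output and treating a rule as present when a saved line's fields form a superset of the probed arguments (argument order in the saved output is not stable). A sketch of that comparison using the same sets utility:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// ruleMatches mirrors the superset check in checkRuleWithoutCheck: the saved
// rule matches when its fields contain every argument being probed for.
func ruleMatches(savedFields, wantArgs []string) bool {
	return sets.NewString(savedFields...).IsSuperset(sets.NewString(wantArgs...))
}

func main() {
	saved := []string{"-A", "KUBE-SERVICES", "-p", "tcp", "--dport", "80", "-j", "ACCEPT"}
	fmt.Println(ruleMatches(saved, []string{"-p", "tcp", "--dport", "80"})) // true
}
```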
func (runner *runner) checkRuleWithoutCheck(table Table, chain Chain, args ...string) (bool, error) { iptablesSaveCmd := iptablesSaveCommand(runner.protocol) - glog.V(1).Infof("running %s -t %s", iptablesSaveCmd, string(table)) + klog.V(1).Infof("running %s -t %s", iptablesSaveCmd, string(table)) out, err := runner.exec.Command(iptablesSaveCmd, "-t", string(table)).CombinedOutput() if err != nil { return false, fmt.Errorf("error checking rule: %v", err) @@ -497,7 +497,7 @@ func (runner *runner) checkRuleWithoutCheck(table Table, chain Chain, args ...st if sets.NewString(fields...).IsSuperset(argset) { return true, nil } - glog.V(5).Infof("DBG: fields is not a superset of args: fields=%v args=%v", fields, args) + klog.V(5).Infof("DBG: fields is not a superset of args: fields=%v args=%v", fields, args) } return false, nil @@ -544,12 +544,12 @@ func makeFullArgs(table Table, chain Chain, args ...string) []string { func getIPTablesHasCheckCommand(vstring string) bool { minVersion, err := utilversion.ParseGeneric(MinCheckVersion) if err != nil { - glog.Errorf("MinCheckVersion (%s) is not a valid version string: %v", MinCheckVersion, err) + klog.Errorf("MinCheckVersion (%s) is not a valid version string: %v", MinCheckVersion, err) return true } version, err := utilversion.ParseGeneric(vstring) if err != nil { - glog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err) + klog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err) return true } return version.AtLeast(minVersion) @@ -559,13 +559,13 @@ func getIPTablesHasCheckCommand(vstring string) bool { func getIPTablesWaitFlag(vstring string) []string { version, err := utilversion.ParseGeneric(vstring) if err != nil { - glog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err) + klog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err) return nil } minVersion, err := utilversion.ParseGeneric(WaitMinVersion) if err != nil { - glog.Errorf("WaitMinVersion (%s) is not a valid version string: %v", WaitMinVersion, err) + klog.Errorf("WaitMinVersion (%s) is not a valid version string: %v", WaitMinVersion, err) return nil } if version.LessThan(minVersion) { @@ -574,7 +574,7 @@ func getIPTablesWaitFlag(vstring string) []string { minVersion, err = utilversion.ParseGeneric(WaitSecondsMinVersion) if err != nil { - glog.Errorf("WaitSecondsMinVersion (%s) is not a valid version string: %v", WaitSecondsMinVersion, err) + klog.Errorf("WaitSecondsMinVersion (%s) is not a valid version string: %v", WaitSecondsMinVersion, err) return nil } if version.LessThan(minVersion) { @@ -608,11 +608,11 @@ func getIPTablesVersionString(exec utilexec.Interface, protocol Protocol) (strin func getIPTablesRestoreWaitFlag(exec utilexec.Interface, protocol Protocol) []string { vstring, err := getIPTablesRestoreVersionString(exec, protocol) if err != nil || vstring == "" { - glog.V(3).Infof("couldn't get iptables-restore version; assuming it doesn't support --wait") + klog.V(3).Infof("couldn't get iptables-restore version; assuming it doesn't support --wait") return nil } if _, err := utilversion.ParseGeneric(vstring); err != nil { - glog.V(3).Infof("couldn't parse iptables-restore version; assuming it doesn't support --wait") + klog.V(3).Infof("couldn't parse iptables-restore version; assuming it doesn't support --wait") return nil } @@ -691,7 +691,7 @@ func (runner *runner) AddReloadFunc(reloadFunc func()) { // runs all reload funcs to re-sync iptables rules func (runner *runner) reload() { - 
glog.V(1).Infof("reloading iptables rules") + klog.V(1).Infof("reloading iptables rules") for _, f := range runner.reloadFuncs { f() diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/BUILD index 9e487e4968ac..784a343e37d6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/BUILD @@ -34,42 +34,14 @@ go_library( "kernelcheck_unsupported.go", ], importpath = "k8s.io/kubernetes/pkg/util/ipvs", - deps = select({ - "@io_bazel_rules_go//go/platform:android": [ - "//vendor/k8s.io/utils/exec:go_default_library", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "//vendor/k8s.io/utils/exec:go_default_library", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "//vendor/k8s.io/utils/exec:go_default_library", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "//vendor/k8s.io/utils/exec:go_default_library", - ], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", + "//vendor/k8s.io/utils/exec:go_default_library", + ] + select({ "@io_bazel_rules_go//go/platform:linux": [ "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/github.com/docker/libnetwork/ipvs:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/utils/exec:go_default_library", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "//vendor/k8s.io/utils/exec:go_default_library", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "//vendor/k8s.io/utils/exec:go_default_library", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "//vendor/k8s.io/utils/exec:go_default_library", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "//vendor/k8s.io/utils/exec:go_default_library", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "//vendor/k8s.io/utils/exec:go_default_library", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "//vendor/k8s.io/utils/exec:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "//conditions:default": [], }), diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs.go index 58e76a56c901..6cd081851cee 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs.go @@ -19,6 +19,11 @@ package ipvs import ( "net" "strconv" + "strings" + + "fmt" + "k8s.io/apimachinery/pkg/util/version" + "k8s.io/utils/exec" ) // Interface is an injectable interface for running ipvs commands. Implementations must be goroutine-safe. @@ -41,6 +46,8 @@ type Interface interface { GetRealServers(*VirtualServer) ([]*RealServer, error) // DeleteRealServer deletes the specified real server from the specified virtual server. DeleteRealServer(*VirtualServer, *RealServer) error + // UpdateRealServer updates the specified real server from the specified virtual server. + UpdateRealServer(*VirtualServer, *RealServer) error } // VirtualServer is an user-oriented definition of an IPVS virtual server in its entirety. @@ -65,14 +72,21 @@ const ( IPVSProxyMode = "ipvs" ) -// Sets of IPVS required kernel modules. -var ipvsModules = []string{ - "ip_vs", - "ip_vs_rr", - "ip_vs_wrr", - "ip_vs_sh", - "nf_conntrack_ipv4", -} +// IPVS required kernel modules. 
+const ( + // ModIPVS is the kernel module "ip_vs" + ModIPVS string = "ip_vs" + // ModIPVSRR is the kernel module "ip_vs_rr" + ModIPVSRR string = "ip_vs_rr" + // ModIPVSWRR is the kernel module "ip_vs_wrr" + ModIPVSWRR string = "ip_vs_wrr" + // ModIPVSSH is the kernel module "ip_vs_sh" + ModIPVSSH string = "ip_vs_sh" + // ModNfConntrackIPV4 is the module "nf_conntrack_ipv4" + ModNfConntrackIPV4 string = "nf_conntrack_ipv4" + // ModNfConntrack is the kernel module "nf_conntrack" + ModNfConntrack string = "nf_conntrack" +) // Equal check the equality of virtual server. // We don't use struct == since it doesn't work because of slice. @@ -91,9 +105,11 @@ func (svc *VirtualServer) String() string { // RealServer is an user-oriented definition of an IPVS real server in its entirety. type RealServer struct { - Address net.IP - Port uint16 - Weight int + Address net.IP + Port uint16 + Weight int + ActiveConn int + InactiveConn int } func (rs *RealServer) String() string { @@ -104,6 +120,31 @@ func (rs *RealServer) String() string { // We don't use struct == since it doesn't work because of slice. func (rs *RealServer) Equal(other *RealServer) bool { return rs.Address.Equal(other.Address) && - rs.Port == other.Port && - rs.Weight == other.Weight + rs.Port == other.Port +} + +// GetKernelVersionAndIPVSMods returns the linux kernel version and the required ipvs modules +func GetKernelVersionAndIPVSMods(Executor exec.Interface) (kernelVersion string, ipvsModules []string, err error) { + kernelVersionFile := "/proc/sys/kernel/osrelease" + out, err := Executor.Command("cut", "-f1", "-d", " ", kernelVersionFile).CombinedOutput() + if err != nil { + return "", nil, fmt.Errorf("error getting os release kernel version: %v(%s)", err, out) + } + kernelVersion = strings.TrimSpace(string(out)) + // parse kernel version + ver1, err := version.ParseGeneric(kernelVersion) + if err != nil { + return kernelVersion, nil, fmt.Errorf("error parsing kernel version: %v(%s)", err, kernelVersion) + } + // "nf_conntrack_ipv4" has been removed since v4.19 + // see https://github.com/torvalds/linux/commit/a0ae2562c6c4b2721d9fddba63b7286c13517d9f + ver2, _ := version.ParseGeneric("4.19") + // get required ipvs modules + if ver1.LessThan(ver2) { + ipvsModules = append(ipvsModules, ModIPVS, ModIPVSRR, ModIPVSWRR, ModIPVSSH, ModNfConntrackIPV4) + } else { + ipvsModules = append(ipvsModules, ModIPVS, ModIPVSRR, ModIPVSWRR, ModIPVSSH, ModNfConntrack) + } + + return kernelVersion, ipvsModules, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs_linux.go index 47c640c183fe..b4f53be6fdb9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs_linux.go @@ -26,7 +26,7 @@ import ( "syscall" libipvs "github.com/docker/libnetwork/ipvs" - "github.com/golang/glog" + "k8s.io/klog" utilexec "k8s.io/utils/exec" ) @@ -43,7 +43,7 @@ type Protocol uint16 func New(exec utilexec.Interface) Interface { handle, err := libipvs.New("") if err != nil { - glog.Errorf("IPVS interface can't be initialized, error: %v", err) + klog.Errorf("IPVS interface can't be initialized, error: %v", err) return nil } return &runner{ @@ -144,6 +144,18 @@ func (runner *runner) DeleteRealServer(vs *VirtualServer, rs *RealServer) error return runner.ipvsHandle.DelDestination(svc, dst) } +func (runner *runner) UpdateRealServer(vs *VirtualServer, rs *RealServer) error { + svc, 
err := toIPVSService(vs) + if err != nil { + return err + } + dst, err := toIPVSDestination(rs) + if err != nil { + return err + } + return runner.ipvsHandle.UpdateDestination(svc, dst) +} + // GetRealServers is part of ipvs.Interface. func (runner *runner) GetRealServers(vs *VirtualServer) ([]*RealServer, error) { svc, err := toIPVSService(vs) @@ -203,9 +215,11 @@ func toRealServer(dst *libipvs.Destination) (*RealServer, error) { return nil, errors.New("ipvs destination should not be empty") } return &RealServer{ - Address: dst.Address, - Port: dst.Port, - Weight: dst.Weight, + Address: dst.Address, + Port: dst.Port, + Weight: dst.Weight, + ActiveConn: dst.ActiveConnections, + InactiveConn: dst.InactiveConnections, }, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs_unsupported.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs_unsupported.go index dd4d5b625be7..86447d57c593 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs_unsupported.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs_unsupported.go @@ -68,4 +68,8 @@ func (runner *runner) DeleteRealServer(*VirtualServer, *RealServer) error { return fmt.Errorf("IPVS not supported for this platform") } +func (runner *runner) UpdateRealServer(*VirtualServer, *RealServer) error { + return fmt.Errorf("IPVS not supported for this platform") +} + var _ = Interface(&runner{}) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/kernelcheck_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/kernelcheck_linux.go index 83c3e6b0fe90..286a3098c0c4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/kernelcheck_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/ipvs/kernelcheck_linux.go @@ -26,7 +26,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" utilsexec "k8s.io/utils/exec" - "github.com/golang/glog" + "k8s.io/klog" ) // RequiredIPVSKernelModulesAvailableCheck tests IPVS required kernel modules. @@ -42,7 +42,12 @@ func (r RequiredIPVSKernelModulesAvailableCheck) Name() string { // Check try to validates IPVS required kernel modules exists or not. // The name of function can not be changed. 
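The `GetKernelVersionAndIPVSMods` helper added above gates the conntrack module on the kernel version, since `nf_conntrack_ipv4` was folded into `nf_conntrack` in 4.19. A minimal sketch of that gate, using the same apimachinery version parser (the sample kernel strings are illustrative):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/version"
)

// requiredIPVSModules mirrors the gate above: kernels at or beyond 4.19
// ship nf_conntrack instead of the removed nf_conntrack_ipv4 module.
func requiredIPVSModules(kernelVersion string) ([]string, error) {
	ver, err := version.ParseGeneric(kernelVersion)
	if err != nil {
		return nil, fmt.Errorf("error parsing kernel version %q: %v", kernelVersion, err)
	}
	mods := []string{"ip_vs", "ip_vs_rr", "ip_vs_wrr", "ip_vs_sh"}
	if ver.LessThan(version.MustParseGeneric("4.19")) {
		return append(mods, "nf_conntrack_ipv4"), nil
	}
	return append(mods, "nf_conntrack"), nil
}

func main() {
	for _, kv := range []string{"4.15.0-42-generic", "5.0.0"} { // illustrative kernels
		mods, err := requiredIPVSModules(kv)
		fmt.Println(kv, mods, err)
	}
}
```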
func (r RequiredIPVSKernelModulesAvailableCheck) Check() (warnings, errors []error) { - glog.V(1).Infoln("validating the kernel module IPVS required exists in machine or not") + klog.V(1).Infoln("validating the kernel module IPVS required exists in machine or not") + + kernelVersion, ipvsModules, err := GetKernelVersionAndIPVSMods(r.Executor) + if err != nil { + errors = append(errors, err) + } // Find out loaded kernel modules out, err := r.Executor.Command("cut", "-f1", "-d", " ", "/proc/modules").CombinedOutput() @@ -60,14 +65,6 @@ func (r RequiredIPVSKernelModulesAvailableCheck) Check() (warnings, errors []err // Check builtin modules exist or not if len(modules) != 0 { - kernelVersionFile := "/proc/sys/kernel/osrelease" - b, err := r.Executor.Command("cut", "-f1", "-d", " ", kernelVersionFile).CombinedOutput() - if err != nil { - errors = append(errors, fmt.Errorf("error getting os release kernel version: %v(%s)", err, out)) - return nil, errors - } - - kernelVersion := strings.TrimSpace(string(b)) builtinModsFilePath := fmt.Sprintf("/lib/modules/%s/modules.builtin", kernelVersion) out, err := r.Executor.Command("cut", "-f1", "-d", " ", builtinModsFilePath).CombinedOutput() if err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/keymutex/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/keymutex/BUILD index 256ed34181eb..267a4b1f6336 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/keymutex/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/keymutex/BUILD @@ -13,7 +13,7 @@ go_library( "keymutex.go", ], importpath = "k8s.io/kubernetes/pkg/util/keymutex", - deps = ["//vendor/github.com/golang/glog:go_default_library"], + deps = ["//vendor/k8s.io/klog:go_default_library"], ) go_test( diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/keymutex/hashed.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/keymutex/hashed.go index 5fe9a025c245..5176ae916c22 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/keymutex/hashed.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/keymutex/hashed.go @@ -21,7 +21,7 @@ import ( "runtime" "sync" - "github.com/golang/glog" + "k8s.io/klog" ) // NewHashed returns a new instance of KeyMutex which hashes arbitrary keys to @@ -44,16 +44,16 @@ type hashedKeyMutex struct { // Acquires a lock associated with the specified ID. func (km *hashedKeyMutex) LockKey(id string) { - glog.V(5).Infof("hashedKeyMutex.LockKey(...) called for id %q\r\n", id) + klog.V(5).Infof("hashedKeyMutex.LockKey(...) called for id %q\r\n", id) km.mutexes[km.hash(id)%len(km.mutexes)].Lock() - glog.V(5).Infof("hashedKeyMutex.LockKey(...) for id %q completed.\r\n", id) + klog.V(5).Infof("hashedKeyMutex.LockKey(...) for id %q completed.\r\n", id) } // Releases the lock associated with the specified ID. func (km *hashedKeyMutex) UnlockKey(id string) error { - glog.V(5).Infof("hashedKeyMutex.UnlockKey(...) called for id %q\r\n", id) + klog.V(5).Infof("hashedKeyMutex.UnlockKey(...) called for id %q\r\n", id) km.mutexes[km.hash(id)%len(km.mutexes)].Unlock() - glog.V(5).Infof("hashedKeyMutex.UnlockKey(...) for id %q completed.\r\n", id) + klog.V(5).Infof("hashedKeyMutex.UnlockKey(...) 
for id %q completed.\r\n", id) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/BUILD index e8bf8b8617e3..221afb7a9c65 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/BUILD @@ -18,7 +18,7 @@ go_library( importpath = "k8s.io/kubernetes/pkg/util/mount", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:android": [ @@ -68,6 +68,7 @@ go_test( srcs = [ "exec_mount_test.go", "mount_linux_test.go", + "mount_test.go", "mount_windows_test.go", "nsenter_mount_test.go", "safe_format_and_mount_test.go", @@ -78,8 +79,8 @@ go_test( ] + select({ "@io_bazel_rules_go//go/platform:linux": [ "//pkg/util/nsenter:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/sys/unix:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], "@io_bazel_rules_go//go/platform:windows": [ diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/exec_mount.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/exec_mount.go index 3c6638328f69..634189dea9bc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/exec_mount.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/exec_mount.go @@ -22,7 +22,7 @@ import ( "fmt" "os" - "github.com/golang/glog" + "k8s.io/klog" ) // ExecMounter is a mounter that uses provided Exec interface to mount and @@ -44,10 +44,10 @@ var _ Interface = &execMounter{} // Mount runs mount(8) using given exec interface. func (m *execMounter) Mount(source string, target string, fstype string, options []string) error { - bind, bindRemountOpts := isBind(options) + bind, bindOpts, bindRemountOpts := isBind(options) if bind { - err := m.doExecMount(source, target, fstype, []string{"bind"}) + err := m.doExecMount(source, target, fstype, bindOpts) if err != nil { return err } @@ -59,10 +59,10 @@ func (m *execMounter) Mount(source string, target string, fstype string, options // doExecMount calls exec(mount ) using given exec interface. func (m *execMounter) doExecMount(source, target, fstype string, options []string) error { - glog.V(5).Infof("Exec Mounting %s %s %s %v", source, target, fstype, options) + klog.V(5).Infof("Exec Mounting %s %s %s %v", source, target, fstype, options) mountArgs := makeMountArgs(source, target, fstype, options) output, err := m.exec.Run("mount", mountArgs...) 
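The keymutex changes above are log-only, but the scheme is worth spelling out: keys hash onto a fixed pool of mutexes, so memory stays bounded at the cost of unrelated keys occasionally sharing a lock. A condensed, self-contained sketch (pool size and key are illustrative; the vendored code can size the pool from the CPU count):

```go
package main

import (
	"fmt"
	"hash/fnv"
	"sync"
)

// hashedKeyMutex maps arbitrary string keys onto a fixed pool of mutexes,
// trading a chance of false sharing for bounded memory.
type hashedKeyMutex struct {
	mutexes []sync.Mutex
}

func newHashed(n int) *hashedKeyMutex {
	return &hashedKeyMutex{mutexes: make([]sync.Mutex, n)}
}

func (km *hashedKeyMutex) idx(id string) uint32 {
	h := fnv.New32a()
	h.Write([]byte(id))
	return h.Sum32() % uint32(len(km.mutexes))
}

func (km *hashedKeyMutex) LockKey(id string)   { km.mutexes[km.idx(id)].Lock() }
func (km *hashedKeyMutex) UnlockKey(id string) { km.mutexes[km.idx(id)].Unlock() }

func main() {
	km := newHashed(8)
	km.LockKey("pvc-1234")
	fmt.Println("holding lock for pvc-1234")
	km.UnlockKey("pvc-1234")
}
```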
- glog.V(5).Infof("Exec mounted %v: %v: %s", mountArgs, err, string(output)) + klog.V(5).Infof("Exec mounted %v: %v: %s", mountArgs, err, string(output)) if err != nil { return fmt.Errorf("mount failed: %v\nMounting command: %s\nMounting arguments: %s %s %s %v\nOutput: %s\n", err, "mount", source, target, fstype, options, string(output)) @@ -75,9 +75,9 @@ func (m *execMounter) doExecMount(source, target, fstype string, options []strin func (m *execMounter) Unmount(target string) error { outputBytes, err := m.exec.Run("umount", target) if err == nil { - glog.V(5).Infof("Exec unmounted %s: %s", target, string(outputBytes)) + klog.V(5).Infof("Exec unmounted %s: %s", target, string(outputBytes)) } else { - glog.V(5).Infof("Failed to exec unmount %s: err: %q, umount output: %s", target, err, string(outputBytes)) + klog.V(5).Infof("Failed to exec unmount %s: err: %q, umount output: %s", target, err, string(outputBytes)) } return err diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/fake.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/fake.go index e834e297b3eb..06e0fcccdc16 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/fake.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/fake.go @@ -22,7 +22,7 @@ import ( "path/filepath" "sync" - "github.com/golang/glog" + "k8s.io/klog" ) // FakeMounter implements mount.Interface for tests. @@ -83,11 +83,8 @@ func (f *FakeMounter) Mount(source string, target string, fstype string, options } } } - // find 'ro' option - if option == "ro" { - // reuse MountPoint.Opts field to mark mount as readonly - opts = append(opts, "ro") - } + // reuse MountPoint.Opts field to mark mount as readonly + opts = append(opts, option) } // If target is a symlink, get its absolute path @@ -95,9 +92,8 @@ func (f *FakeMounter) Mount(source string, target string, fstype string, options if err != nil { absTarget = target } - f.MountPoints = append(f.MountPoints, MountPoint{Device: source, Path: absTarget, Type: fstype, Opts: opts}) - glog.V(5).Infof("Fake mounter: mounted %s to %s", source, absTarget) + klog.V(5).Infof("Fake mounter: mounted %s to %s", source, absTarget) f.Log = append(f.Log, FakeAction{Action: FakeActionMount, Target: absTarget, Source: source, FSType: fstype}) return nil } @@ -115,7 +111,7 @@ func (f *FakeMounter) Unmount(target string) error { newMountpoints := []MountPoint{} for _, mp := range f.MountPoints { if mp.Path == absTarget { - glog.V(5).Infof("Fake mounter: unmounted %s from %s", mp.Device, absTarget) + klog.V(5).Infof("Fake mounter: unmounted %s from %s", mp.Device, absTarget) // Don't copy it to newMountpoints continue } @@ -158,11 +154,11 @@ func (f *FakeMounter) IsLikelyNotMountPoint(file string) (bool, error) { for _, mp := range f.MountPoints { if mp.Path == absFile { - glog.V(5).Infof("isLikelyNotMountPoint for %s: mounted %s, false", file, mp.Path) + klog.V(5).Infof("isLikelyNotMountPoint for %s: mounted %s, false", file, mp.Path) return false, nil } } - glog.V(5).Infof("isLikelyNotMountPoint for %s: true", file) + klog.V(5).Infof("isLikelyNotMountPoint for %s: true", file) return true, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/mount.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/mount.go index b48caaffbb60..48dfde3da4d2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/mount.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/mount.go @@ -267,6 +267,13 @@ func 
IsNotMountPoint(mounter Interface, file string) (bool, error) { if notMnt == false { return notMnt, nil } + + // Resolve any symlinks in file, kernel would do the same and use the resolved path in /proc/mounts + resolvedFile, err := mounter.EvalHostSymlinks(file) + if err != nil { + return true, err + } + // check all mountpoints since IsLikelyNotMountPoint // is not reliable for some mountpoint types mountPoints, mountPointsErr := mounter.List() @@ -274,7 +281,7 @@ func IsNotMountPoint(mounter Interface, file string) (bool, error) { return notMnt, mountPointsErr } for _, mp := range mountPoints { - if mounter.IsMountPointMatch(mp, file) { + if mounter.IsMountPointMatch(mp, resolvedFile) { notMnt = false break } @@ -286,7 +293,7 @@ func IsNotMountPoint(mounter Interface, file string) (bool, error) { // use in case of bind mount, due to the fact that bind mount doesn't respect mount options. // The list equals: // options - 'bind' + 'remount' (no duplicate) -func isBind(options []string) (bool, []string) { +func isBind(options []string) (bool, []string, []string) { // Because we have an FD opened on the subpath bind mount, the "bind" option // needs to be included, otherwise the mount target will error as busy if you // remount as readonly. @@ -295,22 +302,36 @@ func isBind(options []string) (bool, []string) { // volume mount to be read only. bindRemountOpts := []string{"bind", "remount"} bind := false + bindOpts := []string{"bind"} - if len(options) != 0 { - for _, option := range options { - switch option { - case "bind": - bind = true - break - case "remount": - break - default: - bindRemountOpts = append(bindRemountOpts, option) - } + // _netdev is a userspace mount option and does not automatically get added when + // bind mount is created and hence we must carry it over. + if checkForNetDev(options) { + bindOpts = append(bindOpts, "_netdev") + } + + for _, option := range options { + switch option { + case "bind": + bind = true + break + case "remount": + break + default: + bindRemountOpts = append(bindRemountOpts, option) } } - return bind, bindRemountOpts + return bind, bindOpts, bindRemountOpts +} + +func checkForNetDev(options []string) bool { + for _, option := range options { + if option == "_netdev" { + return true + } + } + return false } // TODO: this is a workaround for the unmount device issue caused by gci mounter. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go index 6b1c0010442f..6ebeff053b59 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go @@ -30,9 +30,9 @@ import ( "strings" "syscall" - "github.com/golang/glog" "golang.org/x/sys/unix" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog" utilfile "k8s.io/kubernetes/pkg/util/file" utilio "k8s.io/kubernetes/pkg/util/io" utilexec "k8s.io/utils/exec" @@ -89,9 +89,9 @@ func (mounter *Mounter) Mount(source string, target string, fstype string, optio // Path to mounter binary if containerized mounter is needed. Otherwise, it is set to empty. // All Linux distros are expected to be shipped with a mount utility that a support bind mounts. 
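The reworked `isBind` above returns separate option lists for the two halves of a bind mount, and `checkForNetDev` carries the userspace-only `_netdev` flag onto the initial bind. A standalone reimplementation of the split, for illustration:

```go
package main

import "fmt"

// splitBindOptions mirrors the reworked isBind: a bind mount happens in
// two passes, first with "bind" (plus "_netdev", a userspace-only option
// the kernel would not carry over), then a "bind,remount" pass that
// applies the remaining options such as "ro".
func splitBindOptions(options []string) (bind bool, bindOpts, remountOpts []string) {
	bindOpts = []string{"bind"}
	remountOpts = []string{"bind", "remount"}
	for _, o := range options {
		switch o {
		case "bind":
			bind = true
		case "remount":
			// already present in remountOpts
		case "_netdev":
			bindOpts = append(bindOpts, o)
			remountOpts = append(remountOpts, o)
		default:
			remountOpts = append(remountOpts, o)
		}
	}
	return bind, bindOpts, remountOpts
}

func main() {
	bind, first, second := splitBindOptions([]string{"bind", "ro", "_netdev"})
	fmt.Println(bind)   // true
	fmt.Println(first)  // [bind _netdev]
	fmt.Println(second) // [bind remount ro _netdev]
}
```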
mounterPath := "" - bind, bindRemountOpts := isBind(options) + bind, bindOpts, bindRemountOpts := isBind(options) if bind { - err := mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, []string{"bind"}) + err := mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindOpts) if err != nil { return err } @@ -143,12 +143,12 @@ func (m *Mounter) doMount(mounterPath string, mountCmd string, source string, ta // No code here, mountCmd and mountArgs are already populated. } - glog.V(4).Infof("Mounting cmd (%s) with arguments (%s)", mountCmd, mountArgs) + klog.V(4).Infof("Mounting cmd (%s) with arguments (%s)", mountCmd, mountArgs) command := exec.Command(mountCmd, mountArgs...) output, err := command.CombinedOutput() if err != nil { args := strings.Join(mountArgs, " ") - glog.Errorf("Mount failed: %v\nMounting command: %s\nMounting arguments: %s\nOutput: %s\n", err, mountCmd, args, string(output)) + klog.Errorf("Mount failed: %v\nMounting command: %s\nMounting arguments: %s\nOutput: %s\n", err, mountCmd, args, string(output)) return fmt.Errorf("mount failed: %v\nMounting command: %s\nMounting arguments: %s\nOutput: %s\n", err, mountCmd, args, string(output)) } @@ -161,7 +161,7 @@ func (m *Mounter) doMount(mounterPath string, mountCmd string, source string, ta // systemd-runs (needed by Mount()) works. func detectSystemd() bool { if _, err := exec.LookPath("systemd-run"); err != nil { - glog.V(2).Infof("Detected OS without systemd") + klog.V(2).Infof("Detected OS without systemd") return false } // Try to run systemd-run --scope /bin/true, that should be enough @@ -171,12 +171,12 @@ func detectSystemd() bool { cmd := exec.Command("systemd-run", "--description=Kubernetes systemd probe", "--scope", "true") output, err := cmd.CombinedOutput() if err != nil { - glog.V(2).Infof("Cannot run systemd-run, assuming non-systemd OS") - glog.V(4).Infof("systemd-run failed with: %v", err) - glog.V(4).Infof("systemd-run output: %s", string(output)) + klog.V(2).Infof("Cannot run systemd-run, assuming non-systemd OS") + klog.V(4).Infof("systemd-run failed with: %v", err) + klog.V(4).Infof("systemd-run output: %s", string(output)) return false } - glog.V(2).Infof("Detected OS with systemd") + klog.V(2).Infof("Detected OS with systemd") return true } @@ -208,7 +208,7 @@ func addSystemdScope(systemdRunPath, mountName, command string, args []string) ( // Unmount unmounts the target. 
func (mounter *Mounter) Unmount(target string) error { - glog.V(4).Infof("Unmounting %s", target) + klog.V(4).Infof("Unmounting %s", target) command := exec.Command("umount", target) output, err := command.CombinedOutput() if err != nil { @@ -290,7 +290,7 @@ func exclusiveOpenFailsOnDevice(pathname string) (bool, error) { } if !isDevice { - glog.Errorf("Path %q is not referring to a device.", pathname) + klog.Errorf("Path %q is not referring to a device.", pathname) return false, nil } fd, errno := unix.Open(pathname, unix.O_RDONLY|unix.O_EXCL, 0) @@ -319,11 +319,11 @@ func (mounter *Mounter) GetDeviceNameFromMount(mountPath, pluginDir string) (str func getDeviceNameFromMount(mounter Interface, mountPath, pluginDir string) (string, error) { refs, err := mounter.GetMountRefs(mountPath) if err != nil { - glog.V(4).Infof("GetMountRefs failed for mount path %q: %v", mountPath, err) + klog.V(4).Infof("GetMountRefs failed for mount path %q: %v", mountPath, err) return "", err } if len(refs) == 0 { - glog.V(4).Infof("Directory %s is not mounted", mountPath) + klog.V(4).Infof("Directory %s is not mounted", mountPath) return "", fmt.Errorf("directory %s is not mounted", mountPath) } basemountPath := path.Join(pluginDir, MountsInGlobalPDPath) @@ -331,7 +331,7 @@ func getDeviceNameFromMount(mounter Interface, mountPath, pluginDir string) (str if strings.HasPrefix(ref, basemountPath) { volumeID, err := filepath.Rel(basemountPath, ref) if err != nil { - glog.Errorf("Failed to get volume id from mount %s - %v", mountPath, err) + klog.Errorf("Failed to get volume id from mount %s - %v", mountPath, err) return "", err } return volumeID, nil @@ -437,26 +437,26 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, if !readOnly { // Run fsck on the disk to fix repairable issues, only do this for volumes requested as rw. - glog.V(4).Infof("Checking for issues with fsck on disk: %s", source) + klog.V(4).Infof("Checking for issues with fsck on disk: %s", source) args := []string{"-a", source} out, err := mounter.Exec.Run("fsck", args...) if err != nil { ee, isExitError := err.(utilexec.ExitError) switch { case err == utilexec.ErrExecutableNotFound: - glog.Warningf("'fsck' not found on system; continuing mount without running 'fsck'.") + klog.Warningf("'fsck' not found on system; continuing mount without running 'fsck'.") case isExitError && ee.ExitStatus() == fsckErrorsCorrected: - glog.Infof("Device %s has errors which were corrected by fsck.", source) + klog.Infof("Device %s has errors which were corrected by fsck.", source) case isExitError && ee.ExitStatus() == fsckErrorsUncorrected: return fmt.Errorf("'fsck' found errors on device %s but could not correct them: %s.", source, string(out)) case isExitError && ee.ExitStatus() > fsckErrorsUncorrected: - glog.Infof("`fsck` error %s", string(out)) + klog.Infof("`fsck` error %s", string(out)) } } } // Try to mount the disk - glog.V(4).Infof("Attempting to mount disk: %s %s %s", fstype, source, target) + klog.V(4).Infof("Attempting to mount disk: %s %s %s", fstype, source, target) mountErr := mounter.Interface.Mount(source, target, fstype, options) if mountErr != nil { // Mount failed. 
This indicates either that the disk is unformatted or @@ -485,14 +485,14 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, source, } } - glog.Infof("Disk %q appears to be unformatted, attempting to format as type: %q with options: %v", source, fstype, args) + klog.Infof("Disk %q appears to be unformatted, attempting to format as type: %q with options: %v", source, fstype, args) _, err := mounter.Exec.Run("mkfs."+fstype, args...) if err == nil { // the disk has been formatted successfully try to mount it again. - glog.Infof("Disk successfully formatted (mkfs): %s - %s %s", fstype, source, target) + klog.Infof("Disk successfully formatted (mkfs): %s - %s %s", fstype, source, target) return mounter.Interface.Mount(source, target, fstype, options) } - glog.Errorf("format of disk %q failed: type:(%q) target:(%q) options:(%q)error:(%v)", source, fstype, target, options, err) + klog.Errorf("format of disk %q failed: type:(%q) target:(%q) options:(%q)error:(%v)", source, fstype, target, options, err) return err } else { // Disk is already formatted and failed to mount @@ -511,10 +511,10 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, // GetDiskFormat uses 'blkid' to see if the given disk is unformated func (mounter *SafeFormatAndMount) GetDiskFormat(disk string) (string, error) { args := []string{"-p", "-s", "TYPE", "-s", "PTTYPE", "-o", "export", disk} - glog.V(4).Infof("Attempting to determine if disk %q is formatted using blkid with args: (%v)", disk, args) + klog.V(4).Infof("Attempting to determine if disk %q is formatted using blkid with args: (%v)", disk, args) dataOut, err := mounter.Exec.Run("blkid", args...) output := string(dataOut) - glog.V(4).Infof("Output: %q, err: %v", output, err) + klog.V(4).Infof("Output: %q, err: %v", output, err) if err != nil { if exit, ok := err.(utilexec.ExitError); ok { @@ -526,7 +526,7 @@ func (mounter *SafeFormatAndMount) GetDiskFormat(disk string) (string, error) { return "", nil } } - glog.Errorf("Could not determine if disk %q is formatted (%v)", disk, err) + klog.Errorf("Could not determine if disk %q is formatted (%v)", disk, err) return "", err } @@ -552,7 +552,7 @@ func (mounter *SafeFormatAndMount) GetDiskFormat(disk string) (string, error) { } if len(pttype) > 0 { - glog.V(4).Infof("Disk %s detected partition table type: %s", disk, pttype) + klog.V(4).Infof("Disk %s detected partition table type: %s", disk, pttype) // Returns a special non-empty string as filesystem type, then kubelet // will not format it. 
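`GetDiskFormat` above keys off blkid: `formatAndMount` only runs mkfs when the probe finds no filesystem signature. A compressed sketch of that decision, assuming blkid's documented exit status 2 for "nothing found" (device path and output options are simplified relative to the vendored code):

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// diskFormat probes a device with blkid. In low-level probe mode blkid
// exits with status 2 when it finds no filesystem or partition-table
// signature, which is the cue that running mkfs is safe.
func diskFormat(device string) (string, error) {
	out, err := exec.Command("blkid", "-p", "-s", "TYPE", "-o", "value", device).CombinedOutput()
	if err != nil {
		if ee, ok := err.(*exec.ExitError); ok && ee.ExitCode() == 2 {
			return "", nil // no signature found: unformatted
		}
		return "", err
	}
	return strings.TrimSpace(string(out)), nil
}

func main() {
	fs, err := diskFormat("/dev/sdx") // illustrative device path
	switch {
	case err != nil:
		fmt.Println("probe failed:", err)
	case fs == "":
		fmt.Println("unformatted; mkfs.ext4 would run before mounting")
	default:
		fmt.Println("already formatted as", fs)
	}
}
```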
return "unknown data, probably partitions", nil @@ -686,11 +686,11 @@ func doMakeRShared(path string, mountInfoFilename string) error { return err } if shared { - glog.V(4).Infof("Directory %s is already on a shared mount", path) + klog.V(4).Infof("Directory %s is already on a shared mount", path) return nil } - glog.V(2).Infof("Bind-mounting %q with shared mount propagation", path) + klog.V(2).Infof("Bind-mounting %q with shared mount propagation", path) // mount --bind /var/lib/kubelet /var/lib/kubelet if err := syscall.Mount(path, path, "" /*fstype*/, syscall.MS_BIND, "" /*data*/); err != nil { return fmt.Errorf("failed to bind-mount %s: %v", path, err) @@ -766,7 +766,7 @@ func prepareSubpathTarget(mounter Interface, subpath Subpath) (bool, string, err } if !notMount { // It's already mounted - glog.V(5).Infof("Skipping bind-mounting subpath %s: already mounted", bindPathTarget) + klog.V(5).Infof("Skipping bind-mounting subpath %s: already mounted", bindPathTarget) return true, bindPathTarget, nil } @@ -819,7 +819,7 @@ func doBindSubPath(mounter Interface, subpath Subpath) (hostPath string, err err if err != nil { return "", fmt.Errorf("error resolving symlinks in %q: %v", subpath.Path, err) } - glog.V(5).Infof("doBindSubPath %q (%q) for volumepath %q", subpath.Path, newPath, subpath.VolumePath) + klog.V(5).Infof("doBindSubPath %q (%q) for volumepath %q", subpath.Path, newPath, subpath.VolumePath) subpath.VolumePath = newVolumePath subpath.Path = newPath @@ -841,9 +841,9 @@ func doBindSubPath(mounter Interface, subpath Subpath) (hostPath string, err err defer func() { // Cleanup subpath on error if !success { - glog.V(4).Infof("doBindSubPath() failed for %q, cleaning up subpath", bindPathTarget) + klog.V(4).Infof("doBindSubPath() failed for %q, cleaning up subpath", bindPathTarget) if cleanErr := cleanSubPath(mounter, subpath); cleanErr != nil { - glog.Errorf("Failed to clean subpath %q: %v", bindPathTarget, cleanErr) + klog.Errorf("Failed to clean subpath %q: %v", bindPathTarget, cleanErr) } } }() @@ -853,13 +853,13 @@ func doBindSubPath(mounter Interface, subpath Subpath) (hostPath string, err err // Do the bind mount options := []string{"bind"} - glog.V(5).Infof("bind mounting %q at %q", mountSource, bindPathTarget) + klog.V(5).Infof("bind mounting %q at %q", mountSource, bindPathTarget) if err = mounter.Mount(mountSource, bindPathTarget, "" /*fstype*/, options); err != nil { return "", fmt.Errorf("error mounting %s: %s", subpath.Path, err) } success = true - glog.V(3).Infof("Bound SubPath %s into %s", subpath.Path, bindPathTarget) + klog.V(3).Infof("Bound SubPath %s into %s", subpath.Path, bindPathTarget) return bindPathTarget, nil } @@ -871,7 +871,7 @@ func (mounter *Mounter) CleanSubPaths(podDir string, volumeName string) error { func doCleanSubPaths(mounter Interface, podDir string, volumeName string) error { // scan /var/lib/kubelet/pods//volume-subpaths//* subPathDir := filepath.Join(podDir, containerSubPathDirectoryName, volumeName) - glog.V(4).Infof("Cleaning up subpath mounts for %s", subPathDir) + klog.V(4).Infof("Cleaning up subpath mounts for %s", subPathDir) containerDirs, err := ioutil.ReadDir(subPathDir) if err != nil { @@ -883,10 +883,10 @@ func doCleanSubPaths(mounter Interface, podDir string, volumeName string) error for _, containerDir := range containerDirs { if !containerDir.IsDir() { - glog.V(4).Infof("Container file is not a directory: %s", containerDir.Name()) + klog.V(4).Infof("Container file is not a directory: %s", containerDir.Name()) continue } - 
glog.V(4).Infof("Cleaning up subpath mounts for container %s", containerDir.Name()) + klog.V(4).Infof("Cleaning up subpath mounts for container %s", containerDir.Name()) // scan /var/lib/kubelet/pods//volume-subpaths///* fullContainerDirPath := filepath.Join(subPathDir, containerDir.Name()) @@ -903,27 +903,27 @@ func doCleanSubPaths(mounter Interface, podDir string, volumeName string) error if err := os.Remove(fullContainerDirPath); err != nil { return fmt.Errorf("error deleting %s: %s", fullContainerDirPath, err) } - glog.V(5).Infof("Removed %s", fullContainerDirPath) + klog.V(5).Infof("Removed %s", fullContainerDirPath) } // Whole pod volume subpaths have been cleaned up, remove its subpath directory. if err := os.Remove(subPathDir); err != nil { return fmt.Errorf("error deleting %s: %s", subPathDir, err) } - glog.V(5).Infof("Removed %s", subPathDir) + klog.V(5).Infof("Removed %s", subPathDir) // Remove entire subpath directory if it's the last one podSubPathDir := filepath.Join(podDir, containerSubPathDirectoryName) if err := os.Remove(podSubPathDir); err != nil && !os.IsExist(err) { return fmt.Errorf("error deleting %s: %s", podSubPathDir, err) } - glog.V(5).Infof("Removed %s", podSubPathDir) + klog.V(5).Infof("Removed %s", podSubPathDir) return nil } // doCleanSubPath tears down the single subpath bind mount func doCleanSubPath(mounter Interface, fullContainerDirPath, subPathIndex string) error { // process /var/lib/kubelet/pods//volume-subpaths/// - glog.V(4).Infof("Cleaning up subpath mounts for subpath %v", subPathIndex) + klog.V(4).Infof("Cleaning up subpath mounts for subpath %v", subPathIndex) fullSubPath := filepath.Join(fullContainerDirPath, subPathIndex) notMnt, err := IsNotMountPoint(mounter, fullSubPath) if err != nil { @@ -934,13 +934,13 @@ func doCleanSubPath(mounter Interface, fullContainerDirPath, subPathIndex string if err = mounter.Unmount(fullSubPath); err != nil { return fmt.Errorf("error unmounting %s: %s", fullSubPath, err) } - glog.V(5).Infof("Unmounted %s", fullSubPath) + klog.V(5).Infof("Unmounted %s", fullSubPath) } // Remove it *non*-recursively, just in case there were some hiccups. 
if err = os.Remove(fullSubPath); err != nil { return fmt.Errorf("error deleting %s: %s", fullSubPath, err) } - glog.V(5).Infof("Removed %s", fullSubPath) + klog.V(5).Infof("Removed %s", fullSubPath) return nil } @@ -972,7 +972,7 @@ func removeEmptyDirs(baseDir, endDir string) error { s, err := os.Stat(curDir) if err != nil { if os.IsNotExist(err) { - glog.V(5).Infof("curDir %q doesn't exist, skipping", curDir) + klog.V(5).Infof("curDir %q doesn't exist, skipping", curDir) continue } return fmt.Errorf("error stat %q: %v", curDir, err) @@ -983,12 +983,12 @@ func removeEmptyDirs(baseDir, endDir string) error { err = os.Remove(curDir) if os.IsExist(err) { - glog.V(5).Infof("Directory %q not empty, not removing", curDir) + klog.V(5).Infof("Directory %q not empty, not removing", curDir) break } else if err != nil { return fmt.Errorf("error removing directory %q: %v", curDir, err) } - glog.V(5).Infof("Removed directory %q", curDir) + klog.V(5).Infof("Removed directory %q", curDir) } return nil } @@ -1005,6 +1005,11 @@ func (mounter *Mounter) SafeMakeDir(subdir string, base string, perm os.FileMode } func (mounter *Mounter) GetMountRefs(pathname string) ([]string, error) { + if _, err := os.Stat(pathname); os.IsNotExist(err) { + return []string{}, nil + } else if err != nil { + return nil, err + } realpath, err := filepath.EvalSymlinks(pathname) if err != nil { return nil, err @@ -1050,7 +1055,7 @@ func getMode(pathname string) (os.FileMode, error) { // and base must be either already resolved symlinks or thet will be resolved in // kubelet's mount namespace (in case it runs containerized). func doSafeMakeDir(pathname string, base string, perm os.FileMode) error { - glog.V(4).Infof("Creating directory %q within base %q", pathname, base) + klog.V(4).Infof("Creating directory %q within base %q", pathname, base) if !PathWithinBase(pathname, base) { return fmt.Errorf("path %s is outside of allowed base %s", pathname, base) @@ -1063,7 +1068,7 @@ func doSafeMakeDir(pathname string, base string, perm os.FileMode) error { if s.IsDir() { // The directory already exists. It can be outside of the parent, // but there is no race-proof check. 
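The new stat guard in `GetMountRefs` above turns "path already deleted" into an empty result instead of an error, while still surfacing unexpected stat failures. The guard in isolation (the /proc/mounts scan of the real implementation is stubbed out):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// mountRefs shows the new existence guard: a missing path yields an empty
// result (the volume is simply gone), while other stat failures and
// symlink-resolution failures still surface as errors.
func mountRefs(pathname string) ([]string, error) {
	if _, err := os.Stat(pathname); os.IsNotExist(err) {
		return []string{}, nil
	} else if err != nil {
		return nil, err
	}
	realpath, err := filepath.EvalSymlinks(pathname)
	if err != nil {
		return nil, err
	}
	// The real implementation scans the mount table for other references
	// to realpath's device; that scan is elided here.
	return []string{realpath}, nil
}

func main() {
	fmt.Println(mountRefs("/does/not/exist"))
}
```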
- glog.V(4).Infof("Directory %s already exists", pathname) + klog.V(4).Infof("Directory %s already exists", pathname) return nil } return &os.PathError{Op: "mkdir", Path: pathname, Err: syscall.ENOTDIR} @@ -1083,7 +1088,7 @@ func doSafeMakeDir(pathname string, base string, perm os.FileMode) error { return fmt.Errorf("path %s is outside of allowed base %s", fullExistingPath, err) } - glog.V(4).Infof("%q already exists, %q to create", fullExistingPath, filepath.Join(toCreate...)) + klog.V(4).Infof("%q already exists, %q to create", fullExistingPath, filepath.Join(toCreate...)) parentFD, err := doSafeOpen(fullExistingPath, base) if err != nil { return fmt.Errorf("cannot open directory %s: %s", existingPath, err) @@ -1092,12 +1097,12 @@ func doSafeMakeDir(pathname string, base string, perm os.FileMode) error { defer func() { if parentFD != -1 { if err = syscall.Close(parentFD); err != nil { - glog.V(4).Infof("Closing FD %v failed for safemkdir(%v): %v", parentFD, pathname, err) + klog.V(4).Infof("Closing FD %v failed for safemkdir(%v): %v", parentFD, pathname, err) } } if childFD != -1 { if err = syscall.Close(childFD); err != nil { - glog.V(4).Infof("Closing FD %v failed for safemkdir(%v): %v", childFD, pathname, err) + klog.V(4).Infof("Closing FD %v failed for safemkdir(%v): %v", childFD, pathname, err) } } }() @@ -1107,7 +1112,7 @@ func doSafeMakeDir(pathname string, base string, perm os.FileMode) error { // created directory into symlink. for _, dir := range toCreate { currentPath = filepath.Join(currentPath, dir) - glog.V(4).Infof("Creating %s", dir) + klog.V(4).Infof("Creating %s", dir) err = syscall.Mkdirat(parentFD, currentPath, uint32(perm)) if err != nil { return fmt.Errorf("cannot create directory %s: %s", currentPath, err) @@ -1126,7 +1131,7 @@ func doSafeMakeDir(pathname string, base string, perm os.FileMode) error { // and user either gets error or the file that it can already access. 
if err = syscall.Close(parentFD); err != nil { - glog.V(4).Infof("Closing FD %v failed for safemkdir(%v): %v", parentFD, pathname, err) + klog.V(4).Infof("Closing FD %v failed for safemkdir(%v): %v", parentFD, pathname, err) } parentFD = childFD childFD = -1 @@ -1175,7 +1180,7 @@ func findExistingPrefix(base, pathname string) (string, []string, error) { } defer func() { if err = syscall.Close(fd); err != nil { - glog.V(4).Infof("Closing FD %v failed for findExistingPrefix(%v): %v", fd, pathname, err) + klog.V(4).Infof("Closing FD %v failed for findExistingPrefix(%v): %v", fd, pathname, err) } }() for i, dir := range dirs { @@ -1189,7 +1194,7 @@ func findExistingPrefix(base, pathname string) (string, []string, error) { return base, nil, err } if err = syscall.Close(fd); err != nil { - glog.V(4).Infof("Closing FD %v failed for findExistingPrefix(%v): %v", fd, pathname, err) + klog.V(4).Infof("Closing FD %v failed for findExistingPrefix(%v): %v", fd, pathname, err) } fd = childFD currentPath = filepath.Join(currentPath, dir) @@ -1221,7 +1226,7 @@ func doSafeOpen(pathname string, base string) (int, error) { defer func() { if parentFD != -1 { if err = syscall.Close(parentFD); err != nil { - glog.V(4).Infof("Closing FD %v failed for safeopen(%v): %v", parentFD, pathname, err) + klog.V(4).Infof("Closing FD %v failed for safeopen(%v): %v", parentFD, pathname, err) } } }() @@ -1230,7 +1235,7 @@ func doSafeOpen(pathname string, base string) (int, error) { defer func() { if childFD != -1 { if err = syscall.Close(childFD); err != nil { - glog.V(4).Infof("Closing FD %v failed for safeopen(%v): %v", childFD, pathname, err) + klog.V(4).Infof("Closing FD %v failed for safeopen(%v): %v", childFD, pathname, err) } } }() @@ -1245,7 +1250,7 @@ func doSafeOpen(pathname string, base string) (int, error) { return -1, fmt.Errorf("path %s is outside of allowed base %s", currentPath, base) } - glog.V(5).Infof("Opening path %s", currentPath) + klog.V(5).Infof("Opening path %s", currentPath) childFD, err = syscall.Openat(parentFD, seg, openFDFlags, 0) if err != nil { return -1, fmt.Errorf("cannot open %s: %s", currentPath, err) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/mount_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/mount_windows.go index b3206d18c1cd..dfdcdfc33771 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/mount_windows.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/mount_windows.go @@ -28,7 +28,7 @@ import ( "strings" "syscall" - "github.com/golang/glog" + "k8s.io/klog" utilfile "k8s.io/kubernetes/pkg/util/file" ) @@ -54,7 +54,7 @@ func (mounter *Mounter) Mount(source string, target string, fstype string, optio target = normalizeWindowsPath(target) if source == "tmpfs" { - glog.V(3).Infof("azureMount: mounting source (%q), target (%q), with options (%q)", source, target, options) + klog.V(3).Infof("azureMount: mounting source (%q), target (%q), with options (%q)", source, target, options) return os.MkdirAll(target, 0755) } @@ -63,17 +63,17 @@ func (mounter *Mounter) Mount(source string, target string, fstype string, optio return err } - glog.V(4).Infof("azureMount: mount options(%q) source:%q, target:%q, fstype:%q, begin to mount", + klog.V(4).Infof("azureMount: mount options(%q) source:%q, target:%q, fstype:%q, begin to mount", options, source, target, fstype) bindSource := "" // tell it's going to mount azure disk or azure file according to options - if bind, _ := isBind(options); bind { + if bind, _, _ 
:= isBind(options); bind { // mount azure disk bindSource = normalizeWindowsPath(source) } else { if len(options) < 2 { - glog.Warningf("azureMount: mount options(%q) command number(%d) less than 2, source:%q, target:%q, skip mounting", + klog.Warningf("azureMount: mount options(%q) command number(%d) less than 2, source:%q, target:%q, skip mounting", options, len(options), source, target) return nil } @@ -102,7 +102,7 @@ func (mounter *Mounter) Mount(source string, target string, fstype string, optio } if output, err := exec.Command("cmd", "/c", "mklink", "/D", target, bindSource).CombinedOutput(); err != nil { - glog.Errorf("mklink failed: %v, source(%q) target(%q) output: %q", err, bindSource, target, string(output)) + klog.Errorf("mklink failed: %v, source(%q) target(%q) output: %q", err, bindSource, target, string(output)) return err } @@ -111,10 +111,10 @@ func (mounter *Mounter) Mount(source string, target string, fstype string, optio // Unmount unmounts the target. func (mounter *Mounter) Unmount(target string) error { - glog.V(4).Infof("azureMount: Unmount target (%q)", target) + klog.V(4).Infof("azureMount: Unmount target (%q)", target) target = normalizeWindowsPath(target) if output, err := exec.Command("cmd", "/c", "rmdir", target).CombinedOutput(); err != nil { - glog.Errorf("rmdir failed: %v, output: %q", err, string(output)) + klog.Errorf("rmdir failed: %v, output: %q", err, string(output)) return err } return nil @@ -168,7 +168,7 @@ func (mounter *Mounter) GetDeviceNameFromMount(mountPath, pluginDir string) (str func getDeviceNameFromMount(mounter Interface, mountPath, pluginDir string) (string, error) { refs, err := mounter.GetMountRefs(mountPath) if err != nil { - glog.V(4).Infof("GetMountRefs failed for mount path %q: %v", mountPath, err) + klog.V(4).Infof("GetMountRefs failed for mount path %q: %v", mountPath, err) return "", err } if len(refs) == 0 { @@ -179,7 +179,7 @@ func getDeviceNameFromMount(mounter Interface, mountPath, pluginDir string) (str if strings.Contains(ref, basemountPath) { volumeID, err := filepath.Rel(normalizeWindowsPath(basemountPath), ref) if err != nil { - glog.Errorf("Failed to get volume id from mount %s - %v", mountPath, err) + klog.Errorf("Failed to get volume id from mount %s - %v", mountPath, err) return "", err } return volumeID, nil @@ -362,10 +362,10 @@ func (mounter *Mounter) CleanSubPaths(podDir string, volumeName string) error { func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, fstype string, options []string) error { // Try to mount the disk - glog.V(4).Infof("Attempting to formatAndMount disk: %s %s %s", fstype, source, target) + klog.V(4).Infof("Attempting to formatAndMount disk: %s %s %s", fstype, source, target) if err := ValidateDiskNumber(source); err != nil { - glog.Errorf("diskMount: formatAndMount failed, err: %v", err) + klog.Errorf("diskMount: formatAndMount failed, err: %v", err) return err } @@ -380,7 +380,7 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, if output, err := mounter.Exec.Run("powershell", "/c", cmd); err != nil { return fmt.Errorf("diskMount: format disk failed, error: %v, output: %q", err, string(output)) } - glog.V(4).Infof("diskMount: Disk successfully formatted, disk: %q, fstype: %q", source, fstype) + klog.V(4).Infof("diskMount: Disk successfully formatted, disk: %q, fstype: %q", source, fstype) driveLetter, err := getDriveLetterByDiskNumber(source, mounter.Exec) if err != nil { @@ -388,9 +388,9 @@ func (mounter *SafeFormatAndMount) 
formatAndMount(source string, target string, } driverPath := driveLetter + ":" target = normalizeWindowsPath(target) - glog.V(4).Infof("Attempting to formatAndMount disk: %s %s %s", fstype, driverPath, target) + klog.V(4).Infof("Attempting to formatAndMount disk: %s %s %s", fstype, driverPath, target) if output, err := mounter.Exec.Run("cmd", "/c", "mklink", "/D", target, driverPath); err != nil { - glog.Errorf("mklink failed: %v, output: %q", err, string(output)) + klog.Errorf("mklink failed: %v, output: %q", err, string(output)) return err } return nil @@ -458,12 +458,14 @@ func getAllParentLinks(path string) ([]string, error) { return links, nil } +// GetMountRefs : empty implementation here since there is no place to query all mount points on Windows func (mounter *Mounter) GetMountRefs(pathname string) ([]string, error) { - refs, err := getAllParentLinks(normalizeWindowsPath(pathname)) - if err != nil { + if _, err := os.Stat(normalizeWindowsPath(pathname)); os.IsNotExist(err) { + return []string{}, nil + } else if err != nil { return nil, err } - return refs, nil + return []string{pathname}, nil } // Note that on windows, it always returns 0. We actually don't set FSGroup on @@ -497,7 +499,7 @@ func (mounter *Mounter) SafeMakeDir(subdir string, base string, perm os.FileMode } func doSafeMakeDir(pathname string, base string, perm os.FileMode) error { - glog.V(4).Infof("Creating directory %q within base %q", pathname, base) + klog.V(4).Infof("Creating directory %q within base %q", pathname, base) if !PathWithinBase(pathname, base) { return fmt.Errorf("path %s is outside of allowed base %s", pathname, base) @@ -510,7 +512,7 @@ func doSafeMakeDir(pathname string, base string, perm os.FileMode) error { if s.IsDir() { // The directory already exists. It can be outside of the parent, // but there is no race-proof check. - glog.V(4).Infof("Directory %s already exists", pathname) + klog.V(4).Infof("Directory %s already exists", pathname) return nil } return &os.PathError{Op: "mkdir", Path: pathname, Err: syscall.ENOTDIR} @@ -545,13 +547,13 @@ func doSafeMakeDir(pathname string, base string, perm os.FileMode) error { return err } - glog.V(4).Infof("%q already exists, %q to create", fullExistingPath, filepath.Join(toCreate...)) + klog.V(4).Infof("%q already exists, %q to create", fullExistingPath, filepath.Join(toCreate...)) currentPath := fullExistingPath // create the directories one by one, making sure nobody can change // created directory into symlink by lock that directory immediately for _, dir := range toCreate { currentPath = filepath.Join(currentPath, dir) - glog.V(4).Infof("Creating %s", dir) + klog.V(4).Infof("Creating %s", dir) if err := os.Mkdir(currentPath, perm); err != nil { return fmt.Errorf("cannot create directory %s: %s", currentPath, err) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/nsenter_mount.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/nsenter_mount.go index a798defe9bfc..ff2eceaf7c35 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/nsenter_mount.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/mount/nsenter_mount.go @@ -25,8 +25,8 @@ import ( "strings" "syscall" - "github.com/golang/glog" "golang.org/x/sys/unix" + "k8s.io/klog" utilfile "k8s.io/kubernetes/pkg/util/file" "k8s.io/kubernetes/pkg/util/nsenter" ) @@ -61,10 +61,10 @@ var _ = Interface(&NsenterMounter{}) // Mount runs mount(8) in the host's root mount namespace. 
Aside from this // aspect, Mount has the same semantics as the mounter returned by mount.New() func (n *NsenterMounter) Mount(source string, target string, fstype string, options []string) error { - bind, bindRemountOpts := isBind(options) + bind, bindOpts, bindRemountOpts := isBind(options) if bind { - err := n.doNsenterMount(source, target, fstype, []string{"bind"}) + err := n.doNsenterMount(source, target, fstype, bindOpts) if err != nil { return err } @@ -77,11 +77,11 @@ func (n *NsenterMounter) Mount(source string, target string, fstype string, opti // doNsenterMount nsenters the host's mount namespace and performs the // requested mount. func (n *NsenterMounter) doNsenterMount(source, target, fstype string, options []string) error { - glog.V(5).Infof("nsenter mount %s %s %s %v", source, target, fstype, options) + klog.V(5).Infof("nsenter mount %s %s %s %v", source, target, fstype, options) cmd, args := n.makeNsenterArgs(source, target, fstype, options) outputBytes, err := n.ne.Exec(cmd, args).CombinedOutput() if len(outputBytes) != 0 { - glog.V(5).Infof("Output of mounting %s to %s: %v", source, target, string(outputBytes)) + klog.V(5).Infof("Output of mounting %s to %s: %v", source, target, string(outputBytes)) } return err } @@ -131,10 +131,10 @@ func (n *NsenterMounter) Unmount(target string) error { // No need to execute systemd-run here, it's enough that unmount is executed // in the host's mount namespace. It will finish appropriate fuse daemon(s) // running in any scope. - glog.V(5).Infof("nsenter unmount args: %v", args) + klog.V(5).Infof("nsenter unmount args: %v", args) outputBytes, err := n.ne.Exec("umount", args).CombinedOutput() if len(outputBytes) != 0 { - glog.V(5).Infof("Output of unmounting %s: %v", target, string(outputBytes)) + klog.V(5).Infof("Output of unmounting %s: %v", target, string(outputBytes)) } return err } @@ -163,18 +163,25 @@ func (n *NsenterMounter) IsLikelyNotMountPoint(file string) (bool, error) { // Check the directory exists if _, err = os.Stat(file); os.IsNotExist(err) { - glog.V(5).Infof("findmnt: directory %s does not exist", file) + klog.V(5).Infof("findmnt: directory %s does not exist", file) return true, err } + + // Resolve any symlinks in file, kernel would do the same and use the resolved path in /proc/mounts + resolvedFile, err := n.EvalHostSymlinks(file) + if err != nil { + return true, err + } + // Add --first-only option: since we are testing for the absence of a mountpoint, it is sufficient to get only // the first of multiple possible mountpoints using --first-only. // Also add fstype output to make sure that the output of target file will give the full path // TODO: Need more refactoring for this function. Track the solution with issue #26996 - args := []string{"-o", "target,fstype", "--noheadings", "--first-only", "--target", file} - glog.V(5).Infof("nsenter findmnt args: %v", args) + args := []string{"-o", "target,fstype", "--noheadings", "--first-only", "--target", resolvedFile} + klog.V(5).Infof("nsenter findmnt args: %v", args) out, err := n.ne.Exec("findmnt", args).CombinedOutput() if err != nil { - glog.V(2).Infof("Failed findmnt command for path %s: %s %v", file, out, err) + klog.V(2).Infof("Failed findmnt command for path %s: %s %v", resolvedFile, out, err) // Different operating systems behave differently for paths which are not mount points. // On older versions (e.g. 2.20.1) we'd get error, on newer ones (e.g. 2.26.2) we'd get "/". // It's safer to assume that it's not a mount point. 
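`IsLikelyNotMountPoint` above now resolves symlinks before invoking findmnt, because /proc/mounts records resolved paths. A standalone sketch of the findmnt probe (findmnt must be on PATH; the error handling mirrors the "safer to assume it's not a mount point" comment):

```go
package main

import (
	"fmt"
	"os/exec"
	"path/filepath"
	"strings"
)

// isLikelyNotMountPoint asks findmnt whether the resolved path is itself
// a mount target. findmnt prints "<target> <fstype>"; only an exact
// target match means the path is a mount point.
func isLikelyNotMountPoint(file string) (bool, error) {
	resolved, err := filepath.EvalSymlinks(file)
	if err != nil {
		return true, err
	}
	out, err := exec.Command("findmnt",
		"-o", "target,fstype", "--noheadings", "--first-only", "--target", resolved).CombinedOutput()
	if err != nil {
		// Older findmnt errors out on non-mountpoints; newer prints "/".
		// Either way it is safer to report "not a mount point".
		return true, nil
	}
	fields := strings.Fields(string(out))
	return len(fields) == 0 || fields[0] != resolved, nil
}

func main() {
	notMnt, err := isLikelyNotMountPoint("/proc")
	fmt.Println(notMnt, err) // /proc is a mount point on Linux: false <nil>
}
```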
@@ -185,13 +192,13 @@ func (n *NsenterMounter) IsLikelyNotMountPoint(file string) (bool, error) { return false, err } - glog.V(5).Infof("IsLikelyNotMountPoint findmnt output for path %s: %v:", file, mountTarget) + klog.V(5).Infof("IsLikelyNotMountPoint findmnt output for path %s: %v:", resolvedFile, mountTarget) - if mountTarget == file { - glog.V(5).Infof("IsLikelyNotMountPoint: %s is a mount point", file) + if mountTarget == resolvedFile { + klog.V(5).Infof("IsLikelyNotMountPoint: %s is a mount point", resolvedFile) return false, nil } - glog.V(5).Infof("IsLikelyNotMountPoint: %s is not a mount point", file) + klog.V(5).Infof("IsLikelyNotMountPoint: %s is not a mount point", resolvedFile) return true, nil } @@ -337,6 +344,13 @@ func (mounter *NsenterMounter) SafeMakeDir(subdir string, base string, perm os.F } func (mounter *NsenterMounter) GetMountRefs(pathname string) ([]string, error) { + exists, err := mounter.ExistsPath(pathname) + if err != nil { + return nil, err + } + if !exists { + return []string{}, nil + } hostpath, err := mounter.ne.EvalSymlinks(pathname, true /* mustExist */) if err != nil { return nil, err @@ -361,7 +375,7 @@ func doNsEnterBindSubPath(mounter *NsenterMounter, subpath Subpath) (hostPath st if err != nil { return "", fmt.Errorf("error resolving symlinks in %q: %v", subpath.Path, err) } - glog.V(5).Infof("doBindSubPath %q (%q) for volumepath %q", subpath.Path, evaluatedHostSubpath, subpath.VolumePath) + klog.V(5).Infof("doBindSubPath %q (%q) for volumepath %q", subpath.Path, evaluatedHostSubpath, subpath.VolumePath) subpath.VolumePath = mounter.ne.KubeletPath(evaluatedHostVolumePath) subpath.Path = mounter.ne.KubeletPath(evaluatedHostSubpath) @@ -384,9 +398,9 @@ func doNsEnterBindSubPath(mounter *NsenterMounter, subpath Subpath) (hostPath st defer func() { // Cleanup subpath on error if !success { - glog.V(4).Infof("doNsEnterBindSubPath() failed for %q, cleaning up subpath", bindPathTarget) + klog.V(4).Infof("doNsEnterBindSubPath() failed for %q, cleaning up subpath", bindPathTarget) if cleanErr := cleanSubPath(mounter, subpath); cleanErr != nil { - glog.Errorf("Failed to clean subpath %q: %v", bindPathTarget, cleanErr) + klog.Errorf("Failed to clean subpath %q: %v", bindPathTarget, cleanErr) } } }() @@ -394,7 +408,7 @@ func doNsEnterBindSubPath(mounter *NsenterMounter, subpath Subpath) (hostPath st // Leap of faith: optimistically expect that nobody has modified previously // expanded evalSubPath with evil symlinks and bind-mount it. // Mount is done on the host! don't use kubelet path! 
- glog.V(5).Infof("bind mounting %q at %q", evaluatedHostSubpath, bindPathTarget) + klog.V(5).Infof("bind mounting %q at %q", evaluatedHostSubpath, bindPathTarget) if err = mounter.Mount(evaluatedHostSubpath, bindPathTarget, "" /*fstype*/, []string{"bind"}); err != nil { return "", fmt.Errorf("error mounting %s: %s", evaluatedHostSubpath, err) } @@ -407,7 +421,7 @@ func doNsEnterBindSubPath(mounter *NsenterMounter, subpath Subpath) (hostPath st } success = true - glog.V(3).Infof("Bound SubPath %s into %s", subpath.Path, bindPathTarget) + klog.V(3).Infof("Bound SubPath %s into %s", subpath.Path, bindPathTarget) return bindPathTarget, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/netsh/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/netsh/BUILD index 980f16ba9add..a356f094a9e3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/netsh/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/netsh/BUILD @@ -14,7 +14,7 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/util/netsh", deps = [ - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/netsh/netsh.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/netsh/netsh.go index 30b66536b1db..a9f1731b45dc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/netsh/netsh.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/netsh/netsh.go @@ -24,7 +24,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" utilexec "k8s.io/utils/exec" ) @@ -68,7 +68,7 @@ func New(exec utilexec.Interface) Interface { // EnsurePortProxyRule checks if the specified redirect exists, if not creates it. func (runner *runner) EnsurePortProxyRule(args []string) (bool, error) { - glog.V(4).Infof("running netsh interface portproxy add v4tov4 %v", args) + klog.V(4).Infof("running netsh interface portproxy add v4tov4 %v", args) out, err := runner.exec.Command(cmdNetsh, args...).CombinedOutput() if err == nil { @@ -87,7 +87,7 @@ func (runner *runner) EnsurePortProxyRule(args []string) (bool, error) { // DeletePortProxyRule deletes the specified portproxy rule. If the rule did not exist, return error. 
func (runner *runner) DeletePortProxyRule(args []string) error { - glog.V(4).Infof("running netsh interface portproxy delete v4tov4 %v", args) + klog.V(4).Infof("running netsh interface portproxy delete v4tov4 %v", args) out, err := runner.exec.Command(cmdNetsh, args...).CombinedOutput() if err == nil { @@ -116,12 +116,12 @@ func (runner *runner) EnsureIPAddress(args []string, ip net.IP) (bool, error) { exists, _ := checkIPExists(ipToCheck, argsShowAddress, runner) if exists == true { - glog.V(4).Infof("not adding IP address %q as it already exists", ipToCheck) + klog.V(4).Infof("not adding IP address %q as it already exists", ipToCheck) return true, nil } // IP Address is not already added, add it now - glog.V(4).Infof("running netsh interface ipv4 add address %v", args) + klog.V(4).Infof("running netsh interface ipv4 add address %v", args) out, err := runner.exec.Command(cmdNetsh, args...).CombinedOutput() if err == nil { @@ -129,7 +129,7 @@ func (runner *runner) EnsureIPAddress(args []string, ip net.IP) (bool, error) { // Query all the IP addresses and see if the one we added is present // PS: We are using netsh interface ipv4 show address here to query all the IP addresses, instead of // querying net.InterfaceAddrs() as it returns the IP address as soon as it is added even though it is uninitialized - glog.V(3).Infof("Waiting until IP: %v is added to the network adapter", ipToCheck) + klog.V(3).Infof("Waiting until IP: %v is added to the network adapter", ipToCheck) for { if exists, _ := checkIPExists(ipToCheck, argsShowAddress, runner); exists { return true, nil @@ -149,7 +149,7 @@ func (runner *runner) EnsureIPAddress(args []string, ip net.IP) (bool, error) { // DeleteIPAddress checks if the specified IP address is present and, if so, deletes it. 
func (runner *runner) DeleteIPAddress(args []string) error { - glog.V(4).Infof("running netsh interface ipv4 delete address %v", args) + klog.V(4).Infof("running netsh interface ipv4 delete address %v", args) out, err := runner.exec.Command(cmdNetsh, args...).CombinedOutput() if err == nil { @@ -187,7 +187,7 @@ func checkIPExists(ipToCheck string, args []string, runner *runner) (bool, error return false, err } ipAddressString := string(ipAddress[:]) - glog.V(3).Infof("Searching for IP: %v in IP dump: %v", ipToCheck, ipAddressString) + klog.V(3).Infof("Searching for IP: %v in IP dump: %v", ipToCheck, ipAddressString) showAddressArray := strings.Split(ipAddressString, "\n") for _, showAddress := range showAddressArray { if strings.Contains(showAddress, "IP") { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/node/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/node/BUILD index f07fb2f734df..7e1696cefea3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/node/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/node/BUILD @@ -18,6 +18,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/node/node.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/node/node.go index ddb29aac2b74..ff5038551507 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/node/node.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/node/node.go @@ -24,6 +24,8 @@ import ( "strings" "time" + "k8s.io/klog" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -34,9 +36,11 @@ import ( ) const ( - // The reason and message set on a pod when its state cannot be confirmed as kubelet is unresponsive + // NodeUnreachablePodReason is the reason on a pod when its state cannot be confirmed as kubelet is unresponsive + // on the node it is (was) running. + NodeUnreachablePodReason = "NodeLost" + // NodeUnreachablePodMessage is the message on a pod when its state cannot be confirmed as kubelet is unresponsive // on the node it is (was) running. - NodeUnreachablePodReason = "NodeLost" NodeUnreachablePodMessage = "Node %v which was running pod %v is unresponsive" ) @@ -91,6 +95,22 @@ func GetNodeHostIP(node *v1.Node) (net.IP, error) { return nil, fmt.Errorf("host IP unknown; known addresses: %v", addresses) } +// GetNodeIP returns the ip of node with the provided hostname +func GetNodeIP(client clientset.Interface, hostname string) net.IP { + var nodeIP net.IP + node, err := client.CoreV1().Nodes().Get(hostname, metav1.GetOptions{}) + if err != nil { + klog.Warningf("Failed to retrieve node info: %v", err) + return nil + } + nodeIP, err = GetNodeHostIP(node) + if err != nil { + klog.Warningf("Failed to retrieve node IP: %v", err) + return nil + } + return nodeIP +} + // GetZoneKey is a helper function that builds a string identifier that is unique per failure-zone; // it returns empty-string for no zone. 
func GetZoneKey(node *v1.Node) string { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/nsenter/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/nsenter/BUILD index 05765dcc9be6..9a94fae10156 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/nsenter/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/nsenter/BUILD @@ -24,7 +24,7 @@ go_library( "//vendor/k8s.io/utils/exec:go_default_library", ], "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], "@io_bazel_rules_go//go/platform:nacl": [ diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/nsenter/exec.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/nsenter/exec.go index 201f1270c772..134497f0a752 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/nsenter/exec.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/nsenter/exec.go @@ -23,7 +23,7 @@ import ( "fmt" "path/filepath" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/utils/exec" ) @@ -49,7 +49,7 @@ func NewNsenterExecutor(hostRootFsPath string, executor exec.Interface) *Executo func (nsExecutor *Executor) Command(cmd string, args ...string) exec.Cmd { fullArgs := append([]string{fmt.Sprintf("--mount=%s", nsExecutor.hostProcMountNsPath), "--"}, append([]string{cmd}, args...)...) - glog.V(5).Infof("Running nsenter command: %v %v", nsenterPath, fullArgs) + klog.V(5).Infof("Running nsenter command: %v %v", nsenterPath, fullArgs) return nsExecutor.executor.Command(nsenterPath, fullArgs...) } @@ -57,7 +57,7 @@ func (nsExecutor *Executor) Command(cmd string, args ...string) exec.Cmd { func (nsExecutor *Executor) CommandContext(ctx context.Context, cmd string, args ...string) exec.Cmd { fullArgs := append([]string{fmt.Sprintf("--mount=%s", nsExecutor.hostProcMountNsPath), "--"}, append([]string{cmd}, args...)...) - glog.V(5).Infof("Running nsenter command: %v %v", nsenterPath, fullArgs) + klog.V(5).Infof("Running nsenter command: %v %v", nsenterPath, fullArgs) return nsExecutor.executor.CommandContext(ctx, nsenterPath, fullArgs...) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/nsenter/nsenter.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/nsenter/nsenter.go index e928a57ac9fe..56361e7846e7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/nsenter/nsenter.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/nsenter/nsenter.go @@ -28,7 +28,7 @@ import ( "k8s.io/utils/exec" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -127,7 +127,7 @@ func (ne *Nsenter) Exec(cmd string, args []string) exec.Cmd { hostProcMountNsPath := filepath.Join(ne.hostRootFsPath, mountNsPath) fullArgs := append([]string{fmt.Sprintf("--mount=%s", hostProcMountNsPath), "--"}, append([]string{ne.AbsHostPath(cmd)}, args...)...) - glog.V(5).Infof("Running nsenter command: %v %v", nsenterPath, fullArgs) + klog.V(5).Infof("Running nsenter command: %v %v", nsenterPath, fullArgs) return ne.executor.Command(nsenterPath, fullArgs...) 
} @@ -170,7 +170,7 @@ func (ne *Nsenter) EvalSymlinks(pathname string, mustExist bool) (string, error) } outBytes, err := ne.Exec("realpath", args).CombinedOutput() if err != nil { - glog.Infof("failed to resolve symbolic links on %s: %v", pathname, err) + klog.Infof("failed to resolve symbolic links on %s: %v", pathname, err) return "", err } return strings.TrimSpace(string(outBytes)), nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/oom/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/oom/BUILD index 712fc9061a6e..b27ec22804eb 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/oom/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/oom/BUILD @@ -19,7 +19,7 @@ go_library( deps = select({ "@io_bazel_rules_go//go/platform:linux": [ "//pkg/kubelet/cm/util:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "//conditions:default": [], }), diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/oom/oom_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/oom/oom_linux.go index ad6d5c264b55..243c30d0cc40 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/oom/oom_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/oom/oom_linux.go @@ -29,7 +29,7 @@ import ( cmutil "k8s.io/kubernetes/pkg/kubelet/cm/util" - "github.com/golang/glog" + "k8s.io/klog" ) func NewOOMAdjuster() *OOMAdjuster { @@ -62,24 +62,24 @@ func applyOOMScoreAdj(pid int, oomScoreAdj int) error { maxTries := 2 oomScoreAdjPath := path.Join("/proc", pidStr, "oom_score_adj") value := strconv.Itoa(oomScoreAdj) - glog.V(4).Infof("attempting to set %q to %q", oomScoreAdjPath, value) + klog.V(4).Infof("attempting to set %q to %q", oomScoreAdjPath, value) var err error for i := 0; i < maxTries; i++ { err = ioutil.WriteFile(oomScoreAdjPath, []byte(value), 0700) if err != nil { if os.IsNotExist(err) { - glog.V(2).Infof("%q does not exist", oomScoreAdjPath) + klog.V(2).Infof("%q does not exist", oomScoreAdjPath) return os.ErrNotExist } - glog.V(3).Info(err) + klog.V(3).Info(err) time.Sleep(100 * time.Millisecond) continue } return nil } if err != nil { - glog.V(2).Infof("failed to set %q to %q: %v", oomScoreAdjPath, value, err) + klog.V(2).Infof("failed to set %q to %q: %v", oomScoreAdjPath, value, err) } return err } @@ -97,20 +97,20 @@ func (oomAdjuster *OOMAdjuster) applyOOMScoreAdjContainer(cgroupName string, oom return os.ErrNotExist } continueAdjusting = true - glog.V(10).Infof("Error getting process list for cgroup %s: %+v", cgroupName, err) + klog.V(10).Infof("Error getting process list for cgroup %s: %+v", cgroupName, err) } else if len(pidList) == 0 { - glog.V(10).Infof("Pid list is empty") + klog.V(10).Infof("Pid list is empty") continueAdjusting = true } else { for _, pid := range pidList { if !adjustedProcessSet[pid] { - glog.V(10).Infof("pid %d needs to be set", pid) + klog.V(10).Infof("pid %d needs to be set", pid) if err = oomAdjuster.ApplyOOMScoreAdj(pid, oomScoreAdj); err == nil { adjustedProcessSet[pid] = true } else if err == os.ErrNotExist { continue } else { - glog.V(10).Infof("cannot adjust oom score for pid %d - %v", pid, err) + klog.V(10).Infof("cannot adjust oom score for pid %d - %v", pid, err) continueAdjusting = true } // Processes can come and go while we try to apply oom score adjust value. So ignore errors here. 
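The oom_linux.go hunk above is a mechanical logger swap (glog to klog); the behavior it instruments is unchanged: a bounded retry loop that writes the desired score to /proc/<pid>/oom_score_adj. A minimal standalone sketch of that pattern, with setOOMScoreAdj as a hypothetical helper name rather than the vendored API:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strconv"
	"time"
)

// setOOMScoreAdj writes the desired score to /proc/<pid>/oom_score_adj,
// retrying once after a short sleep because the write can race with the
// target process starting up. A missing file means the process exited.
func setOOMScoreAdj(pid, oomScoreAdj int) error {
	adjPath := filepath.Join("/proc", strconv.Itoa(pid), "oom_score_adj")
	value := []byte(strconv.Itoa(oomScoreAdj))
	var err error
	for i := 0; i < 2; i++ {
		if err = ioutil.WriteFile(adjPath, value, 0700); err == nil {
			return nil
		}
		if os.IsNotExist(err) {
			return os.ErrNotExist
		}
		time.Sleep(100 * time.Millisecond)
	}
	return fmt.Errorf("failed to set %q: %v", adjPath, err)
}

func main() {
	// Raising our own score is allowed without privileges; lowering it
	// below zero would additionally require CAP_SYS_RESOURCE.
	if err := setOOMScoreAdj(os.Getpid(), 100); err != nil {
		fmt.Println(err)
	}
}
```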
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/procfs/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/procfs/BUILD index 1c02e4297011..223f2265fb6c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/procfs/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/procfs/BUILD @@ -19,7 +19,7 @@ go_library( deps = select({ "@io_bazel_rules_go//go/platform:linux": [ "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "//conditions:default": [], }), diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/procfs/procfs_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/procfs/procfs_linux.go index fefc25bfbd68..46059c33e77e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/procfs/procfs_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/procfs/procfs_linux.go @@ -21,6 +21,7 @@ package procfs import ( "bytes" "fmt" + "io" "io/ioutil" "os" "path" @@ -31,8 +32,8 @@ import ( "syscall" "unicode" - "github.com/golang/glog" utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog" ) type ProcFS struct{} @@ -105,47 +106,60 @@ func PidOf(name string) ([]int, error) { func getPids(re *regexp.Regexp) []int { pids := []int{} - filepath.Walk("/proc", func(path string, info os.FileInfo, err error) error { + + dirFD, err := os.Open("/proc") + if err != nil { + return nil + } + defer dirFD.Close() + + for { + // Read a small number at a time in case there are many entries, we don't want to + // allocate a lot here. + ls, err := dirFD.Readdir(10) + if err == io.EOF { + break + } if err != nil { - // We should continue processing other directories/files return nil } - base := filepath.Base(path) - // Traverse only the directories we are interested in - if info.IsDir() && path != "/proc" { + + for _, entry := range ls { + if !entry.IsDir() { + continue + } + // If the directory is not a number (i.e. 
not a PID), skip it - if _, err := strconv.Atoi(base); err != nil { - return filepath.SkipDir + pid, err := strconv.Atoi(entry.Name()) + if err != nil { + continue + } + + cmdline, err := ioutil.ReadFile(filepath.Join("/proc", entry.Name(), "cmdline")) + if err != nil { + klog.V(4).Infof("Error reading file %s: %+v", filepath.Join("/proc", entry.Name(), "cmdline"), err) + continue + } + + // The bytes we read have '\0' as a separator for the command line + parts := bytes.SplitN(cmdline, []byte{0}, 2) + if len(parts) == 0 { + continue + } + // Split the command line itself we are interested in just the first part + exe := strings.FieldsFunc(string(parts[0]), func(c rune) bool { + return unicode.IsSpace(c) || c == ':' + }) + if len(exe) == 0 { + continue + } + // Check if the name of the executable is what we are looking for + if re.MatchString(exe[0]) { + // Grab the PID from the directory path + pids = append(pids, pid) } } - if base != "cmdline" { - return nil - } - cmdline, err := ioutil.ReadFile(path) - if err != nil { - glog.V(4).Infof("Error reading file %s: %+v", path, err) - return nil - } - // The bytes we read have '\0' as a separator for the command line - parts := bytes.SplitN(cmdline, []byte{0}, 2) - if len(parts) == 0 { - return nil - } - // Split the command line itself we are interested in just the first part - exe := strings.FieldsFunc(string(parts[0]), func(c rune) bool { - return unicode.IsSpace(c) || c == ':' - }) - if len(exe) == 0 { - return nil - } - // Check if the name of the executable is what we are looking for - if re.MatchString(exe[0]) { - dirname := filepath.Base(filepath.Dir(path)) - // Grab the PID from the directory path - pid, _ := strconv.Atoi(dirname) - pids = append(pids, pid) - } - return nil - }) + } + return pids } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/resizefs/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/resizefs/BUILD index 9d1cf01487f0..04f11e1fc0c5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/resizefs/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/resizefs/BUILD @@ -23,7 +23,7 @@ go_library( ], "@io_bazel_rules_go//go/platform:linux": [ "//pkg/util/mount:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "@io_bazel_rules_go//go/platform:nacl": [ "//pkg/util/mount:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/resizefs/resizefs_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/resizefs/resizefs_linux.go index 518eba2bb603..4eabdb1ddc0b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/resizefs/resizefs_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/resizefs/resizefs_linux.go @@ -21,7 +21,7 @@ package resizefs import ( "fmt" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" ) @@ -50,7 +50,7 @@ func (resizefs *ResizeFs) Resize(devicePath string, deviceMountPath string) (boo return false, nil } - glog.V(3).Infof("ResizeFS.Resize - Expanding mounted volume %s", devicePath) + klog.V(3).Infof("ResizeFS.Resize - Expanding mounted volume %s", devicePath) switch format { case "ext3", "ext4": return resizefs.extResize(devicePath) @@ -63,7 +63,7 @@ func (resizefs *ResizeFs) Resize(devicePath string, deviceMountPath string) (boo func (resizefs *ResizeFs) extResize(devicePath string) (bool, error) { output, err := resizefs.mounter.Exec.Run("resize2fs", devicePath) if err == nil { - 
glog.V(2).Infof("Device %s resized successfully", devicePath) + klog.V(2).Infof("Device %s resized successfully", devicePath) return true, nil } @@ -77,7 +77,7 @@ func (resizefs *ResizeFs) xfsResize(deviceMountPath string) (bool, error) { output, err := resizefs.mounter.Exec.Run("xfs_growfs", args...) if err == nil { - glog.V(2).Infof("Device %s resized successfully", deviceMountPath) + klog.V(2).Infof("Device %s resized successfully", deviceMountPath) return true, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/resourcecontainer/resource_container_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/resourcecontainer/resource_container_linux.go index 86477c5aa2ed..efb654685768 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/resourcecontainer/resource_container_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/resourcecontainer/resource_container_linux.go @@ -25,7 +25,7 @@ import ( "github.com/opencontainers/runc/libcontainer/configs" ) -// Creates resource-only containerName if it does not already exist and moves +// RunInResourceContainer creates resource-only containerName if it does not already exist and moves // the current process to it. // // containerName must be an absolute container name. diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/resourcecontainer/resource_container_unsupported.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/resourcecontainer/resource_container_unsupported.go index da4713126510..2c9db7b064ce 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/resourcecontainer/resource_container_unsupported.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/resourcecontainer/resource_container_unsupported.go @@ -22,6 +22,7 @@ import ( "errors" ) +// RunInResourceContainer creates resource-only containerName unsupported. func RunInResourceContainer(containerName string) error { return errors.New("resource-only containers unsupported in this platform") } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/strings/strings.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/strings/strings.go index 1015671aa917..29be3170a250 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/strings/strings.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/strings/strings.go @@ -22,7 +22,7 @@ import ( "unicode" ) -// Splits a fully qualified name and returns its namespace and name. +// SplitQualifiedName splits a fully qualified name and returns its namespace and name. // Assumes that the input 'str' has been validated. func SplitQualifiedName(str string) (string, string) { parts := strings.Split(str, "/") @@ -32,19 +32,18 @@ func SplitQualifiedName(str string) (string, string) { return parts[0], parts[1] } -// Joins 'namespace' and 'name' and returns a fully qualified name +// JoinQualifiedName joins 'namespace' and 'name' and returns a fully qualified name // Assumes that the input is valid. func JoinQualifiedName(namespace, name string) string { return path.Join(namespace, name) } -// Returns the first N slice of a string. +// ShortenString returns the first N slice of a string. func ShortenString(str string, n int) string { if len(str) <= n { return str - } else { - return str[:n] } + return str[:n] } // isVowel returns true if the rune is a vowel (case insensitive). 
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/taints/taints.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/taints/taints.go index 76e4bb866814..0777f6d69223 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/taints/taints.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/taints/taints.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// package taints implements utilites for working with taints +// package taints implements utilities for working with taints package taints import ( @@ -240,7 +240,7 @@ func DeleteTaintsByKey(taints []v1.Taint, taintKey string) ([]v1.Taint, bool) { return newTaints, deleted } -// DeleteTaint removes all the the taints that have the same key and effect to given taintToDelete. +// DeleteTaint removes all the taints that have the same key and effect to given taintToDelete. func DeleteTaint(taints []v1.Taint, taintToDelete *v1.Taint) ([]v1.Taint, bool) { newTaints := []v1.Taint{} deleted := false diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/version/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/version/BUILD deleted file mode 100644 index 3b53281f771c..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/util/version/BUILD +++ /dev/null @@ -1,35 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "version.go", - ], - importpath = "k8s.io/kubernetes/pkg/util/version", -) - -go_test( - name = "go_default_test", - srcs = ["version_test.go"], - embed = [":go_default_library"], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/doc.go index 380395784885..c3ace745136c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/version/doc.go @@ -14,7 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// +k8s:openapi-gen=true + // Package version supplies version information collected at build time to // kubernetes components. 
-// +k8s:openapi-gen=true package version diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/BUILD index bbe76dd8dcd5..ba5d04ba0780 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/BUILD @@ -9,6 +9,7 @@ go_library( "metrics_errors.go", "metrics_nil.go", "metrics_statfs.go", + "noop_expandable_plugin.go", "plugins.go", "volume.go", "volume_linux.go", @@ -17,7 +18,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/volume", visibility = ["//visibility:public"], deps = [ - "//pkg/cloudprovider:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/volume/util/fs:go_default_library", "//pkg/volume/util/recyclerclient:go_default_library", @@ -30,8 +30,9 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", + "//staging/src/k8s.io/cloud-provider:go_default_library", "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -69,7 +70,7 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", - "//pkg/volume/aws_ebs:all-srcs", + "//pkg/volume/awsebs:all-srcs", "//pkg/volume/azure_dd:all-srcs", "//pkg/volume/azure_file:all-srcs", "//pkg/volume/cephfs:all-srcs", @@ -77,11 +78,11 @@ filegroup( "//pkg/volume/configmap:all-srcs", "//pkg/volume/csi:all-srcs", "//pkg/volume/downwardapi:all-srcs", - "//pkg/volume/empty_dir:all-srcs", + "//pkg/volume/emptydir:all-srcs", "//pkg/volume/fc:all-srcs", "//pkg/volume/flexvolume:all-srcs", "//pkg/volume/flocker:all-srcs", - "//pkg/volume/gce_pd:all-srcs", + "//pkg/volume/gcepd:all-srcs", "//pkg/volume/git_repo:all-srcs", "//pkg/volume/glusterfs:all-srcs", "//pkg/volume/host_path:all-srcs", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/BUILD deleted file mode 100644 index 532ddf8b7298..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/BUILD +++ /dev/null @@ -1,72 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = [ - "attacher.go", - "aws_ebs.go", - "aws_ebs_block.go", - "aws_util.go", - "doc.go", - ], - importpath = "k8s.io/kubernetes/pkg/volume/aws_ebs", - deps = [ - "//pkg/cloudprovider:go_default_library", - "//pkg/cloudprovider/providers/aws:go_default_library", - "//pkg/features:go_default_library", - "//pkg/util/mount:go_default_library", - "//pkg/util/strings:go_default_library", - "//pkg/volume:go_default_library", - "//pkg/volume/util:go_default_library", - "//pkg/volume/util/volumepathhandler:go_default_library", - "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - ], -) - -go_test( - name = 
"go_default_test", - srcs = [ - "attacher_test.go", - "aws_ebs_block_test.go", - "aws_ebs_test.go", - ], - embed = [":go_default_library"], - deps = [ - "//pkg/cloudprovider/providers/aws:go_default_library", - "//pkg/util/mount:go_default_library", - "//pkg/volume:go_default_library", - "//pkg/volume/testing:go_default_library", - "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", - "//staging/src/k8s.io/client-go/util/testing:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/attacher.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/attacher.go deleted file mode 100644 index da25691d41a7..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/attacher.go +++ /dev/null @@ -1,291 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package aws_ebs - -import ( - "fmt" - "os" - "path" - "strconv" - "time" - - "github.com/golang/glog" - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/cloudprovider/providers/aws" - "k8s.io/kubernetes/pkg/util/mount" - "k8s.io/kubernetes/pkg/volume" - volumeutil "k8s.io/kubernetes/pkg/volume/util" -) - -type awsElasticBlockStoreAttacher struct { - host volume.VolumeHost - awsVolumes aws.Volumes -} - -var _ volume.Attacher = &awsElasticBlockStoreAttacher{} - -var _ volume.DeviceMounter = &awsElasticBlockStoreAttacher{} - -var _ volume.AttachableVolumePlugin = &awsElasticBlockStorePlugin{} - -var _ volume.DeviceMountableVolumePlugin = &awsElasticBlockStorePlugin{} - -func (plugin *awsElasticBlockStorePlugin) NewAttacher() (volume.Attacher, error) { - awsCloud, err := getCloudProvider(plugin.host.GetCloudProvider()) - if err != nil { - return nil, err - } - - return &awsElasticBlockStoreAttacher{ - host: plugin.host, - awsVolumes: awsCloud, - }, nil -} - -func (plugin *awsElasticBlockStorePlugin) NewDeviceMounter() (volume.DeviceMounter, error) { - return plugin.NewAttacher() -} - -func (plugin *awsElasticBlockStorePlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) { - mounter := plugin.host.GetMounter(plugin.GetPluginName()) - return mounter.GetMountRefs(deviceMountPath) -} - -func (attacher *awsElasticBlockStoreAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) { - volumeSource, _, err := getVolumeSource(spec) - if err != nil { - return "", err - } - - volumeID := aws.KubernetesVolumeID(volumeSource.VolumeID) - - // awsCloud.AttachDisk checks if disk is already attached to node and - // succeeds in that case, so no need to do that separately. - devicePath, err := attacher.awsVolumes.AttachDisk(volumeID, nodeName) - if err != nil { - glog.Errorf("Error attaching volume %q to node %q: %+v", volumeID, nodeName, err) - return "", err - } - - return devicePath, nil -} - -func (attacher *awsElasticBlockStoreAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) { - - glog.Warningf("Attacher.VolumesAreAttached called for node %q - Please use BulkVerifyVolumes for AWS", nodeName) - volumeNodeMap := map[types.NodeName][]*volume.Spec{ - nodeName: specs, - } - nodeVolumesResult := make(map[*volume.Spec]bool) - nodesVerificationMap, err := attacher.BulkVerifyVolumes(volumeNodeMap) - if err != nil { - glog.Errorf("Attacher.VolumesAreAttached - error checking volumes for node %q with %v", nodeName, err) - return nodeVolumesResult, err - } - - if result, ok := nodesVerificationMap[nodeName]; ok { - return result, nil - } - return nodeVolumesResult, nil -} - -func (attacher *awsElasticBlockStoreAttacher) BulkVerifyVolumes(volumesByNode map[types.NodeName][]*volume.Spec) (map[types.NodeName]map[*volume.Spec]bool, error) { - volumesAttachedCheck := make(map[types.NodeName]map[*volume.Spec]bool) - diskNamesByNode := make(map[types.NodeName][]aws.KubernetesVolumeID) - volumeSpecMap := make(map[aws.KubernetesVolumeID]*volume.Spec) - - for nodeName, volumeSpecs := range volumesByNode { - for _, volumeSpec := range volumeSpecs { - volumeSource, _, err := getVolumeSource(volumeSpec) - - if err != nil { - glog.Errorf("Error getting volume (%q) source : %v", volumeSpec.Name(), err) - continue - } - - name := aws.KubernetesVolumeID(volumeSource.VolumeID) - diskNamesByNode[nodeName] = append(diskNamesByNode[nodeName], name) - - nodeDisk, nodeDiskExists := 
volumesAttachedCheck[nodeName] - - if !nodeDiskExists { - nodeDisk = make(map[*volume.Spec]bool) - } - nodeDisk[volumeSpec] = true - volumeSpecMap[name] = volumeSpec - volumesAttachedCheck[nodeName] = nodeDisk - } - } - attachedResult, err := attacher.awsVolumes.DisksAreAttached(diskNamesByNode) - - if err != nil { - glog.Errorf("Error checking if volumes are attached to nodes err = %v", err) - return volumesAttachedCheck, err - } - - for nodeName, nodeDisks := range attachedResult { - for diskName, attached := range nodeDisks { - if !attached { - spec := volumeSpecMap[diskName] - setNodeDisk(volumesAttachedCheck, spec, nodeName, false) - } - } - } - - return volumesAttachedCheck, nil -} - -func (attacher *awsElasticBlockStoreAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) { - volumeSource, _, err := getVolumeSource(spec) - if err != nil { - return "", err - } - - volumeID := volumeSource.VolumeID - partition := "" - if volumeSource.Partition != 0 { - partition = strconv.Itoa(int(volumeSource.Partition)) - } - - if devicePath == "" { - return "", fmt.Errorf("WaitForAttach failed for AWS Volume %q: devicePath is empty.", volumeID) - } - - ticker := time.NewTicker(checkSleepDuration) - defer ticker.Stop() - timer := time.NewTimer(timeout) - defer timer.Stop() - - for { - select { - case <-ticker.C: - glog.V(5).Infof("Checking AWS Volume %q is attached.", volumeID) - devicePaths := getDiskByIdPaths(aws.KubernetesVolumeID(volumeSource.VolumeID), partition, devicePath) - path, err := verifyDevicePath(devicePaths) - if err != nil { - // Log error, if any, and continue checking periodically. See issue #11321 - glog.Errorf("Error verifying AWS Volume (%q) is attached: %v", volumeID, err) - } else if path != "" { - // A device path has successfully been created for the PD - glog.Infof("Successfully found attached AWS Volume %q.", volumeID) - return path, nil - } - case <-timer.C: - return "", fmt.Errorf("Could not find attached AWS Volume %q. Timeout waiting for mount paths to be created.", volumeID) - } - } -} - -func (attacher *awsElasticBlockStoreAttacher) GetDeviceMountPath( - spec *volume.Spec) (string, error) { - volumeSource, _, err := getVolumeSource(spec) - if err != nil { - return "", err - } - - return makeGlobalPDPath(attacher.host, aws.KubernetesVolumeID(volumeSource.VolumeID)), nil -} - -// FIXME: this method can be further pruned. -func (attacher *awsElasticBlockStoreAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error { - mounter := attacher.host.GetMounter(awsElasticBlockStorePluginName) - notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath) - if err != nil { - if os.IsNotExist(err) { - if err := os.MkdirAll(deviceMountPath, 0750); err != nil { - return err - } - notMnt = true - } else { - return err - } - } - - volumeSource, readOnly, err := getVolumeSource(spec) - if err != nil { - return err - } - - options := []string{} - if readOnly { - options = append(options, "ro") - } - if notMnt { - diskMounter := volumeutil.NewSafeFormatAndMountFromHost(awsElasticBlockStorePluginName, attacher.host) - mountOptions := volumeutil.MountOptionFromSpec(spec, options...) 
- err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions) - if err != nil { - os.Remove(deviceMountPath) - return err - } - } - return nil -} - -type awsElasticBlockStoreDetacher struct { - mounter mount.Interface - awsVolumes aws.Volumes -} - -var _ volume.Detacher = &awsElasticBlockStoreDetacher{} - -var _ volume.DeviceUnmounter = &awsElasticBlockStoreDetacher{} - -func (plugin *awsElasticBlockStorePlugin) NewDetacher() (volume.Detacher, error) { - awsCloud, err := getCloudProvider(plugin.host.GetCloudProvider()) - if err != nil { - return nil, err - } - - return &awsElasticBlockStoreDetacher{ - mounter: plugin.host.GetMounter(plugin.GetPluginName()), - awsVolumes: awsCloud, - }, nil -} - -func (plugin *awsElasticBlockStorePlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) { - return plugin.NewDetacher() -} - -func (detacher *awsElasticBlockStoreDetacher) Detach(volumeName string, nodeName types.NodeName) error { - volumeID := aws.KubernetesVolumeID(path.Base(volumeName)) - - if _, err := detacher.awsVolumes.DetachDisk(volumeID, nodeName); err != nil { - glog.Errorf("Error detaching volumeID %q: %v", volumeID, err) - return err - } - return nil -} - -func (detacher *awsElasticBlockStoreDetacher) UnmountDevice(deviceMountPath string) error { - return volumeutil.UnmountPath(deviceMountPath, detacher.mounter) -} - -func setNodeDisk( - nodeDiskMap map[types.NodeName]map[*volume.Spec]bool, - volumeSpec *volume.Spec, - nodeName types.NodeName, - check bool) { - - volumeMap := nodeDiskMap[nodeName] - if volumeMap == nil { - volumeMap = make(map[*volume.Spec]bool) - nodeDiskMap[nodeName] = volumeMap - } - volumeMap[volumeSpec] = check -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/doc.go deleted file mode 100644 index 8632d39c4e55..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package aws_ebs contains the internal representation of AWS Elastic -// Block Store volumes. 
-package aws_ebs diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/BUILD new file mode 100644 index 000000000000..27213a3b6222 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/BUILD @@ -0,0 +1,72 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = [ + "attacher.go", + "aws_ebs.go", + "aws_ebs_block.go", + "aws_util.go", + "doc.go", + ], + importpath = "k8s.io/kubernetes/pkg/volume/awsebs", + deps = [ + "//pkg/cloudprovider/providers/aws:go_default_library", + "//pkg/features:go_default_library", + "//pkg/util/mount:go_default_library", + "//pkg/util/strings:go_default_library", + "//pkg/volume:go_default_library", + "//pkg/volume/util:go_default_library", + "//pkg/volume/util/volumepathhandler:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/cloud-provider:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "attacher_test.go", + "aws_ebs_block_test.go", + "aws_ebs_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//pkg/cloudprovider/providers/aws:go_default_library", + "//pkg/util/mount:go_default_library", + "//pkg/volume:go_default_library", + "//pkg/volume/testing:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", + "//staging/src/k8s.io/client-go/util/testing:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/OWNERS similarity index 100% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/OWNERS rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/OWNERS diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/attacher.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/attacher.go new file mode 100644 index 000000000000..a4a320633c2f --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/attacher.go @@ -0,0 +1,292 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package awsebs + +import ( + "fmt" + "os" + "path" + "strconv" + "time" + + "k8s.io/klog" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/kubernetes/pkg/cloudprovider/providers/aws" + "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/kubernetes/pkg/volume" + volumeutil "k8s.io/kubernetes/pkg/volume/util" +) + +type awsElasticBlockStoreAttacher struct { + host volume.VolumeHost + awsVolumes aws.Volumes +} + +var _ volume.Attacher = &awsElasticBlockStoreAttacher{} + +var _ volume.DeviceMounter = &awsElasticBlockStoreAttacher{} + +var _ volume.AttachableVolumePlugin = &awsElasticBlockStorePlugin{} + +var _ volume.DeviceMountableVolumePlugin = &awsElasticBlockStorePlugin{} + +func (plugin *awsElasticBlockStorePlugin) NewAttacher() (volume.Attacher, error) { + awsCloud, err := getCloudProvider(plugin.host.GetCloudProvider()) + if err != nil { + return nil, err + } + + return &awsElasticBlockStoreAttacher{ + host: plugin.host, + awsVolumes: awsCloud, + }, nil +} + +func (plugin *awsElasticBlockStorePlugin) NewDeviceMounter() (volume.DeviceMounter, error) { + return plugin.NewAttacher() +} + +func (plugin *awsElasticBlockStorePlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) { + mounter := plugin.host.GetMounter(plugin.GetPluginName()) + return mounter.GetMountRefs(deviceMountPath) +} + +func (attacher *awsElasticBlockStoreAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) { + volumeSource, _, err := getVolumeSource(spec) + if err != nil { + return "", err + } + + volumeID := aws.KubernetesVolumeID(volumeSource.VolumeID) + + // awsCloud.AttachDisk checks if disk is already attached to node and + // succeeds in that case, so no need to do that separately. 
+ devicePath, err := attacher.awsVolumes.AttachDisk(volumeID, nodeName) + if err != nil { + klog.Errorf("Error attaching volume %q to node %q: %+v", volumeID, nodeName, err) + return "", err + } + + return devicePath, nil +} + +func (attacher *awsElasticBlockStoreAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) { + + klog.Warningf("Attacher.VolumesAreAttached called for node %q - Please use BulkVerifyVolumes for AWS", nodeName) + volumeNodeMap := map[types.NodeName][]*volume.Spec{ + nodeName: specs, + } + nodeVolumesResult := make(map[*volume.Spec]bool) + nodesVerificationMap, err := attacher.BulkVerifyVolumes(volumeNodeMap) + if err != nil { + klog.Errorf("Attacher.VolumesAreAttached - error checking volumes for node %q with %v", nodeName, err) + return nodeVolumesResult, err + } + + if result, ok := nodesVerificationMap[nodeName]; ok { + return result, nil + } + return nodeVolumesResult, nil +} + +func (attacher *awsElasticBlockStoreAttacher) BulkVerifyVolumes(volumesByNode map[types.NodeName][]*volume.Spec) (map[types.NodeName]map[*volume.Spec]bool, error) { + volumesAttachedCheck := make(map[types.NodeName]map[*volume.Spec]bool) + diskNamesByNode := make(map[types.NodeName][]aws.KubernetesVolumeID) + volumeSpecMap := make(map[aws.KubernetesVolumeID]*volume.Spec) + + for nodeName, volumeSpecs := range volumesByNode { + for _, volumeSpec := range volumeSpecs { + volumeSource, _, err := getVolumeSource(volumeSpec) + + if err != nil { + klog.Errorf("Error getting volume (%q) source : %v", volumeSpec.Name(), err) + continue + } + + name := aws.KubernetesVolumeID(volumeSource.VolumeID) + diskNamesByNode[nodeName] = append(diskNamesByNode[nodeName], name) + + nodeDisk, nodeDiskExists := volumesAttachedCheck[nodeName] + + if !nodeDiskExists { + nodeDisk = make(map[*volume.Spec]bool) + } + nodeDisk[volumeSpec] = true + volumeSpecMap[name] = volumeSpec + volumesAttachedCheck[nodeName] = nodeDisk + } + } + attachedResult, err := attacher.awsVolumes.DisksAreAttached(diskNamesByNode) + + if err != nil { + klog.Errorf("Error checking if volumes are attached to nodes err = %v", err) + return volumesAttachedCheck, err + } + + for nodeName, nodeDisks := range attachedResult { + for diskName, attached := range nodeDisks { + if !attached { + spec := volumeSpecMap[diskName] + setNodeDisk(volumesAttachedCheck, spec, nodeName, false) + } + } + } + + return volumesAttachedCheck, nil +} + +func (attacher *awsElasticBlockStoreAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) { + volumeSource, _, err := getVolumeSource(spec) + if err != nil { + return "", err + } + + volumeID := volumeSource.VolumeID + partition := "" + if volumeSource.Partition != 0 { + partition = strconv.Itoa(int(volumeSource.Partition)) + } + + if devicePath == "" { + return "", fmt.Errorf("waitForAttach failed for AWS Volume %q: devicePath is empty", volumeID) + } + + ticker := time.NewTicker(checkSleepDuration) + defer ticker.Stop() + timer := time.NewTimer(timeout) + defer timer.Stop() + + for { + select { + case <-ticker.C: + klog.V(5).Infof("Checking AWS Volume %q is attached.", volumeID) + devicePaths := getDiskByIDPaths(aws.KubernetesVolumeID(volumeSource.VolumeID), partition, devicePath) + path, err := verifyDevicePath(devicePaths) + if err != nil { + // Log error, if any, and continue checking periodically. 
See issue #11321 + klog.Errorf("Error verifying AWS Volume (%q) is attached: %v", volumeID, err) + } else if path != "" { + // A device path has successfully been created for the PD + klog.Infof("Successfully found attached AWS Volume %q.", volumeID) + return path, nil + } + case <-timer.C: + return "", fmt.Errorf("could not find attached AWS Volume %q. Timeout waiting for mount paths to be created", volumeID) + } + } +} + +func (attacher *awsElasticBlockStoreAttacher) GetDeviceMountPath( + spec *volume.Spec) (string, error) { + volumeSource, _, err := getVolumeSource(spec) + if err != nil { + return "", err + } + + return makeGlobalPDPath(attacher.host, aws.KubernetesVolumeID(volumeSource.VolumeID)), nil +} + +// FIXME: this method can be further pruned. +func (attacher *awsElasticBlockStoreAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error { + mounter := attacher.host.GetMounter(awsElasticBlockStorePluginName) + notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath) + if err != nil { + if os.IsNotExist(err) { + if err := os.MkdirAll(deviceMountPath, 0750); err != nil { + return err + } + notMnt = true + } else { + return err + } + } + + volumeSource, readOnly, err := getVolumeSource(spec) + if err != nil { + return err + } + + options := []string{} + if readOnly { + options = append(options, "ro") + } + if notMnt { + diskMounter := volumeutil.NewSafeFormatAndMountFromHost(awsElasticBlockStorePluginName, attacher.host) + mountOptions := volumeutil.MountOptionFromSpec(spec, options...) + err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions) + if err != nil { + os.Remove(deviceMountPath) + return err + } + } + return nil +} + +type awsElasticBlockStoreDetacher struct { + mounter mount.Interface + awsVolumes aws.Volumes +} + +var _ volume.Detacher = &awsElasticBlockStoreDetacher{} + +var _ volume.DeviceUnmounter = &awsElasticBlockStoreDetacher{} + +func (plugin *awsElasticBlockStorePlugin) NewDetacher() (volume.Detacher, error) { + awsCloud, err := getCloudProvider(plugin.host.GetCloudProvider()) + if err != nil { + return nil, err + } + + return &awsElasticBlockStoreDetacher{ + mounter: plugin.host.GetMounter(plugin.GetPluginName()), + awsVolumes: awsCloud, + }, nil +} + +func (plugin *awsElasticBlockStorePlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) { + return plugin.NewDetacher() +} + +func (detacher *awsElasticBlockStoreDetacher) Detach(volumeName string, nodeName types.NodeName) error { + volumeID := aws.KubernetesVolumeID(path.Base(volumeName)) + + if _, err := detacher.awsVolumes.DetachDisk(volumeID, nodeName); err != nil { + klog.Errorf("Error detaching volumeID %q: %v", volumeID, err) + return err + } + return nil +} + +func (detacher *awsElasticBlockStoreDetacher) UnmountDevice(deviceMountPath string) error { + return volumeutil.UnmountPath(deviceMountPath, detacher.mounter) +} + +func setNodeDisk( + nodeDiskMap map[types.NodeName]map[*volume.Spec]bool, + volumeSpec *volume.Spec, + nodeName types.NodeName, + check bool) { + + volumeMap := nodeDiskMap[nodeName] + if volumeMap == nil { + volumeMap = make(map[*volume.Spec]bool) + nodeDiskMap[nodeName] = volumeMap + } + volumeMap[volumeSpec] = check +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/aws_ebs.go similarity index 90% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs.go rename to 
cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/aws_ebs.go index f557827ba81e..c6270c1752a1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/aws_ebs.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package aws_ebs +package awsebs import ( "context" @@ -25,7 +25,8 @@ import ( "strconv" "strings" - "github.com/golang/glog" + "k8s.io/klog" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -39,7 +40,7 @@ import ( "k8s.io/kubernetes/pkg/volume/util" ) -// This is the primary entrypoint for volume plugins. +// ProbeVolumePlugins is the primary entrypoint for volume plugins. func ProbeVolumePlugins() []volume.VolumePlugin { return []volume.VolumePlugin{&awsElasticBlockStorePlugin{nil}} } @@ -99,7 +100,7 @@ func (plugin *awsElasticBlockStorePlugin) SupportsBulkVolumeVerification() bool func (plugin *awsElasticBlockStorePlugin) GetVolumeLimits() (map[string]int64, error) { volumeLimits := map[string]int64{ - util.EBSVolumeLimitKey: 39, + util.EBSVolumeLimitKey: util.DefaultMaxEBSVolumes, } cloud := plugin.host.GetCloudProvider() @@ -117,18 +118,18 @@ func (plugin *awsElasticBlockStorePlugin) GetVolumeLimits() (map[string]int64, e instances, ok := cloud.Instances() if !ok { - glog.V(3).Infof("Failed to get instances from cloud provider") + klog.V(3).Infof("Failed to get instances from cloud provider") return volumeLimits, nil } instanceType, err := instances.InstanceType(context.TODO(), plugin.host.GetNodeName()) if err != nil { - glog.Errorf("Failed to get instance type from AWS cloud provider") + klog.Errorf("Failed to get instance type from AWS cloud provider") return volumeLimits, nil } - if ok, _ := regexp.MatchString("^[cm]5.*", instanceType); ok { - volumeLimits[util.EBSVolumeLimitKey] = 25 + if ok, _ := regexp.MatchString(util.EBSNitroLimitRegex, instanceType); ok { + volumeLimits[util.EBSVolumeLimitKey] = util.DefaultMaxEBSNitroVolumeLimit } return volumeLimits, nil @@ -175,9 +176,11 @@ func (plugin *awsElasticBlockStorePlugin) newMounterInternal(spec *volume.Spec, plugin: plugin, MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)), }, - fsType: fsType, - readOnly: readOnly, - diskMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil + fsType: fsType, + readOnly: readOnly, + diskMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host), + mountOptions: util.MountOptionFromSpec(spec), + }, nil } func (plugin *awsElasticBlockStorePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) { @@ -202,7 +205,7 @@ func (plugin *awsElasticBlockStorePlugin) NewDeleter(spec *volume.Spec) (volume. 
func (plugin *awsElasticBlockStorePlugin) newDeleterInternal(spec *volume.Spec, manager ebsManager) (volume.Deleter, error) { if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AWSElasticBlockStore == nil { - glog.Errorf("spec.PersistentVolumeSource.AWSElasticBlockStore is nil") + klog.Errorf("spec.PersistentVolumeSource.AWSElasticBlockStore is nil") return nil, fmt.Errorf("spec.PersistentVolumeSource.AWSElasticBlockStore is nil") } return &awsElasticBlockStoreDeleter{ @@ -270,7 +273,7 @@ func (plugin *awsElasticBlockStorePlugin) ConstructVolumeSpec(volName, mountPath if length == 3 { sourceName = awsURLNamePrefix + names[1] + "/" + volName // names[1] is the zone label } - glog.V(4).Infof("Convert aws volume name from %q to %q ", volumeID, sourceName) + klog.V(4).Infof("Convert aws volume name from %q to %q ", volumeID, sourceName) } awsVolume := &v1.Volume{ @@ -309,6 +312,12 @@ func (plugin *awsElasticBlockStorePlugin) ExpandVolumeDevice( return awsVolume.ResizeDisk(volumeID, oldSize, newSize) } +func (plugin *awsElasticBlockStorePlugin) ExpandFS(spec *volume.Spec, devicePath, deviceMountPath string, _, _ resource.Quantity) error { + _, err := util.GenericResizeFS(plugin.host, plugin.GetPluginName(), devicePath, deviceMountPath) + return err +} + +var _ volume.FSResizableVolumePlugin = &awsElasticBlockStorePlugin{} var _ volume.ExpandableVolumePlugin = &awsElasticBlockStorePlugin{} var _ volume.VolumePluginWithAttachLimits = &awsElasticBlockStorePlugin{} @@ -343,7 +352,8 @@ type awsElasticBlockStoreMounter struct { // Specifies whether the disk will be attached as read-only. readOnly bool // diskMounter provides the interface that is used to mount the actual block device. - diskMounter *mount.SafeFormatAndMount + diskMounter *mount.SafeFormatAndMount + mountOptions []string } var _ volume.Mounter = &awsElasticBlockStoreMounter{} @@ -372,9 +382,9 @@ func (b *awsElasticBlockStoreMounter) SetUp(fsGroup *int64) error { func (b *awsElasticBlockStoreMounter) SetUpAt(dir string, fsGroup *int64) error { // TODO: handle failed mounts here. notMnt, err := b.mounter.IsLikelyNotMountPoint(dir) - glog.V(4).Infof("PersistentDisk set up: %s %v %v", dir, !notMnt, err) + klog.V(4).Infof("PersistentDisk set up: %s %v %v", dir, !notMnt, err) if err != nil && !os.IsNotExist(err) { - glog.Errorf("cannot validate mount point: %s %v", dir, err) + klog.Errorf("cannot validate mount point: %s %v", dir, err) return err } if !notMnt { @@ -392,31 +402,32 @@ func (b *awsElasticBlockStoreMounter) SetUpAt(dir string, fsGroup *int64) error if b.readOnly { options = append(options, "ro") } - err = b.mounter.Mount(globalPDPath, dir, "", options) + mountOptions := util.JoinMountOptions(options, b.mountOptions) + err = b.mounter.Mount(globalPDPath, dir, "", mountOptions) if err != nil { notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed for %s: %v", dir, mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed for %s: %v", dir, mntErr) return err } if !notMnt { if mntErr = b.mounter.Unmount(dir); mntErr != nil { - glog.Errorf("failed to unmount %s: %v", dir, mntErr) + klog.Errorf("failed to unmount %s: %v", dir, mntErr) return err } notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed for %s: %v", dir, mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed for %s: %v", dir, mntErr) return err } if !notMnt { // This is very odd, we don't expect it. 
We'll try again next sync loop. - glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir) + klog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir) return err } } os.Remove(dir) - glog.Errorf("Mount of disk %s failed: %v", dir, err) + klog.Errorf("Mount of disk %s failed: %v", dir, err) return err } @@ -424,7 +435,7 @@ func (b *awsElasticBlockStoreMounter) SetUpAt(dir string, fsGroup *int64) error volume.SetVolumeOwnership(b, fsGroup) } - glog.V(4).Infof("Successfully mounted %s", dir) + klog.V(4).Infof("Successfully mounted %s", dir) return nil } @@ -440,11 +451,11 @@ func getVolumeIDFromGlobalMount(host volume.VolumeHost, globalPath string) (stri basePath := filepath.Join(host.GetPluginDir(awsElasticBlockStorePluginName), mount.MountsInGlobalPDPath) rel, err := filepath.Rel(basePath, globalPath) if err != nil { - glog.Errorf("Failed to get volume id from global mount %s - %v", globalPath, err) + klog.Errorf("Failed to get volume id from global mount %s - %v", globalPath, err) return "", err } if strings.Contains(rel, "../") { - glog.Errorf("Unexpected mount path: %s", globalPath) + klog.Errorf("Unexpected mount path: %s", globalPath) return "", fmt.Errorf("unexpected mount path: " + globalPath) } // Reverse the :// replacement done in makeGlobalPDPath @@ -452,7 +463,7 @@ func getVolumeIDFromGlobalMount(host volume.VolumeHost, globalPath string) (stri if strings.HasPrefix(volumeID, "aws/") { volumeID = strings.Replace(volumeID, "aws/", "aws://", 1) } - glog.V(2).Info("Mapping mount dir ", globalPath, " to volumeID ", volumeID) + klog.V(2).Info("Mapping mount dir ", globalPath, " to volumeID ", volumeID) return volumeID, nil } @@ -506,7 +517,7 @@ func (c *awsElasticBlockStoreProvisioner) Provision(selectedNode *v1.Node, allow volumeID, sizeGB, labels, fstype, err := c.manager.CreateVolume(c, selectedNode, allowedTopologies) if err != nil { - glog.Errorf("Provision failed: %v", err) + klog.Errorf("Provision failed: %v", err) return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs_block.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/aws_ebs_block.go similarity index 98% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs_block.go rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/aws_ebs_block.go index 997f5bbc7ef5..a6d64b5c4957 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs_block.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/aws_ebs_block.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package aws_ebs +package awsebs import ( "fmt" @@ -22,7 +22,8 @@ import ( "strconv" "strings" - "github.com/golang/glog" + "k8s.io/klog" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/cloudprovider/providers/aws" @@ -42,7 +43,7 @@ func (plugin *awsElasticBlockStorePlugin) ConstructBlockVolumeSpec(podUID types. 
if err != nil { return nil, err } - glog.V(5).Infof("globalMapPathUUID: %s", globalMapPathUUID) + klog.V(5).Infof("globalMapPathUUID: %s", globalMapPathUUID) globalMapPath := filepath.Dir(globalMapPathUUID) if len(globalMapPath) <= 1 { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/aws_util.go similarity index 89% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_util.go rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/aws_util.go index 95a5154a3519..e1c85aa8b706 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/aws_util.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package aws_ebs +package awsebs import ( "fmt" @@ -24,11 +24,12 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" "k8s.io/kubernetes/pkg/cloudprovider/providers/aws" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" @@ -45,8 +46,10 @@ const ( ebsMaxReplicasInAZ = 1 ) +// AWSDiskUtil provides operations for EBS volume. type AWSDiskUtil struct{} +// DeleteVolume deletes an AWS EBS volume. func (util *AWSDiskUtil) DeleteVolume(d *awsElasticBlockStoreDeleter) error { cloud, err := getCloudProvider(d.awsElasticBlockStore.plugin.host.GetCloudProvider()) if err != nil { @@ -57,13 +60,13 @@ func (util *AWSDiskUtil) DeleteVolume(d *awsElasticBlockStoreDeleter) error { if err != nil { // AWS cloud provider returns volume.deletedVolumeInUseError when // necessary, no handling needed here. - glog.V(2).Infof("Error deleting EBS Disk volume %s: %v", d.volumeID, err) + klog.V(2).Infof("Error deleting EBS Disk volume %s: %v", d.volumeID, err) return err } if deleted { - glog.V(2).Infof("Successfully deleted EBS Disk volume %s", d.volumeID) + klog.V(2).Infof("Successfully deleted EBS Disk volume %s", d.volumeID) } else { - glog.V(2).Infof("Successfully deleted EBS Disk volume %s (actually already deleted)", d.volumeID) + klog.V(2).Infof("Successfully deleted EBS Disk volume %s (actually already deleted)", d.volumeID) } return nil } @@ -94,7 +97,7 @@ func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner, node * volumeOptions, err := populateVolumeOptions(c.plugin.GetPluginName(), c.options.PVC.Name, capacity, tags, c.options.Parameters, node, allowedTopologies, zonesWithNodes) if err != nil { - glog.V(2).Infof("Error populating EBS options: %v", err) + klog.V(2).Infof("Error populating EBS options: %v", err) return "", 0, nil, "", err } @@ -105,15 +108,15 @@ func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner, node * name, err := cloud.CreateDisk(volumeOptions) if err != nil { - glog.V(2).Infof("Error creating EBS Disk volume: %v", err) + klog.V(2).Infof("Error creating EBS Disk volume: %v", err) return "", 0, nil, "", err } - glog.V(2).Infof("Successfully created EBS Disk volume %s", name) + klog.V(2).Infof("Successfully created EBS Disk volume %s", name) labels, err := cloud.GetVolumeLabels(name) if err != nil { // We don't really want to leak the volume here... 
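Apart from the directory rename from `aws_ebs` to `awsebs` (Go style discourages underscores in package names), the bulk of this file's churn is the tree-wide logger migration from `github.com/golang/glog` to `k8s.io/klog`. klog is a fork of glog with the same call surface, so each call site changes only its prefix; the practical difference is that klog's flags must be registered explicitly. A minimal sketch, reusing a volume ID that appears elsewhere in this diff as a placeholder:

```go
package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// Unlike glog, klog does not register its flags in init(); callers
	// wire them into a FlagSet themselves. Passing nil registers them
	// on flag.CommandLine.
	klog.InitFlags(nil)
	flag.Set("v", "2") // enable the V(2) messages used throughout this diff
	flag.Parse()

	volumeID := "vol-0fab1d5e3f72a5e23" // placeholder
	klog.V(2).Infof("Successfully created EBS Disk volume %s", volumeID)
	klog.Warningf("error mapping volume %q to AWS volume: %v", volumeID, "not found")
	klog.Flush()
}
```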
- glog.Errorf("error building labels for new EBS volume %q: %v", name, err) + klog.Errorf("error building labels for new EBS volume %q: %v", name, err) } fstype := "" @@ -166,7 +169,7 @@ func populateVolumeOptions(pluginName, pvcName string, capacityGB resource.Quant return nil, fmt.Errorf("invalid encrypted boolean value %q, must be true or false: %v", v, err) } case "kmskeyid": - volumeOptions.KmsKeyId = v + volumeOptions.KmsKeyID = v case volume.VolumeParameterFSType: // Do nothing but don't make this fail default: @@ -198,11 +201,11 @@ func verifyDevicePath(devicePaths []string) (string, error) { func verifyAllPathsRemoved(devicePaths []string) (bool, error) { allPathsRemoved := true for _, path := range devicePaths { - if exists, err := volumeutil.PathExists(path); err != nil { + exists, err := volumeutil.PathExists(path) + if err != nil { return false, fmt.Errorf("Error checking if path exists: %v", err) - } else { - allPathsRemoved = allPathsRemoved && !exists } + allPathsRemoved = allPathsRemoved && !exists } return allPathsRemoved, nil @@ -211,7 +214,7 @@ func verifyAllPathsRemoved(devicePaths []string) (bool, error) { // Returns list of all paths for given EBS mount // This is more interesting on GCE (where we are able to identify volumes under /dev/disk-by-id) // Here it is mostly about applying the partition path -func getDiskByIdPaths(volumeID aws.KubernetesVolumeID, partition string, devicePath string) []string { +func getDiskByIDPaths(volumeID aws.KubernetesVolumeID, partition string, devicePath string) []string { devicePaths := []string{} if devicePath != "" { devicePaths = append(devicePaths, devicePath) @@ -227,14 +230,14 @@ func getDiskByIdPaths(volumeID aws.KubernetesVolumeID, partition string, deviceP // and we have to get the volume id from the nvme interface awsVolumeID, err := volumeID.MapToAWSVolumeID() if err != nil { - glog.Warningf("error mapping volume %q to AWS volume: %v", volumeID, err) + klog.Warningf("error mapping volume %q to AWS volume: %v", volumeID, err) } else { // This is the magic name on which AWS presents NVME devices under /dev/disk/by-id/ // For example, vol-0fab1d5e3f72a5e23 creates a symlink at /dev/disk/by-id/nvme-Amazon_Elastic_Block_Store_vol0fab1d5e3f72a5e23 nvmeName := "nvme-Amazon_Elastic_Block_Store_" + strings.Replace(string(awsVolumeID), "-", "", -1) nvmePath, err := findNvmeVolume(nvmeName) if err != nil { - glog.Warningf("error looking for nvme volume %q: %v", volumeID, err) + klog.Warningf("error looking for nvme volume %q: %v", volumeID, err) } else if nvmePath != "" { devicePaths = append(devicePaths, nvmePath) } @@ -260,14 +263,14 @@ func findNvmeVolume(findName string) (device string, err error) { stat, err := os.Lstat(p) if err != nil { if os.IsNotExist(err) { - glog.V(6).Infof("nvme path not found %q", p) + klog.V(6).Infof("nvme path not found %q", p) return "", nil } return "", fmt.Errorf("error getting stat of %q: %v", p, err) } if stat.Mode()&os.ModeSymlink != os.ModeSymlink { - glog.Warningf("nvme file %q found, but was not a symlink", p) + klog.Warningf("nvme file %q found, but was not a symlink", p) return "", nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/doc.go new file mode 100644 index 000000000000..d3f30c0d18eb --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/awsebs/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package awsebs contains the internal representation of AWS Elastic +// Block Store volumes. +package awsebs diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/BUILD index 647bb9198026..fd28f977d79f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/BUILD @@ -22,7 +22,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/volume/azure_dd", deps = [ "//pkg/apis/core:go_default_library", - "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/azure:go_default_library", "//pkg/features:go_default_library", "//pkg/kubelet/apis:go_default_library", @@ -39,9 +38,10 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute:go_default_library", - "//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//staging/src/k8s.io/cloud-provider:go_default_library", + "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute:go_default_library", + "//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -75,8 +75,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", - "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute:go_default_library", - "//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage:go_default_library", + "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", ], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/attacher.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/attacher.go index 6633ed8a5954..5787518aa52c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/attacher.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/attacher.go @@ -25,13 +25,13 @@ import ( "strconv" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" - "github.com/golang/glog" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" - 
"k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" "k8s.io/kubernetes/pkg/cloudprovider/providers/azure" "k8s.io/kubernetes/pkg/util/keymutex" "k8s.io/kubernetes/pkg/util/mount" @@ -62,13 +62,13 @@ var getLunMutex = keymutex.NewHashed(0) func (a *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) { volumeSource, _, err := getVolumeSource(spec) if err != nil { - glog.Warningf("failed to get azure disk spec (%v)", err) + klog.Warningf("failed to get azure disk spec (%v)", err) return "", err } instanceid, err := a.cloud.InstanceID(context.TODO(), nodeName) if err != nil { - glog.Warningf("failed to get azure instance id (%v)", err) + klog.Warningf("failed to get azure instance id (%v)", err) return "", fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err) } @@ -80,31 +80,31 @@ func (a *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) ( lun, err := diskController.GetDiskLun(volumeSource.DiskName, volumeSource.DataDiskURI, nodeName) if err == cloudprovider.InstanceNotFound { // Log error and continue with attach - glog.Warningf( + klog.Warningf( "Error checking if volume is already attached to current node (%q). Will continue and try attach anyway. err=%v", instanceid, err) } if err == nil { // Volume is already attached to node. - glog.V(4).Infof("Attach operation is successful. volume %q is already attached to node %q at lun %d.", volumeSource.DiskName, instanceid, lun) + klog.V(2).Infof("Attach operation is successful. volume %q is already attached to node %q at lun %d.", volumeSource.DiskName, instanceid, lun) } else { - glog.V(4).Infof("GetDiskLun returned: %v. Initiating attaching volume %q to node %q.", err, volumeSource.DataDiskURI, nodeName) + klog.V(2).Infof("GetDiskLun returned: %v. 
Initiating attaching volume %q to node %q.", err, volumeSource.DataDiskURI, nodeName) getLunMutex.LockKey(instanceid) defer getLunMutex.UnlockKey(instanceid) lun, err = diskController.GetNextDiskLun(nodeName) if err != nil { - glog.Warningf("no LUN available for instance %q (%v)", nodeName, err) + klog.Warningf("no LUN available for instance %q (%v)", nodeName, err) return "", fmt.Errorf("all LUNs are used, cannot attach volume %q to instance %q (%v)", volumeSource.DiskName, instanceid, err) } - glog.V(4).Infof("Trying to attach volume %q lun %d to node %q.", volumeSource.DataDiskURI, lun, nodeName) + klog.V(2).Infof("Trying to attach volume %q lun %d to node %q.", volumeSource.DataDiskURI, lun, nodeName) isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk) err = diskController.AttachDisk(isManagedDisk, volumeSource.DiskName, volumeSource.DataDiskURI, nodeName, lun, compute.CachingTypes(*volumeSource.CachingMode)) if err == nil { - glog.V(4).Infof("Attach operation successful: volume %q attached to node %q.", volumeSource.DataDiskURI, nodeName) + klog.V(2).Infof("Attach operation successful: volume %q attached to node %q.", volumeSource.DataDiskURI, nodeName) } else { - glog.V(2).Infof("Attach volume %q to instance %q failed with %v", volumeSource.DataDiskURI, instanceid, err) + klog.V(2).Infof("Attach volume %q to instance %q failed with %v", volumeSource.DataDiskURI, instanceid, err) return "", fmt.Errorf("Attach volume %q to instance %q failed with %v", volumeSource.DiskName, instanceid, err) } } @@ -119,7 +119,7 @@ func (a *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName ty for _, spec := range specs { volumeSource, _, err := getVolumeSource(spec) if err != nil { - glog.Errorf("azureDisk - Error getting volume (%q) source : %v", spec.Name(), err) + klog.Errorf("azureDisk - Error getting volume (%q) source : %v", spec.Name(), err) continue } @@ -135,7 +135,7 @@ func (a *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName ty attachedResult, err := diskController.DisksAreAttached(volumeIDList, nodeName) if err != nil { // Log error and continue with attach - glog.Errorf( + klog.Errorf( "azureDisk - Error checking if volumes (%v) are attached to current node (%q). 
err=%v", volumeIDList, nodeName, err) return volumesAttachedCheck, err @@ -145,15 +145,13 @@ func (a *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName ty if !attached { spec := volumeSpecMap[volumeID] volumesAttachedCheck[spec] = false - glog.V(2).Infof("azureDisk - VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name()) + klog.V(2).Infof("azureDisk - VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name()) } } return volumesAttachedCheck, nil } func (a *azureDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) { - var err error - volumeSource, _, err := getVolumeSource(spec) if err != nil { return "", err @@ -167,13 +165,22 @@ func (a *azureDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, nodeName := types.NodeName(a.plugin.host.GetHostName()) diskName := volumeSource.DiskName - glog.V(5).Infof("azureDisk - WaitForAttach: begin to GetDiskLun by diskName(%s), DataDiskURI(%s), nodeName(%s), devicePath(%s)", - diskName, volumeSource.DataDiskURI, nodeName, devicePath) - lun, err := diskController.GetDiskLun(diskName, volumeSource.DataDiskURI, nodeName) - if err != nil { - return "", err + var lun int32 + if runtime.GOOS == "windows" { + klog.V(2).Infof("azureDisk - WaitForAttach: begin to GetDiskLun by diskName(%s), DataDiskURI(%s), nodeName(%s), devicePath(%s)", + diskName, volumeSource.DataDiskURI, nodeName, devicePath) + lun, err = diskController.GetDiskLun(diskName, volumeSource.DataDiskURI, nodeName) + if err != nil { + return "", err + } + klog.V(2).Infof("azureDisk - WaitForAttach: GetDiskLun succeeded, got lun(%v)", lun) + } else { + lun, err = getDiskLUN(devicePath) + if err != nil { + return "", err + } } - glog.V(5).Infof("azureDisk - WaitForAttach: GetDiskLun succeeded, got lun(%v)", lun) + exec := a.plugin.host.GetExec(a.plugin.GetPluginName()) io := &osIOHandler{} @@ -240,9 +247,9 @@ func (attacher *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath str // testing original mount point, make sure the mount link is valid if _, err := (&osIOHandler{}).ReadDir(deviceMountPath); err != nil { // mount link is invalid, now unmount and remount later - glog.Warningf("azureDisk - ReadDir %s failed with %v, unmount this directory", deviceMountPath, err) + klog.Warningf("azureDisk - ReadDir %s failed with %v, unmount this directory", deviceMountPath, err) if err := mounter.Unmount(deviceMountPath); err != nil { - glog.Errorf("azureDisk - Unmount deviceMountPath %s failed with %v", deviceMountPath, err) + klog.Errorf("azureDisk - Unmount deviceMountPath %s failed with %v", deviceMountPath, err) return err } notMnt = true @@ -277,11 +284,11 @@ func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) erro instanceid, err := d.cloud.InstanceID(context.TODO(), nodeName) if err != nil { - glog.Warningf("no instance id for node %q, skip detaching (%v)", nodeName, err) + klog.Warningf("no instance id for node %q, skip detaching (%v)", nodeName, err) return nil } - glog.V(4).Infof("detach %v from node %q", diskURI, nodeName) + klog.V(2).Infof("detach %v from node %q", diskURI, nodeName) diskController, err := getDiskController(d.plugin.host) if err != nil { @@ -293,10 +300,10 @@ func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) erro err = diskController.DetachDiskByName("", diskURI, nodeName) if err != nil { - glog.Errorf("failed to detach azure disk %q, err %v", 
diskURI, err) + klog.Errorf("failed to detach azure disk %q, err %v", diskURI, err) } - glog.V(2).Infof("azureDisk - disk:%s was detached from node:%v", diskURI, nodeName) + klog.V(2).Infof("azureDisk - disk:%s was detached from node:%v", diskURI, nodeName) return err } @@ -304,9 +311,9 @@ func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) erro func (detacher *azureDiskDetacher) UnmountDevice(deviceMountPath string) error { err := util.UnmountPath(deviceMountPath, detacher.plugin.host.GetMounter(detacher.plugin.GetPluginName())) if err == nil { - glog.V(4).Infof("azureDisk - Device %s was unmounted", deviceMountPath) + klog.V(2).Infof("azureDisk - Device %s was unmounted", deviceMountPath) } else { - glog.Infof("azureDisk - Device %s failed to unmount with error: %s", deviceMountPath, err.Error()) + klog.Warningf("azureDisk - Device %s failed to unmount with error: %s", deviceMountPath, err.Error()) } return err } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common.go index 652dc05e1d25..9de53fe3368e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common.go @@ -21,9 +21,11 @@ import ( "io/ioutil" "os" "path/filepath" + "regexp" + "strconv" libstrings "strings" - "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -36,7 +38,7 @@ import ( ) const ( - defaultStorageAccountType = storage.StandardLRS + defaultStorageAccountType = compute.StandardLRS defaultAzureDiskKind = v1.AzureManagedDisk defaultAzureDataDiskCachingMode = v1.AzureDataDiskCachingNone ) @@ -59,6 +61,8 @@ var ( string(api.AzureSharedBlobDisk), string(api.AzureDedicatedBlobDisk), string(api.AzureManagedDisk)) + + lunPathRE = regexp.MustCompile(`/dev/disk/azure/scsi(?:.*)/lun(.+)`) ) func getPath(uid types.UID, volName string, host volume.VolumeHost) string { @@ -120,13 +124,13 @@ func normalizeKind(kind string) (v1.AzureDataDiskKind, error) { return v1.AzureDataDiskKind(kind), nil } -func normalizeStorageAccountType(storageAccountType string) (storage.SkuName, error) { +func normalizeStorageAccountType(storageAccountType string) (compute.DiskStorageAccountTypes, error) { if storageAccountType == "" { return defaultStorageAccountType, nil } - sku := storage.SkuName(storageAccountType) - supportedSkuNames := storage.PossibleSkuNameValues() + sku := compute.DiskStorageAccountTypes(storageAccountType) + supportedSkuNames := compute.PossibleDiskStorageAccountTypesValues() for _, s := range supportedSkuNames { if sku == s { return sku, nil @@ -201,3 +205,25 @@ func strFirstLetterToUpper(str string) string { } return libstrings.ToUpper(string(str[0])) + str[1:] } + +// getDiskLUN : deviceInfo could be a LUN number or a device path, e.g. 
/dev/disk/azure/scsi1/lun2 +func getDiskLUN(deviceInfo string) (int32, error) { + var diskLUN string + if len(deviceInfo) <= 2 { + diskLUN = deviceInfo + } else { + // extract the LUN num from a device path + matches := lunPathRE.FindStringSubmatch(deviceInfo) + if len(matches) == 2 { + diskLUN = matches[1] + } else { + return -1, fmt.Errorf("cannot parse deviceInfo: %s", deviceInfo) + } + } + + lun, err := strconv.Atoi(diskLUN) + if err != nil { + return -1, err + } + return int32(lun), nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_linux.go index 383b46f46876..6b693b17e2b0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_linux.go @@ -24,7 +24,7 @@ import ( "strconv" libstrings "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" ) @@ -42,21 +42,21 @@ func listAzureDiskPath(io ioHandler) []string { } } } - glog.V(12).Infof("Azure sys disks paths: %v", azureDiskList) + klog.V(12).Infof("Azure sys disks paths: %v", azureDiskList) return azureDiskList } // getDiskLinkByDevName get disk link by device name from devLinkPath, e.g. /dev/disk/azure/, /dev/disk/by-id/ func getDiskLinkByDevName(io ioHandler, devLinkPath, devName string) (string, error) { dirs, err := io.ReadDir(devLinkPath) - glog.V(12).Infof("azureDisk - begin to find %s from %s", devName, devLinkPath) + klog.V(12).Infof("azureDisk - begin to find %s from %s", devName, devLinkPath) if err == nil { for _, f := range dirs { diskPath := devLinkPath + f.Name() - glog.V(12).Infof("azureDisk - begin to Readlink: %s", diskPath) + klog.V(12).Infof("azureDisk - begin to Readlink: %s", diskPath) link, linkErr := io.Readlink(diskPath) if linkErr != nil { - glog.Warningf("azureDisk - read link (%s) error: %v", diskPath, linkErr) + klog.Warningf("azureDisk - read link (%s) error: %v", diskPath, linkErr) continue } if libstrings.HasSuffix(link, devName) { @@ -75,11 +75,11 @@ func scsiHostRescan(io ioHandler, exec mount.Exec) { name := scsi_path + f.Name() + "/scan" data := []byte("- - -") if err = io.WriteFile(name, data, 0666); err != nil { - glog.Warningf("failed to rescan scsi host %s", name) + klog.Warningf("failed to rescan scsi host %s", name) } } } else { - glog.Warningf("failed to read %s, err %v", scsi_path, err) + klog.Warningf("failed to read %s, err %v", scsi_path, err) } } @@ -101,10 +101,10 @@ func findDiskByLunWithConstraint(lun int, io ioHandler, azureDisks []string) (st continue } if len(azureDisks) == 0 { - glog.V(4).Infof("/dev/disk/azure is not populated, now try to parse %v directly", name) + klog.V(4).Infof("/dev/disk/azure is not populated, now try to parse %v directly", name) target, err := strconv.Atoi(arr[0]) if err != nil { - glog.Errorf("failed to parse target from %v (%v), err %v", arr[0], name, err) + klog.Errorf("failed to parse target from %v (%v), err %v", arr[0], name, err) continue } // as observed, targets 0-3 are used by OS disks. 
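On Linux, `WaitForAttach` now derives the LUN locally from the device path instead of calling `GetDiskLun` against the Azure API on every wait; `getDiskLUN` and `lunPathRE` do the parsing. They are reproduced verbatim below from the hunk above and exercised against sample inputs (the bare `/dev/sdc` input is made up to show the error branch):

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// lunPathRE and getDiskLUN are copied from the azure_common.go hunk above.
var lunPathRE = regexp.MustCompile(`/dev/disk/azure/scsi(?:.*)/lun(.+)`)

func getDiskLUN(deviceInfo string) (int32, error) {
	var diskLUN string
	if len(deviceInfo) <= 2 {
		// deviceInfo is already a bare LUN number such as "2"
		diskLUN = deviceInfo
	} else {
		// extract the LUN number from a /dev/disk/azure/scsi*/lunN path
		matches := lunPathRE.FindStringSubmatch(deviceInfo)
		if len(matches) == 2 {
			diskLUN = matches[1]
		} else {
			return -1, fmt.Errorf("cannot parse deviceInfo: %s", deviceInfo)
		}
	}

	lun, err := strconv.Atoi(diskLUN)
	if err != nil {
		return -1, err
	}
	return int32(lun), nil
}

func main() {
	for _, d := range []string{"2", "/dev/disk/azure/scsi1/lun2", "/dev/sdc"} {
		lun, err := getDiskLUN(d)
		fmt.Printf("%s -> lun=%d err=%v\n", d, lun, err)
	}
}
```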
Skip them @@ -118,7 +118,7 @@ func findDiskByLunWithConstraint(lun int, io ioHandler, azureDisks []string) (st l, err := strconv.Atoi(arr[3]) if err != nil { // unknown path format, continue to read the next one - glog.V(4).Infof("azure disk - failed to parse lun from %v (%v), err %v", arr[3], name, err) + klog.V(4).Infof("azure disk - failed to parse lun from %v (%v), err %v", arr[3], name, err) continue } if lun == l { @@ -127,24 +127,24 @@ func findDiskByLunWithConstraint(lun int, io ioHandler, azureDisks []string) (st vendorPath := filepath.Join(sys_path, name, "vendor") vendorBytes, err := io.ReadFile(vendorPath) if err != nil { - glog.Errorf("failed to read device vendor, err: %v", err) + klog.Errorf("failed to read device vendor, err: %v", err) continue } vendor := libstrings.TrimSpace(string(vendorBytes)) if libstrings.ToUpper(vendor) != "MSFT" { - glog.V(4).Infof("vendor doesn't match VHD, got %s", vendor) + klog.V(4).Infof("vendor doesn't match VHD, got %s", vendor) continue } modelPath := filepath.Join(sys_path, name, "model") modelBytes, err := io.ReadFile(modelPath) if err != nil { - glog.Errorf("failed to read device model, err: %v", err) + klog.Errorf("failed to read device model, err: %v", err) continue } model := libstrings.TrimSpace(string(modelBytes)) if libstrings.ToUpper(model) != "VIRTUAL DISK" { - glog.V(4).Infof("model doesn't match VHD, got %s", model) + klog.V(4).Infof("model doesn't match VHD, got %s", model) continue } @@ -154,7 +154,7 @@ func findDiskByLunWithConstraint(lun int, io ioHandler, azureDisks []string) (st found := false devName := dev[0].Name() for _, diskName := range azureDisks { - glog.V(12).Infof("azureDisk - validating disk %q with sys disk %q", devName, diskName) + klog.V(12).Infof("azureDisk - validating disk %q with sys disk %q", devName, diskName) if devName == diskName { found = true break @@ -165,10 +165,10 @@ func findDiskByLunWithConstraint(lun int, io ioHandler, azureDisks []string) (st for _, devLinkPath := range devLinkPaths { diskPath, err := getDiskLinkByDevName(io, devLinkPath, devName) if err == nil { - glog.V(4).Infof("azureDisk - found %s by %s under %s", diskPath, devName, devLinkPath) + klog.V(4).Infof("azureDisk - found %s by %s under %s", diskPath, devName, devLinkPath) return diskPath, nil } - glog.Warningf("azureDisk - getDiskLinkByDevName by %s under %s failed, error: %v", devName, devLinkPath, err) + klog.Warningf("azureDisk - getDiskLinkByDevName by %s under %s failed, error: %v", devName, devLinkPath, err) } return "/dev/" + devName, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_windows.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_windows.go index 6ee1a6d05585..c48f191f3092 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_windows.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_windows.go @@ -24,7 +24,7 @@ import ( "strconv" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" ) @@ -33,7 +33,7 @@ func scsiHostRescan(io ioHandler, exec mount.Exec) { cmd := "Update-HostStorageCache" output, err := exec.Run("powershell", "/c", cmd) if err != nil { - glog.Errorf("Update-HostStorageCache failed in scsiHostRescan, error: %v, output: %q", err, string(output)) + klog.Errorf("Update-HostStorageCache failed in scsiHostRescan, error: %v, output: %q", err, string(output)) } } @@ -42,7 +42,7 @@ func findDiskByLun(lun int, iohandler 
ioHandler, exec mount.Exec) (string, error
 	cmd := `Get-Disk | select number, location | ConvertTo-Json`
 	output, err := exec.Run("powershell", "/c", cmd)
 	if err != nil {
-		glog.Errorf("Get-Disk failed in findDiskByLun, error: %v, output: %q", err, string(output))
+		klog.Errorf("Get-Disk failed in findDiskByLun, error: %v, output: %q", err, string(output))
 		return "", err
 	}
@@ -52,7 +52,7 @@ func findDiskByLun(lun int, iohandler ioHandler, exec mount.Exec) (string, error
 
 	var data []map[string]interface{}
 	if err = json.Unmarshal(output, &data); err != nil {
-		glog.Errorf("Get-Disk output is not a json array, output: %q", string(output))
+		klog.Errorf("Get-Disk output is not a json array, output: %q", string(output))
 		return "", err
 	}
@@ -66,27 +66,27 @@ func findDiskByLun(lun int, iohandler ioHandler, exec mount.Exec) (string, error
 			arr := strings.Split(location, " ")
 			arrLen := len(arr)
 			if arrLen < 3 {
-				glog.Warningf("unexpected json structure from Get-Disk, location: %q", jsonLocation)
+				klog.Warningf("unexpected json structure from Get-Disk, location: %q", jsonLocation)
 				continue
 			}
-			glog.V(4).Infof("found a disk, locatin: %q, lun: %q", location, arr[arrLen-1])
+			klog.V(4).Infof("found a disk, location: %q, lun: %q", location, arr[arrLen-1])
 			//last element of location field is LUN number, e.g.
 			// "location": "Integrated : Adapter 3 : Port 0 : Target 0 : LUN 1"
 			l, err := strconv.Atoi(arr[arrLen-1])
 			if err != nil {
-				glog.Warningf("cannot parse element from data structure, location: %q, element: %q", location, arr[arrLen-1])
+				klog.Warningf("cannot parse element from data structure, location: %q, element: %q", location, arr[arrLen-1])
 				continue
 			}
 			if l == lun {
-				glog.V(4).Infof("found a disk and lun, locatin: %q, lun: %d", location, lun)
+				klog.V(4).Infof("found a disk and lun, location: %q, lun: %d", location, lun)
 				if d, ok := v["number"]; ok {
 					if diskNum, ok := d.(float64); ok {
-						glog.V(2).Infof("azureDisk Mount: got disk number(%d) by LUN(%d)", int(diskNum), lun)
+						klog.V(2).Infof("azureDisk Mount: got disk number(%d) by LUN(%d)", int(diskNum), lun)
 						return strconv.Itoa(int(diskNum)), nil
 					}
-					glog.Warningf("LUN(%d) found, but could not get disk number(%q), location: %q", lun, d, location)
+					klog.Warningf("LUN(%d) found, but could not get disk number(%q), location: %q", lun, d, location)
 				}
 				return "", fmt.Errorf("LUN(%d) found, but could not get disk number, location: %q", lun, location)
 			}
@@ -99,7 +99,7 @@ func findDiskByLun(lun int, iohandler ioHandler, exec mount.Exec) (string, error
 
 func formatIfNotFormatted(disk string, fstype string, exec mount.Exec) {
 	if err := mount.ValidateDiskNumber(disk); err != nil {
-		glog.Errorf("azureDisk Mount: formatIfNotFormatted failed, err: %v\n", err)
+		klog.Errorf("azureDisk Mount: formatIfNotFormatted failed, err: %v\n", err)
 		return
 	}
@@ -111,8 +111,8 @@ func formatIfNotFormatted(disk string, fstype string, exec mount.Exec) {
 	cmd += fmt.Sprintf(" | New-Partition -AssignDriveLetter -UseMaximumSize | Format-Volume -FileSystem %s -Confirm:$false", fstype)
 	output, err := exec.Run("powershell", "/c", cmd)
 	if err != nil {
-		glog.Errorf("azureDisk Mount: Get-Disk failed, error: %v, output: %q", err, string(output))
+		klog.Errorf("azureDisk Mount: Get-Disk failed, error: %v, output: %q", err, string(output))
 	} else {
-		glog.Infof("azureDisk Mount: Disk successfully formatted, disk: %q, fstype: %q\n", disk, fstype)
+		klog.Infof("azureDisk Mount: Disk successfully formatted, disk: %q, fstype: %q\n", disk, fstype)
 	}
 }
diff --git 
a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd.go index 5e237f5e55f5..961d5a35714a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd.go @@ -21,9 +21,9 @@ import ( "fmt" "strings" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" - "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage" - "github.com/golang/glog" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" + "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -144,26 +144,25 @@ func (plugin *azureDataDiskPlugin) GetVolumeLimits() (map[string]int64, error) { // hoping external CCM or admin can set it. Returning // default values from here will mean, no one can // override them. - glog.Errorf("failed to get azure cloud in GetVolumeLimits, plugin.host: %s", plugin.host.GetHostName()) - return volumeLimits, nil + return nil, fmt.Errorf("failed to get azure cloud in GetVolumeLimits, plugin.host: %s", plugin.host.GetHostName()) } instances, ok := az.Instances() if !ok { - glog.Warningf("Failed to get instances from cloud provider") + klog.Warningf("Failed to get instances from cloud provider") return volumeLimits, nil } instanceType, err := instances.InstanceType(context.TODO(), plugin.host.GetNodeName()) if err != nil { - glog.Errorf("Failed to get instance type from Azure cloud provider, nodeName: %s", plugin.host.GetNodeName()) + klog.Errorf("Failed to get instance type from Azure cloud provider, nodeName: %s", plugin.host.GetNodeName()) return volumeLimits, nil } if vmSizeList == nil { result, err := az.VirtualMachineSizesClient.List(context.TODO(), az.Location) if err != nil || result.Value == nil { - glog.Errorf("failed to list vm sizes in GetVolumeLimits, plugin.host: %s, location: %s", plugin.host.GetHostName(), az.Location) + klog.Errorf("failed to list vm sizes in GetVolumeLimits, plugin.host: %s, location: %s", plugin.host.GetHostName(), az.Location) return volumeLimits, nil } vmSizeList = result.Value @@ -184,11 +183,11 @@ func getMaxDataDiskCount(instanceType string, sizeList *[]compute.VirtualMachine vmsize := strings.ToUpper(instanceType) for _, size := range *sizeList { if size.Name == nil || size.MaxDataDiskCount == nil { - glog.Errorf("failed to get vm size in getMaxDataDiskCount") + klog.Errorf("failed to get vm size in getMaxDataDiskCount") continue } if strings.ToUpper(*size.Name) == vmsize { - glog.V(2).Infof("got a matching size in getMaxDataDiskCount, Name: %s, MaxDataDiskCount: %s", *size.Name, *size.MaxDataDiskCount) + klog.V(2).Infof("got a matching size in getMaxDataDiskCount, Name: %s, MaxDataDiskCount: %d", *size.Name, *size.MaxDataDiskCount) return int64(*size.MaxDataDiskCount) } } @@ -209,7 +208,7 @@ func (plugin *azureDataDiskPlugin) GetAccessModes() []v1.PersistentVolumeAccessM func (plugin *azureDataDiskPlugin) NewAttacher() (volume.Attacher, error) { azure, err := getCloud(plugin.host) if err != nil { - glog.Errorf("failed to get azure cloud in NewAttacher, plugin.host : %s, err:%v", plugin.host.GetHostName(), err) + klog.Errorf("failed to get azure cloud in NewAttacher, plugin.host : %s, err:%v", plugin.host.GetHostName(), err) return nil, err } @@ -222,7 +221,7 @@ func (plugin *azureDataDiskPlugin) 
NewAttacher() (volume.Attacher, error) { func (plugin *azureDataDiskPlugin) NewDetacher() (volume.Detacher, error) { azure, err := getCloud(plugin.host) if err != nil { - glog.V(4).Infof("failed to get azure cloud in NewDetacher, plugin.host : %s", plugin.host.GetHostName()) + klog.V(4).Infof("failed to get azure cloud in NewDetacher, plugin.host : %s", plugin.host.GetHostName()) return nil, err } @@ -302,6 +301,13 @@ func (plugin *azureDataDiskPlugin) ExpandVolumeDevice( return diskController.ResizeDisk(spec.PersistentVolume.Spec.AzureDisk.DataDiskURI, oldSize, newSize) } +func (plugin *azureDataDiskPlugin) ExpandFS(spec *volume.Spec, devicePath, deviceMountPath string, _, _ resource.Quantity) error { + _, err := util.GenericResizeFS(plugin.host, plugin.GetPluginName(), devicePath, deviceMountPath) + return err +} + +var _ volume.FSResizableVolumePlugin = &azureDataDiskPlugin{} + func (plugin *azureDataDiskPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) { mounter := plugin.host.GetMounter(plugin.GetPluginName()) pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName()) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd_block.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd_block.go index af1d195047e1..704304994639 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd_block.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd_block.go @@ -20,10 +20,10 @@ import ( "fmt" "path/filepath" - "github.com/golang/glog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" kstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" @@ -44,7 +44,7 @@ func (plugin *azureDataDiskPlugin) ConstructBlockVolumeSpec(podUID types.UID, vo if err != nil { return nil, err } - glog.V(5).Infof("constructing block volume spec from globalMapPathUUID: %s", globalMapPathUUID) + klog.V(5).Infof("constructing block volume spec from globalMapPathUUID: %s", globalMapPathUUID) globalMapPath := filepath.Dir(globalMapPathUUID) if len(globalMapPath) <= 1 { @@ -63,7 +63,7 @@ func getVolumeSpecFromGlobalMapPath(globalMapPath, volumeName string) (*volume.S if len(diskName) <= 1 { return nil, fmt.Errorf("failed to get diskName from global path=%s", globalMapPath) } - glog.V(5).Infof("got diskName(%s) from globalMapPath: %s", globalMapPath, diskName) + klog.V(5).Infof("got diskName(%s) from globalMapPath: %s", globalMapPath, diskName) block := v1.PersistentVolumeBlock pv := &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_mounter.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_mounter.go index d8b7ae50df6b..66c0e12910b8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_mounter.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_mounter.go @@ -21,8 +21,8 @@ import ( "os" "runtime" - "github.com/golang/glog" "k8s.io/api/core/v1" + "k8s.io/klog" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" ) @@ -46,7 +46,7 @@ func (m *azureDiskMounter) GetAttributes() volume.Attributes { readOnly := false volumeSource, _, err := getVolumeSource(m.spec) if err != nil { - glog.Infof("azureDisk - mounter failed to get volume source for spec %s %v", m.spec.Name(), err) + 
klog.Infof("azureDisk - mounter failed to get volume source for spec %s %v", m.spec.Name(), err) } else if volumeSource.ReadOnly != nil { readOnly = *volumeSource.ReadOnly } @@ -74,7 +74,7 @@ func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error { volumeSource, _, err := getVolumeSource(m.spec) if err != nil { - glog.Infof("azureDisk - mounter failed to get volume source for spec %s", m.spec.Name()) + klog.Infof("azureDisk - mounter failed to get volume source for spec %s", m.spec.Name()) return err } @@ -82,20 +82,20 @@ func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error { mountPoint, err := mounter.IsLikelyNotMountPoint(dir) if err != nil && !os.IsNotExist(err) { - glog.Infof("azureDisk - cannot validate mount point for disk %s on %s %v", diskName, dir, err) + klog.Infof("azureDisk - cannot validate mount point for disk %s on %s %v", diskName, dir, err) return err } if !mountPoint { // testing original mount point, make sure the mount link is valid _, err := (&osIOHandler{}).ReadDir(dir) if err == nil { - glog.V(4).Infof("azureDisk - already mounted to target %s", dir) + klog.V(4).Infof("azureDisk - already mounted to target %s", dir) return nil } // mount link is invalid, now unmount and remount later - glog.Warningf("azureDisk - ReadDir %s failed with %v, unmount this directory", dir, err) + klog.Warningf("azureDisk - ReadDir %s failed with %v, unmount this directory", dir, err) if err := mounter.Unmount(dir); err != nil { - glog.Errorf("azureDisk - Unmount directory %s failed with %v", dir, err) + klog.Errorf("azureDisk - Unmount directory %s failed with %v", dir, err) return err } mountPoint = true @@ -104,7 +104,7 @@ func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error { if runtime.GOOS != "windows" { // in windows, we will use mklink to mount, will MkdirAll in Mount func if err := os.MkdirAll(dir, 0750); err != nil { - glog.Errorf("azureDisk - mkdir failed on disk %s on dir: %s (%v)", diskName, dir, err) + klog.Errorf("azureDisk - mkdir failed on disk %s on dir: %s (%v)", diskName, dir, err) return err } } @@ -119,7 +119,7 @@ func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error { options = util.JoinMountOptions(m.options.MountOptions, options) } - glog.V(4).Infof("azureDisk - Attempting to mount %s on %s", diskName, dir) + klog.V(4).Infof("azureDisk - Attempting to mount %s on %s", diskName, dir) isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk) globalPDPath, err := makeGlobalPDPath(m.plugin.host, volumeSource.DataDiskURI, isManagedDisk) @@ -131,7 +131,7 @@ func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error { // Everything in the following control flow is meant as an // attempt cleanup a failed setupAt (bind mount) if mountErr != nil { - glog.Infof("azureDisk - SetupAt:Mount disk:%s at dir:%s failed during mounting with error:%v, will attempt to clean up", diskName, dir, mountErr) + klog.Infof("azureDisk - SetupAt:Mount disk:%s at dir:%s failed during mounting with error:%v, will attempt to clean up", diskName, dir, mountErr) mountPoint, err := mounter.IsLikelyNotMountPoint(dir) if err != nil { return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup IsLikelyNotMountPoint check failed for disk:%s on dir:%s with error %v original-mountErr:%v", diskName, dir, err, mountErr) @@ -155,7 +155,7 @@ func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error { return fmt.Errorf("azureDisk - SetupAt:Mount:Failure error cleaning up (removing dir:%s) with error:%v original-mountErr:%v", 
dir, err, mountErr) } - glog.V(2).Infof("azureDisk - Mount of disk:%s on dir:%s failed with mount error:%v post failure clean up was completed", diskName, dir, mountErr) + klog.V(2).Infof("azureDisk - Mount of disk:%s on dir:%s failed with mount error:%v post failure clean up was completed", diskName, dir, mountErr) return mountErr } @@ -163,7 +163,7 @@ func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error { volume.SetVolumeOwnership(m, fsGroup) } - glog.V(2).Infof("azureDisk - successfully mounted disk %s on %s", diskName, dir) + klog.V(2).Infof("azureDisk - successfully mounted disk %s on %s", diskName, dir) return nil } @@ -175,11 +175,11 @@ func (u *azureDiskUnmounter) TearDownAt(dir string) error { if pathExists, pathErr := util.PathExists(dir); pathErr != nil { return fmt.Errorf("Error checking if path exists: %v", pathErr) } else if !pathExists { - glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) + klog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) return nil } - glog.V(4).Infof("azureDisk - TearDownAt: %s", dir) + klog.V(4).Infof("azureDisk - TearDownAt: %s", dir) mounter := u.plugin.host.GetMounter(u.plugin.GetPluginName()) mountPoint, err := mounter.IsLikelyNotMountPoint(dir) if err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_provision.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_provision.go index 519e7eb4b3dc..ca49550a5c48 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_provision.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_provision.go @@ -22,6 +22,7 @@ import ( "strconv" "strings" + "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -127,6 +128,9 @@ func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologie availabilityZone string availabilityZones sets.String selectedAvailabilityZone string + + diskIopsReadWrite string + diskMbpsReadWrite string ) // maxLength = 79 - (4 for ".vhd") = 75 name := util.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 75) @@ -165,6 +169,10 @@ func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologie } case "zoned": strZoned = v + case "diskiopsreadwrite": + diskIopsReadWrite = v + case "diskmbpsreadwrite": + diskMbpsReadWrite = v default: return nil, fmt.Errorf("AzureDisk - invalid option %s in storage class", k) } @@ -241,6 +249,8 @@ func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologie SizeGB: requestGiB, Tags: tags, AvailabilityZone: selectedAvailabilityZone, + DiskIOPSReadWrite: diskIopsReadWrite, + DiskMBpsReadWrite: diskMbpsReadWrite, } diskURI, err = diskController.CreateManagedDisk(volumeOptions) if err != nil { @@ -257,7 +267,7 @@ func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologie return nil, err } } else { - diskURI, err = diskController.CreateBlobDisk(name, skuName, requestGiB) + diskURI, err = diskController.CreateBlobDisk(name, storage.SkuName(storageAccountType), requestGiB) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_file/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_file/BUILD index 910bbf7ce8dd..b4fa68effead 100644 --- 
a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_file/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_file/BUILD @@ -16,7 +16,6 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/volume/azure_file", deps = [ - "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/azure:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/util/strings:go_default_library", @@ -27,7 +26,9 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//staging/src/k8s.io/cloud-provider:go_default_library", + "//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_file.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_file.go index f36b47cb3eea..e0234ce780ba 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_file.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_file.go @@ -22,12 +22,12 @@ import ( "os" "runtime" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" "k8s.io/kubernetes/pkg/cloudprovider/providers/azure" "k8s.io/kubernetes/pkg/util/mount" kstrings "k8s.io/kubernetes/pkg/util/strings" @@ -237,20 +237,20 @@ func (b *azureFileMounter) SetUp(fsGroup *int64) error { func (b *azureFileMounter) SetUpAt(dir string, fsGroup *int64) error { notMnt, err := b.mounter.IsLikelyNotMountPoint(dir) - glog.V(4).Infof("AzureFile mount set up: %s %v %v", dir, !notMnt, err) + klog.V(4).Infof("AzureFile mount set up: %s %v %v", dir, !notMnt, err) if err != nil && !os.IsNotExist(err) { return err } if !notMnt { // testing original mount point, make sure the mount link is valid if _, err := ioutil.ReadDir(dir); err == nil { - glog.V(4).Infof("azureFile - already mounted to target %s", dir) + klog.V(4).Infof("azureFile - already mounted to target %s", dir) return nil } // mount link is invalid, now unmount and remount later - glog.Warningf("azureFile - ReadDir %s failed with %v, unmount this directory", dir, err) + klog.Warningf("azureFile - ReadDir %s failed with %v, unmount this directory", dir, err) if err := b.mounter.Unmount(dir); err != nil { - glog.Errorf("azureFile - Unmount directory %s failed with %v", dir, err) + klog.Errorf("azureFile - Unmount directory %s failed with %v", dir, err) return err } notMnt = true @@ -269,7 +269,9 @@ func (b *azureFileMounter) SetUpAt(dir string, fsGroup *int64) error { if runtime.GOOS == "windows" { mountOptions = []string{fmt.Sprintf("AZURE\\%s", accountName), accountKey} } else { - os.MkdirAll(dir, 0700) + if err := os.MkdirAll(dir, 0700); err != nil { + return err + } // parameters suggested by https://azure.microsoft.com/en-us/documentation/articles/storage-how-to-use-files-linux/ options := []string{fmt.Sprintf("username=%s,password=%s", accountName, accountKey)} if b.readOnly { @@ -283,22 +285,22 @@ func (b *azureFileMounter) SetUpAt(dir string, fsGroup *int64) error { if err != nil { notMnt, mntErr := 
b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if !notMnt { if mntErr = b.mounter.Unmount(dir); mntErr != nil { - glog.Errorf("Failed to unmount: %v", mntErr) + klog.Errorf("Failed to unmount: %v", mntErr) return err } notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if !notMnt { // This is very odd, we don't expect it. We'll try again next sync loop. - glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir) + klog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir) return err } } @@ -374,7 +376,7 @@ func getStorageEndpointSuffix(cloudprovider cloudprovider.Interface) string { const publicCloudStorageEndpointSuffix = "core.windows.net" azure, err := getAzureCloud(cloudprovider) if err != nil { - glog.Warningf("No Azure cloud provider found. Using the Azure public cloud endpoint: %s", publicCloudStorageEndpointSuffix) + klog.Warningf("No Azure cloud provider found. Using the Azure public cloud endpoint: %s", publicCloudStorageEndpointSuffix) return publicCloudStorageEndpointSuffix } return azure.Environment.StorageEndpointSuffix diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_provision.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_provision.go index f1cb301ab12f..60466cedab62 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_provision.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_provision.go @@ -20,11 +20,13 @@ import ( "fmt" "strings" - "github.com/golang/glog" + "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage" + "k8s.io/klog" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" "k8s.io/kubernetes/pkg/cloudprovider/providers/azure" utilstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" @@ -38,7 +40,7 @@ var _ volume.ProvisionableVolumePlugin = &azureFilePlugin{} // azure cloud provider should implement it type azureCloudProvider interface { // create a file share - CreateFileShare(shareName, accountName, accountType, resourceGroup, location string, requestGiB int) (string, string, error) + CreateFileShare(shareName, accountName, accountType, accountKind, resourceGroup, location string, requestGiB int) (string, string, error) // delete a file share DeleteFileShare(accountName, accountKey, shareName string) error // resize a file share @@ -54,7 +56,7 @@ type azureFileDeleter struct { func (plugin *azureFilePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) { azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider()) if err != nil { - glog.V(4).Infof("failed to get azure provider") + klog.V(4).Infof("failed to get azure provider") return nil, err } @@ -90,7 +92,7 @@ func (plugin *azureFilePlugin) newDeleterInternal(spec *volume.Spec, util azureU func (plugin *azureFilePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) { azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider()) if err != nil { - glog.V(4).Infof("failed 
to get azure provider") + klog.V(4).Infof("failed to get azure provider") return nil, err } if len(options.PVC.Spec.AccessModes) == 0 { @@ -118,7 +120,7 @@ func (f *azureFileDeleter) GetPath() string { } func (f *azureFileDeleter) Delete() error { - glog.V(4).Infof("deleting volume %s", f.shareName) + klog.V(4).Infof("deleting volume %s", f.shareName) return f.azureProvider.DeleteFileShare(f.accountName, f.accountKey, f.shareName) } @@ -171,7 +173,12 @@ func (a *azureFileProvisioner) Provision(selectedNode *v1.Node, allowedTopologie return nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on Azure file") } - account, key, err := a.azureProvider.CreateFileShare(name, account, sku, resourceGroup, location, requestGiB) + // when use azure file premium, account kind should be specified as FileStorage + accountKind := string(storage.StorageV2) + if strings.HasPrefix(strings.ToLower(sku), "premium") { + accountKind = string(storage.FileStorage) + } + account, key, err := a.azureProvider.CreateFileShare(name, account, sku, accountKind, resourceGroup, location, requestGiB) if err != nil { return nil, err } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_util.go index cb3b9da37318..165f83afdeca 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_util.go @@ -31,8 +31,8 @@ const ( dirMode = "dir_mode" gid = "gid" vers = "vers" - defaultFileMode = "0755" - defaultDirMode = "0755" + defaultFileMode = "0777" + defaultDirMode = "0777" defaultVers = "3.0" ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cephfs/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cephfs/BUILD index 63d03a82c05e..eeb4ee0dce5b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cephfs/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cephfs/BUILD @@ -21,7 +21,7 @@ go_library( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cephfs/cephfs.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cephfs/cephfs.go index e1f506c0a9c9..748fe11a4751 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cephfs/cephfs.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cephfs/cephfs.go @@ -24,17 +24,17 @@ import ( "runtime" "strings" - "github.com/golang/glog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" utilstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" ) -// This is the primary entrypoint for volume plugins. +// ProbeVolumePlugins is the primary entrypoint for volume plugins. func ProbeVolumePlugins() []volume.VolumePlugin { return []volume.VolumePlugin{&cephfsPlugin{nil}} } @@ -110,7 +110,7 @@ func (plugin *cephfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume. 
} for name, data := range secrets.Data { secret = string(data) - glog.V(4).Infof("found ceph secret info: %s", name) + klog.V(4).Infof("found ceph secret info: %s", name) } } return plugin.newMounterInternal(spec, pod.UID, plugin.host.GetMounter(plugin.GetPluginName()), secret) @@ -144,7 +144,7 @@ func (plugin *cephfsPlugin) newMounterInternal(spec *volume.Spec, podUID types.U path: path, secret: secret, id: id, - secret_file: secretFile, + secretFile: secretFile, readonly: readOnly, mounter: mounter, plugin: plugin, @@ -182,16 +182,16 @@ func (plugin *cephfsPlugin) ConstructVolumeSpec(volumeName, mountPath string) (* // CephFS volumes represent a bare host file or directory mount of an CephFS export. type cephfs struct { - volName string - podUID types.UID - mon []string - path string - id string - secret string - secret_file string - readonly bool - mounter mount.Interface - plugin *cephfsPlugin + volName string + podUID types.UID + mon []string + path string + id string + secret string + secretFile string + readonly bool + mounter mount.Interface + plugin *cephfsPlugin volume.MetricsNil mountOptions []string } @@ -213,7 +213,7 @@ func (cephfsVolume *cephfsMounter) GetAttributes() volume.Attributes { // Checks prior to mount operations to verify that the required components (binaries, etc.) // to mount the volume are available on the underlying node. // If not, it returns an error -func (cephfsMounter *cephfsMounter) CanMount() error { +func (cephfsVolume *cephfsMounter) CanMount() error { return nil } @@ -225,7 +225,7 @@ func (cephfsVolume *cephfsMounter) SetUp(fsGroup *int64) error { // SetUpAt attaches the disk and bind mounts to the volume path. func (cephfsVolume *cephfsMounter) SetUpAt(dir string, fsGroup *int64) error { notMnt, err := cephfsVolume.mounter.IsLikelyNotMountPoint(dir) - glog.V(4).Infof("CephFS mount set up: %s %v %v", dir, !notMnt, err) + klog.V(4).Infof("CephFS mount set up: %s %v %v", dir, !notMnt, err) if err != nil && !os.IsNotExist(err) { return err } @@ -239,7 +239,7 @@ func (cephfsVolume *cephfsMounter) SetUpAt(dir string, fsGroup *int64) error { // check whether it belongs to fuse, if not, default to use kernel mount. if cephfsVolume.checkFuseMount() { - glog.V(4).Info("CephFS fuse mount.") + klog.V(4).Info("CephFS fuse mount.") err = cephfsVolume.execFuseMount(dir) // cleanup no matter if fuse mount fail. keyringPath := cephfsVolume.GetKeyringPath() @@ -250,12 +250,12 @@ func (cephfsVolume *cephfsMounter) SetUpAt(dir string, fsGroup *int64) error { if err == nil { // cephfs fuse mount succeeded. return nil - } else { - // if cephfs fuse mount failed, fallback to kernel mount. - glog.V(4).Infof("CephFS fuse mount failed: %v, fallback to kernel mount.", err) } + // if cephfs fuse mount failed, fallback to kernel mount. 
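The rewrite in this hunk is lint-driven rather than functional: the `else` after a `return` is flattened, and the fallback log moves from `V(4)` to `V(2)` so a failed fuse mount surfaces at lower verbosity settings. A structural sketch of the resulting control flow, with stand-in closures for `execFuseMount`/`execMount`:

```go
package main

import (
	"errors"
	"fmt"
)

// mountCephFS sketches the flattened fallback in SetUpAt: try ceph-fuse
// first when it is available, otherwise (or on failure) use the kernel
// client. The closures stand in for execFuseMount and execMount.
func mountCephFS(fuseAvailable bool, fuseMount, kernelMount func() error) error {
	if fuseAvailable {
		err := fuseMount()
		if err == nil {
			return nil // fuse mount succeeded
		}
		// fuse mount failed; fall back to the kernel mount, as the
		// rewritten SetUpAt does (no else branch after the return).
		fmt.Println("CephFS fuse mount failed, falling back to kernel mount:", err)
	}
	return kernelMount()
}

func main() {
	fuse := func() error { return errors.New("ceph-fuse did not start") }
	kernel := func() error { return nil }
	fmt.Println(mountCephFS(true, fuse, kernel)) // <nil>
}
```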
+ klog.V(2).Infof("CephFS fuse mount failed: %v, fallback to kernel mount.", err) + } - glog.V(4).Info("CephFS kernel mount.") + klog.V(4).Info("CephFS kernel mount.") err = cephfsVolume.execMount(dir) if err != nil { @@ -298,19 +298,19 @@ func (cephfsVolume *cephfs) GetKeyringPath() string { func (cephfsVolume *cephfs) execMount(mountpoint string) error { // cephfs mount option - ceph_opt := "" + cephOpt := "" // override secretfile if secret is provided if cephfsVolume.secret != "" { - ceph_opt = "name=" + cephfsVolume.id + ",secret=" + cephfsVolume.secret + cephOpt = "name=" + cephfsVolume.id + ",secret=" + cephfsVolume.secret } else { - ceph_opt = "name=" + cephfsVolume.id + ",secretfile=" + cephfsVolume.secret_file + cephOpt = "name=" + cephfsVolume.id + ",secretfile=" + cephfsVolume.secretFile } // build option array opt := []string{} if cephfsVolume.readonly { opt = append(opt, "ro") } - opt = append(opt, ceph_opt) + opt = append(opt, cephOpt) // build src like mon1:6789,mon2:6789,mon3:6789:/ hosts := cephfsVolume.mon @@ -331,12 +331,12 @@ func (cephfsVolume *cephfs) execMount(mountpoint string) error { return nil } -func (cephfsMounter *cephfsMounter) checkFuseMount() bool { - execute := cephfsMounter.plugin.host.GetExec(cephfsMounter.plugin.GetPluginName()) +func (cephfsVolume *cephfsMounter) checkFuseMount() bool { + execute := cephfsVolume.plugin.host.GetExec(cephfsVolume.plugin.GetPluginName()) switch runtime.GOOS { case "linux": if _, err := execute.Run("/usr/bin/test", "-x", "/sbin/mount.fuse.ceph"); err == nil { - glog.V(4).Info("/sbin/mount.fuse.ceph exists, it should be fuse mount.") + klog.V(4).Info("/sbin/mount.fuse.ceph exists, it should be fuse mount.") return true } return false @@ -346,12 +346,12 @@ func (cephfsMounter *cephfsMounter) checkFuseMount() bool { func (cephfsVolume *cephfs) execFuseMount(mountpoint string) error { // cephfs keyring file - keyring_file := "" + keyringFile := "" // override secretfile if secret is provided if cephfsVolume.secret != "" { // TODO: cephfs fuse currently doesn't support secret option, // remove keyring file create once secret option is supported. 
- glog.V(4).Info("cephfs mount begin using fuse.") + klog.V(4).Info("cephfs mount begin using fuse.") keyringPath := cephfsVolume.GetKeyringPath() os.MkdirAll(keyringPath, 0750) @@ -370,20 +370,20 @@ func (cephfsVolume *cephfs) execFuseMount(mountpoint string) error { writerContext := fmt.Sprintf("cephfuse:%v.keyring", cephfsVolume.id) writer, err := util.NewAtomicWriter(keyringPath, writerContext) if err != nil { - glog.Errorf("failed to create atomic writer: %v", err) + klog.Errorf("failed to create atomic writer: %v", err) return err } err = writer.Write(payload) if err != nil { - glog.Errorf("failed to write payload to dir: %v", err) + klog.Errorf("failed to write payload to dir: %v", err) return err } - keyring_file = path.Join(keyringPath, fileName) + keyringFile = path.Join(keyringPath, fileName) } else { - keyring_file = cephfsVolume.secret_file + keyringFile = cephfsVolume.secretFile } // build src like mon1:6789,mon2:6789,mon3:6789:/ @@ -399,7 +399,7 @@ func (cephfsVolume *cephfs) execFuseMount(mountpoint string) error { mountArgs := []string{} mountArgs = append(mountArgs, "-k") - mountArgs = append(mountArgs, keyring_file) + mountArgs = append(mountArgs, keyringFile) mountArgs = append(mountArgs, "-m") mountArgs = append(mountArgs, src) mountArgs = append(mountArgs, mountpoint) @@ -419,11 +419,11 @@ func (cephfsVolume *cephfs) execFuseMount(mountpoint string) error { mountArgs = append(mountArgs, strings.Join(opt, ",")) } - glog.V(4).Infof("Mounting cmd ceph-fuse with arguments (%s)", mountArgs) + klog.V(4).Infof("Mounting cmd ceph-fuse with arguments (%s)", mountArgs) command := exec.Command("ceph-fuse", mountArgs...) output, err := command.CombinedOutput() if err != nil || !(strings.Contains(string(output), "starting fuse")) { - return fmt.Errorf("Ceph-fuse failed: %v\narguments: %s\nOutput: %s\n", err, mountArgs, string(output)) + return fmt.Errorf("Ceph-fuse failed: %v\narguments: %s\nOutput: %s", err, mountArgs, string(output)) } return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/BUILD index c37c3f20881b..9435be1ec42a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/BUILD @@ -17,7 +17,6 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/volume/cinder", deps = [ - "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/openstack:go_default_library", "//pkg/features:go_default_library", "//pkg/kubelet/apis:go_default_library", @@ -35,7 +34,8 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//staging/src/k8s.io/cloud-provider:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) @@ -49,7 +49,7 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//pkg/cloudprovider:go_default_library", + "//pkg/kubelet/apis:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", @@ -58,7 +58,8 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", 
"//staging/src/k8s.io/client-go/util/testing:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//staging/src/k8s.io/cloud-provider:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/attacher.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/attacher.go index 2cdc6da7a589..0db22a27386e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/attacher.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/attacher.go @@ -24,10 +24,10 @@ import ( "strings" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" @@ -145,31 +145,31 @@ func (attacher *cinderDiskAttacher) Attach(spec *volume.Spec, nodeName types.Nod attached, err := attacher.cinderProvider.DiskIsAttached(instanceID, volumeID) if err != nil { // Log error and continue with attach - glog.Warningf( + klog.Warningf( "Error checking if volume (%q) is already attached to current instance (%q). Will continue and try attach anyway. err=%v", volumeID, instanceID, err) } if err == nil && attached { // Volume is already attached to instance. - glog.Infof("Attach operation is successful. volume %q is already attached to instance %q.", volumeID, instanceID) + klog.Infof("Attach operation is successful. volume %q is already attached to instance %q.", volumeID, instanceID) } else { _, err = attacher.cinderProvider.AttachDisk(instanceID, volumeID) if err == nil { if err = attacher.waitDiskAttached(instanceID, volumeID); err != nil { - glog.Errorf("Error waiting for volume %q to be attached from node %q: %v", volumeID, nodeName, err) + klog.Errorf("Error waiting for volume %q to be attached from node %q: %v", volumeID, nodeName, err) return "", err } - glog.Infof("Attach operation successful: volume %q attached to instance %q.", volumeID, instanceID) + klog.Infof("Attach operation successful: volume %q attached to instance %q.", volumeID, instanceID) } else { - glog.Infof("Attach volume %q to instance %q failed with: %v", volumeID, instanceID, err) + klog.Infof("Attach volume %q to instance %q failed with: %v", volumeID, instanceID, err) return "", err } } devicePath, err := attacher.cinderProvider.GetAttachmentDiskPath(instanceID, volumeID) if err != nil { - glog.Infof("Can not get device path of volume %q which be attached to instance %q, failed with: %v", volumeID, instanceID, err) + klog.Infof("Can not get device path of volume %q which be attached to instance %q, failed with: %v", volumeID, instanceID, err) return "", err } @@ -183,7 +183,7 @@ func (attacher *cinderDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nod for _, spec := range specs { volumeID, _, _, err := getVolumeInfo(spec) if err != nil { - glog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err) + klog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err) continue } @@ -195,7 +195,7 @@ func (attacher *cinderDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nod attachedResult, err := attacher.cinderProvider.DisksAreAttachedByName(nodeName, volumeIDList) if err != nil { // Log error and continue with attach - glog.Errorf( + klog.Errorf( "Error checking if Volumes (%v) are already attached to current node (%q). Will continue and try attach anyway. 
err=%v", volumeIDList, nodeName, err) return volumesAttachedCheck, err @@ -205,7 +205,7 @@ func (attacher *cinderDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nod if !attached { spec := volumeSpecMap[volumeID] volumesAttachedCheck[spec] = false - glog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name()) + klog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name()) } } return volumesAttachedCheck, nil @@ -231,7 +231,7 @@ func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath for { select { case <-ticker.C: - glog.V(5).Infof("Checking Cinder disk %q is attached.", volumeID) + klog.V(5).Infof("Checking Cinder disk %q is attached.", volumeID) probeAttachedVolume() if !attacher.cinderProvider.ShouldTrustDevicePath() { // Using the Cinder volume ID, find the real device path (See Issue #33128) @@ -239,11 +239,11 @@ func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath } exists, err := volumeutil.PathExists(devicePath) if exists && err == nil { - glog.Infof("Successfully found attached Cinder disk %q at %v.", volumeID, devicePath) + klog.Infof("Successfully found attached Cinder disk %q at %v.", volumeID, devicePath) return devicePath, nil } // Log an error, and continue checking periodically - glog.Errorf("Error: could not find attached Cinder disk %q (path: %q): %v", volumeID, devicePath, err) + klog.Errorf("Error: could not find attached Cinder disk %q (path: %q): %v", volumeID, devicePath, err) // Using exponential backoff instead of linear ticker.Stop() duration = time.Duration(float64(duration) * probeVolumeFactor) @@ -379,26 +379,26 @@ func (detacher *cinderDiskDetacher) Detach(volumeName string, nodeName types.Nod attached, instanceID, err := detacher.cinderProvider.DiskIsAttachedByName(nodeName, volumeID) if err != nil { // Log error and continue with detach - glog.Errorf( + klog.Errorf( "Error checking if volume (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v", volumeID, nodeName, err) } if err == nil && !attached { // Volume is already detached from node. - glog.Infof("detach operation was successful. volume %q is already detached from node %q.", volumeID, nodeName) + klog.Infof("detach operation was successful. 
volume %q is already detached from node %q.", volumeID, nodeName) return nil } if err = detacher.cinderProvider.DetachDisk(instanceID, volumeID); err != nil { - glog.Errorf("Error detaching volume %q from node %q: %v", volumeID, nodeName, err) + klog.Errorf("Error detaching volume %q from node %q: %v", volumeID, nodeName, err) return err } if err = detacher.waitDiskDetached(instanceID, volumeID); err != nil { - glog.Errorf("Error waiting for volume %q to detach from node %q: %v", volumeID, nodeName, err) + klog.Errorf("Error waiting for volume %q to detach from node %q: %v", volumeID, nodeName, err) return err } - glog.Infof("detached volume %q from node %q", volumeID, nodeName) + klog.Infof("detached volume %q from node %q", volumeID, nodeName) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder.go index 7086cc06f80d..953c4660f455 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder.go @@ -22,13 +22,13 @@ import ( "os" "path" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" "k8s.io/kubernetes/pkg/cloudprovider/providers/openstack" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/util/keymutex" @@ -148,7 +148,9 @@ func (plugin *cinderPlugin) newMounterInternal(spec *volume.Spec, podUID types.U }, fsType: fsType, readOnly: readOnly, - blockDeviceMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil + blockDeviceMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host), + mountOptions: util.MountOptionFromSpec(spec), + }, nil } func (plugin *cinderPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) { @@ -232,7 +234,7 @@ func (plugin *cinderPlugin) ConstructVolumeSpec(volumeName, mountPath string) (* if err != nil { return nil, err } - glog.V(4).Infof("Found volume %s mounted to %s", sourceName, mountPath) + klog.V(4).Infof("Found volume %s mounted to %s", sourceName, mountPath) cinderVolume := &v1.Volume{ Name: volumeName, VolumeSource: v1.VolumeSource{ @@ -261,10 +263,17 @@ func (plugin *cinderPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resour return oldSize, err } - glog.V(2).Infof("volume %s expanded to new size %d successfully", volumeID, int(newSize.Value())) + klog.V(2).Infof("volume %s expanded to new size %d successfully", volumeID, int(newSize.Value())) return expandedSize, nil } +func (plugin *cinderPlugin) ExpandFS(spec *volume.Spec, devicePath, deviceMountPath string, _, _ resource.Quantity) error { + _, err := util.GenericResizeFS(plugin.host, plugin.GetPluginName(), devicePath, deviceMountPath) + return err +} + +var _ volume.FSResizableVolumePlugin = &cinderPlugin{} + func (plugin *cinderPlugin) RequiresFSResize() bool { return true } @@ -276,7 +285,7 @@ type cdManager interface { // Detaches the disk from the kubelet's host machine. 
DetachDisk(unmounter *cinderVolumeUnmounter) error // Creates a volume - CreateVolume(provisioner *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error) + CreateVolume(provisioner *cinderVolumeProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error) // Deletes a volume DeleteVolume(deleter *cinderVolumeDeleter) error } @@ -288,6 +297,7 @@ type cinderVolumeMounter struct { fsType string readOnly bool blockDeviceMounter *mount.SafeFormatAndMount + mountOptions []string } // cinderPersistentDisk volumes are disk resources provided by C3 @@ -332,18 +342,18 @@ func (b *cinderVolumeMounter) SetUp(fsGroup *int64) error { // SetUp bind mounts to the volume path. func (b *cinderVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { - glog.V(5).Infof("Cinder SetUp %s to %s", b.pdName, dir) + klog.V(5).Infof("Cinder SetUp %s to %s", b.pdName, dir) b.plugin.volumeLocks.LockKey(b.pdName) defer b.plugin.volumeLocks.UnlockKey(b.pdName) notmnt, err := b.mounter.IsLikelyNotMountPoint(dir) if err != nil && !os.IsNotExist(err) { - glog.Errorf("Cannot validate mount point: %s %v", dir, err) + klog.Errorf("Cannot validate mount point: %s %v", dir, err) return err } if !notmnt { - glog.V(4).Infof("Something is already mounted to target %s", dir) + klog.V(4).Infof("Something is already mounted to target %s", dir) return nil } globalPDPath := makeGlobalPDName(b.plugin.host, b.pdName) @@ -354,45 +364,46 @@ func (b *cinderVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { } if err := os.MkdirAll(dir, 0750); err != nil { - glog.V(4).Infof("Could not create directory %s: %v", dir, err) + klog.V(4).Infof("Could not create directory %s: %v", dir, err) return err } + mountOptions := util.JoinMountOptions(options, b.mountOptions) // Perform a bind mount to the full path to allow duplicate mounts of the same PD. - glog.V(4).Infof("Attempting to mount cinder volume %s to %s with options %v", b.pdName, dir, options) + klog.V(4).Infof("Attempting to mount cinder volume %s to %s with options %v", b.pdName, dir, mountOptions) err = b.mounter.Mount(globalPDPath, dir, "", options) if err != nil { - glog.V(4).Infof("Mount failed: %v", err) + klog.V(4).Infof("Mount failed: %v", err) notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if !notmnt { if mntErr = b.mounter.Unmount(dir); mntErr != nil { - glog.Errorf("Failed to unmount: %v", mntErr) + klog.Errorf("Failed to unmount: %v", mntErr) return err } notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if !notmnt { // This is very odd, we don't expect it. We'll try again next sync loop. - glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", b.GetPath()) + klog.Errorf("%s is still mounted, despite call to unmount(). 
Will try again next sync loop.", b.GetPath()) return err } } os.Remove(dir) - glog.Errorf("Failed to mount %s: %v", dir, err) + klog.Errorf("Failed to mount %s: %v", dir, err) return err } if !b.readOnly { volume.SetVolumeOwnership(b, fsGroup) } - glog.V(3).Infof("Cinder volume %s mounted to %s", b.pdName, dir) + klog.V(3).Infof("Cinder volume %s mounted to %s", b.pdName, dir) return nil } @@ -421,18 +432,18 @@ func (c *cinderVolumeUnmounter) TearDownAt(dir string) error { if pathExists, pathErr := util.PathExists(dir); pathErr != nil { return fmt.Errorf("Error checking if path exists: %v", pathErr) } else if !pathExists { - glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) + klog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) return nil } - glog.V(5).Infof("Cinder TearDown of %s", dir) + klog.V(5).Infof("Cinder TearDown of %s", dir) notmnt, err := c.mounter.IsLikelyNotMountPoint(dir) if err != nil { - glog.V(4).Infof("IsLikelyNotMountPoint check failed: %v", err) + klog.V(4).Infof("IsLikelyNotMountPoint check failed: %v", err) return err } if notmnt { - glog.V(4).Infof("Nothing is mounted to %s, ignoring", dir) + klog.V(4).Infof("Nothing is mounted to %s, ignoring", dir) return os.Remove(dir) } @@ -441,15 +452,15 @@ func (c *cinderVolumeUnmounter) TearDownAt(dir string) error { // NewMounter. We could then find volumeID there without probing MountRefs. refs, err := c.mounter.GetMountRefs(dir) if err != nil { - glog.V(4).Infof("GetMountRefs failed: %v", err) + klog.V(4).Infof("GetMountRefs failed: %v", err) return err } if len(refs) == 0 { - glog.V(4).Infof("Directory %s is not mounted", dir) + klog.V(4).Infof("Directory %s is not mounted", dir) return fmt.Errorf("directory %s is not mounted", dir) } c.pdName = path.Base(refs[0]) - glog.V(4).Infof("Found volume %s mounted to %s", c.pdName, dir) + klog.V(4).Infof("Found volume %s mounted to %s", c.pdName, dir) // lock the volume (and thus wait for any concurrrent SetUpAt to finish) c.plugin.volumeLocks.LockKey(c.pdName) @@ -458,23 +469,23 @@ func (c *cinderVolumeUnmounter) TearDownAt(dir string) error { // Reload list of references, there might be SetUpAt finished in the meantime refs, err = c.mounter.GetMountRefs(dir) if err != nil { - glog.V(4).Infof("GetMountRefs failed: %v", err) + klog.V(4).Infof("GetMountRefs failed: %v", err) return err } if err := c.mounter.Unmount(dir); err != nil { - glog.V(4).Infof("Unmount failed: %v", err) + klog.V(4).Infof("Unmount failed: %v", err) return err } - glog.V(3).Infof("Successfully unmounted: %s\n", dir) + klog.V(3).Infof("Successfully unmounted: %s\n", dir) notmnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if notmnt { if err := os.Remove(dir); err != nil { - glog.V(4).Infof("Failed to remove directory after unmount: %v", err) + klog.V(4).Infof("Failed to remove directory after unmount: %v", err) return err } } @@ -507,7 +518,7 @@ func (c *cinderVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopolo return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes()) } - volumeID, sizeGB, labels, fstype, err := c.manager.CreateVolume(c) + volumeID, sizeGB, labels, fstype, err := c.manager.CreateVolume(c, selectedNode, allowedTopologies) if err != nil { return nil, err } @@ -550,6 +561,21 @@ func (c 
*cinderVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopolo pv.Spec.AccessModes = c.plugin.GetAccessModes() } + if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { + requirements := make([]v1.NodeSelectorRequirement, 0) + for k, v := range labels { + if v != "" { + requirements = append(requirements, v1.NodeSelectorRequirement{Key: k, Operator: v1.NodeSelectorOpIn, Values: []string{v}}) + } + } + if len(requirements) > 0 { + pv.Spec.NodeAffinity = new(v1.VolumeNodeAffinity) + pv.Spec.NodeAffinity.Required = new(v1.NodeSelector) + pv.Spec.NodeAffinity.Required.NodeSelectorTerms = make([]v1.NodeSelectorTerm, 1) + pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions = requirements + } + } + return pv, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_block.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_block.go index 02a5d7445007..90e2056e0496 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_block.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_block.go @@ -20,9 +20,9 @@ import ( "fmt" "path/filepath" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" kstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" @@ -44,7 +44,7 @@ func (plugin *cinderPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeNam if err != nil { return nil, err } - glog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err) + klog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err) globalMapPath := filepath.Dir(globalMapPathUUID) if len(globalMapPath) <= 1 { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_util.go index ab115f83add0..57c5254e80bb 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_util.go @@ -24,8 +24,8 @@ import ( "strings" "time" - "github.com/golang/glog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" @@ -95,7 +95,7 @@ func (util *DiskUtil) AttachDisk(b *cinderVolumeMounter, globalPDPath string) er os.Remove(globalPDPath) return err } - glog.V(2).Infof("Safe mount successful: %q\n", devicePath) + klog.V(2).Infof("Safe mount successful: %q\n", devicePath) } return nil } @@ -109,7 +109,7 @@ func (util *DiskUtil) DetachDisk(cd *cinderVolumeUnmounter) error { if err := os.Remove(globalPDPath); err != nil { return err } - glog.V(2).Infof("Successfully unmounted main device: %s\n", globalPDPath) + klog.V(2).Infof("Successfully unmounted main device: %s\n", globalPDPath) cloud, err := cd.plugin.getCloudProvider() if err != nil { @@ -122,7 +122,7 @@ func (util *DiskUtil) DetachDisk(cd *cinderVolumeUnmounter) error { if err = cloud.DetachDisk(instanceid, cd.pdName); err != nil { return err } - glog.V(2).Infof("Successfully detached cinder volume %s", cd.pdName) + klog.V(2).Infof("Successfully detached cinder volume %s", cd.pdName) return nil } @@ -136,10 +136,10 @@ func (util *DiskUtil) DeleteVolume(cd *cinderVolumeDeleter) error { if err = cloud.DeleteVolume(cd.pdName); err != nil { // OpenStack cloud provider returns volume.tryAgainError when necessary, // no handling needed here. 
- glog.V(2).Infof("Error deleting cinder volume %s: %v", cd.pdName, err) + klog.V(2).Infof("Error deleting cinder volume %s: %v", cd.pdName, err) return err } - glog.V(2).Infof("Successfully deleted cinder volume %s", cd.pdName) + klog.V(2).Infof("Successfully deleted cinder volume %s", cd.pdName) return nil } @@ -149,7 +149,7 @@ func getZonesFromNodes(kubeClient clientset.Interface) (sets.String, error) { zones := make(sets.String) nodes, err := kubeClient.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { - glog.V(2).Infof("Error listing nodes") + klog.V(2).Infof("Error listing nodes") return zones, err } for _, node := range nodes.Items { @@ -157,12 +157,12 @@ func getZonesFromNodes(kubeClient clientset.Interface) (sets.String, error) { zones.Insert(zone) } } - glog.V(4).Infof("zones found: %v", zones) + klog.V(4).Infof("zones found: %v", zones) return zones, nil } // CreateVolume uses the cloud provider entrypoint for creating a volume -func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, volumeLabels map[string]string, fstype string, err error) { +func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (volumeID string, volumeSizeGB int, volumeLabels map[string]string, fstype string, err error) { cloud, err := c.plugin.getCloudProvider() if err != nil { return "", 0, nil, "", err @@ -201,28 +201,36 @@ func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, // No zone specified, choose one randomly in the same region zones, err := getZonesFromNodes(c.plugin.host.GetKubeClient()) if err != nil { - glog.V(2).Infof("error getting zone information: %v", err) + klog.V(2).Infof("error getting zone information: %v", err) return "", 0, nil, "", err } // if we did not get any zones, lets leave it blank and gophercloud will // use zone "nova" as default if len(zones) > 0 { - availability = volutil.ChooseZoneForVolume(zones, c.options.PVC.Name) + availability, err = volutil.SelectZoneForVolume(false, false, "", nil, zones, node, allowedTopologies, c.options.PVC.Name) + if err != nil { + klog.V(2).Infof("error selecting zone for volume: %v", err) + return "", 0, nil, "", err + } } } volumeID, volumeAZ, volumeRegion, IgnoreVolumeAZ, err := cloud.CreateVolume(name, volSizeGiB, vtype, availability, c.options.CloudTags) if err != nil { - glog.V(2).Infof("Error creating cinder volume: %v", err) + klog.V(2).Infof("Error creating cinder volume: %v", err) return "", 0, nil, "", err } - glog.V(2).Infof("Successfully created cinder volume %s", volumeID) + klog.V(2).Infof("Successfully created cinder volume %s", volumeID) // these are needed that pod is spawning to same AZ volumeLabels = make(map[string]string) if IgnoreVolumeAZ == false { - volumeLabels[kubeletapis.LabelZoneFailureDomain] = volumeAZ - volumeLabels[kubeletapis.LabelZoneRegion] = volumeRegion + if volumeAZ != "" { + volumeLabels[kubeletapis.LabelZoneFailureDomain] = volumeAZ + } + if volumeRegion != "" { + volumeLabels[kubeletapis.LabelZoneRegion] = volumeRegion + } } return volumeID, volSizeGiB, volumeLabels, fstype, nil } @@ -240,17 +248,17 @@ func probeAttachedVolume() error { cmdSettle := executor.Command("udevadm", argsSettle...) _, errSettle := cmdSettle.CombinedOutput() if errSettle != nil { - glog.Errorf("error running udevadm settle %v\n", errSettle) + klog.Errorf("error running udevadm settle %v\n", errSettle) } args := []string{"trigger"} cmd := executor.Command("udevadm", args...) 
_, err := cmd.CombinedOutput() if err != nil { - glog.Errorf("error running udevadm trigger %v\n", err) + klog.Errorf("error running udevadm trigger %v\n", err) return err } - glog.V(4).Infof("Successfully probed all attachments") + klog.V(4).Infof("Successfully probed all attachments") return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/configmap/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/configmap/BUILD index a6b4e871f7d7..1047a1b090f2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/configmap/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/configmap/BUILD @@ -22,7 +22,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -32,7 +32,7 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/volume:go_default_library", - "//pkg/volume/empty_dir:go_default_library", + "//pkg/volume/emptydir:go_default_library", "//pkg/volume/testing:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap.go index 897355af5c9e..823ce53d4b36 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap.go @@ -19,18 +19,18 @@ package configmap import ( "fmt" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" ) -// ProbeVolumePlugin is the entry point for plugin detection in a package. +// ProbeVolumePlugins is the entry point for plugin detection in a package. func ProbeVolumePlugins() []volume.VolumePlugin { return []volume.VolumePlugin{&configMapPlugin{}} } @@ -180,7 +180,7 @@ func (b *configMapVolumeMounter) SetUp(fsGroup *int64) error { } func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { - glog.V(3).Infof("Setting up volume %v for pod %v at %v", b.volName, b.pod.UID, dir) + klog.V(3).Infof("Setting up volume %v for pod %v at %v", b.volName, b.pod.UID, dir) // Wrap EmptyDir, let it do the setup. 
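// configMap volumes own no storage of their own: the plugin wraps an emptyDir
// volume, lets it materialize the directory, then projects the ConfigMap keys
// into it as files. A sketch of the sequence implemented below (error handling
// elided, call sites abbreviated):
//
//	wrapped, _ := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), &b.pod, *b.opts)
//	_ = wrapped.SetUpAt(dir, fsGroup)          // back dir with the emptyDir
//	payload, _ := MakePayload(b.source.Items, configMap, b.source.DefaultMode, optional)
//	writer, _ := volumeutil.NewAtomicWriter(dir, writerContext)
//	_ = writer.Write(payload)                  // project keys as files, atomically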
wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), &b.pod, *b.opts) @@ -192,7 +192,7 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { configMap, err := b.getConfigMap(b.pod.Namespace, b.source.Name) if err != nil { if !(errors.IsNotFound(err) && optional) { - glog.Errorf("Couldn't get configMap %v/%v: %v", b.pod.Namespace, b.source.Name, err) + klog.Errorf("Couldn't get configMap %v/%v: %v", b.pod.Namespace, b.source.Name, err) return err } configMap = &v1.ConfigMap{ @@ -203,15 +203,8 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { } } - if err := wrapped.SetUpAt(dir, fsGroup); err != nil { - return err - } - if err := volumeutil.MakeNestedMountpoints(b.volName, dir, b.pod); err != nil { - return err - } - totalBytes := totalBytes(configMap) - glog.V(3).Infof("Received configMap %v/%v containing (%v) pieces of data, %v total bytes", + klog.V(3).Infof("Received configMap %v/%v containing (%v) pieces of data, %v total bytes", b.pod.Namespace, b.source.Name, len(configMap.Data)+len(configMap.BinaryData), @@ -222,28 +215,52 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { return err } + setupSuccess := false + if err := wrapped.SetUpAt(dir, fsGroup); err != nil { + return err + } + if err := volumeutil.MakeNestedMountpoints(b.volName, dir, b.pod); err != nil { + return err + } + + defer func() { + // Clean up directories if setup fails + if !setupSuccess { + unmounter, unmountCreateErr := b.plugin.NewUnmounter(b.volName, b.podUID) + if unmountCreateErr != nil { + klog.Errorf("error cleaning up mount %s after failure. Create unmounter failed with %v", b.volName, unmountCreateErr) + return + } + tearDownErr := unmounter.TearDown() + if tearDownErr != nil { + klog.Errorf("Error tearing down volume %s with : %v", b.volName, tearDownErr) + } + } + }() + writerContext := fmt.Sprintf("pod %v/%v volume %v", b.pod.Namespace, b.pod.Name, b.volName) writer, err := volumeutil.NewAtomicWriter(dir, writerContext) if err != nil { - glog.Errorf("Error creating atomic writer: %v", err) + klog.Errorf("Error creating atomic writer: %v", err) return err } err = writer.Write(payload) if err != nil { - glog.Errorf("Error writing payload to dir: %v", err) + klog.Errorf("Error writing payload to dir: %v", err) return err } err = volume.SetVolumeOwnership(b, fsGroup) if err != nil { - glog.Errorf("Error applying volume ownership settings for group: %v", fsGroup) + klog.Errorf("Error applying volume ownership settings for group: %v", fsGroup) return err } + setupSuccess = true return nil } -// Note: this function is exported so that it can be called from the projection volume driver +// MakePayload function is exported so that it can be called from the projection volume driver func MakePayload(mappings []v1.KeyToPath, configMap *v1.ConfigMap, defaultMode *int32, optional bool) (map[string]volumeutil.FileProjection, error) { if defaultMode == nil { return nil, fmt.Errorf("No defaultMode used, not even the default value for it") diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/BUILD index e57b9446bce7..3450ab307029 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/BUILD @@ -16,13 +16,15 @@ go_library( "//pkg/features:go_default_library", "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", - 
"//pkg/volume/csi/nodeupdater:go_default_library", + "//pkg/volume/csi/csiv0:go_default_library", + "//pkg/volume/csi/nodeinfomanager:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/storage/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", @@ -30,9 +32,9 @@ go_library( "//staging/src/k8s.io/csi-api/pkg/client/informers/externalversions:go_default_library", "//staging/src/k8s.io/csi-api/pkg/client/informers/externalversions/csi/v1alpha1:go_default_library", "//staging/src/k8s.io/csi-api/pkg/client/listers/csi/v1alpha1:go_default_library", - "//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/github.com/container-storage-interface/spec/lib/go/csi:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -44,6 +46,7 @@ go_test( "csi_client_test.go", "csi_mounter_test.go", "csi_plugin_test.go", + "main_test.go", ], embed = [":go_default_library"], deps = [ @@ -59,6 +62,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library", @@ -68,8 +72,8 @@ go_test( "//staging/src/k8s.io/client-go/util/testing:go_default_library", "//staging/src/k8s.io/csi-api/pkg/apis/csi/v1alpha1:go_default_library", "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned/fake:go_default_library", - "//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/github.com/container-storage-interface/spec/lib/go/csi:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -84,8 +88,9 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//pkg/volume/csi/csiv0:all-srcs", "//pkg/volume/csi/fake:all-srcs", - "//pkg/volume/csi/nodeupdater:all-srcs", + "//pkg/volume/csi/nodeinfomanager:all-srcs", ], tags = ["automanaged"], visibility = ["//visibility:public"], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go index 963fda0d09c3..d13a7310120d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go @@ -27,9 +27,8 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" - csipb "github.com/container-storage-interface/spec/lib/go/csi/v0" "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1beta1" apierrs 
"k8s.io/apimachinery/pkg/api/errors" @@ -60,23 +59,23 @@ var _ volume.DeviceMounter = &csiAttacher{} func (c *csiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) { if spec == nil { - glog.Error(log("attacher.Attach missing volume.Spec")) + klog.Error(log("attacher.Attach missing volume.Spec")) return "", errors.New("missing spec") } csiSource, err := getCSISourceFromSpec(spec) if err != nil { - glog.Error(log("attacher.Attach failed to get CSI persistent source: %v", err)) + klog.Error(log("attacher.Attach failed to get CSI persistent source: %v", err)) return "", err } skip, err := c.plugin.skipAttach(csiSource.Driver) if err != nil { - glog.Error(log("attacher.Attach failed to find if driver is attachable: %v", err)) + klog.Error(log("attacher.Attach failed to find if driver is attachable: %v", err)) return "", err } if skip { - glog.V(4).Infof(log("skipping attach for driver %s", csiSource.Driver)) + klog.V(4).Infof(log("skipping attach for driver %s", csiSource.Driver)) return "", nil } @@ -95,48 +94,50 @@ func (c *csiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string PersistentVolumeName: &pvName, }, }, - Status: storage.VolumeAttachmentStatus{Attached: false}, } _, err = c.k8s.StorageV1beta1().VolumeAttachments().Create(attachment) alreadyExist := false if err != nil { if !apierrs.IsAlreadyExists(err) { - glog.Error(log("attacher.Attach failed: %v", err)) + klog.Error(log("attacher.Attach failed: %v", err)) return "", err } alreadyExist = true } if alreadyExist { - glog.V(4).Info(log("attachment [%v] for volume [%v] already exists (will not be recreated)", attachID, csiSource.VolumeHandle)) + klog.V(4).Info(log("attachment [%v] for volume [%v] already exists (will not be recreated)", attachID, csiSource.VolumeHandle)) } else { - glog.V(4).Info(log("attachment [%v] for volume [%v] created successfully", attachID, csiSource.VolumeHandle)) + klog.V(4).Info(log("attachment [%v] for volume [%v] created successfully", attachID, csiSource.VolumeHandle)) } if _, err := c.waitForVolumeAttachment(csiSource.VolumeHandle, attachID, csiTimeout); err != nil { return "", err } - glog.V(4).Info(log("attacher.Attach finished OK with VolumeAttachment object [%s]", attachID)) + klog.V(4).Info(log("attacher.Attach finished OK with VolumeAttachment object [%s]", attachID)) + // TODO(71164): In 1.15, return empty devicePath return attachID, nil } -func (c *csiAttacher) WaitForAttach(spec *volume.Spec, attachID string, pod *v1.Pod, timeout time.Duration) (string, error) { +func (c *csiAttacher) WaitForAttach(spec *volume.Spec, _ string, pod *v1.Pod, timeout time.Duration) (string, error) { source, err := getCSISourceFromSpec(spec) if err != nil { - glog.Error(log("attacher.WaitForAttach failed to extract CSI volume source: %v", err)) + klog.Error(log("attacher.WaitForAttach failed to extract CSI volume source: %v", err)) return "", err } + attachID := getAttachmentName(source.VolumeHandle, source.Driver, string(c.plugin.host.GetNodeName())) + skip, err := c.plugin.skipAttach(source.Driver) if err != nil { - glog.Error(log("attacher.Attach failed to find if driver is attachable: %v", err)) + klog.Error(log("attacher.Attach failed to find if driver is attachable: %v", err)) return "", err } if skip { - glog.V(4).Infof(log("Driver is not attachable, skip waiting for attach")) + klog.V(4).Infof(log("Driver is not attachable, skip waiting for attach")) return "", nil } @@ -144,7 +145,7 @@ func (c *csiAttacher) WaitForAttach(spec *volume.Spec, attachID string, pod 
*v1. } func (c *csiAttacher) waitForVolumeAttachment(volumeHandle, attachID string, timeout time.Duration) (string, error) { - glog.V(4).Info(log("probing for updates from CSI driver for [attachment.ID=%v]", attachID)) + klog.V(4).Info(log("probing for updates from CSI driver for [attachment.ID=%v]", attachID)) timer := time.NewTimer(timeout) // TODO (vladimirvivien) investigate making this configurable defer timer.Stop() @@ -153,27 +154,19 @@ func (c *csiAttacher) waitForVolumeAttachment(volumeHandle, attachID string, tim } func (c *csiAttacher) waitForVolumeAttachmentInternal(volumeHandle, attachID string, timer *time.Timer, timeout time.Duration) (string, error) { - glog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID)) + klog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID)) attach, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{}) if err != nil { - glog.Error(log("attacher.WaitForAttach failed for volume [%s] (will continue to try): %v", volumeHandle, err)) + klog.Error(log("attacher.WaitForAttach failed for volume [%s] (will continue to try): %v", volumeHandle, err)) return "", fmt.Errorf("volume %v has GET error for volume attachment %v: %v", volumeHandle, attachID, err) } - // if being deleted, fail fast - if attach.GetDeletionTimestamp() != nil { - glog.Error(log("VolumeAttachment [%s] has deletion timestamp, will not continue to wait for attachment", attachID)) - return "", errors.New("volume attachment is being deleted") + successful, err := verifyAttachmentStatus(attach, volumeHandle) + if err != nil { + return "", err } - // attachment OK - if attach.Status.Attached { + if successful { return attachID, nil } - // driver reports attach error - attachErr := attach.Status.AttachError - if attachErr != nil { - glog.Error(log("attachment for %v failed: %v", volumeHandle, attachErr.Message)) - return "", errors.New(attachErr.Message) - } watcher, err := c.k8s.StorageV1beta1().VolumeAttachments().Watch(meta.SingleObject(meta.ObjectMeta{Name: attachID, ResourceVersion: attach.ResourceVersion})) if err != nil { @@ -187,31 +180,23 @@ func (c *csiAttacher) waitForVolumeAttachmentInternal(volumeHandle, attachID str select { case event, ok := <-ch: if !ok { - glog.Errorf("[attachment.ID=%v] watch channel had been closed", attachID) + klog.Errorf("[attachment.ID=%v] watch channel had been closed", attachID) return "", errors.New("volume attachment watch channel had been closed") } switch event.Type { case watch.Added, watch.Modified: attach, _ := event.Object.(*storage.VolumeAttachment) - // if being deleted, fail fast - if attach.GetDeletionTimestamp() != nil { - glog.Error(log("VolumeAttachment [%s] has deletion timestamp, will not continue to wait for attachment", attachID)) - return "", errors.New("volume attachment is being deleted") + successful, err := verifyAttachmentStatus(attach, volumeHandle) + if err != nil { + return "", err } - // attachment OK - if attach.Status.Attached { + if successful { return attachID, nil } - // driver reports attach error - attachErr := attach.Status.AttachError - if attachErr != nil { - glog.Error(log("attachment for %v failed: %v", volumeHandle, attachErr.Message)) - return "", errors.New(attachErr.Message) - } case watch.Deleted: // if deleted, fail fast - glog.Error(log("VolumeAttachment [%s] has been deleted, will not continue to wait for attachment", attachID)) + klog.Error(log("VolumeAttachment [%s] has been deleted, will not continue to wait for attachment", attachID)) return "", 
errors.New("volume attachment has been deleted") case watch.Error: @@ -220,30 +205,49 @@ func (c *csiAttacher) waitForVolumeAttachmentInternal(volumeHandle, attachID str } case <-timer.C: - glog.Error(log("attacher.WaitForAttach timeout after %v [volume=%v; attachment.ID=%v]", timeout, volumeHandle, attachID)) + klog.Error(log("attacher.WaitForAttach timeout after %v [volume=%v; attachment.ID=%v]", timeout, volumeHandle, attachID)) return "", fmt.Errorf("attachment timeout for volume %v", volumeHandle) } } } +func verifyAttachmentStatus(attachment *storage.VolumeAttachment, volumeHandle string) (bool, error) { + // if being deleted, fail fast + if attachment.GetDeletionTimestamp() != nil { + klog.Error(log("VolumeAttachment [%s] has deletion timestamp, will not continue to wait for attachment", attachment.Name)) + return false, errors.New("volume attachment is being deleted") + } + // attachment OK + if attachment.Status.Attached { + return true, nil + } + // driver reports attach error + attachErr := attachment.Status.AttachError + if attachErr != nil { + klog.Error(log("attachment for %v failed: %v", volumeHandle, attachErr.Message)) + return false, errors.New(attachErr.Message) + } + return false, nil +} + func (c *csiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) { - glog.V(4).Info(log("probing attachment status for %d volume(s) ", len(specs))) + klog.V(4).Info(log("probing attachment status for %d volume(s) ", len(specs))) attached := make(map[*volume.Spec]bool) for _, spec := range specs { if spec == nil { - glog.Error(log("attacher.VolumesAreAttached missing volume.Spec")) + klog.Error(log("attacher.VolumesAreAttached missing volume.Spec")) return nil, errors.New("missing spec") } source, err := getCSISourceFromSpec(spec) if err != nil { - glog.Error(log("attacher.VolumesAreAttached failed: %v", err)) + klog.Error(log("attacher.VolumesAreAttached failed: %v", err)) continue } skip, err := c.plugin.skipAttach(source.Driver) if err != nil { - glog.Error(log("Failed to check CSIDriver for %s: %s", source.Driver, err)) + klog.Error(log("Failed to check CSIDriver for %s: %s", source.Driver, err)) } else { if skip { // This volume is not attachable, pretend it's attached @@ -253,14 +257,14 @@ func (c *csiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.No } attachID := getAttachmentName(source.VolumeHandle, source.Driver, string(nodeName)) - glog.V(4).Info(log("probing attachment status for VolumeAttachment %v", attachID)) + klog.V(4).Info(log("probing attachment status for VolumeAttachment %v", attachID)) attach, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{}) if err != nil { attached[spec] = false - glog.Error(log("attacher.VolumesAreAttached failed for attach.ID=%v: %v", attachID, err)) + klog.Error(log("attacher.VolumesAreAttached failed for attach.ID=%v: %v", attachID, err)) continue } - glog.V(4).Info(log("attacher.VolumesAreAttached attachment [%v] has status.attached=%t", attachID, attach.Status.Attached)) + klog.V(4).Info(log("attacher.VolumesAreAttached attachment [%v] has status.attached=%t", attachID, attach.Status.Attached)) attached[spec] = attach.Status.Attached } @@ -268,18 +272,18 @@ func (c *csiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.No } func (c *csiAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) { - glog.V(4).Info(log("attacher.GetDeviceMountPath(%v)", spec)) + klog.V(4).Info(log("attacher.GetDeviceMountPath(%v)", 
spec)) deviceMountPath, err := makeDeviceMountPath(c.plugin, spec) if err != nil { - glog.Error(log("attacher.GetDeviceMountPath failed to make device mount path: %v", err)) + klog.Error(log("attacher.GetDeviceMountPath failed to make device mount path: %v", err)) return "", err } - glog.V(4).Infof("attacher.GetDeviceMountPath succeeded, deviceMountPath: %s", deviceMountPath) + klog.V(4).Infof("attacher.GetDeviceMountPath succeeded, deviceMountPath: %s", deviceMountPath) return deviceMountPath, nil } func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) (err error) { - glog.V(4).Infof(log("attacher.MountDevice(%s, %s)", devicePath, deviceMountPath)) + klog.V(4).Infof(log("attacher.MountDevice(%s, %s)", devicePath, deviceMountPath)) if deviceMountPath == "" { err = fmt.Errorf("attacher.MountDevice failed, deviceMountPath is empty") @@ -288,12 +292,12 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo mounted, err := isDirMounted(c.plugin, deviceMountPath) if err != nil { - glog.Error(log("attacher.MountDevice failed while checking mount status for dir [%s]", deviceMountPath)) + klog.Error(log("attacher.MountDevice failed while checking mount status for dir [%s]", deviceMountPath)) return err } if mounted { - glog.V(4).Info(log("attacher.MountDevice skipping mount, dir already mounted [%s]", deviceMountPath)) + klog.V(4).Info(log("attacher.MountDevice skipping mount, dir already mounted [%s]", deviceMountPath)) return nil } @@ -303,60 +307,64 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo } csiSource, err := getCSISourceFromSpec(spec) if err != nil { - glog.Error(log("attacher.MountDevice failed to get CSI persistent source: %v", err)) + klog.Error(log("attacher.MountDevice failed to get CSI persistent source: %v", err)) return err } // Store volume metadata for UnmountDevice. Keep it around even if the // driver does not support NodeStage, UnmountDevice still needs it. 
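// The metadata is persisted by saveVolumeData (below) as a small JSON file in
// the parent directory of the device mount path, so that UnmountDevice can
// later recover the driver name and volume handle without the API server.
// Assuming the conventional volDataFileName of "vol_data.json", the file would
// look roughly like:
//
//	{
//	    "volumeHandle": "pv-handle-from-the-driver",
//	    "driverName": "example.csi.driver.io"
//	}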
if err = os.MkdirAll(deviceMountPath, 0750); err != nil { - glog.Error(log("attacher.MountDevice failed to create dir %#v: %v", deviceMountPath, err)) + klog.Error(log("attacher.MountDevice failed to create dir %#v: %v", deviceMountPath, err)) return err } - glog.V(4).Info(log("created target path successfully [%s]", deviceMountPath)) + klog.V(4).Info(log("created target path successfully [%s]", deviceMountPath)) dataDir := filepath.Dir(deviceMountPath) data := map[string]string{ volDataKey.volHandle: csiSource.VolumeHandle, volDataKey.driverName: csiSource.Driver, } if err = saveVolumeData(dataDir, volDataFileName, data); err != nil { - glog.Error(log("failed to save volume info data: %v", err)) + klog.Error(log("failed to save volume info data: %v", err)) if cleanerr := os.RemoveAll(dataDir); err != nil { - glog.Error(log("failed to remove dir after error [%s]: %v", dataDir, cleanerr)) + klog.Error(log("failed to remove dir after error [%s]: %v", dataDir, cleanerr)) } return err } defer func() { if err != nil { // clean up metadata - glog.Errorf(log("attacher.MountDevice failed: %v", err)) + klog.Errorf(log("attacher.MountDevice failed: %v", err)) if err := removeMountDir(c.plugin, deviceMountPath); err != nil { - glog.Error(log("attacher.MountDevice failed to remove mount dir after errir [%s]: %v", deviceMountPath, err)) + klog.Error(log("attacher.MountDevice failed to remove mount dir after error [%s]: %v", deviceMountPath, err)) } } }() if c.csiClient == nil { - c.csiClient = newCsiDriverClient(csiSource.Driver) + c.csiClient, err = newCsiDriverClient(csiDriverName(csiSource.Driver)) + if err != nil { + klog.Errorf(log("attacher.MountDevice failed to create newCsiDriverClient: %v", err)) + return err + } } csi := c.csiClient ctx, cancel := context.WithTimeout(context.Background(), csiTimeout) defer cancel() // Check whether "STAGE_UNSTAGE_VOLUME" is set - stageUnstageSet, err := hasStageUnstageCapability(ctx, csi) + stageUnstageSet, err := csi.NodeSupportsStageUnstage(ctx) if err != nil { return err } if !stageUnstageSet { - glog.Infof(log("attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...")) + klog.Infof(log("attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...")) // defer does *not* remove the metadata file and it's correct - UnmountDevice needs it there.
		return nil
	}

	// Start MountDevice
	nodeName := string(c.plugin.host.GetNodeName())
-	publishVolumeInfo, err := c.plugin.getPublishVolumeInfo(c.k8s, csiSource.VolumeHandle, csiSource.Driver, nodeName)
+	publishContext, err := c.plugin.getPublishContext(c.k8s, csiSource.VolumeHandle, csiSource.Driver, nodeName)

	nodeStageSecrets := map[string]string{}
	if csiSource.NodeStageSecretRef != nil {
@@ -377,7 +385,7 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
	fsType := csiSource.FSType
	err = csi.NodeStageVolume(ctx,
		csiSource.VolumeHandle,
-		publishVolumeInfo,
+		publishContext,
		deviceMountPath,
		fsType,
		accessMode,
@@ -388,7 +396,7 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
		return err
	}

-	glog.V(4).Infof(log("attacher.MountDevice successfully requested NodeStageVolume [%s]", deviceMountPath))
+	klog.V(4).Infof(log("attacher.MountDevice successfully requested NodeStageVolume [%s]", deviceMountPath))
	return nil
}

@@ -399,12 +407,12 @@ var _ volume.DeviceUnmounter = &csiAttacher{}
func (c *csiAttacher) Detach(volumeName string, nodeName types.NodeName) error {
	// volumeName in format driverName<volNameSep>volumeHandle generated by plugin.GetVolumeName()
	if volumeName == "" {
-		glog.Error(log("detacher.Detach missing value for parameter volumeName"))
+		klog.Error(log("detacher.Detach missing value for parameter volumeName"))
		return errors.New("missing expected parameter volumeName")
	}
	parts := strings.Split(volumeName, volNameSep)
	if len(parts) != 2 {
-		glog.Error(log("detacher.Detach insufficient info encoded in volumeName"))
+		klog.Error(log("detacher.Detach insufficient info encoded in volumeName"))
		return errors.New("volumeName missing expected data")
	}
@@ -414,19 +422,19 @@ func (c *csiAttacher) Detach(volumeName string, nodeName types.NodeName) error {
	if err := c.k8s.StorageV1beta1().VolumeAttachments().Delete(attachID, nil); err != nil {
		if apierrs.IsNotFound(err) {
			// object deleted or never existed, done
-			glog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volID))
+			klog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volID))
			return nil
		}
-		glog.Error(log("detacher.Detach failed to delete VolumeAttachment [%s]: %v", attachID, err))
+		klog.Error(log("detacher.Detach failed to delete VolumeAttachment [%s]: %v", attachID, err))
		return err
	}

-	glog.V(4).Info(log("detacher deleted ok VolumeAttachment.ID=%s", attachID))
+	klog.V(4).Info(log("detacher deleted ok VolumeAttachment.ID=%s", attachID))
	return c.waitForVolumeDetachment(volID, attachID)
}

func (c *csiAttacher) waitForVolumeDetachment(volumeHandle, attachID string) error {
-	glog.V(4).Info(log("probing for updates from CSI driver for [attachment.ID=%v]", attachID))
+	klog.V(4).Info(log("probing for updates from CSI driver for [attachment.ID=%v]", attachID))

	timeout := c.waitSleepTime * 10
	timer := time.NewTimer(timeout) // TODO (vladimirvivien) investigate making this configurable
@@ -436,21 +444,21 @@
}

func (c *csiAttacher) waitForVolumeDetachmentInternal(volumeHandle, attachID string, timer *time.Timer, timeout time.Duration) error {
-	glog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID))
+	klog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID))
	attach, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
	if err != nil {
		if apierrs.IsNotFound(err) {
			//object deleted or never existed, done
-			glog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volumeHandle))
+			klog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volumeHandle))
			return nil
		}
-		glog.Error(log("detacher.WaitForDetach failed for volume [%s] (will continue to try): %v", volumeHandle, err))
+		klog.Error(log("detacher.WaitForDetach failed for volume [%s] (will continue to try): %v", volumeHandle, err))
		return err
	}
	// driver reports attach error
	detachErr := attach.Status.DetachError
	if detachErr != nil {
-		glog.Error(log("detachment for VolumeAttachment [%v] for volume [%s] failed: %v", attachID, volumeHandle, detachErr.Message))
+		klog.Error(log("detachment for VolumeAttachment [%v] for volume [%s] failed: %v", attachID, volumeHandle, detachErr.Message))
		return errors.New(detachErr.Message)
	}
@@ -465,7 +473,7 @@ func (c *csiAttacher) waitForVolumeDetachmentInternal(volumeHandle, attachID str
	select {
	case event, ok := <-ch:
		if !ok {
-			glog.Errorf("[attachment.ID=%v] watch channel had been closed", attachID)
+			klog.Errorf("[attachment.ID=%v] watch channel had been closed", attachID)
			return errors.New("volume attachment watch channel had been closed")
		}
@@ -475,12 +483,12 @@ func (c *csiAttacher) waitForVolumeDetachmentInternal(volumeHandle, attachID str
			// driver reports attach error
			detachErr := attach.Status.DetachError
			if detachErr != nil {
-				glog.Error(log("detachment for VolumeAttachment [%v] for volume [%s] failed: %v", attachID, volumeHandle, detachErr.Message))
+				klog.Error(log("detachment for VolumeAttachment [%v] for volume [%s] failed: %v", attachID, volumeHandle, detachErr.Message))
				return errors.New(detachErr.Message)
			}

		case watch.Deleted:
			//object deleted
-			glog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] has been deleted", attachID, volumeHandle))
+			klog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] has been deleted", attachID, volumeHandle))
			return nil

		case watch.Error:
@@ -489,14 +497,14 @@ func (c *csiAttacher) waitForVolumeDetachmentInternal(volumeHandle, attachID str
		}

	case <-timer.C:
-		glog.Error(log("detacher.WaitForDetach timeout after %v [volume=%v; attachment.ID=%v]", timeout, volumeHandle, attachID))
+		klog.Error(log("detacher.WaitForDetach timeout after %v [volume=%v; attachment.ID=%v]", timeout, volumeHandle, attachID))
		return fmt.Errorf("detachment timeout for volume %v", volumeHandle)
	}
	}
}

func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
-	glog.V(4).Info(log("attacher.UnmountDevice(%s)", deviceMountPath))
+	klog.V(4).Info(log("attacher.UnmountDevice(%s)", deviceMountPath))

	// Setup
	var driverName, volID string
@@ -506,31 +514,35 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
		driverName = data[volDataKey.driverName]
		volID = data[volDataKey.volHandle]
	} else {
-		glog.Error(log("UnmountDevice failed to load volume data file [%s]: %v", dataDir, err))
+		klog.Error(log("UnmountDevice failed to load volume data file [%s]: %v", dataDir, err))

		// The volume might have been mounted by an old CSI volume plugin. Fall back to the old behavior: read PV from API server
		driverName, volID, err = getDriverAndVolNameFromDeviceMountPath(c.k8s, deviceMountPath)
		if err != nil {
-			glog.Errorf(log("attacher.UnmountDevice failed to get driver and volume name from device mount path: %v", err))
+			klog.Errorf(log("attacher.UnmountDevice failed to get driver and volume name from device mount path: %v", err))
			return err
		}
	}

	if c.csiClient == nil {
-		c.csiClient = newCsiDriverClient(driverName)
+		c.csiClient, err = newCsiDriverClient(csiDriverName(driverName))
+		if err != nil {
+			klog.Errorf(log("attacher.UnmountDevice failed to create newCsiDriverClient: %v", err))
+			return err
+		}
	}
	csi := c.csiClient

	ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
	defer cancel()
	// Check whether "STAGE_UNSTAGE_VOLUME" is set
-	stageUnstageSet, err := hasStageUnstageCapability(ctx, csi)
+	stageUnstageSet, err := csi.NodeSupportsStageUnstage(ctx)
	if err != nil {
-		glog.Errorf(log("attacher.UnmountDevice failed to check whether STAGE_UNSTAGE_VOLUME set: %v", err))
+		klog.Errorf(log("attacher.UnmountDevice failed to check whether STAGE_UNSTAGE_VOLUME set: %v", err))
		return err
	}
	if !stageUnstageSet {
-		glog.Infof(log("attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice..."))
+		klog.Infof(log("attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice..."))
		// Just delete the global directory + json file
		if err := removeMountDir(c.plugin, deviceMountPath); err != nil {
			return fmt.Errorf("failed to clean up global mount %s: %s", dataDir, err)
@@ -545,7 +557,7 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
		deviceMountPath)
	if err != nil {
-		glog.Errorf(log("attacher.UnmountDevice failed: %v", err))
+		klog.Errorf(log("attacher.UnmountDevice failed: %v", err))
		return err
	}

@@ -554,28 +566,10 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
		return fmt.Errorf("failed to clean up global mount %s: %s", dataDir, err)
	}

-	glog.V(4).Infof(log("attacher.UnmountDevice successfully requested NodeStageVolume [%s]", deviceMountPath))
+	klog.V(4).Infof(log("attacher.UnmountDevice successfully requested NodeUnstageVolume [%s]", deviceMountPath))
	return nil
}

-func hasStageUnstageCapability(ctx context.Context, csi csiClient) (bool, error) {
-	capabilities, err := csi.NodeGetCapabilities(ctx)
-	if err != nil {
-		return false, err
-	}
-
-	stageUnstageSet := false
-	if capabilities == nil {
-		return false, nil
-	}
-	for _, capability := range capabilities {
-		if capability.GetRpc().GetType() == csipb.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME {
-			stageUnstageSet = true
-		}
-	}
-	return stageUnstageSet, nil
-}
-
// getAttachmentName returns csi-<sha256(volName,csiDriverName,nodeName)>
func getAttachmentName(volName, csiDriverName, nodeName string) string {
	result := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", volName, csiDriverName, nodeName)))
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_block.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_block.go
index 6be3ec249475..3a80352c580c 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_block.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_block.go
@@ -21,22 +21,26 @@ import (
	"errors"
	"fmt"
	"os"
+	"path"
	"path/filepath"

-	"github.com/golang/glog"
+	"k8s.io/klog"

	"k8s.io/api/core/v1"
+	storage "k8s.io/api/storage/v1beta1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
+	kstrings "k8s.io/kubernetes/pkg/util/strings"
	"k8s.io/kubernetes/pkg/volume"
+	ioutil "k8s.io/kubernetes/pkg/volume/util"
)

type csiBlockMapper struct {
	k8s        kubernetes.Interface
	csiClient  csiClient
	plugin     *csiPlugin
-	driverName string
+	driverName csiDriverName
	specName   string
	volumeID   string
	readOnly   bool
@@ -47,80 +51,61 @@ type csiBlockMapper struct {

var _ volume.BlockVolumeMapper = &csiBlockMapper{}

-// GetGlobalMapPath returns a path (on the node) to a device file which will be symlinked to
-// Example: plugins/kubernetes.io/csi/volumeDevices/{volumeID}/dev
+// GetGlobalMapPath returns a global map path (on the node) to a device file which will be symlinked to
+// Example: plugins/kubernetes.io/csi/volumeDevices/{pvname}/dev
func (m *csiBlockMapper) GetGlobalMapPath(spec *volume.Spec) (string, error) {
	dir := getVolumeDevicePluginDir(spec.Name(), m.plugin.host)
-	glog.V(4).Infof(log("blockMapper.GetGlobalMapPath = %s", dir))
+	klog.V(4).Infof(log("blockMapper.GetGlobalMapPath = %s", dir))
	return dir, nil
}

+// getStagingPath returns a staging path for a directory (on the node) that should be used on NodeStageVolume/NodeUnstageVolume
+// Example: plugins/kubernetes.io/csi/volumeDevices/staging/{pvname}
+func (m *csiBlockMapper) getStagingPath() string {
+	sanitizedSpecVolID := kstrings.EscapeQualifiedNameForDisk(m.specName)
+	return path.Join(m.plugin.host.GetVolumeDevicePluginDir(csiPluginName), "staging", sanitizedSpecVolID)
+}
+
+// getPublishPath returns a publish path for a file (on the node) that should be used on NodePublishVolume/NodeUnpublishVolume
+// Example: plugins/kubernetes.io/csi/volumeDevices/publish/{pvname}
+func (m *csiBlockMapper) getPublishPath() string {
+	sanitizedSpecVolID := kstrings.EscapeQualifiedNameForDisk(m.specName)
+	return path.Join(m.plugin.host.GetVolumeDevicePluginDir(csiPluginName), "publish", sanitizedSpecVolID)
+}
+
// GetPodDeviceMapPath returns pod's device file which will be mapped to a volume
-// returns: pods/{podUid}/volumeDevices/kubernetes.io~csi/{volumeID}/dev, {volumeID}
+// returns: pods/{podUid}/volumeDevices/kubernetes.io~csi, {pvname}
func (m *csiBlockMapper) GetPodDeviceMapPath() (string, string) {
-	path := filepath.Join(m.plugin.host.GetPodVolumeDeviceDir(m.podUID, csiPluginName), m.specName, "dev")
+	path := m.plugin.host.GetPodVolumeDeviceDir(m.podUID, kstrings.EscapeQualifiedNameForDisk(csiPluginName))
	specName := m.specName
-	glog.V(4).Infof(log("blockMapper.GetPodDeviceMapPath [path=%s; name=%s]", path, specName))
+	klog.V(4).Infof(log("blockMapper.GetPodDeviceMapPath [path=%s; name=%s]", path, specName))
	return path, specName
}

-// SetUpDevice ensures the device is attached returns path where the device is located.
-func (m *csiBlockMapper) SetUpDevice() (string, error) {
-	if !m.plugin.blockEnabled {
-		return "", errors.New("CSIBlockVolume feature not enabled")
-	}
-
-	glog.V(4).Infof(log("blockMapper.SetupDevice called"))
-
-	if m.spec == nil {
-		glog.Error(log("blockMapper.Map spec is nil"))
-		return "", fmt.Errorf("spec is nil")
-	}
-	csiSource, err := getCSISourceFromSpec(m.spec)
-	if err != nil {
-		glog.Error(log("blockMapper.SetupDevice failed to get CSI persistent source: %v", err))
-		return "", err
-	}
-
-	globalMapPath, err := m.GetGlobalMapPath(m.spec)
-	if err != nil {
-		glog.Error(log("blockMapper.SetupDevice failed to get global map path: %v", err))
-		return "", err
-	}
-
-	globalMapPathBlockFile := filepath.Join(globalMapPath, "file")
-	glog.V(4).Infof(log("blockMapper.SetupDevice global device map path file set [%s]", globalMapPathBlockFile))
+// stageVolumeForBlock stages a block volume to stagingPath
+func (m *csiBlockMapper) stageVolumeForBlock(
+	ctx context.Context,
+	csi csiClient,
+	accessMode v1.PersistentVolumeAccessMode,
+	csiSource *v1.CSIPersistentVolumeSource,
+	attachment *storage.VolumeAttachment,
+) (string, error) {
+	klog.V(4).Infof(log("blockMapper.stageVolumeForBlock called"))

-	csi := m.csiClient
-	ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
-	defer cancel()
+	stagingPath := m.getStagingPath()
+	klog.V(4).Infof(log("blockMapper.stageVolumeForBlock stagingPath set [%s]", stagingPath))

	// Check whether "STAGE_UNSTAGE_VOLUME" is set
-	stageUnstageSet, err := hasStageUnstageCapability(ctx, csi)
+	stageUnstageSet, err := csi.NodeSupportsStageUnstage(ctx)
	if err != nil {
-		glog.Error(log("blockMapper.SetupDevice failed to check STAGE_UNSTAGE_VOLUME capability: %v", err))
+		klog.Error(log("blockMapper.stageVolumeForBlock failed to check STAGE_UNSTAGE_VOLUME capability: %v", err))
		return "", err
	}
	if !stageUnstageSet {
-		glog.Infof(log("blockMapper.SetupDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
+		klog.Infof(log("blockMapper.stageVolumeForBlock STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
		return "", nil
	}

-	// Start MountDevice
-	nodeName := string(m.plugin.host.GetNodeName())
-	attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)
-
-	// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
-	attachment, err := m.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
-	if err != nil {
-		glog.Error(log("blockMapper.SetupDevice failed to get volume attachment [id=%v]: %v", attachID, err))
-		return "", err
-	}
-
-	if attachment == nil {
-		glog.Error(log("blockMapper.SetupDevice unable to find VolumeAttachment [id=%s]", attachID))
-		return "", errors.New("no existing VolumeAttachment found")
-	}

	publishVolumeInfo := attachment.Status.AttachmentMetadata

	nodeStageSecrets := map[string]string{}
@@ -132,193 +117,244 @@ func (m *csiBlockMapper) SetUpDevice() (string, error) {
		}
	}

-	// setup path globalMapPath and block file before call to NodeStageVolume
-	if err := os.MkdirAll(globalMapPath, 0750); err != nil {
-		glog.Error(log("blockMapper.SetupDevice failed to create dir %s: %v", globalMapPath, err))
-		return "", err
-	}
-	glog.V(4).Info(log("blockMapper.SetupDevice created global device map path successfully [%s]", globalMapPath))
-
-	// create block device file
-	blockFile, err := os.OpenFile(globalMapPathBlockFile, os.O_CREATE|os.O_RDWR, 0750)
-	if err != nil {
-		glog.Error(log("blockMapper.SetupDevice failed to create dir %s: %v", globalMapPathBlockFile, err))
-		return "", err
-	}
-	if err := blockFile.Close(); err != nil {
-		glog.Error(log("blockMapper.SetupDevice failed to close file %s: %v", globalMapPathBlockFile, err))
+	// Create the stagingPath directory before the call to NodeStageVolume
+	if err := os.MkdirAll(stagingPath, 0750); err != nil {
+		klog.Error(log("blockMapper.stageVolumeForBlock failed to create dir %s: %v", stagingPath, err))
		return "", err
	}
-	glog.V(4).Info(log("blockMapper.SetupDevice created global map path block device file successfully [%s]", globalMapPathBlockFile))
-
-	//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
-	accessMode := v1.ReadWriteOnce
-	if m.spec.PersistentVolume.Spec.AccessModes != nil {
-		accessMode = m.spec.PersistentVolume.Spec.AccessModes[0]
-	}
+	klog.V(4).Info(log("blockMapper.stageVolumeForBlock created stagingPath directory successfully [%s]", stagingPath))

+	// Request to stage a block volume to stagingPath.
+	// The expected implementation for the driver is creating a driver-specific resource on stagingPath and
+	// attaching the block volume to the node.
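+	// A hedged sketch of what the call below turns into at the gRPC layer (see
+	// nodeStageVolumeV1 in csi_client.go further down in this diff): because
+	// fsTypeBlockName is passed as the fsType, the client fills in a Block access
+	// type rather than a Mount one, roughly:
+	//
+	//	req := &csipbv1.NodeStageVolumeRequest{
+	//		VolumeId:          csiSource.VolumeHandle,
+	//		PublishContext:    publishVolumeInfo,
+	//		StagingTargetPath: stagingPath,
+	//		VolumeCapability: &csipbv1.VolumeCapability{
+	//			AccessMode: &csipbv1.VolumeCapability_AccessMode{Mode: asCSIAccessModeV1(accessMode)},
+	//			AccessType: &csipbv1.VolumeCapability_Block{Block: &csipbv1.VolumeCapability_BlockVolume{}},
+	//		},
+	//	}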
	err = csi.NodeStageVolume(ctx,
		csiSource.VolumeHandle,
		publishVolumeInfo,
-		globalMapPathBlockFile,
+		stagingPath,
		fsTypeBlockName,
		accessMode,
		nodeStageSecrets,
		csiSource.VolumeAttributes)

	if err != nil {
-		glog.Error(log("blockMapper.SetupDevice failed: %v", err))
-		if err := os.RemoveAll(globalMapPath); err != nil {
-			glog.Error(log("blockMapper.SetupDevice failed to remove dir after a NodeStageVolume() error [%s]: %v", globalMapPath, err))
-		}
+		klog.Error(log("blockMapper.stageVolumeForBlock failed: %v", err))
		return "", err
	}

-	glog.V(4).Infof(log("blockMapper.SetupDevice successfully requested NodeStageVolume [%s]", globalMapPathBlockFile))
-	return globalMapPathBlockFile, nil
+	klog.V(4).Infof(log("blockMapper.stageVolumeForBlock successfully requested NodeStageVolume [%s]", stagingPath))
+	return stagingPath, nil
}

-func (m *csiBlockMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error {
-	if !m.plugin.blockEnabled {
-		return errors.New("CSIBlockVolume feature not enabled")
-	}
+// publishVolumeForBlock publishes a block volume to publishPath
+func (m *csiBlockMapper) publishVolumeForBlock(
+	ctx context.Context,
+	csi csiClient,
+	accessMode v1.PersistentVolumeAccessMode,
+	csiSource *v1.CSIPersistentVolumeSource,
+	attachment *storage.VolumeAttachment,
+	stagingPath string,
+) (string, error) {
+	klog.V(4).Infof(log("blockMapper.publishVolumeForBlock called"))

-	glog.V(4).Infof(log("blockMapper.MapDevice mapping block device %s", devicePath))
+	publishVolumeInfo := attachment.Status.AttachmentMetadata

-	if m.spec == nil {
-		glog.Error(log("blockMapper.MapDevice spec is nil"))
-		return fmt.Errorf("spec is nil")
+	nodePublishSecrets := map[string]string{}
+	var err error
+	if csiSource.NodePublishSecretRef != nil {
+		nodePublishSecrets, err = getCredentialsFromSecret(m.k8s, csiSource.NodePublishSecretRef)
+		if err != nil {
+			klog.Errorf("blockMapper.publishVolumeForBlock failed to get NodePublishSecretRef %s/%s: %v",
+				csiSource.NodePublishSecretRef.Namespace, csiSource.NodePublishSecretRef.Name, err)
+			return "", err
+		}
	}

-	csiSource, err := getCSISourceFromSpec(m.spec)
+	publishPath := m.getPublishPath()
+	// Set up a parent directory for publishPath before the call to NodePublishVolume
+	publishDir := filepath.Dir(publishPath)
+	if err := os.MkdirAll(publishDir, 0750); err != nil {
+		klog.Error(log("blockMapper.publishVolumeForBlock failed to create dir %s: %v", publishDir, err))
+		return "", err
+	}
+	klog.V(4).Info(log("blockMapper.publishVolumeForBlock created directory for publishPath successfully [%s]", publishDir))
+
+	// Request to publish a block volume to publishPath.
+	// The expectation for the driver is to place the block volume on the publishPath, by bind-mounting the device file on the publishPath or
+	// creating a device file on the publishPath.
+	// The parent directory for publishPath is created by k8s, but the driver is responsible for creating publishPath itself.
+	// If the driver doesn't implement NodeStageVolume, attaching the block volume to the node may be done here.
+	err = csi.NodePublishVolume(
+		ctx,
+		m.volumeID,
+		m.readOnly,
+		stagingPath,
+		publishPath,
+		accessMode,
+		publishVolumeInfo,
+		csiSource.VolumeAttributes,
+		nodePublishSecrets,
+		fsTypeBlockName,
+		[]string{},
+	)
+
	if err != nil {
-		glog.Error(log("blockMapper.MapDevice failed to get CSI persistent source: %v", err))
-		return err
+		klog.Errorf(log("blockMapper.publishVolumeForBlock failed: %v", err))
+		return "", err
	}

-	csi := m.csiClient
-	ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
-	defer cancel()
+	return publishPath, nil
+}

-	globalMapPathBlockFile := devicePath
-	dir, _ := m.GetPodDeviceMapPath()
-	targetBlockFilePath := filepath.Join(dir, "file")
-	glog.V(4).Infof(log("blockMapper.MapDevice target volume map file path %s", targetBlockFilePath))
+// SetUpDevice ensures the device is attached and returns the path where the device is located.
+func (m *csiBlockMapper) SetUpDevice() (string, error) {
+	if !m.plugin.blockEnabled {
+		return "", errors.New("CSIBlockVolume feature not enabled")
+	}
+	klog.V(4).Infof(log("blockMapper.SetUpDevice called"))

-	stageCapable, err := hasStageUnstageCapability(ctx, csi)
-	if err != nil {
-		glog.Error(log("blockMapper.MapDevice failed to check for STAGE_UNSTAGE_VOLUME capabilty: %v", err))
-		return err
+	// Get csiSource from spec
+	if m.spec == nil {
+		klog.Error(log("blockMapper.SetUpDevice spec is nil"))
+		return "", fmt.Errorf("spec is nil")
	}

-	if !stageCapable {
-		globalMapPathBlockFile = ""
+	csiSource, err := getCSISourceFromSpec(m.spec)
+	if err != nil {
+		klog.Error(log("blockMapper.SetUpDevice failed to get CSI persistent source: %v", err))
+		return "", err
	}

+	// Search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
	nodeName := string(m.plugin.host.GetNodeName())
	attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName)
-
-	// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
	attachment, err := m.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{})
	if err != nil {
-		glog.Error(log("blockMapper.MapDevice failed to get volume attachment [id=%v]: %v", attachID, err))
-		return err
+		klog.Error(log("blockMapper.SetUpDevice failed to get volume attachment [id=%v]: %v", attachID, err))
+		return "", err
	}

	if attachment == nil {
-		glog.Error(log("blockMapper.MapDevice unable to find VolumeAttachment [id=%s]", attachID))
-		return errors.New("no existing VolumeAttachment found")
+		klog.Error(log("blockMapper.SetUpDevice unable to find VolumeAttachment [id=%s]", attachID))
+		return "", errors.New("no existing VolumeAttachment found")
	}

-	publishVolumeInfo := attachment.Status.AttachmentMetadata
-
-	nodePublishSecrets := map[string]string{}
-	if csiSource.NodePublishSecretRef != nil {
-		nodePublishSecrets, err = getCredentialsFromSecret(m.k8s, csiSource.NodePublishSecretRef)
-		if err != nil {
-			glog.Errorf("blockMapper.MapDevice failed to get NodePublishSecretRef %s/%s: %v",
-				csiSource.NodePublishSecretRef.Namespace, csiSource.NodePublishSecretRef.Name, err)
-			return err
-		}
+	//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
+	accessMode := v1.ReadWriteOnce
+	if m.spec.PersistentVolume.Spec.AccessModes != nil {
+		accessMode = m.spec.PersistentVolume.Spec.AccessModes[0]
	}

-	if err := os.MkdirAll(dir, 0750); err != nil {
-		glog.Error(log("blockMapper.MapDevice failed to create dir %s: %v", dir, err))
-		return err
+	ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
+	defer cancel()
+
+	// Call NodeStageVolume
+	stagingPath, err := m.stageVolumeForBlock(ctx, m.csiClient, accessMode, csiSource, attachment)
+	if err != nil {
+		return "", err
	}
-	glog.V(4).Info(log("blockMapper.MapDevice created target volume map path successfully [%s]", dir))

-	// create target map volume block file
-	targetBlockFile, err := os.OpenFile(targetBlockFilePath, os.O_CREATE|os.O_RDWR, 0750)
+	// Call NodePublishVolume
+	publishPath, err := m.publishVolumeForBlock(ctx, m.csiClient, accessMode, csiSource, attachment, stagingPath)
	if err != nil {
-		glog.Error(log("blockMapper.MapDevice failed to create file %s: %v", targetBlockFilePath, err))
-		return err
+		return "", err
	}
-	if err := targetBlockFile.Close(); err != nil {
-		glog.Error(log("blockMapper.MapDevice failed to close file %s: %v", targetBlockFilePath, err))
+
+	return publishPath, nil
+}
+
+func (m *csiBlockMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error {
+	return ioutil.MapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID)
+}
+
+var _ volume.BlockVolumeUnmapper = &csiBlockMapper{}
+
+// unpublishVolumeForBlock unpublishes a block volume from publishPath
+func (m *csiBlockMapper) unpublishVolumeForBlock(ctx context.Context, csi csiClient, publishPath string) error {
+	// Request to unpublish a block volume from publishPath.
+	// The expectation for the driver is to remove the block volume from the publishPath, by unmounting the bind-mounted device file
+	// or deleting the device file.
+	// The driver is responsible for deleting publishPath itself.
+	// If the driver doesn't implement NodeUnstageVolume, detaching the block volume from the node may be done here.
+	if err := csi.NodeUnpublishVolume(ctx, m.volumeID, publishPath); err != nil {
+		klog.Error(log("blockMapper.unpublishVolumeForBlock failed: %v", err))
		return err
	}
-	glog.V(4).Info(log("blockMapper.MapDevice created target volume map file successfully [%s]", targetBlockFilePath))
+	klog.V(4).Infof(log("blockMapper.unpublishVolumeForBlock NodeUnpublished successfully [%s]", publishPath))

-	//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
-	accessMode := v1.ReadWriteOnce
-	if m.spec.PersistentVolume.Spec.AccessModes != nil {
-		accessMode = m.spec.PersistentVolume.Spec.AccessModes[0]
+	return nil
+}
+
+// unstageVolumeForBlock unstages a block volume from stagingPath
+func (m *csiBlockMapper) unstageVolumeForBlock(ctx context.Context, csi csiClient, stagingPath string) error {
+	// Check whether "STAGE_UNSTAGE_VOLUME" is set
+	stageUnstageSet, err := csi.NodeSupportsStageUnstage(ctx)
+	if err != nil {
+		klog.Error(log("blockMapper.unstageVolumeForBlock failed to check STAGE_UNSTAGE_VOLUME capability: %v", err))
+		return err
+	}
+	if !stageUnstageSet {
+		klog.Infof(log("blockMapper.unstageVolumeForBlock STAGE_UNSTAGE_VOLUME capability not set. Skipping unstageVolumeForBlock..."))
+		return nil
	}

-	err = csi.NodePublishVolume(
-		ctx,
-		m.volumeID,
-		m.readOnly,
-		globalMapPathBlockFile,
-		targetBlockFilePath,
-		accessMode,
-		publishVolumeInfo,
-		csiSource.VolumeAttributes,
-		nodePublishSecrets,
-		fsTypeBlockName,
-	)
+	// Request to unstage a block volume from stagingPath.
+	// The expected implementation for the driver is removing a driver-specific resource in stagingPath and
+	// detaching the block volume from the node.
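+	// The wire request here is small (a hedged sketch, mirroring nodeUnstageVolumeV1
+	// in csi_client.go below): only the volume ID and the staging path travel to
+	// the driver, roughly
+	//
+	//	req := &csipbv1.NodeUnstageVolumeRequest{
+	//		VolumeId:          m.volumeID,
+	//		StagingTargetPath: stagingPath,
+	//	}
+	//
+	// Note the teardown order in TearDownDevice below: unpublish first, then
+	// unstage, the reverse of SetUpDevice.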
+	if err := csi.NodeUnstageVolume(ctx, m.volumeID, stagingPath); err != nil {
+		klog.Errorf(log("blockMapper.unstageVolumeForBlock failed: %v", err))
+		return err
+	}
+	klog.V(4).Infof(log("blockMapper.unstageVolumeForBlock NodeUnstageVolume successfully [%s]", stagingPath))

-	if err != nil {
-		glog.Errorf(log("blockMapper.MapDevice failed: %v", err))
-		if err := os.RemoveAll(dir); err != nil {
-			glog.Error(log("blockMapper.MapDevice failed to remove mapped dir after a NodePublish() error [%s]: %v", dir, err))
-		}
+	// Remove stagingPath directory and its contents
+	if err := os.RemoveAll(stagingPath); err != nil {
+		klog.Error(log("blockMapper.unstageVolumeForBlock failed to remove staging path after NodeUnstageVolume() error [%s]: %v", stagingPath, err))
		return err
	}

	return nil
}

-var _ volume.BlockVolumeUnmapper = &csiBlockMapper{}
-
// TearDownDevice removes traces of the SetUpDevice.
func (m *csiBlockMapper) TearDownDevice(globalMapPath, devicePath string) error {
	if !m.plugin.blockEnabled {
		return errors.New("CSIBlockVolume feature not enabled")
	}

-	glog.V(4).Infof(log("unmapper.TearDownDevice(globalMapPath=%s; devicePath=%s)", globalMapPath, devicePath))
+	klog.V(4).Infof(log("unmapper.TearDownDevice(globalMapPath=%s; devicePath=%s)", globalMapPath, devicePath))

-	csi := m.csiClient
	ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
	defer cancel()

-	// unmap global device map path
-	if err := csi.NodeUnstageVolume(ctx, m.volumeID, globalMapPath); err != nil {
-		glog.Errorf(log("blockMapper.TearDownDevice failed: %v", err))
-		return err
+	// Call NodeUnpublishVolume
+	publishPath := m.getPublishPath()
+	if _, err := os.Stat(publishPath); err != nil {
+		if os.IsNotExist(err) {
+			klog.V(4).Infof(log("blockMapper.TearDownDevice publishPath(%s) has already been deleted, skip calling NodeUnpublishVolume", publishPath))
+		} else {
+			return err
+		}
+	} else {
+		err := m.unpublishVolumeForBlock(ctx, m.csiClient, publishPath)
+		if err != nil {
+			return err
+		}
	}
-	glog.V(4).Infof(log("blockMapper.TearDownDevice NodeUnstageVolume successfully [%s]", globalMapPath))

-	// request to remove pod volume map path also
-	podVolumePath, volumeName := m.GetPodDeviceMapPath()
-	podVolumeMapPath := filepath.Join(podVolumePath, volumeName)
-	if err := csi.NodeUnpublishVolume(ctx, m.volumeID, podVolumeMapPath); err != nil {
-		glog.Error(log("blockMapper.TearDownDevice failed: %v", err))
-		return err
+	// Call NodeUnstageVolume
+	stagingPath := m.getStagingPath()
+	if _, err := os.Stat(stagingPath); err != nil {
+		if os.IsNotExist(err) {
+			klog.V(4).Infof(log("blockMapper.TearDownDevice stagingPath(%s) has already been deleted, skip calling NodeUnstageVolume", stagingPath))
+		} else {
+			return err
+		}
+	} else {
+		err := m.unstageVolumeForBlock(ctx, m.csiClient, stagingPath)
+		if err != nil {
+			return err
+		}
	}
-	glog.V(4).Infof(log("blockMapper.TearDownDevice NodeUnpublished successfully [%s]", podVolumeMapPath))

	return nil
}
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_client.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_client.go
index 4a4cb4176bf6..bbe30134629b 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_client.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_client.go
@@ -20,22 +20,25 @@ import (
	"context"
	"errors"
	"fmt"
+	"io"
	"net"
	"time"

-	csipb "github.com/container-storage-interface/spec/lib/go/csi/v0"
-	"github.com/golang/glog"
+	csipbv1 "github.com/container-storage-interface/spec/lib/go/csi"
	"google.golang.org/grpc"
	api "k8s.io/api/core/v1"
+	utilversion "k8s.io/apimachinery/pkg/util/version"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	"k8s.io/klog"
	"k8s.io/kubernetes/pkg/features"
+	csipbv0 "k8s.io/kubernetes/pkg/volume/csi/csiv0"
)

type csiClient interface {
	NodeGetInfo(ctx context.Context) (
		nodeID string,
		maxVolumePerNode int64,
-		accessibleTopology *csipb.Topology,
+		accessibleTopology map[string]string,
		err error)
	NodePublishVolume(
		ctx context.Context,
@@ -44,10 +47,11 @@ type csiClient interface {
		stagingTargetPath string,
		targetPath string,
		accessMode api.PersistentVolumeAccessMode,
-		volumeInfo map[string]string,
-		volumeAttribs map[string]string,
-		nodePublishSecrets map[string]string,
+		publishContext map[string]string,
+		volumeContext map[string]string,
+		secrets map[string]string,
		fsType string,
+		mountOptions []string,
	) error
	NodeUnpublishVolume(
		ctx context.Context,
@@ -60,42 +64,176 @@ type csiClient interface {
		stagingTargetPath string,
		fsType string,
		accessMode api.PersistentVolumeAccessMode,
-		nodeStageSecrets map[string]string,
-		volumeAttribs map[string]string,
+		secrets map[string]string,
+		volumeContext map[string]string,
	) error
	NodeUnstageVolume(ctx context.Context, volID, stagingTargetPath string) error
-	NodeGetCapabilities(ctx context.Context) ([]*csipb.NodeServiceCapability, error)
+	NodeSupportsStageUnstage(ctx context.Context) (bool, error)
}

+// Strongly typed address
+type csiAddr string
+
+// Strongly typed driver name
+type csiDriverName string
+
// csiClient encapsulates all csi-plugin methods
type csiDriverClient struct {
-	driverName string
-	nodeClient csipb.NodeClient
+	driverName          csiDriverName
+	addr                csiAddr
+	nodeV1ClientCreator nodeV1ClientCreator
+	nodeV0ClientCreator nodeV0ClientCreator
}

var _ csiClient = &csiDriverClient{}

-func newCsiDriverClient(driverName string) *csiDriverClient {
-	c := &csiDriverClient{driverName: driverName}
-	return c
+type nodeV1ClientCreator func(addr csiAddr) (
+	nodeClient csipbv1.NodeClient,
+	closer io.Closer,
+	err error,
+)
+
+type nodeV0ClientCreator func(addr csiAddr) (
+	nodeClient csipbv0.NodeClient,
+	closer io.Closer,
+	err error,
+)
+
+// newV1NodeClient creates a new NodeClient with the internally used gRPC
+// connection set up. It also returns a closer which must be called to close
+// the gRPC connection when the NodeClient is not used anymore.
+// This is the default implementation for the nodeV1ClientCreator, used in
+// newCsiDriverClient.
+func newV1NodeClient(addr csiAddr) (nodeClient csipbv1.NodeClient, closer io.Closer, err error) {
+	var conn *grpc.ClientConn
+	conn, err = newGrpcConn(addr)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	nodeClient = csipbv1.NewNodeClient(conn)
+	return nodeClient, conn, nil
+}
+
+// newV0NodeClient creates a new NodeClient with the internally used gRPC
+// connection set up. It also returns a closer which must be called to close
+// the gRPC connection when the NodeClient is not used anymore.
+// This is the default implementation for the nodeV0ClientCreator, used in
+// newCsiDriverClient.
+func newV0NodeClient(addr csiAddr) (nodeClient csipbv0.NodeClient, closer io.Closer, err error) {
+	var conn *grpc.ClientConn
+	conn, err = newGrpcConn(addr)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	nodeClient = csipbv0.NewNodeClient(conn)
+	return nodeClient, conn, nil
+}
+
+func newCsiDriverClient(driverName csiDriverName) (*csiDriverClient, error) {
+	if driverName == "" {
+		return nil, fmt.Errorf("driver name is empty")
+	}
+
+	addr := fmt.Sprintf(csiAddrTemplate, driverName)
+	requiresV0Client := true
+	if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPluginsWatcher) {
+		var existingDriver csiDriver
+		driverExists := false
+		func() {
+			csiDrivers.RLock()
+			defer csiDrivers.RUnlock()
+			existingDriver, driverExists = csiDrivers.driversMap[string(driverName)]
+		}()
+
+		if !driverExists {
+			return nil, fmt.Errorf("driver name %s not found in the list of registered CSI drivers", driverName)
+		}
+
+		addr = existingDriver.driverEndpoint
+		requiresV0Client = versionRequiresV0Client(existingDriver.highestSupportedVersion)
+	}
+
+	nodeV1ClientCreator := newV1NodeClient
+	nodeV0ClientCreator := newV0NodeClient
+	if requiresV0Client {
+		nodeV1ClientCreator = nil
+	} else {
+		nodeV0ClientCreator = nil
+	}
+
+	return &csiDriverClient{
+		driverName:          driverName,
+		addr:                csiAddr(addr),
+		nodeV1ClientCreator: nodeV1ClientCreator,
+		nodeV0ClientCreator: nodeV0ClientCreator,
+	}, nil
}

func (c *csiDriverClient) NodeGetInfo(ctx context.Context) (
	nodeID string,
	maxVolumePerNode int64,
-	accessibleTopology *csipb.Topology,
+	accessibleTopology map[string]string,
+	err error) {
+	klog.V(4).Info(log("calling NodeGetInfo rpc"))
+	if c.nodeV1ClientCreator != nil {
+		return c.nodeGetInfoV1(ctx)
+	} else if c.nodeV0ClientCreator != nil {
+		return c.nodeGetInfoV0(ctx)
+	}
+
+	err = fmt.Errorf("failed to call NodeGetInfo. Both nodeV1ClientCreator and nodeV0ClientCreator are nil")
+
+	return nodeID, maxVolumePerNode, accessibleTopology, err
+}
+
+func (c *csiDriverClient) nodeGetInfoV1(ctx context.Context) (
+	nodeID string,
+	maxVolumePerNode int64,
+	accessibleTopology map[string]string,
+	err error) {
+
+	nodeClient, closer, err := c.nodeV1ClientCreator(c.addr)
+	if err != nil {
+		return "", 0, nil, err
+	}
+	defer closer.Close()
+
+	res, err := nodeClient.NodeGetInfo(ctx, &csipbv1.NodeGetInfoRequest{})
+	if err != nil {
+		return "", 0, nil, err
+	}
+
+	topology := res.GetAccessibleTopology()
+	if topology != nil {
+		accessibleTopology = topology.Segments
+	}
+	return res.GetNodeId(), res.GetMaxVolumesPerNode(), accessibleTopology, nil
+}
+
+func (c *csiDriverClient) nodeGetInfoV0(ctx context.Context) (
+	nodeID string,
+	maxVolumePerNode int64,
+	accessibleTopology map[string]string,
	err error) {
-	glog.V(4).Info(log("calling NodeGetInfo rpc"))

-	conn, err := newGrpcConn(c.driverName)
+	nodeClient, closer, err := c.nodeV0ClientCreator(c.addr)
	if err != nil {
		return "", 0, nil, err
	}
-	defer conn.Close()
-	nodeClient := csipb.NewNodeClient(conn)
+	defer closer.Close()

-	res, err := nodeClient.NodeGetInfo(ctx, &csipb.NodeGetInfoRequest{})
-	return res.GetNodeId(), res.GetMaxVolumesPerNode(), res.GetAccessibleTopology(), nil
+	res, err := nodeClient.NodeGetInfo(ctx, &csipbv0.NodeGetInfoRequest{})
+	if err != nil {
+		return "", 0, nil, err
+	}
+
+	topology := res.GetAccessibleTopology()
+	if topology != nil {
+		accessibleTopology = topology.Segments
+	}
+	return res.GetNodeId(), res.GetMaxVolumesPerNode(), accessibleTopology, nil
}

func (c *csiDriverClient) NodePublishVolume(
@@ -105,36 +243,135 @@ func (c *csiDriverClient) NodePublishVolume(
	stagingTargetPath string,
	targetPath string,
	accessMode api.PersistentVolumeAccessMode,
-	volumeInfo map[string]string,
-	volumeAttribs map[string]string,
-	nodePublishSecrets map[string]string,
+	publishContext map[string]string,
+	volumeContext map[string]string,
+	secrets map[string]string,
	fsType string,
+	mountOptions []string,
) error {
-	glog.V(4).Info(log("calling NodePublishVolume rpc [volid=%s,target_path=%s]", volID, targetPath))
+	klog.V(4).Info(log("calling NodePublishVolume rpc [volid=%s,target_path=%s]", volID, targetPath))
	if volID == "" {
		return errors.New("missing volume id")
	}
	if targetPath == "" {
		return errors.New("missing target path")
	}
+	if c.nodeV1ClientCreator != nil {
+		return c.nodePublishVolumeV1(
+			ctx,
+			volID,
+			readOnly,
+			stagingTargetPath,
+			targetPath,
+			accessMode,
+			publishContext,
+			volumeContext,
+			secrets,
+			fsType,
+			mountOptions,
+		)
+	} else if c.nodeV0ClientCreator != nil {
+		return c.nodePublishVolumeV0(
+			ctx,
+			volID,
+			readOnly,
+			stagingTargetPath,
+			targetPath,
+			accessMode,
+			publishContext,
+			volumeContext,
+			secrets,
+			fsType,
+			mountOptions,
+		)
+	}
+
+	return fmt.Errorf("failed to call NodePublishVolume. Both nodeV1ClientCreator and nodeV0ClientCreator are nil")
+
+}
+
+func (c *csiDriverClient) nodePublishVolumeV1(
+	ctx context.Context,
+	volID string,
+	readOnly bool,
+	stagingTargetPath string,
+	targetPath string,
+	accessMode api.PersistentVolumeAccessMode,
+	publishContext map[string]string,
+	volumeContext map[string]string,
+	secrets map[string]string,
+	fsType string,
+	mountOptions []string,
+) error {
+	nodeClient, closer, err := c.nodeV1ClientCreator(c.addr)
+	if err != nil {
+		return err
+	}
+	defer closer.Close()
+
+	req := &csipbv1.NodePublishVolumeRequest{
+		VolumeId:       volID,
+		TargetPath:     targetPath,
+		Readonly:       readOnly,
+		PublishContext: publishContext,
+		VolumeContext:  volumeContext,
+		Secrets:        secrets,
+		VolumeCapability: &csipbv1.VolumeCapability{
+			AccessMode: &csipbv1.VolumeCapability_AccessMode{
+				Mode: asCSIAccessModeV1(accessMode),
+			},
+		},
+	}
+	if stagingTargetPath != "" {
+		req.StagingTargetPath = stagingTargetPath
+	}
+
+	if fsType == fsTypeBlockName {
+		req.VolumeCapability.AccessType = &csipbv1.VolumeCapability_Block{
+			Block: &csipbv1.VolumeCapability_BlockVolume{},
+		}
+	} else {
+		req.VolumeCapability.AccessType = &csipbv1.VolumeCapability_Mount{
+			Mount: &csipbv1.VolumeCapability_MountVolume{
+				FsType:     fsType,
+				MountFlags: mountOptions,
+			},
+		}
+	}
+
+	_, err = nodeClient.NodePublishVolume(ctx, req)
+	return err
+}

-	conn, err := newGrpcConn(c.driverName)
+func (c *csiDriverClient) nodePublishVolumeV0(
+	ctx context.Context,
+	volID string,
+	readOnly bool,
+	stagingTargetPath string,
+	targetPath string,
+	accessMode api.PersistentVolumeAccessMode,
+	publishContext map[string]string,
+	volumeContext map[string]string,
+	secrets map[string]string,
+	fsType string,
+	mountOptions []string,
+) error {
+	nodeClient, closer, err := c.nodeV0ClientCreator(c.addr)
	if err != nil {
		return err
	}
-	defer conn.Close()
-	nodeClient := csipb.NewNodeClient(conn)
+	defer closer.Close()

-	req := &csipb.NodePublishVolumeRequest{
+	req := &csipbv0.NodePublishVolumeRequest{
		VolumeId:   volID,
		TargetPath: targetPath,
		Readonly:   readOnly,
-		PublishInfo:        volumeInfo,
-		VolumeAttributes:   volumeAttribs,
-		NodePublishSecrets: nodePublishSecrets,
-		VolumeCapability: &csipb.VolumeCapability{
-			AccessMode: &csipb.VolumeCapability_AccessMode{
-				Mode: asCSIAccessMode(accessMode),
+		PublishInfo:        publishContext,
+		VolumeAttributes:   volumeContext,
+		NodePublishSecrets: secrets,
+		VolumeCapability: &csipbv0.VolumeCapability{
+			AccessMode: &csipbv0.VolumeCapability_AccessMode{
+				Mode: asCSIAccessModeV0(accessMode),
			},
		},
	}
@@ -143,13 +380,14 @@ func (c *csiDriverClient) NodePublishVolume(
	}

	if fsType == fsTypeBlockName {
-		req.VolumeCapability.AccessType = &csipb.VolumeCapability_Block{
-			Block: &csipb.VolumeCapability_BlockVolume{},
+		req.VolumeCapability.AccessType = &csipbv0.VolumeCapability_Block{
+			Block: &csipbv0.VolumeCapability_BlockVolume{},
		}
	} else {
-		req.VolumeCapability.AccessType = &csipb.VolumeCapability_Mount{
-			Mount: &csipb.VolumeCapability_MountVolume{
-				FsType: fsType,
+		req.VolumeCapability.AccessType = &csipbv0.VolumeCapability_Mount{
+			Mount: &csipbv0.VolumeCapability_MountVolume{
+				FsType:     fsType,
+				MountFlags: mountOptions,
			},
		}
	}
@@ -159,7 +397,7 @@ func (c *csiDriverClient) NodePublishVolume(
}

func (c *csiDriverClient) NodeUnpublishVolume(ctx context.Context, volID string, targetPath string) error {
-	glog.V(4).Info(log("calling NodeUnpublishVolume rpc: [volid=%s, target_path=%s", volID, targetPath))
+	klog.V(4).Info(log("calling NodeUnpublishVolume rpc: [volid=%s, target_path=%s", volID, targetPath))
	if volID == "" {
		return errors.New("missing volume id")
	}
@@ -167,14 +405,39 @@ func (c *csiDriverClient) NodeUnpublishVolume(ctx context.Context, volID string,
		return errors.New("missing target path")
	}

-	conn, err := newGrpcConn(c.driverName)
+	if c.nodeV1ClientCreator != nil {
+		return c.nodeUnpublishVolumeV1(ctx, volID, targetPath)
+	} else if c.nodeV0ClientCreator != nil {
+		return c.nodeUnpublishVolumeV0(ctx, volID, targetPath)
+	}
+
+	return fmt.Errorf("failed to call NodeUnpublishVolume. Both nodeV1ClientCreator and nodeV0ClientCreator are nil")
+}
+
+func (c *csiDriverClient) nodeUnpublishVolumeV1(ctx context.Context, volID string, targetPath string) error {
+	nodeClient, closer, err := c.nodeV1ClientCreator(c.addr)
+	if err != nil {
+		return err
+	}
+	defer closer.Close()
+
+	req := &csipbv1.NodeUnpublishVolumeRequest{
+		VolumeId:   volID,
+		TargetPath: targetPath,
+	}
+
+	_, err = nodeClient.NodeUnpublishVolume(ctx, req)
+	return err
+}
+
+func (c *csiDriverClient) nodeUnpublishVolumeV0(ctx context.Context, volID string, targetPath string) error {
+	nodeClient, closer, err := c.nodeV0ClientCreator(c.addr)
	if err != nil {
		return err
	}
-	defer conn.Close()
-	nodeClient := csipb.NewNodeClient(conn)
+	defer closer.Close()

-	req := &csipb.NodeUnpublishVolumeRequest{
+	req := &csipbv0.NodeUnpublishVolumeRequest{
		VolumeId:   volID,
		TargetPath: targetPath,
	}
@@ -185,14 +448,14 @@ func (c *csiDriverClient) NodeUnpublishVolume(ctx context.Context, volID string,

func (c *csiDriverClient) NodeStageVolume(ctx context.Context,
	volID string,
-	publishInfo map[string]string,
+	publishContext map[string]string,
	stagingTargetPath string,
	fsType string,
	accessMode api.PersistentVolumeAccessMode,
-	nodeStageSecrets map[string]string,
-	volumeAttribs map[string]string,
+	secrets map[string]string,
+	volumeContext map[string]string,
) error {
-	glog.V(4).Info(log("calling NodeStageVolume rpc [volid=%s,staging_target_path=%s]", volID, stagingTargetPath))
+	klog.V(4).Info(log("calling NodeStageVolume rpc [volid=%s,staging_target_path=%s]", volID, stagingTargetPath))
	if volID == "" {
		return errors.New("missing volume id")
	}
@@ -200,33 +463,96 @@ func (c *csiDriverClient) NodeStageVolume(ctx context.Context,
		return errors.New("missing staging target path")
	}

-	conn, err := newGrpcConn(c.driverName)
+	if c.nodeV1ClientCreator != nil {
+		return c.nodeStageVolumeV1(ctx, volID, publishContext, stagingTargetPath, fsType, accessMode, secrets, volumeContext)
+	} else if c.nodeV0ClientCreator != nil {
+		return c.nodeStageVolumeV0(ctx, volID, publishContext, stagingTargetPath, fsType, accessMode, secrets, volumeContext)
+	}
+
+	return fmt.Errorf("failed to call NodeStageVolume. Both nodeV1ClientCreator and nodeV0ClientCreator are nil")
+}
+
+func (c *csiDriverClient) nodeStageVolumeV1(
+	ctx context.Context,
+	volID string,
+	publishContext map[string]string,
+	stagingTargetPath string,
+	fsType string,
+	accessMode api.PersistentVolumeAccessMode,
+	secrets map[string]string,
+	volumeContext map[string]string,
+) error {
+	nodeClient, closer, err := c.nodeV1ClientCreator(c.addr)
+	if err != nil {
+		return err
+	}
+	defer closer.Close()
+
+	req := &csipbv1.NodeStageVolumeRequest{
+		VolumeId:          volID,
+		PublishContext:    publishContext,
+		StagingTargetPath: stagingTargetPath,
+		VolumeCapability: &csipbv1.VolumeCapability{
+			AccessMode: &csipbv1.VolumeCapability_AccessMode{
+				Mode: asCSIAccessModeV1(accessMode),
+			},
+		},
+		Secrets:       secrets,
+		VolumeContext: volumeContext,
+	}
+
+	if fsType == fsTypeBlockName {
+		req.VolumeCapability.AccessType = &csipbv1.VolumeCapability_Block{
+			Block: &csipbv1.VolumeCapability_BlockVolume{},
+		}
+	} else {
+		req.VolumeCapability.AccessType = &csipbv1.VolumeCapability_Mount{
+			Mount: &csipbv1.VolumeCapability_MountVolume{
+				FsType: fsType,
+			},
+		}
+	}
+
+	_, err = nodeClient.NodeStageVolume(ctx, req)
+	return err
+}
+
+func (c *csiDriverClient) nodeStageVolumeV0(
+	ctx context.Context,
+	volID string,
+	publishContext map[string]string,
+	stagingTargetPath string,
+	fsType string,
+	accessMode api.PersistentVolumeAccessMode,
+	secrets map[string]string,
+	volumeContext map[string]string,
+) error {
+	nodeClient, closer, err := c.nodeV0ClientCreator(c.addr)
	if err != nil {
		return err
	}
-	defer conn.Close()
-	nodeClient := csipb.NewNodeClient(conn)
+	defer closer.Close()

-	req := &csipb.NodeStageVolumeRequest{
+	req := &csipbv0.NodeStageVolumeRequest{
		VolumeId:          volID,
-		PublishInfo:       publishInfo,
+		PublishInfo:       publishContext,
		StagingTargetPath: stagingTargetPath,
-		VolumeCapability: &csipb.VolumeCapability{
-			AccessMode: &csipb.VolumeCapability_AccessMode{
-				Mode: asCSIAccessMode(accessMode),
+		VolumeCapability: &csipbv0.VolumeCapability{
+			AccessMode: &csipbv0.VolumeCapability_AccessMode{
+				Mode: asCSIAccessModeV0(accessMode),
			},
		},
-		NodeStageSecrets: nodeStageSecrets,
-		VolumeAttributes: volumeAttribs,
+		NodeStageSecrets: secrets,
+		VolumeAttributes: volumeContext,
	}

	if fsType == fsTypeBlockName {
-		req.VolumeCapability.AccessType = &csipb.VolumeCapability_Block{
-			Block: &csipb.VolumeCapability_BlockVolume{},
+		req.VolumeCapability.AccessType = &csipbv0.VolumeCapability_Block{
+			Block: &csipbv0.VolumeCapability_BlockVolume{},
		}
	} else {
-		req.VolumeCapability.AccessType = &csipb.VolumeCapability_Mount{
-			Mount: &csipb.VolumeCapability_MountVolume{
+		req.VolumeCapability.AccessType = &csipbv0.VolumeCapability_Mount{
+			Mount: &csipbv0.VolumeCapability_MountVolume{
				FsType: fsType,
			},
		}
@@ -237,7 +563,7 @@ func (c *csiDriverClient) NodeStageVolume(ctx context.Context,
}

func (c *csiDriverClient) NodeUnstageVolume(ctx context.Context, volID, stagingTargetPath string) error {
-	glog.V(4).Info(log("calling NodeUnstageVolume rpc [volid=%s,staging_target_path=%s]", volID, stagingTargetPath))
+	klog.V(4).Info(log("calling NodeUnstageVolume rpc [volid=%s,staging_target_path=%s]", volID, stagingTargetPath))
	if volID == "" {
		return errors.New("missing volume id")
	}
@@ -245,14 +571,38 @@ func (c *csiDriverClient) NodeUnstageVolume(ctx context.Context, volID, stagingT
		return errors.New("missing staging target path")
	}

-	conn, err := newGrpcConn(c.driverName)
+	if c.nodeV1ClientCreator != nil {
+		return c.nodeUnstageVolumeV1(ctx, volID, stagingTargetPath)
+	} else if c.nodeV0ClientCreator != nil {
+		return c.nodeUnstageVolumeV0(ctx, volID, stagingTargetPath)
+	}
+
+	return fmt.Errorf("failed to call NodeUnstageVolume. Both nodeV1ClientCreator and nodeV0ClientCreator are nil")
+}
+
+func (c *csiDriverClient) nodeUnstageVolumeV1(ctx context.Context, volID, stagingTargetPath string) error {
+	nodeClient, closer, err := c.nodeV1ClientCreator(c.addr)
+	if err != nil {
+		return err
+	}
+	defer closer.Close()
+
+	req := &csipbv1.NodeUnstageVolumeRequest{
+		VolumeId:          volID,
+		StagingTargetPath: stagingTargetPath,
+	}
+	_, err = nodeClient.NodeUnstageVolume(ctx, req)
+	return err
+}
+
+func (c *csiDriverClient) nodeUnstageVolumeV0(ctx context.Context, volID, stagingTargetPath string) error {
+	nodeClient, closer, err := c.nodeV0ClientCreator(c.addr)
	if err != nil {
		return err
	}
-	defer conn.Close()
-	nodeClient := csipb.NewNodeClient(conn)
+	defer closer.Close()

-	req := &csipb.NodeUnstageVolumeRequest{
+	req := &csipbv0.NodeUnstageVolumeRequest{
		VolumeId:          volID,
		StagingTargetPath: stagingTargetPath,
	}
@@ -260,57 +610,113 @@ func (c *csiDriverClient) NodeUnstageVolume(ctx context.Context, volID, stagingT
	return err
}

-func (c *csiDriverClient) NodeGetCapabilities(ctx context.Context) ([]*csipb.NodeServiceCapability, error) {
-	glog.V(4).Info(log("calling NodeGetCapabilities rpc"))
+func (c *csiDriverClient) NodeSupportsStageUnstage(ctx context.Context) (bool, error) {
+	klog.V(4).Info(log("calling NodeGetCapabilities rpc to determine if NodeSupportsStageUnstage"))
+
+	if c.nodeV1ClientCreator != nil {
+		return c.nodeSupportsStageUnstageV1(ctx)
+	} else if c.nodeV0ClientCreator != nil {
+		return c.nodeSupportsStageUnstageV0(ctx)
+	}
+
+	return false, fmt.Errorf("failed to call NodeSupportsStageUnstage. Both nodeV1ClientCreator and nodeV0ClientCreator are nil")
+}
+
+func (c *csiDriverClient) nodeSupportsStageUnstageV1(ctx context.Context) (bool, error) {
+	nodeClient, closer, err := c.nodeV1ClientCreator(c.addr)
+	if err != nil {
+		return false, err
+	}
+	defer closer.Close()
+
+	req := &csipbv1.NodeGetCapabilitiesRequest{}
+	resp, err := nodeClient.NodeGetCapabilities(ctx, req)
+	if err != nil {
+		return false, err
+	}
+
+	capabilities := resp.GetCapabilities()
+
+	stageUnstageSet := false
+	if capabilities == nil {
+		return false, nil
+	}
+	for _, capability := range capabilities {
+		if capability.GetRpc().GetType() == csipbv1.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME {
+			stageUnstageSet = true
+		}
+	}
+	return stageUnstageSet, nil
+}

-	conn, err := newGrpcConn(c.driverName)
+func (c *csiDriverClient) nodeSupportsStageUnstageV0(ctx context.Context) (bool, error) {
+	nodeClient, closer, err := c.nodeV0ClientCreator(c.addr)
	if err != nil {
-		return nil, err
+		return false, err
	}
-	defer conn.Close()
-	nodeClient := csipb.NewNodeClient(conn)
+	defer closer.Close()

-	req := &csipb.NodeGetCapabilitiesRequest{}
+	req := &csipbv0.NodeGetCapabilitiesRequest{}
	resp, err := nodeClient.NodeGetCapabilities(ctx, req)
	if err != nil {
-		return nil, err
+		return false, err
+	}
+
+	capabilities := resp.GetCapabilities()
+
+	stageUnstageSet := false
+	if capabilities == nil {
+		return false, nil
	}
-	return resp.GetCapabilities(), nil
+	for _, capability := range capabilities {
+		if capability.GetRpc().GetType() == csipbv0.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME {
+			stageUnstageSet = true
+		}
+	}
+	return stageUnstageSet, nil
}

-func asCSIAccessMode(am api.PersistentVolumeAccessMode) csipb.VolumeCapability_AccessMode_Mode {
+func asCSIAccessModeV1(am api.PersistentVolumeAccessMode) csipbv1.VolumeCapability_AccessMode_Mode {
	switch am {
	case api.ReadWriteOnce:
-		return csipb.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
+		return csipbv1.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
	case api.ReadOnlyMany:
-		return csipb.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY
+		return csipbv1.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY
	case api.ReadWriteMany:
-		return csipb.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER
+		return csipbv1.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER
	}
-	return csipb.VolumeCapability_AccessMode_UNKNOWN
+	return csipbv1.VolumeCapability_AccessMode_UNKNOWN
}

-func newGrpcConn(driverName string) (*grpc.ClientConn, error) {
-	if driverName == "" {
-		return nil, fmt.Errorf("driver name is empty")
-	}
-	addr := fmt.Sprintf(csiAddrTemplate, driverName)
-	// TODO once KubeletPluginsWatcher graduates to beta, remove FeatureGate check
-	if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPluginsWatcher) {
-		driver, ok := csiDrivers.driversMap[driverName]
-		if !ok {
-			return nil, fmt.Errorf("driver name %s not found in the list of registered CSI drivers", driverName)
-		}
-		addr = driver.driverEndpoint
+func asCSIAccessModeV0(am api.PersistentVolumeAccessMode) csipbv0.VolumeCapability_AccessMode_Mode {
+	switch am {
+	case api.ReadWriteOnce:
+		return csipbv0.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
+	case api.ReadOnlyMany:
+		return csipbv0.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY
+	case api.ReadWriteMany:
+		return csipbv0.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER
	}
+	return csipbv0.VolumeCapability_AccessMode_UNKNOWN
+}
+
+func newGrpcConn(addr csiAddr) (*grpc.ClientConn, error) {
	network := "unix"
-	glog.V(4).Infof(log("creating new gRPC connection for [%s://%s]", network, addr))
+	klog.V(4).Infof(log("creating new gRPC connection for [%s://%s]", network, addr))

	return grpc.Dial(
-		addr,
+		string(addr),
		grpc.WithInsecure(),
		grpc.WithDialer(func(target string, timeout time.Duration) (net.Conn, error) {
			return net.Dial(network, target)
		}),
	)
}
+
+func versionRequiresV0Client(version *utilversion.Version) bool {
+	if version != nil && version.Major() == 0 {
+		return true
+	}
+
+	return false
+}
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go
index b1e8ea958ae9..e5c02fad49c0 100644
--- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go
+++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go
@@ -23,7 +23,7 @@ import (
	"os"
	"path"

-	"github.com/golang/glog"
+	"k8s.io/klog"

	api "k8s.io/api/core/v1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
@@ -55,18 +55,18 @@ var (
)

type csiMountMgr struct {
-	csiClient    csiClient
-	k8s          kubernetes.Interface
-	plugin       *csiPlugin
-	driverName   string
-	volumeID     string
-	specVolumeID string
-	readOnly     bool
-	spec         *volume.Spec
-	pod          *api.Pod
-	podUID       types.UID
-	options      volume.VolumeOptions
-	volumeInfo   map[string]string
+	csiClient      csiClient
+	k8s            kubernetes.Interface
+	plugin         *csiPlugin
+	driverName     csiDriverName
+	volumeID       string
+	specVolumeID   string
+	readOnly       bool
+	spec           *volume.Spec
+	pod            *api.Pod
+	podUID         types.UID
+	options        volume.VolumeOptions
+	publishContext map[string]string
	volume.MetricsNil
}

@@ -75,7 +75,7 @@ var _ volume.Volume = &csiMountMgr{}

func (c *csiMountMgr) GetPath() string {
	dir := path.Join(getTargetPath(c.podUID, c.specVolumeID, c.plugin.host), "/mount")
-	glog.V(4).Info(log("mounter.GetPath generated [%s]", dir))
+	klog.V(4).Info(log("mounter.GetPath generated [%s]", dir))
	return dir
}

@@ -96,22 +96,22 @@ func (c *csiMountMgr) SetUp(fsGroup *int64) error {
}

func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
-	glog.V(4).Infof(log("Mounter.SetUpAt(%s)", dir))
+	klog.V(4).Infof(log("Mounter.SetUpAt(%s)", dir))

	mounted, err := isDirMounted(c.plugin, dir)
	if err != nil {
-		glog.Error(log("mounter.SetUpAt failed while checking mount status for dir [%s]", dir))
+		klog.Error(log("mounter.SetUpAt failed while checking mount status for dir [%s]", dir))
		return err
	}

	if mounted {
-		glog.V(4).Info(log("mounter.SetUpAt skipping mount, dir already mounted [%s]", dir))
+		klog.V(4).Info(log("mounter.SetUpAt skipping mount, dir already mounted [%s]", dir))
		return nil
	}

	csiSource, err := getCSISourceFromSpec(c.spec)
	if err != nil {
-		glog.Error(log("mounter.SetupAt failed to get CSI persistent source: %v", err))
+		klog.Error(log("mounter.SetupAt failed to get CSI persistent source: %v", err))
		return err
	}

@@ -121,23 +121,23 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
	// Check for STAGE_UNSTAGE_VOLUME set and populate deviceMountPath if so
	deviceMountPath := ""
-	stageUnstageSet, err := hasStageUnstageCapability(ctx, csi)
+	stageUnstageSet, err := csi.NodeSupportsStageUnstage(ctx)
	if err != nil {
-		glog.Error(log("mounter.SetUpAt failed to check for STAGE_UNSTAGE_VOLUME capabilty: %v", err))
+		klog.Error(log("mounter.SetUpAt failed to check for STAGE_UNSTAGE_VOLUME capability: %v", err))
		return err
	}

	if stageUnstageSet {
		deviceMountPath, err = makeDeviceMountPath(c.plugin, c.spec)
		if err != nil {
-			glog.Error(log("mounter.SetUpAt failed to make device mount path: %v", err))
+			klog.Error(log("mounter.SetUpAt failed to make device mount path: %v", err))
			return err
		}
	}

	// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
-	if c.volumeInfo == nil {
+	if c.publishContext == nil {
		nodeName := string(c.plugin.host.GetNodeName())
-		c.volumeInfo, err = c.plugin.getPublishVolumeInfo(c.k8s, c.volumeID, c.driverName, nodeName)
+		c.publishContext, err = c.plugin.getPublishContext(c.k8s, c.volumeID, string(c.driverName), nodeName)
		if err != nil {
			return err
		}
@@ -156,10 +156,10 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {

	// create target_dir before call to NodePublish
	if err := os.MkdirAll(dir, 0750); err != nil {
-		glog.Error(log("mouter.SetUpAt failed to create dir %#v: %v", dir, err))
+		klog.Error(log("mounter.SetUpAt failed to create dir %#v: %v", dir, err))
		return err
	}
-	glog.V(4).Info(log("created target path successfully [%s]", dir))
+	klog.V(4).Info(log("created target path successfully [%s]", dir))

	//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
	accessMode := api.ReadWriteOnce
@@ -170,7 +170,7 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
	// Inject pod information into volume_attributes
	podAttrs, err := c.podAttributes()
	if err != nil {
-		glog.Error(log("mouter.SetUpAt failed to assemble volume attributes: %v", err))
+		klog.Error(log("mounter.SetUpAt failed to assemble volume attributes: %v", err))
		return err
	}
	if podAttrs != nil {
@@ -191,64 +191,58 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
		deviceMountPath,
		dir,
		accessMode,
-		c.volumeInfo,
+		c.publishContext,
		attribs,
		nodePublishSecrets,
		fsType,
+		c.spec.PersistentVolume.Spec.MountOptions,
	)

	if err != nil {
-		glog.Errorf(log("mounter.SetupAt failed: %v", err))
+		klog.Errorf(log("mounter.SetupAt failed: %v", err))
		if removeMountDirErr := removeMountDir(c.plugin, dir); removeMountDirErr != nil {
-			glog.Error(log("mounter.SetupAt failed to remove mount dir after a NodePublish() error [%s]: %v", dir, removeMountDirErr))
+			klog.Error(log("mounter.SetupAt failed to remove mount dir after a NodePublish() error [%s]: %v", dir, removeMountDirErr))
		}
		return err
	}

	// apply volume ownership
-	if !c.readOnly && fsGroup != nil {
-		err := volume.SetVolumeOwnership(c, fsGroup)
-		if err != nil {
-			// attempt to rollback mount.
-			glog.Error(log("mounter.SetupAt failed to set fsgroup volume ownership for [%s]: %v", c.volumeID, err))
-			glog.V(4).Info(log("mounter.SetupAt attempting to unpublish volume %s due to previous error", c.volumeID))
-			if unpubErr := csi.NodeUnpublishVolume(ctx, c.volumeID, dir); unpubErr != nil {
-				glog.Error(log(
-					"mounter.SetupAt failed to unpublish volume [%s]: %v (caused by previous NodePublish error: %v)",
-					c.volumeID, unpubErr, err,
-				))
-				return fmt.Errorf("%v (caused by %v)", unpubErr, err)
-			}
+	// The following logic is derived from https://github.com/kubernetes/kubernetes/issues/66323
+	// if fstype is "", then skip fsgroup (could be indication of non-block filesystem)
+	// if fstype is provided and pv.AccessMode == ReadWriteOnce, then apply fsgroup

-			if unmountErr := removeMountDir(c.plugin, dir); unmountErr != nil {
-				glog.Error(log(
-					"mounter.SetupAt failed to clean mount dir [%s]: %v (caused by previous NodePublish error: %v)",
-					dir, unmountErr, err,
-				))
-				return fmt.Errorf("%v (caused by %v)", unmountErr, err)
-			}
+	err = c.applyFSGroup(fsType, fsGroup)
+	if err != nil {
+		// attempt to rollback mount.
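+		// Rollback note (a reading of the code that follows): when applyFSGroup
+		// fails, the volume is unpublished and the mount dir removed, but the
+		// error returned to the caller is always fsGrpErr, so cleanup failures
+		// are logged without masking the root cause.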
+ fsGrpErr := fmt.Errorf("applyFSGroup failed for vol %s: %v", c.volumeID, err) + if unpubErr := csi.NodeUnpublishVolume(ctx, c.volumeID, dir); unpubErr != nil { + klog.Error(log("NodeUnpublishVolume failed for [%s]: %v", c.volumeID, unpubErr)) + return fsGrpErr + } - return err + if unmountErr := removeMountDir(c.plugin, dir); unmountErr != nil { + klog.Error(log("removeMountDir failed for [%s]: %v", dir, unmountErr)) + return fsGrpErr } - glog.V(4).Info(log("mounter.SetupAt sets fsGroup to [%d] for %s", *fsGroup, c.volumeID)) + return fsGrpErr } - glog.V(4).Infof(log("mounter.SetUp successfully requested NodePublish [%s]", dir)) + klog.V(4).Infof(log("mounter.SetUp successfully requested NodePublish [%s]", dir)) return nil } func (c *csiMountMgr) podAttributes() (map[string]string, error) { - if !utilfeature.DefaultFeatureGate.Enabled(features.CSIPodInfo) { + if !utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) { return nil, nil } if c.plugin.csiDriverLister == nil { return nil, errors.New("CSIDriver lister does not exist") } - csiDriver, err := c.plugin.csiDriverLister.Get(c.driverName) + csiDriver, err := c.plugin.csiDriverLister.Get(string(c.driverName)) if err != nil { if apierrs.IsNotFound(err) { - glog.V(4).Infof(log("CSIDriver %q not found, not adding pod information", c.driverName)) + klog.V(4).Infof(log("CSIDriver %q not found, not adding pod information", c.driverName)) return nil, nil } return nil, err @@ -256,7 +250,7 @@ func (c *csiMountMgr) podAttributes() (map[string]string, error) { // if PodInfoOnMountVersion is not set or not v1 we do not set pod attributes if csiDriver.Spec.PodInfoOnMountVersion == nil || *csiDriver.Spec.PodInfoOnMountVersion != currentPodInfoMountVersion { - glog.V(4).Infof(log("CSIDriver %q does not require pod information", c.driverName)) + klog.V(4).Infof(log("CSIDriver %q does not require pod information", c.driverName)) return nil, nil } @@ -266,7 +260,7 @@ func (c *csiMountMgr) podAttributes() (map[string]string, error) { "csi.storage.k8s.io/pod.uid": string(c.pod.UID), "csi.storage.k8s.io/serviceAccount.name": c.pod.Spec.ServiceAccountName, } - glog.V(4).Infof(log("CSIDriver %q requires pod information", c.driverName)) + klog.V(4).Infof(log("CSIDriver %q requires pod information", c.driverName)) return attrs, nil } @@ -275,7 +269,7 @@ func (c *csiMountMgr) GetAttributes() volume.Attributes { path := c.GetPath() supportSelinux, err := mounter.GetSELinuxSupport(path) if err != nil { - glog.V(2).Info(log("error checking for SELinux support: %s", err)) + klog.V(2).Info(log("error checking for SELinux support: %s", err)) // Best guess supportSelinux = false } @@ -293,19 +287,19 @@ func (c *csiMountMgr) TearDown() error { return c.TearDownAt(c.GetPath()) } func (c *csiMountMgr) TearDownAt(dir string) error { - glog.V(4).Infof(log("Unmounter.TearDown(%s)", dir)) + klog.V(4).Infof(log("Unmounter.TearDown(%s)", dir)) // is dir even mounted ? 
// TODO (vladimirvivien) this check may not work for an emptyDir or local storage // see https://github.com/kubernetes/kubernetes/pull/56836#discussion_r155834524 mounted, err := isDirMounted(c.plugin, dir) if err != nil { - glog.Error(log("unmounter.Teardown failed while checking mount status for dir [%s]: %v", dir, err)) + klog.Error(log("unmounter.Teardown failed while checking mount status for dir [%s]: %v", dir, err)) return err } if !mounted { - glog.V(4).Info(log("unmounter.Teardown skipping unmount, dir not mounted [%s]", dir)) + klog.V(4).Info(log("unmounter.Teardown skipping unmount, dir not mounted [%s]", dir)) return nil } @@ -316,16 +310,53 @@ func (c *csiMountMgr) TearDownAt(dir string) error { defer cancel() if err := csi.NodeUnpublishVolume(ctx, volID, dir); err != nil { - glog.Errorf(log("mounter.TearDownAt failed: %v", err)) + klog.Errorf(log("mounter.TearDownAt failed: %v", err)) return err } // clean mount point dir if err := removeMountDir(c.plugin, dir); err != nil { - glog.Error(log("mounter.TearDownAt failed to clean mount dir [%s]: %v", dir, err)) + klog.Error(log("mounter.TearDownAt failed to clean mount dir [%s]: %v", dir, err)) return err } - glog.V(4).Infof(log("mounte.TearDownAt successfully unmounted dir [%s]", dir)) + klog.V(4).Infof(log("mounter.TearDownAt successfully unmounted dir [%s]", dir)) + + return nil +} + +// applyFSGroup applies the volume ownership; it derives its logic +// from https://github.com/kubernetes/kubernetes/issues/66323 +// 1) if fstype is "", then skip fsgroup (could be indication of non-block filesystem) +// 2) if fstype is provided and pv.AccessMode == ReadWriteOnce and !c.spec.ReadOnly then apply fsgroup +func (c *csiMountMgr) applyFSGroup(fsType string, fsGroup *int64) error { + if fsGroup != nil { + if fsType == "" { + klog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, fsType not provided")) + return nil + } + + accessModes := c.spec.PersistentVolume.Spec.AccessModes + if c.spec.PersistentVolume.Spec.AccessModes == nil { + klog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, access modes not provided")) + return nil + } + if !hasReadWriteOnce(accessModes) { + klog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, only the ReadWriteOnce access mode is supported")) + return nil + } + + if c.readOnly { + klog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, volume is readOnly")) + return nil + } + + err := volume.SetVolumeOwnership(c, fsGroup) + if err != nil { + return err + } + + klog.V(4).Info(log("mounter.SetupAt fsGroup [%d] applied successfully to %s", *fsGroup, c.volumeID)) + } return nil } @@ -335,7 +366,7 @@ func isDirMounted(plug *csiPlugin, dir string) (bool, error) { mounter := plug.host.GetMounter(plug.GetPluginName()) notMnt, err := mounter.IsLikelyNotMountPoint(dir) if err != nil && !os.IsNotExist(err) { - glog.Error(log("isDirMounted IsLikelyNotMountPoint test failed for dir [%v]", dir)) + klog.Error(log("isDirMounted IsLikelyNotMountPoint test failed for dir [%v]", dir)) return false, err } return !notMnt, nil @@ -343,39 +374,39 @@ func isDirMounted(plug *csiPlugin, dir string) (bool, error) { // removeMountDir cleans the mount dir when dir is not mounted and removed the volume data file in dir func removeMountDir(plug *csiPlugin, mountPath string) error { - glog.V(4).Info(log("removing mount path [%s]", mountPath)) + klog.V(4).Info(log("removing mount path [%s]", mountPath)) if pathExists, pathErr := util.PathExists(mountPath); pathErr != nil { - glog.Error(log("failed while checking 
mount path stat [%s]", pathErr)) + klog.Error(log("failed while checking mount path stat [%s]", pathErr)) return pathErr } else if !pathExists { - glog.Warning(log("skipping mount dir removal, path does not exist [%v]", mountPath)) + klog.Warning(log("skipping mount dir removal, path does not exist [%v]", mountPath)) return nil } mounter := plug.host.GetMounter(plug.GetPluginName()) notMnt, err := mounter.IsLikelyNotMountPoint(mountPath) if err != nil { - glog.Error(log("mount dir removal failed [%s]: %v", mountPath, err)) + klog.Error(log("mount dir removal failed [%s]: %v", mountPath, err)) return err } if notMnt { - glog.V(4).Info(log("dir not mounted, deleting it [%s]", mountPath)) + klog.V(4).Info(log("dir not mounted, deleting it [%s]", mountPath)) if err := os.Remove(mountPath); err != nil && !os.IsNotExist(err) { - glog.Error(log("failed to remove dir [%s]: %v", mountPath, err)) + klog.Error(log("failed to remove dir [%s]: %v", mountPath, err)) return err } // remove volume data file as well volPath := path.Dir(mountPath) dataFile := path.Join(volPath, volDataFileName) - glog.V(4).Info(log("also deleting volume info data file [%s]", dataFile)) + klog.V(4).Info(log("also deleting volume info data file [%s]", dataFile)) if err := os.Remove(dataFile); err != nil && !os.IsNotExist(err) { - glog.Error(log("failed to delete volume data file [%s]: %v", dataFile, err)) + klog.Error(log("failed to delete volume data file [%s]: %v", dataFile, err)) return err } // remove volume path - glog.V(4).Info(log("deleting volume path [%s]", volPath)) + klog.V(4).Info(log("deleting volume path [%s]", volPath)) if err := os.Remove(volPath); err != nil && !os.IsNotExist(err) { - glog.Error(log("failed to delete volume path [%s]: %v", volPath, err)) + klog.Error(log("failed to delete volume path [%s]: %v", volPath, err)) return err } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go index 258362192d11..1732dc7f9231 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go @@ -21,17 +21,20 @@ import ( "fmt" "os" "path" + "sort" "strings" "sync" "time" "context" - "github.com/golang/glog" + "k8s.io/klog" + api "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" meta "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" @@ -40,7 +43,7 @@ import ( csilister "k8s.io/csi-api/pkg/client/listers/csi/v1alpha1" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/csi/nodeupdater" + "k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager" ) const ( @@ -60,6 +63,8 @@ const ( csiResyncPeriod = time.Minute ) +var deprecatedSocketDirVersions = []string{"0.1.0", "0.2.0", "0.3.0", "0.4.0"} + type csiPlugin struct { host volume.VolumeHost blockEnabled bool @@ -80,8 +85,9 @@ func ProbeVolumePlugins() []volume.VolumePlugin { var _ volume.VolumePlugin = &csiPlugin{} type csiDriver struct { - driverName string - driverEndpoint string + driverName string + driverEndpoint string + highestSupportedVersion *utilversion.Version } type csiDriversStore struct { @@ -89,69 +95,163 @@ type csiDriversStore struct { sync.RWMutex } +// RegistrationHandler is the handler which is fed 
to the pluginwatcher API. +type RegistrationHandler struct { +} + // TODO (verult) consider using a struct instead of global variables // csiDrivers map keep track of all registered CSI drivers on the node and their // corresponding sockets var csiDrivers csiDriversStore -var nodeUpdater nodeupdater.Interface +var nim nodeinfomanager.Interface + +// PluginHandler is the plugin registration handler interface passed to the +// pluginwatcher module in kubelet +var PluginHandler = &RegistrationHandler{} -// RegistrationCallback is called by kubelet's plugin watcher upon detection +// ValidatePlugin is called by kubelet's plugin watcher upon detection // of a new registration socket opened by CSI Driver registrar side car. -func RegistrationCallback(pluginName string, endpoint string, versions []string, socketPath string) (chan bool, error) { +func (h *RegistrationHandler) ValidatePlugin(pluginName string, endpoint string, versions []string, foundInDeprecatedDir bool) error { + klog.Infof(log("Trying to validate a new CSI Driver with name: %s endpoint: %s versions: %s, foundInDeprecatedDir: %v", + pluginName, endpoint, strings.Join(versions, ","), foundInDeprecatedDir)) + + if foundInDeprecatedDir { + // CSI 0.x drivers used /var/lib/kubelet/plugins as the socket dir. + // This dir was deprecated for kubelet driver sockets in favor of a dedicated dir, /var/lib/kubelet/plugins_registry. + // The deprecated dir will only be allowed for a whitelisted set of old versions. + // CSI 1.x drivers should use /var/lib/kubelet/plugins_registry. + if !isDeprecatedSocketDirAllowed(versions) { + err := fmt.Errorf("socket for CSI driver %q versions %v was found in a deprecated dir. Drivers implementing CSI 1.x+ must use the new dir", pluginName, versions) + klog.Error(err) + return err + } + } + + _, err := h.validateVersions("ValidatePlugin", pluginName, endpoint, versions) + return err +} - glog.Infof(log("Callback from kubelet with plugin name: %s endpoint: %s versions: %s socket path: %s", - pluginName, endpoint, strings.Join(versions, ","), socketPath)) +// RegisterPlugin is called when a plugin can be registered +func (h *RegistrationHandler) RegisterPlugin(pluginName string, endpoint string, versions []string) error { + klog.Infof(log("Register new plugin with name: %s at endpoint: %s", pluginName, endpoint)) - if endpoint == "" { - endpoint = socketPath + highestSupportedVersion, err := h.validateVersions("RegisterPlugin", pluginName, endpoint, versions) + if err != nil { + return err } - // Storing endpoint of newly registered CSI driver into the map, where CSI driver name will be the key - // all other CSI components will be able to get the actual socket of CSI drivers by its name. - csiDrivers.Lock() - defer csiDrivers.Unlock() - csiDrivers.driversMap[pluginName] = csiDriver{driverName: pluginName, driverEndpoint: endpoint} + func() { + // Storing endpoint of newly registered CSI driver into the map, where CSI driver name will be the key + // all other CSI components will be able to get the actual socket of CSI drivers by its name. + + // It's not necessary to lock the entire RegistrationCallback() function because only the CSI + // client depends on this driver map, and the CSI client does not depend on node information + // updated in the rest of the function.
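+ // Note: this overwrites any existing entry for pluginName; validateVersions above has already rejected re-registrations that do not report a strictly higher supported version.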
+ csiDrivers.Lock() + defer csiDrivers.Unlock() + csiDrivers.driversMap[pluginName] = csiDriver{driverName: pluginName, driverEndpoint: endpoint, highestSupportedVersion: highestSupportedVersion} + }() // Get node info from the driver. - csi := newCsiDriverClient(pluginName) + csi, err := newCsiDriverClient(csiDriverName(pluginName)) + if err != nil { + return err + } + // TODO (verult) retry with exponential backoff, possibly added in csi client library. ctx, cancel := context.WithTimeout(context.Background(), csiTimeout) defer cancel() - driverNodeID, maxVolumePerNode, _, err := csi.NodeGetInfo(ctx) + + driverNodeID, maxVolumePerNode, accessibleTopology, err := csi.NodeGetInfo(ctx) if err != nil { - return nil, fmt.Errorf("error during CSI NodeGetInfo() call: %v", err) + klog.Error(log("registrationHandler.RegisterPlugin failed at CSI.NodeGetInfo: %v", err)) + if unregErr := unregisterDriver(pluginName); unregErr != nil { + klog.Error(log("registrationHandler.RegisterPlugin failed to unregister plugin due to previous error: %v", unregErr)) + return unregErr + } + return err } - // Calling nodeLabelManager to update annotations and labels for newly registered CSI driver - err = nodeUpdater.AddLabelsAndLimits(pluginName, driverNodeID, maxVolumePerNode) + err = nim.InstallCSIDriver(pluginName, driverNodeID, maxVolumePerNode, accessibleTopology) if err != nil { - // Unregister the driver and return error - csiDrivers.Lock() - defer csiDrivers.Unlock() - delete(csiDrivers.driversMap, pluginName) + klog.Error(log("registrationHandler.RegisterPlugin failed at InstallCSIDriver: %v", err)) + if unregErr := unregisterDriver(pluginName); unregErr != nil { + klog.Error(log("registrationHandler.RegisterPlugin failed to unregister plugin due to previous error: %v", unregErr)) + return unregErr + } + return err + } + + return nil +} + +func (h *RegistrationHandler) validateVersions(callerName, pluginName string, endpoint string, versions []string) (*utilversion.Version, error) { + if len(versions) == 0 { + err := fmt.Errorf("%s for CSI driver %q failed. Plugin returned an empty list for supported versions", callerName, pluginName) + klog.Error(err) + return nil, err + } + + // Validate version + newDriverHighestVersion, err := highestSupportedVersion(versions) + if err != nil { + err := fmt.Errorf("%s for CSI driver %q failed. None of the versions specified %q are supported. err=%v", callerName, pluginName, versions, err) + klog.Error(err) + return nil, err + } - return nil, nil + // Check for existing drivers with the same name + var existingDriver csiDriver + driverExists := false + func() { + csiDrivers.RLock() + defer csiDrivers.RUnlock() + existingDriver, driverExists = csiDrivers.driversMap[pluginName] + }() +
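+ // A re-registration for an existing driver name is only accepted when it reports a strictly higher supported version than the current entry.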
+ if driverExists { + if !existingDriver.highestSupportedVersion.LessThan(newDriverHighestVersion) { + err := fmt.Errorf("%s for CSI driver %q failed. Another driver with the same name is already registered with an equal or higher supported version: %q", callerName, pluginName, existingDriver.highestSupportedVersion) + klog.Error(err) + return nil, err + } + } + + return newDriverHighestVersion, nil +} + +// DeRegisterPlugin is called when a plugin has removed its socket, signaling +// that it is no longer available. +func (h *RegistrationHandler) DeRegisterPlugin(pluginName string) { + klog.V(4).Info(log("registrationHandler.DeRegisterPlugin request for plugin %s", pluginName)) + if err := unregisterDriver(pluginName); err != nil { + klog.Error(log("registrationHandler.DeRegisterPlugin failed: %v", err)) + } } func (p *csiPlugin) Init(host volume.VolumeHost) error { - glog.Info(log("plugin initializing...")) p.host = host + if utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) { + csiClient := host.GetCSIClient() + if csiClient == nil { + klog.Warning("The client for CSI Custom Resources is not available, skipping informer initialization") + } else { + // Start informer for CSIDrivers. + factory := csiapiinformer.NewSharedInformerFactory(csiClient, csiResyncPeriod) + p.csiDriverInformer = factory.Csi().V1alpha1().CSIDrivers() + p.csiDriverLister = p.csiDriverInformer.Lister() + go factory.Start(wait.NeverStop) + } + } + // Initializing csiDrivers map and label management channels csiDrivers = csiDriversStore{driversMap: map[string]csiDriver{}} - nodeUpdater = nodeupdater.NewNodeUpdater(host.GetNodeName(), host.GetKubeClient()) + nim = nodeinfomanager.NewNodeInfoManager(host.GetNodeName(), host) - csiClient := host.GetCSIClient() - if csiClient != nil { - // Start informer for CSIDrivers. - factory := csiapiinformer.NewSharedInformerFactory(csiClient, csiResyncPeriod) - p.csiDriverInformer = factory.Csi().V1alpha1().CSIDrivers() - p.csiDriverLister = p.csiDriverInformer.Lister() - go factory.Start(wait.NeverStop) - } + // TODO(#70514) Init CSINodeInfo object if the CRD exists and create Driver + // objects for migrated drivers.
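+ // Individual drivers register later, through the RegistrationHandler callbacks above, once the kubelet plugin watcher observes their registration sockets.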
return nil } @@ -165,7 +265,7 @@ func (p *csiPlugin) GetPluginName() string { func (p *csiPlugin) GetVolumeName(spec *volume.Spec) (string, error) { csi, err := getCSISourceFromSpec(spec) if err != nil { - glog.Error(log("plugin.GetVolumeName failed to extract volume source from spec: %v", err)) + klog.Error(log("plugin.GetVolumeName failed to extract volume source from spec: %v", err)) return "", err } @@ -198,11 +298,14 @@ func (p *csiPlugin) NewMounter( k8s := p.host.GetKubeClient() if k8s == nil { - glog.Error(log("failed to get a kubernetes client")) + klog.Error(log("failed to get a kubernetes client")) return nil, errors.New("failed to get a Kubernetes client") } - csi := newCsiDriverClient(pvSource.Driver) + csi, err := newCsiDriverClient(csiDriverName(pvSource.Driver)) + if err != nil { + return nil, err + } mounter := &csiMountMgr{ plugin: p, @@ -210,7 +313,7 @@ func (p *csiPlugin) NewMounter( spec: spec, pod: pod, podUID: pod.UID, - driverName: pvSource.Driver, + driverName: csiDriverName(pvSource.Driver), volumeID: pvSource.VolumeHandle, specVolumeID: spec.Name(), csiClient: csi, @@ -222,10 +325,10 @@ func (p *csiPlugin) NewMounter( dataDir := path.Dir(dir) // dropoff /mount at end if err := os.MkdirAll(dataDir, 0750); err != nil { - glog.Error(log("failed to create dir %#v: %v", dataDir, err)) + klog.Error(log("failed to create dir %#v: %v", dataDir, err)) return nil, err } - glog.V(4).Info(log("created path successfully [%s]", dataDir)) + klog.V(4).Info(log("created path successfully [%s]", dataDir)) // persist volume info data for teardown node := string(p.host.GetNodeName()) @@ -239,21 +342,21 @@ func (p *csiPlugin) NewMounter( } if err := saveVolumeData(dataDir, volDataFileName, volData); err != nil { - glog.Error(log("failed to save volume info data: %v", err)) + klog.Error(log("failed to save volume info data: %v", err)) if err := os.RemoveAll(dataDir); err != nil { - glog.Error(log("failed to remove dir after error [%s]: %v", dataDir, err)) + klog.Error(log("failed to remove dir after error [%s]: %v", dataDir, err)) return nil, err } return nil, err } - glog.V(4).Info(log("mounter created successfully")) + klog.V(4).Info(log("mounter created successfully")) return mounter, nil } func (p *csiPlugin) NewUnmounter(specName string, podUID types.UID) (volume.Unmounter, error) { - glog.V(4).Infof(log("setting up unmounter for [name=%v, podUID=%v]", specName, podUID)) + klog.V(4).Infof(log("setting up unmounter for [name=%v, podUID=%v]", specName, podUID)) unmounter := &csiMountMgr{ plugin: p, @@ -266,26 +369,29 @@ func (p *csiPlugin) NewUnmounter(specName string, podUID types.UID) (volume.Unmo dataDir := path.Dir(dir) // dropoff /mount at end data, err := loadVolumeData(dataDir, volDataFileName) if err != nil { - glog.Error(log("unmounter failed to load volume data file [%s]: %v", dir, err)) + klog.Error(log("unmounter failed to load volume data file [%s]: %v", dir, err)) return nil, err } - unmounter.driverName = data[volDataKey.driverName] + unmounter.driverName = csiDriverName(data[volDataKey.driverName]) unmounter.volumeID = data[volDataKey.volHandle] - unmounter.csiClient = newCsiDriverClient(unmounter.driverName) + unmounter.csiClient, err = newCsiDriverClient(unmounter.driverName) + if err != nil { + return nil, err + } return unmounter, nil } func (p *csiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) { - glog.V(4).Info(log("plugin.ConstructVolumeSpec [pv.Name=%v, path=%v]", volumeName, mountPath)) + 
klog.V(4).Info(log("plugin.ConstructVolumeSpec [pv.Name=%v, path=%v]", volumeName, mountPath)) volData, err := loadVolumeData(mountPath, volDataFileName) if err != nil { - glog.Error(log("plugin.ConstructVolumeSpec failed loading volume data using [%s]: %v", mountPath, err)) + klog.Error(log("plugin.ConstructVolumeSpec failed loading volume data using [%s]: %v", mountPath, err)) return nil, err } - glog.V(4).Info(log("plugin.ConstructVolumeSpec extracted [%#v]", volData)) + klog.V(4).Info(log("plugin.ConstructVolumeSpec extracted [%#v]", volData)) fsMode := api.PersistentVolumeFilesystem pv := &api.PersistentVolume{ @@ -309,7 +415,10 @@ func (p *csiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.S func (p *csiPlugin) SupportsMountOption() bool { // TODO (vladimirvivien) use CSI VolumeCapability.MountVolume.mount_flags // to probe for the result for this method - return false + // (bswartz) Until the CSI spec supports probing, our only option is to + // make plugins register their support for mount options or lack thereof + // directly with kubernetes. + return true } func (p *csiPlugin) SupportsBulkVolumeVerification() bool { @@ -324,7 +433,7 @@ var _ volume.DeviceMountableVolumePlugin = &csiPlugin{} func (p *csiPlugin) NewAttacher() (volume.Attacher, error) { k8s := p.host.GetKubeClient() if k8s == nil { - glog.Error(log("unable to get kubernetes client from host")) + klog.Error(log("unable to get kubernetes client from host")) return nil, errors.New("unable to get Kubernetes client") } @@ -342,7 +451,7 @@ func (p *csiPlugin) NewDeviceMounter() (volume.DeviceMounter, error) { func (p *csiPlugin) NewDetacher() (volume.Detacher, error) { k8s := p.host.GetKubeClient() if k8s == nil { - glog.Error(log("unable to get kubernetes client from host")) + klog.Error(log("unable to get kubernetes client from host")) return nil, errors.New("unable to get Kubernetes client") } @@ -379,12 +488,15 @@ func (p *csiPlugin) NewBlockVolumeMapper(spec *volume.Spec, podRef *api.Pod, opt return nil, err } - glog.V(4).Info(log("setting up block mapper for [volume=%v,driver=%v]", pvSource.VolumeHandle, pvSource.Driver)) - client := newCsiDriverClient(pvSource.Driver) + klog.V(4).Info(log("setting up block mapper for [volume=%v,driver=%v]", pvSource.VolumeHandle, pvSource.Driver)) + client, err := newCsiDriverClient(csiDriverName(pvSource.Driver)) + if err != nil { + return nil, err + } k8s := p.host.GetKubeClient() if k8s == nil { - glog.Error(log("failed to get a kubernetes client")) + klog.Error(log("failed to get a kubernetes client")) return nil, errors.New("failed to get a Kubernetes client") } @@ -393,7 +505,7 @@ func (p *csiPlugin) NewBlockVolumeMapper(spec *volume.Spec, podRef *api.Pod, opt k8s: k8s, plugin: p, volumeID: pvSource.VolumeHandle, - driverName: pvSource.Driver, + driverName: csiDriverName(pvSource.Driver), readOnly: readOnly, spec: spec, specName: spec.Name(), @@ -404,10 +516,10 @@ func (p *csiPlugin) NewBlockVolumeMapper(spec *volume.Spec, podRef *api.Pod, opt dataDir := getVolumeDeviceDataDir(spec.Name(), p.host) if err := os.MkdirAll(dataDir, 0750); err != nil { - glog.Error(log("failed to create data dir %s: %v", dataDir, err)) + klog.Error(log("failed to create data dir %s: %v", dataDir, err)) return nil, err } - glog.V(4).Info(log("created path successfully [%s]", dataDir)) + klog.V(4).Info(log("created path successfully [%s]", dataDir)) // persist volume info data for teardown node := string(p.host.GetNodeName()) @@ -421,9 +533,9 @@ func (p *csiPlugin) 
NewBlockVolumeMapper(spec *volume.Spec, podRef *api.Pod, opt } if err := saveVolumeData(dataDir, volDataFileName, volData); err != nil { - glog.Error(log("failed to save volume info data: %v", err)) + klog.Error(log("failed to save volume info data: %v", err)) if err := os.RemoveAll(dataDir); err != nil { - glog.Error(log("failed to remove dir after error [%s]: %v", dataDir, err)) + klog.Error(log("failed to remove dir after error [%s]: %v", dataDir, err)) return nil, err } return nil, err @@ -437,7 +549,7 @@ func (p *csiPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (vo return nil, errors.New("CSIBlockVolume feature not enabled") } - glog.V(4).Infof(log("setting up block unmapper for [Spec=%v, podUID=%v]", volName, podUID)) + klog.V(4).Infof(log("setting up block unmapper for [Spec=%v, podUID=%v]", volName, podUID)) unmapper := &csiBlockMapper{ plugin: p, podUID: podUID, @@ -448,12 +560,15 @@ func (p *csiPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (vo dataDir := getVolumeDeviceDataDir(unmapper.specName, p.host) data, err := loadVolumeData(dataDir, volDataFileName) if err != nil { - glog.Error(log("unmapper failed to load volume data file [%s]: %v", dataDir, err)) + klog.Error(log("unmapper failed to load volume data file [%s]: %v", dataDir, err)) return nil, err } - unmapper.driverName = data[volDataKey.driverName] + unmapper.driverName = csiDriverName(data[volDataKey.driverName]) unmapper.volumeID = data[volDataKey.volHandle] - unmapper.csiClient = newCsiDriverClient(unmapper.driverName) + unmapper.csiClient, err = newCsiDriverClient(unmapper.driverName) + if err != nil { + return nil, err + } return unmapper, nil } @@ -463,16 +578,16 @@ func (p *csiPlugin) ConstructBlockVolumeSpec(podUID types.UID, specVolName, mapP return nil, errors.New("CSIBlockVolume feature not enabled") } - glog.V(4).Infof("plugin.ConstructBlockVolumeSpec [podUID=%s, specVolName=%s, path=%s]", string(podUID), specVolName, mapPath) + klog.V(4).Infof("plugin.ConstructBlockVolumeSpec [podUID=%s, specVolName=%s, path=%s]", string(podUID), specVolName, mapPath) dataDir := getVolumeDeviceDataDir(specVolName, p.host) volData, err := loadVolumeData(dataDir, volDataFileName) if err != nil { - glog.Error(log("plugin.ConstructBlockVolumeSpec failed loading volume data using [%s]: %v", mapPath, err)) + klog.Error(log("plugin.ConstructBlockVolumeSpec failed loading volume data using [%s]: %v", mapPath, err)) return nil, err } - glog.V(4).Info(log("plugin.ConstructBlockVolumeSpec extracted [%#v]", volData)) + klog.V(4).Info(log("plugin.ConstructBlockVolumeSpec extracted [%#v]", volData)) blockMode := api.PersistentVolumeBlock pv := &api.PersistentVolume{ @@ -494,7 +609,7 @@ func (p *csiPlugin) ConstructBlockVolumeSpec(podUID types.UID, specVolName, mapP } func (p *csiPlugin) skipAttach(driver string) (bool, error) { - if !utilfeature.DefaultFeatureGate.Enabled(features.CSISkipAttach) { + if !utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) { return false, nil } if p.csiDriverLister == nil { @@ -514,7 +629,7 @@ func (p *csiPlugin) skipAttach(driver string) (bool, error) { return false, nil } -func (p *csiPlugin) getPublishVolumeInfo(client clientset.Interface, handle, driver, nodeName string) (map[string]string, error) { +func (p *csiPlugin) getPublishContext(client clientset.Interface, handle, driver, nodeName string) (map[string]string, error) { skip, err := p.skipAttach(driver) if err != nil { return nil, err @@ -537,3 +652,84 @@ func (p *csiPlugin) 
getPublishVolumeInfo(client clientset.Interface, handle, dri } return attachment.Status.AttachmentMetadata, nil } + +func unregisterDriver(driverName string) error { + func() { + csiDrivers.Lock() + defer csiDrivers.Unlock() + delete(csiDrivers.driversMap, driverName) + }() + + if err := nim.UninstallCSIDriver(driverName); err != nil { + klog.Errorf("Error uninstalling CSI driver: %v", err) + return err + } + + return nil +} + +// Return the highest supported version +func highestSupportedVersion(versions []string) (*utilversion.Version, error) { + if len(versions) == 0 { + return nil, fmt.Errorf("CSI driver reporting empty array for supported versions") + } + + // Sort by lowest to highest version + sort.Slice(versions, func(i, j int) bool { + parsedVersionI, err := utilversion.ParseGeneric(versions[i]) + if err != nil { + // Push bad values to the bottom + return true + } + + parsedVersionJ, err := utilversion.ParseGeneric(versions[j]) + if err != nil { + // Push bad values to the bottom + return false + } + + return parsedVersionI.LessThan(parsedVersionJ) + }) + + for i := len(versions) - 1; i >= 0; i-- { + highestSupportedVersion, err := utilversion.ParseGeneric(versions[i]) + if err != nil { + return nil, err + } + + if highestSupportedVersion.Major() <= 1 { + return highestSupportedVersion, nil + } + } + + return nil, fmt.Errorf("None of the CSI versions reported by this driver are supported") +} + +// Only CSI 0.x drivers are allowed to use deprecated socket dir. +func isDeprecatedSocketDirAllowed(versions []string) bool { + for _, version := range versions { + if !isV0Version(version) { + return false + } + } + + return true +} + +func isV0Version(version string) bool { + parsedVersion, err := utilversion.ParseGeneric(version) + if err != nil { + return false + } + + return parsedVersion.Major() == 0 +} + +func isV1Version(version string) bool { + parsedVersion, err := utilversion.ParseGeneric(version) + if err != nil { + return false + } + + return parsedVersion.Major() == 1 +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_util.go index 6a0b67212e57..3fe103584c41 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_util.go @@ -22,19 +22,25 @@ import ( "os" "path" - "github.com/golang/glog" api "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" + "k8s.io/klog" kstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" + "time" +) + +const ( + testInformerSyncPeriod = 100 * time.Millisecond + testInformerSyncTimeout = 30 * time.Second ) func getCredentialsFromSecret(k8s kubernetes.Interface, secretRef *api.SecretReference) (map[string]string, error) { credentials := map[string]string{} secret, err := k8s.CoreV1().Secrets(secretRef.Namespace).Get(secretRef.Name, meta.GetOptions{}) if err != nil { - glog.Errorf("failed to find the secret %s in the namespace %s with error: %v\n", secretRef.Name, secretRef.Namespace, err) + klog.Errorf("failed to find the secret %s in the namespace %s with error: %v\n", secretRef.Name, secretRef.Namespace, err) return credentials, err } for key, value := range secret.Data { @@ -47,18 +53,18 @@ func getCredentialsFromSecret(k8s kubernetes.Interface, secretRef *api.SecretRef // saveVolumeData persists parameter data as json file at the provided location func saveVolumeData(dir string, fileName 
string, data map[string]string) error { dataFilePath := path.Join(dir, fileName) - glog.V(4).Info(log("saving volume data file [%s]", dataFilePath)) + klog.V(4).Info(log("saving volume data file [%s]", dataFilePath)) file, err := os.Create(dataFilePath) if err != nil { - glog.Error(log("failed to save volume data file %s: %v", dataFilePath, err)) + klog.Error(log("failed to save volume data file %s: %v", dataFilePath, err)) return err } defer file.Close() if err := json.NewEncoder(file).Encode(data); err != nil { - glog.Error(log("failed to save volume data file %s: %v", dataFilePath, err)) + klog.Error(log("failed to save volume data file %s: %v", dataFilePath, err)) return err } - glog.V(4).Info(log("volume data file saved successfully [%s]", dataFilePath)) + klog.V(4).Info(log("volume data file saved successfully [%s]", dataFilePath)) return nil } @@ -66,17 +72,17 @@ func saveVolumeData(dir string, fileName string, data map[string]string) error { func loadVolumeData(dir string, fileName string) (map[string]string, error) { // remove /mount at the end dataFileName := path.Join(dir, fileName) - glog.V(4).Info(log("loading volume data file [%s]", dataFileName)) + klog.V(4).Info(log("loading volume data file [%s]", dataFileName)) file, err := os.Open(dataFileName) if err != nil { - glog.Error(log("failed to open volume data file [%s]: %v", dataFileName, err)) + klog.Error(log("failed to open volume data file [%s]: %v", dataFileName, err)) return nil, err } defer file.Close() data := map[string]string{} if err := json.NewDecoder(file).Decode(&data); err != nil { - glog.Error(log("failed to parse volume data file [%s]: %v", dataFileName, err)) + klog.Error(log("failed to parse volume data file [%s]: %v", dataFileName, err)) return nil, err } @@ -121,3 +127,16 @@ func getVolumeDeviceDataDir(specVolID string, host volume.VolumeHost) string { sanitizedSpecVolID := kstrings.EscapeQualifiedNameForDisk(specVolID) return path.Join(host.GetVolumeDevicePluginDir(csiPluginName), sanitizedSpecVolID, "data") } + +// hasReadWriteOnce returns true if modes contains v1.ReadWriteOnce +func hasReadWriteOnce(modes []api.PersistentVolumeAccessMode) bool { + if modes == nil { + return false + } + for _, mode := range modes { + if mode == api.ReadWriteOnce { + return true + } + } + return false +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csiv0/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csiv0/BUILD new file mode 100644 index 000000000000..6dcb1ab22910 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csiv0/BUILD @@ -0,0 +1,28 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["csi.pb.go"], + importpath = "k8s.io/kubernetes/pkg/volume/csi/csiv0", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/github.com/golang/protobuf/ptypes/wrappers:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csiv0/csi.pb.go 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csiv0/csi.pb.go new file mode 100644 index 000000000000..174badd75a06 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csiv0/csi.pb.go @@ -0,0 +1,5007 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// For backwards compatibility with CSI 0.x we carry a copy of the +// CSI 0.3 client. + +package csiv0 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import wrappers "github.com/golang/protobuf/ptypes/wrappers" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type PluginCapability_Service_Type int32 + +const ( + PluginCapability_Service_UNKNOWN PluginCapability_Service_Type = 0 + // CONTROLLER_SERVICE indicates that the Plugin provides RPCs for + // the ControllerService. Plugins SHOULD provide this capability. + // In rare cases certain plugins may wish to omit the + // ControllerService entirely from their implementation, but such + // SHOULD NOT be the common case. + // The presence of this capability determines whether the CO will + // attempt to invoke the REQUIRED ControllerService RPCs, as well + // as specific RPCs as indicated by ControllerGetCapabilities. + PluginCapability_Service_CONTROLLER_SERVICE PluginCapability_Service_Type = 1 + // ACCESSIBILITY_CONSTRAINTS indicates that the volumes for this + // plugin may not be equally accessible by all nodes in the + // cluster. The CO MUST use the topology information returned by + // CreateVolumeRequest along with the topology information + // returned by NodeGetInfo to ensure that a given volume is + // accessible from a given node when scheduling workloads. 
+ PluginCapability_Service_ACCESSIBILITY_CONSTRAINTS PluginCapability_Service_Type = 2 +) + +var PluginCapability_Service_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CONTROLLER_SERVICE", + 2: "ACCESSIBILITY_CONSTRAINTS", +} +var PluginCapability_Service_Type_value = map[string]int32{ + "UNKNOWN": 0, + "CONTROLLER_SERVICE": 1, + "ACCESSIBILITY_CONSTRAINTS": 2, +} + +func (x PluginCapability_Service_Type) String() string { + return proto.EnumName(PluginCapability_Service_Type_name, int32(x)) +} +func (PluginCapability_Service_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{4, 0, 0} +} + +type VolumeCapability_AccessMode_Mode int32 + +const ( + VolumeCapability_AccessMode_UNKNOWN VolumeCapability_AccessMode_Mode = 0 + // Can only be published once as read/write on a single node, at + // any given time. + VolumeCapability_AccessMode_SINGLE_NODE_WRITER VolumeCapability_AccessMode_Mode = 1 + // Can only be published once as readonly on a single node, at + // any given time. + VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY VolumeCapability_AccessMode_Mode = 2 + // Can be published as readonly at multiple nodes simultaneously. + VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY VolumeCapability_AccessMode_Mode = 3 + // Can be published at multiple nodes simultaneously. Only one of + // the node can be used as read/write. The rest will be readonly. + VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER VolumeCapability_AccessMode_Mode = 4 + // Can be published as read/write at multiple nodes + // simultaneously. + VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER VolumeCapability_AccessMode_Mode = 5 +) + +var VolumeCapability_AccessMode_Mode_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SINGLE_NODE_WRITER", + 2: "SINGLE_NODE_READER_ONLY", + 3: "MULTI_NODE_READER_ONLY", + 4: "MULTI_NODE_SINGLE_WRITER", + 5: "MULTI_NODE_MULTI_WRITER", +} +var VolumeCapability_AccessMode_Mode_value = map[string]int32{ + "UNKNOWN": 0, + "SINGLE_NODE_WRITER": 1, + "SINGLE_NODE_READER_ONLY": 2, + "MULTI_NODE_READER_ONLY": 3, + "MULTI_NODE_SINGLE_WRITER": 4, + "MULTI_NODE_MULTI_WRITER": 5, +} + +func (x VolumeCapability_AccessMode_Mode) String() string { + return proto.EnumName(VolumeCapability_AccessMode_Mode_name, int32(x)) +} +func (VolumeCapability_AccessMode_Mode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{10, 2, 0} +} + +type ControllerServiceCapability_RPC_Type int32 + +const ( + ControllerServiceCapability_RPC_UNKNOWN ControllerServiceCapability_RPC_Type = 0 + ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME ControllerServiceCapability_RPC_Type = 1 + ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME ControllerServiceCapability_RPC_Type = 2 + ControllerServiceCapability_RPC_LIST_VOLUMES ControllerServiceCapability_RPC_Type = 3 + ControllerServiceCapability_RPC_GET_CAPACITY ControllerServiceCapability_RPC_Type = 4 + // Currently the only way to consume a snapshot is to create + // a volume from it. Therefore plugins supporting + // CREATE_DELETE_SNAPSHOT MUST support creating volume from + // snapshot. + ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT ControllerServiceCapability_RPC_Type = 5 + // LIST_SNAPSHOTS is NOT REQUIRED. For plugins that need to upload + // a snapshot after it is being cut, LIST_SNAPSHOTS COULD be used + // with the snapshot_id as the filter to query whether the + // uploading process is complete or not. 
+ ControllerServiceCapability_RPC_LIST_SNAPSHOTS ControllerServiceCapability_RPC_Type = 6 +) + +var ControllerServiceCapability_RPC_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CREATE_DELETE_VOLUME", + 2: "PUBLISH_UNPUBLISH_VOLUME", + 3: "LIST_VOLUMES", + 4: "GET_CAPACITY", + 5: "CREATE_DELETE_SNAPSHOT", + 6: "LIST_SNAPSHOTS", +} +var ControllerServiceCapability_RPC_Type_value = map[string]int32{ + "UNKNOWN": 0, + "CREATE_DELETE_VOLUME": 1, + "PUBLISH_UNPUBLISH_VOLUME": 2, + "LIST_VOLUMES": 3, + "GET_CAPACITY": 4, + "CREATE_DELETE_SNAPSHOT": 5, + "LIST_SNAPSHOTS": 6, +} + +func (x ControllerServiceCapability_RPC_Type) String() string { + return proto.EnumName(ControllerServiceCapability_RPC_Type_name, int32(x)) +} +func (ControllerServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{29, 0, 0} +} + +type SnapshotStatus_Type int32 + +const ( + SnapshotStatus_UNKNOWN SnapshotStatus_Type = 0 + // A snapshot is ready for use. + SnapshotStatus_READY SnapshotStatus_Type = 1 + // A snapshot is cut and is now being uploaded. + // Some cloud providers and storage systems uploads the snapshot + // to the cloud after the snapshot is cut. During this phase, + // `thaw` can be done so the application can be running again if + // `freeze` was done before taking the snapshot. + SnapshotStatus_UPLOADING SnapshotStatus_Type = 2 + // An error occurred during the snapshot uploading process. + // This error status is specific for uploading because + // `CreateSnaphot` is a blocking call before the snapshot is + // cut and therefore it SHOULD NOT come back with an error + // status when an error occurs. Instead a gRPC error code SHALL + // be returned by `CreateSnapshot` when an error occurs before + // a snapshot is cut. 
+ SnapshotStatus_ERROR_UPLOADING SnapshotStatus_Type = 3 +) + +var SnapshotStatus_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "READY", + 2: "UPLOADING", + 3: "ERROR_UPLOADING", +} +var SnapshotStatus_Type_value = map[string]int32{ + "UNKNOWN": 0, + "READY": 1, + "UPLOADING": 2, + "ERROR_UPLOADING": 3, +} + +func (x SnapshotStatus_Type) String() string { + return proto.EnumName(SnapshotStatus_Type_name, int32(x)) +} +func (SnapshotStatus_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{33, 0} +} + +type NodeServiceCapability_RPC_Type int32 + +const ( + NodeServiceCapability_RPC_UNKNOWN NodeServiceCapability_RPC_Type = 0 + NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME NodeServiceCapability_RPC_Type = 1 +) + +var NodeServiceCapability_RPC_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "STAGE_UNSTAGE_VOLUME", +} +var NodeServiceCapability_RPC_Type_value = map[string]int32{ + "UNKNOWN": 0, + "STAGE_UNSTAGE_VOLUME": 1, +} + +func (x NodeServiceCapability_RPC_Type) String() string { + return proto.EnumName(NodeServiceCapability_RPC_Type_name, int32(x)) +} +func (NodeServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{50, 0, 0} +} + +type GetPluginInfoRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPluginInfoRequest) Reset() { *m = GetPluginInfoRequest{} } +func (m *GetPluginInfoRequest) String() string { return proto.CompactTextString(m) } +func (*GetPluginInfoRequest) ProtoMessage() {} +func (*GetPluginInfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{0} +} +func (m *GetPluginInfoRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPluginInfoRequest.Unmarshal(m, b) +} +func (m *GetPluginInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPluginInfoRequest.Marshal(b, m, deterministic) +} +func (dst *GetPluginInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPluginInfoRequest.Merge(dst, src) +} +func (m *GetPluginInfoRequest) XXX_Size() int { + return xxx_messageInfo_GetPluginInfoRequest.Size(m) +} +func (m *GetPluginInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetPluginInfoRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPluginInfoRequest proto.InternalMessageInfo + +type GetPluginInfoResponse struct { + // The name MUST follow reverse domain name notation format + // (https://en.wikipedia.org/wiki/Reverse_domain_name_notation). + // It SHOULD include the plugin's host company name and the plugin + // name, to minimize the possibility of collisions. It MUST be 63 + // characters or less, beginning and ending with an alphanumeric + // character ([a-z0-9A-Z]) with dashes (-), underscores (_), + // dots (.), and alphanumerics between. This field is REQUIRED. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // This field is REQUIRED. Value of this field is opaque to the CO. + VendorVersion string `protobuf:"bytes,2,opt,name=vendor_version,json=vendorVersion" json:"vendor_version,omitempty"` + // This field is OPTIONAL. Values are opaque to the CO. 
+ Manifest map[string]string `protobuf:"bytes,3,rep,name=manifest" json:"manifest,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPluginInfoResponse) Reset() { *m = GetPluginInfoResponse{} } +func (m *GetPluginInfoResponse) String() string { return proto.CompactTextString(m) } +func (*GetPluginInfoResponse) ProtoMessage() {} +func (*GetPluginInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{1} +} +func (m *GetPluginInfoResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPluginInfoResponse.Unmarshal(m, b) +} +func (m *GetPluginInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPluginInfoResponse.Marshal(b, m, deterministic) +} +func (dst *GetPluginInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPluginInfoResponse.Merge(dst, src) +} +func (m *GetPluginInfoResponse) XXX_Size() int { + return xxx_messageInfo_GetPluginInfoResponse.Size(m) +} +func (m *GetPluginInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetPluginInfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPluginInfoResponse proto.InternalMessageInfo + +func (m *GetPluginInfoResponse) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GetPluginInfoResponse) GetVendorVersion() string { + if m != nil { + return m.VendorVersion + } + return "" +} + +func (m *GetPluginInfoResponse) GetManifest() map[string]string { + if m != nil { + return m.Manifest + } + return nil +} + +type GetPluginCapabilitiesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPluginCapabilitiesRequest) Reset() { *m = GetPluginCapabilitiesRequest{} } +func (m *GetPluginCapabilitiesRequest) String() string { return proto.CompactTextString(m) } +func (*GetPluginCapabilitiesRequest) ProtoMessage() {} +func (*GetPluginCapabilitiesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{2} +} +func (m *GetPluginCapabilitiesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPluginCapabilitiesRequest.Unmarshal(m, b) +} +func (m *GetPluginCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPluginCapabilitiesRequest.Marshal(b, m, deterministic) +} +func (dst *GetPluginCapabilitiesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPluginCapabilitiesRequest.Merge(dst, src) +} +func (m *GetPluginCapabilitiesRequest) XXX_Size() int { + return xxx_messageInfo_GetPluginCapabilitiesRequest.Size(m) +} +func (m *GetPluginCapabilitiesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetPluginCapabilitiesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPluginCapabilitiesRequest proto.InternalMessageInfo + +type GetPluginCapabilitiesResponse struct { + // All the capabilities that the controller service supports. This + // field is OPTIONAL. 
+ Capabilities []*PluginCapability `protobuf:"bytes,2,rep,name=capabilities" json:"capabilities,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPluginCapabilitiesResponse) Reset() { *m = GetPluginCapabilitiesResponse{} } +func (m *GetPluginCapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*GetPluginCapabilitiesResponse) ProtoMessage() {} +func (*GetPluginCapabilitiesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{3} +} +func (m *GetPluginCapabilitiesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPluginCapabilitiesResponse.Unmarshal(m, b) +} +func (m *GetPluginCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPluginCapabilitiesResponse.Marshal(b, m, deterministic) +} +func (dst *GetPluginCapabilitiesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPluginCapabilitiesResponse.Merge(dst, src) +} +func (m *GetPluginCapabilitiesResponse) XXX_Size() int { + return xxx_messageInfo_GetPluginCapabilitiesResponse.Size(m) +} +func (m *GetPluginCapabilitiesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetPluginCapabilitiesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPluginCapabilitiesResponse proto.InternalMessageInfo + +func (m *GetPluginCapabilitiesResponse) GetCapabilities() []*PluginCapability { + if m != nil { + return m.Capabilities + } + return nil +} + +// Specifies a capability of the plugin. +type PluginCapability struct { + // Types that are valid to be assigned to Type: + // *PluginCapability_Service_ + Type isPluginCapability_Type `protobuf_oneof:"type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PluginCapability) Reset() { *m = PluginCapability{} } +func (m *PluginCapability) String() string { return proto.CompactTextString(m) } +func (*PluginCapability) ProtoMessage() {} +func (*PluginCapability) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{4} +} +func (m *PluginCapability) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PluginCapability.Unmarshal(m, b) +} +func (m *PluginCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PluginCapability.Marshal(b, m, deterministic) +} +func (dst *PluginCapability) XXX_Merge(src proto.Message) { + xxx_messageInfo_PluginCapability.Merge(dst, src) +} +func (m *PluginCapability) XXX_Size() int { + return xxx_messageInfo_PluginCapability.Size(m) +} +func (m *PluginCapability) XXX_DiscardUnknown() { + xxx_messageInfo_PluginCapability.DiscardUnknown(m) +} + +var xxx_messageInfo_PluginCapability proto.InternalMessageInfo + +type isPluginCapability_Type interface { + isPluginCapability_Type() +} + +type PluginCapability_Service_ struct { + Service *PluginCapability_Service `protobuf:"bytes,1,opt,name=service,oneof"` +} + +func (*PluginCapability_Service_) isPluginCapability_Type() {} + +func (m *PluginCapability) GetType() isPluginCapability_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *PluginCapability) GetService() *PluginCapability_Service { + if x, ok := m.GetType().(*PluginCapability_Service_); ok { + return x.Service + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*PluginCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _PluginCapability_OneofMarshaler, _PluginCapability_OneofUnmarshaler, _PluginCapability_OneofSizer, []interface{}{ + (*PluginCapability_Service_)(nil), + } +} + +func _PluginCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*PluginCapability) + // type + switch x := m.Type.(type) { + case *PluginCapability_Service_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Service); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("PluginCapability.Type has unexpected type %T", x) + } + return nil +} + +func _PluginCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*PluginCapability) + switch tag { + case 1: // type.service + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PluginCapability_Service) + err := b.DecodeMessage(msg) + m.Type = &PluginCapability_Service_{msg} + return true, err + default: + return false, nil + } +} + +func _PluginCapability_OneofSizer(msg proto.Message) (n int) { + m := msg.(*PluginCapability) + // type + switch x := m.Type.(type) { + case *PluginCapability_Service_: + s := proto.Size(x.Service) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type PluginCapability_Service struct { + Type PluginCapability_Service_Type `protobuf:"varint,1,opt,name=type,enum=csi.v0.PluginCapability_Service_Type" json:"type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PluginCapability_Service) Reset() { *m = PluginCapability_Service{} } +func (m *PluginCapability_Service) String() string { return proto.CompactTextString(m) } +func (*PluginCapability_Service) ProtoMessage() {} +func (*PluginCapability_Service) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{4, 0} +} +func (m *PluginCapability_Service) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PluginCapability_Service.Unmarshal(m, b) +} +func (m *PluginCapability_Service) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PluginCapability_Service.Marshal(b, m, deterministic) +} +func (dst *PluginCapability_Service) XXX_Merge(src proto.Message) { + xxx_messageInfo_PluginCapability_Service.Merge(dst, src) +} +func (m *PluginCapability_Service) XXX_Size() int { + return xxx_messageInfo_PluginCapability_Service.Size(m) +} +func (m *PluginCapability_Service) XXX_DiscardUnknown() { + xxx_messageInfo_PluginCapability_Service.DiscardUnknown(m) +} + +var xxx_messageInfo_PluginCapability_Service proto.InternalMessageInfo + +func (m *PluginCapability_Service) GetType() PluginCapability_Service_Type { + if m != nil { + return m.Type + } + return PluginCapability_Service_UNKNOWN +} + +type ProbeRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProbeRequest) Reset() { *m = ProbeRequest{} } +func (m *ProbeRequest) String() string { return proto.CompactTextString(m) } +func (*ProbeRequest) ProtoMessage() {} +func (*ProbeRequest) Descriptor() ([]byte, []int) { + return 
fileDescriptor_csi_31237507707d37ec, []int{5} +} +func (m *ProbeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProbeRequest.Unmarshal(m, b) +} +func (m *ProbeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProbeRequest.Marshal(b, m, deterministic) +} +func (dst *ProbeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProbeRequest.Merge(dst, src) +} +func (m *ProbeRequest) XXX_Size() int { + return xxx_messageInfo_ProbeRequest.Size(m) +} +func (m *ProbeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ProbeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ProbeRequest proto.InternalMessageInfo + +type ProbeResponse struct { + // Readiness allows a plugin to report its initialization status back + // to the CO. Initialization for some plugins MAY be time consuming + // and it is important for a CO to distinguish between the following + // cases: + // + // 1) The plugin is in an unhealthy state and MAY need restarting. In + // this case a gRPC error code SHALL be returned. + // 2) The plugin is still initializing, but is otherwise perfectly + // healthy. In this case a successful response SHALL be returned + // with a readiness value of `false`. Calls to the plugin's + // Controller and/or Node services MAY fail due to an incomplete + // initialization state. + // 3) The plugin has finished initializing and is ready to service + // calls to its Controller and/or Node services. A successful + // response is returned with a readiness value of `true`. + // + // This field is OPTIONAL. If not present, the caller SHALL assume + // that the plugin is in a ready state and is accepting calls to its + // Controller and/or Node services (according to the plugin's reported + // capabilities). + Ready *wrappers.BoolValue `protobuf:"bytes,1,opt,name=ready" json:"ready,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProbeResponse) Reset() { *m = ProbeResponse{} } +func (m *ProbeResponse) String() string { return proto.CompactTextString(m) } +func (*ProbeResponse) ProtoMessage() {} +func (*ProbeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{6} +} +func (m *ProbeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProbeResponse.Unmarshal(m, b) +} +func (m *ProbeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProbeResponse.Marshal(b, m, deterministic) +} +func (dst *ProbeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProbeResponse.Merge(dst, src) +} +func (m *ProbeResponse) XXX_Size() int { + return xxx_messageInfo_ProbeResponse.Size(m) +} +func (m *ProbeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ProbeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ProbeResponse proto.InternalMessageInfo + +func (m *ProbeResponse) GetReady() *wrappers.BoolValue { + if m != nil { + return m.Ready + } + return nil +} + +type CreateVolumeRequest struct { + // The suggested name for the storage space. This field is REQUIRED. + // It serves two purposes: + // 1) Idempotency - This name is generated by the CO to achieve + // idempotency. If `CreateVolume` fails, the volume may or may not + // be provisioned. In this case, the CO may call `CreateVolume` + // again, with the same name, to ensure the volume exists. 
The + // Plugin should ensure that multiple `CreateVolume` calls for the + // same name do not result in more than one piece of storage + // provisioned corresponding to that name. If a Plugin is unable to + // enforce idempotency, the CO's error recovery logic could result + // in multiple (unused) volumes being provisioned. + // 2) Suggested name - Some storage systems allow callers to specify + // an identifier by which to refer to the newly provisioned + // storage. If a storage system supports this, it can optionally + // use this name as the identifier for the new volume. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + CapacityRange *CapacityRange `protobuf:"bytes,2,opt,name=capacity_range,json=capacityRange" json:"capacity_range,omitempty"` + // The capabilities that the provisioned volume MUST have: the Plugin + // MUST provision a volume that could satisfy ALL of the + // capabilities specified in this list. The Plugin MUST assume that + // the CO MAY use the provisioned volume later with ANY of the + // capabilities specified in this list. This also enables the CO to do + // early validation: if ANY of the specified volume capabilities are + // not supported by the Plugin, the call SHALL fail. This field is + // REQUIRED. + VolumeCapabilities []*VolumeCapability `protobuf:"bytes,3,rep,name=volume_capabilities,json=volumeCapabilities" json:"volume_capabilities,omitempty"` + // Plugin specific parameters passed in as opaque key-value pairs. + // This field is OPTIONAL. The Plugin is responsible for parsing and + // validating these parameters. COs will treat these as opaque. + Parameters map[string]string `protobuf:"bytes,4,rep,name=parameters" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Secrets required by plugin to complete volume creation request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + ControllerCreateSecrets map[string]string `protobuf:"bytes,5,rep,name=controller_create_secrets,json=controllerCreateSecrets" json:"controller_create_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // If specified, the new volume will be pre-populated with data from + // this source. This field is OPTIONAL. + VolumeContentSource *VolumeContentSource `protobuf:"bytes,6,opt,name=volume_content_source,json=volumeContentSource" json:"volume_content_source,omitempty"` + // Specifies where (regions, zones, racks, etc.) the provisioned + // volume MUST be accessible from. + // An SP SHALL advertise the requirements for topological + // accessibility information in documentation. COs SHALL only specify + // topological accessibility information supported by the SP. + // This field is OPTIONAL. + // This field SHALL NOT be specified unless the SP has the + // ACCESSIBILITY_CONSTRAINTS plugin capability. + // If this field is not specified and the SP has the + // ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY choose + // where the provisioned volume is accessible from. 
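+ //
+ // As an illustrative sketch only (the name, size, and filesystem are
+ // assumptions, not spec requirements), a CO asking for a 1GiB
+ // single-writer mount volume, with no topology constraints, might
+ // build:
+ //
+ //	req := &CreateVolumeRequest{
+ //		Name:          "pvc-unique-name",
+ //		CapacityRange: &CapacityRange{RequiredBytes: 1 << 30},
+ //		VolumeCapabilities: []*VolumeCapability{{
+ //			AccessType: &VolumeCapability_Mount{Mount: &VolumeCapability_MountVolume{FsType: "ext4"}},
+ //			AccessMode: &VolumeCapability_AccessMode{Mode: VolumeCapability_AccessMode_SINGLE_NODE_WRITER},
+ //		}},
+ //	}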
+ AccessibilityRequirements *TopologyRequirement `protobuf:"bytes,7,opt,name=accessibility_requirements,json=accessibilityRequirements" json:"accessibility_requirements,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateVolumeRequest) Reset() { *m = CreateVolumeRequest{} } +func (m *CreateVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*CreateVolumeRequest) ProtoMessage() {} +func (*CreateVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{7} +} +func (m *CreateVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateVolumeRequest.Unmarshal(m, b) +} +func (m *CreateVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateVolumeRequest.Marshal(b, m, deterministic) +} +func (dst *CreateVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateVolumeRequest.Merge(dst, src) +} +func (m *CreateVolumeRequest) XXX_Size() int { + return xxx_messageInfo_CreateVolumeRequest.Size(m) +} +func (m *CreateVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateVolumeRequest proto.InternalMessageInfo + +func (m *CreateVolumeRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateVolumeRequest) GetCapacityRange() *CapacityRange { + if m != nil { + return m.CapacityRange + } + return nil +} + +func (m *CreateVolumeRequest) GetVolumeCapabilities() []*VolumeCapability { + if m != nil { + return m.VolumeCapabilities + } + return nil +} + +func (m *CreateVolumeRequest) GetParameters() map[string]string { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *CreateVolumeRequest) GetControllerCreateSecrets() map[string]string { + if m != nil { + return m.ControllerCreateSecrets + } + return nil +} + +func (m *CreateVolumeRequest) GetVolumeContentSource() *VolumeContentSource { + if m != nil { + return m.VolumeContentSource + } + return nil +} + +func (m *CreateVolumeRequest) GetAccessibilityRequirements() *TopologyRequirement { + if m != nil { + return m.AccessibilityRequirements + } + return nil +} + +// Specifies what source the volume will be created from. One of the +// type fields MUST be specified. 
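+// For illustration (a sketch; the snapshot ID is an assumed value), a
+// CO pre-populating a new volume from an existing snapshot would set:
+//
+//	src := &VolumeContentSource{
+//		Type: &VolumeContentSource_Snapshot{
+//			Snapshot: &VolumeContentSource_SnapshotSource{Id: "snap-123"},
+//		},
+//	}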
+type VolumeContentSource struct { + // Types that are valid to be assigned to Type: + // *VolumeContentSource_Snapshot + Type isVolumeContentSource_Type `protobuf_oneof:"type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VolumeContentSource) Reset() { *m = VolumeContentSource{} } +func (m *VolumeContentSource) String() string { return proto.CompactTextString(m) } +func (*VolumeContentSource) ProtoMessage() {} +func (*VolumeContentSource) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{8} +} +func (m *VolumeContentSource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VolumeContentSource.Unmarshal(m, b) +} +func (m *VolumeContentSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VolumeContentSource.Marshal(b, m, deterministic) +} +func (dst *VolumeContentSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeContentSource.Merge(dst, src) +} +func (m *VolumeContentSource) XXX_Size() int { + return xxx_messageInfo_VolumeContentSource.Size(m) +} +func (m *VolumeContentSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeContentSource.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeContentSource proto.InternalMessageInfo + +type isVolumeContentSource_Type interface { + isVolumeContentSource_Type() +} + +type VolumeContentSource_Snapshot struct { + Snapshot *VolumeContentSource_SnapshotSource `protobuf:"bytes,1,opt,name=snapshot,oneof"` +} + +func (*VolumeContentSource_Snapshot) isVolumeContentSource_Type() {} + +func (m *VolumeContentSource) GetType() isVolumeContentSource_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *VolumeContentSource) GetSnapshot() *VolumeContentSource_SnapshotSource { + if x, ok := m.GetType().(*VolumeContentSource_Snapshot); ok { + return x.Snapshot + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*VolumeContentSource) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _VolumeContentSource_OneofMarshaler, _VolumeContentSource_OneofUnmarshaler, _VolumeContentSource_OneofSizer, []interface{}{ + (*VolumeContentSource_Snapshot)(nil), + } +} + +func _VolumeContentSource_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*VolumeContentSource) + // type + switch x := m.Type.(type) { + case *VolumeContentSource_Snapshot: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Snapshot); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("VolumeContentSource.Type has unexpected type %T", x) + } + return nil +} + +func _VolumeContentSource_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*VolumeContentSource) + switch tag { + case 1: // type.snapshot + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(VolumeContentSource_SnapshotSource) + err := b.DecodeMessage(msg) + m.Type = &VolumeContentSource_Snapshot{msg} + return true, err + default: + return false, nil + } +} + +func _VolumeContentSource_OneofSizer(msg proto.Message) (n int) { + m := msg.(*VolumeContentSource) + // type + switch x := m.Type.(type) { + case *VolumeContentSource_Snapshot: + s := proto.Size(x.Snapshot) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type VolumeContentSource_SnapshotSource struct { + // Contains identity information for the existing source snapshot. + // This field is REQUIRED. Plugin is REQUIRED to support creating + // volume from snapshot if it supports the capability + // CREATE_DELETE_SNAPSHOT. 
+ Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VolumeContentSource_SnapshotSource) Reset() { *m = VolumeContentSource_SnapshotSource{} } +func (m *VolumeContentSource_SnapshotSource) String() string { return proto.CompactTextString(m) } +func (*VolumeContentSource_SnapshotSource) ProtoMessage() {} +func (*VolumeContentSource_SnapshotSource) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{8, 0} +} +func (m *VolumeContentSource_SnapshotSource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VolumeContentSource_SnapshotSource.Unmarshal(m, b) +} +func (m *VolumeContentSource_SnapshotSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VolumeContentSource_SnapshotSource.Marshal(b, m, deterministic) +} +func (dst *VolumeContentSource_SnapshotSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeContentSource_SnapshotSource.Merge(dst, src) +} +func (m *VolumeContentSource_SnapshotSource) XXX_Size() int { + return xxx_messageInfo_VolumeContentSource_SnapshotSource.Size(m) +} +func (m *VolumeContentSource_SnapshotSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeContentSource_SnapshotSource.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeContentSource_SnapshotSource proto.InternalMessageInfo + +func (m *VolumeContentSource_SnapshotSource) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +type CreateVolumeResponse struct { + // Contains all attributes of the newly created volume that are + // relevant to the CO along with information required by the Plugin + // to uniquely identify the volume. This field is REQUIRED. + Volume *Volume `protobuf:"bytes,1,opt,name=volume" json:"volume,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateVolumeResponse) Reset() { *m = CreateVolumeResponse{} } +func (m *CreateVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*CreateVolumeResponse) ProtoMessage() {} +func (*CreateVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{9} +} +func (m *CreateVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateVolumeResponse.Unmarshal(m, b) +} +func (m *CreateVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateVolumeResponse.Marshal(b, m, deterministic) +} +func (dst *CreateVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateVolumeResponse.Merge(dst, src) +} +func (m *CreateVolumeResponse) XXX_Size() int { + return xxx_messageInfo_CreateVolumeResponse.Size(m) +} +func (m *CreateVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CreateVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateVolumeResponse proto.InternalMessageInfo + +func (m *CreateVolumeResponse) GetVolume() *Volume { + if m != nil { + return m.Volume + } + return nil +} + +// Specify a capability of a volume. +type VolumeCapability struct { + // Specifies what API the volume will be accessed using. One of the + // following fields MUST be specified. + // + // Types that are valid to be assigned to AccessType: + // *VolumeCapability_Block + // *VolumeCapability_Mount + AccessType isVolumeCapability_AccessType `protobuf_oneof:"access_type"` + // This is a REQUIRED field. 
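+ // For example (a sketch; the mode constant is declared near the top
+ // of this file), a raw block capability with multi-node read-only
+ // access could be expressed as:
+ //
+ //	capability := &VolumeCapability{
+ //		AccessType: &VolumeCapability_Block{Block: &VolumeCapability_BlockVolume{}},
+ //		AccessMode: &VolumeCapability_AccessMode{Mode: VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY},
+ //	}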
+ AccessMode *VolumeCapability_AccessMode `protobuf:"bytes,3,opt,name=access_mode,json=accessMode" json:"access_mode,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VolumeCapability) Reset() { *m = VolumeCapability{} } +func (m *VolumeCapability) String() string { return proto.CompactTextString(m) } +func (*VolumeCapability) ProtoMessage() {} +func (*VolumeCapability) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{10} +} +func (m *VolumeCapability) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VolumeCapability.Unmarshal(m, b) +} +func (m *VolumeCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VolumeCapability.Marshal(b, m, deterministic) +} +func (dst *VolumeCapability) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeCapability.Merge(dst, src) +} +func (m *VolumeCapability) XXX_Size() int { + return xxx_messageInfo_VolumeCapability.Size(m) +} +func (m *VolumeCapability) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeCapability.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeCapability proto.InternalMessageInfo + +type isVolumeCapability_AccessType interface { + isVolumeCapability_AccessType() +} + +type VolumeCapability_Block struct { + Block *VolumeCapability_BlockVolume `protobuf:"bytes,1,opt,name=block,oneof"` +} +type VolumeCapability_Mount struct { + Mount *VolumeCapability_MountVolume `protobuf:"bytes,2,opt,name=mount,oneof"` +} + +func (*VolumeCapability_Block) isVolumeCapability_AccessType() {} +func (*VolumeCapability_Mount) isVolumeCapability_AccessType() {} + +func (m *VolumeCapability) GetAccessType() isVolumeCapability_AccessType { + if m != nil { + return m.AccessType + } + return nil +} + +func (m *VolumeCapability) GetBlock() *VolumeCapability_BlockVolume { + if x, ok := m.GetAccessType().(*VolumeCapability_Block); ok { + return x.Block + } + return nil +} + +func (m *VolumeCapability) GetMount() *VolumeCapability_MountVolume { + if x, ok := m.GetAccessType().(*VolumeCapability_Mount); ok { + return x.Mount + } + return nil +} + +func (m *VolumeCapability) GetAccessMode() *VolumeCapability_AccessMode { + if m != nil { + return m.AccessMode + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
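+// As with the other oneofs in this file, callers usually inspect the
+// access type through the typed accessors rather than these internal
+// hooks. A sketch (vc is an assumed *VolumeCapability):
+//
+//	if mnt := vc.GetMount(); mnt != nil {
+//		fsType := mnt.GetFsType() // "" means unspecified
+//		_ = fsType
+//	} else if vc.GetBlock() != nil {
+//		// raw block access was requested
+//	}
+//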
+func (*VolumeCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _VolumeCapability_OneofMarshaler, _VolumeCapability_OneofUnmarshaler, _VolumeCapability_OneofSizer, []interface{}{ + (*VolumeCapability_Block)(nil), + (*VolumeCapability_Mount)(nil), + } +} + +func _VolumeCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*VolumeCapability) + // access_type + switch x := m.AccessType.(type) { + case *VolumeCapability_Block: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Block); err != nil { + return err + } + case *VolumeCapability_Mount: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Mount); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("VolumeCapability.AccessType has unexpected type %T", x) + } + return nil +} + +func _VolumeCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*VolumeCapability) + switch tag { + case 1: // access_type.block + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(VolumeCapability_BlockVolume) + err := b.DecodeMessage(msg) + m.AccessType = &VolumeCapability_Block{msg} + return true, err + case 2: // access_type.mount + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(VolumeCapability_MountVolume) + err := b.DecodeMessage(msg) + m.AccessType = &VolumeCapability_Mount{msg} + return true, err + default: + return false, nil + } +} + +func _VolumeCapability_OneofSizer(msg proto.Message) (n int) { + m := msg.(*VolumeCapability) + // access_type + switch x := m.AccessType.(type) { + case *VolumeCapability_Block: + s := proto.Size(x.Block) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *VolumeCapability_Mount: + s := proto.Size(x.Mount) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Indicate that the volume will be accessed via the block device API. 
+type VolumeCapability_BlockVolume struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VolumeCapability_BlockVolume) Reset() { *m = VolumeCapability_BlockVolume{} } +func (m *VolumeCapability_BlockVolume) String() string { return proto.CompactTextString(m) } +func (*VolumeCapability_BlockVolume) ProtoMessage() {} +func (*VolumeCapability_BlockVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{10, 0} +} +func (m *VolumeCapability_BlockVolume) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VolumeCapability_BlockVolume.Unmarshal(m, b) +} +func (m *VolumeCapability_BlockVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VolumeCapability_BlockVolume.Marshal(b, m, deterministic) +} +func (dst *VolumeCapability_BlockVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeCapability_BlockVolume.Merge(dst, src) +} +func (m *VolumeCapability_BlockVolume) XXX_Size() int { + return xxx_messageInfo_VolumeCapability_BlockVolume.Size(m) +} +func (m *VolumeCapability_BlockVolume) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeCapability_BlockVolume.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeCapability_BlockVolume proto.InternalMessageInfo + +// Indicate that the volume will be accessed via the filesystem API. +type VolumeCapability_MountVolume struct { + // The filesystem type. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. + FsType string `protobuf:"bytes,1,opt,name=fs_type,json=fsType" json:"fs_type,omitempty"` + // The mount options that can be used for the volume. This field is + // OPTIONAL. `mount_flags` MAY contain sensitive information. + // Therefore, the CO and the Plugin MUST NOT leak this information + // to untrusted entities. The total size of this repeated field + // SHALL NOT exceed 4 KiB. 
+ MountFlags []string `protobuf:"bytes,2,rep,name=mount_flags,json=mountFlags" json:"mount_flags,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *VolumeCapability_MountVolume) Reset() { *m = VolumeCapability_MountVolume{} }
+func (m *VolumeCapability_MountVolume) String() string { return proto.CompactTextString(m) }
+func (*VolumeCapability_MountVolume) ProtoMessage() {}
+func (*VolumeCapability_MountVolume) Descriptor() ([]byte, []int) {
+ return fileDescriptor_csi_31237507707d37ec, []int{10, 1}
+}
+func (m *VolumeCapability_MountVolume) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_VolumeCapability_MountVolume.Unmarshal(m, b)
+}
+func (m *VolumeCapability_MountVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_VolumeCapability_MountVolume.Marshal(b, m, deterministic)
+}
+func (dst *VolumeCapability_MountVolume) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_VolumeCapability_MountVolume.Merge(dst, src)
+}
+func (m *VolumeCapability_MountVolume) XXX_Size() int {
+ return xxx_messageInfo_VolumeCapability_MountVolume.Size(m)
+}
+func (m *VolumeCapability_MountVolume) XXX_DiscardUnknown() {
+ xxx_messageInfo_VolumeCapability_MountVolume.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeCapability_MountVolume proto.InternalMessageInfo
+
+func (m *VolumeCapability_MountVolume) GetFsType() string {
+ if m != nil {
+ return m.FsType
+ }
+ return ""
+}
+
+func (m *VolumeCapability_MountVolume) GetMountFlags() []string {
+ if m != nil {
+ return m.MountFlags
+ }
+ return nil
+}
+
+// Specify how a volume can be accessed.
+type VolumeCapability_AccessMode struct {
+ // This field is REQUIRED.
+ Mode VolumeCapability_AccessMode_Mode `protobuf:"varint,1,opt,name=mode,enum=csi.v0.VolumeCapability_AccessMode_Mode" json:"mode,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *VolumeCapability_AccessMode) Reset() { *m = VolumeCapability_AccessMode{} }
+func (m *VolumeCapability_AccessMode) String() string { return proto.CompactTextString(m) }
+func (*VolumeCapability_AccessMode) ProtoMessage() {}
+func (*VolumeCapability_AccessMode) Descriptor() ([]byte, []int) {
+ return fileDescriptor_csi_31237507707d37ec, []int{10, 2}
+}
+func (m *VolumeCapability_AccessMode) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_VolumeCapability_AccessMode.Unmarshal(m, b)
+}
+func (m *VolumeCapability_AccessMode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_VolumeCapability_AccessMode.Marshal(b, m, deterministic)
+}
+func (dst *VolumeCapability_AccessMode) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_VolumeCapability_AccessMode.Merge(dst, src)
+}
+func (m *VolumeCapability_AccessMode) XXX_Size() int {
+ return xxx_messageInfo_VolumeCapability_AccessMode.Size(m)
+}
+func (m *VolumeCapability_AccessMode) XXX_DiscardUnknown() {
+ xxx_messageInfo_VolumeCapability_AccessMode.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeCapability_AccessMode proto.InternalMessageInfo
+
+func (m *VolumeCapability_AccessMode) GetMode() VolumeCapability_AccessMode_Mode {
+ if m != nil {
+ return m.Mode
+ }
+ return VolumeCapability_AccessMode_UNKNOWN
+}
+
+// The capacity of the storage space in bytes. To specify an exact size,
+// `required_bytes` and `limit_bytes` SHALL be set to the same value. At
+// least one of these fields MUST be specified.
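+// For example (sketch), an exact 10GiB request sets both fields:
+//
+//	&CapacityRange{RequiredBytes: 10 << 30, LimitBytes: 10 << 30}
+//
+// while "at least 10GiB, no upper bound" leaves limit_bytes unset:
+//
+//	&CapacityRange{RequiredBytes: 10 << 30}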
+type CapacityRange struct { + // Volume MUST be at least this big. This field is OPTIONAL. + // A value of 0 is equal to an unspecified field value. + // The value of this field MUST NOT be negative. + RequiredBytes int64 `protobuf:"varint,1,opt,name=required_bytes,json=requiredBytes" json:"required_bytes,omitempty"` + // Volume MUST not be bigger than this. This field is OPTIONAL. + // A value of 0 is equal to an unspecified field value. + // The value of this field MUST NOT be negative. + LimitBytes int64 `protobuf:"varint,2,opt,name=limit_bytes,json=limitBytes" json:"limit_bytes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CapacityRange) Reset() { *m = CapacityRange{} } +func (m *CapacityRange) String() string { return proto.CompactTextString(m) } +func (*CapacityRange) ProtoMessage() {} +func (*CapacityRange) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{11} +} +func (m *CapacityRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CapacityRange.Unmarshal(m, b) +} +func (m *CapacityRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CapacityRange.Marshal(b, m, deterministic) +} +func (dst *CapacityRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_CapacityRange.Merge(dst, src) +} +func (m *CapacityRange) XXX_Size() int { + return xxx_messageInfo_CapacityRange.Size(m) +} +func (m *CapacityRange) XXX_DiscardUnknown() { + xxx_messageInfo_CapacityRange.DiscardUnknown(m) +} + +var xxx_messageInfo_CapacityRange proto.InternalMessageInfo + +func (m *CapacityRange) GetRequiredBytes() int64 { + if m != nil { + return m.RequiredBytes + } + return 0 +} + +func (m *CapacityRange) GetLimitBytes() int64 { + if m != nil { + return m.LimitBytes + } + return 0 +} + +// The information about a provisioned volume. +type Volume struct { + // The capacity of the volume in bytes. This field is OPTIONAL. If not + // set (value of 0), it indicates that the capacity of the volume is + // unknown (e.g., NFS share). + // The value of this field MUST NOT be negative. + CapacityBytes int64 `protobuf:"varint,1,opt,name=capacity_bytes,json=capacityBytes" json:"capacity_bytes,omitempty"` + // Contains identity information for the created volume. This field is + // REQUIRED. The identity information will be used by the CO in + // subsequent calls to refer to the provisioned volume. + Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` + // Attributes reflect static properties of a volume and MUST be passed + // to volume validation and publishing calls. + // Attributes SHALL be opaque to a CO. Attributes SHALL NOT be mutable + // and SHALL be safe for the CO to cache. Attributes SHOULD NOT + // contain sensitive information. Attributes MAY NOT uniquely identify + // a volume. A volume uniquely identified by `id` SHALL always report + // the same attributes. This field is OPTIONAL and when present MUST + // be passed to volume validation and publishing calls. + Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // If specified, indicates that the volume is not empty and is + // pre-populated with data from the specified source. + // This field is OPTIONAL. 
+ ContentSource *VolumeContentSource `protobuf:"bytes,4,opt,name=content_source,json=contentSource" json:"content_source,omitempty"` + // Specifies where (regions, zones, racks, etc.) the provisioned + // volume is accessible from. + // A plugin that returns this field MUST also set the + // ACCESSIBILITY_CONSTRAINTS plugin capability. + // An SP MAY specify multiple topologies to indicate the volume is + // accessible from multiple locations. + // COs MAY use this information along with the topology information + // returned by NodeGetInfo to ensure that a given volume is accessible + // from a given node when scheduling workloads. + // This field is OPTIONAL. If it is not specified, the CO MAY assume + // the volume is equally accessible from all nodes in the cluster and + // may schedule workloads referencing the volume on any available + // node. + // + // Example 1: + // accessible_topology = {"region": "R1", "zone": "Z2"} + // Indicates a volume accessible only from the "region" "R1" and the + // "zone" "Z2". + // + // Example 2: + // accessible_topology = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // Indicates a volume accessible from both "zone" "Z2" and "zone" "Z3" + // in the "region" "R1". + AccessibleTopology []*Topology `protobuf:"bytes,5,rep,name=accessible_topology,json=accessibleTopology" json:"accessible_topology,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Volume) Reset() { *m = Volume{} } +func (m *Volume) String() string { return proto.CompactTextString(m) } +func (*Volume) ProtoMessage() {} +func (*Volume) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{12} +} +func (m *Volume) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Volume.Unmarshal(m, b) +} +func (m *Volume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Volume.Marshal(b, m, deterministic) +} +func (dst *Volume) XXX_Merge(src proto.Message) { + xxx_messageInfo_Volume.Merge(dst, src) +} +func (m *Volume) XXX_Size() int { + return xxx_messageInfo_Volume.Size(m) +} +func (m *Volume) XXX_DiscardUnknown() { + xxx_messageInfo_Volume.DiscardUnknown(m) +} + +var xxx_messageInfo_Volume proto.InternalMessageInfo + +func (m *Volume) GetCapacityBytes() int64 { + if m != nil { + return m.CapacityBytes + } + return 0 +} + +func (m *Volume) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Volume) GetAttributes() map[string]string { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Volume) GetContentSource() *VolumeContentSource { + if m != nil { + return m.ContentSource + } + return nil +} + +func (m *Volume) GetAccessibleTopology() []*Topology { + if m != nil { + return m.AccessibleTopology + } + return nil +} + +type TopologyRequirement struct { + // Specifies the list of topologies the provisioned volume MUST be + // accessible from. + // This field is OPTIONAL. If TopologyRequirement is specified either + // requisite or preferred or both MUST be specified. + // + // If requisite is specified, the provisioned volume MUST be + // accessible from at least one of the requisite topologies. + // + // Given + // x = number of topologies provisioned volume is accessible from + // n = number of requisite topologies + // The CO MUST ensure n >= 1. 
The SP MUST ensure x >= 1
+ // If x==n, then the SP MUST make the provisioned volume available to
+ // all topologies from the list of requisite topologies. If it is
+ // unable to do so, the SP MUST fail the CreateVolume call.
+ // For example, if a volume should be accessible from a single zone,
+ // and requisite =
+ // {"region": "R1", "zone": "Z2"}
+ // then the provisioned volume MUST be accessible from the "region"
+ // "R1" and the "zone" "Z2".
+ // Similarly, if a volume should be accessible from two zones, and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"}
+ // then the provisioned volume MUST be accessible from the "region"
+ // "R1" and both "zone" "Z2" and "zone" "Z3".
+ //
+ // If x<n, then the SP SHALL choose x unique topologies from the list
+ // of requisite topologies. If it is unable to do so, the SP MUST fail
+ // the CreateVolume call.
+ //
+ // If x>n, then the SP MUST make the provisioned volume available from
+ // all topologies from the list of requisite topologies and MAY choose
+ // the remaining x-n unique topologies from the list of all possible
+ // topologies. If it is unable to do so, the SP MUST fail the
+ // CreateVolume call.
+ // For example, if a volume should be accessible from two zones, and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"}
+ // then the provisioned volume MUST be accessible from the "region"
+ // "R1" and the "zone" "Z2" and the SP may select the second zone
+ // independently, e.g. "R1/Z4".
+ Requisite []*Topology `protobuf:"bytes,1,rep,name=requisite" json:"requisite,omitempty"`
+ // Specifies the list of topologies the CO would prefer the volume to
+ // be provisioned in.
+ //
+ // This field is OPTIONAL. If TopologyRequirement is specified either
+ // requisite or preferred or both MUST be specified.
+ //
+ // An SP MUST attempt to make the provisioned volume available using
+ // the preferred topologies in order from first to last.
+ //
+ // If requisite is specified, all topologies in preferred list MUST
+ // also be present in the list of requisite topologies.
+ //
+ // If the SP is unable to make the provisioned volume available
+ // from any of the preferred topologies, the SP MAY choose a topology
+ // from the list of requisite topologies.
+ // If the list of requisite topologies is not specified, then the SP
+ // MAY choose from the list of all possible topologies.
+ // If the list of requisite topologies is specified and the SP is
+ // unable to make the provisioned volume available from any of the
+ // requisite topologies it MUST fail the CreateVolume call.
+ //
+ // Example 1:
+ // Given a volume should be accessible from a single zone, and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"}
+ // preferred =
+ // {"region": "R1", "zone": "Z3"}
+ // then the SP SHOULD first attempt to make the provisioned volume
+ // available from "zone" "Z3" in the "region" "R1" and fall back to
+ // "zone" "Z2" in the "region" "R1" if that is not possible.
+ //
+ // Example 2:
+ // Given a volume should be accessible from a single zone, and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"},
+ // {"region": "R1", "zone": "Z4"},
+ // {"region": "R1", "zone": "Z5"}
+ // preferred =
+ // {"region": "R1", "zone": "Z4"},
+ // {"region": "R1", "zone": "Z2"}
+ // then the SP SHOULD first attempt to make the provisioned volume
+ // accessible from "zone" "Z4" in the "region" "R1" and fall back to
+ // "zone" "Z2" in the "region" "R1" if that is not possible. If that
+ // is not possible, the SP may choose between either the "zone"
+ // "Z3" or "Z5" in the "region" "R1".
+ //
+ // Example 3:
+ // Given a volume should be accessible from TWO zones (because an
+ // opaque parameter in CreateVolumeRequest, for example, specifies
+ // the volume is accessible from two zones, aka synchronously
+ // replicated), and
+ // requisite =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"},
+ // {"region": "R1", "zone": "Z4"},
+ // {"region": "R1", "zone": "Z5"}
+ // preferred =
+ // {"region": "R1", "zone": "Z5"},
+ // {"region": "R1", "zone": "Z3"}
+ // then the SP SHOULD first attempt to make the provisioned volume
+ // accessible from the combination of the two "zones" "Z5" and "Z3" in
+ // the "region" "R1". If that's not possible, it should fall back to
+ // a combination of "Z5" and other possibilities from the list of
+ // requisite. If that's not possible, it should fall back to a
+ // combination of "Z3" and other possibilities from the list of
+ // requisite. If that's not possible, it should fall back to a
+ // combination of other possibilities from the list of requisite.
+ Preferred []*Topology `protobuf:"bytes,2,rep,name=preferred" json:"preferred,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *TopologyRequirement) Reset() { *m = TopologyRequirement{} }
+func (m *TopologyRequirement) String() string { return proto.CompactTextString(m) }
+func (*TopologyRequirement) ProtoMessage() {}
+func (*TopologyRequirement) Descriptor() ([]byte, []int) {
+ return fileDescriptor_csi_31237507707d37ec, []int{13}
+}
+func (m *TopologyRequirement) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_TopologyRequirement.Unmarshal(m, b)
+}
+func (m *TopologyRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_TopologyRequirement.Marshal(b, m, deterministic)
+}
+func (dst *TopologyRequirement) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TopologyRequirement.Merge(dst, src)
+}
+func (m *TopologyRequirement) XXX_Size() int {
+ return xxx_messageInfo_TopologyRequirement.Size(m)
+}
+func (m *TopologyRequirement) XXX_DiscardUnknown() {
+ xxx_messageInfo_TopologyRequirement.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TopologyRequirement proto.InternalMessageInfo
+
+func (m *TopologyRequirement) GetRequisite() []*Topology {
+ if m != nil {
+ return m.Requisite
+ }
+ return nil
+}
+
+func (m *TopologyRequirement) GetPreferred() []*Topology {
+ if m != nil {
+ return m.Preferred
+ }
+ return nil
+}
+
+// Topology is a map of topological domains to topological segments.
+// A topological domain is a sub-division of a cluster, like "region",
+// "zone", "rack", etc.
+// A topological segment is a specific instance of a topological domain,
+// like "zone3", "rack3", etc.
+// For example {"com.company/zone": "Z1", "com.company/rack": "R3"}
+// Valid keys have two segments: an optional prefix and name, separated
+// by a slash (/), for example: "com.company.example/zone".
+// The key name segment is required. The prefix is optional.
+// Both the key name and the prefix MUST each be 63 characters or less,
+// begin and end with an alphanumeric character ([a-z0-9A-Z]) and
+// contain only dashes (-), underscores (_), dots (.), or alphanumerics
+// in between, for example "zone".
+// The key prefix MUST follow reverse domain name notation format
+// (https://en.wikipedia.org/wiki/Reverse_domain_name_notation).
+// The key prefix SHOULD include the plugin's host company name and/or
+// the plugin name, to minimize the possibility of collisions with keys
+// from other plugins.
+// If a key prefix is specified, it MUST be identical across all
+// topology keys returned by the SP (across all RPCs).
+// Keys MUST be case-insensitive. This means the keys "Zone" and "zone"
+// MUST not both exist.
+// Each value (topological segment) MUST contain 1 or more strings.
+// Each string MUST be 63 characters or less and begin and end with an
+// alphanumeric character with '-', '_', '.', or alphanumerics in
+// between.
+type Topology struct {
+ Segments map[string]string `protobuf:"bytes,1,rep,name=segments" json:"segments,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Topology) Reset() { *m = Topology{} }
+func (m *Topology) String() string { return proto.CompactTextString(m) }
+func (*Topology) ProtoMessage() {}
+func (*Topology) Descriptor() ([]byte, []int) {
+ return fileDescriptor_csi_31237507707d37ec, []int{14}
+}
+func (m *Topology) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Topology.Unmarshal(m, b)
+}
+func (m *Topology) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Topology.Marshal(b, m, deterministic)
+}
+func (dst *Topology) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Topology.Merge(dst, src)
+}
+func (m *Topology) XXX_Size() int {
+ return xxx_messageInfo_Topology.Size(m)
+}
+func (m *Topology) XXX_DiscardUnknown() {
+ xxx_messageInfo_Topology.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Topology proto.InternalMessageInfo
+
+func (m *Topology) GetSegments() map[string]string {
+ if m != nil {
+ return m.Segments
+ }
+ return nil
+}
+
+type DeleteVolumeRequest struct {
+ // The ID of the volume to be deprovisioned.
+ // This field is REQUIRED.
+ VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
+ // Secrets required by plugin to complete volume deletion request.
+ // This field is OPTIONAL. Refer to the `Secrets Requirements`
+ // section on how to use this field.
+ ControllerDeleteSecrets map[string]string `protobuf:"bytes,2,rep,name=controller_delete_secrets,json=controllerDeleteSecrets" json:"controller_delete_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteVolumeRequest) Reset() { *m = DeleteVolumeRequest{} } +func (m *DeleteVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteVolumeRequest) ProtoMessage() {} +func (*DeleteVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{15} +} +func (m *DeleteVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteVolumeRequest.Unmarshal(m, b) +} +func (m *DeleteVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteVolumeRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteVolumeRequest.Merge(dst, src) +} +func (m *DeleteVolumeRequest) XXX_Size() int { + return xxx_messageInfo_DeleteVolumeRequest.Size(m) +} +func (m *DeleteVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteVolumeRequest proto.InternalMessageInfo + +func (m *DeleteVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *DeleteVolumeRequest) GetControllerDeleteSecrets() map[string]string { + if m != nil { + return m.ControllerDeleteSecrets + } + return nil +} + +type DeleteVolumeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteVolumeResponse) Reset() { *m = DeleteVolumeResponse{} } +func (m *DeleteVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteVolumeResponse) ProtoMessage() {} +func (*DeleteVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{16} +} +func (m *DeleteVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteVolumeResponse.Unmarshal(m, b) +} +func (m *DeleteVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteVolumeResponse.Marshal(b, m, deterministic) +} +func (dst *DeleteVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteVolumeResponse.Merge(dst, src) +} +func (m *DeleteVolumeResponse) XXX_Size() int { + return xxx_messageInfo_DeleteVolumeResponse.Size(m) +} +func (m *DeleteVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteVolumeResponse proto.InternalMessageInfo + +type ControllerPublishVolumeRequest struct { + // The ID of the volume to be used on a node. + // This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + // The ID of the node. This field is REQUIRED. The CO SHALL set this + // field to match the node ID returned by `NodeGetInfo`. + NodeId string `protobuf:"bytes,2,opt,name=node_id,json=nodeId" json:"node_id,omitempty"` + // The capability of the volume the CO expects the volume to have. + // This is a REQUIRED field. 
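+ //
+ // A CO-side sketch of the publish call (conn, ctx, the IDs, and the
+ // capability value are assumptions for illustration):
+ //
+ //	client := NewControllerClient(conn)
+ //	resp, err := client.ControllerPublishVolume(ctx, &ControllerPublishVolumeRequest{
+ //		VolumeId:         "vol-1",
+ //		NodeId:           "node-a",
+ //		VolumeCapability: capability,
+ //		Readonly:         false,
+ //	})
+ //	// On success, resp.GetPublishInfo() is handed to NodeStageVolume /
+ //	// NodePublishVolume on the target node.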
+ VolumeCapability *VolumeCapability `protobuf:"bytes,3,opt,name=volume_capability,json=volumeCapability" json:"volume_capability,omitempty"` + // Whether to publish the volume in readonly mode. This field is + // REQUIRED. + Readonly bool `protobuf:"varint,4,opt,name=readonly" json:"readonly,omitempty"` + // Secrets required by plugin to complete controller publish volume + // request. This field is OPTIONAL. Refer to the + // `Secrets Requirements` section on how to use this field. + ControllerPublishSecrets map[string]string `protobuf:"bytes,5,rep,name=controller_publish_secrets,json=controllerPublishSecrets" json:"controller_publish_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Attributes of the volume to be used on a node. This field is + // OPTIONAL and MUST match the attributes of the Volume identified + // by `volume_id`. + VolumeAttributes map[string]string `protobuf:"bytes,6,rep,name=volume_attributes,json=volumeAttributes" json:"volume_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerPublishVolumeRequest) Reset() { *m = ControllerPublishVolumeRequest{} } +func (m *ControllerPublishVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*ControllerPublishVolumeRequest) ProtoMessage() {} +func (*ControllerPublishVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{17} +} +func (m *ControllerPublishVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerPublishVolumeRequest.Unmarshal(m, b) +} +func (m *ControllerPublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerPublishVolumeRequest.Marshal(b, m, deterministic) +} +func (dst *ControllerPublishVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerPublishVolumeRequest.Merge(dst, src) +} +func (m *ControllerPublishVolumeRequest) XXX_Size() int { + return xxx_messageInfo_ControllerPublishVolumeRequest.Size(m) +} +func (m *ControllerPublishVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerPublishVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerPublishVolumeRequest proto.InternalMessageInfo + +func (m *ControllerPublishVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *ControllerPublishVolumeRequest) GetNodeId() string { + if m != nil { + return m.NodeId + } + return "" +} + +func (m *ControllerPublishVolumeRequest) GetVolumeCapability() *VolumeCapability { + if m != nil { + return m.VolumeCapability + } + return nil +} + +func (m *ControllerPublishVolumeRequest) GetReadonly() bool { + if m != nil { + return m.Readonly + } + return false +} + +func (m *ControllerPublishVolumeRequest) GetControllerPublishSecrets() map[string]string { + if m != nil { + return m.ControllerPublishSecrets + } + return nil +} + +func (m *ControllerPublishVolumeRequest) GetVolumeAttributes() map[string]string { + if m != nil { + return m.VolumeAttributes + } + return nil +} + +type ControllerPublishVolumeResponse struct { + // The SP specific information that will be passed to the Plugin in + // the subsequent `NodeStageVolume` or `NodePublishVolume` calls + // for the given volume. + // This information is opaque to the CO. This field is OPTIONAL. 
+ PublishInfo map[string]string `protobuf:"bytes,1,rep,name=publish_info,json=publishInfo" json:"publish_info,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerPublishVolumeResponse) Reset() { *m = ControllerPublishVolumeResponse{} } +func (m *ControllerPublishVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*ControllerPublishVolumeResponse) ProtoMessage() {} +func (*ControllerPublishVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{18} +} +func (m *ControllerPublishVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerPublishVolumeResponse.Unmarshal(m, b) +} +func (m *ControllerPublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerPublishVolumeResponse.Marshal(b, m, deterministic) +} +func (dst *ControllerPublishVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerPublishVolumeResponse.Merge(dst, src) +} +func (m *ControllerPublishVolumeResponse) XXX_Size() int { + return xxx_messageInfo_ControllerPublishVolumeResponse.Size(m) +} +func (m *ControllerPublishVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerPublishVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerPublishVolumeResponse proto.InternalMessageInfo + +func (m *ControllerPublishVolumeResponse) GetPublishInfo() map[string]string { + if m != nil { + return m.PublishInfo + } + return nil +} + +type ControllerUnpublishVolumeRequest struct { + // The ID of the volume. This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + // The ID of the node. This field is OPTIONAL. The CO SHOULD set this + // field to match the node ID returned by `NodeGetInfo` or leave it + // unset. If the value is set, the SP MUST unpublish the volume from + // the specified node. If the value is unset, the SP MUST unpublish + // the volume from all nodes it is published to. + NodeId string `protobuf:"bytes,2,opt,name=node_id,json=nodeId" json:"node_id,omitempty"` + // Secrets required by plugin to complete controller unpublish volume + // request. This SHOULD be the same secrets passed to the + // ControllerPublishVolume call for the specified volume. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. 
+ ControllerUnpublishSecrets map[string]string `protobuf:"bytes,3,rep,name=controller_unpublish_secrets,json=controllerUnpublishSecrets" json:"controller_unpublish_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerUnpublishVolumeRequest) Reset() { *m = ControllerUnpublishVolumeRequest{} } +func (m *ControllerUnpublishVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*ControllerUnpublishVolumeRequest) ProtoMessage() {} +func (*ControllerUnpublishVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{19} +} +func (m *ControllerUnpublishVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerUnpublishVolumeRequest.Unmarshal(m, b) +} +func (m *ControllerUnpublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerUnpublishVolumeRequest.Marshal(b, m, deterministic) +} +func (dst *ControllerUnpublishVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerUnpublishVolumeRequest.Merge(dst, src) +} +func (m *ControllerUnpublishVolumeRequest) XXX_Size() int { + return xxx_messageInfo_ControllerUnpublishVolumeRequest.Size(m) +} +func (m *ControllerUnpublishVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerUnpublishVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerUnpublishVolumeRequest proto.InternalMessageInfo + +func (m *ControllerUnpublishVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *ControllerUnpublishVolumeRequest) GetNodeId() string { + if m != nil { + return m.NodeId + } + return "" +} + +func (m *ControllerUnpublishVolumeRequest) GetControllerUnpublishSecrets() map[string]string { + if m != nil { + return m.ControllerUnpublishSecrets + } + return nil +} + +type ControllerUnpublishVolumeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerUnpublishVolumeResponse) Reset() { *m = ControllerUnpublishVolumeResponse{} } +func (m *ControllerUnpublishVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*ControllerUnpublishVolumeResponse) ProtoMessage() {} +func (*ControllerUnpublishVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{20} +} +func (m *ControllerUnpublishVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerUnpublishVolumeResponse.Unmarshal(m, b) +} +func (m *ControllerUnpublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerUnpublishVolumeResponse.Marshal(b, m, deterministic) +} +func (dst *ControllerUnpublishVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerUnpublishVolumeResponse.Merge(dst, src) +} +func (m *ControllerUnpublishVolumeResponse) XXX_Size() int { + return xxx_messageInfo_ControllerUnpublishVolumeResponse.Size(m) +} +func (m *ControllerUnpublishVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerUnpublishVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerUnpublishVolumeResponse proto.InternalMessageInfo + +type ValidateVolumeCapabilitiesRequest struct { + // The ID of the volume to check. This field is REQUIRED. 
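+ //
+ // An illustrative round trip (client, ctx, and values are assumed):
+ //
+ //	resp, err := client.ValidateVolumeCapabilities(ctx, &ValidateVolumeCapabilitiesRequest{
+ //		VolumeId:           "vol-1",
+ //		VolumeCapabilities: []*VolumeCapability{capability},
+ //	})
+ //	if err == nil && !resp.GetSupported() {
+ //		// resp.GetMessage() explains why the capabilities are unsupported
+ //	}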
+ VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + // The capabilities that the CO wants to check for the volume. This + // call SHALL return "supported" only if all the volume capabilities + // specified below are supported. This field is REQUIRED. + VolumeCapabilities []*VolumeCapability `protobuf:"bytes,2,rep,name=volume_capabilities,json=volumeCapabilities" json:"volume_capabilities,omitempty"` + // Attributes of the volume to check. This field is OPTIONAL and MUST + // match the attributes of the Volume identified by `volume_id`. + VolumeAttributes map[string]string `protobuf:"bytes,3,rep,name=volume_attributes,json=volumeAttributes" json:"volume_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Specifies where (regions, zones, racks, etc.) the caller believes + // the volume is accessible from. + // A caller MAY specify multiple topologies to indicate they believe + // the volume to be accessible from multiple locations. + // This field is OPTIONAL. This field SHALL NOT be set unless the + // plugin advertises the ACCESSIBILITY_CONSTRAINTS capability. + AccessibleTopology []*Topology `protobuf:"bytes,4,rep,name=accessible_topology,json=accessibleTopology" json:"accessible_topology,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateVolumeCapabilitiesRequest) Reset() { *m = ValidateVolumeCapabilitiesRequest{} } +func (m *ValidateVolumeCapabilitiesRequest) String() string { return proto.CompactTextString(m) } +func (*ValidateVolumeCapabilitiesRequest) ProtoMessage() {} +func (*ValidateVolumeCapabilitiesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{21} +} +func (m *ValidateVolumeCapabilitiesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Unmarshal(m, b) +} +func (m *ValidateVolumeCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Marshal(b, m, deterministic) +} +func (dst *ValidateVolumeCapabilitiesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Merge(dst, src) +} +func (m *ValidateVolumeCapabilitiesRequest) XXX_Size() int { + return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Size(m) +} +func (m *ValidateVolumeCapabilitiesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateVolumeCapabilitiesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateVolumeCapabilitiesRequest proto.InternalMessageInfo + +func (m *ValidateVolumeCapabilitiesRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *ValidateVolumeCapabilitiesRequest) GetVolumeCapabilities() []*VolumeCapability { + if m != nil { + return m.VolumeCapabilities + } + return nil +} + +func (m *ValidateVolumeCapabilitiesRequest) GetVolumeAttributes() map[string]string { + if m != nil { + return m.VolumeAttributes + } + return nil +} + +func (m *ValidateVolumeCapabilitiesRequest) GetAccessibleTopology() []*Topology { + if m != nil { + return m.AccessibleTopology + } + return nil +} + +type ValidateVolumeCapabilitiesResponse struct { + // True if the Plugin supports the specified capabilities for the + // given volume. This field is REQUIRED. 
+ Supported bool `protobuf:"varint,1,opt,name=supported" json:"supported,omitempty"` + // Message to the CO if `supported` above is false. This field is + // OPTIONAL. + // An empty string is equal to an unspecified field value. + Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateVolumeCapabilitiesResponse) Reset() { *m = ValidateVolumeCapabilitiesResponse{} } +func (m *ValidateVolumeCapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*ValidateVolumeCapabilitiesResponse) ProtoMessage() {} +func (*ValidateVolumeCapabilitiesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{22} +} +func (m *ValidateVolumeCapabilitiesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Unmarshal(m, b) +} +func (m *ValidateVolumeCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Marshal(b, m, deterministic) +} +func (dst *ValidateVolumeCapabilitiesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Merge(dst, src) +} +func (m *ValidateVolumeCapabilitiesResponse) XXX_Size() int { + return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Size(m) +} +func (m *ValidateVolumeCapabilitiesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateVolumeCapabilitiesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateVolumeCapabilitiesResponse proto.InternalMessageInfo + +func (m *ValidateVolumeCapabilitiesResponse) GetSupported() bool { + if m != nil { + return m.Supported + } + return false +} + +func (m *ValidateVolumeCapabilitiesResponse) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +type ListVolumesRequest struct { + // If specified (non-zero value), the Plugin MUST NOT return more + // entries than this number in the response. If the actual number of + // entries is more than this number, the Plugin MUST set `next_token` + // in the response which can be used to get the next page of entries + // in the subsequent `ListVolumes` call. This field is OPTIONAL. If + // not specified (zero value), it means there is no restriction on the + // number of entries that can be returned. + // The value of this field MUST NOT be negative. + MaxEntries int32 `protobuf:"varint,1,opt,name=max_entries,json=maxEntries" json:"max_entries,omitempty"` + // A token to specify where to start paginating. Set this field to + // `next_token` returned by a previous `ListVolumes` call to get the + // next page of entries. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. 
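
The request/response pair above encodes an all-or-nothing check: `supported` is true only if every capability in the request is supported, and `message` optionally explains a failure. A sketch of how a CO might consume it; the `validate` parameter is a hypothetical stand-in for the `ValidateVolumeCapabilities` RPC:

```go
package example

import (
	"fmt"

	csi "github.com/container-storage-interface/spec/lib/go/csi/v0" // assumed import path
)

// checkVolume validates that a volume supports all requested capabilities.
// validate stands in for the ValidateVolumeCapabilities controller RPC.
func checkVolume(
	validate func(*csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error),
	volumeID string, caps []*csi.VolumeCapability,
) error {
	resp, err := validate(&csi.ValidateVolumeCapabilitiesRequest{
		VolumeId:           volumeID,
		VolumeCapabilities: caps, // "supported" only if ALL of these are supported
	})
	if err != nil {
		return err
	}
	if !resp.GetSupported() {
		// Message is OPTIONAL; an empty string means no reason was given.
		return fmt.Errorf("volume %s: capabilities not supported: %q", volumeID, resp.GetMessage())
	}
	return nil
}
```
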
+ StartingToken string `protobuf:"bytes,2,opt,name=starting_token,json=startingToken" json:"starting_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListVolumesRequest) Reset() { *m = ListVolumesRequest{} } +func (m *ListVolumesRequest) String() string { return proto.CompactTextString(m) } +func (*ListVolumesRequest) ProtoMessage() {} +func (*ListVolumesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{23} +} +func (m *ListVolumesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListVolumesRequest.Unmarshal(m, b) +} +func (m *ListVolumesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListVolumesRequest.Marshal(b, m, deterministic) +} +func (dst *ListVolumesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListVolumesRequest.Merge(dst, src) +} +func (m *ListVolumesRequest) XXX_Size() int { + return xxx_messageInfo_ListVolumesRequest.Size(m) +} +func (m *ListVolumesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListVolumesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListVolumesRequest proto.InternalMessageInfo + +func (m *ListVolumesRequest) GetMaxEntries() int32 { + if m != nil { + return m.MaxEntries + } + return 0 +} + +func (m *ListVolumesRequest) GetStartingToken() string { + if m != nil { + return m.StartingToken + } + return "" +} + +type ListVolumesResponse struct { + Entries []*ListVolumesResponse_Entry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"` + // This token allows you to get the next page of entries for + // `ListVolumes` request. If the number of entries is larger than + // `max_entries`, use the `next_token` as a value for the + // `starting_token` field in the next `ListVolumes` request. This + // field is OPTIONAL. + // An empty string is equal to an unspecified field value. 
+ NextToken string `protobuf:"bytes,2,opt,name=next_token,json=nextToken" json:"next_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListVolumesResponse) Reset() { *m = ListVolumesResponse{} } +func (m *ListVolumesResponse) String() string { return proto.CompactTextString(m) } +func (*ListVolumesResponse) ProtoMessage() {} +func (*ListVolumesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{24} +} +func (m *ListVolumesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListVolumesResponse.Unmarshal(m, b) +} +func (m *ListVolumesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListVolumesResponse.Marshal(b, m, deterministic) +} +func (dst *ListVolumesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListVolumesResponse.Merge(dst, src) +} +func (m *ListVolumesResponse) XXX_Size() int { + return xxx_messageInfo_ListVolumesResponse.Size(m) +} +func (m *ListVolumesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListVolumesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListVolumesResponse proto.InternalMessageInfo + +func (m *ListVolumesResponse) GetEntries() []*ListVolumesResponse_Entry { + if m != nil { + return m.Entries + } + return nil +} + +func (m *ListVolumesResponse) GetNextToken() string { + if m != nil { + return m.NextToken + } + return "" +} + +type ListVolumesResponse_Entry struct { + Volume *Volume `protobuf:"bytes,1,opt,name=volume" json:"volume,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListVolumesResponse_Entry) Reset() { *m = ListVolumesResponse_Entry{} } +func (m *ListVolumesResponse_Entry) String() string { return proto.CompactTextString(m) } +func (*ListVolumesResponse_Entry) ProtoMessage() {} +func (*ListVolumesResponse_Entry) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{24, 0} +} +func (m *ListVolumesResponse_Entry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListVolumesResponse_Entry.Unmarshal(m, b) +} +func (m *ListVolumesResponse_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListVolumesResponse_Entry.Marshal(b, m, deterministic) +} +func (dst *ListVolumesResponse_Entry) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListVolumesResponse_Entry.Merge(dst, src) +} +func (m *ListVolumesResponse_Entry) XXX_Size() int { + return xxx_messageInfo_ListVolumesResponse_Entry.Size(m) +} +func (m *ListVolumesResponse_Entry) XXX_DiscardUnknown() { + xxx_messageInfo_ListVolumesResponse_Entry.DiscardUnknown(m) +} + +var xxx_messageInfo_ListVolumesResponse_Entry proto.InternalMessageInfo + +func (m *ListVolumesResponse_Entry) GetVolume() *Volume { + if m != nil { + return m.Volume + } + return nil +} + +type GetCapacityRequest struct { + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes that satisfy ALL of the + // specified `volume_capabilities`. These are the same + // `volume_capabilities` the CO will use in `CreateVolumeRequest`. + // This field is OPTIONAL. 
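
The token contract spelled out in the `ListVolumesRequest`/`ListVolumesResponse` comments above reduces to a simple loop: feed each response's `next_token` back as `starting_token` until it comes back empty. A sketch, with `list` as a stand-in for the `ListVolumes` RPC:

```go
package example

import (
	csi "github.com/container-storage-interface/spec/lib/go/csi/v0" // assumed import path
)

// listAll pages through ListVolumes until the plugin stops returning a
// next_token. list stands in for the ListVolumes controller RPC.
func listAll(list func(*csi.ListVolumesRequest) (*csi.ListVolumesResponse, error)) ([]*csi.Volume, error) {
	var volumes []*csi.Volume
	token := "" // empty string means "start from the beginning"
	for {
		resp, err := list(&csi.ListVolumesRequest{
			MaxEntries:    100, // plugin MUST NOT return more entries than this
			StartingToken: token,
		})
		if err != nil {
			return nil, err
		}
		for _, e := range resp.GetEntries() {
			volumes = append(volumes, e.GetVolume())
		}
		token = resp.GetNextToken()
		if token == "" { // no more pages
			return volumes, nil
		}
	}
}
```
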
+ VolumeCapabilities []*VolumeCapability `protobuf:"bytes,1,rep,name=volume_capabilities,json=volumeCapabilities" json:"volume_capabilities,omitempty"` + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes with the given Plugin + // specific `parameters`. These are the same `parameters` the CO will + // use in `CreateVolumeRequest`. This field is OPTIONAL. + Parameters map[string]string `protobuf:"bytes,2,rep,name=parameters" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes that in the specified + // `accessible_topology`. This is the same as the + // `accessible_topology` the CO returns in a `CreateVolumeResponse`. + // This field is OPTIONAL. This field SHALL NOT be set unless the + // plugin advertises the ACCESSIBILITY_CONSTRAINTS capability. + AccessibleTopology *Topology `protobuf:"bytes,3,opt,name=accessible_topology,json=accessibleTopology" json:"accessible_topology,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetCapacityRequest) Reset() { *m = GetCapacityRequest{} } +func (m *GetCapacityRequest) String() string { return proto.CompactTextString(m) } +func (*GetCapacityRequest) ProtoMessage() {} +func (*GetCapacityRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{25} +} +func (m *GetCapacityRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetCapacityRequest.Unmarshal(m, b) +} +func (m *GetCapacityRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetCapacityRequest.Marshal(b, m, deterministic) +} +func (dst *GetCapacityRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetCapacityRequest.Merge(dst, src) +} +func (m *GetCapacityRequest) XXX_Size() int { + return xxx_messageInfo_GetCapacityRequest.Size(m) +} +func (m *GetCapacityRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetCapacityRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetCapacityRequest proto.InternalMessageInfo + +func (m *GetCapacityRequest) GetVolumeCapabilities() []*VolumeCapability { + if m != nil { + return m.VolumeCapabilities + } + return nil +} + +func (m *GetCapacityRequest) GetParameters() map[string]string { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *GetCapacityRequest) GetAccessibleTopology() *Topology { + if m != nil { + return m.AccessibleTopology + } + return nil +} + +type GetCapacityResponse struct { + // The available capacity, in bytes, of the storage that can be used + // to provision volumes. If `volume_capabilities` or `parameters` is + // specified in the request, the Plugin SHALL take those into + // consideration when calculating the available capacity of the + // storage. This field is REQUIRED. + // The value of this field MUST NOT be negative. 
+ AvailableCapacity int64 `protobuf:"varint,1,opt,name=available_capacity,json=availableCapacity" json:"available_capacity,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetCapacityResponse) Reset() { *m = GetCapacityResponse{} } +func (m *GetCapacityResponse) String() string { return proto.CompactTextString(m) } +func (*GetCapacityResponse) ProtoMessage() {} +func (*GetCapacityResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{26} +} +func (m *GetCapacityResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetCapacityResponse.Unmarshal(m, b) +} +func (m *GetCapacityResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetCapacityResponse.Marshal(b, m, deterministic) +} +func (dst *GetCapacityResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetCapacityResponse.Merge(dst, src) +} +func (m *GetCapacityResponse) XXX_Size() int { + return xxx_messageInfo_GetCapacityResponse.Size(m) +} +func (m *GetCapacityResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetCapacityResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetCapacityResponse proto.InternalMessageInfo + +func (m *GetCapacityResponse) GetAvailableCapacity() int64 { + if m != nil { + return m.AvailableCapacity + } + return 0 +} + +type ControllerGetCapabilitiesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerGetCapabilitiesRequest) Reset() { *m = ControllerGetCapabilitiesRequest{} } +func (m *ControllerGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) } +func (*ControllerGetCapabilitiesRequest) ProtoMessage() {} +func (*ControllerGetCapabilitiesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{27} +} +func (m *ControllerGetCapabilitiesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerGetCapabilitiesRequest.Unmarshal(m, b) +} +func (m *ControllerGetCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerGetCapabilitiesRequest.Marshal(b, m, deterministic) +} +func (dst *ControllerGetCapabilitiesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerGetCapabilitiesRequest.Merge(dst, src) +} +func (m *ControllerGetCapabilitiesRequest) XXX_Size() int { + return xxx_messageInfo_ControllerGetCapabilitiesRequest.Size(m) +} +func (m *ControllerGetCapabilitiesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerGetCapabilitiesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerGetCapabilitiesRequest proto.InternalMessageInfo + +type ControllerGetCapabilitiesResponse struct { + // All the capabilities that the controller service supports. This + // field is OPTIONAL. 
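
All three `GetCapacityRequest` fields are OPTIONAL filters; each one narrows the capacity figure the plugin reports. A sketch of a capacity query, with `getCapacity` standing in for the RPC and the `"type": "ssd"` parameter purely illustrative:

```go
package example

import (
	csi "github.com/container-storage-interface/spec/lib/go/csi/v0" // assumed import path
)

// freeBytes asks the plugin how much storage could still be provisioned
// for a given set of capabilities and plugin-specific parameters.
// getCapacity stands in for the GetCapacity controller RPC.
func freeBytes(
	getCapacity func(*csi.GetCapacityRequest) (*csi.GetCapacityResponse, error),
	caps []*csi.VolumeCapability,
) (int64, error) {
	resp, err := getCapacity(&csi.GetCapacityRequest{
		VolumeCapabilities: caps,
		Parameters:         map[string]string{"type": "ssd"}, // illustrative, plugin-specific
	})
	if err != nil {
		return 0, err
	}
	return resp.GetAvailableCapacity(), nil
}
```
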
+ Capabilities []*ControllerServiceCapability `protobuf:"bytes,2,rep,name=capabilities" json:"capabilities,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerGetCapabilitiesResponse) Reset() { *m = ControllerGetCapabilitiesResponse{} } +func (m *ControllerGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*ControllerGetCapabilitiesResponse) ProtoMessage() {} +func (*ControllerGetCapabilitiesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{28} +} +func (m *ControllerGetCapabilitiesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerGetCapabilitiesResponse.Unmarshal(m, b) +} +func (m *ControllerGetCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerGetCapabilitiesResponse.Marshal(b, m, deterministic) +} +func (dst *ControllerGetCapabilitiesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerGetCapabilitiesResponse.Merge(dst, src) +} +func (m *ControllerGetCapabilitiesResponse) XXX_Size() int { + return xxx_messageInfo_ControllerGetCapabilitiesResponse.Size(m) +} +func (m *ControllerGetCapabilitiesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerGetCapabilitiesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerGetCapabilitiesResponse proto.InternalMessageInfo + +func (m *ControllerGetCapabilitiesResponse) GetCapabilities() []*ControllerServiceCapability { + if m != nil { + return m.Capabilities + } + return nil +} + +// Specifies a capability of the controller service. +type ControllerServiceCapability struct { + // Types that are valid to be assigned to Type: + // *ControllerServiceCapability_Rpc + Type isControllerServiceCapability_Type `protobuf_oneof:"type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerServiceCapability) Reset() { *m = ControllerServiceCapability{} } +func (m *ControllerServiceCapability) String() string { return proto.CompactTextString(m) } +func (*ControllerServiceCapability) ProtoMessage() {} +func (*ControllerServiceCapability) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{29} +} +func (m *ControllerServiceCapability) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerServiceCapability.Unmarshal(m, b) +} +func (m *ControllerServiceCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerServiceCapability.Marshal(b, m, deterministic) +} +func (dst *ControllerServiceCapability) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerServiceCapability.Merge(dst, src) +} +func (m *ControllerServiceCapability) XXX_Size() int { + return xxx_messageInfo_ControllerServiceCapability.Size(m) +} +func (m *ControllerServiceCapability) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerServiceCapability.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerServiceCapability proto.InternalMessageInfo + +type isControllerServiceCapability_Type interface { + isControllerServiceCapability_Type() +} + +type ControllerServiceCapability_Rpc struct { + Rpc *ControllerServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,oneof"` +} + +func (*ControllerServiceCapability_Rpc) isControllerServiceCapability_Type() {} + +func (m *ControllerServiceCapability) GetType() 
isControllerServiceCapability_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *ControllerServiceCapability) GetRpc() *ControllerServiceCapability_RPC { + if x, ok := m.GetType().(*ControllerServiceCapability_Rpc); ok { + return x.Rpc + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ControllerServiceCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ControllerServiceCapability_OneofMarshaler, _ControllerServiceCapability_OneofUnmarshaler, _ControllerServiceCapability_OneofSizer, []interface{}{ + (*ControllerServiceCapability_Rpc)(nil), + } +} + +func _ControllerServiceCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ControllerServiceCapability) + // type + switch x := m.Type.(type) { + case *ControllerServiceCapability_Rpc: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Rpc); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ControllerServiceCapability.Type has unexpected type %T", x) + } + return nil +} + +func _ControllerServiceCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ControllerServiceCapability) + switch tag { + case 1: // type.rpc + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ControllerServiceCapability_RPC) + err := b.DecodeMessage(msg) + m.Type = &ControllerServiceCapability_Rpc{msg} + return true, err + default: + return false, nil + } +} + +func _ControllerServiceCapability_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ControllerServiceCapability) + // type + switch x := m.Type.(type) { + case *ControllerServiceCapability_Rpc: + s := proto.Size(x.Rpc) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type ControllerServiceCapability_RPC struct { + Type ControllerServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,enum=csi.v0.ControllerServiceCapability_RPC_Type" json:"type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerServiceCapability_RPC) Reset() { *m = ControllerServiceCapability_RPC{} } +func (m *ControllerServiceCapability_RPC) String() string { return proto.CompactTextString(m) } +func (*ControllerServiceCapability_RPC) ProtoMessage() {} +func (*ControllerServiceCapability_RPC) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{29, 0} +} +func (m *ControllerServiceCapability_RPC) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerServiceCapability_RPC.Unmarshal(m, b) +} +func (m *ControllerServiceCapability_RPC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerServiceCapability_RPC.Marshal(b, m, deterministic) +} +func (dst *ControllerServiceCapability_RPC) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerServiceCapability_RPC.Merge(dst, src) +} +func (m *ControllerServiceCapability_RPC) XXX_Size() int { + return xxx_messageInfo_ControllerServiceCapability_RPC.Size(m) +} +func (m *ControllerServiceCapability_RPC) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerServiceCapability_RPC.DiscardUnknown(m) +} + +var 
xxx_messageInfo_ControllerServiceCapability_RPC proto.InternalMessageInfo + +func (m *ControllerServiceCapability_RPC) GetType() ControllerServiceCapability_RPC_Type { + if m != nil { + return m.Type + } + return ControllerServiceCapability_RPC_UNKNOWN +} + +type CreateSnapshotRequest struct { + // The ID of the source volume to be snapshotted. + // This field is REQUIRED. + SourceVolumeId string `protobuf:"bytes,1,opt,name=source_volume_id,json=sourceVolumeId" json:"source_volume_id,omitempty"` + // The suggested name for the snapshot. This field is REQUIRED for + // idempotency. + Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + // Secrets required by plugin to complete snapshot creation request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + CreateSnapshotSecrets map[string]string `protobuf:"bytes,3,rep,name=create_snapshot_secrets,json=createSnapshotSecrets" json:"create_snapshot_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Plugin specific parameters passed in as opaque key-value pairs. + // This field is OPTIONAL. The Plugin is responsible for parsing and + // validating these parameters. COs will treat these as opaque. + // Use cases for opaque parameters: + // - Specify a policy to automatically clean up the snapshot. + // - Specify an expiration date for the snapshot. + // - Specify whether the snapshot is readonly or read/write. + // - Specify if the snapshot should be replicated to some place. + // - Specify primary or secondary for replication systems that + // support snapshotting only on primary. + Parameters map[string]string `protobuf:"bytes,4,rep,name=parameters" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateSnapshotRequest) Reset() { *m = CreateSnapshotRequest{} } +func (m *CreateSnapshotRequest) String() string { return proto.CompactTextString(m) } +func (*CreateSnapshotRequest) ProtoMessage() {} +func (*CreateSnapshotRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{30} +} +func (m *CreateSnapshotRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateSnapshotRequest.Unmarshal(m, b) +} +func (m *CreateSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateSnapshotRequest.Marshal(b, m, deterministic) +} +func (dst *CreateSnapshotRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateSnapshotRequest.Merge(dst, src) +} +func (m *CreateSnapshotRequest) XXX_Size() int { + return xxx_messageInfo_CreateSnapshotRequest.Size(m) +} +func (m *CreateSnapshotRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateSnapshotRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateSnapshotRequest proto.InternalMessageInfo + +func (m *CreateSnapshotRequest) GetSourceVolumeId() string { + if m != nil { + return m.SourceVolumeId + } + return "" +} + +func (m *CreateSnapshotRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateSnapshotRequest) GetCreateSnapshotSecrets() map[string]string { + if m != nil { + return m.CreateSnapshotSecrets + } + return nil +} + +func (m *CreateSnapshotRequest) GetParameters() map[string]string { + if m != nil { + return m.Parameters + } + return nil +} + +type 
CreateSnapshotResponse struct { + // Contains all attributes of the newly created snapshot that are + // relevant to the CO along with information required by the Plugin + // to uniquely identify the snapshot. This field is REQUIRED. + Snapshot *Snapshot `protobuf:"bytes,1,opt,name=snapshot" json:"snapshot,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateSnapshotResponse) Reset() { *m = CreateSnapshotResponse{} } +func (m *CreateSnapshotResponse) String() string { return proto.CompactTextString(m) } +func (*CreateSnapshotResponse) ProtoMessage() {} +func (*CreateSnapshotResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{31} +} +func (m *CreateSnapshotResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateSnapshotResponse.Unmarshal(m, b) +} +func (m *CreateSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateSnapshotResponse.Marshal(b, m, deterministic) +} +func (dst *CreateSnapshotResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateSnapshotResponse.Merge(dst, src) +} +func (m *CreateSnapshotResponse) XXX_Size() int { + return xxx_messageInfo_CreateSnapshotResponse.Size(m) +} +func (m *CreateSnapshotResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CreateSnapshotResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateSnapshotResponse proto.InternalMessageInfo + +func (m *CreateSnapshotResponse) GetSnapshot() *Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +// The information about a provisioned snapshot. +type Snapshot struct { + // This is the complete size of the snapshot in bytes. The purpose of + // this field is to give CO guidance on how much space is needed to + // create a volume from this snapshot. The size of the volume MUST NOT + // be less than the size of the source snapshot. This field is + // OPTIONAL. If this field is not set, it indicates that this size is + // unknown. The value of this field MUST NOT be negative and a size of + // zero means it is unspecified. + SizeBytes int64 `protobuf:"varint,1,opt,name=size_bytes,json=sizeBytes" json:"size_bytes,omitempty"` + // Uniquely identifies a snapshot and is generated by the plugin. It + // will not change over time. This field is REQUIRED. The identity + // information will be used by the CO in subsequent calls to refer to + // the provisioned snapshot. + Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` + // Identity information for the source volume. Note that creating a + // snapshot from a snapshot is not supported here so the source has to + // be a volume. This field is REQUIRED. + SourceVolumeId string `protobuf:"bytes,3,opt,name=source_volume_id,json=sourceVolumeId" json:"source_volume_id,omitempty"` + // Timestamp when the point-in-time snapshot is taken on the storage + // system. The format of this field should be a Unix nanoseconds time + // encoded as an int64. On Unix, the command `date +%s%N` returns the + // current time in nanoseconds since 1970-01-01 00:00:00 UTC. This + // field is REQUIRED. + CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt" json:"created_at,omitempty"` + // The status of a snapshot. 
+ Status *SnapshotStatus `protobuf:"bytes,5,opt,name=status" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{32} +} +func (m *Snapshot) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Snapshot.Unmarshal(m, b) +} +func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic) +} +func (dst *Snapshot) XXX_Merge(src proto.Message) { + xxx_messageInfo_Snapshot.Merge(dst, src) +} +func (m *Snapshot) XXX_Size() int { + return xxx_messageInfo_Snapshot.Size(m) +} +func (m *Snapshot) XXX_DiscardUnknown() { + xxx_messageInfo_Snapshot.DiscardUnknown(m) +} + +var xxx_messageInfo_Snapshot proto.InternalMessageInfo + +func (m *Snapshot) GetSizeBytes() int64 { + if m != nil { + return m.SizeBytes + } + return 0 +} + +func (m *Snapshot) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Snapshot) GetSourceVolumeId() string { + if m != nil { + return m.SourceVolumeId + } + return "" +} + +func (m *Snapshot) GetCreatedAt() int64 { + if m != nil { + return m.CreatedAt + } + return 0 +} + +func (m *Snapshot) GetStatus() *SnapshotStatus { + if m != nil { + return m.Status + } + return nil +} + +// The status of a snapshot. +type SnapshotStatus struct { + // This field is REQUIRED. + Type SnapshotStatus_Type `protobuf:"varint,1,opt,name=type,enum=csi.v0.SnapshotStatus_Type" json:"type,omitempty"` + // Additional information to describe why a snapshot ended up in the + // `ERROR_UPLOADING` status. This field is OPTIONAL. + Details string `protobuf:"bytes,2,opt,name=details" json:"details,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SnapshotStatus) Reset() { *m = SnapshotStatus{} } +func (m *SnapshotStatus) String() string { return proto.CompactTextString(m) } +func (*SnapshotStatus) ProtoMessage() {} +func (*SnapshotStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{33} +} +func (m *SnapshotStatus) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SnapshotStatus.Unmarshal(m, b) +} +func (m *SnapshotStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SnapshotStatus.Marshal(b, m, deterministic) +} +func (dst *SnapshotStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SnapshotStatus.Merge(dst, src) +} +func (m *SnapshotStatus) XXX_Size() int { + return xxx_messageInfo_SnapshotStatus.Size(m) +} +func (m *SnapshotStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SnapshotStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SnapshotStatus proto.InternalMessageInfo + +func (m *SnapshotStatus) GetType() SnapshotStatus_Type { + if m != nil { + return m.Type + } + return SnapshotStatus_UNKNOWN +} + +func (m *SnapshotStatus) GetDetails() string { + if m != nil { + return m.Details + } + return "" +} + +type DeleteSnapshotRequest struct { + // The ID of the snapshot to be deleted. + // This field is REQUIRED. 
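
The `rpc` oneof above is how a CO discovers what the controller service can do before issuing calls such as `CreateSnapshot` or `DeleteSnapshot`. Because the getters are nil-safe, unwrapping the oneof needs no case analysis. A sketch; the `RPC_*` enum values (for example `ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT`) are declared earlier in this file:

```go
package example

import (
	csi "github.com/container-storage-interface/spec/lib/go/csi/v0" // assumed import path
)

// hasControllerCapability reports whether a ControllerGetCapabilities
// response advertises the given RPC capability. GetRpc returns nil for
// any other oneof case, and the nil-safe GetType then yields UNKNOWN.
func hasControllerCapability(
	resp *csi.ControllerGetCapabilitiesResponse,
	want csi.ControllerServiceCapability_RPC_Type,
) bool {
	for _, c := range resp.GetCapabilities() {
		if c.GetRpc().GetType() == want {
			return true
		}
	}
	return false
}
```
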
+ SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId" json:"snapshot_id,omitempty"` + // Secrets required by plugin to complete snapshot deletion request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + DeleteSnapshotSecrets map[string]string `protobuf:"bytes,2,rep,name=delete_snapshot_secrets,json=deleteSnapshotSecrets" json:"delete_snapshot_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteSnapshotRequest) Reset() { *m = DeleteSnapshotRequest{} } +func (m *DeleteSnapshotRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteSnapshotRequest) ProtoMessage() {} +func (*DeleteSnapshotRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{34} +} +func (m *DeleteSnapshotRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteSnapshotRequest.Unmarshal(m, b) +} +func (m *DeleteSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteSnapshotRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteSnapshotRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteSnapshotRequest.Merge(dst, src) +} +func (m *DeleteSnapshotRequest) XXX_Size() int { + return xxx_messageInfo_DeleteSnapshotRequest.Size(m) +} +func (m *DeleteSnapshotRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteSnapshotRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteSnapshotRequest proto.InternalMessageInfo + +func (m *DeleteSnapshotRequest) GetSnapshotId() string { + if m != nil { + return m.SnapshotId + } + return "" +} + +func (m *DeleteSnapshotRequest) GetDeleteSnapshotSecrets() map[string]string { + if m != nil { + return m.DeleteSnapshotSecrets + } + return nil +} + +type DeleteSnapshotResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteSnapshotResponse) Reset() { *m = DeleteSnapshotResponse{} } +func (m *DeleteSnapshotResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteSnapshotResponse) ProtoMessage() {} +func (*DeleteSnapshotResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{35} +} +func (m *DeleteSnapshotResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteSnapshotResponse.Unmarshal(m, b) +} +func (m *DeleteSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteSnapshotResponse.Marshal(b, m, deterministic) +} +func (dst *DeleteSnapshotResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteSnapshotResponse.Merge(dst, src) +} +func (m *DeleteSnapshotResponse) XXX_Size() int { + return xxx_messageInfo_DeleteSnapshotResponse.Size(m) +} +func (m *DeleteSnapshotResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteSnapshotResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteSnapshotResponse proto.InternalMessageInfo + +// List all snapshots on the storage system regardless of how they were +// created. +type ListSnapshotsRequest struct { + // If specified (non-zero value), the Plugin MUST NOT return more + // entries than this number in the response. 
If the actual number of + // entries is more than this number, the Plugin MUST set `next_token` + // in the response which can be used to get the next page of entries + // in the subsequent `ListSnapshots` call. This field is OPTIONAL. If + // not specified (zero value), it means there is no restriction on the + // number of entries that can be returned. + // The value of this field MUST NOT be negative. + MaxEntries int32 `protobuf:"varint,1,opt,name=max_entries,json=maxEntries" json:"max_entries,omitempty"` + // A token to specify where to start paginating. Set this field to + // `next_token` returned by a previous `ListSnapshots` call to get the + // next page of entries. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. + StartingToken string `protobuf:"bytes,2,opt,name=starting_token,json=startingToken" json:"starting_token,omitempty"` + // Identity information for the source volume. This field is OPTIONAL. + // It can be used to list snapshots by volume. + SourceVolumeId string `protobuf:"bytes,3,opt,name=source_volume_id,json=sourceVolumeId" json:"source_volume_id,omitempty"` + // Identity information for a specific snapshot. This field is + // OPTIONAL. It can be used to list only a specific snapshot. + // ListSnapshots will return with current snapshot information + // and will not block if the snapshot is being uploaded. + SnapshotId string `protobuf:"bytes,4,opt,name=snapshot_id,json=snapshotId" json:"snapshot_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListSnapshotsRequest) Reset() { *m = ListSnapshotsRequest{} } +func (m *ListSnapshotsRequest) String() string { return proto.CompactTextString(m) } +func (*ListSnapshotsRequest) ProtoMessage() {} +func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{36} +} +func (m *ListSnapshotsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListSnapshotsRequest.Unmarshal(m, b) +} +func (m *ListSnapshotsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListSnapshotsRequest.Marshal(b, m, deterministic) +} +func (dst *ListSnapshotsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListSnapshotsRequest.Merge(dst, src) +} +func (m *ListSnapshotsRequest) XXX_Size() int { + return xxx_messageInfo_ListSnapshotsRequest.Size(m) +} +func (m *ListSnapshotsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListSnapshotsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListSnapshotsRequest proto.InternalMessageInfo + +func (m *ListSnapshotsRequest) GetMaxEntries() int32 { + if m != nil { + return m.MaxEntries + } + return 0 +} + +func (m *ListSnapshotsRequest) GetStartingToken() string { + if m != nil { + return m.StartingToken + } + return "" +} + +func (m *ListSnapshotsRequest) GetSourceVolumeId() string { + if m != nil { + return m.SourceVolumeId + } + return "" +} + +func (m *ListSnapshotsRequest) GetSnapshotId() string { + if m != nil { + return m.SnapshotId + } + return "" +} + +type ListSnapshotsResponse struct { + Entries []*ListSnapshotsResponse_Entry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"` + // This token allows you to get the next page of entries for + // `ListSnapshots` request. If the number of entries is larger than + // `max_entries`, use the `next_token` as a value for the + // `starting_token` field in the next `ListSnapshots` request. 
This + // field is OPTIONAL. + // An empty string is equal to an unspecified field value. + NextToken string `protobuf:"bytes,2,opt,name=next_token,json=nextToken" json:"next_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListSnapshotsResponse) Reset() { *m = ListSnapshotsResponse{} } +func (m *ListSnapshotsResponse) String() string { return proto.CompactTextString(m) } +func (*ListSnapshotsResponse) ProtoMessage() {} +func (*ListSnapshotsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{37} +} +func (m *ListSnapshotsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListSnapshotsResponse.Unmarshal(m, b) +} +func (m *ListSnapshotsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListSnapshotsResponse.Marshal(b, m, deterministic) +} +func (dst *ListSnapshotsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListSnapshotsResponse.Merge(dst, src) +} +func (m *ListSnapshotsResponse) XXX_Size() int { + return xxx_messageInfo_ListSnapshotsResponse.Size(m) +} +func (m *ListSnapshotsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListSnapshotsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListSnapshotsResponse proto.InternalMessageInfo + +func (m *ListSnapshotsResponse) GetEntries() []*ListSnapshotsResponse_Entry { + if m != nil { + return m.Entries + } + return nil +} + +func (m *ListSnapshotsResponse) GetNextToken() string { + if m != nil { + return m.NextToken + } + return "" +} + +type ListSnapshotsResponse_Entry struct { + Snapshot *Snapshot `protobuf:"bytes,1,opt,name=snapshot" json:"snapshot,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListSnapshotsResponse_Entry) Reset() { *m = ListSnapshotsResponse_Entry{} } +func (m *ListSnapshotsResponse_Entry) String() string { return proto.CompactTextString(m) } +func (*ListSnapshotsResponse_Entry) ProtoMessage() {} +func (*ListSnapshotsResponse_Entry) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{37, 0} +} +func (m *ListSnapshotsResponse_Entry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListSnapshotsResponse_Entry.Unmarshal(m, b) +} +func (m *ListSnapshotsResponse_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListSnapshotsResponse_Entry.Marshal(b, m, deterministic) +} +func (dst *ListSnapshotsResponse_Entry) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListSnapshotsResponse_Entry.Merge(dst, src) +} +func (m *ListSnapshotsResponse_Entry) XXX_Size() int { + return xxx_messageInfo_ListSnapshotsResponse_Entry.Size(m) +} +func (m *ListSnapshotsResponse_Entry) XXX_DiscardUnknown() { + xxx_messageInfo_ListSnapshotsResponse_Entry.DiscardUnknown(m) +} + +var xxx_messageInfo_ListSnapshotsResponse_Entry proto.InternalMessageInfo + +func (m *ListSnapshotsResponse_Entry) GetSnapshot() *Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +type NodeStageVolumeRequest struct { + // The ID of the volume to publish. This field is REQUIRED. 
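
As the comments above note, `ListSnapshots` returns current status without blocking on an upload in progress, so a caller that needs a usable snapshot has to inspect `SnapshotStatus` itself. A sketch, with `listSnaps` standing in for the RPC; `SnapshotStatus_READY` is one of the `SnapshotStatus_Type` values declared earlier in this file:

```go
package example

import (
	csi "github.com/container-storage-interface/spec/lib/go/csi/v0" // assumed import path
)

// snapshotsReady lists the snapshots of one source volume and reports
// whether every returned snapshot has reached READY. listSnaps stands in
// for the ListSnapshots controller RPC.
func snapshotsReady(
	listSnaps func(*csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error),
	sourceVolumeID string,
) (bool, error) {
	resp, err := listSnaps(&csi.ListSnapshotsRequest{SourceVolumeId: sourceVolumeID})
	if err != nil {
		return false, err
	}
	for _, e := range resp.GetEntries() {
		if e.GetSnapshot().GetStatus().GetType() != csi.SnapshotStatus_READY {
			return false, nil
		}
	}
	return true, nil
}
```
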
+ VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + // The CO SHALL set this field to the value returned by + // `ControllerPublishVolume` if the corresponding Controller Plugin + // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be + // left unset if the corresponding Controller Plugin does not have + // this capability. This is an OPTIONAL field. + PublishInfo map[string]string `protobuf:"bytes,2,rep,name=publish_info,json=publishInfo" json:"publish_info,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // The path to which the volume will be published. It MUST be an + // absolute path in the root filesystem of the process serving this + // request. The CO SHALL ensure that there is only one + // staging_target_path per volume. + // This is a REQUIRED field. + StagingTargetPath string `protobuf:"bytes,3,opt,name=staging_target_path,json=stagingTargetPath" json:"staging_target_path,omitempty"` + // The capability of the volume the CO expects the volume to have. + // This is a REQUIRED field. + VolumeCapability *VolumeCapability `protobuf:"bytes,4,opt,name=volume_capability,json=volumeCapability" json:"volume_capability,omitempty"` + // Secrets required by plugin to complete node stage volume request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + NodeStageSecrets map[string]string `protobuf:"bytes,5,rep,name=node_stage_secrets,json=nodeStageSecrets" json:"node_stage_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Attributes of the volume to publish. This field is OPTIONAL and + // MUST match the attributes of the `Volume` identified by + // `volume_id`. 
+ VolumeAttributes map[string]string `protobuf:"bytes,6,rep,name=volume_attributes,json=volumeAttributes" json:"volume_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeStageVolumeRequest) Reset() { *m = NodeStageVolumeRequest{} } +func (m *NodeStageVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*NodeStageVolumeRequest) ProtoMessage() {} +func (*NodeStageVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{38} +} +func (m *NodeStageVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeStageVolumeRequest.Unmarshal(m, b) +} +func (m *NodeStageVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeStageVolumeRequest.Marshal(b, m, deterministic) +} +func (dst *NodeStageVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeStageVolumeRequest.Merge(dst, src) +} +func (m *NodeStageVolumeRequest) XXX_Size() int { + return xxx_messageInfo_NodeStageVolumeRequest.Size(m) +} +func (m *NodeStageVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeStageVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeStageVolumeRequest proto.InternalMessageInfo + +func (m *NodeStageVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *NodeStageVolumeRequest) GetPublishInfo() map[string]string { + if m != nil { + return m.PublishInfo + } + return nil +} + +func (m *NodeStageVolumeRequest) GetStagingTargetPath() string { + if m != nil { + return m.StagingTargetPath + } + return "" +} + +func (m *NodeStageVolumeRequest) GetVolumeCapability() *VolumeCapability { + if m != nil { + return m.VolumeCapability + } + return nil +} + +func (m *NodeStageVolumeRequest) GetNodeStageSecrets() map[string]string { + if m != nil { + return m.NodeStageSecrets + } + return nil +} + +func (m *NodeStageVolumeRequest) GetVolumeAttributes() map[string]string { + if m != nil { + return m.VolumeAttributes + } + return nil +} + +type NodeStageVolumeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeStageVolumeResponse) Reset() { *m = NodeStageVolumeResponse{} } +func (m *NodeStageVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*NodeStageVolumeResponse) ProtoMessage() {} +func (*NodeStageVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{39} +} +func (m *NodeStageVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeStageVolumeResponse.Unmarshal(m, b) +} +func (m *NodeStageVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeStageVolumeResponse.Marshal(b, m, deterministic) +} +func (dst *NodeStageVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeStageVolumeResponse.Merge(dst, src) +} +func (m *NodeStageVolumeResponse) XXX_Size() int { + return xxx_messageInfo_NodeStageVolumeResponse.Size(m) +} +func (m *NodeStageVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeStageVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeStageVolumeResponse proto.InternalMessageInfo + +type NodeUnstageVolumeRequest struct { + // The ID of the volume. This field is REQUIRED. 
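
`NodeStageVolumeRequest` above and the `NodeUnstageVolumeRequest` defined next are mirror images keyed by the same `staging_target_path`: the CO stages a volume once per node, then reverses the step with the identical path. A sketch of the pairing, with the two function parameters standing in for the node RPCs:

```go
package example

import (
	csi "github.com/container-storage-interface/spec/lib/go/csi/v0" // assumed import path
)

// stageAndUnstage stages a volume on a node and later reverses the step.
// publishInfo is whatever ControllerPublishVolume returned, or nil when
// the controller plugin lacks PUBLISH_UNPUBLISH_VOLUME.
func stageAndUnstage(
	stage func(*csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error),
	unstage func(*csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error),
	volumeID, stagingPath string, volCap *csi.VolumeCapability, publishInfo map[string]string,
) error {
	if _, err := stage(&csi.NodeStageVolumeRequest{
		VolumeId:          volumeID,
		PublishInfo:       publishInfo,
		StagingTargetPath: stagingPath, // only one staging path per volume
		VolumeCapability:  volCap,
	}); err != nil {
		return err
	}
	// After the volume is unpublished from all target paths, unstage it
	// using the same staging path.
	_, err := unstage(&csi.NodeUnstageVolumeRequest{
		VolumeId:          volumeID,
		StagingTargetPath: stagingPath,
	})
	return err
}
```
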
+ VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + // The path at which the volume was published. It MUST be an absolute + // path in the root filesystem of the process serving this request. + // This is a REQUIRED field. + StagingTargetPath string `protobuf:"bytes,2,opt,name=staging_target_path,json=stagingTargetPath" json:"staging_target_path,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeUnstageVolumeRequest) Reset() { *m = NodeUnstageVolumeRequest{} } +func (m *NodeUnstageVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*NodeUnstageVolumeRequest) ProtoMessage() {} +func (*NodeUnstageVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{40} +} +func (m *NodeUnstageVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeUnstageVolumeRequest.Unmarshal(m, b) +} +func (m *NodeUnstageVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeUnstageVolumeRequest.Marshal(b, m, deterministic) +} +func (dst *NodeUnstageVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeUnstageVolumeRequest.Merge(dst, src) +} +func (m *NodeUnstageVolumeRequest) XXX_Size() int { + return xxx_messageInfo_NodeUnstageVolumeRequest.Size(m) +} +func (m *NodeUnstageVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeUnstageVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeUnstageVolumeRequest proto.InternalMessageInfo + +func (m *NodeUnstageVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *NodeUnstageVolumeRequest) GetStagingTargetPath() string { + if m != nil { + return m.StagingTargetPath + } + return "" +} + +type NodeUnstageVolumeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeUnstageVolumeResponse) Reset() { *m = NodeUnstageVolumeResponse{} } +func (m *NodeUnstageVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*NodeUnstageVolumeResponse) ProtoMessage() {} +func (*NodeUnstageVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{41} +} +func (m *NodeUnstageVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeUnstageVolumeResponse.Unmarshal(m, b) +} +func (m *NodeUnstageVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeUnstageVolumeResponse.Marshal(b, m, deterministic) +} +func (dst *NodeUnstageVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeUnstageVolumeResponse.Merge(dst, src) +} +func (m *NodeUnstageVolumeResponse) XXX_Size() int { + return xxx_messageInfo_NodeUnstageVolumeResponse.Size(m) +} +func (m *NodeUnstageVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeUnstageVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeUnstageVolumeResponse proto.InternalMessageInfo + +type NodePublishVolumeRequest struct { + // The ID of the volume to publish. This field is REQUIRED. 
+ VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + // The CO SHALL set this field to the value returned by + // `ControllerPublishVolume` if the corresponding Controller Plugin + // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be + // left unset if the corresponding Controller Plugin does not have + // this capability. This is an OPTIONAL field. + PublishInfo map[string]string `protobuf:"bytes,2,rep,name=publish_info,json=publishInfo" json:"publish_info,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // The path to which the device was mounted by `NodeStageVolume`. + // It MUST be an absolute path in the root filesystem of the process + // serving this request. + // It MUST be set if the Node Plugin implements the + // `STAGE_UNSTAGE_VOLUME` node capability. + // This is an OPTIONAL field. + StagingTargetPath string `protobuf:"bytes,3,opt,name=staging_target_path,json=stagingTargetPath" json:"staging_target_path,omitempty"` + // The path to which the volume will be published. It MUST be an + // absolute path in the root filesystem of the process serving this + // request. The CO SHALL ensure uniqueness of target_path per volume. + // The CO SHALL ensure that the path exists, and that the process + // serving the request has `read` and `write` permissions to the path. + // This is a REQUIRED field. + TargetPath string `protobuf:"bytes,4,opt,name=target_path,json=targetPath" json:"target_path,omitempty"` + // The capability of the volume the CO expects the volume to have. + // This is a REQUIRED field. + VolumeCapability *VolumeCapability `protobuf:"bytes,5,opt,name=volume_capability,json=volumeCapability" json:"volume_capability,omitempty"` + // Whether to publish the volume in readonly mode. This field is + // REQUIRED. + Readonly bool `protobuf:"varint,6,opt,name=readonly" json:"readonly,omitempty"` + // Secrets required by plugin to complete node publish volume request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + NodePublishSecrets map[string]string `protobuf:"bytes,7,rep,name=node_publish_secrets,json=nodePublishSecrets" json:"node_publish_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Attributes of the volume to publish. This field is OPTIONAL and + // MUST match the attributes of the Volume identified by + // `volume_id`. 
+ VolumeAttributes map[string]string `protobuf:"bytes,8,rep,name=volume_attributes,json=volumeAttributes" json:"volume_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodePublishVolumeRequest) Reset() { *m = NodePublishVolumeRequest{} } +func (m *NodePublishVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*NodePublishVolumeRequest) ProtoMessage() {} +func (*NodePublishVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{42} +} +func (m *NodePublishVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodePublishVolumeRequest.Unmarshal(m, b) +} +func (m *NodePublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodePublishVolumeRequest.Marshal(b, m, deterministic) +} +func (dst *NodePublishVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodePublishVolumeRequest.Merge(dst, src) +} +func (m *NodePublishVolumeRequest) XXX_Size() int { + return xxx_messageInfo_NodePublishVolumeRequest.Size(m) +} +func (m *NodePublishVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodePublishVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodePublishVolumeRequest proto.InternalMessageInfo + +func (m *NodePublishVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *NodePublishVolumeRequest) GetPublishInfo() map[string]string { + if m != nil { + return m.PublishInfo + } + return nil +} + +func (m *NodePublishVolumeRequest) GetStagingTargetPath() string { + if m != nil { + return m.StagingTargetPath + } + return "" +} + +func (m *NodePublishVolumeRequest) GetTargetPath() string { + if m != nil { + return m.TargetPath + } + return "" +} + +func (m *NodePublishVolumeRequest) GetVolumeCapability() *VolumeCapability { + if m != nil { + return m.VolumeCapability + } + return nil +} + +func (m *NodePublishVolumeRequest) GetReadonly() bool { + if m != nil { + return m.Readonly + } + return false +} + +func (m *NodePublishVolumeRequest) GetNodePublishSecrets() map[string]string { + if m != nil { + return m.NodePublishSecrets + } + return nil +} + +func (m *NodePublishVolumeRequest) GetVolumeAttributes() map[string]string { + if m != nil { + return m.VolumeAttributes + } + return nil +} + +type NodePublishVolumeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodePublishVolumeResponse) Reset() { *m = NodePublishVolumeResponse{} } +func (m *NodePublishVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*NodePublishVolumeResponse) ProtoMessage() {} +func (*NodePublishVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{43} +} +func (m *NodePublishVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodePublishVolumeResponse.Unmarshal(m, b) +} +func (m *NodePublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodePublishVolumeResponse.Marshal(b, m, deterministic) +} +func (dst *NodePublishVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodePublishVolumeResponse.Merge(dst, src) +} +func (m *NodePublishVolumeResponse) XXX_Size() int { + return 
xxx_messageInfo_NodePublishVolumeResponse.Size(m) +} +func (m *NodePublishVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodePublishVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodePublishVolumeResponse proto.InternalMessageInfo + +type NodeUnpublishVolumeRequest struct { + // The ID of the volume. This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + // The path at which the volume was published. It MUST be an absolute + // path in the root filesystem of the process serving this request. + // This is a REQUIRED field. + TargetPath string `protobuf:"bytes,2,opt,name=target_path,json=targetPath" json:"target_path,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeUnpublishVolumeRequest) Reset() { *m = NodeUnpublishVolumeRequest{} } +func (m *NodeUnpublishVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*NodeUnpublishVolumeRequest) ProtoMessage() {} +func (*NodeUnpublishVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{44} +} +func (m *NodeUnpublishVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeUnpublishVolumeRequest.Unmarshal(m, b) +} +func (m *NodeUnpublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeUnpublishVolumeRequest.Marshal(b, m, deterministic) +} +func (dst *NodeUnpublishVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeUnpublishVolumeRequest.Merge(dst, src) +} +func (m *NodeUnpublishVolumeRequest) XXX_Size() int { + return xxx_messageInfo_NodeUnpublishVolumeRequest.Size(m) +} +func (m *NodeUnpublishVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeUnpublishVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeUnpublishVolumeRequest proto.InternalMessageInfo + +func (m *NodeUnpublishVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *NodeUnpublishVolumeRequest) GetTargetPath() string { + if m != nil { + return m.TargetPath + } + return "" +} + +type NodeUnpublishVolumeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeUnpublishVolumeResponse) Reset() { *m = NodeUnpublishVolumeResponse{} } +func (m *NodeUnpublishVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*NodeUnpublishVolumeResponse) ProtoMessage() {} +func (*NodeUnpublishVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{45} +} +func (m *NodeUnpublishVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeUnpublishVolumeResponse.Unmarshal(m, b) +} +func (m *NodeUnpublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeUnpublishVolumeResponse.Marshal(b, m, deterministic) +} +func (dst *NodeUnpublishVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeUnpublishVolumeResponse.Merge(dst, src) +} +func (m *NodeUnpublishVolumeResponse) XXX_Size() int { + return xxx_messageInfo_NodeUnpublishVolumeResponse.Size(m) +} +func (m *NodeUnpublishVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeUnpublishVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeUnpublishVolumeResponse proto.InternalMessageInfo + +type 
NodeGetIdRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeGetIdRequest) Reset() { *m = NodeGetIdRequest{} } +func (m *NodeGetIdRequest) String() string { return proto.CompactTextString(m) } +func (*NodeGetIdRequest) ProtoMessage() {} +func (*NodeGetIdRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{46} +} +func (m *NodeGetIdRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeGetIdRequest.Unmarshal(m, b) +} +func (m *NodeGetIdRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeGetIdRequest.Marshal(b, m, deterministic) +} +func (dst *NodeGetIdRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetIdRequest.Merge(dst, src) +} +func (m *NodeGetIdRequest) XXX_Size() int { + return xxx_messageInfo_NodeGetIdRequest.Size(m) +} +func (m *NodeGetIdRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeGetIdRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeGetIdRequest proto.InternalMessageInfo + +type NodeGetIdResponse struct { + // The ID of the node as understood by the SP which SHALL be used by + // CO in subsequent `ControllerPublishVolume`. + // This is a REQUIRED field. + NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId" json:"node_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeGetIdResponse) Reset() { *m = NodeGetIdResponse{} } +func (m *NodeGetIdResponse) String() string { return proto.CompactTextString(m) } +func (*NodeGetIdResponse) ProtoMessage() {} +func (*NodeGetIdResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{47} +} +func (m *NodeGetIdResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeGetIdResponse.Unmarshal(m, b) +} +func (m *NodeGetIdResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeGetIdResponse.Marshal(b, m, deterministic) +} +func (dst *NodeGetIdResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetIdResponse.Merge(dst, src) +} +func (m *NodeGetIdResponse) XXX_Size() int { + return xxx_messageInfo_NodeGetIdResponse.Size(m) +} +func (m *NodeGetIdResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeGetIdResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeGetIdResponse proto.InternalMessageInfo + +func (m *NodeGetIdResponse) GetNodeId() string { + if m != nil { + return m.NodeId + } + return "" +} + +type NodeGetCapabilitiesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeGetCapabilitiesRequest) Reset() { *m = NodeGetCapabilitiesRequest{} } +func (m *NodeGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) } +func (*NodeGetCapabilitiesRequest) ProtoMessage() {} +func (*NodeGetCapabilitiesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{48} +} +func (m *NodeGetCapabilitiesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeGetCapabilitiesRequest.Unmarshal(m, b) +} +func (m *NodeGetCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeGetCapabilitiesRequest.Marshal(b, m, deterministic) +} +func (dst *NodeGetCapabilitiesRequest) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_NodeGetCapabilitiesRequest.Merge(dst, src) +} +func (m *NodeGetCapabilitiesRequest) XXX_Size() int { + return xxx_messageInfo_NodeGetCapabilitiesRequest.Size(m) +} +func (m *NodeGetCapabilitiesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeGetCapabilitiesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeGetCapabilitiesRequest proto.InternalMessageInfo + +type NodeGetCapabilitiesResponse struct { + // All the capabilities that the node service supports. This field + // is OPTIONAL. + Capabilities []*NodeServiceCapability `protobuf:"bytes,1,rep,name=capabilities" json:"capabilities,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeGetCapabilitiesResponse) Reset() { *m = NodeGetCapabilitiesResponse{} } +func (m *NodeGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*NodeGetCapabilitiesResponse) ProtoMessage() {} +func (*NodeGetCapabilitiesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{49} +} +func (m *NodeGetCapabilitiesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeGetCapabilitiesResponse.Unmarshal(m, b) +} +func (m *NodeGetCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeGetCapabilitiesResponse.Marshal(b, m, deterministic) +} +func (dst *NodeGetCapabilitiesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetCapabilitiesResponse.Merge(dst, src) +} +func (m *NodeGetCapabilitiesResponse) XXX_Size() int { + return xxx_messageInfo_NodeGetCapabilitiesResponse.Size(m) +} +func (m *NodeGetCapabilitiesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeGetCapabilitiesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeGetCapabilitiesResponse proto.InternalMessageInfo + +func (m *NodeGetCapabilitiesResponse) GetCapabilities() []*NodeServiceCapability { + if m != nil { + return m.Capabilities + } + return nil +} + +// Specifies a capability of the node service. 
+type NodeServiceCapability struct { + // Types that are valid to be assigned to Type: + // *NodeServiceCapability_Rpc + Type isNodeServiceCapability_Type `protobuf_oneof:"type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeServiceCapability) Reset() { *m = NodeServiceCapability{} } +func (m *NodeServiceCapability) String() string { return proto.CompactTextString(m) } +func (*NodeServiceCapability) ProtoMessage() {} +func (*NodeServiceCapability) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{50} +} +func (m *NodeServiceCapability) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeServiceCapability.Unmarshal(m, b) +} +func (m *NodeServiceCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeServiceCapability.Marshal(b, m, deterministic) +} +func (dst *NodeServiceCapability) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeServiceCapability.Merge(dst, src) +} +func (m *NodeServiceCapability) XXX_Size() int { + return xxx_messageInfo_NodeServiceCapability.Size(m) +} +func (m *NodeServiceCapability) XXX_DiscardUnknown() { + xxx_messageInfo_NodeServiceCapability.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeServiceCapability proto.InternalMessageInfo + +type isNodeServiceCapability_Type interface { + isNodeServiceCapability_Type() +} + +type NodeServiceCapability_Rpc struct { + Rpc *NodeServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,oneof"` +} + +func (*NodeServiceCapability_Rpc) isNodeServiceCapability_Type() {} + +func (m *NodeServiceCapability) GetType() isNodeServiceCapability_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *NodeServiceCapability) GetRpc() *NodeServiceCapability_RPC { + if x, ok := m.GetType().(*NodeServiceCapability_Rpc); ok { + return x.Rpc + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
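
The `XXX_OneofFuncs` plumbing that follows is internal to the proto package; consumers are expected to inspect the `Type` oneof only through the generated accessors such as `GetRpc` and `GetType`. A minimal consumer-side sketch (not part of the generated file) of how a CO might scan a node capability list for staging support; the import path and the `STAGE_UNSTAGE_VOLUME` enum value are assumptions taken from the CSI v0 spec and do not appear in this hunk:

```go
package main

import (
	"fmt"

	// Assumed vendored import path for the CSI v0 Go bindings.
	csi "github.com/container-storage-interface/spec/lib/go/csi/v0"
)

// supportsStaging walks a capability list; GetRpc returns nil for any
// capability whose oneof is not the Rpc variant, so non-RPC entries
// are skipped safely.
func supportsStaging(caps []*csi.NodeServiceCapability) bool {
	for _, c := range caps {
		if rpc := c.GetRpc(); rpc != nil &&
			rpc.GetType() == csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME {
			return true
		}
	}
	return false
}

func main() {
	// Construct a capability the way a plugin's NodeGetCapabilities
	// response would carry it.
	caps := []*csi.NodeServiceCapability{{
		Type: &csi.NodeServiceCapability_Rpc{
			Rpc: &csi.NodeServiceCapability_RPC{
				Type: csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME,
			},
		},
	}}
	fmt.Println(supportsStaging(caps)) // true
}
```
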
+func (*NodeServiceCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _NodeServiceCapability_OneofMarshaler, _NodeServiceCapability_OneofUnmarshaler, _NodeServiceCapability_OneofSizer, []interface{}{ + (*NodeServiceCapability_Rpc)(nil), + } +} + +func _NodeServiceCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*NodeServiceCapability) + // type + switch x := m.Type.(type) { + case *NodeServiceCapability_Rpc: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Rpc); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("NodeServiceCapability.Type has unexpected type %T", x) + } + return nil +} + +func _NodeServiceCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*NodeServiceCapability) + switch tag { + case 1: // type.rpc + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(NodeServiceCapability_RPC) + err := b.DecodeMessage(msg) + m.Type = &NodeServiceCapability_Rpc{msg} + return true, err + default: + return false, nil + } +} + +func _NodeServiceCapability_OneofSizer(msg proto.Message) (n int) { + m := msg.(*NodeServiceCapability) + // type + switch x := m.Type.(type) { + case *NodeServiceCapability_Rpc: + s := proto.Size(x.Rpc) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type NodeServiceCapability_RPC struct { + Type NodeServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,enum=csi.v0.NodeServiceCapability_RPC_Type" json:"type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeServiceCapability_RPC) Reset() { *m = NodeServiceCapability_RPC{} } +func (m *NodeServiceCapability_RPC) String() string { return proto.CompactTextString(m) } +func (*NodeServiceCapability_RPC) ProtoMessage() {} +func (*NodeServiceCapability_RPC) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{50, 0} +} +func (m *NodeServiceCapability_RPC) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeServiceCapability_RPC.Unmarshal(m, b) +} +func (m *NodeServiceCapability_RPC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeServiceCapability_RPC.Marshal(b, m, deterministic) +} +func (dst *NodeServiceCapability_RPC) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeServiceCapability_RPC.Merge(dst, src) +} +func (m *NodeServiceCapability_RPC) XXX_Size() int { + return xxx_messageInfo_NodeServiceCapability_RPC.Size(m) +} +func (m *NodeServiceCapability_RPC) XXX_DiscardUnknown() { + xxx_messageInfo_NodeServiceCapability_RPC.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeServiceCapability_RPC proto.InternalMessageInfo + +func (m *NodeServiceCapability_RPC) GetType() NodeServiceCapability_RPC_Type { + if m != nil { + return m.Type + } + return NodeServiceCapability_RPC_UNKNOWN +} + +type NodeGetInfoRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeGetInfoRequest) Reset() { *m = NodeGetInfoRequest{} } +func (m *NodeGetInfoRequest) String() string { return proto.CompactTextString(m) } +func 
(*NodeGetInfoRequest) ProtoMessage() {}
+func (*NodeGetInfoRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_csi_31237507707d37ec, []int{51}
+}
+func (m *NodeGetInfoRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeGetInfoRequest.Unmarshal(m, b)
+}
+func (m *NodeGetInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeGetInfoRequest.Marshal(b, m, deterministic)
+}
+func (dst *NodeGetInfoRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeGetInfoRequest.Merge(dst, src)
+}
+func (m *NodeGetInfoRequest) XXX_Size() int {
+	return xxx_messageInfo_NodeGetInfoRequest.Size(m)
+}
+func (m *NodeGetInfoRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeGetInfoRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeGetInfoRequest proto.InternalMessageInfo
+
+type NodeGetInfoResponse struct {
+	// The ID of the node as understood by the SP which SHALL be used by
+	// CO in subsequent calls to `ControllerPublishVolume`.
+	// This is a REQUIRED field.
+	NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId" json:"node_id,omitempty"`
+	// Maximum number of volumes that controller can publish to the node.
+	// If value is not set or zero CO SHALL decide how many volumes of
+	// this type can be published by the controller to the node. The
+	// plugin MUST NOT set negative values here.
+	// This field is OPTIONAL.
+	MaxVolumesPerNode int64 `protobuf:"varint,2,opt,name=max_volumes_per_node,json=maxVolumesPerNode" json:"max_volumes_per_node,omitempty"`
+	// Specifies where (regions, zones, racks, etc.) the node is
+	// accessible from.
+	// A plugin that returns this field MUST also set the
+	// ACCESSIBILITY_CONSTRAINTS plugin capability.
+	// COs MAY use this information along with the topology information
+	// returned in CreateVolumeResponse to ensure that a given volume is
+	// accessible from a given node when scheduling workloads.
+	// This field is OPTIONAL. If it is not specified, the CO MAY assume
+	// the node is not subject to any topological constraint, and MAY
+	// schedule workloads that reference any volume V, such that there are
+	// no topological constraints declared for V.
+	//
+	// Example 1:
+	//   accessible_topology =
+	//     {"region": "R1", "zone": "Z2"}
+	// Indicates the node exists within the "region" "R1" and the "zone"
+	// "Z2".
+ AccessibleTopology *Topology `protobuf:"bytes,3,opt,name=accessible_topology,json=accessibleTopology" json:"accessible_topology,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeGetInfoResponse) Reset() { *m = NodeGetInfoResponse{} } +func (m *NodeGetInfoResponse) String() string { return proto.CompactTextString(m) } +func (*NodeGetInfoResponse) ProtoMessage() {} +func (*NodeGetInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_csi_31237507707d37ec, []int{52} +} +func (m *NodeGetInfoResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeGetInfoResponse.Unmarshal(m, b) +} +func (m *NodeGetInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeGetInfoResponse.Marshal(b, m, deterministic) +} +func (dst *NodeGetInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetInfoResponse.Merge(dst, src) +} +func (m *NodeGetInfoResponse) XXX_Size() int { + return xxx_messageInfo_NodeGetInfoResponse.Size(m) +} +func (m *NodeGetInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeGetInfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeGetInfoResponse proto.InternalMessageInfo + +func (m *NodeGetInfoResponse) GetNodeId() string { + if m != nil { + return m.NodeId + } + return "" +} + +func (m *NodeGetInfoResponse) GetMaxVolumesPerNode() int64 { + if m != nil { + return m.MaxVolumesPerNode + } + return 0 +} + +func (m *NodeGetInfoResponse) GetAccessibleTopology() *Topology { + if m != nil { + return m.AccessibleTopology + } + return nil +} + +func init() { + proto.RegisterType((*GetPluginInfoRequest)(nil), "csi.v0.GetPluginInfoRequest") + proto.RegisterType((*GetPluginInfoResponse)(nil), "csi.v0.GetPluginInfoResponse") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.GetPluginInfoResponse.ManifestEntry") + proto.RegisterType((*GetPluginCapabilitiesRequest)(nil), "csi.v0.GetPluginCapabilitiesRequest") + proto.RegisterType((*GetPluginCapabilitiesResponse)(nil), "csi.v0.GetPluginCapabilitiesResponse") + proto.RegisterType((*PluginCapability)(nil), "csi.v0.PluginCapability") + proto.RegisterType((*PluginCapability_Service)(nil), "csi.v0.PluginCapability.Service") + proto.RegisterType((*ProbeRequest)(nil), "csi.v0.ProbeRequest") + proto.RegisterType((*ProbeResponse)(nil), "csi.v0.ProbeResponse") + proto.RegisterType((*CreateVolumeRequest)(nil), "csi.v0.CreateVolumeRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.CreateVolumeRequest.ControllerCreateSecretsEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.CreateVolumeRequest.ParametersEntry") + proto.RegisterType((*VolumeContentSource)(nil), "csi.v0.VolumeContentSource") + proto.RegisterType((*VolumeContentSource_SnapshotSource)(nil), "csi.v0.VolumeContentSource.SnapshotSource") + proto.RegisterType((*CreateVolumeResponse)(nil), "csi.v0.CreateVolumeResponse") + proto.RegisterType((*VolumeCapability)(nil), "csi.v0.VolumeCapability") + proto.RegisterType((*VolumeCapability_BlockVolume)(nil), "csi.v0.VolumeCapability.BlockVolume") + proto.RegisterType((*VolumeCapability_MountVolume)(nil), "csi.v0.VolumeCapability.MountVolume") + proto.RegisterType((*VolumeCapability_AccessMode)(nil), "csi.v0.VolumeCapability.AccessMode") + proto.RegisterType((*CapacityRange)(nil), "csi.v0.CapacityRange") + proto.RegisterType((*Volume)(nil), "csi.v0.Volume") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.Volume.AttributesEntry") 
+ proto.RegisterType((*TopologyRequirement)(nil), "csi.v0.TopologyRequirement") + proto.RegisterType((*Topology)(nil), "csi.v0.Topology") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.Topology.SegmentsEntry") + proto.RegisterType((*DeleteVolumeRequest)(nil), "csi.v0.DeleteVolumeRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.DeleteVolumeRequest.ControllerDeleteSecretsEntry") + proto.RegisterType((*DeleteVolumeResponse)(nil), "csi.v0.DeleteVolumeResponse") + proto.RegisterType((*ControllerPublishVolumeRequest)(nil), "csi.v0.ControllerPublishVolumeRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.ControllerPublishVolumeRequest.ControllerPublishSecretsEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.ControllerPublishVolumeRequest.VolumeAttributesEntry") + proto.RegisterType((*ControllerPublishVolumeResponse)(nil), "csi.v0.ControllerPublishVolumeResponse") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.ControllerPublishVolumeResponse.PublishInfoEntry") + proto.RegisterType((*ControllerUnpublishVolumeRequest)(nil), "csi.v0.ControllerUnpublishVolumeRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.ControllerUnpublishVolumeRequest.ControllerUnpublishSecretsEntry") + proto.RegisterType((*ControllerUnpublishVolumeResponse)(nil), "csi.v0.ControllerUnpublishVolumeResponse") + proto.RegisterType((*ValidateVolumeCapabilitiesRequest)(nil), "csi.v0.ValidateVolumeCapabilitiesRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.ValidateVolumeCapabilitiesRequest.VolumeAttributesEntry") + proto.RegisterType((*ValidateVolumeCapabilitiesResponse)(nil), "csi.v0.ValidateVolumeCapabilitiesResponse") + proto.RegisterType((*ListVolumesRequest)(nil), "csi.v0.ListVolumesRequest") + proto.RegisterType((*ListVolumesResponse)(nil), "csi.v0.ListVolumesResponse") + proto.RegisterType((*ListVolumesResponse_Entry)(nil), "csi.v0.ListVolumesResponse.Entry") + proto.RegisterType((*GetCapacityRequest)(nil), "csi.v0.GetCapacityRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.GetCapacityRequest.ParametersEntry") + proto.RegisterType((*GetCapacityResponse)(nil), "csi.v0.GetCapacityResponse") + proto.RegisterType((*ControllerGetCapabilitiesRequest)(nil), "csi.v0.ControllerGetCapabilitiesRequest") + proto.RegisterType((*ControllerGetCapabilitiesResponse)(nil), "csi.v0.ControllerGetCapabilitiesResponse") + proto.RegisterType((*ControllerServiceCapability)(nil), "csi.v0.ControllerServiceCapability") + proto.RegisterType((*ControllerServiceCapability_RPC)(nil), "csi.v0.ControllerServiceCapability.RPC") + proto.RegisterType((*CreateSnapshotRequest)(nil), "csi.v0.CreateSnapshotRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.CreateSnapshotRequest.CreateSnapshotSecretsEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.CreateSnapshotRequest.ParametersEntry") + proto.RegisterType((*CreateSnapshotResponse)(nil), "csi.v0.CreateSnapshotResponse") + proto.RegisterType((*Snapshot)(nil), "csi.v0.Snapshot") + proto.RegisterType((*SnapshotStatus)(nil), "csi.v0.SnapshotStatus") + proto.RegisterType((*DeleteSnapshotRequest)(nil), "csi.v0.DeleteSnapshotRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.DeleteSnapshotRequest.DeleteSnapshotSecretsEntry") + proto.RegisterType((*DeleteSnapshotResponse)(nil), "csi.v0.DeleteSnapshotResponse") + proto.RegisterType((*ListSnapshotsRequest)(nil), "csi.v0.ListSnapshotsRequest") + proto.RegisterType((*ListSnapshotsResponse)(nil), 
"csi.v0.ListSnapshotsResponse") + proto.RegisterType((*ListSnapshotsResponse_Entry)(nil), "csi.v0.ListSnapshotsResponse.Entry") + proto.RegisterType((*NodeStageVolumeRequest)(nil), "csi.v0.NodeStageVolumeRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.NodeStageVolumeRequest.NodeStageSecretsEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.NodeStageVolumeRequest.PublishInfoEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.NodeStageVolumeRequest.VolumeAttributesEntry") + proto.RegisterType((*NodeStageVolumeResponse)(nil), "csi.v0.NodeStageVolumeResponse") + proto.RegisterType((*NodeUnstageVolumeRequest)(nil), "csi.v0.NodeUnstageVolumeRequest") + proto.RegisterType((*NodeUnstageVolumeResponse)(nil), "csi.v0.NodeUnstageVolumeResponse") + proto.RegisterType((*NodePublishVolumeRequest)(nil), "csi.v0.NodePublishVolumeRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.NodePublishVolumeRequest.NodePublishSecretsEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.NodePublishVolumeRequest.PublishInfoEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v0.NodePublishVolumeRequest.VolumeAttributesEntry") + proto.RegisterType((*NodePublishVolumeResponse)(nil), "csi.v0.NodePublishVolumeResponse") + proto.RegisterType((*NodeUnpublishVolumeRequest)(nil), "csi.v0.NodeUnpublishVolumeRequest") + proto.RegisterType((*NodeUnpublishVolumeResponse)(nil), "csi.v0.NodeUnpublishVolumeResponse") + proto.RegisterType((*NodeGetIdRequest)(nil), "csi.v0.NodeGetIdRequest") + proto.RegisterType((*NodeGetIdResponse)(nil), "csi.v0.NodeGetIdResponse") + proto.RegisterType((*NodeGetCapabilitiesRequest)(nil), "csi.v0.NodeGetCapabilitiesRequest") + proto.RegisterType((*NodeGetCapabilitiesResponse)(nil), "csi.v0.NodeGetCapabilitiesResponse") + proto.RegisterType((*NodeServiceCapability)(nil), "csi.v0.NodeServiceCapability") + proto.RegisterType((*NodeServiceCapability_RPC)(nil), "csi.v0.NodeServiceCapability.RPC") + proto.RegisterType((*NodeGetInfoRequest)(nil), "csi.v0.NodeGetInfoRequest") + proto.RegisterType((*NodeGetInfoResponse)(nil), "csi.v0.NodeGetInfoResponse") + proto.RegisterEnum("csi.v0.PluginCapability_Service_Type", PluginCapability_Service_Type_name, PluginCapability_Service_Type_value) + proto.RegisterEnum("csi.v0.VolumeCapability_AccessMode_Mode", VolumeCapability_AccessMode_Mode_name, VolumeCapability_AccessMode_Mode_value) + proto.RegisterEnum("csi.v0.ControllerServiceCapability_RPC_Type", ControllerServiceCapability_RPC_Type_name, ControllerServiceCapability_RPC_Type_value) + proto.RegisterEnum("csi.v0.SnapshotStatus_Type", SnapshotStatus_Type_name, SnapshotStatus_Type_value) + proto.RegisterEnum("csi.v0.NodeServiceCapability_RPC_Type", NodeServiceCapability_RPC_Type_name, NodeServiceCapability_RPC_Type_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for Identity service + +type IdentityClient interface { + GetPluginInfo(ctx context.Context, in *GetPluginInfoRequest, opts ...grpc.CallOption) (*GetPluginInfoResponse, error) + GetPluginCapabilities(ctx context.Context, in *GetPluginCapabilitiesRequest, opts ...grpc.CallOption) (*GetPluginCapabilitiesResponse, error) + Probe(ctx context.Context, in *ProbeRequest, opts ...grpc.CallOption) (*ProbeResponse, error) +} + +type identityClient struct { + cc *grpc.ClientConn +} + +func NewIdentityClient(cc *grpc.ClientConn) IdentityClient { + return &identityClient{cc} +} + +func (c *identityClient) GetPluginInfo(ctx context.Context, in *GetPluginInfoRequest, opts ...grpc.CallOption) (*GetPluginInfoResponse, error) { + out := new(GetPluginInfoResponse) + err := grpc.Invoke(ctx, "/csi.v0.Identity/GetPluginInfo", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *identityClient) GetPluginCapabilities(ctx context.Context, in *GetPluginCapabilitiesRequest, opts ...grpc.CallOption) (*GetPluginCapabilitiesResponse, error) { + out := new(GetPluginCapabilitiesResponse) + err := grpc.Invoke(ctx, "/csi.v0.Identity/GetPluginCapabilities", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *identityClient) Probe(ctx context.Context, in *ProbeRequest, opts ...grpc.CallOption) (*ProbeResponse, error) { + out := new(ProbeResponse) + err := grpc.Invoke(ctx, "/csi.v0.Identity/Probe", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Identity service + +type IdentityServer interface { + GetPluginInfo(context.Context, *GetPluginInfoRequest) (*GetPluginInfoResponse, error) + GetPluginCapabilities(context.Context, *GetPluginCapabilitiesRequest) (*GetPluginCapabilitiesResponse, error) + Probe(context.Context, *ProbeRequest) (*ProbeResponse, error) +} + +func RegisterIdentityServer(s *grpc.Server, srv IdentityServer) { + s.RegisterService(&_Identity_serviceDesc, srv) +} + +func _Identity_GetPluginInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPluginInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IdentityServer).GetPluginInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Identity/GetPluginInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IdentityServer).GetPluginInfo(ctx, req.(*GetPluginInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Identity_GetPluginCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPluginCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IdentityServer).GetPluginCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Identity/GetPluginCapabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IdentityServer).GetPluginCapabilities(ctx, req.(*GetPluginCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Identity_Probe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ProbeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IdentityServer).Probe(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Identity/Probe", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IdentityServer).Probe(ctx, req.(*ProbeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Identity_serviceDesc = grpc.ServiceDesc{ + ServiceName: "csi.v0.Identity", + HandlerType: (*IdentityServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetPluginInfo", + Handler: _Identity_GetPluginInfo_Handler, + }, + { + MethodName: "GetPluginCapabilities", + Handler: _Identity_GetPluginCapabilities_Handler, + }, + { + MethodName: "Probe", + Handler: _Identity_Probe_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "csi.proto", +} + +// Client API for Controller service + +type ControllerClient interface { + CreateVolume(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error) + DeleteVolume(ctx context.Context, in *DeleteVolumeRequest, opts ...grpc.CallOption) (*DeleteVolumeResponse, error) + ControllerPublishVolume(ctx context.Context, in *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error) + ControllerUnpublishVolume(ctx context.Context, in *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*ControllerUnpublishVolumeResponse, error) + ValidateVolumeCapabilities(ctx context.Context, in *ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*ValidateVolumeCapabilitiesResponse, error) + ListVolumes(ctx context.Context, in *ListVolumesRequest, opts ...grpc.CallOption) (*ListVolumesResponse, error) + GetCapacity(ctx context.Context, in *GetCapacityRequest, opts ...grpc.CallOption) (*GetCapacityResponse, error) + ControllerGetCapabilities(ctx context.Context, in *ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*ControllerGetCapabilitiesResponse, error) + CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*CreateSnapshotResponse, error) + DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*DeleteSnapshotResponse, error) + ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) +} + +type controllerClient struct { + cc *grpc.ClientConn +} + +func NewControllerClient(cc *grpc.ClientConn) ControllerClient { + return &controllerClient{cc} +} + +func (c *controllerClient) CreateVolume(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error) { + out := new(CreateVolumeResponse) + err := grpc.Invoke(ctx, "/csi.v0.Controller/CreateVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) DeleteVolume(ctx context.Context, in *DeleteVolumeRequest, opts ...grpc.CallOption) (*DeleteVolumeResponse, error) { + out := new(DeleteVolumeResponse) + err := grpc.Invoke(ctx, "/csi.v0.Controller/DeleteVolume", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerPublishVolume(ctx context.Context, in *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error) { + out := new(ControllerPublishVolumeResponse) + err := grpc.Invoke(ctx, "/csi.v0.Controller/ControllerPublishVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerUnpublishVolume(ctx context.Context, in *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*ControllerUnpublishVolumeResponse, error) { + out := new(ControllerUnpublishVolumeResponse) + err := grpc.Invoke(ctx, "/csi.v0.Controller/ControllerUnpublishVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ValidateVolumeCapabilities(ctx context.Context, in *ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*ValidateVolumeCapabilitiesResponse, error) { + out := new(ValidateVolumeCapabilitiesResponse) + err := grpc.Invoke(ctx, "/csi.v0.Controller/ValidateVolumeCapabilities", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ListVolumes(ctx context.Context, in *ListVolumesRequest, opts ...grpc.CallOption) (*ListVolumesResponse, error) { + out := new(ListVolumesResponse) + err := grpc.Invoke(ctx, "/csi.v0.Controller/ListVolumes", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) GetCapacity(ctx context.Context, in *GetCapacityRequest, opts ...grpc.CallOption) (*GetCapacityResponse, error) { + out := new(GetCapacityResponse) + err := grpc.Invoke(ctx, "/csi.v0.Controller/GetCapacity", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerGetCapabilities(ctx context.Context, in *ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*ControllerGetCapabilitiesResponse, error) { + out := new(ControllerGetCapabilitiesResponse) + err := grpc.Invoke(ctx, "/csi.v0.Controller/ControllerGetCapabilities", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*CreateSnapshotResponse, error) { + out := new(CreateSnapshotResponse) + err := grpc.Invoke(ctx, "/csi.v0.Controller/CreateSnapshot", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*DeleteSnapshotResponse, error) { + out := new(DeleteSnapshotResponse) + err := grpc.Invoke(ctx, "/csi.v0.Controller/DeleteSnapshot", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) { + out := new(ListSnapshotsResponse) + err := grpc.Invoke(ctx, "/csi.v0.Controller/ListSnapshots", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Controller service + +type ControllerServer interface { + CreateVolume(context.Context, *CreateVolumeRequest) (*CreateVolumeResponse, error) + DeleteVolume(context.Context, *DeleteVolumeRequest) (*DeleteVolumeResponse, error) + ControllerPublishVolume(context.Context, *ControllerPublishVolumeRequest) (*ControllerPublishVolumeResponse, error) + ControllerUnpublishVolume(context.Context, *ControllerUnpublishVolumeRequest) (*ControllerUnpublishVolumeResponse, error) + ValidateVolumeCapabilities(context.Context, *ValidateVolumeCapabilitiesRequest) (*ValidateVolumeCapabilitiesResponse, error) + ListVolumes(context.Context, *ListVolumesRequest) (*ListVolumesResponse, error) + GetCapacity(context.Context, *GetCapacityRequest) (*GetCapacityResponse, error) + ControllerGetCapabilities(context.Context, *ControllerGetCapabilitiesRequest) (*ControllerGetCapabilitiesResponse, error) + CreateSnapshot(context.Context, *CreateSnapshotRequest) (*CreateSnapshotResponse, error) + DeleteSnapshot(context.Context, *DeleteSnapshotRequest) (*DeleteSnapshotResponse, error) + ListSnapshots(context.Context, *ListSnapshotsRequest) (*ListSnapshotsResponse, error) +} + +func RegisterControllerServer(s *grpc.Server, srv ControllerServer) { + s.RegisterService(&_Controller_serviceDesc, srv) +} + +func _Controller_CreateVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).CreateVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Controller/CreateVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).CreateVolume(ctx, req.(*CreateVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_DeleteVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).DeleteVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Controller/DeleteVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).DeleteVolume(ctx, req.(*DeleteVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ControllerPublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerPublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ControllerPublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Controller/ControllerPublishVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ControllerPublishVolume(ctx, req.(*ControllerPublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ControllerUnpublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerUnpublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ControllerUnpublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Controller/ControllerUnpublishVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ControllerUnpublishVolume(ctx, req.(*ControllerUnpublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ValidateVolumeCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateVolumeCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ValidateVolumeCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Controller/ValidateVolumeCapabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ValidateVolumeCapabilities(ctx, req.(*ValidateVolumeCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ListVolumes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListVolumesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ListVolumes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Controller/ListVolumes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ListVolumes(ctx, req.(*ListVolumesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_GetCapacity_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetCapacityRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).GetCapacity(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Controller/GetCapacity", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).GetCapacity(ctx, req.(*GetCapacityRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ControllerGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerGetCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ControllerGetCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Controller/ControllerGetCapabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ControllerGetCapabilities(ctx, req.(*ControllerGetCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_CreateSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(CreateSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).CreateSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Controller/CreateSnapshot", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).CreateSnapshot(ctx, req.(*CreateSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_DeleteSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).DeleteSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Controller/DeleteSnapshot", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).DeleteSnapshot(ctx, req.(*DeleteSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ListSnapshots_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListSnapshotsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ListSnapshots(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Controller/ListSnapshots", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ListSnapshots(ctx, req.(*ListSnapshotsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Controller_serviceDesc = grpc.ServiceDesc{ + ServiceName: "csi.v0.Controller", + HandlerType: (*ControllerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateVolume", + Handler: _Controller_CreateVolume_Handler, + }, + { + MethodName: "DeleteVolume", + Handler: _Controller_DeleteVolume_Handler, + }, + { + MethodName: "ControllerPublishVolume", + Handler: _Controller_ControllerPublishVolume_Handler, + }, + { + MethodName: "ControllerUnpublishVolume", + Handler: _Controller_ControllerUnpublishVolume_Handler, + }, + { + MethodName: "ValidateVolumeCapabilities", + Handler: _Controller_ValidateVolumeCapabilities_Handler, + }, + { + MethodName: "ListVolumes", + Handler: _Controller_ListVolumes_Handler, + }, + { + MethodName: "GetCapacity", + Handler: _Controller_GetCapacity_Handler, + }, + { + MethodName: "ControllerGetCapabilities", + Handler: _Controller_ControllerGetCapabilities_Handler, + }, + { + MethodName: "CreateSnapshot", + Handler: _Controller_CreateSnapshot_Handler, + }, + { + MethodName: "DeleteSnapshot", + Handler: _Controller_DeleteSnapshot_Handler, + }, + { + MethodName: "ListSnapshots", + Handler: _Controller_ListSnapshots_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "csi.proto", +} + +// Client API for Node service + +type NodeClient interface { + NodeStageVolume(ctx context.Context, in *NodeStageVolumeRequest, opts ...grpc.CallOption) (*NodeStageVolumeResponse, error) + NodeUnstageVolume(ctx context.Context, in *NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*NodeUnstageVolumeResponse, error) + NodePublishVolume(ctx context.Context, in *NodePublishVolumeRequest, opts ...grpc.CallOption) (*NodePublishVolumeResponse, error) + NodeUnpublishVolume(ctx 
context.Context, in *NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*NodeUnpublishVolumeResponse, error) + // NodeGetId is being deprecated in favor of NodeGetInfo and will be + // removed in CSI 1.0. Existing drivers, however, may depend on this + // RPC call and hence this RPC call MUST be implemented by the CSI + // plugin prior to v1.0. + NodeGetId(ctx context.Context, in *NodeGetIdRequest, opts ...grpc.CallOption) (*NodeGetIdResponse, error) + NodeGetCapabilities(ctx context.Context, in *NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*NodeGetCapabilitiesResponse, error) + // Prior to CSI 1.0 - CSI plugins MUST implement both NodeGetId and + // NodeGetInfo RPC calls. + NodeGetInfo(ctx context.Context, in *NodeGetInfoRequest, opts ...grpc.CallOption) (*NodeGetInfoResponse, error) +} + +type nodeClient struct { + cc *grpc.ClientConn +} + +func NewNodeClient(cc *grpc.ClientConn) NodeClient { + return &nodeClient{cc} +} + +func (c *nodeClient) NodeStageVolume(ctx context.Context, in *NodeStageVolumeRequest, opts ...grpc.CallOption) (*NodeStageVolumeResponse, error) { + out := new(NodeStageVolumeResponse) + err := grpc.Invoke(ctx, "/csi.v0.Node/NodeStageVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeUnstageVolume(ctx context.Context, in *NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*NodeUnstageVolumeResponse, error) { + out := new(NodeUnstageVolumeResponse) + err := grpc.Invoke(ctx, "/csi.v0.Node/NodeUnstageVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodePublishVolume(ctx context.Context, in *NodePublishVolumeRequest, opts ...grpc.CallOption) (*NodePublishVolumeResponse, error) { + out := new(NodePublishVolumeResponse) + err := grpc.Invoke(ctx, "/csi.v0.Node/NodePublishVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeUnpublishVolume(ctx context.Context, in *NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*NodeUnpublishVolumeResponse, error) { + out := new(NodeUnpublishVolumeResponse) + err := grpc.Invoke(ctx, "/csi.v0.Node/NodeUnpublishVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Deprecated: Do not use. +func (c *nodeClient) NodeGetId(ctx context.Context, in *NodeGetIdRequest, opts ...grpc.CallOption) (*NodeGetIdResponse, error) { + out := new(NodeGetIdResponse) + err := grpc.Invoke(ctx, "/csi.v0.Node/NodeGetId", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeGetCapabilities(ctx context.Context, in *NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*NodeGetCapabilitiesResponse, error) { + out := new(NodeGetCapabilitiesResponse) + err := grpc.Invoke(ctx, "/csi.v0.Node/NodeGetCapabilities", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeGetInfo(ctx context.Context, in *NodeGetInfoRequest, opts ...grpc.CallOption) (*NodeGetInfoResponse, error) { + out := new(NodeGetInfoResponse) + err := grpc.Invoke(ctx, "/csi.v0.Node/NodeGetInfo", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Node service + +type NodeServer interface { + NodeStageVolume(context.Context, *NodeStageVolumeRequest) (*NodeStageVolumeResponse, error) + NodeUnstageVolume(context.Context, *NodeUnstageVolumeRequest) (*NodeUnstageVolumeResponse, error) + NodePublishVolume(context.Context, *NodePublishVolumeRequest) (*NodePublishVolumeResponse, error) + NodeUnpublishVolume(context.Context, *NodeUnpublishVolumeRequest) (*NodeUnpublishVolumeResponse, error) + // NodeGetId is being deprecated in favor of NodeGetInfo and will be + // removed in CSI 1.0. Existing drivers, however, may depend on this + // RPC call and hence this RPC call MUST be implemented by the CSI + // plugin prior to v1.0. + NodeGetId(context.Context, *NodeGetIdRequest) (*NodeGetIdResponse, error) + NodeGetCapabilities(context.Context, *NodeGetCapabilitiesRequest) (*NodeGetCapabilitiesResponse, error) + // Prior to CSI 1.0 - CSI plugins MUST implement both NodeGetId and + // NodeGetInfo RPC calls. + NodeGetInfo(context.Context, *NodeGetInfoRequest) (*NodeGetInfoResponse, error) +} + +func RegisterNodeServer(s *grpc.Server, srv NodeServer) { + s.RegisterService(&_Node_serviceDesc, srv) +} + +func _Node_NodeStageVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeStageVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeStageVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Node/NodeStageVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeStageVolume(ctx, req.(*NodeStageVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeUnstageVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeUnstageVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeUnstageVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Node/NodeUnstageVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeUnstageVolume(ctx, req.(*NodeUnstageVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodePublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodePublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodePublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Node/NodePublishVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodePublishVolume(ctx, req.(*NodePublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeUnpublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeUnpublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeUnpublishVolume(ctx, 
in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Node/NodeUnpublishVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeUnpublishVolume(ctx, req.(*NodeUnpublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeGetId_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeGetIdRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeGetId(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Node/NodeGetId", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeGetId(ctx, req.(*NodeGetIdRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeGetCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeGetCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Node/NodeGetCapabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeGetCapabilities(ctx, req.(*NodeGetCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeGetInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeGetInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeGetInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Node/NodeGetInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeGetInfo(ctx, req.(*NodeGetInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Node_serviceDesc = grpc.ServiceDesc{ + ServiceName: "csi.v0.Node", + HandlerType: (*NodeServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "NodeStageVolume", + Handler: _Node_NodeStageVolume_Handler, + }, + { + MethodName: "NodeUnstageVolume", + Handler: _Node_NodeUnstageVolume_Handler, + }, + { + MethodName: "NodePublishVolume", + Handler: _Node_NodePublishVolume_Handler, + }, + { + MethodName: "NodeUnpublishVolume", + Handler: _Node_NodeUnpublishVolume_Handler, + }, + { + MethodName: "NodeGetId", + Handler: _Node_NodeGetId_Handler, + }, + { + MethodName: "NodeGetCapabilities", + Handler: _Node_NodeGetCapabilities_Handler, + }, + { + MethodName: "NodeGetInfo", + Handler: _Node_NodeGetInfo_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "csi.proto", +} + +func init() { proto.RegisterFile("csi.proto", fileDescriptor_csi_31237507707d37ec) } + +var fileDescriptor_csi_31237507707d37ec = []byte{ + // 2932 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0x4d, 0x73, 0x23, 0x47, + 0xd5, 0xa3, 0x0f, 0xdb, 0x7a, 0x5e, 0x3b, 0xda, 0xf6, 0x97, 0x3c, 0xb6, 0x77, 0xbd, 0xb3, 0xd9, + 0x64, 0x13, 0x12, 0x6d, 0x30, 0x24, 0x15, 0x92, 0x4d, 0x40, 0x96, 0x15, 0x5b, 0x59, 0x5b, 0x36, + 0x23, 0xd9, 0xa9, 0x5d, 0x42, 
0x4d, 0xc6, 0x52, 0x5b, 0x3b, 0xac, 0x3c, 0xa3, 0xcc, 0x8c, 0xcc, + 0x9a, 0x1b, 0x70, 0x01, 0x4e, 0xf0, 0x0b, 0x52, 0x95, 0x1b, 0x14, 0xb9, 0x50, 0xdc, 0xa8, 0xe2, + 0x46, 0x15, 0x27, 0xce, 0x9c, 0xb8, 0xa7, 0xe0, 0xc8, 0x89, 0x2a, 0xaa, 0xa8, 0x9e, 0xee, 0x19, + 0x4d, 0xb7, 0x7a, 0xf4, 0x91, 0xdd, 0x4a, 0x71, 0x92, 0xe6, 0x7d, 0xf5, 0xeb, 0xd7, 0xef, 0xbd, + 0x7e, 0xef, 0xcd, 0x40, 0xae, 0xe9, 0x59, 0xc5, 0xae, 0xeb, 0xf8, 0x0e, 0x9a, 0x26, 0x7f, 0x2f, + 0xdf, 0x50, 0x6f, 0xb4, 0x1d, 0xa7, 0xdd, 0xc1, 0xf7, 0x02, 0xe8, 0x59, 0xef, 0xfc, 0xde, 0x8f, + 0x5d, 0xb3, 0xdb, 0xc5, 0xae, 0x47, 0xe9, 0xb4, 0x15, 0x58, 0xda, 0xc3, 0xfe, 0x71, 0xa7, 0xd7, + 0xb6, 0xec, 0xaa, 0x7d, 0xee, 0xe8, 0xf8, 0xd3, 0x1e, 0xf6, 0x7c, 0xed, 0xef, 0x0a, 0x2c, 0x0b, + 0x08, 0xaf, 0xeb, 0xd8, 0x1e, 0x46, 0x08, 0x32, 0xb6, 0x79, 0x81, 0x0b, 0xca, 0x96, 0x72, 0x37, + 0xa7, 0x07, 0xff, 0xd1, 0x1d, 0x58, 0xb8, 0xc4, 0x76, 0xcb, 0x71, 0x8d, 0x4b, 0xec, 0x7a, 0x96, + 0x63, 0x17, 0x52, 0x01, 0x76, 0x9e, 0x42, 0x4f, 0x29, 0x10, 0xed, 0xc1, 0xec, 0x85, 0x69, 0x5b, + 0xe7, 0xd8, 0xf3, 0x0b, 0xe9, 0xad, 0xf4, 0xdd, 0xb9, 0xed, 0x6f, 0x14, 0xa9, 0x9e, 0x45, 0xe9, + 0x5a, 0xc5, 0x43, 0x46, 0x5d, 0xb1, 0x7d, 0xf7, 0x4a, 0x8f, 0x98, 0xd5, 0x77, 0x61, 0x9e, 0x43, + 0xa1, 0x3c, 0xa4, 0x9f, 0xe0, 0x2b, 0xa6, 0x13, 0xf9, 0x8b, 0x96, 0x20, 0x7b, 0x69, 0x76, 0x7a, + 0x98, 0x69, 0x42, 0x1f, 0xde, 0x49, 0xbd, 0xad, 0x68, 0x37, 0x60, 0x23, 0x5a, 0xad, 0x6c, 0x76, + 0xcd, 0x33, 0xab, 0x63, 0xf9, 0x16, 0xf6, 0xc2, 0xad, 0xff, 0x10, 0x36, 0x13, 0xf0, 0xcc, 0x02, + 0xf7, 0xe1, 0x5a, 0x33, 0x06, 0x2f, 0xa4, 0x82, 0xad, 0x14, 0xc2, 0xad, 0x08, 0x9c, 0x57, 0x3a, + 0x47, 0xad, 0xfd, 0x53, 0x81, 0xbc, 0x48, 0x82, 0xee, 0xc3, 0x8c, 0x87, 0xdd, 0x4b, 0xab, 0x49, + 0xed, 0x3a, 0xb7, 0xbd, 0x95, 0x24, 0xad, 0x58, 0xa7, 0x74, 0xfb, 0x53, 0x7a, 0xc8, 0xa2, 0xfe, + 0x5a, 0x81, 0x19, 0x06, 0x46, 0xdf, 0x81, 0x8c, 0x7f, 0xd5, 0xa5, 0x62, 0x16, 0xb6, 0xef, 0x8c, + 0x12, 0x53, 0x6c, 0x5c, 0x75, 0xb1, 0x1e, 0xb0, 0x68, 0x1f, 0x42, 0x86, 0x3c, 0xa1, 0x39, 0x98, + 0x39, 0xa9, 0x3d, 0xa8, 0x1d, 0x7d, 0x54, 0xcb, 0x4f, 0xa1, 0x15, 0x40, 0xe5, 0xa3, 0x5a, 0x43, + 0x3f, 0x3a, 0x38, 0xa8, 0xe8, 0x46, 0xbd, 0xa2, 0x9f, 0x56, 0xcb, 0x95, 0xbc, 0x82, 0x36, 0x61, + 0xad, 0x54, 0x2e, 0x57, 0xea, 0xf5, 0xea, 0x4e, 0xf5, 0xa0, 0xda, 0x78, 0x68, 0x94, 0x8f, 0x6a, + 0xf5, 0x86, 0x5e, 0xaa, 0xd6, 0x1a, 0xf5, 0x7c, 0x6a, 0x67, 0x9a, 0xaa, 0xa1, 0x2d, 0xc0, 0xb5, + 0x63, 0xd7, 0x39, 0xc3, 0xa1, 0x71, 0x4b, 0x30, 0xcf, 0x9e, 0x99, 0x31, 0xdf, 0x80, 0xac, 0x8b, + 0xcd, 0xd6, 0x15, 0xdb, 0xb7, 0x5a, 0xa4, 0x0e, 0x5b, 0x0c, 0x1d, 0xb6, 0xb8, 0xe3, 0x38, 0x9d, + 0x53, 0x72, 0x78, 0x3a, 0x25, 0xd4, 0xbe, 0xc8, 0xc2, 0x62, 0xd9, 0xc5, 0xa6, 0x8f, 0x4f, 0x9d, + 0x4e, 0xef, 0x22, 0x14, 0x2d, 0x75, 0xcc, 0xfb, 0xb0, 0x40, 0x8c, 0xdf, 0xb4, 0xfc, 0x2b, 0xc3, + 0x35, 0xed, 0x36, 0x75, 0x87, 0xb9, 0xed, 0xe5, 0xd0, 0x2e, 0x65, 0x86, 0xd5, 0x09, 0x52, 0x9f, + 0x6f, 0xc6, 0x1f, 0x51, 0x15, 0x16, 0x2f, 0x83, 0x25, 0x0c, 0xee, 0xbc, 0xd3, 0xfc, 0x79, 0x53, + 0x2d, 0x62, 0xe7, 0x8d, 0x2e, 0x79, 0x88, 0x85, 0x3d, 0xf4, 0x00, 0xa0, 0x6b, 0xba, 0xe6, 0x05, + 0xf6, 0xb1, 0xeb, 0x15, 0x32, 0xbc, 0xf3, 0x4b, 0x76, 0x53, 0x3c, 0x8e, 0xa8, 0xa9, 0xf3, 0xc7, + 0xd8, 0x91, 0x0f, 0x6b, 0x4d, 0xc7, 0xf6, 0x5d, 0xa7, 0xd3, 0xc1, 0xae, 0xd1, 0x0c, 0xb8, 0x0d, + 0x0f, 0x37, 0x5d, 0xec, 0x7b, 0x85, 0x6c, 0x20, 0xfb, 0xed, 0x61, 0xb2, 0xcb, 0x11, 0x33, 0xc5, + 0xd6, 0x29, 0x2b, 0x5d, 0x68, 0xb5, 0x29, 0xc7, 0xa2, 0x23, 0x58, 0x0e, 0xad, 0xe1, 0xd8, 0x3e, + 0xb6, 0x7d, 0xc3, 0x73, 0x7a, 0x6e, 0x13, 0x17, 0xa6, 
0x03, 0x93, 0xae, 0x0b, 0xf6, 0xa0, 0x34, + 0xf5, 0x80, 0x44, 0x67, 0x76, 0xe4, 0x80, 0xe8, 0x11, 0xa8, 0x66, 0xb3, 0x89, 0x3d, 0xcf, 0xa2, + 0x86, 0x33, 0x5c, 0xfc, 0x69, 0xcf, 0x72, 0xf1, 0x05, 0xb6, 0x7d, 0xaf, 0x30, 0xc3, 0x4b, 0x6d, + 0x38, 0x5d, 0xa7, 0xe3, 0xb4, 0xaf, 0xf4, 0x3e, 0x8d, 0xbe, 0xc6, 0xb1, 0xc7, 0x30, 0x9e, 0xfa, + 0x1e, 0xbc, 0x20, 0x58, 0x70, 0x92, 0x1c, 0xa1, 0x7e, 0x08, 0x1b, 0xc3, 0x8c, 0x34, 0x51, 0xbe, + 0xf9, 0xa5, 0x02, 0x8b, 0x12, 0x9b, 0xa0, 0x7d, 0x98, 0xf5, 0x6c, 0xb3, 0xeb, 0x3d, 0x76, 0x7c, + 0xe6, 0xfc, 0xaf, 0x0e, 0x31, 0x61, 0xb1, 0xce, 0x68, 0xe9, 0xe3, 0xfe, 0x94, 0x1e, 0x71, 0xab, + 0x5b, 0xb0, 0xc0, 0x63, 0xd1, 0x02, 0xa4, 0xac, 0x16, 0x53, 0x2f, 0x65, 0xb5, 0xa2, 0x70, 0x7c, + 0x1f, 0x96, 0x78, 0x87, 0x60, 0x51, 0xf8, 0x12, 0x4c, 0xd3, 0x13, 0x62, 0x9a, 0x2c, 0xf0, 0x9a, + 0xe8, 0x0c, 0xab, 0xfd, 0x2e, 0x03, 0x79, 0xd1, 0xdf, 0xd1, 0x7d, 0xc8, 0x9e, 0x75, 0x9c, 0xe6, + 0x13, 0xc6, 0xfb, 0x62, 0x52, 0x60, 0x14, 0x77, 0x08, 0x15, 0x85, 0xee, 0x4f, 0xe9, 0x94, 0x89, + 0x70, 0x5f, 0x38, 0x3d, 0xdb, 0x67, 0x91, 0x99, 0xcc, 0x7d, 0x48, 0xa8, 0xfa, 0xdc, 0x01, 0x13, + 0xda, 0x85, 0x39, 0xea, 0x04, 0xc6, 0x85, 0xd3, 0xc2, 0x85, 0x74, 0x20, 0xe3, 0x76, 0xa2, 0x8c, + 0x52, 0x40, 0x7b, 0xe8, 0xb4, 0xb0, 0x0e, 0x66, 0xf4, 0x5f, 0x9d, 0x87, 0xb9, 0x98, 0x6e, 0xea, + 0x1e, 0xcc, 0xc5, 0x16, 0x43, 0xab, 0x30, 0x73, 0xee, 0x19, 0x51, 0x56, 0xcd, 0xe9, 0xd3, 0xe7, + 0x5e, 0x90, 0x28, 0x6f, 0xc2, 0x5c, 0xa0, 0x85, 0x71, 0xde, 0x31, 0xdb, 0xf4, 0x1e, 0xc8, 0xe9, + 0x10, 0x80, 0x3e, 0x20, 0x10, 0xf5, 0x5f, 0x0a, 0x40, 0x7f, 0x49, 0x74, 0x1f, 0x32, 0x81, 0x96, + 0x34, 0x37, 0xdf, 0x1d, 0x43, 0xcb, 0x62, 0xa0, 0x6a, 0xc0, 0xa5, 0x7d, 0xa6, 0x40, 0x26, 0x10, + 0x23, 0xe6, 0xe7, 0x7a, 0xb5, 0xb6, 0x77, 0x50, 0x31, 0x6a, 0x47, 0xbb, 0x15, 0xe3, 0x23, 0xbd, + 0xda, 0xa8, 0xe8, 0x79, 0x05, 0xad, 0xc3, 0x6a, 0x1c, 0xae, 0x57, 0x4a, 0xbb, 0x15, 0xdd, 0x38, + 0xaa, 0x1d, 0x3c, 0xcc, 0xa7, 0x90, 0x0a, 0x2b, 0x87, 0x27, 0x07, 0x8d, 0xea, 0x20, 0x2e, 0x8d, + 0x36, 0xa0, 0x10, 0xc3, 0x31, 0x19, 0x4c, 0x6c, 0x86, 0x88, 0x8d, 0x61, 0xe9, 0x5f, 0x86, 0xcc, + 0xee, 0xcc, 0x47, 0x87, 0x11, 0x38, 0xdb, 0x47, 0x30, 0xcf, 0xa5, 0x57, 0x52, 0x26, 0xb0, 0x10, + 0x6f, 0x19, 0x67, 0x57, 0x3e, 0xf6, 0x02, 0x4b, 0xa4, 0xf5, 0xf9, 0x10, 0xba, 0x43, 0x80, 0xc4, + 0xac, 0x1d, 0xeb, 0xc2, 0xf2, 0x19, 0x4d, 0x2a, 0xa0, 0x81, 0x00, 0x14, 0x10, 0x68, 0x7f, 0x49, + 0xc1, 0x34, 0x3b, 0x9b, 0x3b, 0xb1, 0x04, 0xcf, 0x89, 0x0c, 0xa1, 0x54, 0x24, 0x8d, 0x87, 0x54, + 0x18, 0x0f, 0xe8, 0x7d, 0x00, 0xd3, 0xf7, 0x5d, 0xeb, 0xac, 0xe7, 0x47, 0x09, 0xfd, 0x06, 0x7f, + 0x1e, 0xc5, 0x52, 0x44, 0xc0, 0x32, 0x70, 0x9f, 0x03, 0xed, 0xc0, 0x82, 0x90, 0x04, 0x33, 0xa3, + 0x93, 0xe0, 0x7c, 0x93, 0x8b, 0xff, 0x12, 0x2c, 0x86, 0xf9, 0xab, 0x83, 0x0d, 0x9f, 0xe5, 0x37, + 0x96, 0xbf, 0xf3, 0x03, 0x79, 0x0f, 0xf5, 0x89, 0x43, 0x18, 0xc9, 0x72, 0x82, 0x96, 0x13, 0x65, + 0xa6, 0x1e, 0x2c, 0x4a, 0xd2, 0x2a, 0x2a, 0x42, 0x2e, 0x38, 0x10, 0xcf, 0xf2, 0x89, 0xaf, 0xca, + 0xd5, 0xe9, 0x93, 0x10, 0xfa, 0xae, 0x8b, 0xcf, 0xb1, 0xeb, 0xe2, 0x16, 0x2b, 0x86, 0x24, 0xf4, + 0x11, 0x89, 0xf6, 0x73, 0x05, 0x66, 0x43, 0x38, 0x7a, 0x07, 0x66, 0x3d, 0xdc, 0xa6, 0x29, 0x5f, + 0xe1, 0xcf, 0x21, 0xa4, 0x29, 0xd6, 0x19, 0x01, 0x2b, 0x03, 0x43, 0x7a, 0x52, 0x06, 0x72, 0xa8, + 0x89, 0x36, 0xff, 0x6f, 0x05, 0x16, 0x77, 0x71, 0x07, 0x8b, 0x65, 0xc4, 0x3a, 0xe4, 0xd8, 0x35, + 0x17, 0x65, 0xd0, 0x59, 0x0a, 0xa8, 0xb6, 0x84, 0x9b, 0xb7, 0x15, 0xb0, 0x47, 0x37, 0x6f, 0x8a, + 0xbf, 0x79, 0x25, 0xc2, 0x63, 0x37, 0x2f, 0xc5, 0x26, 0xdd, 0xbc, 0x1c, 0x96, 
0xbf, 0x8d, 0x06, + 0x19, 0x27, 0xda, 0xf6, 0x0a, 0x2c, 0xf1, 0x8a, 0xd1, 0x1b, 0x40, 0xfb, 0x53, 0x06, 0x6e, 0xf4, + 0x17, 0x39, 0xee, 0x9d, 0x75, 0x2c, 0xef, 0xf1, 0x04, 0x96, 0x59, 0x85, 0x19, 0xdb, 0x69, 0x05, + 0x28, 0xba, 0xe6, 0x34, 0x79, 0xac, 0xb6, 0x50, 0x05, 0xae, 0x8b, 0x45, 0xd4, 0x15, 0xcb, 0xd3, + 0xc9, 0x25, 0x54, 0xfe, 0x52, 0xbc, 0x64, 0x54, 0x98, 0x25, 0xe5, 0x9f, 0x63, 0x77, 0xae, 0x82, + 0x58, 0x9b, 0xd5, 0xa3, 0x67, 0xf4, 0x33, 0x05, 0xd4, 0xd8, 0xb1, 0x74, 0xa9, 0xf2, 0x42, 0x45, + 0xb4, 0x1b, 0x55, 0x44, 0x43, 0x77, 0x39, 0x88, 0xe6, 0xce, 0xa8, 0xd0, 0x4c, 0x40, 0x23, 0x2b, + 0xda, 0x67, 0x2c, 0xb3, 0x4c, 0x07, 0x4b, 0xdf, 0x1f, 0x73, 0x69, 0xfa, 0x24, 0xe6, 0x1d, 0x66, + 0x8b, 0x3e, 0x58, 0x7d, 0x00, 0x9b, 0x43, 0xb5, 0x9c, 0xa8, 0xd4, 0x29, 0xc3, 0xb2, 0x74, 0xdd, + 0x89, 0xbc, 0xea, 0xcf, 0x0a, 0xdc, 0x4c, 0xdc, 0x1c, 0xab, 0x31, 0x7e, 0x00, 0xd7, 0xc2, 0x93, + 0xb1, 0xec, 0x73, 0x87, 0x45, 0xfb, 0xdb, 0x23, 0x6d, 0xc3, 0x7a, 0x41, 0x06, 0x25, 0xfd, 0x21, + 0xb5, 0xcb, 0x5c, 0xb7, 0x0f, 0x51, 0xdf, 0x87, 0xbc, 0x48, 0x30, 0xd1, 0x06, 0xfe, 0x98, 0x82, + 0xad, 0xbe, 0x06, 0x27, 0x76, 0xf7, 0xf9, 0x05, 0xc0, 0xaf, 0x14, 0xd8, 0x88, 0x79, 0x67, 0xcf, + 0x16, 0xfd, 0x93, 0x5e, 0x3f, 0xfb, 0x83, 0x86, 0x90, 0xab, 0x21, 0x23, 0xe0, 0x7c, 0x34, 0x16, + 0x0b, 0x22, 0x81, 0x7a, 0x18, 0x3f, 0x27, 0x29, 0xfb, 0x44, 0x66, 0xbb, 0x0d, 0xb7, 0x86, 0xa8, + 0xcb, 0x52, 0xcb, 0x4f, 0xd3, 0x70, 0xeb, 0xd4, 0xec, 0x58, 0xad, 0xa8, 0xee, 0x94, 0xb4, 0xdd, + 0xc3, 0x8d, 0x9b, 0xd0, 0x89, 0xa5, 0xbe, 0x42, 0x27, 0xd6, 0x91, 0xc5, 0x29, 0x3d, 0x82, 0xef, + 0x46, 0x82, 0x46, 0x69, 0x3b, 0x6e, 0xa8, 0x26, 0x5d, 0xf2, 0x99, 0x09, 0x2e, 0xf9, 0xe7, 0x12, + 0xa0, 0x1f, 0x83, 0x36, 0x6c, 0x53, 0x2c, 0x44, 0x37, 0x20, 0xe7, 0xf5, 0xba, 0x5d, 0xc7, 0xf5, + 0x31, 0x3d, 0x83, 0x59, 0xbd, 0x0f, 0x40, 0x05, 0x98, 0xb9, 0xc0, 0x9e, 0x67, 0xb6, 0x43, 0xf9, + 0xe1, 0xa3, 0xf6, 0x31, 0xa0, 0x03, 0xcb, 0x63, 0xf5, 0x72, 0x74, 0xa2, 0xa4, 0x3c, 0x36, 0x9f, + 0x1a, 0xd8, 0xf6, 0x5d, 0x8b, 0x15, 0x66, 0x59, 0x1d, 0x2e, 0xcc, 0xa7, 0x15, 0x0a, 0x21, 0xc5, + 0x9b, 0xe7, 0x9b, 0xae, 0x6f, 0xd9, 0x6d, 0xc3, 0x77, 0x9e, 0xe0, 0x68, 0x6c, 0x14, 0x42, 0x1b, + 0x04, 0xa8, 0x7d, 0xae, 0xc0, 0x22, 0x27, 0x9e, 0x69, 0xfb, 0x2e, 0xcc, 0xf4, 0x65, 0x13, 0x7b, + 0xde, 0x0a, 0xed, 0x29, 0xa1, 0x2e, 0xd2, 0x13, 0x0a, 0x39, 0xd0, 0x26, 0x80, 0x8d, 0x9f, 0xfa, + 0xdc, 0xba, 0x39, 0x02, 0x09, 0xd6, 0x54, 0xef, 0x41, 0x96, 0x1a, 0x79, 0xdc, 0xce, 0xe8, 0x8b, + 0x14, 0xa0, 0x3d, 0xec, 0x47, 0x05, 0x2f, 0xb3, 0x41, 0x82, 0xe3, 0x2a, 0x5f, 0xc1, 0x71, 0x3f, + 0xe4, 0x46, 0x08, 0xd4, 0xf5, 0x5f, 0x8d, 0xcd, 0xcf, 0x84, 0xa5, 0x87, 0x4e, 0x10, 0x12, 0xdc, + 0x92, 0x5e, 0xcb, 0x63, 0xd7, 0x9e, 0xcf, 0xd0, 0x61, 0x6b, 0xbb, 0xb0, 0xc8, 0xe9, 0xcc, 0xce, + 0xf4, 0x75, 0x40, 0xe6, 0xa5, 0x69, 0x75, 0x4c, 0xa2, 0x57, 0x58, 0xc3, 0xb3, 0x9a, 0xfe, 0x7a, + 0x84, 0x09, 0xd9, 0x34, 0x2d, 0x9e, 0xb5, 0x99, 0x3c, 0x71, 0x9e, 0xd7, 0x89, 0xe7, 0xa8, 0x01, + 0x1a, 0xb6, 0xee, 0x9e, 0x74, 0xa6, 0x77, 0x7b, 0x30, 0x27, 0xb3, 0xb9, 0x59, 0xe2, 0x78, 0xef, + 0x6f, 0x29, 0x58, 0x1f, 0x42, 0x8d, 0xde, 0x85, 0xb4, 0xdb, 0x6d, 0x32, 0x67, 0x7a, 0x79, 0x0c, + 0xf9, 0x45, 0xfd, 0xb8, 0xbc, 0x3f, 0xa5, 0x13, 0x2e, 0xf5, 0x4b, 0x05, 0xd2, 0xfa, 0x71, 0x19, + 0x7d, 0x8f, 0x1b, 0xf2, 0xbd, 0x36, 0xa6, 0x94, 0xf8, 0xac, 0x8f, 0x34, 0x93, 0x83, 0xc3, 0xbe, + 0x02, 0x2c, 0x95, 0xf5, 0x4a, 0xa9, 0x51, 0x31, 0x76, 0x2b, 0x07, 0x95, 0x46, 0xc5, 0x38, 0x3d, + 0x3a, 0x38, 0x39, 0xac, 0xe4, 0x15, 0xd2, 0x15, 0x1e, 0x9f, 0xec, 0x1c, 0x54, 0xeb, 0xfb, 0xc6, + 0x49, 
0x2d, 0xfc, 0xc7, 0xb0, 0x29, 0x94, 0x87, 0x6b, 0x07, 0xd5, 0x7a, 0x83, 0x01, 0xea, 0xf9, + 0x34, 0x81, 0xec, 0x55, 0x1a, 0x46, 0xb9, 0x74, 0x5c, 0x2a, 0x57, 0x1b, 0x0f, 0xf3, 0x19, 0xd2, + 0x73, 0xf2, 0xb2, 0xeb, 0xb5, 0xd2, 0x71, 0x7d, 0xff, 0xa8, 0x91, 0xcf, 0x22, 0x04, 0x0b, 0x01, + 0x7f, 0x08, 0xaa, 0xe7, 0xa7, 0xa3, 0x91, 0xc5, 0x67, 0x69, 0x58, 0x66, 0x13, 0x18, 0x36, 0xe3, + 0x08, 0x63, 0xeb, 0x2e, 0xe4, 0x69, 0xf3, 0x65, 0x88, 0x17, 0xc7, 0x02, 0x85, 0x9f, 0x86, 0xd7, + 0x47, 0x38, 0x1a, 0x4c, 0xc5, 0x46, 0x83, 0x5d, 0x58, 0x0d, 0x27, 0x67, 0x4c, 0xae, 0x70, 0x21, + 0x0b, 0x23, 0x34, 0x61, 0x75, 0x01, 0xca, 0x5d, 0xc0, 0xcb, 0x4d, 0x19, 0x0e, 0x1d, 0x4a, 0x66, + 0x80, 0xaf, 0x0f, 0x5f, 0x64, 0x48, 0x0c, 0xab, 0xfb, 0xa0, 0x26, 0xeb, 0x30, 0x51, 0x09, 0xf8, + 0x8c, 0xa1, 0xfc, 0x01, 0xac, 0x88, 0xda, 0xb3, 0xa8, 0x7a, 0x6d, 0x60, 0xc4, 0x15, 0xe5, 0x96, + 0x88, 0x36, 0xa2, 0xd0, 0xfe, 0xa0, 0xc0, 0x6c, 0x08, 0x26, 0xf9, 0xd9, 0xb3, 0x7e, 0x82, 0xb9, + 0xa6, 0x3e, 0x47, 0x20, 0xf2, 0x86, 0x5e, 0xe6, 0x0b, 0x69, 0xa9, 0x2f, 0x6c, 0x02, 0xd0, 0xe3, + 0x69, 0x19, 0xa6, 0x1f, 0xb4, 0x12, 0x69, 0x3d, 0xc7, 0x20, 0x25, 0xd2, 0xfc, 0x4e, 0x7b, 0xbe, + 0xe9, 0xf7, 0x48, 0xdb, 0x40, 0x14, 0x5e, 0x11, 0x15, 0xae, 0x07, 0x58, 0x9d, 0x51, 0x91, 0x40, + 0x5a, 0xe0, 0x51, 0xe8, 0x1e, 0x17, 0x9d, 0xeb, 0x72, 0x01, 0xb1, 0x60, 0x24, 0x17, 0x6b, 0x0b, + 0xfb, 0xa6, 0xd5, 0xf1, 0xc2, 0x8b, 0x95, 0x3d, 0x6a, 0x3b, 0xb2, 0x28, 0xcd, 0x41, 0x56, 0xaf, + 0x94, 0x76, 0x1f, 0xe6, 0x15, 0x34, 0x0f, 0xb9, 0x93, 0xe3, 0x83, 0xa3, 0xd2, 0x6e, 0xb5, 0xb6, + 0x97, 0x4f, 0xa1, 0x45, 0x78, 0xa1, 0xa2, 0xeb, 0x47, 0xba, 0xd1, 0x07, 0xa6, 0x49, 0xa3, 0xbb, + 0xcc, 0x9a, 0x46, 0x21, 0x80, 0x6e, 0xc2, 0x5c, 0xe4, 0xfb, 0x51, 0xec, 0x40, 0x08, 0xaa, 0xb6, + 0x48, 0x8c, 0x84, 0x3d, 0xae, 0x18, 0x23, 0xd2, 0x66, 0x57, 0x74, 0x5f, 0x1e, 0xca, 0xc7, 0x48, + 0x4b, 0x86, 0x23, 0x4e, 0x9d, 0xcc, 0x34, 0x91, 0x57, 0x16, 0x60, 0x45, 0x54, 0x8a, 0xd5, 0xa3, + 0xbf, 0x55, 0x60, 0x89, 0x54, 0x08, 0x21, 0xe2, 0x79, 0x17, 0x2c, 0x13, 0x38, 0xa3, 0x70, 0x02, + 0x19, 0xf1, 0x04, 0xb4, 0xdf, 0x2b, 0xb0, 0x2c, 0xe8, 0xca, 0x62, 0xeb, 0x3d, 0xb1, 0xfa, 0xb9, + 0x1d, 0xaf, 0x7e, 0x06, 0xe8, 0x27, 0xac, 0x7f, 0xde, 0x0c, 0xeb, 0x9f, 0xc9, 0x42, 0xf8, 0x37, + 0x59, 0x58, 0xa9, 0x39, 0x2d, 0x5c, 0xf7, 0xcd, 0xf6, 0x24, 0x73, 0x15, 0x5d, 0xe8, 0x0d, 0xa9, + 0x77, 0xdd, 0x0b, 0x57, 0x92, 0x8b, 0x1c, 0xde, 0x12, 0xa2, 0x22, 0x2c, 0x7a, 0xbe, 0xd9, 0x0e, + 0xce, 0xca, 0x74, 0xdb, 0xd8, 0x37, 0xba, 0xa6, 0xff, 0x98, 0x1d, 0xc4, 0x75, 0x86, 0x6a, 0x04, + 0x98, 0x63, 0xd3, 0x7f, 0x2c, 0x1f, 0x54, 0x64, 0x26, 0x1e, 0x54, 0x9c, 0x01, 0x0a, 0xfa, 0x40, + 0xb2, 0x80, 0xf8, 0x56, 0xe6, 0xdb, 0x23, 0x36, 0x14, 0x81, 0xb9, 0x50, 0xc9, 0xdb, 0x02, 0x18, + 0x99, 0xc9, 0xb3, 0x86, 0x51, 0x4b, 0x8c, 0x3b, 0x63, 0x78, 0xc6, 0x86, 0x9a, 0x74, 0x2d, 0xd2, + 0xdd, 0x7c, 0xfd, 0xb3, 0x89, 0x35, 0x58, 0x1d, 0xb0, 0x05, 0xcb, 0x04, 0x6d, 0x28, 0x10, 0xd4, + 0x89, 0xed, 0x4d, 0xe8, 0xaf, 0x09, 0xbe, 0x95, 0x4a, 0xf0, 0x2d, 0x6d, 0x1d, 0xd6, 0x24, 0x0b, + 0x31, 0x2d, 0xfe, 0x91, 0xa5, 0x6a, 0x4c, 0x3e, 0x74, 0x6b, 0x48, 0xc3, 0xe6, 0x9b, 0x71, 0x17, + 0x90, 0x0e, 0x9a, 0x9e, 0x6f, 0xe0, 0xdc, 0x84, 0xb9, 0x38, 0x1d, 0x4b, 0x62, 0xfe, 0x88, 0xc8, + 0xca, 0x3e, 0xd3, 0x08, 0x70, 0x5a, 0x18, 0x01, 0xfe, 0x08, 0x96, 0x82, 0xa8, 0x13, 0x67, 0x2b, + 0x33, 0xfc, 0x35, 0x95, 0x68, 0x91, 0x18, 0x82, 0x8b, 0xbd, 0x20, 0x96, 0x85, 0x49, 0x5f, 0x53, + 0x16, 0x7d, 0xb3, 0xc1, 0x42, 0x6f, 0x8d, 0x5c, 0xe8, 0xeb, 0x8a, 0xbf, 0x0a, 0xf5, 0xfa, 0xff, + 0x8b, 0xe9, 0x20, 0xf3, 0x7e, 
0xe9, 0x5c, 0x4f, 0x7b, 0x04, 0x2a, 0x0d, 0x8d, 0xc9, 0x47, 0x6e, + 0x82, 0xe3, 0xa5, 0x44, 0xc7, 0xd3, 0x36, 0x61, 0x5d, 0x2a, 0x9b, 0x2d, 0x8d, 0x20, 0x4f, 0xd0, + 0x7b, 0xd8, 0xaf, 0xb6, 0xc2, 0x6e, 0xf1, 0x35, 0xb8, 0x1e, 0x83, 0xb1, 0xbb, 0x36, 0x36, 0xdb, + 0x53, 0xe2, 0xb3, 0x3d, 0x6d, 0x83, 0x2a, 0x9f, 0xd0, 0x79, 0x7e, 0x42, 0x97, 0x4f, 0xea, 0x39, + 0x4b, 0x42, 0xcf, 0x49, 0xaf, 0xf1, 0x4d, 0x2e, 0x81, 0x8f, 0xe8, 0x36, 0xff, 0xaa, 0xb0, 0x34, + 0x3b, 0xd0, 0x67, 0xbe, 0x19, 0xef, 0x33, 0x6f, 0x0d, 0x95, 0x19, 0xef, 0x30, 0xbb, 0xb4, 0xc1, + 0x7c, 0x87, 0x2b, 0x61, 0x5f, 0x1a, 0xc9, 0x1e, 0x6f, 0x2d, 0x5f, 0x4f, 0xe8, 0x2c, 0xeb, 0x8d, + 0xd2, 0x5e, 0xc5, 0x38, 0xa9, 0xd1, 0xdf, 0xb0, 0xb3, 0x8c, 0xfa, 0xbc, 0x25, 0x40, 0xa1, 0xe1, + 0x63, 0xdf, 0x21, 0x7d, 0xae, 0xc0, 0x22, 0x07, 0x1e, 0x71, 0x22, 0xe8, 0x1e, 0x2c, 0x91, 0x1a, + 0x8e, 0xfa, 0x88, 0x67, 0x74, 0xb1, 0x6b, 0x10, 0x0c, 0x7b, 0x8b, 0x78, 0xfd, 0xc2, 0x7c, 0xca, + 0x06, 0x43, 0xc7, 0xd8, 0x25, 0x82, 0x9f, 0xc3, 0x28, 0x64, 0xfb, 0x3f, 0x0a, 0xcc, 0x56, 0x5b, + 0xd8, 0xf6, 0x89, 0xe1, 0x6b, 0x30, 0xcf, 0x7d, 0xcc, 0x84, 0x36, 0x12, 0xbe, 0x71, 0x0a, 0x36, + 0xa8, 0x6e, 0x0e, 0xfd, 0x02, 0x4a, 0x9b, 0x42, 0xe7, 0xb1, 0x0f, 0xb1, 0xb8, 0x79, 0xd0, 0x8b, + 0x03, 0x9c, 0x12, 0x1f, 0x54, 0xef, 0x8c, 0xa0, 0x8a, 0xd6, 0x79, 0x0b, 0xb2, 0xc1, 0x97, 0x39, + 0x68, 0x29, 0xfa, 0x66, 0x28, 0xf6, 0xe1, 0x8e, 0xba, 0x2c, 0x40, 0x43, 0xbe, 0xed, 0xff, 0xce, + 0x00, 0xf4, 0x07, 0x0f, 0xe8, 0x01, 0x5c, 0x8b, 0x7f, 0x61, 0x80, 0xd6, 0x87, 0x7c, 0x88, 0xa2, + 0x6e, 0xc8, 0x91, 0x91, 0x4e, 0x0f, 0xe0, 0x5a, 0xfc, 0x65, 0x55, 0x5f, 0x98, 0xe4, 0xdd, 0x5a, + 0x5f, 0x98, 0xf4, 0xfd, 0xd6, 0x14, 0xea, 0xc0, 0x6a, 0xc2, 0x3b, 0x06, 0xf4, 0xd2, 0x78, 0x2f, + 0x68, 0xd4, 0x97, 0xc7, 0x7c, 0x59, 0xa1, 0x4d, 0x21, 0x17, 0xd6, 0x12, 0x27, 0xe3, 0xe8, 0xee, + 0xb8, 0xb3, 0x7e, 0xf5, 0x95, 0x31, 0x28, 0xa3, 0x35, 0x7b, 0xa0, 0x26, 0x0f, 0x79, 0xd1, 0x2b, + 0x63, 0x4f, 0xb7, 0xd5, 0x57, 0xc7, 0x21, 0x8d, 0x96, 0xdd, 0x87, 0xb9, 0xd8, 0xc0, 0x15, 0xa9, + 0xd2, 0x29, 0x2c, 0x15, 0xbc, 0x3e, 0x64, 0x42, 0x4b, 0x25, 0xc5, 0x86, 0x82, 0x7d, 0x49, 0x83, + 0xd3, 0xcd, 0xbe, 0x24, 0xc9, 0x14, 0x51, 0x34, 0xbf, 0x90, 0x80, 0x65, 0xe6, 0x97, 0x67, 0x70, + 0x99, 0xf9, 0x13, 0xb2, 0xb9, 0x36, 0x85, 0xbe, 0x0f, 0x0b, 0xfc, 0x1c, 0x04, 0x6d, 0x0e, 0x9d, + 0xee, 0xa8, 0x37, 0x92, 0xd0, 0x71, 0x91, 0x7c, 0x13, 0xdb, 0x17, 0x29, 0xed, 0xb8, 0xfb, 0x22, + 0x13, 0x7a, 0xdf, 0x29, 0x92, 0x9f, 0xb8, 0x06, 0xb1, 0x9f, 0x9f, 0x64, 0x3d, 0x71, 0x3f, 0x3f, + 0x49, 0xbb, 0x4a, 0x6d, 0x6a, 0xfb, 0xcb, 0x0c, 0x64, 0x82, 0x44, 0xda, 0x80, 0x17, 0x84, 0x3a, + 0x1b, 0xdd, 0x18, 0xde, 0x8c, 0xa8, 0x37, 0x13, 0xf1, 0x91, 0xba, 0x8f, 0xe8, 0x7d, 0xcc, 0x55, + 0xce, 0x68, 0x2b, 0xce, 0x27, 0xab, 0xde, 0xd5, 0x5b, 0x43, 0x28, 0x44, 0xd9, 0x7c, 0x2e, 0xd8, + 0x1a, 0x55, 0xc2, 0xf1, 0xb2, 0x93, 0xe2, 0xff, 0x13, 0x7a, 0x6f, 0x89, 0x91, 0xaf, 0xf1, 0x7a, + 0x49, 0x63, 0xfe, 0xf6, 0x50, 0x9a, 0x68, 0x85, 0x0a, 0xe4, 0xa2, 0x4a, 0x05, 0x15, 0xe2, 0x3c, + 0xf1, 0x82, 0x46, 0x5d, 0x93, 0x60, 0x98, 0x8c, 0xf4, 0x2f, 0x52, 0x4a, 0xa8, 0xa8, 0x18, 0x23, + 0x9a, 0xc0, 0x26, 0x8b, 0x8e, 0xdb, 0x43, 0x69, 0xe2, 0x51, 0x1d, 0xbb, 0xc2, 0xfb, 0x51, 0x3d, + 0x78, 0xdd, 0xf7, 0xa3, 0x5a, 0x72, 0xe7, 0x6b, 0x53, 0x3b, 0xd9, 0x47, 0xe9, 0xa6, 0x67, 0x9d, + 0x4d, 0x07, 0x1f, 0x87, 0x7e, 0xeb, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x9c, 0x3b, 0x5a, 0x51, + 0xf0, 0x2c, 0x00, 0x00, +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager/BUILD 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager/BUILD new file mode 100644 index 000000000000..6af576c4221a --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager/BUILD @@ -0,0 +1,67 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["nodeinfomanager.go"], + importpath = "k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager", + visibility = ["//visibility:public"], + deps = [ + "//pkg/features:go_default_library", + "//pkg/util/node:go_default_library", + "//pkg/volume:go_default_library", + "//pkg/volume/util:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/csi-api/pkg/apis/csi/v1alpha1:go_default_library", + "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["nodeinfomanager_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/apis/core/helper:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", + "//pkg/features:go_default_library", + "//pkg/volume/testing:go_default_library", + "//pkg/volume/util:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", + "//staging/src/k8s.io/client-go/testing:go_default_library", + "//staging/src/k8s.io/client-go/util/testing:go_default_library", + "//staging/src/k8s.io/csi-api/pkg/apis/csi/v1alpha1:go_default_library", + "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned/fake:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", + ], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go new file mode 100644 index 000000000000..42615c168aad --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go @@ -0,0 +1,660 @@ +/* 
+Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package nodeinfomanager includes internal functions used to add/delete labels to +// Kubernetes nodes for corresponding CSI drivers. +package nodeinfomanager + +import ( + "encoding/json" + "fmt" + "strings" + + "time" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + utilfeature "k8s.io/apiserver/pkg/util/feature" + csiv1alpha1 "k8s.io/csi-api/pkg/apis/csi/v1alpha1" + csiclientset "k8s.io/csi-api/pkg/client/clientset/versioned" + "k8s.io/klog" + "k8s.io/kubernetes/pkg/features" + nodeutil "k8s.io/kubernetes/pkg/util/node" + "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/util" +) + +const ( + // Name of node annotation that contains a JSON map of driver names to node IDs + annotationKeyNodeID = "csi.volume.kubernetes.io/nodeid" +) + +var ( + nodeKind = v1.SchemeGroupVersion.WithKind("Node") + updateBackoff = wait.Backoff{ + Steps: 4, + Duration: 10 * time.Millisecond, + Factor: 5.0, + Jitter: 0.1, + } +) + +// nodeInfoManager contains necessary common dependencies to update node info on both +// the Node and CSINodeInfo objects. +type nodeInfoManager struct { + nodeName types.NodeName + volumeHost volume.VolumeHost +} + +// If no update is needed, the function must return the same Node object as the input. +type nodeUpdateFunc func(*v1.Node) (newNode *v1.Node, updated bool, err error) + +// Interface defines an interface for managing node information for CSI drivers. +type Interface interface { + CreateCSINodeInfo() (*csiv1alpha1.CSINodeInfo, error) + + // Records in the cluster the given node information from the CSI driver with the given name. + // Concurrent calls to InstallCSIDriver() are allowed, but they should not be intertwined with calls + // to other methods in this interface. + InstallCSIDriver(driverName string, driverNodeID string, maxVolumeLimit int64, topology map[string]string) error + + // Removes from the cluster the node information for the CSI driver with the given name. + // Concurrent calls to UninstallCSIDriver() are allowed, but they should not be intertwined with calls + // to other methods in this interface. + UninstallCSIDriver(driverName string) error +} + +// NewNodeInfoManager initializes nodeInfoManager +func NewNodeInfoManager( + nodeName types.NodeName, + volumeHost volume.VolumeHost) Interface { + return &nodeInfoManager{ + nodeName: nodeName, + volumeHost: volumeHost, + } +} + +// InstallCSIDriver updates the node ID annotation in the Node object and CSIDrivers field in the +// CSINodeInfo object. If the CSINodeInfo object doesn't yet exist, it will be created.
+// If multiple calls to InstallCSIDriver() are made in parallel, some calls might receive Node or +// CSINodeInfo update conflicts, which causes the function to retry the corresponding update. +func (nim *nodeInfoManager) InstallCSIDriver(driverName string, driverNodeID string, maxAttachLimit int64, topology map[string]string) error { + if driverNodeID == "" { + return fmt.Errorf("error adding CSI driver node info: driverNodeID must not be empty") + } + + nodeUpdateFuncs := []nodeUpdateFunc{ + updateNodeIDInNode(driverName, driverNodeID), + } + + if utilfeature.DefaultFeatureGate.Enabled(features.CSINodeInfo) { + nodeUpdateFuncs = append(nodeUpdateFuncs, updateTopologyLabels(topology)) + } + + if utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) { + nodeUpdateFuncs = append(nodeUpdateFuncs, updateMaxAttachLimit(driverName, maxAttachLimit)) + } + + err := nim.updateNode(nodeUpdateFuncs...) + if err != nil { + return fmt.Errorf("error updating Node object with CSI driver node info: %v", err) + } + + if utilfeature.DefaultFeatureGate.Enabled(features.CSINodeInfo) { + err = nim.updateCSINodeInfo(driverName, driverNodeID, topology) + if err != nil { + return fmt.Errorf("error updating CSINodeInfo object with CSI driver node info: %v", err) + } + } + return nil +} + +// UninstallCSIDriver removes the node ID annotation from the Node object and CSIDrivers field from the +// CSINodeInfo object. If the CSINodeInfo object contains no CSIDrivers, it will be deleted. +// If multiple calls to UninstallCSIDriver() are made in parallel, some calls might receive Node or +// CSINodeInfo update conflicts, which causes the function to retry the corresponding update. +func (nim *nodeInfoManager) UninstallCSIDriver(driverName string) error { + if utilfeature.DefaultFeatureGate.Enabled(features.CSINodeInfo) { + err := nim.uninstallDriverFromCSINodeInfo(driverName) + if err != nil { + return fmt.Errorf("error uninstalling CSI driver from CSINodeInfo object: %v", err) + } + } + + err := nim.updateNode( + removeMaxAttachLimit(driverName), + removeNodeIDFromNode(driverName), + ) + if err != nil { + return fmt.Errorf("error removing CSI driver node info from Node object: %v", err) + } + return nil +} + +func (nim *nodeInfoManager) updateNode(updateFuncs ...nodeUpdateFunc) error { + var updateErrs []error + err := wait.ExponentialBackoff(updateBackoff, func() (bool, error) { + if err := nim.tryUpdateNode(updateFuncs...); err != nil { + updateErrs = append(updateErrs, err) + return false, nil + } + return true, nil + }) + if err != nil { + return fmt.Errorf("error updating node: %v; caused by: %v", err, utilerrors.NewAggregate(updateErrs)) + } + return nil +} + +// tryUpdateNode makes a single attempt to update the corresponding node object, +// which is modified by applying the given update functions sequentially; +// updateNode retries it with exponential backoff on failure. +// Because updateFuncs are applied sequentially, later updateFuncs should take into account +// the effects of previous updateFuncs to avoid potential conflicts. For example, if multiple +// functions update the same field, updates in the last function are persisted. +func (nim *nodeInfoManager) tryUpdateNode(updateFuncs ...nodeUpdateFunc) error { + // Retrieve the latest version of Node before attempting update, so that + // existing changes are not overwritten.
+ + kubeClient := nim.volumeHost.GetKubeClient() + if kubeClient == nil { + return fmt.Errorf("error getting kube client") + } + + nodeClient := kubeClient.CoreV1().Nodes() + originalNode, err := nodeClient.Get(string(nim.nodeName), metav1.GetOptions{}) + if err != nil { + return err + } + node := originalNode.DeepCopy() + + needUpdate := false + for _, update := range updateFuncs { + newNode, updated, err := update(node) + if err != nil { + return err + } + node = newNode + needUpdate = needUpdate || updated + } + + if needUpdate { + // PatchNodeStatus can update both node's status and labels or annotations + // Updating status by directly updating node does not work + _, _, updateErr := nodeutil.PatchNodeStatus(kubeClient.CoreV1(), types.NodeName(node.Name), originalNode, node) + return updateErr + } + + return nil +} + +// Guarantees the map is non-nil if no error is returned. +func buildNodeIDMapFromAnnotation(node *v1.Node) (map[string]string, error) { + var previousAnnotationValue string + if node.ObjectMeta.Annotations != nil { + previousAnnotationValue = + node.ObjectMeta.Annotations[annotationKeyNodeID] + } + + var existingDriverMap map[string]string + if previousAnnotationValue != "" { + // Parse previousAnnotationValue as JSON + if err := json.Unmarshal([]byte(previousAnnotationValue), &existingDriverMap); err != nil { + return nil, fmt.Errorf( + "failed to parse node's %q annotation value (%q) err=%v", + annotationKeyNodeID, + previousAnnotationValue, + err) + } + } + + if existingDriverMap == nil { + return make(map[string]string), nil + } + return existingDriverMap, nil +} + +// updateNodeIDInNode returns a function that updates a Node object with the given +// Node ID information. +func updateNodeIDInNode( + csiDriverName string, + csiDriverNodeID string) nodeUpdateFunc { + return func(node *v1.Node) (*v1.Node, bool, error) { + existingDriverMap, err := buildNodeIDMapFromAnnotation(node) + if err != nil { + return nil, false, err + } + + if val, ok := existingDriverMap[csiDriverName]; ok { + if val == csiDriverNodeID { + // Value already exists in node annotation, nothing more to do + return node, false, nil + } + } + + // Add/update annotation value + existingDriverMap[csiDriverName] = csiDriverNodeID + jsonObj, err := json.Marshal(existingDriverMap) + if err != nil { + return nil, false, fmt.Errorf( + "error while marshalling node ID map updated with driverName=%q, nodeID=%q: %v", + csiDriverName, + csiDriverNodeID, + err) + } + + if node.ObjectMeta.Annotations == nil { + node.ObjectMeta.Annotations = make(map[string]string) + } + node.ObjectMeta.Annotations[annotationKeyNodeID] = string(jsonObj) + + return node, true, nil + } +} + +// removeNodeIDFromNode returns a function that removes node ID information matching the given +// driver name from a Node object. 
+func removeNodeIDFromNode(csiDriverName string) nodeUpdateFunc { + return func(node *v1.Node) (*v1.Node, bool, error) { + var previousAnnotationValue string + if node.ObjectMeta.Annotations != nil { + previousAnnotationValue = + node.ObjectMeta.Annotations[annotationKeyNodeID] + } + + if previousAnnotationValue == "" { + return node, false, nil + } + + // Parse previousAnnotationValue as JSON + existingDriverMap := map[string]string{} + if err := json.Unmarshal([]byte(previousAnnotationValue), &existingDriverMap); err != nil { + return nil, false, fmt.Errorf( + "failed to parse node's %q annotation value (%q) err=%v", + annotationKeyNodeID, + previousAnnotationValue, + err) + } + + if _, ok := existingDriverMap[csiDriverName]; !ok { + // Value is already missing in node annotation, nothing more to do + return node, false, nil + } + + // Delete annotation value + delete(existingDriverMap, csiDriverName) + if len(existingDriverMap) == 0 { + delete(node.ObjectMeta.Annotations, annotationKeyNodeID) + } else { + jsonObj, err := json.Marshal(existingDriverMap) + if err != nil { + return nil, false, fmt.Errorf( + "failed while trying to remove key %q from node %q annotation. Existing data: %v", + csiDriverName, + annotationKeyNodeID, + previousAnnotationValue) + } + + node.ObjectMeta.Annotations[annotationKeyNodeID] = string(jsonObj) + } + + return node, true, nil + } +} + +// updateTopologyLabels returns a function that updates labels of a Node object with the given +// topology information. +func updateTopologyLabels(topology map[string]string) nodeUpdateFunc { + return func(node *v1.Node) (*v1.Node, bool, error) { + if topology == nil || len(topology) == 0 { + return node, false, nil + } + + for k, v := range topology { + if curVal, exists := node.Labels[k]; exists && curVal != v { + return nil, false, fmt.Errorf("detected topology value collision: driver reported %q:%q but existing label is %q:%q", k, v, k, curVal) + } + } + + if node.Labels == nil { + node.Labels = make(map[string]string) + } + for k, v := range topology { + node.Labels[k] = v + } + return node, true, nil + } +} + +func (nim *nodeInfoManager) updateCSINodeInfo( + driverName string, + driverNodeID string, + topology map[string]string) error { + + csiKubeClient := nim.volumeHost.GetCSIClient() + if csiKubeClient == nil { + return fmt.Errorf("error getting CSI client") + } + + var updateErrs []error + err := wait.ExponentialBackoff(updateBackoff, func() (bool, error) { + if err := nim.tryUpdateCSINodeInfo(csiKubeClient, driverName, driverNodeID, topology); err != nil { + updateErrs = append(updateErrs, err) + return false, nil + } + return true, nil + }) + if err != nil { + return fmt.Errorf("error updating CSINodeInfo: %v; caused by: %v", err, utilerrors.NewAggregate(updateErrs)) + } + return nil +} + +func (nim *nodeInfoManager) tryUpdateCSINodeInfo( + csiKubeClient csiclientset.Interface, + driverName string, + driverNodeID string, + topology map[string]string) error { + + nodeInfo, err := csiKubeClient.CsiV1alpha1().CSINodeInfos().Get(string(nim.nodeName), metav1.GetOptions{}) + if nodeInfo == nil || errors.IsNotFound(err) { + nodeInfo, err = nim.CreateCSINodeInfo() + } + if err != nil { + return err + } + + return nim.installDriverToCSINodeInfo(nodeInfo, driverName, driverNodeID, topology) +} + +func (nim *nodeInfoManager) CreateCSINodeInfo() (*csiv1alpha1.CSINodeInfo, error) { + + kubeClient := nim.volumeHost.GetKubeClient() + if kubeClient == nil { + return nil, fmt.Errorf("error getting kube client") + } + + 
csiKubeClient := nim.volumeHost.GetCSIClient() + if csiKubeClient == nil { + return nil, fmt.Errorf("error getting CSI client") + } + + node, err := kubeClient.CoreV1().Nodes().Get(string(nim.nodeName), metav1.GetOptions{}) + if err != nil { + return nil, err + } + + nodeInfo := &csiv1alpha1.CSINodeInfo{ + ObjectMeta: metav1.ObjectMeta{ + Name: string(nim.nodeName), + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: nodeKind.Version, + Kind: nodeKind.Kind, + Name: node.Name, + UID: node.UID, + }, + }, + }, + Spec: csiv1alpha1.CSINodeInfoSpec{ + Drivers: []csiv1alpha1.CSIDriverInfoSpec{}, + }, + Status: csiv1alpha1.CSINodeInfoStatus{ + Drivers: []csiv1alpha1.CSIDriverInfoStatus{}, + }, + } + + return csiKubeClient.CsiV1alpha1().CSINodeInfos().Create(nodeInfo) +} + +func (nim *nodeInfoManager) installDriverToCSINodeInfo( + nodeInfo *csiv1alpha1.CSINodeInfo, + driverName string, + driverNodeID string, + topology map[string]string) error { + + csiKubeClient := nim.volumeHost.GetCSIClient() + if csiKubeClient == nil { + return fmt.Errorf("error getting CSI client") + } + + topologyKeys := make(sets.String) + for k := range topology { + topologyKeys.Insert(k) + } + + specModified := true + statusModified := true + // Clone driver list, omitting the driver that matches the given driverName + newDriverSpecs := []csiv1alpha1.CSIDriverInfoSpec{} + for _, driverInfoSpec := range nodeInfo.Spec.Drivers { + if driverInfoSpec.Name == driverName { + if driverInfoSpec.NodeID == driverNodeID && + sets.NewString(driverInfoSpec.TopologyKeys...).Equal(topologyKeys) { + specModified = false + } + } else { + // Omit driverInfoSpec matching given driverName + newDriverSpecs = append(newDriverSpecs, driverInfoSpec) + } + } + newDriverStatuses := []csiv1alpha1.CSIDriverInfoStatus{} + for _, driverInfoStatus := range nodeInfo.Status.Drivers { + if driverInfoStatus.Name == driverName { + if driverInfoStatus.Available && + /* TODO(https://github.com/kubernetes/enhancements/issues/625): Add actual migration status */ + driverInfoStatus.VolumePluginMechanism == csiv1alpha1.VolumePluginMechanismInTree { + statusModified = false + } + } else { + // Omit driverInfoStatus matching given driverName + newDriverStatuses = append(newDriverStatuses, driverInfoStatus) + } + } + + if !specModified && !statusModified { + return nil + } + + // Append new driver + driverSpec := csiv1alpha1.CSIDriverInfoSpec{ + Name: driverName, + NodeID: driverNodeID, + TopologyKeys: topologyKeys.List(), + } + driverStatus := csiv1alpha1.CSIDriverInfoStatus{ + Name: driverName, + Available: true, + // TODO(https://github.com/kubernetes/enhancements/issues/625): Add actual migration status + VolumePluginMechanism: csiv1alpha1.VolumePluginMechanismInTree, + } + + newDriverSpecs = append(newDriverSpecs, driverSpec) + newDriverStatuses = append(newDriverStatuses, driverStatus) + nodeInfo.Spec.Drivers = newDriverSpecs + nodeInfo.Status.Drivers = newDriverStatuses + + err := validateCSINodeInfo(nodeInfo) + if err != nil { + return err + } + _, err = csiKubeClient.CsiV1alpha1().CSINodeInfos().Update(nodeInfo) + return err +} + +func (nim *nodeInfoManager) uninstallDriverFromCSINodeInfo( + csiDriverName string) error { + + csiKubeClient := nim.volumeHost.GetCSIClient() + if csiKubeClient == nil { + return fmt.Errorf("error getting CSI client") + } + + var updateErrs []error + err := wait.ExponentialBackoff(updateBackoff, func() (bool, error) { + if err := nim.tryUninstallDriverFromCSINodeInfo(csiKubeClient, csiDriverName); err != nil { + updateErrs
= append(updateErrs, err) + return false, nil + } + return true, nil + }) + if err != nil { + return fmt.Errorf("error updating CSINodeInfo: %v; caused by: %v", err, utilerrors.NewAggregate(updateErrs)) + } + return nil +} + +func (nim *nodeInfoManager) tryUninstallDriverFromCSINodeInfo( + csiKubeClient csiclientset.Interface, + csiDriverName string) error { + + nodeInfoClient := csiKubeClient.CsiV1alpha1().CSINodeInfos() + nodeInfo, err := nodeInfoClient.Get(string(nim.nodeName), metav1.GetOptions{}) + if err != nil { + return err // do not wrap error + } + + hasModified := false + newDriverStatuses := []csiv1alpha1.CSIDriverInfoStatus{} + for _, driverStatus := range nodeInfo.Status.Drivers { + if driverStatus.Name == csiDriverName { + // Uninstall the driver if we find it + hasModified = driverStatus.Available + driverStatus.Available = false + } + newDriverStatuses = append(newDriverStatuses, driverStatus) + } + + nodeInfo.Status.Drivers = newDriverStatuses + + if !hasModified { + // No changes, don't update + return nil + } + + err = validateCSINodeInfo(nodeInfo) + if err != nil { + return err + } + _, updateErr := nodeInfoClient.Update(nodeInfo) + return updateErr // do not wrap error + +} + +func updateMaxAttachLimit(driverName string, maxLimit int64) nodeUpdateFunc { + return func(node *v1.Node) (*v1.Node, bool, error) { + if maxLimit <= 0 { + klog.V(4).Infof("skipping adding attach limit for %s", driverName) + return node, false, nil + } + + if node.Status.Capacity == nil { + node.Status.Capacity = v1.ResourceList{} + } + if node.Status.Allocatable == nil { + node.Status.Allocatable = v1.ResourceList{} + } + limitKeyName := util.GetCSIAttachLimitKey(driverName) + node.Status.Capacity[v1.ResourceName(limitKeyName)] = *resource.NewQuantity(maxLimit, resource.DecimalSI) + node.Status.Allocatable[v1.ResourceName(limitKeyName)] = *resource.NewQuantity(maxLimit, resource.DecimalSI) + + return node, true, nil + } +} + +func removeMaxAttachLimit(driverName string) nodeUpdateFunc { + return func(node *v1.Node) (*v1.Node, bool, error) { + limitKey := v1.ResourceName(util.GetCSIAttachLimitKey(driverName)) + + capacityExists := false + if node.Status.Capacity != nil { + _, capacityExists = node.Status.Capacity[limitKey] + } + + allocatableExists := false + if node.Status.Allocatable != nil { + _, allocatableExists = node.Status.Allocatable[limitKey] + } + + if !capacityExists && !allocatableExists { + return node, false, nil + } + + delete(node.Status.Capacity, limitKey) + if len(node.Status.Capacity) == 0 { + node.Status.Capacity = nil + } + + delete(node.Status.Allocatable, limitKey) + if len(node.Status.Allocatable) == 0 { + node.Status.Allocatable = nil + } + + return node, true, nil + } +} + +// validateCSINodeInfo ensures members of the CSINodeInfo object satisfy map and set semantics. +// Before calling CSINodeInfoInterface.Update(), validateCSINodeInfo() should be invoked to +// make sure the CSINodeInfo is compliant. +func validateCSINodeInfo(nodeInfo *csiv1alpha1.CSINodeInfo) error { + if len(nodeInfo.Status.Drivers) < 1 { + return fmt.Errorf("at least one Driver entry is required in driver statuses") + } + if len(nodeInfo.Spec.Drivers) < 1 { + return fmt.Errorf("at least one Driver entry is required in driver specs") + } + if len(nodeInfo.Status.Drivers) != len(nodeInfo.Spec.Drivers) { + return fmt.Errorf("driver statuses and driver specs must have the same number of entries: %d statuses, %d specs", len(nodeInfo.Status.Drivers), len(nodeInfo.Spec.Drivers)) + } + // check for duplicate entries for the same driver in statuses + var errors []string + driverNamesInStatuses := make(sets.String) + for _, driverInfo := range nodeInfo.Status.Drivers { + if driverNamesInStatuses.Has(driverInfo.Name) { + errors = append(errors, fmt.Sprintf("duplicate entries found for driver: %s in driver statuses", driverInfo.Name)) + } + driverNamesInStatuses.Insert(driverInfo.Name) + } + // check for duplicate entries for the same driver in specs + driverNamesInSpecs := make(sets.String) + for _, driverInfo := range nodeInfo.Spec.Drivers { + if driverNamesInSpecs.Has(driverInfo.Name) { + errors = append(errors, fmt.Sprintf("duplicate entries found for driver: %s in driver specs", driverInfo.Name)) + } + driverNamesInSpecs.Insert(driverInfo.Name) + topoKeys := make(sets.String) + for _, key := range driverInfo.TopologyKeys { + if topoKeys.Has(key) { + errors = append(errors, fmt.Sprintf("duplicate topology keys %s found for driver %s in driver specs", key, driverInfo.Name)) + } + topoKeys.Insert(key) + } + } + // check all entries in specs and status match + if !driverNamesInSpecs.Equal(driverNamesInStatuses) { + errors = append(errors, fmt.Sprintf("list of drivers in specs: %v does not match list of drivers in statuses: %v", driverNamesInSpecs.List(), driverNamesInStatuses.List())) + } + if len(errors) == 0 { + return nil + } + return fmt.Errorf("%s", strings.Join(errors, ", ")) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/nodeupdater/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/nodeupdater/BUILD deleted file mode 100644 index 2b8e18a753bf..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/nodeupdater/BUILD +++ /dev/null @@ -1,33 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["nodeupdater.go"], - importpath = "k8s.io/kubernetes/pkg/volume/csi/nodeupdater", - visibility = ["//visibility:public"], - deps = [ - "//pkg/volume/util:go_default_library", - "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", - "//staging/src/k8s.io/client-go/util/retry:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/nodeupdater/nodeupdater.go
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/nodeupdater/nodeupdater.go deleted file mode 100644 index b308f80a4fb3..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/nodeupdater/nodeupdater.go +++ /dev/null @@ -1,193 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package nodeupdater includes internal functions used to add/delete labels to -// kubernetes nodes for corresponding CSI drivers -package nodeupdater - -import ( - "encoding/json" - "fmt" - - "github.com/golang/glog" - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes" - corev1 "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/client-go/util/retry" - "k8s.io/kubernetes/pkg/volume/util" -) - -const ( - // Name of node annotation that contains JSON map of driver names to node - // names - annotationKey = "csi.volume.kubernetes.io/nodeid" -) - -// labelManagementStruct is struct of channels used for communication between the driver registration -// code and the go routine responsible for managing the node's labels -type nodeUpdateStruct struct { - nodeName types.NodeName - k8s kubernetes.Interface -} - -// Interface implements an interface for managing labels of a node -type Interface interface { - AddLabelsAndLimits(driverName string, driverNodeId string, maxLimit int64) error -} - -// NewNodeupdater initializes nodeUpdateStruct and returns available interfaces -func NewNodeUpdater(nodeName types.NodeName, kubeClient kubernetes.Interface) Interface { - return nodeUpdateStruct{ - nodeName: nodeName, - k8s: kubeClient, - } -} - -// AddLabelsAndLimits nodeUpdater waits for labeling requests initiated by the driver's registration -// process and updates labels and attach limits -func (nodeUpdater nodeUpdateStruct) AddLabelsAndLimits(driverName string, driverNodeId string, maxLimit int64) error { - err := addLabelsAndLimits(string(nodeUpdater.nodeName), nodeUpdater.k8s.CoreV1().Nodes(), driverName, driverNodeId, maxLimit) - if err != nil { - return err - } - return nil -} - -func addMaxAttachLimitToNode(node *v1.Node, driverName string, maxLimit int64) *v1.Node { - if maxLimit <= 0 { - glog.V(4).Infof("skipping adding attach limit for %s", driverName) - return node - } - - if node.Status.Capacity == nil { - node.Status.Capacity = v1.ResourceList{} - } - if node.Status.Allocatable == nil { - node.Status.Allocatable = v1.ResourceList{} - } - limitKeyName := util.GetCSIAttachLimitKey(driverName) - node.Status.Capacity[v1.ResourceName(limitKeyName)] = *resource.NewQuantity(maxLimit, resource.DecimalSI) - node.Status.Allocatable[v1.ResourceName(limitKeyName)] = *resource.NewQuantity(maxLimit, resource.DecimalSI) - return node -} - -// Clones the given map and returns a new map with the given key and value added. -// Returns the given map, if annotationKey is empty. 
-func cloneAndAddAnnotation( - annotations map[string]string, - annotationKey, - annotationValue string) map[string]string { - if annotationKey == "" { - // Don't need to add an annotation. - return annotations - } - // Clone. - newAnnotations := map[string]string{} - for key, value := range annotations { - newAnnotations[key] = value - } - newAnnotations[annotationKey] = annotationValue - return newAnnotations -} - -func addNodeIdToNode(node *v1.Node, driverName string, csiDriverNodeId string) (*v1.Node, error) { - var previousAnnotationValue string - if node.ObjectMeta.Annotations != nil { - previousAnnotationValue = - node.ObjectMeta.Annotations[annotationKey] - glog.V(3).Infof( - "previousAnnotationValue=%q", previousAnnotationValue) - } - - existingDriverMap := map[string]string{} - if previousAnnotationValue != "" { - // Parse previousAnnotationValue as JSON - if err := json.Unmarshal([]byte(previousAnnotationValue), &existingDriverMap); err != nil { - return node, fmt.Errorf( - "failed to parse node's %q annotation value (%q) err=%v", - annotationKey, - previousAnnotationValue, - err) - } - } - - if val, ok := existingDriverMap[driverName]; ok { - if val == csiDriverNodeId { - // Value already exists in node annotation, nothing more to do - glog.V(2).Infof( - "The key value {%q: %q} alredy eixst in node %q annotation, no need to update: %v", - driverName, - csiDriverNodeId, - annotationKey, - previousAnnotationValue) - return node, nil - } - } - - // Add/update annotation value - existingDriverMap[driverName] = csiDriverNodeId - jsonObj, err := json.Marshal(existingDriverMap) - if err != nil { - return node, fmt.Errorf( - "failed while trying to add key value {%q: %q} to node %q annotation. Existing value: %v", - driverName, - csiDriverNodeId, - annotationKey, - previousAnnotationValue) - } - - node.ObjectMeta.Annotations = cloneAndAddAnnotation( - node.ObjectMeta.Annotations, - annotationKey, - string(jsonObj)) - return node, nil -} - -func addLabelsAndLimits(nodeName string, nodeClient corev1.NodeInterface, driverName string, csiDriverNodeId string, maxLimit int64) error { - retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { - // Retrieve the latest version of Node before attempting update, so that - // existing changes are not overwritten. RetryOnConflict uses - // exponential backoff to avoid exhausting the apiserver. 
- node, getErr := nodeClient.Get(nodeName, metav1.GetOptions{}) - if getErr != nil { - glog.Errorf("Failed to get latest version of Node: %v", getErr) - return getErr // do not wrap error - } - var labelErr error - node, labelErr = addNodeIdToNode(node, driverName, csiDriverNodeId) - if labelErr != nil { - return labelErr - } - node = addMaxAttachLimitToNode(node, driverName, maxLimit) - - _, updateErr := nodeClient.Update(node) - if updateErr == nil { - glog.V(2).Infof( - "Updated node %q successfully for CSI driver %q and CSI node name %q", - nodeName, - driverName, - csiDriverNodeId) - } - return updateErr // do not wrap error - }) - if retryErr != nil { - return fmt.Errorf("error setting attach limit and labels for %s with : %v", driverName, retryErr) - } - return nil -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/BUILD index 1e6070fd3b3c..d223844116f0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/BUILD @@ -19,7 +19,7 @@ go_library( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -30,7 +30,7 @@ go_test( deps = [ "//pkg/fieldpath:go_default_library", "//pkg/volume:go_default_library", - "//pkg/volume/empty_dir:go_default_library", + "//pkg/volume/emptydir:go_default_library", "//pkg/volume/testing:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go index 88f7b68f7594..391198ad9dd3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go @@ -18,7 +18,6 @@ package downwardapi import ( "fmt" - "path" "path/filepath" "k8s.io/api/core/v1" @@ -30,7 +29,7 @@ import ( "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" - "github.com/golang/glog" + "k8s.io/klog" ) // ProbeVolumePlugins is the entry point for plugin detection in a package. @@ -171,22 +170,23 @@ func (b *downwardAPIVolumeMounter) SetUp(fsGroup *int64) error { } func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { - glog.V(3).Infof("Setting up a downwardAPI volume %v for pod %v/%v at %v", b.volName, b.pod.Namespace, b.pod.Name, dir) + klog.V(3).Infof("Setting up a downwardAPI volume %v for pod %v/%v at %v", b.volName, b.pod.Namespace, b.pod.Name, dir) // Wrap EmptyDir. 
Here we rely on the idempotency of the wrapped plugin to avoid repeatedly mounting wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), b.pod, *b.opts) if err != nil { - glog.Errorf("Couldn't setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error()) + klog.Errorf("Couldn't setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error()) return err } data, err := CollectData(b.source.Items, b.pod, b.plugin.host, b.source.DefaultMode) if err != nil { - glog.Errorf("Error preparing data for downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error()) + klog.Errorf("Error preparing data for downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error()) return err } + setupSuccess := false if err := wrapped.SetUpAt(dir, fsGroup); err != nil { - glog.Errorf("Unable to setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error()) + klog.Errorf("Unable to setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error()) return err } @@ -194,25 +194,41 @@ func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { return err } + defer func() { + // Clean up directories if setup fails + if !setupSuccess { + unmounter, unmountCreateErr := b.plugin.NewUnmounter(b.volName, b.podUID) + if unmountCreateErr != nil { + klog.Errorf("error cleaning up mount %s after failure. Create unmounter failed with %v", b.volName, unmountCreateErr) + return + } + tearDownErr := unmounter.TearDown() + if tearDownErr != nil { + klog.Errorf("error tearing down volume %s with : %v", b.volName, tearDownErr) + } + } + }() + writerContext := fmt.Sprintf("pod %v/%v volume %v", b.pod.Namespace, b.pod.Name, b.volName) writer, err := volumeutil.NewAtomicWriter(dir, writerContext) if err != nil { - glog.Errorf("Error creating atomic writer: %v", err) + klog.Errorf("Error creating atomic writer: %v", err) return err } err = writer.Write(data) if err != nil { - glog.Errorf("Error writing payload to dir: %v", err) + klog.Errorf("Error writing payload to dir: %v", err) return err } err = volume.SetVolumeOwnership(b, fsGroup) if err != nil { - glog.Errorf("Error applying volume ownership settings for group: %v", fsGroup) + klog.Errorf("Error applying volume ownership settings for group: %v", fsGroup) return err } + setupSuccess = true return nil } @@ -239,7 +255,7 @@ func CollectData(items []v1.DownwardAPIVolumeFile, pod *v1.Pod, host volume.Volu if fileInfo.FieldRef != nil { // TODO: unify with Kubelet.podFieldSelectorRuntimeValue if values, err := fieldpath.ExtractFieldPathAsString(pod, fileInfo.FieldRef.FieldPath); err != nil { - glog.Errorf("Unable to extract field %s: %s", fileInfo.FieldRef.FieldPath, err.Error()) + klog.Errorf("Unable to extract field %s: %s", fileInfo.FieldRef.FieldPath, err.Error()) errlist = append(errlist, err) } else { fileProjection.Data = []byte(values) @@ -250,7 +266,7 @@ func CollectData(items []v1.DownwardAPIVolumeFile, pod *v1.Pod, host volume.Volu if err != nil { errlist = append(errlist, err) } else if values, err := resource.ExtractResourceValueByContainerNameAndNodeAllocatable(fileInfo.ResourceFieldRef, pod, containerName, nodeAllocatable); err != nil { - glog.Errorf("Unable to extract field %s: %s", fileInfo.ResourceFieldRef.Resource, err.Error()) + klog.Errorf("Unable to extract field %s: %s", fileInfo.ResourceFieldRef.Resource, 
err.Error()) errlist = append(errlist, err) } else { fileProjection.Data = []byte(values) @@ -282,10 +298,6 @@ func (c *downwardAPIVolumeUnmounter) TearDownAt(dir string) error { return volumeutil.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID) } -func (b *downwardAPIVolumeMounter) getMetaDir() string { - return path.Join(b.plugin.host.GetPodPluginDir(b.podUID, utilstrings.EscapeQualifiedNameForDisk(downwardAPIPluginName)), b.volName) -} - func getVolumeSource(spec *volume.Spec) (*v1.DownwardAPIVolumeSource, bool) { var readOnly bool var volumeSource *v1.DownwardAPIVolumeSource diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/empty_dir/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/empty_dir/BUILD deleted file mode 100644 index de802241801c..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/empty_dir/BUILD +++ /dev/null @@ -1,68 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "empty_dir.go", - "empty_dir_linux.go", - "empty_dir_unsupported.go", - ], - importpath = "k8s.io/kubernetes/pkg/volume/empty_dir", - deps = [ - "//pkg/apis/core/v1/helper:go_default_library", - "//pkg/util/mount:go_default_library", - "//pkg/util/strings:go_default_library", - "//pkg/volume:go_default_library", - "//pkg/volume/util:go_default_library", - "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "//conditions:default": [], - }), -) - -go_test( - name = "go_default_test", - srcs = ["empty_dir_test.go"], - embed = [":go_default_library"], - deps = select({ - "@io_bazel_rules_go//go/platform:linux": [ - "//pkg/util/mount:go_default_library", - "//pkg/volume:go_default_library", - "//pkg/volume/testing:go_default_library", - "//pkg/volume/util:go_default_library", - "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//staging/src/k8s.io/client-go/util/testing:go_default_library", - ], - "//conditions:default": [], - }), -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/empty_dir/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/empty_dir/doc.go deleted file mode 100644 index 10d7abd212fb..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/empty_dir/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package empty_dir contains the internal representation of emptyDir -// volumes. -package empty_dir diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/BUILD new file mode 100644 index 000000000000..fd4eaff0332d --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/BUILD @@ -0,0 +1,68 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "empty_dir.go", + "empty_dir_linux.go", + "empty_dir_unsupported.go", + ], + importpath = "k8s.io/kubernetes/pkg/volume/emptydir", + deps = [ + "//pkg/apis/core/v1/helper:go_default_library", + "//pkg/util/mount:go_default_library", + "//pkg/util/strings:go_default_library", + "//pkg/volume:go_default_library", + "//pkg/volume/util:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ] + select({ + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "//conditions:default": [], + }), +) + +go_test( + name = "go_default_test", + srcs = ["empty_dir_test.go"], + embed = [":go_default_library"], + deps = select({ + "@io_bazel_rules_go//go/platform:linux": [ + "//pkg/util/mount:go_default_library", + "//pkg/volume:go_default_library", + "//pkg/volume/testing:go_default_library", + "//pkg/volume/util:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/client-go/util/testing:go_default_library", + ], + "//conditions:default": [], + }), +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/empty_dir/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/OWNERS similarity index 100% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/empty_dir/OWNERS rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/OWNERS diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/doc.go new file mode 100644 index 000000000000..0e44b36753b9 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package emptydir contains the internal representation of emptyDir +// volumes. +package emptydir diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/empty_dir/empty_dir.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/empty_dir.go similarity index 96% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/empty_dir/empty_dir.go rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/empty_dir.go index e0687e4e89df..0dd6c6508a00 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/empty_dir/empty_dir.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/empty_dir.go @@ -14,18 +14,18 @@ See the License for the specific language governing permissions and limitations under the License. */ -package empty_dir +package emptydir import ( "fmt" "os" "path" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/util/mount" stringsutil "k8s.io/kubernetes/pkg/util/strings" @@ -40,7 +40,7 @@ import ( // http://issue.k8s.io/2630 const perm os.FileMode = 0777 -// This is the primary entrypoint for volume plugins. +// ProbeVolumePlugins is the primary entrypoint for volume plugins. func ProbeVolumePlugins() []volume.VolumePlugin { return []volume.VolumePlugin{ &emptyDirPlugin{nil}, @@ -184,7 +184,7 @@ func (ed *emptyDir) GetAttributes() volume.Attributes { // Checks prior to mount operations to verify that the required components (binaries, etc.) // to mount the volume are available on the underlying node. 
// If not, it returns an error -func (b *emptyDir) CanMount() error { +func (ed *emptyDir) CanMount() error { return nil } @@ -253,7 +253,7 @@ func (ed *emptyDir) setupTmpfs(dir string) error { return nil } - glog.V(3).Infof("pod %v: mounting tmpfs for volume %v", ed.pod.UID, ed.volName) + klog.V(3).Infof("pod %v: mounting tmpfs for volume %v", ed.pod.UID, ed.volName) return ed.mounter.Mount("tmpfs", dir, "tmpfs", nil /* options */) } @@ -281,7 +281,7 @@ func (ed *emptyDir) setupHugepages(dir string) error { return err } - glog.V(3).Infof("pod %v: mounting hugepages for volume %v", ed.pod.UID, ed.volName) + klog.V(3).Infof("pod %v: mounting hugepages for volume %v", ed.pod.UID, ed.volName) return ed.mounter.Mount("nodev", dir, "hugetlbfs", []string{pageSizeMountOption}) } @@ -311,7 +311,7 @@ func getPageSizeMountOptionFromPod(pod *v1.Pod) (string, error) { } if !pageSizeFound { - return "", fmt.Errorf("hugePages storage requested, but there is no resource request for huge pages.") + return "", fmt.Errorf("hugePages storage requested, but there is no resource request for huge pages") } return fmt.Sprintf("%s=%s", hugePagesPageSizeMountOption, pageSize.String()), nil @@ -349,7 +349,7 @@ func (ed *emptyDir) setupDir(dir string) error { } if fileinfo.Mode().Perm() != perm.Perm() { - glog.Errorf("Expected directory %q permissions to be: %s; got: %s", dir, perm.Perm(), fileinfo.Mode().Perm()) + klog.Errorf("Expected directory %q permissions to be: %s; got: %s", dir, perm.Perm(), fileinfo.Mode().Perm()) } } @@ -370,7 +370,7 @@ func (ed *emptyDir) TearDownAt(dir string) error { if pathExists, pathErr := volumeutil.PathExists(dir); pathErr != nil { return fmt.Errorf("Error checking if path exists: %v", pathErr) } else if !pathExists { - glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) + klog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/empty_dir/empty_dir_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/empty_dir_linux.go similarity index 91% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/empty_dir/empty_dir_linux.go rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/empty_dir_linux.go index ba289fee9fbd..3f8b16282745 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/empty_dir/empty_dir_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/empty_dir_linux.go @@ -16,13 +16,13 @@ See the License for the specific language governing permissions and limitations under the License. 
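Two mechanical migrations run through this whole vendored tree: the `empty_dir` package becomes the underscore-free `emptydir`, and `github.com/golang/glog` gives way to `k8s.io/klog`. klog is the Kubernetes fork of glog with the same leveled-logging surface, so call sites keep their shape; a minimal sketch of a migrated site (the helper is illustrative, not from the tree):

```go
package emptydir // previously: package empty_dir

import (
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/klog" // previously: "github.com/golang/glog"
)

// logTmpfsMount is a hypothetical helper; only the package clause and the
// logger import change, the V(level).Infof call surface is identical.
func logTmpfsMount(podUID types.UID, volName string) {
	klog.V(3).Infof("pod %v: mounting tmpfs for volume %v", podUID, volName)
}
```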
*/ -package empty_dir +package emptydir import ( "fmt" - "github.com/golang/glog" "golang.org/x/sys/unix" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/util/mount" @@ -40,7 +40,7 @@ type realMountDetector struct { } func (m *realMountDetector) GetMountMedium(path string) (v1.StorageMedium, bool, error) { - glog.V(5).Infof("Determining mount medium of %v", path) + klog.V(5).Infof("Determining mount medium of %v", path) notMnt, err := m.mounter.IsLikelyNotMountPoint(path) if err != nil { return v1.StorageMediumDefault, false, fmt.Errorf("IsLikelyNotMountPoint(%q): %v", path, err) @@ -50,7 +50,7 @@ func (m *realMountDetector) GetMountMedium(path string) (v1.StorageMedium, bool, return v1.StorageMediumDefault, false, fmt.Errorf("statfs(%q): %v", path, err) } - glog.V(5).Infof("Statfs_t of %v: %+v", path, buf) + klog.V(5).Infof("Statfs_t of %v: %+v", path, buf) if buf.Type == linuxTmpfsMagic { return v1.StorageMediumMemory, !notMnt, nil } else if int64(buf.Type) == linuxHugetlbfsMagic { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/empty_dir/empty_dir_unsupported.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/empty_dir_unsupported.go similarity index 98% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/empty_dir/empty_dir_unsupported.go rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/empty_dir_unsupported.go index defbfc5e196a..caeb6a771827 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/empty_dir/empty_dir_unsupported.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/empty_dir_unsupported.go @@ -16,7 +16,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package empty_dir +package emptydir import ( "k8s.io/api/core/v1" diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/BUILD index 9b8a102b84c0..8c7328601db8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/BUILD @@ -27,7 +27,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/attacher.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/attacher.go index 7030a949993e..748c14f38e13 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/attacher.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/attacher.go @@ -23,10 +23,10 @@ import ( "strings" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" @@ -49,7 +49,7 @@ var _ volume.DeviceMountableVolumePlugin = &fcPlugin{} func (plugin *fcPlugin) NewAttacher() (volume.Attacher, error) { return &fcAttacher{ host: plugin.host, - manager: &FCUtil{}, + manager: &fcUtil{}, }, nil } @@ -78,7 +78,7 @@ func (attacher *fcAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName ty func (attacher *fcAttacher) WaitForAttach(spec *volume.Spec, devicePath 
string, _ *v1.Pod, timeout time.Duration) (string, error) { mounter, err := volumeSpecToMounter(spec, attacher.host) if err != nil { - glog.Warningf("failed to get fc mounter: %v", err) + klog.Warningf("failed to get fc mounter: %v", err) return "", err } return attacher.manager.AttachDisk(*mounter) @@ -88,7 +88,7 @@ func (attacher *fcAttacher) GetDeviceMountPath( spec *volume.Spec) (string, error) { mounter, err := volumeSpecToMounter(spec, attacher.host) if err != nil { - glog.Warningf("failed to get fc mounter: %v", err) + klog.Warningf("failed to get fc mounter: %v", err) return "", err } @@ -142,7 +142,7 @@ var _ volume.DeviceUnmounter = &fcDetacher{} func (plugin *fcPlugin) NewDetacher() (volume.Detacher, error) { return &fcDetacher{ mounter: plugin.host.GetMounter(plugin.GetPluginName()), - manager: &FCUtil{}, + manager: &fcUtil{}, }, nil } @@ -158,7 +158,7 @@ func (detacher *fcDetacher) UnmountDevice(deviceMountPath string) error { // Specify device name for DetachDisk later devName, _, err := mount.GetDeviceNameFromMount(detacher.mounter, deviceMountPath) if err != nil { - glog.Errorf("fc: failed to get device from mnt: %s\nError: %v", deviceMountPath, err) + klog.Errorf("fc: failed to get device from mnt: %s\nError: %v", deviceMountPath, err) return err } // Unmount for deviceMountPath(=globalPDPath) @@ -171,7 +171,7 @@ func (detacher *fcDetacher) UnmountDevice(deviceMountPath string) error { if err != nil { return fmt.Errorf("fc: failed to detach disk: %s\nError: %v", devName, err) } - glog.V(4).Infof("fc: successfully detached disk: %s", devName) + klog.V(4).Infof("fc: successfully detached disk: %s", devName) return nil } @@ -206,7 +206,7 @@ func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost) (*fcDiskMoun if err != nil { return nil, err } - glog.V(5).Infof("fc: volumeSpecToMounter volumeMode %s", volumeMode) + klog.V(5).Infof("fc: volumeSpecToMounter volumeMode %s", volumeMode) return &fcDiskMounter{ fcDisk: fcDisk, fsType: fc.FSType, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go index 13cf923a9233..602b7aa0bdbd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go @@ -19,9 +19,10 @@ package fc import ( "os" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/util" ) // Abstract interface to disk operations. @@ -42,14 +43,14 @@ func diskSetUp(manager diskManager, b fcDiskMounter, volPath string, mounter mou noMnt, err := mounter.IsLikelyNotMountPoint(volPath) if err != nil && !os.IsNotExist(err) { - glog.Errorf("cannot validate mountpoint: %s", volPath) + klog.Errorf("cannot validate mountpoint: %s", volPath) return err } if !noMnt { return nil } if err := os.MkdirAll(volPath, 0750); err != nil { - glog.Errorf("failed to mkdir:%s", volPath) + klog.Errorf("failed to mkdir:%s", volPath) return err } // Perform a bind mount to the full path to allow duplicate mounts of the same disk. 
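Renaming the exported `FCUtil` to the unexported `fcUtil` stays this contained because consumers hold it through the package-private `diskManager` interface; only the composite literals at the construction sites change. An abridged sketch of that shape (the real interface also covers the block-volume methods):

```go
// Abridged from disk_manager.go: callers program against the interface,
// so the concrete util type can be unexported without fallout.
type diskManager interface {
	MakeGlobalPDName(disk fcDisk) string
	AttachDisk(b fcDiskMounter) (string, error)
	DetachDisk(c fcDiskUnmounter, devicePath string) error
}

type fcUtil struct{} // previously the exported FCUtil

func newDefaultManager() diskManager { // hypothetical constructor, for illustration
	return &fcUtil{}
}
```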
@@ -57,27 +58,28 @@ func diskSetUp(manager diskManager, b fcDiskMounter, volPath string, mounter mou if b.readOnly { options = append(options, "ro") } - err = mounter.Mount(globalPDPath, volPath, "", options) + mountOptions := util.JoinMountOptions(options, b.mountOptions) + err = mounter.Mount(globalPDPath, volPath, "", mountOptions) if err != nil { - glog.Errorf("Failed to bind mount: source:%s, target:%s, err:%v", globalPDPath, volPath, err) + klog.Errorf("Failed to bind mount: source:%s, target:%s, err:%v", globalPDPath, volPath, err) noMnt, mntErr := b.mounter.IsLikelyNotMountPoint(volPath) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if !noMnt { if mntErr = b.mounter.Unmount(volPath); mntErr != nil { - glog.Errorf("Failed to unmount: %v", mntErr) + klog.Errorf("Failed to unmount: %v", mntErr) return err } noMnt, mntErr = b.mounter.IsLikelyNotMountPoint(volPath) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if !noMnt { // will most likely retry on next sync loop. - glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", volPath) + klog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", volPath) return err } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go index 4bbacfce7017..cbd247aebcaf 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go @@ -22,11 +22,11 @@ import ( "strconv" "strings" - "github.com/golang/glog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/util/mount" utilstrings "k8s.io/kubernetes/pkg/util/strings" @@ -35,7 +35,7 @@ import ( "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" ) -// This is the primary entrypoint for volume plugins. +// ProbeVolumePlugins is the primary entrypoint for volume plugins. func ProbeVolumePlugins() []volume.VolumePlugin { return []volume.VolumePlugin{&fcPlugin{nil}} } @@ -106,7 +106,7 @@ func (plugin *fcPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode { func (plugin *fcPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) { // Inject real implementations here, test through the internal function. 
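The one behavioral change in `diskSetUp` is the option plumbing: PV-level mount options now reach the bind mount instead of being dropped. A condensed, hypothetical restatement, assuming `util.JoinMountOptions` merges and de-duplicates as in `pkg/volume/util`:

```go
// bindMountWithOptions is a hypothetical distillation of the hunk above;
// pvOptions is what util.MountOptionFromSpec collects from the PV spec.
func bindMountWithOptions(mounter mount.Interface, globalPDPath, volPath string, readOnly bool, pvOptions []string) error {
	options := []string{"bind"}
	if readOnly {
		options = append(options, "ro")
	}
	mountOptions := util.JoinMountOptions(options, pvOptions)
	return mounter.Mount(globalPDPath, volPath, "" /* fstype */, mountOptions)
}
```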
- return plugin.newMounterInternal(spec, pod.UID, &FCUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName())) + return plugin.newMounterInternal(spec, pod.UID, &fcUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName())) } func (plugin *fcPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, exec mount.Exec) (volume.Mounter, error) { @@ -137,22 +137,24 @@ func (plugin *fcPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, if err != nil { return nil, err } - glog.V(5).Infof("fc: newMounterInternal volumeMode %s", volumeMode) + klog.V(5).Infof("fc: newMounterInternal volumeMode %s", volumeMode) return &fcDiskMounter{ - fcDisk: fcDisk, - fsType: fc.FSType, - volumeMode: volumeMode, - readOnly: readOnly, - mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec}, - deviceUtil: util.NewDeviceHandler(util.NewIOHandler()), + fcDisk: fcDisk, + fsType: fc.FSType, + volumeMode: volumeMode, + readOnly: readOnly, + mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec}, + deviceUtil: util.NewDeviceHandler(util.NewIOHandler()), + mountOptions: []string{}, }, nil } return &fcDiskMounter{ - fcDisk: fcDisk, - fsType: fc.FSType, - readOnly: readOnly, - mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec}, - deviceUtil: util.NewDeviceHandler(util.NewIOHandler()), + fcDisk: fcDisk, + fsType: fc.FSType, + readOnly: readOnly, + mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec}, + deviceUtil: util.NewDeviceHandler(util.NewIOHandler()), + mountOptions: util.MountOptionFromSpec(spec), }, nil } @@ -164,7 +166,7 @@ func (plugin *fcPlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, _ v if pod != nil { uid = pod.UID } - return plugin.newBlockVolumeMapperInternal(spec, uid, &FCUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName())) + return plugin.newBlockVolumeMapperInternal(spec, uid, &fcUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName())) } func (plugin *fcPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, exec mount.Exec) (volume.BlockVolumeMapper, error) { @@ -196,7 +198,7 @@ func (plugin *fcPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID t func (plugin *fcPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) { // Inject real implementations here, test through the internal function. 
- return plugin.newUnmounterInternal(volName, podUID, &FCUtil{}, plugin.host.GetMounter(plugin.GetPluginName())) + return plugin.newUnmounterInternal(volName, podUID, &fcUtil{}, plugin.host.GetMounter(plugin.GetPluginName())) } func (plugin *fcPlugin) newUnmounterInternal(volName string, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Unmounter, error) { @@ -214,7 +216,7 @@ func (plugin *fcPlugin) newUnmounterInternal(volName string, podUID types.UID, m } func (plugin *fcPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) { - return plugin.newUnmapperInternal(volName, podUID, &FCUtil{}) + return plugin.newUnmapperInternal(volName, podUID, &fcUtil{}) } func (plugin *fcPlugin) newUnmapperInternal(volName string, podUID types.UID, manager diskManager) (volume.BlockVolumeUnmapper, error) { @@ -274,7 +276,7 @@ func (plugin *fcPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volu FC: &v1.FCVolumeSource{TargetWWNs: []string{wwnLun[0]}, Lun: &lun32}, }, } - glog.V(5).Infof("ConstructVolumeSpec: TargetWWNs: %v, Lun: %v", + klog.V(5).Infof("ConstructVolumeSpec: TargetWWNs: %v, Lun: %v", fcVolume.VolumeSource.FC.TargetWWNs, *fcVolume.VolumeSource.FC.Lun) } else { fcVolume = &v1.Volume{ @@ -283,7 +285,7 @@ func (plugin *fcPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volu FC: &v1.FCVolumeSource{WWIDs: []string{volumeInfo}}, }, } - glog.V(5).Infof("ConstructVolumeSpec: WWIDs: %v", fcVolume.VolumeSource.FC.WWIDs) + klog.V(5).Infof("ConstructVolumeSpec: WWIDs: %v", fcVolume.VolumeSource.FC.WWIDs) } return volume.NewSpecFromVolume(fcVolume), nil } @@ -302,7 +304,7 @@ func (plugin *fcPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, m if err != nil { return nil, err } - glog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err) + klog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err) // Retrieve volumePluginDependentPath from globalMapPathUUID // globalMapPathUUID examples: @@ -326,13 +328,13 @@ func (plugin *fcPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, m lun32 := int32(lun) fcPV = createPersistentVolumeFromFCVolumeSource(volumeName, v1.FCVolumeSource{TargetWWNs: []string{wwnLun[0]}, Lun: &lun32}) - glog.V(5).Infof("ConstructBlockVolumeSpec: TargetWWNs: %v, Lun: %v", + klog.V(5).Infof("ConstructBlockVolumeSpec: TargetWWNs: %v, Lun: %v", fcPV.Spec.PersistentVolumeSource.FC.TargetWWNs, *fcPV.Spec.PersistentVolumeSource.FC.Lun) } else { fcPV = createPersistentVolumeFromFCVolumeSource(volumeName, v1.FCVolumeSource{WWIDs: []string{volumeInfo}}) - glog.V(5).Infof("ConstructBlockVolumeSpec: WWIDs: %v", fcPV.Spec.PersistentVolumeSource.FC.WWIDs) + klog.V(5).Infof("ConstructBlockVolumeSpec: WWIDs: %v", fcPV.Spec.PersistentVolumeSource.FC.WWIDs) } return volume.NewSpecFromPersistentVolume(fcPV, false), nil } @@ -361,7 +363,7 @@ func (fc *fcDisk) GetPath() string { func (fc *fcDisk) fcGlobalMapPath(spec *volume.Spec) (string, error) { mounter, err := volumeSpecToMounter(spec, fc.plugin.host) if err != nil { - glog.Warningf("failed to get fc mounter: %v", err) + klog.Warningf("failed to get fc mounter: %v", err) return "", err } return fc.manager.MakeGlobalVDPDName(*mounter.fcDisk), nil @@ -374,11 +376,12 @@ func (fc *fcDisk) fcPodDeviceMapPath() (string, string) { type fcDiskMounter struct { *fcDisk - readOnly bool - fsType string - volumeMode v1.PersistentVolumeMode - mounter *mount.SafeFormatAndMount - deviceUtil util.DeviceUtil + readOnly bool + fsType 
string + volumeMode v1.PersistentVolumeMode + mounter *mount.SafeFormatAndMount + deviceUtil util.DeviceUtil + mountOptions []string } var _ volume.Mounter = &fcDiskMounter{} @@ -406,7 +409,7 @@ func (b *fcDiskMounter) SetUpAt(dir string, fsGroup *int64) error { // diskSetUp checks mountpoints and prevent repeated calls err := diskSetUp(b.manager, *b, dir, b.mounter, fsGroup) if err != nil { - glog.Errorf("fc: failed to setup") + klog.Errorf("fc: failed to setup") } return err } @@ -459,12 +462,12 @@ func (c *fcDiskUnmapper) TearDownDevice(mapPath, devicePath string) error { if err != nil { return fmt.Errorf("fc: failed to detach disk: %s\nError: %v", mapPath, err) } - glog.V(4).Infof("fc: %q is unmounted, deleting the directory", mapPath) + klog.V(4).Infof("fc: %q is unmounted, deleting the directory", mapPath) err = os.RemoveAll(mapPath) if err != nil { return fmt.Errorf("fc: failed to delete the directory: %s\nError: %v", mapPath, err) } - glog.V(4).Infof("fc: successfully detached disk: %s", mapPath) + klog.V(4).Infof("fc: successfully detached disk: %s", mapPath) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util.go index a68925a5ec48..2beafee94bf7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util.go @@ -24,13 +24,12 @@ import ( "path/filepath" "strings" - "github.com/golang/glog" "k8s.io/api/core/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" - "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" ) type ioHandler interface { @@ -62,15 +61,15 @@ func (handler *osIOHandler) WriteFile(filename string, data []byte, perm os.File // given a wwn and lun, find the device and associated devicemapper parent func findDisk(wwn, lun string, io ioHandler, deviceUtil volumeutil.DeviceUtil) (string, string) { - fc_path := "-fc-0x" + wwn + "-lun-" + lun - dev_path := byPath - if dirs, err := io.ReadDir(dev_path); err == nil { + fcPath := "-fc-0x" + wwn + "-lun-" + lun + devPath := byPath + if dirs, err := io.ReadDir(devPath); err == nil { for _, f := range dirs { name := f.Name() - if strings.Contains(name, fc_path) { - if disk, err1 := io.EvalSymlinks(dev_path + name); err1 == nil { + if strings.Contains(name, fcPath) { + if disk, err1 := io.EvalSymlinks(devPath + name); err1 == nil { dm := deviceUtil.FindMultipathDeviceForDevice(disk) - glog.Infof("fc: find disk: %v, dm: %v", disk, dm) + klog.Infof("fc: find disk: %v, dm: %v", disk, dm) return disk, dm } } @@ -90,41 +89,41 @@ func findDiskWWIDs(wwid string, io ioHandler, deviceUtil volumeutil.DeviceUtil) // The wwid could contain white space and it will be replaced // underscore when wwid is exposed under /dev/by-id. 
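The `fc_util.go` hunks that follow are largely lint hygiene (snake_case locals become MixedCaps), but `findDisk` is worth reading on its own: it maps a WWN/LUN pair to a device via the by-path symlinks, then asks the device util for a multipath parent. A condensed restatement with the directory constant spelled out (in this file `byPath` is `/dev/disk/by-path/`):

```go
// findByPath is a hypothetical standalone version of findDisk above;
// dm is the multipath parent and stays empty for single-path disks.
func findByPath(wwn, lun string, io ioHandler, deviceUtil volumeutil.DeviceUtil) (string, string) {
	fcPath := "-fc-0x" + wwn + "-lun-" + lun
	dirs, err := io.ReadDir(byPath)
	if err != nil {
		return "", ""
	}
	for _, f := range dirs {
		if !strings.Contains(f.Name(), fcPath) {
			continue
		}
		if disk, err := io.EvalSymlinks(byPath + f.Name()); err == nil {
			return disk, deviceUtil.FindMultipathDeviceForDevice(disk)
		}
	}
	return "", ""
}
```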
- fc_path := "scsi-" + wwid - dev_id := byID - if dirs, err := io.ReadDir(dev_id); err == nil { + fcPath := "scsi-" + wwid + devID := byID + if dirs, err := io.ReadDir(devID); err == nil { for _, f := range dirs { name := f.Name() - if name == fc_path { - disk, err := io.EvalSymlinks(dev_id + name) + if name == fcPath { + disk, err := io.EvalSymlinks(devID + name) if err != nil { - glog.V(2).Infof("fc: failed to find a corresponding disk from symlink[%s], error %v", dev_id+name, err) + klog.V(2).Infof("fc: failed to find a corresponding disk from symlink[%s], error %v", devID+name, err) return "", "" } dm := deviceUtil.FindMultipathDeviceForDevice(disk) - glog.Infof("fc: find disk: %v, dm: %v", disk, dm) + klog.Infof("fc: find disk: %v, dm: %v", disk, dm) return disk, dm } } } - glog.V(2).Infof("fc: failed to find a disk [%s]", dev_id+fc_path) + klog.V(2).Infof("fc: failed to find a disk [%s]", devID+fcPath) return "", "" } // Removes a scsi device based upon /dev/sdX name func removeFromScsiSubsystem(deviceName string, io ioHandler) { fileName := "/sys/block/" + deviceName + "/device/delete" - glog.V(4).Infof("fc: remove device from scsi-subsystem: path: %s", fileName) + klog.V(4).Infof("fc: remove device from scsi-subsystem: path: %s", fileName) data := []byte("1") io.WriteFile(fileName, data, 0666) } // rescan scsi bus func scsiHostRescan(io ioHandler) { - scsi_path := "/sys/class/scsi_host/" - if dirs, err := io.ReadDir(scsi_path); err == nil { + scsiPath := "/sys/class/scsi_host/" + if dirs, err := io.ReadDir(scsiPath); err == nil { for _, f := range dirs { - name := scsi_path + f.Name() + "/scan" + name := scsiPath + f.Name() + "/scan" data := []byte("- - -") io.WriteFile(name, data, 0666) } @@ -135,33 +134,31 @@ func scsiHostRescan(io ioHandler) { func makePDNameInternal(host volume.VolumeHost, wwns []string, lun string, wwids []string) string { if len(wwns) != 0 { return path.Join(host.GetPluginDir(fcPluginName), wwns[0]+"-lun-"+lun) - } else { - return path.Join(host.GetPluginDir(fcPluginName), wwids[0]) } + return path.Join(host.GetPluginDir(fcPluginName), wwids[0]) } // make a directory like /var/lib/kubelet/plugins/kubernetes.io/fc/volumeDevices/target-lun-0 func makeVDPDNameInternal(host volume.VolumeHost, wwns []string, lun string, wwids []string) string { if len(wwns) != 0 { return path.Join(host.GetVolumeDevicePluginDir(fcPluginName), wwns[0]+"-lun-"+lun) - } else { - return path.Join(host.GetVolumeDevicePluginDir(fcPluginName), wwids[0]) } + return path.Join(host.GetVolumeDevicePluginDir(fcPluginName), wwids[0]) } -type FCUtil struct{} +type fcUtil struct{} -func (util *FCUtil) MakeGlobalPDName(fc fcDisk) string { +func (util *fcUtil) MakeGlobalPDName(fc fcDisk) string { return makePDNameInternal(fc.plugin.host, fc.wwns, fc.lun, fc.wwids) } // Global volume device plugin dir -func (util *FCUtil) MakeGlobalVDPDName(fc fcDisk) string { +func (util *fcUtil) MakeGlobalVDPDName(fc fcDisk) string { return makeVDPDNameInternal(fc.plugin.host, fc.wwns, fc.lun, fc.wwids) } func searchDisk(b fcDiskMounter) (string, error) { - var diskIds []string + var diskIDs []string var disk string var dm string io := b.io @@ -170,9 +167,9 @@ func searchDisk(b fcDiskMounter) (string, error) { lun := b.lun if len(wwns) != 0 { - diskIds = wwns + diskIDs = wwns } else { - diskIds = wwids + diskIDs = wwids } rescaned := false @@ -180,11 +177,11 @@ func searchDisk(b fcDiskMounter) (string, error) { // first phase, search existing device path, if a multipath dm is found, exit loop // otherwise, in 
second phase, rescan scsi bus and search again, return with any findings for true { - for _, diskId := range diskIds { + for _, diskID := range diskIDs { if len(wwns) != 0 { - disk, dm = findDisk(diskId, lun, io, b.deviceUtil) + disk, dm = findDisk(diskID, lun, io, b.deviceUtil) } else { - disk, dm = findDiskWWIDs(diskId, io, b.deviceUtil) + disk, dm = findDiskWWIDs(diskID, io, b.deviceUtil) } // if multipath device is found, break if dm != "" { @@ -212,7 +209,7 @@ func searchDisk(b fcDiskMounter) (string, error) { return disk, nil } -func (util *FCUtil) AttachDisk(b fcDiskMounter) (string, error) { +func (util *fcUtil) AttachDisk(b fcDiskMounter) (string, error) { devicePath, err := searchDisk(b) if err != nil { return "", err @@ -221,7 +218,7 @@ func (util *FCUtil) AttachDisk(b fcDiskMounter) (string, error) { if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { // If the volumeMode is 'Block', plugin don't have to format the volume. // The globalPDPath will be created by operationexecutor. Just return devicePath here. - glog.V(5).Infof("fc: AttachDisk volumeMode: %s, devicePath: %s", b.volumeMode, devicePath) + klog.V(5).Infof("fc: AttachDisk volumeMode: %s, devicePath: %s", b.volumeMode, devicePath) if b.volumeMode == v1.PersistentVolumeBlock { return devicePath, nil } @@ -238,7 +235,7 @@ func (util *FCUtil) AttachDisk(b fcDiskMounter) (string, error) { return devicePath, fmt.Errorf("Heuristic determination of mount point failed:%v", err) } if !noMnt { - glog.Infof("fc: %s already mounted", globalPDPath) + klog.Infof("fc: %s already mounted", globalPDPath) return devicePath, nil } @@ -251,7 +248,7 @@ func (util *FCUtil) AttachDisk(b fcDiskMounter) (string, error) { } // DetachDisk removes scsi device file such as /dev/sdX from the node. -func (util *FCUtil) DetachDisk(c fcDiskUnmounter, devicePath string) error { +func (util *fcUtil) DetachDisk(c fcDiskUnmounter, devicePath string) error { var devices []string // devicePath might be like /dev/mapper/mpathX. Find destination. dstPath, err := c.io.EvalSymlinks(devicePath) @@ -265,24 +262,24 @@ func (util *FCUtil) DetachDisk(c fcDiskUnmounter, devicePath string) error { // Add single devicepath to devices devices = append(devices, dstPath) } - glog.V(4).Infof("fc: DetachDisk devicePath: %v, dstPath: %v, devices: %v", devicePath, dstPath, devices) + klog.V(4).Infof("fc: DetachDisk devicePath: %v, dstPath: %v, devices: %v", devicePath, dstPath, devices) var lastErr error for _, device := range devices { err := util.detachFCDisk(c.io, device) if err != nil { - glog.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err) + klog.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err) lastErr = fmt.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err) } } if lastErr != nil { - glog.Errorf("fc: last error occurred during detach disk:\n%v", lastErr) + klog.Errorf("fc: last error occurred during detach disk:\n%v", lastErr) return lastErr } return nil } // detachFCDisk removes scsi device file such as /dev/sdX from the node. -func (util *FCUtil) detachFCDisk(io ioHandler, devicePath string) error { +func (util *fcUtil) detachFCDisk(io ioHandler, devicePath string) error { // Remove scsi device from the node. 
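Beyond the `diskIds` → `diskIDs` rename, `searchDisk` keeps its two-phase structure: scan the existing device links, rescan the SCSI hosts at most once, scan again, and prefer a multipath parent whenever one exists. The control flow, condensed into a hypothetical helper:

```go
// scanTwice is a hypothetical distillation of searchDisk's loop; try stands
// in for the findDisk/findDiskWWIDs branch and rescan for scsiHostRescan,
// which writes "- - -" to every /sys/class/scsi_host/*/scan file.
func scanTwice(try func() (disk, dm string), rescan func()) (string, error) {
	rescanned := false
	for {
		disk, dm := try()
		if dm != "" { // a multipath parent always wins
			return dm, nil
		}
		if rescanned {
			if disk == "" {
				return "", fmt.Errorf("no fc disk found")
			}
			return disk, nil
		}
		rescan()
		rescanned = true
	}
}
```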
if !strings.HasPrefix(devicePath, "/dev/") { return fmt.Errorf("fc detach disk: invalid device name: %s", devicePath) @@ -295,7 +292,7 @@ func (util *FCUtil) detachFCDisk(io ioHandler, devicePath string) error { // DetachBlockFCDisk detaches a volume from kubelet node, removes scsi device file // such as /dev/sdX from the node, and then removes loopback for the scsi device. -func (util *FCUtil) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath string) error { +func (util *fcUtil) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath string) error { // Check if devicePath is valid if len(devicePath) != 0 { if pathExists, pathErr := checkPathExists(devicePath); !pathExists || pathErr != nil { @@ -304,7 +301,7 @@ func (util *FCUtil) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath stri } else { // TODO: FC plugin can't obtain the devicePath from kubelet because devicePath // in volume object isn't updated when volume is attached to kubelet node. - glog.Infof("fc: devicePath is empty. Try to retrieve FC configuration from global map path: %v", mapPath) + klog.Infof("fc: devicePath is empty. Try to retrieve FC configuration from global map path: %v", mapPath) } // Check if global map path is valid @@ -335,7 +332,7 @@ func (util *FCUtil) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath stri for _, fi := range fis { if strings.Contains(fi.Name(), volumeInfo) { devicePath = path.Join(searchPath, fi.Name()) - glog.V(5).Infof("fc: updated devicePath: %s", devicePath) + klog.V(5).Infof("fc: updated devicePath: %s", devicePath) break } } @@ -346,27 +343,13 @@ func (util *FCUtil) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath stri if err != nil { return err } - glog.V(4).Infof("fc: find destination device path from symlink: %v", dstPath) + klog.V(4).Infof("fc: find destination device path from symlink: %v", dstPath) - // Get loopback device which takes fd lock for device beofore detaching a volume from node. - // TODO: This is a workaround for issue #54108 - // Currently local attach plugins such as FC, iSCSI, RBD can't obtain devicePath during - // GenerateUnmapDeviceFunc() in operation_generator. As a result, these plugins fail to get - // and remove loopback device then it will be remained on kubelet node. To avoid the problem, - // local attach plugins needs to remove loopback device during TearDownDevice(). var devices []string - blkUtil := volumepathhandler.NewBlockVolumePathHandler() dm := c.deviceUtil.FindMultipathDeviceForDevice(dstPath) if len(dm) != 0 { dstPath = dm } - loop, err := volumepathhandler.BlockVolumePathHandler.GetLoopDevice(blkUtil, dstPath) - if err != nil { - if err.Error() != volumepathhandler.ErrDeviceNotFound { - return fmt.Errorf("fc: failed to get loopback for destination path: %v, err: %v", dstPath, err) - } - glog.Warningf("fc: loopback for destination path: %s not found", dstPath) - } // Detach volume from kubelet node if len(dm) != 0 { @@ -380,21 +363,14 @@ func (util *FCUtil) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath stri for _, device := range devices { err = util.detachFCDisk(c.io, device) if err != nil { - glog.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err) + klog.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err) lastErr = fmt.Errorf("fc: detachFCDisk failed. 
device: %v err: %v", device, err) } } if lastErr != nil { - glog.Errorf("fc: last error occurred during detach disk:\n%v", lastErr) + klog.Errorf("fc: last error occurred during detach disk:\n%v", lastErr) return lastErr } - if len(loop) != 0 { - // The volume was successfully detached from node. We can safely remove the loopback. - err = volumepathhandler.BlockVolumePathHandler.RemoveLoopDevice(blkUtil, loop) - if err != nil { - return fmt.Errorf("fc: failed to remove loopback :%v, err: %v", loop, err) - } - } return nil } @@ -402,7 +378,7 @@ func checkPathExists(path string) (bool, error) { if pathExists, pathErr := volumeutil.PathExists(path); pathErr != nil { return pathExists, fmt.Errorf("Error checking if path exists: %v", pathErr) } else if !pathExists { - glog.Warningf("Warning: Unmap skipped because path does not exist: %v", path) + klog.Warningf("Warning: Unmap skipped because path does not exist: %v", path) return pathExists, nil } return true, nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/BUILD index fff15f102eea..f337fc42bb36 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/BUILD @@ -14,6 +14,8 @@ go_library( "detacher.go", "detacher-defaults.go", "driver-call.go", + "expander.go", + "expander-defaults.go", "fake_watcher.go", "mounter.go", "mounter-defaults.go", @@ -33,10 +35,11 @@ go_library( "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/github.com/fsnotify/fsnotify:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) @@ -64,6 +67,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", + "//test/utils/harness:go_default_library", "//vendor/github.com/fsnotify/fsnotify:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/attacher-defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/attacher-defaults.go index 368ac1be0bdb..3c82f264b914 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/attacher-defaults.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/attacher-defaults.go @@ -19,7 +19,7 @@ package flexvolume import ( "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/util/mount" @@ -30,13 +30,13 @@ type attacherDefaults flexVolumeAttacher // Attach is part of the volume.Attacher interface func (a *attacherDefaults) Attach(spec *volume.Spec, hostName types.NodeName) (string, error) { - glog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default Attach for volume ", spec.Name(), ", host ", hostName) + klog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default Attach for volume ", 
spec.Name(), ", host ", hostName) return "", nil } // WaitForAttach is part of the volume.Attacher interface func (a *attacherDefaults) WaitForAttach(spec *volume.Spec, devicePath string, timeout time.Duration) (string, error) { - glog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default WaitForAttach for volume ", spec.Name(), ", device ", devicePath) + klog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default WaitForAttach for volume ", spec.Name(), ", device ", devicePath) return devicePath, nil } @@ -47,7 +47,7 @@ func (a *attacherDefaults) GetDeviceMountPath(spec *volume.Spec, mountsDir strin // MountDevice is part of the volume.Attacher interface func (a *attacherDefaults) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, mounter mount.Interface) error { - glog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default MountDevice for volume ", spec.Name(), ", device ", devicePath, ", deviceMountPath ", deviceMountPath) + klog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default MountDevice for volume ", spec.Name(), ", device ", devicePath, ", deviceMountPath ", deviceMountPath) volSourceFSType, err := getFSType(spec) if err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/attacher.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/attacher.go index f7767d38948f..3b98eefa0792 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/attacher.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/attacher.go @@ -19,9 +19,9 @@ package flexvolume import ( "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/volume" ) @@ -91,9 +91,8 @@ func (a *flexVolumeAttacher) MountDevice(spec *volume.Spec, devicePath string, d // plugin does not implement attach interface. if devicePath != "" { return (*attacherDefaults)(a).MountDevice(spec, devicePath, deviceMountPath, a.plugin.host.GetMounter(a.plugin.GetPluginName())) - } else { - return nil } + return nil } return err } @@ -113,7 +112,7 @@ func (a *flexVolumeAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName t } else if err == nil { if !status.Attached { volumesAttachedCheck[spec] = false - glog.V(2).Infof("VolumesAreAttached: check volume (%q) is no longer attached", spec.Name()) + klog.V(2).Infof("VolumesAreAttached: check volume (%q) is no longer attached", spec.Name()) } } else { return nil, err diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/detacher-defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/detacher-defaults.go index 181f87bde487..f0f43262394c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/detacher-defaults.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/detacher-defaults.go @@ -19,8 +19,8 @@ package flexvolume import ( "time" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/volume/util" ) @@ -28,18 +28,18 @@ type detacherDefaults flexVolumeDetacher // Detach is part of the volume.Detacher interface. 
func (d *detacherDefaults) Detach(volumeName string, hostName types.NodeName) error { - glog.Warning(logPrefix(d.plugin.flexVolumePlugin), "using default Detach for volume ", volumeName, ", host ", hostName) + klog.Warning(logPrefix(d.plugin.flexVolumePlugin), "using default Detach for volume ", volumeName, ", host ", hostName) return nil } // WaitForDetach is part of the volume.Detacher interface. func (d *detacherDefaults) WaitForDetach(devicePath string, timeout time.Duration) error { - glog.Warning(logPrefix(d.plugin.flexVolumePlugin), "using default WaitForDetach for device ", devicePath) + klog.Warning(logPrefix(d.plugin.flexVolumePlugin), "using default WaitForDetach for device ", devicePath) return nil } // UnmountDevice is part of the volume.Detacher interface. func (d *detacherDefaults) UnmountDevice(deviceMountPath string) error { - glog.Warning(logPrefix(d.plugin.flexVolumePlugin), "using default UnmountDevice for device mount path ", deviceMountPath) + klog.Warning(logPrefix(d.plugin.flexVolumePlugin), "using default UnmountDevice for device mount path ", deviceMountPath) return util.UnmountPath(deviceMountPath, d.plugin.host.GetMounter(d.plugin.GetPluginName())) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/detacher.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/detacher.go index 6b5760258469..d98791f3d1bc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/detacher.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/detacher.go @@ -20,8 +20,8 @@ import ( "fmt" "os" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" ) @@ -53,7 +53,7 @@ func (d *flexVolumeDetacher) UnmountDevice(deviceMountPath string) error { pathExists, pathErr := util.PathExists(deviceMountPath) if !pathExists { - glog.Warningf("Warning: Unmount skipped because path does not exist: %v", deviceMountPath) + klog.Warningf("Warning: Unmount skipped because path does not exist: %v", deviceMountPath) return nil } if pathErr != nil && !util.IsCorruptedMnt(pathErr) { @@ -70,7 +70,7 @@ func (d *flexVolumeDetacher) UnmountDevice(deviceMountPath string) error { } if notmnt { - glog.Warningf("Warning: Path: %v already unmounted", deviceMountPath) + klog.Warningf("Warning: Path: %v already unmounted", deviceMountPath) } else { call := d.plugin.NewDriverCall(unmountDeviceCmd) call.Append(deviceMountPath) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/driver-call.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/driver-call.go index 5b089df07a6f..c25b59991915 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/driver-call.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/driver-call.go @@ -22,7 +22,7 @@ import ( "fmt" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/volume" ) @@ -44,6 +44,9 @@ const ( mountCmd = "mount" unmountCmd = "unmount" + expandVolumeCmd = "expandvolume" + expandFSCmd = "expandfs" + // Option keys optionFSType = "kubernetes.io/fsType" optionReadWrite = "kubernetes.io/readwrite" @@ -67,7 +70,7 @@ const ( ) var ( - TimeoutError = fmt.Errorf("Timeout") + errTimeout = fmt.Errorf("Timeout") ) // DriverCall implements the basic contract between FlexVolume and its driver. 
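Before the hunks below, `driver-call.go` gains the two resize verbs (`expandvolume`, `expandfs`) that `expander.go` wires up further down, and the exported `TimeoutError` becomes the unexported sentinel `errTimeout`, following Go's `errFoo` naming convention. A sketch of how such a sentinel is matched inside the package (the retry note is illustrative):

```go
// runOnce is a hypothetical wrapper showing identity-matching against the
// unexported sentinel; callers outside the package can no longer see it.
func runOnce(dc *DriverCall) (*DriverStatus, error) {
	status, err := dc.Run()
	if err == errTimeout {
		// The driver binary ran past the call's timeout; callers typically
		// treat this as retriable on the next sync loop.
		return nil, err
	}
	return status, err
}
```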
@@ -92,10 +95,12 @@ func (plugin *flexVolumePlugin) NewDriverCallWithTimeout(command string, timeout } } +// Append appends arg into driver call argument list func (dc *DriverCall) Append(arg string) { dc.args = append(dc.args, arg) } +// AppendSpec appends volume spec to driver call argument list func (dc *DriverCall) AppendSpec(spec *volume.Spec, host volume.VolumeHost, extraOptions map[string]string) error { optionsForDriver, err := NewOptionsForDriver(spec, host, extraOptions) if err != nil { @@ -111,6 +116,7 @@ func (dc *DriverCall) AppendSpec(spec *volume.Spec, host volume.VolumeHost, extr return nil } +// Run executes the driver call func (dc *DriverCall) Run() (*DriverStatus, error) { if dc.plugin.isUnsupported(dc.Command) { return nil, errors.New(StatusNotSupported) @@ -131,17 +137,17 @@ func (dc *DriverCall) Run() (*DriverStatus, error) { output, execErr := cmd.CombinedOutput() if execErr != nil { if timeout { - return nil, TimeoutError + return nil, errTimeout } _, err := handleCmdResponse(dc.Command, output) if err == nil { - glog.Errorf("FlexVolume: driver bug: %s: exec error (%s) but no error in response.", execPath, execErr) + klog.Errorf("FlexVolume: driver bug: %s: exec error (%s) but no error in response.", execPath, execErr) return nil, execErr } if isCmdNotSupportedErr(err) { dc.plugin.unsupported(dc.Command) } else { - glog.Warningf("FlexVolume: driver call failed: executable: %s, args: %s, error: %s, output: %q", execPath, dc.args, execErr.Error(), output) + klog.Warningf("FlexVolume: driver call failed: executable: %s, args: %s, error: %s, output: %q", execPath, dc.args, execErr.Error(), output) } return nil, err } @@ -160,6 +166,7 @@ func (dc *DriverCall) Run() (*DriverStatus, error) { // OptionsForDriver represents the spec given to the driver. type OptionsForDriver map[string]string +// NewOptionsForDriver create driver options given volume spec func NewOptionsForDriver(spec *volume.Spec, host volume.VolumeHost, extraOptions map[string]string) (OptionsForDriver, error) { volSourceFSType, err := getFSType(spec) @@ -217,17 +224,26 @@ type DriverStatus struct { // By default we assume all the capabilities are supported. // If the plugin does not support a capability, it can return false for that capability. Capabilities *DriverCapabilities `json:",omitempty"` + // Returns the actual size of the volume after resizing is done, the size is in bytes. 
+ ActualVolumeSize int64 `json:"volumeNewSize,omitempty"` } +// DriverCapabilities represents what driver can do type DriverCapabilities struct { - Attach bool `json:"attach"` - SELinuxRelabel bool `json:"selinuxRelabel"` + Attach bool `json:"attach"` + SELinuxRelabel bool `json:"selinuxRelabel"` + SupportsMetrics bool `json:"supportsMetrics"` + FSGroup bool `json:"fsGroup"` + RequiresFSResize bool `json:"requiresFSResize"` } func defaultCapabilities() *DriverCapabilities { return &DriverCapabilities{ - Attach: true, - SELinuxRelabel: true, + Attach: true, + SELinuxRelabel: true, + SupportsMetrics: false, + FSGroup: true, + RequiresFSResize: true, } } @@ -248,14 +264,14 @@ func handleCmdResponse(cmd string, output []byte) (*DriverStatus, error) { Capabilities: defaultCapabilities(), } if err := json.Unmarshal(output, &status); err != nil { - glog.Errorf("Failed to unmarshal output for command: %s, output: %q, error: %s", cmd, string(output), err.Error()) + klog.Errorf("Failed to unmarshal output for command: %s, output: %q, error: %s", cmd, string(output), err.Error()) return nil, err } else if status.Status == StatusNotSupported { - glog.V(5).Infof("%s command is not supported by the driver", cmd) + klog.V(5).Infof("%s command is not supported by the driver", cmd) return nil, errors.New(status.Status) } else if status.Status != StatusSuccess { errMsg := fmt.Sprintf("%s command failed, status: %s, reason: %s", cmd, status.Status, status.Message) - glog.Errorf(errMsg) + klog.Errorf(errMsg) return nil, fmt.Errorf("%s", errMsg) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/expander-defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/expander-defaults.go new file mode 100644 index 000000000000..4a33e184e38c --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/expander-defaults.go @@ -0,0 +1,45 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
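The expanded `DriverCapabilities` makes the driver's `init` response the single switch for metrics collection, fsGroup ownership, and filesystem-resize behavior, and `DriverStatus.ActualVolumeSize` lets a driver report the post-resize size in bytes. A sketch of parsing such a response the way `handleCmdResponse` does (the payload is illustrative; keys follow the JSON tags above):

```go
payload := []byte(`{
	"status": "Success",
	"capabilities": {
		"attach": false,
		"selinuxRelabel": true,
		"supportsMetrics": true,
		"fsGroup": false,
		"requiresFSResize": true
	}
}`)

// Start from defaultCapabilities so omitted fields keep their defaults,
// then let the driver's reply override them.
status := DriverStatus{Capabilities: defaultCapabilities()}
if err := json.Unmarshal(payload, &status); err != nil {
	klog.Errorf("malformed driver response: %v", err)
}
// fsGroup=false here means kubelet skips its ownership pass for this driver.
```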
+*/ + +package flexvolume + +import ( + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/klog" + "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/util" +) + +type expanderDefaults struct { + plugin *flexVolumePlugin +} + +func newExpanderDefaults(plugin *flexVolumePlugin) *expanderDefaults { + return &expanderDefaults{plugin} +} + +func (e *expanderDefaults) ExpandVolumeDevice(spec *volume.Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) { + klog.Warning(logPrefix(e.plugin), "using default expand for volume ", spec.Name(), ", to size ", newSize, " from ", oldSize) + return newSize, nil +} + +// the defaults for ExpandFS return a generic resize indicator that will trigger the operation executor to go ahead with +// generic filesystem resize +func (e *expanderDefaults) ExpandFS(spec *volume.Spec, devicePath, deviceMountPath string, _, _ resource.Quantity) error { + klog.Warning(logPrefix(e.plugin), "using default filesystem resize for volume ", spec.Name(), ", at ", devicePath) + _, err := util.GenericResizeFS(e.plugin.host, e.plugin.GetPluginName(), devicePath, deviceMountPath) + return err +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/expander.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/expander.go new file mode 100644 index 000000000000..345bad26d7b7 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/expander.go @@ -0,0 +1,67 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flexvolume + +import ( + "fmt" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/kubernetes/pkg/volume" + "strconv" +) + +func (plugin *flexVolumePlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) { + call := plugin.NewDriverCall(expandVolumeCmd) + call.AppendSpec(spec, plugin.host, nil) + + devicePath, err := plugin.getDeviceMountPath(spec) + if err != nil { + return newSize, err + } + call.Append(devicePath) + call.Append(strconv.FormatInt(newSize.Value(), 10)) + call.Append(strconv.FormatInt(oldSize.Value(), 10)) + + _, err = call.Run() + if isCmdNotSupportedErr(err) { + return newExpanderDefaults(plugin).ExpandVolumeDevice(spec, newSize, oldSize) + } + return newSize, err +} + +func (plugin *flexVolumePlugin) ExpandFS(spec *volume.Spec, devicePath, deviceMountPath string, newSize, oldSize resource.Quantity) error { + // This method is called after we spec.PersistentVolume.Spec.Capacity + // has been updated to the new size. 
The underlying driver thus sees + // the _new_ (requested) size and can find out the _current_ size from + // its underlying storage implementation + + if spec.PersistentVolume == nil { + return fmt.Errorf("PersistentVolume not found for spec: %s", spec.Name()) + } + + call := plugin.NewDriverCall(expandFSCmd) + call.AppendSpec(spec, plugin.host, nil) + call.Append(devicePath) + call.Append(deviceMountPath) + call.Append(strconv.FormatInt(newSize.Value(), 10)) + call.Append(strconv.FormatInt(oldSize.Value(), 10)) + + _, err := call.Run() + if isCmdNotSupportedErr(err) { + return newExpanderDefaults(plugin).ExpandFS(spec, devicePath, deviceMountPath, newSize, oldSize) + } + return err +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/fake_watcher.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/fake_watcher.go index b427f379eaea..ce834043fbab 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/fake_watcher.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/fake_watcher.go @@ -29,7 +29,7 @@ type fakeWatcher struct { var _ utilfs.FSWatcher = &fakeWatcher{} -func NewFakeWatcher() *fakeWatcher { +func newFakeWatcher() *fakeWatcher { return &fakeWatcher{ watches: nil, } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/mounter-defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/mounter-defaults.go index b3cd9430ff53..930806118972 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/mounter-defaults.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/mounter-defaults.go @@ -17,7 +17,7 @@ limitations under the License. package flexvolume import ( - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/volume" ) @@ -27,7 +27,7 @@ type mounterDefaults flexVolumeMounter // SetUpAt is part of the volume.Mounter interface. // This implementation relies on the attacher's device mount path and does a bind mount to dir. func (f *mounterDefaults) SetUpAt(dir string, fsGroup *int64) error { - glog.Warning(logPrefix(f.plugin), "using default SetUpAt to ", dir) + klog.Warning(logPrefix(f.plugin), "using default SetUpAt to ", dir) src, err := f.plugin.getDeviceMountPath(f.spec) if err != nil { @@ -43,7 +43,7 @@ func (f *mounterDefaults) SetUpAt(dir string, fsGroup *int64) error { // Returns the default volume attributes. 
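Both new entry points build their driver calls the same way: the JSON options blob from `AppendSpec`, then positional arguments, with old and new sizes passed as decimal byte counts; when the driver answers `Not supported`, the expander defaults take over, returning the requested size or running the generic filesystem resize. Under that reading of the calling convention, a toy out-of-tree driver handling the resize verbs might look like this (a sketch, not a conformant driver):

```go
package main

import (
	"fmt"
	"os"
)

// Expected argv shapes, per the Append ordering above (an assumption drawn
// from this diff, not from driver documentation):
//   <driver> expandvolume <specJSON> <device> <newBytes> <oldBytes>
//   <driver> expandfs     <specJSON> <device> <mountPath> <newBytes> <oldBytes>
func main() {
	if len(os.Args) < 2 {
		fmt.Println(`{"status": "Failure", "message": "missing command"}`)
		os.Exit(1)
	}
	switch os.Args[1] {
	case "expandvolume", "expandfs":
		// A real driver would resize the backing volume/filesystem here.
		fmt.Println(`{"status": "Success"}`)
	default:
		fmt.Println(`{"status": "Not supported"}`)
		os.Exit(1)
	}
}
```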
func (f *mounterDefaults) GetAttributes() volume.Attributes { - glog.V(5).Infof(logPrefix(f.plugin), "using default GetAttributes") + klog.V(5).Infof(logPrefix(f.plugin), "using default GetAttributes") return volume.Attributes{ ReadOnly: f.readOnly, Managed: !f.readOnly, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/mounter.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/mounter.go index 5085293cc174..8c246043cb7e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/mounter.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/mounter.go @@ -32,7 +32,6 @@ type flexVolumeMounter struct { // the considered volume spec spec *volume.Spec readOnly bool - volume.MetricsNil } var _ volume.Mounter = &flexVolumeMounter{} @@ -93,7 +92,9 @@ func (f *flexVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { } if !f.readOnly { - volume.SetVolumeOwnership(f, fsGroup) + if f.plugin.capabilities.FSGroup { + volume.SetVolumeOwnership(f, fsGroup) + } } return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/plugin-defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/plugin-defaults.go index 6f090b4e1884..9f9e1587b0a2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/plugin-defaults.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/plugin-defaults.go @@ -17,7 +17,7 @@ limitations under the License. package flexvolume import ( - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/volume" ) @@ -29,6 +29,6 @@ func logPrefix(plugin *flexVolumePlugin) string { } func (plugin *pluginDefaults) GetVolumeName(spec *volume.Spec) (string, error) { - glog.Warning(logPrefix((*flexVolumePlugin)(plugin)), "using default GetVolumeName for volume ", spec.Name()) + klog.Warning(logPrefix((*flexVolumePlugin)(plugin)), "using default GetVolumeName for volume ", spec.Name()) return spec.Name(), nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/plugin.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/plugin.go index 70d4b009f6fa..2e2a9f7e532d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/plugin.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/plugin.go @@ -23,7 +23,7 @@ import ( "strings" "sync" - "github.com/golang/glog" + "k8s.io/klog" api "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -57,9 +57,12 @@ type flexVolumeAttachablePlugin struct { var _ volume.AttachableVolumePlugin = &flexVolumeAttachablePlugin{} var _ volume.PersistentVolumePlugin = &flexVolumePlugin{} +var _ volume.FSResizableVolumePlugin = &flexVolumePlugin{} +var _ volume.ExpandableVolumePlugin = &flexVolumePlugin{} var _ volume.DeviceMountableVolumePlugin = &flexVolumeAttachablePlugin{} +// PluginFactory create flex volume plugin type PluginFactory interface { NewFlexVolumePlugin(pluginDir, driverName string, runner exec.Interface) (volume.VolumePlugin, error) } @@ -89,9 +92,8 @@ func (pluginFactory) NewFlexVolumePlugin(pluginDir, name string, runner exec.Int if flexPlugin.capabilities.Attach { // Plugin supports attach/detach, so return flexVolumeAttachablePlugin return &flexVolumeAttachablePlugin{flexVolumePlugin: flexPlugin}, nil - } else { - return flexPlugin, nil } + return flexPlugin, nil } // Init is part of the volume.VolumePlugin interface. 
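With the `fsGroup` capability in place, the ownership pass in `SetUpAt` becomes conditional: drivers whose storage manages its own ownership report `"fsGroup": false` and kubelet skips the recursive chown/chmod. The gate, isolated into a hypothetical helper:

```go
// maybeSetOwnership is a hypothetical extraction of the new gate: the mount
// must be writable and the driver must have kept fsGroup at its default
// (true) before kubelet applies group ownership to the volume.
func maybeSetOwnership(m *flexVolumeMounter, fsGroup *int64) {
	if !m.readOnly && m.plugin.capabilities.FSGroup {
		volume.SetVolumeOwnership(m, fsGroup)
	}
}
```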
@@ -133,7 +135,7 @@ func (plugin *flexVolumePlugin) GetVolumeName(spec *volume.Spec) (string, error) return "", err } - glog.Warning(logPrefix(plugin), "GetVolumeName is not supported yet. Defaulting to PV or volume name: ", name) + klog.Warning(logPrefix(plugin), "GetVolumeName is not supported yet. Defaulting to PV or volume name: ", name) return name, nil } @@ -177,6 +179,14 @@ func (plugin *flexVolumePlugin) newMounterInternal(spec *volume.Spec, pod *api.P return nil, err } + var metricsProvider volume.MetricsProvider + if plugin.capabilities.SupportsMetrics { + metricsProvider = volume.NewMetricsStatFS(plugin.host.GetPodVolumeDir( + pod.UID, utilstrings.EscapeQualifiedNameForDisk(sourceDriver), spec.Name())) + } else { + metricsProvider = &volume.MetricsNil{} + } + return &flexVolumeMounter{ flexVolume: &flexVolume{ driverName: sourceDriver, @@ -188,6 +198,7 @@ func (plugin *flexVolumePlugin) newMounterInternal(spec *volume.Spec, pod *api.P podNamespace: pod.Namespace, podServiceAccountName: pod.Spec.ServiceAccountName, volName: spec.Name(), + MetricsProvider: metricsProvider, }, runner: runner, spec: spec, @@ -202,14 +213,23 @@ func (plugin *flexVolumePlugin) NewUnmounter(volName string, podUID types.UID) ( // newUnmounterInternal is the internal unmounter routine to clean the volume. func (plugin *flexVolumePlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface, runner exec.Interface) (volume.Unmounter, error) { + var metricsProvider volume.MetricsProvider + if plugin.capabilities.SupportsMetrics { + metricsProvider = volume.NewMetricsStatFS(plugin.host.GetPodVolumeDir( + podUID, utilstrings.EscapeQualifiedNameForDisk(plugin.driverName), volName)) + } else { + metricsProvider = &volume.MetricsNil{} + } + return &flexVolumeUnmounter{ flexVolume: &flexVolume{ - driverName: plugin.driverName, - execPath: plugin.getExecutable(), - mounter: mounter, - plugin: plugin, - podUID: podUID, - volName: volName, + driverName: plugin.driverName, + execPath: plugin.getExecutable(), + mounter: mounter, + plugin: plugin, + podUID: podUID, + volName: volName, + MetricsProvider: metricsProvider, }, runner: runner, }, nil @@ -287,3 +307,7 @@ func (plugin *flexVolumePlugin) getDeviceMountPath(spec *volume.Spec) (string, e mountsDir := path.Join(plugin.host.GetPluginDir(flexVolumePluginName), plugin.driverName, "mounts") return path.Join(mountsDir, volumeName), nil } + +func (plugin *flexVolumePlugin) RequiresFSResize() bool { + return plugin.capabilities.RequiresFSResize +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/probe.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/probe.go index 40d2dd3300f0..7a929e5ce99b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/probe.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/probe.go @@ -17,7 +17,7 @@ limitations under the License. 
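Reviewer note: `newMounterInternal` and `newUnmounterInternal` above now pick a metrics implementation from the driver's advertised capabilities: `volume.NewMetricsStatFS` when `SupportsMetrics` is set, `volume.MetricsNil` otherwise (which is also why the embedded `volume.MetricsNil` was dropped from the mounter and unmounter structs). A reduced sketch of that selection, using simplified stand-in types:

```go
package main

import "fmt"

// MetricsProvider mirrors the shape of the kubelet's volume metrics interface.
type MetricsProvider interface {
	GetMetrics() (string, error)
}

// metricsStatFS plays the role of volume.NewMetricsStatFS(path); a real
// implementation would statfs() the volume path.
type metricsStatFS struct{ path string }

func (m *metricsStatFS) GetMetrics() (string, error) {
	return "statfs metrics for " + m.path, nil
}

// metricsNil plays the role of volume.MetricsNil: metrics are unsupported.
type metricsNil struct{}

func (metricsNil) GetMetrics() (string, error) {
	return "", fmt.Errorf("metrics are not supported for this volume")
}

// newMetricsProvider mirrors the newMounterInternal logic: StatFS-backed
// metrics only when the driver advertises the SupportsMetrics capability.
func newMetricsProvider(supportsMetrics bool, volumePath string) MetricsProvider {
	if supportsMetrics {
		return &metricsStatFS{path: volumePath}
	}
	return metricsNil{}
}

func main() {
	for _, supported := range []bool{true, false} {
		m := newMetricsProvider(supported, "/var/lib/kubelet/pods/uid/volumes/driver/vol")
		fmt.Println(m.GetMetrics())
	}
}
```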
package flexvolume import ( - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/volume" "k8s.io/utils/exec" @@ -46,6 +46,7 @@ type flexVolumeProber struct { eventsMap map[string]volume.ProbeOperation // the key is the driver directory path, the value is the coresponding operation } +// GetDynamicPluginProber creates dynamic plugin prober func GetDynamicPluginProber(pluginDir string, runner exec.Interface) volume.DynamicPluginProber { return &flexVolumeProber{ pluginDir: pluginDir, @@ -233,7 +234,7 @@ func (prober *flexVolumeProber) addWatchRecursive(filename string) error { addWatch := func(path string, info os.FileInfo, err error) error { if err == nil && info.IsDir() { if err := prober.watcher.AddWatch(path); err != nil { - glog.Errorf("Error recursively adding watch: %v", err) + klog.Errorf("Error recursively adding watch: %v", err) } } return nil @@ -246,10 +247,10 @@ func (prober *flexVolumeProber) addWatchRecursive(filename string) error { func (prober *flexVolumeProber) initWatcher() error { err := prober.watcher.Init(func(event fsnotify.Event) { if err := prober.handleWatchEvent(event); err != nil { - glog.Errorf("Flexvolume prober watch: %s", err) + klog.Errorf("Flexvolume prober watch: %s", err) } }, func(err error) { - glog.Errorf("Received an error from watcher: %s", err) + klog.Errorf("Received an error from watcher: %s", err) }) if err != nil { return fmt.Errorf("Error initializing watcher: %s", err) @@ -267,7 +268,7 @@ func (prober *flexVolumeProber) initWatcher() error { // Creates the plugin directory, if it doesn't already exist. func (prober *flexVolumeProber) createPluginDir() error { if _, err := prober.fs.Stat(prober.pluginDir); os.IsNotExist(err) { - glog.Warningf("Flexvolume plugin directory at %s does not exist. Recreating.", prober.pluginDir) + klog.Warningf("Flexvolume plugin directory at %s does not exist. Recreating.", prober.pluginDir) err := prober.fs.MkdirAll(prober.pluginDir, 0755) if err != nil { return fmt.Errorf("Error (re-)creating driver directory: %s", err) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/unmounter-defaults.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/unmounter-defaults.go index 67d9facf79dd..919a6be890ed 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/unmounter-defaults.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/unmounter-defaults.go @@ -17,13 +17,13 @@ limitations under the License. 
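Reviewer note: `addWatchRecursive` above walks the plugin directory and registers a watch on every subdirectory, logging (rather than aborting on) registration failures. A self-contained sketch of the same walk, with `addWatch` as a hypothetical stand-in for the prober's `watcher.AddWatch`:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// addWatch is a hypothetical stand-in for the prober's watcher.AddWatch; a
// real implementation would register the directory with fsnotify.
func addWatch(path string) error {
	fmt.Println("watching", path)
	return nil
}

// addWatchRecursive mirrors the prober's approach: walk the tree, register a
// watch on every directory, and log (rather than abort on) failures so one
// bad directory cannot stop driver discovery.
func addWatchRecursive(root string) error {
	return filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
		if err == nil && info.IsDir() {
			if werr := addWatch(path); werr != nil {
				fmt.Fprintln(os.Stderr, "error recursively adding watch:", werr)
			}
		}
		return nil
	})
}

func main() {
	// The default flexvolume plugin directory on a kubelet host.
	_ = addWatchRecursive("/usr/libexec/kubernetes/kubelet-plugins/volume/exec")
}
```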
package flexvolume import ( - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/volume/util" ) type unmounterDefaults flexVolumeUnmounter func (f *unmounterDefaults) TearDownAt(dir string) error { - glog.Warning(logPrefix(f.plugin), "using default TearDownAt for ", dir) + klog.Warning(logPrefix(f.plugin), "using default TearDownAt for ", dir) return util.UnmountPath(dir, f.mounter) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/unmounter.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/unmounter.go index 67e26fe15cc5..a62636aba409 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/unmounter.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/unmounter.go @@ -20,7 +20,7 @@ import ( "fmt" "os" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" "k8s.io/utils/exec" @@ -31,7 +31,6 @@ type flexVolumeUnmounter struct { *flexVolume // Runner used to teardown the volume. runner exec.Interface - volume.MetricsNil } var _ volume.Unmounter = &flexVolumeUnmounter{} @@ -46,7 +45,7 @@ func (f *flexVolumeUnmounter) TearDownAt(dir string) error { pathExists, pathErr := util.PathExists(dir) if !pathExists { - glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) + klog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/util.go index b706712101c7..4a3019e25e39 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/util.go @@ -21,7 +21,7 @@ import ( "fmt" "os" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" @@ -49,13 +49,13 @@ func addSecretsToOptions(options map[string]string, spec *volume.Spec, namespace } for name, data := range secrets { options[optionKeySecret+"/"+name] = base64.StdEncoding.EncodeToString([]byte(data)) - glog.V(1).Infof("found flex volume secret info: %s", name) + klog.V(1).Infof("found flex volume secret info: %s", name) } return nil } -var notFlexVolume = fmt.Errorf("not a flex volume") +var errNotFlexVolume = fmt.Errorf("not a flex volume") func getDriver(spec *volume.Spec) (string, error) { if spec.Volume != nil && spec.Volume.FlexVolume != nil { @@ -64,7 +64,7 @@ func getDriver(spec *volume.Spec) (string, error) { if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.FlexVolume != nil { return spec.PersistentVolume.Spec.FlexVolume.Driver, nil } - return "", notFlexVolume + return "", errNotFlexVolume } func getFSType(spec *volume.Spec) (string, error) { @@ -74,7 +74,7 @@ func getFSType(spec *volume.Spec) (string, error) { if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.FlexVolume != nil { return spec.PersistentVolume.Spec.FlexVolume.FSType, nil } - return "", notFlexVolume + return "", errNotFlexVolume } func getSecretNameAndNamespace(spec *volume.Spec, podNamespace string) (string, string, error) { @@ -95,7 +95,7 @@ func getSecretNameAndNamespace(spec *volume.Spec, podNamespace string) (string, } return secretName, secretNamespace, nil } - return "", "", notFlexVolume + return "", "", errNotFlexVolume } func getReadOnly(spec *volume.Spec) (bool, error) { @@ -106,7 +106,7 @@ 
func getReadOnly(spec *volume.Spec) (bool, error) { // ReadOnly is specified at the PV level return spec.ReadOnly, nil } - return false, notFlexVolume + return false, errNotFlexVolume } func getOptions(spec *volume.Spec) (map[string]string, error) { @@ -116,7 +116,7 @@ func getOptions(spec *volume.Spec) (map[string]string, error) { if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.FlexVolume != nil { return spec.PersistentVolume.Spec.FlexVolume.Options, nil } - return nil, notFlexVolume + return nil, errNotFlexVolume } func prepareForMount(mounter mount.Interface, deviceMountPath string) (bool, error) { @@ -141,7 +141,7 @@ func prepareForMount(mounter mount.Interface, deviceMountPath string) (bool, err func doMount(mounter mount.Interface, devicePath, deviceMountPath, fsType string, options []string) error { err := mounter.Mount(devicePath, deviceMountPath, fsType, options) if err != nil { - glog.Errorf("Failed to mount the volume at %s, device: %s, error: %s", deviceMountPath, devicePath, err.Error()) + klog.Errorf("Failed to mount the volume at %s, device: %s, error: %s", deviceMountPath, devicePath, err.Error()) return err } return nil @@ -150,7 +150,7 @@ func doMount(mounter mount.Interface, devicePath, deviceMountPath, fsType string func isNotMounted(mounter mount.Interface, deviceMountPath string) (bool, error) { notmnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath) if err != nil { - glog.Errorf("Error checking mount point %s, error: %v", deviceMountPath, err) + klog.Errorf("Error checking mount point %s, error: %v", deviceMountPath, err) return false, err } return notmnt, nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/volume.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/volume.go index abeeb0529700..f4988d839e63 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/volume.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/volume.go @@ -20,6 +20,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/util/mount" utilstrings "k8s.io/kubernetes/pkg/util/strings" + "k8s.io/kubernetes/pkg/volume" ) type flexVolume struct { @@ -42,6 +43,8 @@ type flexVolume struct { volName string // the underlying plugin plugin *flexVolumePlugin + // the metric plugin + volume.MetricsProvider } // volume.Volume interface diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flocker/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flocker/BUILD index d3c41b0fade8..0067622c19d8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flocker/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flocker/BUILD @@ -27,7 +27,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library", "//vendor/github.com/clusterhq/flocker-go:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker.go index 18a59b742282..8651ba490832 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker.go @@ -22,9 +22,9 @@ import ( "path" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + 
"k8s.io/klog" "k8s.io/kubernetes/pkg/util/env" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/strings" @@ -34,7 +34,7 @@ import ( "k8s.io/kubernetes/pkg/volume/util" ) -// This is the primary entrypoint for volume plugins. +// ProbeVolumePlugins is the primary entrypoint for volume plugins. func ProbeVolumePlugins() []volume.VolumePlugin { return []volume.VolumePlugin{&flockerPlugin{nil}} } @@ -116,11 +116,11 @@ func (p *flockerPlugin) SupportsMountOption() bool { return false } -func (plugin *flockerPlugin) SupportsBulkVolumeVerification() bool { +func (p *flockerPlugin) SupportsBulkVolumeVerification() bool { return false } -func (plugin *flockerPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode { +func (p *flockerPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode { return []v1.PersistentVolumeAccessMode{ v1.ReadWriteOnce, } @@ -136,12 +136,12 @@ func (p *flockerPlugin) getFlockerVolumeSource(spec *volume.Spec) (*v1.FlockerVo return spec.PersistentVolume.Spec.Flocker, readOnly } -func (plugin *flockerPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) { +func (p *flockerPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) { // Inject real implementations here, test through the internal function. - return plugin.newMounterInternal(spec, pod.UID, &FlockerUtil{}, plugin.host.GetMounter(plugin.GetPluginName())) + return p.newMounterInternal(spec, pod.UID, &flockerUtil{}, p.host.GetMounter(p.GetPluginName())) } -func (plugin *flockerPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager volumeManager, mounter mount.Interface) (volume.Mounter, error) { +func (p *flockerPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager volumeManager, mounter mount.Interface) (volume.Mounter, error) { volumeSource, readOnly, err := getVolumeSource(spec) if err != nil { return nil, err @@ -158,15 +158,15 @@ func (plugin *flockerPlugin) newMounterInternal(spec *volume.Spec, podUID types. datasetUUID: datasetUUID, mounter: mounter, manager: manager, - plugin: plugin, - MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)), + plugin: p, + MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), p.host)), }, readOnly: readOnly}, nil } func (p *flockerPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) { // Inject real implementations here, test through the internal function. - return p.newUnmounterInternal(volName, podUID, &FlockerUtil{}, p.host.GetMounter(p.GetPluginName())) + return p.newUnmounterInternal(volName, podUID, &flockerUtil{}, p.host.GetMounter(p.GetPluginName())) } func (p *flockerPlugin) newUnmounterInternal(volName string, podUID types.UID, manager volumeManager, mounter mount.Interface) (volume.Unmounter, error) { @@ -304,15 +304,15 @@ func (b *flockerVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { } _, err := b.flockerClient.GetDatasetState(datasetUUID) if err != nil { - return fmt.Errorf("The volume with datasetUUID='%s' migrated unsuccessfully.", datasetUUID) + return fmt.Errorf("The volume with datasetUUID='%s' migrated unsuccessfully", datasetUUID) } } // TODO: handle failed mounts here. 
notMnt, err := b.mounter.IsLikelyNotMountPoint(dir) - glog.V(4).Infof("flockerVolume set up: %s %v %v, datasetUUID %v readOnly %v", dir, !notMnt, err, datasetUUID, b.readOnly) + klog.V(4).Infof("flockerVolume set up: %s %v %v, datasetUUID %v readOnly %v", dir, !notMnt, err, datasetUUID, b.readOnly) if err != nil && !os.IsNotExist(err) { - glog.Errorf("cannot validate mount point: %s %v", dir, err) + klog.Errorf("cannot validate mount point: %s %v", dir, err) return err } if !notMnt { @@ -320,7 +320,7 @@ func (b *flockerVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { } if err := os.MkdirAll(dir, 0750); err != nil { - glog.Errorf("mkdir failed on disk %s (%v)", dir, err) + klog.Errorf("mkdir failed on disk %s (%v)", dir, err) return err } @@ -331,33 +331,33 @@ func (b *flockerVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { } globalFlockerPath := makeGlobalFlockerPath(datasetUUID) - glog.V(4).Infof("attempting to mount %s", dir) + klog.V(4).Infof("attempting to mount %s", dir) err = b.mounter.Mount(globalFlockerPath, dir, "", options) if err != nil { notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("isLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("isLikelyNotMountPoint check failed: %v", mntErr) return err } if !notMnt { if mntErr = b.mounter.Unmount(dir); mntErr != nil { - glog.Errorf("failed to unmount: %v", mntErr) + klog.Errorf("failed to unmount: %v", mntErr) return err } notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("isLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("isLikelyNotMountPoint check failed: %v", mntErr) return err } if !notMnt { // This is very odd, we don't expect it. We'll try again next sync loop. - glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir) + klog.Errorf("%s is still mounted, despite call to unmount(). 
Will try again next sync loop.", dir) return err } } os.Remove(dir) - glog.Errorf("mount of disk %s failed: %v", dir, err) + klog.Errorf("mount of disk %s failed: %v", dir, err) return err } @@ -365,7 +365,7 @@ func (b *flockerVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { volume.SetVolumeOwnership(b, fsGroup) } - glog.V(4).Infof("successfully mounted %s", dir) + klog.V(4).Infof("successfully mounted %s", dir) return nil } @@ -433,11 +433,11 @@ func (c *flockerVolumeUnmounter) TearDownAt(dir string) error { return util.UnmountPath(dir, c.mounter) } -func (plugin *flockerPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) { - return plugin.newDeleterInternal(spec, &FlockerUtil{}) +func (p *flockerPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) { + return p.newDeleterInternal(spec, &flockerUtil{}) } -func (plugin *flockerPlugin) newDeleterInternal(spec *volume.Spec, manager volumeManager) (volume.Deleter, error) { +func (p *flockerPlugin) newDeleterInternal(spec *volume.Spec, manager volumeManager) (volume.Deleter, error) { if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.Flocker == nil { return nil, fmt.Errorf("spec.PersistentVolumeSource.Flocker is nil") } @@ -450,15 +450,15 @@ func (plugin *flockerPlugin) newDeleterInternal(spec *volume.Spec, manager volum }}, nil } -func (plugin *flockerPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) { - return plugin.newProvisionerInternal(options, &FlockerUtil{}) +func (p *flockerPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) { + return p.newProvisionerInternal(options, &flockerUtil{}) } -func (plugin *flockerPlugin) newProvisionerInternal(options volume.VolumeOptions, manager volumeManager) (volume.Provisioner, error) { +func (p *flockerPlugin) newProvisionerInternal(options volume.VolumeOptions, manager volumeManager) (volume.Provisioner, error) { return &flockerVolumeProvisioner{ flockerVolume: &flockerVolume{ manager: manager, - plugin: plugin, + plugin: p, }, options: options, }, nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker_util.go index 4798ed692ef0..e753da9acd03 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker_util.go @@ -18,7 +18,6 @@ package flocker import ( "fmt" - "time" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/rand" @@ -26,12 +25,12 @@ import ( volutil "k8s.io/kubernetes/pkg/volume/util" flockerapi "github.com/clusterhq/flocker-go" - "github.com/golang/glog" + "k8s.io/klog" ) -type FlockerUtil struct{} +type flockerUtil struct{} -func (util *FlockerUtil) DeleteVolume(d *flockerVolumeDeleter) error { +func (util *flockerUtil) DeleteVolume(d *flockerVolumeDeleter) error { var err error if d.flockerClient == nil { @@ -49,7 +48,7 @@ func (util *FlockerUtil) DeleteVolume(d *flockerVolumeDeleter) error { return d.flockerClient.DeleteDataset(datasetUUID) } -func (util *FlockerUtil) CreateVolume(c *flockerVolumeProvisioner) (datasetUUID string, volumeSizeGiB int, labels map[string]string, err error) { +func (util *flockerUtil) CreateVolume(c *flockerVolumeProvisioner) (datasetUUID string, volumeSizeGiB int, labels map[string]string, err error) { if c.flockerClient == nil { c.flockerClient, err = c.plugin.newFlockerClient("") @@ -68,9 +67,8 @@ func (util *FlockerUtil) CreateVolume(c 
*flockerVolumeProvisioner) (datasetUUID } // select random node - rand.Seed(time.Now().UTC().UnixNano()) node := nodes[rand.Intn(len(nodes))] - glog.V(2).Infof("selected flocker node with UUID '%s' to provision dataset", node.UUID) + klog.V(2).Infof("selected flocker node with UUID '%s' to provision dataset", node.UUID) capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] requestBytes := capacity.Value() @@ -94,7 +92,7 @@ func (util *FlockerUtil) CreateVolume(c *flockerVolumeProvisioner) (datasetUUID } datasetUUID = datasetState.DatasetID - glog.V(2).Infof("successfully created Flocker dataset with UUID '%s'", datasetUUID) + klog.V(2).Infof("successfully created Flocker dataset with UUID '%s'", datasetUUID) return } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker_volume.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker_volume.go index e25e2d2fdcb2..3ce91874de8a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker_volume.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flocker/flocker_volume.go @@ -43,8 +43,8 @@ func (b *flockerVolumeDeleter) GetPath() string { return getPath(b.podUID, b.volName, b.plugin.host) } -func (d *flockerVolumeDeleter) Delete() error { - return d.manager.DeleteVolume(d) +func (b *flockerVolumeDeleter) Delete() error { + return b.manager.DeleteVolume(b) } type flockerVolumeProvisioner struct { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/BUILD deleted file mode 100644 index 1dcbf19938bc..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/BUILD +++ /dev/null @@ -1,78 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = [ - "attacher.go", - "doc.go", - "gce_pd.go", - "gce_pd_block.go", - "gce_util.go", - ], - importpath = "k8s.io/kubernetes/pkg/volume/gce_pd", - deps = [ - "//pkg/cloudprovider:go_default_library", - "//pkg/cloudprovider/providers/gce:go_default_library", - "//pkg/features:go_default_library", - "//pkg/kubelet/apis:go_default_library", - "//pkg/util/file:go_default_library", - "//pkg/util/mount:go_default_library", - "//pkg/util/strings:go_default_library", - "//pkg/volume:go_default_library", - "//pkg/volume/util:go_default_library", - "//pkg/volume/util/volumepathhandler:go_default_library", - "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/utils/exec:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "attacher_test.go", - "gce_pd_block_test.go", - "gce_pd_test.go", - "gce_util_test.go", - ], - embed = [":go_default_library"], - deps = [ - "//pkg/kubelet/apis:go_default_library", - "//pkg/util/mount:go_default_library", - "//pkg/volume:go_default_library", - "//pkg/volume/testing:go_default_library", - "//pkg/volume/util:go_default_library", - 
"//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", - "//staging/src/k8s.io/client-go/util/testing:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/attacher.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/attacher.go deleted file mode 100644 index 1a84fc4cba98..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/attacher.go +++ /dev/null @@ -1,289 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package gce_pd - -import ( - "fmt" - "os" - "path" - "path/filepath" - "strconv" - "time" - - "github.com/golang/glog" - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" - "k8s.io/kubernetes/pkg/volume" - volumeutil "k8s.io/kubernetes/pkg/volume/util" -) - -type gcePersistentDiskAttacher struct { - host volume.VolumeHost - gceDisks gce.Disks -} - -var _ volume.Attacher = &gcePersistentDiskAttacher{} - -var _ volume.DeviceMounter = &gcePersistentDiskAttacher{} - -var _ volume.AttachableVolumePlugin = &gcePersistentDiskPlugin{} - -var _ volume.DeviceMountableVolumePlugin = &gcePersistentDiskPlugin{} - -func (plugin *gcePersistentDiskPlugin) NewAttacher() (volume.Attacher, error) { - gceCloud, err := getCloudProvider(plugin.host.GetCloudProvider()) - if err != nil { - return nil, err - } - - return &gcePersistentDiskAttacher{ - host: plugin.host, - gceDisks: gceCloud, - }, nil -} - -func (plugin *gcePersistentDiskPlugin) NewDeviceMounter() (volume.DeviceMounter, error) { - return plugin.NewAttacher() -} - -func (plugin *gcePersistentDiskPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) { - mounter := plugin.host.GetMounter(plugin.GetPluginName()) - return mounter.GetMountRefs(deviceMountPath) -} - -// Attach checks with the GCE cloud provider if the specified volume is already -// attached to the node with the specified Name. -// If the volume is attached, it succeeds (returns nil). -// If it is not, Attach issues a call to the GCE cloud provider to attach it. -// Callers are responsible for retrying on failure. -// Callers are responsible for thread safety between concurrent attach and -// detach operations. 
-func (attacher *gcePersistentDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) { - volumeSource, readOnly, err := getVolumeSource(spec) - if err != nil { - return "", err - } - - pdName := volumeSource.PDName - - attached, err := attacher.gceDisks.DiskIsAttached(pdName, nodeName) - if err != nil { - // Log error and continue with attach - glog.Errorf( - "Error checking if PD (%q) is already attached to current node (%q). Will continue and try attach anyway. err=%v", - pdName, nodeName, err) - } - - if err == nil && attached { - // Volume is already attached to node. - glog.Infof("Attach operation is successful. PD %q is already attached to node %q.", pdName, nodeName) - } else { - if err := attacher.gceDisks.AttachDisk(pdName, nodeName, readOnly, isRegionalPD(spec)); err != nil { - glog.Errorf("Error attaching PD %q to node %q: %+v", pdName, nodeName, err) - return "", err - } - } - - return path.Join(diskByIdPath, diskGooglePrefix+pdName), nil -} - -func (attacher *gcePersistentDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) { - volumesAttachedCheck := make(map[*volume.Spec]bool) - volumePdNameMap := make(map[string]*volume.Spec) - pdNameList := []string{} - for _, spec := range specs { - volumeSource, _, err := getVolumeSource(spec) - // If error is occurred, skip this volume and move to the next one - if err != nil { - glog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err) - continue - } - pdNameList = append(pdNameList, volumeSource.PDName) - volumesAttachedCheck[spec] = true - volumePdNameMap[volumeSource.PDName] = spec - } - attachedResult, err := attacher.gceDisks.DisksAreAttached(pdNameList, nodeName) - if err != nil { - // Log error and continue with attach - glog.Errorf( - "Error checking if PDs (%v) are already attached to current node (%q). err=%v", - pdNameList, nodeName, err) - return volumesAttachedCheck, err - } - - for pdName, attached := range attachedResult { - if !attached { - spec := volumePdNameMap[pdName] - volumesAttachedCheck[spec] = false - glog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached", pdName, spec.Name()) - } - } - return volumesAttachedCheck, nil -} - -func (attacher *gcePersistentDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) { - ticker := time.NewTicker(checkSleepDuration) - defer ticker.Stop() - timer := time.NewTimer(timeout) - defer timer.Stop() - - volumeSource, _, err := getVolumeSource(spec) - if err != nil { - return "", err - } - - pdName := volumeSource.PDName - partition := "" - if volumeSource.Partition != 0 { - partition = strconv.Itoa(int(volumeSource.Partition)) - } - - sdBefore, err := filepath.Glob(diskSDPattern) - if err != nil { - glog.Errorf("Error filepath.Glob(\"%s\"): %v\r\n", diskSDPattern, err) - } - sdBeforeSet := sets.NewString(sdBefore...) - - devicePaths := getDiskByIdPaths(pdName, partition) - for { - select { - case <-ticker.C: - glog.V(5).Infof("Checking GCE PD %q is attached.", pdName) - path, err := verifyDevicePath(devicePaths, sdBeforeSet, pdName) - if err != nil { - // Log error, if any, and continue checking periodically. 
See issue #11321 - glog.Errorf("Error verifying GCE PD (%q) is attached: %v", pdName, err) - } else if path != "" { - // A device path has successfully been created for the PD - glog.Infof("Successfully found attached GCE PD %q.", pdName) - return path, nil - } - case <-timer.C: - return "", fmt.Errorf("Could not find attached GCE PD %q. Timeout waiting for mount paths to be created.", pdName) - } - } -} - -func (attacher *gcePersistentDiskAttacher) GetDeviceMountPath( - spec *volume.Spec) (string, error) { - volumeSource, _, err := getVolumeSource(spec) - if err != nil { - return "", err - } - - return makeGlobalPDName(attacher.host, volumeSource.PDName), nil -} - -func (attacher *gcePersistentDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error { - // Only mount the PD globally once. - mounter := attacher.host.GetMounter(gcePersistentDiskPluginName) - notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath) - if err != nil { - if os.IsNotExist(err) { - if err := os.MkdirAll(deviceMountPath, 0750); err != nil { - return err - } - notMnt = true - } else { - return err - } - } - - volumeSource, readOnly, err := getVolumeSource(spec) - if err != nil { - return err - } - - options := []string{} - if readOnly { - options = append(options, "ro") - } - if notMnt { - diskMounter := volumeutil.NewSafeFormatAndMountFromHost(gcePersistentDiskPluginName, attacher.host) - mountOptions := volumeutil.MountOptionFromSpec(spec, options...) - err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions) - if err != nil { - os.Remove(deviceMountPath) - return err - } - glog.V(4).Infof("formatting spec %v devicePath %v deviceMountPath %v fs %v with options %+v", spec.Name(), devicePath, deviceMountPath, volumeSource.FSType, options) - } - return nil -} - -type gcePersistentDiskDetacher struct { - host volume.VolumeHost - gceDisks gce.Disks -} - -var _ volume.Detacher = &gcePersistentDiskDetacher{} - -var _ volume.DeviceUnmounter = &gcePersistentDiskDetacher{} - -func (plugin *gcePersistentDiskPlugin) NewDetacher() (volume.Detacher, error) { - gceCloud, err := getCloudProvider(plugin.host.GetCloudProvider()) - if err != nil { - return nil, err - } - - return &gcePersistentDiskDetacher{ - host: plugin.host, - gceDisks: gceCloud, - }, nil -} - -func (plugin *gcePersistentDiskPlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) { - return plugin.NewDetacher() -} - -// Detach checks with the GCE cloud provider if the specified volume is already -// attached to the specified node. If the volume is not attached, it succeeds -// (returns nil). If it is attached, Detach issues a call to the GCE cloud -// provider to attach it. -// Callers are responsible for retrying on failure. -// Callers are responsible for thread safety between concurrent attach and detach -// operations. -func (detacher *gcePersistentDiskDetacher) Detach(volumeName string, nodeName types.NodeName) error { - pdName := path.Base(volumeName) - - attached, err := detacher.gceDisks.DiskIsAttached(pdName, nodeName) - if err != nil { - // Log error and continue with detach - glog.Errorf( - "Error checking if PD (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v", - pdName, nodeName, err) - } - - if err == nil && !attached { - // Volume is not attached to node. Success! - glog.Infof("Detach operation is successful. 
PD %q was not attached to node %q.", pdName, nodeName) - return nil - } - - if err = detacher.gceDisks.DetachDisk(pdName, nodeName); err != nil { - glog.Errorf("Error detaching PD %q from node %q: %v", pdName, nodeName, err) - return err - } - - return nil -} - -func (detacher *gcePersistentDiskDetacher) UnmountDevice(deviceMountPath string) error { - return volumeutil.UnmountPath(deviceMountPath, detacher.host.GetMounter(gcePersistentDiskPluginName)) -} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/doc.go deleted file mode 100644 index 39b5db1d9a6b..000000000000 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package gce_pd contains the internal representation of GCE PersistentDisk -// volumes. -package gce_pd diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/BUILD new file mode 100644 index 000000000000..938b57573308 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/BUILD @@ -0,0 +1,78 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = [ + "attacher.go", + "doc.go", + "gce_pd.go", + "gce_pd_block.go", + "gce_util.go", + ], + importpath = "k8s.io/kubernetes/pkg/volume/gcepd", + deps = [ + "//pkg/cloudprovider/providers/gce:go_default_library", + "//pkg/features:go_default_library", + "//pkg/kubelet/apis:go_default_library", + "//pkg/util/file:go_default_library", + "//pkg/util/mount:go_default_library", + "//pkg/util/strings:go_default_library", + "//pkg/volume:go_default_library", + "//pkg/volume/util:go_default_library", + "//pkg/volume/util/volumepathhandler:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/cloud-provider:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/exec:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "attacher_test.go", + "gce_pd_block_test.go", + "gce_pd_test.go", + "gce_util_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//pkg/kubelet/apis:go_default_library", + "//pkg/util/mount:go_default_library", + "//pkg/volume:go_default_library", + "//pkg/volume/testing:go_default_library", + "//pkg/volume/util:go_default_library", + 
"//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", + "//staging/src/k8s.io/client-go/util/testing:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/OWNERS similarity index 100% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/OWNERS rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/OWNERS diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/attacher.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/attacher.go new file mode 100644 index 000000000000..5567e29f28ff --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/attacher.go @@ -0,0 +1,289 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gcepd + +import ( + "fmt" + "os" + "path" + "path/filepath" + "strconv" + "time" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog" + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" + "k8s.io/kubernetes/pkg/volume" + volumeutil "k8s.io/kubernetes/pkg/volume/util" +) + +type gcePersistentDiskAttacher struct { + host volume.VolumeHost + gceDisks gce.Disks +} + +var _ volume.Attacher = &gcePersistentDiskAttacher{} + +var _ volume.DeviceMounter = &gcePersistentDiskAttacher{} + +var _ volume.AttachableVolumePlugin = &gcePersistentDiskPlugin{} + +var _ volume.DeviceMountableVolumePlugin = &gcePersistentDiskPlugin{} + +func (plugin *gcePersistentDiskPlugin) NewAttacher() (volume.Attacher, error) { + gceCloud, err := getCloudProvider(plugin.host.GetCloudProvider()) + if err != nil { + return nil, err + } + + return &gcePersistentDiskAttacher{ + host: plugin.host, + gceDisks: gceCloud, + }, nil +} + +func (plugin *gcePersistentDiskPlugin) NewDeviceMounter() (volume.DeviceMounter, error) { + return plugin.NewAttacher() +} + +func (plugin *gcePersistentDiskPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) { + mounter := plugin.host.GetMounter(plugin.GetPluginName()) + return mounter.GetMountRefs(deviceMountPath) +} + +// Attach checks with the GCE cloud provider if the specified volume is already +// attached to the node with the specified Name. +// If the volume is attached, it succeeds (returns nil). 
+// If it is not, Attach issues a call to the GCE cloud provider to attach it. +// Callers are responsible for retrying on failure. +// Callers are responsible for thread safety between concurrent attach and +// detach operations. +func (attacher *gcePersistentDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) { + volumeSource, readOnly, err := getVolumeSource(spec) + if err != nil { + return "", err + } + + pdName := volumeSource.PDName + + attached, err := attacher.gceDisks.DiskIsAttached(pdName, nodeName) + if err != nil { + // Log error and continue with attach + klog.Errorf( + "Error checking if PD (%q) is already attached to current node (%q). Will continue and try attach anyway. err=%v", + pdName, nodeName, err) + } + + if err == nil && attached { + // Volume is already attached to node. + klog.Infof("Attach operation is successful. PD %q is already attached to node %q.", pdName, nodeName) + } else { + if err := attacher.gceDisks.AttachDisk(pdName, nodeName, readOnly, isRegionalPD(spec)); err != nil { + klog.Errorf("Error attaching PD %q to node %q: %+v", pdName, nodeName, err) + return "", err + } + } + + return path.Join(diskByIDPath, diskGooglePrefix+pdName), nil +} + +func (attacher *gcePersistentDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) { + volumesAttachedCheck := make(map[*volume.Spec]bool) + volumePdNameMap := make(map[string]*volume.Spec) + pdNameList := []string{} + for _, spec := range specs { + volumeSource, _, err := getVolumeSource(spec) + // If error is occurred, skip this volume and move to the next one + if err != nil { + klog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err) + continue + } + pdNameList = append(pdNameList, volumeSource.PDName) + volumesAttachedCheck[spec] = true + volumePdNameMap[volumeSource.PDName] = spec + } + attachedResult, err := attacher.gceDisks.DisksAreAttached(pdNameList, nodeName) + if err != nil { + // Log error and continue with attach + klog.Errorf( + "Error checking if PDs (%v) are already attached to current node (%q). err=%v", + pdNameList, nodeName, err) + return volumesAttachedCheck, err + } + + for pdName, attached := range attachedResult { + if !attached { + spec := volumePdNameMap[pdName] + volumesAttachedCheck[spec] = false + klog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached", pdName, spec.Name()) + } + } + return volumesAttachedCheck, nil +} + +func (attacher *gcePersistentDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) { + ticker := time.NewTicker(checkSleepDuration) + defer ticker.Stop() + timer := time.NewTimer(timeout) + defer timer.Stop() + + volumeSource, _, err := getVolumeSource(spec) + if err != nil { + return "", err + } + + pdName := volumeSource.PDName + partition := "" + if volumeSource.Partition != 0 { + partition = strconv.Itoa(int(volumeSource.Partition)) + } + + sdBefore, err := filepath.Glob(diskSDPattern) + if err != nil { + klog.Errorf("Error filepath.Glob(\"%s\"): %v\r\n", diskSDPattern, err) + } + sdBeforeSet := sets.NewString(sdBefore...) + + devicePaths := getDiskByIDPaths(pdName, partition) + for { + select { + case <-ticker.C: + klog.V(5).Infof("Checking GCE PD %q is attached.", pdName) + path, err := verifyDevicePath(devicePaths, sdBeforeSet, pdName) + if err != nil { + // Log error, if any, and continue checking periodically. 
See issue #11321 + klog.Errorf("Error verifying GCE PD (%q) is attached: %v", pdName, err) + } else if path != "" { + // A device path has successfully been created for the PD + klog.Infof("Successfully found attached GCE PD %q.", pdName) + return path, nil + } + case <-timer.C: + return "", fmt.Errorf("could not find attached GCE PD %q. Timeout waiting for mount paths to be created", pdName) + } + } +} + +func (attacher *gcePersistentDiskAttacher) GetDeviceMountPath( + spec *volume.Spec) (string, error) { + volumeSource, _, err := getVolumeSource(spec) + if err != nil { + return "", err + } + + return makeGlobalPDName(attacher.host, volumeSource.PDName), nil +} + +func (attacher *gcePersistentDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error { + // Only mount the PD globally once. + mounter := attacher.host.GetMounter(gcePersistentDiskPluginName) + notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath) + if err != nil { + if os.IsNotExist(err) { + if err := os.MkdirAll(deviceMountPath, 0750); err != nil { + return err + } + notMnt = true + } else { + return err + } + } + + volumeSource, readOnly, err := getVolumeSource(spec) + if err != nil { + return err + } + + options := []string{} + if readOnly { + options = append(options, "ro") + } + if notMnt { + diskMounter := volumeutil.NewSafeFormatAndMountFromHost(gcePersistentDiskPluginName, attacher.host) + mountOptions := volumeutil.MountOptionFromSpec(spec, options...) + err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions) + if err != nil { + os.Remove(deviceMountPath) + return err + } + klog.V(4).Infof("formatting spec %v devicePath %v deviceMountPath %v fs %v with options %+v", spec.Name(), devicePath, deviceMountPath, volumeSource.FSType, options) + } + return nil +} + +type gcePersistentDiskDetacher struct { + host volume.VolumeHost + gceDisks gce.Disks +} + +var _ volume.Detacher = &gcePersistentDiskDetacher{} + +var _ volume.DeviceUnmounter = &gcePersistentDiskDetacher{} + +func (plugin *gcePersistentDiskPlugin) NewDetacher() (volume.Detacher, error) { + gceCloud, err := getCloudProvider(plugin.host.GetCloudProvider()) + if err != nil { + return nil, err + } + + return &gcePersistentDiskDetacher{ + host: plugin.host, + gceDisks: gceCloud, + }, nil +} + +func (plugin *gcePersistentDiskPlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) { + return plugin.NewDetacher() +} + +// Detach checks with the GCE cloud provider if the specified volume is already +// attached to the specified node. If the volume is not attached, it succeeds +// (returns nil). If it is attached, Detach issues a call to the GCE cloud +// provider to attach it. +// Callers are responsible for retrying on failure. +// Callers are responsible for thread safety between concurrent attach and detach +// operations. +func (detacher *gcePersistentDiskDetacher) Detach(volumeName string, nodeName types.NodeName) error { + pdName := path.Base(volumeName) + + attached, err := detacher.gceDisks.DiskIsAttached(pdName, nodeName) + if err != nil { + // Log error and continue with detach + klog.Errorf( + "Error checking if PD (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v", + pdName, nodeName, err) + } + + if err == nil && !attached { + // Volume is not attached to node. Success! + klog.Infof("Detach operation is successful. 
PD %q was not attached to node %q.", pdName, nodeName) + return nil + } + + if err = detacher.gceDisks.DetachDisk(pdName, nodeName); err != nil { + klog.Errorf("Error detaching PD %q from node %q: %v", pdName, nodeName, err) + return err + } + + return nil +} + +func (detacher *gcePersistentDiskDetacher) UnmountDevice(deviceMountPath string) error { + return volumeutil.UnmountPath(deviceMountPath, detacher.host.GetMounter(gcePersistentDiskPluginName)) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/doc.go new file mode 100644 index 000000000000..9d6fa04045fb --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package gcepd contains the internal representation of GCE PersistentDisk +// volumes. +package gcepd diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_pd.go similarity index 92% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd.go rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_pd.go index ebe68e4fc83a..455722033200 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_pd.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package gce_pd +package gcepd import ( "context" @@ -24,12 +24,12 @@ import ( "strconv" "strings" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" "k8s.io/kubernetes/pkg/features" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" @@ -39,7 +39,7 @@ import ( "k8s.io/kubernetes/pkg/volume/util" ) -// This is the primary entrypoint for volume plugins. +// ProbeVolumePlugins is the primary entrypoint for volume plugins. 
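Reviewer note: `WaitForAttach` above is the standard bounded-polling shape: a `time.Ticker` drives periodic `verifyDevicePath` checks, a `time.Timer` caps the total wait, and per-check errors are logged without aborting the loop. A runnable sketch of that loop, with `probeDevice` as a hypothetical stand-in for the device check:

```go
package main

import (
	"fmt"
	"time"
)

// probeDevice is a hypothetical stand-in for verifyDevicePath: it reports a
// device path once the disk finally shows up under /dev/disk/by-id.
func probeDevice(attempt int) (string, error) {
	if attempt >= 3 {
		return "/dev/disk/by-id/google-my-pd", nil
	}
	return "", nil
}

// waitForAttach mirrors the attacher's loop: a ticker drives periodic checks
// and a timer bounds the whole wait.
func waitForAttach(timeout time.Duration) (string, error) {
	ticker := time.NewTicker(10 * time.Millisecond) // checkSleepDuration in the real code
	defer ticker.Stop()
	timer := time.NewTimer(timeout)
	defer timer.Stop()

	for attempt := 0; ; attempt++ {
		select {
		case <-ticker.C:
			path, err := probeDevice(attempt)
			if err != nil {
				// Log and keep polling, exactly like the vendored loop.
				fmt.Println("error verifying device, will retry:", err)
			} else if path != "" {
				return path, nil
			}
		case <-timer.C:
			return "", fmt.Errorf("could not find attached GCE PD: timed out after %v", timeout)
		}
	}
}

func main() {
	fmt.Println(waitForAttach(time.Second))
}
```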
func ProbeVolumePlugins() []volume.VolumePlugin { return []volume.VolumePlugin{&gcePersistentDiskPlugin{nil}} } @@ -137,13 +137,13 @@ func (plugin *gcePersistentDiskPlugin) GetVolumeLimits() (map[string]int64, erro instances, ok := cloud.Instances() if !ok { - glog.Warning("Failed to get instances from cloud provider") + klog.Warning("Failed to get instances from cloud provider") return volumeLimits, nil } instanceType, err := instances.InstanceType(context.TODO(), plugin.host.GetNodeName()) if err != nil { - glog.Errorf("Failed to get instance type from GCE cloud provider") + klog.Errorf("Failed to get instance type from GCE cloud provider") return volumeLimits, nil } if strings.HasPrefix(instanceType, "n1-") { @@ -211,7 +211,8 @@ func (plugin *gcePersistentDiskPlugin) newMounterInternal(spec *volume.Spec, pod plugin: plugin, MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)), }, - readOnly: readOnly}, nil + mountOptions: util.MountOptionFromSpec(spec), + readOnly: readOnly}, nil } func (plugin *gcePersistentDiskPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) { @@ -283,6 +284,13 @@ func (plugin *gcePersistentDiskPlugin) ExpandVolumeDevice( return updatedQuantity, nil } +func (plugin *gcePersistentDiskPlugin) ExpandFS(spec *volume.Spec, devicePath, deviceMountPath string, _, _ resource.Quantity) error { + _, err := util.GenericResizeFS(plugin.host, plugin.GetPluginName(), devicePath, deviceMountPath) + return err +} + +var _ volume.FSResizableVolumePlugin = &gcePersistentDiskPlugin{} + func (plugin *gcePersistentDiskPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) { mounter := plugin.host.GetMounter(plugin.GetPluginName()) pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName()) @@ -329,7 +337,8 @@ type gcePersistentDisk struct { type gcePersistentDiskMounter struct { *gcePersistentDisk // Specifies whether the disk will be mounted as read-only. - readOnly bool + readOnly bool + mountOptions []string } var _ volume.Mounter = &gcePersistentDiskMounter{} @@ -358,9 +367,9 @@ func (b *gcePersistentDiskMounter) SetUp(fsGroup *int64) error { func (b *gcePersistentDiskMounter) SetUpAt(dir string, fsGroup *int64) error { // TODO: handle failed mounts here. 
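Reviewer note: the `SetUpAt` hunk just below threads the PV's mount options through `util.JoinMountOptions`, merging them with the options the mounter computes itself (such as `ro`). A minimal stand-in for that merge, assuming the real helper dedupes and returns a sorted union:

```go
package main

import (
	"fmt"
	"sort"
)

// joinMountOptions is a minimal stand-in for util.JoinMountOptions: merge
// user-supplied PV mount options with plugin-computed ones, dropping
// duplicates; the sketch assumes the helper returns a sorted union.
func joinMountOptions(userOptions, systemOptions []string) []string {
	seen := make(map[string]bool)
	var out []string
	merged := append(append([]string{}, systemOptions...), userOptions...)
	for _, opt := range merged {
		if !seen[opt] {
			seen[opt] = true
			out = append(out, opt)
		}
	}
	sort.Strings(out)
	return out
}

func main() {
	// Options from the PV spec plus the options SetUpAt computed (read-only).
	fmt.Println(joinMountOptions([]string{"noatime", "ro"}, []string{"ro"}))
	// [noatime ro]
}
```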
notMnt, err := b.mounter.IsLikelyNotMountPoint(dir) - glog.V(4).Infof("GCE PersistentDisk set up: Dir (%s) PD name (%q) Mounted (%t) Error (%v), ReadOnly (%t)", dir, b.pdName, !notMnt, err, b.readOnly) + klog.V(4).Infof("GCE PersistentDisk set up: Dir (%s) PD name (%q) Mounted (%t) Error (%v), ReadOnly (%t)", dir, b.pdName, !notMnt, err, b.readOnly) if err != nil && !os.IsNotExist(err) { - glog.Errorf("cannot validate mount point: %s %v", dir, err) + klog.Errorf("cannot validate mount point: %s %v", dir, err) return err } if !notMnt { @@ -368,7 +377,7 @@ func (b *gcePersistentDiskMounter) SetUpAt(dir string, fsGroup *int64) error { } if err := os.MkdirAll(dir, 0750); err != nil { - glog.Errorf("mkdir failed on disk %s (%v)", dir, err) + klog.Errorf("mkdir failed on disk %s (%v)", dir, err) return err } @@ -379,33 +388,35 @@ func (b *gcePersistentDiskMounter) SetUpAt(dir string, fsGroup *int64) error { } globalPDPath := makeGlobalPDName(b.plugin.host, b.pdName) - glog.V(4).Infof("attempting to mount %s", dir) + klog.V(4).Infof("attempting to mount %s", dir) + + mountOptions := util.JoinMountOptions(b.mountOptions, options) - err = b.mounter.Mount(globalPDPath, dir, "", options) + err = b.mounter.Mount(globalPDPath, dir, "", mountOptions) if err != nil { notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if !notMnt { if mntErr = b.mounter.Unmount(dir); mntErr != nil { - glog.Errorf("Failed to unmount: %v", mntErr) + klog.Errorf("Failed to unmount: %v", mntErr) return err } notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if !notMnt { // This is very odd, we don't expect it. We'll try again next sync loop. - glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir) + klog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir) return err } } os.Remove(dir) - glog.Errorf("Mount of disk %s failed: %v", dir, err) + klog.Errorf("Mount of disk %s failed: %v", dir, err) return err } @@ -413,7 +424,7 @@ func (b *gcePersistentDiskMounter) SetUpAt(dir string, fsGroup *int64) error { volume.SetVolumeOwnership(b, fsGroup) } - glog.V(4).Infof("Successfully mounted %s", dir) + klog.V(4).Infof("Successfully mounted %s", dir) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd_block.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_pd_block.go similarity index 98% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd_block.go rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_pd_block.go index 24dcfce731f1..f4e7e2c7252d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd_block.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_pd_block.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package gce_pd +package gcepd import ( "fmt" @@ -22,9 +22,9 @@ import ( "path/filepath" "strconv" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" kstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" @@ -46,7 +46,7 @@ func (plugin *gcePersistentDiskPlugin) ConstructBlockVolumeSpec(podUID types.UID if err != nil { return nil, err } - glog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err) + klog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err) globalMapPath := filepath.Dir(globalMapPathUUID) if len(globalMapPath) <= 1 { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_util.go similarity index 84% rename from cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_util.go rename to cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_util.go index 4bfe275445e9..153bab8716d1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_util.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package gce_pd +package gcepd import ( "fmt" @@ -24,11 +24,11 @@ import ( "strings" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" "k8s.io/kubernetes/pkg/features" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" @@ -39,7 +39,7 @@ import ( ) const ( - diskByIdPath = "/dev/disk/by-id/" + diskByIDPath = "/dev/disk/by-id/" diskGooglePrefix = "google-" diskScsiGooglePrefix = "scsi-0Google_PersistentDisk_" diskPartitionSuffix = "-part" @@ -61,14 +61,17 @@ const ( var ( // errorSleepDuration is modified only in unit tests and should be constant // otherwise. - errorSleepDuration time.Duration = 5 * time.Second + errorSleepDuration = 5 * time.Second // regex to parse scsi_id output and extract the serial scsiRegex = regexp.MustCompile(scsiPattern) ) +// GCEDiskUtil provides operation for GCE PD type GCEDiskUtil struct{} +// DeleteVolume deletes a GCE PD +// Returns: error func (util *GCEDiskUtil) DeleteVolume(d *gcePersistentDiskDeleter) error { cloud, err := getCloudProvider(d.gcePersistentDisk.plugin.host.GetCloudProvider()) if err != nil { @@ -76,18 +79,18 @@ func (util *GCEDiskUtil) DeleteVolume(d *gcePersistentDiskDeleter) error { } if err = cloud.DeleteDisk(d.pdName); err != nil { - glog.V(2).Infof("Error deleting GCE PD volume %s: %v", d.pdName, err) + klog.V(2).Infof("Error deleting GCE PD volume %s: %v", d.pdName, err) // GCE cloud provider returns volume.deletedVolumeInUseError when // necessary, no handling needed here. return err } - glog.V(2).Infof("Successfully deleted GCE PD volume %s", d.pdName) + klog.V(2).Infof("Successfully deleted GCE PD volume %s", d.pdName) return nil } // CreateVolume creates a GCE PD. 
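Almost every hunk in these vendored files is the same mechanical swap: drop `github.com/golang/glog`, import `k8s.io/klog`, and rename the package qualifier. That works because klog is an API-compatible fork of glog, so the call sites keep their exact shape. A minimal sketch (run with `-v=2` to see the leveled line):

```go
package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil) // registers -v, -logtostderr, etc. on the default flag set
	flag.Parse()
	defer klog.Flush()

	// Call sites are byte-for-byte what the glog versions were:
	klog.V(2).Infof("Successfully deleted GCE PD volume %s", "example-pd")
	klog.Warning("Failed to get instances from cloud provider")
}
```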
// Returns: gcePDName, volumeSizeGB, labels, fsType, error -func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (string, int, map[string]string, string, error) { +func (util *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (string, int, map[string]string, string, error) { cloud, err := getCloudProvider(c.gcePersistentDisk.plugin.host.GetCloudProvider()) if err != nil { return "", 0, nil, "", err @@ -151,7 +154,7 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner, node * case replicationTypeRegionalPD: selectedZones, err := volumeutil.SelectZonesForVolume(zonePresent, zonesPresent, configuredZone, configuredZones, activezones, node, allowedTopologies, c.options.PVC.Name, maxRegionalPDZones) if err != nil { - glog.V(2).Infof("Error selecting zones for regional GCE PD volume: %v", err) + klog.V(2).Infof("Error selecting zones for regional GCE PD volume: %v", err) return "", 0, nil, "", err } if err = cloud.CreateRegionalDisk( @@ -160,10 +163,10 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner, node * selectedZones, int64(requestGB), *c.options.CloudTags); err != nil { - glog.V(2).Infof("Error creating regional GCE PD volume: %v", err) + klog.V(2).Infof("Error creating regional GCE PD volume: %v", err) return "", 0, nil, "", err } - glog.V(2).Infof("Successfully created Regional GCE PD volume %s", name) + klog.V(2).Infof("Successfully created Regional GCE PD volume %s", name) case replicationTypeNone: selectedZone, err := volumeutil.SelectZoneForVolume(zonePresent, zonesPresent, configuredZone, configuredZones, activezones, node, allowedTopologies, c.options.PVC.Name) @@ -176,10 +179,10 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner, node * selectedZone, int64(requestGB), *c.options.CloudTags); err != nil { - glog.V(2).Infof("Error creating single-zone GCE PD volume: %v", err) + klog.V(2).Infof("Error creating single-zone GCE PD volume: %v", err) return "", 0, nil, "", err } - glog.V(2).Infof("Successfully created single-zone GCE PD volume %s", name) + klog.V(2).Infof("Successfully created single-zone GCE PD volume %s", name) default: return "", 0, nil, "", fmt.Errorf("replication-type of '%s' is not supported", replicationType) @@ -188,7 +191,7 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner, node * labels, err := cloud.GetAutoLabelsForPD(name, "" /* zone */) if err != nil { // We don't really want to leak the volume here... - glog.Errorf("error getting labels for volume %q: %v", name, err) + klog.Errorf("error getting labels for volume %q: %v", name, err) } return name, int(requestGB), labels, fstype, nil @@ -200,7 +203,7 @@ func verifyDevicePath(devicePaths []string, sdBeforeSet sets.String, diskName st // It's possible udevadm was called on other disks so it should not block this // call. If it did fail on this disk, then the devicePath will either // not exist or be wrong. If it's wrong, then the scsi_id check below will fail. 
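`CreateVolume` converts the PVC's requested capacity into a whole number of gigabytes (`requestGB` in the hunk) before calling the cloud API; the plugin's own rounding helper sits outside this hunk. The conversion is ceiling division on a `resource.Quantity`. A sketch under that assumption, with the helper name `roundUpToGiB` being mine, not the plugin's:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

const giB = 1024 * 1024 * 1024

// roundUpToGiB is a hypothetical helper: ceiling division of the requested
// bytes into whole GiB, so "1500Mi" provisions 2 GiB rather than 1.
func roundUpToGiB(q resource.Quantity) int64 {
	return (q.Value() + giB - 1) / giB
}

func main() {
	capacity := resource.MustParse("1500Mi")
	fmt.Printf("%s -> %d GiB\n", capacity.String(), roundUpToGiB(capacity)) // 1500Mi -> 2 GiB
}
```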
- glog.Errorf("udevadmChangeToNewDrives failed with: %v", err) + klog.Errorf("udevadmChangeToNewDrives failed with: %v", err) } for _, path := range devicePaths { @@ -216,7 +219,7 @@ func verifyDevicePath(devicePaths []string, sdBeforeSet sets.String, diskName st // The device link is not pointing to the correct device // Trigger udev on this device to try to fix the link if udevErr := udevadmChangeToDrive(path); udevErr != nil { - glog.Errorf("udevadmChangeToDrive %q failed with: %v", path, err) + klog.Errorf("udevadmChangeToDrive %q failed with: %v", path, err) } // Return error to retry WaitForAttach and verifyDevicePath @@ -238,7 +241,7 @@ func getScsiSerial(devicePath, diskName string) (string, error) { } if !exists { - glog.V(6).Infof("scsi_id doesn't exist; skipping check for %v", devicePath) + klog.V(6).Infof("scsi_id doesn't exist; skipping check for %v", devicePath) return diskName, nil } @@ -248,7 +251,7 @@ func getScsiSerial(devicePath, diskName string) (string, error) { "--whitelisted", fmt.Sprintf("--device=%v", devicePath)).CombinedOutput() if err != nil { - return "", fmt.Errorf("scsi_id failed for device %q with %v.", devicePath, err) + return "", fmt.Errorf("scsi_id failed for device %q with %v", devicePath, err) } return parseScsiSerial(string(out)) @@ -265,10 +268,10 @@ func parseScsiSerial(output string) (string, error) { } // Returns list of all /dev/disk/by-id/* paths for given PD. -func getDiskByIdPaths(pdName string, partition string) []string { +func getDiskByIDPaths(pdName string, partition string) []string { devicePaths := []string{ - path.Join(diskByIdPath, diskGooglePrefix+pdName), - path.Join(diskByIdPath, diskScsiGooglePrefix+pdName), + path.Join(diskByIDPath, diskGooglePrefix+pdName), + path.Join(diskByIDPath, diskScsiGooglePrefix+pdName), } if partition != "" { @@ -281,13 +284,13 @@ func getDiskByIdPaths(pdName string, partition string) []string { } // Return cloud provider -func getCloudProvider(cloudProvider cloudprovider.Interface) (*gcecloud.GCECloud, error) { +func getCloudProvider(cloudProvider cloudprovider.Interface) (*gcecloud.Cloud, error) { var err error for numRetries := 0; numRetries < maxRetries; numRetries++ { - gceCloudProvider, ok := cloudProvider.(*gcecloud.GCECloud) + gceCloudProvider, ok := cloudProvider.(*gcecloud.Cloud) if !ok || gceCloudProvider == nil { // Retry on error. See issue #11321 - glog.Errorf("Failed to get GCE Cloud Provider. plugin.host.GetCloudProvider returned %v instead", cloudProvider) + klog.Errorf("Failed to get GCE Cloud Provider. plugin.host.GetCloudProvider returned %v instead", cloudProvider) time.Sleep(errorSleepDuration) continue } @@ -305,7 +308,7 @@ func getCloudProvider(cloudProvider cloudprovider.Interface) (*gcecloud.GCECloud func udevadmChangeToNewDrives(sdBeforeSet sets.String) error { sdAfter, err := filepath.Glob(diskSDPattern) if err != nil { - return fmt.Errorf("Error filepath.Glob(\"%s\"): %v\r\n", diskSDPattern, err) + return fmt.Errorf("error filepath.Glob(\"%s\"): %v\r", diskSDPattern, err) } for _, sd := range sdAfter { @@ -321,18 +324,18 @@ func udevadmChangeToNewDrives(sdBeforeSet sets.String) error { // drivePath must be the block device path to trigger on, in the format "/dev/sd*", or a symlink to it. // This is workaround for Issue #7972. Once the underlying issue has been resolved, this may be removed. 
func udevadmChangeToDrive(drivePath string) error { - glog.V(5).Infof("udevadmChangeToDrive: drive=%q", drivePath) + klog.V(5).Infof("udevadmChangeToDrive: drive=%q", drivePath) // Evaluate symlink, if any drive, err := filepath.EvalSymlinks(drivePath) if err != nil { - return fmt.Errorf("udevadmChangeToDrive: filepath.EvalSymlinks(%q) failed with %v.", drivePath, err) + return fmt.Errorf("udevadmChangeToDrive: filepath.EvalSymlinks(%q) failed with %v", drivePath, err) } - glog.V(5).Infof("udevadmChangeToDrive: symlink path is %q", drive) + klog.V(5).Infof("udevadmChangeToDrive: symlink path is %q", drive) // Check to make sure input is "/dev/sd*" if !strings.Contains(drive, diskSDPath) { - return fmt.Errorf("udevadmChangeToDrive: expected input in the form \"%s\" but drive is %q.", diskSDPattern, drive) + return fmt.Errorf("udevadmChangeToDrive: expected input in the form \"%s\" but drive is %q", diskSDPattern, drive) } // Call "udevadm trigger --action=change --property-match=DEVNAME=/dev/sd..." @@ -342,7 +345,7 @@ func udevadmChangeToDrive(drivePath string) error { "--action=change", fmt.Sprintf("--property-match=DEVNAME=%s", drive)).CombinedOutput() if err != nil { - return fmt.Errorf("udevadmChangeToDrive: udevadm trigger failed for drive %q with %v.", drive, err) + return fmt.Errorf("udevadmChangeToDrive: udevadm trigger failed for drive %q with %v", drive, err) } return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/git_repo/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/git_repo/BUILD index 0cca9e06c744..48cfa4308dde 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/git_repo/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/git_repo/BUILD @@ -29,7 +29,7 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/volume:go_default_library", - "//pkg/volume/empty_dir:go_default_library", + "//pkg/volume/emptydir:go_default_library", "//pkg/volume/testing:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/BUILD index bd7d5e0cc28d..52e33003414f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/BUILD @@ -30,9 +30,9 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/heketi/heketi/client/api/go-client:go_default_library", "//vendor/github.com/heketi/heketi/pkg/glusterfs/api:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go index 4e29984e4318..5e1811263d64 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go @@ -26,7 +26,6 @@ import ( dstrings "strings" "sync" - "github.com/golang/glog" gcli "github.com/heketi/heketi/client/api/go-client" gapi "github.com/heketi/heketi/pkg/glusterfs/api" "k8s.io/api/core/v1" @@ -38,6 
+37,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/uuid" clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/strings" @@ -67,7 +67,7 @@ var _ volume.Deleter = &glusterfsVolumeDeleter{} const ( glusterfsPluginName = "kubernetes.io/glusterfs" volPrefix = "vol_" - dynamicEpSvcPrefix = "glusterfs-dynamic-" + dynamicEpSvcPrefix = "glusterfs-dynamic" replicaCount = 3 durabilityType = "replicate" secretKeyName = "key" // key name used in secret @@ -75,6 +75,14 @@ const ( defaultGidMin = 2000 defaultGidMax = math.MaxInt32 + // maxCustomEpNamePrefix is the maximum number of chars. + // which can be used as ep/svc name prefix. This number is carved + // out from below formula. + // max length of name of an ep - length of pvc uuid + // where max length of name of an ep is 63 and length of uuid is 37 + + maxCustomEpNamePrefixLen = 26 + // absoluteGidMin/Max are currently the same as the // default values, but they play a different role and // could take a different value. Only thing we need is: @@ -98,15 +106,30 @@ func (plugin *glusterfsPlugin) GetPluginName() string { } func (plugin *glusterfsPlugin) GetVolumeName(spec *volume.Spec) (string, error) { - volumeSource, _, err := getVolumeSource(spec) + var endpointName string + var endpointsNsPtr *string + + volPath, _, err := getVolumeInfo(spec) if err != nil { return "", err } - return fmt.Sprintf( - "%v:%v", - volumeSource.EndpointsName, - volumeSource.Path), nil + if spec.Volume != nil && spec.Volume.Glusterfs != nil { + endpointName = spec.Volume.Glusterfs.EndpointsName + } else if spec.PersistentVolume != nil && + spec.PersistentVolume.Spec.Glusterfs != nil { + endpointName = spec.PersistentVolume.Spec.Glusterfs.EndpointsName + endpointsNsPtr = spec.PersistentVolume.Spec.Glusterfs.EndpointsNamespace + if endpointsNsPtr != nil && *endpointsNsPtr != "" { + return fmt.Sprintf("%v:%v:%v", endpointName, *endpointsNsPtr, volPath), nil + } + return "", fmt.Errorf("invalid endpointsnamespace in provided glusterfs PV spec") + + } else { + return "", fmt.Errorf("unable to fetch required parameters from provided glusterfs spec") + } + + return fmt.Sprintf("%v:%v", endpointName, volPath), nil } func (plugin *glusterfsPlugin) CanSupport(spec *volume.Spec) bool { @@ -139,29 +162,57 @@ func (plugin *glusterfsPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode } func (plugin *glusterfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) { - source, _, err := getVolumeSource(spec) + epName, epNamespace, err := plugin.getEndpointNameAndNamespace(spec, pod.Namespace) if err != nil { - glog.Errorf("failed to get gluster volumesource: %v", err) return nil, err } - epName := source.EndpointsName - // PVC/POD is in same namespace. 
- podNs := pod.Namespace + kubeClient := plugin.host.GetKubeClient() if kubeClient == nil { return nil, fmt.Errorf("failed to get kube client to initialize mounter") } - ep, err := kubeClient.CoreV1().Endpoints(podNs).Get(epName, metav1.GetOptions{}) + ep, err := kubeClient.Core().Endpoints(epNamespace).Get(epName, metav1.GetOptions{}) + if err != nil { - glog.Errorf("failed to get endpoint %s: %v", epName, err) + klog.Errorf("failed to get endpoint %s: %v", epName, err) return nil, err } - glog.V(4).Infof("glusterfs pv endpoint %v", ep) + klog.V(4).Infof("glusterfs pv endpoint %v", ep) return plugin.newMounterInternal(spec, ep, pod, plugin.host.GetMounter(plugin.GetPluginName())) } +func (plugin *glusterfsPlugin) getEndpointNameAndNamespace(spec *volume.Spec, defaultNamespace string) (string, string, error) { + if spec.Volume != nil && spec.Volume.Glusterfs != nil { + endpoints := spec.Volume.Glusterfs.EndpointsName + if endpoints == "" { + return "", "", fmt.Errorf("no glusterFS endpoint specified") + } + return endpoints, defaultNamespace, nil + + } else if spec.PersistentVolume != nil && + spec.PersistentVolume.Spec.Glusterfs != nil { + endpoints := spec.PersistentVolume.Spec.Glusterfs.EndpointsName + endpointsNs := defaultNamespace + + overriddenNs := spec.PersistentVolume.Spec.Glusterfs.EndpointsNamespace + if overriddenNs != nil { + if len(*overriddenNs) > 0 { + endpointsNs = *overriddenNs + } else { + return "", "", fmt.Errorf("endpointnamespace field set, but no endpointnamespace specified") + } + } + return endpoints, endpointsNs, nil + } + return "", "", fmt.Errorf("Spec does not reference a GlusterFS volume type") + +} func (plugin *glusterfsPlugin) newMounterInternal(spec *volume.Spec, ep *v1.Endpoints, pod *v1.Pod, mounter mount.Interface) (volume.Mounter, error) { - source, readOnly, _ := getVolumeSource(spec) + volPath, readOnly, err := getVolumeInfo(spec) + if err != nil { + klog.Errorf("failed to get volumesource : %v", err) + return nil, err + } return &glusterfsMounter{ glusterfs: &glusterfs{ volName: spec.Name(), @@ -171,7 +222,7 @@ func (plugin *glusterfsPlugin) newMounterInternal(spec *volume.Spec, ep *v1.Endp MetricsProvider: volume.NewMetricsStatFS(plugin.host.GetPodVolumeDir(pod.UID, strings.EscapeQualifiedNameForDisk(glusterfsPluginName), spec.Name())), }, hosts: ep, - path: source.Path, + path: volPath, readOnly: readOnly, mountOptions: volutil.MountOptionFromSpec(spec), }, nil @@ -247,7 +298,7 @@ func (b *glusterfsMounter) SetUp(fsGroup *int64) error { func (b *glusterfsMounter) SetUpAt(dir string, fsGroup *int64) error { notMnt, err := b.mounter.IsLikelyNotMountPoint(dir) - glog.V(4).Infof("mount setup: %s %v %v", dir, !notMnt, err) + klog.V(4).Infof("mount setup: %s %v %v", dir, !notMnt, err) if err != nil && !os.IsNotExist(err) { return err } @@ -290,6 +341,7 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error { var errs error options := []string{} hasLogFile := false + hasLogLevel := false log := "" if b.readOnly { @@ -297,13 +349,19 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error { } - // Check logfile has been provided by user, if provided, use that as the log file. + // Check for log-file,log-level options existence in user supplied mount options, if provided, use those. 
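`getEndpointNameAndNamespace`, added above, encodes a small precedence rule: an in-line pod volume always resolves endpoints in the pod's own namespace, a PV-backed volume may carry an explicit `EndpointsNamespace` that overrides the default, and a namespace field that is set but empty is an error. Distilled into a standalone sketch (the trimmed `glusterfsSource` type and `fromPV` flag are stand-ins for the real spec plumbing):

```go
package main

import (
	"errors"
	"fmt"
)

// glusterfsSource carries only the two fields the rule reads; the namespace
// is a pointer because it is optional and only meaningful on a PV.
type glusterfsSource struct {
	EndpointsName      string
	EndpointsNamespace *string
}

func endpointNameAndNamespace(src glusterfsSource, fromPV bool, defaultNS string) (string, string, error) {
	if src.EndpointsName == "" {
		return "", "", errors.New("no glusterFS endpoint specified")
	}
	ns := defaultNS // in-line pod volumes always use the pod's namespace
	if fromPV && src.EndpointsNamespace != nil {
		if *src.EndpointsNamespace == "" {
			return "", "", errors.New("endpointnamespace field set, but no endpointnamespace specified")
		}
		ns = *src.EndpointsNamespace // explicit PV-level override wins
	}
	return src.EndpointsName, ns, nil
}

func main() {
	storageNS := "storage"
	name, ns, _ := endpointNameAndNamespace(glusterfsSource{"glusterfs-cluster", &storageNS}, true, "default")
	fmt.Println(name, ns) // glusterfs-cluster storage
}
```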
for _, userOpt := range b.mountOptions { - if dstrings.HasPrefix(userOpt, "log-file") { - glog.V(4).Infof("log-file mount option has provided") + + switch { + case dstrings.HasPrefix(userOpt, "log-file"): + klog.V(4).Infof("log-file mount option has provided") hasLogFile = true - break + + case dstrings.HasPrefix(userOpt, "log-level"): + klog.V(4).Infof("log-level mount option has provided") + hasLogLevel = true } + } // If logfile has not been provided, create driver specific log file. @@ -319,11 +377,14 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error { // its own log based on PV + Pod log = path.Join(p, b.pod.Name+"-glusterfs.log") + // Use derived log file in gluster fuse mount + options = append(options, "log-file="+log) + } - // Use derived/provided log file in gluster fuse mount - options = append(options, "log-file="+log) - options = append(options, "log-level=ERROR") + if !hasLogLevel { + options = append(options, "log-level=ERROR") + } var addrlist []string if b.hosts == nil { @@ -357,7 +418,7 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error { ip := addrlist[0] errs = b.mounter.Mount(ip+":"+b.path, dir, "glusterfs", mountOptions) if errs == nil { - glog.Infof("successfully mounted directory %s", dir) + klog.Infof("successfully mounted directory %s", dir) return nil } @@ -374,7 +435,7 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error { } errs = b.mounter.Mount(ip+":"+b.path, dir, "glusterfs", noAutoMountOptions) if errs == nil { - glog.Infof("successfully mounted %s", dir) + klog.Infof("successfully mounted %s", dir) return nil } } @@ -393,15 +454,16 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error { } -func getVolumeSource(spec *volume.Spec) (*v1.GlusterfsVolumeSource, bool, error) { +//getVolumeInfo returns 'path' and 'readonly' field values from the provided glusterfs spec. 
+func getVolumeInfo(spec *volume.Spec) (string, bool, error) { if spec.Volume != nil && spec.Volume.Glusterfs != nil { - return spec.Volume.Glusterfs, spec.Volume.Glusterfs.ReadOnly, nil + return spec.Volume.Glusterfs.Path, spec.Volume.Glusterfs.ReadOnly, nil + } else if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.Glusterfs != nil { - return spec.PersistentVolume.Spec.Glusterfs, spec.ReadOnly, nil + return spec.PersistentVolume.Spec.Glusterfs.Path, spec.ReadOnly, nil } - - return nil, false, fmt.Errorf("Spec does not reference a Glusterfs volume type") + return "", false, fmt.Errorf("Spec does not reference a Glusterfs volume type") } func (plugin *glusterfsPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) { @@ -433,6 +495,7 @@ type provisionerConfig struct { volumeOptions []string volumeNamePrefix string thinPoolSnapFactor float32 + customEpNamePrefix string } type glusterfsVolumeProvisioner struct { @@ -510,7 +573,7 @@ func (plugin *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAll } pvList, err := kubeClient.CoreV1().PersistentVolumes().List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) if err != nil { - glog.Error("failed to get existing persistent volumes") + klog.Error("failed to get existing persistent volumes") return err } @@ -524,21 +587,21 @@ func (plugin *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAll gidStr, ok := pv.Annotations[volutil.VolumeGidAnnotationKey] if !ok { - glog.Warningf("no GID found in pv %v", pvName) + klog.Warningf("no GID found in pv %v", pvName) continue } gid, err := convertGid(gidStr) if err != nil { - glog.Errorf("failed to parse gid %s: %v", gidStr, err) + klog.Errorf("failed to parse gid %s: %v", gidStr, err) continue } _, err = gidTable.Allocate(gid) if err == ErrConflict { - glog.Warningf("GID %v found in pv %v was already allocated", gid, pvName) + klog.Warningf("GID %v found in pv %v was already allocated", gid, pvName) } else if err != nil { - glog.Errorf("failed to store gid %v found in pv %v: %v", gid, pvName, err) + klog.Errorf("failed to store gid %v found in pv %v: %v", gid, pvName, err) return err } } @@ -614,7 +677,7 @@ func (d *glusterfsVolumeDeleter) getGid() (int, bool, error) { } func (d *glusterfsVolumeDeleter) Delete() error { - glog.V(2).Infof("delete volume %s", d.glusterfsMounter.path) + klog.V(2).Infof("delete volume %s", d.glusterfsMounter.path) volumeName := d.glusterfsMounter.path volumeID, err := getVolumeID(d.spec, volumeName) @@ -633,11 +696,11 @@ func (d *glusterfsVolumeDeleter) Delete() error { } d.provisionerConfig = *cfg - glog.V(4).Infof("deleting volume %q", volumeID) + klog.V(4).Infof("deleting volume %q", volumeID) gid, exists, err := d.getGid() if err != nil { - glog.Error(err) + klog.Error(err) } else if exists { gidTable, err := d.plugin.getGidTable(class.Name, cfg.gidMin, cfg.gidMax) if err != nil { @@ -652,37 +715,37 @@ func (d *glusterfsVolumeDeleter) Delete() error { cli := gcli.NewClient(d.url, d.user, d.secretValue) if cli == nil { - glog.Errorf("failed to create glusterfs REST client") + klog.Errorf("failed to create glusterfs REST client") return fmt.Errorf("failed to create glusterfs REST client, REST server authentication failed") } err = cli.VolumeDelete(volumeID) if err != nil { - glog.Errorf("failed to delete volume %s: %v", volumeName, err) + klog.Errorf("failed to delete volume %s: %v", volumeName, err) return err } - glog.V(2).Infof("volume %s deleted successfully", volumeName) + 
klog.V(2).Infof("volume %s deleted successfully", volumeName) //Deleter takes endpoint and namespace from pv spec. pvSpec := d.spec.Spec var dynamicEndpoint, dynamicNamespace string if pvSpec.ClaimRef == nil { - glog.Errorf("ClaimRef is nil") + klog.Errorf("ClaimRef is nil") return fmt.Errorf("ClaimRef is nil") } if pvSpec.ClaimRef.Namespace == "" { - glog.Errorf("namespace is nil") + klog.Errorf("namespace is nil") return fmt.Errorf("namespace is nil") } dynamicNamespace = pvSpec.ClaimRef.Namespace if pvSpec.Glusterfs.EndpointsName != "" { dynamicEndpoint = pvSpec.Glusterfs.EndpointsName } - glog.V(3).Infof("dynamic namespace and endpoint %v/%v", dynamicNamespace, dynamicEndpoint) + klog.V(3).Infof("dynamic namespace and endpoint %v/%v", dynamicNamespace, dynamicEndpoint) err = d.deleteEndpointService(dynamicNamespace, dynamicEndpoint) if err != nil { - glog.Errorf("failed to delete endpoint/service %v/%v: %v", dynamicNamespace, dynamicEndpoint, err) + klog.Errorf("failed to delete endpoint/service %v/%v: %v", dynamicNamespace, dynamicEndpoint, err) } else { - glog.V(1).Infof("endpoint %v/%v is deleted successfully ", dynamicNamespace, dynamicEndpoint) + klog.V(1).Infof("endpoint %v/%v is deleted successfully ", dynamicNamespace, dynamicEndpoint) } return nil } @@ -693,7 +756,7 @@ func (p *glusterfsVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTop } if p.options.PVC.Spec.Selector != nil { - glog.V(4).Infof("not able to parse your claim Selector") + klog.V(4).Infof("not able to parse your claim Selector") return nil, fmt.Errorf("not able to parse your claim Selector") } @@ -701,7 +764,7 @@ func (p *glusterfsVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTop return nil, fmt.Errorf("%s does not support block volume provisioning", p.plugin.GetPluginName()) } - glog.V(4).Infof("Provision VolumeOptions %v", p.options) + klog.V(4).Infof("Provision VolumeOptions %v", p.options) scName := v1helper.GetPersistentVolumeClaimClass(p.options.PVC) cfg, err := parseClassParameters(p.options.Parameters, p.plugin.host.GetKubeClient()) if err != nil { @@ -716,19 +779,19 @@ func (p *glusterfsVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTop gid, _, err := gidTable.AllocateNext() if err != nil { - glog.Errorf("failed to reserve GID from table: %v", err) + klog.Errorf("failed to reserve GID from table: %v", err) return nil, fmt.Errorf("failed to reserve GID from table: %v", err) } - glog.V(2).Infof("Allocated GID %d for PVC %s", gid, p.options.PVC.Name) + klog.V(2).Infof("Allocated GID %d for PVC %s", gid, p.options.PVC.Name) glusterfs, sizeGiB, volID, err := p.CreateVolume(gid) if err != nil { if releaseErr := gidTable.Release(gid); releaseErr != nil { - glog.Errorf("error when releasing GID in storageclass %s: %v", scName, releaseErr) + klog.Errorf("error when releasing GID in storageclass %s: %v", scName, releaseErr) } - glog.Errorf("failed to create volume: %v", err) + klog.Errorf("failed to create volume: %v", err) return nil, fmt.Errorf("failed to create volume: %v", err) } mode := v1.PersistentVolumeFilesystem @@ -759,9 +822,11 @@ func (p *glusterfsVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTop return pv, nil } -func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolumeSource, size int, volID string, err error) { +func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsPersistentVolumeSource, size int, volID string, err error) { var clusterIDs []string customVolumeName := "" + epServiceName := "" + capacity 
:= p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] // GlusterFS/heketi creates volumes in units of GiB. @@ -769,20 +834,20 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolum if err != nil { return nil, 0, "", err } - glog.V(2).Infof("create volume of size %dGiB", sz) + klog.V(2).Infof("create volume of size %dGiB", sz) if p.url == "" { - glog.Errorf("REST server endpoint is empty") + klog.Errorf("REST server endpoint is empty") return nil, 0, "", fmt.Errorf("failed to create glusterfs REST client, REST URL is empty") } cli := gcli.NewClient(p.url, p.user, p.secretValue) if cli == nil { - glog.Errorf("failed to create glusterfs REST client") + klog.Errorf("failed to create glusterfs REST client") return nil, 0, "", fmt.Errorf("failed to create glusterfs REST client, REST server authentication failed") } if p.provisionerConfig.clusterID != "" { clusterIDs = dstrings.Split(p.clusterID, ",") - glog.V(4).Infof("provided clusterIDs %v", clusterIDs) + klog.V(4).Infof("provided clusterIDs %v", clusterIDs) } if p.provisionerConfig.volumeNamePrefix != "" { @@ -801,40 +866,45 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolum volumeReq := &gapi.VolumeCreateRequest{Size: sz, Name: customVolumeName, Clusters: clusterIDs, Gid: gid64, Durability: p.volumeType, GlusterVolumeOptions: p.volumeOptions, Snapshot: snaps} volume, err := cli.VolumeCreate(volumeReq) if err != nil { - glog.Errorf("failed to create volume: %v", err) + klog.Errorf("failed to create volume: %v", err) return nil, 0, "", fmt.Errorf("failed to create volume: %v", err) } - glog.V(1).Infof("volume with size %d and name %s created", volume.Size, volume.Name) + klog.V(1).Infof("volume with size %d and name %s created", volume.Size, volume.Name) volID = volume.Id dynamicHostIps, err := getClusterNodes(cli, volume.Cluster) if err != nil { - glog.Errorf("failed to get cluster nodes for volume %s: %v", volume, err) + klog.Errorf("failed to get cluster nodes for volume %s: %v", volume, err) return nil, 0, "", fmt.Errorf("failed to get cluster nodes for volume %s: %v", volume, err) } - // The 'endpointname' is created in form of 'glusterfs-dynamic-'. - // createEndpointService() checks for this 'endpoint' existence in PVC's namespace and - // If not found, it create an endpoint and service using the IPs we dynamically picked at time - // of volume creation. 
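The next hunk replaces the old `glusterfs-dynamic-<PVC name>` endpoint/service naming with a name derived from the PVC UID, optionally prefixed by the new `customepnameprefix` class parameter; presumably the UID keeps generated names unique and length-bounded even when PVC names are reused. The scheme itself is one branch:

```go
package main

import "fmt"

// epServiceName mirrors the naming in the next hunk: "<prefix>-<PVC UID>",
// or the bare UID when no prefix is configured.
func epServiceName(prefix, pvcUID string) string {
	if prefix == "" {
		return pvcUID
	}
	return prefix + "-" + pvcUID
}

func main() {
	uid := "c4a8e03a-0e42-11e9-a7a2-42010a800002" // example UID, not from the diff
	fmt.Println(epServiceName("glusterfs-dynamic", uid))
	// glusterfs-dynamic-c4a8e03a-0e42-11e9-a7a2-42010a800002
}
```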
- epServiceName := dynamicEpSvcPrefix + p.options.PVC.Name + if len(p.provisionerConfig.customEpNamePrefix) == 0 { + epServiceName = string(p.options.PVC.UID) + } else { + epServiceName = p.provisionerConfig.customEpNamePrefix + "-" + string(p.options.PVC.UID) + } epNamespace := p.options.PVC.Namespace endpoint, service, err := p.createEndpointService(epNamespace, epServiceName, dynamicHostIps, p.options.PVC.Name) if err != nil { - glog.Errorf("failed to create endpoint/service %v/%v: %v", epNamespace, epServiceName, err) + klog.Errorf("failed to create endpoint/service %v/%v: %v", epNamespace, epServiceName, err) deleteErr := cli.VolumeDelete(volume.Id) if deleteErr != nil { - glog.Errorf("failed to delete volume: %v, manual deletion of the volume required", deleteErr) + klog.Errorf("failed to delete volume: %v, manual deletion of the volume required", deleteErr) } return nil, 0, "", fmt.Errorf("failed to create endpoint/service %v/%v: %v", epNamespace, epServiceName, err) } - glog.V(3).Infof("dynamic endpoint %v and service %v ", endpoint, service) - return &v1.GlusterfsVolumeSource{ - EndpointsName: endpoint.Name, - Path: volume.Name, - ReadOnly: false, + klog.V(3).Infof("dynamic endpoint %v and service %v ", endpoint, service) + return &v1.GlusterfsPersistentVolumeSource{ + EndpointsName: endpoint.Name, + EndpointsNamespace: &epNamespace, + Path: volume.Name, + ReadOnly: false, }, sz, volID, nil } +// createEndpointService() makes sure an endpoint and service +// exist for the given namespace, PVC name, endpoint name, and +// set of IPs. I.e. the endpoint or service is only created +// if it does not exist yet. func (p *glusterfsVolumeProvisioner) createEndpointService(namespace string, epServiceName string, hostips []string, pvcname string) (endpoint *v1.Endpoints, service *v1.Service, err error) { addrlist := make([]v1.EndpointAddress, len(hostips)) @@ -860,11 +930,11 @@ func (p *glusterfsVolumeProvisioner) createEndpointService(namespace string, epS } _, err = kubeClient.CoreV1().Endpoints(namespace).Create(endpoint) if err != nil && errors.IsAlreadyExists(err) { - glog.V(1).Infof("endpoint %s already exist in namespace %s", endpoint, namespace) + klog.V(1).Infof("endpoint %s already exist in namespace %s", endpoint, namespace) err = nil } if err != nil { - glog.Errorf("failed to create endpoint: %v", err) + klog.Errorf("failed to create endpoint: %v", err) return nil, nil, fmt.Errorf("failed to create endpoint: %v", err) } service = &v1.Service{ @@ -880,11 +950,11 @@ func (p *glusterfsVolumeProvisioner) createEndpointService(namespace string, epS {Protocol: "TCP", Port: 1}}}} _, err = kubeClient.CoreV1().Services(namespace).Create(service) if err != nil && errors.IsAlreadyExists(err) { - glog.V(1).Infof("service %s already exist in namespace %s", service, namespace) + klog.V(1).Infof("service %s already exist in namespace %s", service, namespace) err = nil } if err != nil { - glog.Errorf("failed to create service: %v", err) + klog.Errorf("failed to create service: %v", err) return nil, nil, fmt.Errorf("error creating service: %v", err) } return endpoint, service, nil @@ -897,10 +967,10 @@ func (d *glusterfsVolumeDeleter) deleteEndpointService(namespace string, epServi } err = kubeClient.CoreV1().Services(namespace).Delete(epServiceName, nil) if err != nil { - glog.Errorf("failed to delete service %s/%s: %v", namespace, epServiceName, err) + klog.Errorf("failed to delete service %s/%s: %v", namespace, epServiceName, err) return fmt.Errorf("failed to delete service %s/%s: %v", 
namespace, epServiceName, err) } - glog.V(1).Infof("service/endpoint: %s/%s deleted successfully", namespace, epServiceName) + klog.V(1).Infof("service/endpoint: %s/%s deleted successfully", namespace, epServiceName) return nil } @@ -908,7 +978,7 @@ func (d *glusterfsVolumeDeleter) deleteEndpointService(namespace string, epServi func parseSecret(namespace, secretName string, kubeClient clientset.Interface) (string, error) { secretMap, err := volutil.GetSecretForPV(namespace, secretName, glusterfsPluginName, kubeClient) if err != nil { - glog.Errorf("failed to get secret: %s/%s: %v", namespace, secretName, err) + klog.Errorf("failed to get secret: %s/%s: %v", namespace, secretName, err) return "", fmt.Errorf("failed to get secret %s/%s: %v", namespace, secretName, err) } if len(secretMap) == 0 { @@ -930,7 +1000,7 @@ func parseSecret(namespace, secretName string, kubeClient clientset.Interface) ( func getClusterNodes(cli *gcli.Client, cluster string) (dynamicHostIps []string, err error) { clusterinfo, err := cli.ClusterInfo(cluster) if err != nil { - glog.Errorf("failed to get cluster details: %v", err) + klog.Errorf("failed to get cluster details: %v", err) return nil, fmt.Errorf("failed to get cluster details: %v", err) } @@ -940,15 +1010,15 @@ func getClusterNodes(cli *gcli.Client, cluster string) (dynamicHostIps []string, for _, node := range clusterinfo.Nodes { nodeInfo, err := cli.NodeInfo(string(node)) if err != nil { - glog.Errorf("failed to get host ipaddress: %v", err) + klog.Errorf("failed to get host ipaddress: %v", err) return nil, fmt.Errorf("failed to get host ipaddress: %v", err) } ipaddr := dstrings.Join(nodeInfo.NodeAddRequest.Hostnames.Storage, "") dynamicHostIps = append(dynamicHostIps, ipaddr) } - glog.V(3).Infof("host list :%v", dynamicHostIps) + klog.V(3).Infof("host list :%v", dynamicHostIps) if len(dynamicHostIps) == 0 { - glog.Errorf("no hosts found: %v", err) + klog.Errorf("no hosts found: %v", err) return nil, fmt.Errorf("no hosts found: %v", err) } return dynamicHostIps, nil @@ -960,6 +1030,7 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa var err error cfg.gidMin = defaultGidMin cfg.gidMax = defaultGidMax + cfg.customEpNamePrefix = dynamicEpSvcPrefix authEnabled := true parseVolumeType := "" @@ -1027,7 +1098,16 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa if len(v) != 0 { parseThinPoolSnapFactor = v } - + case "customepnameprefix": + // If the string has > 'maxCustomEpNamePrefixLen' chars, the final endpoint name will + // exceed the limitation of 63 chars, so fail if prefix is > 'maxCustomEpNamePrefixLen' + // characters. This is only applicable for 'customepnameprefix' string and default ep name + // string will always pass. 
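The `maxCustomEpNamePrefixLen` cap of 26 that this validation enforces falls straight out of the name-length arithmetic in the constant's doc comment: an endpoint/service name tops out at 63 characters, and the generated suffix is a hyphen plus a 36-character UID, i.e. 37 characters, leaving 63 - 37 = 26 for the prefix. As a checkable sketch:

```go
package main

import "fmt"

const (
	maxNameLen               = 63                                // Kubernetes object name limit
	uidWithSeparatorLen      = 37                                // "-" + 36-character UID
	maxCustomEpNamePrefixLen = maxNameLen - uidWithSeparatorLen // = 26
)

func validatePrefix(v string) error {
	if len(v) > maxCustomEpNamePrefixLen {
		return fmt.Errorf("'customepnameprefix' value should be < %d characters", maxCustomEpNamePrefixLen)
	}
	return nil
}

func main() {
	fmt.Println(validatePrefix("glusterfs-dynamic"))                           // <nil>
	fmt.Println(validatePrefix("a-prefix-that-is-clearly-much-too-long-here")) // error
}
```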
+ if len(v) <= maxCustomEpNamePrefixLen { + cfg.customEpNamePrefix = v + } else { + return nil, fmt.Errorf("'customepnameprefix' value should be < %d characters", maxCustomEpNamePrefixLen) + } default: return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, glusterfsPluginName) } @@ -1152,7 +1232,7 @@ func getVolumeID(pv *v1.PersistentVolume, volumeName string) (string, error) { func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) { pvSpec := spec.PersistentVolume.Spec volumeName := pvSpec.Glusterfs.Path - glog.V(2).Infof("Received request to expand volume %s", volumeName) + klog.V(2).Infof("Received request to expand volume %s", volumeName) volumeID, err := getVolumeID(spec.PersistentVolume, volumeName) if err != nil { @@ -1169,12 +1249,12 @@ func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize res return oldSize, err } - glog.V(4).Infof("expanding volume: %q", volumeID) + klog.V(4).Infof("expanding volume: %q", volumeID) //Create REST server connection cli := gcli.NewClient(cfg.url, cfg.user, cfg.secretValue) if cli == nil { - glog.Errorf("failed to create glusterfs REST client") + klog.Errorf("failed to create glusterfs REST client") return oldSize, fmt.Errorf("failed to create glusterfs REST client, REST server authentication failed") } @@ -1188,7 +1268,7 @@ func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize res //Check the existing volume size currentVolumeInfo, err := cli.VolumeInfo(volumeID) if err != nil { - glog.Errorf("error when fetching details of volume %s: %v", volumeName, err) + klog.Errorf("error when fetching details of volume %s: %v", volumeName, err) return oldSize, err } @@ -1202,11 +1282,11 @@ func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize res // Expand the volume volumeInfoRes, err := cli.VolumeExpand(volumeID, volumeExpandReq) if err != nil { - glog.Errorf("failed to expand volume %s: %v", volumeName, err) + klog.Errorf("failed to expand volume %s: %v", volumeName, err) return oldSize, err } - glog.V(2).Infof("volume %s expanded to new size %d successfully", volumeName, volumeInfoRes.Size) + klog.V(2).Infof("volume %s expanded to new size %d successfully", volumeName, volumeInfoRes.Size) newVolumeSize := resource.MustParse(fmt.Sprintf("%dGi", volumeInfoRes.Size)) return newVolumeSize, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs_util.go index 83d8a13c7696..2b19bf709f8d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs_util.go @@ -21,7 +21,7 @@ import ( "fmt" "os" - "github.com/golang/glog" + "k8s.io/klog" ) // readGlusterLog will take the last 2 lines of the log file @@ -34,7 +34,7 @@ func readGlusterLog(path string, podName string) error { var line2 string linecount := 0 - glog.Infof("failure, now attempting to read the gluster log for pod %s", podName) + klog.Infof("failure, now attempting to read the gluster log for pod %s", podName) // Check and make sure path exists if len(path) == 0 { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/BUILD index 1fb3b31bf4e9..1143c892e86c 100644 --- 
a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/BUILD @@ -28,7 +28,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/attacher.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/attacher.go index 82eccfd3d293..922bc31de6c6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/attacher.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/attacher.go @@ -21,10 +21,10 @@ import ( "os" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/util/keymutex" "k8s.io/kubernetes/pkg/util/mount" @@ -79,7 +79,7 @@ func (attacher *iscsiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName func (attacher *iscsiAttacher) WaitForAttach(spec *volume.Spec, devicePath string, pod *v1.Pod, timeout time.Duration) (string, error) { mounter, err := volumeSpecToMounter(spec, attacher.host, attacher.targetLocks, pod) if err != nil { - glog.Warningf("failed to get iscsi mounter: %v", err) + klog.Warningf("failed to get iscsi mounter: %v", err) return "", err } return attacher.manager.AttachDisk(*mounter) @@ -89,7 +89,7 @@ func (attacher *iscsiAttacher) GetDeviceMountPath( spec *volume.Spec) (string, error) { mounter, err := volumeSpecToMounter(spec, attacher.host, attacher.targetLocks, nil) if err != nil { - glog.Warningf("failed to get iscsi mounter: %v", err) + klog.Warningf("failed to get iscsi mounter: %v", err) return "", err } if mounter.InitiatorName != "" { @@ -137,6 +137,7 @@ type iscsiDetacher struct { host volume.VolumeHost mounter mount.Interface manager diskManager + plugin *iscsiPlugin } var _ volume.Detacher = &iscsiDetacher{} @@ -148,6 +149,7 @@ func (plugin *iscsiPlugin) NewDetacher() (volume.Detacher, error) { host: plugin.host, mounter: plugin.host.GetMounter(iscsiPluginName), manager: &ISCSIUtil{}, + plugin: plugin, }, nil } @@ -160,17 +162,17 @@ func (detacher *iscsiDetacher) Detach(volumeName string, nodeName types.NodeName } func (detacher *iscsiDetacher) UnmountDevice(deviceMountPath string) error { - unMounter := volumeSpecToUnmounter(detacher.mounter, detacher.host) + unMounter := volumeSpecToUnmounter(detacher.mounter, detacher.host, detacher.plugin) err := detacher.manager.DetachDisk(*unMounter, deviceMountPath) if err != nil { return fmt.Errorf("iscsi: failed to detach disk: %s\nError: %v", deviceMountPath, err) } - glog.V(4).Infof("iscsi: %q is unmounted, deleting the directory", deviceMountPath) + klog.V(4).Infof("iscsi: %q is unmounted, deleting the directory", deviceMountPath) err = os.RemoveAll(deviceMountPath) if err != nil { return fmt.Errorf("iscsi: failed to delete the directory: %s\nError: %v", deviceMountPath, err) } - glog.V(4).Infof("iscsi: successfully detached disk: %s", deviceMountPath) + klog.V(4).Infof("iscsi: successfully detached disk: %s", deviceMountPath) return nil } @@ -204,7 +206,7 @@ func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost, targetLocks if err != nil { return nil, err } - glog.V(5).Infof("iscsi: 
VolumeSpecToMounter volumeMode %s", volumeMode) + klog.V(5).Infof("iscsi: VolumeSpecToMounter volumeMode %s", volumeMode) return &iscsiDiskMounter{ iscsiDisk: iscsiDisk, fsType: fsType, @@ -225,11 +227,11 @@ func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost, targetLocks }, nil } -func volumeSpecToUnmounter(mounter mount.Interface, host volume.VolumeHost) *iscsiDiskUnmounter { +func volumeSpecToUnmounter(mounter mount.Interface, host volume.VolumeHost, plugin *iscsiPlugin) *iscsiDiskUnmounter { exec := host.GetExec(iscsiPluginName) return &iscsiDiskUnmounter{ iscsiDisk: &iscsiDisk{ - plugin: &iscsiPlugin{}, + plugin: plugin, }, mounter: mounter, exec: exec, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/disk_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/disk_manager.go index aa1caeaf99cd..aff2045d7fff 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/disk_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/disk_manager.go @@ -19,7 +19,7 @@ package iscsi import ( "os" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" @@ -43,7 +43,7 @@ type diskManager interface { func diskSetUp(manager diskManager, b iscsiDiskMounter, volPath string, mounter mount.Interface, fsGroup *int64) error { notMnt, err := mounter.IsLikelyNotMountPoint(volPath) if err != nil && !os.IsNotExist(err) { - glog.Errorf("cannot validate mountpoint: %s", volPath) + klog.Errorf("cannot validate mountpoint: %s", volPath) return err } if !notMnt { @@ -51,7 +51,7 @@ func diskSetUp(manager diskManager, b iscsiDiskMounter, volPath string, mounter } if err := os.MkdirAll(volPath, 0750); err != nil { - glog.Errorf("failed to mkdir:%s", volPath) + klog.Errorf("failed to mkdir:%s", volPath) return err } // Perform a bind mount to the full path to allow duplicate mounts of the same disk. @@ -67,25 +67,25 @@ func diskSetUp(manager diskManager, b iscsiDiskMounter, volPath string, mounter mountOptions := util.JoinMountOptions(b.mountOptions, options) err = mounter.Mount(globalPDPath, volPath, "", mountOptions) if err != nil { - glog.Errorf("Failed to bind mount: source:%s, target:%s, err:%v", globalPDPath, volPath, err) + klog.Errorf("Failed to bind mount: source:%s, target:%s, err:%v", globalPDPath, volPath, err) noMnt, mntErr := b.mounter.IsLikelyNotMountPoint(volPath) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if !noMnt { if mntErr = b.mounter.Unmount(volPath); mntErr != nil { - glog.Errorf("Failed to unmount: %v", mntErr) + klog.Errorf("Failed to unmount: %v", mntErr) return err } noMnt, mntErr = b.mounter.IsLikelyNotMountPoint(volPath) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if !noMnt { // will most likely retry on next sync loop. - glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", volPath) + klog.Errorf("%s is still mounted, despite call to unmount(). 
Will try again next sync loop.", volPath) return err } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi.go index f7f2cc2b130d..f8612d9dc199 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi.go @@ -23,10 +23,10 @@ import ( "strconv" "strings" - "github.com/golang/glog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/keymutex" "k8s.io/kubernetes/pkg/util/mount" utilstrings "k8s.io/kubernetes/pkg/util/strings" @@ -252,7 +252,7 @@ func (plugin *iscsiPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName if err != nil { return nil, err } - glog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err) + klog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err) // Retrieve volume information from globalMapPathUUID // globalMapPathUUID example: // plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid} @@ -287,7 +287,7 @@ func (iscsi *iscsiDisk) GetPath() string { func (iscsi *iscsiDisk) iscsiGlobalMapPath(spec *volume.Spec) (string, error) { mounter, err := volumeSpecToMounter(spec, iscsi.plugin.host, iscsi.plugin.targetLocks, nil /* pod */) if err != nil { - glog.Warningf("failed to get iscsi mounter: %v", err) + klog.Warningf("failed to get iscsi mounter: %v", err) return "", err } return iscsi.manager.MakeGlobalVDPDName(*mounter.iscsiDisk), nil @@ -334,7 +334,7 @@ func (b *iscsiDiskMounter) SetUpAt(dir string, fsGroup *int64) error { // diskSetUp checks mountpoints and prevent repeated calls err := diskSetUp(b.manager, *b, dir, b.mounter, fsGroup) if err != nil { - glog.Errorf("iscsi: failed to setup") + klog.Errorf("iscsi: failed to setup") } return err } @@ -392,12 +392,12 @@ func (c *iscsiDiskUnmapper) TearDownDevice(mapPath, _ string) error { if err != nil { return fmt.Errorf("iscsi: failed to detach disk: %s\nError: %v", mapPath, err) } - glog.V(4).Infof("iscsi: %q is unmounted, deleting the directory", mapPath) + klog.V(4).Infof("iscsi: %q is unmounted, deleting the directory", mapPath) err = os.RemoveAll(mapPath) if err != nil { return fmt.Errorf("iscsi: failed to delete the directory: %s\nError: %v", mapPath, err) } - glog.V(4).Infof("iscsi: successfully detached disk: %s", mapPath) + klog.V(4).Infof("iscsi: successfully detached disk: %s", mapPath) return nil } @@ -582,7 +582,7 @@ func createSecretMap(spec *volume.Spec, plugin *iscsiPlugin, namespace string) ( } secret = make(map[string]string) for name, data := range secretObj.Data { - glog.V(4).Infof("retrieving CHAP secret name: %s", name) + klog.V(4).Infof("retrieving CHAP secret name: %s", name) secret[name] = string(data) } } @@ -649,7 +649,7 @@ func getVolumeSpecFromGlobalMapPath(volumeName, globalMapPath string) (*volume.S ISCSIInterface: iface, }, ) - glog.V(5).Infof("ConstructBlockVolumeSpec: TargetPortal: %v, IQN: %v, Lun: %v, ISCSIInterface: %v", + klog.V(5).Infof("ConstructBlockVolumeSpec: TargetPortal: %v, IQN: %v, Lun: %v, ISCSIInterface: %v", iscsiPV.Spec.PersistentVolumeSource.ISCSI.TargetPortal, iscsiPV.Spec.PersistentVolumeSource.ISCSI.IQN, iscsiPV.Spec.PersistentVolumeSource.ISCSI.Lun, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi_util.go 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi_util.go index dda88d03c6f5..ae9c7a70fc8e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi_util.go @@ -23,18 +23,34 @@ import ( "path" "path/filepath" "regexp" + "strconv" "strings" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" - "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" - "strconv" +) + +const ( + // Minimum number of paths that the volume plugin considers enough when a multipath volume is requested. + minMultipathCount = 2 + + // Minimal number of attempts to attach all paths of a multipath volumes. If at least minMultipathCount paths + // are available after this nr. of attempts, the volume plugin continues with mounting the volume. + minAttachAttempts = 2 + + // Total number of attempts to attach at least minMultipathCount paths. If there are less than minMultipathCount, + // the volume plugin tries to attach the remaining paths at least this number of times in total. After + // maxAttachAttempts attempts, it mounts even a single path. + maxAttachAttempts = 5 + + // How many seconds to wait for a multipath device if at least two paths are available. + multipathDeviceTimeout = 10 ) var ( @@ -237,7 +253,7 @@ func scanOneLun(hostNumber int, lunNumber int) error { return fmt.Errorf("No data written to file: %s", filename) } - glog.V(3).Infof("Scanned SCSI host %d LUN %d", hostNumber, lunNumber) + klog.V(3).Infof("Scanned SCSI host %d LUN %d", hostNumber, lunNumber) return nil } @@ -269,13 +285,13 @@ func waitForMultiPathToExist(devicePaths []string, maxRetries int, deviceUtil vo // AttachDisk returns devicePath of volume if attach succeeded otherwise returns error func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { var devicePath string - var devicePaths []string + devicePaths := map[string]string{} var iscsiTransport string var lastErr error out, err := b.exec.Run("iscsiadm", "-m", "iface", "-I", b.Iface, "-o", "show") if err != nil { - glog.Errorf("iscsi: could not read iface %s error: %s", b.Iface, string(out)) + klog.Errorf("iscsi: could not read iface %s error: %s", b.Iface, string(out)) return "", err } @@ -289,7 +305,7 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { newIface := bkpPortal[0] + ":" + b.VolName err = cloneIface(b, newIface) if err != nil { - glog.Errorf("iscsi: failed to clone iface: %s error: %v", b.Iface, err) + klog.Errorf("iscsi: failed to clone iface: %s error: %v", b.Iface, err) return "", err } // update iface name @@ -307,123 +323,150 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { if err != nil { return "", err } - glog.V(4).Infof("AttachDisk portal->host map for %s is %v", b.Iqn, portalHostMap) + klog.V(4).Infof("AttachDisk portal->host map for %s is %v", b.Iqn, portalHostMap) - for _, tp := range bkpPortal { - hostNumber, loggedIn := portalHostMap[tp] - if !loggedIn { - glog.V(4).Infof("Could not get SCSI host number for portal %s, will attempt login", tp) - - // build discoverydb and discover iscsi target - b.exec.Run("iscsiadm", "-m", "discoverydb", "-t", "sendtargets", "-p", tp, "-I", b.Iface, "-o", "new") - // update discoverydb with CHAP secret - err = updateISCSIDiscoverydb(b, 
tp) - if err != nil { - lastErr = fmt.Errorf("iscsi: failed to update discoverydb to portal %s error: %v", tp, err) + for i := 1; i <= maxAttachAttempts; i++ { + for _, tp := range bkpPortal { + if _, found := devicePaths[tp]; found { + klog.V(4).Infof("Device for portal %q already known", tp) continue } - out, err = b.exec.Run("iscsiadm", "-m", "discoverydb", "-t", "sendtargets", "-p", tp, "-I", b.Iface, "--discover") - if err != nil { - // delete discoverydb record - b.exec.Run("iscsiadm", "-m", "discoverydb", "-t", "sendtargets", "-p", tp, "-I", b.Iface, "-o", "delete") - lastErr = fmt.Errorf("iscsi: failed to sendtargets to portal %s output: %s, err %v", tp, string(out), err) - continue - } - err = updateISCSINode(b, tp) - if err != nil { - // failure to update node db is rare. But deleting record will likely impact those who already start using it. - lastErr = fmt.Errorf("iscsi: failed to update iscsi node to portal %s error: %v", tp, err) - continue - } - // login to iscsi target - out, err = b.exec.Run("iscsiadm", "-m", "node", "-p", tp, "-T", b.Iqn, "-I", b.Iface, "--login") - if err != nil { - // delete the node record from database - b.exec.Run("iscsiadm", "-m", "node", "-p", tp, "-I", b.Iface, "-T", b.Iqn, "-o", "delete") - lastErr = fmt.Errorf("iscsi: failed to attach disk: Error: %s (%v)", string(out), err) - continue + + hostNumber, loggedIn := portalHostMap[tp] + if !loggedIn { + klog.V(4).Infof("Could not get SCSI host number for portal %s, will attempt login", tp) + + // build discoverydb and discover iscsi target + b.exec.Run("iscsiadm", "-m", "discoverydb", "-t", "sendtargets", "-p", tp, "-I", b.Iface, "-o", "new") + + // update discoverydb with CHAP secret + err = updateISCSIDiscoverydb(b, tp) + if err != nil { + lastErr = fmt.Errorf("iscsi: failed to update discoverydb to portal %s error: %v", tp, err) + continue + } + + out, err = b.exec.Run("iscsiadm", "-m", "discoverydb", "-t", "sendtargets", "-p", tp, "-I", b.Iface, "--discover") + if err != nil { + // delete discoverydb record + b.exec.Run("iscsiadm", "-m", "discoverydb", "-t", "sendtargets", "-p", tp, "-I", b.Iface, "-o", "delete") + lastErr = fmt.Errorf("iscsi: failed to sendtargets to portal %s output: %s, err %v", tp, string(out), err) + continue + } + + err = updateISCSINode(b, tp) + if err != nil { + // failure to update node db is rare. But deleting record will likely impact those who already start using it. + lastErr = fmt.Errorf("iscsi: failed to update iscsi node to portal %s error: %v", tp, err) + continue + } + + // login to iscsi target + out, err = b.exec.Run("iscsiadm", "-m", "node", "-p", tp, "-T", b.Iqn, "-I", b.Iface, "--login") + if err != nil { + // delete the node record from database + b.exec.Run("iscsiadm", "-m", "node", "-p", tp, "-I", b.Iface, "-T", b.Iqn, "-o", "delete") + lastErr = fmt.Errorf("iscsi: failed to attach disk: Error: %s (%v)", string(out), err) + continue + } + + // in case of node failure/restart, explicitly set to manual login so it doesn't hang on boot + out, err = b.exec.Run("iscsiadm", "-m", "node", "-p", tp, "-T", b.Iqn, "-o", "update", "-n", "node.startup", "-v", "manual") + if err != nil { + // don't fail if we can't set startup mode, but log warning so there is a clue + klog.Warningf("Warning: Failed to set iSCSI login mode to manual. 
Error: %v", err) + } + + // Rebuild the host map after logging in + portalHostMap, err := b.deviceUtil.GetISCSIPortalHostMapForTarget(b.Iqn) + if err != nil { + return "", err + } + klog.V(6).Infof("AttachDisk portal->host map for %s is %v", b.Iqn, portalHostMap) + + hostNumber, loggedIn = portalHostMap[tp] + if !loggedIn { + klog.Warningf("Could not get SCSI host number for portal %s after logging in", tp) + continue + } } - // in case of node failure/restart, explicitly set to manual login so it doesn't hang on boot - out, err = b.exec.Run("iscsiadm", "-m", "node", "-p", tp, "-T", b.Iqn, "-o", "update", "-n", "node.startup", "-v", "manual") + + klog.V(5).Infof("AttachDisk: scanning SCSI host %d LUN %s", hostNumber, b.Lun) + lunNumber, err := strconv.Atoi(b.Lun) if err != nil { - // don't fail if we can't set startup mode, but log warning so there is a clue - glog.Warningf("Warning: Failed to set iSCSI login mode to manual. Error: %v", err) + return "", fmt.Errorf("AttachDisk: lun is not a number: %s\nError: %v", b.Lun, err) } - // Rebuild the host map after logging in - portalHostMap, err := b.deviceUtil.GetISCSIPortalHostMapForTarget(b.Iqn) + // Scan the iSCSI bus for the LUN + err = scanOneLun(hostNumber, lunNumber) if err != nil { return "", err } - glog.V(6).Infof("AttachDisk portal->host map for %s is %v", b.Iqn, portalHostMap) - hostNumber, loggedIn = portalHostMap[tp] - if !loggedIn { - glog.Warningf("Could not get SCSI host number for portal %s after logging in", tp) - continue + if iscsiTransport == "" { + klog.Errorf("iscsi: could not find transport name in iface %s", b.Iface) + return "", fmt.Errorf("Could not parse iface file for %s", b.Iface) + } + if iscsiTransport == "tcp" { + devicePath = strings.Join([]string{"/dev/disk/by-path/ip", tp, "iscsi", b.Iqn, "lun", b.Lun}, "-") + } else { + devicePath = strings.Join([]string{"/dev/disk/by-path/pci", "*", "ip", tp, "iscsi", b.Iqn, "lun", b.Lun}, "-") } - } - - glog.V(5).Infof("AttachDisk: scanning SCSI host %d LUN %s", hostNumber, b.Lun) - lunNumber, err := strconv.Atoi(b.Lun) - if err != nil { - return "", fmt.Errorf("AttachDisk: lun is not a number: %s\nError: %v", b.Lun, err) - } - - // Scan the iSCSI bus for the LUN - err = scanOneLun(hostNumber, lunNumber) - if err != nil { - return "", err - } - if iscsiTransport == "" { - glog.Errorf("iscsi: could not find transport name in iface %s", b.Iface) - return "", fmt.Errorf("Could not parse iface file for %s", b.Iface) + if exist := waitForPathToExist(&devicePath, multipathDeviceTimeout, iscsiTransport); !exist { + klog.Errorf("Could not attach disk: Timeout after 10s") + // update last error + lastErr = fmt.Errorf("Could not attach disk: Timeout after 10s") + continue + } else { + devicePaths[tp] = devicePath + } } - if iscsiTransport == "tcp" { - devicePath = strings.Join([]string{"/dev/disk/by-path/ip", tp, "iscsi", b.Iqn, "lun", b.Lun}, "-") - } else { - devicePath = strings.Join([]string{"/dev/disk/by-path/pci", "*", "ip", tp, "iscsi", b.Iqn, "lun", b.Lun}, "-") + klog.V(4).Infof("iscsi: tried all devices for %q %d times, %d paths found", b.Iqn, i, len(devicePaths)) + if len(devicePaths) == 0 { + // No path attached, report error and stop trying. 
kubelet will try again in a short while + // delete cloned iface + b.exec.Run("iscsiadm", "-m", "iface", "-I", b.Iface, "-o", "delete") + klog.Errorf("iscsi: failed to get any path for iscsi disk, last err seen:\n%v", lastErr) + return "", fmt.Errorf("failed to get any path for iscsi disk, last err seen:\n%v", lastErr) + } + if len(devicePaths) == len(bkpPortal) { + // We have all paths + klog.V(4).Infof("iscsi: all devices for %q found", b.Iqn) + break } - - if exist := waitForPathToExist(&devicePath, 10, iscsiTransport); !exist { - glog.Errorf("Could not attach disk: Timeout after 10s") - // update last error - lastErr = fmt.Errorf("Could not attach disk: Timeout after 10s") - continue - } else { - devicePaths = append(devicePaths, devicePath) + if len(devicePaths) >= minMultipathCount && i >= minAttachAttempts { + // We have at least two paths for multipath and we tried the other paths long enough + klog.V(4).Infof("%d devices found for %q", len(devicePaths), b.Iqn) + break } } - if len(devicePaths) == 0 { - // delete cloned iface - b.exec.Run("iscsiadm", "-m", "iface", "-I", b.Iface, "-o", "delete") - glog.Errorf("iscsi: failed to get any path for iscsi disk, last err seen:\n%v", lastErr) - return "", fmt.Errorf("failed to get any path for iscsi disk, last err seen:\n%v", lastErr) - } if lastErr != nil { - glog.Errorf("iscsi: last error occurred during iscsi init:\n%v", lastErr) + klog.Errorf("iscsi: last error occurred during iscsi init:\n%v", lastErr) } + devicePathList := []string{} + for _, path := range devicePaths { + devicePathList = append(devicePathList, path) + } // Try to find a multipath device for the volume - if 1 < len(bkpPortal) { - // If the PV has 2 or more portals, wait up to 10 seconds for the multipath - // device to appear - devicePath = waitForMultiPathToExist(devicePaths, 10, b.deviceUtil) + if len(bkpPortal) > 1 { + // Multipath volume was requested. Wait up to 10 seconds for the multipath device to appear. + devicePath = waitForMultiPathToExist(devicePathList, 10, b.deviceUtil) } else { // For PVs with 1 portal, just try one time to find the multipath device. This // avoids a long pause when the multipath device will never get created, and // matches legacy behavior. - devicePath = waitForMultiPathToExist(devicePaths, 1, b.deviceUtil) + devicePath = waitForMultiPathToExist(devicePathList, 1, b.deviceUtil) } // When no multipath device is found, just use the first (and presumably only) device if devicePath == "" { - devicePath = devicePaths[0] + devicePath = devicePathList[0] } - glog.V(5).Infof("iscsi: AttachDisk devicePath: %s", devicePath) + klog.V(5).Infof("iscsi: AttachDisk devicePath: %s", devicePath) // run global mount path related operations based on volumeMode return globalPDPathOperation(b)(b, devicePath, util) } @@ -436,14 +479,14 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { func globalPDPathOperation(b iscsiDiskMounter) func(iscsiDiskMounter, string, *ISCSIUtil) (string, error) { // TODO: remove feature gate check after no longer needed if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { - glog.V(5).Infof("iscsi: AttachDisk volumeMode: %s", b.volumeMode) + klog.V(5).Infof("iscsi: AttachDisk volumeMode: %s", b.volumeMode) if b.volumeMode == v1.PersistentVolumeBlock { // If the volumeMode is 'Block', plugin don't need to format the volume. 
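Note on the rewritten AttachDisk above: rather than making a single pass over the target portals, the loop now retries up to maxAttachAttempts times using the constants introduced at the top of this file, and it fails fast if a full pass attaches no path at all. A minimal, runnable sketch of the exit policy follows; all names and the boolean-helper structure are illustrative, not part of the plugin's API:

```go
package main

import "fmt"

// enoughPaths distills the exit policy of the rewritten AttachDisk retry
// loop. The real loop additionally returns an error immediately if a full
// pass over the portals attaches no path at all.
func enoughPaths(attempt, pathsFound, portalCount int) bool {
	const (
		minMultipathCount = 2 // paths considered sufficient for multipath
		minAttachAttempts = 2 // minimum passes before settling for a subset
		maxAttachAttempts = 5 // after this many passes, mount whatever exists
	)
	switch {
	case pathsFound == portalCount:
		return true // every portal produced a device path
	case pathsFound >= minMultipathCount && attempt >= minAttachAttempts:
		return true // enough paths for multipath after enough passes
	default:
		return attempt >= maxAttachAttempts // out of retries; use what we have
	}
}

func main() {
	// Two of three portals attached on the second pass: good enough.
	fmt.Println(enoughPaths(2, 2, 3)) // true
	// One of three portals attached on the second pass: keep retrying.
	fmt.Println(enoughPaths(2, 1, 3)) // false
}
```

The effect is a bounded wait for a healthy multipath set: a degraded-but-usable set of paths is accepted after two passes, and a single path only after five.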
return func(b iscsiDiskMounter, devicePath string, util *ISCSIUtil) (string, error) { globalPDPath := b.manager.MakeGlobalVDPDName(*b.iscsiDisk) // Create dir like /var/lib/kubelet/plugins/kubernetes.io/iscsi/volumeDevices/{ifaceName}/{portal-some_iqn-lun-lun_id} if err := os.MkdirAll(globalPDPath, 0750); err != nil { - glog.Errorf("iscsi: failed to mkdir %s, error", globalPDPath) + klog.Errorf("iscsi: failed to mkdir %s, error", globalPDPath) return "", err } // Persist iscsi disk config to json file for DetachDisk path @@ -463,12 +506,12 @@ func globalPDPathOperation(b iscsiDiskMounter) func(iscsiDiskMounter, string, *I } // Return confirmed devicePath to caller if !notMnt { - glog.Infof("iscsi: %s already mounted", globalPDPath) + klog.Infof("iscsi: %s already mounted", globalPDPath) return devicePath, nil } // Create dir like /var/lib/kubelet/plugins/kubernetes.io/iscsi/{ifaceName}/{portal-some_iqn-lun-lun_id} if err := os.MkdirAll(globalPDPath, 0750); err != nil { - glog.Errorf("iscsi: failed to mkdir %s, error", globalPDPath) + klog.Errorf("iscsi: failed to mkdir %s, error", globalPDPath) return "", err } // Persist iscsi disk config to json file for DetachDisk path @@ -476,7 +519,7 @@ func globalPDPathOperation(b iscsiDiskMounter) func(iscsiDiskMounter, string, *I err = b.mounter.FormatAndMount(devicePath, globalPDPath, b.fsType, nil) if err != nil { - glog.Errorf("iscsi: failed to mount iscsi volume %s [%s] to %s, error %v", devicePath, b.fsType, globalPDPath, err) + klog.Errorf("iscsi: failed to mount iscsi volume %s [%s] to %s, error %v", devicePath, b.fsType, globalPDPath, err) } return devicePath, nil @@ -498,7 +541,7 @@ func deleteDevice(deviceName string) error { } else if 0 == written { return fmt.Errorf("No data written to file: %s", filename) } - glog.V(4).Infof("Deleted block device: %s", deviceName) + klog.V(4).Infof("Deleted block device: %s", deviceName) return nil } @@ -507,13 +550,13 @@ func deleteDevice(deviceName string) error { func deleteDevices(c iscsiDiskUnmounter) error { lunNumber, err := strconv.Atoi(c.iscsiDisk.Lun) if err != nil { - glog.Errorf("iscsi delete devices: lun is not a number: %s\nError: %v", c.iscsiDisk.Lun, err) + klog.Errorf("iscsi delete devices: lun is not a number: %s\nError: %v", c.iscsiDisk.Lun, err) return err } // Enumerate the devices so we can delete them deviceNames, err := c.deviceUtil.FindDevicesForISCSILun(c.iscsiDisk.Iqn, lunNumber) if err != nil { - glog.Errorf("iscsi delete devices: could not get devices associated with LUN %d on target %s\nError: %v", + klog.Errorf("iscsi delete devices: could not get devices associated with LUN %d on target %s\nError: %v", lunNumber, c.iscsiDisk.Iqn, err) return err } @@ -530,15 +573,15 @@ func deleteDevices(c iscsiDiskUnmounter) error { for mpathDevice := range mpathDevices { _, err = c.exec.Run("multipath", "-f", mpathDevice) if err != nil { - glog.Warningf("Warning: Failed to flush multipath device map: %s\nError: %v", mpathDevice, err) + klog.Warningf("Warning: Failed to flush multipath device map: %s\nError: %v", mpathDevice, err) // Fall through -- keep deleting the block devices } - glog.V(4).Infof("Flushed multipath device: %s", mpathDevice) + klog.V(4).Infof("Flushed multipath device: %s", mpathDevice) } for _, deviceName := range deviceNames { err = deleteDevice(deviceName) if err != nil { - glog.Warningf("Warning: Failed to delete block device: %s\nError: %v", deviceName, err) + klog.Warningf("Warning: Failed to delete block device: %s\nError: %v", deviceName, err) // Fall through 
-- keep deleting other block devices
 		}
 	}
 
@@ -550,13 +593,21 @@ func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error {
 	if pathExists, pathErr := volumeutil.PathExists(mntPath); pathErr != nil {
 		return fmt.Errorf("Error checking if path exists: %v", pathErr)
 	} else if !pathExists {
-		glog.Warningf("Warning: Unmount skipped because path does not exist: %v", mntPath)
+		klog.Warningf("Warning: Unmount skipped because path does not exist: %v", mntPath)
 		return nil
 	}
-	if err := c.mounter.Unmount(mntPath); err != nil {
-		glog.Errorf("iscsi detach disk: failed to unmount: %s\nError: %v", mntPath, err)
+
+	notMnt, err := c.mounter.IsLikelyNotMountPoint(mntPath)
+	if err != nil {
 		return err
 	}
+	if !notMnt {
+		if err := c.mounter.Unmount(mntPath); err != nil {
+			klog.Errorf("iscsi detach disk: failed to unmount: %s\nError: %v", mntPath, err)
+			return err
+		}
+	}
+
 	// if device is no longer used, see if need to logout the target
 	device, prefix, err := extractDeviceAndPrefix(mntPath)
 	if err != nil {
@@ -588,7 +639,7 @@ func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error {
 
 	// Delete all the scsi devices and any multipath devices after unmounting
 	if err = deleteDevices(c); err != nil {
-		glog.Warningf("iscsi detach disk: failed to delete devices\nError: %v", err)
+		klog.Warningf("iscsi detach disk: failed to delete devices\nError: %v", err)
 		// Fall through -- even if deleting fails, a logout may fix problems
 	}
 
@@ -619,7 +670,7 @@ func (util *ISCSIUtil) DetachBlockISCSIDisk(c iscsiDiskUnmapper, mapPath string)
 	if pathExists, pathErr := volumeutil.PathExists(mapPath); pathErr != nil {
 		return fmt.Errorf("Error checking if path exists: %v", pathErr)
 	} else if !pathExists {
-		glog.Warningf("Warning: Unmap skipped because path does not exist: %v", mapPath)
+		klog.Warningf("Warning: Unmap skipped because path does not exist: %v", mapPath)
 		return nil
 	}
 	// If we arrive here, device is no longer used, see if need to logout the target
@@ -660,7 +711,7 @@ func (util *ISCSIUtil) DetachBlockISCSIDisk(c iscsiDiskUnmapper, mapPath string)
 	}
 	devicePath := getDevByPath(portals[0], iqn, lun)
-	glog.V(5).Infof("iscsi: devicePath: %s", devicePath)
+	klog.V(5).Infof("iscsi: devicePath: %s", devicePath)
 	if _, err = os.Stat(devicePath); err != nil {
 		return fmt.Errorf("failed to validate devicePath: %s", devicePath)
 	}
@@ -668,32 +719,11 @@ func (util *ISCSIUtil) DetachBlockISCSIDisk(c iscsiDiskUnmapper, mapPath string)
 	if mappedDevicePath := c.deviceUtil.FindMultipathDeviceForDevice(devicePath); mappedDevicePath != "" {
 		devicePath = mappedDevicePath
 	}
-	// Get loopback device which takes fd lock for devicePath before detaching a volume from node.
-	// TODO: This is a workaround for issue #54108
-	// Currently local attach plugins such as FC, iSCSI, RBD can't obtain devicePath during
-	// GenerateUnmapDeviceFunc() in operation_generator. As a result, these plugins fail to get
-	// and remove loopback device then it will be remained on kubelet node. To avoid the problem,
-	// local attach plugins needs to remove loopback device during TearDownDevice().
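Note on the deletion in progress here (the `-` lines above and below): this hunk removes the loop-device workaround for Kubernetes issue #54108. The old code looked up the loopback device holding an fd lock on devicePath before detaching, then removed that loopback once detach succeeded; after this change, DetachBlockISCSIDisk simply resolves the multipath device (if any) and calls detachISCSIDisk to log out of the target and delete the node records. Judging by the deleted TODO, the workaround is no longer needed after this rebase, presumably because loop-device cleanup now happens in the generic block-volume path rather than in each attachable plugin.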
-	blkUtil := volumepathhandler.NewBlockVolumePathHandler()
-	loop, err := volumepathhandler.BlockVolumePathHandler.GetLoopDevice(blkUtil, devicePath)
-	if err != nil {
-		if err.Error() != volumepathhandler.ErrDeviceNotFound {
-			return fmt.Errorf("failed to get loopback for device: %v, err: %v", devicePath, err)
-		}
-		glog.Warningf("iscsi: loopback for device: %s not found", device)
-	}
 	// Detach a volume from kubelet node
 	err = util.detachISCSIDisk(c.exec, portals, iqn, iface, volName, initiatorName, found)
 	if err != nil {
 		return fmt.Errorf("failed to finish detachISCSIDisk, err: %v", err)
 	}
-	if len(loop) != 0 {
-		// The volume was successfully detached from node. We can safely remove the loopback.
-		err = volumepathhandler.BlockVolumePathHandler.RemoveLoopDevice(blkUtil, loop)
-		if err != nil {
-			return fmt.Errorf("failed to remove loopback :%v, err: %v", loop, err)
-		}
-	}
 	return nil
 }
 
@@ -705,16 +735,16 @@ func (util *ISCSIUtil) detachISCSIDisk(exec mount.Exec, portals []string, iqn, i
 			logoutArgs = append(logoutArgs, []string{"-I", iface}...)
 			deleteArgs = append(deleteArgs, []string{"-I", iface}...)
 		}
-		glog.Infof("iscsi: log out target %s iqn %s iface %s", portal, iqn, iface)
+		klog.Infof("iscsi: log out target %s iqn %s iface %s", portal, iqn, iface)
 		out, err := exec.Run("iscsiadm", logoutArgs...)
 		if err != nil {
-			glog.Errorf("iscsi: failed to detach disk Error: %s", string(out))
+			klog.Errorf("iscsi: failed to detach disk Error: %s", string(out))
 		}
 		// Delete the node record
-		glog.Infof("iscsi: delete node record target %s iqn %s", portal, iqn)
+		klog.Infof("iscsi: delete node record target %s iqn %s", portal, iqn)
 		out, err = exec.Run("iscsiadm", deleteArgs...)
if err != nil { - glog.Errorf("iscsi: failed to delete iface Error: %s", string(out)) + klog.Errorf("iscsi: failed to delete iface Error: %s", string(out)) } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/local/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/local/BUILD index 8fe054c70bb7..a17a11a58bc5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/local/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/local/BUILD @@ -20,7 +20,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/local/local.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/local/local.go index ca4b53759167..45249313424c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/local/local.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/local/local.go @@ -23,7 +23,7 @@ import ( "runtime" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -87,7 +87,7 @@ func (plugin *localVolumePlugin) RequiresRemount() bool { } func (plugin *localVolumePlugin) SupportsMountOption() bool { - return false + return true } func (plugin *localVolumePlugin) SupportsBulkVolumeVerification() bool { @@ -110,7 +110,7 @@ func getVolumeSource(spec *volume.Spec) (*v1.LocalVolumeSource, bool, error) { } func (plugin *localVolumePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) { - volumeSource, readOnly, err := getVolumeSource(spec) + _, readOnly, err := getVolumeSource(spec) if err != nil { return nil, err } @@ -128,9 +128,10 @@ func (plugin *localVolumePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ vo mounter: plugin.host.GetMounter(plugin.GetPluginName()), plugin: plugin, globalPath: globalLocalPath, - MetricsProvider: volume.NewMetricsStatFS(volumeSource.Path), + MetricsProvider: volume.NewMetricsStatFS(plugin.host.GetPodVolumeDir(pod.UID, stringsutil.EscapeQualifiedNameForDisk(localVolumePluginName), spec.Name())), }, - readOnly: readOnly, + mountOptions: util.MountOptionFromSpec(spec), + readOnly: readOnly, }, nil } @@ -256,7 +257,7 @@ func (plugin *localVolumePlugin) NewDeviceMounter() (volume.DeviceMounter, error } func (dm *deviceMounter) mountLocalBlockDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error { - glog.V(4).Infof("local: mounting device %s to %s", devicePath, deviceMountPath) + klog.V(4).Infof("local: mounting device %s to %s", devicePath, deviceMountPath) notMnt, err := dm.mounter.IsLikelyNotMountPoint(deviceMountPath) if err != nil { if os.IsNotExist(err) { @@ -290,7 +291,7 @@ func (dm *deviceMounter) mountLocalBlockDevice(spec *volume.Spec, devicePath str os.Remove(deviceMountPath) return fmt.Errorf("local: failed to mount device %s at %s (fstype: %s), error %v", devicePath, deviceMountPath, fstype, err) } - glog.V(3).Infof("local: successfully mount device %s at %s (fstype: %s)", devicePath, deviceMountPath, fstype) + klog.V(3).Infof("local: successfully mount device %s at %s (fstype: %s)", devicePath, deviceMountPath, fstype) return nil } @@ -321,10 +322,9 @@ func getVolumeSourceFSType(spec *volume.Spec) (string, error) { 
spec.PersistentVolume.Spec.Local != nil { if spec.PersistentVolume.Spec.Local.FSType != nil { return *spec.PersistentVolume.Spec.Local.FSType, nil - } else { - // if the FSType is not set in local PV spec, setting it to default ("ext4") - return defaultFSType, nil } + // if the FSType is not set in local PV spec, setting it to default ("ext4") + return defaultFSType, nil } return "", fmt.Errorf("spec does not reference a Local volume type") @@ -393,7 +393,8 @@ func (l *localVolume) GetPath() string { type localVolumeMounter struct { *localVolume - readOnly bool + readOnly bool + mountOptions []string } var _ volume.Mounter = &localVolumeMounter{} @@ -433,9 +434,9 @@ func (m *localVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { } notMnt, err := m.mounter.IsNotMountPoint(dir) - glog.V(4).Infof("LocalVolume mount setup: PodDir(%s) VolDir(%s) Mounted(%t) Error(%v), ReadOnly(%t)", dir, m.globalPath, !notMnt, err, m.readOnly) + klog.V(4).Infof("LocalVolume mount setup: PodDir(%s) VolDir(%s) Mounted(%t) Error(%v), ReadOnly(%t)", dir, m.globalPath, !notMnt, err, m.readOnly) if err != nil && !os.IsNotExist(err) { - glog.Errorf("cannot validate mount point: %s %v", dir, err) + klog.Errorf("cannot validate mount point: %s %v", dir, err) return err } @@ -445,7 +446,7 @@ func (m *localVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { refs, err := m.mounter.GetMountRefs(m.globalPath) if fsGroup != nil { if err != nil { - glog.Errorf("cannot collect mounting information: %s %v", m.globalPath, err) + klog.Errorf("cannot collect mounting information: %s %v", m.globalPath, err) return err } @@ -467,7 +468,7 @@ func (m *localVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { if runtime.GOOS != "windows" { // skip below MkdirAll for windows since the "bind mount" logic is implemented differently in mount_wiondows.go if err := os.MkdirAll(dir, 0750); err != nil { - glog.Errorf("mkdir failed on disk %s (%v)", dir, err) + klog.Errorf("mkdir failed on disk %s (%v)", dir, err) return err } } @@ -476,30 +477,31 @@ func (m *localVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { if m.readOnly { options = append(options, "ro") } + mountOptions := util.JoinMountOptions(options, m.mountOptions) - glog.V(4).Infof("attempting to mount %s", dir) + klog.V(4).Infof("attempting to mount %s", dir) globalPath := util.MakeAbsolutePath(runtime.GOOS, m.globalPath) - err = m.mounter.Mount(globalPath, dir, "", options) + err = m.mounter.Mount(globalPath, dir, "", mountOptions) if err != nil { - glog.Errorf("Mount of volume %s failed: %v", dir, err) + klog.Errorf("Mount of volume %s failed: %v", dir, err) notMnt, mntErr := m.mounter.IsNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsNotMountPoint check failed: %v", mntErr) return err } if !notMnt { if mntErr = m.mounter.Unmount(dir); mntErr != nil { - glog.Errorf("Failed to unmount: %v", mntErr) + klog.Errorf("Failed to unmount: %v", mntErr) return err } notMnt, mntErr = m.mounter.IsNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsNotMountPoint check failed: %v", mntErr) return err } if !notMnt { // This is very odd, we don't expect it. We'll try again next sync loop. - glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir) + klog.Errorf("%s is still mounted, despite call to unmount(). 
Will try again next sync loop.", dir) return err } } @@ -539,7 +541,7 @@ func (u *localVolumeUnmounter) TearDown() error { // TearDownAt unmounts the bind mount func (u *localVolumeUnmounter) TearDownAt(dir string) error { - glog.V(4).Infof("Unmounting volume %q at path %q\n", u.volName, dir) + klog.V(4).Infof("Unmounting volume %q at path %q\n", u.volName, dir) return util.UnmountMountPoint(dir, u.mounter, true) /* extensiveMountPointCheck = true */ } @@ -554,7 +556,7 @@ var _ volume.BlockVolumeMapper = &localVolumeMapper{} // SetUpDevice provides physical device path for the local PV. func (m *localVolumeMapper) SetUpDevice() (string, error) { globalPath := util.MakeAbsolutePath(runtime.GOOS, m.globalPath) - glog.V(4).Infof("SetupDevice returning path %s", globalPath) + klog.V(4).Infof("SetupDevice returning path %s", globalPath) return globalPath, nil } @@ -570,8 +572,8 @@ type localVolumeUnmapper struct { var _ volume.BlockVolumeUnmapper = &localVolumeUnmapper{} // TearDownDevice will undo SetUpDevice procedure. In local PV, all of this already handled by operation_generator. -func (u *localVolumeUnmapper) TearDownDevice(mapPath, devicePath string) error { - glog.V(4).Infof("local: TearDownDevice completed for: %s", mapPath) +func (u *localVolumeUnmapper) TearDownDevice(mapPath, _ string) error { + klog.V(4).Infof("local: TearDownDevice completed for: %s", mapPath) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/nfs/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/nfs/BUILD index 81fae48e2a26..28885d7fb4f9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/nfs/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/nfs/BUILD @@ -22,7 +22,7 @@ go_library( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/nfs/nfs.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/nfs/nfs.go index 588687cdebb6..92384b3db013 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/nfs/nfs.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/nfs/nfs.go @@ -21,10 +21,10 @@ import ( "os" "runtime" - "github.com/golang/glog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" @@ -235,7 +235,7 @@ func (b *nfsMounter) SetUp(fsGroup *int64) error { func (b *nfsMounter) SetUpAt(dir string, fsGroup *int64) error { notMnt, err := b.mounter.IsNotMountPoint(dir) - glog.V(4).Infof("NFS mount set up: %s %v %v", dir, !notMnt, err) + klog.V(4).Infof("NFS mount set up: %s %v %v", dir, !notMnt, err) if err != nil && !os.IsNotExist(err) { return err } @@ -255,22 +255,22 @@ func (b *nfsMounter) SetUpAt(dir string, fsGroup *int64) error { if err != nil { notMnt, mntErr := b.mounter.IsNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsNotMountPoint check failed: %v", mntErr) return err } if !notMnt { if mntErr = b.mounter.Unmount(dir); mntErr != nil { - glog.Errorf("Failed to unmount: %v", mntErr) + klog.Errorf("Failed to unmount: %v", mntErr) return err } notMnt, mntErr := 
b.mounter.IsNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsNotMountPoint check failed: %v", mntErr) return err } if !notMnt { // This is very odd, we don't expect it. We'll try again next sync loop. - glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir) + klog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir) return err } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/noop_expandable_plugin.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/noop_expandable_plugin.go new file mode 100644 index 000000000000..3d3d5e1dfd75 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/noop_expandable_plugin.go @@ -0,0 +1,77 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package volume + +import ( + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/types" +) + +type noopExpandableVolumePluginInstance struct { + spec *Spec +} + +var _ ExpandableVolumePlugin = &noopExpandableVolumePluginInstance{} + +func (n *noopExpandableVolumePluginInstance) ExpandVolumeDevice(spec *Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) { + return newSize, nil +} + +func (n *noopExpandableVolumePluginInstance) Init(host VolumeHost) error { + return nil +} + +func (n *noopExpandableVolumePluginInstance) GetPluginName() string { + return n.spec.KubeletExpandablePluginName() +} + +func (n *noopExpandableVolumePluginInstance) GetVolumeName(spec *Spec) (string, error) { + return n.spec.Name(), nil +} + +func (n *noopExpandableVolumePluginInstance) CanSupport(spec *Spec) bool { + return true +} + +func (n *noopExpandableVolumePluginInstance) RequiresRemount() bool { + return false +} + +func (n *noopExpandableVolumePluginInstance) NewMounter(spec *Spec, podRef *v1.Pod, opts VolumeOptions) (Mounter, error) { + return nil, nil +} + +func (n *noopExpandableVolumePluginInstance) NewUnmounter(name string, podUID types.UID) (Unmounter, error) { + return nil, nil +} + +func (n *noopExpandableVolumePluginInstance) ConstructVolumeSpec(volumeName, mountPath string) (*Spec, error) { + return n.spec, nil +} + +func (n *noopExpandableVolumePluginInstance) SupportsMountOption() bool { + return true +} + +func (n *noopExpandableVolumePluginInstance) SupportsBulkVolumeVerification() bool { + return false +} + +func (n *noopExpandableVolumePluginInstance) RequiresFSResize() bool { + return true +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/BUILD index d0b5870a5b78..61d5467e4dfe 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/BUILD @@ -15,7 +15,6 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/volume/photon_pd", deps = [ 
- "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/photon:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/util/strings:go_default_library", @@ -25,7 +24,8 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//staging/src/k8s.io/cloud-provider:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -44,7 +44,7 @@ go_test( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/attacher.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/attacher.go index 016fd65b8391..d2570a4a7430 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/attacher.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/attacher.go @@ -25,9 +25,9 @@ import ( "strings" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/cloudprovider/providers/photon" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" @@ -50,7 +50,7 @@ var _ volume.DeviceMountableVolumePlugin = &photonPersistentDiskPlugin{} func (plugin *photonPersistentDiskPlugin) NewAttacher() (volume.Attacher, error) { photonCloud, err := getCloudProvider(plugin.host.GetCloudProvider()) if err != nil { - glog.Errorf("Photon Controller attacher: NewAttacher failed to get cloud provider") + klog.Errorf("Photon Controller attacher: NewAttacher failed to get cloud provider") return nil, err } @@ -74,22 +74,22 @@ func (attacher *photonPersistentDiskAttacher) Attach(spec *volume.Spec, nodeName hostName := string(nodeName) volumeSource, _, err := getVolumeSource(spec) if err != nil { - glog.Errorf("Photon Controller attacher: Attach failed to get volume source") + klog.Errorf("Photon Controller attacher: Attach failed to get volume source") return "", err } attached, err := attacher.photonDisks.DiskIsAttached(context.TODO(), volumeSource.PdID, nodeName) if err != nil { - glog.Warningf("Photon Controller: couldn't check if disk is Attached for host %s, will try attach disk: %+v", hostName, err) + klog.Warningf("Photon Controller: couldn't check if disk is Attached for host %s, will try attach disk: %+v", hostName, err) attached = false } if !attached { - glog.V(4).Infof("Photon Controller: Attach disk called for host %s", hostName) + klog.V(4).Infof("Photon Controller: Attach disk called for host %s", hostName) err = attacher.photonDisks.AttachDisk(context.TODO(), volumeSource.PdID, nodeName) if err != nil { - glog.Errorf("Error attaching volume %q to node %q: %+v", volumeSource.PdID, nodeName, err) + klog.Errorf("Error attaching volume %q to node %q: %+v", volumeSource.PdID, nodeName, err) return "", err } } @@ -105,7 +105,7 @@ func (attacher *photonPersistentDiskAttacher) VolumesAreAttached(specs []*volume for _, spec := range specs { volumeSource, _, err := getVolumeSource(spec) if err != nil { - glog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err) + klog.Errorf("Error getting volume (%q) source : %v", 
spec.Name(), err) continue } @@ -115,7 +115,7 @@ func (attacher *photonPersistentDiskAttacher) VolumesAreAttached(specs []*volume } attachedResult, err := attacher.photonDisks.DisksAreAttached(context.TODO(), pdIDList, nodeName) if err != nil { - glog.Errorf( + klog.Errorf( "Error checking if volumes (%v) are attached to current node (%q). err=%v", pdIDList, nodeName, err) return volumesAttachedCheck, err @@ -125,7 +125,7 @@ func (attacher *photonPersistentDiskAttacher) VolumesAreAttached(specs []*volume if !attached { spec := volumeSpecMap[pdID] volumesAttachedCheck[spec] = false - glog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached", pdID, spec.Name()) + klog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached", pdID, spec.Name()) } } return volumesAttachedCheck, nil @@ -134,7 +134,7 @@ func (attacher *photonPersistentDiskAttacher) VolumesAreAttached(specs []*volume func (attacher *photonPersistentDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) { volumeSource, _, err := getVolumeSource(spec) if err != nil { - glog.Errorf("Photon Controller attacher: WaitForAttach failed to get volume source") + klog.Errorf("Photon Controller attacher: WaitForAttach failed to get volume source") return "", err } @@ -154,14 +154,14 @@ func (attacher *photonPersistentDiskAttacher) WaitForAttach(spec *volume.Spec, d for { select { case <-ticker.C: - glog.V(4).Infof("Checking PD %s is attached", volumeSource.PdID) + klog.V(4).Infof("Checking PD %s is attached", volumeSource.PdID) checkPath, err := verifyDevicePath(devicePath) if err != nil { // Log error, if any, and continue checking periodically. See issue #11321 - glog.Warningf("Photon Controller attacher: WaitForAttach with devicePath %s Checking PD %s Error verify path", devicePath, volumeSource.PdID) + klog.Warningf("Photon Controller attacher: WaitForAttach with devicePath %s Checking PD %s Error verify path", devicePath, volumeSource.PdID) } else if checkPath != "" { // A device path has successfully been created for the VMDK - glog.V(4).Infof("Successfully found attached PD %s.", volumeSource.PdID) + klog.V(4).Infof("Successfully found attached PD %s.", volumeSource.PdID) // map path with spec.Name() volName := spec.Name() realPath, _ := filepath.EvalSymlinks(devicePath) @@ -180,7 +180,7 @@ func (attacher *photonPersistentDiskAttacher) WaitForAttach(spec *volume.Spec, d func (attacher *photonPersistentDiskAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) { volumeSource, _, err := getVolumeSource(spec) if err != nil { - glog.Errorf("Photon Controller attacher: GetDeviceMountPath failed to get volume source") + klog.Errorf("Photon Controller attacher: GetDeviceMountPath failed to get volume source") return "", err } @@ -201,7 +201,7 @@ func (attacher *photonPersistentDiskAttacher) MountDevice(spec *volume.Spec, dev if err != nil { if os.IsNotExist(err) { if err := os.MkdirAll(deviceMountPath, 0750); err != nil { - glog.Errorf("Failed to create directory at %#v. err: %s", deviceMountPath, err) + klog.Errorf("Failed to create directory at %#v. err: %s", deviceMountPath, err) return err } notMnt = true @@ -212,7 +212,7 @@ func (attacher *photonPersistentDiskAttacher) MountDevice(spec *volume.Spec, dev volumeSource, _, err := getVolumeSource(spec) if err != nil { - glog.Errorf("Photon Controller attacher: MountDevice failed to get volume source. 
err: %s", err) + klog.Errorf("Photon Controller attacher: MountDevice failed to get volume source. err: %s", err) return err } @@ -226,7 +226,7 @@ func (attacher *photonPersistentDiskAttacher) MountDevice(spec *volume.Spec, dev os.Remove(deviceMountPath) return err } - glog.V(4).Infof("formatting spec %v devicePath %v deviceMountPath %v fs %v with options %+v", spec.Name(), devicePath, deviceMountPath, volumeSource.FSType, options) + klog.V(4).Infof("formatting spec %v devicePath %v deviceMountPath %v fs %v with options %+v", spec.Name(), devicePath, deviceMountPath, volumeSource.FSType, options) } return nil } @@ -243,7 +243,7 @@ var _ volume.DeviceUnmounter = &photonPersistentDiskDetacher{} func (plugin *photonPersistentDiskPlugin) NewDetacher() (volume.Detacher, error) { photonCloud, err := getCloudProvider(plugin.host.GetCloudProvider()) if err != nil { - glog.Errorf("Photon Controller attacher: NewDetacher failed to get cloud provider. err: %s", err) + klog.Errorf("Photon Controller attacher: NewDetacher failed to get cloud provider. err: %s", err) return nil, err } @@ -265,19 +265,19 @@ func (detacher *photonPersistentDiskDetacher) Detach(volumeName string, nodeName attached, err := detacher.photonDisks.DiskIsAttached(context.TODO(), pdID, nodeName) if err != nil { // Log error and continue with detach - glog.Errorf( + klog.Errorf( "Error checking if persistent disk (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v", pdID, hostName, err) } if err == nil && !attached { // Volume is already detached from node. - glog.V(4).Infof("detach operation was successful. persistent disk %q is already detached from node %q.", pdID, hostName) + klog.V(4).Infof("detach operation was successful. persistent disk %q is already detached from node %q.", pdID, hostName) return nil } if err := detacher.photonDisks.DetachDisk(context.TODO(), pdID, nodeName); err != nil { - glog.Errorf("Error detaching volume %q: %v", pdID, err) + klog.Errorf("Error detaching volume %q: %v", pdID, err) return err } return nil @@ -292,7 +292,7 @@ func (detacher *photonPersistentDiskDetacher) WaitForDetach(devicePath string, t for { select { case <-ticker.C: - glog.V(4).Infof("Checking device %q is detached.", devicePath) + klog.V(4).Infof("Checking device %q is detached.", devicePath) if pathExists, err := volumeutil.PathExists(devicePath); err != nil { return fmt.Errorf("Error checking if device path exists: %v", err) } else if !pathExists { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/photon_pd.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/photon_pd.go index 89c385fdb735..03cbd9b0ecb4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/photon_pd.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/photon_pd.go @@ -21,11 +21,11 @@ import ( "os" "path" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" utilstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" @@ -62,7 +62,7 @@ func (plugin *photonPersistentDiskPlugin) GetPluginName() string { func (plugin *photonPersistentDiskPlugin) GetVolumeName(spec *volume.Spec) (string, error) { volumeSource, _, err := getVolumeSource(spec) if err != nil { - glog.Errorf("Photon volume plugin: GetVolumeName failed to get volume source") + klog.Errorf("Photon 
volume plugin: GetVolumeName failed to get volume source") return "", err } @@ -97,7 +97,7 @@ func (plugin *photonPersistentDiskPlugin) NewUnmounter(volName string, podUID ty func (plugin *photonPersistentDiskPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Mounter, error) { vvol, _, err := getVolumeSource(spec) if err != nil { - glog.Errorf("Photon volume plugin: newMounterInternal failed to get volume source") + klog.Errorf("Photon volume plugin: newMounterInternal failed to get volume source") return nil, err } @@ -114,7 +114,9 @@ func (plugin *photonPersistentDiskPlugin) newMounterInternal(spec *volume.Spec, plugin: plugin, }, fsType: fsType, - diskMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil + diskMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host), + mountOption: util.MountOptionFromSpec(spec), + }, nil } func (plugin *photonPersistentDiskPlugin) newUnmounterInternal(volName string, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Unmounter, error) { @@ -177,6 +179,7 @@ type photonPersistentDiskMounter struct { *photonPersistentDisk fsType string diskMounter *mount.SafeFormatAndMount + mountOption []string } func (b *photonPersistentDiskMounter) GetAttributes() volume.Attributes { @@ -199,12 +202,12 @@ func (b *photonPersistentDiskMounter) SetUp(fsGroup *int64) error { // SetUp attaches the disk and bind mounts to the volume path. func (b *photonPersistentDiskMounter) SetUpAt(dir string, fsGroup *int64) error { - glog.V(4).Infof("Photon Persistent Disk setup %s to %s", b.pdID, dir) + klog.V(4).Infof("Photon Persistent Disk setup %s to %s", b.pdID, dir) // TODO: handle failed mounts here. notmnt, err := b.mounter.IsLikelyNotMountPoint(dir) if err != nil && !os.IsNotExist(err) { - glog.Errorf("cannot validate mount point: %s %v", dir, err) + klog.Errorf("cannot validate mount point: %s %v", dir, err) return err } if !notmnt { @@ -212,7 +215,7 @@ func (b *photonPersistentDiskMounter) SetUpAt(dir string, fsGroup *int64) error } if err := os.MkdirAll(dir, 0750); err != nil { - glog.Errorf("mkdir failed on disk %s (%v)", dir, err) + klog.Errorf("mkdir failed on disk %s (%v)", dir, err) return err } @@ -220,32 +223,33 @@ func (b *photonPersistentDiskMounter) SetUpAt(dir string, fsGroup *int64) error // Perform a bind mount to the full path to allow duplicate mounts of the same PD. globalPDPath := makeGlobalPDPath(b.plugin.host, b.pdID) - glog.V(4).Infof("attempting to mount %s", dir) + klog.V(4).Infof("attempting to mount %s", dir) - err = b.mounter.Mount(globalPDPath, dir, "", options) + mountOptions := util.JoinMountOptions(options, b.mountOption) + err = b.mounter.Mount(globalPDPath, dir, "", mountOptions) if err != nil { notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if !notmnt { if mntErr = b.mounter.Unmount(dir); mntErr != nil { - glog.Errorf("Failed to unmount: %v", mntErr) + klog.Errorf("Failed to unmount: %v", mntErr) return err } notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if !notmnt { - glog.Errorf("%s is still mounted, despite call to unmount(). 
Will try again next sync loop.", b.GetPath()) + klog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", b.GetPath()) return err } } os.Remove(dir) - glog.Errorf("Mount of disk %s failed: %v", dir, err) + klog.Errorf("Mount of disk %s failed: %v", dir, err) return err } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/photon_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/photon_util.go index a7d3a0fe9f33..fe3a6c24c6d7 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/photon_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/photon_util.go @@ -23,9 +23,9 @@ import ( "strings" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" "k8s.io/kubernetes/pkg/cloudprovider/providers/photon" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" @@ -63,7 +63,7 @@ func scsiHostScan() { name := scsi_path + f.Name() + "/scan" data := []byte("- - -") ioutil.WriteFile(name, data, 0666) - glog.Errorf("scsiHostScan scan for %s", name) + klog.Errorf("scsiHostScan scan for %s", name) } } } @@ -75,7 +75,7 @@ func verifyDevicePath(path string) (string, error) { return path, nil } - glog.V(4).Infof("verifyDevicePath: path not exists yet") + klog.V(4).Infof("verifyDevicePath: path not exists yet") return "", nil } @@ -83,7 +83,7 @@ func verifyDevicePath(path string) (string, error) { func (util *PhotonDiskUtil) CreateVolume(p *photonPersistentDiskProvisioner) (pdID string, capacityGB int, fstype string, err error) { cloud, err := getCloudProvider(p.plugin.host.GetCloudProvider()) if err != nil { - glog.Errorf("Photon Controller Util: CreateVolume failed to get cloud provider. Error [%v]", err) + klog.Errorf("Photon Controller Util: CreateVolume failed to get cloud provider. Error [%v]", err) return "", 0, "", err } @@ -106,20 +106,20 @@ func (util *PhotonDiskUtil) CreateVolume(p *photonPersistentDiskProvisioner) (pd volumeOptions.Flavor = value case volume.VolumeParameterFSType: fstype = value - glog.V(4).Infof("Photon Controller Util: Setting fstype to %s", fstype) + klog.V(4).Infof("Photon Controller Util: Setting fstype to %s", fstype) default: - glog.Errorf("Photon Controller Util: invalid option %s for volume plugin %s.", parameter, p.plugin.GetPluginName()) + klog.Errorf("Photon Controller Util: invalid option %s for volume plugin %s.", parameter, p.plugin.GetPluginName()) return "", 0, "", fmt.Errorf("Photon Controller Util: invalid option %s for volume plugin %s.", parameter, p.plugin.GetPluginName()) } } pdID, err = cloud.CreateDisk(volumeOptions) if err != nil { - glog.Errorf("Photon Controller Util: failed to CreateDisk. Error [%v]", err) + klog.Errorf("Photon Controller Util: failed to CreateDisk. Error [%v]", err) return "", 0, "", err } - glog.V(4).Infof("Successfully created Photon Controller persistent disk %s", name) + klog.V(4).Infof("Successfully created Photon Controller persistent disk %s", name) return pdID, volSizeGB, "", nil } @@ -127,28 +127,28 @@ func (util *PhotonDiskUtil) CreateVolume(p *photonPersistentDiskProvisioner) (pd func (util *PhotonDiskUtil) DeleteVolume(pd *photonPersistentDiskDeleter) error { cloud, err := getCloudProvider(pd.plugin.host.GetCloudProvider()) if err != nil { - glog.Errorf("Photon Controller Util: DeleteVolume failed to get cloud provider. 
Error [%v]", err) + klog.Errorf("Photon Controller Util: DeleteVolume failed to get cloud provider. Error [%v]", err) return err } if err = cloud.DeleteDisk(pd.pdID); err != nil { - glog.Errorf("Photon Controller Util: failed to DeleteDisk for pdID %s. Error [%v]", pd.pdID, err) + klog.Errorf("Photon Controller Util: failed to DeleteDisk for pdID %s. Error [%v]", pd.pdID, err) return err } - glog.V(4).Infof("Successfully deleted PhotonController persistent disk %s", pd.pdID) + klog.V(4).Infof("Successfully deleted PhotonController persistent disk %s", pd.pdID) return nil } func getCloudProvider(cloud cloudprovider.Interface) (*photon.PCCloud, error) { if cloud == nil { - glog.Errorf("Photon Controller Util: Cloud provider not initialized properly") + klog.Errorf("Photon Controller Util: Cloud provider not initialized properly") return nil, fmt.Errorf("Photon Controller Util: Cloud provider not initialized properly") } pcc := cloud.(*photon.PCCloud) if pcc == nil { - glog.Errorf("Invalid cloud provider: expected Photon Controller") + klog.Errorf("Invalid cloud provider: expected Photon Controller") return nil, fmt.Errorf("Invalid cloud provider: expected Photon Controller") } return pcc, nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/plugins.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/plugins.go index e9ccf7e29677..a0cb57164643 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/plugins.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/plugins.go @@ -22,7 +22,6 @@ import ( "strings" "sync" - "github.com/golang/glog" authenticationv1 "k8s.io/api/authentication/v1" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -32,8 +31,9 @@ import ( "k8s.io/apimachinery/pkg/util/validation" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/record" + cloudprovider "k8s.io/cloud-provider" csiclientset "k8s.io/csi-api/pkg/client/clientset/versioned" - "k8s.io/kubernetes/pkg/cloudprovider" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume/util/recyclerclient" ) @@ -225,6 +225,13 @@ type ExpandableVolumePlugin interface { RequiresFSResize() bool } +// FSResizableVolumePlugin is an extension of ExpandableVolumePlugin and is used for volumes (flex) +// that require extra steps on nodes for expansion to complete +type FSResizableVolumePlugin interface { + ExpandableVolumePlugin + ExpandFS(spec *Spec, devicePath, deviceMountPath string, newSize, oldSize resource.Quantity) error +} + // VolumePluginWithAttachLimits is an extended interface of VolumePlugin that restricts number of // volumes that can be attached to a node. type VolumePluginWithAttachLimits interface { @@ -347,6 +354,8 @@ type VolumeHost interface { GetServiceAccountTokenFunc() func(namespace, name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) + DeleteServiceAccountTokenFunc() func(podUID types.UID) + // Returns an interface that should be used to execute any utilities in volume plugins GetExec(pluginName string) mount.Exec @@ -388,6 +397,36 @@ func (spec *Spec) Name() string { } } +// IsKubeletExpandable returns true for volume types that can be expanded only by the node +// and not the controller. 
Currently Flex volume is the only one in this category since +// it is typically not installed on the controller +func (spec *Spec) IsKubeletExpandable() bool { + switch { + case spec.Volume != nil: + return spec.Volume.FlexVolume != nil + case spec.PersistentVolume != nil: + return spec.PersistentVolume.Spec.FlexVolume != nil + default: + return false + + } +} + +// KubeletExpandablePluginName creates and returns a name for the plugin +// this is used in context on the controller where the plugin lookup fails +// as volume expansion on controller isn't supported, but a plugin name is +// required +func (spec *Spec) KubeletExpandablePluginName() string { + switch { + case spec.Volume != nil && spec.Volume.FlexVolume != nil: + return spec.Volume.FlexVolume.Driver + case spec.PersistentVolume != nil && spec.PersistentVolume.Spec.FlexVolume != nil: + return spec.PersistentVolume.Spec.FlexVolume.Driver + default: + return "" + } +} + // VolumeConfig is how volume plugins receive configuration. An instance // specific to the plugin will be passed to the plugin's // ProbeVolumePlugins(config) func. Reasonable defaults will be provided by @@ -475,7 +514,7 @@ func (pm *VolumePluginMgr) InitPlugins(plugins []VolumePlugin, prober DynamicPlu } if err := pm.prober.Init(); err != nil { // Prober init failure should not affect the initialization of other plugins. - glog.Errorf("Error initializing dynamic plugin prober: %s", err) + klog.Errorf("Error initializing dynamic plugin prober: %s", err) pm.prober = &dummyPluginProber{} } @@ -500,12 +539,12 @@ func (pm *VolumePluginMgr) InitPlugins(plugins []VolumePlugin, prober DynamicPlu } err := plugin.Init(host) if err != nil { - glog.Errorf("Failed to load volume plugin %s, error: %s", name, err.Error()) + klog.Errorf("Failed to load volume plugin %s, error: %s", name, err.Error()) allErrs = append(allErrs, err) continue } pm.plugins[name] = plugin - glog.V(1).Infof("Loaded volume plugin %q", name) + klog.V(1).Infof("Loaded volume plugin %q", name) } return utilerrors.NewAggregate(allErrs) } @@ -521,7 +560,7 @@ func (pm *VolumePluginMgr) initProbedPlugin(probedPlugin VolumePlugin) error { return fmt.Errorf("Failed to load volume plugin %s, error: %s", name, err.Error()) } - glog.V(1).Infof("Loaded volume plugin %q", name) + klog.V(1).Infof("Loaded volume plugin %q", name) return nil } @@ -600,14 +639,14 @@ func (pm *VolumePluginMgr) FindPluginByName(name string) (VolumePlugin, error) { func (pm *VolumePluginMgr) refreshProbedPlugins() { events, err := pm.prober.Probe() if err != nil { - glog.Errorf("Error dynamically probing plugins: %s", err) + klog.Errorf("Error dynamically probing plugins: %s", err) return // Use cached plugins upon failure. 
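Note on the plugins.go additions: the two Spec helpers above (IsKubeletExpandable, KubeletExpandablePluginName) pair with the FindExpandablePluginBySpec change just below, which hands back the new noopExpandableVolumePluginInstance (added earlier in this diff) when the plugin lookup fails for a Flex volume, since Flex drivers are typically installed only on nodes and must be resized there. A runnable sketch of that dispatch, using trimmed-down stand-in types and string results where the real code returns plugin objects (the driver name is hypothetical):

```go
package main

import "fmt"

// flexSource and spec are stand-ins for the Flex volume source and
// volume.Spec; only what the fallback logic needs is modeled here.
type flexSource struct{ Driver string }

type spec struct {
	Flex *flexSource // non-nil when the volume (or PV) is a Flex volume
}

// isKubeletExpandable mirrors Spec.IsKubeletExpandable: only Flex volumes
// are expanded by the node rather than by the controller.
func (s *spec) isKubeletExpandable() bool { return s.Flex != nil }

// pluginName mirrors Spec.KubeletExpandablePluginName: the Flex driver name
// stands in for the plugin name where controller-side lookup cannot work.
func (s *spec) pluginName() string {
	if s.Flex != nil {
		return s.Flex.Driver
	}
	return ""
}

// findExpandablePlugin mirrors the FindExpandablePluginBySpec fallback: a
// failed lookup for a kubelet-expandable volume yields a no-op plugin so the
// controller-side expand succeeds and the node does the actual resize.
func findExpandablePlugin(s *spec, lookupFailed bool) (string, error) {
	if !lookupFailed {
		return "real expandable plugin", nil
	}
	if s.isKubeletExpandable() {
		return "noop plugin for " + s.pluginName(), nil
	}
	return "", fmt.Errorf("no plugin found for spec")
}

func main() {
	got, err := findExpandablePlugin(&spec{Flex: &flexSource{Driver: "example.com/flexfs"}}, true)
	fmt.Println(got, err) // "noop plugin for example.com/flexfs" <nil>
}
```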
} for _, event := range events { if event.Op == ProbeAddOrUpdate { if err := pm.initProbedPlugin(event.Plugin); err != nil { - glog.Errorf("Error initializing dynamically probed plugin %s; error: %s", + klog.Errorf("Error initializing dynamically probed plugin %s; error: %s", event.Plugin.GetPluginName(), err) continue } @@ -616,7 +655,7 @@ func (pm *VolumePluginMgr) refreshProbedPlugins() { // Plugin is not available on ProbeRemove event, only PluginName delete(pm.probedPlugins, event.PluginName) } else { - glog.Errorf("Unknown Operation on PluginName: %s.", + klog.Errorf("Unknown Operation on PluginName: %s.", event.Plugin.GetPluginName()) } } @@ -797,6 +836,13 @@ func (pm *VolumePluginMgr) FindDeviceMountablePluginByName(name string) (DeviceM func (pm *VolumePluginMgr) FindExpandablePluginBySpec(spec *Spec) (ExpandableVolumePlugin, error) { volumePlugin, err := pm.FindPluginBySpec(spec) if err != nil { + if spec.IsKubeletExpandable() { + // for kubelet expandable volumes, return a noop plugin that + // returns success for expand on the controller + klog.Warningf("FindExpandablePluginBySpec(%s) -> returning noopExpandableVolumePluginInstance", spec.Name()) + return &noopExpandableVolumePluginInstance{spec}, nil + } + klog.Warningf("FindExpandablePluginBySpec(%s) -> err:%v", spec.Name(), err) return nil, err } @@ -845,6 +891,32 @@ func (pm *VolumePluginMgr) FindMapperPluginByName(name string) (BlockVolumePlugi return nil, nil } +// FindFSResizablePluginBySpec fetches a persistent volume plugin by spec +func (pm *VolumePluginMgr) FindFSResizablePluginBySpec(spec *Spec) (FSResizableVolumePlugin, error) { + volumePlugin, err := pm.FindPluginBySpec(spec) + if err != nil { + return nil, err + } + if fsResizablePlugin, ok := volumePlugin.(FSResizableVolumePlugin); ok { + return fsResizablePlugin, nil + } + return nil, nil +} + +// FindFSResizablePluginByName fetches a persistent volume plugin by name +func (pm *VolumePluginMgr) FindFSResizablePluginByName(name string) (FSResizableVolumePlugin, error) { + volumePlugin, err := pm.FindPluginByName(name) + if err != nil { + return nil, err + } + + if fsResizablePlugin, ok := volumePlugin.(FSResizableVolumePlugin); ok { + return fsResizablePlugin, nil + } + + return nil, nil +} + // NewPersistentVolumeRecyclerPodTemplate creates a template for a recycler // pod. By default, a recycler pod simply runs "rm -rf" on a volume and tests // for emptiness. 
Most attributes of the template will be correct for most diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/BUILD index 19190114902a..e43ba8d037fb 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/BUILD @@ -39,12 +39,12 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/libopenstorage/openstorage/api:go_default_library", "//vendor/github.com/libopenstorage/openstorage/api/client:go_default_library", "//vendor/github.com/libopenstorage/openstorage/api/client/volume:go_default_library", "//vendor/github.com/libopenstorage/openstorage/api/spec:go_default_library", "//vendor/github.com/libopenstorage/openstorage/volume:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx.go index 1edb0bacac30..212a4c23d796 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx.go @@ -20,11 +20,11 @@ import ( "fmt" "os" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" kstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" @@ -180,13 +180,13 @@ func (plugin *portworxVolumePlugin) ExpandVolumeDevice( spec *volume.Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) { - glog.V(4).Infof("Expanding: %s from %v to %v", spec.Name(), oldSize, newSize) + klog.V(4).Infof("Expanding: %s from %v to %v", spec.Name(), oldSize, newSize) err := plugin.util.ResizeVolume(spec, newSize, plugin.host) if err != nil { return oldSize, err } - glog.V(4).Infof("Successfully resized %s to %v", spec.Name(), newSize) + klog.V(4).Infof("Successfully resized %s to %v", spec.Name(), newSize) return newSize, nil } @@ -269,10 +269,9 @@ var _ volume.Mounter = &portworxVolumeMounter{} func (b *portworxVolumeMounter) GetAttributes() volume.Attributes { return volume.Attributes{ - ReadOnly: b.readOnly, - Managed: !b.readOnly, - // true ? - SupportsSELinux: true, + ReadOnly: b.readOnly, + Managed: !b.readOnly, + SupportsSELinux: false, } } @@ -291,9 +290,9 @@ func (b *portworxVolumeMounter) SetUp(fsGroup *int64) error { // SetUpAt attaches the disk and bind mounts to the volume path. func (b *portworxVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { notMnt, err := b.mounter.IsLikelyNotMountPoint(dir) - glog.Infof("Portworx Volume set up. Dir: %s %v %v", dir, !notMnt, err) + klog.Infof("Portworx Volume set up. 
Dir: %s %v %v", dir, !notMnt, err) if err != nil && !os.IsNotExist(err) { - glog.Errorf("Cannot validate mountpoint: %s", dir) + klog.Errorf("Cannot validate mountpoint: %s", dir) return err } if !notMnt { @@ -307,7 +306,7 @@ func (b *portworxVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { return err } - glog.V(4).Infof("Portworx Volume %s attached", b.volumeID) + klog.V(4).Infof("Portworx Volume %s attached", b.volumeID) if err := os.MkdirAll(dir, 0750); err != nil { return err @@ -319,7 +318,7 @@ func (b *portworxVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { if !b.readOnly { volume.SetVolumeOwnership(b, fsGroup) } - glog.Infof("Portworx Volume %s setup at %s", b.volumeID, dir) + klog.Infof("Portworx Volume %s setup at %s", b.volumeID, dir) return nil } @@ -342,7 +341,7 @@ func (c *portworxVolumeUnmounter) TearDown() error { // Unmounts the bind mount, and detaches the disk only if the PD // resource was the last reference to that disk on the kubelet. func (c *portworxVolumeUnmounter) TearDownAt(dir string) error { - glog.Infof("Portworx Volume TearDown of %s", dir) + klog.Infof("Portworx Volume TearDown of %s", dir) if err := c.manager.UnmountVolume(c, dir); err != nil { return err diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx_util.go index ab68e4072686..62b3e3f4d317 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx_util.go @@ -19,7 +19,6 @@ package portworx import ( "fmt" - "github.com/golang/glog" osdapi "github.com/libopenstorage/openstorage/api" osdclient "github.com/libopenstorage/openstorage/api/client" volumeclient "github.com/libopenstorage/openstorage/api/client/volume" @@ -28,6 +27,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/volume" volutil "k8s.io/kubernetes/pkg/volume/util" @@ -51,11 +51,11 @@ type PortworxVolumeUtil struct { func (util *PortworxVolumeUtil) CreateVolume(p *portworxVolumeProvisioner) (string, int64, map[string]string, error) { driver, err := util.getPortworxDriver(p.plugin.host, false /*localOnly*/) if err != nil || driver == nil { - glog.Errorf("Failed to get portworx driver. Err: %v", err) + klog.Errorf("Failed to get portworx driver. 
Err: %v", err) return "", 0, nil, err } - glog.Infof("Creating Portworx volume for PVC: %v", p.options.PVC.Name) + klog.Infof("Creating Portworx volume for PVC: %v", p.options.PVC.Name) capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] // Portworx Volumes are specified in GiB @@ -95,7 +95,7 @@ func (util *PortworxVolumeUtil) CreateVolume(p *portworxVolumeProvisioner) (stri for k, v := range p.options.PVC.Annotations { if _, present := spec.VolumeLabels[k]; present { - glog.Warningf("not saving annotation: %s=%s in spec labels due to an existing key", k, v) + klog.Warningf("not saving annotation: %s=%s in spec labels due to an existing key", k, v) continue } spec.VolumeLabels[k] = v @@ -103,11 +103,11 @@ func (util *PortworxVolumeUtil) CreateVolume(p *portworxVolumeProvisioner) (stri volumeID, err := driver.Create(locator, source, spec) if err != nil { - glog.Errorf("Error creating Portworx Volume : %v", err) + klog.Errorf("Error creating Portworx Volume : %v", err) return "", 0, nil, err } - glog.Infof("Successfully created Portworx volume for PVC: %v", p.options.PVC.Name) + klog.Infof("Successfully created Portworx volume for PVC: %v", p.options.PVC.Name) return volumeID, requestGiB, nil, err } @@ -115,13 +115,13 @@ func (util *PortworxVolumeUtil) CreateVolume(p *portworxVolumeProvisioner) (stri func (util *PortworxVolumeUtil) DeleteVolume(d *portworxVolumeDeleter) error { driver, err := util.getPortworxDriver(d.plugin.host, false /*localOnly*/) if err != nil || driver == nil { - glog.Errorf("Failed to get portworx driver. Err: %v", err) + klog.Errorf("Failed to get portworx driver. Err: %v", err) return err } err = driver.Delete(d.volumeID) if err != nil { - glog.Errorf("Error deleting Portworx Volume (%v): %v", d.volName, err) + klog.Errorf("Error deleting Portworx Volume (%v): %v", d.volName, err) return err } return nil @@ -131,13 +131,13 @@ func (util *PortworxVolumeUtil) DeleteVolume(d *portworxVolumeDeleter) error { func (util *PortworxVolumeUtil) AttachVolume(m *portworxVolumeMounter, attachOptions map[string]string) (string, error) { driver, err := util.getPortworxDriver(m.plugin.host, true /*localOnly*/) if err != nil || driver == nil { - glog.Errorf("Failed to get portworx driver. Err: %v", err) + klog.Errorf("Failed to get portworx driver. Err: %v", err) return "", err } devicePath, err := driver.Attach(m.volName, attachOptions) if err != nil { - glog.Errorf("Error attaching Portworx Volume (%v): %v", m.volName, err) + klog.Errorf("Error attaching Portworx Volume (%v): %v", m.volName, err) return "", err } return devicePath, nil @@ -147,13 +147,13 @@ func (util *PortworxVolumeUtil) AttachVolume(m *portworxVolumeMounter, attachOpt func (util *PortworxVolumeUtil) DetachVolume(u *portworxVolumeUnmounter) error { driver, err := util.getPortworxDriver(u.plugin.host, true /*localOnly*/) if err != nil || driver == nil { - glog.Errorf("Failed to get portworx driver. Err: %v", err) + klog.Errorf("Failed to get portworx driver. 
Err: %v", err) return err } err = driver.Detach(u.volName, false /*doNotForceDetach*/) if err != nil { - glog.Errorf("Error detaching Portworx Volume (%v): %v", u.volName, err) + klog.Errorf("Error detaching Portworx Volume (%v): %v", u.volName, err) return err } return nil @@ -163,13 +163,13 @@ func (util *PortworxVolumeUtil) DetachVolume(u *portworxVolumeUnmounter) error { func (util *PortworxVolumeUtil) MountVolume(m *portworxVolumeMounter, mountPath string) error { driver, err := util.getPortworxDriver(m.plugin.host, true /*localOnly*/) if err != nil || driver == nil { - glog.Errorf("Failed to get portworx driver. Err: %v", err) + klog.Errorf("Failed to get portworx driver. Err: %v", err) return err } err = driver.Mount(m.volName, mountPath) if err != nil { - glog.Errorf("Error mounting Portworx Volume (%v) on Path (%v): %v", m.volName, mountPath, err) + klog.Errorf("Error mounting Portworx Volume (%v) on Path (%v): %v", m.volName, mountPath, err) return err } return nil @@ -179,13 +179,13 @@ func (util *PortworxVolumeUtil) MountVolume(m *portworxVolumeMounter, mountPath func (util *PortworxVolumeUtil) UnmountVolume(u *portworxVolumeUnmounter, mountPath string) error { driver, err := util.getPortworxDriver(u.plugin.host, true /*localOnly*/) if err != nil || driver == nil { - glog.Errorf("Failed to get portworx driver. Err: %v", err) + klog.Errorf("Failed to get portworx driver. Err: %v", err) return err } err = driver.Unmount(u.volName, mountPath) if err != nil { - glog.Errorf("Error unmounting Portworx Volume (%v) on Path (%v): %v", u.volName, mountPath, err) + klog.Errorf("Error unmounting Portworx Volume (%v) on Path (%v): %v", u.volName, mountPath, err) return err } return nil @@ -194,7 +194,7 @@ func (util *PortworxVolumeUtil) UnmountVolume(u *portworxVolumeUnmounter, mountP func (util *PortworxVolumeUtil) ResizeVolume(spec *volume.Spec, newSize resource.Quantity, volumeHost volume.VolumeHost) error { driver, err := util.getPortworxDriver(volumeHost, false /*localOnly*/) if err != nil || driver == nil { - glog.Errorf("Failed to get portworx driver. Err: %v", err) + klog.Errorf("Failed to get portworx driver. Err: %v", err) return err } @@ -210,7 +210,7 @@ func (util *PortworxVolumeUtil) ResizeVolume(spec *volume.Spec, newSize resource vol := vols[0] newSizeInBytes := uint64(volutil.RoundUpToGiB(newSize) * volutil.GIB) if vol.Spec.Size >= newSizeInBytes { - glog.Infof("Portworx volume: %s already at size: %d greater than or equal to new "+ + klog.Infof("Portworx volume: %s already at size: %d greater than or equal to new "+ "requested size: %d. Skipping resize.", spec.Name(), vol.Spec.Size, newSizeInBytes) return nil } @@ -247,7 +247,7 @@ func isClientValid(client *osdclient.Client) (bool, error) { _, err := client.Versions(osdapi.OsdVolumePath) if err != nil { - glog.Errorf("portworx client failed driver versions check. Err: %v", err) + klog.Errorf("portworx client failed driver versions check. 
Err: %v", err) return false, err } @@ -285,7 +285,7 @@ func (util *PortworxVolumeUtil) getPortworxDriver(volumeHost volume.VolumeHost, if err != nil { return nil, err } else { - glog.V(4).Infof("Using portworx local service at: %v as api endpoint", volumeHost.GetHostName()) + klog.V(4).Infof("Using portworx local service at: %v as api endpoint", volumeHost.GetHostName()) return volumeclient.VolumeDriver(util.portworxClient), nil } } @@ -301,31 +301,31 @@ func (util *PortworxVolumeUtil) getPortworxDriver(volumeHost volume.VolumeHost, // Create client from portworx service kubeClient := volumeHost.GetKubeClient() if kubeClient == nil { - glog.Error("Failed to get kubeclient when creating portworx client") + klog.Error("Failed to get kubeclient when creating portworx client") return nil, nil } opts := metav1.GetOptions{} svc, err := kubeClient.CoreV1().Services(api.NamespaceSystem).Get(pxServiceName, opts) if err != nil { - glog.Errorf("Failed to get service. Err: %v", err) + klog.Errorf("Failed to get service. Err: %v", err) return nil, err } if svc == nil { - glog.Errorf("Service: %v not found. Consult Portworx docs to deploy it.", pxServiceName) + klog.Errorf("Service: %v not found. Consult Portworx docs to deploy it.", pxServiceName) return nil, err } util.portworxClient, err = createDriverClient(svc.Spec.ClusterIP) if err != nil || util.portworxClient == nil { - glog.Errorf("Failed to connect to portworx service. Err: %v", err) + klog.Errorf("Failed to connect to portworx service. Err: %v", err) return nil, err } - glog.Infof("Using portworx cluster service at: %v as api endpoint", svc.Spec.ClusterIP) + klog.Infof("Using portworx cluster service at: %v as api endpoint", svc.Spec.ClusterIP) } else { - glog.Infof("Using portworx service at: %v as api endpoint", volumeHost.GetHostName()) + klog.Infof("Using portworx service at: %v as api endpoint", volumeHost.GetHostName()) } return volumeclient.VolumeDriver(util.portworxClient), nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/projected/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/projected/BUILD index 79d29aece80d..65883f906b38 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/projected/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/projected/BUILD @@ -11,15 +11,22 @@ go_test( srcs = ["projected_test.go"], embed = [":go_default_library"], deps = [ + "//pkg/apis/authentication/v1:go_default_library", + "//pkg/apis/core/v1:go_default_library", "//pkg/volume:go_default_library", - "//pkg/volume/empty_dir:go_default_library", + "//pkg/volume/emptydir:go_default_library", "//pkg/volume/testing:go_default_library", "//pkg/volume/util:go_default_library", + "//staging/src/k8s.io/api/authentication/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", + "//staging/src/k8s.io/client-go/testing:go_default_library", ], ) @@ -42,7 +49,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/projected/projected.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/projected/projected.go index ebc0a6f0d700..e93588ce6767 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/projected/projected.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/projected/projected.go @@ -34,7 +34,7 @@ import ( "k8s.io/kubernetes/pkg/volume/secret" volumeutil "k8s.io/kubernetes/pkg/volume/util" - "github.com/golang/glog" + "k8s.io/klog" ) // ProbeVolumePlugins is the entry point for plugin detection in a package. @@ -47,10 +47,11 @@ const ( ) type projectedPlugin struct { - host volume.VolumeHost - getSecret func(namespace, name string) (*v1.Secret, error) - getConfigMap func(namespace, name string) (*v1.ConfigMap, error) - getServiceAccountToken func(namespace, name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) + host volume.VolumeHost + getSecret func(namespace, name string) (*v1.Secret, error) + getConfigMap func(namespace, name string) (*v1.ConfigMap, error) + getServiceAccountToken func(namespace, name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) + deleteServiceAccountToken func(podUID types.UID) } var _ volume.VolumePlugin = &projectedPlugin{} @@ -74,6 +75,7 @@ func (plugin *projectedPlugin) Init(host volume.VolumeHost) error { plugin.getSecret = host.GetSecretFunc() plugin.getConfigMap = host.GetConfigMapFunc() plugin.getServiceAccountToken = host.GetServiceAccountTokenFunc() + plugin.deleteServiceAccountToken = host.DeleteServiceAccountTokenFunc() return nil } @@ -186,7 +188,7 @@ func (s *projectedVolumeMounter) SetUp(fsGroup *int64) error { } func (s *projectedVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { - glog.V(3).Infof("Setting up volume %v for pod %v at %v", s.volName, s.pod.UID, dir) + klog.V(3).Infof("Setting up volume %v for pod %v at %v", s.volName, s.pod.UID, dir) wrapped, err := s.plugin.host.NewWrapperMounter(s.volName, wrappedVolumeSpec(), s.pod, *s.opts) if err != nil { @@ -195,9 +197,11 @@ func (s *projectedVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { data, err := s.collectData() if err != nil { - glog.Errorf("Error preparing data for projected volume %v for pod %v/%v: %s", s.volName, s.pod.Namespace, s.pod.Name, err.Error()) + klog.Errorf("Error preparing data for projected volume %v for pod %v/%v: %s", s.volName, s.pod.Namespace, s.pod.Name, err.Error()) return err } + + setupSuccess := false if err := wrapped.SetUpAt(dir, fsGroup); err != nil { return err } @@ -206,24 +210,40 @@ func (s *projectedVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { return err } + defer func() { + // Clean up directories if setup fails + if !setupSuccess { + unmounter, unmountCreateErr := s.plugin.NewUnmounter(s.volName, s.podUID) + if unmountCreateErr != nil { + klog.Errorf("error cleaning up mount %s after failure. 
Create unmounter failed with %v", s.volName, unmountCreateErr) + return + } + tearDownErr := unmounter.TearDown() + if tearDownErr != nil { + klog.Errorf("error tearing down volume %s with : %v", s.volName, tearDownErr) + } + } + }() + writerContext := fmt.Sprintf("pod %v/%v volume %v", s.pod.Namespace, s.pod.Name, s.volName) writer, err := volumeutil.NewAtomicWriter(dir, writerContext) if err != nil { - glog.Errorf("Error creating atomic writer: %v", err) + klog.Errorf("Error creating atomic writer: %v", err) return err } err = writer.Write(data) if err != nil { - glog.Errorf("Error writing payload to dir: %v", err) + klog.Errorf("Error writing payload to dir: %v", err) return err } err = volume.SetVolumeOwnership(s, fsGroup) if err != nil { - glog.Errorf("Error applying volume ownership settings for group: %v", fsGroup) + klog.Errorf("Error applying volume ownership settings for group: %v", fsGroup) return err } + setupSuccess = true return nil } @@ -246,7 +266,7 @@ func (s *projectedVolumeMounter) collectData() (map[string]volumeutil.FileProjec secretapi, err := s.plugin.getSecret(s.pod.Namespace, source.Secret.Name) if err != nil { if !(errors.IsNotFound(err) && optional) { - glog.Errorf("Couldn't get secret %v/%v: %v", s.pod.Namespace, source.Secret.Name, err) + klog.Errorf("Couldn't get secret %v/%v: %v", s.pod.Namespace, source.Secret.Name, err) errlist = append(errlist, err) continue } @@ -259,7 +279,7 @@ func (s *projectedVolumeMounter) collectData() (map[string]volumeutil.FileProjec } secretPayload, err := secret.MakePayload(source.Secret.Items, secretapi, s.source.DefaultMode, optional) if err != nil { - glog.Errorf("Couldn't get secret payload %v/%v: %v", s.pod.Namespace, source.Secret.Name, err) + klog.Errorf("Couldn't get secret payload %v/%v: %v", s.pod.Namespace, source.Secret.Name, err) errlist = append(errlist, err) continue } @@ -271,7 +291,7 @@ func (s *projectedVolumeMounter) collectData() (map[string]volumeutil.FileProjec configMap, err := s.plugin.getConfigMap(s.pod.Namespace, source.ConfigMap.Name) if err != nil { if !(errors.IsNotFound(err) && optional) { - glog.Errorf("Couldn't get configMap %v/%v: %v", s.pod.Namespace, source.ConfigMap.Name, err) + klog.Errorf("Couldn't get configMap %v/%v: %v", s.pod.Namespace, source.ConfigMap.Name, err) errlist = append(errlist, err) continue } @@ -284,7 +304,7 @@ func (s *projectedVolumeMounter) collectData() (map[string]volumeutil.FileProjec } configMapPayload, err := configmap.MakePayload(source.ConfigMap.Items, configMap, s.source.DefaultMode, optional) if err != nil { - glog.Errorf("Couldn't get configMap payload %v/%v: %v", s.pod.Namespace, source.ConfigMap.Name, err) + klog.Errorf("Couldn't get configMap payload %v/%v: %v", s.pod.Namespace, source.ConfigMap.Name, err) errlist = append(errlist, err) continue } @@ -306,11 +326,14 @@ func (s *projectedVolumeMounter) collectData() (map[string]volumeutil.FileProjec continue } tp := source.ServiceAccountToken + + var auds []string + if len(tp.Audience) != 0 { + auds = []string{tp.Audience} + } tr, err := s.plugin.getServiceAccountToken(s.pod.Namespace, s.pod.Spec.ServiceAccountName, &authenticationv1.TokenRequest{ Spec: authenticationv1.TokenRequestSpec{ - Audiences: []string{ - tp.Audience, - }, + Audiences: auds, ExpirationSeconds: tp.ExpirationSeconds, BoundObjectRef: &authenticationv1.BoundObjectReference{ APIVersion: "v1", @@ -344,13 +367,18 @@ func (c *projectedVolumeUnmounter) TearDown() error { } func (c *projectedVolumeUnmounter) TearDownAt(dir string) error { - 
glog.V(3).Infof("Tearing down volume %v for pod %v at %v", c.volName, c.podUID, dir) + klog.V(3).Infof("Tearing down volume %v for pod %v at %v", c.volName, c.podUID, dir) wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec(), c.podUID) if err != nil { return err } - return wrapped.TearDownAt(dir) + if err = wrapped.TearDownAt(dir); err != nil { + return err + } + + c.plugin.deleteServiceAccountToken(c.podUID) + return nil } func getVolumeSource(spec *volume.Spec) (*v1.ProjectedVolumeSource, bool, error) { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/quobyte/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/quobyte/BUILD index b120d9c2f7c7..1142f5f633a6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/quobyte/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/quobyte/BUILD @@ -23,9 +23,9 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pborman/uuid:go_default_library", "//vendor/github.com/quobyte/api:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/quobyte/quobyte.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/quobyte/quobyte.go index 22be50e8f0cd..c9d9e773fa7b 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/quobyte/quobyte.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/quobyte/quobyte.go @@ -22,12 +22,12 @@ import ( "path" gostrings "strings" - "github.com/golang/glog" "github.com/pborman/uuid" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" @@ -94,16 +94,16 @@ func (plugin *quobytePlugin) CanSupport(spec *volume.Spec) bool { qm, _ := mounter.(*quobyteMounter) pluginDir := plugin.host.GetPluginDir(strings.EscapeQualifiedNameForDisk(quobytePluginName)) if mounted, err := qm.pluginDirIsMounted(pluginDir); mounted && err == nil { - glog.V(4).Infof("quobyte: can support") + klog.V(4).Infof("quobyte: can support") return true } } else { - glog.V(4).Infof("quobyte: Error: %v", err) + klog.V(4).Infof("quobyte: Error: %v", err) } exec := plugin.host.GetExec(plugin.GetPluginName()) if out, err := exec.Run("ls", "/sbin/mount.quobyte"); err == nil { - glog.V(4).Infof("quobyte: can support: %s", string(out)) + klog.V(4).Infof("quobyte: can support: %s", string(out)) return true } @@ -260,7 +260,7 @@ func (mounter *quobyteMounter) SetUpAt(dir string, fsGroup *int64) error { return fmt.Errorf("quobyte: mount failed: %v", err) } - glog.V(4).Infof("quobyte: mount set up: %s", dir) + klog.V(4).Infof("quobyte: mount set up: %s", dir) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/quobyte/quobyte_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/quobyte/quobyte_util.go index d61879b7bb6c..107c1454bed8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/quobyte/quobyte_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/quobyte/quobyte_util.go @@ -25,8 +25,8 @@ import ( "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/volume/util" - 
"github.com/golang/glog" quobyteapi "github.com/quobyte/api" + "k8s.io/klog" ) type quobyteVolumeManager struct { @@ -63,7 +63,7 @@ func (manager *quobyteVolumeManager) createVolume(provisioner *quobyteVolumeProv } } - glog.V(4).Infof("Created Quobyte volume %s", provisioner.volume) + klog.V(4).Infof("Created Quobyte volume %s", provisioner.volume) return &v1.QuobyteVolumeSource{ Registry: provisioner.registry, Volume: provisioner.volume, @@ -96,7 +96,7 @@ func (mounter *quobyteMounter) pluginDirIsMounted(pluginDir string) (bool, error } if mountPoint.Path == pluginDir { - glog.V(4).Infof("quobyte: found mountpoint %s in /proc/mounts", mountPoint.Path) + klog.V(4).Infof("quobyte: found mountpoint %s in /proc/mounts", mountPoint.Path) return true, nil } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/BUILD index 41bf823ac4b5..ccd413eefecc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/BUILD @@ -35,7 +35,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/attacher.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/attacher.go index 70fc15ea618e..97d944f2b5c9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/attacher.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/attacher.go @@ -21,9 +21,9 @@ import ( "os" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volutil "k8s.io/kubernetes/pkg/volume/util" @@ -107,17 +107,17 @@ func (attacher *rbdAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName t // attach volume onto the node. // This method is idempotent, callers are responsible for retrying on failure. func (attacher *rbdAttacher) WaitForAttach(spec *volume.Spec, devicePath string, pod *v1.Pod, timeout time.Duration) (string, error) { - glog.V(4).Infof("rbd: waiting for attach volume (name: %s) for pod (name: %s, uid: %s)", spec.Name(), pod.Name, pod.UID) + klog.V(4).Infof("rbd: waiting for attach volume (name: %s) for pod (name: %s, uid: %s)", spec.Name(), pod.Name, pod.UID) mounter, err := attacher.plugin.createMounterFromVolumeSpecAndPod(spec, pod) if err != nil { - glog.Warningf("failed to create mounter: %v", spec) + klog.Warningf("failed to create mounter: %v", spec) return "", err } realDevicePath, err := attacher.manager.AttachDisk(*mounter) if err != nil { return "", err } - glog.V(3).Infof("rbd: successfully wait for attach volume (spec: %s, pool: %s, image: %s) at %s", spec.Name(), mounter.Pool, mounter.Image, realDevicePath) + klog.V(3).Infof("rbd: successfully wait for attach volume (spec: %s, pool: %s, image: %s) at %s", spec.Name(), mounter.Pool, mounter.Image, realDevicePath) return realDevicePath, nil } @@ -138,7 +138,7 @@ func (attacher *rbdAttacher) GetDeviceMountPath(spec *volume.Spec) (string, erro // mount device at the given mount path. // This method is idempotent, callers are responsible for retrying on failure. 
func (attacher *rbdAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error { - glog.V(4).Infof("rbd: mouting device %s to %s", devicePath, deviceMountPath) + klog.V(4).Infof("rbd: mounting device %s to %s", devicePath, deviceMountPath) notMnt, err := attacher.mounter.IsLikelyNotMountPoint(deviceMountPath) if err != nil { if os.IsNotExist(err) { @@ -171,7 +171,7 @@ func (attacher *rbdAttacher) MountDevice(spec *volume.Spec, devicePath string, d os.Remove(deviceMountPath) return fmt.Errorf("rbd: failed to mount device %s at %s (fstype: %s), error %v", devicePath, deviceMountPath, fstype, err) } - glog.V(3).Infof("rbd: successfully mount device %s at %s (fstype: %s)", devicePath, deviceMountPath, fstype) + klog.V(3).Infof("rbd: successfully mount device %s at %s (fstype: %s)", devicePath, deviceMountPath, fstype) return nil } @@ -200,7 +200,7 @@ func (detacher *rbdDetacher) UnmountDevice(deviceMountPath string) error { if pathExists, pathErr := volutil.PathExists(deviceMountPath); pathErr != nil { return fmt.Errorf("Error checking if path exists: %v", pathErr) } else if !pathExists { - glog.Warningf("Warning: Unmount skipped because path does not exist: %v", deviceMountPath) + klog.Warningf("Warning: Unmount skipped because path does not exist: %v", deviceMountPath) return nil } devicePath, _, err := mount.GetDeviceNameFromMount(detacher.mounter, deviceMountPath) @@ -208,23 +208,23 @@ func (detacher *rbdDetacher) UnmountDevice(deviceMountPath string) error { return err } // Unmount the device from the device mount point. - glog.V(4).Infof("rbd: unmouting device mountpoint %s", deviceMountPath) + klog.V(4).Infof("rbd: unmounting device mountpoint %s", deviceMountPath) if err = detacher.mounter.Unmount(deviceMountPath); err != nil { return err } - glog.V(3).Infof("rbd: successfully umount device mountpath %s", deviceMountPath) + klog.V(3).Infof("rbd: successfully umount device mountpath %s", deviceMountPath) - glog.V(4).Infof("rbd: detaching device %s", devicePath) + klog.V(4).Infof("rbd: detaching device %s", devicePath) err = detacher.manager.DetachDisk(detacher.plugin, deviceMountPath, devicePath) if err != nil { return err } - glog.V(3).Infof("rbd: successfully detach device %s", devicePath) + klog.V(3).Infof("rbd: successfully detach device %s", devicePath) err = os.Remove(deviceMountPath) if err != nil { return err } - glog.V(3).Infof("rbd: successfully remove device mount point %s", deviceMountPath) + klog.V(3).Infof("rbd: successfully remove device mount point %s", deviceMountPath) return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/disk_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/disk_manager.go index 6f62f485444e..6067916da753 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/disk_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/disk_manager.go @@ -26,9 +26,9 @@ import ( "fmt" "os" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" @@ -61,7 +61,7 @@ func diskSetUp(manager diskManager, b rbdMounter, volPath string, mounter mount.
globalPDPath := manager.MakeGlobalPDName(*b.rbd) notMnt, err := mounter.IsLikelyNotMountPoint(globalPDPath) if err != nil && !os.IsNotExist(err) { - glog.Errorf("cannot validate mountpoint: %s", globalPDPath) + klog.Errorf("cannot validate mountpoint: %s", globalPDPath) return err } if notMnt { @@ -70,7 +70,7 @@ func diskSetUp(manager diskManager, b rbdMounter, volPath string, mounter mount. notMnt, err = mounter.IsLikelyNotMountPoint(volPath) if err != nil && !os.IsNotExist(err) { - glog.Errorf("cannot validate mountpoint: %s", volPath) + klog.Errorf("cannot validate mountpoint: %s", volPath) return err } if !notMnt { @@ -78,7 +78,7 @@ func diskSetUp(manager diskManager, b rbdMounter, volPath string, mounter mount. } if err := os.MkdirAll(volPath, 0750); err != nil { - glog.Errorf("failed to mkdir:%s", volPath) + klog.Errorf("failed to mkdir:%s", volPath) return err } // Perform a bind mount to the full path to allow duplicate mounts of the same disk. @@ -89,10 +89,10 @@ func diskSetUp(manager diskManager, b rbdMounter, volPath string, mounter mount. mountOptions := util.JoinMountOptions(b.mountOptions, options) err = mounter.Mount(globalPDPath, volPath, "", mountOptions) if err != nil { - glog.Errorf("failed to bind mount:%s", globalPDPath) + klog.Errorf("failed to bind mount:%s", globalPDPath) return err } - glog.V(3).Infof("rbd: successfully bind mount %s to %s with options %v", globalPDPath, volPath, mountOptions) + klog.V(3).Infof("rbd: successfully bind mount %s to %s with options %v", globalPDPath, volPath, mountOptions) if !b.ReadOnly { volume.SetVolumeOwnership(&b, fsGroup) @@ -105,28 +105,28 @@ func diskSetUp(manager diskManager, b rbdMounter, volPath string, mounter mount. func diskTearDown(manager diskManager, c rbdUnmounter, volPath string, mounter mount.Interface) error { notMnt, err := mounter.IsLikelyNotMountPoint(volPath) if err != nil && !os.IsNotExist(err) { - glog.Errorf("cannot validate mountpoint: %s", volPath) + klog.Errorf("cannot validate mountpoint: %s", volPath) return err } if notMnt { - glog.V(3).Infof("volume path %s is not a mountpoint, deleting", volPath) + klog.V(3).Infof("volume path %s is not a mountpoint, deleting", volPath) return os.Remove(volPath) } // Unmount the bind-mount inside this pod. 
if err := mounter.Unmount(volPath); err != nil { - glog.Errorf("failed to umount %s", volPath) + klog.Errorf("failed to umount %s", volPath) return err } notMnt, mntErr := mounter.IsLikelyNotMountPoint(volPath) if mntErr != nil && !os.IsNotExist(mntErr) { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return mntErr } if notMnt { if err := os.Remove(volPath); err != nil { - glog.V(2).Info("Error removing mountpoint ", volPath, ": ", err) + klog.V(2).Info("Error removing mountpoint ", volPath, ": ", err) return err } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd.go index b94e307ba97a..d9197428f8ba 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd.go @@ -23,7 +23,6 @@ import ( "regexp" dstrings "strings" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -32,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/strings" @@ -197,6 +197,13 @@ func (plugin *rbdPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resource. } } +func (plugin *rbdPlugin) ExpandFS(spec *volume.Spec, devicePath, deviceMountPath string, _, _ resource.Quantity) error { + _, err := volutil.GenericResizeFS(plugin.host, plugin.GetPluginName(), devicePath, deviceMountPath) + return err +} + +var _ volume.FSResizableVolumePlugin = &rbdPlugin{} + func (expander *rbdVolumeExpander) ResizeImage(oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) { return expander.manager.ExpandImage(expander, oldSize, newSize) } @@ -378,7 +385,7 @@ func (plugin *rbdPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*vol // the deprecated format: /var/lib/kubelet/plugins/kubernetes.io/rbd/rbd/{pool}-image-{image}. // So we will try to check whether this old style global device mount path exist or not. // If existed, extract the sourceName from this old style path, otherwise return an error. 
- glog.V(3).Infof("SourceName %s wrong, fallback to old format", sourceName) + klog.V(3).Infof("SourceName %s wrong, fallback to old format", sourceName) sourceName, err = plugin.getDeviceNameFromOldMountPath(mounter, mountPath) if err != nil { return nil, err @@ -408,7 +415,7 @@ func (plugin *rbdPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, if err != nil { return nil, err } - glog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err) + klog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err) globalMapPath := filepath.Dir(globalMapPathUUID) if len(globalMapPath) == 1 { return nil, fmt.Errorf("failed to retrieve volume plugin information from globalMapPathUUID: %v", globalMapPathUUID) @@ -673,10 +680,10 @@ func (r *rbdVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopologie r.rbdMounter.Image = image rbd, sizeMB, err := r.manager.CreateImage(r) if err != nil { - glog.Errorf("rbd: create volume failed, err: %v", err) + klog.Errorf("rbd: create volume failed, err: %v", err) return nil, err } - glog.Infof("successfully created rbd image %q", image) + klog.Infof("successfully created rbd image %q", image) pv := new(v1.PersistentVolume) metav1.SetMetaDataAnnotation(&pv.ObjectMeta, volutil.VolumeDynamicallyCreatedByKey, "rbd-dynamic-provisioner") @@ -817,12 +824,12 @@ func (b *rbdMounter) SetUp(fsGroup *int64) error { func (b *rbdMounter) SetUpAt(dir string, fsGroup *int64) error { // diskSetUp checks mountpoints and prevent repeated calls - glog.V(4).Infof("rbd: attempting to setup at %s", dir) + klog.V(4).Infof("rbd: attempting to setup at %s", dir) err := diskSetUp(b.manager, *b, dir, b.mounter, fsGroup) if err != nil { - glog.Errorf("rbd: failed to setup at %s %v", dir, err) + klog.Errorf("rbd: failed to setup at %s %v", dir, err) } - glog.V(3).Infof("rbd: successfully setup at %s", dir) + klog.V(3).Infof("rbd: successfully setup at %s", dir) return err } @@ -840,18 +847,18 @@ func (c *rbdUnmounter) TearDown() error { } func (c *rbdUnmounter) TearDownAt(dir string) error { - glog.V(4).Infof("rbd: attempting to teardown at %s", dir) + klog.V(4).Infof("rbd: attempting to teardown at %s", dir) if pathExists, pathErr := volutil.PathExists(dir); pathErr != nil { return fmt.Errorf("Error checking if path exists: %v", pathErr) } else if !pathExists { - glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) + klog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) return nil } err := diskTearDown(c.manager, *c, dir, c.mounter) if err != nil { return err } - glog.V(3).Infof("rbd: successfully teardown at %s", dir) + klog.V(3).Infof("rbd: successfully teardown at %s", dir) return nil } @@ -959,41 +966,18 @@ func (rbd *rbdDiskUnmapper) TearDownDevice(mapPath, _ string) error { if err != nil { return fmt.Errorf("rbd: failed to get loopback for device: %v, err: %v", device, err) } - // Get loopback device which takes fd lock for device beofore detaching a volume from node. - // TODO: This is a workaround for issue #54108 - // Currently local attach plugins such as FC, iSCSI, RBD can't obtain devicePath during - // GenerateUnmapDeviceFunc() in operation_generator. As a result, these plugins fail to get - // and remove loopback device then it will be remained on kubelet node. To avoid the problem, - // local attach plugins needs to remove loopback device during TearDownDevice(). 
- blkUtil := volumepathhandler.NewBlockVolumePathHandler() - loop, err := volumepathhandler.BlockVolumePathHandler.GetLoopDevice(blkUtil, device) - if err != nil { - if err.Error() != volumepathhandler.ErrDeviceNotFound { - return fmt.Errorf("rbd: failed to get loopback for device: %v, err: %v", device, err) - } - glog.Warning("rbd: loopback for device: % not found", device) - } else { - if len(loop) != 0 { - // Remove loop device before detaching volume since volume detach operation gets busy if volume is opened by loopback. - err = volumepathhandler.BlockVolumePathHandler.RemoveLoopDevice(blkUtil, loop) - if err != nil { - return fmt.Errorf("rbd: failed to remove loopback :%v, err: %v", loop, err) - } - glog.V(4).Infof("rbd: successfully removed loop device: %s", loop) - } - } err = rbd.manager.DetachBlockDisk(*rbd, mapPath) if err != nil { return fmt.Errorf("rbd: failed to detach disk: %s\nError: %v", mapPath, err) } - glog.V(4).Infof("rbd: %q is unmapped, deleting the directory", mapPath) + klog.V(4).Infof("rbd: %q is unmapped, deleting the directory", mapPath) err = os.RemoveAll(mapPath) if err != nil { return fmt.Errorf("rbd: failed to delete the directory: %s\nError: %v", mapPath, err) } - glog.V(4).Infof("rbd: successfully detached disk: %s", mapPath) + klog.V(4).Infof("rbd: successfully detached disk: %s", mapPath) return nil } @@ -1093,7 +1077,7 @@ func getVolumeAccessModes(spec *volume.Spec) ([]v1.PersistentVolumeAccessMode, e func parsePodSecret(pod *v1.Pod, secretName string, kubeClient clientset.Interface) (string, error) { secret, err := volutil.GetSecretForPod(pod, secretName, kubeClient) if err != nil { - glog.Errorf("failed to get secret from [%q/%q]", pod.Namespace, secretName) + klog.Errorf("failed to get secret from [%q/%q]", pod.Namespace, secretName) return "", fmt.Errorf("failed to get secret from [%q/%q]", pod.Namespace, secretName) } return parseSecretMap(secret) @@ -1102,7 +1086,7 @@ func parsePodSecret(pod *v1.Pod, secretName string, kubeClient clientset.Interfa func parsePVSecret(namespace, secretName string, kubeClient clientset.Interface) (string, error) { secret, err := volutil.GetSecretForPV(namespace, secretName, rbdPluginName, kubeClient) if err != nil { - glog.Errorf("failed to get secret from [%q/%q]", namespace, secretName) + klog.Errorf("failed to get secret from [%q/%q]", namespace, secretName) return "", fmt.Errorf("failed to get secret from [%q/%q]", namespace, secretName) } return parseSecretMap(secret) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_util.go index c0126c4d60ab..2e3de2264429 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_util.go @@ -32,11 +32,11 @@ import ( "strings" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog" fileutil "k8s.io/kubernetes/pkg/util/file" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/node" @@ -82,21 +82,21 @@ func getRbdDevFromImageAndPool(pool string, image string) (string, bool) { poolFile := path.Join(sys_path, name, "pool") poolBytes, err := ioutil.ReadFile(poolFile) if err != nil { - glog.V(4).Infof("error reading %s: %v", poolFile, err) + klog.V(4).Infof("error reading %s: %v", poolFile, err) continue } if strings.TrimSpace(string(poolBytes)) != pool { - 
glog.V(4).Infof("device %s is not %q: %q", name, pool, string(poolBytes)) + klog.V(4).Infof("device %s is not %q: %q", name, pool, string(poolBytes)) continue } imgFile := path.Join(sys_path, name, "name") imgBytes, err := ioutil.ReadFile(imgFile) if err != nil { - glog.V(4).Infof("error reading %s: %v", imgFile, err) + klog.V(4).Infof("error reading %s: %v", imgFile, err) continue } if strings.TrimSpace(string(imgBytes)) != image { - glog.V(4).Infof("device %s is not %q: %q", name, image, string(imgBytes)) + klog.V(4).Infof("device %s is not %q: %q", name, image, string(imgBytes)) continue } // Found a match, check if device exists. @@ -119,7 +119,7 @@ func getMaxNbds() (int, error) { return 0, fmt.Errorf("rbd-nbd: failed to retrieve max_nbds from %s err: %q", maxNbdsPath, err) } - glog.V(4).Infof("found nbds max parameters file at %s", maxNbdsPath) + klog.V(4).Infof("found nbds max parameters file at %s", maxNbdsPath) maxNbdBytes, err := ioutil.ReadFile(maxNbdsPath) if err != nil { @@ -131,7 +131,7 @@ func getMaxNbds() (int, error) { return 0, fmt.Errorf("rbd-nbd: failed to read max_nbds err: %q", err) } - glog.V(4).Infof("rbd-nbd: max_nbds: %d", maxNbds) + klog.V(4).Infof("rbd-nbd: max_nbds: %d", maxNbds) return maxNbds, nil } @@ -148,7 +148,7 @@ func getNbdDevFromImageAndPool(pool string, image string) (string, bool) { maxNbds, maxNbdsErr := getMaxNbds() if maxNbdsErr != nil { - glog.V(4).Infof("error reading nbds_max %v", maxNbdsErr) + klog.V(4).Infof("error reading nbds_max %v", maxNbdsErr) return "", false } @@ -156,18 +156,18 @@ func getNbdDevFromImageAndPool(pool string, image string) (string, bool) { nbdPath := basePath + strconv.Itoa(i) _, err := os.Lstat(nbdPath) if err != nil { - glog.V(4).Infof("error reading nbd info directory %s: %v", nbdPath, err) + klog.V(4).Infof("error reading nbd info directory %s: %v", nbdPath, err) continue } pidBytes, err := ioutil.ReadFile(path.Join(nbdPath, "pid")) if err != nil { - glog.V(5).Infof("did not find valid pid file in dir %s: %v", nbdPath, err) + klog.V(5).Infof("did not find valid pid file in dir %s: %v", nbdPath, err) continue } cmdlineFileName := path.Join("/proc", strings.TrimSpace(string(pidBytes)), "cmdline") rawCmdline, err := ioutil.ReadFile(cmdlineFileName) if err != nil { - glog.V(4).Infof("failed to read cmdline file %s: %v", cmdlineFileName, err) + klog.V(4).Infof("failed to read cmdline file %s: %v", cmdlineFileName, err) continue } cmdlineArgs := strings.FieldsFunc(string(rawCmdline), func(r rune) bool { @@ -177,17 +177,17 @@ func getNbdDevFromImageAndPool(pool string, image string) (string, bool) { // Only accepted pattern of cmdline is from execRbdMap: // rbd-nbd map pool/image ... 
if len(cmdlineArgs) < 3 || cmdlineArgs[0] != "rbd-nbd" || cmdlineArgs[1] != "map" { - glog.V(4).Infof("nbd device %s is not used by rbd", nbdPath) + klog.V(4).Infof("nbd device %s is not used by rbd", nbdPath) continue } if cmdlineArgs[2] != imgPath { - glog.V(4).Infof("rbd-nbd device %s did not match expected image path: %s with path found: %s", + klog.V(4).Infof("rbd-nbd device %s did not match expected image path: %s with path found: %s", nbdPath, imgPath, cmdlineArgs[2]) continue } devicePath := path.Join("/dev", "nbd"+strconv.Itoa(i)) if _, err := os.Lstat(devicePath); err != nil { - glog.Warningf("Stat device %s for imgpath %s failed %v", devicePath, imgPath, err) + klog.Warningf("Stat device %s for imgpath %s failed %v", devicePath, imgPath, err) continue } return devicePath, true @@ -233,14 +233,14 @@ func execRbdMap(b rbdMounter, rbdCmd string, mon string) ([]byte, error) { func checkRbdNbdTools(e mount.Exec) bool { _, err := e.Run("modprobe", "nbd") if err != nil { - glog.V(5).Infof("rbd-nbd: nbd modprobe failed with error %v", err) + klog.V(5).Infof("rbd-nbd: nbd modprobe failed with error %v", err) return false } if _, err := e.Run("rbd-nbd", "--version"); err != nil { - glog.V(5).Infof("rbd-nbd: getting rbd-nbd version failed with error %v", err) + klog.V(5).Infof("rbd-nbd: getting rbd-nbd version failed with error %v", err) return false } - glog.V(3).Infof("rbd-nbd tools were found.") + klog.V(3).Infof("rbd-nbd tools were found.") return true } @@ -251,7 +251,7 @@ func makePDNameInternal(host volume.VolumeHost, pool string, image string) strin info, err := os.Stat(deprecatedDir) if err == nil && info.IsDir() { // The device mount path has already been created with the deprecated format, return it. - glog.V(5).Infof("Deprecated format path %s found", deprecatedDir) + klog.V(5).Infof("Deprecated format path %s found", deprecatedDir) return deprecatedDir } // Return the canonical format path. @@ -331,7 +331,7 @@ func (util *RBDUtil) rbdUnlock(b rbdMounter) error { args = append(args, secret_opt...) cmd, err = b.exec.Run("rbd", args...) output = string(cmd) - glog.V(4).Infof("lock list output %q", output) + klog.V(4).Infof("lock list output %q", output) if err != nil { return err } @@ -349,9 +349,9 @@ func (util *RBDUtil) rbdUnlock(b rbdMounter) error { args = append(args, secret_opt...) cmd, err = b.exec.Run("rbd", args...) if err == nil { - glog.V(4).Infof("rbd: successfully remove lock (locker_id: %s) on image: %s/%s with id %s mon %s", lock_id, b.Pool, b.Image, b.Id, mon) + klog.V(4).Infof("rbd: successfully remove lock (locker_id: %s) on image: %s/%s with id %s mon %s", lock_id, b.Pool, b.Image, b.Id, mon) } else { - glog.Warningf("rbd: failed to remove lock (lock_id: %s) on image: %s/%s with id %s mon %s: %v", lock_id, b.Pool, b.Image, b.Id, mon, err) + klog.Warningf("rbd: failed to remove lock (lock_id: %s) on image: %s/%s with id %s mon %s: %v", lock_id, b.Pool, b.Image, b.Id, mon, err) } } @@ -395,49 +395,48 @@ func (util *RBDUtil) AttachDisk(b rbdMounter) (string, error) { Steps: rbdImageWatcherSteps, } needValidUsed := true - // If accessModes contain ReadOnlyMany, we don't need check rbd status of being used. if b.accessModes != nil { - for _, v := range b.accessModes { - if v != v1.ReadWriteOnce { - needValidUsed = false - break - } + // If accessModes only contains ReadOnlyMany, we don't need to check rbd status of being used.
+ if len(b.accessModes) == 1 && b.accessModes[0] == v1.ReadOnlyMany { + needValidUsed = false } - } else { - // ReadOnly rbd volume should not check rbd status of being used to - // support mounted as read-only by multiple consumers simultaneously. - needValidUsed = !b.rbd.ReadOnly } - err := wait.ExponentialBackoff(backoff, func() (bool, error) { - used, rbdOutput, err := util.rbdStatus(&b) + // If accessModes is nil, the volume is referenced by in-line volume. + // We can assume the AccessModes to be {"RWO" and "ROX"}, which is what the volume plugin supports. + // We do not need to consider ReadOnly here, because it is used for VolumeMounts. + + if needValidUsed { + err := wait.ExponentialBackoff(backoff, func() (bool, error) { + used, rbdOutput, err := util.rbdStatus(&b) + if err != nil { + return false, fmt.Errorf("fail to check rbd image status with: (%v), rbd output: (%s)", err, rbdOutput) + } + return !used, nil + }) + // Return error if rbd image has not become available for the specified timeout. + if err == wait.ErrWaitTimeout { + return "", fmt.Errorf("rbd image %s/%s is still being used", b.Pool, b.Image) + } + // Return error if any other errors were encountered during waiting for the image to become available. if err != nil { - return false, fmt.Errorf("fail to check rbd image status with: (%v), rbd output: (%s)", err, rbdOutput) + return "", err } - return !needValidUsed || !used, nil - }) - // Return error if rbd image has not become available for the specified timeout. - if err == wait.ErrWaitTimeout { - return "", fmt.Errorf("rbd image %s/%s is still being used", b.Pool, b.Image) - } - // Return error if any other errors were encountered during wating for the image to become available. - if err != nil { - return "", err } mon := util.kernelRBDMonitorsOpt(b.Mon) - glog.V(1).Infof("rbd: map mon %s", mon) + klog.V(1).Infof("rbd: map mon %s", mon) - _, err = b.exec.Run("modprobe", "rbd") + _, err := b.exec.Run("modprobe", "rbd") if err != nil { - glog.Warningf("rbd: failed to load rbd kernel module:%v", err) + klog.Warningf("rbd: failed to load rbd kernel module:%v", err) } output, err = execRbdMap(b, "rbd", mon) if err != nil { if !nbdToolsFound { - glog.V(1).Infof("rbd: map error %v, rbd output: %s", err, string(output)) + klog.V(1).Infof("rbd: map error %v, rbd output: %s", err, string(output)) return "", fmt.Errorf("rbd: map failed %v, rbd output: %s", err, string(output)) } - glog.V(3).Infof("rbd: map failed with %v, %s. Retrying with rbd-nbd", err, string(output)) + klog.V(3).Infof("rbd: map failed with %v, %s. Retrying with rbd-nbd", err, string(output)) errList := []error{err} outputList := output output, err = execRbdMap(b, "rbd-nbd", mon) @@ -482,7 +481,7 @@ func (util *RBDUtil) DetachDisk(plugin *rbdPlugin, deviceMountPath string, devic if err != nil { return rbdErrors(err, fmt.Errorf("rbd: failed to unmap device %s, error %v, rbd output: %v", device, err, output)) } - glog.V(3).Infof("rbd: successfully unmap device %s", device) + klog.V(3).Infof("rbd: successfully unmap device %s", device) // Currently, we don't persist rbd info on the disk, but for backward // compatbility, we need to clean it if found. 
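The reworked AttachDisk above only polls `rbd status` when needValidUsed is true, and it does so through wait.ExponentialBackoff, translating wait.ErrWaitTimeout into an "image is still being used" error. A sketch of that polling shape, with imageInUse standing in for util.rbdStatus and the backoff values invented for illustration (the real code uses the rbdImageWatcher* constants):

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// imageInUse stands in for util.rbdStatus, which shells out to `rbd status`
// and reports whether any watcher still holds the image.
func imageInUse(pool, image string) (bool, error) {
	return false, nil // illustrative only
}

func waitForImageUnused(pool, image string) error {
	backoff := wait.Backoff{
		Duration: time.Second, // initial delay between checks
		Factor:   1.4,         // grow the delay on each retry
		Steps:    10,          // give up after this many attempts
	}
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		used, err := imageInUse(pool, image)
		if err != nil {
			return false, fmt.Errorf("fail to check rbd image status: %v", err)
		}
		return !used, nil // done once nobody is using the image
	})
	if err == wait.ErrWaitTimeout {
		return fmt.Errorf("rbd image %s/%s is still being used", pool, image)
	}
	return err
}

func main() {
	fmt.Println(waitForImageUnused("rbd", "myimage"))
}
```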
@@ -492,13 +491,13 @@ func (util *RBDUtil) DetachDisk(plugin *rbdPlugin, deviceMountPath string, devic return err } if exists { - glog.V(3).Infof("rbd: old rbd.json is found under %s, cleaning it", deviceMountPath) + klog.V(3).Infof("rbd: old rbd.json is found under %s, cleaning it", deviceMountPath) err = util.cleanOldRBDFile(plugin, rbdFile) if err != nil { - glog.Errorf("rbd: failed to clean %s", rbdFile) + klog.Errorf("rbd: failed to clean %s", rbdFile) return err } - glog.V(3).Infof("rbd: successfully remove %s", rbdFile) + klog.V(3).Infof("rbd: successfully remove %s", rbdFile) } return nil } @@ -509,7 +508,7 @@ func (util *RBDUtil) DetachBlockDisk(disk rbdDiskUnmapper, mapPath string) error if pathExists, pathErr := volutil.PathExists(mapPath); pathErr != nil { return fmt.Errorf("Error checking if path exists: %v", pathErr) } else if !pathExists { - glog.Warningf("Warning: Unmap skipped because path does not exist: %v", mapPath) + klog.Warningf("Warning: Unmap skipped because path does not exist: %v", mapPath) return nil } // If we arrive here, device is no longer used, see if we need to logout of the target @@ -530,10 +529,10 @@ func (util *RBDUtil) DetachBlockDisk(disk rbdDiskUnmapper, mapPath string) error // Any nbd device must be unmapped by rbd-nbd if strings.HasPrefix(device, "/dev/nbd") { rbdCmd = "rbd-nbd" - glog.V(4).Infof("rbd: using rbd-nbd for unmap function") + klog.V(4).Infof("rbd: using rbd-nbd for unmap function") } else { rbdCmd = "rbd" - glog.V(4).Infof("rbd: using rbd for unmap function") + klog.V(4).Infof("rbd: using rbd for unmap function") } // rbd unmap @@ -541,7 +540,7 @@ func (util *RBDUtil) DetachBlockDisk(disk rbdDiskUnmapper, mapPath string) error if err != nil { return rbdErrors(err, fmt.Errorf("rbd: failed to unmap device %s, error %v, rbd output: %s", device, err, string(output))) } - glog.V(3).Infof("rbd: successfully unmap device %s", device) + klog.V(3).Infof("rbd: successfully unmap device %s", device) return nil } @@ -565,7 +564,7 @@ func (util *RBDUtil) cleanOldRBDFile(plugin *rbdPlugin, rbdFile string) error { } if err != nil { - glog.Errorf("failed to load rbd info from %s: %v", rbdFile, err) + klog.Errorf("failed to load rbd info from %s: %v", rbdFile, err) return err } // Remove rbd lock if found. 
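DetachBlockDisk in the hunk above picks the unmap binary from the mapped device's path: anything under /dev/nbd must be unmapped with rbd-nbd, everything else with plain rbd. A compact sketch of that dispatch, using os/exec directly where the real code goes through the plugin host's mount.Exec:

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// unmapRBDDevice mirrors DetachBlockDisk: nbd-backed mappings are unmapped
// with rbd-nbd, kernel-mapped ones with rbd. Illustrative, not the real code.
func unmapRBDDevice(device string) error {
	rbdCmd := "rbd"
	if strings.HasPrefix(device, "/dev/nbd") {
		rbdCmd = "rbd-nbd"
	}
	out, err := exec.Command(rbdCmd, "unmap", device).CombinedOutput()
	if err != nil {
		return fmt.Errorf("rbd: failed to unmap device %s, error %v, output: %s", device, err, string(out))
	}
	return nil
}

func main() {
	// Would shell out to `rbd-nbd unmap /dev/nbd0` on a node with the tools installed.
	fmt.Println(unmapRBDDevice("/dev/nbd0"))
}
```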
@@ -590,9 +589,9 @@ func (util *RBDUtil) CreateImage(p *rbdVolumeProvisioner) (r *v1.RBDPersistentVo volSz := fmt.Sprintf("%d", sz) mon := util.kernelRBDMonitorsOpt(p.Mon) if p.rbdMounter.imageFormat == rbdImageFormat2 { - glog.V(4).Infof("rbd: create %s size %s format %s (features: %s) using mon %s, pool %s id %s key %s", p.rbdMounter.Image, volSz, p.rbdMounter.imageFormat, p.rbdMounter.imageFeatures, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret) + klog.V(4).Infof("rbd: create %s size %s format %s (features: %s) using mon %s, pool %s id %s key %s", p.rbdMounter.Image, volSz, p.rbdMounter.imageFormat, p.rbdMounter.imageFeatures, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret) } else { - glog.V(4).Infof("rbd: create %s size %s format %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, volSz, p.rbdMounter.imageFormat, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret) + klog.V(4).Infof("rbd: create %s size %s format %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, volSz, p.rbdMounter.imageFormat, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret) } args := []string{"create", p.rbdMounter.Image, "--size", volSz, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.adminId, "-m", mon, "--key=" + p.rbdMounter.adminSecret, "--image-format", p.rbdMounter.imageFormat} if p.rbdMounter.imageFormat == rbdImageFormat2 { @@ -604,7 +603,7 @@ func (util *RBDUtil) CreateImage(p *rbdVolumeProvisioner) (r *v1.RBDPersistentVo output, err = p.exec.Run("rbd", args...) if err != nil { - glog.Warningf("failed to create rbd image, output %v", string(output)) + klog.Warningf("failed to create rbd image, output %v", string(output)) return nil, 0, fmt.Errorf("failed to create rbd image: %v, command output: %s", err, string(output)) } @@ -622,19 +621,19 @@ func (util *RBDUtil) DeleteImage(p *rbdVolumeDeleter) error { return fmt.Errorf("error %v, rbd output: %v", err, rbdOutput) } if found { - glog.Info("rbd is still being used ", p.rbdMounter.Image) + klog.Info("rbd is still being used ", p.rbdMounter.Image) return fmt.Errorf("rbd image %s/%s is still being used, rbd output: %v", p.rbdMounter.Pool, p.rbdMounter.Image, rbdOutput) } // rbd rm. mon := util.kernelRBDMonitorsOpt(p.rbdMounter.Mon) - glog.V(4).Infof("rbd: rm %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret) + klog.V(4).Infof("rbd: rm %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret) output, err = p.exec.Run("rbd", "rm", p.rbdMounter.Image, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.adminId, "-m", mon, "--key="+p.rbdMounter.adminSecret) if err == nil { return nil } - glog.Errorf("failed to delete rbd image: %v, command output: %s", err, string(output)) + klog.Errorf("failed to delete rbd image: %v, command output: %s", err, string(output)) return fmt.Errorf("error %v, rbd output: %v", err, string(output)) } @@ -659,14 +658,14 @@ func (util *RBDUtil) ExpandImage(rbdExpander *rbdVolumeExpander, oldSize resourc // rbd resize. 
mon := util.kernelRBDMonitorsOpt(rbdExpander.rbdMounter.Mon) - glog.V(4).Infof("rbd: resize %s using mon %s, pool %s id %s key %s", rbdExpander.rbdMounter.Image, mon, rbdExpander.rbdMounter.Pool, rbdExpander.rbdMounter.adminId, rbdExpander.rbdMounter.adminSecret) + klog.V(4).Infof("rbd: resize %s using mon %s, pool %s id %s key %s", rbdExpander.rbdMounter.Image, mon, rbdExpander.rbdMounter.Pool, rbdExpander.rbdMounter.adminId, rbdExpander.rbdMounter.adminSecret) output, err = rbdExpander.exec.Run("rbd", "resize", rbdExpander.rbdMounter.Image, "--size", newVolSz, "--pool", rbdExpander.rbdMounter.Pool, "--id", rbdExpander.rbdMounter.adminId, "-m", mon, "--key="+rbdExpander.rbdMounter.adminSecret) if err == nil { return newSizeQuant, nil } - glog.Errorf("failed to resize rbd image: %v, command output: %s", err, string(output)) + klog.Errorf("failed to resize rbd image: %v, command output: %s", err, string(output)) return oldSize, err } @@ -702,14 +701,14 @@ func (util *RBDUtil) rbdInfo(b *rbdMounter) (int, error) { // # image does not exist (exit=2) // rbd: error opening image 1234: (2) No such file or directory // - glog.V(4).Infof("rbd: info %s using mon %s, pool %s id %s key %s", b.Image, mon, b.Pool, id, secret) + klog.V(4).Infof("rbd: info %s using mon %s, pool %s id %s key %s", b.Image, mon, b.Pool, id, secret) cmd, err = b.exec.Run("rbd", "info", b.Image, "--pool", b.Pool, "-m", mon, "--id", id, "--key="+secret) output = string(cmd) if err, ok := err.(*exec.Error); ok { if err.Err == exec.ErrNotFound { - glog.Errorf("rbd cmd not found") + klog.Errorf("rbd cmd not found") // fail fast if rbd command is not found. return 0, err } @@ -768,14 +767,14 @@ func (util *RBDUtil) rbdStatus(b *rbdMounter) (bool, string, error) { // # image does not exist (exit=2) // rbd: error opening image kubernetes-dynamic-pvc-: (2) No such file or directory // - glog.V(4).Infof("rbd: status %s using mon %s, pool %s id %s key %s", b.Image, mon, b.Pool, id, secret) + klog.V(4).Infof("rbd: status %s using mon %s, pool %s id %s key %s", b.Image, mon, b.Pool, id, secret) cmd, err = b.exec.Run("rbd", "status", b.Image, "--pool", b.Pool, "-m", mon, "--id", id, "--key="+secret) output = string(cmd) if err, ok := err.(*exec.Error); ok { if err.Err == exec.ErrNotFound { - glog.Errorf("rbd cmd not found") + klog.Errorf("rbd cmd not found") // fail fast if command not found return false, output, err } @@ -787,10 +786,10 @@ func (util *RBDUtil) rbdStatus(b *rbdMounter) (bool, string, error) { } if strings.Contains(output, imageWatcherStr) { - glog.V(4).Infof("rbd: watchers on %s: %s", b.Image, output) + klog.V(4).Infof("rbd: watchers on %s: %s", b.Image, output) return true, output, nil } else { - glog.Warningf("rbd: no watchers on %s", b.Image) + klog.Warningf("rbd: no watchers on %s", b.Image) return false, output, nil } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/BUILD index 767ad4a40334..526fcbe582bf 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/BUILD @@ -25,7 +25,7 @@ go_test( "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", "//vendor/github.com/codedellemc/goscaleio/types/v1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -52,7 +52,7 @@ 
go_library( "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//vendor/github.com/codedellemc/goscaleio:go_default_library", "//vendor/github.com/codedellemc/goscaleio/types/v1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_client.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_client.go index d789bfd6cf86..2c7041f478ea 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_client.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_client.go @@ -33,7 +33,7 @@ import ( sio "github.com/codedellemc/goscaleio" siotypes "github.com/codedellemc/goscaleio/types/v1" - "github.com/golang/glog" + "k8s.io/klog" ) var ( @@ -74,7 +74,7 @@ type sioClient struct { spClient *sio.StoragePool provisionMode string sdcPath string - sdcGuid string + sdcGUID string instanceID string inited bool diskRegex *regexp.Regexp @@ -97,7 +97,7 @@ func newSioClient(gateway, username, password string, sslEnabled bool, exec moun } r, err := regexp.Compile(`^emc-vol-\w*-\w*$`) if err != nil { - glog.Error(log("failed to compile regex: %v", err)) + klog.Error(log("failed to compile regex: %v", err)) return nil, err } client.diskRegex = r @@ -113,10 +113,10 @@ func (c *sioClient) init() error { if c.inited { return nil } - glog.V(4).Infoln(log("initializing scaleio client")) + klog.V(4).Infoln(log("initializing scaleio client")) client, err := sio.NewClientWithArgs(c.gateway, "", c.insecure, c.certsEnabled) if err != nil { - glog.Error(log("failed to create client: %v", err)) + klog.Error(log("failed to create client: %v", err)) return err } c.client = client @@ -127,24 +127,24 @@ func (c *sioClient) init() error { Username: c.username, Password: c.password}, ); err != nil { - glog.Error(log("client authentication failed: %v", err)) + klog.Error(log("client authentication failed: %v", err)) return err } // retrieve system if c.system, err = c.findSystem(c.sysName); err != nil { - glog.Error(log("unable to find system %s: %v", c.sysName, err)) + klog.Error(log("unable to find system %s: %v", c.sysName, err)) return err } // retrieve protection domain if c.protectionDomain, err = c.findProtectionDomain(c.pdName); err != nil { - glog.Error(log("unable to find protection domain %s: %v", c.protectionDomain, err)) + klog.Error(log("unable to find protection domain %s: %v", c.pdName, err)) return err } // retrieve storage pool if c.storagePool, err = c.findStoragePool(c.spName); err != nil { - glog.Error(log("unable to find storage pool %s: %v", c.storagePool, err)) + klog.Error(log("unable to find storage pool %s: %v", c.spName, err)) return err } c.inited = true @@ -157,7 +157,7 @@ func (c *sioClient) Volumes() ([]*siotypes.Volume, error) { } vols, err := c.getVolumes() if err != nil { - glog.Error(log("failed to retrieve volumes: %v", err)) + klog.Error(log("failed to retrieve volumes: %v", err)) return nil, err } return vols, nil @@ -170,12 +170,12 @@ func (c *sioClient) Volume(id sioVolumeID) (*siotypes.Volume, error) { vols, err := c.getVolumesByID(id) if err != nil { - glog.Error(log("failed to retrieve volume by id: %v", err)) + klog.Error(log("failed to retrieve volume by id: %v", err)) return nil, err } vol := vols[0] if vol == nil { - glog.V(4).Info(log("volume not found, id %s", id)) + klog.V(4).Info(log("volume not found, id %s", id)) return nil,
errors.New("volume not found") } return vol, nil } @@ -186,20 +186,20 @@ func (c *sioClient) FindVolume(name string) (*siotypes.Volume, error) { return nil, err } - glog.V(4).Info(log("searching for volume %s", name)) + klog.V(4).Info(log("searching for volume %s", name)) volumes, err := c.getVolumesByName(name) if err != nil { - glog.Error(log("failed to find volume by name %v", err)) + klog.Error(log("failed to find volume by name %v", err)) return nil, err } for _, volume := range volumes { if volume.Name == name { - glog.V(4).Info(log("found volume %s", name)) + klog.V(4).Info(log("found volume %s", name)) return volume, nil } } - glog.V(4).Info(log("volume not found, name %s", name)) + klog.V(4).Info(log("volume not found, name %s", name)) return nil, errors.New("volume not found") } @@ -215,7 +215,7 @@ func (c *sioClient) CreateVolume(name string, sizeGB int64) (*siotypes.Volume, e } createResponse, err := c.client.CreateVolume(params, c.storagePool.Name) if err != nil { - glog.Error(log("failed to create volume %s: %v", name, err)) + klog.Error(log("failed to create volume %s: %v", name, err)) return nil, err } return c.Volume(sioVolumeID(createResponse.ID)) @@ -225,18 +225,18 @@ func (c *sioClient) CreateVolume(name string, sizeGB int64) (*siotypes.Volume, e // is true, ScaleIO will allow other SDC to map to that volume. func (c *sioClient) AttachVolume(id sioVolumeID, multipleMappings bool) error { if err := c.init(); err != nil { - glog.Error(log("failed to init'd client in attach volume: %v", err)) + klog.Error(log("failed to init client in attach volume: %v", err)) return err } iid, err := c.IID() if err != nil { - glog.Error(log("failed to get instanceIID for attach volume: %v", err)) + klog.Error(log("failed to get instanceID for attach volume: %v", err)) return err } params := &siotypes.MapVolumeSdcParam{ - SdcID: iid, + SdcID: iid, AllowMultipleMappings: strconv.FormatBool(multipleMappings), AllSdcs: "", } @@ -244,11 +244,11 @@ func (c *sioClient) AttachVolume(id sioVolumeID, multipleMappings bool) error { volClient.Volume = &siotypes.Volume{ID: string(id)} if err := volClient.MapVolumeSdc(params); err != nil { - glog.Error(log("failed to attach volume id %s: %v", id, err)) + klog.Error(log("failed to attach volume id %s: %v", id, err)) return err } - glog.V(4).Info(log("volume %s attached successfully", id)) + klog.V(4).Info(log("volume %s attached successfully", id)) return nil } @@ -301,35 +301,35 @@ func (c *sioClient) IID() (string, error) { // if instanceID not set, retrieve it if c.instanceID == "" { - guid, err := c.getGuid() + guid, err := c.getGUID() if err != nil { return "", err } - sdc, err := c.sysClient.FindSdc("SdcGuid", guid) + sdc, err := c.sysClient.FindSdc("SdcGUID", guid) if err != nil { - glog.Error(log("failed to retrieve sdc info %s", err)) + klog.Error(log("failed to retrieve sdc info %s", err)) return "", err } c.instanceID = sdc.Sdc.ID - glog.V(4).Info(log("retrieved instanceID %s", c.instanceID)) + klog.V(4).Info(log("retrieved instanceID %s", c.instanceID)) } return c.instanceID, nil } -// getGuid returns instance GUID, if not set using resource labels +// getGUID returns instance GUID, if not set using resource labels // it attempts to fallback to using drv_cfg binary -func (c *sioClient) getGuid() (string, error) { - if c.sdcGuid == "" { - glog.V(4).Info(log("sdc guid label not set, falling back to using drv_cfg")) +func (c *sioClient) getGUID() (string, error) { + if c.sdcGUID == "" { + klog.V(4).Info(log("sdc guid label not set, falling
back to using drv_cfg")) cmd := c.getSdcCmd() output, err := c.exec.Run(cmd, "--query_guid") if err != nil { - glog.Error(log("drv_cfg --query_guid failed: %v", err)) + klog.Error(log("drv_cfg --query_guid failed: %v", err)) return "", err } - c.sdcGuid = strings.TrimSpace(string(output)) + c.sdcGUID = strings.TrimSpace(string(output)) } - return c.sdcGuid, nil + return c.sdcGUID, nil } // getSioDiskPaths traverse local disk devices to retrieve device path @@ -342,10 +342,10 @@ func (c *sioClient) getSioDiskPaths() ([]os.FileInfo, error) { if os.IsNotExist(err) { // sioDiskIDPath may not exist yet which is fine return []os.FileInfo{}, nil - } else { - glog.Error(log("failed to ReadDir %s: %v", sioDiskIDPath, err)) - return nil, err } + klog.Error(log("failed to ReadDir %s: %v", sioDiskIDPath, err)) + return nil, err + } result := []os.FileInfo{} for _, file := range files { @@ -360,13 +360,13 @@ func (c *sioClient) getSioDiskPaths() ([]os.FileInfo, error) { // GetVolumeRefs counts the number of references an SIO volume has a disk device. // This is useful in preventing premature detach. -func (c *sioClient) GetVolumeRefs(volId sioVolumeID) (refs int, err error) { +func (c *sioClient) GetVolumeRefs(volID sioVolumeID) (refs int, err error) { files, err := c.getSioDiskPaths() if err != nil { return 0, err } for _, file := range files { - if strings.Contains(file.Name(), string(volId)) { + if strings.Contains(file.Name(), string(volID)) { refs++ } } @@ -391,7 +391,7 @@ func (c *sioClient) Devs() (map[string]string, error) { volumeID := parts[3] devPath, err := filepath.EvalSymlinks(fmt.Sprintf("%s/%s", sioDiskIDPath, f.Name())) if err != nil { - glog.Error(log("devicepath-to-volID mapping error: %v", err)) + klog.Error(log("devicepath-to-volID mapping error: %v", err)) return nil, err } // map volumeID to devicePath @@ -417,18 +417,18 @@ func (c *sioClient) WaitForAttachedDevice(token string) (string, error) { case <-ticker.C: devMap, err := c.Devs() if err != nil { - glog.Error(log("failed while waiting for volume to attach: %v", err)) + klog.Error(log("failed while waiting for volume to attach: %v", err)) return "", err } go func() { - glog.V(4).Info(log("waiting for volume %s to be mapped/attached", token)) + klog.V(4).Info(log("waiting for volume %s to be mapped/attached", token)) }() if path, ok := devMap[token]; ok { - glog.V(4).Info(log("device %s mapped to vol %s", path, token)) + klog.V(4).Info(log("device %s mapped to vol %s", path, token)) return path, nil } case <-timer.C: - glog.Error(log("timed out while waiting for volume to be mapped to a device")) + klog.Error(log("timed out while waiting for volume to be mapped to a device")) return "", fmt.Errorf("volume attach timeout") } } @@ -451,18 +451,18 @@ func (c *sioClient) WaitForDetachedDevice(token string) error { case <-ticker.C: devMap, err := c.Devs() if err != nil { - glog.Error(log("failed while waiting for volume to unmap/detach: %v", err)) + klog.Error(log("failed while waiting for volume to unmap/detach: %v", err)) return err } go func() { - glog.V(4).Info(log("waiting for volume %s to be unmapped/detached", token)) + klog.V(4).Info(log("waiting for volume %s to be unmapped/detached", token)) }() // cant find vol id, then ok. 
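// That is, detach is considered complete once the volume ID no longer appears as a key in the
// map returned by Devs(), which maps volume IDs to device paths; a hypothetical entry would be
// devMap["6edf5f3b00000001"] = "/dev/scinia" (both values illustrative, not from this change).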
if _, ok := devMap[token]; !ok { return nil } case <-timer.C: - glog.Error(log("timed out while waiting for volume %s to be unmapped/detached", token)) + klog.Error(log("timed out while waiting for volume %s to be unmapped/detached", token)) return fmt.Errorf("volume detach timeout") } } @@ -477,7 +477,7 @@ func (c *sioClient) findSystem(sysname string) (sys *siotypes.System, err error) } systems, err := c.client.GetInstance("") if err != nil { - glog.Error(log("failed to retrieve instances: %v", err)) + klog.Error(log("failed to retrieve instances: %v", err)) return nil, err } for _, sys = range systems { @@ -485,7 +485,7 @@ func (c *sioClient) findSystem(sysname string) (sys *siotypes.System, err error) return sys, nil } } - glog.Error(log("system %s not found", sysname)) + klog.Error(log("system %s not found", sysname)) return nil, errors.New("system not found") } @@ -494,13 +494,13 @@ func (c *sioClient) findProtectionDomain(pdname string) (*siotypes.ProtectionDom if c.sysClient != nil { protectionDomain, err := c.sysClient.FindProtectionDomain("", pdname, "") if err != nil { - glog.Error(log("failed to retrieve protection domains: %v", err)) + klog.Error(log("failed to retrieve protection domains: %v", err)) return nil, err } c.pdClient.ProtectionDomain = protectionDomain return protectionDomain, nil } - glog.Error(log("protection domain %s not set", pdname)) + klog.Error(log("protection domain %s not set", pdname)) return nil, errors.New("protection domain not set") } @@ -509,13 +509,13 @@ func (c *sioClient) findStoragePool(spname string) (*siotypes.StoragePool, error if c.pdClient != nil { sp, err := c.pdClient.FindStoragePool("", spname, "") if err != nil { - glog.Error(log("failed to retrieve storage pool: %v", err)) + klog.Error(log("failed to retrieve storage pool: %v", err)) return nil, err } c.spClient.StoragePool = sp return sp, nil } - glog.Error(log("storage pool %s not set", spname)) + klog.Error(log("storage pool %s not set", spname)) return nil, errors.New("storage pool not set") } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_mgr.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_mgr.go index 711fa7fce5d5..a322276b1dda 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_mgr.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_mgr.go @@ -22,7 +22,7 @@ import ( "k8s.io/kubernetes/pkg/util/mount" - "github.com/golang/glog" + "k8s.io/klog" siotypes "github.com/codedellemc/goscaleio/types/v1" ) @@ -57,22 +57,22 @@ func newSioMgr(configs map[string]string, exec mount.Exec) (*sioMgr, error) { // getClient safely returns an sioInterface func (m *sioMgr) getClient() (sioInterface, error) { if m.client == nil { - glog.V(4).Info(log("creating scaleio client")) + klog.V(4).Info(log("creating scaleio client")) configs := m.configData username := configs[confKey.username] password := configs[confKey.password] gateway := configs[confKey.gateway] b, err := strconv.ParseBool(configs[confKey.sslEnabled]) if err != nil { - glog.Error(log("failed to parse sslEnabled, must be either \"true\" or \"false\"")) + klog.Error(log("failed to parse sslEnabled, must be either \"true\" or \"false\"")) return nil, err } certsEnabled := b - glog.V(4).Info(log("creating new client for gateway %s", gateway)) + klog.V(4).Info(log("creating new client for gateway %s", gateway)) client, err := newSioClient(gateway, username, password, certsEnabled, m.exec) if err != nil { - glog.Error(log("failed to 
create scaleio client: %v", err)) + klog.Error(log("failed to create scaleio client: %v", err)) return nil, err } @@ -81,11 +81,11 @@ func (m *sioMgr) getClient() (sioInterface, error) { client.spName = configs[confKey.storagePool] client.sdcPath = configs[confKey.sdcRootPath] client.provisionMode = configs[confKey.storageMode] - client.sdcGuid = configs[confKey.sdcGuid] + client.sdcGUID = configs[confKey.sdcGUID] m.client = client - glog.V(4).Info(log("client created successfully [gateway=%s]", gateway)) + klog.V(4).Info(log("client created successfully [gateway=%s]", gateway)) } return m.client, nil } @@ -97,13 +97,13 @@ func (m *sioMgr) CreateVolume(volName string, sizeGB int64) (*siotypes.Volume, e return nil, err } - glog.V(4).Infof("scaleio: creating volume %s", volName) + klog.V(4).Infof("scaleio: creating volume %s", volName) vol, err := client.CreateVolume(volName, sizeGB) if err != nil { - glog.V(4).Infof("scaleio: failed creating volume %s: %v", volName, err) + klog.V(4).Infof("scaleio: failed creating volume %s: %v", volName, err) return nil, err } - glog.V(4).Infof("scaleio: created volume %s successfully", volName) + klog.V(4).Infof("scaleio: created volume %s successfully", volName) return vol, nil } @@ -112,17 +112,17 @@ func (m *sioMgr) CreateVolume(volName string, sizeGB int64) (*siotypes.Volume, e func (m *sioMgr) AttachVolume(volName string, multipleMappings bool) (string, error) { client, err := m.getClient() if err != nil { - glog.Error(log("attach volume failed: %v", err)) + klog.Error(log("attach volume failed: %v", err)) return "", err } - glog.V(4).Infoln(log("attaching volume %s", volName)) + klog.V(4).Infoln(log("attaching volume %s", volName)) iid, err := client.IID() if err != nil { - glog.Error(log("failed to get instanceID")) + klog.Error(log("failed to get instanceID")) return "", err } - glog.V(4).Info(log("attaching volume %s to host instance %s", volName, iid)) + klog.V(4).Info(log("attaching volume %s to host instance %s", volName, iid)) devs, err := client.Devs() if err != nil { @@ -131,29 +131,29 @@ func (m *sioMgr) AttachVolume(volName string, multipleMappings bool) (string, er vol, err := client.FindVolume(volName) if err != nil { - glog.Error(log("failed to find volume %s: %v", volName, err)) + klog.Error(log("failed to find volume %s: %v", volName, err)) return "", err } // handle vol if already attached if len(vol.MappedSdcInfo) > 0 { if m.isSdcMappedToVol(iid, vol) { - glog.V(4).Info(log("skippping attachment, volume %s already attached to sdc %s", volName, iid)) + klog.V(4).Info(log("skipping attachment, volume %s already attached to sdc %s", volName, iid)) return devs[vol.ID], nil } } // attach volume, get deviceName if err := client.AttachVolume(sioVolumeID(vol.ID), multipleMappings); err != nil { - glog.Error(log("attachment for volume %s failed :%v", volName, err)) + klog.Error(log("attachment for volume %s failed: %v", volName, err)) return "", err } device, err := client.WaitForAttachedDevice(vol.ID) if err != nil { - glog.Error(log("failed while waiting for device to attach: %v", err)) + klog.Error(log("failed while waiting for device to attach: %v", err)) return "", err } - glog.V(4).Info(log("volume %s attached successfully as %s to instance %s", volName, device, iid)) + klog.V(4).Info(log("volume %s attached successfully as %s to instance %s", volName, device, iid)) return device, nil } @@ -165,7 +165,7 @@ func (m *sioMgr) IsAttached(volName string) (bool, error) { } iid, err := client.IID() if err != nil { - glog.Error("scaleio:
failed to get instanceID") + klog.Error("scaleio: failed to get instanceID") return false, err } @@ -184,7 +184,7 @@ func (m *sioMgr) DetachVolume(volName string) error { } iid, err := client.IID() if err != nil { - glog.Error(log("failed to get instanceID: %v", err)) + klog.Error(log("failed to get instanceID: %v", err)) return err } @@ -193,7 +193,7 @@ func (m *sioMgr) DetachVolume(volName string) error { return err } if !m.isSdcMappedToVol(iid, vol) { - glog.Warning(log( + klog.Warning(log( "skipping detached, vol %s not attached to instance %s", volName, iid, )) @@ -201,11 +201,11 @@ func (m *sioMgr) DetachVolume(volName string) error { } if err := client.DetachVolume(sioVolumeID(vol.ID)); err != nil { - glog.Error(log("failed to detach vol %s: %v", volName, err)) + klog.Error(log("failed to detach vol %s: %v", volName, err)) return err } - glog.V(4).Info(log("volume %s detached successfully", volName)) + klog.V(4).Info(log("volume %s detached successfully", volName)) return nil } @@ -223,11 +223,11 @@ func (m *sioMgr) DeleteVolume(volName string) error { } if err := client.DeleteVolume(sioVolumeID(vol.ID)); err != nil { - glog.Error(log("failed to delete volume %s: %v", volName, err)) + klog.Error(log("failed to delete volume %s: %v", volName, err)) return err } - glog.V(4).Info(log("deleted volume %s successfully", volName)) + klog.V(4).Info(log("deleted volume %s successfully", volName)) return nil } @@ -235,7 +235,7 @@ func (m *sioMgr) DeleteVolume(volName string) error { // isSdcMappedToVol returns true if the sdc is mapped to the volume func (m *sioMgr) isSdcMappedToVol(sdcID string, vol *siotypes.Volume) bool { if len(vol.MappedSdcInfo) == 0 { - glog.V(4).Info(log("no attachment found")) + klog.V(4).Info(log("no attachment found")) return false } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_plugin.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_plugin.go index 367c9b9638da..eda8556f7a9d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_plugin.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_plugin.go @@ -19,9 +19,9 @@ package scaleio import ( "errors" - "github.com/golang/glog" api "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/keymutex" "k8s.io/kubernetes/pkg/volume" ) @@ -36,6 +36,7 @@ type sioPlugin struct { volumeMtx keymutex.KeyMutex } +// ProbeVolumePlugins is the primary entrypoint for volume plugins. 
func ProbeVolumePlugins() []volume.VolumePlugin { p := &sioPlugin{ host: nil, @@ -107,7 +108,7 @@ func (p *sioPlugin) NewMounter( // NewUnmounter creates a representation of the volume to unmount func (p *sioPlugin) NewUnmounter(specName string, podUID types.UID) (volume.Unmounter, error) { - glog.V(4).Info(log("Unmounter for %s", specName)) + klog.V(4).Info(log("Unmounter for %s", specName)) return &sioVolume{ podUID: podUID, @@ -160,7 +161,7 @@ var _ volume.DeletableVolumePlugin = &sioPlugin{} func (p *sioPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) { attribs, err := getVolumeSourceAttribs(spec) if err != nil { - glog.Error(log("deleter failed to extract volume attributes from spec: %v", err)) + klog.Error(log("deleter failed to extract volume attributes from spec: %v", err)) return nil, err } @@ -186,11 +187,11 @@ func (p *sioPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) { var _ volume.ProvisionableVolumePlugin = &sioPlugin{} func (p *sioPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) { - glog.V(4).Info(log("creating Provisioner")) + klog.V(4).Info(log("creating Provisioner")) configData := options.Parameters if configData == nil { - glog.Error(log("provisioner missing parameters, unable to continue")) + klog.Error(log("provisioner missing parameters, unable to continue")) return nil, errors.New("option parameters missing") } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_util.go index de0c58116657..e1b5116318fd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_util.go @@ -24,7 +24,7 @@ import ( "path" "strconv" - "github.com/golang/glog" + "k8s.io/klog" api "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/volume" @@ -54,7 +54,7 @@ var ( username, password, secretNamespace, - sdcGuid string + sdcGUID string }{ gateway: "gateway", sslEnabled: "sslEnabled", @@ -71,9 +71,9 @@ var ( readOnly: "readOnly", username: "username", password: "password", - sdcGuid: "sdcGuid", + sdcGUID: "sdcGUID", } - sdcGuidLabelName = "scaleio.sdcGuid" + sdcGUIDLabelName = "scaleio.sdcGUID" sdcRootPath = "/opt/emc/scaleio/sdc/bin" secretNotFoundErr = errors.New("secret not found") @@ -140,7 +140,7 @@ func validateConfigs(config map[string]string) error { func applyConfigDefaults(config map[string]string) { b, err := strconv.ParseBool(config[confKey.sslEnabled]) if err != nil { - glog.Warning(log("failed to parse param sslEnabled, setting it to false")) + klog.Warning(log("failed to parse param sslEnabled, setting it to false")) b = false } config[confKey.sslEnabled] = strconv.FormatBool(b) @@ -148,7 +148,7 @@ func applyConfigDefaults(config map[string]string) { config[confKey.fsType] = defaultString(config[confKey.fsType], "xfs") b, err = strconv.ParseBool(config[confKey.readOnly]) if err != nil { - glog.Warning(log("failed to parse param readOnly, setting it to false")) + klog.Warning(log("failed to parse param readOnly, setting it to false")) b = false } config[confKey.readOnly] = strconv.FormatBool(b) @@ -163,21 +163,21 @@ func defaultString(val, defVal string) string { // loadConfig loads configuration data from a file on disk func loadConfig(configName string) (map[string]string, error) { - glog.V(4).Info(log("loading config file %s", configName)) + klog.V(4).Info(log("loading config file %s", configName)) file, err := 
os.Open(configName) if err != nil { - glog.Error(log("failed to open config file %s: %v", configName, err)) + klog.Error(log("failed to open config file %s: %v", configName, err)) return nil, err } defer file.Close() data := map[string]string{} if err := gob.NewDecoder(file).Decode(&data); err != nil { - glog.Error(log("failed to parse config data %s: %v", configName, err)) + klog.Error(log("failed to parse config data %s: %v", configName, err)) return nil, err } applyConfigDefaults(data) if err := validateConfigs(data); err != nil { - glog.Error(log("failed to load ConfigMap %s: %v", err)) + klog.Error(log("failed to load ConfigMap %s: %v", configName, err)) return nil, err } @@ -186,31 +186,31 @@ func loadConfig(configName string) (map[string]string, error) { // saveConfig saves the configuration data to local disk func saveConfig(configName string, data map[string]string) error { - glog.V(4).Info(log("saving config file %s", configName)) + klog.V(4).Info(log("saving config file %s", configName)) dir := path.Dir(configName) if _, err := os.Stat(dir); err != nil { if !os.IsNotExist(err) { return err } - glog.V(4).Info(log("creating config dir for config data: %s", dir)) + klog.V(4).Info(log("creating config dir for config data: %s", dir)) if err := os.MkdirAll(dir, 0750); err != nil { - glog.Error(log("failed to create config data dir %v", err)) + klog.Error(log("failed to create config data dir %v", err)) return err } } file, err := os.Create(configName) if err != nil { - glog.V(4).Info(log("failed to save config data file %s: %v", configName, err)) + klog.V(4).Info(log("failed to save config data file %s: %v", configName, err)) return err } defer file.Close() if err := gob.NewEncoder(file).Encode(data); err != nil { - glog.Error(log("failed to save config %s: %v", configName, err)) + klog.Error(log("failed to save config %s: %v", configName, err)) return err } - glog.V(4).Info(log("config data file saved successfully as %s", configName)) + klog.V(4).Info(log("config data file saved successfully as %s", configName)) return nil } @@ -221,7 +221,7 @@ func attachSecret(plug *sioPlugin, namespace string, configData map[string]strin kubeClient := plug.host.GetKubeClient() secretMap, err := volutil.GetSecretForPV(namespace, secretRefName, sioPluginName, kubeClient) if err != nil { - glog.Error(log("failed to get secret: %v", err)) + klog.Error(log("failed to get secret: %v", err)) return secretNotFoundErr } // merge secret data @@ -232,30 +232,30 @@ func attachSecret(plug *sioPlugin, namespace string, configData map[string]strin return nil } -// attachSdcGuid injects the sdc guid node label value into config -func attachSdcGuid(plug *sioPlugin, conf map[string]string) error { - guid, err := getSdcGuidLabel(plug) +// attachSdcGUID injects the sdc guid node label value into config +func attachSdcGUID(plug *sioPlugin, conf map[string]string) error { + guid, err := getSdcGUIDLabel(plug) if err != nil { return err } - conf[confKey.sdcGuid] = guid + conf[confKey.sdcGUID] = guid return nil } -// getSdcGuidLabel fetches the scaleio.sdcGuid node label +// getSdcGUIDLabel fetches the scaleio.sdcGUID node label // associated with the node executing this code.
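// A node would typically carry that label via something like the following (the GUID value
// here is hypothetical, for illustration only):
//   kubectl label node <node-name> scaleio.sdcGUID=271bad82-08ee-44f2-a2b1-7e2787c27be1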
-func getSdcGuidLabel(plug *sioPlugin) (string, error) { +func getSdcGUIDLabel(plug *sioPlugin) (string, error) { nodeLabels, err := plug.host.GetNodeLabels() if err != nil { return "", err } - label, ok := nodeLabels[sdcGuidLabelName] + label, ok := nodeLabels[sdcGUIDLabelName] if !ok { - glog.V(4).Info(log("node label %s not found", sdcGuidLabelName)) + klog.V(4).Info(log("node label %s not found", sdcGUIDLabelName)) return "", nil } - glog.V(4).Info(log("found node label %s=%s", sdcGuidLabelName, label)) + klog.V(4).Info(log("found node label %s=%s", sdcGUIDLabelName, label)) return label, nil } @@ -284,7 +284,7 @@ func getVolumeSourceAttribs(spec *volume.Spec) (*volSourceAttribs, error) { attribs.readOnly = pSource.ReadOnly } else { msg := log("failed to get ScaleIOVolumeSource or ScaleIOPersistentVolumeSource from spec") - glog.Error(msg) + klog.Error(msg) return nil, errors.New(msg) } return attribs, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_volume.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_volume.go index 0f10dfa1e835..4a5cb35221eb 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_volume.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_volume.go @@ -23,12 +23,12 @@ import ( "strconv" "strings" - "github.com/golang/glog" api "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" meta "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" kstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" @@ -85,20 +85,20 @@ func (v *sioVolume) SetUpAt(dir string, fsGroup *int64) error { v.plugin.volumeMtx.LockKey(v.volSpecName) defer v.plugin.volumeMtx.UnlockKey(v.volSpecName) - glog.V(4).Info(log("setting up volume for PV.spec %s", v.volSpecName)) + klog.V(4).Info(log("setting up volume for PV.spec %s", v.volSpecName)) if err := v.setSioMgr(); err != nil { - glog.Error(log("setup failed to create scalio manager: %v", err)) + klog.Error(log("setup failed to create scaleio manager: %v", err)) return err } mounter := v.plugin.host.GetMounter(v.plugin.GetPluginName()) notDevMnt, err := mounter.IsLikelyNotMountPoint(dir) if err != nil && !os.IsNotExist(err) { - glog.Error(log("IsLikelyNotMountPoint test failed for dir %v", dir)) + klog.Error(log("IsLikelyNotMountPoint test failed for dir %v", dir)) return err } if !notDevMnt { - glog.V(4).Info(log("skipping setup, dir %s already a mount point", v.volName)) + klog.V(4).Info(log("skipping setup, dir %s already a mount point", v.volName)) return nil } @@ -114,12 +114,12 @@ func (v *sioVolume) SetUpAt(dir string, fsGroup *int64) error { } } } - glog.V(4).Info(log("multiple mapping enabled = %v", enableMultiMaps)) + klog.V(4).Info(log("multiple mapping enabled = %v", enableMultiMaps)) volName := v.volName devicePath, err := v.sioMgr.AttachVolume(volName, enableMultiMaps) if err != nil { - glog.Error(log("setup of volume %v: %v", v.volSpecName, err)) + klog.Error(log("setup of volume %v: %v", v.volSpecName, err)) return err } options := []string{} @@ -134,31 +134,31 @@ func (v *sioVolume) SetUpAt(dir string, fsGroup *int64) error { options = append(options, "ro") } - glog.V(4).Info(log("mounting device %s -> %s", devicePath, dir)) + klog.V(4).Info(log("mounting device %s -> %s", devicePath, dir)) if err := os.MkdirAll(dir, 0750); err != nil { - glog.Error(log("failed to create dir %#v: %v", dir, err)) +
klog.Error(log("failed to create dir %#v: %v", dir, err)) return err } - glog.V(4).Info(log("setup created mount point directory %s", dir)) + klog.V(4).Info(log("setup created mount point directory %s", dir)) diskMounter := util.NewSafeFormatAndMountFromHost(v.plugin.GetPluginName(), v.plugin.host) err = diskMounter.FormatAndMount(devicePath, dir, v.fsType, options) if err != nil { - glog.Error(log("mount operation failed during setup: %v", err)) + klog.Error(log("mount operation failed during setup: %v", err)) if err := os.Remove(dir); err != nil && !os.IsNotExist(err) { - glog.Error(log("failed to remove dir %s during a failed mount at setup: %v", dir, err)) + klog.Error(log("failed to remove dir %s during a failed mount at setup: %v", dir, err)) return err } return err } if !v.readOnly && fsGroup != nil { - glog.V(4).Info(log("applying value FSGroup ownership")) + klog.V(4).Info(log("applying value FSGroup ownership")) volume.SetVolumeOwnership(v, fsGroup) } - glog.V(4).Info(log("successfully setup PV %s: volume %s mapped as %s mounted at %s", v.volSpecName, v.volName, devicePath, dir)) + klog.V(4).Info(log("successfully setup PV %s: volume %s mapped as %s mounted at %s", v.volSpecName, v.volName, devicePath, dir)) return nil } @@ -188,21 +188,21 @@ func (v *sioVolume) TearDownAt(dir string) error { mounter := v.plugin.host.GetMounter(v.plugin.GetPluginName()) dev, _, err := mount.GetDeviceNameFromMount(mounter, dir) if err != nil { - glog.Errorf(log("failed to get reference count for volume: %s", dir)) + klog.Errorf(log("failed to get reference count for volume: %s", dir)) return err } - glog.V(4).Info(log("attempting to unmount %s", dir)) + klog.V(4).Info(log("attempting to unmount %s", dir)) if err := util.UnmountPath(dir, mounter); err != nil { - glog.Error(log("teardown failed while unmounting dir %s: %v ", dir, err)) + klog.Error(log("teardown failed while unmounting dir %s: %v ", dir, err)) return err } - glog.V(4).Info(log("dir %s unmounted successfully", dir)) + klog.V(4).Info(log("dir %s unmounted successfully", dir)) // detach/unmap deviceBusy, err := mounter.DeviceOpened(dev) if err != nil { - glog.Error(log("teardown unable to get status for device %s: %v", dev, err)) + klog.Error(log("teardown unable to get status for device %s: %v", dev, err)) return err } @@ -210,16 +210,16 @@ func (v *sioVolume) TearDownAt(dir string) error { // use "last attempt wins" strategy to detach volume from node // only allow volume to detach when it is not busy (not being used by other pods) if !deviceBusy { - glog.V(4).Info(log("teardown is attempting to detach/unmap volume for PV %s", v.volSpecName)) + klog.V(4).Info(log("teardown is attempting to detach/unmap volume for PV %s", v.volSpecName)) if err := v.resetSioMgr(); err != nil { - glog.Error(log("teardown failed, unable to reset scalio mgr: %v", err)) + klog.Error(log("teardown failed, unable to reset scaleio mgr: %v", err)) } volName := v.volName if err := v.sioMgr.DetachVolume(volName); err != nil { - glog.Warning(log("warning: detaching failed for volume %s: %v", volName, err)) + klog.Warning(log("warning: detaching failed for volume %s: %v", volName, err)) return nil } - glog.V(4).Infof(log("teardown of volume %v detached successfully", volName)) + klog.V(4).Infof(log("teardown of volume %v detached successfully", volName)) } return nil } @@ -230,20 +230,20 @@ func (v *sioVolume) TearDownAt(dir string) error { var _ volume.Deleter = &sioVolume{} func (v *sioVolume) Delete() error { - glog.V(4).Info(log("deleting pvc %s",
v.volSpecName)) + klog.V(4).Info(log("deleting pvc %s", v.volSpecName)) if err := v.setSioMgrFromSpec(); err != nil { - glog.Error(log("delete failed while setting sio manager: %v", err)) + klog.Error(log("delete failed while setting sio manager: %v", err)) return err } err := v.sioMgr.DeleteVolume(v.volName) if err != nil { - glog.Error(log("failed to delete volume %s: %v", v.volName, err)) + klog.Error(log("failed to delete volume %s: %v", v.volName, err)) return err } - glog.V(4).Info(log("successfully deleted PV %s with volume %s", v.volSpecName, v.volName)) + klog.V(4).Info(log("successfully deleted PV %s with volume %s", v.volSpecName, v.volName)) return nil } @@ -253,7 +253,7 @@ func (v *sioVolume) Delete() error { var _ volume.Provisioner = &sioVolume{} func (v *sioVolume) Provision(selectedNode *api.Node, allowedTopologies []api.TopologySelectorTerm) (*api.PersistentVolume, error) { - glog.V(4).Info(log("attempting to dynamically provision pvc %v", v.options.PVC.Name)) + klog.V(4).Info(log("attempting to dynamically provision pvc %v", v.options.PVC.Name)) if !util.AccessModesContainedInAll(v.plugin.GetAccessModes(), v.options.PVC.Spec.AccessModes) { return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", v.options.PVC.Spec.AccessModes, v.plugin.GetAccessModes()) @@ -266,7 +266,7 @@ func (v *sioVolume) Provision(selectedNode *api.Node, allowedTopologies []api.To // setup volume attrributes genName := v.generateName("k8svol", 11) var oneGig int64 = 1024 * 1024 * 1024 - var eightGig int64 = 8 * oneGig + eightGig := 8 * oneGig capacity := v.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)] volSizeBytes := capacity.Value() @@ -278,13 +278,13 @@ func (v *sioVolume) Provision(selectedNode *api.Node, allowedTopologies []api.To if volSizeBytes < eightGig { volSizeGB = int64(util.RoundUpSize(eightGig, oneGig)) - glog.V(4).Info(log("capacity less than 8Gi found, adjusted to %dGi", volSizeGB)) + klog.V(4).Info(log("capacity less than 8Gi found, adjusted to %dGi", volSizeGB)) } // create sio manager if err := v.setSioMgrFromConfig(); err != nil { - glog.Error(log("provision failed while setting up sio mgr: %v", err)) + klog.Error(log("provision failed while setting up sio mgr: %v", err)) return nil, err } @@ -292,7 +292,7 @@ func (v *sioVolume) Provision(selectedNode *api.Node, allowedTopologies []api.To volName := genName vol, err := v.sioMgr.CreateVolume(volName, volSizeGB) if err != nil { - glog.Error(log("provision failed while creating volume: %v", err)) + klog.Error(log("provision failed while creating volume: %v", err)) return nil, err } @@ -300,12 +300,12 @@ func (v *sioVolume) Provision(selectedNode *api.Node, allowedTopologies []api.To v.configData[confKey.volumeName] = volName sslEnabled, err := strconv.ParseBool(v.configData[confKey.sslEnabled]) if err != nil { - glog.Warning(log("failed to parse parameter sslEnabled, setting to false")) + klog.Warning(log("failed to parse parameter sslEnabled, setting to false")) sslEnabled = false } readOnly, err := strconv.ParseBool(v.configData[confKey.readOnly]) if err != nil { - glog.Warning(log("failed to parse parameter readOnly, setting it to false")) + klog.Warning(log("failed to parse parameter readOnly, setting it to false")) readOnly = false } @@ -348,24 +348,24 @@ func (v *sioVolume) Provision(selectedNode *api.Node, allowedTopologies []api.To pv.Spec.AccessModes = v.plugin.GetAccessModes() } - glog.V(4).Info(log("provisioner created pv %v and volume %s successfully", pvName, 
vol.Name)) + klog.V(4).Info(log("provisioner created pv %v and volume %s successfully", pvName, vol.Name)) return pv, nil } // setSioMgr creates scaleio mgr from cached config data if found // otherwise, setups new config data and create mgr func (v *sioVolume) setSioMgr() error { - glog.V(4).Info(log("setting up sio mgr for spec %s", v.volSpecName)) + klog.V(4).Info(log("setting up sio mgr for spec %s", v.volSpecName)) podDir := v.plugin.host.GetPodPluginDir(v.podUID, sioPluginName) configName := path.Join(podDir, sioConfigFileName) if v.sioMgr == nil { configData, err := loadConfig(configName) // try to load config if exist if err != nil { if !os.IsNotExist(err) { - glog.Error(log("failed to load config %s : %v", configName, err)) + klog.Error(log("failed to load config %s : %v", configName, err)) return err } - glog.V(4).Info(log("previous config file not found, creating new one")) + klog.V(4).Info(log("previous config file not found, creating new one")) // prepare config data configData = make(map[string]string) mapVolumeSpec(configData, v.spec) @@ -376,31 +376,31 @@ func (v *sioVolume) setSioMgr() error { configData[confKey.volSpecName] = v.volSpecName if err := validateConfigs(configData); err != nil { - glog.Error(log("config setup failed: %s", err)) + klog.Error(log("config setup failed: %s", err)) return err } // persist config if err := saveConfig(configName, configData); err != nil { - glog.Error(log("failed to save config data: %v", err)) + klog.Error(log("failed to save config data: %v", err)) return err } } // merge in secret if err := attachSecret(v.plugin, v.secretNamespace, configData); err != nil { - glog.Error(log("failed to load secret: %v", err)) + klog.Error(log("failed to load secret: %v", err)) return err } // merge in Sdc Guid label value - if err := attachSdcGuid(v.plugin, configData); err != nil { - glog.Error(log("failed to retrieve sdc guid: %v", err)) + if err := attachSdcGUID(v.plugin, configData); err != nil { + klog.Error(log("failed to retrieve sdc guid: %v", err)) return err } mgr, err := newSioMgr(configData, v.plugin.host.GetExec(v.plugin.GetPluginName())) if err != nil { - glog.Error(log("failed to reset sio manager: %v", err)) + klog.Error(log("failed to reset sio manager: %v", err)) return err } @@ -417,7 +417,7 @@ func (v *sioVolume) resetSioMgr() error { // load config data from disk configData, err := loadConfig(configName) if err != nil { - glog.Error(log("failed to load config data: %v", err)) + klog.Error(log("failed to load config data: %v", err)) return err } v.secretName = configData[confKey.secretName] @@ -427,20 +427,20 @@ func (v *sioVolume) resetSioMgr() error { // attach secret if err := attachSecret(v.plugin, v.secretNamespace, configData); err != nil { - glog.Error(log("failed to load secret: %v", err)) + klog.Error(log("failed to load secret: %v", err)) return err } // merge in Sdc Guid label value - if err := attachSdcGuid(v.plugin, configData); err != nil { - glog.Error(log("failed to retrieve sdc guid: %v", err)) + if err := attachSdcGUID(v.plugin, configData); err != nil { + klog.Error(log("failed to retrieve sdc guid: %v", err)) return err } mgr, err := newSioMgr(configData, v.plugin.host.GetExec(v.plugin.GetPluginName())) if err != nil { - glog.Error(log("failed to reset scaleio mgr: %v", err)) + klog.Error(log("failed to reset scaleio mgr: %v", err)) return err } v.sioMgr = mgr @@ -451,14 +451,14 @@ func (v *sioVolume) resetSioMgr() error { // setSioFromConfig sets up scaleio mgr from an available config data map // designed 
to be called from dynamic provisioner func (v *sioVolume) setSioMgrFromConfig() error { - glog.V(4).Info(log("setting scaleio mgr from available config")) + klog.V(4).Info(log("setting scaleio mgr from available config")) if v.sioMgr == nil { applyConfigDefaults(v.configData) v.configData[confKey.volSpecName] = v.volSpecName if err := validateConfigs(v.configData); err != nil { - glog.Error(log("config data setup failed: %s", err)) + klog.Error(log("config data setup failed: %s", err)) return err } @@ -469,14 +469,14 @@ func (v *sioVolume) setSioMgrFromConfig() error { } if err := attachSecret(v.plugin, v.secretNamespace, data); err != nil { - glog.Error(log("failed to load secret: %v", err)) + klog.Error(log("failed to load secret: %v", err)) return err } mgr, err := newSioMgr(data, v.plugin.host.GetExec(v.plugin.GetPluginName())) if err != nil { - glog.Error(log("failed while setting scaleio mgr from config: %v", err)) + klog.Error(log("failed while setting scaleio mgr from config: %v", err)) return err } v.sioMgr = mgr @@ -487,7 +487,7 @@ func (v *sioVolume) setSioMgrFromConfig() error { // setSioMgrFromSpec sets the scaleio manager from a spec object. // The spec may be complete or incomplete depending on lifecycle phase. func (v *sioVolume) setSioMgrFromSpec() error { - glog.V(4).Info(log("setting sio manager from spec")) + klog.V(4).Info(log("setting sio manager from spec")) if v.sioMgr == nil { // get config data form spec volume source configData := map[string]string{} @@ -499,20 +499,20 @@ func (v *sioVolume) setSioMgrFromSpec() error { configData[confKey.volSpecName] = v.volSpecName if err := validateConfigs(configData); err != nil { - glog.Error(log("config setup failed: %s", err)) + klog.Error(log("config setup failed: %s", err)) return err } // attach secret object to config data if err := attachSecret(v.plugin, v.secretNamespace, configData); err != nil { - glog.Error(log("failed to load secret: %v", err)) + klog.Error(log("failed to load secret: %v", err)) return err } mgr, err := newSioMgr(configData, v.plugin.host.GetExec(v.plugin.GetPluginName())) if err != nil { - glog.Error(log("failed to reset sio manager: %v", err)) + klog.Error(log("failed to reset sio manager: %v", err)) return err } v.sioMgr = mgr diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/BUILD index a23307dfa7c1..d44d59dd6143 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/BUILD @@ -22,7 +22,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -32,7 +32,7 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/volume:go_default_library", - "//pkg/volume/empty_dir:go_default_library", + "//pkg/volume/emptydir:go_default_library", "//pkg/volume/testing:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/secret.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/secret.go index dda1eec335ba..14485181f92c 100644 --- 
a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/secret.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/secret.go @@ -19,18 +19,18 @@ package secret import ( "fmt" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" ) -// ProbeVolumePlugin is the entry point for plugin detection in a package. +// ProbeVolumePlugins is the entry point for plugin detection in a package. func ProbeVolumePlugins() []volume.VolumePlugin { return []volume.VolumePlugin{&secretPlugin{}} } @@ -179,7 +179,7 @@ func (b *secretVolumeMounter) SetUp(fsGroup *int64) error { } func (b *secretVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { - glog.V(3).Infof("Setting up volume %v for pod %v at %v", b.volName, b.pod.UID, dir) + klog.V(3).Infof("Setting up volume %v for pod %v at %v", b.volName, b.pod.UID, dir) // Wrap EmptyDir, let it do the setup. wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), &b.pod, *b.opts) @@ -191,7 +191,7 @@ func (b *secretVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { secret, err := b.getSecret(b.pod.Namespace, b.source.SecretName) if err != nil { if !(errors.IsNotFound(err) && optional) { - glog.Errorf("Couldn't get secret %v/%v: %v", b.pod.Namespace, b.source.SecretName, err) + klog.Errorf("Couldn't get secret %v/%v: %v", b.pod.Namespace, b.source.SecretName, err) return err } secret = &v1.Secret{ @@ -202,15 +202,8 @@ func (b *secretVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { } } - if err := wrapped.SetUpAt(dir, fsGroup); err != nil { - return err - } - if err := volumeutil.MakeNestedMountpoints(b.volName, dir, b.pod); err != nil { - return err - } - totalBytes := totalSecretBytes(secret) - glog.V(3).Infof("Received secret %v/%v containing (%v) pieces of data, %v total bytes", + klog.V(3).Infof("Received secret %v/%v containing (%v) pieces of data, %v total bytes", b.pod.Namespace, b.source.SecretName, len(secret.Data), @@ -221,28 +214,52 @@ func (b *secretVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { return err } + setupSuccess := false + if err := wrapped.SetUpAt(dir, fsGroup); err != nil { + return err + } + if err := volumeutil.MakeNestedMountpoints(b.volName, dir, b.pod); err != nil { + return err + } + + defer func() { + // Clean up directories if setup fails + if !setupSuccess { + unmounter, unmountCreateErr := b.plugin.NewUnmounter(b.volName, b.podUID) + if unmountCreateErr != nil { + klog.Errorf("error cleaning up mount %s after failure. 
Create unmounter failed with %v", b.volName, unmountCreateErr) + return + } + tearDownErr := unmounter.TearDown() + if tearDownErr != nil { + klog.Errorf("error tearing down volume %s: %v", b.volName, tearDownErr) + } + } + }() + writerContext := fmt.Sprintf("pod %v/%v volume %v", b.pod.Namespace, b.pod.Name, b.volName) writer, err := volumeutil.NewAtomicWriter(dir, writerContext) if err != nil { - glog.Errorf("Error creating atomic writer: %v", err) + klog.Errorf("Error creating atomic writer: %v", err) return err } err = writer.Write(payload) if err != nil { - glog.Errorf("Error writing payload to dir: %v", err) + klog.Errorf("Error writing payload to dir: %v", err) return err } err = volume.SetVolumeOwnership(b, fsGroup) if err != nil { - glog.Errorf("Error applying volume ownership settings for group: %v", fsGroup) + klog.Errorf("Error applying volume ownership settings for group: %v", fsGroup) return err } + setupSuccess = true return nil } -// Note: this function is exported so that it can be called from the projection volume driver +// MakePayload is exported so that it can be called from the projection volume driver func MakePayload(mappings []v1.KeyToPath, secret *v1.Secret, defaultMode *int32, optional bool) (map[string]volumeutil.FileProjection, error) { if defaultMode == nil { return nil, fmt.Errorf("No defaultMode used, not even the default value for it") } @@ -264,9 +281,9 @@ func MakePayload(mappings []v1.KeyToPath, secret *v1.Secret, defaultMode *int32, if optional { continue } - err_msg := "references non-existent secret key" - glog.Errorf(err_msg) - return nil, fmt.Errorf(err_msg) + errMsg := "references non-existent secret key" + klog.Errorf(errMsg) + return nil, fmt.Errorf(errMsg) } fileProjection.Data = []byte(content) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/storageos/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/storageos/BUILD index ee84092563fd..c89c5fee55fc 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/storageos/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/storageos/BUILD @@ -24,9 +24,9 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/storageos/go-api:go_default_library", "//vendor/github.com/storageos/go-api/types:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/storageos/storageos.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/storageos/storageos.go index 1130ae5b5edb..9cb56b353aa8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/storageos/storageos.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/storageos/storageos.go @@ -24,7 +24,7 @@ import ( "path/filepath" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -343,14 +343,14 @@ func (b *storageosMounter) CanMount() error { func (b *storageosMounter) SetUp(fsGroup *int64) error { // Need a namespace to find the volume, try pod's namespace if not set.
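// For example (namespace purely illustrative), a volume spec that omits the volume namespace
// but is mounted by a pod in "default" resolves to the StorageOS volume default/<volName>.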
if b.volNamespace == "" { - glog.V(2).Infof("Setting StorageOS volume namespace to pod namespace: %s", b.podNamespace) + klog.V(2).Infof("Setting StorageOS volume namespace to pod namespace: %s", b.podNamespace) b.volNamespace = b.podNamespace } // Attach the StorageOS volume as a block device devicePath, err := b.manager.AttachVolume(b) if err != nil { - glog.Errorf("Failed to attach StorageOS volume %s: %s", b.volName, err.Error()) + klog.Errorf("Failed to attach StorageOS volume %s: %s", b.volName, err.Error()) return err } @@ -360,7 +360,7 @@ func (b *storageosMounter) SetUp(fsGroup *int64) error { if err != nil { return err } - glog.V(4).Infof("Successfully mounted StorageOS volume %s into global mount directory", b.volName) + klog.V(4).Infof("Successfully mounted StorageOS volume %s into global mount directory", b.volName) // Bind mount the volume into the pod return b.SetUpAt(b.GetPath(), fsGroup) @@ -369,9 +369,9 @@ func (b *storageosMounter) SetUp(fsGroup *int64) error { // SetUp bind mounts the disk global mount to the give volume path. func (b *storageosMounter) SetUpAt(dir string, fsGroup *int64) error { notMnt, err := b.mounter.IsLikelyNotMountPoint(dir) - glog.V(4).Infof("StorageOS volume set up: %s %v %v", dir, !notMnt, err) + klog.V(4).Infof("StorageOS volume set up: %s %v %v", dir, !notMnt, err) if err != nil && !os.IsNotExist(err) { - glog.Errorf("Cannot validate mount point: %s %v", dir, err) + klog.Errorf("Cannot validate mount point: %s %v", dir, err) return err } if !notMnt { @@ -379,7 +379,7 @@ func (b *storageosMounter) SetUpAt(dir string, fsGroup *int64) error { } if err = os.MkdirAll(dir, 0750); err != nil { - glog.Errorf("mkdir failed on disk %s (%v)", dir, err) + klog.Errorf("mkdir failed on disk %s (%v)", dir, err) return err } @@ -391,39 +391,39 @@ func (b *storageosMounter) SetUpAt(dir string, fsGroup *int64) error { mountOptions := util.JoinMountOptions(b.mountOptions, options) globalPDPath := makeGlobalPDName(b.plugin.host, b.pvName, b.volNamespace, b.volName) - glog.V(4).Infof("Attempting to bind mount to pod volume at %s", dir) + klog.V(4).Infof("Attempting to bind mount to pod volume at %s", dir) err = b.mounter.Mount(globalPDPath, dir, "", mountOptions) if err != nil { notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if !notMnt { if mntErr = b.mounter.Unmount(dir); mntErr != nil { - glog.Errorf("Failed to unmount: %v", mntErr) + klog.Errorf("Failed to unmount: %v", mntErr) return err } notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if !notMnt { - glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir) + klog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir) return err } } os.Remove(dir) - glog.Errorf("Mount of disk %s failed: %v", dir, err) + klog.Errorf("Mount of disk %s failed: %v", dir, err) return err } if !b.readOnly { volume.SetVolumeOwnership(b, fsGroup) } - glog.V(4).Infof("StorageOS volume setup complete on %s", dir) + klog.V(4).Infof("StorageOS volume setup complete on %s", dir) return nil } @@ -487,7 +487,7 @@ func (b *storageosUnmounter) GetPath() string { // resource was the last reference to that disk on the kubelet. 
func (b *storageosUnmounter) TearDown() error { if len(b.volNamespace) == 0 || len(b.volName) == 0 { - glog.Warningf("volNamespace: %q, volName: %q not set, skipping TearDown", b.volNamespace, b.volName) + klog.Warningf("volNamespace: %q, volName: %q not set, skipping TearDown", b.volNamespace, b.volName) return fmt.Errorf("pvName not specified for TearDown, waiting for next sync loop") } // Unmount from pod @@ -495,7 +495,7 @@ func (b *storageosUnmounter) TearDown() error { err := b.TearDownAt(mountPath) if err != nil { - glog.Errorf("Unmount from pod failed: %v", err) + klog.Errorf("Unmount from pod failed: %v", err) return err } @@ -503,25 +503,25 @@ func (b *storageosUnmounter) TearDown() error { globalPDPath := makeGlobalPDName(b.plugin.host, b.pvName, b.volNamespace, b.volName) devicePath, _, err := mount.GetDeviceNameFromMount(b.mounter, globalPDPath) if err != nil { - glog.Errorf("Detach failed when getting device from global mount: %v", err) + klog.Errorf("Detach failed when getting device from global mount: %v", err) return err } // Unmount from plugin's disk global mount dir. err = b.TearDownAt(globalPDPath) if err != nil { - glog.Errorf("Detach failed during unmount: %v", err) + klog.Errorf("Detach failed during unmount: %v", err) return err } // Detach loop device err = b.manager.DetachVolume(b, devicePath) if err != nil { - glog.Errorf("Detach device %s failed for volume %s: %v", devicePath, b.pvName, err) + klog.Errorf("Detach device %s failed for volume %s: %v", devicePath, b.pvName, err) return err } - glog.V(4).Infof("Successfully unmounted StorageOS volume %s and detached devices", b.pvName) + klog.V(4).Infof("Successfully unmounted StorageOS volume %s and detached devices", b.pvName) return nil } @@ -530,10 +530,10 @@ func (b *storageosUnmounter) TearDown() error { // resource was the last reference to that disk on the kubelet. 
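// Note that TearDownAt below is deliberately best-effort: failures to unmount or to drop the
// StorageOS mount reference are only logged at V(4) and nil is returned, leaving any retry to
// a later sync loop.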
func (b *storageosUnmounter) TearDownAt(dir string) error { if err := util.UnmountPath(dir, b.mounter); err != nil { - glog.V(4).Infof("Unmounted StorageOS volume %s failed with: %v", b.pvName, err) + klog.V(4).Infof("Unmounted StorageOS volume %s failed with: %v", b.pvName, err) } if err := b.manager.UnmountVolume(b); err != nil { - glog.V(4).Infof("Mount reference for volume %s could not be removed from StorageOS: %v", b.pvName, err) + klog.V(4).Infof("Mount reference for volume %s could not be removed from StorageOS: %v", b.pvName, err) } return nil } @@ -616,7 +616,7 @@ func (c *storageosProvisioner) Provision(selectedNode *v1.Node, allowedTopologie vol, err := c.manager.CreateVolume(c) if err != nil { - glog.Errorf("failed to create volume: %v", err) + klog.Errorf("failed to create volume: %v", err) return nil, err } if vol.FSType == "" { @@ -717,7 +717,7 @@ func getAPICfg(spec *volume.Spec, pod *v1.Pod, kubeClient clientset.Interface) ( func parsePodSecret(pod *v1.Pod, secretName string, kubeClient clientset.Interface) (*storageosAPIConfig, error) { secret, err := util.GetSecretForPod(pod, secretName, kubeClient) if err != nil { - glog.Errorf("failed to get secret from [%q/%q]", pod.Namespace, secretName) + klog.Errorf("failed to get secret from [%q/%q]", pod.Namespace, secretName) return nil, fmt.Errorf("failed to get secret from [%q/%q]", pod.Namespace, secretName) } return parseAPIConfig(secret) @@ -728,7 +728,7 @@ func parsePodSecret(pod *v1.Pod, secretName string, kubeClient clientset.Interfa func parsePVSecret(namespace, secretName string, kubeClient clientset.Interface) (*storageosAPIConfig, error) { secret, err := util.GetSecretForPV(namespace, secretName, storageosPluginName, kubeClient) if err != nil { - glog.Errorf("failed to get secret from [%q/%q]", namespace, secretName) + klog.Errorf("failed to get secret from [%q/%q]", namespace, secretName) return nil, fmt.Errorf("failed to get secret from [%q/%q]", namespace, secretName) } return parseAPIConfig(secret) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/storageos/storageos_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/storageos/storageos_util.go index beac89240871..c102e4226d5d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/storageos/storageos_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/storageos/storageos_util.go @@ -25,9 +25,9 @@ import ( "k8s.io/kubernetes/pkg/util/mount" - "github.com/golang/glog" storageosapi "github.com/storageos/go-api" storageostypes "github.com/storageos/go-api/types" + "k8s.io/klog" ) const ( @@ -88,7 +88,7 @@ func (u *storageosUtil) NewAPI(apiCfg *storageosAPIConfig) error { apiPass: defaultAPIPassword, apiVersion: defaultAPIVersion, } - glog.V(4).Infof("Using default StorageOS API settings: addr %s, version: %s", apiCfg.apiAddr, defaultAPIVersion) + klog.V(4).Infof("Using default StorageOS API settings: addr %s, version: %s", apiCfg.apiAddr, defaultAPIVersion) } api, err := storageosapi.NewVersionedClient(apiCfg.apiAddr, defaultAPIVersion) @@ -122,7 +122,7 @@ func (u *storageosUtil) CreateVolume(p *storageosProvisioner) (*storageosVolume, vol, err := u.api.VolumeCreate(opts) if err != nil { - glog.Errorf("volume create failed for volume %q (%v)", opts.Name, err) + klog.Errorf("volume create failed for volume %q (%v)", opts.Name, err) return nil, err } return &storageosVolume{ @@ -157,7 +157,7 @@ func (u *storageosUtil) AttachVolume(b *storageosMounter) (string, error) { vol, err := 
u.api.Volume(b.volNamespace, b.volName) if err != nil { - glog.Warningf("volume retrieve failed for volume %q with namespace %q (%v)", b.volName, b.volNamespace, err) + klog.Warningf("volume retrieve failed for volume %q with namespace %q (%v)", b.volName, b.volNamespace, err) return "", err } @@ -170,14 +170,14 @@ func (u *storageosUtil) AttachVolume(b *storageosMounter) (string, error) { Namespace: vol.Namespace, } if err := u.api.VolumeUnmount(opts); err != nil { - glog.Warningf("Couldn't clear existing StorageOS mount reference: %v", err) + klog.Warningf("Couldn't clear existing StorageOS mount reference: %v", err) } } srcPath := path.Join(b.deviceDir, vol.ID) dt, err := pathDeviceType(srcPath) if err != nil { - glog.Warningf("volume source path %q for volume %q not ready (%v)", srcPath, b.volName, err) + klog.Warningf("volume source path %q for volume %q not ready (%v)", srcPath, b.volName, err) return "", err } @@ -217,7 +217,7 @@ func (u *storageosUtil) MountVolume(b *storageosMounter, mntDevice, deviceMountP } } if err = os.MkdirAll(deviceMountPath, 0750); err != nil { - glog.Errorf("mkdir failed on disk %s (%v)", deviceMountPath, err) + klog.Errorf("mkdir failed on disk %s (%v)", deviceMountPath, err) return err } options := []string{} @@ -255,7 +255,7 @@ func (u *storageosUtil) UnmountVolume(b *storageosUnmounter) error { if err := u.NewAPI(b.apiCfg); err != nil { // We can't always get the config we need, so allow the unmount to // succeed even if we can't remove the mount reference from the API. - glog.V(4).Infof("Could not remove mount reference in the StorageOS API as no credentials available to the unmount operation") + klog.V(4).Infof("Could not remove mount reference in the StorageOS API as no credentials available to the unmount operation") return nil } @@ -291,11 +291,11 @@ func (u *storageosUtil) DeviceDir(b *storageosMounter) string { ctrl, err := u.api.Controller(b.plugin.host.GetHostName()) if err != nil { - glog.Warningf("node device path lookup failed: %v", err) + klog.Warningf("node device path lookup failed: %v", err) return defaultDeviceDir } if ctrl == nil || ctrl.DeviceDir == "" { - glog.Warningf("node device path not set, using default: %s", defaultDeviceDir) + klog.Warningf("node device path not set, using default: %s", defaultDeviceDir) return defaultDeviceDir } return ctrl.DeviceDir @@ -327,7 +327,7 @@ func attachFileDevice(path string, exec mount.Exec) (string, error) { // If no existing loop device for the path, create one if blockDevicePath == "" { - glog.V(4).Infof("Creating device for path: %s", path) + klog.V(4).Infof("Creating device for path: %s", path) blockDevicePath, err = makeLoopDevice(path, exec) if err != nil { return "", err @@ -349,7 +349,7 @@ func getLoopDevice(path string, exec mount.Exec) (string, error) { args := []string{"-j", path} out, err := exec.Run(losetupPath, args...) if err != nil { - glog.V(2).Infof("Failed device discover command for path %s: %v", path, err) + klog.V(2).Infof("Failed device discover command for path %s: %v", path, err) return "", err } return parseLosetupOutputForDevice(out) @@ -359,7 +359,7 @@ func makeLoopDevice(path string, exec mount.Exec) (string, error) { args := []string{"-f", "-P", "--show", path} out, err := exec.Run(losetupPath, args...) 
if err != nil { - glog.V(2).Infof("Failed device create command for path %s: %v", path, err) + klog.V(2).Infof("Failed device create command for path %s: %v", path, err) return "", err } return parseLosetupOutputForDevice(out) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD index a6b371ede0f9..64eac778de27 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD @@ -25,6 +25,7 @@ go_library( "//pkg/features:go_default_library", "//pkg/kubelet/apis:go_default_library", "//pkg/util/mount:go_default_library", + "//pkg/util/resizefs:go_default_library", "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util/types:go_default_library", @@ -40,8 +41,8 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -51,6 +52,7 @@ go_test( "atomic_writer_test.go", "attach_limit_test.go", "device_util_linux_test.go", + "main_test.go", "nested_volumes_test.go", "resize_util_test.go", "util_test.go", @@ -59,6 +61,7 @@ go_test( deps = [ "//pkg/apis/core/install:go_default_library", "//pkg/apis/core/v1/helper:go_default_library", + "//pkg/features:go_default_library", "//pkg/kubelet/apis:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/util/slice:go_default_library", @@ -69,6 +72,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go index 0613bef6d9f5..7b3d034012f4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/atomic_writer.go @@ -27,7 +27,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/sets" ) @@ -61,6 +61,7 @@ type AtomicWriter struct { logContext string } +// FileProjection contains file Data and access Mode type FileProjection struct { Data []byte Mode int32 @@ -120,7 +121,7 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error { // (1) cleanPayload, err := validatePayload(payload) if err != nil { - glog.Errorf("%s: invalid payload: %v", w.logContext, err) + klog.Errorf("%s: invalid payload: %v", w.logContext, err) return err } @@ -129,7 +130,7 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error { oldTsDir, err := os.Readlink(dataDirPath) if err != nil { if !os.IsNotExist(err) { - glog.Errorf("%s: error reading link for data directory: %v", w.logContext, err) + klog.Errorf("%s: error reading link for data directory: %v", w.logContext, err) return err } // although Readlink() returns "" on err, don't be fragile by relying on it 
(since it's not specified in docs) @@ -144,41 +145,40 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error { // (3) pathsToRemove, err = w.pathsToRemove(cleanPayload, oldTsPath) if err != nil { - glog.Errorf("%s: error determining user-visible files to remove: %v", w.logContext, err) + klog.Errorf("%s: error determining user-visible files to remove: %v", w.logContext, err) return err } // (4) if should, err := shouldWritePayload(cleanPayload, oldTsPath); err != nil { - glog.Errorf("%s: error determining whether payload should be written to disk: %v", w.logContext, err) + klog.Errorf("%s: error determining whether payload should be written to disk: %v", w.logContext, err) return err } else if !should && len(pathsToRemove) == 0 { - glog.V(4).Infof("%s: no update required for target directory %v", w.logContext, w.targetDir) + klog.V(4).Infof("%s: no update required for target directory %v", w.logContext, w.targetDir) return nil } else { - glog.V(4).Infof("%s: write required for target directory %v", w.logContext, w.targetDir) + klog.V(4).Infof("%s: write required for target directory %v", w.logContext, w.targetDir) } } // (5) tsDir, err := w.newTimestampDir() if err != nil { - glog.V(4).Infof("%s: error creating new ts data directory: %v", w.logContext, err) + klog.V(4).Infof("%s: error creating new ts data directory: %v", w.logContext, err) return err } tsDirName := filepath.Base(tsDir) // (6) if err = w.writePayloadToDir(cleanPayload, tsDir); err != nil { - glog.Errorf("%s: error writing payload to ts data directory %s: %v", w.logContext, tsDir, err) + klog.Errorf("%s: error writing payload to ts data directory %s: %v", w.logContext, tsDir, err) return err - } else { - glog.V(4).Infof("%s: performed write of new data to ts data directory: %s", w.logContext, tsDir) } + klog.V(4).Infof("%s: performed write of new data to ts data directory: %s", w.logContext, tsDir) // (7) if err = w.createUserVisibleFiles(cleanPayload); err != nil { - glog.Errorf("%s: error creating visible symlinks in %s: %v", w.logContext, w.targetDir, err) + klog.Errorf("%s: error creating visible symlinks in %s: %v", w.logContext, w.targetDir, err) return err } @@ -186,7 +186,7 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error { newDataDirPath := path.Join(w.targetDir, newDataDirName) if err = os.Symlink(tsDirName, newDataDirPath); err != nil { os.RemoveAll(tsDir) - glog.Errorf("%s: error creating symbolic link for atomic update: %v", w.logContext, err) + klog.Errorf("%s: error creating symbolic link for atomic update: %v", w.logContext, err) return err } @@ -201,20 +201,20 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error { if err != nil { os.Remove(newDataDirPath) os.RemoveAll(tsDir) - glog.Errorf("%s: error renaming symbolic link for data directory %s: %v", w.logContext, newDataDirPath, err) + klog.Errorf("%s: error renaming symbolic link for data directory %s: %v", w.logContext, newDataDirPath, err) return err } // (10) if err = w.removeUserVisiblePaths(pathsToRemove); err != nil { - glog.Errorf("%s: error removing old visible symlinks: %v", w.logContext, err) + klog.Errorf("%s: error removing old visible symlinks: %v", w.logContext, err) return err } // (11) if len(oldTsDir) > 0 { if err = os.RemoveAll(oldTsPath); err != nil { - glog.Errorf("%s: error removing old data directory %s: %v", w.logContext, oldTsDir, err) + klog.Errorf("%s: error removing old data directory %s: %v", w.logContext, oldTsDir, err) return err } } @@ -222,7 +222,7 @@ func (w 
*AtomicWriter) Write(payload map[string]FileProjection) error { return nil } -// validatePayload returns an error if any path in the payload returns a copy of the payload with the paths cleaned. +// validatePayload returns an error if any path in the payload is invalid; otherwise it returns a copy of the payload with the paths cleaned. func validatePayload(payload map[string]FileProjection) (map[string]FileProjection, error) { cleanPayload := make(map[string]FileProjection) for k, content := range payload { @@ -329,7 +329,7 @@ func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection, oldTsDir } else if err != nil { return nil, err } - glog.V(5).Infof("%s: current paths: %+v", w.targetDir, paths.List()) + klog.V(5).Infof("%s: current paths: %+v", w.targetDir, paths.List()) newPaths := sets.NewString() for file := range payload { @@ -341,10 +341,10 @@ func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection, oldTsDir subPath = strings.TrimSuffix(subPath, string(os.PathSeparator)) } } - glog.V(5).Infof("%s: new paths: %+v", w.targetDir, newPaths.List()) + klog.V(5).Infof("%s: new paths: %+v", w.targetDir, newPaths.List()) result := paths.Difference(newPaths) - glog.V(5).Infof("%s: paths to remove: %+v", w.targetDir, result) + klog.V(5).Infof("%s: paths to remove: %+v", w.targetDir, result) return result, nil } @@ -353,7 +353,7 @@ func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection, oldTsDir func (w *AtomicWriter) newTimestampDir() (string, error) { tsDir, err := ioutil.TempDir(w.targetDir, time.Now().UTC().Format("..2006_01_02_15_04_05.")) if err != nil { - glog.Errorf("%s: unable to create new temp directory: %v", w.logContext, err) + klog.Errorf("%s: unable to create new temp directory: %v", w.logContext, err) return "", err } @@ -362,7 +362,7 @@ func (w *AtomicWriter) newTimestampDir() (string, error) { // regardless of the process' umask. err = os.Chmod(tsDir, 0755) if err != nil { - glog.Errorf("%s: unable to set mode on new temp directory: %v", w.logContext, err) + klog.Errorf("%s: unable to set mode on new temp directory: %v", w.logContext, err) return "", err } @@ -380,13 +380,13 @@ func (w *AtomicWriter) writePayloadToDir(payload map[string]FileProjection, dir err := os.MkdirAll(baseDir, os.ModePerm) if err != nil { - glog.Errorf("%s: unable to create directory %s: %v", w.logContext, baseDir, err) + klog.Errorf("%s: unable to create directory %s: %v", w.logContext, baseDir, err) return err } err = ioutil.WriteFile(fullPath, content, mode) if err != nil { - glog.Errorf("%s: unable to write file %s with mode %v: %v", w.logContext, fullPath, mode, err) + klog.Errorf("%s: unable to write file %s with mode %v: %v", w.logContext, fullPath, mode, err) return err } // Chmod is needed because ioutil.WriteFile() ends up calling @@ -395,7 +395,7 @@ func (w *AtomicWriter) writePayloadToDir(payload map[string]FileProjection, dir // in the file no matter what the umask is. 
err = os.Chmod(fullPath, mode) if err != nil { - glog.Errorf("%s: unable to write file %s with mode %v: %v", w.logContext, fullPath, mode, err) + klog.Errorf("%s: unable to write file %s with mode %v: %v", w.logContext, fullPath, mode, err) } } @@ -445,7 +445,7 @@ func (w *AtomicWriter) removeUserVisiblePaths(paths sets.String) error { continue } if err := os.Remove(path.Join(w.targetDir, p)); err != nil { - glog.Errorf("%s: error pruning old user-visible path %s: %v", w.logContext, p, err) + klog.Errorf("%s: error pruning old user-visible path %s: %v", w.logContext, p, err) lasterr = err } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/attach_limit.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/attach_limit.go index 04bda52533e5..8325dbf755b8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/attach_limit.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/attach_limit.go @@ -27,6 +27,14 @@ import ( const ( // EBSVolumeLimitKey resource name that will store volume limits for EBS EBSVolumeLimitKey = "attachable-volumes-aws-ebs" + // EBSNitroLimitRegex finds nitro instance types with different limit than EBS defaults + EBSNitroLimitRegex = "^[cmr]5.*|t3|z1d" + // DefaultMaxEBSVolumes is the limit for volumes attached to an instance. + // Amazon recommends no more than 40; the system root volume uses at least one. + // See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/volume_limits.html#linux-specific-volume-limits + DefaultMaxEBSVolumes = 39 + // DefaultMaxEBSNitroVolumeLimit is default EBS volume limit on m5 and c5 instances + DefaultMaxEBSNitroVolumeLimit = 25 // AzureVolumeLimitKey stores resource name that will store volume limits for Azure AzureVolumeLimitKey = "attachable-volumes-azure-disk" // GCEVolumeLimitKey stores resource name that will store volume limits for GCE node diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/device_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/device_util.go index f508399b1a0d..1d0791ee05c1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/device_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/device_util.go @@ -25,10 +25,10 @@ type DeviceUtil interface { } type deviceHandler struct { - get_io IoUtil + getIo IoUtil } //NewDeviceHandler Create a new IoHandler implementation func NewDeviceHandler(io IoUtil) DeviceUtil { - return &deviceHandler{get_io: io} + return &deviceHandler{getIo: io} } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/device_util_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/device_util_linux.go index 2d1f43c83b00..66e8564915b0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/device_util_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/device_util_linux.go @@ -21,7 +21,7 @@ package util import ( "errors" "fmt" - "github.com/golang/glog" + "k8s.io/klog" "path" "strconv" "strings" @@ -29,7 +29,7 @@ import ( // FindMultipathDeviceForDevice given a device name like /dev/sdx, find the devicemapper parent func (handler *deviceHandler) FindMultipathDeviceForDevice(device string) string { - io := handler.get_io + io := handler.getIo disk, err := findDeviceForPath(device, io) if err != nil { return "" @@ -68,7 +68,7 @@ func findDeviceForPath(path string, io IoUtil) (string, error) { // which are managed by the devicemapper dm-1. 
func (handler *deviceHandler) FindSlaveDevicesOnMultipath(dm string) []string { var devices []string - io := handler.get_io + io := handler.getIo // Split path /dev/dm-1 into "", "dev", "dm-1" parts := strings.Split(dm, "/") if len(parts) != 3 || !strings.HasPrefix(parts[1], "dev") { @@ -92,7 +92,7 @@ func (handler *deviceHandler) FindSlaveDevicesOnMultipath(dm string) []string { // } func (handler *deviceHandler) GetISCSIPortalHostMapForTarget(targetIqn string) (map[string]int, error) { portalHostMap := make(map[string]int) - io := handler.get_io + io := handler.getIo // Iterate over all the iSCSI hosts in sysfs sysPath := "/sys/class/iscsi_host" @@ -109,7 +109,7 @@ func (handler *deviceHandler) GetISCSIPortalHostMapForTarget(targetIqn string) ( } hostNumber, err := strconv.Atoi(strings.TrimPrefix(hostName, "host")) if err != nil { - glog.Errorf("Could not get number from iSCSI host: %s", hostName) + klog.Errorf("Could not get number from iSCSI host: %s", hostName) continue } @@ -205,7 +205,7 @@ func (handler *deviceHandler) GetISCSIPortalHostMapForTarget(targetIqn string) ( // corresponding to that LUN. func (handler *deviceHandler) FindDevicesForISCSILun(targetIqn string, lun int) ([]string, error) { devices := make([]string, 0) - io := handler.get_io + io := handler.getIo // Iterate over all the iSCSI hosts in sysfs sysPath := "/sys/class/iscsi_host" @@ -222,7 +222,7 @@ func (handler *deviceHandler) FindDevicesForISCSILun(targetIqn string, lun int) } hostNumber, err := strconv.Atoi(strings.TrimPrefix(hostName, "host")) if err != nil { - glog.Errorf("Could not get number from iSCSI host: %s", hostName) + klog.Errorf("Could not get number from iSCSI host: %s", hostName) continue } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/doc.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/doc.go index 82c976e7df93..d36990b8ffad 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/doc.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/doc.go @@ -14,5 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Contains utility code for use by volume plugins. +// Package util contains utility code for use by volume plugins. package util diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/error.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/error.go index 2c9655866b04..a52a1b65ce27 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/error.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/error.go @@ -20,7 +20,7 @@ import ( k8stypes "k8s.io/apimachinery/pkg/types" ) -// This error on attach indicates volume is attached to a different node +// DanglingAttachError indicates volume is attached to a different node // than we expected. 
type DanglingAttachError struct { msg string @@ -32,6 +32,7 @@ func (err *DanglingAttachError) Error() string { return err.msg } +// NewDanglingError creates a new dangling error func NewDanglingError(msg string, node k8stypes.NodeName, devicePath string) error { return &DanglingAttachError{ msg: msg, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/finalizer.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/finalizer.go index 92d3c2bdd57b..e1fdf5673c43 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/finalizer.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/finalizer.go @@ -17,9 +17,9 @@ limitations under the License. package util const ( - // Name of finalizer on PVCs that have a running pod. + // PVCProtectionFinalizer is the name of finalizer on PVCs that have a running pod. PVCProtectionFinalizer = "kubernetes.io/pvc-protection" - // Name of finalizer on PVs that are bound by PVCs + // PVProtectionFinalizer is the name of finalizer on PVs that are bound by PVCs PVProtectionFinalizer = "kubernetes.io/pv-protection" ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/metrics.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/metrics.go index e3af12df8906..338e812663b5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/metrics.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/metrics.go @@ -17,9 +17,11 @@ limitations under the License. package util import ( + "fmt" "time" "github.com/prometheus/client_golang/prometheus" + "k8s.io/kubernetes/pkg/volume" ) var storageOperationMetric = prometheus.NewHistogramVec( @@ -62,3 +64,15 @@ func OperationCompleteHook(plugin, operationName string) func(*error) { } return opComplete } + +// GetFullQualifiedPluginNameForVolume returns the fully qualified plugin name +// for the given volume. For a CSI plugin, it appends the driver name to the +// plugin name, e.g. kubernetes.io/csi:csi-hostpath. This helps to distinguish +// between metrics emitted for CSI volumes which may be handled by different +// CSI plugin drivers. 
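Note: the godoc that closes this hunk belongs to GetFullQualifiedPluginNameForVolume, whose body opens the next hunk; the CompleteFunc rewrites later in operation_generator.go all route through it. A hedged sketch of the label values it yields (the spec literal is illustrative, but the field names and the example output follow the function and its godoc):

```go
package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/volume"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

func main() {
	csiSpec := &volume.Spec{
		PersistentVolume: &v1.PersistentVolume{
			Spec: v1.PersistentVolumeSpec{
				PersistentVolumeSource: v1.PersistentVolumeSource{
					CSI: &v1.CSIPersistentVolumeSource{Driver: "csi-hostpath"},
				},
			},
		},
	}

	// CSI-backed PVs get the driver appended, so the storage operation
	// histogram can be split per driver: "kubernetes.io/csi:csi-hostpath".
	fmt.Println(volumeutil.GetFullQualifiedPluginNameForVolume("kubernetes.io/csi", csiSpec))

	// Non-CSI specs (and nil, as passed by the verify_volumes_are_attached
	// hooks later in this diff) fall through unchanged: "kubernetes.io/aws-ebs".
	fmt.Println(volumeutil.GetFullQualifiedPluginNameForVolume("kubernetes.io/aws-ebs", nil))
}
```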
+func GetFullQualifiedPluginNameForVolume(pluginName string, spec *volume.Spec) string { + if spec != nil && spec.PersistentVolume != nil && spec.PersistentVolume.Spec.CSI != nil { + return fmt.Sprintf("%s:%s", pluginName, spec.PersistentVolume.Spec.CSI.Driver) + } + return pluginName +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/BUILD index 4622d91fbb1c..945d33dfdf27 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/BUILD @@ -15,7 +15,7 @@ go_library( "//pkg/volume/util/types:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/nestedpendingoperations.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/nestedpendingoperations.go index 526ea403ceea..d9d277e0b219 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/nestedpendingoperations.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/nestedpendingoperations.go @@ -28,9 +28,9 @@ import ( "fmt" "sync" - "github.com/golang/glog" "k8s.io/api/core/v1" k8sRuntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff" "k8s.io/kubernetes/pkg/volume/util/types" ) @@ -240,7 +240,7 @@ func (grm *nestedPendingOperations) operationComplete( if *err != nil { // Log error logOperationName := getOperationName(volumeName, podName) - glog.Errorf("operation %s failed with: %v", + klog.Errorf("operation %s failed with: %v", logOperationName, *err) } @@ -252,7 +252,7 @@ func (grm *nestedPendingOperations) operationComplete( if getOpErr != nil { // Failed to find existing operation logOperationName := getOperationName(volumeName, podName) - glog.Errorf("Operation %s completed. error: %v. exponentialBackOffOnError is enabled, but failed to get operation to update.", + klog.Errorf("Operation %s completed. error: %v. exponentialBackOffOnError is enabled, but failed to get operation to update.", logOperationName, *err) return @@ -264,7 +264,7 @@ func (grm *nestedPendingOperations) operationComplete( // Log error operationName := getOperationName(volumeName, podName) - glog.Errorf("%v", grm.operations[existingOpIndex].expBackoff. + klog.Errorf("%v", grm.operations[existingOpIndex].expBackoff. 
GenerateNoRetriesPermittedMsg(operationName)) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/BUILD index f07c3863afd7..3b2cd65d5d76 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/BUILD @@ -18,7 +18,6 @@ go_library( "//pkg/features:go_default_library", "//pkg/kubelet/events:go_default_library", "//pkg/util/mount:go_default_library", - "//pkg/util/resizefs:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//pkg/volume/util/nestedpendingoperations:go_default_library", @@ -31,7 +30,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go index 8983ae48d39e..745910dc220d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go @@ -24,7 +24,7 @@ import ( "fmt" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -627,14 +627,14 @@ func (oe *operationExecutor) VerifyVolumesAreAttached( for node, nodeAttachedVolumes := range attachedVolumes { for _, volumeAttached := range nodeAttachedVolumes { if volumeAttached.VolumeSpec == nil { - glog.Errorf("VerifyVolumesAreAttached: nil spec for volume %s", volumeAttached.VolumeName) + klog.Errorf("VerifyVolumesAreAttached: nil spec for volume %s", volumeAttached.VolumeName) continue } volumePlugin, err := oe.operationGenerator.GetVolumePluginMgr().FindPluginBySpec(volumeAttached.VolumeSpec) if err != nil || volumePlugin == nil { - glog.Errorf( + klog.Errorf( "VolumesAreAttached.FindPluginBySpec failed for volume %q (spec.Name: %q) on node %q with error: %v", volumeAttached.VolumeName, volumeAttached.VolumeSpec.Name(), @@ -673,7 +673,7 @@ func (oe *operationExecutor) VerifyVolumesAreAttached( // If node doesn't support Bulk volume polling it is best to poll individually nodeError := oe.VerifyVolumesAreAttachedPerNode(nodeAttachedVolumes, node, actualStateOfWorld) if nodeError != nil { - glog.Errorf("BulkVerifyVolumes.VerifyVolumesAreAttached verifying volumes on node %q with %v", node, nodeError) + klog.Errorf("BulkVerifyVolumes.VerifyVolumesAreAttached verifying volumes on node %q with %v", node, nodeError) } break } @@ -686,14 +686,14 @@ func (oe *operationExecutor) VerifyVolumesAreAttached( volumeSpecMapByPlugin[pluginName], actualStateOfWorld) if err != nil { - glog.Errorf("BulkVerifyVolumes.GenerateBulkVolumeVerifyFunc error bulk verifying volumes for plugin %q with %v", pluginName, err) + klog.Errorf("BulkVerifyVolumes.GenerateBulkVolumeVerifyFunc error bulk verifying volumes for plugin %q with %v", pluginName, err) } // Ugly hack to ensure - we don't do parallel bulk polling of same volume plugin uniquePluginName := v1.UniqueVolumeName(pluginName) err = 
oe.pendingOperations.Run(uniquePluginName, "" /* Pod Name */, generatedOperations) if err != nil { - glog.Errorf("BulkVerifyVolumes.Run Error bulk volume verification for plugin %q with %v", pluginName, err) + klog.Errorf("BulkVerifyVolumes.Run Error bulk volume verification for plugin %q with %v", pluginName, err) } } } @@ -862,7 +862,7 @@ func (oe *operationExecutor) ReconstructVolumeOperation( // Filesystem Volume case if volumeMode == v1.PersistentVolumeFilesystem { // Create volumeSpec from mount path - glog.V(5).Infof("Starting operationExecutor.ReconstructVolumepodName") + klog.V(5).Infof("Starting operationExecutor.ReconstructVolumepodName") volumeSpec, err := plugin.ConstructVolumeSpec(volumeSpecName, mountPath) if err != nil { return nil, err @@ -872,7 +872,7 @@ func (oe *operationExecutor) ReconstructVolumeOperation( // Block Volume case // Create volumeSpec from mount path - glog.V(5).Infof("Starting operationExecutor.ReconstructVolume") + klog.V(5).Infof("Starting operationExecutor.ReconstructVolume") if mapperPlugin == nil { return nil, fmt.Errorf("Could not find block volume plugin %q (spec.Name: %q) pod %q (UID: %q)", pluginName, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go index 57c7adfa7cb7..36c10b67752a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go @@ -22,7 +22,6 @@ import ( "strings" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -30,11 +29,11 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/record" + "k8s.io/klog" expandcache "k8s.io/kubernetes/pkg/controller/volume/expand/cache" "k8s.io/kubernetes/pkg/features" kevents "k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/pkg/util/mount" - "k8s.io/kubernetes/pkg/util/resizefs" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" volumetypes "k8s.io/kubernetes/pkg/volume/util/types" @@ -72,11 +71,11 @@ func NewOperationGenerator(kubeClient clientset.Interface, blkUtil volumepathhandler.BlockVolumePathHandler) OperationGenerator { return &operationGenerator{ - kubeClient: kubeClient, - volumePluginMgr: volumePluginMgr, - recorder: recorder, + kubeClient: kubeClient, + volumePluginMgr: volumePluginMgr, + recorder: recorder, checkNodeCapabilitiesBeforeMount: checkNodeCapabilitiesBeforeMount, - blkUtil: blkUtil, + blkUtil: blkUtil, } } @@ -140,13 +139,13 @@ func (og *operationGenerator) GenerateVolumesAreAttachedFunc( // Iterate each volume spec and put them into a map index by the pluginName for _, volumeAttached := range attachedVolumes { if volumeAttached.VolumeSpec == nil { - glog.Errorf("VerifyVolumesAreAttached.GenerateVolumesAreAttachedFunc: nil spec for volume %s", volumeAttached.VolumeName) + klog.Errorf("VerifyVolumesAreAttached.GenerateVolumesAreAttachedFunc: nil spec for volume %s", volumeAttached.VolumeName) continue } volumePlugin, err := og.volumePluginMgr.FindPluginBySpec(volumeAttached.VolumeSpec) if err != nil || volumePlugin == nil { - glog.Errorf(volumeAttached.GenerateErrorDetailed("VolumesAreAttached.FindPluginBySpec failed", err).Error()) + 
klog.Errorf(volumeAttached.GenerateErrorDetailed("VolumesAreAttached.FindPluginBySpec failed", err).Error()) continue } volumeSpecList, pluginExists := volumesPerPlugin[volumePlugin.GetPluginName()] @@ -166,7 +165,7 @@ func (og *operationGenerator) GenerateVolumesAreAttachedFunc( attachableVolumePlugin, err := og.volumePluginMgr.FindAttachablePluginByName(pluginName) if err != nil || attachableVolumePlugin == nil { - glog.Errorf( + klog.Errorf( "VolumeAreAttached.FindAttachablePluginBySpec failed for plugin %q with: %v", pluginName, err) @@ -175,7 +174,7 @@ func (og *operationGenerator) GenerateVolumesAreAttachedFunc( volumeAttacher, newAttacherErr := attachableVolumePlugin.NewAttacher() if newAttacherErr != nil { - glog.Errorf( + klog.Errorf( "VolumesAreAttached.NewAttacher failed for getting plugin %q with: %v", pluginName, newAttacherErr) @@ -184,7 +183,7 @@ func (og *operationGenerator) GenerateVolumesAreAttachedFunc( attached, areAttachedErr := volumeAttacher.VolumesAreAttached(volumesSpecs, nodeName) if areAttachedErr != nil { - glog.Errorf( + klog.Errorf( "VolumesAreAttached failed for checking on node %q with: %v", nodeName, areAttachedErr) @@ -194,7 +193,7 @@ func (og *operationGenerator) GenerateVolumesAreAttachedFunc( for spec, check := range attached { if !check { actualStateOfWorld.MarkVolumeAsDetached(volumeSpecMap[spec], nodeName) - glog.V(1).Infof("VerifyVolumesAreAttached determined volume %q (spec.Name: %q) is no longer attached to node %q, therefore it was marked as detached.", + klog.V(1).Infof("VerifyVolumesAreAttached determined volume %q (spec.Name: %q) is no longer attached to node %q, therefore it was marked as detached.", volumeSpecMap[spec], spec.Name(), nodeName) } } @@ -204,7 +203,7 @@ func (og *operationGenerator) GenerateVolumesAreAttachedFunc( return volumetypes.GeneratedOperations{ OperationFunc: volumesAreAttachedFunc, - CompleteFunc: util.OperationCompleteHook("", "verify_volumes_are_attached_per_node"), + CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume("", nil), "verify_volumes_are_attached_per_node"), EventRecorderFunc: nil, // nil because we do not want to generate event on error }, nil } @@ -219,7 +218,7 @@ func (og *operationGenerator) GenerateBulkVolumeVerifyFunc( attachableVolumePlugin, err := og.volumePluginMgr.FindAttachablePluginByName(pluginName) if err != nil || attachableVolumePlugin == nil { - glog.Errorf( + klog.Errorf( "BulkVerifyVolume.FindAttachablePluginBySpec failed for plugin %q with: %v", pluginName, err) @@ -229,7 +228,7 @@ func (og *operationGenerator) GenerateBulkVolumeVerifyFunc( volumeAttacher, newAttacherErr := attachableVolumePlugin.NewAttacher() if newAttacherErr != nil { - glog.Errorf( + klog.Errorf( "BulkVerifyVolume.NewAttacher failed for getting plugin %q with: %v", attachableVolumePlugin, newAttacherErr) @@ -238,13 +237,13 @@ func (og *operationGenerator) GenerateBulkVolumeVerifyFunc( bulkVolumeVerifier, ok := volumeAttacher.(volume.BulkVolumeVerifier) if !ok { - glog.Errorf("BulkVerifyVolume failed to type assert attacher %q", bulkVolumeVerifier) + klog.Errorf("BulkVerifyVolume failed to type assert attacher %q", bulkVolumeVerifier) return nil, nil } attached, bulkAttachErr := bulkVolumeVerifier.BulkVerifyVolumes(pluginNodeVolumes) if bulkAttachErr != nil { - glog.Errorf("BulkVerifyVolume.BulkVerifyVolumes Error checking volumes are attached with %v", bulkAttachErr) + klog.Errorf("BulkVerifyVolume.BulkVerifyVolumes Error checking volumes are attached with %v", bulkAttachErr) return nil, 
nil } @@ -253,7 +252,7 @@ func (og *operationGenerator) GenerateBulkVolumeVerifyFunc( nodeVolumeSpecs, nodeChecked := attached[nodeName] if !nodeChecked { - glog.V(2).Infof("VerifyVolumesAreAttached.BulkVerifyVolumes failed for node %q and leaving volume %q as attached", + klog.V(2).Infof("VerifyVolumesAreAttached.BulkVerifyVolumes failed for node %q and leaving volume %q as attached", nodeName, volumeSpec.Name()) continue @@ -262,7 +261,7 @@ func (og *operationGenerator) GenerateBulkVolumeVerifyFunc( check := nodeVolumeSpecs[volumeSpec] if !check { - glog.V(2).Infof("VerifyVolumesAreAttached.BulkVerifyVolumes failed for node %q and volume %q", + klog.V(2).Infof("VerifyVolumesAreAttached.BulkVerifyVolumes failed for node %q and volume %q", nodeName, volumeSpec.Name()) actualStateOfWorld.MarkVolumeAsDetached(volumeSpecMap[volumeSpec], nodeName) @@ -275,7 +274,7 @@ func (og *operationGenerator) GenerateBulkVolumeVerifyFunc( return volumetypes.GeneratedOperations{ OperationFunc: bulkVolumeVerifyFunc, - CompleteFunc: util.OperationCompleteHook(pluginName, "verify_volumes_are_attached"), + CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(pluginName, nil), "verify_volumes_are_attached"), EventRecorderFunc: nil, // nil because we do not want to generate event on error }, nil @@ -320,7 +319,7 @@ func (og *operationGenerator) GenerateAttachVolumeFunc( derr.DevicePath) if addErr != nil { - glog.Errorf("AttachVolume.MarkVolumeAsAttached failed to fix dangling volume error for volume %q with %s", volumeToAttach.VolumeName, addErr) + klog.Errorf("AttachVolume.MarkVolumeAsAttached failed to fix dangling volume error for volume %q with %s", volumeToAttach.VolumeName, addErr) } } @@ -333,7 +332,7 @@ func (og *operationGenerator) GenerateAttachVolumeFunc( for _, pod := range volumeToAttach.ScheduledPods { og.recorder.Eventf(pod, v1.EventTypeNormal, kevents.SuccessfulAttachVolume, simpleMsg) } - glog.Infof(volumeToAttach.GenerateMsgDetailed("AttachVolume.Attach succeeded", "")) + klog.Infof(volumeToAttach.GenerateMsgDetailed("AttachVolume.Attach succeeded", "")) // Update actual state of world addVolumeNodeErr := actualStateOfWorld.MarkVolumeAsAttached( @@ -349,7 +348,7 @@ func (og *operationGenerator) GenerateAttachVolumeFunc( return volumetypes.GeneratedOperations{ OperationFunc: attachVolumeFunc, EventRecorderFunc: eventRecorderFunc, - CompleteFunc: util.OperationCompleteHook(attachableVolumePlugin.GetPluginName(), "volume_attach"), + CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(attachableVolumePlugin.GetPluginName(), volumeToAttach.VolumeSpec), "volume_attach"), }, nil } @@ -417,7 +416,7 @@ func (og *operationGenerator) GenerateDetachVolumeFunc( return volumeToDetach.GenerateError("DetachVolume.Detach failed", err) } - glog.Infof(volumeToDetach.GenerateMsgDetailed("DetachVolume.Detach succeeded", "")) + klog.Infof(volumeToDetach.GenerateMsgDetailed("DetachVolume.Detach succeeded", "")) // Update actual state of world actualStateOfWorld.MarkVolumeAsDetached( @@ -428,7 +427,7 @@ func (og *operationGenerator) GenerateDetachVolumeFunc( return volumetypes.GeneratedOperations{ OperationFunc: getVolumePluginMgrFunc, - CompleteFunc: util.OperationCompleteHook(pluginName, "volume_detach"), + CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(pluginName, volumeToDetach.VolumeSpec), "volume_detach"), EventRecorderFunc: nil, // nil because we do not want to generate event on error }, nil } @@ -495,7 +494,7 @@ func 
(og *operationGenerator) GenerateMountVolumeFunc( devicePath := volumeToMount.DevicePath if volumeAttacher != nil { // Wait for attachable volumes to finish attaching - glog.Infof(volumeToMount.GenerateMsgDetailed("MountVolume.WaitForAttach entering", fmt.Sprintf("DevicePath %q", volumeToMount.DevicePath))) + klog.Infof(volumeToMount.GenerateMsgDetailed("MountVolume.WaitForAttach entering", fmt.Sprintf("DevicePath %q", volumeToMount.DevicePath))) devicePath, err = volumeAttacher.WaitForAttach( volumeToMount.VolumeSpec, devicePath, volumeToMount.Pod, waitForAttachTimeout) @@ -504,7 +503,7 @@ func (og *operationGenerator) GenerateMountVolumeFunc( return volumeToMount.GenerateError("MountVolume.WaitForAttach failed", err) } - glog.Infof(volumeToMount.GenerateMsgDetailed("MountVolume.WaitForAttach succeeded", fmt.Sprintf("DevicePath %q", devicePath))) + klog.Infof(volumeToMount.GenerateMsgDetailed("MountVolume.WaitForAttach succeeded", fmt.Sprintf("DevicePath %q", devicePath))) } if volumeDeviceMounter != nil { @@ -525,7 +524,7 @@ func (og *operationGenerator) GenerateMountVolumeFunc( return volumeToMount.GenerateError("MountVolume.MountDevice failed", err) } - glog.Infof(volumeToMount.GenerateMsgDetailed("MountVolume.MountDevice succeeded", fmt.Sprintf("device mount path %q", deviceMountPath))) + klog.Infof(volumeToMount.GenerateMsgDetailed("MountVolume.MountDevice succeeded", fmt.Sprintf("device mount path %q", deviceMountPath))) // Update actual state of world to reflect volume is globally mounted markDeviceMountedErr := actualStateOfWorld.MarkDeviceAsMounted( @@ -561,11 +560,11 @@ func (og *operationGenerator) GenerateMountVolumeFunc( } _, detailedMsg := volumeToMount.GenerateMsg("MountVolume.SetUp succeeded", "") - verbosity := glog.Level(1) + verbosity := klog.Level(1) if isRemount { - verbosity = glog.Level(4) + verbosity = klog.Level(4) } - glog.V(verbosity).Infof(detailedMsg) + klog.V(verbosity).Infof(detailedMsg) // Update actual state of world markVolMountedErr := actualStateOfWorld.MarkVolumeAsMounted( @@ -594,20 +593,19 @@ func (og *operationGenerator) GenerateMountVolumeFunc( return volumetypes.GeneratedOperations{ OperationFunc: mountVolumeFunc, EventRecorderFunc: eventRecorderFunc, - CompleteFunc: util.OperationCompleteHook(volumePlugin.GetPluginName(), "volume_mount"), + CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(volumePlugin.GetPluginName(), volumeToMount.VolumeSpec), "volume_mount"), }, nil } func (og *operationGenerator) resizeFileSystem(volumeToMount VolumeToMount, devicePath, deviceMountPath, pluginName string) (simpleErr, detailedErr error) { if !utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumes) { - glog.V(4).Infof("Resizing is not enabled for this volume %s", volumeToMount.VolumeName) + klog.V(4).Infof("Resizing is not enabled for this volume %s", volumeToMount.VolumeName) return nil, nil } - mounter := og.volumePluginMgr.Host.GetMounter(pluginName) // Get expander, if possible expandableVolumePlugin, _ := - og.volumePluginMgr.FindExpandablePluginBySpec(volumeToMount.VolumeSpec) + og.volumePluginMgr.FindFSResizablePluginBySpec(volumeToMount.VolumeSpec) if expandableVolumePlugin != nil && expandableVolumePlugin.RequiresFSResize() && @@ -623,33 +621,20 @@ func (og *operationGenerator) resizeFileSystem(volumeToMount VolumeToMount, devi pvSpecCap := pv.Spec.Capacity[v1.ResourceStorage] if pvcStatusCap.Cmp(pvSpecCap) < 0 { // File system resize was requested, proceed - 
glog.V(4).Infof(volumeToMount.GenerateMsgDetailed("MountVolume.resizeFileSystem entering", fmt.Sprintf("DevicePath %q", volumeToMount.DevicePath))) + klog.V(4).Infof(volumeToMount.GenerateMsgDetailed("MountVolume.resizeFileSystem entering", fmt.Sprintf("DevicePath %q", volumeToMount.DevicePath))) if volumeToMount.VolumeSpec.ReadOnly { simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MountVolume.resizeFileSystem failed", "requested read-only file system") - glog.Warningf(detailedMsg) + klog.Warningf(detailedMsg) og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FileSystemResizeFailed, simpleMsg) return nil, nil } - - diskFormatter := &mount.SafeFormatAndMount{ - Interface: mounter, - Exec: og.volumePluginMgr.Host.GetExec(expandableVolumePlugin.GetPluginName()), - } - - resizer := resizefs.NewResizeFs(diskFormatter) - resizeStatus, resizeErr := resizer.Resize(devicePath, deviceMountPath) - - if resizeErr != nil { + if resizeErr := expandableVolumePlugin.ExpandFS(volumeToMount.VolumeSpec, devicePath, deviceMountPath, pvSpecCap, pvcStatusCap); resizeErr != nil { return volumeToMount.GenerateError("MountVolume.resizeFileSystem failed", resizeErr) } - - if resizeStatus { - simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MountVolume.resizeFileSystem succeeded", "") - og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeNormal, kevents.FileSystemResizeSuccess, simpleMsg) - glog.Infof(detailedMsg) - } - + simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MountVolume.resizeFileSystem succeeded", "") + og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeNormal, kevents.FileSystemResizeSuccess, simpleMsg) + klog.Infof(detailedMsg) // File system resize succeeded, now update the PVC's Capacity to match the PV's err = util.MarkFSResizeFinished(pvc, pv.Spec.Capacity, og.kubeClient) if err != nil { @@ -695,7 +680,7 @@ func (og *operationGenerator) GenerateUnmountVolumeFunc( return volumeToUnmount.GenerateError("UnmountVolume.TearDown failed", unmountErr) } - glog.Infof( + klog.Infof( "UnmountVolume.TearDown succeeded for volume %q (OuterVolumeSpecName: %q) pod %q (UID: %q). InnerVolumeSpecName %q. 
PluginName %q, VolumeGidValue %q", volumeToUnmount.VolumeName, volumeToUnmount.OuterVolumeSpecName, @@ -710,7 +695,7 @@ func (og *operationGenerator) GenerateUnmountVolumeFunc( volumeToUnmount.PodName, volumeToUnmount.VolumeName) if markVolMountedErr != nil { // On failure, just log and exit - glog.Errorf(volumeToUnmount.GenerateErrorDetailed("UnmountVolume.MarkVolumeAsUnmounted failed", markVolMountedErr).Error()) + klog.Errorf(volumeToUnmount.GenerateErrorDetailed("UnmountVolume.MarkVolumeAsUnmounted failed", markVolMountedErr).Error()) } return nil, nil @@ -718,7 +703,7 @@ func (og *operationGenerator) GenerateUnmountVolumeFunc( return volumetypes.GeneratedOperations{ OperationFunc: unmountVolumeFunc, - CompleteFunc: util.OperationCompleteHook(volumePlugin.GetPluginName(), "volume_unmount"), + CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(volumePlugin.GetPluginName(), volumeToUnmount.VolumeSpec), "volume_unmount"), EventRecorderFunc: nil, // nil because we do not want to generate event on error }, nil } @@ -780,7 +765,7 @@ func (og *operationGenerator) GenerateUnmountDeviceFunc( fmt.Errorf("the device is in use when it was no longer expected to be in use")) } - glog.Infof(deviceToDetach.GenerateMsg("UnmountDevice succeeded", "")) + klog.Infof(deviceToDetach.GenerateMsg("UnmountDevice succeeded", "")) // Update actual state of world markDeviceUnmountedErr := actualStateOfWorld.MarkDeviceAsUnmounted( @@ -795,7 +780,7 @@ func (og *operationGenerator) GenerateUnmountDeviceFunc( return volumetypes.GeneratedOperations{ OperationFunc: unmountDeviceFunc, - CompleteFunc: util.OperationCompleteHook(deviceMountableVolumePlugin.GetPluginName(), "unmount_device"), + CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(deviceMountableVolumePlugin.GetPluginName(), deviceToDetach.VolumeSpec), "unmount_device"), EventRecorderFunc: nil, // nil because we do not want to generate event on error }, nil } @@ -859,7 +844,7 @@ func (og *operationGenerator) GenerateMapVolumeFunc( } if volumeAttacher != nil { // Wait for attachable volumes to finish attaching - glog.Infof(volumeToMount.GenerateMsgDetailed("MapVolume.WaitForAttach entering", fmt.Sprintf("DevicePath %q", volumeToMount.DevicePath))) + klog.Infof(volumeToMount.GenerateMsgDetailed("MapVolume.WaitForAttach entering", fmt.Sprintf("DevicePath %q", volumeToMount.DevicePath))) devicePath, err = volumeAttacher.WaitForAttach( volumeToMount.VolumeSpec, volumeToMount.DevicePath, volumeToMount.Pod, waitForAttachTimeout) @@ -868,7 +853,7 @@ func (og *operationGenerator) GenerateMapVolumeFunc( return volumeToMount.GenerateError("MapVolume.WaitForAttach failed", err) } - glog.Infof(volumeToMount.GenerateMsgDetailed("MapVolume.WaitForAttach succeeded", fmt.Sprintf("DevicePath %q", devicePath))) + klog.Infof(volumeToMount.GenerateMsgDetailed("MapVolume.WaitForAttach succeeded", fmt.Sprintf("DevicePath %q", devicePath))) } // A plugin doesn't have attacher also needs to map device to global map path with SetUpDevice() @@ -924,15 +909,15 @@ func (og *operationGenerator) GenerateMapVolumeFunc( // Device mapping for global map path succeeded simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MapVolume.MapDevice succeeded", fmt.Sprintf("globalMapPath %q", globalMapPath)) - verbosity := glog.Level(4) + verbosity := klog.Level(4) og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeNormal, kevents.SuccessfulMountVolume, simpleMsg) - glog.V(verbosity).Infof(detailedMsg) + klog.V(verbosity).Infof(detailedMsg) 
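Note on the verbosity juggling in the hunks above: the only type change is glog.Level to klog.Level, but the pattern is worth calling out. Success messages are logged at V(1) the first time and demoted to V(4) on remount and on the global-map-path branch, so per-sync-loop repeats stay out of default kubelet logs. A minimal sketch of the pattern (the helper name is illustrative):

```go
package main

import "k8s.io/klog"

// logSetUpResult sketches the pattern used above: choose a klog.Level at
// runtime, then hand it to klog.V.
func logSetUpResult(isRemount bool, detailedMsg string) {
	verbosity := klog.Level(1) // first-time mounts are worth seeing at -v=1
	if isRemount {
		verbosity = klog.Level(4) // remounts recur every sync loop; demote them
	}
	klog.V(verbosity).Infof("%s", detailedMsg)
}

func main() {
	logSetUpResult(false, "MountVolume.SetUp succeeded")
	logSetUpResult(true, "MountVolume.SetUp succeeded (remount)")
}
```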
// Device mapping for pod device map path succeeded simpleMsg, detailedMsg = volumeToMount.GenerateMsg("MapVolume.MapDevice succeeded", fmt.Sprintf("volumeMapPath %q", volumeMapPath)) - verbosity = glog.Level(1) + verbosity = klog.Level(1) og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeNormal, kevents.SuccessfulMountVolume, simpleMsg) - glog.V(verbosity).Infof(detailedMsg) + klog.V(verbosity).Infof(detailedMsg) // Update actual state of world markVolMountedErr := actualStateOfWorld.MarkVolumeAsMounted( @@ -961,7 +946,7 @@ func (og *operationGenerator) GenerateMapVolumeFunc( return volumetypes.GeneratedOperations{ OperationFunc: mapVolumeFunc, EventRecorderFunc: eventRecorderFunc, - CompleteFunc: util.OperationCompleteHook(blockVolumePlugin.GetPluginName(), "map_volume"), + CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(blockVolumePlugin.GetPluginName(), volumeToMount.VolumeSpec), "map_volume"), }, nil } @@ -1007,7 +992,7 @@ func (og *operationGenerator) GenerateUnmapVolumeFunc( return volumeToUnmount.GenerateError("UnmapVolume.UnmapDevice on global map path failed", unmapDeviceErr) } - glog.Infof( + klog.Infof( "UnmapVolume succeeded for volume %q (OuterVolumeSpecName: %q) pod %q (UID: %q). InnerVolumeSpecName %q. PluginName %q, VolumeGidValue %q", volumeToUnmount.VolumeName, volumeToUnmount.OuterVolumeSpecName, @@ -1022,7 +1007,7 @@ func (og *operationGenerator) GenerateUnmapVolumeFunc( volumeToUnmount.PodName, volumeToUnmount.VolumeName) if markVolUnmountedErr != nil { // On failure, just log and exit - glog.Errorf(volumeToUnmount.GenerateErrorDetailed("UnmapVolume.MarkVolumeAsUnmounted failed", markVolUnmountedErr).Error()) + klog.Errorf(volumeToUnmount.GenerateErrorDetailed("UnmapVolume.MarkVolumeAsUnmounted failed", markVolUnmountedErr).Error()) } return nil, nil @@ -1030,7 +1015,7 @@ func (og *operationGenerator) GenerateUnmapVolumeFunc( return volumetypes.GeneratedOperations{ OperationFunc: unmapVolumeFunc, - CompleteFunc: util.OperationCompleteHook(blockVolumePlugin.GetPluginName(), "unmap_volume"), + CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(blockVolumePlugin.GetPluginName(), volumeToUnmount.VolumeSpec), "unmap_volume"), EventRecorderFunc: nil, // nil because we do not want to generate event on error }, nil } @@ -1060,7 +1045,7 @@ func (og *operationGenerator) GenerateUnmapDeviceFunc( } blockVolumeUnmapper, newUnmapperErr := blockVolumePlugin.NewBlockVolumeUnmapper( - string(deviceToDetach.VolumeName), + deviceToDetach.VolumeSpec.Name(), "" /* podUID */) if newUnmapperErr != nil { return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmapDevice.NewUnmapper failed", newUnmapperErr) @@ -1079,27 +1064,14 @@ func (og *operationGenerator) GenerateUnmapDeviceFunc( return deviceToDetach.GenerateError("UnmapDevice failed", err) } - // Execute tear down device - unmapErr := blockVolumeUnmapper.TearDownDevice(globalMapPath, deviceToDetach.DevicePath) - if unmapErr != nil { - // On failure, return error. Caller will log and retry. - return deviceToDetach.GenerateError("UnmapDevice.TearDownDevice failed", unmapErr) - } - - // Plugin finished TearDownDevice(). Now globalMapPath dir and plugin's stored data - // on the dir are unnecessary, clean up it. - removeMapPathErr := og.blkUtil.RemoveMapPath(globalMapPath) - if removeMapPathErr != nil { - // On failure, return error. Caller will log and retry. 
- return deviceToDetach.GenerateError("UnmapDevice failed", removeMapPathErr) - } - // The block volume is not referenced from Pods. Release file descriptor lock. - glog.V(4).Infof("UnmapDevice: deviceToDetach.DevicePath: %v", deviceToDetach.DevicePath) + // This should be done before calling TearDownDevice, because some plugins that do local detach + // in TearDownDevice will fail in detaching device due to the refcnt on the loopback device. + klog.V(4).Infof("UnmapDevice: deviceToDetach.DevicePath: %v", deviceToDetach.DevicePath) loopPath, err := og.blkUtil.GetLoopDevice(deviceToDetach.DevicePath) if err != nil { if err.Error() == volumepathhandler.ErrDeviceNotFound { - glog.Warningf(deviceToDetach.GenerateMsgDetailed("UnmapDevice: Couldn't find loopback device which takes file descriptor lock", fmt.Sprintf("device path: %q", deviceToDetach.DevicePath))) + klog.Warningf(deviceToDetach.GenerateMsgDetailed("UnmapDevice: Couldn't find loopback device which takes file descriptor lock", fmt.Sprintf("device path: %q", deviceToDetach.DevicePath))) } else { errInfo := "UnmapDevice.GetLoopDevice failed to get loopback device, " + fmt.Sprintf("device path: %q", deviceToDetach.DevicePath) return deviceToDetach.GenerateError(errInfo, err) @@ -1113,6 +1085,21 @@ func (og *operationGenerator) GenerateUnmapDeviceFunc( } } + // Execute tear down device + unmapErr := blockVolumeUnmapper.TearDownDevice(globalMapPath, deviceToDetach.DevicePath) + if unmapErr != nil { + // On failure, return error. Caller will log and retry. + return deviceToDetach.GenerateError("UnmapDevice.TearDownDevice failed", unmapErr) + } + + // Plugin finished TearDownDevice(). Now globalMapPath dir and plugin's stored data + // on the dir are unnecessary, clean up it. + removeMapPathErr := og.blkUtil.RemoveMapPath(globalMapPath) + if removeMapPathErr != nil { + // On failure, return error. Caller will log and retry. 
+ return deviceToDetach.GenerateError("UnmapDevice.RemoveMapPath failed", removeMapPathErr) + } + // Before logging that UnmapDevice succeeded and moving on, // use mounter.PathIsDevice to check if the path is a device, // if so use mounter.DeviceOpened to check if the device is in use anywhere @@ -1128,7 +1115,7 @@ func (og *operationGenerator) GenerateUnmapDeviceFunc( fmt.Errorf("the device is in use when it was no longer expected to be in use")) } - glog.Infof(deviceToDetach.GenerateMsgDetailed("UnmapDevice succeeded", "")) + klog.Infof(deviceToDetach.GenerateMsgDetailed("UnmapDevice succeeded", "")) // Update actual state of world markDeviceUnmountedErr := actualStateOfWorld.MarkDeviceAsUnmounted( @@ -1143,7 +1130,7 @@ func (og *operationGenerator) GenerateUnmapDeviceFunc( return volumetypes.GeneratedOperations{ OperationFunc: unmapDeviceFunc, - CompleteFunc: util.OperationCompleteHook(blockVolumePlugin.GetPluginName(), "unmap_device"), + CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(blockVolumePlugin.GetPluginName(), deviceToDetach.VolumeSpec), "unmap_device"), EventRecorderFunc: nil, // nil because we do not want to generate event on error }, nil } @@ -1202,7 +1189,7 @@ func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc( if attachedVolume.Name == volumeToMount.VolumeName { addVolumeNodeErr := actualStateOfWorld.MarkVolumeAsAttached( v1.UniqueVolumeName(""), volumeToMount.VolumeSpec, nodeName, attachedVolume.DevicePath) - glog.Infof(volumeToMount.GenerateMsgDetailed("Controller attach succeeded", fmt.Sprintf("device path: %q", attachedVolume.DevicePath))) + klog.Infof(volumeToMount.GenerateMsgDetailed("Controller attach succeeded", fmt.Sprintf("device path: %q", attachedVolume.DevicePath))) if addVolumeNodeErr != nil { // On failure, return error. Caller will log and retry. return volumeToMount.GenerateError("VerifyControllerAttachedVolume.MarkVolumeAsAttached failed", addVolumeNodeErr) @@ -1217,7 +1204,7 @@ func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc( return volumetypes.GeneratedOperations{ OperationFunc: verifyControllerAttachedVolumeFunc, - CompleteFunc: util.OperationCompleteHook(volumePlugin.GetPluginName(), "verify_controller_attached_volume"), + CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(volumePlugin.GetPluginName(), volumeToMount.VolumeSpec), "verify_controller_attached_volume"), EventRecorderFunc: nil, // nil because we do not want to generate event on error }, nil @@ -1229,7 +1216,7 @@ func (og *operationGenerator) verifyVolumeIsSafeToDetach( node, fetchErr := og.kubeClient.CoreV1().Nodes().Get(string(volumeToDetach.NodeName), metav1.GetOptions{}) if fetchErr != nil { if errors.IsNotFound(fetchErr) { - glog.Warningf(volumeToDetach.GenerateMsgDetailed("Node not found on API server. DetachVolume will skip safe to detach check", "")) + klog.Warningf(volumeToDetach.GenerateMsgDetailed("Node not found on API server. 
DetachVolume will skip safe to detach check", "")) return nil } @@ -1253,7 +1240,7 @@ func (og *operationGenerator) verifyVolumeIsSafeToDetach( } // Volume is not marked as in use by node - glog.Infof(volumeToDetach.GenerateMsgDetailed("Verified volume is safe to detach", "")) + klog.Infof(volumeToDetach.GenerateMsgDetailed("Verified volume is safe to detach", "")) return nil } @@ -1268,6 +1255,7 @@ func (og *operationGenerator) GenerateExpandVolumeFunc( if err != nil { return volumetypes.GeneratedOperations{}, fmt.Errorf("Error finding plugin for expanding volume: %q with error %v", pvcWithResizeRequest.QualifiedName(), err) } + if volumePlugin == nil { return volumetypes.GeneratedOperations{}, fmt.Errorf("Can not find plugin for expanding volume: %q", pvcWithResizeRequest.QualifiedName()) } @@ -1282,10 +1270,11 @@ func (og *operationGenerator) GenerateExpandVolumeFunc( pvcWithResizeRequest.CurrentSize) if expandErr != nil { - detailedErr := fmt.Errorf("Error expanding volume %q of plugin %s : %v", pvcWithResizeRequest.QualifiedName(), volumePlugin.GetPluginName(), expandErr) + detailedErr := fmt.Errorf("error expanding volume %q of plugin %q: %v", pvcWithResizeRequest.QualifiedName(), volumePlugin.GetPluginName(), expandErr) return detailedErr, detailedErr } - glog.Infof("ExpandVolume succeeded for volume %s", pvcWithResizeRequest.QualifiedName()) + + klog.Infof("ExpandVolume succeeded for volume %s", pvcWithResizeRequest.QualifiedName()) newSize = updatedSize // k8s doesn't have transactions, we can't guarantee that after updating PV - updating PVC will be // successful, that is why all PVCs for which pvc.Spec.Size > pvc.Status.Size must be reprocessed @@ -1296,14 +1285,14 @@ func (og *operationGenerator) GenerateExpandVolumeFunc( detailedErr := fmt.Errorf("Error updating PV spec capacity for volume %q with : %v", pvcWithResizeRequest.QualifiedName(), updateErr) return detailedErr, detailedErr } - glog.Infof("ExpandVolume.UpdatePV succeeded for volume %s", pvcWithResizeRequest.QualifiedName()) + klog.Infof("ExpandVolume.UpdatePV succeeded for volume %s", pvcWithResizeRequest.QualifiedName()) } // No Cloudprovider resize needed, lets mark resizing as done // Rest of the volume expand controller code will assume PVC as *not* resized until pvc.Status.Size // reflects user requested size. 
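Note: the context comment above states the invariant the expand controller leans on; PV and PVC updates are not transactional, so a PVC whose status capacity still trails its spec request is treated as unfinished work and reprocessed. A minimal sketch of that pending-resize test, mirroring the pvcStatusCap.Cmp(pvSpecCap) < 0 comparison in resizeFileSystem earlier in this diff (the helper name is illustrative):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// resizePending mirrors the invariant described above: a claim counts as
// still-resizing while its recorded status capacity is below the requested
// size, since the PVC status update may be lost after the PV update succeeds.
func resizePending(specRequest, statusCapacity resource.Quantity) bool {
	return statusCapacity.Cmp(specRequest) < 0
}

func main() {
	spec := resource.MustParse("10Gi")
	status := resource.MustParse("8Gi")
	fmt.Println(resizePending(spec, status)) // true: this PVC is reprocessed
}
```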
if !volumePlugin.RequiresFSResize() { - glog.V(4).Infof("Controller resizing done for PVC %s", pvcWithResizeRequest.QualifiedName()) + klog.V(4).Infof("Controller resizing done for PVC %s", pvcWithResizeRequest.QualifiedName()) err := resizeMap.MarkAsResized(pvcWithResizeRequest, newSize) if err != nil { @@ -1316,7 +1305,7 @@ func (og *operationGenerator) GenerateExpandVolumeFunc( err := resizeMap.MarkForFSResize(pvcWithResizeRequest) if err != nil { detailedErr := fmt.Errorf("Error updating pvc %s condition for fs resize : %v", pvcWithResizeRequest.QualifiedName(), err) - glog.Warning(detailedErr) + klog.Warning(detailedErr) return nil, nil } } @@ -1332,7 +1321,7 @@ func (og *operationGenerator) GenerateExpandVolumeFunc( return volumetypes.GeneratedOperations{ OperationFunc: expandVolumeFunc, EventRecorderFunc: eventRecorderFunc, - CompleteFunc: util.OperationCompleteHook(volumePlugin.GetPluginName(), "expand_volume"), + CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(volumePlugin.GetPluginName(), volumeSpec), "expand_volume"), }, nil } @@ -1369,6 +1358,7 @@ func (og *operationGenerator) GenerateExpandVolumeFSWithoutUnmountingFunc( fsResizeFunc := func() (error, error) { resizeSimpleError, resizeDetailedError := og.resizeFileSystem(volumeToMount, volumeToMount.DevicePath, deviceMountPath, volumePlugin.GetPluginName()) + if resizeSimpleError != nil || resizeDetailedError != nil { return resizeSimpleError, resizeDetailedError } @@ -1388,7 +1378,7 @@ func (og *operationGenerator) GenerateExpandVolumeFSWithoutUnmountingFunc( return volumetypes.GeneratedOperations{ OperationFunc: fsResizeFunc, EventRecorderFunc: eventRecorderFunc, - CompleteFunc: util.OperationCompleteHook(volumePlugin.GetPluginName(), "volume_fs_resize"), + CompleteFunc: util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(volumePlugin.GetPluginName(), volumeToMount.VolumeSpec), "volume_fs_resize"), }, nil } @@ -1431,8 +1421,10 @@ func isDeviceOpened(deviceToDetach AttachedVolume, mounter mount.Interface) (boo (devicePathErr != nil && strings.Contains(devicePathErr.Error(), "does not exist")) { // not a device path or path doesn't exist //TODO: refer to #36092 - glog.V(3).Infof("The path isn't device path or doesn't exist. Skip checking device path: %s", deviceToDetach.DevicePath) + klog.V(3).Infof("The path isn't device path or doesn't exist. 
Skip checking device path: %s", deviceToDetach.DevicePath) deviceOpened = false + } else if devicePathErr != nil { + return false, deviceToDetach.GenerateErrorDetailed("PathIsDevice failed", devicePathErr) } else { deviceOpened, deviceOpenedErr = mounter.DeviceOpened(deviceToDetach.DevicePath) if deviceOpenedErr != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/recyclerclient/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/recyclerclient/BUILD index e43b35af736d..3ea30eb44913 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/recyclerclient/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/recyclerclient/BUILD @@ -12,7 +12,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/recyclerclient/recycler_client.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/recyclerclient/recycler_client.go index 275f55bbe336..c7a8f147f878 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/recyclerclient/recycler_client.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/recyclerclient/recycler_client.go @@ -20,13 +20,13 @@ import ( "fmt" "sync" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/watch" clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog" ) type RecycleEventRecorder func(eventtype, message string) @@ -51,7 +51,7 @@ func RecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, kubeC // same as above func comments, except 'recyclerClient' is a narrower pod API // interface to ease testing func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, recyclerClient recyclerClient) error { - glog.V(5).Infof("creating recycler pod for volume %s\n", pod.Name) + klog.V(5).Infof("creating recycler pod for volume %s\n", pod.Name) // Generate unique name for the recycler pod - we need to get "already // exists" error when a previous controller has already started recycling @@ -63,7 +63,7 @@ func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Po defer close(stopChannel) podCh, err := recyclerClient.WatchPod(pod.Name, pod.Namespace, stopChannel) if err != nil { - glog.V(4).Infof("cannot start watcher for pod %s/%s: %v", pod.Namespace, pod.Name, err) + klog.V(4).Infof("cannot start watcher for pod %s/%s: %v", pod.Namespace, pod.Name, err) return err } @@ -84,10 +84,10 @@ func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Po err = waitForPod(pod, recyclerClient, podCh) // In all cases delete the recycler pod and log its result. 
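The recyclerclient hunks here are representative of the mechanical glog-to-klog migration applied across this vendor drop: klog preserves glog's API, so only the import path and the identifier change. A minimal sketch assuming the vendored k8s.io/klog package:

```go
package main

import (
	"k8s.io/klog" // was: "github.com/golang/glog"
)

func main() {
	// Identical call surface to glog: leveled, formatted logging.
	klog.V(2).Infof("deleting recycler pod %s/%s", "default", "recycler-for-nfs")
	klog.Flush() // like glog, klog buffers output and should be flushed on exit
}
```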
- glog.V(2).Infof("deleting recycler pod %s/%s", pod.Namespace, pod.Name) + klog.V(2).Infof("deleting recycler pod %s/%s", pod.Namespace, pod.Name) deleteErr := recyclerClient.DeletePod(pod.Name, pod.Namespace) if deleteErr != nil { - glog.Errorf("failed to delete recycler pod %s/%s: %v", pod.Namespace, pod.Name, err) + klog.Errorf("failed to delete recycler pod %s/%s: %v", pod.Namespace, pod.Name, err) } // Returning recycler error is preferred, the pod will be deleted again on @@ -117,7 +117,7 @@ func waitForPod(pod *v1.Pod, recyclerClient recyclerClient, podCh <-chan watch.E case *v1.Pod: // POD changed pod := event.Object.(*v1.Pod) - glog.V(4).Infof("recycler pod update received: %s %s/%s %s", event.Type, pod.Namespace, pod.Name, pod.Status.Phase) + klog.V(4).Infof("recycler pod update received: %s %s/%s %s", event.Type, pod.Namespace, pod.Name, pod.Status.Phase) switch event.Type { case watch.Added, watch.Modified: if pod.Status.Phase == v1.PodSucceeded { @@ -142,7 +142,7 @@ func waitForPod(pod *v1.Pod, recyclerClient recyclerClient, podCh <-chan watch.E case *v1.Event: // Event received podEvent := event.Object.(*v1.Event) - glog.V(4).Infof("recycler event received: %s %s/%s %s/%s %s", event.Type, podEvent.Namespace, podEvent.Name, podEvent.InvolvedObject.Namespace, podEvent.InvolvedObject.Name, podEvent.Message) + klog.V(4).Infof("recycler event received: %s %s/%s %s/%s %s", event.Type, podEvent.Namespace, podEvent.Name, podEvent.InvolvedObject.Namespace, podEvent.InvolvedObject.Name, podEvent.Message) if event.Type == watch.Added { recyclerClient.Event(podEvent.Type, podEvent.Message) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/resize_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/resize_util.go index c1d2a1c82acd..9090ff868623 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/resize_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/resize_util.go @@ -24,10 +24,13 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/strategicpatch" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/kubernetes/pkg/util/resizefs" + "k8s.io/kubernetes/pkg/volume" ) var ( - knownResizeConditions map[v1.PersistentVolumeClaimConditionType]bool = map[v1.PersistentVolumeClaimConditionType]bool{ + knownResizeConditions = map[v1.PersistentVolumeClaimConditionType]bool{ v1.PersistentVolumeClaimFileSystemResizePending: true, v1.PersistentVolumeClaimResizing: true, } @@ -123,3 +126,14 @@ func MergeResizeConditionOnPVC( pvc.Status.Conditions = newConditions return pvc } + +// GenericResizeFS : call generic filesystem resizer for plugins that don't have any special filesystem resize requirements +func GenericResizeFS(host volume.VolumeHost, pluginName, devicePath, deviceMountPath string) (bool, error) { + mounter := host.GetMounter(pluginName) + diskFormatter := &mount.SafeFormatAndMount{ + Interface: mounter, + Exec: host.GetExec(pluginName), + } + resizer := resizefs.NewResizeFs(diskFormatter) + return resizer.Resize(devicePath, deviceMountPath) +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/util.go index 72d9a781e782..070961c2822a 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/util.go @@ -25,7 +25,6 @@ import ( "strings" "syscall" - "github.com/golang/glog" 
"k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -34,6 +33,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog" "k8s.io/kubernetes/pkg/api/legacyscheme" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/features" @@ -101,7 +101,7 @@ func IsReady(dir string) bool { } if !s.Mode().IsRegular() { - glog.Errorf("ready-file is not a file: %s", readyFile) + klog.Errorf("ready-file is not a file: %s", readyFile) return false } @@ -113,14 +113,14 @@ func IsReady(dir string) bool { // created. func SetReady(dir string) { if err := os.MkdirAll(dir, 0750); err != nil && !os.IsExist(err) { - glog.Errorf("Can't mkdir %s: %v", dir, err) + klog.Errorf("Can't mkdir %s: %v", dir, err) return } readyFile := path.Join(dir, readyFileName) file, err := os.Create(readyFile) if err != nil { - glog.Errorf("Can't touch %s: %v", readyFile, err) + klog.Errorf("Can't touch %s: %v", readyFile, err) return } file.Close() @@ -140,7 +140,7 @@ func UnmountPath(mountPath string, mounter mount.Interface) error { func UnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMountPointCheck bool) error { pathExists, pathErr := PathExists(mountPath) if !pathExists { - glog.Warningf("Warning: Unmount skipped because path does not exist: %v", mountPath) + klog.Warningf("Warning: Unmount skipped because path does not exist: %v", mountPath) return nil } corruptedMnt := IsCorruptedMnt(pathErr) @@ -171,13 +171,13 @@ func doUnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMou } if notMnt { - glog.Warningf("Warning: %q is not a mountpoint, deleting", mountPath) + klog.Warningf("Warning: %q is not a mountpoint, deleting", mountPath) return os.Remove(mountPath) } } // Unmount the mount path - glog.V(4).Infof("%q is a mountpoint, unmounting", mountPath) + klog.V(4).Infof("%q is a mountpoint, unmounting", mountPath) if err := mounter.Unmount(mountPath); err != nil { return err } @@ -186,7 +186,7 @@ func doUnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMou return mntErr } if notMnt { - glog.V(4).Infof("%q is unmounted, deleting the directory", mountPath) + klog.V(4).Infof("%q is unmounted, deleting the directory", mountPath) return os.Remove(mountPath) } return fmt.Errorf("Failed to unmount path %v", mountPath) @@ -261,6 +261,7 @@ func GetSecretForPV(secretNamespace, secretName, volumePluginName string, kubeCl return secret, nil } +// GetClassForVolume locates storage class by persistent volume func GetClassForVolume(kubeClient clientset.Interface, pv *v1.PersistentVolume) (*storage.StorageClass, error) { if kubeClient == nil { return nil, fmt.Errorf("Cannot get kube client") @@ -290,7 +291,7 @@ func checkVolumeNodeAffinity(pv *v1.PersistentVolume, nodeLabels map[string]stri if pv.Spec.NodeAffinity.Required != nil { terms := pv.Spec.NodeAffinity.Required.NodeSelectorTerms - glog.V(10).Infof("Match for Required node selector terms %+v", terms) + klog.V(10).Infof("Match for Required node selector terms %+v", terms) if !v1helper.MatchNodeSelectorTerms(terms, labels.Set(nodeLabels), nil) { return fmt.Errorf("No matching NodeSelectorTerms") } @@ -379,11 +380,11 @@ func SelectZonesForVolume(zoneParameterPresent, zonesParameterPresent bool, zone } // scheduler will guarantee if node != null above, zoneFromNode is member of allowedZones. // so if zoneFromNode != "", we can safely assume it is part of allowedZones. 
- if zones, err := chooseZonesForVolumeIncludingZone(allowedZones, pvcName, zoneFromNode, numReplicas); err != nil { + zones, err := chooseZonesForVolumeIncludingZone(allowedZones, pvcName, zoneFromNode, numReplicas) + if err != nil { return nil, fmt.Errorf("cannot process zones in allowedTopologies: %v", err) - } else { - return zones, nil } + return zones, nil } // pick zone from parameters if present @@ -405,11 +406,11 @@ func SelectZonesForVolume(zoneParameterPresent, zonesParameterPresent bool, zone // pick zone from zones with nodes if zonesWithNodes.Len() > 0 { // If node != null (and thus zoneFromNode != ""), zoneFromNode will be member of zonesWithNodes - if zones, err := chooseZonesForVolumeIncludingZone(zonesWithNodes, pvcName, zoneFromNode, numReplicas); err != nil { + zones, err := chooseZonesForVolumeIncludingZone(zonesWithNodes, pvcName, zoneFromNode, numReplicas) + if err != nil { return nil, fmt.Errorf("cannot process zones where nodes exist in the cluster: %v", err) - } else { - return zones, nil } + return zones, nil } return nil, fmt.Errorf("cannot determine zones to provision volume in") } @@ -431,6 +432,7 @@ func ZonesFromAllowedTopologies(allowedTopologies []v1.TopologySelectorTerm) (se return zones, nil } +// ZonesSetToLabelValue converts zones set to label value func ZonesSetToLabelValue(strSet sets.String) string { return strings.Join(strSet.UnsortedList(), kubeletapis.LabelMultiZoneDelimiter) } @@ -511,7 +513,7 @@ func CalculateTimeoutForVolume(minimumTimeout, timeoutIncrement int, pv *v1.Pers func RoundUpSize(volumeSizeBytes int64, allocationUnitBytes int64) int64 { roundedUp := volumeSizeBytes / allocationUnitBytes if volumeSizeBytes%allocationUnitBytes > 0 { - roundedUp += 1 + roundedUp++ } return roundedUp } @@ -605,7 +607,7 @@ func ChooseZoneForVolume(zones sets.String, pvcName string) string { zoneSlice := zones.List() zone := zoneSlice[(hash+index)%uint32(len(zoneSlice))] - glog.V(2).Infof("Creating volume for PVC %q; chose zone=%q from zones=%q", pvcName, zone, zoneSlice) + klog.V(2).Infof("Creating volume for PVC %q; chose zone=%q from zones=%q", pvcName, zone, zoneSlice) return zone } @@ -664,7 +666,7 @@ func ChooseZonesForVolume(zones sets.String, pvcName string, numZones uint32) se replicaZones.Insert(zone) } - glog.V(2).Infof("Creating volume for replicated PVC %q; chosen zones=%q from zones=%q", + klog.V(2).Infof("Creating volume for replicated PVC %q; chosen zones=%q from zones=%q", pvcName, replicaZones.UnsortedList(), zoneSlice) return replicaZones } @@ -672,7 +674,7 @@ func ChooseZonesForVolume(zones sets.String, pvcName string, numZones uint32) se func getPVCNameHashAndIndexOffset(pvcName string) (hash uint32, index uint32) { if pvcName == "" { // We should always be called with a name; this shouldn't happen - glog.Warningf("No name defined during volume create; choosing random zone") + klog.Warningf("No name defined during volume create; choosing random zone") hash = rand.Uint32() } else { @@ -708,7 +710,7 @@ func getPVCNameHashAndIndexOffset(pvcName string) (hash uint32, index uint32) { hashString = hashString[lastDash+1:] } - glog.V(2).Infof("Detected StatefulSet-style volume name %q; index=%d", pvcName, index) + klog.V(2).Infof("Detected StatefulSet-style volume name %q; index=%d", pvcName, index) } } @@ -724,7 +726,7 @@ func getPVCNameHashAndIndexOffset(pvcName string) (hash uint32, index uint32) { // UnmountViaEmptyDir delegates the tear down operation for secret, configmap, git_repo and downwardapi // to empty_dir func 
UnmountViaEmptyDir(dir string, host volume.VolumeHost, volName string, volSpec volume.Spec, podUID utypes.UID) error { - glog.V(3).Infof("Tearing down volume %v for pod %v at %v", volName, podUID, dir) + klog.V(3).Infof("Tearing down volume %v for pod %v at %v", volName, podUID, dir) // Wrap EmptyDir, let it do the teardown. wrapped, err := host.NewWrapperUnmounter(volName, volSpec, podUID) @@ -766,7 +768,7 @@ func JoinMountOptions(userOptions []string, systemOptions []string) []string { for _, mountOption := range systemOptions { allMountOptions.Insert(mountOption) } - return allMountOptions.UnsortedList() + return allMountOptions.List() } // ValidateZone returns: @@ -945,6 +947,38 @@ func CheckPersistentVolumeClaimModeBlock(pvc *v1.PersistentVolumeClaim) bool { return utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) && pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode == v1.PersistentVolumeBlock } +// IsWindowsUNCPath checks if path is prefixed with \\ +// This can be used to skip any processing of paths +// that point to SMB shares, local named pipes and local UNC path +func IsWindowsUNCPath(goos, path string) bool { + if goos != "windows" { + return false + } + // Check for UNC prefix \\ + if strings.HasPrefix(path, `\\`) { + return true + } + return false +} + +// IsWindowsLocalPath checks if path is a local path +// prefixed with "/" or "\" like "/foo/bar" or "\foo\bar" +func IsWindowsLocalPath(goos, path string) bool { + if goos != "windows" { + return false + } + if IsWindowsUNCPath(goos, path) { + return false + } + if strings.Contains(path, ":") { + return false + } + if !(strings.HasPrefix(path, `/`) || strings.HasPrefix(path, `\`)) { + return false + } + return true +} + // MakeAbsolutePath convert path to absolute path according to GOOS func MakeAbsolutePath(goos, path string) string { if goos != "windows" { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/BUILD index e61e3973ea54..e344849a517c 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/BUILD @@ -11,7 +11,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler.go index 61680c11577e..a7822efc3e55 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler.go @@ -23,7 +23,7 @@ import ( "path" "path/filepath" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/types" ) @@ -86,14 +86,14 @@ func (v VolumePathHandler) MapDevice(devicePath string, mapPath string, linkName if !filepath.IsAbs(mapPath) { return fmt.Errorf("The map path should be absolute: map path: %s", mapPath) } - glog.V(5).Infof("MapDevice: devicePath %s", devicePath) - glog.V(5).Infof("MapDevice: mapPath %s", mapPath) - glog.V(5).Infof("MapDevice: linkName %s", linkName) + klog.V(5).Infof("MapDevice: devicePath %s", devicePath) + 
klog.V(5).Infof("MapDevice: mapPath %s", mapPath) + klog.V(5).Infof("MapDevice: linkName %s", linkName) // Check and create mapPath _, err := os.Stat(mapPath) if err != nil && !os.IsNotExist(err) { - glog.Errorf("cannot validate map path: %s", mapPath) + klog.Errorf("cannot validate map path: %s", mapPath) return err } if err = os.MkdirAll(mapPath, 0750); err != nil { @@ -115,15 +115,15 @@ func (v VolumePathHandler) UnmapDevice(mapPath string, linkName string) error { if len(mapPath) == 0 { return fmt.Errorf("Failed to unmap device from map path. mapPath is empty") } - glog.V(5).Infof("UnmapDevice: mapPath %s", mapPath) - glog.V(5).Infof("UnmapDevice: linkName %s", linkName) + klog.V(5).Infof("UnmapDevice: mapPath %s", mapPath) + klog.V(5).Infof("UnmapDevice: linkName %s", linkName) // Check symbolic link exists linkPath := path.Join(mapPath, string(linkName)) if islinkExist, checkErr := v.IsSymlinkExist(linkPath); checkErr != nil { return checkErr } else if !islinkExist { - glog.Warningf("Warning: Unmap skipped because symlink does not exist on the path: %v", linkPath) + klog.Warningf("Warning: Unmap skipped because symlink does not exist on the path: %v", linkPath) return nil } err := os.Remove(linkPath) @@ -135,7 +135,7 @@ func (v VolumePathHandler) RemoveMapPath(mapPath string) error { if len(mapPath) == 0 { return fmt.Errorf("Failed to remove map path. mapPath is empty") } - glog.V(5).Infof("RemoveMapPath: mapPath %s", mapPath) + klog.V(5).Infof("RemoveMapPath: mapPath %s", mapPath) err := os.RemoveAll(mapPath) if err != nil && !os.IsNotExist(err) { return err @@ -180,12 +180,12 @@ func (v VolumePathHandler) GetDeviceSymlinkRefs(devPath string, mapPath string) if err != nil { return nil, fmt.Errorf("Symbolic link cannot be retrieved %v", err) } - glog.V(5).Infof("GetDeviceSymlinkRefs: filepath: %v, devPath: %v", filepath, devPath) + klog.V(5).Infof("GetDeviceSymlinkRefs: filepath: %v, devPath: %v", filepath, devPath) if filepath == devPath { refs = append(refs, path.Join(mapPath, filename)) } } - glog.V(5).Infof("GetDeviceSymlinkRefs: refs %v", refs) + klog.V(5).Infof("GetDeviceSymlinkRefs: refs %v", refs) return refs, nil } @@ -201,7 +201,7 @@ func (v VolumePathHandler) FindGlobalMapPathUUIDFromPod(pluginDir, mapPath strin return err } if (fi.Mode()&os.ModeSymlink == os.ModeSymlink) && (fi.Name() == string(podUID)) { - glog.V(5).Infof("FindGlobalMapPathFromPod: path %s, mapPath %s", path, mapPath) + klog.V(5).Infof("FindGlobalMapPathFromPod: path %s, mapPath %s", path, mapPath) if res, err := compareSymlinks(path, mapPath); err == nil && res { globalMapPathUUID = path } @@ -211,7 +211,7 @@ func (v VolumePathHandler) FindGlobalMapPathUUIDFromPod(pluginDir, mapPath strin if err != nil { return "", err } - glog.V(5).Infof("FindGlobalMapPathFromPod: globalMapPathUUID %s", globalMapPathUUID) + klog.V(5).Infof("FindGlobalMapPathFromPod: globalMapPathUUID %s", globalMapPathUUID) // Return path contains global map path + {pod uuid} return globalMapPathUUID, nil } @@ -225,7 +225,7 @@ func compareSymlinks(global, pod string) (bool, error) { if err != nil { return false, err } - glog.V(5).Infof("CompareSymlinks: devGloBal %s, devPod %s", devGlobal, devPod) + klog.V(5).Infof("CompareSymlinks: devGloBal %s, devPod %s", devGlobal, devPod) if devGlobal == devPod { return true, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler_linux.go 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler_linux.go index f9a886d7dc64..7170edc7de00 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler_linux.go @@ -25,7 +25,7 @@ import ( "os/exec" "strings" - "github.com/golang/glog" + "k8s.io/klog" ) // AttachFileDevice takes a path to a regular file and makes it available as an @@ -38,7 +38,7 @@ func (v VolumePathHandler) AttachFileDevice(path string) (string, error) { // If no existing loop device for the path, create one if blockDevicePath == "" { - glog.V(4).Infof("Creating device for path: %s", path) + klog.V(4).Infof("Creating device for path: %s", path) blockDevicePath, err = makeLoopDevice(path) if err != nil { return "", err @@ -61,7 +61,7 @@ func (v VolumePathHandler) GetLoopDevice(path string) (string, error) { cmd := exec.Command(losetupPath, args...) out, err := cmd.CombinedOutput() if err != nil { - glog.V(2).Infof("Failed device discover command for path %s: %v %s", path, err, out) + klog.V(2).Infof("Failed device discover command for path %s: %v %s", path, err, out) return "", err } return parseLosetupOutputForDevice(out) @@ -72,7 +72,7 @@ func makeLoopDevice(path string) (string, error) { cmd := exec.Command(losetupPath, args...) out, err := cmd.CombinedOutput() if err != nil { - glog.V(2).Infof("Failed device create command for path: %s %v %s ", path, err, out) + klog.V(2).Infof("Failed device create command for path: %s %v %s ", path, err, out) return "", err } return parseLosetupOutputForDevice(out) @@ -87,7 +87,7 @@ func (v VolumePathHandler) RemoveLoopDevice(device string) error { if _, err := os.Stat(device); os.IsNotExist(err) { return nil } - glog.V(2).Infof("Failed to remove loopback device: %s: %v %s", device, err, out) + klog.V(2).Infof("Failed to remove loopback device: %s: %v %s", device, err, out) return err } return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go index ef1f45208c95..eb44d5f162fa 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go @@ -24,7 +24,7 @@ import ( "os" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -63,13 +63,13 @@ func SetVolumeOwnership(mounter Mounter, fsGroup *int64) error { } if stat == nil { - glog.Errorf("Got nil stat_t for path %v while setting ownership of volume", path) + klog.Errorf("Got nil stat_t for path %v while setting ownership of volume", path) return nil } err = os.Chown(path, int(stat.Uid), int(*fsGroup)) if err != nil { - glog.Errorf("Chown failed on %v: %v", path, err) + klog.Errorf("Chown failed on %v: %v", path, err) } mask := rwMask @@ -83,7 +83,7 @@ func SetVolumeOwnership(mounter Mounter, fsGroup *int64) error { err = os.Chmod(path, info.Mode()|mask) if err != nil { - glog.Errorf("Chmod failed on %v: %v", path, err) + klog.Errorf("Chmod failed on %v: %v", path, err) } return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/BUILD index 364553f4f72e..89892c7621d5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/BUILD +++ 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/BUILD @@ -11,23 +11,27 @@ go_library( srcs = [ "attacher.go", "vsphere_volume.go", + "vsphere_volume_block.go", "vsphere_volume_util.go", ], importpath = "k8s.io/kubernetes/pkg/volume/vsphere_volume", deps = [ - "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/vsphere:go_default_library", "//pkg/cloudprovider/providers/vsphere/vclib:go_default_library", + "//pkg/features:go_default_library", "//pkg/util/keymutex:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", + "//pkg/volume/util/volumepathhandler:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/cloud-provider:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -35,11 +39,11 @@ go_test( name = "go_default_test", srcs = [ "attacher_test.go", + "vsphere_volume_block_test.go", "vsphere_volume_test.go", ], embed = [":go_default_library"], deps = [ - "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/fake:go_default_library", "//pkg/cloudprovider/providers/vsphere:go_default_library", "//pkg/cloudprovider/providers/vsphere/vclib:go_default_library", @@ -47,9 +51,11 @@ go_test( "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//staging/src/k8s.io/cloud-provider:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/OWNERS b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/OWNERS index 5f7fd3d2e9ee..d72ca0102916 100755 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/OWNERS +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/OWNERS @@ -3,7 +3,10 @@ approvers: - saad-ali - thockin - matchstick -- kerneltime +- SandeepPissay +- divyenpatel +- BaluDontu +- abrarshivani reviewers: - abithap - abrarshivani @@ -13,4 +16,6 @@ reviewers: - rootfs - jingxu97 - msau42 -- kerneltime +- SandeepPissay +- divyenpatel +- BaluDontu diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/attacher.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/attacher.go index 9f292e348fab..ca934b45abd4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/attacher.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/attacher.go @@ -22,9 +22,9 @@ import ( "path" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere" "k8s.io/kubernetes/pkg/util/keymutex" "k8s.io/kubernetes/pkg/util/mount" @@ -76,7 +76,7 @@ func 
(attacher *vsphereVMDKAttacher) Attach(spec *volume.Spec, nodeName types.No return "", err } - glog.V(4).Infof("vSphere: Attach disk called for node %s", nodeName) + klog.V(4).Infof("vSphere: Attach disk called for node %s", nodeName) // Keeps concurrent attach operations to same host atomic attachdetachMutex.LockKey(string(nodeName)) @@ -86,7 +86,7 @@ func (attacher *vsphereVMDKAttacher) Attach(spec *volume.Spec, nodeName types.No // succeeds in that case, so no need to do that separately. diskUUID, err := attacher.vsphereVolumes.AttachDisk(volumeSource.VolumePath, volumeSource.StoragePolicyName, nodeName) if err != nil { - glog.Errorf("Error attaching volume %q to node %q: %+v", volumeSource.VolumePath, nodeName, err) + klog.Errorf("Error attaching volume %q to node %q: %+v", volumeSource.VolumePath, nodeName, err) return "", err } @@ -94,14 +94,14 @@ func (attacher *vsphereVMDKAttacher) Attach(spec *volume.Spec, nodeName types.No } func (attacher *vsphereVMDKAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) { - glog.Warningf("Attacher.VolumesAreAttached called for node %q - Please use BulkVerifyVolumes for vSphere", nodeName) + klog.Warningf("Attacher.VolumesAreAttached called for node %q - Please use BulkVerifyVolumes for vSphere", nodeName) volumeNodeMap := map[types.NodeName][]*volume.Spec{ nodeName: specs, } nodeVolumesResult := make(map[*volume.Spec]bool) nodesVerificationMap, err := attacher.BulkVerifyVolumes(volumeNodeMap) if err != nil { - glog.Errorf("Attacher.VolumesAreAttached - error checking volumes for node %q with %v", nodeName, err) + klog.Errorf("Attacher.VolumesAreAttached - error checking volumes for node %q with %v", nodeName, err) return nodeVolumesResult, err } if result, ok := nodesVerificationMap[nodeName]; ok { @@ -119,7 +119,7 @@ func (attacher *vsphereVMDKAttacher) BulkVerifyVolumes(volumesByNode map[types.N for _, volumeSpec := range volumeSpecs { volumeSource, _, err := getVolumeSource(volumeSpec) if err != nil { - glog.Errorf("Error getting volume (%q) source : %v", volumeSpec.Name(), err) + klog.Errorf("Error getting volume (%q) source : %v", volumeSpec.Name(), err) continue } volPath := volumeSource.VolumePath @@ -135,7 +135,7 @@ func (attacher *vsphereVMDKAttacher) BulkVerifyVolumes(volumesByNode map[types.N } attachedResult, err := attacher.vsphereVolumes.DisksAreAttached(volumePathsByNode) if err != nil { - glog.Errorf("Error checking if volumes are attached to nodes: %+v. err: %v", volumePathsByNode, err) + klog.Errorf("Error checking if volumes are attached to nodes: %+v. err: %v", volumePathsByNode, err) return volumesAttachedCheck, err } @@ -169,14 +169,14 @@ func (attacher *vsphereVMDKAttacher) WaitForAttach(spec *volume.Spec, devicePath for { select { case <-ticker.C: - glog.V(5).Infof("Checking VMDK %q is attached", volumeSource.VolumePath) + klog.V(5).Infof("Checking VMDK %q is attached", volumeSource.VolumePath) path, err := verifyDevicePath(devicePath) if err != nil { // Log error, if any, and continue checking periodically. 
See issue #11321 - glog.Warningf("Error verifying VMDK (%q) is attached: %v", volumeSource.VolumePath, err) + klog.Warningf("Error verifying VMDK (%q) is attached: %v", volumeSource.VolumePath, err) } else if path != "" { // A device path has successfully been created for the VMDK - glog.Infof("Successfully found attached VMDK %q.", volumeSource.VolumePath) + klog.Infof("Successfully found attached VMDK %q.", volumeSource.VolumePath) return path, nil } case <-timer.C: @@ -210,7 +210,7 @@ func (attacher *vsphereVMDKAttacher) MountDevice(spec *volume.Spec, devicePath s if err != nil { if os.IsNotExist(err) { if err := os.MkdirAll(deviceMountPath, 0750); err != nil { - glog.Errorf("Failed to create directory at %#v. err: %s", deviceMountPath, err) + klog.Errorf("Failed to create directory at %#v. err: %s", deviceMountPath, err) return err } notMnt = true @@ -234,7 +234,7 @@ func (attacher *vsphereVMDKAttacher) MountDevice(spec *volume.Spec, devicePath s os.Remove(deviceMountPath) return err } - glog.V(4).Infof("formatting spec %v devicePath %v deviceMountPath %v fs %v with options %+v", spec.Name(), devicePath, deviceMountPath, volumeSource.FSType, options) + klog.V(4).Infof("formatting spec %v devicePath %v deviceMountPath %v fs %v with options %+v", spec.Name(), devicePath, deviceMountPath, volumeSource.FSType, options) } return nil } @@ -271,21 +271,21 @@ func (detacher *vsphereVMDKDetacher) Detach(volumeName string, nodeName types.No attached, err := detacher.vsphereVolumes.DiskIsAttached(volPath, nodeName) if err != nil { // Log error and continue with detach - glog.Errorf( + klog.Errorf( "Error checking if volume (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v", volPath, nodeName, err) } if err == nil && !attached { // Volume is already detached from node. - glog.Infof("detach operation was successful. volume %q is already detached from node %q.", volPath, nodeName) + klog.Infof("detach operation was successful. 
volume %q is already detached from node %q.", volPath, nodeName) return nil } attachdetachMutex.LockKey(string(nodeName)) defer attachdetachMutex.UnlockKey(string(nodeName)) if err := detacher.vsphereVolumes.DetachDisk(volPath, nodeName); err != nil { - glog.Errorf("Error detaching volume %q: %v", volPath, err) + klog.Errorf("Error detaching volume %q: %v", volPath, err) return err } return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go index 0cd3014feeb4..7097005ef9e1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go @@ -22,11 +22,13 @@ import ( "path" "strings" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/util/mount" utilstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" @@ -118,8 +120,10 @@ func (plugin *vsphereVolumePlugin) newMounterInternal(spec *volume.Spec, podUID plugin: plugin, MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)), }, - fsType: fsType, - diskMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil + fsType: fsType, + diskMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host), + mountOptions: util.MountOptionFromSpec(spec), + }, nil } func (plugin *vsphereVolumePlugin) newUnmounterInternal(volName string, podUID types.UID, manager vdManager, mounter mount.Interface) (volume.Unmounter, error) { @@ -142,7 +146,7 @@ func (plugin *vsphereVolumePlugin) ConstructVolumeSpec(volumeName, mountPath str return nil, err } volumePath = strings.Replace(volumePath, "\\040", " ", -1) - glog.V(5).Infof("vSphere volume path is %q", volumePath) + klog.V(5).Infof("vSphere volume path is %q", volumePath) vsphereVolume := &v1.Volume{ Name: volumeName, VolumeSource: v1.VolumeSource{ @@ -186,8 +190,9 @@ var _ volume.Mounter = &vsphereVolumeMounter{} type vsphereVolumeMounter struct { *vsphereVolume - fsType string - diskMounter *mount.SafeFormatAndMount + fsType string + diskMounter *mount.SafeFormatAndMount + mountOptions []string } func (b *vsphereVolumeMounter) GetAttributes() volume.Attributes { @@ -211,21 +216,21 @@ func (b *vsphereVolumeMounter) CanMount() error { // SetUp attaches the disk and bind mounts to the volume path. func (b *vsphereVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { - glog.V(5).Infof("vSphere volume setup %s to %s", b.volPath, dir) + klog.V(5).Infof("vSphere volume setup %s to %s", b.volPath, dir) // TODO: handle failed mounts here. 
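In the SetUpAt hunk below, user-supplied mount options from the volume spec are merged with the implicit bind option before the mount call. A simplified stand-in for util.JoinMountOptions (which this vendor drop also changes to return a sorted list rather than an unsorted one):

```go
package main

import (
	"fmt"
	"sort"
)

// joinMountOptions deduplicates user and system mount options and returns
// them sorted, mirroring the JoinMountOptions change in this vendor drop.
func joinMountOptions(userOptions, systemOptions []string) []string {
	seen := map[string]bool{}
	var all []string
	for _, o := range systemOptions {
		if !seen[o] {
			seen[o] = true
			all = append(all, o)
		}
	}
	for _, o := range userOptions {
		if !seen[o] {
			seen[o] = true
			all = append(all, o)
		}
	}
	sort.Strings(all)
	return all
}

func main() {
	fmt.Println(joinMountOptions([]string{"noatime"}, []string{"bind"})) // [bind noatime]
}
```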
notmnt, err := b.mounter.IsLikelyNotMountPoint(dir) if err != nil && !os.IsNotExist(err) { - glog.V(4).Infof("IsLikelyNotMountPoint failed: %v", err) + klog.V(4).Infof("IsLikelyNotMountPoint failed: %v", err) return err } if !notmnt { - glog.V(4).Infof("Something is already mounted to target %s", dir) + klog.V(4).Infof("Something is already mounted to target %s", dir) return nil } if err := os.MkdirAll(dir, 0750); err != nil { - glog.V(4).Infof("Could not create directory %s: %v", dir, err) + klog.V(4).Infof("Could not create directory %s: %v", dir, err) return err } @@ -233,25 +238,26 @@ func (b *vsphereVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { // Perform a bind mount to the full path to allow duplicate mounts of the same PD. globalPDPath := makeGlobalPDPath(b.plugin.host, b.volPath) - err = b.mounter.Mount(globalPDPath, dir, "", options) + mountOptions := util.JoinMountOptions(options, b.mountOptions) + err = b.mounter.Mount(globalPDPath, dir, "", mountOptions) if err != nil { notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if !notmnt { if mntErr = b.mounter.Unmount(dir); mntErr != nil { - glog.Errorf("Failed to unmount: %v", mntErr) + klog.Errorf("Failed to unmount: %v", mntErr) return err } notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if !notmnt { - glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", b.GetPath()) + klog.Errorf("%s is still mounted, despite call to unmount(). 
Will try again next sync loop.", b.GetPath()) return err } } @@ -259,7 +265,7 @@ func (b *vsphereVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { return err } volume.SetVolumeOwnership(b, fsGroup) - glog.V(3).Infof("vSphere volume %s mounted to %s", b.volPath, dir) + klog.V(3).Infof("vSphere volume %s mounted to %s", b.volPath, dir) return nil } @@ -352,9 +358,6 @@ func (v *vsphereVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopol if !util.AccessModesContainedInAll(v.plugin.GetAccessModes(), v.options.PVC.Spec.AccessModes) { return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", v.options.PVC.Spec.AccessModes, v.plugin.GetAccessModes()) } - if util.CheckPersistentVolumeClaimModeBlock(v.options.PVC) { - return nil, fmt.Errorf("%s does not support block volume provisioning", v.plugin.GetPluginName()) - } volSpec, err := v.manager.CreateVolume(v) if err != nil { @@ -365,6 +368,15 @@ func (v *vsphereVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopol volSpec.Fstype = "ext4" } + var volumeMode *v1.PersistentVolumeMode + if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + volumeMode = v.options.PVC.Spec.VolumeMode + if volumeMode != nil && *volumeMode == v1.PersistentVolumeBlock { + klog.V(5).Infof("vSphere block volume should not have any FSType") + volSpec.Fstype = "" + } + } + pv := &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ Name: v.options.PVName, @@ -379,6 +391,7 @@ func (v *vsphereVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopol Capacity: v1.ResourceList{ v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dKi", volSpec.Size)), }, + VolumeMode: volumeMode, PersistentVolumeSource: v1.PersistentVolumeSource{ VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{ VolumePath: volSpec.Path, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_block.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_block.go new file mode 100644 index 000000000000..40342b8aacdf --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_block.go @@ -0,0 +1,161 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vsphere_volume + +import ( + "fmt" + "path" + "path/filepath" + "strings" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" + "k8s.io/kubernetes/pkg/util/mount" + kstrings "k8s.io/kubernetes/pkg/util/strings" + "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/util" + "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" +) + +var _ volume.BlockVolumePlugin = &vsphereVolumePlugin{} + +func (plugin *vsphereVolumePlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) { + + pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName()) + blkUtil := volumepathhandler.NewBlockVolumePathHandler() + globalMapPathUUID, err := blkUtil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID) + if err != nil { + klog.Errorf("Failed to find GlobalMapPathUUID from Pod: %s with error: %+v", podUID, err) + return nil, err + } + klog.V(5).Infof("globalMapPathUUID: %v", globalMapPathUUID) + globalMapPath := filepath.Dir(globalMapPathUUID) + if len(globalMapPath) <= 1 { + return nil, fmt.Errorf("failed to get volume plugin information from globalMapPathUUID: %v", globalMapPathUUID) + } + return getVolumeSpecFromGlobalMapPath(globalMapPath) +} + +func getVolumeSpecFromGlobalMapPath(globalMapPath string) (*volume.Spec, error) { + // Construct volume spec from globalMapPath + // globalMapPath example: + // plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumeID} + // plugins/kubernetes.io/vsphere-volume/volumeDevices/[datastore1]\\040volumes/myDisk + volPath := filepath.Base(globalMapPath) + volPath = strings.Replace(volPath, "\\040", "", -1) + if len(volPath) <= 1 { + return nil, fmt.Errorf("failed to get volume path from global path=%s", globalMapPath) + } + block := v1.PersistentVolumeBlock + vsphereVolume := &v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{ + VolumePath: volPath, + }, + }, + VolumeMode: &block, + }, + } + return volume.NewSpecFromPersistentVolume(vsphereVolume, true), nil +} + +func (plugin *vsphereVolumePlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.BlockVolumeMapper, error) { + // If this called via GenerateUnmapDeviceFunc(), pod is nil. + // Pass empty string as dummy uid since uid isn't used in the case. 
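As the comment above notes, the block-volume mapper constructor is also reached from the unmap path, where there is no pod; the guard that follows passes a zero-value UID through. A minimal illustration of that guard, with `Pod` as a trimmed, hypothetical stand-in for the API type:

```go
package main

import "fmt"

// Pod is a trimmed stand-in for the v1.Pod type; only the UID matters here.
type Pod struct{ UID string }

// newMapperUID mirrors the nil-pod guard below: when invoked from the
// unmap path there is no pod, so an empty dummy UID is used.
func newMapperUID(pod *Pod) string {
	var uid string
	if pod != nil {
		uid = pod.UID
	}
	return uid
}

func main() {
	fmt.Printf("unmap path uid=%q\n", newMapperUID(nil))
	fmt.Printf("mount path uid=%q\n", newMapperUID(&Pod{UID: "9f1c2d3e"}))
}
```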
+ var uid types.UID + if pod != nil { + uid = pod.UID + } + return plugin.newBlockVolumeMapperInternal(spec, uid, &VsphereDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName())) +} + +func (plugin *vsphereVolumePlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID types.UID, manager vdManager, mounter mount.Interface) (volume.BlockVolumeMapper, error) { + volumeSource, _, err := getVolumeSource(spec) + if err != nil { + klog.Errorf("Failed to get Volume source from volume Spec: %+v with error: %+v", *spec, err) + return nil, err + } + volPath := volumeSource.VolumePath + return &vsphereBlockVolumeMapper{ + vsphereVolume: &vsphereVolume{ + volName: spec.Name(), + podUID: podUID, + volPath: volPath, + manager: manager, + mounter: mounter, + plugin: plugin, + MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)), + }, + }, nil + +} + +func (plugin *vsphereVolumePlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) { + return plugin.newUnmapperInternal(volName, podUID, &VsphereDiskUtil{}) +} + +func (plugin *vsphereVolumePlugin) newUnmapperInternal(volName string, podUID types.UID, manager vdManager) (volume.BlockVolumeUnmapper, error) { + return &vsphereBlockVolumeUnmapper{ + vsphereVolume: &vsphereVolume{ + volName: volName, + podUID: podUID, + volPath: volName, + manager: manager, + plugin: plugin, + }, + }, nil +} + +var _ volume.BlockVolumeMapper = &vsphereBlockVolumeMapper{} + +type vsphereBlockVolumeMapper struct { + *vsphereVolume +} + +func (v vsphereBlockVolumeMapper) SetUpDevice() (string, error) { + return "", nil +} + +func (v vsphereBlockVolumeMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error { + return util.MapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID) +} + +var _ volume.BlockVolumeUnmapper = &vsphereBlockVolumeUnmapper{} + +type vsphereBlockVolumeUnmapper struct { + *vsphereVolume +} + +func (v *vsphereBlockVolumeUnmapper) TearDownDevice(mapPath, devicePath string) error { + return nil +} + +// GetGlobalMapPath returns global map path and error +// path: plugins/kubernetes.io/{PluginName}/volumeDevices/volumePath +func (v *vsphereVolume) GetGlobalMapPath(spec *volume.Spec) (string, error) { + volumeSource, _, err := getVolumeSource(spec) + if err != nil { + return "", err + } + return path.Join(v.plugin.host.GetVolumeDevicePluginDir(vsphereVolumePluginName), string(volumeSource.VolumePath)), nil +} + +func (v *vsphereVolume) GetPodDeviceMapPath() (string, string) { + return v.plugin.host.GetPodVolumeDeviceDir(v.podUID, kstrings.EscapeQualifiedNameForDisk(vsphereVolumePluginName)), v.volName +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util.go index 3babd3614050..a5880ea7fbf8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util.go @@ -23,9 +23,9 @@ import ( "strings" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" - "k8s.io/kubernetes/pkg/cloudprovider" + cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib" "k8s.io/kubernetes/pkg/volume" @@ -114,10 +114,10 @@ func (util 
*VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (volSpec volumeOptions.Datastore = value case volume.VolumeParameterFSType: fstype = value - glog.V(4).Infof("Setting fstype as %q", fstype) + klog.V(4).Infof("Setting fstype as %q", fstype) case StoragePolicyName: volumeOptions.StoragePolicyName = value - glog.V(4).Infof("Setting StoragePolicyName as %q", volumeOptions.StoragePolicyName) + klog.V(4).Infof("Setting StoragePolicyName as %q", volumeOptions.StoragePolicyName) case HostFailuresToTolerateCapability, ForceProvisioningCapability, CacheReservationCapability, DiskStripesCapability, ObjectSpaceReservationCapability, IopsLimitCapability: @@ -137,7 +137,7 @@ func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (volSpec } volumeOptions.VSANStorageProfileData = "(" + volumeOptions.VSANStorageProfileData + ")" } - glog.V(4).Infof("VSANStorageProfileData in vsphere volume %q", volumeOptions.VSANStorageProfileData) + klog.V(4).Infof("VSANStorageProfileData in vsphere volume %q", volumeOptions.VSANStorageProfileData) // TODO: implement PVC.Selector parsing if v.options.PVC.Spec.Selector != nil { return nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on vSphere") @@ -154,7 +154,7 @@ func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (volSpec StoragePolicyName: volumeOptions.StoragePolicyName, StoragePolicyID: volumeOptions.StoragePolicyID, } - glog.V(2).Infof("Successfully created vsphere volume %s", name) + klog.V(2).Infof("Successfully created vsphere volume %s", name) return volSpec, nil } @@ -166,10 +166,10 @@ func (util *VsphereDiskUtil) DeleteVolume(vd *vsphereVolumeDeleter) error { } if err = cloud.DeleteVolume(vd.volPath); err != nil { - glog.V(2).Infof("Error deleting vsphere volume %s: %v", vd.volPath, err) + klog.V(2).Infof("Error deleting vsphere volume %s: %v", vd.volPath, err) return err } - glog.V(2).Infof("Successfully deleted vsphere volume %s", vd.volPath) + klog.V(2).Infof("Successfully deleted vsphere volume %s", vd.volPath) return nil } @@ -184,7 +184,7 @@ func getVolPathfromVolumeName(deviceMountPath string) string { func getCloudProvider(cloud cloudprovider.Interface) (*vsphere.VSphere, error) { if cloud == nil { - glog.Errorf("Cloud provider not initialized properly") + klog.Errorf("Cloud provider not initialized properly") return nil, errors.New("Cloud provider not initialized properly") } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/windows/service/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/windows/service/BUILD index 3e9cf1d3bdc6..e64c40488b43 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/windows/service/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/windows/service/BUILD @@ -11,9 +11,9 @@ go_library( importpath = "k8s.io/kubernetes/pkg/windows/service", deps = select({ "@io_bazel_rules_go//go/platform:windows": [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/sys/windows:go_default_library", "//vendor/golang.org/x/sys/windows/svc:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "//conditions:default": [], }), diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/windows/service/service.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/windows/service/service.go index acc48246f1e7..a5bffa1822e1 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/windows/service/service.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/windows/service/service.go @@ -21,7 
+21,7 @@ package service import ( "os" - "github.com/golang/glog" + "k8s.io/klog" "golang.org/x/sys/windows" "golang.org/x/sys/windows/svc" @@ -57,7 +57,7 @@ func InitService(serviceName string) error { if err != nil { return err } - glog.Infof("Running %s as a Windows service!", serviceName) + klog.Infof("Running %s as a Windows service!", serviceName) return nil } @@ -67,7 +67,7 @@ func (h *handler) Execute(_ []string, r <-chan svc.ChangeRequest, s chan<- svc.S h.fromsvc <- nil s <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop | svc.AcceptShutdown | svc.Accepted(windows.SERVICE_ACCEPT_PARAMCHANGE)} - glog.Infof("Service running") + klog.Infof("Service running") Loop: for { select { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/BUILD b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/BUILD index 5a1023742f8d..82a0722eebae 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/BUILD +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/BUILD @@ -57,7 +57,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", "//vendor/github.com/davecgh/go-spew/spew:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -72,6 +72,7 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//test/utils/harness:all-srcs", "//test/utils/image:all-srcs", "//test/utils/junit:all-srcs", ], diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/density_utils.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/density_utils.go index c607e5c940e6..60a32d50c19d 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/density_utils.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/density_utils.go @@ -21,12 +21,12 @@ import ( "strings" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog" ) const ( @@ -79,7 +79,7 @@ func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKeys []stri if !apierrs.IsConflict(err) { return err } else { - glog.V(2).Infof("Conflict when trying to remove a labels %v from %v", labelKeys, nodeName) + klog.V(2).Infof("Conflict when trying to remove a labels %v from %v", labelKeys, nodeName) } } else { break diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/runners.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/runners.go index 01fb6a0d24c7..4ebfde80e130 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/runners.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/runners.go @@ -17,9 +17,11 @@ limitations under the License. package utils import ( + "context" "fmt" "math" "os" + "strings" "sync" "time" @@ -45,7 +47,7 @@ import ( extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -161,7 +163,7 @@ type RCConfig struct { // If set to false starting RC will print progress, otherwise only errors will be printed. Silent bool - // If set this function will be used to print log lines instead of glog. + // If set this function will be used to print log lines instead of klog. 
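RCConfig's LogFunc field (declared just below) lets integration tests inject their own logger while everything else falls through to klog. A sketch of the injection pattern (not a verbatim copy of RCConfigLog), with `config` as a trimmed stand-in:

```go
package main

import "k8s.io/klog"

// config is a trimmed stand-in for RCConfig; only the logger hook is modeled.
type config struct {
	LogFunc func(format string, args ...interface{})
}

// log prefers the injected logger and otherwise falls back to klog,
// one common shape of this indirection.
func (c *config) log(format string, args ...interface{}) {
	if c.LogFunc != nil {
		c.LogFunc(format, args...)
		return
	}
	klog.Infof(format, args...)
}

func main() {
	c := &config{}
	c.log("created pod %q", "web-0")
}
```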
LogFunc func(fmt string, args ...interface{}) // If set those functions will be used to gather data from Nodes - in integration tests where no // kubelets are running those variables should be nil. @@ -171,13 +173,15 @@ type RCConfig struct { // Names of the secrets and configmaps to mount. SecretNames []string ConfigMapNames []string + + ServiceAccountTokenProjections int } func (rc *RCConfig) RCConfigLog(fmt string, args ...interface{}) { if rc.LogFunc != nil { rc.LogFunc(fmt, args...) } - glog.Infof(fmt, args...) + klog.Infof(fmt, args...) } type DeploymentConfig struct { @@ -239,6 +243,18 @@ func (p PodDiff) String(ignorePhases sets.String) string { return ret } +// DeletedPods returns a slice of pods that were present at the beginning +// and then disappeared. +func (p PodDiff) DeletedPods() []string { + var deletedPods []string + for podName, podInfo := range p { + if podInfo.hostname == nonExist { + deletedPods = append(deletedPods, podName) + } + } + return deletedPods +} + // Diff computes a PodDiff given 2 lists of pods. func Diff(oldPods []*v1.Pod, curPods []*v1.Pod) PodDiff { podInfoMap := PodDiff{} @@ -321,6 +337,10 @@ func (config *DeploymentConfig) create() error { attachConfigMaps(&deployment.Spec.Template, config.ConfigMapNames) } + for i := 0; i < config.ServiceAccountTokenProjections; i++ { + attachServiceAccountTokenProjection(&deployment.Spec.Template, fmt.Sprintf("tok-%d", i)) + } + config.applyTo(&deployment.Spec.Template) if err := CreateDeploymentWithRetries(config.Client, config.Namespace, deployment); err != nil { @@ -758,9 +778,8 @@ func (config *RCConfig) start() error { pods := ps.List() startupStatus := ComputeRCStartupStatus(pods, config.Replicas) - pods = startupStatus.Created if config.CreatedPods != nil { - *config.CreatedPods = pods + *config.CreatedPods = startupStatus.Created } if !config.Silent { config.RCConfigLog(startupStatus.String(config.Name)) @@ -780,16 +799,15 @@ func (config *RCConfig) start() error { } return fmt.Errorf("%d containers failed which is more than allowed %d", startupStatus.FailedContainers, maxContainerFailures) } - if len(pods) < len(oldPods) || len(pods) > config.Replicas { - // This failure mode includes: - // kubelet is dead, so node controller deleted pods and rc creates more - // - diagnose by noting the pod diff below. - // pod is unhealthy, so replication controller creates another to take its place - // - diagnose by comparing the previous "2 Pod states" lines for inactive pods - errorStr := fmt.Sprintf("Number of reported pods for %s changed: %d vs %d", config.Name, len(pods), len(oldPods)) - config.RCConfigLog("%v, pods that changed since the last iteration:", errorStr) - config.RCConfigLog(Diff(oldPods, pods).String(sets.NewString())) - return fmt.Errorf(errorStr) + + diff := Diff(oldPods, pods) + deletedPods := diff.DeletedPods() + if len(deletedPods) != 0 { + // There are some pods that have disappeared. 
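The failure check rewritten below keys off Diff(oldPods, pods).DeletedPods() instead of a raw count comparison, so only pods that vanish entirely, not replacements or restarts, trip the error. A self-contained sketch of the helper; the nonExist marker value here is assumed for illustration:

```go
package main

import "fmt"

const nonExist = "NonExist" // assumed marker for pods no longer observed

type podInfo struct{ hostname string }

// PodDiff maps pod name to where the pod was last seen.
type PodDiff map[string]podInfo

// DeletedPods returns pods present at the start of an iteration whose
// entries now carry the nonExist marker, i.e. pods that disappeared.
func (p PodDiff) DeletedPods() []string {
	var deleted []string
	for name, info := range p {
		if info.hostname == nonExist {
			deleted = append(deleted, name)
		}
	}
	return deleted
}

func main() {
	diff := PodDiff{
		"web-0": {hostname: "node-a"},
		"web-1": {hostname: nonExist},
	}
	fmt.Println(diff.DeletedPods()) // [web-1]
}
```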
+ err := fmt.Errorf("%d pods disappeared for %s: %v", len(deletedPods), config.Name, strings.Join(deletedPods, ", ")) + config.RCConfigLog(err.Error()) + config.RCConfigLog(diff.String(sets.NewString())) + return err } if len(pods) > len(oldPods) || startupStatus.Running > oldRunning { @@ -1026,7 +1044,7 @@ func MakePodSpec() v1.PodSpec { return v1.PodSpec{ Containers: []v1.Container{{ Name: "pause", - Image: "kubernetes/pause", + Image: "k8s.gcr.io/pause:3.1", Ports: []v1.ContainerPort{{ContainerPort: 80}}, Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ @@ -1061,9 +1079,9 @@ func CreatePod(client clientset.Interface, namespace string, podCount int, podTe } if podCount < 30 { - workqueue.Parallelize(podCount, podCount, createPodFunc) + workqueue.ParallelizeUntil(context.TODO(), podCount, podCount, createPodFunc) } else { - workqueue.Parallelize(30, podCount, createPodFunc) + workqueue.ParallelizeUntil(context.TODO(), 30, podCount, createPodFunc) } return createError } @@ -1127,7 +1145,7 @@ type SecretConfig struct { Client clientset.Interface Name string Namespace string - // If set this function will be used to print log lines instead of glog. + // If set this function will be used to print log lines instead of klog. LogFunc func(fmt string, args ...interface{}) } @@ -1185,7 +1203,7 @@ type ConfigMapConfig struct { Client clientset.Interface Name string Namespace string - // If set this function will be used to print log lines instead of glog. + // If set this function will be used to print log lines instead of klog. LogFunc func(fmt string, args ...interface{}) } @@ -1240,12 +1258,63 @@ func attachConfigMaps(template *v1.PodTemplateSpec, configMapNames []string) { template.Spec.Containers[0].VolumeMounts = mounts } +func attachServiceAccountTokenProjection(template *v1.PodTemplateSpec, name string) { + template.Spec.Containers[0].VolumeMounts = append(template.Spec.Containers[0].VolumeMounts, + v1.VolumeMount{ + Name: name, + MountPath: "/var/service-account-tokens/" + name, + }) + + template.Spec.Volumes = append(template.Spec.Volumes, + v1.Volume{ + Name: name, + VolumeSource: v1.VolumeSource{ + Projected: &v1.ProjectedVolumeSource{ + Sources: []v1.VolumeProjection{ + { + ServiceAccountToken: &v1.ServiceAccountTokenProjection{ + Path: "token", + Audience: name, + }, + }, + { + ConfigMap: &v1.ConfigMapProjection{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "kube-root-ca-crt", + }, + Items: []v1.KeyToPath{ + { + Key: "ca.crt", + Path: "ca.crt", + }, + }, + }, + }, + { + DownwardAPI: &v1.DownwardAPIProjection{ + Items: []v1.DownwardAPIVolumeFile{ + { + Path: "namespace", + FieldRef: &v1.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.namespace", + }, + }, + }, + }, + }, + }, + }, + }, + }) +} + type DaemonConfig struct { Client clientset.Interface Name string Namespace string Image string - // If set this function will be used to print log lines instead of glog. + // If set this function will be used to print log lines instead of klog. LogFunc func(fmt string, args ...interface{}) // How long we wait for DaemonSet to become running. 
Timeout time.Duration @@ -1253,7 +1322,7 @@ type DaemonConfig struct { func (config *DaemonConfig) Run() error { if config.Image == "" { - config.Image = "kubernetes/pause" + config.Image = "k8s.gcr.io/pause:3.1" } nameLabel := map[string]string{ "name": config.Name + "-daemon", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/tmpdir.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/tmpdir.go index 7887a4de9257..0c84c74d7ad4 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/tmpdir.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/test/utils/tmpdir.go @@ -18,9 +18,8 @@ package utils import ( "io/ioutil" - "os" - "github.com/golang/glog" + "k8s.io/klog" ) func MakeTempDirOrDie(prefix string, baseDir string) string { @@ -29,10 +28,7 @@ func MakeTempDirOrDie(prefix string, baseDir string) string { } tempDir, err := ioutil.TempDir(baseDir, prefix) if err != nil { - glog.Fatalf("Can't make a temp rootdir: %v", err) - } - if err = os.MkdirAll(tempDir, 0750); err != nil { - glog.Fatalf("Can't mkdir(%q): %v", tempDir, err) + klog.Fatalf("Can't make a temp rootdir: %v", err) } return tempDir } diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/yaml/.gitignore b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/.gitignore new file mode 100644 index 000000000000..e256a31e00a5 --- /dev/null +++ b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/.gitignore @@ -0,0 +1,20 @@ +# OSX leaves these everywhere on SMB shares +._* + +# Eclipse files +.classpath +.project +.settings/** + +# Emacs save files +*~ + +# Vim-related files +[._]*.s[a-w][a-z] +[._]s[a-w][a-z] +*.un~ +Session.vim +.netrwhist + +# Go test binaries +*.test diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/yaml/.travis.yml b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/.travis.yml new file mode 100644 index 000000000000..03ddc7318ae6 --- /dev/null +++ b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/.travis.yml @@ -0,0 +1,14 @@ +language: go +dist: xenial +go: + - 1.9.x + - 1.10.x + - 1.11.x +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d .) + - diff -u <(echo -n) <(golint $(go list -e ./...) | grep -v YAMLToJSON) + - go tool vet . + - go test -v -race ./... +install: + - go get golang.org/x/lint/golint diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/yaml/CONTRIBUTING.md b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/CONTRIBUTING.md new file mode 100644 index 000000000000..de4711513724 --- /dev/null +++ b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing Guidelines + +Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). 
Here is an excerpt: + +_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._ + +## Getting Started + +We have full documentation on how to get started contributing here: + + + +- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests +- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing) +- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers + +## Mentorship + +- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! + + diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/yaml/LICENSE b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/LICENSE new file mode 100644 index 000000000000..7805d36de730 --- /dev/null +++ b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/LICENSE @@ -0,0 +1,50 @@ +The MIT License (MIT) + +Copyright (c) 2014 Sam Ghods + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/yaml/OWNERS b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/OWNERS new file mode 100644 index 000000000000..11ad7ce1a40b --- /dev/null +++ b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/OWNERS @@ -0,0 +1,25 @@ +approvers: +- dims +- lavalamp +- smarterclayton +- deads2k +- sttts +- liggitt +- caesarxuchao +reviewers: +- dims +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- derekwaynecarr +- caesarxuchao +- mikedanese +- liggitt +- gmarek +- sttts +- ncdc +- tallclair +labels: +- sig/api-machinery diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/yaml/README.md b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/README.md new file mode 100644 index 000000000000..0200f75b4d12 --- /dev/null +++ b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/README.md @@ -0,0 +1,121 @@ +# YAML marshaling and unmarshaling support for Go + +[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml) + +## Introduction + +A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. + +In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). + +## Compatibility + +This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility). + +## Caveats + +**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example: + +``` +BAD: + exampleKey: !!binary gIGC + +GOOD: + exampleKey: gIGC +... and decode the base64 data in your code. +``` + +**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys. 
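To make the caveats concrete, here is a small sketch of both behaviors (it assumes the `github.com/ghodss/yaml` import path used in the examples below):

```go
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

func main() {
	// Int keys are converted to strings, so this succeeds.
	j, err := yaml.YAMLToJSON([]byte("1: one\n2: two\n"))
	fmt.Println(string(j), err) // {"1":"one","2":"two"} <nil>

	// A map used as a map key has no JSON representation, so this fails
	// with an "Unsupported map key of type" error.
	_, err = yaml.YAMLToJSON([]byte("{a: 1}: nested\n"))
	fmt.Println(err)
}
```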
+ +## Installation and usage + +To install, run: + +``` +$ go get github.com/ghodss/yaml +``` + +And import using: + +``` +import "github.com/ghodss/yaml" +``` + +Usage is very similar to the JSON library: + +```go +package main + +import ( + "fmt" + + "github.com/ghodss/yaml" +) + +type Person struct { + Name string `json:"name"` // Affects YAML field names too. + Age int `json:"age"` +} + +func main() { + // Marshal a Person struct to YAML. + p := Person{"John", 30} + y, err := yaml.Marshal(p) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(y)) + /* Output: + age: 30 + name: John + */ + + // Unmarshal the YAML back into a Person struct. + var p2 Person + err = yaml.Unmarshal(y, &p2) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(p2) + /* Output: + {John 30} + */ +} +``` + +`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available: + +```go +package main + +import ( + "fmt" + + "github.com/ghodss/yaml" +) + +func main() { + j := []byte(`{"name": "John", "age": 30}`) + y, err := yaml.JSONToYAML(j) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(y)) + /* Output: + name: John + age: 30 + */ + j2, err := yaml.YAMLToJSON(y) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(j2)) + /* Output: + {"age":30,"name":"John"} + */ +} +``` diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/yaml/RELEASE.md b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/RELEASE.md new file mode 100644 index 000000000000..6b642464e583 --- /dev/null +++ b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/RELEASE.md @@ -0,0 +1,9 @@ +# Release Process + +The `yaml` Project is released on an as-needed basis. The process is as follows: + +1. An issue is proposing a new release with a changelog since the last release +1. All [OWNERS](OWNERS) must LGTM this release +1. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION` +1. The release issue is closed +1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kubernetes-template-project $VERSION is released` diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS new file mode 100644 index 000000000000..0648a8ebff7b --- /dev/null +++ b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS @@ -0,0 +1,17 @@ +# Defined below are the security contacts for this repo. +# +# They are the contact point for the Product Security Team to reach out +# to for triaging and handling of incoming issues. +# +# The below names agree to abide by the +# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy) +# and will be removed and replaced if they violate that agreement. 
+# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +cjcullen +jessfraz +liggitt +philips +tallclair diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/yaml/code-of-conduct.md b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/code-of-conduct.md new file mode 100644 index 000000000000..0d15c00cf325 --- /dev/null +++ b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/yaml/fields.go b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/fields.go new file mode 100644 index 000000000000..235b7f2cf612 --- /dev/null +++ b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/fields.go @@ -0,0 +1,502 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package yaml + +import ( + "bytes" + "encoding" + "encoding/json" + "reflect" + "sort" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. +func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + if v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } else { + v = reflect.New(v.Type().Elem()) + } + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(json.Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + v = v.Elem() + } + return nil, nil, v +} + +// A field represents a single field found in a struct. +type field struct { + name string + nameBytes []byte // []byte(name) + equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent + + tag bool + index []int + typ reflect.Type + omitEmpty bool + quoted bool +} + +func fillField(f field) field { + f.nameBytes = []byte(f.name) + f.equalFold = foldFunc(f.nameBytes) + return f +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from json tag", then +// breaking ties with index sequence. 
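The tie-breaking order described above is what lets this file resolve conflicts between embedded fields the same way `encoding/json` does; a short illustration of the dominance rules being replicated (the types are hypothetical):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type Inner struct {
	Name string `json:"name"`
}

type Outer struct {
	Inner        // embedded: its Name sits at depth 1
	Name  string `json:"name"` // depth 0, so it dominates Inner.Name
}

func main() {
	b, _ := json.Marshal(Outer{Inner: Inner{Name: "inner"}, Name: "outer"})
	fmt.Println(string(b)) // {"name":"outer"}
}
```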
+type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that JSON should recognize for the given type. +// The algorithm is breadth-first search over the set of structs to include - the top struct +// and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" { // unexported + continue + } + tag := sf.Tag.Get("json") + if tag == "-" { + continue + } + name, opts := parseTag(tag) + if !isValidTag(name) { + name = "" + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := name != "" + if name == "" { + name = sf.Name + } + fields = append(fields, fillField(field{ + name: name, + tag: tagged, + index: index, + typ: ft, + omitEmpty: opts.Contains("omitempty"), + quoted: opts.Contains("string"), + })) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with JSON tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. 
+ // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// JSON tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +const ( + caseMask = ^byte(0x20) // Mask to ignore case in ASCII. + kelvin = '\u212a' + smallLongEss = '\u017f' +) + +// foldFunc returns one of four different case folding equivalence +// functions, from most general (and slow) to fastest: +// +// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 +// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') +// 3) asciiEqualFold, no special, but includes non-letters (including _) +// 4) simpleLetterEqualFold, no specials, no non-letters. 
+// +// The letters S and K are special because they map to 3 runes, not just 2: +// * S maps to s and to U+017F 'ſ' Latin small letter long s +// * k maps to K and to U+212A 'K' Kelvin sign +// See http://play.golang.org/p/tTxjOc0OGo +// +// The returned function is specialized for matching against s and +// should only be given s. It's not curried for performance reasons. +func foldFunc(s []byte) func(s, t []byte) bool { + nonLetter := false + special := false // special letter + for _, b := range s { + if b >= utf8.RuneSelf { + return bytes.EqualFold + } + upper := b & caseMask + if upper < 'A' || upper > 'Z' { + nonLetter = true + } else if upper == 'K' || upper == 'S' { + // See above for why these letters are special. + special = true + } + } + if special { + return equalFoldRight + } + if nonLetter { + return asciiEqualFold + } + return simpleLetterEqualFold +} + +// equalFoldRight is a specialization of bytes.EqualFold when s is +// known to be all ASCII (including punctuation), but contains an 's', +// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. +// See comments on foldFunc. +func equalFoldRight(s, t []byte) bool { + for _, sb := range s { + if len(t) == 0 { + return false + } + tb := t[0] + if tb < utf8.RuneSelf { + if sb != tb { + sbUpper := sb & caseMask + if 'A' <= sbUpper && sbUpper <= 'Z' { + if sbUpper != tb&caseMask { + return false + } + } else { + return false + } + } + t = t[1:] + continue + } + // sb is ASCII and t is not. t must be either kelvin + // sign or long s; sb must be s, S, k, or K. + tr, size := utf8.DecodeRune(t) + switch sb { + case 's', 'S': + if tr != smallLongEss { + return false + } + case 'k', 'K': + if tr != kelvin { + return false + } + default: + return false + } + t = t[size:] + + } + if len(t) > 0 { + return false + } + return true +} + +// asciiEqualFold is a specialization of bytes.EqualFold for use when +// s is all ASCII (but may contain non-letters) and contains no +// special-folding letters. +// See comments on foldFunc. +func asciiEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, sb := range s { + tb := t[i] + if sb == tb { + continue + } + if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { + if sb&caseMask != tb&caseMask { + return false + } + } else { + return false + } + } + return true +} + +// simpleLetterEqualFold is a specialization of bytes.EqualFold for +// use when s is all ASCII letters (no underscores, etc) and also +// doesn't contain 'k', 'K', 's', or 'S'. +// See comments on foldFunc. +func simpleLetterEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, b := range s { + if b&caseMask != t[i]&caseMask { + return false + } + } + return true +} + +// tagOptions is the string following a comma in a struct field's "json" +// tag, or the empty string. It does not include the leading comma. +type tagOptions string + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. +func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, tagOptions("") +} + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. 
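The special treatment of 'k'/'K' and 's'/'S' above exists because Unicode simple case folding maps each of them to three runes rather than the usual two, which the faster ASCII-only paths cannot handle; a quick standard-library demonstration (a sketch, not code from this file):

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	// U+212A KELVIN SIGN folds to 'k'/'K'.
	fmt.Println(bytes.EqualFold([]byte("k"), []byte("\u212a"))) // true
	// U+017F LATIN SMALL LETTER LONG S folds to 's'/'S'.
	fmt.Println(bytes.EqualFold([]byte("s"), []byte("\u017f"))) // true
	// Ordinary ASCII letters fold pairwise, which the fast paths exploit.
	fmt.Println(bytes.EqualFold([]byte("abc"), []byte("ABC"))) // true
}
```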
+func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + s := string(o) + for s != "" { + var next string + i := strings.Index(s, ",") + if i >= 0 { + s, next = s[:i], s[i+1:] + } + if s == optionName { + return true + } + s = next + } + return false +} diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/yaml/yaml.go b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/yaml.go new file mode 100644 index 000000000000..024596112ac5 --- /dev/null +++ b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/yaml.go @@ -0,0 +1,319 @@ +package yaml + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "reflect" + "strconv" + + "gopkg.in/yaml.v2" +) + +// Marshal marshals the object into JSON then converts JSON to YAML and returns the +// YAML. +func Marshal(o interface{}) ([]byte, error) { + j, err := json.Marshal(o) + if err != nil { + return nil, fmt.Errorf("error marshaling into JSON: %v", err) + } + + y, err := JSONToYAML(j) + if err != nil { + return nil, fmt.Errorf("error converting JSON to YAML: %v", err) + } + + return y, nil +} + +// JSONOpt is a decoding option for decoding from JSON format. +type JSONOpt func(*json.Decoder) *json.Decoder + +// Unmarshal converts YAML to JSON then uses JSON to unmarshal into an object, +// optionally configuring the behavior of the JSON unmarshal. +func Unmarshal(y []byte, o interface{}, opts ...JSONOpt) error { + return yamlUnmarshal(y, o, false, opts...) +} + +// UnmarshalStrict strictly converts YAML to JSON then uses JSON to unmarshal +// into an object, optionally configuring the behavior of the JSON unmarshal. +func UnmarshalStrict(y []byte, o interface{}, opts ...JSONOpt) error { + return yamlUnmarshal(y, o, true, append(opts, DisallowUnknownFields)...) +} + +// yamlUnmarshal unmarshals the given YAML byte stream into the given interface, +// optionally performing the unmarshalling strictly +func yamlUnmarshal(y []byte, o interface{}, strict bool, opts ...JSONOpt) error { + vo := reflect.ValueOf(o) + unmarshalFn := yaml.Unmarshal + if strict { + unmarshalFn = yaml.UnmarshalStrict + } + j, err := yamlToJSON(y, &vo, unmarshalFn) + if err != nil { + return fmt.Errorf("error converting YAML to JSON: %v", err) + } + + err = jsonUnmarshal(bytes.NewReader(j), o, opts...) + if err != nil { + return fmt.Errorf("error unmarshaling JSON: %v", err) + } + + return nil +} + +// jsonUnmarshal unmarshals the JSON byte stream from the given reader into the +// object, optionally applying decoder options prior to decoding. We are not +// using json.Unmarshal directly as we want the chance to pass in non-default +// options. +func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error { + d := json.NewDecoder(r) + for _, opt := range opts { + d = opt(d) + } + if err := d.Decode(&o); err != nil { + return fmt.Errorf("while decoding JSON: %v", err) + } + return nil +} + +// JSONToYAML Converts JSON to YAML. +func JSONToYAML(j []byte) ([]byte, error) { + // Convert the JSON to an object. + var jsonObj interface{} + // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the + // Go JSON library doesn't try to pick the right number type (int, float, + // etc.) when unmarshalling to interface{}, it just picks float64 + // universally. go-yaml does go through the effort of picking the right + // number type, so we can preserve number type throughout this process. + err := yaml.Unmarshal(j, &jsonObj) + if err != nil { + return nil, err + } + + // Marshal this object into YAML. 
+ return yaml.Marshal(jsonObj) +} + +// YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML, +// passing JSON through this method should be a no-op. +// +// Things YAML can do that are not supported by JSON: +// * In YAML you can have binary and null keys in your maps. These are invalid +// in JSON. (int and float keys are converted to strings.) +// * Binary data in YAML with the !!binary tag is not supported. If you want to +// use binary data with this library, encode the data as base64 as usual but do +// not use the !!binary tag in your YAML. This will ensure the original base64 +// encoded data makes it all the way through to the JSON. +// +// For strict decoding of YAML, use YAMLToJSONStrict. +func YAMLToJSON(y []byte) ([]byte, error) { + return yamlToJSON(y, nil, yaml.Unmarshal) +} + +// YAMLToJSONStrict is like YAMLToJSON but enables strict YAML decoding, +// returning an error on any duplicate field names. +func YAMLToJSONStrict(y []byte) ([]byte, error) { + return yamlToJSON(y, nil, yaml.UnmarshalStrict) +} + +func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, interface{}) error) ([]byte, error) { + // Convert the YAML to an object. + var yamlObj interface{} + err := yamlUnmarshal(y, &yamlObj) + if err != nil { + return nil, err + } + + // YAML objects are not completely compatible with JSON objects (e.g. you + // can have non-string keys in YAML). So, convert the YAML-compatible object + // to a JSON-compatible object, failing with an error if irrecoverable + // incompatibilties happen along the way. + jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget) + if err != nil { + return nil, err + } + + // Convert this object to JSON and return the data. + return json.Marshal(jsonObj) +} + +func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) { + var err error + + // Resolve jsonTarget to a concrete value (i.e. not a pointer or an + // interface). We pass decodingNull as false because we're not actually + // decoding into the value, we're just checking if the ultimate target is a + // string. + if jsonTarget != nil { + ju, tu, pv := indirect(*jsonTarget, false) + // We have a JSON or Text Umarshaler at this level, so we can't be trying + // to decode into a string. + if ju != nil || tu != nil { + jsonTarget = nil + } else { + jsonTarget = &pv + } + } + + // If yamlObj is a number or a boolean, check if jsonTarget is a string - + // if so, coerce. Else return normal. + // If yamlObj is a map or array, find the field that each key is + // unmarshaling to, and when you recurse pass the reflect.Value for that + // field back into this function. + switch typedYAMLObj := yamlObj.(type) { + case map[interface{}]interface{}: + // JSON does not support arbitrary keys in a map, so we must convert + // these keys to strings. + // + // From my reading of go-yaml v2 (specifically the resolve function), + // keys can only have the types string, int, int64, float64, binary + // (unsupported), or null (unsupported). + strMap := make(map[string]interface{}) + for k, v := range typedYAMLObj { + // Resolve the key to a string first. + var keyString string + switch typedKey := k.(type) { + case string: + keyString = typedKey + case int: + keyString = strconv.Itoa(typedKey) + case int64: + // go-yaml will only return an int64 as a key if the system + // architecture is 32-bit and the key's value is between 32-bit + // and 64-bit. Otherwise the key type will simply be int. 
+ keyString = strconv.FormatInt(typedKey, 10) + case float64: + // Stolen from go-yaml to use the same conversion to string as + // the go-yaml library uses to convert float to string when + // Marshaling. + s := strconv.FormatFloat(typedKey, 'g', -1, 32) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + keyString = s + case bool: + if typedKey { + keyString = "true" + } else { + keyString = "false" + } + default: + return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v", + reflect.TypeOf(k), k, v) + } + + // jsonTarget should be a struct or a map. If it's a struct, find + // the field it's going to map to and pass its reflect.Value. If + // it's a map, find the element type of the map and pass the + // reflect.Value created from that type. If it's neither, just pass + // nil - JSON conversion will error for us if it's a real issue. + if jsonTarget != nil { + t := *jsonTarget + if t.Kind() == reflect.Struct { + keyBytes := []byte(keyString) + // Find the field that the JSON library would use. + var f *field + fields := cachedTypeFields(t.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, keyBytes) { + f = ff + break + } + // Do case-insensitive comparison. + if f == nil && ff.equalFold(ff.nameBytes, keyBytes) { + f = ff + } + } + if f != nil { + // Find the reflect.Value of the most preferential + // struct field. + jtf := t.Field(f.index[0]) + strMap[keyString], err = convertToJSONableObject(v, &jtf) + if err != nil { + return nil, err + } + continue + } + } else if t.Kind() == reflect.Map { + // Create a zero value of the map's element type to use as + // the JSON target. + jtv := reflect.Zero(t.Type().Elem()) + strMap[keyString], err = convertToJSONableObject(v, &jtv) + if err != nil { + return nil, err + } + continue + } + } + strMap[keyString], err = convertToJSONableObject(v, nil) + if err != nil { + return nil, err + } + } + return strMap, nil + case []interface{}: + // We need to recurse into arrays in case there are any + // map[interface{}]interface{}'s inside and to convert any + // numbers to strings. + + // If jsonTarget is a slice (which it really should be), find the + // thing it's going to map to. If it's not a slice, just pass nil + // - JSON conversion will error for us if it's a real issue. + var jsonSliceElemValue *reflect.Value + if jsonTarget != nil { + t := *jsonTarget + if t.Kind() == reflect.Slice { + // By default slices point to nil, but we need a reflect.Value + // pointing to a value of the slice type, so we create one here. + ev := reflect.Indirect(reflect.New(t.Type().Elem())) + jsonSliceElemValue = &ev + } + } + + // Make and use a new array. + arr := make([]interface{}, len(typedYAMLObj)) + for i, v := range typedYAMLObj { + arr[i], err = convertToJSONableObject(v, jsonSliceElemValue) + if err != nil { + return nil, err + } + } + return arr, nil + default: + // If the target type is a string and the YAML type is a number, + // convert the YAML type to a string. + if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String { + // Based on my reading of go-yaml, it may return int, int64, + // float64, or uint64. 
+ var s string + switch typedVal := typedYAMLObj.(type) { + case int: + s = strconv.FormatInt(int64(typedVal), 10) + case int64: + s = strconv.FormatInt(typedVal, 10) + case float64: + s = strconv.FormatFloat(typedVal, 'g', -1, 32) + case uint64: + s = strconv.FormatUint(typedVal, 10) + case bool: + if typedVal { + s = "true" + } else { + s = "false" + } + } + if len(s) > 0 { + yamlObj = interface{}(s) + } + } + return yamlObj, nil + } +} diff --git a/cluster-autoscaler/vendor/sigs.k8s.io/yaml/yaml_go110.go b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/yaml_go110.go new file mode 100644 index 000000000000..ab3e06a222a6 --- /dev/null +++ b/cluster-autoscaler/vendor/sigs.k8s.io/yaml/yaml_go110.go @@ -0,0 +1,14 @@ +// This file contains changes that are only compatible with go 1.10 and onwards. + +// +build go1.10 + +package yaml + +import "encoding/json" + +// DisallowUnknownFields configures the JSON decoder to error out if unknown +// fields come along, instead of dropping them by default. +func DisallowUnknownFields(d *json.Decoder) *json.Decoder { + d.DisallowUnknownFields() + return d +} diff --git a/cluster-autoscaler/vendor/vbom.ml/util/LICENSE b/cluster-autoscaler/vendor/vbom.ml/util/LICENSE deleted file mode 100644 index 5c695fb590f7..000000000000 --- a/cluster-autoscaler/vendor/vbom.ml/util/LICENSE +++ /dev/null @@ -1,17 +0,0 @@ -The MIT License (MIT) -Copyright (c) 2015 Frits van Bommel -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/cluster-autoscaler/vendor/vbom.ml/util/sortorder/README.md b/cluster-autoscaler/vendor/vbom.ml/util/sortorder/README.md deleted file mode 100644 index ed8da0e29b5f..000000000000 --- a/cluster-autoscaler/vendor/vbom.ml/util/sortorder/README.md +++ /dev/null @@ -1,5 +0,0 @@ -## sortorder [![GoDoc](https://godoc.org/vbom.ml/util/sortorder?status.svg)](https://godoc.org/vbom.ml/util/sortorder) - - import "vbom.ml/util/sortorder" - -Sort orders and comparison functions. diff --git a/cluster-autoscaler/vendor/vbom.ml/util/sortorder/doc.go b/cluster-autoscaler/vendor/vbom.ml/util/sortorder/doc.go deleted file mode 100644 index 75d5a2928f38..000000000000 --- a/cluster-autoscaler/vendor/vbom.ml/util/sortorder/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package sortorder implements sort orders and comparison functions. -// -// Currently, it only implements so-called "natural order", where integers -// embedded in strings are compared by value. 
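For context on the `sortorder` removal that follows: "natural order" compares integers embedded in strings by value, which plain lexicographic sorting does not; a small contrast using only the standard library (the names are illustrative):

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	names := []string{"node10", "node2", "node1"}
	sort.Strings(names)
	fmt.Println(names) // [node1 node10 node2], plain lexicographic order
	// The deleted NaturalLess below would instead yield
	// [node1 node2 node10], since 1 < 2 < 10 numerically.
}
```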
-package sortorder diff --git a/cluster-autoscaler/vendor/vbom.ml/util/sortorder/natsort.go b/cluster-autoscaler/vendor/vbom.ml/util/sortorder/natsort.go deleted file mode 100644 index 1af08c1bd107..000000000000 --- a/cluster-autoscaler/vendor/vbom.ml/util/sortorder/natsort.go +++ /dev/null @@ -1,76 +0,0 @@ -package sortorder - -// Natural implements sort.Interface to sort strings in natural order. This -// means that e.g. "abc2" < "abc12". -// -// Non-digit sequences and numbers are compared separately. The former are -// compared bytewise, while the latter are compared numerically (except that -// the number of leading zeros is used as a tie-breaker, so e.g. "2" < "02") -// -// Limitation: only ASCII digits (0-9) are considered. -type Natural []string - -func (n Natural) Len() int { return len(n) } -func (n Natural) Swap(i, j int) { n[i], n[j] = n[j], n[i] } -func (n Natural) Less(i, j int) bool { return NaturalLess(n[i], n[j]) } - -func isdigit(b byte) bool { return '0' <= b && b <= '9' } - -// NaturalLess compares two strings using natural ordering. This means that e.g. -// "abc2" < "abc12". -// -// Non-digit sequences and numbers are compared separately. The former are -// compared bytewise, while the latter are compared numerically (except that -// the number of leading zeros is used as a tie-breaker, so e.g. "2" < "02") -// -// Limitation: only ASCII digits (0-9) are considered. -func NaturalLess(str1, str2 string) bool { - idx1, idx2 := 0, 0 - for idx1 < len(str1) && idx2 < len(str2) { - c1, c2 := str1[idx1], str2[idx2] - dig1, dig2 := isdigit(c1), isdigit(c2) - switch { - case dig1 != dig2: // Digits before other characters. - return dig1 // True if LHS is a digit, false if the RHS is one. - case !dig1: // && !dig2, because dig1 == dig2 - // UTF-8 compares bytewise-lexicographically, no need to decode - // codepoints. - if c1 != c2 { - return c1 < c2 - } - idx1++ - idx2++ - default: // Digits - // Eat zeros. - for ; idx1 < len(str1) && str1[idx1] == '0'; idx1++ { - } - for ; idx2 < len(str2) && str2[idx2] == '0'; idx2++ { - } - // Eat all digits. - nonZero1, nonZero2 := idx1, idx2 - for ; idx1 < len(str1) && isdigit(str1[idx1]); idx1++ { - } - for ; idx2 < len(str2) && isdigit(str2[idx2]); idx2++ { - } - // If lengths of numbers with non-zero prefix differ, the shorter - // one is less. - if len1, len2 := idx1-nonZero1, idx2-nonZero2; len1 != len2 { - return len1 < len2 - } - // If they're not equal, string comparison is correct. - if nr1, nr2 := str1[nonZero1:idx1], str2[nonZero2:idx2]; nr1 != nr2 { - return nr1 < nr2 - } - // Otherwise, the one with less zeros is less. - // Because everything up to the number is equal, comparing the index - // after the zeros is sufficient. - if nonZero1 != nonZero2 { - return nonZero1 < nonZero2 - } - } - // They're identical so far, so continue comparing. - } - // So far they are identical. At least one is ended. If the other continues, - // it sorts last. - return len(str1) < len(str2) -} diff --git a/cluster-autoscaler/version.go b/cluster-autoscaler/version.go index 369abf76bffc..3f73efc09f3a 100644 --- a/cluster-autoscaler/version.go +++ b/cluster-autoscaler/version.go @@ -17,4 +17,4 @@ limitations under the License. package main // ClusterAutoscalerVersion contains version of CA. 
-const ClusterAutoscalerVersion = "1.12.0" +const ClusterAutoscalerVersion = "1.13.2" diff --git a/hack/OWNERS b/hack/OWNERS new file mode 100644 index 000000000000..ca9497a1622d --- /dev/null +++ b/hack/OWNERS @@ -0,0 +1,14 @@ +approvers: +- piosz +- aleksandra-malinowska +- losipiuk +- schylek +- kgolab +reviewers: +- feiskyer +- piosz +- aleksandra-malinowska +- losipiuk +- schylek +- kgolab + diff --git a/hack/boilerplate/boilerplate.generatego.txt b/hack/boilerplate/boilerplate.generatego.txt new file mode 100644 index 000000000000..0926592d3895 --- /dev/null +++ b/hack/boilerplate/boilerplate.generatego.txt @@ -0,0 +1,15 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ diff --git a/hack/boilerplate/boilerplate.py b/hack/boilerplate/boilerplate.py index c807a678a080..5bd9f7e44d30 100755 --- a/hack/boilerplate/boilerplate.py +++ b/hack/boilerplate/boilerplate.py @@ -17,14 +17,12 @@ from __future__ import print_function import argparse +import datetime import difflib import glob -import json -import mmap import os import re import sys -from datetime import date parser = argparse.ArgumentParser() parser.add_argument( @@ -63,6 +61,14 @@ def get_refs(): return refs +def is_generated_file(filename, data, regexs): + for d in skipped_ungenerated_files: + if d in filename: + return False + + p = regexs["generated"] + return p.search(data) + def file_passes(filename, refs, regexs): try: f = open(filename, 'r') @@ -73,20 +79,25 @@ def file_passes(filename, refs, regexs): data = f.read() f.close() + # determine if the file is automatically generated + generated = is_generated_file(filename, data, regexs) + basename = os.path.basename(filename) - extension = file_extension(filename) + if generated: + extension = "generatego" + else: + extension = file_extension(filename) + if extension != "": ref = refs[extension] else: ref = refs[basename] - # remove build tags from the top of Go files - if extension == "go": + # remove extra content from the top of files + if extension == "go" or extension == "generatego": p = regexs["go_build_constraints"] (data, found) = p.subn("", data, 1) - - # remove shebang from the top of shell files - if extension == "sh": + elif extension == "sh": p = regexs["shebang"] (data, found) = p.subn("", data, 1) @@ -105,15 +116,19 @@ def file_passes(filename, refs, regexs): p = regexs["year"] for d in data: if p.search(d): - print('File %s is missing the year' % filename, file=verbose_out) + if generated: + print('File %s has the YEAR field, but it should not be in generated file' % filename, file=verbose_out) + else: + print('File %s has the YEAR field, but missing the year of date' % filename, file=verbose_out) return False - # Replace all occurrences of the regex "CURRENT_YEAR|...|2016|2015|2014" with "YEAR" - p = regexs["date"] - for i, d in enumerate(data): - (data[i], found) = p.subn('YEAR', d) - if found != 0: - break + if not generated: + # Replace all occurrences of the regex "2014|2015|2016|2017|2018" with "YEAR" + p = regexs["date"] + for i, d in 
enumerate(data): + (data[i], found) = p.subn('YEAR', d) + if found != 0: + break # if we don't match the reference at this point, fail if ref != data: @@ -131,7 +146,13 @@ def file_extension(filename): return os.path.splitext(filename)[1].split(".")[-1].lower() skipped_dirs = ['Godeps', 'third_party', '_gopath', '_output', '.git', 'cluster/env.sh', - "vendor", "test/e2e/generated/bindata.go", "hack/boilerplate/test"] + "vendor", "test/e2e/generated/bindata.go", "hack/boilerplate/test", + "pkg/generated/bindata.go"] + +# list all the files contain 'DO NOT EDIT', but are not generated +skipped_ungenerated_files = ['hack/build-ui.sh', 'hack/lib/swagger.sh', + 'hack/boilerplate/boilerplate.py', + 'cluster-autoscaler/cloudprovider/aws/ec2_instance_types/gen.go'] def normalize_files(files): newfiles = [] @@ -171,17 +192,23 @@ def get_files(extensions): outfiles.append(pathname) return outfiles +def get_dates(): + years = datetime.datetime.now().year + return '(%s)' % '|'.join((str(year) for year in range(2014, years+1))) + def get_regexs(): regexs = {} # Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing regexs["year"] = re.compile( 'YEAR' ) - # dates can be 2014, 2015, 2016, ..., CURRENT_YEAR, company holder names can be anything - years = range(2014, date.today().year + 1) - regexs["date"] = re.compile( '(%s)' % "|".join(map(lambda l: str(l), years)) ) + # get_dates return 2014, 2015, 2016, 2017, or 2018 until the current year as a regex like: "(2014|2015|2016|2017|2018)"; + # company holder names can be anything + regexs["date"] = re.compile(get_dates()) # strip // +build \n\n build constraints regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE) # strip #!.* from shell scripts regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE) + # Search for generated files + regexs["generated"] = re.compile( 'DO NOT EDIT' ) return regexs def main(): @@ -190,9 +217,10 @@ def main(): filenames = get_files(refs.keys()) for filename in filenames: - if "/cluster-autoscaler/_override/" in filename: + if "/cluster-autoscaler/_override/" in filename: continue - if not file_passes(filename, refs, regexs): + + if not file_passes(filename, refs, regexs): print(filename, file=sys.stdout) return 0 diff --git a/hack/check-go-version.sh b/hack/check-go-version.sh index 3feb754291b1..aa9fe0f89e60 100755 --- a/hack/check-go-version.sh +++ b/hack/check-go-version.sh @@ -18,7 +18,7 @@ set -euo pipefail GO_VERSION=($(go version)) -if ! echo "${GO_VERSION[2]}" | grep -Eq 'go1.9|1.10'; then +if ! echo "${GO_VERSION[2]}" | grep -Eq 'go1.9|1.10|1.11'; then echo "Unsupported go version ${GO_VERSION}" return 1 fi diff --git a/hack/go-fmt.sh b/hack/go-fmt.sh deleted file mode 100755 index 22a92bb3c9a4..000000000000 --- a/hack/go-fmt.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/sh - -REPO_NAME=$(basename "${PWD}") -if [ "$IS_CONTAINER" != "" ]; then - for TARGET in "${@}"; do - find "${TARGET}" -name '*.go' ! -path '*/vendor/*' ! -path '*/.build/*' -exec gofmt -s -w {} \+ - done - git diff --exit-code -else - docker run -it --rm \ - --env IS_CONTAINER=TRUE \ - --volume "${PWD}:/go/src/github.com/openshift/${REPO_NAME}:z" \ - --workdir "/go/src/github.com/openshift/${REPO_NAME}" \ - openshift/origin-release:golang-1.10 \ - ./hack/go-fmt.sh "${@}" -fi diff --git a/hack/go-lint.sh b/hack/go-lint.sh deleted file mode 100755 index 0ae8f774599b..000000000000 --- a/hack/go-lint.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/sh -# Example: ./hack/go-lint.sh installer/... pkg/... 
tests/smoke - -REPO_NAME=$(basename "${PWD}") -if [ "$IS_CONTAINER" != "" ]; then - golint -set_exit_status "${@}" -else - docker run --rm \ - --env IS_CONTAINER=TRUE \ - --volume "${PWD}:/go/src/github.com/openshift/${REPO_NAME}:z" \ - --workdir "/go/src/github.com/openshift/${REPO_NAME}" \ - openshift/origin-release:golang-1.10 \ - ./hack/go-lint.sh "${@}" -fi diff --git a/hack/go-vet.sh b/hack/go-vet.sh deleted file mode 100755 index ec2730b601bd..000000000000 --- a/hack/go-vet.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/sh -REPO_NAME=$(basename "${PWD}") -if [ "$IS_CONTAINER" != "" ]; then - go vet "${@}" -else - docker run --rm \ - --env IS_CONTAINER=TRUE \ - --volume "${PWD}:/go/src/k8s.io/autoscaler:z" \ - --workdir "/go/src/k8s.io/autoscaler" \ - openshift/origin-release:golang-1.10 \ - ./hack/go-vet.sh "${@}" -fi diff --git a/hack/update-gofmt.sh b/hack/update-gofmt.sh new file mode 100755 index 000000000000..81c9dc020ef4 --- /dev/null +++ b/hack/update-gofmt.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +find . -name "*.go" | grep -v "\/vendor\/" | xargs gofmt -s -w diff --git a/hack/verify-all.sh b/hack/verify-all.sh index 39c82925d191..04ff5bb699cd 100755 --- a/hack/verify-all.sh +++ b/hack/verify-all.sh @@ -61,16 +61,16 @@ do echo "Skipping $t" continue fi - if $SILENT ; then + if ! $SILENT ; then echo -e "Verifying $t" - if bash "$t" &> /dev/null; then + if bash "$t"; then echo -e "${color_green}SUCCESS${color_norm}" else echo -e "${color_red}FAILED${color_norm}" ret=1 fi else - bash "$t" || ret=1 + bash "$t" &> /dev/null || ret=1 fi done diff --git a/hack/verify-gofmt.sh b/hack/verify-gofmt.sh index 95d477f61014..6a938f154057 100755 --- a/hack/verify-gofmt.sh +++ b/hack/verify-gofmt.sh @@ -12,7 +12,7 @@ os::golang::verify_go_version bad_files=$(os::util::list_go_src_files | xargs gofmt -s -l) if [[ -n "${bad_files}" ]]; then - os::log::warning "!!! gofmt needs to be run on the listed files" + echo "Please run hack/update-gofmt.sh to fix the following files:" echo "${bad_files}" os::log::fatal "Try running 'gofmt -s -d [path]' Or autocorrect with 'hack/verify-gofmt.sh | xargs -n 1 gofmt -s -w'" diff --git a/hack/verify-golint.sh b/hack/verify-golint.sh index 3c7ba6e27cb7..6d5b609b9544 100755 --- a/hack/verify-golint.sh +++ b/hack/verify-golint.sh @@ -1,34 +1,39 @@ #!/bin/bash -source "$(dirname "${BASH_SOURCE}")/lib/init.sh" -os::golang::verify_go_version -os::util::ensure::system_binary_exists 'golint' +# Copyright 2014 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. -arg="${1:-""}" -bad_files="" +set -o errexit +set -o nounset +set -o pipefail -if [ "$arg" == "-m" ]; then - head=$(git rev-parse --short HEAD | xargs echo -n) - set +e - modified_files=$(git diff-tree --no-commit-id --name-only -r master..$head | \ - grep "^pkg" | grep ".go$" | grep -v "bindata.go$" | grep -v "Godeps" | \ - grep -v "third_party") - if [ -n "${modified_files}" ]; then - echo -e "Checking modified files: ${modified_files}\n" - for f in $modified_files; do golint $f; done - echo - fi - set -e -else - bad_files=$(os::util::list_go_src_files | \ - sort -u | \ - sed 's/^.{2}//' | \ - xargs -n1 printf "${GOPATH}/src/${OS_GO_PACKAGE}/%s\n" | \ - xargs -n1 golint) -fi +KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. +cd "${KUBE_ROOT}" -if [[ -n "${bad_files}" ]]; then - echo "golint detected following problems:" - echo "${bad_files}" - exit 1 +GOLINT=${GOLINT:-"golint"} +PACKAGES=($(go list ./... | grep -v /vendor/ | grep -v vertical-pod-autoscaler/pkg/client | grep -v vertical-pod-autoscaler/pkg/apis)) +bad_files=() +for package in "${PACKAGES[@]}"; do + out=$("${GOLINT}" -min_confidence=0.9 "${package}") + if [[ -n "${out}" ]]; then + bad_files+=("${out}") + fi +done +if [[ "${#bad_files[@]}" -ne 0 ]]; then + echo "!!! '$GOLINT' problems: " + echo "${bad_files[@]}" + exit 1 fi + +# ex: ts=2 sw=2 et filetype=sh diff --git a/hack/verify-spelling.sh b/hack/verify-spelling.sh index 623d291b0d29..68d51a6ab41c 100755 --- a/hack/verify-spelling.sh +++ b/hack/verify-spelling.sh @@ -1,4 +1,4 @@ -#!/usr/bin/env bash +#!/usr/bin/env bash # Copyright 2018 The Kubernetes Authors. 
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,8 +17,10 @@ set -o errexit set -o nounset set -o pipefail +DIR=$(dirname $0) + # Install tools we need -go install ../../github.com/client9/misspell/cmd/misspell +go install ${DIR}/../../../github.com/client9/misspell/cmd/misspell # Spell checking git ls-files | grep -v -e vendor | xargs misspell -error -o stderr diff --git a/vertical-pod-autoscaler/Godeps/Godeps.json b/vertical-pod-autoscaler/Godeps/Godeps.json index ea14ce178c9b..374d7f5549e4 100644 --- a/vertical-pod-autoscaler/Godeps/Godeps.json +++ b/vertical-pod-autoscaler/Godeps/Godeps.json @@ -13,8 +13,8 @@ }, { "ImportPath": "cloud.google.com/go/compute/metadata", - "Comment": "v0.27.0-1-g8c9fc24c", - "Rev": "8c9fc24c2b85cbc71e8f1dbb6b7ac70a20bfe1c2" + "Comment": "v0.27.0-10-ge39aa301", + "Rev": "e39aa301e1ad47bfd2ef11191c6d6a9bd79f0e55" }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute", @@ -131,178 +131,178 @@ }, { "ImportPath": "github.com/aws/aws-sdk-go/aws", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/awserr", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/awsutil", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/client", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/client/metadata", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/corehandlers", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/stscreds", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/csm", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": 
"3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/defaults", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/ec2metadata", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/endpoints", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/request", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/session", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/signer/v4", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/internal/sdkio", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/internal/sdkrand", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/internal/shareddefaults", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/private/protocol", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/ec2query", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/jsonrpc", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" 
}, { "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/rest", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/service/autoscaling", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/service/ec2", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/service/ecr", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/service/elb", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/service/elbv2", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/service/kms", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/aws/aws-sdk-go/service/sts", - "Comment": "v1.14.12-2-g3ae5d05b", - "Rev": "3ae5d05bd1c7fd4aec3ce990a57adf91f010f398" + "Comment": "v1.14.12-2-g15933831", + "Rev": "15933831628c2392c66ba4597dcdd05fe57319a5" }, { "ImportPath": "github.com/beorn7/perks/quantile", @@ -313,16 +313,6 @@ "Comment": "v3.5.0", "Rev": "b38d23b8782a487059e8fc8773e9a5b228a77cb6" }, - { - "ImportPath": "github.com/client9/misspell", - "Comment": "v0.3.0-8-g150e8af", - "Rev": "150e8af7b4cd6fdd5b74dc2256723b549ff31bb6" - }, - { - "ImportPath": "github.com/client9/misspell/cmd/misspell", - "Comment": "v0.3.0-8-g150e8af", - "Rev": "150e8af7b4cd6fdd5b74dc2256723b549ff31bb6" - }, { "ImportPath": "github.com/clusterhq/flocker-go", "Rev": "2b8b7259d3139c96c4a6871031355808ab3fd3b3" @@ -346,138 +336,138 @@ }, { "ImportPath": "github.com/containerd/containerd/api/services/containers/v1", - "Comment": "v1.0.2-1-g3de25105", - "Rev": "3de2510509bd08eff3dec4e6e1e32640814dad74" + "Comment": "v1.0.2-1-g448030b7", + "Rev": "448030b71ab0c73b62b01a873c4a591a6f844a25" }, { "ImportPath": "github.com/containerd/containerd/api/services/tasks/v1", - "Comment": "v1.0.2-1-g3de25105", - "Rev": "3de2510509bd08eff3dec4e6e1e32640814dad74" + "Comment": "v1.0.2-1-g448030b7", + "Rev": "448030b71ab0c73b62b01a873c4a591a6f844a25" }, { "ImportPath": 
"github.com/containerd/containerd/api/services/version/v1", - "Comment": "v1.0.2-1-g3de25105", - "Rev": "3de2510509bd08eff3dec4e6e1e32640814dad74" + "Comment": "v1.0.2-1-g448030b7", + "Rev": "448030b71ab0c73b62b01a873c4a591a6f844a25" }, { "ImportPath": "github.com/containerd/containerd/api/types", - "Comment": "v1.0.2-1-g3de25105", - "Rev": "3de2510509bd08eff3dec4e6e1e32640814dad74" + "Comment": "v1.0.2-1-g448030b7", + "Rev": "448030b71ab0c73b62b01a873c4a591a6f844a25" }, { "ImportPath": "github.com/containerd/containerd/api/types/task", - "Comment": "v1.0.2-1-g3de25105", - "Rev": "3de2510509bd08eff3dec4e6e1e32640814dad74" + "Comment": "v1.0.2-1-g448030b7", + "Rev": "448030b71ab0c73b62b01a873c4a591a6f844a25" }, { "ImportPath": "github.com/containerd/containerd/containers", - "Comment": "v1.0.2-1-g3de25105", - "Rev": "3de2510509bd08eff3dec4e6e1e32640814dad74" + "Comment": "v1.0.2-1-g448030b7", + "Rev": "448030b71ab0c73b62b01a873c4a591a6f844a25" }, { "ImportPath": "github.com/containerd/containerd/dialer", - "Comment": "v1.0.2-1-g3de25105", - "Rev": "3de2510509bd08eff3dec4e6e1e32640814dad74" + "Comment": "v1.0.2-1-g448030b7", + "Rev": "448030b71ab0c73b62b01a873c4a591a6f844a25" }, { "ImportPath": "github.com/containerd/containerd/errdefs", - "Comment": "v1.0.2-1-g3de25105", - "Rev": "3de2510509bd08eff3dec4e6e1e32640814dad74" + "Comment": "v1.0.2-1-g448030b7", + "Rev": "448030b71ab0c73b62b01a873c4a591a6f844a25" }, { "ImportPath": "github.com/containerd/containerd/namespaces", - "Comment": "v1.0.2-1-g3de25105", - "Rev": "3de2510509bd08eff3dec4e6e1e32640814dad74" + "Comment": "v1.0.2-1-g448030b7", + "Rev": "448030b71ab0c73b62b01a873c4a591a6f844a25" }, { "ImportPath": "github.com/containernetworking/cni/libcni", - "Comment": "v0.6.0-1-gd08b460", - "Rev": "d08b4604ea651e5daf893f028cc91df18c8dbdca" + "Comment": "v0.6.0-1-gb362b3b", + "Rev": "b362b3bd2a95151d87626e570eae83b3f55cf63c" }, { "ImportPath": "github.com/containernetworking/cni/pkg/invoke", - "Comment": "v0.6.0-1-gd08b460", - "Rev": "d08b4604ea651e5daf893f028cc91df18c8dbdca" + "Comment": "v0.6.0-1-gb362b3b", + "Rev": "b362b3bd2a95151d87626e570eae83b3f55cf63c" }, { "ImportPath": "github.com/containernetworking/cni/pkg/types", - "Comment": "v0.6.0-1-gd08b460", - "Rev": "d08b4604ea651e5daf893f028cc91df18c8dbdca" + "Comment": "v0.6.0-1-gb362b3b", + "Rev": "b362b3bd2a95151d87626e570eae83b3f55cf63c" }, { "ImportPath": "github.com/containernetworking/cni/pkg/types/020", - "Comment": "v0.6.0-1-gd08b460", - "Rev": "d08b4604ea651e5daf893f028cc91df18c8dbdca" + "Comment": "v0.6.0-1-gb362b3b", + "Rev": "b362b3bd2a95151d87626e570eae83b3f55cf63c" }, { "ImportPath": "github.com/containernetworking/cni/pkg/types/current", - "Comment": "v0.6.0-1-gd08b460", - "Rev": "d08b4604ea651e5daf893f028cc91df18c8dbdca" + "Comment": "v0.6.0-1-gb362b3b", + "Rev": "b362b3bd2a95151d87626e570eae83b3f55cf63c" }, { "ImportPath": "github.com/containernetworking/cni/pkg/version", - "Comment": "v0.6.0-1-gd08b460", - "Rev": "d08b4604ea651e5daf893f028cc91df18c8dbdca" + "Comment": "v0.6.0-1-gb362b3b", + "Rev": "b362b3bd2a95151d87626e570eae83b3f55cf63c" }, { "ImportPath": "github.com/coreos/etcd/auth/authpb", - "Comment": "v3.2.13-1-g8fd113ac7", - "Rev": "8fd113ac72ab6708489747cc2263760c75bdd02b" + "Comment": "v3.2.13-1-g72966afd0", + "Rev": "72966afd02962a8b07b7eae653409c5b40b4d465" }, { "ImportPath": "github.com/coreos/etcd/client", - "Comment": "v3.2.13-1-g8fd113ac7", - "Rev": "8fd113ac72ab6708489747cc2263760c75bdd02b" + "Comment": "v3.2.13-1-g72966afd0", + "Rev": 
"72966afd02962a8b07b7eae653409c5b40b4d465" }, { "ImportPath": "github.com/coreos/etcd/clientv3", - "Comment": "v3.2.13-1-g8fd113ac7", - "Rev": "8fd113ac72ab6708489747cc2263760c75bdd02b" + "Comment": "v3.2.13-1-g72966afd0", + "Rev": "72966afd02962a8b07b7eae653409c5b40b4d465" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes", - "Comment": "v3.2.13-1-g8fd113ac7", - "Rev": "8fd113ac72ab6708489747cc2263760c75bdd02b" + "Comment": "v3.2.13-1-g72966afd0", + "Rev": "72966afd02962a8b07b7eae653409c5b40b4d465" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb", - "Comment": "v3.2.13-1-g8fd113ac7", - "Rev": "8fd113ac72ab6708489747cc2263760c75bdd02b" + "Comment": "v3.2.13-1-g72966afd0", + "Rev": "72966afd02962a8b07b7eae653409c5b40b4d465" }, { "ImportPath": "github.com/coreos/etcd/mvcc/mvccpb", - "Comment": "v3.2.13-1-g8fd113ac7", - "Rev": "8fd113ac72ab6708489747cc2263760c75bdd02b" + "Comment": "v3.2.13-1-g72966afd0", + "Rev": "72966afd02962a8b07b7eae653409c5b40b4d465" }, { "ImportPath": "github.com/coreos/etcd/pkg/pathutil", - "Comment": "v3.2.13-1-g8fd113ac7", - "Rev": "8fd113ac72ab6708489747cc2263760c75bdd02b" + "Comment": "v3.2.13-1-g72966afd0", + "Rev": "72966afd02962a8b07b7eae653409c5b40b4d465" }, { "ImportPath": "github.com/coreos/etcd/pkg/srv", - "Comment": "v3.2.13-1-g8fd113ac7", - "Rev": "8fd113ac72ab6708489747cc2263760c75bdd02b" + "Comment": "v3.2.13-1-g72966afd0", + "Rev": "72966afd02962a8b07b7eae653409c5b40b4d465" }, { "ImportPath": "github.com/coreos/etcd/pkg/tlsutil", - "Comment": "v3.2.13-1-g8fd113ac7", - "Rev": "8fd113ac72ab6708489747cc2263760c75bdd02b" + "Comment": "v3.2.13-1-g72966afd0", + "Rev": "72966afd02962a8b07b7eae653409c5b40b4d465" }, { "ImportPath": "github.com/coreos/etcd/pkg/transport", - "Comment": "v3.2.13-1-g8fd113ac7", - "Rev": "8fd113ac72ab6708489747cc2263760c75bdd02b" + "Comment": "v3.2.13-1-g72966afd0", + "Rev": "72966afd02962a8b07b7eae653409c5b40b4d465" }, { "ImportPath": "github.com/coreos/etcd/pkg/types", - "Comment": "v3.2.13-1-g8fd113ac7", - "Rev": "8fd113ac72ab6708489747cc2263760c75bdd02b" + "Comment": "v3.2.13-1-g72966afd0", + "Rev": "72966afd02962a8b07b7eae653409c5b40b4d465" }, { "ImportPath": "github.com/coreos/etcd/version", - "Comment": "v3.2.13-1-g8fd113ac7", - "Rev": "8fd113ac72ab6708489747cc2263760c75bdd02b" + "Comment": "v3.2.13-1-g72966afd0", + "Rev": "72966afd02962a8b07b7eae653409c5b40b4d465" }, { "ImportPath": "github.com/coreos/go-semver/semver", @@ -505,13 +495,13 @@ }, { "ImportPath": "github.com/coreos/rkt/api/v1alpha", - "Comment": "v1.25.0-1-g6b79c47b", - "Rev": "6b79c47b42f838f9103b6c526c1fec53fc7d6c5b" + "Comment": "v1.25.0-1-g62f79d47", + "Rev": "62f79d47b0a0bf6b0c1fb9f67f4ece5b19812997" }, { "ImportPath": "github.com/cyphar/filepath-securejoin", - "Comment": "v0.2.1-2-g3384b0e", - "Rev": "3384b0ee469370f0bb306f19e10493b4d501092a" + "Comment": "v0.2.1-2-gf620ecb", + "Rev": "f620ecb42f4405f1aff9fdcd083d3f16ee36f442" }, { "ImportPath": "github.com/d2g/dhcp4", @@ -533,163 +523,163 @@ }, { "ImportPath": "github.com/docker/distribution/digestset", - "Comment": "v2.6.0-rc.1-210-g1054ae1f", - "Rev": "1054ae1f043f76252eb55beee75b5de9585df05b" + "Comment": "v2.6.0-rc.1-210-gc22f1a31", + "Rev": "c22f1a31ca15627816ca0a4a859b3416087c8210" }, { "ImportPath": "github.com/docker/distribution/reference", - "Comment": "v2.6.0-rc.1-210-g1054ae1f", - "Rev": "1054ae1f043f76252eb55beee75b5de9585df05b" + "Comment": "v2.6.0-rc.1-210-gc22f1a31", + "Rev": "c22f1a31ca15627816ca0a4a859b3416087c8210" }, { "ImportPath": 
"github.com/docker/docker/api", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-gdf6ea513d", - "Rev": "df6ea513d9879310e83665d464604ef61048a60a" + "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-g0564fbdcf", + "Rev": "0564fbdcf45f0bcb3aa9bac9aa804ee07a68b78e" }, { "ImportPath": "github.com/docker/docker/api/types", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-gdf6ea513d", - "Rev": "df6ea513d9879310e83665d464604ef61048a60a" + "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-g0564fbdcf", + "Rev": "0564fbdcf45f0bcb3aa9bac9aa804ee07a68b78e" }, { "ImportPath": "github.com/docker/docker/api/types/blkiodev", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-gdf6ea513d", - "Rev": "df6ea513d9879310e83665d464604ef61048a60a" + "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-g0564fbdcf", + "Rev": "0564fbdcf45f0bcb3aa9bac9aa804ee07a68b78e" }, { "ImportPath": "github.com/docker/docker/api/types/container", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-gdf6ea513d", - "Rev": "df6ea513d9879310e83665d464604ef61048a60a" + "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-g0564fbdcf", + "Rev": "0564fbdcf45f0bcb3aa9bac9aa804ee07a68b78e" }, { "ImportPath": "github.com/docker/docker/api/types/events", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-gdf6ea513d", - "Rev": "df6ea513d9879310e83665d464604ef61048a60a" + "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-g0564fbdcf", + "Rev": "0564fbdcf45f0bcb3aa9bac9aa804ee07a68b78e" }, { "ImportPath": "github.com/docker/docker/api/types/filters", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-gdf6ea513d", - "Rev": "df6ea513d9879310e83665d464604ef61048a60a" + "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-g0564fbdcf", + "Rev": "0564fbdcf45f0bcb3aa9bac9aa804ee07a68b78e" }, { "ImportPath": "github.com/docker/docker/api/types/image", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-gdf6ea513d", - "Rev": "df6ea513d9879310e83665d464604ef61048a60a" + "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-g0564fbdcf", + "Rev": "0564fbdcf45f0bcb3aa9bac9aa804ee07a68b78e" }, { "ImportPath": "github.com/docker/docker/api/types/mount", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-gdf6ea513d", - "Rev": "df6ea513d9879310e83665d464604ef61048a60a" + "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-g0564fbdcf", + "Rev": "0564fbdcf45f0bcb3aa9bac9aa804ee07a68b78e" }, { "ImportPath": "github.com/docker/docker/api/types/network", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-gdf6ea513d", - "Rev": "df6ea513d9879310e83665d464604ef61048a60a" + "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-g0564fbdcf", + "Rev": "0564fbdcf45f0bcb3aa9bac9aa804ee07a68b78e" }, { "ImportPath": "github.com/docker/docker/api/types/registry", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-gdf6ea513d", - "Rev": "df6ea513d9879310e83665d464604ef61048a60a" + "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-g0564fbdcf", + "Rev": "0564fbdcf45f0bcb3aa9bac9aa804ee07a68b78e" }, { "ImportPath": "github.com/docker/docker/api/types/strslice", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-gdf6ea513d", - "Rev": "df6ea513d9879310e83665d464604ef61048a60a" + "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-g0564fbdcf", + "Rev": "0564fbdcf45f0bcb3aa9bac9aa804ee07a68b78e" }, { "ImportPath": "github.com/docker/docker/api/types/swarm", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-gdf6ea513d", - "Rev": "df6ea513d9879310e83665d464604ef61048a60a" + "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-g0564fbdcf", + "Rev": "0564fbdcf45f0bcb3aa9bac9aa804ee07a68b78e" }, { "ImportPath": "github.com/docker/docker/api/types/swarm/runtime", - "Comment": 
"docs-v1.12.0-rc4-2016-07-15-7403-gdf6ea513d", - "Rev": "df6ea513d9879310e83665d464604ef61048a60a" + "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-g0564fbdcf", + "Rev": "0564fbdcf45f0bcb3aa9bac9aa804ee07a68b78e" }, { "ImportPath": "github.com/docker/docker/api/types/time", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-gdf6ea513d", - "Rev": "df6ea513d9879310e83665d464604ef61048a60a" + "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-g0564fbdcf", + "Rev": "0564fbdcf45f0bcb3aa9bac9aa804ee07a68b78e" }, { "ImportPath": "github.com/docker/docker/api/types/versions", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-gdf6ea513d", - "Rev": "df6ea513d9879310e83665d464604ef61048a60a" + "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-g0564fbdcf", + "Rev": "0564fbdcf45f0bcb3aa9bac9aa804ee07a68b78e" }, { "ImportPath": "github.com/docker/docker/api/types/volume", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-gdf6ea513d", - "Rev": "df6ea513d9879310e83665d464604ef61048a60a" + "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-g0564fbdcf", + "Rev": "0564fbdcf45f0bcb3aa9bac9aa804ee07a68b78e" }, { "ImportPath": "github.com/docker/docker/client", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-gdf6ea513d", - "Rev": "df6ea513d9879310e83665d464604ef61048a60a" + "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-g0564fbdcf", + "Rev": "0564fbdcf45f0bcb3aa9bac9aa804ee07a68b78e" }, { "ImportPath": "github.com/docker/docker/pkg/ioutils", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-gdf6ea513d", - "Rev": "df6ea513d9879310e83665d464604ef61048a60a" + "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-g0564fbdcf", + "Rev": "0564fbdcf45f0bcb3aa9bac9aa804ee07a68b78e" }, { "ImportPath": "github.com/docker/docker/pkg/jsonlog", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-gdf6ea513d", - "Rev": "df6ea513d9879310e83665d464604ef61048a60a" + "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-g0564fbdcf", + "Rev": "0564fbdcf45f0bcb3aa9bac9aa804ee07a68b78e" }, { "ImportPath": "github.com/docker/docker/pkg/jsonmessage", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-gdf6ea513d", - "Rev": "df6ea513d9879310e83665d464604ef61048a60a" + "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-g0564fbdcf", + "Rev": "0564fbdcf45f0bcb3aa9bac9aa804ee07a68b78e" }, { "ImportPath": "github.com/docker/docker/pkg/longpath", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-gdf6ea513d", - "Rev": "df6ea513d9879310e83665d464604ef61048a60a" + "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-g0564fbdcf", + "Rev": "0564fbdcf45f0bcb3aa9bac9aa804ee07a68b78e" }, { "ImportPath": "github.com/docker/docker/pkg/mount", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-gdf6ea513d", - "Rev": "df6ea513d9879310e83665d464604ef61048a60a" + "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-g0564fbdcf", + "Rev": "0564fbdcf45f0bcb3aa9bac9aa804ee07a68b78e" }, { "ImportPath": "github.com/docker/docker/pkg/parsers", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-gdf6ea513d", - "Rev": "df6ea513d9879310e83665d464604ef61048a60a" + "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-g0564fbdcf", + "Rev": "0564fbdcf45f0bcb3aa9bac9aa804ee07a68b78e" }, { "ImportPath": "github.com/docker/docker/pkg/parsers/operatingsystem", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-gdf6ea513d", - "Rev": "df6ea513d9879310e83665d464604ef61048a60a" + "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-g0564fbdcf", + "Rev": "0564fbdcf45f0bcb3aa9bac9aa804ee07a68b78e" }, { "ImportPath": "github.com/docker/docker/pkg/stdcopy", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-gdf6ea513d", - "Rev": "df6ea513d9879310e83665d464604ef61048a60a" + "Comment": 
"docs-v1.12.0-rc4-2016-07-15-7403-g0564fbdcf", + "Rev": "0564fbdcf45f0bcb3aa9bac9aa804ee07a68b78e" }, { "ImportPath": "github.com/docker/docker/pkg/sysinfo", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-gdf6ea513d", - "Rev": "df6ea513d9879310e83665d464604ef61048a60a" + "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-g0564fbdcf", + "Rev": "0564fbdcf45f0bcb3aa9bac9aa804ee07a68b78e" }, { "ImportPath": "github.com/docker/docker/pkg/system", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-gdf6ea513d", - "Rev": "df6ea513d9879310e83665d464604ef61048a60a" + "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-g0564fbdcf", + "Rev": "0564fbdcf45f0bcb3aa9bac9aa804ee07a68b78e" }, { "ImportPath": "github.com/docker/docker/pkg/term", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-gdf6ea513d", - "Rev": "df6ea513d9879310e83665d464604ef61048a60a" + "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-g0564fbdcf", + "Rev": "0564fbdcf45f0bcb3aa9bac9aa804ee07a68b78e" }, { "ImportPath": "github.com/docker/docker/pkg/term/windows", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-gdf6ea513d", - "Rev": "df6ea513d9879310e83665d464604ef61048a60a" + "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-g0564fbdcf", + "Rev": "0564fbdcf45f0bcb3aa9bac9aa804ee07a68b78e" }, { "ImportPath": "github.com/docker/docker/pkg/tlsconfig", - "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-gdf6ea513d", - "Rev": "df6ea513d9879310e83665d464604ef61048a60a" + "Comment": "docs-v1.12.0-rc4-2016-07-15-7403-g0564fbdcf", + "Rev": "0564fbdcf45f0bcb3aa9bac9aa804ee07a68b78e" }, { "ImportPath": "github.com/docker/go-connections/nat", @@ -713,8 +703,8 @@ }, { "ImportPath": "github.com/docker/libnetwork/ipvs", - "Comment": "v0.8.0-dev.2-911-gf2894550", - "Rev": "f2894550ee8c012c4134c234ed0629e16c755eaf" + "Comment": "v0.8.0-dev.2-911-g7dcd079c", + "Rev": "7dcd079c8a38b4485fbcbffceebdc128df86f001" }, { "ImportPath": "github.com/docker/libtrust", @@ -853,73 +843,44 @@ "ImportPath": "github.com/golang/groupcache/lru", "Rev": "02826c3e79038b59d737d3b1c0a1d937f71a4433" }, - { - "ImportPath": "github.com/golang/lint/golint", - "Rev": "06c8688daad7faa9da5a0c2f163a3d14aac986ca" - }, { "ImportPath": "github.com/golang/mock/gomock", "Rev": "bd3c8e81be01eef76d4b503f5e687d2d1354d2d9" }, { "ImportPath": "github.com/golang/protobuf/proto", - "Comment": "v1.2.0-2-gb27b920", - "Rev": "b27b920f9e71b439b873b17bf99f56467623814a" - }, - { - "ImportPath": "github.com/golang/protobuf/protoc-gen-go", - "Comment": "v1.2.0-2-gb27b920", - "Rev": "b27b920f9e71b439b873b17bf99f56467623814a" + "Comment": "v1.2.0-5-gebb3f58", + "Rev": "ebb3f5824e60ab924d5a647fdc09b85d87c2829f" }, { "ImportPath": "github.com/golang/protobuf/protoc-gen-go/descriptor", - "Comment": "v1.2.0-2-gb27b920", - "Rev": "b27b920f9e71b439b873b17bf99f56467623814a" - }, - { - "ImportPath": "github.com/golang/protobuf/protoc-gen-go/generator", - "Comment": "v1.2.0-2-gb27b920", - "Rev": "b27b920f9e71b439b873b17bf99f56467623814a" - }, - { - "ImportPath": "github.com/golang/protobuf/protoc-gen-go/generator/internal/remap", - "Comment": "v1.2.0-2-gb27b920", - "Rev": "b27b920f9e71b439b873b17bf99f56467623814a" - }, - { - "ImportPath": "github.com/golang/protobuf/protoc-gen-go/grpc", - "Comment": "v1.2.0-2-gb27b920", - "Rev": "b27b920f9e71b439b873b17bf99f56467623814a" - }, - { - "ImportPath": "github.com/golang/protobuf/protoc-gen-go/plugin", - "Comment": "v1.2.0-2-gb27b920", - "Rev": "b27b920f9e71b439b873b17bf99f56467623814a" + "Comment": "v1.2.0-5-gebb3f58", + "Rev": "ebb3f5824e60ab924d5a647fdc09b85d87c2829f" }, { "ImportPath": 
"github.com/golang/protobuf/ptypes", - "Comment": "v1.2.0-2-gb27b920", - "Rev": "b27b920f9e71b439b873b17bf99f56467623814a" + "Comment": "v1.2.0-5-gebb3f58", + "Rev": "ebb3f5824e60ab924d5a647fdc09b85d87c2829f" }, { "ImportPath": "github.com/golang/protobuf/ptypes/any", - "Comment": "v1.2.0-2-gb27b920", - "Rev": "b27b920f9e71b439b873b17bf99f56467623814a" + "Comment": "v1.2.0-5-gebb3f58", + "Rev": "ebb3f5824e60ab924d5a647fdc09b85d87c2829f" }, { "ImportPath": "github.com/golang/protobuf/ptypes/duration", - "Comment": "v1.2.0-2-gb27b920", - "Rev": "b27b920f9e71b439b873b17bf99f56467623814a" + "Comment": "v1.2.0-5-gebb3f58", + "Rev": "ebb3f5824e60ab924d5a647fdc09b85d87c2829f" }, { "ImportPath": "github.com/golang/protobuf/ptypes/timestamp", - "Comment": "v1.2.0-2-gb27b920", - "Rev": "b27b920f9e71b439b873b17bf99f56467623814a" + "Comment": "v1.2.0-5-gebb3f58", + "Rev": "ebb3f5824e60ab924d5a647fdc09b85d87c2829f" }, { "ImportPath": "github.com/golang/protobuf/ptypes/wrappers", - "Comment": "v1.2.0-2-gb27b920", - "Rev": "b27b920f9e71b439b873b17bf99f56467623814a" + "Comment": "v1.2.0-5-gebb3f58", + "Rev": "ebb3f5824e60ab924d5a647fdc09b85d87c2829f" }, { "ImportPath": "github.com/google/btree", @@ -927,178 +888,178 @@ }, { "ImportPath": "github.com/google/cadvisor/accelerators", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/cache/memory", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/collector", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/container", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/container/common", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/container/containerd", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/container/crio", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/container/docker", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/container/libcontainer", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/container/raw", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": 
"v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/container/rkt", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/container/systemd", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/devicemapper", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/events", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/fs", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/info/v1", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/info/v2", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/machine", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/manager", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/manager/watcher", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/manager/watcher/raw", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/manager/watcher/rkt", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/metrics", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/storage", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/summary", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": 
"v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/utils", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/utils/cloudinfo", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/utils/cpuload", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/utils/cpuload/netlink", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/utils/docker", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/utils/oomparser", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/utils/sysfs", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/utils/sysinfo", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/version", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/cadvisor/zfs", - "Comment": "v0.30.2-1-g173a1426", - "Rev": "173a1426532f2d2faaefc6210d2112392a9c394b" + "Comment": "v0.30.2-1-g13117de6", + "Rev": "13117de6ebccdfa9c1300a077ff0abe395412bce" }, { "ImportPath": "github.com/google/gofuzz", @@ -1341,47 +1302,37 @@ "ImportPath": "github.com/kardianos/osext", "Rev": "8fef92e41e22a70e700a96b29f066cda30ea24ef" }, - { - "ImportPath": "github.com/kisielk/gotool", - "Comment": "v1.0.0", - "Rev": "80517062f582ea3340cd4baf70e86d539ae7d84d" - }, - { - "ImportPath": "github.com/kisielk/gotool/internal/load", - "Comment": "v1.0.0", - "Rev": "80517062f582ea3340cd4baf70e86d539ae7d84d" - }, { "ImportPath": "github.com/kr/fs", "Rev": "2788f0dbd16903de03cb8186e5c7d97b69ad387b" }, { "ImportPath": "github.com/libopenstorage/openstorage/api", - "Rev": "6ebe79076a46a4c0f76dec194046c0472cc01495" + "Rev": "dfeb28e68106c0bcfc3a1f72321cb4b132e2184a" }, { "ImportPath": "github.com/libopenstorage/openstorage/api/client", - "Rev": "6ebe79076a46a4c0f76dec194046c0472cc01495" + "Rev": "dfeb28e68106c0bcfc3a1f72321cb4b132e2184a" }, { "ImportPath": "github.com/libopenstorage/openstorage/api/client/volume", - "Rev": "6ebe79076a46a4c0f76dec194046c0472cc01495" + "Rev": "dfeb28e68106c0bcfc3a1f72321cb4b132e2184a" }, { "ImportPath": 
"github.com/libopenstorage/openstorage/api/spec", - "Rev": "6ebe79076a46a4c0f76dec194046c0472cc01495" + "Rev": "dfeb28e68106c0bcfc3a1f72321cb4b132e2184a" }, { "ImportPath": "github.com/libopenstorage/openstorage/pkg/parser", - "Rev": "6ebe79076a46a4c0f76dec194046c0472cc01495" + "Rev": "dfeb28e68106c0bcfc3a1f72321cb4b132e2184a" }, { "ImportPath": "github.com/libopenstorage/openstorage/pkg/units", - "Rev": "6ebe79076a46a4c0f76dec194046c0472cc01495" + "Rev": "dfeb28e68106c0bcfc3a1f72321cb4b132e2184a" }, { "ImportPath": "github.com/libopenstorage/openstorage/volume", - "Rev": "6ebe79076a46a4c0f76dec194046c0472cc01495" + "Rev": "dfeb28e68106c0bcfc3a1f72321cb4b132e2184a" }, { "ImportPath": "github.com/lpabon/godbc", @@ -1624,83 +1575,83 @@ }, { "ImportPath": "github.com/opencontainers/runc/libcontainer", - "Comment": "v1.0.0-rc5-47-gd0be5878", - "Rev": "d0be5878ada97c6dbd4a11cba2a8153e9bf9eadf" + "Comment": "v1.0.0-rc5-47-g9d8eb3ac", + "Rev": "9d8eb3ac494ee2116dfc2e3d7cd6f9254176bf55" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/apparmor", - "Comment": "v1.0.0-rc5-47-gd0be5878", - "Rev": "d0be5878ada97c6dbd4a11cba2a8153e9bf9eadf" + "Comment": "v1.0.0-rc5-47-g9d8eb3ac", + "Rev": "9d8eb3ac494ee2116dfc2e3d7cd6f9254176bf55" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups", - "Comment": "v1.0.0-rc5-47-gd0be5878", - "Rev": "d0be5878ada97c6dbd4a11cba2a8153e9bf9eadf" + "Comment": "v1.0.0-rc5-47-g9d8eb3ac", + "Rev": "9d8eb3ac494ee2116dfc2e3d7cd6f9254176bf55" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/fs", - "Comment": "v1.0.0-rc5-47-gd0be5878", - "Rev": "d0be5878ada97c6dbd4a11cba2a8153e9bf9eadf" + "Comment": "v1.0.0-rc5-47-g9d8eb3ac", + "Rev": "9d8eb3ac494ee2116dfc2e3d7cd6f9254176bf55" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/systemd", - "Comment": "v1.0.0-rc5-47-gd0be5878", - "Rev": "d0be5878ada97c6dbd4a11cba2a8153e9bf9eadf" + "Comment": "v1.0.0-rc5-47-g9d8eb3ac", + "Rev": "9d8eb3ac494ee2116dfc2e3d7cd6f9254176bf55" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/configs", - "Comment": "v1.0.0-rc5-47-gd0be5878", - "Rev": "d0be5878ada97c6dbd4a11cba2a8153e9bf9eadf" + "Comment": "v1.0.0-rc5-47-g9d8eb3ac", + "Rev": "9d8eb3ac494ee2116dfc2e3d7cd6f9254176bf55" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/configs/validate", - "Comment": "v1.0.0-rc5-47-gd0be5878", - "Rev": "d0be5878ada97c6dbd4a11cba2a8153e9bf9eadf" + "Comment": "v1.0.0-rc5-47-g9d8eb3ac", + "Rev": "9d8eb3ac494ee2116dfc2e3d7cd6f9254176bf55" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/criurpc", - "Comment": "v1.0.0-rc5-47-gd0be5878", - "Rev": "d0be5878ada97c6dbd4a11cba2a8153e9bf9eadf" + "Comment": "v1.0.0-rc5-47-g9d8eb3ac", + "Rev": "9d8eb3ac494ee2116dfc2e3d7cd6f9254176bf55" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/intelrdt", - "Comment": "v1.0.0-rc5-47-gd0be5878", - "Rev": "d0be5878ada97c6dbd4a11cba2a8153e9bf9eadf" + "Comment": "v1.0.0-rc5-47-g9d8eb3ac", + "Rev": "9d8eb3ac494ee2116dfc2e3d7cd6f9254176bf55" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/keys", - "Comment": "v1.0.0-rc5-47-gd0be5878", - "Rev": "d0be5878ada97c6dbd4a11cba2a8153e9bf9eadf" + "Comment": "v1.0.0-rc5-47-g9d8eb3ac", + "Rev": "9d8eb3ac494ee2116dfc2e3d7cd6f9254176bf55" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/mount", - "Comment": "v1.0.0-rc5-47-gd0be5878", - "Rev": "d0be5878ada97c6dbd4a11cba2a8153e9bf9eadf" + "Comment": "v1.0.0-rc5-47-g9d8eb3ac", + "Rev": 
"9d8eb3ac494ee2116dfc2e3d7cd6f9254176bf55" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/seccomp", - "Comment": "v1.0.0-rc5-47-gd0be5878", - "Rev": "d0be5878ada97c6dbd4a11cba2a8153e9bf9eadf" + "Comment": "v1.0.0-rc5-47-g9d8eb3ac", + "Rev": "9d8eb3ac494ee2116dfc2e3d7cd6f9254176bf55" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/stacktrace", - "Comment": "v1.0.0-rc5-47-gd0be5878", - "Rev": "d0be5878ada97c6dbd4a11cba2a8153e9bf9eadf" + "Comment": "v1.0.0-rc5-47-g9d8eb3ac", + "Rev": "9d8eb3ac494ee2116dfc2e3d7cd6f9254176bf55" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/system", - "Comment": "v1.0.0-rc5-47-gd0be5878", - "Rev": "d0be5878ada97c6dbd4a11cba2a8153e9bf9eadf" + "Comment": "v1.0.0-rc5-47-g9d8eb3ac", + "Rev": "9d8eb3ac494ee2116dfc2e3d7cd6f9254176bf55" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/user", - "Comment": "v1.0.0-rc5-47-gd0be5878", - "Rev": "d0be5878ada97c6dbd4a11cba2a8153e9bf9eadf" + "Comment": "v1.0.0-rc5-47-g9d8eb3ac", + "Rev": "9d8eb3ac494ee2116dfc2e3d7cd6f9254176bf55" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/utils", - "Comment": "v1.0.0-rc5-47-gd0be5878", - "Rev": "d0be5878ada97c6dbd4a11cba2a8153e9bf9eadf" + "Comment": "v1.0.0-rc5-47-g9d8eb3ac", + "Rev": "9d8eb3ac494ee2116dfc2e3d7cd6f9254176bf55" }, { "ImportPath": "github.com/opencontainers/runtime-spec/specs-go", @@ -1786,8 +1737,8 @@ }, { "ImportPath": "github.com/rancher/go-rancher/client", - "Comment": "v0.1.0-197-gad2e282", - "Rev": "ad2e282386185b19d4a0a8c196c434b536d6a7d0" + "Comment": "v0.1.0-197-g1483c76", + "Rev": "1483c76f2ed21c50a4e03f318691366003656367" }, { "ImportPath": "github.com/renstrom/dedent", @@ -1796,7 +1747,7 @@ }, { "ImportPath": "github.com/rubiojr/go-vhd/vhd", - "Rev": "c3a6f6f7c5a5c33a1b29d281fb283b503a196dd4" + "Rev": "70b84ecb9ee5e4df29b1b919e1870c4ec361d07c" }, { "ImportPath": "github.com/satori/go.uuid", @@ -1872,13 +1823,13 @@ }, { "ImportPath": "github.com/stretchr/testify/assert", - "Comment": "v1.2.1-15-g9c95e83", - "Rev": "9c95e8398eb8a2a0ca7123bb3fc0aeb7362c666e" + "Comment": "v1.2.1-15-g99c8278", + "Rev": "99c8278d2338e24a70623dd5a55b074e556ac82a" }, { "ImportPath": "github.com/stretchr/testify/mock", - "Comment": "v1.2.1-15-g9c95e83", - "Rev": "9c95e8398eb8a2a0ca7123bb3fc0aeb7362c666e" + "Comment": "v1.2.1-15-g99c8278", + "Rev": "99c8278d2338e24a70623dd5a55b074e556ac82a" }, { "ImportPath": "github.com/syndtr/gocapability/capability", @@ -1902,123 +1853,123 @@ }, { "ImportPath": "github.com/vmware/govmomi/find", - "Comment": "v0.18.0-42-g144f03d", - "Rev": "144f03d03a4e09fb5abd4031678f5bb82d8a2d11" + "Comment": "v0.18.0-42-g4e4ab37", + "Rev": "4e4ab37ae4c86f4c15a34d31fe70f1b127fdb2ac" }, { "ImportPath": "github.com/vmware/govmomi/list", - "Comment": "v0.18.0-42-g144f03d", - "Rev": "144f03d03a4e09fb5abd4031678f5bb82d8a2d11" + "Comment": "v0.18.0-42-g4e4ab37", + "Rev": "4e4ab37ae4c86f4c15a34d31fe70f1b127fdb2ac" }, { "ImportPath": "github.com/vmware/govmomi/lookup", - "Comment": "v0.18.0-42-g144f03d", - "Rev": "144f03d03a4e09fb5abd4031678f5bb82d8a2d11" + "Comment": "v0.18.0-42-g4e4ab37", + "Rev": "4e4ab37ae4c86f4c15a34d31fe70f1b127fdb2ac" }, { "ImportPath": "github.com/vmware/govmomi/lookup/methods", - "Comment": "v0.18.0-42-g144f03d", - "Rev": "144f03d03a4e09fb5abd4031678f5bb82d8a2d11" + "Comment": "v0.18.0-42-g4e4ab37", + "Rev": "4e4ab37ae4c86f4c15a34d31fe70f1b127fdb2ac" }, { "ImportPath": "github.com/vmware/govmomi/lookup/types", - "Comment": "v0.18.0-42-g144f03d", - "Rev": 
"144f03d03a4e09fb5abd4031678f5bb82d8a2d11" + "Comment": "v0.18.0-42-g4e4ab37", + "Rev": "4e4ab37ae4c86f4c15a34d31fe70f1b127fdb2ac" }, { "ImportPath": "github.com/vmware/govmomi/nfc", - "Comment": "v0.18.0-42-g144f03d", - "Rev": "144f03d03a4e09fb5abd4031678f5bb82d8a2d11" + "Comment": "v0.18.0-42-g4e4ab37", + "Rev": "4e4ab37ae4c86f4c15a34d31fe70f1b127fdb2ac" }, { "ImportPath": "github.com/vmware/govmomi/object", - "Comment": "v0.18.0-42-g144f03d", - "Rev": "144f03d03a4e09fb5abd4031678f5bb82d8a2d11" + "Comment": "v0.18.0-42-g4e4ab37", + "Rev": "4e4ab37ae4c86f4c15a34d31fe70f1b127fdb2ac" }, { "ImportPath": "github.com/vmware/govmomi/pbm", - "Comment": "v0.18.0-42-g144f03d", - "Rev": "144f03d03a4e09fb5abd4031678f5bb82d8a2d11" + "Comment": "v0.18.0-42-g4e4ab37", + "Rev": "4e4ab37ae4c86f4c15a34d31fe70f1b127fdb2ac" }, { "ImportPath": "github.com/vmware/govmomi/pbm/methods", - "Comment": "v0.18.0-42-g144f03d", - "Rev": "144f03d03a4e09fb5abd4031678f5bb82d8a2d11" + "Comment": "v0.18.0-42-g4e4ab37", + "Rev": "4e4ab37ae4c86f4c15a34d31fe70f1b127fdb2ac" }, { "ImportPath": "github.com/vmware/govmomi/pbm/types", - "Comment": "v0.18.0-42-g144f03d", - "Rev": "144f03d03a4e09fb5abd4031678f5bb82d8a2d11" + "Comment": "v0.18.0-42-g4e4ab37", + "Rev": "4e4ab37ae4c86f4c15a34d31fe70f1b127fdb2ac" }, { "ImportPath": "github.com/vmware/govmomi/property", - "Comment": "v0.18.0-42-g144f03d", - "Rev": "144f03d03a4e09fb5abd4031678f5bb82d8a2d11" + "Comment": "v0.18.0-42-g4e4ab37", + "Rev": "4e4ab37ae4c86f4c15a34d31fe70f1b127fdb2ac" }, { "ImportPath": "github.com/vmware/govmomi/session", - "Comment": "v0.18.0-42-g144f03d", - "Rev": "144f03d03a4e09fb5abd4031678f5bb82d8a2d11" + "Comment": "v0.18.0-42-g4e4ab37", + "Rev": "4e4ab37ae4c86f4c15a34d31fe70f1b127fdb2ac" }, { "ImportPath": "github.com/vmware/govmomi/sts", - "Comment": "v0.18.0-42-g144f03d", - "Rev": "144f03d03a4e09fb5abd4031678f5bb82d8a2d11" + "Comment": "v0.18.0-42-g4e4ab37", + "Rev": "4e4ab37ae4c86f4c15a34d31fe70f1b127fdb2ac" }, { "ImportPath": "github.com/vmware/govmomi/sts/internal", - "Comment": "v0.18.0-42-g144f03d", - "Rev": "144f03d03a4e09fb5abd4031678f5bb82d8a2d11" + "Comment": "v0.18.0-42-g4e4ab37", + "Rev": "4e4ab37ae4c86f4c15a34d31fe70f1b127fdb2ac" }, { "ImportPath": "github.com/vmware/govmomi/task", - "Comment": "v0.18.0-42-g144f03d", - "Rev": "144f03d03a4e09fb5abd4031678f5bb82d8a2d11" + "Comment": "v0.18.0-42-g4e4ab37", + "Rev": "4e4ab37ae4c86f4c15a34d31fe70f1b127fdb2ac" }, { "ImportPath": "github.com/vmware/govmomi/vapi/tags", - "Comment": "v0.18.0-42-g144f03d", - "Rev": "144f03d03a4e09fb5abd4031678f5bb82d8a2d11" + "Comment": "v0.18.0-42-g4e4ab37", + "Rev": "4e4ab37ae4c86f4c15a34d31fe70f1b127fdb2ac" }, { "ImportPath": "github.com/vmware/govmomi/vim25", - "Comment": "v0.18.0-42-g144f03d", - "Rev": "144f03d03a4e09fb5abd4031678f5bb82d8a2d11" + "Comment": "v0.18.0-42-g4e4ab37", + "Rev": "4e4ab37ae4c86f4c15a34d31fe70f1b127fdb2ac" }, { "ImportPath": "github.com/vmware/govmomi/vim25/debug", - "Comment": "v0.18.0-42-g144f03d", - "Rev": "144f03d03a4e09fb5abd4031678f5bb82d8a2d11" + "Comment": "v0.18.0-42-g4e4ab37", + "Rev": "4e4ab37ae4c86f4c15a34d31fe70f1b127fdb2ac" }, { "ImportPath": "github.com/vmware/govmomi/vim25/methods", - "Comment": "v0.18.0-42-g144f03d", - "Rev": "144f03d03a4e09fb5abd4031678f5bb82d8a2d11" + "Comment": "v0.18.0-42-g4e4ab37", + "Rev": "4e4ab37ae4c86f4c15a34d31fe70f1b127fdb2ac" }, { "ImportPath": "github.com/vmware/govmomi/vim25/mo", - "Comment": "v0.18.0-42-g144f03d", - "Rev": "144f03d03a4e09fb5abd4031678f5bb82d8a2d11" + "Comment": 
"v0.18.0-42-g4e4ab37", + "Rev": "4e4ab37ae4c86f4c15a34d31fe70f1b127fdb2ac" }, { "ImportPath": "github.com/vmware/govmomi/vim25/progress", - "Comment": "v0.18.0-42-g144f03d", - "Rev": "144f03d03a4e09fb5abd4031678f5bb82d8a2d11" + "Comment": "v0.18.0-42-g4e4ab37", + "Rev": "4e4ab37ae4c86f4c15a34d31fe70f1b127fdb2ac" }, { "ImportPath": "github.com/vmware/govmomi/vim25/soap", - "Comment": "v0.18.0-42-g144f03d", - "Rev": "144f03d03a4e09fb5abd4031678f5bb82d8a2d11" + "Comment": "v0.18.0-42-g4e4ab37", + "Rev": "4e4ab37ae4c86f4c15a34d31fe70f1b127fdb2ac" }, { "ImportPath": "github.com/vmware/govmomi/vim25/types", - "Comment": "v0.18.0-42-g144f03d", - "Rev": "144f03d03a4e09fb5abd4031678f5bb82d8a2d11" + "Comment": "v0.18.0-42-g4e4ab37", + "Rev": "4e4ab37ae4c86f4c15a34d31fe70f1b127fdb2ac" }, { "ImportPath": "github.com/vmware/govmomi/vim25/xml", - "Comment": "v0.18.0-42-g144f03d", - "Rev": "144f03d03a4e09fb5abd4031678f5bb82d8a2d11" + "Comment": "v0.18.0-42-g4e4ab37", + "Rev": "4e4ab37ae4c86f4c15a34d31fe70f1b127fdb2ac" }, { "ImportPath": "github.com/vmware/photon-controller-go-sdk/SSPI", @@ -2078,11 +2029,7 @@ }, { "ImportPath": "golang.org/x/exp/inotify", - "Rev": "bf5910aa160d3362a3eb8028b9fe5c467c5bf805" - }, - { - "ImportPath": "golang.org/x/lint", - "Rev": "06c8688daad7faa9da5a0c2f163a3d14aac986ca" + "Rev": "3f3572563f9c7721417b43b0a974e55194fc3004" }, { "ImportPath": "golang.org/x/net/context", @@ -2154,15 +2101,15 @@ }, { "ImportPath": "golang.org/x/sys/unix", - "Rev": "ebe1bf3edb3325c393447059974de898d5133eb8" + "Rev": "d0be0721c37eeb5299f245a996a483160fc36940" }, { "ImportPath": "golang.org/x/sys/windows", - "Rev": "ebe1bf3edb3325c393447059974de898d5133eb8" + "Rev": "d0be0721c37eeb5299f245a996a483160fc36940" }, { "ImportPath": "golang.org/x/sys/windows/svc", - "Rev": "ebe1bf3edb3325c393447059974de898d5133eb8" + "Rev": "d0be0721c37eeb5299f245a996a483160fc36940" }, { "ImportPath": "golang.org/x/text/cases", @@ -2232,212 +2179,180 @@ "ImportPath": "golang.org/x/time/rate", "Rev": "f51c12702a4d776e4c1fa9b0fabab841babae631" }, - { - "ImportPath": "golang.org/x/tools/cmd/goimports", - "Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32" - }, - { - "ImportPath": "golang.org/x/tools/go/ast/astutil", - "Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32" - }, - { - "ImportPath": "golang.org/x/tools/go/buildutil", - "Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32" - }, - { - "ImportPath": "golang.org/x/tools/go/gcexportdata", - "Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32" - }, - { - "ImportPath": "golang.org/x/tools/go/gcimporter15", - "Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32" - }, - { - "ImportPath": "golang.org/x/tools/go/loader", - "Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32" - }, - { - "ImportPath": "golang.org/x/tools/go/types/typeutil", - "Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32" - }, - { - "ImportPath": "golang.org/x/tools/imports", - "Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32" - }, { "ImportPath": "google.golang.org/api/compute/v0.alpha", - "Rev": "0ad5a633fea1d4b64bf5e6a01e30d1fc466038e5" + "Rev": "7ca32eb868bf53ea2fc406698eb98583a8073d19" }, { "ImportPath": "google.golang.org/api/compute/v0.beta", - "Rev": "0ad5a633fea1d4b64bf5e6a01e30d1fc466038e5" + "Rev": "7ca32eb868bf53ea2fc406698eb98583a8073d19" }, { "ImportPath": "google.golang.org/api/compute/v1", - "Rev": "0ad5a633fea1d4b64bf5e6a01e30d1fc466038e5" + "Rev": "7ca32eb868bf53ea2fc406698eb98583a8073d19" }, { "ImportPath": "google.golang.org/api/container/v1", - "Rev": "0ad5a633fea1d4b64bf5e6a01e30d1fc466038e5" + 
"Rev": "7ca32eb868bf53ea2fc406698eb98583a8073d19" }, { "ImportPath": "google.golang.org/api/gensupport", - "Rev": "0ad5a633fea1d4b64bf5e6a01e30d1fc466038e5" + "Rev": "7ca32eb868bf53ea2fc406698eb98583a8073d19" }, { "ImportPath": "google.golang.org/api/googleapi", - "Rev": "0ad5a633fea1d4b64bf5e6a01e30d1fc466038e5" + "Rev": "7ca32eb868bf53ea2fc406698eb98583a8073d19" }, { "ImportPath": "google.golang.org/api/googleapi/internal/uritemplates", - "Rev": "0ad5a633fea1d4b64bf5e6a01e30d1fc466038e5" + "Rev": "7ca32eb868bf53ea2fc406698eb98583a8073d19" }, { "ImportPath": "google.golang.org/api/tpu/v1", - "Rev": "0ad5a633fea1d4b64bf5e6a01e30d1fc466038e5" + "Rev": "7ca32eb868bf53ea2fc406698eb98583a8073d19" }, { "ImportPath": "google.golang.org/genproto/googleapis/api/annotations", - "Rev": "11092d34479b07829b72e10713b159248caf5dad" + "Rev": "36d5787dc5356b711fe8f3040271aaf51c35955b" }, { "ImportPath": "google.golang.org/genproto/googleapis/rpc/status", - "Rev": "11092d34479b07829b72e10713b159248caf5dad" + "Rev": "36d5787dc5356b711fe8f3040271aaf51c35955b" }, { "ImportPath": "google.golang.org/grpc", - "Comment": "v1.2.0-649-gccd64cf", - "Rev": "ccd64cfcfa597f31f6161da9fc101885b1eadc2f" + "Comment": "v1.2.0-657-g9cc4fdb", + "Rev": "9cc4fdbde2304827ffdbc7896f49db40c5536600" }, { "ImportPath": "google.golang.org/grpc/balancer", - "Comment": "v1.2.0-649-gccd64cf", - "Rev": "ccd64cfcfa597f31f6161da9fc101885b1eadc2f" + "Comment": "v1.2.0-657-g9cc4fdb", + "Rev": "9cc4fdbde2304827ffdbc7896f49db40c5536600" }, { "ImportPath": "google.golang.org/grpc/balancer/base", - "Comment": "v1.2.0-649-gccd64cf", - "Rev": "ccd64cfcfa597f31f6161da9fc101885b1eadc2f" + "Comment": "v1.2.0-657-g9cc4fdb", + "Rev": "9cc4fdbde2304827ffdbc7896f49db40c5536600" }, { "ImportPath": "google.golang.org/grpc/balancer/roundrobin", - "Comment": "v1.2.0-649-gccd64cf", - "Rev": "ccd64cfcfa597f31f6161da9fc101885b1eadc2f" + "Comment": "v1.2.0-657-g9cc4fdb", + "Rev": "9cc4fdbde2304827ffdbc7896f49db40c5536600" }, { "ImportPath": "google.golang.org/grpc/codes", - "Comment": "v1.2.0-649-gccd64cf", - "Rev": "ccd64cfcfa597f31f6161da9fc101885b1eadc2f" + "Comment": "v1.2.0-657-g9cc4fdb", + "Rev": "9cc4fdbde2304827ffdbc7896f49db40c5536600" }, { "ImportPath": "google.golang.org/grpc/connectivity", - "Comment": "v1.2.0-649-gccd64cf", - "Rev": "ccd64cfcfa597f31f6161da9fc101885b1eadc2f" + "Comment": "v1.2.0-657-g9cc4fdb", + "Rev": "9cc4fdbde2304827ffdbc7896f49db40c5536600" }, { "ImportPath": "google.golang.org/grpc/credentials", - "Comment": "v1.2.0-649-gccd64cf", - "Rev": "ccd64cfcfa597f31f6161da9fc101885b1eadc2f" + "Comment": "v1.2.0-657-g9cc4fdb", + "Rev": "9cc4fdbde2304827ffdbc7896f49db40c5536600" }, { "ImportPath": "google.golang.org/grpc/encoding", - "Comment": "v1.2.0-649-gccd64cf", - "Rev": "ccd64cfcfa597f31f6161da9fc101885b1eadc2f" + "Comment": "v1.2.0-657-g9cc4fdb", + "Rev": "9cc4fdbde2304827ffdbc7896f49db40c5536600" }, { "ImportPath": "google.golang.org/grpc/encoding/proto", - "Comment": "v1.2.0-649-gccd64cf", - "Rev": "ccd64cfcfa597f31f6161da9fc101885b1eadc2f" + "Comment": "v1.2.0-657-g9cc4fdb", + "Rev": "9cc4fdbde2304827ffdbc7896f49db40c5536600" }, { "ImportPath": "google.golang.org/grpc/grpclog", - "Comment": "v1.2.0-649-gccd64cf", - "Rev": "ccd64cfcfa597f31f6161da9fc101885b1eadc2f" + "Comment": "v1.2.0-657-g9cc4fdb", + "Rev": "9cc4fdbde2304827ffdbc7896f49db40c5536600" }, { "ImportPath": "google.golang.org/grpc/health/grpc_health_v1", - "Comment": "v1.2.0-649-gccd64cf", - "Rev": "ccd64cfcfa597f31f6161da9fc101885b1eadc2f" + "Comment": 
"v1.2.0-657-g9cc4fdb", + "Rev": "9cc4fdbde2304827ffdbc7896f49db40c5536600" }, { "ImportPath": "google.golang.org/grpc/internal", - "Comment": "v1.2.0-649-gccd64cf", - "Rev": "ccd64cfcfa597f31f6161da9fc101885b1eadc2f" + "Comment": "v1.2.0-657-g9cc4fdb", + "Rev": "9cc4fdbde2304827ffdbc7896f49db40c5536600" }, { "ImportPath": "google.golang.org/grpc/internal/backoff", - "Comment": "v1.2.0-649-gccd64cf", - "Rev": "ccd64cfcfa597f31f6161da9fc101885b1eadc2f" + "Comment": "v1.2.0-657-g9cc4fdb", + "Rev": "9cc4fdbde2304827ffdbc7896f49db40c5536600" }, { "ImportPath": "google.golang.org/grpc/internal/channelz", - "Comment": "v1.2.0-649-gccd64cf", - "Rev": "ccd64cfcfa597f31f6161da9fc101885b1eadc2f" + "Comment": "v1.2.0-657-g9cc4fdb", + "Rev": "9cc4fdbde2304827ffdbc7896f49db40c5536600" }, { "ImportPath": "google.golang.org/grpc/internal/envconfig", - "Comment": "v1.2.0-649-gccd64cf", - "Rev": "ccd64cfcfa597f31f6161da9fc101885b1eadc2f" + "Comment": "v1.2.0-657-g9cc4fdb", + "Rev": "9cc4fdbde2304827ffdbc7896f49db40c5536600" }, { "ImportPath": "google.golang.org/grpc/internal/grpcrand", - "Comment": "v1.2.0-649-gccd64cf", - "Rev": "ccd64cfcfa597f31f6161da9fc101885b1eadc2f" + "Comment": "v1.2.0-657-g9cc4fdb", + "Rev": "9cc4fdbde2304827ffdbc7896f49db40c5536600" }, { "ImportPath": "google.golang.org/grpc/internal/transport", - "Comment": "v1.2.0-649-gccd64cf", - "Rev": "ccd64cfcfa597f31f6161da9fc101885b1eadc2f" + "Comment": "v1.2.0-657-g9cc4fdb", + "Rev": "9cc4fdbde2304827ffdbc7896f49db40c5536600" }, { "ImportPath": "google.golang.org/grpc/keepalive", - "Comment": "v1.2.0-649-gccd64cf", - "Rev": "ccd64cfcfa597f31f6161da9fc101885b1eadc2f" + "Comment": "v1.2.0-657-g9cc4fdb", + "Rev": "9cc4fdbde2304827ffdbc7896f49db40c5536600" }, { "ImportPath": "google.golang.org/grpc/metadata", - "Comment": "v1.2.0-649-gccd64cf", - "Rev": "ccd64cfcfa597f31f6161da9fc101885b1eadc2f" + "Comment": "v1.2.0-657-g9cc4fdb", + "Rev": "9cc4fdbde2304827ffdbc7896f49db40c5536600" }, { "ImportPath": "google.golang.org/grpc/naming", - "Comment": "v1.2.0-649-gccd64cf", - "Rev": "ccd64cfcfa597f31f6161da9fc101885b1eadc2f" + "Comment": "v1.2.0-657-g9cc4fdb", + "Rev": "9cc4fdbde2304827ffdbc7896f49db40c5536600" }, { "ImportPath": "google.golang.org/grpc/peer", - "Comment": "v1.2.0-649-gccd64cf", - "Rev": "ccd64cfcfa597f31f6161da9fc101885b1eadc2f" + "Comment": "v1.2.0-657-g9cc4fdb", + "Rev": "9cc4fdbde2304827ffdbc7896f49db40c5536600" }, { "ImportPath": "google.golang.org/grpc/resolver", - "Comment": "v1.2.0-649-gccd64cf", - "Rev": "ccd64cfcfa597f31f6161da9fc101885b1eadc2f" + "Comment": "v1.2.0-657-g9cc4fdb", + "Rev": "9cc4fdbde2304827ffdbc7896f49db40c5536600" }, { "ImportPath": "google.golang.org/grpc/resolver/dns", - "Comment": "v1.2.0-649-gccd64cf", - "Rev": "ccd64cfcfa597f31f6161da9fc101885b1eadc2f" + "Comment": "v1.2.0-657-g9cc4fdb", + "Rev": "9cc4fdbde2304827ffdbc7896f49db40c5536600" }, { "ImportPath": "google.golang.org/grpc/resolver/passthrough", - "Comment": "v1.2.0-649-gccd64cf", - "Rev": "ccd64cfcfa597f31f6161da9fc101885b1eadc2f" + "Comment": "v1.2.0-657-g9cc4fdb", + "Rev": "9cc4fdbde2304827ffdbc7896f49db40c5536600" }, { "ImportPath": "google.golang.org/grpc/stats", - "Comment": "v1.2.0-649-gccd64cf", - "Rev": "ccd64cfcfa597f31f6161da9fc101885b1eadc2f" + "Comment": "v1.2.0-657-g9cc4fdb", + "Rev": "9cc4fdbde2304827ffdbc7896f49db40c5536600" }, { "ImportPath": "google.golang.org/grpc/status", - "Comment": "v1.2.0-649-gccd64cf", - "Rev": "ccd64cfcfa597f31f6161da9fc101885b1eadc2f" + "Comment": "v1.2.0-657-g9cc4fdb", + "Rev": 
"9cc4fdbde2304827ffdbc7896f49db40c5536600" }, { "ImportPath": "google.golang.org/grpc/tap", - "Comment": "v1.2.0-649-gccd64cf", - "Rev": "ccd64cfcfa597f31f6161da9fc101885b1eadc2f" + "Comment": "v1.2.0-657-g9cc4fdb", + "Rev": "9cc4fdbde2304827ffdbc7896f49db40c5536600" }, { "ImportPath": "gopkg.in/gcfg.v1", @@ -2498,1673 +2413,1617 @@ "ImportPath": "gopkg.in/yaml.v2", "Rev": "670d4cfef0544295bc27a114dbac37980d83185a" }, - { - "ImportPath": "honnef.co/go/tools/callgraph", - "Rev": "88497007e8588ea5b6baee991f74a1607e809487" - }, - { - "ImportPath": "honnef.co/go/tools/callgraph/static", - "Rev": "88497007e8588ea5b6baee991f74a1607e809487" - }, - { - "ImportPath": "honnef.co/go/tools/cmd/staticcheck", - "Rev": "88497007e8588ea5b6baee991f74a1607e809487" - }, - { - "ImportPath": "honnef.co/go/tools/deprecated", - "Rev": "88497007e8588ea5b6baee991f74a1607e809487" - }, - { - "ImportPath": "honnef.co/go/tools/functions", - "Rev": "88497007e8588ea5b6baee991f74a1607e809487" - }, - { - "ImportPath": "honnef.co/go/tools/internal/sharedcheck", - "Rev": "88497007e8588ea5b6baee991f74a1607e809487" - }, - { - "ImportPath": "honnef.co/go/tools/lint", - "Rev": "88497007e8588ea5b6baee991f74a1607e809487" - }, - { - "ImportPath": "honnef.co/go/tools/lint/lintdsl", - "Rev": "88497007e8588ea5b6baee991f74a1607e809487" - }, - { - "ImportPath": "honnef.co/go/tools/lint/lintutil", - "Rev": "88497007e8588ea5b6baee991f74a1607e809487" - }, - { - "ImportPath": "honnef.co/go/tools/ssa", - "Rev": "88497007e8588ea5b6baee991f74a1607e809487" - }, - { - "ImportPath": "honnef.co/go/tools/ssa/ssautil", - "Rev": "88497007e8588ea5b6baee991f74a1607e809487" - }, - { - "ImportPath": "honnef.co/go/tools/staticcheck", - "Rev": "88497007e8588ea5b6baee991f74a1607e809487" - }, - { - "ImportPath": "honnef.co/go/tools/staticcheck/vrp", - "Rev": "88497007e8588ea5b6baee991f74a1607e809487" - }, - { - "ImportPath": "honnef.co/go/tools/version", - "Rev": "88497007e8588ea5b6baee991f74a1607e809487" - }, { "ImportPath": "k8s.io/api/admission/v1beta1", - "Rev": "844e4ff6fc376fd48119db68154c98d6af05b2f4" + "Rev": "c36e813271c06cf1d7f907887817a8a9ea2bfed5" }, { "ImportPath": "k8s.io/api/admissionregistration/v1alpha1", - "Rev": "844e4ff6fc376fd48119db68154c98d6af05b2f4" + "Rev": "c36e813271c06cf1d7f907887817a8a9ea2bfed5" }, { "ImportPath": "k8s.io/api/admissionregistration/v1beta1", - "Rev": "844e4ff6fc376fd48119db68154c98d6af05b2f4" + "Rev": "c36e813271c06cf1d7f907887817a8a9ea2bfed5" }, { "ImportPath": "k8s.io/api/apps/v1", - "Rev": "844e4ff6fc376fd48119db68154c98d6af05b2f4" + "Rev": "c36e813271c06cf1d7f907887817a8a9ea2bfed5" }, { "ImportPath": "k8s.io/api/apps/v1beta1", - "Rev": "844e4ff6fc376fd48119db68154c98d6af05b2f4" + "Rev": "c36e813271c06cf1d7f907887817a8a9ea2bfed5" }, { "ImportPath": "k8s.io/api/apps/v1beta2", - "Rev": "844e4ff6fc376fd48119db68154c98d6af05b2f4" + "Rev": "c36e813271c06cf1d7f907887817a8a9ea2bfed5" }, { "ImportPath": "k8s.io/api/authentication/v1", - "Rev": "844e4ff6fc376fd48119db68154c98d6af05b2f4" + "Rev": "c36e813271c06cf1d7f907887817a8a9ea2bfed5" }, { "ImportPath": "k8s.io/api/authentication/v1beta1", - "Rev": "844e4ff6fc376fd48119db68154c98d6af05b2f4" + "Rev": "c36e813271c06cf1d7f907887817a8a9ea2bfed5" }, { "ImportPath": "k8s.io/api/authorization/v1", - "Rev": "844e4ff6fc376fd48119db68154c98d6af05b2f4" + "Rev": "c36e813271c06cf1d7f907887817a8a9ea2bfed5" }, { "ImportPath": "k8s.io/api/authorization/v1beta1", - "Rev": "844e4ff6fc376fd48119db68154c98d6af05b2f4" + "Rev": "c36e813271c06cf1d7f907887817a8a9ea2bfed5" }, { 
"ImportPath": "k8s.io/api/autoscaling/v1", - "Rev": "844e4ff6fc376fd48119db68154c98d6af05b2f4" + "Rev": "c36e813271c06cf1d7f907887817a8a9ea2bfed5" }, { "ImportPath": "k8s.io/api/autoscaling/v2beta1", - "Rev": "844e4ff6fc376fd48119db68154c98d6af05b2f4" + "Rev": "c36e813271c06cf1d7f907887817a8a9ea2bfed5" }, { "ImportPath": "k8s.io/api/batch/v1", - "Rev": "844e4ff6fc376fd48119db68154c98d6af05b2f4" + "Rev": "c36e813271c06cf1d7f907887817a8a9ea2bfed5" }, { "ImportPath": "k8s.io/api/batch/v1beta1", - "Rev": "844e4ff6fc376fd48119db68154c98d6af05b2f4" + "Rev": "c36e813271c06cf1d7f907887817a8a9ea2bfed5" }, { "ImportPath": "k8s.io/api/batch/v2alpha1", - "Rev": "844e4ff6fc376fd48119db68154c98d6af05b2f4" + "Rev": "c36e813271c06cf1d7f907887817a8a9ea2bfed5" }, { "ImportPath": "k8s.io/api/certificates/v1beta1", - "Rev": "844e4ff6fc376fd48119db68154c98d6af05b2f4" + "Rev": "c36e813271c06cf1d7f907887817a8a9ea2bfed5" }, { "ImportPath": "k8s.io/api/coordination/v1beta1", - "Rev": "844e4ff6fc376fd48119db68154c98d6af05b2f4" + "Rev": "c36e813271c06cf1d7f907887817a8a9ea2bfed5" }, { "ImportPath": "k8s.io/api/core/v1", - "Rev": "844e4ff6fc376fd48119db68154c98d6af05b2f4" + "Rev": "c36e813271c06cf1d7f907887817a8a9ea2bfed5" }, { "ImportPath": "k8s.io/api/events/v1beta1", - "Rev": "844e4ff6fc376fd48119db68154c98d6af05b2f4" + "Rev": "c36e813271c06cf1d7f907887817a8a9ea2bfed5" }, { "ImportPath": "k8s.io/api/extensions/v1beta1", - "Rev": "844e4ff6fc376fd48119db68154c98d6af05b2f4" + "Rev": "c36e813271c06cf1d7f907887817a8a9ea2bfed5" }, { "ImportPath": "k8s.io/api/imagepolicy/v1alpha1", - "Rev": "844e4ff6fc376fd48119db68154c98d6af05b2f4" + "Rev": "c36e813271c06cf1d7f907887817a8a9ea2bfed5" }, { "ImportPath": "k8s.io/api/networking/v1", - "Rev": "844e4ff6fc376fd48119db68154c98d6af05b2f4" + "Rev": "c36e813271c06cf1d7f907887817a8a9ea2bfed5" }, { "ImportPath": "k8s.io/api/policy/v1beta1", - "Rev": "844e4ff6fc376fd48119db68154c98d6af05b2f4" + "Rev": "c36e813271c06cf1d7f907887817a8a9ea2bfed5" }, { "ImportPath": "k8s.io/api/rbac/v1", - "Rev": "844e4ff6fc376fd48119db68154c98d6af05b2f4" + "Rev": "c36e813271c06cf1d7f907887817a8a9ea2bfed5" }, { "ImportPath": "k8s.io/api/rbac/v1alpha1", - "Rev": "844e4ff6fc376fd48119db68154c98d6af05b2f4" + "Rev": "c36e813271c06cf1d7f907887817a8a9ea2bfed5" }, { "ImportPath": "k8s.io/api/rbac/v1beta1", - "Rev": "844e4ff6fc376fd48119db68154c98d6af05b2f4" + "Rev": "c36e813271c06cf1d7f907887817a8a9ea2bfed5" }, { "ImportPath": "k8s.io/api/scheduling/v1alpha1", - "Rev": "844e4ff6fc376fd48119db68154c98d6af05b2f4" + "Rev": "c36e813271c06cf1d7f907887817a8a9ea2bfed5" }, { "ImportPath": "k8s.io/api/scheduling/v1beta1", - "Rev": "844e4ff6fc376fd48119db68154c98d6af05b2f4" + "Rev": "c36e813271c06cf1d7f907887817a8a9ea2bfed5" }, { "ImportPath": "k8s.io/api/settings/v1alpha1", - "Rev": "844e4ff6fc376fd48119db68154c98d6af05b2f4" + "Rev": "c36e813271c06cf1d7f907887817a8a9ea2bfed5" }, { "ImportPath": "k8s.io/api/storage/v1", - "Rev": "844e4ff6fc376fd48119db68154c98d6af05b2f4" + "Rev": "c36e813271c06cf1d7f907887817a8a9ea2bfed5" }, { "ImportPath": "k8s.io/api/storage/v1alpha1", - "Rev": "844e4ff6fc376fd48119db68154c98d6af05b2f4" + "Rev": "c36e813271c06cf1d7f907887817a8a9ea2bfed5" }, { "ImportPath": "k8s.io/api/storage/v1beta1", - "Rev": "844e4ff6fc376fd48119db68154c98d6af05b2f4" + "Rev": "c36e813271c06cf1d7f907887817a8a9ea2bfed5" }, { "ImportPath": "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": 
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": "k8s.io/apiextensions-apiserver/pkg/apiserver", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": "k8s.io/apiextensions-apiserver/pkg/apiserver/conversion", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": "k8s.io/apiextensions-apiserver/pkg/apiserver/validation", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": "k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": "k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/scheme", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": "k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": "k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": "k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { 
"ImportPath": "k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": "k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/internalinterfaces", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": "k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": "k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": "k8s.io/apiextensions-apiserver/pkg/cmd/server/options", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": "k8s.io/apiextensions-apiserver/pkg/cmd/server/testing", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": "k8s.io/apiextensions-apiserver/pkg/controller/establish", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": "k8s.io/apiextensions-apiserver/pkg/controller/finalizer", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": "k8s.io/apiextensions-apiserver/pkg/controller/status", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": "k8s.io/apiextensions-apiserver/pkg/crdserverscheme", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": "k8s.io/apiextensions-apiserver/pkg/features", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": "k8s.io/apiextensions-apiserver/pkg/registry/customresource", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": "k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": "k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": "k8s.io/apiextensions-apiserver/test/integration/fixtures", - "Rev": "3b5e52442136f12f5685e5fc1b48b53c0fd6846d" + "Rev": "d5f258d99968ab84e28c5d551a120ed9d5eca267" }, { "ImportPath": "k8s.io/apimachinery/pkg/api/equality", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/api/errors", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/api/meta", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/api/meta/table", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": 
"k8s.io/apimachinery/pkg/api/resource", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/api/validation", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/api/validation/path", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/internalversion", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1/validation", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1beta1", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/conversion", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/conversion/queryparams", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/fields", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/labels", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/schema", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/json", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/protobuf", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/recognizer", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/streaming", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/versioning", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": 
"bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/selection", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/types", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/cache", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/clock", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/diff", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/duration", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/errors", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/framer", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/httpstream", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/httpstream/spdy", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/intstr", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/json", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/mergepatch", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/naming", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/net", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/proxy", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/rand", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/remotecommand", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/runtime", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/sets", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/strategicpatch", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/uuid", - "Rev": 
"d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/validation", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/validation/field", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/wait", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/waitgroup", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/yaml", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/version", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/pkg/watch", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/third_party/forked/golang/json", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/third_party/forked/golang/netutil", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apimachinery/third_party/forked/golang/reflect", - "Rev": "d2dcbe2fd200c60cda18ed5ac50dbcbfa9506f9b" + "Rev": "bd0b8021277293fc71a2f800f9ec5901cc676269" }, { "ImportPath": "k8s.io/apiserver/pkg/admission", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/admission/configuration", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/admission/initializer", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/admission/metrics", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/admission/plugin/initialization", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/admission/plugin/webhook/config", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/admission/plugin/webhook/errors", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": 
"95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/admission/plugin/webhook/generic", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/admission/plugin/webhook/mutating", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/admission/plugin/webhook/namespace", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/admission/plugin/webhook/request", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/admission/plugin/webhook/rules", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/admission/plugin/webhook/validating", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/apis/apiserver", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/apis/apiserver/install", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/apis/apiserver/v1alpha1", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/apis/audit", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/apis/audit/install", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/apis/audit/v1", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/apis/audit/v1alpha1", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/apis/audit/v1beta1", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/apis/audit/validation", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/audit", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/audit/policy", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/authentication/authenticator", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/authentication/authenticatorfactory", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/authentication/group", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/authentication/request/anonymous", - "Rev": 
"311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/authentication/request/bearertoken", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/authentication/request/headerrequest", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/authentication/request/union", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/authentication/request/websocket", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/authentication/request/x509", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/authentication/serviceaccount", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/authentication/token/tokenfile", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/authentication/user", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/authorization/authorizer", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/authorization/authorizerfactory", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/authorization/union", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/endpoints", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/endpoints/discovery", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/endpoints/filters", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/endpoints/handlers", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/endpoints/handlers/negotiation", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/endpoints/metrics", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/endpoints/openapi", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/endpoints/request", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { 
"ImportPath": "k8s.io/apiserver/pkg/features", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/registry/generic", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/registry/generic/registry", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/registry/rest", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/server", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/server/filters", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/server/healthz", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/server/httplog", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/server/mux", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/server/options", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/server/resourceconfig", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/server/routes", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/server/routes/data/swagger", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/server/storage", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/storage", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/storage/cacher", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/storage/errors", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/storage/etcd", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/storage/etcd/metrics", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/storage/etcd/util", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/storage/etcd3", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/storage/names", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": 
"95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/storage/storagebackend", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/storage/storagebackend/factory", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/storage/value", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/util/dryrun", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/util/feature", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/util/flag", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/util/flushwriter", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/util/logs", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/util/openapi", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/util/trace", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/util/webhook", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/pkg/util/wsstream", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/plugin/pkg/audit/buffered", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/plugin/pkg/audit/log", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/plugin/pkg/audit/truncate", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/plugin/pkg/audit/webhook", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/plugin/pkg/authenticator/token/webhook", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/apiserver/plugin/pkg/authorizer/webhook", - "Rev": "311a161d63cd3ac8598207b9bd47a3e3fa2d30f3" + "Rev": "95a189bc8bc7bd39f01cd2fede74a16244bd6213" }, { "ImportPath": "k8s.io/client-go/discovery", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/discovery/cached", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/discovery/fake", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/dynamic", - "Rev": 
"ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/admissionregistration", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/admissionregistration/v1alpha1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/admissionregistration/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/apps", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/apps/v1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/apps/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/apps/v1beta2", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/autoscaling", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/autoscaling/v1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/autoscaling/v2beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/batch", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/batch/v1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/batch/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/batch/v2alpha1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/certificates", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/certificates/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/coordination", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/coordination/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/core", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/core/v1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + 
"Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/events", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/events/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/extensions", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/extensions/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/internalinterfaces", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/networking", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/networking/v1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/policy", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/policy/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/rbac", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/rbac/v1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/rbac/v1alpha1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/rbac/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/scheduling", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/scheduling/v1alpha1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/scheduling/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/settings", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/settings/v1alpha1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/storage", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/storage/v1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/informers/storage/v1alpha1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": 
"k8s.io/client-go/informers/storage/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/fake", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/scheme", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1/fake", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1beta2", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authentication/v1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authentication/v1/fake", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authentication/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authorization/v1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authorization/v1/fake", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": 
"k8s.io/client-go/kubernetes/typed/authorization/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v1/fake", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v2alpha1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/certificates/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/coordination/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/core/v1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/core/v1/fake", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/events/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/events/v1beta1/fake", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, 
{ "ImportPath": "k8s.io/client-go/kubernetes/typed/extensions/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/networking/v1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/networking/v1/fake", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/policy/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1/fake", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/settings/v1alpha1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1/fake", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { 
"ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1alpha1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/listers/admissionregistration/v1alpha1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/listers/admissionregistration/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/listers/apps/v1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/listers/apps/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/listers/apps/v1beta2", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/listers/autoscaling/v1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/listers/autoscaling/v2beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/listers/batch/v1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/listers/batch/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/listers/batch/v2alpha1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/listers/certificates/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/listers/coordination/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/listers/core/v1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/listers/events/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/listers/extensions/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/listers/networking/v1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/listers/policy/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": 
"18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/listers/rbac/v1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/listers/rbac/v1alpha1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/listers/rbac/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/listers/scheduling/v1alpha1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/listers/scheduling/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/listers/settings/v1alpha1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/listers/storage/v1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/listers/storage/v1alpha1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/listers/storage/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/pkg/apis/clientauthentication", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/pkg/version", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/plugin/pkg/client/auth/exec", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/plugin/pkg/client/auth/gcp", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/rest", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/rest/watch", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/restmapper", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/scale", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/scale/scheme", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/scale/scheme/appsint", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": 
"k8s.io/client-go/scale/scheme/appsv1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/scale/scheme/appsv1beta2", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/scale/scheme/autoscalingv1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/scale/scheme/extensionsint", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/scale/scheme/extensionsv1beta1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/testing", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/third_party/forked/golang/template", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/tools/auth", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/tools/bootstrap/token/api", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/tools/bootstrap/token/util", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/tools/cache", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/tools/clientcmd", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/tools/clientcmd/api", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/tools/clientcmd/api/latest", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/tools/clientcmd/api/v1", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/tools/metrics", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/tools/pager", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/tools/record", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/tools/reference", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/tools/remotecommand", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/transport", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/transport/spdy", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": 
"18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/util/buffer", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/util/cert", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/util/certificate", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/util/certificate/csr", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/util/connrotation", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/util/exec", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/util/flowcontrol", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/util/homedir", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/util/integer", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/util/jsonpath", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/util/retry", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/client-go/util/workqueue", - "Rev": "ebd97730f0f949d7a6d45f1045e3d9c2071e6195" + "Rev": "18df74b078ebcf0bbf362172e5ab7a53bef6cc5d" }, { "ImportPath": "k8s.io/kube-aggregator/pkg/apis/apiregistration", - "Rev": "014a64e5f64f13c19dfb6bcabe482602c292ea41" + "Rev": "0df81b212d44c3c85a7cd86905fe1701367cb5a2" }, { "ImportPath": "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1", - "Rev": "014a64e5f64f13c19dfb6bcabe482602c292ea41" + "Rev": "0df81b212d44c3c85a7cd86905fe1701367cb5a2" }, { "ImportPath": "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1", - "Rev": "014a64e5f64f13c19dfb6bcabe482602c292ea41" + "Rev": "0df81b212d44c3c85a7cd86905fe1701367cb5a2" }, { "ImportPath": "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset", - "Rev": "014a64e5f64f13c19dfb6bcabe482602c292ea41" + "Rev": "0df81b212d44c3c85a7cd86905fe1701367cb5a2" }, { "ImportPath": "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme", - "Rev": "014a64e5f64f13c19dfb6bcabe482602c292ea41" + "Rev": "0df81b212d44c3c85a7cd86905fe1701367cb5a2" }, { "ImportPath": "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1", - "Rev": "014a64e5f64f13c19dfb6bcabe482602c292ea41" + "Rev": "0df81b212d44c3c85a7cd86905fe1701367cb5a2" }, { "ImportPath": "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1", - "Rev": "014a64e5f64f13c19dfb6bcabe482602c292ea41" + "Rev": "0df81b212d44c3c85a7cd86905fe1701367cb5a2" }, { "ImportPath": "k8s.io/kube-openapi/pkg/builder", @@ -4188,2253 +4047,2253 @@ }, { "ImportPath": "k8s.io/kubernetes/cmd/kube-proxy/app", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": 
"v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/cmd/kubeadm/app/constants", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/cmd/kubeadm/app/util", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/cmd/kubelet/app", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/cmd/kubelet/app/options", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/events", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/legacyscheme", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/pod", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/ref", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/resource", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/service", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/testapi", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/v1/node", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/v1/pod", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - 
"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/v1/resource", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/v1/service", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admission", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admission/install", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admission/v1beta1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admissionregistration", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admissionregistration/install", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/apps", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/install", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/v1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/v1beta1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", 
+ "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/v1beta2", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication/install", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication/v1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication/v1beta1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/install", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/v1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/v1beta1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/install", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/v1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/apis/batch", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/install", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/v1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/v1beta1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/certificates", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/certificates/install", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/certificates/v1beta1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/componentconfig", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/coordination", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/coordination/install", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/coordination/v1beta1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/core", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/core/helper", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": 
"699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/core/helper/qos", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/core/install", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/core/pods", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/core/v1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/core/v1/helper", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/core/v1/validation", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/core/validation", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/events", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/events/install", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/events/v1beta1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/install", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": 
"b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/validation", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/imagepolicy", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/imagepolicy/install", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/networking", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/networking/install", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/networking/v1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/policy", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/policy/install", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/policy/v1beta1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/policy/validation", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/install", - "Comment": 
"v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/v1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/v1beta1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/scheduling", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/scheduling/install", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/scheduling/v1beta1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/settings", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/settings/install", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/settings/v1alpha1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/storage", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/install", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/util", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": 
"v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/v1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/v1alpha1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/v1beta1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/capabilities", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/chaosclient", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", 
+ "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/coordination/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": 
"b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/conditions", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/autoscaling", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/autoscaling/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/certificates", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/certificates/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/coordination", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/coordination/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/networking", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/networking/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/policy", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/policy/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/scheduling", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/scheduling/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/settings", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/settings/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/admissionregistration/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/apps/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": 
"699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/autoscaling/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/batch/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/certificates/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/coordination/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/core/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/extensions/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/networking/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/policy/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/rbac/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/scheduling/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/settings/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/storage/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" 
}, { "ImportPath": "k8s.io/kubernetes/pkg/client/metrics/prometheus", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/aws", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/azure", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/gce", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/openstack", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": 
"b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/photon", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/deployment/util", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/nodelifecycle", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/util/node", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/events", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/expand/cache", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/persistentvolume", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": 
"v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider/aws", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider/azure", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider/gcp", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider/rancher", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider/secrets", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/features", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/fieldpath", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/apps", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/genericclioptions", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { 
"ImportPath": "k8s.io/kubernetes/pkg/kubectl/scheme", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/util", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/util/hash", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/util/slice", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/cri", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/scheme", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1beta1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/validation", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1alpha1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cadvisor", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/certificate", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/checkpoint", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/checkpointmanager", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cloudresource", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cm", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cm/cpuset", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cm/util", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/config", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/configmap", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/container", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/container/testing", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/cm", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/metrics", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/network", - "Comment": 
"v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/network/metrics", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/remote", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/envvars", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/events", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/eviction", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/eviction/api", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/images", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - 
"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/files", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/panic", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kuberuntime", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/leaky", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/lifecycle", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/logs", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/metrics", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/metrics/collectors", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + 
"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/mountpod", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/network/dns", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/nodestatus", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/pleg", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/pod", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/preemption", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/prober", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/prober/results", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/qos", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/remote", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/secret", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/server", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/server/portforward", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/kubelet/server/remotecommand", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/server/stats", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/server/streaming", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/stats", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/status", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/sysctl", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/token", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/types", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/cache", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/format", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/ioutils", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/manager", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": 
"699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/queue", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/sliceutils", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/store", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/volumemanager", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/volumemanager/populator", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/winstats", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubemark", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/master/ports", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/printers", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + 
"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/printers/internalversion", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/probe", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/probe/exec", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/probe/http", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/probe/tcp", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/scheme", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/validation", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/config", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/healthcheck", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/iptables", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/proxy/ipvs", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/metrics", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/userspace", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/util", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/winkernel", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/winuserspace", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/service/allocator", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/service/ipallocator", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/validation", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/scheduler/algorithm", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/scheduler/api", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/scheduler/cache", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": 
"699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/scheduler/metrics", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/scheduler/util", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/scheduler/volumebinder", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/apparmor", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/securitycontext", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/serviceaccount", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/ssh", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/async", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/bandwidth", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/config", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": 
"b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/configz", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/conntrack", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/dbus", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/ebtables", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/env", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/file", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/filesystem", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/flag", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/flock", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/goroutinemap", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/hash", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/io", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/ipconfig", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" 
+ "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/ipset", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/iptables", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/ipvs", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/keymutex", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/labels", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/metrics", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/mount", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/net", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/net/sets", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/netsh", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/node", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/nsenter", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/oom", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/parsers", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": 
"699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/pod", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/procfs", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/removeall", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/resizefs", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/resourcecontainer", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/rlimit", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/selinux", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/slice", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/strings", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/sysctl", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/system", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/tail", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/taints", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/version", - "Comment": 
"v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/version", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/version/verflag", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/aws_ebs", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/azure_dd", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/azure_file", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/cephfs", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/cinder", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/configmap", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/csi", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/csi/labelmanager", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/downwardapi", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/empty_dir", - "Comment": "v1.12.0-alpha.1-318-g699fed2733", - "Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2" + "Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa", + "Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103" 
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/pkg/volume/fc",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/pkg/volume/flexvolume",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/pkg/volume/flocker",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/pkg/volume/gce_pd",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/pkg/volume/git_repo",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/pkg/volume/glusterfs",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/pkg/volume/host_path",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/pkg/volume/iscsi",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/pkg/volume/local",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/pkg/volume/nfs",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/pkg/volume/photon_pd",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/pkg/volume/portworx",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/pkg/volume/projected",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/pkg/volume/quobyte",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/pkg/volume/rbd",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/pkg/volume/scaleio",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/pkg/volume/secret",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/pkg/volume/storageos",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/pkg/volume/util",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/pkg/volume/util/fs",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/pkg/volume/util/operationexecutor",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/pkg/volume/util/recyclerclient",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/pkg/volume/util/types",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/pkg/volume/util/volumepathhandler",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/pkg/volume/validation",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/pkg/volume/vsphere_volume",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/pkg/windows/service",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/test/e2e/common",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/test/e2e/framework",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/test/e2e/framework/metrics",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/test/e2e/generated",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/test/e2e/manifest",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/test/e2e/perftype",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/test/utils",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/test/utils/image",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/kubernetes/third_party/forked/golang/expansion",
-			"Comment": "v1.12.0-alpha.1-318-g699fed2733",
-			"Rev": "699fed273342e5dc4287d16452fb52b82a4fbcd2"
+			"Comment": "v1.12.0-alpha.1-318-gb7fcf0b2fa",
+			"Rev": "b7fcf0b2fab5f2b4b3f1620720777720bd8b4103"
 		},
 		{
 			"ImportPath": "k8s.io/metrics/pkg/apis/metrics",
-			"Rev": "09fd2fe72e4bafa3f7c030996ee7be7b48963080"
+			"Rev": "ce417d1f8970235ce87f30431730ba755c3ff230"
 		},
 		{
 			"ImportPath": "k8s.io/metrics/pkg/apis/metrics/v1alpha1",
-			"Rev": "09fd2fe72e4bafa3f7c030996ee7be7b48963080"
+			"Rev": "ce417d1f8970235ce87f30431730ba755c3ff230"
 		},
 		{
 			"ImportPath": "k8s.io/metrics/pkg/apis/metrics/v1beta1",
-			"Rev": "09fd2fe72e4bafa3f7c030996ee7be7b48963080"
+			"Rev": "ce417d1f8970235ce87f30431730ba755c3ff230"
 		},
 		{
 			"ImportPath": "k8s.io/metrics/pkg/client/clientset/versioned",
-			"Rev": "09fd2fe72e4bafa3f7c030996ee7be7b48963080"
+			"Rev": "ce417d1f8970235ce87f30431730ba755c3ff230"
 		},
 		{
 			"ImportPath": "k8s.io/metrics/pkg/client/clientset/versioned/fake",
-			"Rev": "09fd2fe72e4bafa3f7c030996ee7be7b48963080"
+			"Rev": "ce417d1f8970235ce87f30431730ba755c3ff230"
 		},
 		{
 			"ImportPath": "k8s.io/metrics/pkg/client/clientset/versioned/scheme",
-			"Rev": "09fd2fe72e4bafa3f7c030996ee7be7b48963080"
+			"Rev": "ce417d1f8970235ce87f30431730ba755c3ff230"
 		},
 		{
 			"ImportPath": "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1",
-			"Rev": "09fd2fe72e4bafa3f7c030996ee7be7b48963080"
+			"Rev": "ce417d1f8970235ce87f30431730ba755c3ff230"
 		},
 		{
 			"ImportPath": "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/fake",
-			"Rev": "09fd2fe72e4bafa3f7c030996ee7be7b48963080"
+			"Rev": "ce417d1f8970235ce87f30431730ba755c3ff230"
 		},
 		{
 			"ImportPath": "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1",
-			"Rev": "09fd2fe72e4bafa3f7c030996ee7be7b48963080"
+			"Rev": "ce417d1f8970235ce87f30431730ba755c3ff230"
 		},
 		{
 			"ImportPath": "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/fake",
-			"Rev": "09fd2fe72e4bafa3f7c030996ee7be7b48963080"
+			"Rev": "ce417d1f8970235ce87f30431730ba755c3ff230"
 		},
 		{
 			"ImportPath": "k8s.io/utils/clock",
diff --git a/vertical-pod-autoscaler/README.md b/vertical-pod-autoscaler/README.md
index f613a9c1c5fb..9012701aeed5 100644
--- a/vertical-pod-autoscaler/README.md
+++ b/vertical-pod-autoscaler/README.md
@@ -8,6 +8,8 @@ When configured, it will set the requests automatically based on usage and
 thus allow proper scheduling onto nodes so that appropriate resource amount is
 available for each pod.
 
+It can both down-scale pods that are over-requesting resources and up-scale pods that are under-requesting resources, based on their usage over time.
+
 Autoscaling is configured with a
 [Custom Resource Definition object](https://kubernetes.io/docs/concepts/api-extension/custom-resources/)
 called [VerticalPodAutoscaler](https://github.com/kubernetes/autoscaler/blob/master/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1/types.go).
@@ -32,7 +34,7 @@ the old installation of VPA.
 * It is strongly recommended to use Kubernetes 1.9 or greater.
   Your cluster must support MutatingAdmissionWebhooks, which are enabled by default
   since 1.9 ([#58255](https://github.com/kubernetes/kubernetes/pull/58255)).
-  Read more about [VPA Admission Webhook](./admission-controller/README.md#running).
+  Read more about [VPA Admission Webhook](./pkg/admission-controller/README.md#running).
 * `kubectl` should be connected to the cluster you want to install VPA in.
 * The metrics server must be deployed in your cluster. Read more about [Metrics Server](https://github.com/kubernetes-incubator/metrics-server).
 * If you are using a GKE Kubernetes cluster, you will need to grant your current Google
@@ -87,7 +89,7 @@ There are three modes in which *VPAs* operate:
   the `"Auto"` mode.
 * `"Recreate"`: VPA assigns resource requests on pod creation as well as updates
   them on existing pods by evicting them when the requested resources differ significantly
-  from the new recommendation (respecting the Pod Distruption Budget, if defined).
+  from the new recommendation (respecting the Pod Disruption Budget, if defined).
   This mode should be used rarely, only if you need to ensure that the pods are restarted
   whenever the resource request changes. Otherwise prefer the `"Auto"` mode which may take
   advantage of restart free updates once they are available.
@@ -179,7 +181,7 @@ will get resources as defined in your controllers (i.e.
 deployment or replicaset) and not according to previous recommendations made by VPA.
 
 To stop using Vertical Pod Autoscaling in your cluster:
-* If runnning on GKE, clean up role bindings created in [Prerequisites](#prerequisites):
+* If running on GKE, clean up role bindings created in [Prerequisites](#prerequisites):
 ```
 kubectl delete clusterrolebinding myname-cluster-admin-binding
 ```
diff --git a/vertical-pod-autoscaler/builder/Dockerfile b/vertical-pod-autoscaler/builder/Dockerfile
new file mode 100644
index 000000000000..fa7227d416f1
--- /dev/null
+++ b/vertical-pod-autoscaler/builder/Dockerfile
@@ -0,0 +1,24 @@
+# Copyright 2018 The Kubernetes Authors. All rights reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM golang:1.11.1
+LABEL maintainer="Beata Skiba "
+
+ENV GOPATH /gopath/
+ENV PATH $GOPATH/bin:$PATH
+
+RUN go version
+RUN go get github.com/tools/godep
+RUN godep version
+CMD ["/bin/bash"]
diff --git a/vertical-pod-autoscaler/builder/README.md b/vertical-pod-autoscaler/builder/README.md
new file mode 100644
index 000000000000..a4db1c400abe
--- /dev/null
+++ b/vertical-pod-autoscaler/builder/README.md
@@ -0,0 +1 @@
+A Docker image that is used to build vpa-related binaries.
diff --git a/vertical-pod-autoscaler/common/version.go b/vertical-pod-autoscaler/common/version.go
index de0e06234bd8..bd34d74fa284 100644
--- a/vertical-pod-autoscaler/common/version.go
+++ b/vertical-pod-autoscaler/common/version.go
@@ -17,4 +17,4 @@ limitations under the License.
 package common
 
 // VerticalPodAutoscalerVersion is the version of VPA.
-const VerticalPodAutoscalerVersion = "0.2.0"
+const VerticalPodAutoscalerVersion = "0.3.0"
diff --git a/vertical-pod-autoscaler/deploy/vpa-beta-crd.yaml b/vertical-pod-autoscaler/deploy/vpa-beta-crd.yaml
new file mode 100644
index 000000000000..64ef85a5a5e6
--- /dev/null
+++ b/vertical-pod-autoscaler/deploy/vpa-beta-crd.yaml
@@ -0,0 +1,47 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: verticalpodautoscalers.autoscaling.k8s.io
+spec:
+  group: autoscaling.k8s.io
+  version: v1beta1
+  scope: Namespaced
+  names:
+    plural: verticalpodautoscalers
+    singular: verticalpodautoscaler
+    kind: VerticalPodAutoscaler
+    shortNames:
+    - vpa
+  validation:
+    # openAPIV3Schema is the schema for validating custom objects.
+    openAPIV3Schema:
+      properties:
+        spec:
+          required:
+          - selector
+          properties:
+            selector:
+              type: object
+            updatePolicy:
+              properties:
+                updateMode:
+                  type: string
+            resourcePolicy:
+              properties:
+                containerPolicies:
+                  type: array
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: verticalpodautoscalercheckpoints.autoscaling.k8s.io
+spec:
+  group: autoscaling.k8s.io
+  version: v1beta1
+  scope: Namespaced
+  names:
+    plural: verticalpodautoscalercheckpoints
+    singular: verticalpodautoscalercheckpoint
+    kind: VerticalPodAutoscalerCheckpoint
+    shortNames:
+    - vpacheckpoint
diff --git a/vertical-pod-autoscaler/deploy/vpa-rbac.yaml b/vertical-pod-autoscaler/deploy/vpa-rbac.yaml
index cb56f962698f..7a0ae5fa278c 100644
--- a/vertical-pod-autoscaler/deploy/vpa-rbac.yaml
+++ b/vertical-pod-autoscaler/deploy/vpa-rbac.yaml
@@ -21,11 +21,19 @@ rules:
   resources:
   - pods
   - nodes
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
   - events
   verbs:
   - get
   - list
   - watch
+  - create
 - apiGroups:
   - "poc.autoscaling.k8s.io"
   resources:
@@ -35,6 +43,15 @@ rules:
   - list
   - watch
   - patch
+- apiGroups:
+  - "autoscaling.k8s.io"
+  resources:
+  - verticalpodautoscalers
+  verbs:
+  - get
+  - list
+  - watch
+  - patch
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
@@ -52,6 +69,17 @@ rules:
   - create
   - patch
   - delete
+- apiGroups:
+  - "autoscaling.k8s.io"
+  resources:
+  - verticalpodautoscalercheckpoints
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - patch
+  - delete
 - apiGroups:
   - ""
   resources:
@@ -121,9 +149,45 @@ subjects:
   namespace: kube-system
 ---
 apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: system:controllers-reader
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - replicationcontrollers
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - apps
+  resources:
+  - statefulsets
+  - replicasets
+  verbs:
+  - get
+  - list
+  - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
-  name: system:replicasets-reader
+  name: system:vpa-updater-controllers-reader-binding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: system:controllers-reader
+subjects:
+- kind: ServiceAccount
+  name: vpa-updater
+  namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: system:vpa-evictionter-binding
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
@@ -171,6 +235,14 @@ rules:
   - get
   - list
   - watch
+- apiGroups:
+  - "autoscaling.k8s.io"
+  resources:
+  - verticalpodautoscalers
+  verbs:
+  - get
+  - list
+  - watch
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
diff --git a/vertical-pod-autoscaler/e2e/actuation.go b/vertical-pod-autoscaler/e2e/actuation.go
index 2e2141bc01ce..d58e1c94b094 100644
--- a/vertical-pod-autoscaler/e2e/actuation.go
+++ b/vertical-pod-autoscaler/e2e/actuation.go
@@ -22,10 +22,10 @@ import (
 	appsv1 "k8s.io/api/apps/v1"
 	apiv1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
-	vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1"
-	vpa_clientset "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned"
+	vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 
@@ -33,60 +33,86 @@ import (
"github.com/onsi/gomega" ) -var _ = actuationSuiteE2eDescribe("Actuation", func() { +var _ = ActuationSuiteE2eDescribe("Actuation", func() { f := framework.NewDefaultFramework("vertical-pod-autoscaling") ginkgo.It("stops when pods get pending", func() { ginkgo.By("Setting up a hamster deployment") - c := f.ClientSet - ns := f.Namespace.Name + d := SetupHamsterDeployment(f, "100m", "100Mi", defaultHamsterReplicas) - cpuQuantity := parseQuantityOrDie("100m") - memoryQuantity := parseQuantityOrDie("100Mi") + ginkgo.By("Setting up a VPA CRD with ridiculous request") + SetupVPA(f, "9999", vpa_types.UpdateModeAuto) // Request 9999 CPUs to make POD pending - d := newHamsterDeploymentWithResources(f, cpuQuantity, memoryQuantity) - d, err := c.AppsV1().Deployments(ns).Create(d) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = framework.WaitForDeploymentComplete(c, d) + ginkgo.By("Waiting for pods to be restarted and stuck pending") + err := assertPodsPendingForDuration(f.ClientSet, d, 1, 2*time.Minute) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Setting up a VPA CRD with ridiculous request") - config, err := framework.LoadConfig() + }) + + ginkgo.It("never applies recommendations when update mode is Off", func() { + ginkgo.By("Setting up a hamster deployment") + d := SetupHamsterDeployment(f, "100m", "100Mi", defaultHamsterReplicas) + cpuRequest := getCPURequest(d.Spec.Template.Spec) + podList, err := GetHamsterPods(f) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + podSet := MakePodSet(podList) - vpaCRD := newVPA(f, "hamster-vpa", &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "app": "hamster", - }, - }) + ginkgo.By("Setting up a VPA CRD in mode Off") + SetupVPA(f, "200m", vpa_types.UpdateModeOff) - resourceList := apiv1.ResourceList{ - apiv1.ResourceCPU: parseQuantityOrDie("9999"), // Request 9999 CPUs to make POD pending - } + ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String())) + CheckNoPodsEvicted(f, podSet) + ginkgo.By("Forcefully killing one pod") + killPod(f, podList) - vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ - ContainerRecommendations: []vpa_types.RecommendedContainerResources{{ - ContainerName: "hamster", - Target: resourceList, - LowerBound: resourceList, - UpperBound: resourceList, - }}, + ginkgo.By("Checking the requests were not modified") + updatedPodList, err := GetHamsterPods(f) + for _, pod := range updatedPodList.Items { + gomega.Expect(getCPURequest(pod.Spec)).To(gomega.Equal(cpuRequest)) } + }) - vpaClientSet := vpa_clientset.NewForConfigOrDie(config) - vpaClient := vpaClientSet.PocV1alpha1() - _, err = vpaClient.VerticalPodAutoscalers(ns).Create(vpaCRD) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - ginkgo.By("Waiting for pods to be restarted and stuck pending") - - err = assertPodsPendingForDuration(c, d, 1, 2*time.Minute) + ginkgo.It("applies recommendations only on restart when update mode is Initial", func() { + ginkgo.By("Setting up a hamster deployment") + SetupHamsterDeployment(f, "100m", "100Mi", defaultHamsterReplicas) + podList, err := GetHamsterPods(f) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - + podSet := MakePodSet(podList) + + ginkgo.By("Setting up a VPA CRD in mode Initial") + SetupVPA(f, "200m", vpa_types.UpdateModeInitial) + updatedCPURequest := ParseQuantityOrDie("200m") + + ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String())) + 
CheckNoPodsEvicted(f, podSet) + ginkgo.By("Forcefully killing one pod") + killPod(f, podList) + + ginkgo.By("Checking that request was modified after forceful restart") + updatedPodList, err := GetHamsterPods(f) + foundUpdated := 0 + for _, pod := range updatedPodList.Items { + podRequest := getCPURequest(pod.Spec) + framework.Logf("podReq: %v", podRequest) + if podRequest.Cmp(updatedCPURequest) == 0 { + foundUpdated += 1 + } + } + gomega.Expect(foundUpdated).To(gomega.Equal(1)) }) }) +func getCPURequest(podSpec apiv1.PodSpec) resource.Quantity { + return podSpec.Containers[0].Resources.Requests[apiv1.ResourceCPU] +} + +func killPod(f *framework.Framework, podList *apiv1.PodList) { + f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(podList.Items[0].Name, &metav1.DeleteOptions{}) + err := WaitForPodsRestarted(f, podList) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) +} + // assertPodsPendingForDuration checks that at most pendingPodsNum pods are pending for pendingDuration func assertPodsPendingForDuration(c clientset.Interface, deployment *appsv1.Deployment, pendingPodsNum int, pendingDuration time.Duration) error { diff --git a/vertical-pod-autoscaler/e2e/admission_controller.go b/vertical-pod-autoscaler/e2e/admission_controller.go index 875a03d63735..dbbb4230d335 100644 --- a/vertical-pod-autoscaler/e2e/admission_controller.go +++ b/vertical-pod-autoscaler/e2e/admission_controller.go @@ -20,33 +20,33 @@ import ( appsv1 "k8s.io/api/apps/v1" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" "k8s.io/kubernetes/test/e2e/framework" "github.com/onsi/ginkgo" "github.com/onsi/gomega" ) -var _ = admissionControllerE2eDescribe("Admission-controller", func() { +var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { f := framework.NewDefaultFramework("vertical-pod-autoscaling") ginkgo.It("starts pods with new recommended request", func() { - d := newHamsterDeploymentWithResources(f, parseQuantityOrDie("100m") /*cpu*/, parseQuantityOrDie("100Mi") /*memory*/) + d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/) ginkgo.By("Setting up a VPA CRD") - vpaCRD := newVPA(f, "hamster-vpa", &metav1.LabelSelector{ + vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{ MatchLabels: d.Spec.Template.Labels, }) vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ ContainerRecommendations: []vpa_types.RecommendedContainerResources{{ ContainerName: "hamster", Target: apiv1.ResourceList{ - apiv1.ResourceCPU: parseQuantityOrDie("250m"), - apiv1.ResourceMemory: parseQuantityOrDie("200Mi"), + apiv1.ResourceCPU: ParseQuantityOrDie("250m"), + apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"), }, }}, } - installVPA(f, vpaCRD) + InstallVPA(f, vpaCRD) ginkgo.By("Setting up a hamster deployment") podList := startDeploymentPods(f, d) @@ -54,32 +54,32 @@ var _ = admissionControllerE2eDescribe("Admission-controller", func() { // Originally Pods had 100m CPU, 100Mi of memory, but admission controller // should change it to recommended 250m CPU and 200Mi of memory. 
 		for _, pod := range podList.Items {
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(parseQuantityOrDie("250m")))
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(parseQuantityOrDie("200Mi")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
 		}
 	})
 
 	ginkgo.It("caps request to limit set by the user", func() {
-		d := newHamsterDeploymentWithResources(f, parseQuantityOrDie("100m") /*cpu*/, parseQuantityOrDie("100Mi") /*memory*/)
+		d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
 		d.Spec.Template.Spec.Containers[0].Resources.Limits = apiv1.ResourceList{
-			apiv1.ResourceCPU:    parseQuantityOrDie("222m"),
-			apiv1.ResourceMemory: parseQuantityOrDie("123Mi"),
+			apiv1.ResourceCPU:    ParseQuantityOrDie("222m"),
+			apiv1.ResourceMemory: ParseQuantityOrDie("123Mi"),
 		}
 
 		ginkgo.By("Setting up a VPA CRD")
-		vpaCRD := newVPA(f, "hamster-vpa", &metav1.LabelSelector{
+		vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
 			MatchLabels: d.Spec.Template.Labels,
 		})
 		vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
 			ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
 				ContainerName: "hamster",
 				Target: apiv1.ResourceList{
-					apiv1.ResourceCPU:    parseQuantityOrDie("250m"),
-					apiv1.ResourceMemory: parseQuantityOrDie("200Mi"),
+					apiv1.ResourceCPU:    ParseQuantityOrDie("250m"),
+					apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"),
 				},
 			}},
 		}
-		installVPA(f, vpaCRD)
+		InstallVPA(f, vpaCRD)
 
 		ginkgo.By("Setting up a hamster deployment")
 		podList := startDeploymentPods(f, d)
@@ -88,24 +88,24 @@ var _ = admissionControllerE2eDescribe("Admission-controller", func() {
 		// should change it to 222m CPU and 123Mi of memory (as this is the recommendation
 		// capped to the limit set by the user)
 		for _, pod := range podList.Items {
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(parseQuantityOrDie("222m")))
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(parseQuantityOrDie("123Mi")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("222m")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("123Mi")))
 		}
 	})
 
 	ginkgo.It("caps request to max set in VPA", func() {
-		d := newHamsterDeploymentWithResources(f, parseQuantityOrDie("100m") /*cpu*/, parseQuantityOrDie("100Mi") /*memory*/)
+		d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
 
 		ginkgo.By("Setting up a VPA CRD")
-		vpaCRD := newVPA(f, "hamster-vpa", &metav1.LabelSelector{
+		vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
 			MatchLabels: d.Spec.Template.Labels,
 		})
 		vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
 			ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
 				ContainerName: "hamster",
 				Target: apiv1.ResourceList{
-					apiv1.ResourceCPU:    parseQuantityOrDie("250m"),
-					apiv1.ResourceMemory: parseQuantityOrDie("200Mi"),
+					apiv1.ResourceCPU:    ParseQuantityOrDie("250m"),
+					apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"),
 				},
 			}},
 		}
@@ -113,12 +113,12 @@ var _ = admissionControllerE2eDescribe("Admission-controller", func() {
 			ContainerPolicies: []vpa_types.ContainerResourcePolicy{{
 				ContainerName: "hamster",
 				MaxAllowed: apiv1.ResourceList{
-					apiv1.ResourceCPU:    parseQuantityOrDie("233m"),
-					apiv1.ResourceMemory: parseQuantityOrDie("150Mi"),
+					apiv1.ResourceCPU:    ParseQuantityOrDie("233m"),
+					apiv1.ResourceMemory: ParseQuantityOrDie("150Mi"),
 				},
 			}},
 		}
-		installVPA(f, vpaCRD)
+		InstallVPA(f, vpaCRD)
 
 		ginkgo.By("Setting up a hamster deployment")
 		podList := startDeploymentPods(f, d)
@@ -127,24 +127,24 @@ var _ = admissionControllerE2eDescribe("Admission-controller", func() {
 		// should change it to 233m CPU and 150Mi of memory (as this is the recommendation
 		// capped to max specified in VPA)
 		for _, pod := range podList.Items {
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(parseQuantityOrDie("233m")))
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(parseQuantityOrDie("150Mi")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("233m")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("150Mi")))
 		}
 	})
 
 	ginkgo.It("raises request to min set in VPA", func() {
-		d := newHamsterDeploymentWithResources(f, parseQuantityOrDie("100m") /*cpu*/, parseQuantityOrDie("100Mi") /*memory*/)
+		d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
 
 		ginkgo.By("Setting up a VPA CRD")
-		vpaCRD := newVPA(f, "hamster-vpa", &metav1.LabelSelector{
+		vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
 			MatchLabels: d.Spec.Template.Labels,
 		})
 		vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
 			ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
 				ContainerName: "hamster",
 				Target: apiv1.ResourceList{
-					apiv1.ResourceCPU:    parseQuantityOrDie("50m"),
-					apiv1.ResourceMemory: parseQuantityOrDie("60Mi"),
+					apiv1.ResourceCPU:    ParseQuantityOrDie("50m"),
+					apiv1.ResourceMemory: ParseQuantityOrDie("60Mi"),
 				},
 			}},
 		}
@@ -152,12 +152,12 @@ var _ = admissionControllerE2eDescribe("Admission-controller", func() {
 			ContainerPolicies: []vpa_types.ContainerResourcePolicy{{
 				ContainerName: "hamster",
 				MinAllowed: apiv1.ResourceList{
-					apiv1.ResourceCPU:    parseQuantityOrDie("90m"),
-					apiv1.ResourceMemory: parseQuantityOrDie("80Mi"),
+					apiv1.ResourceCPU:    ParseQuantityOrDie("90m"),
+					apiv1.ResourceMemory: ParseQuantityOrDie("80Mi"),
 				},
 			}},
 		}
-		installVPA(f, vpaCRD)
+		InstallVPA(f, vpaCRD)
 
 		ginkgo.By("Setting up a hamster deployment")
 		podList := startDeploymentPods(f, d)
@@ -166,38 +166,38 @@ var _ = admissionControllerE2eDescribe("Admission-controller", func() {
 		// should change it to recommended 90m CPU and 800Mi of memory (as this the
 		// recommendation raised to min specified in VPA)
 		for _, pod := range podList.Items {
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(parseQuantityOrDie("90m")))
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(parseQuantityOrDie("80Mi")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("90m")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("80Mi")))
 		}
 	})
 
 	ginkgo.It("leaves users request when no recommendation", func() {
-		d := newHamsterDeploymentWithResources(f, parseQuantityOrDie("100m") /*cpu*/, parseQuantityOrDie("100Mi") /*memory*/)
+		d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
 
 		ginkgo.By("Setting up a VPA CRD")
-		vpaCRD := newVPA(f, "hamster-vpa", &metav1.LabelSelector{
+		vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
 			MatchLabels: d.Spec.Template.Labels,
 		})
-		installVPA(f, vpaCRD)
+		InstallVPA(f, vpaCRD)
 
 		ginkgo.By("Setting up a hamster deployment")
 		podList := startDeploymentPods(f, d)
 
 		// VPA has no recommendation, so user's request is passed through
 		for _, pod := range podList.Items {
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(parseQuantityOrDie("100m")))
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(parseQuantityOrDie("100Mi")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("100m")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("100Mi")))
 		}
 	})
 
 	ginkgo.It("passes empty request when no recommendation and no user-specified request", func() {
-		d := newHamsterDeployment(f)
+		d := NewHamsterDeployment(f)
 
 		ginkgo.By("Setting up a VPA CRD")
-		vpaCRD := newVPA(f, "hamster-vpa", &metav1.LabelSelector{
+		vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
 			MatchLabels: d.Spec.Template.Labels,
 		})
-		installVPA(f, vpaCRD)
+		InstallVPA(f, vpaCRD)
 
 		ginkgo.By("Setting up a hamster deployment")
 		podList := startDeploymentPods(f, d)
diff --git a/vertical-pod-autoscaler/e2e/common.go b/vertical-pod-autoscaler/e2e/common.go
index 915cf25eb92f..4d3f50c81603 100644
--- a/vertical-pod-autoscaler/e2e/common.go
+++ b/vertical-pod-autoscaler/e2e/common.go
@@ -23,10 +23,14 @@ import (
 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
 	appsv1 "k8s.io/api/apps/v1"
-	"k8s.io/api/core/v1"
+	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1"
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/wait"
+	vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1"
 	vpa_clientset "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned"
 	"k8s.io/kubernetes/test/e2e/framework"
 )
@@ -39,50 +43,118 @@ const (
 	actuationSuite               = "actuation"
 	pollInterval                 = 10 * time.Second
 	pollTimeout                  = 15 * time.Minute
+	// VpaEvictionTimeout is a timeout for VPA to restart a pod if there are no
+	// mechanisms blocking it (for example PDB).
+	VpaEvictionTimeout = 3 * time.Minute
+
+	defaultHamsterReplicas = int32(3)
 )
 
-func e2eDescribe(scenario, name string, body func()) bool {
-	return ginkgo.Describe(fmt.Sprintf("[VPA] [%s] %s", scenario, name), body)
+var hamsterLabels = map[string]string{"app": "hamster"}
+
+// SIGDescribe adds sig-autoscaling tag to test description.
+func SIGDescribe(text string, body func()) bool {
+	return ginkgo.Describe(fmt.Sprintf("[sig-autoscaling] %v", text), body)
+}
+
+// E2eDescribe describes a VPA e2e test.
+func E2eDescribe(scenario, name string, body func()) bool {
+	return SIGDescribe(fmt.Sprintf("[VPA] [%s] %s", scenario, name), body)
 }
 
-func recommenderE2eDescribe(name string, body func()) bool {
-	return e2eDescribe(recommenderComponent, name, body)
+// RecommenderE2eDescribe describes a VPA recommender e2e test.
+func RecommenderE2eDescribe(name string, body func()) bool {
+	return E2eDescribe(recommenderComponent, name, body)
 }
 
-func updaterE2eDescribe(name string, body func()) bool {
-	return e2eDescribe(updateComponent, name, body)
+// UpdaterE2eDescribe describes a VPA updater e2e test.
+func UpdaterE2eDescribe(name string, body func()) bool {
+	return E2eDescribe(updateComponent, name, body)
 }
 
-func admissionControllerE2eDescribe(name string, body func()) bool {
-	return e2eDescribe(admissionControllerComponent, name, body)
+// AdmissionControllerE2eDescribe describes a VPA admission controller e2e test.
+func AdmissionControllerE2eDescribe(name string, body func()) bool {
+	return E2eDescribe(admissionControllerComponent, name, body)
 }
 
-func fullVpaE2eDescribe(name string, body func()) bool {
-	return e2eDescribe(fullVpaSuite, name, body)
+// FullVpaE2eDescribe describes a VPA full stack e2e test.
+func FullVpaE2eDescribe(name string, body func()) bool {
+	return E2eDescribe(fullVpaSuite, name, body)
 }
 
-func actuationSuiteE2eDescribe(name string, body func()) bool {
-	return e2eDescribe(actuationSuite, name, body)
+// ActuationSuiteE2eDescribe describes a VPA actuation e2e test.
+func ActuationSuiteE2eDescribe(name string, body func()) bool {
+	return E2eDescribe(actuationSuite, name, body)
 }
 
-func newHamsterDeployment(f *framework.Framework) *appsv1.Deployment {
-	d := framework.NewDeployment("hamster-deployment", 3, map[string]string{"app": "hamster"}, "hamster", "k8s.gcr.io/ubuntu-slim:0.1", appsv1.RollingUpdateDeploymentStrategyType)
+// SetupHamsterDeployment creates and installs a simple hamster deployment
+// for e2e test purposes, then makes sure the deployment is running.
+func SetupHamsterDeployment(f *framework.Framework, cpu, memory string, replicas int32) *appsv1.Deployment {
+	cpuQuantity := ParseQuantityOrDie(cpu)
+	memoryQuantity := ParseQuantityOrDie(memory)
+
+	d := NewHamsterDeploymentWithResources(f, cpuQuantity, memoryQuantity)
+	d.Spec.Replicas = &replicas
+	d, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).Create(d)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	err = framework.WaitForDeploymentComplete(f.ClientSet, d)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	return d
+}
+
+// NewHamsterDeployment creates a simple hamster deployment for e2e test
+// purposes.
+func NewHamsterDeployment(f *framework.Framework) *appsv1.Deployment {
+	d := framework.NewDeployment("hamster-deployment", defaultHamsterReplicas, hamsterLabels, "hamster", "k8s.gcr.io/ubuntu-slim:0.1", appsv1.RollingUpdateDeploymentStrategyType)
 	d.ObjectMeta.Namespace = f.Namespace.Name
 	d.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh"}
 	d.Spec.Template.Spec.Containers[0].Args = []string{"-c", "/usr/bin/yes >/dev/null"}
 	return d
 }
 
-func newHamsterDeploymentWithResources(f *framework.Framework, cpuQuantity, memoryQuantity resource.Quantity) *appsv1.Deployment {
-	d := newHamsterDeployment(f)
-	d.Spec.Template.Spec.Containers[0].Resources.Requests = v1.ResourceList{
-		v1.ResourceCPU:    cpuQuantity,
-		v1.ResourceMemory: memoryQuantity,
+// NewHamsterDeploymentWithResources creates a simple hamster deployment with specific
+// resource requests for e2e test purposes.
+func NewHamsterDeploymentWithResources(f *framework.Framework, cpuQuantity, memoryQuantity resource.Quantity) *appsv1.Deployment {
+	d := NewHamsterDeployment(f)
+	d.Spec.Template.Spec.Containers[0].Resources.Requests = apiv1.ResourceList{
+		apiv1.ResourceCPU:    cpuQuantity,
+		apiv1.ResourceMemory: memoryQuantity,
 	}
 	return d
 }
 
-func newVPA(f *framework.Framework, name string, selector *metav1.LabelSelector) *vpa_types.VerticalPodAutoscaler {
+// GetHamsterPods returns running hamster pods (matched by hamsterLabels)
+func GetHamsterPods(f *framework.Framework) (*apiv1.PodList, error) {
+	label := labels.SelectorFromSet(labels.Set(hamsterLabels))
+	selector := fields.ParseSelectorOrDie("status.phase!=" + string(apiv1.PodSucceeded) +
+		",status.phase!=" + string(apiv1.PodFailed))
+	options := metav1.ListOptions{LabelSelector: label.String(), FieldSelector: selector.String()}
+	return f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(options)
+}
+
+// SetupVPA creates and installs a simple hamster VPA for e2e test purposes.
+func SetupVPA(f *framework.Framework, cpu string, mode vpa_types.UpdateMode) {
+	vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
+		MatchLabels: hamsterLabels,
+	})
+	vpaCRD.Spec.UpdatePolicy.UpdateMode = &mode
+
+	cpuQuantity := ParseQuantityOrDie(cpu)
+	resourceList := apiv1.ResourceList{apiv1.ResourceCPU: cpuQuantity}
+
+	vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
+		ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
+			ContainerName: "hamster",
+			Target:        resourceList,
+			LowerBound:    resourceList,
+			UpperBound:    resourceList,
+		}},
+	}
+	InstallVPA(f, vpaCRD)
+}
+
+// NewVPA creates a VPA object for e2e test purposes.
+func NewVPA(f *framework.Framework, name string, selector *metav1.LabelSelector) *vpa_types.VerticalPodAutoscaler {
 	updateMode := vpa_types.UpdateModeAuto
 	vpa := vpa_types.VerticalPodAutoscaler{
 		ObjectMeta: metav1.ObjectMeta{
@@ -102,18 +174,114 @@ func newVPA(f *framework.Framework, name string, selector *metav1.LabelSelector)
 	return &vpa
 }
 
-func installVPA(f *framework.Framework, vpa *vpa_types.VerticalPodAutoscaler) {
+// InstallVPA installs a VPA object in the test cluster.
+func InstallVPA(f *framework.Framework, vpa *vpa_types.VerticalPodAutoscaler) {
 	ns := f.Namespace.Name
 	config, err := framework.LoadConfig()
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	vpaClientSet := vpa_clientset.NewForConfigOrDie(config)
-	vpaClient := vpaClientSet.PocV1alpha1()
+	vpaClient := vpaClientSet.AutoscalingV1beta1()
 	_, err = vpaClient.VerticalPodAutoscalers(ns).Create(vpa)
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 }
 
-func parseQuantityOrDie(text string) resource.Quantity {
+// ParseQuantityOrDie parses quantity from string and dies with an error if
+// unparsable.
+func ParseQuantityOrDie(text string) resource.Quantity {
 	quantity, err := resource.ParseQuantity(text)
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	return quantity
 }
+
+// PodSet is a simplified representation of PodList mapping names to UIDs.
+type PodSet map[string]types.UID
+
+// MakePodSet converts PodList to podset for easier comparison of pod collections.
+func MakePodSet(pods *apiv1.PodList) PodSet {
+	result := make(PodSet)
+	if pods == nil {
+		return result
+	}
+	for _, p := range pods.Items {
+		result[p.Name] = p.UID
+	}
+	return result
+}
+
+// WaitForPodsRestarted waits until some pods from the list are restarted.
+func WaitForPodsRestarted(f *framework.Framework, podList *apiv1.PodList) error {
+	initialPodSet := MakePodSet(podList)
+
+	err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
+		currentPodList, err := GetHamsterPods(f)
+		if err != nil {
+			return false, err
+		}
+		currentPodSet := MakePodSet(currentPodList)
+		return WerePodsSuccessfullyRestarted(currentPodSet, initialPodSet), nil
+	})
+
+	if err != nil {
+		return fmt.Errorf("Waiting for set of pods changed: %v", err)
+	}
+	return nil
+}
+
+// WaitForPodsEvicted waits until some pods from the list are evicted.
+func WaitForPodsEvicted(f *framework.Framework, podList *apiv1.PodList) error {
+	initialPodSet := MakePodSet(podList)
+
+	err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
+		currentPodList, err := GetHamsterPods(f)
+		if err != nil {
+			return false, err
+		}
+		currentPodSet := MakePodSet(currentPodList)
+		return GetEvictedPodsCount(currentPodSet, initialPodSet) > 0, nil
+	})
+
+	if err != nil {
+		return fmt.Errorf("Waiting for set of pods changed: %v", err)
+	}
+	return nil
+}
+
+// WerePodsSuccessfullyRestarted returns true if some pods from initialPodSet have been
+// successfully restarted comparing to currentPodSet (pods were evicted and
+// are running).
+func WerePodsSuccessfullyRestarted(currentPodSet PodSet, initialPodSet PodSet) bool {
+	if len(currentPodSet) < len(initialPodSet) {
+		// If we have less pods running than in the beginning, there is a restart
+		// in progress - a pod was evicted but not yet recreated.
+		framework.Logf("Restart in progress")
+		return false
+	}
+	evictedCount := GetEvictedPodsCount(currentPodSet, initialPodSet)
+	framework.Logf("%v of initial pods were already evicted", evictedCount)
+	return evictedCount > 0
+}
+
+// GetEvictedPodsCount returns the count of pods from initialPodSet that have
+// been evicted comparing to currentPodSet.
+func GetEvictedPodsCount(currentPodSet PodSet, initialPodSet PodSet) int {
+	diffs := 0
+	for name, initialUID := range initialPodSet {
+		currentUID, inCurrent := currentPodSet[name]
+		if !inCurrent {
+			diffs += 1
+		} else if initialUID != currentUID {
+			diffs += 1
+		}
+	}
+	return diffs
+}
+
+// CheckNoPodsEvicted waits for long enough period for VPA to start evicting
+// pods and checks that no pods were restarted.
+func CheckNoPodsEvicted(f *framework.Framework, initialPodSet PodSet) {
+	time.Sleep(VpaEvictionTimeout)
+	currentPodList, err := GetHamsterPods(f)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	restarted := GetEvictedPodsCount(MakePodSet(currentPodList), initialPodSet)
+	gomega.Expect(restarted).To(gomega.Equal(0))
+}
diff --git a/vertical-pod-autoscaler/e2e/full_vpa.go b/vertical-pod-autoscaler/e2e/full_vpa.go
index cf8cd0edbf38..be55689e75ed 100644
--- a/vertical-pod-autoscaler/e2e/full_vpa.go
+++ b/vertical-pod-autoscaler/e2e/full_vpa.go
@@ -23,7 +23,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
-	vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1"
+	vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1"
 	vpa_clientset "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned"
 	e2e_common "k8s.io/kubernetes/test/e2e/common"
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -36,7 +36,7 @@ const (
 	minimalCPU = "50m"
 )
 
-var _ = fullVpaE2eDescribe("Pods under VPA", func() {
+var _ = FullVpaE2eDescribe("Pods under VPA", func() {
 	var (
 		rc           *ResourceConsumer
 		vpaClientSet *vpa_clientset.Clientset
@@ -57,11 +57,11 @@ var _ = fullVpaE2eDescribe("Pods under VPA", func() {
 		ginkgo.By("Setting up a hamster deployment")
 		rc = NewDynamicResourceConsumer("hamster", ns, e2e_common.KindDeployment,
 			replicas,
-			1,                           /*initCPUTotal*/
-			10,                          /*initMemoryTotal*/
-			1,                           /*initCustomMetric*/
-			parseQuantityOrDie("100m"),  /*cpuRequest*/
-			parseQuantityOrDie("10Mi"),  /*memRequest*/
+			1,                          /*initCPUTotal*/
+			10,                         /*initMemoryTotal*/
+			1,                          /*initCustomMetric*/
+			ParseQuantityOrDie("100m"), /*cpuRequest*/
+			ParseQuantityOrDie("10Mi"), /*memRequest*/
 			f.ClientSet,
 			f.InternalClientset)
 
@@ -69,14 +69,14 @@ var _ = fullVpaE2eDescribe("Pods under VPA", func() {
 		config, err := framework.LoadConfig()
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
-		vpaCRD = newVPA(f, "hamster-vpa", &metav1.LabelSelector{
+		vpaCRD = NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
 			MatchLabels: map[string]string{
 				"name": "hamster",
 			},
 		})
 
 		vpaClientSet = vpa_clientset.NewForConfigOrDie(config)
-		vpaClient := vpaClientSet.PocV1alpha1()
+		vpaClient := vpaClientSet.AutoscalingV1beta1()
 		_, err = vpaClient.VerticalPodAutoscalers(ns).Create(vpaCRD)
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
@@ -84,14 +84,14 @@ var _ = fullVpaE2eDescribe("Pods under VPA", func() {
 
 	ginkgo.It("stabilize at minimum CPU if doing nothing", func() {
 		err := waitForResourceRequestAboveThresholdInPods(
-			f, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceCPU, parseQuantityOrDie(minimalCPU))
+			f, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceCPU, ParseQuantityOrDie(minimalCPU))
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	})
 
 	ginkgo.It("have cpu requests growing with usage", func() {
 		rc.ConsumeCPU(600 * replicas)
 		err := waitForResourceRequestAboveThresholdInPods(
-			f, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceCPU, parseQuantityOrDie("500m"))
+			f, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceCPU, ParseQuantityOrDie("500m"))
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	})
 
@@ -104,7 +104,7 @@ var _ = fullVpaE2eDescribe("Pods under VPA", func() {
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		rc.ConsumeMem(1024 * replicas)
 		err = waitForResourceRequestAboveThresholdInPods(
-			f, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceMemory, parseQuantityOrDie("600Mi"))
+			f, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceMemory, ParseQuantityOrDie("600Mi"))
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	})
 })
diff --git a/vertical-pod-autoscaler/e2e/recommender.go b/vertical-pod-autoscaler/e2e/recommender.go
index 411c62d51bd3..f6eedc7f0398 100644
--- a/vertical-pod-autoscaler/e2e/recommender.go
+++ b/vertical-pod-autoscaler/e2e/recommender.go
@@ -24,7 +24,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/util/wait"
-	vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1"
+	vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1"
 	vpa_clientset "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/tools/cache"
@@ -87,9 +87,9 @@ func (o *observer) OnUpdate(oldObj, newObj interface{}) {
 		return
 	}
 	oldVPA, _ := oldObj.(*vpa_types.VerticalPodAutoscaler)
-	newVPA, _ := newObj.(*vpa_types.VerticalPodAutoscaler)
+	NewVPA, _ := newObj.(*vpa_types.VerticalPodAutoscaler)
 	oldRecommendation, oldFound := get(oldVPA)
-	newRecommendation, newFound := get(newVPA)
+	newRecommendation, newFound := get(NewVPA)
 	result := recommendationChange{
 		oldMissing: !oldFound,
 		newMissing: !newFound,
@@ -99,7 +99,7 @@ func (o *observer) OnUpdate(oldObj, newObj interface{}) {
 }
 
 func getVpaObserver(vpaClientSet *vpa_clientset.Clientset) *observer {
-	vpaListWatch := cache.NewListWatchFromClient(vpaClientSet.PocV1alpha1().RESTClient(), "verticalpodautoscalers", apiv1.NamespaceAll, fields.Everything())
+	vpaListWatch := cache.NewListWatchFromClient(vpaClientSet.AutoscalingV1beta1().RESTClient(), "verticalpodautoscalers", apiv1.NamespaceAll, fields.Everything())
 	vpaObserver := observer{channel: make(chan recommendationChange)}
 	_, controller := cache.NewIndexerInformer(vpaListWatch,
 		&vpa_types.VerticalPodAutoscaler{},
@@ -115,7 +115,7 @@ func getVpaObserver(vpaClientSet *vpa_clientset.Clientset) *observer {
 	return &vpaObserver
 }
 
-var _ = recommenderE2eDescribe("Checkpoints", func() {
+var _ = RecommenderE2eDescribe("Checkpoints", func() {
 	f := framework.NewDefaultFramework("vertical-pod-autoscaling")
 
 	ginkgo.It("with missing VPA objects are garbage collected", func() {
@@ -134,18 +134,18 @@ var _ = recommenderE2eDescribe("Checkpoints", func() {
 		}
 
 		vpaClientSet := vpa_clientset.NewForConfigOrDie(config)
-		_, err = vpaClientSet.PocV1alpha1().VerticalPodAutoscalerCheckpoints(ns).Create(&checkpoint)
+		_, err = vpaClientSet.AutoscalingV1beta1().VerticalPodAutoscalerCheckpoints(ns).Create(&checkpoint)
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
 		time.Sleep(15 * time.Minute)
 
-		list, err := vpaClientSet.PocV1alpha1().VerticalPodAutoscalerCheckpoints(ns).List(metav1.ListOptions{})
+		list, err := vpaClientSet.AutoscalingV1beta1().VerticalPodAutoscalerCheckpoints(ns).List(metav1.ListOptions{})
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		gomega.Expect(list.Items).To(gomega.BeEmpty())
 	})
 })
 
-var _ = recommenderE2eDescribe("VPA CRD object", func() {
+var _ = RecommenderE2eDescribe("VPA CRD object", func() {
 	f := framework.NewDefaultFramework("vertical-pod-autoscaling")
 
 	var (
@@ -158,10 +158,10 @@ var _ = recommenderE2eDescribe("VPA CRD object", func() {
 		c := f.ClientSet
 		ns := f.Namespace.Name
 
-		cpuQuantity := parseQuantityOrDie("100m")
-		memoryQuantity := parseQuantityOrDie("100Mi")
+		cpuQuantity := ParseQuantityOrDie("100m")
+		memoryQuantity := ParseQuantityOrDie("100Mi")
 
-		d := newHamsterDeploymentWithResources(f, cpuQuantity, memoryQuantity)
+		d := NewHamsterDeploymentWithResources(f, cpuQuantity, memoryQuantity)
 		_, err := c.AppsV1().Deployments(ns).Create(d)
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		err = framework.WaitForDeploymentComplete(c, d)
@@ -171,14 +171,14 @@ var _ = recommenderE2eDescribe("VPA CRD object", func() {
 		config, err := framework.LoadConfig()
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
-		vpaCRD = newVPA(f, "hamster-vpa", &metav1.LabelSelector{
+		vpaCRD = NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
 			MatchLabels: map[string]string{
 				"app": "hamster",
 			},
 		})
 
 		vpaClientSet = vpa_clientset.NewForConfigOrDie(config)
-		vpaClient := vpaClientSet.PocV1alpha1()
+		vpaClient := vpaClientSet.AutoscalingV1beta1()
 		_, err = vpaClient.VerticalPodAutoscalers(ns).Create(vpaCRD)
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	})
@@ -248,7 +248,7 @@ func deleteRecommender(c clientset.Interface) error {
 func waitForRecommendationPresent(c *vpa_clientset.Clientset, vpa *vpa_types.VerticalPodAutoscaler) error {
 	err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
 		var err error
-		polledVpa, err := c.PocV1alpha1().VerticalPodAutoscalers(vpa.Namespace).Get(vpa.Name, metav1.GetOptions{})
+		polledVpa, err := c.AutoscalingV1beta1().VerticalPodAutoscalers(vpa.Namespace).Get(vpa.Name, metav1.GetOptions{})
 		if err != nil {
 			return false, err
 		}
diff --git a/vertical-pod-autoscaler/e2e/updater.go b/vertical-pod-autoscaler/e2e/updater.go
index 4769538b4d78..7e6f384d73bc 100644
--- a/vertical-pod-autoscaler/e2e/updater.go
+++ b/vertical-pod-autoscaler/e2e/updater.go
@@ -18,106 +18,258 @@ package autoscaling
 
 import (
 	"fmt"
-	"reflect"
+	"time"
 
 	appsv1 "k8s.io/api/apps/v1"
 	apiv1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
+	policyv1beta1 "k8s.io/api/policy/v1beta1"
+	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/wait"
-	vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1"
-	vpa_clientset "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned"
+	vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	testutils "k8s.io/kubernetes/test/utils"
 
 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
 )
 
-var _ = updaterE2eDescribe("Updater", func() {
+var _ = UpdaterE2eDescribe("Updater", func() {
 	f := framework.NewDefaultFramework("vertical-pod-autoscaling")
 
-	ginkgo.It("restarts pods", func() {
+	ginkgo.It("evicts pods in a Deployment", func() {
+		testEvictsPods(f, "Deployment")
+	})
+
+	ginkgo.It("evicts pods in a Replication Controller", func() {
+		testEvictsPods(f, "ReplicationController")
+	})
+
+	ginkgo.It("evicts pods in a Job", func() {
+		testEvictsPods(f, "Job")
+	})
+
+	ginkgo.It("evicts pods in a ReplicaSet", func() {
+		testEvictsPods(f, "ReplicaSet")
+	})
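(Editor's note: `setupPDB`, used by the "observes pod disruption budget" test below, is defined outside the quoted hunks. The following is a plausible sketch inferred only from its call sites, such as `setupPDB(f, "hamster-pdb", 0)`, and from the `policyv1beta1` and `intstr` imports added above; the name `setupPDBSketch` and the exact field choices are assumptions, not the committed code.)

```go
// Sketch: create a PDB selecting the hamster pods so the updater's
// evictions are throttled; maxUnavailable == 0 forbids eviction entirely.
func setupPDBSketch(f *framework.Framework, name string, maxUnavailable int) *policyv1beta1.PodDisruptionBudget {
	maxUnavailableIntstr := intstr.FromInt(maxUnavailable)
	pdb := &policyv1beta1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
		Spec: policyv1beta1.PodDisruptionBudgetSpec{
			MaxUnavailable: &maxUnavailableIntstr,
			Selector:       &metav1.LabelSelector{MatchLabels: hamsterLabels},
		},
	}
	pdb, err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(f.Namespace.Name).Create(pdb)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
	return pdb
}
```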
+
+	ginkgo.It("evicts pods in a StatefulSet", func() {
+		testEvictsPods(f, "StatefulSet")
+	})
+
+	ginkgo.It("observes pod disruption budget", func() {
 		ginkgo.By("Setting up a hamster deployment")
 		c := f.ClientSet
 		ns := f.Namespace.Name
 
-		cpuQuantity := parseQuantityOrDie("100m")
-		memoryQuantity := parseQuantityOrDie("100Mi")
-
-		d := newHamsterDeploymentWithResources(f, cpuQuantity, memoryQuantity)
-		d, err := c.AppsV1().Deployments(ns).Create(d)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-		err = framework.WaitForDeploymentComplete(c, d)
+		SetupHamsterDeployment(f, "10m", "10Mi", 10)
+		podList, err := GetHamsterPods(f)
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		podSet := MakePodSet(podList)
 
-		podList, err := framework.GetPodsForDeployment(c, d)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		ginkgo.By("Setting up prohibitive PDB for hamster deployment")
+		pdb := setupPDB(f, "hamster-pdb", 0 /* maxUnavailable */)
 
 		ginkgo.By("Setting up a VPA CRD")
-		config, err := framework.LoadConfig()
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		SetupVPA(f, "25m", vpa_types.UpdateModeAuto)
 
-		vpaCRD := newVPA(f, "hamster-vpa", &metav1.LabelSelector{
-			MatchLabels: map[string]string{
-				"app": "hamster",
-			},
-		})
+		ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
+		CheckNoPodsEvicted(f, podSet)
 
-		newCPUQuantity, err := resource.ParseQuantity("200m")
+		ginkgo.By("Updating the PDB to allow for multiple pods to be evicted")
+		// We will check that 7 replicas are evicted in 3 minutes, which translates
+		// to 3 updater loops. This gives us relatively good confidence that updater
+		// evicts more than one pod in a loop if PDB allows it.
+		permissiveMaxUnavailable := 7
+		// Creating new PDB and removing old one, since PDBs are immutable at the moment
+		setupPDB(f, "hamster-pdb-2", permissiveMaxUnavailable)
+		err = c.PolicyV1beta1().PodDisruptionBudgets(ns).Delete(pdb.Name, &metav1.DeleteOptions{})
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
-		resourceList := apiv1.ResourceList{apiv1.ResourceCPU: newCPUQuantity}
-
-		vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
-			ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
-				ContainerName: "hamster",
-				Target:        resourceList,
-				LowerBound:    resourceList,
-				UpperBound:    resourceList,
-			}},
-		}
-
-		vpaClientSet := vpa_clientset.NewForConfigOrDie(config)
-		vpaClient := vpaClientSet.PocV1alpha1()
-		_, err = vpaClient.VerticalPodAutoscalers(ns).Create(vpaCRD)
+		ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, sleep for %s", VpaEvictionTimeout.String()))
+		time.Sleep(VpaEvictionTimeout)
+		ginkgo.By("Checking enough pods were evicted.")
+		currentPodList, err := GetHamsterPods(f)
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-
-		ginkgo.By("Waiting for pods to be restarted")
-
-		err = waitForPodSetChangedInDeployment(c, d, podList)
+		evictedCount := GetEvictedPodsCount(MakePodSet(currentPodList), podSet)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-
+		gomega.Expect(evictedCount >= permissiveMaxUnavailable).To(gomega.BeTrue())
 	})
 })
 
-func makePodSet(pods *apiv1.PodList) map[string]bool {
-	result := make(map[string]bool)
-	for _, p := range pods.Items {
-		result[p.Name] = true
+func testEvictsPods(f *framework.Framework, controllerKind string) {
+	ginkgo.By(fmt.Sprintf("Setting up a hamster %v", controllerKind))
+	setupHamsterController(f, controllerKind, "100m", "100Mi", defaultHamsterReplicas)
+	podList, err := GetHamsterPods(f)
+
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Setting up a VPA CRD") + SetupVPA(f, "200m", vpa_types.UpdateModeAuto) + + ginkgo.By("Waiting for pods to be evicted") + err = WaitForPodsEvicted(f, podList) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) +} + +func setupHamsterController(f *framework.Framework, controllerKind, cpu, memory string, replicas int32) *apiv1.PodList { + switch controllerKind { + case "Deployment": + SetupHamsterDeployment(f, cpu, memory, replicas) + case "ReplicationController": + setupHamsterReplicationController(f, cpu, memory, replicas) + case "Job": + setupHamsterJob(f, cpu, memory, replicas) + case "ReplicaSet": + setupHamsterRS(f, cpu, memory, replicas) + case "StatefulSet": + setupHamsterStateful(f, cpu, memory, replicas) + default: + framework.Failf("Unknown controller kind: %v", controllerKind) + return nil } - return result + pods, err := GetHamsterPods(f) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return pods } -func waitForPodSetChangedInDeployment(c clientset.Interface, deployment *appsv1.Deployment, podList *apiv1.PodList) error { - initialPodSet := makePodSet(podList) +func setupHamsterReplicationController(f *framework.Framework, cpu, memory string, replicas int32) { + hamsterContainer := setupHamsterContainer(cpu, memory) + rc := framework.RcByNameContainer("hamster-rc", replicas, "k8s.gcr.io/ubuntu-slim:0.1", + hamsterLabels, hamsterContainer, nil) - err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { - var err error - currentPodList, err := framework.GetPodsForDeployment(c, deployment) + rc.Namespace = f.Namespace.Name + err := testutils.CreateRCWithRetries(f.ClientSet, f.Namespace.Name, rc) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = waitForRCPodsRunning(f, rc) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) +} + +func waitForRCPodsRunning(f *framework.Framework, rc *apiv1.ReplicationController) error { + return wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { + podList, err := GetHamsterPods(f) if err != nil { - return false, err + framework.Logf("Error listing pods, retrying: %v", err) + return false, nil + } + podsRunning := int32(0) + for _, pod := range podList.Items { + if pod.Status.Phase == apiv1.PodRunning { + podsRunning += 1 + } } + return podsRunning == *rc.Spec.Replicas, nil + }) +} - currentPodSet := makePodSet(currentPodList) +func setupHamsterJob(f *framework.Framework, cpu, memory string, replicas int32) { + job := framework.NewTestJob("notTerminate", "hamster-job", apiv1.RestartPolicyOnFailure, + replicas, replicas, nil, 10) + job.Spec.Template.Spec.Containers[0] = setupHamsterContainer(cpu, memory) + for label, value := range hamsterLabels { + job.Spec.Template.Labels[label] = value + } + err := testutils.CreateJobWithRetries(f.ClientSet, f.Namespace.Name, job) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = framework.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, replicas) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) +} - return !reflect.DeepEqual(initialPodSet, currentPodSet), nil +func setupHamsterRS(f *framework.Framework, cpu, memory string, replicas int32) { + rs := framework.NewReplicaSet("hamster-rs", f.Namespace.Name, replicas, + hamsterLabels, "", "") + rs.Spec.Template.Spec.Containers[0] = setupHamsterContainer(cpu, memory) + err := createReplicaSetWithRetries(f.ClientSet, f.Namespace.Name, rs) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = framework.WaitForReadyReplicaSet(f.ClientSet, 
f.Namespace.Name, rs.Name)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+}
-
+func setupHamsterStateful(f *framework.Framework, cpu, memory string, replicas int32) {
+	stateful := framework.NewStatefulSet("hamster-stateful", f.Namespace.Name,
+		"hamster-service", replicas, nil, nil, hamsterLabels)
+
+	stateful.Spec.Template.Spec.Containers[0] = setupHamsterContainer(cpu, memory)
+	err := createStatefulSetWithRetries(f.ClientSet, f.Namespace.Name, stateful)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	tester := framework.NewStatefulSetTester(f.ClientSet)
+	tester.WaitForRunningAndReady(*stateful.Spec.Replicas, stateful)
+}
+
+func setupHamsterContainer(cpu, memory string) apiv1.Container {
+	cpuQuantity := ParseQuantityOrDie(cpu)
+	memoryQuantity := ParseQuantityOrDie(memory)
+
+	return apiv1.Container{
+		Name:  "hamster",
+		Image: "k8s.gcr.io/ubuntu-slim:0.1",
+		Resources: apiv1.ResourceRequirements{
+			Requests: apiv1.ResourceList{
+				apiv1.ResourceCPU:    cpuQuantity,
+				apiv1.ResourceMemory: memoryQuantity,
+			},
+		},
+		Command: []string{"/bin/sh"},
+		Args:    []string{"-c", "while true; do sleep 10 ; done"},
+	}
+}
+
+func setupPDB(f *framework.Framework, name string, maxUnavailable int) *policyv1beta1.PodDisruptionBudget {
+	maxUnavailableIntstr := intstr.FromInt(maxUnavailable)
+	pdb := &policyv1beta1.PodDisruptionBudget{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: name,
+		},
+		Spec: policyv1beta1.PodDisruptionBudgetSpec{
+			MaxUnavailable: &maxUnavailableIntstr,
+			Selector: &metav1.LabelSelector{
+				MatchLabels: hamsterLabels,
+			},
+		},
+	}
+	_, err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(f.Namespace.Name).Create(pdb)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	return pdb
+}
+
+func getCurrentPodSetForDeployment(c clientset.Interface, d *appsv1.Deployment) PodSet {
+	podList, err := framework.GetPodsForDeployment(c, d)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	return MakePodSet(podList)
+}
+
+func createReplicaSetWithRetries(c clientset.Interface, namespace string, obj *appsv1.ReplicaSet) error {
+	if obj == nil {
+		return fmt.Errorf("Object provided to create is empty")
+	}
+	createFunc := func() (bool, error) {
+		_, err := c.AppsV1().ReplicaSets(namespace).Create(obj)
+		if err == nil || apierrs.IsAlreadyExists(err) {
+			return true, nil
+		}
+		if testutils.IsRetryableAPIError(err) {
+			return false, nil
+		}
+		return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
+	}
+	return testutils.RetryWithExponentialBackOff(createFunc)
+}
-	if err != nil {
-		return fmt.Errorf("Waiting for set of pods changed in %v: %v", deployment.Name, err)
+func createStatefulSetWithRetries(c clientset.Interface, namespace string, obj *appsv1.StatefulSet) error {
+	if obj == nil {
+		return fmt.Errorf("Object provided to create is empty")
+	}
+	createFunc := func() (bool, error) {
+		_, err := c.AppsV1().StatefulSets(namespace).Create(obj)
+		if err == nil || apierrs.IsAlreadyExists(err) {
+			return true, nil
+		}
+		if testutils.IsRetryableAPIError(err) {
+			return false, nil
+		}
+		return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
 	}
-	return nil
+	return testutils.RetryWithExponentialBackOff(createFunc)
 }
diff --git a/vertical-pod-autoscaler/hack/deploy-for-e2e.sh b/vertical-pod-autoscaler/hack/deploy-for-e2e.sh
index c06518a7ccec..19b08066dc01 100755
--- a/vertical-pod-autoscaler/hack/deploy-for-e2e.sh
+++ b/vertical-pod-autoscaler/hack/deploy-for-e2e.sh
@@ -73,7 +73,7 @@ for i in ${COMPONENTS}; do make
--directory ${SCRIPT_ROOT}/pkg/${i} release done -kubectl create -f ${SCRIPT_ROOT}/deploy/vpa-crd.yaml +kubectl create -f ${SCRIPT_ROOT}/deploy/vpa-beta-crd.yaml kubectl create -f ${SCRIPT_ROOT}/deploy/vpa-rbac.yaml for i in ${COMPONENTS}; do diff --git a/vertical-pod-autoscaler/hack/update-codegen.sh b/vertical-pod-autoscaler/hack/update-codegen.sh index 238a27285f7f..41f85d53fb81 100755 --- a/vertical-pod-autoscaler/hack/update-codegen.sh +++ b/vertical-pod-autoscaler/hack/update-codegen.sh @@ -27,7 +27,7 @@ CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${SCRIPT_ROOT}; ls -d -1 ./vendor/k8s.io/code-ge # instead of the $GOPATH directly. For normal projects this can be dropped. ${CODEGEN_PKG}/generate-groups.sh all \ k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis \ - poc.autoscaling.k8s.io:v1alpha1 \ + "autoscaling.k8s.io:v1beta1 poc.autoscaling.k8s.io:v1alpha1" \ --output-base "$(dirname ${BASH_SOURCE})/../../../.." # To use your own boilerplate text append: diff --git a/vertical-pod-autoscaler/hack/vpa-process-yamls.sh b/vertical-pod-autoscaler/hack/vpa-process-yamls.sh index 92c6cf7b24bf..090d377433b2 100755 --- a/vertical-pod-autoscaler/hack/vpa-process-yamls.sh +++ b/vertical-pod-autoscaler/hack/vpa-process-yamls.sh @@ -39,7 +39,7 @@ if [ $# -gt 2 ]; then fi ACTION=$1 -COMPONENTS="vpa-crd vpa-rbac updater-deployment recommender-deployment admission-controller-deployment" +COMPONENTS="vpa-crd vpa-beta-crd vpa-rbac updater-deployment recommender-deployment admission-controller-deployment" if [ $# -gt 1 ]; then COMPONENTS="$2-deployment" diff --git a/vertical-pod-autoscaler/pkg/admission-controller/Makefile b/vertical-pod-autoscaler/pkg/admission-controller/Makefile index 685eaca2f34f..a13ddc541b6c 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/Makefile +++ b/vertical-pod-autoscaler/pkg/admission-controller/Makefile @@ -1,33 +1,59 @@ all: build -TAG?=v0.0.1 -REGISTRY?=k8s.gcr.io +TAG?=dev +REGISTRY?=staging-k8s.gcr.io FLAGS= ENVVAR= GOOS?=linux +COMPONENT=admission-controller +FULL_COMPONENT=vpa-${COMPONENT} deps: go get github.com/tools/godep build: clean deps $(ENVVAR) GOOS=$(GOOS) godep go build ./... - $(ENVVAR) GOOS=$(GOOS) godep go build -o admission-controller + $(ENVVAR) GOOS=$(GOOS) godep go build -o ${COMPONENT} + +build-binary: clean deps + $(ENVVAR) GOOS=$(GOOS) godep go build -o ${COMPONENT} test-unit: clean deps build $(ENVVAR) godep go test --test.short -race ./... $(FLAGS) -docker: +docker-build: +ifndef REGISTRY + ERR = $(error REGISTRY is undefined) + $(ERR) +endif +ifndef TAG + ERR = $(error TAG is undefined) + $(ERR) +endif + docker build --pull -t ${REGISTRY}/${FULL_COMPONENT}:${TAG} . + +docker-push: ifndef REGISTRY ERR = $(error REGISTRY is undefined) $(ERR) endif - docker build --pull -t ${REGISTRY}/vpa-admission-controller:${TAG} . 
- docker push ${REGISTRY}/vpa-admission-controller:${TAG} +ifndef TAG + ERR = $(error TAG is undefined) + $(ERR) +endif + docker push ${REGISTRY}/${FULL_COMPONENT}:${TAG} + +docker-builder: + docker build -t vpa-autoscaling-builder ../../builder + +build-in-docker: clean docker-builder + docker run -v `pwd`/../..:/gopath/src/k8s.io/autoscaler/vertical-pod-autoscaler vpa-autoscaling-builder:latest bash -c 'cd /gopath/src/k8s.io/autoscaler/vertical-pod-autoscaler && make build-binary -C pkg/admission-controller' -release: build docker +release: build-in-docker docker-build docker-push + @echo "Full in-docker release ${FULL_COMPONENT}:${TAG} completed" clean: - rm -f admission-controller + rm -f ${COMPONENT} format: test -z "$$(find . -path ./vendor -prune -type f -o -name '*.go' -exec gofmt -s -d {} + | tee /dev/stderr)" || \ diff --git a/vertical-pod-autoscaler/pkg/admission-controller/config.go b/vertical-pod-autoscaler/pkg/admission-controller/config.go index a4fbaec272a4..38c8c0cec4b5 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/config.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/config.go @@ -110,8 +110,8 @@ func selfRegistration(clientset *kubernetes.Clientset, caCert []byte) { { Operations: []v1beta1.OperationType{v1beta1.Create, v1beta1.Update}, Rule: v1beta1.Rule{ - APIGroups: []string{"poc.autoscaling.k8s.io"}, - APIVersions: []string{"v1alpha1"}, + APIGroups: []string{"autoscaling.k8s.io"}, + APIVersions: []string{"v1beta1"}, Resources: []string{"verticalpodautoscalers"}, }, }}, diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go index e931cfa97cd2..33d3681afe05 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go @@ -22,8 +22,8 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" - vpa_lister "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/listers/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" + vpa_lister "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1beta1" vpa_api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa" ) @@ -71,7 +71,7 @@ func getContainersResources(pod *v1.Pod, podRecommendation vpa_types.Recommended func (p *recommendationProvider) getMatchingVPA(pod *v1.Pod) *vpa_types.VerticalPodAutoscaler { configs, err := p.vpaLister.VerticalPodAutoscalers(pod.Namespace).List(labels.Everything()) if err != nil { - glog.Error("failed to get vpa configs: %v", err) + glog.Errorf("failed to get vpa configs: %v", err) return nil } onConfigs := make([]*vpa_types.VerticalPodAutoscaler, 0) diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go index cb724ef7f06d..2a0fd0148f1b 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/assert" apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - vpa_types 
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa" vpa_api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa" diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go index e17cf9b4bf97..a71e532ad4b6 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go @@ -28,7 +28,7 @@ import ( "k8s.io/api/admission/v1beta1" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" metrics_admission "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/metrics/admission" vpa_api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa" ) @@ -125,11 +125,73 @@ func (s *AdmissionServer) getPatchesForPodResourceRequest(raw []byte, namespace return patches, nil } -func getPatchesForVPADefaults(raw []byte) ([]patchRecord, error) { +func parseVPA(raw []byte) (*vpa_types.VerticalPodAutoscaler, error) { vpa := vpa_types.VerticalPodAutoscaler{} if err := json.Unmarshal(raw, &vpa); err != nil { return nil, err } + return &vpa, nil +} + +var ( + possibleUpdateModes = map[vpa_types.UpdateMode]interface{}{ + vpa_types.UpdateModeOff: struct{}{}, + vpa_types.UpdateModeInitial: struct{}{}, + vpa_types.UpdateModeRecreate: struct{}{}, + vpa_types.UpdateModeAuto: struct{}{}, + } + + possibleScalingModes = map[vpa_types.ContainerScalingMode]interface{}{ + vpa_types.ContainerScalingModeAuto: struct{}{}, + vpa_types.ContainerScalingModeOff: struct{}{}, + } +) + +func validateVPA(vpa *vpa_types.VerticalPodAutoscaler) error { + if vpa.Spec.UpdatePolicy != nil { + mode := vpa.Spec.UpdatePolicy.UpdateMode + if mode == nil { + return fmt.Errorf("UpdateMode is required if UpdatePolicy is used") + } + if _, found := possibleUpdateModes[*mode]; !found { + return fmt.Errorf("Unexpected UpdateMode value %s", *mode) + } + } + + if vpa.Spec.ResourcePolicy != nil { + for _, policy := range vpa.Spec.ResourcePolicy.ContainerPolicies { + if policy.ContainerName == "" { + return fmt.Errorf("ContainerPolicies.ContainerName is required") + } + mode := policy.Mode + if mode != nil { + if _, found := possibleScalingModes[*mode]; !found { + return fmt.Errorf("Unexpected Mode value %s", *mode) + } + } + for resource, min := range policy.MinAllowed { + max, found := policy.MaxAllowed[resource] + if found && max.Cmp(min) < 0 { + return fmt.Errorf("Max resource for %v is lower than min", resource) + } + } + } + } + + return nil +} + +func getPatchesForVPADefaults(raw []byte) ([]patchRecord, error) { + vpa, err := parseVPA(raw) + if err != nil { + return nil, err + } + + err = validateVPA(vpa) + if err != nil { + return nil, err + } + glog.V(4).Infof("Processing vpa: %v", vpa) patches := []patchRecord{} if vpa.Spec.UpdatePolicy == nil { @@ -144,15 +206,19 @@ func getPatchesForVPADefaults(raw []byte) ([]patchRecord, error) { } func (s *AdmissionServer) admit(data []byte) (*v1beta1.AdmissionResponse, metrics_admission.AdmissionStatus, metrics_admission.AdmissionResource) { + // we don't block the admission by default, even on 
unparsable JSON + response := v1beta1.AdmissionResponse{} + response.Allowed = true + ar := v1beta1.AdmissionReview{} if err := json.Unmarshal(data, &ar); err != nil { glog.Error(err) - return nil, metrics_admission.Error, metrics_admission.Unknown + return &response, metrics_admission.Error, metrics_admission.Unknown } // The externalAdmissionHookConfiguration registered via selfRegistration // asks the kube-apiserver only to send admission requests regarding pods & VPA objects. podResource := metav1.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"} - vpaResource := metav1.GroupVersionResource{Group: "poc.autoscaling.k8s.io", Version: "v1alpha1", Resource: "verticalpodautoscalers"} + vpaResource := metav1.GroupVersionResource{Group: "autoscaling.k8s.io", Version: "v1beta1", Resource: "verticalpodautoscalers"} var patches []patchRecord var err error resource := metrics_admission.Unknown @@ -164,21 +230,28 @@ func (s *AdmissionServer) admit(data []byte) (*v1beta1.AdmissionResponse, metric case vpaResource: patches, err = getPatchesForVPADefaults(ar.Request.Object.Raw) resource = metrics_admission.Vpa + // we don't let in problematic VPA objects - late validation + if err != nil { + status := metav1.Status{} + status.Status = "Failure" + status.Message = err.Error() + response.Result = &status + response.Allowed = false + } default: patches, err = nil, fmt.Errorf("expected the resource to be %v or %v", podResource, vpaResource) } if err != nil { glog.Error(err) - return nil, metrics_admission.Error, resource + return &response, metrics_admission.Error, resource } - response := v1beta1.AdmissionResponse{} - response.Allowed = true + if len(patches) > 0 { patch, err := json.Marshal(patches) if err != nil { glog.Errorf("Cannot marshal the patch %v: %v", patches, err) - return nil, metrics_admission.Error, resource + return &response, metrics_admission.Error, resource } patchType := v1beta1.PatchTypeJSONPatch response.PatchType = &patchType diff --git a/vertical-pod-autoscaler/pkg/admission-controller/main.go b/vertical-pod-autoscaler/pkg/admission-controller/main.go index 6689f18a60bc..51b33074d1b3 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/main.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/main.go @@ -19,13 +19,14 @@ package main import ( "flag" "net/http" + "time" "github.com/golang/glog" kube_flag "k8s.io/apiserver/pkg/util/flag" "k8s.io/autoscaler/vertical-pod-autoscaler/common" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/admission-controller/logic" vpa_clientset "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned" - vpa_lister "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/listers/poc.autoscaling.k8s.io/v1alpha1" + vpa_lister "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1beta1" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/metrics" metrics_admission "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/metrics/admission" vpa_api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa" @@ -50,7 +51,8 @@ func main() { kube_flag.InitFlags() glog.V(1).Infof("Vertical Pod Autoscaler %s Admission Controller", common.VerticalPodAutoscalerVersion) - metrics.Initialize(*address) + healthCheck := metrics.NewHealthCheck(time.Minute, false) + metrics.Initialize(*address, healthCheck) metrics_admission.Register() certs := initCerts(*certsDir) @@ -59,6 +61,7 @@ func main() { as := logic.NewAdmissionServer(logic.NewRecommendationProvider(vpaLister, 
vpa_api_util.NewCappingRecommendationProcessor()), logic.NewDefaultPodPreProcessor()) http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { as.Serve(w, r) + healthCheck.UpdateLastActivity() }) clientset := getClient() server := &http.Server{ diff --git a/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1/doc.go b/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1/doc.go new file mode 100644 index 000000000000..2803d49a5cb9 --- /dev/null +++ b/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package,register + +// Package v1beta1 contains definitions of Vertical Pod Autoscaler related objects. +// +groupName=autoscaling.k8s.io +package v1beta1 diff --git a/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1/register.go b/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1/register.go new file mode 100644 index 000000000000..e160ea393cf0 --- /dev/null +++ b/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: "autoscaling.k8s.io", Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder points to a list of functions added to Scheme. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + // AddToScheme applies all the stored functions to the scheme. + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +// Adds the list of known types to api.Scheme. 
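+// For example, a consumer that needs these types in its own scheme can rely on
+// the exported AddToScheme above (a minimal sketch):
+//
+//	scheme := runtime.NewScheme()
+//	if err := AddToScheme(scheme); err != nil {
+//		panic(err) // or handle the error
+//	}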
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &VerticalPodAutoscaler{}, + &VerticalPodAutoscalerList{}, + &VerticalPodAutoscalerCheckpoint{}, + &VerticalPodAutoscalerCheckpointList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1/types.go b/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1/types.go new file mode 100644 index 000000000000..6c32ca33702a --- /dev/null +++ b/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1/types.go @@ -0,0 +1,322 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1beta1 contains definitions of Vertical Pod Autoscaler related objects. +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VerticalPodAutoscalerList is a list of VerticalPodAutoscaler objects. +type VerticalPodAutoscalerList struct { + metav1.TypeMeta `json:",inline"` + // metadata is the standard list metadata. + // +optional + metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of vertical pod autoscaler objects. + Items []VerticalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VerticalPodAutoscaler is the configuration for a vertical pod +// autoscaler, which automatically manages pod resources based on historical and +// real time resource utilization. +type VerticalPodAutoscaler struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the behavior of the autoscaler. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + Spec VerticalPodAutoscalerSpec `json:"spec" protobuf:"bytes,2,name=spec"` + + // Current information about the autoscaler. + // +optional + Status VerticalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// VerticalPodAutoscalerSpec is the specification of the behavior of the autoscaler. +type VerticalPodAutoscalerSpec struct { + // A label query that determines the set of pods controlled by the Autoscaler. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,1,name=selector"` + + // Describes the rules on how changes are applied to the pods. + // If not specified, all fields in the `PodUpdatePolicy` are set to their + // default values. 
+	// +optional
+	UpdatePolicy *PodUpdatePolicy `json:"updatePolicy,omitempty" protobuf:"bytes,2,opt,name=updatePolicy"`
+
+	// Controls how the autoscaler computes recommended resources.
+	// The resource policy may be used to set constraints on the recommendations
+	// for individual containers. If not specified, the autoscaler computes recommended
+	// resources for all containers in the pod, without additional constraints.
+	// +optional
+	ResourcePolicy *PodResourcePolicy `json:"resourcePolicy,omitempty" protobuf:"bytes,3,opt,name=resourcePolicy"`
+}
+
+// PodUpdatePolicy describes the rules on how changes are applied to the pods.
+type PodUpdatePolicy struct {
+	// Controls when autoscaler applies changes to the pod resources.
+	// The default is 'Auto'.
+	// +optional
+	UpdateMode *UpdateMode `json:"updateMode,omitempty" protobuf:"bytes,1,opt,name=updateMode"`
+}
+
+// UpdateMode controls when autoscaler applies changes to the pod resources.
+type UpdateMode string
+
+const (
+	// UpdateModeOff means that autoscaler never changes Pod resources.
+	// The recommender still sets the recommended resources in the
+	// VerticalPodAutoscaler object. This can be used for a "dry run".
+	UpdateModeOff UpdateMode = "Off"
+	// UpdateModeInitial means that autoscaler only assigns resources on pod
+	// creation and does not change them during the lifetime of the pod.
+	UpdateModeInitial UpdateMode = "Initial"
+	// UpdateModeRecreate means that autoscaler assigns resources on pod
+	// creation and additionally can update them during the lifetime of the
+	// pod by deleting and recreating the pod.
+	UpdateModeRecreate UpdateMode = "Recreate"
+	// UpdateModeAuto means that autoscaler assigns resources on pod creation
+	// and additionally can update them during the lifetime of the pod,
+	// using any available update method. Currently this is equivalent to
+	// Recreate, which is the only available update method.
+	UpdateModeAuto UpdateMode = "Auto"
+)
+
+// PodResourcePolicy controls how autoscaler computes the recommended resources
+// for containers belonging to the pod. There can be at most one entry for every
+// named container and optionally a single wildcard entry with `containerName` = '*',
+// which handles all containers that don't have individual policies.
+type PodResourcePolicy struct {
+	// Per-container resource policies.
+	// +optional
+	// +patchMergeKey=containerName
+	// +patchStrategy=merge
+	ContainerPolicies []ContainerResourcePolicy `json:"containerPolicies,omitempty" patchStrategy:"merge" patchMergeKey:"containerName" protobuf:"bytes,1,rep,name=containerPolicies"`
+}
+
+// ContainerResourcePolicy controls how autoscaler computes the recommended
+// resources for a specific container.
+type ContainerResourcePolicy struct {
+	// Name of the container or DefaultContainerResourcePolicy, in which
+	// case the policy is used by the containers that don't have their own
+	// policy specified.
+	ContainerName string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"`
+	// Whether autoscaler is enabled for the container. The default is "Auto".
+	// +optional
+	Mode *ContainerScalingMode `json:"mode,omitempty" protobuf:"bytes,2,opt,name=mode"`
+	// Specifies the minimal amount of resources that will be recommended
+	// for the container. The default is no minimum.
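+	// For illustration only (hypothetical values): a policy clamping CPU
+	// recommendations for this container into the range [50m, 2] could set
+	//
+	//	MinAllowed: v1.ResourceList{v1.ResourceCPU: resource.MustParse("50m")},
+	//	MaxAllowed: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")},
+	//
+	// The admission controller's validateVPA rejects policies where MinAllowed
+	// exceeds MaxAllowed for any resource.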
+	// +optional
+	MinAllowed v1.ResourceList `json:"minAllowed,omitempty" protobuf:"bytes,3,rep,name=minAllowed,casttype=ResourceList,castkey=ResourceName"`
+	// Specifies the maximum amount of resources that will be recommended
+	// for the container. The default is no maximum.
+	// +optional
+	MaxAllowed v1.ResourceList `json:"maxAllowed,omitempty" protobuf:"bytes,4,rep,name=maxAllowed,casttype=ResourceList,castkey=ResourceName"`
+}
+
+const (
+	// DefaultContainerResourcePolicy can be passed as
+	// ContainerResourcePolicy.ContainerName to specify the default policy.
+	DefaultContainerResourcePolicy = "*"
+)
+
+// ContainerScalingMode controls whether autoscaler is enabled for a specific
+// container.
+type ContainerScalingMode string
+
+const (
+	// ContainerScalingModeAuto means autoscaling is enabled for a container.
+	ContainerScalingModeAuto ContainerScalingMode = "Auto"
+	// ContainerScalingModeOff means autoscaling is disabled for a container.
+	ContainerScalingModeOff ContainerScalingMode = "Off"
+)
+
+// VerticalPodAutoscalerStatus describes the runtime state of the autoscaler.
+type VerticalPodAutoscalerStatus struct {
+	// The most recently computed amount of resources recommended by the
+	// autoscaler for the controlled pods.
+	// +optional
+	Recommendation *RecommendedPodResources `json:"recommendation,omitempty" protobuf:"bytes,1,opt,name=recommendation"`
+
+	// Conditions is the set of conditions required for this autoscaler to scale its target,
+	// and indicates whether or not those conditions are met.
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []VerticalPodAutoscalerCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
+}
+
+// RecommendedPodResources is the recommendation of resources computed by
+// autoscaler. It contains a recommendation for each container in the pod
+// (except for those with `ContainerScalingMode` set to 'Off').
+type RecommendedPodResources struct {
+	// Resources recommended by the autoscaler for each container.
+	// +optional
+	ContainerRecommendations []RecommendedContainerResources `json:"containerRecommendations,omitempty" protobuf:"bytes,1,rep,name=containerRecommendations"`
+}
+
+// RecommendedContainerResources is the recommendation of resources computed by
+// autoscaler for a specific container. Respects the container resource policy
+// if present in the spec. In particular the recommendation is not produced for
+// containers with `ContainerScalingMode` set to 'Off'.
+type RecommendedContainerResources struct {
+	// Name of the container.
+	ContainerName string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"`
+	// Recommended amount of resources. Observes ContainerResourcePolicy.
+	Target v1.ResourceList `json:"target" protobuf:"bytes,2,rep,name=target,casttype=ResourceList,castkey=ResourceName"`
+	// Minimum recommended amount of resources. Observes ContainerResourcePolicy.
+	// This amount is not guaranteed to be sufficient for the application to operate in a stable way; however,
+	// running with fewer resources is likely to have a significant impact on performance/availability.
+	// +optional
+	LowerBound v1.ResourceList `json:"lowerBound,omitempty" protobuf:"bytes,3,rep,name=lowerBound,casttype=ResourceList,castkey=ResourceName"`
+	// Maximum recommended amount of resources. Observes ContainerResourcePolicy.
+	// Any resources allocated beyond this value are likely wasted.
+	// This value may be larger than the maximum amount the application is actually capable of consuming.
+	// +optional
+	UpperBound v1.ResourceList `json:"upperBound,omitempty" protobuf:"bytes,4,rep,name=upperBound,casttype=ResourceList,castkey=ResourceName"`
+	// The most recent recommended resources target computed by the autoscaler
+	// for the controlled pods, based only on actual resource usage, not taking
+	// into account the ContainerResourcePolicy.
+	// May differ from the Recommendation if the actual resource usage causes
+	// the target to violate the ContainerResourcePolicy (lower than MinAllowed
+	// or higher than MaxAllowed).
+	// Used only as status indication, will not affect actual resource assignment.
+	// +optional
+	UncappedTarget v1.ResourceList `json:"uncappedTarget,omitempty" protobuf:"bytes,5,opt,name=uncappedTarget"`
+}
+
+// VerticalPodAutoscalerConditionType are the valid conditions of
+// a VerticalPodAutoscaler.
+type VerticalPodAutoscalerConditionType string
+
+var (
+	// RecommendationProvided indicates whether the VPA recommender was able to calculate a recommendation.
+	RecommendationProvided VerticalPodAutoscalerConditionType = "RecommendationProvided"
+	// LowConfidence indicates whether the VPA recommender has low confidence in the recommendation for
+	// some of the containers.
+	LowConfidence VerticalPodAutoscalerConditionType = "LowConfidence"
+	// NoPodsMatched indicates that label selector used with VPA object didn't match any pods.
+	NoPodsMatched VerticalPodAutoscalerConditionType = "NoPodsMatched"
+	// FetchingHistory indicates that VPA recommender is in the process of loading additional history samples.
+	FetchingHistory VerticalPodAutoscalerConditionType = "FetchingHistory"
+)
+
+// VerticalPodAutoscalerCondition describes the state of
+// a VerticalPodAutoscaler at a certain point.
+type VerticalPodAutoscalerCondition struct {
+	// type describes the current condition
+	Type VerticalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"`
+	// status is the status of the condition (True, False, Unknown)
+	Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"`
+	// lastTransitionTime is the last time the condition transitioned from
+	// one status to another
+	// +optional
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+	// reason is the reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+	// message is a human-readable explanation containing details about
+	// the transition
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// +genclient
+// +genclient:noStatus
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// VerticalPodAutoscalerCheckpoint is the checkpoint of the internal state of VPA that
+// is used for recovery after recommender's restart.
+type VerticalPodAutoscalerCheckpoint struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Specification of the checkpoint.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
+	// +optional
+	Spec VerticalPodAutoscalerCheckpointSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// Data of the checkpoint.
+	// +optional
+	Status VerticalPodAutoscalerCheckpointStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// VerticalPodAutoscalerCheckpointList is a list of VerticalPodAutoscalerCheckpoint objects.
+type VerticalPodAutoscalerCheckpointList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata"`
+	Items []VerticalPodAutoscalerCheckpoint `json:"items"`
+}
+
+// VerticalPodAutoscalerCheckpointSpec is the specification of the checkpoint object.
+type VerticalPodAutoscalerCheckpointSpec struct {
+	// Name of the VPA object that stores the VerticalPodAutoscalerCheckpoint object.
+	VPAObjectName string `json:"vpaObjectName,omitempty" protobuf:"bytes,1,opt,name=vpaObjectName"`
+
+	// Name of the checkpointed container.
+	ContainerName string `json:"containerName,omitempty" protobuf:"bytes,2,opt,name=containerName"`
+}
+
+// VerticalPodAutoscalerCheckpointStatus contains data of the checkpoint.
+type VerticalPodAutoscalerCheckpointStatus struct {
+	// The time when the status was last refreshed.
+	LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,1,opt,name=lastUpdateTime"`
+
+	// Version of the format of the stored data.
+	Version string `json:"version,omitempty" protobuf:"bytes,2,opt,name=version"`
+
+	// Checkpoint of histogram for consumption of CPU.
+	CPUHistogram HistogramCheckpoint `json:"cpuHistogram,omitempty" protobuf:"bytes,3,rep,name=cpuHistograms"`
+
+	// Checkpoint of histogram for consumption of memory.
+	MemoryHistogram HistogramCheckpoint `json:"memoryHistogram,omitempty" protobuf:"bytes,4,rep,name=memoryHistogram"`
+
+	// Timestamp of the first sample from the histograms.
+	FirstSampleStart metav1.Time `json:"firstSampleStart,omitempty" protobuf:"bytes,5,opt,name=firstSampleStart"`
+
+	// Timestamp of the last sample from the histograms.
+	LastSampleStart metav1.Time `json:"lastSampleStart,omitempty" protobuf:"bytes,6,opt,name=lastSampleStart"`
+
+	// Total number of samples in the histograms.
+	TotalSamplesCount int `json:"totalSamplesCount,omitempty" protobuf:"bytes,7,opt,name=totalSamplesCount"`
+}
+
+// HistogramCheckpoint contains data needed to reconstruct the histogram.
+type HistogramCheckpoint struct {
+	// Reference timestamp for samples collected within this histogram.
+	ReferenceTimestamp metav1.Time `json:"referenceTimestamp,omitempty" protobuf:"bytes,1,opt,name=referenceTimestamp"`
+
+	// Map from bucket index to bucket weight.
+	BucketWeights map[int]uint32 `json:"bucketWeights,omitempty" protobuf:"bytes,2,opt,name=bucketWeights"`
+
+	// Sum of samples to be used as denominator for weights from BucketWeights.
+	TotalWeight float64 `json:"totalWeight,omitempty" protobuf:"bytes,3,opt,name=totalWeight"`
+}
diff --git a/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1/zz_generated.deepcopy.go b/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 000000000000..ca309767e9da
--- /dev/null
+++ b/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,456 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerResourcePolicy) DeepCopyInto(out *ContainerResourcePolicy) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + if *in == nil { + *out = nil + } else { + *out = new(ContainerScalingMode) + **out = **in + } + } + if in.MinAllowed != nil { + in, out := &in.MinAllowed, &out.MinAllowed + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.MaxAllowed != nil { + in, out := &in.MaxAllowed, &out.MaxAllowed + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResourcePolicy. +func (in *ContainerResourcePolicy) DeepCopy() *ContainerResourcePolicy { + if in == nil { + return nil + } + out := new(ContainerResourcePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HistogramCheckpoint) DeepCopyInto(out *HistogramCheckpoint) { + *out = *in + in.ReferenceTimestamp.DeepCopyInto(&out.ReferenceTimestamp) + if in.BucketWeights != nil { + in, out := &in.BucketWeights, &out.BucketWeights + *out = make(map[int]uint32, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HistogramCheckpoint. +func (in *HistogramCheckpoint) DeepCopy() *HistogramCheckpoint { + if in == nil { + return nil + } + out := new(HistogramCheckpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodResourcePolicy) DeepCopyInto(out *PodResourcePolicy) { + *out = *in + if in.ContainerPolicies != nil { + in, out := &in.ContainerPolicies, &out.ContainerPolicies + *out = make([]ContainerResourcePolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodResourcePolicy. +func (in *PodResourcePolicy) DeepCopy() *PodResourcePolicy { + if in == nil { + return nil + } + out := new(PodResourcePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodUpdatePolicy) DeepCopyInto(out *PodUpdatePolicy) { + *out = *in + if in.UpdateMode != nil { + in, out := &in.UpdateMode, &out.UpdateMode + if *in == nil { + *out = nil + } else { + *out = new(UpdateMode) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodUpdatePolicy. +func (in *PodUpdatePolicy) DeepCopy() *PodUpdatePolicy { + if in == nil { + return nil + } + out := new(PodUpdatePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecommendedContainerResources) DeepCopyInto(out *RecommendedContainerResources) { + *out = *in + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.LowerBound != nil { + in, out := &in.LowerBound, &out.LowerBound + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.UpperBound != nil { + in, out := &in.UpperBound, &out.UpperBound + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.UncappedTarget != nil { + in, out := &in.UncappedTarget, &out.UncappedTarget + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecommendedContainerResources. +func (in *RecommendedContainerResources) DeepCopy() *RecommendedContainerResources { + if in == nil { + return nil + } + out := new(RecommendedContainerResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecommendedPodResources) DeepCopyInto(out *RecommendedPodResources) { + *out = *in + if in.ContainerRecommendations != nil { + in, out := &in.ContainerRecommendations, &out.ContainerRecommendations + *out = make([]RecommendedContainerResources, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecommendedPodResources. +func (in *RecommendedPodResources) DeepCopy() *RecommendedPodResources { + if in == nil { + return nil + } + out := new(RecommendedPodResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VerticalPodAutoscaler) DeepCopyInto(out *VerticalPodAutoscaler) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscaler. +func (in *VerticalPodAutoscaler) DeepCopy() *VerticalPodAutoscaler { + if in == nil { + return nil + } + out := new(VerticalPodAutoscaler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *VerticalPodAutoscaler) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VerticalPodAutoscalerCheckpoint) DeepCopyInto(out *VerticalPodAutoscalerCheckpoint) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscalerCheckpoint. +func (in *VerticalPodAutoscalerCheckpoint) DeepCopy() *VerticalPodAutoscalerCheckpoint { + if in == nil { + return nil + } + out := new(VerticalPodAutoscalerCheckpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VerticalPodAutoscalerCheckpoint) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VerticalPodAutoscalerCheckpointList) DeepCopyInto(out *VerticalPodAutoscalerCheckpointList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VerticalPodAutoscalerCheckpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscalerCheckpointList. +func (in *VerticalPodAutoscalerCheckpointList) DeepCopy() *VerticalPodAutoscalerCheckpointList { + if in == nil { + return nil + } + out := new(VerticalPodAutoscalerCheckpointList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VerticalPodAutoscalerCheckpointList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VerticalPodAutoscalerCheckpointSpec) DeepCopyInto(out *VerticalPodAutoscalerCheckpointSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscalerCheckpointSpec. +func (in *VerticalPodAutoscalerCheckpointSpec) DeepCopy() *VerticalPodAutoscalerCheckpointSpec { + if in == nil { + return nil + } + out := new(VerticalPodAutoscalerCheckpointSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VerticalPodAutoscalerCheckpointStatus) DeepCopyInto(out *VerticalPodAutoscalerCheckpointStatus) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + in.CPUHistogram.DeepCopyInto(&out.CPUHistogram) + in.MemoryHistogram.DeepCopyInto(&out.MemoryHistogram) + in.FirstSampleStart.DeepCopyInto(&out.FirstSampleStart) + in.LastSampleStart.DeepCopyInto(&out.LastSampleStart) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscalerCheckpointStatus. 
+func (in *VerticalPodAutoscalerCheckpointStatus) DeepCopy() *VerticalPodAutoscalerCheckpointStatus { + if in == nil { + return nil + } + out := new(VerticalPodAutoscalerCheckpointStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VerticalPodAutoscalerCondition) DeepCopyInto(out *VerticalPodAutoscalerCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscalerCondition. +func (in *VerticalPodAutoscalerCondition) DeepCopy() *VerticalPodAutoscalerCondition { + if in == nil { + return nil + } + out := new(VerticalPodAutoscalerCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VerticalPodAutoscalerList) DeepCopyInto(out *VerticalPodAutoscalerList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VerticalPodAutoscaler, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscalerList. +func (in *VerticalPodAutoscalerList) DeepCopy() *VerticalPodAutoscalerList { + if in == nil { + return nil + } + out := new(VerticalPodAutoscalerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VerticalPodAutoscalerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VerticalPodAutoscalerSpec) DeepCopyInto(out *VerticalPodAutoscalerSpec) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + if in.UpdatePolicy != nil { + in, out := &in.UpdatePolicy, &out.UpdatePolicy + if *in == nil { + *out = nil + } else { + *out = new(PodUpdatePolicy) + (*in).DeepCopyInto(*out) + } + } + if in.ResourcePolicy != nil { + in, out := &in.ResourcePolicy, &out.ResourcePolicy + if *in == nil { + *out = nil + } else { + *out = new(PodResourcePolicy) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscalerSpec. +func (in *VerticalPodAutoscalerSpec) DeepCopy() *VerticalPodAutoscalerSpec { + if in == nil { + return nil + } + out := new(VerticalPodAutoscalerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VerticalPodAutoscalerStatus) DeepCopyInto(out *VerticalPodAutoscalerStatus) { + *out = *in + if in.Recommendation != nil { + in, out := &in.Recommendation, &out.Recommendation + if *in == nil { + *out = nil + } else { + *out = new(RecommendedPodResources) + (*in).DeepCopyInto(*out) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]VerticalPodAutoscalerCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscalerStatus. +func (in *VerticalPodAutoscalerStatus) DeepCopy() *VerticalPodAutoscalerStatus { + if in == nil { + return nil + } + out := new(VerticalPodAutoscalerStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1/zz_generated.deepcopy.go b/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1/zz_generated.deepcopy.go index a88addf6a32a..ca7dc04b6cc5 100644 --- a/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1/zz_generated.deepcopy.go +++ b/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2018 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,7 +16,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was autogenerated by deepcopy-gen. Do not edit it manually! +// Code generated by deepcopy-gen. DO NOT EDIT. package v1alpha1 @@ -221,9 +221,8 @@ func (in *VerticalPodAutoscaler) DeepCopy() *VerticalPodAutoscaler { func (in *VerticalPodAutoscaler) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c - } else { - return nil } + return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -250,9 +249,8 @@ func (in *VerticalPodAutoscalerCheckpoint) DeepCopy() *VerticalPodAutoscalerChec func (in *VerticalPodAutoscalerCheckpoint) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c - } else { - return nil } + return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -284,9 +282,8 @@ func (in *VerticalPodAutoscalerCheckpointList) DeepCopy() *VerticalPodAutoscaler func (in *VerticalPodAutoscalerCheckpointList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c - } else { - return nil } + return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -372,9 +369,8 @@ func (in *VerticalPodAutoscalerList) DeepCopy() *VerticalPodAutoscalerList { func (in *VerticalPodAutoscalerList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c - } else { - return nil } + return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
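The generated deepcopy functions above (for both the new autoscaling.k8s.io/v1beta1 types and the existing poc.autoscaling.k8s.io/v1alpha1 ones) all follow the same contract: every pointer-valued field (Selector, UpdatePolicy, ResourcePolicy, Recommendation, ...) is reallocated rather than aliased, so a copy can be mutated freely without touching the original. A minimal sketch of that contract, using only the v1beta1 types added in this change (the object name and labels here are illustrative, not taken from the patch):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	vpa "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1"
)

func main() {
	orig := &vpa.VerticalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{Name: "example-vpa", Namespace: "default"},
		Spec: vpa.VerticalPodAutoscalerSpec{
			// Selector is a pointer field; per the generated DeepCopyInto,
			// the copy gets a freshly allocated LabelSelector (including a
			// new MatchLabels map) instead of sharing this one.
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"app": "example"},
			},
		},
	}

	copied := orig.DeepCopy()
	copied.Spec.Selector.MatchLabels["app"] = "changed"

	fmt.Println(orig.Spec.Selector.MatchLabels["app"])   // example
	fmt.Println(copied.Spec.Selector.MatchLabels["app"]) // changed
}
```

That independence is the standard reason deepcopy-gen output exists: it lets controllers hand out copies of objects from a shared informer cache without risking concurrent mutation of the cached originals.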
diff --git a/vertical-pod-autoscaler/pkg/client/clientset/versioned/clientset.go b/vertical-pod-autoscaler/pkg/client/clientset/versioned/clientset.go index 52fe583bb578..6f390f8027b1 100644 --- a/vertical-pod-autoscaler/pkg/client/clientset/versioned/clientset.go +++ b/vertical-pod-autoscaler/pkg/client/clientset/versioned/clientset.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,10 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package versioned import ( - glog "github.com/golang/glog" + autoscalingv1beta1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1" pocv1alpha1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1" discovery "k8s.io/client-go/discovery" rest "k8s.io/client-go/rest" @@ -26,6 +28,9 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface + AutoscalingV1beta1() autoscalingv1beta1.AutoscalingV1beta1Interface + // Deprecated: please explicitly pick a version if possible. + Autoscaling() autoscalingv1beta1.AutoscalingV1beta1Interface PocV1alpha1() pocv1alpha1.PocV1alpha1Interface // Deprecated: please explicitly pick a version if possible. Poc() pocv1alpha1.PocV1alpha1Interface @@ -35,7 +40,19 @@ type Interface interface { // version included in a Clientset. type Clientset struct { *discovery.DiscoveryClient - pocV1alpha1 *pocv1alpha1.PocV1alpha1Client + autoscalingV1beta1 *autoscalingv1beta1.AutoscalingV1beta1Client + pocV1alpha1 *pocv1alpha1.PocV1alpha1Client +} + +// AutoscalingV1beta1 retrieves the AutoscalingV1beta1Client +func (c *Clientset) AutoscalingV1beta1() autoscalingv1beta1.AutoscalingV1beta1Interface { + return c.autoscalingV1beta1 +} + +// Deprecated: Autoscaling retrieves the default version of AutoscalingClient. +// Please explicitly pick a version. +func (c *Clientset) Autoscaling() autoscalingv1beta1.AutoscalingV1beta1Interface { + return c.autoscalingV1beta1 } // PocV1alpha1 retrieves the PocV1alpha1Client @@ -65,6 +82,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { } var cs Clientset var err error + cs.autoscalingV1beta1, err = autoscalingv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.pocV1alpha1, err = pocv1alpha1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -72,7 +93,6 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) if err != nil { - glog.Errorf("failed to create the DiscoveryClient: %v", err) return nil, err } return &cs, nil @@ -82,6 +102,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { // panics if there is an error in the config. func NewForConfigOrDie(c *rest.Config) *Clientset { var cs Clientset + cs.autoscalingV1beta1 = autoscalingv1beta1.NewForConfigOrDie(c) cs.pocV1alpha1 = pocv1alpha1.NewForConfigOrDie(c) cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) @@ -91,6 +112,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { // New creates a new Clientset for the given RESTClient. 
func New(c rest.Interface) *Clientset { var cs Clientset + cs.autoscalingV1beta1 = autoscalingv1beta1.New(c) cs.pocV1alpha1 = pocv1alpha1.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) diff --git a/vertical-pod-autoscaler/pkg/client/clientset/versioned/doc.go b/vertical-pod-autoscaler/pkg/client/clientset/versioned/doc.go index 9c6dfafa7325..41721ca52d44 100644 --- a/vertical-pod-autoscaler/pkg/client/clientset/versioned/doc.go +++ b/vertical-pod-autoscaler/pkg/client/clientset/versioned/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,5 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + // This package has the automatically generated clientset. package versioned diff --git a/vertical-pod-autoscaler/pkg/client/clientset/versioned/fake/clientset_generated.go b/vertical-pod-autoscaler/pkg/client/clientset/versioned/fake/clientset_generated.go index 6b7b131ab05a..f87602943ed7 100644 --- a/vertical-pod-autoscaler/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/vertical-pod-autoscaler/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,12 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" clientset "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned" + autoscalingv1beta1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1" + fakeautoscalingv1beta1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1/fake" pocv1alpha1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1" fakepocv1alpha1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/fake" "k8s.io/client-go/discovery" @@ -39,11 +43,20 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { } } - fakePtr := testing.Fake{} - fakePtr.AddReactor("*", "*", testing.ObjectReaction(o)) - fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil)) + cs := &Clientset{} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) - return &Clientset{fakePtr, &fakediscovery.FakeDiscovery{Fake: &fakePtr}} + return cs } // Clientset implements clientset.Interface. 
Meant to be embedded into a @@ -60,6 +73,16 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface { var _ clientset.Interface = &Clientset{} +// AutoscalingV1beta1 retrieves the AutoscalingV1beta1Client +func (c *Clientset) AutoscalingV1beta1() autoscalingv1beta1.AutoscalingV1beta1Interface { + return &fakeautoscalingv1beta1.FakeAutoscalingV1beta1{Fake: &c.Fake} +} + +// Autoscaling retrieves the AutoscalingV1beta1Client +func (c *Clientset) Autoscaling() autoscalingv1beta1.AutoscalingV1beta1Interface { + return &fakeautoscalingv1beta1.FakeAutoscalingV1beta1{Fake: &c.Fake} +} + // PocV1alpha1 retrieves the PocV1alpha1Client func (c *Clientset) PocV1alpha1() pocv1alpha1.PocV1alpha1Interface { return &fakepocv1alpha1.FakePocV1alpha1{Fake: &c.Fake} diff --git a/vertical-pod-autoscaler/pkg/client/clientset/versioned/fake/doc.go b/vertical-pod-autoscaler/pkg/client/clientset/versioned/fake/doc.go index 8a3101e39818..9b99e7167091 100644 --- a/vertical-pod-autoscaler/pkg/client/clientset/versioned/fake/doc.go +++ b/vertical-pod-autoscaler/pkg/client/clientset/versioned/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,5 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + // This package has the automatically generated fake clientset. package fake diff --git a/vertical-pod-autoscaler/pkg/client/clientset/versioned/fake/register.go b/vertical-pod-autoscaler/pkg/client/clientset/versioned/fake/register.go index f13074b299c2..a7521ce94254 100644 --- a/vertical-pod-autoscaler/pkg/client/clientset/versioned/fake/register.go +++ b/vertical-pod-autoscaler/pkg/client/clientset/versioned/fake/register.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( @@ -21,6 +23,7 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" serializer "k8s.io/apimachinery/pkg/runtime/serializer" + autoscalingv1beta1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" pocv1alpha1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" ) @@ -38,7 +41,7 @@ func init() { // // import ( // "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kuberentes/scheme" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" // aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" // ) // @@ -48,6 +51,6 @@ func init() { // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. 
func AddToScheme(scheme *runtime.Scheme) { + autoscalingv1beta1.AddToScheme(scheme) pocv1alpha1.AddToScheme(scheme) - } diff --git a/vertical-pod-autoscaler/pkg/client/clientset/versioned/scheme/doc.go b/vertical-pod-autoscaler/pkg/client/clientset/versioned/scheme/doc.go index 3d3ab5f4edfa..7dc3756168fa 100644 --- a/vertical-pod-autoscaler/pkg/client/clientset/versioned/scheme/doc.go +++ b/vertical-pod-autoscaler/pkg/client/clientset/versioned/scheme/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,5 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + // This package contains the scheme of the automatically generated clientset. package scheme diff --git a/vertical-pod-autoscaler/pkg/client/clientset/versioned/scheme/register.go b/vertical-pod-autoscaler/pkg/client/clientset/versioned/scheme/register.go index dd10bc31c0ef..6baa40ca3779 100644 --- a/vertical-pod-autoscaler/pkg/client/clientset/versioned/scheme/register.go +++ b/vertical-pod-autoscaler/pkg/client/clientset/versioned/scheme/register.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package scheme import ( @@ -21,6 +23,7 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" serializer "k8s.io/apimachinery/pkg/runtime/serializer" + autoscalingv1beta1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" pocv1alpha1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" ) @@ -38,7 +41,7 @@ func init() { // // import ( // "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kuberentes/scheme" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" // aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" // ) // @@ -48,6 +51,6 @@ func init() { // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. func AddToScheme(scheme *runtime.Scheme) { + autoscalingv1beta1.AddToScheme(scheme) pocv1alpha1.AddToScheme(scheme) - } diff --git a/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1/autoscaling.k8s.io_client.go b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1/autoscaling.k8s.io_client.go new file mode 100644 index 000000000000..c31705938617 --- /dev/null +++ b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1/autoscaling.k8s.io_client.go @@ -0,0 +1,95 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + v1beta1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" + "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type AutoscalingV1beta1Interface interface { + RESTClient() rest.Interface + VerticalPodAutoscalersGetter + VerticalPodAutoscalerCheckpointsGetter +} + +// AutoscalingV1beta1Client is used to interact with features provided by the autoscaling.k8s.io group. +type AutoscalingV1beta1Client struct { + restClient rest.Interface +} + +func (c *AutoscalingV1beta1Client) VerticalPodAutoscalers(namespace string) VerticalPodAutoscalerInterface { + return newVerticalPodAutoscalers(c, namespace) +} + +func (c *AutoscalingV1beta1Client) VerticalPodAutoscalerCheckpoints(namespace string) VerticalPodAutoscalerCheckpointInterface { + return newVerticalPodAutoscalerCheckpoints(c, namespace) +} + +// NewForConfig creates a new AutoscalingV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*AutoscalingV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AutoscalingV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new AutoscalingV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AutoscalingV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AutoscalingV1beta1Client for the given RESTClient. +func New(c rest.Interface) *AutoscalingV1beta1Client { + return &AutoscalingV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AutoscalingV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1/doc.go b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1/doc.go new file mode 100644 index 000000000000..771101956f36 --- /dev/null +++ b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1/fake/doc.go b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1/fake/doc.go new file mode 100644 index 000000000000..16f44399065e --- /dev/null +++ b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1/fake/fake_autoscaling.k8s.io_client.go b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1/fake/fake_autoscaling.k8s.io_client.go new file mode 100644 index 000000000000..5df35155eeef --- /dev/null +++ b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1/fake/fake_autoscaling.k8s.io_client.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1beta1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeAutoscalingV1beta1 struct { + *testing.Fake +} + +func (c *FakeAutoscalingV1beta1) VerticalPodAutoscalers(namespace string) v1beta1.VerticalPodAutoscalerInterface { + return &FakeVerticalPodAutoscalers{c, namespace} +} + +func (c *FakeAutoscalingV1beta1) VerticalPodAutoscalerCheckpoints(namespace string) v1beta1.VerticalPodAutoscalerCheckpointInterface { + return &FakeVerticalPodAutoscalerCheckpoints{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeAutoscalingV1beta1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1/fake/fake_verticalpodautoscaler.go b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1/fake/fake_verticalpodautoscaler.go new file mode 100644 index 000000000000..fb350f826c6e --- /dev/null +++ b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1/fake/fake_verticalpodautoscaler.go @@ -0,0 +1,140 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1beta1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" + testing "k8s.io/client-go/testing" +) + +// FakeVerticalPodAutoscalers implements VerticalPodAutoscalerInterface +type FakeVerticalPodAutoscalers struct { + Fake *FakeAutoscalingV1beta1 + ns string +} + +var verticalpodautoscalersResource = schema.GroupVersionResource{Group: "autoscaling.k8s.io", Version: "v1beta1", Resource: "verticalpodautoscalers"} + +var verticalpodautoscalersKind = schema.GroupVersionKind{Group: "autoscaling.k8s.io", Version: "v1beta1", Kind: "VerticalPodAutoscaler"} + +// Get takes name of the verticalPodAutoscaler, and returns the corresponding verticalPodAutoscaler object, and an error if there is any. +func (c *FakeVerticalPodAutoscalers) Get(name string, options v1.GetOptions) (result *v1beta1.VerticalPodAutoscaler, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(verticalpodautoscalersResource, c.ns, name), &v1beta1.VerticalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.VerticalPodAutoscaler), err +} + +// List takes label and field selectors, and returns the list of VerticalPodAutoscalers that match those selectors. +func (c *FakeVerticalPodAutoscalers) List(opts v1.ListOptions) (result *v1beta1.VerticalPodAutoscalerList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(verticalpodautoscalersResource, verticalpodautoscalersKind, c.ns, opts), &v1beta1.VerticalPodAutoscalerList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.VerticalPodAutoscalerList{} + for _, item := range obj.(*v1beta1.VerticalPodAutoscalerList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested verticalPodAutoscalers. +func (c *FakeVerticalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(testing.NewWatchAction(verticalpodautoscalersResource, c.ns, opts)) + +} + +// Create takes the representation of a verticalPodAutoscaler and creates it. Returns the server's representation of the verticalPodAutoscaler, and an error, if there is any. +func (c *FakeVerticalPodAutoscalers) Create(verticalPodAutoscaler *v1beta1.VerticalPodAutoscaler) (result *v1beta1.VerticalPodAutoscaler, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(verticalpodautoscalersResource, c.ns, verticalPodAutoscaler), &v1beta1.VerticalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.VerticalPodAutoscaler), err +} + +// Update takes the representation of a verticalPodAutoscaler and updates it. Returns the server's representation of the verticalPodAutoscaler, and an error, if there is any. +func (c *FakeVerticalPodAutoscalers) Update(verticalPodAutoscaler *v1beta1.VerticalPodAutoscaler) (result *v1beta1.VerticalPodAutoscaler, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(verticalpodautoscalersResource, c.ns, verticalPodAutoscaler), &v1beta1.VerticalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.VerticalPodAutoscaler), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeVerticalPodAutoscalers) UpdateStatus(verticalPodAutoscaler *v1beta1.VerticalPodAutoscaler) (*v1beta1.VerticalPodAutoscaler, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(verticalpodautoscalersResource, "status", c.ns, verticalPodAutoscaler), &v1beta1.VerticalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.VerticalPodAutoscaler), err +} + +// Delete takes name of the verticalPodAutoscaler and deletes it. Returns an error if one occurs. +func (c *FakeVerticalPodAutoscalers) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(verticalpodautoscalersResource, c.ns, name), &v1beta1.VerticalPodAutoscaler{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeVerticalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(verticalpodautoscalersResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.VerticalPodAutoscalerList{}) + return err +} + +// Patch applies the patch and returns the patched verticalPodAutoscaler. +func (c *FakeVerticalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.VerticalPodAutoscaler, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(verticalpodautoscalersResource, c.ns, name, data, subresources...), &v1beta1.VerticalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.VerticalPodAutoscaler), err +} diff --git a/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1/fake/fake_verticalpodautoscalercheckpoint.go b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1/fake/fake_verticalpodautoscalercheckpoint.go new file mode 100644 index 000000000000..cd6aceda688e --- /dev/null +++ b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1/fake/fake_verticalpodautoscalercheckpoint.go @@ -0,0 +1,128 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1beta1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" + testing "k8s.io/client-go/testing" +) + +// FakeVerticalPodAutoscalerCheckpoints implements VerticalPodAutoscalerCheckpointInterface +type FakeVerticalPodAutoscalerCheckpoints struct { + Fake *FakeAutoscalingV1beta1 + ns string +} + +var verticalpodautoscalercheckpointsResource = schema.GroupVersionResource{Group: "autoscaling.k8s.io", Version: "v1beta1", Resource: "verticalpodautoscalercheckpoints"} + +var verticalpodautoscalercheckpointsKind = schema.GroupVersionKind{Group: "autoscaling.k8s.io", Version: "v1beta1", Kind: "VerticalPodAutoscalerCheckpoint"} + +// Get takes name of the verticalPodAutoscalerCheckpoint, and returns the corresponding verticalPodAutoscalerCheckpoint object, and an error if there is any. +func (c *FakeVerticalPodAutoscalerCheckpoints) Get(name string, options v1.GetOptions) (result *v1beta1.VerticalPodAutoscalerCheckpoint, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(verticalpodautoscalercheckpointsResource, c.ns, name), &v1beta1.VerticalPodAutoscalerCheckpoint{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.VerticalPodAutoscalerCheckpoint), err +} + +// List takes label and field selectors, and returns the list of VerticalPodAutoscalerCheckpoints that match those selectors. +func (c *FakeVerticalPodAutoscalerCheckpoints) List(opts v1.ListOptions) (result *v1beta1.VerticalPodAutoscalerCheckpointList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(verticalpodautoscalercheckpointsResource, verticalpodautoscalercheckpointsKind, c.ns, opts), &v1beta1.VerticalPodAutoscalerCheckpointList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.VerticalPodAutoscalerCheckpointList{} + for _, item := range obj.(*v1beta1.VerticalPodAutoscalerCheckpointList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested verticalPodAutoscalerCheckpoints. +func (c *FakeVerticalPodAutoscalerCheckpoints) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(verticalpodautoscalercheckpointsResource, c.ns, opts)) + +} + +// Create takes the representation of a verticalPodAutoscalerCheckpoint and creates it. Returns the server's representation of the verticalPodAutoscalerCheckpoint, and an error, if there is any. +func (c *FakeVerticalPodAutoscalerCheckpoints) Create(verticalPodAutoscalerCheckpoint *v1beta1.VerticalPodAutoscalerCheckpoint) (result *v1beta1.VerticalPodAutoscalerCheckpoint, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(verticalpodautoscalercheckpointsResource, c.ns, verticalPodAutoscalerCheckpoint), &v1beta1.VerticalPodAutoscalerCheckpoint{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.VerticalPodAutoscalerCheckpoint), err +} + +// Update takes the representation of a verticalPodAutoscalerCheckpoint and updates it. Returns the server's representation of the verticalPodAutoscalerCheckpoint, and an error, if there is any. +func (c *FakeVerticalPodAutoscalerCheckpoints) Update(verticalPodAutoscalerCheckpoint *v1beta1.VerticalPodAutoscalerCheckpoint) (result *v1beta1.VerticalPodAutoscalerCheckpoint, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(verticalpodautoscalercheckpointsResource, c.ns, verticalPodAutoscalerCheckpoint), &v1beta1.VerticalPodAutoscalerCheckpoint{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.VerticalPodAutoscalerCheckpoint), err +} + +// Delete takes name of the verticalPodAutoscalerCheckpoint and deletes it. Returns an error if one occurs. +func (c *FakeVerticalPodAutoscalerCheckpoints) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(verticalpodautoscalercheckpointsResource, c.ns, name), &v1beta1.VerticalPodAutoscalerCheckpoint{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeVerticalPodAutoscalerCheckpoints) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(verticalpodautoscalercheckpointsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.VerticalPodAutoscalerCheckpointList{}) + return err +} + +// Patch applies the patch and returns the patched verticalPodAutoscalerCheckpoint. +func (c *FakeVerticalPodAutoscalerCheckpoints) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.VerticalPodAutoscalerCheckpoint, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(verticalpodautoscalercheckpointsResource, c.ns, name, data, subresources...), &v1beta1.VerticalPodAutoscalerCheckpoint{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.VerticalPodAutoscalerCheckpoint), err +} diff --git a/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1/generated_expansion.go b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1/generated_expansion.go new file mode 100644 index 000000000000..78a312931d78 --- /dev/null +++ b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1/generated_expansion.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +type VerticalPodAutoscalerExpansion interface{} + +type VerticalPodAutoscalerCheckpointExpansion interface{} diff --git a/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1/verticalpodautoscaler.go b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1/verticalpodautoscaler.go new file mode 100644 index 000000000000..bec1d48b6c34 --- /dev/null +++ b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1/verticalpodautoscaler.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1beta1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" + scheme "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +// VerticalPodAutoscalersGetter has a method to return a VerticalPodAutoscalerInterface. +// A group's client should implement this interface. +type VerticalPodAutoscalersGetter interface { + VerticalPodAutoscalers(namespace string) VerticalPodAutoscalerInterface +} + +// VerticalPodAutoscalerInterface has methods to work with VerticalPodAutoscaler resources. 
+type VerticalPodAutoscalerInterface interface { + Create(*v1beta1.VerticalPodAutoscaler) (*v1beta1.VerticalPodAutoscaler, error) + Update(*v1beta1.VerticalPodAutoscaler) (*v1beta1.VerticalPodAutoscaler, error) + UpdateStatus(*v1beta1.VerticalPodAutoscaler) (*v1beta1.VerticalPodAutoscaler, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.VerticalPodAutoscaler, error) + List(opts v1.ListOptions) (*v1beta1.VerticalPodAutoscalerList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.VerticalPodAutoscaler, err error) + VerticalPodAutoscalerExpansion +} + +// verticalPodAutoscalers implements VerticalPodAutoscalerInterface +type verticalPodAutoscalers struct { + client rest.Interface + ns string +} + +// newVerticalPodAutoscalers returns a VerticalPodAutoscalers +func newVerticalPodAutoscalers(c *AutoscalingV1beta1Client, namespace string) *verticalPodAutoscalers { + return &verticalPodAutoscalers{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the verticalPodAutoscaler, and returns the corresponding verticalPodAutoscaler object, and an error if there is any. +func (c *verticalPodAutoscalers) Get(name string, options v1.GetOptions) (result *v1beta1.VerticalPodAutoscaler, err error) { + result = &v1beta1.VerticalPodAutoscaler{} + err = c.client.Get(). + Namespace(c.ns). + Resource("verticalpodautoscalers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of VerticalPodAutoscalers that match those selectors. +func (c *verticalPodAutoscalers) List(opts v1.ListOptions) (result *v1beta1.VerticalPodAutoscalerList, err error) { + result = &v1beta1.VerticalPodAutoscalerList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("verticalpodautoscalers"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested verticalPodAutoscalers. +func (c *verticalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("verticalpodautoscalers"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a verticalPodAutoscaler and creates it. Returns the server's representation of the verticalPodAutoscaler, and an error, if there is any. +func (c *verticalPodAutoscalers) Create(verticalPodAutoscaler *v1beta1.VerticalPodAutoscaler) (result *v1beta1.VerticalPodAutoscaler, err error) { + result = &v1beta1.VerticalPodAutoscaler{} + err = c.client.Post(). + Namespace(c.ns). + Resource("verticalpodautoscalers"). + Body(verticalPodAutoscaler). + Do(). + Into(result) + return +} + +// Update takes the representation of a verticalPodAutoscaler and updates it. Returns the server's representation of the verticalPodAutoscaler, and an error, if there is any. +func (c *verticalPodAutoscalers) Update(verticalPodAutoscaler *v1beta1.VerticalPodAutoscaler) (result *v1beta1.VerticalPodAutoscaler, err error) { + result = &v1beta1.VerticalPodAutoscaler{} + err = c.client.Put(). + Namespace(c.ns). + Resource("verticalpodautoscalers"). + Name(verticalPodAutoscaler.Name). + Body(verticalPodAutoscaler). + Do(). 
+ Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *verticalPodAutoscalers) UpdateStatus(verticalPodAutoscaler *v1beta1.VerticalPodAutoscaler) (result *v1beta1.VerticalPodAutoscaler, err error) { + result = &v1beta1.VerticalPodAutoscaler{} + err = c.client.Put(). + Namespace(c.ns). + Resource("verticalpodautoscalers"). + Name(verticalPodAutoscaler.Name). + SubResource("status"). + Body(verticalPodAutoscaler). + Do(). + Into(result) + return +} + +// Delete takes name of the verticalPodAutoscaler and deletes it. Returns an error if one occurs. +func (c *verticalPodAutoscalers) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("verticalpodautoscalers"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *verticalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("verticalpodautoscalers"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched verticalPodAutoscaler. +func (c *verticalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.VerticalPodAutoscaler, err error) { + result = &v1beta1.VerticalPodAutoscaler{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("verticalpodautoscalers"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1/verticalpodautoscalercheckpoint.go b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1/verticalpodautoscalercheckpoint.go new file mode 100644 index 000000000000..071302035a00 --- /dev/null +++ b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1/verticalpodautoscalercheckpoint.go @@ -0,0 +1,157 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + v1beta1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" + scheme "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +// VerticalPodAutoscalerCheckpointsGetter has a method to return a VerticalPodAutoscalerCheckpointInterface. +// A group's client should implement this interface. 
+type VerticalPodAutoscalerCheckpointsGetter interface { + VerticalPodAutoscalerCheckpoints(namespace string) VerticalPodAutoscalerCheckpointInterface +} + +// VerticalPodAutoscalerCheckpointInterface has methods to work with VerticalPodAutoscalerCheckpoint resources. +type VerticalPodAutoscalerCheckpointInterface interface { + Create(*v1beta1.VerticalPodAutoscalerCheckpoint) (*v1beta1.VerticalPodAutoscalerCheckpoint, error) + Update(*v1beta1.VerticalPodAutoscalerCheckpoint) (*v1beta1.VerticalPodAutoscalerCheckpoint, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.VerticalPodAutoscalerCheckpoint, error) + List(opts v1.ListOptions) (*v1beta1.VerticalPodAutoscalerCheckpointList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.VerticalPodAutoscalerCheckpoint, err error) + VerticalPodAutoscalerCheckpointExpansion +} + +// verticalPodAutoscalerCheckpoints implements VerticalPodAutoscalerCheckpointInterface +type verticalPodAutoscalerCheckpoints struct { + client rest.Interface + ns string +} + +// newVerticalPodAutoscalerCheckpoints returns a VerticalPodAutoscalerCheckpoints +func newVerticalPodAutoscalerCheckpoints(c *AutoscalingV1beta1Client, namespace string) *verticalPodAutoscalerCheckpoints { + return &verticalPodAutoscalerCheckpoints{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the verticalPodAutoscalerCheckpoint, and returns the corresponding verticalPodAutoscalerCheckpoint object, and an error if there is any. +func (c *verticalPodAutoscalerCheckpoints) Get(name string, options v1.GetOptions) (result *v1beta1.VerticalPodAutoscalerCheckpoint, err error) { + result = &v1beta1.VerticalPodAutoscalerCheckpoint{} + err = c.client.Get(). + Namespace(c.ns). + Resource("verticalpodautoscalercheckpoints"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of VerticalPodAutoscalerCheckpoints that match those selectors. +func (c *verticalPodAutoscalerCheckpoints) List(opts v1.ListOptions) (result *v1beta1.VerticalPodAutoscalerCheckpointList, err error) { + result = &v1beta1.VerticalPodAutoscalerCheckpointList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("verticalpodautoscalercheckpoints"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested verticalPodAutoscalerCheckpoints. +func (c *verticalPodAutoscalerCheckpoints) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("verticalpodautoscalercheckpoints"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a verticalPodAutoscalerCheckpoint and creates it. Returns the server's representation of the verticalPodAutoscalerCheckpoint, and an error, if there is any. +func (c *verticalPodAutoscalerCheckpoints) Create(verticalPodAutoscalerCheckpoint *v1beta1.VerticalPodAutoscalerCheckpoint) (result *v1beta1.VerticalPodAutoscalerCheckpoint, err error) { + result = &v1beta1.VerticalPodAutoscalerCheckpoint{} + err = c.client.Post(). + Namespace(c.ns). + Resource("verticalpodautoscalercheckpoints"). 
+ Body(verticalPodAutoscalerCheckpoint). + Do(). + Into(result) + return +} + +// Update takes the representation of a verticalPodAutoscalerCheckpoint and updates it. Returns the server's representation of the verticalPodAutoscalerCheckpoint, and an error, if there is any. +func (c *verticalPodAutoscalerCheckpoints) Update(verticalPodAutoscalerCheckpoint *v1beta1.VerticalPodAutoscalerCheckpoint) (result *v1beta1.VerticalPodAutoscalerCheckpoint, err error) { + result = &v1beta1.VerticalPodAutoscalerCheckpoint{} + err = c.client.Put(). + Namespace(c.ns). + Resource("verticalpodautoscalercheckpoints"). + Name(verticalPodAutoscalerCheckpoint.Name). + Body(verticalPodAutoscalerCheckpoint). + Do(). + Into(result) + return +} + +// Delete takes name of the verticalPodAutoscalerCheckpoint and deletes it. Returns an error if one occurs. +func (c *verticalPodAutoscalerCheckpoints) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("verticalpodautoscalercheckpoints"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *verticalPodAutoscalerCheckpoints) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("verticalpodautoscalercheckpoints"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched verticalPodAutoscalerCheckpoint. +func (c *verticalPodAutoscalerCheckpoints) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.VerticalPodAutoscalerCheckpoint, err error) { + result = &v1beta1.VerticalPodAutoscalerCheckpoint{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("verticalpodautoscalercheckpoints"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/doc.go b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/doc.go index 08a9c7ceba43..df51baa4d4c1 100644 --- a/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/doc.go +++ b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,5 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + // This package has the automatically generated typed clients. package v1alpha1 diff --git a/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/fake/doc.go b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/fake/doc.go index 63e2c8a08219..16f44399065e 100644 --- a/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/fake/doc.go +++ b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,5 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + // Package fake has the automatically generated clients. package fake diff --git a/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/fake/fake_poc.autoscaling.k8s.io_client.go b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/fake/fake_poc.autoscaling.k8s.io_client.go index 46e97683d69a..d2b4b81d38d4 100644 --- a/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/fake/fake_poc.autoscaling.k8s.io_client.go +++ b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/fake/fake_poc.autoscaling.k8s.io_client.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( diff --git a/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/fake/fake_verticalpodautoscaler.go b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/fake/fake_verticalpodautoscaler.go index f5d0eeac608c..d90400f0f723 100644 --- a/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/fake/fake_verticalpodautoscaler.go +++ b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/fake/fake_verticalpodautoscaler.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package fake import ( diff --git a/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/fake/fake_verticalpodautoscalercheckpoint.go b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/fake/fake_verticalpodautoscalercheckpoint.go index 4bbd8e39f54a..02f9e9998970 100644 --- a/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/fake/fake_verticalpodautoscalercheckpoint.go +++ b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/fake/fake_verticalpodautoscalercheckpoint.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
+ package fake import ( diff --git a/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/generated_expansion.go b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/generated_expansion.go index 27f6f1a94b50..0cb9837cfa0e 100644 --- a/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/generated_expansion.go +++ b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1alpha1 type VerticalPodAutoscalerExpansion interface{} diff --git a/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/poc.autoscaling.k8s.io_client.go b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/poc.autoscaling.k8s.io_client.go index 6e43e45b2258..7d842c35c588 100644 --- a/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/poc.autoscaling.k8s.io_client.go +++ b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/poc.autoscaling.k8s.io_client.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1alpha1 import ( diff --git a/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/verticalpodautoscaler.go b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/verticalpodautoscaler.go index 007f07eeae24..6dae04549916 100644 --- a/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/verticalpodautoscaler.go +++ b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/verticalpodautoscaler.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
+ package v1alpha1 import ( diff --git a/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/verticalpodautoscalercheckpoint.go b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/verticalpodautoscalercheckpoint.go index 5b953d4caf45..39a9a0def31f 100644 --- a/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/verticalpodautoscalercheckpoint.go +++ b/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1/verticalpodautoscalercheckpoint.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. + package v1alpha1 import ( diff --git a/vertical-pod-autoscaler/pkg/client/informers/externalversions/autoscaling.k8s.io/interface.go b/vertical-pod-autoscaler/pkg/client/informers/externalversions/autoscaling.k8s.io/interface.go new file mode 100644 index 000000000000..5b7881188b33 --- /dev/null +++ b/vertical-pod-autoscaler/pkg/client/informers/externalversions/autoscaling.k8s.io/interface.go @@ -0,0 +1,46 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package autoscaling + +import ( + v1beta1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/informers/externalversions/autoscaling.k8s.io/v1beta1" + internalinterfaces "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/informers/externalversions/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1beta1 returns a new v1beta1.Interface. 
+func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vertical-pod-autoscaler/pkg/client/informers/externalversions/autoscaling.k8s.io/v1beta1/interface.go b/vertical-pod-autoscaler/pkg/client/informers/externalversions/autoscaling.k8s.io/v1beta1/interface.go new file mode 100644 index 000000000000..0588575d64f9 --- /dev/null +++ b/vertical-pod-autoscaler/pkg/client/informers/externalversions/autoscaling.k8s.io/v1beta1/interface.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + internalinterfaces "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // VerticalPodAutoscalers returns a VerticalPodAutoscalerInformer. + VerticalPodAutoscalers() VerticalPodAutoscalerInformer + // VerticalPodAutoscalerCheckpoints returns a VerticalPodAutoscalerCheckpointInformer. + VerticalPodAutoscalerCheckpoints() VerticalPodAutoscalerCheckpointInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// VerticalPodAutoscalers returns a VerticalPodAutoscalerInformer. +func (v *version) VerticalPodAutoscalers() VerticalPodAutoscalerInformer { + return &verticalPodAutoscalerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// VerticalPodAutoscalerCheckpoints returns a VerticalPodAutoscalerCheckpointInformer. +func (v *version) VerticalPodAutoscalerCheckpoints() VerticalPodAutoscalerCheckpointInformer { + return &verticalPodAutoscalerCheckpointInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vertical-pod-autoscaler/pkg/client/informers/externalversions/autoscaling.k8s.io/v1beta1/verticalpodautoscaler.go b/vertical-pod-autoscaler/pkg/client/informers/externalversions/autoscaling.k8s.io/v1beta1/verticalpodautoscaler.go new file mode 100644 index 000000000000..f9a48f381463 --- /dev/null +++ b/vertical-pod-autoscaler/pkg/client/informers/externalversions/autoscaling.k8s.io/v1beta1/verticalpodautoscaler.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + autoscaling_k8s_io_v1beta1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" + versioned "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned" + internalinterfaces "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/informers/externalversions/internalinterfaces" + v1beta1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1beta1" + cache "k8s.io/client-go/tools/cache" +) + +// VerticalPodAutoscalerInformer provides access to a shared informer and lister for +// VerticalPodAutoscalers. +type VerticalPodAutoscalerInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.VerticalPodAutoscalerLister +} + +type verticalPodAutoscalerInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewVerticalPodAutoscalerInformer constructs a new informer for VerticalPodAutoscaler type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewVerticalPodAutoscalerInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredVerticalPodAutoscalerInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredVerticalPodAutoscalerInformer constructs a new informer for VerticalPodAutoscaler type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredVerticalPodAutoscalerInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AutoscalingV1beta1().VerticalPodAutoscalers(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AutoscalingV1beta1().VerticalPodAutoscalers(namespace).Watch(options) + }, + }, + &autoscaling_k8s_io_v1beta1.VerticalPodAutoscaler{}, + resyncPeriod, + indexers, + ) +} + +func (f *verticalPodAutoscalerInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredVerticalPodAutoscalerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *verticalPodAutoscalerInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&autoscaling_k8s_io_v1beta1.VerticalPodAutoscaler{}, f.defaultInformer) +} + +func (f *verticalPodAutoscalerInformer) Lister() v1beta1.VerticalPodAutoscalerLister { + return v1beta1.NewVerticalPodAutoscalerLister(f.Informer().GetIndexer()) +} diff --git a/vertical-pod-autoscaler/pkg/client/informers/externalversions/autoscaling.k8s.io/v1beta1/verticalpodautoscalercheckpoint.go b/vertical-pod-autoscaler/pkg/client/informers/externalversions/autoscaling.k8s.io/v1beta1/verticalpodautoscalercheckpoint.go new file mode 100644 index 000000000000..faf8189d3b08 --- /dev/null +++ b/vertical-pod-autoscaler/pkg/client/informers/externalversions/autoscaling.k8s.io/v1beta1/verticalpodautoscalercheckpoint.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + autoscaling_k8s_io_v1beta1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" + versioned "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned" + internalinterfaces "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/informers/externalversions/internalinterfaces" + v1beta1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1beta1" + cache "k8s.io/client-go/tools/cache" +) + +// VerticalPodAutoscalerCheckpointInformer provides access to a shared informer and lister for +// VerticalPodAutoscalerCheckpoints. 
+type VerticalPodAutoscalerCheckpointInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.VerticalPodAutoscalerCheckpointLister +} + +type verticalPodAutoscalerCheckpointInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewVerticalPodAutoscalerCheckpointInformer constructs a new informer for VerticalPodAutoscalerCheckpoint type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewVerticalPodAutoscalerCheckpointInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredVerticalPodAutoscalerCheckpointInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredVerticalPodAutoscalerCheckpointInformer constructs a new informer for VerticalPodAutoscalerCheckpoint type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredVerticalPodAutoscalerCheckpointInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AutoscalingV1beta1().VerticalPodAutoscalerCheckpoints(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AutoscalingV1beta1().VerticalPodAutoscalerCheckpoints(namespace).Watch(options) + }, + }, + &autoscaling_k8s_io_v1beta1.VerticalPodAutoscalerCheckpoint{}, + resyncPeriod, + indexers, + ) +} + +func (f *verticalPodAutoscalerCheckpointInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredVerticalPodAutoscalerCheckpointInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *verticalPodAutoscalerCheckpointInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&autoscaling_k8s_io_v1beta1.VerticalPodAutoscalerCheckpoint{}, f.defaultInformer) +} + +func (f *verticalPodAutoscalerCheckpointInformer) Lister() v1beta1.VerticalPodAutoscalerCheckpointLister { + return v1beta1.NewVerticalPodAutoscalerCheckpointLister(f.Informer().GetIndexer()) +} diff --git a/vertical-pod-autoscaler/pkg/client/informers/externalversions/factory.go b/vertical-pod-autoscaler/pkg/client/informers/externalversions/factory.go index 48ec4b86df14..0ee9db3ffc11 100644 --- a/vertical-pod-autoscaler/pkg/client/informers/externalversions/factory.go +++ b/vertical-pod-autoscaler/pkg/client/informers/externalversions/factory.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was automatically generated by informer-gen +// Code generated by informer-gen. DO NOT EDIT. package externalversions @@ -27,17 +27,22 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" versioned "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned" + autoscaling_k8s_io "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/informers/externalversions/autoscaling.k8s.io" internalinterfaces "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/informers/externalversions/internalinterfaces" poc_autoscaling_k8s_io "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/informers/externalversions/poc.autoscaling.k8s.io" cache "k8s.io/client-go/tools/cache" ) +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + type sharedInformerFactory struct { client versioned.Interface namespace string tweakListOptions internalinterfaces.TweakListOptionsFunc lock sync.Mutex defaultResync time.Duration + customResync map[reflect.Type]time.Duration informers map[reflect.Type]cache.SharedIndexInformer // startedInformers is used for tracking which informers have been started. @@ -45,23 +50,62 @@ type sharedInformerFactory struct { startedInformers map[reflect.Type]bool } -// NewSharedInformerFactory constructs a new instance of sharedInformerFactory +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { - return NewFilteredSharedInformerFactory(client, defaultResync, v1.NamespaceAll, nil) + return NewSharedInformerFactoryWithOptions(client, defaultResync) } // NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. // Listers obtained via this SharedInformerFactory will be subject to the same filters // as specified here. 
+// Deprecated: Please use NewSharedInformerFactoryWithOptions instead func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { - return &sharedInformerFactory{ + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. +func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ client: client, - namespace: namespace, - tweakListOptions: tweakListOptions, + namespace: v1.NamespaceAll, defaultResync: defaultResync, informers: make(map[reflect.Type]cache.SharedIndexInformer), startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory } // Start initializes all requested informers. @@ -110,7 +154,13 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal if exists { return informer } - informer = newFunc(f.client, f.defaultResync) + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) f.informers[informerType] = informer return informer @@ -123,9 +173,14 @@ type SharedInformerFactory interface { ForResource(resource schema.GroupVersionResource) (GenericInformer, error) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + Autoscaling() autoscaling_k8s_io.Interface Poc() poc_autoscaling_k8s_io.Interface } +func (f *sharedInformerFactory) Autoscaling() autoscaling_k8s_io.Interface { + return autoscaling_k8s_io.New(f, f.namespace, f.tweakListOptions) +} + func (f *sharedInformerFactory) Poc() poc_autoscaling_k8s_io.Interface { return poc_autoscaling_k8s_io.New(f, f.namespace, f.tweakListOptions) } diff --git a/vertical-pod-autoscaler/pkg/client/informers/externalversions/generic.go b/vertical-pod-autoscaler/pkg/client/informers/externalversions/generic.go index eda689b881c0..c376ef29f60b 100644 --- a/vertical-pod-autoscaler/pkg/client/informers/externalversions/generic.go +++ b/vertical-pod-autoscaler/pkg/client/informers/externalversions/generic.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was automatically generated by informer-gen +// Code generated by informer-gen. DO NOT EDIT. 
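The factory rework above replaces positional constructor arguments with composable `SharedInformerOption` values and adds per-type resync overrides. A usage sketch, assuming a populated `*rest.Config`; every factory call below comes from this hunk:

```go
package example

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	versioned "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned"
	"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/informers/externalversions"
	"k8s.io/client-go/rest"
)

func listVpasFromCache(config *rest.Config) error {
	client := versioned.NewForConfigOrDie(config)
	factory := externalversions.NewSharedInformerFactoryWithOptions(
		client,
		10*time.Minute, // default resync; WithCustomResyncConfig can override it per object type
		externalversions.WithNamespace("default"), // scope all listers and watches to one namespace
	)
	vpaInformer := factory.Autoscaling().V1beta1().VerticalPodAutoscalers()

	stopCh := make(chan struct{})
	factory.Start(stopCh)            // starts every informer requested so far
	factory.WaitForCacheSync(stopCh) // blocks until the initial LIST has filled the caches

	vpas, err := vpaInformer.Lister().List(labels.Everything())
	if err != nil {
		return err
	}
	fmt.Printf("cached VPAs: %d\n", len(vpas))
	return nil
}
```

The option functions mutate and return the factory, so they compose left to right; when two options touch the same field, the later one wins.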
package externalversions @@ -22,6 +22,7 @@ import ( "fmt" schema "k8s.io/apimachinery/pkg/runtime/schema" + v1beta1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" v1alpha1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -52,7 +53,13 @@ func (f *genericInformer) Lister() cache.GenericLister { // TODO extend this to unknown resources with a client pool func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { - // Group=poc.autoscaling.k8s.io, Version=v1alpha1 + // Group=autoscaling.k8s.io, Version=v1beta1 + case v1beta1.SchemeGroupVersion.WithResource("verticalpodautoscalers"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Autoscaling().V1beta1().VerticalPodAutoscalers().Informer()}, nil + case v1beta1.SchemeGroupVersion.WithResource("verticalpodautoscalercheckpoints"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Autoscaling().V1beta1().VerticalPodAutoscalerCheckpoints().Informer()}, nil + + // Group=poc.autoscaling.k8s.io, Version=v1alpha1 case v1alpha1.SchemeGroupVersion.WithResource("verticalpodautoscalers"): return &genericInformer{resource: resource.GroupResource(), informer: f.Poc().V1alpha1().VerticalPodAutoscalers().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("verticalpodautoscalercheckpoints"): diff --git a/vertical-pod-autoscaler/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/vertical-pod-autoscaler/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go index 9ca6eff180b3..ed05b0289098 100644 --- a/vertical-pod-autoscaler/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/vertical-pod-autoscaler/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was automatically generated by informer-gen +// Code generated by informer-gen. DO NOT EDIT. package internalinterfaces diff --git a/vertical-pod-autoscaler/pkg/client/informers/externalversions/poc.autoscaling.k8s.io/interface.go b/vertical-pod-autoscaler/pkg/client/informers/externalversions/poc.autoscaling.k8s.io/interface.go index ed3ec8788f2f..aa905c9e424e 100644 --- a/vertical-pod-autoscaler/pkg/client/informers/externalversions/poc.autoscaling.k8s.io/interface.go +++ b/vertical-pod-autoscaler/pkg/client/informers/externalversions/poc.autoscaling.k8s.io/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was automatically generated by informer-gen +// Code generated by informer-gen. DO NOT EDIT. 
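The `ForResource` cases added in the generic.go hunk above also make the new kinds reachable generically, keyed by `GroupVersionResource` instead of typed accessors. A sketch of that dynamic path; the factory is assumed to be constructed as shown earlier:

```go
package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/informers/externalversions"
)

func countVpasGenerically(factory externalversions.SharedInformerFactory) error {
	gvr := schema.GroupVersionResource{
		Group:    "autoscaling.k8s.io",
		Version:  "v1beta1",
		Resource: "verticalpodautoscalers",
	}
	generic, err := factory.ForResource(gvr) // resolved through the switch above
	if err != nil {
		return err // unknown group/version/resource for this factory
	}
	// The generic lister yields runtime.Objects; callers type-assert as needed.
	objs, err := generic.Lister().List(labels.Everything())
	if err != nil {
		return err
	}
	fmt.Printf("generic listing found %d objects\n", len(objs))
	return nil
}
```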
package poc diff --git a/vertical-pod-autoscaler/pkg/client/informers/externalversions/poc.autoscaling.k8s.io/v1alpha1/interface.go b/vertical-pod-autoscaler/pkg/client/informers/externalversions/poc.autoscaling.k8s.io/v1alpha1/interface.go index ee8dc88573a9..9bc8104d9463 100644 --- a/vertical-pod-autoscaler/pkg/client/informers/externalversions/poc.autoscaling.k8s.io/v1alpha1/interface.go +++ b/vertical-pod-autoscaler/pkg/client/informers/externalversions/poc.autoscaling.k8s.io/v1alpha1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was automatically generated by informer-gen +// Code generated by informer-gen. DO NOT EDIT. package v1alpha1 diff --git a/vertical-pod-autoscaler/pkg/client/informers/externalversions/poc.autoscaling.k8s.io/v1alpha1/verticalpodautoscaler.go b/vertical-pod-autoscaler/pkg/client/informers/externalversions/poc.autoscaling.k8s.io/v1alpha1/verticalpodautoscaler.go index cddd33484baf..702ef746f0fd 100644 --- a/vertical-pod-autoscaler/pkg/client/informers/externalversions/poc.autoscaling.k8s.io/v1alpha1/verticalpodautoscaler.go +++ b/vertical-pod-autoscaler/pkg/client/informers/externalversions/poc.autoscaling.k8s.io/v1alpha1/verticalpodautoscaler.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was automatically generated by informer-gen +// Code generated by informer-gen. DO NOT EDIT. package v1alpha1 diff --git a/vertical-pod-autoscaler/pkg/client/informers/externalversions/poc.autoscaling.k8s.io/v1alpha1/verticalpodautoscalercheckpoint.go b/vertical-pod-autoscaler/pkg/client/informers/externalversions/poc.autoscaling.k8s.io/v1alpha1/verticalpodautoscalercheckpoint.go index 69cc81223f53..34c30c8b6c04 100644 --- a/vertical-pod-autoscaler/pkg/client/informers/externalversions/poc.autoscaling.k8s.io/v1alpha1/verticalpodautoscalercheckpoint.go +++ b/vertical-pod-autoscaler/pkg/client/informers/externalversions/poc.autoscaling.k8s.io/v1alpha1/verticalpodautoscalercheckpoint.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was automatically generated by informer-gen +// Code generated by informer-gen. DO NOT EDIT. package v1alpha1 diff --git a/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1beta1/expansion_generated.go b/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1beta1/expansion_generated.go new file mode 100644 index 000000000000..9ee9e53dc308 --- /dev/null +++ b/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1beta1/expansion_generated.go @@ -0,0 +1,35 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +// VerticalPodAutoscalerListerExpansion allows custom methods to be added to +// VerticalPodAutoscalerLister. +type VerticalPodAutoscalerListerExpansion interface{} + +// VerticalPodAutoscalerNamespaceListerExpansion allows custom methods to be added to +// VerticalPodAutoscalerNamespaceLister. +type VerticalPodAutoscalerNamespaceListerExpansion interface{} + +// VerticalPodAutoscalerCheckpointListerExpansion allows custom methods to be added to +// VerticalPodAutoscalerCheckpointLister. +type VerticalPodAutoscalerCheckpointListerExpansion interface{} + +// VerticalPodAutoscalerCheckpointNamespaceListerExpansion allows custom methods to be added to +// VerticalPodAutoscalerCheckpointNamespaceLister. +type VerticalPodAutoscalerCheckpointNamespaceListerExpansion interface{} diff --git a/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1beta1/verticalpodautoscaler.go b/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1beta1/verticalpodautoscaler.go new file mode 100644 index 000000000000..1e7346ddfbf3 --- /dev/null +++ b/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1beta1/verticalpodautoscaler.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + v1beta1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" + "k8s.io/client-go/tools/cache" +) + +// VerticalPodAutoscalerLister helps list VerticalPodAutoscalers. +type VerticalPodAutoscalerLister interface { + // List lists all VerticalPodAutoscalers in the indexer. + List(selector labels.Selector) (ret []*v1beta1.VerticalPodAutoscaler, err error) + // VerticalPodAutoscalers returns an object that can list and get VerticalPodAutoscalers. + VerticalPodAutoscalers(namespace string) VerticalPodAutoscalerNamespaceLister + VerticalPodAutoscalerListerExpansion +} + +// verticalPodAutoscalerLister implements the VerticalPodAutoscalerLister interface. +type verticalPodAutoscalerLister struct { + indexer cache.Indexer +} + +// NewVerticalPodAutoscalerLister returns a new VerticalPodAutoscalerLister. 
+func NewVerticalPodAutoscalerLister(indexer cache.Indexer) VerticalPodAutoscalerLister { + return &verticalPodAutoscalerLister{indexer: indexer} +} + +// List lists all VerticalPodAutoscalers in the indexer. +func (s *verticalPodAutoscalerLister) List(selector labels.Selector) (ret []*v1beta1.VerticalPodAutoscaler, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.VerticalPodAutoscaler)) + }) + return ret, err +} + +// VerticalPodAutoscalers returns an object that can list and get VerticalPodAutoscalers. +func (s *verticalPodAutoscalerLister) VerticalPodAutoscalers(namespace string) VerticalPodAutoscalerNamespaceLister { + return verticalPodAutoscalerNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// VerticalPodAutoscalerNamespaceLister helps list and get VerticalPodAutoscalers. +type VerticalPodAutoscalerNamespaceLister interface { + // List lists all VerticalPodAutoscalers in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.VerticalPodAutoscaler, err error) + // Get retrieves the VerticalPodAutoscaler from the indexer for a given namespace and name. + Get(name string) (*v1beta1.VerticalPodAutoscaler, error) + VerticalPodAutoscalerNamespaceListerExpansion +} + +// verticalPodAutoscalerNamespaceLister implements the VerticalPodAutoscalerNamespaceLister +// interface. +type verticalPodAutoscalerNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all VerticalPodAutoscalers in the indexer for a given namespace. +func (s verticalPodAutoscalerNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.VerticalPodAutoscaler, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.VerticalPodAutoscaler)) + }) + return ret, err +} + +// Get retrieves the VerticalPodAutoscaler from the indexer for a given namespace and name. +func (s verticalPodAutoscalerNamespaceLister) Get(name string) (*v1beta1.VerticalPodAutoscaler, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("verticalpodautoscaler"), name) + } + return obj.(*v1beta1.VerticalPodAutoscaler), nil +} diff --git a/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1beta1/verticalpodautoscalercheckpoint.go b/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1beta1/verticalpodautoscalercheckpoint.go new file mode 100644 index 000000000000..7eba6bc926dd --- /dev/null +++ b/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1beta1/verticalpodautoscalercheckpoint.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + v1beta1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" + "k8s.io/client-go/tools/cache" +) + +// VerticalPodAutoscalerCheckpointLister helps list VerticalPodAutoscalerCheckpoints. +type VerticalPodAutoscalerCheckpointLister interface { + // List lists all VerticalPodAutoscalerCheckpoints in the indexer. + List(selector labels.Selector) (ret []*v1beta1.VerticalPodAutoscalerCheckpoint, err error) + // VerticalPodAutoscalerCheckpoints returns an object that can list and get VerticalPodAutoscalerCheckpoints. + VerticalPodAutoscalerCheckpoints(namespace string) VerticalPodAutoscalerCheckpointNamespaceLister + VerticalPodAutoscalerCheckpointListerExpansion +} + +// verticalPodAutoscalerCheckpointLister implements the VerticalPodAutoscalerCheckpointLister interface. +type verticalPodAutoscalerCheckpointLister struct { + indexer cache.Indexer +} + +// NewVerticalPodAutoscalerCheckpointLister returns a new VerticalPodAutoscalerCheckpointLister. +func NewVerticalPodAutoscalerCheckpointLister(indexer cache.Indexer) VerticalPodAutoscalerCheckpointLister { + return &verticalPodAutoscalerCheckpointLister{indexer: indexer} +} + +// List lists all VerticalPodAutoscalerCheckpoints in the indexer. +func (s *verticalPodAutoscalerCheckpointLister) List(selector labels.Selector) (ret []*v1beta1.VerticalPodAutoscalerCheckpoint, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.VerticalPodAutoscalerCheckpoint)) + }) + return ret, err +} + +// VerticalPodAutoscalerCheckpoints returns an object that can list and get VerticalPodAutoscalerCheckpoints. +func (s *verticalPodAutoscalerCheckpointLister) VerticalPodAutoscalerCheckpoints(namespace string) VerticalPodAutoscalerCheckpointNamespaceLister { + return verticalPodAutoscalerCheckpointNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// VerticalPodAutoscalerCheckpointNamespaceLister helps list and get VerticalPodAutoscalerCheckpoints. +type VerticalPodAutoscalerCheckpointNamespaceLister interface { + // List lists all VerticalPodAutoscalerCheckpoints in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.VerticalPodAutoscalerCheckpoint, err error) + // Get retrieves the VerticalPodAutoscalerCheckpoint from the indexer for a given namespace and name. + Get(name string) (*v1beta1.VerticalPodAutoscalerCheckpoint, error) + VerticalPodAutoscalerCheckpointNamespaceListerExpansion +} + +// verticalPodAutoscalerCheckpointNamespaceLister implements the VerticalPodAutoscalerCheckpointNamespaceLister +// interface. +type verticalPodAutoscalerCheckpointNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all VerticalPodAutoscalerCheckpoints in the indexer for a given namespace. +func (s verticalPodAutoscalerCheckpointNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.VerticalPodAutoscalerCheckpoint, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.VerticalPodAutoscalerCheckpoint)) + }) + return ret, err +} + +// Get retrieves the VerticalPodAutoscalerCheckpoint from the indexer for a given namespace and name. 
+func (s verticalPodAutoscalerCheckpointNamespaceLister) Get(name string) (*v1beta1.VerticalPodAutoscalerCheckpoint, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("verticalpodautoscalercheckpoint"), name) + } + return obj.(*v1beta1.VerticalPodAutoscalerCheckpoint), nil +} diff --git a/vertical-pod-autoscaler/pkg/client/listers/poc.autoscaling.k8s.io/v1alpha1/expansion_generated.go b/vertical-pod-autoscaler/pkg/client/listers/poc.autoscaling.k8s.io/v1alpha1/expansion_generated.go index f3aa5508fb86..f79fdbb1032f 100644 --- a/vertical-pod-autoscaler/pkg/client/listers/poc.autoscaling.k8s.io/v1alpha1/expansion_generated.go +++ b/vertical-pod-autoscaler/pkg/client/listers/poc.autoscaling.k8s.io/v1alpha1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was automatically generated by lister-gen +// Code generated by lister-gen. DO NOT EDIT. package v1alpha1 diff --git a/vertical-pod-autoscaler/pkg/client/listers/poc.autoscaling.k8s.io/v1alpha1/verticalpodautoscaler.go b/vertical-pod-autoscaler/pkg/client/listers/poc.autoscaling.k8s.io/v1alpha1/verticalpodautoscaler.go index ad65be1146a6..a31ebf3f47d5 100644 --- a/vertical-pod-autoscaler/pkg/client/listers/poc.autoscaling.k8s.io/v1alpha1/verticalpodautoscaler.go +++ b/vertical-pod-autoscaler/pkg/client/listers/poc.autoscaling.k8s.io/v1alpha1/verticalpodautoscaler.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was automatically generated by lister-gen +// Code generated by lister-gen. DO NOT EDIT. package v1alpha1 diff --git a/vertical-pod-autoscaler/pkg/client/listers/poc.autoscaling.k8s.io/v1alpha1/verticalpodautoscalercheckpoint.go b/vertical-pod-autoscaler/pkg/client/listers/poc.autoscaling.k8s.io/v1alpha1/verticalpodautoscalercheckpoint.go index 3df36ec5090b..5881680af760 100644 --- a/vertical-pod-autoscaler/pkg/client/listers/poc.autoscaling.k8s.io/v1alpha1/verticalpodautoscalercheckpoint.go +++ b/vertical-pod-autoscaler/pkg/client/listers/poc.autoscaling.k8s.io/v1alpha1/verticalpodautoscalercheckpoint.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was automatically generated by lister-gen +// Code generated by lister-gen. DO NOT EDIT. 
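The lister-gen output above follows the usual two-level shape: a cluster-wide lister that fans out to per-namespace listers, both reading from the informer's indexer rather than the API server. A read-path sketch using only names from this diff; the namespace and object name are illustrative:

```go
package example

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/informers/externalversions"
)

func getVpaFromCache(factory externalversions.SharedInformerFactory) {
	// Lister() reads the shared informer's local indexer; no API-server round trip.
	lister := factory.Autoscaling().V1beta1().VerticalPodAutoscalers().Lister()
	vpa, err := lister.VerticalPodAutoscalers("default").Get("my-vpa")
	if apierrors.IsNotFound(err) {
		// Get surfaces a typed NotFound built from v1beta1.Resource("verticalpodautoscaler").
		fmt.Println("no such VPA in the cache")
		return
	}
	if err == nil {
		fmt.Printf("found %s/%s\n", vpa.Namespace, vpa.Name)
	}
}
```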
package v1alpha1 diff --git a/vertical-pod-autoscaler/pkg/recommender/Makefile b/vertical-pod-autoscaler/pkg/recommender/Makefile index 00810b870d91..91c3857698b5 100644 --- a/vertical-pod-autoscaler/pkg/recommender/Makefile +++ b/vertical-pod-autoscaler/pkg/recommender/Makefile @@ -1,33 +1,59 @@ all: build -TAG?=v0.0.1 -REGISTRY?=k8s.gcr.io +TAG?=dev +REGISTRY?=staging-k8s.gcr.io FLAGS= ENVVAR= GOOS?=linux +COMPONENT=recommender +FULL_COMPONENT=vpa-${COMPONENT} deps: go get github.com/tools/godep build: clean deps $(ENVVAR) GOOS=$(GOOS) godep go build ./... - $(ENVVAR) GOOS=$(GOOS) godep go build -o recommender + $(ENVVAR) GOOS=$(GOOS) godep go build -o ${COMPONENT} + +build-binary: clean deps + $(ENVVAR) GOOS=$(GOOS) godep go build -o ${COMPONENT} test-unit: clean deps build $(ENVVAR) godep go test --test.short -race ./... $(FLAGS) -docker: +docker-build: +ifndef REGISTRY + ERR = $(error REGISTRY is undefined) + $(ERR) +endif +ifndef TAG + ERR = $(error TAG is undefined) + $(ERR) +endif + docker build --pull -t ${REGISTRY}/${FULL_COMPONENT}:${TAG} . + +docker-push: ifndef REGISTRY ERR = $(error REGISTRY is undefined) $(ERR) endif - docker build --pull -t ${REGISTRY}/vpa-recommender:${TAG} . - docker push ${REGISTRY}/vpa-recommender:${TAG} +ifndef TAG + ERR = $(error TAG is undefined) + $(ERR) +endif + docker push ${REGISTRY}/${FULL_COMPONENT}:${TAG} + +docker-builder: + docker build -t vpa-autoscaling-builder ../../builder + +build-in-docker: clean docker-builder + docker run -v `pwd`/../..:/gopath/src/k8s.io/autoscaler/vertical-pod-autoscaler vpa-autoscaling-builder:latest bash -c 'cd /gopath/src/k8s.io/autoscaler/vertical-pod-autoscaler && make build-binary -C pkg/recommender' -release: build docker +release: build-in-docker docker-build docker-push + @echo "Full in-docker release ${FULL_COMPONENT}:${TAG} completed" clean: - rm -f recommender + rm -f ${COMPONENT} format: test -z "$$(find . -path ./vendor -prune -type f -o -name '*.go' -exec gofmt -s -d {} + | tee /dev/stderr)" || \ diff --git a/vertical-pod-autoscaler/pkg/recommender/checkpoint/checkpoint_writer.go b/vertical-pod-autoscaler/pkg/recommender/checkpoint/checkpoint_writer.go index 91fac8b3ddf7..47a8bc6cfd5d 100644 --- a/vertical-pod-autoscaler/pkg/recommender/checkpoint/checkpoint_writer.go +++ b/vertical-pod-autoscaler/pkg/recommender/checkpoint/checkpoint_writer.go @@ -17,14 +17,16 @@ limitations under the License. package checkpoint import ( + "context" "fmt" + "sort" "time" "github.com/golang/glog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" - vpa_api "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" + vpa_api "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/model" api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa" ) @@ -32,7 +34,9 @@ import ( // CheckpointWriter persistently stores aggregated historical usage of containers // controlled by VPA objects. This state can be restored to initialize the model after restart. type CheckpointWriter interface { - StoreCheckpoints(now time.Time) + // StoreCheckpoints writes at least minCheckpoints if there are more checkpoints to write. 
+ // Checkpoints are written until ctx permits or all checkpoints are written. + StoreCheckpoints(ctx context.Context, now time.Time, minCheckpoints int) error } type checkpointWriter struct { @@ -56,13 +60,35 @@ func isFetchingHistory(vpa *model.Vpa) bool { return condition.Status == v1.ConditionTrue } -func (writer *checkpointWriter) StoreCheckpoints(now time.Time) { - for _, vpa := range writer.cluster.Vpas { - +func getVpasToCheckpoint(clusterVpas map[model.VpaID]*model.Vpa) []*model.Vpa { + vpas := make([]*model.Vpa, 0, len(clusterVpas)) + for _, vpa := range clusterVpas { if isFetchingHistory(vpa) { glog.V(3).Infof("VPA %s/%s is loading history, skipping checkpoints", vpa.ID.Namespace, vpa.ID.VpaName) continue } + vpas = append(vpas, vpa) + } + sort.Slice(vpas, func(i, j int) bool { + return vpas[i].CheckpointWritten.Before(vpas[j].CheckpointWritten) + }) + return vpas +} + +func (writer *checkpointWriter) StoreCheckpoints(ctx context.Context, now time.Time, minCheckpoints int) error { + vpas := getVpasToCheckpoint(writer.cluster.Vpas) + for _, vpa := range vpas { + + // Draining ctx.Done() channel. ctx.Err() will be checked if timeout occurred, but minCheckpoints have + // to be written before return from this function. + select { + case <-ctx.Done(): + default: + } + + if ctx.Err() != nil && minCheckpoints <= 0 { + return ctx.Err() + } aggregateContainerStateMap := buildAggregateContainerStateMap(vpa, writer.cluster, now) for container, aggregatedContainerState := range aggregateContainerStateMap { @@ -87,9 +113,12 @@ func (writer *checkpointWriter) StoreCheckpoints(now time.Time) { } else { glog.V(3).Infof("Saved VPA %s/%s checkpoint for %s", vpa.ID.Namespace, vpaCheckpoint.Spec.VPAObjectName, vpaCheckpoint.Spec.ContainerName) + vpa.CheckpointWritten = now } + minCheckpoints-- } } + return nil } // Build the AggregateContainerState for the purpose of the checkpoint. This is an aggregation of state of all @@ -116,6 +145,6 @@ func buildAggregateContainerStateMap(vpa *model.Vpa, cluster *model.ClusterState func subtractCurrentContainerMemoryPeak(a *model.AggregateContainerState, container *model.ContainerState, now time.Time) { if now.Before(container.WindowEnd) { - a.AggregateMemoryPeaks.SubtractSample(model.BytesFromMemoryAmount(container.MemoryPeak), 1.0, container.WindowEnd) + a.AggregateMemoryPeaks.SubtractSample(model.BytesFromMemoryAmount(container.GetMaxMemoryPeak()), 1.0, container.WindowEnd) } } diff --git a/vertical-pod-autoscaler/pkg/recommender/checkpoint/checkpoint_writer_test.go b/vertical-pod-autoscaler/pkg/recommender/checkpoint/checkpoint_writer_test.go index 0bc242cb7620..b5eade22f118 100644 --- a/vertical-pod-autoscaler/pkg/recommender/checkpoint/checkpoint_writer_test.go +++ b/vertical-pod-autoscaler/pkg/recommender/checkpoint/checkpoint_writer_test.go @@ -17,6 +17,7 @@ limitations under the License. 
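The `StoreCheckpoints` change in the writer hunk above bounds checkpoint writing with a `context.Context` while still guaranteeing forward progress: at least `minCheckpoints` checkpoints are written even after the deadline fires, and the sort by `CheckpointWritten` means the stalest VPAs are flushed first. A calling sketch; the one-minute budget and the floor of 10 are illustrative values, not ones taken from this diff:

```go
package example

import (
	"context"
	"time"

	"github.com/golang/glog"
	"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/checkpoint"
)

func writeCheckpoints(writer checkpoint.CheckpointWriter) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	// Even if the minute elapses mid-pass, at least 10 checkpoints (the
	// least recently written ones) are flushed before an error is returned.
	if err := writer.StoreCheckpoints(ctx, time.Now(), 10); err != nil {
		glog.Errorf("checkpoint writing interrupted: %v", err)
	}
}
```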
package checkpoint import ( + "fmt" "testing" "time" @@ -24,7 +25,7 @@ import ( "k8s.io/api/core/v1" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/model" ) @@ -116,3 +117,38 @@ func TestIsFetchingHistory(t *testing.T) { assert.Equalf(t, tc.isFetchingHistory, isFetchingHistory(&tc.vpa), "%+v should have %v as isFetchingHistoryResult", tc.vpa, tc.isFetchingHistory) } } + +func TestGetVpasToCheckpointSorts(t *testing.T) { + + time1 := time.Unix(10000, 0) + time2 := time.Unix(20000, 0) + + genVpaID := func(index int) model.VpaID { + return model.VpaID{ + VpaName: fmt.Sprintf("vpa-%d", index), + } + } + vpa0 := &model.Vpa{ + ID: genVpaID(0), + } + vpa1 := &model.Vpa{ + ID: genVpaID(1), + CheckpointWritten: time1, + } + vpa2 := &model.Vpa{ + ID: genVpaID(2), + CheckpointWritten: time2, + } + vpas := make(map[model.VpaID]*model.Vpa) + addVpa := func(vpa *model.Vpa) { + vpas[vpa.ID] = vpa + } + addVpa(vpa2) + addVpa(vpa0) + addVpa(vpa1) + result := getVpasToCheckpoint(vpas) + assert.Equal(t, genVpaID(0), result[0].ID) + assert.Equal(t, genVpaID(1), result[1].ID) + assert.Equal(t, genVpaID(2), result[2].ID) + +} diff --git a/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder.go b/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder.go index dba501570515..e2e2c7edc529 100644 --- a/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder.go +++ b/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder.go @@ -26,10 +26,10 @@ import ( "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/watch" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" vpa_clientset "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned" - vpa_api "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1" - vpa_lister "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/listers/poc.autoscaling.k8s.io/v1alpha1" + vpa_api "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1" + vpa_lister "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1beta1" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/input/history" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/input/metrics" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/input/oom" @@ -66,23 +66,46 @@ type ClusterStateFeeder interface { GarbageCollectCheckpoints() } -// NewClusterStateFeeder creates new ClusterStateFeeder with internal data providers, based on kube client config and a historyProvider. -func NewClusterStateFeeder(config *rest.Config, clusterState *model.ClusterState) ClusterStateFeeder { - kubeClient := kube_client.NewForConfigOrDie(config) - oomObserver := oom.NewObserver() - podLister := newPodClients(kubeClient, &oomObserver) - watchEvictionEventsWithRetries(kubeClient, &oomObserver) +// ClusterStateFeederFactory makes instances of ClusterStateFeeder. 
+type ClusterStateFeederFactory struct { + ClusterState *model.ClusterState + KubeClient kube_client.Interface + MetricsClient metrics.MetricsClient + VpaCheckpointClient vpa_api.VerticalPodAutoscalerCheckpointsGetter + VpaLister vpa_lister.VerticalPodAutoscalerLister + PodLister v1lister.PodLister + OOMObserver *oom.Observer +} + +// Make creates new ClusterStateFeeder with internal data providers, based on kube client. +func (m ClusterStateFeederFactory) Make() *clusterStateFeeder { return &clusterStateFeeder{ - coreClient: kubeClient.CoreV1(), - specClient: spec.NewSpecClient(podLister), - metricsClient: newMetricsClient(config), - oomObserver: &oomObserver, - vpaCheckpointClient: vpa_clientset.NewForConfigOrDie(config).PocV1alpha1(), - vpaLister: vpa_api_util.NewAllVpasLister(vpa_clientset.NewForConfigOrDie(config), make(chan struct{})), - clusterState: clusterState, + coreClient: m.KubeClient.CoreV1(), + metricsClient: m.MetricsClient, + oomChan: m.OOMObserver.ObservedOomsChannel, + vpaCheckpointClient: m.VpaCheckpointClient, + vpaLister: m.VpaLister, + clusterState: m.ClusterState, + specClient: spec.NewSpecClient(m.PodLister), } } +// NewClusterStateFeeder creates new ClusterStateFeeder with internal data providers, based on kube client config. +// Deprecated; Use ClusterStateFeederFactory instead. +func NewClusterStateFeeder(config *rest.Config, clusterState *model.ClusterState) ClusterStateFeeder { + kubeClient := kube_client.NewForConfigOrDie(config) + podLister, oomObserver := NewPodListerAndOOMObserver(kubeClient) + return ClusterStateFeederFactory{ + PodLister: podLister, + OOMObserver: oomObserver, + KubeClient: kubeClient, + MetricsClient: newMetricsClient(config), + VpaCheckpointClient: vpa_clientset.NewForConfigOrDie(config).AutoscalingV1beta1(), + VpaLister: vpa_api_util.NewAllVpasLister(vpa_clientset.NewForConfigOrDie(config), make(chan struct{})), + ClusterState: clusterState, + }.Make() +} + func newMetricsClient(config *rest.Config) metrics.MetricsClient { metricsGetter := resourceclient.NewForConfigOrDie(config) return metrics.NewMetricsClient(metricsGetter) @@ -139,11 +162,19 @@ func newPodClients(kubeClient kube_client.Interface, resourceEventHandler cache. return podLister } +// NewPodListerAndOOMObserver creates pair of pod lister and OOM observer. 
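`ClusterStateFeederFactory` makes every dependency of the feeder injectable, so tests and the recommender binary can share one pod lister and one OOM observer, built by the `NewPodListerAndOOMObserver` helper whose declaration follows. A wiring sketch, assuming the feeder package is imported as `input` and that the dependency values already exist in the caller:

```go
package example

import (
	vpa_clientset "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned"
	vpa_lister "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1beta1"
	"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/input"
	"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/input/metrics"
	"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/model"
	kube_client "k8s.io/client-go/kubernetes"
)

func newFeeder(
	clusterState *model.ClusterState,
	kubeClient kube_client.Interface,
	metricsClient metrics.MetricsClient,
	vpaClient vpa_clientset.Interface,
	vpaLister vpa_lister.VerticalPodAutoscalerLister,
) input.ClusterStateFeeder {
	// One pod lister and one OOM observer, shared by everything the feeder drives.
	podLister, oomObserver := input.NewPodListerAndOOMObserver(kubeClient)
	return input.ClusterStateFeederFactory{
		ClusterState:        clusterState,
		KubeClient:          kubeClient,
		MetricsClient:       metricsClient,
		VpaCheckpointClient: vpaClient.AutoscalingV1beta1(),
		VpaLister:           vpaLister,
		PodLister:           podLister,
		OOMObserver:         oomObserver,
	}.Make()
}
```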
+func NewPodListerAndOOMObserver(kubeClient kube_client.Interface) (v1lister.PodLister, *oom.Observer) {
+    oomObserver := oom.NewObserver()
+    podLister := newPodClients(kubeClient, &oomObserver)
+    watchEvictionEventsWithRetries(kubeClient, &oomObserver)
+    return podLister, &oomObserver
+}
+
 type clusterStateFeeder struct {
     coreClient          corev1.CoreV1Interface
     specClient          spec.SpecClient
     metricsClient       metrics.MetricsClient
-    oomObserver         *oom.Observer
+    oomChan             <-chan oom.OomInfo
     vpaCheckpointClient vpa_api.VerticalPodAutoscalerCheckpointsGetter
     vpaLister           vpa_lister.VerticalPodAutoscalerLister
     clusterState        *model.ClusterState
@@ -320,7 +351,7 @@ func (feeder *clusterStateFeeder) LoadRealTimeMetrics() {
 Loop:
     for {
         select {
-        case oomInfo := <-feeder.oomObserver.ObservedOomsChannel:
+        case oomInfo := <-feeder.oomChan:
             glog.V(3).Infof("OOM detected %+v", oomInfo)
             container := model.ContainerID{
                 PodID: model.PodID{
diff --git a/vertical-pod-autoscaler/pkg/recommender/input/oom/observer.go b/vertical-pod-autoscaler/pkg/recommender/input/oom/observer.go
index 940f091581b1..b9a6f1fb1b1d 100644
--- a/vertical-pod-autoscaler/pkg/recommender/input/oom/observer.go
+++ b/vertical-pod-autoscaler/pkg/recommender/input/oom/observer.go
@@ -40,7 +40,7 @@ type Observer struct {
 // NewObserver returns new instance of the Observer.
 func NewObserver() Observer {
     return Observer{
-        ObservedOomsChannel: make(chan OomInfo),
+        ObservedOomsChannel: make(chan OomInfo, 5000),
     }
 }
 
@@ -89,15 +89,30 @@ func parseEvictionEvent(event *apiv1.Event) []OomInfo {
 // OnEvent inspects k8s eviction events and translates them to OomInfo.
 func (o *Observer) OnEvent(event *apiv1.Event) {
-    glog.V(1).Infof("OOM Obeserver processing event: %+v", event)
+    glog.V(1).Infof("OOM Observer processing event: %+v", event)
     for _, oomInfo := range parseEvictionEvent(event) {
-        info := oomInfo
-        go func() {
-            o.ObservedOomsChannel <- info
-        }()
+        o.ObservedOomsChannel <- oomInfo
     }
 }
 
+func findStatus(name string, containerStatuses []apiv1.ContainerStatus) *apiv1.ContainerStatus {
+    for _, containerStatus := range containerStatuses {
+        if containerStatus.Name == name {
+            return &containerStatus
+        }
+    }
+    return nil
+}
+
+func findSpec(name string, containers []apiv1.Container) *apiv1.Container {
+    for _, containerSpec := range containers {
+        if containerSpec.Name == name {
+            return &containerSpec
+        }
+    }
+    return nil
+}
+
 // OnAdd is Noop
 func (*Observer) OnAdd(obj interface{}) {}
 
@@ -106,47 +121,33 @@ func (*Observer) OnAdd(obj interface{}) {}
 func (o *Observer) OnUpdate(oldObj, newObj interface{}) {
     oldPod, ok := oldObj.(*apiv1.Pod)
     if oldPod == nil || !ok {
-        glog.Errorf("OOM observer received invaild oldObj: %v", oldObj)
+        glog.Errorf("OOM observer received invalid oldObj: %v", oldObj)
     }
     newPod, ok := newObj.(*apiv1.Pod)
     if newPod == nil || !ok {
-        glog.Errorf("OOM observer received invaild newObj: %v", newObj)
+        glog.Errorf("OOM observer received invalid newObj: %v", newObj)
     }
-
-    oldContainersStatusMap := make(map[string]apiv1.ContainerStatus)
-    for _, containerStatus := range oldPod.Status.ContainerStatuses {
-        oldContainersStatusMap[containerStatus.Name] = containerStatus
-    }
-
-    oldContainersSpecMap := make(map[string]apiv1.Container)
-    for _, containerSpec := range oldPod.Spec.Containers {
-        oldContainersSpecMap[containerSpec.Name] = containerSpec
-    }
 
     for _, containerStatus := range newPod.Status.ContainerStatuses {
-        prevContainerStatus, ok := oldContainersStatusMap[containerStatus.Name]
-        if ok && containerStatus.RestartCount > prevContainerStatus.RestartCount &&
+        if containerStatus.RestartCount > 0 &&
             containerStatus.LastTerminationState.Terminated.Reason == "OOMKilled" {
-            if spec, ok := oldContainersSpecMap[containerStatus.Name]; ok {
-                oomInfo := OomInfo{
-                    Namespace: newPod.ObjectMeta.Namespace,
-                    Pod:       newPod.ObjectMeta.Name,
-                    Container: containerStatus.Name,
-                    Memory:    spec.Resources.Requests[apiv1.ResourceMemory],
-                    Timestamp: containerStatus.LastTerminationState.Terminated.FinishedAt.Time.UTC(),
-                }
-                go func() {
-                    o.ObservedOomsChannel <- oomInfo
-                }()
-            } else {
-                glog.Errorf("Shouldn't happen. Missing spec for container %v", containerStatus.Name)
-            }
+
+            oldStatus := findStatus(containerStatus.Name, oldPod.Status.ContainerStatuses)
+            if oldStatus != nil && containerStatus.RestartCount > oldStatus.RestartCount {
+                oldSpec := findSpec(containerStatus.Name, oldPod.Spec.Containers)
+                if oldSpec != nil {
+                    oomInfo := OomInfo{
+                        Namespace: newPod.ObjectMeta.Namespace,
+                        Pod:       newPod.ObjectMeta.Name,
+                        Container: containerStatus.Name,
+                        Memory:    oldSpec.Resources.Requests[apiv1.ResourceMemory],
+                        Timestamp: containerStatus.LastTerminationState.Terminated.FinishedAt.Time.UTC(),
+                    }
+                    o.ObservedOomsChannel <- oomInfo
+                }
+            }
         }
     }
 }
 
 // OnDelete is Noop
diff --git a/vertical-pod-autoscaler/pkg/recommender/logic/estimator_test.go b/vertical-pod-autoscaler/pkg/recommender/logic/estimator_test.go
index baf9d5251f60..bedb745de621 100644
--- a/vertical-pod-autoscaler/pkg/recommender/logic/estimator_test.go
+++ b/vertical-pod-autoscaler/pkg/recommender/logic/estimator_test.go
@@ -56,7 +56,7 @@ func TestPercentileEstimator(t *testing.T) {
         AggregateCPUUsage:    cpuHistogram,
         AggregateMemoryPeaks: memoryPeaksHistogram,
     })
-    maxRelativeError := 0.03 // Allow 3% relative error to account for histogram rounding.
+    maxRelativeError := 0.05 // Allow 5% relative error to account for histogram rounding.
     assert.InEpsilon(t, 1.0, model.CoresFromCPUAmount(resourceEstimation[model.ResourceCPU]), maxRelativeError)
     assert.InEpsilon(t, 2e9, model.BytesFromMemoryAmount(resourceEstimation[model.ResourceMemory]), maxRelativeError)
 }
diff --git a/vertical-pod-autoscaler/pkg/recommender/logic/recommender.go b/vertical-pod-autoscaler/pkg/recommender/logic/recommender.go
index 442d6dc0d914..df7b5be860e7 100644
--- a/vertical-pod-autoscaler/pkg/recommender/logic/recommender.go
+++ b/vertical-pod-autoscaler/pkg/recommender/logic/recommender.go
@@ -24,8 +24,8 @@ import (
 var (
     safetyMarginFraction = flag.Float64("recommendation-margin-fraction", 0.15, `Fraction of usage added as the safety margin to the recommended request`)
-    podMinCPUMillicores = flag.Float64("pod-recommendation-min-cpu-millicores", 200, `Minimum CPU recommendation for a pod`)
-    podMinMemoryMb = flag.Float64("pod-recommendation-min-memory-mb", 300, `Minimum memory recommendation for a pod`)
+    podMinCPUMillicores = flag.Float64("pod-recommendation-min-cpu-millicores", 25, `Minimum CPU recommendation for a pod`)
+    podMinMemoryMb = flag.Float64("pod-recommendation-min-memory-mb", 250, `Minimum memory recommendation for a pod`)
 )
 
 // PodResourceRecommender computes resource recommendation for a Vpa object.
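The recommender.go change above lowers the per-pod floors from 200 millicores / 300MB to 25 millicores / 250MB, so small workloads are no longer pushed to a fifth of a core by the minimum alone. A minimal sketch of how such flag-driven floors apply to a raw estimate; the names `applyPodMinimum` and `resources` are illustrative, not the recommender's actual types:

```go
package main

import "fmt"

// resources is a stand-in for the recommender's per-pod estimate.
type resources struct {
	cpuMillicores float64
	memoryMb      float64
}

// applyPodMinimum raises an estimate to the configured floors, mirroring
// --pod-recommendation-min-cpu-millicores=25 and --pod-recommendation-min-memory-mb=250.
func applyPodMinimum(est resources, minCPUMillicores, minMemoryMb float64) resources {
	if est.cpuMillicores < minCPUMillicores {
		est.cpuMillicores = minCPUMillicores
	}
	if est.memoryMb < minMemoryMb {
		est.memoryMb = minMemoryMb
	}
	return est
}

func main() {
	// A 1-millicore estimate (as in the updated tests below) is raised to the floor.
	fmt.Println(applyPodMinimum(resources{cpuMillicores: 1, memoryMb: 1}, 25, 250))
}
```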
diff --git a/vertical-pod-autoscaler/pkg/recommender/logic/recommender_test.go b/vertical-pod-autoscaler/pkg/recommender/logic/recommender_test.go index 8b856ca9fb66..c47600cb65e3 100644 --- a/vertical-pod-autoscaler/pkg/recommender/logic/recommender_test.go +++ b/vertical-pod-autoscaler/pkg/recommender/logic/recommender_test.go @@ -25,7 +25,7 @@ import ( func TestMinResourcesApplied(t *testing.T) { constEstimator := NewConstEstimator(model.Resources{ - model.ResourceCPU: model.CPUAmountFromCores(0.1), + model.ResourceCPU: model.CPUAmountFromCores(0.001), model.ResourceMemory: model.MemoryAmountFromBytes(1e6), }) recommender := podResourceRecommender{ @@ -44,7 +44,7 @@ func TestMinResourcesApplied(t *testing.T) { func TestMinResourcesSplitAcrossContainers(t *testing.T) { constEstimator := NewConstEstimator(model.Resources{ - model.ResourceCPU: model.CPUAmountFromCores(0.1), + model.ResourceCPU: model.CPUAmountFromCores(0.001), model.ResourceMemory: model.MemoryAmountFromBytes(1e6), }) recommender := podResourceRecommender{ diff --git a/vertical-pod-autoscaler/pkg/recommender/main.go b/vertical-pod-autoscaler/pkg/recommender/main.go index 22a6fb29c95b..7c0854da5787 100644 --- a/vertical-pod-autoscaler/pkg/recommender/main.go +++ b/vertical-pod-autoscaler/pkg/recommender/main.go @@ -28,7 +28,6 @@ import ( "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/metrics" metrics_recommender "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/metrics/recommender" "k8s.io/client-go/rest" - kube_restclient "k8s.io/client-go/rest" ) var ( @@ -47,7 +46,8 @@ func main() { config := createKubeConfig(float32(*kubeApiQps), int(*kubeApiBurst)) - metrics.Initialize(*address) + healthCheck := metrics.NewHealthCheck(*metricsFetcherInterval*5, true) + metrics.Initialize(*address, healthCheck) metrics_recommender.Register() useCheckpoints := *storage != "prometheus" @@ -58,19 +58,16 @@ func main() { recommender.GetClusterStateFeeder().InitFromHistoryProvider(history.NewPrometheusHistoryProvider(*prometheusAddress)) } - for { - select { - case <-time.After(*metricsFetcherInterval): - { - recommender.RunOnce() - } - } + ticker := time.Tick(*metricsFetcherInterval) + for range ticker { + recommender.RunOnce() + healthCheck.UpdateLastActivity() } } func createKubeConfig(kubeApiQps float32, kubeApiBurst int) *rest.Config { - config, err := kube_restclient.InClusterConfig() + config, err := rest.InClusterConfig() if err != nil { glog.Fatalf("Failed to create config: %v", err) } diff --git a/vertical-pod-autoscaler/pkg/recommender/model/aggregate_container_state.go b/vertical-pod-autoscaler/pkg/recommender/model/aggregate_container_state.go index e627c6fd9187..996df2cba61b 100644 --- a/vertical-pod-autoscaler/pkg/recommender/model/aggregate_container_state.go +++ b/vertical-pod-autoscaler/pkg/recommender/model/aggregate_container_state.go @@ -41,7 +41,7 @@ import ( "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/util" ) @@ -52,9 +52,9 @@ type ContainerNameToAggregateStateMap map[string]*AggregateContainerState const ( // SupportedCheckpointVersion is the tag of the supported version of serialized checkpoints. // Version id should be incremented on every non incompatible change, i.e. 
if the new - // version of the recommender binary can't initialize from the old checkpoint format or the the + // version of the recommender binary can't initialize from the old checkpoint format or the // previous version of the recommender binary can't initialize from the new checkpoint format. - SupportedCheckpointVersion = "v1" + SupportedCheckpointVersion = "v3" ) // ContainerStateAggregator is an interface for objects that consume and @@ -139,7 +139,6 @@ func (a *AggregateContainerState) addCPUSample(sample *ContainerUsageSample) { // Samples are added with the weight equal to the current request. This means that // whenever the request is increased, the history accumulated so far effectively decays, // which helps react quickly to CPU starvation. - minSampleWeight := 0.1 a.AggregateCPUUsage.AddSample( cpuUsageCores, math.Max(cpuRequestCores, minSampleWeight), sample.MeasureStart) if sample.MeasureStart.After(a.LastSampleStart) { diff --git a/vertical-pod-autoscaler/pkg/recommender/model/aggregate_container_state_test.go b/vertical-pod-autoscaler/pkg/recommender/model/aggregate_container_state_test.go index 27856914a76e..192c6644791b 100644 --- a/vertical-pod-autoscaler/pkg/recommender/model/aggregate_container_state_test.go +++ b/vertical-pod-autoscaler/pkg/recommender/model/aggregate_container_state_test.go @@ -24,7 +24,7 @@ import ( apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/util" ) diff --git a/vertical-pod-autoscaler/pkg/recommender/model/aggregations_config.go b/vertical-pod-autoscaler/pkg/recommender/model/aggregations_config.go index cc69c3e6e823..76b34fe6a53d 100644 --- a/vertical-pod-autoscaler/pkg/recommender/model/aggregations_config.go +++ b/vertical-pod-autoscaler/pkg/recommender/model/aggregations_config.go @@ -52,10 +52,20 @@ var ( CPUHistogramDecayHalfLife = time.Hour * 24 ) +const ( + // minSampleWeight is the minimal weight of any sample (prior to including decaying factor) + minSampleWeight = 0.1 + // epsilon is the minimal weight kept in histograms, it should be small enough that old samples + // (just inside MemoryAggregationWindowLength) added with minSampleWeight are still kept + epsilon = 0.001 * minSampleWeight +) + func cpuHistogramOptions() util.HistogramOptions { // CPU histograms use exponential bucketing scheme with the smallest bucket - // size of 0.1 core, max of 1000.0 cores and the relative error of HistogramRelativeError. - options, err := util.NewExponentialHistogramOptions(1000.0, 0.1, 1.+HistogramBucketSizeGrowth, 0.1) + // size of 0.01 core, max of 1000.0 cores and the relative error of HistogramRelativeError. + // + // When parameters below are changed SupportedCheckpointVersion has to be bumped. + options, err := util.NewExponentialHistogramOptions(1000.0, 0.01, 1.+HistogramBucketSizeGrowth, epsilon) if err != nil { panic("Invalid CPU histogram options") // Should not happen. } @@ -65,7 +75,9 @@ func cpuHistogramOptions() util.HistogramOptions { func memoryHistogramOptions() util.HistogramOptions { // Memory histograms use exponential bucketing scheme with the smallest // bucket size of 10MB, max of 1TB and the relative error of HistogramRelativeError. 
- options, err := util.NewExponentialHistogramOptions(1e12, 1e7, 1.+HistogramBucketSizeGrowth, 0.1) + // + // When parameters below are changed SupportedCheckpointVersion has to be bumped. + options, err := util.NewExponentialHistogramOptions(1e12, 1e7, 1.+HistogramBucketSizeGrowth, epsilon) if err != nil { panic("Invalid memory histogram options") // Should not happen. } diff --git a/vertical-pod-autoscaler/pkg/recommender/model/cluster.go b/vertical-pod-autoscaler/pkg/recommender/model/cluster.go index c24733b83f96..eb5d1732827d 100644 --- a/vertical-pod-autoscaler/pkg/recommender/model/cluster.go +++ b/vertical-pod-autoscaler/pkg/recommender/model/cluster.go @@ -25,7 +25,7 @@ import ( apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" ) // ClusterState holds all runtime information about the cluster required for the @@ -303,7 +303,7 @@ func (cluster *ClusterState) GarbageCollectAggregateCollectionStates(now time.Ti for key, aggregateContainerState := range cluster.aggregateStateMap { if aggregateContainerState.isExpired(now) { keysToDelete = append(keysToDelete, key) - glog.V(1).Info("Removing AggregateCollectionStates for %+v", key) + glog.V(1).Infof("Removing AggregateCollectionStates for %+v", key) } } for _, key := range keysToDelete { diff --git a/vertical-pod-autoscaler/pkg/recommender/model/cluster_test.go b/vertical-pod-autoscaler/pkg/recommender/model/cluster_test.go index 3eb124e8441e..146468e5c37e 100644 --- a/vertical-pod-autoscaler/pkg/recommender/model/cluster_test.go +++ b/vertical-pod-autoscaler/pkg/recommender/model/cluster_test.go @@ -24,7 +24,7 @@ import ( "github.com/stretchr/testify/assert" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" ) var ( diff --git a/vertical-pod-autoscaler/pkg/recommender/model/container.go b/vertical-pod-autoscaler/pkg/recommender/model/container.go index 8de041d5df4f..38c6f0452acc 100644 --- a/vertical-pod-autoscaler/pkg/recommender/model/container.go +++ b/vertical-pod-autoscaler/pkg/recommender/model/container.go @@ -52,7 +52,9 @@ type ContainerState struct { // Start of the latest CPU usage sample that was aggregated. LastCPUSampleStart time.Time // Max memory usage observed in the current aggregation interval. - MemoryPeak ResourceAmount + memoryPeak ResourceAmount + // Max memory usage estimated from an OOM event in the current aggregation interval. + oomPeak ResourceAmount // End time of the current memory aggregation interval (not inclusive). WindowEnd time.Time // Start of the latest memory usage sample that was aggregated. 
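The `epsilon` constant introduced in aggregations_config.go above is not arbitrary: it must sit below the decayed weight of the oldest sample the histogram is still expected to keep. A back-of-the-envelope check, assuming the memory aggregation window spans 8 daily intervals (the window length itself is not shown in this diff) and the 24h decay half-life listed above:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	const (
		minSampleWeight = 0.1
		epsilon         = 0.001 * minSampleWeight // 1e-4, as defined above
		halfLifeHours   = 24.0                    // CPUHistogramDecayHalfLife
		windowHours     = 8 * 24.0                // assumed aggregation window length
	)
	// Weight of a minimum-weight sample after decaying across the whole window.
	decayed := minSampleWeight * math.Pow(2, -windowHours/halfLifeHours)
	fmt.Printf("decayed: %.2e, epsilon: %.2e\n", decayed, epsilon) // 3.91e-04 > 1.00e-04
}
```

Under those assumptions, even a minimum-weight sample at the far edge of the window stays above the cut-off, so it is not dropped from the bucket totals.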
@@ -86,7 +88,12 @@ func (container *ContainerState) addCPUSample(sample *ContainerUsageSample) bool return true } -func (container *ContainerState) addMemorySample(sample *ContainerUsageSample) bool { +// GetMaxMemoryPeak returns maximum memory usage in the sample, possibly estimated from OOM +func (container *ContainerState) GetMaxMemoryPeak() ResourceAmount { + return ResourceAmountMax(container.memoryPeak, container.oomPeak) +} + +func (container *ContainerState) addMemorySample(sample *ContainerUsageSample, isOOM bool) bool { ts := sample.MeasureStart if !sample.isValid(ResourceMemory) || ts.Before(container.lastMemorySampleStart) { return false // Discard invalid or outdated samples. @@ -102,11 +109,12 @@ func (container *ContainerState) addMemorySample(sample *ContainerUsageSample) b // and adding the new value. addNewPeak := false if ts.Before(container.WindowEnd) { - if container.MemoryPeak != 0 && sample.Usage > container.MemoryPeak { + oldMaxMem := container.GetMaxMemoryPeak() + if oldMaxMem != 0 && sample.Usage > oldMaxMem { // Remove the old peak. oldPeak := ContainerUsageSample{ MeasureStart: container.WindowEnd, - Usage: container.MemoryPeak, + Usage: oldMaxMem, Request: sample.Request, Resource: ResourceMemory, } @@ -117,6 +125,8 @@ func (container *ContainerState) addMemorySample(sample *ContainerUsageSample) b // Shift the memory aggregation window to the next interval. shift := truncate(ts.Sub(container.WindowEnd), MemoryAggregationInterval) + MemoryAggregationInterval container.WindowEnd = container.WindowEnd.Add(shift) + container.memoryPeak = 0 + container.oomPeak = 0 addNewPeak = true } if addNewPeak { @@ -127,7 +137,11 @@ func (container *ContainerState) addMemorySample(sample *ContainerUsageSample) b Resource: ResourceMemory, } container.aggregator.AddSample(&newPeak) - container.MemoryPeak = sample.Usage + if isOOM { + container.oomPeak = sample.Usage + } else { + container.memoryPeak = sample.Usage + } } return true } @@ -138,8 +152,9 @@ func (container *ContainerState) RecordOOM(timestamp time.Time, requestedMemory if timestamp.Before(container.WindowEnd.Add(-1 * MemoryAggregationInterval)) { return fmt.Errorf("OOM event will be discarded - it is too old (%v)", timestamp) } - // Get max of the request and the recent memory peak. - memoryUsed := ResourceAmountMax(requestedMemory, container.MemoryPeak) + // Get max of the request and the recent usage-based memory peak. + // Omitting oomPeak here to protect against recommendation running too high on subsequent OOMs. 
+ memoryUsed := ResourceAmountMax(requestedMemory, container.memoryPeak) memoryNeeded := ResourceAmountMax(memoryUsed+MemoryAmountFromBytes(OOMMinBumpUp), ScaleResource(memoryUsed, OOMBumpUpRatio)) @@ -148,7 +163,7 @@ func (container *ContainerState) RecordOOM(timestamp time.Time, requestedMemory Usage: memoryNeeded, Resource: ResourceMemory, } - if !container.addMemorySample(&oomMemorySample) { + if !container.addMemorySample(&oomMemorySample, true) { return fmt.Errorf("Adding OOM sample failed") } return nil @@ -166,7 +181,7 @@ func (container *ContainerState) AddSample(sample *ContainerUsageSample) bool { case ResourceCPU: return container.addCPUSample(sample) case ResourceMemory: - return container.addMemorySample(sample) + return container.addMemorySample(sample, false) default: return false } diff --git a/vertical-pod-autoscaler/pkg/recommender/model/container_test.go b/vertical-pod-autoscaler/pkg/recommender/model/container_test.go index 733f21006550..b5185cea6d53 100644 --- a/vertical-pod-autoscaler/pkg/recommender/model/container_test.go +++ b/vertical-pod-autoscaler/pkg/recommender/model/container_test.go @@ -129,6 +129,24 @@ func TestRecordOOMIncreasedByBumpUp(t *testing.T) { assert.NoError(t, test.container.RecordOOM(testTimestamp, ResourceAmount(1000*mb))) } +func TestRecordOOMDontRunAway(t *testing.T) { + test := newContainerTest() + memoryAggregationWindowEnd := testTimestamp.Add(MemoryAggregationInterval) + + // Bump Up factor is 20%. + test.mockMemoryHistogram.On("AddSample", 1200.0*mb, 1.0, memoryAggregationWindowEnd) + assert.NoError(t, test.container.RecordOOM(testTimestamp, ResourceAmount(1000*mb))) + + // new smaller OOMs don't influence the sample value (oomPeak) + assert.NoError(t, test.container.RecordOOM(testTimestamp, ResourceAmount(999*mb))) + assert.NoError(t, test.container.RecordOOM(testTimestamp, ResourceAmount(999*mb))) + + test.mockMemoryHistogram.On("SubtractSample", 1200.0*mb, 1.0, memoryAggregationWindowEnd) + test.mockMemoryHistogram.On("AddSample", 2400.0*mb, 1.0, memoryAggregationWindowEnd) + // a larger OOM should increase the sample value + assert.NoError(t, test.container.RecordOOM(testTimestamp, ResourceAmount(2000*mb))) +} + func TestRecordOOMIncreasedByMin(t *testing.T) { test := newContainerTest() memoryAggregationWindowEnd := testTimestamp.Add(MemoryAggregationInterval) diff --git a/vertical-pod-autoscaler/pkg/recommender/model/vpa.go b/vertical-pod-autoscaler/pkg/recommender/model/vpa.go index 413b03cc5c42..8219b8716a2d 100644 --- a/vertical-pod-autoscaler/pkg/recommender/model/vpa.go +++ b/vertical-pod-autoscaler/pkg/recommender/model/vpa.go @@ -17,12 +17,13 @@ limitations under the License. package model import ( + "sort" "time" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" ) // Map from VPA condition type to condition. 
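TestRecordOOMDontRunAway above pins down the asymmetry introduced by the memoryPeak/oomPeak split: an OOM-derived sample is only replaced by a strictly larger one, so repeated OOMs at the same level no longer compound. The arithmetic behind the expected 1200MB/2400MB samples, with the bump constants assumed from the test values (the real OOMBumpUpRatio and OOMMinBumpUp live elsewhere in the model package):

```go
package main

import (
	"fmt"
	"math"
)

// Assumed values, consistent with the test above: a 1000MB request yields a
// 1200MB sample, i.e. a 20% ratio bump dominating a flat 100MB minimum bump.
const (
	oomBumpUpRatio = 1.2
	oomMinBumpUpMb = 100.0
)

// memoryNeededMb mirrors RecordOOM: take the max of the request and the
// usage-based peak (oomPeak deliberately excluded), then bump by whichever
// of the ratio or the flat minimum is larger.
func memoryNeededMb(requestedMb, usagePeakMb float64) float64 {
	used := math.Max(requestedMb, usagePeakMb)
	return math.Max(used+oomMinBumpUpMb, used*oomBumpUpRatio)
}

func main() {
	fmt.Println(memoryNeededMb(1000, 0)) // 1200
	fmt.Println(memoryNeededMb(2000, 0)) // 2400
}
```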
@@ -56,6 +57,12 @@ func (conditionsMap *vpaConditionsMap) AsList() []vpa_types.VerticalPodAutoscale for _, condition := range *conditionsMap { conditions = append(conditions, condition) } + + // Sort conditions by type to avoid elements floating on the list + sort.Slice(conditions, func(i, j int) bool { + return conditions[i].Type < conditions[j].Type + }) + return conditions } @@ -82,6 +89,8 @@ type Vpa struct { UpdateMode *vpa_types.UpdateMode // Created denotes timestamp of the original VPA object creation Created time.Time + // CheckpointWritten indicates when last checkpoint for the VPA object was stored. + CheckpointWritten time.Time } // NewVpa returns a new Vpa with a given ID and pod selector. Doesn't set the @@ -92,8 +101,8 @@ func NewVpa(id VpaID, selector labels.Selector, created time.Time) *Vpa { PodSelector: selector, aggregateContainerStates: make(aggregateContainerStatesMap), ContainersInitialAggregateState: make(ContainerNameToAggregateStateMap), - Created: created, - Conditions: make(vpaConditionsMap), + Created: created, + Conditions: make(vpaConditionsMap), } return vpa } diff --git a/vertical-pod-autoscaler/pkg/recommender/routines/recommender.go b/vertical-pod-autoscaler/pkg/recommender/routines/recommender.go index a5527fce2d68..aae357c39f71 100644 --- a/vertical-pod-autoscaler/pkg/recommender/routines/recommender.go +++ b/vertical-pod-autoscaler/pkg/recommender/routines/recommender.go @@ -17,18 +17,20 @@ limitations under the License. package routines import ( + "context" + "flag" "time" "github.com/golang/glog" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" vpa_clientset "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned" - vpa_api "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1" + vpa_api "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/checkpoint" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/input" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/logic" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/model" metrics_recommender "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/metrics/recommender" - vpa_api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa" + vpa_utils "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa" "k8s.io/client-go/rest" ) @@ -37,6 +39,11 @@ const ( AggregateContainerStateGCInterval = 1 * time.Hour ) +var ( + checkpointsWriteTimeout = flag.Duration("checkpoints-timeout", time.Minute, `Timeout for writing checkpoints since the start of the recommender's main loop`) + minCheckpointsPerRun = flag.Int("min-checkpoints", 10, "Minimum number of checkpoints to write per recommender's main loop") +) + // Recommender recommend resources for certain containers, based on utilization periodically got from metrics api. type Recommender interface { // RunOnce performs one iteration of recommender duties followed by update of recommendations in VPA objects. 
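The two new flags above bound how long each main loop may spend persisting state: `--checkpoints-timeout` feeds the context deadline set up in RunOnce (shown below), while `--min-checkpoints` guarantees forward progress even when that deadline has already expired. A sketch of the resulting write-loop contract, with `writeOne` as a stand-in for the real per-VPA checkpoint write:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// storeCheckpoints writes pending checkpoints until the context expires,
// but never returns before minCheckpoints writes have been attempted.
func storeCheckpoints(ctx context.Context, pending []string, minCheckpoints int, writeOne func(string)) {
	for i, vpa := range pending {
		if i >= minCheckpoints && ctx.Err() != nil {
			return // deadline hit and the minimum quota is already met
		}
		writeOne(vpa)
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	storeCheckpoints(ctx, []string{"default/my-vpa"}, 10, func(v string) { fmt.Println("wrote", v) })
}
```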
@@ -48,7 +55,9 @@ type Recommender interface { // UpdateVPAs computes recommendations and sends VPAs status updates to API Server UpdateVPAs() // MaintainCheckpoints stores current checkpoints in API Server and garbage collect old ones - MaintainCheckpoints() + // MaintainCheckpoints writes at least minCheckpoints if there are more checkpoints to write. + // Checkpoints are written until ctx permits or all checkpoints are written. + MaintainCheckpoints(ctx context.Context, minCheckpoints int) // GarbageCollect removes old AggregateCollectionStates GarbageCollect() } @@ -88,18 +97,8 @@ func (r *recommender) UpdateVPAs() { continue } resources := r.podResourceRecommender.GetRecommendedPodResources(GetContainerNameToAggregateStateMap(vpa)) - containerResources := make([]vpa_types.RecommendedContainerResources, 0, len(resources)) - for containerName, res := range resources { - containerResources = append(containerResources, vpa_types.RecommendedContainerResources{ - ContainerName: containerName, - Target: model.ResourcesAsResourceList(res.Target), - LowerBound: model.ResourcesAsResourceList(res.LowerBound), - UpperBound: model.ResourcesAsResourceList(res.UpperBound), - }) - - } had := vpa.HasRecommendation() - vpa.Recommendation = &vpa_types.RecommendedPodResources{containerResources} + vpa.Recommendation = getCappedRecommendation(vpa.ID, resources, observedVpa.Spec.ResourcePolicy) // Set RecommendationProvided if recommendation not empty. if len(vpa.Recommendation.ContainerRecommendations) > 0 { vpa.Conditions.Set(vpa_types.RecommendationProvided, true, "", "") @@ -109,7 +108,7 @@ func (r *recommender) UpdateVPAs() { } cnt.Add(vpa) - _, err := vpa_api_util.UpdateVpaStatusIfNeeded( + _, err := vpa_utils.UpdateVpaStatusIfNeeded( r.vpaClient.VerticalPodAutoscalers(vpa.ID.Namespace), vpa, &observedVpa.Status) if err != nil { glog.Errorf( @@ -118,10 +117,35 @@ func (r *recommender) UpdateVPAs() { } } -func (r *recommender) MaintainCheckpoints() { +// getCappedRecommendation creates a recommendation based on recommended pod +// resources, setting the UncappedTarget to the calculated recommended target +// and if necessary, capping the Target, LowerBound and UpperBound according +// to the ResourcePolicy. 
+func getCappedRecommendation(vpaID model.VpaID, resources logic.RecommendedPodResources, + policy *vpa_types.PodResourcePolicy) *vpa_types.RecommendedPodResources { + containerResources := make([]vpa_types.RecommendedContainerResources, 0, len(resources)) + for containerName, res := range resources { + containerResources = append(containerResources, vpa_types.RecommendedContainerResources{ + ContainerName: containerName, + Target: model.ResourcesAsResourceList(res.Target), + LowerBound: model.ResourcesAsResourceList(res.LowerBound), + UpperBound: model.ResourcesAsResourceList(res.UpperBound), + UncappedTarget: model.ResourcesAsResourceList(res.Target), + }) + } + recommendation := &vpa_types.RecommendedPodResources{containerResources} + cappedRecommendation, err := vpa_utils.ApplyVPAPolicy(recommendation, policy) + if err != nil { + glog.Errorf("Failed to apply policy for VPA %v/%v: %v", vpaID.Namespace, vpaID.VpaName, err) + return recommendation + } + return cappedRecommendation +} + +func (r *recommender) MaintainCheckpoints(ctx context.Context, minCheckpointsPerRun int) { now := time.Now() if r.useCheckpoints { - r.checkpointWriter.StoreCheckpoints(now) + r.checkpointWriter.StoreCheckpoints(ctx, now, minCheckpointsPerRun) if time.Now().Sub(r.lastCheckpointGC) > r.checkpointsGCInterval { r.lastCheckpointGC = now r.clusterStateFeeder.GarbageCollectCheckpoints() @@ -142,11 +166,18 @@ func (r *recommender) RunOnce() { timer := metrics_recommender.NewExecutionTimer() defer timer.ObserveTotal() + ctx := context.Background() + ctx, cancelFunc := context.WithDeadline(ctx, time.Now().Add(*checkpointsWriteTimeout)) + defer cancelFunc() + glog.V(3).Infof("Recommender Run") + r.clusterStateFeeder.LoadVPAs() timer.ObserveStep("LoadVPAs") + r.clusterStateFeeder.LoadPods() timer.ObserveStep("LoadPods") + r.clusterStateFeeder.LoadRealTimeMetrics() timer.ObserveStep("LoadMetrics") glog.V(3).Infof("ClusterState is tracking %v PodStates and %v VPAs", len(r.clusterState.Pods), len(r.clusterState.Vpas)) @@ -154,29 +185,56 @@ func (r *recommender) RunOnce() { r.UpdateVPAs() timer.ObserveStep("UpdateVPAs") - r.MaintainCheckpoints() + r.MaintainCheckpoints(ctx, *minCheckpointsPerRun) timer.ObserveStep("MaintainCheckpoints") r.GarbageCollect() timer.ObserveStep("GarbageCollect") } -// NewRecommender creates a new recommender instance, +// RecommenderFactory makes instances of Recommender. +type RecommenderFactory struct { + ClusterState *model.ClusterState + + ClusterStateFeeder input.ClusterStateFeeder + CheckpointWriter checkpoint.CheckpointWriter + PodResourceRecommender logic.PodResourceRecommender + VpaClient vpa_api.VerticalPodAutoscalersGetter + + CheckpointsGCInterval time.Duration + UseCheckpoints bool +} + +// Make creates a new recommender instance, // which can be run in order to provide continuous resource recommendations for containers. -// It requires cluster configuration object and duration between recommender intervals. 
-func NewRecommender(config *rest.Config, checkpointsGCInterval time.Duration, useCheckpoints bool) Recommender { - clusterState := model.NewClusterState() +func (c RecommenderFactory) Make() Recommender { recommender := &recommender{ - clusterState: clusterState, - clusterStateFeeder: input.NewClusterStateFeeder(config, clusterState), - checkpointWriter: checkpoint.NewCheckpointWriter(clusterState, vpa_clientset.NewForConfigOrDie(config).PocV1alpha1()), - checkpointsGCInterval: checkpointsGCInterval, - lastCheckpointGC: time.Now(), - vpaClient: vpa_clientset.NewForConfigOrDie(config).PocV1alpha1(), - podResourceRecommender: logic.CreatePodResourceRecommender(), - useCheckpoints: useCheckpoints, + clusterState: c.ClusterState, + clusterStateFeeder: c.ClusterStateFeeder, + checkpointWriter: c.CheckpointWriter, + checkpointsGCInterval: c.CheckpointsGCInterval, + useCheckpoints: c.UseCheckpoints, + vpaClient: c.VpaClient, + podResourceRecommender: c.PodResourceRecommender, lastAggregateContainerStateGC: time.Now(), + lastCheckpointGC: time.Now(), } glog.V(3).Infof("New Recommender created %+v", recommender) return recommender } + +// NewRecommender creates a new recommender instance. +// Dependencies are created automatically. +// Deprecated; use RecommenderFactory instead. +func NewRecommender(config *rest.Config, checkpointsGCInterval time.Duration, useCheckpoints bool) Recommender { + clusterState := model.NewClusterState() + return RecommenderFactory{ + ClusterState: clusterState, + ClusterStateFeeder: input.NewClusterStateFeeder(config, clusterState), + CheckpointWriter: checkpoint.NewCheckpointWriter(clusterState, vpa_clientset.NewForConfigOrDie(config).AutoscalingV1beta1()), + VpaClient: vpa_clientset.NewForConfigOrDie(config).AutoscalingV1beta1(), + PodResourceRecommender: logic.CreatePodResourceRecommender(), + CheckpointsGCInterval: checkpointsGCInterval, + UseCheckpoints: useCheckpoints, + }.Make() +} diff --git a/vertical-pod-autoscaler/pkg/recommender/routines/vpa.go b/vertical-pod-autoscaler/pkg/recommender/routines/vpa.go index 793238546b84..dda07bc1f64f 100644 --- a/vertical-pod-autoscaler/pkg/recommender/routines/vpa.go +++ b/vertical-pod-autoscaler/pkg/recommender/routines/vpa.go @@ -17,7 +17,7 @@ limitations under the License. 
package routines import ( - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/model" api_utils "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa" ) diff --git a/vertical-pod-autoscaler/pkg/recommender/util/decaying_histogram.go b/vertical-pod-autoscaler/pkg/recommender/util/decaying_histogram.go index 5f3a1e6855ba..32fb62fd6767 100644 --- a/vertical-pod-autoscaler/pkg/recommender/util/decaying_histogram.go +++ b/vertical-pod-autoscaler/pkg/recommender/util/decaying_histogram.go @@ -22,7 +22,7 @@ import ( "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" ) var ( diff --git a/vertical-pod-autoscaler/pkg/recommender/util/decaying_histogram_test.go b/vertical-pod-autoscaler/pkg/recommender/util/decaying_histogram_test.go index 2a6ff54ecde8..40a9a8755ea5 100644 --- a/vertical-pod-autoscaler/pkg/recommender/util/decaying_histogram_test.go +++ b/vertical-pod-autoscaler/pkg/recommender/util/decaying_histogram_test.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/assert" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" ) var ( @@ -47,8 +47,8 @@ func TestSimpleDecay(t *testing.T) { // Add another sample 20 half life periods later. Its relative weight is // expected to be 2^20 * 0.001 > 1000 times larger than the first sample. h.AddSample(1, 1, startTime.Add(time.Hour*20)) - assert.InEpsilon(t, 1.5, h.Percentile(0.999), valueEpsilon) - assert.InEpsilon(t, 2.5, h.Percentile(1.0), valueEpsilon) + assert.InEpsilon(t, 2, h.Percentile(0.999), valueEpsilon) + assert.InEpsilon(t, 3, h.Percentile(1.0), valueEpsilon) } // Verify that the decaying histogram behaves correctly after the decaying @@ -60,7 +60,7 @@ func TestLongtermDecay(t *testing.T) { // Add another sample later, such that the relative decay factor of the // two samples will exceed 2^maxDecayExponent. h.AddSample(1, 1, startTime.Add(time.Hour*101)) - assert.InEpsilon(t, 1.5, h.Percentile(1.0), valueEpsilon) + assert.InEpsilon(t, 2, h.Percentile(1.0), valueEpsilon) } // Verify specific values of percentiles on an example decaying histogram with @@ -79,14 +79,14 @@ func TestDecayingHistogramPercentiles(t *testing.T) { // bucket = [2..3], weight = 2 * 2^(-2), percentiles ~ 3% ... 10% // bucket = [3..4], weight = 3 * 2^(-1), percentiles ~ 11% ... 34% // bucket = [4..5], weight = 4 * 2^(-0), percentiles ~ 35% ... 
100% - assert.InEpsilon(t, 1.5, h.Percentile(0.00), valueEpsilon) - assert.InEpsilon(t, 1.5, h.Percentile(0.02), valueEpsilon) - assert.InEpsilon(t, 2.5, h.Percentile(0.03), valueEpsilon) - assert.InEpsilon(t, 2.5, h.Percentile(0.10), valueEpsilon) - assert.InEpsilon(t, 3.5, h.Percentile(0.11), valueEpsilon) - assert.InEpsilon(t, 3.5, h.Percentile(0.34), valueEpsilon) - assert.InEpsilon(t, 4.5, h.Percentile(0.35), valueEpsilon) - assert.InEpsilon(t, 4.5, h.Percentile(1.00), valueEpsilon) + assert.InEpsilon(t, 2, h.Percentile(0.00), valueEpsilon) + assert.InEpsilon(t, 2, h.Percentile(0.02), valueEpsilon) + assert.InEpsilon(t, 3, h.Percentile(0.03), valueEpsilon) + assert.InEpsilon(t, 3, h.Percentile(0.10), valueEpsilon) + assert.InEpsilon(t, 4, h.Percentile(0.11), valueEpsilon) + assert.InEpsilon(t, 4, h.Percentile(0.34), valueEpsilon) + assert.InEpsilon(t, 5, h.Percentile(0.35), valueEpsilon) + assert.InEpsilon(t, 5, h.Percentile(1.00), valueEpsilon) } // Verifies that the decaying histogram behaves the same way as a regular @@ -96,17 +96,17 @@ func TestNoDecay(t *testing.T) { for i := 1; i <= 4; i++ { h.AddSample(float64(i), float64(i), startTime) } - assert.InEpsilon(t, 1.5, h.Percentile(0.0), valueEpsilon) - assert.InEpsilon(t, 2.5, h.Percentile(0.2), valueEpsilon) - assert.InEpsilon(t, 1.5, h.Percentile(0.1), valueEpsilon) - assert.InEpsilon(t, 2.5, h.Percentile(0.3), valueEpsilon) - assert.InEpsilon(t, 3.5, h.Percentile(0.4), valueEpsilon) - assert.InEpsilon(t, 3.5, h.Percentile(0.5), valueEpsilon) - assert.InEpsilon(t, 3.5, h.Percentile(0.6), valueEpsilon) - assert.InEpsilon(t, 4.5, h.Percentile(0.7), valueEpsilon) - assert.InEpsilon(t, 4.5, h.Percentile(0.8), valueEpsilon) - assert.InEpsilon(t, 4.5, h.Percentile(0.9), valueEpsilon) - assert.InEpsilon(t, 4.5, h.Percentile(1.0), valueEpsilon) + assert.InEpsilon(t, 2, h.Percentile(0.0), valueEpsilon) + assert.InEpsilon(t, 3, h.Percentile(0.2), valueEpsilon) + assert.InEpsilon(t, 2, h.Percentile(0.1), valueEpsilon) + assert.InEpsilon(t, 3, h.Percentile(0.3), valueEpsilon) + assert.InEpsilon(t, 4, h.Percentile(0.4), valueEpsilon) + assert.InEpsilon(t, 4, h.Percentile(0.5), valueEpsilon) + assert.InEpsilon(t, 4, h.Percentile(0.6), valueEpsilon) + assert.InEpsilon(t, 5, h.Percentile(0.7), valueEpsilon) + assert.InEpsilon(t, 5, h.Percentile(0.8), valueEpsilon) + assert.InEpsilon(t, 5, h.Percentile(0.9), valueEpsilon) + assert.InEpsilon(t, 5, h.Percentile(1.0), valueEpsilon) } // Verifies that Merge() works as expected on two sample decaying histograms. diff --git a/vertical-pod-autoscaler/pkg/recommender/util/histogram.go b/vertical-pod-autoscaler/pkg/recommender/util/histogram.go index 18647da26eb9..13b5cad31d3b 100644 --- a/vertical-pod-autoscaler/pkg/recommender/util/histogram.go +++ b/vertical-pod-autoscaler/pkg/recommender/util/histogram.go @@ -21,7 +21,7 @@ import ( "strings" "time" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" ) const ( @@ -84,7 +84,7 @@ func NewHistogram(options HistogramOptions) Histogram { // Simple bucket-based implementation of the Histogram interface. Each bucket // holds the total weight of samples that belong to it. -// Percentile() returns the middle of the correspodning bucket. +// Percentile() returns the upper bound of the corresponding bucket. // Resolution (bucket boundaries) of the histogram depends on the options. 
// There's no interpolation within buckets (i.e. one sample falls to exactly one // bucket). @@ -110,25 +110,32 @@ func (h *histogram) AddSample(value float64, weight float64, time time.Time) { bucket := h.options.FindBucket(value) h.bucketWeight[bucket] += weight h.totalWeight += weight - if bucket < h.minBucket { + if bucket < h.minBucket && h.bucketWeight[bucket] >= h.options.Epsilon() { h.minBucket = bucket } - if bucket > h.maxBucket { + if bucket > h.maxBucket && h.bucketWeight[bucket] >= h.options.Epsilon() { h.maxBucket = bucket } } +func safeSubtract(value, sub, epsilon float64) float64 { + value -= sub + if value < epsilon { + return 0.0 + } + return value +} + func (h *histogram) SubtractSample(value float64, weight float64, time time.Time) { if weight < 0.0 { panic("sample weight must be non-negative") } bucket := h.options.FindBucket(value) epsilon := h.options.Epsilon() - if weight > h.bucketWeight[bucket]-epsilon { - weight = h.bucketWeight[bucket] - } - h.totalWeight -= weight - h.bucketWeight[bucket] -= weight + + h.totalWeight = safeSubtract(h.totalWeight, weight, epsilon) + h.bucketWeight[bucket] = safeSubtract(h.bucketWeight[bucket], weight, epsilon) + h.updateMinAndMaxBucket() } @@ -162,15 +169,13 @@ func (h *histogram) Percentile(percentile float64) float64 { break } } - bucketStart := h.options.GetBucketStart(bucket) if bucket < h.options.NumBuckets()-1 { - // Return the middle point between the bucket boundaries. - bucketEnd := h.options.GetBucketStart(bucket + 1) - return (bucketStart + bucketEnd) / 2.0 + // Return the end of the bucket. + return h.options.GetBucketStart(bucket + 1) } // Return the start of the last bucket (note that the last bucket // doesn't have an upper bound). - return bucketStart + return h.options.GetBucketStart(bucket) } func (h *histogram) IsEmpty() bool { diff --git a/vertical-pod-autoscaler/pkg/recommender/util/histogram_mock.go b/vertical-pod-autoscaler/pkg/recommender/util/histogram_mock.go index 7521bf858ac1..b3a823088210 100644 --- a/vertical-pod-autoscaler/pkg/recommender/util/histogram_mock.go +++ b/vertical-pod-autoscaler/pkg/recommender/util/histogram_mock.go @@ -20,7 +20,7 @@ import ( "time" "github.com/stretchr/testify/mock" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" ) // MockHistogram is a mock implementation of Histogram interface. 
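The percentile change in histogram.go above swaps the midpoint rule for the bucket's upper bound, the conservative choice for sizing requests: the returned value may overshoot the true percentile by at most one bucket width but never undershoots it. This is also why every expected value in the tests below shifts from a midpoint (1.5, 2.5, ...) to a bucket end (2, 3, ...). A toy reimplementation over unit-width buckets, illustrative only:

```go
package main

import "fmt"

// percentile returns the upper bound of the bucket in which the cumulative
// weight crosses p, skipping empty leading buckets (the real histogram
// tracks minBucket for this). Bucket i covers the interval [i, i+1).
func percentile(weights []float64, p float64) float64 {
	total := 0.0
	for _, w := range weights {
		total += w
	}
	partial := 0.0
	for i, w := range weights {
		partial += w
		if w > 0 && partial >= p*total {
			return float64(i + 1) // bucket end, not midpoint
		}
	}
	return float64(len(weights))
}

func main() {
	// Mirrors TestPercentiles: samples 1..4, each with weight equal to its value.
	w := []float64{0, 1, 2, 3, 4}
	fmt.Println(percentile(w, 0.2), percentile(w, 0.5), percentile(w, 1.0)) // 3 4 5
}
```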
diff --git a/vertical-pod-autoscaler/pkg/recommender/util/histogram_test.go b/vertical-pod-autoscaler/pkg/recommender/util/histogram_test.go index 6f1ede26a884..7092f709c429 100644 --- a/vertical-pod-autoscaler/pkg/recommender/util/histogram_test.go +++ b/vertical-pod-autoscaler/pkg/recommender/util/histogram_test.go @@ -21,7 +21,7 @@ import ( "time" "github.com/stretchr/testify/assert" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" ) var ( @@ -51,17 +51,17 @@ func TestPercentiles(t *testing.T) { for i := 1; i <= 4; i++ { h.AddSample(float64(i), float64(i), anyTime) } - assert.InEpsilon(t, 1.5, h.Percentile(0.0), valueEpsilon) - assert.InEpsilon(t, 1.5, h.Percentile(0.1), valueEpsilon) - assert.InEpsilon(t, 2.5, h.Percentile(0.2), valueEpsilon) - assert.InEpsilon(t, 2.5, h.Percentile(0.3), valueEpsilon) - assert.InEpsilon(t, 3.5, h.Percentile(0.4), valueEpsilon) - assert.InEpsilon(t, 3.5, h.Percentile(0.5), valueEpsilon) - assert.InEpsilon(t, 3.5, h.Percentile(0.6), valueEpsilon) - assert.InEpsilon(t, 4.5, h.Percentile(0.7), valueEpsilon) - assert.InEpsilon(t, 4.5, h.Percentile(0.8), valueEpsilon) - assert.InEpsilon(t, 4.5, h.Percentile(0.9), valueEpsilon) - assert.InEpsilon(t, 4.5, h.Percentile(1.0), valueEpsilon) + assert.InEpsilon(t, 2, h.Percentile(0.0), valueEpsilon) + assert.InEpsilon(t, 2, h.Percentile(0.1), valueEpsilon) + assert.InEpsilon(t, 3, h.Percentile(0.2), valueEpsilon) + assert.InEpsilon(t, 3, h.Percentile(0.3), valueEpsilon) + assert.InEpsilon(t, 4, h.Percentile(0.4), valueEpsilon) + assert.InEpsilon(t, 4, h.Percentile(0.5), valueEpsilon) + assert.InEpsilon(t, 4, h.Percentile(0.6), valueEpsilon) + assert.InEpsilon(t, 5, h.Percentile(0.7), valueEpsilon) + assert.InEpsilon(t, 5, h.Percentile(0.8), valueEpsilon) + assert.InEpsilon(t, 5, h.Percentile(0.9), valueEpsilon) + assert.InEpsilon(t, 5, h.Percentile(1.0), valueEpsilon) } // Verifies that querying percentile < 0.0 returns the minimum value in the @@ -71,17 +71,16 @@ func TestPercentileOutOfBounds(t *testing.T) { options, err := NewLinearHistogramOptions(1.0, 0.1, weightEpsilon) assert.Nil(t, err) h := NewHistogram(options) - assert.Nil(t, err) h.AddSample(0.1, 0.1, anyTime) h.AddSample(0.2, 0.2, anyTime) - assert.InEpsilon(t, 0.15, h.Percentile(-0.1), valueEpsilon) - assert.InEpsilon(t, 0.25, h.Percentile(1.1), valueEpsilon) + assert.InEpsilon(t, 0.2, h.Percentile(-0.1), valueEpsilon) + assert.InEpsilon(t, 0.3, h.Percentile(1.1), valueEpsilon) // Fill the boundary buckets. h.AddSample(0.0, 0.1, anyTime) h.AddSample(1.0, 0.2, anyTime) - assert.InEpsilon(t, 0.05, h.Percentile(-0.1), valueEpsilon) + assert.InEpsilon(t, 0.1, h.Percentile(-0.1), valueEpsilon) assert.InEpsilon(t, 1.0, h.Percentile(1.1), valueEpsilon) } @@ -90,7 +89,6 @@ func TestEmptyHistogram(t *testing.T) { options, err := NewLinearHistogramOptions(1.0, 0.1, weightEpsilon) assert.Nil(t, err) h := NewHistogram(options) - assert.Nil(t, err) assert.True(t, h.IsEmpty()) h.AddSample(0.1, weightEpsilon*2.5, anyTime) // Sample weight = epsilon * 2.5. assert.False(t, h.IsEmpty()) @@ -100,6 +98,21 @@ func TestEmptyHistogram(t *testing.T) { assert.True(t, h.IsEmpty()) } +// Verifies that IsEmpty() returns false if we add epsilon-weight sample to a non-empty histogram. 
+func TestNonEmptyOnEpsilonAddition(t *testing.T) { + options, err := NewLinearHistogramOptions(1.0, 0.1, weightEpsilon) + assert.Nil(t, err) + h := NewHistogram(options) + assert.True(t, h.IsEmpty()) + + h.AddSample(9.9, weightEpsilon*3, anyTime) + assert.False(t, h.IsEmpty()) + h.AddSample(0.1, weightEpsilon*0.3, anyTime) + assert.False(t, h.IsEmpty()) // weight*3 sample should make the histogram non-empty + h.AddSample(999.9, weightEpsilon*0.3, anyTime) + assert.False(t, h.IsEmpty()) +} + // Verifies that Merge() works as expected on two sample histograms. func TestHistogramMerge(t *testing.T) { h1 := NewHistogram(testHistogramOptions) diff --git a/vertical-pod-autoscaler/pkg/updater/Makefile b/vertical-pod-autoscaler/pkg/updater/Makefile index b809b9eb61ff..7645088fd4b1 100644 --- a/vertical-pod-autoscaler/pkg/updater/Makefile +++ b/vertical-pod-autoscaler/pkg/updater/Makefile @@ -1,33 +1,59 @@ all: build -TAG?=v0.0.1 -REGISTRY?=k8s.gcr.io +TAG?=dev +REGISTRY?=staging-k8s.gcr.io FLAGS= ENVVAR= GOOS?=linux +COMPONENT=updater +FULL_COMPONENT=vpa-${COMPONENT} deps: go get github.com/tools/godep build: clean deps $(ENVVAR) GOOS=$(GOOS) godep go build ./... - $(ENVVAR) GOOS=$(GOOS) godep go build -o updater + $(ENVVAR) GOOS=$(GOOS) godep go build -o ${COMPONENT} + +build-binary: clean deps + $(ENVVAR) GOOS=$(GOOS) godep go build -o ${COMPONENT} test-unit: clean deps build $(ENVVAR) godep go test --test.short -race ./... $(FLAGS) -docker: +docker-build: +ifndef REGISTRY + ERR = $(error REGISTRY is undefined) + $(ERR) +endif +ifndef TAG + ERR = $(error TAG is undefined) + $(ERR) +endif + docker build --pull -t ${REGISTRY}/${FULL_COMPONENT}:${TAG} . + +docker-push: ifndef REGISTRY ERR = $(error REGISTRY is undefined) $(ERR) endif - docker build --pull -t ${REGISTRY}/vpa-updater:${TAG} . - docker push ${REGISTRY}/vpa-updater:${TAG} +ifndef TAG + ERR = $(error TAG is undefined) + $(ERR) +endif + docker push ${REGISTRY}/${FULL_COMPONENT}:${TAG} + +docker-builder: + docker build -t vpa-autoscaling-builder ../../builder + +build-in-docker: clean docker-builder + docker run -v `pwd`/../..:/gopath/src/k8s.io/autoscaler/vertical-pod-autoscaler vpa-autoscaling-builder:latest bash -c 'cd /gopath/src/k8s.io/autoscaler/vertical-pod-autoscaler && make build-binary -C pkg/updater' -release: build docker +release: build-in-docker docker-build docker-push + @echo "Full in-docker release ${FULL_COMPONENT}:${TAG} completed" clean: - rm -f updater + rm -f ${COMPONENT} format: test -z "$$(find . 
-path ./vendor -prune -type f -o -name '*.go' -exec gofmt -s -d {} + | tee /dev/stderr)" || \ diff --git a/vertical-pod-autoscaler/pkg/updater/eviction/pods_eviction_restriction.go b/vertical-pod-autoscaler/pkg/updater/eviction/pods_eviction_restriction.go index aa90b03bdb1f..529dc0a65d7f 100644 --- a/vertical-pod-autoscaler/pkg/updater/eviction/pods_eviction_restriction.go +++ b/vertical-pod-autoscaler/pkg/updater/eviction/pods_eviction_restriction.go @@ -18,13 +18,23 @@ package eviction import ( "fmt" + "time" "github.com/golang/glog" + appsv1 "k8s.io/api/apps/v1" apiv1 "k8s.io/api/core/v1" policyv1 "k8s.io/api/policy/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metrics_updater "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/metrics/updater" + appsinformer "k8s.io/client-go/informers/apps/v1" + coreinformer "k8s.io/client-go/informers/core/v1" kube_client "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" +) + +const ( + resyncPeriod time.Duration = 1 * time.Minute ) // PodsEvictionRestriction controls pods evictions. It ensures that we will not evict too @@ -33,7 +43,7 @@ import ( type PodsEvictionRestriction interface { // Evict sends eviction instruction to the api client. // Returns error if pod cannot be evicted or if client returned error. - Evict(pod *apiv1.Pod) error + Evict(pod *apiv1.Pod, eventRecorder record.EventRecorder) error // CanEvict checks if pod can be safely evicted CanEvict(pod *apiv1.Pod) bool } @@ -60,14 +70,26 @@ type PodsEvictionRestrictionFactory interface { type podsEvictionRestrictionFactoryImpl struct { client kube_client.Interface + rcInformer cache.SharedIndexInformer // informer for Replication Controllers + ssInformer cache.SharedIndexInformer // informer for Stateful Sets + rsInformer cache.SharedIndexInformer // informer for Replica Sets minReplicas int evictionToleranceFraction float64 } +type controllerKind string + +const ( + replicationController controllerKind = "ReplicationController" + statefulSet controllerKind = "StatefulSet" + replicaSet controllerKind = "ReplicaSet" + job controllerKind = "Job" +) + type podReplicaCreator struct { Namespace string Name string - Kind string + Kind controllerKind } // CanEvict checks if pod can be safely evicted @@ -96,7 +118,7 @@ func (e *podsEvictionRestrictionImpl) CanEvict(pod *apiv1.Pod) bool { // Evict sends eviction instruction to api client. Returns error if pod cannot be evicted or if client returned error // Does not check if pod was actually evicted after eviction grace period. 
-func (e *podsEvictionRestrictionImpl) Evict(podToEvict *apiv1.Pod) error {
+func (e *podsEvictionRestrictionImpl) Evict(podToEvict *apiv1.Pod, eventRecorder record.EventRecorder) error {
     cr, present := e.podToReplicaCreatorMap[getPodID(podToEvict)]
     if !present {
         return fmt.Errorf("pod not suitable for eviction %v : not in replicated pods map", podToEvict.Name)
@@ -114,9 +136,11 @@ func (e *podsEvictionRestrictionImpl) Evict(podToEvict *apiv1.Pod) error {
     }
     err := e.client.CoreV1().Pods(podToEvict.Namespace).Evict(eviction)
     if err != nil {
-        glog.Errorf("failed to evict pod %s, error: %v", podToEvict.Name, err)
+        glog.Errorf("failed to evict pod %s/%s, error: %v", podToEvict.Namespace, podToEvict.Name, err)
         return err
     }
+    eventRecorder.Event(podToEvict, apiv1.EventTypeNormal, "EvictedByVPA",
+        "Pod was evicted by VPA Updater to apply resource recommendation.")
     metrics_updater.AddEvictedPod()
 
     if podToEvict.Status.Phase != apiv1.PodPending {
@@ -132,8 +156,27 @@ func (e *podsEvictionRestrictionImpl) Evict(podToEvict *apiv1.Pod) error {
 }
 
 // NewPodsEvictionRestrictionFactory creates PodsEvictionRestrictionFactory
-func NewPodsEvictionRestrictionFactory(client kube_client.Interface, minReplicas int, evictionToleranceFraction float64) PodsEvictionRestrictionFactory {
-    return &podsEvictionRestrictionFactoryImpl{client: client, minReplicas: minReplicas, evictionToleranceFraction: evictionToleranceFraction}
+func NewPodsEvictionRestrictionFactory(client kube_client.Interface, minReplicas int,
+    evictionToleranceFraction float64) (PodsEvictionRestrictionFactory, error) {
+    rcInformer, err := setUpInformer(client, replicationController)
+    if err != nil {
+        return nil, fmt.Errorf("Failed to create rcInformer: %v", err)
+    }
+    ssInformer, err := setUpInformer(client, statefulSet)
+    if err != nil {
+        return nil, fmt.Errorf("Failed to create ssInformer: %v", err)
+    }
+    rsInformer, err := setUpInformer(client, replicaSet)
+    if err != nil {
+        return nil, fmt.Errorf("Failed to create rsInformer: %v", err)
+    }
+    return &podsEvictionRestrictionFactoryImpl{
+        client:                    client,
+        rcInformer:                rcInformer, // informer for Replication Controllers
+        ssInformer:                ssInformer, // informer for Stateful Sets
+        rsInformer:                rsInformer, // informer for Replica Sets
+        minReplicas:               minReplicas,
+        evictionToleranceFraction: evictionToleranceFraction}, nil
 }
 
 // NewPodsEvictionRestriction creates PodsEvictionRestriction for a given set of pods.
@@ -169,12 +212,12 @@ func (f *podsEvictionRestrictionFactoryImpl) NewPodsEvictionRestriction(pods []*
         }
 
         var configured int
-        if creator.Kind == "Job" {
+        if creator.Kind == job {
             // Job has no replicas configuration, so we will use actual number of live pods as replicas count.
             configured = actual
         } else {
             var err error
-            configured, err = getReplicaCount(creator, f.client)
+            configured, err = f.getReplicaCount(creator)
             if err != nil {
                 glog.Errorf("failed to obtain replication info for %v %v/%v.
%v", creator.Kind, creator.Namespace, creator.Name, err) @@ -194,7 +237,10 @@ func (f *podsEvictionRestrictionFactoryImpl) NewPodsEvictionRestriction(pods []* singleGroup.running = len(replicas) - singleGroup.pending creatorToSingleGroupStatsMap[creator] = singleGroup } - return &podsEvictionRestrictionImpl{client: f.client, podToReplicaCreatorMap: podToReplicaCreatorMap, creatorToSingleGroupStatsMap: creatorToSingleGroupStatsMap} + return &podsEvictionRestrictionImpl{ + client: f.client, + podToReplicaCreatorMap: podToReplicaCreatorMap, + creatorToSingleGroupStatsMap: creatorToSingleGroupStatsMap} } func getPodReplicaCreator(pod *apiv1.Pod) (*podReplicaCreator, error) { @@ -205,7 +251,7 @@ func getPodReplicaCreator(pod *apiv1.Pod) (*podReplicaCreator, error) { podReplicaCreator := &podReplicaCreator{ Namespace: pod.Namespace, Name: creator.Name, - Kind: creator.Kind, + Kind: controllerKind(creator.Kind), } return podReplicaCreator, nil } @@ -217,35 +263,54 @@ func getPodID(pod *apiv1.Pod) string { return pod.Namespace + "/" + pod.Name } -func getReplicaCount(creator podReplicaCreator, client kube_client.Interface) (int, error) { +func (f *podsEvictionRestrictionFactoryImpl) getReplicaCount(creator podReplicaCreator) (int, error) { switch creator.Kind { - case "ReplicationController": - rc, err := client.CoreV1().ReplicationControllers(creator.Namespace).Get(creator.Name, metav1.GetOptions{}) - - if err != nil || rc == nil { + case replicationController: + rcObj, exists, err := f.rcInformer.GetStore().GetByKey(creator.Namespace + "/" + creator.Name) + if err != nil { return 0, fmt.Errorf("replication controller %s/%s is not available, err: %v", creator.Namespace, creator.Name, err) } + if !exists { + return 0, fmt.Errorf("replication controller %s/%s does not exist", creator.Namespace, creator.Name) + } + rc, ok := rcObj.(*apiv1.ReplicationController) + if !ok { + return 0, fmt.Errorf("Failed to parse Replication Controller") + } if rc.Spec.Replicas == nil || *rc.Spec.Replicas == 0 { return 0, fmt.Errorf("replication controller %s/%s has no replicas config", creator.Namespace, creator.Name) } return int(*rc.Spec.Replicas), nil - case "ReplicaSet": - rs, err := client.ExtensionsV1beta1().ReplicaSets(creator.Namespace).Get(creator.Name, metav1.GetOptions{}) - - if err != nil || rs == nil { + case replicaSet: + rsObj, exists, err := f.rsInformer.GetStore().GetByKey(creator.Namespace + "/" + creator.Name) + if err != nil { return 0, fmt.Errorf("replica set %s/%s is not available, err: %v", creator.Namespace, creator.Name, err) } + if !exists { + return 0, fmt.Errorf("replica set %s/%s does not exist", creator.Namespace, creator.Name) + } + rs, ok := rsObj.(*appsv1.ReplicaSet) + if !ok { + return 0, fmt.Errorf("Failed to parse Replicaset") + } if rs.Spec.Replicas == nil || *rs.Spec.Replicas == 0 { return 0, fmt.Errorf("replica set %s/%s has no replicas config", creator.Namespace, creator.Name) } return int(*rs.Spec.Replicas), nil - case "StatefulSet": - ss, err := client.AppsV1beta1().StatefulSets(creator.Namespace).Get(creator.Name, metav1.GetOptions{}) - if err != nil || ss == nil { + case statefulSet: + ssObj, exists, err := f.ssInformer.GetStore().GetByKey(creator.Namespace + "/" + creator.Name) + if err != nil { return 0, fmt.Errorf("stateful set %s/%s is not available, err: %v", creator.Namespace, creator.Name, err) } + if !exists { + return 0, fmt.Errorf("stateful set %s/%s does not exist", creator.Namespace, creator.Name) + } + ss, ok := ssObj.(*appsv1.StatefulSet) + if !ok { + return 
0, fmt.Errorf("Failed to parse StatefulSet")
+        }
         if ss.Spec.Replicas == nil || *ss.Spec.Replicas == 0 {
             return 0, fmt.Errorf("stateful set %s/%s has no replicas config", creator.Namespace, creator.Name)
         }
@@ -265,3 +330,27 @@ func managingControllerRef(pod *apiv1.Pod) *metav1.OwnerReference {
     }
     return &managingController
 }
+
+func setUpInformer(kubeClient kube_client.Interface, kind controllerKind) (cache.SharedIndexInformer, error) {
+    var informer cache.SharedIndexInformer
+    switch kind {
+    case replicationController:
+        informer = coreinformer.NewReplicationControllerInformer(kubeClient, apiv1.NamespaceAll,
+            resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
+    case replicaSet:
+        informer = appsinformer.NewReplicaSetInformer(kubeClient, apiv1.NamespaceAll,
+            resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
+    case statefulSet:
+        informer = appsinformer.NewStatefulSetInformer(kubeClient, apiv1.NamespaceAll,
+            resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
+    default:
+        return nil, fmt.Errorf("Unknown controller kind: %v", kind)
+    }
+    stopCh := make(chan struct{})
+    go informer.Run(stopCh)
+    synced := cache.WaitForCacheSync(stopCh, informer.HasSynced)
+    if !synced {
+        return nil, fmt.Errorf("Failed to sync %v cache.", kind)
+    }
+    return informer, nil
+}
diff --git a/vertical-pod-autoscaler/pkg/updater/eviction/pods_eviction_restriction_test.go b/vertical-pod-autoscaler/pkg/updater/eviction/pods_eviction_restriction_test.go
index 7a38607b3950..94364f8a9a21 100644
--- a/vertical-pod-autoscaler/pkg/updater/eviction/pods_eviction_restriction_test.go
+++ b/vertical-pod-autoscaler/pkg/updater/eviction/pods_eviction_restriction_test.go
@@ -19,18 +19,18 @@ package eviction
 import (
     "fmt"
     "testing"
+    "time"
 
     "github.com/stretchr/testify/assert"
-    appsv1beta1 "k8s.io/api/apps/v1beta1"
+    appsv1 "k8s.io/api/apps/v1"
     batchv1 "k8s.io/api/batch/v1"
     apiv1 "k8s.io/api/core/v1"
-    extensions "k8s.io/api/extensions/v1beta1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test"
-    kube_client "k8s.io/client-go/kubernetes"
+    appsinformer "k8s.io/client-go/informers/apps/v1"
+    coreinformer "k8s.io/client-go/informers/core/v1"
     "k8s.io/client-go/kubernetes/fake"
-    core "k8s.io/client-go/testing"
+    "k8s.io/client-go/tools/cache"
     "k8s.io/kubernetes/pkg/api/testapi"
 )
 
@@ -199,7 +199,7 @@ func TestEvictReplicatedByController(t *testing.T) {
             },
         },
         {
-            // Pedning pods are always evictable
+            // Pending pods are always evictable
             replicas:           4,
             evictionTollerance: 0.5,
             pods: []podWithExpectations{
@@ -235,12 +235,13 @@ func TestEvictReplicatedByController(t *testing.T) {
         for _, p := range testCase.pods {
             pods = append(pods, p.pod)
         }
-        eviction := NewPodsEvictionRestrictionFactory(fakeClient(&rc, nil, nil, nil, pods), 2, testCase.evictionTollerance).NewPodsEvictionRestriction(pods)
+        factory, _ := getEvictionRestrictionFactory(&rc, nil, nil, 2, testCase.evictionTollerance)
+        eviction := factory.NewPodsEvictionRestriction(pods)
         for i, p := range testCase.pods {
             assert.Equalf(t, p.canEvict, eviction.CanEvict(p.pod), "TC %v - unexpected CanEvict result for pod-%v %#v", tcIndex, i, p.pod)
         }
         for i, p := range testCase.pods {
-            err := eviction.Evict(p.pod)
+            err := eviction.Evict(p.pod, test.FakeEventRecorder())
             if p.evictionSuccess {
                 assert.NoErrorf(t, err, "TC %v - unexpected Evict result for pod-%v %#v", tcIndex, i, p.pod)
             } else {
@@ -255,7 +256,7
@@ func TestEvictReplicatedByReplicaSet(t *testing.T) { replicas := int32(5) livePods := 5 - rs := extensions.ReplicaSet{ + rs := appsv1.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{ Name: "rs", Namespace: "default", @@ -264,28 +265,29 @@ func TestEvictReplicatedByReplicaSet(t *testing.T) { TypeMeta: metav1.TypeMeta{ Kind: "ReplicaSet", }, - Spec: extensions.ReplicaSetSpec{ + Spec: appsv1.ReplicaSetSpec{ Replicas: &replicas, }, } pods := make([]*apiv1.Pod, livePods) for i := range pods { - pods[i] = test.Pod().WithName("test"+string(i)).WithCreator(&rs.ObjectMeta, &rs.TypeMeta).Get() + pods[i] = test.Pod().WithName(getTestPodName(i)).WithCreator(&rs.ObjectMeta, &rs.TypeMeta).Get() } - eviction := NewPodsEvictionRestrictionFactory(fakeClient(nil, &rs, nil, nil, pods), 2, 0.5).NewPodsEvictionRestriction(pods) + factory, _ := getEvictionRestrictionFactory(nil, &rs, nil, 2, 0.5) + eviction := factory.NewPodsEvictionRestriction(pods) for _, pod := range pods { assert.True(t, eviction.CanEvict(pod)) } for _, pod := range pods[:2] { - err := eviction.Evict(pod) + err := eviction.Evict(pod, test.FakeEventRecorder()) assert.Nil(t, err, "Should evict with no error") } for _, pod := range pods[2:] { - err := eviction.Evict(pod) + err := eviction.Evict(pod, test.FakeEventRecorder()) assert.Error(t, err, "Error expected") } } @@ -294,37 +296,38 @@ func TestEvictReplicatedByStatefulSet(t *testing.T) { replicas := int32(5) livePods := 5 - ss := appsv1beta1.StatefulSet{ + ss := appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ Name: "ss", Namespace: "default", - SelfLink: "/apiv1s/extensions/v1beta1/namespaces/default/statefulsets/ss", + SelfLink: "/apiv1s/apps/v1/namespaces/default/statefulsets/ss", }, TypeMeta: metav1.TypeMeta{ Kind: "StatefulSet", }, - Spec: appsv1beta1.StatefulSetSpec{ + Spec: appsv1.StatefulSetSpec{ Replicas: &replicas, }, } pods := make([]*apiv1.Pod, livePods) for i := range pods { - pods[i] = test.Pod().WithName("test"+string(i)).WithCreator(&ss.ObjectMeta, &ss.TypeMeta).Get() + pods[i] = test.Pod().WithName(getTestPodName(i)).WithCreator(&ss.ObjectMeta, &ss.TypeMeta).Get() } - eviction := NewPodsEvictionRestrictionFactory(fakeClient(nil, nil, &ss, nil, pods), 2, 0.5).NewPodsEvictionRestriction(pods) + factory, _ := getEvictionRestrictionFactory(nil, nil, &ss, 2, 0.5) + eviction := factory.NewPodsEvictionRestriction(pods) for _, pod := range pods { assert.True(t, eviction.CanEvict(pod)) } for _, pod := range pods[:2] { - err := eviction.Evict(pod) + err := eviction.Evict(pod, test.FakeEventRecorder()) assert.Nil(t, err, "Should evict with no error") } for _, pod := range pods[2:] { - err := eviction.Evict(pod) + err := eviction.Evict(pod, test.FakeEventRecorder()) assert.Error(t, err, "Error expected") } } @@ -334,7 +337,7 @@ func TestEvictReplicatedByJob(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "job", Namespace: "default", - SelfLink: "/apiv1s/extensions/v1beta1/namespaces/default/jobs/job", + SelfLink: "/apiv1s/apps/v1/namespaces/default/jobs/job", }, TypeMeta: metav1.TypeMeta{ Kind: "Job", @@ -345,21 +348,22 @@ func TestEvictReplicatedByJob(t *testing.T) { pods := make([]*apiv1.Pod, livePods) for i := range pods { - pods[i] = test.Pod().WithName("test"+string(i)).WithCreator(&job.ObjectMeta, &job.TypeMeta).Get() + pods[i] = test.Pod().WithName(getTestPodName(i)).WithCreator(&job.ObjectMeta, &job.TypeMeta).Get() } - eviction := NewPodsEvictionRestrictionFactory(fakeClient(nil, nil, nil, &job, pods), 2, 0.5).NewPodsEvictionRestriction(pods) + factory, _ := 
getEvictionRestrictionFactory(nil, nil, nil, 2, 0.5) + eviction := factory.NewPodsEvictionRestriction(pods) for _, pod := range pods { assert.True(t, eviction.CanEvict(pod)) } for _, pod := range pods[:2] { - err := eviction.Evict(pod) + err := eviction.Evict(pod, test.FakeEventRecorder()) assert.Nil(t, err, "Should evict with no error") } for _, pod := range pods[2:] { - err := eviction.Evict(pod) + err := eviction.Evict(pod, test.FakeEventRecorder()) assert.Error(t, err, "Error expected") } } @@ -384,17 +388,18 @@ func TestEvictTooFewReplicas(t *testing.T) { pods := make([]*apiv1.Pod, livePods) for i := range pods { - pods[i] = test.Pod().WithName("test"+string(i)).WithCreator(&rc.ObjectMeta, &rc.TypeMeta).Get() + pods[i] = test.Pod().WithName(getTestPodName(i)).WithCreator(&rc.ObjectMeta, &rc.TypeMeta).Get() } - eviction := NewPodsEvictionRestrictionFactory(fakeClient(&rc, nil, nil, nil, pods), 10, 0.5).NewPodsEvictionRestriction(pods) + factory, _ := getEvictionRestrictionFactory(&rc, nil, nil, 10, 0.5) + eviction := factory.NewPodsEvictionRestriction(pods) for _, pod := range pods { assert.False(t, eviction.CanEvict(pod)) } for _, pod := range pods { - err := eviction.Evict(pod) + err := eviction.Evict(pod, test.FakeEventRecorder()) assert.Error(t, err, "Error expected") } } @@ -420,21 +425,22 @@ func TestEvictionTolerance(t *testing.T) { pods := make([]*apiv1.Pod, livePods) for i := range pods { - pods[i] = test.Pod().WithName("test"+string(i)).WithCreator(&rc.ObjectMeta, &rc.TypeMeta).Get() + pods[i] = test.Pod().WithName(getTestPodName(i)).WithCreator(&rc.ObjectMeta, &rc.TypeMeta).Get() } - eviction := NewPodsEvictionRestrictionFactory(fakeClient(&rc, nil, nil, nil, pods), 2 /*minReplicas*/, tolerance).NewPodsEvictionRestriction(pods) + factory, _ := getEvictionRestrictionFactory(&rc, nil, nil, 2 /*minReplicas*/, tolerance) + eviction := factory.NewPodsEvictionRestriction(pods) for _, pod := range pods { assert.True(t, eviction.CanEvict(pod)) } for _, pod := range pods[:4] { - err := eviction.Evict(pod) + err := eviction.Evict(pod, test.FakeEventRecorder()) assert.Nil(t, err, "Should evict with no error") } for _, pod := range pods[4:] { - err := eviction.Evict(pod) + err := eviction.Evict(pod, test.FakeEventRecorder()) assert.Error(t, err, "Error expected") } } @@ -460,47 +466,64 @@ func TestEvictAtLeastOne(t *testing.T) { pods := make([]*apiv1.Pod, livePods) for i := range pods { - pods[i] = test.Pod().WithName("test"+string(i)).WithCreator(&rc.ObjectMeta, &rc.TypeMeta).Get() + pods[i] = test.Pod().WithName(getTestPodName(i)).WithCreator(&rc.ObjectMeta, &rc.TypeMeta).Get() } - eviction := NewPodsEvictionRestrictionFactory(fakeClient(&rc, nil, nil, nil, pods), 2, tolerance).NewPodsEvictionRestriction(pods) + factory, _ := getEvictionRestrictionFactory(&rc, nil, nil, 2, tolerance) + eviction := factory.NewPodsEvictionRestriction(pods) for _, pod := range pods { assert.True(t, eviction.CanEvict(pod)) } for _, pod := range pods[:1] { - err := eviction.Evict(pod) + err := eviction.Evict(pod, test.FakeEventRecorder()) assert.Nil(t, err, "Should evict with no error") } for _, pod := range pods[1:] { - err := eviction.Evict(pod) + err := eviction.Evict(pod, test.FakeEventRecorder()) assert.Error(t, err, "Error expected") } } -func fakeClient(rc *apiv1.ReplicationController, rs *extensions.ReplicaSet, ss *appsv1beta1.StatefulSet, job *batchv1.Job, pods []*apiv1.Pod) kube_client.Interface { - fakeClient := &fake.Clientset{} - register := func(resource string, obj runtime.Object, meta 
metav1.ObjectMeta) { - fakeClient.Fake.AddReactor("get", resource, func(action core.Action) (bool, runtime.Object, error) { - getAction := action.(core.GetAction) - if getAction.GetName() == meta.GetName() && getAction.GetNamespace() == meta.GetNamespace() { - return true, obj, nil - } - return false, nil, fmt.Errorf("Not found") - }) - } +func getEvictionRestrictionFactory(rc *apiv1.ReplicationController, rs *appsv1.ReplicaSet, + ss *appsv1.StatefulSet, minReplicas int, + evictionToleranceFraction float64) (PodsEvictionRestrictionFactory, error) { + kubeClient := &fake.Clientset{} + rcInformer := coreinformer.NewReplicationControllerInformer(kubeClient, apiv1.NamespaceAll, + 0*time.Second, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) + rsInformer := appsinformer.NewReplicaSetInformer(kubeClient, apiv1.NamespaceAll, + 0*time.Second, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) + ssInformer := appsinformer.NewStatefulSetInformer(kubeClient, apiv1.NamespaceAll, + 0*time.Second, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) if rc != nil { - register("replicationcontrollers", rc, rc.ObjectMeta) + err := rcInformer.GetIndexer().Add(rc) + if err != nil { + return nil, fmt.Errorf("Error adding object to cache: %v", err) + } } if rs != nil { - register("replicasets", rs, rs.ObjectMeta) + err := rsInformer.GetIndexer().Add(rs) + if err != nil { + return nil, fmt.Errorf("Error adding object to cache: %v", err) + } } if ss != nil { - register("statefulsets", ss, ss.ObjectMeta) - } - if job != nil { - register("jobs", job, job.ObjectMeta) + err := ssInformer.GetIndexer().Add(ss) + if err != nil { + return nil, fmt.Errorf("Error adding object to cache: %v", err) + } } - return fakeClient + return &podsEvictionRestrictionFactoryImpl{ + client: kubeClient, + rsInformer: rsInformer, + rcInformer: rcInformer, + ssInformer: ssInformer, + minReplicas: minReplicas, + evictionToleranceFraction: evictionToleranceFraction, + }, nil +} + +func getTestPodName(index int) string { + return fmt.Sprintf("test-%v", index) } diff --git a/vertical-pod-autoscaler/pkg/updater/logic/updater.go b/vertical-pod-autoscaler/pkg/updater/logic/updater.go index 431ab615cb77..f0d5ed799f96 100644 --- a/vertical-pod-autoscaler/pkg/updater/logic/updater.go +++ b/vertical-pod-autoscaler/pkg/updater/logic/updater.go @@ -17,6 +17,7 @@ limitations under the License. 
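Aside: the new `setUpInformer` helper and the test-side `getEvictionRestrictionFactory` above are two halves of one pattern — in production the informer is started and the caller blocks on `cache.WaitForCacheSync`; in tests the informer is never run and its local store is seeded directly, replacing the old reactor-based fake client. A minimal self-contained sketch of the test half (the test name and seeded object are illustrative, not part of this patch):

```go
package eviction_test

import (
	"testing"
	"time"

	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	coreinformer "k8s.io/client-go/informers/core/v1"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/cache"
)

func TestSeededInformer(t *testing.T) {
	client := &fake.Clientset{}
	// Resync period 0 and a namespace index, as in the patch above.
	informer := coreinformer.NewReplicationControllerInformer(client, apiv1.NamespaceAll,
		0*time.Second, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})

	// Seed the informer's cache directly; informer.Run is never called.
	rc := &apiv1.ReplicationController{ObjectMeta: metav1.ObjectMeta{Name: "rc", Namespace: "default"}}
	if err := informer.GetIndexer().Add(rc); err != nil {
		t.Fatalf("seeding cache: %v", err)
	}

	// Lookups now hit the seeded store, no API-server round trip.
	if _, exists, err := informer.GetStore().GetByKey("default/rc"); err != nil || !exists {
		t.Fatalf("expected rc in cache (exists=%v, err=%v)", exists, err)
	}
}
```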
package logic import ( + "fmt" "time" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/updater/eviction" @@ -25,14 +26,18 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" vpa_clientset "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned" - vpa_lister "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/listers/poc.autoscaling.k8s.io/v1alpha1" + vpa_lister "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1beta1" metrics_updater "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/metrics/updater" vpa_api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa" kube_client "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/kubernetes/scheme" + clientv1 "k8s.io/client-go/kubernetes/typed/core/v1" v1lister "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" "github.com/golang/glog" ) @@ -46,20 +51,26 @@ type Updater interface { type updater struct { vpaLister vpa_lister.VerticalPodAutoscalerLister podLister v1lister.PodLister + eventRecorder record.EventRecorder evictionFactory eviction.PodsEvictionRestrictionFactory recommendationProcessor vpa_api_util.RecommendationProcessor evictionAdmission priority.PodEvictionAdmission } // NewUpdater creates Updater with given configuration -func NewUpdater(kubeClient kube_client.Interface, vpaClient *vpa_clientset.Clientset, minReplicasForEvicition int, evictionToleranceFraction float64, recommendationProcessor vpa_api_util.RecommendationProcessor, evictionAdmission priority.PodEvictionAdmission) Updater { +func NewUpdater(kubeClient kube_client.Interface, vpaClient *vpa_clientset.Clientset, minReplicasForEvicition int, evictionToleranceFraction float64, recommendationProcessor vpa_api_util.RecommendationProcessor, evictionAdmission priority.PodEvictionAdmission) (Updater, error) { + factory, err := eviction.NewPodsEvictionRestrictionFactory(kubeClient, minReplicasForEvicition, evictionToleranceFraction) + if err != nil { + return nil, fmt.Errorf("Failed to create eviction restriction factory: %v", err) + } return &updater{ vpaLister: vpa_api_util.NewAllVpasLister(vpaClient, make(chan struct{})), podLister: newPodLister(kubeClient), - evictionFactory: eviction.NewPodsEvictionRestrictionFactory(kubeClient, minReplicasForEvicition, evictionToleranceFraction), + eventRecorder: newEventRecorder(kubeClient), + evictionFactory: factory, recommendationProcessor: recommendationProcessor, evictionAdmission: evictionAdmission, - } + }, nil } // RunOnce represents single iteration in the main-loop of Updater @@ -124,7 +135,7 @@ func (u *updater) RunOnce() { continue } glog.V(2).Infof("evicting pod %v", pod.Name) - evictErr := evictionLimiter.Evict(pod) + evictErr := evictionLimiter.Evict(pod, u.eventRecorder) if evictErr != nil { glog.Warningf("evicting pod %v failed: %v", pod.Name, evictErr) } @@ -178,3 +189,12 @@ func newPodLister(kubeClient kube_client.Interface) v1lister.PodLister { return podLister } + +func newEventRecorder(kubeClient kube_client.Interface) record.EventRecorder { + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartLogging(glog.V(4).Infof) + if _, isFake := kubeClient.(*fake.Clientset); !isFake { + 
eventBroadcaster.StartRecordingToSink(&clientv1.EventSinkImpl{Interface: clientv1.New(kubeClient.CoreV1().RESTClient()).Events("")}) + } + return eventBroadcaster.NewRecorder(scheme.Scheme, apiv1.EventSource{Component: "vpa-updater"}) +} diff --git a/vertical-pod-autoscaler/pkg/updater/logic/updater_test.go b/vertical-pod-autoscaler/pkg/updater/logic/updater_test.go index 51a0a26c5433..d0668b93726d 100644 --- a/vertical-pod-autoscaler/pkg/updater/logic/updater_test.go +++ b/vertical-pod-autoscaler/pkg/updater/logic/updater_test.go @@ -22,7 +22,7 @@ import ( apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/updater/eviction" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test" "k8s.io/kubernetes/pkg/api/testapi" @@ -52,7 +52,7 @@ func TestRunOnce(t *testing.T) { pods[i].Labels = labels eviction.On("CanEvict", pods[i]).Return(true) - eviction.On("Evict", pods[i]).Return(nil) + eviction.On("Evict", pods[i], nil).Return(nil) } factory := &fakeEvictFactory{eviction} @@ -94,7 +94,7 @@ func TestVPAOff(t *testing.T) { pods[i] = test.Pod().WithName("test_" + strconv.Itoa(i)).AddContainer(test.BuildTestContainer(containerName, "1", "100M")).Get() pods[i].Labels = labels eviction.On("CanEvict", pods[i]).Return(true) - eviction.On("Evict", pods[i]).Return(nil) + eviction.On("Evict", pods[i], nil).Return(nil) } factory := &fakeEvictFactory{eviction} diff --git a/vertical-pod-autoscaler/pkg/updater/main.go b/vertical-pod-autoscaler/pkg/updater/main.go index 18b95ece489b..95803df0795d 100644 --- a/vertical-pod-autoscaler/pkg/updater/main.go +++ b/vertical-pod-autoscaler/pkg/updater/main.go @@ -18,6 +18,8 @@ package main import ( "flag" + "time" + "github.com/golang/glog" kube_flag "k8s.io/apiserver/pkg/util/flag" "k8s.io/autoscaler/vertical-pod-autoscaler/common" @@ -28,7 +30,6 @@ import ( vpa_api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa" kube_client "k8s.io/client-go/kubernetes" kube_restclient "k8s.io/client-go/rest" - "time" ) var ( @@ -48,18 +49,19 @@ func main() { kube_flag.InitFlags() glog.V(1).Infof("Vertical Pod Autoscaler %s Updater", common.VerticalPodAutoscalerVersion) - metrics.Initialize(*address) + healthCheck := metrics.NewHealthCheck(*updaterInterval*5, true) + metrics.Initialize(*address, healthCheck) metrics_updater.Register() kubeClient, vpaClient := createKubeClients() - updater := updater.NewUpdater(kubeClient, vpaClient, *minReplicas, *evictionToleranceFraction, vpa_api_util.NewCappingRecommendationProcessor(), nil) - for { - select { - case <-time.After(*updaterInterval): - { - updater.RunOnce() - } - } + updater, err := updater.NewUpdater(kubeClient, vpaClient, *minReplicas, *evictionToleranceFraction, vpa_api_util.NewCappingRecommendationProcessor(), nil) + if err != nil { + glog.Fatalf("Failed to create updater: %v", err) + } + ticker := time.Tick(*updaterInterval) + for range ticker { + updater.RunOnce() + healthCheck.UpdateLastActivity() } } diff --git a/vertical-pod-autoscaler/pkg/updater/priority/pod_eviction_admission.go b/vertical-pod-autoscaler/pkg/updater/priority/pod_eviction_admission.go index 3460853ddf98..b561ff1e5f09 100644 --- a/vertical-pod-autoscaler/pkg/updater/priority/pod_eviction_admission.go +++ b/vertical-pod-autoscaler/pkg/updater/priority/pod_eviction_admission.go @@ -18,7 
+18,7 @@ package priority import ( apiv1 "k8s.io/api/core/v1" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" ) // PodEvictionAdmission controls evictions of pods. diff --git a/vertical-pod-autoscaler/pkg/updater/priority/update_priority_calculator.go b/vertical-pod-autoscaler/pkg/updater/priority/update_priority_calculator.go index a9c93a858ff5..8f41c62af6a7 100644 --- a/vertical-pod-autoscaler/pkg/updater/priority/update_priority_calculator.go +++ b/vertical-pod-autoscaler/pkg/updater/priority/update_priority_calculator.go @@ -24,7 +24,7 @@ import ( "github.com/golang/glog" apiv1 "k8s.io/api/core/v1" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" vpa_api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa" ) @@ -177,7 +177,7 @@ func (calc *UpdatePriorityCalculator) getUpdatePriority(pod *apiv1.Pod, recommen resourceDiff += math.Abs(totalRequest-float64(totalRecommended)) / totalRequest } return podPriority{ - pod: pod, + pod: pod, outsideRecommendedRange: outsideRecommendedRange, scaleUp: scaleUp, resourceDiff: resourceDiff, diff --git a/vertical-pod-autoscaler/pkg/updater/priority/update_priority_calculator_test.go b/vertical-pod-autoscaler/pkg/updater/priority/update_priority_calculator_test.go index b24b638f0fee..dfa94ca4cc84 100644 --- a/vertical-pod-autoscaler/pkg/updater/priority/update_priority_calculator_test.go +++ b/vertical-pod-autoscaler/pkg/updater/priority/update_priority_calculator_test.go @@ -20,7 +20,7 @@ import ( "testing" "time" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test" apiv1 "k8s.io/api/core/v1" diff --git a/vertical-pod-autoscaler/pkg/utils/metrics/healthcheck.go b/vertical-pod-autoscaler/pkg/utils/metrics/healthcheck.go new file mode 100644 index 000000000000..e72f5c9265d4 --- /dev/null +++ b/vertical-pod-autoscaler/pkg/utils/metrics/healthcheck.go @@ -0,0 +1,76 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + "net/http" + "sync" + "time" +) + +// HealthCheck contains information about last activity time of the monitored component. +// NOTE: This started as a simplified version of ClusterAutoscaler's HealthCheck. +type HealthCheck struct { + activityTimeout time.Duration + checkTimeout bool + lastActivity time.Time + mutex *sync.Mutex +} + +// NewHealthCheck builds new HealthCheck object with given timeout. 
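Since `HealthCheck` implements `http.Handler`, its behaviour can be probed end to end. A short sketch, assuming the package import path from this patch; the timings are illustrative only:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"time"

	"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/metrics"
)

func main() {
	// Fail the check if no activity is reported within 50ms.
	hc := metrics.NewHealthCheck(50*time.Millisecond, true)
	srv := httptest.NewServer(hc) // HealthCheck implements http.Handler
	defer srv.Close()

	resp, _ := http.Get(srv.URL)
	fmt.Println(resp.StatusCode) // 200: activity is still fresh

	time.Sleep(100 * time.Millisecond) // let the activity timeout lapse
	resp, _ = http.Get(srv.URL)
	fmt.Println(resp.StatusCode) // 500: "last activity more than ... ago"

	hc.UpdateLastActivity() // what the updater's main loop does after RunOnce
	resp, _ = http.Get(srv.URL)
	fmt.Println(resp.StatusCode) // 200 again
}
```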
+func NewHealthCheck(activityTimeout time.Duration, checkTimeout bool) *HealthCheck { + return &HealthCheck{ + activityTimeout: activityTimeout, + checkTimeout: checkTimeout, + lastActivity: time.Now(), + mutex: &sync.Mutex{}, + } +} + +// checkLastActivity returns true if the last activity was too long ago, with duration from it. +func (hc *HealthCheck) checkLastActivity() (bool, time.Duration) { + hc.mutex.Lock() + defer hc.mutex.Unlock() + + now := time.Now() + lastActivity := hc.lastActivity + activityTimedOut := now.After(lastActivity.Add(hc.activityTimeout)) + timedOut := hc.checkTimeout && activityTimedOut + + return timedOut, now.Sub(lastActivity) +} + +// ServeHTTP implements http.Handler interface to provide a health-check endpoint. +func (hc *HealthCheck) ServeHTTP(w http.ResponseWriter, r *http.Request) { + timedOut, ago := hc.checkLastActivity() + if timedOut { + w.WriteHeader(500) + w.Write([]byte(fmt.Sprintf("Error: last activity more than %v ago", ago))) + } else { + w.WriteHeader(200) + w.Write([]byte("OK")) + } +} + +// UpdateLastActivity updates last time of activity to now +func (hc *HealthCheck) UpdateLastActivity() { + hc.mutex.Lock() + defer hc.mutex.Unlock() + + hc.lastActivity = time.Now() +} diff --git a/vertical-pod-autoscaler/pkg/utils/metrics/metrics.go b/vertical-pod-autoscaler/pkg/utils/metrics/metrics.go index 682334970b6a..1dd89e3b7214 100644 --- a/vertical-pod-autoscaler/pkg/utils/metrics/metrics.go +++ b/vertical-pod-autoscaler/pkg/utils/metrics/metrics.go @@ -41,10 +41,13 @@ const ( TopMetricsNamespace = "vpa_" ) -// Initialize sets up Prometheus to expose metrics on the given address -func Initialize(address string) { +// Initialize sets up Prometheus to expose metrics & (optionally) health-check on the given address +func Initialize(address string, healthCheck *HealthCheck) { go func() { http.Handle("/metrics", promhttp.Handler()) + if healthCheck != nil { + http.Handle("/health-check", healthCheck) + } err := http.ListenAndServe(address, nil) glog.Fatalf("Failed to start metrics: %v", err) }() @@ -79,7 +82,8 @@ func CreateExecutionTimeMetric(namespace string, help string) *prometheus.Histog Namespace: namespace, Name: "execution_latency_seconds", Help: help, - Buckets: []float64{0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0, 20.0, 30.0, 60.0, 120.0, 300.0}, + Buckets: []float64{0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0, + 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0, 120.0, 150.0, 240.0, 300.0}, }, []string{"step"}, ) } diff --git a/vertical-pod-autoscaler/pkg/utils/metrics/recommender/recommender.go b/vertical-pod-autoscaler/pkg/utils/metrics/recommender/recommender.go index 7234c58f981b..4c834adcb8c2 100644 --- a/vertical-pod-autoscaler/pkg/utils/metrics/recommender/recommender.go +++ b/vertical-pod-autoscaler/pkg/utils/metrics/recommender/recommender.go @@ -22,7 +22,7 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" - "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/model" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/metrics" ) @@ -32,7 +32,7 @@ const ( ) var ( - modes = []string{string(v1alpha1.UpdateModeOff), string(v1alpha1.UpdateModeInitial), string(v1alpha1.UpdateModeRecreate), string(v1alpha1.UpdateModeAuto)} + modes = []string{string(vpa_types.UpdateModeOff), string(vpa_types.UpdateModeInitial), 
string(vpa_types.UpdateModeRecreate), string(vpa_types.UpdateModeAuto)} ) var ( @@ -49,7 +49,7 @@ var ( Namespace: metricsNamespace, Name: "recommendation_latency_seconds", Help: "Time elapsed from creating a valid VPA configuration to the first recommendation.", - Buckets: []float64{10.0, 20.0, 30.0, 40.00, 50.0, 60.0, 90.0, 120.0, 150.0, 180.0, 240.0, 300.0, 600.0, 900.0, 1800.0}, + Buckets: []float64{1.0, 2.0, 5.0, 7.5, 10.0, 20.0, 30.0, 40.00, 50.0, 60.0, 90.0, 120.0, 150.0, 180.0, 240.0, 300.0, 600.0, 900.0, 1800.0}, }, ) diff --git a/vertical-pod-autoscaler/pkg/utils/test/test_recommendation.go b/vertical-pod-autoscaler/pkg/utils/test/test_recommendation.go index bf3c239ae13e..79b7df720a84 100644 --- a/vertical-pod-autoscaler/pkg/utils/test/test_recommendation.go +++ b/vertical-pod-autoscaler/pkg/utils/test/test_recommendation.go @@ -18,7 +18,7 @@ package test import ( apiv1 "k8s.io/api/core/v1" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" ) // RecommendationBuilder helps building test instances of RecommendedPodResources. diff --git a/vertical-pod-autoscaler/pkg/utils/test/test_utils.go b/vertical-pod-autoscaler/pkg/utils/test/test_utils.go index 24deb2593f2a..6bbac9c80b50 100644 --- a/vertical-pod-autoscaler/pkg/utils/test/test_utils.go +++ b/vertical-pod-autoscaler/pkg/utils/test/test_utils.go @@ -23,10 +23,13 @@ import ( "github.com/stretchr/testify/mock" apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" - vpa_lister "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/listers/poc.autoscaling.k8s.io/v1alpha1" + "k8s.io/apimachinery/pkg/runtime" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" + vpa_lister "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1beta1" v1 "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/record" ) var ( @@ -125,8 +128,8 @@ type PodsEvictionRestrictionMock struct { } // Evict is a mock implementation of PodsEvictionRestriction.Evict -func (m *PodsEvictionRestrictionMock) Evict(pod *apiv1.Pod) error { - args := m.Called(pod) +func (m *PodsEvictionRestrictionMock) Evict(pod *apiv1.Pod, eventRecorder record.EventRecorder) error { + args := m.Called(pod, eventRecorder) return args.Error(0) } @@ -229,3 +232,26 @@ func (f *FakeRecommendationProcessor) Apply(podRecommendation *vpa_types.Recomme pod *apiv1.Pod) (*vpa_types.RecommendedPodResources, map[string][]string, error) { return podRecommendation, nil, nil } + +// fakeEventRecorder is a dummy implementation of record.EventRecorder. +type fakeEventRecorder struct{} + +// Event is a dummy implementation of record.EventRecorder interface. +func (f *fakeEventRecorder) Event(object runtime.Object, eventtype, reason, message string) {} + +// Eventf is a dummy implementation of record.EventRecorder interface. +func (f *fakeEventRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { +} + +// PastEventf is a dummy implementation of record.EventRecorder interface. 
+func (f *fakeEventRecorder) PastEventf(object runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{}) { +} + +// AnnotatedEventf is a dummy implementation of record.EventRecorder interface. +func (f *fakeEventRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { +} + +// FakeEventRecorder returns a dummy implementation of record.EventRecorder. +func FakeEventRecorder() record.EventRecorder { + return &fakeEventRecorder{} +} diff --git a/vertical-pod-autoscaler/pkg/utils/test/test_vpa.go b/vertical-pod-autoscaler/pkg/utils/test/test_vpa.go index 207a8d33f160..3310e7ddc4db 100644 --- a/vertical-pod-autoscaler/pkg/utils/test/test_vpa.go +++ b/vertical-pod-autoscaler/pkg/utils/test/test_vpa.go @@ -21,7 +21,7 @@ import ( core "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" ) // VerticalPodAutoscalerBuilder helps building test instances of VerticalPodAutoscaler. diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/api.go b/vertical-pod-autoscaler/pkg/utils/vpa/api.go index 405442557b65..86723e32c243 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/api.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/api.go @@ -29,10 +29,10 @@ import ( "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" vpa_clientset "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned" - vpa_api "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/poc.autoscaling.k8s.io/v1alpha1" - vpa_lister "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/listers/poc.autoscaling.k8s.io/v1alpha1" + vpa_api "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1beta1" + vpa_lister "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1beta1" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/model" "k8s.io/client-go/tools/cache" ) @@ -79,7 +79,7 @@ func UpdateVpaStatusIfNeeded(vpaClient vpa_api.VerticalPodAutoscalerInterface, v // NewAllVpasLister returns VerticalPodAutoscalerLister configured to fetch all VPA objects. // The method blocks until vpaLister is initially populated. 
func NewAllVpasLister(vpaClient *vpa_clientset.Clientset, stopChannel <-chan struct{}) vpa_lister.VerticalPodAutoscalerLister { - vpaListWatch := cache.NewListWatchFromClient(vpaClient.PocV1alpha1().RESTClient(), "verticalpodautoscalers", core.NamespaceAll, fields.Everything()) + vpaListWatch := cache.NewListWatchFromClient(vpaClient.AutoscalingV1beta1().RESTClient(), "verticalpodautoscalers", core.NamespaceAll, fields.Everything()) indexer, controller := cache.NewIndexerInformer(vpaListWatch, &vpa_types.VerticalPodAutoscaler{}, 1*time.Hour, diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/api_test.go b/vertical-pod-autoscaler/pkg/utils/vpa/api_test.go index 00ffc4096523..26df5d201c6e 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/api_test.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/api_test.go @@ -24,7 +24,7 @@ import ( core "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" meta "k8s.io/apimachinery/pkg/apis/meta/v1" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" vpa_fake "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned/fake" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/model" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test" @@ -90,7 +90,7 @@ func TestUpdateVpaIfNeeded(t *testing.T) { for _, tc := range testCases { t.Run(tc.caseName, func(t *testing.T) { fakeClient := vpa_fake.NewSimpleClientset() - _, err := UpdateVpaStatusIfNeeded(fakeClient.PocV1alpha1().VerticalPodAutoscalers(tc.vpa.ID.Namespace), + _, err := UpdateVpaStatusIfNeeded(fakeClient.AutoscalingV1beta1().VerticalPodAutoscalers(tc.vpa.ID.Namespace), tc.vpa, tc.observedStatus) assert.NoError(t, err, "Unexpected error occurred.") actions := fakeClient.Actions() diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go index b3e106880497..61f88723c51d 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go @@ -19,9 +19,11 @@ package api import ( "fmt" - "github.com/golang/glog" apiv1 "k8s.io/api/core/v1" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" + "k8s.io/apimachinery/pkg/api/resource" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" + + "github.com/golang/glog" ) // NewCappingRecommendationProcessor constructs new RecommendationsProcessor that adjusts recommendation @@ -30,7 +32,7 @@ func NewCappingRecommendationProcessor() RecommendationProcessor { return &cappingRecommendationProcessor{} } -type cappingAction = string +type cappingAction string var ( cappedToMinAllowed cappingAction = "capped to minAllowed" @@ -118,7 +120,7 @@ func getCappedRecommendationForContainer( } // capRecommendationToContainerLimit makes sure recommendation is not above current limit for the container. -// If this function makes adjustments approporiate annotations are returned. +// If this function makes adjustments appropriate annotations are returned. func capRecommendationToContainerLimit(recommendation apiv1.ResourceList, container apiv1.Container) []string { annotations := make([]string, 0) // Iterate over limits set in the container. Unset means Infinite limit. 
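One subtle change in this hunk: `cappingAction` goes from a type alias (`= string`) to a defined type. A quick illustration of the difference, with illustrative names that are not part of the patch:

```go
type aliasAction = string // alias: fully interchangeable with string
type namedAction string   // defined type: a distinct type with its own identity

func record(a namedAction) {}

func demo() {
	s := "capped to minAllowed"
	record(namedAction(s)) // defined type requires an explicit conversion
	// record(s)           // would not compile: s is a plain string
	var a aliasAction = s  // alias accepts a string directly
	_ = a
}
```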
@@ -139,20 +141,90 @@ func applyVPAPolicy(recommendation apiv1.ResourceList, policy *vpa_types.Contain } annotations := make([]string, 0) for resourceName, recommended := range recommendation { - min, found := policy.MinAllowed[resourceName] - if found && !min.IsZero() && recommended.MilliValue() < min.MilliValue() { - recommendation[resourceName] = min + cappedToMin, isCapped := maybeCapToMin(recommended, resourceName, policy) + recommendation[resourceName] = cappedToMin + if isCapped { annotations = append(annotations, toCappingAnnotation(resourceName, cappedToMinAllowed)) } - max, found := policy.MaxAllowed[resourceName] - if found && !max.IsZero() && recommended.MilliValue() > max.MilliValue() { - recommendation[resourceName] = max + cappedToMax, isCapped := maybeCapToMax(cappedToMin, resourceName, policy) + recommendation[resourceName] = cappedToMax + if isCapped { annotations = append(annotations, toCappingAnnotation(resourceName, cappedToMaxAllowed)) } } return annotations } +func applyVPAPolicyForContainer(containerName string, + containerRecommendation *vpa_types.RecommendedContainerResources, + policy *vpa_types.PodResourcePolicy) (*vpa_types.RecommendedContainerResources, error) { + if containerRecommendation == nil { + return nil, fmt.Errorf("no recommendation available for container name %v", containerName) + } + cappedRecommendations := containerRecommendation.DeepCopy() + // containerPolicy can be nil (user does not have to configure it). + containerPolicy := GetContainerResourcePolicy(containerName, policy) + if containerPolicy == nil { + return cappedRecommendations, nil + } + + process := func(recommendation apiv1.ResourceList) { + for resourceName, recommended := range recommendation { + cappedToMin, _ := maybeCapToMin(recommended, resourceName, containerPolicy) + recommendation[resourceName] = cappedToMin + cappedToMax, _ := maybeCapToMax(cappedToMin, resourceName, containerPolicy) + recommendation[resourceName] = cappedToMax + } + } + + process(cappedRecommendations.Target) + process(cappedRecommendations.LowerBound) + process(cappedRecommendations.UpperBound) + + return cappedRecommendations, nil +} + +func maybeCapToMin(recommended resource.Quantity, resourceName apiv1.ResourceName, + containerPolicy *vpa_types.ContainerResourcePolicy) (resource.Quantity, bool) { + min, found := containerPolicy.MinAllowed[resourceName] + if found && !min.IsZero() && recommended.Cmp(min) < 0 { + return min, true + } + return recommended, false +} + +func maybeCapToMax(recommended resource.Quantity, resourceName apiv1.ResourceName, + containerPolicy *vpa_types.ContainerResourcePolicy) (resource.Quantity, bool) { + max, found := containerPolicy.MaxAllowed[resourceName] + if found && !max.IsZero() && recommended.Cmp(max) > 0 { + return max, true + } + return recommended, false +} + +// ApplyVPAPolicy returns a recommendation, adjusted to obey policy. 
+func ApplyVPAPolicy(podRecommendation *vpa_types.RecommendedPodResources, + policy *vpa_types.PodResourcePolicy) (*vpa_types.RecommendedPodResources, error) { + if podRecommendation == nil { + return nil, nil + } + if policy == nil { + return podRecommendation, nil + } + + updatedRecommendations := []vpa_types.RecommendedContainerResources{} + for _, containerRecommendation := range podRecommendation.ContainerRecommendations { + containerName := containerRecommendation.ContainerName + updatedContainerResources, err := applyVPAPolicyForContainer(containerName, + &containerRecommendation, policy) + if err != nil { + return nil, fmt.Errorf("cannot apply policy on recommendation for container name %v", containerName) + } + updatedRecommendations = append(updatedRecommendations, *updatedContainerResources) + } + return &vpa_types.RecommendedPodResources{ContainerRecommendations: updatedRecommendations}, nil +} + // GetRecommendationForContainer returns recommendation for given container name func GetRecommendationForContainer(containerName string, recommendation *vpa_types.RecommendedPodResources) *vpa_types.RecommendedContainerResources { if recommendation != nil { diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go index e121ce5dba64..96a4190128b2 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/assert" apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test" ) @@ -95,13 +95,17 @@ func TestRecommendationCappedToMinMaxPolicy(t *testing.T) { { ContainerName: "ctr-name", Target: apiv1.ResourceList{ - apiv1.ResourceCPU: *resource.NewScaledQuantity(10, 1), + apiv1.ResourceCPU: *resource.NewScaledQuantity(30, 1), apiv1.ResourceMemory: *resource.NewScaledQuantity(5000, 1), }, LowerBound: apiv1.ResourceList{ - apiv1.ResourceCPU: *resource.NewScaledQuantity(50, 1), + apiv1.ResourceCPU: *resource.NewScaledQuantity(20, 1), apiv1.ResourceMemory: *resource.NewScaledQuantity(4300, 1), }, + UpperBound: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewScaledQuantity(50, 1), + apiv1.ResourceMemory: *resource.NewScaledQuantity(5500, 1), + }, }, }, } @@ -133,9 +137,14 @@ func TestRecommendationCappedToMinMaxPolicy(t *testing.T) { assert.Contains(t, annotations["ctr-name"], "memory capped to maxAllowed") assert.Equal(t, apiv1.ResourceList{ - apiv1.ResourceCPU: *resource.NewScaledQuantity(45, 1), + apiv1.ResourceCPU: *resource.NewScaledQuantity(40, 1), apiv1.ResourceMemory: *resource.NewScaledQuantity(4300, 1), }, res.ContainerRecommendations[0].LowerBound) + + assert.Equal(t, apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewScaledQuantity(45, 1), + apiv1.ResourceMemory: *resource.NewScaledQuantity(4500, 1), + }, res.ContainerRecommendations[0].UpperBound) } var podRecommendation *vpa_types.RecommendedPodResources = &vpa_types.RecommendedPodResources{ @@ -161,14 +170,14 @@ var applyTestCases = []struct { ExpectedError error }{ { - PodRecommendation: nil, - Policy: nil, + PodRecommendation: nil, + Policy: nil, ExpectedPodRecommendation: nil, ExpectedError: nil, }, { - PodRecommendation: podRecommendation, - Policy: nil, + PodRecommendation: 
podRecommendation, + Policy: nil, ExpectedPodRecommendation: podRecommendation, ExpectedError: nil, }, @@ -184,3 +193,66 @@ func TestApply(t *testing.T) { assert.Equal(t, testCase.ExpectedError, err) } } + +func TestApplyVpa(t *testing.T) { + podRecommendation := vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "ctr-name", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewScaledQuantity(30, 1), + apiv1.ResourceMemory: *resource.NewScaledQuantity(5000, 1), + }, + LowerBound: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewScaledQuantity(20, 1), + apiv1.ResourceMemory: *resource.NewScaledQuantity(4300, 1), + }, + UncappedTarget: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewScaledQuantity(30, 1), + apiv1.ResourceMemory: *resource.NewScaledQuantity(5000, 1), + }, + UpperBound: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewScaledQuantity(50, 1), + apiv1.ResourceMemory: *resource.NewScaledQuantity(5500, 1), + }, + }, + }, + } + policy := vpa_types.PodResourcePolicy{ + ContainerPolicies: []vpa_types.ContainerResourcePolicy{ + { + ContainerName: "ctr-name", + MinAllowed: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewScaledQuantity(40, 1), + apiv1.ResourceMemory: *resource.NewScaledQuantity(4000, 1), + }, + MaxAllowed: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewScaledQuantity(45, 1), + apiv1.ResourceMemory: *resource.NewScaledQuantity(4500, 1), + }, + }, + }, + } + + res, err := ApplyVPAPolicy(&podRecommendation, &policy) + assert.Nil(t, err) + assert.Equal(t, apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewScaledQuantity(40, 1), + apiv1.ResourceMemory: *resource.NewScaledQuantity(4500, 1), + }, res.ContainerRecommendations[0].Target) + + assert.Equal(t, apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewScaledQuantity(30, 1), + apiv1.ResourceMemory: *resource.NewScaledQuantity(5000, 1), + }, res.ContainerRecommendations[0].UncappedTarget) + + assert.Equal(t, apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewScaledQuantity(40, 1), + apiv1.ResourceMemory: *resource.NewScaledQuantity(4300, 1), + }, res.ContainerRecommendations[0].LowerBound) + + assert.Equal(t, apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewScaledQuantity(45, 1), + apiv1.ResourceMemory: *resource.NewScaledQuantity(4500, 1), + }, res.ContainerRecommendations[0].UpperBound) +} diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/recommendation_processor.go b/vertical-pod-autoscaler/pkg/utils/vpa/recommendation_processor.go index 6537a99b2d94..be67cb2fbaf4 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/recommendation_processor.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/recommendation_processor.go @@ -18,8 +18,7 @@ package api import ( "k8s.io/api/core/v1" - - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" ) // ContainerToAnnotationsMap contains annotations per container. 
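The capping rule introduced above amounts to a plain clamp on `resource.Quantity`, applied min-first and then max, with a zero bound meaning "unbounded" (so when `minAllowed` exceeds the recommendation, the result is pulled up to the minimum before the maximum is checked). A standalone sketch of that rule, using the CPU values from `TestApplyVpa`; `clamp` is an illustrative stand-in for the `maybeCapToMin`/`maybeCapToMax` pair, not the patch's API:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// clamp mirrors maybeCapToMin/maybeCapToMax: cap to minAllowed first,
// then to maxAllowed; a zero quantity means "no bound configured".
func clamp(recommended, min, max resource.Quantity) resource.Quantity {
	if !min.IsZero() && recommended.Cmp(min) < 0 {
		recommended = min
	}
	if !max.IsZero() && recommended.Cmp(max) > 0 {
		recommended = max
	}
	return recommended
}

func main() {
	// CPU values from TestApplyVpa: target 300, minAllowed 400, maxAllowed 450.
	target := *resource.NewScaledQuantity(30, 1)
	min := *resource.NewScaledQuantity(40, 1)
	max := *resource.NewScaledQuantity(45, 1)
	fmt.Println(clamp(target, min, max).String()) // "400": capped to minAllowed
}
```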
diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/sequential_processor.go b/vertical-pod-autoscaler/pkg/utils/vpa/sequential_processor.go index cdb6707dbbc9..3ff1ebd53756 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/sequential_processor.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/sequential_processor.go @@ -18,7 +18,7 @@ package api import ( "k8s.io/api/core/v1" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" ) // NewSequentialProcessor constructs RecommendationProcessor that will use provided RecommendationProcessor objects diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/sequential_processor_test.go b/vertical-pod-autoscaler/pkg/utils/vpa/sequential_processor_test.go index 367842bccc43..6d9e5a7222b6 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/sequential_processor_test.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/sequential_processor_test.go @@ -20,7 +20,7 @@ import ( "testing" "k8s.io/api/core/v1" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" "github.com/stretchr/testify/assert" ) diff --git a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/.gitignore b/vertical-pod-autoscaler/vendor/github.com/client9/misspell/.gitignore deleted file mode 100644 index aaca9b8f905d..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/.gitignore +++ /dev/null @@ -1,33 +0,0 @@ -dist/ -bin/ - -# editor turds -*~ -*.gz -*.bz2 -*.csv - -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/.travis.yml b/vertical-pod-autoscaler/vendor/github.com/client9/misspell/.travis.yml deleted file mode 100644 index 5c9f465caea4..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -sudo: required -dist: trusty -group: edge -language: go -go: - - 1.8.3 -git: - depth: 1 -script: - - make -e ci -after_success: - - test -n "$TRAVIS_TAG" && ./scripts/goreleaser.sh diff --git a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/Dockerfile b/vertical-pod-autoscaler/vendor/github.com/client9/misspell/Dockerfile deleted file mode 100644 index 8b1207c61be9..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/Dockerfile +++ /dev/null @@ -1,37 +0,0 @@ -FROM golang:1.8.3-alpine - -# cache buster -RUN echo 4 - -# git is needed for "go get" below -RUN apk add --no-cache git make - -# these are my standard testing / linting tools -RUN /bin/true \ - && go get -u github.com/golang/dep/cmd/dep \ - && go get -u github.com/alecthomas/gometalinter \ - && gometalinter --install \ - && rm -rf /go/src /go/pkg -# -# * SCOWL word list -# -# Downloads -# http://wordlist.aspell.net/dicts/ -# --> http://app.aspell.net/create -# - -# use en_US large size -# use regular size for others -ENV SOURCE_US_BIG http://app.aspell.net/create?max_size=70&spelling=US&max_variant=2&diacritic=both&special=hacker&special=roman-numerals&download=wordlist&encoding=utf-8&format=inline - -# should be able tell difference between 
English variations using this -ENV SOURCE_US http://app.aspell.net/create?max_size=60&spelling=US&max_variant=1&diacritic=both&download=wordlist&encoding=utf-8&format=inline -ENV SOURCE_GB_ISE http://app.aspell.net/create?max_size=60&spelling=GBs&max_variant=2&diacritic=both&download=wordlist&encoding=utf-8&format=inline -ENV SOURCE_GB_IZE http://app.aspell.net/create?max_size=60&spelling=GBz&max_variant=2&diacritic=both&download=wordlist&encoding=utf-8&format=inline -ENV SOURCE_CA http://app.aspell.net/create?max_size=60&spelling=CA&max_variant=2&diacritic=both&download=wordlist&encoding=utf-8&format=inline - -RUN /bin/true \ - && mkdir /scowl-wl \ - && wget -O /scowl-wl/words-US-60.txt ${SOURCE_US} \ - && wget -O /scowl-wl/words-GB-ise-60.txt ${SOURCE_GB_ISE} - diff --git a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/Gopkg.lock b/vertical-pod-autoscaler/vendor/github.com/client9/misspell/Gopkg.lock deleted file mode 100644 index d58726a0b92b..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/Gopkg.lock +++ /dev/null @@ -1,15 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - name = "github.com/gobwas/glob" - packages = [".","compiler","match","syntax","syntax/ast","syntax/lexer","util/runes","util/strings"] - revision = "bea32b9cd2d6f55753d94a28e959b13f0244797a" - version = "v0.2.2" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "e481c81c87260652d25840e1d95d6e530331c095d64d84422a166f37ae0a77d3" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/Gopkg.toml b/vertical-pod-autoscaler/vendor/github.com/client9/misspell/Gopkg.toml deleted file mode 100644 index efabaa461d4d..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/Gopkg.toml +++ /dev/null @@ -1,26 +0,0 @@ - -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. -# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" - - -[[constraint]] - name = "github.com/gobwas/glob" - version = "0.2.2" diff --git a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/LICENSE b/vertical-pod-autoscaler/vendor/github.com/client9/misspell/LICENSE deleted file mode 100644 index 423e1f9e0f9b..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015-2017 Nick Galbreath - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/Makefile b/vertical-pod-autoscaler/vendor/github.com/client9/misspell/Makefile deleted file mode 100644 index 0ccf74862c2b..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/Makefile +++ /dev/null @@ -1,84 +0,0 @@ -CONTAINER=nickg/misspell - -install: ## install misspell into GOPATH/bin - go install ./cmd/misspell - -build: hooks ## build and lint misspell - go install ./cmd/misspell - gometalinter \ - --vendor \ - --deadline=60s \ - --disable-all \ - --enable=vet \ - --enable=golint \ - --enable=gofmt \ - --enable=goimports \ - --enable=gosimple \ - --enable=staticcheck \ - --enable=ineffassign \ - --exclude=/usr/local/go/src/net/lookup_unix.go \ - ./... - go test . - -test: ## run all tests - go test . - -# the grep in line 2 is to remove misspellings in the spelling dictionary -# that trigger false positives!! -falsepositives: /scowl-wl - cat /scowl-wl/words-US-60.txt | \ - grep -i -v -E "payed|Tyre|Euclidian|nonoccurence|dependancy|reenforced|accidently|surprize|dependance|idealogy|binominal|causalities|conquerer|withing|casette|analyse|analogue|dialogue|paralyse|catalogue|archaeolog|clarinettist|catalyses|cancell|chisell|ageing|cataloguing" | \ - misspell -debug -error - cat /scowl-wl/words-GB-ise-60.txt | \ - grep -v -E "payed|nonoccurence|withing" | \ - misspell -locale=UK -debug -error -# cat /scowl-wl/words-GB-ize-60.txt | \ -# grep -v -E "withing" | \ -# misspell -debug -error -# cat /scowl-wl/words-CA-60.txt | \ -# grep -v -E "withing" | \ -# misspell -debug -error - -bench: ## run benchmarks - go test -bench '.*' - -clean: ## clean up time - rm -rf dist/ bin/ - go clean ./... - git gc --aggressive - -ci: ## run test like travis-ci does, requires docker - docker run --rm \ - -v $(PWD):/go/src/github.com/client9/misspell \ - -w /go/src/github.com/client9/misspell \ - ${CONTAINER} \ - make build falsepositives - -docker-build: ## build a docker test image - docker build -t ${CONTAINER} . 
- -docker-pull: ## pull latest test image - docker pull ${CONTAINER} - -docker-console: ## log into the test image - docker run --rm -it \ - -v $(PWD):/go/src/github.com/client9/misspell \ - -w /go/src/github.com/client9/misspell \ - ${CONTAINER} sh - -.git/hooks/pre-commit: scripts/pre-commit.sh - cp -f scripts/pre-commit.sh .git/hooks/pre-commit -.git/hooks/commit-msg: scripts/commit-msg.sh - cp -f scripts/commit-msg.sh .git/hooks/commit-msg -hooks: .git/hooks/pre-commit .git/hooks/commit-msg ## install git precommit hooks - -.PHONY: help ci console docker-build bench - -# https://www.client9.com/self-documenting-makefiles/ -help: - @awk -F ':|##' '/^[^\t].+?:.*?##/ {\ - printf "\033[36m%-30s\033[0m %s\n", $$1, $$NF \ - }' $(MAKEFILE_LIST) -.DEFAULT_GOAL=help -.PHONY=help - diff --git a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/README.md b/vertical-pod-autoscaler/vendor/github.com/client9/misspell/README.md deleted file mode 100644 index 5b68af04da9d..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/README.md +++ /dev/null @@ -1,424 +0,0 @@ -[![Build Status](https://travis-ci.org/client9/misspell.svg?branch=master)](https://travis-ci.org/client9/misspell) [![Go Report Card](https://goreportcard.com/badge/github.com/client9/misspell)](https://goreportcard.com/report/github.com/client9/misspell) [![GoDoc](https://godoc.org/github.com/client9/misspell?status.svg)](https://godoc.org/github.com/client9/misspell) [![Coverage](http://gocover.io/_badge/github.com/client9/misspell)](http://gocover.io/github.com/client9/misspell) [![license](https://img.shields.io/badge/license-MIT-blue.svg?style=flat)](https://raw.githubusercontent.com/client9/misspell/master/LICENSE) - -Correct commonly misspelled English words... quickly. - -### Install - - -If you just want a binary and to start using `misspell`: - -``` -curl -L -o ./install-misspell.sh https://git.io/misspell -sh ./install-misspell.sh -``` - - -Both will install as `./bin/misspell`. You can adjust the download location using the `-b` flag. File a ticket if you want another platform supported. - - -If you use [Go](https://golang.org/), the best way to run `misspell` is by using [gometalinter](#gometalinter). Otherwise, install `misspell` the old-fashioned way: - -``` -go get -u github.com/client9/misspell/cmd/misspell -``` - -and misspell will be in your `GOPATH` - - -Also if you like to live dangerously, one could do - -```bash -curl -L https://git.io/misspell | bash -``` - -### Usage - - -```bash -$ misspell all.html your.txt important.md files.go -your.txt:42:10 found "langauge" a misspelling of "language" - -# ^ file, line, column -``` - -``` -$ misspell -help -Usage of misspell: - -debug - Debug matching, very slow - -error - Exit with 2 if misspelling found - -f string - 'csv', 'sqlite3' or custom Golang template for output - -i string - ignore the following corrections, comma separated - -j int - Number of workers, 0 = number of CPUs - -legal - Show legal information and exit - -locale string - Correct spellings using locale perferances for US or UK. Default is to use a neutral variety of English. 
Setting locale to US will correct the British spelling of 'colour' to 'color' - -o string - output file or [stderr|stdout|] (default "stdout") - -q Do not emit misspelling output - -source string - Source mode: auto=guess, go=golang source, text=plain or markdown-like text (default "auto") - -w Overwrite file with corrections (default is just to display) -``` - -## FAQ - -* [Automatic Corrections](#correct) -* [Converting UK spellings to US](#locale) -* [Using pipes and stdin](#stdin) -* [Golang special support](#golang) -* [gometalinter support](#gometalinter) -* [CSV Output](#csv) -* [Using SQLite3](#sqlite) -* [Changing output format](#output) -* [Checking a folder recursively](#recursive) -* [Performance](#performance) -* [Known Issues](#issues) -* [Debugging](#debug) -* [False Negatives and missing words](#missing) -* [Origin of Word Lists](#words) -* [Software License](#license) -* [Problem statement](#problem) -* [Other spelling correctors](#others) -* [Other ideas](#otherideas) - - -### How can I make the corrections automatically? - -Just add the `-w` flag! - -``` -$ misspell -w all.html your.txt important.md files.go -your.txt:9:21:corrected "langauge" to "language" - -# ^ File is rewritten only if a misspelling is found -``` - - -### How do I convert British spellings to American (or vice-versa)? - -Add the `-locale US` flag! - -```bash -$ misspell -locale US important.txt -important.txt:10:20 found "colour" a misspelling of "color" -``` - -Add the `-locale UK` flag! - -```bash -$ echo "My favorite color is blue" | misspell -locale UK -stdin:1:3:found "favorite color" a misspelling of "favourite colour" -``` - -Help is appreciated as I'm neither British nor an -expert in the English language. - - -### How do you check an entire folder recursively? - -Just list a directory you'd like to check - -```bash -misspell . -misspell aDirectory anotherDirectory aFile -``` - -You can also run misspell recursively using the following shell tricks: - -```bash -misspell directory/**/* -``` - -or - -```bash -find . -type f | xargs misspell -``` - -You can select a type of file as well. The following examples selects all `.txt` files that are *not* in the `vendor` directory: - -```bash -find . -type f -name '*.txt' | grep -v vendor/ | xargs misspell -error -``` - - -### Can I use pipes or `stdin` for input? - -Yes! - -Print messages to `stderr` only: - -```bash -$ echo "zeebra" | misspell -stdin:1:0:found "zeebra" a misspelling of "zebra" -``` - -Print messages to `stderr`, and corrected text to `stdout`: - -```bash -$ echo "zeebra" | misspell -w -stdin:1:0:corrected "zeebra" to "zebra" -zebra -``` - -Only print the corrected text to `stdout`: - -```bash -$ echo "zeebra" | misspell -w -q -zebra -``` - - -### Are there special rules for golang source files? - -Yes! If the file ends in `.go`, then misspell will only check spelling in -comments. - -If you want to force a file to be checked as a golang source, use `-source=go` -on the command line. Conversely, you can check a golang source as if it were -pure text by using `-source=text`. You might want to do this since many -variable names have misspellings in them! - -### Can I check only-comments in other other programming languages? - -I'm told the using `-source=go` works well for ruby, javascript, java, c and -c++. - -It doesn't work well for python and bash. - - -### Does this work with gometalinter? - -[gometalinter](https://github.com/alecthomas/gometalinter) runs -multiple golang linters. 
Starting on [2016-06-12](https://github.com/alecthomas/gometalinter/pull/134) -gometalinter supports `misspell` natively but it is disabled by default. - -```bash -# update your copy of gometalinter -go get -u github.com/alecthomas/gometalinter - -# install updates and misspell -gometalinter --install --update -``` - -To use, just enable `misspell` - -``` -gometalinter --enable misspell ./... -``` - -Note that gometalinter only checks golang files, and uses the default options -of `misspell` - -You may wish to run this on your plaintext (.txt) and/or markdown files too. - - - -### How Can I Get CSV Output? - -Using `-f csv`, the output is standard comma-seprated values with headers in the first row. - -``` -misspell -f csv * -file,line,column,typo,corrected -"README.md",9,22,langauge,language -"README.md",47,25,langauge,language -``` - - -### How can I export to SQLite3? - -Using `-f sqlite`, the output is a [sqlite3](https://www.sqlite.org/index.html) dump-file. - -```bash -$ misspell -f sqlite * > /tmp/misspell.sql -$ cat /tmp/misspell.sql - -PRAGMA foreign_keys=OFF; -BEGIN TRANSACTION; -CREATE TABLE misspell( - "file" TEXT, - "line" INTEGER,i - "column" INTEGER,i - "typo" TEXT, - "corrected" TEXT -); -INSERT INTO misspell VALUES("install.txt",202,31,"immediatly","immediately"); -# etc... -COMMIT; -``` - -```bash -$ sqlite3 -init /tmp/misspell.sql :memory: 'select count(*) from misspell' -1 -``` - -With some tricks you can directly pipe output to sqlite3 by using `-init /dev/stdin`: - -``` -misspell -f sqlite * | sqlite3 -init /dev/stdin -column -cmd '.width 60 15' ':memory' \ - 'select substr(file,35),typo,count(*) as count from misspell group by file, typo order by count desc;' -``` - - -### How can I ignore rules? - -Using the `-i "comma,separated,rules"` flag you can specify corrections to ignore. - -For example, if you were to run `misspell -w -error -source=text` against document that contains the string `Guy Finkelshteyn Braswell`, misspell would change the text to `Guy Finkelstheyn Bras well`. You can then -determine the rules to ignore by reverting the change and running the with the `-debug` flag. You can then see -that the corrections were `htey -> they` and `aswell -> as well`. To ignore these two rules, you add `-i "htey,aswell"` to -your command. With debug mode on, you can see it print the corrections, but it will no longer make them. - - -### How can I change the output format? - -Using the `-f template` flag you can pass in a -[golang text template](https://golang.org/pkg/text/template/) to format the output. - -One can use `printf "%q" VALUE` to safely quote a value. - -The default template is compatible with [gometalinter](https://github.com/alecthomas/gometalinter) -``` -{{ .Filename }}:{{ .Line }}:{{ .Column }}:corrected {{ printf "%q" .Original }} to "{{ printf "%q" .Corrected }}" -``` - -To just print probable misspellings: - -``` --f '{{ .Original }}' -``` - - -### What problem does this solve? - -This corrects commonly misspelled English words in computer source -code, and other text-based formats (`.txt`, `.md`, etc). - -It is designed to run quickly so it can be -used as a [pre-commit hook](https://git-scm.com/book/en/v2/Customizing-Git-Git-Hooks) -with minimal burden on the developer. - -It does not work with binary formats (e.g. Word, etc). - -It is not a complete spell-checking program nor a grammar checker. - - -### What are other misspelling correctors and what's wrong with them? 
-
-### How can I change the output format?
-
-Using the `-f template` flag you can pass in a
-[golang text template](https://golang.org/pkg/text/template/) to format the output.
-
-One can use `printf "%q" VALUE` to safely quote a value.
-
-The default template is compatible with [gometalinter](https://github.com/alecthomas/gometalinter):
-```
-{{ .Filename }}:{{ .Line }}:{{ .Column }}:corrected {{ printf "%q" .Original }} to {{ printf "%q" .Corrected }}
-```
-
-To just print probable misspellings:
-
-```
--f '{{ .Original }}'
-```
-
-
-### What problem does this solve?
-
-This corrects commonly misspelled English words in computer source
-code and other text-based formats (`.txt`, `.md`, etc).
-
-It is designed to run quickly, so it can be
-used as a [pre-commit hook](https://git-scm.com/book/en/v2/Customizing-Git-Git-Hooks)
-with minimal burden on the developer.
-
-It does not work with binary formats (e.g. Word, etc).
-
-It is not a complete spell-checking program nor a grammar checker.
-
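-A minimal sketch of such a hook, assuming `misspell` is on your `PATH`
-(adapt the hook path and file filtering to taste):
-
-```bash
-#!/bin/sh
-# .git/hooks/pre-commit: abort the commit if any staged file contains
-# a common misspelling. With -error, misspell exits with status 2 when
-# it finds one, which makes the hook fail.
-files=$(git diff --cached --name-only --diff-filter=ACM)
-[ -z "$files" ] && exit 0
-echo "$files" | xargs misspell -error
-```
-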
-
-### What are other misspelling correctors and what's wrong with them?
-
-Some other misspelling correctors:
-
-* https://github.com/vlajos/misspell_fixer
-* https://github.com/lyda/misspell-check
-* https://github.com/lucasdemarchi/codespell
-
-They all work, but had problems that prevented me from using them at scale:
-
-* slow, all of the above check one misspelling at a time (i.e. linearly) using regexps
-* not MIT/Apache2 licensed (or equivalent)
-* have dependencies that don't work for me (python3, bash, linux sed, etc)
-* don't understand American vs. British English and sometimes make unwelcome "corrections"
-
-That said, they might be perfect for you and many have more features
-than this project!
-
-
-### How fast is it?
-
-Misspell is easily 100x to 1000x faster than other spelling correctors. You
-should be able to check and correct 1000 files in under 250ms.
-
-This uses the mighty power of golang's
-[strings.Replacer](https://golang.org/pkg/strings/#Replacer), which is
-an implementation or variation of the
-[Aho–Corasick algorithm](https://en.wikipedia.org/wiki/Aho–Corasick_algorithm).
-This makes multiple substring matches *simultaneously*.
-
-In addition, it uses multiple CPU cores to work on multiple files concurrently.
-
-
-### What problems does it have?
-
-Unlike the other projects, this doesn't know what a "word" is. There may be
-more false positives and false negatives due to this. On the other hand, it
-sometimes catches things others don't.
-
-Either way, please file bugs and we'll fix them!
-
-Since it operates in parallel to make corrections, it can be non-obvious to
-determine exactly what word was corrected.
-
-
-### It's making mistakes. How can I debug?
-
-Run with the `-debug` flag on the file you want. It should then print what word
-it is trying to correct. Then [file a
-bug](https://github.com/client9/misspell/issues) describing the problem.
-Thanks!
-
-
-### Why is it making mistakes or missing items in golang files?
-
-The matching function is *case-sensitive*, so variable names that are multiple
-words in either all-upper or all-lower case can sometimes cause false
-positives. For instance, a variable named `bodyreader` could trigger a false
-positive, since it contains `yrea`, which could be corrected to `year`.
-Other problems happen if the variable name uses an English contraction that
-should use an apostrophe. The best way of fixing this is to use the
-[Effective Go naming
-conventions](https://golang.org/doc/effective_go.html#mixed-caps) and use
-[camelCase](https://en.wikipedia.org/wiki/CamelCase) for variable names. You
-can check your code using [golint](https://github.com/golang/lint).
-
-
-### What license is this?
-
-The main code is [MIT](https://github.com/client9/misspell/blob/master/LICENSE).
-
-Misspell also makes use of the Golang standard library and contains a modified version of Golang's [strings.Replacer](https://golang.org/pkg/strings/#Replacer),
-which is covered under a [BSD License](https://github.com/golang/go/blob/master/LICENSE). Type `misspell -legal` for more details or see [legal.go](https://github.com/client9/misspell/blob/master/legal.go).
-
-
-### Where do the word lists come from?
-
-It started with a word list from
-[Wikipedia](https://en.wikipedia.org/wiki/Wikipedia:Lists_of_common_misspellings/For_machines).
-Unfortunately, this list had to be heavily edited, as many of the words are
-obsolete or based on mistakes made on mechanical typewriters (I'm guessing).
-
-Additional words were added based on actual mistakes seen in
-the wild (meaning self-generated).
-
-Variations of UK and US spellings are based on many sources, including:
-
-* http://www.tysto.com/uk-us-spelling-list.html (with heavy editing, many are incorrect)
-* http://www.oxforddictionaries.com/us/words/american-and-british-spelling-american (excellent site but incomplete)
-* Diffing US and UK [scowl dictionaries](http://wordlist.aspell.net)
-
-American English is more accepting of spelling variations than British
-English, so "what is American or not" is subject to opinion. Corrections and help welcome.
-
-
-### What are some other enhancements that could be done?
-
-Here are some ideas for enhancements:
-
-*Capitalization of proper nouns* could be done (e.g. weekday and month names, country names, language names)
-
-*Opinionated US spellings* US English has a number of words with alternate
-spellings. Think [adviser vs.
-advisor](http://grammarist.com/spelling/adviser-advisor/). While "advisor" is not wrong, an opinionated US
-locale would correct "advisor" to "adviser".
-
-*Versioning* Some type of versioning is needed so reporting mistakes and errors is easier.
-
-*Feedback* Mistakes would be sent to some server for aggregation and feedback review.
-
-*Contractions and Apostrophes* This would optionally correct "isnt" to
-"isn't", etc.
diff --git a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/RELEASE-HOWTO.md b/vertical-pod-autoscaler/vendor/github.com/client9/misspell/RELEASE-HOWTO.md
deleted file mode 100644
index 7caf7ee16312..000000000000
--- a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/RELEASE-HOWTO.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# Release HOWTO
-
-since I forget.
-
-
-1. Review existing tags and pick a new release number
-
-   ```bash
-   git tag
-   ```
-
-2. Tag locally
-
-   ```bash
-   git tag -a v0.1.0 -m "First release"
-   ```
-
-3. Push
-
-   ```bash
-   git push origin v0.1.0
-   ```
-
-4. Verify release and edit notes.
See https://github.com/client9/misspell/releases
-
diff --git a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/ascii.go b/vertical-pod-autoscaler/vendor/github.com/client9/misspell/ascii.go
deleted file mode 100644
index 1430718d6a27..000000000000
--- a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/ascii.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package misspell
-
-// ByteToUpper converts an ASCII byte to upper case.
-// Uses a branchless algorithm.
-func ByteToUpper(x byte) byte {
-	b := byte(0x80) | x
-	c := b - byte(0x61)
-	d := ^(b - byte(0x7b))
-	e := (c & d) & (^x & 0x7f)
-	return x - (e >> 2)
-}
-
-// ByteToLower converts an ASCII byte to lower case.
-// Uses a branchless algorithm.
-func ByteToLower(eax byte) byte {
-	ebx := eax&byte(0x7f) + byte(0x25)
-	ebx = ebx&byte(0x7f) + byte(0x1a)
-	ebx = ((ebx & ^eax) >> 2) & byte(0x20)
-	return eax + ebx
-}
-
-// ByteEqualFold does an ASCII compare, case-insensitively.
-func ByteEqualFold(a, b byte) bool {
-	return a == b || ByteToLower(a) == ByteToLower(b)
-}
-
-// StringEqualFold does an ASCII case-insensitive comparison.
-// golang's toUpper/toLower for both bytes and strings
-// appears to be Unicode-based, which is super slow.
-// Based on https://codereview.appspot.com/5180044/patch/14007/21002
-func StringEqualFold(s1, s2 string) bool {
-	if len(s1) != len(s2) {
-		return false
-	}
-	for i := 0; i < len(s1); i++ {
-		c1 := s1[i]
-		c2 := s2[i]
-		// fold case only when the raw bytes differ
-		if c1 != c2 {
-			c1 |= 'a' - 'A'
-			c2 |= 'a' - 'A'
-			if c1 != c2 || c1 < 'a' || c1 > 'z' {
-				return false
-			}
-		}
-	}
-	return true
-}
-
-// StringHasPrefixFold is similar to strings.HasPrefix, but the comparison
-// is done ignoring ASCII case.
-func StringHasPrefixFold(s1, s2 string) bool {
-	// prefix is bigger than input --> false
-	if len(s1) < len(s2) {
-		return false
-	}
-	if len(s1) == len(s2) {
-		return StringEqualFold(s1, s2)
-	}
-	return StringEqualFold(s1[:len(s2)], s2)
-}
diff --git a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/case.go b/vertical-pod-autoscaler/vendor/github.com/client9/misspell/case.go
deleted file mode 100644
index 2ea3850dfa01..000000000000
--- a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/case.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package misspell
-
-import (
-	"strings"
-)
-
-// WordCase is an enum of various word casing styles
-type WordCase int
-
-// The possible WordCase values. Case detection is best-effort and may
-// not be correct for unusual casings.
-const (
-	CaseUnknown WordCase = iota
-	CaseLower
-	CaseUpper
-	CaseTitle
-)
-
-// CaseStyle returns what case style a word is in
-func CaseStyle(word string) WordCase {
-	upperCount := 0
-	lowerCount := 0
-
-	// NOTE: this iterates over BYTES, not runes; only ASCII letters
-	// are counted, and non-ASCII runes are ignored.
-	for i := 0; i < len(word); i++ {
-		ch := word[i]
-		switch {
-		case ch >= 'a' && ch <= 'z':
-			lowerCount++
-		case ch >= 'A' && ch <= 'Z':
-			upperCount++
-		}
-	}
-
-	switch {
-	case upperCount != 0 && lowerCount == 0:
-		return CaseUpper
-	case upperCount == 0 && lowerCount != 0:
-		return CaseLower
-	case upperCount == 1 && lowerCount > 0 && word[0] >= 'A' && word[0] <= 'Z':
-		return CaseTitle
-	}
-	return CaseUnknown
-}
-
-// CaseVariations returns the case variants of a word to try:
-// if the input is all lower case, the original, Title-case and
-// all-upper-case forms; if it is all upper case, only the
-// all-upper-case form; otherwise the original and the all-upper-case
-// form.
-func CaseVariations(word string, style WordCase) []string {
-	switch style {
-	case CaseLower:
-		return []string{word, strings.ToUpper(word[0:1]) + word[1:], strings.ToUpper(word)}
-	case CaseUpper:
-		return []string{strings.ToUpper(word)}
-	default:
-		return []string{word, strings.ToUpper(word)}
-	}
-}
diff --git a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/cmd/misspell/main.go b/vertical-pod-autoscaler/vendor/github.com/client9/misspell/cmd/misspell/main.go
deleted file mode 100644
index 174d79d882db..000000000000
--- a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/cmd/misspell/main.go
+++ /dev/null
@@ -1,326 +0,0 @@
-// The misspell command corrects commonly misspelled English words in source files.
-package main
-
-import (
-	"bytes"
-	"flag"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"log"
-	"os"
-	"path/filepath"
-	"runtime"
-	"strings"
-	"text/template"
-	"time"
-
-	"github.com/client9/misspell"
-)
-
-var (
-	defaultWrite *template.Template
-	defaultRead  *template.Template
-
-	stdout *log.Logger
-	debug  *log.Logger
-
-	version = "dev"
-)
-
-const (
-	// Note for gometalinter it must be "File:Line:Column: Msg"
-	// note the space between ":" and "Msg"
-	defaultWriteTmpl = `{{ .Filename }}:{{ .Line }}:{{ .Column }}: corrected "{{ .Original }}" to "{{ .Corrected }}"`
-	defaultReadTmpl  = `{{ .Filename }}:{{ .Line }}:{{ .Column }}: "{{ .Original }}" is a misspelling of "{{ .Corrected }}"`
-	csvTmpl          = `{{ printf "%q" .Filename }},{{ .Line }},{{ .Column }},{{ .Original }},{{ .Corrected }}`
-	csvHeader        = `file,line,column,typo,corrected`
-	sqliteTmpl       = `INSERT INTO misspell VALUES({{ printf "%q" .Filename }},{{ .Line }},{{ .Column }},{{ printf "%q" .Original }},{{ printf "%q" .Corrected }});`
-	sqliteHeader     = `PRAGMA foreign_keys=OFF;
-BEGIN TRANSACTION;
-CREATE TABLE misspell(
-	"file" TEXT, "line" INTEGER, "column" INTEGER, "typo" TEXT, "corrected" TEXT
-);`
-	sqliteFooter = "COMMIT;"
-)
-
-func worker(writeit bool, r *misspell.Replacer, mode string, files <-chan string, results chan<- int) {
-	count := 0
-	for filename := range files {
-		orig, err := misspell.ReadTextFile(filename)
-		if err != nil {
-			log.Println(err)
-			continue
-		}
-		if len(orig) == 0 {
-			continue
-		}
-
-		debug.Printf("Processing %s", filename)
-
-		var updated string
-		var changes []misspell.Diff
-
-		if mode == "go" {
-			updated, changes = r.ReplaceGo(orig)
-		} else {
-			updated, changes = r.Replace(orig)
-		}
-
-		if len(changes) == 0 {
-			continue
-		}
-		count += len(changes)
-		for _, diff := range changes {
-			// add in filename
-			diff.Filename = filename
-
-			// output is produced by multiple goroutines
-			// and could clobber os.Stdout.
-			//
-			// the log package can be used simultaneously from multiple goroutines
-			var output bytes.Buffer
-			if writeit {
-				defaultWrite.Execute(&output, diff)
-			} else {
-				defaultRead.Execute(&output, diff)
-			}
-
-			// goroutine-safe print to os.Stdout
-			stdout.Println(output.String())
-		}
-
-		if writeit {
-			ioutil.WriteFile(filename, []byte(updated), 0)
-		}
-	}
-	results <- count
-}
-
-func main() {
-	t := time.Now()
-	var (
-		workers     = flag.Int("j", 0, "Number of workers, 0 = number of CPUs")
-		writeit     = flag.Bool("w", false, "Overwrite file with corrections (default is just to display)")
-		quietFlag   = flag.Bool("q", false, "Do not emit misspelling output")
-		outFlag     = flag.String("o", "stdout", "output file or [stderr|stdout|]")
-		format      = flag.String("f", "", "'csv', 'sqlite3' or custom Golang template for output")
-		ignores     = flag.String("i", "", "ignore the following corrections, comma separated")
-		locale      = flag.String("locale", "", "Correct spellings using locale preferences for US or UK. Default is to use a neutral variety of English. Setting locale to US will correct the British spelling of 'colour' to 'color'")
-		mode        = flag.String("source", "auto", "Source mode: auto=guess, go=golang source, text=plain or markdown-like text")
-		debugFlag   = flag.Bool("debug", false, "Debug matching, very slow")
-		exitError   = flag.Bool("error", false, "Exit with 2 if misspelling found")
-		showVersion = flag.Bool("v", false, "Show version and exit")
-
-		showLegal = flag.Bool("legal", false, "Show legal information and exit")
-	)
-	flag.Parse()
-
-	if *showVersion {
-		fmt.Println(version)
-		return
-	}
-	if *showLegal {
-		fmt.Println(misspell.Legal)
-		return
-	}
-	if *debugFlag {
-		debug = log.New(os.Stderr, "DEBUG ", 0)
-	} else {
-		debug = log.New(ioutil.Discard, "", 0)
-	}
-
-	r := misspell.Replacer{
-		Replacements: misspell.DictMain,
-		Debug:        *debugFlag,
-	}
-	//
-	// Figure out regional variations
-	//
-	switch strings.ToUpper(*locale) {
-	case "":
-		// nothing
-	case "US":
-		r.AddRuleList(misspell.DictAmerican)
-	case "UK", "GB":
-		r.AddRuleList(misspell.DictBritish)
-	case "NZ", "AU", "CA":
-		log.Fatalf("Help wanted. https://github.com/client9/misspell/issues/6")
-	default:
-		log.Fatalf("Unknown locale: %q", *locale)
-	}
-
-	//
-	// Stuff to ignore
-	//
-	if len(*ignores) > 0 {
-		r.RemoveRule(strings.Split(*ignores, ","))
-	}
-
-	//
-	// Source input mode
-	//
-	switch *mode {
-	case "auto":
-	case "go":
-	case "text":
-	default:
-		log.Fatalf("Mode must be one of auto=guess, go=golang source, text=plain or markdown-like text")
-	}
-
-	//
-	// Custom output
-	//
-	switch {
-	case *format == "csv":
-		tmpl := template.Must(template.New("csv").Parse(csvTmpl))
-		defaultWrite = tmpl
-		defaultRead = tmpl
-		stdout.Println(csvHeader)
-	case *format == "sqlite" || *format == "sqlite3":
-		tmpl := template.Must(template.New("sqlite3").Parse(sqliteTmpl))
-		defaultWrite = tmpl
-		defaultRead = tmpl
-		stdout.Println(sqliteHeader)
-	case len(*format) > 0:
-		t, err := template.New("custom").Parse(*format)
-		if err != nil {
-			log.Fatalf("Unable to compile log format: %s", err)
-		}
-		defaultWrite = t
-		defaultRead = t
-	default: // format == ""
-		defaultWrite = template.Must(template.New("defaultWrite").Parse(defaultWriteTmpl))
-		defaultRead = template.Must(template.New("defaultRead").Parse(defaultReadTmpl))
-	}
-
-	// we can't just write to os.Stdout directly, since we have multiple goroutines
-	// all writing at the same time causing broken output. The log package is goroutine-safe;
- // we see it so it doesn't use a prefix or include a time stamp. - switch { - case *quietFlag || *outFlag == "/dev/null": - stdout = log.New(ioutil.Discard, "", 0) - case *outFlag == "/dev/stderr" || *outFlag == "stderr": - stdout = log.New(os.Stderr, "", 0) - case *outFlag == "/dev/stdout" || *outFlag == "stdout": - stdout = log.New(os.Stdout, "", 0) - case *outFlag == "" || *outFlag == "-": - stdout = log.New(os.Stdout, "", 0) - default: - fo, err := os.Create(*outFlag) - if err != nil { - log.Fatalf("unable to create outfile %q: %s", *outFlag, err) - } - defer fo.Close() - stdout = log.New(fo, "", 0) - } - - // - // Number of Workers / CPU to use - // - if *workers < 0 { - log.Fatalf("-j must >= 0") - } - if *workers == 0 { - *workers = runtime.NumCPU() - } - if *debugFlag { - *workers = 1 - } - - // - // Done with Flags. - // Compile the Replacer and process files - // - r.Compile() - - args := flag.Args() - debug.Printf("initialization complete in %v", time.Since(t)) - - // stdin/stdout - if len(args) == 0 { - // if we are working with pipes/stdin/stdout - // there is no concurrency, so we can directly - // send data to the writers - var fileout io.Writer - var errout io.Writer - switch *writeit { - case true: - // if we ARE writing the corrected stream - // the corrected stream goes to stdout - // and the misspelling errors goes to stderr - // so we can do something like this: - // curl something | misspell -w | gzip > afile.gz - fileout = os.Stdout - errout = os.Stderr - case false: - // if we are not writing out the corrected stream - // then work just like files. Misspelling errors - // are sent to stdout - fileout = ioutil.Discard - errout = os.Stdout - } - count := 0 - next := func(diff misspell.Diff) { - count++ - - // don't even evaluate the output templates - if *quietFlag { - return - } - diff.Filename = "stdin" - if *writeit { - defaultWrite.Execute(errout, diff) - } else { - defaultRead.Execute(errout, diff) - } - errout.Write([]byte{'\n'}) - - } - err := r.ReplaceReader(os.Stdin, fileout, next) - if err != nil { - os.Exit(1) - } - switch *format { - case "sqlite", "sqlite3": - fileout.Write([]byte(sqliteFooter)) - } - if count != 0 && *exitError { - // error - os.Exit(2) - } - return - } - - c := make(chan string, 64) - results := make(chan int, *workers) - - for i := 0; i < *workers; i++ { - go worker(*writeit, &r, *mode, c, results) - } - - for _, filename := range args { - filepath.Walk(filename, func(path string, info os.FileInfo, err error) error { - if err == nil && !info.IsDir() { - c <- path - } - return nil - }) - } - close(c) - - count := 0 - for i := 0; i < *workers; i++ { - changed := <-results - count += changed - } - - switch *format { - case "sqlite", "sqlite3": - stdout.Println(sqliteFooter) - } - - if count != 0 && *exitError { - os.Exit(2) - } -} diff --git a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/goreleaser.yml b/vertical-pod-autoscaler/vendor/github.com/client9/misspell/goreleaser.yml deleted file mode 100644 index 2bd738f8b08f..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/goreleaser.yml +++ /dev/null @@ -1,29 +0,0 @@ -# goreleaser.yml -# https://github.com/goreleaser/goreleaser -build: - main: cmd/misspell/main.go - binary: misspell - ldflags: -s -w -X main.version={{.Version}} - goos: - - darwin - - linux - - windows - goarch: - - amd64 - env: - - CGO_ENABLED=0 - ignore: - - goos: darwin - goarch: 386 - - goos: windows - goarch: 386 - -archive: - name_template: "{{ .Binary }}_{{ .Version }}_{{ 
.Os }}_{{ .Arch }}" - replacements: - amd64: 64bit - 386: 32bit - darwin: mac - -snapshot: - name_template: SNAPSHOT-{{.Commit}} diff --git a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/install-misspell.sh b/vertical-pod-autoscaler/vendor/github.com/client9/misspell/install-misspell.sh deleted file mode 100755 index 8e0ff5d94700..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/install-misspell.sh +++ /dev/null @@ -1,318 +0,0 @@ -#!/bin/sh -set -e -# Code generated by godownloader. DO NOT EDIT. -# - -usage() { - this=$1 - cat </dev/null -} -uname_os() { - os=$(uname -s | tr '[:upper:]' '[:lower:]') - echo "$os" -} -uname_arch() { - arch=$(uname -m) - case $arch in - x86_64) arch="amd64" ;; - x86) arch="386" ;; - i686) arch="386" ;; - i386) arch="386" ;; - aarch64) arch="arm64" ;; - armv5*) arch="arm5" ;; - armv6*) arch="arm6" ;; - armv7*) arch="arm7" ;; - esac - echo ${arch} -} -uname_os_check() { - os=$(uname_os) - case "$os" in - darwin) return 0 ;; - dragonfly) return 0 ;; - freebsd) return 0 ;; - linux) return 0 ;; - android) return 0 ;; - nacl) return 0 ;; - netbsd) return 0 ;; - openbsd) return 0 ;; - plan9) return 0 ;; - solaris) return 0 ;; - windows) return 0 ;; - esac - echo "$0: uname_os_check: internal error '$(uname -s)' got converted to '$os' which is not a GOOS value. Please file bug at https://github.com/client9/shlib" - return 1 -} -uname_arch_check() { - arch=$(uname_arch) - case "$arch" in - 386) return 0 ;; - amd64) return 0 ;; - arm64) return 0 ;; - armv5) return 0 ;; - armv6) return 0 ;; - armv7) return 0 ;; - ppc64) return 0 ;; - ppc64le) return 0 ;; - mips) return 0 ;; - mipsle) return 0 ;; - mips64) return 0 ;; - mips64le) return 0 ;; - s390x) return 0 ;; - amd64p32) return 0 ;; - esac - echo "$0: uname_arch_check: internal error '$(uname -m)' got converted to '$arch' which is not a GOARCH value. 
Please file bug report at https://github.com/client9/shlib" - return 1 -} -untar() { - tarball=$1 - case "${tarball}" in - *.tar.gz | *.tgz) tar -xzf "${tarball}" ;; - *.tar) tar -xf "${tarball}" ;; - *.zip) unzip "${tarball}" ;; - *) - echo "Unknown archive format for ${tarball}" - return 1 - ;; - esac -} -mktmpdir() { - test -z "$TMPDIR" && TMPDIR="$(mktemp -d)" - mkdir -p "${TMPDIR}" - echo "${TMPDIR}" -} -http_download() { - local_file=$1 - source_url=$2 - header=$3 - headerflag='' - destflag='' - if is_command curl; then - cmd='curl --fail -sSL' - destflag='-o' - headerflag='-H' - elif is_command wget; then - cmd='wget -q' - destflag='-O' - headerflag='--header' - else - echo "http_download: unable to find wget or curl" - return 1 - fi - if [ -z "$header" ]; then - $cmd $destflag "$local_file" "$source_url" - else - $cmd $headerflag "$header" $destflag "$local_file" "$source_url" - fi -} -github_api() { - local_file=$1 - source_url=$2 - header="" - case "$source_url" in - https://api.github.com*) - test -z "$GITHUB_TOKEN" || header="Authorization: token $GITHUB_TOKEN" - ;; - esac - http_download "$local_file" "$source_url" "$header" -} -github_last_release() { - owner_repo=$1 - giturl="https://api.github.com/repos/${owner_repo}/releases/latest" - html=$(github_api - "$giturl") - version=$(echo "$html" | grep -m 1 "\"tag_name\":" | cut -f4 -d'"') - test -z "$version" && return 1 - echo "$version" -} -hash_sha256() { - TARGET=${1:-/dev/stdin} - if is_command gsha256sum; then - hash=$(gsha256sum "$TARGET") || return 1 - echo "$hash" | cut -d ' ' -f 1 - elif is_command sha256sum; then - hash=$(sha256sum "$TARGET") || return 1 - echo "$hash" | cut -d ' ' -f 1 - elif is_command shasum; then - hash=$(shasum -a 256 "$TARGET" 2>/dev/null) || return 1 - echo "$hash" | cut -d ' ' -f 1 - elif is_command openssl; then - hash=$(openssl -dst openssl dgst -sha256 "$TARGET") || return 1 - echo "$hash" | cut -d ' ' -f a - else - echo "hash_sha256: unable to find command to compute sha-256 hash" - return 1 - fi -} -hash_sha256_verify() { - TARGET=$1 - checksums=$2 - if [ -z "$checksums" ]; then - echo "hash_sha256_verify: checksum file not specified in arg2" - return 1 - fi - BASENAME=${TARGET##*/} - want=$(grep "${BASENAME}" "${checksums}" 2>/dev/null | tr '\t' ' ' | cut -d ' ' -f 1) - if [ -z "$want" ]; then - echo "hash_sha256_verify: unable to find checksum for '${TARGET}' in '${checksums}'" - return 1 - fi - got=$(hash_sha256 "$TARGET") - if [ "$want" != "$got" ]; then - echo "hash_sha256_verify: checksum for '$TARGET' did not verify ${want} vs $got" - return 1 - fi -} -cat /dev/null < 50000 { - fin, err := os.Open(filename) - if err != nil { - return "", fmt.Errorf("Unable to open large file %q: %s", filename, err) - } - defer fin.Close() - buf := make([]byte, 512) - _, err = io.ReadFull(fin, buf) - if err != nil { - return "", fmt.Errorf("Unable to read 512 bytes from %q: %s", filename, err) - } - if !isTextFile(buf) { - return "", nil - } - - // set so we don't double check this file - isText = true - } - - // read in whole file - raw, err := ioutil.ReadFile(filename) - if err != nil { - return "", fmt.Errorf("Unable to read all %q: %s", filename, err) - } - - if !isText && !isTextFile(raw) { - return "", nil - } - return string(raw), nil -} diff --git a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/notwords.go b/vertical-pod-autoscaler/vendor/github.com/client9/misspell/notwords.go deleted file mode 100644 index 06d0d5a5adff..000000000000 --- 
a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/notwords.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package misspell
-
-import (
-	"bytes"
-	"regexp"
-	"strings"
-)
-
-var (
-	reEmail     = regexp.MustCompile(`[a-zA-Z0-9_.%+-]+@[a-zA-Z0-9-.]+\.[a-zA-Z]{2,6}[^a-zA-Z]`)
-	reHost      = regexp.MustCompile(`[a-zA-Z0-9-.]+\.[a-zA-Z]+`)
-	reBackslash = regexp.MustCompile(`\\[a-z]`)
-)
-
-// RemovePath attempts to strip away embedded file system paths, e.g.
-// /foo/bar or /static/myimg.png
-//
-// TODO: windows style
-//
-func RemovePath(s string) string {
-	out := bytes.Buffer{}
-	var idx int
-	for len(s) > 0 {
-		if idx = strings.IndexByte(s, '/'); idx == -1 {
-			out.WriteString(s)
-			break
-		}
-
-		if idx > 0 {
-			idx--
-		}
-
-		var chclass string
-		switch s[idx] {
-		case '/', ' ', '\n', '\t', '\r':
-			chclass = " \n\r\t"
-		case '[':
-			chclass = "]\n"
-		case '(':
-			chclass = ")\n"
-		default:
-			out.WriteString(s[:idx+2])
-			s = s[idx+2:]
-			continue
-		}
-
-		endx := strings.IndexAny(s[idx+1:], chclass)
-		if endx != -1 {
-			out.WriteString(s[:idx+1])
-			out.Write(bytes.Repeat([]byte{' '}, endx))
-			s = s[idx+endx+1:]
-		} else {
-			out.WriteString(s)
-			break
-		}
-	}
-	return out.String()
-}
-
-// replaceWithBlanks returns a string with the same number of spaces as the input
-func replaceWithBlanks(s string) string {
-	return strings.Repeat(" ", len(s))
-}
-
-// RemoveEmail removes email-like strings, e.g. "nickg+junk@xfoobar.com", "nickg@xyz.abc123.biz"
-func RemoveEmail(s string) string {
-	return reEmail.ReplaceAllStringFunc(s, replaceWithBlanks)
-}
-
-// RemoveHost removes host-like strings, e.g. "foobar.com", "abc123.fo1231.biz"
-func RemoveHost(s string) string {
-	return reHost.ReplaceAllStringFunc(s, replaceWithBlanks)
-}
-
-// removeBackslashEscapes removes characters that are preceded by a backslash,
-// as commonly found in printf-style format strings, e.g. "\nto"
-func removeBackslashEscapes(s string) string {
-	return reBackslash.ReplaceAllStringFunc(s, replaceWithBlanks)
-}
-
-// RemoveNotWords blanks out all the non-words
-func RemoveNotWords(s string) string {
-	// do most selective/specific first
-	return removeBackslashEscapes(RemoveHost(RemoveEmail(RemovePath(StripURL(s)))))
-}
diff --git a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/replace.go b/vertical-pod-autoscaler/vendor/github.com/client9/misspell/replace.go
deleted file mode 100644
index a99bbcc582aa..000000000000
--- a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/replace.go
+++ /dev/null
@@ -1,246 +0,0 @@
-package misspell
-
-import (
-	"bufio"
-	"bytes"
-	"io"
-	"regexp"
-	"strings"
-	"text/scanner"
-)
-
-func max(x, y int) int {
-	if x > y {
-		return x
-	}
-	return y
-}
-
-func inArray(haystack []string, needle string) bool {
-	for _, word := range haystack {
-		if needle == word {
-			return true
-		}
-	}
-	return false
-}
-
-var wordRegexp = regexp.MustCompile(`[a-zA-Z0-9']+`)
-
-// Diff is a datastructure showing what changed in a single line
-type Diff struct {
-	Filename  string
-	FullLine  string
-	Line      int
-	Column    int
-	Original  string
-	Corrected string
-}
-
-// Replacer is the main struct for spelling correction
-type Replacer struct {
-	Replacements []string
-	Debug        bool
-	engine       *StringReplacer
-	corrected    map[string]string
-}
-
-// New creates a new default Replacer using the main rule list
-func New() *Replacer {
-	r := Replacer{
-		Replacements: DictMain,
-	}
-	r.Compile()
-	return &r
-}
-
-// RemoveRule deletes existing rules.
-// TODO: make it in place to save memory
-func (r *Replacer) RemoveRule(ignore []string) {
-	newwords := make([]string, 0, len(r.Replacements))
-	for i := 0; i < len(r.Replacements); i += 2 {
-		if inArray(ignore, r.Replacements[i]) {
-			continue
-		}
-		newwords = append(newwords, r.Replacements[i:i+2]...)
-	}
-	r.engine = nil
-	r.Replacements = newwords
-}
-
-// AddRuleList appends new rules.
-// Input is in the same form as strings.Replacer: [ old1, new1, old2, new2, ....]
-// Note: does not check for duplicates
-func (r *Replacer) AddRuleList(additions []string) {
-	r.engine = nil
-	r.Replacements = append(r.Replacements, additions...)
-}
-
-// Compile compiles the rules. Required before using the Replace functions
-func (r *Replacer) Compile() {
-
-	r.corrected = make(map[string]string, len(r.Replacements)/2)
-	for i := 0; i < len(r.Replacements); i += 2 {
-		r.corrected[r.Replacements[i]] = r.Replacements[i+1]
-	}
-	r.engine = NewStringReplacer(r.Replacements...)
-}
-
-/*
-recheckLine is used when line1 and line2 differ. It extracts each word
-from line1 and replaces word -> newword:
-  if word == newword, continue
-  if newword is a known correction, apply it
-  otherwise some substring got mixed up, so undo the change
-*/
-func (r *Replacer) recheckLine(s string, lineNum int, buf io.Writer, next func(Diff)) {
-	first := 0
-	redacted := RemoveNotWords(s)
-
-	idx := wordRegexp.FindAllStringIndex(redacted, -1)
-	for _, ab := range idx {
-		word := s[ab[0]:ab[1]]
-		newword := r.engine.Replace(word)
-		if newword == word {
-			// no replacement done
-			continue
-		}
-
-		// ignore camelCase words
-		// https://github.com/client9/misspell/issues/113
-		if CaseStyle(word) == CaseUnknown {
-			continue
-		}
-
-		if StringEqualFold(r.corrected[strings.ToLower(word)], newword) {
-			// word got corrected into something we know
-			io.WriteString(buf, s[first:ab[0]])
-			io.WriteString(buf, newword)
-			first = ab[1]
-			next(Diff{
-				FullLine:  s,
-				Line:      lineNum,
-				Original:  word,
-				Corrected: newword,
-				Column:    ab[0],
-			})
-			continue
-		}
-		// Word got corrected into something unknown. Ignore it.
-	}
-	io.WriteString(buf, s[first:])
-}
-
-// ReplaceGo is a specialized routine for correcting Golang source
-// files. Currently only checks comments, not identifiers for
-// spelling.
-func (r *Replacer) ReplaceGo(input string) (string, []Diff) { - var s scanner.Scanner - s.Init(strings.NewReader(input)) - s.Mode = scanner.ScanIdents | scanner.ScanFloats | scanner.ScanChars | scanner.ScanStrings | scanner.ScanRawStrings | scanner.ScanComments - lastPos := 0 - output := "" -Loop: - for { - switch s.Scan() { - case scanner.Comment: - origComment := s.TokenText() - newComment := r.engine.Replace(origComment) - - if origComment != newComment { - // s.Pos().Offset is the end of the current token - // subtract len(origComment) to get the start of the token - offset := s.Pos().Offset - output = output + input[lastPos:offset-len(origComment)] + newComment - lastPos = offset - } - case scanner.EOF: - break Loop - } - } - - if lastPos == 0 { - // no changes, no copies - return input, nil - } - if lastPos < len(input) { - output = output + input[lastPos:] - } - diffs := make([]Diff, 0, 8) - buf := bytes.NewBuffer(make([]byte, 0, max(len(input), len(output))+100)) - // faster that making a bytes.Buffer and bufio.ReadString - outlines := strings.SplitAfter(output, "\n") - inlines := strings.SplitAfter(input, "\n") - for i := 0; i < len(inlines); i++ { - if inlines[i] == outlines[i] { - buf.WriteString(outlines[i]) - continue - } - r.recheckLine(inlines[i], i+1, buf, func(d Diff) { - diffs = append(diffs, d) - }) - } - - return buf.String(), diffs - -} - -// Replace is corrects misspellings in input, returning corrected version -// along with a list of diffs. -func (r *Replacer) Replace(input string) (string, []Diff) { - output := r.engine.Replace(input) - if input == output { - return input, nil - } - diffs := make([]Diff, 0, 8) - buf := bytes.NewBuffer(make([]byte, 0, max(len(input), len(output))+100)) - // faster that making a bytes.Buffer and bufio.ReadString - outlines := strings.SplitAfter(output, "\n") - inlines := strings.SplitAfter(input, "\n") - for i := 0; i < len(inlines); i++ { - if inlines[i] == outlines[i] { - buf.WriteString(outlines[i]) - continue - } - r.recheckLine(inlines[i], i+1, buf, func(d Diff) { - diffs = append(diffs, d) - }) - } - - return buf.String(), diffs -} - -// ReplaceReader applies spelling corrections to a reader stream. Diffs are -// emitted through a callback. -func (r *Replacer) ReplaceReader(raw io.Reader, w io.Writer, next func(Diff)) error { - var ( - err error - line string - lineNum int - ) - reader := bufio.NewReader(raw) - for err == nil { - lineNum++ - line, err = reader.ReadString('\n') - - // if it's EOF, then line has the last line - // don't like the check of err here and - // in for loop - if err != nil && err != io.EOF { - return err - } - // easily 5x faster than regexp+map - if line == r.engine.Replace(line) { - io.WriteString(w, line) - continue - } - // but it can be inaccurate, so we need to double check - r.recheckLine(line, lineNum, w, next) - } - return nil -} diff --git a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/stringreplacer.go b/vertical-pod-autoscaler/vendor/github.com/client9/misspell/stringreplacer.go deleted file mode 100644 index 3151eceb70fa..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/stringreplacer.go +++ /dev/null @@ -1,336 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package misspell - -import ( - "io" - // "log" - "strings" -) - -// StringReplacer replaces a list of strings with replacements. 
-// It is safe for concurrent use by multiple goroutines. -type StringReplacer struct { - r replacer -} - -// replacer is the interface that a replacement algorithm needs to implement. -type replacer interface { - Replace(s string) string - WriteString(w io.Writer, s string) (n int, err error) -} - -// NewStringReplacer returns a new Replacer from a list of old, new string pairs. -// Replacements are performed in order, without overlapping matches. -func NewStringReplacer(oldnew ...string) *StringReplacer { - if len(oldnew)%2 == 1 { - panic("strings.NewReplacer: odd argument count") - } - - return &StringReplacer{r: makeGenericReplacer(oldnew)} -} - -// Replace returns a copy of s with all replacements performed. -func (r *StringReplacer) Replace(s string) string { - return r.r.Replace(s) -} - -// WriteString writes s to w with all replacements performed. -func (r *StringReplacer) WriteString(w io.Writer, s string) (n int, err error) { - return r.r.WriteString(w, s) -} - -// trieNode is a node in a lookup trie for prioritized key/value pairs. Keys -// and values may be empty. For example, the trie containing keys "ax", "ay", -// "bcbc", "x" and "xy" could have eight nodes: -// -// n0 - -// n1 a- -// n2 .x+ -// n3 .y+ -// n4 b- -// n5 .cbc+ -// n6 x+ -// n7 .y+ -// -// n0 is the root node, and its children are n1, n4 and n6; n1's children are -// n2 and n3; n4's child is n5; n6's child is n7. Nodes n0, n1 and n4 (marked -// with a trailing "-") are partial keys, and nodes n2, n3, n5, n6 and n7 -// (marked with a trailing "+") are complete keys. -type trieNode struct { - // value is the value of the trie node's key/value pair. It is empty if - // this node is not a complete key. - value string - // priority is the priority (higher is more important) of the trie node's - // key/value pair; keys are not necessarily matched shortest- or longest- - // first. Priority is positive if this node is a complete key, and zero - // otherwise. In the example above, positive/zero priorities are marked - // with a trailing "+" or "-". - priority int - - // A trie node may have zero, one or more child nodes: - // * if the remaining fields are zero, there are no children. - // * if prefix and next are non-zero, there is one child in next. - // * if table is non-zero, it defines all the children. - // - // Prefixes are preferred over tables when there is one child, but the - // root node always uses a table for lookup efficiency. - - // prefix is the difference in keys between this trie node and the next. - // In the example above, node n4 has prefix "cbc" and n4's next node is n5. - // Node n5 has no children and so has zero prefix, next and table fields. - prefix string - next *trieNode - - // table is a lookup table indexed by the next byte in the key, after - // remapping that byte through genericReplacer.mapping to create a dense - // index. In the example above, the keys only use 'a', 'b', 'c', 'x' and - // 'y', which remap to 0, 1, 2, 3 and 4. All other bytes remap to 5, and - // genericReplacer.tableSize will be 5. Node n0's table will be - // []*trieNode{ 0:n1, 1:n4, 3:n6 }, where the 0, 1 and 3 are the remapped - // 'a', 'b' and 'x'. - table []*trieNode -} - -func (t *trieNode) add(key, val string, priority int, r *genericReplacer) { - if key == "" { - if t.priority == 0 { - t.value = val - t.priority = priority - } - return - } - - if t.prefix != "" { - // Need to split the prefix among multiple nodes. 
- var n int // length of the longest common prefix - for ; n < len(t.prefix) && n < len(key); n++ { - if t.prefix[n] != key[n] { - break - } - } - if n == len(t.prefix) { - t.next.add(key[n:], val, priority, r) - } else if n == 0 { - // First byte differs, start a new lookup table here. Looking up - // what is currently t.prefix[0] will lead to prefixNode, and - // looking up key[0] will lead to keyNode. - var prefixNode *trieNode - if len(t.prefix) == 1 { - prefixNode = t.next - } else { - prefixNode = &trieNode{ - prefix: t.prefix[1:], - next: t.next, - } - } - keyNode := new(trieNode) - t.table = make([]*trieNode, r.tableSize) - t.table[r.mapping[t.prefix[0]]] = prefixNode - t.table[r.mapping[key[0]]] = keyNode - t.prefix = "" - t.next = nil - keyNode.add(key[1:], val, priority, r) - } else { - // Insert new node after the common section of the prefix. - next := &trieNode{ - prefix: t.prefix[n:], - next: t.next, - } - t.prefix = t.prefix[:n] - t.next = next - next.add(key[n:], val, priority, r) - } - } else if t.table != nil { - // Insert into existing table. - m := r.mapping[key[0]] - if t.table[m] == nil { - t.table[m] = new(trieNode) - } - t.table[m].add(key[1:], val, priority, r) - } else { - t.prefix = key - t.next = new(trieNode) - t.next.add("", val, priority, r) - } -} - -func (r *genericReplacer) lookup(s string, ignoreRoot bool) (val string, keylen int, found bool) { - // Iterate down the trie to the end, and grab the value and keylen with - // the highest priority. - bestPriority := 0 - node := &r.root - n := 0 - for node != nil { - if node.priority > bestPriority && !(ignoreRoot && node == &r.root) { - bestPriority = node.priority - val = node.value - keylen = n - found = true - } - - if s == "" { - break - } - if node.table != nil { - index := r.mapping[ByteToLower(s[0])] - if int(index) == r.tableSize { - break - } - node = node.table[index] - s = s[1:] - n++ - } else if node.prefix != "" && StringHasPrefixFold(s, node.prefix) { - n += len(node.prefix) - s = s[len(node.prefix):] - node = node.next - } else { - break - } - } - return -} - -// genericReplacer is the fully generic algorithm. -// It's used as a fallback when nothing faster can be used. -type genericReplacer struct { - root trieNode - // tableSize is the size of a trie node's lookup table. It is the number - // of unique key bytes. - tableSize int - // mapping maps from key bytes to a dense index for trieNode.table. - mapping [256]byte -} - -func makeGenericReplacer(oldnew []string) *genericReplacer { - r := new(genericReplacer) - // Find each byte used, then assign them each an index. - for i := 0; i < len(oldnew); i += 2 { - key := strings.ToLower(oldnew[i]) - for j := 0; j < len(key); j++ { - r.mapping[key[j]] = 1 - } - } - - for _, b := range r.mapping { - r.tableSize += int(b) - } - - var index byte - for i, b := range r.mapping { - if b == 0 { - r.mapping[i] = byte(r.tableSize) - } else { - r.mapping[i] = index - index++ - } - } - // Ensure root node uses a lookup table (for performance). - r.root.table = make([]*trieNode, r.tableSize) - - for i := 0; i < len(oldnew); i += 2 { - r.root.add(strings.ToLower(oldnew[i]), oldnew[i+1], len(oldnew)-i, r) - } - return r -} - -type appendSliceWriter []byte - -// Write writes to the buffer to satisfy io.Writer. -func (w *appendSliceWriter) Write(p []byte) (int, error) { - *w = append(*w, p...) - return len(p), nil -} - -// WriteString writes to the buffer without string->[]byte->string allocations. 
-func (w *appendSliceWriter) WriteString(s string) (int, error) { - *w = append(*w, s...) - return len(s), nil -} - -type stringWriterIface interface { - WriteString(string) (int, error) -} - -type stringWriter struct { - w io.Writer -} - -func (w stringWriter) WriteString(s string) (int, error) { - return w.w.Write([]byte(s)) -} - -func getStringWriter(w io.Writer) stringWriterIface { - sw, ok := w.(stringWriterIface) - if !ok { - sw = stringWriter{w} - } - return sw -} - -func (r *genericReplacer) Replace(s string) string { - buf := make(appendSliceWriter, 0, len(s)) - r.WriteString(&buf, s) - return string(buf) -} - -func (r *genericReplacer) WriteString(w io.Writer, s string) (n int, err error) { - sw := getStringWriter(w) - var last, wn int - var prevMatchEmpty bool - for i := 0; i <= len(s); { - // Fast path: s[i] is not a prefix of any pattern. - if i != len(s) && r.root.priority == 0 { - index := int(r.mapping[ByteToLower(s[i])]) - if index == r.tableSize || r.root.table[index] == nil { - i++ - continue - } - } - - // Ignore the empty match iff the previous loop found the empty match. - val, keylen, match := r.lookup(s[i:], prevMatchEmpty) - prevMatchEmpty = match && keylen == 0 - if match { - orig := s[i : i+keylen] - switch CaseStyle(orig) { - case CaseUnknown: - // pretend we didn't match - // i++ - // continue - case CaseUpper: - val = strings.ToUpper(val) - case CaseLower: - val = strings.ToLower(val) - case CaseTitle: - if len(val) < 2 { - val = strings.ToUpper(val) - } else { - val = strings.ToUpper(val[:1]) + strings.ToLower(val[1:]) - } - } - wn, err = sw.WriteString(s[last:i]) - n += wn - if err != nil { - return - } - //log.Printf("%d: Going to correct %q with %q", i, s[i:i+keylen], val) - wn, err = sw.WriteString(val) - n += wn - if err != nil { - return - } - i += keylen - last = i - continue - } - i++ - } - if last != len(s) { - wn, err = sw.WriteString(s[last:]) - n += wn - } - return -} diff --git a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/stringreplacer_test.gox b/vertical-pod-autoscaler/vendor/github.com/client9/misspell/stringreplacer_test.gox deleted file mode 100644 index 70da997f6e0d..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/stringreplacer_test.gox +++ /dev/null @@ -1,421 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package misspell_test - -import ( - "bytes" - "fmt" - "strings" - "testing" - - . "github.com/client9/misspell" -) - -var htmlEscaper = NewStringReplacer( - "&", "&", - "<", "<", - ">", ">", - `"`, """, - "'", "'", -) - -var htmlUnescaper = NewStringReplacer( - "&", "&", - "<", "<", - ">", ">", - """, `"`, - "'", "'", -) - -// The http package's old HTML escaping function. -func oldHTMLEscape(s string) string { - s = strings.Replace(s, "&", "&", -1) - s = strings.Replace(s, "<", "<", -1) - s = strings.Replace(s, ">", ">", -1) - s = strings.Replace(s, `"`, """, -1) - s = strings.Replace(s, "'", "'", -1) - return s -} - -var capitalLetters = NewStringReplacer("a", "A", "b", "B") - -// TestReplacer tests the replacer implementations. -func TestReplacer(t *testing.T) { - type testCase struct { - r *StringReplacer - in, out string - } - var testCases []testCase - - // str converts 0xff to "\xff". This isn't just string(b) since that converts to UTF-8. 
- str := func(b byte) string { - return string([]byte{b}) - } - var s []string - - // inc maps "\x00"->"\x01", ..., "a"->"b", "b"->"c", ..., "\xff"->"\x00". - for i := 0; i < 256; i++ { - s = append(s, str(byte(i)), str(byte(i+1))) - } - inc := NewStringReplacer(s...) - - // Test cases with 1-byte old strings, 1-byte new strings. - testCases = append(testCases, - testCase{capitalLetters, "brad", "BrAd"}, - testCase{capitalLetters, strings.Repeat("a", (32<<10)+123), strings.Repeat("A", (32<<10)+123)}, - testCase{capitalLetters, "", ""}, - - testCase{inc, "brad", "csbe"}, - testCase{inc, "\x00\xff", "\x01\x00"}, - testCase{inc, "", ""}, - - testCase{NewStringReplacer("a", "1", "a", "2"), "brad", "br1d"}, - ) - - // repeat maps "a"->"a", "b"->"bb", "c"->"ccc", ... - s = nil - for i := 0; i < 256; i++ { - n := i + 1 - 'a' - if n < 1 { - n = 1 - } - s = append(s, str(byte(i)), strings.Repeat(str(byte(i)), n)) - } - repeat := NewStringReplacer(s...) - - // Test cases with 1-byte old strings, variable length new strings. - testCases = append(testCases, - testCase{htmlEscaper, "No changes", "No changes"}, - testCase{htmlEscaper, "I <3 escaping & stuff", "I <3 escaping & stuff"}, - testCase{htmlEscaper, "&&&", "&&&"}, - testCase{htmlEscaper, "", ""}, - - testCase{repeat, "brad", "bbrrrrrrrrrrrrrrrrrradddd"}, - testCase{repeat, "abba", "abbbba"}, - testCase{repeat, "", ""}, - - testCase{NewStringReplacer("a", "11", "a", "22"), "brad", "br11d"}, - ) - - // The remaining test cases have variable length old strings. - - testCases = append(testCases, - testCase{htmlUnescaper, "&amp;", "&"}, - testCase{htmlUnescaper, "<b>HTML's neat</b>", "HTML's neat"}, - testCase{htmlUnescaper, "", ""}, - - testCase{NewStringReplacer("a", "1", "a", "2", "xxx", "xxx"), "brad", "br1d"}, - - testCase{NewStringReplacer("a", "1", "aa", "2", "aaa", "3"), "aaaa", "1111"}, - - testCase{NewStringReplacer("aaa", "3", "aa", "2", "a", "1"), "aaaa", "31"}, - ) - - // gen1 has multiple old strings of variable length. There is no - // overall non-empty common prefix, but some pairwise common prefixes. - gen1 := NewStringReplacer( - "aaa", "3[aaa]", - "aa", "2[aa]", - "a", "1[a]", - "i", "i", - "longerst", "most long", - "longer", "medium", - "long", "short", - "xx", "xx", - "x", "X", - "X", "Y", - "Y", "Z", - ) - testCases = append(testCases, - testCase{gen1, "fooaaabar", "foo3[aaa]b1[a]r"}, - testCase{gen1, "long, longerst, longer", "short, most long, medium"}, - testCase{gen1, "xxxxx", "xxxxX"}, - testCase{gen1, "XiX", "YiY"}, - testCase{gen1, "", ""}, - ) - - // gen2 has multiple old strings with no pairwise common prefix. - gen2 := NewStringReplacer( - "roses", "red", - "violets", "blue", - "sugar", "sweet", - ) - testCases = append(testCases, - testCase{gen2, "roses are red, violets are blue...", "red are red, blue are blue..."}, - testCase{gen2, "", ""}, - ) - - // gen3 has multiple old strings with an overall common prefix. - gen3 := NewStringReplacer( - "abracadabra", "poof", - "abracadabrakazam", "splat", - "abraham", "lincoln", - "abrasion", "scrape", - "abraham", "isaac", - ) - testCases = append(testCases, - testCase{gen3, "abracadabrakazam abraham", "poofkazam lincoln"}, - testCase{gen3, "abrasion abracad", "scrape abracad"}, - testCase{gen3, "abba abram abrasive", "abba abram abrasive"}, - testCase{gen3, "", ""}, - ) - - // foo{1,2,3,4} have multiple old strings with an overall common prefix - // and 1- or 2- byte extensions from the common prefix. 
- foo1 := NewStringReplacer( - "foo1", "A", - "foo2", "B", - "foo3", "C", - ) - foo2 := NewStringReplacer( - "foo1", "A", - "foo2", "B", - "foo31", "C", - "foo32", "D", - ) - foo3 := NewStringReplacer( - "foo11", "A", - "foo12", "B", - "foo31", "C", - "foo32", "D", - ) - foo4 := NewStringReplacer( - "foo12", "B", - "foo32", "D", - ) - testCases = append(testCases, - testCase{foo1, "fofoofoo12foo32oo", "fofooA2C2oo"}, - testCase{foo1, "", ""}, - - testCase{foo2, "fofoofoo12foo32oo", "fofooA2Doo"}, - testCase{foo2, "", ""}, - - testCase{foo3, "fofoofoo12foo32oo", "fofooBDoo"}, - testCase{foo3, "", ""}, - - testCase{foo4, "fofoofoo12foo32oo", "fofooBDoo"}, - testCase{foo4, "", ""}, - ) - - // genAll maps "\x00\x01\x02...\xfe\xff" to "[all]", amongst other things. - allBytes := make([]byte, 256) - for i := range allBytes { - allBytes[i] = byte(i) - } - allString := string(allBytes) - genAll := NewStringReplacer( - allString, "[all]", - "\xff", "[ff]", - "\x00", "[00]", - ) - testCases = append(testCases, - testCase{genAll, allString, "[all]"}, - testCase{genAll, "a\xff" + allString + "\x00", "a[ff][all][00]"}, - testCase{genAll, "", ""}, - ) - - // Test cases with empty old strings. - - blankToX1 := NewStringReplacer("", "X") - blankToX2 := NewStringReplacer("", "X", "", "") - blankHighPriority := NewStringReplacer("", "X", "o", "O") - blankLowPriority := NewStringReplacer("o", "O", "", "X") - blankNoOp1 := NewStringReplacer("", "") - blankNoOp2 := NewStringReplacer("", "", "", "A") - blankFoo := NewStringReplacer("", "X", "foobar", "R", "foobaz", "Z") - testCases = append(testCases, - testCase{blankToX1, "foo", "XfXoXoX"}, - testCase{blankToX1, "", "X"}, - - testCase{blankToX2, "foo", "XfXoXoX"}, - testCase{blankToX2, "", "X"}, - - testCase{blankHighPriority, "oo", "XOXOX"}, - testCase{blankHighPriority, "ii", "XiXiX"}, - testCase{blankHighPriority, "oiio", "XOXiXiXOX"}, - testCase{blankHighPriority, "iooi", "XiXOXOXiX"}, - testCase{blankHighPriority, "", "X"}, - - testCase{blankLowPriority, "oo", "OOX"}, - testCase{blankLowPriority, "ii", "XiXiX"}, - testCase{blankLowPriority, "oiio", "OXiXiOX"}, - testCase{blankLowPriority, "iooi", "XiOOXiX"}, - testCase{blankLowPriority, "", "X"}, - - testCase{blankNoOp1, "foo", "foo"}, - testCase{blankNoOp1, "", ""}, - - testCase{blankNoOp2, "foo", "foo"}, - testCase{blankNoOp2, "", ""}, - - testCase{blankFoo, "foobarfoobaz", "XRXZX"}, - testCase{blankFoo, "foobar-foobaz", "XRX-XZX"}, - testCase{blankFoo, "", "X"}, - ) - - // single string replacer - - abcMatcher := NewStringReplacer("abc", "[match]") - - testCases = append(testCases, - testCase{abcMatcher, "", ""}, - testCase{abcMatcher, "ab", "ab"}, - testCase{abcMatcher, "abc", "[match]"}, - testCase{abcMatcher, "abcd", "[match]d"}, - testCase{abcMatcher, "cabcabcdabca", "c[match][match]d[match]a"}, - ) - - // Issue 6659 cases (more single string replacer) - - noHello := NewStringReplacer("Hello", "") - testCases = append(testCases, - testCase{noHello, "Hello", ""}, - testCase{noHello, "Hellox", "x"}, - testCase{noHello, "xHello", "x"}, - testCase{noHello, "xHellox", "xx"}, - ) - - // No-arg test cases. - - nop := NewStringReplacer() - testCases = append(testCases, - testCase{nop, "abc", "abc"}, - testCase{nop, "", ""}, - ) - - // Run the test cases. - - for i, tc := range testCases { - if s := tc.r.Replace(tc.in); s != tc.out { - t.Errorf("%d. strings.Replace(%q) = %q, want %q", i, tc.in, s, tc.out) - } - var buf bytes.Buffer - n, err := tc.r.WriteString(&buf, tc.in) - if err != nil { - t.Errorf("%d. 
WriteString: %v", i, err) - continue - } - got := buf.String() - if got != tc.out { - t.Errorf("%d. WriteString(%q) wrote %q, want %q", i, tc.in, got, tc.out) - continue - } - if n != len(tc.out) { - t.Errorf("%d. WriteString(%q) wrote correct string but reported %d bytes; want %d (%q)", - i, tc.in, n, len(tc.out), tc.out) - } - } -} - -type errWriter struct{} - -func (errWriter) Write(p []byte) (n int, err error) { - return 0, fmt.Errorf("unwritable") -} - -func BenchmarkGenericNoMatch(b *testing.B) { - str := strings.Repeat("A", 100) + strings.Repeat("B", 100) - generic := NewStringReplacer("a", "A", "b", "B", "12", "123") // varying lengths forces generic - for i := 0; i < b.N; i++ { - generic.Replace(str) - } -} - -func BenchmarkGenericMatch1(b *testing.B) { - str := strings.Repeat("a", 100) + strings.Repeat("b", 100) - generic := NewStringReplacer("a", "A", "b", "B", "12", "123") - for i := 0; i < b.N; i++ { - generic.Replace(str) - } -} - -func BenchmarkGenericMatch2(b *testing.B) { - str := strings.Repeat("It's <b>HTML</b>!", 100) - for i := 0; i < b.N; i++ { - htmlUnescaper.Replace(str) - } -} - -func benchmarkSingleString(b *testing.B, pattern, text string) { - r := NewStringReplacer(pattern, "[match]") - b.SetBytes(int64(len(text))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - r.Replace(text) - } -} - -func BenchmarkSingleMaxSkipping(b *testing.B) { - benchmarkSingleString(b, strings.Repeat("b", 25), strings.Repeat("a", 10000)) -} - -func BenchmarkSingleLongSuffixFail(b *testing.B) { - benchmarkSingleString(b, "b"+strings.Repeat("a", 500), strings.Repeat("a", 1002)) -} - -func BenchmarkSingleMatch(b *testing.B) { - benchmarkSingleString(b, "abcdef", strings.Repeat("abcdefghijklmno", 1000)) -} - -func BenchmarkByteByteNoMatch(b *testing.B) { - str := strings.Repeat("A", 100) + strings.Repeat("B", 100) - for i := 0; i < b.N; i++ { - capitalLetters.Replace(str) - } -} - -func BenchmarkByteByteMatch(b *testing.B) { - str := strings.Repeat("a", 100) + strings.Repeat("b", 100) - for i := 0; i < b.N; i++ { - capitalLetters.Replace(str) - } -} - -func BenchmarkByteStringMatch(b *testing.B) { - str := "<" + strings.Repeat("a", 99) + strings.Repeat("b", 99) + ">" - for i := 0; i < b.N; i++ { - htmlEscaper.Replace(str) - } -} - -func BenchmarkHTMLEscapeNew(b *testing.B) { - str := "I <3 to escape HTML & other text too." - for i := 0; i < b.N; i++ { - htmlEscaper.Replace(str) - } -} - -func BenchmarkHTMLEscapeOld(b *testing.B) { - str := "I <3 to escape HTML & other text too." - for i := 0; i < b.N; i++ { - oldHTMLEscape(str) - } -} - -func BenchmarkByteStringReplacerWriteString(b *testing.B) { - str := strings.Repeat("I <3 to escape HTML & other text too.", 100) - buf := new(bytes.Buffer) - for i := 0; i < b.N; i++ { - htmlEscaper.WriteString(buf, str) - buf.Reset() - } -} - -func BenchmarkByteReplacerWriteString(b *testing.B) { - str := strings.Repeat("abcdefghijklmnopqrstuvwxyz", 100) - buf := new(bytes.Buffer) - for i := 0; i < b.N; i++ { - capitalLetters.WriteString(buf, str) - buf.Reset() - } -} - -// BenchmarkByteByteReplaces compares byteByteImpl against multiple Replaces. 
-func BenchmarkByteByteReplaces(b *testing.B) { - str := strings.Repeat("a", 100) + strings.Repeat("b", 100) - for i := 0; i < b.N; i++ { - strings.Replace(strings.Replace(str, "a", "A", -1), "b", "B", -1) - } -} diff --git a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/url.go b/vertical-pod-autoscaler/vendor/github.com/client9/misspell/url.go deleted file mode 100644 index 1a259f5f99cc..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/url.go +++ /dev/null @@ -1,17 +0,0 @@ -package misspell - -import ( - "regexp" -) - -// Regexp for URL https://mathiasbynens.be/demo/url-regex -// -// original @imme_emosol (54 chars) has trouble with dashes in hostname -// @(https?|ftp)://(-\.)?([^\s/?\.#-]+\.?)+(/[^\s]*)?$@iS -var reURL = regexp.MustCompile(`(?i)(https?|ftp)://(-\.)?([^\s/?\.#]+\.?)+(/[^\s]*)?`) - -// StripURL attemps to replace URLs with blank spaces, e.g. -// "xxx http://foo.com/ yyy -> "xxx yyyy" -func StripURL(s string) string { - return reURL.ReplaceAllStringFunc(s, replaceWithBlanks) -} diff --git a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/words.go b/vertical-pod-autoscaler/vendor/github.com/client9/misspell/words.go deleted file mode 100644 index c92dd19d040b..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/client9/misspell/words.go +++ /dev/null @@ -1,31158 +0,0 @@ -package misspell - -// Code generated automatically. DO NOT EDIT. - -// DictMain is the main rule set, not including locale-specific spellings -var DictMain = []string{ - "differentiatiations", "differentiations", - "disproportionaltely", "disproportionately", - "oversimplificiation", "oversimplification", - "transcendentational", "transcendental", - "anthromorphization", "anthropomorphization", - "disporportionately", "disproportionately", - "dispraportionately", "disproportionately", - "disproportianately", "disproportionately", - "disproportionatley", "disproportionately", - "disproprotionately", "disproportionately", - "fundamentalistisch", "fundamentalists", - "fundamentalistiska", "fundamentalists", - "fundamentalistiske", "fundamentalists", - "fundamentalistiskt", "fundamentalists", - "histocompatability", "histocompatibility", - "microtransacations", "microtransactions", - "microtransacciones", "microtransactions", - "microtransactional", "microtransactions", - "microtransactioned", "microtransactions", - "misunderstandingly", "misunderstandings", - "oversemplification", "oversimplification", - "oversimplifacation", "oversimplification", - "oversimplificaiton", "oversimplification", - "oversimplificating", "oversimplification", - "oversimplyfication", "oversimplification", - "cardiovasculaires", "cardiovascular", - "certificationkits", "certifications", - "counterporductive", "counterproductive", - "coutnerproductive", "counterproductive", - "disporportionatly", "disproportionately", - "disproportiantely", "disproportionately", - "disproportionatly", "disproportionately", - "disproportionnate", "disproportionate", - "disrepresentation", "misrepresentation", - "fundamentalistisk", "fundamentalists", - "incompatabilities", "incompatibilities", - "inconsequentional", "inconsequential", - "indistinguishible", "indistinguishable", - "indistingusihable", "indistinguishable", - "indistinquishable", "indistinguishable", - "indistuingishable", "indistinguishable", - "instatutionalized", "institutionalized", - "institucionalized", "institutionalized", - "institutionilized", "institutionalized", - "instutitionalized", "institutionalized", - 
"instututionalized", "institutionalized", - "interchangeablely", "interchangeably", - "interchangeablity", "interchangeably", - "intercontinential", "intercontinental", - "micortransactions", "microtransactions", - "microstansactions", "microtransactions", - "microtramsactions", "microtransactions", - "microtranasctions", "microtransactions", - "microtransacitons", "microtransactions", - "microtransacrions", "microtransactions", - "microtransactioms", "microtransactions", - "microtransactiosn", "microtransactions", - "microtranscations", "microtransactions", - "microtrasnactions", "microtransactions", - "mircotransactions", "microtransactions", - "misinterpretating", "misinterpreting", - "misrepresantation", "misrepresentation", - "misrepresentaiton", "misrepresentation", - "misrepresentating", "misrepresenting", - "misunderstantings", "misunderstandings", - "mocrotransactions", "microtransactions", - "oversimplifaction", "oversimplification", - "oversimplificaton", "oversimplification", - "oversimplifiction", "oversimplification", - "responsibillities", "responsibilities", - "unconstitutionnal", "unconstitutional", - "accomplishements", "accomplishments", - "admininistrative", "administrative", - "antidepresssants", "antidepressants", - "architechturally", "architecturally", - "cardiovasculaire", "cardiovascular", - "charactarization", "characterization", - "characterazation", "characterization", - "characterisitics", "characteristics", - "characteristsics", "characteristic", - "characterizarion", "characterization", - "charecterization", "characterization", - "charicterization", "characterization", - "circumstantional", "circumstantial", - "conversationable", "conversational", - "counterprodutive", "counterproductive", - "demonstrationens", "demonstrations", - "deterministische", "deterministic", - "differenciations", "differentiation", - "differentiantion", "differentiation", - "differentiatiors", "differentiation", - "differentitation", "differentiation", - "disperportionate", "disproportionate", - "disporportionate", "disproportionate", - "dispraportionate", "disproportionate", - "disproportianate", "disproportionate", - "disproportionaly", "disproportionately", - "disproprotionate", "disproportionate", - "electromagnectic", "electromagnetic", - "enviornmentalist", "environmentalist", - "environmentality", "environmentally", - "extraordinairily", "extraordinarily", - "extraordinarilly", "extraordinary", - "extraterrestials", "extraterrestrials", - "fundamentalismos", "fundamentalists", - "fundamentalismus", "fundamentalists", - "fundamentalistas", "fundamentalists", - "fundamentalisten", "fundamentalists", - "fundamentalister", "fundamentalists", - "imcomprehensible", "incomprehensible", - "immunosupressant", "immunosuppressant", - "imperfectionists", "imperfections", - "implementaciones", "implementations", - "implementationen", "implementations", - "implementationer", "implementations", - "inappropriatelly", "inappropriately", - "incompatablities", "incompatibilities", - "incompatiblities", "incompatibilities", - "incomprehencible", "incomprehensible", - "incomprehendible", "incomprehensible", - "incomprehenisble", "incomprehensible", - "incomprehensable", "incomprehensible", - "incomprehinsible", "incomprehensible", - "incomprihensible", "incomprehensible", - "inconprehensible", "incomprehensible", - "inconsistentcies", "inconsistencies", - "inconstitutional", "unconstitutional", - "incrompehensible", "incomprehensible", - "indistinguisable", "indistinguishable", - 
"institutionlized", "institutionalized", - "intellectualiser", "intellectuals", - "intellectualisme", "intellectuals", - "interchangeabley", "interchangeably", - "internationnally", "internationally", - "interpretaciones", "interpretations", - "interpretationen", "interpretations", - "manoeuverability", "maneuverability", - "massachusettians", "massachusetts", - "microtransacions", "microtransactions", - "microtransacting", "microtransactions", - "microtransactios", "microtransactions", - "microtransactons", "microtransactions", - "microtransations", "microtransactions", - "microtranscation", "microtransactions", - "mircotransaction", "microtransactions", - "miscommunciation", "miscommunication", - "miscommunicaiton", "miscommunication", - "miscomunnication", "miscommunication", - "miscummunication", "miscommunication", - "misinterpretated", "misinterpreted", - "misinterpretions", "misinterpreting", - "misinterpretting", "misinterpreting", - "misproportionate", "disproportionate", - "misrepresenation", "misrepresentation", - "misrepresentaion", "misrepresentation", - "misrepresentated", "misrepresented", - "misrepresentatie", "misrepresentation", - "misrepresentativ", "misrepresentation", - "misubderstanding", "misunderstandings", - "misudnerstanding", "misunderstandings", - "misundarstanding", "misunderstandings", - "misunderatanding", "misunderstandings", - "misunderdtanding", "misunderstandings", - "misundersatnding", "misunderstandings", - "misundersranding", "misunderstandings", - "misunderstadings", "misunderstandings", - "misunderstadning", "misunderstandings", - "misunderstamding", "misunderstandings", - "misunderstandigs", "misunderstandings", - "misunderstandimg", "misunderstandings", - "misunderstandind", "misunderstandings", - "misunderstanging", "misunderstandings", - "misunderstanidng", "misunderstandings", - "misunderstanings", "misunderstandings", - "misunderstansing", "misunderstandings", - "misunderstanting", "misunderstandings", - "misunderstending", "misunderstandings", - "misunderstnading", "misunderstandings", - "misunderstsnding", "misunderstandings", - "misunderstunding", "misunderstandings", - "misundertsanding", "misunderstandings", - "misundrestanding", "misunderstandings", - "misunterstanding", "misunderstandings", - "nationalistische", "nationalistic", - "nationalististic", "nationalistic", - "neconstitutional", "unconstitutional", - "notwhithstanding", "notwithstanding", - "objectificiation", "objectification", - "organisationnels", "organisations", - "perpendiculaires", "perpendicular", - "phillosophically", "philosophically", - "preinitalization", "preinitialization", - "prescriptionists", "prescriptions", - "procrastinarting", "procrastinating", - "procrastinationg", "procrastinating", - "procrastinazione", "procrastination", - "professionalisim", "professionalism", - "professionalisme", "professionals", - "professionallism", "professionalism", - "professionnalism", "professionalism", - "programattically", "programmatically", - "proportionallity", "proportionally", - "reaponsibilities", "responsibilities", - "reinitalizations", "reinitializations", - "representaciones", "representations", - "representationen", "representations", - "representationer", "representations", - "repsonsibilities", "responsibilities", - "responcibilities", "responsibilities", - "responisbilities", "responsibilities", - "responsabilities", "responsibilities", - "responsebilities", "responsibilities", - "straightforeward", "straightforward", - "surrepetitiously", "surreptitiously", - 
"technologicially", "technologically", - "unconditionnally", "unconditionally", - "unconfortability", "discomfort", - "unconstititional", "unconstitutional", - "uncontrollablely", "uncontrollably", - "underestimateing", "underestimating", - "understandablely", "understandably", - "unintentionnally", "unintentionally", - "unsubstantianted", "unsubstantiated", - "unsubstantiative", "unsubstantiated", - "acclimitization", "acclimatization", - "accomplishemnts", "accomplishments", - "accountabillity", "accountability", - "acknolwedgement", "acknowledgement", - "acknoweldgement", "acknowledgement", - "acknowldegement", "acknowledgement", - "acknowlegdement", "acknowledgement", - "administratieve", "administrative", - "administratiors", "administrators", - "administrativne", "administrative", - "aforementionned", "aforementioned", - "anitdepressants", "antidepressants", - "antidepressents", "antidepressants", - "archetecturally", "architecturally", - "associationthis", "associations", - "authobiographic", "autobiographic", - "awknowledgement", "acknowledgement", - "bureaucratische", "bureaucratic", - "cardiovascualar", "cardiovascular", - "carnagie-mellon", "carnegie-mellon", - "carnigie-mellon", "carnegie-mellon", - "celebrationists", "celebrations", - "charactaristics", "characteristics", - "characterisitcs", "characteristics", - "characterisitic", "characteristic", - "characterizaton", "characterization", - "charactersistic", "characteristic", - "charactersitics", "characteristics", - "charactoristics", "characteristics", - "charecteristics", "characteristics", - "comfrontational", "confrontational", - "commuinications", "communications", - "compatabilities", "compatibilities", - "complimentarity", "complimentary", - "compositionwise", "compositions", - "confidenciality", "confidential", - "confidentuality", "confidential", - "confrentational", "confrontational", - "confrontacional", "confrontational", - "conglaturations", "congratulations", - "congradulations", "congratulations", - "congragulations", "congratulations", - "congratualtions", "congratulations", - "congraturations", "congratulations", - "consequentually", "consequently", - "constitutionnal", "constitutional", - "deinitalization", "deinitialization", - "denominationals", "denominations", - "destinationhash", "destinations", - "deterministisch", "deterministic", - "developmentwise", "developments", - "differantiation", "differentiation", - "differenciation", "differentiation", - "differientation", "differentiation", - "discriminatoire", "discriminate", - "discriminatorie", "discriminate", - "disproportiante", "disproportionate", - "disproportinate", "disproportionate", - "elecrtomagnetic", "electromagnetic", - "electormagnetic", "electromagnetic", - "electromagentic", "electromagnetic", - "electromagnatic", "electromagnetic", - "electromangetic", "electromagnetic", - "electromegnetic", "electromagnetic", - "electronagnetic", "electromagnetic", - "enivronmentally", "environmentally", - "entrepreneurers", "entrepreneurs", - "enviornmentally", "environmentally", - "enviromentalist", "environmentalist", - "environemntally", "environmentally", - "envrionmentally", "environmentally", - "evolutionarilly", "evolutionary", - "experementation", "experimentation", - "experimantation", "experimentation", - "experimentacion", "experimentation", - "experimentating", "experimentation", - "experimenterade", "experimented", - "experimintation", "experimentation", - "expirementation", "experimentation", - "extraodrinarily", "extraordinarily", - 
"extraordinairly", "extraordinarily", - "extraordinarely", "extraordinarily", - "extraordinaryly", "extraordinarily", - "extraterrestial", "extraterrestrial", - "extroardinarily", "extraordinarily", - "fondamentalists", "fundamentalists", - "fundamendalists", "fundamentalists", - "fundamentalisme", "fundamentals", - "fundamentalismo", "fundamentals", - "fundamentalista", "fundamentals", - "fundamentalisti", "fundamentals", - "fundamnetalists", "fundamentalists", - "fundemantalists", "fundamentalists", - "fundimentalists", "fundamentalists", - "fundumentalists", "fundamentalists", - "gongratulations", "congratulations", - "grammaticallity", "grammatically", - "gundamentalists", "fundamentalists", - "idiosynchracies", "idiosyncrasies", - "implementaitons", "implementations", - "implimentations", "implementations", - "inapporpriately", "inappropriately", - "inappropraitely", "inappropriately", - "inappropriatley", "inappropriately", - "incompatability", "incompatibility", - "incompetentence", "incompetence", - "incomprehensibe", "incomprehensible", - "incomprehesible", "incomprehensible", - "inconcequential", "inconsequential", - "inconcistencies", "inconsistencies", - "inconditionally", "unconditionally", - "inconsecuential", "inconsequential", - "inconsequantial", "inconsequential", - "inconsequencial", "inconsequential", - "inconsequentual", "inconsequential", - "inconsiquential", "inconsequential", - "inconsistancies", "inconsistencies", - "inconsistencias", "inconsistencies", - "inconsistensies", "inconsistencies", - "inconsistenties", "inconsistencies", - "independentisme", "independents", - "independentiste", "independents", - "independentness", "independents", - "inexperiencable", "inexperience", - "inplementations", "implementations", - "instantaneoulsy", "instantaneous", - "institutionella", "institutional", - "institutionnels", "institutions", - "instutionalized", "institutionalized", - "insubstantiated", "unsubstantiated", - "interchangabley", "interchangeably", - "interchangebale", "interchangeable", - "intercontinetal", "intercontinental", - "interpertations", "interpretations", - "interpratations", "interpretations", - "interpritations", "interpretations", - "intersectionals", "intersections", - "intrepretations", "interpretations", - "investigationes", "investigations", - "journalistische", "journalistic", - "libertarianisim", "libertarianism", - "libertarianisme", "libertarians", - "libertarianismo", "libertarians", - "libertarianists", "libertarians", - "libertariansism", "libertarianism", - "manisfestations", "manifestations", - "manouverability", "maneuverability", - "manufacturerers", "manufacturers", - "marshmallowiest", "marshmallows", - "marshmallowness", "marshmallows", - "microtransacton", "microtransactions", - "mininterpreting", "misinterpreting", - "miscommuniation", "miscommunication", - "miscommunicatie", "miscommunication", - "miscommuniction", "miscommunication", - "misinterperting", "misinterpreting", - "misinterprating", "misinterpreting", - "misinterprented", "misinterpret", - "misinterprested", "misinterpret", - "misinterpretion", "misinterpreting", - "misinterpretted", "misinterpreted", - "misinterpriting", "misinterpreting", - "misintrepreting", "misinterpreting", - "misrepresention", "misrepresenting", - "misunderstading", "misunderstanding", - "misunderstandig", "misunderstandings", - "misunderstandng", "misunderstandings", - "misunderstaning", "misunderstanding", - "multicultralism", "multiculturalism", - "multinationella", "multinational", - 
"nationalistisch", "nationalists", - "nationalistisen", "nationalists", - "nationalistiska", "nationalists", - "nationalistiske", "nationalists", - "nationalistiskt", "nationalists", - "nationalistista", "nationalists", - "objectificaiton", "objectification", - "objectivication", "objectification", - "organisationens", "organisations", - "organisationers", "organisations", - "overestimateing", "overestimating", - "paychologically", "psychologically", - "performancetest", "performances", - "performancewise", "performances", - "perpendiculaire", "perpendicular", - "pharamceuticals", "pharmaceutical", - "pharmacueticals", "pharmaceutical", - "philoshopically", "philosophically", - "philosohpically", "philosophically", - "philosophycally", "philosophically", - "phsycologically", "psychologically", - "phychologically", "psychologically", - "phylosophically", "philosophically", - "physcologically", "psychologically", - "precrastination", "procrastination", - "prefessionalism", "professionalism", - "premonasterians", "premonstratensians", - "procastrinating", "procrastinating", - "procastrination", "procrastination", - "procrascinating", "procrastinating", - "procrastenating", "procrastinating", - "procrastiantion", "procrastination", - "procrastibating", "procrastinating", - "procrastibation", "procrastination", - "procrastonating", "procrastinating", - "procrestinating", "procrastinating", - "procrestination", "procrastination", - "professionalsim", "professionalism", - "prograstination", "procrastination", - "progressionists", "progressions", - "progressionwise", "progressions", - "prokrastination", "procrastination", - "proportionallly", "proportionally", - "proscratination", "procrastination", - "pscyhologically", "psychologically", - "pshycologically", "psychologically", - "psichologically", "psychologically", - "psychedelicious", "psychedelics", - "psychedelicness", "psychedelics", - "psycholigically", "psychologically", - "psychopathische", "psychopathic", - "pyschologically", "psychologically", - "racionalization", "rationalization", - "rationalizaiton", "rationalization", - "rationalizating", "rationalization", - "reccomendations", "recommendations", - "recommandations", "recommendations", - "recommondations", "recommendations", - "reinitalization", "reinitialization", - "repersentations", "representations", - "represantations", "representations", - "represantatives", "representatives", - "representatieve", "representative", - "representativas", "representatives", - "representetives", "representatives", - "representitives", "representatives", - "responibilities", "responsibilities", - "responsibilites", "responsibilities", - "responsibilitys", "responsibilities", - "responsibillity", "responsibility", - "responsibilties", "responsibilities", - "responsiblities", "responsibilities", - "ridiculoussness", "ridiculousness", - "saskatchewinian", "saskatchewan", - "satisfactorally", "satisfactory", - "satisfactorilly", "satisfactory", - "schizophreniiic", "schizophrenic", - "sensationalisim", "sensationalism", - "spreadsheeticus", "spreadsheets", - "starightforward", "straightforward", - "straigthforward", "straightforward", - "striaghtforward", "straightforward", - "sustainabillity", "sustainability", - "technoligically", "technologically", - "troubelshooting", "troubleshooting", - "troublehsooting", "troubleshooting", - "troubleshotting", "troubleshooting", - "trustworthyness", "trustworthiness", - "ubsubstantiated", "unsubstantiated", - "unappropriately", "inappropriately", - 
"uncomfortablely", "uncomfortably", - "uncomfortablity", "uncomfortably", - "unconditionable", "unconditional", - "unconstituional", "unconstitutional", - "uncontitutional", "unconstitutional", - "uncontrollabley", "uncontrollably", - "uncontrollablly", "uncontrollably", - "unconventionnal", "unconventional", - "underastimating", "underestimating", - "underestemating", "underestimating", - "understandabley", "understandably", - "unintensionally", "unintentionally", - "unprofessionnal", "unprofessional", - "unresponsivness", "unresponsive", - "unsibstantiated", "unsubstantiated", - "unsubstanciated", "unsubstantiated", - "unsubstansiated", "unsubstantiated", - "unsusbtantiated", "unsubstantiated", - "untranslateable", "untranslatable", - "vulernabilities", "vulnerabilities", - "vulnarabilities", "vulnerabilities", - "vulnurabilities", "vulnerabilities", - "vunlerabilities", "vulnerabilities", - "vurnerabilities", "vulnerabilities", - "accomplishemnt", "accomplishment", - "accomplishents", "accomplishes", - "acconplishment", "accomplishment", - "acknowledgeing", "acknowledging", - "acknowledgemnt", "acknowledgement", - "acomplishments", "accomplishments", - "administartion", "administration", - "administartors", "administrators", - "administraters", "administrators", - "administratief", "administrative", - "administratiei", "administrative", - "administratior", "administrator", - "administrativo", "administration", - "adminsitration", "administration", - "adminsitrative", "administrative", - "adminsitrators", "administrators", - "affectionatley", "affectionate", - "aforememtioned", "aforementioned", - "aforementioend", "aforementioned", - "alternativelly", "alternatively", - "amministrative", "administrative", - "anitdepressant", "antidepressants", - "approproximate", "approximate", - "approximatelly", "approximately", - "archeaologists", "archeologists", - "architechtures", "architectures", - "architectureal", "architectural", - "architecturial", "architectural", - "assassintation", "assassination", - "authenitcation", "authentication", - "authenticaiton", "authentication", - "authobiography", "autobiography", - "breakthroughts", "breakthroughs", - "bureaucratisch", "bureaucratic", - "calssification", "classification", - "capatilization", "capitalization", - "capitalizacion", "capitalization", - "capitalizaiton", "capitalization", - "capitalizating", "capitalization", - "capitilazation", "capitalization", - "capitolization", "capitalization", - "captialization", "capitalization", - "cardiocascular", "cardiovascular", - "cardiovascualr", "cardiovascular", - "cardiovasuclar", "cardiovascular", - "caridovascular", "cardiovascular", - "cessationalism", "sensationalism", - "cessationalist", "sensationalist", - "charactaristic", "characteristic", - "characterisics", "characteristics", - "characterisitc", "characteristics", - "characteristcs", "characteristics", - "characteritics", "characteristic", - "charactersitic", "characteristics", - "charasteristic", "characteristics", - "charecteristic", "characteristic", - "cheeseburguers", "cheeseburgers", - "cinematagraphy", "cinematography", - "cinematagrophy", "cinematography", - "cinematograhpy", "cinematography", - "cinematogrophy", "cinematography", - "cinematogrpahy", "cinematography", - "cinemetography", "cinematography", - "cinimatography", "cinematography", - "circumstansial", "circumstantial", - "circumstantual", "circumstantial", - "circumstential", "circumstantial", - "circunstantial", "circumstantial", - "classificaiton", "classification", 
- "coincedentally", "coincidentally", - "coinsidentally", "coincidentally", - "commemmorating", "commemorating", - "communciations", "communications", - "compatablities", "compatibilities", - "compatibillity", "compatibility", - "compatiblities", "compatibilities", - "competitioners", "competitions", - "comphrehensive", "comprehensive", - "computationnal", "computational", - "conciderations", "considerations", - "condescenscion", "condescension", - "condradictions", "contradictions", - "configuartions", "configurations", - "confugurations", "configurations", - "conglaturation", "congratulations", - "congratulatons", "congratulations", - "conicidentally", "coincidentally", - "conifgurations", "configurations", - "conscioussness", "consciousness", - "consentrations", "concentrations", - "consiciousness", "consciousness", - "considerablely", "considerably", - "considerstions", "considerations", - "constititional", "constitutional", - "constitucional", "constitutional", - "contamporaries", "contemporaries", - "contemporaneus", "contemporaneous", - "contraceptivos", "contraceptives", - "contradicitons", "contradictions", - "contradictiong", "contradicting", - "contriceptives", "contraceptives", - "controceptives", "contraceptives", - "controdictions", "contradictions", - "conversacional", "conversational", - "converstaional", "conversational", - "correpsondence", "correspondence", - "correspondants", "correspondents", - "correspondense", "correspondence", - "correspondente", "correspondence", - "corrispondants", "correspondents", - "corrispondence", "correspondence", - "corrospondence", "correspondence", - "costumizations", "customization", - "councidentally", "coincidentally", - "crystalisation", "crystallisation", - "curcumstantial", "circumstantial", - "demenstrations", "demonstrations", - "deminstrations", "demonstrations", - "demonstartions", "demonstrations", - "demonstrativno", "demonstrations", - "demonstrativos", "demonstrations", - "demosntrations", "demonstrations", - "desintegration", "disintegration", - "deterioriating", "deteriorating", - "determinisitic", "deterministic", - "differentiaton", "differentiation", - "disatisfaction", "dissatisfaction", - "discrimanatory", "discriminatory", - "discriminacion", "discrimination", - "discriminitory", "discriminatory", - "disillusionned", "disillusioned", - "diskrimination", "discrimination", - "disproportiate", "disproportionate", - "distingiushing", "distinguishing", - "distingquished", "distinguished", - "distingusihing", "distinguishing", - "distinquishing", "distinguishing", - "distuingishing", "distinguishing", - "dysfunctionnal", "dysfunctional", - "eldistribution", "redistribution", - "electromagnetc", "electromagnetic", - "electromagntic", "electromagnetic", - "endoctrination", "indoctrination", - "enthusiastisch", "enthusiastic", - "entrepreneuers", "entrepreneurs", - "entrepreneures", "entrepreneurs", - "enviormentally", "environmentally", - "enviromentally", "environmentally", - "environmentals", "environments", - "environmentaly", "environmentally", - "experimentaion", "experimentation", - "experimentella", "experimental", - "extraordinairy", "extraordinary", - "extraordinarly", "extraordinary", - "extrordinarily", "extraordinarily", - "fondamentalist", "fundamentalist", - "foreshadowning", "foreshadowing", - "functionallity", "functionality", - "fundamendalist", "fundamentalist", - "fundamentalits", "fundamentalists", - "fundamnetalist", "fundamentalist", - "fundemantalist", "fundamentalist", - "fundimentalist", 
"fundamentalist", - "fundumentalist", "fundamentalist", - "generalizacion", "generalization", - "generalizating", "generalization", - "generelization", "generalization", - "geographacilly", "geographically", - "geographycally", "geographically", - "geogrpahically", "geographically", - "geopraphically", "geographically", - "goegraphically", "geographically", - "grandchilderen", "grandchildren", - "gravitationnal", "gravitational", - "groubdbreaking", "groundbreaking", - "groudnbreaking", "groundbreaking", - "hallcuinations", "hallucination", - "hallicunations", "hallucinations", - "hallucenations", "hallucinations", - "halluciantions", "hallucinations", - "hallucinaitons", "hallucination", - "hallunications", "hallucinations", - "hallusinations", "hallucinations", - "halluzinations", "hallucinations", - "hellucinations", "hallucinations", - "heterosexuella", "heterosexual", - "hipothetically", "hypothetically", - "homosexuallity", "homosexuality", - "hullucinations", "hallucinations", - "hyopthetically", "hypothetically", - "hypathetically", "hypothetically", - "hypethetically", "hypothetically", - "hypotehtically", "hypothetically", - "hypotethically", "hypothetically", - "identificacion", "identification", - "identificaiton", "identification", - "identificativo", "identification", - "identifikation", "identification", - "imlpementation", "implementations", - "impelmentation", "implementations", - "impersonationg", "impersonating", - "implementacion", "implementation", - "implementaiton", "implementation", - "implementating", "implementation", - "implementatino", "implementations", - "implemetnation", "implementations", - "implimentation", "implementation", - "impossibillity", "impossibility", - "inadvertantely", "inadvertently", - "inappropriatly", "inappropriately", - "inapproprietly", "inappropriately", - "incompatablity", "incompatibility", - "incompatiblity", "incompatibility", - "inconsequental", "inconsequential", - "inconsistentcy", "inconsistency", - "incontrollably", "uncontrollably", - "inconventional", "unconventional", - "inconvienenced", "inconvenience", - "indestrictible", "indestructible", - "indestructuble", "indestructible", - "indetification", "identification", - "indistructible", "indestructible", - "individuallity", "individuality", - "indocrtination", "indoctrination", - "indoctrication", "indoctrination", - "indoktrination", "indoctrination", - "industiralized", "industrialized", - "industrailized", "industrialized", - "industrualized", "industrialized", - "industructible", "indestructible", - "inexplicablely", "inexplicably", - "infrastracture", "infrastructure", - "infrastructuur", "infrastructure", - "infrastrucutre", "infrastructure", - "infrastrukture", "infrastructure", - "infrastrutture", "infrastructure", - "infrasturcture", "infrastructure", - "initalisations", "initialisations", - "initalizations", "initializations", - "inplementation", "implementation", - "inspirationnal", "inspirational", - "instinctivelly", "instinctively", - "institutionale", "institutionalized", - "institutionals", "institutions", - "institutionnal", "institutional", - "intellectualis", "intellectuals", - "intellectualls", "intellectuals", - "intellecutally", "intellectually", - "intercepticons", "interceptions", - "interchangable", "interchangeable", - "interchangably", "interchangeably", - "interchangeble", "interchangeable", - "interchangebly", "interchangeably", - "interlectually", "intellectually", - "internationaal", "international", - "internationaly", "internationally", - 
"internationnal", "international", - "interpersonnal", "interpersonal", - "interpertation", "interpretation", - "interpratation", "interpretation", - "interpretacion", "interpretation", - "interpretaiton", "interpretations", - "interpretating", "interpretation", - "interpritation", "interpretation", - "interstellaire", "interstellar", - "intillectually", "intellectually", - "intrepretation", "interpretation", - "invesitgations", "investigations", - "investiagtions", "investigations", - "investigatiors", "investigations", - "investigativos", "investigations", - "investigstions", "investigations", - "irrationallity", "irrationally", - "irresponsibile", "irresponsible", - "journalistisch", "journalistic", - "justificativos", "justifications", - "koncentrations", "concentrations", - "liberatrianism", "libertarianism", - "libertarainism", "libertarianism", - "libertariansim", "libertarianism", - "libertarinaism", "libertarianism", - "libertaryanism", "libertarianism", - "libertatianism", "libertarianism", - "liberterianism", "libertarianism", - "libretarianism", "libertarianism", - "manufactureers", "manufactures", - "manufactureras", "manufactures", - "manufacturered", "manufactured", - "manufactureres", "manufacturers", - "manufactureros", "manufactures", - "massachusettes", "massachusetts", - "massachussetts", "massachusetts", - "mataphorically", "metaphorically", - "mathameticians", "mathematicians", - "mathemagically", "mathematically", - "mathematitians", "mathematicians", - "mathemetically", "mathematically", - "mathemeticians", "mathematicians", - "mathimatically", "mathematically", - "mediterainnean", "mediterranean", - "mediterrannean", "mediterranean", - "metaphotically", "metaphorically", - "metephorically", "metaphorically", - "methaporically", "metaphorically", - "metiphorically", "metaphorically", - "metophorically", "metaphorically", - "metropolitaine", "metropolitan", - "misconseptions", "misconceptions", - "misinterperted", "misinterpreted", - "misintrepreted", "misinterpreted", - "mulitnationals", "multinational", - "mulitplication", "multiplication", - "multiplicacion", "multiplication", - "multiplicaiton", "multiplication", - "multiplicativo", "multiplication", - "multiplikation", "multiplication", - "mutlinationals", "multinational", - "mutliplication", "multiplication", - "nationalisitic", "nationalistic", - "nationalistics", "nationalists", - "nationalisties", "nationalists", - "nationalistisk", "nationalists", - "neighbourhoood", "neighbourhood", - "nieghbourhoods", "neighbourhood", - "northereastern", "northeastern", - "objectificaton", "objectification", - "opthalmologist", "ophthalmologist", - "organizacional", "organizational", - "organizaitonal", "organizational", - "organziational", "organizational", - "orginazational", "organizational", - "overestemating", "overestimating", - "overextimating", "overestimating", - "overhwelmingly", "overwhelmingly", - "overhwlemingly", "overwhelmingly", - "overpolulation", "overpopulation", - "overpopluation", "overpopulation", - "oversetimating", "overestimating", - "overshadowered", "overshadowed", - "overwhemlingly", "overwhelmingly", - "overwhlemingly", "overwhelmingly", - "paliamentarian", "parliamentarian", - "parliamentiary", "parliamentary", - "performancepcs", "performances", - "personalitites", "personalities", - "pharamceutical", "pharmaceutical", - "pharmaceudical", "pharmaceutical", - "pharmacuetical", "pharmaceutical", - "pharmaseutical", "pharmaceutical", - "pharmeceutical", "pharmaceutical", - "philosophicaly", 
"philosophically", - "phramaceutical", "pharmaceutical", - "playthroughers", "playthroughs", - "porportionally", "proportionally", - "practitionners", "practitioners", - "predeterminded", "predetermined", - "predominantely", "predominantly", - "predominantley", "predominantly", - "preinitalizing", "preinitializing", - "prerequisities", "prerequisite", - "procrastinatin", "procrastination", - "procrastinaton", "procrastination", - "professionials", "professionalism", - "professionnals", "professionals", - "profitabillity", "profitability", - "progressivelly", "progressively", - "progressivisme", "progressives", - "pronounciation", "pronunciation", - "proportianally", "proportionally", - "proportionalty", "proportionally", - "proportionella", "proportionally", - "proprotionally", "proportionally", - "protruberances", "protuberances", - "pseudononymous", "pseudonymous", - "psychologicaly", "psychologically", - "qaulifications", "qualification", - "qualifiactions", "qualification", - "qualificaitons", "qualifications", - "quarterbackers", "quarterbacks", - "rationalizaton", "rationalization", - "reaponsibility", "responsibility", - "recommandation", "recommendation", - "recommedations", "recommendations", - "recommondation", "recommendation", - "reconnaissence", "reconnaissance", - "reconstruccion", "reconstruction", - "reconsturction", "reconstruction", - "redistirbution", "redistribution", - "redistribucion", "redistribution", - "redistributivo", "redistribution", - "redistrubition", "redistribution", - "refridgeration", "refrigeration", - "rehabilitacion", "rehabilitation", - "rehabilitaiton", "rehabilitation", - "reinforcemnets", "reinforcements", - "rekommendation", "recommendation", - "rektifications", "certifications", - "reniforcements", "reinforcements", - "repersentation", "representation", - "represantation", "representation", - "represantative", "representative", - "representacion", "representation", - "representaiton", "representations", - "representatief", "representative", - "representating", "representation", - "representativo", "representation", - "representetive", "representative", - "representitive", "representative", - "representstion", "representations", - "representstive", "representatives", - "represetnation", "representations", - "represnetation", "representations", - "reprezentative", "representative", - "repsonsibility", "responsibility", - "resistribution", "redistribution", - "responcibility", "responsibility", - "responisbility", "responsibility", - "responnsibilty", "responsibility", - "responsability", "responsibility", - "responsibilies", "responsibilities", - "responsibities", "responsibilities", - "restaraunteurs", "restaurateurs", - "retroactivelly", "retroactively", - "revolutionairy", "revolutionary", - "revolutionnary", "revolutionary", - "ridicilousness", "ridiculousness", - "ridicoulusness", "ridiculousness", - "rienforcements", "reinforcements", - "righteoussness", "righteousness", - "satisfactoraly", "satisfactory", - "satisfactority", "satisfactorily", - "sceintifically", "scientifically", - "schizophrentic", "schizophrenic", - "screenwrighter", "screenwriter", - "sensacionalism", "sensationalism", - "sensacionalist", "sensationalist", - "sensasionalism", "sensationalism", - "sensasionalist", "sensationalist", - "sensationality", "sensationalist", - "sensationalizm", "sensationalism", - "sensationalsim", "sensationalism", - "sensationilism", "sensationalism", - "sensationilist", "sensationalist", - "sensationslism", "sensationalism", - 
"sensetionalism", "sensationalism", - "sensibilisiert", "sensibilities", - "sentationalism", "sensationalism", - "sentationalist", "sensationalist", - "senzationalism", "sensationalism", - "senzationalist", "sensationalist", - "sepcifications", "specification", - "simaltaneously", "simultaneously", - "simeltaneously", "simultaneously", - "similtaneously", "simultaneously", - "simlutaneously", "simultaneously", - "simplificacion", "simplification", - "simplificaiton", "simplification", - "simplificating", "simplification", - "simulatenously", "simultaneously", - "simulatneously", "simultaneously", - "simultaenously", "simultaneously", - "simultainously", "simultaneously", - "simultaneoulsy", "simultaneously", - "simultaniously", "simultaneously", - "simulteanously", "simultaneously", - "sistematically", "systematically", - "slaugterhouses", "slaughterhouses", - "specailization", "specialization", - "specialication", "specialization", - "specializaiton", "specialization", - "specificaitons", "specification", - "speciliazation", "specialization", - "spectacularely", "spectacularly", - "spectacularily", "spectacularly", - "spesifications", "specifications", - "spezialisation", "specialization", - "sportsmansship", "sportsmanship", - "spreadsheeters", "spreadsheets", - "straightforwad", "straightforward", - "subconcsiously", "subconsciously", - "subconsicously", "subconsciously", - "subsconciously", "subconsciously", - "sunconsciously", "subconsciously", - "superintendant", "superintendent", - "suppliementing", "supplementing", - "surrepetitious", "surreptitious", - "survivabililty", "survivability", - "survivabillity", "survivability", - "sustainabiltiy", "sustainability", - "syncronization", "synchronization", - "systemetically", "systematically", - "systimatically", "systematically", - "technologicaly", "technologically", - "thermodinamics", "thermodynamics", - "thermodyanmics", "thermodynamics", - "thermodymamics", "thermodynamics", - "thermodymanics", "thermodynamics", - "thermodynamcis", "thermodynamics", - "thermodynanics", "thermodynamics", - "thermodynmaics", "thermodynamics", - "thernodynamics", "thermodynamics", - "theromdynamics", "thermodynamics", - "transformacion", "transformation", - "transfromation", "transformation", - "transitionable", "transitional", - "transitionning", "transitioning", - "transofrmation", "transformation", - "trasnformation", "transformation", - "trasnportation", "transportation", - "unbelievablely", "unbelievably", - "unchallengable", "unchallengeable", - "uncomfortabley", "uncomfortably", - "uncomfortablly", "uncomfortably", - "unconciousness", "unconsciousness", - "unconditionaly", "unconditionally", - "unconditionnal", "unconditional", - "unconsciouslly", "unconsciously", - "uncontrallable", "uncontrollable", - "uncontrallably", "uncontrollably", - "uncontrolablly", "uncontrollably", - "unconvectional", "unconventional", - "unconvencional", "unconventional", - "unconvensional", "unconventional", - "unconventianal", "unconventional", - "underastimated", "underestimated", - "underestamated", "underestimated", - "underestemated", "underestimated", - "underestimeted", "underestimated", - "undersetimated", "underestimated", - "understandebly", "understandably", - "understandible", "understandable", - "understandibly", "understandably", - "undestructible", "indestructible", - "unforetunately", "unfortunately", - "unfortunatelly", "unfortunately", - "unfourtunately", "unfortunately", - "uninitalizable", "uninitializable", - "unintelligient", "unintelligent", - 
"unintentionaly", "unintentionally", - "unintentionnal", "unintentional", - "unmanouverable", "unmaneuverable", - "unneccessarily", "unnecessarily", - "unnecessarilly", "unnecessarily", - "unprecendented", "unprecedented", - "unprofessionel", "unprofessional", - "unreasonablely", "unreasonably", - "unsubstantiaed", "unsubstantiated", - "unsurprizingly", "unsurprisingly", - "vizualisations", "visualization", - "vulnerabilites", "vulnerabilities", - "vulnerabillity", "vulnerability", - "vulnerablility", "vulnerability", - "wholeheartadly", "wholeheartedly", - "wholeheartidly", "wholeheartedly", - "abbrievations", "abbreviation", - "accelleration", "acceleration", - "accomadations", "accommodations", - "accommadating", "accommodating", - "accommadation", "accommodation", - "accommidation", "accommodation", - "accomodations", "accommodations", - "accomondating", "accommodating", - "accomondation", "accommodation", - "accomplishent", "accomplishment", - "accountabilty", "accountability", - "accredidation", "accreditation", - "acknolwedging", "acknowledging", - "acknowlegding", "acknowledging", - "acomplishment", "accomplishment", - "acquaintaince", "acquaintance", - "acquaintences", "acquaintances", - "acquaintinces", "acquaintances", - "acquanitances", "acquaintance", - "acquantainces", "acquaintances", - "acquantiances", "acquaintances", - "acquiantances", "acquaintances", - "acquiantences", "acquaintances", - "adminastrator", "administrator", - "administartor", "administrator", - "administraion", "administration", - "administraron", "administrator", - "administrater", "administrator", - "administratio", "administrator", - "administraton", "administration", - "adminsitrator", "administrator", - "adminstration", "administration", - "adminstrative", "administrative", - "admissability", "admissibility", - "adnimistrator", "administrators", - "adverticement", "advertisement", - "advertisiment", "advertisement", - "advertisments", "advertisements", - "advirtisement", "advertisement", - "aestethically", "aesthetically", - "aesthatically", "aesthetically", - "aesthitically", "aesthetically", - "affectionnate", "affectionate", - "aforementiond", "aforementioned", - "agriculturual", "agricultural", - "agrumentative", "argumentative", - "alterantively", "alternatively", - "alternativets", "alternatives", - "alternativley", "alternatively", - "alternitavely", "alternatively", - "alternitively", "alternatively", - "aninteresting", "uninteresting", - "annoucnements", "announcements", - "antagonisitic", "antagonistic", - "anthropolgist", "anthropologist", - "apporpriately", "appropriately", - "apporpriation", "appropriation", - "apporximately", "approximately", - "appreciateing", "appreciating", - "appreciateive", "appreciative", - "appreciationg", "appreciating", - "appropirately", "appropriately", - "appropiration", "appropriation", - "appropraitely", "appropriately", - "appropreation", "appropriation", - "appropriatley", "appropriately", - "appropropiate", "appropriate", - "approrpiation", "appropriation", - "approxamately", "approximately", - "approxiamtely", "approximately", - "approximatley", "approximately", - "approximitely", "approximately", - "aqcuaintances", "acquaintances", - "aqquaintances", "acquaintances", - "archaelogical", "archaeological", - "archaelogists", "archaeologists", - "archeaologist", "archeologist", - "archetectural", "architectural", - "architechture", "architecture", - "architechural", "architectural", - "architectrual", "architectural", - "architecutral", "architectural", - 
"argumentitive", "argumentative", - "arugmentative", "argumentative", - "asethetically", "aesthetically", - "assasinations", "assassinations", - "audomoderator", "automoderator", - "australianess", "australians", - "authenticaion", "authentication", - "authenticaton", "authentication", - "autherization", "authorization", - "authoratitive", "authoritative", - "authoritatian", "authoritarian", - "authoritation", "authorization", - "authorititive", "authoritative", - "authoritorian", "authoritarian", - "authorotative", "authoritative", - "authroization", "authorization", - "automoderador", "automoderator", - "automoderater", "automoderator", - "automodorator", "automoderator", - "automoterator", "automoderator", - "autoritharian", "authoritarian", - "availabillity", "availability", - "awknowledging", "acknowledging", - "billingualism", "bilingualism", - "billionairres", "billionaire", - "borderlanders", "borderlands", - "breadtfeeding", "breastfeeding", - "breastfeading", "breastfeeding", - "breatsfeeding", "breastfeeding", - "broadacasting", "broadcasting", - "bureaucractic", "bureaucratic", - "bureaucratics", "bureaucrats", - "bureaucratius", "bureaucrats", - "californiaman", "californian", - "calrification", "clarification", - "capitalizaton", "capitalization", - "carbohdyrates", "carbohydrates", - "carbohidrates", "carbohydrates", - "carbohyrdates", "carbohydrates", - "carboyhdrates", "carbohydrates", - "carthographer", "cartographer", - "catagorically", "categorically", - "catastrophies", "catastrophe", - "catastrophize", "catastrophe", - "catigorically", "categorically", - "catterpillars", "caterpillars", - "celebrationis", "celebrations", - "ceritfication", "certifications", - "certificaiton", "certification", - "championchips", "championship", - "championshiop", "championships", - "championsship", "championships", - "chanpionships", "championships", - "charactarized", "characterized", - "characterisic", "characteristic", - "characteristc", "characteristics", - "characterists", "characteristics", - "charicterized", "characterized", - "charismatisch", "charismatic", - "checkpointusa", "checkpoints", - "cheeseburgare", "cheeseburger", - "cheeseburgler", "cheeseburger", - "cheeseburguer", "cheeseburger", - "cheezeburgers", "cheeseburgers", - "chornological", "chronological", - "chronoligical", "chronological", - "chronologicly", "chronological", - "cinematograhy", "cinematography", - "cinematograpy", "cinematography", - "circomference", "circumference", - "circumcission", "circumcision", - "circumferance", "circumference", - "circumsicions", "circumcision", - "circumstanial", "circumstantial", - "circumstantal", "circumstantial", - "circumstnaces", "circumstance", - "circunference", "circumference", - "circunstances", "circumstances", - "cirucmference", "circumference", - "cirucmstances", "circumstances", - "civilications", "civilizations", - "civilizaitons", "civilizations", - "clarificaiton", "clarification", - "clasification", "clarification", - "clerification", "clarification", - "coincidentaly", "coincidentally", - "coincidential", "coincidental", - "colaborations", "collaborations", - "collabaration", "collaboration", - "collaberation", "collaboration", - "collaberative", "collaborative", - "collaboratore", "collaborate", - "collectioners", "collections", - "collectivelly", "collectively", - "collobaration", "collaboration", - "combatibility", "compatibility", - "comeptitively", "competitively", - "comfortablely", "comfortably", - "comfortablity", "comfortably", - "comfrontation", 
"confrontation", - "commemerative", "commemorative", - "commericially", "commercially", - "commerorative", "commemorative", - "comminication", "communication", - "comminucation", "communications", - "commissionees", "commissions", - "commissionned", "commissioned", - "commissionner", "commissioner", - "commmemorated", "commemorated", - "commuications", "communications", - "commuincation", "communications", - "communciation", "communication", - "communiaction", "communications", - "communicaiton", "communication", - "communicatoin", "communications", - "communicatons", "communications", - "compadibility", "compatibility", - "comparativley", "comparatively", - "comparetively", "comparatively", - "comparitavely", "comparatively", - "comparitively", "comparatively", - "compatability", "compatibility", - "compatibiltiy", "compatibility", - "compeditively", "competitively", - "compensantion", "compensation", - "compensationg", "compensating", - "comperatively", "comparatively", - "comperhension", "comprehension", - "competatively", "competitively", - "competitavely", "competitively", - "competitevely", "competitively", - "competitivley", "competitively", - "competiveness", "competitiveness", - "compilcations", "complication", - "compitability", "compatibility", - "complciations", "complication", - "complecations", "complications", - "compliactions", "complication", - "complicaitons", "complication", - "complilations", "complications", - "complimentery", "complimentary", - "complimentoni", "complimenting", - "complimentory", "complimentary", - "comprehention", "comprehension", - "computacional", "computational", - "comtamination", "contamination", - "comtemplating", "contemplating", - "concatination", "contamination", - "conceivablely", "conceivably", - "concencration", "concentration", - "concenrtation", "concentrations", - "concentartion", "concentrations", - "concentracion", "concentration", - "concentraited", "concentrated", - "concentraiton", "concentrations", - "concentratons", "concentrations", - "concervatives", "conservatives", - "concideration", "consideration", - "concioussness", "consciousness", - "concnetration", "concentrations", - "concsiousness", "consciousness", - "condascending", "condescending", - "condescencion", "condescension", - "condescendion", "condescension", - "condescensing", "condescension", - "condiscending", "condescending", - "conditionning", "conditioning", - "condradicting", "contradicting", - "condradiction", "contradiction", - "condradictory", "contradictory", - "conecntration", "concentrations", - "conenctration", "concentrations", - "confidentally", "confidentially", - "configruation", "configurations", - "configuartion", "configuration", - "configuracion", "configuration", - "configuraiton", "configuration", - "configuratoin", "configurations", - "configureable", "configurable", - "confrentation", "confrontation", - "confrontacion", "confrontation", - "confrontating", "confrontation", - "confrontativo", "confrontation", - "congratualted", "congratulate", - "conifguration", "configurations", - "conisderation", "considerations", - "connecticunts", "connecticut", - "connectivitiy", "connectivity", - "conpassionate", "compassionate", - "conplications", "complications", - "conplimentary", "complimentary", - "conplimenting", "complimenting", - "conprehension", "comprehension", - "consdieration", "considerations", - "consenquently", "consequently", - "consentrating", "concentrating", - "consentration", "concentration", - "consequencies", "consequence", - 
"consequentely", "consequently", - "consequeseces", "consequences", - "conservatisim", "conservatism", - "conservativsm", "conservatism", - "conservitives", "conservatives", - "consicousness", "consciousness", - "considerabely", "considerable", - "considerabile", "considerable", - "considerabley", "considerably", - "considerablly", "considerably", - "consideracion", "consideration", - "consideratoin", "considerations", - "considerstion", "considerations", - "considertaion", "considerations", - "consituencies", "constituencies", - "consitutional", "constitutional", - "constallation", "constellation", - "constarnation", "consternation", - "constillation", "constellation", - "constituintes", "constituents", - "constituional", "constitutional", - "constitutents", "constitutes", - "constitutinal", "constitutional", - "constructicon", "construction", - "constructieve", "constructive", - "constructiong", "constructing", - "consttruction", "construction", - "contaminacion", "contamination", - "contaminanted", "contaminated", - "contanimation", "contamination", - "contenplating", "contemplating", - "contimplating", "contemplating", - "contraceptivo", "contraception", - "contradiccion", "contradiction", - "contradicitng", "contradicting", - "contradiciton", "contradiction", - "contradictary", "contradictory", - "contradictons", "contradicts", - "contraticting", "contradicting", - "contravercial", "controversial", - "contraversial", "controversial", - "contreception", "contraception", - "contreversial", "controversial", - "contributeurs", "contributes", - "contributiors", "contributors", - "contriception", "contraception", - "contridictory", "contradictory", - "contritutions", "contributions", - "contriversial", "controversial", - "controception", "contraception", - "controdicting", "contradicting", - "controdiction", "contradiction", - "controvercial", "controversial", - "controverisal", "controversial", - "controversary", "controversy", - "controversity", "controversy", - "controvertial", "controversial", - "contstruction", "construction", - "conventionnal", "conventional", - "converastions", "conservation", - "conversationa", "conservation", - "conversationg", "conservation", - "conversationy", "conservation", - "conversatiosn", "conservation", - "conversatives", "conservatives", - "converstaions", "conversations", - "convorsations", "conversations", - "cooresponding", "corresponding", - "coorperations", "corporations", - "correctionals", "corrections", - "correpsonding", "corresponding", - "correspondant", "correspondent", - "correspondece", "correspondence", - "corresponders", "corresponds", - "corresponsing", "corresponding", - "corrispondant", "correspondent", - "corrisponding", "corresponding", - "corrosponding", "corresponding", - "costomization", "customization", - "costumization", "customization", - "counterfeight", "counterfeit", - "creationistas", "creationists", - "cricumference", "circumference", - "cringeworthey", "cringeworthy", - "cringeworthly", "cringeworthy", - "crytopgraphic", "cryptographic", - "curcumference", "circumference", - "curcumstances", "circumstances", - "custumization", "customization", - "cuztomization", "customization", - "decentraliced", "decentralized", - "decentrilized", "decentralized", - "decomissioned", "decommissioned", - "decompositing", "decomposing", - "definitivelly", "definitively", - "deinitalizing", "deinitializing", - "demenstration", "demonstration", - "democraticaly", "democratically", - "democraticlly", "democratically", - "demoninations", 
"denominations", - "demonstarting", "demonstrating", - "demonstartion", "demonstration", - "demonstraiton", "demonstrations", - "demonstratbly", "demonstrably", - "demonstraties", "demonstrate", - "demonstrativo", "demonstration", - "demosntrating", "demonstrating", - "demosntration", "demonstrations", - "denomenations", "denominations", - "denomonations", "denominations", - "deomnstration", "demonstrations", - "dermatalogist", "dermatologist", - "dermatolagist", "dermatologist", - "dermatoligist", "dermatologist", - "dermatologyst", "dermatologist", - "dermetologist", "dermatologist", - "dermitologist", "dermatologist", - "derpatologist", "dermatologist", - "desentralized", "decentralized", - "desillusioned", "disillusioned", - "desintegrated", "disintegrated", - "desinterested", "disinterested", - "determenation", "determination", - "determinacion", "determination", - "determinining", "determining", - "determinisitc", "deterministic", - "determinsitic", "deterministic", - "detmatologist", "dermatologist", - "developmently", "developmental", - "dezentralized", "decentralized", - "differantiate", "differentiate", - "differenciate", "differentiate", - "differintiate", "differentiate", - "diffirentiate", "differentiate", - "disadvandages", "disadvantaged", - "disadvantadge", "disadvantaged", - "disadvanteged", "disadvantaged", - "disadvanteges", "disadvantages", - "disadvatanges", "disadvantages", - "disadventaged", "disadvantaged", - "disadventages", "disadvantages", - "disallusioned", "disillusioned", - "disappearence", "disappearance", - "disappearnace", "disappearance", - "disappearring", "disappearing", - "disatvantaged", "disadvantaged", - "disatvantages", "disadvantages", - "disciplinairy", "disciplinary", - "disciplinerad", "disciplined", - "discipliniary", "disciplinary", - "disconnecters", "disconnects", - "discontinuted", "discontinued", - "discrimianted", "discriminated", - "discriminante", "discriminate", - "discriminatie", "discriminate", - "discriminatin", "discrimination", - "disillisioned", "disillusioned", - "disillutioned", "disillusioned", - "disingenuious", "disingenuous", - "disollusioned", "disillusioned", - "disrecpectful", "disrespectful", - "disrecpecting", "disrespecting", - "disrepsectful", "disrespectful", - "disrepsecting", "disrespecting", - "disresepctful", "disrespectful", - "disresepcting", "disrespecting", - "disrespection", "disrespecting", - "disrespekting", "disrespecting", - "disrispectful", "disrespectful", - "disrispecting", "disrespecting", - "dissagreement", "disagreement", - "dissapearance", "disappearance", - "dissapointted", "dissapointed", - "dissappointed", "disappointed", - "dissobediance", "disobedience", - "dissobedience", "disobedience", - "distingishing", "distinguishing", - "distinguising", "distinguishing", - "distinquished", "distinguished", - "distirbutions", "distributions", - "distiungished", "distinguished", - "distribustion", "distributions", - "distributiors", "distributors", - "distributivos", "distributions", - "distrobutions", "distributions", - "distrubitions", "distributions", - "distuingished", "distinguished", - "documantaries", "documentaries", - "documenatries", "documentaries", - "documentacion", "documentation", - "documentaires", "documentaries", - "documentaiton", "documentation", - "documentarios", "documentaries", - "documentaties", "documentaries", - "documentating", "documentation", - "documenteries", "documentaries", - "documentories", "documentaries", - "drammatically", "grammatically", - "dsyfunctional", 
"dysfunctional", - "dumbfoundeads", "dumbfounded", - "dusfunctional", "dysfunctional", - "dustification", "justification", - "dysfonctional", "dysfunctional", - "dysfucntional", "dysfunctional", - "dysfuncitonal", "dysfunctional", - "dysfunktional", "dysfunctional", - "easthetically", "aesthetically", - "effectiviness", "effectiveness", - "effictiveness", "effectiveness", - "effortlessely", "effortlessly", - "effortlessley", "effortlessly", - "embarrasement", "embarrassment", - "embarrasments", "embarrassment", - "embarressment", "embarrassment", - "emberrassment", "embarrassment", - "encarceration", "incarceration", - "encorporating", "incorporating", - "encyclopeadia", "encyclopedia", - "encyclopeadic", "encyclopedia", - "encyclopeedia", "encyclopedia", - "encycolpedias", "encyclopedia", - "endoctrinated", "indoctrinated", - "enlightenting", "enlightening", - "enlightnement", "enlightenment", - "enligthenment", "enlightenment", - "enteratinment", "entertainment", - "enterpreneurs", "entrepreneurs", - "enterprenuers", "entrepreneurs", - "enterpreuners", "entrepreneurs", - "entertianment", "entertainment", - "enthusiasists", "enthusiasts", - "enthusiastics", "enthusiasts", - "entrepraneurs", "entrepreneurs", - "entreprenaurs", "entrepreneurs", - "entrepreneuer", "entrepreneurs", - "entreprenours", "entrepreneurs", - "entreprenuers", "entrepreneurs", - "entreprenures", "entrepreneurs", - "entrepreuners", "entrepreneurs", - "entretainment", "entertainment", - "enviornmental", "environmental", - "environemntal", "environmental", - "environmently", "environmental", - "envolutionary", "evolutionary", - "envrionmental", "environmental", - "estabilshment", "establishments", - "establishemnt", "establishments", - "establishmnet", "establishments", - "establsihment", "establishments", - "estbalishment", "establishments", - "ethnocentricm", "ethnocentrism", - "evolutionairy", "evolutionary", - "evolutionarly", "evolutionary", - "evolutionnary", "evolutionary", - "exaggeratting", "exaggerating", - "excpetionally", "exceptionally", - "executioneers", "executioner", - "existentiella", "existential", - "expectionally", "exceptionally", - "experementing", "experimenting", - "experienceing", "experiencing", - "experimentais", "experiments", - "experimention", "experimenting", - "experimentors", "experiments", - "expirementing", "experimenting", - "expodentially", "exponentially", - "exponantially", "exponentially", - "exponencially", "exponentially", - "exponentiella", "exponential", - "extraodrinary", "extraordinary", - "extraordianry", "extraordinary", - "extraordinair", "extraordinary", - "extraordinaly", "extraordinary", - "extraoridnary", "extraordinary", - "extremeophile", "extremophile", - "extroardinary", "extraordinary", - "familiarizate", "familiarize", - "fantasitcally", "fantastically", - "fantasmically", "fantastically", - "fantistically", "fantastically", - "faptastically", "fantastically", - "figurativeley", "figuratively", - "figurativelly", "figuratively", - "frankenstiens", "frankenstein", - "frankenstined", "frankenstein", - "frankenstiner", "frankenstein", - "frankenstines", "frankenstein", - "friendzoneado", "friendzoned", - "fucntionality", "functionality", - "funcitonality", "functionality", - "functionailty", "functionality", - "fundamentalis", "fundamentals", - "fundamnetally", "fundamentally", - "fundementally", "fundamentally", - "fundimentally", "fundamentally", - "gamifications", "ramifications", - "generalizaing", "generalizing", - "generalizaton", "generalization", - 
"generationals", "generations", - "generationens", "generations", - "generationers", "generations", - "generationnal", "generational", - "geographicaly", "geographically", - "geographicial", "geographical", - "geometricians", "geometers", - "goreshadowing", "foreshadowing", - "governmential", "governmental", - "gradification", "gratification", - "grammarically", "grammatically", - "grandchildern", "grandchildren", - "gratificacion", "gratification", - "gratificaiton", "gratification", - "grativational", "gravitational", - "gravitacional", "gravitational", - "gravitaitonal", "gravitational", - "hallcuination", "hallucination", - "hallicunation", "hallucination", - "hallucenation", "hallucination", - "halluciantion", "hallucinations", - "hallukination", "hallucination", - "hallunication", "hallucination", - "hallusination", "hallucination", - "halluzination", "hallucination", - "heiroglyphics", "hieroglyphics", - "hellucination", "hallucination", - "highlightning", "highlighting", - "homesexuality", "homosexuality", - "homosexualtiy", "homosexuality", - "homosexulaity", "homosexuality", - "horizontallly", "horizontally", - "hullucination", "hallucination", - "hypocriticial", "hypocritical", - "hypotheticaly", "hypothetically", - "hystericallly", "hysterically", - "identificaton", "identification", - "ideoligically", "ideologically", - "ideosyncratic", "idiosyncratic", - "idiologically", "ideologically", - "illistrations", "illustrations", - "illustartions", "illustrations", - "imperfactions", "imperfections", - "impersinating", "impersonating", - "implementaion", "implementation", - "implementatin", "implementations", - "implimenation", "implementation", - "imprefections", "imperfections", - "impresonating", "impersonating", - "inaccessibile", "inaccessible", - "inadventently", "inadvertently", - "inadverdently", "inadvertently", - "inadvertantly", "inadvertently", - "inadvertendly", "inadvertently", - "inapporpriate", "inappropriate", - "inappropirate", "inappropriate", - "inappropraite", "inappropriate", - "inaproppriate", "inappropriate", - "incarcaration", "incarceration", - "incarciration", "incarceration", - "incarseration", "incarceration", - "incerceration", "incarceration", - "incidentially", "incidentally", - "incomfortable", "uncomfortable", - "incomfortably", "uncomfortably", - "incompatabile", "incompatible", - "incompatiable", "incompatible", - "incompatibile", "incompatible", - "inconciderate", "inconsiderate", - "inconcistency", "inconsistency", - "inconditional", "unconditional", - "inconsciously", "unconsciously", - "inconsiderant", "inconsiderate", - "inconsistance", "inconsistency", - "inconsistancy", "inconsistency", - "inconsistenly", "inconsistency", - "inconsistensy", "inconsistency", - "inconsistenty", "inconsistency", - "inconveinence", "inconvenience", - "inconveniance", "inconvenience", - "inconveniente", "inconvenience", - "inconvienence", "inconvenience", - "incoroporated", "incorporated", - "incorparating", "incorporating", - "incorperating", "incorporating", - "incorperation", "incorporation", - "incorruptable", "incorruptible", - "incramentally", "incrementally", - "incrementarla", "incremental", - "incrementarlo", "incremental", - "indavertently", "inadvertently", - "indefinitelly", "indefinitely", - "independantes", "independents", - "independantly", "independently", - "independendet", "independent", - "independendly", "independently", - "indepentently", "independently", - "indespensable", "indispensable", - "indespensible", "indispensable", - 
"indestructble", "indestructible", - "indestructibe", "indestructible", - "indictrinated", "indoctrinated", - "indipendently", "independently", - "indispensible", "indispensable", - "indivuduality", "individuality", - "indocrtinated", "indoctrinated", - "indocternated", "indoctrinated", - "indoctornated", "indoctrinated", - "indoctrinatie", "indoctrinated", - "indoctrinatin", "indoctrination", - "indoctronated", "indoctrinated", - "industrialied", "industrialized", - "industrialzed", "industrialized", - "inexeprienced", "inexperience", - "inexpeirenced", "inexperience", - "inexpereinced", "inexperienced", - "inexperianced", "inexperienced", - "inexperiecned", "inexperience", - "inexperineced", "inexperience", - "inexpierenced", "inexperienced", - "inexplicabley", "inexplicably", - "inexplicablly", "inexplicably", - "infilitration", "infiltration", - "infrastructre", "infrastructure", - "infrastrucure", "infrastructure", - "inintelligent", "unintelligent", - "ininteresting", "uninteresting", - "initalisation", "initialisation", - "initalization", "initialization", - "inperfections", "imperfections", - "inpersonating", "impersonating", - "inpossibility", "impossibility", - "inpredictable", "unpredictable", - "inresponsible", "irresponsible", - "insectiverous", "insectivorous", - "insecuritites", "insecurities", - "insiginficant", "insignificant", - "insiginifcant", "insignificant", - "insignificent", "insignificant", - "insignificunt", "insignificant", - "insignifigant", "insignificant", - "insiprational", "inspirational", - "insperational", "inspirational", - "inspiritional", "inspirational", - "inspriational", "inspirational", - "instantaenous", "instantaneous", - "instantanious", "instantaneous", - "instanteneous", "instantaneous", - "instantenious", "instantaneous", - "instincitvely", "instinctively", - "instinctivley", "instinctively", - "instititional", "institutional", - "institutionel", "institutional", - "insturmentals", "instrumental", - "instutitional", "institutional", - "insustainable", "unsustainable", - "intelelctuals", "intellectuals", - "intellectualy", "intellectually", - "intellectuels", "intellectuals", - "intellecutals", "intellectuals", - "intellegently", "intelligently", - "intelluctuals", "intellectuals", - "intepretation", "interpretation", - "intereactions", "intersections", - "interesctions", "intersections", - "interlectuals", "intellectuals", - "intermittient", "intermittent", - "intermittment", "intermittent", - "internacional", "international", - "interpersonel", "interpersonal", - "interpresonal", "interpersonal", - "interpretaion", "interpretation", - "interpretarea", "interpreter", - "interpretarem", "interpreter", - "interpretares", "interpreter", - "interpretarse", "interpreter", - "interpretarte", "interpreter", - "interpretatin", "interpretations", - "interpreteert", "interpreter", - "interragation", "interrogation", - "interregation", "interrogation", - "interrigation", "interrogation", - "interrogacion", "interrogation", - "interrogativo", "interrogation", - "intertainment", "entertainment", - "intillectuals", "intellectuals", - "intraspection", "introspection", - "intrensically", "intrinsically", - "intriniscally", "intrinsically", - "intrinsecally", "intrinsically", - "intrisincally", "intrinsically", - "intristically", "intrinsically", - "introductiory", "introductory", - "introspeccion", "introspection", - "introspectivo", "introspection", - "introspektion", "introspection", - "invertibrates", "invertebrates", - "invesitgation", "investigation", - 
"invesitgative", "investigative", - "invesitgators", "investigators", - "investagators", "investigators", - "investegating", "investigating", - "investegators", "investigators", - "investiagtion", "investigation", - "investiagtive", "investigative", - "investigacion", "investigation", - "investigaiton", "investigations", - "investigaters", "investigators", - "investigativo", "investigation", - "investigatons", "investigations", - "investigsting", "investigating", - "investigstion", "investigations", - "investogators", "investigators", - "invisibillity", "invisibility", - "involuntarely", "involuntary", - "involuntarity", "involuntary", - "invulnerabile", "invulnerable", - "irrationallly", "irrationally", - "irresponcible", "irresponsible", - "irresponisble", "irresponsible", - "irresponsable", "irresponsible", - "irresponsbile", "irresponsible", - "irreversiable", "irreversible", - "irreversibelt", "irreversible", - "irreversibile", "irreversible", - "irrisponsible", "irresponsible", - "jacksonvillle", "jacksonville", - "journalisitic", "journalistic", - "journalistens", "journalists", - "journalisters", "journalists", - "journalistisk", "journalists", - "jsutification", "justifications", - "jurisdicitons", "jurisdictions", - "jurisidctions", "jurisdictions", - "juristictions", "jurisdictions", - "jursidictions", "jurisdictions", - "jusitfication", "justifications", - "justifiaction", "justifications", - "justificacion", "justification", - "justificaiton", "justification", - "justificativo", "justification", - "justificatons", "justifications", - "justificstion", "justifications", - "justiifcation", "justifications", - "karbohydrates", "carbohydrates", - "knoweldgeable", "knowledgeable", - "knowledegable", "knowledgeable", - "knowledgebale", "knowledgable", - "knowlegdeable", "knowledgeable", - "kollaboration", "collaboration", - "koncentration", "concentration", - "konfiguration", "configuration", - "konfrontation", "confrontation", - "konservatives", "conservatives", - "konstellation", "constellation", - "kontamination", "contamination", - "legitimatelly", "legitimately", - "libertariaism", "libertarianism", - "libertariansm", "libertarianism", - "libitarianisn", "libertarianism", - "lighthearthed", "lighthearted", - "mainfestation", "manifestation", - "manafacturers", "manufacturers", - "manafacturing", "manufacturing", - "manafestation", "manifestation", - "manefestation", "manifestation", - "manfuacturers", "manufactures", - "manifacturers", "manufacturers", - "manifacturing", "manufacturing", - "manifastation", "manifestation", - "manifestacion", "manifestation", - "manifestating", "manifestation", - "manifistation", "manifestation", - "manipulationg", "manipulating", - "manufacterers", "manufacturers", - "manufactering", "manufacturing", - "manufacterurs", "manufactures", - "manufactorers", "manufacturers", - "manufactoring", "manufacturing", - "manufactuered", "manufactured", - "manufactuerer", "manufacturer", - "manufactueres", "manufactures", - "manufacturedd", "manufactured", - "manufactureds", "manufactures", - "manufacturerd", "manufactured", - "manufacturier", "manufacturer", - "manufacturors", "manufacturers", - "manufactuters", "manufactures", - "manufacutrers", "manufactures", - "manufcaturers", "manufactures", - "marshmalllows", "marshmallows", - "massachsuetts", "massachusetts", - "massachucetts", "massachusetts", - "massachuestts", "massachusetts", - "massachusents", "massachusetts", - "massachusites", "massachusetts", - "massachussets", "massachusetts", - 
"massechusetts", "massachusetts", - "masturbateing", "masturbating", - "materialisimo", "materialism", - "mathamatician", "mathematician", - "mathametician", "mathematician", - "mathematicals", "mathematics", - "mathematicaly", "mathematically", - "mathematicans", "mathematics", - "mathematicion", "mathematician", - "mathematitian", "mathematician", - "mathemetician", "mathematician", - "mathmatically", "mathematically", - "mathmaticians", "mathematicians", - "mechanicallly", "mechanically", - "medeterranean", "mediterranean", - "meditarrenean", "mediterranean", - "meditereanean", "mediterranean", - "membranaphone", "membranophone", - "metamorphysis", "metamorphosis", - "metaphoricaly", "metaphorically", - "metaphoricial", "metaphorical", - "metaphysicals", "metaphysics", - "metaphysicans", "metaphysics", - "methamatician", "mathematician", - "methematician", "mathematician", - "metropolitain", "metropolitan", - "metropolitcan", "metropolitan", - "metropolitian", "metropolitan", - "millionairres", "millionaire", - "minneapolites", "minneapolis", - "misanderstood", "misunderstood", - "miscellanious", "miscellaneous", - "misconcpetion", "misconceptions", - "misconecption", "misconceptions", - "misinterperet", "misinterpret", - "misinterprate", "misinterpret", - "misinterprent", "misinterpret", - "misinterprted", "misinterpret", - "misogynisitic", "misogynistic", - "misrepreseted", "misrepresented", - "misunterstood", "misunderstood", - "modificaitons", "modifications", - "motivationals", "motivations", - "motivationnal", "motivational", - "mulitnational", "multinational", - "multimational", "multinational", - "multiplicaton", "multiplication", - "muncipalities", "municipalities", - "munnicipality", "municipality", - "mutlinational", "multinational", - "nacionalistic", "nationalistic", - "narcissisitic", "narcissistic", - "narcississtic", "narcissistic", - "natioanlistic", "nationalistic", - "nationalisitc", "nationalistic", - "nationalistes", "nationalists", - "nationalsitic", "nationalistic", - "neigbhourhood", "neighbourhood", - "neighboorhoud", "neighbourhood", - "neighborehood", "neighbourhood", - "neighborhoood", "neighborhoods", - "neighbourbood", "neighbourhood", - "neighbourgood", "neighbourhood", - "neighbourhoud", "neighbourhood", - "neighourhoods", "neighborhoods", - "nieghborhoods", "neighborhoods", - "nieghbourhood", "neighbourhood", - "noncombatents", "noncombatants", - "noninitalized", "noninitialized", - "northwestener", "northwestern", - "notificaitons", "notifications", - "occassionally", "occasionally", - "operationable", "operational", - "oppertunities", "opportunities", - "opprotunities", "opportunities", - "oppurtunities", "opportunities", - "opthamologist", "ophthalmologist", - "organistaions", "organisations", - "organizatinal", "organizational", - "organizativos", "organizations", - "organsiations", "organisations", - "organziations", "organizations", - "orginasations", "organisations", - "orginazations", "organizations", - "overpopulaton", "overpopulation", - "overreactiong", "overreacting", - "overshaddowed", "overshadowed", - "overwheliming", "overwhelming", - "overwhelmigly", "overwhelmingly", - "overwhelmingy", "overwhelmingly", - "overwhelminly", "overwhelmingly", - "palestininans", "palestinians", - "paraphraseing", "paraphrasing", - "paraphrashing", "paraphrasing", - "parilamentary", "parliamentary", - "parlaimentary", "parliamentary", - "parliamantary", "parliamentary", - "parliamentery", "parliamentary", - "parliamnetary", "parliamentary", - 
"parliementary", "parliamentary", - "particiaption", "participation", - "participacion", "participation", - "participantes", "participants", - "participativo", "participation", - "particularely", "particularly", - "particularily", "particularly", - "particularlly", "particularly", - "partizipation", "participation", - "passionatelly", "passionately", - "paychiatrists", "psychiatrists", - "paychologists", "psychologists", - "pennsylvainia", "pennsylvania", - "pennsylvanica", "pennsylvania", - "pennsylvannia", "pennsylvania", - "perdominantly", "predominantly", - "perpandicular", "perpendicular", - "perpendicualr", "perpendicular", - "perpenticular", "perpendicular", - "perpetuationg", "perpetuating", - "perpindicular", "perpendicular", - "personalitits", "personalities", - "pessimistisch", "pessimistic", - "pharmaceutial", "pharmaceutical", - "philisophical", "philosophical", - "philosiphical", "philosophical", - "philosohpical", "philosophical", - "philosophycal", "philosophically", - "philospohical", "philosophical", - "phisiological", "physiological", - "photagraphers", "photographers", - "photographics", "photographs", - "photographied", "photographed", - "photographier", "photographer", - "photograpphed", "photographed", - "photogrophers", "photographers", - "photogrpahers", "photographers", - "photoshoppade", "photoshopped", - "photoshoppped", "photoshopped", - "phsyiological", "physiological", - "phychiatrists", "psychiatrists", - "phychological", "psychological", - "phychologists", "psychologists", - "phylosophical", "philosophical", - "physciatrists", "psychiatrists", - "physcological", "psychological", - "physcologists", "psychologists", - "physioligical", "physiological", - "planeswlakers", "planeswalker", - "plansewalkers", "planeswalker", - "playthroughts", "playthroughs", - "polysaccaride", "polysaccharide", - "practicallity", "practically", - "practicioners", "practitioners", - "practisioners", "practitioners", - "practitioneer", "practitioners", - "practitionner", "practitioner", - "pratictioners", "practitioners", - "preconceieved", "preconceived", - "predecessores", "predecessors", - "predetermiend", "predetermined", - "predetirmined", "predetermined", - "preditermined", "predetermined", - "predomenantly", "predominantly", - "predominently", "predominantly", - "pregressively", "progressively", - "preinitalized", "preinitialized", - "preinitalizes", "preinitializes", - "preliferation", "proliferation", - "prependicular", "perpendicular", - "preposterious", "preposterous", - "prerequisties", "prerequisite", - "prerequistite", "prerequisite", - "prescribtions", "prescriptions", - "presumptuious", "presumptuous", - "pretedermined", "predetermined", - "problematisch", "problematic", - "proclaimation", "proclamation", - "prodominantly", "predominantly", - "professionnal", "professional", - "profitiablity", "profitability", - "profitibality", "profitability", - "progressivily", "progressively", - "progressivley", "progressively", - "prononciation", "pronunciation", - "pronouciation", "pronunciation", - "pronunciacion", "pronunciation", - "pronunciating", "pronunciation", - "pronuncuation", "pronunciation", - "pronunication", "pronunciation", - "pronuntiation", "pronunciation", - "propabilities", "probabilities", - "proportionaly", "proportionally", - "proportionnal", "proportional", - "proseletyzing", "proselytizing", - "protagonistas", "protagonists", - "protagonistes", "protagonists", - "protestantisk", "protestants", - "protruberance", "protuberance", - "provocativley", 
"provocative", - "pscyhiatrists", "psychiatrists", - "pscyhological", "psychological", - "pscyhologists", "psychologists", - "pshycological", "psychological", - "pshycologists", "psychologists", - "psichological", "psychological", - "psychaitrists", "psychiatrists", - "psychedellics", "psychedelics", - "psychiatrisch", "psychiatric", - "psycholigical", "psychological", - "psycholigists", "psychologists", - "psychologycal", "psychologically", - "psychologysts", "psychologists", - "psychyatrists", "psychiatrists", - "psysiological", "physiological", - "purpendicular", "perpendicular", - "pyschiatrists", "psychiatrists", - "pyschological", "psychological", - "pyschologists", "psychologists", - "qaulification", "qualification", - "qualifiaction", "qualification", - "qualificaiton", "qualifications", - "qualificatons", "qualifications", - "qualifikation", "qualification", - "questionalble", "questionable", - "quinessential", "quintessential", - "ramificaitons", "ramifications", - "realisitcally", "realistically", - "realtionships", "relationships", - "reccommending", "recommending", - "receptionnist", "receptionist", - "receptionsist", "receptionist", - "reconaissance", "reconnaissance", - "reconcilation", "reconciliation", - "reconnaisance", "reconnaissance", - "reconstrucion", "reconstruction", - "recreationnal", "recreational", - "rectangulaire", "rectangular", - "redistribuito", "redistribution", - "redistributin", "redistribution", - "reencarnation", "reincarnation", - "refridgerator", "refrigerator", - "rehabilitaion", "rehabilitation", - "rehabilitatin", "rehabilitation", - "rehabilitaton", "rehabilitation", - "reincarantion", "reincarnation", - "reincatnation", "reincarnation", - "reinforcemens", "reinforcements", - "reinforcemnts", "reinforcements", - "reinitalising", "reinitialising", - "reinitalizing", "reinitializing", - "reinkarnation", "reincarnation", - "reinstallling", "reinstalling", - "reintarnation", "reincarnation", - "relationshits", "relationships", - "relationsship", "relationships", - "relatiopnship", "relationship", - "relentlessely", "relentlessly", - "relentlessley", "relentlessly", - "relinqushment", "relinquishment", - "remifications", "ramifications", - "reprehenisble", "reprehensible", - "reprehensable", "reprehensible", - "reprehinsible", "reprehensible", - "represenation", "representation", - "represensible", "reprehensible", - "representaion", "representation", - "representatie", "representatives", - "representatin", "representations", - "representerad", "represented", - "representitve", "representative", - "representives", "representatives", - "repricussions", "repercussions", - "reprihensible", "reprehensible", - "resolutionary", "revolutionary", - "respectivelly", "respectively", - "responsibiliy", "responsibility", - "responsibilty", "responsibility", - "responsiblity", "responsibility", - "respositories", "repositories", - "resssurecting", "resurrecting", - "ressurrection", "resurrection", - "restaraunteur", "restaurateur", - "retoractively", "retroactively", - "retroactivily", "retroactively", - "retroactivley", "retroactively", - "retrocatively", "retroactively", - "revelutionary", "revolutionary", - "revolutionair", "revolutionary", - "revolutionens", "revolutions", - "revolutioners", "revolutions", - "revoultionary", "revolutionary", - "ridiculouness", "ridiculousness", - "rienforcement", "reinforcements", - "righetousness", "righteousness", - "rightiousness", "righteousness", - "rigtheousness", "righteousness", - "rollarcoaster", "rollercoaster", - 
"rollercaoster", "rollercoaster", - "rollercoaters", "rollercoaster", - "rollercoatser", "rollercoaster", - "rollerocaster", "rollercoaster", - "rollertoaster", "rollercoaster", - "rollorcoaster", "rollercoaster", - "sacrastically", "sarcastically", - "sarcasitcally", "sarcastically", - "satisfactorly", "satisfactory", - "scandianvians", "scandinavian", - "scateboarding", "skateboarding", - "schisophrenic", "schizophrenic", - "schiziphrenic", "schizophrenic", - "schizophernia", "schizophrenia", - "schizophernic", "schizophrenic", - "schizophrania", "schizophrenia", - "schizoprhenia", "schizophrenia", - "schizoprhenic", "schizophrenic", - "schozophrenia", "schizophrenia", - "schozophrenic", "schizophrenic", - "schyzophrenia", "schizophrenia", - "schyzophrenic", "schizophrenic", - "schziophrenia", "schizophrenia", - "schziophrenic", "schizophrenic", - "scientificaly", "scientifically", - "scientificlly", "scientifically", - "segementation", "segmentation", - "sensationable", "sensational", - "sensationails", "sensationalism", - "sensationaism", "sensationalism", - "sensationalim", "sensationalism", - "sensationella", "sensational", - "shcizophrenic", "schizophrenic", - "significanlty", "significantly", - "significently", "significantly", - "signifigantly", "significantly", - "simultaneosly", "simultaneously", - "simultaneuous", "simultaneous", - "simultanously", "simultaneously", - "singificantly", "significantly", - "skatebaording", "skateboarding", - "skateborading", "skateboarding", - "socioecenomic", "socioeconomic", - "socioecomonic", "socioeconomic", - "socioeconimic", "socioeconomic", - "sohpisticated", "sophisticated", - "sophisitcated", "sophisticated", - "sophistacated", "sophisticated", - "sophistocated", "sophisticated", - "sophosticated", "sophisticated", - "spacification", "specification", - "specializaton", "specialization", - "specificaiton", "specifications", - "specificatons", "specifications", - "specifikation", "specification", - "spectaculaire", "spectacular", - "spectaculalry", "spectacularly", - "spectatularly", "spectacularly", - "spesification", "specification", - "spirituallity", "spiritually", - "sponatenously", "spontaneously", - "spontaenously", "spontaneously", - "spontainously", "spontaneously", - "spontaneoulsy", "spontaneously", - "spontaneuosly", "spontaneously", - "spontaniously", "spontaneously", - "spontanuously", "spontaneously", - "sponteanously", "spontaneously", - "sponteneously", "spontaneously", - "sporstmanship", "sportsmanship", - "sportmansship", "sportsmanship", - "sportsmamship", "sportsmanship", - "sportsmenship", "sportsmanship", - "sprotsmanship", "sportsmanship", - "stakeboarding", "skateboarding", - "startegically", "strategically", - "statisitcally", "statistically", - "statistacally", "statistically", - "stereotipical", "stereotypical", - "stereotpyical", "stereotypical", - "stereotypcial", "stereotypical", - "stereotypeing", "stereotyping", - "stereotypying", "stereotyping", - "steriotypical", "stereotypical", - "steroetypical", "stereotypical", - "steryotypical", "stereotypical", - "storytellling", "storytelling", - "stragegically", "strategically", - "stragetically", "strategically", - "straightenend", "straightened", - "straitforward", "straightforward", - "stratagically", "strategically", - "stratigically", "strategically", - "strawberrries", "strawberries", - "stregnthening", "strengthening", - "strenghtening", "strengthening", - "strengthining", "strengthening", - "stretegically", "strategically", - "subcatagories", 
"subcategories", - "subconsciosly", "subconsciously", - "subconsciouly", "subconsciously", - "subconsiously", "subconsciously", - "subjectivelly", "subjectively", - "subscribtions", "subscriptions", - "substancially", "substantially", - "substanitally", "substantially", - "substansially", "substantially", - "substantiable", "substantial", - "substantually", "substantially", - "substitutents", "substitutes", - "successfullly", "successfully", - "supermarkedet", "supermarket", - "supermarkerts", "supermarkets", - "superpowereds", "superpowers", - "supersticious", "superstitious", - "superstisious", "superstitious", - "superstitiosi", "superstitious", - "superstitiuos", "superstitious", - "superstituous", "superstitious", - "suphisticated", "sophisticated", - "supscriptions", "subscriptions", - "surreptiously", "surreptitiously", - "survavibility", "survivability", - "survibability", "survivability", - "survivabiltiy", "survivability", - "survivalibity", "survivability", - "survivavility", "survivability", - "survivebility", "survivability", - "susbtantially", "substantially", - "sustainabilty", "sustainability", - "synchornously", "synchronously", - "systematicaly", "systematically", - "systematiclly", "systematically", - "techmological", "technological", - "technicallity", "technically", - "technoligical", "technological", - "technologicly", "technological", - "techonlogical", "technological", - "telaportation", "teleportation", - "teleportating", "teleportation", - "teleprotation", "teleportation", - "teliportation", "teleportation", - "teloportation", "teleportation", - "territoriella", "territorial", - "theoratically", "theoretically", - "theoritically", "theoretically", - "therapeutisch", "therapeutic", - "thereotically", "theoretically", - "thermodynaics", "thermodynamics", - "thermodynamcs", "thermodynamics", - "theroetically", "theoretically", - "thoeretically", "theoretically", - "tranistioning", "transitioning", - "transcendance", "transcendence", - "transcribtion", "transcription", - "transcripcion", "transcription", - "transferrring", "transferring", - "transformarea", "transformer", - "transformarem", "transformer", - "transformarse", "transformers", - "transformaton", "transformation", - "transformered", "transformed", - "transgengered", "transgendered", - "transisioning", "transitioning", - "transitionals", "transitions", - "transitionnal", "transitional", - "transitionned", "transitioned", - "transkription", "transcription", - "translyvanian", "transylvania", - "transmisisons", "transmissions", - "transmissable", "transmissible", - "transmisssion", "transmissions", - "transparantie", "transparent", - "transparentcy", "transparency", - "transplantees", "transplants", - "transporation", "transportation", - "transportaion", "transportation", - "transportarme", "transporter", - "transportarse", "transporter", - "transportarte", "transporter", - "transporteurs", "transporter", - "transsexuella", "transsexual", - "transylvannia", "transylvania", - "trasngendered", "transgendered", - "troubleshooot", "troubleshoot", - "udnerestimate", "underestimated", - "umcomfortable", "uncomfortable", - "umcomfortably", "uncomfortably", - "umpredictable", "unpredictable", - "unappropriate", "inappropriate", - "unbelievabley", "unbelievably", - "unbelievablly", "unbelievably", - "uncertaintity", "uncertainty", - "uncomfertable", "uncomfortable", - "uncomfertably", "uncomfortably", - "uncomfortabel", "uncomfortably", - "uncomforyable", "uncomfortably", - "uncomfrotable", "uncomfortable", - 
"uncomfrotably", "uncomfortably", - "uncomftorable", "uncomfortable", - "uncomftorably", "uncomfortably", - "unconcsiously", "unconsciously", - "unconfortable", "uncomfortable", - "unconfortably", "uncomfortably", - "unconscioulsy", "unconsciously", - "unconsicously", "unconsciously", - "unconsiderate", "inconsiderate", - "uncontrollabe", "uncontrollable", - "uncontrollaby", "uncontrollably", - "unconventinal", "unconventional", - "uncounciously", "unconsciously", - "uncousciously", "unconsciously", - "underastimate", "underestimate", - "underesitmate", "underestimated", - "underestamate", "underestimate", - "underestemate", "underestimate", - "underestiamte", "underestimated", - "undergratuate", "undergraduate", - "underhwelming", "underwhelming", - "underhwleming", "underwhelming", - "underminining", "undermining", - "underpowererd", "underpowered", - "undersetimate", "underestimate", - "understandble", "understandable", - "understandbly", "understandably", - "underwealming", "underwhelming", - "underwhemling", "underwhelming", - "underwhleming", "underwhelming", - "undoctrinated", "indoctrinated", - "unexpectadely", "unexpectedly", - "unfomfortable", "uncomfortable", - "unforgiveable", "unforgivable", - "unfortuantely", "unfortunately", - "unfortunantly", "unfortunately", - "unfortunatley", "unfortunately", - "unfortuneatly", "unfortunately", - "unfortunetely", "unfortunately", - "unilaterallly", "unilaterally", - "uninstallling", "uninstalling", - "unintellegent", "unintelligent", - "unintelligant", "unintelligent", - "unintensional", "unintentional", - "uninteristing", "uninteresting", - "universitites", "universities", - "unnecassarily", "unnecessarily", - "unneccesarily", "unnecessarily", - "unnecessairly", "unnecessarily", - "unnecessarely", "unnecessarily", - "unnecessarity", "unnecessarily", - "unnecesserily", "unnecessarily", - "unnecissarily", "unnecessarily", - "unnessecarily", "unnecessarily", - "unoperational", "nonoperational", - "unprecendeted", "unprecedented", - "unprecidented", "unprecedented", - "unpredecented", "unprecedented", - "unpredicatble", "unpredictable", - "unpredictible", "unpredictable", - "unpresedented", "unprecedented", - "unpridictable", "unpredictable", - "unprofessinal", "unprofessional", - "unrealistisch", "unrealistic", - "unreasonabley", "unreasonably", - "unreasonablly", "unreasonably", - "unrestrictred", "unrestricted", - "unsistainable", "unsustainable", - "unsubscribade", "unsubscribed", - "unsubscribbed", "unsubscribe", - "unsuccesfully", "unsuccessfully", - "unsuccessfull", "unsuccessful", - "unsucessfully", "unsuccessfully", - "unsuprisingly", "unsurprisingly", - "unsuprizingly", "unsurprisingly", - "unsustainible", "unsustainable", - "unsustianable", "unsustainable", - "vertification", "certification", - "villification", "vilification", - "virualization", "visualization", - "visualizacion", "visualization", - "visualizaiton", "visualization", - "visualizating", "visualization", - "vitualization", "visualization", - "vizualization", "visualization", - "volounteering", "volunteering", - "vulberability", "vulnerability", - "vulernability", "vulnerability", - "vulnarability", "vulnerability", - "vulnerabiltiy", "vulnerability", - "vulnurability", "vulnerability", - "vunlerability", "vulnerability", - "vurnerability", "vulnerability", - "weightlfiting", "weightlifting", - "weightlifitng", "weightlifting", - "weightligting", "weightlifting", - "weigthlifting", "weightlifting", - "wholeheartdly", "wholeheartedly", - "wholeheartedy", 
"wholeheartedly", - "wholeheartely", "wholeheartedly", - "wieghtlifting", "weightlifting", - "abberivation", "abbreviation", - "abberviation", "abbreviation", - "abbreivation", "abbreviation", - "abbreveation", "abbreviation", - "abbrievation", "abbreviation", - "abortificant", "abortifacient", - "abrreviation", "abbreviation", - "academcially", "academically", - "accedentally", "accidentally", - "accelarating", "accelerating", - "accelaration", "acceleration", - "acceleartion", "acceleration", - "acceleraptor", "accelerator", - "accelorating", "accelerating", - "accessibilty", "accessibility", - "accidentlaly", "accidently", - "accomadating", "accommodating", - "accomadation", "accommodation", - "accomodating", "accommodating", - "accomodation", "accommodation", - "accrediation", "accreditation", - "acculumation", "accumulation", - "accumalation", "accumulation", - "accumilation", "accumulation", - "acedemically", "academically", - "acheivements", "achievements", - "acknolwedged", "acknowledged", - "acknolwedges", "acknowledges", - "acknoweldged", "acknowledged", - "acknoweldges", "acknowledges", - "acknowiedged", "acknowledged", - "acknowladges", "acknowledges", - "acknowldeged", "acknowledged", - "acknowledget", "acknowledgement", - "acknowleding", "acknowledging", - "acknowlegded", "acknowledged", - "acknowlegdes", "acknowledges", - "ackumulation", "accumulation", - "acquaintaces", "acquaintances", - "acquaintence", "acquaintance", - "acquantaince", "acquaintance", - "acquantiance", "acquaintances", - "acquiantance", "acquaintances", - "acquiantence", "acquaintance", - "adknowledged", "acknowledged", - "adknowledges", "acknowledges", - "administored", "administer", - "adminsitered", "administered", - "adminstrator", "administrator", - "advantagious", "advantageous", - "advantegeous", "advantageous", - "adventageous", "advantageous", - "adventureous", "adventures", - "adventureres", "adventures", - "adventurious", "adventurous", - "adventuruous", "adventurous", - "advertisiers", "advertisers", - "advertisment", "advertisement", - "advertisters", "advertisers", - "advertisting", "advertising", - "aestheticaly", "aesthetically", - "aestheticlly", "aesthetically", - "afficianados", "aficionados", - "afficionados", "aficionados", - "afghanisthan", "afghanistan", - "afterhtought", "afterthought", - "afterthougth", "afterthought", - "aggressivley", "aggressively", - "agircultural", "agricultural", - "agknowledged", "acknowledged", - "agnosticisim", "agnosticism", - "agracultural", "agricultural", - "agriculteral", "agricultural", - "agriculteurs", "agriculture", - "agricultrual", "agricultural", - "agriculutral", "agricultural", - "agrigultural", "agricultural", - "agrocultural", "agricultural", - "allegiancies", "allegiance", - "alterantives", "alternatives", - "alternatevly", "alternately", - "alternatiely", "alternately", - "alternatieve", "alternative", - "alternativly", "alternatively", - "alternativos", "alternatives", - "alternatvely", "alternately", - "alternitives", "alternatives", - "altruistisch", "altruistic", - "amendmenters", "amendments", - "amohetamines", "amphetamines", - "ampehtamines", "amphetamines", - "ampethamines", "amphetamines", - "amphatamines", "amphetamines", - "amphedamines", "amphetamines", - "amphetamenes", "amphetamines", - "amphetemines", "amphetamines", - "amphetimines", "amphetamines", - "amphetmaines", "amphetamines", - "anecdotallly", "anecdotally", - "annhiliation", "annihilation", - "annihalition", "annihilation", - "annihilatron", "annihilation", - 
"annihliation", "annihilation", - "annilihation", "annihilation", - "anniversairy", "anniversary", - "anniversarry", "anniversary", - "anniversiary", "anniversary", - "annoucenment", "announcements", - "annoucnement", "announcement", - "announcemnet", "announcements", - "announcemnts", "announcements", - "anphetamines", "amphetamines", - "ansalisation", "nasalisation", - "ansalization", "nasalization", - "antaganistic", "antagonistic", - "antagonisitc", "antagonistic", - "antagonostic", "antagonist", - "antibioticos", "antibiotics", - "anticiaption", "anticipation", - "anticipacion", "anticipation", - "antisipation", "anticipation", - "antogonistic", "antagonistic", - "antrhopology", "anthropology", - "antrophology", "anthropology", - "apllications", "applications", - "apocalypitic", "apocalyptic", - "apologistics", "apologists", - "apologizeing", "apologizing", - "apostrophied", "apostrophe", - "apostrophies", "apostrophe", - "apperciation", "appreciation", - "applicaitons", "applications", - "appoitnments", "appointments", - "apporachable", "approachable", - "appraochable", "approachable", - "appreceating", "appreciating", - "appreciaters", "appreciates", - "appreciatied", "appreciative", - "appreicating", "appreciating", - "appreication", "appreciation", - "appretiation", "appreciation", - "appropriatin", "appropriation", - "appropriatly", "appropriately", - "appropriaton", "appropriation", - "approprietly", "appropriately", - "approstraphe", "apostrophe", - "approxiately", "approximately", - "approximatly", "approximately", - "approximetly", "approximately", - "aproximately", "approximately", - "aqcuaintance", "acquaintance", - "aqquaintance", "acquaintance", - "arbitrariliy", "arbitrarily", - "arbitrarilly", "arbitrarily", - "archetecture", "architecture", - "architechure", "architecture", - "architectual", "architectural", - "architectuur", "architecture", - "architecutre", "architecture", - "architexture", "architecture", - "arcitechture", "architecture", - "areodynamics", "aerodynamics", - "argicultural", "agricultural", - "argumentatie", "argumentative", - "arithmetisch", "arithmetic", - "armageddomon", "armageddon", - "arrengements", "arrangements", - "articifially", "artificially", - "artificailly", "artificially", - "artificiella", "artificial", - "artificually", "artificially", - "artifiically", "artificially", - "assasination", "assassination", - "assassinatin", "assassination", - "assissinated", "assassinated", - "associationg", "associating", - "assoications", "associations", - "assosiations", "associations", - "assosication", "assassination", - "assotiations", "associations", - "assymetrical", "asymmetrical", - "asthetically", "aesthetically", - "astranomical", "astronomical", - "astromonical", "astronomical", - "astronautlis", "astronauts", - "astronimical", "astronomical", - "astronomicly", "astronomical", - "athleticisim", "athleticism", - "atmosphereic", "atmospheric", - "audiobookmrs", "audiobooks", - "auhtenticate", "authenticate", - "australianas", "australians", - "australianos", "australians", - "authentisity", "authenticity", - "authorithies", "authorities", - "authoritiers", "authorities", - "authorizaton", "authorization", - "authrorities", "authorities", - "autochtonous", "autochthonous", - "autocorrrect", "autocorrect", - "automobilies", "automobile", - "automodertor", "automoderator", - "automonomous", "autonomous", - "auxilliaries", "auxiliaries", - "avaliability", "availability", - "avialability", "availability", - "awknowledged", "acknowledged", - 
"awknowledges", "acknowledges", - "awkwardsness", "awkwardness", - "babysittting", "babysitting", - "beaurocratic", "bureaucratic", - "beautifullly", "beautifully", - "belligerante", "belligerent", - "beuraucratic", "bureaucratic", - "billionairre", "billionaire", - "billionaries", "billionaires", - "billioniares", "billionaires", - "bioligically", "biologically", - "birmingharam", "birmingham", - "bittersweeet", "bittersweet", - "blamethrower", "flamethrower", - "blueberrries", "blueberries", - "blueprintcss", "blueprints", - "boardcasting", "broadcasting", - "bobybuilding", "bodybuilding", - "bodybuidling", "bodybuilding", - "bodybuilidng", "bodybuilding", - "bodybuliding", "bodybuilding", - "bodydbuilder", "bodybuilder", - "bombardement", "bombardment", - "boradcasting", "broadcasting", - "botivational", "motivational", - "brainwahsing", "brainwashing", - "brakethrough", "breakthrough", - "braodcasting", "broadcasting", - "brazilianese", "brazilians", - "brazilianess", "brazilians", - "breakthorugh", "breakthrough", - "breaktrhough", "breakthrough", - "breastfeedig", "breastfeeding", - "breastfeeing", "breastfeeding", - "breasttaking", "breathtaking", - "brianwashing", "brainwashing", - "broadcastors", "broadcasts", - "brotherhoood", "brotherhood", - "buearucratic", "bureaucratic", - "bueraucratic", "bureaucratic", - "bulletprooof", "bulletproof", - "bureaocratic", "bureaucratic", - "bureaucracie", "bureaucratic", - "bureaucracts", "bureaucrats", - "bureaucrates", "bureaucrats", - "bureuacratic", "bureaucratic", - "businessemen", "businessmen", - "cababilities", "capabilities", - "caclulations", "calculations", - "calcluations", "calculation", - "calcualtions", "calculations", - "calculationg", "calculating", - "calculatoare", "calculator", - "californains", "californian", - "californican", "californian", - "californinan", "californian", - "caluclations", "calculations", - "camouflagued", "camouflage", - "canceltation", "cancellation", - "cannibalisim", "cannibalism", - "canniballism", "cannibalism", - "cannotations", "connotations", - "capitalistes", "capitalists", - "caracterized", "characterized", - "carbohydrats", "carbohydrates", - "carbohyrdate", "carbohydrates", - "caricaturale", "caricature", - "caricaturile", "caricature", - "caricaturise", "caricature", - "caricaturize", "caricature", - "catastraphic", "catastrophic", - "catastrohpic", "catastrophic", - "catastrophie", "catastrophe", - "categoricaly", "categorically", - "categoriezed", "categorized", - "catergorized", "categorized", - "caterpillers", "caterpillars", - "catestrophic", "catastrophic", - "catholicisim", "catholicism", - "catholocisim", "catholicism", - "catistrophic", "catastrophic", - "catostraphic", "catastrophic", - "catostrophic", "catastrophic", - "catterpilars", "caterpillars", - "catterpillar", "caterpillar", - "celebratings", "celebrations", - "celebritites", "celebrities", - "celibrations", "celebrations", - "cententenial", "centennial", - "cercumstance", "circumstance", - "cerification", "verification", - "certificiate", "certificate", - "challengeing", "challenging", - "chamiponship", "championships", - "champinoship", "championships", - "championchip", "championship", - "championsihp", "championships", - "championsips", "championships", - "champiosnhip", "championships", - "champoinship", "championship", - "chanpionship", "championship", - "charactarize", "characterize", - "charaterized", "characterized", - "charismastic", "charismatic", - "cheerlearder", "cheerleader", - "cheerleeders", 
"cheerleaders", - "cheeseberger", "cheeseburger", - "cheeseborger", "cheeseburger", - "cheesebruger", "cheeseburgers", - "cheeseburges", "cheeseburgers", - "cheeseburgie", "cheeseburger", - "cheezeburger", "cheeseburger", - "chirstianity", "christianity", - "chocolateers", "chocolates", - "chrisitanity", "christianity", - "christainity", "christianity", - "christiantiy", "christianity", - "christinaity", "christianity", - "chromosomers", "chromosomes", - "chronologial", "chronological", - "chrsitianity", "christianity", - "cilivization", "civilizations", - "circulatiing", "circulating", - "circulationg", "circulating", - "circumcisied", "circumcised", - "circumcition", "circumcision", - "circumsicion", "circumcision", - "circumsision", "circumcision", - "circumsition", "circumcision", - "circumsizion", "circumcision", - "circumstanes", "circumstance", - "circumstanta", "circumstantial", - "circumstante", "circumstance", - "circuncision", "circumcision", - "circunstance", "circumstance", - "civiliaztion", "civilizations", - "civilizacion", "civilization", - "civilizaiton", "civilization", - "civilizatoin", "civilizations", - "civilizatons", "civilizations", - "civilziation", "civilizations", - "civizilation", "civilizations", - "claculations", "calculations", - "classificato", "classification", - "cockroachers", "cockroaches", - "coefficienct", "coefficient", - "coencidental", "coincidental", - "coincedental", "coincidental", - "coincidencal", "coincidental", - "coincidentia", "coincidental", - "coindidental", "coincidental", - "coinsidental", "coincidental", - "cointerpoint", "counterpoint", - "collaberator", "collaborate", - "collaboratie", "collaborate", - "collaboratin", "collaboration", - "collectivily", "collectively", - "collectivley", "collectively", - "colonialisim", "colonialism", - "colonizacion", "colonization", - "colonizators", "colonizers", - "colonozation", "colonization", - "combanations", "combinations", - "combonations", "combinations", - "comdemnation", "condemnation", - "comemmorates", "commemorates", - "comemoretion", "commemoration", - "comeptitions", "competitions", - "comfirmation", "confirmation", - "comfortabley", "comfortably", - "comfortablly", "comfortably", - "comissioning", "commissioning", - "commandemnts", "commandment", - "commandmants", "commandments", - "commandmends", "commandments", - "commemmorate", "commemorate", - "commendments", "commandments", - "commenteries", "commenters", - "commenwealth", "commonwealth", - "commerciales", "commercials", - "commerically", "commercially", - "comminicated", "communicated", - "commishioned", "commissioned", - "commishioner", "commissioner", - "commisioning", "commissioning", - "commissionar", "commissioner", - "commissionor", "commissioner", - "committments", "commitments", - "commoditites", "commodities", - "commomwealth", "commonwealth", - "commonhealth", "commonwealth", - "commonweatlh", "commonwealth", - "commonwelath", "commonwealth", - "communciated", "communicated", - "communiation", "communication", - "communicatie", "communicate", - "communicatin", "communications", - "communicaton", "communication", - "communitites", "communities", - "compansating", "compensating", - "compansation", "compensation", - "comparativly", "comparatively", - "comparisions", "comparisons", - "comparission", "comparisons", - "comparissons", "comparisons", - "compatablity", "compatibility", - "compatibiliy", "compatibility", - "compatibilty", "compatibility", - "compatiblity", "compatibility", - "compensacion", "compensation", - 
"compensative", "compensate", - "compesitions", "compositions", - "competetions", "competitions", - "competitevly", "competitively", - "competitiion", "competition", - "competitiors", "competitors", - "competitivly", "competitively", - "competitivos", "competitions", - "compinsating", "compensating", - "compinsation", "compensation", - "complainging", "complaining", - "completetion", "completion", - "compliations", "compilation", - "complicacion", "complication", - "complicatied", "complicate", - "complicaties", "complicate", - "complicatred", "complicate", - "complicatted", "complicate", - "complilation", "complication", - "complimation", "complication", - "complimenary", "complimentary", - "complimentje", "complimented", - "complimentry", "complimentary", - "complination", "complication", - "complitation", "complication", - "composistion", "compositions", - "compramising", "compromising", - "compremising", "compromising", - "compresssion", "compression", - "compromissen", "compromise", - "compromisses", "compromises", - "compromizing", "compromising", - "compromosing", "compromising", - "comptability", "compatibility", - "compulsivley", "compulsive", - "compulsorary", "compulsory", - "computarized", "computerized", - "comrpomising", "compromising", - "comtaminated", "contaminated", - "comtemporary", "contemporary", - "conbinations", "combinations", - "concatinated", "contaminated", - "conceivabley", "conceivably", - "concellation", "cancellation", - "concentraded", "concentrated", - "concentraing", "concentrating", - "concentraion", "concentration", - "concentrarte", "concentrate", - "concentratie", "concentrate", - "concentratin", "concentration", - "concequences", "consequences", - "concequently", "consequently", - "concersation", "conservation", - "concervation", "conservation", - "concervatism", "conservatism", - "concervative", "conservative", - "conciderable", "considerable", - "conciderably", "considerably", - "conciousness", "consciousness", - "conclusiones", "conclusions", - "conclusivley", "conclusive", - "condamnation", "condemnation", - "condemantion", "condemnation", - "condenmation", "condemnation", - "condescening", "condescending", - "condescenion", "condescension", - "conditionnal", "conditional", - "conditionned", "conditioned", - "conditionner", "conditioner", - "condmenation", "condemnation", - "condolencies", "condolences", - "condolensces", "condolences", - "condomnation", "condemnation", - "condradicted", "contradicted", - "conenctivity", "connectivity", - "confedential", "confidential", - "confederancy", "confederacy", - "confederatie", "confederate", - "confermation", "confirmation", - "confersation", "conservation", - "confessionis", "confessions", - "confidencial", "confidential", - "confidentail", "confidential", - "confidentaly", "confidently", - "confidentely", "confidently", - "confidentiel", "confidential", - "configuratin", "configurations", - "configuraton", "configuration", - "confirmacion", "confirmation", - "confrimation", "confirmation", - "confrontaion", "confrontation", - "congegration", "congregation", - "congergation", "congregation", - "congradulate", "congratulate", - "congragation", "congregation", - "congragulate", "congratulate", - "congratualte", "congratulate", - "congregacion", "congregation", - "congresional", "congressional", - "congresssman", "congressman", - "congresssmen", "congressmen", - "congretation", "congregation", - "congrigation", "congregation", - "conicidental", "coincidental", - "connatations", "connotations", - 
"connecticuit", "connecticut", - "connectivety", "connectivity", - "connetations", "connotations", - "connitations", "connotations", - "connonations", "connotations", - "conolization", "colonization", - "conpensating", "compensating", - "conpensation", "compensation", - "conpetitions", "competitions", - "conplimented", "complimented", - "conpromising", "compromising", - "consciouness", "consciousness", - "consciouslly", "consciously", - "consectutive", "consecutive", - "consecuences", "consequences", - "consecuentes", "consequences", - "consecuently", "consequently", - "consensuarlo", "consensual", - "consentrated", "concentrated", - "consentrates", "concentrates", - "conseqeunces", "consequence", - "consequenses", "consequences", - "consequental", "consequently", - "consequneces", "consequence", - "conservacion", "conservation", - "conservaties", "conservatives", - "conservativo", "conservation", - "conservativs", "conservatism", - "conservitave", "conservatives", - "conservitism", "conservatism", - "conservitive", "conservative", - "considerarle", "considerable", - "considerarte", "considerate", - "consideraste", "considerate", - "consideratie", "considerate", - "consideratin", "considerations", - "consideribly", "considerably", - "consilidated", "consolidated", - "consipracies", "conspiracies", - "consiquently", "consequently", - "consistantly", "consistently", - "consistencey", "consistency", - "consistentcy", "consistently", - "consitutents", "constituents", - "consoldiated", "consolidated", - "consolitated", "consolidate", - "consolodated", "consolidated", - "consoltation", "consultation", - "conspericies", "conspiracies", - "conspiracize", "conspiracies", - "conspiriator", "conspirator", - "conspiricies", "conspiracies", - "conspriacies", "conspiracies", - "consqeuences", "consequence", - "constinually", "continually", - "constitition", "constitution", - "constituante", "constituents", - "constituants", "constituents", - "constituates", "constitutes", - "constitucion", "constitution", - "constituient", "constitute", - "constituinte", "constituents", - "constitutiei", "constitute", - "constitutues", "constitute", - "constiutents", "constituents", - "constracting", "constructing", - "constraction", "construction", - "constrainsts", "constraints", - "construccion", "construction", - "construciton", "construction", - "constructeds", "constructs", - "constructief", "constructive", - "constructies", "constructs", - "constructifs", "constructs", - "constructiin", "constructing", - "constructivo", "construction", - "consturction", "construction", - "consultating", "consultation", - "consumerisim", "consumerism", - "contaiminate", "contaminate", - "contaminatie", "contaminated", - "contaminaton", "contamination", - "contaminents", "containment", - "contamporary", "contemporary", - "contanimated", "contaminated", - "contaniments", "containment", - "contemperary", "contemporary", - "contemporany", "contemporary", - "continentais", "continents", - "continential", "continental", - "contineously", "continuously", - "continiously", "continuously", - "continuacion", "continuation", - "continuating", "continuation", - "continuativo", "continuation", - "continuining", "continuing", - "contirbution", "contribution", - "contirbutors", "contributors", - "contiunation", "continuation", - "contrabution", "contribution", - "contraceptie", "contraceptives", - "contradicing", "contradicting", - "contradicion", "contradiction", - "contradicory", "contradictory", - "contradictie", "contradicted", - 
"contradictin", "contradiction", - "contradicton", "contradiction", - "contraticted", "contradicted", - "contribucion", "contribution", - "contribuitor", "contributor", - "contributers", "contributors", - "contributivo", "contribution", - "contributons", "contributors", - "contrictions", "contractions", - "contridicted", "contradicted", - "controlleras", "controllers", - "controlllers", "controllers", - "controverial", "controversial", - "controveries", "controversies", - "controversal", "controversial", - "controversey", "controversy", - "contructions", "contractions", - "conveinently", "conveniently", - "convencional", "conventional", - "conveniantly", "conveniently", - "converastion", "conversations", - "converdation", "conservation", - "conversacion", "conversation", - "conversaiton", "conversations", - "conversatino", "conservation", - "conversatism", "conservatism", - "conversatoin", "conversations", - "conversiones", "conversions", - "converstaion", "conversation", - "convertables", "convertibles", - "convertiable", "convertible", - "convertibile", "convertible", - "convervation", "conservation", - "convervatism", "conservatism", - "converzation", "conservation", - "convesration", "conservation", - "convienently", "conveniently", - "convorsation", "conversation", - "convseration", "conservation", - "coordenation", "coordination", - "coordiantion", "coordination", - "coordinacion", "coordination", - "coordinaters", "coordinates", - "coordinatior", "coordinator", - "coordinatore", "coordinate", - "coordonation", "coordination", - "cooridnation", "coordination", - "coorperation", "cooperation", - "coprorations", "corporations", - "corinthianos", "corinthians", - "corinthinans", "corinthians", - "corparations", "corporations", - "corperations", "corporations", - "corporativos", "corporations", - "corproations", "corporations", - "corrdination", "coordination", - "correponding", "corresponding", - "correposding", "corresponding", - "correspondes", "corresponds", - "correspondig", "corresponding", - "corresponing", "corresponding", - "corrisponded", "corresponded", - "costomizable", "customizable", - "costumizable", "customizable", - "councidental", "coincidental", - "counsellling", "counselling", - "counterfiets", "counterfeit", - "counterfited", "counterfeit", - "counterracts", "counterparts", - "countertraps", "counterparts", - "countrywides", "countryside", - "coutnerparts", "counterparts", - "coutnerpoint", "counterpoint", - "covnersation", "conservation", - "crankenstein", "frankenstein", - "creationisim", "creationism", - "creationnism", "creationism", - "creationnist", "creationist", - "creationsism", "creationism", - "creationsist", "creationist", - "creationsits", "creationists", - "credibillity", "credibility", - "crigneworthy", "cringeworthy", - "cringewhorty", "cringeworthy", - "cringeworhty", "cringeworthy", - "cringewrothy", "cringeworthy", - "cringyworthy", "cringeworthy", - "criticallity", "critically", - "criticiszing", "criticising", - "croporations", "corporations", - "crucifiction", "crucifixion", - "cuestionable", "questionable", - "culiminating", "culminating", - "cumulatative", "cumulative", - "cuntaminated", "contaminated", - "curcumcision", "circumcision", - "curcumstance", "circumstance", - "custamizable", "customizable", - "custimizable", "customizable", - "customizaton", "customization", - "customizeble", "customizable", - "customizible", "customizable", - "custumizable", "customizable", - "cuztomizable", "customizable", - "dabilitating", "debilitating", - 
"dangerousely", "dangerously", - "decensitized", "desensitized", - "deceptionist", "receptionist", - "declareation", "declaration", - "decomposeion", "decomposition", - "decomposited", "decomposed", - "decscription", "description", - "deffensively", "defensively", - "deficiancies", "deficiencies", - "deficiencias", "deficiencies", - "deficiensies", "deficiencies", - "definatively", "definitively", - "defininitely", "definitively", - "definitavely", "definitively", - "definitevely", "definitively", - "definitifely", "definitively", - "definitinely", "definitively", - "definititely", "definitively", - "definitivley", "definitively", - "deinitalized", "deinitialized", - "deinitalizes", "deinitializes", - "delibaretely", "deliberately", - "deliberatley", "deliberately", - "delibirately", "deliberately", - "delibitating", "debilitating", - "deliverately", "deliberately", - "delusionally", "delusively", - "demesticated", "domesticated", - "democracries", "democracies", - "democraphics", "demographics", - "democratisch", "democratic", - "demograhpics", "demographics", - "demogrpahics", "demographics", - "demonination", "denominations", - "demonstarted", "demonstrated", - "demonstartes", "demonstrates", - "demonstrabil", "demonstrably", - "demonstraion", "demonstration", - "demonstraits", "demonstrates", - "demonstrants", "demonstrates", - "demonstratie", "demonstrate", - "demonstratin", "demonstration", - "demonstrerat", "demonstrate", - "demosntrably", "demonstrably", - "demosntrated", "demonstrated", - "demosntrates", "demonstrates", - "demostration", "demonstration", - "denomenation", "denomination", - "denominacion", "denomination", - "denominatior", "denominator", - "denominatons", "denominations", - "denomonation", "denomination", - "deomgraphics", "demographics", - "depencencies", "dependencies", - "dependancies", "dependencies", - "dependencias", "dependencies", - "dependenices", "dependencies", - "dependensies", "dependencies", - "deperecation", "deprecation", - "deplacements", "replacements", - "deregualtion", "deregulation", - "deregulaiton", "deregulation", - "derugulation", "deregulation", - "describtions", "descriptions", - "descriminant", "discriminant", - "descriptivos", "descriptions", - "desctiptions", "descriptions", - "desctruction", "destruction", - "desencitized", "desensitized", - "desensatized", "desensitized", - "desensitived", "desensitized", - "desentisized", "desensitized", - "desentitized", "desensitized", - "desentizised", "desensitized", - "desginations", "destinations", - "desgustingly", "disgustingly", - "desitnations", "destinations", - "despectively", "respectively", - "despensaries", "dispensaries", - "desperatedly", "desperately", - "desperatelly", "desperately", - "desqualified", "disqualified", - "desregarding", "disregarding", - "dessertation", "dissertation", - "destiantions", "destinations", - "destinctions", "destinations", - "destractions", "distractions", - "destributors", "distributors", - "determinanti", "determination", - "determinaton", "determination", - "determinging", "determining", - "determinisic", "deterministic", - "determinisim", "determinism", - "deterministc", "deterministic", - "determinitic", "deterministic", - "detrimential", "detrimental", - "developement", "development", - "developmenet", "developments", - "develpoments", "developments", - "devolopement", "development", - "devolopments", "developments", - "diasspointed", "dissapointed", - "dicitonaries", "dictionaries", - "dictadorship", "dictatorship", - "dictarorship", 
"dictatorship", - "dictatorshop", "dictatorship", - "dictionaires", "dictionaries", - "didsapointed", "dissapointed", - "differencial", "differential", - "differencies", "differences", - "differentate", "differentiate", - "differnetial", "differential", - "difficulites", "difficulties", - "difficutlies", "difficulties", - "diffuculties", "difficulties", - "dimensionals", "dimensions", - "dimensionnal", "dimensional", - "dimensionsal", "dimensional", - "diplomatisch", "diplomatic", - "directionnal", "directional", - "disaapointed", "dissapointed", - "disadvandage", "disadvantaged", - "disadvantged", "disadvantaged", - "disadvantges", "disadvantages", - "disadvatange", "disadvantage", - "disadventage", "disadvantage", - "disagremeent", "disagreements", - "disapointing", "disappointing", - "disappearnce", "disappearance", - "disappearred", "disappeared", - "disapperaing", "disappearing", - "disaspointed", "dissapointed", - "disastisfied", "dissatisfied", - "disatissfied", "dissatisfied", - "disatvantage", "disadvantage", - "discertation", "dissertation", - "disciniplary", "disciplinary", - "disciplanary", "disciplinary", - "disciplenary", "disciplinary", - "disciplinare", "discipline", - "disciplinera", "disciplinary", - "disciplinery", "disciplinary", - "disclipinary", "disciplinary", - "disconencted", "disconnected", - "disconnectes", "disconnects", - "disconnectme", "disconnected", - "disconnectus", "disconnects", - "discontiuned", "discontinued", - "discountined", "discontinued", - "discreditied", "discredited", - "discreditted", "discredited", - "discriminare", "discriminate", - "discriminted", "discriminated", - "disctinction", "distinction", - "disctinctive", "distinctive", - "disctintions", "distinctions", - "discualified", "disqualified", - "discustingly", "disgustingly", - "disemination", "dissemination", - "disenchanged", "disenchanted", - "disengenuous", "disingenuous", - "disenginuous", "disingenuous", - "disensitized", "desensitized", - "disgareement", "disagreements", - "disgruntaled", "disgruntled", - "disgrunteled", "disgruntled", - "disguntingly", "disgustingly", - "disingeneous", "disingenuous", - "disingenious", "disingenuous", - "disinteresed", "disinterested", - "disintereted", "disinterested", - "dismantleing", "dismantling", - "disobediance", "disobedience", - "disobeidence", "disobedience", - "dispalcement", "displacement", - "dispapointed", "dissapointed", - "dispencaries", "dispensaries", - "dispensaires", "dispensaries", - "dispensarios", "dispensaries", - "dispensiries", "dispensaries", - "dispensories", "dispensaries", - "disqaulified", "disqualified", - "disqualifyed", "disqualified", - "disqustingly", "disgustingly", - "disrecpected", "disrespected", - "disrepsected", "disrespected", - "disresepcted", "disrespected", - "disrespecful", "disrespectful", - "disrespecing", "disrespecting", - "disrespectul", "disrespectful", - "disrespekted", "disrespected", - "disrtibution", "distributions", - "dissapearing", "disappearing", - "dissapionted", "dissapointed", - "dissapoimted", "dissapointed", - "dissapoitned", "dissapointed", - "dissaponited", "dissapointed", - "dissapoonted", "dissapointed", - "dissapounted", "dissapointed", - "dissappinted", "dissapointed", - "dissapponted", "dissapointed", - "dissastified", "dissatisfied", - "dissatisifed", "dissatisfied", - "dissatsified", "dissatisfied", - "dissepointed", "dissapointed", - "dissipointed", "dissapointed", - "dissobediant", "disobedient", - "dissobedient", "disobedient", - "dissopointed", "dissapointed", - 
"disspaointed", "dissapointed", - "dissppointed", "dissapointed", - "dissspointed", "dissapointed", - "distinations", "distinctions", - "distincitons", "distinctions", - "distingished", "distinguished", - "distingishes", "distinguishes", - "distinguised", "distinguished", - "distirbuting", "distributing", - "distirbution", "distribution", - "distrabution", "distribution", - "distribitors", "distributors", - "distribtuion", "distributions", - "distribucion", "distribution", - "distribuited", "distributed", - "distribuiton", "distributions", - "distribuitor", "distributor", - "distribusion", "distributions", - "distributino", "distributions", - "distributior", "distributor", - "distributons", "distributors", - "distributore", "distribute", - "distriubtion", "distributions", - "distrobution", "distribution", - "distrubances", "disturbance", - "distrubiting", "distributing", - "distrubition", "distribution", - "distrubitors", "distributors", - "distrubution", "distribution", - "distrubutors", "distributors", - "distructions", "distractions", - "distustingly", "disgustingly", - "ditactorship", "dictatorship", - "documenation", "documentation", - "documentaion", "documentation", - "documentaire", "documentaries", - "documentarse", "documentaries", - "documentarsi", "documentaries", - "domesitcated", "domesticated", - "domisticated", "domesticated", - "donesticated", "domesticated", - "donwloadable", "downloadable", - "dossapointed", "dissapointed", - "downlaodable", "downloadable", - "downloadbale", "downloadable", - "downloadeble", "downloadable", - "drankenstein", "frankenstein", - "dublications", "publications", - "dusgustingly", "disgustingly", - "dynamicallly", "dynamically", - "dyregulation", "deregulation", - "earthquackes", "earthquakes", - "earthquakers", "earthquakes", - "econimically", "economically", - "economisesti", "economists", - "educationnal", "educational", - "effectionate", "affectionate", - "effectivelly", "effectively", - "effectivenss", "effectiveness", - "efficienctly", "efficiency", - "effordlessly", "effortlessly", - "ejacualtions", "ejaculation", - "electorlytes", "electrolytes", - "electricrain", "electrician", - "electrictian", "electrician", - "electrobytes", "electrolytes", - "electrocytes", "electrolytes", - "electrolites", "electrolytes", - "electroltyes", "electrolytes", - "electronicas", "electronics", - "electronicos", "electronics", - "electroyltes", "electrolytes", - "elektrolytes", "electrolytes", - "eloctrolytes", "electrolytes", - "embarassment", "embarrassment", - "embarasssing", "embarassing", - "embarrasment", "embarrassment", - "embarressing", "embarrassing", - "embarrissing", "embarrassing", - "emberrassing", "embarrassing", - "emphetamines", "amphetamines", - "emprisonment", "imprisonment", - "encarcerated", "incarcerated", - "enceclopedia", "encyclopedia", - "enchancement", "enhancement", - "enchancments", "enchantments", - "enchantmants", "enchantments", - "enchentments", "enchantments", - "enciclopedia", "encyclopedia", - "enclycopedia", "encyclopedia", - "encorporated", "incorporated", - "encourageing", "encouraging", - "encyclapedia", "encyclopedia", - "encyclepedia", "encyclopedia", - "encyclopadia", "encyclopedia", - "encyclopeida", "encyclopedia", - "encyclopidia", "encyclopedia", - "encycolpedia", "encyclopedia", - "encyklopedia", "encyclopedia", - "encylcopedia", "encyclopedia", - "encyplopedia", "encyclopedia", - "endoresments", "endorsement", - "enemployment", "unemployment", - "enfringement", "infringement", - "enlightended", 
"enlightened", - "enlightenend", "enlightened", - "enlightented", "enlightened", - "enlightining", "enlightening", - "enligthening", "enlightening", - "entaglements", "entanglements", - "entartaining", "entertaining", - "enterpreneur", "entrepreneurs", - "enterprenuer", "entrepreneur", - "entertainted", "entertained", - "enthusiaists", "enthusiasts", - "enthusuastic", "enthusiastic", - "entoxication", "intoxication", - "entrepeneurs", "entrepreneurs", - "entreperneur", "entrepreneurs", - "entreprenaur", "entrepreneur", - "entrepreners", "entrepreneurs", - "entrepreneus", "entrepreneurs", - "entreprenour", "entrepreneur", - "entreprenure", "entrepreneurs", - "entreprenurs", "entrepreneurs", - "entrepreuner", "entrepreneurs", - "entretaining", "entertaining", - "enviormental", "environmental", - "enviornments", "environments", - "enviromental", "environmental", - "environemnts", "environments", - "environmentl", "environmentally", - "environmetal", "environmental", - "envrionments", "environments", - "establishmet", "establishments", - "evelutionary", "evolutionary", - "exagerrating", "exaggerating", - "exaggarating", "exaggerating", - "exaggaration", "exaggeration", - "exaggeratted", "exaggerated", - "exaggurating", "exaggerating", - "exagguration", "exaggeration", - "exceptionaly", "exceptionally", - "exceptionnal", "exceptional", - "exclusiveity", "exclusivity", - "exclusivelly", "exclusively", - "exclusivitiy", "exclusivity", - "excorciating", "excruciating", - "excrusiating", "excruciating", - "excurciating", "excruciating", - "exectuioners", "executioner", - "executioneer", "executioner", - "executionees", "executions", - "executioness", "executions", - "executionier", "executioner", - "executionner", "executioner", - "exeggerating", "exaggerating", - "exeggeration", "exaggeration", - "expeditonary", "expeditionary", - "expendatures", "expenditures", - "expendetures", "expenditures", - "expentitures", "expenditures", - "experamental", "experimental", - "expereincing", "experiencing", - "experemental", "experimental", - "experiancing", "experiencing", - "experiemntal", "experimental", - "experiemnted", "experimented", - "experimantal", "experimental", - "experimentan", "experimentation", - "experimentes", "experiments", - "experimentle", "experimented", - "experimentos", "experiments", - "experimentul", "experimental", - "expidentures", "expenditures", - "expierencing", "experiencing", - "expiremental", "experimental", - "expiremented", "experimented", - "explaination", "explanation", - "explenations", "explanations", - "expliotation", "exploitation", - "exploitaiton", "exploitation", - "exploitating", "exploitation", - "exploititive", "exploitative", - "explortation", "exploitation", - "explotiation", "exploitation", - "explotiative", "exploitative", - "expolitation", "exploitation", - "expolitative", "exploitative", - "exponentialy", "exponentially", - "expropiation", "expropriation", - "extensivelly", "extensively", - "extradiction", "extradition", - "extraordiary", "extraordinary", - "extraordinay", "extraordinary", - "extrapolerat", "extrapolate", - "extrapoloate", "extrapolate", - "extremistisk", "extremists", - "extrordinary", "extraordinary", - "extruciating", "excruciating", - "facilitatile", "facilitate", - "fahrenheight", "fahrenheit", - "falmethrower", "flamethrower", - "familiarlize", "familiarize", - "fanslaughter", "manslaughter", - "fantasticaly", "fantastically", - "fantasticlly", "fantastically", - "fashionalble", "fashionable", - "fermantation", "fermentation", - 
"fermentacion", "fermentation", - "fermentaiton", "fermentation", - "fermentating", "fermentation", - "fermintation", "fermentation", - "fictionaries", "dictionaries", - "figuartively", "figuratively", - "figuratevely", "figuratively", - "figurativley", "figuratively", - "figuretively", "figuratively", - "figuritively", "figuratively", - "fingerpoints", "fingerprints", - "firefigthers", "firefighters", - "flamethorwer", "flamethrower", - "flametrhower", "flamethrower", - "flanethrower", "flamethrower", - "flexibillity", "flexibility", - "flourishment", "flourishing", - "fluctiations", "fluctuations", - "flucutations", "fluctuations", - "fluxtuations", "fluctuations", - "forgivenness", "forgiveness", - "fortunatelly", "fortunately", - "framethrower", "flamethrower", - "frankenstain", "frankenstein", - "frankensteen", "frankenstein", - "frankenstine", "frankenstein", - "frankinstein", "frankenstein", - "frementation", "fermentation", - "friendzonded", "friendzoned", - "friendzonned", "friendzoned", - "friendzowned", "friendzoned", - "fringeworthy", "cringeworthy", - "fronkenstein", "frankenstein", - "fruitsations", "frustrations", - "frustrastion", "frustrations", - "fucntionally", "functionally", - "funcitonally", "functionally", - "functionable", "functional", - "functionaliy", "functionally", - "functionalty", "functionality", - "functionlity", "functionality", - "functionning", "functioning", - "fundamentais", "fundamentals", - "fundamentalt", "fundamentalist", - "fundamentaly", "fundamentally", - "fundemantals", "fundamentals", - "fundementals", "fundamentals", - "fundimentals", "fundamentals", - "furstrations", "frustrations", - "futuristisch", "futuristic", - "fwankenstein", "frankenstein", - "geneological", "genealogical", - "generacional", "generational", - "generalizare", "generalize", - "generalizate", "generalize", - "generelizing", "generalizing", - "geograhpical", "geographical", - "geographicly", "geographical", - "geographisch", "geographic", - "geogrpahical", "geographical", - "goegraphical", "geographical", - "governemntal", "governmental", - "governmently", "governmental", - "grammaticaal", "grammatical", - "grammaticaly", "grammatically", - "grandchilden", "grandchildren", - "grandchilder", "grandchildren", - "grandchilren", "grandchildren", - "grassrooters", "grassroots", - "gringeworthy", "cringeworthy", - "guantanameow", "guantanamo", - "guantanamero", "guantanamo", - "hallucinatin", "hallucinations", - "hallucinaton", "hallucination", - "handwritting", "handwriting", - "harrassments", "harassments", - "headqaurters", "headquarters", - "headquatered", "headquartered", - "healthercare", "healthcare", - "heavywieghts", "heavyweight", - "helicopteros", "helicopters", - "hererosexual", "heterosexual", - "heretosexual", "heterosexual", - "heteresexual", "heterosexual", - "hetreosexual", "heterosexual", - "highligthing", "highlighting", - "hipocritical", "hypocritical", - "hipothetical", "hypothetical", - "histarically", "historically", - "histerically", "historically", - "historicians", "historians", - "homogeneized", "homogenized", - "homogenenous", "homogeneous", - "homosexuales", "homosexuals", - "homosexualiy", "homosexuality", - "homosexualls", "homosexuals", - "homosexualty", "homosexuality", - "homosexuella", "homosexual", - "hopsitalized", "hospitalized", - "horisontally", "horizontally", - "horizantally", "horizontally", - "horiztonally", "horizontally", - "horozontally", "horizontally", - "hospitallity", "hospitality", - "hospitilized", "hospitalized", - 
"hospitolized", "hospitalized", - "hosptialized", "hospitalized", - "humanitarien", "humanitarian", - "humanitarion", "humanitarian", - "humanitatian", "humanitarian", - "humaniterian", "humanitarian", - "humantiarian", "humanitarian", - "huminatarian", "humanitarian", - "hurricanefps", "hurricanes", - "hyopthetical", "hypothetical", - "hypathetical", "hypothetical", - "hypertrophey", "hypertrophy", - "hypethetical", "hypothetical", - "hypocrticial", "hypocritical", - "hypocrytical", "hypocritical", - "hypotehtical", "hypothetical", - "hypotethical", "hypothetical", - "hypotherical", "hypothetical", - "hypotheticly", "hypothetical", - "hystarically", "hysterically", - "hystorically", "hysterically", - "idealistisch", "idealistic", - "identificato", "identification", - "identifierad", "identified", - "identifieras", "identifies", - "identifyable", "identifiable", - "ideologicaly", "ideologically", - "idiosyncracy", "idiosyncrasy", - "illegetimate", "illegitimate", - "illegitamate", "illegitimate", - "illegitamite", "illegitimate", - "illegitemate", "illegitimate", - "illegitimite", "illegitimate", - "illigetimate", "illegitimate", - "illigitemate", "illegitimate", - "illistration", "illustration", - "illsutration", "illustrations", - "illustartion", "illustration", - "illustraitor", "illustrator", - "illustraties", "illustrate", - "illustratior", "illustrator", - "imcompatible", "incompatible", - "imcompetence", "incompetence", - "imexperience", "inexperience", - "immediatelly", "immediately", - "immortallity", "immortality", - "imperialfist", "imperialist", - "imperialisim", "imperialism", - "imperialstic", "imperialist", - "implamenting", "implementing", - "implausibile", "implausible", - "implecations", "implications", - "implementase", "implements", - "implementasi", "implements", - "implementato", "implementation", - "implentation", "implementation", - "implimenting", "implementing", - "imporvements", "improvements", - "impossibilty", "impossibility", - "impossiblely", "impossibly", - "impossiblity", "impossibly", - "impovershied", "impoverished", - "impoversihed", "impoverished", - "imprefection", "imperfections", - "improsonment", "imprisonment", - "improviserad", "improvised", - "imrpovements", "improvements", - "imtimidating", "intimidating", - "imtimidation", "intimidation", - "inaccesibles", "inaccessible", - "inaccessable", "inaccessible", - "inaccessbile", "inaccessible", - "inaccurasies", "inaccuracies", - "inaccuraties", "inaccuracies", - "inaccuricies", "inaccuracies", - "inacuraccies", "inaccuracies", - "inadvertenly", "inadvertently", - "inappropiate", "inappropriate", - "inapproprate", "inappropriate", - "inappropriae", "inappropriately", - "inappropriet", "inappropriately", - "inattractive", "unattractive", - "inbelievable", "unbelievable", - "incarcelated", "incarcerated", - "incarcirated", "incarcerated", - "incarserated", "incarcerated", - "incedentally", "incidentally", - "incentiveise", "incentives", - "incestigator", "investigator", - "incomaptible", "incompatible", - "incomparible", "incompatible", - "incompatable", "incompatible", - "incompatibil", "incompatible", - "incompetance", "incompetence", - "incompetente", "incompetence", - "incompitable", "incompatible", - "incomptetent", "incompetent", - "inconcistent", "inconsistent", - "inconsistant", "inconsistent", - "inconsistecy", "inconsistency", - "inconsisteny", "inconsistency", - "inconveinent", "inconvenient", - "inconveniant", "inconvenient", - "inconveniece", "inconvenience", - "inconvenince", 
"inconvenience", - "inconvienent", "inconvenient", - "incorparated", "incorporated", - "incorperated", "incorporated", - "incorportaed", "incorporated", - "incorportate", "incorporate", - "incrediblely", "incredibly", - "incrementers", "increments", - "incremential", "incremental", - "indefinately", "indefinitely", - "indefineable", "undefinable", - "indefinetely", "indefinitely", - "indefinitive", "indefinite", - "indefinitley", "indefinitely", - "indefintiely", "indefinitely", - "indepedantly", "independently", - "indepencence", "independence", - "independance", "independence", - "independante", "independents", - "independenet", "independents", - "independenly", "independently", - "independense", "independents", - "independente", "independence", - "independetly", "independently", - "indepentents", "independents", - "indetifiable", "identifiable", - "indianaoplis", "indianapolis", - "indianopolis", "indianapolis", - "indicentally", "incidentally", - "indifferance", "indifference", - "indifferente", "indifference", - "indiffernece", "indifference", - "indimidating", "intimidating", - "indimidation", "intimidation", - "indipendence", "independence", - "indisputible", "indisputable", - "indisputibly", "indisputably", - "individuales", "individuals", - "individualty", "individuality", - "individuella", "individual", - "indiviudally", "individually", - "indivudually", "individually", - "indpendently", "independently", - "indroduction", "introduction", - "indroductory", "introductory", - "industriella", "industrial", - "industrijske", "industries", - "inefficienct", "inefficient", - "inefficienty", "inefficiently", - "inevitablely", "inevitably", - "inevitablity", "inevitably", - "inevititably", "inevitably", - "inexblicably", "inexplicably", - "inexpectedly", "unexpectedly", - "inexpereince", "inexperience", - "inexperiance", "inexperience", - "inexperieced", "inexperienced", - "inexperiened", "inexperienced", - "inexperiente", "inexperience", - "inexpierence", "inexperienced", - "inexplicabil", "inexplicably", - "inexplicibly", "inexplicably", - "infalability", "infallibility", - "infilitrated", "infiltrated", - "infiltraitor", "infiltrator", - "infiltratior", "infiltrator", - "infiltratred", "infiltrate", - "influenceing", "influencing", - "infogrpahics", "infographic", - "inforgivable", "unforgivable", - "infrantryman", "infantryman", - "infridgement", "infringement", - "infrignement", "infringement", - "ingestigator", "investigator", - "ingredientes", "ingredients", - "ingreediants", "ingredients", - "ininterested", "uninterested", - "initalizable", "initializable", - "inkompatible", "incompatible", - "inkompetence", "incompetence", - "inkonsistent", "inconsistent", - "inlightening", "enlightening", - "innersection", "intersection", - "innerstellar", "interstellar", - "inpenetrable", "impenetrable", - "inplementing", "implementing", - "inplications", "implications", - "inpoverished", "impoverished", - "inprisonment", "imprisonment", - "inproductive", "unproductive", - "inprovements", "improvements", - "inresponsive", "unresponsive", - "insentivised", "insensitive", - "insentivises", "insensitive", - "insignifiant", "insignificant", - "insignificat", "insignificant", - "insinuationg", "insinuating", - "instabillity", "instability", - "instalaltion", "installations", - "installatons", "installations", - "installatron", "installation", - "instantaneos", "instantaneous", - "instantaneus", "instantaneous", - "instantanous", "instantaneous", - "instinctivly", "instinctively", - "institutuion", 
"institution", - "instramental", "instrumental", - "instrcutions", "instruction", - "instrucitons", "instruction", - "instructiosn", "instruction", - "instructores", "instructors", - "instrumentos", "instruments", - "instrumentul", "instrumental", - "insturmental", "instrumental", - "instutitions", "institutions", - "insuccessful", "unsuccessful", - "insufficiant", "insufficient", - "insuffucient", "insufficient", - "insuspecting", "unsuspecting", - "intaxication", "intoxication", - "intelelctual", "intellectuals", - "intellectals", "intellectuals", - "intellectaul", "intellectuals", - "intellectuel", "intellectual", - "intellecutal", "intellectual", - "intelligance", "intelligence", - "intelligenly", "intelligently", - "intelligente", "intelligence", - "intelligenty", "intelligently", - "intelligient", "intelligent", - "intenational", "international", - "intentionnal", "intentional", - "intepretator", "interpretor", - "interatellar", "interstellar", - "interational", "international", - "intercection", "interception", - "intercepcion", "interception", - "interceptons", "interceptions", - "intereaction", "intersection", - "interections", "interactions", - "interersting", "interpreting", - "interesction", "intersection", - "interestigly", "interestingly", - "interestinly", "interestingly", - "interferance", "interference", - "interfereing", "interfering", - "interferisce", "interferes", - "interferisse", "interferes", - "interferring", "interfering", - "intergration", "integration", - "interlectual", "intellectual", - "intermediare", "intermediate", - "intermediete", "intermediate", - "intermettent", "intermittent", - "intermideate", "intermediate", - "intermidiate", "intermediate", - "internatinal", "international", - "internationl", "international", - "internations", "interactions", - "internediate", "intermediate", - "internelized", "internalized", - "internilized", "internalized", - "interperters", "interpreter", - "interperting", "interpreting", - "interprating", "interpreting", - "interpretare", "interpreter", - "interpretato", "interpretation", - "interpreteer", "interpreter", - "interpretier", "interpreter", - "interpretion", "interpreting", - "interpretter", "interpreter", - "interpriting", "interpreting", - "interraccial", "interracial", - "interractial", "interracial", - "interrogatin", "interrogation", - "interrumping", "interrupting", - "interrupteds", "interrupts", - "interruptors", "interrupts", - "interseccion", "intersection", - "interseciton", "intersections", - "interseption", "interception", - "intersetllar", "interstellar", - "interstallar", "interstellar", - "interstaller", "interstellar", - "intersteller", "interstellar", - "interstellor", "interstellar", - "intertaining", "entertaining", - "intertwinded", "intertwined", - "intertwinned", "intertwined", - "interveiwing", "interviewing", - "intervencion", "intervention", - "interveneing", "intervening", - "intervension", "intervention", - "interviening", "interviewing", - "intidimation", "intimidation", - "intillectual", "intellectual", - "intimidacion", "intimidation", - "intimidative", "intimidate", - "intimitading", "intimidating", - "intimitating", "intimidating", - "intimitation", "intimidation", - "intorduction", "introduction", - "intorductory", "introductory", - "intoxicacion", "intoxication", - "intoxination", "intoxication", - "intrepreting", "interpreting", - "intrinsicaly", "intrinsically", - "introdiction", "introduction", - "introduccion", "introduction", - "introduceras", "introduces", - "introduceres", 
"introduces", - "introduciton", "introduction", - "introductary", "introductory", - "introducting", "introduction", - "introductury", "introductory", - "introduktion", "introduction", - "introspectin", "introspection", - "intruduction", "introduction", - "intruductory", "introductory", - "intsrumental", "instrumental", - "intuitivelly", "intuitively", - "inturrupting", "interrupting", - "invervention", "intervention", - "investagated", "investigated", - "investagator", "investigator", - "investegated", "investigated", - "investegator", "investigator", - "investigaron", "investigator", - "investigater", "investigator", - "investigatie", "investigative", - "investigatin", "investigation", - "investigatio", "investigator", - "investigaton", "investigation", - "investingate", "investigate", - "investogator", "investigator", - "invicibility", "invisibility", - "invididually", "individually", - "invisibiltiy", "invisibility", - "invisilibity", "invisibility", - "invisivility", "invisibility", - "invlunerable", "invulnerable", - "involnerable", "invulnerable", - "involuntairy", "involuntary", - "involuntarly", "involuntary", - "invonvenient", "inconvenient", - "invulenrable", "invulnerable", - "invulernable", "invulnerable", - "invulnarable", "invulnerable", - "invulnerbale", "invulnerable", - "invulnurable", "invulnerable", - "invulverable", "invulnerable", - "invunlerable", "invulnerable", - "invurnerable", "invulnerable", - "irrationably", "irrationally", - "irrationatly", "irrationally", - "irrationella", "irrational", - "irreplacable", "irreplaceable", - "irresistable", "irresistible", - "irresistably", "irresistibly", - "irrespecitve", "irrespective", - "irresponsble", "irresponsible", - "irresponsibe", "irresponsible", - "irreverisble", "irreversible", - "irreversebly", "irreversible", - "irreversibel", "irreversible", - "irrevirsible", "irreversible", - "irrispective", "irrespective", - "irriversible", "irreversible", - "isdefinitely", "indefinitely", - "isntallation", "installation", - "isntrumental", "instrumental", - "jackonsville", "jacksonville", - "jounralistic", "journalistic", - "jouranlistic", "journalistic", - "journalisitc", "journalistic", - "journalistes", "journalists", - "judgementals", "judgements", - "juggernaunts", "juggernaut", - "juridisction", "jurisdictions", - "jurisdiccion", "jurisdiction", - "jurisdiciton", "jurisdiction", - "jurisdiktion", "jurisdiction", - "jurisfiction", "jurisdiction", - "jurisidction", "jurisdiction", - "juristiction", "jurisdiction", - "jursidiction", "jurisdiction", - "jusridiction", "jurisdiction", - "justificatin", "justifications", - "katastrophic", "catastrophic", - "kidnergarten", "kindergarten", - "kindergarden", "kindergarten", - "kingergarten", "kindergarten", - "kintergarten", "kindergarten", - "knolwedgable", "knowledgable", - "knoweldgable", "knowledgable", - "knowladgable", "knowledgable", - "knowldegable", "knowledgable", - "knowldgeable", "knowledgable", - "knowleagable", "knowledgable", - "knowledagble", "knowledgable", - "knowledeable", "knowledgable", - "knowledgabel", "knowledgable", - "knowledgeble", "knowledgeable", - "knowledgebly", "knowledgable", - "knowledgible", "knowledgable", - "knowlegdable", "knowledgable", - "knowlegeable", "knowledgeable", - "knwoledgable", "knowledgable", - "kolonization", "colonization", - "kombinations", "combinations", - "kommissioner", "commissioner", - "kompensation", "compensation", - "konfidential", "confidential", - "konfirmation", "confirmation", - "kongregation", "congregation", - 
"konservatism", "conservatism", - "konservative", "conservative", - "konsultation", "consultation", - "konversation", "conversation", - "koordination", "coordination", - "krankenstein", "frankenstein", - "leaglization", "legalization", - "legalizacion", "legalization", - "legalizaiton", "legalization", - "legendariske", "legendaries", - "legimitately", "legitimately", - "legislatiors", "legislators", - "legistration", "registration", - "legitamately", "legitimately", - "legitamitely", "legitimately", - "legitemately", "legitimately", - "legitimatley", "legitimately", - "legitimitely", "legitimately", - "liberatrians", "libertarians", - "libertarains", "libertarians", - "libertariens", "libertarians", - "libertaryans", "libertarians", - "libertatians", "libertarians", - "liberterians", "libertarians", - "libretarians", "libertarians", - "lighthearded", "lighthearted", - "linguisticas", "linguistics", - "linguisticos", "linguistics", - "linguistisch", "linguistics", - "litllefinger", "littlefinger", - "littelfinger", "littlefinger", - "litterfinger", "littlefinger", - "littiefinger", "littlefinger", - "littlefigner", "littlefinger", - "littlefinder", "littlefinger", - "littlepinger", "littlefinger", - "lnowledgable", "knowledgable", - "longitudonal", "longitudinal", - "madturbating", "masturbating", - "madturbation", "masturbation", - "magnificient", "magnificent", - "maintainance", "maintenance", - "maintainence", "maintenance", - "maintenaince", "maintenance", - "malfucntions", "malfunction", - "manafactured", "manufactured", - "manafacturer", "manufacturer", - "manafactures", "manufactures", - "manifactured", "manufactured", - "manifacturer", "manufacturer", - "manifactures", "manufactures", - "manifestaion", "manifestation", - "manifestanti", "manifestation", - "manipluating", "manipulating", - "manipluation", "manipulation", - "manipualting", "manipulating", - "manipualtion", "manipulation", - "manipualtive", "manipulative", - "manipulacion", "manipulation", - "manipulitive", "manipulative", - "maniuplating", "manipulating", - "maniuplation", "manipulation", - "maniuplative", "manipulative", - "manouverable", "maneuverable", - "mansalughter", "manslaughter", - "manslaugther", "manslaughter", - "mansluaghter", "manslaughter", - "manufactered", "manufactured", - "manufacterer", "manufacturer", - "manufacteres", "manufactures", - "manufacteurs", "manufactures", - "manufactored", "manufactured", - "manufactorer", "manufacturer", - "manufactores", "manufactures", - "manufactuers", "manufacturers", - "manufactuing", "manufacturing", - "manufacturas", "manufactures", - "manufacturor", "manufacturer", - "manufactuter", "manufacture", - "manufacuters", "manufactures", - "manufacutred", "manufacture", - "manufacutres", "manufactures", - "manufaturing", "manufacturing", - "manupilating", "manipulating", - "manupulating", "manipulating", - "manupulation", "manipulation", - "manupulative", "manipulative", - "marchmallows", "marshmallows", - "marganilized", "marginalized", - "margenalized", "marginalized", - "marginilized", "marginalized", - "marhsmallows", "marshmallows", - "marshamllows", "marshmallows", - "marshmallons", "marshmallows", - "masoginistic", "misogynistic", - "masogynistic", "misogynistic", - "massachusets", "massachusetts", - "massachustts", "massachusetts", - "masterbation", "masturbation", - "masterpeices", "masterpiece", - "mastrubating", "masturbating", - "mastrubation", "masturbation", - "mastubration", "masturbation", - "masturabting", "masturbating", - "masturabtion", 
"masturbation", - "masturbacion", "masturbation", - "masturbaited", "masturbated", - "masturbathon", "masturbation", - "masturbsting", "masturbating", - "masturdating", "masturbating", - "mastutbation", "masturbation", - "mataphorical", "metaphorical", - "mataphysical", "metaphysical", - "matchmakeing", "matchmaking", - "mathemathics", "mathematics", - "mathematican", "mathematician", - "mathematicas", "mathematics", - "mathematicks", "mathematics", - "mathematicly", "mathematical", - "mathematisch", "mathematics", - "mathemetical", "mathematical", - "matheticians", "mathematicians", - "mathimatical", "mathematical", - "mathmatician", "mathematician", - "mecahnically", "mechanically", - "mechancially", "mechanically", - "meditaciones", "medications", - "mediteranean", "mediterranean", - "mediterraean", "mediterranean", - "mediterranen", "mediterranean", - "memerization", "memorization", - "memorizacion", "memorization", - "memorozation", "memorization", - "metalurgical", "metallurgical", - "metaphisical", "metaphysical", - "metaphoricly", "metaphorical", - "metaphsyical", "metaphysical", - "metaphyiscal", "metaphysical", - "metaphyscial", "metaphysical", - "metaphysisch", "metaphysics", - "metephorical", "metaphorical", - "metephysical", "metaphysical", - "meterologist", "meteorologist", - "meterosexual", "heterosexual", - "methaporical", "metaphorical", - "methematical", "mathematical", - "metiphorical", "metaphorical", - "metophorical", "metaphorical", - "metorpolitan", "metropolitan", - "metrololitan", "metropolitan", - "metropilitan", "metropolitan", - "metroploitan", "metropolitan", - "metropolians", "metropolis", - "metropoliten", "metropolitan", - "metropolitin", "metropolitan", - "metropoliton", "metropolitan", - "microcentres", "microcenter", - "microphonies", "microphones", - "microscophic", "microscopic", - "microscopice", "microscope", - "microscoptic", "microscopic", - "midfieldiers", "midfielders", - "millenialism", "millennialism", - "millionairre", "millionaire", - "millionaries", "millionaires", - "millioniares", "millionaires", - "minimalisitc", "minimalist", - "minimalisity", "minimalist", - "mininterpret", "misinterpret", - "minipulating", "manipulating", - "minipulation", "manipulation", - "minipulative", "manipulative", - "miracilously", "miraculously", - "miracurously", "miraculous", - "miscarraiges", "miscarriage", - "miscelaneous", "miscellaneous", - "miscellanous", "miscellaneous", - "mischievious", "mischievous", - "misdameanors", "misdemeanors", - "misdeamenors", "misdemeanor", - "misfourtunes", "misfortunes", - "misgoynistic", "misogynistic", - "misinterpert", "misinterpret", - "misinterpred", "misinterpreted", - "misinterprit", "misinterpreting", - "misinterpted", "misinterpret", - "misintrepret", "misinterpret", - "misisonaries", "missionaries", - "misoganistic", "misogynistic", - "misogenistic", "misogynistic", - "misoginystic", "misogynistic", - "misognyistic", "misogynistic", - "misogonistic", "misogynistic", - "misogynisitc", "misogynistic", - "misogynsitic", "misogynistic", - "misogynystic", "misogynistic", - "missionaires", "missionaries", - "mississipppi", "mississippi", - "misspellling", "misspelling", - "misteriously", "mysteriously", - "misundersood", "misunderstood", - "misunderstod", "misunderstood", - "misygonistic", "misogynistic", - "modificacion", "modification", - "modificaiton", "modification", - "modificatons", "modifications", - "modifikation", "modification", - "modivational", "motivational", - "moisterizing", "moisturizing", - 
"moistorizing", "moisturizing", - "moisutrizing", "moisturizing", - "momentarilly", "momentarily", - "monolithisch", "monolithic", - "mositurizing", "moisturizing", - "motherbaords", "motherboards", - "motherborads", "motherboards", - "motivacional", "motivational", - "motovational", "motivational", - "mousturizing", "moisturizing", - "muktitasking", "multitasking", - "mulittasking", "multitasking", - "multinatinal", "multinational", - "multitaksing", "multitasking", - "munipulative", "manipulative", - "mutlitasking", "multitasking", - "mysoganistic", "misogynistic", - "mysogenistic", "misogynistic", - "mysogonistic", "misogynistic", - "mysterioulsy", "mysteriously", - "nacionalists", "nationalists", - "narcisisstic", "narcissistic", - "narcissictic", "narcissistic", - "narcissisism", "narcissism", - "narcissisist", "narcissist", - "narcissisitc", "narcissist", - "narcississts", "narcissist", - "narssicistic", "narcissistic", - "natioanlists", "nationalists", - "nationalisic", "nationalistic", - "nationalisim", "nationalism", - "nationalistc", "nationalistic", - "nationalites", "nationalist", - "nationalitic", "nationalistic", - "nationalitys", "nationalist", - "nationallity", "nationally", - "nationalsits", "nationalists", - "nationalties", "nationalist", - "nazionalists", "nationalists", - "neccessarily", "necessarily", - "neccessities", "necessities", - "necessarilly", "necessarily", - "necessitites", "necessities", - "neckbearders", "neckbeards", - "neckbeardese", "neckbeards", - "neckbeardest", "neckbeards", - "neckbeardies", "neckbeards", - "neckbeardius", "neckbeards", - "negociations", "negotiations", - "negoitations", "negotiations", - "negotiatians", "negotiations", - "negotiatiing", "negotiating", - "negotiationg", "negotiating", - "negotiatiors", "negotiations", - "neigbhorhood", "neighborhoods", - "neigbourhood", "neighbourhood", - "neighboorhod", "neighbourhood", - "neighborhing", "neighboring", - "neighborhods", "neighborhoods", - "neighbourghs", "neighbours", - "neighbourhod", "neighbourhood", - "neighbourood", "neighbourhood", - "neighbrohood", "neighborhoods", - "neighourhood", "neighborhood", - "neoroscience", "neuroscience", - "neruological", "neurological", - "neruoscience", "neuroscience", - "netropolitan", "metropolitan", - "neuorscience", "neuroscience", - "neuralogical", "neurological", - "neuroligical", "neurological", - "neurosceince", "neuroscience", - "neuroscienze", "neuroscience", - "neurosicence", "neuroscience", - "neverhteless", "nevertheless", - "nieghborhood", "neighborhood", - "norhtwestern", "northwestern", - "nothingsness", "nothingness", - "noticeablely", "noticeably", - "notificacion", "notification", - "notificaiton", "notification", - "notificatons", "notifications", - "nuerological", "neurological", - "nueroscience", "neuroscience", - "nutritionnal", "nutritional", - "obersvations", "observations", - "objectivelly", "objectively", - "objectiviser", "objectives", - "objectivitiy", "objectivity", - "obversations", "observations", - "ocassionally", "occasionally", - "occaisonally", "occasionally", - "occasioanlly", "occasionally", - "occassionaly", "occasionally", - "occationally", "occasionally", - "occurrencies", "occurrences", - "offensivelly", "offensively", - "ogranisation", "organisation", - "omniverously", "omnivorously", - "operationnal", "operational", - "opportuniste", "opportunities", - "opportunites", "opportunities", - "oppositition", "opposition", - "opthalmology", "ophthalmology", - "optimistisch", "optimistic", - "optimizacion", 
"optimization", - "optimizating", "optimization", - "optimziation", "optimization", - "optmizations", "optimizations", - "oragnisation", "organisation", - "orchastrated", "orchestrated", - "orchestarted", "orchestrated", - "orchestraded", "orchestrated", - "orchistrated", "orchestrated", - "orgainsation", "organisation", - "orgainzation", "organizations", - "organisaiton", "organisation", - "organisatons", "organisations", - "organistaion", "organisation", - "organizacion", "organization", - "organizaiton", "organization", - "organizativo", "organization", - "organizatons", "organizations", - "organsiation", "organisation", - "organziation", "organization", - "orginasation", "organisation", - "orginazation", "organization", - "orgnaisation", "organisations", - "originallity", "originality", - "outraegously", "outrageously", - "outrageoulsy", "outrageously", - "outragesouly", "outrageously", - "outrageuosly", "outrageously", - "outragiously", "outrageously", - "outsourceing", "outsourcing", - "overbearring", "overbearing", - "overblocking", "overclocking", - "overclcoking", "overclocking", - "overclicking", "overclocking", - "overcloaking", "overclocking", - "overclockign", "overclocking", - "overclokcing", "overclocking", - "overhearting", "overreacting", - "overheathing", "overheating", - "overhtinking", "overthinking", - "overhwelming", "overwhelming", - "overlappping", "overlapping", - "overlcocking", "overclocking", - "overreaktion", "overreaction", - "overwealming", "overwhelming", - "overwhelemed", "overwhelmed", - "overwhemling", "overwhelming", - "overwhleming", "overwhelming", - "owerpowering", "overpowering", - "painkilllers", "painkillers", - "palastinians", "palestinians", - "palesitnians", "palestinians", - "palestenians", "palestinians", - "palestinains", "palestinians", - "palestiniens", "palestinians", - "palestininan", "palestinian", - "palestininas", "palestinians", - "palistinians", "palestinians", - "palythroughs", "playthroughs", - "parapharsing", "paraphrasing", - "paraphenalia", "paraphernalia", - "paraphrashed", "paraphrase", - "paraphrazing", "paraphrasing", - "paraprashing", "paraphrasing", - "paraprhasing", "paraphrasing", - "parenthesees", "parentheses", - "parenthesies", "parenthesis", - "parliamentry", "parliamentary", - "partecipants", "participants", - "partecipated", "participated", - "parternships", "partnership", - "particapated", "participated", - "particiapnts", "participant", - "particiapted", "participated", - "participante", "participate", - "participaste", "participants", - "participatie", "participated", - "participatin", "participation", - "participatns", "participant", - "participaton", "participant", - "participents", "participants", - "particualrly", "particularly", - "particulalry", "particularly", - "particullary", "particularly", - "passionatley", "passionately", - "pathalogical", "pathological", - "pathelogical", "pathological", - "patholigical", "pathological", - "paychedelics", "psychedelics", - "paychiatrist", "psychiatrist", - "paychologist", "psychologist", - "paychopathic", "psychopathic", - "penetratiing", "penetrating", - "penisylvania", "pennsylvania", - "pennsilvania", "pennsylvania", - "pennslyvania", "pennsylvania", - "pennsylvaina", "pennsylvania", - "pennsyvlania", "pennsylvania", - "pennyslvania", "pennsylvania", - "penssylvania", "pennsylvania", - "pentsylvania", "pennsylvania", - "percentagens", "percentages", - "perferential", "preferential", - "performantes", "performances", - "performences", "performances", - 
"perfromances", "performances", - "peridoically", "periodically", - "peripathetic", "peripatetic", - "periphereals", "peripherals", - "peripherials", "peripherals", - "permanantely", "permanently", - "permanentely", "permanently", - "permissiable", "permissible", - "peroidically", "periodically", - "perpatrators", "perpetrators", - "perpatuating", "perpetuating", - "perpertators", "perpetrators", - "perpertrated", "perpetrated", - "perpetraitor", "perpetrator", - "perpetraters", "perpetrators", - "perpetuaters", "perpetuates", - "perpitrators", "perpetrators", - "perposefully", "purposefully", - "perposterous", "preposterous", - "perpretators", "perpetrators", - "perpsectives", "perspectives", - "perputrators", "perpetrators", - "perputuating", "perpetuating", - "persepctives", "perspectives", - "perservation", "preservation", - "perseverence", "perseverance", - "personalites", "personalities", - "personallity", "personally", - "personilized", "personalized", - "perspecitves", "perspectives", - "perspectivas", "perspectives", - "persumptuous", "presumptuous", - "perticularly", "particularly", - "pertubations", "perturbations", - "pessimisitic", "pessimistic", - "pessimisstic", "pessimistic", - "phenomenonal", "phenomenal", - "phenomenonly", "phenomenally", - "phenomonenon", "phenomenon", - "phialdelphia", "philadelphia", - "philadalphia", "philadelphia", - "philadelhpia", "philadelphia", - "philadeplhia", "philadelphia", - "philadlephia", "philadelphia", - "philedalphia", "philadelphia", - "philedelphia", "philadelphia", - "philidalphia", "philadelphia", - "philippinnes", "philippines", - "philippinoes", "philippines", - "philisophers", "philosophers", - "philisophies", "philosophies", - "phillippines", "philippines", - "philosiphers", "philosophers", - "philosiphies", "philosophies", - "philosohpers", "philosopher", - "philosohpies", "philosophies", - "philosophiae", "philosophies", - "philosophics", "philosophies", - "philosophios", "philosophies", - "philospohers", "philosophers", - "philospohies", "philosophies", - "photagrapher", "photographer", - "photochopped", "photoshopped", - "photograhper", "photographer", - "photograpers", "photographers", - "photographes", "photographs", - "photographyi", "photographic", - "photogropher", "photographer", - "photogrpahed", "photographed", - "photogrpaher", "photographer", - "photoshipped", "photoshopped", - "photoshooped", "photoshopped", - "photoshoppad", "photoshopped", - "phychedelics", "psychedelics", - "phychiatrist", "psychiatrist", - "phychologist", "psychologist", - "phychopathic", "psychopathic", - "physcedelics", "psychedelics", - "physciatrist", "psychiatrist", - "physcologist", "psychologist", - "physcopathic", "psychopathic", - "physicallity", "physically", - "physiologial", "physiological", - "pilgrimmages", "pilgrimages", - "pitchforkers", "pitchforks", - "pkaythroughs", "playthroughs", - "plabeswalker", "planeswalker", - "plaestinians", "palestinians", - "planeswaller", "planeswalker", - "planeswlaker", "planeswalker", - "planetwalker", "planeswalker", - "plansewalker", "planeswalker", - "plauthroughs", "playthroughs", - "playhtroughs", "playthroughs", - "playtgroughs", "playthroughs", - "playthorughs", "playthroughs", - "playthourghs", "playthroughs", - "playthrougth", "playthroughs", - "playthrouhgs", "playthroughs", - "playthtoughs", "playthroughs", - "playtrhoughs", "playthroughs", - "populationes", "populations", - "pornograpghy", "pornography", - "porportional", "proportional", - "portabillity", "portability", - 
"portagonists", "protagonists", - "positionning", "positioning", - "positivitely", "positivity", - "possessivize", "possessive", - "possibillity", "possibility", - "possiblility", "possibility", - "possiblities", "possibilities", - "powerfisting", "powerlifting", - "powerlfiting", "powerlifting", - "powerlifitng", "powerlifting", - "powerlisting", "powerlifting", - "powetlifting", "powerlifting", - "powrrlifting", "powerlifting", - "practicioner", "practitioner", - "practisioner", "practitioner", - "pratictioner", "practitioners", - "precedessors", "predecessors", - "preconveived", "preconceived", - "predacessors", "predecessors", - "predeccesors", "predecessor", - "predecesores", "predecessor", - "predescesors", "predecessors", - "predessecors", "predecessors", - "predetermind", "predetermined", - "predicessors", "predecessors", - "predocessors", "predecessors", - "predomiantly", "predominately", - "predominanty", "predominantly", - "predominatly", "predominantly", - "preferantial", "preferential", - "preferentail", "preferential", - "preformances", "performances", - "preinitalize", "preinitialize", - "preliminarly", "preliminary", - "prematurelly", "prematurely", - "premillenial", "premillennial", - "preocupation", "preoccupation", - "preperations", "preparations", - "prepetrators", "perpetrators", - "prepetuating", "perpetuating", - "prepostorous", "preposterous", - "preposturous", "preposterous", - "prerequisets", "prerequisite", - "prescirption", "prescriptions", - "prescribtion", "prescription", - "prescripcion", "prescription", - "prescriptons", "prescriptions", - "prescritpion", "prescriptions", - "presedential", "presidential", - "presentacion", "presentation", - "presentaiton", "presentations", - "preservacion", "preservation", - "preservating", "preservation", - "preservativo", "preservation", - "presidencial", "presidential", - "presidenital", "presidential", - "presidentail", "presidential", - "presnetation", "presentations", - "presonalized", "personalized", - "prespectives", "perspectives", - "presrciption", "prescriptions", - "presumpteous", "presumptuous", - "presumputous", "presumptuous", - "prevantative", "preventative", - "preventation", "presentation", - "preventetive", "preventative", - "preventitive", "preventative", - "prezidential", "presidential", - "principlaity", "principality", - "probabiliste", "probabilities", - "probabilites", "probabilities", - "probabillity", "probability", - "probablistic", "probabilistic", - "proclomation", "proclamation", - "proconceived", "preconceived", - "profesisonal", "professionals", - "professiinal", "professionalism", - "professioanl", "professionals", - "professiomal", "professionalism", - "professionel", "professional", - "professionsl", "professionalism", - "professoinal", "professionals", - "professonial", "professionals", - "proffesional", "professional", - "proficientcy", "proficiency", - "profissional", "professional", - "profitabiliy", "profitability", - "profitabilty", "profitability", - "profressions", "progressions", - "progatonists", "protagonists", - "programmeurs", "programmer", - "progressieve", "progressive", - "progressioin", "progressions", - "progressiong", "progressing", - "progressisme", "progresses", - "progressiste", "progresses", - "progressivas", "progressives", - "progressivey", "progressively", - "progressivly", "progressively", - "progressivsm", "progressives", - "progresssing", "progressing", - "progresssion", "progressions", - "progresssive", "progressives", - "prohibitting", "prohibiting", - 
"projecticles", "projectiles", - "proletariaat", "proletariat", - "proletariant", "proletariat", - "proletaricat", "proletariat", - "prominantely", "prominently", - "promiscuious", "promiscuous", - "promisculous", "promiscuous", - "promotionnal", "promotional", - "pronounceing", "pronouncing", - "pronunciaton", "pronunciation", - "propertional", "proportional", - "propesterous", "preposterous", - "proportianal", "proportional", - "proportionel", "proportional", - "proposterous", "preposterous", - "proprotional", "proportional", - "prostetution", "prostitution", - "prostitition", "prostitution", - "prostitucion", "prostitution", - "prostituiton", "prostitution", - "prostitutiei", "prostitute", - "protaganists", "protagonists", - "protaginists", "protagonists", - "protagnoists", "protagonists", - "protestantes", "protestants", - "protoganists", "protagonists", - "prouncements", "pronouncements", - "pruposefully", "purposefully", - "pscyhologist", "psychologist", - "pscyhopathic", "psychopathic", - "pshyciatrist", "psychiatrist", - "pshycologist", "psychologist", - "pshycopathic", "psychopathic", - "psichologist", "psychologist", - "psychaitrist", "psychiatrist", - "psychedellic", "psychedelic", - "psychedilics", "psychedelics", - "psychemedics", "psychedelics", - "psychiatirst", "psychiatrists", - "psychiatrics", "psychiatrist", - "psychiatrict", "psychiatrist", - "psychiatrits", "psychiatrists", - "psychistrist", "psychiatrist", - "psychodelics", "psychedelics", - "psycholigist", "psychologist", - "psychologial", "psychological", - "psychologits", "psychologists", - "psychologyst", "psychologist", - "psychopathes", "psychopaths", - "psychyatrist", "psychiatrist", - "puplications", "publications", - "puritannical", "puritanical", - "purpetrators", "perpetrators", - "purpetuating", "perpetuating", - "purpusefully", "purposefully", - "pyschedelics", "psychedelics", - "pyschiatrist", "psychiatrist", - "pyschologist", "psychologist", - "pyschopathic", "psychopathic", - "qualificaton", "qualification", - "qualifierais", "qualifiers", - "qualtitative", "quantitative", - "quantatitive", "quantitative", - "quantititive", "quantitative", - "quarterblack", "quarterback", - "quesitonable", "questionable", - "questionalbe", "questionable", - "questionning", "questioning", - "questionsign", "questioning", - "radioactieve", "radioactive", - "rationallity", "rationally", - "reactionairy", "reactionary", - "reactionnary", "reactionary", - "realisticaly", "realistically", - "realisticlly", "realistically", - "reasonablely", "reasonably", - "recallection", "recollection", - "reccomending", "recommending", - "reccommended", "recommended", - "recepcionist", "receptionist", - "receptionest", "receptionist", - "recgonizable", "recognizable", - "reciporcated", "reciprocate", - "reciprociate", "reciprocate", - "reciprocrate", "reciprocate", - "recognizible", "recognizable", - "recolleciton", "recollection", - "recommanding", "recommending", - "recommendeds", "recommends", - "recommendors", "recommends", - "recommeneded", "recommended", - "recommenting", "recommending", - "recongizable", "recognizable", - "recontructed", "reconstructed", - "recpetionist", "receptionist", - "recreacional", "recreational", - "recriational", "recreational", - "referenceing", "referencing", - "refirgerator", "refrigerator", - "refriderator", "refrigerator", - "refrigarator", "refrigerator", - "refrigerador", "refrigerator", - "refrigerater", "refrigerator", - "refrigirator", "refrigerator", - "regenaration", "regeneration", - 
"regeneracion", "regeneration", - "regestration", "registration", - "registartion", "registration", - "registrating", "registration", - "regrigerator", "refrigerator", - "regulatorias", "regulators", - "regulatories", "regulators", - "regulatorios", "regulators", - "reicarnation", "reincarnation", - "reinforcemnt", "reinforcement", - "reinitalised", "reinitialised", - "reinitalises", "reinitialises", - "reinitalized", "reinitialized", - "reinitalizes", "reinitializes", - "reinstallled", "reinstalled", - "reisntalling", "reinstalling", - "relaitonship", "relationships", - "relatinoship", "relationships", - "reliabillity", "reliability", - "reluctanctly", "reluctantly", - "remarkablely", "remarkably", - "rememberance", "remembrance", - "reminiscient", "reminiscent", - "renaissaince", "renaissance", - "renegeration", "regeneration", - "reorganision", "reorganisation", - "repalcements", "replacements", - "repersenting", "representing", - "reporduction", "reproduction", - "reporductive", "reproductive", - "reprecussion", "repercussions", - "representate", "representative", - "represention", "representing", - "representive", "representative", - "reproducable", "reproducible", - "reproduccion", "reproduction", - "reproduciton", "reproduction", - "reproducting", "reproduction", - "reproductivo", "reproduction", - "reproduktion", "reproduction", - "repsectfully", "respectfully", - "repsectively", "respectively", - "republicanas", "republicans", - "republicanos", "republicans", - "republicants", "republicans", - "republicians", "republicans", - "requerimento", "requirement", - "requeriments", "requirements", - "requierments", "requirements", - "requriements", "requirements", - "resembelance", "resemblance", - "reseptionist", "receptionist", - "reserrection", "resurrection", - "resintalling", "reinstalling", - "resistancies", "resistances", - "resistencias", "resistances", - "respecitvely", "respectively", - "respectabile", "respectable", - "respectivily", "respectively", - "respectivley", "respectively", - "respectuflly", "respectfully", - "respiratiory", "respiratory", - "responsabile", "responsible", - "responsaveis", "responsive", - "responsbilty", "responsibly", - "responsibile", "responsible", - "responsibily", "responsibility", - "responsibley", "responsibly", - "responsibliy", "responsibly", - "responsiblty", "responsibly", - "ressemblance", "resemblance", - "ressemblence", "resemblance", - "ressurection", "resurrection", - "restaurantes", "restaurants", - "restauration", "restoration", - "restauraunts", "restaurants", - "restirctions", "restrictions", - "restrainting", "restraining", - "restrcitions", "restriction", - "restricitons", "restrictions", - "resurreccion", "resurrection", - "resurrektion", "resurrection", - "retalitation", "retaliation", - "retributioon", "retribution", - "retroactivly", "retroactively", - "revolutionay", "revolutionary", - "revolutionos", "revolutions", - "rezurrection", "resurrection", - "rictatorship", "dictatorship", - "ridicilously", "ridiculously", - "ridicoulusly", "ridiculously", - "righteouness", "righteousness", - "rockerfeller", "rockefeller", - "rollercoaser", "rollercoaster", - "rollercoater", "rollercoaster", - "romanitcally", "romantically", - "roundabounts", "roundabout", - "rudimentatry", "rudimentary", - "rysurrection", "resurrection", - "sacksonville", "jacksonville", - "sacreligious", "sacrilegious", - "sacrificeing", "sacrificing", - "saksatchewan", "saskatchewan", - "salughtering", "slaughtering", - "sanctionning", "sanctioning", - 
"sarcasticaly", "sarcastically", - "sarcasticlly", "sarcastically", - "sascatchewan", "saskatchewan", - "saskatcehwan", "saskatchewan", - "saskatchawan", "saskatchewan", - "saskatechwan", "saskatchewan", - "sasketchawan", "saskatchewan", - "sasketchewan", "saskatchewan", - "sasktachewan", "saskatchewan", - "satasfaction", "satisfaction", - "satasfactory", "satisfactory", - "satisfaccion", "satisfaction", - "satisfacting", "satisfaction", - "satisfcation", "satisfaction", - "satisfiction", "satisfaction", - "satistactory", "satisfactory", - "satsifaction", "satisfaction", - "satsifactory", "satisfactory", - "scandanivian", "scandinavian", - "scandenavian", "scandinavian", - "scandianvian", "scandinavian", - "scandinacian", "scandinavian", - "scandinaivan", "scandinavia", - "scandinavica", "scandinavian", - "scandinavien", "scandinavian", - "scandinavion", "scandinavian", - "scandivanian", "scandinavian", - "scandonavian", "scandinavian", - "schizophrena", "schizophrenia", - "scholarhsips", "scholarships", - "scholerships", "scholarships", - "scholorships", "scholarships", - "scnadinavian", "scandinavian", - "screenshoots", "screenshot", - "sensationail", "sensational", - "sensationnal", "sensational", - "sensibilites", "sensibilities", - "sensitivitiy", "sensitivity", - "sentimentals", "sentiments", - "sertificates", "certificates", - "serveillance", "surveillance", - "seskatchewan", "saskatchewan", - "shakesperean", "shakespeare", - "shamelessely", "shamelessly", - "shamelessley", "shamelessly", - "shampionship", "championship", - "shardholders", "shareholders", - "shenanigains", "shenanigans", - "shenanigangs", "shenanigans", - "shenaniganns", "shenanigans", - "shenanighans", "shenanigans", - "shopkeeepers", "shopkeepers", - "showboarding", "snowboarding", - "siginificant", "significant", - "significanly", "significantly", - "significante", "significance", - "significanty", "significantly", - "significatly", "significantly", - "signleplayer", "singleplayer", - "simaltaneous", "simultaneous", - "simeltaneous", "simultaneous", - "similaraties", "similarities", - "similiarites", "similarities", - "similiarties", "similarities", - "similiraties", "similarities", - "similtaneous", "simultaneous", - "simliarities", "similarities", - "simlutaneous", "simultaneous", - "simpathizers", "sympathizers", - "simplistisch", "simplistic", - "simulatenous", "simultaneous", - "simulatneous", "simultaneous", - "simultaenous", "simultaneous", - "simultaneuos", "simultaneous", - "simultanious", "simultaneous", - "simulteneous", "simultaneous", - "singelplayer", "singleplayer", - "singlepalyer", "singleplayer", - "sinlgeplayer", "singleplayer", - "situationals", "situations", - "situationnal", "situational", - "skandinavian", "scandinavian", - "skateboaring", "skateboarding", - "skrawberries", "strawberries", - "slaugthering", "slaughtering", - "sloughtering", "slaughtering", - "sluaghtering", "slaughtering", - "snowballling", "snowballing", - "snowbaording", "snowboarding", - "socialistisk", "socialists", - "socialogical", "sociological", - "socioeconimc", "socioeconomic", - "socioeconmic", "socioeconomic", - "socioligical", "sociological", - "sociopolical", "sociological", - "somethingest", "somethings", - "sophisticaed", "sophisticated", - "sophisticted", "sophisticated", - "southamption", "southampton", - "southernerns", "southerners", - "sovereighnty", "sovereignty", - "sovereignety", "sovereignty", - "sovereignity", "sovereignty", - "specialistes", "specialists", - "specializare", "specialize", - 
"specializate", "specialize", - "specializeds", "specializes", - "specializied", "specialize", - "speciallized", "specialised", - "specifcation", "specification", - "spectacuarly", "spectacular", - "spectaculair", "spectacular", - "spectaculary", "spectacularly", - "spectacullar", "spectacularly", - "specualtions", "speculation", - "spermatozoan", "spermatozoon", - "spesifically", "specifically", - "spirituallly", "spiritually", - "spirtiuality", "spirituality", - "spirutuality", "spirituality", - "spontaneosly", "spontaneously", - "spontaneouly", "spontaneously", - "spreadhseets", "spreadsheets", - "spreadsheats", "spreadsheets", - "spreadsheeds", "spreadsheets", - "spreadsheeet", "spreadsheets", - "standartized", "standardized", - "standerdized", "standardized", - "stardardized", "standardized", - "starightened", "straightened", - "starwberries", "strawberries", - "statisticaly", "statistically", - "stereotpying", "stereotyping", - "stereotypers", "stereotypes", - "stereotypian", "stereotyping", - "steriotyping", "stereotyping", - "steroetyping", "stereotyping", - "steryotyping", "stereotyping", - "straigntened", "straightened", - "straigthened", "straightened", - "strategicaly", "strategically", - "strategiclly", "strategically", - "strawburries", "strawberries", - "streemlining", "streamlining", - "streightened", "straightened", - "strenghening", "strengthening", - "strenghtened", "strengthened", - "strengtheing", "strengthening", - "stroytelling", "storytelling", - "subconcsious", "subconscious", - "subconsicous", "subconscious", - "subcouncious", "subconscious", - "subcsription", "subscriptions", - "subesquently", "subsequently", - "subjectivety", "subjectively", - "subjectivily", "subjectively", - "subjectivley", "subjectively", - "subjudgation", "subjugation", - "subredditors", "subreddits", - "subscirption", "subscriptions", - "subsconcious", "subconscious", - "subscribbers", "subscribers", - "subscribbing", "subscribing", - "subscribirse", "subscriber", - "subscribtion", "subscription", - "subscriptons", "subscriptions", - "subscritpion", "subscriptions", - "subscrpition", "subscriptions", - "subsiquently", "subsequently", - "subsrciption", "subscriptions", - "subsricption", "subscriptions", - "substantialy", "substantially", - "substantitve", "substantive", - "substitition", "substitution", - "substituters", "substitutes", - "substitutivo", "substitution", - "substitutues", "substitutes", - "substracting", "subtracting", - "substraction", "subtraction", - "subterranian", "subterranean", - "succsessfull", "successful", - "sunconscious", "subconscious", - "supermarkeds", "supermarkets", - "supermarkers", "supermarkets", - "supermarkert", "supermarkets", - "supermarkten", "supermarket", - "supermarktes", "supermarkets", - "supernarkets", "supermarkets", - "supernatrual", "supernatural", - "supersticion", "superstition", - "superstision", "superstition", - "superstitios", "superstitious", - "superstitous", "superstitious", - "supervisiors", "supervisors", - "supervisoras", "supervisors", - "supervisores", "supervisors", - "supllemental", "supplemental", - "supplamental", "supplemental", - "supplamented", "supplemented", - "supplimental", "supplemental", - "suppresssion", "suppression", - "supscription", "subscription", - "supsiciously", "suspiciously", - "surprizingly", "surprisingly", - "surrenderred", "surrendered", - "surrundering", "surrendering", - "survaillance", "surveillance", - "survaillence", "surveillance", - "survallience", "surveillance", - "surveillence", 
"surveillance", - "survelliance", "surveillance", - "surviellance", "surveillance", - "survivabiity", "survivability", - "survivabiliy", "survivability", - "survivabilty", "survivability", - "susceptiable", "susceptible", - "susceptibile", "susceptible", - "suspeciously", "suspiciously", - "suspicioulsy", "suspiciously", - "suspiciuosly", "suspiciously", - "suspisiously", "suspiciously", - "sustainabily", "sustainability", - "symapthizers", "sympathizers", - "symetrically", "symmetrically", - "symmetricaly", "symmetrically", - "sympathethic", "sympathetic", - "sympathsizer", "sympathizers", - "sympathyzers", "sympathizers", - "sympethizers", "sympathizers", - "symphatizers", "sympathizers", - "sympithizers", "sympathizers", - "syncronously", "synchronously", - "sysmatically", "systematically", - "systematisch", "systematic", - "tablespooons", "tablespoon", - "tacticallity", "tactically", - "tangencially", "tangentially", - "tangenitally", "tangentially", - "tangientally", "tangentially", - "teamfighters", "teamfights", - "teansylvania", "transylvania", - "techanically", "mechanically", - "techincality", "technicality", - "technologial", "technological", - "telelevision", "television", - "teleportaion", "teleportation", - "teleportaton", "teleportation", - "temepratures", "temperatures", - "temparatures", "temperatures", - "temperaturas", "temperatures", - "temporarilly", "temporarily", - "tempreatures", "temperatures", - "tempuratures", "temperatures", - "tengentially", "tangentially", - "termendously", "tremendously", - "territorrial", "territorial", - "territorries", "territories", - "testasterone", "testosterone", - "testestorone", "testosterone", - "thanskgiving", "thanksgiving", - "theologicial", "theological", - "theoreticaly", "theoretically", - "thermomenter", "thermometer", - "thermomether", "thermometer", - "thumbnailers", "thumbnails", - "thunderboldt", "thunderbolt", - "tindergarten", "kindergarten", - "torubleshoot", "troubleshoot", - "totalitarion", "totalitarian", - "totalitatian", "totalitarian", - "touchscreeen", "touchscreen", - "traditionaly", "traditionally", - "traditionnal", "traditional", - "tradtionally", "traditionally", - "tramendously", "tremendously", - "tramsformers", "transformers", - "tramsforming", "transforming", - "tranditional", "transitional", - "tranistional", "transitional", - "tranistioned", "transitioned", - "tranlsations", "translations", - "tranmsission", "transmissions", - "transaltions", "translations", - "transaprency", "transparency", - "transational", "transitional", - "transcations", "transactions", - "transcendant", "transcendent", - "transcripton", "transcription", - "transcriptus", "transcripts", - "transesxuals", "transsexuals", - "transfarmers", "transformers", - "transfarring", "transferring", - "transferrred", "transferred", - "transformare", "transformers", - "transformase", "transforms", - "transformees", "transforms", - "transforners", "transformers", - "transfromers", "transformers", - "transfroming", "transforming", - "transgenderd", "transgendered", - "transgendred", "transgendered", - "transgenered", "transgender", - "transicional", "transitional", - "transilvania", "transylvania", - "transimssion", "transmissions", - "transisioned", "transitioned", - "translastion", "translations", - "translateing", "translating", - "translationg", "translating", - "translucient", "translucent", - "translyvania", "transylvania", - "transmisions", "transmission", - "transmisison", "transmission", - "transmissons", "transmissions", - 
"transmitirte", "transmitter", - "transmittted", "transmitted", - "transmorfers", "transformer", - "transofrmers", "transformers", - "transofrming", "transforming", - "transparancy", "transparency", - "transparenty", "transparency", - "transparrent", "transparent", - "transperancy", "transparency", - "transperency", "transparency", - "transplantes", "transplants", - "transporteur", "transporter", - "transportion", "transporting", - "transpotting", "transporting", - "transsmision", "transmissions", - "transylmania", "transylvania", - "transylvanai", "transylvania", - "trasnferring", "transferring", - "trasnformers", "transformers", - "trasnforming", "transforming", - "trasnmission", "transmissions", - "trasnparency", "transparency", - "trasnporting", "transporting", - "trememdously", "tremendously", - "tremendoulsy", "tremendously", - "tremondously", "tremendously", - "troubelshoot", "troubleshoot", - "troublehsoot", "troubleshoot", - "trumendously", "tremendously", - "trustworthly", "trustworthy", - "ubsubscribed", "unsubscribed", - "udnerpowered", "underpowered", - "umbelievable", "unbelievable", - "umemployment", "unemployment", - "unaccaptable", "unacceptable", - "unacceptible", "unacceptable", - "unaccpetable", "unacceptable", - "unacompanied", "unaccompanied", - "unappealling", "unappealing", - "unattractice", "unattractive", - "unautherized", "unauthorized", - "unauthroized", "unauthorized", - "unbeleivable", "unbelievable", - "unbeleivably", "unbelievably", - "unbeliavable", "unbelievable", - "unbeliavably", "unbelievably", - "unbeliebable", "unbelievable", - "unbelieveble", "unbelievable", - "unbelievibly", "unbelievably", - "unbeliveable", "unbelievable", - "unbeliveably", "unbelievably", - "unbelizeable", "unbelievable", - "unbolievable", "unbelievable", - "uncertainity", "uncertainty", - "uncertaintly", "uncertainty", - "uncompatible", "incompatible", - "unconditinal", "unconditional", - "unconsciosly", "unconsciously", - "unconsciouly", "unconsciously", - "unconsistent", "inconsistent", - "unconvenient", "inconvenient", - "unconvential", "unconventional", - "undecideable", "undecidable", - "undefinitely", "indefinitely", - "undeniablely", "undeniably", - "undergradate", "undergraduate", - "undergradute", "undergraduate", - "underminding", "undermining", - "undermineing", "undermining", - "undermineras", "undermines", - "undermineres", "undermines", - "underminging", "undermining", - "underminning", "undermining", - "undertakeing", "undertaking", - "underwhelimg", "underwhelming", - "underwheling", "underwhelming", - "undesireable", "undesirable", - "undoubtedbly", "undoubtedly", - "unemployemnt", "unemployment", - "unemplyoment", "unemployment", - "unempolyment", "unemployment", - "unenployment", "unemployment", - "unequalities", "inequalities", - "unexpectadly", "unexpectedly", - "unexpectetly", "unexpectedly", - "unexpectidly", "unexpectedly", - "unexperience", "inexperience", - "unexpextedly", "unexpectedly", - "unexplicably", "inexplicably", - "unforgetable", "unforgettable", - "unforgiveble", "unforgivable", - "unforgivible", "unforgivable", - "unfortunatly", "unfortunately", - "unfortunetly", "unfortunately", - "unilatreally", "unilaterally", - "uniliterally", "unilaterally", - "unimpresssed", "unimpressed", - "uninitalised", "uninitialised", - "uninitalized", "uninitialized", - "uninstallimg", "uninstalling", - "uninstallled", "uninstalled", - "unintentinal", "unintentional", - "uninteresing", "uninteresting", - "uninterneted", "uninterested", - "uninterruped", 
"uninterrupted", - "uninterupted", "uninterrupted", - "unisntalling", "uninstalling", - "unitesstates", "unitedstates", - "univerisites", "universities", - "univeristies", "universities", - "universitets", "universities", - "unliaterally", "unilaterally", - "unneccessary", "unnecessary", - "unnecesarily", "unnecessarily", - "unnecessairy", "unnecessarily", - "unnecessarly", "unnecessarily", - "unnistalling", "uninstalling", - "unpredictabe", "unpredictable", - "unpreductive", "unproductive", - "unproduktive", "unproductive", - "unrealisitic", "unrealistic", - "unreaponsive", "unresponsive", - "unreasonalby", "unreasonably", - "unrepsonsive", "unresponsive", - "unresponcive", "unresponsive", - "unresponisve", "unresponsive", - "unresponsibe", "unresponsive", - "unrestircted", "unrestricted", - "unrestrcited", "unrestricted", - "unristricted", "unrestricted", - "unseccessful", "unsuccessful", - "unsespecting", "unsuspecting", - "unsibscribed", "unsubscribed", - "unsoliciated", "unsolicited", - "unsolicitied", "unsolicited", - "unsubscirbed", "unsubscribed", - "unsubscrible", "unsubscribed", - "unsubscrided", "unsubscribed", - "unsubscriped", "unsubscribed", - "unsubscrubed", "unsubscribed", - "unsubsrcibed", "unsubscribed", - "unsucessfull", "unsuccessful", - "unsunscribed", "unsubscribed", - "unsurprizing", "unsurprising", - "unsusbcribed", "unsubscribed", - "unsustainble", "unsustainable", - "unvelievable", "unbelievable", - "unvelievably", "unbelievably", - "unviersities", "universities", - "unvulnerable", "invulnerable", - "varification", "verification", - "vegetarianas", "vegetarians", - "vegetarianos", "vegetarians", - "verficiation", "verification", - "verificacion", "verification", - "verificaiton", "verification", - "verifikation", "verification", - "vernaculaire", "vernacular", - "versatillity", "versatility", - "verticallity", "vertically", - "videogamemes", "videogames", - "visualizaton", "visualization", - "vocabularily", "vocabulary", - "vocabularity", "vocabulary", - "volonteering", "volunteering", - "volounteered", "volunteered", - "voluntarilly", "voluntarily", - "volunterring", "volunteering", - "vulnerabilty", "vulnerability", - "weightlifing", "weightlifting", - "withdrawalls", "withdrawals", - "withdrawling", "withdrawing", - "withdrawning", "withdrawing", - "wonderfullly", "wonderfully", - "worshippping", "worshipping", - "xenophobical", "xenophobia", - "abandenment", "abandonment", - "abandomnent", "abandonment", - "abandonding", "abandoning", - "abandonnent", "abandonment", - "abandonning", "abandoning", - "abbreviatin", "abbreviation", - "abbreviaton", "abbreviation", - "abdominable", "abdominal", - "abomanation", "abomination", - "abominacion", "abomination", - "abomonation", "abomination", - "abonimation", "abomination", - "aboriginial", "aboriginal", - "aborigional", "aboriginal", - "abreviation", "abbreviation", - "abritrarily", "arbitrarily", - "abritration", "arbitration", - "absolutelly", "absolutely", - "absolutelys", "absolutes", - "absolutisme", "absolutes", - "absolutiste", "absolutes", - "abstraccion", "abstraction", - "abstraktion", "abstraction", - "abstruction", "abstraction", - "abundancies", "abundances", - "academicaly", "academically", - "academicese", "academics", - "accelarated", "accelerated", - "accelarator", "accelerator", - "accelerater", "accelerator", - "acceleratie", "accelerate", - "acceleratio", "accelerator", - "acceleraton", "acceleration", - "accelorated", "accelerated", - "accelorator", "accelerator", - "acceptabelt", "acceptable", - 
"accesseries", "accessories", - "accessibile", "accessible", - "accessibily", "accessibility", - "accessoires", "accessories", - "accidantely", "accidently", - "accidentaly", "accidentally", - "accidentely", "accidently", - "accidential", "accidental", - "accidentily", "accidently", - "accidentlay", "accidently", - "accidentley", "accidently", - "accidentlly", "accidently", - "accomadated", "accommodated", - "accomadates", "accommodates", - "accommadate", "accommodate", - "accommidate", "accommodate", - "accomodated", "accommodated", - "accomodates", "accommodates", - "accomondate", "accommodate", - "accompained", "accompanied", - "accompanyed", "accompanied", - "accompianed", "accompanied", - "accompinied", "accompanied", - "accomplises", "accomplishes", - "accomplishs", "accomplishes", - "accomponied", "accompanied", - "accountatns", "accountants", - "accountents", "accountants", - "accquainted", "acquainted", - "accrediated", "accredited", - "accreditied", "accredited", - "accreditted", "accredited", - "acculumated", "accumulated", - "accumalated", "accumulated", - "accumelated", "accumulated", - "accumilated", "accumulated", - "accumulatin", "accumulation", - "accumulaton", "accumulation", - "accuratelly", "accurately", - "accustommed", "accustomed", - "acheivement", "achievement", - "acheivments", "achievements", - "achievemint", "achievement", - "achievemnts", "achievements", - "achievments", "achievements", - "achivements", "achievements", - "acknolwedge", "acknowledge", - "acknoweldge", "acknowledge", - "acknowleded", "acknowledged", - "acknowlegde", "acknowledge", - "acknowleged", "acknowledge", - "acknowleges", "acknowledges", - "acknwoledge", "acknowledges", - "acomplished", "accomplished", - "acopalyptic", "apocalyptic", - "acquaintace", "acquaintance", - "acquisation", "acquisition", - "activateing", "activating", - "activationg", "activating", - "activistion", "activision", - "additinally", "additionally", - "additionaly", "additionally", - "additonally", "additionally", - "adequatedly", "adequately", - "adjectiveus", "adjectives", - "administerd", "administered", - "administrar", "administrator", - "administren", "administer", - "administrer", "administer", - "administres", "administer", - "administrez", "administer", - "adminstered", "administered", - "adminstrate", "administrate", - "admittadely", "admittedly", - "adolencence", "adolescence", - "adolescance", "adolescence", - "adolescense", "adolescence", - "advantadges", "advantages", - "advantageos", "advantageous", - "advantageus", "advantageous", - "advantagous", "advantageous", - "adventerous", "adventures", - "adventourus", "adventurous", - "adversiting", "advertising", - "advertisors", "advertisers", - "advertisted", "advertised", - "aesthethics", "aesthetics", - "afficionado", "aficionado", - "affiliction", "affiliation", - "affirmitave", "affirmative", - "affirmitive", "affirmative", - "affixiation", "affiliation", - "affrimative", "affirmative", - "afgahnistan", "afghanistan", - "afganhistan", "afghanistan", - "afghanastan", "afghanistan", - "afghansitan", "afghanistan", - "afhganistan", "afghanistan", - "afternarket", "aftermarket", - "afterthougt", "afterthought", - "aggaravates", "aggravates", - "aggragating", "aggravating", - "aggregatore", "aggregate", - "aggressivly", "aggressively", - "aggresssion", "aggression", - "aggrovating", "aggravating", - "agnostacism", "agnosticism", - "agnostisicm", "agnosticism", - "agnostisism", "agnosticism", - "agnostocism", "agnosticism", - "agnsoticism", "agnosticism", - 
"agonsticism", "agnosticism", - "agressively", "aggressively", - "agressivley", "agressive", - "agressivnes", "agressive", - "agricolture", "agriculture", - "agriculteur", "agriculture", - "agricultral", "agricultural", - "agricultual", "agricultural", - "agricutlure", "agriculture", - "ahtleticism", "athleticism", - "alcoholicas", "alcoholics", - "alcoholicos", "alcoholics", - "alcoholisim", "alcoholism", - "algorithems", "algorithm", - "algorithims", "algorithm", - "algorithmes", "algorithms", - "algorithmns", "algorithms", - "algorithmus", "algorithms", - "algorithyms", "algorithm", - "algorythims", "algorithms", - "alientating", "alienating", - "alleigances", "allegiance", - "alltogether", "altogether", - "alterantive", "alternative", - "alternatley", "alternately", - "alternitive", "alternative", - "altheticism", "athleticism", - "altnerately", "alternately", - "altruisitic", "altruistic", - "altruistric", "altruistic", - "amalgomated", "amalgamated", - "ambulancier", "ambulance", - "amerliorate", "ameliorate", - "ammendments", "amendments", - "ampehtamine", "amphetamine", - "ampethamine", "amphetamine", - "amphetamies", "amphetamines", - "amphetamins", "amphetamines", - "amphetemine", "amphetamine", - "amphetimine", "amphetamine", - "amphetmaine", "amphetamines", - "analyticals", "analytics", - "anarchistes", "anarchists", - "ancedotally", "anecdotally", - "androgenous", "androgynous", - "anecdatally", "anecdotally", - "anecdotelly", "anecdotally", - "anecodtally", "anecdotally", - "anectodally", "anecdotally", - "anectotally", "anecdotally", - "anedoctally", "anecdotally", - "angosticism", "agnosticism", - "anihilation", "annihilation", - "anitbiotics", "antibiotics", - "annihalated", "annihilated", - "annihilaton", "annihilation", - "annihilited", "annihilated", - "annihliated", "annihilated", - "annilihated", "annihilated", - "anniversery", "anniversary", - "annonymouse", "anonymous", - "announceing", "announcing", - "announcemet", "announcements", - "announcemnt", "announcement", - "announcents", "announces", - "annoymously", "anonymously", - "anonamously", "anonymously", - "anonimously", "anonymously", - "anonmyously", "anonymously", - "anonomously", "anonymously", - "anonymousny", "anonymously", - "anouncement", "announcement", - "antagonisic", "antagonistic", - "antagonistc", "antagonistic", - "antagonstic", "antagonist", - "anthropolgy", "anthropology", - "anthropoloy", "anthropology", - "antibiodics", "antibiotics", - "antibioitcs", "antibiotic", - "antibioitic", "antibiotic", - "antibitoics", "antibiotics", - "antiboitics", "antibiotics", - "anticapated", "anticipated", - "anticiapted", "anticipated", - "anticipatin", "anticipation", - "antiobitics", "antibiotic", - "antiquaited", "antiquated", - "antisipated", "anticipated", - "apacolyptic", "apocalyptic", - "apocaliptic", "apocalyptic", - "apocalpytic", "apocalyptic", - "apocalytpic", "apocalyptic", - "apolagizing", "apologizing", - "apolegetics", "apologetics", - "apologistas", "apologists", - "apologistes", "apologists", - "apostrophie", "apostrophe", - "apparantely", "apparently", - "appareances", "appearances", - "apparentely", "apparently", - "appartments", "apartments", - "appeareance", "appearance", - "appearences", "appearances", - "apperciated", "appreciated", - "apperciates", "appreciates", - "appereances", "appearances", - "applicabile", "applicable", - "applicaiton", "application", - "applicatins", "applicants", - "applicatons", "applications", - "appoitnment", "appointments", - "apporaching", "approaching", - 
"apporpriate", "appropriate", - "apporximate", "approximate", - "appraoching", "approaching", - "apprearance", "appearance", - "apprecaited", "appreciated", - "apprecaites", "appreciates", - "appreciaite", "appreciative", - "appreciatie", "appreciative", - "appreciatin", "appreciation", - "appreciaton", "appreciation", - "appreciatve", "appreciative", - "appreicated", "appreciated", - "appreicates", "appreciates", - "apprentince", "apprentice", - "appriciated", "appreciated", - "appriciates", "appreciates", - "apprieciate", "appreciate", - "appropirate", "appropriate", - "appropraite", "appropriate", - "appropriato", "appropriation", - "approxamate", "approximate", - "approxiamte", "approximate", - "approxmiate", "approximate", - "aprehensive", "apprehensive", - "apsirations", "aspirations", - "aqcuisition", "acquisition", - "aquaintance", "acquaintance", - "aquiantance", "acquaintance", - "arbitrairly", "arbitrarily", - "arbitralily", "arbitrarily", - "arbitrarely", "arbitrarily", - "arbitrarion", "arbitration", - "arbitratily", "arbitrarily", - "arbritarily", "arbitrarily", - "arbritation", "arbitration", - "arcaheology", "archaeology", - "archaoelogy", "archeology", - "archeaology", "archaeology", - "archimedian", "archimedean", - "architechts", "architect", - "architectes", "architects", - "architecure", "architecture", - "argiculture", "agriculture", - "argumentate", "argumentative", - "aribtrarily", "arbitrarily", - "aribtration", "arbitration", - "arithmentic", "arithmetic", - "arithmethic", "arithmetic", - "arithmetric", "arithmetic", - "armagedddon", "armageddon", - "armageddeon", "armageddon", - "arrangments", "arrangements", - "arrengement", "arrangement", - "articluated", "articulated", - "articualted", "articulated", - "artifically", "artificially", - "artificialy", "artificially", - "aspergerers", "aspergers", - "asphyxation", "asphyxiation", - "aspriations", "aspirations", - "assasinated", "assassinated", - "assasinates", "assassinates", - "assassiante", "assassinate", - "assassinare", "assassinate", - "assassinatd", "assassinated", - "assassinato", "assassination", - "assassinats", "assassins", - "assassinted", "assassinated", - "assembleing", "assembling", - "assemblying", "assembling", - "assertation", "assertion", - "assignemnts", "assignments", - "assimialted", "assimilate", - "assimilatie", "assimilate", - "assimilerat", "assimilate", - "assimiliate", "assimilate", - "assimliated", "assimilate", - "assingments", "assignments", - "assistantes", "assistants", - "assocaition", "associations", - "associaiton", "associations", - "associaties", "associates", - "associatons", "associations", - "assoication", "association", - "assosiating", "associating", - "assosiation", "association", - "assoziation", "association", - "assumptious", "assumptions", - "astonashing", "astonishing", - "astonoshing", "astonishing", - "astronaught", "astronaut", - "astronaunts", "astronaut", - "astronautas", "astronauts", - "astronautes", "astronauts", - "asychronous", "asynchronous", - "asyncronous", "asynchronous", - "atatchments", "attachments", - "atheistisch", "atheistic", - "athelticism", "athleticism", - "athletecism", "athleticism", - "athleticsim", "athleticism", - "athletisicm", "athleticism", - "athletisism", "athleticism", - "atmopsheric", "atmospheric", - "atmoshperic", "atmospheric", - "atmosoheric", "atmospheric", - "atomspheric", "atmospheric", - "atrocitites", "atrocities", - "attachemnts", "attachments", - "attackerasu", "attackers", - "attackerats", "attackers", - "attactments", 
"attachments", - "attributred", "attributed", - "attributted", "attribute", - "attrocities", "atrocities", - "audiobookas", "audiobooks", - "audioboooks", "audiobook", - "auotcorrect", "autocorrect", - "austrailans", "australians", - "austrailian", "australian", - "australiaan", "australians", - "australiams", "australians", - "australiens", "australians", - "australlian", "australian", - "authenticiy", "authenticity", - "authenticor", "authenticator", - "authenticty", "authenticity", - "authorative", "authoritative", - "authoritate", "authoritative", - "authoroties", "authorities", - "autoatttack", "autoattack", - "autocoreect", "autocorrect", - "autocorrekt", "autocorrect", - "autocorrent", "autocorrect", - "autocorrext", "autocorrect", - "autoctonous", "autochthonous", - "autokorrect", "autocorrect", - "automaticly", "automatically", - "automatonic", "automation", - "automoblies", "automobile", - "auxillaries", "auxiliaries", - "availabiliy", "availability", - "availabilty", "availability", - "availablity", "availability", - "awesoneness", "awesomeness", - "babysittter", "babysitter", - "backbacking", "backpacking", - "backgorunds", "backgrounds", - "backhacking", "backpacking", - "backjacking", "backpacking", - "backtacking", "backpacking", - "bangaldeshi", "bangladesh", - "bangladesch", "bangladesh", - "barceloneta", "barcelona", - "bargainning", "bargaining", - "battelfield", "battlefield", - "battelfront", "battlefront", - "battelships", "battleship", - "battlefeild", "battlefield", - "battlefiend", "battlefield", - "battlefiled", "battlefield", - "battlefornt", "battlefront", - "battlehsips", "battleship", - "beastiality", "bestiality", - "beaurocracy", "bureaucracy", - "beautyfully", "beautifully", - "behaviorial", "behavioral", - "belittleing", "belittling", - "belittlling", "belittling", - "belligerant", "belligerent", - "belligirent", "belligerent", - "bellweather", "bellwether", - "benefitical", "beneficial", - "bestiallity", "bestiality", - "beuatifully", "beautifully", - "beuraucracy", "bureaucracy", - "beuraucrats", "bureaucrats", - "billegerent", "belligerent", - "billionairs", "billionaires", - "billionarie", "billionaire", - "billioniare", "billionaire", - "biologicaly", "biologically", - "birthdayers", "birthdays", - "birthdaymas", "birthdays", - "bittersweat", "bittersweet", - "bitterwseet", "bittersweet", - "blackberrry", "blackberry", - "blacksmitch", "blacksmith", - "bloodboorne", "bloodborne", - "bluebarries", "blueberries", - "blueburries", "blueberries", - "blueprients", "blueprints", - "bodybuildig", "bodybuilding", - "bodybuildng", "bodybuilding", - "bodybuiling", "bodybuilding", - "bombardeada", "bombarded", - "bombardeado", "bombarded", - "bombarderad", "bombarded", - "bordelrands", "borderlands", - "bordlerands", "borderlands", - "bortherhood", "brotherhood", - "bourgeousie", "bourgeois", - "boycottting", "boycotting", - "bracelettes", "bracelets", - "brainwahsed", "brainwashed", - "brainwasing", "brainwashing", - "braziliians", "brazilians", - "breakthough", "breakthrough", - "breakthrouh", "breakthrough", - "breathtakng", "breathtaking", - "brianwashed", "brainwashed", - "brillaintly", "brilliantly", - "broadcasing", "broadcasting", - "broadcastes", "broadcasts", - "broderlands", "borderlands", - "brotherwood", "brotherhood", - "buddhistisk", "buddhists", - "buearucrats", "bureaucrats", - "bueraucracy", "bureaucracy", - "bueraucrats", "bureaucrats", - "buisnessman", "businessman", - "buisnessmen", "businessmen", - "bullerproof", "bulletproof", - 
"bulletbroof", "bulletproof", - "bulletproff", "bulletproof", - "bulletprrof", "bulletproof", - "bullitproof", "bulletproof", - "bureacuracy", "bureaucracy", - "bureaocracy", "bureaucracy", - "bureaocrats", "bureaucrats", - "bureaucraps", "bureaucrats", - "bureaucrash", "bureaucrats", - "bureaucrasy", "bureaucrats", - "bureaucrazy", "bureaucracy", - "bureuacracy", "bureaucracy", - "bureuacrats", "bureaucrats", - "burueacrats", "bureaucrats", - "businessnes", "businessmen", - "busniessmen", "businessmen", - "butterfiles", "butterflies", - "butterfleye", "butterfly", - "butterflyes", "butterflies", - "butterfries", "butterflies", - "butterlfies", "butterflies", - "caclulating", "calculating", - "caclulation", "calculation", - "caclulators", "calculators", - "cailbration", "calibration", - "calbiration", "calibration", - "calcualting", "calculating", - "calcualtion", "calculations", - "calcualtors", "calculators", - "calculaters", "calculators", - "calculatios", "calculators", - "calculatons", "calculations", - "calibartion", "calibration", - "calibraiton", "calibration", - "califorinan", "californian", - "californain", "californian", - "californica", "california", - "californien", "californian", - "californiia", "californian", - "californina", "californian", - "californnia", "californian", - "califronian", "californian", - "caluclating", "calculating", - "caluclation", "calculation", - "caluclators", "calculators", - "caluculated", "calculated", - "caluiflower", "cauliflower", - "camouflague", "camouflage", - "camouflauge", "camouflage", - "campagining", "campaigning", - "campainging", "campaigning", - "canadianese", "canadians", - "cannabilism", "cannibalism", - "cannabolism", "cannibalism", - "canniablism", "cannibalism", - "cannibalizm", "cannibalism", - "cannibaljim", "cannibalism", - "cannibalsim", "cannibalism", - "cannibilism", "cannibalism", - "cannobalism", "cannibalism", - "cannotation", "connotation", - "capabilites", "capabilities", - "capabilitiy", "capability", - "capabillity", "capability", - "capacitaron", "capacitor", - "capacitores", "capacitors", - "capatilists", "capitalists", - "capatilized", "capitalized", - "caperbility", "capability", - "capitalisim", "capitalism", - "capitilists", "capitalists", - "capitilized", "capitalized", - "capitolists", "capitalists", - "capitolized", "capitalized", - "captialists", "capitalists", - "captialized", "capitalized", - "cariactures", "caricature", - "carniverous", "carnivorous", - "castatrophe", "catastrophe", - "catagorized", "categorized", - "catapillars", "caterpillars", - "catapillers", "caterpillars", - "catasthrope", "catastrophe", - "catastraphe", "catastrophe", - "catastrohpe", "catastrophe", - "catastropic", "catastrophic", - "categroized", "categorized", - "catepillars", "caterpillars", - "catergorize", "categorize", - "caterogized", "categorized", - "caterpilars", "caterpillars", - "caterpiller", "caterpillar", - "catholacism", "catholicism", - "catholicsim", "catholicism", - "catholisicm", "catholicism", - "catholisism", "catholicism", - "catholizism", "catholicism", - "catholocism", "catholicism", - "catogerized", "categorized", - "catterpilar", "caterpillar", - "cauilflower", "cauliflower", - "caulfilower", "cauliflower", - "celebartion", "celebrations", - "celebirties", "celebrities", - "celebracion", "celebration", - "celebrasion", "celebrations", - "celebratons", "celebrations", - "centipeddle", "centipede", - "cerimonious", "ceremonious", - "certaintity", "certainty", - "certificaat", "certificate", - 
"certificare", "certificate", - "certificato", "certification", - "certificats", "certificates", - "challanging", "challenging", - "challeneged", "challenged", - "challeneger", "challenger", - "challeneges", "challenges", - "chameleooon", "chameleon", - "championshp", "championship", - "championsip", "championship", - "chancellour", "chancellor", - "charachters", "characters", - "charasmatic", "charismatic", - "charimastic", "charismatic", - "charsimatic", "charismatic", - "cheerleadra", "cheerleader", - "cheerleards", "cheerleaders", - "cheerleeder", "cheerleader", - "cheesebuger", "cheeseburger", - "cheeseburgs", "cheeseburgers", - "chihuahuita", "chihuahua", - "childrenmrs", "childrens", - "chloesterol", "cholesterol", - "cholesteral", "cholesterol", - "cholestoral", "cholesterol", - "cholestorol", "cholesterol", - "cholosterol", "cholesterol", - "chormosomes", "chromosomes", - "christianty", "christianity", - "chromasomes", "chromosomes", - "chromesomes", "chromosomes", - "chromisomes", "chromosomes", - "chromosones", "chromosomes", - "chromossome", "chromosomes", - "chromozomes", "chromosomes", - "chronicales", "chronicles", - "chronichles", "chronicles", - "cicrulating", "circulating", - "cincinnasti", "cincinnati", - "cincinnatti", "cincinnati", - "cincinnnati", "cincinnati", - "circimcised", "circumcised", - "circluating", "circulating", - "circualtion", "circulation", - "circulacion", "circulation", - "circumcison", "circumcision", - "circumsiced", "circumcised", - "circumsised", "circumcised", - "circumstace", "circumstance", - "circumvrent", "circumvent", - "circuncised", "circumcised", - "cirticising", "criticising", - "ciruclating", "circulating", - "ciruclation", "circulation", - "citicenship", "citizenship", - "citisenship", "citizenship", - "citizinship", "citizenship", - "civilizatin", "civilizations", - "civilizaton", "civilization", - "claculators", "calculators", - "classifides", "classified", - "cleanilness", "cleanliness", - "cleanleness", "cleanliness", - "cleanlyness", "cleanliness", - "cleansiness", "cleanliness", - "cliffbanger", "cliffhanger", - "cliffhander", "cliffhanger", - "cliffhangar", "cliffhanger", - "clifthanger", "cliffhanger", - "cockaroches", "cockroaches", - "cockraoches", "cockroaches", - "cockroackes", "cockroaches", - "cocktailers", "cocktails", - "coefficeint", "coefficient", - "coefficiant", "coefficient", - "coincedince", "coincidence", - "coincidance", "coincidence", - "coincidense", "coincidence", - "coincidente", "coincidence", - "coincidince", "coincidence", - "coinsidence", "coincidence", - "collabarate", "collaborate", - "collaberate", "collaborate", - "collaborant", "collaborate", - "collaborare", "collaborate", - "collaborato", "collaboration", - "collapseing", "collapsing", - "collaterial", "collateral", - "collectieve", "collective", - "collectivly", "collectively", - "collectivos", "collections", - "collobarate", "collaborate", - "colloborate", "collaborate", - "colonializm", "colonialism", - "colonialsim", "colonialism", - "colonianism", "colonialism", - "colonizaton", "colonization", - "comaprisons", "comparisons", - "combiantion", "combinations", - "combinacion", "combination", - "combinaison", "combinations", - "combinaiton", "combinations", - "combinatino", "combinations", - "combinatins", "combinations", - "combinatios", "combinations", - "combinining", "combining", - "combonation", "combination", - "comediantes", "comedians", - "comeptition", "competition", - "comeptitive", "competitive", - "comeptitors", "competitors", - 
"comfertable", "comfortable", - "comfertably", "comfortably", - "comfortabel", "comfortably", - "comfortabil", "comfortably", - "comfrotable", "comfortable", - "comftorable", "comfortable", - "comftorably", "comfortably", - "comisioning", "commissioning", - "comissioned", "commissioned", - "comissioner", "commissioner", - "commandered", "commanded", - "commandmant", "commandment", - "commantator", "commentator", - "commendment", "commandment", - "commentarea", "commenter", - "commentaren", "commenter", - "commentater", "commentator", - "commenteers", "commenter", - "commentries", "commenters", - "commercialy", "commercially", - "commericals", "commercials", - "commericial", "commercial", - "comminicate", "communicate", - "comminucate", "communicate", - "commisioned", "commissioned", - "commisioner", "commissioner", - "commisssion", "commissions", - "committment", "commitment", - "commodoties", "commodities", - "commomplace", "commonplace", - "commonspace", "commonplace", - "commonweath", "commonwealth", - "commonwelth", "commonwealth", - "commuincate", "communicated", - "communciate", "communicate", - "communicted", "communicated", - "communistas", "communists", - "communistes", "communists", - "compability", "compatibility", - "compalation", "compilation", - "compansated", "compensated", - "comparabile", "comparable", - "comparasion", "comparison", - "comparasons", "comparisons", - "comparement", "compartment", - "comparetive", "comparative", - "comparision", "comparison", - "comparisson", "comparisons", - "comparitave", "comparative", - "comparitive", "comparative", - "comparsions", "comparisons", - "compassione", "compassionate", - "compasssion", "compassion", - "compatabile", "compatible", - "compatative", "comparative", - "compatiable", "compatible", - "compatibile", "compatible", - "compatibily", "compatibility", - "compeditive", "competitive", - "compeditors", "competitors", - "compeitions", "competitions", - "compeittion", "competitions", - "compelation", "compilation", - "compensante", "compensate", - "compensatie", "compensate", - "compensatin", "compensation", - "compenstate", "compensate", - "comperative", "comparative", - "compesition", "composition", - "competation", "computation", - "competative", "competitive", - "competators", "competitors", - "competetion", "competition", - "competetors", "competitors", - "competiters", "competitors", - "competiting", "competition", - "competitior", "competitor", - "competitivo", "competition", - "competitoin", "competitions", - "competitons", "competitors", - "competution", "computation", - "compilacion", "compilation", - "compilcated", "complicate", - "compination", "compilation", - "compinsated", "compensated", - "compitation", "computation", - "compitetion", "competitions", - "complacient", "complacent", - "complciated", "complicate", - "compleation", "compilation", - "complecated", "complicated", - "completaste", "completes", - "completeing", "completing", - "completeion", "completion", - "completelly", "completely", - "completelyl", "completely", - "completelys", "completes", - "completenes", "completes", - "complexitiy", "complexity", - "compliacted", "complicate", - "compliation", "compilation", - "complicarte", "complicate", - "complicatie", "complicit", - "complicatii", "complicit", - "complicatin", "complicit", - "complictaed", "complicate", - "complimente", "complement", - "complimenty", "complimentary", - "complusions", "compulsion", - "compolation", "compilation", - "componenets", "components", - "componentes", "components", 
- "composicion", "composition", - "composiiton", "compositions", - "composision", "compositions", - "compositied", "composite", - "composities", "composite", - "compositoin", "compositions", - "compositons", "compositions", - "compositore", "composite", - "compostiion", "compositions", - "compotition", "composition", - "compramised", "compromised", - "compramises", "compromises", - "compremised", "compromised", - "compremises", "compromises", - "comprension", "compression", - "compresores", "compressor", - "compresssed", "compressed", - "compresssor", "compressor", - "comprimised", "compromised", - "comprimises", "compromises", - "compromessi", "compromises", - "compromisng", "compromising", - "compromisse", "compromises", - "compromisso", "compromises", - "compromized", "compromised", - "compulstion", "compulsion", - "compunation", "computation", - "computacion", "computation", - "computating", "computation", - "computition", "computation", - "conceivibly", "conceivably", - "concencrate", "concentrate", - "concentrace", "concentrate", - "concentrade", "concentrated", - "concentrait", "concentrate", - "concentrant", "concentrate", - "concentrare", "concentrate", - "concentrato", "concentration", - "concertmate", "concentrate", - "conceviable", "conceivable", - "conceviably", "conceivably", - "concidering", "considering", - "conciveable", "conceivable", - "conciveably", "conceivably", - "conclsuions", "concussions", - "concludendo", "concluded", - "conclussion", "conclusions", - "conclussive", "conclusive", - "conclutions", "conclusions", - "concsiously", "consciously", - "conculsions", "conclusions", - "concusssion", "concussions", - "condeferacy", "confederacy", - "condicional", "conditional", - "condidtions", "conditions", - "conditionar", "conditioner", - "conditionel", "conditional", - "condolances", "condolences", - "condolenses", "condolences", - "condolonces", "condolences", - "conductiong", "conducting", - "condulences", "condolences", - "conenctions", "connections", - "conescutive", "consecutive", - "confedaracy", "confederacy", - "confedarate", "confederate", - "confederecy", "confederacy", - "conferances", "conferences", - "conferedate", "confederate", - "confererate", "confederate", - "confescated", "confiscated", - "confesssion", "confessions", - "confidantly", "confidently", - "configurare", "configure", - "configurate", "configure", - "configurato", "configuration", - "confilcting", "conflicting", - "confisgated", "confiscated", - "conflciting", "conflicting", - "confortable", "comfortable", - "confrontato", "confrontation", - "confussions", "confessions", - "congrassman", "congressman", - "congratuate", "congratulate", - "conicidence", "coincidence", - "conjonction", "conjunction", - "conjucntion", "conjunction", - "conjuncting", "conjunction", - "conlcusions", "conclusions", - "connatation", "connotation", - "connecitcut", "connecticut", - "connecticon", "connection", - "connectiong", "connecting", - "connectivty", "connectivity", - "connetation", "connotation", - "connonation", "connotation", - "connotacion", "connotation", - "conontation", "connotation", - "conotations", "connotations", - "conquerring", "conquering", - "consdidered", "considered", - "consectuive", "consecutive", - "consecuence", "consequence", - "conseguence", "consequence", - "conselation", "consolation", - "consentrate", "concentrate", - "consequenes", "consequence", - "consequense", "consequences", - "consequente", "consequence", - "consequenty", "consequently", - "consequtive", "consecutive", - 
"conservanti", "conservation", - "conservatie", "conservatives", - "conservaton", "conservation", - "consficated", "confiscated", - "considerabe", "considerate", - "considerais", "considers", - "considerant", "considerate", - "considerato", "consideration", - "considerble", "considerable", - "considerbly", "considerably", - "considereis", "considers", - "consilation", "consolation", - "consilidate", "consolidate", - "consistance", "consistency", - "consistenly", "consistently", - "consistensy", "consistency", - "consistenty", "consistently", - "consitution", "constitution", - "conslutants", "consultant", - "consolacion", "consolation", - "consoldiate", "consolidate", - "consolidare", "consolidate", - "consolodate", "consolidate", - "consomation", "consolation", - "conspiraces", "conspiracies", - "conspiracys", "conspiracies", - "conspirancy", "conspiracy", - "constantins", "constants", - "constantivs", "constants", - "constarints", "constraint", - "constituant", "constituent", - "constituion", "constitution", - "constituite", "constitute", - "constitutie", "constitutes", - "constrating", "constraint", - "constriants", "constraints", - "construcing", "constructing", - "construcion", "construction", - "construcive", "constructive", - "constructie", "constructive", - "constructos", "constructs", - "constructur", "constructor", - "constructus", "constructs", - "constuction", "construction", - "consturcted", "constructed", - "consuelling", "counselling", - "consulation", "consolation", - "consultaion", "consultation", - "consultanti", "consultation", - "consumation", "consumption", - "consumbales", "consumables", - "consumersim", "consumerism", - "consumibles", "consumables", - "contagiosum", "contagious", - "containered", "contained", - "containmemt", "containment", - "containters", "containers", - "containting", "containing", - "contaminato", "contamination", - "contaminent", "containment", - "contaminted", "contaminated", - "contancting", "contracting", - "contanimate", "contaminated", - "contemplare", "contemplate", - "contempoary", "contemporary", - "contemporay", "contemporary", - "contencious", "contentious", - "contenental", "continental", - "contengency", "contingency", - "contenintal", "continental", - "contenplate", "contemplate", - "contensious", "contentious", - "contentants", "contestants", - "contentuous", "contentious", - "contestaste", "contestants", - "contestents", "contestants", - "contianment", "containment", - "contientous", "contentious", - "contimplate", "contemplate", - "continenets", "continents", - "continentes", "continents", - "continentul", "continental", - "contingancy", "contingency", - "contingient", "contingent", - "contingincy", "contingency", - "continously", "continuously", - "continuarla", "continual", - "continuarlo", "continual", - "continuasse", "continues", - "continueing", "continuing", - "continuemos", "continues", - "continueous", "continuous", - "continuious", "continuous", - "continuning", "continuing", - "continunity", "continuity", - "continuosly", "continuously", - "continuting", "continuing", - "continutity", "continuity", - "continuuing", "continuing", - "continuuity", "continuity", - "contirbuted", "contributed", - "contiunally", "continually", - "contraccion", "contraction", - "contraddice", "contradicted", - "contradices", "contradicts", - "contradtion", "contraction", - "contraversy", "controversy", - "contreversy", "controversy", - "contribuent", "contribute", - "contribuito", "contribution", - "contributer", "contributor", - 
"contributie", "contribute", - "contributin", "contribution", - "contributos", "contributors", - "contribuyes", "contributes", - "contricting", "contracting", - "contriction", "contraction", - "contridicts", "contradicts", - "contriversy", "controversy", - "controleurs", "controllers", - "controllore", "controllers", - "controvercy", "controversy", - "controversa", "controversial", - "contrubutes", "contributes", - "contructing", "contracting", - "contruction", "construction", - "contructors", "contractors", - "conveinence", "convenience", - "conveneince", "convenience", - "conveniance", "convenience", - "conveniente", "convenience", - "convenietly", "conveniently", - "conventinal", "conventional", - "converitble", "convertible", - "conversaion", "conversion", - "conversatin", "conversations", - "converseley", "conversely", - "converstion", "conversion", - "convertirea", "converter", - "convertirle", "convertible", - "convertirme", "converter", - "convertirte", "converter", - "convicitons", "convictions", - "convienence", "convenience", - "convienient", "convenient", - "convinceing", "convincing", - "convincente", "convenient", - "convincersi", "convinces", - "convirtible", "convertible", - "cooperacion", "cooperation", - "cooperativo", "cooperation", - "cooporation", "cooperation", - "cooporative", "cooperative", - "coordenated", "coordinated", - "coordenates", "coordinates", - "coordianted", "coordinated", - "coordiantes", "coordinates", - "coordiantor", "coordinator", - "coordinador", "coordinator", - "coordinants", "coordinates", - "coordinater", "coordinator", - "coordinaton", "coordination", - "coordonated", "coordinated", - "coordonates", "coordinates", - "coordonator", "coordinator", - "cooridnated", "coordinated", - "cooridnates", "coordinates", - "cooridnator", "coordinator", - "copenhaagen", "copenhagen", - "copenhaegen", "copenhagen", - "copenhaguen", "copenhagen", - "copenhangen", "copenhagen", - "copmetitors", "competitors", - "coproration", "corporation", - "copyrigthed", "copyrighted", - "corinthains", "corinthians", - "corintheans", "corinthians", - "corinthiens", "corinthians", - "corinthinas", "corinthians", - "cornithians", "corinthians", - "corparation", "corporation", - "corperation", "corporation", - "corporacion", "corporation", - "corporativo", "corporation", - "corralation", "correlation", - "correctings", "corrections", - "correctivos", "corrections", - "correktions", "corrections", - "correktness", "correctness", - "correlacion", "correlation", - "correlaties", "correlates", - "corrilation", "correlation", - "corrisponds", "corresponds", - "corrolation", "correlation", - "corrosponds", "corresponds", - "costitution", "constitution", - "councellors", "councillors", - "counrtyside", "countryside", - "counsilling", "counselling", - "countercoat", "counteract", - "counteredit", "counterfeit", - "counterfact", "counteract", - "counterfait", "counterfeit", - "counterfest", "counterfeit", - "counterfiet", "counterfeit", - "counterpaly", "counterplay", - "counterpary", "counterplay", - "counterpath", "counterpart", - "counterpats", "counterparts", - "counterpont", "counterpoint", - "counterract", "counterpart", - "counterside", "countryside", - "countertrap", "counterpart", - "countriside", "countryside", - "countrycide", "countryside", - "countrywise", "countryside", - "courthourse", "courthouse", - "coutnerfeit", "counterfeit", - "coutnerpart", "counterpart", - "coutnerplay", "counterplay", - "creacionism", "creationism", - "creationkit", "creationist", - 
"creationsim", "creationism", - "creationsit", "creationist", - "creationsts", "creationists", - "creativelly", "creatively", - "credencials", "credentials", - "credentails", "credentials", - "credentaisl", "credentials", - "credientals", "credentials", - "credintials", "credentials", - "cricitising", "criticising", - "criculating", "circulating", - "cringeworhy", "cringeworthy", - "cringeworty", "cringeworthy", - "cringewothy", "cringeworthy", - "criticicing", "criticising", - "criticisied", "criticise", - "criticisims", "criticisms", - "criticisize", "criticise", - "criticiszed", "criticise", - "critisicing", "criticizing", - "critisising", "criticising", - "critizicing", "criticizing", - "critizising", "criticizing", - "critizizing", "criticizing", - "crockodiles", "crocodiles", - "crocodiller", "crocodile", - "crocodilule", "crocodile", - "croporation", "corporation", - "crossfiters", "crossfire", - "cultivative", "cultivate", - "curricullum", "curriculum", - "customizabe", "customizable", - "customizble", "customizable", - "dangeroulsy", "dangerously", - "dardenelles", "dardanelles", - "deadlifters", "deadlifts", - "dealershits", "dealerships", - "deceptivley", "deceptive", - "declaracion", "declaration", - "decleration", "declaration", - "declinining", "declining", - "decloration", "declaration", - "decoartions", "decoration", - "decomposits", "decomposes", - "decoratieve", "decorative", - "decorativos", "decorations", - "decotations", "decorations", - "decsendants", "descendants", - "deductiable", "deductible", - "defenderlas", "defenders", - "defenderlos", "defenders", - "defendernos", "defenders", - "defenesless", "defenseless", - "defenisvely", "defensively", - "defensivley", "defensively", - "deficiencey", "deficiency", - "deficienies", "deficiencies", - "deficientcy", "deficiency", - "definantley", "definately", - "definatedly", "definately", - "definateley", "definately", - "definatelly", "definately", - "definatelty", "definately", - "definatetly", "definately", - "definations", "definitions", - "definatlely", "definately", - "definetally", "definately", - "definetlely", "definetly", - "definitaley", "definately", - "definitelly", "definitely", - "definitevly", "definitively", - "definitiely", "definitively", - "definitieve", "definitive", - "definitiley", "definitively", - "definitivly", "definitively", - "definitivno", "definition", - "definitivos", "definitions", - "definitlely", "definitly", - "definitlety", "definitly", - "deflecticon", "deflection", - "degenererat", "degenerate", - "degradacion", "degradation", - "degradating", "degradation", - "degragation", "degradation", - "degridation", "degradation", - "dehyrdation", "dehydration", - "deinitalize", "deinitialize", - "delaerships", "dealerships", - "delapidated", "dilapidated", - "delcaration", "declaration", - "delearships", "dealerships", - "delevopment", "development", - "deliberante", "deliberate", - "deliberatly", "deliberately", - "deliberetly", "deliberately", - "delightlful", "delightful", - "deliverying", "delivering", - "delusionnal", "delusional", - "deminsional", "dimensional", - "democarcies", "democracies", - "democracize", "democracies", - "democractic", "democratic", - "democraphic", "demographic", - "democrasies", "democracies", - "democrazies", "democracies", - "democrocies", "democracies", - "demograhpic", "demographic", - "demographis", "demographics", - "demograpics", "demographics", - "demogrpahic", "demographic", - "demoninator", "denominator", - "demonstarte", "demonstrate", - "demonstates", 
"demonstrates", - "demonstraby", "demonstrably", - "demonstrant", "demonstrate", - "demonstrats", "demonstrates", - "demosntrate", "demonstrate", - "denegrating", "denigrating", - "denomenator", "denominator", - "denominador", "denominator", - "denominaron", "denominator", - "denominater", "denominator", - "denominaton", "denomination", - "denomitator", "denominator", - "denomonator", "denominator", - "denonimator", "denominator", - "deocrations", "decorations", - "deomcracies", "democracies", - "deparmental", "departmental", - "depedencies", "dependencies", - "dependancey", "dependency", - "dependencey", "dependency", - "dependencie", "dependence", - "dependenies", "dependencies", - "deplorabile", "deplorable", - "depressieve", "depressive", - "depresssion", "depression", - "deprevation", "deprivation", - "deprication", "deprivation", - "deprivating", "deprivation", - "deprivition", "deprivation", - "deprovation", "deprivation", - "depserately", "desperately", - "depseration", "desperation", - "deregulatin", "deregulation", - "derivativos", "derivatives", - "derivitaves", "derivatives", - "derivitives", "derivatives", - "derpivation", "deprivation", - "derviatives", "derivatives", - "descandants", "descendants", - "descendands", "descendants", - "descendends", "descended", - "descendenta", "descendants", - "descentants", "descendants", - "descirption", "descriptions", - "descprition", "descriptions", - "describiste", "describes", - "describtion", "description", - "descripcion", "description", - "descripiton", "descriptions", - "descripters", "descriptors", - "descriptoin", "descriptions", - "descriptons", "descriptions", - "descritpion", "descriptions", - "descrpition", "descriptions", - "desensitied", "desensitized", - "desensitzed", "desensitized", - "desentisize", "desensitized", - "desgination", "designation", - "designacion", "designation", - "designstion", "designation", - "desinations", "destinations", - "desingation", "designation", - "desitnation", "destination", - "desoriented", "disoriented", - "desparately", "desperately", - "desparation", "desperation", - "desperating", "desperation", - "desperatley", "desperately", - "despirately", "desperately", - "despiration", "desperation", - "destablized", "destabilized", - "destiantion", "destinations", - "destinaiton", "destinations", - "destinatons", "destinations", - "destinction", "destination", - "destraction", "destruction", - "destruccion", "destruction", - "destruciton", "destruction", - "destructivo", "destruction", - "destruktion", "destruction", - "destruktive", "destructive", - "deteoriated", "deteriorated", - "determanism", "determinism", - "determening", "determining", - "determenism", "determinism", - "determinare", "determine", - "determinato", "determination", - "determinded", "determine", - "determinsim", "determinism", - "detramental", "detrimental", - "detremental", "detrimental", - "detrimentul", "detrimental", - "detuschland", "deutschland", - "deustchland", "deutschland", - "deutchsland", "deutschland", - "deutcshland", "deutschland", - "deutschalnd", "deutschland", - "deutshcland", "deutschland", - "develepmont", "developments", - "develompent", "developments", - "developemnt", "developments", - "developmant", "developmental", - "developmetn", "developments", - "developmnet", "developments", - "developpers", "developers", - "develpoment", "developments", - "deveolpment", "developments", - "deveploment", "developments", - "devestating", "devastating", - "devistating", "devastating", - "deyhdration", 
"dehydration", - "diagnositcs", "diagnostic", - "diagnositic", "diagnostic", - "diagonstics", "diagnostic", - "dictatorhip", "dictatorship", - "dictionaire", "dictionaries", - "dictionairy", "dictionary", - "dictionarys", "dictionaries", - "dictionnary", "dictionary", - "differances", "differences", - "differantly", "differently", - "differental", "differential", - "differentes", "differences", - "differneces", "differences", - "differnetly", "differently", - "difficulity", "difficulty", - "difficultes", "difficulties", - "dificulties", "difficulties", - "dimensiones", "dimensions", - "dimentional", "dimensional", - "dimesnional", "dimensional", - "diminisheds", "diminishes", - "diminsihing", "diminishing", - "diminuitive", "diminutive", - "diminushing", "diminishing", - "dinosaurios", "dinosaurs", - "direccional", "directional", - "direcitonal", "directional", - "directorguy", "directory", - "directorios", "directors", - "direktional", "directional", - "disadvantge", "disadvantage", - "disagreemet", "disagreements", - "disagreemtn", "disagreements", - "disapperead", "disappeared", - "disapporval", "disapproval", - "disapprovel", "disapproval", - "disasterous", "disastrous", - "disastreous", "disastrous", - "disastrious", "disastrous", - "disastruous", "disastrous", - "disatisfied", "dissatisfied", - "disciplened", "disciplined", - "disciplinas", "disciplines", - "disciplince", "disciplines", - "disclipined", "disciplined", - "disclipines", "disciplines", - "discogrophy", "discography", - "discogrpahy", "discography", - "disconencts", "disconnects", - "disconneted", "disconnected", - "disconnnect", "disconnect", - "discontined", "discontinued", - "discontiued", "discontinued", - "discrapency", "discrepancy", - "discretited", "discredited", - "discrimante", "discriminate", - "discrimiate", "discriminate", - "discussiong", "discussing", - "discusssion", "discussions", - "disgraseful", "disgraceful", - "disgrateful", "disgraceful", - "disgrunteld", "disgruntled", - "disgustigly", "disgustingly", - "disgustingy", "disgustingly", - "disgustinly", "disgustingly", - "disicplined", "disciplined", - "disicplines", "disciplines", - "disingenuos", "disingenuous", - "dismanlting", "dismantling", - "dismantaled", "dismantled", - "dismanteled", "dismantled", - "disobediant", "disobedient", - "disocgraphy", "discography", - "disparingly", "disparagingly", - "dispensaire", "dispensaries", - "dispensarie", "dispenser", - "dispensiary", "dispensary", - "displacemnt", "displacement", - "disposicion", "disposition", - "disputandem", "disputandum", - "disqualifed", "disqualified", - "disregaring", "disregarding", - "dissapeared", "disappeared", - "dissapoined", "dissapointed", - "dissapointd", "dissapointed", - "dissapoited", "dissapointed", - "dissappears", "disappears", - "dissatisfed", "dissatisfied", - "disscusions", "discussions", - "dissertaion", "dissertation", - "dissipatore", "dissipate", - "distatesful", "distasteful", - "distatseful", "distasteful", - "disterbance", "disturbance", - "disticntion", "distinctions", - "distinciton", "distinction", - "distincitve", "distinctive", - "distinctily", "distinctly", - "distingiush", "distinguish", - "distinguise", "distinguished", - "distinktion", "distinction", - "distinquish", "distinguish", - "distirbance", "disturbance", - "distirbuted", "distribute", - "distirbutor", "distributor", - "distraccion", "distraction", - "distractons", "distracts", - "distraktion", "distraction", - "distribitor", "distributor", - "distribuent", "distribute", - "distribuite", 
"distribute", - "distribuito", "distribution", - "distributie", "distributed", - "distributin", "distribution", - "distributio", "distributor", - "distrobuted", "distributed", - "distrubance", "disturbance", - "distrubited", "distributed", - "distrubitor", "distributor", - "distrubuted", "distributed", - "distrubutor", "distributor", - "distructive", "destructive", - "distuingish", "distinguish", - "distunguish", "distinguish", - "disturbante", "disturbance", - "disturbence", "disturbance", - "disucssions", "discussions", - "divisionals", "divisions", - "doccumented", "documented", - "documantary", "documentary", - "documenatry", "documentary", - "documentare", "documentaries", - "documentato", "documentation", - "documentery", "documentary", - "documentory", "documentary", - "domesticted", "domesticated", - "dominateurs", "dominates", - "dominationg", "dominating", - "donwloading", "downloading", - "doublellift", "doublelift", - "downlaoding", "downloading", - "downloadbel", "downloadable", - "downloadbig", "downloading", - "downloadble", "downloadable", - "downvoteers", "downvoters", - "downvoteing", "downvoting", - "downvoteres", "downvoters", - "downvoteros", "downvoters", - "downvoteurs", "downvoters", - "downvotters", "downvoters", - "downvotting", "downvoting", - "dramaticaly", "dramatically", - "dramaticlly", "dramatically", - "drasitcally", "drastically", - "dsyfunction", "dysfunction", - "duetschland", "deutschland", - "durabillity", "durability", - "dyanmically", "dynamically", - "dymanically", "dynamically", - "dysfonction", "dysfunction", - "dysfucntion", "dysfunction", - "dysfunciton", "dysfunction", - "dysfunktion", "dysfunction", - "earhtquakes", "earthquakes", - "earthqaukes", "earthquakes", - "earthquacks", "earthquakes", - "economicaly", "economically", - "economiclly", "economically", - "economisiti", "economist", - "economistes", "economists", - "educacional", "educational", - "effeciently", "efficiently", - "effecitvely", "effectively", - "effectivley", "effectively", - "efficeintly", "efficiently", - "efficiantly", "efficiently", - "efficientcy", "efficiently", - "effortlesly", "effortlessly", - "effortlessy", "effortlessly", - "egaletarian", "egalitarian", - "egalitatian", "egalitarian", - "egaliterian", "egalitarian", - "egostitical", "egotistical", - "egotastical", "egotistical", - "egotestical", "egotistical", - "egotisitcal", "egotistical", - "egotisticle", "egotistical", - "egotystical", "egotistical", - "ehtnicities", "ethnicities", - "ejacluation", "ejaculation", - "ejacualtion", "ejaculation", - "electoratul", "electoral", - "electornics", "electronics", - "electricain", "electrician", - "electricial", "electrical", - "electricien", "electrician", - "electricion", "electrician", - "electricman", "electrician", - "electrisity", "electricity", - "electritian", "electrician", - "electrocity", "electricity", - "electrolyes", "electrolytes", - "electrolyts", "electrolytes", - "electroncis", "electrons", - "electroylte", "electrolytes", - "elementrary", "elementary", - "eleminating", "eliminating", - "elimanation", "elimination", - "eliminacion", "elimination", - "elimintates", "eliminates", - "ellipitcals", "elliptical", - "eloquentely", "eloquently", - "emabrassing", "embarassing", - "embaraasing", "embarassing", - "embarasaing", "embarassing", - "embarassign", "embarassing", - "embarassimg", "embarassing", - "embarassing", "embarrassing", - "embarissing", "embarassing", - "embarrasing", "embarrassing", - "embarressed", "embarrassed", - "embarrssing", 
"embarassing", - "emergancies", "emergencies", - "emergencias", "emergencies", - "emergenices", "emergencies", - "emmediately", "immediately", - "emmisarries", "emissaries", - "emotionella", "emotionally", - "empahsizing", "emphasizing", - "empathethic", "empathetic", - "emphacizing", "emphasizing", - "emphatising", "emphasizing", - "emphatizing", "emphasizing", - "emphazising", "emphasizing", - "emphesizing", "emphasizing", - "empiracally", "empirically", - "empirialism", "imperialism", - "empirialist", "imperialist", - "enchamtment", "enchantment", - "enchancment", "enchantment", - "enchanement", "enchantment", - "enchanthing", "enchanting", - "enchantmant", "enchantment", - "enchantmens", "enchantments", - "enchantmets", "enchantments", - "encomapsses", "encompasses", - "encompasess", "encompasses", - "encompesses", "encompasses", - "encounteres", "encounters", - "encoutnered", "encountered", - "encryptiion", "encryption", - "encyclopdia", "encyclopedia", - "encylopedia", "encyclopedia", - "endagnering", "endangering", - "endandering", "endangering", - "endorcement", "endorsement", - "endoresment", "endorsement", - "engagaments", "engagements", - "engeneering", "engineering", - "enginerring", "engineering", - "enginnering", "engineering", - "enlargments", "enlargements", - "enligthened", "enlightened", - "enourmously", "enormously", - "enterpirses", "enterprises", - "enterprices", "enterprises", - "enterprishe", "enterprises", - "entertainig", "entertaining", - "entertwined", "entertained", - "enthicities", "ethnicities", - "enthisiasts", "enthusiasts", - "enthuasists", "enthusiasts", - "enthuisasts", "enthusiasts", - "enthusaists", "enthusiasts", - "enthusiants", "enthusiast", - "enthusiasic", "enthusiastic", - "enthusiasim", "enthusiasm", - "enthusiasum", "enthusiasm", - "enthusiatic", "enthusiastic", - "enthusiests", "enthusiasts", - "enthusigasm", "enthusiasm", - "enthusisast", "enthusiasts", - "entrepeneur", "entrepreneur", - "entreperure", "entrepreneur", - "entrepeuner", "entrepreneur", - "entreprener", "entrepreneurs", - "entreprenur", "entrepreneur", - "entretained", "entertained", - "envinroment", "environments", - "enviorments", "environments", - "enviornment", "environment", - "envirnoment", "environment", - "enviroments", "environments", - "enviromnent", "environments", - "environemnt", "environment", - "environmnet", "environments", - "envrionment", "environment", - "equilavents", "equivalents", - "equilbirium", "equilibrium", - "equilevants", "equivalents", - "equilibirum", "equilibrium", - "equilibriam", "equilibrium", - "equilibruim", "equilibrium", - "equivalance", "equivalence", - "equivalants", "equivalents", - "equivalenet", "equivalents", - "equivallent", "equivalent", - "equivelance", "equivalence", - "equivelants", "equivalents", - "equivelents", "equivalents", - "equivilants", "equivalents", - "equivilence", "equivalence", - "equivilents", "equivalents", - "equivlalent", "equivalent", - "equivlanets", "equivalents", - "equivolence", "equivalence", - "equivolents", "equivalents", - "essencially", "essentially", - "essentailly", "essentially", - "essentialls", "essentials", - "essentually", "essentially", - "establising", "establishing", - "ethicallity", "ethically", - "ethincities", "ethnicities", - "ethniticies", "ethnicities", - "europeaners", "europeans", - "europeaness", "europeans", - "evaluatiing", "evaluating", - "evaluationg", "evaluating", - "evangalical", "evangelical", - "evangelikal", "evangelical", - "evengalical", "evangelical", - "evenhtually", 
"eventually", - "everyonehas", "everyones", - "everyonelse", "everyones", - "evidentally", "evidently", - "exacarbated", "exacerbated", - "exacberated", "exacerbated", - "exagerating", "exaggerating", - "exagerrated", "exaggerated", - "exagerrates", "exaggerates", - "exaggarated", "exaggerated", - "exaggareted", "exaggerate", - "exaggeratin", "exaggeration", - "exaggerrate", "exaggerate", - "exaggurated", "exaggerated", - "exarcebated", "exacerbated", - "excalmation", "exclamation", - "excepcional", "exceptional", - "exceptionel", "exceptional", - "excessivley", "excessively", - "exceutioner", "executioner", - "exchanching", "exchanging", - "exclamacion", "exclamation", - "exclamating", "exclamation", - "exclamativo", "exclamation", - "exclemation", "exclamation", - "exclimation", "exclamation", - "exclucivity", "exclusivity", - "exclusivety", "exclusivity", - "exclusivily", "exclusivity", - "exclusivley", "exclusively", - "excpetional", "exceptional", - "exculsively", "exclusively", - "exculsivity", "exclusivity", - "execitioner", "executioner", - "execptional", "exceptional", - "exectuables", "executable", - "exectuioner", "executioner", - "executionar", "executioner", - "executionor", "executioner", - "exerciseing", "exercising", - "exeuctioner", "executioner", - "existantial", "existential", - "existencial", "existential", - "existensial", "existential", - "existentiel", "existential", - "exlcamation", "exclamation", - "exlcusively", "exclusively", - "exlcusivity", "exclusivity", - "exoskelaton", "exoskeleton", - "expansiones", "expansions", - "expectantcy", "expectancy", - "expectating", "expectation", - "expectional", "exceptional", - "expendature", "expenditure", - "expendeture", "expenditure", - "expentiture", "expenditure", - "expereinced", "experienced", - "expereinces", "experiences", - "experements", "experiments", - "experianced", "experienced", - "experiances", "experiences", - "experiemnts", "experiments", - "experiening", "experiencing", - "experimetal", "experimental", - "experimeted", "experimented", - "experssions", "expressions", - "expiditions", "expeditions", - "expierenced", "experienced", - "expierences", "experiences", - "expirements", "experiments", - "explainging", "explaining", - "explaintory", "explanatory", - "explanaiton", "explanations", - "explanetary", "explanatory", - "explanetory", "explanatory", - "explanitary", "explanatory", - "explanotory", "explanatory", - "explenation", "explanation", - "explenatory", "explanatory", - "explicitely", "explicitly", - "explicitily", "explicitly", - "explination", "explanation", - "explinatory", "explanatory", - "exploitaion", "exploitation", - "exploitatie", "exploitative", - "explonation", "exploration", - "exploracion", "exploration", - "explorating", "exploration", - "explorerers", "explorers", - "explosiones", "explosions", - "explotacion", "exploration", - "expodential", "exponential", - "exponantial", "exponential", - "exponencial", "exponential", - "exponentiel", "exponential", - "expresscoin", "expression", - "expressivos", "expressions", - "expresssive", "expressive", - "expressview", "expressive", - "exprimental", "experimental", - "expropiated", "expropriated", - "extensiones", "extensions", - "extensivley", "extensively", - "extragavant", "extravagant", - "extrapalate", "extrapolate", - "extraploate", "extrapolate", - "extrapolant", "extrapolate", - "extrapolare", "extrapolate", - "extrapolite", "extrapolate", - "extrapulate", "extrapolate", - "extravagent", "extravagant", - "extravagina", "extravagant", - 
"extravegant", "extravagant", - "extravigant", "extravagant", - "extravogant", "extravagant", - "extremistas", "extremists", - "extremistes", "extremists", - "extropolate", "extrapolate", - "fabircation", "fabrication", - "fabricacion", "fabrication", - "fabrikation", "fabrication", - "facilitarte", "facilitate", - "facilitiate", "facilitate", - "facillitate", "facilitate", - "facisnation", "fascination", - "facsination", "fascination", - "factuallity", "factually", - "familairity", "familiarity", - "familairize", "familiarize", - "familiaries", "familiarize", - "familierize", "familiarize", - "fanatsizing", "fantasizing", - "fanficitons", "fanfiction", - "fantacising", "fantasizing", - "fantacizing", "fantasizing", - "fantasazing", "fantasizing", - "fantasiaing", "fantasizing", - "fantasyzing", "fantasizing", - "fantazising", "fantasizing", - "fascinacion", "fascination", - "fascinatinf", "fascination", - "fascisation", "fascination", - "fascization", "fascination", - "fashionalbe", "fashionable", - "fashoinable", "fashionable", - "fatalitites", "fatalities", - "favoritisme", "favorites", - "favoutrable", "favourable", - "felxibility", "flexibility", - "feministers", "feminists", - "feministisk", "feminists", - "fermentaion", "fermentation", - "fermenterad", "fermented", - "fertilizier", "fertilizer", - "fertizilers", "fertilizer", - "festivalens", "festivals", - "fignernails", "fingernails", - "fignerprint", "fingerprint", - "figurativly", "figuratively", - "finanically", "financially", - "finantially", "financially", - "fingerpints", "fingertips", - "fingerpoint", "fingerprint", - "fingertrips", "fingertips", - "firefighers", "firefighters", - "firefigther", "firefighters", - "firendzoned", "friendzoned", - "firghtening", "frightening", - "flatterende", "flattered", - "flawlessely", "flawlessly", - "flawlessley", "flawlessly", - "flexibiltiy", "flexibility", - "flourescent", "fluorescent", - "fluctuaties", "fluctuate", - "fluctuative", "fluctuate", - "flutteryshy", "fluttershy", - "forcefullly", "forcefully", - "foreseaable", "foreseeable", - "foresseable", "foreseeable", - "forgettting", "forgetting", - "forgiviness", "forgiveness", - "formallized", "formalized", - "formattting", "formatting", - "formidabble", "formidable", - "formidabelt", "formidable", - "formidabile", "formidable", - "fortitudine", "fortitude", - "fortuantely", "fortunately", - "fortunantly", "fortunately", - "fortunatley", "fortunately", - "fortunetely", "fortunately", - "franchieses", "franchises", - "frankensite", "frankenstein", - "frankensten", "frankenstein", - "fransiscans", "franciscans", - "freindships", "friendships", - "freindzoned", "friendzoned", - "frequenices", "frequencies", - "frequensies", "frequencies", - "frequenties", "frequencies", - "frequentily", "frequently", - "frequenzies", "frequencies", - "friendboned", "friendzoned", - "friendlines", "friendlies", - "friendzonie", "friendzoned", - "frientships", "friendships", - "frientzoned", "friendzoned", - "frightenend", "frightened", - "frightining", "frightening", - "frigthening", "frightening", - "frinedzoned", "friendzoned", - "frontlinies", "frontline", - "frontlinjen", "frontline", - "frustartion", "frustrations", - "frustracion", "frustration", - "frustraited", "frustrated", - "frustrantes", "frustrates", - "frustrasion", "frustrations", - "frustrasted", "frustrates", - "frustraties", "frustrates", - "fucntioning", "functioning", - "fulfillling", "fulfilling", - "fulfullment", "fulfillment", - "fullfilment", "fulfillment", - "fullscreeen", 
"fullscreen", - "funcitoning", "functioning", - "functionaly", "functionally", - "functionnal", "functional", - "fundamentas", "fundamentals", - "fundamently", "fundamental", - "fundametals", "fundamentals", - "fundamnetal", "fundamentals", - "fundemantal", "fundamental", - "fundemental", "fundamental", - "fundimental", "fundamental", - "furhtermore", "furthermore", - "furstration", "frustration", - "furthremore", "furthermore", - "furthurmore", "furthermore", - "futurisitic", "futuristic", - "gangsterest", "gangsters", - "gangsterous", "gangsters", - "gauntlettes", "gauntlets", - "geneologies", "genealogies", - "generalizng", "generalizing", - "generatting", "generating", - "genitaliban", "genitalia", - "gentlemanne", "gentlemen", - "girlfirends", "girlfriends", - "girlfreinds", "girlfriends", - "girlfrients", "girlfriends", - "glorifierad", "glorified", - "glorifindel", "glorified", - "goosebumbps", "goosebumps", - "govenrments", "governments", - "govermental", "governmental", - "governemnts", "governments", - "governmanet", "governmental", - "governmeant", "governmental", - "govormental", "governmental", - "gracefullly", "gracefully", - "grahpically", "graphically", - "grammarical", "grammatical", - "grammaticly", "grammatical", - "grammitical", "grammatical", - "graphcially", "graphically", - "grassrooots", "grassroots", - "gratuitious", "gratuitous", - "gratuituous", "gratuitous", - "gravitatiei", "gravitate", - "grilfriends", "girlfriends", - "grpahically", "graphically", - "guaranteeds", "guarantees", - "guerrillera", "guerrilla", - "gunslingner", "gunslinger", - "hamburgaren", "hamburger", - "hamburgeres", "hamburgers", - "hamburglers", "hamburgers", - "hamburguers", "hamburgers", - "handlebards", "handlebars", - "handrwiting", "handwriting", - "handycapped", "handicapped", - "hanidcapped", "handicapped", - "harassement", "harassment", - "harrasments", "harassments", - "harrassment", "harassment", - "harvestgain", "harvesting", - "headquartes", "headquarters", - "headquaters", "headquarters", - "hearhtstone", "hearthstone", - "heartborken", "heartbroken", - "heartbraker", "heartbreak", - "heartbrakes", "heartbreak", - "heartsthone", "hearthstone", - "heaviweight", "heavyweight", - "heavyweigth", "heavyweight", - "heavywieght", "heavyweight", - "helicoptors", "helicopters", - "helicotpers", "helicopters", - "helicpoters", "helicopters", - "helictopers", "helicopters", - "helikopters", "helicopters", - "hemipsheres", "hemisphere", - "hemishperes", "hemisphere", - "herathstone", "hearthstone", - "heterosexal", "heterosexual", - "hexidecimal", "hexadecimal", - "hierachical", "hierarchical", - "hierarcical", "hierarchical", - "highlighing", "highlighting", - "highschoool", "highschool", - "hipopotamus", "hippopotamus", - "historicaly", "historically", - "historicans", "historians", - "historietas", "histories", - "historinhas", "historians", - "homecomeing", "homecoming", - "homecomming", "homecoming", - "homelesness", "homelessness", - "homelessess", "homelessness", - "homeowneris", "homeowners", - "homoegenous", "homogeneous", - "homogeneize", "homogenize", - "homogenious", "homogeneous", - "homogenuous", "homogeneous", - "homophoboes", "homophobe", - "homosexuais", "homosexuals", - "homosexuels", "homosexuals", - "hopelessely", "hopelessly", - "hopelessley", "hopelessly", - "hopsitality", "hospitality", - "horizonatal", "horizontal", - "horizontaal", "horizontal", - "horizontaly", "horizontally", - "horrendeous", "horrendous", - "horrendious", "horrendous", - "horrenduous", 
"horrendous", - "hospitalzed", "hospitalized", - "hospotality", "hospitality", - "househoulds", "households", - "humanitarna", "humanitarian", - "humanitites", "humanities", - "humilitaing", "humiliating", - "humilitaion", "humiliation", - "humillating", "humiliating", - "humillation", "humiliation", - "hurricaines", "hurricanes", - "hurricances", "hurricanes", - "hurricanger", "hurricane", - "hyperbollic", "hyperbolic", - "hyperbrophy", "hypertrophy", - "hyperthropy", "hypertrophy", - "hypertorphy", "hypertrophy", - "hypertrohpy", "hypertrophy", - "hypocritcal", "hypocritical", - "hypocritial", "hypocritical", - "hypocrities", "hypocrite", - "hypothesees", "hypotheses", - "hypothesies", "hypothesis", - "hystericaly", "hysterically", - "hystericlly", "hysterically", - "iconclastic", "iconoclastic", - "idealisitic", "idealistic", - "identifible", "identifiable", - "identitites", "identities", - "identitties", "identities", - "ideologiers", "ideologies", - "ideologisen", "ideologies", - "ideologiset", "ideologies", - "ideologiske", "ideologies", - "illegallity", "illegally", - "illegitamte", "illegitimate", - "illegitmate", "illegitimate", - "illsutrator", "illustrator", - "illuminanti", "illuminati", - "illuminarti", "illuminati", - "illuminatti", "illuminati", - "illuminauti", "illuminati", - "illuminiati", "illuminati", - "illuminista", "illuminati", - "illumintati", "illuminati", - "illustarted", "illustrated", - "illustartor", "illustrator", - "illustraded", "illustrated", - "illustraion", "illustration", - "illustrater", "illustrator", - "illustratie", "illustrate", - "illustratin", "illustrations", - "illustraton", "illustration", - "imaganative", "imaginative", - "imaganitive", "imaginative", - "imaginacion", "imagination", - "imaginatiei", "imaginative", - "imaginating", "imagination", - "imaginativo", "imagination", - "imaginitave", "imaginative", - "imbalanaced", "imbalanced", - "imbalanaces", "imbalances", - "imbalancers", "imbalances", - "immatureity", "immaturity", - "immedeately", "immediately", - "immediantly", "immediately", - "immediatley", "immediately", - "immedietely", "immediately", - "immideately", "immediately", - "immidiately", "immediately", - "immigraiton", "immigration", - "immigrantes", "immigrants", - "immoratlity", "immortality", - "immortailty", "immortality", - "immortalisy", "immortals", - "impeccabile", "impeccable", - "imperailist", "imperialist", - "imperealist", "imperialist", - "imperialims", "imperialism", - "imperialsim", "imperialism", - "imperiarist", "imperialist", - "imperically", "empirically", - "imperislist", "imperialist", - "implausable", "implausible", - "implausbile", "implausible", - "implementas", "implements", - "implementes", "implements", - "implementig", "implementing", - "implementos", "implements", - "implicacion", "implication", - "implicatons", "implications", - "implicitely", "implicitly", - "implicitily", "implicitly", - "implikation", "implication", - "implimented", "implemented", - "importantce", "importance", - "importently", "importantly", - "imporvement", "improvement", - "impossibile", "impossible", - "impossibily", "impossibly", - "impossibley", "impossibly", - "impossiblly", "impossibly", - "impoverised", "impoverished", - "impracticle", "impractical", - "impressario", "impresario", - "impresssion", "impressions", - "imprisonent", "imprisonment", - "imprisonned", "imprisoned", - "improbabile", "improbable", - "improtantly", "importantly", - "improvemnts", "improvements", - "improvished", "improvised", - "improvision", 
"improvisation", - "improvments", "improvements", - "impulsivley", "impulsive", - "imrpovement", "improvement", - "inaccessble", "inaccessible", - "inaccuraces", "inaccuracies", - "inaccurrate", "inaccurate", - "inadvertant", "inadvertent", - "inaguration", "inauguration", - "inahbitants", "inhabitants", - "incarantion", "incarnation", - "incarcerato", "incarceration", - "incarnacion", "incarnation", - "incentivare", "incentive", - "incentivate", "incentive", - "incentivice", "incentive", - "incentivies", "incentives", - "incidencies", "incidence", - "incidentaly", "incidentally", - "incidential", "incidental", - "inclanation", "inclination", - "inclenation", "inclination", - "inclinacion", "inclination", - "inclinaison", "inclination", - "incognition", "incognito", - "incoherrent", "incoherent", - "incompatble", "incompatible", - "incompatent", "incompetent", - "incompetant", "incompetent", - "incompitent", "incompetent", - "incompotent", "incompetent", - "incomptable", "incompatible", - "inconsisent", "inconsistent", - "inconveniet", "inconvenient", - "incoroprate", "incorporate", - "incorparate", "incorporate", - "incorperate", "incorporate", - "incorporare", "incorporate", - "incorported", "incorporated", - "incorprates", "incorporates", - "incorproate", "incorporated", - "incramental", "incremental", - "increadible", "incredible", - "incrediable", "incredible", - "incrediably", "incredibly", - "incredibile", "incredible", - "incredibily", "incredibly", - "incredibley", "incredibly", - "incrememnts", "increments", - "incremenets", "increments", - "incrementas", "increments", - "incremently", "incremental", - "incrementos", "increments", - "incrimental", "incremental", - "inctroduced", "introduced", - "indefinetly", "indefinitely", - "indefininte", "indefinite", - "indefinitly", "indefinitely", - "indepdenent", "independents", - "indepedence", "independence", - "indepednent", "independents", - "independant", "independent", - "independece", "independence", - "independens", "independents", - "independetn", "independents", - "independets", "independents", - "independnet", "independents", - "indepentend", "independents", - "indepentent", "independent", - "indianapols", "indianapolis", - "indicateurs", "indicates", - "indicatiors", "indicators", - "indictement", "indictment", - "indifferant", "indifferent", - "indiffernce", "indifference", - "indigeneous", "indigenous", - "indigenious", "indigenous", - "indigenuous", "indigenous", - "indigineous", "indigenous", - "indipendent", "independent", - "indirectely", "indirectly", - "individiual", "individual", - "individuais", "individuals", - "individualy", "individually", - "individuati", "individuality", - "individuels", "individuals", - "indivuduals", "individuals", - "industriels", "industries", - "ineffecitve", "ineffective", - "ineffektive", "ineffective", - "inefficeint", "inefficient", - "inefficiant", "inefficient", - "ineffictive", "ineffective", - "ineffizient", "inefficient", - "inequallity", "inequality", - "inevitabile", "inevitable", - "inevitabily", "inevitably", - "inevitabley", "inevitably", - "inevitablly", "inevitably", - "inexpencive", "inexpensive", - "inexpenisve", "inexpensive", - "inexperiece", "inexperience", - "inexperince", "inexperience", - "inexplicaby", "inexplicably", - "infallibale", "infallible", - "infallibile", "infallible", - "infectation", "infestation", - "inferioirty", "inferiority", - "infestating", "infestation", - "infilitrate", "infiltrate", - "infiltartor", "infiltrator", - "infiltraron", "infiltrator", 
- "infiltrarte", "infiltrate", - "infiltrater", "infiltrator", - "infiltratie", "infiltrate", - "infiltrerat", "infiltrate", - "infinitelly", "infinitely", - "infintrator", "infiltrator", - "inflamation", "inflammation", - "inflatabale", "inflatable", - "inflitrator", "infiltrator", - "influancing", "influencing", - "influencial", "influential", - "influencian", "influencing", - "influenting", "influencing", - "influentual", "influential", - "influincing", "influencing", - "infograhpic", "infographic", - "infograpgic", "infographic", - "infogrpahic", "infographic", - "informacion", "information", - "informatice", "informative", - "informatief", "informative", - "informatiei", "informative", - "informatike", "informative", - "informativo", "information", - "informitive", "informative", - "infrigement", "infringement", - "infringeing", "infringing", - "infromation", "information", - "infromative", "informative", - "infulential", "influential", - "ingerdients", "ingredients", - "ingrediants", "ingredients", - "ingreidents", "ingredient", - "ingriedents", "ingredient", - "inhabitents", "inhabitants", - "inheirtance", "inheritance", - "inheratance", "inheritance", - "inheretance", "inheritance", - "inheritence", "inheritance", - "inhertiance", "inheritance", - "initaitives", "initiatives", - "initalisers", "initialisers", - "initalising", "initialising", - "initalizers", "initializers", - "initalizing", "initializing", - "initiaitive", "initiative", - "inititiaves", "initiatives", - "innocenters", "innocents", - "innocentius", "innocents", - "innoculated", "inoculated", - "inpsiration", "inspiration", - "inquisicion", "inquisition", - "inquisistor", "inquisitor", - "inquisiting", "inquisition", - "inquisitior", "inquisitor", - "inquisitivo", "inquisition", - "inquizition", "inquisition", - "insecurites", "insecurities", - "insensative", "insensitive", - "insensetive", "insensitive", - "insentitive", "insensitive", - "insepctions", "inspections", - "inseperable", "inseparable", - "insipration", "inspiration", - "insitutions", "institutions", - "insparation", "inspiration", - "inspecticon", "inspection", - "inspectoras", "inspectors", - "insperation", "inspiration", - "inspiracion", "inspiration", - "inspirating", "inspiration", - "inspriation", "inspiration", - "instalation", "installation", - "instalement", "installment", - "installatin", "installations", - "installeert", "installer", - "installemnt", "installment", - "installling", "installing", - "installmant", "installment", - "instanciate", "instantiate", - "instantaneu", "instantaneous", - "institucion", "institution", - "institutiei", "institute", - "instituttet", "institute", - "instraments", "instruments", - "instruccion", "instruction", - "instruciton", "instruction", - "instructers", "instructors", - "instructior", "instructor", - "instructios", "instructors", - "instructivo", "instruction", - "instructons", "instructors", - "instruktion", "instruction", - "instrumenal", "instrumental", - "instrumetal", "instrumental", - "insturction", "instruction", - "insturctors", "instructors", - "insturments", "instruments", - "instutition", "institution", - "instutution", "institution", - "insufficent", "insufficient", - "insuinating", "insinuating", - "insuniating", "insinuating", - "insurgencey", "insurgency", - "intangiable", "intangible", - "intangibile", "intangible", - "inteferring", "interfering", - "integracion", "integration", - "integratron", "integration", - "integrering", "interfering", - "intelectual", "intellectual", - 
"inteligence", "intelligence", - "intellectul", "intellectuals", - "intellectus", "intellectuals", - "intellecual", "intellectual", - "intellegent", "intelligent", - "intelligant", "intelligent", - "intencional", "intentional", - "intentionly", "intentional", - "interaccion", "interaction", - "interactice", "interactive", - "interacties", "interacts", - "interactifs", "interacts", - "interactins", "interacts", - "interactios", "interacts", - "interactivo", "interaction", - "interactons", "interacts", - "interaktion", "interaction", - "interaktive", "interactive", - "interasting", "interacting", - "intercation", "integration", - "interceptin", "interception", - "intercoarse", "intercourse", - "intercource", "intercourse", - "interecting", "interacting", - "interection", "interaction", - "interelated", "interrelated", - "interersted", "interpreted", - "interesring", "interfering", - "interessted", "interested", - "interferece", "interference", - "interferens", "interferes", - "interferire", "interfere", - "interfernce", "interference", - "interferred", "interfere", - "interferres", "interferes", - "intergation", "integration", - "intergrated", "integrated", - "intermedate", "intermediate", - "intermedite", "intermediate", - "intermitent", "intermittent", - "internation", "international", - "interneters", "internets", - "internetese", "internets", - "internetest", "internets", - "interneting", "interesting", - "internetors", "internets", - "internettes", "internets", - "interperted", "interpreted", - "interperter", "interpreter", - "interprered", "interpreter", - "interpretor", "interpreter", - "interratial", "interracial", - "interresing", "interfering", - "interrogato", "interrogation", - "interrputed", "interrupted", - "interruping", "interrupting", - "interruptes", "interrupts", - "interruptis", "interrupts", - "intersecton", "intersection", - "interstelar", "interstellar", - "intertained", "intertwined", - "intertvined", "intertwined", - "intertwyned", "intertwined", - "intervalles", "intervals", - "intervation", "integration", - "interveiwed", "interviewed", - "interveiwer", "interviewer", - "intervenion", "intervening", - "intervenire", "intervene", - "interventie", "intervene", - "intervewing", "intervening", - "interviened", "interviewed", - "interviewes", "interviews", - "interviewie", "interviewer", - "intervining", "intervening", - "interwebers", "interwebs", - "interwiever", "interviewer", - "intestinces", "intestines", - "inticracies", "intricacies", - "intimadated", "intimidated", - "intimidades", "intimidated", - "intimidante", "intimidate", - "intimidatie", "intimidated", - "intimidatin", "intimidation", - "intimidaton", "intimidation", - "intimidiate", "intimidate", - "intiminated", "intimidated", - "intimitaded", "intimidated", - "intimitated", "intimidated", - "intiutively", "intuitively", - "intoleranse", "intolerance", - "intolerante", "intolerance", - "intolerence", "intolerance", - "intolernace", "intolerance", - "intolorance", "intolerance", - "intolorence", "intolerance", - "intorducing", "introducing", - "intorverted", "introverted", - "intoxicatin", "intoxication", - "intoxicaton", "intoxication", - "intoxinated", "intoxicated", - "intoxocated", "intoxicated", - "intracacies", "intricacies", - "intracicies", "intricacies", - "intraverted", "introverted", - "intrecacies", "intricacies", - "intrepreted", "interpreted", - "intrepreter", "interpreter", - "intrerupted", "interrupted", - "intricasies", "intricacies", - "intricicies", "intricacies", - "intrigueing", 
"intriguing", - "intrinsisch", "intrinsic", - "introducion", "introduction", - "introducted", "introduced", - "introductie", "introduce", - "introvertie", "introverted", - "introvertis", "introverts", - "intruducing", "introducing", - "intrumental", "instrumental", - "intuatively", "intuitively", - "intuitevely", "intuitively", - "intuitivley", "intuitively", - "intuituvely", "intuitively", - "inutitively", "intuitively", - "invaldiates", "invalidates", - "invalidades", "invalidates", - "invalidante", "invalidate", - "invariabley", "invariably", - "invariablly", "invariably", - "inventiones", "inventions", - "invesitgate", "investigate", - "investagate", "investigate", - "investiagte", "investigate", - "investigare", "investigate", - "invincibile", "invincible", - "invincinble", "invincible", - "invisibiity", "invisibility", - "invisibiliy", "invisibility", - "involantary", "involuntary", - "involentary", "involuntary", - "involintary", "involuntary", - "involontary", "involuntary", - "involunatry", "involuntary", - "invulnerabe", "invulnerable", - "invulnerble", "invulnerable", - "iresistable", "irresistible", - "iresistably", "irresistibly", - "iresistible", "irresistible", - "iresistibly", "irresistibly", - "irrationaly", "irrationally", - "irrationnal", "irrational", - "islamisists", "islamists", - "islamisters", "islamists", - "islamistisk", "islamists", - "isntruments", "instruments", - "jacksonvile", "jacksonville", - "jailbroaken", "jailbroken", - "jailbrocken", "jailbroken", - "jounralists", "journalists", - "jouranlists", "journalists", - "journalisim", "journalism", - "journalistc", "journalistic", - "journolists", "journalists", - "judegmental", "judgemental", - "judgamental", "judgemental", - "judgementle", "judgemental", - "judgementsl", "judgemental", - "judgenental", "judgemental", - "jugdemental", "judgemental", - "juggernaugt", "juggernaut", - "juggernault", "juggernaut", - "juggernaunt", "juggernaut", - "justifyable", "justifiable", - "kidnappning", "kidnapping", - "kidnappping", "kidnapping", - "kilometeres", "kilometers", - "kindergaten", "kindergarten", - "knowledgabe", "knowledgable", - "knowledgble", "knowledgable", - "kryptoninte", "kryptonite", - "lacklusture", "lackluster", - "laughablely", "laughably", - "legalizaing", "legalizing", - "legalizaton", "legalization", - "legalizeing", "legalizing", - "legenadries", "legendaries", - "legendaires", "legendaries", - "legendarios", "legendaries", - "legendarisk", "legendaries", - "legendaryes", "legendaries", - "legenderies", "legendaries", - "legilsation", "legislation", - "legislacion", "legislation", - "legislativo", "legislation", - "legistation", "legislation", - "legistative", "legislative", - "legistators", "legislators", - "legitematly", "legitimately", - "legitimancy", "legitimacy", - "legitimatcy", "legitimacy", - "legitimatly", "legitimately", - "legitimetly", "legitimately", - "legnedaries", "legendaries", - "lengedaries", "legendaries", - "liberalisim", "liberalism", - "liberatrian", "libertarians", - "libertairan", "libertarians", - "libertarain", "libertarian", - "libertarias", "libertarians", - "libertarien", "libertarian", - "libertaryan", "libertarian", - "libertatian", "libertarian", - "liberterian", "libertarian", - "libguistics", "linguistics", - "libretarian", "libertarian", - "lieutennant", "lieutenant", - "lieutentant", "lieutenant", - "lightenning", "lightening", - "lightenting", "lightening", - "lightheared", "lighthearted", - "lightheated", "lighthearted", - "lightweigth", "lightweight", - 
"lightwieght", "lightweight", - "lightwright", "lightweight", - "ligthweight", "lightweight", - "limitaitons", "limitation", - "linguisitcs", "linguistics", - "linguisitic", "linguistic", - "lingusitics", "linguistics", - "lithuaninan", "lithuania", - "littlefiger", "littlefinger", - "littlefiner", "littlefinger", - "lockscreeen", "lockscreen", - "longevitity", "longevity", - "lotharingen", "lothringen", - "louisvillle", "louisville", - "maginficent", "magnificent", - "magneficent", "magnificent", - "magnicifent", "magnificent", - "magnifacent", "magnificent", - "magnifecent", "magnificent", - "magnificant", "magnificent", - "magnitudine", "magnitude", - "maintainted", "maintained", - "maintanance", "maintenance", - "maintanence", "maintenance", - "maintenence", "maintenance", - "maintianing", "maintaining", - "maintinaing", "maintaining", - "maintinance", "maintenance", - "maintinence", "maintenance", - "malfonction", "malfunction", - "malfucntion", "malfunction", - "malfunciton", "malfunction", - "malfuncting", "malfunction", - "malfunktion", "malfunction", - "malpractise", "malpractice", - "malpractive", "malpractice", - "maneouvring", "manoeuvring", - "manifestado", "manifesto", - "manifestano", "manifesto", - "manifestato", "manifesto", - "manifestion", "manifesto", - "manifestior", "manifesto", - "manifestons", "manifests", - "manifestors", "manifests", - "manipluated", "manipulated", - "manipualted", "manipulated", - "manipulatie", "manipulative", - "manipulatin", "manipulation", - "manipulaton", "manipulation", - "maniuplated", "manipulated", - "mannerisims", "mannerisms", - "manslaugher", "manslaughter", - "manslaugter", "manslaughter", - "manufacters", "manufactures", - "manufacteur", "manufactures", - "manufactued", "manufactured", - "manufactuer", "manufacture", - "manufacturs", "manufactures", - "manufacuter", "manufacture", - "manufacutre", "manufactures", - "manufatured", "manufactured", - "manupilated", "manipulated", - "marganilize", "marginalized", - "marhsmallow", "marshmallow", - "marijuannas", "marijuana", - "markerplace", "marketplace", - "marketpalce", "marketplace", - "marshamllow", "marshmallow", - "marshmalows", "marshmallows", - "masculanity", "masculinity", - "masculenity", "masculinity", - "masrhmallow", "marshmallow", - "mastermined", "mastermind", - "masterpeace", "masterpiece", - "masterpeice", "masterpiece", - "mastrubated", "masturbated", - "mastrubates", "masturbate", - "masturabted", "masturbated", - "masturbaing", "masturbating", - "masturbarte", "masturbate", - "masturbathe", "masturbated", - "masturbatie", "masturbated", - "masturbatin", "masturbation", - "masturbaton", "masturbation", - "masturbsted", "masturbated", - "masturpiece", "masterpiece", - "masuclinity", "masculinity", - "matchamking", "matchmaking", - "materalists", "materialist", - "materialsim", "materialism", - "mathamatics", "mathematics", - "mathcmaking", "matchmaking", - "mathemagics", "mathematics", - "mathemetics", "mathematics", - "mathimatics", "mathematics", - "matieralism", "materialism", - "maybelleine", "maybelline", - "maybelliene", "maybelline", - "maybellinne", "maybelline", - "maybellline", "maybelline", - "mdifielders", "midfielders", - "meatballers", "meatballs", - "mecernaries", "mercenaries", - "mechanicaly", "mechanically", - "mechanichal", "mechanical", - "mechaniclly", "mechanically", - "mechanicsms", "mechanisms", - "mechanisims", "mechanism", - "mechanismus", "mechanisms", - "medicaitons", "medications", - "medicineras", "medicines", - "meditatiing", 
"meditating", - "meditationg", "meditating", - "mentionning", "mentioning", - "mercanaries", "mercenaries", - "mercaneries", "mercenaries", - "mercenaires", "mercenaries", - "mercenarias", "mercenaries", - "mercenarios", "mercenaries", - "merceneries", "mercenaries", - "merchandice", "merchandise", - "merchandies", "merchandise", - "merchanidse", "merchandise", - "merchanters", "merchants", - "merchendise", "merchandise", - "merchindise", "merchandise", - "mercinaries", "mercenaries", - "mercineries", "mercenaries", - "metabolisim", "metabolism", - "metabolitic", "metabolic", - "metaphisics", "metaphysics", - "metaphorial", "metaphorical", - "metaphorics", "metaphors", - "metaphsyics", "metaphysics", - "metaphyiscs", "metaphysics", - "methodoligy", "methodology", - "metholodogy", "methodology", - "metropolian", "metropolitan", - "metropolies", "metropolis", - "metropollis", "metropolis", - "metropolois", "metropolis", - "micorcenter", "microcenter", - "micorphones", "microphones", - "microcender", "microcenter", - "microcentre", "microcenter", - "microcentro", "microcenter", - "microhpones", "microphones", - "microscrope", "microscope", - "microwavees", "microwaves", - "microwavers", "microwaves", - "midfeilders", "midfielders", - "midfiedlers", "midfielders", - "midfileders", "midfielders", - "midifelders", "midfielders", - "millienaire", "millionaire", - "millionairs", "millionaires", - "millionarie", "millionaire", - "millioniare", "millionaire", - "mindlessely", "mindlessly", - "mindlessley", "mindlessly", - "minimalstic", "minimalist", - "ministerens", "ministers", - "ministerios", "ministers", - "minneaoplis", "minneapolis", - "minneaplois", "minneapolis", - "minniapolis", "minneapolis", - "miraculaous", "miraculous", - "miraculosly", "miraculously", - "miraculousy", "miraculously", - "mircocenter", "microcenter", - "mircophones", "microphones", - "mircoscopic", "microscopic", - "miscairrage", "miscarriage", - "miscarraige", "miscarriage", - "miscarridge", "miscarriage", - "miscarriege", "miscarriage", - "mischeivous", "mischievous", - "mischevious", "mischievous", - "misdameanor", "misdemeanor", - "misdeamenor", "misdemeanor", - "misdemeaner", "misdemeanor", - "misdemenaor", "misdemeanor", - "misdemenors", "misdemeanors", - "misdimeanor", "misdemeanor", - "misdomeanor", "misdemeanor", - "miserablely", "miserably", - "misfortunte", "misfortune", - "misimformed", "misinformed", - "misinterept", "misinterpret", - "misinterpet", "misinterpret", - "misoginysts", "misogynist", - "misognyists", "misogynist", - "misogyinsts", "misogynist", - "misogynisic", "misogynistic", - "misogynistc", "misogynistic", - "misogynstic", "misogynist", - "missionaire", "missionaries", - "missionairy", "missionary", - "missionares", "missionaries", - "missionaris", "missionaries", - "missionarry", "missionary", - "missionnary", "missionary", - "mississipis", "mississippi", - "misspeeling", "misspelling", - "misspellled", "misspelled", - "mistakengly", "mistakenly", - "mistakently", "mistakenly", - "moderatedly", "moderately", - "moderateurs", "moderates", - "moderatorin", "moderation", - "modificaton", "modification", - "moisterizer", "moisturizer", - "moistruizer", "moisturizer", - "moisturizng", "moisturizing", - "moisturizor", "moisturizer", - "moistutizer", "moisturizer", - "moisutrizer", "moisturizer", - "moleculaire", "molecular", - "molestating", "molestation", - "moleststion", "molestation", - "momemtarily", "momentarily", - "momentairly", "momentarily", - "momentaraly", "momentarily", - 
"momentarely", "momentarily", - "momenterily", "momentarily", - "monestaries", "monasteries", - "monitoreada", "monitored", - "monitoreado", "monitored", - "monogameous", "monogamous", - "monolitihic", "monolithic", - "monopollies", "monopolies", - "monstorsity", "monstrosity", - "monstrasity", "monstrosity", - "monstrisity", "monstrosity", - "monstrocity", "monstrosity", - "monstrosoty", "monstrosity", - "monstrostiy", "monstrosity", - "monumentaal", "monumental", - "monumentais", "monuments", - "monumentals", "monuments", - "monumentous", "monuments", - "mositurizer", "moisturizer", - "mosntrosity", "monstrosity", - "motehrboard", "motherboard", - "mothebroard", "motherboards", - "motherbaord", "motherboard", - "motherboads", "motherboards", - "motherboars", "motherboards", - "motherborad", "motherboard", - "motherbords", "motherboards", - "motherobard", "motherboards", - "mothreboard", "motherboards", - "motivatinal", "motivational", - "motorcicles", "motorcycles", - "motorcylces", "motorcycles", - "mouthpeices", "mouthpiece", - "mulitplayer", "multiplayer", - "mulitplying", "multiplying", - "multipalyer", "multiplayer", - "multiplater", "multiplayer", - "multiplebgs", "multiples", - "multipleies", "multiples", - "multitaskng", "multitasking", - "multitudine", "multitude", - "multiverese", "multiverse", - "multyplayer", "multiplayer", - "multyplying", "multiplying", - "muncipality", "municipality", - "murdererous", "murderers", - "musicallity", "musically", - "mutliplayer", "multiplayer", - "mutliplying", "multiplying", - "mysterieuse", "mysteries", - "mysteriosly", "mysteriously", - "mysteriouly", "mysteriously", - "mysteriousy", "mysteriously", - "napoleonian", "napoleonic", - "narcisissim", "narcissism", - "narcisissts", "narcissist", - "narcisscism", "narcissism", - "narcisscist", "narcissist", - "narcissisim", "narcissism", - "narcississm", "narcissism", - "narcississt", "narcissist", - "narcissistc", "narcissistic", - "narcissitic", "narcissistic", - "narcisssism", "narcissism", - "narcisssist", "narcissist", - "narcissstic", "narcissist", - "natioanlist", "nationalist", - "nationailty", "nationality", - "nationalesl", "nationals", - "nationalisn", "nationals", - "nationalite", "nationalist", - "nationalits", "nationalist", - "nationalizm", "nationalism", - "nationalsim", "nationalism", - "neccesarily", "necessarily", - "necessairly", "necessarily", - "necessaties", "necessities", - "necesseraly", "necessarily", - "necesserily", "necessarily", - "necessiates", "necessities", - "necessitive", "necessities", - "neckbeardos", "neckbeards", - "neckbeardus", "neckbeards", - "necormancer", "necromancer", - "necromamcer", "necromancer", - "necromanser", "necromancer", - "necromencer", "necromancer", - "needlessley", "needlessly", - "negativeity", "negativity", - "negativelly", "negatively", - "negativitiy", "negativity", - "negiotating", "negotiating", - "negligiable", "negligible", - "negociating", "negotiating", - "negociation", "negotiation", - "negoitating", "negotiating", - "negoitation", "negotiation", - "negotiatied", "negotiate", - "negotiative", "negotiate", - "negotiatons", "negotiations", - "neigborhood", "neighborhood", - "neigbouring", "neighbouring", - "neighborhod", "neighborhood", - "neighbourgs", "neighbours", - "neighouring", "neighboring", - "nercomancer", "necromancer", - "nessasarily", "necessarily", - "neurologial", "neurological", - "neurosciene", "neuroscience", - "neutrallity", "neutrality", - "neverthelss", "nevertheless", - "neverthless", "nevertheless", - 
"newspapaers", "newspapers", - "newspappers", "newspapers", - "nieghboring", "neighboring", - "nightmarket", "nightmare", - "nonsencical", "nonsensical", - "nonsenscial", "nonsensical", - "nonsensicle", "nonsensical", - "normallized", "normalized", - "northwesten", "northwestern", - "nostalgisch", "nostalgic", - "noteworthly", "noteworthy", - "noticeabley", "noticeably", - "notificaton", "notification", - "notoriuosly", "notoriously", - "numericable", "numerical", - "nurtitional", "nutritional", - "nutricional", "nutritional", - "nutrutional", "nutritional", - "obamination", "abomination", - "obersvation", "observation", - "obilterated", "obliterated", - "objectivety", "objectivity", - "objectivify", "objectivity", - "objectivily", "objectivity", - "objectivley", "objectively", - "obliberated", "obliterated", - "obliderated", "obliterated", - "obligerated", "obliterated", - "oblitarated", "obliterated", - "obliteraded", "obliterated", - "obliterared", "obliterated", - "oblitirated", "obliterated", - "oblitorated", "obliterated", - "obliverated", "obliterated", - "observacion", "observation", - "observaiton", "observant", - "observasion", "observations", - "observating", "observation", - "observerats", "observers", - "obsessivley", "obsessive", - "obstruccion", "obstruction", - "obstruktion", "obstruction", - "obsturction", "obstruction", - "obversation", "observation", - "ocasionally", "occasionally", - "ocassionaly", "occasionally", - "occasionals", "occasions", - "occasionaly", "occasionally", - "occasionnal", "occasional", - "occassional", "occasional", - "occassioned", "occasioned", - "occurrances", "occurrences", - "offensivley", "offensively", - "offesnively", "offensively", - "officiallly", "officially", - "olbiterated", "obliterated", - "omniscienct", "omniscient", - "operacional", "operational", - "operasional", "operational", - "operationel", "operational", - "oppresssing", "oppressing", - "oppresssion", "oppression", - "opprotunity", "opportunity", - "optimisitic", "optimistic", - "optimizaton", "optimization", - "orchestraed", "orchestrated", - "orchestrial", "orchestra", - "oreintation", "orientation", - "organisaton", "organisation", - "organiserad", "organised", - "organistion", "organisation", - "organizarea", "organizer", - "organizarem", "organizer", - "organizarme", "organizer", - "organizarte", "organizer", - "organiztion", "organization", - "oridinarily", "ordinarily", - "orientacion", "orientation", - "originially", "originally", - "originnally", "originally", - "origniality", "originality", - "ostensiably", "ostensibly", - "ostensibily", "ostensibly", - "outclasssed", "outclassed", - "outnunbered", "outnumbered", - "outperfroms", "outperform", - "outpreforms", "outperform", - "outrageosly", "outrageously", - "outrageouly", "outrageously", - "outragerous", "outrageous", - "outskirters", "outskirts", - "outsorucing", "outsourcing", - "outsourcade", "outsourced", - "outsoursing", "outsourcing", - "overbraking", "overbearing", - "overcapping", "overlapping", - "overcharing", "overarching", - "overclcoked", "overclocked", - "overclicked", "overclocked", - "overcloaked", "overclocked", - "overclocing", "overclocking", - "overclockig", "overclocking", - "overclocled", "overclocked", - "overcomeing", "overcoming", - "overcomming", "overcoming", - "overeaching", "overarching", - "overfapping", "overlapping", - "overheading", "overheating", - "overhooking", "overlooking", - "overhwelmed", "overwhelmed", - "overkapping", "overlapping", - "overklocked", "overclocked", - 
"overlapsing", "overlapping", - "overlcocked", "overclocked", - "overlcoking", "overlooking", - "overlooming", "overlooking", - "overloooked", "overlooked", - "overlordess", "overlords", - "overmapping", "overlapping", - "overpooling", "overlooking", - "overpovered", "overpowered", - "overpoweing", "overpowering", - "overreacing", "overreacting", - "overreactin", "overreaction", - "overreacton", "overreaction", - "overshaddow", "overshadowed", - "overshadowd", "overshadowed", - "overtapping", "overlapping", - "overthining", "overthinking", - "overthinkig", "overthinking", - "overvlocked", "overclocked", - "overwealmed", "overwhelmed", - "overwelming", "overwhelming", - "overwhelemd", "overwhelmed", - "overwhelimg", "overwhelm", - "overwheling", "overwhelming", - "overwhemled", "overwhelmed", - "overwhlemed", "overwhelmed", - "overwritted", "overwrite", - "pakistanais", "pakistani", - "pakistanezi", "pakistani", - "palceholder", "placeholder", - "palesitnian", "palestinians", - "palestenian", "palestinian", - "palestinain", "palestinians", - "palestinans", "palestinians", - "palestinier", "palestine", - "palistinian", "palestinian", - "palythrough", "playthrough", - "papanicalou", "papanicolaou", - "parachutage", "parachute", - "paragraphes", "paragraphs", - "paramedicks", "paramedics", - "paramedicos", "paramedics", - "parameteres", "parameters", - "paranthesis", "parenthesis", - "parapharsed", "paraphrase", - "paraprhased", "paraphrase", - "parasitisme", "parasites", - "parenthasis", "parenthesis", - "parenthesys", "parentheses", - "parenthises", "parenthesis", - "parenthisis", "parenthesis", - "parliamenty", "parliamentary", - "parntership", "partnership", - "parrallelly", "parallelly", - "partecipant", "participant", - "partecipate", "participate", - "parternship", "partnership", - "partiarchal", "patriarchal", - "particapate", "participate", - "particiapte", "participate", - "participait", "participant", - "participans", "participants", - "participare", "participate", - "participatd", "participant", - "participati", "participant", - "participats", "participant", - "participent", "participant", - "particpiate", "participated", - "particually", "particularly", - "particulaly", "particularly", - "particulary", "particularly", - "partnetship", "partnership", - "partonizing", "patronizing", - "passionatly", "passionately", - "passionetly", "passionately", - "passionnate", "passionate", - "passporters", "passports", - "pathologial", "pathological", - "patriarchia", "patriarchal", - "patriarcial", "patriarchal", - "patriarical", "patriarchal", - "patriotisch", "patriotic", - "patriotisim", "patriotism", - "patriottism", "patriotism", - "patronozing", "patronizing", - "peacefullly", "peacefully", - "pedestirans", "pedestrians", - "pedestrains", "pedestrians", - "pedophilies", "pedophile", - "pedophilles", "pedophile", - "penetracion", "penetration", - "penetrading", "penetrating", - "penetrarion", "penetration", - "penninsular", "peninsular", - "pennsylvnia", "pennsylvania", - "pepperocini", "pepperoni", - "percantages", "percentages", - "percautions", "precautions", - "percentille", "percentile", - "percpetions", "perceptions", - "percusssion", "percussion", - "perdicament", "predicament", - "perdictable", "predictable", - "perdictions", "predictions", - "perephirals", "peripherals", - "pereptually", "perpetually", - "perferences", "preferences", - "perfomrance", "performances", - "perforamnce", "performances", - "performaces", "performances", - "performacne", "performances", - "performanes", 
"performances", - "performanse", "performances", - "performence", "performance", - "performnace", "performances", - "perfromance", "performance", - "perhiperals", "peripherals", - "perihperals", "peripherals", - "periodicaly", "periodically", - "periperhals", "peripherals", - "periphereal", "peripheral", - "peripherial", "peripheral", - "periphirals", "peripherals", - "periphreals", "peripherals", - "periphrials", "peripherals", - "perjorative", "pejorative", - "perliminary", "preliminary", - "permamently", "permanently", - "permanantly", "permanently", - "permaturely", "prematurely", - "permenantly", "permanently", - "permenently", "permanently", - "perminantly", "permanently", - "perminently", "permanently", - "permisisons", "permissions", - "permissable", "permissible", - "permisssion", "permissions", - "pernamently", "permanently", - "perosnality", "personality", - "perparation", "preparation", - "perpatrated", "perpetrated", - "perpatrator", "perpetrator", - "perpatuated", "perpetuated", - "perpatuates", "perpetuates", - "perpertated", "perpetuated", - "perpertator", "perpetrators", - "perpetraded", "perpetrated", - "perpetrador", "perpetrator", - "perpetraron", "perpetrator", - "perpetrater", "perpetrator", - "perpetuaded", "perpetuated", - "perpetutate", "perpetuate", - "perpetuties", "perpetuates", - "perpitrated", "perpetrated", - "perpitrator", "perpetrator", - "perpretated", "perpetrated", - "perpretator", "perpetrators", - "perpsective", "perspective", - "perputrator", "perpetrator", - "perputually", "perpetually", - "perputuated", "perpetuated", - "perputuates", "perpetuates", - "perrogative", "prerogative", - "persceptive", "perspectives", - "persectuion", "persecution", - "persecucion", "persecution", - "persecusion", "persecution", - "persecutted", "persecuted", - "persepctive", "perspective", - "persicution", "persecution", - "persistance", "persistence", - "persistante", "persistent", - "persistense", "persistence", - "persistente", "persistence", - "personhoood", "personhood", - "perspecitve", "perspective", - "perspectief", "perspective", - "perspektive", "perspective", - "persuassion", "persuasion", - "persuassive", "persuasive", - "persucution", "persecution", - "persumption", "presumption", - "pertubation", "perturbation", - "pessimestic", "pessimistic", - "pharamcists", "pharmacist", - "phenomenona", "phenomena", - "philadelpha", "philadelphia", - "philadelpia", "philadelphia", - "philiphines", "philippines", - "philippenes", "philippines", - "philippenis", "philippines", - "philippides", "philippines", - "philippinas", "philippines", - "philippinos", "philippines", - "philisopher", "philosopher", - "phillipines", "philippines", - "philosipher", "philosopher", - "philosopers", "philosophers", - "philosophae", "philosopher", - "philosophia", "philosophical", - "philosopies", "philosophies", - "philosphies", "philosophies", - "philospoher", "philosopher", - "photograhed", "photographed", - "photograher", "photographer", - "photograhic", "photographic", - "photograhpy", "photography", - "photograped", "photographed", - "photograper", "photographer", - "photograpgh", "photographs", - "photograpic", "photographic", - "photogrpahs", "photographs", - "photogrpahy", "photography", - "physcedelic", "psychedelic", - "physciatric", "psychiatric", - "physcopaths", "psychopaths", - "piankillers", "painkillers", - "pilgrimmage", "pilgrimage", - "pitchforcks", "pitchforks", - "pitchforkes", "pitchforks", - "plaestinian", "palestinian", - "plagiariasm", "plagiarism", - 
"planeswaker", "planeswalker", - "planeswaler", "planeswalker", - "planeswalkr", "planeswalker", - "platfromers", "platformer", - "playhtrough", "playthrough", - "playthorugh", "playthrough", - "playthourgh", "playthrough", - "playthroguh", "playthroughs", - "playthrougs", "playthroughs", - "playthrouhg", "playthroughs", - "playthtough", "playthrough", - "playtrhough", "playthrough", - "ploretariat", "proletariat", - "policitally", "politically", - "policitians", "politicians", - "politicains", "politicians", - "politicanti", "politician", - "politiciens", "politicians", - "politiicans", "politician", - "polititians", "politicians", - "polyphonyic", "polyphonic", - "pomegranite", "pomegranate", - "popluations", "populations", - "poportional", "proportional", - "popoulation", "population", - "porjectiles", "projectiles", - "porletariat", "proletariat", - "pornagraphy", "pornography", - "pornograghy", "pornography", - "pornograhpy", "pornography", - "pornograpgy", "pornography", - "pornogrophy", "pornography", - "pornogrpahy", "pornography", - "porportions", "proportions", - "portestants", "protestants", - "portuguease", "portuguese", - "portuguesse", "portuguese", - "positionial", "positional", - "positionnal", "positional", - "positionned", "positioned", - "positiveity", "positivity", - "positiviely", "positively", - "positivisme", "positives", - "positivisty", "positivity", - "positivitey", "positivity", - "positivitiy", "positivity", - "possesseurs", "possesses", - "possesssion", "possessions", - "possestions", "possessions", - "possiblilty", "possibility", - "potencially", "potentially", - "potentailly", "potentially", - "powerhourse", "powerhouse", - "powerlifing", "powerlifting", - "powerliftng", "powerlifting", - "pracitcally", "practically", - "practicarlo", "practical", - "practioners", "practitioners", - "practitions", "practitioners", - "pragmatisch", "pragmatic", - "precausions", "precautions", - "precedessor", "predecessor", - "precendence", "precedence", - "precentages", "percentages", - "preconceved", "preconceived", - "preconcieve", "preconceived", - "precuations", "precautions", - "predacessor", "predecessor", - "predecesser", "predecessor", - "predections", "predictions", - "predescesor", "predecessors", - "predesessor", "predecessors", - "predesposed", "predisposed", - "predessecor", "predecessor", - "predicatble", "predictable", - "predicement", "predicament", - "predicessor", "predecessor", - "prediciment", "predicament", - "predicitons", "predictions", - "predictible", "predictable", - "predictious", "predictions", - "predictment", "predicament", - "predisposte", "predisposed", - "predocessor", "predecessor", - "preferabbly", "preferably", - "preferabely", "preferable", - "preferabley", "preferably", - "preferablly", "preferably", - "preferances", "preferences", - "preferenser", "preferences", - "preferental", "preferential", - "preferentes", "preferences", - "preferrably", "preferably", - "preferrring", "preferring", - "preformance", "performance", - "pregnanices", "pregnancies", - "pregnencies", "pregnancies", - "pregorative", "prerogative", - "preipherals", "peripherals", - "prejudicies", "prejudice", - "preleminary", "preliminary", - "prelimanary", "preliminary", - "prelimenary", "preliminary", - "premanently", "permanently", - "prematuraly", "prematurely", - "prematurily", "prematurely", - "prematurley", "prematurely", - "premilinary", "preliminary", - "premissible", "permissible", - "premissions", "permissions", - "preorderded", "preordered", - "preorderers", 
"preorders", - "preparacion", "preparation", - "preperation", "preparation", - "prepetrated", "perpetrated", - "prepetrator", "perpetrator", - "prepetually", "perpetually", - "prepetuated", "perpetuated", - "prepetuates", "perpetuates", - "preporation", "preparation", - "preposterus", "preposterous", - "prerequesit", "prerequisite", - "prerequiste", "prerequisite", - "prerequites", "prerequisite", - "prerogitive", "prerogative", - "prerogotive", "prerogative", - "prescripton", "prescription", - "presecution", "persecution", - "presedintia", "presidential", - "presentaion", "presentation", - "presentatin", "presentations", - "preservaton", "preservation", - "preservered", "preserved", - "presidencey", "presidency", - "presidental", "presidential", - "presidentcy", "presidency", - "presistence", "persistence", - "presitgious", "prestigious", - "presitigous", "prestigious", - "presomption", "presumption", - "prespective", "perspective", - "pressureing", "pressuring", - "prestegious", "prestigious", - "prestigeous", "prestigious", - "prestigieus", "prestigious", - "prestigiosa", "prestigious", - "prestigiose", "prestigious", - "prestigiosi", "prestigious", - "prestigioso", "prestigious", - "prestiguous", "prestigious", - "presumabely", "presumably", - "presumabley", "presumably", - "presumptous", "presumptuous", - "presumptuos", "presumptuous", - "pretencious", "pretentious", - "pretendendo", "pretended", - "pretensious", "pretentious", - "pretentieus", "pretentious", - "prevailaing", "prevailing", - "prevailling", "prevailing", - "preventitve", "preventative", - "preventivno", "prevention", - "primatively", "primitively", - "princessses", "princesses", - "principales", "principles", - "principalis", "principals", - "principielt", "principle", - "privatizied", "privatized", - "priveledges", "privileges", - "privelleges", "privileges", - "privilegeds", "privileges", - "privilegied", "privileged", - "privilegien", "privilege", - "privilegier", "privilege", - "privilegies", "privilege", - "proactivley", "proactive", - "probabilaty", "probability", - "probabilite", "probabilities", - "probalibity", "probability", - "probelmatic", "problematic", - "problamatic", "problematic", - "problimatic", "problematic", - "problomatic", "problematic", - "proccedings", "proceedings", - "proccessing", "processing", - "proceddings", "proceedings", - "procedureal", "procedural", - "procedurial", "procedural", - "procedurile", "procedure", - "processesor", "processors", - "processeurs", "processes", - "processsors", "processors", - "procrastion", "procreation", - "procriation", "procreation", - "prodcutions", "productions", - "prodictions", "productions", - "producerats", "producers", - "producitons", "productions", - "productioin", "productions", - "productivos", "productions", - "productivty", "productivity", - "produktions", "productions", - "professinal", "professional", - "professionl", "professionals", - "professoras", "professors", - "professores", "professors", - "professorin", "profession", - "professsion", "professions", - "proficiancy", "proficiency", - "proficienct", "proficient", - "proficienty", "proficiency", - "proficinecy", "proficiency", - "profitabile", "profitable", - "progerssion", "progressions", - "progerssive", "progressives", - "programable", "programmable", - "programmare", "programmer", - "programmars", "programmers", - "programmate", "programme", - "programmets", "programmers", - "programmeur", "programmer", - "programmier", "programmer", - "programmmed", "programme", - "programmmer", 
"programme", - "progresison", "progressions", - "progressers", "progresses", - "progressief", "progressive", - "progressino", "progressions", - "progressivo", "progression", - "progressoin", "progressions", - "progressvie", "progressives", - "prohabition", "prohibition", - "prohibation", "prohibition", - "prohibicion", "prohibition", - "prohibiteds", "prohibits", - "prohibitied", "prohibited", - "prohibitifs", "prohibits", - "prohibitivo", "prohibition", - "prohibitons", "prohibits", - "prohibitted", "prohibited", - "projecticle", "projectile", - "projectives", "projectiles", - "projectlies", "projectiles", - "prolateriat", "proletariat", - "proletariet", "proletariat", - "proletariot", "proletariat", - "proletaryat", "proletariat", - "proleteriat", "proletariat", - "prolitariat", "proletariat", - "prologomena", "prolegomena", - "promenantly", "prominently", - "promenently", "prominently", - "prometheius", "prometheus", - "prometheous", "prometheus", - "promethesus", "prometheus", - "prometheyus", "prometheus", - "promimently", "prominently", - "prominantly", "prominently", - "prominately", "prominently", - "promiscious", "promiscuous", - "promocional", "promotional", - "promsicuous", "promiscuous", - "pronography", "pornography", - "pronoucning", "pronouncing", - "pronounched", "pronounced", - "pronunciato", "pronunciation", - "propaganada", "propaganda", - "properitary", "proprietary", - "propertiary", "proprietary", - "propertions", "proportions", - "prophechies", "prophecies", - "propiertary", "proprietary", - "propogation", "propagation", - "proponenets", "proponents", - "proponentes", "proponents", - "proporition", "proposition", - "proportians", "proportions", - "proportinal", "proportional", - "proposicion", "proposition", - "propositivo", "proposition", - "propostions", "proportions", - "propreitary", "proprietary", - "propriatary", "proprietary", - "propriatery", "proprietary", - "propriatory", "proprietary", - "proprietery", "proprietary", - "proprietory", "proprietary", - "propriotary", "proprietary", - "proprotions", "proportions", - "propsective", "prospective", - "propulstion", "propulsion", - "prosectuion", "prosecution", - "prosectuors", "prosecutors", - "prosecuters", "prosecutors", - "prosicution", "prosecution", - "prosocution", "prosecution", - "prosperious", "prosperous", - "prospertity", "prosperity", - "prospettive", "prospective", - "prostethics", "prosthetic", - "prosthethic", "prosthetic", - "prostitites", "prostitutes", - "prostitiute", "prostitute", - "prostituate", "prostitute", - "prostitudes", "prostitutes", - "prostituees", "prostitutes", - "prostituion", "prostitution", - "prostitures", "prostitutes", - "prostitutas", "prostitutes", - "prostitutie", "prostitute", - "prostitutin", "prostitution", - "prostitutke", "prostitutes", - "prostituton", "prostitution", - "prostitutos", "prostitutes", - "protability", "portability", - "protaganist", "protagonist", - "protaginist", "protagonist", - "protagnoist", "protagonist", - "protagoinst", "protagonists", - "protagonits", "protagonists", - "protagonsit", "protagonists", - "protectings", "protections", - "protectoras", "protectors", - "protectores", "protectors", - "protectrons", "protections", - "protelariat", "proletariat", - "protestents", "protestants", - "protistants", "protestants", - "protoganist", "protagonist", - "protogonist", "protagonist", - "protostants", "protestants", - "protototype", "prototype", - "provacative", "provocative", - "provacotive", "provocative", - "provicative", "provocative", - 
"providencie", "providence", - "provinciaal", "provincial", - "provinicial", "provincial", - "provisiones", "provisions", - "provoactive", "provocative", - "provocatief", "provocative", - "provocitive", "provocative", - "provocotive", "provocative", - "provokative", "provocative", - "pscyhedelic", "psychedelic", - "pscyhiatric", "psychiatric", - "pscyhopaths", "psychopaths", - "pshyciatric", "psychiatric", - "pshycopaths", "psychopaths", - "psychaitric", "psychiatric", - "psychedilic", "psychedelic", - "psychedleic", "psychedelics", - "psychiatist", "psychiatrist", - "psychidelic", "psychedelic", - "psychodelic", "psychedelic", - "psychopants", "psychopaths", - "psychopatch", "psychopath", - "psychopatic", "psychopathic", - "psychotisch", "psychotic", - "psychriatic", "psychiatric", - "publikation", "publication", - "punctiation", "punctuation", - "puncutation", "punctuation", - "punshiments", "punishments", - "punsihments", "punishments", - "purchaseing", "purchasing", - "purchashing", "purchasing", - "purposefuly", "purposefully", - "pyschedelic", "psychedelic", - "pyschiatric", "psychiatric", - "pyschopaths", "psychopaths", - "qaurterback", "quarterback", - "qualificato", "qualification", - "qualifieres", "qualifiers", - "quantitaive", "quantitative", - "quantitatve", "quantitative", - "quantitites", "quantities", - "quantitties", "quantities", - "quarantaine", "quarantine", - "quarantenni", "quarantine", - "quartercask", "quarterbacks", - "quesitoning", "questioning", - "questionned", "questioned", - "questonable", "questionable", - "radiaoctive", "radioactive", - "radioactice", "radioactive", - "radioactief", "radioactive", - "radioaktive", "radioactive", - "radiocative", "radioactive", - "raidoactive", "radioactive", - "reaccurring", "recurring", - "reactionair", "reactionary", - "realibility", "reliability", - "realistisch", "realistic", - "reaserchers", "researchers", - "reaserching", "researching", - "reasonabley", "reasonably", - "reasonablly", "reasonably", - "reassureing", "reassuring", - "reassurring", "reassuring", - "rebuildling", "rebuilding", - "rebuplicans", "republicans", - "reccomended", "recommended", - "receptionst", "receptionist", - "recgonition", "recognition", - "recgonizing", "recognizing", - "rechargable", "rechargeable", - "recipientes", "recipients", - "reciporcate", "reciprocate", - "recipricate", "reciprocate", - "reciprocant", "reciprocate", - "reciprocite", "reciprocate", - "recivership", "receivership", - "reclutantly", "reluctantly", - "recognicing", "recognizing", - "recognision", "recognition", - "recomending", "recommending", - "recommandes", "recommends", - "recommendes", "recommends", - "recommented", "recommended", - "reconcilled", "reconcile", - "recongition", "recognition", - "recongizing", "recognizing", - "reconsidder", "reconsider", - "recrational", "recreational", - "recrutiment", "recruitment", - "rectangluar", "rectangular", - "rectangualr", "rectangular", - "rectengular", "rectangular", - "recuritment", "recruitment", - "redundantcy", "redundancy", - "reevalulate", "reevaluate", - "reevalutate", "reevaluate", - "reevaulated", "reevaluate", - "refelctions", "reflections", - "referancing", "referencing", - "refereneced", "referenced", - "refereneces", "references", - "referincing", "referencing", - "referrences", "references", - "reflectivos", "reflections", - "refreshener", "refresher", - "refrubished", "refurbished", - "refubrished", "refurbished", - "refurbushed", "refurbished", - "regeneratin", "regeneration", - "regeneraton", 
"regeneration", - "registerdns", "registers", - "registeries", "registers", - "registerred", "registered", - "registraion", "registration", - "regocnition", "recognition", - "regresssion", "regression", - "regresssive", "regressive", - "regualtions", "regulations", - "regulationg", "regulating", - "regulatiors", "regulators", - "reinassance", "renaissance", - "reinforcemt", "reinforcement", - "reinfornced", "reinforced", - "reinitalise", "reinitialise", - "reinitalize", "reinitialize", - "reinstaling", "reinstalling", - "reinstallng", "reinstalling", - "reisntalled", "reinstalled", - "relaibility", "reliability", - "relatiation", "retaliation", - "relationshp", "relationships", - "relativiser", "relatives", - "relativisme", "relatives", - "relativitiy", "relativity", - "relativitly", "relativity", - "relcutantly", "reluctantly", - "relentlesly", "relentlessly", - "relentlessy", "relentlessly", - "relevations", "revelations", - "relfections", "reflections", - "religeously", "religiously", - "religionens", "religions", - "religioners", "religions", - "relpacement", "replacement", - "reluctently", "reluctantly", - "remarkabley", "remarkably", - "remarkablly", "remarkably", - "remasterred", "remastered", - "remembrence", "remembrance", - "reminescent", "reminiscent", - "reminicient", "reminiscent", - "reminiscant", "reminiscent", - "reminiscint", "reminiscent", - "reminscient", "reminiscent", - "reminsicent", "reminiscent", - "renaiisance", "renaissance", - "renaiscance", "renaissance", - "renaissanse", "renaissance", - "renaissence", "renaissance", - "renassaince", "renaissance", - "renassiance", "renaissance", - "reniassance", "renaissance", - "rennovating", "renovating", - "rennovation", "renovation", - "repalcement", "replacement", - "repbulicans", "republicans", - "repeateadly", "repeatedly", - "repectively", "respectively", - "repersented", "represented", - "replacemnet", "replacements", - "replacemnts", "replacements", - "repleacable", "replaceable", - "repositiory", "repository", - "representas", "represents", - "representes", "represents", - "represssion", "repression", - "reproducion", "reproduction", - "reproducive", "reproductive", - "repsectable", "respectable", - "repsonsible", "responsible", - "repsonsibly", "responsibly", - "republcians", "republicans", - "republician", "republican", - "republicons", "republicans", - "repuglicans", "republicans", - "requeriment", "requirement", - "requierment", "requirements", - "resemblence", "resemblance", - "resemblense", "resembles", - "reserachers", "researchers", - "reseraching", "researching", - "resgination", "resignation", - "residencial", "residential", - "residentail", "residential", - "residentual", "residential", - "resignacion", "resignation", - "resignating", "resignation", - "resignement", "resignment", - "resignition", "resignation", - "resintalled", "reinstalled", - "resistansen", "resistances", - "resistanses", "resistances", - "resistences", "resistances", - "resistnaces", "resistances", - "resoltuions", "resolutions", - "resotration", "restoration", - "resoultions", "resolutions", - "respecatble", "respectable", - "respectabil", "respectable", - "respectfuly", "respectfully", - "respectible", "respectable", - "respectivly", "respectively", - "respectuful", "respectful", - "respektable", "respectable", - "resperatory", "respiratory", - "resperitory", "respiratory", - "respiritory", "respiratory", - "respitatory", "respiratory", - "responcible", "responsible", - "responcibly", "responsibly", - "respondendo", "responded", - 
"responisble", "responsible", - "responisbly", "responsibly", - "responsable", "responsible", - "responsably", "responsibly", - "responsbile", "responsible", - "responsbily", "responsibly", - "responsibel", "responsibly", - "responsibil", "responsibly", - "responsivle", "responsive", - "resporatory", "respiratory", - "respository", "repository", - "respriatory", "respiratory", - "ressembling", "resembling", - "ressurected", "resurrected", - "restaraunts", "restaurants", - "restaruants", "restaurants", - "restauraunt", "restaurant", - "restaurents", "restaurants", - "resteraunts", "restaurants", - "restirction", "restriction", - "restorarion", "restoration", - "restorating", "restoration", - "restrainted", "restrained", - "restrective", "restrictive", - "restriccion", "restriction", - "restricitng", "restricting", - "restriciton", "restrictions", - "restricitve", "restrictive", - "restricteds", "restricts", - "restricters", "restricts", - "restrictied", "restrictive", - "restrictifs", "restricts", - "restrictins", "restricts", - "restrictios", "restricts", - "restrictivo", "restriction", - "restrictons", "restricts", - "restriktion", "restriction", - "restriktive", "restrictive", - "restrittive", "restrictive", - "restructing", "restricting", - "restruction", "restriction", - "restuarants", "restaurants", - "resturaunts", "restaurants", - "resurecting", "resurrecting", - "resurrecion", "resurrection", - "retailation", "retaliation", - "retalitated", "retaliated", - "retardathon", "retardation", - "retardating", "retardation", - "retardatron", "retardation", - "retartation", "retardation", - "retirbution", "retribution", - "retrebution", "retribution", - "retribucion", "retribution", - "retribuiton", "retribution", - "retributivo", "retribution", - "retribvtion", "retribution", - "retrobution", "retribution", - "retrubution", "retribution", - "revealtions", "revelations", - "revelaitons", "revelations", - "revolations", "revolutions", - "revoultions", "revolutions", - "ridiculious", "ridiculous", - "ridiculosly", "ridiculously", - "ridiculouly", "ridiculously", - "ridiculousy", "ridiculously", - "rightfullly", "rightfully", - "rolepalying", "roleplaying", - "romanticaly", "romantically", - "roundabaout", "roundabout", - "roundabount", "roundabout", - "rudimentery", "rudimentary", - "rudimentory", "rudimentary", - "ruidmentary", "rudimentary", - "sacrifacing", "sacrificing", - "sacrificare", "sacrifice", - "sacrificied", "sacrifice", - "sacrificies", "sacrifice", - "sacrifieced", "sacrificed", - "sacrifising", "sacrificing", - "sacrifizing", "sacrificing", - "salughtered", "slaughtered", - "sanctionned", "sanctioned", - "sarcastisch", "sarcastic", - "saskatchewn", "saskatchewan", - "saskatchwan", "saskatchewan", - "satisfacion", "satisfaction", - "satisfacory", "satisfactory", - "scandanavia", "scandinavia", - "scandanivia", "scandinavian", - "scandenavia", "scandinavia", - "scandianvia", "scandinavian", - "scandimania", "scandinavia", - "scandinaiva", "scandinavian", - "scandinavan", "scandinavian", - "scandivania", "scandinavian", - "scandonavia", "scandinavia", - "scarificing", "sacrificing", - "scheduleing", "scheduling", - "schedulling", "scheduling", - "schoalrship", "scholarships", - "scholarhips", "scholarship", - "scholarstic", "scholastic", - "scholership", "scholarship", - "scholorship", "scholarship", - "scientiests", "scientists", - "scnadinavia", "scandinavia", - "scrambleing", "scrambling", - "screenshoot", "screenshot", - "seamlessley", "seamlessly", - "sedentarity", 
"sedentary", - "seflishness", "selfishness", - "segergation", "segregation", - "segragation", "segregation", - "segregacion", "segregation", - "segretation", "segregation", - "segrigation", "segregation", - "selectivley", "selectively", - "selfeshness", "selfishness", - "senitmental", "sentimental", - "sensacional", "sensational", - "sensasional", "sensational", - "sensationel", "sensational", - "sensetional", "sensational", - "sensitivety", "sensitivity", - "sentamental", "sentimental", - "sentemental", "sentimental", - "sentenceing", "sentencing", - "sentimentos", "sentiments", - "sentimentul", "sentimental", - "separatedly", "separately", - "separatelly", "separately", - "separatisme", "separates", - "separatiste", "separates", - "sepculating", "speculating", - "serivceable", "serviceable", - "serviciable", "serviceable", - "settelement", "settlement", - "settelments", "settlements", - "settlemetns", "settlements", - "sexualizied", "sexualized", - "shakeapeare", "shakespeare", - "shakepseare", "shakespeare", - "shakesphere", "shakespeare", - "shanenigans", "shenanigans", - "shareholdes", "shareholders", - "sharpeneing", "sharpening", - "sharpenning", "sharpening", - "shatterling", "shattering", - "shatterring", "shattering", - "sheakspeare", "shakespeare", - "shenadigans", "shenanigans", - "shenanagans", "shenanigans", - "shenanagins", "shenanigans", - "shenanegans", "shenanigans", - "shenanegins", "shenanigans", - "shenangians", "shenanigans", - "shenanigens", "shenanigans", - "shenanigins", "shenanigans", - "shenenigans", "shenanigans", - "sheninigans", "shenanigans", - "shennaigans", "shenanigans", - "shortenning", "shortening", - "shortenting", "shortening", - "signficiant", "significant", - "signifantly", "significantly", - "significane", "significance", - "significato", "significant", - "signifigant", "significant", - "signifikant", "significant", - "signitories", "signatories", - "signularity", "singularity", - "similarites", "similarities", - "similarlity", "similarity", - "similiarity", "similarity", - "simluations", "simulations", - "simplefying", "simplifying", - "simplicitly", "simplicity", - "simplifiing", "simplifying", - "simplisitic", "simplistic", - "simplyifing", "simplifying", - "simualtions", "simulations", - "simulatious", "simulations", - "simultaneos", "simultaneous", - "simultaneus", "simultaneous", - "simultanous", "simultaneous", - "singluarity", "singularity", - "singualrity", "singularity", - "singulairty", "singularity", - "singularily", "singularity", - "sitautional", "situational", - "situacional", "situational", - "situationly", "situational", - "siutational", "situational", - "skatebaords", "skateboard", - "skateboader", "skateboard", - "skepticisim", "skepticism", - "skillshoots", "skillshots", - "skillshosts", "skillshots", - "slaugthered", "slaughtered", - "slefishness", "selfishness", - "sluaghtered", "slaughtered", - "smarthpones", "smartphones", - "snowboaring", "snowboarding", - "snowbolling", "snowballing", - "snowfalling", "snowballing", - "socailizing", "socializing", - "socialicing", "socializing", - "socialistes", "socialists", - "socialistos", "socialists", - "socializare", "socialize", - "sociapathic", "sociopathic", - "sociologial", "sociological", - "sociopathes", "sociopaths", - "sociopathis", "sociopaths", - "sociophatic", "sociopathic", - "solidariety", "solidarity", - "somethingis", "somethings", - "sorrounding", "surrounding", - "soundtrakcs", "soundtracks", - "southamtpon", "southampton", - "southanpton", "southampton", - 
"southapmton", "southampton", - "southernese", "southerners", - "southerness", "southerners", - "southernest", "southerners", - "southernors", "southerners", - "southmapton", "southampton", - "southtampon", "southampton", - "soveregnity", "sovereignty", - "sovereighty", "sovereignty", - "sovereingty", "sovereignty", - "sovereinity", "sovereignty", - "soveriegnty", "sovereignty", - "soveriengty", "sovereignty", - "soverignity", "sovereignty", - "specailists", "specialists", - "specailized", "specialized", - "specailizes", "specializes", - "specatcular", "spectacular", - "specialiced", "specialized", - "specialices", "specializes", - "specialites", "specializes", - "speciallist", "specialist", - "speciallity", "specially", - "speciallize", "specialize", - "specialzied", "specialized", - "specifcally", "specifically", - "specificaly", "specifically", - "specificato", "specification", - "specificies", "specifics", - "specifiying", "specifying", - "specilaized", "specialize", - "speciliazed", "specialize", - "spectatores", "spectators", - "spectatular", "spectacular", - "spectauclar", "spectacular", - "spectaulars", "spectaculars", - "spectecular", "spectacular", - "specualting", "speculating", - "specualtion", "speculation", - "specualtive", "speculative", - "specularite", "speculative", - "speculaties", "speculative", - "spiritualiy", "spiritually", - "spiritualty", "spirituality", - "spirituella", "spiritually", - "spirtiually", "spiritually", - "spirutually", "spiritually", - "spitirually", "spiritually", - "sponatenous", "spontaneous", - "sponatneous", "spontaneous", - "sponsership", "sponsorship", - "sponsorhips", "sponsorship", - "sponsorhsip", "sponsorship", - "sponsorshop", "sponsorship", - "spontaenous", "spontaneous", - "spontainous", "spontaneous", - "spontaneuos", "spontaneous", - "spontanious", "spontaneous", - "sponteanous", "spontaneous", - "sponteneous", "spontaneous", - "spreadhseet", "spreadsheet", - "spreadsheat", "spreadsheet", - "spreadshets", "spreadsheets", - "spreedsheet", "spreadsheet", - "springfeild", "springfield", - "springfiled", "springfield", - "sprinklered", "sprinkled", - "squirrelies", "squirrels", - "squirrelius", "squirrels", - "stabilizare", "stabilize", - "stabilizied", "stabilize", - "stabilizier", "stabilize", - "stabilizies", "stabilize", - "staggerring", "staggering", - "staggerwing", "staggering", - "stationairy", "stationary", - "stationerad", "stationed", - "stationnary", "stationary", - "statisitcal", "statistical", - "statisticly", "statistical", - "statistisch", "statistics", - "statsitical", "statistical", - "stereotpyes", "stereotypes", - "stereotying", "stereotyping", - "steriotypes", "stereotypes", - "steroetypes", "stereotypes", - "steryotypes", "stereotypes", - "stimluating", "stimulating", - "stimualting", "stimulating", - "stimualtion", "stimulation", - "stimulantes", "stimulants", - "stockpilled", "stockpile", - "stormfrount", "stormfront", - "storyteling", "storytelling", - "straightden", "straightened", - "straightend", "straightened", - "straightmen", "straighten", - "straightned", "straightened", - "straightner", "straighten", - "strangeshit", "strangest", - "strategisch", "strategic", - "strategiske", "strategies", - "strawberies", "strawberries", - "strawberrry", "strawberry", - "strawbrerry", "strawberry", - "strenghened", "strengthened", - "strenghtend", "strengthen", - "strenghtens", "strengthen", - "strengtened", "strengthened", - "structurels", "structures", - "strugglebus", "struggles", - "struggleing", "struggling", - 
"stubborness", "stubbornness", - "stutterring", "stuttering", - "subcatagory", "subcategory", - "subconscius", "subconscious", - "subconscous", "subconscious", - "subisdizing", "subsidizing", - "subjectivly", "subjectively", - "submergered", "submerged", - "submisisons", "submissions", - "subredddits", "subreddits", - "subscirbers", "subscribers", - "subscribbed", "subscribe", - "subscribber", "subscriber", - "subscriping", "subscribing", - "subscriptin", "subscriptions", - "subscripton", "subscription", - "subsequenty", "subsequently", - "subsidiezed", "subsidized", - "subsidizied", "subsidized", - "subsidizies", "subsidize", - "subsiziding", "subsidizing", - "subsquently", "subsequently", - "subsrcibers", "subscribers", - "substancial", "substantial", - "substansial", "substantial", - "substansive", "substantive", - "substantied", "substantive", - "substanties", "substantive", - "substential", "substantial", - "substitiute", "substitute", - "substituded", "substituted", - "substitudes", "substitutes", - "substituion", "substitution", - "substitures", "substitutes", - "substitutie", "substitutes", - "substitutos", "substitutes", - "substitutue", "substitutes", - "substracted", "subtracted", - "suburburban", "suburban", - "succesfully", "successfully", - "successeurs", "successes", - "successfull", "successful", - "successfuly", "successfully", - "successsion", "succession", - "successully", "successfully", - "succsesfull", "successfully", - "sucessfully", "successfully", - "sucseptible", "susceptible", - "sufficently", "sufficiently", - "suggestieve", "suggestive", - "sumbissions", "submissions", - "sunglassses", "sunglasses", - "superceeded", "superseded", - "superficiel", "superficial", - "superfulous", "superfluous", - "superhereos", "superhero", - "superifical", "superficial", - "superiorest", "superiors", - "supermacist", "supremacist", - "supermakert", "supermarkets", - "supermakret", "supermarkets", - "supermakter", "supermarkets", - "supermarkts", "supermarkets", - "supermaster", "supermarkets", - "supernatual", "supernatural", - "supersition", "supervision", - "superstiton", "superstition", - "supervisers", "supervisors", - "supervisior", "supervisor", - "suplimented", "supplemented", - "supplaments", "supplements", - "supplemetal", "supplemental", - "supporteurs", "supporters", - "supposedely", "supposedly", - "supposidely", "supposedly", - "supposingly", "supposedly", - "suppresions", "suppression", - "suppresssor", "suppressor", - "supramacist", "supremacist", - "supremacits", "supremacist", - "supremasist", "supremacist", - "supremicist", "supremacist", - "suprimacist", "supremacist", - "suprisingly", "surprisingly", - "suprizingly", "surprisingly", - "suroundings", "surroundings", - "surpemacist", "supremacist", - "surprisinly", "surprisingly", - "surreptious", "surreptitious", - "surroundign", "surroundings", - "surroundigs", "surrounds", - "surroundins", "surrounds", - "surroundngs", "surrounds", - "surveilence", "surveillance", - "survivabily", "survivability", - "susbtantial", "substantial", - "susbtantive", "substantive", - "suscepitble", "susceptible", - "susceptable", "susceptible", - "suscpetible", "susceptible", - "susecptible", "susceptible", - "suspectible", "susceptible", - "suspiciosly", "suspiciously", - "suspiciouly", "suspiciously", - "suspiciouns", "suspicion", - "suspicision", "suspicions", - "suspicisons", "suspicions", - "sustainible", "sustainable", - "switerzland", "switzerland", - "switserland", "switzerland", - "switzlerand", "switzerland", - 
"swizterland", "switzerland", - "swtizerland", "switzerland", - "symapthetic", "sympathetic", - "symmertical", "symmetrical", - "sympathatic", "sympathetic", - "sympathiers", "sympathizers", - "sympathsize", "sympathize", - "sympethetic", "sympathetic", - "symphatetic", "sympathetic", - "symphatized", "sympathize", - "symphatizer", "sympathizers", - "symphatizes", "sympathize", - "sympothetic", "sympathetic", - "synthesasia", "synthesis", - "synthesesia", "synthesis", - "sypmathetic", "sympathetic", - "tabelspoons", "tablespoons", - "tablepsoons", "tablespoons", - "tablespooon", "tablespoon", - "tablesppons", "tablespoons", - "tailgateing", "tailgating", - "tailgatting", "tailgating", - "tangentialy", "tangentially", - "techincally", "technically", - "techincians", "technicians", - "techiniques", "techniques", - "techncially", "technically", - "technicalty", "technicality", - "technichian", "technician", - "technicials", "technicians", - "techniciens", "technicians", - "technitians", "technicians", - "technnology", "technology", - "technologia", "technological", - "techticians", "technicians", - "teleportato", "teleportation", - "teleportion", "teleporting", - "teleproting", "teleporting", - "temeprature", "temperature", - "temparament", "temperament", - "temparature", "temperature", - "temparement", "temperament", - "tempearture", "temperatures", - "temperamant", "temperament", - "temperarily", "temporarily", - "temperatues", "temperatures", - "temperaturs", "temperatures", - "temperatuur", "temperature", - "temperement", "temperament", - "tempermeant", "temperament", - "tempertaure", "temperature", - "temporairly", "temporarily", - "temporaraly", "temporarily", - "temporarity", "temporarily", - "tempreature", "temperature", - "temproarily", "temporarily", - "tempurature", "temperature", - "tepmorarily", "temporarily", - "termanology", "terminology", - "terminacion", "termination", - "terminaison", "termination", - "terminalogy", "terminology", - "terminatior", "terminator", - "terminatorn", "termination", - "terminilogy", "terminology", - "terminoligy", "terminology", - "terratorial", "territorial", - "terratories", "territories", - "terretorial", "territorial", - "terretories", "territories", - "terrirorial", "territorial", - "terrirories", "territories", - "terriroties", "territories", - "terristrial", "territorial", - "territoires", "territories", - "territorist", "terrorist", - "territority", "territory", - "terroristas", "terrorists", - "terroristes", "terrorists", - "terrorities", "territories", - "terrotorial", "territorial", - "terrotories", "territories", - "testiclular", "testicular", - "thankfullly", "thankfully", - "thanksgivng", "thanksgiving", - "theoligical", "theological", - "theoratical", "theoretical", - "theoreticly", "theoretical", - "theoritical", "theoretical", - "therapautic", "therapeutic", - "therapeudic", "therapeutic", - "therapeutuc", "therapeutic", - "therapuetic", "therapeutic", - "theraupetic", "therapeutic", - "thereaputic", "therapeutic", - "thereotical", "theoretical", - "therepeutic", "therapeutic", - "thermometor", "thermometer", - "thermometre", "thermometer", - "thermomiter", "thermometer", - "thermomoter", "thermometer", - "thermoneter", "thermometer", - "thermostaat", "thermostat", - "theroetical", "theoretical", - "thoeretical", "theoretical", - "threataning", "threatening", - "threatended", "threatened", - "threatining", "threatening", - "throttleing", "throttling", - "throughoput", "throughput", - "throughtout", "throughout", - "throughtput", 
"throughput", - "thudnerbolt", "thunderbolt", - "thunberbolt", "thunderbolt", - "thunderblot", "thunderbolt", - "thunderboat", "thunderbolt", - "thunderbots", "thunderbolt", - "thunderbowl", "thunderbolt", - "thunderjolt", "thunderbolt", - "thundervolt", "thunderbolt", - "tightenting", "tightening", - "tocuhscreen", "touchscreen", - "torrentking", "torrenting", - "torrentting", "torrenting", - "torublesome", "troublesome", - "torunaments", "tournaments", - "totalitaran", "totalitarian", - "totalitarni", "totalitarian", - "touranments", "tournaments", - "tournamnets", "tournaments", - "tournemants", "tournaments", - "tournements", "tournaments", - "tournmanets", "tournaments", - "tradicional", "traditional", - "tradionally", "traditionally", - "tradisional", "traditional", - "traditionel", "traditional", - "traditition", "tradition", - "tragicallly", "tragically", - "tramautized", "traumatized", - "tramuatized", "traumatized", - "trancendent", "transcendent", - "trancending", "transcending", - "tranclucent", "translucent", - "trandgender", "transgender", - "tranditions", "transitions", - "tranistions", "transitions", - "tranlastion", "translations", - "tranlsating", "translating", - "tranlsation", "translation", - "tranluscent", "translucent", - "trannsexual", "transsexual", - "tranpshobic", "transphobic", - "transaccion", "transaction", - "transaciton", "transactions", - "transalting", "translating", - "transaltion", "translation", - "transations", "transitions", - "transcluent", "translucent", - "transcripto", "transcription", - "transctions", "transitions", - "transculent", "translucent", - "transending", "transcending", - "transfender", "transgender", - "transferers", "transfers", - "transfering", "transferring", - "transfersom", "transforms", - "transfomers", "transforms", - "transformas", "transforms", - "transformes", "transformers", - "transformis", "transforms", - "transformus", "transforms", - "transforums", "transforms", - "transfromed", "transformed", - "transfromer", "transformers", - "transgemder", "transgender", - "transgended", "transgendered", - "transgenger", "transgender", - "transgenres", "transgender", - "transhpobic", "transphobic", - "transisions", "transitions", - "transisitor", "transistor", - "transistion", "transition", - "transistior", "transistor", - "transitiond", "transitioned", - "transitiong", "transitioning", - "translatron", "translation", - "translusent", "translucent", - "transmatter", "transmitter", - "transmision", "transmission", - "transmissin", "transmissions", - "transmisson", "transmission", - "transmittor", "transmitter", - "transmorged", "transformed", - "transmutter", "transmitter", - "transofrmed", "transformed", - "transohobic", "transphobic", - "transparant", "transparent", - "transparecy", "transparency", - "transpareny", "transparency", - "transperant", "transparent", - "transperent", "transparent", - "transphonic", "transphobic", - "transphopic", "transphobic", - "transplanet", "transplant", - "transporder", "transporter", - "transporing", "transporting", - "transportar", "transporter", - "transportng", "transporting", - "transportor", "transporter", - "transseuxal", "transsexual", - "transsexaul", "transsexual", - "transsexuel", "transsexual", - "transulcent", "translucent", - "transylvnia", "transylvania", - "tranzformer", "transformer", - "tranzitions", "transitions", - "tranzporter", "transporter", - "trasncripts", "transcripts", - "trasnferred", "transferred", - "trasnformed", "transformed", - "trasnformer", "transformer", - 
"trasngender", "transgender", - "trasnmitted", "transmitted", - "trasnmitter", "transmitter", - "trasnparent", "transparent", - "trasnphobic", "transphobic", - "trasnported", "transported", - "trasnporter", "transporter", - "traumatisch", "traumatic", - "traumetized", "traumatized", - "traumitized", "traumatized", - "travellerhd", "travelled", - "travellodge", "travelled", - "tremendeous", "tremendous", - "tremendious", "tremendous", - "tremenduous", "tremendous", - "trespessing", "trespassing", - "tresspasing", "trespassing", - "triggereing", "triggering", - "triggerring", "triggering", - "troubelsome", "troublesome", - "truamatized", "traumatized", - "trushworthy", "trustworthy", - "trustowrthy", "trustworthy", - "trustwhorty", "trustworthy", - "trustworhty", "trustworthy", - "truthfullly", "truthfully", - "tupperwears", "tupperware", - "turstworthy", "trustworthy", - "ubiquitious", "ubiquitous", - "ubiquituous", "ubiquitous", - "ukraininans", "ukrainians", - "ultimatelly", "ultimately", - "unanimoulsy", "unanimous", - "unappeasing", "unappealing", - "unappeeling", "unappealing", - "unathorised", "unauthorised", - "unattendend", "unattended", - "unatteneded", "unattended", - "unattracive", "unattractive", - "unauthoried", "unauthorized", - "unavailible", "unavailable", - "unavaliable", "unavailable", - "unaviodable", "unavoidable", - "unbalanaced", "unbalanced", - "unbraikable", "unbreakable", - "unbrakeable", "unbreakable", - "unbreakabie", "unbreakable", - "unbreakabke", "unbreakable", - "unbreakbale", "unbreakable", - "unbreakeble", "unbreakable", - "unbrearable", "unbreakable", - "uncensorred", "uncensored", - "uncertaincy", "uncertainty", - "uncertanity", "uncertainty", - "uncertianty", "uncertainty", - "unchangable", "unchangeable", - "uncompetive", "uncompetitive", - "unconcsious", "unconscious", - "unconsicous", "unconscious", - "uncouncious", "unconscious", - "undeniabely", "undeniably", - "undeniabley", "undeniably", - "undeniablly", "undeniably", - "undenialbly", "undeniably", - "underestime", "underestimate", - "undergating", "undertaking", - "undergorund", "underground", - "underheight", "underweight", - "undermiming", "undermining", - "undermindes", "undermines", - "undernearth", "underneath", - "underneight", "underweight", - "underpining", "undermining", - "underpowerd", "underpowered", - "underpowred", "underpowered", - "underratted", "underrated", - "understannd", "understands", - "understsand", "understands", - "undertacker", "undertaker", - "underwarter", "underwater", - "underwieght", "underweight", - "underwright", "underweight", - "undesireble", "undesirable", - "undesriable", "undesirable", - "undetecable", "undetectable", - "undiserable", "undesirable", - "undoubedtly", "undoubtedly", - "undoubetdly", "undoubtedly", - "undoubtadly", "undoubtedly", - "undoubtebly", "undoubtedly", - "undoubtetly", "undoubtedly", - "undreground", "underground", - "unemployeed", "unemployed", - "unemployent", "unemployment", - "unemploymed", "unemployed", - "unexpectdly", "unexpectedly", - "unexpectely", "unexpectedly", - "unfamilliar", "unfamiliar", - "unfortuante", "unfortunate", - "ungreatfull", "ungrateful", - "unilateraly", "unilaterally", - "unilaterlly", "unilaterally", - "unimportent", "unimportant", - "uninspiried", "uninspired", - "uninstaling", "uninstalling", - "uninstallng", "uninstalling", - "uninteresed", "uninterested", - "uniquesness", "uniqueness", - "unisntalled", "uninstalled", - "universella", "universally", - "universites", "universities", - "univesities", 
"universities", - "unjustifyed", "unjustified", - "unknowinlgy", "unknowingly", - "unkowningly", "unknowingly", - "unnecassary", "unnecessary", - "unneccesary", "unnecessary", - "unnecessery", "unnecessary", - "unnecissary", "unnecessary", - "unnessecary", "unnecessary", - "unnistalled", "uninstalled", - "unoriginial", "unoriginal", - "unorigional", "unoriginal", - "unoticeable", "unnoticeable", - "unpleaseant", "unpleasant", - "unportected", "unprotected", - "unprepaired", "unprepared", - "unpreparred", "unprepared", - "unproducive", "unproductive", - "unprotexted", "unprotected", - "unqaulified", "unqualified", - "unrealisitc", "unrealistic", - "unrealsitic", "unrealistic", - "unreasonbly", "unreasonably", - "unregluated", "unregulated", - "unregualted", "unregulated", - "unregulared", "unregulated", - "unrepentent", "unrepentant", - "unresponive", "unresponsive", - "unrestriced", "unrestricted", - "unsettleing", "unsettling", - "unsintalled", "uninstalled", - "unsolicated", "unsolicited", - "unsoliticed", "unsolicited", - "unsolocited", "unsolicited", - "unsubscirbe", "unsubscribe", - "unsubscrbed", "unsubscribed", - "unsubscried", "unsubscribed", - "unsubscripe", "unsubscribe", - "unsubscrive", "unsubscribe", - "unsubscrube", "unsubscribe", - "unsubsrcibe", "unsubscribe", - "unsuccesful", "unsuccessful", - "unsuccessul", "unsuccessful", - "unsucesfuly", "unsuccessfully", - "unsucessful", "unsuccessful", - "unsunscribe", "unsubscribe", - "unsuprising", "unsurprising", - "unsuprizing", "unsurprising", - "unsurprized", "unsurprised", - "unsusbcribe", "unsubscribe", - "unviersally", "universally", - "unwarrented", "unwarranted", - "utiliatrian", "utilitarian", - "utilitatian", "utilitarian", - "utiliterian", "utilitarian", - "utilizacion", "utilization", - "utilizaiton", "utilization", - "utilizating", "utilization", - "utiltiarian", "utilitarian", - "vacciantion", "vaccination", - "vaccinaties", "vaccinate", - "vegaterians", "vegetarians", - "vegetariens", "vegetarians", - "vegetatians", "vegetarians", - "vegeterians", "vegetarians", - "vehementely", "vehemently", - "venezuelean", "venezuela", - "venezuelian", "venezuela", - "ventalation", "ventilation", - "ventelation", "ventilation", - "ventialtion", "ventilation", - "ventilacion", "ventilation", - "verastility", "versatility", - "verfication", "verification", - "versatality", "versatility", - "versitality", "versatility", - "versitilaty", "versatility", - "victorieuse", "victories", - "victoriuous", "victorious", - "vietnameese", "vietnamese", - "vietnamesse", "vietnamese", - "vietnamiese", "vietnamese", - "vietnamnese", "vietnamese", - "vigilanties", "vigilante", - "visibillity", "visibility", - "vocabularly", "vocabulary", - "volatillity", "volatility", - "volonteered", "volunteered", - "volounteers", "volunteers", - "volunatrily", "voluntarily", - "voluntairly", "voluntarily", - "volunteeers", "volunteers", - "volunteraly", "voluntarily", - "voluntereed", "volunteered", - "volunterily", "voluntarily", - "vulnerabile", "vulnerable", - "wallpapaers", "wallpapers", - "wallpappers", "wallpapers", - "washingtion", "washington", - "watermeleon", "watermelon", - "waterprooof", "waterproof", - "wavelegnths", "wavelength", - "wavelenghth", "wavelength", - "wavelenghts", "wavelength", - "weaknessses", "weaknesses", - "wellingston", "wellington", - "wellingtion", "wellington", - "westernerns", "westerners", - "westmisnter", "westminster", - "westmnister", "westminster", - "westmonster", "westminster", - "whisperered", "whispered", - 
"whitholding", "withholding", - "wikileakers", "wikileaks", - "willingless", "willingness", - "wincheseter", "winchester", - "windsheilds", "windshield", - "withdrawels", "withdrawals", - "withdrawles", "withdrawals", - "withhelding", "withholding", - "withrdawing", "withdrawing", - "witnesssing", "witnessing", - "woodowrking", "woodworking", - "woodworkign", "woodworking", - "worhsipping", "worshipping", - "workstaiton", "workstation", - "workststion", "workstation", - "worshopping", "worshipping", - "xenophoblic", "xenophobic", - "abandining", "abandoning", - "abandonned", "abandoned", - "abbreviato", "abbreviation", - "abnoramlly", "abnormally", - "abnormalty", "abnormally", - "abnornally", "abnormally", - "abominaton", "abomination", - "abondoning", "abandoning", - "aborginial", "aboriginal", - "aboriganal", "aboriginal", - "aborigenal", "aboriginal", - "aborignial", "aboriginal", - "aborigonal", "aboriginal", - "aboroginal", "aboriginal", - "aboslutely", "absolutely", - "abosrption", "absorption", - "abreviated", "abbreviated", - "absintence", "abstinence", - "absitnence", "abstinence", - "absolument", "absolute", - "absolutley", "absolutely", - "absoprtion", "absorption", - "absorbsion", "absorption", - "absorbtion", "absorption", - "absorpsion", "absorption", - "absoultely", "absolutely", - "abstanence", "abstinence", - "abstenance", "abstinence", - "abstenince", "abstinence", - "abstinense", "abstinence", - "abstinince", "abstinence", - "absurditiy", "absurdity", - "abundacies", "abundances", - "academicas", "academics", - "academicos", "academics", - "academicus", "academics", - "accdiently", "accidently", - "accelarate", "accelerate", - "accelerade", "accelerated", - "accelerare", "accelerate", - "accelerato", "acceleration", - "acceleread", "accelerated", - "accelertor", "accelerator", - "accelorate", "accelerate", - "acceptabel", "acceptable", - "acceptabil", "acceptable", - "acceptence", "acceptance", - "accepterad", "accepted", - "acceptible", "acceptable", - "accerelate", "accelerated", - "accesories", "accessories", - "accessable", "accessible", - "accessbile", "accessible", - "accessoire", "accessories", - "accessoirs", "accessories", - "accicently", "accidently", - "accidantly", "accidently", - "accidebtly", "accidently", - "accidenlty", "accidently", - "accidentes", "accidents", - "accidentky", "accidently", - "accidently", "accidentally", - "accidnetly", "accidently", - "accomadate", "accommodate", - "accomodate", "accommodate", - "accompined", "accompanied", - "accomplise", "accomplishes", - "accompliss", "accomplishes", - "accostumed", "accustomed", - "accountent", "accountant", - "accpetable", "acceptable", - "accpetance", "acceptance", - "accuastion", "accusation", - "acculumate", "accumulate", - "accumalate", "accumulate", - "accumelate", "accumulate", - "accumilate", "accumulate", - "accumulare", "accumulate", - "accumulato", "accumulation", - "accumulted", "accumulated", - "accuratley", "accurately", - "accusating", "accusation", - "accusition", "accusation", - "accustumed", "accustomed", - "acheivable", "achievable", - "acheivment", "achievement", - "acheviable", "achievable", - "achiavable", "achievable", - "achieveble", "achievable", - "achievemnt", "achievement", - "achievemts", "achieves", - "achievents", "achieves", - "achievment", "achievement", - "achilleous", "achilles", - "achiveable", "achievable", - "achivement", "achievement", - "acitvating", "activating", - "acitvision", "activision", - "acknowldge", "acknowledge", - "acknowlede", "acknowledge", - 
"acknowlege", "acknowledge", - "acommodate", "accommodate", - "acopalypse", "apocalypse", - "acordingly", "accordingly", - "acqauinted", "acquainted", - "acquanited", "acquainted", - "acquianted", "acquainted", - "acquinated", "acquainted", - "acquisiton", "acquisition", - "acticating", "activating", - "actication", "activation", - "activacion", "activation", - "activaters", "activates", - "activiates", "activist", - "activiites", "activist", - "activisiom", "activism", - "activisits", "activist", - "activistas", "activists", - "activistes", "activists", - "activiting", "activating", - "activizion", "activision", - "acustommed", "accustomed", - "adaptacion", "adaptation", - "adaptating", "adaptation", - "adaquetely", "adequately", - "addicitons", "addictions", - "addionally", "additionally", - "additivies", "additive", - "additivley", "additive", - "addittions", "addictions", - "addmission", "admission", - "addresable", "addressable", - "addressess", "addresses", - "adequatley", "adequately", - "adequetely", "adequately", - "adequitely", "adequately", - "adernaline", "adrenaline", - "adjectivos", "adjectives", - "adjustible", "adjustable", - "admendment", "amendment", - "administed", "administered", - "administor", "administer", - "administre", "administer", - "administro", "administer", - "adminsiter", "administer", - "admissable", "admissible", - "admittadly", "admittedly", - "admittetly", "admittedly", - "admittidly", "admittedly", - "adolencent", "adolescent", - "adolescant", "adolescent", - "adolescene", "adolescence", - "adoloscent", "adolescent", - "adolsecent", "adolescent", - "adpatation", "adaptation", - "adreanline", "adrenaline", - "adrelanine", "adrenaline", - "adreneline", "adrenaline", - "adreniline", "adrenaline", - "adressable", "addressable", - "advanteges", "advantages", - "advatanges", "advantages", - "adventrous", "adventurous", - "adventrues", "adventures", - "adventuers", "adventures", - "adventuous", "adventurous", - "adventuros", "adventurous", - "adventurus", "adventurous", - "adverticed", "advertised", - "aestethics", "aesthetics", - "aesthatics", "aesthetics", - "aesthestic", "aesthetics", - "affiliaton", "affiliation", - "affilliate", "affiliate", - "affirmitve", "affirmative", - "afflcition", "affliction", - "afflection", "affliction", - "affliation", "affliction", - "affliciton", "affliction", - "afforadble", "affordable", - "affordible", "affordable", - "affortable", "affordable", - "africaners", "africans", - "africaness", "africans", - "aftermaket", "aftermarket", - "afternooon", "afternoon", - "aggravanti", "aggravating", - "aggraveted", "aggravated", - "aggreement", "agreement", - "aggregious", "egregious", - "aggresions", "aggression", - "aggressivo", "aggression", - "aggrovated", "aggravated", - "agnosticim", "agnosticism", - "agnosticsm", "agnosticism", - "agnostisch", "agnostic", - "agnostiscm", "agnosticism", - "agnostisim", "agnosticism", - "agreeement", "agreement", - "agricultre", "agriculture", - "agricultue", "agriculture", - "agriculure", "agriculture", - "agricuture", "agriculture", - "ailenating", "alienating", - "ajdectives", "adjectives", - "alchoholic", "alcoholic", - "alchoolism", "alcoholism", - "alcohalics", "alcoholics", - "alcohalism", "alcoholism", - "alcoholsim", "alcoholism", - "aleinating", "alienating", - "algorhitms", "algorithms", - "algorithem", "algorithm", - "algorithim", "algorithm", - "algorithsm", "algorithms", - "algorithum", "algorithm", - "algorithym", "algorithm", - "algoritmes", "algorithms", - "algoritmos", 
"algorithms", - "algorthims", "algorithms", - "algortihms", "algorithms", - "algorythms", "algorithms", - "alievating", "alienating", - "alledgedly", "allegedly", - "allegeance", "allegiance", - "allegedely", "allegedly", - "allegedley", "allegedly", - "allegience", "allegiance", - "alleigance", "allegiance", - "allergisch", "allergic", - "alliegance", "allegiance", - "alligeance", "allegiance", - "alocholics", "alcoholics", - "alocholism", "alcoholism", - "alogrithms", "algorithms", - "alphabeast", "alphabet", - "alteracion", "alteration", - "alterarion", "alteration", - "alterating", "alteration", - "alternador", "alternator", - "alternater", "alternator", - "alternatie", "alternatives", - "alternatly", "alternately", - "alternatve", "alternate", - "alternetly", "alternately", - "altogehter", "altogether", - "altogheter", "altogether", - "altriustic", "altruistic", - "altruisitc", "altruistic", - "altrusitic", "altruistic", - "alturistic", "altruistic", - "aluminimum", "aluminum", - "amargeddon", "armageddon", - "amateurest", "amateurs", - "ambassabor", "ambassador", - "ambassader", "ambassador", - "ambassator", "ambassador", - "ambassedor", "ambassador", - "ambassidor", "ambassador", - "ambassodor", "ambassador", - "ambiguitiy", "ambiguity", - "amendmants", "amendments", - "amendmends", "amendments", - "americains", "americas", - "americanas", "americans", - "americanis", "americas", - "americanss", "americas", - "americants", "americas", - "americanus", "americans", - "americares", "americas", - "ammendment", "amendment", - "amrageddon", "armageddon", - "analitical", "analytical", - "analitycal", "analytical", - "analogeous", "analogous", - "analyitcal", "analytical", - "analyseles", "analyses", - "analyseras", "analyses", - "analyseres", "analyses", - "analysised", "analyses", - "analysises", "analyses", - "analysisto", "analysts", - "analystics", "analysts", - "anarchisim", "anarchism", - "anarchistm", "anarchism", - "anarchiszm", "anarchism", - "anarchsits", "anarchists", - "anayltical", "analytical", - "ancilliary", "ancillary", - "androiders", "androids", - "androidtvs", "androids", - "anecdotale", "anecdote", - "anecdotice", "anecdote", - "anestheisa", "anesthesia", - "anesthetia", "anesthesia", - "anesthisia", "anesthesia", - "anitbiotic", "antibiotic", - "anitquated", "antiquated", - "anitsocial", "antisocial", - "aniversary", "anniversary", - "annilihate", "annihilated", - "anniverary", "anniversary", - "anniversay", "anniversary", - "anniversry", "anniversary", - "annointing", "anointing", - "annonceurs", "announcers", - "annoucners", "announcers", - "annoucning", "announcing", - "announched", "announce", - "annyoingly", "annoyingly", - "anonymosly", "anonymously", - "anonymousy", "anonymously", - "antaganist", "antagonist", - "antagnoist", "antagonist", - "antarcitca", "antarctica", - "antarctida", "antarctica", - "anthropoly", "anthropology", - "antibiodic", "antibiotic", - "antibiotcs", "antibiotics", - "antibitoic", "antibiotic", - "antiboitic", "antibiotics", - "anticapate", "anticipate", - "anticiapte", "anticipate", - "anticipare", "anticipate", - "anticipato", "anticipation", - "anticuated", "antiquated", - "antiquited", "antiquated", - "antiqvated", "antiquated", - "antisipate", "anticipate", - "antisocail", "antisocial", - "antisosial", "antisocial", - "antoganist", "antagonist", - "antractica", "antarctica", - "apacolypse", "apocalypse", - "apartheied", "apartheid", - "aplication", "application", - "apocalipse", "apocalypse", - "apocalpyse", "apocalypse", - 
"apocalypes", "apocalypse", - "apocalypic", "apocalyptic", - "apocalyspe", "apocalypse", - "apocalytic", "apocalyptic", - "apocaplyse", "apocalypse", - "apocolapse", "apocalypse", - "apolagetic", "apologetic", - "apolagized", "apologized", - "apolegetic", "apologetic", - "apoligetic", "apologetic", - "apoligists", "apologists", - "apoligized", "apologized", - "apologisms", "apologists", - "apologiste", "apologise", - "apologitic", "apologetic", - "apostraphe", "apostrophe", - "apostrephe", "apostrophe", - "apostrohpe", "apostrophe", - "apostropes", "apostrophe", - "apparantly", "apparently", - "appareance", "appearance", - "apparenlty", "apparently", - "appartment", "apartment", - "appealling", "appealing", - "appearence", "appearance", - "appearnace", "appearances", - "apperances", "appearances", - "apperantly", "apparently", - "apperciate", "appreciate", - "appereance", "appearance", - "appetities", "appetite", - "appetitite", "appetite", - "appication", "application", - "applainces", "appliances", - "applicaple", "applicable", - "applicates", "applicants", - "applicaton", "application", - "applicible", "applicable", - "appliences", "appliances", - "appointmet", "appointments", - "appologies", "apologies", - "apporached", "approached", - "apporaches", "approaches", - "appraoched", "approached", - "appraoches", "approaches", - "apprecaite", "appreciate", - "appreciato", "appreciation", - "appreciste", "appreciates", - "apprecitae", "appreciates", - "apprecited", "appreciated", - "apprectice", "apprentice", - "appreicate", "appreciate", - "apprendice", "apprentice", - "apprentace", "apprentice", - "apprentise", "apprentice", - "appretiate", "appreciate", - "appretince", "apprentice", - "appriceate", "appreciates", - "appriciate", "appreciate", - "appriecate", "appreciates", - "approacing", "approaching", - "appropiate", "appropriate", - "approprate", "appropriate", - "apropriate", "appropriate", - "aproximate", "approximate", - "apsotrophe", "apostrophe", - "aptitudine", "aptitude", - "aqcuainted", "acquainted", - "aquisition", "acquisition", - "aramgeddon", "armageddon", - "arangement", "arrangement", - "arbitarily", "arbitrarily", - "arbitraily", "arbitrarily", - "arbitraion", "arbitration", - "arbitrairy", "arbitrarily", - "arbitrarly", "arbitrary", - "arbitraton", "arbitration", - "arcehtypes", "archetypes", - "archaelogy", "archaeology", - "archaeolgy", "archaeology", - "archaology", "archeology", - "archatypes", "archetypes", - "archetects", "architects", - "archetipes", "archetypes", - "archetpyes", "archetypes", - "archetypus", "archetypes", - "archeytpes", "archetypes", - "archictect", "architect", - "architechs", "architects", - "architecht", "architect", - "architecte", "architecture", - "architexts", "architects", - "architypes", "archetypes", - "archtiects", "architects", - "archytypes", "archetypes", - "argentinia", "argentina", - "arguements", "arguments", - "argumentas", "arguments", - "argumentos", "arguments", - "arithemtic", "arithmetic", - "arithmitic", "arithmetic", - "aritmethic", "arithmetic", - "armagaddon", "armageddon", - "armageddan", "armageddon", - "armagedden", "armageddon", - "armageddin", "armageddon", - "armagedeon", "armageddon", - "armageedon", "armageddon", - "armagideon", "armageddon", - "armegaddon", "armageddon", - "arrangerad", "arranged", - "arrangment", "arrangement", - "arthimetic", "arithmetic", - "articifial", "artificial", - "articluate", "articulate", - "articualte", "articulate", - "articulted", "articulated", - "artifactos", "artifacts", - 
"artificiel", "artificial", - "artihmetic", "arithmetic", - "artillerly", "artillery", - "asbestoast", "asbestos", - "asethetics", "aesthetics", - "asisstants", "assistants", - "aspiratons", "aspirations", - "assasinate", "assassinate", - "assassians", "assassin", - "assassinas", "assassins", - "assassines", "assassins", - "assassinos", "assassins", - "assemblare", "assemble", - "assempling", "assembling", - "assersions", "assertions", - "assesement", "assessment", - "assestment", "assessment", - "assignemnt", "assignment", - "assimalate", "assimilate", - "assimilant", "assimilate", - "assimilare", "assimilate", - "assimliate", "assimilate", - "assimulate", "assimilate", - "assingment", "assignment", - "assistanat", "assistants", - "assistanse", "assistants", - "assistante", "assistance", - "assistence", "assistance", - "assistendo", "assisted", - "assistents", "assistants", - "assmebling", "assembling", - "assocaited", "associated", - "assocaites", "associates", - "assocation", "association", - "associatie", "associated", - "associatin", "associations", - "associaton", "association", - "associsted", "associates", - "assoicated", "associated", - "assoicates", "associates", - "assosiated", "associated", - "assosiates", "associates", - "asssassans", "assassins", - "assupmtion", "assumptions", - "assymetric", "asymmetric", - "asteroides", "asteroids", - "asthetical", "aesthetical", - "astonising", "astonishing", - "astornauts", "astronauts", - "astranauts", "astronauts", - "astronatus", "astronauts", - "astronaunt", "astronaut", - "astronomia", "astronomical", - "astronouts", "astronauts", - "astronuats", "astronauts", - "asutralian", "australian", - "atatchment", "attachment", - "athleticos", "athletics", - "athleticsm", "athleticism", - "athletiscm", "athleticism", - "athletisim", "athleticism", - "atmopshere", "atmosphere", - "atmoshpere", "atmosphere", - "atomsphere", "atmosphere", - "atriculate", "articulate", - "atrocoties", "atrocities", - "atrosities", "atrocities", - "attachemnt", "attachment", - "attackeras", "attackers", - "attactment", "attachment", - "attemtping", "attempting", - "attendence", "attendance", - "attendents", "attendants", - "attirbutes", "attributes", - "attmepting", "attempting", - "attracters", "attracts", - "attractice", "attractive", - "attracties", "attracts", - "attractifs", "attracts", - "attraktion", "attraction", - "attraktive", "attractive", - "attribuito", "attribution", - "attritubes", "attributes", - "auctioners", "auctions", - "audioboook", "audiobook", - "audioboost", "audiobooks", - "auidobooks", "audiobooks", - "auotattack", "autoattack", - "austrailan", "australian", - "austrailia", "australia", - "australain", "australians", - "australien", "australian", - "australina", "australians", - "austrlaian", "australians", - "authenticy", "authenticity", - "autherized", "authorized", - "authoritay", "authority", - "authorites", "authorities", - "authorithy", "authority", - "authroized", "authorized", - "autistisch", "autistic", - "autoattaks", "autoattack", - "autocorect", "autocorrect", - "autocorrct", "autocorrect", - "autocorret", "autocorrect", - "autograpgh", "autograph", - "automatice", "automate", - "automatico", "automation", - "automatied", "automate", - "automatiek", "automate", - "automatron", "automation", - "automatted", "automate", - "automibile", "automobile", - "automitive", "automotive", - "automoblie", "automobile", - "automomous", "autonomous", - "automonous", "autonomous", - "automotice", "automotive", - "automotion", "automation", - 
"automotize", "automotive", - "automotove", "automotive", - "autonamous", "autonomous", - "autonation", "automation", - "autonimous", "autonomous", - "autonomity", "autonomy", - "autononous", "autonomous", - "auttoatack", "autoattack", - "auxilliary", "auxiliary", - "availabale", "available", - "availaible", "available", - "availiable", "available", - "averageadi", "averaged", - "averageifs", "averages", - "awesomeley", "awesomely", - "awesomelly", "awesomely", - "awesomenss", "awesomeness", - "awkwardess", "awkwardness", - "babysister", "babysitter", - "babysiting", "babysitting", - "babysittng", "babysitting", - "bachelores", "bachelors", - "backgorund", "background", - "backgroudn", "backgrounds", - "backgrouds", "backgrounds", - "backgrouns", "backgrounds", - "backgruond", "backgrounds", - "backpacing", "backpacking", - "backpackng", "backpacking", - "backrgound", "backgrounds", - "backrounds", "backgrounds", - "baksetball", "basketball", - "balanceada", "balanced", - "balanceado", "balanced", - "balckberry", "blackberry", - "balckhawks", "blackhawks", - "balcksmith", "blacksmith", - "bandwagoon", "bandwagon", - "bangaldesh", "bangladesh", - "bangladash", "bangladesh", - "bangledash", "bangladesh", - "bangledesh", "bangladesh", - "banglidesh", "bangladesh", - "bankrupcty", "bankruptcy", - "bankruptsy", "bankruptcy", - "bankrutpcy", "bankruptcy", - "barabrians", "barbarians", - "barbariens", "barbarians", - "barbarions", "barbarians", - "barbarisch", "barbaric", - "barberians", "barbarians", - "bargianing", "bargaining", - "bartendars", "bartenders", - "basektball", "basketball", - "baskteball", "basketball", - "bastardous", "bastards", - "battelship", "battleship", - "battelstar", "battlestar", - "battlearts", "battlestar", - "battlechip", "battleship", - "battlefied", "battlefield", - "battlefont", "battlefront", - "battlehips", "battleship", - "battlesaur", "battlestar", - "battlescar", "battlestar", - "battleshop", "battleship", - "battlestsr", "battlestar", - "beahviours", "behaviours", - "beautifuly", "beautifully", - "beautilful", "beautifully", - "beautyfull", "beautiful", - "becnhmarks", "benchmarks", - "beethoveen", "beethoven", - "begginings", "beginnings", - "begininngs", "beginnings", - "beginninng", "beginnings", - "behaivours", "behaviours", - "behaviorly", "behavioral", - "behavoiral", "behavioral", - "behavoiurs", "behaviours", - "behavorial", "behavioral", - "behavoural", "behavioral", - "behvaiours", "behaviours", - "beleagured", "beleaguered", - "beleivable", "believable", - "beliavable", "believable", - "beliebable", "believable", - "believeble", "believable", - "beliveable", "believable", - "benchamrks", "benchmarks", - "benchmakrs", "benchmarks", - "benckmarks", "benchmarks", - "benefecial", "beneficial", - "beneficary", "beneficiary", - "beneficiul", "beneficial", - "benefitial", "beneficial", - "beneifical", "beneficial", - "benelovent", "benevolent", - "benevalent", "benevolent", - "benevelant", "benevolent", - "benevelent", "benevolent", - "benevelont", "benevolent", - "benevloent", "benevolent", - "benevolant", "benevolent", - "benificial", "beneficial", - "benovelent", "benevolent", - "bernouilli", "bernoulli", - "besitality", "bestiality", - "bestaility", "bestiality", - "besteality", "bestiality", - "betrayeado", "betrayed", - "bilateraly", "bilaterally", - "billborads", "billboards", - "bioligical", "biological", - "biologiset", "biologist", - "biologiskt", "biologist", - "birghtness", "brightness", - "birmignham", "birmingham", - "birmimgham", 
"birmingham", - "bisexuella", "bisexual", - "bitterseet", "bittersweet", - "bitterswet", "bittersweet", - "blackahwks", "blackhawks", - "blackbarry", "blackberry", - "blackbeary", "blackberry", - "blackbeery", "blackberry", - "blackcawks", "blackhawks", - "blackhakws", "blackhawks", - "blackhwaks", "blackhawks", - "blackmsith", "blacksmith", - "blackshits", "blacksmith", - "blasphemey", "blasphemy", - "blitzkreig", "blitzkrieg", - "blochchain", "blockchain", - "blockcahin", "blockchain", - "blockchian", "blockchain", - "bloodboner", "bloodborne", - "bloodbonre", "bloodborne", - "bloodborbe", "bloodborne", - "bloodbrone", "bloodborne", - "bloodporne", "bloodborne", - "bloorborne", "bloodborne", - "blueberies", "blueberries", - "blueberris", "blueberries", - "blueberrry", "blueberry", - "bluebrints", "blueprints", - "boardcasts", "broadcasts", - "bodyheight", "bodyweight", - "bodyweigth", "bodyweight", - "bodywieght", "bodyweight", - "bombarment", "bombardment", - "bookmakred", "bookmarked", - "bootlaoder", "bootloader", - "bootleader", "bootloader", - "boradcasts", "broadcasts", - "borderlads", "borderlands", - "borderlans", "borderlands", - "bottelneck", "bottleneck", - "bottlebeck", "bottleneck", - "boundaires", "boundaries", - "bounderies", "boundaries", - "bourgeoius", "bourgeois", - "boycutting", "boycotting", - "boyfirends", "boyfriends", - "boyfreinds", "boyfriends", - "boyfrients", "boyfriends", - "braceletes", "bracelets", - "braceletts", "bracelets", - "brainwased", "brainwashed", - "brakedowns", "breakdowns", - "braodcasts", "broadcasts", - "brasillian", "brazilian", - "bratenders", "bartenders", - "brazilains", "brazilians", - "brazileans", "brazilians", - "braziliaan", "brazilians", - "brazilions", "brazilians", - "brazillans", "brazilians", - "brightoner", "brighten", - "brigthness", "brightness", - "brillaince", "brilliance", - "brilliante", "brilliance", - "brillianty", "brilliantly", - "brimestone", "brimstone", - "brimingham", "birmingham", - "broacasted", "broadcast", - "brotherhod", "brotherhood", - "brotherood", "brotherhood", - "brusselers", "brussels", - "brutallity", "brutally", - "buisnesses", "businesses", - "bulgariska", "bulgaria", - "bulletpoof", "bulletproof", - "bulletprof", "bulletproof", - "bureaucats", "bureaucrats", - "businesman", "businessman", - "businesmen", "businessmen", - "businessen", "businessmen", - "butterfies", "butterflies", - "cabinettas", "cabinets", - "caclulated", "calculated", - "caclulator", "calculator", - "cahracters", "characters", - "calcluator", "calculators", - "calcualted", "calculated", - "calcualtor", "calculator", - "calculador", "calculator", - "calcularon", "calculator", - "calculater", "calculator", - "calculatin", "calculations", - "calibratin", "calibration", - "calibraton", "calibration", - "califnoria", "californian", - "califonria", "californian", - "califorian", "californian", - "califorina", "california", - "californai", "californian", - "califronia", "california", - "caligraphy", "calligraphy", - "caliofrnia", "californian", - "calrifying", "clarifying", - "calssified", "classified", - "caluclated", "calculated", - "caluclator", "calculator", - "caluculate", "calculate", - "cambodican", "cambodia", - "camofluage", "camouflage", - "camoufalge", "camouflage", - "camouglage", "camouflage", - "campaiging", "campaigning", - "campaignes", "campaigns", - "cancellato", "cancellation", - "candidatas", "candidates", - "candidatxs", "candidates", - "candidiate", "candidate", - "canditates", "candidates", - "cannibalsm", 
"cannibalism", - "cannisters", "canisters", - "cannonical", "canonical", - "capabality", "capability", - "capabiltiy", "capability", - "capacators", "capacitors", - "capaciters", "capacitors", - "capactiors", "capacitors", - "capasitors", "capacitors", - "capatilism", "capitalism", - "capatilist", "capitalist", - "capatilize", "capitalize", - "capialized", "capitalized", - "capicators", "capacitors", - "capitalisn", "capitals", - "capitalits", "capitalists", - "capitalsim", "capitalism", - "capitalsit", "capitalists", - "capitarist", "capitalist", - "capitilism", "capitalism", - "capitilist", "capitalist", - "capitilize", "capitalize", - "capitlaism", "capitalism", - "capitlaist", "capitalist", - "capitlaize", "capitalized", - "capitolism", "capitalism", - "capitolist", "capitalist", - "capitolize", "capitalize", - "captainers", "captains", - "captialism", "capitalism", - "captialist", "capitalist", - "captialize", "capitalize", - "captivitiy", "captivity", - "caraciture", "caricature", - "carciature", "caricature", - "cardinales", "cardinals", - "cardinalis", "cardinals", - "carefullly", "carefully", - "cariacture", "caricature", - "caricatore", "caricature", - "cariciture", "caricature", - "caricuture", "caricature", - "carismatic", "charismatic", - "carribbean", "caribbean", - "cartdridge", "cartridge", - "cartdriges", "cartridges", - "carthagian", "carthaginian", - "cartilidge", "cartilage", - "cartirdges", "cartridges", - "cartrdiges", "cartridges", - "cartriages", "cartridges", - "cartrigdes", "cartridges", - "casaulties", "casualties", - "cassowarry", "cassowary", - "casualites", "casualties", - "casualries", "casualties", - "casulaties", "casualties", - "cataclysim", "cataclysm", - "cataclysym", "cataclysm", - "catagories", "categories", - "catapillar", "caterpillar", - "catapiller", "caterpillar", - "catastrope", "catastrophe", - "catastrphe", "catastrophe", - "categorice", "categorize", - "categoried", "categorized", - "categoriei", "categorize", - "cateogrize", "categorized", - "catepillar", "caterpillar", - "caterpilar", "caterpillar", - "catholicsm", "catholicism", - "catholicus", "catholics", - "catholisim", "catholicism", - "cativating", "activating", - "cattleship", "battleship", - "causalties", "casualties", - "cautionsly", "cautiously", - "celebratin", "celebration", - "celebrites", "celebrities", - "celebritiy", "celebrity", - "cellpading", "cellpadding", - "cellulaire", "cellular", - "cemetaries", "cemeteries", - "censorhsip", "censorship", - "censurship", "censorship", - "centipedle", "centipede", - "ceremonias", "ceremonies", - "ceremoniis", "ceremonies", - "ceremonije", "ceremonies", - "cerimonial", "ceremonial", - "cerimonies", "ceremonies", - "certainity", "certainty", - "certainlyt", "certainty", - "chairtable", "charitable", - "chalenging", "challenging", - "challanged", "challenged", - "challanges", "challenges", - "challegner", "challenger", - "challender", "challenger", - "challengue", "challenger", - "challengur", "challenger", - "challening", "challenging", - "challneger", "challenger", - "chanceller", "chancellor", - "chancillor", "chancellor", - "chansellor", "chancellor", - "charachter", "character", - "charactere", "characterize", - "characterz", "characterize", - "charactors", "characters", - "charakters", "characters", - "charatable", "charitable", - "charecters", "characters", - "charistics", "characteristics", - "charitible", "charitable", - "chartiable", "charitable", - "chechpoint", "checkpoint", - "checkpiont", "checkpoint", - "checkpoins", 
"checkpoints", - "checkponts", "checkpoints", - "cheesecase", "cheesecake", - "cheesecave", "cheesecake", - "cheeseface", "cheesecake", - "cheezecake", "cheesecake", - "chemcially", "chemically", - "chidlbirth", "childbirth", - "chihuahuha", "chihuahua", - "childbrith", "childbirth", - "childrends", "childrens", - "childrenis", "childrens", - "childrents", "childrens", - "chirstians", "christians", - "chocalates", "chocolates", - "chocloates", "chocolates", - "chocoaltes", "chocolates", - "chocolatie", "chocolates", - "chocolatos", "chocolates", - "chocolatte", "chocolates", - "chocolotes", "chocolates", - "cholestrol", "cholesterol", - "chormosome", "chromosome", - "chornicles", "chronicles", - "chrisitans", "christians", - "christains", "christians", - "christiaan", "christian", - "christimas", "christians", - "christinas", "christians", - "christines", "christians", - "christmans", "christians", - "chromasome", "chromosome", - "chromesome", "chromosome", - "chromisome", "chromosome", - "chromosmes", "chromosomes", - "chromosoms", "chromosomes", - "chromosone", "chromosome", - "chromosoom", "chromosome", - "chromozome", "chromosome", - "chronciles", "chronicles", - "chronicals", "chronicles", - "chronicels", "chronicles", - "chronocles", "chronicles", - "chronosome", "chromosome", - "chrsitians", "christians", - "cigarattes", "cigarettes", - "cigerattes", "cigarettes", - "cincinatti", "cincinnati", - "cinncinati", "cincinnati", - "circulaire", "circular", - "circulaton", "circulation", - "circumsice", "circumcised", - "circumsied", "circumcised", - "circumwent", "circumvent", - "circunvent", "circumvent", - "cirruculum", "curriculum", - "claculator", "calculator", - "clairfying", "clarifying", - "clasically", "classically", - "classicals", "classics", - "classrooom", "classroom", - "cleanliess", "cleanliness", - "cleareance", "clearance", - "cleverleys", "cleverly", - "cliffhager", "cliffhanger", - "climateers", "climates", - "climatiser", "climates", - "clincially", "clinically", - "clitoridis", "clitoris", - "clitorious", "clitoris", - "co-incided", "coincided", - "cockroachs", "cockroaches", - "cockroahes", "cockroaches", - "coefficent", "coefficient", - "cognatious", "contagious", - "cognitivie", "cognitive", - "coincidnce", "coincide", - "colelctive", "collective", - "colelctors", "collectors", - "collapsers", "collapses", - "collaquial", "colloquial", - "collasping", "collapsing", - "collataral", "collateral", - "collaterol", "collateral", - "collatoral", "collateral", - "collcetion", "collections", - "colleauges", "colleagues", - "colleciton", "collection", - "collectems", "collects", - "collectief", "collective", - "collecties", "collects", - "collectifs", "collects", - "collectivo", "collection", - "collectoin", "collections", - "collectons", "collections", - "collectros", "collects", - "collegaues", "colleagues", - "collequial", "colloquial", - "colleteral", "collateral", - "colliquial", "colloquial", - "collission", "collisions", - "collitions", "collisions", - "colloqiual", "colloquial", - "colloquail", "colloquial", - "colloqueal", "colloquial", - "collpasing", "collapsing", - "colonialsm", "colonialism", - "colorblend", "colorblind", - "coloublind", "colorblind", - "columbidae", "columbia", - "comapnions", "companions", - "comaprable", "comparable", - "comaprison", "comparison", - "comaptible", "compatible", - "combatabts", "combatants", - "combatents", "combatants", - "combinatin", "combinations", - "combinaton", "combination", - "comediants", "comedians", - "comepndium", 
"compendium", - "comferting", "comforting", - "comforming", "comforting", - "comfortbly", "comfortably", - "comisioned", "commissioned", - "comisioner", "commissioner", - "comissions", "commissions", - "commandbox", "commando", - "commandent", "commandment", - "commandeur", "commanders", - "commandore", "commanders", - "commandpod", "commando", - "commanists", "communists", - "commemters", "commenters", - "commencera", "commerce", - "commenciez", "commence", - "commentaar", "commentary", - "commentare", "commenter", - "commentars", "commenters", - "commentart", "commentator", - "commentery", "commentary", - "commentsry", "commenters", - "commercail", "commercials", - "commercent", "commence", - "commerical", "commercial", - "comminists", "communists", - "commisison", "commissions", - "commissons", "commissions", - "commiteted", "commited", - "commodites", "commodities", - "commtiment", "commitments", - "communicae", "communicated", - "communisim", "communism", - "communiste", "communities", - "communites", "communities", - "communters", "commenters", - "compadible", "compatible", - "compagnons", "companions", - "compainons", "companions", - "compairson", "comparison", - "compalined", "complained", - "compandium", "compendium", - "companians", "companions", - "companines", "companions", - "compansate", "compensate", - "comparabil", "comparable", - "comparason", "comparison", - "comparaste", "compares", - "comparatie", "comparative", - "compareble", "comparable", - "comparemos", "compares", - "comparions", "comparison", - "compariosn", "comparisons", - "comparisen", "compares", - "comparitve", "comparative", - "comparsion", "comparison", - "compartent", "compartment", - "compartmet", "compartment", - "compatibel", "compatible", - "compatibil", "compatible", - "compeating", "completing", - "compeditor", "competitor", - "compednium", "compendium", - "compeeting", "completing", - "compeltely", "completely", - "compelting", "completing", - "compeltion", "completion", - "compemdium", "compendium", - "compenduim", "compendium", - "compenents", "components", - "compenidum", "compendium", - "compensare", "compensate", - "comperable", "comparable", - "comperhend", "comprehend", - "compession", "compassion", - "competance", "competence", - "competator", "competitor", - "competenet", "competence", - "competense", "competence", - "competenze", "competence", - "competeted", "competed", - "competetor", "competitor", - "competidor", "competitor", - "competiors", "competitors", - "competitie", "competitive", - "competitin", "competitions", - "competitio", "competitor", - "competiton", "competition", - "competitve", "competitive", - "compilance", "compliance", - "compilaton", "compilation", - "compinsate", "compensate", - "compitable", "compatible", - "compitance", "compliance", - "complacant", "complacent", - "complaince", "compliance", - "complaines", "complaints", - "complainig", "complaining", - "complainte", "complained", - "complation", "completion", - "compleatly", "completely", - "complecate", "complicate", - "completeds", "completes", - "completent", "complement", - "completily", "complexity", - "completito", "completion", - "completley", "completely", - "complexers", "complexes", - "complexety", "complexity", - "complianed", "compliance", - "compliants", "complaints", - "complicaed", "complicate", - "complicare", "complicate", - "complicati", "complicit", - "complicato", "complication", - "complicite", "complicate", - "complicted", "complicated", - "complience", "compliance", - "complimate", 
"complicate", - "complition", "completion", - "complusion", "compulsion", - "complusive", "compulsive", - "complusory", "compulsory", - "compolsive", "compulsive", - "compolsory", "compulsory", - "compolsury", "compulsory", - "componants", "components", - "componenet", "components", - "componsate", "compensate", - "comporable", "comparable", - "compositae", "composite", - "compositie", "composite", - "compositon", "composition", - "compraison", "comparisons", - "compramise", "compromise", - "comprassem", "compress", - "comprehand", "comprehend", - "compresion", "compression", - "compresors", "compressor", - "compresser", "compressor", - "compressio", "compressor", - "compresson", "compression", - "comprihend", "comprehend", - "comprimise", "compromise", - "compromiss", "compromises", - "compromize", "compromise", - "compromsie", "compromises", - "comprossor", "compressor", - "compteting", "completing", - "comptetion", "completion", - "compulisve", "compulsive", - "compulosry", "compulsory", - "compulsary", "compulsory", - "compulsery", "compulsory", - "compulsing", "compulsion", - "compulsivo", "compulsion", - "compulsury", "compulsory", - "compuslion", "compulsion", - "compuslive", "compulsive", - "compuslory", "compulsory", - "compustion", "compulsion", - "computanti", "computation", - "conatiners", "containers", - "concedendo", "conceded", - "concedered", "conceded", - "conceitual", "conceptual", - "concentate", "concentrate", - "concenting", "connecting", - "conceptial", "conceptual", - "conceptuel", "conceptual", - "concersion", "concession", - "concesions", "concession", - "concidered", "considered", - "conciously", "consciously", - "concission", "concession", - "conclsuion", "concussion", - "conclusies", "conclusive", - "conclution", "conclusion", - "concorrent", "concurrent", - "concsience", "conscience", - "conculsion", "conclusion", - "conculsive", "conclusive", - "concurment", "concurrent", - "concurrant", "concurrent", - "concurrect", "concurrent", - "concusions", "concussion", - "concusison", "concussions", - "condamning", "condemning", - "condemming", "condemning", - "condencing", "condemning", - "condenming", "condemning", - "condensend", "condensed", - "condidtion", "condition", - "conditinal", "conditional", - "conditiner", "conditioner", - "conditiond", "conditioned", - "conditiong", "conditioning", - "condmening", "condemning", - "conduiting", "conducting", - "conencting", "connecting", - "conenction", "connection", - "conenctors", "connectors", - "conesencus", "consensus", - "confedarcy", "confederacy", - "confedence", "conference", - "confedercy", "confederacy", - "conferance", "conference", - "conferenze", "conference", - "conferming", "confirming", - "confernece", "conferences", - "confessino", "confessions", - "confidance", "confidence", - "confidenly", "confidently", - "confidense", "confidence", - "confidenty", "confidently", - "conflcting", "conflating", - "conflicing", "conflicting", - "conflictos", "conflicts", - "confliting", "conflating", - "confriming", "confirming", - "confussion", "confession", - "congratule", "congratulate", - "congresman", "congressman", - "congresmen", "congressmen", - "congressen", "congressmen", - "conjecutre", "conjecture", - "conjuction", "conjunction", - "conjuncion", "conjunction", - "conlcusion", "conclusion", - "conncetion", "connections", - "conneciton", "connection", - "connecties", "connects", - "connectins", "connects", - "connectivy", "connectivity", - "connectpro", "connector", - "conneticut", "connecticut", - 
"connotaion", "connotation", - "conpsiracy", "conspiracy", - "conqeuring", "conquering", - "conqouring", "conquering", - "conquerers", "conquerors", - "conquoring", "conquering", - "consciense", "conscience", - "consciouly", "consciously", - "consdiered", "considered", - "consending", "consenting", - "consensuel", "consensual", - "consenusal", "consensual", - "consequece", "consequence", - "consequnce", "consequence", - "conservare", "conserve", - "conservato", "conservation", - "conservice", "conserve", - "conservies", "conserve", - "conservite", "conserve", - "consicence", "conscience", - "consideras", "considers", - "consideret", "considerate", - "consipracy", "conspiracy", - "consistant", "consistent", - "consistens", "consists", - "consisteny", "consistency", - "consitency", "consistency", - "consituted", "constituted", - "conslutant", "consultant", - "consluting", "consulting", - "consolidad", "consolidated", - "consonents", "consonants", - "consorcium", "consortium", - "conspirace", "conspiracies", - "conspiricy", "conspiracy", - "conspriacy", "conspiracy", - "constaints", "constraints", - "constatnly", "constantly", - "constently", "constantly", - "constitude", "constitute", - "constitued", "constitute", - "constituem", "constitute", - "constituer", "constitute", - "constitues", "constitutes", - "constituie", "constitute", - "constituit", "constitute", - "constitutn", "constituents", - "constituye", "constitute", - "constnatly", "constantly", - "constracts", "constructs", - "constraits", "constraints", - "constransi", "constraints", - "constrants", "constraints", - "construced", "constructed", - "constructo", "construction", - "construint", "constraint", - "construits", "constructs", - "construted", "constructed", - "consueling", "consulting", - "consultata", "consultant", - "consultate", "consultant", - "consultati", "consultant", - "consultato", "consultation", - "consultent", "consultant", - "consumated", "consummated", - "consumbale", "consumables", - "consuments", "consumes", - "consumirem", "consumerism", - "consumires", "consumerism", - "consumirse", "consumerism", - "consumiste", "consumes", - "consumpion", "consumption", - "contaction", "contacting", - "contageous", "contagious", - "contagiosa", "contagious", - "contagioso", "contagious", - "contaigous", "contagious", - "containors", "containers", - "contaminen", "containment", - "contanting", "contacting", - "contection", "contention", - "contectual", "contextual", - "conteiners", "contenders", - "contempate", "contemplate", - "contemplat", "contempt", - "contempory", "contemporary", - "contenants", "continents", - "contencion", "contention", - "contendors", "contenders", - "contenents", "continents", - "conteneurs", "contenders", - "contengent", "contingent", - "contension", "contention", - "contentino", "contention", - "contentios", "contentious", - "contentous", "contentious", - "contestais", "contests", - "contestans", "contests", - "contestase", "contests", - "contestion", "contention", - "contestors", "contests", - "contextful", "contextual", - "contextuel", "contextual", - "contextura", "contextual", - "contianers", "containers", - "contianing", "containing", - "contibuted", "contributed", - "contibutes", "contributes", - "contigents", "continents", - "contigious", "contagious", - "contignent", "contingent", - "continants", "continents", - "continenal", "continental", - "continenet", "continents", - "contineous", "continuous", - "continetal", "continental", - "contingecy", "contingency", - "contingeny", 
"contingency", - "continient", "contingent", - "continious", "continuous", - "continiuty", "continuity", - "contintent", "contingent", - "continualy", "continually", - "continuare", "continue", - "continuati", "continuity", - "continuato", "continuation", - "continuent", "contingent", - "continuety", "continuity", - "continunes", "continents", - "continuons", "continuous", - "continutiy", "continuity", - "continuuum", "continuum", - "contitnent", "contingent", - "contiuning", "containing", - "contiunity", "continuity", - "contorller", "controllers", - "contracing", "contracting", - "contractar", "contractor", - "contracter", "contractor", - "contractin", "contraction", - "contractos", "contracts", - "contradice", "contradicted", - "contradics", "contradicts", - "contredict", "contradict", - "contribued", "contributed", - "contribuem", "contribute", - "contribuer", "contribute", - "contribues", "contributes", - "contribuie", "contribute", - "contribuit", "contribute", - "contributo", "contribution", - "contributs", "contributes", - "contribuye", "contribute", - "contricted", "contracted", - "contridict", "contradict", - "contriubte", "contributes", - "controlelr", "controllers", - "controlers", "controls", - "controling", "controlling", - "controlles", "controls", - "controvery", "controversy", - "controvesy", "controversy", - "contrubite", "contributes", - "contrubute", "contribute", - "contuining", "continuing", - "contuinity", "continuity", - "convaluted", "convoluted", - "convcition", "convictions", - "conveinent", "convenient", - "conveluted", "convoluted", - "convencion", "convention", - "conveniant", "convenient", - "conveniece", "convenience", - "convenince", "convenience", - "convential", "conventional", - "converesly", "conversely", - "convergens", "converse", - "converison", "conversions", - "converning", "converting", - "conversare", "converse", - "conversino", "conversions", - "conversley", "conversely", - "conversoin", "conversions", - "conversons", "conversions", - "convertion", "conversion", - "convertire", "converter", - "converying", "converting", - "conveyered", "conveyed", - "conviccion", "conviction", - "conviciton", "conviction", - "convienent", "convenient", - "conviluted", "convoluted", - "convincted", "convince", - "convinsing", "convincing", - "convinving", "convincing", - "convoluded", "convoluted", - "convoulted", "convoluted", - "convulated", "convoluted", - "convuluted", "convoluted", - "cooperatve", "cooperative", - "coordenate", "coordinate", - "coordiante", "coordinate", - "coordinare", "coordinate", - "coordinato", "coordination", - "coordinats", "coordinates", - "coordonate", "coordinate", - "cooridnate", "coordinate", - "copehnagen", "copenhagen", - "copenaghen", "copenhagen", - "copenahgen", "copenhagen", - "copengagen", "copenhagen", - "copengahen", "copenhagen", - "copenhagan", "copenhagen", - "copenhague", "copenhagen", - "copenhagun", "copenhagen", - "copenhaven", "copenhagen", - "copenhegan", "copenhagen", - "copyrighed", "copyrighted", - "copyrigted", "copyrighted", - "corinthans", "corinthians", - "corinthias", "corinthians", - "corinthins", "corinthians", - "cornmitted", "committed", - "corporatie", "corporate", - "corralated", "correlated", - "corralates", "correlates", - "correccion", "correction", - "correciton", "corrections", - "correcters", "correctors", - "correctess", "correctness", - "correctivo", "correction", - "correctons", "corrections", - "corregated", "correlated", - "correkting", "correcting", - "correlatas", "correlates", - 
"correlatie", "correlated", - "correlatos", "correlates", - "correspend", "correspond", - "corrilated", "correlated", - "corrilates", "correlates", - "corrispond", "correspond", - "corrolated", "correlated", - "corrolates", "correlates", - "corrospond", "correspond", - "corrpution", "corruption", - "corrulates", "correlates", - "corrupcion", "corruption", - "cosmeticas", "cosmetics", - "cosmeticos", "cosmetics", - "costumized", "customized", - "counceling", "counseling", - "councellor", "councillor", - "councelors", "counselors", - "councilers", "councils", - "counselers", "counselors", - "counsellng", "counselling", - "counsilers", "counselors", - "counsiling", "counseling", - "counsilors", "counselors", - "counsolers", "counselors", - "counsoling", "counseling", - "countepart", "counteract", - "counteratk", "counteract", - "counterbat", "counteract", - "countercat", "counteract", - "countercut", "counteract", - "counteries", "counters", - "countoring", "countering", - "countryies", "countryside", - "countrying", "countering", - "courcework", "coursework", - "coursefork", "coursework", - "courthosue", "courthouse", - "courtrooom", "courtroom", - "cousnelors", "counselors", - "coutneract", "counteract", - "coutnering", "countering", - "covenental", "covenant", - "cranberrry", "cranberry", - "creationis", "creations", - "creationsm", "creationism", - "creationst", "creationist", - "creativily", "creatively", - "creativley", "creatively", - "credibilty", "credibility", - "creeperest", "creepers", - "crimanally", "criminally", - "criminalty", "criminally", - "criminalul", "criminally", - "criticable", "critical", - "criticarlo", "critical", - "criticiing", "criticising", - "criticisim", "criticism", - "criticisme", "criticise", - "criticisng", "criticising", - "criticists", "critics", - "criticisze", "criticise", - "criticizms", "criticisms", - "criticizng", "criticizing", - "critisiced", "criticized", - "critisicms", "criticisms", - "critisicsm", "criticisms", - "critisiscm", "criticisms", - "critisisms", "criticisms", - "critisizes", "criticises", - "critisizms", "criticisms", - "critiziced", "criticized", - "critizised", "criticized", - "critizisms", "criticisms", - "critizized", "criticized", - "crocodille", "crocodile", - "crossfiter", "crossfire", - "crutchetts", "crutches", - "crystalens", "crystals", - "crystalisk", "crystals", - "crystallis", "crystals", - "cuatiously", "cautiously", - "culterally", "culturally", - "cultrually", "culturally", - "culumative", "cumulative", - "culutrally", "culturally", - "cumbersone", "cumbersome", - "cumbursome", "cumbersome", - "cumpolsory", "compulsory", - "cumulitive", "cumulative", - "currancies", "currencies", - "currenctly", "currency", - "currenices", "currencies", - "currentfps", "currents", - "currentlys", "currents", - "currentpos", "currents", - "currentusa", "currents", - "curriculem", "curriculum", - "curriculim", "curriculum", - "curriences", "currencies", - "curroption", "corruption", - "custimized", "customized", - "customzied", "customized", - "custumized", "customized", - "cutscences", "cutscene", - "cutscenses", "cutscene", - "dangerouly", "dangerously", - "dealerhsip", "dealerships", - "deathamtch", "deathmatch", - "deathmacth", "deathmatch", - "debateable", "debatable", - "decembeard", "december", - "decendants", "descendants", - "decendents", "descendants", - "decideable", "decidable", - "deciptions", "depictions", - "decisiones", "decisions", - "declarasen", "declares", - "declaraste", "declares", - "declaremos", "declares", - 
"decomposit", "decompose", - "decoracion", "decoration", - "decorativo", "decoration", - "decoritive", "decorative", - "decroative", "decorative", - "decsending", "descending", - "dedicacion", "dedication", - "dedikation", "dedication", - "deducatble", "deductible", - "deducitble", "deductible", - "defacation", "defamation", - "defamating", "defamation", - "defanitely", "definately", - "defelction", "deflection", - "defendeers", "defender", - "defendents", "defendants", - "defenderes", "defenders", - "defenesman", "defenseman", - "defenselss", "defenseless", - "defensivly", "defensively", - "defianetly", "definately", - "defiantely", "definately", - "defiantley", "definately", - "defibately", "definately", - "deficately", "definately", - "deficiancy", "deficiency", - "deficience", "deficiencies", - "deficienct", "deficient", - "deficienty", "deficiency", - "defiintely", "definately", - "definaetly", "definately", - "definaitly", "definately", - "definaltey", "definately", - "definataly", "definately", - "definateky", "definately", - "definately", "definitely", - "definatily", "definately", - "defination", "definition", - "definative", "definitive", - "definatlly", "definately", - "definatrly", "definately", - "definayely", "definately", - "defineatly", "definately", - "definetaly", "definately", - "definetely", "definitely", - "definetily", "definately", - "definetlly", "definetly", - "definettly", "definately", - "definicion", "definition", - "definietly", "definitely", - "definining", "defining", - "definitaly", "definately", - "definiteyl", "definitly", - "definitivo", "definition", - "definitley", "definitely", - "definitlly", "definitly", - "definitlry", "definitly", - "definitlty", "definitly", - "definjtely", "definately", - "definltely", "definately", - "definotely", "definately", - "definstely", "definately", - "defintaley", "definately", - "defintiely", "definitely", - "defintiion", "definitions", - "definutely", "definately", - "deflaction", "deflection", - "defleciton", "deflection", - "deflektion", "deflection", - "defniately", "definately", - "degenarate", "degenerate", - "degenerare", "degenerate", - "degenerite", "degenerate", - "degoratory", "derogatory", - "degraderad", "degraded", - "dehydraded", "dehydrated", - "dehyrdated", "dehydrated", - "deifnately", "definately", - "deisgnated", "designated", - "delaership", "dealership", - "delearship", "dealership", - "delegaties", "delegate", - "delegative", "delegate", - "delfection", "deflection", - "delibarate", "deliberate", - "deliberant", "deliberate", - "delibirate", "deliberate", - "deligthful", "delightful", - "deliverate", "deliberate", - "deliverees", "deliveries", - "deliviered", "delivered", - "deliviring", "delivering", - "delporable", "deplorable", - "delpoyment", "deployment", - "delutional", "delusional", - "dementieva", "dementia", - "deminsions", "dimensions", - "democracis", "democracies", - "democracts", "democrat", - "democratas", "democrats", - "democrates", "democrats", - "demograhic", "demographic", - "demographs", "demographics", - "demograpic", "demographic", - "demolation", "demolition", - "demolicion", "demolition", - "demolision", "demolition", - "demolitian", "demolition", - "demoliting", "demolition", - "demoloshed", "demolished", - "demolution", "demolition", - "demonished", "demolished", - "demonstate", "demonstrate", - "demonstras", "demonstrates", - "demorcracy", "democracy", - "denegerate", "degenerate", - "denominato", "denomination", - "denomintor", "denominator", - "deocrative", 
"decorative", - "deomcratic", "democratic", - "deparments", "departments", - "departmens", "departments", - "departmnet", "departments", - "depcitions", "depictions", - "depdending", "depending", - "depencency", "dependency", - "dependance", "dependence", - "dependancy", "dependency", - "dependandt", "dependant", - "dependends", "depended", - "dependened", "depended", - "dependenta", "dependant", - "dependente", "dependence", - "depicitons", "depictions", - "deplorabel", "deplorable", - "deplorabil", "deplorable", - "deplorible", "deplorable", - "deplyoment", "deployment", - "depolyment", "deployment", - "depositers", "deposits", - "depressief", "depressive", - "depressies", "depressive", - "deprivaton", "deprivation", - "deragotory", "derogatory", - "derivaties", "derivatives", - "deriviated", "derived", - "derivitave", "derivative", - "derivitive", "derivative", - "derogatary", "derogatory", - "derogatery", "derogatory", - "derogetory", "derogatory", - "derogitory", "derogatory", - "derogotary", "derogatory", - "derogotory", "derogatory", - "derviative", "derivative", - "descendats", "descendants", - "descendend", "descended", - "descenting", "descending", - "descerning", "descending", - "descipable", "despicable", - "descisions", "decisions", - "descriibes", "describes", - "descripton", "description", - "desginated", "designated", - "desigining", "designing", - "desireable", "desirable", - "desktopbsd", "desktops", - "despciable", "despicable", - "desperatly", "desperately", - "desperetly", "desperately", - "despicaple", "despicable", - "despicible", "despicable", - "dessicated", "desiccated", - "destinatin", "destinations", - "destinaton", "destination", - "destoryers", "destroyers", - "destorying", "destroying", - "destroyeds", "destroyers", - "destroyeer", "destroyers", - "destrucion", "destruction", - "destrucive", "destructive", - "destryoing", "destroying", - "detectarlo", "detector", - "detectaron", "detector", - "detectoare", "detector", - "determinas", "determines", - "determinig", "determining", - "determinsm", "determinism", - "deutschand", "deutschland", - "devastaded", "devastated", - "devastaing", "devastating", - "devastanti", "devastating", - "devasteted", "devastated", - "develepors", "developers", - "develoeprs", "developers", - "developmet", "developments", - "developors", "develops", - "developped", "developed", - "developres", "develops", - "develpment", "development", - "devestated", "devastated", - "devolvendo", "devolved", - "deyhdrated", "dehydrated", - "diagnosied", "diagnose", - "diagnosies", "diagnosis", - "diagnositc", "diagnostic", - "diagnossed", "diagnose", - "diagnosted", "diagnose", - "diagnotics", "diagnostic", - "diagonstic", "diagnostic", - "dichotomoy", "dichotomy", - "dicitonary", "dictionary", - "diconnects", "disconnects", - "dicovering", "discovering", - "dictateurs", "dictates", - "dictionare", "dictionaries", - "differance", "difference", - "differenly", "differently", - "differense", "differences", - "differente", "difference", - "differentl", "differential", - "differenty", "differently", - "differnece", "difference", - "difficulte", "difficulties", - "difficults", "difficulties", - "difficutly", "difficulty", - "diffuculty", "difficulty", - "diganostic", "diagnostic", - "dimensinal", "dimensional", - "dimentions", "dimensions", - "dimesnions", "dimensions", - "dimineshes", "diminishes", - "diminising", "diminishing", - "dimunitive", "diminutive", - "dinosaures", "dinosaurs", - "dinosaurus", "dinosaurs", - "dipections", "depictions", - 
"diplimatic", "diplomatic", - "diplomacia", "diplomatic", - "diplomancy", "diplomacy", - "dipolmatic", "diplomatic", - "directinla", "directional", - "directionl", "directional", - "directivos", "directions", - "directores", "directors", - "directorys", "directors", - "directsong", "directions", - "disaapoint", "disappoint", - "disagreeed", "disagreed", - "disapeared", "disappeared", - "disappeard", "disappeared", - "disappered", "disappeared", - "disappiont", "disappoint", - "disaproval", "disapproval", - "disastorus", "disastrous", - "disastrosa", "disastrous", - "disastrose", "disastrous", - "disastrosi", "disastrous", - "disastroso", "disastrous", - "disaterous", "disastrous", - "discalimer", "disclaimer", - "discapline", "discipline", - "discepline", "discipline", - "disception", "discretion", - "discharded", "discharged", - "disciplers", "disciples", - "disciplies", "disciplines", - "disciplins", "disciplines", - "disciprine", "discipline", - "disclamier", "disclaimer", - "discliamer", "disclaimer", - "disclipine", "discipline", - "disclousre", "disclosure", - "disclsoure", "disclosure", - "discograhy", "discography", - "discograpy", "discography", - "discolsure", "disclosure", - "disconenct", "disconnect", - "disconncet", "disconnects", - "disconnets", "disconnects", - "discontued", "discounted", - "discoruage", "discourages", - "discources", "discourse", - "discourgae", "discourages", - "discourges", "discourages", - "discoveres", "discovers", - "discoveryd", "discovered", - "discoverys", "discovers", - "discrecion", "discretion", - "discreddit", "discredited", - "discrepany", "discrepancy", - "discresion", "discretion", - "discreting", "discretion", - "discribing", "describing", - "discrimine", "discriminate", - "discrouage", "discourages", - "discrption", "discretion", - "discusison", "discussions", - "discusting", "discussing", - "disgracful", "disgraceful", - "disgrunted", "disgruntled", - "disgruntld", "disgruntled", - "disguisted", "disguise", - "disgustiny", "disgustingly", - "disgustosa", "disgusts", - "disgustose", "disgusts", - "disgustosi", "disgusts", - "disgustoso", "disgusts", - "dishcarged", "discharged", - "dishinored", "dishonored", - "disicpline", "discipline", - "disiplined", "disciplined", - "dislcaimer", "disclaimer", - "dismanteld", "dismantled", - "dismanting", "dismantling", - "dismentled", "dismantled", - "dispecable", "despicable", - "dispencary", "dispensary", - "dispencers", "dispenser", - "dispencing", "dispensing", - "dispensare", "dispenser", - "dispensory", "dispensary", - "dispesnary", "dispensary", - "dispicable", "despicable", - "displayfps", "displays", - "dispositon", "disposition", - "dispostion", "disposition", - "disputerad", "disputed", - "disrecpect", "disrespect", - "disrection", "discretion", - "disrepsect", "disrespect", - "disresepct", "disrespect", - "disrespekt", "disrespect", - "disription", "disruption", - "disrispect", "disrespect", - "disrputing", "disrupting", - "disruptivo", "disruption", - "disruptron", "disruption", - "dissapears", "disappears", - "dissappear", "disappear", - "disscusion", "discussion", - "dissmisive", "dismissive", - "dissodance", "dissonance", - "dissonante", "dissonance", - "dissonence", "dissonance", - "distastful", "distasteful", - "disticntly", "distinctly", - "distiction", "distinction", - "distincion", "distinction", - "distincive", "distinctive", - "distinclty", "distinctly", - "distinctie", "distinctive", - "distinctin", "distinctions", - "distingish", "distinguish", - "distingush", "distinguish", - 
"distintcly", "distinctly", - "distoriton", "distortion", - "distorsion", "distortion", - "distortian", "distortion", - "distortron", "distortion", - "distractes", "distracts", - "distractia", "district", - "distractin", "district", - "distractiv", "district", - "distration", "distortion", - "distribuem", "distribute", - "distribuer", "distribute", - "distribuie", "distribute", - "distribuit", "distribute", - "distributs", "distributors", - "distribuye", "distribute", - "distrotion", "distortion", - "distrubing", "disturbing", - "distrubtes", "distrust", - "distrubute", "distribute", - "distubring", "disturbing", - "disturbace", "disturbance", - "disturping", "disrupting", - "disucssing", "discussing", - "disucssion", "discussion", - "disurption", "disruption", - "ditributed", "distributed", - "diversifiy", "diversify", - "dividendes", "dividends", - "dividendos", "dividends", - "divideneds", "dividend", - "divinition", "divination", - "divinitory", "divinity", - "divisiones", "divisions", - "dobulelift", "doublelift", - "doccuments", "documents", - "documentry", "documentary", - "dogmatisch", "dogmatic", - "dolphinese", "dolphins", - "domianting", "dominating", - "domimation", "domination", - "dominacion", "domination", - "dominaters", "dominates", - "donwgraded", "downgraded", - "donwloaded", "downloaded", - "donwvoters", "downvoters", - "donwvoting", "downvoting", - "doomsdaily", "doomsday", - "doubellift", "doublelift", - "doubleiift", "doublelift", - "doubleleft", "doublelift", - "doublelfit", "doublelift", - "doublerift", "doublelift", - "doulbelift", "doublelift", - "downgarded", "downgraded", - "downgrated", "downgrade", - "downlaoded", "downloaded", - "downloadas", "downloads", - "downloades", "downloads", - "downovting", "downvoting", - "downroaded", "downgraded", - "downsiders", "downsides", - "downstaris", "downstairs", - "downstiars", "downstairs", - "downtokers", "downvoters", - "downtoking", "downvoting", - "downtraded", "downgraded", - "downviting", "downvoting", - "downvotear", "downvoters", - "downvoteas", "downvoters", - "downvoteds", "downvoters", - "downvotees", "downvoters", - "downvotesd", "downvoters", - "downvotess", "downvoters", - "downvotest", "downvoters", - "downvoteur", "downvoters", - "downvoties", "downvoters", - "downvotres", "downvoters", - "downvotted", "downvote", - "downvottes", "downvoters", - "downwoters", "downvoters", - "downwoting", "downvoting", - "drasticaly", "drastically", - "drasticlly", "drastically", - "draughtman", "draughtsman", - "dumbbellls", "dumbbells", - "dumbfouded", "dumbfounded", - "dumbfouned", "dumbfounded", - "dungeoness", "dungeons", - "dupilcates", "duplicates", - "duplicants", "duplicates", - "duplicatas", "duplicates", - "duplicitas", "duplicates", - "duplifaces", "duplicates", - "durabiltiy", "durability", - "dyamically", "dynamically", - "dynamicaly", "dynamically", - "dynamicdns", "dynamics", - "dynamiclly", "dynamically", - "dynamicpsf", "dynamics", - "dynamitage", "dynamite", - "dysfuncion", "dysfunction", - "earhtbound", "earthbound", - "earthqauke", "earthquake", - "earthquack", "earthquake", - "earthquaks", "earthquakes", - "earthquate", "earthquake", - "earthqukes", "earthquakes", - "easthetics", "aesthetics", - "ecoligical", "ecological", - "ecomonical", "economical", - "econimical", "economical", - "econimists", "economists", - "economicas", "economics", - "economicos", "economics", - "economicus", "economics", - "economisch", "economic", - "economisit", "economists", - "effeciency", "efficiency", - "effectivly", 
"effectively", - "efficeincy", "efficiency", - "efficently", "efficiently", - "efficiancy", "efficiency", - "efficienct", "efficient", - "efficienty", "efficiently", - "egotistcal", "egotistical", - "ehtnically", "ethnically", - "ejaculaion", "ejaculation", - "ejaculatie", "ejaculate", - "ejaculatin", "ejaculation", - "ejaculaton", "ejaculation", - "ejaculatte", "ejaculate", - "electircal", "electrical", - "electivite", "elective", - "electoraat", "electorate", - "electorale", "electorate", - "electorite", "electorate", - "electornic", "electronic", - "electrican", "electrician", - "electriciy", "electricity", - "electricty", "electricity", - "electrinic", "electrician", - "electroate", "electorate", - "electrodan", "electron", - "electroinc", "electron", - "electrolye", "electrolytes", - "electroman", "electron", - "electroncs", "electrons", - "electrones", "electrons", - "electronik", "election", - "electronis", "electronics", - "electronix", "election", - "elemantary", "elementary", - "elementery", "elementary", - "elementray", "elementary", - "eleminated", "eliminated", - "elephantes", "elephants", - "elephantis", "elephants", - "elephantos", "elephants", - "elephantus", "elephants", - "eletricity", "electricity", - "elimanates", "eliminates", - "elimenates", "eliminates", - "elimentary", "elementary", - "elimimates", "eliminates", - "eliminaste", "eliminates", - "eliminatin", "elimination", - "eliminaton", "elimination", - "eliminster", "eliminates", - "ellipitcal", "elliptical", - "ellipsical", "elliptical", - "ellipticle", "elliptical", - "ellitpical", "elliptical", - "ellpitical", "elliptical", - "eloquantly", "eloquently", - "eloquintly", "eloquently", - "emapthetic", "empathetic", - "embarassed", "embarrassed", - "embarassig", "embarassing", - "embarrased", "embarrassed", - "embarrases", "embarrassed", - "embezelled", "embezzled", - "emblamatic", "emblematic", - "embodyment", "embodiment", - "emergenies", "emergencies", - "emmigrated", "emigrated", - "emminently", "eminently", - "emmisaries", "emissaries", - "emobdiment", "embodiment", - "emotionaly", "emotionally", - "empahsized", "emphasized", - "empahsizes", "emphasizes", - "empathatic", "empathetic", - "emphacized", "emphasized", - "emphatetic", "empathetic", - "emphatised", "emphasized", - "emphatized", "emphasized", - "emphatizes", "emphasizes", - "emphazised", "emphasized", - "emphazises", "emphasizes", - "emphesized", "emphasized", - "emphesizes", "emphasizes", - "emphisized", "emphasized", - "emphisizes", "emphasizes", - "empiricaly", "empirically", - "employeers", "employees", - "employeurs", "employer", - "emprisoned", "imprisoned", - "encahnting", "enchanting", - "enchancing", "enchanting", - "enchanging", "enchanting", - "enchantent", "enchantment", - "enchantmet", "enchantments", - "encompases", "encompasses", - "encounterd", "encountered", - "encountred", "encountered", - "encouraing", "encouraging", - "encoutners", "encounters", - "encription", "encryption", - "encrpytion", "encryption", - "encyrption", "encryption", - "endlessley", "endlessly", - "endolithes", "endoliths", - "enforceing", "enforcing", - "engagemnet", "engagements", - "engagemnts", "engagements", - "engieneers", "engineers", - "enginereed", "engineered", - "enivitable", "inevitable", - "enlargment", "enlargement", - "enlighment", "enlighten", - "enlightend", "enlightened", - "enlightned", "enlightened", - "enrolement", "enrollment", - "enrollemnt", "enrollment", - "enterpirse", "enterprise", - "enterprice", "enterprise", - "enterpries", 
"enterprises", - "enterprize", "enterprise", - "enterprsie", "enterprises", - "enterrpise", "enterprises", - "entertaing", "entertaining", - "enthically", "ethnically", - "enthisiast", "enthusiast", - "enthuiasts", "enthusiast", - "enthuisast", "enthusiasts", - "enthusiams", "enthusiasm", - "enthusiant", "enthusiast", - "enthusiats", "enthusiast", - "enthusiest", "enthusiast", - "enthusists", "enthusiasts", - "envelopped", "envelope", - "enveloppen", "envelope", - "enveloppes", "envelope", - "enviorment", "environment", - "enviroment", "environment", - "environmet", "environments", - "equiavlent", "equivalents", - "equilavent", "equivalent", - "equilibium", "equilibrium", - "equilibrim", "equilibrium", - "equilibrum", "equilibrium", - "equippment", "equipment", - "equitorial", "equatorial", - "equivalant", "equivalent", - "equivalnce", "equivalence", - "equivalnet", "equivalents", - "equivelant", "equivalent", - "equivelent", "equivalent", - "equivilant", "equivalent", - "equivilent", "equivalent", - "equivlaent", "equivalents", - "equivolent", "equivalent", - "eratically", "erratically", - "escalative", "escalate", - "escavation", "escalation", - "esitmation", "estimation", - "esoterisch", "esoteric", - "especailly", "especially", - "espeically", "especially", - "espressino", "espresso", - "espression", "espresso", - "essencials", "essentials", - "essensials", "essentials", - "essentails", "essentials", - "essentialy", "essentially", - "essentiels", "essentials", - "essentuals", "essentials", - "estabishes", "establishes", - "estimacion", "estimation", - "estimativo", "estimation", - "estination", "estimation", - "ethicallly", "ethically", - "ethincally", "ethnically", - "ethnicites", "ethnicities", - "ethnicitiy", "ethnicity", - "euphorical", "euphoria", - "euphorisch", "euphoric", - "euthanaisa", "euthanasia", - "euthanazia", "euthanasia", - "euthanesia", "euthanasia", - "evaluacion", "evaluation", - "evalutaion", "evaluation", - "evaulating", "evaluating", - "evaulation", "evaluation", - "eventaully", "eventually", - "eventially", "eventually", - "everyoneis", "everyones", - "exacberate", "exacerbated", - "exagerated", "exaggerated", - "exagerates", "exaggerates", - "exagerrate", "exaggerate", - "exaggarate", "exaggerate", - "exaggurate", "exaggerate", - "exahusting", "exhausting", - "exahustion", "exhaustion", - "examinated", "examined", - "examinerad", "examined", - "exapansion", "expansion", - "exapnsions", "expansions", - "exauhsting", "exhausting", - "exauhstion", "exhaustion", - "excecuting", "executing", - "excecution", "execution", - "exceedigly", "exceedingly", - "exceedinly", "exceedingly", - "excellance", "excellence", - "excellenet", "excellence", - "excellenze", "excellence", - "excerising", "exercising", - "excessivly", "excessively", - "exchangees", "exchanges", - "excitiment", "excitement", - "exclsuives", "exclusives", - "exclusivas", "exclusives", - "exclusivly", "exclusively", - "exclusivos", "exclusives", - "exclusivty", "exclusivity", - "exclussive", "exclusives", - "exclusvies", "exclusives", - "excpetions", "exceptions", - "exculsives", "exclusives", - "exculsivly", "exclusively", - "execptions", "exceptions", - "exectuable", "executable", - "exectuions", "executions", - "exectuives", "executives", - "execusions", "executions", - "executabil", "executable", - "executible", "executable", - "executiner", "executioner", - "executings", "executions", - "executivas", "executives", - "exeedingly", "exceedingly", - "exepmtions", "exemptions", - "exeptional", 
"exceptional", - "exercicing", "exercising", - "exercizing", "exercising", - "exersicing", "exercising", - "exersising", "exercising", - "exersizing", "exercising", - "exerternal", "external", - "exeuctions", "executions", - "exhasuting", "exhausting", - "exhasution", "exhaustion", - "exhaustivo", "exhaustion", - "exhibicion", "exhibition", - "exhibitons", "exhibits", - "exhuasting", "exhausting", - "exhuastion", "exhaustion", - "exibitions", "exhibitions", - "exictement", "excitement", - "exipration", "expiration", - "existantes", "existent", - "existenial", "existential", - "existental", "existential", - "exlcusives", "exclusives", - "exorbatant", "exorbitant", - "exorbatent", "exorbitant", - "exorbidant", "exorbitant", - "exorbirant", "exorbitant", - "exorbitent", "exorbitant", - "expalining", "explaining", - "expanisons", "expansions", - "expansivos", "expansions", - "expanssion", "expansions", - "expantions", "expansions", - "expecially", "especially", - "expectaion", "expectation", - "expectansy", "expectancy", - "expectency", "expectancy", - "expections", "exceptions", - "expedetion", "expedition", - "expedicion", "expedition", - "expeditivo", "expedition", - "expeiments", "experiments", - "expemtions", "exemptions", - "expendeble", "expendable", - "expendible", "expendable", - "expensable", "expendable", - "expentancy", "expectancy", - "expereince", "experience", - "experement", "experiment", - "experiance", "experience", - "experieced", "experienced", - "experieces", "experiences", - "experiemnt", "experiment", - "experiened", "experienced", - "experiense", "experiences", - "expermient", "experiments", - "experssion", "expression", - "expextancy", "expectancy", - "expidetion", "expedition", - "expierence", "experience", - "expination", "expiration", - "expirement", "experiment", - "explanatin", "explanations", - "explicatia", "explicit", - "explicatie", "explicit", - "explicatif", "explicit", - "explicatii", "explicit", - "explicetly", "explicitly", - "explicilty", "explicitly", - "explioting", "exploiting", - "exploiding", "exploiting", - "exploition", "exploiting", - "explorarea", "explorer", - "exploreres", "explorers", - "explosivas", "explosives", - "explossion", "explosions", - "explossive", "explosives", - "explosvies", "explosives", - "explotions", "explosions", - "explusions", "explosions", - "expodition", "exposition", - "expoliting", "exploiting", - "expolsions", "explosions", - "expolsives", "explosives", - "exponental", "exponential", - "exposicion", "exposition", - "expositivo", "exposition", - "expotition", "exposition", - "exprensive", "expressive", - "expresions", "expression", - "expresison", "expressions", - "expressens", "expresses", - "expressief", "expressive", - "expressley", "expressly", - "expriation", "expiration", - "extensivly", "extensively", - "extentions", "extensions", - "exterioara", "exterior", - "exterioare", "exterior", - "extermally", "externally", - "extermists", "extremists", - "extraccion", "extraction", - "extractivo", "extraction", - "extractnow", "extraction", - "extradtion", "extraction", - "extremaste", "extremes", - "extremeley", "extremely", - "extremelly", "extremely", - "extrememly", "extremely", - "extremests", "extremists", - "extremised", "extremes", - "extremisim", "extremism", - "extremisme", "extremes", - "extremiste", "extremes", - "extrenally", "externally", - "extrimists", "extremists", - "eyeballers", "eyeballs", - "fabriacted", "fabricated", - "fabricatie", "fabricated", - "faciliated", "facilitated", - "facilitait", 
"facilitate", - "facilitant", "facilitate", - "facilitare", "facilitate", - "facisnated", "fascinated", - "facitilies", "facilities", - "facsinated", "fascinated", - "fahernheit", "fahrenheit", - "fahrenhiet", "fahrenheit", - "fallatious", "fallacious", - "fallicious", "fallacious", - "falshbacks", "flashbacks", - "familiarty", "familiarity", - "familiarze", "familiarize", - "fanaticals", "fanatics", - "fanfaction", "fanfiction", - "fanfcition", "fanfiction", - "fanficiton", "fanfiction", - "fanserivce", "fanservice", - "fanservise", "fanservice", - "fanservive", "fanservice", - "fantasiose", "fantasies", - "farehnheit", "fahrenheit", - "farhenheit", "fahrenheit", - "fascianted", "fascinated", - "fascinatie", "fascinated", - "fascinatin", "fascination", - "fascistisk", "fascists", - "fatalaties", "fatalities", - "favoruites", "favourites", - "favourates", "favourites", - "favouritsm", "favourites", - "favourties", "favourites", - "federacion", "federation", - "federativo", "federation", - "fellowhsip", "fellowship", - "fellowshop", "fellowship", - "feminimity", "femininity", - "feministas", "feminists", - "feminitity", "femininity", - "fermentato", "fermentation", - "fertalizer", "fertilizer", - "fertelizer", "fertilizer", - "fertilizar", "fertilizer", - "fertilzier", "fertilizer", - "fertiziler", "fertilizer", - "festivales", "festivals", - "fetishiste", "fetishes", - "ficticious", "fictitious", - "filessytem", "filesystem", - "filesytems", "filesystem", - "filmamkers", "filmmakers", - "filmmakare", "filmmakers", - "finallizes", "finalizes", - "financialy", "financially", - "fingernals", "fingernails", - "fingerpies", "fingertips", - "fingerpint", "fingerprint", - "fingertaps", "fingertips", - "fingertits", "fingertips", - "fingertops", "fingertips", - "fireballls", "fireballs", - "firefigher", "firefighter", - "firefigter", "firefighter", - "firendlies", "friendlies", - "firghtened", "frightened", - "fisionable", "fissionable", - "flashligth", "flashlight", - "flaskbacks", "flashbacks", - "flawleslly", "flawlessly", - "flexibiliy", "flexibility", - "flexibilty", "flexibility", - "flimmakers", "filmmakers", - "fluctuatie", "fluctuate", - "fluctuatin", "fluctuations", - "flutterhsy", "fluttershy", - "fluttersky", "fluttershy", - "flutterspy", "fluttershy", - "forcifully", "forcefully", - "forecfully", "forcefully", - "foreginers", "foreigners", - "foregorund", "foreground", - "foreignese", "foreigners", - "foreigness", "foreigners", - "foreignors", "foreigners", - "foreingers", "foreigners", - "forensisch", "forensic", - "foreseeble", "foreseeable", - "forgeiners", "foreigners", - "forgieners", "foreigners", - "forgivance", "forgiven", - "forgivenss", "forgiveness", - "forgotting", "forgetting", - "foriegners", "foreigners", - "formadible", "formidable", - "formalhaut", "fomalhaut", - "formallity", "formally", - "formallize", "formalize", - "formatiing", "formatting", - "formatings", "formations", - "formativos", "formations", - "formidabel", "formidable", - "formidabil", "formidable", - "formidible", "formidable", - "forminable", "formidable", - "formitable", "formidable", - "formuladas", "formulas", - "formulados", "formulas", - "forseeable", "foreseeable", - "fortelling", "foretelling", - "fortunatly", "fortunately", - "fortunetly", "fortunately", - "foundaiton", "foundations", - "foundaries", "foundries", - "foundatoin", "foundations", - "fractalers", "fractals", - "fractalius", "fractals", - "fractalpus", "fractals", - "fracturare", "fracture", - "fragmanted", "fragment", - 
"francaises", "franchises", - "franchices", "franchises", - "franchines", "franchises", - "franchizes", "franchises", - "franchsies", "franchises", - "fransiscan", "franciscan", - "franticaly", "frantically", - "franticlly", "frantically", - "fraternaty", "fraternity", - "fraternety", "fraternity", - "fraterntiy", "fraternity", - "fraturnity", "fraternity", - "fraudalent", "fraudulent", - "fraudelant", "fraudulent", - "fraudelent", "fraudulent", - "fraudolent", "fraudulent", - "fraudulant", "fraudulent", - "freedomers", "freedoms", - "freedomest", "freedoms", - "freindlies", "friendlies", - "freindship", "friendship", - "frequencey", "frequency", - "friednship", "friendships", - "friednzone", "friendzoned", - "friendhsip", "friendship", - "friendsies", "friendlies", - "friendzies", "friendlies", - "friendzond", "friendzoned", - "frientship", "friendship", - "frigthened", "frightened", - "fromatting", "formatting", - "fromidable", "formidable", - "frontlinie", "frontline", - "fruadulent", "fraudulent", - "frustraded", "frustrated", - "frustradet", "frustrates", - "frustraits", "frustrates", - "frustrants", "frustrates", - "frustratin", "frustration", - "frustrsted", "frustrates", - "fucntional", "functional", - "fulfulling", "fulfilling", - "fullfiling", "fulfilling", - "fullfilled", "fulfilled", - "fullscrean", "fullscreen", - "fulttershy", "fluttershy", - "funcitonal", "functional", - "fundametal", "fundamental", - "furstrated", "frustrated", - "furstrates", "frustrates", - "furutistic", "futuristic", - "futhermore", "furthermore", - "futurestic", "futuristic", - "futurisitc", "futuristic", - "futurustic", "futuristic", - "galvinized", "galvanized", - "garuanteed", "guaranteed", - "garuantees", "guarantees", - "gauntanamo", "guantanamo", - "gauntlents", "gauntlet", - "gauranteed", "guaranteed", - "gaurantees", "guarantees", - "gaurenteed", "guaranteed", - "gaurentees", "guarantees", - "generalice", "generalize", - "generalife", "generalize", - "generalnie", "generalize", - "generaters", "generates", - "generaties", "generate", - "generatios", "generators", - "generatons", "generators", - "generatore", "generate", - "generelize", "generalize", - "generocity", "generosity", - "generoisty", "generosity", - "generostiy", "generosity", - "geneticaly", "genetically", - "geneticlly", "genetically", - "genitalias", "genitals", - "genuinelly", "genuinely", - "geographia", "geographical", - "geogrpahic", "geographic", - "germanisch", "germanic", - "gigantisch", "gigantic", - "gimmickers", "gimmicks", - "girlfirend", "girlfriend", - "girlfreind", "girlfriend", - "girlfriens", "girlfriends", - "girlfrinds", "girlfriends", - "girlfrined", "girlfriends", - "goalkeaper", "goalkeeper", - "goalkeeprs", "goalkeeper", - "goalkepeer", "goalkeeper", - "goegraphic", "geographic", - "golakeeper", "goalkeeper", - "goldburger", "goldberg", - "goosebumbs", "goosebumps", - "goosegumps", "goosebumps", - "goosepumps", "goosebumps", - "gothenberg", "gothenburg", - "govenrment", "government", - "govermenet", "goverment", - "govermnent", "governments", - "governemnt", "government", - "governened", "governed", - "governered", "governed", - "governmant", "governmental", - "governmetn", "governments", - "governmnet", "government", - "govnerment", "government", - "govornment", "government", - "gradiating", "graduating", - "gradiation", "graduation", - "graduacion", "graduation", - "grapefriut", "grapefruit", - "grapefrukt", "grapefruit", - "graphicaly", "graphically", - "graphiclly", "graphically", - "gratituous", 
"gratuitous", - "gratiutous", "gratuitous", - "gratuidous", "gratuitous", - "gratuituos", "gratuitous", - "gratutious", "gratuitous", - "graudating", "graduating", - "graudation", "graduation", - "gravitatie", "gravitate", - "greatfully", "gratefully", - "greenhosue", "greenhouse", - "greviances", "grievances", - "grievences", "grievances", - "grilfriend", "girlfriend", - "guaduloupe", "guadalupe", - "guanatanmo", "guantanamo", - "guantamamo", "guantanamo", - "guantamano", "guantanamo", - "guantanano", "guantanamo", - "guantanemo", "guantanamo", - "guantanoma", "guantanamo", - "guantanomo", "guantanamo", - "guantonamo", "guantanamo", - "guarantess", "guarantees", - "guardiands", "guardians", - "guardianes", "guardians", - "guardianis", "guardians", - "guarenteed", "guaranteed", - "guarentees", "guarantees", - "guarnateed", "guaranteed", - "guarnatees", "guarantees", - "guarunteed", "guaranteed", - "guaruntees", "guarantees", - "guatamalan", "guatemalan", - "gunatanamo", "guantanamo", - "gunlsinger", "gunslinger", - "gunsiinger", "gunslinger", - "gunslanger", "gunslinger", - "gunsligner", "gunslinger", - "gunstinger", "gunslinger", - "gymanstics", "gymnastics", - "gymnasitcs", "gymnastics", - "gynmastics", "gymnastics", - "haemorrage", "haemorrhage", - "halloweeen", "halloween", - "hambergers", "hamburgers", - "hamburgare", "hamburger", - "hamburgesa", "hamburgers", - "hamburgles", "hamburgers", - "hamburgurs", "hamburgers", - "handcuffes", "handcuffs", - "handelbars", "handlebars", - "handicaped", "handicapped", - "handwritng", "handwriting", - "harasments", "harassments", - "hardlinked", "hardline", - "harmoniacs", "harmonic", - "harmonisch", "harmonic", - "harrasment", "harassment", - "harrassing", "harassing", - "harvasting", "harvesting", - "haversting", "harvesting", - "headhpones", "headphones", - "headphoens", "headphones", - "headquarer", "headquarter", - "headquater", "headquarter", - "headshoots", "headshot", - "healtchare", "healthcare", - "healtheast", "healthiest", - "healthyest", "healthiest", - "heapdhones", "headphones", - "heartbeart", "heartbeat", - "heartbeast", "heartbeat", - "heartborne", "heartbroken", - "heartbrake", "heartbreak", - "hearthsone", "hearthstone", - "heatlhcare", "healthcare", - "heavyweght", "heavyweight", - "heavyweigt", "heavyweight", - "hedgehodge", "hedgehog", - "heidelburg", "heidelberg", - "heigthened", "heightened", - "heistation", "hesitation", - "helathcare", "healthcare", - "helicopers", "helicopters", - "helicoptor", "helicopter", - "helicotper", "helicopters", - "helicpoter", "helicopter", - "helictoper", "helicopters", - "helikopter", "helicopter", - "hemingwary", "hemingway", - "hemingwavy", "hemingway", - "hemipshere", "hemisphere", - "hemishpere", "hemisphere", - "hemmorhage", "hemorrhage", - "hempishere", "hemisphere", - "herculeans", "hercules", - "herculeasy", "hercules", - "herculeees", "hercules", - "hesitstion", "hesitation", - "hestiation", "hesitation", - "hieghtened", "heightened", - "hierachies", "hierarchies", - "hieroglphs", "hieroglyphs", - "highalnder", "highlander", - "highlighed", "highlighted", - "highligted", "highlighted", - "highloader", "highlander", - "highpander", "highlander", - "highscholl", "highschool", - "highshcool", "highschool", - "hillarious", "hilarious", - "hinderance", "hindrance", - "hinderence", "hindrance", - "hipsterest", "hipsters", - "hispanicos", "hispanics", - "hispanicus", "hispanics", - "histarical", "historical", - "histerical", "historical", - "historiaan", "historians", - "historicas", 
"historians", - "historicly", "historical", - "historiens", "histories", - "historisch", "historic", - "hoemopathy", "homeopathy", - "hollywoood", "hollywood", - "homecuming", "homecoming", - "homeoapthy", "homeopathy", - "homeonwers", "homeowners", - "homeopahty", "homeopathy", - "homeophaty", "homeopathy", - "homeopothy", "homeopathy", - "homeothapy", "homeopathy", - "homepoathy", "homeopathy", - "homewoners", "homeowners", - "homoepathy", "homeopathy", - "homogeneos", "homogeneous", - "homogeneus", "homogeneous", - "homophibia", "homophobia", - "homophibic", "homophobic", - "homophobie", "homophobe", - "homophonia", "homophobia", - "homophopia", "homophobia", - "homophopic", "homophobic", - "homosexaul", "homosexual", - "homosexuel", "homosexual", - "honeymooon", "honeymoon", - "hopefullly", "hopefully", - "hopeleslly", "hopelessly", - "horisontal", "horizontal", - "horizantal", "horizontal", - "horizontes", "horizons", - "horiztonal", "horizontal", - "horrendeus", "horrendous", - "horriblely", "horribly", - "hospitales", "hospitals", - "hospitalty", "hospitality", - "hospitible", "hospitable", - "hsitorians", "historians", - "humanaties", "humanities", - "humanitary", "humanity", - "humiliatin", "humiliation", - "humiliaton", "humiliation", - "humilitied", "humiliated", - "humillated", "humiliated", - "hurricance", "hurricane", - "hurriganes", "hurricanes", - "hurrikanes", "hurricanes", - "hurrycanes", "hurricanes", - "hydropilic", "hydrophilic", - "hydropobic", "hydrophobic", - "hyperbolie", "hyperbole", - "hyperlobic", "hyperbolic", - "hyperlogic", "hyperbolic", - "hypertrohy", "hypertrophy", - "hypertropy", "hypertrophy", - "hyphotesis", "hypothesis", - "hypocrates", "hypocrites", - "hypocriscy", "hypocrisy", - "hypocrises", "hypocrites", - "hypocritus", "hypocrites", - "hypocrties", "hypocrites", - "hypocrytes", "hypocrites", - "hypokrites", "hypocrites", - "hypothecis", "hypothesis", - "hypotheiss", "hypotheses", - "hypothesus", "hypotheses", - "hypothises", "hypotheses", - "hypothisis", "hypothesis", - "hypothosis", "hypothesis", - "hyprocites", "hypocrites", - "hystarical", "hysterical", - "hystericly", "hysterical", - "hysteriska", "hysteria", - "ibuprofein", "ibuprofen", - "ibuprofine", "ibuprofen", - "icelandinc", "icelandic", - "idealisitc", "idealistic", - "idealogies", "ideologies", - "identicial", "identical", - "identifyed", "identified", - "identitets", "identities", - "ideolagies", "ideologies", - "ideoligies", "ideologies", - "ideologias", "ideologies", - "ideologice", "ideologies", - "ideologije", "ideologies", - "ideologins", "ideologies", - "ideologisk", "ideologies", - "ideolouges", "ideologies", - "illegalest", "illegals", - "illegallly", "illegally", - "illegimacy", "illegitimacy", - "illegitime", "illegitimate", - "illegitimt", "illegitimate", - "illimunati", "illuminati", - "illinoians", "illinois", - "illistrate", "illiterate", - "illitarate", "illiterate", - "illitirate", "illiterate", - "illumanati", "illuminati", - "illumaniti", "illuminati", - "illumianti", "illuminati", - "illumimati", "illuminati", - "illuminaci", "illuminati", - "illuminadi", "illuminati", - "illuminami", "illuminati", - "illuminazi", "illuminati", - "illuminite", "illuminati", - "illuminiti", "illuminati", - "illuminoti", "illuminati", - "illuminuti", "illuminati", - "illumniati", "illuminati", - "illumunati", "illuminati", - "illuninati", "illuminati", - "illusiones", "illusions", - "illustrant", "illustrate", - "illustrare", "illustrate", - "illustrato", "illustration", - 
"imablanced", "imbalanced", - "imablances", "imbalances", - "imaginatie", "imaginative", - "imaginaton", "imagination", - "imaginitve", "imaginative", - "imbalenced", "imbalanced", - "imbalences", "imbalances", - "imcomplete", "incomplete", - "imediately", "immediately", - "imigration", "emigration", - "immaturaty", "immaturity", - "immaturety", "immaturity", - "immedeatly", "immediately", - "immediatly", "immediately", - "immedietly", "immediately", - "immenseley", "immensely", - "immidately", "immediately", - "immigranti", "immigration", - "immigrents", "immigrants", - "immitating", "imitating", - "immobilien", "immobile", - "immobilier", "immobile", - "immobilzed", "immobile", - "immobilzer", "immobile", - "immobilzes", "immobile", - "immortales", "immortals", - "immortalis", "immortals", - "immortaliy", "immortality", - "immortalls", "immortals", - "immortalty", "immortality", - "impartirla", "impartial", - "impecabbly", "impeccably", - "impeccible", "impeccable", - "impeckable", "impeccable", - "impelments", "implements", - "imperetive", "imperative", - "imperialsm", "imperialism", - "imperialst", "imperialist", - "imperitave", "imperative", - "imperitive", "imperative", - "implaments", "implements", - "implantase", "implants", - "implausble", "implausible", - "implausibe", "implausible", - "implemenet", "implements", - "implicatia", "implicit", - "implicatie", "implicit", - "implicatii", "implicit", - "implicetly", "implicitly", - "impliciete", "implicit", - "implicilty", "implicitly", - "impliments", "implements", - "imporbable", "improbable", - "importanly", "importantly", - "importanty", "importantly", - "importence", "importance", - "importerad", "imported", - "imporvised", "improvised", - "impossable", "impossible", - "impossbily", "impossibly", - "impossibal", "impossibly", - "impossibel", "impossibly", - "impossibry", "impossibly", - "impossibul", "impossibly", - "impractial", "impractical", - "impreative", "imperative", - "impresison", "impressions", - "impressoin", "impressions", - "impressons", "impressions", - "improbabil", "improbable", - "improbible", "improbable", - "impropable", "improbable", - "improsined", "imprisoned", - "improsoned", "imprisoned", - "improvemnt", "improvement", - "improvents", "improves", - "improvized", "improvised", - "imprsioned", "imprisoned", - "impulsemos", "impulses", - "imrpovised", "improvised", - "inablility", "inability", - "inaccruate", "inaccurate", - "inadaquate", "inadequate", - "inadaquete", "inadequate", - "inadecuate", "inadequate", - "inadeguate", "inadequate", - "inadeqaute", "inadequate", - "inadequete", "inadequate", - "inadequite", "inadequate", - "inadiquate", "inadequate", - "inagurated", "inaugurated", - "inbalanced", "imbalanced", - "inbetweeen", "inbetween", - "incarnaton", "incarnation", - "incentivos", "incentives", - "inchoerent", "incoherent", - "incidentes", "incidents", - "incidently", "incidentally", - "incidentul", "incidental", - "inclreased", "increased", - "incognitio", "incognito", - "incoherant", "incoherent", - "incohorent", "incoherent", - "incorectly", "incorrectly", - "incorrecly", "incorrectly", - "incorrecty", "incorrectly", - "incorretly", "incorrectly", - "incraments", "increments", - "incredable", "incredible", - "incredably", "incredibly", - "incremetal", "incremental", - "incriments", "increments", - "inctroduce", "introduce", - "indefenite", "indefinite", - "indefinate", "indefinite", - "indefinete", "indefinite", - "indefinity", "indefinitely", - "indeginous", "indigenous", - "indentical", 
"identical", - "independet", "independent", - "indepenent", "independent", - "inderictly", "indirectly", - "indicaters", "indicates", - "indicativo", "indication", - "indicatore", "indicate", - "indicitave", "indicative", - "indicitive", "indicative", - "indiffernt", "indifferent", - "indigenius", "indigenous", - "indiginous", "indigenous", - "indigneous", "indigenous", - "indikation", "indication", - "indireclty", "indirectly", - "indirektly", "indirectly", - "individuel", "individual", - "indiviudal", "individuals", - "indivudual", "individual", - "indoensian", "indonesian", - "indonasian", "indonesian", - "indoneisan", "indonesian", - "indonesean", "indonesian", - "indonesien", "indonesian", - "indonesion", "indonesian", - "indonisian", "indonesian", - "indonistan", "indonesian", - "indpendent", "independent", - "industiral", "industrial", - "industires", "industries", - "industrail", "industrial", - "industrees", "industries", - "industrias", "industries", - "industriel", "industrial", - "industrija", "industrial", - "industrije", "industries", - "indviduals", "individuals", - "inefficent", "inefficient", - "ineqaulity", "inequality", - "inequailty", "inequality", - "inevatible", "inevitable", - "inevetable", "inevitable", - "inevetably", "inevitably", - "inevetible", "inevitable", - "inevidable", "inevitable", - "inevidably", "inevitably", - "inevitible", "inevitable", - "inevitibly", "inevitably", - "inevtiable", "inevitable", - "inevtiably", "inevitably", - "infallable", "infallible", - "infaltable", "inflatable", - "infeccious", "infectious", - "infecteous", "infectious", - "infectuous", "infectious", - "infedility", "infidelity", - "infektious", "infectious", - "inferioara", "inferior", - "inferioare", "inferior", - "inferiorty", "inferiority", - "inferrence", "inference", - "infestaion", "infestation", - "infestaton", "infestation", - "infestions", "infections", - "infideltiy", "infidelity", - "infidility", "infidelity", - "infiltrade", "infiltrate", - "infiltrait", "infiltrate", - "infiltrare", "infiltrate", - "infiltrase", "infiltrate", - "infinately", "infinitely", - "infinetely", "infinitely", - "infiniment", "infinite", - "infinitley", "infinitely", - "infintiely", "infinitely", - "inflamable", "inflatable", - "inflateble", "inflatable", - "inflatible", "inflatable", - "infleunced", "influenced", - "inflitrate", "infiltrate", - "influanced", "influenced", - "influances", "influences", - "influencie", "influences", - "influening", "influencing", - "influensed", "influences", - "influenser", "influences", - "influenses", "influences", - "influental", "influential", - "influented", "influenced", - "influentes", "influences", - "influneced", "influenced", - "infograhic", "infographic", - "infograpic", "infographic", - "infomation", "information", - "informable", "informal", - "informarla", "informal", - "informarle", "informal", - "informarlo", "informal", - "informatie", "informative", - "informella", "informal", - "informerad", "informed", - "informtion", "information", - "infridging", "infringing", - "infrigning", "infringing", - "infulenced", "influenced", - "infulences", "influences", - "ingenuitiy", "ingenuity", - "ingrediant", "ingredient", - "ingrediens", "ingredients", - "ingrediets", "ingredient", - "inhabitans", "inhabitants", - "inhabitats", "inhabitants", - "inherantly", "inherently", - "inherintly", "inherently", - "inheritage", "heritage", - "inhernetly", "inherently", - "inifnitely", "infinitely", - "initaition", "initiation", - "initalised", "initialised", - 
"initaliser", "initialiser", - "initalises", "initialises", - "initalisms", "initialisms", - "initalized", "initialized", - "initalizer", "initializer", - "initalizes", "initializes", - "initalling", "initialling", - "initalness", "initialness", - "initiaitve", "initiatives", - "initiaties", "initiatives", - "initiativs", "initiatives", - "initiatves", "initiatives", - "initiavite", "initiatives", - "inititaive", "initiatives", - "inititiave", "initiatives", - "initmately", "intimately", - "initmidate", "intimidate", - "inituition", "initiation", - "injustaces", "injustices", - "injusticas", "injustices", - "inmigrants", "immigrants", - "innoavtion", "innovations", - "innocentes", "innocents", - "innotation", "innovation", - "innovacion", "innovation", - "innovaiton", "innovations", - "innovatief", "innovate", - "innovaties", "innovate", - "innovativo", "innovation", - "innvoation", "innovation", - "inofficial", "unofficial", - "inpsection", "inspection", - "inquisator", "inquisitor", - "inquisidor", "inquisitor", - "inquisiter", "inquisitor", - "inquisitio", "inquisitor", - "inquisitir", "inquisitor", - "inquisiton", "inquisition", - "inquistior", "inquisitor", - "inquizitor", "inquisitor", - "inqusitior", "inquisitor", - "insensitve", "insensitive", - "insepction", "inspection", - "insistance", "insistence", - "insistente", "insistence", - "insistenze", "insistence", - "insistince", "insistence", - "insitution", "institution", - "inspeccion", "inspection", - "inspeciton", "inspections", - "inspectons", "inspections", - "inspectres", "inspectors", - "inspektion", "inspection", - "inspektors", "inspectors", - "inspiraste", "inspires", - "inspiraton", "inspiration", - "inspirerad", "inspired", - "inspireras", "inspires", - "insrugency", "insurgency", - "instabiliy", "instability", - "instabilty", "instability", - "installeer", "installer", - "installent", "installment", - "installesd", "installs", - "installion", "installing", - "instatance", "instance", - "instelling", "installing", - "instituded", "instituted", - "instituion", "institution", - "institutie", "institute", - "institutue", "instituted", - "instrament", "instrument", - "instrcutor", "instructors", - "instrucion", "instruction", - "instructer", "instructor", - "instructie", "instructed", - "instruktor", "instructor", - "instuction", "instruction", - "instuments", "instruments", - "insturcted", "instructed", - "insturctor", "instructor", - "insturment", "instrument", - "instutions", "intuitions", - "instututed", "instituted", - "insurgance", "insurgency", - "insurgancy", "insurgency", - "intangable", "intangible", - "intangeble", "intangible", - "intangibil", "intangible", - "intanjible", "intangible", - "integraded", "integrated", - "integrarla", "integral", - "integrarlo", "integral", - "integratie", "integrated", - "integreres", "interferes", - "integreted", "integrated", - "inteligent", "intelligent", - "intenseley", "intensely", - "intensitiy", "intensity", - "intentinal", "intentional", - "intentines", "intestines", - "interacive", "interactive", - "interactes", "interacts", - "interactie", "interactive", - "interactue", "interacted", - "interasted", "interacted", - "interbread", "interbreed", - "intercepto", "interception", - "intercorse", "intercourse", - "intercouse", "intercourse", - "intereacts", "interfaces", - "interected", "interacted", - "interefers", "interferes", - "interesant", "interest", - "interesing", "interesting", - "interestes", "interests", - "interfacce", "interfaces", - "interfears", "interferes", - 
"interfeers", "interferes", - "interferce", "interferes", - "interferre", "interfere", - "intergated", "integrated", - "interioara", "interior", - "interioare", "interior", - "intermedie", "intermediate", - "internetbs", "internets", - "internetes", "internets", - "internetis", "internets", - "internetts", "internets", - "internetus", "internets", - "interprate", "interpret", - "interrugum", "interregnum", - "interruped", "interrupted", - "interstela", "interstellar", - "intervalls", "intervals", - "intervalos", "intervals", - "interveign", "intervening", - "interveing", "intervening", - "interveiws", "interviews", - "intervento", "intervention", - "intervenue", "intervene", - "interveres", "interferes", - "intervieni", "interviewing", - "intervieuw", "interviews", - "interviewd", "interviewed", - "interviewr", "interviewer", - "intervines", "intervenes", - "interviwed", "interviewed", - "interviwer", "interviewer", - "interwebbs", "interwebs", - "intestents", "intestines", - "intestinas", "intestines", - "intestinos", "intestines", - "intestions", "intestines", - "intidimate", "intimidate", - "intimadate", "intimidate", - "intimatley", "intimately", - "intimiated", "intimidate", - "intimidade", "intimidated", - "intimidant", "intimidate", - "intimidare", "intimidate", - "intimitade", "intimidated", - "intimitaly", "intimately", - "intimitate", "intimidate", - "intimitely", "intimately", - "intolarant", "intolerant", - "intolerace", "intolerance", - "intolerate", "intolerant", - "intolerent", "intolerant", - "intolorant", "intolerant", - "intolorent", "intolerant", - "intorduced", "introduced", - "intorduces", "introduces", - "intorverts", "introverts", - "intoxicted", "intoxicated", - "intraverts", "introverts", - "intreguing", "intriguing", - "intricaces", "intricacies", - "intriguied", "intrigue", - "intrigured", "intrigue", - "intrinseci", "intrinsic", - "intrinsinc", "intrinsic", - "intriquing", "intriguing", - "intriuging", "intriguing", - "introdecks", "introduces", - "introdused", "introduces", - "introvents", "introverts", - "introvered", "introverted", - "introversa", "introverts", - "introverse", "introverts", - "introversi", "introverts", - "introverso", "introverts", - "introversy", "introverts", - "introveted", "introverted", - "intruduced", "introduced", - "intruduces", "introduces", - "intruiging", "intriguing", - "intruments", "instruments", - "intuitevly", "intuitively", - "intuitivly", "intuitively", - "intuitivno", "intuition", - "intutively", "intuitively", - "inumerable", "enumerable", - "inusrgency", "insurgency", - "invaderats", "invaders", - "invaildate", "invalidates", - "invairably", "invariably", - "invaldiate", "invalidates", - "invalidade", "invalidate", - "invalidare", "invalidate", - "invalubale", "invaluable", - "invalueble", "invaluable", - "invaraibly", "invariably", - "invariabil", "invariably", - "invaribaly", "invariably", - "invaulable", "invaluable", - "inveitable", "inevitable", - "inveitably", "inevitably", - "invensions", "inventions", - "inventario", "inventor", - "inventarlo", "inventor", - "inventaron", "inventor", - "inventings", "inventions", - "inventivos", "inventions", - "invertendo", "inverted", - "inverterad", "inverted", - "invertions", "inventions", - "investemnt", "investments", - "investiage", "investigate", - "investions", "inventions", - "investirat", "investigator", - "investmens", "investments", - "invicinble", "invincible", - "invididual", "individual", - "invincable", "invincible", - "invinceble", "invincible", - "invinicble", 
"invincible", - "invinsible", "invincible", - "invinvible", "invincible", - "invisibily", "invisibility", - "invitacion", "invitation", - "invitating", "invitation", - "involunary", "involuntary", - "involvment", "involvement", - "ironcially", "ironically", - "irracional", "irrational", - "irrationel", "irrational", - "irrelavant", "irrelevant", - "irrelavent", "irrelevant", - "irrelevent", "irrelevant", - "irrelivant", "irrelevant", - "irrelivent", "irrelevant", - "irrevelant", "irrelevant", - "irreverant", "irrelevant", - "irridation", "irritation", - "irriration", "irritation", - "irritacion", "irritation", - "irritaties", "irritate", - "islamisist", "islamist", - "islamistas", "islamists", - "isntalling", "installing", - "isntructed", "instructed", - "isntrument", "instrument", - "israeliens", "israelis", - "israelitas", "israelis", - "italianess", "italians", - "itnroduced", "introduced", - "jailborken", "jailbroken", - "jalibroken", "jailbroken", - "jamaicains", "jamaican", - "jamaicaman", "jamaican", - "jerusaleum", "jerusalem", - "jounralism", "journalism", - "jounralist", "journalist", - "jouranlism", "journalism", - "jouranlist", "journalist", - "journalims", "journals", - "journalits", "journals", - "journalizm", "journalism", - "journalsim", "journalism", - "journolist", "journalist", - "judegments", "judgements", - "judgemenal", "judgemental", - "judgemetal", "judgemental", - "jugdements", "judgements", - "juggarnaut", "juggernaut", - "juggeranut", "juggernaut", - "juggernath", "juggernaut", - "juggernout", "juggernaut", - "juggernuat", "juggernaut", - "juggetnaut", "juggernaut", - "jugglenaut", "juggernaut", - "juggurnaut", "juggernaut", - "justifible", "justifiable", - "juvenilles", "juvenile", - "kickstarer", "kickstarter", - "kickstartr", "kickstarter", - "kickstater", "kickstarter", - "kidnapning", "kidnapping", - "kidnappade", "kidnapped", - "killingest", "killings", - "kilometros", "kilometers", - "kilomiters", "kilometers", - "kilomoters", "kilometers", - "kilomteres", "kilometers", - "kindapping", "kidnapping", - "kingdomers", "kingdoms", - "krpytonite", "kryptonite", - "krypotnite", "kryptonite", - "krypronite", "kryptonite", - "kryptinite", "kryptonite", - "kryptolite", "kryptonite", - "kryptonyte", "kryptonite", - "krypyonite", "kryptonite", - "krytponite", "kryptonite", - "kyrptonite", "kryptonite", - "labarotory", "laboratory", - "laboratroy", "laboratory", - "laborerers", "laborers", - "laboritory", "laboratory", - "laborotory", "laboratory", - "lackbuster", "lackluster", - "lacklaster", "lackluster", - "landacapes", "landscapes", - "landingers", "landings", - "landshapes", "landscapes", - "landspaces", "landscapes", - "lannasters", "lannisters", - "lannesters", "lannisters", - "lannistars", "lannisters", - "lannsiters", "lannisters", - "lateration", "alteration", - "latitudine", "latitude", - "laughabley", "laughably", - "laughablly", "laughably", - "launchered", "launched", - "leaglizing", "legalizing", - "lectureres", "lectures", - "legalazing", "legalizing", - "legalizare", "legalize", - "legalizate", "legalize", - "legendaies", "legendaries", - "legendaris", "legendaries", - "legimitacy", "legitimacy", - "legimitate", "legitimate", - "legislatie", "legislative", - "legitamacy", "legitimacy", - "legitamate", "legitimate", - "legitamicy", "legitimacy", - "legitamite", "legitimate", - "legitemacy", "legitimacy", - "legitemate", "legitimate", - "legitimaly", "legitimacy", - "legitimicy", "legitimacy", - "legitimite", "legitimate", - "leiutenant", "lieutenant", 
- "lesbianese", "lesbians", - "lesbianest", "lesbians", - "leuitenant", "lieutenant", - "levetating", "levitating", - "liberacion", "liberation", - "liberalest", "liberate", - "liberalizm", "liberalism", - "liberalnim", "liberalism", - "liberalsim", "liberalism", - "liberarion", "liberation", - "liberaties", "liberate", - "liberatore", "liberate", - "libertania", "libertarians", - "libguistic", "linguistic", - "lietuenant", "lieutenant", - "lieutanant", "lieutenant", - "lieutanent", "lieutenant", - "lieutenent", "lieutenant", - "lifestiles", "lifestyles", - "lifestlyes", "lifestyles", - "lifesystem", "filesystem", - "lifesytles", "lifestyles", - "lifetimers", "lifetimes", - "lifetsyles", "lifestyles", - "lighhtning", "lightening", - "lightergas", "lighters", - "lighthning", "lightening", - "lighthorse", "lighthouse", - "lighthosue", "lighthouse", - "lighthours", "lighthouse", - "lightining", "lighting", - "lightneing", "lightening", - "lightnting", "lightening", - "lightrooom", "lightroom", - "lightweigt", "lightweight", - "ligitation", "litigation", - "ligthening", "lightening", - "ligthhouse", "lighthouse", - "likelyhood", "likelihood", - "limination", "limitation", - "limitacion", "limitation", - "limitaiton", "limitation", - "limitating", "limitation", - "limitativo", "limitation", - "linguisics", "linguistics", - "linguisitc", "linguistics", - "linguistcs", "linguistics", - "linguistis", "linguistics", - "linguitics", "linguistic", - "lingusitic", "linguistics", - "lingvistic", "linguistic", - "liousville", "louisville", - "listeneres", "listeners", - "literallly", "literally", - "literarely", "literary", - "literarlly", "literary", - "literatire", "literate", - "literative", "literate", - "literatute", "literate", - "lithuanina", "lithuania", - "litterally", "literally", - "liuetenant", "lieutenant", - "liveatream", "livestream", - "livelehood", "livelihood", - "liverpoool", "liverpool", - "livescream", "livestream", - "livestreem", "livestream", - "livestrems", "livestream", - "livilehood", "livelihood", - "livliehood", "livelihood", - "lobbyistes", "lobbyists", - "lockacreen", "lockscreen", - "logictical", "logistical", - "logisitcal", "logistical", - "logisticas", "logistics", - "logisticly", "logistical", - "loiusville", "louisville", - "lollipoopy", "lollipop", - "lonelyness", "loneliness", - "longevitiy", "longevity", - "lonileness", "loneliness", - "lonlieness", "loneliness", - "louieville", "louisville", - "louisiania", "louisiana", - "louisianna", "louisiana", - "louisivlle", "louisville", - "louisviile", "louisville", - "lousiville", "louisville", - "luietenant", "lieutenant", - "mabyelline", "maybelline", - "magnifient", "magnificent", - "mainpulate", "manipulate", - "mainstreem", "mainstream", - "maintaince", "maintained", - "maintaines", "maintains", - "maintainig", "maintaining", - "maintenace", "maintenance", - "maintianed", "maintained", - "maintioned", "mentioned", - "malfuncion", "malfunction", - "malpractce", "malpractice", - "managebale", "manageable", - "maneagable", "manageable", - "maneouvred", "manoeuvred", - "maneouvres", "manoeuvres", - "maneuveres", "maneuvers", - "maneuveurs", "maneuver", - "manifestas", "manifests", - "manifestes", "manifests", - "manifestus", "manifests", - "manipluate", "manipulate", - "manipualte", "manipulate", - "manipulant", "manipulate", - "manipulare", "manipulate", - "manipulted", "manipulated", - "maniuplate", "manipulate", - "mannarisms", "mannerisms", - "mannersims", "mannerisms", - "mannorisms", "mannerisms", - "manufacter", 
"manufacture", - "manufacure", "manufacture", - "manufature", "manufacture", - "maraudeurs", "marauder", - "margaritte", "margaret", - "margianlly", "marginally", - "marginaali", "marginal", - "marginable", "marginal", - "marignally", "marginally", - "marijuanna", "marijuana", - "marketting", "marketing", - "marshmalow", "marshmallow", - "masculinty", "masculinity", - "massacrare", "massacre", - "massivelly", "massively", - "masteriers", "masteries", - "masternind", "mastermind", - "masterpice", "masterpiece", - "mastrubate", "masturbate", - "mastubrate", "masturbated", - "masturabte", "masturbate", - "masturbait", "masturbate", - "masturbare", "masturbate", - "masturbeta", "masturbated", - "masturdate", "masturbate", - "materiales", "materials", - "materialsm", "materialism", - "maximazing", "maximizing", - "maximixing", "maximizing", - "mayballine", "maybelline", - "maybellene", "maybelline", - "maybellibe", "maybelline", - "maybilline", "maybelline", - "mccarthyst", "mccarthyist", - "mdifielder", "midfielder", - "meagthread", "megathread", - "meaningess", "meanings", - "meaningles", "meanings", - "meatballls", "meatballs", - "mecahnical", "mechanical", - "mecahnisms", "mechanisms", - "mechancial", "mechanical", - "mechandise", "merchandise", - "mechanichs", "mechanics", - "mechanicle", "mechanical", - "mechanicly", "mechanical", - "mechanicus", "mechanics", - "mechanincs", "mechanic", - "mechanisim", "mechanism", - "mechansims", "mechanisms", - "mechinical", "mechanical", - "mechinisms", "mechanisms", - "mediaction", "medications", - "medicacion", "medication", - "medicaiton", "medication", - "medicalert", "medicare", - "medicallly", "medically", - "medicatons", "medications", - "medicinens", "medicines", - "medicinske", "medicine", - "medicority", "mediocrity", - "medidating", "meditating", - "mediocirty", "mediocrity", - "mediocraty", "mediocrity", - "mediocrety", "mediocrity", - "mediocricy", "mediocrity", - "mediocrily", "mediocrity", - "mediocrisy", "mediocrity", - "meditacion", "medications", - "meditaiton", "meditation", - "melatonian", "melatonin", - "melatonion", "melatonin", - "mellinnium", "millennium", - "melodieuse", "melodies", - "membrances", "membrane", - "mentallity", "mentally", - "mentionnes", "mentions", - "mercenaire", "mercenaries", - "mercenares", "mercenaries", - "mercentile", "mercantile", - "merchanise", "merchandise", - "merchantos", "merchants", - "messagease", "messages", - "messagepad", "messaged", - "messenging", "messaging", - "metabalism", "metabolism", - "metabilism", "metabolism", - "metabloism", "metabolism", - "metablosim", "metabolism", - "metabolics", "metabolism", - "metabolizm", "metabolism", - "metabolsim", "metabolism", - "metalurgic", "metallurgic", - "metaphoras", "metaphors", - "metaphores", "metaphors", - "metaphyics", "metaphysics", - "meterology", "meteorology", - "methaphors", "metaphors", - "methodolgy", "methodology", - "methodoloy", "methodology", - "metrapolis", "metropolis", - "metrolopis", "metropolis", - "metropilis", "metropolis", - "metroplois", "metropolis", - "metropolin", "metropolitan", - "metropolos", "metropolis", - "metropolys", "metropolis", - "mexicanese", "mexicans", - "mexicaness", "mexicans", - "michelline", "michelle", - "micorwaves", "microwaves", - "microhpone", "microphone", - "microscoop", "microscope", - "microvaves", "microwaves", - "microvaxes", "microwaves", - "micrpohone", "microphones", - "midfeilder", "midfielder", - "midfiedler", "midfielder", - "midfieldes", "midfielders", - "midfielers", "midfielders", 
- "midfileder", "midfielder", - "midifelder", "midfielder", - "midnlessly", "mindlessly", - "migitation", "mitigation", - "migrainers", "migraines", - "miletsones", "milestones", - "milisecond", "millisecond", - "militiades", "militias", - "militiants", "militias", - "millinnium", "millennium", - "miminalist", "minimalist", - "minamilist", "minimalist", - "mindleslly", "mindlessly", - "minimazing", "minimizing", - "minimilast", "minimalist", - "minimilist", "minimalist", - "mininalist", "minimalist", - "ministeres", "ministers", - "ministerns", "ministers", - "minneaplis", "minneapolis", - "minneapols", "minneapolis", - "minnesotta", "minnesota", - "minoritets", "minorities", - "minoroties", "minorities", - "miracalous", "miraculous", - "miracluous", "miraculous", - "miracoulus", "miraculous", - "mircophone", "microphone", - "mircoscope", "microscope", - "mircowaves", "microwaves", - "misandrony", "misandry", - "miscarrage", "miscarriage", - "miscarrige", "miscarriage", - "misdemenor", "misdemeanor", - "miserabley", "miserably", - "miserablly", "miserably", - "misforture", "misfortune", - "misgoynist", "misogynist", - "misinfomed", "misinformed", - "misinterpt", "misinterpret", - "misisonary", "missionary", - "misoganist", "misogynist", - "misogenist", "misogynist", - "misoginist", "misogynist", - "misoginyst", "misogynist", - "misognyist", "misogynist", - "misogonist", "misogynist", - "misogonyst", "misogynist", - "misogyinst", "misogynist", - "misogynyst", "misogynist", - "misoygnist", "misogynist", - "mispelling", "misspelling", - "missionare", "missionaries", - "missionera", "missionary", - "missisippi", "mississippi", - "mississipi", "mississippi", - "mississppi", "mississippi", - "misspeling", "misspelling", - "misspellng", "misspelling", - "mistakedly", "mistakenly", - "mistakinly", "mistakenly", - "mistankely", "mistakenly", - "misterious", "mysterious", - "misteryous", "mysterious", - "mistreaded", "mistreated", - "misygonist", "misogynist", - "mitigaiton", "mitigation", - "moderacion", "moderation", - "moderaters", "moderates", - "moderatley", "moderately", - "moderatore", "moderate", - "moderatorn", "moderation", - "modificato", "modification", - "modifieras", "modifiers", - "modifieres", "modifiers", - "moisturier", "moisturizer", - "moleculair", "molecular", - "molestaion", "molestation", - "molestarle", "molester", - "molestarme", "molester", - "molestarse", "molester", - "molestarte", "molester", - "molestered", "molested", - "momentarly", "momentarily", - "monagomous", "monogamous", - "monetizare", "monetize", - "monitering", "monitoring", - "monogymous", "monogamous", - "monolistic", "monolithic", - "monolitich", "monolithic", - "monolopies", "monopolies", - "monolothic", "monolithic", - "monolythic", "monolithic", - "monopilies", "monopolies", - "monoploies", "monopolies", - "monopolets", "monopolies", - "monopolice", "monopolies", - "monopolios", "monopolies", - "monothilic", "monolithic", - "monsterous", "monsters", - "montioring", "monitoring", - "monumentos", "monuments", - "monumentul", "monumental", - "monumentus", "monuments", - "mormonisim", "mormonism", - "morphinate", "morphine", - "morrisette", "morissette", - "morrisound", "morrison", - "mosquitero", "mosquito", - "mosquiters", "mosquitoes", - "motherbard", "motherboard", - "motherboad", "motherboard", - "motherbord", "motherboard", - "motivaiton", "motivations", - "motiviated", "motivated", - "motorcicle", "motorcycle", - "motorcylce", "motorcycle", - "motorcyles", "motorcycles", - "motorollas", "motorola", - 
"mouthpeace", "mouthpiece", - "mouthpeice", "mouthpiece", - "movespeeed", "movespeed", - "mozzaralla", "mozzarella", - "mozzeralla", "mozzarella", - "mozzorella", "mozzarella", - "mulitation", "mutilation", - "mulitplied", "multiplied", - "mulitplier", "multiplier", - "mulitverse", "multiverse", - "multilpier", "multiplier", - "multiplaer", "multiplier", - "multiplaye", "multiply", - "multiplayr", "multiply", - "multiplays", "multiply", - "multipleye", "multiply", - "multipling", "multiplying", - "multiplyed", "multiplied", - "multiplyer", "multiple", - "multiplyng", "multiplying", - "murderered", "murdered", - "murdereres", "murderers", - "muscicians", "musicians", - "musculaire", "muscular", - "mushroooms", "mushroom", - "mutialtion", "mutilation", - "mutiliated", "mutilated", - "mutliation", "mutilation", - "mutliplied", "multiplied", - "mutliplier", "multiplier", - "mutliverse", "multiverse", - "mysogynist", "misogynist", - "mysterieus", "mysteries", - "nagivating", "navigating", - "nagivation", "navigation", - "narcassism", "narcissism", - "narcassist", "narcissist", - "narcessist", "narcissist", - "narciscism", "narcissism", - "narciscist", "narcissist", - "narcisissm", "narcissism", - "narcisisst", "narcissist", - "narcisists", "narcissist", - "narcissicm", "narcissism", - "narcissict", "narcissist", - "narcissitc", "narcissist", - "narcissits", "narcissist", - "narcoticos", "narcotics", - "narrativas", "narratives", - "narrativos", "narratives", - "narritives", "narratives", - "nashvillle", "nashville", - "nationales", "nationals", - "nationalis", "nationals", - "nationalit", "nationalist", - "nationaliy", "nationality", - "nationalty", "nationality", - "nationella", "national", - "naturually", "naturally", - "naviagting", "navigating", - "naviagtion", "navigation", - "navigatore", "navigate", - "neccessary", "necessary", - "necesarily", "necessarily", - "necessairy", "necessarily", - "necessarly", "necessary", - "necessarry", "necessary", - "necessiate", "necessitate", - "necessites", "necessities", - "neckbeared", "neckbeard", - "neckboards", "neckbeards", - "neckbreads", "neckbeards", - "neckneards", "neckbeards", - "necromacer", "necromancer", - "necromaner", "necromancer", - "needleslly", "needlessly", - "negativaty", "negativity", - "negativley", "negatively", - "negelcting", "neglecting", - "negilgence", "negligence", - "negiotated", "negotiated", - "neglacting", "neglecting", - "neglagence", "negligence", - "neglegance", "negligence", - "neglegible", "negligible", - "neglegting", "neglecting", - "neglibible", "negligible", - "neglicence", "negligence", - "neglicible", "negligible", - "neglicting", "neglecting", - "negligable", "negligible", - "negligance", "negligence", - "negligeble", "negligible", - "negligente", "negligence", - "negociated", "negotiated", - "negogiated", "negotiated", - "negoitated", "negotiated", - "negotaited", "negotiated", - "negotation", "negotiation", - "negotiaion", "negotiation", - "negotiatie", "negotiated", - "negotiatin", "negotiations", - "negotiaton", "negotiation", - "neigbhours", "neighbours", - "neighbhors", "neighbours", - "neighbords", "neighbours", - "neighbores", "neighbours", - "netowrking", "networking", - "netruality", "neutrality", - "neturality", "neutrality", - "netwroking", "networking", - "neurologia", "neurological", - "neutrailty", "neutrality", - "newletters", "newsletters", - "newlsetter", "newsletter", - "newsettler", "newsletter", - "newslatter", "newsletter", - "nieghbours", "neighbours", - "nightmates", "nightmares", - 
"nightmears", "nightmares", - "nightmeres", "nightmares", - "nigthmares", "nightmares", - "nipticking", "nitpicking", - "nitpciking", "nitpicking", - "nominacion", "nomination", - "nominatino", "nominations", - "nominativo", "nomination", - "nominatons", "nominations", - "nonsencial", "nonsensical", - "nontheless", "nonetheless", - "northerend", "northern", - "nostalgica", "nostalgia", - "nostalgija", "nostalgia", - "noteworhty", "noteworthy", - "nothingess", "nothingness", - "noticabely", "noticeably", - "noticabley", "noticeably", - "noticiably", "noticeably", - "notoriosly", "notoriously", - "novembeard", "november", - "nuetrality", "neutrality", - "nutricious", "nutritious", - "nutrientes", "nutrients", - "nutritents", "nutrients", - "nutritinal", "nutritional", - "nutritiuos", "nutritious", - "nutritivos", "nutritious", - "nutrituous", "nutritious", - "nutrutious", "nutritious", - "obatinable", "obtainable", - "obejctives", "objectives", - "obilgatory", "obligatory", - "objecitves", "objectives", - "objectivas", "objectives", - "objectivly", "objectively", - "objectivst", "objectives", - "objectivty", "objectivity", - "objektives", "objectives", - "obligitary", "obligatory", - "obligitory", "obligatory", - "observabil", "observable", - "observarse", "observers", - "observaton", "observation", - "observeras", "observers", - "observered", "observed", - "observeres", "observers", - "observible", "observable", - "obstancles", "obstacles", - "obstrucion", "obstruction", - "obstructin", "obstruction", - "obtainabie", "obtainable", - "obtaineble", "obtainable", - "obtainible", "obtainable", - "obtianable", "obtainable", - "ocasionaly", "occasionally", - "ocassional", "occasional", - "ocassioned", "occasioned", - "occaisonal", "occasional", - "occasionly", "occasional", - "occassions", "occasions", - "occational", "occasional", - "occulation", "occupation", - "occupaiton", "occupation", - "occurances", "occurrences", - "occurences", "occurrences", - "occurrance", "occurrence", - "octohedral", "octahedral", - "octohedron", "octahedron", - "offensivly", "offensively", - "offereings", "offerings", - "officailly", "officially", - "olbigatory", "obligatory", - "ominpotent", "omnipotent", - "ominscient", "omniscient", - "omnipetent", "omnipotent", - "omnipitent", "omnipotent", - "omnipotant", "omnipotent", - "omnisicent", "omniscient", - "omniverous", "omnivorous", - "omnsicient", "omniscient", - "onmipotent", "omnipotent", - "onmiscient", "omniscient", - "operatings", "operations", - "operativne", "operative", - "operativos", "operations", - "oportunity", "opportunity", - "opponenets", "opponent", - "oppononent", "opponent", - "oppressiun", "oppressing", - "optimisitc", "optimistic", - "optimizare", "optimize", - "optimizate", "optimize", - "optimizied", "optimize", - "organicaly", "organically", - "organiclly", "organically", - "organisate", "organise", - "organische", "organise", - "organisera", "organizers", - "organisere", "organizers", - "organisert", "organizers", - "organisier", "organise", - "organisims", "organism", - "organismed", "organise", - "organismen", "organise", - "organismer", "organise", - "organismes", "organisms", - "organismus", "organisms", - "organisten", "organise", - "organiszed", "organise", - "organizaed", "organize", - "organizare", "organizer", - "organizate", "organize", - "organizors", "organizers", - "organizuje", "organize", - "organziers", "organizers", - "orientaion", "orientation", - "orientarla", "oriental", - "orientarlo", "oriental", - "origianlly", 
"originally", - "originales", "originals", - "originalet", "originated", - "originalis", "originals", - "originalty", "originality", - "orignially", "originally", - "origniated", "originated", - "origonally", "originally", - "origonated", "originated", - "ostencibly", "ostensibly", - "ostenisbly", "ostensibly", - "ostensably", "ostensibly", - "ostentibly", "ostensibly", - "ostrasiced", "ostracized", - "ostrasized", "ostracized", - "ostraziced", "ostracized", - "ostrazised", "ostracized", - "ostrecized", "ostracized", - "ostricized", "ostracized", - "ostrocized", "ostracized", - "oustanding", "outstanding", - "outcalssed", "outclassed", - "outlcassed", "outclassed", - "outnumberd", "outnumbered", - "outnumbred", "outnumbered", - "outperfoms", "outperform", - "outperfrom", "outperform", - "outpreform", "outperform", - "outrageuos", "outrageous", - "outragious", "outrageous", - "outragoues", "outrageous", - "outreagous", "outrageous", - "outsourcad", "outsourced", - "outsouring", "outsourcing", - "outsoursed", "outsourced", - "outweighes", "outweighs", - "overarcing", "overarching", - "overclockd", "overclocked", - "overcloked", "overclocked", - "overcoding", "overcoming", - "overheards", "overhead", - "overheared", "overhead", - "overhooked", "overlooked", - "overlanded", "overloaded", - "overlaoded", "overloaded", - "overlaping", "overlapping", - "overlauded", "overloaded", - "overloards", "overload", - "overlorded", "overloaded", - "overlordes", "overlords", - "overnurfed", "overturned", - "overpirced", "overpriced", - "overpowerd", "overpowered", - "overpowred", "overpowered", - "overprised", "overpriced", - "overtunned", "overturned", - "overtunred", "overturned", - "overturing", "overturn", - "overweigth", "overweight", - "overwhemed", "overwhelmed", - "overwieght", "overweight", - "overwritte", "overwrite", - "pahtfinder", "pathfinder", - "painfullly", "painfully", - "painkilers", "painkillers", - "pairlament", "parliament", - "pakistanti", "pakistani", - "paladinlst", "paladins", - "palcements", "placements", - "paleolitic", "paleolithic", - "palestinan", "palestinian", - "paltformer", "platformer", - "palyerbase", "playerbase", - "parachutte", "parachute", - "parademics", "paramedics", - "paradiggum", "paradigm", - "paragraghs", "paragraphs", - "paragrahps", "paragraphs", - "paragrapgh", "paragraphs", - "paragrpahs", "paragraphs", - "parahprase", "paraphrase", - "paralleles", "parallels", - "parallells", "parallels", - "paramadics", "paramedics", - "paramaters", "parameters", - "paramecias", "paramedics", - "parametics", "paramedics", - "parametros", "parameters", - "paramiters", "parameters", - "paramormal", "paranormal", - "paranoicas", "paranoia", - "paranomral", "paranormal", - "paranornal", "paranormal", - "parapharse", "paraphrase", - "paraphraze", "paraphrase", - "paraprhase", "paraphrase", - "parasitter", "parasite", - "parilament", "parliament", - "parituclar", "particular", - "parlaiment", "parliament", - "parliamant", "parliament", - "parliamone", "parliament", - "parliement", "parliament", - "parrallell", "parallel", - "parrallely", "parallelly", - "partiarchy", "patriarchy", - "participas", "participants", - "participat", "participants", - "participte", "participate", - "particualr", "particular", - "partiotism", "patriotism", - "passionais", "passions", - "passionale", "passionately", - "passionant", "passionate", - "passionite", "passionate", - "passivedns", "passives", - "passivelly", "passively", - "patenterad", "patented", - "pathfidner", "pathfinder", - 
"pathfindir", "pathfinder", - "pathifnder", "pathfinder", - "patientens", "patients", - "patrairchy", "patriarchy", - "patriachry", "patriarchy", - "patriarcal", "patriarchal", - "patriarhal", "patriarchal", - "patriatchy", "patriarchy", - "patriatism", "patriotism", - "patrionism", "patriotism", - "patriotics", "patriotism", - "patriotisk", "patriots", - "patroitism", "patriotism", - "patryarchy", "patriarchy", - "pedantisch", "pedantic", - "pedestiran", "pedestrian", - "pedestrain", "pedestrian", - "pedictions", "depictions", - "pedohpiles", "pedophiles", - "pedohpilia", "pedophilia", - "pedophilac", "pedophilia", - "pedophilea", "pedophilia", - "pedophilie", "pedophile", - "pedophilla", "pedophilia", - "pedophille", "pedophile", - "pedopholia", "pedophilia", - "penetraion", "penetration", - "penetratin", "penetration", - "penetraton", "penetration", - "penguinese", "penguins", - "penguiness", "penguins", - "peninsulla", "peninsula", - "penninsula", "peninsula", - "peodphiles", "pedophiles", - "peodphilia", "pedophilia", - "pepperment", "peppermint", - "pepperonni", "pepperoni", - "percantage", "percentage", - "percantile", "percentile", - "percaution", "precaution", - "percenatge", "percentages", - "percential", "percentile", - "percentige", "percentile", - "perceptoin", "perceptions", - "percession", "percussion", - "percetange", "percentages", - "percetnage", "percentages", - "percintile", "percentile", - "percission", "percussion", - "percpetion", "perceptions", - "percusions", "percussion", - "perdicting", "predicting", - "perdiction", "prediction", - "perdictive", "predictive", - "perenially", "perennially", - "perfeccion", "perfection", - "perfecxion", "perfection", - "perfektion", "perfection", - "perferable", "preferable", - "perferably", "preferably", - "perference", "preference", - "perferring", "preferring", - "perfexcion", "perfection", - "perfomance", "performance", - "performace", "performance", - "performane", "performances", - "performans", "performances", - "performens", "performers", - "performous", "performs", - "perfromers", "performers", - "perhiperal", "peripheral", - "peridinkle", "periwinkle", - "perihperal", "peripheral", - "periodisch", "periodic", - "periperhal", "peripheral", - "peripheals", "peripherals", - "peripheria", "peripheral", - "periphiral", "peripheral", - "periphreal", "peripheral", - "periphrial", "peripheral", - "peritinkle", "periwinkle", - "periwankle", "periwinkle", - "periwinkel", "periwinkle", - "periwinkie", "periwinkle", - "periwinlke", "periwinkle", - "permanenty", "permanently", - "permanetly", "permanently", - "permisions", "permission", - "permisison", "permissions", - "permissble", "permissible", - "permissibe", "permissible", - "permissons", "permissions", - "perogative", "prerogative", - "perordered", "preordered", - "perpatuate", "perpetuate", - "perpetualy", "perpetually", - "perpetuare", "perpetuate", - "persausion", "persuasion", - "persausive", "persuasive", - "persective", "respective", - "persectued", "persecuted", - "persecutie", "persecuted", - "persecutin", "persecution", - "perserving", "preserving", - "persicuted", "persecuted", - "persistant", "persistent", - "persistens", "persists", - "persoanlly", "personally", - "persocuted", "persecuted", - "personalie", "personalized", - "personalis", "personas", - "personarse", "personas", - "personatus", "personas", - "personnell", "personnel", - "perspecive", "perspective", - "perspectie", "perspectives", - "persuasian", "persuasion", - "persuasing", "persuasion", - 
"persuasivo", "persuasion", - "persuation", "persuasion", - "persucuted", "persecuted", - "persumably", "presumably", - "persussion", "persuasion", - "persvasive", "persuasive", - "perswasion", "persuasion", - "pertinante", "pertinent", - "pervailing", "prevailing", - "pervalence", "prevalence", - "pervention", "prevention", - "perversley", "perverse", - "pesitcides", "pesticides", - "pessimistc", "pessimistic", - "pessimitic", "pessimistic", - "pestacides", "pesticides", - "pestecides", "pesticides", - "pesticedes", "pesticides", - "pesticidas", "pesticides", - "pestisides", "pesticides", - "pestizides", "pesticides", - "pharamcist", "pharmacist", - "pharmacias", "pharmacist", - "pharmacyst", "pharmacist", - "pharmasist", "pharmacist", - "pharmicist", "pharmacist", - "phemonenon", "phenomenon", - "phenemenon", "phenomenon", - "phenemonal", "phenomenal", - "phenomanal", "phenomenal", - "phenomanon", "phenomenon", - "phenomemon", "phenomenon", - "phenomenen", "phenomenon", - "phenomenol", "phenomenal", - "phenomenom", "phenomenon", - "phenominon", "phenomenon", - "phenomonal", "phenomenal", - "phenomonen", "phenomenon", - "phenomonon", "phenomenon", - "phenonemal", "phenomenal", - "phenonemon", "phenomenon", - "phenonmena", "phenomena", - "philipines", "philippines", - "philippins", "philippines", - "philisophy", "philosophy", - "phillipine", "philippine", - "phillipses", "phillies", - "philosiphy", "philosophy", - "philosohpy", "philosophy", - "philosoper", "philosopher", - "philospher", "philosopher", - "philospohy", "philosophy", - "photogragh", "photograph", - "photograhs", "photographs", - "photograhy", "photography", - "photograps", "photographs", - "photograpy", "photography", - "photogrpah", "photographs", - "photoshopd", "photoshopped", - "photoshope", "photoshopped", - "phramacist", "pharmacist", - "phsyically", "physically", - "phsyicians", "physicians", - "phsyicists", "physicists", - "phsyiology", "physiology", - "phycisians", "physicians", - "phycisists", "physicists", - "phyiscally", "physically", - "phyisology", "physiology", - "physcially", "physically", - "physcology", "psychology", - "physcopath", "psychopath", - "physicials", "physicians", - "physiciens", "physicians", - "physioligy", "physiology", - "picthforks", "pitchforks", - "pinoneered", "pioneered", - "pitchferks", "pitchforks", - "pitchfolks", "pitchforks", - "pitchfords", "pitchforks", - "pitchworks", "pitchforks", - "pitckforks", "pitchforks", - "pittaburgh", "pittsburgh", - "pittsbrugh", "pittsburgh", - "placehoder", "placeholder", - "placeholdr", "placeholder", - "placeholer", "placeholder", - "placemenet", "placements", - "plagairism", "plagiarism", - "plagarisim", "plagiarism", - "plagiariam", "plagiarism", - "plagiarios", "plagiarism", - "plagiarius", "plagiarism", - "plagiarizm", "plagiarism", - "plagierism", "plagiarism", - "plaguarism", "plagiarism", - "plaigarism", "plagiarism", - "plasticosa", "plastics", - "platfarmer", "platformer", - "platformar", "platformer", - "platformie", "platformer", - "platfotmer", "platformer", - "platfromer", "platformer", - "platofrmer", "platformer", - "playaround", "playground", - "playersare", "playerbase", - "playgorund", "playground", - "playthrogh", "playthrough", - "playthrouh", "playthrough", - "playwrites", "playwrights", - "plethorian", "plethora", - "policitian", "politician", - "polinators", "pollinators", - "polishuset", "polishes", - "politessen", "politeness", - "politicain", "politician", - "politicaly", "politically", - "politicien", "politician", - 
"politicing", "politician", - "politicion", "politician", - "politickin", "politician", - "politiikan", "politician", - "politiness", "politeness", - "polititian", "politician", - "popualtion", "populations", - "populairty", "popularity", - "populaiton", "populations", - "popularaty", "popularity", - "popularest", "populate", - "popularily", "popularity", - "populaties", "populate", - "populatiry", "popularity", - "populative", "populate", - "populatoin", "populations", - "popultaion", "populations", - "pormetheus", "prometheus", - "pornograhy", "pornography", - "pornograpy", "pornography", - "pornogrphy", "pornography", - "porportion", "proportion", - "portabilty", "portability", - "portarying", "portraying", - "portoguese", "portuguese", - "portraiing", "portraying", - "portrating", "portraying", - "portrayels", "portrays", - "portugeuse", "portuguese", - "portuguise", "portuguese", - "posessions", "possessions", - "posicional", "positional", - "positevely", "positively", - "positioing", "positioning", - "positionly", "positional", - "positionne", "positioned", - "positivley", "positively", - "possesives", "possessive", - "possessers", "possesses", - "possessess", "possesses", - "possibiliy", "possibility", - "possibilty", "possibility", - "possissive", "possessive", - "posthomous", "posthumous", - "potentialy", "potentially", - "poulations", "populations", - "powerhorse", "powerhouse", - "powerhosue", "powerhouse", - "powerhours", "powerhouse", - "powerhsell", "powershell", - "powerprint", "powerpoint", - "powersehll", "powershell", - "ppublisher", "publisher", - "practially", "practically", - "practicaly", "practically", - "practicess", "practise", - "practiclly", "practically", - "practioner", "practitioner", - "precaucion", "precaution", - "precausion", "precaution", - "precautios", "precautions", - "precedance", "precedence", - "precedense", "precedence", - "preceeding", "preceding", - "precendece", "precedence", - "precentage", "percentage", - "precentile", "percentile", - "preciselly", "precisely", - "precuation", "precautions", - "precussion", "percussion", - "predecated", "predicated", - "predecence", "precedence", - "predecesor", "predecessor", - "predection", "prediction", - "predective", "predictive", - "prediccion", "prediction", - "prediceted", "predicated", - "predicited", "predicated", - "predicitng", "predicting", - "prediciton", "prediction", - "predicitve", "predictive", - "predickted", "predicated", - "predictave", "predictive", - "predictivo", "prediction", - "predictons", "predictions", - "predjuiced", "prejudiced", - "predjuices", "prejudices", - "preduction", "prediction", - "preductive", "predictive", - "predujiced", "prejudiced", - "predujices", "prejudices", - "prefarable", "preferable", - "prefarably", "preferably", - "prefection", "perfection", - "preferance", "preference", - "prefereble", "preferable", - "preferente", "preference", - "preferenze", "preference", - "preferible", "preferable", - "preferibly", "preferably", - "prefernece", "preferences", - "preformers", "performers", - "pregancies", "pregnancies", - "pregnanies", "pregnancies", - "preipheral", "peripheral", - "preisdents", "presidents", - "preisthood", "priesthood", - "prejeduced", "prejudiced", - "prejeduces", "prejudices", - "prejiduced", "prejudiced", - "prejiduces", "prejudices", - "prejucided", "prejudiced", - "prejucides", "prejudices", - "prejuduced", "prejudiced", - "prejuduces", "prejudices", - "prelimiary", "preliminary", - "prematurly", "prematurely", - "preminence", "preeminence", - 
"premission", "permission", - "preorderes", "preorders", - "prepartion", "preparation", - "prepetuate", "perpetuate", - "preposters", "preposterous", - "prescients", "presidents", - "prescirbed", "prescribed", - "prescriped", "prescribed", - "presearing", "preserving", - "presecuted", "persecuted", - "presedency", "presidency", - "presedents", "presidents", - "presenning", "presenting", - "presentase", "presents", - "presentato", "presentation", - "presention", "presenting", - "presentors", "presents", - "preservare", "preserve", - "preservato", "preservation", - "preserverd", "preserved", - "presidancy", "presidency", - "presidante", "presidents", - "presidenta", "presidential", - "presidenty", "presidency", - "presidunce", "presidency", - "presistent", "persistent", - "presonally", "personally", - "presonhood", "personhood", - "pressuming", "pressuring", - "prestigios", "prestigious", - "prestigous", "prestigious", - "presuambly", "presumably", - "presuasion", "persuasion", - "presuasive", "persuasive", - "presumebly", "presumably", - "presumendo", "presumed", - "presumibly", "presumably", - "presumpton", "presumption", - "pretaining", "pertaining", - "pretection", "protection", - "pretendias", "pretends", - "pretensive", "pretense", - "pretentios", "pretentious", - "pretentous", "pretentious", - "prevalecen", "prevalence", - "prevalente", "prevalence", - "prevencion", "prevention", - "preventivo", "prevention", - "preventors", "prevents", - "previaling", "prevailing", - "previosuly", "previously", - "previoulsy", "previously", - "prevolence", "prevalence", - "pricinpals", "principals", - "primarilly", "primarily", - "primatives", "primitives", - "princepals", "principals", - "princesess", "princesses", - "princibles", "principles", - "principaly", "principality", - "principels", "principals", - "principial", "principal", - "principias", "principals", - "principlas", "principals", - "prinicipal", "principal", - "prinicpals", "principals", - "prinicples", "principles", - "printerest", "printers", - "prioratize", "prioritize", - "prioretize", "prioritize", - "prioritice", "prioritize", - "prioritied", "prioritize", - "prioroties", "priorities", - "priorotize", "prioritize", - "priotities", "priorities", - "priotitize", "prioritize", - "privaleged", "privileged", - "privaleges", "privileges", - "privaticed", "privatized", - "privelaged", "privileged", - "privelages", "privileges", - "priveldges", "privileges", - "priveleged", "privileged", - "priveleges", "privileges", - "privelidge", "privileged", - "priveliged", "privileged", - "priveliges", "privileges", - "privetized", "privatized", - "privilaged", "privileged", - "privilages", "privileges", - "priviledge", "privilege", - "privilegde", "privileges", - "privilegie", "privilege", - "priviliged", "privileged", - "priviliges", "privileges", - "privitazed", "privatized", - "privitized", "privatized", - "probabiliy", "probability", - "probabilty", "probability", - "probablies", "probable", - "probablybe", "probable", - "problemita", "problematic", - "procalimed", "proclaimed", - "procceding", "proceeding", - "procedding", "proceeding", - "procederal", "procedural", - "procedings", "proceedings", - "procedrual", "procedural", - "proceededs", "proceeds", - "proceedure", "procedure", - "proceesing", "proceeding", - "processsor", "processors", - "proclamied", "proclaimed", - "proclaming", "proclaiming", - "procliamed", "proclaimed", - "procreatin", "procreation", - "procudures", "procedures", - "prodcution", "production", - "prodecural", 
"procedural", - "prodecures", "procedures", - "produccion", "production", - "produceras", "produces", - "produceres", "produces", - "producirse", "producers", - "produciton", "production", - "producting", "production", - "productino", "productions", - "productivo", "production", - "productivy", "productivity", - "productoin", "productions", - "produktion", "production", - "produktive", "productive", - "produtcion", "productions", - "profesions", "profession", - "professers", "professors", - "professorn", "profession", - "professsor", "professors", - "proffesion", "profession", - "proficeint", "proficient", - "proficiant", "proficient", - "proficieny", "proficiency", - "proficincy", "proficiency", - "profitabel", "profitable", - "profitabil", "profitable", - "profitible", "profitable", - "proftiable", "profitable", - "programmar", "programmer", - "programmme", "programme", - "progresing", "progressing", - "progresion", "progression", - "progresive", "progressive", - "progressie", "progressives", - "progressin", "progression", - "progresson", "progression", - "progressos", "progresses", - "progressus", "progresses", - "prohibirte", "prohibit", - "prohibites", "prohibits", - "prohibitng", "prohibiting", - "prohibiton", "prohibition", - "prohibitus", "prohibits", - "prohibitve", "prohibited", - "prohobited", "prohibited", - "prohpecies", "prophecies", - "projecitle", "projectiles", - "projectiel", "projectiles", - "projecties", "projectiles", - "projectils", "projectiles", - "projectles", "projectiles", - "projectlie", "projectiles", - "projectyle", "projectile", - "projektile", "projectile", - "projektion", "projection", - "prometheas", "prometheus", - "promethese", "prometheus", - "promethius", "prometheus", - "promethous", "prometheus", - "promethues", "prometheus", - "prominance", "prominence", - "prominenty", "prominently", - "prominetly", "prominently", - "promiscous", "promiscuous", - "promiscuos", "promiscuous", - "promoteurs", "promotes", - "promotheus", "prometheus", - "promotinal", "promotional", - "pronoucned", "pronounced", - "pronouning", "pronouncing", - "propechies", "prophecies", - "propencity", "propensity", - "propenents", "proponents", - "properites", "properties", - "propersity", "propensity", - "propertion", "proportion", - "propertius", "properties", - "prophacies", "prophecies", - "prophocies", "prophecies", - "propietary", "proprietary", - "proplusion", "propulsion", - "propoganda", "propaganda", - "propogates", "propagates", - "propolsion", "propulsion", - "proponants", "proponents", - "proponenet", "proponent", - "proporcion", "proportion", - "proporties", "properties", - "proporting", "proportion", - "propositon", "proposition", - "propotions", "proportions", - "proprietry", "proprietary", - "proprotion", "proportion", - "propserity", "prosperity", - "propserous", "prosperous", - "propulaios", "propulsion", - "propulsing", "propulsion", - "propultion", "propulsion", - "propuslion", "propulsion", - "prosectued", "prosecuted", - "prosectuor", "prosecutor", - "prosecuter", "prosecutor", - "prosecutie", "prosecuted", - "prosicuted", "prosecuted", - "prosicutor", "prosecutor", - "prosocuted", "prosecuted", - "prosparity", "prosperity", - "prospectos", "prospects", - "prosperety", "prosperity", - "prospertiy", "prosperity", - "prosphetic", "prosthetic", - "prosporous", "prosperous", - "prostehtic", "prosthetic", - "prosterity", "prosperity", - "prostethic", "prosthetic", - "prostitite", "prostitute", - "prostitude", "prostitute", - "prostituee", "prostitute", - 
"prostituer", "prostitute", - "prostitues", "prostitutes", - "prostiture", "prostitute", - "prostituto", "prostitution", - "prostituye", "prostitute", - "protaginst", "protagonist", - "protastant", "protestant", - "proteccion", "protection", - "proteciton", "protections", - "protectice", "protective", - "protectiei", "protective", - "protectoin", "protections", - "protectons", "protectors", - "protectron", "protection", - "protestans", "protests", - "protestare", "protesters", - "protestato", "protestant", - "protestent", "protestant", - "protestina", "protestant", - "prothsetic", "prosthetic", - "protistant", "protestant", - "protocoles", "protocols", - "protocolls", "protocols", - "protocolos", "protocols", - "protohypes", "prototypes", - "protostant", "protestant", - "prototipes", "prototypes", - "prototpyes", "prototypes", - "protraying", "portraying", - "protuguese", "portuguese", - "provencial", "provincial", - "proveribal", "proverbial", - "provervial", "proverbial", - "providance", "providence", - "providince", "providence", - "provinciae", "province", - "provincies", "province", - "provincija", "provincial", - "provinence", "providence", - "provinical", "provincial", - "provintial", "provincial", - "provinvial", "provincial", - "provisiosn", "provision", - "provisonal", "provisional", - "provocatie", "provocative", - "pscyhology", "psychology", - "pscyhopath", "psychopath", - "pshycology", "psychology", - "pshycopath", "psychopath", - "psychedlic", "psychedelic", - "psychiatic", "psychiatric", - "psycholoog", "psychology", - "psychopaat", "psychopath", - "psychopats", "psychopaths", - "ptichforks", "pitchforks", - "publicitan", "publication", - "publisheed", "published", - "publisherr", "publisher", - "publishher", "publisher", - "publissher", "publisher", - "publlisher", "publisher", - "punihsment", "punishments", - "punishemnt", "punishments", - "punishible", "punishable", - "punishmnet", "punishments", - "punissable", "punishable", - "punsihable", "punishable", - "purchacing", "purchasing", - "purpolsion", "propulsion", - "purposedly", "purposely", - "purposelly", "purposely", - "purpotedly", "purportedly", - "pususading", "persuading", - "pyschology", "psychology", - "pyschopath", "psychopath", - "qaulifiers", "qualifiers", - "quailfiers", "qualifiers", - "qualfiiers", "qualifiers", - "qualifieds", "qualifies", - "qualifiies", "qualifiers", - "qualifiing", "qualifying", - "qualifires", "qualifiers", - "qualifyers", "qualifiers", - "qualitying", "qualifying", - "quanitites", "quantities", - "quantaties", "quantities", - "quantitize", "quantities", - "quarantena", "quarantine", - "quarantene", "quarantine", - "quarantied", "quarantine", - "quarintine", "quarantine", - "quaruntine", "quarantine", - "quesitoned", "questioned", - "questional", "questionable", - "questionne", "questioned", - "rabinnical", "rabbinical", - "radiactive", "radioactive", - "radioacive", "radioactive", - "rainbowers", "rainbows", - "randmoness", "randomness", - "randomzied", "randomized", - "randonmess", "randomness", - "randumness", "randomness", - "raspberrry", "raspberry", - "rationalle", "rationale", - "readmition", "readmission", - "realitvely", "relatively", - "realtively", "relatively", - "realtivity", "relativity", - "reaserched", "researched", - "reasercher", "researcher", - "rebiulding", "rebuilding", - "reboudning", "rebounding", - "rebouncing", "rebounding", - "rebuidling", "rebuilding", - "rebuliding", "rebuilding", - "rebuplican", "republican", - "reccommend", "recommend", - "recepients", 
"recipients", - "receptoras", "receptors", - "receptores", "receptors", - "recgonised", "recognised", - "recgonized", "recognized", - "recgonizes", "recognizes", - "reciepents", "recipients", - "recipeints", "recipients", - "recipiants", "recipients", - "recocnised", "recognised", - "recoginsed", "recognised", - "recoginzed", "recognized", - "recognices", "recognizes", - "recogniton", "recognition", - "recognzied", "recognised", - "recomended", "recommended", - "recommande", "recommend", - "recommands", "recommends", - "recommeded", "recommended", - "recommened", "recommend", - "recommennd", "recommends", - "recomments", "recommends", - "recompence", "recompense", - "reconcider", "reconsider", - "reconcille", "reconcile", - "recongised", "recognised", - "recongized", "recognized", - "recongizes", "recognizes", - "reconisder", "reconsider", - "reconsiled", "reconsider", - "recordarle", "recorder", - "recordarme", "recorder", - "recordarse", "recorder", - "recordarte", "recorder", - "recreacion", "recreation", - "recreatief", "recreate", - "recreativo", "recreation", - "recrutiers", "recruiters", - "rectanglar", "rectangular", - "rectangual", "rectangular", - "rectanguar", "rectangular", - "recuriters", "recruiters", - "recurrance", "recurrence", - "recursivly", "recursively", - "redefinied", "redefine", - "redefinine", "redefine", - "redemtpion", "redemption", - "redepmtion", "redemption", - "redesiging", "redesign", - "rediculous", "ridiculous", - "redmeption", "redemption", - "redneckers", "rednecks", - "redneckese", "rednecks", - "redneckest", "rednecks", - "reduncancy", "redundancy", - "redundency", "redundancy", - "redundnacy", "redundancy", - "redunduncy", "redundancy", - "reenforced", "reinforced", - "reevaulate", "reevaluate", - "refedendum", "referendum", - "refelcting", "reflecting", - "refelction", "reflection", - "refelctive", "reflective", - "referances", "references", - "referandum", "referendum", - "referemces", "references", - "referemdum", "referendum", - "referendim", "referendum", - "referendom", "referendum", - "referenece", "reference", - "referening", "referencing", - "referenses", "referees", - "referentes", "references", - "referneces", "references", - "referrence", "reference", - "referundum", "referendum", - "refference", "reference", - "refleciton", "reflections", - "reflecters", "reflects", - "reflektion", "reflection", - "reflextion", "reflection", - "reformerad", "reformed", - "refrigerar", "refrigerator", - "refurbised", "refurbished", - "regenarate", "regenerate", - "registeres", "registers", - "registrato", "registration", - "regresives", "regressive", - "regressivo", "regression", - "regualting", "regulating", - "regualtion", "regulations", - "regualtors", "regulators", - "regulacion", "regulation", - "regulament", "regulate", - "regulaotrs", "regulators", - "regularily", "regularly", - "regularing", "regulating", - "regularlas", "regulars", - "regularlos", "regulars", - "regulaters", "regulators", - "regulatios", "regulators", - "regulatons", "regulations", - "rehtorical", "rhetorical", - "reinstaled", "reinstalled", - "reitrement", "retirement", - "relagation", "relaxation", - "relatation", "relaxation", - "relativety", "relativity", - "relativily", "relativity", - "relativley", "relatively", - "relavation", "relaxation", - "relaxating", "relaxation", - "relazation", "relaxation", - "releagtion", "relegation", - "relegetion", "relegation", - "relentness", "relentless", - "reletnless", "relentless", - "relevation", "revelation", - "relexation", 
"relegation", - "relfecting", "reflecting", - "relfection", "reflection", - "relfective", "reflective", - "reliabilty", "reliability", - "reliablely", "reliably", - "religiones", "religions", - "religiosly", "religiously", - "religiousy", "religiously", - "religously", "religiously", - "relitavely", "relatively", - "reluctanct", "reluctant", - "reluctanly", "reluctantly", - "reluctanty", "reluctantly", - "remarcably", "remarkably", - "remarkibly", "remarkably", - "rememberes", "remembers", - "remenicent", "reminiscent", - "reminisent", "reminiscent", - "reminscent", "reminiscent", - "remmebered", "remembered", - "renaissace", "renaissance", - "renderered", "rendered", - "renegerate", "regenerate", - "renewabels", "renewables", - "renewebles", "renewables", - "rennovated", "renovated", - "renweables", "renewables", - "repatition", "repetition", - "repblicans", "republicans", - "repbulican", "republican", - "repeadedly", "repeatedly", - "repeadetly", "repeatedly", - "repearable", "repeatable", - "repearedly", "repealed", - "repeatadly", "repeatedly", - "repeatedlt", "repealed", - "repeatetly", "repeatedly", - "repeatible", "repeatable", - "repeatidly", "repeatedly", - "repectable", "repeatable", - "repentable", "repeatable", - "repentence", "repentance", - "repersents", "represents", - "repetation", "repetition", - "repeteadly", "repeatedly", - "repetetion", "repetition", - "repeticion", "repetition", - "repetitivo", "repetition", - "replacated", "replicated", - "replaceble", "replaceable", - "replacemet", "replacements", - "replacemnt", "replacement", - "replacemtn", "replacements", - "replecated", "replicated", - "repoistory", "repository", - "reponsible", "responsible", - "reportadly", "reportedly", - "reporteros", "reporters", - "reportidly", "reportedly", - "repositary", "repository", - "reposotory", "repository", - "repostiory", "repository", - "representn", "representing", - "repressent", "represents", - "repressivo", "repression", - "repsectful", "respectful", - "repsecting", "respecting", - "repsective", "respective", - "repsonding", "responding", - "repsonsive", "responsive", - "reptuation", "reputation", - "repubicans", "republicans", - "republcian", "republican", - "republians", "republicans", - "republicon", "republican", - "repuglican", "republican", - "repulicans", "republicans", - "reputacion", "reputation", - "requirment", "requirement", - "requrement", "requirement", - "resemblace", "resemble", - "reserached", "researched", - "reseracher", "researchers", - "reserverad", "reserved", - "reservered", "reserved", - "residental", "residential", - "resistable", "resistible", - "resistanes", "resistances", - "resistanse", "resistances", - "resistence", "resistance", - "resistendo", "resisted", - "resistered", "resisted", - "resistnace", "resistances", - "resitsance", "resistances", - "resoltuion", "resolutions", - "resolucion", "resolution", - "resolutino", "resolutions", - "resolutoin", "resolutions", - "resolutons", "resolutions", - "resolvemos", "resolves", - "resolvendo", "resolved", - "resolveres", "resolves", - "resolverse", "resolves", - "resolviste", "resolves", - "resonabelt", "resonate", - "resoultion", "resolution", - "respecitve", "respective", - "respectifs", "respects", - "respection", "respecting", - "respectons", "respects", - "respectuos", "respects", - "respektive", "respective", - "respiratoy", "respiratory", - "responcive", "responsive", - "responisve", "responsive", - "responsibe", "responsive", - "responsiby", "responsibly", - "responsile", "responsive", - 
"responsing", "responding", - "ressembled", "resembled", - "restarants", "restaurants", - "restaraunt", "restaurant", - "restaruant", "restaurant", - "restatting", "restarting", - "restaurent", "restaurant", - "restauring", "restarting", - "resteraunt", "restaurant", - "restircted", "restricted", - "restorting", "restarting", - "restrainig", "restraining", - "restrcited", "restricted", - "restrcting", "restarting", - "restricing", "restricting", - "restricion", "restriction", - "restricive", "restrictive", - "restrictes", "restricts", - "restrictie", "restrictive", - "restricton", "restriction", - "restructed", "restricted", - "restuarant", "restaurant", - "resturants", "restaurants", - "resturaunt", "restaurant", - "retaliaton", "retaliation", - "rethorical", "rhetorical", - "retierment", "retirement", - "retribuito", "retribution", - "retrosepct", "retrospect", - "retrospekt", "retrospect", - "revaluated", "reevaluated", - "revealtion", "revelations", - "revelaiton", "revelations", - "revelatons", "revelations", - "revelution", "revelation", - "reversable", "reversible", - "reversably", "reversal", - "reviewtrue", "reviewer", - "revisiones", "revisions", - "revisionis", "revisions", - "revoltuion", "revolution", - "revoluiton", "revolutions", - "revolutoin", "revolutions", - "revoultion", "revolution", - "rewarching", "rewatching", - "rewatchibg", "rewatching", - "rewatchign", "rewatching", - "rewatchimg", "rewatching", - "rhapsodomy", "rhapsody", - "rhetorisch", "rhetoric", - "ridicilous", "ridiculous", - "ridicoulus", "ridiculous", - "ridiculise", "ridicule", - "ridiculize", "ridicule", - "ridiculled", "ridicule", - "ridiculose", "ridicule", - "ridiculued", "ridicule", - "rienforced", "reinforced", - "rigthfully", "rightfully", - "roleplaing", "roleplaying", - "romanmania", "romanian", - "roundaboot", "roundabout", - "rucuperate", "recuperate", - "rudimentry", "rudimentary", - "sacarmento", "sacramento", - "sacntioned", "sanctioned", - "sacraficed", "sacrificed", - "sacrafices", "sacrifices", - "sacramenno", "sacramento", - "sacreficed", "sacrificed", - "sacrefices", "sacrifices", - "sacremento", "sacramento", - "sacrifaced", "sacrificed", - "sacrifaces", "sacrifices", - "sacrifical", "sacrificial", - "sacrificas", "sacrifices", - "sacrificie", "sacrificed", - "sacrificng", "sacrificing", - "sacrifises", "sacrifices", - "sacrifized", "sacrificed", - "sacrifizes", "sacrifices", - "sacromento", "sacramento", - "sadistisch", "sadistic", - "sanctionne", "sanctioned", - "sandiwches", "sandwiches", - "sandviches", "sandwiches", - "sandwishes", "sandwiches", - "sanitazion", "sanitation", - "santiation", "sanitation", - "sastifying", "satisfying", - "satellitte", "satellites", - "satifsying", "satisfying", - "satrically", "satirically", - "satsifying", "satisfying", - "sattelites", "satellites", - "saturacion", "saturation", - "scandalosa", "scandals", - "scandalose", "scandals", - "scandalosi", "scandals", - "scandaloso", "scandals", - "scandaniva", "scandinavia", - "scandinava", "scandinavian", - "scandinvia", "scandinavia", - "scaramento", "sacramento", - "scarificed", "sacrificed", - "scarifices", "sacrifices", - "scarmbling", "scrambling", - "scartching", "scratching", - "sceintific", "scientific", - "sceintists", "scientists", - "scenarioes", "scenarios", - "scenarions", "scenarios", - "scenarious", "scenarios", - "scheudling", "scheduling", - "scholarhip", "scholarship", - "scholarley", "scholarly", - "sciencists", "scientists", - "scientests", "scientists", - "scirptures", 
"scriptures", - "scooterers", "scooters", - "scorebaord", "scoreboard", - "scoreborad", "scoreboard", - "scorebored", "scoreboard", - "scorpiomon", "scorpion", - "scracthing", "scratching", - "scramblies", "scramble", - "screenshat", "screenshot", - "screenshit", "screenshot", - "scriptores", "scriptures", - "scripturae", "scriptures", - "scriputres", "scriptures", - "scritpures", "scriptures", - "scrutinity", "scrutiny", - "seahawkers", "seahawks", - "sebastiaan", "sebastian", - "segegrated", "segregated", - "segragated", "segregated", - "segregaded", "segregated", - "segregatie", "segregated", - "segretated", "segregated", - "segrigated", "segregated", - "selectiose", "selections", - "selectivly", "selectively", - "selectivos", "selections", - "selfishess", "selfishness", - "senitments", "sentiments", - "sensitiviy", "sensitivity", - "sensitivty", "sensitivity", - "sentaments", "sentiments", - "sentancing", "sentencing", - "sentements", "sentiments", - "sentencian", "sentencing", - "sentensing", "sentencing", - "sentimenal", "sentimental", - "sentimetal", "sentimental", - "sentincing", "sentencing", - "sentinents", "sentiments", - "separacion", "separation", - "separaters", "separates", - "separatley", "separately", - "separatron", "separation", - "separetely", "separately", - "seperately", "separately", - "seperating", "separating", - "seperation", "separation", - "seperatism", "separatism", - "seperatist", "separatist", - "seperatley", "seperate", - "sepulchure", "sepulchre", - "serenitary", "serenity", - "serviceble", "serviceable", - "settelment", "settlement", - "settlemens", "settlements", - "settlemets", "settlements", - "settlemnts", "settlements", - "seuxalized", "sexualized", - "seventeeen", "seventeen", - "sexaulized", "sexualized", - "sexualixed", "sexualized", - "sexuallity", "sexually", - "sexualzied", "sexualized", - "sexulaized", "sexualized", - "shakespare", "shakespeare", - "shakespeer", "shakespeare", - "shakespere", "shakespeare", - "shamelesly", "shamelessly", - "shamelessy", "shamelessly", - "shaprening", "sharpening", - "shareholds", "shareholders", - "sharkening", "sharpening", - "sharpining", "sharpening", - "shartening", "sharpening", - "shatnering", "shattering", - "shattening", "shattering", - "shepharded", "shepherd", - "shilouette", "silhouette", - "shitlasses", "shitless", - "shortenend", "shortened", - "shortining", "shortening", - "sidelinien", "sideline", - "sidelinjen", "sideline", - "sidelinked", "sideline", - "sigantures", "signatures", - "sightstine", "sightstone", - "signficant", "significant", - "signifiant", "significant", - "significat", "significant", - "signitures", "signatures", - "sigthstone", "sightstone", - "sihlouette", "silhouette", - "silohuette", "silhouette", - "silouhette", "silhouette", - "similairty", "similarity", - "similarily", "similarly", - "similarlly", "similarly", - "similiarly", "similarly", - "similiarty", "similarity", - "simliarity", "similarity", - "simluation", "simulation", - "simplictic", "simplistic", - "simplifing", "simplifying", - "simplifyed", "simplified", - "simplifyng", "simplifying", - "simplisitc", "simplistic", - "simplisity", "simplicity", - "simplistes", "simplest", - "simplivity", "simplicity", - "simplyfied", "simplified", - "simualtion", "simulation", - "simulacion", "simulation", - "simulaiton", "simulations", - "simulaties", "simulate", - "simulative", "simulate", - "simulatons", "simulations", - "simulatore", "simulate", - "sincereley", "sincerely", - "sincerelly", "sincerely", - "singatures", 
"signatures", - "singulaire", "singular", - "singulariy", "singularity", - "singularty", "singularity", - "singulator", "singular", - "sitautions", "situations", - "situatinal", "situational", - "skatebaord", "skateboard", - "skateborad", "skateboard", - "skatebored", "skateboard", - "skatebrand", "skateboard", - "skeletones", "skeletons", - "skeptecism", "skepticism", - "skepticals", "skeptics", - "skepticles", "skeptics", - "skepticons", "skeptics", - "skeptisicm", "skepticism", - "skeptisism", "skepticism", - "sketchysex", "sketches", - "sketpicism", "skepticism", - "skillhosts", "skillshots", - "skillshits", "skillshots", - "skillshoot", "skillshots", - "skillslots", "skillshots", - "skillsofts", "skillshots", - "skillsshot", "skillshots", - "skirmiches", "skirmish", - "skpeticism", "skepticism", - "slaughterd", "slaughtered", - "slipperies", "slippers", - "smarpthone", "smartphones", - "smarthpone", "smartphone", - "snadwiches", "sandwiches", - "snowbaling", "snowballing", - "snowballes", "snowballs", - "snowballls", "snowballs", - "socailists", "socialists", - "socailized", "socialized", - "socialisim", "socialism", - "socializng", "socializing", - "socialsits", "socialists", - "sociapaths", "sociopaths", - "socilaists", "socialists", - "socilaized", "socialized", - "sociologia", "sociological", - "sociopatas", "sociopaths", - "sociopatch", "sociopaths", - "sociopatic", "sociopathic", - "socratease", "socrates", - "socreboard", "scoreboard", - "soemthings", "somethings", - "soldiarity", "solidarity", - "solidairty", "solidarity", - "soliditary", "solidarity", - "solitudine", "solitude", - "somehtings", "somethings", - "someonelse", "someones", - "somethibng", "somethin", - "somethigng", "somethin", - "somethigns", "somethings", - "somethihng", "somethin", - "somethiing", "somethin", - "somethijng", "somethin", - "somethikng", "somethin", - "somethimng", "somethin", - "somethinbg", "somethings", - "somethines", "somethings", - "somethinfg", "somethings", - "somethinhg", "somethings", - "somethinig", "somethings", - "somethinkg", "somethings", - "somethinks", "somethings", - "somethinmg", "somethings", - "somethinng", "somethings", - "somethintg", "somethings", - "somethiong", "somethin", - "somethiung", "somethin", - "sophicated", "sophisticated", - "sotrmfront", "stormfront", - "sotrylines", "storylines", - "soudntrack", "soundtrack", - "soundrtack", "soundtracks", - "soundtracs", "soundtracks", - "soundtrakc", "soundtracks", - "soundtrakk", "soundtrack", - "soundtraks", "soundtracks", - "southampon", "southampton", - "southamton", "southampton", - "southerers", "southerners", - "southernes", "southerners", - "southerton", "southern", - "souveniers", "souvenirs", - "sovereigny", "sovereignty", - "sovereinty", "sovereignty", - "soverignty", "sovereignty", - "spartaniis", "spartans", - "spartanops", "spartans", - "specailist", "specialist", - "specailize", "specializes", - "specialice", "specialize", - "specialied", "specialized", - "specialies", "specializes", - "specialits", "specials", - "speciallly", "specially", - "speciallty", "specially", - "specialops", "specials", - "specialsts", "specialists", - "specialtys", "specials", - "specialzed", "specialized", - "specialzes", "specializes", - "specifices", "specifics", - "specifiing", "specifying", - "specifiyng", "specifying", - "speciliast", "specialists", - "specimines", "specimen", - "spectarors", "spectators", - "spectaters", "spectators", - "spectracal", "spectral", - "spectraply", "spectral", - "spectrolab", "spectral", - 
"speculatie", "speculative", - "speculatin", "speculation", - "speecheasy", "speeches", - "speicalist", "specialist", - "spiritualy", "spiritually", - "sponsorees", "sponsors", - "sponsorhip", "sponsorship", - "sponsorise", "sponsors", - "spontaneos", "spontaneous", - "spontaneus", "spontaneous", - "spontanous", "spontaneous", - "spoonfulls", "spoonfuls", - "spreadshet", "spreadsheet", - "springfeld", "springfield", - "springfied", "springfield", - "spriritual", "spiritual", - "squirrells", "squirrels", - "squirrelus", "squirrels", - "stabelized", "stabilized", - "stabilzied", "stabilized", - "stablility", "stability", - "stablizied", "stabilized", - "staggaring", "staggering", - "stakeboard", "skateboard", - "starighten", "straighten", - "starnation", "starvation", - "startegies", "strategies", - "startupbus", "startups", - "starwberry", "strawberry", - "statememts", "statements", - "statictics", "statistics", - "stationair", "stationary", - "statisitcs", "statistics", - "statistcal", "statistical", - "statistisk", "statistics", - "stauration", "saturation", - "stealthboy", "stealthy", - "stealthely", "stealthy", - "stealthify", "stealthy", - "stealthray", "stealthy", - "steeleries", "steelers", - "stereotipe", "stereotype", - "stereotpye", "stereotypes", - "steriotype", "stereotype", - "steroetype", "stereotype", - "sterotypes", "stereotypes", - "steryotype", "stereotype", - "stimilants", "stimulants", - "stimilated", "stimulated", - "stimualted", "stimulated", - "stimulatie", "stimulated", - "stimulatin", "stimulation", - "stimulaton", "stimulation", - "stimulents", "stimulants", - "stomrfront", "stormfront", - "storelines", "storylines", - "stormfornt", "stormfront", - "stormfromt", "stormfront", - "stornfront", "stormfront", - "stornghold", "stronghold", - "stradegies", "strategies", - "strageties", "strategies", - "straighted", "straightened", - "straightie", "straighten", - "straightin", "straighten", - "straigthen", "straighten", - "stranglove", "strangle", - "strangreal", "strangle", - "stratagies", "strategies", - "strategems", "strategies", - "strategice", "strategies", - "strategisk", "strategies", - "stravation", "starvation", - "strawbarry", "strawberry", - "strawbeary", "strawberry", - "strawbeery", "strawberry", - "strawbrary", "strawberry", - "strawburry", "strawberry", - "streaching", "stretching", - "streamtrue", "streamer", - "strechting", "stretching", - "strecthing", "stretching", - "stregnthen", "strengthen", - "streichung", "stretching", - "strenghten", "strengthen", - "strengsten", "strengthen", - "strengthes", "strengths", - "strengthin", "strengthen", - "stressende", "stressed", - "striaghten", "straighten", - "stromfront", "stormfront", - "stronkhold", "stronghold", - "stroylines", "storylines", - "structered", "structured", - "structrual", "structural", - "structurel", "structural", - "strucutral", "structural", - "strucutred", "structured", - "strucutres", "structures", - "strugglign", "struggling", - "strwaberry", "strawberry", - "sttutering", "stuttering", - "stupidfree", "stupider", - "stupiditiy", "stupidity", - "sturctural", "structural", - "sturctures", "structures", - "sturggling", "struggling", - "subarmines", "submarines", - "subcultuur", "subculture", - "subesquent", "subsequent", - "subisdized", "subsidized", - "subjectief", "subjective", - "subjectifs", "subjects", - "subjectivy", "subjectively", - "subjektive", "subjective", - "submariens", "submarines", - "submarinas", "submarines", - "submergerd", "submerged", - "submerines", "submarines", - 
"submisison", "submissions", - "submissies", "submissive", - "submissons", "submissions", - "submittion", "submitting", - "subsadized", "subsidized", - "subscirbed", "subscribed", - "subscirber", "subscribers", - "subscribar", "subscriber", - "subscribir", "subscriber", - "subscrible", "subscriber", - "subscriped", "subscribed", - "subscrubed", "subscribed", - "subscryber", "subscriber", - "subsedized", "subsidized", - "subsequant", "subsequent", - "subsidezed", "subsidized", - "subsidiced", "subsidized", - "subsidizng", "subsidizing", - "subsiduary", "subsidiary", - "subsiquent", "subsequent", - "subsittute", "substitutes", - "subsizided", "subsidized", - "subsrcibed", "subscribed", - "substanial", "substantial", - "substansen", "substances", - "substanser", "substances", - "substanses", "substances", - "substantie", "substantive", - "substatial", "substantial", - "substences", "substances", - "substitite", "substitute", - "substittue", "substitutes", - "substitude", "substitute", - "substitued", "substitute", - "substituer", "substitute", - "substitues", "substitutes", - "substiture", "substitute", - "substituto", "substitution", - "substituts", "substitutes", - "substracts", "subtracts", - "substutite", "substitutes", - "subsudized", "subsidized", - "subtitltes", "subtitle", - "succceeded", "succeeded", - "succcesses", "successes", - "succesfuly", "successfully", - "succesions", "succession", - "successing", "succession", - "successivo", "succession", - "sucesfully", "successfully", - "sucessfull", "successful", - "sucessfuly", "successfully", - "sudnerland", "sunderland", - "sufferered", "suffered", - "sufferring", "suffering", - "sufficiant", "sufficient", - "suggestied", "suggestive", - "suggestief", "suggestive", - "suggestons", "suggests", - "sumbarines", "submarines", - "sumbissive", "submissive", - "sumbitting", "submitting", - "summerized", "summarized", - "summorized", "summarized", - "summurized", "summarized", - "sunderlona", "sunderland", - "sunderlund", "sunderland", - "sungalsses", "sunglasses", - "sunglesses", "sunglasses", - "sunglinger", "gunslinger", - "sunscreeen", "sunscreen", - "superfical", "superficial", - "superfluos", "superfluous", - "superioara", "superior", - "superioare", "superior", - "superioris", "superiors", - "superivsor", "supervisors", - "supermaket", "supermarket", - "supermarkt", "supermarket", - "superouman", "superhuman", - "superposer", "superpowers", - "superviors", "supervisors", - "superviosr", "supervisors", - "supervisar", "supervisor", - "superviser", "supervisor", - "supervisin", "supervision", - "supervison", "supervision", - "supervsior", "supervisors", - "supperssor", "suppressor", - "supplament", "supplement", - "supplemant", "supplemental", - "supplemets", "supplements", - "supportare", "supporters", - "supporteur", "supporter", - "supportied", "supported", - "supportors", "supporters", - "supposdely", "supposedly", - "supposebly", "supposedly", - "supposidly", "supposedly", - "suppresion", "suppression", - "suppresors", "suppressor", - "suppressin", "suppression", - "suppressio", "suppressor", - "suppresson", "suppression", - "suprassing", "surpassing", - "supressing", "suppressing", - "supression", "suppression", - "supsension", "suspension", - "supsicions", "suspicions", - "supsicious", "suspicious", - "surounding", "surrounding", - "surplanted", "supplanted", - "surpressed", "suppressed", - "surprizing", "surprising", - "surrenderd", "surrendered", - "surrouding", "surrounding", - "surroundes", "surrounds", - "surroundig", 
"surroundings", - "survivours", "survivor", - "suseptable", "susceptible", - "suseptible", "susceptible", - "suspecions", "suspicions", - "suspecious", "suspicious", - "suspencion", "suspension", - "suspendeds", "suspense", - "suspention", "suspension", - "suspicians", "suspicions", - "suspiciois", "suspicions", - "suspicioso", "suspicions", - "suspicioun", "suspicion", - "suspicison", "suspicions", - "suspiciuos", "suspicions", - "suspicsion", "suspicions", - "suspisions", "suspicions", - "suspisious", "suspicious", - "suspitions", "suspicions", - "sustainble", "sustainable", - "swaetshirt", "sweatshirt", - "swearengin", "swearing", - "swearshirt", "sweatshirt", - "sweathsirt", "sweatshirt", - "sweatshits", "sweatshirt", - "sweatshort", "sweatshirt", - "sweatshrit", "sweatshirt", - "sweerheart", "sweetheart", - "sweetshart", "sweetheart", - "switcheasy", "switches", - "switzerand", "switzerland", - "symapthize", "sympathize", - "symbolisch", "symbolic", - "symbolisim", "symbolism", - "symetrical", "symmetrical", - "sympatheic", "sympathetic", - "sympathiek", "sympathize", - "sympathien", "sympathize", - "sympathtic", "sympathetic", - "sympathyze", "sympathize", - "sympethize", "sympathize", - "symphatize", "sympathize", - "symphonity", "symphony", - "sympothize", "sympathize", - "syncronous", "synchronous", - "synomymous", "synonymous", - "synomynous", "synonymous", - "synonamous", "synonymous", - "synonimous", "synonymous", - "synonmyous", "synonymous", - "synonomous", "synonymous", - "synonumous", "synonymous", - "synonynous", "synonymous", - "sypmathize", "sympathize", - "systamatic", "systematic", - "systemetic", "systematic", - "systemisch", "systemic", - "systimatic", "systematic", - "tabelspoon", "tablespoon", - "tablespons", "tablespoons", - "tablesppon", "tablespoon", - "tacitcally", "tactically", - "taiwanesse", "taiwanese", - "taligating", "tailgating", - "tantrumers", "tantrums", - "targetting", "targeting", - "teamfigths", "teamfights", - "teamifghts", "teamfights", - "teamspeack", "teamspeak", - "techicians", "technicians", - "techincian", "technician", - "techinican", "technician", - "techinques", "techniques", - "technicain", "technician", - "technicaly", "technically", - "technicans", "technicians", - "technichan", "technician", - "technicien", "technician", - "technicion", "technician", - "technitian", "technician", - "technqiues", "techniques", - "techtician", "technician", - "tehnically", "ethnically", - "telegrapgh", "telegraph", - "teleporing", "teleporting", - "televesion", "television", - "televisivo", "television", - "temafights", "teamfights", - "temerature", "temperature", - "temperatue", "temperature", - "temperment", "temperament", - "temperture", "temperature", - "templarios", "templars", - "templarius", "templars", - "temporaily", "temporarily", - "temporarly", "temporary", - "temptating", "temptation", - "temptetion", "temptation", - "tendancies", "tendencies", - "tendencias", "tendencies", - "tendencije", "tendencies", - "tendensies", "tendencies", - "tendincies", "tendencies", - "tensionors", "tensions", - "tentacreul", "tentacle", - "termanator", "terminator", - "termendous", "tremendous", - "termiantor", "terminator", - "termigator", "terminator", - "terminales", "terminals", - "terminalis", "terminals", - "terminarla", "terminal", - "terminarlo", "terminal", - "terminaron", "terminator", - "terminater", "terminator", - "terminolgy", "terminology", - "terorrists", "terrorists", - "terrerists", "terrorists", - "terrestial", "terrestrial", - "terriblely", 
"terribly", - "terriories", "territories", - "territoral", "territorial", - "territores", "territories", - "territoris", "territories", - "territorry", "territory", - "terrorisim", "terrorism", - "terrorsits", "terrorists", - "terrurists", "terrorists", - "testiclees", "testicles", - "testiclies", "testicle", - "testimoney", "testimony", - "thankyooou", "thankyou", - "themselfes", "themselves", - "themsevles", "themselves", - "themsleves", "themselves", - "theocracry", "theocracy", - "theologial", "theological", - "therapetic", "therapeutic", - "therepists", "therapists", - "theripists", "therapists", - "thermastat", "thermostat", - "thermistat", "thermostat", - "thermomter", "thermometer", - "theromstat", "thermostat", - "thorttling", "throttling", - "thorughout", "throughout", - "thouroghly", "thoroughly", - "threadened", "threaded", - "threatenes", "threatens", - "threatning", "threatening", - "threshhold", "threshold", - "throthling", "throttling", - "throtlling", "throttling", - "throughiut", "throughput", - "thubmnails", "thumbnails", - "thumbmails", "thumbnails", - "thunderbot", "thunderbolt", - "thunderolt", "thunderbolt", - "tighetning", "tightening", - "tightining", "tightening", - "tigthening", "tightening", - "tjpanishad", "upanishad", - "toothbruch", "toothbrush", - "toothbruth", "toothbrush", - "toothbursh", "toothbrush", - "toothrbush", "toothbrush", - "toppingest", "toppings", - "torchilght", "torchlight", - "torchlgiht", "torchlight", - "torchligth", "torchlight", - "torhclight", "torchlight", - "torrentbig", "torrenting", - "torrenters", "torrents", - "torrentors", "torrents", - "tortillera", "tortilla", - "tortillias", "tortilla", - "tortillita", "tortilla", - "tortilllas", "tortilla", - "torunament", "tournament", - "totalitara", "totalitarian", - "touchsceen", "touchscreen", - "touchscren", "touchscreen", - "touranment", "tournaments", - "tourmanent", "tournaments", - "tournamets", "tournaments", - "tournamnet", "tournament", - "tournemant", "tournament", - "tournement", "tournament", - "toxicitity", "toxicity", - "trafficing", "trafficking", - "trainwreak", "trainwreck", - "traitorise", "traitors", - "tramboline", "trampoline", - "tramploine", "trampoline", - "trampolene", "trampoline", - "tranformed", "transformed", - "tranistion", "transition", - "tranlsated", "translated", - "transalted", "translated", - "transaltes", "translates", - "transaltor", "translator", - "transation", "transition", - "transciprt", "transcripts", - "transcirpt", "transcripts", - "transcrips", "transcripts", - "transcrito", "transcript", - "transcrits", "transcripts", - "transcrpit", "transcript", - "transfered", "transferred", - "transferer", "transferred", - "transferes", "transfers", - "transferrs", "transfers", - "transferts", "transfers", - "transfomed", "transformed", - "transfored", "transformed", - "transforme", "transfer", - "transfroms", "transforms", - "transgeder", "transgender", - "transgener", "transgender", - "transicion", "transition", - "transision", "transition", - "transister", "transistor", - "transitons", "transitions", - "transitors", "transistor", - "transkript", "transcript", - "translater", "translator", - "translatin", "translations", - "translatio", "translator", - "translpant", "transplants", - "transluent", "translucent", - "transmited", "transmitted", - "transmiter", "transmitter", - "transmitor", "transistor", - "transmorgs", "transforms", - "transpalnt", "transplants", - "transphoic", "transphobic", - "transplain", "transplant", - "transplate", "transplant", - 
"transplats", "transplants", - "transpoder", "transported", - "transportr", "transporter", - "transsexal", "transsexual", - "transtator", "translator", - "tranzistor", "transistor", - "trasncript", "transcript", - "trasnforms", "transforms", - "trasnlated", "translated", - "trasnlator", "translator", - "trasnplant", "transplant", - "traveleres", "travelers", - "travelodge", "traveled", - "traverlers", "traverse", - "traversare", "traverse", - "traversier", "traverse", - "treasurery", "treasury", - "trememdous", "tremendous", - "tremondous", "tremendous", - "trespasing", "trespassing", - "trianwreck", "trainwreck", - "trochlight", "torchlight", - "trustworhy", "trustworthy", - "trustworty", "trustworthy", - "trustwothy", "trustworthy", - "tryannical", "tyrannical", - "tunraround", "turnaround", - "tupparware", "tupperware", - "turnapound", "turnaround", - "turthfully", "truthfully", - "tutoriales", "tutorials", - "tyrantical", "tyrannical", - "ubiqituous", "ubiquitous", - "ubiquotous", "ubiquitous", - "ubiqutious", "ubiquitous", - "ukrainains", "ukrainians", - "ukraineans", "ukrainians", - "ukrainiens", "ukrainians", - "ukraininas", "ukrainians", - "ukrianians", "ukrainians", - "ulitmately", "ultimately", - "ulterioara", "ulterior", - "ulterioare", "ulterior", - "ultimative", "ultimate", - "ultimatley", "ultimately", - "ultimatuum", "ultimatum", - "unanwsered", "unanswered", - "unasnwered", "unanswered", - "unattanded", "unattended", - "unattented", "unattended", - "unavailabe", "unavailable", - "unavailble", "unavailable", - "unavoidble", "unavoidable", - "unawnsered", "unanswered", - "unbalenced", "unbalanced", - "unballance", "unbalance", - "unbalnaced", "unbalanced", - "unbareable", "unbearable", - "unbeakable", "unbeatable", - "unbeareble", "unbearable", - "unbeatbale", "unbeatable", - "unbeateble", "unbeatable", - "unbeerable", "unbearable", - "unbeetable", "unbeatable", - "unbeknowst", "unbeknownst", - "unbreakble", "unbreakable", - "uncencored", "uncensored", - "uncensered", "uncensored", - "uncersored", "uncensored", - "uncertainy", "uncertainty", - "uncertanty", "uncertainty", - "uncesnored", "uncensored", - "uncomitted", "uncommitted", - "uncommited", "uncommitted", - "unconcious", "unconscious", - "unconscous", "unconscious", - "undebiably", "undeniably", - "undeinable", "undeniable", - "undeinably", "undeniably", - "undenaible", "undeniable", - "undenaibly", "undeniably", - "undenyable", "undeniable", - "undenyably", "undeniably", - "underbaker", "undertaker", - "undercling", "underlying", - "underfaker", "undertaker", - "undergated", "underrated", - "undergrand", "undergrad", - "undergroud", "underground", - "undergrund", "underground", - "undermimes", "undermines", - "underminde", "undermines", - "underminig", "undermining", - "underneeth", "underneath", - "underneith", "underneath", - "undernieth", "underneath", - "underpowed", "underpowered", - "underraged", "underrated", - "underraker", "undertaker", - "underrater", "undertaker", - "undersatnd", "understands", - "understadn", "understands", - "understans", "understands", - "understnad", "understands", - "understoon", "understood", - "understsnd", "understands", - "undertoker", "undertaker", - "undertsand", "understands", - "undertunes", "undertones", - "underwager", "underwater", - "underwares", "underwater", - "underwolrd", "underworld", - "underwoord", "underworld", - "underwrold", "underworld", - "underyling", "underlying", - "undesrtand", "understands", - "undoubtedy", "undoubtedly", - "undoubtely", "undoubtedly", - 
"undoubtley", "undoubtedly", - "uneccesary", "unnecessary", - "unecessary", "unnecessary", - "unedcuated", "uneducated", - "unedicated", "uneducated", - "unempolyed", "unemployed", - "unexplaind", "unexplained", - "unexplaned", "unexplained", - "unfamilair", "unfamiliar", - "unfamilier", "unfamiliar", - "unfinsihed", "unfinished", - "unfirendly", "unfriendly", - "unfortuate", "unfortunate", - "unfreindly", "unfriendly", - "unfriednly", "unfriendly", - "unfriently", "unfriendly", - "ungrapeful", "ungrateful", - "ungreatful", "ungrateful", - "unhealthly", "unhealthy", - "unicornios", "unicorns", - "unifnished", "unfinished", - "unihabited", "uninhabited", - "unilatreal", "unilateral", - "unimporant", "unimportant", - "unimpresed", "unimpressed", - "unimpressd", "unimpressed", - "uninsipred", "uninspired", - "uninspried", "uninspired", - "uninstaled", "uninstalled", - "uniquiness", "uniqueness", - "univercity", "university", - "univeristy", "university", - "universale", "universe", - "universaly", "universally", - "universels", "universes", - "universets", "universes", - "universite", "universities", - "universtiy", "university", - "unjustifed", "unjustified", - "unknowingy", "unknowingly", - "unknowinly", "unknowingly", - "unnecesary", "unnecessary", - "unofficail", "unofficial", - "unoffocial", "unofficial", - "unorginial", "unoriginal", - "unorignial", "unoriginal", - "unorigonal", "unoriginal", - "unplacable", "unplayable", - "unplaybale", "unplayable", - "unplayeble", "unplayable", - "unpleasent", "unpleasant", - "unpopulair", "unpopular", - "unproteced", "unprotected", - "unqiueness", "uniqueness", - "unqualifed", "unqualified", - "unrealesed", "unreleased", - "unrealible", "unreliable", - "unrealistc", "unrealistic", - "unrealitic", "unrealistic", - "unreasonal", "unreasonably", - "unrelaible", "unreliable", - "unreleated", "unreleased", - "unrelyable", "unreliable", - "unrepetant", "unrepentant", - "unrepetent", "unrepentant", - "unresponse", "unresponsive", - "unsencored", "uncensored", - "unsetlling", "unsettling", - "unsolicted", "unsolicited", - "unsubscibe", "unsubscribe", - "unsubscrbe", "unsubscribe", - "unsucesful", "unsuccessful", - "unsuprised", "unsurprised", - "unsuprized", "unsurprised", - "unviersity", "university", - "unwrittern", "unwritten", - "urkainians", "ukrainians", - "utlimately", "ultimately", - "utlrasound", "ultrasound", - "vaccinatie", "vaccinated", - "vaccineras", "vaccines", - "valentians", "valentines", - "valentiens", "valentines", - "valentimes", "valentines", - "valentinas", "valentines", - "valentinos", "valentines", - "valentones", "valentines", - "validitity", "validity", - "valnetines", "valentines", - "vandalisim", "vandalism", - "vasectomey", "vasectomy", - "vegatarian", "vegetarian", - "vegaterian", "vegetarian", - "vegeratian", "vegetarians", - "vegetairan", "vegetarians", - "vegetarain", "vegetarians", - "vegetarien", "vegetarian", - "vegetarion", "vegetarian", - "vegetatian", "vegetarian", - "vegeterian", "vegetarian", - "vegitables", "vegetables", - "vehemantly", "vehemently", - "vehemontly", "vehemently", - "veitnamese", "vietnamese", - "veiwership", "viewership", - "veiwpoints", "viewpoints", - "venezuella", "venezuela", - "verificato", "verification", - "verifyable", "verifiable", - "veritcally", "vertically", - "veritiable", "verifiable", - "vernecular", "vernacular", - "vernicular", "vernacular", - "versatiliy", "versatility", - "versatille", "versatile", - "versatilty", "versatility", - "versitlity", "versatility", - "vewiership", 
"viewership", - "vibratoare", "vibrator", - "vicitmized", "victimized", - "vicotrious", "victorious", - "victemized", "victimized", - "victomized", "victimized", - "victorinos", "victorious", - "victorinus", "victorious", - "victoriosa", "victorious", - "victorioso", "victorious", - "victoriuos", "victorious", - "victumized", "victimized", - "videogaems", "videogames", - "videojames", "videogames", - "vidoegames", "videogames", - "vientamese", "vietnamese", - "vietmanese", "vietnamese", - "vietnamees", "vietnamese", - "vietnamise", "vietnamese", - "viewpionts", "viewpoints", - "vigilantie", "vigilante", - "vigoruosly", "vigorously", - "vigourosly", "vigorously", - "villageois", "villages", - "vindicitve", "vindictive", - "vindictave", "vindictive", - "visibiltiy", "visibility", - "vitenamese", "vietnamese", - "vocabluary", "vocabulary", - "volatiltiy", "volatility", - "volativity", "volatility", - "volitality", "volatility", - "volleyboll", "volleyball", - "vollyeball", "volleyball", - "volonteers", "volunteers", - "volounteer", "volunteer", - "voluntairy", "voluntarily", - "voluntarly", "voluntary", - "voluntears", "volunteers", - "volunteeer", "volunteers", - "volunteerd", "volunteered", - "voluntered", "volunteered", - "vulernable", "vulnerable", - "vulnarable", "vulnerable", - "vulnerabil", "vulnerable", - "vulnurable", "vulnerable", - "vunlerable", "vulnerable", - "warrandyte", "warranty", - "warrantles", "warranties", - "warrenties", "warranties", - "washignton", "washington", - "waterlemon", "watermelon", - "watermalon", "watermelon", - "waterproff", "waterproof", - "wavelegnth", "wavelength", - "wavelenghs", "wavelength", - "wavelenght", "wavelength", - "weakensses", "weaknesses", - "weaknesess", "weaknesses", - "weathliest", "wealthiest", - "wedensdays", "wednesdays", - "wednesdsay", "wednesdays", - "wednessday", "wednesdays", - "wednsedays", "wednesdays", - "weightened", "weighted", - "welathiest", "wealthiest", - "wellignton", "wellington", - "wellingotn", "wellington", - "wendesdays", "wednesdays", - "wereabouts", "whereabouts", - "westbroook", "westbrook", - "westernese", "westerners", - "westerness", "westerners", - "westminser", "westminster", - "westminter", "westminster", - "whatosever", "whatsoever", - "whatseover", "whatsoever", - "whipsering", "whispering", - "whsipering", "whispering", - "widepsread", "widespread", - "wikileakes", "wikileaks", - "wilderniss", "wilderness", - "wildreness", "wilderness", - "willfullly", "willfully", - "winchestor", "winchester", - "windhsield", "windshield", - "windsheild", "windshield", - "windshiled", "windshield", - "wisconsion", "wisconsin", - "wishpering", "whispering", - "withdrawan", "withdrawn", - "withdrawel", "withdrawal", - "withdrawin", "withdrawn", - "withholdng", "withholding", - "withrdawal", "withdrawals", - "witnissing", "witnessing", - "wonderfull", "wonderful", - "wonderfuly", "wonderfully", - "wonderwand", "wonderland", - "worhsiping", "worshiping", - "workingest", "workings", - "workstaion", "workstation", - "workstaton", "workstation", - "worshippig", "worshipping", - "worshoping", "worshiping", - "wrestlewar", "wrestler", - "xenohpobic", "xenophobic", - "xenophibia", "xenophobia", - "xenophibic", "xenophobic", - "xenophonic", "xenophobic", - "xenophopia", "xenophobia", - "xenophopic", "xenophobic", - "xeonphobia", "xenophobia", - "xeonphobic", "xenophobic", - "yourselfes", "yourselves", - "yoursleves", "yourselves", - "zimbabwaen", "zimbabwe", - "zionistisk", "zionists", - "abandonig", "abandoning", - "abandonne", 
"abandonment", - "abanonded", "abandoned", - "abdomnial", "abdominal", - "abdonimal", "abdominal", - "aberation", "aberration", - "abnormaly", "abnormally", - "abodminal", "abdominal", - "abondoned", "abandoned", - "aborigene", "aborigine", - "aboslutes", "absolutes", - "abosrbing", "absorbing", - "abreviate", "abbreviate", - "abritrary", "arbitrary", - "abruptley", "abruptly", - "absailing", "abseiling", - "absloutes", "absolutes", - "absolutey", "absolutely", - "absolutly", "absolutely", - "absoultes", "absolutes", - "abstracto", "abstraction", - "absurdley", "absurdly", - "absuridty", "absurdity", - "abusrdity", "absurdity", - "academica", "academia", - "accademic", "academic", - "accalimed", "acclaimed", - "accelerar", "accelerator", - "accending", "ascending", - "accension", "accession", - "accidenty", "accidently", - "acclamied", "acclaimed", - "accliamed", "acclaimed", - "accomdate", "accommodate", - "accordeon", "accordion", - "accordian", "accordion", - "accoridng", "according", - "accountas", "accountants", - "accountat", "accountants", - "accoustic", "acoustic", - "accroding", "according", - "accuraccy", "accuracy", - "acftually", "factually", - "acheiving", "achieving", - "achieveds", "achieves", - "achillees", "achilles", - "achilleos", "achilles", - "achilleus", "achilles", - "achiveing", "achieving", - "acitvates", "activates", - "aclhemist", "alchemist", - "acomplish", "accomplish", - "acquisito", "acquisition", - "acronymes", "acronyms", - "acronymns", "acronyms", - "acsending", "ascending", - "acsension", "ascension", - "activaste", "activates", - "activatin", "activation", - "activelly", "actively", - "activisim", "activism", - "activisit", "activist", - "activites", "activities", - "actresess", "actresses", - "acusation", "causation", - "acutality", "actuality", - "adavanced", "advanced", - "adbominal", "abdominal", - "additonal", "additional", - "addoptive", "adoptive", - "addresing", "addressing", - "addtional", "additional", - "adhearing", "adhering", - "adherance", "adherence", - "adjectivs", "adjectives", - "adjustabe", "adjustable", - "administr", "administer", - "admitedly", "admittedly", - "adolecent", "adolescent", - "adovcated", "advocated", - "adovcates", "advocates", - "adquiring", "acquiring", - "adresable", "addressable", - "adressing", "addressing", - "aduiobook", "audiobook", - "advatange", "advantage", - "adventurs", "adventures", - "adveristy", "adversity", - "advertisy", "adversity", - "advisorys", "advisors", - "aeorspace", "aerospace", - "aeropsace", "aerospace", - "aerosapce", "aerospace", - "aersopace", "aerospace", - "aestethic", "aesthetic", - "aethistic", "atheistic", - "affiliato", "affiliation", - "affinitiy", "affinity", - "affirmate", "affirmative", - "affliated", "affiliated", - "africanas", "africans", - "africanos", "africans", - "aggegrate", "aggregate", - "aggresive", "aggressive", - "agnosticm", "agnosticism", - "agregates", "aggregates", - "agreggate", "aggregate", - "agrentina", "argentina", - "agression", "aggression", - "agressive", "aggressive", - "agressvie", "agressive", - "agruement", "arguement", - "agruments", "arguments", - "agurement", "arguement", - "ailenated", "alienated", - "airbourne", "airborne", - "aircrafts", "aircraft", - "airplance", "airplane", - "airrcraft", "aircraft", - "aksreddit", "askreddit", - "alcehmist", "alchemist", - "alchemsit", "alchemist", - "alchimest", "alchemist", - "alchmeist", "alchemist", - "alchoolic", "alcoholic", - "alcoholis", "alcoholics", - "alechmist", "alchemist", - "alegience", 
"allegiance", - "aleinated", "alienated", - "algoriths", "algorithms", - "algoritms", "algorithms", - "algorthim", "algorithm", - "algortihm", "algorithm", - "alignemnt", "alignment", - "alimunium", "aluminium", - "alingment", "alignment", - "allainces", "alliances", - "alledgely", "allegedly", - "allegence", "allegiance", - "alleivate", "alleviate", - "allievate", "alleviate", - "alliviate", "alleviate", - "allopones", "allophones", - "allthough", "although", - "almightly", "almighty", - "alocholic", "alcoholic", - "alogrithm", "algorithm", - "alphabeat", "alphabet", - "alrightey", "alrighty", - "alrightly", "alrighty", - "alrightty", "alrighty", - "alrington", "arlington", - "alrorythm", "algorithm", - "alterante", "alternate", - "alternatr", "alternator", - "althetics", "athletics", - "althought", "although", - "altruisim", "altruism", - "amateures", "amateurs", - "ambluance", "ambulance", - "ambuigity", "ambiguity", - "amendmant", "amendment", - "amercians", "americans", - "americain", "american", - "americams", "americas", - "americaps", "americas", - "americats", "americas", - "amibguity", "ambiguity", - "aminosity", "animosity", - "amrstrong", "armstrong", - "amublance", "ambulance", - "amunition", "ammunition", - "anachrist", "anarchist", - "analagous", "analogous", - "analitycs", "analytics", - "analtyics", "analytics", - "analyitcs", "analytics", - "analyseas", "analyses", - "analysees", "analyses", - "analysens", "analyses", - "analysise", "analyses", - "analystes", "analysts", - "analzying", "analyzing", - "anarchsim", "anarchism", - "anayltics", "analytics", - "anaylzing", "analyzing", - "ancedotal", "anecdotal", - "ancedotes", "anecdotes", - "ancestory", "ancestry", - "androgeny", "androgyny", - "androides", "androids", - "androidos", "androids", - "anecdotle", "anecdote", - "anecodtal", "anecdotal", - "anecodtes", "anecdotes", - "anectodal", "anecdotal", - "anectodes", "anecdotes", - "anedoctal", "anecdotal", - "anedoctes", "anecdotes", - "animostiy", "animosity", - "anitvirus", "antivirus", - "anlaytics", "analytics", - "anniversy", "anniversary", - "annointed", "anointed", - "annoucnes", "announces", - "annoyingy", "annoyingly", - "annoymous", "anonymous", - "annoynace", "annoyance", - "annyoance", "annoyance", - "anomisity", "animosity", - "anomolies", "anomalies", - "anomolous", "anomalous", - "anomynity", "anonymity", - "anomynous", "anonymous", - "anonimity", "anonymity", - "anonmyous", "anonymous", - "anonymoys", "anonymously", - "anorexiac", "anorexic", - "anorexica", "anorexia", - "anrachist", "anarchist", - "ansestors", "ancestors", - "antarctia", "antarctica", - "antennaes", "antennas", - "antiviurs", "antivirus", - "antivrius", "antivirus", - "antivuris", "antivirus", - "anwsering", "answering", - "anynomity", "anonymity", - "anynomous", "anonymous", - "aparthide", "apartheid", - "aparthied", "apartheid", - "apartmens", "apartments", - "apocalype", "apocalypse", - "apostrope", "apostrophe", - "apparenty", "apparently", - "appearane", "appearances", - "appenines", "apennines", - "apperance", "appearance", - "appetitie", "appetite", - "applaudes", "applause", - "applicato", "application", - "appreciae", "appreciates", - "apprentie", "apprentice", - "approachs", "approaches", - "apratheid", "apartheid", - "apsaragus", "asparagus", - "apsergers", "aspergers", - "aquainted", "acquainted", - "arbirtary", "arbitrary", - "arbritary", "arbitrary", - "arcehtype", "archetype", - "archetect", "architect", - "archetpye", "archetype", - "archetyps", "archetypes", - 
"architecs", "architects", - "archtypes", "archetypes", - "aregument", "arguement", - "areospace", "aerospace", - "argessive", "agressive", - "argeument", "arguement", - "arguabley", "arguably", - "arguablly", "arguably", - "arguement", "argument", - "arguemnet", "arguement", - "arguemnts", "arguments", - "argumeent", "arguement", - "arhtritis", "arthritis", - "aribtrary", "arbitrary", - "ariplanes", "airplanes", - "aristolte", "aristotle", - "aristotel", "aristotle", - "aritfacts", "artifacts", - "arlignton", "arlington", - "arlingotn", "arlington", - "armistace", "armistice", - "armstorng", "armstrong", - "arpatheid", "apartheid", - "arthirtis", "arthritis", - "artifcats", "artifacts", - "artifical", "artificial", - "artillary", "artillery", - "arugement", "arguement", - "arugments", "arguments", - "asapragus", "asparagus", - "asbestoes", "asbestos", - "asborbing", "absorbing", - "asburdity", "absurdity", - "ascendend", "ascended", - "ascneding", "ascending", - "ascnesion", "ascension", - "asethetic", "aesthetic", - "asnwering", "answering", - "asociated", "associated", - "assasined", "assassinated", - "assassian", "assassin", - "assassine", "assassinate", - "assasssin", "assassins", - "assaultes", "assaults", - "assembeld", "assembled", - "assembley", "assembly", - "assemblie", "assemble", - "assisnate", "assassinate", - "assistans", "assistants", - "assistsnt", "assistants", - "assmebled", "assembled", - "associato", "association", - "assoicate", "associate", - "asssasins", "assassins", - "assualted", "assaulted", - "assulated", "assaulted", - "asteorids", "asteroids", - "astericks", "asterisk", - "asteriods", "asteroids", - "astroanut", "astronaut", - "astronuat", "astronaut", - "astrounat", "astronaut", - "asuterity", "austerity", - "atempting", "attempting", - "atheltics", "athletics", - "atheneans", "athenians", - "athesitic", "atheistic", - "athetlics", "athletics", - "athiestic", "atheistic", - "athleticm", "athleticism", - "atmosphir", "atmospheric", - "atributed", "attributed", - "atributes", "attributes", - "atrifacts", "artifacts", - "atrillery", "artillery", - "atrittion", "attrition", - "attachmet", "attachments", - "attaindre", "attainder", - "attemting", "attempting", - "attemtped", "attempted", - "attendent", "attendant", - "attension", "attention", - "attirbute", "attribute", - "attirtion", "attrition", - "attmepted", "attempted", - "attractes", "attracts", - "attractin", "attraction", - "attributo", "attribution", - "attributs", "attributes", - "attritube", "attribute", - "auctionrs", "auctions", - "auidobook", "audiobook", - "auromated", "automated", - "australin", "australians", - "authroity", "authority", - "autoattak", "autoattack", - "autogrpah", "autograph", - "autonomos", "autonomous", - "auxillary", "auxiliary", - "avaialble", "available", - "availible", "available", - "avalaible", "available", - "avaliable", "available", - "averageed", "averaged", - "avialable", "available", - "awakenend", "awakened", - "awesomley", "awesomely", - "awkawrdly", "awkwardly", - "awnsering", "answering", - "bacehlors", "bachelors", - "bachelour", "bachelor", - "bachleors", "bachelors", - "bacholers", "bachelors", - "backdooor", "backdoor", - "backfeild", "backfield", - "backfiled", "backfield", - "backgroud", "background", - "backpakcs", "backpacks", - "badnwagon", "bandwagon", - "badnwidth", "bandwidth", - "balckjack", "blackjack", - "balcklist", "blacklist", - "balitmore", "baltimore", - "ballisitc", "ballistic", - "ballsitic", "ballistic", - "balsphemy", "blasphemy", - 
"bandiwdth", "bandwidth", - "bandwdith", "bandwidth", - "bandwidht", "bandwidth", - "bandwitdh", "bandwidth", - "bankrupcy", "bankruptcy", - "bankrupty", "bankruptcy", - "banruptcy", "bankruptcy", - "baordwalk", "boardwalk", - "barabrian", "barbarian", - "barbarain", "barbarian", - "barbarina", "barbarian", - "barcelets", "bracelets", - "barcleona", "barcelona", - "bareclona", "barcelona", - "barrackus", "barracks", - "bascially", "basically", - "bastardes", "bastards", - "bastardos", "bastards", - "bastardus", "bastards", - "bathrooom", "bathroom", - "batlimore", "baltimore", - "battailon", "battalion", - "battlaion", "battalion", - "beahviour", "behaviour", - "beauitful", "beautiful", - "beautifyl", "beautifully", - "becnhmark", "benchmark", - "becomeing", "becoming", - "becomming", "becoming", - "beehtoven", "beethoven", - "begginers", "beginners", - "beggining", "beginning", - "begininng", "beginning", - "beginnins", "beginnings", - "behaivors", "behaviors", - "behaivour", "behaviour", - "behavoirs", "behaviors", - "behavoiur", "behaviour", - "behvaiour", "behaviour", - "beleiving", "believing", - "beliveing", "believing", - "belssings", "blessings", - "bemusemnt", "bemusement", - "benchamrk", "benchmark", - "benchmars", "benchmarks", - "benedicat", "benedict", - "benedickt", "benedict", - "benghazhi", "benghazi", - "benghazzi", "benghazi", - "bergamont", "bergamot", - "berkelely", "berkeley", - "bersekrer", "berserker", - "berskerer", "berserker", - "beseiging", "besieging", - "bestialiy", "bestiality", - "beuatiful", "beautiful", - "biginning", "beginning", - "bigrading", "brigading", - "billbaord", "billboard", - "billboars", "billboards", - "binominal", "binomial", - "birgading", "brigading", - "birghtest", "brightest", - "birhtdays", "birthdays", - "bitcoints", "bitcoins", - "blackbery", "blackberry", - "blackhaws", "blackhawks", - "blackshit", "blacksmith", - "blanketts", "blankets", - "blapshemy", "blasphemy", - "blashpemy", "blasphemy", - "blaspehmy", "blasphemy", - "blasphmey", "blasphemy", - "blatanlty", "blatantly", - "blatimore", "baltimore", - "bleuberry", "blueberry", - "bleutooth", "bluetooth", - "blisteres", "blisters", - "blizzcoin", "blizzcon", - "blockchan", "blockchain", - "blockeras", "blockers", - "bloodbore", "bloodborne", - "boardband", "broadband", - "boardcast", "broadcast", - "bodyweigt", "bodyweight", - "bookamrks", "bookmarks", - "bookmakrs", "bookmarks", - "bookmarkd", "bookmarked", - "boradband", "broadband", - "boradcast", "broadcast", - "boradwalk", "boardwalk", - "bouregois", "bourgeois", - "bourgeios", "bourgeois", - "bourgoeis", "bourgeois", - "boyfirend", "boyfriend", - "boyfreind", "boyfriend", - "boyfriens", "boyfriends", - "brabarian", "barbarian", - "bracelona", "barcelona", - "braodband", "broadband", - "braodcast", "broadcast", - "brazilias", "brazilians", - "breakdows", "breakdowns", - "breserker", "berserker", - "bretheren", "brethren", - "bridaging", "brigading", - "brightern", "brighten", - "brigthest", "brightest", - "brilliany", "brilliantly", - "brithdays", "birthdays", - "broadwalk", "boardwalk", - "bruiseres", "bruisers", - "brunettte", "brunette", - "brusseles", "brussels", - "brussells", "brussels", - "brutailty", "brutality", - "brutallly", "brutally", - "buddhisim", "buddhism", - "buddihsts", "buddhists", - "buddishts", "buddhists", - "buhddists", "buddhists", - "buidlings", "buildings", - "bulidings", "buildings", - "burgunday", "burgundy", - "burgundry", "burgundy", - "burritoes", "burritos", - "burtality", "brutality", - 
"busineses", "business", - "businessa", "businessman", - "businesse", "businessmen", - "businesss", "businesses", - "bussiness", "business", - "buthcered", "butchered", - "butterlfy", "butterfly", - "cacausian", "caucasian", - "caclulate", "calculate", - "cacuasian", "caucasian", - "caculater", "calculator", - "cafeteira", "cafeteria", - "cafetiera", "cafeteria", - "caffeinne", "caffeine", - "calcualte", "calculate", - "californa", "california", - "caluclate", "calculate", - "calulated", "calculated", - "calulater", "calculator", - "cambirdge", "cambridge", - "cambrdige", "cambridge", - "cambrigde", "cambridge", - "camoflage", "camouflage", - "campagins", "campaigns", - "campaings", "campaigns", - "campiagns", "campaigns", - "campusers", "campuses", - "camrbidge", "cambridge", - "canadains", "canadians", - "candadate", "candidate", - "candidats", "candidates", - "cannister", "canister", - "cannoical", "canonical", - "canoncial", "canonical", - "capactior", "capacitor", - "capicator", "capacitor", - "capitalis", "capitals", - "caprenter", "carpenter", - "capsulers", "capsules", - "capsulets", "capsules", - "carachter", "character", - "cardbaord", "cardboard", - "cardborad", "cardboard", - "cardianls", "cardinals", - "cardnials", "cardinals", - "caridnals", "cardinals", - "carmalite", "carmelite", - "carnberry", "cranberry", - "carolinia", "carolina", - "carpetner", "carpenter", - "carptener", "carpenter", - "carribean", "caribbean", - "cartdrige", "cartridge", - "cartilege", "cartilage", - "cartirdge", "cartridge", - "cartrdige", "cartridge", - "cartrigde", "cartridge", - "casaulity", "causality", - "cashieres", "cashiers", - "cassawory", "cassowary", - "cassettte", "cassette", - "casuation", "causation", - "cataclsym", "cataclysm", - "cataclyms", "cataclysm", - "catacylsm", "cataclysm", - "catacyslm", "cataclysm", - "catalcysm", "cataclysm", - "catalgoue", "catalogue", - "cathderal", "cathedral", - "catherdal", "cathedral", - "cathloics", "catholics", - "cathredal", "cathedral", - "caucaisan", "caucasian", - "caucasain", "caucasian", - "causacian", "caucasian", - "causailty", "causality", - "celebirty", "celebrity", - "celebrato", "celebration", - "celebrite", "celebrities", - "celesital", "celestial", - "celestail", "celestial", - "cementary", "cemetery", - "cemetarey", "cemetery", - "cenitpede", "centipede", - "centepide", "centipede", - "centipeed", "centipede", - "centruies", "centuries", - "centuties", "centuries", - "cerebrawl", "cerebral", - "certanity", "certainty", - "certianty", "certainty", - "cesspoool", "cesspool", - "chairmain", "chairman", - "challange", "challenge", - "challengr", "challenger", - "challengs", "challenges", - "chameloen", "chameleon", - "champagen", "champagne", - "champange", "champagne", - "chandlure", "chandler", - "changable", "changeable", - "charactor", "character", - "chatedral", "cathedral", - "chatolics", "catholics", - "checkmeat", "checkmate", - "checkpoit", "checkpoints", - "chekcmate", "checkmate", - "chemestry", "chemistry", - "chemicaly", "chemically", - "chemsitry", "chemistry", - "chernboyl", "chernobyl", - "chernobly", "chernobyl", - "chernoybl", "chernobyl", - "chernyobl", "chernobyl", - "cheronbyl", "chernobyl", - "chidlfree", "childfree", - "chidlrens", "childrens", - "chihauhua", "chihuahua", - "chihuahau", "chihuahua", - "childbird", "childbirth", - "childerns", "childrens", - "childisch", "childish", - "childresn", "childrens", - "chirstian", "christian", - "chirstmas", "christmas", - "chiuhahua", "chihuahua", - "chlidfree", 
"childfree", - "chlidrens", "childrens", - "chocloate", "chocolate", - "chocoalte", "chocolate", - "chocolats", "chocolates", - "chocolste", "chocolates", - "cholocate", "chocolate", - "chrenobyl", "chernobyl", - "chrisitan", "christian", - "christain", "christian", - "christams", "christmas", - "chrsitian", "christian", - "chrsitmas", "christmas", - "churchers", "churches", - "cigaretts", "cigarettes", - "cigeratte", "cigarette", - "cilivians", "civilians", - "cilpboard", "clipboard", - "cilynders", "cylinders", - "circuitos", "circuits", - "ciriculum", "curriculum", - "cirticise", "criticise", - "civilains", "civilians", - "civillian", "civilian", - "classicos", "classics", - "classicus", "classics", - "classifiy", "classify", - "cleanisng", "cleansing", - "cleasning", "cleansing", - "clikcbait", "clickbait", - "clinicaly", "clinically", - "clipbaord", "clipboard", - "clitories", "clitoris", - "clitorios", "clitoris", - "clitorius", "clitoris", - "clucthing", "clutching", - "clutchign", "clutching", - "cluthcing", "clutching", - "coca cola", "coca-cola", - "cockatils", "cocktails", - "cocktials", "cocktails", - "cognizent", "cognizant", - "colateral", "collateral", - "collabore", "collaborate", - "collasped", "collapsed", - "collaspes", "collapses", - "colleauge", "colleague", - "collectes", "collects", - "collectie", "collective", - "collecton", "collection", - "collectos", "collectors", - "collegaue", "colleague", - "collegues", "colleagues", - "collisson", "collisions", - "collonade", "colonnade", - "collonies", "colonies", - "collpased", "collapsed", - "collpases", "collapses", - "colombina", "colombia", - "columbina", "columbia", - "comapnies", "companies", - "combatans", "combatants", - "combinato", "combination", - "combusion", "combustion", - "comestics", "cosmetics", - "comisions", "commissions", - "comission", "commission", - "comitting", "committing", - "commandes", "commands", - "commentar", "commentator", - "commentes", "commenters", - "commercie", "commerce", - "commision", "commission", - "commiteed", "commited", - "commiting", "committing", - "commitmet", "commitments", - "commongly", "commonly", - "communiss", "communists", - "communite", "communities", - "communits", "communist", - "communsim", "communism", - "compaines", "companies", - "compalins", "complains", - "compalint", "compliant", - "comparisn", "comparisons", - "compeltes", "completes", - "competant", "competent", - "competend", "competed", - "competion", "competition", - "competive", "competitive", - "compilant", "compliant", - "compilare", "compiler", - "compilato", "compilation", - "compitent", "competent", - "complaind", "complained", - "complaing", "complaining", - "completen", "complement", - "completey", "completely", - "completin", "completion", - "complians", "complains", - "componant", "component", - "comprable", "comparable", - "compresas", "compress", - "compreses", "compress", - "compteurs", "computers", - "comptuers", "computers", - "computato", "computation", - "comradets", "comrades", - "comsetics", "cosmetics", - "conanical", "canonical", - "conatiner", "container", - "concelaed", "concealed", - "concelaer", "concealer", - "concelear", "concealer", - "concensus", "consensus", - "conceptos", "concepts", - "conceptul", "conceptual", - "concernig", "concerning", - "concertas", "concerts", - "concevied", "conceived", - "conciders", "considers", - "concieted", "conceited", - "concieved", "conceived", - "conclusie", "conclusive", - "concsious", "conscious", - "concurret", "concurrent", - 
"condamned", "condemned", - "condemend", "condemned", - "condemmed", "condemned", - "condemnig", "condemning", - "condenmed", "condemned", - "condesend", "condensed", - "condesned", "condensed", - "condmened", "condemned", - "conection", "connection", - "conenctor", "connector", - "conferene", "conferences", - "confessin", "confession", - "confideny", "confidently", - "confilcts", "conflicts", - "confimred", "confirmed", - "confirmas", "confirms", - "conflcits", "conflicts", - "confrimed", "confirmed", - "congitive", "cognitive", - "conlcuded", "concluded", - "connectes", "connects", - "connectit", "connecticut", - "connectos", "connectors", - "conquerer", "conqueror", - "consdider", "consider", - "consensul", "consensual", - "conserned", "concerned", - "consicous", "conscious", - "considerd", "considered", - "considert", "considerate", - "consisent", "consistent", - "consistes", "consists", - "consolato", "consolation", - "consolide", "consolidate", - "consonent", "consonant", - "constanly", "constantly", - "constanst", "constants", - "constanty", "constantly", - "constasnt", "constants", - "constitue", "constitutes", - "constrait", "constraints", - "construcs", "constructs", - "construde", "construed", - "construst", "constructs", - "constured", "construed", - "consulant", "consultant", - "consultat", "consultant", - "consumate", "consummate", - "contactes", "contacts", - "contactos", "contacts", - "contagios", "contagious", - "containes", "contains", - "containig", "containing", - "containts", "contains", - "contemple", "contemplate", - "contendor", "contender", - "contentas", "contents", - "contentes", "contents", - "contentos", "contents", - "contestas", "contests", - "contestat", "contestants", - "contestes", "contests", - "contextes", "contexts", - "contextos", "contexts", - "contianer", "container", - "contibute", "contribute", - "contigent", "contingent", - "continant", "continental", - "continens", "continents", - "continous", "continuous", - "continuos", "continuous", - "continute", "continue", - "contiunal", "continual", - "contracto", "contraction", - "contribue", "contribute", - "contribuo", "contributor", - "controlas", "controls", - "controled", "controlled", - "controles", "controls", - "controlls", "controls", - "convenant", "covenant", - "convencen", "convenience", - "conveniet", "convenient", - "conversie", "converse", - "conversin", "conversions", - "convertie", "convertible", - "convertis", "converts", - "cooldwons", "cooldowns", - "coordinar", "coordinator", - "copenhagn", "copenhagen", - "coprorate", "corporate", - "copywrite", "copyright", - "corcodile", "crocodile", - "corparate", "corporate", - "corproate", "corporate", - "correclty", "correctly", - "correctin", "correction", - "correlato", "correlation", - "corridoor", "corridor", - "corruptin", "corruption", - "corssfire", "crossfire", - "corsshair", "crosshair", - "corsspost", "crosspost", - "coruching", "crouching", - "cosemtics", "cosmetics", - "costumise", "costumes", - "counciles", "councils", - "councills", "councils", - "councilos", "councils", - "countains", "contains", - "counteres", "counters", - "countires", "countries", - "courching", "crouching", - "courtesey", "courtesy", - "courtesty", "courtesy", - "coururier", "courier", - "coutnered", "countered", - "crapenter", "carpenter", - "creativey", "creatively", - "creedence", "credence", - "crhistmas", "christmas", - "cricketts", "crickets", - "criminaly", "criminally", - "critereon", "criterion", - "criterias", "criteria", - "criticaly", 
"critically", - "criticies", "criticise", - "criticisn", "criticising", - "critisice", "criticise", - "critisicm", "criticism", - "critising", "criticising", - "critisism", "criticism", - "critisize", "criticise", - "critizing", "criticizing", - "crosshiar", "crosshair", - "crossifre", "crossfire", - "crticised", "criticised", - "crusdaers", "crusaders", - "crutchers", "crutches", - "crystalls", "crystals", - "crystalus", "crystals", - "crystalys", "crystals", - "cuacasian", "caucasian", - "cuasality", "causality", - "culitvate", "cultivate", - "culturaly", "culturally", - "culturels", "cultures", - "curiostiy", "curiosity", - "curisoity", "curiosity", - "currenlty", "currently", - "curriculm", "curriculum", - "cursaders", "crusaders", - "custcenes", "cutscenes", - "cutsceens", "cutscenes", - "cutscence", "cutscene", - "cutsences", "cutscenes", - "cyclinder", "cylinder", - "cyclistes", "cyclists", - "cylindres", "cylinders", - "cynicisim", "cynicism", - "dahsboard", "dashboard", - "dalmation", "dalmatian", - "dangeroys", "dangerously", - "dashbaord", "dashboard", - "daugthers", "daughters", - "davantage", "advantage", - "deadlfits", "deadlifts", - "deadpoool", "deadpool", - "dealershp", "dealerships", - "deathmath", "deathmatch", - "decalring", "declaring", - "decendant", "descendant", - "decendent", "descendant", - "decipting", "depicting", - "deciption", "depiction", - "decisivie", "decisive", - "declarase", "declares", - "declarees", "declares", - "decoratie", "decorative", - "decoratin", "decorations", - "decpetion", "deception", - "decpetive", "deceptive", - "decribing", "describing", - "decsended", "descended", - "deductibe", "deductible", - "defaintly", "defiantly", - "defaltion", "deflation", - "defanitly", "defiantly", - "defeintly", "definetly", - "defendent", "defendant", - "defensese", "defenseless", - "defianlty", "defiantly", - "deficeint", "deficient", - "deficieny", "deficiency", - "deficites", "deficits", - "definance", "defiance", - "definatey", "definately", - "definatly", "definitely", - "definetly", "definitely", - "definetyl", "definetly", - "definilty", "definitly", - "definitie", "definitive", - "definitin", "definitions", - "definitly", "definitely", - "definiton", "definition", - "definitve", "definite", - "definityl", "definitly", - "definltey", "definetly", - "defintaly", "defiantly", - "defintily", "definitly", - "defintion", "definition", - "defintley", "definetly", - "defitenly", "definetly", - "defitinly", "definitly", - "defitnaly", "defiantly", - "defitnely", "definetly", - "deflectin", "deflection", - "defnietly", "definetly", - "degeneret", "degenerate", - "degradato", "degradation", - "degradead", "degraded", - "degrassie", "degrasse", - "degrassse", "degrasse", - "deifnetly", "definetly", - "deifnitly", "definitly", - "deisgners", "designers", - "delagates", "delegates", - "delcaring", "declaring", - "delcining", "declining", - "delegatie", "delegate", - "delerious", "delirious", - "delfation", "deflation", - "deliveres", "delivers", - "deliverys", "delivers", - "delpoying", "deploying", - "demcorats", "democrats", - "deminsion", "dimension", - "democarcy", "democracy", - "democract", "democrat", - "demonstre", "demonstrate", - "denominar", "denominator", - "dentistas", "dentists", - "dentistes", "dentists", - "deomcrats", "democrats", - "deopsited", "deposited", - "deparment", "department", - "departmet", "departments", - "depciting", "depicting", - "depcition", "depiction", - "depection", "deception", - "depedency", "dependency", - "depicitng", 
"depicting", - "depiciton", "depiction", - "deplyoing", "deploying", - "depoisted", "deposited", - "depolying", "deploying", - "depositas", "deposits", - "deposites", "deposits", - "depositis", "deposits", - "depositos", "deposits", - "depostied", "deposited", - "depressie", "depressive", - "depressin", "depression", - "depserate", "desperate", - "depsoited", "deposited", - "descirbes", "describes", - "descision", "decision", - "desginers", "designers", - "desgining", "designing", - "desicions", "decisions", - "designade", "designated", - "designato", "designation", - "desingage", "disengage", - "desingers", "designers", - "desinging", "designing", - "desktopos", "desktops", - "desparate", "desperate", - "desperato", "desperation", - "despoited", "deposited", - "desriable", "desirable", - "dessigned", "designed", - "destinato", "destination", - "destoryed", "destroyed", - "destoryer", "destroyer", - "destroyes", "destroys", - "destructo", "destruction", - "destryoed", "destroyed", - "destryoer", "destroyer", - "desuction", "seduction", - "detailled", "detailed", - "detatched", "detached", - "detectivs", "detectives", - "deteriate", "deteriorate", - "determing", "determining", - "determins", "determines", - "developrs", "develops", - "diabetees", "diabetes", - "diablical", "diabolical", - "diagonaal", "diagonal", - "diagonsed", "diagnosed", - "diagonsis", "diagnosis", - "diagramas", "diagrams", - "diagramms", "diagrams", - "dialectes", "dialects", - "dialectos", "dialects", - "diarrheoa", "diarrhea", - "diasbling", "disabling", - "dichomoty", "dichotomy", - "dicovered", "discovered", - "dictaters", "dictates", - "dictionay", "dictionary", - "difenitly", "definitly", - "diferrent", "different", - "differene", "differences", - "differens", "differences", - "differeny", "differently", - "difficuly", "difficulty", - "diffucult", "difficult", - "dificulty", "difficulty", - "diganosed", "diagnosed", - "diganosis", "diagnosis", - "dimenions", "dimensions", - "dimention", "dimension", - "dimesnion", "dimension", - "diminishs", "diminishes", - "dinasours", "dinosaurs", - "dinosuars", "dinosaurs", - "dinsoaurs", "dinosaurs", - "dionsaurs", "dinosaurs", - "diphtongs", "diphthongs", - "dipthongs", "diphthongs", - "direcotry", "directory", - "directoty", "directory", - "directroy", "directory", - "disaprity", "disparity", - "disastros", "disastrous", - "disatrous", "disastrous", - "disbaling", "disabling", - "disbeleif", "disbelief", - "disbelife", "disbelief", - "disciplen", "disciplines", - "disclamer", "disclaimer", - "disclosue", "disclosure", - "disconnet", "disconnect", - "discosure", "discourse", - "discoverd", "discovered", - "discovere", "discoveries", - "discredid", "discredited", - "discribed", "described", - "discribes", "describes", - "discussin", "discussion", - "diserable", "desirable", - "disgarees", "disagrees", - "disgiused", "disguised", - "disgusied", "disguised", - "disgustes", "disgusts", - "disgustos", "disgusts", - "disgustus", "disgusts", - "dishonesy", "dishonesty", - "dishonord", "dishonored", - "disicples", "disciples", - "dismantel", "dismantle", - "dismisals", "dismissal", - "disnegage", "disengage", - "dispairty", "disparity", - "dispalyed", "displayed", - "dispartiy", "disparity", - "dispenced", "dispensed", - "dispeners", "dispenser", - "displayes", "displays", - "disruptin", "disruption", - "dissapear", "disappear", - "dissarray", "disarray", - "dissmisal", "dismissal", - "disspiate", "dissipate", - "distincte", "distinctive", - "distrcits", "districts", - "distribue", 
"distributed", - "distrubed", "disturbed", - "distrupts", "distrust", - "disturben", "disturbance", - "diverisfy", "diversify", - "diveristy", "diversity", - "diverstiy", "diversity", - "dividened", "dividend", - "divinitiy", "divinity", - "doccument", "document", - "docrtines", "doctrines", - "docuhebag", "douchebag", - "dogdammit", "goddammit", - "dogfather", "godfather", - "dolphines", "dolphins", - "domecracy", "democracy", - "domecrats", "democrats", - "domiantes", "dominates", - "dominatin", "domination", - "dominaton", "domination", - "dominiant", "dominant", - "donwgrade", "downgrade", - "donwloads", "downloads", - "donwsides", "downsides", - "donwvoted", "downvoted", - "donwvotes", "downvotes", - "doublelit", "doublelift", - "doucehbag", "douchebag", - "downgarde", "downgrade", - "downlaods", "downloads", - "downloaad", "download", - "downovted", "downvoted", - "dravadian", "dravidian", - "drummless", "drumless", - "dsyphoria", "dysphoria", - "dsytopian", "dystopian", - "duaghters", "daughters", - "duplicats", "duplicates", - "durabiliy", "durability", - "dynamicus", "dynamics", - "dypshoria", "dysphoria", - "dyshporia", "dysphoria", - "dysoptian", "dystopian", - "dysphoira", "dysphoria", - "dysphroia", "dysphoria", - "dyspohria", "dysphoria", - "dyspotian", "dystopian", - "dystopain", "dystopian", - "dystpoian", "dystopian", - "eachohter", "eachother", - "eachotehr", "eachother", - "eachtoher", "eachother", - "earpluggs", "earplugs", - "earthboud", "earthbound", - "eastwoood", "eastwood", - "eastwoord", "eastwood", - "ecclectic", "eclectic", - "ecomonics", "economics", - "edficient", "deficient", - "effecient", "efficient", - "efficeint", "efficient", - "efficency", "efficiency", - "efficieny", "efficiency", - "effulence", "effluence", - "egalitara", "egalitarian", - "egpytians", "egyptians", - "egyptains", "egyptians", - "egytpians", "egyptians", - "ehtically", "ethically", - "ehtnicity", "ethnicity", - "eighteeen", "eighteen", - "eitquette", "etiquette", - "ejacualte", "ejaculate", - "electivre", "elective", - "electorns", "electrons", - "electrial", "electrical", - "electricy", "electricity", - "electroal", "electoral", - "elementay", "elementary", - "elepahnts", "elephants", - "eliminase", "eliminates", - "eliminato", "elimination", - "ellignton", "ellington", - "ellingotn", "ellington", - "eloquenty", "eloquently", - "elsehwere", "elsewhere", - "emapthize", "empathize", - "embarress", "embarrassed", - "emmisarry", "emissary", - "emmisions", "emissions", - "emmitting", "emitting", - "empahsize", "emphasize", - "emperical", "empirical", - "emphaised", "emphasised", - "emphatize", "empathize", - "emphazise", "emphasize", - "emphysyma", "emphysema", - "empitness", "emptiness", - "employeer", "employer", - "employeur", "employer", - "empolyees", "employees", - "emtpiness", "emptiness", - "emualtion", "emulation", - "enahncing", "enhancing", - "enchantig", "enchanting", - "enclousre", "enclosure", - "enclsoure", "enclosure", - "encolsure", "enclosure", - "encompase", "encompass", - "encounted", "encountered", - "encrpyted", "encrypted", - "encrytped", "encrypted", - "encyrpted", "encrypted", - "endangerd", "endangered", - "enevlopes", "envelopes", - "enforcees", "enforces", - "engagemet", "engagements", - "engagment", "engagement", - "engieneer", "engineer", - "engineeer", "engineer", - "engineerd", "engineered", - "enhacning", "enhancing", - "enhanceds", "enhances", - "enligthen", "enlighten", - "enourmous", "enormous", - "ensconsed", "ensconced", - "enthicity", "ethnicity", - 
"enthusiam", "enthusiasm", - "enthusiat", "enthusiast", - "entirelly", "entirely", - "entitlied", "entitled", - "enveloppe", "envelope", - "epidsodes", "episodes", - "epilepsey", "epilepsy", - "epiphanny", "epiphany", - "episonage", "espionage", - "epscially", "specially", - "epsionage", "espionage", - "eqautions", "equations", - "equialent", "equivalent", - "equivalet", "equivalents", - "ermington", "remington", - "erroenous", "erroneous", - "escalatie", "escalate", - "escalatin", "escalation", - "esitmated", "estimated", - "esitmates", "estimates", - "eslewhere", "elsewhere", - "especialy", "especially", - "espianoge", "espionage", - "espinoage", "espionage", - "espoinage", "espionage", - "esponiage", "espionage", - "espressso", "espresso", - "essencial", "essential", - "essentail", "essential", - "essentias", "essentials", - "essentual", "essential", - "essesital", "essential", - "estiamted", "estimated", - "estiamtes", "estimates", - "estimatin", "estimation", - "ethcially", "ethically", - "ethincity", "ethnicity", - "ethnicaly", "ethnically", - "ethniticy", "ethnicity", - "etmyology", "etymology", - "euclidian", "euclidean", - "euorpeans", "europeans", - "euphoriac", "euphoric", - "euphorica", "euphoria", - "europenas", "europeans", - "europians", "europeans", - "eurpoeans", "europeans", - "evangelia", "evangelical", - "evelation", "elevation", - "evenlopes", "envelopes", - "eventally", "eventually", - "eventualy", "eventually", - "everthing", "everything", - "evertyime", "everytime", - "everwhere", "everywhere", - "everyoens", "everyones", - "everyteim", "everytime", - "everytiem", "everytime", - "everyting", "everything", - "eveyrones", "everyones", - "evreyones", "everyones", - "evreytime", "everytime", - "exagerate", "exaggerate", - "exahusted", "exhausted", - "exapnsive", "expansive", - "exauhsted", "exhausted", - "excahnges", "exchanges", - "excecuted", "executed", - "excecutes", "executes", - "excellant", "excellent", - "excercise", "exercise", - "excerised", "exercised", - "excerises", "exercises", - "exceuting", "executing", - "exchnages", "exchanges", - "exclsuive", "exclusive", - "excludeds", "excludes", - "exclusivs", "exclusives", - "exclusivy", "exclusivity", - "excpetion", "exception", - "exculding", "excluding", - "exculsion", "exclusion", - "exculsive", "exclusive", - "execising", "exercising", - "execption", "exception", - "exectuing", "executing", - "exectuion", "execution", - "exectuive", "executive", - "executabe", "executable", - "exepmtion", "exemption", - "exerbated", "exacerbated", - "exercices", "exercise", - "exerciese", "exercises", - "exercizes", "exercise", - "exersices", "exercises", - "exhasuted", "exhausted", - "exhaustin", "exhaustion", - "exhibites", "exhibits", - "exhibitin", "exhibition", - "exhibtion", "exhibition", - "exhuasted", "exhausted", - "exibition", "exhibition", - "existance", "existence", - "existenta", "existential", - "existince", "existence", - "existnace", "existance", - "exlcuding", "excluding", - "exlcusion", "exclusion", - "exlcusive", "exclusive", - "exlpoding", "exploding", - "exlporers", "explorers", - "exlposion", "explosion", - "exonorate", "exonerate", - "expalined", "explained", - "expanisve", "expansive", - "expatriot", "expatriate", - "expectany", "expectancy", - "expection", "exception", - "expemtion", "exemption", - "experimet", "experiments", - "explaines", "explains", - "explainig", "explaining", - "explaning", "explaining", - "expliciet", "explicit", - "explicity", "explicitly", - "explictly", "explicitly", - 
"explioted", "exploited", - "explodeds", "explodes", - "exploites", "exploits", - "explorare", "explorer", - "explotied", "exploited", - "expolding", "exploding", - "expolited", "exploited", - "expolsion", "explosion", - "expolsive", "explosive", - "expressie", "expressive", - "expressin", "expression", - "exsitance", "existance", - "extention", "extension", - "exteriour", "exterior", - "extermely", "extremely", - "extermism", "extremism", - "extermist", "extremist", - "externaly", "externally", - "extractin", "extraction", - "extrapole", "extrapolate", - "extreemly", "extremely", - "extremers", "extremes", - "extremley", "extremely", - "extrotion", "extortion", - "eyeballls", "eyeballs", - "eyebrowes", "eyebrows", - "eyebrowns", "eyebrows", - "eyesahdow", "eyeshadow", - "eyeshdaow", "eyeshadow", - "eygptians", "egyptians", - "eytmology", "etymology", - "faceboook", "facebook", - "faciliate", "facilitate", - "facilites", "facilities", - "facilitiy", "facility", - "facinated", "fascinated", - "facutally", "factually", - "familiair", "familiar", - "familiare", "familiarize", - "familiary", "familiarity", - "familliar", "familiar", - "fanaticas", "fanatics", - "fanaticos", "fanatics", - "fanaticus", "fanatics", - "fanatsies", "fantasies", - "fanatsize", "fantasize", - "fandation", "foundation", - "fanservie", "fanservice", - "fantazise", "fantasize", - "farenheit", "fahrenheit", - "fascistes", "fascists", - "fashoined", "fashioned", - "favorties", "favorites", - "favoruite", "favourite", - "favourits", "favourites", - "favourtie", "favourite", - "fedreally", "federally", - "feminisim", "feminism", - "feminsits", "feminists", - "femminist", "feminist", - "fesitvals", "festivals", - "fetishers", "fetishes", - "fightings", "fighting", - "filetimes", "lifetimes", - "filiament", "filament", - "filmmakes", "filmmakers", - "fingernal", "fingernails", - "flashligt", "flashlight", - "flavorade", "flavored", - "flavoures", "flavours", - "flavourus", "flavours", - "flawlessy", "flawlessly", - "flexibily", "flexibility", - "fluctaute", "fluctuate", - "flucutate", "fluctuate", - "fluttersy", "fluttershy", - "follwoing", "following", - "foootball", "football", - "forcefuly", "forcefully", - "forcibley", "forcibly", - "forciblly", "forcibly", - "forearmes", "forearms", - "foreginer", "foreigner", - "foregroud", "foreground", - "foreinger", "foreigner", - "forgeiner", "foreigner", - "forgiener", "foreigner", - "forgivens", "forgiveness", - "foriegner", "foreigner", - "forigener", "foreigner", - "formerlly", "formerly", - "formualte", "formulate", - "formulaes", "formulas", - "formulars", "formulas", - "forntline", "frontline", - "forntpage", "frontpage", - "fortuante", "fortunate", - "forumlate", "formulate", - "foundatin", "foundations", - "fourteeen", "fourteen", - "fractales", "fractals", - "fractalis", "fractals", - "fractalus", "fractals", - "fragement", "fragment", - "fragmenot", "fragment", - "franchies", "franchise", - "francsico", "francisco", - "franscico", "francisco", - "frecklers", "freckles", - "freedomes", "freedoms", - "freestlye", "freestyle", - "freesytle", "freestyle", - "fremented", "fermented", - "freqeuncy", "frequency", - "frequence", "frequencies", - "friendlis", "friendlies", - "frightend", "frightened", - "fromation", "formation", - "frontapge", "frontpage", - "frontilne", "frontline", - "frustrato", "frustration", - "frustrats", "frustrates", - "fucntions", "functions", - "fullscren", "fullscreen", - "funcitons", "functions", - "functiong", "functioning", - "functtion", "function", 
- "furiosuly", "furiously", - "furiuosly", "furiously", - "futuristc", "futuristic", - "gagnsters", "gangsters", - "galations", "galatians", - "galdiator", "gladiator", - "gallaxies", "galaxies", - "garanteed", "guaranteed", - "garantees", "guarantees", - "garuantee", "guarantee", - "gatherins", "gatherings", - "gauntelts", "gauntlets", - "gauntlent", "gauntlet", - "gaurantee", "guarantee", - "gaurentee", "guarantee", - "genatilia", "genitalia", - "geneology", "genealogy", - "generalbs", "generals", - "generalis", "generals", - "generaste", "generates", - "generatie", "generate", - "generatin", "generations", - "generatos", "generators", - "genitaila", "genitalia", - "genitales", "genitals", - "genitalis", "genitals", - "geniunely", "genuinely", - "gentailia", "genitalia", - "gentelmen", "gentlemen", - "gentialia", "genitalia", - "genuienly", "genuinely", - "genuinley", "genuinely", - "geogrpahy", "geography", - "germaniac", "germanic", - "geurrilla", "guerrilla", - "gimmickey", "gimmicky", - "gimmickly", "gimmicky", - "girlfried", "girlfriend", - "goalkeepr", "goalkeeper", - "godafther", "godfather", - "godspeeed", "godspeed", - "goegraphy", "geography", - "goldfisch", "goldfish", - "goosebums", "goosebumps", - "gorvement", "goverment", - "govemrent", "goverment", - "govenment", "government", - "goverance", "governance", - "goveremnt", "goverment", - "goverment", "government", - "govermetn", "goverment", - "govermnet", "goverment", - "governmet", "governments", - "govorment", "government", - "govrement", "goverment", - "gracefull", "graceful", - "gracefuly", "gracefully", - "graduaste", "graduates", - "graduatin", "graduation", - "grahpical", "graphical", - "grativate", "gravitate", - "graudally", "gradually", - "graudates", "graduates", - "greenalnd", "greenland", - "grenaders", "grenades", - "grpahical", "graphical", - "guadulupe", "guadalupe", - "guaranted", "guaranteed", - "guarantes", "guarantees", - "guardains", "guardians", - "guarentee", "guarantee", - "guaridans", "guardians", - "guatamala", "guatemala", - "guerrilas", "guerrillas", - "guradians", "guardians", - "guranteed", "guaranteed", - "gurantees", "guarantees", - "gutiarist", "guitarist", - "habsbourg", "habsburg", - "hairstlye", "hairstyle", - "hairsytle", "hairstyle", - "halarious", "hilarious", - "hambruger", "hamburger", - "hamburges", "hamburgers", - "hamphsire", "hampshire", - "hamsphire", "hampshire", - "handboook", "handbook", - "handedley", "handedly", - "handedlly", "handedly", - "handicape", "handicapped", - "hapmshire", "hampshire", - "happended", "happened", - "happenend", "happened", - "happenned", "happened", - "harasment", "harassment", - "hardenend", "hardened", - "hardwoord", "hardwood", - "haristyle", "hairstyle", - "harrasing", "harassing", - "harrassed", "harassed", - "harrasses", "harassed", - "hdinsight", "hindsight", - "headahces", "headaches", - "headhpone", "headphone", - "headshoot", "headshot", - "healither", "healthier", - "healtheir", "healthier", - "healthiet", "healthiest", - "healthire", "healthier", - "heapdhone", "headphone", - "hedgehoog", "hedgehog", - "hedgehorg", "hedgehog", - "heightend", "heightened", - "heirarchy", "hierarchy", - "herculase", "hercules", - "herculeas", "hercules", - "herculees", "hercules", - "herculeus", "hercules", - "heriarchy", "hierarchy", - "hesistant", "hesitant", - "hesistate", "hesitate", - "hesitatin", "hesitation", - "hieroglph", "hieroglyph", - "highschol", "highschool", - "hindisght", "hindsight", - "hindrence", "hindrance", - "hinduisim", "hinduism", - 
"hinduisum", "hinduism", - "hipsanics", "hispanics", - "hirearchy", "hierarchy", - "hirsohima", "hiroshima", - "hispancis", "hispanics", - "hitboxers", "hitboxes", - "hoepfully", "hopefully", - "holocasut", "holocaust", - "holocuast", "holocaust", - "homeonwer", "homeowner", - "homeopaty", "homeopathy", - "homewolrd", "homeworld", - "homewoner", "homeowner", - "homewrold", "homeworld", - "homogenes", "homogeneous", - "homosexul", "homosexuals", - "hopelessy", "hopelessly", - "hopsitals", "hospitals", - "horishima", "hiroshima", - "horizones", "horizons", - "horizonts", "horizons", - "horrendos", "horrendous", - "horribley", "horribly", - "horriblly", "horribly", - "horrifing", "horrifying", - "hositlity", "hostility", - "hospitaly", "hospitality", - "hosptials", "hospitals", - "hourgalss", "hourglass", - "hourlgass", "hourglass", - "househols", "households", - "humanitis", "humanities", - "humanoind", "humanoid", - "humiditiy", "humidity", - "hunagrian", "hungarian", - "hurriance", "hurricane", - "hurricans", "hurricanes", - "husbandos", "husbands", - "hydraluic", "hydraulic", - "hydropile", "hydrophile", - "hydropobe", "hydrophobe", - "hydrualic", "hydraulic", - "hyopcrite", "hypocrite", - "hypcorite", "hypocrite", - "hyperoble", "hyperbole", - "hypocracy", "hypocrisy", - "hypocrasy", "hypocrisy", - "hypocricy", "hypocrisy", - "hypocriet", "hypocrite", - "hypocrits", "hypocrites", - "hyporcite", "hypocrite", - "hypothess", "hypotheses", - "hyprocisy", "hypocrisy", - "hyprocite", "hypocrite", - "hyrdation", "hydration", - "hyrdaulic", "hydraulic", - "hysterica", "hysteria", - "hysteriia", "hysteria", - "iburpofen", "ibuprofen", - "icleandic", "icelandic", - "icongnito", "incognito", - "idealisim", "idealism", - "idealistc", "idealistic", - "identifiy", "identify", - "ideologis", "ideologies", - "ignornace", "ignorance", - "illegales", "illegals", - "illegalis", "illegals", - "illegalls", "illegals", - "illnesess", "illnesses", - "illsuions", "illusions", - "illuminai", "illuminati", - "imagenary", "imaginary", - "imaginery", "imaginary", - "imaptient", "impatient", - "imigrated", "emigrated", - "immensley", "immensely", - "immerisve", "immersive", - "immesnely", "immensely", - "immidiate", "immediate", - "immigrato", "immigration", - "immitated", "imitated", - "immitator", "imitator", - "immobilie", "immobile", - "immobille", "immobile", - "immobilze", "immobile", - "immortaly", "immortality", - "immserive", "immersive", - "impaitent", "impatient", - "imparital", "impartial", - "impedence", "impedance", - "implantes", "implants", - "implicati", "implicit", - "impliciet", "implicit", - "implicity", "implicitly", - "impliment", "implement", - "implusive", "impulsive", - "importamt", "important", - "importend", "imported", - "imporving", "improving", - "impossibe", "impossible", - "imprefect", "imperfect", - "impressin", "impressions", - "imprioned", "imprisoned", - "improbabe", "improbable", - "impulisve", "impulsive", - "impuslive", "impulsive", - "imrpoving", "improving", - "inadequet", "inadequate", - "inadquate", "inadequate", - "inaugures", "inaugurates", - "inbalance", "imbalance", - "inbeetwen", "inbetween", - "inbetween", "between", - "inbewteen", "inbetween", - "incarnato", "incarnation", - "incgonito", "incognito", - "inclinato", "inclination", - "includeds", "includes", - "incoginto", "incognito", - "incongito", "incognito", - "incorpore", "incorporate", - "incpetion", "inception", - "incredibe", "incredible", - "incrediby", "incredibly", - "inculding", "including", - "incunabla", 
"incunabula", - "indicaste", "indicates", - "indicatie", "indicative", - "indicence", "incidence", - "indicents", "incidents", - "indigenos", "indigenous", - "indirecty", "indirectly", - "indisious", "insidious", - "individul", "individual", - "individus", "individuals", - "indoensia", "indonesia", - "indoneisa", "indonesia", - "indutrial", "industrial", - "inersting", "inserting", - "inexpense", "inexpensive", - "infallibe", "infallible", - "inferioir", "inferior", - "inferiour", "inferior", - "infestato", "infestation", - "infiltrar", "infiltrator", - "infinitey", "infinity", - "infinitie", "infinite", - "infinitiy", "infinity", - "infinitly", "infinity", - "inflatabe", "inflatable", - "influense", "influences", - "influenta", "influential", - "informate", "informative", - "infraread", "infrared", - "ingeniuty", "ingenuity", - "ingeunity", "ingenuity", - "ingocnito", "incognito", - "ingorance", "ignorance", - "inguenity", "ingenuity", - "inhabitat", "inhabitants", - "inheirted", "inherited", - "inhertied", "inherited", - "initailly", "initially", - "initalese", "initialese", - "initaling", "initialing", - "initalise", "initialise", - "initalism", "initialism", - "initalize", "initialize", - "initalled", "initialled", - "initation", "initiation", - "initiales", "initials", - "initiatie", "initiatives", - "initiatin", "initiation", - "initiatve", "initiate", - "injustics", "injustices", - "inlcuding", "including", - "inmigrant", "immigrant", - "innoucous", "innocuous", - "innovatin", "innovations", - "innovatve", "innovate", - "inpection", "inception", - "inpending", "impending", - "inproving", "improving", - "inpsector", "inspector", - "inpsiring", "inspiring", - "inquisito", "inquisition", - "inquisitr", "inquisitor", - "inresting", "inserting", - "insanelly", "insanely", - "insepctor", "inspector", - "insidiuos", "insidious", - "insipring", "inspiring", - "insluated", "insulated", - "inspectin", "inspection", - "instabilt", "instability", - "installes", "installs", - "installus", "installs", - "instering", "inserting", - "insticnts", "instincts", - "institude", "instituted", - "instituto", "institution", - "insualted", "insulated", - "insurence", "insurance", - "insurgeny", "insurgency", - "integirty", "integrity", - "integraal", "integral", - "integrade", "integrated", - "integrato", "integration", - "intenisty", "intensity", - "intensley", "intensely", - "interacte", "interactive", - "interents", "internets", - "interesat", "interest", - "interesst", "interests", - "interewbs", "interwebs", - "interfase", "interfaces", - "interfeer", "interfere", - "interfers", "interferes", - "intergate", "integrate", - "intergity", "integrity", - "interiour", "interior", - "internest", "internets", - "interpert", "interpret", - "interprut", "interrupt", - "interrups", "interrupts", - "interstae", "interstate", - "interveen", "intervene", - "intervied", "interviewed", - "intervier", "interviewer", - "intervies", "interviews", - "intesnely", "intensely", - "intesnity", "intensity", - "intestins", "intestines", - "inticrate", "intricate", - "intimidad", "intimidated", - "intircate", "intricate", - "intiution", "intuition", - "intiutive", "intuitive", - "intorduce", "introduce", - "intorvert", "introvert", - "intracite", "intricate", - "intrduced", "introduced", - "intregity", "integrity", - "intrenets", "internets", - "intrepret", "interpret", - "intrerupt", "interrupt", - "intrewebs", "interwebs", - "intrinisc", "intrinsic", - "intrisinc", "intrinsic", - "intrisnic", "intrinsic", - "intriuged", 
"intrigued", - "introdued", "introduced", - "introduse", "introduces", - "introvers", "introverts", - "intruiged", "intrigued", - "intrument", "instrument", - "inutition", "intuition", - "inutitive", "intuitive", - "invaderas", "invaders", - "invalidas", "invalidates", - "inventios", "inventions", - "investige", "investigate", - "investmet", "investments", - "invincibe", "invincible", - "invloving", "involving", - "invovling", "involving", - "ipubrofen", "ibuprofen", - "iranianos", "iranians", - "irelevent", "irrelevant", - "ironicaly", "ironically", - "irritatie", "irritate", - "irritatin", "irritation", - "isalmists", "islamists", - "isalnders", "islanders", - "islamiskt", "islamist", - "islamsits", "islamists", - "islmaists", "islamists", - "isntaller", "installer", - "isntances", "instances", - "isntantly", "instantly", - "israelies", "israelis", - "israelits", "israelis", - "italianas", "italians", - "italianos", "italians", - "jailbrake", "jailbreak", - "jalibreak", "jailbreak", - "jamaicain", "jamaican", - "jersualem", "jerusalem", - "jeruselam", "jerusalem", - "jeruslaem", "jerusalem", - "journalis", "journals", - "judegment", "judgement", - "judgemant", "judgemental", - "judisuary", "judiciary", - "jugdement", "judgement", - "juggernat", "juggernaut", - "juvenille", "juvenile", - "keneysian", "keynesian", - "kentuckey", "kentucky", - "kenyesian", "keynesian", - "keybaords", "keyboards", - "keyensian", "keynesian", - "keyesnian", "keynesian", - "keynseian", "keynesian", - "keysenian", "keynesian", - "kilometes", "kilometers", - "kindapped", "kidnapped", - "kncokback", "knockback", - "knoweldge", "knowledge", - "knowlegde", "knowledge", - "konckback", "knockback", - "kryptonie", "kryptonite", - "labirynth", "labyrinth", - "laboratoy", "laboratory", - "laboreres", "laborers", - "labratory", "laboratory", - "labriynth", "labyrinth", - "labryinth", "labyrinth", - "labyrnith", "labyrinth", - "landscaps", "landscapes", - "landscspe", "landscapes", - "langauges", "languages", - "languague", "language", - "lanuchers", "launchers", - "lanugages", "languages", - "larington", "arlington", - "latitudie", "latitude", - "lattitude", "latitude", - "laucnhers", "launchers", - "laucnhing", "launching", - "launchign", "launching", - "laybrinth", "labyrinth", - "lebanesse", "lebanese", - "leceister", "leicester", - "leciester", "leicester", - "legitmate", "legitimate", - "legnedary", "legendary", - "lesbianas", "lesbians", - "lesbianus", "lesbians", - "letivicus", "leviticus", - "leutenant", "lieutenant", - "levaithan", "leviathan", - "levellign", "levelling", - "levetated", "levitated", - "levetates", "levitates", - "levicitus", "leviticus", - "levleling", "levelling", - "lfiesteal", "lifesteal", - "liberales", "liberals", - "liberalim", "liberalism", - "liberalis", "liberals", - "liberatin", "liberation", - "libraires", "libraries", - "liecester", "leicester", - "lieuenant", "lieutenant", - "lieutenat", "lieutenant", - "lifespawn", "lifespan", - "lifestlye", "lifestyle", - "lighnting", "lightning", - "lightnign", "lightning", - "ligthning", "lightning", - "ligthroom", "lightroom", - "lingerine", "lingerie", - "lispticks", "lipsticks", - "listenend", "listened", - "literarly", "literary", - "literarry", "literary", - "literatre", "literate", - "literatue", "literate", - "literture", "literature", - "lithaunia", "lithuania", - "lithuaina", "lithuania", - "lithuiana", "lithuania", - "lithunaia", "lithuania", - "litigatin", "litigation", - "lituhania", "lithuania", - "liveprool", "liverpool", - 
"livestrem", "livestream", - "lobbysits", "lobbyists", - "lockscren", "lockscreen", - "logisitcs", "logistics", - "logsitics", "logistics", - "loiusiana", "louisiana", - "lollipoop", "lollipop", - "louisvile", "louisville", - "luanchers", "launchers", - "luanching", "launching", - "lubicrant", "lubricant", - "lubircant", "lubricant", - "ludcrious", "ludicrous", - "ludricous", "ludicrous", - "lunaticos", "lunatics", - "lunaticus", "lunatics", - "macaronni", "macaroni", - "maestries", "masteries", - "magainzes", "magazines", - "magensium", "magnesium", - "magincian", "magician", - "magintude", "magnitude", - "magneisum", "magnesium", - "magnesuim", "magnesium", - "magnifine", "magnificent", - "mainfesto", "manifesto", - "mainfests", "manifests", - "mainstrem", "mainstream", - "maintaing", "maintaining", - "maintance", "maintenance", - "maintians", "maintains", - "mairjuana", "marijuana", - "malasyian", "malaysian", - "malayisan", "malaysian", - "malaysain", "malaysian", - "maletonin", "melatonin", - "maltesian", "maltese", - "malyasian", "malaysian", - "managable", "manageable", - "managment", "management", - "mandarian", "mandarin", - "mandarijn", "mandarin", - "mandarion", "mandarin", - "maneouvre", "manoeuvre", - "maneuveur", "maneuver", - "maneveurs", "maneuvers", - "manfiesto", "manifesto", - "manfiests", "manifests", - "mangesium", "magnesium", - "mangitude", "magnitude", - "manouvers", "maneuvers", - "mantained", "maintained", - "manuevers", "maneuvers", - "maraudeur", "marauder", - "marevlous", "marvelous", - "margarent", "margaret", - "margarite", "margaret", - "marginaal", "marginal", - "marginaly", "marginally", - "marijauna", "marijuana", - "marineras", "mariners", - "marineris", "mariners", - "marineros", "mariners", - "marjiuana", "marijuana", - "marjority", "majority", - "marmelade", "marmalade", - "marrtyred", "martyred", - "massagens", "massages", - "massivley", "massively", - "masteires", "masteries", - "mastereis", "masteries", - "masterise", "masteries", - "mastermid", "mastermind", - "mastieres", "masteries", - "masturbae", "masturbated", - "materiaal", "material", - "matierals", "materials", - "mattreses", "mattress", - "mayalsian", "malaysian", - "maylasian", "malaysian", - "mccarthey", "mccarthy", - "mecahnics", "mechanics", - "mecernary", "mercenary", - "mechancis", "mechanics", - "mechanims", "mechanism", - "mechaninc", "mechanic", - "mechansim", "mechanism", - "medicince", "medicine", - "mediciney", "mediciny", - "meditatie", "meditate", - "meditatin", "meditation", - "megathred", "megathread", - "melanotin", "melatonin", - "melborune", "melbourne", - "melbounre", "melbourne", - "membrance", "membrane", - "menstraul", "menstrual", - "menstural", "menstrual", - "mensutral", "menstrual", - "mentiones", "mentions", - "mercanery", "mercenary", - "merhcants", "merchants", - "messagers", "messages", - "messanger", "messenger", - "metabloic", "metabolic", - "metalurgy", "metallurgy", - "methaphor", "metaphor", - "methapors", "metaphors", - "methodoly", "methodology", - "metropols", "metropolis", - "mexicanas", "mexicans", - "mexicants", "mexicans", - "mexicanus", "mexicans", - "michellle", "michelle", - "micorwave", "microwave", - "micoscopy", "microscopy", - "microphen", "microphone", - "migrantes", "migrants", - "migrianes", "migraines", - "milawukee", "milwaukee", - "milennium", "millennium", - "milestons", "milestones", - "militians", "militias", - "millenial", "millennial", - "millenian", "millennia", - "millenium", "millennium", - "millionar", "millionaire", - 
"millitary", "military", - "miluwakee", "milwaukee", - "milwakuee", "milwaukee", - "milwuakee", "milwaukee", - "mindcarck", "mindcrack", - "mindlessy", "mindlessly", - "minerales", "minerals", - "minisclue", "miniscule", - "miniscuel", "miniscule", - "ministery", "ministry", - "minisucle", "miniscule", - "minitaure", "miniature", - "minituare", "miniature", - "minneosta", "minnesota", - "minnestoa", "minnesota", - "minsicule", "miniscule", - "minsiters", "ministers", - "minstries", "ministries", - "miraculos", "miraculous", - "mircowave", "microwave", - "mirrorred", "mirrored", - "miserabel", "miserable", - "mispelled", "misspelled", - "misreable", "miserable", - "misreably", "miserably", - "missisipi", "mississippi", - "missonary", "missionary", - "missourri", "missouri", - "misspelld", "misspelled", - "mobilitiy", "mobility", - "moderatey", "moderately", - "moderatin", "moderation", - "modifires", "modifiers", - "moelcules", "molecules", - "moleclues", "molecules", - "molestare", "molester", - "molestato", "molestation", - "molesterd", "molested", - "monestary", "monastery", - "monitores", "monitors", - "monolgoue", "monologue", - "monolight", "moonlight", - "monolouge", "monologue", - "monopolis", "monopolies", - "monopolly", "monopoly", - "monopoloy", "monopoly", - "monserrat", "montserrat", - "monstorus", "monstrous", - "monstruos", "monstrous", - "montanous", "mountainous", - "monumnets", "monuments", - "moratlity", "mortality", - "morbidley", "morbidly", - "morgatges", "mortgages", - "morgtages", "mortgages", - "morisette", "morissette", - "mormonsim", "mormonism", - "morroccan", "moroccan", - "mortailty", "mortality", - "mosquitto", "mosquito", - "motivatie", "motivate", - "motivatin", "motivations", - "motorcyce", "motorcycles", - "motorolja", "motorola", - "motoroloa", "motorola", - "moustahce", "moustache", - "movepseed", "movespeed", - "mozzarela", "mozzarella", - "mucisians", "musicians", - "mulitated", "mutilated", - "mulitples", "multiples", - "multipled", "multiplied", - "multplies", "multiples", - "murdererd", "murdered", - "muscially", "musically", - "muscician", "musician", - "musculair", "muscular", - "mushrooom", "mushroom", - "musicains", "musicians", - "mutatiohn", "mutation", - "mutialted", "mutilated", - "mutilatin", "mutilation", - "mutliated", "mutilated", - "mutliples", "multiples", - "mutlitude", "multitude", - "mysterise", "mysteries", - "mysterous", "mysterious", - "nacrotics", "narcotics", - "naferious", "nefarious", - "nahsville", "nashville", - "narcissim", "narcissism", - "narcissit", "narcissist", - "narcissts", "narcissist", - "narctoics", "narcotics", - "nasvhille", "nashville", - "nationaal", "national", - "nationaly", "nationally", - "nativelly", "natively", - "natrually", "naturally", - "navigatie", "navigate", - "navigatin", "navigation", - "neccesary", "necessary", - "necessite", "necessities", - "neckbears", "neckbeards", - "neckbread", "neckbeard", - "nedlessly", "endlessly", - "needlessy", "needlessly", - "negiotate", "negotiate", - "negociate", "negotiate", - "negoitate", "negotiate", - "neigbhour", "neighbour", - "neigbours", "neighbours", - "neighboor", "neighbor", - "nessecary", "necessary", - "newcaslte", "newcastle", - "newcastel", "newcastle", - "nieghbour", "neighbour", - "nightfa;;", "nightfall", - "nightlcub", "nightclub", - "nigthclub", "nightclub", - "nigthlife", "nightlife", - "nigthmare", "nightmare", - "nihilisim", "nihilism", - "ninteenth", "nineteenth", - "nominatie", "nominate", - "nominatin", "nomination", - "noninital", 
"noninitial", - "norhteast", "northeast", - "norhtwest", "northwest", - "normanday", "normandy", - "northeren", "northern", - "norwegain", "norwegian", - "norwiegan", "norwegian", - "nostaglia", "nostalgia", - "nostaglic", "nostalgic", - "nostaliga", "nostalgia", - "nostaligc", "nostalgic", - "nostlagia", "nostalgia", - "nostlagic", "nostalgic", - "nostriles", "nostrils", - "nostrills", "nostrils", - "notacible", "noticable", - "notciable", "noticable", - "noteboook", "notebook", - "noteriety", "notoriety", - "noteworty", "noteworthy", - "noticable", "noticeable", - "noticably", "noticeably", - "noticalbe", "noticable", - "noticeing", "noticing", - "noticible", "noticeable", - "notoroius", "notorious", - "novermber", "november", - "nullabour", "nullarbor", - "numberous", "numerous", - "numercial", "numerical", - "numerious", "numerous", - "nuremburg", "nuremberg", - "nurtients", "nutrients", - "nutirents", "nutrients", - "nutreints", "nutrients", - "nutritent", "nutrient", - "nutritian", "nutritional", - "nutritios", "nutritious", - "obediance", "obedience", - "obeidence", "obedience", - "obersvant", "observant", - "obersvers", "observers", - "obesssion", "obsession", - "obiedence", "obedience", - "obivously", "obviously", - "objectivs", "objectives", - "objectivy", "objectivity", - "obscruity", "obscurity", - "obscuirty", "obscurity", - "observare", "observer", - "observerd", "observed", - "obssesion", "obsession", - "obssesive", "obsessive", - "obssessed", "obsessed", - "obstruced", "obstructed", - "obsucrity", "obscurity", - "obtainabe", "obtainable", - "obviosuly", "obviously", - "obvioulsy", "obviously", - "obvisouly", "obviously", - "obvoiusly", "obviously", - "ocasional", "occasional", - "ocasioned", "occasioned", - "ocassions", "occasions", - "occaisons", "occasions", - "occassion", "occasion", - "occurance", "occurrence", - "occurence", "occurrence", - "octohedra", "octahedra", - "ocuntries", "countries", - "ocurrance", "occurrence", - "ocurrence", "occurrence", - "offcially", "officially", - "offically", "officially", - "officialy", "officially", - "offpsring", "offspring", - "offspirng", "offspring", - "offsrping", "offspring", - "ogliarchy", "oligarchy", - "oilgarchy", "oligarchy", - "oligrachy", "oligarchy", - "ommitting", "omitting", - "onlsaught", "onslaught", - "onsalught", "onslaught", - "onslaugth", "onslaught", - "onsluaght", "onslaught", - "onwership", "ownership", - "opiniones", "opinions", - "oposition", "opposition", - "opponenet", "opponent", - "opposiste", "opposites", - "opposties", "opposites", - "oppressin", "oppression", - "opression", "oppression", - "opressive", "oppressive", - "opthalmic", "ophthalmic", - "optimisim", "optimism", - "optimistc", "optimistic", - "optinally", "optimally", - "oragnered", "orangered", - "oragnised", "organised", - "oragnizer", "organizer", - "orcehstra", "orchestra", - "ordinarly", "ordinary", - "orgainsed", "organised", - "orgainzer", "organizer", - "organered", "orangered", - "organices", "organise", - "organisim", "organism", - "organiske", "organise", - "organiste", "organise", - "organites", "organise", - "organizms", "organism", - "organsied", "organised", - "organsims", "organisms", - "organzier", "organizer", - "orginally", "originally", - "orgnaised", "organised", - "orhcestra", "orchestra", - "orientato", "orientation", - "origanaly", "originally", - "originall", "original", - "originalt", "originality", - "originaly", "originally", - "origintea", "originate", - "origional", "original", - "orignally", "originally", - 
"orignials", "originals", - "oublisher", "publisher", - "oursleves", "ourselves", - "oustiders", "outsiders", - "oustpoken", "outspoken", - "outisders", "outsiders", - "outnumbed", "outnumbered", - "outpalyed", "outplayed", - "outperfom", "outperform", - "outpsoken", "outspoken", - "outrageos", "outrageous", - "outskirst", "outskirts", - "outskrits", "outskirts", - "outwieghs", "outweighs", - "overbaord", "overboard", - "overclcok", "overclock", - "overdirve", "overdrive", - "overhpyed", "overhyped", - "overhwelm", "overwhelm", - "overlcock", "overclock", - "overloard", "overload", - "overpaied", "overpaid", - "overpowed", "overpowered", - "overriden", "overridden", - "overwhlem", "overwhelm", - "overwirte", "overwrite", - "overwtach", "overwatch", - "overyhped", "overhyped", - "owernship", "ownership", - "pacificts", "pacifist", - "packageid", "packaged", - "pactivity", "captivity", - "painkills", "painkillers", - "paitently", "patiently", - "paitience", "patience", - "pakistain", "pakistani", - "pakistian", "pakistani", - "pakistnai", "pakistani", - "paksitani", "pakistani", - "paladines", "paladins", - "paladinos", "paladins", - "palestein", "palestine", - "palestina", "palestinian", - "palistian", "palestinian", - "paltforms", "platforms", - "palystyle", "playstyle", - "pancakers", "pancakes", - "pantomine", "pantomime", - "paradimes", "paradise", - "paragraps", "paragraphs", - "paragrpah", "paragraph", - "paralells", "parallels", - "paralelly", "parallelly", - "paralisys", "paralysis", - "parallely", "parallelly", - "paralzyed", "paralyzed", - "paramedis", "paramedics", - "paramters", "parameters", - "paranoica", "paranoia", - "paranoida", "paranoia", - "parasties", "parasites", - "paraylsis", "paralysis", - "paraylzed", "paralyzed", - "parellels", "parallels", - "paricular", "particular", - "parisitic", "parasitic", - "paritally", "partially", - "parliment", "parliament", - "parmesaen", "parmesan", - "parntered", "partnered", - "parrallel", "parallel", - "partchett", "pratchett", - "parterned", "partnered", - "participe", "participate", - "partiotic", "patriotic", - "partisain", "partisan", - "pasengers", "passengers", - "passagens", "passages", - "passagers", "passages", - "passerbys", "passersby", - "passiones", "passions", - "passivley", "passively", - "passowrds", "passwords", - "pateintly", "patiently", - "paticular", "particular", - "patinetly", "patiently", - "patriarca", "patriarchal", - "patriarcy", "patriarchy", - "patriotas", "patriots", - "patriotes", "patriots", - "patroitic", "patriotic", - "pattented", "patented", - "pavillion", "pavilion", - "pbulisher", "publisher", - "peacefuly", "peacefully", - "pedohpile", "pedophile", - "pedophila", "pedophilia", - "pedophils", "pedophiles", - "peircings", "piercings", - "penalites", "penalties", - "penatlies", "penalties", - "penduluum", "pendulum", - "penerator", "penetrator", - "penguines", "penguins", - "penguings", "penguins", - "penguinos", "penguins", - "peninsual", "peninsula", - "peninusla", "peninsula", - "penisnula", "peninsula", - "penisular", "peninsular", - "pennisula", "peninsula", - "pensinula", "peninsula", - "pentagoon", "pentagon", - "peodphile", "pedophile", - "pepperino", "pepperoni", - "peppermit", "peppermint", - "percepted", "perceived", - "percevied", "perceived", - "percieved", "perceived", - "percisely", "precisely", - "percision", "precision", - "percursor", "precursor", - "perdators", "predators", - "peremiter", "perimeter", - "perfeclty", "perfectly", - "perfomers", "performers", - "performas", 
"performs", - "perfromer", "performer", - "pericings", "piercings", - "perimetre", "perimeter", - "peristent", "persistent", - "periwinke", "periwinkle", - "permanant", "permanent", - "permature", "premature", - "permenant", "permanent", - "permieter", "perimeter", - "permissie", "permissible", - "permissin", "permissions", - "permisson", "permission", - "pernament", "permanent", - "perorders", "preorders", - "perpetrar", "perpetrator", - "perpetuae", "perpetuate", - "perpetuas", "perpetuates", - "persauded", "persuaded", - "perscribe", "prescribe", - "perserved", "preserved", - "persistes", "persists", - "personaes", "personas", - "personaly", "personally", - "personell", "personnel", - "personhod", "personhood", - "persuated", "persuade", - "pertended", "pretended", - "pertoleum", "petroleum", - "perusaded", "persuaded", - "pervertes", "perverse", - "pesticids", "pesticides", - "petroluem", "petroleum", - "phemonena", "phenomena", - "phenemona", "phenomena", - "phenonema", "phenomena", - "phillipse", "phillies", - "philosopy", "philosophy", - "philosphy", "philosophy", - "phonecian", "phoenecian", - "phonemena", "phenomena", - "phongraph", "phonograph", - "photograh", "photograph", - "phsyician", "physician", - "phsyicist", "physicist", - "phycisian", "physician", - "phycisist", "physicist", - "physicaly", "physically", - "physicits", "physicist", - "physisict", "physicist", - "piblisher", "publisher", - "picthfork", "pitchfork", - "pinetrest", "pinterest", - "placeheld", "placeholder", - "placemens", "placements", - "plaestine", "palestine", - "plagarism", "plagiarism", - "planatery", "planetary", - "planation", "plantation", - "planteary", "planetary", - "plasticas", "plastics", - "plasticos", "plastics", - "plasticus", "plastics", - "platfroms", "platforms", - "platofrms", "platforms", - "plausable", "plausible", - "plausbile", "plausible", - "plausibel", "plausible", - "playgroud", "playground", - "playright", "playwright", - "playstlye", "playstyle", - "playwrite", "playwright", - "plebicite", "plebiscite", - "plethoria", "plethora", - "ploarized", "polarized", - "pointeres", "pointers", - "polinator", "pollinator", - "polishees", "polishes", - "politelly", "politely", - "politican", "politician", - "politicas", "politics", - "politicin", "politician", - "politicus", "politics", - "polygammy", "polygamy", - "populatin", "populations", - "poralized", "polarized", - "porcelian", "porcelain", - "porcelina", "porcelain", - "poreclain", "porcelain", - "porftolio", "portfolio", - "porgramme", "programme", - "portfoilo", "portfolio", - "portoflio", "portfolio", - "portraing", "portraying", - "portrayes", "portrays", - "portrayls", "portrays", - "portriats", "portraits", - "portugese", "portuguese", - "portugues", "portuguese", - "posessing", "possessing", - "posession", "possession", - "positiond", "positioned", - "positiong", "positioning", - "positionl", "positional", - "positiviy", "positivity", - "possesess", "possesses", - "possesing", "possessing", - "possesion", "possession", - "possessin", "possessions", - "possibile", "possible", - "possibily", "possibility", - "possibley", "possibly", - "possiblly", "possibly", - "possition", "position", - "powderade", "powdered", - "powerfull", "powerful", - "pracitcal", "practical", - "practhett", "pratchett", - "practicly", "practically", - "practives", "practise", - "pragamtic", "pragmatic", - "preadtors", "predators", - "precedeed", "preceded", - "preceeded", "preceded", - "preceived", "perceived", - "preciesly", "precisely", - "precisley", 
"precisely", - "precurors", "precursor", - "precurosr", "precursor", - "precurser", "precursor", - "predatobr", "predator", - "predictie", "predictive", - "predictin", "prediction", - "predjuice", "prejudice", - "predujice", "prejudice", - "prefectly", "perfectly", - "preferens", "preferences", - "prefering", "preferring", - "preformer", "performer", - "pregnance", "pregnancies", - "preimeter", "perimeter", - "prejiduce", "prejudice", - "prejucide", "prejudice", - "premanent", "permanent", - "premeired", "premiered", - "preorderd", "preordered", - "preparato", "preparation", - "prepatory", "preparatory", - "presentas", "presents", - "presentes", "presents", - "presicely", "precisely", - "presicion", "precision", - "presideny", "presidency", - "prestigiu", "prestigious", - "prestigue", "prestige", - "presuaded", "persuaded", - "pretendas", "pretends", - "pretensje", "pretense", - "pretinent", "pertinent", - "prevelant", "prevalent", - "preventin", "prevention", - "previvous", "previous", - "priesthod", "priesthood", - "priestood", "priesthood", - "primaires", "primaries", - "primairly", "primarily", - "primarliy", "primarily", - "primative", "primitive", - "primordal", "primordial", - "princesas", "princess", - "princeses", "princess", - "princesss", "princesses", - "principas", "principals", - "principly", "principally", - "prinicple", "principle", - "prioritie", "prioritize", - "prioritse", "priorities", - "privalege", "privilege", - "privelege", "privilege", - "privelige", "privilege", - "privilage", "privilege", - "privilegs", "privileges", - "privledge", "privilege", - "probabily", "probability", - "probablly", "probably", - "problemas", "problems", - "procative", "proactive", - "procedger", "procedure", - "proceding", "proceeding", - "proceedes", "proceeds", - "procelain", "porcelain", - "procesess", "processes", - "processer", "processor", - "processos", "processors", - "proclamed", "proclaimed", - "procotols", "protocols", - "prodecure", "procedure", - "productie", "productive", - "productin", "productions", - "productos", "products", - "profesion", "profusion", - "professer", "professor", - "professin", "professions", - "proffesed", "professed", - "proffesor", "professor", - "progessed", "progressed", - "programas", "programs", - "programem", "programme", - "programes", "programs", - "programms", "programs", - "progresso", "progression", - "progresss", "progresses", - "projectie", "projectile", - "projectin", "projection", - "prominant", "prominent", - "promiscus", "promiscuous", - "promotted", "promoted", - "pronomial", "pronominal", - "pronouced", "pronounced", - "pronounds", "pronouns", - "pronounes", "pronouns", - "propagana", "propaganda", - "properies", "properties", - "propertly", "property", - "propeties", "properties", - "prophesie", "prophecies", - "prophetes", "prophets", - "propogate", "propagate", - "proposels", "proposes", - "proposito", "proposition", - "propperly", "properly", - "propsects", "prospects", - "prosperos", "prosperous", - "prostitue", "prostitute", - "protectes", "protects", - "protectie", "protective", - "protectos", "protectors", - "proteinas", "proteins", - "proteines", "proteins", - "protestas", "protests", - "protestat", "protestant", - "protestes", "protests", - "protestos", "protests", - "protfolio", "portfolio", - "protocool", "protocol", - "prototpye", "prototype", - "prototyps", "prototypes", - "protraits", "portraits", - "protrayal", "portrayal", - "protrayed", "portrayed", - "provicial", "provincial", - "provincie", "province", - 
"provisios", "provisions", - "pruchased", "purchased", - "pruchases", "purchases", - "prugatory", "purgatory", - "pruposely", "purposely", - "pscyhotic", "psychotic", - "pseudonyn", "pseudonym", - "pshycosis", "psychosis", - "pshycotic", "psychotic", - "psycology", "psychology", - "psycothic", "psychotic", - "ptichfork", "pitchfork", - "pubilsher", "publisher", - "publiaher", "publisher", - "publicaly", "publicly", - "publicani", "publication", - "publicher", "publisher", - "publiclly", "publicly", - "publihser", "publisher", - "publisehr", "publisher", - "publisger", "publisher", - "publishor", "publisher", - "publishre", "publisher", - "publsiher", "publisher", - "publusher", "publisher", - "puchasing", "purchasing", - "punishmet", "punishments", - "puplisher", "publisher", - "puragtory", "purgatory", - "purcahsed", "purchased", - "purcahses", "purchases", - "purhcased", "purchased", - "purposley", "purposely", - "pursuaded", "persuaded", - "pursuades", "persuades", - "pyramidas", "pyramids", - "pyramides", "pyramids", - "pyschosis", "psychosis", - "pyschotic", "psychotic", - "qaulifies", "qualifies", - "quantifiy", "quantify", - "quantitiy", "quantity", - "quantitty", "quantity", - "quartlery", "quarterly", - "queations", "equations", - "queenland", "queensland", - "questiond", "questioned", - "questiong", "questioning", - "questionn", "questioning", - "radicalis", "radicals", - "rapsberry", "raspberry", - "rasbperry", "raspberry", - "rationaly", "rationally", - "reactiony", "reactionary", - "realisitc", "realistic", - "realoding", "reloading", - "realsitic", "realistic", - "realtable", "relatable", - "realtions", "relations", - "realtives", "relatives", - "reamining", "remaining", - "reaplying", "replaying", - "reasearch", "research", - "reaveling", "revealing", - "rebellios", "rebellious", - "rebllions", "rebellions", - "recations", "creations", - "reccomend", "recommend", - "reccuring", "recurring", - "receeding", "receding", - "recepient", "recipient", - "recgonise", "recognise", - "recgonize", "recognize", - "recidents", "residents", - "recievers", "receivers", - "recieving", "receiving", - "recipiant", "recipient", - "reciproce", "reciprocate", - "reclutant", "reluctant", - "recoginse", "recognise", - "recoginze", "recognize", - "recomends", "recommends", - "recommens", "recommends", - "reconenct", "reconnect", - "recongise", "recognise", - "recongize", "recognize", - "reconicle", "reconcile", - "reconized", "recognized", - "recordare", "recorder", - "recoveres", "recovers", - "recoverys", "recovers", - "recpetive", "receptive", - "recpetors", "receptors", - "recquired", "required", - "recreatie", "recreate", - "recruitcs", "recruits", - "recruites", "recruits", - "recrusion", "recursion", - "recrutied", "recruited", - "recrutier", "recruiter", - "rectangel", "rectangle", - "rectanlge", "rectangle", - "recuiting", "recruiting", - "recurison", "recursion", - "recurited", "recruited", - "recuriter", "recruiter", - "recusrion", "recursion", - "redeemeed", "redeemed", - "redundany", "redundancy", - "redundent", "redundant", - "reedeming", "redeeming", - "refelcted", "reflected", - "refereces", "references", - "refereees", "referees", - "refereers", "referees", - "referemce", "reference", - "referencs", "references", - "referense", "references", - "referiang", "referring", - "referinng", "refering", - "refernces", "references", - "refernece", "reference", - "refershed", "refreshed", - "refersher", "refresher", - "reffering", "referring", - "reflectie", "reflective", - "refrehser", 
"refresher", - "refrences", "references", - "refromist", "reformist", - "regionaal", "regional", - "registerd", "registered", - "registery", "registry", - "regualrly", "regularly", - "regualtor", "regulator", - "regulaion", "regulation", - "regulalry", "regularly", - "regulares", "regulars", - "regularis", "regulars", - "regulatin", "regulations", - "regurally", "regularly", - "reigining", "reigning", - "reinstale", "reinstalled", - "reisntall", "reinstall", - "reknowned", "renowned", - "relaoding", "reloading", - "relatiate", "retaliate", - "relativiy", "relativity", - "relativly", "relatively", - "relativno", "relation", - "relavence", "relevance", - "relcutant", "reluctant", - "relevence", "relevance", - "relfected", "reflected", - "reliabily", "reliability", - "reliabley", "reliably", - "religeous", "religious", - "remasterd", "remastered", - "rememberd", "remembered", - "rememebrs", "remembers", - "remianing", "remaining", - "remignton", "remington", - "remingotn", "remington", - "remmebers", "remembers", - "remotelly", "remotely", - "rendevous", "rendezvous", - "rendezous", "rendezvous", - "renedered", "rende", - "renegated", "renegade", - "rennovate", "renovate", - "repalying", "replaying", - "repblican", "republican", - "repeatedy", "repeatedly", - "repective", "receptive", - "repeition", "repetition", - "repentent", "repentant", - "rephrasse", "rephrase", - "replusive", "repulsive", - "reportedy", "reportedly", - "represend", "represented", - "repressin", "repression", - "reprtoire", "repertoire", - "repsonded", "responded", - "reptition", "repetition", - "reptuable", "reputable", - "repubican", "republican", - "republian", "republican", - "repulican", "republican", - "repulisve", "repulsive", - "repuslive", "repulsive", - "resaurant", "restaurant", - "researchs", "researchers", - "resembels", "resembles", - "reserverd", "reserved", - "resintall", "reinstall", - "resistane", "resistances", - "resistans", "resistances", - "resistend", "resisted", - "resistent", "resistant", - "resmebles", "resembles", - "resolutin", "resolutions", - "resoruces", "resources", - "respectes", "respects", - "respectos", "respects", - "responces", "response", - "respondas", "responds", - "respondis", "responds", - "respondus", "responds", - "respoting", "reposting", - "ressemble", "resemble", - "ressurect", "resurrect", - "restarant", "restaurant", - "resticted", "restricted", - "restircts", "restricts", - "restorani", "restoration", - "restraind", "restrained", - "restraing", "restraining", - "restraunt", "restraint", - "restriant", "restraint", - "restricte", "restrictive", - "resturant", "restaurant", - "retailate", "retaliate", - "retalaite", "retaliate", - "retaliers", "retailers", - "reteriver", "retriever", - "retirever", "retriever", - "retrevier", "retriever", - "reuptable", "reputable", - "reveiwers", "reviewers", - "revelaing", "revealing", - "revelance", "relevance", - "revolutin", "revolutions", - "rewachted", "rewatched", - "rewatchig", "rewatching", - "rferences", "references", - "ridiculos", "ridiculous", - "ridiculue", "ridicule", - "ridiculus", "ridiculous", - "righetous", "righteous", - "rightfuly", "rightfully", - "rightoues", "righteous", - "rigourous", "rigorous", - "rigtheous", "righteous", - "rilvaries", "rivalries", - "rininging", "ringing", - "rivarlies", "rivalries", - "rivlaries", "rivalries", - "roaylties", "royalties", - "roboticus", "robotics", - "roganisms", "organisms", - "royalites", "royalties", - "roylaties", "royalties", - "ruleboook", "rulebook", - "sacarstic", 
"sarcastic", - "sacntuary", "sanctuary", - "sacrafice", "sacrifice", - "sacrastic", "sarcastic", - "sacrifise", "sacrifices", - "salughter", "slaughter", - "samckdown", "smackdown", - "sanctiond", "sanctioned", - "sancturay", "sanctuary", - "sancutary", "sanctuary", - "sandstrom", "sandstorm", - "sandwhich", "sandwich", - "sanhedrim", "sanhedrin", - "santcuary", "sanctuary", - "santioned", "sanctioned", - "sapphirre", "sapphire", - "sastified", "satisfied", - "sastifies", "satisfies", - "satelites", "satellites", - "saterdays", "saturdays", - "satisifed", "satisfied", - "satisifes", "satisfies", - "satrudays", "saturdays", - "satsified", "satisfied", - "satsifies", "satisfies", - "sattelite", "satellite", - "saxaphone", "saxophone", - "scaepgoat", "scapegoat", - "scaleable", "scalable", - "scandales", "scandals", - "scandalos", "scandals", - "scantuary", "sanctuary", - "scaricity", "scarcity", - "scarifice", "sacrifice", - "scarmbled", "scrambled", - "scartched", "scratched", - "scartches", "scratches", - "scavanged", "scavenged", - "sceintist", "scientist", - "scholalry", "scholarly", - "sciencers", "sciences", - "scientfic", "scientific", - "scientifc", "scientific", - "scientits", "scientist", - "sclupture", "sculpture", - "scnearios", "scenarios", - "scoreboad", "scoreboard", - "scottisch", "scottish", - "scracthed", "scratched", - "scracthes", "scratches", - "scrambeld", "scrambled", - "scrathces", "scratches", - "scrollade", "scrolled", - "scrutiney", "scrutiny", - "scrutinty", "scrutiny", - "sculpteur", "sculpture", - "sculputre", "sculpture", - "scultpure", "sculpture", - "scuplture", "sculpture", - "scuptures", "sculptures", - "seamlessy", "seamlessly", - "searchign", "searching", - "sebasitan", "sebastian", - "sebastain", "sebastian", - "sebsatian", "sebastian", - "secceeded", "seceded", - "secertary", "secretary", - "secratary", "secretary", - "secratery", "secretary", - "secretery", "secretary", - "secretley", "secretly", - "sednetary", "sedentary", - "seduciton", "seduction", - "semanitcs", "semantics", - "semestres", "semesters", - "semnatics", "semantics", - "semseters", "semesters", - "senatores", "senators", - "sendetary", "sedentary", - "sensitivy", "sensitivity", - "sentimant", "sentimental", - "sepcially", "specially", - "seperated", "separated", - "seperates", "separates", - "seperator", "separator", - "septmeber", "september", - "seraching", "searching", - "seriosuly", "seriously", - "serioulsy", "seriously", - "seriuosly", "seriously", - "servantes", "servants", - "settlment", "settlement", - "sexualizd", "sexualized", - "sexuallly", "sexually", - "shadasloo", "shadaloo", - "shaprness", "sharpness", - "sharpenss", "sharpness", - "shawhsank", "shawshank", - "sheilding", "shielding", - "shephered", "shepherd", - "shileding", "shielding", - "shitstrom", "shitstorm", - "shletered", "sheltered", - "shoudlers", "shoulders", - "shouldnot", "shouldnt", - "shperical", "spherical", - "shwashank", "shawshank", - "sidebaord", "sideboard", - "siganture", "signature", - "signapore", "singapore", - "signitory", "signatory", - "silhouete", "silhouette", - "similiair", "similiar", - "simliarly", "similarly", - "simluated", "simulated", - "simluator", "simulator", - "simplifiy", "simplify", - "simualted", "simulated", - "simualtor", "simulator", - "simulatie", "simulate", - "simulatin", "simulation", - "sinagpore", "singapore", - "sincerley", "sincerely", - "singature", "signature", - "singpaore", "singapore", - "singulair", "singular", - "singulary", "singularity", - "skateboad", 
"skateboard", - "skeletaal", "skeletal", - "skepitcal", "skeptical", - "skepticim", "skepticism", - "sketpical", "skeptical", - "slaughted", "slaughtered", - "slaugther", "slaughter", - "slipperly", "slippery", - "sluaghter", "slaughter", - "smackdwon", "smackdown", - "smealting", "smelting", - "smeesters", "semesters", - "snadstorm", "sandstorm", - "snippetts", "snippets", - "snowfalke", "snowflake", - "snowflaek", "snowflake", - "snowlfake", "snowflake", - "snwoballs", "snowballs", - "snythesis", "synthesis", - "snythetic", "synthetic", - "socailism", "socialism", - "socailist", "socialist", - "socailize", "socialize", - "soceities", "societies", - "socialini", "socializing", - "socialiss", "socialists", - "socialsim", "socialism", - "socieites", "societies", - "socilaism", "socialism", - "socilaist", "socialist", - "sociopati", "sociopathic", - "sociopats", "sociopaths", - "socratees", "socrates", - "socrateks", "socrates", - "soemthing", "something", - "sohpomore", "sophomore", - "soliliquy", "soliloquy", - "somehting", "something", - "someoneis", "someones", - "somethign", "something", - "somethins", "somethings", - "sopohmore", "sophomore", - "sotryline", "storyline", - "soundtrak", "soundtrack", - "sountrack", "soundtrack", - "sourthern", "southern", - "souvenier", "souvenir", - "soveregin", "sovereign", - "sovereing", "sovereign", - "soveriegn", "sovereign", - "spacegoat", "scapegoat", - "spagehtti", "spaghetti", - "spahgetti", "spaghetti", - "sparlking", "sparkling", - "spartants", "spartans", - "specailly", "specially", - "specailty", "specialty", - "specality", "specialty", - "speciales", "specials", - "specialis", "specials", - "speciatly", "specialty", - "specifing", "specifying", - "specimine", "specimen", - "spectrail", "spectral", - "specualte", "speculate", - "speechers", "speeches", - "spehrical", "spherical", - "speically", "specially", - "spetember", "september", - "sphagetti", "spaghetti", - "splatooon", "splatoon", - "sponosred", "sponsored", - "sponsered", "sponsored", - "sponsores", "sponsors", - "spontanes", "spontaneous", - "sponzored", "sponsored", - "sprakling", "sparkling", - "sprinkeld", "sprinkled", - "squadroon", "squadron", - "squirrles", "squirrels", - "squirrtle", "squirrel", - "squrriels", "squirrels", - "srirachia", "sriracha", - "srirachra", "sriracha", - "stabliize", "stabilize", - "stainlees", "stainless", - "startegic", "strategic", - "startlxde", "startled", - "statisitc", "statistic", - "staurdays", "saturdays", - "steadilly", "steadily", - "stealthly", "stealthy", - "stichting", "stitching", - "sticthing", "stitching", - "stimulans", "stimulants", - "stockplie", "stockpile", - "stornegst", "strongest", - "stragetic", "strategic", - "straightn", "straighten", - "strangets", "strangest", - "strategis", "strategies", - "strawbery", "strawberry", - "streamade", "streamed", - "streamare", "streamer", - "streamear", "streamer", - "strechted", "stretched", - "strechtes", "stretches", - "strecthed", "stretched", - "strecthes", "stretches", - "stregnths", "strengths", - "strenghen", "strengthen", - "strengthn", "strengthen", - "strentghs", "strengths", - "stressade", "stressed", - "stressers", "stresses", - "strictist", "strictest", - "stringnet", "stringent", - "stroyline", "storyline", - "structual", "structural", - "structurs", "structures", - "strucutre", "structure", - "struggeld", "struggled", - "struggels", "struggles", - "stryofoam", "styrofoam", - "stuctured", "structured", - "stuggling", "struggling", - "stupitidy", "stupidity", - "sturcture", 
"structure", - "sturggled", "struggled", - "sturggles", "struggles", - "styrofaom", "styrofoam", - "subarmine", "submarine", - "subculter", "subculture", - "submachne", "submachine", - "subpecies", "subspecies", - "subscirbe", "subscribe", - "subsidary", "subsidiary", - "subsizide", "subsidize", - "subsquent", "subsequent", - "subsrcibe", "subscribe", - "substanse", "substances", - "substanta", "substantial", - "substante", "substantive", - "substarte", "substrate", - "substitue", "substitute", - "substract", "subtract", - "subtances", "substances", - "subtiltes", "subtitles", - "subtitels", "subtitles", - "subtletly", "subtlety", - "subtlties", "subtitles", - "succedded", "succeeded", - "succeedes", "succeeds", - "succesful", "successful", - "succesion", "succession", - "succesive", "successive", - "suceeding", "succeeding", - "sucesfuly", "successfully", - "sucessful", "successful", - "sucession", "succession", - "sucessive", "successive", - "sufferage", "suffrage", - "sufferred", "suffered", - "sufficent", "sufficient", - "suggestes", "suggests", - "suggestie", "suggestive", - "sumbarine", "submarine", - "sumberged", "submerged", - "summenors", "summoners", - "summoenrs", "summoners", - "sunderlad", "sunderland", - "sunglases", "sunglasses", - "superfluu", "superfluous", - "superiour", "superior", - "superisor", "superiors", - "supermare", "supermarket", - "superviso", "supervision", - "suposedly", "supposedly", - "supportes", "supports", - "suppreses", "suppress", - "supressed", "suppressed", - "supresses", "suppresses", - "suprising", "surprising", - "suprizing", "surprising", - "supsicion", "suspicion", - "surounded", "surrounded", - "surprized", "surprised", - "surronded", "surrounded", - "surrouded", "surrounded", - "surrouned", "surround", - "survivers", "survivors", - "survivied", "survived", - "survivour", "survivor", - "susbcribe", "subscribe", - "susbtrate", "substrate", - "susncreen", "sunscreen", - "suspectes", "suspects", - "suspendes", "suspense", - "suspensie", "suspense", - "swastikka", "swastika", - "sweatshit", "sweatshirt", - "sweetheat", "sweetheart", - "switchign", "switching", - "swithcing", "switching", - "swtiching", "switching", - "sydnicate", "syndicate", - "sykwalker", "skywalker", - "sylablles", "syllables", - "syllabels", "syllables", - "symbolsim", "symbolism", - "symettric", "symmetric", - "symmetral", "symmetric", - "symmetria", "symmetrical", - "symoblism", "symbolism", - "sympathie", "sympathize", - "symphoney", "symphony", - "symptomes", "symptoms", - "symptomps", "symptoms", - "synagouge", "synagogue", - "syndacite", "syndicate", - "syndiacte", "syndicate", - "synidcate", "syndicate", - "synonymes", "synonyms", - "synonymis", "synonyms", - "synonymns", "synonyms", - "synonymos", "synonymous", - "synonymus", "synonyms", - "synopsies", "synopsis", - "syntehsis", "synthesis", - "syntehtic", "synthetic", - "syntethic", "synthetic", - "syphyllis", "syphilis", - "syracusae", "syracuse", - "sytrofoam", "styrofoam", - "tablespon", "tablespoon", - "tacticaly", "tactically", - "tanenhill", "tannehill", - "tannheill", "tannehill", - "targetted", "targeted", - "tawainese", "taiwanese", - "tawianese", "taiwanese", - "taxanomic", "taxonomic", - "teamfighs", "teamfights", - "teamfigth", "teamfight", - "teamifght", "teamfight", - "teampseak", "teamspeak", - "teaspooon", "teaspoon", - "techician", "technician", - "techinque", "technique", - "technolgy", "technology", - "teeangers", "teenagers", - "tehtering", "tethering", - "telegrpah", "telegraph", - "televsion", 
"television", - "temafight", "teamfight", - "tempaltes", "templates", - "temparate", "temperate", - "templaras", "templars", - "templares", "templars", - "temporali", "temporarily", - "tenacitiy", "tenacity", - "tensiones", "tensions", - "tentacels", "tentacles", - "tentacuel", "tentacle", - "tentalces", "tentacles", - "termianls", "terminals", - "terminato", "termination", - "terorrism", "terrorism", - "terorrist", "terrorist", - "terrabyte", "terabyte", - "terribley", "terribly", - "terriblly", "terribly", - "terriroty", "territory", - "terrorits", "terrorist", - "terrorsim", "terrorism", - "tesitcles", "testicles", - "tesitmony", "testimony", - "testicels", "testicles", - "testomony", "testimony", - "texturers", "textures", - "thankfuly", "thankfully", - "thankyoou", "thankyou", - "themselfs", "themselves", - "themselvs", "themselves", - "themslves", "themselves", - "theologia", "theological", - "therafter", "thereafter", - "therefoer", "therefor", - "therefour", "therefor", - "theroists", "theorists", - "thetering", "tethering", - "thirlling", "thrilling", - "thirteeen", "thirteen", - "thoecracy", "theocracy", - "thoerists", "theorists", - "thoroughy", "thoroughly", - "thoughout", "throughout", - "threatend", "threatened", - "throrough", "thorough", - "throughly", "thoroughly", - "througout", "throughout", - "thrusdays", "thursdays", - "thurdsays", "thursdays", - "thursdsay", "thursdays", - "thursters", "thrusters", - "tiawanese", "taiwanese", - "timestmap", "timestamp", - "tirangles", "triangles", - "tocuhdown", "touchdown", - "toghether", "together", - "tolerence", "tolerance", - "tommorrow", "tomorrow", - "torandoes", "tornadoes", - "torchligt", "torchlight", - "torelable", "tolerable", - "toritllas", "tortillas", - "tornaodes", "tornadoes", - "torpeados", "torpedoes", - "torrentas", "torrents", - "torrentes", "torrents", - "tortialls", "tortillas", - "tortillia", "tortilla", - "tortillla", "tortilla", - "tottehnam", "tottenham", - "tottenahm", "tottenham", - "tottneham", "tottenham", - "toturials", "tutorials", - "touchdwon", "touchdown", - "touristas", "tourists", - "touristes", "tourists", - "touristey", "touristy", - "touristly", "touristy", - "touristsy", "touristy", - "tournamet", "tournament", - "toxicitiy", "toxicity", - "trafficed", "trafficked", - "tragicaly", "tragically", - "traileras", "trailers", - "traingles", "triangles", - "trainwrek", "trainwreck", - "traitoris", "traitors", - "traitorus", "traitors", - "tramautic", "traumatic", - "tranlsate", "translate", - "transalte", "translate", - "transcris", "transcripts", - "transcrit", "transcript", - "transferd", "transferred", - "transfere", "transferred", - "transfors", "transforms", - "transfrom", "transform", - "transiten", "transient", - "transitin", "transitions", - "transofrm", "transform", - "transplat", "transplant", - "trasnfers", "transfers", - "trasnform", "transform", - "trasnport", "transport", - "traversie", "traverse", - "travestry", "travesty", - "treasuers", "treasures", - "treasurey", "treasury", - "treatmens", "treatments", - "treausres", "treasures", - "tremendos", "tremendous", - "trhilling", "thrilling", - "trhusters", "thrusters", - "triangels", "triangles", - "trianlges", "triangles", - "tribunaal", "tribunal", - "triguered", "triggered", - "trinagles", "triangles", - "truamatic", "traumatic", - "truthfuly", "truthfully", - "tunrtable", "turntable", - "turnaroud", "turnaround", - "turntabel", "turntable", - "typcially", "typically", - "tyrranies", "tyrannies", - "ubiquitos", "ubiquitous", - 
"ugprading", "upgrading", - "ukrainain", "ukrainian", - "ukrainias", "ukrainians", - "ukrainina", "ukrainian", - "ukrainisn", "ukrainians", - "ukrianian", "ukrainian", - "ulitmatum", "ultimatum", - "ulteriour", "ulterior", - "umbrellla", "umbrella", - "unaminous", "unanimous", - "unanmious", "unanimous", - "unanswerd", "unanswered", - "unanymous", "unanimous", - "unbannend", "unbanned", - "uncensord", "uncensored", - "uncomited", "uncommitted", - "undercunt", "undercut", - "underdong", "underdog", - "undergard", "undergrad", - "underming", "undermining", - "understad", "understands", - "underwaer", "underwear", - "underware", "underwear", - "undescore", "underscore", - "unforseen", "unforeseen", - "unfortune", "unfortunate", - "unfriendy", "unfriendly", - "unhealhty", "unhealthy", - "unheathly", "unhealthy", - "unhelathy", "unhealthy", - "unicornis", "unicorns", - "unicornus", "unicorns", - "uniformes", "uniforms", - "uninamous", "unanimous", - "unintuive", "unintuitive", - "uniquelly", "uniquely", - "unisntall", "uninstall", - "univerity", "university", - "universse", "universes", - "univesity", "university", - "unnistall", "uninstall", - "unoffical", "unofficial", - "unopenend", "unopened", - "unplayabe", "unplayable", - "unplesant", "unpleasant", - "unpopluar", "unpopular", - "unrankend", "unranked", - "unreliabe", "unreliable", - "unrwitten", "unwritten", - "untrianed", "untrained", - "unusaully", "unusually", - "unuseable", "unusable", - "unusuable", "unusable", - "unvierses", "universes", - "unweildly", "unwieldy", - "unwieldly", "unwieldy", - "unwirtten", "unwritten", - "unworthly", "unworthy", - "upcomming", "upcoming", - "upgarding", "upgrading", - "upgradded", "upgraded", - "uplfiting", "uplifting", - "uplifitng", "uplifting", - "urkainian", "ukrainian", - "utlimatum", "ultimatum", - "vacciante", "vaccinate", - "vaccinato", "vaccination", - "vacciners", "vaccines", - "vacestomy", "vasectomy", - "vaguaries", "vagaries", - "vaibility", "viability", - "vaildated", "validated", - "vairables", "variables", - "valdiated", "validated", - "valentein", "valentine", - "valentien", "valentine", - "valentins", "valentines", - "validitiy", "validity", - "valueable", "valuable", - "vanadlism", "vandalism", - "vandalsim", "vandalism", - "varaibles", "variables", - "varations", "variations", - "variantes", "variants", - "vascetomy", "vasectomy", - "vastecomy", "vasectomy", - "veganisim", "veganism", - "vegetarin", "vegetarians", - "vegitable", "vegetable", - "vehementy", "vehemently", - "veiwpoint", "viewpoint", - "velantine", "valentine", - "vendettta", "vendetta", - "venegance", "vengeance", - "veneuzela", "venezuela", - "venezeula", "venezuela", - "venezulea", "venezuela", - "vengaence", "vengeance", - "vengenace", "vengeance", - "ventilato", "ventilation", - "verbatium", "verbatim", - "verfiying", "verifying", - "verifiyng", "verifying", - "verisions", "revisions", - "versalite", "versatile", - "versatily", "versatility", - "versiones", "versions", - "versitale", "versatile", - "verstaile", "versatile", - "verticaly", "vertically", - "veryifing", "verifying", - "vicotrian", "victorian", - "vicotries", "victories", - "victoires", "victories", - "victorain", "victorian", - "victorina", "victorian", - "victorios", "victorious", - "videogaem", "videogame", - "videogams", "videogames", - "vidoegame", "videogame", - "viewpiont", "viewpoint", - "vigilence", "vigilance", - "vigliante", "vigilante", - "vigourous", "vigorous", - "viligante", "vigilante", - "viloently", "violently", - "vincinity", 
"vicinity", - "vioalting", "violating", - "violentce", "violence", - "virbation", "vibration", - "virgintiy", "virginity", - "virignity", "virginity", - "virutally", "virtually", - "visibiliy", "visibility", - "vitaminas", "vitamins", - "vitamines", "vitamins", - "vitrually", "virtually", - "vociemail", "voicemail", - "voilating", "violating", - "voilation", "violation", - "voilently", "violently", - "volatiliy", "volatility", - "voleyball", "volleyball", - "volontary", "voluntary", - "volonteer", "volunteer", - "volunatry", "voluntary", - "volunteed", "volunteered", - "vriginity", "virginity", - "wallpapes", "wallpapers", - "warrantly", "warranty", - "warrriors", "warriors", - "wavelengh", "wavelength", - "weakenend", "weakened", - "weakneses", "weakness", - "weaknesss", "weaknesses", - "wealtheir", "wealthier", - "weaponary", "weaponry", - "wedensday", "wednesday", - "wednesdsy", "wednesdays", - "wednessay", "wednesdays", - "wednseday", "wednesday", - "welathier", "wealthier", - "wendesday", "wednesday", - "wesbtrook", "westbrook", - "westernes", "westerners", - "westrbook", "westbrook", - "whereever", "wherever", - "whietlist", "whitelist", - "whilrwind", "whirlwind", - "whilsting", "whistling", - "whipsered", "whispered", - "whislting", "whistling", - "whisperes", "whispers", - "whitelsit", "whitelist", - "whitleist", "whitelist", - "whitsling", "whistling", - "whrilwind", "whirlwind", - "whsipered", "whispered", - "whtielist", "whitelist", - "widespred", "widespread", - "widesread", "widespread", - "windshied", "windshield", - "wintesses", "witnesses", - "wisconisn", "wisconsin", - "wishlisht", "wishlist", - "wishpered", "whispered", - "withdrawl", "withdrawal", - "withelist", "whitelist", - "witnesess", "witnesses", - "wolrdview", "worldview", - "wolrdwide", "worldwide", - "wonderlad", "wonderland", - "wordlview", "worldview", - "wordlwide", "worldwide", - "worhtless", "worthless", - "workfroce", "workforce", - "worldivew", "worldview", - "worldveiw", "worldview", - "worstened", "worsened", - "worthelss", "worthless", - "xenbolade", "xenoblade", - "xenobalde", "xenoblade", - "xenophoby", "xenophobia", - "xeonblade", "xenoblade", - "yementite", "yemenite", - "yorkshrie", "yorkshire", - "yorskhire", "yorkshire", - "yosemitie", "yosemite", - "youngents", "youngest", - "yourselvs", "yourselves", - "zimbabwae", "zimbabwe", - "zionistas", "zionists", - "zionistes", "zionists", - "abandond", "abandoned", - "abdomine", "abdomen", - "abilitiy", "ability", - "abilties", "abilities", - "abondons", "abandons", - "aboslute", "absolute", - "abosrbed", "absorbed", - "abruplty", "abruptly", - "abrutply", "abruptly", - "abscence", "absence", - "absestos", "asbestos", - "absoluts", "absolutes", - "absolvte", "absolve", - "absorbes", "absorbs", - "absoulte", "absolute", - "abstante", "bastante", - "abudance", "abundance", - "abudcted", "abducted", - "abundunt", "abundant", - "aburptly", "abruptly", - "abuseres", "abusers", - "abusrdly", "absurdly", - "academis", "academics", - "accademy", "academy", - "acccused", "accused", - "acceptes", "accepts", - "accidens", "accidents", - "accideny", "accidently", - "accoring", "according", - "accountt", "accountant", - "accpeted", "accepted", - "accuarcy", "accuracy", - "accumule", "accumulate", - "accusato", "accusation", - "accussed", "accused", - "acedamia", "academia", - "acedemic", "academic", - "acheived", "achieved", - "acheives", "achieves", - "achieval", "achievable", - "acnedote", "anecdote", - "acording", "according", - "acornyms", "acronyms", - 
"acousitc", "acoustic", - "acoutsic", "acoustic", - "acovados", "avocados", - "acquifer", "acquire", - "acquited", "acquitted", - "acquried", "acquired", - "acronmys", "acronyms", - "acronysm", "acronyms", - "acroynms", "acronyms", - "acrynoms", "acronyms", - "acsended", "ascended", - "actaully", "actually", - "activite", "activities", - "activits", "activities", - "activley", "actively", - "actresss", "actresses", - "actualey", "actualy", - "actualiy", "actuality", - "actualky", "actualy", - "actualmy", "actualy", - "actualoy", "actualy", - "actualpy", "actualy", - "actualty", "actualy", - "acutally", "actually", - "acutions", "auctions", - "adaptare", "adapter", - "adbandon", "abandon", - "adbucted", "abducted", - "addictes", "addicts", - "addictin", "addictions", - "addictis", "addictions", - "addional", "additional", - "addopted", "adopted", - "addresed", "addressed", - "adealide", "adelaide", - "adecuate", "adequate", - "adeilade", "adelaide", - "adeladie", "adelaide", - "adeliade", "adelaide", - "adeqaute", "adequate", - "adheisve", "adhesive", - "adhevise", "adhesive", - "adivsors", "advisors", - "admiraal", "admiral", - "adolence", "adolescent", - "adorbale", "adorable", - "adovcacy", "advocacy", - "adpaters", "adapters", - "adquired", "acquired", - "adquires", "acquires", - "adresing", "addressing", - "adressed", "addressed", - "adroable", "adorable", - "adultrey", "adultery", - "adventue", "adventures", - "adventus", "adventures", - "advertis", "adverts", - "advesary", "adversary", - "adviseer", "adviser", - "adviseur", "adviser", - "advocade", "advocated", - "advocats", "advocates", - "advsiors", "advisors", - "aethists", "atheists", - "affaires", "affairs", - "affilate", "affiliate", - "affintiy", "affinity", - "affleunt", "affluent", - "affulent", "affluent", - "afircans", "africans", - "africain", "african", - "afternon", "afternoon", - "againnst", "against", - "agnositc", "agnostic", - "agonstic", "agnostic", - "agravate", "aggravate", - "agreemnt", "agreement", - "agregate", "aggregate", - "agressie", "agressive", - "agressor", "aggressor", - "agrieved", "aggrieved", - "agruable", "arguable", - "agruably", "arguably", - "agrument", "argument", - "ahtletes", "athletes", - "aincents", "ancients", - "airboner", "airborne", - "airbrone", "airborne", - "aircarft", "aircraft", - "airplans", "airplanes", - "airporta", "airports", - "airpsace", "airspace", - "airscape", "airspace", - "akransas", "arkansas", - "alchemey", "alchemy", - "alchohol", "alcohol", - "alcholic", "alcoholic", - "alcoholc", "alcoholics", - "aldutery", "adultery", - "aleniate", "alienate", - "algoritm", "algorithm", - "alimoney", "alimony", - "alirghty", "alrighty", - "allaince", "alliance", - "alledged", "alleged", - "alledges", "alleges", - "allegedy", "allegedly", - "allegely", "allegedly", - "allegric", "allergic", - "allergey", "allergy", - "allianse", "alliances", - "alligned", "aligned", - "allinace", "alliance", - "allopone", "allophone", - "allready", "already", - "almigthy", "almighty", - "alpahbet", "alphabet", - "alrigthy", "alrighty", - "altantic", "atlantic", - "alterato", "alteration", - "alternar", "alternator", - "althetes", "athletes", - "althetic", "athletic", - "altriusm", "altruism", - "altrusim", "altruism", - "alturism", "altruism", - "aluminim", "aluminium", - "alumnium", "aluminum", - "alunimum", "aluminum", - "amatersu", "amateurs", - "amaterus", "amateurs", - "amendmet", "amendments", - "amercian", "american", - "amercias", "americas", - "amernian", "armenian", - "amethsyt", 
"amethyst", - "ameythst", "amethyst", - "ammended", "amended", - "amnestry", "amnesty", - "amoungst", "amongst", - "amplifiy", "amplify", - "amplifly", "amplify", - "amrchair", "armchair", - "amrenian", "armenian", - "amtheyst", "amethyst", - "analgoue", "analogue", - "analisys", "analysis", - "analitic", "analytic", - "analouge", "analogue", - "analysie", "analyse", - "analysit", "analyst", - "analyste", "analyse", - "analysze", "analyse", - "analzyed", "analyzed", - "anaolgue", "analogue", - "anarchim", "anarchism", - "anaylses", "analyses", - "anaylsis", "analysis", - "anaylsts", "analysts", - "anaylzed", "analyzed", - "ancedote", "anecdote", - "anceints", "ancients", - "ancinets", "ancients", - "andoirds", "androids", - "andorids", "androids", - "andriods", "androids", - "anecdots", "anecdotes", - "anectode", "anecdote", - "anedocte", "anecdote", - "aneroxia", "anorexia", - "aneroxic", "anorexic", - "angostic", "agnostic", - "angrilly", "angrily", - "anicents", "ancients", - "animatie", "animate", - "animatte", "animate", - "anlayses", "analyses", - "annoints", "anoints", - "annouced", "announced", - "annoucne", "announce", - "anntenas", "antennas", - "anoerxia", "anorexia", - "anoerxic", "anorexic", - "anonymos", "anonymous", - "anoreixa", "anorexia", - "anounced", "announced", - "anoxeria", "anorexia", - "anoxeric", "anorexic", - "answeres", "answers", - "antartic", "antarctic", - "antennea", "antenna", - "antennna", "antenna", - "anticipe", "anticipate", - "antiquae", "antique", - "antivirs", "antivirus", - "anwsered", "answered", - "anyhting", "anything", - "anyhwere", "anywhere", - "anyoneis", "anyones", - "anythign", "anything", - "anytying", "anything", - "aparment", "apartment", - "apartmet", "apartments", - "apenines", "apennines", - "aperutre", "aperture", - "aplhabet", "alphabet", - "apologes", "apologise", - "aposltes", "apostles", - "apostels", "apostles", - "appaluse", "applause", - "apparant", "apparent", - "appareal", "apparel", - "appareil", "apparel", - "apperead", "appeared", - "applaued", "applaud", - "appluase", "applause", - "appology", "apology", - "apporach", "approach", - "appraoch", "approach", - "apreture", "aperture", - "apsotles", "apostles", - "aqaurium", "aquarium", - "aqcuired", "acquired", - "aquaduct", "aqueduct", - "aquairum", "aquarium", - "aquaruim", "aquarium", - "aquiring", "acquiring", - "aquitted", "acquitted", - "arbitary", "arbitrary", - "arbitray", "arbitrary", - "arbiture", "arbiter", - "architet", "architect", - "archtype", "archetype", - "aremnian", "armenian", - "argentia", "argentina", - "argubaly", "arguably", - "arguemet", "arguement", - "arguemtn", "arguement", - "ariborne", "airborne", - "aricraft", "aircraft", - "ariplane", "airplane", - "ariports", "airports", - "arispace", "airspace", - "aristote", "aristotle", - "aritfact", "artifact", - "arizonia", "arizona", - "arkasnas", "arkansas", - "arlighty", "alrighty", - "armamant", "armament", - "armenain", "armenian", - "armenina", "armenian", - "armpitts", "armpits", - "armstrog", "armstrong", - "arpanoid", "paranoid", - "arpeture", "aperture", - "arragned", "arranged", - "arrestes", "arrests", - "arrestos", "arrests", - "arsenaal", "arsenal", - "artemios", "artemis", - "artemius", "artemis", - "arthrits", "arthritis", - "articule", "articulate", - "artifacs", "artifacts", - "artifcat", "artifact", - "artilley", "artillery", - "artisitc", "artistic", - "artistas", "artists", - "arugable", "arguable", - "arugably", "arguably", - "arugment", "argument", - "asborbed", "absorbed", - 
"asburdly", "absurdly", - "ascneded", "ascended", - "asissted", "assisted", - "askreddt", "askreddit", - "asnwered", "answered", - "aspectos", "aspects", - "asperges", "aspergers", - "assasins", "assassins", - "assemple", "assemble", - "assertin", "assertions", - "asshates", "asshats", - "asshatts", "asshats", - "assimile", "assimilate", - "assistat", "assistants", - "assitant", "assistant", - "assmeble", "assemble", - "assmebly", "assembly", - "asssasin", "assassin", - "assualts", "assaults", - "asteorid", "asteroid", - "asteriks", "asterisk", - "asteriod", "asteroid", - "asterois", "asteroids", - "astersik", "asterisk", - "asthetic", "aesthetic", - "astronat", "astronaut", - "asutrian", "austrian", - "atheisim", "atheism", - "atheistc", "atheistic", - "atheltes", "athletes", - "atheltic", "athletic", - "athenean", "athenian", - "athesits", "atheists", - "athetlic", "athletic", - "athients", "athiest", - "atittude", "attitude", - "atlantia", "atlanta", - "atmoizer", "atomizer", - "atomzier", "atomizer", - "atribute", "attribute", - "atrifact", "artifact", - "attackes", "attackers", - "attemped", "attempted", - "attemted", "attempted", - "attemtps", "attempts", - "attidute", "attitude", - "attitide", "attitude", - "attribue", "attribute", - "aucitons", "auctions", - "audactiy", "audacity", - "audcaity", "audacity", - "audeince", "audience", - "audiobok", "audiobook", - "austeriy", "austerity", - "austiran", "austrian", - "austitic", "autistic", - "austrain", "austrian", - "australa", "australian", - "austrija", "austria", - "austrila", "austria", - "autisitc", "autistic", - "autoattk", "autoattack", - "autograh", "autograph", - "automato", "automation", - "automony", "autonomy", - "autority", "authority", - "autsitic", "autistic", - "auxilary", "auxiliary", - "avacodos", "avocados", - "avaiable", "available", - "availabe", "available", - "availble", "available", - "avaition", "aviation", - "avalable", "available", - "avalance", "avalanche", - "avataras", "avatars", - "avatards", "avatars", - "avatares", "avatars", - "averadge", "averaged", - "avergaed", "averaged", - "avergaes", "averages", - "aviaiton", "aviation", - "avilable", "available", - "avnegers", "avengers", - "avodacos", "avocados", - "awekened", "weakened", - "awesomey", "awesomely", - "awfullly", "awfully", - "awkwardy", "awkwardly", - "awnsered", "answered", - "babysite", "babysitter", - "baceause", "because", - "bacehlor", "bachelor", - "bachleor", "bachelor", - "bacholer", "bachelor", - "backeast", "backseat", - "backerds", "backers", - "backfied", "backfield", - "backpacs", "backpacks", - "balcanes", "balances", - "balconey", "balcony", - "balconny", "balcony", - "ballistc", "ballistic", - "balnaced", "balanced", - "banannas", "bananas", - "banditas", "bandits", - "bandwith", "bandwidth", - "bangkock", "bangkok", - "baptisim", "baptism", - "barabric", "barbaric", - "barbarin", "barbarian", - "barbaris", "barbarians", - "bardford", "bradford", - "bargaing", "bargaining", - "baristia", "barista", - "barrakcs", "barracks", - "barrells", "barrels", - "basicaly", "basically", - "basiclay", "basicly", - "basicley", "basicly", - "basicliy", "basicly", - "batistia", "batista", - "battalin", "battalion", - "bayonent", "bayonet", - "beachead", "beachhead", - "beacuoup", "beaucoup", - "beardude", "bearded", - "beastley", "beastly", - "beatiful", "beautiful", - "beccause", "because", - "becuasse", "becuase", - "befirend", "befriend", - "befreind", "befriend", - "begginer", "beginner", - "begginig", "begging", - "begginng", "begging", 
- "begining", "beginning", - "beginnig", "beginning", - "behaivor", "behavior", - "behavios", "behaviours", - "behavoir", "behavior", - "behavour", "behavior", - "behngazi", "benghazi", - "behtesda", "bethesda", - "beleived", "believed", - "beleiver", "believer", - "beleives", "believes", - "beliefes", "beliefs", - "benefica", "beneficial", - "bengahzi", "benghazi", - "bengalas", "bengals", - "bengalos", "bengals", - "bengazhi", "benghazi", - "benghzai", "benghazi", - "bengzhai", "benghazi", - "benhgazi", "benghazi", - "benidect", "benedict", - "benifits", "benefits", - "berekley", "berkeley", - "berserkr", "berserker", - "beseiged", "besieged", - "betehsda", "bethesda", - "beteshda", "bethesda", - "bethdesa", "bethesda", - "bethedsa", "bethesda", - "bethseda", "bethesda", - "beyoncye", "beyonce", - "bibilcal", "biblical", - "bicylces", "bicycles", - "bigfooot", "bigfoot", - "bigining", "beginning", - "bilbical", "biblical", - "billboad", "billboard", - "bilsters", "blisters", - "bilzzard", "blizzard", - "bilzzcon", "blizzcon", - "biologia", "biological", - "birhtday", "birthday", - "birsbane", "brisbane", - "birthdsy", "birthdays", - "biseuxal", "bisexual", - "bisexaul", "bisexual", - "bitcions", "bitcoins", - "bitocins", "bitcoins", - "blackade", "blacked", - "blackend", "blacked", - "blackjak", "blackjack", - "blacklit", "blacklist", - "blatanty", "blatantly", - "blessins", "blessings", - "blessure", "blessing", - "bloggare", "blogger", - "bloggeur", "blogger", - "bluebery", "blueberry", - "bluetooh", "bluetooth", - "blugaria", "bulgaria", - "boardway", "broadway", - "bollcoks", "bollocks", - "bomberos", "bombers", - "bookmars", "bookmarks", - "boradway", "broadway", - "boredoom", "boredom", - "bouldore", "boulder", - "bounites", "bounties", - "boutnies", "bounties", - "boutqiue", "boutique", - "bouyancy", "buoyancy", - "boyfried", "boyfriend", - "bradcast", "broadcast", - "bradfrod", "bradford", - "brakeout", "breakout", - "braodway", "broadway", - "braverly", "bravery", - "breathis", "breaths", - "breathos", "breaths", - "brekaout", "breakout", - "brendamn", "brendan", - "breweres", "brewers", - "brewerey", "brewery", - "brewerks", "brewers", - "brewerys", "brewers", - "brigaged", "brigade", - "brigated", "brigade", - "brigthen", "brighten", - "briliant", "brilliant", - "brillant", "brilliant", - "bristool", "bristol", - "brithday", "birthday", - "brittish", "british", - "briusers", "bruisers", - "broadbad", "broadband", - "broadcat", "broadcasts", - "broadley", "broadly", - "brocolli", "broccoli", - "brodaway", "broadway", - "broncoes", "broncos", - "broswing", "browsing", - "browines", "brownies", - "browisng", "browsing", - "brtually", "brutally", - "brugundy", "burgundy", - "bruisend", "bruised", - "brussles", "brussels", - "brusting", "bursting", - "bubblews", "bubbles", - "buddhits", "buddhist", - "buddhsim", "buddhism", - "buddishm", "buddhism", - "buddisht", "buddhist", - "buglaria", "bulgaria", - "buhddism", "buddhism", - "buhddist", "buddhist", - "buidlers", "builders", - "buidling", "building", - "buildins", "buildings", - "buisness", "business", - "bulagria", "bulgaria", - "bulgaira", "bulgaria", - "buliders", "builders", - "buliding", "building", - "bulletts", "bullets", - "burisers", "bruisers", - "burriots", "burritos", - "burritio", "burrito", - "burritto", "burrito", - "burrtios", "burritos", - "burssels", "brussels", - "burtally", "brutally", - "burtsing", "bursting", - "busrting", "bursting", - "butcherd", "butchered", - "butterey", "buttery", - "butterfy", 
"butterfly", - "butterry", "buttery", - "butthoel", "butthole", - "bycicles", "bicycles", - "cabbagge", "cabbage", - "cabients", "cabinets", - "cabinate", "cabinet", - "cabinent", "cabinet", - "cabniets", "cabinets", - "caclulus", "calculus", - "cafetera", "cafeteria", - "caffinee", "caffeine", - "cahsiers", "cashiers", - "cainster", "canister", - "calander", "calendar", - "calcular", "calculator", - "calgarry", "calgary", - "calibler", "calibre", - "caloires", "calories", - "calrkson", "clarkson", - "calroies", "calories", - "calssify", "classify", - "calulate", "calculate", - "calymore", "claymore", - "camapign", "campaign", - "cambodai", "cambodia", - "camboida", "cambodia", - "cambpell", "campbell", - "cambride", "cambridge", - "cambrige", "cambridge", - "camoufle", "camouflage", - "campagin", "campaign", - "campaing", "campaign", - "campains", "campaigns", - "camperas", "campers", - "camperos", "campers", - "canadias", "canadians", - "cananbis", "cannabis", - "cancelas", "cancels", - "canceles", "cancels", - "cancells", "cancels", - "canceres", "cancers", - "cancerns", "cancers", - "cancerus", "cancers", - "candiate", "candidate", - "candiens", "candies", - "canistre", "canister", - "cannabil", "cannibal", - "cannbial", "cannibal", - "cannibas", "cannabis", - "cansiter", "canister", - "capitans", "captains", - "capitola", "capital", - "capitulo", "capitol", - "capmbell", "campbell", - "capsuels", "capsules", - "capsulse", "capsules", - "capsumel", "capsule", - "capteurs", "captures", - "captials", "capitals", - "captians", "captains", - "capusles", "capsules", - "caputres", "captures", - "cardboad", "cardboard", - "cardianl", "cardinal", - "cardnial", "cardinal", - "careflly", "carefully", - "carefull", "careful", - "carefuly", "carefully", - "caricate", "caricature", - "caridgan", "cardigan", - "caridnal", "cardinal", - "carinval", "carnival", - "carloina", "carolina", - "carnagie", "carnegie", - "carnigie", "carnegie", - "carnvial", "carnival", - "carrotts", "carrots", - "carrotus", "carrots", - "cartells", "cartels", - "cartmaan", "cartman", - "cartride", "cartridge", - "cartrige", "cartridge", - "carvinal", "carnival", - "casaulty", "casualty", - "casheirs", "cashiers", - "cashieer", "cashier", - "cashires", "cashiers", - "castleos", "castles", - "castlers", "castles", - "casulaty", "casualty", - "cataclym", "cataclysm", - "catagory", "category", - "cataline", "catiline", - "cataloge", "catalogue", - "catalsyt", "catalyst", - "cataylst", "catalyst", - "cathloic", "catholic", - "catlayst", "catalyst", - "caucasin", "caucasian", - "causalty", "casualty", - "cellural", "cellular", - "celullar", "cellular", - "celverly", "cleverly", - "cemetary", "cemetery", - "centeres", "centers", - "centerns", "centers", - "centrase", "centres", - "centrers", "centres", - "ceratine", "creatine", - "cerberal", "cerebral", - "cerbreus", "cerberus", - "cerbures", "cerberus", - "ceremone", "ceremonies", - "cerimony", "ceremony", - "ceromony", "ceremony", - "certainy", "certainty", - "challege", "challenge", - "chambear", "chamber", - "chambres", "chambers", - "champage", "champagne", - "chanisaw", "chainsaw", - "chanlder", "chandler", - "charcaol", "charcoal", - "chargehr", "charger", - "chargeur", "charger", - "chariman", "chairman", - "charimsa", "charisma", - "charmisa", "charisma", - "charocal", "charcoal", - "charsima", "charisma", - "chasiers", "cashiers", - "chassids", "chassis", - "chassies", "chassis", - "chatolic", "catholic", - "chcukles", "chuckles", - "checkare", "checker", - "checkear", 
"checker", - "cheesees", "cheeses", - "cheeseus", "cheeses", - "cheetoos", "cheetos", - "chemcial", "chemical", - "chemisty", "chemistry", - "chernobl", "chernobyl", - "chiansaw", "chainsaw", - "chidlish", "childish", - "chihuaha", "chihuahua", - "childres", "childrens", - "chillade", "chilled", - "chillead", "chilled", - "chillend", "chilled", - "chilvary", "chivalry", - "chinesse", "chinese", - "chivarly", "chivalry", - "chivlary", "chivalry", - "chlidish", "childish", - "chlroine", "chlorine", - "chmabers", "chambers", - "chocolae", "chocolates", - "chocolet", "chocolates", - "choesive", "cohesive", - "choicers", "choices", - "cholrine", "chlorine", - "chorline", "chlorine", - "chracter", "character", - "christin", "christian", - "chroline", "chlorine", - "chromose", "chromosome", - "chronice", "chronicles", - "chruches", "churches", - "chuckels", "chuckles", - "cielings", "ceilings", - "cigarete", "cigarettes", - "cigarets", "cigarettes", - "cilmbers", "climbers", - "cilnatro", "cilantro", - "ciltoris", "clitoris", - "circiuts", "circuits", - "circkets", "crickets", - "circlebs", "circles", - "circluar", "circular", - "ciricuit", "circuit", - "cirlcing", "circling", - "ciruclar", "circular", - "clannand", "clannad", - "clarifiy", "clarify", - "clarskon", "clarkson", - "clasical", "classical", - "classrom", "classroom", - "classsic", "classics", - "clausens", "clauses", - "cleanies", "cleanse", - "cleasner", "cleanser", - "clenaser", "cleanser", - "clevelry", "cleverly", - "clhorine", "chlorine", - "cliamtes", "climates", - "cliantro", "cilantro", - "clickare", "clicker", - "clickbat", "clickbait", - "clickear", "clicker", - "clientes", "clients", - "clincial", "clinical", - "clinicas", "clinics", - "clinicos", "clinics", - "clipboad", "clipboard", - "clitiros", "clitoris", - "closeing", "closing", - "closeley", "closely", - "clyamore", "claymore", - "clyinder", "cylinder", - "cmoputer", "computer", - "coindice", "coincide", - "collapes", "collapse", - "collares", "collars", - "collaris", "collars", - "collaros", "collars", - "collaspe", "collapse", - "colleage", "colleagues", - "collecte", "collective", - "collegue", "colleague", - "collisin", "collisions", - "collosal", "colossal", - "collpase", "collapse", - "coloardo", "colorado", - "colordao", "colorado", - "colubmia", "columbia", - "columnas", "columns", - "comadres", "comrades", - "comander", "commander", - "comandos", "commandos", - "comapany", "company", - "comapres", "compares", - "combiens", "combines", - "combinig", "combining", - "comediac", "comedic", - "comedias", "comedians", - "comestic", "cosmetic", - "comision", "commission", - "comiting", "committing", - "comitted", "committed", - "comittee", "committee", - "commandd", "commanded", - "commecen", "commence", - "commedic", "comedic", - "commense", "commenters", - "commenty", "commentary", - "commiest", "commits", - "commited", "committed", - "commitee", "committee", - "commites", "commits", - "committe", "committee", - "committs", "commits", - "commitus", "commits", - "commmand", "command", - "communit", "communist", - "companis", "companions", - "comparse", "compares", - "comparte", "compare", - "compasso", "compassion", - "compelte", "complete", - "compense", "compensate", - "complais", "complains", - "complane", "complacent", - "complate", "complacent", - "compleet", "complete", - "completi", "complexity", - "complets", "completes", - "complety", "completely", - "complexs", "complexes", - "complext", "complexity", - "complexy", "complexity", - "complict", 
"complicit", - "complier", "compiler", - "compones", "compose", - "componet", "components", - "componts", "compost", - "composet", "compost", - "composit", "compost", - "composte", "compose", - "comprese", "compressed", - "compreso", "compressor", - "compsers", "compress", - "comptown", "compton", - "compunet", "compute", - "computre", "compute", - "comradre", "comrade", - "comsetic", "cosmetic", - "conatins", "contains", - "conceald", "concealed", - "conceide", "conceived", - "conceled", "concede", - "concened", "concede", - "concepta", "conceptual", - "concered", "concede", - "concernt", "concert", - "concerte", "concrete", - "concesso", "concession", - "conceted", "concede", - "conceved", "concede", - "concibes", "concise", - "concider", "consider", - "concides", "concise", - "concious", "conscious", - "conclued", "conclude", - "concluse", "conclusive", - "concluso", "conclusion", - "concreet", "concrete", - "concrets", "concerts", - "condemnd", "condemned", - "conditon", "condition", - "condomes", "condoms", - "condomns", "condoms", - "conduict", "conduit", - "conected", "connected", - "conencts", "connects", - "confeses", "confess", - "confesos", "confess", - "confesso", "confession", - "configue", "configure", - "confilct", "conflict", - "confirmd", "confirmed", - "conflcit", "conflict", - "conflics", "conflicts", - "confrims", "confirms", - "conicide", "coincide", - "conlcude", "conclude", - "conqueor", "conquer", - "conquerd", "conquered", - "conqured", "conquered", - "conscent", "consent", - "consious", "conscious", - "constans", "constants", - "constast", "constants", - "constatn", "constant", - "constrat", "constraint", - "construt", "constructs", - "containd", "contained", - "containg", "containing", - "contaire", "containers", - "contanti", "contacting", - "contense", "contenders", - "contenst", "contents", - "contexta", "contextual", - "contextl", "contextual", - "contians", "contains", - "contined", "continued", - "contines", "continents", - "continum", "continuum", - "continus", "continues", - "continut", "continuity", - "continuu", "continuous", - "contracr", "contractor", - "contracs", "contracts", - "controll", "control", - "convenit", "convenient", - "convento", "convention", - "converst", "converts", - "convertr", "converter", - "conviced", "convinced", - "convicto", "conviction", - "convingi", "convincing", - "convinse", "convinces", - "cooldows", "cooldowns", - "coordine", "coordinate", - "coralina", "carolina", - "corollla", "corolla", - "corolloa", "corolla", - "corosion", "corrosion", - "corpsers", "corpses", - "corrdior", "corridor", - "correcty", "correctly", - "correnti", "correcting", - "corretly", "correctly", - "corrupto", "corruption", - "cosemtic", "cosmetic", - "cosutmes", "costumes", - "couldnot", "couldnt", - "coulored", "coloured", - "counries", "countries", - "counseil", "counsel", - "counsole", "counsel", - "counterd", "countered", - "countert", "counteract", - "countres", "counters", - "courtrom", "courtroom", - "courtsey", "courtesy", - "cousines", "cousins", - "cousings", "cousins", - "coutners", "counters", - "covanent", "covenant", - "coverted", "converted", - "coyotees", "coyotes", - "cpatains", "captains", - "cranbery", "cranberry", - "crayones", "crayons", - "creaeted", "created", - "createin", "creatine", - "createur", "creature", - "creatien", "creatine", - "creepgin", "creeping", - "cricling", "circling", - "cringely", "cringey", - "cringery", "cringey", - "criticas", "critics", - "critices", "critics", - "criticie", "criticise", - 
"criticim", "criticisms", - "criticis", "critics", - "criticms", "critics", - "criticos", "critics", - "criticts", "critics", - "criticus", "critics", - "critiera", "criteria", - "critized", "criticized", - "croatioa", "croatia", - "crossfie", "crossfire", - "crosshar", "crosshair", - "crosspot", "crosspost", - "crowbahr", "crowbar", - "cruasder", "crusader", - "cruciaal", "crucial", - "crucibel", "crucible", - "cruicble", "crucible", - "crusdaer", "crusader", - "crusiers", "cruisers", - "crusiing", "cruising", - "cruthces", "crutches", - "cthulhlu", "cthulhu", - "cthulluh", "cthulhu", - "cubpoard", "cupboard", - "cuddleys", "cuddles", - "culprint", "culprit", - "cultrual", "cultural", - "culutral", "cultural", - "cupbaord", "cupboard", - "cupborad", "cupboard", - "curcible", "crucible", - "curisers", "cruisers", - "curising", "cruising", - "currecny", "currency", - "currence", "currencies", - "currenly", "currently", - "currenty", "currently", - "cursader", "crusader", - "custcene", "cutscene", - "cutsceen", "cutscene", - "cutscens", "cutscenes", - "cutsence", "cutscene", - "cylcists", "cyclists", - "cylidner", "cylinder", - "cylindre", "cylinder", - "cynisicm", "cynicism", - "cyrstals", "crystals", - "dacquiri", "daiquiri", - "daimonds", "diamonds", - "dangeros", "dangers", - "dangerus", "dangers", - "darkenss", "darkness", - "darnkess", "darkness", - "dashboad", "dashboard", - "daugther", "daughter", - "deadlfit", "deadlift", - "deadlifs", "deadlifts", - "deafeted", "defeated", - "deafults", "defaults", - "dealying", "delaying", - "deamenor", "demeanor", - "deathcat", "deathmatch", - "debuffes", "debuffs", - "debufffs", "debuffs", - "decalred", "declared", - "decalres", "declares", - "decembre", "december", - "decidely", "decidedly", - "decieved", "deceived", - "decifits", "deficits", - "decipted", "depicted", - "declears", "declares", - "declinig", "declining", - "decmeber", "december", - "decribed", "described", - "decribes", "describes", - "dedicato", "dedication", - "deductie", "deductible", - "defautls", "defaults", - "defectos", "defects", - "defectus", "defects", - "defendas", "defends", - "defendes", "defenders", - "defendis", "defends", - "defendre", "defender", - "defendrs", "defends", - "defensea", "defenseman", - "defensen", "defenseman", - "defensie", "defensive", - "defetead", "defeated", - "deffined", "defined", - "deficiet", "deficient", - "definate", "definite", - "definaty", "definately", - "definety", "definetly", - "definito", "definition", - "definitv", "definitive", - "deflatin", "deflation", - "deflecto", "deflection", - "defualts", "defaults", - "degarded", "degraded", - "degenere", "degenerate", - "degraged", "degrade", - "degrated", "degrade", - "deisgned", "designed", - "deisgner", "designer", - "dekstops", "desktops", - "delcared", "declared", - "delcares", "declares", - "delepted", "depleted", - "delivere", "deliveries", - "delpeted", "depleted", - "delpoyed", "deployed", - "delyaing", "delaying", - "demandas", "demands", - "demandes", "demands", - "demenaor", "demeanor", - "democray", "democracy", - "demolito", "demolition", - "denseley", "densely", - "densitiy", "density", - "deomcrat", "democrat", - "deovtion", "devotion", - "departer", "departure", - "departue", "departure", - "depcited", "depicted", - "depelted", "depleted", - "dependat", "dependant", - "depictes", "depicts", - "depictin", "depictions", - "depolyed", "deployed", - "depositd", "deposited", - "depostis", "deposits", - "depresse", "depressive", - "depresso", "depression", - "derivate", 
"derivative", - "descened", "descend", - "descibed", "described", - "descirbe", "describe", - "descrise", "describes", - "desgined", "designed", - "desginer", "designer", - "desicive", "decisive", - "designad", "designated", - "designes", "designs", - "designet", "designated", - "desinged", "designed", - "desinger", "designer", - "desitned", "destined", - "desktiop", "desktop", - "desorder", "disorder", - "despides", "despised", - "despiste", "despise", - "destiney", "destiny", - "destinty", "destiny", - "destkops", "desktops", - "destorys", "destroys", - "destrose", "destroyers", - "destroyd", "destroyed", - "destroyr", "destroyers", - "detalied", "detailed", - "detectas", "detects", - "detectes", "detects", - "detectie", "detectives", - "determen", "determines", - "devasted", "devastated", - "develope", "develop", - "devialet", "deviate", - "deviatie", "deviate", - "devilers", "delivers", - "devloved", "devolved", - "devovled", "devolved", - "diaganol", "diagonal", - "diagnoal", "diagonal", - "diagnoes", "diagnose", - "diagnosi", "diagnostic", - "diagonse", "diagnose", - "diahrrea", "diarrhea", - "dialetcs", "dialects", - "dialgoue", "dialogue", - "dialouge", "dialogue", - "diarreah", "diarrhea", - "diarreha", "diarrhea", - "dichtomy", "dichotomy", - "dickisch", "dickish", - "dicovers", "discovers", - "dicovery", "discovery", - "dicussed", "discussed", - "diferent", "different", - "differnt", "different", - "difficut", "difficulty", - "diffrent", "different", - "diganose", "diagnose", - "dignitiy", "dignity", - "dimaonds", "diamonds", - "dinasour", "dinosaur", - "dinosaus", "dinosaurs", - "dinosuar", "dinosaur", - "dinsoaur", "dinosaur", - "dionsaur", "dinosaur", - "diphtong", "diphthong", - "diplomma", "diploma", - "dipthong", "diphthong", - "direclty", "directly", - "directin", "directions", - "directix", "directx", - "directos", "directors", - "directoy", "directory", - "directrx", "directx", - "dirfting", "drifting", - "disabeld", "disabled", - "disabels", "disables", - "disagred", "disagreed", - "disagres", "disagrees", - "disbaled", "disabled", - "disbales", "disables", - "disbelif", "disbelief", - "dischard", "discharged", - "dischare", "discharged", - "discound", "discounted", - "discoure", "discourse", - "discoved", "discovered", - "discreto", "discretion", - "discribe", "describe", - "disentry", "dysentery", - "disgiuse", "disguise", - "dishoner", "dishonored", - "dishonet", "dishonesty", - "dislikse", "dislikes", - "dismante", "dismantle", - "dismisse", "dismissive", - "disolved", "dissolved", - "dispacth", "dispatch", - "dispalys", "displays", - "dispence", "dispense", - "dispersa", "dispensary", - "displayd", "displayed", - "disposle", "dispose", - "disposte", "dispose", - "dispoves", "dispose", - "disptach", "dispatch", - "disricts", "districts", - "dissovle", "dissolve", - "distates", "distaste", - "distatse", "distaste", - "disticnt", "distinct", - "distorto", "distortion", - "distrcit", "district", - "districs", "districts", - "disturbd", "disturbed", - "disupted", "disputed", - "disuptes", "disputes", - "diversed", "diverse", - "diversiy", "diversify", - "dividens", "dividends", - "divintiy", "divinity", - "divisons", "divisions", - "doapmine", "dopamine", - "docrines", "doctrines", - "docrtine", "doctrine", - "doctines", "doctrines", - "doctirne", "doctrine", - "doctrins", "doctrines", - "dogamtic", "dogmatic", - "dolhpins", "dolphins", - "domapine", "dopamine", - "domecrat", "democrat", - "domiante", "dominate", - "dominato", "domination", - "dominats", "dominates", - 
"dominent", "dominant", - "dominoin", "dominion", - "donwload", "download", - "donwvote", "downvote", - "doomdsay", "doomsday", - "doosmday", "doomsday", - "doplhins", "dolphins", - "dopmaine", "dopamine", - "dormtund", "dortmund", - "dortumnd", "dortmund", - "dotrmund", "dortmund", - "douchely", "douchey", - "doucheus", "douches", - "dowloads", "downloads", - "downlaod", "download", - "downloas", "downloads", - "downstar", "downstairs", - "downvore", "downvoters", - "downvotr", "downvoters", - "downvots", "downvotes", - "draculea", "dracula", - "draculla", "dracula", - "dragones", "dragons", - "dragonus", "dragons", - "drfiting", "drifting", - "driectly", "directly", - "drifitng", "drifting", - "driveris", "drivers", - "drotmund", "dortmund", - "duaghter", "daughter", - "dumbbels", "dumbbells", - "dumptser", "dumpster", - "dumspter", "dumpster", - "dunegons", "dungeons", - "dungeoun", "dungeon", - "dungoens", "dungeons", - "dupicate", "duplicate", - "duplicas", "duplicates", - "dwarvens", "dwarves", - "dyanmics", "dynamics", - "dyanmite", "dynamite", - "dymanics", "dynamics", - "dymanite", "dynamite", - "dynastry", "dynasty", - "dysentry", "dysentery", - "dysphora", "dysphoria", - "earilest", "earliest", - "eatswood", "eastwood", - "eceonomy", "economy", - "ecidious", "deciduous", - "ecologia", "ecological", - "ecomonic", "economic", - "ecstacys", "ecstasy", - "ecstascy", "ecstasy", - "ecstasty", "ecstasy", - "ectastic", "ecstatic", - "editoras", "editors", - "editores", "editors", - "efficent", "efficient", - "egpytian", "egyptian", - "egyptain", "egyptian", - "egytpian", "egyptian", - "ehtereal", "ethereal", - "ehternet", "ethernet", - "eigtheen", "eighteen", - "electhor", "electro", - "electorn", "electron", - "elementy", "elementary", - "elephans", "elephants", - "elevatin", "elevation", - "elicided", "elicited", - "eligable", "eligible", - "elimiate", "eliminate", - "eliminas", "eliminates", - "elitisim", "elitism", - "elitistm", "elitism", - "ellected", "elected", - "embarass", "embarrass", - "embargos", "embargoes", - "embarras", "embarrass", - "embassay", "embassy", - "embassey", "embassy", - "embasssy", "embassy", - "emergend", "emerged", - "emergerd", "emerged", - "eminated", "emanated", - "emminent", "eminent", - "emmisary", "emissary", - "emmision", "emission", - "emmiting", "emitting", - "emmitted", "emitted", - "empathie", "empathize", - "empirial", "empirical", - "emulatin", "emulation", - "enahnces", "enhances", - "enchanct", "enchant", - "encolsed", "enclosed", - "endanged", "endangered", - "endevors", "endeavors", - "endevour", "endeavour", - "endlessy", "endlessly", - "endorces", "endorse", - "engeneer", "engineer", - "engeries", "energies", - "engineed", "engineered", - "engrames", "engrams", - "engramms", "engrams", - "enigneer", "engineer", - "enitrely", "entirely", - "enlcosed", "enclosed", - "enlsaved", "enslaved", - "ensalved", "enslaved", - "enterity", "entirety", - "entierly", "entirely", - "entierty", "entirety", - "entilted", "entitled", - "entirley", "entirely", - "entiteld", "entitled", - "entitity", "entity", - "entropay", "entropy", - "entrophy", "entropy", - "ephipany", "epiphany", - "epihpany", "epiphany", - "epilespy", "epilepsy", - "epilgoue", "epilogue", - "episdoes", "episodes", - "epitomie", "epitome", - "epliepsy", "epilepsy", - "epliogue", "epilogue", - "epsiodes", "episodes", - "epsresso", "espresso", - "eqaulity", "equality", - "eqaution", "equation", - "equailty", "equality", - "eraticly", "erratically", - "erroneos", "erroneous", - "errupted", 
"erupted", - "escalato", "escalation", - "esctatic", "ecstatic", - "esential", "essential", - "esitmate", "estimate", - "esperate", "seperate", - "esportes", "esports", - "estiamte", "estimate", - "estoeric", "esoteric", - "estonija", "estonia", - "estoniya", "estonia", - "etherael", "ethereal", - "etherent", "ethernet", - "ethicaly", "ethically", - "etiquete", "etiquette", - "etrailer", "retailer", - "eugencis", "eugenics", - "eugneics", "eugenics", - "euhporia", "euphoria", - "euhporic", "euphoric", - "euorpean", "european", - "euphoira", "euphoria", - "euphroia", "euphoria", - "euphroic", "euphoric", - "europian", "european", - "eurpoean", "european", - "evangers", "avengers", - "everyons", "everyones", - "evidencd", "evidenced", - "evidende", "evidenced", - "evloving", "evolving", - "evolveds", "evolves", - "evolveos", "evolves", - "evovling", "evolving", - "excecute", "execute", - "excedded", "exceeded", - "excelent", "excellent", - "exceptin", "exceptions", - "excerise", "exercise", - "excisted", "existed", - "exclusie", "exclusives", - "exculded", "excluded", - "exculdes", "excludes", - "exection", "execution", - "exectued", "executed", - "executie", "executive", - "executin", "execution", - "exellent", "excellent", - "exerbate", "exacerbate", - "exercide", "exercised", - "exercies", "exercise", - "exersice", "exercise", - "exersize", "exercise", - "exhalted", "exalted", - "exhaustn", "exhaustion", - "exhausto", "exhaustion", - "exicting", "exciting", - "exisitng", "existing", - "existane", "existance", - "existant", "existent", - "existend", "existed", - "exlcuded", "excluded", - "exlcudes", "excludes", - "exlporer", "explorer", - "exoticas", "exotics", - "exoticos", "exotics", - "expalins", "explains", - "expandas", "expands", - "expandes", "expands", - "expansie", "expansive", - "expectes", "expects", - "expectus", "expects", - "expedito", "expedition", - "expences", "expense", - "expensie", "expense", - "expensve", "expense", - "expertas", "experts", - "expertis", "experts", - "expertos", "experts", - "expireds", "expires", - "explaind", "explained", - "explaing", "explaining", - "expliots", "exploits", - "explodie", "explode", - "exploint", "exploit", - "explosie", "explosive", - "explosin", "explosions", - "exploted", "explode", - "expoldes", "explodes", - "expolits", "exploits", - "exportas", "exports", - "exportes", "exports", - "exportfs", "exports", - "exposees", "exposes", - "exposito", "exposition", - "expresse", "expressive", - "expresss", "expresses", - "expressy", "expressly", - "exressed", "expressed", - "exsitent", "existent", - "exsiting", "existing", - "extactly", "exactly", - "extemely", "extremely", - "extendes", "extends", - "extendos", "extends", - "extenion", "extension", - "extensie", "extensive", - "extensis", "extensions", - "extortin", "extortion", - "extracto", "extraction", - "extreems", "extremes", - "extremly", "extremely", - "eygptian", "egyptian", - "faboulus", "fabulous", - "fabricas", "fabrics", - "fabrices", "fabrics", - "fabricus", "fabrics", - "faceplam", "facepalm", - "facilisi", "facilities", - "faciltiy", "facility", - "facsists", "fascists", - "factores", "factors", - "factorys", "factors", - "factualy", "factually", - "faggotts", "faggots", - "faggotus", "faggots", - "falcones", "falcons", - "falgship", "flagship", - "faliures", "failures", - "falseley", "falsely", - "falshing", "flashing", - "falvored", "flavored", - "falvours", "flavours", - "familair", "familiar", - "famoulsy", "famously", - "fanatism", "fanaticism", - "fanatsic", 
"fanatics", - "fanserve", "fanservice", - "fantasty", "fantasy", - "farcking", "fracking", - "fascisim", "fascism", - "fashiond", "fashioned", - "fasicsts", "fascists", - "fatigure", "fatigue", - "favorits", "favorites", - "favourie", "favourites", - "feasable", "feasible", - "feasbile", "feasible", - "febraury", "february", - "februray", "february", - "feburary", "february", - "fedility", "fidelity", - "fedorahs", "fedoras", - "fedorans", "fedoras", - "feilding", "fielding", - "feisable", "feasible", - "feitshes", "fetishes", - "feltcher", "fletcher", - "felxible", "flexible", - "feminint", "femininity", - "feminsim", "feminism", - "feromone", "pheromone", - "fesiable", "feasible", - "festivas", "festivals", - "festivle", "festive", - "fictious", "fictitious", - "fideling", "fielding", - "fideltiy", "fidelity", - "fiedling", "fielding", - "fiedlity", "fidelity", - "fighitng", "fighting", - "figthing", "fighting", - "fileding", "fielding", - "fimilies", "families", - "finacial", "financial", - "fineshes", "finesse", - "fingersi", "fingertips", - "finnisch", "finnish", - "finsihes", "finishes", - "firebals", "fireballs", - "firendly", "friendly", - "firmwear", "firmware", - "firwmare", "firmware", - "flaghsip", "flagship", - "flamable", "flammable", - "flasghip", "flagship", - "flatterd", "flattered", - "flatteur", "flatter", - "flattire", "flatter", - "flavores", "flavors", - "flechter", "fletcher", - "flecther", "fletcher", - "flemmish", "flemish", - "flethcer", "fletcher", - "flexbile", "flexible", - "flexibel", "flexible", - "flippade", "flipped", - "flitered", "filtered", - "florecen", "florence", - "floridia", "florida", - "floruide", "fluoride", - "floruish", "flourish", - "flourine", "fluorine", - "floursih", "flourish", - "fluorish", "flourish", - "fluroide", "fluoride", - "folowing", "following", - "fontrier", "fontier", - "forasken", "forsaken", - "forbiden", "forbidden", - "foreamrs", "forearms", - "foreksin", "foreskin", - "forenics", "forensic", - "forenisc", "forensic", - "foresnic", "forensic", - "foreward", "foreword", - "foricbly", "forcibly", - "forigven", "forgiven", - "formatin", "formation", - "formelly", "formerly", - "formuals", "formulas", - "fornesic", "forensic", - "forresst", "forrest", - "forsekan", "forsaken", - "forsekin", "foreskin", - "forsenic", "forensic", - "forskaen", "forsaken", - "forsting", "frosting", - "fortitue", "fortitude", - "fortunae", "fortune", - "fortunte", "fortune", - "forumlas", "formulas", - "forunner", "forerunner", - "fossiles", "fossils", - "fossilis", "fossils", - "foundary", "foundry", - "fountian", "fountain", - "fourties", "forties", - "fowrards", "forwards", - "frackign", "fracking", - "framgent", "fragment", - "franches", "franchise", - "franchie", "franchises", - "franciso", "francisco", - "frankiln", "franklin", - "franlkin", "franklin", - "freckels", "freckles", - "freindly", "friendly", - "frequeny", "frequency", - "friendle", "friendlies", - "friendsi", "friendlies", - "frimware", "firmware", - "frogiven", "forgiven", - "frointer", "frontier", - "fromerly", "formerly", - "froniter", "frontier", - "fronteir", "frontier", - "frosaken", "forsaken", - "frutcose", "fructose", - "fucntion", "function", - "fufilled", "fulfilled", - "fulfiled", "fulfilled", - "fullfill", "fulfill", - "funciton", "function", - "fundirse", "fundies", - "funniliy", "funnily", - "funnilly", "funnily", - "furctose", "fructose", - "furition", "fruition", - "furuther", "further", - "futurers", "futures", - "futureus", "futures", - "gamemdoe", "gamemode", 
- "gamepaly", "gameplay", - "gamergat", "gamertag", - "gammeode", "gamemode", - "ganerate", "generate", - "garantee", "guarantee", - "gardient", "gradient", - "garfeild", "garfield", - "garfiled", "garfield", - "garflied", "garfield", - "garnison", "garrison", - "garrions", "garrison", - "garriosn", "garrison", - "garrsion", "garrison", - "gatherig", "gatherings", - "gauarana", "guaraná", - "gauntelt", "gauntlet", - "gauntles", "gauntlets", - "gaurdian", "guardian", - "gaurding", "guarding", - "gautnlet", "gauntlet", - "gemoetry", "geometry", - "generaly", "generally", - "generase", "generates", - "generats", "generates", - "genialia", "genitalia", - "genisues", "geniuses", - "genitala", "genitalia", - "genrates", "generates", - "gentials", "genitals", - "gentlemn", "gentlemen", - "genuises", "geniuses", - "geograpy", "geography", - "geomerty", "geometry", - "geomtery", "geometry", - "germanos", "germans", - "germanus", "germans", - "gernades", "grenades", - "giagbyte", "gigabyte", - "gigabtye", "gigabyte", - "gigaybte", "gigabyte", - "gigbayte", "gigabyte", - "gignatic", "gigantic", - "giltched", "glitched", - "giltches", "glitches", - "girafffe", "giraffe", - "girefing", "griefing", - "girlling", "grilling", - "gladiatr", "gladiator", - "glichted", "glitched", - "glichtes", "glitches", - "glicthed", "glitched", - "glicthes", "glitches", - "glitchey", "glitchy", - "glitchly", "glitchy", - "glitchty", "glitchy", - "glithced", "glitched", - "glithces", "glitches", - "gloablly", "globally", - "glodberg", "goldberg", - "glodfish", "goldfish", - "gloriuos", "glorious", - "gltiched", "glitched", - "gltiches", "glitches", - "gmaertag", "gamertag", - "goblings", "goblins", - "goddammn", "goddamn", - "goddammt", "goddammit", - "godesses", "goddesses", - "godlberg", "goldberg", - "godlfish", "goldfish", - "godounov", "godunov", - "godpseed", "godspeed", - "godspede", "godspeed", - "goldifsh", "goldfish", - "gonewidl", "gonewild", - "goodlcuk", "goodluck", - "goregous", "gorgeous", - "gorgoeus", "gorgeous", - "gorillia", "gorilla", - "gorillla", "gorilla", - "gospells", "gospels", - "gottleib", "gottlieb", - "gourmelt", "gourmet", - "gourment", "gourmet", - "gouvener", "governor", - "govement", "government", - "goverend", "governed", - "govermet", "goverment", - "governer", "governor", - "gradualy", "gradually", - "grafield", "garfield", - "grafitti", "graffiti", - "grahpics", "graphics", - "grahpite", "graphite", - "graident", "gradient", - "granolla", "granola", - "graphcis", "graphics", - "grapichs", "graphics", - "grappnel", "grapple", - "greandes", "grenades", - "greatful", "grateful", - "greeneer", "greener", - "greenhoe", "greenhouse", - "greenlad", "greenland", - "greenore", "greener", - "greusome", "gruesome", - "grieifng", "griefing", - "grifeing", "griefing", - "grizzlay", "grizzly", - "grizzley", "grizzly", - "grpahics", "graphics", - "grpahite", "graphite", - "gruseome", "gruesome", - "guantano", "guantanamo", - "guardain", "guardian", - "guardias", "guardians", - "guaridan", "guardian", - "guerrila", "guerrilla", - "guidence", "guidance", - "guiseppe", "giuseppe", - "guitards", "guitars", - "guitares", "guitars", - "guitarit", "guitarist", - "gullbile", "gullible", - "gunanine", "guanine", - "guniness", "guinness", - "gunniess", "guinness", - "guradian", "guardian", - "gurading", "guarding", - "gurantee", "guarantee", - "guresome", "gruesome", - "guttaral", "guttural", - "gutteral", "guttural", - "hacthing", "hatching", - "hafltime", "halftime", - "haircuit", "haircut", - "halfitme", 
"halftime", - "hallowen", "halloween", - "hamburgr", "hamburgers", - "hamitlon", "hamilton", - "hamliton", "hamilton", - "handcufs", "handcuffs", - "handeldy", "handedly", - "handlade", "handled", - "handlare", "handler", - "handledy", "handedly", - "hannbial", "hannibal", - "haording", "hoarding", - "hapening", "happening", - "happends", "happens", - "happenes", "happens", - "happilly", "happily", - "harldine", "hardline", - "harrased", "harassed", - "harrases", "harasses", - "hatchign", "hatching", - "hatesink", "heatsink", - "hathcing", "hatching", - "headachs", "headaches", - "headests", "headsets", - "headhsot", "headshot", - "headseat", "headset", - "healthit", "healthiest", - "heastink", "heatsink", - "heathern", "heathen", - "heatskin", "heatsink", - "heaviliy", "heavily", - "heavilly", "heavily", - "heavnely", "heavenly", - "hedeghog", "hedgehog", - "hegdehog", "hedgehog", - "heighest", "heights", - "heighted", "heightened", - "heirachy", "hierarchy", - "heistant", "hesitant", - "heistate", "hesitate", - "hellifre", "hellfire", - "helluvva", "helluva", - "helpfull", "helpful", - "heratige", "heritage", - "herclues", "hercules", - "heridity", "heredity", - "heroicas", "heroics", - "heroices", "heroics", - "heroicos", "heroics", - "heroicus", "heroics", - "hertiage", "heritage", - "herucles", "hercules", - "hestiant", "hesitant", - "hestiate", "hesitate", - "heveanly", "heavenly", - "hierachy", "hierarchy", - "hierarcy", "hierarchy", - "highlane", "highlander", - "hindiusm", "hinduism", - "hindusim", "hinduism", - "hinudism", "hinduism", - "hiptsers", "hipsters", - "hispanis", "hispanics", - "hispters", "hipsters", - "histroic", "historic", - "hodlings", "holdings", - "hoenstly", "honestly", - "hoildays", "holidays", - "holdiays", "holidays", - "hollywod", "hollywood", - "homeword", "homeworld", - "homineim", "hominem", - "homineum", "hominem", - "honeslty", "honestly", - "honeymon", "honeymoon", - "honsetly", "honestly", - "hopefuly", "hopefully", - "hopkings", "hopkins", - "hopsital", "hospital", - "horading", "hoarding", - "horzions", "horizons", - "hosptial", "hospital", - "hosteles", "hostels", - "hostiliy", "hostility", - "hotshoot", "hotshot", - "hotsport", "hotspot", - "hsyteria", "hysteria", - "htaching", "hatching", - "htiboxes", "hitboxes", - "huanting", "haunting", - "humaniod", "humanoid", - "humanite", "humanities", - "humantiy", "humanity", - "humerous", "humorous", - "huminoid", "humanoid", - "humitidy", "humidity", - "humoural", "humoral", - "humouros", "humorous", - "humurous", "humorous", - "hunderds", "hundreds", - "hundread", "hundred", - "hungarin", "hungarian", - "huntmsan", "huntsman", - "hutnsman", "huntsman", - "hybrides", "hybrids", - "hybridus", "hybrids", - "hydorgen", "hydrogen", - "hydratin", "hydration", - "hydregon", "hydrogen", - "hygience", "hygiene", - "hygienne", "hygiene", - "hyperbel", "hyperbole", - "hypocrit", "hypocrite", - "hyponsis", "hypnosis", - "hyrdogen", "hydrogen", - "icefrong", "icefrog", - "icelings", "ceilings", - "idaeidae", "idea", - "idealogy", "ideology", - "idealsim", "idealism", - "idenfity", "identify", - "idenitfy", "identify", - "identite", "identities", - "ideologe", "ideologies", - "illiegal", "illegal", - "illinios", "illinois", - "illionis", "illinois", - "illnesss", "illnesses", - "illumini", "illuminati", - "illustre", "illustrate", - "illution", "illusion", - "ilogical", "illogical", - "ilterate", "literate", - "imapired", "impaired", - "imgrants", "migrants", - "imigrant", "emigrant", - "immboile", "immobile", - 
"immenint", "imminent", - "immersie", "immerse", - "immersve", "immerse", - "immitate", "imitate", - "immoblie", "immobile", - "immortas", "immortals", - "impactes", "impacts", - "impactos", "impacts", - "imparied", "impaired", - "imperavi", "imperative", - "imperfet", "imperfect", - "implemet", "implements", - "implosed", "implode", - "impluses", "impulses", - "imporper", "improper", - "importas", "imports", - "importen", "importance", - "importes", "imports", - "imporved", "improved", - "imporves", "improves", - "impropre", "improper", - "improted", "imported", - "improvie", "improvised", - "impusles", "impulses", - "imrpoved", "improved", - "imrpoves", "improves", - "inbetwen", "inbetween", - "inclince", "incline", - "inclinde", "incline", - "includng", "including", - "incorect", "incorrect", - "incuding", "including", - "inculded", "included", - "indianas", "indians", - "indiands", "indians", - "indiania", "indiana", - "indianna", "indiana", - "indianos", "indians", - "indicato", "indication", - "indicats", "indicators", - "indonesa", "indonesia", - "indulgue", "indulge", - "infantis", "infants", - "infantus", "infants", - "infarred", "infrared", - "infectin", "infections", - "infermon", "inferno", - "infiltre", "infiltrate", - "infintie", "infinite", - "infintiy", "infinity", - "inflatie", "inflate", - "influens", "influences", - "informas", "informs", - "informis", "informs", - "infromal", "informal", - "infromed", "informed", - "ingenius", "ingenious", - "ingition", "ignition", - "ingorant", "ignorant", - "inheriet", "inherit", - "inherint", "inherit", - "inhumaan", "inhuman", - "inhumain", "inhuman", - "inifnite", "infinite", - "inifnity", "infinity", - "inisghts", "insights", - "initails", "initials", - "initaite", "initiate", - "initaled", "initialed", - "initally", "initially", - "initialy", "initially", - "initmacy", "intimacy", - "initmate", "intimate", - "injustie", "injustices", - "inlcuded", "included", - "inlcudes", "includes", - "innocens", "innocents", - "innocuos", "innocuous", - "innvoate", "innovate", - "inocence", "innocence", - "inpolite", "impolite", - "inpsired", "inspired", - "inquirey", "inquiry", - "inquirie", "inquire", - "inquiriy", "inquiry", - "inrested", "inserted", - "insanley", "insanely", - "insectes", "insects", - "insectos", "insects", - "insertas", "inserts", - "insertes", "inserts", - "insertos", "inserts", - "insidios", "insidious", - "insigths", "insights", - "insipred", "inspired", - "insipres", "inspires", - "insistas", "insists", - "insistes", "insists", - "insistis", "insists", - "insmonia", "insomnia", - "insomina", "insomnia", - "insonmia", "insomnia", - "inspried", "inspired", - "inspries", "inspires", - "instanse", "instances", - "instanty", "instantly", - "instered", "inserted", - "insticnt", "instinct", - "instincs", "instincts", - "institue", "institute", - "insultas", "insults", - "insultes", "insults", - "insultos", "insults", - "intamicy", "intimacy", - "intamite", "intimate", - "intendes", "intends", - "intendos", "intends", - "intentas", "intents", - "intented", "intended", - "interace", "interacted", - "interacs", "interacts", - "interect", "interacted", - "interent", "internet", - "interese", "interested", - "interfce", "interface", - "intergal", "integral", - "internts", "interns", - "internus", "interns", - "interpet", "interpret", - "interrim", "interim", - "interste", "interstate", - "interupt", "interrupt", - "intevene", "intervene", - "intially", "initially", - "intiials", "initials", - "intimaty", "intimately", - 
"intimide", "intimidate", - "intregal", "integral", - "intriuge", "intrigue", - "introdue", "introduces", - "introdus", "introduces", - "introvet", "introvert", - "intruige", "intrigue", - "intutive", "intuitive", - "inudstry", "industry", - "inventer", "inventor", - "invertes", "inverse", - "invincil", "invincible", - "invitato", "invitation", - "invloved", "involved", - "invloves", "involves", - "invovled", "involved", - "invovles", "involves", - "iranains", "iranians", - "iraninas", "iranians", - "iritable", "irritable", - "iritated", "irritated", - "ironicly", "ironically", - "irritato", "irritation", - "isalmist", "islamist", - "isarelis", "israelis", - "islamits", "islamist", - "islamsit", "islamist", - "islandes", "islanders", - "ismalist", "islamist", - "isntalls", "installs", - "isolatie", "isolate", - "israelli", "israeli", - "israleis", "israelis", - "isralies", "israelis", - "isrealis", "israelis", - "issueing", "issuing", - "italains", "italians", - "jaguards", "jaguars", - "jaguares", "jaguars", - "jailbrek", "jailbreak", - "jaimacan", "jamaican", - "jamacain", "jamaican", - "jamaicia", "jamaica", - "jamiacan", "jamaican", - "januaray", "january", - "janurary", "january", - "jeapardy", "jeopardy", - "jefferry", "jeffery", - "jefferty", "jeffery", - "jennigns", "jennings", - "jeoprady", "jeopardy", - "jepoardy", "jeopardy", - "jerusalm", "jerusalem", - "jewelrey", "jewelry", - "jewllery", "jewellery", - "joanthan", "jonathan", - "joepardy", "jeopardy", - "johanine", "johannine", - "jonatahn", "jonathan", - "journaal", "journal", - "journied", "journeyed", - "journies", "journeys", - "joysitck", "joystick", - "juadaism", "judaism", - "judaisim", "judaism", - "judgemet", "judgements", - "juducial", "judicial", - "jugnling", "jungling", - "junglign", "jungling", - "junlging", "jungling", - "justifiy", "justify", - "juveline", "juvenile", - "juvenlie", "juvenile", - "katemine", "ketamine", - "kennedey", "kennedy", - "ketmaine", "ketamine", - "keybaord", "keyboard", - "keyboars", "keyboards", - "keyborad", "keyboard", - "keychian", "keychain", - "kicthens", "kitchens", - "kindgoms", "kingdoms", - "kittiens", "kitties", - "knockbak", "knockback", - "knowlege", "knowledge", - "knuckels", "knuckles", - "koreanos", "koreans", - "kunckles", "knuckles", - "kurdisch", "kurdish", - "labatory", "lavatory", - "labenese", "lebanese", - "laboraty", "laboratory", - "laguages", "languages", - "landscae", "landscapes", - "langauge", "language", - "lanucher", "launcher", - "lanuches", "launches", - "laodouts", "loadouts", - "larwence", "lawrence", - "lasagnea", "lasagna", - "lasagnia", "lasagna", - "laucnhed", "launched", - "laucnher", "launcher", - "laucnhes", "launches", - "laundrey", "laundry", - "lawernce", "lawrence", - "lazyness", "laziness", - "leaglize", "legalize", - "lecteurs", "lectures", - "lecutres", "lectures", - "lefitsts", "leftists", - "leftsits", "leftists", - "legenday", "legendary", - "legionis", "legions", - "legitimt", "legitimate", - "lengthes", "lengths", - "lengthly", "lengthy", - "lentiles", "lentils", - "lentills", "lentils", - "lesbains", "lesbians", - "lesibans", "lesbians", - "levander", "lavender", - "levelign", "leveling", - "levetate", "levitate", - "leviathn", "leviathan", - "levleing", "leveling", - "liberato", "liberation", - "libertae", "liberate", - "libertea", "liberate", - "librarse", "libraries", - "licencie", "licence", - "licencse", "licence", - "liebrals", "liberals", - "liekable", "likeable", - "lifepsan", "lifespan", - "lifestel", "lifesteal", - 
"lifestye", "lifestyle", - "lighitng", "lighting", - "lightnig", "lightning", - "lightres", "lighters", - "lightrom", "lightroom", - "ligthers", "lighters", - "ligthing", "lighting", - "likebale", "likeable", - "limitant", "militant", - "limitato", "limitation", - "lincolin", "lincoln", - "lincolon", "lincoln", - "lineupes", "lineups", - "lingeire", "lingerie", - "lingiere", "lingerie", - "linnaena", "linnaean", - "lipstics", "lipsticks", - "liquidas", "liquids", - "liquides", "liquids", - "liquidos", "liquids", - "liscense", "license", - "lisenced", "silenced", - "listenes", "listens", - "listents", "listens", - "listners", "listeners", - "litature", "literature", - "litecion", "litecoin", - "liteicon", "litecoin", - "literaly", "literally", - "lithuana", "lithuania", - "litigato", "litigation", - "liverpol", "liverpool", - "logtiech", "logitech", - "longitme", "longtime", - "longtiem", "longtime", - "looseley", "loosely", - "loreplay", "roleplay", - "luanched", "launched", - "luancher", "launcher", - "luanches", "launches", - "lubricat", "lubricant", - "lucifear", "lucifer", - "luckilly", "luckily", - "macarino", "macaroni", - "machiens", "machines", - "mackeral", "mackerel", - "macthups", "matchups", - "magasine", "magazine", - "magazins", "magazines", - "magentic", "magnetic", - "magicain", "magician", - "magisine", "magazine", - "magizine", "magazine", - "magnetis", "magnets", - "magnited", "magnitude", - "magnitue", "magnitude", - "mainfest", "manifest", - "maintian", "maintain", - "majoroty", "majority", - "makrsman", "marksman", - "malariya", "malaria", - "malasiya", "malaysia", - "malasyia", "malaysia", - "malayisa", "malaysia", - "malyasia", "malaysia", - "mamalian", "mammalian", - "manadrin", "mandarin", - "manaully", "manually", - "mandaste", "mandates", - "mandrain", "mandarin", - "mandrian", "mandarin", - "maneveur", "maneuver", - "manevuer", "maneuver", - "manfiest", "manifest", - "mangetic", "magnetic", - "manglade", "mangled", - "manifeso", "manifesto", - "manipule", "manipulate", - "manouver", "maneuver", - "manuales", "manuals", - "manuever", "maneuver", - "maraconi", "macaroni", - "maradeur", "marauder", - "maraduer", "marauder", - "maragret", "margaret", - "marbleds", "marbles", - "margerat", "margaret", - "margines", "margins", - "margings", "margins", - "marginis", "margins", - "marignal", "marginal", - "marilyin", "marilyn", - "marinens", "marines", - "markedet", "marketed", - "markeras", "markers", - "markerts", "markers", - "marniers", "mariners", - "marraige", "marriage", - "marryied", "married", - "marskman", "marksman", - "maruader", "marauder", - "marvelos", "marvelous", - "marxisim", "marxism", - "mascarra", "mascara", - "massacer", "massacre", - "massarce", "massacre", - "massasge", "massages", - "masscare", "massacre", - "masteris", "masteries", - "masturbe", "masturbate", - "materias", "materials", - "mathcups", "matchups", - "mathewes", "mathews", - "matieral", "material", - "matterss", "mattress", - "mauarder", "marauder", - "maximini", "maximizing", - "mayalsia", "malaysia", - "maybelle", "maybelline", - "maylasia", "malaysia", - "mccarhty", "mccarthy", - "mcgergor", "mcgregor", - "mchanics", "mechanics", - "mclarean", "mclaren", - "mcreggor", "mcgregor", - "meagtron", "megatron", - "meancing", "menacing", - "meaninng", "meaning", - "meatbals", "meatballs", - "mecahnic", "mechanic", - "mechanim", "mechanism", - "mechanis", "mechanics", - "medacine", "medicine", - "medatite", "meditate", - "medeival", "medieval", - "medevial", "medieval", - "mediavel", 
"medieval", - "medicaly", "medically", - "mediciad", "medicaid", - "medicins", "medicines", - "medicore", "mediocre", - "medievel", "medieval", - "mediocer", "mediocre", - "mediocry", "mediocrity", - "mediorce", "mediocre", - "meditato", "meditation", - "mediveal", "medieval", - "medoicre", "mediocre", - "meerkrat", "meerkat", - "megatorn", "megatron", - "meidcare", "medicare", - "meixcans", "mexicans", - "melboure", "melbourne", - "meltodwn", "meltdown", - "memoriez", "memorize", - "mencaing", "menacing", - "menstrul", "menstrual", - "mentiong", "mentioning", - "meoldies", "melodies", - "merchans", "merchants", - "mercurcy", "mercury", - "mercurey", "mercury", - "merficul", "merciful", - "merhcant", "merchant", - "mericful", "merciful", - "messgaed", "messaged", - "messiach", "messiah", - "metagaem", "metagame", - "metahpor", "metaphor", - "metamage", "metagame", - "methapor", "metaphor", - "metldown", "meltdown", - "metricas", "metrics", - "metrices", "metrics", - "metropos", "metropolis", - "mexcians", "mexicans", - "mexicain", "mexican", - "mhytical", "mythical", - "michagan", "michigan", - "michgian", "michigan", - "microtax", "microatx", - "microwae", "microwaves", - "midfeild", "midfield", - "midfiled", "midfield", - "midifeld", "midfield", - "migrains", "migraines", - "migriane", "migraine", - "milennia", "millennia", - "miligram", "milligram", - "miliitas", "militias", - "miliraty", "military", - "militais", "militias", - "millenia", "millennia", - "millenna", "millennia", - "miltiant", "militant", - "minature", "miniature", - "mindcrak", "mindcrack", - "minerial", "mineral", - "mingiame", "minigame", - "minimage", "minigame", - "minimals", "minimalist", - "minimalt", "minimalist", - "minimini", "minimizing", - "minimium", "minimum", - "miniscue", "miniscule", - "minsiter", "minister", - "minsitry", "ministry", - "miraculu", "miraculous", - "miralces", "miracles", - "mircales", "miracles", - "mircoatx", "microatx", - "mirgaine", "migraine", - "mirorred", "mirrored", - "misnadry", "misandry", - "misogynt", "misogynist", - "missigno", "mission", - "missiony", "missionary", - "misslies", "missiles", - "missorui", "missouri", - "misspeld", "misspelled", - "mistakey", "mistakenly", - "mistread", "mistreated", - "mobiltiy", "mobility", - "moderats", "moderates", - "modulair", "modular", - "moleculs", "molecules", - "momentos", "moments", - "momentus", "moments", - "monagomy", "monogamy", - "mongoles", "mongols", - "mongolos", "mongols", - "monitord", "monitored", - "monogmay", "monogamy", - "monolite", "monolithic", - "monologe", "monologue", - "monolopy", "monopoly", - "monoploy", "monopoly", - "monopols", "monopolies", - "monrachy", "monarchy", - "monstros", "monstrous", - "montaban", "montana", - "montains", "mountains", - "montanha", "montana", - "montania", "montana", - "montanna", "montana", - "montanta", "montana", - "montanya", "montana", - "montaran", "montana", - "monteize", "monetize", - "monteral", "montreal", - "montiors", "monitors", - "montnana", "montana", - "montypic", "monotypic", - "monumnet", "monument", - "moonligt", "moonlight", - "moprhine", "morphine", - "morbildy", "morbidly", - "mordibly", "morbidly", - "morevoer", "moreover", - "morhpine", "morphine", - "moribdly", "morbidly", - "mormones", "mormons", - "mormonts", "mormons", - "moroever", "moreover", - "morotola", "motorola", - "morphein", "morphine", - "morriosn", "morrison", - "morrocco", "morocco", - "morrsion", "morrison", - "mortards", "mortars", - "mortarts", "mortars", - "moruning", "mourning", - 
"mosnters", "monsters", - "mosqueto", "mosquitoes", - "mosquite", "mosquitoes", - "mosqutio", "mosquito", - "motoroal", "motorola", - "mounment", "monument", - "mounring", "mourning", - "mountian", "mountain", - "moustace", "moustache", - "movesped", "movespeed", - "mozillia", "mozilla", - "mozillla", "mozilla", - "msytical", "mystical", - "mucnhies", "munchies", - "mudering", "murdering", - "muffings", "muffins", - "muffinus", "muffins", - "mulitple", "multiple", - "mulitply", "multiply", - "multiplr", "multiplier", - "multipls", "multiples", - "mundance", "mundane", - "mundande", "mundane", - "muniches", "munchies", - "murderes", "murders", - "murderus", "murders", - "muscluar", "muscular", - "muscualr", "muscular", - "musicaly", "musically", - "musuclar", "muscular", - "mutliple", "multiple", - "mutliply", "multiply", - "myhtical", "mythical", - "mysitcal", "mystical", - "mysogyny", "misogyny", - "mysteris", "mysteries", - "mythraic", "mithraic", - "nagivate", "navigate", - "naopleon", "napoleon", - "napcakes", "pancakes", - "naploeon", "napoleon", - "napoelon", "napoleon", - "napolean", "napoleon", - "napoloen", "napoleon", - "narcissm", "narcissism", - "narcisst", "narcissist", - "narcotis", "narcotics", - "narwharl", "narwhal", - "naseuous", "nauseous", - "nashvile", "nashville", - "nasueous", "nauseous", - "natievly", "natively", - "nationas", "nationals", - "nationsl", "nationals", - "nativley", "natively", - "natuilus", "nautilus", - "naturaly", "naturally", - "naturels", "natures", - "naturely", "naturally", - "naturens", "natures", - "naturual", "natural", - "nauesous", "nauseous", - "naughtly", "naughty", - "nauitlus", "nautilus", - "nauseuos", "nauseous", - "nautiuls", "nautilus", - "nautlius", "nautilus", - "nautulis", "nautilus", - "naviagte", "navigate", - "navigato", "navigation", - "nazereth", "nazareth", - "necesary", "necessary", - "neckbead", "neckbeard", - "needlees", "needles", - "nefarios", "nefarious", - "negativy", "negativity", - "neglectn", "neglecting", - "neglible", "negligible", - "neigbour", "neighbour", - "neolitic", "neolithic", - "netboook", "netbook", - "neuronas", "neurons", - "neutraal", "neutral", - "neutralt", "neutrality", - "neutraly", "neutrality", - "newcaste", "newcastle", - "nickanme", "nickname", - "nickmane", "nickname", - "nieghbor", "neighbor", - "nightime", "nighttime", - "nightley", "nightly", - "nightlie", "nightlife", - "nihilsim", "nihilism", - "nilihism", "nihilism", - "nirtogen", "nitrogen", - "nirvanna", "nirvana", - "nitorgen", "nitrogen", - "niusance", "nuisance", - "noctrune", "nocturne", - "noctunre", "nocturne", - "nocturen", "nocturne", - "nominato", "nomination", - "nonsence", "nonsense", - "nonsesne", "nonsense", - "noramlly", "normally", - "norhtern", "northern", - "normalis", "normals", - "normalls", "normals", - "normalos", "normals", - "northeat", "northeast", - "northren", "northern", - "northwet", "northwest", - "norwegin", "norwegian", - "nostalga", "nostalgia", - "nostirls", "nostrils", - "notabley", "notably", - "notablly", "notably", - "noteable", "notable", - "noteably", "notably", - "noticabe", "noticable", - "notorios", "notorious", - "novmeber", "november", - "nromandy", "normandy", - "nuatilus", "nautilus", - "nuculear", "nuclear", - "nuetered", "neutered", - "nuisanse", "nuisance", - "nullifiy", "nullify", - "nurtient", "nutrient", - "nusaince", "nuisance", - "nusiance", "nuisance", - "nutirent", "nutrient", - "nutriens", "nutrients", - "nuturing", "nurturing", - "obdisian", "obsidian", - "obediant", 
"obedient", - "obession", "obsession", - "obilvion", "oblivion", - "obisdian", "obsidian", - "obsessie", "obsessive", - "obsessin", "obsession", - "obsidain", "obsidian", - "obstacal", "obstacle", - "obvilion", "oblivion", - "ocasions", "occasions", - "ocassion", "occasion", - "occaison", "occasion", - "occupato", "occupation", - "occuring", "occurring", - "octobear", "october", - "octopuns", "octopus", - "ofcoruse", "ofcourse", - "ofcoures", "ofcourse", - "ofcousre", "ofcourse", - "ofcrouse", "ofcourse", - "officals", "officials", - "officaly", "officially", - "offsited", "offside", - "ofocurse", "ofcourse", - "oligarcy", "oligarchy", - "olmypics", "olympics", - "olymipcs", "olympics", - "olypmics", "olympics", - "ommision", "omission", - "ommiting", "omitting", - "ommitted", "omitted", - "ongewild", "gonewild", - "onslaugt", "onslaught", - "operatie", "operative", - "opinoins", "opinions", - "oppinion", "opinion", - "opponant", "opponent", - "opposits", "opposites", - "oppossed", "opposed", - "oppresso", "oppression", - "optimaal", "optimal", - "optomism", "optimism", - "oragnise", "organise", - "orangerd", "orangered", - "orangers", "oranges", - "orangism", "organism", - "orchesta", "orchestra", - "ordianry", "ordinary", - "oreintal", "oriental", - "orgainse", "organise", - "orgainze", "organize", - "organims", "organism", - "organsie", "organise", - "organsim", "organism", - "organzie", "organize", - "orgasmes", "orgasms", - "orgasmos", "orgasms", - "orgasmus", "orgasms", - "orginize", "organise", - "orhtodox", "orthodox", - "oridnary", "ordinary", - "originas", "origins", - "origines", "origins", - "originsl", "originals", - "orphanes", "orphans", - "osbidian", "obsidian", - "othrodox", "orthodox", - "ourselvs", "ourselves", - "oustider", "outsider", - "outfeild", "outfield", - "outfidel", "outfield", - "outfiled", "outfield", - "outisder", "outsider", - "outplayd", "outplayed", - "outputed", "outputted", - "outsoure", "outsourced", - "overboad", "overboard", - "overclok", "overclock", - "overdrev", "overdrive", - "overhual", "overhaul", - "overlaod", "overload", - "overpiad", "overpaid", - "overules", "overuse", - "overwath", "overwatch", - "overwhem", "overwhelm", - "oximoron", "oxymoron", - "oylmpics", "olympics", - "pacakged", "packaged", - "packadge", "packaged", - "paficist", "pacifist", - "painfuly", "painfully", - "paitence", "patience", - "paitents", "patients", - "palidans", "paladins", - "palstics", "plastics", - "paltform", "platform", - "paltinum", "platinum", - "palyable", "playable", - "palyoffs", "playoffs", - "pancaeks", "pancakes", - "panckaes", "pancakes", - "pandoria", "pandora", - "pandorra", "pandora", - "panedmic", "pandemic", - "panethon", "pantheon", - "pankaces", "pancakes", - "panmedic", "pandemic", - "pantehon", "pantheon", - "panthoen", "pantheon", - "paradies", "paradise", - "paradyse", "parades", - "paragrah", "paragraph", - "paraiste", "parasite", - "paralell", "parallel", - "paralely", "parallelly", - "paralles", "parallels", - "parameds", "paramedics", - "paramter", "parameter", - "paranioa", "paranoia", - "paraniod", "paranoid", - "paraside", "paradise", - "parasits", "parasites", - "parastie", "parasite", - "parctise", "practise", - "paremsan", "parmesan", - "paristan", "partisan", - "parmasen", "parmesan", - "parmenas", "parmesan", - "parmsean", "parmesan", - "parnters", "partners", - "parralel", "parallel", - "parterns", "partners", - "partialy", "partially", - "partians", "partisan", - "partical", "particular", - "particel", "particle", - 
"partiets", "parties", - "partiots", "patriots", - "partnerd", "partnered", - "partsian", "partisan", - "passabel", "passable", - "passione", "passionate", - "passisve", "passives", - "passpost", "passports", - "passvies", "passives", - "passwors", "passwords", - "pasttime", "pastime", - "pastural", "pastoral", - "pateince", "patience", - "pateints", "patients", - "patethic", "pathetic", - "patheitc", "pathetic", - "patienty", "patiently", - "patirots", "patriots", - "patriarh", "patriarchy", - "patroits", "patriots", - "patrolls", "patrols", - "patronas", "patrons", - "patrones", "patrons", - "patronis", "patrons", - "patronos", "patrons", - "pattened", "patented", - "patterno", "patterson", - "pattersn", "patterson", - "pblisher", "publisher", - "peageant", "pageant", - "pebbleos", "pebbles", - "pebblers", "pebbles", - "pebblets", "pebbles", - "peciluar", "peculiar", - "pecuilar", "peculiar", - "peculair", "peculiar", - "peculure", "peculiar", - "peformed", "performed", - "peircing", "piercing", - "penaltis", "penalties", - "penatgon", "pentagon", - "penciles", "pencils", - "pendatic", "pedantic", - "pengiuns", "penguins", - "penisula", "peninsula", - "pensioen", "pension", - "pepperin", "pepperoni", - "perceded", "preceded", - "percente", "percentile", - "percieve", "perceive", - "percious", "precious", - "perclude", "preclude", - "perfecty", "perfectly", - "perfroms", "performs", - "perheaps", "perhaps", - "pericing", "piercing", - "peridoic", "periodic", - "perimetr", "perimeter", - "periodes", "periods", - "periodos", "periods", - "permanet", "permanent", - "permiere", "premiere", - "permises", "premises", - "permitas", "permits", - "permites", "permits", - "permitis", "permits", - "permitts", "permits", - "permiums", "premiums", - "peroidic", "periodic", - "perosnas", "personas", - "perpetue", "perpetuate", - "persaude", "persuade", - "perserve", "preserve", - "persisit", "persist", - "personel", "personnel", - "persones", "persons", - "personis", "persons", - "personsa", "personas", - "perstige", "prestige", - "persuaso", "persuasion", - "persuded", "persuaded", - "persuing", "pursuing", - "persuits", "pursuits", - "persumed", "presumed", - "pertaing", "pertaining", - "pertians", "pertains", - "pertinet", "pertinent", - "pervents", "prevents", - "perverst", "pervert", - "perviews", "previews", - "pervious", "previous", - "perxoide", "peroxide", - "pessiary", "pessary", - "petetion", "petition", - "petrolem", "petroleum", - "phantoom", "phantom", - "pharamcy", "pharmacy", - "pharmacs", "pharmacist", - "pharmsci", "pharmacist", - "phenomon", "phenomenon", - "phramacy", "pharmacy", - "phsyical", "physical", - "phsyique", "physique", - "phyiscal", "physical", - "phyisque", "physique", - "physcial", "physical", - "physicis", "physicians", - "physicks", "physics", - "physicts", "physicist", - "physqiue", "physique", - "picthers", "pitchers", - "pillards", "pillars", - "pillaris", "pillars", - "pinancle", "pinnacle", - "pinapple", "pineapple", - "pinnalce", "pinnacle", - "pinnaple", "pineapple", - "pinncale", "pinnacle", - "pinpiont", "pinpoint", - "pinteret", "pinterest", - "piolting", "piloting", - "pioneeer", "pioneer", - "pithcers", "pitchers", - "placebro", "placebo", - "placemet", "placements", - "planetas", "planets", - "planetos", "planets", - "plantiff", "plaintiff", - "plantium", "platinum", - "plasitcs", "plastics", - "platfrom", "platform", - "platimun", "platinum", - "platnium", "platinum", - "platnuim", "platinum", - "plausibe", "plausible", - "playbody", "playboy", - 
"playstye", "playstyle", - "pleasent", "pleasant", - "plehtora", "plethora", - "pleothra", "plethora", - "plethroa", "plethora", - "ploygamy", "polygamy", - "pnatheon", "pantheon", - "poeoples", "peoples", - "poingant", "poignant", - "pointeur", "pointer", - "pointure", "pointer", - "poisones", "poisons", - "poisonis", "poisons", - "poisonos", "poisons", - "poisonus", "poisons", - "polgyamy", "polygamy", - "polietly", "politely", - "politing", "piloting", - "politley", "politely", - "poltical", "political", - "poluting", "polluting", - "polution", "pollution", - "polygoon", "polygon", - "polymore", "polymer", - "pomotion", "promotion", - "popoulus", "populous", - "populair", "popular", - "populare", "popular", - "populary", "popularity", - "porcelan", "porcelain", - "porposes", "proposes", - "portabel", "portable", - "portalis", "portals", - "portalus", "portals", - "portayed", "portrayed", - "portgual", "portugal", - "portrais", "portraits", - "portrary", "portray", - "portrayl", "portrayal", - "portriat", "portrait", - "posessed", "possessed", - "posesses", "possesses", - "posioned", "poisoned", - "positivs", "positives", - "positivy", "positivity", - "possable", "possible", - "possably", "possibly", - "possbily", "possibly", - "posseses", "possesses", - "possesse", "possessive", - "possesss", "possesses", - "potrayed", "portrayed", - "poverful", "powerful", - "powerded", "powdered", - "powerpot", "powerpoint", - "pracitse", "practise", - "practial", "practical", - "practies", "practise", - "pratcise", "practise", - "praticle", "particle", - "prceeded", "preceded", - "preadtor", "predator", - "preample", "preamble", - "preceeds", "precedes", - "precisie", "precise", - "precisly", "precisely", - "precisou", "precious", - "preculde", "preclude", - "predicat", "predict", - "predicte", "predictive", - "preferas", "prefers", - "prefered", "preferred", - "preferes", "prefers", - "preferis", "prefers", - "preferrs", "prefers", - "preimere", "premiere", - "preimums", "premiums", - "preiodic", "periodic", - "preivews", "previews", - "prejudis", "prejudices", - "prelayed", "replayed", - "premeire", "premiere", - "premesis", "premises", - "premiare", "premier", - "premines", "premise", - "premuims", "premiums", - "preorded", "preordered", - "preordes", "preorders", - "preoxide", "peroxide", - "prepaird", "prepaid", - "preqeuls", "prequels", - "prequles", "prequels", - "prescrie", "prescribed", - "presense", "presence", - "presenst", "presets", - "presidet", "presidents", - "presists", "persists", - "presitge", "prestige", - "presonas", "personas", - "presuade", "persuade", - "pretador", "predator", - "pretains", "pertains", - "preveiws", "previews", - "preverse", "perverse", - "previwes", "previews", - "pricipal", "principal", - "priciple", "principle", - "priemere", "premiere", - "priestes", "priests", - "primaris", "primaries", - "primarly", "primarily", - "princila", "principals", - "principl", "principals", - "prisitne", "pristine", - "probelms", "problems", - "probleem", "problem", - "procalim", "proclaim", - "proccess", "process", - "proceded", "proceeded", - "proceder", "procedure", - "procedes", "proceeds", - "procedue", "procedure", - "proceeed", "proceed", - "procesed", "proceeds", - "processs", "processes", - "proclami", "proclaim", - "procliam", "proclaim", - "procotol", "protocol", - "prodcuts", "products", - "producto", "production", - "profesor", "professor", - "proficit", "proficient", - "profilic", "prolific", - "progroms", "pogroms", - "prohibis", "prohibits", - "prohpecy", 
"prophecy", - "prohpets", "prophets", - "projecte", "projectile", - "projecto", "projection", - "prolouge", "prologue", - "promplty", "promptly", - "promptes", "prompts", - "promptus", "prompts", - "promtply", "promptly", - "pronoune", "pronounced", - "propechy", "prophecy", - "propehcy", "prophecy", - "propehts", "prophets", - "prophacy", "prophecy", - "propmted", "prompted", - "propmtly", "promptly", - "proponet", "proponents", - "proposse", "proposes", - "proposte", "propose", - "proprety", "property", - "propsect", "prospect", - "prosepct", "prospect", - "prostite", "prostitute", - "protable", "portable", - "protecte", "protective", - "protiens", "proteins", - "protines", "proteins", - "protocal", "protocol", - "prototye", "prototype", - "protrait", "portrait", - "protrays", "portrays", - "protugal", "portugal", - "proverai", "proverbial", - "providee", "providence", - "proximty", "proximity", - "pruchase", "purchase", - "pryamids", "pyramids", - "ptichers", "pitchers", - "pubisher", "publisher", - "publiser", "publisher", - "puinsher", "punisher", - "pulisher", "publisher", - "pumkpins", "pumpkins", - "pumpinks", "pumpkins", - "pumpknis", "pumpkins", - "punshier", "punisher", - "punsiher", "punisher", - "punsihes", "punishes", - "purcahse", "purchase", - "pyramind", "pyramid", - "pyrimads", "pyramids", - "pyrmaids", "pyramids", - "qauntity", "quantity", - "qualifiy", "qualify", - "quanitfy", "quantify", - "quantaty", "quantity", - "quantite", "quantities", - "quantuum", "quantum", - "quarante", "quarantine", - "quartery", "quarterly", - "qucikest", "quickest", - "queation", "equation", - "quention", "quentin", - "quickets", "quickest", - "quicklyu", "quickly", - "rabbitos", "rabbits", - "rabbitts", "rabbits", - "racistas", "racists", - "racistes", "racists", - "radaince", "radiance", - "rahpsody", "rhapsody", - "raidance", "radiance", - "railraod", "railroad", - "randomes", "randoms", - "randomez", "randomized", - "randomns", "randoms", - "randomrs", "randoms", - "randomus", "randoms", - "raosting", "roasting", - "raphsody", "rhapsody", - "raptores", "raptors", - "raspbery", "raspberry", - "rationel", "rationale", - "realible", "reliable", - "realibly", "reliably", - "realiest", "earliest", - "realisim", "realism", - "realisme", "realise", - "realistc", "realistic", - "realiste", "realise", - "realoded", "reloaded", - "realsied", "realised", - "realtion", "relation", - "realtive", "relative", - "reamined", "remained", - "reapired", "repaired", - "reaplugs", "earplugs", - "reaserch", "research", - "reasonal", "reasonably", - "reatiler", "retailer", - "reaveled", "revealed", - "rebellis", "rebellious", - "reboudns", "rebounds", - "rebounce", "rebound", - "rebuildt", "rebuilt", - "rebuplic", "republic", - "receeded", "receded", - "recepits", "receipts", - "receptie", "receptive", - "receptos", "receptors", - "receving", "receiving", - "recident", "resident", - "reciding", "residing", - "recieved", "received", - "reciever", "receiver", - "recieves", "receives", - "recipees", "recipes", - "recipets", "recipes", - "recogise", "recognise", - "recogize", "recognize", - "recognie", "recognizes", - "recomend", "recommend", - "recommed", "recommend", - "reconnet", "reconnect", - "rectange", "rectangle", - "rectifiy", "rectify", - "recuring", "recurring", - "recurits", "recruits", - "redeisgn", "redesign", - "redemeed", "redeemed", - "redesgin", "redesign", - "redesing", "redesign", - "reedemed", "redeemed", - "refeeres", "referees", - "refelcts", "reflects", - "refelxes", "reflexes", - 
"referede", "referee", - "referene", "referee", - "referens", "references", - "referere", "referee", - "referign", "refering", - "refering", "referring", - "refernce", "references", - "reffered", "referred", - "refilles", "refills", - "refillls", "refills", - "reflecte", "reflective", - "reflecto", "reflection", - "reformes", "reforms", - "refreing", "refering", - "refrence", "reference", - "refreshd", "refreshed", - "refreshr", "refresher", - "refromed", "reformed", - "regardes", "regards", - "regenade", "renegade", - "regenere", "regenerate", - "regiones", "regions", - "regisrty", "registry", - "registed", "registered", - "regresas", "regress", - "regreses", "regress", - "regresos", "regress", - "regresse", "regressive", - "regresso", "regression", - "regrests", "regress", - "regretts", "regrets", - "regsitry", "registry", - "regualrs", "regulars", - "regualte", "regulate", - "reguarly", "regularly", - "regulary", "regularly", - "regulatr", "regulator", - "regulats", "regulators", - "rehersal", "rehearsal", - "rehtoric", "rhetoric", - "reiceved", "recieved", - "reigment", "regiment", - "reigonal", "regional", - "rekenton", "renekton", - "relaible", "reliable", - "relaibly", "reliably", - "relaised", "realised", - "relaoded", "reloaded", - "relasped", "relapsed", - "relatabe", "relatable", - "relateds", "relates", - "relativy", "relativity", - "relavent", "relevant", - "relected", "reelected", - "relegato", "relegation", - "releived", "relieved", - "releiver", "reliever", - "relevent", "relevant", - "relfects", "reflects", - "relfexes", "reflexes", - "reliased", "realised", - "religous", "religious", - "relpased", "relapsed", - "remainds", "remains", - "remainig", "remaining", - "remannts", "remnants", - "remarkes", "remarks", - "remembed", "remembered", - "remembee", "remembered", - "rememebr", "remember", - "remenant", "remnant", - "reminent", "remnant", - "remmeber", "remember", - "remotley", "remotely", - "renderes", "renders", - "reneagde", "renegade", - "renetkon", "renekton", - "renewabe", "renewables", - "renketon", "renekton", - "renmants", "remnants", - "renoylds", "reynolds", - "renteris", "renters", - "renyolds", "reynolds", - "reowrked", "reworked", - "repaires", "repairs", - "repalces", "replaces", - "reparied", "repaired", - "repblics", "republics", - "repbulic", "republic", - "repeatae", "repeatable", - "repeates", "repeats", - "repetion", "repetition", - "repharse", "rephrase", - "repitles", "reptiles", - "replased", "relapsed", - "replayes", "replays", - "replicae", "replicated", - "replubic", "republic", - "reportes", "reporters", - "reposity", "repository", - "repostas", "reposts", - "repostes", "reposts", - "repostig", "reposting", - "repostus", "reposts", - "represet", "represents", - "represso", "repression", - "reprhase", "rephrase", - "repsects", "respects", - "repsonds", "responds", - "repsonse", "response", - "repsoted", "reposted", - "repubics", "republics", - "republis", "republics", - "repulics", "republics", - "repulsie", "repulsive", - "requiers", "requires", - "requieum", "requiem", - "requilme", "requiem", - "requried", "required", - "requries", "requires", - "rescuecd", "rescued", - "researce", "researcher", - "resembes", "resembles", - "reserach", "research", - "resevoir", "reservoir", - "resgined", "resigned", - "residude", "residue", - "residule", "residue", - "resinged", "resigned", - "resistas", "resists", - "resisten", "resistance", - "resistes", "resists", - "resloved", "resolved", - "resloves", "resolves", - "resmeble", "resemble", - "resotred", 
"restored", - "resourse", "resources", - "resovled", "resolved", - "resovles", "resolves", - "respecte", "respective", - "respesct", "respects", - "responce", "response", - "responed", "respond", - "respones", "response", - "responsd", "responds", - "respoted", "reposted", - "restanti", "restarting", - "restrait", "restraint", - "restrics", "restricts", - "resuable", "reusable", - "retailes", "retailers", - "retalier", "retailer", - "rethoric", "rhetoric", - "retirase", "retires", - "retireds", "retires", - "retireus", "retires", - "retireve", "retrieve", - "retreive", "retrieve", - "retrived", "retrieved", - "retunred", "returned", - "reuasble", "reusable", - "reveales", "reveals", - "reveiwed", "reviewed", - "reveiwer", "reviewer", - "revelaed", "revealed", - "revelant", "relevant", - "revelead", "revealed", - "reverals", "reversal", - "reviewes", "reviewers", - "revlover", "revolver", - "revloves", "revolves", - "revovler", "revolver", - "revovles", "revolves", - "rewatchd", "rewatched", - "rewitten", "rewritten", - "rewritte", "rewrite", - "rewtched", "wretched", - "reynlods", "reynolds", - "reyonlds", "reynolds", - "rhaposdy", "rhapsody", - "rhaspody", "rhapsody", - "rheotric", "rhetoric", - "righteos", "righteous", - "rigntone", "ringtone", - "ringotne", "ringtone", - "ritalian", "ritalin", - "rivalrly", "rivalry", - "roachers", "roaches", - "robberts", "robbers", - "robberys", "robbers", - "robocoop", "robocop", - "robocorp", "robocop", - "robocoup", "robocop", - "roelplay", "roleplay", - "roganism", "organism", - "rolepaly", "roleplay", - "romaanin", "romanian", - "romainan", "romanian", - "romanain", "romanian", - "romanica", "romania", - "rosettta", "rosetta", - "rostaing", "roasting", - "routeros", "routers", - "rutgerus", "rutgers", - "ryenolds", "reynolds", - "sacrifie", "sacrifice", - "saddends", "saddens", - "saddenes", "saddens", - "sadisitc", "sadistic", - "salaires", "salaries", - "sandales", "sandals", - "sandalls", "sandals", - "sandstom", "sandstorm", - "sanotrum", "santorum", - "santourm", "santorum", - "santroum", "santorum", - "santurom", "santorum", - "sapcebar", "spacebar", - "sapphrie", "sapphire", - "sarcasam", "sarcasm", - "sarcasim", "sarcasm", - "sarcastc", "sarcastic", - "sargeant", "sergeant", - "sasauges", "sausages", - "sasuages", "sausages", - "satelite", "satellite", - "satellie", "satellites", - "saterday", "saturday", - "satifies", "satisfies", - "satisfiy", "satisfy", - "satrical", "satirical", - "satruday", "saturday", - "saturdsy", "saturdays", - "sawstika", "swastika", - "scandlas", "scandals", - "scannign", "scanning", - "scarmble", "scramble", - "scepture", "scepter", - "schedual", "schedule", - "schoalrs", "scholars", - "scholary", "scholarly", - "schoodle", "schooled", - "scientic", "scientific", - "scientis", "scientist", - "scoprion", "scorpion", - "scorates", "socrates", - "scoripon", "scorpion", - "scorpoin", "scorpion", - "scostman", "scotsman", - "scratchs", "scratches", - "scriptue", "scriptures", - "scriptus", "scripts", - "scritped", "scripted", - "scroates", "socrates", - "scropion", "scorpion", - "scrpited", "scripted", - "scruitny", "scrutiny", - "scrunity", "scrutiny", - "sctosman", "scotsman", - "sculpter", "sculpture", - "scurtiny", "scrutiny", - "seahakws", "seahawks", - "seahwaks", "seahawks", - "seantors", "senators", - "sebastin", "sebastian", - "seceeded", "succeeded", - "secertly", "secretly", - "secrelty", "secretly", - "secretas", "secrets", - "secretos", "secrets", - "secruity", "security", - "secuirty", "security", - 
"sedereal", "sidereal", - "seldomly", "seldom", - "selectie", "selective", - "selfiers", "selfies", - "semestre", "semester", - "semseter", "semester", - "senarios", "scenarios", - "senerity", "serenity", - "seniores", "seniors", - "senisble", "sensible", - "sensibel", "sensible", - "sensores", "sensors", - "senstive", "sensitive", - "sentaors", "senators", - "sentiers", "sentries", - "sentinet", "sentient", - "sentinte", "sentient", - "sentires", "sentries", - "sentreis", "sentries", - "separato", "separation", - "separete", "seperate", - "sepearte", "seperate", - "seperate", "separate", - "seplling", "spelling", - "sepreate", "seperate", - "sepulcre", "sepulchre", - "serached", "searched", - "seraches", "searches", - "serentiy", "serenity", - "sergaent", "sergeant", - "settigns", "settings", - "seventen", "seventeen", - "severeal", "several", - "severeid", "severed", - "severide", "severed", - "severley", "severely", - "sexaully", "sexually", - "seziures", "seizures", - "sezuires", "seizures", - "shadoloo", "shadaloo", - "shangahi", "shanghai", - "shanghia", "shanghai", - "sharplay", "sharply", - "sharpley", "sharply", - "shawshak", "shawshank", - "shcolars", "scholars", - "shcooled", "schooled", - "sheilded", "shielded", - "shelterd", "sheltered", - "shelvers", "shelves", - "shelveys", "shelves", - "sherlcok", "sherlock", - "shetlers", "shelters", - "shfiting", "shifting", - "shifitng", "shifting", - "shifteer", "shifter", - "shileded", "shielded", - "shineing", "shining", - "shitstom", "shitstorm", - "shittoon", "shitton", - "shittown", "shitton", - "shleters", "shelters", - "shnaghai", "shanghai", - "shortend", "shortened", - "shotuout", "shoutout", - "shoudlnt", "shouldnt", - "shouldes", "shoulders", - "shoulndt", "shouldnt", - "shrapenl", "shrapnel", - "shrelock", "sherlock", - "shrinked", "shrunk", - "shrpanel", "shrapnel", - "shtiless", "shitless", - "shuoldnt", "shouldnt", - "sideboad", "sideboard", - "sidleine", "sideline", - "siezable", "sizeable", - "siezures", "seizures", - "signatue", "signatures", - "signfies", "signifies", - "signifiy", "signify", - "signigns", "signings", - "signular", "singular", - "silbings", "siblings", - "silicoln", "silicon", - "silicoon", "silicon", - "silimiar", "similiar", - "simialir", "similiar", - "simiilar", "similiar", - "similair", "similar", - "similari", "similiar", - "similart", "similarity", - "similary", "similarly", - "similiar", "similar", - "simliiar", "similiar", - "simluate", "simulate", - "simmilar", "similar", - "simpelst", "simplest", - "simplets", "simplest", - "simplicy", "simplicity", - "simplier", "simpler", - "simulato", "simulation", - "singlers", "singles", - "singluar", "singular", - "sinistre", "sinister", - "sinsiter", "sinister", - "sitckers", "stickers", - "sitrring", "stirring", - "sizebale", "sizeable", - "skateing", "skating", - "skecthes", "sketches", - "skelatel", "skeletal", - "skeletos", "skeletons", - "sketchey", "sketchy", - "sketpics", "skeptics", - "skillsto", "skillshots", - "skimrish", "skirmish", - "skpetics", "skeptics", - "skrimish", "skirmish", - "skteches", "sketches", - "skywalkr", "skywalker", - "slaptoon", "splatoon", - "slaverly", "slavery", - "slienced", "silenced", - "sliently", "silently", - "slighlty", "slightly", - "sligthly", "slightly", - "smartare", "smarter", - "snetries", "sentries", - "snippent", "snippet", - "snippert", "snippet", - "snowbals", "snowballs", - "snugglie", "snuggle", - "snydrome", "syndrome", - "snyopsis", "synopsis", - "soberity", "sobriety", - "sobreity", "sobriety", 
- "socailly", "socially", - "socalism", "socialism", - "socartes", "socrates", - "socialim", "socialism", - "socities", "societies", - "socttish", "scottish", - "soemthin", "somethin", - "soilders", "soldiers", - "solatary", "solitary", - "soldeirs", "soldiers", - "soliders", "soldiers", - "soluable", "soluble", - "solutide", "solitude", - "somalija", "somalia", - "somehtin", "somethin", - "someoens", "someones", - "somethis", "somethings", - "sometihn", "somethin", - "sometinh", "somethin", - "somoenes", "someones", - "somtimes", "sometimes", - "somwhere", "somewhere", - "soparnos", "sopranos", - "sophmore", "sophomore", - "sorcercy", "sorcery", - "sorcerey", "sorcery", - "sorceror", "sorcerer", - "sorcerry", "sorcery", - "sorpanos", "sopranos", - "southren", "southern", - "soverein", "sovereign", - "soverign", "sovereign", - "sovietes", "soviets", - "spagheti", "spaghetti", - "spainish", "spanish", - "spaltoon", "splatoon", - "spammade", "spammed", - "spammare", "spammer", - "spammear", "spammer", - "spammend", "spammed", - "spammeur", "spammer", - "spanisch", "spanish", - "sparklie", "sparkle", - "spawnign", "spawning", - "specemin", "specimen", - "speciaal", "special", - "specialt", "specialist", - "specialy", "specially", - "specialz", "specialize", - "specifed", "specified", - "specifiy", "specify", - "speciman", "specimen", - "specrtal", "spectral", - "speicals", "specials", - "spellign", "spelling", - "spendour", "splendour", - "sphereos", "spheres", - "spilnter", "splinter", - "spiltter", "splitter", - "spindrel", "spindle", - "spirites", "spirits", - "spiritis", "spirits", - "spiritus", "spirits", - "spirtied", "spirited", - "spleling", "spelling", - "splitner", "splinter", - "spoilerd", "spoiled", - "spoliers", "spoilers", - "sponsord", "sponsored", - "sporanos", "sopranos", - "spotifiy", "spotify", - "spotifty", "spotify", - "sppeches", "speeches", - "sprayade", "sprayed", - "spreaded", "spread", - "springst", "sprints", - "sprinkel", "sprinkle", - "sprintas", "sprints", - "spritual", "spiritual", - "sproutes", "sprouts", - "spwaning", "spawning", - "sqaudron", "squadron", - "sqaurely", "squarely", - "sqiurtle", "squirtle", - "squardon", "squadron", - "squareds", "squares", - "squarley", "squarely", - "squeakey", "squeaky", - "squeakly", "squeaky", - "squirlte", "squirtle", - "squirrle", "squirrel", - "squirtel", "squirtle", - "squishey", "squishy", - "squishly", "squishy", - "squritle", "squirtle", - "squrriel", "squirrel", - "squrtile", "squirtle", - "sriarcha", "sriracha", - "srriacha", "sriracha", - "sryacuse", "syracuse", - "staduims", "stadiums", - "staidums", "stadiums", - "staklers", "stalkers", - "stalekrs", "stalkers", - "stalkear", "stalker", - "staminia", "stamina", - "stampade", "stamped", - "stampeed", "stamped", - "stancels", "stances", - "stancers", "stances", - "standars", "standards", - "standbay", "standby", - "standbuy", "standby", - "stangant", "stagnant", - "staright", "straight", - "starined", "strained", - "starlted", "startled", - "startegy", "strategy", - "starteld", "startled", - "startsup", "startups", - "stateman", "statesman", - "staticts", "statist", - "stationd", "stationed", - "stationy", "stationary", - "statiskt", "statist", - "statistc", "statistic", - "statment", "statement", - "stattues", "statutes", - "statuets", "statutes", - "statuser", "stature", - "staurday", "saturday", - "steadliy", "steadily", - "stealhty", "stealthy", - "steathly", "stealthy", - "stelathy", "stealthy", - "sterilze", "sterile", - "steriods", "steroids", - 
"stichted", "stitched", - "sticthed", "stitched", - "sticthes", "stitches", - "stimulai", "stimuli", - "stimulas", "stimulants", - "stimulat", "stimulants", - "stimulli", "stimuli", - "stingent", "stringent", - "stirkers", "strikers", - "stlakers", "stalkers", - "stomache", "stomach", - "stormade", "stormed", - "stormend", "stormed", - "stradegy", "strategy", - "stragety", "strategy", - "straignt", "straighten", - "straigth", "straight", - "straings", "strains", - "strangel", "strangle", - "stranget", "strangest", - "stratgey", "strategy", - "stratled", "startled", - "streames", "streams", - "streamos", "streams", - "streamus", "streams", - "streamys", "streams", - "stregnth", "strength", - "stremear", "streamer", - "strenght", "strength", - "strengts", "strengths", - "strenous", "strenuous", - "strentgh", "strength", - "stretchs", "stretches", - "striaght", "straight", - "striclty", "strictly", - "striekrs", "strikers", - "strikely", "strikingly", - "stringet", "stringent", - "stubbron", "stubborn", - "stubmled", "stumbled", - "stucture", "structure", - "studioes", "studios", - "stuipder", "stupider", - "stumbeld", "stumbled", - "stupdily", "stupidly", - "stupidiy", "stupidity", - "stylisch", "stylish", - "styrofom", "styrofoam", - "suasages", "sausages", - "subltety", "subtlety", - "submarie", "submarines", - "subruban", "suburban", - "subscrie", "subscriber", - "subsidie", "subsidized", - "subsidiy", "subsidy", - "substace", "substance", - "substans", "substances", - "substite", "substitute", - "subtelty", "subtlety", - "subtetly", "subtlety", - "subtilte", "subtitle", - "subtitel", "subtitle", - "subtitls", "subtitles", - "subtltey", "subtlety", - "succeded", "succeeded", - "succedes", "succeeds", - "succeeed", "succeed", - "succesed", "succeeds", - "successs", "successes", - "succsess", "success", - "suceeded", "succeeded", - "sucesful", "successful", - "sucesion", "succession", - "sucesses", "successes", - "sucessor", "successor", - "sucessot", "successor", - "sucidial", "suicidal", - "suddnely", "suddenly", - "sufficit", "sufficient", - "suggesst", "suggests", - "suggeste", "suggestive", - "summenor", "summoner", - "summones", "summoners", - "sunfiber", "sunfire", - "sunscren", "sunscreen", - "superham", "superhuman", - "superheo", "superhero", - "superios", "superiors", - "supirsed", "suprised", - "suposing", "supposing", - "supporre", "supporters", - "suprised", "surprised", - "suprized", "surprised", - "suprsied", "suprised", - "supsects", "suspects", - "supsense", "suspense", - "surbuban", "suburban", - "surounds", "surrounds", - "surpases", "surpass", - "surpress", "suppress", - "surprize", "surprise", - "surrouns", "surrounds", - "surveill", "surveil", - "surveyer", "surveyor", - "surviver", "survivor", - "suspened", "suspend", - "suspenso", "suspension", - "swaering", "swearing", - "swansoon", "swanson", - "swasitka", "swastika", - "swaskita", "swastika", - "swatiska", "swastika", - "swatsika", "swastika", - "swedisch", "swedish", - "swiftley", "swiftly", - "swithced", "switched", - "swithces", "switches", - "swtiched", "switched", - "swtiches", "switches", - "syarcuse", "syracuse", - "sydnrome", "syndrome", - "sylablle", "syllable", - "syllabel", "syllable", - "symapthy", "sympathy", - "symboles", "symbols", - "symhpony", "symphony", - "symmerty", "symmetry", - "symmtery", "symmetry", - "symoblic", "symbolic", - "symphaty", "sympathy", - "symptoom", "symptom", - "symtpoms", "symptoms", - "synomyns", "synonyms", - "synonmys", "synonyms", - "synonomy", "synonym", - "synoynms", 
"synonyms", - "synphony", "symphony", - "synposis", "synopsis", - "sypmathy", "sympathy", - "sypmtoms", "symptoms", - "sypnosis", "synopsis", - "syraucse", "syracuse", - "syrcause", "syracuse", - "syringae", "syringe", - "syringue", "syringe", - "sysamdin", "sysadmin", - "sysdamin", "sysadmin", - "tacticas", "tactics", - "tacticts", "tactics", - "tacticus", "tactics", - "tagliate", "tailgate", - "tahnkyou", "thankyou", - "tailsman", "talisman", - "taiwanee", "taiwanese", - "taligate", "tailgate", - "taliored", "tailored", - "tallents", "tallest", - "talsiman", "talisman", - "tanturms", "tantrums", - "tapitude", "aptitude", - "tasliman", "talisman", - "tattooes", "tattoos", - "tattooos", "tattoos", - "taxanomy", "taxonomy", - "teamfigt", "teamfight", - "teamspek", "teamspeak", - "teancity", "tenacity", - "teapsoon", "teaspoon", - "techniqe", "technique", - "teenages", "teenagers", - "telegrah", "telegraph", - "telphony", "telephony", - "tempalrs", "templars", - "tempalte", "template", - "templats", "templates", - "templeos", "temples", - "templers", "temples", - "temporay", "temporary", - "temprary", "temporary", - "tenacles", "tentacles", - "tenactiy", "tenacity", - "tencaity", "tenacity", - "tendancy", "tendency", - "tendence", "tendencies", - "tentacel", "tentacle", - "tentacls", "tentacles", - "tentalce", "tentacle", - "tequilia", "tequila", - "terriory", "territory", - "territoy", "territory", - "terroist", "terrorist", - "tesitcle", "testicle", - "testicel", "testicle", - "testifiy", "testify", - "teusdays", "tuesdays", - "texutres", "textures", - "thaliand", "thailand", - "theather", "theater", - "theathre", "theater", - "theature", "theater", - "theisitc", "theistic", - "themslef", "themself", - "theorits", "theorist", - "theraphy", "therapy", - "thereian", "therein", - "theroies", "theories", - "theroist", "theorist", - "thesitic", "theistic", - "thialand", "thailand", - "thiestic", "theistic", - "thikning", "thinking", - "thirites", "thirties", - "thirstay", "thirsty", - "thnakyou", "thankyou", - "thoeries", "theories", - "thoerist", "theorist", - "thomspon", "thompson", - "thopmson", "thompson", - "thougths", "thoughts", - "thourogh", "thorough", - "threates", "threatens", - "threefor", "therefor", - "thriteen", "thirteen", - "thrities", "thirties", - "throaths", "throats", - "throners", "thrones", - "throough", "thorough", - "throught", "thought", - "thrusday", "thursday", - "thumbnal", "thumbnails", - "thurdsay", "thursday", - "thursdsy", "thursdays", - "tightare", "tighter", - "timestap", "timestamp", - "tirangle", "triangle", - "tirbunal", "tribunal", - "titainum", "titanium", - "titanuim", "titanium", - "tocuhpad", "touchpad", - "togehter", "together", - "togheter", "together", - "toiletts", "toilets", - "tolerabe", "tolerable", - "tommorow", "tomorrow", - "tonguers", "tongues", - "toriodal", "toroidal", - "toritlla", "tortilla", - "tornadoe", "tornado", - "torotise", "tortoise", - "torpedeo", "torpedo", - "torphies", "trophies", - "tortiose", "tortoise", - "toruisty", "touristy", - "toruneys", "tourneys", - "touchapd", "touchpad", - "tounreys", "tourneys", - "tourisim", "tourism", - "touritsy", "touristy", - "tournyes", "tourneys", - "toursits", "tourists", - "toursity", "touristy", - "toxiticy", "toxicity", - "trabajao", "trabajo", - "trabajdo", "trabajo", - "trackres", "trackers", - "trageted", "targeted", - "traingle", "triangle", - "traitour", "traitor", - "trakcers", "trackers", - "traliers", "trailers", - "tranform", "transform", - "transeat", "translates", - 
"transfom", "transform", - "transfos", "transforms", - "transiet", "transient", - "transito", "transition", - "transpot", "transport", - "trasnfer", "transfer", - "tratiors", "traitors", - "traveles", "travels", - "traveres", "traverse", - "treasurs", "treasures", - "treatmet", "treatments", - "treatsie", "treaties", - "treausre", "treasure", - "tredning", "trending", - "tremelos", "tremolos", - "tresuary", "treasury", - "trialers", "trailers", - "trianers", "trainers", - "triangel", "triangle", - "triangls", "triangles", - "trianing", "training", - "trianlge", "triangle", - "triators", "traitors", - "tribuanl", "tribunal", - "trickyer", "trickery", - "triggern", "triggering", - "trilogoy", "trilogy", - "trinagle", "triangle", - "trinekts", "trinkets", - "tringale", "triangle", - "trinitiy", "trinity", - "triology", "trilogy", - "triumpth", "triumph", - "trohpies", "trophies", - "trollade", "trolled", - "tropcial", "tropical", - "trotilla", "tortilla", - "trpoical", "tropical", - "trubinal", "tribunal", - "trubines", "turbines", - "tsunamai", "tsunami", - "tuesdsay", "tuesdays", - "tunnells", "tunnels", - "turkisch", "turkish", - "turntabe", "turntable", - "turretts", "turrets", - "tusedays", "tuesdays", - "tutorual", "tutorial", - "twilgiht", "twilight", - "tylenool", "tylenol", - "typicaly", "typically", - "tyranies", "tyrannies", - "tyrannia", "tyrannical", - "ublisher", "publisher", - "udnercut", "undercut", - "udnerdog", "underdog", - "ugpraded", "upgraded", - "ugprades", "upgrades", - "ukrainie", "ukraine", - "ukrainin", "ukrainian", - "ukranian", "ukrainian", - "ulitmate", "ultimate", - "ultamite", "ultimate", - "ultiamte", "ultimate", - "ultimely", "ultimately", - "ultrason", "ultrasound", - "umberlla", "umbrella", - "unabnned", "unbanned", - "unbanend", "unbanned", - "uncanney", "uncanny", - "uncannny", "uncanny", - "underbog", "undergo", - "underglo", "undergo", - "undersog", "undergo", - "undertoe", "undertones", - "underwar", "underwater", - "unfailry", "unfairly", - "unfarily", "unfairly", - "ungodley", "ungodly", - "unhapppy", "unhappy", - "unhealty", "unhealthy", - "unicrons", "unicorns", - "unifroms", "uniforms", - "uniquley", "uniquely", - "univeral", "universal", - "unlikley", "unlikely", - "unlockes", "unlocks", - "unluckly", "unlucky", - "unpoened", "unopened", - "unqiuely", "uniquely", - "unrakned", "unranked", - "unrnaked", "unranked", - "unrpoven", "unproven", - "unsuable", "unusable", - "untraind", "untrained", - "unusualy", "unusually", - "unvierse", "universe", - "unworhty", "unworthy", - "upgarded", "upgraded", - "upgardes", "upgrades", - "uploades", "uploads", - "upstaris", "upstairs", - "upstiars", "upstairs", - "urethrea", "urethra", - "uruguary", "uruguay", - "ususally", "usually", - "utilitiy", "utility", - "utlimate", "ultimate", - "vaccinae", "vaccinated", - "vaccinet", "vaccinated", - "vacinity", "vicinity", - "vaguelly", "vaguely", - "vaiation", "aviation", - "vaieties", "varieties", - "vailidty", "validity", - "vairable", "variable", - "vaklyrie", "valkyrie", - "valenica", "valencia", - "valentie", "valentines", - "valentis", "valentines", - "validade", "validated", - "valkirye", "valkyrie", - "valkiyre", "valkyrie", - "valkriye", "valkyrie", - "valkryie", "valkyrie", - "valkyire", "valkyrie", - "valnecia", "valencia", - "valubale", "valuable", - "valykrie", "valkyrie", - "vamipres", "vampires", - "vampiers", "vampires", - "vampries", "vampires", - "vangurad", "vanguard", - "vanillia", "vanilla", - "vanillla", "vanilla", - "vanugard", "vanguard", - 
"varaible", "variable", - "varaints", "variants", - "variabel", "variable", - "varibale", "variable", - "varities", "varieties", - "vassales", "vassals", - "vassalls", "vassals", - "vassalos", "vassals", - "vaticaan", "vatican", - "vaticina", "vatican", - "vaulable", "valuable", - "vaylkrie", "valkyrie", - "vechiles", "vehicles", - "vectores", "vectors", - "vegansim", "veganism", - "vegtable", "vegetable", - "vehciles", "vehicles", - "vehicels", "vehicles", - "vehicule", "vehicle", - "veichles", "vehicles", - "venelope", "envelope", - "venemous", "venomous", - "vengance", "vengeance", - "vengence", "vengeance", - "verablly", "verbally", - "verbaitm", "verbatim", - "verisons", "versions", - "versatel", "versatile", - "vertabim", "verbatim", - "vertigro", "vertigo", - "vesseles", "vessels", - "vessells", "vessels", - "viabiliy", "viability", - "viatmins", "vitamins", - "vibratie", "vibrate", - "vibratin", "vibration", - "vicintiy", "vicinity", - "vicseral", "visceral", - "victimas", "victims", - "victimes", "victims", - "victorin", "victorian", - "victoris", "victories", - "vieweres", "viewers", - "viewpoit", "viewpoints", - "vigilane", "vigilante", - "vigliant", "vigilant", - "vikingos", "vikings", - "viligant", "vigilant", - "villegas", "villages", - "vindicte", "vindictive", - "vinicity", "vicinity", - "violatin", "violation", - "violenty", "violently", - "violetas", "violates", - "virament", "vraiment", - "virbator", "vibrator", - "virginas", "virgins", - "virgines", "virgins", - "virgings", "virgins", - "virginis", "virgins", - "virginus", "virgins", - "virtualy", "virtually", - "virtuels", "virtues", - "virtuose", "virtues", - "viscreal", "visceral", - "visercal", "visceral", - "visibily", "visibility", - "visibley", "visibly", - "visiblly", "visibly", - "vitailty", "vitality", - "vitimans", "vitamins", - "vitmains", "vitamins", - "vitories", "victories", - "voicemal", "voicemail", - "voilates", "violates", - "volatily", "volatility", - "volcando", "volcano", - "volcanoe", "volcano", - "volcaron", "volcano", - "vriament", "vraiment", - "wahtever", "whatever", - "wallpapr", "wallpapers", - "warantee", "warranty", - "warcarft", "warcraft", - "warrante", "warranties", - "warriros", "warriors", - "watchemn", "watchmen", - "watchign", "watching", - "wathcing", "watching", - "wathcmen", "watchmen", - "wathever", "whatever", - "watkings", "watkins", - "wealthly", "wealthy", - "webistes", "websites", - "websties", "websites", - "wednesdy", "wednesdays", - "weigthed", "weighted", - "weridest", "weirdest", - "werstler", "wrestler", - "wesbites", "websites", - "westbrok", "westbrook", - "westerse", "westerners", - "wherease", "whereas", - "whipsers", "whispers", - "whislist", "wishlist", - "whisltes", "whistles", - "whisperd", "whispered", - "whistels", "whistles", - "whitsles", "whistles", - "whsipers", "whispers", - "widgetas", "widgets", - "wieghted", "weighted", - "willaims", "williams", - "willfuly", "willfully", - "willimas", "williams", - "windsoar", "windsor", - "wininpeg", "winnipeg", - "winnigns", "winnings", - "winnpieg", "winnipeg", - "wiredest", "weirdest", - "wishlsit", "wishlist", - "wishpers", "whispers", - "withdral", "withdrawal", - "witnesss", "witnesses", - "wonderes", "wonders", - "wonderus", "wonders", - "workfore", "workforce", - "wouldnot", "wouldnt", - "wranlger", "wrangler", - "wreckign", "wrecking", - "wrecthed", "wretched", - "wrekcing", "wrecking", - "wreslter", "wrestler", - "wresters", "wrestlers", - "writting", "writing", - "wrnagler", "wrangler", - "wrteched", 
"wretched", - "yeilding", "yielding", - "yoesmite", "yosemite", - "yorksher", "yorkshire", - "yorkshie", "yorkshire", - "yosemeti", "yosemite", - "yosimete", "yosemite", - "zealotes", "zealots", - "zealoths", "zealots", - "zealotus", "zealots", - "zealouts", "zealous", - "zepplein", "zeppelin", - "zepplien", "zeppelin", - "zimbabew", "zimbabwe", - "zimbawbe", "zimbabwe", - "zinoists", "zionists", - "zionisim", "zionism", - "zionistm", "zionism", - "zionsits", "zionists", - "zoinists", "zionists", - "abiltiy", "ability", - "abodmen", "abdomen", - "abondon", "abandon", - "aboslve", "absolve", - "abosrbs", "absorbs", - "abriter", "arbiter", - "abrupty", "abruptly", - "absense", "absence", - "absolue", "absolute", - "absovle", "absolve", - "absrobs", "absorbs", - "absuers", "abusers", - "absurdy", "absurdly", - "absymal", "abysmal", - "abymsal", "abysmal", - "acadamy", "academy", - "acadmic", "academic", - "accesss", "access", - "accpets", "accepts", - "accross", "across", - "accuray", "accuracy", - "acheive", "achieve", - "achived", "achieved", - "acident", "accident", - "ackward", "awkward", - "acrlyic", "acrylic", - "actauly", "actualy", - "activit", "activist", - "activly", "actively", - "actualy", "actually", - "actulay", "actualy", - "acuracy", "accuracy", - "acusing", "causing", - "acustom", "accustom", - "acutaly", "actualy", - "acyrlic", "acrylic", - "adaptes", "adapters", - "adatper", "adapter", - "adbomen", "abdomen", - "addcits", "addicts", - "adderss", "address", - "addtion", "addition", - "adequet", "adequate", - "adequit", "adequate", - "adivser", "adviser", - "adivsor", "advisor", - "admited", "admitted", - "admrial", "admiral", - "adpater", "adapter", - "adquire", "acquire", - "adultey", "adultery", - "adverst", "adverts", - "adviced", "advised", - "advocay", "advocacy", - "advsior", "advisor", - "aeriels", "aerials", - "affaris", "affairs", - "affiars", "affairs", - "afircan", "african", - "africas", "africans", - "afwully", "awfully", - "againts", "against", - "agaisnt", "against", - "aganist", "against", - "aggreed", "agreed", - "agianst", "against", - "agreing", "agreeing", - "agruing", "arguing", - "ahtiest", "athiest", - "aicraft", "aircraft", - "ailmony", "alimony", - "airbore", "airborne", - "aircaft", "aircraft", - "airlfow", "airflow", - "airosft", "airsoft", - "airpost", "airports", - "airsfot", "airsoft", - "airzona", "arizona", - "alchmey", "alchemy", - "alchool", "alcohol", - "alcohal", "alcohol", - "aledged", "alleged", - "aledges", "alleges", - "alegbra", "algebra", - "algerba", "algebra", - "alienet", "alienate", - "alledge", "allege", - "allegry", "allergy", - "alltime", "all-time", - "almighy", "almighty", - "alochol", "alcohol", - "alotted", "allotted", - "alowing", "allowing", - "alphabt", "alphabet", - "alreayd", "already", - "alrighy", "alrighty", - "altanta", "atlanta", - "alteast", "atleast", - "altough", "although", - "alusion", "allusion", - "amateus", "amateurs", - "amatuer", "amateur", - "amature", "armature", - "amensia", "amnesia", - "amensty", "amnesty", - "amercia", "america", - "americs", "americas", - "ammount", "amount", - "ammused", "amused", - "amneisa", "amnesia", - "amnsety", "amnesty", - "amognst", "amongst", - "amongts", "amongst", - "amonsgt", "amongst", - "ampilfy", "amplify", - "amrpits", "armpits", - "analoge", "analogue", - "analsyt", "analyst", - "analyes", "analyse", - "analyts", "analyst", - "analzye", "analyze", - "anaylse", "analyse", - "anaylst", "analyst", - "anaylze", "analyze", - "anceint", "ancient", - "andorid", 
"android", - "andriod", "android", - "androis", "androids", - "angirly", "angrily", - "angluar", "angular", - "angualr", "angular", - "anicent", "ancient", - "anitque", "antique", - "anixety", "anxiety", - "anmesia", "amnesia", - "anmesty", "amnesty", - "annoint", "anoint", - "annualy", "annually", - "annuled", "annulled", - "anohter", "another", - "anomoly", "anomaly", - "answerd", "answered", - "anuglar", "angular", - "anulled", "annulled", - "anwsers", "answers", - "anwyays", "anyways", - "anxeity", "anxiety", - "anyoens", "anyones", - "anyonse", "anyones", - "anywyas", "anyways", - "aparent", "apparent", - "appeard", "appeared", - "appluad", "applaud", - "aproval", "approval", - "apsects", "aspects", - "apshalt", "asphalt", - "apsirin", "aspirin", - "aqcuire", "acquire", - "aquarim", "aquarium", - "aquired", "acquired", - "aranged", "arranged", - "arbitre", "arbiter", - "arcahic", "archaic", - "archiac", "archaic", - "arcylic", "acrylic", - "aresnal", "arsenal", - "aretmis", "artemis", - "argubly", "arguably", - "aribter", "arbiter", - "ariflow", "airflow", - "arisoft", "airsoft", - "aritsts", "artists", - "armchar", "armchair", - "arogant", "arrogant", - "arogent", "arrogant", - "arresst", "arrests", - "arround", "around", - "arsneal", "arsenal", - "artcile", "article", - "artical", "article", - "articel", "article", - "artistc", "artistic", - "artmeis", "artemis", - "artsits", "artists", - "aruging", "arguing", - "aseuxal", "asexual", - "asexaul", "asexual", - "ashpalt", "asphalt", - "asiprin", "aspirin", - "asissts", "assists", - "asnwers", "answers", - "asorbed", "absorbed", - "aspahlt", "asphalt", - "asphlat", "asphalt", - "aspriin", "aspirin", - "assagne", "assange", - "assasin", "assassin", - "assembe", "assemble", - "assemby", "assembly", - "assisst", "assists", - "assnage", "assange", - "asssits", "assists", - "assualt", "assault", - "asterik", "asterisk", - "asutria", "austria", - "atcualy", "actualy", - "atelast", "atleast", - "athesim", "atheism", - "athiesm", "atheism", - "athiest", "atheist", - "athiets", "athiest", - "athlets", "athletes", - "atlantc", "atlantic", - "atleats", "atleast", - "atlesat", "atleast", - "atorney", "attorney", - "atremis", "artemis", - "attemps", "attempts", - "attemts", "attempts", - "attened", "attended", - "attracs", "attracts", - "audbile", "audible", - "audibel", "audible", - "austira", "austria", - "austrai", "austria", - "autistc", "autistic", - "avation", "aviation", - "avtaars", "avatars", - "awakend", "awakened", - "bablyon", "babylon", - "backdor", "backdoor", - "backsta", "backseat", - "baclony", "balcony", - "badnits", "bandits", - "baiscly", "basicly", - "bakcers", "backers", - "balanse", "balances", - "balcked", "blacked", - "banhsee", "banshee", - "bankgok", "bangkok", - "baoynet", "bayonet", - "baptims", "baptism", - "baptsim", "baptism", - "baragin", "bargain", - "bargani", "bargain", - "bargian", "bargain", - "bariner", "brainer", - "barlkey", "barkley", - "barracs", "barracks", - "barrles", "barrels", - "barsita", "barista", - "barvery", "bravery", - "bascily", "basicly", - "basicly", "basically", - "basilcy", "basicly", - "basiton", "bastion", - "basnhee", "banshee", - "bastane", "bastante", - "bastars", "bastards", - "bastino", "bastion", - "bathrom", "bathroom", - "batitsa", "batista", - "batsita", "batista", - "bayblon", "babylon", - "baynoet", "bayonet", - "bayoent", "bayonet", - "bceuase", "becuase", - "beacuse", "because", - "bealtes", "beatles", - "beaslty", "beastly", - "beatels", "beatles", - "beaucop", "beaucoup", 
- "becamae", "became", - "becames", "becomes", - "becasue", "because", - "becouse", "because", - "becuaes", "becuase", - "becuase", "because", - "becusae", "becuase", - "befried", "befriend", - "beggins", "begins", - "beglian", "belgian", - "beglium", "belgium", - "begnals", "bengals", - "bejiing", "beijing", - "beleifs", "beliefs", - "beleive", "believe", - "belgain", "belgian", - "belguim", "belgium", - "believr", "believer", - "believs", "believes", - "belifes", "beliefs", - "beligan", "belgian", - "beligum", "belgium", - "belived", "believed", - "belives", "believes", - "benagls", "bengals", - "benedit", "benedict", - "benghai", "benghazi", - "benglas", "bengals", - "benifit", "benefit", - "beoynce", "beyonce", - "beraded", "bearded", - "bersekr", "berserk", - "beseige", "besiege", - "betales", "beatles", - "bethesa", "bethesda", - "betrayd", "betrayed", - "beucase", "becuase", - "bewteen", "between", - "bicthes", "bitches", - "bidrman", "birdman", - "biejing", "beijing", - "bifgoot", "bigfoot", - "bigorty", "bigotry", - "bigtoed", "bigoted", - "bigtory", "bigotry", - "biogted", "bigoted", - "biogtry", "bigotry", - "bioplar", "bipolar", - "biploar", "bipolar", - "birdamn", "birdman", - "birdges", "bridges", - "birgade", "brigade", - "bitcion", "bitcoin", - "bithced", "bitched", - "bithces", "bitches", - "bitocin", "bitcoin", - "bizzare", "bizarre", - "blacony", "balcony", - "blaimed", "blamed", - "blankes", "blankets", - "blegian", "belgian", - "blegium", "belgium", - "blizzad", "blizzard", - "blockes", "blockers", - "bloster", "bolster", - "blulets", "bullets", - "bobmers", "bombers", - "bollocs", "bollocks", - "bondary", "boundary", - "bonnano", "bonanno", - "bonsues", "bonuses", - "boraden", "broaden", - "borader", "broader", - "boradly", "broadly", - "bordeom", "boredom", - "boslter", "bolster", - "boudler", "boulder", - "boundry", "boundary", - "bounses", "bonuses", - "boutiqe", "boutique", - "bouyant", "buoyant", - "braevry", "bravery", - "braista", "barista", - "brakley", "barkley", - "branier", "brainer", - "braoden", "broaden", - "braoder", "broader", - "braodly", "broadly", - "brednan", "brendan", - "breifly", "briefly", - "breserk", "berserk", - "brethen", "brethren", - "brewrey", "brewery", - "briagde", "brigade", - "brianer", "brainer", - "bridman", "birdman", - "brielfy", "briefly", - "brigdes", "bridges", - "brightn", "brighten", - "brisben", "brisbane", - "britian", "britain", - "britsol", "bristol", - "briused", "bruised", - "briuser", "bruiser", - "briuses", "bruises", - "brocoli", "broccoli", - "bronocs", "broncos", - "browine", "brownie", - "brownei", "brownie", - "brownis", "brownies", - "bruglar", "burglar", - "brunete", "brunette", - "bruning", "burning", - "brusied", "bruised", - "brusies", "bruises", - "brusses", "brussels", - "brutaly", "brutally", - "btiched", "bitched", - "btiches", "bitches", - "bubbels", "bubbles", - "buddhim", "buddhism", - "buddhit", "buddhist", - "buddist", "buddhist", - "budgest", "budgets", - "bugdets", "budgets", - "buildes", "builders", - "bulgara", "bulgaria", - "bullest", "bullets", - "buoancy", "buoyancy", - "burguny", "burgundy", - "buriser", "bruiser", - "burlgar", "burglar", - "burnign", "burning", - "burried", "buried", - "burrtio", "burrito", - "busines", "business", - "busness", "business", - "butthoe", "butthole", - "buttrey", "buttery", - "cababge", "cabbage", - "cabines", "cabinets", - "cabniet", "cabinet", - "caclium", "calcium", - "cacuses", "caucuses", - "caffeen", "caffeine", - "cahched", "cached", - "cahotic", 
"chaotic", - "cahsier", "cashier", - "cailbre", "calibre", - "calaber", "caliber", - "calagry", "calgary", - "calback", "callback", - "calbire", "calibre", - "calcuim", "calcium", - "calculs", "calculus", - "calicum", "calcium", - "calrify", "clarify", - "calrity", "clarity", - "caluses", "clauses", - "camboda", "cambodia", - "campain", "campaign", - "campuss", "campuses", - "cancles", "cancels", - "cancres", "cancers", - "cancuks", "canucks", - "canides", "candies", - "cannnot", "cannot", - "canrage", "carnage", - "capible", "capable", - "capitas", "capitals", - "capsuls", "capsules", - "captais", "captains", - "captial", "capital", - "captiol", "capitol", - "captued", "captured", - "capturd", "captured", - "capusle", "capsule", - "carange", "carnage", - "carbien", "carbine", - "cardaic", "cardiac", - "cardina", "cardigan", - "careing", "caring", - "caridac", "cardiac", - "carmtan", "cartman", - "carnege", "carnage", - "carnige", "carnage", - "carolan", "carolina", - "carreer", "career", - "carrers", "careers", - "cartles", "cartels", - "caryons", "crayons", - "casette", "cassette", - "casheir", "cashier", - "cashies", "cashiers", - "cashire", "cashier", - "casltes", "castles", - "caspule", "capsule", - "cassete", "cassette", - "castels", "castles", - "casuing", "causing", - "cathlic", "catholic", - "cauncks", "canucks", - "cavarly", "cavalry", - "cavlary", "cavalry", - "celcius", "celsius", - "celisus", "celsius", - "celitcs", "celtics", - "celsuis", "celsius", - "centruy", "century", - "centuty", "century", - "ceratin", "certain", - "cermaic", "ceramic", - "certian", "certain", - "cervial", "cervical", - "cesspol", "cesspool", - "cetlics", "celtics", - "chambre", "chamber", - "charcol", "charcoal", - "charisa", "charisma", - "chasiss", "chassis", - "chatoic", "chaotic", - "cheeots", "cheetos", - "cheesse", "cheeses", - "chekcer", "checker", - "chelsae", "chelsea", - "cheslea", "chelsea", - "chiense", "chinese", - "childen", "children", - "chimeny", "chimney", - "chinees", "chinese", - "chinmey", "chimney", - "chipest", "chipset", - "chispet", "chipset", - "chivaly", "chivalry", - "chlesea", "chelsea", - "choatic", "chaotic", - "chocies", "choices", - "choosen", "chosen", - "chtulhu", "cthulhu", - "churchs", "churches", - "cilanto", "cilantro", - "cilents", "clients", - "circels", "circles", - "circuis", "circuits", - "cirlces", "circles", - "clacium", "calcium", - "claerer", "clearer", - "claerly", "clearly", - "clagary", "calgary", - "claibre", "calibre", - "claimes", "claims", - "clairfy", "clarify", - "clairty", "clarity", - "clanand", "clannad", - "clarfiy", "clarify", - "classis", "classics", - "clasues", "clauses", - "claymer", "claymore", - "claymoe", "claymore", - "cleanes", "cleanse", - "cleasne", "cleanse", - "cleints", "clients", - "clenase", "cleanse", - "clesius", "celsius", - "cletics", "celtics", - "clevery", "cleverly", - "climats", "climates", - "climbes", "climbers", - "clincis", "clinics", - "clitors", "clitoris", - "cloesly", "closely", - "closley", "closely", - "cluases", "clauses", - "cluprit", "culprit", - "coalese", "coalesce", - "coctail", "cocktail", - "cohesie", "cohesive", - "colgone", "cologne", - "collape", "collapse", - "collest", "collects", - "collony", "colony", - "collumn", "column", - "cologen", "cologne", - "colomba", "colombia", - "colonge", "cologne", - "colorao", "colorado", - "colourd", "coloured", - "columsn", "columns", - "comando", "commando", - "comapny", "company", - "comapre", "compare", - "comarde", "comrade", - "comback", "comeback", - 
"combins", "combines", - "comdeic", "comedic", - "comited", "committed", - "commano", "commando", - "commans", "commands", - "commere", "commerce", - "comming", "coming", - "commitd", "commited", - "compase", "compares", - "compede", "competed", - "compilr", "compiler", - "compnay", "company", - "compots", "compost", - "comrads", "comrades", - "comtpon", "compton", - "conceed", "concede", - "conceps", "concepts", - "conclue", "conclude", - "concret", "concert", - "condenm", "condemn", - "condiut", "conduit", - "condmen", "condemn", - "confids", "confides", - "confins", "confines", - "confise", "confines", - "conflit", "conflict", - "conived", "connived", - "connecs", "connects", - "conqeur", "conquer", - "conqure", "conquer", - "consept", "concept", - "consern", "concern", - "consums", "consumes", - "contacs", "contacts", - "contais", "contains", - "contast", "contacts", - "contemt", "contempt", - "contens", "contents", - "contess", "contests", - "contian", "contain", - "contine", "continue", - "convers", "converts", - "conveyd", "conveyed", - "convine", "convince", - "coprses", "corpses", - "coputer", "computer", - "corasir", "corsair", - "coratia", "croatia", - "coridal", "cordial", - "corsari", "corsair", - "corsiar", "corsair", - "corspes", "corpses", - "corwbar", "crowbar", - "costums", "costumes", - "coudlnt", "couldnt", - "coulmns", "columns", - "coulndt", "couldnt", - "counsle", "counsel", - "countes", "counters", - "courtey", "courtesy", - "covenat", "covenant", - "coytoes", "coyotes", - "crabine", "carbine", - "cralwed", "crawled", - "craotia", "croatia", - "craweld", "crawled", - "creamic", "ceramic", - "createn", "creatine", - "creater", "creature", - "creatie", "creatine", - "creatue", "creature", - "creepes", "creepers", - "creepig", "creeping", - "creulty", "cruelty", - "cricles", "circles", - "critera", "criteria", - "cropses", "corpses", - "crosair", "corsair", - "crpytic", "cryptic", - "crsytal", "crystal", - "crtical", "critical", - "crucibe", "crucible", - "cruetly", "cruelty", - "cruical", "crucial", - "crulety", "cruelty", - "crusdae", "crusade", - "crusier", "cruiser", - "crusies", "cruises", - "crusive", "cursive", - "crutchs", "crutches", - "crypitc", "cryptic", - "crystas", "crystals", - "crystsl", "crystals", - "crytpic", "cryptic", - "crytsal", "crystal", - "cthluhu", "cthulhu", - "cthuhlu", "cthulhu", - "cthuluh", "cthulhu", - "ctuhlhu", "cthulhu", - "cuasing", "causing", - "cubcile", "cubicle", - "cubilce", "cubicle", - "cuddels", "cuddles", - "culrpit", "culprit", - "culturs", "cultures", - "cupboad", "cupboard", - "cuplrit", "culprit", - "curatin", "curtain", - "curcial", "crucial", - "curcuit", "circuit", - "curelty", "cruelty", - "curiser", "cruiser", - "curisve", "cursive", - "currate", "curate", - "currens", "currents", - "curreny", "currency", - "currest", "currents", - "cursade", "crusade", - "curtian", "curtain", - "cyandie", "cyanide", - "cyclits", "cyclist", - "cycloen", "cyclone", - "cycolps", "cyclops", - "cylcist", "cyclist", - "cylcone", "cyclone", - "cylcops", "cyclops", - "cynaide", "cyanide", - "cyrptic", "cryptic", - "cyrstal", "crystal", - "dagners", "dangers", - "daimond", "diamond", - "damenor", "demeanor", - "dammage", "damage", - "darcula", "dracula", - "dargons", "dragons", - "darkets", "darkest", - "datbase", "database", - "daulity", "duality", - "dawrves", "dwarves", - "ddogers", "dodgers", - "ddoging", "dodging", - "deadlit", "deadlift", - "deadpol", "deadpool", - "deafult", "default", - "deahtly", "deathly", - "deatils", "details", 
- "deatlhy", "deathly", - "decalre", "declare", - "decison", "decision", - "declars", "declares", - "declase", "declares", - "decress", "decrees", - "decribe", "describe", - "decsend", "descend", - "dectect", "detect", - "defaint", "defiant", - "defauls", "defaults", - "defelct", "deflect", - "defensd", "defends", - "deffine", "define", - "definat", "defiant", - "definet", "definite", - "definie", "definite", - "definig", "defining", - "definit", "definite", - "defualt", "default", - "degarde", "degrade", - "degrase", "degrasse", - "degrate", "degrade", - "deiners", "deniers", - "deisgns", "designs", - "deivant", "deviant", - "dekstop", "desktop", - "delcare", "declare", - "delfect", "deflect", - "demenor", "demeanor", - "dementa", "dementia", - "demsond", "desmond", - "deneirs", "deniers", - "denisty", "density", - "densley", "densely", - "depcits", "depicts", - "dependd", "depended", - "depitcs", "depicts", - "deployd", "deployed", - "depsise", "despise", - "descrie", "describe", - "descuss", "discuss", - "desgins", "designs", - "desings", "designs", - "desitny", "destiny", - "desnely", "densely", - "desnity", "density", - "desomnd", "desmond", - "despict", "depict", - "despide", "despised", - "despies", "despise", - "destkop", "desktop", - "destory", "destroy", - "destros", "destroys", - "detaild", "detailed", - "detials", "details", - "detorit", "detroit", - "detriot", "detroit", - "deuling", "dueling", - "devaint", "deviant", - "devaite", "deviate", - "devided", "divided", - "devlove", "devolve", - "devotin", "devotion", - "devovle", "devolve", - "diabets", "diabetes", - "dialecs", "dialects", - "dialoge", "dialogue", - "diamons", "diamonds", - "diasble", "disable", - "dicksih", "dickish", - "dicover", "discover", - "dictats", "dictates", - "dieties", "deities", - "dilpoma", "diploma", - "dimaond", "diamond", - "dingity", "dignity", - "dinosar", "dinosaur", - "diosese", "diocese", - "dipolma", "diploma", - "dirbble", "dribble", - "directy", "directly", - "diretcx", "directx", - "dirived", "derived", - "dirvers", "drivers", - "disbale", "disable", - "disguss", "disgusts", - "disliks", "dislikes", - "disover", "discover", - "dispair", "despair", - "dispath", "dispatch", - "dispite", "despite", - "dispuse", "disputes", - "disputs", "disputes", - "dissole", "dissolve", - "distase", "distaste", - "distint", "distinct", - "divison", "division", - "docuhes", "douches", - "docuhey", "douchey", - "dogders", "dodgers", - "dogding", "dodging", - "dolhpin", "dolphin", - "dolphis", "dolphins", - "dominae", "dominate", - "dominno", "dominion", - "doplhin", "dolphin", - "dortmud", "dortmund", - "draclua", "dracula", - "dracual", "dracula", - "drakest", "darkest", - "dramtic", "dramatic", - "dribbel", "dribble", - "driectx", "directx", - "driftig", "drifting", - "drinkes", "drinkers", - "druming", "drumming", - "duailty", "duality", - "dualtiy", "duality", - "dubsetp", "dubstep", - "dulaity", "duality", - "duleing", "dueling", - "dunegon", "dungeon", - "dungeos", "dungeons", - "dungoen", "dungeon", - "durring", "during", - "dusbtep", "dubstep", - "dyansty", "dynasty", - "dynamis", "dynamics", - "dynsaty", "dynasty", - "earlies", "earliest", - "earliet", "earliest", - "earplus", "earplugs", - "eastwod", "eastwood", - "ebcuase", "becuase", - "ecilpse", "eclipse", - "eclipes", "eclipse", - "eclispe", "eclipse", - "eclpise", "eclipse", - "ectsasy", "ecstasy", - "edbiles", "edibles", - "edibels", "edibles", - "effords", "efforts", - "ehtanol", "ethanol", - "eifnach", "einfach", - "eighten", "eighteen", - 
"einfahc", "einfach", - "elasped", "elapsed", - "elcipse", "eclipse", - "elction", "election", - "elecrto", "electro", - "electic", "electric", - "electon", "election", - "ellitot", "elliott", - "elloitt", "elliott", - "elphant", "elephant", - "emabrgo", "embargo", - "emabssy", "embassy", - "emapthy", "empathy", - "embeded", "embedded", - "embrago", "embargo", - "eminate", "emanate", - "emipres", "empires", - "emision", "emission", - "emiting", "emitting", - "emition", "emission", - "emmited", "emitted", - "empahty", "empathy", - "emphsis", "emphasis", - "empiers", "empires", - "empited", "emptied", - "emplore", "employer", - "emporer", "emperor", - "empries", "empires", - "emtpied", "emptied", - "enameld", "enameled", - "encahnt", "enchant", - "encalve", "enclave", - "encrpyt", "encrypt", - "encyrpt", "encrypt", - "endores", "endorse", - "endrose", "endorse", - "energis", "energies", - "enforse", "enforces", - "enginer", "engineer", - "englsih", "english", - "enhanse", "enhances", - "enlcave", "enclave", - "enlgish", "english", - "enlsave", "enslave", - "ensalve", "enslave", - "entbook", "netbook", - "entirey", "entirety", - "entorpy", "entropy", - "epiloge", "epilogue", - "episdoe", "episode", - "epsiode", "episode", - "epsorts", "esports", - "eptiome", "epitome", - "equiped", "equipped", - "erested", "arrested", - "escapse", "escapes", - "escpaes", "escapes", - "esctasy", "ecstasy", - "esporst", "esports", - "espreso", "espresso", - "esprots", "esports", - "essense", "essence", - "etherel", "ethereal", - "ethnaol", "ethanol", - "euphora", "euphoria", - "europen", "european", - "eurpean", "european", - "everets", "everest", - "everset", "everest", - "evloved", "evolved", - "evloves", "evolves", - "evovled", "evolved", - "evovles", "evolves", - "exaclty", "exactly", - "exahust", "exhaust", - "examind", "examined", - "exapnds", "expands", - "exatled", "exalted", - "excange", "exchange", - "excatly", "exactly", - "excells", "excels", - "exceprt", "excerpt", - "excluse", "excludes", - "excrept", "excerpt", - "exculde", "exclude", - "exelent", "excellent", - "exemple", "example", - "exerpts", "excerpts", - "exhasut", "exhaust", - "exhuast", "exhaust", - "exising", "existing", - "existet", "existent", - "exlated", "exalted", - "exlcude", "exclude", - "exliled", "exiled", - "exludes", "excludes", - "exmaple", "example", - "exoitcs", "exotics", - "expalin", "explain", - "expeced", "expected", - "expells", "expels", - "expiers", "expires", - "explict", "explicit", - "expliot", "exploit", - "explods", "explodes", - "explose", "explodes", - "expolde", "explode", - "expolit", "exploit", - "exposse", "exposes", - "expries", "expires", - "exsited", "existed", - "extered", "exerted", - "exterme", "extreme", - "extoics", "exotics", - "extreem", "extreme", - "extrems", "extremes", - "eyebals", "eyeballs", - "eyebros", "eyebrows", - "fabulos", "fabulous", - "facebok", "facebook", - "facepam", "facepalm", - "faclons", "falcons", - "facsism", "fascism", - "facsist", "fascist", - "failurs", "failures", - "faincee", "fiancee", - "falesly", "falsely", - "falired", "flaired", - "falshed", "flashed", - "falshes", "flashes", - "falsley", "falsely", - "falvors", "flavors", - "familes", "families", - "famoust", "famous", - "famousy", "famously", - "fanatsy", "fantasy", - "fantaic", "fanatic", - "faoming", "foaming", - "fascits", "fascist", - "fasicsm", "fascism", - "fasicst", "fascist", - "faslely", "falsely", - "fatiuge", "fatigue", - "febuary", "february", - "fecthed", "fetched", - "fecthes", "fetches", - 
"feminen", "feminine", - "feminie", "feminine", - "feminim", "feminism", - "feodras", "fedoras", - "fertily", "fertility", - "fesitve", "festive", - "fethced", "fetched", - "fethces", "fetches", - "fetishs", "fetishes", - "fianite", "finite", - "fianlly", "finally", - "fiercly", "fiercely", - "filcker", "flicker", - "filpped", "flipped", - "filterd", "filtered", - "finacee", "fiancee", - "fineses", "finesse", - "fininsh", "finnish", - "finishs", "finishes", - "finisse", "finishes", - "finnsih", "finnish", - "firends", "friends", - "firggin", "friggin", - "firsbee", "frisbee", - "firslty", "firstly", - "firtsly", "firstly", - "fitlers", "filters", - "flacons", "falcons", - "flahsed", "flashed", - "flahses", "flashes", - "flaried", "flaired", - "flasely", "falsely", - "flashig", "flashing", - "flavord", "flavored", - "flavous", "flavours", - "flawess", "flawless", - "flciker", "flicker", - "fliters", "filters", - "flordia", "florida", - "florene", "florence", - "fnaatic", "fanatic", - "fomaing", "foaming", - "fonetic", "phonetic", - "forefit", "forfeit", - "foregin", "foreign", - "foreing", "foreign", - "forfiet", "forfeit", - "forhead", "forehead", - "foriegn", "foreign", - "formaly", "formally", - "formery", "formerly", - "formost", "foremost", - "formual", "formula", - "formuls", "formulas", - "forrset", "forrest", - "forsakn", "forsaken", - "forsane", "forsaken", - "forumla", "formula", - "fountan", "fountain", - "fourten", "fourteen", - "fracter", "fracture", - "fragmet", "fragment", - "freedos", "freedoms", - "freinds", "friends", - "frigign", "friggin", - "fristly", "firstly", - "frostig", "frosting", - "frsibee", "frisbee", - "fruitin", "fruition", - "fullets", "fullest", - "fullset", "fullest", - "funides", "fundies", - "funtion", "function", - "furance", "furnace", - "furncae", "furnace", - "futhroc", "futhark", - "gadgest", "gadgets", - "gagdets", "gadgets", - "galatic", "galactic", - "galcier", "glacier", - "galsgow", "glasgow", - "gameply", "gameplay", - "gamerga", "gamertag", - "gankign", "ganking", - "ganster", "gangster", - "garabge", "garbage", - "garfied", "garfield", - "garnola", "granola", - "generas", "generals", - "genersl", "generals", - "geniuss", "geniuses", - "geogria", "georgia", - "geomety", "geometry", - "georiga", "georgia", - "gernade", "grenade", - "gerogia", "georgia", - "gigabye", "gigabyte", - "giltchy", "glitchy", - "gimmics", "gimmicks", - "gimmicy", "gimmicky", - "girzzly", "grizzly", - "glagsow", "glasgow", - "glaicer", "glacier", - "glicthy", "glitchy", - "glimpes", "glimpse", - "glimspe", "glimpse", - "glipmse", "glimpse", - "glitchd", "glitched", - "glitchs", "glitches", - "glithcy", "glitchy", - "globaly", "globally", - "gloiath", "goliath", - "glorios", "glorious", - "gltichy", "glitchy", - "gnaking", "ganking", - "gnawwed", "gnawed", - "goddanm", "goddamn", - "goddman", "goddamn", - "godliek", "godlike", - "godlman", "goldman", - "godsped", "godspeed", - "goergia", "georgia", - "goilath", "goliath", - "golaith", "goliath", - "golbins", "goblins", - "goldamn", "goldman", - "goldbeg", "goldberg", - "goldike", "godlike", - "golitah", "goliath", - "goodluk", "goodluck", - "gorumet", "gourmet", - "gosepls", "gospels", - "gosples", "gospels", - "gpysies", "gypsies", - "grabage", "garbage", - "grahpic", "graphic", - "grainte", "granite", - "grammer", "grammar", - "graniet", "granite", - "grantie", "granite", - "graphie", "graphite", - "graphis", "graphics", - "grappel", "grapple", - "greande", "grenade", - "grenads", "grenades", - "greneer", "greener", - 
"griaffe", "giraffe", - "gridles", "griddles", - "grillig", "grilling", - "grpahic", "graphic", - "guardin", "guardian", - "guiness", "guinness", - "gullibe", "gullible", - "gutiars", "guitars", - "gypises", "gypsies", - "gyspies", "gypsies", - "habaeus", "habeas", - "haethen", "heathen", - "hailfax", "halifax", - "halfiax", "halifax", - "handbok", "handbook", - "handedy", "handedly", - "handeld", "handled", - "hanlder", "handler", - "hannibl", "hannibal", - "hanuted", "haunted", - "haorder", "hoarder", - "hapened", "happened", - "happend", "happened", - "happliy", "happily", - "harased", "harassed", - "harases", "harasses", - "hardend", "hardened", - "hardwod", "hardwood", - "haricut", "haircut", - "hatchig", "hatching", - "hauntig", "haunting", - "haviest", "heaviest", - "headest", "headset", - "headses", "headsets", - "heaveny", "heavenly", - "heigher", "higher", - "heigths", "heights", - "helemts", "helmets", - "hellfie", "hellfire", - "hellvua", "helluva", - "helment", "helmet", - "helpped", "helped", - "hemlets", "helmets", - "henious", "heinous", - "heorics", "heroics", - "heorine", "heroine", - "heriocs", "heroics", - "herione", "heroine", - "herocis", "heroics", - "heronie", "heroine", - "hesiman", "heisman", - "hieghts", "heights", - "hienous", "heinous", - "hiesman", "heisman", - "himselv", "himself", - "hiptser", "hipster", - "hismelf", "himself", - "hispter", "hipster", - "hitboxs", "hitboxes", - "hoilday", "holiday", - "hokpins", "hopkins", - "holdiay", "holiday", - "holdins", "holdings", - "homniem", "hominem", - "horader", "hoarder", - "hosited", "hoisted", - "hosthot", "hotshot", - "hostles", "hostels", - "hostpot", "hotspot", - "hothsot", "hotshot", - "hotpsot", "hotspot", - "hotsopt", "hotspot", - "hounour", "honour", - "hseldon", "sheldon", - "huanted", "haunted", - "humanit", "humanist", - "humants", "humanist", - "humidiy", "humidity", - "humoros", "humorous", - "hunagry", "hungary", - "hunderd", "hundred", - "hundres", "hundreds", - "hungray", "hungary", - "hurdels", "hurdles", - "hurldes", "hurdles", - "husbans", "husbands", - "hweaton", "wheaton", - "hybirds", "hybrids", - "hydogen", "hydrogen", - "hygeine", "hygiene", - "hypnoss", "hypnosis", - "hyrbids", "hybrids", - "hystera", "hysteria", - "iceforg", "icefrog", - "ierland", "ireland", - "ignitin", "ignition", - "ignorat", "ignorant", - "illegas", "illegals", - "illegsl", "illegals", - "illinos", "illinois", - "imanent", "eminent", - "imapcts", "impacts", - "iminent", "eminent", - "imminet", "imminent", - "implict", "implicit", - "imploed", "implode", - "imploys", "employs", - "impluse", "impulse", - "impolde", "implode", - "importd", "imported", - "imporve", "improve", - "impules", "impulse", - "impusle", "impulse", - "imrpove", "improve", - "incldue", "include", - "incluse", "includes", - "indains", "indians", - "indiaan", "indiana", - "indluge", "indulge", - "indugle", "indulge", - "infalte", "inflate", - "infenro", "inferno", - "infered", "inferred", - "inferir", "inferior", - "infinet", "infinite", - "infinie", "infinite", - "infinit", "infinite", - "infornt", "infront", - "infroms", "informs", - "infrotn", "infront", - "inheirt", "inherit", - "inidans", "indians", - "initals", "initials", - "initisl", "initials", - "inlcine", "incline", - "inovker", "invoker", - "inpeach", "impeach", - "inpsect", "inspect", - "inpsire", "inspire", - "inquier", "inquire", - "inquriy", "inquiry", - "insaney", "insanely", - "inscets", "insects", - "insepct", "inspect", - "insipre", "inspire", - "insluts", "insults", - 
"instade", "instead", - "instint", "instinct", - "intenst", "intents", - "intered", "interred", - "interet", "interest", - "internt", "internet", - "interro", "interior", - "intrest", "interest", - "intrige", "intrigue", - "invlove", "involve", - "invoekr", "invoker", - "invovle", "involve", - "iornman", "ironman", - "iranain", "iranian", - "iranias", "iranians", - "iranina", "iranian", - "irleand", "ireland", - "ironamn", "ironman", - "isalmic", "islamic", - "isareli", "israeli", - "islamit", "islamist", - "islmaic", "islamic", - "isloate", "isolate", - "isralei", "israeli", - "isreali", "israeli", - "italias", "italians", - "jagaurs", "jaguars", - "jaguras", "jaguars", - "jamacia", "jamaica", - "jamaina", "jamaican", - "jamiaca", "jamaica", - "jamsine", "jasmine", - "janaury", "january", - "januray", "january", - "japanes", "japanese", - "jasmien", "jasmine", - "jaugars", "jaguars", - "jaunary", "january", - "jeircho", "jericho", - "jennins", "jennings", - "jeopary", "jeopardy", - "jeresys", "jerseys", - "jericoh", "jericho", - "jersyes", "jerseys", - "jewerly", "jewelry", - "jorunal", "journal", - "jounral", "journal", - "joystik", "joystick", - "juadism", "judaism", - "judasim", "judaism", - "judical", "judicial", - "juipter", "jupiter", - "junglig", "jungling", - "juptier", "jupiter", - "jusitfy", "justify", - "justfiy", "justify", - "karakoe", "karaoke", - "karoake", "karaoke", - "kenendy", "kennedy", - "kenndey", "kennedy", - "kentucy", "kentucky", - "keyboad", "keyboard", - "keychan", "keychain", - "keynode", "keynote", - "kicthen", "kitchen", - "killins", "killings", - "kineitc", "kinetic", - "kinghts", "knights", - "kinteic", "kinetic", - "kitches", "kitchens", - "kitites", "kitties", - "knietic", "kinetic", - "knigths", "knights", - "knuckel", "knuckle", - "kroeans", "koreans", - "krudish", "kurdish", - "ktichen", "kitchen", - "kubirck", "kubrick", - "kunckle", "knuckle", - "kurbick", "kubrick", - "kuridsh", "kurdish", - "laguage", "language", - "landins", "landings", - "lantren", "lantern", - "laready", "already", - "laregly", "largely", - "largley", "largely", - "lasanga", "lasagna", - "lasgana", "lasagna", - "latitue", "latitude", - "latnern", "lantern", - "launhed", "launched", - "lavendr", "lavender", - "leathal", "lethal", - "lefitst", "leftist", - "leftits", "leftist", - "legnths", "lengths", - "legnthy", "lengthy", - "legoins", "legions", - "leigons", "legions", - "lenghts", "lengths", - "lenoard", "leonard", - "lepoard", "leopard", - "lesbain", "lesbian", - "lesiban", "lesbian", - "lesiure", "leisure", - "liasion", "liaison", - "liasons", "liaisons", - "liberae", "liberate", - "liberas", "liberals", - "lienups", "lineups", - "liesure", "leisure", - "liftime", "lifetime", - "lighlty", "lightly", - "lightes", "lighters", - "ligthly", "lightly", - "linclon", "lincoln", - "linueps", "lineups", - "liqiuds", "liquids", - "lisence", "license", - "lisense", "license", - "listend", "listened", - "litecon", "litecoin", - "literae", "literate", - "lithuim", "lithium", - "litihum", "lithium", - "loadous", "loadouts", - "loenard", "leonard", - "loepard", "leopard", - "logiteh", "logitech", - "loosley", "loosely", - "luandry", "laundry", - "luckliy", "luckily", - "luicfer", "lucifer", - "lunatis", "lunatics", - "maching", "machine", - "machins", "machines", - "maclolm", "malcolm", - "macthup", "matchup", - "madsion", "madison", - "magents", "magnets", - "magicin", "magician", - "magolia", "magnolia", - "maidson", "madison", - "maintan", "maintain", - "mairlyn", "marilyn", - 
"malaira", "malaria", - "malaysa", "malaysia", - "malclom", "malcolm", - "manauls", "manuals", - "mandase", "mandates", - "mandats", "mandates", - "mangeld", "mangled", - "mangets", "magnets", - "manualy", "manually", - "manuver", "maneuver", - "marbels", "marbles", - "margart", "margaret", - "mariage", "marriage", - "mariens", "marines", - "maritan", "martian", - "marixsm", "marxism", - "mariyln", "marilyn", - "markede", "marketed", - "marlbes", "marbles", - "marliyn", "marilyn", - "marnies", "marines", - "marrage", "marriage", - "martail", "martial", - "martain", "martian", - "masacra", "mascara", - "massace", "massacre", - "mathcup", "matchup", - "mathwes", "mathews", - "matrial", "martial", - "maunals", "manuals", - "mcalren", "mclaren", - "meanins", "meanings", - "medicad", "medicaid", - "medicae", "medicare", - "medioce", "mediocre", - "meixcan", "mexican", - "meldoic", "melodic", - "melieux", "milieux", - "melodis", "melodies", - "memeber", "member", - "memoery", "memory", - "memorie", "memory", - "menally", "mentally", - "mentaly", "mentally", - "meoldic", "melodic", - "meranda", "veranda", - "merchat", "merchant", - "merucry", "mercury", - "messagd", "messaged", - "messaih", "messiah", - "metagem", "metagame", - "metalic", "metallic", - "mexcian", "mexican", - "michina", "michigan", - "midfied", "midfield", - "midotwn", "midtown", - "midtwon", "midtown", - "migrans", "migrants", - "militat", "militant", - "militis", "militias", - "miltary", "military", - "mimimum", "minimum", - "mineras", "minerals", - "mininos", "minions", - "ministr", "minister", - "ministy", "ministry", - "minoins", "minions", - "minstry", "ministry", - "minumum", "minimum", - "mirrord", "mirrored", - "misandy", "misandry", - "misison", "mission", - "misouri", "missouri", - "mispell", "misspell", - "missils", "missiles", - "mistery", "mystery", - "mobiliy", "mobility", - "modualr", "modular", - "momento", "memento", - "momment", "moment", - "monarcy", "monarchy", - "monatge", "montage", - "monglos", "mongols", - "monitos", "monitors", - "monstre", "monster", - "montaeg", "montage", - "montrel", "montreal", - "monumet", "monument", - "morbidy", "morbidly", - "morgage", "mortgage", - "morphen", "morphine", - "morphie", "morphine", - "morroco", "morocco", - "mortage", "mortgage", - "mosnter", "monster", - "mosture", "moisture", - "motivet", "motivate", - "motnage", "montage", - "motoral", "motorola", - "mountan", "mountain", - "movment", "movement", - "mucuous", "mucous", - "muesums", "museums", - "muliple", "multiple", - "mulsims", "muslims", - "multipe", "multiple", - "multipy", "multiply", - "munbers", "numbers", - "munchis", "munchies", - "murderd", "murdered", - "muscial", "musical", - "mushrom", "mushroom", - "musilms", "muslims", - "muslces", "muscles", - "musuems", "museums", - "mutatin", "mutation", - "mypsace", "myspace", - "mysapce", "myspace", - "napolen", "napoleon", - "narhwal", "narwhal", - "natique", "antique", - "nativey", "natively", - "natrual", "natural", - "naugthy", "naughty", - "nauseos", "nauseous", - "nautils", "nautilus", - "nautral", "natural", - "nautres", "natures", - "nectode", "netcode", - "needels", "needles", - "neruons", "neurons", - "neslave", "enslave", - "netocde", "netcode", - "netowrk", "network", - "netural", "neutral", - "neturon", "neutron", - "netwrok", "network", - "neurton", "neutron", - "neuterd", "neutered", - "nighlty", "nightly", - "nigthly", "nightly", - "nihilim", "nihilism", - "ninties", "1990s", - "niverse", "inverse", - "nocture", "nocturne", - "nominae", 
"nominate", - "nominet", "nominate", - "nonsene", "nonsense", - "noramls", "normals", - "norhern", "northern", - "normaly", "normally", - "normany", "normandy", - "northen", "northern", - "nostris", "nostrils", - "notario", "ontario", - "notebok", "notebook", - "nothern", "northern", - "nowdays", "nowadays", - "nrivana", "nirvana", - "nuaghty", "naughty", - "nubmers", "numbers", - "nucelar", "nuclear", - "nucelus", "nucleus", - "nuclean", "unclean", - "nuclues", "nucleus", - "nucular", "nuclear", - "nuerons", "neurons", - "nuetral", "neutral", - "nuetron", "neutron", - "nulcear", "nuclear", - "nullfiy", "nullify", - "nusance", "nuisance", - "nutriet", "nutrient", - "oarcles", "oracles", - "obivous", "obvious", - "obvoius", "obvious", - "ocarnia", "ocarina", - "ocasion", "occasion", - "occured", "occurred", - "ocotber", "october", - "ocotpus", "octopus", - "ocraina", "ocarina", - "ocuntry", "country", - "ocurred", "occurred", - "ofcoure", "ofcourse", - "offcers", "officers", - "offical", "official", - "offisde", "offside", - "oftenly", "often", - "ogrilla", "gorilla", - "olmypic", "olympic", - "olreans", "orleans", - "olympis", "olympics", - "olypmic", "olympic", - "omision", "omission", - "omiting", "omitting", - "omlette", "omelette", - "ommited", "omitted", - "onatrio", "ontario", - "onbaord", "onboard", - "onborad", "onboard", - "ontairo", "ontario", - "ontraio", "ontario", - "opartor", "operator", - "openess", "openness", - "opitcal", "optical", - "opitmal", "optimal", - "oponent", "opponent", - "oposite", "opposite", - "oppenly", "openly", - "opponet", "opponent", - "oprhans", "orphans", - "optimim", "optimism", - "oracels", "oracles", - "oragnes", "oranges", - "oragsms", "orgasms", - "oralces", "oracles", - "orbtial", "orbital", - "orcales", "oracles", - "orelans", "orleans", - "organes", "organise", - "organie", "organise", - "organim", "organism", - "orginal", "original", - "orhpans", "orphans", - "oribtal", "orbital", - "orlenas", "orleans", - "orpahns", "orphans", - "orthodx", "orthodox", - "outfied", "outfield", - "outsidr", "outsider", - "overhal", "overhaul", - "overpad", "overpaid", - "oversue", "overuse", - "overtun", "overturn", - "ownders", "wonders", - "owuldve", "wouldve", - "oylmpic", "olympic", - "pacakge", "package", - "pacifit", "pacifist", - "packade", "packaged", - "pacthes", "patches", - "pahntom", "phantom", - "paitent", "patient", - "palcebo", "placebo", - "pallete", "palette", - "palster", "plaster", - "palyboy", "playboy", - "pamflet", "pamphlet", - "pamplet", "pamphlet", - "pancaks", "pancakes", - "pandroa", "pandora", - "panthen", "pantheon", - "paradim", "paradigm", - "paradse", "parades", - "paralel", "parallel", - "paranoa", "paranoia", - "parises", "praises", - "parites", "parties", - "partice", "particle", - "partick", "patrick", - "partiel", "particle", - "partiot", "patriot", - "partols", "patrols", - "passabe", "passable", - "passivs", "passives", - "pasuing", "pausing", - "pateint", "patient", - "pathces", "patches", - "patiens", "patients", - "patirot", "patriot", - "patrcik", "patrick", - "patrios", "patriots", - "patroit", "patriot", - "peaples", "peoples", - "pebbels", "pebbles", - "peirced", "pierced", - "penatly", "penalty", - "pendulm", "pendulum", - "penguis", "penguins", - "penicls", "pencils", - "penison", "pension", - "penisse", "penises", - "penitum", "pentium", - "pensies", "penises", - "pensino", "pension", - "pentuim", "pentium", - "peopels", "peoples", - "percise", "precise", - "perdict", "predict", - "perfers", "prefers", - 
"perhasp", "perhaps", - "perhpas", "perhaps", - "perisan", "persian", - "perjery", "perjury", - "permade", "premade", - "permier", "premier", - "permise", "premise", - "permium", "premium", - "peroids", "periods", - "peronal", "personal", - "perpaid", "prepaid", - "perphas", "perhaps", - "persain", "persian", - "persets", "presets", - "persits", "persist", - "persued", "pursued", - "persuit", "pursuit", - "pervail", "prevail", - "perview", "preview", - "pharoah", "pharaoh", - "phatnom", "phantom", - "phsyics", "physics", - "phyiscs", "physics", - "physcis", "physics", - "physiqe", "physique", - "picthed", "pitched", - "picther", "pitcher", - "picthes", "pitches", - "piegons", "pigeons", - "piglrim", "pilgrim", - "pigoens", "pigeons", - "pilgirm", "pilgrim", - "pilrgim", "pilgrim", - "pinoeer", "pioneer", - "pinpoit", "pinpoint", - "pionere", "pioneer", - "pireced", "pierced", - "pithces", "pitches", - "plantes", "planets", - "plastis", "plastics", - "plastre", "plaster", - "plataeu", "plateau", - "plateua", "plateau", - "playabe", "playable", - "playofs", "playoffs", - "plesant", "pleasant", - "pligrim", "pilgrim", - "ploygon", "polygon", - "ploymer", "polymer", - "podemso", "podemos", - "podmeos", "podemos", - "poeples", "peoples", - "poignat", "poignant", - "poineer", "pioneer", - "pointes", "pointers", - "poisond", "poisoned", - "polgyon", "polygon", - "polical", "political", - "polishs", "polishes", - "polisse", "polishes", - "politey", "politely", - "poluted", "polluted", - "polutes", "pollutes", - "popluar", "popular", - "populer", "popular", - "populos", "populous", - "porpose", "propose", - "porshan", "portion", - "porshon", "portion", - "portait", "portrait", - "portary", "portray", - "portras", "portrays", - "portrat", "portrait", - "posions", "poisons", - "positon", "position", - "positve", "positive", - "possiby", "possibly", - "postdam", "potsdam", - "postion", "position", - "postive", "positive", - "potatos", "potatoes", - "potical", "optical", - "potrait", "portrait", - "powderd", "powdered", - "poweful", "powerful", - "poylgon", "polygon", - "poylmer", "polymer", - "practie", "practise", - "praisse", "praises", - "praries", "prairies", - "prasied", "praised", - "prasies", "praises", - "pratice", "practice", - "preamde", "premade", - "preceed", "precede", - "precice", "precise", - "preests", "presets", - "prehaps", "perhaps", - "preimer", "premier", - "preimum", "premium", - "preists", "priests", - "preivew", "preview", - "premeir", "premier", - "premiee", "premiere", - "premire", "premier", - "premits", "permits", - "premius", "premiums", - "premuim", "premium", - "prepair", "prepare", - "preriod", "period", - "presens", "presents", - "presest", "presets", - "presist", "persist", - "prestes", "presets", - "presude", "presumed", - "pretene", "pretense", - "pretens", "pretends", - "preveiw", "preview", - "prevert", "pervert", - "previal", "prevail", - "previes", "previews", - "previos", "previous", - "priased", "praised", - "priases", "praises", - "printes", "printers", - "pristen", "pristine", - "probabe", "probable", - "probaly", "probably", - "probelm", "problem", - "procede", "proceed", - "procees", "proceeds", - "procesd", "proceeds", - "proclam", "proclaim", - "produly", "proudly", - "produse", "produces", - "progidy", "prodigy", - "progrom", "pogrom", - "prohibt", "prohibit", - "prohpet", "prophet", - "prologe", "prologue", - "promose", "promotes", - "promots", "promotes", - "prompty", "promptly", - "promtps", "prompts", - "pronous", "pronouns", - "prooved", "proved", 
- "propeht", "prophet", - "prophey", "prophecy", - "propper", "proper", - "protals", "portals", - "protecs", "protects", - "protess", "protests", - "protocl", "protocol", - "protray", "portray", - "prouldy", "proudly", - "provded", "provided", - "provine", "province", - "prusuit", "pursuit", - "pryamid", "pyramid", - "pscyhed", "psyched", - "ptiched", "pitched", - "pticher", "pitcher", - "puasing", "pausing", - "publicy", "publicly", - "publsih", "publish", - "puhsups", "pushups", - "punishs", "punishes", - "punisse", "punishes", - "pursiut", "pursuit", - "pursude", "pursued", - "purused", "pursued", - "pushpus", "pushups", - "pyarmid", "pyramid", - "pyramis", "pyramids", - "pyrmaid", "pyramid", - "pysched", "psyched", - "qaulify", "qualify", - "qaulity", "quality", - "qauntum", "quantum", - "quailfy", "qualify", - "quailty", "quality", - "queires", "queries", - "queitly", "quietly", - "quereis", "queries", - "quicket", "quickest", - "quielty", "quietly", - "quitely", "quietly", - "qunatum", "quantum", - "qunetin", "quentin", - "racisst", "racists", - "racthet", "ratchet", - "radaint", "radiant", - "radiane", "radiance", - "radicas", "radicals", - "radiers", "raiders", - "raelism", "realism", - "raidant", "radiant", - "railrod", "railroad", - "rainbos", "rainbows", - "raoches", "roaches", - "raoming", "roaming", - "raptros", "raptors", - "raputre", "rapture", - "rathcet", "ratchet", - "ratpure", "rapture", - "reacing", "reaching", - "reagrds", "regards", - "realies", "realise", - "realsie", "realise", - "realsim", "realism", - "realtes", "relates", - "reamins", "remains", - "reapirs", "repairs", - "rebouns", "rebounds", - "rebulit", "rebuilt", - "recalim", "reclaim", - "receips", "receipts", - "recided", "resided", - "reciept", "receipt", - "recievd", "recieved", - "recieve", "receive", - "recitfy", "rectify", - "recived", "received", - "reclami", "reclaim", - "recliam", "reclaim", - "recorre", "recorder", - "recoves", "recovers", - "recpies", "recipes", - "redeemd", "redeemed", - "redners", "renders", - "refelct", "reflect", - "referal", "referral", - "refered", "referred", - "referig", "refering", - "referrs", "refers", - "reflexs", "reflexes", - "refrers", "refers", - "refroms", "reforms", - "refusla", "refusal", - "regerts", "regrets", - "regiems", "regimes", - "regimet", "regiment", - "registy", "registry", - "regluar", "regular", - "regrest", "regrets", - "regulae", "regulate", - "regulas", "regulars", - "regulsr", "regulars", - "reigmes", "regimes", - "reigons", "regions", - "reitres", "retires", - "reivews", "reviews", - "reknown", "renown", - "relaise", "realise", - "relapes", "relapse", - "relaspe", "relapse", - "relatie", "relative", - "relatin", "relation", - "relcaim", "reclaim", - "releive", "relieve", - "releses", "releases", - "relfect", "reflect", - "reliabe", "reliable", - "relient", "reliant", - "relized", "realised", - "relpase", "relapse", - "remaind", "remained", - "remaing", "remaining", - "remakrs", "remarks", - "remannt", "remnant", - "remeber", "remember", - "remians", "remains", - "remnans", "remnants", - "renderd", "rendered", - "renegae", "renegade", - "renmant", "remnant", - "rentors", "renters", - "rentres", "renters", - "renuion", "reunion", - "repaird", "repaired", - "repalys", "replays", - "repblic", "republic", - "repeast", "repeats", - "repitle", "reptile", - "replase", "replaces", - "replayd", "replayed", - "reponse", "response", - "repostd", "reposted", - "repsawn", "respawn", - "repsond", "respond", - "repsots", "reposts", - "reptiel", "reptile", - 
"reptils", "reptiles", - "repubic", "republic", - "republi", "republic", - "repulic", "republic", - "reqiuem", "requiem", - "requeim", "requiem", - "requime", "requiem", - "requred", "required", - "resapwn", "respawn", - "rescuse", "rescues", - "resembe", "resemble", - "reslove", "resolve", - "resolvs", "resolves", - "resonet", "resonate", - "resovle", "resolve", - "respest", "respects", - "respone", "response", - "respwan", "respawn", - "ressits", "resists", - "restord", "restored", - "resuced", "rescued", - "resuces", "rescues", - "returnd", "returned", - "reuinon", "reunion", - "reveald", "revealed", - "reveiws", "reviews", - "revelas", "reveals", - "reveral", "reversal", - "reviere", "reviewer", - "reviewd", "reviewed", - "reviewr", "reviewer", - "revolvr", "revolver", - "revolvs", "revolves", - "rewirte", "rewrite", - "reworkd", "reworked", - "rewriet", "rewrite", - "reynols", "reynolds", - "rhapsoy", "rhapsody", - "rhythem", "rhythm", - "rhythim", "rhythm", - "rhytmic", "rhythmic", - "riaders", "raiders", - "ritlain", "ritalin", - "ritoers", "rioters", - "rivarly", "rivalry", - "rivlary", "rivalry", - "roahces", "roaches", - "robotis", "robotics", - "rococco", "rococo", - "roestta", "rosetta", - "roiters", "rioters", - "roleply", "roleplay", - "romaina", "romania", - "romaing", "roaming", - "romanin", "romanian", - "romanna", "romanian", - "roomate", "roommate", - "rotuers", "routers", - "rugters", "rutgers", - "rulebok", "rulebook", - "rumorus", "rumours", - "rumuors", "rumours", - "runnung", "running", - "ruslted", "rustled", - "russina", "russian", - "russion", "russian", - "rusteld", "rustled", - "rythmic", "rhythmic", - "rythyms", "rhythms", - "sacrasm", "sarcasm", - "saddnes", "saddens", - "sadistc", "sadistic", - "sadning", "sanding", - "salaris", "salaries", - "salavge", "salvage", - "salvery", "slavery", - "salying", "slaying", - "sampels", "samples", - "samruai", "samurai", - "samuari", "samurai", - "samuria", "samurai", - "sandlas", "sandals", - "sandnig", "sanding", - "sanlder", "sandler", - "santorm", "santorum", - "sapphie", "sapphire", - "sarcams", "sarcasm", - "sargant", "sergeant", - "sasuage", "sausage", - "satifsy", "satisfy", - "satsify", "satisfy", - "satsohi", "satoshi", - "savanha", "savannah", - "savannh", "savannah", - "saveing", "saving", - "sawnsea", "swansea", - "sawnson", "swanson", - "scandas", "scandals", - "scannig", "scanning", - "scartch", "scratch", - "scheems", "schemes", - "schoold", "schooled", - "sciense", "sciences", - "scinece", "science", - "scootes", "scooters", - "scorpin", "scorpion", - "scpeter", "scepter", - "scracth", "scratch", - "scrambe", "scramble", - "scritps", "scripts", - "scrolld", "scrolled", - "scrpits", "scripts", - "scyhter", "scyther", - "seached", "searched", - "seaches", "searches", - "seahaws", "seahawks", - "seantor", "senator", - "searchd", "searched", - "searchs", "searches", - "sebrian", "serbian", - "secerts", "secrets", - "secpter", "scepter", - "secrest", "secrets", - "secrety", "secretly", - "seflies", "selfies", - "seguoys", "segues", - "seinors", "seniors", - "selifes", "selfies", - "senoirs", "seniors", - "sensure", "censure", - "sentaor", "senator", - "sentris", "sentries", - "serbain", "serbian", - "sergeat", "sergeant", - "sergent", "sergeant", - "seriban", "serbian", - "servans", "servants", - "sesnors", "sensors", - "settins", "settings", - "severly", "severely", - "sexualy", "sexually", - "seziure", "seizure", - "shaddow", "shadow", - "shanghi", "shanghai", - "shaprie", "sharpie", - "shaprly", "sharply", 
- "sharipe", "sharpie", - "shcemes", "schemes", - "sheelpe", "sheeple", - "sheepel", "sheeple", - "shephed", "shepherd", - "sherlok", "sherlock", - "shetler", "shelter", - "shevles", "shelves", - "shfiter", "shifter", - "shieldd", "shielded", - "shiping", "shipping", - "shirely", "shirley", - "shitfer", "shifter", - "shledon", "sheldon", - "shleter", "shelter", - "shoudln", "should", - "shouldt", "shouldnt", - "shoutot", "shoutout", - "showede", "showered", - "showerd", "showered", - "shperes", "spheres", - "shriley", "shirley", - "siblins", "siblings", - "sidelen", "sideline", - "sideral", "sidereal", - "siezing", "seizing", - "siezure", "seizure", - "signfiy", "signify", - "signins", "signings", - "signles", "singles", - "silders", "sliders", - "silenty", "silently", - "similir", "similiar", - "simliar", "similar", - "simplet", "simplest", - "simpley", "simply", - "simplfy", "simplify", - "simpliy", "simplify", - "simposn", "simpson", - "simspon", "simpson", - "singals", "signals", - "singels", "singles", - "singify", "signify", - "singsog", "singsong", - "sitmuli", "stimuli", - "skecthy", "sketchy", - "skeletl", "skeletal", - "skeptis", "skeptics", - "sketchs", "sketches", - "sketpic", "skeptic", - "skpetic", "skeptic", - "sktechy", "sketchy", - "skwyard", "skyward", - "slavage", "salvage", - "slayign", "slaying", - "sldiers", "sliders", - "slefies", "selfies", - "slighly", "slightly", - "slighty", "slightly", - "slippes", "slippers", - "slippey", "slippery", - "smaples", "samples", - "smartre", "smarter", - "smaurai", "samurai", - "snadler", "sandler", - "snigles", "singles", - "snippes", "snippets", - "snodwen", "snowden", - "snwoden", "snowden", - "snycing", "syncing", - "snyergy", "synergy", - "socialy", "socially", - "sofware", "software", - "soildly", "solidly", - "soldies", "soldiers", - "soldily", "solidly", - "somaila", "somalia", - "someons", "someones", - "somethn", "somethin", - "southen", "southern", - "soveits", "soviets", - "spacebr", "spacebar", - "spainsh", "spanish", - "spansih", "spanish", - "spanwed", "spawned", - "sparkel", "sparkle", - "spartas", "spartans", - "spartsn", "spartans", - "sparyed", "sprayed", - "spawend", "spawned", - "spawnig", "spawning", - "specail", "special", - "specfic", "specific", - "specias", "specials", - "specisl", "specials", - "spectum", "spectrum", - "speechs", "speeches", - "spehres", "spheres", - "speical", "special", - "speices", "species", - "spellig", "spelling", - "spindel", "spindle", - "spiritd", "spirited", - "splaton", "splatoon", - "splittr", "splitter", - "spoiles", "spoilers", - "spoitfy", "spotify", - "spolied", "spoiled", - "sponser", "sponsor", - "sporles", "sproles", - "sporuts", "sprouts", - "spotfiy", "spotify", - "sprinke", "sprinkle", - "sproels", "sproles", - "spwaned", "spawned", - "sqaures", "squares", - "sqeuaky", "squeaky", - "sqiushy", "squishy", - "squarey", "squarely", - "squirel", "squirtle", - "squirle", "squirrel", - "squirrl", "squirrel", - "squirte", "squirtle", - "squsihy", "squishy", - "sriraca", "sriracha", - "srpouts", "sprouts", - "sryians", "syrians", - "sryinge", "syringe", - "stadius", "stadiums", - "staduim", "stadium", - "stagnat", "stagnant", - "staidum", "stadium", - "stakler", "stalker", - "stalkes", "stalkers", - "stamnia", "stamina", - "staoshi", "satoshi", - "starins", "strains", - "startde", "startled", - "startus", "startups", - "statits", "statist", - "statsit", "statist", - "statuer", "stature", - "statuse", "statutes", - "statuts", "statutes", - "stautes", "statues", - "stealty", 
"stealthy", - "steeles", "steelers", - "steorid", "steroid", - "steriel", "sterile", - "sterlie", "sterile", - "stickes", "stickers", - "stiring", "stirring", - "stirker", "striker", - "stirrig", "stirring", - "stitchs", "stitches", - "stlaker", "stalker", - "stlyish", "stylish", - "storeis", "stories", - "storise", "stories", - "stormde", "stormed", - "straigt", "straight", - "straind", "strained", - "streamd", "streamed", - "stregth", "strength", - "strengh", "strength", - "streoid", "steroid", - "stresss", "stresses", - "strians", "strains", - "stricty", "strictly", - "striekr", "striker", - "stromed", "stormed", - "stubbon", "stubborn", - "studing", "studying", - "stuidos", "studios", - "stunami", "tsunami", - "stupidr", "stupider", - "stupidy", "stupidly", - "stupire", "stupider", - "suasage", "sausage", - "subisdy", "subsidy", - "subjest", "subjects", - "subtiel", "subtitle", - "succede", "succeed", - "succeds", "succeeds", - "succees", "succeeds", - "succesd", "succeeds", - "suceeds", "succeeds", - "suddeny", "suddenly", - "suefull", "usefull", - "sufferd", "suffered", - "summonr", "summoner", - "summore", "summoner", - "sunggle", "snuggle", - "sunifre", "sunfire", - "superme", "supreme", - "suposed", "supposed", - "suposes", "supposes", - "suppoed", "supposed", - "suppost", "supports", - "suprass", "surpass", - "supress", "suppress", - "suprisd", "suprised", - "suprise", "surprise", - "suprize", "surprise", - "supsend", "suspend", - "suround", "surround", - "surpeme", "supreme", - "surroud", "surround", - "sweidsh", "swedish", - "swiflty", "swiftly", - "swiming", "swimming", - "switchs", "switches", - "switfly", "swiftly", - "swnasea", "swansea", - "sycning", "syncing", - "sycther", "scyther", - "syirans", "syrians", - "sykward", "skyward", - "syllabe", "syllable", - "symetry", "symmetry", - "symmety", "symmetry", - "symobls", "symbols", - "sympaty", "sympathy", - "symtpom", "symptom", - "synegry", "synergy", - "synoynm", "synonym", - "sypmtom", "symptom", - "syracue", "syracuse", - "syrains", "syrians", - "sysadmn", "sysadmin", - "systemc", "systemic", - "sytlish", "stylish", - "tabacco", "tobacco", - "tailban", "taliban", - "tailord", "tailored", - "talbian", "taliban", - "tallets", "tallest", - "tangeld", "tangled", - "tanlged", "tangled", - "targetd", "targeted", - "taryvon", "trayvon", - "teached", "taught", - "teaspon", "teaspoon", - "techeis", "techies", - "tehcies", "techies", - "temepst", "tempest", - "tempels", "temples", - "tempets", "tempest", - "templas", "templars", - "tempset", "tempest", - "tenacle", "tentacle", - "tendacy", "tendency", - "tequlia", "tequila", - "tesitfy", "testify", - "testice", "testicle", - "teusday", "tuesday", - "thankyu", "thankyou", - "thearpy", "therapy", - "theistc", "theistic", - "theives", "thieves", - "themsef", "themself", - "therefo", "thereof", - "therien", "therein", - "theroem", "theorem", - "thesits", "theists", - "thiests", "theists", - "thirldy", "thirdly", - "thirten", "thirteen", - "thirtsy", "thirsty", - "thoerem", "theorem", - "thorats", "throats", - "thornes", "thrones", - "thoruim", "thorium", - "thoughs", "thoughts", - "threadd", "threaded", - "threeof", "thereof", - "thridly", "thirdly", - "thristy", "thirsty", - "throast", "throats", - "throium", "thorium", - "thryoid", "thyroid", - "thyorid", "thyroid", - "thyriod", "thyroid", - "tigther", "tighter", - "tiolets", "toilets", - "tirdent", "trident", - "titanim", "titanium", - "tlaking", "talking", - "tobbaco", "tobacco", - "toliets", "toilets", - "tolkein", "tolkien", - 
"tomatos", "tomatoes", - "tongiht", "tonight", - "tonuges", "tongues", - "toppins", "toppings", - "torando", "tornado", - "torndao", "tornado", - "torpdeo", "torpedo", - "torrest", "torrents", - "tortila", "tortilla", - "toruney", "tourney", - "toubles", "troubles", - "touchda", "touchpad", - "tounrey", "tourney", - "tourisy", "touristy", - "tourits", "tourist", - "tournes", "tourneys", - "toursim", "tourism", - "toursit", "tourist", - "towords", "towards", - "trackes", "trackers", - "trailes", "trailers", - "traines", "trainers", - "trainig", "training", - "tralier", "trailer", - "tratior", "traitor", - "traveld", "traveled", - "travere", "traverse", - "travesy", "travesty", - "travles", "travels", - "treasue", "treasure", - "treatis", "treaties", - "tremelo", "tremolo", - "trendig", "trending", - "trialer", "trailer", - "triange", "triangle", - "triator", "traitor", - "trickey", "trickery", - "tridnet", "trident", - "trimuph", "triumph", - "trinkes", "trinkets", - "trinkst", "trinkets", - "trintiy", "trinity", - "triolgy", "trilogy", - "troleld", "trolled", - "troling", "trolling", - "tronado", "tornado", - "tropedo", "torpedo", - "trudnle", "trundle", - "truimph", "triumph", - "trukish", "turkish", - "trundel", "trundle", - "trunlde", "trundle", - "tryahrd", "tryhard", - "tryavon", "trayvon", - "tsamina", "stamina", - "tsnuami", "tsunami", - "tsuanmi", "tsunami", - "tsunmai", "tsunami", - "tuesdsy", "tuesdays", - "tunnles", "tunnels", - "turbins", "turbines", - "turksih", "turkish", - "turltes", "turtles", - "turrest", "turrets", - "turtels", "turtles", - "tuseday", "tuesday", - "tusnami", "tsunami", - "tutrles", "turtles", - "twiligt", "twilight", - "tyelnol", "tylenol", - "typcial", "typical", - "tyrhard", "tryhard", - "tyrrany", "tyranny", - "udpated", "updated", - "uesfull", "usefull", - "ugprade", "upgrade", - "ukarine", "ukraine", - "ukranie", "ukraine", - "ukriane", "ukraine", - "ultimae", "ultimate", - "umbrela", "umbrella", - "unahppy", "unhappy", - "unbannd", "unbanned", - "underog", "undergo", - "unfairy", "unfairly", - "ungoldy", "ungodly", - "unicors", "unicorns", - "uniquey", "uniquely", - "unknwon", "unknown", - "unkonwn", "unknown", - "unlcean", "unclean", - "unlcoks", "unlocks", - "unlcuky", "unlucky", - "unlikey", "unlikely", - "unopend", "unopened", - "unprone", "unproven", - "unusabe", "unusable", - "unworty", "unworthy", - "upgarde", "upgrade", - "upgrads", "upgrades", - "uplaods", "uploads", - "upsteam", "upstream", - "urainum", "uranium", - "uranuim", "uranium", - "uretrha", "urethra", - "urkaine", "ukraine", - "urnaium", "uranium", - "urugauy", "uruguay", - "usefull", "useful", - "usefuly", "usefully", - "utiltiy", "utility", - "utopain", "utopian", - "utpoian", "utopian", - "vaccins", "vaccines", - "vaccume", "vacuum", - "vageuly", "vaguely", - "vaguley", "vaguely", - "vairant", "variant", - "valenca", "valencia", - "valetta", "valletta", - "valkyre", "valkyrie", - "valuabe", "valuable", - "valuble", "valuable", - "vampirs", "vampires", - "vanguad", "vanguard", - "varaint", "variant", - "vareity", "variety", - "varians", "variants", - "varient", "variant", - "varisty", "varsity", - "varitey", "variety", - "varstiy", "varsity", - "vasalls", "vassals", - "vasslas", "vassals", - "vaugely", "vaguely", - "vecotrs", "vectors", - "vectros", "vectors", - "veitnam", "vietnam", - "veiwers", "viewers", - "vendeta", "vendetta", - "verbaly", "verbally", - "verical", "vertical", - "verious", "various", - "verison", "version", - "veritgo", "vertigo", - "versoin", "version", - 
"vertgio", "vertigo", - "vessles", "vessels", - "vetween", "between", - "viatmin", "vitamin", - "vibratr", "vibrator", - "vicitms", "victims", - "vientam", "vietnam", - "vigrins", "virgins", - "vikigns", "vikings", - "villian", "villain", - "villify", "vilify", - "virbate", "vibrate", - "virigns", "virgins", - "virtiol", "vitriol", - "virutal", "virtual", - "virutes", "virtues", - "visable", "visible", - "visably", "visibly", - "visbily", "visibly", - "visting", "visiting", - "vistors", "visitors", - "vitaliy", "vitality", - "vitamis", "vitamins", - "vitenam", "vietnam", - "vitirol", "vitriol", - "vitmain", "vitamin", - "vitroil", "vitriol", - "vitrual", "virtual", - "vitrues", "virtues", - "volatge", "voltage", - "volumne", "volume", - "votlage", "voltage", - "vrigins", "virgins", - "waclott", "walcott", - "wacther", "watcher", - "waitres", "waiters", - "waktins", "watkins", - "warcrat", "warcraft", - "wardobe", "wardrobe", - "wariwck", "warwick", - "warrany", "warranty", - "warrent", "warrant", - "warrios", "warriors", - "warwcik", "warwick", - "wathcer", "watcher", - "watiers", "waiters", - "waviers", "waivers", - "wawrick", "warwick", - "wayword", "wayward", - "webapge", "webpage", - "webiste", "website", - "webstie", "website", - "weigths", "weights", - "weilded", "wielded", - "weirldy", "weirdly", - "weirods", "weirdos", - "welathy", "wealthy", - "wendsay", "wednesday", - "wensday", "wednesday", - "wepbage", "webpage", - "weridly", "weirdly", - "weridos", "weirdos", - "werstle", "wrestle", - "wesbite", "website", - "whaeton", "wheaton", - "whipser", "whisper", - "whislte", "whistle", - "whistel", "whistle", - "whitsle", "whistle", - "whsiper", "whisper", - "wiaters", "waiters", - "wiavers", "waivers", - "widgest", "widgets", - "wieghts", "weights", - "wigdets", "widgets", - "windosr", "windsor", - "winnins", "winnings", - "winsdor", "windsor", - "wintson", "winston", - "wirting", "writing", - "wisnton", "winston", - "withces", "witches", - "witheld", "withheld", - "withing", "within", - "withold", "withhold", - "wlacott", "walcott", - "wokring", "working", - "workins", "workings", - "woudlnt", "wouldnt", - "woudlve", "wouldve", - "woulndt", "wouldnt", - "wreslte", "wrestle", - "wroking", "working", - "wtiches", "witches", - "wupport", "support", - "yaching", "yachting", - "younget", "youngest", - "youseff", "yousef", - "youself", "yourself", - "zaelots", "zealots", - "zealtos", "zealots", - "zelaots", "zealots", - "zelaous", "zealous", - "zimbabe", "zimbabwe", - "zionsim", "zionism", - "zionsit", "zionist", - "zoinism", "zionism", - "zoinist", "zionist", - "abbout", "about", - "abilty", "ability", - "absail", "abseil", - "abutts", "abuts", - "achive", "achieve", - "acused", "accused", - "addopt", "adopt", - "addres", "address", - "adress", "address", - "aeriel", "aerial", - "affort", "afford", - "agains", "against", - "aginst", "against", - "ahppen", "happen", - "aiport", "airport", - "aisian", "asian", - "albiet", "albeit", - "alchol", "alcohol", - "aledge", "allege", - "aleged", "alleged", - "allign", "align", - "almsot", "almost", - "alomst", "almost", - "alowed", "allowed", - "alwasy", "always", - "alwyas", "always", - "amking", "making", - "ammend", "amend", - "amoung", "among", - "aplied", "applied", - "appart", "apart", - "aquire", "acquire", - "aready", "already", - "arised", "arose", - "arival", "arrival", - "arrary", "array", - "artice", "article", - "asetic", "ascetic", - "asside", "aside", - "attemp", "attempt", - "attemt", "attempt", - "auther", "author", - "awared", 
"awarded", - "bedore", "before", - "beeing", "being", - "befoer", "before", - "beggin", "begin", - "beleif", "belief", - "belive", "believe", - "beteen", "between", - "betwen", "between", - "beween", "between", - "bianry", "binary", - "boyant", "buoyant", - "broady", "broadly", - "buddah", "buddha", - "buring", "burying", - "carcas", "carcass", - "casion", "caisson", - "casued", "caused", - "casues", "causes", - "ceasar", "caesar", - "cencus", "census", - "censur", "censor", - "cheifs", "chiefs", - "circut", "circuit", - "clasic", "classic", - "coform", "conform", - "comany", "company", - "coucil", "council", - "densly", "densely", - "deside", "decide", - "devels", "delves", - "devide", "divide", - "dieing", "dying", - "divice", "device", - "doulbe", "double", - "dreasm", "dreams", - "duting", "during", - "ealier", "earlier", - "eearly", "early", - "efford", "effort", - "emited", "emitted", - "emnity", "enmity", - "enduce", "induce", - "enlish", "english", - "erally", "orally", - "eratic", "erratic", - "ethose", "those", - "exampt", "exempt", - "excact", "exact", - "excell", "excel", - "exerpt", "excerpt", - "exinct", "extinct", - "expell", "expel", - "expoch", "epoch", - "extint", "extinct", - "facist", "fascist", - "faught", "fought", - "finaly", "finally", - "forsaw", "foresaw", - "fougth", "fought", - "fourty", "forty", - "foward", "forward", - "freind", "friend", - "fromed", "formed", - "fufill", "fulfill", - "futher", "further", - "gardai", "gardaí", - "ghandi", "gandhi", - "glight", "flight", - "gloabl", "global", - "godess", "goddess", - "guilia", "giulia", - "guilio", "giulio", - "habeus", "habeas", - "harras", "harass", - "hatian", "haitian", - "heared", "heard", - "hertzs", "hertz", - "hieght", "height", - "higest", "highest", - "higway", "highway", - "honory", "honorary", - "howver", "however", - "hstory", "history", - "hunman", "human", - "husban", "husband", - "hvaing", "having", - "illess", "illness", - "ilness", "illness", - "imagin", "imagine", - "imense", "immense", - "includ", "include", - "inital", "initial", - "interm", "interim", - "intial", "initial", - "iunior", "junior", - "jaques", "jacques", - "jospeh", "joseph", - "jouney", "journey", - "klenex", "kleenex", - "labled", "labelled", - "largst", "largest", - "larrry", "larry", - "lefted", "left", - "lenght", "length", - "lerans", "learns", - "liason", "liaison", - "libary", "library", - "lieing", "lying", - "lieved", "lived", - "littel", "little", - "livley", "lively", - "lonley", "lonely", - "mailny", "mainly", - "markes", "marks", - "mileau", "milieu", - "milion", "million", - "millon", "million", - "misile", "missile", - "missen", "mizzen", - "missle", "missile", - "mkaing", "making", - "moderm", "modem", - "moreso", "more", - "mounth", "month", - "myraid", "myriad", - "naieve", "naive", - "nestin", "nesting", - "nineth", "ninth", - "noveau", "nouveau", - "occour", "occur", - "occurr", "occur", - "offred", "offered", - "omited", "omitted", - "ouevre", "oeuvre", - "oxigen", "oxygen", - "p0enis", "penis", - "packge", "package", - "peaple", "people", - "pensle", "pencil", - "peopel", "people", - "peotry", "poetry", - "perade", "parade", - "persan", "person", - "persue", "pursue", - "plateu", "plateau", - "poenis", "penis", - "poisin", "poison", - "polute", "pollute", - "posess", "possess", - "posion", "poison", - "prairy", "prairie", - "prarie", "prairie", - "preiod", "period", - "privte", "private", - "proces", "process", - "proove", "prove", - "psuedo", "pseudo", - "psyhic", "psychic", - "pucini", "puccini", - 
"pumkin", "pumpkin", - "puting", "putting", - "pyscic", "psychic", - "quizes", "quizzes", - "quuery", "query", - "racaus", "raucous", - "radify", "ratify", - "raelly", "really", - "reacll", "recall", - "realyl", "really", - "reched", "reached", - "recide", "reside", - "recrod", "record", - "refect", "reflect", - "relaly", "really", - "renewl", "renewal", - "retuns", "returns", - "reveiw", "review", - "rhymme", "rhyme", - "rigeur", "rigueur", - "rocord", "record", - "rougly", "roughly", - "runing", "running", - "rythem", "rhythm", - "rythim", "rhythm", - "saftey", "safety", - "salery", "salary", - "satisy", "satisfy", - "satric", "satiric", - "saught", "sought", - "scince", "science", - "scirpt", "script", - "seceed", "succeed", - "seinor", "senior", - "sepina", "subpoena", - "sevice", "service", - "shamen", "shaman", - "sheild", "shield", - "shiped", "shipped", - "shorly", "shortly", - "shoudl", "should", - "shreak", "shriek", - "siezed", "seized", - "sixtin", "sistine", - "sneeks", "sneaks", - "somene", "someone", - "soudns", "sounds", - "sourth", "south", - "speach", "speech", - "spects", "aspects", - "spoace", "space", - "sqaure", "square", - "staion", "station", - "stange", "strange", - "stilus", "stylus", - "stirrs", "stirs", - "stopry", "story", - "strnad", "strand", - "studdy", "study", - "suceed", "succeed", - "sucess", "success", - "sucide", "suicide", - "sumary", "summary", - "suport", "support", - "supose", "suppose", - "surfce", "surface", - "surley", "surly", - "swaers", "swears", - "swepth", "swept", - "talekd", "talked", - "theese", "these", - "therby", "thereby", - "thigns", "things", - "thigsn", "things", - "thikns", "thinks", - "thiunk", "think", - "thnigs", "things", - "threee", "three", - "tkaing", "taking", - "tounge", "tongue", - "tourch", "torch", - "towrad", "toward", - "trafic", "traffic", - "troups", "troupes", - "truely", "truly", - "twelth", "twelfth", - "tyrany", "tyranny", - "unabel", "unable", - "unkown", "unknown", - "untill", "until", - "usally", "usually", - "useage", "usage", - "useing", "using", - "usualy", "usually", - "vaccum", "vacuum", - "variey", "variety", - "varing", "varying", - "varity", "variety", - "vasall", "vassal", - "vigeur", "vigueur", - "villin", "villain", - "vreity", "variety", - "vriety", "variety", - "whants", "wants", - "wheras", "whereas", - "wheter", "whether", - "wholey", "wholly", - "whther", "whether", - "wnated", "wanted", - "writen", "written", - "yaerly", "yearly", - "yotube", "youtube", - "zeebra", "zebra", - "abotu", "about", - "adres", "address", - "afair", "affair", - "agian", "again", - "agina", "again", - "agred", "agreed", - "alege", "allege", - "alsot", "also", - "altho", "although", - "amung", "among", - "anual", "annual", - "aroud", "around", - "arund", "around", - "asign", "assign", - "assit", "assist", - "asume", "assume", - "atain", "attain", - "autor", "author", - "baout", "about", - "blaim", "blame", - "boaut", "bout", - "boook", "book", - "borke", "broke", - "breif", "brief", - "caost", "coast", - "casue", "cause", - "chasr", "chaser", - "cheif", "chief", - "chuch", "church", - "claer", "clear", - "clera", "clear", - "coudl", "could", - "crowm", "crown", - "deram", "dram", - "diety", "deity", - "doens", "does", - "doign", "doing", - "donig", "doing", - "drnik", "drink", - "durig", "during", - "earnt", "earned", - "eigth", "eighth", - "eiter", "either", - "emtpy", "empty", - "endig", "ending", - "eveyr", "every", - "exept", "except", - "eyars", "years", - "eyasr", "years", - "fiels", "fields", - "firts", 
"flirts", - "fleed", "fled", - "fomed", "formed", - "foucs", "focus", - "foudn", "found", - "fouth", "fourth", - "frome", "from", - "ganes", "games", - "gaurd", "guard", - "gerat", "great", - "gogin", "going", - "goign", "going", - "gonig", "going", - "graet", "great", - "greif", "grief", - "gropu", "group", - "guage", "gauge", - "hapen", "happen", - "herad", "heard", - "heroe", "hero", - "higer", "higher", - "housr", "hours", - "htere", "there", - "htikn", "think", - "hting", "thing", - "htink", "think", - "hwihc", "which", - "hwile", "while", - "hwole", "whole", - "idaes", "ideas", - "idesa", "ideas", - "ihaca", "ithaca", - "knwos", "knows", - "konws", "knows", - "lastr", "last", - "lavae", "larvae", - "layed", "laid", - "leage", "league", - "leanr", "lean", - "leran", "learn", - "levle", "level", - "lible", "libel", - "liekd", "liked", - "liuke", "like", - "lmits", "limits", - "lonly", "lonely", - "lukid", "likud", - "lybia", "libya", - "maked", "marked", - "makse", "makes", - "mamal", "mammal", - "mileu", "milieu", - "mkaes", "makes", - "modle", "model", - "moent", "moment", - "moeny", "money", - "monts", "months", - "movei", "movie", - "muder", "murder", - "mysef", "myself", - "neice", "niece", - "ninty", "ninety", - "ocurr", "occur", - "oging", "going", - "opose", "oppose", - "orded", "ordered", - "orgin", "origin", - "otehr", "other", - "ouput", "output", - "owudl", "would", - "paide", "paid", - "palce", "place", - "pased", "passed", - "payed", "paid", - "peice", "piece", - "peoms", "poems", - "poety", "poetry", - "pwoer", "power", - "qtuie", "quite", - "qutie", "quite", - "realy", "really", - "repid", "rapid", - "rised", "raised", - "rulle", "rule", - "rwite", "write", - "rythm", "rhythm", - "safty", "safety", - "scoll", "scroll", - "seach", "search", - "seige", "siege", - "seing", "seeing", - "sence", "sense", - "sicne", "since", - "sieze", "seize", - "sinse", "sines", - "slowy", "slowly", - "snese", "sneeze", - "soley", "solely", - "sotry", "story", - "sotyr", "satyr", - "soudn", "sound", - "sould", "could", - "spred", "spread", - "stlye", "style", - "stong", "strong", - "stoyr", "story", - "strat", "start", - "stroy", "story", - "suppy", "supply", - "swaer", "swear", - "syrap", "syrup", - "sytem", "system", - "sytle", "style", - "tatoo", "tattoo", - "thast", "that", - "theif", "thief", - "theri", "their", - "thgat", "that", - "thier", "their", - "thign", "thing", - "thikn", "think", - "thnig", "thing", - "thrid", "third", - "thsoe", "those", - "thyat", "that", - "tihkn", "think", - "timne", "time", - "tiome", "time", - "tkaes", "takes", - "todya", "today", - "tyhat", "that", - "unsed", "used", - "weild", "wield", - "whant", "want", - "whcih", "which", - "whihc", "which", - "whith", "with", - "whlch", "which", - "wholy", "wholly", - "wierd", "weird", - "wille", "will", - "willk", "will", - "withh", "with", - "witht", "with", - "wiull", "will", - "wnats", "wants", - "wohle", "whole", - "worls", "world", - "woudl", "would", - "wriet", "write", - "wroet", "wrote", - "yaers", "years", - "yatch", "yacht", - "yearm", "year", - "yeasr", "years", - "yeild", "yield", - "yeras", "years", - "yersa", "years", - "agin", "again", - "agre", "agree", - "ahev", "have", - "ahve", "have", - "alse", "else", - "amke", "make", - "anbd", "and", - "andd", "and", - "apon", "upon", - "aslo", "also", - "awya", "away", - "bakc", "back", - "bcak", "back", - "clas", "class", - "cpoy", "coy", - "cxan", "cyan", - "daed", "dead", - "dael", "deal", - "diea", "idea", - "doub", "doubt", - "dyas", "dryas", - "eahc", 
"each", - "efel", "evil", - "eles", "eels", - "ened", "need", - "enxt", "next", - "esle", "else", - "eyar", "year", - "fatc", "fact", - "fidn", "find", - "fomr", "from", - "grwo", "grow", - "haev", "have", - "halp", "help", - "holf", "hold", - "hten", "then", - "htey", "they", - "htis", "this", - "hvae", "have", - "hvea", "have", - "inot", "into", - "iwll", "will", - "iwth", "with", - "jstu", "just", - "jsut", "just", - "knwo", "know", - "konw", "know", - "kwno", "know", - "liek", "like", - "loev", "love", - "lveo", "love", - "lvoe", "love", - "mkae", "make", - "mkea", "make", - "mroe", "more", - "nkow", "know", - "nkwo", "know", - "nmae", "name", - "noth", "north", - "nowe", "now", - "omre", "more", - "onot", "note", - "onyl", "only", - "owrk", "work", - "peom", "poem", - "pich", "pitch", - "rela", "real", - "sasy", "says", - "smae", "same", - "smoe", "some", - "soem", "some", - "sohw", "show", - "stpo", "stop", - "suop", "soup", - "syas", "says", - "tahn", "than", - "taht", "that", - "tast", "taste", - "tath", "that", - "tehy", "they", - "tghe", "the", - "ther", "there", - "thge", "the", - "thna", "than", - "thne", "then", - "thsi", "this", - "thta", "that", - "tiem", "time", - "tihs", "this", - "tjhe", "the", - "tkae", "take", - "tood", "todo", - "tust", "trust", - "twon", "town", - "twpo", "two", - "tyhe", "they", - "uise", "use", - "vell", "well", - "veyr", "very", - "vrey", "very", - "vyer", "very", - "vyre", "very", - "waht", "what", - "wass", "was", - "watn", "want", - "weas", "was", - "wehn", "when", - "whic", "which", - "whta", "what", - "wich", "which", - "wief", "wife", - "wiew", "view", - "wiht", "with", - "witn", "with", - "wnat", "want", - "wokr", "work", - "wrok", "work", - "wtih", "with", - "yaer", "year", - "yera", "year", - "yrea", "year", - "ytou", "you", - "adn", "and", - "ect", "etc", - "nto", "not", - "teh", "the", - "thn", "then", - "tje", "the", - "whn", "when", - "wih", "with", - "yuo", "you", -} - -// DictAmerican converts UK spellings to US spellings -var DictAmerican = []string{ - "institutionalisation", "institutionalization", - "internationalisation", "internationalization", - "professionalisation", "professionalization", - "compartmentalising", "compartmentalizing", - "institutionalising", "institutionalizing", - "internationalising", "internationalizing", - "compartmentalised", "compartmentalized", - "compartmentalises", "compartmentalizes", - "decriminalisation", "decriminalization", - "denationalisation", "denationalization", - "fictionalisations", "fictionalizations", - "institutionalised", "institutionalized", - "institutionalises", "institutionalizes", - "intellectualising", "intellectualizing", - "internationalised", "internationalized", - "internationalises", "internationalizes", - "pedestrianisation", "pedestrianization", - "professionalising", "professionalizing", - "archaeologically", "archeologically", - "compartmentalise", "compartmentalize", - "decentralisation", "decentralization", - "demilitarisation", "demilitarization", - "externalisations", "externalizations", - "fictionalisation", "fictionalization", - "institutionalise", "institutionalize", - "intellectualised", "intellectualized", - "intellectualises", "intellectualizes", - "internationalise", "internationalize", - "nationalisations", "nationalizations", - "palaeontologists", "paleontologists", - "professionalised", "professionalized", - "professionalises", "professionalizes", - "rationalisations", "rationalizations", - "sensationalising", "sensationalizing", - "sentimentalising", 
"sentimentalizing", - "acclimatisation", "acclimatization", - "bougainvillaeas", "bougainvilleas", - "commercialising", "commercializing", - "conceptualising", "conceptualizing", - "contextualising", "contextualizing", - "crystallisation", "crystallization", - "decriminalising", "decriminalizing", - "democratisation", "democratization", - "denationalising", "denationalizing", - "depersonalising", "depersonalizing", - "desensitisation", "desensitization", - "destabilisation", "destabilization", - "disorganisation", "disorganization", - "extemporisation", "extemporization", - "externalisation", "externalization", - "familiarisation", "familiarization", - "generalisations", "generalizations", - "hospitalisation", "hospitalization", - "individualising", "individualizing", - "industrialising", "industrializing", - "intellectualise", "intellectualize", - "internalisation", "internalization", - "manoeuvrability", "maneuverability", - "marginalisation", "marginalization", - "materialisation", "materialization", - "miniaturisation", "miniaturization", - "nationalisation", "nationalization", - "neighbourliness", "neighborliness", - "overemphasising", "overemphasizing", - "palaeontologist", "paleontologist", - "particularising", "particularizing", - "pedestrianising", "pedestrianizing", - "professionalise", "professionalize", - "psychoanalysing", "psychoanalyzing", - "rationalisation", "rationalization", - "reorganisations", "reorganizations", - "revolutionising", "revolutionizing", - "sensationalised", "sensationalized", - "sensationalises", "sensationalizes", - "sentimentalised", "sentimentalized", - "sentimentalises", "sentimentalizes", - "specialisations", "specializations", - "standardisation", "standardization", - "synchronisation", "synchronization", - "systematisation", "systematization", - "aggrandisement", "aggrandizement", - "anaesthetising", "anesthetizing", - "archaeological", "archeological", - "archaeologists", "archeologists", - "bougainvillaea", "bougainvillea", - "characterising", "characterizing", - "collectivising", "collectivizing", - "commercialised", "commercialized", - "commercialises", "commercializes", - "conceptualised", "conceptualized", - "conceptualises", "conceptualizes", - "contextualised", "contextualized", - "contextualises", "contextualizes", - "decentralising", "decentralizing", - "decriminalised", "decriminalized", - "decriminalises", "decriminalizes", - "dehumanisation", "dehumanization", - "demilitarising", "demilitarizing", - "demobilisation", "demobilization", - "demoralisation", "demoralization", - "denationalised", "denationalized", - "denationalises", "denationalizes", - "depersonalised", "depersonalized", - "depersonalises", "depersonalizes", - "disembowelling", "disemboweling", - "dramatisations", "dramatizations", - "editorialising", "editorializing", - "encyclopaedias", "encyclopedias", - "fictionalising", "fictionalizing", - "fraternisation", "fraternization", - "generalisation", "generalization", - "gynaecological", "gynecological", - "gynaecologists", "gynecologists", - "haematological", "hematological", - "haematologists", "hematologists", - "immobilisation", "immobilization", - "individualised", "individualized", - "individualises", "individualizes", - "industrialised", "industrialized", - "industrialises", "industrializes", - "liberalisation", "liberalization", - "monopolisation", "monopolization", - "naturalisation", "naturalization", - "neighbourhoods", "neighborhoods", - "neutralisation", "neutralization", - "organisational", "organizational", 
- "outmanoeuvring", "outmaneuvering", - "overemphasised", "overemphasized", - "overemphasises", "overemphasizes", - "paediatricians", "pediatricians", - "particularised", "particularized", - "particularises", "particularizes", - "pasteurisation", "pasteurization", - "pedestrianised", "pedestrianized", - "pedestrianises", "pedestrianizes", - "philosophising", "philosophizing", - "politicisation", "politicization", - "popularisation", "popularization", - "pressurisation", "pressurization", - "prioritisation", "prioritization", - "privatisations", "privatizations", - "propagandising", "propagandizing", - "psychoanalysed", "psychoanalyzed", - "psychoanalyses", "psychoanalyzes", - "regularisation", "regularization", - "reorganisation", "reorganization", - "revolutionised", "revolutionized", - "revolutionises", "revolutionizes", - "secularisation", "secularization", - "sensationalise", "sensationalize", - "sentimentalise", "sentimentalize", - "serialisations", "serializations", - "specialisation", "specialization", - "sterilisations", "sterilizations", - "stigmatisation", "stigmatization", - "transistorised", "transistorized", - "unrecognisable", "unrecognizable", - "visualisations", "visualizations", - "westernisation", "westernization", - "accessorising", "accessorizing", - "acclimatising", "acclimatizing", - "amortisations", "amortizations", - "amphitheatres", "amphitheaters", - "anaesthetised", "anesthetized", - "anaesthetises", "anesthetizes", - "anaesthetists", "anesthetists", - "archaeologist", "archeologist", - "backpedalling", "backpedaling", - "behaviourists", "behaviorists", - "breathalysers", "breathalyzers", - "breathalysing", "breathalyzing", - "callisthenics", "calisthenics", - "cannibalising", "cannibalizing", - "characterised", "characterized", - "characterises", "characterizes", - "circularising", "circularizing", - "clarinettists", "clarinetists", - "collectivised", "collectivized", - "collectivises", "collectivizes", - "commercialise", "commercialize", - "computerising", "computerizing", - "conceptualise", "conceptualize", - "contextualise", "contextualize", - "criminalising", "criminalizing", - "crystallising", "crystallizing", - "decentralised", "decentralized", - "decentralises", "decentralizes", - "decriminalise", "decriminalize", - "demilitarised", "demilitarized", - "demilitarises", "demilitarizes", - "democratising", "democratizing", - "denationalise", "denationalize", - "depersonalise", "depersonalize", - "desensitising", "desensitizing", - "destabilising", "destabilizing", - "disembowelled", "disemboweled", - "dishonourable", "dishonorable", - "dishonourably", "dishonorably", - "dramatisation", "dramatization", - "editorialised", "editorialized", - "editorialises", "editorializes", - "encyclopaedia", "encyclopedia", - "encyclopaedic", "encyclopedic", - "extemporising", "extemporizing", - "externalising", "externalizing", - "familiarising", "familiarizing", - "fertilisation", "fertilization", - "fictionalised", "fictionalized", - "fictionalises", "fictionalizes", - "formalisation", "formalization", - "fossilisation", "fossilization", - "globalisation", "globalization", - "gynaecologist", "gynecologist", - "haematologist", "hematologist", - "haemophiliacs", "hemophiliacs", - "haemorrhaging", "hemorrhaging", - "harmonisation", "harmonization", - "hospitalising", "hospitalizing", - "hypothesising", "hypothesizing", - "immortalising", "immortalizing", - "individualise", "individualize", - "industrialise", "industrialize", - "internalising", "internalizing", - 
"marginalising", "marginalizing", - "materialising", "materializing", - "mechanisation", "mechanization", - "memorialising", "memorializing", - "miniaturising", "miniaturizing", - "miscatalogued", "miscataloged", - "misdemeanours", "misdemeanors", - "multicoloured", "multicolored", - "nationalising", "nationalizing", - "neighbourhood", "neighborhood", - "normalisation", "normalization", - "organisations", "organizations", - "outmanoeuvred", "outmaneuvered", - "outmanoeuvres", "outmaneuvers", - "overemphasise", "overemphasize", - "paediatrician", "pediatrician", - "palaeontology", "paleontology", - "particularise", "particularize", - "passivisation", "passivization", - "patronisingly", "patronizingly", - "pedestrianise", "pedestrianize", - "personalising", "personalizing", - "philosophised", "philosophized", - "philosophises", "philosophizes", - "privatisation", "privatization", - "propagandised", "propagandized", - "propagandises", "propagandizes", - "proselytisers", "proselytizers", - "proselytising", "proselytizing", - "psychoanalyse", "psychoanalyze", - "pulverisation", "pulverization", - "rationalising", "rationalizing", - "reconnoitring", "reconnoitering", - "revolutionise", "revolutionize", - "romanticising", "romanticizing", - "serialisation", "serialization", - "socialisation", "socialization", - "stabilisation", "stabilization", - "standardising", "standardizing", - "sterilisation", "sterilization", - "subsidisation", "subsidization", - "synchronising", "synchronizing", - "systematising", "systematizing", - "tantalisingly", "tantalizingly", - "underutilised", "underutilized", - "victimisation", "victimization", - "visualisation", "visualization", - "vocalisations", "vocalizations", - "vulgarisation", "vulgarization", - "accessorised", "accessorized", - "accessorises", "accessorizes", - "acclimatised", "acclimatized", - "acclimatises", "acclimatizes", - "amortisation", "amortization", - "amphitheatre", "amphitheater", - "anaesthetics", "anesthetics", - "anaesthetise", "anesthetize", - "anaesthetist", "anesthetist", - "antagonising", "antagonizing", - "appetisingly", "appetizingly", - "backpedalled", "backpedaled", - "bastardising", "bastardizing", - "behaviourism", "behaviorism", - "behaviourist", "behaviorist", - "bowdlerising", "bowdlerizing", - "breathalysed", "breathalyzed", - "breathalyser", "breathalyzer", - "breathalyses", "breathalyzes", - "cannibalised", "cannibalized", - "cannibalises", "cannibalizes", - "capitalising", "capitalizing", - "caramelising", "caramelizing", - "categorising", "categorizing", - "centigrammes", "centigrams", - "centralising", "centralizing", - "centrepieces", "centerpieces", - "characterise", "characterize", - "circularised", "circularized", - "circularises", "circularizes", - "clarinettist", "clarinetist", - "collectivise", "collectivize", - "colonisation", "colonization", - "computerised", "computerized", - "computerises", "computerizes", - "criminalised", "criminalized", - "criminalises", "criminalizes", - "crystallised", "crystallized", - "crystallises", "crystallizes", - "decentralise", "decentralize", - "dehumanising", "dehumanizing", - "demilitarise", "demilitarize", - "demobilising", "demobilizing", - "democratised", "democratized", - "democratises", "democratizes", - "demoralising", "demoralizing", - "desensitised", "desensitized", - "desensitises", "desensitizes", - "destabilised", "destabilized", - "destabilises", "destabilizes", - "discolouring", "discoloring", - "dishonouring", "dishonoring", - "disorganised", "disorganized", - 
"editorialise", "editorialize", - "endeavouring", "endeavoring", - "equalisation", "equalization", - "evangelising", "evangelizing", - "extemporised", "extemporized", - "extemporises", "extemporizes", - "externalised", "externalized", - "externalises", "externalizes", - "familiarised", "familiarized", - "familiarises", "familiarizes", - "fictionalise", "fictionalize", - "finalisation", "finalization", - "fraternising", "fraternizing", - "generalising", "generalizing", - "haemophiliac", "hemophiliac", - "haemorrhaged", "hemorrhaged", - "haemorrhages", "hemorrhages", - "haemorrhoids", "hemorrhoids", - "homoeopathic", "homeopathic", - "homogenising", "homogenizing", - "hospitalised", "hospitalized", - "hospitalises", "hospitalizes", - "hypothesised", "hypothesized", - "hypothesises", "hypothesizes", - "idealisation", "idealization", - "immobilisers", "immobilizers", - "immobilising", "immobilizing", - "immortalised", "immortalized", - "immortalises", "immortalizes", - "immunisation", "immunization", - "initialising", "initializing", - "internalised", "internalized", - "internalises", "internalizes", - "jeopardising", "jeopardizing", - "legalisation", "legalization", - "legitimising", "legitimizing", - "liberalising", "liberalizing", - "manoeuvrable", "maneuverable", - "manoeuvrings", "maneuverings", - "marginalised", "marginalized", - "marginalises", "marginalizes", - "marvellously", "marvelously", - "materialised", "materialized", - "materialises", "materializes", - "maximisation", "maximization", - "memorialised", "memorialized", - "memorialises", "memorializes", - "metabolising", "metabolizing", - "militarising", "militarizing", - "milligrammes", "milligrams", - "miniaturised", "miniaturized", - "miniaturises", "miniaturizes", - "misbehaviour", "misbehavior", - "misdemeanour", "misdemeanor", - "mobilisation", "mobilization", - "moisturisers", "moisturizers", - "moisturising", "moisturizing", - "monopolising", "monopolizing", - "moustachioed", "mustachioed", - "nationalised", "nationalized", - "nationalises", "nationalizes", - "naturalising", "naturalizing", - "neighbouring", "neighboring", - "neutralising", "neutralizing", - "oesophaguses", "esophaguses", - "organisation", "organization", - "orthopaedics", "orthopedics", - "outmanoeuvre", "outmaneuver", - "palaeolithic", "paleolithic", - "pasteurising", "pasteurizing", - "personalised", "personalized", - "personalises", "personalizes", - "philosophise", "philosophize", - "plagiarising", "plagiarizing", - "ploughshares", "plowshares", - "polarisation", "polarization", - "politicising", "politicizing", - "popularising", "popularizing", - "pressurising", "pressurizing", - "prioritising", "prioritizing", - "propagandise", "propagandize", - "proselytised", "proselytized", - "proselytiser", "proselytizer", - "proselytises", "proselytizes", - "radicalising", "radicalizing", - "rationalised", "rationalized", - "rationalises", "rationalizes", - "realisations", "realizations", - "recognisable", "recognizable", - "recognisably", "recognizably", - "recognisance", "recognizance", - "reconnoitred", "reconnoitered", - "reconnoitres", "reconnoiters", - "regularising", "regularizing", - "reorganising", "reorganizing", - "revitalising", "revitalizing", - "rhapsodising", "rhapsodizing", - "romanticised", "romanticized", - "romanticises", "romanticizes", - "scandalising", "scandalizing", - "scrutinising", "scrutinizing", - "secularising", "secularizing", - "specialising", "specializing", - "squirrelling", "squirreling", - "standardised", "standardized", - 
"standardises", "standardizes", - "stigmatising", "stigmatizing", - "sympathisers", "sympathizers", - "sympathising", "sympathizing", - "synchronised", "synchronized", - "synchronises", "synchronizes", - "synthesisers", "synthesizers", - "synthesising", "synthesizing", - "systematised", "systematized", - "systematises", "systematizes", - "technicolour", "technicolor", - "theatregoers", "theatergoers", - "traumatising", "traumatizing", - "trivialising", "trivializing", - "unauthorised", "unauthorized", - "uncatalogued", "uncataloged", - "unfavourable", "unfavorable", - "unfavourably", "unfavorably", - "unionisation", "unionization", - "unrecognised", "unrecognized", - "untrammelled", "untrammeled", - "urbanisation", "urbanization", - "vaporisation", "vaporization", - "vocalisation", "vocalization", - "watercolours", "watercolors", - "westernising", "westernizing", - "accessorise", "accessorize", - "acclimatise", "acclimatize", - "agonisingly", "agonizingly", - "amortisable", "amortizable", - "anaesthesia", "anesthesia", - "anaesthetic", "anesthetic", - "anglicising", "anglicizing", - "antagonised", "antagonized", - "antagonises", "antagonizes", - "apologising", "apologizing", - "archaeology", "archeology", - "authorising", "authorizing", - "bastardised", "bastardized", - "bastardises", "bastardizes", - "bedevilling", "bedeviling", - "behavioural", "behavioral", - "belabouring", "belaboring", - "bowdlerised", "bowdlerized", - "bowdlerises", "bowdlerizes", - "breathalyse", "breathalyze", - "brutalising", "brutalizing", - "cannibalise", "cannibalize", - "capitalised", "capitalized", - "capitalises", "capitalizes", - "caramelised", "caramelized", - "caramelises", "caramelizes", - "carbonising", "carbonizing", - "cataloguing", "cataloging", - "categorised", "categorized", - "categorises", "categorizes", - "cauterising", "cauterizing", - "centigramme", "centigram", - "centilitres", "centiliters", - "centimetres", "centimeters", - "centralised", "centralized", - "centralises", "centralizes", - "centrefolds", "centerfolds", - "centrepiece", "centerpiece", - "channelling", "channeling", - "chequebooks", "checkbooks", - "circularise", "circularize", - "colourfully", "colorfully", - "colourizing", "colorizing", - "computerise", "computerize", - "councillors", "councilors", - "counselling", "counseling", - "counsellors", "counselors", - "criminalise", "criminalize", - "criticising", "criticizing", - "crystallise", "crystallize", - "customising", "customizing", - "defenceless", "defenseless", - "dehumanised", "dehumanized", - "dehumanises", "dehumanizes", - "demobilised", "demobilized", - "demobilises", "demobilizes", - "democratise", "democratize", - "demoralised", "demoralized", - "demoralises", "demoralizes", - "deodorising", "deodorizing", - "desensitise", "desensitize", - "destabilise", "destabilize", - "discoloured", "discolored", - "dishevelled", "disheveled", - "dishonoured", "dishonored", - "dramatising", "dramatizing", - "economising", "economizing", - "empathising", "empathizing", - "emphasising", "emphasizing", - "endeavoured", "endeavored", - "epitomising", "epitomizing", - "evangelised", "evangelized", - "evangelises", "evangelizes", - "extemporise", "extemporize", - "externalise", "externalize", - "factorising", "factorizing", - "familiarise", "familiarize", - "fantasising", "fantasizing", - "favouritism", "favoritism", - "fertilisers", "fertilizers", - "fertilising", "fertilizing", - "flavourings", "flavorings", - "flavourless", "flavorless", - "flavoursome", "flavorsome", - 
"formalising", "formalizing", - "fossilising", "fossilizing", - "fraternised", "fraternized", - "fraternises", "fraternizes", - "galvanising", "galvanizing", - "generalised", "generalized", - "generalises", "generalizes", - "ghettoising", "ghettoizing", - "globalising", "globalizing", - "gruellingly", "gruelingly", - "gynaecology", "gynecology", - "haematology", "hematology", - "haemoglobin", "hemoglobin", - "haemophilia", "hemophilia", - "haemorrhage", "hemorrhage", - "harmonising", "harmonizing", - "homoeopaths", "homeopaths", - "homoeopathy", "homeopathy", - "homogenised", "homogenized", - "homogenises", "homogenizes", - "hospitalise", "hospitalize", - "hybridising", "hybridizing", - "hypnotising", "hypnotizing", - "hypothesise", "hypothesize", - "immobilised", "immobilized", - "immobiliser", "immobilizer", - "immobilises", "immobilizes", - "immortalise", "immortalize", - "impanelling", "impaneling", - "imperilling", "imperiling", - "initialised", "initialized", - "initialises", "initializes", - "initialling", "initialing", - "instalments", "installments", - "internalise", "internalize", - "italicising", "italicizing", - "jeopardised", "jeopardized", - "jeopardises", "jeopardizes", - "kilogrammes", "kilograms", - "legitimised", "legitimized", - "legitimises", "legitimizes", - "liberalised", "liberalized", - "liberalises", "liberalizes", - "lionisation", "lionization", - "liquidisers", "liquidizers", - "liquidising", "liquidizing", - "magnetising", "magnetizing", - "manoeuvring", "maneuvering", - "marginalise", "marginalize", - "marshalling", "marshaling", - "materialise", "materialize", - "mechanising", "mechanizing", - "memorialise", "memorialize", - "mesmerising", "mesmerizing", - "metabolised", "metabolized", - "metabolises", "metabolizes", - "micrometres", "micrometers", - "militarised", "militarized", - "militarises", "militarizes", - "milligramme", "milligram", - "millilitres", "milliliters", - "millimetres", "millimeters", - "miniaturise", "miniaturize", - "modernising", "modernizing", - "moisturised", "moisturized", - "moisturiser", "moisturizer", - "moisturises", "moisturizes", - "monopolised", "monopolized", - "monopolises", "monopolizes", - "nationalise", "nationalize", - "naturalised", "naturalized", - "naturalises", "naturalizes", - "neighbourly", "neighborly", - "neutralised", "neutralized", - "neutralises", "neutralizes", - "normalising", "normalizing", - "orthopaedic", "orthopedic", - "ostracising", "ostracizing", - "oxidisation", "oxidization", - "paediatrics", "pediatrics", - "paedophiles", "pedophiles", - "paedophilia", "pedophilia", - "passivising", "passivizing", - "pasteurised", "pasteurized", - "pasteurises", "pasteurizes", - "patronising", "patronizing", - "personalise", "personalize", - "plagiarised", "plagiarized", - "plagiarises", "plagiarizes", - "ploughshare", "plowshare", - "politicised", "politicized", - "politicises", "politicizes", - "popularised", "popularized", - "popularises", "popularizes", - "praesidiums", "presidiums", - "pressurised", "pressurized", - "pressurises", "pressurizes", - "prioritised", "prioritized", - "prioritises", "prioritizes", - "privatising", "privatizing", - "proselytise", "proselytize", - "publicising", "publicizing", - "pulverising", "pulverizing", - "quarrelling", "quarreling", - "radicalised", "radicalized", - "radicalises", "radicalizes", - "randomising", "randomizing", - "rationalise", "rationalize", - "realisation", "realization", - "recognising", "recognizing", - "reconnoitre", "reconnoiter", - "regularised", 
"regularized", - "regularises", "regularizes", - "remodelling", "remodeling", - "reorganised", "reorganized", - "reorganises", "reorganizes", - "revitalised", "revitalized", - "revitalises", "revitalizes", - "rhapsodised", "rhapsodized", - "rhapsodises", "rhapsodizes", - "romanticise", "romanticize", - "scandalised", "scandalized", - "scandalises", "scandalizes", - "sceptically", "skeptically", - "scrutinised", "scrutinized", - "scrutinises", "scrutinizes", - "secularised", "secularized", - "secularises", "secularizes", - "sensitising", "sensitizing", - "serialising", "serializing", - "sermonising", "sermonizing", - "shrivelling", "shriveling", - "signalising", "signalizing", - "snorkelling", "snorkeling", - "snowploughs", "snowplow", - "socialising", "socializing", - "solemnising", "solemnizing", - "specialised", "specialized", - "specialises", "specializes", - "squirrelled", "squirreled", - "stabilisers", "stabilizers", - "stabilising", "stabilizing", - "standardise", "standardize", - "stencilling", "stenciling", - "sterilisers", "sterilizers", - "sterilising", "sterilizing", - "stigmatised", "stigmatized", - "stigmatises", "stigmatizes", - "subsidisers", "subsidizers", - "subsidising", "subsidizing", - "summarising", "summarizing", - "symbolising", "symbolizing", - "sympathised", "sympathized", - "sympathiser", "sympathizer", - "sympathises", "sympathizes", - "synchronise", "synchronize", - "synthesised", "synthesized", - "synthesiser", "synthesizer", - "synthesises", "synthesizes", - "systematise", "systematize", - "tantalising", "tantalizing", - "temporising", "temporizing", - "tenderising", "tenderizing", - "terrorising", "terrorizing", - "theatregoer", "theatergoer", - "traumatised", "traumatized", - "traumatises", "traumatizes", - "trivialised", "trivialized", - "trivialises", "trivializes", - "tyrannising", "tyrannizing", - "uncivilised", "uncivilized", - "unorganised", "unorganized", - "unravelling", "unraveling", - "utilisation", "utilization", - "vandalising", "vandalizing", - "verbalising", "verbalizing", - "victimising", "victimizing", - "visualising", "visualizing", - "vulgarising", "vulgarizing", - "watercolour", "watercolor", - "westernised", "westernized", - "westernises", "westernizes", - "worshipping", "worshiping", - "aeroplanes", "airplanes", - "amortising", "amortizing", - "anglicised", "anglicized", - "anglicises", "anglicizes", - "annualised", "annualized", - "antagonise", "antagonize", - "apologised", "apologized", - "apologises", "apologizes", - "appetisers", "appetizers", - "appetising", "appetizing", - "authorised", "authorized", - "authorises", "authorizes", - "bannisters", "banisters", - "bastardise", "bastardize", - "bedevilled", "bedeviled", - "behaviours", "behaviors", - "bejewelled", "bejeweled", - "belaboured", "belabored", - "bowdlerise", "bowdlerize", - "brutalised", "brutalized", - "brutalises", "brutalizes", - "canalising", "canalizing", - "cancelling", "canceling", - "canonising", "canonizing", - "capitalise", "capitalize", - "caramelise", "caramelize", - "carbonised", "carbonized", - "carbonises", "carbonizes", - "catalogued", "cataloged", - "catalogues", "catalogs", - "catalysing", "catalyzing", - "categorise", "categorize", - "cauterised", "cauterized", - "cauterises", "cauterizes", - "centilitre", "centiliter", - "centimetre", "centimeter", - "centralise", "centralize", - "centrefold", "centerfold", - "channelled", "channeled", - "chequebook", "checkbook", - "chiselling", "chiseling", - "civilising", "civilizing", - "clamouring", "clamoring", - 
"colonisers", "colonizers", - "colonising", "colonizing", - "colourants", "colorants", - "colourized", "colorized", - "colourizes", "colorizes", - "colourless", "colorless", - "connexions", "connections", - "councillor", "councilor", - "counselled", "counseled", - "counsellor", "counselor", - "criticised", "criticized", - "criticises", "criticizes", - "cudgelling", "cudgeling", - "customised", "customized", - "customises", "customizes", - "dehumanise", "dehumanize", - "demobilise", "demobilize", - "demonising", "demonizing", - "demoralise", "demoralize", - "deodorised", "deodorized", - "deodorises", "deodorizes", - "deputising", "deputizing", - "digitising", "digitizing", - "discolours", "discolors", - "dishonours", "dishonors", - "dramatised", "dramatized", - "dramatises", "dramatizes", - "drivelling", "driveling", - "economised", "economized", - "economises", "economizes", - "empathised", "empathized", - "empathises", "empathizes", - "emphasised", "emphasized", - "emphasises", "emphasizes", - "enamelling", "enameling", - "endeavours", "endeavors", - "energising", "energizing", - "epaulettes", "epaulets", - "epicentres", "epicenters", - "epitomised", "epitomized", - "epitomises", "epitomizes", - "equalisers", "equalizers", - "equalising", "equalizing", - "eulogising", "eulogizing", - "evangelise", "evangelize", - "factorised", "factorized", - "factorises", "factorizes", - "fantasised", "fantasized", - "fantasises", "fantasizes", - "favourable", "favorable", - "favourably", "favorably", - "favourites", "favorites", - "feminising", "feminizing", - "fertilised", "fertilized", - "fertiliser", "fertilizer", - "fertilises", "fertilizes", - "fibreglass", "fiberglass", - "finalising", "finalizing", - "flavouring", "flavoring", - "formalised", "formalized", - "formalises", "formalizes", - "fossilised", "fossilized", - "fossilises", "fossilizes", - "fraternise", "fraternize", - "fulfilment", "fulfillment", - "funnelling", "funneling", - "galvanised", "galvanized", - "galvanises", "galvanizes", - "gambolling", "gamboling", - "gaolbreaks", "jailbreaks", - "generalise", "generalize", - "ghettoised", "ghettoized", - "ghettoises", "ghettoizes", - "globalised", "globalized", - "globalises", "globalizes", - "gonorrhoea", "gonorrhea", - "grovelling", "groveling", - "harbouring", "harboring", - "harmonised", "harmonized", - "harmonises", "harmonizes", - "homoeopath", "homeopath", - "homogenise", "homogenize", - "honourable", "honorable", - "honourably", "honorably", - "humanising", "humanizing", - "humourless", "humorless", - "hybridised", "hybridized", - "hybridises", "hybridizes", - "hypnotised", "hypnotized", - "hypnotises", "hypnotizes", - "idealising", "idealizing", - "immobilise", "immobilize", - "immunising", "immunizing", - "impanelled", "impaneled", - "imperilled", "imperiled", - "inflexions", "inflections", - "initialise", "initialize", - "initialled", "initialed", - "instalment", "installment", - "ionisation", "ionization", - "italicised", "italicized", - "italicises", "italicizes", - "jeopardise", "jeopardize", - "kilogramme", "kilogram", - "kilometres", "kilometers", - "lacklustre", "lackluster", - "legalising", "legalizing", - "legitimise", "legitimize", - "liberalise", "liberalize", - "liquidised", "liquidized", - "liquidiser", "liquidizer", - "liquidises", "liquidizes", - "localising", "localizing", - "magnetised", "magnetized", - "magnetises", "magnetizes", - "manoeuvred", "maneuvered", - "manoeuvres", "maneuvers", - "marshalled", "marshaled", - "marvelling", "marveling", - "marvellous", 
"marvelous", - "maximising", "maximizing", - "mechanised", "mechanized", - "mechanises", "mechanizes", - "memorising", "memorizing", - "mesmerised", "mesmerized", - "mesmerises", "mesmerizes", - "metabolise", "metabolize", - "micrometre", "micrometer", - "militarise", "militarize", - "millilitre", "milliliter", - "millimetre", "millimeter", - "minimising", "minimizing", - "mobilising", "mobilizing", - "modernised", "modernized", - "modernises", "modernizes", - "moisturise", "moisturize", - "monopolise", "monopolize", - "moralising", "moralizing", - "mouldering", "moldering", - "moustached", "mustached", - "moustaches", "mustaches", - "naturalise", "naturalize", - "neighbours", "neighbors", - "neutralise", "neutralize", - "normalised", "normalized", - "normalises", "normalizes", - "oesophagus", "esophagus", - "optimising", "optimizing", - "organisers", "organizers", - "organising", "organizing", - "ostracised", "ostracized", - "ostracises", "ostracizes", - "paederasts", "pederasts", - "paediatric", "pediatric", - "paedophile", "pedophile", - "panellists", "panelists", - "paralysing", "paralyzing", - "parcelling", "parceling", - "passivised", "passivized", - "passivises", "passivizes", - "pasteurise", "pasteurize", - "patronised", "patronized", - "patronises", "patronizes", - "penalising", "penalizing", - "pencilling", "penciling", - "plagiarise", "plagiarize", - "polarising", "polarizing", - "politicise", "politicize", - "popularise", "popularize", - "practising", "practicing", - "praesidium", "presidium", - "pressurise", "pressurize", - "prioritise", "prioritize", - "privatised", "privatized", - "privatises", "privatizes", - "programmes", "programs", - "publicised", "publicized", - "publicises", "publicizes", - "pulverised", "pulverized", - "pulverises", "pulverizes", - "pummelling", "pummeled", - "quarrelled", "quarreled", - "radicalise", "radicalize", - "randomised", "randomized", - "randomises", "randomizes", - "realisable", "realizable", - "recognised", "recognized", - "recognises", "recognizes", - "refuelling", "refueling", - "regularise", "regularize", - "remodelled", "remodeled", - "remoulding", "remolding", - "reorganise", "reorganize", - "revitalise", "revitalize", - "rhapsodise", "rhapsodize", - "ritualised", "ritualized", - "sanitising", "sanitizing", - "satirising", "satirizing", - "scandalise", "scandalize", - "scepticism", "skepticism", - "scrutinise", "scrutinize", - "secularise", "secularize", - "sensitised", "sensitized", - "sensitises", "sensitizes", - "sepulchres", "sepulchers", - "serialised", "serialized", - "serialises", "serializes", - "sermonised", "sermonized", - "sermonises", "sermonizes", - "shovelling", "shoveling", - "shrivelled", "shriveled", - "signalised", "signalized", - "signalises", "signalizes", - "signalling", "signaling", - "snivelling", "sniveling", - "snorkelled", "snorkeled", - "snowplough", "snowplow", - "socialised", "socialized", - "socialises", "socializes", - "sodomising", "sodomizing", - "solemnised", "solemnized", - "solemnises", "solemnizes", - "specialise", "specialize", - "spiralling", "spiraling", - "splendours", "splendors", - "stabilised", "stabilized", - "stabiliser", "stabilizer", - "stabilises", "stabilizes", - "stencilled", "stenciled", - "sterilised", "sterilized", - "steriliser", "sterilizer", - "sterilises", "sterilizes", - "stigmatise", "stigmatize", - "subsidised", "subsidized", - "subsidiser", "subsidizer", - "subsidises", "subsidizes", - "succouring", "succoring", - "sulphurous", "sulfurous", - "summarised", "summarized", - 
"summarises", "summarizes", - "swivelling", "swiveling", - "symbolised", "symbolized", - "symbolises", "symbolizes", - "sympathise", "sympathize", - "synthesise", "synthesize", - "tantalised", "tantalized", - "tantalises", "tantalizes", - "temporised", "temporized", - "temporises", "temporizes", - "tenderised", "tenderized", - "tenderises", "tenderizes", - "terrorised", "terrorized", - "terrorises", "terrorizes", - "theorising", "theorizing", - "traumatise", "traumatize", - "travellers", "travelers", - "travelling", "traveling", - "tricolours", "tricolors", - "trivialise", "trivialize", - "tunnelling", "tunneling", - "tyrannised", "tyrannized", - "tyrannises", "tyrannizes", - "unequalled", "unequaled", - "unionising", "unionizing", - "unravelled", "unraveled", - "unrivalled", "unrivaled", - "urbanising", "urbanizing", - "utilisable", "utilizable", - "vandalised", "vandalized", - "vandalises", "vandalizes", - "vaporising", "vaporizing", - "verbalised", "verbalized", - "verbalises", "verbalizes", - "victimised", "victimized", - "victimises", "victimizes", - "visualised", "visualized", - "visualises", "visualizes", - "vocalising", "vocalizing", - "vulcanised", "vulcanized", - "vulgarised", "vulgarized", - "vulgarises", "vulgarizes", - "weaselling", "weaseling", - "westernise", "westernize", - "womanisers", "womanizers", - "womanising", "womanizing", - "worshipped", "worshiped", - "worshipper", "worshiper", - "aeroplane", "airplane", - "aetiology", "etiology", - "agonising", "agonizing", - "almanacks", "almanacs", - "aluminium", "aluminum", - "amortised", "amortized", - "amortises", "amortizes", - "analogues", "analogs", - "analysing", "analyzing", - "anglicise", "anglicize", - "apologise", "apologize", - "appetiser", "appetizer", - "armourers", "armorers", - "armouries", "armories", - "artefacts", "artifacts", - "authorise", "authorize", - "baptising", "baptizing", - "behaviour", "behavior", - "belabours", "belabors", - "brutalise", "brutalize", - "callipers", "calipers", - "canalised", "canalized", - "canalises", "canalizes", - "cancelled", "canceled", - "canonised", "canonized", - "canonises", "canonizes", - "carbonise", "carbonize", - "carolling", "caroling", - "catalogue", "catalog", - "catalysed", "catalyzed", - "catalyses", "catalyzes", - "cauterise", "cauterize", - "cavilling", "caviling", - "chequered", "checkered", - "chiselled", "chiseled", - "civilised", "civilized", - "civilises", "civilizes", - "clamoured", "clamored", - "colonised", "colonized", - "coloniser", "colonizer", - "colonises", "colonizes", - "colourant", "colorant", - "coloureds", "coloreds", - "colourful", "colorful", - "colouring", "coloring", - "colourize", "colorize", - "connexion", "connection", - "criticise", "criticize", - "cruellest", "cruelest", - "cudgelled", "cudgeled", - "customise", "customize", - "demeanour", "demeanor", - "demonised", "demonized", - "demonises", "demonizes", - "deodorise", "deodorize", - "deputised", "deputized", - "deputises", "deputizes", - "dialogues", "dialogs", - "diarrhoea", "diarrhea", - "digitised", "digitized", - "digitises", "digitizes", - "discolour", "discolor", - "disfavour", "disfavor", - "dishonour", "dishonor", - "dramatise", "dramatize", - "drivelled", "driveled", - "economise", "economize", - "empathise", "empathize", - "emphasise", "emphasize", - "enamelled", "enameled", - "enamoured", "enamored", - "endeavour", "endeavor", - "energised", "energized", - "energises", "energizes", - "epaulette", "epaulet", - "epicentre", "epicenter", - "epitomise", "epitomize", - 
"equalised", "equalized", - "equaliser", "equalizer", - "equalises", "equalizes", - "eulogised", "eulogized", - "eulogises", "eulogizes", - "factorise", "factorize", - "fantasise", "fantasize", - "favouring", "favoring", - "favourite", "favorite", - "feminised", "feminized", - "feminises", "feminizes", - "fertilise", "fertilize", - "finalised", "finalized", - "finalises", "finalizes", - "flautists", "flutists", - "flavoured", "flavored", - "formalise", "formalize", - "fossilise", "fossilize", - "funnelled", "funneled", - "galvanise", "galvanize", - "gambolled", "gamboled", - "gaolbirds", "jailbirds", - "gaolbreak", "jailbreak", - "ghettoise", "ghettoize", - "globalise", "globalize", - "gravelled", "graveled", - "grovelled", "groveled", - "gruelling", "grueling", - "harboured", "harbored", - "harmonise", "harmonize", - "honouring", "honoring", - "humanised", "humanized", - "humanises", "humanizes", - "humouring", "humoring", - "hybridise", "hybridize", - "hypnotise", "hypnotize", - "idealised", "idealized", - "idealises", "idealizes", - "idolising", "idolizing", - "immunised", "immunized", - "immunises", "immunizes", - "inflexion", "inflection", - "italicise", "italicize", - "itemising", "itemizing", - "jewellers", "jewelers", - "jewellery", "jewelry", - "kilometre", "kilometer", - "labelling", "labeling", - "labourers", "laborers", - "labouring", "laboring", - "legalised", "legalized", - "legalises", "legalizes", - "leukaemia", "leukemia", - "levellers", "levelers", - "levelling", "leveling", - "libelling", "libeling", - "libellous", "libelous", - "licencing", "licensing", - "lionising", "lionizing", - "liquidise", "liquidize", - "localised", "localized", - "localises", "localizes", - "magnetise", "magnetize", - "manoeuvre", "maneuver", - "marvelled", "marveled", - "maximised", "maximized", - "maximises", "maximizes", - "mechanise", "mechanize", - "mediaeval", "medieval", - "memorised", "memorized", - "memorises", "memorizes", - "mesmerise", "mesmerize", - "minimised", "minimized", - "minimises", "minimizes", - "mobilised", "mobilized", - "mobilises", "mobilizes", - "modellers", "modelers", - "modelling", "modeling", - "modernise", "modernize", - "moralised", "moralized", - "moralises", "moralizes", - "motorised", "motorized", - "mouldered", "moldered", - "mouldiest", "moldiest", - "mouldings", "moldings", - "moustache", "mustache", - "neighbour", "neighbor", - "normalise", "normalize", - "odourless", "odorless", - "oestrogen", "estrogen", - "optimised", "optimized", - "optimises", "optimizes", - "organised", "organized", - "organiser", "organizer", - "organises", "organizes", - "ostracise", "ostracize", - "oxidising", "oxidizing", - "paederast", "pederast", - "panelling", "paneling", - "panellist", "panelist", - "paralysed", "paralyzed", - "paralyses", "paralyzes", - "parcelled", "parceled", - "passivise", "passivize", - "patronise", "patronize", - "pedalling", "pedaling", - "penalised", "penalized", - "penalises", "penalizes", - "pencilled", "penciled", - "ploughing", "plowing", - "ploughman", "plowman", - "ploughmen", "plowmen", - "polarised", "polarized", - "polarises", "polarizes", - "practised", "practiced", - "practises", "practices", - "pretences", "pretenses", - "primaeval", "primeval", - "privatise", "privatize", - "programme", "program", - "publicise", "publicize", - "pulverise", "pulverize", - "pummelled", "pummel", - "randomise", "randomize", - "ravelling", "raveling", - "realising", "realizing", - "recognise", "recognize", - "refuelled", "refueled", - "remoulded", "remolded", 
- "revellers", "revelers", - "revelling", "reveling", - "rivalling", "rivaling", - "saltpetre", "saltpeter", - "sanitised", "sanitized", - "sanitises", "sanitizes", - "satirised", "satirized", - "satirises", "satirizes", - "savouries", "savories", - "savouring", "savoring", - "sceptical", "skeptical", - "sensitise", "sensitize", - "sepulchre", "sepulcher", - "serialise", "serialize", - "sermonise", "sermonize", - "shovelled", "shoveled", - "signalise", "signalize", - "signalled", "signaled", - "snivelled", "sniveled", - "socialise", "socialize", - "sodomised", "sodomized", - "sodomises", "sodomizes", - "solemnise", "solemnize", - "spiralled", "spiraled", - "splendour", "splendor", - "stabilise", "stabilize", - "sterilise", "sterilize", - "subsidise", "subsidize", - "succoured", "succored", - "sulphates", "sulfates", - "sulphides", "sulfides", - "summarise", "summarize", - "swivelled", "swiveled", - "symbolise", "symbolize", - "syphoning", "siphoning", - "tantalise", "tantalize", - "tasselled", "tasseled", - "temporise", "temporize", - "tenderise", "tenderize", - "terrorise", "terrorize", - "theorised", "theorized", - "theorises", "theorizes", - "towelling", "toweling", - "travelled", "traveled", - "traveller", "traveler", - "trialling", "trialing", - "tricolour", "tricolor", - "tunnelled", "tunneled", - "tyrannise", "tyrannize", - "unionised", "unionized", - "unionises", "unionizes", - "unsavoury", "unsavory", - "urbanised", "urbanized", - "urbanises", "urbanizes", - "utilising", "utilizing", - "vandalise", "vandalize", - "vaporised", "vaporized", - "vaporises", "vaporizes", - "verbalise", "verbalize", - "victimise", "victimize", - "visualise", "visualize", - "vocalised", "vocalized", - "vocalises", "vocalizes", - "vulgarise", "vulgarize", - "weaselled", "weaseled", - "womanised", "womanized", - "womaniser", "womanizer", - "womanises", "womanizes", - "yodelling", "yodeling", - "yoghourts", "yogurts", - "agonised", "agonized", - "agonises", "agonizes", - "almanack", "almanac", - "amortise", "amortize", - "analogue", "analog", - "analysed", "analyzed", - "analyses", "analyzes", - "armoured", "armored", - "armourer", "armorer", - "artefact", "artifact", - "baptised", "baptized", - "baptises", "baptizes", - "baulking", "balking", - "belabour", "belabor", - "bevelled", "beveled", - "calibres", "calibers", - "calliper", "caliper", - "canalise", "canalize", - "canonise", "canonize", - "carolled", "caroled", - "catalyse", "catalyze", - "cavilled", "caviled", - "civilise", "civilize", - "clamours", "clamors", - "clangour", "clangor", - "colonise", "colonize", - "coloured", "colored", - "cosiness", "coziness", - "crueller", "crueler", - "defences", "defenses", - "demonise", "demonize", - "deputise", "deputize", - "dialling", "dialing", - "dialogue", "dialog", - "digitise", "digitize", - "draughty", "drafty", - "duelling", "dueling", - "energise", "energize", - "enthrals", "enthralls", - "equalise", "equalize", - "eulogise", "eulogize", - "favoured", "favored", - "feminise", "feminize", - "finalise", "finalize", - "flautist", "flutist", - "flavours", "flavors", - "foetuses", "fetuses", - "fuelling", "fueling", - "gaolbird", "jailbird", - "gryphons", "griffins", - "harbours", "harbors", - "honoured", "honored", - "humanise", "humanize", - "humoured", "humored", - "idealise", "idealize", - "idolised", "idolized", - "idolises", "idolizes", - "immunise", "immunize", - "ionisers", "ionizers", - "ionising", "ionizing", - "itemised", "itemized", - "itemises", "itemizes", - "jewelled", "jeweled", - 
"jeweller", "jeweler", - "labelled", "labeled", - "laboured", "labored", - "labourer", "laborer", - "legalise", "legalize", - "levelled", "leveled", - "leveller", "leveler", - "libelled", "libeled", - "licenced", "licensed", - "licences", "licenses", - "lionised", "lionized", - "lionises", "lionizes", - "localise", "localize", - "maximise", "maximize", - "memorise", "memorize", - "minimise", "minimize", - "misspelt", "misspelled", - "mobilise", "mobilize", - "modelled", "modeled", - "modeller", "modeler", - "moralise", "moralize", - "moulders", "molders", - "mouldier", "moldier", - "moulding", "molding", - "moulting", "molting", - "offences", "offenses", - "optimise", "optimize", - "organise", "organize", - "oxidised", "oxidized", - "oxidises", "oxidizes", - "panelled", "paneled", - "paralyse", "paralyze", - "parlours", "parlors", - "pedalled", "pedaled", - "penalise", "penalize", - "philtres", "filters", - "ploughed", "plowed", - "polarise", "polarize", - "practise", "practice", - "pretence", "pretense", - "ravelled", "raveled", - "realised", "realized", - "realises", "realizes", - "remoulds", "remolds", - "revelled", "reveled", - "reveller", "reveler", - "rivalled", "rivaled", - "rumoured", "rumored", - "sanitise", "sanitize", - "satirise", "satirize", - "saviours", "saviors", - "savoured", "savored", - "sceptics", "skeptics", - "sceptres", "scepters", - "sodomise", "sodomize", - "spectres", "specters", - "succours", "succors", - "sulphate", "sulfate", - "sulphide", "sulfide", - "syphoned", "siphoned", - "theatres", "theaters", - "theorise", "theorize", - "towelled", "toweled", - "toxaemia", "toxemia", - "trialled", "trialed", - "unionise", "unionize", - "urbanise", "urbanize", - "utilised", "utilized", - "utilises", "utilizes", - "vaporise", "vaporize", - "vocalise", "vocalize", - "womanise", "womanize", - "yodelled", "yodeled", - "yoghourt", "yogurt", - "yoghurts", "yogurts", - "agonise", "agonize", - "anaemia", "anemia", - "anaemic", "anemic", - "analyse", "analyze", - "arbours", "arbors", - "armoury", "armory", - "baptise", "baptize", - "baulked", "balked", - "behoved", "behooved", - "behoves", "behooves", - "calibre", "caliber", - "candour", "candor", - "centred", "centered", - "centres", "centers", - "cheques", "checks", - "clamour", "clamor", - "colours", "colors", - "cosiest", "coziest", - "defence", "defense", - "dialled", "dialed", - "distils", "distills", - "duelled", "dueled", - "enthral", "enthrall", - "favours", "favors", - "fervour", "fervor", - "flavour", "flavor", - "fuelled", "fueled", - "fulfils", "fulfills", - "gaolers", "jailers", - "gaoling", "jailing", - "gipsies", "gypsies", - "glueing", "gluing", - "goitres", "goiters", - "grammes", "grams", - "groynes", "groins", - "gryphon", "griffin", - "harbour", "harbor", - "honours", "honors", - "humours", "humors", - "idolise", "idolize", - "instals", "installs", - "instils", "instills", - "ionised", "ionized", - "ioniser", "ionizer", - "ionises", "ionizes", - "itemise", "itemize", - "labours", "labors", - "licence", "license", - "lionise", "lionize", - "louvred", "louvered", - "louvres", "louvers", - "moulded", "molded", - "moulder", "molder", - "moulted", "molted", - "offence", "offense", - "oxidise", "oxidize", - "parlour", "parlor", - "philtre", "filter", - "ploughs", "plows", - "pyjamas", "pajamas", - "rancour", "rancor", - "realise", "realize", - "remould", "remold", - "rigours", "rigors", - "rumours", "rumors", - "saviour", "savior", - "savours", "savors", - "savoury", "savory", - "sceptic", "skeptic", - "sceptre", 
"scepter", - "spectre", "specter", - "storeys", "stories", - "succour", "succor", - "sulphur", "sulfur", - "syphons", "siphons", - "theatre", "theater", - "tumours", "tumors", - "utilise", "utilize", - "vapours", "vapors", - "waggons", "wagons", - "yoghurt", "yogurt", - "ageing", "aging", - "appals", "appalls", - "arbour", "arbor", - "ardour", "ardor", - "baulks", "balks", - "behove", "behoove", - "centre", "center", - "cheque", "check", - "chilli", "chili", - "colour", "color", - "cosier", "cozier", - "cosies", "cozies", - "cosily", "cozily", - "distil", "distill", - "edoema", "edema", - "enrols", "enrolls", - "faecal", "fecal", - "faeces", "feces", - "favour", "favor", - "fibres", "fibers", - "foetal", "fetal", - "foetid", "fetid", - "foetus", "fetus", - "fulfil", "fulfill", - "gaoled", "jailed", - "gaoler", "jailer", - "goitre", "goiter", - "gramme", "gram", - "groyne", "groin", - "honour", "honor", - "humour", "humor", - "instal", "install", - "instil", "instill", - "ionise", "ionize", - "labour", "labor", - "litres", "liters", - "lustre", "luster", - "meagre", "meager", - "metres", "meters", - "mitres", "miters", - "moulds", "molds", - "mouldy", "moldy", - "moults", "molts", - "odours", "odors", - "plough", "plow", - "pyjama", "pajama", - "rigour", "rigor", - "rumour", "rumor", - "savour", "savor", - "storey", "story", - "syphon", "siphon", - "tumour", "tumor", - "valour", "valor", - "vapour", "vapor", - "vigour", "vigor", - "waggon", "wagon", - "appal", "appall", - "baulk", "balk", - "enrol", "enroll", - "fibre", "fiber", - "gaols", "jails", - "litre", "liter", - "metre", "meter", - "mitre", "miter", - "mould", "mold", - "moult", "molt", - "odour", "odor", - "tyres", "tires", - "cosy", "cozy", - "gaol", "jail", - "tyre", "tire", -} - -// DictBritish converts US spellings to UK spellings -var DictBritish = []string{ - "institutionalization", "institutionalisation", - "internationalization", "internationalisation", - "professionalization", "professionalisation", - "compartmentalizing", "compartmentalising", - "institutionalizing", "institutionalising", - "internationalizing", "internationalising", - "compartmentalized", "compartmentalised", - "compartmentalizes", "compartmentalises", - "decriminalization", "decriminalisation", - "denationalization", "denationalisation", - "fictionalizations", "fictionalisations", - "institutionalized", "institutionalised", - "institutionalizes", "institutionalises", - "intellectualizing", "intellectualising", - "internationalized", "internationalised", - "internationalizes", "internationalises", - "pedestrianization", "pedestrianisation", - "professionalizing", "professionalising", - "compartmentalize", "compartmentalise", - "decentralization", "decentralisation", - "demilitarization", "demilitarisation", - "externalizations", "externalisations", - "fictionalization", "fictionalisation", - "institutionalize", "institutionalise", - "intellectualized", "intellectualised", - "intellectualizes", "intellectualises", - "internationalize", "internationalise", - "nationalizations", "nationalisations", - "professionalized", "professionalised", - "professionalizes", "professionalises", - "rationalizations", "rationalisations", - "sensationalizing", "sensationalising", - "sentimentalizing", "sentimentalising", - "acclimatization", "acclimatisation", - "commercializing", "commercialising", - "conceptualizing", "conceptualising", - "contextualizing", "contextualising", - "crystallization", "crystallisation", - "decriminalizing", "decriminalising", - 
"democratization", "democratisation", - "denationalizing", "denationalising", - "depersonalizing", "depersonalising", - "desensitization", "desensitisation", - "disorganization", "disorganisation", - "extemporization", "extemporisation", - "externalization", "externalisation", - "familiarization", "familiarisation", - "generalizations", "generalisations", - "hospitalization", "hospitalisation", - "individualizing", "individualising", - "industrializing", "industrialising", - "intellectualize", "intellectualise", - "internalization", "internalisation", - "maneuverability", "manoeuvrability", - "materialization", "materialisation", - "miniaturization", "miniaturisation", - "nationalization", "nationalisation", - "overemphasizing", "overemphasising", - "paleontologists", "palaeontologists", - "particularizing", "particularising", - "pedestrianizing", "pedestrianising", - "professionalize", "professionalise", - "psychoanalyzing", "psychoanalysing", - "rationalization", "rationalisation", - "reorganizations", "reorganisations", - "revolutionizing", "revolutionising", - "sensationalized", "sensationalised", - "sensationalizes", "sensationalises", - "sentimentalized", "sentimentalised", - "sentimentalizes", "sentimentalises", - "specializations", "specialisations", - "standardization", "standardisation", - "synchronization", "synchronisation", - "systematization", "systematisation", - "aggrandizement", "aggrandisement", - "characterizing", "characterising", - "collectivizing", "collectivising", - "commercialized", "commercialised", - "commercializes", "commercialises", - "conceptualized", "conceptualised", - "conceptualizes", "conceptualises", - "contextualized", "contextualised", - "contextualizes", "contextualises", - "decentralizing", "decentralising", - "decriminalized", "decriminalised", - "decriminalizes", "decriminalises", - "dehumanization", "dehumanisation", - "demilitarizing", "demilitarising", - "demobilization", "demobilisation", - "demoralization", "demoralisation", - "denationalized", "denationalised", - "denationalizes", "denationalises", - "depersonalized", "depersonalised", - "depersonalizes", "depersonalises", - "dramatizations", "dramatisations", - "editorializing", "editorialising", - "fictionalizing", "fictionalising", - "fraternization", "fraternisation", - "generalization", "generalisation", - "immobilization", "immobilisation", - "individualized", "individualised", - "individualizes", "individualises", - "industrialized", "industrialised", - "industrializes", "industrialises", - "liberalization", "liberalisation", - "monopolization", "monopolisation", - "naturalization", "naturalisation", - "neighborliness", "neighbourliness", - "neutralization", "neutralisation", - "organizational", "organisational", - "outmaneuvering", "outmanoeuvring", - "overemphasized", "overemphasised", - "overemphasizes", "overemphasises", - "paleontologist", "palaeontologist", - "particularized", "particularised", - "particularizes", "particularises", - "pasteurization", "pasteurisation", - "pedestrianized", "pedestrianised", - "pedestrianizes", "pedestrianises", - "philosophizing", "philosophising", - "politicization", "politicisation", - "popularization", "popularisation", - "pressurization", "pressurisation", - "prioritization", "prioritisation", - "privatizations", "privatisations", - "propagandizing", "propagandising", - "psychoanalyzed", "psychoanalysed", - "psychoanalyzes", "psychoanalyses", - "reconnoitering", "reconnoitring", - "regularization", "regularisation", - "reorganization", 
"reorganisation", - "revolutionized", "revolutionised", - "revolutionizes", "revolutionises", - "secularization", "secularisation", - "sensationalize", "sensationalise", - "sentimentalize", "sentimentalise", - "serializations", "serialisations", - "specialization", "specialisation", - "sterilizations", "sterilisations", - "stigmatization", "stigmatisation", - "transistorized", "transistorised", - "unrecognizable", "unrecognisable", - "visualizations", "visualisations", - "westernization", "westernisation", - "accessorizing", "accessorising", - "acclimatizing", "acclimatising", - "amortizations", "amortisations", - "amphitheaters", "amphitheatres", - "anesthetizing", "anaesthetising", - "archeologists", "archaeologists", - "breathalyzers", "breathalysers", - "breathalyzing", "breathalysing", - "cannibalizing", "cannibalising", - "characterized", "characterised", - "characterizes", "characterises", - "circularizing", "circularising", - "collectivized", "collectivised", - "collectivizes", "collectivises", - "commercialize", "commercialise", - "computerizing", "computerising", - "conceptualize", "conceptualise", - "contextualize", "contextualise", - "criminalizing", "criminalising", - "crystallizing", "crystallising", - "decentralized", "decentralised", - "decentralizes", "decentralises", - "decriminalize", "decriminalise", - "demilitarized", "demilitarised", - "demilitarizes", "demilitarises", - "democratizing", "democratising", - "denationalize", "denationalise", - "depersonalize", "depersonalise", - "desensitizing", "desensitising", - "destabilizing", "destabilising", - "disemboweling", "disembowelling", - "dramatization", "dramatisation", - "editorialized", "editorialised", - "editorializes", "editorialises", - "extemporizing", "extemporising", - "externalizing", "externalising", - "familiarizing", "familiarising", - "fertilization", "fertilisation", - "fictionalized", "fictionalised", - "fictionalizes", "fictionalises", - "formalization", "formalisation", - "fossilization", "fossilisation", - "globalization", "globalisation", - "gynecological", "gynaecological", - "gynecologists", "gynaecologists", - "harmonization", "harmonisation", - "hematological", "haematological", - "hematologists", "haematologists", - "hospitalizing", "hospitalising", - "hypothesizing", "hypothesising", - "immortalizing", "immortalising", - "individualize", "individualise", - "industrialize", "industrialise", - "internalizing", "internalising", - "marginalizing", "marginalising", - "materializing", "materialising", - "mechanization", "mechanisation", - "memorializing", "memorialising", - "miniaturizing", "miniaturising", - "nationalizing", "nationalising", - "neighborhoods", "neighbourhoods", - "normalization", "normalisation", - "organizations", "organisations", - "outmaneuvered", "outmanoeuvred", - "overemphasize", "overemphasise", - "particularize", "particularise", - "passivization", "passivisation", - "patronizingly", "patronisingly", - "pedestrianize", "pedestrianise", - "pediatricians", "paediatricians", - "personalizing", "personalising", - "philosophized", "philosophised", - "philosophizes", "philosophises", - "privatization", "privatisation", - "propagandized", "propagandised", - "propagandizes", "propagandises", - "proselytizers", "proselytisers", - "proselytizing", "proselytising", - "psychoanalyze", "psychoanalyse", - "pulverization", "pulverisation", - "rationalizing", "rationalising", - "reconnoitered", "reconnoitred", - "revolutionize", "revolutionise", - "romanticizing", "romanticising", - 
"serialization", "serialisation", - "socialization", "socialisation", - "standardizing", "standardising", - "sterilization", "sterilisation", - "subsidization", "subsidisation", - "synchronizing", "synchronising", - "systematizing", "systematising", - "tantalizingly", "tantalisingly", - "underutilized", "underutilised", - "victimization", "victimisation", - "visualization", "visualisation", - "vocalizations", "vocalisations", - "vulgarization", "vulgarisation", - "accessorized", "accessorised", - "accessorizes", "accessorises", - "acclimatized", "acclimatised", - "acclimatizes", "acclimatises", - "amortization", "amortisation", - "amphitheater", "amphitheatre", - "anesthetists", "anaesthetists", - "anesthetized", "anaesthetised", - "anesthetizes", "anaesthetises", - "antagonizing", "antagonising", - "appetizingly", "appetisingly", - "archeologist", "archaeologist", - "backpedaling", "backpedalling", - "bastardizing", "bastardising", - "behaviorists", "behaviourists", - "bowdlerizing", "bowdlerising", - "breathalyzed", "breathalysed", - "breathalyzes", "breathalyses", - "cannibalized", "cannibalised", - "cannibalizes", "cannibalises", - "capitalizing", "capitalising", - "caramelizing", "caramelising", - "categorizing", "categorising", - "centerpieces", "centrepieces", - "centralizing", "centralising", - "characterize", "characterise", - "circularized", "circularised", - "circularizes", "circularises", - "clarinetists", "clarinettists", - "collectivize", "collectivise", - "colonization", "colonisation", - "computerized", "computerised", - "computerizes", "computerises", - "criminalized", "criminalised", - "criminalizes", "criminalises", - "crystallized", "crystallised", - "crystallizes", "crystallises", - "decentralize", "decentralise", - "dehumanizing", "dehumanising", - "demilitarize", "demilitarise", - "demobilizing", "demobilising", - "democratized", "democratised", - "democratizes", "democratises", - "demoralizing", "demoralising", - "desensitized", "desensitised", - "desensitizes", "desensitises", - "destabilized", "destabilised", - "destabilizes", "destabilises", - "disemboweled", "disembowelled", - "dishonorable", "dishonourable", - "dishonorably", "dishonourably", - "disorganized", "disorganised", - "editorialize", "editorialise", - "equalization", "equalisation", - "evangelizing", "evangelising", - "extemporized", "extemporised", - "extemporizes", "extemporises", - "externalized", "externalised", - "externalizes", "externalises", - "familiarized", "familiarised", - "familiarizes", "familiarises", - "fictionalize", "fictionalise", - "finalization", "finalisation", - "fraternizing", "fraternising", - "generalizing", "generalising", - "gynecologist", "gynaecologist", - "hematologist", "haematologist", - "hemophiliacs", "haemophiliacs", - "hemorrhaging", "haemorrhaging", - "homogenizing", "homogenising", - "hospitalized", "hospitalised", - "hospitalizes", "hospitalises", - "hypothesized", "hypothesised", - "hypothesizes", "hypothesises", - "idealization", "idealisation", - "immobilizers", "immobilisers", - "immobilizing", "immobilising", - "immortalized", "immortalised", - "immortalizes", "immortalises", - "immunization", "immunisation", - "initializing", "initialising", - "installments", "instalments", - "internalized", "internalised", - "internalizes", "internalises", - "jeopardizing", "jeopardising", - "legalization", "legalisation", - "legitimizing", "legitimising", - "liberalizing", "liberalising", - "maneuverable", "manoeuvrable", - "maneuverings", "manoeuvrings", - 
"marginalized", "marginalised", - "marginalizes", "marginalises", - "materialized", "materialised", - "materializes", "materialises", - "maximization", "maximisation", - "memorialized", "memorialised", - "memorializes", "memorialises", - "metabolizing", "metabolising", - "militarizing", "militarising", - "miniaturized", "miniaturised", - "miniaturizes", "miniaturises", - "miscataloged", "miscatalogued", - "misdemeanors", "misdemeanours", - "mobilization", "mobilisation", - "moisturizers", "moisturisers", - "moisturizing", "moisturising", - "monopolizing", "monopolising", - "multicolored", "multicoloured", - "nationalized", "nationalised", - "nationalizes", "nationalises", - "naturalizing", "naturalising", - "neighborhood", "neighbourhood", - "neutralizing", "neutralising", - "organization", "organisation", - "outmaneuvers", "outmanoeuvres", - "paleontology", "palaeontology", - "pasteurizing", "pasteurising", - "pediatrician", "paediatrician", - "personalized", "personalised", - "personalizes", "personalises", - "philosophize", "philosophise", - "plagiarizing", "plagiarising", - "polarization", "polarisation", - "politicizing", "politicising", - "popularizing", "popularising", - "pressurizing", "pressurising", - "prioritizing", "prioritising", - "propagandize", "propagandise", - "proselytized", "proselytised", - "proselytizer", "proselytiser", - "proselytizes", "proselytises", - "radicalizing", "radicalising", - "rationalized", "rationalised", - "rationalizes", "rationalises", - "realizations", "realisations", - "recognizable", "recognisable", - "recognizably", "recognisably", - "recognizance", "recognisance", - "reconnoiters", "reconnoitres", - "regularizing", "regularising", - "reorganizing", "reorganising", - "revitalizing", "revitalising", - "rhapsodizing", "rhapsodising", - "romanticized", "romanticised", - "romanticizes", "romanticises", - "scandalizing", "scandalising", - "scrutinizing", "scrutinising", - "secularizing", "secularising", - "standardized", "standardised", - "standardizes", "standardises", - "stigmatizing", "stigmatising", - "sympathizers", "sympathisers", - "sympathizing", "sympathising", - "synchronized", "synchronised", - "synchronizes", "synchronises", - "synthesizing", "synthesising", - "systematized", "systematised", - "systematizes", "systematises", - "theatergoers", "theatregoers", - "traumatizing", "traumatising", - "trivializing", "trivialising", - "unauthorized", "unauthorised", - "unionization", "unionisation", - "unrecognized", "unrecognised", - "urbanization", "urbanisation", - "vaporization", "vaporisation", - "vocalization", "vocalisation", - "westernizing", "westernising", - "accessorize", "accessorise", - "acclimatize", "acclimatise", - "agonizingly", "agonisingly", - "amortizable", "amortisable", - "anesthetics", "anaesthetics", - "anesthetist", "anaesthetist", - "anesthetize", "anaesthetise", - "anglicizing", "anglicising", - "antagonized", "antagonised", - "antagonizes", "antagonises", - "apologizing", "apologising", - "backpedaled", "backpedalled", - "bastardized", "bastardised", - "bastardizes", "bastardises", - "behaviorism", "behaviourism", - "behaviorist", "behaviourist", - "bowdlerized", "bowdlerised", - "bowdlerizes", "bowdlerises", - "brutalizing", "brutalising", - "cannibalize", "cannibalise", - "capitalized", "capitalised", - "capitalizes", "capitalises", - "caramelized", "caramelised", - "caramelizes", "caramelises", - "carbonizing", "carbonising", - "categorized", "categorised", - "categorizes", "categorises", - "cauterizing", 
"cauterising", - "centerfolds", "centrefolds", - "centerpiece", "centrepiece", - "centiliters", "centilitres", - "centimeters", "centimetres", - "centralized", "centralised", - "centralizes", "centralises", - "circularize", "circularise", - "clarinetist", "clarinettist", - "computerize", "computerise", - "criminalize", "criminalise", - "criticizing", "criticising", - "crystallize", "crystallise", - "customizing", "customising", - "defenseless", "defenceless", - "dehumanized", "dehumanised", - "dehumanizes", "dehumanises", - "demobilized", "demobilised", - "demobilizes", "demobilises", - "democratize", "democratise", - "demoralized", "demoralised", - "demoralizes", "demoralises", - "deodorizing", "deodorising", - "desensitize", "desensitise", - "destabilize", "destabilise", - "discoloring", "discolouring", - "dishonoring", "dishonouring", - "dramatizing", "dramatising", - "economizing", "economising", - "empathizing", "empathising", - "emphasizing", "emphasising", - "endeavoring", "endeavouring", - "epitomizing", "epitomising", - "esophaguses", "oesophaguses", - "evangelized", "evangelised", - "evangelizes", "evangelises", - "extemporize", "extemporise", - "externalize", "externalise", - "factorizing", "factorising", - "familiarize", "familiarise", - "fantasizing", "fantasising", - "fertilizers", "fertilisers", - "fertilizing", "fertilising", - "formalizing", "formalising", - "fossilizing", "fossilising", - "fraternized", "fraternised", - "fraternizes", "fraternises", - "fulfillment", "fulfilment", - "galvanizing", "galvanising", - "generalized", "generalised", - "generalizes", "generalises", - "ghettoizing", "ghettoising", - "globalizing", "globalising", - "harmonizing", "harmonising", - "hemophiliac", "haemophiliac", - "hemorrhaged", "haemorrhaged", - "hemorrhages", "haemorrhages", - "hemorrhoids", "haemorrhoids", - "homogenized", "homogenised", - "homogenizes", "homogenises", - "hospitalize", "hospitalise", - "hybridizing", "hybridising", - "hypnotizing", "hypnotising", - "hypothesize", "hypothesise", - "immobilized", "immobilised", - "immobilizer", "immobiliser", - "immobilizes", "immobilises", - "immortalize", "immortalise", - "initialized", "initialised", - "initializes", "initialises", - "installment", "instalment", - "internalize", "internalise", - "italicizing", "italicising", - "jeopardized", "jeopardised", - "jeopardizes", "jeopardises", - "legitimized", "legitimised", - "legitimizes", "legitimises", - "liberalized", "liberalised", - "liberalizes", "liberalises", - "lionization", "lionisation", - "liquidizers", "liquidisers", - "liquidizing", "liquidising", - "magnetizing", "magnetising", - "maneuvering", "manoeuvring", - "marginalize", "marginalise", - "marvelously", "marvellously", - "materialize", "materialise", - "mechanizing", "mechanising", - "memorialize", "memorialise", - "mesmerizing", "mesmerising", - "metabolized", "metabolised", - "metabolizes", "metabolises", - "militarized", "militarised", - "militarizes", "militarises", - "milliliters", "millilitres", - "millimeters", "millimetres", - "miniaturize", "miniaturise", - "misbehavior", "misbehaviour", - "misdemeanor", "misdemeanour", - "modernizing", "modernising", - "moisturized", "moisturised", - "moisturizer", "moisturiser", - "moisturizes", "moisturises", - "monopolized", "monopolised", - "monopolizes", "monopolises", - "nationalize", "nationalise", - "naturalized", "naturalised", - "naturalizes", "naturalises", - "neighboring", "neighbouring", - "neutralized", "neutralised", - "neutralizes", "neutralises", - 
"normalizing", "normalising", - "orthopedics", "orthopaedics", - "ostracizing", "ostracising", - "outmaneuver", "outmanoeuvre", - "oxidization", "oxidisation", - "pasteurized", "pasteurised", - "pasteurizes", "pasteurises", - "patronizing", "patronising", - "personalize", "personalise", - "plagiarized", "plagiarised", - "plagiarizes", "plagiarises", - "politicized", "politicised", - "politicizes", "politicises", - "popularized", "popularised", - "popularizes", "popularises", - "pressurized", "pressurised", - "pressurizes", "pressurises", - "prioritized", "prioritised", - "prioritizes", "prioritises", - "privatizing", "privatising", - "proselytize", "proselytise", - "publicizing", "publicising", - "pulverizing", "pulverising", - "radicalized", "radicalised", - "radicalizes", "radicalises", - "randomizing", "randomising", - "rationalize", "rationalise", - "realization", "realisation", - "recognizing", "recognising", - "reconnoiter", "reconnoitre", - "regularized", "regularised", - "regularizes", "regularises", - "reorganized", "reorganised", - "reorganizes", "reorganises", - "revitalized", "revitalised", - "revitalizes", "revitalises", - "rhapsodized", "rhapsodised", - "rhapsodizes", "rhapsodises", - "romanticize", "romanticise", - "scandalized", "scandalised", - "scandalizes", "scandalises", - "scrutinized", "scrutinised", - "scrutinizes", "scrutinises", - "secularized", "secularised", - "secularizes", "secularises", - "sensitizing", "sensitising", - "serializing", "serialising", - "sermonizing", "sermonising", - "signalizing", "signalising", - "skeptically", "sceptically", - "socializing", "socialising", - "solemnizing", "solemnising", - "specialized", "specialised", - "specializes", "specialises", - "squirreling", "squirrelling", - "stabilizers", "stabilisers", - "stabilizing", "stabilising", - "standardize", "standardise", - "sterilizers", "sterilisers", - "sterilizing", "sterilising", - "stigmatized", "stigmatised", - "stigmatizes", "stigmatises", - "subsidizers", "subsidisers", - "subsidizing", "subsidising", - "summarizing", "summarising", - "symbolizing", "symbolising", - "sympathized", "sympathised", - "sympathizer", "sympathiser", - "sympathizes", "sympathises", - "synchronize", "synchronise", - "synthesized", "synthesised", - "synthesizes", "synthesises", - "systematize", "systematise", - "tantalizing", "tantalising", - "temporizing", "temporising", - "tenderizing", "tenderising", - "terrorizing", "terrorising", - "theatergoer", "theatregoer", - "traumatized", "traumatised", - "traumatizes", "traumatises", - "trivialized", "trivialised", - "trivializes", "trivialises", - "tyrannizing", "tyrannising", - "uncataloged", "uncatalogued", - "uncivilized", "uncivilised", - "unfavorable", "unfavourable", - "unfavorably", "unfavourably", - "unorganized", "unorganised", - "untrammeled", "untrammelled", - "utilization", "utilisation", - "vandalizing", "vandalising", - "verbalizing", "verbalising", - "victimizing", "victimising", - "visualizing", "visualising", - "vulgarizing", "vulgarising", - "watercolors", "watercolours", - "westernized", "westernised", - "westernizes", "westernises", - "amortizing", "amortising", - "anesthesia", "anaesthesia", - "anesthetic", "anaesthetic", - "anglicized", "anglicised", - "anglicizes", "anglicises", - "annualized", "annualised", - "antagonize", "antagonise", - "apologized", "apologised", - "apologizes", "apologises", - "appetizers", "appetisers", - "appetizing", "appetising", - "archeology", "archaeology", - "authorizes", "authorises", - "bastardize", 
"bastardise", - "bedeviling", "bedevilling", - "behavioral", "behavioural", - "belaboring", "belabouring", - "bowdlerize", "bowdlerise", - "brutalized", "brutalised", - "brutalizes", "brutalises", - "canalizing", "canalising", - "canonizing", "canonising", - "capitalize", "capitalise", - "caramelize", "caramelise", - "carbonized", "carbonised", - "carbonizes", "carbonises", - "cataloging", "cataloguing", - "catalyzing", "catalysing", - "categorize", "categorise", - "cauterized", "cauterised", - "cauterizes", "cauterises", - "centerfold", "centrefold", - "centiliter", "centilitre", - "centimeter", "centimetre", - "centralize", "centralise", - "channeling", "channelling", - "checkbooks", "chequebooks", - "civilizing", "civilising", - "colonizers", "colonisers", - "colonizing", "colonising", - "colorfully", "colourfully", - "colorizing", "colourizing", - "councilors", "councillors", - "counselors", "counsellors", - "criticized", "criticised", - "criticizes", "criticises", - "customized", "customised", - "customizes", "customises", - "dehumanize", "dehumanise", - "demobilize", "demobilise", - "demonizing", "demonising", - "demoralize", "demoralise", - "deodorized", "deodorised", - "deodorizes", "deodorises", - "deputizing", "deputising", - "digitizing", "digitising", - "discolored", "discoloured", - "disheveled", "dishevelled", - "dishonored", "dishonoured", - "dramatized", "dramatised", - "dramatizes", "dramatises", - "economized", "economised", - "economizes", "economises", - "empathized", "empathised", - "empathizes", "empathises", - "emphasized", "emphasised", - "emphasizes", "emphasises", - "endeavored", "endeavoured", - "energizing", "energising", - "epicenters", "epicentres", - "epitomized", "epitomised", - "epitomizes", "epitomises", - "equalizers", "equalisers", - "equalizing", "equalising", - "eulogizing", "eulogising", - "evangelize", "evangelise", - "factorized", "factorised", - "factorizes", "factorises", - "fantasized", "fantasised", - "fantasizes", "fantasises", - "favoritism", "favouritism", - "feminizing", "feminising", - "fertilized", "fertilised", - "fertilizer", "fertiliser", - "fertilizes", "fertilises", - "fiberglass", "fibreglass", - "finalizing", "finalising", - "flavorings", "flavourings", - "flavorless", "flavourless", - "flavorsome", "flavoursome", - "formalized", "formalised", - "formalizes", "formalises", - "fossilized", "fossilised", - "fossilizes", "fossilises", - "fraternize", "fraternise", - "galvanized", "galvanised", - "galvanizes", "galvanises", - "generalize", "generalise", - "ghettoized", "ghettoised", - "ghettoizes", "ghettoises", - "globalized", "globalised", - "globalizes", "globalises", - "gruelingly", "gruellingly", - "gynecology", "gynaecology", - "harmonized", "harmonised", - "harmonizes", "harmonises", - "hematology", "haematology", - "hemoglobin", "haemoglobin", - "hemophilia", "haemophilia", - "hemorrhage", "haemorrhage", - "homogenize", "homogenise", - "humanizing", "humanising", - "hybridized", "hybridised", - "hybridizes", "hybridises", - "hypnotized", "hypnotised", - "hypnotizes", "hypnotises", - "idealizing", "idealising", - "immobilize", "immobilise", - "immunizing", "immunising", - "impaneling", "impanelling", - "imperiling", "imperilling", - "initialing", "initialling", - "initialize", "initialise", - "ionization", "ionisation", - "italicized", "italicised", - "italicizes", "italicises", - "jeopardize", "jeopardise", - "kilometers", "kilometres", - "lackluster", "lacklustre", - "legalizing", "legalising", - "legitimize", "legitimise", - 
"liberalize", "liberalise", - "liquidized", "liquidised", - "liquidizer", "liquidiser", - "liquidizes", "liquidises", - "localizing", "localising", - "magnetized", "magnetised", - "magnetizes", "magnetises", - "maneuvered", "manoeuvred", - "marshaling", "marshalling", - "maximizing", "maximising", - "mechanized", "mechanised", - "mechanizes", "mechanises", - "memorizing", "memorising", - "mesmerized", "mesmerised", - "mesmerizes", "mesmerises", - "metabolize", "metabolise", - "militarize", "militarise", - "milliliter", "millilitre", - "millimeter", "millimetre", - "minimizing", "minimising", - "mobilizing", "mobilising", - "modernized", "modernised", - "modernizes", "modernises", - "moisturize", "moisturise", - "monopolize", "monopolise", - "moralizing", "moralising", - "naturalize", "naturalise", - "neighborly", "neighbourly", - "neutralize", "neutralise", - "normalized", "normalised", - "normalizes", "normalises", - "optimizing", "optimising", - "organizers", "organisers", - "organizing", "organising", - "orthopedic", "orthopaedic", - "ostracized", "ostracised", - "ostracizes", "ostracises", - "paralyzing", "paralysing", - "pasteurize", "pasteurise", - "patronized", "patronised", - "patronizes", "patronises", - "pedophiles", "paedophiles", - "pedophilia", "paedophilia", - "penalizing", "penalising", - "plagiarize", "plagiarise", - "plowshares", "ploughshares", - "polarizing", "polarising", - "politicize", "politicise", - "popularize", "popularise", - "prioritize", "prioritise", - "privatized", "privatised", - "privatizes", "privatises", - "publicized", "publicised", - "publicizes", "publicises", - "pulverized", "pulverised", - "pulverizes", "pulverises", - "quarreling", "quarrelling", - "radicalize", "radicalise", - "randomized", "randomised", - "randomizes", "randomises", - "realizable", "realisable", - "recognized", "recognised", - "recognizes", "recognises", - "regularize", "regularise", - "remodeling", "remodelling", - "reorganize", "reorganise", - "revitalize", "revitalise", - "rhapsodize", "rhapsodise", - "ritualized", "ritualised", - "sanitizing", "sanitising", - "satirizing", "satirising", - "scandalize", "scandalise", - "scrutinize", "scrutinise", - "secularize", "secularise", - "sensitized", "sensitised", - "sensitizes", "sensitises", - "sepulchers", "sepulchres", - "serialized", "serialised", - "serializes", "serialises", - "sermonized", "sermonised", - "sermonizes", "sermonises", - "shriveling", "shrivelling", - "signalized", "signalised", - "signalizes", "signalises", - "skepticism", "scepticism", - "socialized", "socialised", - "socializes", "socialises", - "sodomizing", "sodomising", - "solemnized", "solemnised", - "solemnizes", "solemnises", - "specialize", "specialise", - "squirreled", "squirrelled", - "stabilized", "stabilised", - "stabilizer", "stabiliser", - "stabilizes", "stabilises", - "stenciling", "stencilling", - "sterilized", "sterilised", - "sterilizer", "steriliser", - "sterilizes", "sterilises", - "stigmatize", "stigmatise", - "subsidized", "subsidised", - "subsidizer", "subsidiser", - "subsidizes", "subsidises", - "summarized", "summarised", - "summarizes", "summarises", - "symbolized", "symbolised", - "symbolizes", "symbolises", - "sympathize", "sympathise", - "tantalized", "tantalised", - "tantalizes", "tantalises", - "temporized", "temporised", - "temporizes", "temporises", - "tenderized", "tenderised", - "tenderizes", "tenderises", - "terrorized", "terrorised", - "terrorizes", "terrorises", - "theorizing", "theorising", - "traumatize", "traumatise", - 
"trivialize", "trivialise", - "tyrannized", "tyrannised", - "tyrannizes", "tyrannises", - "unionizing", "unionising", - "unraveling", "unravelling", - "urbanizing", "urbanising", - "utilizable", "utilisable", - "vandalized", "vandalised", - "vandalizes", "vandalises", - "vaporizing", "vaporising", - "verbalized", "verbalised", - "verbalizes", "verbalises", - "victimized", "victimised", - "victimizes", "victimises", - "visualized", "visualised", - "visualizes", "visualises", - "vocalizing", "vocalising", - "vulcanized", "vulcanised", - "vulgarized", "vulgarised", - "vulgarizes", "vulgarises", - "watercolor", "watercolour", - "westernize", "westernise", - "womanizers", "womanisers", - "womanizing", "womanising", - "worshiping", "worshipping", - "agonizing", "agonising", - "airplanes", "aeroplanes", - "amortized", "amortised", - "amortizes", "amortises", - "analyzing", "analysing", - "apologize", "apologise", - "appetizer", "appetiser", - "artifacts", "artefacts", - "baptizing", "baptising", - "bedeviled", "bedevilled", - "behaviors", "behaviours", - "bejeweled", "bejewelled", - "belabored", "belaboured", - "brutalize", "brutalise", - "canalized", "canalised", - "canalizes", "canalises", - "canonized", "canonised", - "canonizes", "canonises", - "carbonize", "carbonise", - "cataloged", "catalogued", - "catalyzed", "catalysed", - "catalyzes", "catalyses", - "cauterize", "cauterise", - "channeled", "channelled", - "checkbook", "chequebook", - "checkered", "chequered", - "chiseling", "chiselling", - "civilized", "civilised", - "civilizes", "civilises", - "clamoring", "clamouring", - "colonized", "colonised", - "colonizer", "coloniser", - "colonizes", "colonises", - "colorants", "colourants", - "colorized", "colourized", - "colorizes", "colourizes", - "colorless", "colourless", - "councilor", "councillor", - "counseled", "counselled", - "counselor", "counsellor", - "criticize", "criticise", - "cudgeling", "cudgelling", - "customize", "customise", - "demonized", "demonised", - "demonizes", "demonises", - "deodorize", "deodorise", - "deputized", "deputised", - "deputizes", "deputises", - "digitized", "digitised", - "digitizes", "digitises", - "discolors", "discolours", - "dishonors", "dishonours", - "dramatize", "dramatise", - "driveling", "drivelling", - "economize", "economise", - "empathize", "empathise", - "emphasize", "emphasise", - "enameling", "enamelling", - "endeavors", "endeavours", - "energized", "energised", - "energizes", "energises", - "enthralls", "enthrals", - "epicenter", "epicentre", - "epitomize", "epitomise", - "equalized", "equalised", - "equalizer", "equaliser", - "equalizes", "equalises", - "eulogized", "eulogised", - "eulogizes", "eulogises", - "factorize", "factorise", - "fantasize", "fantasise", - "favorable", "favourable", - "favorably", "favourably", - "favorites", "favourites", - "feminized", "feminised", - "feminizes", "feminises", - "fertilize", "fertilise", - "finalized", "finalised", - "finalizes", "finalises", - "flavoring", "flavouring", - "formalize", "formalise", - "fossilize", "fossilise", - "funneling", "funnelling", - "galvanize", "galvanise", - "gamboling", "gambolling", - "ghettoize", "ghettoise", - "globalize", "globalise", - "gonorrhea", "gonorrhoea", - "groveling", "grovelling", - "harboring", "harbouring", - "harmonize", "harmonise", - "honorably", "honourably", - "humanized", "humanised", - "humanizes", "humanises", - "hybridize", "hybridise", - "hypnotize", "hypnotise", - "idealized", "idealised", - "idealizes", "idealises", - "idolizing", "idolising", 
- "immunized", "immunised", - "immunizes", "immunises", - "impaneled", "impanelled", - "imperiled", "imperilled", - "initialed", "initialled", - "italicize", "italicise", - "itemizing", "itemising", - "kilometer", "kilometre", - "legalized", "legalised", - "legalizes", "legalises", - "lionizing", "lionising", - "liquidize", "liquidise", - "localized", "localised", - "localizes", "localises", - "magnetize", "magnetise", - "maneuvers", "manoeuvres", - "marshaled", "marshalled", - "marveling", "marvelling", - "marvelous", "marvellous", - "maximized", "maximised", - "maximizes", "maximises", - "mechanize", "mechanise", - "memorized", "memorised", - "memorizes", "memorises", - "mesmerize", "mesmerise", - "minimized", "minimised", - "minimizes", "minimises", - "mobilized", "mobilised", - "mobilizes", "mobilises", - "modernize", "modernise", - "moldering", "mouldering", - "moralized", "moralised", - "moralizes", "moralises", - "motorized", "motorised", - "mustached", "moustached", - "mustaches", "moustaches", - "neighbors", "neighbours", - "normalize", "normalise", - "optimized", "optimised", - "optimizes", "optimises", - "organized", "organised", - "organizer", "organiser", - "organizes", "organises", - "ostracize", "ostracise", - "oxidizing", "oxidising", - "panelists", "panellists", - "paralyzed", "paralysed", - "paralyzes", "paralyses", - "parceling", "parcelling", - "patronize", "patronise", - "pedophile", "paedophile", - "penalized", "penalised", - "penalizes", "penalises", - "penciling", "pencilling", - "plowshare", "ploughshare", - "polarized", "polarised", - "polarizes", "polarises", - "practiced", "practised", - "pretenses", "pretences", - "privatize", "privatise", - "publicize", "publicise", - "pulverize", "pulverise", - "quarreled", "quarrelled", - "randomize", "randomise", - "realizing", "realising", - "recognize", "recognise", - "refueling", "refuelling", - "remodeled", "remodelled", - "remolding", "remoulding", - "saltpeter", "saltpetre", - "sanitized", "sanitised", - "sanitizes", "sanitises", - "satirized", "satirised", - "satirizes", "satirises", - "sensitize", "sensitise", - "sepulcher", "sepulchre", - "serialize", "serialise", - "sermonize", "sermonise", - "shoveling", "shovelling", - "shriveled", "shrivelled", - "signaling", "signalling", - "signalize", "signalise", - "skeptical", "sceptical", - "sniveling", "snivelling", - "snorkeled", "snorkelled", - "socialize", "socialise", - "sodomized", "sodomised", - "sodomizes", "sodomises", - "solemnize", "solemnise", - "spiraling", "spiralling", - "splendors", "splendours", - "stabilize", "stabilise", - "stenciled", "stencilled", - "sterilize", "sterilise", - "subsidize", "subsidise", - "succoring", "succouring", - "sulfurous", "sulphurous", - "summarize", "summarise", - "swiveling", "swivelling", - "symbolize", "symbolise", - "tantalize", "tantalise", - "temporize", "temporise", - "tenderize", "tenderise", - "terrorize", "terrorise", - "theorized", "theorised", - "theorizes", "theorises", - "travelers", "travellers", - "traveling", "travelling", - "tricolors", "tricolours", - "tunneling", "tunnelling", - "tyrannize", "tyrannise", - "unequaled", "unequalled", - "unionized", "unionised", - "unionizes", "unionises", - "unraveled", "unravelled", - "unrivaled", "unrivalled", - "urbanized", "urbanised", - "urbanizes", "urbanises", - "utilizing", "utilising", - "vandalize", "vandalise", - "vaporized", "vaporised", - "vaporizes", "vaporises", - "verbalize", "verbalise", - "victimize", "victimise", - "visualize", "visualise", - "vocalized", 
"vocalised", - "vocalizes", "vocalises", - "vulgarize", "vulgarise", - "weaseling", "weaselling", - "womanized", "womanised", - "womanizer", "womaniser", - "womanizes", "womanises", - "worshiped", "worshipped", - "worshiper", "worshipper", - "agonized", "agonised", - "agonizes", "agonises", - "airplane", "aeroplane", - "aluminum", "aluminium", - "amortize", "amortise", - "analyzed", "analysed", - "analyzes", "analyses", - "armorers", "armourers", - "armories", "armouries", - "artifact", "artefact", - "baptized", "baptised", - "baptizes", "baptises", - "behavior", "behaviour", - "behooved", "behoved", - "behooves", "behoves", - "belabors", "belabours", - "calibers", "calibres", - "canalize", "canalise", - "canonize", "canonise", - "catalogs", "catalogues", - "catalyze", "catalyse", - "caviling", "cavilling", - "centered", "centred", - "chiseled", "chiselled", - "civilize", "civilise", - "clamored", "clamoured", - "colonize", "colonise", - "colorant", "colourant", - "coloreds", "coloureds", - "colorful", "colourful", - "coloring", "colouring", - "colorize", "colourize", - "coziness", "cosiness", - "cruelest", "cruellest", - "cudgeled", "cudgelled", - "defenses", "defences", - "demeanor", "demeanour", - "demonize", "demonise", - "deputize", "deputise", - "diarrhea", "diarrhoea", - "digitize", "digitise", - "disfavor", "disfavour", - "dishonor", "dishonour", - "distills", "distils", - "driveled", "drivelled", - "enameled", "enamelled", - "enamored", "enamoured", - "endeavor", "endeavour", - "energize", "energise", - "epaulets", "epaulettes", - "equalize", "equalise", - "estrogen", "oestrogen", - "etiology", "aetiology", - "eulogize", "eulogise", - "favoring", "favouring", - "favorite", "favourite", - "feminize", "feminise", - "finalize", "finalise", - "flavored", "flavoured", - "flutists", "flautists", - "fulfills", "fulfils", - "funneled", "funnelled", - "gamboled", "gambolled", - "graveled", "gravelled", - "groveled", "grovelled", - "grueling", "gruelling", - "harbored", "harboured", - "honoring", "honouring", - "humanize", "humanise", - "humoring", "humouring", - "idealize", "idealise", - "idolized", "idolised", - "idolizes", "idolises", - "immunize", "immunise", - "ionizing", "ionising", - "itemized", "itemised", - "itemizes", "itemises", - "jewelers", "jewellers", - "labeling", "labelling", - "laborers", "labourers", - "laboring", "labouring", - "legalize", "legalise", - "leukemia", "leukaemia", - "levelers", "levellers", - "leveling", "levelling", - "libeling", "libelling", - "libelous", "libellous", - "lionized", "lionised", - "lionizes", "lionises", - "localize", "localise", - "louvered", "louvred", - "maneuver", "manoeuvre", - "marveled", "marvelled", - "maximize", "maximise", - "memorize", "memorise", - "minimize", "minimise", - "mobilize", "mobilise", - "modelers", "modellers", - "modeling", "modelling", - "moldered", "mouldered", - "moldiest", "mouldiest", - "moldings", "mouldings", - "moralize", "moralise", - "mustache", "moustache", - "neighbor", "neighbour", - "odorless", "odourless", - "offenses", "offences", - "optimize", "optimise", - "organize", "organise", - "oxidized", "oxidised", - "oxidizes", "oxidises", - "paneling", "panelling", - "panelist", "panellist", - "paralyze", "paralyse", - "parceled", "parcelled", - "pedaling", "pedalling", - "penalize", "penalise", - "penciled", "pencilled", - "polarize", "polarise", - "pretense", "pretence", - "pummeled", "pummelling", - "raveling", "ravelling", - "realized", "realised", - "realizes", "realises", - "refueled", "refuelled", 
- "remolded", "remoulded", - "revelers", "revellers", - "reveling", "revelling", - "rivaling", "rivalling", - "sanitize", "sanitise", - "satirize", "satirise", - "savories", "savouries", - "savoring", "savouring", - "scepters", "sceptres", - "shoveled", "shovelled", - "signaled", "signalled", - "skeptics", "sceptics", - "sniveled", "snivelled", - "sodomize", "sodomise", - "specters", "spectres", - "spiraled", "spiralled", - "splendor", "splendour", - "succored", "succoured", - "sulfates", "sulphates", - "sulfides", "sulphides", - "swiveled", "swivelled", - "tasseled", "tasselled", - "theaters", "theatres", - "theorize", "theorise", - "toweling", "towelling", - "traveler", "traveller", - "trialing", "trialling", - "tricolor", "tricolour", - "tunneled", "tunnelled", - "unionize", "unionise", - "unsavory", "unsavoury", - "urbanize", "urbanise", - "utilized", "utilised", - "utilizes", "utilises", - "vaporize", "vaporise", - "vocalize", "vocalise", - "weaseled", "weaselled", - "womanize", "womanise", - "yodeling", "yodelling", - "agonize", "agonise", - "analyze", "analyse", - "appalls", "appals", - "armored", "armoured", - "armorer", "armourer", - "baptize", "baptise", - "behoove", "behove", - "belabor", "belabour", - "beveled", "bevelled", - "caliber", "calibre", - "caroled", "carolled", - "caviled", "cavilled", - "centers", "centres", - "clamors", "clamours", - "clangor", "clangour", - "colored", "coloured", - "coziest", "cosiest", - "crueler", "crueller", - "defense", "defence", - "dialing", "dialling", - "dialogs", "dialogues", - "distill", "distil", - "dueling", "duelling", - "enrolls", "enrols", - "epaulet", "epaulette", - "favored", "favoured", - "flavors", "flavours", - "flutist", "flautist", - "fueling", "fuelling", - "fulfill", "fulfil", - "goiters", "goitres", - "harbors", "harbours", - "honored", "honoured", - "humored", "humoured", - "idolize", "idolise", - "ionized", "ionised", - "ionizes", "ionises", - "itemize", "itemise", - "jeweled", "jewelled", - "jeweler", "jeweller", - "jewelry", "jewellery", - "labeled", "labelled", - "labored", "laboured", - "laborer", "labourer", - "leveled", "levelled", - "leveler", "leveller", - "libeled", "libelled", - "lionize", "lionise", - "louvers", "louvres", - "modeled", "modelled", - "modeler", "modeller", - "molders", "moulders", - "moldier", "mouldier", - "molding", "moulding", - "molting", "moulting", - "offense", "offence", - "oxidize", "oxidise", - "pajamas", "pyjamas", - "paneled", "panelled", - "parlors", "parlours", - "pedaled", "pedalled", - "plowing", "ploughing", - "plowman", "ploughman", - "plowmen", "ploughmen", - "realize", "realise", - "remolds", "remoulds", - "reveled", "revelled", - "reveler", "reveller", - "rivaled", "rivalled", - "rumored", "rumoured", - "saviors", "saviours", - "savored", "savoured", - "scepter", "sceptre", - "skeptic", "sceptic", - "specter", "spectre", - "succors", "succours", - "sulfate", "sulphate", - "sulfide", "sulphide", - "theater", "theatre", - "toweled", "towelled", - "toxemia", "toxaemia", - "trialed", "trialled", - "utilize", "utilise", - "yodeled", "yodelled", - "anemia", "anaemia", - "anemic", "anaemic", - "appall", "appal", - "arbors", "arbours", - "armory", "armoury", - "candor", "candour", - "center", "centre", - "clamor", "clamour", - "colors", "colours", - "cozier", "cosier", - "cozies", "cosies", - "cozily", "cosily", - "dialed", "dialled", - "drafty", "draughty", - "dueled", "duelled", - "favors", "favours", - "fervor", "fervour", - "fibers", "fibres", - "flavor", "flavour", - "fueled", 
"fuelled", - "goiter", "goitre", - "harbor", "harbour", - "honors", "honours", - "humors", "humours", - "labors", "labours", - "liters", "litres", - "louver", "louvre", - "luster", "lustre", - "meager", "meagre", - "miters", "mitres", - "molded", "moulded", - "molder", "moulder", - "molted", "moulted", - "pajama", "pyjama", - "parlor", "parlour", - "plowed", "ploughed", - "rancor", "rancour", - "remold", "remould", - "rigors", "rigours", - "rumors", "rumours", - "savors", "savours", - "savory", "savoury", - "succor", "succour", - "tumors", "tumours", - "vapors", "vapours", - "aging", "ageing", - "arbor", "arbour", - "ardor", "ardour", - "armor", "armour", - "chili", "chilli", - "color", "colour", - "edema", "edoema", - "favor", "favour", - "fecal", "faecal", - "feces", "faeces", - "fiber", "fibre", - "honor", "honour", - "humor", "humour", - "labor", "labour", - "liter", "litre", - "miter", "mitre", - "molds", "moulds", - "moldy", "mouldy", - "molts", "moults", - "odors", "odours", - "plows", "ploughs", - "rigor", "rigour", - "rumor", "rumour", - "savor", "savour", - "valor", "valour", - "vapor", "vapour", - "vigor", "vigour", - "cozy", "cosy", - "mold", "mould", - "molt", "moult", - "odor", "odour", - "plow", "plough", -} diff --git a/vertical-pod-autoscaler/vendor/github.com/golang/lint/LICENSE b/vertical-pod-autoscaler/vendor/github.com/golang/lint/LICENSE deleted file mode 100644 index 65d761bc9f28..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/golang/lint/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2013 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vertical-pod-autoscaler/vendor/github.com/golang/lint/golint/golint.go b/vertical-pod-autoscaler/vendor/github.com/golang/lint/golint/golint.go deleted file mode 100644 index ac024b6d26f1..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/golang/lint/golint/golint.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright (c) 2013 The Go Authors. All rights reserved. 
-// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file or at -// https://developers.google.com/open-source/licenses/bsd. - -// golint lints the Go source files named on its command line. -package main - -import ( - "flag" - "fmt" - "go/build" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "golang.org/x/lint" -) - -var ( - minConfidence = flag.Float64("min_confidence", 0.8, "minimum confidence of a problem to print it") - setExitStatus = flag.Bool("set_exit_status", false, "set exit status to 1 if any issues are found") - suggestions int -) - -func usage() { - fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) - fmt.Fprintf(os.Stderr, "\tgolint [flags] # runs on package in current directory\n") - fmt.Fprintf(os.Stderr, "\tgolint [flags] [packages]\n") - fmt.Fprintf(os.Stderr, "\tgolint [flags] [directories] # where a '/...' suffix includes all sub-directories\n") - fmt.Fprintf(os.Stderr, "\tgolint [flags] [files] # all must belong to a single package\n") - fmt.Fprintf(os.Stderr, "Flags:\n") - flag.PrintDefaults() -} - -func main() { - flag.Usage = usage - flag.Parse() - - if flag.NArg() == 0 { - lintDir(".") - } else { - // dirsRun, filesRun, and pkgsRun indicate whether golint is applied to - // directory, file or package targets. The distinction affects which - // checks are run. It is no valid to mix target types. - var dirsRun, filesRun, pkgsRun int - var args []string - for _, arg := range flag.Args() { - if strings.HasSuffix(arg, "/...") && isDir(arg[:len(arg)-len("/...")]) { - dirsRun = 1 - for _, dirname := range allPackagesInFS(arg) { - args = append(args, dirname) - } - } else if isDir(arg) { - dirsRun = 1 - args = append(args, arg) - } else if exists(arg) { - filesRun = 1 - args = append(args, arg) - } else { - pkgsRun = 1 - args = append(args, arg) - } - } - - if dirsRun+filesRun+pkgsRun != 1 { - usage() - os.Exit(2) - } - switch { - case dirsRun == 1: - for _, dir := range args { - lintDir(dir) - } - case filesRun == 1: - lintFiles(args...) - case pkgsRun == 1: - for _, pkg := range importPaths(args) { - lintPackage(pkg) - } - } - } - - if *setExitStatus && suggestions > 0 { - fmt.Fprintf(os.Stderr, "Found %d lint suggestions; failing.\n", suggestions) - os.Exit(1) - } -} - -func isDir(filename string) bool { - fi, err := os.Stat(filename) - return err == nil && fi.IsDir() -} - -func exists(filename string) bool { - _, err := os.Stat(filename) - return err == nil -} - -func lintFiles(filenames ...string) { - files := make(map[string][]byte) - for _, filename := range filenames { - src, err := ioutil.ReadFile(filename) - if err != nil { - fmt.Fprintln(os.Stderr, err) - continue - } - files[filename] = src - } - - l := new(lint.Linter) - ps, err := l.LintFiles(files) - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - return - } - for _, p := range ps { - if p.Confidence >= *minConfidence { - fmt.Printf("%v: %s\n", p.Position, p.Text) - suggestions++ - } - } -} - -func lintDir(dirname string) { - pkg, err := build.ImportDir(dirname, 0) - lintImportedPackage(pkg, err) -} - -func lintPackage(pkgname string) { - pkg, err := build.Import(pkgname, ".", 0) - lintImportedPackage(pkg, err) -} - -func lintImportedPackage(pkg *build.Package, err error) { - if err != nil { - if _, nogo := err.(*build.NoGoError); nogo { - // Don't complain if the failure is due to no Go source files. - return - } - fmt.Fprintln(os.Stderr, err) - return - } - - var files []string - files = append(files, pkg.GoFiles...) 
- files = append(files, pkg.CgoFiles...) - files = append(files, pkg.TestGoFiles...) - if pkg.Dir != "." { - for i, f := range files { - files[i] = filepath.Join(pkg.Dir, f) - } - } - // TODO(dsymonds): Do foo_test too (pkg.XTestGoFiles) - - lintFiles(files...) -} diff --git a/vertical-pod-autoscaler/vendor/github.com/golang/lint/golint/import.go b/vertical-pod-autoscaler/vendor/github.com/golang/lint/golint/import.go deleted file mode 100644 index 2ba9dea77927..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/golang/lint/golint/import.go +++ /dev/null @@ -1,309 +0,0 @@ -package main - -/* - -This file holds a direct copy of the import path matching code of -https://github.com/golang/go/blob/master/src/cmd/go/main.go. It can be -replaced when https://golang.org/issue/8768 is resolved. - -It has been updated to follow upstream changes in a few ways. - -*/ - -import ( - "fmt" - "go/build" - "log" - "os" - "path" - "path/filepath" - "regexp" - "runtime" - "strings" -) - -var ( - buildContext = build.Default - goroot = filepath.Clean(runtime.GOROOT()) - gorootSrc = filepath.Join(goroot, "src") -) - -// importPathsNoDotExpansion returns the import paths to use for the given -// command line, but it does no ... expansion. -func importPathsNoDotExpansion(args []string) []string { - if len(args) == 0 { - return []string{"."} - } - var out []string - for _, a := range args { - // Arguments are supposed to be import paths, but - // as a courtesy to Windows developers, rewrite \ to / - // in command-line arguments. Handles .\... and so on. - if filepath.Separator == '\\' { - a = strings.Replace(a, `\`, `/`, -1) - } - - // Put argument in canonical form, but preserve leading ./. - if strings.HasPrefix(a, "./") { - a = "./" + path.Clean(a) - if a == "./." { - a = "." - } - } else { - a = path.Clean(a) - } - if a == "all" || a == "std" { - out = append(out, allPackages(a)...) - continue - } - out = append(out, a) - } - return out -} - -// importPaths returns the import paths to use for the given command line. -func importPaths(args []string) []string { - args = importPathsNoDotExpansion(args) - var out []string - for _, a := range args { - if strings.Contains(a, "...") { - if build.IsLocalImport(a) { - out = append(out, allPackagesInFS(a)...) - } else { - out = append(out, allPackages(a)...) - } - continue - } - out = append(out, a) - } - return out -} - -// matchPattern(pattern)(name) reports whether -// name matches pattern. Pattern is a limited glob -// pattern in which '...' means 'any string' and there -// is no other special syntax. -func matchPattern(pattern string) func(name string) bool { - re := regexp.QuoteMeta(pattern) - re = strings.Replace(re, `\.\.\.`, `.*`, -1) - // Special case: foo/... matches foo too. - if strings.HasSuffix(re, `/.*`) { - re = re[:len(re)-len(`/.*`)] + `(/.*)?` - } - reg := regexp.MustCompile(`^` + re + `$`) - return func(name string) bool { - return reg.MatchString(name) - } -} - -// hasPathPrefix reports whether the path s begins with the -// elements in prefix. -func hasPathPrefix(s, prefix string) bool { - switch { - default: - return false - case len(s) == len(prefix): - return s == prefix - case len(s) > len(prefix): - if prefix != "" && prefix[len(prefix)-1] == '/' { - return strings.HasPrefix(s, prefix) - } - return s[len(prefix)] == '/' && s[:len(prefix)] == prefix - } -} - -// treeCanMatchPattern(pattern)(name) reports whether -// name or children of name can possibly match pattern. -// Pattern is the same limited glob accepted by matchPattern. 
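Editor's note on the import-path matcher removed above: `matchPattern` implements the `go` tool's limited glob, in which `...` means "any string" and a trailing `/...` also matches the root package itself. A minimal, runnable sketch of that behavior (an illustration only, not part of this diff; the example import paths are hypothetical):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// matchPattern mirrors the helper removed above: quote the pattern,
// turn the escaped "..." back into ".*", and special-case a trailing
// "/..." so that "foo/..." also matches "foo" itself.
func matchPattern(pattern string) func(name string) bool {
	re := regexp.QuoteMeta(pattern)
	re = strings.Replace(re, `\.\.\.`, `.*`, -1)
	if strings.HasSuffix(re, `/.*`) {
		re = re[:len(re)-len(`/.*`)] + `(/.*)?`
	}
	reg := regexp.MustCompile(`^` + re + `$`)
	return reg.MatchString
}

func main() {
	match := matchPattern("k8s.io/autoscaler/...")
	fmt.Println(match("k8s.io/autoscaler"))                    // true: trailing /... matches the root too
	fmt.Println(match("k8s.io/autoscaler/cluster-autoscaler")) // true
	fmt.Println(match("k8s.io/contrib"))                       // false
}
```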
-func treeCanMatchPattern(pattern string) func(name string) bool { - wildCard := false - if i := strings.Index(pattern, "..."); i >= 0 { - wildCard = true - pattern = pattern[:i] - } - return func(name string) bool { - return len(name) <= len(pattern) && hasPathPrefix(pattern, name) || - wildCard && strings.HasPrefix(name, pattern) - } -} - -// allPackages returns all the packages that can be found -// under the $GOPATH directories and $GOROOT matching pattern. -// The pattern is either "all" (all packages), "std" (standard packages) -// or a path including "...". -func allPackages(pattern string) []string { - pkgs := matchPackages(pattern) - if len(pkgs) == 0 { - fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) - } - return pkgs -} - -func matchPackages(pattern string) []string { - match := func(string) bool { return true } - treeCanMatch := func(string) bool { return true } - if pattern != "all" && pattern != "std" { - match = matchPattern(pattern) - treeCanMatch = treeCanMatchPattern(pattern) - } - - have := map[string]bool{ - "builtin": true, // ignore pseudo-package that exists only for documentation - } - if !buildContext.CgoEnabled { - have["runtime/cgo"] = true // ignore during walk - } - var pkgs []string - - // Commands - cmd := filepath.Join(goroot, "src/cmd") + string(filepath.Separator) - filepath.Walk(cmd, func(path string, fi os.FileInfo, err error) error { - if err != nil || !fi.IsDir() || path == cmd { - return nil - } - name := path[len(cmd):] - if !treeCanMatch(name) { - return filepath.SkipDir - } - // Commands are all in cmd/, not in subdirectories. - if strings.Contains(name, string(filepath.Separator)) { - return filepath.SkipDir - } - - // We use, e.g., cmd/gofmt as the pseudo import path for gofmt. - name = "cmd/" + name - if have[name] { - return nil - } - have[name] = true - if !match(name) { - return nil - } - _, err = buildContext.ImportDir(path, 0) - if err != nil { - if _, noGo := err.(*build.NoGoError); !noGo { - log.Print(err) - } - return nil - } - pkgs = append(pkgs, name) - return nil - }) - - for _, src := range buildContext.SrcDirs() { - if (pattern == "std" || pattern == "cmd") && src != gorootSrc { - continue - } - src = filepath.Clean(src) + string(filepath.Separator) - root := src - if pattern == "cmd" { - root += "cmd" + string(filepath.Separator) - } - filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { - if err != nil || !fi.IsDir() || path == src { - return nil - } - - // Avoid .foo, _foo, and testdata directory trees. - _, elem := filepath.Split(path) - if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" { - return filepath.SkipDir - } - - name := filepath.ToSlash(path[len(src):]) - if pattern == "std" && (strings.Contains(name, ".") || name == "cmd") { - // The name "std" is only the standard library. - // If the name is cmd, it's the root of the command tree. - return filepath.SkipDir - } - if !treeCanMatch(name) { - return filepath.SkipDir - } - if have[name] { - return nil - } - have[name] = true - if !match(name) { - return nil - } - _, err = buildContext.ImportDir(path, 0) - if err != nil { - if _, noGo := err.(*build.NoGoError); noGo { - return nil - } - } - pkgs = append(pkgs, name) - return nil - }) - } - return pkgs -} - -// allPackagesInFS is like allPackages but is passed a pattern -// beginning ./ or ../, meaning it should scan the tree rooted -// at the given directory. There are ... in the pattern too. 
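Its companion `treeCanMatchPattern`, also removed here, answers a different question: could `name`, or anything beneath it, still match the pattern? `matchPackages` uses it to prune whole subtrees from the `filepath.Walk` via `filepath.SkipDir`. A self-contained sketch reusing the deleted helpers (again an editor's illustration with hypothetical demo paths, not diff content):

```go
package main

import (
	"fmt"
	"strings"
)

// hasPathPrefix reports whether the slash-separated path s begins with
// the path elements in prefix (identical to the deleted helper).
func hasPathPrefix(s, prefix string) bool {
	switch {
	default:
		return false
	case len(s) == len(prefix):
		return s == prefix
	case len(s) > len(prefix):
		if prefix != "" && prefix[len(prefix)-1] == '/' {
			return strings.HasPrefix(s, prefix)
		}
		return s[len(prefix)] == '/' && s[:len(prefix)] == prefix
	}
}

// treeCanMatchPattern reports whether name or any of its children could
// still match pattern, so directory walks can skip dead subtrees early.
func treeCanMatchPattern(pattern string) func(name string) bool {
	wildCard := false
	if i := strings.Index(pattern, "..."); i >= 0 {
		wildCard = true
		pattern = pattern[:i]
	}
	return func(name string) bool {
		return len(name) <= len(pattern) && hasPathPrefix(pattern, name) ||
			wildCard && strings.HasPrefix(name, pattern)
	}
}

func main() {
	canMatch := treeCanMatchPattern("k8s.io/autoscaler/...")
	fmt.Println(canMatch("k8s.io"))            // true: the pattern lives under this tree
	fmt.Println(canMatch("k8s.io/autoscaler")) // true: wildcard prefix
	fmt.Println(canMatch("golang.org"))        // false: safe to SkipDir
}
```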
-func allPackagesInFS(pattern string) []string { - pkgs := matchPackagesInFS(pattern) - if len(pkgs) == 0 { - fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) - } - return pkgs -} - -func matchPackagesInFS(pattern string) []string { - // Find directory to begin the scan. - // Could be smarter but this one optimization - // is enough for now, since ... is usually at the - // end of a path. - i := strings.Index(pattern, "...") - dir, _ := path.Split(pattern[:i]) - - // pattern begins with ./ or ../. - // path.Clean will discard the ./ but not the ../. - // We need to preserve the ./ for pattern matching - // and in the returned import paths. - prefix := "" - if strings.HasPrefix(pattern, "./") { - prefix = "./" - } - match := matchPattern(pattern) - - var pkgs []string - filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error { - if err != nil || !fi.IsDir() { - return nil - } - if path == dir { - // filepath.Walk starts at dir and recurses. For the recursive case, - // the path is the result of filepath.Join, which calls filepath.Clean. - // The initial case is not Cleaned, though, so we do this explicitly. - // - // This converts a path like "./io/" to "io". Without this step, running - // "cd $GOROOT/src/pkg; go list ./io/..." would incorrectly skip the io - // package, because prepending the prefix "./" to the unclean path would - // result in "././io", and match("././io") returns false. - path = filepath.Clean(path) - } - - // Avoid .foo, _foo, and testdata directory trees, but do not avoid "." or "..". - _, elem := filepath.Split(path) - dot := strings.HasPrefix(elem, ".") && elem != "." && elem != ".." - if dot || strings.HasPrefix(elem, "_") || elem == "testdata" { - return filepath.SkipDir - } - - name := prefix + filepath.ToSlash(path) - if !match(name) { - return nil - } - if _, err = build.ImportDir(path, 0); err != nil { - if _, noGo := err.(*build.NoGoError); !noGo { - log.Print(err) - } - return nil - } - pkgs = append(pkgs, name) - return nil - }) - return pkgs -} diff --git a/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go index 9e029e8cb44b..beab60c6121c 100644 --- a/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go +++ b/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go @@ -72,6 +72,7 @@ var FieldDescriptorProto_Type_name = map[int32]string{ 17: "TYPE_SINT32", 18: "TYPE_SINT64", } + var FieldDescriptorProto_Type_value = map[string]int32{ "TYPE_DOUBLE": 1, "TYPE_FLOAT": 2, @@ -98,9 +99,11 @@ func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { *p = x return p } + func (x FieldDescriptorProto_Type) String() string { return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) } + func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") if err != nil { @@ -109,8 +112,9 @@ func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { *x = FieldDescriptorProto_Type(value) return nil } + func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{4, 0} + return fileDescriptor_e5baabe45344a177, []int{4, 0} } type FieldDescriptorProto_Label int32 @@ -127,6 +131,7 @@ var 
FieldDescriptorProto_Label_name = map[int32]string{ 2: "LABEL_REQUIRED", 3: "LABEL_REPEATED", } + var FieldDescriptorProto_Label_value = map[string]int32{ "LABEL_OPTIONAL": 1, "LABEL_REQUIRED": 2, @@ -138,9 +143,11 @@ func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { *p = x return p } + func (x FieldDescriptorProto_Label) String() string { return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) } + func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") if err != nil { @@ -149,8 +156,9 @@ func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { *x = FieldDescriptorProto_Label(value) return nil } + func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{4, 1} + return fileDescriptor_e5baabe45344a177, []int{4, 1} } // Generated classes can be optimized for speed or code size. @@ -168,6 +176,7 @@ var FileOptions_OptimizeMode_name = map[int32]string{ 2: "CODE_SIZE", 3: "LITE_RUNTIME", } + var FileOptions_OptimizeMode_value = map[string]int32{ "SPEED": 1, "CODE_SIZE": 2, @@ -179,9 +188,11 @@ func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { *p = x return p } + func (x FileOptions_OptimizeMode) String() string { return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) } + func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") if err != nil { @@ -190,8 +201,9 @@ func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { *x = FileOptions_OptimizeMode(value) return nil } + func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{10, 0} + return fileDescriptor_e5baabe45344a177, []int{10, 0} } type FieldOptions_CType int32 @@ -208,6 +220,7 @@ var FieldOptions_CType_name = map[int32]string{ 1: "CORD", 2: "STRING_PIECE", } + var FieldOptions_CType_value = map[string]int32{ "STRING": 0, "CORD": 1, @@ -219,9 +232,11 @@ func (x FieldOptions_CType) Enum() *FieldOptions_CType { *p = x return p } + func (x FieldOptions_CType) String() string { return proto.EnumName(FieldOptions_CType_name, int32(x)) } + func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") if err != nil { @@ -230,8 +245,9 @@ func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { *x = FieldOptions_CType(value) return nil } + func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{12, 0} + return fileDescriptor_e5baabe45344a177, []int{12, 0} } type FieldOptions_JSType int32 @@ -250,6 +266,7 @@ var FieldOptions_JSType_name = map[int32]string{ 1: "JS_STRING", 2: "JS_NUMBER", } + var FieldOptions_JSType_value = map[string]int32{ "JS_NORMAL": 0, "JS_STRING": 1, @@ -261,9 +278,11 @@ func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { *p = x return p } + func (x FieldOptions_JSType) String() string { return proto.EnumName(FieldOptions_JSType_name, int32(x)) } + func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") if err != nil { @@ -272,8 +291,9 @@ func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) 
error { *x = FieldOptions_JSType(value) return nil } + func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{12, 1} + return fileDescriptor_e5baabe45344a177, []int{12, 1} } // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, @@ -292,6 +312,7 @@ var MethodOptions_IdempotencyLevel_name = map[int32]string{ 1: "NO_SIDE_EFFECTS", 2: "IDEMPOTENT", } + var MethodOptions_IdempotencyLevel_value = map[string]int32{ "IDEMPOTENCY_UNKNOWN": 0, "NO_SIDE_EFFECTS": 1, @@ -303,9 +324,11 @@ func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { *p = x return p } + func (x MethodOptions_IdempotencyLevel) String() string { return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) } + func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") if err != nil { @@ -314,8 +337,9 @@ func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { *x = MethodOptions_IdempotencyLevel(value) return nil } + func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{17, 0} + return fileDescriptor_e5baabe45344a177, []int{17, 0} } // The protocol compiler can output a FileDescriptorSet containing the .proto @@ -331,7 +355,7 @@ func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } func (*FileDescriptorSet) ProtoMessage() {} func (*FileDescriptorSet) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{0} + return fileDescriptor_e5baabe45344a177, []int{0} } func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) @@ -392,7 +416,7 @@ func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } func (*FileDescriptorProto) ProtoMessage() {} func (*FileDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{1} + return fileDescriptor_e5baabe45344a177, []int{1} } func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) @@ -519,7 +543,7 @@ func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } func (*DescriptorProto) ProtoMessage() {} func (*DescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{2} + return fileDescriptor_e5baabe45344a177, []int{2} } func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) @@ -622,7 +646,7 @@ func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } func (*DescriptorProto_ExtensionRange) ProtoMessage() {} func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{2, 0} + return fileDescriptor_e5baabe45344a177, []int{2, 0} } func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) @@ -678,7 +702,7 @@ func (m 
*DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_R func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } func (*DescriptorProto_ReservedRange) ProtoMessage() {} func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{2, 1} + return fileDescriptor_e5baabe45344a177, []int{2, 1} } func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) @@ -725,7 +749,7 @@ func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } func (*ExtensionRangeOptions) ProtoMessage() {} func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{3} + return fileDescriptor_e5baabe45344a177, []int{3} } var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ @@ -801,7 +825,7 @@ func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } func (*FieldDescriptorProto) ProtoMessage() {} func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{4} + return fileDescriptor_e5baabe45344a177, []int{4} } func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) @@ -904,7 +928,7 @@ func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } func (*OneofDescriptorProto) ProtoMessage() {} func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{5} + return fileDescriptor_e5baabe45344a177, []int{5} } func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) @@ -959,7 +983,7 @@ func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } func (*EnumDescriptorProto) ProtoMessage() {} func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{6} + return fileDescriptor_e5baabe45344a177, []int{6} } func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) @@ -1032,7 +1056,7 @@ func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescr func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{6, 0} + return fileDescriptor_e5baabe45344a177, []int{6, 0} } func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) @@ -1080,7 +1104,7 @@ func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorPro func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } func (*EnumValueDescriptorProto) ProtoMessage() {} func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{7} + return 
fileDescriptor_e5baabe45344a177, []int{7} } func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) @@ -1135,7 +1159,7 @@ func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } func (*ServiceDescriptorProto) ProtoMessage() {} func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{8} + return fileDescriptor_e5baabe45344a177, []int{8} } func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) @@ -1197,7 +1221,7 @@ func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } func (*MethodDescriptorProto) ProtoMessage() {} func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{9} + return fileDescriptor_e5baabe45344a177, []int{9} } func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) @@ -1349,7 +1373,7 @@ func (m *FileOptions) Reset() { *m = FileOptions{} } func (m *FileOptions) String() string { return proto.CompactTextString(m) } func (*FileOptions) ProtoMessage() {} func (*FileOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{10} + return fileDescriptor_e5baabe45344a177, []int{10} } var extRange_FileOptions = []proto.ExtensionRange{ @@ -1584,7 +1608,7 @@ func (m *MessageOptions) Reset() { *m = MessageOptions{} } func (m *MessageOptions) String() string { return proto.CompactTextString(m) } func (*MessageOptions) ProtoMessage() {} func (*MessageOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{11} + return fileDescriptor_e5baabe45344a177, []int{11} } var extRange_MessageOptions = []proto.ExtensionRange{ @@ -1723,7 +1747,7 @@ func (m *FieldOptions) Reset() { *m = FieldOptions{} } func (m *FieldOptions) String() string { return proto.CompactTextString(m) } func (*FieldOptions) ProtoMessage() {} func (*FieldOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{12} + return fileDescriptor_e5baabe45344a177, []int{12} } var extRange_FieldOptions = []proto.ExtensionRange{ @@ -1819,7 +1843,7 @@ func (m *OneofOptions) Reset() { *m = OneofOptions{} } func (m *OneofOptions) String() string { return proto.CompactTextString(m) } func (*OneofOptions) ProtoMessage() {} func (*OneofOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{13} + return fileDescriptor_e5baabe45344a177, []int{13} } var extRange_OneofOptions = []proto.ExtensionRange{ @@ -1875,7 +1899,7 @@ func (m *EnumOptions) Reset() { *m = EnumOptions{} } func (m *EnumOptions) String() string { return proto.CompactTextString(m) } func (*EnumOptions) ProtoMessage() {} func (*EnumOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{14} + return fileDescriptor_e5baabe45344a177, []int{14} } var extRange_EnumOptions = []proto.ExtensionRange{ @@ -1944,7 +1968,7 @@ func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } func (*EnumValueOptions) ProtoMessage() {} func (*EnumValueOptions) 
Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{15} + return fileDescriptor_e5baabe45344a177, []int{15} } var extRange_EnumValueOptions = []proto.ExtensionRange{ @@ -2006,7 +2030,7 @@ func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } func (*ServiceOptions) ProtoMessage() {} func (*ServiceOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{16} + return fileDescriptor_e5baabe45344a177, []int{16} } var extRange_ServiceOptions = []proto.ExtensionRange{ @@ -2069,7 +2093,7 @@ func (m *MethodOptions) Reset() { *m = MethodOptions{} } func (m *MethodOptions) String() string { return proto.CompactTextString(m) } func (*MethodOptions) ProtoMessage() {} func (*MethodOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{17} + return fileDescriptor_e5baabe45344a177, []int{17} } var extRange_MethodOptions = []proto.ExtensionRange{ @@ -2146,7 +2170,7 @@ func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } func (*UninterpretedOption) ProtoMessage() {} func (*UninterpretedOption) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{18} + return fileDescriptor_e5baabe45344a177, []int{18} } func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) @@ -2232,7 +2256,7 @@ func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOptio func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } func (*UninterpretedOption_NamePart) ProtoMessage() {} func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{18, 0} + return fileDescriptor_e5baabe45344a177, []int{18, 0} } func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) @@ -2322,7 +2346,7 @@ func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } func (*SourceCodeInfo) ProtoMessage() {} func (*SourceCodeInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{19} + return fileDescriptor_e5baabe45344a177, []int{19} } func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) @@ -2439,7 +2463,7 @@ func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } func (*SourceCodeInfo_Location) ProtoMessage() {} func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{19, 0} + return fileDescriptor_e5baabe45344a177, []int{19, 0} } func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) @@ -2510,7 +2534,7 @@ func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } func (*GeneratedCodeInfo) ProtoMessage() {} func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{20} + return fileDescriptor_e5baabe45344a177, 
[]int{20} } func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) @@ -2559,7 +2583,7 @@ func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_ func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_4df4cb5f42392df6, []int{20, 0} + return fileDescriptor_e5baabe45344a177, []int{20, 0} } func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) @@ -2643,11 +2667,9 @@ func init() { proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) } -func init() { - proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor_descriptor_4df4cb5f42392df6) -} +func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor_e5baabe45344a177) } -var fileDescriptor_descriptor_4df4cb5f42392df6 = []byte{ +var fileDescriptor_e5baabe45344a177 = []byte{ // 2555 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x6e, 0x1b, 0xc7, 0xf5, 0xcf, 0xf2, 0x4b, 0xe4, 0x21, 0x45, 0x8d, 0x46, 0x8a, 0xbd, 0x56, 0x3e, 0x2c, 0x33, 0x1f, diff --git a/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/doc.go b/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/doc.go deleted file mode 100644 index 0d6055d610e3..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/doc.go +++ /dev/null @@ -1,51 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -/* - A plugin for the Google protocol buffer compiler to generate Go code. - Run it by building this program and putting it in your path with the name - protoc-gen-go - That word 'go' at the end becomes part of the option string set for the - protocol compiler, so once the protocol compiler (protoc) is installed - you can run - protoc --go_out=output_directory input_directory/file.proto - to generate Go bindings for the protocol defined by file.proto. - With that input, the output will be written to - output_directory/file.pb.go - - The generated code is documented in the package comment for - the library. - - See the README and documentation for protocol buffers to learn more: - https://developers.google.com/protocol-buffers/ - -*/ -package documentation diff --git a/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go b/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go deleted file mode 100644 index c13a9f1e6379..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go +++ /dev/null @@ -1,3086 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* - The code generator for the plugin for the Google protocol buffer compiler. - It generates Go code from the protocol buffer description files read by the - main routine. 
-*/ -package generator - -import ( - "bufio" - "bytes" - "compress/gzip" - "crypto/sha256" - "encoding/hex" - "fmt" - "go/build" - "go/parser" - "go/printer" - "go/token" - "log" - "os" - "path" - "sort" - "strconv" - "strings" - "unicode" - "unicode/utf8" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/protoc-gen-go/generator/internal/remap" - - "github.com/golang/protobuf/protoc-gen-go/descriptor" - plugin "github.com/golang/protobuf/protoc-gen-go/plugin" -) - -// generatedCodeVersion indicates a version of the generated code. -// It is incremented whenever an incompatibility between the generated code and -// proto package is introduced; the generated code references -// a constant, proto.ProtoPackageIsVersionN (where N is generatedCodeVersion). -const generatedCodeVersion = 2 - -// A Plugin provides functionality to add to the output during Go code generation, -// such as to produce RPC stubs. -type Plugin interface { - // Name identifies the plugin. - Name() string - // Init is called once after data structures are built but before - // code generation begins. - Init(g *Generator) - // Generate produces the code generated by the plugin for this file, - // except for the imports, by calling the generator's methods P, In, and Out. - Generate(file *FileDescriptor) - // GenerateImports produces the import declarations for this file. - // It is called after Generate. - GenerateImports(file *FileDescriptor) -} - -var plugins []Plugin - -// RegisterPlugin installs a (second-order) plugin to be run when the Go output is generated. -// It is typically called during initialization. -func RegisterPlugin(p Plugin) { - plugins = append(plugins, p) -} - -// A GoImportPath is the import path of a Go package. e.g., "google.golang.org/genproto/protobuf". -type GoImportPath string - -func (p GoImportPath) String() string { return strconv.Quote(string(p)) } - -// A GoPackageName is the name of a Go package. e.g., "protobuf". -type GoPackageName string - -// Each type we import as a protocol buffer (other than FileDescriptorProto) needs -// a pointer to the FileDescriptorProto that represents it. These types achieve that -// wrapping by placing each Proto inside a struct with the pointer to its File. The -// structs have the same names as their contents, with "Proto" removed. -// FileDescriptor is used to store the things that it points to. - -// The file and package name method are common to messages and enums. -type common struct { - file *FileDescriptor // File this object comes from. -} - -// GoImportPath is the import path of the Go package containing the type. -func (c *common) GoImportPath() GoImportPath { - return c.file.importPath -} - -func (c *common) File() *FileDescriptor { return c.file } - -func fileIsProto3(file *descriptor.FileDescriptorProto) bool { - return file.GetSyntax() == "proto3" -} - -func (c *common) proto3() bool { return fileIsProto3(c.file.FileDescriptorProto) } - -// Descriptor represents a protocol buffer message. -type Descriptor struct { - common - *descriptor.DescriptorProto - parent *Descriptor // The containing message, if any. - nested []*Descriptor // Inner messages, if any. - enums []*EnumDescriptor // Inner enums, if any. - ext []*ExtensionDescriptor // Extensions, if any. - typename []string // Cached typename vector. - index int // The index into the container, whether the file or another message. - path string // The SourceCodeInfo path as comma-separated integers. - group bool -} - -// TypeName returns the elements of the dotted type name. 
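The Plugin interface and RegisterPlugin above are the extension point that the bundled grpc plugin hangs off of. A minimal sketch of a conforming plugin, assuming the deleted generator package were still vendored; the package, the plugin name "comment", and `commentPlugin` are hypothetical:

```go
package myplugin

import "github.com/golang/protobuf/protoc-gen-go/generator"

type commentPlugin struct {
	gen *generator.Generator
}

// Registering from init means importing this package is enough to make the
// plugin available; it is then enabled via the plugins= parameter.
func init() { generator.RegisterPlugin(new(commentPlugin)) }

func (p *commentPlugin) Name() string { return "comment" }

func (p *commentPlugin) Init(g *generator.Generator) { p.gen = g }

// Generate appends one extra comment per generated file via the
// generator's P method.
func (p *commentPlugin) Generate(file *generator.FileDescriptor) {
	p.gen.P("// processed by the comment plugin: ", file.GetName())
}

// GenerateImports is called after Generate; this plugin needs no imports.
func (p *commentPlugin) GenerateImports(file *generator.FileDescriptor) {}
```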
-// The package name is not part of this name. -func (d *Descriptor) TypeName() []string { - if d.typename != nil { - return d.typename - } - n := 0 - for parent := d; parent != nil; parent = parent.parent { - n++ - } - s := make([]string, n) - for parent := d; parent != nil; parent = parent.parent { - n-- - s[n] = parent.GetName() - } - d.typename = s - return s -} - -// EnumDescriptor describes an enum. If it's at top level, its parent will be nil. -// Otherwise it will be the descriptor of the message in which it is defined. -type EnumDescriptor struct { - common - *descriptor.EnumDescriptorProto - parent *Descriptor // The containing message, if any. - typename []string // Cached typename vector. - index int // The index into the container, whether the file or a message. - path string // The SourceCodeInfo path as comma-separated integers. -} - -// TypeName returns the elements of the dotted type name. -// The package name is not part of this name. -func (e *EnumDescriptor) TypeName() (s []string) { - if e.typename != nil { - return e.typename - } - name := e.GetName() - if e.parent == nil { - s = make([]string, 1) - } else { - pname := e.parent.TypeName() - s = make([]string, len(pname)+1) - copy(s, pname) - } - s[len(s)-1] = name - e.typename = s - return s -} - -// Everything but the last element of the full type name, CamelCased. -// The values of type Foo.Bar are called Foo_value1... not Foo_Bar_value1... . -func (e *EnumDescriptor) prefix() string { - if e.parent == nil { - // If the enum is not part of a message, the prefix is just the type name. - return CamelCase(*e.Name) + "_" - } - typeName := e.TypeName() - return CamelCaseSlice(typeName[0:len(typeName)-1]) + "_" - } - -// The integer value of the named constant in this enumerated type. -func (e *EnumDescriptor) integerValueAsString(name string) string { - for _, c := range e.Value { - if c.GetName() == name { - return fmt.Sprint(c.GetNumber()) - } - } - log.Fatal("cannot find value for enum constant") - return "" -} - -// ExtensionDescriptor describes an extension. If it's at top level, its parent will be nil. -// Otherwise it will be the descriptor of the message in which it is defined. -type ExtensionDescriptor struct { - common - *descriptor.FieldDescriptorProto - parent *Descriptor // The containing message, if any. -} - -// TypeName returns the elements of the dotted type name. -// The package name is not part of this name. -func (e *ExtensionDescriptor) TypeName() (s []string) { - name := e.GetName() - if e.parent == nil { - // top-level extension - s = make([]string, 1) - } else { - pname := e.parent.TypeName() - s = make([]string, len(pname)+1) - copy(s, pname) - } - s[len(s)-1] = name - return s -} - -// DescName returns the variable name used for the generated descriptor. -func (e *ExtensionDescriptor) DescName() string { - // The full type name. - typeName := e.TypeName() - // Each scope of the extension is individually CamelCased, and all are joined with "_" with an "E_" prefix. - for i, s := range typeName { - typeName[i] = CamelCase(s) - } - return "E_" + strings.Join(typeName, "_") -} - -// ImportedDescriptor describes a type that has been publicly imported from another file. -type ImportedDescriptor struct { - common - o Object -} - -func (id *ImportedDescriptor) TypeName() []string { return id.o.TypeName() } - -// FileDescriptor describes a protocol buffer descriptor file (.proto). -// It includes slices of all the messages and enums defined within it. -// Those slices are constructed by WrapTypes. 
-type FileDescriptor struct { - *descriptor.FileDescriptorProto - desc []*Descriptor // All the messages defined in this file. - enum []*EnumDescriptor // All the enums defined in this file. - ext []*ExtensionDescriptor // All the top-level extensions defined in this file. - imp []*ImportedDescriptor // All types defined in files publicly imported by this file. - - // Comments, stored as a map of path (comma-separated integers) to the comment. - comments map[string]*descriptor.SourceCodeInfo_Location - - // The full list of symbols that are exported, - // as a map from the exported object to its symbols. - // This is used for supporting public imports. - exported map[Object][]symbol - - fingerprint string // Fingerprint of this file's contents. - importPath GoImportPath // Import path of this file's package. - packageName GoPackageName // Name of this file's Go package. - - proto3 bool // whether to generate proto3 code for this file -} - -// VarName is the variable name we'll use in the generated code to refer -// to the compressed bytes of this descriptor. It is not exported, so -// it is only valid inside the generated package. -func (d *FileDescriptor) VarName() string { - name := strings.Map(badToUnderscore, baseName(d.GetName())) - return fmt.Sprintf("fileDescriptor_%s_%s", name, d.fingerprint) -} - -// goPackageOption interprets the file's go_package option. -// If there is no go_package, it returns ("", "", false). -// If there's a simple name, it returns ("", pkg, true). -// If the option implies an import path, it returns (impPath, pkg, true). -func (d *FileDescriptor) goPackageOption() (impPath GoImportPath, pkg GoPackageName, ok bool) { - opt := d.GetOptions().GetGoPackage() - if opt == "" { - return "", "", false - } - // A semicolon-delimited suffix delimits the import path and package name. - sc := strings.Index(opt, ";") - if sc >= 0 { - return GoImportPath(opt[:sc]), cleanPackageName(opt[sc+1:]), true - } - // The presence of a slash implies there's an import path. - slash := strings.LastIndex(opt, "/") - if slash >= 0 { - return GoImportPath(opt), cleanPackageName(opt[slash+1:]), true - } - return "", cleanPackageName(opt), true -} - -// goFileName returns the output name for the generated Go file. -func (d *FileDescriptor) goFileName(pathType pathType) string { - name := *d.Name - if ext := path.Ext(name); ext == ".proto" || ext == ".protodevel" { - name = name[:len(name)-len(ext)] - } - name += ".pb.go" - - if pathType == pathTypeSourceRelative { - return name - } - - // Does the file have a "go_package" option? - // If it does, it may override the filename. - if impPath, _, ok := d.goPackageOption(); ok && impPath != "" { - // Replace the existing dirname with the declared import path. - _, name = path.Split(name) - name = path.Join(string(impPath), name) - return name - } - - return name -} - -func (d *FileDescriptor) addExport(obj Object, sym symbol) { - d.exported[obj] = append(d.exported[obj], sym) -} - -// symbol is an interface representing an exported Go symbol. -type symbol interface { - // GenerateAlias should generate an appropriate alias - // for the symbol from the named package. 
- GenerateAlias(g *Generator, pkg GoPackageName) -} - -type messageSymbol struct { - sym string - hasExtensions, isMessageSet bool - oneofTypes []string -} - -type getterSymbol struct { - name string - typ string - typeName string // canonical name in proto world; empty for proto.Message and similar - genType bool // whether typ contains a generated type (message/group/enum) -} - -func (ms *messageSymbol) GenerateAlias(g *Generator, pkg GoPackageName) { - g.P("type ", ms.sym, " = ", pkg, ".", ms.sym) - for _, name := range ms.oneofTypes { - g.P("type ", name, " = ", pkg, ".", name) - } -} - -type enumSymbol struct { - name string - proto3 bool // Whether this came from a proto3 file. -} - -func (es enumSymbol) GenerateAlias(g *Generator, pkg GoPackageName) { - s := es.name - g.P("type ", s, " = ", pkg, ".", s) - g.P("var ", s, "_name = ", pkg, ".", s, "_name") - g.P("var ", s, "_value = ", pkg, ".", s, "_value") -} - -type constOrVarSymbol struct { - sym string - typ string // either "const" or "var" - cast string // if non-empty, a type cast is required (used for enums) -} - -func (cs constOrVarSymbol) GenerateAlias(g *Generator, pkg GoPackageName) { - v := string(pkg) + "." + cs.sym - if cs.cast != "" { - v = cs.cast + "(" + v + ")" - } - g.P(cs.typ, " ", cs.sym, " = ", v) -} - -// Object is an interface abstracting the abilities shared by enums, messages, extensions and imported objects. -type Object interface { - GoImportPath() GoImportPath - TypeName() []string - File() *FileDescriptor -} - -// Generator is the type whose methods generate the output, stored in the associated response structure. -type Generator struct { - *bytes.Buffer - - Request *plugin.CodeGeneratorRequest // The input. - Response *plugin.CodeGeneratorResponse // The output. - - Param map[string]string // Command-line parameters. - PackageImportPath string // Go import path of the package we're generating code for - ImportPrefix string // String to prefix to imported package file names. - ImportMap map[string]string // Mapping from .proto file name to import path - - Pkg map[string]string // The names under which we import support packages - - outputImportPath GoImportPath // Package we're generating code for. - allFiles []*FileDescriptor // All files in the tree - allFilesByName map[string]*FileDescriptor // All files by filename. - genFiles []*FileDescriptor // Those files we will generate output for. - file *FileDescriptor // The file we are compiling now. - packageNames map[GoImportPath]GoPackageName // Imported package names in the current file. - usedPackages map[GoImportPath]bool // Packages used in current file. - usedPackageNames map[GoPackageName]bool // Package names used in the current file. - typeNameToObject map[string]Object // Key is a fully-qualified name in input syntax. - init []string // Lines to emit in the init function. - indent string - pathType pathType // How to generate output filenames. - writeOutput bool - annotateCode bool // whether to store annotations - annotations []*descriptor.GeneratedCodeInfo_Annotation // annotations to store -} - -type pathType int - -const ( - pathTypeImport pathType = iota - pathTypeSourceRelative -) - -// New creates a new generator and allocates the request and response protobufs. -func New() *Generator { - g := new(Generator) - g.Buffer = new(bytes.Buffer) - g.Request = new(plugin.CodeGeneratorRequest) - g.Response = new(plugin.CodeGeneratorResponse) - return g -} - -// Error reports a problem, including an error, and exits the program. 
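The three go_package forms that goPackageOption (above) distinguishes are easiest to see with concrete inputs. A small self-contained sketch that mirrors its parsing rules; the real method additionally normalizes the package name through cleanPackageName, which this sketch skips:

```go
package main

import (
	"fmt"
	"strings"
)

// parseGoPackage mirrors goPackageOption: a ";" separates import path from
// package name, a "/" implies an import path, anything else is a bare name.
func parseGoPackage(opt string) (impPath, pkg string, ok bool) {
	if opt == "" {
		return "", "", false
	}
	if sc := strings.Index(opt, ";"); sc >= 0 {
		return opt[:sc], opt[sc+1:], true
	}
	if slash := strings.LastIndex(opt, "/"); slash >= 0 {
		return opt, opt[slash+1:], true
	}
	return "", opt, true
}

func main() {
	for _, opt := range []string{
		"foo",                        // simple name: package "foo", no path
		"github.com/example/bar",     // path implies package "bar"
		"github.com/example/bar;baz", // explicit path and package
	} {
		imp, pkg, _ := parseGoPackage(opt)
		fmt.Printf("%q -> import %q, package %q\n", opt, imp, pkg)
	}
}
```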
-func (g *Generator) Error(err error, msgs ...string) { - s := strings.Join(msgs, " ") + ":" + err.Error() - log.Print("protoc-gen-go: error:", s) - os.Exit(1) -} - -// Fail reports a problem and exits the program. -func (g *Generator) Fail(msgs ...string) { - s := strings.Join(msgs, " ") - log.Print("protoc-gen-go: error:", s) - os.Exit(1) -} - -// CommandLineParameters breaks the comma-separated list of key=value pairs -// in the parameter (a member of the request protobuf) into a key/value map. -// It then sets file name mappings defined by those entries. -func (g *Generator) CommandLineParameters(parameter string) { - g.Param = make(map[string]string) - for _, p := range strings.Split(parameter, ",") { - if i := strings.Index(p, "="); i < 0 { - g.Param[p] = "" - } else { - g.Param[p[0:i]] = p[i+1:] - } - } - - g.ImportMap = make(map[string]string) - pluginList := "none" // Default list of plugin names to enable (empty means all). - for k, v := range g.Param { - switch k { - case "import_prefix": - g.ImportPrefix = v - case "import_path": - g.PackageImportPath = v - case "paths": - switch v { - case "import": - g.pathType = pathTypeImport - case "source_relative": - g.pathType = pathTypeSourceRelative - default: - g.Fail(fmt.Sprintf(`Unknown path type %q: want "import" or "source_relative".`, v)) - } - case "plugins": - pluginList = v - case "annotate_code": - if v == "true" { - g.annotateCode = true - } - default: - if len(k) > 0 && k[0] == 'M' { - g.ImportMap[k[1:]] = v - } - } - } - if pluginList != "" { - // Amend the set of plugins. - enabled := make(map[string]bool) - for _, name := range strings.Split(pluginList, "+") { - enabled[name] = true - } - var nplugins []Plugin - for _, p := range plugins { - if enabled[p.Name()] { - nplugins = append(nplugins, p) - } - } - plugins = nplugins - } -} - -// DefaultPackageName returns the package name printed for the object. -// If its file is in a different package, it returns the package name we're using for this file, plus ".". -// Otherwise it returns the empty string. -func (g *Generator) DefaultPackageName(obj Object) string { - importPath := obj.GoImportPath() - if importPath == g.outputImportPath { - return "" - } - return string(g.GoPackageName(importPath)) + "." -} - -// GoPackageName returns the name used for a package. -func (g *Generator) GoPackageName(importPath GoImportPath) GoPackageName { - if name, ok := g.packageNames[importPath]; ok { - return name - } - name := cleanPackageName(baseName(string(importPath))) - for i, orig := 1, name; g.usedPackageNames[name]; i++ { - name = orig + GoPackageName(strconv.Itoa(i)) - } - g.packageNames[importPath] = name - g.usedPackageNames[name] = true - return name -} - -var globalPackageNames = map[GoPackageName]bool{ - "fmt": true, - "math": true, - "proto": true, -} - -// Create and remember a guaranteed unique package name. Pkg is the candidate name. -// The FileDescriptor parameter is unused. 
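CommandLineParameters (above) receives everything before the ":" in protoc's --go_out value as one comma-separated string. A sketch of the key=value split it performs; the example parameter string is hypothetical, and the real method goes on to strip the leading "M" from import-map entries and to filter the plugin list:

```go
package main

import (
	"fmt"
	"strings"
)

// splitParams mirrors the first loop of CommandLineParameters: split the
// parameter string on commas, then each entry on its first "=".
func splitParams(parameter string) map[string]string {
	params := make(map[string]string)
	for _, p := range strings.Split(parameter, ",") {
		if i := strings.Index(p, "="); i < 0 {
			params[p] = ""
		} else {
			params[p[:i]] = p[i+1:]
		}
	}
	return params
}

func main() {
	// As produced by, e.g.:
	//   protoc --go_out=plugins=grpc,paths=source_relative,Mfoo.proto=example.com/foo:.
	m := splitParams("plugins=grpc,paths=source_relative,Mfoo.proto=example.com/foo")
	fmt.Println(m["plugins"], m["paths"], m["Mfoo.proto"])
}
```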
-func RegisterUniquePackageName(pkg string, f *FileDescriptor) string { - name := cleanPackageName(pkg) - for i, orig := 1, name; globalPackageNames[name]; i++ { - name = orig + GoPackageName(strconv.Itoa(i)) - } - globalPackageNames[name] = true - return string(name) -} - -var isGoKeyword = map[string]bool{ - "break": true, - "case": true, - "chan": true, - "const": true, - "continue": true, - "default": true, - "else": true, - "defer": true, - "fallthrough": true, - "for": true, - "func": true, - "go": true, - "goto": true, - "if": true, - "import": true, - "interface": true, - "map": true, - "package": true, - "range": true, - "return": true, - "select": true, - "struct": true, - "switch": true, - "type": true, - "var": true, -} - -func cleanPackageName(name string) GoPackageName { - name = strings.Map(badToUnderscore, name) - // Identifier must not be keyword: insert _. - if isGoKeyword[name] { - name = "_" + name - } - // Identifier must not begin with digit: insert _. - if r, _ := utf8.DecodeRuneInString(name); unicode.IsDigit(r) { - name = "_" + name - } - return GoPackageName(name) -} - -// defaultGoPackage returns the package name to use, -// derived from the import path of the package we're building code for. -func (g *Generator) defaultGoPackage() GoPackageName { - p := g.PackageImportPath - if i := strings.LastIndex(p, "/"); i >= 0 { - p = p[i+1:] - } - return cleanPackageName(p) -} - -// SetPackageNames sets the package name for this run. -// The package name must agree across all files being generated. -// It also defines unique package names for all imported files. -func (g *Generator) SetPackageNames() { - g.outputImportPath = g.genFiles[0].importPath - - defaultPackageNames := make(map[GoImportPath]GoPackageName) - for _, f := range g.genFiles { - if _, p, ok := f.goPackageOption(); ok { - defaultPackageNames[f.importPath] = p - } - } - for _, f := range g.genFiles { - if _, p, ok := f.goPackageOption(); ok { - // Source file: option go_package = "quux/bar"; - f.packageName = p - } else if p, ok := defaultPackageNames[f.importPath]; ok { - // A go_package option in another file in the same package. - // - // This is a poor choice in general, since every source file should - // contain a go_package option. Supported mainly for historical - // compatibility. - f.packageName = p - } else if p := g.defaultGoPackage(); p != "" { - // Command-line: import_path=quux/bar. - // - // The import_path flag sets a package name for files which don't - // contain a go_package option. - f.packageName = p - } else if p := f.GetPackage(); p != "" { - // Source file: package quux.bar; - f.packageName = cleanPackageName(p) - } else { - // Source filename. - f.packageName = cleanPackageName(baseName(f.GetName())) - } - } - - // Check that all files have a consistent package name and import path. - for _, f := range g.genFiles[1:] { - if a, b := g.genFiles[0].importPath, f.importPath; a != b { - g.Fail(fmt.Sprintf("inconsistent package import paths: %v, %v", a, b)) - } - if a, b := g.genFiles[0].packageName, f.packageName; a != b { - g.Fail(fmt.Sprintf("inconsistent package names: %v, %v", a, b)) - } - } - - // Names of support packages. These never vary (if there are conflicts, - // we rename the conflicting package), so this could be removed someday. 
- g.Pkg = map[string]string{ - "fmt": "fmt", - "math": "math", - "proto": "proto", - } -} - -// WrapTypes walks the incoming data, wrapping DescriptorProtos, EnumDescriptorProtos -// and FileDescriptorProtos into file-referenced objects within the Generator. -// It also creates the list of files to generate and so should be called before GenerateAllFiles. -func (g *Generator) WrapTypes() { - g.allFiles = make([]*FileDescriptor, 0, len(g.Request.ProtoFile)) - g.allFilesByName = make(map[string]*FileDescriptor, len(g.allFiles)) - genFileNames := make(map[string]bool) - for _, n := range g.Request.FileToGenerate { - genFileNames[n] = true - } - for _, f := range g.Request.ProtoFile { - fd := &FileDescriptor{ - FileDescriptorProto: f, - exported: make(map[Object][]symbol), - proto3: fileIsProto3(f), - } - // The import path may be set in a number of ways. - if substitution, ok := g.ImportMap[f.GetName()]; ok { - // Command-line: M=foo.proto=quux/bar. - // - // Explicit mapping of source file to import path. - fd.importPath = GoImportPath(substitution) - } else if genFileNames[f.GetName()] && g.PackageImportPath != "" { - // Command-line: import_path=quux/bar. - // - // The import_path flag sets the import path for every file that - // we generate code for. - fd.importPath = GoImportPath(g.PackageImportPath) - } else if p, _, _ := fd.goPackageOption(); p != "" { - // Source file: option go_package = "quux/bar"; - // - // The go_package option sets the import path. Most users should use this. - fd.importPath = p - } else { - // Source filename. - // - // Last resort when nothing else is available. - fd.importPath = GoImportPath(path.Dir(f.GetName())) - } - // We must wrap the descriptors before we wrap the enums - fd.desc = wrapDescriptors(fd) - g.buildNestedDescriptors(fd.desc) - fd.enum = wrapEnumDescriptors(fd, fd.desc) - g.buildNestedEnums(fd.desc, fd.enum) - fd.ext = wrapExtensions(fd) - extractComments(fd) - g.allFiles = append(g.allFiles, fd) - g.allFilesByName[f.GetName()] = fd - } - for _, fd := range g.allFiles { - fd.imp = wrapImported(fd, g) - } - - g.genFiles = make([]*FileDescriptor, 0, len(g.Request.FileToGenerate)) - for _, fileName := range g.Request.FileToGenerate { - fd := g.allFilesByName[fileName] - if fd == nil { - g.Fail("could not find file named", fileName) - } - fingerprint, err := fingerprintProto(fd.FileDescriptorProto) - if err != nil { - g.Error(err) - } - fd.fingerprint = fingerprint - g.genFiles = append(g.genFiles, fd) - } -} - -// fingerprintProto returns a fingerprint for a message. -// The fingerprint is intended to prevent conflicts between generated files, -// not to provide cryptographic security. -func fingerprintProto(m proto.Message) (string, error) { - b, err := proto.Marshal(m) - if err != nil { - return "", err - } - h := sha256.Sum256(b) - return hex.EncodeToString(h[:8]), nil -} - -// Scan the descriptors in this file. 
For each one, build the slice of nested descriptors -func (g *Generator) buildNestedDescriptors(descs []*Descriptor) { - for _, desc := range descs { - if len(desc.NestedType) != 0 { - for _, nest := range descs { - if nest.parent == desc { - desc.nested = append(desc.nested, nest) - } - } - if len(desc.nested) != len(desc.NestedType) { - g.Fail("internal error: nesting failure for", desc.GetName()) - } - } - } -} - -func (g *Generator) buildNestedEnums(descs []*Descriptor, enums []*EnumDescriptor) { - for _, desc := range descs { - if len(desc.EnumType) != 0 { - for _, enum := range enums { - if enum.parent == desc { - desc.enums = append(desc.enums, enum) - } - } - if len(desc.enums) != len(desc.EnumType) { - g.Fail("internal error: enum nesting failure for", desc.GetName()) - } - } - } -} - -// Construct the Descriptor -func newDescriptor(desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *Descriptor { - d := &Descriptor{ - common: common{file}, - DescriptorProto: desc, - parent: parent, - index: index, - } - if parent == nil { - d.path = fmt.Sprintf("%d,%d", messagePath, index) - } else { - d.path = fmt.Sprintf("%s,%d,%d", parent.path, messageMessagePath, index) - } - - // The only way to distinguish a group from a message is whether - // the containing message has a TYPE_GROUP field that matches. - if parent != nil { - parts := d.TypeName() - if file.Package != nil { - parts = append([]string{*file.Package}, parts...) - } - exp := "." + strings.Join(parts, ".") - for _, field := range parent.Field { - if field.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP && field.GetTypeName() == exp { - d.group = true - break - } - } - } - - for _, field := range desc.Extension { - d.ext = append(d.ext, &ExtensionDescriptor{common{file}, field, d}) - } - - return d -} - -// Return a slice of all the Descriptors defined within this file -func wrapDescriptors(file *FileDescriptor) []*Descriptor { - sl := make([]*Descriptor, 0, len(file.MessageType)+10) - for i, desc := range file.MessageType { - sl = wrapThisDescriptor(sl, desc, nil, file, i) - } - return sl -} - -// Wrap this Descriptor, recursively -func wrapThisDescriptor(sl []*Descriptor, desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) []*Descriptor { - sl = append(sl, newDescriptor(desc, parent, file, index)) - me := sl[len(sl)-1] - for i, nested := range desc.NestedType { - sl = wrapThisDescriptor(sl, nested, me, file, i) - } - return sl -} - -// Construct the EnumDescriptor -func newEnumDescriptor(desc *descriptor.EnumDescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *EnumDescriptor { - ed := &EnumDescriptor{ - common: common{file}, - EnumDescriptorProto: desc, - parent: parent, - index: index, - } - if parent == nil { - ed.path = fmt.Sprintf("%d,%d", enumPath, index) - } else { - ed.path = fmt.Sprintf("%s,%d,%d", parent.path, messageEnumPath, index) - } - return ed -} - -// Return a slice of all the EnumDescriptors defined within this file -func wrapEnumDescriptors(file *FileDescriptor, descs []*Descriptor) []*EnumDescriptor { - sl := make([]*EnumDescriptor, 0, len(file.EnumType)+10) - // Top-level enums. - for i, enum := range file.EnumType { - sl = append(sl, newEnumDescriptor(enum, nil, file, i)) - } - // Enums within messages. Enums within embedded messages appear in the outer-most message. 
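The path strings built by newDescriptor and newEnumDescriptor above are SourceCodeInfo paths: field numbers from descriptor.proto interleaved with indices and joined by commas. A tiny illustration using the well-known numbers (FileDescriptorProto.message_type = 4, DescriptorProto.field = 2) that the messagePath-style constants in this file refer to:

```go
package main

import "fmt"

func main() {
	const (
		messagePath      = 4 // FileDescriptorProto.message_type
		messageFieldPath = 2 // DescriptorProto.field
	)
	// Path to the second field of the first top-level message; keys of this
	// shape index the per-file comments map.
	p := fmt.Sprintf("%d,%d,%d,%d", messagePath, 0, messageFieldPath, 1)
	fmt.Println(p) // "4,0,2,1"
}
```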
- for _, nested := range descs { - for i, enum := range nested.EnumType { - sl = append(sl, newEnumDescriptor(enum, nested, file, i)) - } - } - return sl -} - -// Return a slice of all the top-level ExtensionDescriptors defined within this file. -func wrapExtensions(file *FileDescriptor) []*ExtensionDescriptor { - var sl []*ExtensionDescriptor - for _, field := range file.Extension { - sl = append(sl, &ExtensionDescriptor{common{file}, field, nil}) - } - return sl -} - -// Return a slice of all the types that are publicly imported into this file. -func wrapImported(file *FileDescriptor, g *Generator) (sl []*ImportedDescriptor) { - for _, index := range file.PublicDependency { - df := g.fileByName(file.Dependency[index]) - for _, d := range df.desc { - if d.GetOptions().GetMapEntry() { - continue - } - sl = append(sl, &ImportedDescriptor{common{file}, d}) - } - for _, e := range df.enum { - sl = append(sl, &ImportedDescriptor{common{file}, e}) - } - for _, ext := range df.ext { - sl = append(sl, &ImportedDescriptor{common{file}, ext}) - } - } - return -} - -func extractComments(file *FileDescriptor) { - file.comments = make(map[string]*descriptor.SourceCodeInfo_Location) - for _, loc := range file.GetSourceCodeInfo().GetLocation() { - if loc.LeadingComments == nil { - continue - } - var p []string - for _, n := range loc.Path { - p = append(p, strconv.Itoa(int(n))) - } - file.comments[strings.Join(p, ",")] = loc - } -} - -// BuildTypeNameMap builds the map from fully qualified type names to objects. -// The key names for the map come from the input data, which puts a period at the beginning. -// It should be called after SetPackageNames and before GenerateAllFiles. -func (g *Generator) BuildTypeNameMap() { - g.typeNameToObject = make(map[string]Object) - for _, f := range g.allFiles { - // The names in this loop are defined by the proto world, not us, so the - // package name may be empty. If so, the dotted package name of X will - // be ".X"; otherwise it will be ".pkg.X". - dottedPkg := "." + f.GetPackage() - if dottedPkg != "." { - dottedPkg += "." - } - for _, enum := range f.enum { - name := dottedPkg + dottedSlice(enum.TypeName()) - g.typeNameToObject[name] = enum - } - for _, desc := range f.desc { - name := dottedPkg + dottedSlice(desc.TypeName()) - g.typeNameToObject[name] = desc - } - } -} - -// ObjectNamed, given a fully-qualified input type name as it appears in the input data, -// returns the descriptor for the message or enum with that name. -func (g *Generator) ObjectNamed(typeName string) Object { - o, ok := g.typeNameToObject[typeName] - if !ok { - g.Fail("can't find object with type", typeName) - } - - // If the file of this object isn't a direct dependency of the current file, - // or in the current file, then this object has been publicly imported into - // a dependency of the current file. - // We should return the ImportedDescriptor object for it instead. - direct := *o.File().Name == *g.file.Name - if !direct { - for _, dep := range g.file.Dependency { - if *g.fileByName(dep).Name == *o.File().Name { - direct = true - break - } - } - } - if !direct { - found := false - Loop: - for _, dep := range g.file.Dependency { - df := g.fileByName(*g.fileByName(dep).Name) - for _, td := range df.imp { - if td.o == o { - // Found it! 
- o = td - found = true - break Loop - } - } - } - if !found { - log.Printf("protoc-gen-go: WARNING: failed finding publicly imported dependency for %v, used in %v", typeName, *g.file.Name) - } - } - - return o -} - -// AnnotatedAtoms is a list of atoms (as consumed by P) that records the file name and proto AST path from which they originated. -type AnnotatedAtoms struct { - source string - path string - atoms []interface{} -} - -// Annotate records the file name and proto AST path of a list of atoms -// so that a later call to P can emit a link from each atom to its origin. -func Annotate(file *FileDescriptor, path string, atoms ...interface{}) *AnnotatedAtoms { - return &AnnotatedAtoms{source: *file.Name, path: path, atoms: atoms} -} - -// printAtom prints the (atomic, non-annotation) argument to the generated output. -func (g *Generator) printAtom(v interface{}) { - switch v := v.(type) { - case string: - g.WriteString(v) - case *string: - g.WriteString(*v) - case bool: - fmt.Fprint(g, v) - case *bool: - fmt.Fprint(g, *v) - case int: - fmt.Fprint(g, v) - case *int32: - fmt.Fprint(g, *v) - case *int64: - fmt.Fprint(g, *v) - case float64: - fmt.Fprint(g, v) - case *float64: - fmt.Fprint(g, *v) - case GoPackageName: - g.WriteString(string(v)) - case GoImportPath: - g.WriteString(strconv.Quote(string(v))) - default: - g.Fail(fmt.Sprintf("unknown type in printer: %T", v)) - } -} - -// P prints the arguments to the generated output. It handles strings and int32s, plus -// handling indirections because they may be *string, etc. Any inputs of type AnnotatedAtoms may emit -// annotations in a .meta file in addition to outputting the atoms themselves (if g.annotateCode -// is true). -func (g *Generator) P(str ...interface{}) { - if !g.writeOutput { - return - } - g.WriteString(g.indent) - for _, v := range str { - switch v := v.(type) { - case *AnnotatedAtoms: - begin := int32(g.Len()) - for _, v := range v.atoms { - g.printAtom(v) - } - if g.annotateCode { - end := int32(g.Len()) - var path []int32 - for _, token := range strings.Split(v.path, ",") { - val, err := strconv.ParseInt(token, 10, 32) - if err != nil { - g.Fail("could not parse proto AST path: ", err.Error()) - } - path = append(path, int32(val)) - } - g.annotations = append(g.annotations, &descriptor.GeneratedCodeInfo_Annotation{ - Path: path, - SourceFile: &v.source, - Begin: &begin, - End: &end, - }) - } - default: - g.printAtom(v) - } - } - g.WriteByte('\n') -} - -// addInitf stores the given statement to be printed inside the file's init function. -// The statement is given as a format specifier and arguments. -func (g *Generator) addInitf(stmt string, a ...interface{}) { - g.init = append(g.init, fmt.Sprintf(stmt, a...)) -} - -// In Indents the output one tab stop. -func (g *Generator) In() { g.indent += "\t" } - -// Out unindents the output one tab stop. -func (g *Generator) Out() { - if len(g.indent) > 0 { - g.indent = g.indent[1:] - } -} - -// GenerateAllFiles generates the output for all the files we're outputting. -func (g *Generator) GenerateAllFiles() { - // Initialize the plugins - for _, p := range plugins { - p.Init(g) - } - // Generate the output. The generator runs for every file, even the files - // that we don't generate output for, so that we can collate the full list - // of exported symbols to support public imports. 
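P, In and Out (above) are essentially the whole output API generation code sees: P emits one indent-prefixed line from mixed atoms, while In/Out move the indent one tab stop. A sketch of how they are typically driven; `emitGetter` and its arguments are hypothetical, and the exact indentation hardly matters since generate() later runs the output through go/printer anyway:

```go
package sketch

import "github.com/golang/protobuf/protoc-gen-go/generator"

// emitGetter writes a trivial string getter for a hypothetical message
// type using only the generator's line-oriented primitives.
func emitGetter(g *generator.Generator, msg, field string) {
	g.P("func (m *", msg, ") Get", field, "() string {")
	g.In()
	g.P("if m != nil {")
	g.In()
	g.P("return m.", field)
	g.Out()
	g.P("}")
	g.P(`return ""`)
	g.Out()
	g.P("}")
}
```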
- genFileMap := make(map[*FileDescriptor]bool, len(g.genFiles)) - for _, file := range g.genFiles { - genFileMap[file] = true - } - for _, file := range g.allFiles { - g.Reset() - g.annotations = nil - g.writeOutput = genFileMap[file] - g.generate(file) - if !g.writeOutput { - continue - } - fname := file.goFileName(g.pathType) - g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{ - Name: proto.String(fname), - Content: proto.String(g.String()), - }) - if g.annotateCode { - // Store the generated code annotations in text, as the protoc plugin protocol requires that - // strings contain valid UTF-8. - g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{ - Name: proto.String(file.goFileName(g.pathType) + ".meta"), - Content: proto.String(proto.CompactTextString(&descriptor.GeneratedCodeInfo{Annotation: g.annotations})), - }) - } - } -} - -// Run all the plugins associated with the file. -func (g *Generator) runPlugins(file *FileDescriptor) { - for _, p := range plugins { - p.Generate(file) - } -} - -// Fill the response protocol buffer with the generated output for all the files we're -// supposed to generate. -func (g *Generator) generate(file *FileDescriptor) { - g.file = file - g.usedPackages = make(map[GoImportPath]bool) - g.packageNames = make(map[GoImportPath]GoPackageName) - g.usedPackageNames = make(map[GoPackageName]bool) - for name := range globalPackageNames { - g.usedPackageNames[name] = true - } - - g.P("// This is a compile-time assertion to ensure that this generated file") - g.P("// is compatible with the proto package it is being compiled against.") - g.P("// A compilation error at this line likely means your copy of the") - g.P("// proto package needs to be updated.") - g.P("const _ = ", g.Pkg["proto"], ".ProtoPackageIsVersion", generatedCodeVersion, " // please upgrade the proto package") - g.P() - - for _, td := range g.file.imp { - g.generateImported(td) - } - for _, enum := range g.file.enum { - g.generateEnum(enum) - } - for _, desc := range g.file.desc { - // Don't generate virtual messages for maps. - if desc.GetOptions().GetMapEntry() { - continue - } - g.generateMessage(desc) - } - for _, ext := range g.file.ext { - g.generateExtension(ext) - } - g.generateInitFunction() - - // Run the plugins before the imports so we know which imports are necessary. - g.runPlugins(file) - - g.generateFileDescriptor(file) - - // Generate header and imports last, though they appear first in the output. - rem := g.Buffer - remAnno := g.annotations - g.Buffer = new(bytes.Buffer) - g.annotations = nil - g.generateHeader() - g.generateImports() - if !g.writeOutput { - return - } - // Adjust the offsets for annotations displaced by the header and imports. - for _, anno := range remAnno { - *anno.Begin += int32(g.Len()) - *anno.End += int32(g.Len()) - g.annotations = append(g.annotations, anno) - } - g.Write(rem.Bytes()) - - // Reformat generated code and patch annotation locations. - fset := token.NewFileSet() - original := g.Bytes() - if g.annotateCode { - // make a copy independent of g; we'll need it after Reset. - original = append([]byte(nil), original...) - } - ast, err := parser.ParseFile(fset, "", original, parser.ParseComments) - if err != nil { - // Print out the bad code with line numbers. - // This should never happen in practice, but it can while changing generated code, - // so consider this a debugging aid. 
- var src bytes.Buffer - s := bufio.NewScanner(bytes.NewReader(original)) - for line := 1; s.Scan(); line++ { - fmt.Fprintf(&src, "%5d\t%s\n", line, s.Bytes()) - } - g.Fail("bad Go source code was generated:", err.Error(), "\n"+src.String()) - } - g.Reset() - err = (&printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}).Fprint(g, fset, ast) - if err != nil { - g.Fail("generated Go source code could not be reformatted:", err.Error()) - } - if g.annotateCode { - m, err := remap.Compute(original, g.Bytes()) - if err != nil { - g.Fail("formatted generated Go source code could not be mapped back to the original code:", err.Error()) - } - for _, anno := range g.annotations { - new, ok := m.Find(int(*anno.Begin), int(*anno.End)) - if !ok { - g.Fail("span in formatted generated Go source code could not be mapped back to the original code") - } - *anno.Begin = int32(new.Pos) - *anno.End = int32(new.End) - } - } -} - -// Generate the header, including package definition -func (g *Generator) generateHeader() { - g.P("// Code generated by protoc-gen-go. DO NOT EDIT.") - if g.file.GetOptions().GetDeprecated() { - g.P("// ", g.file.Name, " is a deprecated file.") - } else { - g.P("// source: ", g.file.Name) - } - g.P() - - importPath, _, _ := g.file.goPackageOption() - if importPath == "" { - g.P("package ", g.file.packageName) - } else { - g.P("package ", g.file.packageName, " // import ", GoImportPath(g.ImportPrefix)+importPath) - } - g.P() - - if loc, ok := g.file.comments[strconv.Itoa(packagePath)]; ok { - g.P("/*") - // not using g.PrintComments because this is a /* */ comment block. - text := strings.TrimSuffix(loc.GetLeadingComments(), "\n") - for _, line := range strings.Split(text, "\n") { - line = strings.TrimPrefix(line, " ") - // ensure we don't escape from the block comment - line = strings.Replace(line, "*/", "* /", -1) - g.P(line) - } - g.P("*/") - g.P() - } -} - -// deprecationComment is the standard comment added to deprecated -// messages, fields, enums, and enum values. -var deprecationComment = "// Deprecated: Do not use." - -// PrintComments prints any comments from the source .proto file. -// The path is a comma-separated list of integers. -// It returns an indication of whether any comments were printed. -// See descriptor.proto for its format. -func (g *Generator) PrintComments(path string) bool { - if !g.writeOutput { - return false - } - if c, ok := g.makeComments(path); ok { - g.P(c) - return true - } - return false -} - -// makeComments generates the comment string for the field, no "\n" at the end -func (g *Generator) makeComments(path string) (string, bool) { - loc, ok := g.file.comments[path] - if !ok { - return "", false - } - w := new(bytes.Buffer) - nl := "" - for _, line := range strings.Split(strings.TrimSuffix(loc.GetLeadingComments(), "\n"), "\n") { - fmt.Fprintf(w, "%s// %s", nl, strings.TrimPrefix(line, " ")) - nl = "\n" - } - return w.String(), true -} - -func (g *Generator) fileByName(filename string) *FileDescriptor { - return g.allFilesByName[filename] -} - -// weak returns whether the ith import of the current file is a weak import. -func (g *Generator) weak(i int32) bool { - for _, j := range g.file.WeakDependency { - if j == i { - return true - } - } - return false -} - -// Generate the imports -func (g *Generator) generateImports() { - // We almost always need a proto import. Rather than computing when we - // do, which is tricky when there's a plugin, just import it and - // reference it later. 
The same argument applies to the fmt and math packages. - g.P("import "+g.Pkg["proto"]+" ", GoImportPath(g.ImportPrefix)+"github.com/golang/protobuf/proto") - g.P("import " + g.Pkg["fmt"] + ` "fmt"`) - g.P("import " + g.Pkg["math"] + ` "math"`) - var ( - imports = make(map[GoImportPath]bool) - strongImports = make(map[GoImportPath]bool) - importPaths []string - ) - for i, s := range g.file.Dependency { - fd := g.fileByName(s) - importPath := fd.importPath - // Do not import our own package. - if importPath == g.file.importPath { - continue - } - if !imports[importPath] { - importPaths = append(importPaths, string(importPath)) - } - imports[importPath] = true - if !g.weak(int32(i)) { - strongImports[importPath] = true - } - } - sort.Strings(importPaths) - for i := range importPaths { - importPath := GoImportPath(importPaths[i]) - packageName := g.GoPackageName(importPath) - fullPath := GoImportPath(g.ImportPrefix) + importPath - // Skip weak imports. - if !strongImports[importPath] { - g.P("// skipping weak import ", packageName, " ", fullPath) - continue - } - // We need to import all the dependencies, even if we don't reference them, - // because other code and tools depend on having the full transitive closure - // of protocol buffer types in the binary. - if _, ok := g.usedPackages[importPath]; !ok { - packageName = "_" - } - g.P("import ", packageName, " ", fullPath) - } - g.P() - // TODO: may need to worry about uniqueness across plugins - for _, p := range plugins { - p.GenerateImports(g.file) - g.P() - } - g.P("// Reference imports to suppress errors if they are not otherwise used.") - g.P("var _ = ", g.Pkg["proto"], ".Marshal") - g.P("var _ = ", g.Pkg["fmt"], ".Errorf") - g.P("var _ = ", g.Pkg["math"], ".Inf") - g.P() -} - -func (g *Generator) generateImported(id *ImportedDescriptor) { - tn := id.TypeName() - sn := tn[len(tn)-1] - df := id.o.File() - filename := *df.Name - if df.importPath == g.file.importPath { - // Don't generate type aliases for files in the same Go package as this one. - g.P("// Ignoring public import of ", sn, " from ", filename) - g.P() - return - } - if !supportTypeAliases { - g.Fail(fmt.Sprintf("%s: public imports require at least go1.9", filename)) - } - g.P("// ", sn, " from public import ", filename) - g.usedPackages[df.importPath] = true - - for _, sym := range df.exported[id.o] { - sym.GenerateAlias(g, g.GoPackageName(df.importPath)) - } - - g.P() -} - -// Generate the enum definitions for this EnumDescriptor. -func (g *Generator) generateEnum(enum *EnumDescriptor) { - // The full type name - typeName := enum.TypeName() - // The full type name, CamelCased. 
- ccTypeName := CamelCaseSlice(typeName) - ccPrefix := enum.prefix() - - deprecatedEnum := "" - if enum.GetOptions().GetDeprecated() { - deprecatedEnum = deprecationComment - } - g.PrintComments(enum.path) - g.P("type ", Annotate(enum.file, enum.path, ccTypeName), " int32", deprecatedEnum) - g.file.addExport(enum, enumSymbol{ccTypeName, enum.proto3()}) - g.P("const (") - for i, e := range enum.Value { - etorPath := fmt.Sprintf("%s,%d,%d", enum.path, enumValuePath, i) - g.PrintComments(etorPath) - - deprecatedValue := "" - if e.GetOptions().GetDeprecated() { - deprecatedValue = deprecationComment - } - - name := ccPrefix + *e.Name - g.P(Annotate(enum.file, etorPath, name), " ", ccTypeName, " = ", e.Number, " ", deprecatedValue) - g.file.addExport(enum, constOrVarSymbol{name, "const", ccTypeName}) - } - g.P(")") - g.P("var ", ccTypeName, "_name = map[int32]string{") - generated := make(map[int32]bool) // avoid duplicate values - for _, e := range enum.Value { - duplicate := "" - if _, present := generated[*e.Number]; present { - duplicate = "// Duplicate value: " - } - g.P(duplicate, e.Number, ": ", strconv.Quote(*e.Name), ",") - generated[*e.Number] = true - } - g.P("}") - g.P("var ", ccTypeName, "_value = map[string]int32{") - for _, e := range enum.Value { - g.P(strconv.Quote(*e.Name), ": ", e.Number, ",") - } - g.P("}") - - if !enum.proto3() { - g.P("func (x ", ccTypeName, ") Enum() *", ccTypeName, " {") - g.P("p := new(", ccTypeName, ")") - g.P("*p = x") - g.P("return p") - g.P("}") - } - - g.P("func (x ", ccTypeName, ") String() string {") - g.P("return ", g.Pkg["proto"], ".EnumName(", ccTypeName, "_name, int32(x))") - g.P("}") - - if !enum.proto3() { - g.P("func (x *", ccTypeName, ") UnmarshalJSON(data []byte) error {") - g.P("value, err := ", g.Pkg["proto"], ".UnmarshalJSONEnum(", ccTypeName, `_value, data, "`, ccTypeName, `")`) - g.P("if err != nil {") - g.P("return err") - g.P("}") - g.P("*x = ", ccTypeName, "(value)") - g.P("return nil") - g.P("}") - } - - var indexes []string - for m := enum.parent; m != nil; m = m.parent { - // XXX: skip groups? - indexes = append([]string{strconv.Itoa(m.index)}, indexes...) - } - indexes = append(indexes, strconv.Itoa(enum.index)) - g.P("func (", ccTypeName, ") EnumDescriptor() ([]byte, []int) {") - g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}") - g.P("}") - if enum.file.GetPackage() == "google.protobuf" && enum.GetName() == "NullValue" { - g.P("func (", ccTypeName, `) XXX_WellKnownType() string { return "`, enum.GetName(), `" }`) - } - - g.P() -} - -// The tag is a string like "varint,2,opt,name=fieldname,def=7" that -// identifies details of the field for the protocol buffer marshaling and unmarshaling -// code. The fields are: -// wire encoding -// protocol tag number -// opt,req,rep for optional, required, or repeated -// packed whether the encoding is "packed" (optional; repeated primitives only) -// name= the original declared name -// enum= the name of the enum type if it is an enum-typed field. -// proto3 if this field is in a proto3 message -// def= string representation of the default value, if any. -// The default value must be in a representation that can be used at run-time -// to generate the default value. Thus bools become 0 and 1, for instance. 
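To make the tag grammar above concrete: for a hypothetical proto2 field `optional int32 region_id = 8 [default = 7];`, goTag (just below) would produce a struct tag like the one in this sketch; the message name Region is assumed:

```go
package example

// Generated-style field for the proto2 declaration above: wire type
// "varint", field number 8, optional, the declared and JSON names, and
// the declared default carried in def=.
type Region struct {
	RegionId *int32 `protobuf:"varint,8,opt,name=region_id,json=regionId,def=7"`
}
```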
-func (g *Generator) goTag(message *Descriptor, field *descriptor.FieldDescriptorProto, wiretype string) string { - optrepreq := "" - switch { - case isOptional(field): - optrepreq = "opt" - case isRequired(field): - optrepreq = "req" - case isRepeated(field): - optrepreq = "rep" - } - var defaultValue string - if dv := field.DefaultValue; dv != nil { // set means an explicit default - defaultValue = *dv - // Some types need tweaking. - switch *field.Type { - case descriptor.FieldDescriptorProto_TYPE_BOOL: - if defaultValue == "true" { - defaultValue = "1" - } else { - defaultValue = "0" - } - case descriptor.FieldDescriptorProto_TYPE_STRING, - descriptor.FieldDescriptorProto_TYPE_BYTES: - // Nothing to do. Quoting is done for the whole tag. - case descriptor.FieldDescriptorProto_TYPE_ENUM: - // For enums we need to provide the integer constant. - obj := g.ObjectNamed(field.GetTypeName()) - if id, ok := obj.(*ImportedDescriptor); ok { - // It is an enum that was publicly imported. - // We need the underlying type. - obj = id.o - } - enum, ok := obj.(*EnumDescriptor) - if !ok { - log.Printf("obj is a %T", obj) - if id, ok := obj.(*ImportedDescriptor); ok { - log.Printf("id.o is a %T", id.o) - } - g.Fail("unknown enum type", CamelCaseSlice(obj.TypeName())) - } - defaultValue = enum.integerValueAsString(defaultValue) - } - defaultValue = ",def=" + defaultValue - } - enum := "" - if *field.Type == descriptor.FieldDescriptorProto_TYPE_ENUM { - // We avoid using obj.GoPackageName(), because we want to use the - // original (proto-world) package name. - obj := g.ObjectNamed(field.GetTypeName()) - if id, ok := obj.(*ImportedDescriptor); ok { - obj = id.o - } - enum = ",enum=" - if pkg := obj.File().GetPackage(); pkg != "" { - enum += pkg + "." - } - enum += CamelCaseSlice(obj.TypeName()) - } - packed := "" - if (field.Options != nil && field.Options.GetPacked()) || - // Per https://developers.google.com/protocol-buffers/docs/proto3#simple: - // "In proto3, repeated fields of scalar numeric types use packed encoding by default." - (message.proto3() && (field.Options == nil || field.Options.Packed == nil) && - isRepeated(field) && isScalar(field)) { - packed = ",packed" - } - fieldName := field.GetName() - name := fieldName - if *field.Type == descriptor.FieldDescriptorProto_TYPE_GROUP { - // We must use the type name for groups instead of - // the field name to preserve capitalization. - // type_name in FieldDescriptorProto is fully-qualified, - // but we only want the local part. - name = *field.TypeName - if i := strings.LastIndex(name, "."); i >= 0 { - name = name[i+1:] - } - } - if json := field.GetJsonName(); json != "" && json != name { - // TODO: escaping might be needed, in which case - // perhaps this should be in its own "json" tag. - name += ",json=" + json - } - name = ",name=" + name - if message.proto3() { - name += ",proto3" - } - oneof := "" - if field.OneofIndex != nil { - oneof = ",oneof" - } - return strconv.Quote(fmt.Sprintf("%s,%d,%s%s%s%s%s%s", - wiretype, - field.GetNumber(), - optrepreq, - packed, - name, - enum, - oneof, - defaultValue)) -} - -func needsStar(typ descriptor.FieldDescriptorProto_Type) bool { - switch typ { - case descriptor.FieldDescriptorProto_TYPE_GROUP: - return false - case descriptor.FieldDescriptorProto_TYPE_MESSAGE: - return false - case descriptor.FieldDescriptorProto_TYPE_BYTES: - return false - } - return true -} - -// TypeName is the printed name appropriate for an item. 
If the object is in the current file, -// TypeName drops the package name and underscores the rest. -// Otherwise the object is from another package; and the result is the underscored -// package name followed by the item name. -// The result always has an initial capital. -func (g *Generator) TypeName(obj Object) string { - return g.DefaultPackageName(obj) + CamelCaseSlice(obj.TypeName()) -} - -// GoType returns a string representing the type name, and the wire type -func (g *Generator) GoType(message *Descriptor, field *descriptor.FieldDescriptorProto) (typ string, wire string) { - // TODO: Options. - switch *field.Type { - case descriptor.FieldDescriptorProto_TYPE_DOUBLE: - typ, wire = "float64", "fixed64" - case descriptor.FieldDescriptorProto_TYPE_FLOAT: - typ, wire = "float32", "fixed32" - case descriptor.FieldDescriptorProto_TYPE_INT64: - typ, wire = "int64", "varint" - case descriptor.FieldDescriptorProto_TYPE_UINT64: - typ, wire = "uint64", "varint" - case descriptor.FieldDescriptorProto_TYPE_INT32: - typ, wire = "int32", "varint" - case descriptor.FieldDescriptorProto_TYPE_UINT32: - typ, wire = "uint32", "varint" - case descriptor.FieldDescriptorProto_TYPE_FIXED64: - typ, wire = "uint64", "fixed64" - case descriptor.FieldDescriptorProto_TYPE_FIXED32: - typ, wire = "uint32", "fixed32" - case descriptor.FieldDescriptorProto_TYPE_BOOL: - typ, wire = "bool", "varint" - case descriptor.FieldDescriptorProto_TYPE_STRING: - typ, wire = "string", "bytes" - case descriptor.FieldDescriptorProto_TYPE_GROUP: - desc := g.ObjectNamed(field.GetTypeName()) - typ, wire = "*"+g.TypeName(desc), "group" - case descriptor.FieldDescriptorProto_TYPE_MESSAGE: - desc := g.ObjectNamed(field.GetTypeName()) - typ, wire = "*"+g.TypeName(desc), "bytes" - case descriptor.FieldDescriptorProto_TYPE_BYTES: - typ, wire = "[]byte", "bytes" - case descriptor.FieldDescriptorProto_TYPE_ENUM: - desc := g.ObjectNamed(field.GetTypeName()) - typ, wire = g.TypeName(desc), "varint" - case descriptor.FieldDescriptorProto_TYPE_SFIXED32: - typ, wire = "int32", "fixed32" - case descriptor.FieldDescriptorProto_TYPE_SFIXED64: - typ, wire = "int64", "fixed64" - case descriptor.FieldDescriptorProto_TYPE_SINT32: - typ, wire = "int32", "zigzag32" - case descriptor.FieldDescriptorProto_TYPE_SINT64: - typ, wire = "int64", "zigzag64" - default: - g.Fail("unknown type for", field.GetName()) - } - if isRepeated(field) { - typ = "[]" + typ - } else if message != nil && message.proto3() { - return - } else if field.OneofIndex != nil && message != nil { - return - } else if needsStar(*field.Type) { - typ = "*" + typ - } - return -} - -func (g *Generator) RecordTypeUse(t string) { - if _, ok := g.typeNameToObject[t]; ok { - // Call ObjectNamed to get the true object to record the use. - obj := g.ObjectNamed(t) - g.usedPackages[obj.GoImportPath()] = true - } -} - -// Method names that may be generated. Fields with these names get an -// underscore appended. Any change to this set is a potential incompatible -// API change because it changes generated field names. -var methodNames = [...]string{ - "Reset", - "String", - "ProtoMessage", - "Marshal", - "Unmarshal", - "ExtensionRangeArray", - "ExtensionMap", - "Descriptor", -} - -// Names of messages in the `google.protobuf` package for which -// we will generate XXX_WellKnownType methods. 
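The shape rules encoded by GoType and needsStar above are easier to read off a concrete struct. A sketch for a hypothetical proto2 message (field and message names assumed): optional scalars become pointers, repeated fields become slices, and bytes, like messages and groups, are reference types that need no star:

```go
package example

type Shapes struct {
	Count   *int32   `protobuf:"varint,1,opt,name=count"`  // TYPE_INT32, optional -> *int32, wire "varint"
	Ratio   *float64 `protobuf:"fixed64,2,opt,name=ratio"` // TYPE_DOUBLE, optional -> *float64, wire "fixed64"
	Payload []byte   `protobuf:"bytes,3,opt,name=payload"` // TYPE_BYTES -> []byte, no star
	Tags    []string `protobuf:"bytes,4,rep,name=tags"`    // repeated TYPE_STRING -> []string
}
```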
-var wellKnownTypes = map[string]bool{ - "Any": true, - "Duration": true, - "Empty": true, - "Struct": true, - "Timestamp": true, - - "Value": true, - "ListValue": true, - "DoubleValue": true, - "FloatValue": true, - "Int64Value": true, - "UInt64Value": true, - "Int32Value": true, - "UInt32Value": true, - "BoolValue": true, - "StringValue": true, - "BytesValue": true, -} - -// getterDefault finds the default value for the field to return from a getter, -// regardless of whether it's a built-in default or one set explicitly in the source. Returns e.g. "nil", `""`, "Default_MessageType_FieldName" -func (g *Generator) getterDefault(field *descriptor.FieldDescriptorProto, goMessageType string) string { - if isRepeated(field) { - return "nil" - } - if def := field.GetDefaultValue(); def != "" { - defaultConstant := g.defaultConstantName(goMessageType, field.GetName()) - if *field.Type != descriptor.FieldDescriptorProto_TYPE_BYTES { - return defaultConstant - } - return "append([]byte(nil), " + defaultConstant + "...)" - } - switch *field.Type { - case descriptor.FieldDescriptorProto_TYPE_BOOL: - return "false" - case descriptor.FieldDescriptorProto_TYPE_STRING: - return `""` - case descriptor.FieldDescriptorProto_TYPE_GROUP, descriptor.FieldDescriptorProto_TYPE_MESSAGE, descriptor.FieldDescriptorProto_TYPE_BYTES: - return "nil" - case descriptor.FieldDescriptorProto_TYPE_ENUM: - obj := g.ObjectNamed(field.GetTypeName()) - var enum *EnumDescriptor - if id, ok := obj.(*ImportedDescriptor); ok { - // The enum type has been publicly imported. - enum, _ = id.o.(*EnumDescriptor) - } else { - enum, _ = obj.(*EnumDescriptor) - } - if enum == nil { - log.Printf("don't know how to generate getter for %s", field.GetName()) - return "nil" - } - if len(enum.Value) == 0 { - return "0 // empty enum" - } - first := enum.Value[0].GetName() - return g.DefaultPackageName(obj) + enum.prefix() + first - default: - return "0" - } -} - -// defaultConstantName builds the name of the default constant from the message -// type name and the untouched field name, e.g. "Default_MessageType_FieldName" -func (g *Generator) defaultConstantName(goMessageType, protoFieldName string) string { - return "Default_" + goMessageType + "_" + CamelCase(protoFieldName) -} - -// The different types of fields in a message and how to actually print them -// Most of the logic for generateMessage is in the methods of these types. -// -// Note that the content of the field is irrelevant, a simpleField can contain -// anything from a scalar to a group (which is just a message). -// -// Extension fields (and message sets) are however handled separately. -// -// simpleField - a field that is neither weak nor oneof, possibly repeated -// oneofField - field containing list of subfields: -// - oneofSubField - a field within the oneof - -// msgCtx contains the context for the generator functions. -type msgCtx struct { - goName string // Go struct name of the message, e.g. MessageName - message *Descriptor // The descriptor for the message -} - -// fieldCommon contains data common to all types of fields. -type fieldCommon struct { - goName string // Go name of field, e.g. "FieldName" or "Descriptor_" - protoName string // Name of field in proto language, e.g. "field_name" or "descriptor" - getterName string // Name of the getter, e.g. "GetFieldName" or "GetDescriptor_" - goType string // The Go type as a string, e.g. "*int32" or "*OtherMessage" - tags string // The tag string/annotation for the type, e.g. 
`protobuf:"varint,8,opt,name=region_id,json=regionId"` - fullPath string // The full path of the field as used by Annotate etc, e.g. "4,0,2,0" -} - -// getProtoName gets the proto name of a field, e.g. "field_name" or "descriptor". -func (f *fieldCommon) getProtoName() string { - return f.protoName -} - -// getGoType returns the go type of the field as a string, e.g. "*int32". -func (f *fieldCommon) getGoType() string { - return f.goType -} - -// simpleField is not weak, not a oneof, not an extension. Can be required, optional or repeated. -type simpleField struct { - fieldCommon - protoTypeName string // Proto type name, empty if primitive, e.g. ".google.protobuf.Duration" - protoType descriptor.FieldDescriptorProto_Type // Actual type enum value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64 - deprecated string // Deprecation comment, if any, e.g. "// Deprecated: Do not use." - getterDef string // Default for getters, e.g. "nil", `""` or "Default_MessageType_FieldName" - protoDef string // Default value as defined in the proto file, e.g "yoshi" or "5" - comment string // The full comment for the field, e.g. "// Useful information" -} - -// decl prints the declaration of the field in the struct (if any). -func (f *simpleField) decl(g *Generator, mc *msgCtx) { - g.P(f.comment, Annotate(mc.message.file, f.fullPath, f.goName), "\t", f.goType, "\t`", f.tags, "`", f.deprecated) -} - -// getter prints the getter for the field. -func (f *simpleField) getter(g *Generator, mc *msgCtx) { - star := "" - tname := f.goType - if needsStar(f.protoType) && tname[0] == '*' { - tname = tname[1:] - star = "*" - } - if f.deprecated != "" { - g.P(f.deprecated) - } - g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, f.fullPath, f.getterName), "() "+tname+" {") - if f.getterDef == "nil" { // Simpler getter - g.P("if m != nil {") - g.P("return m." + f.goName) - g.P("}") - g.P("return nil") - g.P("}") - g.P() - return - } - if mc.message.proto3() { - g.P("if m != nil {") - } else { - g.P("if m != nil && m." + f.goName + " != nil {") - } - g.P("return " + star + "m." + f.goName) - g.P("}") - g.P("return ", f.getterDef) - g.P("}") - g.P() -} - -// setter prints the setter method of the field. -func (f *simpleField) setter(g *Generator, mc *msgCtx) { - // No setter for regular fields yet -} - -// getProtoDef returns the default value explicitly stated in the proto file, e.g "yoshi" or "5". -func (f *simpleField) getProtoDef() string { - return f.protoDef -} - -// getProtoTypeName returns the protobuf type name for the field as returned by field.GetTypeName(), e.g. ".google.protobuf.Duration". -func (f *simpleField) getProtoTypeName() string { - return f.protoTypeName -} - -// getProtoType returns the *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64. -func (f *simpleField) getProtoType() descriptor.FieldDescriptorProto_Type { - return f.protoType -} - -// oneofSubFields are kept slize held by each oneofField. They do not appear in the top level slize of fields for the message. -type oneofSubField struct { - fieldCommon - protoTypeName string // Proto type name, empty if primitive, e.g. ".google.protobuf.Duration" - protoType descriptor.FieldDescriptorProto_Type // Actual type enum value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64 - oneofTypeName string // Type name of the enclosing struct, e.g. "MessageName_FieldName" - fieldNumber int // Actual field number, as defined in proto, e.g. 12 - getterDef string // Default for getters, e.g. 
"nil", `""` or "Default_MessageType_FieldName" - protoDef string // Default value as defined in the proto file, e.g "yoshi" or "5" -} - -// wireTypeName returns a textual wire type, needed for oneof sub fields in generated code. -func (f *oneofSubField) wireTypeName() string { - switch f.protoType { - case descriptor.FieldDescriptorProto_TYPE_FIXED64, - descriptor.FieldDescriptorProto_TYPE_SFIXED64, - descriptor.FieldDescriptorProto_TYPE_DOUBLE: - return "WireFixed64" - case descriptor.FieldDescriptorProto_TYPE_FIXED32, - descriptor.FieldDescriptorProto_TYPE_SFIXED32, - descriptor.FieldDescriptorProto_TYPE_FLOAT: - return "WireFixed32" - case descriptor.FieldDescriptorProto_TYPE_GROUP: - return "WireStartGroup" - case descriptor.FieldDescriptorProto_TYPE_MESSAGE, - descriptor.FieldDescriptorProto_TYPE_STRING, - descriptor.FieldDescriptorProto_TYPE_BYTES: - return "WireBytes" - default: // All others are Varints - return "WireVarint" - } -} - -// typedNil prints a nil casted to the pointer to this field. -// - for XXX_OneofFuncs -func (f *oneofSubField) typedNil(g *Generator) { - g.P("(*", f.oneofTypeName, ")(nil),") -} - -// marshalCase prints the case matching this oneof subfield in the marshalling code. -func (f *oneofSubField) marshalCase(g *Generator) { - g.P("case *", f.oneofTypeName, ":") - wire := f.wireTypeName() - var pre, post string - val := "x." + f.goName // overridden for TYPE_BOOL - switch f.protoType { - case descriptor.FieldDescriptorProto_TYPE_DOUBLE: - pre = "b.EncodeFixed64(" + g.Pkg["math"] + ".Float64bits(" - post = "))" - case descriptor.FieldDescriptorProto_TYPE_FLOAT: - pre = "b.EncodeFixed32(uint64(" + g.Pkg["math"] + ".Float32bits(" - post = ")))" - case descriptor.FieldDescriptorProto_TYPE_INT64, descriptor.FieldDescriptorProto_TYPE_UINT64: - pre, post = "b.EncodeVarint(uint64(", "))" - case descriptor.FieldDescriptorProto_TYPE_INT32, descriptor.FieldDescriptorProto_TYPE_UINT32, descriptor.FieldDescriptorProto_TYPE_ENUM: - pre, post = "b.EncodeVarint(uint64(", "))" - case descriptor.FieldDescriptorProto_TYPE_FIXED64, descriptor.FieldDescriptorProto_TYPE_SFIXED64: - pre, post = "b.EncodeFixed64(uint64(", "))" - case descriptor.FieldDescriptorProto_TYPE_FIXED32, descriptor.FieldDescriptorProto_TYPE_SFIXED32: - pre, post = "b.EncodeFixed32(uint64(", "))" - case descriptor.FieldDescriptorProto_TYPE_BOOL: - g.P("t := uint64(0)") - g.P("if ", val, " { t = 1 }") - val = "t" - pre, post = "b.EncodeVarint(", ")" - case descriptor.FieldDescriptorProto_TYPE_STRING: - pre, post = "b.EncodeStringBytes(", ")" - case descriptor.FieldDescriptorProto_TYPE_GROUP: - pre, post = "b.Marshal(", ")" - case descriptor.FieldDescriptorProto_TYPE_MESSAGE: - pre, post = "b.EncodeMessage(", ")" - case descriptor.FieldDescriptorProto_TYPE_BYTES: - pre, post = "b.EncodeRawBytes(", ")" - case descriptor.FieldDescriptorProto_TYPE_SINT32: - pre, post = "b.EncodeZigzag32(uint64(", "))" - case descriptor.FieldDescriptorProto_TYPE_SINT64: - pre, post = "b.EncodeZigzag64(uint64(", "))" - default: - g.Fail("unhandled oneof field type ", f.protoType.String()) - } - g.P("b.EncodeVarint(", f.fieldNumber, "<<3|", g.Pkg["proto"], ".", wire, ")") - if t := f.protoType; t != descriptor.FieldDescriptorProto_TYPE_GROUP && t != descriptor.FieldDescriptorProto_TYPE_MESSAGE { - g.P(pre, val, post) - } else { - g.P("if err := ", pre, val, post, "; err != nil {") - g.P("return err") - g.P("}") - } - if f.protoType == descriptor.FieldDescriptorProto_TYPE_GROUP { - g.P("b.EncodeVarint(", f.fieldNumber, "<<3|", 
g.Pkg["proto"], ".WireEndGroup)") - } -} - -// unmarshalCase prints the case matching this oneof subfield in the unmarshalling code. -func (f *oneofSubField) unmarshalCase(g *Generator, origOneofName string, oneofName string) { - g.P("case ", f.fieldNumber, ": // ", origOneofName, ".", f.getProtoName()) - g.P("if wire != ", g.Pkg["proto"], ".", f.wireTypeName(), " {") - g.P("return true, ", g.Pkg["proto"], ".ErrInternalBadWireType") - g.P("}") - lhs := "x, err" // overridden for TYPE_MESSAGE and TYPE_GROUP - var dec, cast, cast2 string - switch f.protoType { - case descriptor.FieldDescriptorProto_TYPE_DOUBLE: - dec, cast = "b.DecodeFixed64()", g.Pkg["math"]+".Float64frombits" - case descriptor.FieldDescriptorProto_TYPE_FLOAT: - dec, cast, cast2 = "b.DecodeFixed32()", "uint32", g.Pkg["math"]+".Float32frombits" - case descriptor.FieldDescriptorProto_TYPE_INT64: - dec, cast = "b.DecodeVarint()", "int64" - case descriptor.FieldDescriptorProto_TYPE_UINT64: - dec = "b.DecodeVarint()" - case descriptor.FieldDescriptorProto_TYPE_INT32: - dec, cast = "b.DecodeVarint()", "int32" - case descriptor.FieldDescriptorProto_TYPE_FIXED64: - dec = "b.DecodeFixed64()" - case descriptor.FieldDescriptorProto_TYPE_FIXED32: - dec, cast = "b.DecodeFixed32()", "uint32" - case descriptor.FieldDescriptorProto_TYPE_BOOL: - dec = "b.DecodeVarint()" - // handled specially below - case descriptor.FieldDescriptorProto_TYPE_STRING: - dec = "b.DecodeStringBytes()" - case descriptor.FieldDescriptorProto_TYPE_GROUP: - g.P("msg := new(", f.goType[1:], ")") // drop star - lhs = "err" - dec = "b.DecodeGroup(msg)" - // handled specially below - case descriptor.FieldDescriptorProto_TYPE_MESSAGE: - g.P("msg := new(", f.goType[1:], ")") // drop star - lhs = "err" - dec = "b.DecodeMessage(msg)" - // handled specially below - case descriptor.FieldDescriptorProto_TYPE_BYTES: - dec = "b.DecodeRawBytes(true)" - case descriptor.FieldDescriptorProto_TYPE_UINT32: - dec, cast = "b.DecodeVarint()", "uint32" - case descriptor.FieldDescriptorProto_TYPE_ENUM: - dec, cast = "b.DecodeVarint()", f.goType - case descriptor.FieldDescriptorProto_TYPE_SFIXED32: - dec, cast = "b.DecodeFixed32()", "int32" - case descriptor.FieldDescriptorProto_TYPE_SFIXED64: - dec, cast = "b.DecodeFixed64()", "int64" - case descriptor.FieldDescriptorProto_TYPE_SINT32: - dec, cast = "b.DecodeZigzag32()", "int32" - case descriptor.FieldDescriptorProto_TYPE_SINT64: - dec, cast = "b.DecodeZigzag64()", "int64" - default: - g.Fail("unhandled oneof field type ", f.protoType.String()) - } - g.P(lhs, " := ", dec) - val := "x" - if cast != "" { - val = cast + "(" + val + ")" - } - if cast2 != "" { - val = cast2 + "(" + val + ")" - } - switch f.protoType { - case descriptor.FieldDescriptorProto_TYPE_BOOL: - val += " != 0" - case descriptor.FieldDescriptorProto_TYPE_GROUP, - descriptor.FieldDescriptorProto_TYPE_MESSAGE: - val = "msg" - } - g.P("m.", oneofName, " = &", f.oneofTypeName, "{", val, "}") - g.P("return true, err") -} - -// sizerCase prints the case matching this oneof subfield in the sizer code. -func (f *oneofSubField) sizerCase(g *Generator) { - g.P("case *", f.oneofTypeName, ":") - val := "x." 
+ f.goName - var varint, fixed string - switch f.protoType { - case descriptor.FieldDescriptorProto_TYPE_DOUBLE: - fixed = "8" - case descriptor.FieldDescriptorProto_TYPE_FLOAT: - fixed = "4" - case descriptor.FieldDescriptorProto_TYPE_INT64, descriptor.FieldDescriptorProto_TYPE_UINT64, descriptor.FieldDescriptorProto_TYPE_INT32, descriptor.FieldDescriptorProto_TYPE_UINT32, descriptor.FieldDescriptorProto_TYPE_ENUM: - varint = val - case descriptor.FieldDescriptorProto_TYPE_FIXED64, descriptor.FieldDescriptorProto_TYPE_SFIXED64: - fixed = "8" - case descriptor.FieldDescriptorProto_TYPE_FIXED32, descriptor.FieldDescriptorProto_TYPE_SFIXED32: - fixed = "4" - case descriptor.FieldDescriptorProto_TYPE_BOOL: - fixed = "1" - case descriptor.FieldDescriptorProto_TYPE_STRING: - fixed = "len(" + val + ")" - varint = fixed - case descriptor.FieldDescriptorProto_TYPE_GROUP: - fixed = g.Pkg["proto"] + ".Size(" + val + ")" - case descriptor.FieldDescriptorProto_TYPE_MESSAGE: - g.P("s := ", g.Pkg["proto"], ".Size(", val, ")") - fixed = "s" - varint = fixed - case descriptor.FieldDescriptorProto_TYPE_BYTES: - fixed = "len(" + val + ")" - varint = fixed - case descriptor.FieldDescriptorProto_TYPE_SINT32: - varint = "(uint32(" + val + ") << 1) ^ uint32((int32(" + val + ") >> 31))" - case descriptor.FieldDescriptorProto_TYPE_SINT64: - varint = "uint64(" + val + " << 1) ^ uint64((int64(" + val + ") >> 63))" - default: - g.Fail("unhandled oneof field type ", f.protoType.String()) - } - // Tag and wire varint is known statically, - // so don't generate code for that part of the size computation. - tagAndWireSize := proto.SizeVarint(uint64(f.fieldNumber << 3)) // wire doesn't affect varint size - g.P("n += ", tagAndWireSize, " // tag and wire") - if varint != "" { - g.P("n += ", g.Pkg["proto"], ".SizeVarint(uint64(", varint, "))") - } - if fixed != "" { - g.P("n += ", fixed) - } - if f.protoType == descriptor.FieldDescriptorProto_TYPE_GROUP { - g.P("n += ", tagAndWireSize, " // tag and wire") - } -} - -// getProtoDef returns the default value explicitly stated in the proto file, e.g "yoshi" or "5". -func (f *oneofSubField) getProtoDef() string { - return f.protoDef -} - -// getProtoTypeName returns the protobuf type name for the field as returned by field.GetTypeName(), e.g. ".google.protobuf.Duration". -func (f *oneofSubField) getProtoTypeName() string { - return f.protoTypeName -} - -// getProtoType returns the *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64. -func (f *oneofSubField) getProtoType() descriptor.FieldDescriptorProto_Type { - return f.protoType -} - -// oneofField represents the oneof on top level. -// The alternative fields within the oneof are represented by oneofSubField. -type oneofField struct { - fieldCommon - subFields []*oneofSubField // All the possible oneof fields - comment string // The full comment for the field, e.g. "// Types that are valid to be assigned to MyOneof:\n\\" -} - -// decl prints the declaration of the field in the struct (if any). -func (f *oneofField) decl(g *Generator, mc *msgCtx) { - comment := f.comment - for _, sf := range f.subFields { - comment += "//\t*" + sf.oneofTypeName + "\n" - } - g.P(comment, Annotate(mc.message.file, f.fullPath, f.goName), " ", f.goType, " `", f.tags, "`") -} - -// getter for a oneof field will print additional discriminators and interfaces for the oneof, -// also it prints all the getters for the sub fields. 
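// Editor's note (illustrative sketch, not part of the original file): for a
// hypothetical message Msg with `oneof contents { string text = 1; }`, the
// getter below emits, roughly:
//
//	type isMsg_Contents interface{ isMsg_Contents() }
//	type Msg_Text struct{ Text string } // struct tags elided here
//	func (*Msg_Text) isMsg_Contents()  {}
//	func (m *Msg) GetContents() isMsg_Contents { if m != nil { return m.Contents }; return nil }
//	func (m *Msg) GetText() string { if x, ok := m.GetContents().(*Msg_Text); ok { return x.Text }; return "" }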
-func (f *oneofField) getter(g *Generator, mc *msgCtx) { - // The discriminator type - g.P("type ", f.goType, " interface {") - g.P(f.goType, "()") - g.P("}") - g.P() - // The subField types, fulfilling the discriminator type contract - for _, sf := range f.subFields { - g.P("type ", Annotate(mc.message.file, sf.fullPath, sf.oneofTypeName), " struct {") - g.P(Annotate(mc.message.file, sf.fullPath, sf.goName), " ", sf.goType, " `", sf.tags, "`") - g.P("}") - g.P() - } - for _, sf := range f.subFields { - g.P("func (*", sf.oneofTypeName, ") ", f.goType, "() {}") - g.P() - } - // Getter for the oneof field - g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, f.fullPath, f.getterName), "() ", f.goType, " {") - g.P("if m != nil { return m.", f.goName, " }") - g.P("return nil") - g.P("}") - g.P() - // Getters for each oneof - for _, sf := range f.subFields { - g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, sf.fullPath, sf.getterName), "() "+sf.goType+" {") - g.P("if x, ok := m.", f.getterName, "().(*", sf.oneofTypeName, "); ok {") - g.P("return x.", sf.goName) - g.P("}") - g.P("return ", sf.getterDef) - g.P("}") - g.P() - } -} - -// setter prints the setter method of the field. -func (f *oneofField) setter(g *Generator, mc *msgCtx) { - // No setters for oneof yet -} - -// topLevelField interface implemented by all types of fields on the top level (not oneofSubField). -type topLevelField interface { - decl(g *Generator, mc *msgCtx) // print declaration within the struct - getter(g *Generator, mc *msgCtx) // print getter - setter(g *Generator, mc *msgCtx) // print setter if applicable -} - -// defField interface implemented by all types of fields that can have defaults (not oneofField, but instead oneofSubField). -type defField interface { - getProtoDef() string // default value explicitly stated in the proto file, e.g "yoshi" or "5" - getProtoName() string // proto name of a field, e.g. "field_name" or "descriptor" - getGoType() string // go type of the field as a string, e.g. "*int32" - getProtoTypeName() string // protobuf type name for the field, e.g. ".google.protobuf.Duration" - getProtoType() descriptor.FieldDescriptorProto_Type // *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64 -} - -// generateDefaultConstants adds constants for default values if needed, which is only if the default value is -// explicit in the proto. -func (g *Generator) generateDefaultConstants(mc *msgCtx, topLevelFields []topLevelField) { - // Collect fields that can have defaults - dFields := []defField{} - for _, pf := range topLevelFields { - if f, ok := pf.(*oneofField); ok { - for _, osf := range f.subFields { - dFields = append(dFields, osf) - } - continue - } - dFields = append(dFields, pf.(defField)) - } - for _, df := range dFields { - def := df.getProtoDef() - if def == "" { - continue - } - fieldname := g.defaultConstantName(mc.goName, df.getProtoName()) - typename := df.getGoType() - if typename[0] == '*' { - typename = typename[1:] - } - kind := "const " - switch { - case typename == "bool": - case typename == "string": - def = strconv.Quote(def) - case typename == "[]byte": - def = "[]byte(" + strconv.Quote(unescape(def)) + ")" - kind = "var " - case def == "inf", def == "-inf", def == "nan": - // These names are known to, and defined by, the protocol language. 
- switch def { - case "inf": - def = "math.Inf(1)" - case "-inf": - def = "math.Inf(-1)" - case "nan": - def = "math.NaN()" - } - if df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_FLOAT { - def = "float32(" + def + ")" - } - kind = "var " - case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_ENUM: - // Must be an enum. Need to construct the prefixed name. - obj := g.ObjectNamed(df.getProtoTypeName()) - var enum *EnumDescriptor - if id, ok := obj.(*ImportedDescriptor); ok { - // The enum type has been publicly imported. - enum, _ = id.o.(*EnumDescriptor) - } else { - enum, _ = obj.(*EnumDescriptor) - } - if enum == nil { - log.Printf("don't know how to generate constant for %s", fieldname) - continue - } - def = g.DefaultPackageName(obj) + enum.prefix() + def - } - g.P(kind, fieldname, " ", typename, " = ", def) - g.file.addExport(mc.message, constOrVarSymbol{fieldname, kind, ""}) - } - g.P() -} - -// generateInternalStructFields just adds the XXX_ fields to the message struct. -func (g *Generator) generateInternalStructFields(mc *msgCtx, topLevelFields []topLevelField) { - g.P("XXX_NoUnkeyedLiteral\tstruct{} `json:\"-\"`") // prevent unkeyed struct literals - if len(mc.message.ExtensionRange) > 0 { - messageset := "" - if opts := mc.message.Options; opts != nil && opts.GetMessageSetWireFormat() { - messageset = "protobuf_messageset:\"1\" " - } - g.P(g.Pkg["proto"], ".XXX_InternalExtensions `", messageset, "json:\"-\"`") - } - g.P("XXX_unrecognized\t[]byte `json:\"-\"`") - g.P("XXX_sizecache\tint32 `json:\"-\"`") - -} - -// generateOneofFuncs adds all the utility functions for oneof, including marshalling, unmarshalling and sizer. -func (g *Generator) generateOneofFuncs(mc *msgCtx, topLevelFields []topLevelField) { - ofields := []*oneofField{} - for _, f := range topLevelFields { - if o, ok := f.(*oneofField); ok { - ofields = append(ofields, o) - } - } - if len(ofields) == 0 { - return - } - enc := "_" + mc.goName + "_OneofMarshaler" - dec := "_" + mc.goName + "_OneofUnmarshaler" - size := "_" + mc.goName + "_OneofSizer" - encSig := "(msg " + g.Pkg["proto"] + ".Message, b *" + g.Pkg["proto"] + ".Buffer) error" - decSig := "(msg " + g.Pkg["proto"] + ".Message, tag, wire int, b *" + g.Pkg["proto"] + ".Buffer) (bool, error)" - sizeSig := "(msg " + g.Pkg["proto"] + ".Message) (n int)" - - // OneofFuncs - g.P("// XXX_OneofFuncs is for the internal use of the proto package.") - g.P("func (*", mc.goName, ") XXX_OneofFuncs() (func", encSig, ", func", decSig, ", func", sizeSig, ", []interface{}) {") - g.P("return ", enc, ", ", dec, ", ", size, ", []interface{}{") - for _, of := range ofields { - for _, sf := range of.subFields { - sf.typedNil(g) - } - } - g.P("}") - g.P("}") - g.P() - - // marshaler - g.P("func ", enc, encSig, " {") - g.P("m := msg.(*", mc.goName, ")") - for _, of := range ofields { - g.P("// ", of.getProtoName()) - g.P("switch x := m.", of.goName, ".(type) {") - for _, sf := range of.subFields { - // also fills in field.wire - sf.marshalCase(g) - } - g.P("case nil:") - g.P("default:") - g.P(" return ", g.Pkg["fmt"], `.Errorf("`, mc.goName, ".", of.goName, ` has unexpected type %T", x)`) - g.P("}") - } - g.P("return nil") - g.P("}") - g.P() - - // unmarshaler - g.P("func ", dec, decSig, " {") - g.P("m := msg.(*", mc.goName, ")") - g.P("switch tag {") - for _, of := range ofields { - for _, sf := range of.subFields { - sf.unmarshalCase(g, of.getProtoName(), of.goName) - } - } - g.P("default:") - g.P("return false, nil") - g.P("}") - g.P("}") - g.P() - - // 
sizer - g.P("func ", size, sizeSig, " {") - g.P("m := msg.(*", mc.goName, ")") - for _, of := range ofields { - g.P("// ", of.getProtoName()) - g.P("switch x := m.", of.goName, ".(type) {") - for _, sf := range of.subFields { - // also fills in field.wire - sf.sizerCase(g) - } - g.P("case nil:") - g.P("default:") - g.P("panic(", g.Pkg["fmt"], ".Sprintf(\"proto: unexpected type %T in oneof\", x))") - g.P("}") - } - g.P("return n") - g.P("}") - g.P() -} - -// generateMessageStruct adds the actual struct with its members (but not methods) to the output. -func (g *Generator) generateMessageStruct(mc *msgCtx, topLevelFields []topLevelField) { - comments := g.PrintComments(mc.message.path) - - // Guarantee deprecation comments appear after user-provided comments. - if mc.message.GetOptions().GetDeprecated() { - if comments { - // Convention: Separate deprecation comments from original - // comments with an empty line. - g.P("//") - } - g.P(deprecationComment) - } - - g.P("type ", Annotate(mc.message.file, mc.message.path, mc.goName), " struct {") - for _, pf := range topLevelFields { - pf.decl(g, mc) - } - g.generateInternalStructFields(mc, topLevelFields) - g.P("}") -} - -// generateGetters adds getters for all fields, including oneofs and weak fields when applicable. -func (g *Generator) generateGetters(mc *msgCtx, topLevelFields []topLevelField) { - for _, pf := range topLevelFields { - pf.getter(g, mc) - } -} - -// generateSetters adds setters for all fields, including oneofs and weak fields when applicable. -func (g *Generator) generateSetters(mc *msgCtx, topLevelFields []topLevelField) { - for _, pf := range topLevelFields { - pf.setter(g, mc) - } -} - -// generateCommonMethods adds methods to the message that are not on a per-field basis. -func (g *Generator) generateCommonMethods(mc *msgCtx) { - // Reset, String and ProtoMessage methods. - g.P("func (m *", mc.goName, ") Reset() { *m = ", mc.goName, "{} }") - g.P("func (m *", mc.goName, ") String() string { return ", g.Pkg["proto"], ".CompactTextString(m) }") - g.P("func (*", mc.goName, ") ProtoMessage() {}") - var indexes []string - for m := mc.message; m != nil; m = m.parent { - indexes = append([]string{strconv.Itoa(m.index)}, indexes...) - } - g.P("func (*", mc.goName, ") Descriptor() ([]byte, []int) {") - g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}") - g.P("}") - // TODO: Revisit the decision to use a XXX_WellKnownType method - // if we change proto.MessageName to work with multiple equivalents. - if mc.message.file.GetPackage() == "google.protobuf" && wellKnownTypes[mc.message.GetName()] { - g.P("func (*", mc.goName, `) XXX_WellKnownType() string { return "`, mc.message.GetName(), `" }`) - } - - // Extension support methods - if len(mc.message.ExtensionRange) > 0 { - // message_set_wire_format only makes sense when extensions are defined. 
- if opts := mc.message.Options; opts != nil && opts.GetMessageSetWireFormat() { - g.P() - g.P("func (m *", mc.goName, ") MarshalJSON() ([]byte, error) {") - g.P("return ", g.Pkg["proto"], ".MarshalMessageSetJSON(&m.XXX_InternalExtensions)") - g.P("}") - g.P("func (m *", mc.goName, ") UnmarshalJSON(buf []byte) error {") - g.P("return ", g.Pkg["proto"], ".UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions)") - g.P("}") - } - - g.P() - g.P("var extRange_", mc.goName, " = []", g.Pkg["proto"], ".ExtensionRange{") - for _, r := range mc.message.ExtensionRange { - end := fmt.Sprint(*r.End - 1) // make range inclusive on both ends - g.P("{Start: ", r.Start, ", End: ", end, "},") - } - g.P("}") - g.P("func (*", mc.goName, ") ExtensionRangeArray() []", g.Pkg["proto"], ".ExtensionRange {") - g.P("return extRange_", mc.goName) - g.P("}") - } - - // TODO: It does not scale to keep adding another method for every - // operation on protos that we want to switch over to using the - // table-driven approach. Instead, we should only add a single method - // that allows getting access to the *InternalMessageInfo struct and then - // calling Unmarshal, Marshal, Merge, Size, and Discard directly on that. - - // Wrapper for table-driven marshaling and unmarshaling. - g.P("func (m *", mc.goName, ") XXX_Unmarshal(b []byte) error {") - g.P("return xxx_messageInfo_", mc.goName, ".Unmarshal(m, b)") - g.P("}") - - g.P("func (m *", mc.goName, ") XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {") - g.P("return xxx_messageInfo_", mc.goName, ".Marshal(b, m, deterministic)") - g.P("}") - - g.P("func (dst *", mc.goName, ") XXX_Merge(src ", g.Pkg["proto"], ".Message) {") - g.P("xxx_messageInfo_", mc.goName, ".Merge(dst, src)") - g.P("}") - - g.P("func (m *", mc.goName, ") XXX_Size() int {") // avoid name clash with "Size" field in some message - g.P("return xxx_messageInfo_", mc.goName, ".Size(m)") - g.P("}") - - g.P("func (m *", mc.goName, ") XXX_DiscardUnknown() {") - g.P("xxx_messageInfo_", mc.goName, ".DiscardUnknown(m)") - g.P("}") - - g.P("var xxx_messageInfo_", mc.goName, " ", g.Pkg["proto"], ".InternalMessageInfo") - g.P() -} - -// Generate the type, methods and default constant definitions for this Descriptor. -func (g *Generator) generateMessage(message *Descriptor) { - topLevelFields := []topLevelField{} - oFields := make(map[int32]*oneofField) - // The full type name - typeName := message.TypeName() - // The full type name, CamelCased. - goTypeName := CamelCaseSlice(typeName) - - usedNames := make(map[string]bool) - for _, n := range methodNames { - usedNames[n] = true - } - - // allocNames finds a conflict-free variation of the given strings, - // consistently mutating their suffixes. - // It returns the same number of strings. - allocNames := func(ns ...string) []string { - Loop: - for { - for _, n := range ns { - if usedNames[n] { - for i := range ns { - ns[i] += "_" - } - continue Loop - } - } - for _, n := range ns { - usedNames[n] = true - } - return ns - } - } - - mapFieldTypes := make(map[*descriptor.FieldDescriptorProto]string) // keep track of the map fields to be added later - - // Build a structure more suitable for generating the text in one pass - for i, field := range message.Field { - // Allocate the getter and the field at the same time so name - // collisions create field/method consistent names. - // TODO: This allocation occurs based on the order of the fields - // in the proto file, meaning that a change in the field - // ordering can change generated Method/Field names. 
- base := CamelCase(*field.Name) - ns := allocNames(base, "Get"+base) - fieldName, fieldGetterName := ns[0], ns[1] - typename, wiretype := g.GoType(message, field) - jsonName := *field.Name - tag := fmt.Sprintf("protobuf:%s json:%q", g.goTag(message, field, wiretype), jsonName+",omitempty") - - oneof := field.OneofIndex != nil - if oneof && oFields[*field.OneofIndex] == nil { - odp := message.OneofDecl[int(*field.OneofIndex)] - base := CamelCase(odp.GetName()) - names := allocNames(base, "Get"+base) - fname, gname := names[0], names[1] - - // This is the first field of a oneof we haven't seen before. - // Generate the union field. - oneofFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageOneofPath, *field.OneofIndex) - c, ok := g.makeComments(oneofFullPath) - if ok { - c += "\n//\n" - } - c += "// Types that are valid to be assigned to " + fname + ":\n" - // Generate the rest of this comment later, - // when we've computed any disambiguation. - - dname := "is" + goTypeName + "_" + fname - tag := `protobuf_oneof:"` + odp.GetName() + `"` - of := oneofField{ - fieldCommon: fieldCommon{ - goName: fname, - getterName: gname, - goType: dname, - tags: tag, - protoName: odp.GetName(), - fullPath: oneofFullPath, - }, - comment: c, - } - topLevelFields = append(topLevelFields, &of) - oFields[*field.OneofIndex] = &of - } - - if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE { - desc := g.ObjectNamed(field.GetTypeName()) - if d, ok := desc.(*Descriptor); ok && d.GetOptions().GetMapEntry() { - // Figure out the Go types and tags for the key and value types. - keyField, valField := d.Field[0], d.Field[1] - keyType, keyWire := g.GoType(d, keyField) - valType, valWire := g.GoType(d, valField) - keyTag, valTag := g.goTag(d, keyField, keyWire), g.goTag(d, valField, valWire) - - // We don't use stars, except for message-typed values. - // Message and enum types are the only two possibly foreign types used in maps, - // so record their use. They are not permitted as map keys. - keyType = strings.TrimPrefix(keyType, "*") - switch *valField.Type { - case descriptor.FieldDescriptorProto_TYPE_ENUM: - valType = strings.TrimPrefix(valType, "*") - g.RecordTypeUse(valField.GetTypeName()) - case descriptor.FieldDescriptorProto_TYPE_MESSAGE: - g.RecordTypeUse(valField.GetTypeName()) - default: - valType = strings.TrimPrefix(valType, "*") - } - - typename = fmt.Sprintf("map[%s]%s", keyType, valType) - mapFieldTypes[field] = typename // record for the getter generation - - tag += fmt.Sprintf(" protobuf_key:%s protobuf_val:%s", keyTag, valTag) - } - } - - dvalue := g.getterDefault(field, goTypeName) - if oneof { - tname := goTypeName + "_" + fieldName - // It is possible for this to collide with a message or enum - // nested in this message. Check for collisions. 
- for { - ok := true - for _, desc := range message.nested { - if CamelCaseSlice(desc.TypeName()) == tname { - ok = false - break - } - } - for _, enum := range message.enums { - if CamelCaseSlice(enum.TypeName()) == tname { - ok = false - break - } - } - if !ok { - tname += "_" - continue - } - break - } - - oneofField := oFields[*field.OneofIndex] - tag := "protobuf:" + g.goTag(message, field, wiretype) - sf := oneofSubField{ - fieldCommon: fieldCommon{ - goName: fieldName, - getterName: fieldGetterName, - goType: typename, - tags: tag, - protoName: field.GetName(), - fullPath: fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i), - }, - protoTypeName: field.GetTypeName(), - fieldNumber: int(*field.Number), - protoType: *field.Type, - getterDef: dvalue, - protoDef: field.GetDefaultValue(), - oneofTypeName: tname, - } - oneofField.subFields = append(oneofField.subFields, &sf) - g.RecordTypeUse(field.GetTypeName()) - continue - } - - fieldDeprecated := "" - if field.GetOptions().GetDeprecated() { - fieldDeprecated = deprecationComment - } - - fieldFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i) - c, ok := g.makeComments(fieldFullPath) - if ok { - c += "\n" - } - rf := simpleField{ - fieldCommon: fieldCommon{ - goName: fieldName, - getterName: fieldGetterName, - goType: typename, - tags: tag, - protoName: field.GetName(), - fullPath: fieldFullPath, - }, - protoTypeName: field.GetTypeName(), - protoType: *field.Type, - deprecated: fieldDeprecated, - getterDef: dvalue, - protoDef: field.GetDefaultValue(), - comment: c, - } - var pf topLevelField = &rf - - topLevelFields = append(topLevelFields, pf) - g.RecordTypeUse(field.GetTypeName()) - } - - mc := &msgCtx{ - goName: goTypeName, - message: message, - } - - g.generateMessageStruct(mc, topLevelFields) - g.P() - g.generateCommonMethods(mc) - g.P() - g.generateDefaultConstants(mc, topLevelFields) - g.P() - g.generateGetters(mc, topLevelFields) - g.P() - g.generateSetters(mc, topLevelFields) - g.P() - g.generateOneofFuncs(mc, topLevelFields) - g.P() - - if !message.group { - - var oneofTypes []string - for _, f := range topLevelFields { - if of, ok := f.(*oneofField); ok { - for _, osf := range of.subFields { - oneofTypes = append(oneofTypes, osf.oneofTypeName) - } - } - } - - opts := message.Options - ms := &messageSymbol{ - sym: goTypeName, - hasExtensions: len(message.ExtensionRange) > 0, - isMessageSet: opts != nil && opts.GetMessageSetWireFormat(), - oneofTypes: oneofTypes, - } - g.file.addExport(message, ms) - } - - for _, ext := range message.ext { - g.generateExtension(ext) - } - - fullName := strings.Join(message.TypeName(), ".") - if g.file.Package != nil { - fullName = *g.file.Package + "." + fullName - } - - g.addInitf("%s.RegisterType((*%s)(nil), %q)", g.Pkg["proto"], goTypeName, fullName) - // Register types for native map types. - for _, k := range mapFieldKeys(mapFieldTypes) { - fullName := strings.TrimPrefix(*k.TypeName, ".") - g.addInitf("%s.RegisterMapType((%s)(nil), %q)", g.Pkg["proto"], mapFieldTypes[k], fullName) - } - -} - -type byTypeName []*descriptor.FieldDescriptorProto - -func (a byTypeName) Len() int { return len(a) } -func (a byTypeName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byTypeName) Less(i, j int) bool { return *a[i].TypeName < *a[j].TypeName } - -// mapFieldKeys returns the keys of m in a consistent order. 
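// Editor's note (illustrative sketch, not part of the original file): for a
// hypothetical field `map<string, int32> counts = 1;` in message Msg of package
// pkg, generateMessage above records the Go type map[string]int32 and emits an
// init-time registration along the lines of:
//
//	proto.RegisterMapType((map[string]int32)(nil), "pkg.Msg.CountsEntry")
//
// mapFieldKeys below returns those map fields sorted by type name so the
// generated init() is deterministic from run to run.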
-func mapFieldKeys(m map[*descriptor.FieldDescriptorProto]string) []*descriptor.FieldDescriptorProto { - keys := make([]*descriptor.FieldDescriptorProto, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - sort.Sort(byTypeName(keys)) - return keys -} - -var escapeChars = [256]byte{ - 'a': '\a', 'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t', 'v': '\v', '\\': '\\', '"': '"', '\'': '\'', '?': '?', -} - -// unescape reverses the "C" escaping that protoc does for default values of bytes fields. -// It is best effort in that it effectively ignores malformed input. Seemingly invalid escape -// sequences are conveyed, unmodified, into the decoded result. -func unescape(s string) string { - // NB: Sadly, we can't use strconv.Unquote because protoc will escape both - // single and double quotes, but strconv.Unquote only allows one or the - // other (based on actual surrounding quotes of its input argument). - - var out []byte - for len(s) > 0 { - // regular character, or too short to be valid escape - if s[0] != '\\' || len(s) < 2 { - out = append(out, s[0]) - s = s[1:] - } else if c := escapeChars[s[1]]; c != 0 { - // escape sequence - out = append(out, c) - s = s[2:] - } else if s[1] == 'x' || s[1] == 'X' { - // hex escape, e.g. "\x80" - if len(s) < 4 { - // too short to be valid - out = append(out, s[:2]...) - s = s[2:] - continue - } - v, err := strconv.ParseUint(s[2:4], 16, 8) - if err != nil { - out = append(out, s[:4]...) - } else { - out = append(out, byte(v)) - } - s = s[4:] - } else if '0' <= s[1] && s[1] <= '7' { - // octal escape, can vary from 1 to 3 octal digits; e.g., "\0" "\40" or "\164" - // so consume up to 2 more bytes or up to end-of-string - n := len(s[1:]) - len(strings.TrimLeft(s[1:], "01234567")) - if n > 3 { - n = 3 - } - v, err := strconv.ParseUint(s[1:1+n], 8, 8) - if err != nil { - out = append(out, s[:1+n]...) - } else { - out = append(out, byte(v)) - } - s = s[1+n:] - } else { - // bad escape, just propagate the slash as-is - out = append(out, s[0]) - s = s[1:] - } - } - - return string(out) -} - -func (g *Generator) generateExtension(ext *ExtensionDescriptor) { - ccTypeName := ext.DescName() - - extObj := g.ObjectNamed(*ext.Extendee) - var extDesc *Descriptor - if id, ok := extObj.(*ImportedDescriptor); ok { - // This is extending a publicly imported message. - // We need the underlying type for goTag. - extDesc = id.o.(*Descriptor) - } else { - extDesc = extObj.(*Descriptor) - } - extendedType := "*" + g.TypeName(extObj) // always use the original - field := ext.FieldDescriptorProto - fieldType, wireType := g.GoType(ext.parent, field) - tag := g.goTag(extDesc, field, wireType) - g.RecordTypeUse(*ext.Extendee) - if n := ext.FieldDescriptorProto.TypeName; n != nil { - // foreign extension type - g.RecordTypeUse(*n) - } - - typeName := ext.TypeName() - - // Special case for proto2 message sets: If this extension is extending - // proto2.bridge.MessageSet, and its final name component is "message_set_extension", - // then drop that last component. - // - // TODO: This should be implemented in the text formatter rather than the generator. 
- // In addition, the situation for when to apply this special case is implemented -// differently in other languages: -// https://github.com/google/protobuf/blob/aff10976/src/google/protobuf/text_format.cc#L1560 - mset := false - if extDesc.GetOptions().GetMessageSetWireFormat() && typeName[len(typeName)-1] == "message_set_extension" { - typeName = typeName[:len(typeName)-1] - mset = true - } - - // For text formatting, the package must be exactly what the .proto file declares, - // ignoring overrides such as the go_package option, and with no dot/underscore mapping. - extName := strings.Join(typeName, ".") - if g.file.Package != nil { - extName = *g.file.Package + "." + extName - } - - g.P("var ", ccTypeName, " = &", g.Pkg["proto"], ".ExtensionDesc{") - g.P("ExtendedType: (", extendedType, ")(nil),") - g.P("ExtensionType: (", fieldType, ")(nil),") - g.P("Field: ", field.Number, ",") - g.P(`Name: "`, extName, `",`) - g.P("Tag: ", tag, ",") - g.P(`Filename: "`, g.file.GetName(), `",`) - - g.P("}") - g.P() - - if mset { - // Generate a bit more code to register with message_set.go. - g.addInitf("%s.RegisterMessageSetType((%s)(nil), %d, %q)", g.Pkg["proto"], fieldType, *field.Number, extName) - } - - g.file.addExport(ext, constOrVarSymbol{ccTypeName, "var", ""}) -} - -func (g *Generator) generateInitFunction() { - for _, enum := range g.file.enum { - g.generateEnumRegistration(enum) - } - for _, d := range g.file.desc { - for _, ext := range d.ext { - g.generateExtensionRegistration(ext) - } - } - for _, ext := range g.file.ext { - g.generateExtensionRegistration(ext) - } - if len(g.init) == 0 { - return - } - g.P("func init() {") - for _, l := range g.init { - g.P(l) - } - g.P("}") - g.init = nil -} - -func (g *Generator) generateFileDescriptor(file *FileDescriptor) { - // Make a copy and trim source_code_info data. - // TODO: Trim this more when we know exactly what we need. - pb := proto.Clone(file.FileDescriptorProto).(*descriptor.FileDescriptorProto) - pb.SourceCodeInfo = nil - - b, err := proto.Marshal(pb) - if err != nil { - g.Fail(err.Error()) - } - - var buf bytes.Buffer - w, _ := gzip.NewWriterLevel(&buf, gzip.BestCompression) - w.Write(b) - w.Close() - b = buf.Bytes() - - v := file.VarName() - g.P() - g.P("func init() { ", g.Pkg["proto"], ".RegisterFile(", strconv.Quote(*file.Name), ", ", v, ") }") - g.P("var ", v, " = []byte{") - g.P("// ", len(b), " bytes of a gzipped FileDescriptorProto") - for len(b) > 0 { - n := 16 - if n > len(b) { - n = len(b) - } - - s := "" - for _, c := range b[:n] { - s += fmt.Sprintf("0x%02x,", c) - } - g.P(s) - - b = b[n:] - } - g.P("}") -} - -func (g *Generator) generateEnumRegistration(enum *EnumDescriptor) { - // We always print the full (proto-world) package name here. - pkg := enum.File().GetPackage() - if pkg != "" { - pkg += "." - } - // The full type name - typeName := enum.TypeName() - // The full type name, CamelCased. - ccTypeName := CamelCaseSlice(typeName) - g.addInitf("%s.RegisterEnum(%q, %[3]s_name, %[3]s_value)", g.Pkg["proto"], pkg+ccTypeName, ccTypeName) -} - -func (g *Generator) generateExtensionRegistration(ext *ExtensionDescriptor) { - g.addInitf("%s.RegisterExtension(%s)", g.Pkg["proto"], ext.DescName()) -} - -// And now lots of helper functions. - -// Is c an ASCII lower-case letter? -func isASCIILower(c byte) bool { - return 'a' <= c && c <= 'z' -} - -// Is c an ASCII digit? -func isASCIIDigit(c byte) bool { - return '0' <= c && c <= '9' -} - -// CamelCase returns the CamelCased name. 
-// If there is an interior underscore followed by a lower case letter, -// drop the underscore and convert the letter to upper case. -// There is a remote possibility of this rewrite causing a name collision, -// but it's so remote we're prepared to pretend it's nonexistent - since the -// C++ generator lowercases names, it's extremely unlikely to have two fields -// with different capitalizations. -// In short, _my_field_name_2 becomes XMyFieldName_2. -func CamelCase(s string) string { - if s == "" { - return "" - } - t := make([]byte, 0, 32) - i := 0 - if s[0] == '_' { - // Need a capital letter; drop the '_'. - t = append(t, 'X') - i++ - } - // Invariant: if the next letter is lower case, it must be converted - // to upper case. - // That is, we process a word at a time, where words are marked by _ or - // upper case letter. Digits are treated as words. - for ; i < len(s); i++ { - c := s[i] - if c == '_' && i+1 < len(s) && isASCIILower(s[i+1]) { - continue // Skip the underscore in s. - } - if isASCIIDigit(c) { - t = append(t, c) - continue - } - // Assume we have a letter now - if not, it's a bogus identifier. - // The next word is a sequence of characters that must start upper case. - if isASCIILower(c) { - c ^= ' ' // Make it a capital letter. - } - t = append(t, c) // Guaranteed not lower case. - // Accept lower case sequence that follows. - for i+1 < len(s) && isASCIILower(s[i+1]) { - i++ - t = append(t, s[i]) - } - } - return string(t) -} - -// CamelCaseSlice is like CamelCase, but the argument is a slice of strings to -// be joined with "_". -func CamelCaseSlice(elem []string) string { return CamelCase(strings.Join(elem, "_")) } - -// dottedSlice turns a sliced name into a dotted name. -func dottedSlice(elem []string) string { return strings.Join(elem, ".") } - -// Is this field optional? -func isOptional(field *descriptor.FieldDescriptorProto) bool { - return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_OPTIONAL -} - -// Is this field required? -func isRequired(field *descriptor.FieldDescriptorProto) bool { - return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REQUIRED -} - -// Is this field repeated? -func isRepeated(field *descriptor.FieldDescriptorProto) bool { - return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED -} - -// Is this field a scalar numeric type? -func isScalar(field *descriptor.FieldDescriptorProto) bool { - if field.Type == nil { - return false - } - switch *field.Type { - case descriptor.FieldDescriptorProto_TYPE_DOUBLE, - descriptor.FieldDescriptorProto_TYPE_FLOAT, - descriptor.FieldDescriptorProto_TYPE_INT64, - descriptor.FieldDescriptorProto_TYPE_UINT64, - descriptor.FieldDescriptorProto_TYPE_INT32, - descriptor.FieldDescriptorProto_TYPE_FIXED64, - descriptor.FieldDescriptorProto_TYPE_FIXED32, - descriptor.FieldDescriptorProto_TYPE_BOOL, - descriptor.FieldDescriptorProto_TYPE_UINT32, - descriptor.FieldDescriptorProto_TYPE_ENUM, - descriptor.FieldDescriptorProto_TYPE_SFIXED32, - descriptor.FieldDescriptorProto_TYPE_SFIXED64, - descriptor.FieldDescriptorProto_TYPE_SINT32, - descriptor.FieldDescriptorProto_TYPE_SINT64: - return true - default: - return false - } -} - -// badToUnderscore is the mapping function used to generate Go names from package names, -// which can be dotted in the input .proto file. It replaces non-identifier characters such as -// dot or dash with underscore. 
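// Editor's note (illustrative examples, not part of the original file) for the
// name-mangling helpers in this section:
//
//	CamelCase("foo_bar")          // "FooBar"
//	CamelCase("_my_field_name_2") // "XMyFieldName_2"
//	strings.Map(badToUnderscore, "my.pkg-name") // "my_pkg_name"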
-func badToUnderscore(r rune) rune { - if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' { - return r - } - return '_' -} - -// baseName returns the last path element of the name, with the last dotted suffix removed. -func baseName(name string) string { - // First, find the last element - if i := strings.LastIndex(name, "/"); i >= 0 { - name = name[i+1:] - } - // Now drop the suffix - if i := strings.LastIndex(name, "."); i >= 0 { - name = name[0:i] - } - return name -} - -// The SourceCodeInfo message describes the location of elements of a parsed -// .proto file by way of a "path", which is a sequence of integers that -// describe the route from a FileDescriptorProto to the relevant submessage. -// The path alternates between a field number of a repeated field, and an index -// into that repeated field. The constants below define the field numbers that -// are used. -// -// See descriptor.proto for more information about this. -const ( - // tag numbers in FileDescriptorProto - packagePath = 2 // package - messagePath = 4 // message_type - enumPath = 5 // enum_type - // tag numbers in DescriptorProto - messageFieldPath = 2 // field - messageMessagePath = 3 // nested_type - messageEnumPath = 4 // enum_type - messageOneofPath = 8 // oneof_decl - // tag numbers in EnumDescriptorProto - enumValuePath = 2 // value -) - -var supportTypeAliases bool - -func init() { - for _, tag := range build.Default.ReleaseTags { - if tag == "go1.9" { - supportTypeAliases = true - return - } - } -} diff --git a/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go b/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go deleted file mode 100644 index a9b61036cc0f..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go +++ /dev/null @@ -1,117 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2017 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package remap handles tracking the locations of Go tokens in a source text -across a rewrite by the Go formatter. -*/ -package remap - -import ( - "fmt" - "go/scanner" - "go/token" -) - -// A Location represents a span of byte offsets in the source text. -type Location struct { - Pos, End int // End is exclusive -} - -// A Map represents a mapping between token locations in an input source text -// and locations in the corresponding output text. -type Map map[Location]Location - -// Find reports whether the specified span is recorded by m, and if so returns -// the new location it was mapped to. If the input span was not found, the -// returned location is the same as the input. -func (m Map) Find(pos, end int) (Location, bool) { - key := Location{ - Pos: pos, - End: end, - } - if loc, ok := m[key]; ok { - return loc, true - } - return key, false -} - -func (m Map) add(opos, oend, npos, nend int) { - m[Location{Pos: opos, End: oend}] = Location{Pos: npos, End: nend} -} - -// Compute constructs a location mapping from input to output. An error is -// reported if any of the tokens of output cannot be mapped. -func Compute(input, output []byte) (Map, error) { - itok := tokenize(input) - otok := tokenize(output) - if len(itok) != len(otok) { - return nil, fmt.Errorf("wrong number of tokens, %d ≠ %d", len(itok), len(otok)) - } - m := make(Map) - for i, ti := range itok { - to := otok[i] - if ti.Token != to.Token { - return nil, fmt.Errorf("token %d type mismatch: %s ≠ %s", i+1, ti, to) - } - m.add(ti.pos, ti.end, to.pos, to.end) - } - return m, nil -} - -// tokinfo records the span and type of a source token. -type tokinfo struct { - pos, end int - token.Token -} - -func tokenize(src []byte) []tokinfo { - fs := token.NewFileSet() - var s scanner.Scanner - s.Init(fs.AddFile("src", fs.Base(), len(src)), src, nil, scanner.ScanComments) - var info []tokinfo - for { - pos, next, lit := s.Scan() - switch next { - case token.SEMICOLON: - continue - } - info = append(info, tokinfo{ - pos: int(pos - 1), - end: int(pos + token.Pos(len(lit)) - 1), - Token: next, - }) - if next == token.EOF { - break - } - } - return info -} diff --git a/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go b/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go deleted file mode 100644 index faef1abb7016..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go +++ /dev/null @@ -1,484 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2015 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Package grpc outputs gRPC service descriptions in Go code. -// It runs as a plugin for the Go protocol buffer compiler plugin. -// It is linked in to protoc-gen-go. -package grpc - -import ( - "fmt" - "path" - "strconv" - "strings" - - pb "github.com/golang/protobuf/protoc-gen-go/descriptor" - "github.com/golang/protobuf/protoc-gen-go/generator" -) - -// generatedCodeVersion indicates a version of the generated code. -// It is incremented whenever an incompatibility between the generated code and -// the grpc package is introduced; the generated code references -// a constant, grpc.SupportPackageIsVersionN (where N is generatedCodeVersion). -const generatedCodeVersion = 4 - -// Paths for packages used by code generated in this file, -// relative to the import_prefix of the generator.Generator. -const ( - contextPkgPath = "golang.org/x/net/context" - grpcPkgPath = "google.golang.org/grpc" -) - -func init() { - generator.RegisterPlugin(new(grpc)) -} - -// grpc is an implementation of the Go protocol buffer compiler's -// plugin architecture. It generates bindings for gRPC support. -type grpc struct { - gen *generator.Generator -} - -// Name returns the name of this plugin, "grpc". -func (g *grpc) Name() string { - return "grpc" -} - -// The names for packages imported in the generated code. -// They may vary from the final path component of the import path -// if the name is used by other packages. -var ( - contextPkg string - grpcPkg string -) - -// Init initializes the plugin. -func (g *grpc) Init(gen *generator.Generator) { - g.gen = gen - contextPkg = generator.RegisterUniquePackageName("context", nil) - grpcPkg = generator.RegisterUniquePackageName("grpc", nil) -} - -// Given a type name defined in a .proto, return its object. -// Also record that we're using it, to guarantee the associated import. -func (g *grpc) objectNamed(name string) generator.Object { - g.gen.RecordTypeUse(name) - return g.gen.ObjectNamed(name) -} - -// Given a type name defined in a .proto, return its name as we will print it. -func (g *grpc) typeName(str string) string { - return g.gen.TypeName(g.objectNamed(str)) -} - -// P forwards to g.gen.P. -func (g *grpc) P(args ...interface{}) { g.gen.P(args...) } - -// Generate generates code for the services in the given file. 
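// Editor's note (illustrative sketch, not part of the original file): for any
// .proto file declaring at least one service, Generate below first emits a
// guard block along the lines of:
//
//	var _ context.Context
//	var _ grpc.ClientConn
//	const _ = grpc.SupportPackageIsVersion4
//
// so generated code that drifts out of sync with the grpc package fails at
// compile time instead of at runtime.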
-func (g *grpc) Generate(file *generator.FileDescriptor) { - if len(file.FileDescriptorProto.Service) == 0 { - return - } - - g.P("// Reference imports to suppress errors if they are not otherwise used.") - g.P("var _ ", contextPkg, ".Context") - g.P("var _ ", grpcPkg, ".ClientConn") - g.P() - - // Assert version compatibility. - g.P("// This is a compile-time assertion to ensure that this generated file") - g.P("// is compatible with the grpc package it is being compiled against.") - g.P("const _ = ", grpcPkg, ".SupportPackageIsVersion", generatedCodeVersion) - g.P() - - for i, service := range file.FileDescriptorProto.Service { - g.generateService(file, service, i) - } -} - -// GenerateImports generates the import declaration for this file. -func (g *grpc) GenerateImports(file *generator.FileDescriptor) { - if len(file.FileDescriptorProto.Service) == 0 { - return - } - g.P("import (") - g.P(contextPkg, " ", generator.GoImportPath(path.Join(string(g.gen.ImportPrefix), contextPkgPath))) - g.P(grpcPkg, " ", generator.GoImportPath(path.Join(string(g.gen.ImportPrefix), grpcPkgPath))) - g.P(")") - g.P() -} - -// reservedClientName records whether a client name is reserved on the client side. -var reservedClientName = map[string]bool{ - // TODO: do we need any in gRPC? -} - -func unexport(s string) string { return strings.ToLower(s[:1]) + s[1:] } - -// deprecationComment is the standard comment added to deprecated -// messages, fields, enums, and enum values. -var deprecationComment = "// Deprecated: Do not use." - -// generateService generates all the code for the named service. -func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.ServiceDescriptorProto, index int) { - path := fmt.Sprintf("6,%d", index) // 6 means service. - - origServName := service.GetName() - fullServName := origServName - if pkg := file.GetPackage(); pkg != "" { - fullServName = pkg + "." + fullServName - } - servName := generator.CamelCase(origServName) - deprecated := service.GetOptions().GetDeprecated() - - g.P() - g.P(fmt.Sprintf(`// %sClient is the client API for %s service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.`, servName, servName)) - - // Client interface. - if deprecated { - g.P("//") - g.P(deprecationComment) - } - g.P("type ", servName, "Client interface {") - for i, method := range service.Method { - g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service. - g.P(g.generateClientSignature(servName, method)) - } - g.P("}") - g.P() - - // Client structure. - g.P("type ", unexport(servName), "Client struct {") - g.P("cc *", grpcPkg, ".ClientConn") - g.P("}") - g.P() - - // NewClient factory. - if deprecated { - g.P(deprecationComment) - } - g.P("func New", servName, "Client (cc *", grpcPkg, ".ClientConn) ", servName, "Client {") - g.P("return &", unexport(servName), "Client{cc}") - g.P("}") - g.P() - - var methodIndex, streamIndex int - serviceDescVar := "_" + servName + "_serviceDesc" - // Client method implementations. 
- for _, method := range service.Method { - var descExpr string - if !method.GetServerStreaming() && !method.GetClientStreaming() { - // Unary RPC method - descExpr = fmt.Sprintf("&%s.Methods[%d]", serviceDescVar, methodIndex) - methodIndex++ - } else { - // Streaming RPC method - descExpr = fmt.Sprintf("&%s.Streams[%d]", serviceDescVar, streamIndex) - streamIndex++ - } - g.generateClientMethod(servName, fullServName, serviceDescVar, method, descExpr) - } - - // Server interface. - serverType := servName + "Server" - g.P("// ", serverType, " is the server API for ", servName, " service.") - if deprecated { - g.P("//") - g.P(deprecationComment) - } - g.P("type ", serverType, " interface {") - for i, method := range service.Method { - g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service. - g.P(g.generateServerSignature(servName, method)) - } - g.P("}") - g.P() - - // Server registration. - if deprecated { - g.P(deprecationComment) - } - g.P("func Register", servName, "Server(s *", grpcPkg, ".Server, srv ", serverType, ") {") - g.P("s.RegisterService(&", serviceDescVar, `, srv)`) - g.P("}") - g.P() - - // Server handler implementations. - var handlerNames []string - for _, method := range service.Method { - hname := g.generateServerMethod(servName, fullServName, method) - handlerNames = append(handlerNames, hname) - } - - // Service descriptor. - g.P("var ", serviceDescVar, " = ", grpcPkg, ".ServiceDesc {") - g.P("ServiceName: ", strconv.Quote(fullServName), ",") - g.P("HandlerType: (*", serverType, ")(nil),") - g.P("Methods: []", grpcPkg, ".MethodDesc{") - for i, method := range service.Method { - if method.GetServerStreaming() || method.GetClientStreaming() { - continue - } - g.P("{") - g.P("MethodName: ", strconv.Quote(method.GetName()), ",") - g.P("Handler: ", handlerNames[i], ",") - g.P("},") - } - g.P("},") - g.P("Streams: []", grpcPkg, ".StreamDesc{") - for i, method := range service.Method { - if !method.GetServerStreaming() && !method.GetClientStreaming() { - continue - } - g.P("{") - g.P("StreamName: ", strconv.Quote(method.GetName()), ",") - g.P("Handler: ", handlerNames[i], ",") - if method.GetServerStreaming() { - g.P("ServerStreams: true,") - } - if method.GetClientStreaming() { - g.P("ClientStreams: true,") - } - g.P("},") - } - g.P("},") - g.P("Metadata: \"", file.GetName(), "\",") - g.P("}") - g.P() -} - -// generateClientSignature returns the client-side signature for a method. 
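// Editor's note (illustrative sketch, not part of the original file): for a
// hypothetical unary method `rpc GetFeature(Point) returns (Feature);` on a
// RouteGuide service, generateClientSignature below produces:
//
//	GetFeature(ctx context.Context, in *Point, opts ...grpc.CallOption) (*Feature, error)
//
// Streaming methods instead return a generated stream type such as
// RouteGuide_ListFeaturesClient, and only client-streaming methods drop the
// `in` parameter.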
-func (g *grpc) generateClientSignature(servName string, method *pb.MethodDescriptorProto) string { - origMethName := method.GetName() - methName := generator.CamelCase(origMethName) - if reservedClientName[methName] { - methName += "_" - } - reqArg := ", in *" + g.typeName(method.GetInputType()) - if method.GetClientStreaming() { - reqArg = "" - } - respName := "*" + g.typeName(method.GetOutputType()) - if method.GetServerStreaming() || method.GetClientStreaming() { - respName = servName + "_" + generator.CamelCase(origMethName) + "Client" - } - return fmt.Sprintf("%s(ctx %s.Context%s, opts ...%s.CallOption) (%s, error)", methName, contextPkg, reqArg, grpcPkg, respName) -} - -func (g *grpc) generateClientMethod(servName, fullServName, serviceDescVar string, method *pb.MethodDescriptorProto, descExpr string) { - sname := fmt.Sprintf("/%s/%s", fullServName, method.GetName()) - methName := generator.CamelCase(method.GetName()) - inType := g.typeName(method.GetInputType()) - outType := g.typeName(method.GetOutputType()) - - if method.GetOptions().GetDeprecated() { - g.P(deprecationComment) - } - g.P("func (c *", unexport(servName), "Client) ", g.generateClientSignature(servName, method), "{") - if !method.GetServerStreaming() && !method.GetClientStreaming() { - g.P("out := new(", outType, ")") - // TODO: Pass descExpr to Invoke. - g.P(`err := c.cc.Invoke(ctx, "`, sname, `", in, out, opts...)`) - g.P("if err != nil { return nil, err }") - g.P("return out, nil") - g.P("}") - g.P() - return - } - streamType := unexport(servName) + methName + "Client" - g.P("stream, err := c.cc.NewStream(ctx, ", descExpr, `, "`, sname, `", opts...)`) - g.P("if err != nil { return nil, err }") - g.P("x := &", streamType, "{stream}") - if !method.GetClientStreaming() { - g.P("if err := x.ClientStream.SendMsg(in); err != nil { return nil, err }") - g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }") - } - g.P("return x, nil") - g.P("}") - g.P() - - genSend := method.GetClientStreaming() - genRecv := method.GetServerStreaming() - genCloseAndRecv := !method.GetServerStreaming() - - // Stream auxiliary types and methods. - g.P("type ", servName, "_", methName, "Client interface {") - if genSend { - g.P("Send(*", inType, ") error") - } - if genRecv { - g.P("Recv() (*", outType, ", error)") - } - if genCloseAndRecv { - g.P("CloseAndRecv() (*", outType, ", error)") - } - g.P(grpcPkg, ".ClientStream") - g.P("}") - g.P() - - g.P("type ", streamType, " struct {") - g.P(grpcPkg, ".ClientStream") - g.P("}") - g.P() - - if genSend { - g.P("func (x *", streamType, ") Send(m *", inType, ") error {") - g.P("return x.ClientStream.SendMsg(m)") - g.P("}") - g.P() - } - if genRecv { - g.P("func (x *", streamType, ") Recv() (*", outType, ", error) {") - g.P("m := new(", outType, ")") - g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }") - g.P("return m, nil") - g.P("}") - g.P() - } - if genCloseAndRecv { - g.P("func (x *", streamType, ") CloseAndRecv() (*", outType, ", error) {") - g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }") - g.P("m := new(", outType, ")") - g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }") - g.P("return m, nil") - g.P("}") - g.P() - } -} - -// generateServerSignature returns the server-side signature for a method. 
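The streaming wrappers generated above are thin adapters over `grpc.ClientStream`: `Send` forwards to `SendMsg`, and `CloseAndRecv` closes the send direction and then blocks on `RecvMsg` for the single reply. From a caller's perspective it reads like this (service and message names are hypothetical):

```go
// Sketch of consuming a generated client-streaming stub.
func recordAll(ctx context.Context, client RouteGuideClient, points []*Point) (*RouteSummary, error) {
	stream, err := client.RecordRoute(ctx) // no request argument for client streaming
	if err != nil {
		return nil, err
	}
	for _, p := range points {
		if err := stream.Send(p); err != nil { // delegates to ClientStream.SendMsg
			return nil, err
		}
	}
	// Close the send direction, then wait for the server's single response.
	return stream.CloseAndRecv()
}
```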
-func (g *grpc) generateServerSignature(servName string, method *pb.MethodDescriptorProto) string { - origMethName := method.GetName() - methName := generator.CamelCase(origMethName) - if reservedClientName[methName] { - methName += "_" - } - - var reqArgs []string - ret := "error" - if !method.GetServerStreaming() && !method.GetClientStreaming() { - reqArgs = append(reqArgs, contextPkg+".Context") - ret = "(*" + g.typeName(method.GetOutputType()) + ", error)" - } - if !method.GetClientStreaming() { - reqArgs = append(reqArgs, "*"+g.typeName(method.GetInputType())) - } - if method.GetServerStreaming() || method.GetClientStreaming() { - reqArgs = append(reqArgs, servName+"_"+generator.CamelCase(origMethName)+"Server") - } - - return methName + "(" + strings.Join(reqArgs, ", ") + ") " + ret -} - -func (g *grpc) generateServerMethod(servName, fullServName string, method *pb.MethodDescriptorProto) string { - methName := generator.CamelCase(method.GetName()) - hname := fmt.Sprintf("_%s_%s_Handler", servName, methName) - inType := g.typeName(method.GetInputType()) - outType := g.typeName(method.GetOutputType()) - - if !method.GetServerStreaming() && !method.GetClientStreaming() { - g.P("func ", hname, "(srv interface{}, ctx ", contextPkg, ".Context, dec func(interface{}) error, interceptor ", grpcPkg, ".UnaryServerInterceptor) (interface{}, error) {") - g.P("in := new(", inType, ")") - g.P("if err := dec(in); err != nil { return nil, err }") - g.P("if interceptor == nil { return srv.(", servName, "Server).", methName, "(ctx, in) }") - g.P("info := &", grpcPkg, ".UnaryServerInfo{") - g.P("Server: srv,") - g.P("FullMethod: ", strconv.Quote(fmt.Sprintf("/%s/%s", fullServName, methName)), ",") - g.P("}") - g.P("handler := func(ctx ", contextPkg, ".Context, req interface{}) (interface{}, error) {") - g.P("return srv.(", servName, "Server).", methName, "(ctx, req.(*", inType, "))") - g.P("}") - g.P("return interceptor(ctx, in, info, handler)") - g.P("}") - g.P() - return hname - } - streamType := unexport(servName) + methName + "Server" - g.P("func ", hname, "(srv interface{}, stream ", grpcPkg, ".ServerStream) error {") - if !method.GetClientStreaming() { - g.P("m := new(", inType, ")") - g.P("if err := stream.RecvMsg(m); err != nil { return err }") - g.P("return srv.(", servName, "Server).", methName, "(m, &", streamType, "{stream})") - } else { - g.P("return srv.(", servName, "Server).", methName, "(&", streamType, "{stream})") - } - g.P("}") - g.P() - - genSend := method.GetServerStreaming() - genSendAndClose := !method.GetServerStreaming() - genRecv := method.GetClientStreaming() - - // Stream auxiliary types and methods. 
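For the unary branch above, the emitted handler is what `grpc.Server` invokes through the service descriptor: it decodes the request, then either calls the implementation directly or routes through the registered interceptor. For the hypothetical `Greeter` service it comes out roughly as:

```go
// Sketch of a generated unary server handler (illustrative names).
func _Greeter_SayHello_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(HelloRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(GreeterServer).SayHello(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/helloworld.Greeter/SayHello",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(GreeterServer).SayHello(ctx, req.(*HelloRequest))
	}
	return interceptor(ctx, in, info, handler)
}
```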
- g.P("type ", servName, "_", methName, "Server interface {") - if genSend { - g.P("Send(*", outType, ") error") - } - if genSendAndClose { - g.P("SendAndClose(*", outType, ") error") - } - if genRecv { - g.P("Recv() (*", inType, ", error)") - } - g.P(grpcPkg, ".ServerStream") - g.P("}") - g.P() - - g.P("type ", streamType, " struct {") - g.P(grpcPkg, ".ServerStream") - g.P("}") - g.P() - - if genSend { - g.P("func (x *", streamType, ") Send(m *", outType, ") error {") - g.P("return x.ServerStream.SendMsg(m)") - g.P("}") - g.P() - } - if genSendAndClose { - g.P("func (x *", streamType, ") SendAndClose(m *", outType, ") error {") - g.P("return x.ServerStream.SendMsg(m)") - g.P("}") - g.P() - } - if genRecv { - g.P("func (x *", streamType, ") Recv() (*", inType, ", error) {") - g.P("m := new(", inType, ")") - g.P("if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err }") - g.P("return m, nil") - g.P("}") - g.P() - } - - return hname -} diff --git a/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go b/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go deleted file mode 100644 index 532a550050ee..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go +++ /dev/null @@ -1,34 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2015 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package main - -import _ "github.com/golang/protobuf/protoc-gen-go/grpc" diff --git a/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/main.go b/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/main.go deleted file mode 100644 index 8e2486de0b2e..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/main.go +++ /dev/null @@ -1,98 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. 
All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// protoc-gen-go is a plugin for the Google protocol buffer compiler to generate -// Go code. Run it by building this program and putting it in your path with -// the name -// protoc-gen-go -// That word 'go' at the end becomes part of the option string set for the -// protocol compiler, so once the protocol compiler (protoc) is installed -// you can run -// protoc --go_out=output_directory input_directory/file.proto -// to generate Go bindings for the protocol defined by file.proto. -// With that input, the output will be written to -// output_directory/file.pb.go -// -// The generated code is documented in the package comment for -// the library. -// -// See the README and documentation for protocol buffers to learn more: -// https://developers.google.com/protocol-buffers/ -package main - -import ( - "io/ioutil" - "os" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/protoc-gen-go/generator" -) - -func main() { - // Begin by allocating a generator. The request and response structures are stored there - // so we can do error handling easily - the response structure contains the field to - // report failure. - g := generator.New() - - data, err := ioutil.ReadAll(os.Stdin) - if err != nil { - g.Error(err, "reading input") - } - - if err := proto.Unmarshal(data, g.Request); err != nil { - g.Error(err, "parsing input proto") - } - - if len(g.Request.FileToGenerate) == 0 { - g.Fail("no files to generate") - } - - g.CommandLineParameters(g.Request.GetParameter()) - - // Create a wrapped version of the Descriptors and EnumDescriptors that - // point to the file that defines them. - g.WrapTypes() - - g.SetPackageNames() - g.BuildTypeNameMap() - - g.GenerateAllFiles() - - // Send back the results. 
- data, err = proto.Marshal(g.Response) - if err != nil { - g.Error(err, "failed to marshal output proto") - } - _, err = os.Stdout.Write(data) - if err != nil { - g.Error(err, "failed to write output proto") - } -} diff --git a/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go b/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go deleted file mode 100644 index 61bfc10e02e7..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go +++ /dev/null @@ -1,369 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/compiler/plugin.proto - -/* -Package plugin_go is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/compiler/plugin.proto - -It has these top-level messages: - Version - CodeGeneratorRequest - CodeGeneratorResponse -*/ -package plugin_go - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// The version number of protocol compiler. -type Version struct { - Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` - Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` - Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` - // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should - // be empty for mainline stable releases. - Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Version) Reset() { *m = Version{} } -func (m *Version) String() string { return proto.CompactTextString(m) } -func (*Version) ProtoMessage() {} -func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (m *Version) Unmarshal(b []byte) error { - return xxx_messageInfo_Version.Unmarshal(m, b) -} -func (m *Version) Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Version.Marshal(b, m, deterministic) -} -func (dst *Version) XXX_Merge(src proto.Message) { - xxx_messageInfo_Version.Merge(dst, src) -} -func (m *Version) XXX_Size() int { - return xxx_messageInfo_Version.Size(m) -} -func (m *Version) XXX_DiscardUnknown() { - xxx_messageInfo_Version.DiscardUnknown(m) -} - -var xxx_messageInfo_Version proto.InternalMessageInfo - -func (m *Version) GetMajor() int32 { - if m != nil && m.Major != nil { - return *m.Major - } - return 0 -} - -func (m *Version) GetMinor() int32 { - if m != nil && m.Minor != nil { - return *m.Minor - } - return 0 -} - -func (m *Version) GetPatch() int32 { - if m != nil && m.Patch != nil { - return *m.Patch - } - return 0 -} - -func (m *Version) GetSuffix() string { - if m != nil && m.Suffix != nil { - return *m.Suffix - } - return "" -} - -// An encoded CodeGeneratorRequest is written to the plugin's stdin. 
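The `main` function above is the entire plugin contract: protoc writes a serialized `CodeGeneratorRequest` to the plugin's stdin and expects a serialized `CodeGeneratorResponse` on stdout. A minimal standalone plugin speaking the same protocol could look like this (the output file names and contents are purely illustrative):

```go
package main

import (
	"io/ioutil"
	"log"
	"os"

	"github.com/golang/protobuf/proto"
	plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
)

func main() {
	// protoc hands us the request on stdin.
	data, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		log.Fatal(err)
	}
	req := new(plugin.CodeGeneratorRequest)
	if err := proto.Unmarshal(data, req); err != nil {
		log.Fatal(err)
	}

	// Emit one text file per .proto file we were asked to handle.
	resp := new(plugin.CodeGeneratorResponse)
	for _, name := range req.GetFileToGenerate() {
		resp.File = append(resp.File, &plugin.CodeGeneratorResponse_File{
			Name:    proto.String(name + ".txt"),
			Content: proto.String("// generated from " + name + "\n"),
		})
	}

	// protoc reads the response from stdout.
	out, err := proto.Marshal(resp)
	if err != nil {
		log.Fatal(err)
	}
	os.Stdout.Write(out)
}
```

Installed as `protoc-gen-demo` somewhere on `$PATH`, it would be driven by `protoc --demo_out=. file.proto`.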
-type CodeGeneratorRequest struct { - // The .proto files that were explicitly listed on the command-line. The - // code generator should generate code only for these files. Each file's - // descriptor will be included in proto_file, below. - FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"` - // The generator parameter passed on the command-line. - Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` - // FileDescriptorProtos for all files in files_to_generate and everything - // they import. The files will appear in topological order, so each file - // appears before any file that imports it. - // - // protoc guarantees that all proto_files will be written after - // the fields above, even though this is not technically guaranteed by the - // protobuf wire format. This theoretically could allow a plugin to stream - // in the FileDescriptorProtos and handle them one by one rather than read - // the entire set into memory at once. However, as of this writing, this - // is not similarly optimized on protoc's end -- it will store all fields in - // memory at once before sending them to the plugin. - // - // Type names of fields and extensions in the FileDescriptorProto are always - // fully qualified. - ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"` - // The version number of protocol compiler. - CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CodeGeneratorRequest) Reset() { *m = CodeGeneratorRequest{} } -func (m *CodeGeneratorRequest) String() string { return proto.CompactTextString(m) } -func (*CodeGeneratorRequest) ProtoMessage() {} -func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } -func (m *CodeGeneratorRequest) Unmarshal(b []byte) error { - return xxx_messageInfo_CodeGeneratorRequest.Unmarshal(m, b) -} -func (m *CodeGeneratorRequest) Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CodeGeneratorRequest.Marshal(b, m, deterministic) -} -func (dst *CodeGeneratorRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CodeGeneratorRequest.Merge(dst, src) -} -func (m *CodeGeneratorRequest) XXX_Size() int { - return xxx_messageInfo_CodeGeneratorRequest.Size(m) -} -func (m *CodeGeneratorRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CodeGeneratorRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CodeGeneratorRequest proto.InternalMessageInfo - -func (m *CodeGeneratorRequest) GetFileToGenerate() []string { - if m != nil { - return m.FileToGenerate - } - return nil -} - -func (m *CodeGeneratorRequest) GetParameter() string { - if m != nil && m.Parameter != nil { - return *m.Parameter - } - return "" -} - -func (m *CodeGeneratorRequest) GetProtoFile() []*google_protobuf.FileDescriptorProto { - if m != nil { - return m.ProtoFile - } - return nil -} - -func (m *CodeGeneratorRequest) GetCompilerVersion() *Version { - if m != nil { - return m.CompilerVersion - } - return nil -} - -// The plugin writes an encoded CodeGeneratorResponse to stdout. -type CodeGeneratorResponse struct { - // Error message. If non-empty, code generation failed. 
The plugin process - // should exit with status code zero even if it reports an error in this way. - // - // This should be used to indicate errors in .proto files which prevent the - // code generator from generating correct code. Errors which indicate a - // problem in protoc itself -- such as the input CodeGeneratorRequest being - // unparseable -- should be reported by writing a message to stderr and - // exiting with a non-zero status code. - Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` - File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CodeGeneratorResponse) Reset() { *m = CodeGeneratorResponse{} } -func (m *CodeGeneratorResponse) String() string { return proto.CompactTextString(m) } -func (*CodeGeneratorResponse) ProtoMessage() {} -func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } -func (m *CodeGeneratorResponse) Unmarshal(b []byte) error { - return xxx_messageInfo_CodeGeneratorResponse.Unmarshal(m, b) -} -func (m *CodeGeneratorResponse) Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CodeGeneratorResponse.Marshal(b, m, deterministic) -} -func (dst *CodeGeneratorResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CodeGeneratorResponse.Merge(dst, src) -} -func (m *CodeGeneratorResponse) XXX_Size() int { - return xxx_messageInfo_CodeGeneratorResponse.Size(m) -} -func (m *CodeGeneratorResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CodeGeneratorResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_CodeGeneratorResponse proto.InternalMessageInfo - -func (m *CodeGeneratorResponse) GetError() string { - if m != nil && m.Error != nil { - return *m.Error - } - return "" -} - -func (m *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File { - if m != nil { - return m.File - } - return nil -} - -// Represents a single generated file. -type CodeGeneratorResponse_File struct { - // The file name, relative to the output directory. The name must not - // contain "." or ".." components and must be relative, not be absolute (so, - // the file cannot lie outside the output directory). "/" must be used as - // the path separator, not "\". - // - // If the name is omitted, the content will be appended to the previous - // file. This allows the generator to break large files into small chunks, - // and allows the generated text to be streamed back to protoc so that large - // files need not reside completely in memory at one time. Note that as of - // this writing protoc does not optimize for this -- it will read the entire - // CodeGeneratorResponse before writing files to disk. - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // If non-empty, indicates that the named file should already exist, and the - // content here is to be inserted into that file at a defined insertion - // point. This feature allows a code generator to extend the output - // produced by another code generator. The original generator may provide - // insertion points by placing special annotations in the file that look - // like: - // @@protoc_insertion_point(NAME) - // The annotation can have arbitrary text before and after it on the line, - // which allows it to be placed in a comment. 
NAME should be replaced with - // an identifier naming the point -- this is what other generators will use - // as the insertion_point. Code inserted at this point will be placed - // immediately above the line containing the insertion point (thus multiple - // insertions to the same point will come out in the order they were added). - // The double-@ is intended to make it unlikely that the generated code - // could contain things that look like insertion points by accident. - // - // For example, the C++ code generator places the following line in the - // .pb.h files that it generates: - // // @@protoc_insertion_point(namespace_scope) - // This line appears within the scope of the file's package namespace, but - // outside of any particular class. Another plugin can then specify the - // insertion_point "namespace_scope" to generate additional classes or - // other declarations that should be placed in this scope. - // - // Note that if the line containing the insertion point begins with - // whitespace, the same whitespace will be added to every line of the - // inserted text. This is useful for languages like Python, where - // indentation matters. In these languages, the insertion point comment - // should be indented the same amount as any inserted code will need to be - // in order to work correctly in that context. - // - // The code generator that generates the initial file and the one which - // inserts into it must both run as part of a single invocation of protoc. - // Code generators are executed in the order in which they appear on the - // command line. - // - // If |insertion_point| is present, |name| must also be present. - InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"` - // The file contents. 
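Insertion points let one generator extend another's output within a single protoc invocation. Continuing the response-building sketch above (the file and point names here are illustrative), a follow-up plugin targets an insertion by naming the already-generated file plus the point:

```go
// Sketch: inject code into a file produced earlier in the same protoc
// run. The target file must contain a line such as
//   // @@protoc_insertion_point(custom_area)
resp.File = append(resp.File, &plugin.CodeGeneratorResponse_File{
	Name:           proto.String("demo.pb.go"),
	InsertionPoint: proto.String("custom_area"),
	Content:        proto.String("// injected by a follow-up generator\n"),
})
```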
- Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CodeGeneratorResponse_File) Reset() { *m = CodeGeneratorResponse_File{} } -func (m *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(m) } -func (*CodeGeneratorResponse_File) ProtoMessage() {} -func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } -func (m *CodeGeneratorResponse_File) Unmarshal(b []byte) error { - return xxx_messageInfo_CodeGeneratorResponse_File.Unmarshal(m, b) -} -func (m *CodeGeneratorResponse_File) Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CodeGeneratorResponse_File.Marshal(b, m, deterministic) -} -func (dst *CodeGeneratorResponse_File) XXX_Merge(src proto.Message) { - xxx_messageInfo_CodeGeneratorResponse_File.Merge(dst, src) -} -func (m *CodeGeneratorResponse_File) XXX_Size() int { - return xxx_messageInfo_CodeGeneratorResponse_File.Size(m) -} -func (m *CodeGeneratorResponse_File) XXX_DiscardUnknown() { - xxx_messageInfo_CodeGeneratorResponse_File.DiscardUnknown(m) -} - -var xxx_messageInfo_CodeGeneratorResponse_File proto.InternalMessageInfo - -func (m *CodeGeneratorResponse_File) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *CodeGeneratorResponse_File) GetInsertionPoint() string { - if m != nil && m.InsertionPoint != nil { - return *m.InsertionPoint - } - return "" -} - -func (m *CodeGeneratorResponse_File) GetContent() string { - if m != nil && m.Content != nil { - return *m.Content - } - return "" -} - -func init() { - proto.RegisterType((*Version)(nil), "google.protobuf.compiler.Version") - proto.RegisterType((*CodeGeneratorRequest)(nil), "google.protobuf.compiler.CodeGeneratorRequest") - proto.RegisterType((*CodeGeneratorResponse)(nil), "google.protobuf.compiler.CodeGeneratorResponse") - proto.RegisterType((*CodeGeneratorResponse_File)(nil), "google.protobuf.compiler.CodeGeneratorResponse.File") -} - -func init() { proto.RegisterFile("google/protobuf/compiler/plugin.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 417 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xcf, 0x6a, 0x14, 0x41, - 0x10, 0xc6, 0x19, 0x77, 0x63, 0x98, 0x8a, 0x64, 0x43, 0x13, 0xa5, 0x09, 0x39, 0x8c, 0x8b, 0xe2, - 0x5c, 0x32, 0x0b, 0xc1, 0x8b, 0x78, 0x4b, 0x44, 0x3d, 0x78, 0x58, 0x1a, 0xf1, 0x20, 0xc8, 0x30, - 0x99, 0xd4, 0x74, 0x5a, 0x66, 0xba, 0xc6, 0xee, 0x1e, 0xf1, 0x49, 0x7d, 0x0f, 0xdf, 0x40, 0xfa, - 0xcf, 0x24, 0xb2, 0xb8, 0xa7, 0xee, 0xef, 0x57, 0xd5, 0xd5, 0x55, 0x1f, 0x05, 0x2f, 0x25, 0x91, - 0xec, 0x71, 0x33, 0x1a, 0x72, 0x74, 0x33, 0x75, 0x9b, 0x96, 0x86, 0x51, 0xf5, 0x68, 0x36, 0x63, - 0x3f, 0x49, 0xa5, 0xab, 0x10, 0x60, 0x3c, 0xa6, 0x55, 0x73, 0x5a, 0x35, 0xa7, 0x9d, 0x15, 0xbb, - 0x05, 0x6e, 0xd1, 0xb6, 0x46, 0x8d, 0x8e, 0x4c, 0xcc, 0x5e, 0xb7, 0x70, 0xf8, 0x05, 0x8d, 0x55, - 0xa4, 0xd9, 0x29, 0x1c, 0x0c, 0xcd, 0x77, 0x32, 0x3c, 0x2b, 0xb2, 0xf2, 0x40, 0x44, 0x11, 0xa8, - 0xd2, 0x64, 0xf8, 0xa3, 0x44, 0xbd, 0xf0, 0x74, 0x6c, 0x5c, 0x7b, 0xc7, 0x17, 0x91, 0x06, 0xc1, - 0x9e, 0xc1, 0x63, 0x3b, 0x75, 0x9d, 0xfa, 0xc5, 0x97, 0x45, 0x56, 0xe6, 0x22, 0xa9, 0xf5, 0x9f, - 0x0c, 0x4e, 0xaf, 0xe9, 0x16, 0x3f, 0xa0, 0x46, 0xd3, 0x38, 0x32, 0x02, 0x7f, 0x4c, 0x68, 0x1d, - 0x2b, 0xe1, 0xa4, 0x53, 0x3d, 0xd6, 0x8e, 0x6a, 0x19, 0x63, 
0xc8, 0xb3, 0x62, 0x51, 0xe6, 0xe2, - 0xd8, 0xf3, 0xcf, 0x94, 0x5e, 0x20, 0x3b, 0x87, 0x7c, 0x6c, 0x4c, 0x33, 0xa0, 0xc3, 0xd8, 0x4a, - 0x2e, 0x1e, 0x00, 0xbb, 0x06, 0x08, 0xe3, 0xd4, 0xfe, 0x15, 0x5f, 0x15, 0x8b, 0xf2, 0xe8, 0xf2, - 0x45, 0xb5, 0x6b, 0xcb, 0x7b, 0xd5, 0xe3, 0xbb, 0x7b, 0x03, 0xb6, 0x1e, 0x8b, 0x3c, 0x44, 0x7d, - 0x84, 0x7d, 0x82, 0x93, 0xd9, 0xb8, 0xfa, 0x67, 0xf4, 0x24, 0x8c, 0x77, 0x74, 0xf9, 0xbc, 0xda, - 0xe7, 0x70, 0x95, 0xcc, 0x13, 0xab, 0x99, 0x24, 0xb0, 0xfe, 0x9d, 0xc1, 0xd3, 0x9d, 0x99, 0xed, - 0x48, 0xda, 0xa2, 0xf7, 0x0e, 0x8d, 0x49, 0x3e, 0xe7, 0x22, 0x0a, 0xf6, 0x11, 0x96, 0xff, 0x34, - 0xff, 0x7a, 0xff, 0x8f, 0xff, 0x2d, 0x1a, 0x66, 0x13, 0xa1, 0xc2, 0xd9, 0x37, 0x58, 0x86, 0x79, - 0x18, 0x2c, 0x75, 0x33, 0x60, 0xfa, 0x26, 0xdc, 0xd9, 0x2b, 0x58, 0x29, 0x6d, 0xd1, 0x38, 0x45, - 0xba, 0x1e, 0x49, 0x69, 0x97, 0xcc, 0x3c, 0xbe, 0xc7, 0x5b, 0x4f, 0x19, 0x87, 0xc3, 0x96, 0xb4, - 0x43, 0xed, 0xf8, 0x2a, 0x24, 0xcc, 0xf2, 0x4a, 0xc2, 0x79, 0x4b, 0xc3, 0xde, 0xfe, 0xae, 0x9e, - 0x6c, 0xc3, 0x6e, 0x06, 0x7b, 0xed, 0xd7, 0x37, 0x52, 0xb9, 0xbb, 0xe9, 0xc6, 0x87, 0x37, 0x92, - 0xfa, 0x46, 0xcb, 0x87, 0x65, 0x0c, 0x97, 0xf6, 0x42, 0xa2, 0xbe, 0x90, 0x94, 0x56, 0xfa, 0x6d, - 0x3c, 0x6a, 0x49, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x15, 0x40, 0xc5, 0xfe, 0x02, 0x00, - 0x00, -} diff --git a/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden b/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden deleted file mode 100644 index 8953d0ff827e..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden +++ /dev/null @@ -1,83 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google/protobuf/compiler/plugin.proto -// DO NOT EDIT! - -package google_protobuf_compiler - -import proto "github.com/golang/protobuf/proto" -import "math" -import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" - -// Reference proto and math imports to suppress error if they are not otherwise used. 
-var _ = proto.GetString -var _ = math.Inf - -type CodeGeneratorRequest struct { - FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate" json:"file_to_generate,omitempty"` - Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` - ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file" json:"proto_file,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *CodeGeneratorRequest) Reset() { *this = CodeGeneratorRequest{} } -func (this *CodeGeneratorRequest) String() string { return proto.CompactTextString(this) } -func (*CodeGeneratorRequest) ProtoMessage() {} - -func (this *CodeGeneratorRequest) GetParameter() string { - if this != nil && this.Parameter != nil { - return *this.Parameter - } - return "" -} - -type CodeGeneratorResponse struct { - Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` - File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *CodeGeneratorResponse) Reset() { *this = CodeGeneratorResponse{} } -func (this *CodeGeneratorResponse) String() string { return proto.CompactTextString(this) } -func (*CodeGeneratorResponse) ProtoMessage() {} - -func (this *CodeGeneratorResponse) GetError() string { - if this != nil && this.Error != nil { - return *this.Error - } - return "" -} - -type CodeGeneratorResponse_File struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point" json:"insertion_point,omitempty"` - Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *CodeGeneratorResponse_File) Reset() { *this = CodeGeneratorResponse_File{} } -func (this *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(this) } -func (*CodeGeneratorResponse_File) ProtoMessage() {} - -func (this *CodeGeneratorResponse_File) GetName() string { - if this != nil && this.Name != nil { - return *this.Name - } - return "" -} - -func (this *CodeGeneratorResponse_File) GetInsertionPoint() string { - if this != nil && this.InsertionPoint != nil { - return *this.InsertionPoint - } - return "" -} - -func (this *CodeGeneratorResponse_File) GetContent() string { - if this != nil && this.Content != nil { - return *this.Content - } - return "" -} - -func init() { -} diff --git a/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto b/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto deleted file mode 100644 index 5b5574529ed4..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto +++ /dev/null @@ -1,167 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
-// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Author: kenton@google.com (Kenton Varda) -// -// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to -// change. -// -// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is -// just a program that reads a CodeGeneratorRequest from stdin and writes a -// CodeGeneratorResponse to stdout. -// -// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead -// of dealing with the raw protocol defined here. -// -// A plugin executable needs only to be placed somewhere in the path. The -// plugin should be named "protoc-gen-$NAME", and will then be used when the -// flag "--${NAME}_out" is passed to protoc. - -syntax = "proto2"; -package google.protobuf.compiler; -option java_package = "com.google.protobuf.compiler"; -option java_outer_classname = "PluginProtos"; - -option go_package = "github.com/golang/protobuf/protoc-gen-go/plugin;plugin_go"; - -import "google/protobuf/descriptor.proto"; - -// The version number of protocol compiler. -message Version { - optional int32 major = 1; - optional int32 minor = 2; - optional int32 patch = 3; - // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should - // be empty for mainline stable releases. - optional string suffix = 4; -} - -// An encoded CodeGeneratorRequest is written to the plugin's stdin. -message CodeGeneratorRequest { - // The .proto files that were explicitly listed on the command-line. The - // code generator should generate code only for these files. Each file's - // descriptor will be included in proto_file, below. - repeated string file_to_generate = 1; - - // The generator parameter passed on the command-line. - optional string parameter = 2; - - // FileDescriptorProtos for all files in files_to_generate and everything - // they import. The files will appear in topological order, so each file - // appears before any file that imports it. - // - // protoc guarantees that all proto_files will be written after - // the fields above, even though this is not technically guaranteed by the - // protobuf wire format. This theoretically could allow a plugin to stream - // in the FileDescriptorProtos and handle them one by one rather than read - // the entire set into memory at once. However, as of this writing, this - // is not similarly optimized on protoc's end -- it will store all fields in - // memory at once before sending them to the plugin. - // - // Type names of fields and extensions in the FileDescriptorProto are always - // fully qualified. 
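The topological ordering promised here means a plugin can resolve every import in a single forward pass, since a dependency's descriptor always precedes any file that imports it. A minimal sketch, assuming a decoded `req` and the Go `descriptor` package:

```go
// Build a name -> descriptor index in one pass over the request.
byName := make(map[string]*descriptor.FileDescriptorProto)
for _, fd := range req.GetProtoFile() {
	for _, dep := range fd.GetDependency() {
		if _, ok := byName[dep]; !ok {
			log.Fatalf("dependency %q of %q not yet seen", dep, fd.GetName())
		}
	}
	byName[fd.GetName()] = fd
}
```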
- repeated FileDescriptorProto proto_file = 15; - - // The version number of protocol compiler. - optional Version compiler_version = 3; - -} - -// The plugin writes an encoded CodeGeneratorResponse to stdout. -message CodeGeneratorResponse { - // Error message. If non-empty, code generation failed. The plugin process - // should exit with status code zero even if it reports an error in this way. - // - // This should be used to indicate errors in .proto files which prevent the - // code generator from generating correct code. Errors which indicate a - // problem in protoc itself -- such as the input CodeGeneratorRequest being - // unparseable -- should be reported by writing a message to stderr and - // exiting with a non-zero status code. - optional string error = 1; - - // Represents a single generated file. - message File { - // The file name, relative to the output directory. The name must not - // contain "." or ".." components and must be relative, not be absolute (so, - // the file cannot lie outside the output directory). "/" must be used as - // the path separator, not "\". - // - // If the name is omitted, the content will be appended to the previous - // file. This allows the generator to break large files into small chunks, - // and allows the generated text to be streamed back to protoc so that large - // files need not reside completely in memory at one time. Note that as of - // this writing protoc does not optimize for this -- it will read the entire - // CodeGeneratorResponse before writing files to disk. - optional string name = 1; - - // If non-empty, indicates that the named file should already exist, and the - // content here is to be inserted into that file at a defined insertion - // point. This feature allows a code generator to extend the output - // produced by another code generator. The original generator may provide - // insertion points by placing special annotations in the file that look - // like: - // @@protoc_insertion_point(NAME) - // The annotation can have arbitrary text before and after it on the line, - // which allows it to be placed in a comment. NAME should be replaced with - // an identifier naming the point -- this is what other generators will use - // as the insertion_point. Code inserted at this point will be placed - // immediately above the line containing the insertion point (thus multiple - // insertions to the same point will come out in the order they were added). - // The double-@ is intended to make it unlikely that the generated code - // could contain things that look like insertion points by accident. - // - // For example, the C++ code generator places the following line in the - // .pb.h files that it generates: - // // @@protoc_insertion_point(namespace_scope) - // This line appears within the scope of the file's package namespace, but - // outside of any particular class. Another plugin can then specify the - // insertion_point "namespace_scope" to generate additional classes or - // other declarations that should be placed in this scope. - // - // Note that if the line containing the insertion point begins with - // whitespace, the same whitespace will be added to every line of the - // inserted text. This is useful for languages like Python, where - // indentation matters. In these languages, the insertion point comment - // should be indented the same amount as any inserted code will need to be - // in order to work correctly in that context. 
- // - // The code generator that generates the initial file and the one which - // inserts into it must both run as part of a single invocation of protoc. - // Code generators are executed in the order in which they appear on the - // command line. - // - // If |insertion_point| is present, |name| must also be present. - optional string insertion_point = 2; - - // The file contents. - optional string content = 15; - } - repeated File file = 15; -} diff --git a/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go index ede4cd59169c..a0e6358024dd 100644 --- a/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ b/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -133,7 +133,7 @@ func (m *Any) Reset() { *m = Any{} } func (m *Any) String() string { return proto.CompactTextString(m) } func (*Any) ProtoMessage() {} func (*Any) Descriptor() ([]byte, []int) { - return fileDescriptor_any_744b9ca530f228db, []int{0} + return fileDescriptor_b53526c13ae22eb4, []int{0} } func (*Any) XXX_WellKnownType() string { return "Any" } func (m *Any) XXX_Unmarshal(b []byte) error { @@ -172,9 +172,9 @@ func init() { proto.RegisterType((*Any)(nil), "google.protobuf.Any") } -func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_any_744b9ca530f228db) } +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) } -var fileDescriptor_any_744b9ca530f228db = []byte{ +var fileDescriptor_b53526c13ae22eb4 = []byte{ // 185 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, diff --git a/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go index eb6055df5eeb..5ae7d8092e1b 100644 --- a/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ b/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -99,7 +99,7 @@ func (m *Duration) Reset() { *m = Duration{} } func (m *Duration) String() string { return proto.CompactTextString(m) } func (*Duration) ProtoMessage() {} func (*Duration) Descriptor() ([]byte, []int) { - return fileDescriptor_duration_e7d612259e3f0613, []int{0} + return fileDescriptor_23597b2ebd7ac6c5, []int{0} } func (*Duration) XXX_WellKnownType() string { return "Duration" } func (m *Duration) XXX_Unmarshal(b []byte) error { @@ -138,11 +138,9 @@ func init() { proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") } -func init() { - proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_duration_e7d612259e3f0613) -} +func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) } -var fileDescriptor_duration_e7d612259e3f0613 = []byte{ +var fileDescriptor_23597b2ebd7ac6c5 = []byte{ // 190 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, diff --git a/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go 
b/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go index 6603be739b32..9e3622454823 100644 --- a/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ b/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -115,7 +115,7 @@ func (m *Timestamp) Reset() { *m = Timestamp{} } func (m *Timestamp) String() string { return proto.CompactTextString(m) } func (*Timestamp) ProtoMessage() {} func (*Timestamp) Descriptor() ([]byte, []int) { - return fileDescriptor_timestamp_b826e8e5fba671a8, []int{0} + return fileDescriptor_292007bbfe81227e, []int{0} } func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } func (m *Timestamp) XXX_Unmarshal(b []byte) error { @@ -154,11 +154,9 @@ func init() { proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") } -func init() { - proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_timestamp_b826e8e5fba671a8) -} +func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) } -var fileDescriptor_timestamp_b826e8e5fba671a8 = []byte{ +var fileDescriptor_292007bbfe81227e = []byte{ // 191 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, diff --git a/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go index 15feea5f2c88..170d8c5de698 100644 --- a/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go +++ b/vertical-pod-autoscaler/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go @@ -33,7 +33,7 @@ func (m *DoubleValue) Reset() { *m = DoubleValue{} } func (m *DoubleValue) String() string { return proto.CompactTextString(m) } func (*DoubleValue) ProtoMessage() {} func (*DoubleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{0} + return fileDescriptor_5377b62bda767935, []int{0} } func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } func (m *DoubleValue) XXX_Unmarshal(b []byte) error { @@ -76,7 +76,7 @@ func (m *FloatValue) Reset() { *m = FloatValue{} } func (m *FloatValue) String() string { return proto.CompactTextString(m) } func (*FloatValue) ProtoMessage() {} func (*FloatValue) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{1} + return fileDescriptor_5377b62bda767935, []int{1} } func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } func (m *FloatValue) XXX_Unmarshal(b []byte) error { @@ -119,7 +119,7 @@ func (m *Int64Value) Reset() { *m = Int64Value{} } func (m *Int64Value) String() string { return proto.CompactTextString(m) } func (*Int64Value) ProtoMessage() {} func (*Int64Value) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{2} + return fileDescriptor_5377b62bda767935, []int{2} } func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } func (m *Int64Value) XXX_Unmarshal(b []byte) error { @@ -162,7 +162,7 @@ func (m *UInt64Value) Reset() { *m = UInt64Value{} } func (m *UInt64Value) String() string { return proto.CompactTextString(m) } func (*UInt64Value) ProtoMessage() {} func (*UInt64Value) Descriptor() ([]byte, []int) { - return 
fileDescriptor_wrappers_16c7c35c009f3253, []int{3} + return fileDescriptor_5377b62bda767935, []int{3} } func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } func (m *UInt64Value) XXX_Unmarshal(b []byte) error { @@ -205,7 +205,7 @@ func (m *Int32Value) Reset() { *m = Int32Value{} } func (m *Int32Value) String() string { return proto.CompactTextString(m) } func (*Int32Value) ProtoMessage() {} func (*Int32Value) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{4} + return fileDescriptor_5377b62bda767935, []int{4} } func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } func (m *Int32Value) XXX_Unmarshal(b []byte) error { @@ -248,7 +248,7 @@ func (m *UInt32Value) Reset() { *m = UInt32Value{} } func (m *UInt32Value) String() string { return proto.CompactTextString(m) } func (*UInt32Value) ProtoMessage() {} func (*UInt32Value) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{5} + return fileDescriptor_5377b62bda767935, []int{5} } func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } func (m *UInt32Value) XXX_Unmarshal(b []byte) error { @@ -291,7 +291,7 @@ func (m *BoolValue) Reset() { *m = BoolValue{} } func (m *BoolValue) String() string { return proto.CompactTextString(m) } func (*BoolValue) ProtoMessage() {} func (*BoolValue) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{6} + return fileDescriptor_5377b62bda767935, []int{6} } func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } func (m *BoolValue) XXX_Unmarshal(b []byte) error { @@ -334,7 +334,7 @@ func (m *StringValue) Reset() { *m = StringValue{} } func (m *StringValue) String() string { return proto.CompactTextString(m) } func (*StringValue) ProtoMessage() {} func (*StringValue) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{7} + return fileDescriptor_5377b62bda767935, []int{7} } func (*StringValue) XXX_WellKnownType() string { return "StringValue" } func (m *StringValue) XXX_Unmarshal(b []byte) error { @@ -377,7 +377,7 @@ func (m *BytesValue) Reset() { *m = BytesValue{} } func (m *BytesValue) String() string { return proto.CompactTextString(m) } func (*BytesValue) ProtoMessage() {} func (*BytesValue) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{8} + return fileDescriptor_5377b62bda767935, []int{8} } func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } func (m *BytesValue) XXX_Unmarshal(b []byte) error { @@ -417,11 +417,9 @@ func init() { proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue") } -func init() { - proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_wrappers_16c7c35c009f3253) -} +func init() { proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_5377b62bda767935) } -var fileDescriptor_wrappers_16c7c35c009f3253 = []byte{ +var fileDescriptor_5377b62bda767935 = []byte{ // 259 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c, diff --git a/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/.travis.yml b/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/.travis.yml deleted file mode 100644 index d1784e1e23b7..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/.travis.yml +++ /dev/null @@ 
-1,23 +0,0 @@ -sudo: false -language: go -go: - - 1.2 - - 1.3 - - 1.4 - - 1.5 - - 1.6 - - 1.7 - - 1.8 - - 1.9 - - master -matrix: - allow_failures: - - go: master - fast_finish: true -install: - - # Skip. -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d .) - - go tool vet . - - go test -v -race ./... diff --git a/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/LEGAL b/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/LEGAL deleted file mode 100644 index 72b859cd6219..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/LEGAL +++ /dev/null @@ -1,32 +0,0 @@ -All the files in this distribution are covered under either the MIT -license (see the file LICENSE) except some files mentioned below. - -match.go, match_test.go: - - Copyright (c) 2009 The Go Authors. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/LICENSE b/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/LICENSE deleted file mode 100644 index 1cbf651e2fcb..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2013 Kamil Kisiel - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/README.md b/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/README.md deleted file mode 100644 index 6e4e92b2f607..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/README.md +++ /dev/null @@ -1,6 +0,0 @@ -gotool -====== -[![GoDoc](https://godoc.org/github.com/kisielk/gotool?status.svg)](https://godoc.org/github.com/kisielk/gotool) -[![Build Status](https://travis-ci.org/kisielk/gotool.svg?branch=master)](https://travis-ci.org/kisielk/gotool) - -Package gotool contains utility functions used to implement the standard "cmd/go" tool, provided as a convenience to developers who want to write tools with similar semantics. diff --git a/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/go.mod b/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/go.mod deleted file mode 100644 index 503b37c6fb1a..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/go.mod +++ /dev/null @@ -1 +0,0 @@ -module "github.com/kisielk/gotool" diff --git a/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/go13.go b/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/go13.go deleted file mode 100644 index 2dd9b3fdf0a9..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/go13.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !go1.4 - -package gotool - -import ( - "go/build" - "path/filepath" - "runtime" -) - -var gorootSrc = filepath.Join(runtime.GOROOT(), "src", "pkg") - -func shouldIgnoreImport(p *build.Package) bool { - return true -} diff --git a/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/go14-15.go b/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/go14-15.go deleted file mode 100644 index aa99a32270ba..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/go14-15.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build go1.4,!go1.6 - -package gotool - -import ( - "go/build" - "path/filepath" - "runtime" -) - -var gorootSrc = filepath.Join(runtime.GOROOT(), "src") - -func shouldIgnoreImport(p *build.Package) bool { - return true -} diff --git a/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/go16-18.go b/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/go16-18.go deleted file mode 100644 index f25cec14a83c..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/go16-18.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build go1.6,!go1.9 - -package gotool - -import ( - "go/build" - "path/filepath" - "runtime" -) - -var gorootSrc = filepath.Join(runtime.GOROOT(), "src") - -func shouldIgnoreImport(p *build.Package) bool { - return p == nil || len(p.InvalidGoFiles) == 0 -} diff --git a/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/internal/load/path.go b/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/internal/load/path.go deleted file mode 100644 index 74e15b9d3245..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/internal/load/path.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
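The trio of `go13.go`, `go14-15.go`, and `go16-18.go` above is the classic release-gating pattern: mutually exclusive `+build` constraints guarantee that exactly one definition of `gorootSrc` and `shouldIgnoreImport` is compiled for any given toolchain. Reduced to its essence (file and package names illustrative):

```go
// layout_pre14.go
// +build !go1.4

package compat

import (
	"path/filepath"
	"runtime"
)

// Before Go 1.4 the standard library sources lived under $GOROOT/src/pkg.
var gorootSrc = filepath.Join(runtime.GOROOT(), "src", "pkg")
```

```go
// layout_go14.go
// +build go1.4

package compat

import (
	"path/filepath"
	"runtime"
)

// Go 1.4 flattened the tree to $GOROOT/src.
var gorootSrc = filepath.Join(runtime.GOROOT(), "src")
```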
- -// +build go1.9 - -package load - -import ( - "strings" -) - -// hasPathPrefix reports whether the path s begins with the -// elements in prefix. -func hasPathPrefix(s, prefix string) bool { - switch { - default: - return false - case len(s) == len(prefix): - return s == prefix - case len(s) > len(prefix): - if prefix != "" && prefix[len(prefix)-1] == '/' { - return strings.HasPrefix(s, prefix) - } - return s[len(prefix)] == '/' && s[:len(prefix)] == prefix - } -} diff --git a/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/internal/load/pkg.go b/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/internal/load/pkg.go deleted file mode 100644 index b937ede759d6..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/internal/load/pkg.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.9 - -// Package load loads packages. -package load - -import ( - "strings" -) - -// isStandardImportPath reports whether $GOROOT/src/path should be considered -// part of the standard distribution. For historical reasons we allow people to add -// their own code to $GOROOT instead of using $GOPATH, but we assume that -// code will start with a domain name (dot in the first element). -func isStandardImportPath(path string) bool { - i := strings.Index(path, "/") - if i < 0 { - i = len(path) - } - elem := path[:i] - return !strings.Contains(elem, ".") -} diff --git a/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/internal/load/search.go b/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/internal/load/search.go deleted file mode 100644 index 17ed62ddae76..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/internal/load/search.go +++ /dev/null @@ -1,354 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.9 - -package load - -import ( - "fmt" - "go/build" - "log" - "os" - "path" - "path/filepath" - "regexp" - "strings" -) - -// Context specifies values for operation of ImportPaths that would -// otherwise come from cmd/go/internal/cfg package. -// -// This is a construct added for gotool purposes and doesn't have -// an equivalent upstream in cmd/go. -type Context struct { - // BuildContext is the build context to use. - BuildContext build.Context - - // GOROOTsrc is the location of the src directory in GOROOT. - // At this time, it's used only in MatchPackages to skip - // GOOROOT/src entry from BuildContext.SrcDirs output. - GOROOTsrc string -} - -// allPackages returns all the packages that can be found -// under the $GOPATH directories and $GOROOT matching pattern. -// The pattern is either "all" (all packages), "std" (standard packages), -// "cmd" (standard commands), or a path including "...". -func (c *Context) allPackages(pattern string) []string { - pkgs := c.MatchPackages(pattern) - if len(pkgs) == 0 { - fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) - } - return pkgs -} - -// allPackagesInFS is like allPackages but is passed a pattern -// beginning ./ or ../, meaning it should scan the tree rooted -// at the given directory. There are ... in the pattern too. 
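The hasPathPrefix helper deleted above matches whole path elements rather than raw string prefixes, which is why "net" prefixes "net/http" but not "network". A runnable demo of that behavior — the function body is copied from the deleted internal/load/path.go; the main function and sample paths are illustrative additions:

```go
package main

import (
	"fmt"
	"strings"
)

// hasPathPrefix is copied from the deleted internal/load/path.go: it
// reports whether path s begins with the elements in prefix, matching
// whole path elements rather than raw string prefixes.
func hasPathPrefix(s, prefix string) bool {
	switch {
	default:
		return false
	case len(s) == len(prefix):
		return s == prefix
	case len(s) > len(prefix):
		if prefix != "" && prefix[len(prefix)-1] == '/' {
			return strings.HasPrefix(s, prefix)
		}
		return s[len(prefix)] == '/' && s[:len(prefix)] == prefix
	}
}

func main() {
	fmt.Println(hasPathPrefix("net/http", "net"))  // true: element boundary
	fmt.Println(hasPathPrefix("network", "net"))   // false: "net" is not a whole element
	fmt.Println(hasPathPrefix("net/http", "net/")) // true: trailing-slash form
}
```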
-func (c *Context) allPackagesInFS(pattern string) []string { - pkgs := c.MatchPackagesInFS(pattern) - if len(pkgs) == 0 { - fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) - } - return pkgs -} - -// MatchPackages returns a list of package paths matching pattern -// (see go help packages for pattern syntax). -func (c *Context) MatchPackages(pattern string) []string { - match := func(string) bool { return true } - treeCanMatch := func(string) bool { return true } - if !IsMetaPackage(pattern) { - match = matchPattern(pattern) - treeCanMatch = treeCanMatchPattern(pattern) - } - - have := map[string]bool{ - "builtin": true, // ignore pseudo-package that exists only for documentation - } - if !c.BuildContext.CgoEnabled { - have["runtime/cgo"] = true // ignore during walk - } - var pkgs []string - - for _, src := range c.BuildContext.SrcDirs() { - if (pattern == "std" || pattern == "cmd") && src != c.GOROOTsrc { - continue - } - src = filepath.Clean(src) + string(filepath.Separator) - root := src - if pattern == "cmd" { - root += "cmd" + string(filepath.Separator) - } - filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { - if err != nil || path == src { - return nil - } - - want := true - // Avoid .foo, _foo, and testdata directory trees. - _, elem := filepath.Split(path) - if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" { - want = false - } - - name := filepath.ToSlash(path[len(src):]) - if pattern == "std" && (!isStandardImportPath(name) || name == "cmd") { - // The name "std" is only the standard library. - // If the name is cmd, it's the root of the command tree. - want = false - } - if !treeCanMatch(name) { - want = false - } - - if !fi.IsDir() { - if fi.Mode()&os.ModeSymlink != 0 && want { - if target, err := os.Stat(path); err == nil && target.IsDir() { - fmt.Fprintf(os.Stderr, "warning: ignoring symlink %s\n", path) - } - } - return nil - } - if !want { - return filepath.SkipDir - } - - if have[name] { - return nil - } - have[name] = true - if !match(name) { - return nil - } - pkg, err := c.BuildContext.ImportDir(path, 0) - if err != nil { - if _, noGo := err.(*build.NoGoError); noGo { - return nil - } - } - - // If we are expanding "cmd", skip main - // packages under cmd/vendor. At least as of - // March, 2017, there is one there for the - // vendored pprof tool. - if pattern == "cmd" && strings.HasPrefix(pkg.ImportPath, "cmd/vendor") && pkg.Name == "main" { - return nil - } - - pkgs = append(pkgs, name) - return nil - }) - } - return pkgs -} - -// MatchPackagesInFS returns a list of package paths matching pattern, -// which must begin with ./ or ../ -// (see go help packages for pattern syntax). -func (c *Context) MatchPackagesInFS(pattern string) []string { - // Find directory to begin the scan. - // Could be smarter but this one optimization - // is enough for now, since ... is usually at the - // end of a path. - i := strings.Index(pattern, "...") - dir, _ := path.Split(pattern[:i]) - - // pattern begins with ./ or ../. - // path.Clean will discard the ./ but not the ../. - // We need to preserve the ./ for pattern matching - // and in the returned import paths. - prefix := "" - if strings.HasPrefix(pattern, "./") { - prefix = "./" - } - match := matchPattern(pattern) - - var pkgs []string - filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error { - if err != nil || !fi.IsDir() { - return nil - } - if path == dir { - // filepath.Walk starts at dir and recurses. 
For the recursive case, - // the path is the result of filepath.Join, which calls filepath.Clean. - // The initial case is not Cleaned, though, so we do this explicitly. - // - // This converts a path like "./io/" to "io". Without this step, running - // "cd $GOROOT/src; go list ./io/..." would incorrectly skip the io - // package, because prepending the prefix "./" to the unclean path would - // result in "././io", and match("././io") returns false. - path = filepath.Clean(path) - } - - // Avoid .foo, _foo, and testdata directory trees, but do not avoid "." or "..". - _, elem := filepath.Split(path) - dot := strings.HasPrefix(elem, ".") && elem != "." && elem != ".." - if dot || strings.HasPrefix(elem, "_") || elem == "testdata" { - return filepath.SkipDir - } - - name := prefix + filepath.ToSlash(path) - if !match(name) { - return nil - } - - // We keep the directory if we can import it, or if we can't import it - // due to invalid Go source files. This means that directories containing - // parse errors will be built (and fail) instead of being silently skipped - // as not matching the pattern. Go 1.5 and earlier skipped, but that - // behavior means people miss serious mistakes. - // See golang.org/issue/11407. - if p, err := c.BuildContext.ImportDir(path, 0); err != nil && (p == nil || len(p.InvalidGoFiles) == 0) { - if _, noGo := err.(*build.NoGoError); !noGo { - log.Print(err) - } - return nil - } - pkgs = append(pkgs, name) - return nil - }) - return pkgs -} - -// treeCanMatchPattern(pattern)(name) reports whether -// name or children of name can possibly match pattern. -// Pattern is the same limited glob accepted by matchPattern. -func treeCanMatchPattern(pattern string) func(name string) bool { - wildCard := false - if i := strings.Index(pattern, "..."); i >= 0 { - wildCard = true - pattern = pattern[:i] - } - return func(name string) bool { - return len(name) <= len(pattern) && hasPathPrefix(pattern, name) || - wildCard && strings.HasPrefix(name, pattern) - } -} - -// matchPattern(pattern)(name) reports whether -// name matches pattern. Pattern is a limited glob -// pattern in which '...' means 'any string' and there -// is no other special syntax. -// Unfortunately, there are two special cases. Quoting "go help packages": -// -// First, /... at the end of the pattern can match an empty string, -// so that net/... matches both net and packages in its subdirectories, like net/http. -// Second, any slash-separted pattern element containing a wildcard never -// participates in a match of the "vendor" element in the path of a vendored -// package, so that ./... does not match packages in subdirectories of -// ./vendor or ./mycode/vendor, but ./vendor/... and ./mycode/vendor/... do. -// Note, however, that a directory named vendor that itself contains code -// is not a vendored package: cmd/vendor would be a command named vendor, -// and the pattern cmd/... matches it. -func matchPattern(pattern string) func(name string) bool { - // Convert pattern to regular expression. - // The strategy for the trailing /... is to nest it in an explicit ? expression. - // The strategy for the vendor exclusion is to change the unmatchable - // vendor strings to a disallowed code point (vendorChar) and to use - // "(anything but that codepoint)*" as the implementation of the ... wildcard. - // This is a bit complicated but the obvious alternative, - // namely a hand-written search like in most shell glob matchers, - // is too easy to make accidentally exponential. 
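The comment above describes the vendor-aware pattern-matching strategy. The simpler pre-Go 1.9 variant, without the vendor exclusion, appears later in this same diff (match18.go); as a self-contained sketch of the core glob-to-regexp idea, with an illustrative main:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// matchPattern implements the limited glob described above: "..." means
// "any string", and "foo/..." also matches "foo" itself. This is the
// simpler variant from the deleted match18.go, without vendor handling.
func matchPattern(pattern string) func(name string) bool {
	re := regexp.QuoteMeta(pattern)
	re = strings.Replace(re, `\.\.\.`, `.*`, -1)
	// Special case: foo/... matches foo too.
	if strings.HasSuffix(re, `/.*`) {
		re = re[:len(re)-len(`/.*`)] + `(/.*)?`
	}
	return regexp.MustCompile(`^` + re + `$`).MatchString
}

func main() {
	m := matchPattern("net/...")
	fmt.Println(m("net"))      // true: the trailing /... may match nothing
	fmt.Println(m("net/http")) // true
	fmt.Println(m("network"))  // false: not under the net/ element
}
```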
- // Using package regexp guarantees linear-time matching. - - const vendorChar = "\x00" - - if strings.Contains(pattern, vendorChar) { - return func(name string) bool { return false } - } - - re := regexp.QuoteMeta(pattern) - re = replaceVendor(re, vendorChar) - switch { - case strings.HasSuffix(re, `/`+vendorChar+`/\.\.\.`): - re = strings.TrimSuffix(re, `/`+vendorChar+`/\.\.\.`) + `(/vendor|/` + vendorChar + `/\.\.\.)` - case re == vendorChar+`/\.\.\.`: - re = `(/vendor|/` + vendorChar + `/\.\.\.)` - case strings.HasSuffix(re, `/\.\.\.`): - re = strings.TrimSuffix(re, `/\.\.\.`) + `(/\.\.\.)?` - } - re = strings.Replace(re, `\.\.\.`, `[^`+vendorChar+`]*`, -1) - - reg := regexp.MustCompile(`^` + re + `$`) - - return func(name string) bool { - if strings.Contains(name, vendorChar) { - return false - } - return reg.MatchString(replaceVendor(name, vendorChar)) - } -} - -// replaceVendor returns the result of replacing -// non-trailing vendor path elements in x with repl. -func replaceVendor(x, repl string) string { - if !strings.Contains(x, "vendor") { - return x - } - elem := strings.Split(x, "/") - for i := 0; i < len(elem)-1; i++ { - if elem[i] == "vendor" { - elem[i] = repl - } - } - return strings.Join(elem, "/") -} - -// ImportPaths returns the import paths to use for the given command line. -func (c *Context) ImportPaths(args []string) []string { - args = c.ImportPathsNoDotExpansion(args) - var out []string - for _, a := range args { - if strings.Contains(a, "...") { - if build.IsLocalImport(a) { - out = append(out, c.allPackagesInFS(a)...) - } else { - out = append(out, c.allPackages(a)...) - } - continue - } - out = append(out, a) - } - return out -} - -// ImportPathsNoDotExpansion returns the import paths to use for the given -// command line, but it does no ... expansion. -func (c *Context) ImportPathsNoDotExpansion(args []string) []string { - if len(args) == 0 { - return []string{"."} - } - var out []string - for _, a := range args { - // Arguments are supposed to be import paths, but - // as a courtesy to Windows developers, rewrite \ to / - // in command-line arguments. Handles .\... and so on. - if filepath.Separator == '\\' { - a = strings.Replace(a, `\`, `/`, -1) - } - - // Put argument in canonical form, but preserve leading ./. - if strings.HasPrefix(a, "./") { - a = "./" + path.Clean(a) - if a == "./." { - a = "." - } - } else { - a = path.Clean(a) - } - if IsMetaPackage(a) { - out = append(out, c.allPackages(a)...) - continue - } - out = append(out, a) - } - return out -} - -// IsMetaPackage checks if name is a reserved package name that expands to multiple packages. -func IsMetaPackage(name string) bool { - return name == "std" || name == "cmd" || name == "all" -} diff --git a/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/match.go b/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/match.go deleted file mode 100644 index 4dbdbff47f9d..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/match.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) 2009 The Go Authors. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build go1.9 - -package gotool - -import ( - "path/filepath" - - "github.com/kisielk/gotool/internal/load" -) - -// importPaths returns the import paths to use for the given command line. -func (c *Context) importPaths(args []string) []string { - lctx := load.Context{ - BuildContext: c.BuildContext, - GOROOTsrc: c.joinPath(c.BuildContext.GOROOT, "src"), - } - return lctx.ImportPaths(args) -} - -// joinPath calls c.BuildContext.JoinPath (if not nil) or else filepath.Join. -// -// It's a copy of the unexported build.Context.joinPath helper. -func (c *Context) joinPath(elem ...string) string { - if f := c.BuildContext.JoinPath; f != nil { - return f(elem...) - } - return filepath.Join(elem...) -} diff --git a/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/match18.go b/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/match18.go deleted file mode 100644 index 6d6b1368c8de..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/match18.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright (c) 2009 The Go Authors. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !go1.9 - -package gotool - -import ( - "fmt" - "go/build" - "log" - "os" - "path" - "path/filepath" - "regexp" - "strings" -) - -// This file contains code from the Go distribution. - -// matchPattern(pattern)(name) reports whether -// name matches pattern. Pattern is a limited glob -// pattern in which '...' means 'any string' and there -// is no other special syntax. -func matchPattern(pattern string) func(name string) bool { - re := regexp.QuoteMeta(pattern) - re = strings.Replace(re, `\.\.\.`, `.*`, -1) - // Special case: foo/... matches foo too. - if strings.HasSuffix(re, `/.*`) { - re = re[:len(re)-len(`/.*`)] + `(/.*)?` - } - reg := regexp.MustCompile(`^` + re + `$`) - return reg.MatchString -} - -// matchPackages returns a list of package paths matching pattern -// (see go help packages for pattern syntax). -func (c *Context) matchPackages(pattern string) []string { - match := func(string) bool { return true } - treeCanMatch := func(string) bool { return true } - if !isMetaPackage(pattern) { - match = matchPattern(pattern) - treeCanMatch = treeCanMatchPattern(pattern) - } - - have := map[string]bool{ - "builtin": true, // ignore pseudo-package that exists only for documentation - } - if !c.BuildContext.CgoEnabled { - have["runtime/cgo"] = true // ignore during walk - } - var pkgs []string - - for _, src := range c.BuildContext.SrcDirs() { - if (pattern == "std" || pattern == "cmd") && src != gorootSrc { - continue - } - src = filepath.Clean(src) + string(filepath.Separator) - root := src - if pattern == "cmd" { - root += "cmd" + string(filepath.Separator) - } - filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { - if err != nil || !fi.IsDir() || path == src { - return nil - } - - // Avoid .foo, _foo, and testdata directory trees. - _, elem := filepath.Split(path) - if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" { - return filepath.SkipDir - } - - name := filepath.ToSlash(path[len(src):]) - if pattern == "std" && (!isStandardImportPath(name) || name == "cmd") { - // The name "std" is only the standard library. - // If the name is cmd, it's the root of the command tree. - return filepath.SkipDir - } - if !treeCanMatch(name) { - return filepath.SkipDir - } - if have[name] { - return nil - } - have[name] = true - if !match(name) { - return nil - } - _, err = c.BuildContext.ImportDir(path, 0) - if err != nil { - if _, noGo := err.(*build.NoGoError); noGo { - return nil - } - } - pkgs = append(pkgs, name) - return nil - }) - } - return pkgs -} - -// importPathsNoDotExpansion returns the import paths to use for the given -// command line, but it does no ... expansion. -func (c *Context) importPathsNoDotExpansion(args []string) []string { - if len(args) == 0 { - return []string{"."} - } - var out []string - for _, a := range args { - // Arguments are supposed to be import paths, but - // as a courtesy to Windows developers, rewrite \ to / - // in command-line arguments. Handles .\... 
and so on. - if filepath.Separator == '\\' { - a = strings.Replace(a, `\`, `/`, -1) - } - - // Put argument in canonical form, but preserve leading ./. - if strings.HasPrefix(a, "./") { - a = "./" + path.Clean(a) - if a == "./." { - a = "." - } - } else { - a = path.Clean(a) - } - if isMetaPackage(a) { - out = append(out, c.allPackages(a)...) - continue - } - out = append(out, a) - } - return out -} - -// importPaths returns the import paths to use for the given command line. -func (c *Context) importPaths(args []string) []string { - args = c.importPathsNoDotExpansion(args) - var out []string - for _, a := range args { - if strings.Contains(a, "...") { - if build.IsLocalImport(a) { - out = append(out, c.allPackagesInFS(a)...) - } else { - out = append(out, c.allPackages(a)...) - } - continue - } - out = append(out, a) - } - return out -} - -// allPackages returns all the packages that can be found -// under the $GOPATH directories and $GOROOT matching pattern. -// The pattern is either "all" (all packages), "std" (standard packages), -// "cmd" (standard commands), or a path including "...". -func (c *Context) allPackages(pattern string) []string { - pkgs := c.matchPackages(pattern) - if len(pkgs) == 0 { - fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) - } - return pkgs -} - -// allPackagesInFS is like allPackages but is passed a pattern -// beginning ./ or ../, meaning it should scan the tree rooted -// at the given directory. There are ... in the pattern too. -func (c *Context) allPackagesInFS(pattern string) []string { - pkgs := c.matchPackagesInFS(pattern) - if len(pkgs) == 0 { - fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) - } - return pkgs -} - -// matchPackagesInFS returns a list of package paths matching pattern, -// which must begin with ./ or ../ -// (see go help packages for pattern syntax). -func (c *Context) matchPackagesInFS(pattern string) []string { - // Find directory to begin the scan. - // Could be smarter but this one optimization - // is enough for now, since ... is usually at the - // end of a path. - i := strings.Index(pattern, "...") - dir, _ := path.Split(pattern[:i]) - - // pattern begins with ./ or ../. - // path.Clean will discard the ./ but not the ../. - // We need to preserve the ./ for pattern matching - // and in the returned import paths. - prefix := "" - if strings.HasPrefix(pattern, "./") { - prefix = "./" - } - match := matchPattern(pattern) - - var pkgs []string - filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error { - if err != nil || !fi.IsDir() { - return nil - } - if path == dir { - // filepath.Walk starts at dir and recurses. For the recursive case, - // the path is the result of filepath.Join, which calls filepath.Clean. - // The initial case is not Cleaned, though, so we do this explicitly. - // - // This converts a path like "./io/" to "io". Without this step, running - // "cd $GOROOT/src; go list ./io/..." would incorrectly skip the io - // package, because prepending the prefix "./" to the unclean path would - // result in "././io", and match("././io") returns false. - path = filepath.Clean(path) - } - - // Avoid .foo, _foo, and testdata directory trees, but do not avoid "." or "..". - _, elem := filepath.Split(path) - dot := strings.HasPrefix(elem, ".") && elem != "." && elem != ".." 
- if dot || strings.HasPrefix(elem, "_") || elem == "testdata" { - return filepath.SkipDir - } - - name := prefix + filepath.ToSlash(path) - if !match(name) { - return nil - } - - // We keep the directory if we can import it, or if we can't import it - // due to invalid Go source files. This means that directories containing - // parse errors will be built (and fail) instead of being silently skipped - // as not matching the pattern. Go 1.5 and earlier skipped, but that - // behavior means people miss serious mistakes. - // See golang.org/issue/11407. - if p, err := c.BuildContext.ImportDir(path, 0); err != nil && shouldIgnoreImport(p) { - if _, noGo := err.(*build.NoGoError); !noGo { - log.Print(err) - } - return nil - } - pkgs = append(pkgs, name) - return nil - }) - return pkgs -} - -// isMetaPackage checks if name is a reserved package name that expands to multiple packages. -func isMetaPackage(name string) bool { - return name == "std" || name == "cmd" || name == "all" -} - -// isStandardImportPath reports whether $GOROOT/src/path should be considered -// part of the standard distribution. For historical reasons we allow people to add -// their own code to $GOROOT instead of using $GOPATH, but we assume that -// code will start with a domain name (dot in the first element). -func isStandardImportPath(path string) bool { - i := strings.Index(path, "/") - if i < 0 { - i = len(path) - } - elem := path[:i] - return !strings.Contains(elem, ".") -} - -// hasPathPrefix reports whether the path s begins with the -// elements in prefix. -func hasPathPrefix(s, prefix string) bool { - switch { - default: - return false - case len(s) == len(prefix): - return s == prefix - case len(s) > len(prefix): - if prefix != "" && prefix[len(prefix)-1] == '/' { - return strings.HasPrefix(s, prefix) - } - return s[len(prefix)] == '/' && s[:len(prefix)] == prefix - } -} - -// treeCanMatchPattern(pattern)(name) reports whether -// name or children of name can possibly match pattern. -// Pattern is the same limited glob accepted by matchPattern. -func treeCanMatchPattern(pattern string) func(name string) bool { - wildCard := false - if i := strings.Index(pattern, "..."); i >= 0 { - wildCard = true - pattern = pattern[:i] - } - return func(name string) bool { - return len(name) <= len(pattern) && hasPathPrefix(pattern, name) || - wildCard && strings.HasPrefix(name, pattern) - } -} diff --git a/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/tool.go b/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/tool.go deleted file mode 100644 index c7409e11e6a8..000000000000 --- a/vertical-pod-autoscaler/vendor/github.com/kisielk/gotool/tool.go +++ /dev/null @@ -1,48 +0,0 @@ -// Package gotool contains utility functions used to implement the standard -// "cmd/go" tool, provided as a convenience to developers who want to write -// tools with similar semantics. -package gotool - -import "go/build" - -// Export functions here to make it easier to keep the implementations up to date with upstream. - -// DefaultContext is the default context that uses build.Default. -var DefaultContext = Context{ - BuildContext: build.Default, -} - -// A Context specifies the supporting context. -type Context struct { - // BuildContext is the build.Context that is used when computing import paths. - BuildContext build.Context -} - -// ImportPaths returns the import paths to use for the given command line. -// -// The path "all" is expanded to all packages in $GOPATH and $GOROOT. 
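For context on the API being removed, the package-level ImportPaths entry point defined in the deleted tool.go expands command-line package arguments the way cmd/go does: "..." wildcards, and the meta-packages "all", "std" and "cmd". A minimal usage sketch, assuming the package is still vendored:

```go
package main

import (
	"fmt"

	"github.com/kisielk/gotool"
)

func main() {
	// "./..." scans the tree rooted at the current directory; "std"
	// expands to the standard library. Relative paths keep their
	// leading "./", as the documentation above describes.
	for _, pkg := range gotool.ImportPaths([]string{"./...", "std"}) {
		fmt.Println(pkg)
	}
}
```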
-// The path "std" is expanded to all packages in the Go standard library. -// The path "cmd" is expanded to all Go standard commands. -// The string "..." is treated as a wildcard within a path. -// When matching recursively, directories are ignored if they are prefixed with -// a dot or an underscore (such as ".foo" or "_foo"), or are named "testdata". -// Relative import paths are not converted to full import paths. -// If args is empty, a single element "." is returned. -func (c *Context) ImportPaths(args []string) []string { - return c.importPaths(args) -} - -// ImportPaths returns the import paths to use for the given command line -// using default context. -// -// The path "all" is expanded to all packages in $GOPATH and $GOROOT. -// The path "std" is expanded to all packages in the Go standard library. -// The path "cmd" is expanded to all Go standard commands. -// The string "..." is treated as a wildcard within a path. -// When matching recursively, directories are ignored if they are prefixed with -// a dot or an underscore (such as ".foo" or "_foo"), or are named "testdata". -// Relative import paths are not converted to full import paths. -// If args is empty, a single element "." is returned. -func ImportPaths(args []string) []string { - return DefaultContext.importPaths(args) -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/lint/.travis.yml b/vertical-pod-autoscaler/vendor/golang.org/x/lint/.travis.yml deleted file mode 100644 index 47af085f2bcd..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/lint/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -sudo: false -language: go -go: - - 1.7.x - - 1.8.x - - 1.9.x - - master - -install: - - go get -t -v ./... - -script: - - go test -v -race ./... - -matrix: - allow_failures: - - go: master - fast_finish: true diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/lint/CONTRIBUTING.md b/vertical-pod-autoscaler/vendor/golang.org/x/lint/CONTRIBUTING.md deleted file mode 100644 index 1fadda62d2fc..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/lint/CONTRIBUTING.md +++ /dev/null @@ -1,15 +0,0 @@ -# Contributing to Golint - -## Before filing an issue: - -### Are you having trouble building golint? - -Check you have the latest version of its dependencies. Run -``` -go get -u golang.org/x/lint/golint -``` -If you still have problems, consider searching for existing issues before filing a new issue. - -## Before sending a pull request: - -Have you understood the purpose of golint? Make sure to carefully read `README`. diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/lint/LICENSE b/vertical-pod-autoscaler/vendor/golang.org/x/lint/LICENSE deleted file mode 100644 index 65d761bc9f28..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/lint/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2013 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. 
nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/lint/README.md b/vertical-pod-autoscaler/vendor/golang.org/x/lint/README.md deleted file mode 100644 index b12ba6389e68..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/lint/README.md +++ /dev/null @@ -1,86 +0,0 @@ -Golint is a linter for Go source code. - -[![Build Status](https://travis-ci.org/golang/lint.svg?branch=master)](https://travis-ci.org/golang/lint) - -## Installation - -Golint requires a -[supported release of Go](https://golang.org/doc/devel/release.html#policy). - - go get -u golang.org/x/lint/golint - -## Usage - -Invoke `golint` with one or more filenames, directories, or packages named -by its import path. Golint uses the same -[import path syntax](https://golang.org/cmd/go/#hdr-Import_path_syntax) as -the `go` command and therefore -also supports relative import paths like `./...`. Additionally the `...` -wildcard can be used as suffix on relative and absolute file paths to recurse -into them. - -The output of this tool is a list of suggestions in Vim quickfix format, -which is accepted by lots of different editors. - -## Purpose - -Golint differs from gofmt. Gofmt reformats Go source code, whereas -golint prints out style mistakes. - -Golint differs from govet. Govet is concerned with correctness, whereas -golint is concerned with coding style. Golint is in use at Google, and it -seeks to match the accepted style of the open source Go project. - -The suggestions made by golint are exactly that: suggestions. -Golint is not perfect, and has both false positives and false negatives. -Do not treat its output as a gold standard. We will not be adding pragmas -or other knobs to suppress specific warnings, so do not expect or require -code to be completely "lint-free". -In short, this tool is not, and will never be, trustworthy enough for its -suggestions to be enforced automatically, for example as part of a build process. -Golint makes suggestions for many of the mechanically checkable items listed in -[Effective Go](https://golang.org/doc/effective_go.html) and the -[CodeReviewComments wiki page](https://golang.org/wiki/CodeReviewComments). - -## Scope - -Golint is meant to carry out the stylistic conventions put forth in -[Effective Go](https://golang.org/doc/effective_go.html) and -[CodeReviewComments](https://golang.org/wiki/CodeReviewComments). -Changes that are not aligned with those documents will not be considered. - -## Contributions - -Contributions to this project are welcome provided they are [in scope](#scope), -though please send mail before starting work on anything major. 
-Contributors retain their copyright, so we need you to fill out -[a short form](https://developers.google.com/open-source/cla/individual) -before we can accept your contribution. - -## Vim - -Add this to your ~/.vimrc: - - set rtp+=$GOPATH/src/golang.org/x/lint/misc/vim - -If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value. - -Running `:Lint` will run golint on the current file and populate the quickfix list. - -Optionally, add this to your `~/.vimrc` to automatically run `golint` on `:w` - - autocmd BufWritePost,FileWritePost *.go execute 'Lint' | cwindow - - -## Emacs - -Add this to your `.emacs` file: - - (add-to-list 'load-path (concat (getenv "GOPATH") "/src/github.com/golang/lint/misc/emacs")) - (require 'golint) - -If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value. - -Running M-x golint will run golint on the current file. - -For more usage, see [Compilation-Mode](http://www.gnu.org/software/emacs/manual/html_node/emacs/Compilation-Mode.html). diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/lint/lint.go b/vertical-pod-autoscaler/vendor/golang.org/x/lint/lint.go deleted file mode 100644 index 46bd45f4e2c5..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/lint/lint.go +++ /dev/null @@ -1,1671 +0,0 @@ -// Copyright (c) 2013 The Go Authors. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file or at -// https://developers.google.com/open-source/licenses/bsd. - -// Package lint contains a linter for Go source code. -package lint - -import ( - "bufio" - "bytes" - "fmt" - "go/ast" - "go/parser" - "go/printer" - "go/token" - "go/types" - "regexp" - "sort" - "strconv" - "strings" - "unicode" - "unicode/utf8" - - "golang.org/x/tools/go/ast/astutil" - "golang.org/x/tools/go/gcexportdata" -) - -const styleGuideBase = "https://golang.org/wiki/CodeReviewComments" - -// A Linter lints Go source code. -type Linter struct { -} - -// Problem represents a problem in some source code. -type Problem struct { - Position token.Position // position in source file - Text string // the prose that describes the problem - Link string // (optional) the link to the style guide for the problem - Confidence float64 // a value in (0,1] estimating the confidence in this problem's correctness - LineText string // the source line - Category string // a short name for the general category of the problem - - // If the problem has a suggested fix (the minority case), - // ReplacementLine is a full replacement for the relevant line of the source file. - ReplacementLine string -} - -func (p *Problem) String() string { - if p.Link != "" { - return p.Text + "\n\n" + p.Link - } - return p.Text -} - -type byPosition []Problem - -func (p byPosition) Len() int { return len(p) } -func (p byPosition) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p byPosition) Less(i, j int) bool { - pi, pj := p[i].Position, p[j].Position - - if pi.Filename != pj.Filename { - return pi.Filename < pj.Filename - } - if pi.Line != pj.Line { - return pi.Line < pj.Line - } - if pi.Column != pj.Column { - return pi.Column < pj.Column - } - - return p[i].Text < p[j].Text -} - -// Lint lints src. -func (l *Linter) Lint(filename string, src []byte) ([]Problem, error) { - return l.LintFiles(map[string][]byte{filename: src}) -} - -// LintFiles lints a set of files of a single package. -// The argument is a map of filename to source. 
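The Linter API being deleted here lints one or more files of a single package and returns a slice of Problem values carrying position, confidence, and an optional style-guide link. A minimal usage sketch against that API — the signatures are taken from the deleted lint.go; the sample filename and source are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/lint"
)

func main() {
	// An exported function with no doc comment, which golint flags.
	src := []byte("package demo\n\nfunc Exported() {}\n")

	var l lint.Linter
	problems, err := l.Lint("demo.go", src) // single-file form of LintFiles
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range problems {
		fmt.Printf("%s:%d: %s (confidence %.1f)\n",
			p.Position.Filename, p.Position.Line, p.Text, p.Confidence)
	}
}
```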
-func (l *Linter) LintFiles(files map[string][]byte) ([]Problem, error) { - pkg := &pkg{ - fset: token.NewFileSet(), - files: make(map[string]*file), - } - var pkgName string - for filename, src := range files { - if isGenerated(src) { - continue // See issue #239 - } - f, err := parser.ParseFile(pkg.fset, filename, src, parser.ParseComments) - if err != nil { - return nil, err - } - if pkgName == "" { - pkgName = f.Name.Name - } else if f.Name.Name != pkgName { - return nil, fmt.Errorf("%s is in package %s, not %s", filename, f.Name.Name, pkgName) - } - pkg.files[filename] = &file{ - pkg: pkg, - f: f, - fset: pkg.fset, - src: src, - filename: filename, - } - } - if len(pkg.files) == 0 { - return nil, nil - } - return pkg.lint(), nil -} - -var ( - genHdr = []byte("// Code generated ") - genFtr = []byte(" DO NOT EDIT.") -) - -// isGenerated reports whether the source file is generated code -// according the rules from https://golang.org/s/generatedcode. -func isGenerated(src []byte) bool { - sc := bufio.NewScanner(bytes.NewReader(src)) - for sc.Scan() { - b := sc.Bytes() - if bytes.HasPrefix(b, genHdr) && bytes.HasSuffix(b, genFtr) && len(b) >= len(genHdr)+len(genFtr) { - return true - } - } - return false -} - -// pkg represents a package being linted. -type pkg struct { - fset *token.FileSet - files map[string]*file - - typesPkg *types.Package - typesInfo *types.Info - - // sortable is the set of types in the package that implement sort.Interface. - sortable map[string]bool - // main is whether this is a "main" package. - main bool - - problems []Problem -} - -func (p *pkg) lint() []Problem { - if err := p.typeCheck(); err != nil { - /* TODO(dsymonds): Consider reporting these errors when golint operates on entire packages. - if e, ok := err.(types.Error); ok { - pos := p.fset.Position(e.Pos) - conf := 1.0 - if strings.Contains(e.Msg, "can't find import: ") { - // Golint is probably being run in a context that doesn't support - // typechecking (e.g. package files aren't found), so don't warn about it. - conf = 0 - } - if conf > 0 { - p.errorfAt(pos, conf, category("typechecking"), e.Msg) - } - - // TODO(dsymonds): Abort if !e.Soft? - } - */ - } - - p.scanSortable() - p.main = p.isMain() - - for _, f := range p.files { - f.lint() - } - - sort.Sort(byPosition(p.problems)) - - return p.problems -} - -// file represents a file being linted. -type file struct { - pkg *pkg - f *ast.File - fset *token.FileSet - src []byte - filename string -} - -func (f *file) isTest() bool { return strings.HasSuffix(f.filename, "_test.go") } - -func (f *file) lint() { - f.lintPackageComment() - f.lintImports() - f.lintBlankImports() - f.lintExported() - f.lintNames() - f.lintVarDecls() - f.lintElses() - f.lintRanges() - f.lintErrorf() - f.lintErrors() - f.lintErrorStrings() - f.lintReceiverNames() - f.lintIncDec() - f.lintErrorReturn() - f.lintUnexportedReturn() - f.lintTimeNames() - f.lintContextKeyTypes() - f.lintContextArgs() -} - -type link string -type category string - -// The variadic arguments may start with link and category types, -// and must end with a format string and any arguments. -// It returns the new Problem. -func (f *file) errorf(n ast.Node, confidence float64, args ...interface{}) *Problem { - pos := f.fset.Position(n.Pos()) - if pos.Filename == "" { - pos.Filename = f.filename - } - return f.pkg.errorfAt(pos, confidence, args...) 
-} - -func (p *pkg) errorfAt(pos token.Position, confidence float64, args ...interface{}) *Problem { - problem := Problem{ - Position: pos, - Confidence: confidence, - } - if pos.Filename != "" { - // The file might not exist in our mapping if a //line directive was encountered. - if f, ok := p.files[pos.Filename]; ok { - problem.LineText = srcLine(f.src, pos) - } - } - -argLoop: - for len(args) > 1 { // always leave at least the format string in args - switch v := args[0].(type) { - case link: - problem.Link = string(v) - case category: - problem.Category = string(v) - default: - break argLoop - } - args = args[1:] - } - - problem.Text = fmt.Sprintf(args[0].(string), args[1:]...) - - p.problems = append(p.problems, problem) - return &p.problems[len(p.problems)-1] -} - -var newImporter = func(fset *token.FileSet) types.ImporterFrom { - return gcexportdata.NewImporter(fset, make(map[string]*types.Package)) -} - -func (p *pkg) typeCheck() error { - config := &types.Config{ - // By setting a no-op error reporter, the type checker does as much work as possible. - Error: func(error) {}, - Importer: newImporter(p.fset), - } - info := &types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Scopes: make(map[ast.Node]*types.Scope), - } - var anyFile *file - var astFiles []*ast.File - for _, f := range p.files { - anyFile = f - astFiles = append(astFiles, f.f) - } - pkg, err := config.Check(anyFile.f.Name.Name, p.fset, astFiles, info) - // Remember the typechecking info, even if config.Check failed, - // since we will get partial information. - p.typesPkg = pkg - p.typesInfo = info - return err -} - -func (p *pkg) typeOf(expr ast.Expr) types.Type { - if p.typesInfo == nil { - return nil - } - return p.typesInfo.TypeOf(expr) -} - -func (p *pkg) isNamedType(typ types.Type, importPath, name string) bool { - n, ok := typ.(*types.Named) - if !ok { - return false - } - tn := n.Obj() - return tn != nil && tn.Pkg() != nil && tn.Pkg().Path() == importPath && tn.Name() == name -} - -// scopeOf returns the tightest scope encompassing id. -func (p *pkg) scopeOf(id *ast.Ident) *types.Scope { - var scope *types.Scope - if obj := p.typesInfo.ObjectOf(id); obj != nil { - scope = obj.Parent() - } - if scope == p.typesPkg.Scope() { - // We were given a top-level identifier. - // Use the file-level scope instead of the package-level scope. - pos := id.Pos() - for _, f := range p.files { - if f.f.Pos() <= pos && pos < f.f.End() { - scope = p.typesInfo.Scopes[f.f] - break - } - } - } - return scope -} - -func (p *pkg) scanSortable() { - p.sortable = make(map[string]bool) - - // bitfield for which methods exist on each type. - const ( - Len = 1 << iota - Less - Swap - ) - nmap := map[string]int{"Len": Len, "Less": Less, "Swap": Swap} - has := make(map[string]int) - for _, f := range p.files { - f.walk(func(n ast.Node) bool { - fn, ok := n.(*ast.FuncDecl) - if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 { - return true - } - // TODO(dsymonds): We could check the signature to be more precise. 
- recv := receiverType(fn) - if i, ok := nmap[fn.Name.Name]; ok { - has[recv] |= i - } - return false - }) - } - for typ, ms := range has { - if ms == Len|Less|Swap { - p.sortable[typ] = true - } - } -} - -func (p *pkg) isMain() bool { - for _, f := range p.files { - if f.isMain() { - return true - } - } - return false -} - -func (f *file) isMain() bool { - if f.f.Name.Name == "main" { - return true - } - return false -} - -// lintPackageComment checks package comments. It complains if -// there is no package comment, or if it is not of the right form. -// This has a notable false positive in that a package comment -// could rightfully appear in a different file of the same package, -// but that's not easy to fix since this linter is file-oriented. -func (f *file) lintPackageComment() { - if f.isTest() { - return - } - - const ref = styleGuideBase + "#package-comments" - prefix := "Package " + f.f.Name.Name + " " - - // Look for a detached package comment. - // First, scan for the last comment that occurs before the "package" keyword. - var lastCG *ast.CommentGroup - for _, cg := range f.f.Comments { - if cg.Pos() > f.f.Package { - // Gone past "package" keyword. - break - } - lastCG = cg - } - if lastCG != nil && strings.HasPrefix(lastCG.Text(), prefix) { - endPos := f.fset.Position(lastCG.End()) - pkgPos := f.fset.Position(f.f.Package) - if endPos.Line+1 < pkgPos.Line { - // There isn't a great place to anchor this error; - // the start of the blank lines between the doc and the package statement - // is at least pointing at the location of the problem. - pos := token.Position{ - Filename: endPos.Filename, - // Offset not set; it is non-trivial, and doesn't appear to be needed. - Line: endPos.Line + 1, - Column: 1, - } - f.pkg.errorfAt(pos, 0.9, link(ref), category("comments"), "package comment is detached; there should be no blank lines between it and the package statement") - return - } - } - - if f.f.Doc == nil { - f.errorf(f.f, 0.2, link(ref), category("comments"), "should have a package comment, unless it's in another file for this package") - return - } - s := f.f.Doc.Text() - if ts := strings.TrimLeft(s, " \t"); ts != s { - f.errorf(f.f.Doc, 1, link(ref), category("comments"), "package comment should not have leading space") - s = ts - } - // Only non-main packages need to keep to this form. - if !f.pkg.main && !strings.HasPrefix(s, prefix) { - f.errorf(f.f.Doc, 1, link(ref), category("comments"), `package comment should be of the form "%s..."`, prefix) - } -} - -// lintBlankImports complains if a non-main package has blank imports that are -// not documented. -func (f *file) lintBlankImports() { - // In package main and in tests, we don't complain about blank imports. - if f.pkg.main || f.isTest() { - return - } - - // The first element of each contiguous group of blank imports should have - // an explanatory comment of some kind. - for i, imp := range f.f.Imports { - pos := f.fset.Position(imp.Pos()) - - if !isBlank(imp.Name) { - continue // Ignore non-blank imports. - } - if i > 0 { - prev := f.f.Imports[i-1] - prevPos := f.fset.Position(prev.Pos()) - if isBlank(prev.Name) && prevPos.Line+1 == pos.Line { - continue // A subsequent blank in a group. - } - } - - // This is the first blank import of a group. - if imp.Doc == nil && imp.Comment == nil { - ref := "" - f.errorf(imp, 1, link(ref), category("imports"), "a blank import should be only in a main or test package, or have a comment justifying it") - } - } -} - -// lintImports examines import blocks. 
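lintPackageComment and lintBlankImports, deleted above, enforce two comment conventions: a package comment of the form "Package <name> ...", and an explanatory comment on the first blank import of a group in non-main, non-test packages. A small file that satisfies both checks (the package and import are illustrative):

```go
// Package demo illustrates the comment conventions checked above: a
// package comment in the standard form, and a justified blank import.
package demo

import (
	// Imported for its side effect of registering the PNG decoder.
	_ "image/png"
)
```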
-func (f *file) lintImports() { - for i, is := range f.f.Imports { - _ = i - if is.Name != nil && is.Name.Name == "." && !f.isTest() { - f.errorf(is, 1, link(styleGuideBase+"#import-dot"), category("imports"), "should not use dot imports") - } - - } -} - -const docCommentsLink = styleGuideBase + "#doc-comments" - -// lintExported examines the exported names. -// It complains if any required doc comments are missing, -// or if they are not of the right form. The exact rules are in -// lintFuncDoc, lintTypeDoc and lintValueSpecDoc; this function -// also tracks the GenDecl structure being traversed to permit -// doc comments for constants to be on top of the const block. -// It also complains if the names stutter when combined with -// the package name. -func (f *file) lintExported() { - if f.isTest() { - return - } - - var lastGen *ast.GenDecl // last GenDecl entered. - - // Set of GenDecls that have already had missing comments flagged. - genDeclMissingComments := make(map[*ast.GenDecl]bool) - - f.walk(func(node ast.Node) bool { - switch v := node.(type) { - case *ast.GenDecl: - if v.Tok == token.IMPORT { - return false - } - // token.CONST, token.TYPE or token.VAR - lastGen = v - return true - case *ast.FuncDecl: - f.lintFuncDoc(v) - if v.Recv == nil { - // Only check for stutter on functions, not methods. - // Method names are not used package-qualified. - f.checkStutter(v.Name, "func") - } - // Don't proceed inside funcs. - return false - case *ast.TypeSpec: - // inside a GenDecl, which usually has the doc - doc := v.Doc - if doc == nil { - doc = lastGen.Doc - } - f.lintTypeDoc(v, doc) - f.checkStutter(v.Name, "type") - // Don't proceed inside types. - return false - case *ast.ValueSpec: - f.lintValueSpecDoc(v, lastGen, genDeclMissingComments) - return false - } - return true - }) -} - -var ( - allCapsRE = regexp.MustCompile(`^[A-Z0-9_]+$`) - anyCapsRE = regexp.MustCompile(`[A-Z]`) -) - -// knownNameExceptions is a set of names that are known to be exempt from naming checks. -// This is usually because they are constrained by having to match names in the -// standard library. -var knownNameExceptions = map[string]bool{ - "LastInsertId": true, // must match database/sql - "kWh": true, -} - -// lintNames examines all names in the file. -// It complains if any use underscores or incorrect known initialisms. -func (f *file) lintNames() { - // Package names need slightly different handling than other names. - if strings.Contains(f.f.Name.Name, "_") && !strings.HasSuffix(f.f.Name.Name, "_test") { - f.errorf(f.f, 1, link("http://golang.org/doc/effective_go.html#package-names"), category("naming"), "don't use an underscore in package name") - } - if anyCapsRE.MatchString(f.f.Name.Name) { - f.errorf(f.f, 1, link("http://golang.org/doc/effective_go.html#package-names"), category("mixed-caps"), "don't use MixedCaps in package name; %s should be %s", f.f.Name.Name, strings.ToLower(f.f.Name.Name)) - } - - check := func(id *ast.Ident, thing string) { - if id.Name == "_" { - return - } - if knownNameExceptions[id.Name] { - return - } - - // Handle two common styles from other languages that don't belong in Go. 
- if len(id.Name) >= 5 && allCapsRE.MatchString(id.Name) && strings.Contains(id.Name, "_") { - f.errorf(id, 0.8, link(styleGuideBase+"#mixed-caps"), category("naming"), "don't use ALL_CAPS in Go names; use CamelCase") - return - } - if len(id.Name) > 2 && id.Name[0] == 'k' && id.Name[1] >= 'A' && id.Name[1] <= 'Z' { - should := string(id.Name[1]+'a'-'A') + id.Name[2:] - f.errorf(id, 0.8, link(styleGuideBase+"#mixed-caps"), category("naming"), "don't use leading k in Go names; %s %s should be %s", thing, id.Name, should) - } - - should := lintName(id.Name) - if id.Name == should { - return - } - - if len(id.Name) > 2 && strings.Contains(id.Name[1:], "_") { - f.errorf(id, 0.9, link("http://golang.org/doc/effective_go.html#mixed-caps"), category("naming"), "don't use underscores in Go names; %s %s should be %s", thing, id.Name, should) - return - } - f.errorf(id, 0.8, link(styleGuideBase+"#initialisms"), category("naming"), "%s %s should be %s", thing, id.Name, should) - } - checkList := func(fl *ast.FieldList, thing string) { - if fl == nil { - return - } - for _, f := range fl.List { - for _, id := range f.Names { - check(id, thing) - } - } - } - f.walk(func(node ast.Node) bool { - switch v := node.(type) { - case *ast.AssignStmt: - if v.Tok == token.ASSIGN { - return true - } - for _, exp := range v.Lhs { - if id, ok := exp.(*ast.Ident); ok { - check(id, "var") - } - } - case *ast.FuncDecl: - if f.isTest() && (strings.HasPrefix(v.Name.Name, "Example") || strings.HasPrefix(v.Name.Name, "Test") || strings.HasPrefix(v.Name.Name, "Benchmark")) { - return true - } - - thing := "func" - if v.Recv != nil { - thing = "method" - } - - // Exclude naming warnings for functions that are exported to C but - // not exported in the Go API. - // See https://github.com/golang/lint/issues/144. - if ast.IsExported(v.Name.Name) || !isCgoExported(v) { - check(v.Name, thing) - } - - checkList(v.Type.Params, thing+" parameter") - checkList(v.Type.Results, thing+" result") - case *ast.GenDecl: - if v.Tok == token.IMPORT { - return true - } - var thing string - switch v.Tok { - case token.CONST: - thing = "const" - case token.TYPE: - thing = "type" - case token.VAR: - thing = "var" - } - for _, spec := range v.Specs { - switch s := spec.(type) { - case *ast.TypeSpec: - check(s.Name, thing) - case *ast.ValueSpec: - for _, id := range s.Names { - check(id, thing) - } - } - } - case *ast.InterfaceType: - // Do not check interface method names. - // They are often constrainted by the method names of concrete types. - for _, x := range v.Methods.List { - ft, ok := x.Type.(*ast.FuncType) - if !ok { // might be an embedded interface name - continue - } - checkList(ft.Params, "interface method parameter") - checkList(ft.Results, "interface method result") - } - case *ast.RangeStmt: - if v.Tok == token.ASSIGN { - return true - } - if id, ok := v.Key.(*ast.Ident); ok { - check(id, "range var") - } - if id, ok := v.Value.(*ast.Ident); ok { - check(id, "range var") - } - case *ast.StructType: - for _, f := range v.Fields.List { - for _, id := range f.Names { - check(id, "struct field") - } - } - } - return true - }) -} - -// lintName returns a different name if it should be different. -func lintName(name string) (should string) { - // Fast path for simple cases: "_" and all lowercase. 
- if name == "_" { - return name - } - allLower := true - for _, r := range name { - if !unicode.IsLower(r) { - allLower = false - break - } - } - if allLower { - return name - } - - // Split camelCase at any lower->upper transition, and split on underscores. - // Check each word for common initialisms. - runes := []rune(name) - w, i := 0, 0 // index of start of word, scan - for i+1 <= len(runes) { - eow := false // whether we hit the end of a word - if i+1 == len(runes) { - eow = true - } else if runes[i+1] == '_' { - // underscore; shift the remainder forward over any run of underscores - eow = true - n := 1 - for i+n+1 < len(runes) && runes[i+n+1] == '_' { - n++ - } - - // Leave at most one underscore if the underscore is between two digits - if i+n+1 < len(runes) && unicode.IsDigit(runes[i]) && unicode.IsDigit(runes[i+n+1]) { - n-- - } - - copy(runes[i+1:], runes[i+n+1:]) - runes = runes[:len(runes)-n] - } else if unicode.IsLower(runes[i]) && !unicode.IsLower(runes[i+1]) { - // lower->non-lower - eow = true - } - i++ - if !eow { - continue - } - - // [w,i) is a word. - word := string(runes[w:i]) - if u := strings.ToUpper(word); commonInitialisms[u] { - // Keep consistent case, which is lowercase only at the start. - if w == 0 && unicode.IsLower(runes[w]) { - u = strings.ToLower(u) - } - // All the common initialisms are ASCII, - // so we can replace the bytes exactly. - copy(runes[w:], []rune(u)) - } else if w > 0 && strings.ToLower(word) == word { - // already all lowercase, and not the first word, so uppercase the first character. - runes[w] = unicode.ToUpper(runes[w]) - } - w = i - } - return string(runes) -} - -// commonInitialisms is a set of common initialisms. -// Only add entries that are highly unlikely to be non-initialisms. -// For instance, "ID" is fine (Freudian code is rare), but "AND" is not. -var commonInitialisms = map[string]bool{ - "ACL": true, - "API": true, - "ASCII": true, - "CPU": true, - "CSS": true, - "DNS": true, - "EOF": true, - "GUID": true, - "HTML": true, - "HTTP": true, - "HTTPS": true, - "ID": true, - "IP": true, - "JSON": true, - "LHS": true, - "QPS": true, - "RAM": true, - "RHS": true, - "RPC": true, - "SLA": true, - "SMTP": true, - "SQL": true, - "SSH": true, - "TCP": true, - "TLS": true, - "TTL": true, - "UDP": true, - "UI": true, - "UID": true, - "UUID": true, - "URI": true, - "URL": true, - "UTF8": true, - "VM": true, - "XML": true, - "XMPP": true, - "XSRF": true, - "XSS": true, -} - -// lintTypeDoc examines the doc comment on a type. -// It complains if they are missing from an exported type, -// or if they are not of the standard form. -func (f *file) lintTypeDoc(t *ast.TypeSpec, doc *ast.CommentGroup) { - if !ast.IsExported(t.Name.Name) { - return - } - if doc == nil { - f.errorf(t, 1, link(docCommentsLink), category("comments"), "exported type %v should have comment or be unexported", t.Name) - return - } - - s := doc.Text() - articles := [...]string{"A", "An", "The"} - for _, a := range articles { - if strings.HasPrefix(s, a+" ") { - s = s[len(a)+1:] - break - } - } - if !strings.HasPrefix(s, t.Name.Name+" ") { - f.errorf(doc, 1, link(docCommentsLink), category("comments"), `comment on exported type %v should be of the form "%v ..." (with optional leading article)`, t.Name, t.Name) - } -} - -var commonMethods = map[string]bool{ - "Error": true, - "Read": true, - "ServeHTTP": true, - "String": true, - "Write": true, -} - -// lintFuncDoc examines doc comments on functions and methods. 
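lintName, deleted above, rewrites identifiers so that well-known initialisms keep a consistent case (HttpServer becomes HTTPServer). A deliberately simplified sketch of that idea — fixInitialisms is a hypothetical helper, not the real lintName, which also handles underscores, digit runs, and lowercase initialisms at the start of unexported names:

```go
package main

import (
	"fmt"
	"strings"
	"unicode"
)

// fixInitialisms splits an identifier at lower->upper transitions and
// upcases any word found in the initialisms set. A much-simplified
// sketch of the lintName logic shown above.
func fixInitialisms(name string, initialisms map[string]bool) string {
	runes := []rune(name)
	var words []string
	start := 0
	for i := 1; i <= len(runes); i++ {
		if i == len(runes) || (unicode.IsLower(runes[i-1]) && unicode.IsUpper(runes[i])) {
			words = append(words, string(runes[start:i]))
			start = i
		}
	}
	for i, w := range words {
		if u := strings.ToUpper(w); initialisms[u] {
			words[i] = u
		}
	}
	return strings.Join(words, "")
}

func main() {
	common := map[string]bool{"HTTP": true, "ID": true, "URL": true}
	fmt.Println(fixInitialisms("HttpServer", common)) // HTTPServer
	fmt.Println(fixInitialisms("userId", common))     // userID
}
```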
-// It complains if they are missing, or not of the right form.
-// It has specific exclusions for well-known methods (see commonMethods above).
-func (f *file) lintFuncDoc(fn *ast.FuncDecl) {
-	if !ast.IsExported(fn.Name.Name) {
-		// func is unexported
-		return
-	}
-	kind := "function"
-	name := fn.Name.Name
-	if fn.Recv != nil && len(fn.Recv.List) > 0 {
-		// method
-		kind = "method"
-		recv := receiverType(fn)
-		if !ast.IsExported(recv) {
-			// receiver is unexported
-			return
-		}
-		if commonMethods[name] {
-			return
-		}
-		switch name {
-		case "Len", "Less", "Swap":
-			if f.pkg.sortable[recv] {
-				return
-			}
-		}
-		name = recv + "." + name
-	}
-	if fn.Doc == nil {
-		f.errorf(fn, 1, link(docCommentsLink), category("comments"), "exported %s %s should have comment or be unexported", kind, name)
-		return
-	}
-	s := fn.Doc.Text()
-	prefix := fn.Name.Name + " "
-	if !strings.HasPrefix(s, prefix) {
-		f.errorf(fn.Doc, 1, link(docCommentsLink), category("comments"), `comment on exported %s %s should be of the form "%s..."`, kind, name, prefix)
-	}
-}
-
-// lintValueSpecDoc examines package-global variables and constants.
-// It complains if they are not individually declared,
-// or if they are not suitably documented in the right form (unless they are in a block that is commented).
-func (f *file) lintValueSpecDoc(vs *ast.ValueSpec, gd *ast.GenDecl, genDeclMissingComments map[*ast.GenDecl]bool) {
-	kind := "var"
-	if gd.Tok == token.CONST {
-		kind = "const"
-	}
-
-	if len(vs.Names) > 1 {
-		// Check that none are exported except for the first.
-		for _, n := range vs.Names[1:] {
-			if ast.IsExported(n.Name) {
-				f.errorf(vs, 1, category("comments"), "exported %s %s should have its own declaration", kind, n.Name)
-				return
-			}
-		}
-	}
-
-	// Only one name.
-	name := vs.Names[0].Name
-	if !ast.IsExported(name) {
-		return
-	}
-
-	if vs.Doc == nil && gd.Doc == nil {
-		if genDeclMissingComments[gd] {
-			return
-		}
-		block := ""
-		if kind == "const" && gd.Lparen.IsValid() {
-			block = " (or a comment on this block)"
-		}
-		f.errorf(vs, 1, link(docCommentsLink), category("comments"), "exported %s %s should have comment%s or be unexported", kind, name, block)
-		genDeclMissingComments[gd] = true
-		return
-	}
-	// If this GenDecl has parens and a comment, we don't check its comment form.
-	if gd.Lparen.IsValid() && gd.Doc != nil {
-		return
-	}
-	// The relevant text to check will be on either vs.Doc or gd.Doc.
-	// Use vs.Doc preferentially.
-	doc := vs.Doc
-	if doc == nil {
-		doc = gd.Doc
-	}
-	prefix := name + " "
-	if !strings.HasPrefix(doc.Text(), prefix) {
-		f.errorf(doc, 1, link(docCommentsLink), category("comments"), `comment on exported %s %s should be of the form "%s..."`, kind, name, prefix)
-	}
-}
-
-func (f *file) checkStutter(id *ast.Ident, thing string) {
-	pkg, name := f.f.Name.Name, id.Name
-	if !ast.IsExported(name) {
-		// unexported name
-		return
-	}
-	// A name stutters if the package name is a strict prefix
-	// and the next character of the name starts a new word.
-	if len(name) <= len(pkg) {
-		// name is too short to stutter.
-		// This permits the name to be the same as the package name.
-		return
-	}
-	if !strings.EqualFold(pkg, name[:len(pkg)]) {
-		return
-	}
-	// We can assume the name is well-formed UTF-8.
-	// If the next rune after the package name is uppercase or an underscore
-	// then it's starting a new word and thus this name stutters.
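	// For example (hypothetical names): in a package named "http", an exported
	// name like http.HTTPServer stutters ("S" starts a new word right after the
	// package-name prefix) and is reported with the suggestion "Server", while
	// http.Httpd is fine because the rune after the prefix is lowercase.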
-	rem := name[len(pkg):]
-	if next, _ := utf8.DecodeRuneInString(rem); next == '_' || unicode.IsUpper(next) {
-		f.errorf(id, 0.8, link(styleGuideBase+"#package-names"), category("naming"), "%s name will be used as %s.%s by other packages, and that stutters; consider calling this %s", thing, pkg, name, rem)
-	}
-}
-
-// zeroLiteral is a set of ast.BasicLit values that are zero values.
-// It is not exhaustive.
-var zeroLiteral = map[string]bool{
-	"false": true, // bool
-	// runes
-	`'\x00'`: true,
-	`'\000'`: true,
-	// strings
-	`""`: true,
-	"``": true,
-	// numerics
-	"0":   true,
-	"0.":  true,
-	"0.0": true,
-	"0i":  true,
-}
-
-// lintVarDecls examines variable declarations. It complains about declarations with
-// redundant LHS types that can be inferred from the RHS.
-func (f *file) lintVarDecls() {
-	var lastGen *ast.GenDecl // last GenDecl entered.
-
-	f.walk(func(node ast.Node) bool {
-		switch v := node.(type) {
-		case *ast.GenDecl:
-			if v.Tok != token.CONST && v.Tok != token.VAR {
-				return false
-			}
-			lastGen = v
-			return true
-		case *ast.ValueSpec:
-			if lastGen.Tok == token.CONST {
-				return false
-			}
-			if len(v.Names) > 1 || v.Type == nil || len(v.Values) == 0 {
-				return false
-			}
-			rhs := v.Values[0]
-			// An underscore var appears in a common idiom for compile-time interface satisfaction,
-			// as in "var _ Interface = (*Concrete)(nil)".
-			if isIdent(v.Names[0], "_") {
-				return false
-			}
-			// If the RHS is a zero value, suggest dropping it.
-			zero := false
-			if lit, ok := rhs.(*ast.BasicLit); ok {
-				zero = zeroLiteral[lit.Value]
-			} else if isIdent(rhs, "nil") {
-				zero = true
-			}
-			if zero {
-				f.errorf(rhs, 0.9, category("zero-value"), "should drop = %s from declaration of var %s; it is the zero value", f.render(rhs), v.Names[0])
-				return false
-			}
-			lhsTyp := f.pkg.typeOf(v.Type)
-			rhsTyp := f.pkg.typeOf(rhs)
-
-			if !validType(lhsTyp) || !validType(rhsTyp) {
-				// Type checking failed (often due to missing imports).
-				return false
-			}
-
-			if !types.Identical(lhsTyp, rhsTyp) {
-				// Assignment to a different type is not redundant.
-				return false
-			}
-
-			// The next two conditions are for suppressing the warning in situations
-			// where we were unable to typecheck.
-
-			// If the LHS type is an interface, don't warn, since it is probably a
-			// concrete type on the RHS. Note that our feeble lexical check here
-			// will only pick up interface{} and other literal interface types;
-			// that covers most of the cases we care to exclude right now.
-			if _, ok := v.Type.(*ast.InterfaceType); ok {
-				return false
-			}
-			// If the RHS is an untyped const, only warn if the LHS type is its default type.
-			if defType, ok := f.isUntypedConst(rhs); ok && !isIdent(v.Type, defType) {
-				return false
-			}
-
-			f.errorf(v.Type, 0.8, category("type-inference"), "should omit type %s from declaration of var %s; it will be inferred from the right-hand side", f.render(v.Type), v.Names[0])
-			return false
-		}
-		return true
-	})
-}
-
-func validType(T types.Type) bool {
-	return T != nil &&
-		T != types.Typ[types.Invalid] &&
-		!strings.Contains(T.String(), "invalid type") // good but not foolproof
-}
-
-// lintElses examines else blocks. It complains about any else block whose if block ends in a return.
-func (f *file) lintElses() {
-	// We don't want to flag if { } else if { } else { } constructions.
-	// They will appear as an IfStmt whose Else field is also an IfStmt.
-	// Record such a node so we ignore it when we visit it.
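	// Sketch of the rewrite this check nudges toward (hypothetical code):
	//
	//	if err := do(); err != nil {
	//		return err
	//	} else {          // flagged: the if block ends with a return
	//		use(result)
	//	}
	//
	// should become:
	//
	//	if err := do(); err != nil {
	//		return err
	//	}
	//	use(result)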
- ignore := make(map[*ast.IfStmt]bool) - - f.walk(func(node ast.Node) bool { - ifStmt, ok := node.(*ast.IfStmt) - if !ok || ifStmt.Else == nil { - return true - } - if elseif, ok := ifStmt.Else.(*ast.IfStmt); ok { - ignore[elseif] = true - return true - } - if ignore[ifStmt] { - return true - } - if _, ok := ifStmt.Else.(*ast.BlockStmt); !ok { - // only care about elses without conditions - return true - } - if len(ifStmt.Body.List) == 0 { - return true - } - shortDecl := false // does the if statement have a ":=" initialization statement? - if ifStmt.Init != nil { - if as, ok := ifStmt.Init.(*ast.AssignStmt); ok && as.Tok == token.DEFINE { - shortDecl = true - } - } - lastStmt := ifStmt.Body.List[len(ifStmt.Body.List)-1] - if _, ok := lastStmt.(*ast.ReturnStmt); ok { - extra := "" - if shortDecl { - extra = " (move short variable declaration to its own line if necessary)" - } - f.errorf(ifStmt.Else, 1, link(styleGuideBase+"#indent-error-flow"), category("indent"), "if block ends with a return statement, so drop this else and outdent its block"+extra) - } - return true - }) -} - -// lintRanges examines range clauses. It complains about redundant constructions. -func (f *file) lintRanges() { - f.walk(func(node ast.Node) bool { - rs, ok := node.(*ast.RangeStmt) - if !ok { - return true - } - - if isIdent(rs.Key, "_") && (rs.Value == nil || isIdent(rs.Value, "_")) { - p := f.errorf(rs.Key, 1, category("range-loop"), "should omit values from range; this loop is equivalent to `for range ...`") - - newRS := *rs // shallow copy - newRS.Value = nil - newRS.Key = nil - p.ReplacementLine = f.firstLineOf(&newRS, rs) - - return true - } - - if isIdent(rs.Value, "_") { - p := f.errorf(rs.Value, 1, category("range-loop"), "should omit 2nd value from range; this loop is equivalent to `for %s %s range ...`", f.render(rs.Key), rs.Tok) - - newRS := *rs // shallow copy - newRS.Value = nil - p.ReplacementLine = f.firstLineOf(&newRS, rs) - } - - return true - }) -} - -// lintErrorf examines errors.New and testing.Error calls. It complains if its only argument is an fmt.Sprintf invocation. -func (f *file) lintErrorf() { - f.walk(func(node ast.Node) bool { - ce, ok := node.(*ast.CallExpr) - if !ok || len(ce.Args) != 1 { - return true - } - isErrorsNew := isPkgDot(ce.Fun, "errors", "New") - var isTestingError bool - se, ok := ce.Fun.(*ast.SelectorExpr) - if ok && se.Sel.Name == "Error" { - if typ := f.pkg.typeOf(se.X); typ != nil { - isTestingError = typ.String() == "*testing.T" - } - } - if !isErrorsNew && !isTestingError { - return true - } - if !f.imports("errors") { - return true - } - arg := ce.Args[0] - ce, ok = arg.(*ast.CallExpr) - if !ok || !isPkgDot(ce.Fun, "fmt", "Sprintf") { - return true - } - errorfPrefix := "fmt" - if isTestingError { - errorfPrefix = f.render(se.X) - } - p := f.errorf(node, 1, category("errors"), "should replace %s(fmt.Sprintf(...)) with %s.Errorf(...)", f.render(se), errorfPrefix) - - m := f.srcLineWithMatch(ce, `^(.*)`+f.render(se)+`\(fmt\.Sprintf\((.*)\)\)(.*)$`) - if m != nil { - p.ReplacementLine = m[1] + errorfPrefix + ".Errorf(" + m[2] + ")" + m[3] - } - - return true - }) -} - -// lintErrors examines global error vars. It complains if they aren't named in the standard way. 
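// For instance (hypothetical declarations):
//
//	var timeoutError = errors.New("timed out") // flagged: should have a name of the form errFoo, e.g. errTimeout
//	var ErrTimeout = errors.New("timed out")   // fine: exported error vars take the Err prefix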
-func (f *file) lintErrors() { - for _, decl := range f.f.Decls { - gd, ok := decl.(*ast.GenDecl) - if !ok || gd.Tok != token.VAR { - continue - } - for _, spec := range gd.Specs { - spec := spec.(*ast.ValueSpec) - if len(spec.Names) != 1 || len(spec.Values) != 1 { - continue - } - ce, ok := spec.Values[0].(*ast.CallExpr) - if !ok { - continue - } - if !isPkgDot(ce.Fun, "errors", "New") && !isPkgDot(ce.Fun, "fmt", "Errorf") { - continue - } - - id := spec.Names[0] - prefix := "err" - if id.IsExported() { - prefix = "Err" - } - if !strings.HasPrefix(id.Name, prefix) { - f.errorf(id, 0.9, category("naming"), "error var %s should have name of the form %sFoo", id.Name, prefix) - } - } - } -} - -func lintErrorString(s string) (isClean bool, conf float64) { - const basicConfidence = 0.8 - const capConfidence = basicConfidence - 0.2 - first, firstN := utf8.DecodeRuneInString(s) - last, _ := utf8.DecodeLastRuneInString(s) - if last == '.' || last == ':' || last == '!' || last == '\n' { - return false, basicConfidence - } - if unicode.IsUpper(first) { - // People use proper nouns and exported Go identifiers in error strings, - // so decrease the confidence of warnings for capitalization. - if len(s) <= firstN { - return false, capConfidence - } - // Flag strings starting with something that doesn't look like an initialism. - if second, _ := utf8.DecodeRuneInString(s[firstN:]); !unicode.IsUpper(second) { - return false, capConfidence - } - } - return true, 0 -} - -// lintErrorStrings examines error strings. -// It complains if they are capitalized or end in punctuation or a newline. -func (f *file) lintErrorStrings() { - f.walk(func(node ast.Node) bool { - ce, ok := node.(*ast.CallExpr) - if !ok { - return true - } - if !isPkgDot(ce.Fun, "errors", "New") && !isPkgDot(ce.Fun, "fmt", "Errorf") { - return true - } - if len(ce.Args) < 1 { - return true - } - str, ok := ce.Args[0].(*ast.BasicLit) - if !ok || str.Kind != token.STRING { - return true - } - s, _ := strconv.Unquote(str.Value) // can assume well-formed Go - if s == "" { - return true - } - clean, conf := lintErrorString(s) - if clean { - return true - } - - f.errorf(str, conf, link(styleGuideBase+"#error-strings"), category("errors"), - "error strings should not be capitalized or end with punctuation or a newline") - return true - }) -} - -// lintReceiverNames examines receiver names. It complains about inconsistent -// names used for the same type and names such as "this". 
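// For example (hypothetical methods): mixing receiver names on one type, as in
//
//	func (c *Client) Get() error  { ... }
//	func (cl *Client) Post() error { ... } // flagged: previous receiver for *Client was "c"
//
// is reported so the later receiver can be renamed to match the first.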
-func (f *file) lintReceiverNames() { - typeReceiver := map[string]string{} - f.walk(func(n ast.Node) bool { - fn, ok := n.(*ast.FuncDecl) - if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 { - return true - } - names := fn.Recv.List[0].Names - if len(names) < 1 { - return true - } - name := names[0].Name - const ref = styleGuideBase + "#receiver-names" - if name == "_" { - f.errorf(n, 1, link(ref), category("naming"), `receiver name should not be an underscore, omit the name if it is unused`) - return true - } - if name == "this" || name == "self" { - f.errorf(n, 1, link(ref), category("naming"), `receiver name should be a reflection of its identity; don't use generic names such as "this" or "self"`) - return true - } - recv := receiverType(fn) - if prev, ok := typeReceiver[recv]; ok && prev != name { - f.errorf(n, 1, link(ref), category("naming"), "receiver name %s should be consistent with previous receiver name %s for %s", name, prev, recv) - return true - } - typeReceiver[recv] = name - return true - }) -} - -// lintIncDec examines statements that increment or decrement a variable. -// It complains if they don't use x++ or x--. -func (f *file) lintIncDec() { - f.walk(func(n ast.Node) bool { - as, ok := n.(*ast.AssignStmt) - if !ok { - return true - } - if len(as.Lhs) != 1 { - return true - } - if !isOne(as.Rhs[0]) { - return true - } - var suffix string - switch as.Tok { - case token.ADD_ASSIGN: - suffix = "++" - case token.SUB_ASSIGN: - suffix = "--" - default: - return true - } - f.errorf(as, 0.8, category("unary-op"), "should replace %s with %s%s", f.render(as), f.render(as.Lhs[0]), suffix) - return true - }) -} - -// lintErrorReturn examines function declarations that return an error. -// It complains if the error isn't the last parameter. -func (f *file) lintErrorReturn() { - f.walk(func(n ast.Node) bool { - fn, ok := n.(*ast.FuncDecl) - if !ok || fn.Type.Results == nil { - return true - } - ret := fn.Type.Results.List - if len(ret) <= 1 { - return true - } - if isIdent(ret[len(ret)-1].Type, "error") { - return true - } - // An error return parameter should be the last parameter. - // Flag any error parameters found before the last. - for _, r := range ret[:len(ret)-1] { - if isIdent(r.Type, "error") { - f.errorf(fn, 0.9, category("arg-order"), "error should be the last type when returning multiple items") - break // only flag one - } - } - return true - }) -} - -// lintUnexportedReturn examines exported function declarations. -// It complains if any return an unexported type. -func (f *file) lintUnexportedReturn() { - f.walk(func(n ast.Node) bool { - fn, ok := n.(*ast.FuncDecl) - if !ok { - return true - } - if fn.Type.Results == nil { - return false - } - if !fn.Name.IsExported() { - return false - } - thing := "func" - if fn.Recv != nil && len(fn.Recv.List) > 0 { - thing = "method" - if !ast.IsExported(receiverType(fn)) { - // Don't report exported methods of unexported types, - // such as private implementations of sort.Interface. - return false - } - } - for _, ret := range fn.Type.Results.List { - typ := f.pkg.typeOf(ret.Type) - if exportedType(typ) { - continue - } - f.errorf(ret.Type, 0.8, category("unexported-type-in-api"), - "exported %s %s returns unexported type %s, which can be annoying to use", - thing, fn.Name.Name, typ) - break // only flag one - } - return false - }) -} - -// exportedType reports whether typ is an exported type. -// It is imprecise, and will err on the side of returning true, -// such as for composite types. 
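// Illustrative cases (hypothetical types): given "type widget struct{}" in some
// package, exportedType reports false for widget and *widget, but true for
// map[string]int, since builtin key and element types have no package and the
// check errs on the side of true for anything else.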
-func exportedType(typ types.Type) bool { - switch T := typ.(type) { - case *types.Named: - // Builtin types have no package. - return T.Obj().Pkg() == nil || T.Obj().Exported() - case *types.Map: - return exportedType(T.Key()) && exportedType(T.Elem()) - case interface { - Elem() types.Type - }: // array, slice, pointer, chan - return exportedType(T.Elem()) - } - // Be conservative about other types, such as struct, interface, etc. - return true -} - -// timeSuffixes is a list of name suffixes that imply a time unit. -// This is not an exhaustive list. -var timeSuffixes = []string{ - "Sec", "Secs", "Seconds", - "Msec", "Msecs", - "Milli", "Millis", "Milliseconds", - "Usec", "Usecs", "Microseconds", - "MS", "Ms", -} - -func (f *file) lintTimeNames() { - f.walk(func(node ast.Node) bool { - v, ok := node.(*ast.ValueSpec) - if !ok { - return true - } - for _, name := range v.Names { - origTyp := f.pkg.typeOf(name) - // Look for time.Duration or *time.Duration; - // the latter is common when using flag.Duration. - typ := origTyp - if pt, ok := typ.(*types.Pointer); ok { - typ = pt.Elem() - } - if !f.pkg.isNamedType(typ, "time", "Duration") { - continue - } - suffix := "" - for _, suf := range timeSuffixes { - if strings.HasSuffix(name.Name, suf) { - suffix = suf - break - } - } - if suffix == "" { - continue - } - f.errorf(v, 0.9, category("time"), "var %s is of type %v; don't use unit-specific suffix %q", name.Name, origTyp, suffix) - } - return true - }) -} - -// lintContextKeyTypes checks for call expressions to context.WithValue with -// basic types used for the key argument. -// See: https://golang.org/issue/17293 -func (f *file) lintContextKeyTypes() { - f.walk(func(node ast.Node) bool { - switch node := node.(type) { - case *ast.CallExpr: - f.checkContextKeyType(node) - } - - return true - }) -} - -// checkContextKeyType reports an error if the call expression calls -// context.WithValue with a key argument of basic type. -func (f *file) checkContextKeyType(x *ast.CallExpr) { - sel, ok := x.Fun.(*ast.SelectorExpr) - if !ok { - return - } - pkg, ok := sel.X.(*ast.Ident) - if !ok || pkg.Name != "context" { - return - } - if sel.Sel.Name != "WithValue" { - return - } - - // key is second argument to context.WithValue - if len(x.Args) != 3 { - return - } - key := f.pkg.typesInfo.Types[x.Args[1]] - - if ktyp, ok := key.Type.(*types.Basic); ok && ktyp.Kind() != types.Invalid { - f.errorf(x, 1.0, category("context"), fmt.Sprintf("should not use basic type %s as key in context.WithValue", key.Type)) - } -} - -// lintContextArgs examines function declarations that contain an -// argument with a type of context.Context -// It complains if that argument isn't the first parameter. -func (f *file) lintContextArgs() { - f.walk(func(n ast.Node) bool { - fn, ok := n.(*ast.FuncDecl) - if !ok || len(fn.Type.Params.List) <= 1 { - return true - } - // A context.Context should be the first parameter of a function. - // Flag any that show up after the first. - for _, arg := range fn.Type.Params.List[1:] { - if isPkgDot(arg.Type, "context", "Context") { - f.errorf(fn, 0.9, link("https://golang.org/pkg/context/"), category("arg-order"), "context.Context should be the first parameter of a function") - break // only flag one - } - } - return true - }) -} - -// containsComments returns whether the interval [start, end) contains any -// comments without "// MATCH " prefix. 
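// Illustrative examples for the context checks above (hypothetical code):
//
//	ctx = context.WithValue(ctx, "user", u)   // flagged: basic type (string) used as key
//	type ctxKey struct{}
//	ctx = context.WithValue(ctx, ctxKey{}, u) // fine: unexported struct key
//
//	func fetch(id int, ctx context.Context) error // flagged: context.Context should come first
//	func fetch(ctx context.Context, id int) error // fine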
-func (f *file) containsComments(start, end token.Pos) bool {
-	for _, cgroup := range f.f.Comments {
-		comments := cgroup.List
-		if comments[0].Slash >= end {
-			// All comments starting with this group are after end pos.
-			return false
-		}
-		if comments[len(comments)-1].Slash < start {
-			// Comments group ends before start pos.
-			continue
-		}
-		for _, c := range comments {
-			if start <= c.Slash && c.Slash < end && !strings.HasPrefix(c.Text, "// MATCH ") {
-				return true
-			}
-		}
-	}
-	return false
-}
-
-// receiverType returns the named type of the method receiver, sans "*",
-// or "invalid-type" if fn.Recv is ill formed.
-func receiverType(fn *ast.FuncDecl) string {
-	switch e := fn.Recv.List[0].Type.(type) {
-	case *ast.Ident:
-		return e.Name
-	case *ast.StarExpr:
-		if id, ok := e.X.(*ast.Ident); ok {
-			return id.Name
-		}
-	}
-	// The parser accepts much more than just the legal forms.
-	return "invalid-type"
-}
-
-func (f *file) walk(fn func(ast.Node) bool) {
-	ast.Walk(walker(fn), f.f)
-}
-
-func (f *file) render(x interface{}) string {
-	var buf bytes.Buffer
-	if err := printer.Fprint(&buf, f.fset, x); err != nil {
-		panic(err)
-	}
-	return buf.String()
-}
-
-func (f *file) debugRender(x interface{}) string {
-	var buf bytes.Buffer
-	if err := ast.Fprint(&buf, f.fset, x, nil); err != nil {
-		panic(err)
-	}
-	return buf.String()
-}
-
-// walker adapts a function to satisfy the ast.Visitor interface.
-// The function returns whether the walk should proceed into the node's children.
-type walker func(ast.Node) bool
-
-func (w walker) Visit(node ast.Node) ast.Visitor {
-	if w(node) {
-		return w
-	}
-	return nil
-}
-
-func isIdent(expr ast.Expr, ident string) bool {
-	id, ok := expr.(*ast.Ident)
-	return ok && id.Name == ident
-}
-
-// isBlank returns whether id is the blank identifier "_".
-// If id == nil, the answer is false.
-func isBlank(id *ast.Ident) bool { return id != nil && id.Name == "_" }
-
-func isPkgDot(expr ast.Expr, pkg, name string) bool {
-	sel, ok := expr.(*ast.SelectorExpr)
-	return ok && isIdent(sel.X, pkg) && isIdent(sel.Sel, name)
-}
-
-func isOne(expr ast.Expr) bool {
-	lit, ok := expr.(*ast.BasicLit)
-	return ok && lit.Kind == token.INT && lit.Value == "1"
-}
-
-func isCgoExported(f *ast.FuncDecl) bool {
-	if f.Recv != nil || f.Doc == nil {
-		return false
-	}
-
-	cgoExport := regexp.MustCompile(fmt.Sprintf("(?m)^//export %s$", regexp.QuoteMeta(f.Name.Name)))
-	for _, c := range f.Doc.List {
-		if cgoExport.MatchString(c.Text) {
-			return true
-		}
-	}
-	return false
-}
-
-var basicTypeKinds = map[types.BasicKind]string{
-	types.UntypedBool:    "bool",
-	types.UntypedInt:     "int",
-	types.UntypedRune:    "rune",
-	types.UntypedFloat:   "float64",
-	types.UntypedComplex: "complex128",
-	types.UntypedString:  "string",
-}
-
-// isUntypedConst reports whether expr is an untyped constant,
-// and indicates what its default type is.
-// scope may be nil.
-func (f *file) isUntypedConst(expr ast.Expr) (defType string, ok bool) {
-	// Re-evaluate expr outside of its context to see if it's untyped.
-	// (An expr evaluated within, for example, an assignment context will get the type of the LHS.)
-	exprStr := f.render(expr)
-	tv, err := types.Eval(f.fset, f.pkg.typesPkg, expr.Pos(), exprStr)
-	if err != nil {
-		return "", false
-	}
-	if b, ok := tv.Type.(*types.Basic); ok {
-		if dt, ok := basicTypeKinds[b.Kind()]; ok {
-			return dt, true
-		}
-	}
-
-	return "", false
-}
-
-// firstLineOf renders the given node and returns its first line.
-// It will also match the indentation of another node.
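// A minimal sketch of driving all the checks in this file end to end, assuming
// this package's exported API (Linter.Lint returning []Problem):
//
//	var l lint.Linter
//	problems, err := l.Lint("demo.go", src)
//	if err != nil {
//		// handle parse errors
//	}
//	for _, p := range problems {
//		fmt.Printf("%v: %s (%s, confidence %.2f)\n", p.Position, p.Text, p.Category, p.Confidence)
//	}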
-func (f *file) firstLineOf(node, match ast.Node) string { - line := f.render(node) - if i := strings.Index(line, "\n"); i >= 0 { - line = line[:i] - } - return f.indentOf(match) + line -} - -func (f *file) indentOf(node ast.Node) string { - line := srcLine(f.src, f.fset.Position(node.Pos())) - for i, r := range line { - switch r { - case ' ', '\t': - default: - return line[:i] - } - } - return line // unusual or empty line -} - -func (f *file) srcLineWithMatch(node ast.Node, pattern string) (m []string) { - line := srcLine(f.src, f.fset.Position(node.Pos())) - line = strings.TrimSuffix(line, "\n") - rx := regexp.MustCompile(pattern) - return rx.FindStringSubmatch(line) -} - -// imports returns true if the current file imports the specified package path. -func (f *file) imports(importPath string) bool { - all := astutil.Imports(f.fset, f.f) - for _, p := range all { - for _, i := range p { - uq, err := strconv.Unquote(i.Path.Value) - if err == nil && importPath == uq { - return true - } - } - } - return false -} - -// srcLine returns the complete line at p, including the terminating newline. -func srcLine(src []byte, p token.Position) string { - // Run to end of line in both directions if not at line start/end. - lo, hi := p.Offset, p.Offset+1 - for lo > 0 && src[lo-1] != '\n' { - lo-- - } - for hi < len(src) && src[hi-1] != '\n' { - hi++ - } - return string(src[lo:hi]) -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go new file mode 100644 index 000000000000..512077fe8e44 --- /dev/null +++ b/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -0,0 +1,212 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+// +build riscv64,linux
+
+package unix
+
+import "unsafe"
+
+func EpollCreate(size int) (fd int, err error) {
+	if size <= 0 {
+		return -1, EINVAL
+	}
+	return EpollCreate1(0)
+}
+
+//sys	EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT
+//sys	Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64
+//sys	Fchown(fd int, uid int, gid int) (err error)
+//sys	Fstat(fd int, stat *Stat_t) (err error)
+//sys	Fstatat(fd int, path string, stat *Stat_t, flags int) (err error)
+//sys	Fstatfs(fd int, buf *Statfs_t) (err error)
+//sys	Ftruncate(fd int, length int64) (err error)
+//sysnb	Getegid() (egid int)
+//sysnb	Geteuid() (euid int)
+//sysnb	Getgid() (gid int)
+//sysnb	Getrlimit(resource int, rlim *Rlimit) (err error)
+//sysnb	Getuid() (uid int)
+//sys	Listen(s int, n int) (err error)
+//sys	Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
+//sys	Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys	Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
+
+func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
+	var ts *Timespec
+	if timeout != nil {
+		ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000}
+	}
+	return Pselect(nfd, r, w, e, ts, nil)
+}
+
+//sys	sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
+//sys	Setfsgid(gid int) (err error)
+//sys	Setfsuid(uid int) (err error)
+//sysnb	Setregid(rgid int, egid int) (err error)
+//sysnb	Setresgid(rgid int, egid int, sgid int) (err error)
+//sysnb	Setresuid(ruid int, euid int, suid int) (err error)
+//sysnb	Setrlimit(resource int, rlim *Rlimit) (err error)
+//sysnb	Setreuid(ruid int, euid int) (err error)
+//sys	Shutdown(fd int, how int) (err error)
+//sys	Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
+
+func Stat(path string, stat *Stat_t) (err error) {
+	return Fstatat(AT_FDCWD, path, stat, 0)
+}
+
+func Lchown(path string, uid int, gid int) (err error) {
+	return Fchownat(AT_FDCWD, path, uid, gid, AT_SYMLINK_NOFOLLOW)
+}
+
+func Lstat(path string, stat *Stat_t) (err error) {
+	return Fstatat(AT_FDCWD, path, stat, AT_SYMLINK_NOFOLLOW)
+}
+
+//sys	Statfs(path string, buf *Statfs_t) (err error)
+//sys	SyncFileRange(fd int, off int64, n int64, flags int) (err error)
+//sys	Truncate(path string, length int64) (err error)
+
+func Ustat(dev int, ubuf *Ustat_t) (err error) {
+	return ENOSYS
+}
+
+//sys	accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error)
+//sys	accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)
+//sys	bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sys	connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sysnb	getgroups(n int, list *_Gid_t) (nn int, err error)
+//sysnb	setgroups(n int, list *_Gid_t) (err error)
+//sys	getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error)
+//sys	setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error)
+//sysnb	socket(domain int, typ int, proto int) (fd int, err error)
+//sysnb	socketpair(domain int, typ int, proto int, fd *[2]int32) (err error)
+//sysnb	getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sysnb	getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sys	recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error)
+//sys	sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error)
+//sys	recvmsg(s int, msg *Msghdr, flags int) (n int, err error)
+//sys	sendmsg(s int, msg *Msghdr, flags int) (n int, err error)
+//sys	mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error)
+
+//sysnb	Gettimeofday(tv *Timeval) (err error)
+
+func setTimespec(sec, nsec int64) Timespec {
+	return Timespec{Sec: sec, Nsec: nsec}
+}
+
+func setTimeval(sec, usec int64) Timeval {
+	return Timeval{Sec: sec, Usec: usec}
+}
+
+func futimesat(dirfd int, path string, tv *[2]Timeval) (err error) {
+	if tv == nil {
+		return utimensat(dirfd, path, nil, 0)
+	}
+
+	ts := []Timespec{
+		NsecToTimespec(TimevalToNsec(tv[0])),
+		NsecToTimespec(TimevalToNsec(tv[1])),
+	}
+	return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0)
+}
+
+func Time(t *Time_t) (Time_t, error) {
+	var tv Timeval
+	err := Gettimeofday(&tv)
+	if err != nil {
+		return 0, err
+	}
+	if t != nil {
+		*t = Time_t(tv.Sec)
+	}
+	return Time_t(tv.Sec), nil
+}
+
+func Utime(path string, buf *Utimbuf) error {
+	tv := []Timeval{
+		{Sec: buf.Actime},
+		{Sec: buf.Modtime},
+	}
+	return Utimes(path, tv)
+}
+
+func utimes(path string, tv *[2]Timeval) (err error) {
+	if tv == nil {
+		return utimensat(AT_FDCWD, path, nil, 0)
+	}
+
+	ts := []Timespec{
+		NsecToTimespec(TimevalToNsec(tv[0])),
+		NsecToTimespec(TimevalToNsec(tv[1])),
+	}
+	return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0)
+}
+
+func Pipe(p []int) (err error) {
+	if len(p) != 2 {
+		return EINVAL
+	}
+	var pp [2]_C_int
+	err = pipe2(&pp, 0)
+	p[0] = int(pp[0])
+	p[1] = int(pp[1])
+	return
+}
+
+//sysnb	pipe2(p *[2]_C_int, flags int) (err error)
+
+func Pipe2(p []int, flags int) (err error) {
+	if len(p) != 2 {
+		return EINVAL
+	}
+	var pp [2]_C_int
+	err = pipe2(&pp, flags)
+	p[0] = int(pp[0])
+	p[1] = int(pp[1])
+	return
+}
+
+func (r *PtraceRegs) PC() uint64 { return r.Pc }
+
+func (r *PtraceRegs) SetPC(pc uint64) { r.Pc = pc }
+
+func (iov *Iovec) SetLen(length int) {
+	iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+	msghdr.Controllen = uint64(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+	cmsg.Len = uint64(length)
+}
+
+func InotifyInit() (fd int, err error) {
+	return InotifyInit1(0)
+}
+
+func Dup2(oldfd int, newfd int) (err error) {
+	return Dup3(oldfd, newfd, 0)
+}
+
+func Pause() (err error) {
+	_, _, e1 := Syscall6(SYS_PPOLL, 0, 0, 0, 0, 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func Poll(fds []PollFd, timeout int) (n int, err error) {
+	var ts *Timespec
+	if timeout >= 0 {
+		ts = new(Timespec)
+		*ts = NsecToTimespec(int64(timeout) * 1e6)
+	}
+	if len(fds) == 0 {
+		return ppoll(nil, 0, ts, nil)
+	}
+	return ppoll(&fds[0], len(fds), ts, nil)
+}
diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
new file mode 100644
index 000000000000..0978dba1ea38
--- /dev/null
+++ b/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
@@ -0,0 +1,2625 @@
+// mkerrors.sh -Wall -Werror -static -I/tmp/include
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+// +build riscv64,linux
+
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
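// Illustrative use of the riscv64 wrappers defined above (hypothetical
// snippet): Poll is emulated via ppoll and Pipe2 via the raw pipe2 syscall,
// so portable callers keep using the familiar names:
//
//	var fds [2]int
//	if err := unix.Pipe2(fds[:], unix.O_CLOEXEC); err != nil {
//		// handle error
//	}
//	pfds := []unix.PollFd{{Fd: int32(fds[0]), Events: unix.POLLIN}}
//	n, err := unix.Poll(pfds, 1000) // 1s timeout, forwarded to ppoll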
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go + +package unix + +import "syscall" + +const ( + AAFS_MAGIC = 0x5a3c69f0 + ADFS_SUPER_MAGIC = 0xadf5 + AFFS_SUPER_MAGIC = 0xadff + AFS_FS_MAGIC = 0x6b414653 + AFS_SUPER_MAGIC = 0x5346414f + AF_ALG = 0x26 + AF_APPLETALK = 0x5 + AF_ASH = 0x12 + AF_ATMPVC = 0x8 + AF_ATMSVC = 0x14 + AF_AX25 = 0x3 + AF_BLUETOOTH = 0x1f + AF_BRIDGE = 0x7 + AF_CAIF = 0x25 + AF_CAN = 0x1d + AF_DECnet = 0xc + AF_ECONET = 0x13 + AF_FILE = 0x1 + AF_IB = 0x1b + AF_IEEE802154 = 0x24 + AF_INET = 0x2 + AF_INET6 = 0xa + AF_IPX = 0x4 + AF_IRDA = 0x17 + AF_ISDN = 0x22 + AF_IUCV = 0x20 + AF_KCM = 0x29 + AF_KEY = 0xf + AF_LLC = 0x1a + AF_LOCAL = 0x1 + AF_MAX = 0x2c + AF_MPLS = 0x1c + AF_NETBEUI = 0xd + AF_NETLINK = 0x10 + AF_NETROM = 0x6 + AF_NFC = 0x27 + AF_PACKET = 0x11 + AF_PHONET = 0x23 + AF_PPPOX = 0x18 + AF_QIPCRTR = 0x2a + AF_RDS = 0x15 + AF_ROSE = 0xb + AF_ROUTE = 0x10 + AF_RXRPC = 0x21 + AF_SECURITY = 0xe + AF_SMC = 0x2b + AF_SNA = 0x16 + AF_TIPC = 0x1e + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VSOCK = 0x28 + AF_WANPIPE = 0x19 + AF_X25 = 0x9 + ALG_OP_DECRYPT = 0x0 + ALG_OP_ENCRYPT = 0x1 + ALG_SET_AEAD_ASSOCLEN = 0x4 + ALG_SET_AEAD_AUTHSIZE = 0x5 + ALG_SET_IV = 0x2 + ALG_SET_KEY = 0x1 + ALG_SET_OP = 0x3 + ANON_INODE_FS_MAGIC = 0x9041934 + ARPHRD_6LOWPAN = 0x339 + ARPHRD_ADAPT = 0x108 + ARPHRD_APPLETLK = 0x8 + ARPHRD_ARCNET = 0x7 + ARPHRD_ASH = 0x30d + ARPHRD_ATM = 0x13 + ARPHRD_AX25 = 0x3 + ARPHRD_BIF = 0x307 + ARPHRD_CAIF = 0x336 + ARPHRD_CAN = 0x118 + ARPHRD_CHAOS = 0x5 + ARPHRD_CISCO = 0x201 + ARPHRD_CSLIP = 0x101 + ARPHRD_CSLIP6 = 0x103 + ARPHRD_DDCMP = 0x205 + ARPHRD_DLCI = 0xf + ARPHRD_ECONET = 0x30e + ARPHRD_EETHER = 0x2 + ARPHRD_ETHER = 0x1 + ARPHRD_EUI64 = 0x1b + ARPHRD_FCAL = 0x311 + ARPHRD_FCFABRIC = 0x313 + ARPHRD_FCPL = 0x312 + ARPHRD_FCPP = 0x310 + ARPHRD_FDDI = 0x306 + ARPHRD_FRAD = 0x302 + ARPHRD_HDLC = 0x201 + ARPHRD_HIPPI = 0x30c + ARPHRD_HWX25 = 0x110 + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + ARPHRD_IEEE80211 = 0x321 + ARPHRD_IEEE80211_PRISM = 0x322 + ARPHRD_IEEE80211_RADIOTAP = 0x323 + ARPHRD_IEEE802154 = 0x324 + ARPHRD_IEEE802154_MONITOR = 0x325 + ARPHRD_IEEE802_TR = 0x320 + ARPHRD_INFINIBAND = 0x20 + ARPHRD_IP6GRE = 0x337 + ARPHRD_IPDDP = 0x309 + ARPHRD_IPGRE = 0x30a + ARPHRD_IRDA = 0x30f + ARPHRD_LAPB = 0x204 + ARPHRD_LOCALTLK = 0x305 + ARPHRD_LOOPBACK = 0x304 + ARPHRD_METRICOM = 0x17 + ARPHRD_NETLINK = 0x338 + ARPHRD_NETROM = 0x0 + ARPHRD_NONE = 0xfffe + ARPHRD_PHONET = 0x334 + ARPHRD_PHONET_PIPE = 0x335 + ARPHRD_PIMREG = 0x30b + ARPHRD_PPP = 0x200 + ARPHRD_PRONET = 0x4 + ARPHRD_RAWHDLC = 0x206 + ARPHRD_RAWIP = 0x207 + ARPHRD_ROSE = 0x10e + ARPHRD_RSRVD = 0x104 + ARPHRD_SIT = 0x308 + ARPHRD_SKIP = 0x303 + ARPHRD_SLIP = 0x100 + ARPHRD_SLIP6 = 0x102 + ARPHRD_TUNNEL = 0x300 + ARPHRD_TUNNEL6 = 0x301 + ARPHRD_VOID = 0xffff + ARPHRD_VSOCKMON = 0x33a + ARPHRD_X25 = 0x10f + AUTOFS_SUPER_MAGIC = 0x187 + B0 = 0x0 + B1000000 = 0x1008 + B110 = 0x3 + B115200 = 0x1002 + B1152000 = 0x1009 + B1200 = 0x9 + B134 = 0x4 + B150 = 0x5 + B1500000 = 0x100a + B1800 = 0xa + B19200 = 0xe + B200 = 0x6 + B2000000 = 0x100b + B230400 = 0x1003 + B2400 = 0xb + B2500000 = 0x100c + B300 = 0x7 + B3000000 = 0x100d + B3500000 = 0x100e + B38400 = 0xf + B4000000 = 0x100f + B460800 = 0x1004 + B4800 = 0xc + B50 = 0x1 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B600 = 0x8 + B75 = 0x2 + B921600 = 0x1007 + B9600 = 0xd + BALLOON_KVM_MAGIC = 0x13661366 + BDEVFS_MAGIC = 0x62646576 + BINFMTFS_MAGIC = 0x42494e4d + BLKBSZGET = 0x80081270 + BLKBSZSET = 
0x40081271 + BLKFLSBUF = 0x1261 + BLKFRAGET = 0x1265 + BLKFRASET = 0x1264 + BLKGETSIZE = 0x1260 + BLKGETSIZE64 = 0x80081272 + BLKPBSZGET = 0x127b + BLKRAGET = 0x1263 + BLKRASET = 0x1262 + BLKROGET = 0x125e + BLKROSET = 0x125d + BLKRRPART = 0x125f + BLKSECTGET = 0x1267 + BLKSECTSET = 0x1266 + BLKSSZGET = 0x1268 + BOTHER = 0x1000 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIV = 0x30 + BPF_FS_MAGIC = 0xcafe4a11 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LL_OFF = -0x200000 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXINSNS = 0x1000 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MOD = 0x90 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_NET_OFF = -0x100000 + BPF_OR = 0x40 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BPF_XOR = 0xa0 + BRKINT = 0x2 + BS0 = 0x0 + BS1 = 0x2000 + BSDLY = 0x2000 + BTRFS_SUPER_MAGIC = 0x9123683e + BTRFS_TEST_MAGIC = 0x73727279 + CAN_BCM = 0x2 + CAN_EFF_FLAG = 0x80000000 + CAN_EFF_ID_BITS = 0x1d + CAN_EFF_MASK = 0x1fffffff + CAN_ERR_FLAG = 0x20000000 + CAN_ERR_MASK = 0x1fffffff + CAN_INV_FILTER = 0x20000000 + CAN_ISOTP = 0x6 + CAN_MAX_DLC = 0x8 + CAN_MAX_DLEN = 0x8 + CAN_MCNET = 0x5 + CAN_MTU = 0x10 + CAN_NPROTO = 0x7 + CAN_RAW = 0x1 + CAN_RAW_FILTER_MAX = 0x200 + CAN_RTR_FLAG = 0x40000000 + CAN_SFF_ID_BITS = 0xb + CAN_SFF_MASK = 0x7ff + CAN_TP16 = 0x3 + CAN_TP20 = 0x4 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CFLUSH = 0xf + CGROUP2_SUPER_MAGIC = 0x63677270 + CGROUP_SUPER_MAGIC = 0x27e0eb + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CLOCK_BOOTTIME = 0x7 + CLOCK_BOOTTIME_ALARM = 0x9 + CLOCK_DEFAULT = 0x0 + CLOCK_EXT = 0x1 + CLOCK_INT = 0x2 + CLOCK_MONOTONIC = 0x1 + CLOCK_MONOTONIC_COARSE = 0x6 + CLOCK_MONOTONIC_RAW = 0x4 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_ALARM = 0x8 + CLOCK_REALTIME_COARSE = 0x5 + CLOCK_TAI = 0xb + CLOCK_THREAD_CPUTIME_ID = 0x3 + CLOCK_TXFROMRX = 0x4 + CLOCK_TXINT = 0x3 + CLONE_CHILD_CLEARTID = 0x200000 + CLONE_CHILD_SETTID = 0x1000000 + CLONE_DETACHED = 0x400000 + CLONE_FILES = 0x400 + CLONE_FS = 0x200 + CLONE_IO = 0x80000000 + CLONE_NEWCGROUP = 0x2000000 + CLONE_NEWIPC = 0x8000000 + CLONE_NEWNET = 0x40000000 + CLONE_NEWNS = 0x20000 + CLONE_NEWPID = 0x20000000 + CLONE_NEWUSER = 0x10000000 + CLONE_NEWUTS = 0x4000000 + CLONE_PARENT = 0x8000 + CLONE_PARENT_SETTID = 0x100000 + CLONE_PTRACE = 0x2000 + CLONE_SETTLS = 0x80000 + CLONE_SIGHAND = 0x800 + CLONE_SYSVSEM = 0x40000 + CLONE_THREAD = 0x10000 + CLONE_UNTRACED = 0x800000 + CLONE_VFORK = 0x4000 + CLONE_VM = 0x100 + CMSPAR = 0x40000000 + CODA_SUPER_MAGIC = 0x73757245 + CR0 = 0x0 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRAMFS_MAGIC = 0x28cd3d45 + CRDLY = 0x600 + CREAD = 0x80 + CRTSCTS = 0x80000000 + CS5 = 0x0 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIGNAL = 0xff + CSIZE = 0x30 + CSTART = 0x11 + CSTATUS = 0x0 + CSTOP = 0x13 + CSTOPB = 0x40 + CSUSP = 0x1a + DAXFS_MAGIC = 0x64646178 + DEBUGFS_MAGIC = 0x64626720 + DEVPTS_SUPER_MAGIC = 0x1cd1 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + 
ECRYPTFS_SUPER_MAGIC = 0xf15f + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x800 + EFD_SEMAPHORE = 0x1 + EFIVARFS_MAGIC = 0xde5e81e4 + EFS_SUPER_MAGIC = 0x414a53 + ENCODING_DEFAULT = 0x0 + ENCODING_FM_MARK = 0x3 + ENCODING_FM_SPACE = 0x4 + ENCODING_MANCHESTER = 0x5 + ENCODING_NRZ = 0x1 + ENCODING_NRZI = 0x2 + EPOLLERR = 0x8 + EPOLLET = 0x80000000 + EPOLLEXCLUSIVE = 0x10000000 + EPOLLHUP = 0x10 + EPOLLIN = 0x1 + EPOLLMSG = 0x400 + EPOLLONESHOT = 0x40000000 + EPOLLOUT = 0x4 + EPOLLPRI = 0x2 + EPOLLRDBAND = 0x80 + EPOLLRDHUP = 0x2000 + EPOLLRDNORM = 0x40 + EPOLLWAKEUP = 0x20000000 + EPOLLWRBAND = 0x200 + EPOLLWRNORM = 0x100 + EPOLL_CLOEXEC = 0x80000 + EPOLL_CTL_ADD = 0x1 + EPOLL_CTL_DEL = 0x2 + EPOLL_CTL_MOD = 0x3 + ETH_P_1588 = 0x88f7 + ETH_P_8021AD = 0x88a8 + ETH_P_8021AH = 0x88e7 + ETH_P_8021Q = 0x8100 + ETH_P_80221 = 0x8917 + ETH_P_802_2 = 0x4 + ETH_P_802_3 = 0x1 + ETH_P_802_3_MIN = 0x600 + ETH_P_802_EX1 = 0x88b5 + ETH_P_AARP = 0x80f3 + ETH_P_AF_IUCV = 0xfbfb + ETH_P_ALL = 0x3 + ETH_P_AOE = 0x88a2 + ETH_P_ARCNET = 0x1a + ETH_P_ARP = 0x806 + ETH_P_ATALK = 0x809b + ETH_P_ATMFATE = 0x8884 + ETH_P_ATMMPOA = 0x884c + ETH_P_AX25 = 0x2 + ETH_P_BATMAN = 0x4305 + ETH_P_BPQ = 0x8ff + ETH_P_CAIF = 0xf7 + ETH_P_CAN = 0xc + ETH_P_CANFD = 0xd + ETH_P_CONTROL = 0x16 + ETH_P_CUST = 0x6006 + ETH_P_DDCMP = 0x6 + ETH_P_DEC = 0x6000 + ETH_P_DIAG = 0x6005 + ETH_P_DNA_DL = 0x6001 + ETH_P_DNA_RC = 0x6002 + ETH_P_DNA_RT = 0x6003 + ETH_P_DSA = 0x1b + ETH_P_ECONET = 0x18 + ETH_P_EDSA = 0xdada + ETH_P_ERSPAN = 0x88be + ETH_P_ERSPAN2 = 0x22eb + ETH_P_FCOE = 0x8906 + ETH_P_FIP = 0x8914 + ETH_P_HDLC = 0x19 + ETH_P_HSR = 0x892f + ETH_P_IBOE = 0x8915 + ETH_P_IEEE802154 = 0xf6 + ETH_P_IEEEPUP = 0xa00 + ETH_P_IEEEPUPAT = 0xa01 + ETH_P_IFE = 0xed3e + ETH_P_IP = 0x800 + ETH_P_IPV6 = 0x86dd + ETH_P_IPX = 0x8137 + ETH_P_IRDA = 0x17 + ETH_P_LAT = 0x6004 + ETH_P_LINK_CTL = 0x886c + ETH_P_LOCALTALK = 0x9 + ETH_P_LOOP = 0x60 + ETH_P_LOOPBACK = 0x9000 + ETH_P_MACSEC = 0x88e5 + ETH_P_MAP = 0xf9 + ETH_P_MOBITEX = 0x15 + ETH_P_MPLS_MC = 0x8848 + ETH_P_MPLS_UC = 0x8847 + ETH_P_MVRP = 0x88f5 + ETH_P_NCSI = 0x88f8 + ETH_P_NSH = 0x894f + ETH_P_PAE = 0x888e + ETH_P_PAUSE = 0x8808 + ETH_P_PHONET = 0xf5 + ETH_P_PPPTALK = 0x10 + ETH_P_PPP_DISC = 0x8863 + ETH_P_PPP_MP = 0x8 + ETH_P_PPP_SES = 0x8864 + ETH_P_PREAUTH = 0x88c7 + ETH_P_PRP = 0x88fb + ETH_P_PUP = 0x200 + ETH_P_PUPAT = 0x201 + ETH_P_QINQ1 = 0x9100 + ETH_P_QINQ2 = 0x9200 + ETH_P_QINQ3 = 0x9300 + ETH_P_RARP = 0x8035 + ETH_P_SCA = 0x6007 + ETH_P_SLOW = 0x8809 + ETH_P_SNAP = 0x5 + ETH_P_TDLS = 0x890d + ETH_P_TEB = 0x6558 + ETH_P_TIPC = 0x88ca + ETH_P_TRAILER = 0x1c + ETH_P_TR_802_2 = 0x11 + ETH_P_TSN = 0x22f0 + ETH_P_WAN_PPP = 0x7 + ETH_P_WCCP = 0x883e + ETH_P_X25 = 0x805 + ETH_P_XDSA = 0xf8 + EXABYTE_ENABLE_NEST = 0xf0 + EXT2_SUPER_MAGIC = 0xef53 + EXT3_SUPER_MAGIC = 0xef53 + EXT4_SUPER_MAGIC = 0xef53 + EXTA = 0xe + EXTB = 0xf + EXTPROC = 0x10000 + F2FS_SUPER_MAGIC = 0xf2f52010 + FALLOC_FL_COLLAPSE_RANGE = 0x8 + FALLOC_FL_INSERT_RANGE = 0x20 + FALLOC_FL_KEEP_SIZE = 0x1 + FALLOC_FL_NO_HIDE_STALE = 0x4 + FALLOC_FL_PUNCH_HOLE = 0x2 + FALLOC_FL_UNSHARE_RANGE = 0x40 + FALLOC_FL_ZERO_RANGE = 0x10 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FF0 = 0x0 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x1000 + FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 + FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 + FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 + FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 + FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 + FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 + FS_ENCRYPTION_MODE_INVALID = 0x0 + 
FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 + FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 + FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 + FS_KEY_DESCRIPTOR_SIZE = 0x8 + FS_KEY_DESC_PREFIX = "fscrypt:" + FS_KEY_DESC_PREFIX_SIZE = 0x8 + FS_MAX_KEY_SIZE = 0x40 + FS_POLICY_FLAGS_PAD_16 = 0x2 + FS_POLICY_FLAGS_PAD_32 = 0x3 + FS_POLICY_FLAGS_PAD_4 = 0x0 + FS_POLICY_FLAGS_PAD_8 = 0x1 + FS_POLICY_FLAGS_PAD_MASK = 0x3 + FS_POLICY_FLAGS_VALID = 0x3 + FUTEXFS_SUPER_MAGIC = 0xbad1dea + F_ADD_SEALS = 0x409 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x406 + F_EXLCK = 0x4 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLEASE = 0x401 + F_GETLK = 0x5 + F_GETLK64 = 0x5 + F_GETOWN = 0x9 + F_GETOWN_EX = 0x10 + F_GETPIPE_SZ = 0x408 + F_GETSIG = 0xb + F_GET_FILE_RW_HINT = 0x40d + F_GET_RW_HINT = 0x40b + F_GET_SEALS = 0x40a + F_LOCK = 0x1 + F_NOTIFY = 0x402 + F_OFD_GETLK = 0x24 + F_OFD_SETLK = 0x25 + F_OFD_SETLKW = 0x26 + F_OK = 0x0 + F_RDLCK = 0x0 + F_SEAL_GROW = 0x4 + F_SEAL_SEAL = 0x1 + F_SEAL_SHRINK = 0x2 + F_SEAL_WRITE = 0x8 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLEASE = 0x400 + F_SETLK = 0x6 + F_SETLK64 = 0x6 + F_SETLKW = 0x7 + F_SETLKW64 = 0x7 + F_SETOWN = 0x8 + F_SETOWN_EX = 0xf + F_SETPIPE_SZ = 0x407 + F_SETSIG = 0xa + F_SET_FILE_RW_HINT = 0x40e + F_SET_RW_HINT = 0x40c + F_SHLCK = 0x8 + F_TEST = 0x3 + F_TLOCK = 0x2 + F_ULOCK = 0x0 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + GENL_ADMIN_PERM = 0x1 + GENL_CMD_CAP_DO = 0x2 + GENL_CMD_CAP_DUMP = 0x4 + GENL_CMD_CAP_HASPOL = 0x8 + GENL_HDRLEN = 0x4 + GENL_ID_CTRL = 0x10 + GENL_ID_PMCRAID = 0x12 + GENL_ID_VFS_DQUOT = 0x11 + GENL_MAX_ID = 0x3ff + GENL_MIN_ID = 0x10 + GENL_NAMSIZ = 0x10 + GENL_START_ALLOC = 0x13 + GENL_UNS_ADMIN_PERM = 0x10 + GRND_NONBLOCK = 0x1 + GRND_RANDOM = 0x2 + HDIO_DRIVE_CMD = 0x31f + HDIO_DRIVE_CMD_AEB = 0x31e + HDIO_DRIVE_CMD_HDR_SIZE = 0x4 + HDIO_DRIVE_HOB_HDR_SIZE = 0x8 + HDIO_DRIVE_RESET = 0x31c + HDIO_DRIVE_TASK = 0x31e + HDIO_DRIVE_TASKFILE = 0x31d + HDIO_DRIVE_TASK_HDR_SIZE = 0x8 + HDIO_GETGEO = 0x301 + HDIO_GET_32BIT = 0x309 + HDIO_GET_ACOUSTIC = 0x30f + HDIO_GET_ADDRESS = 0x310 + HDIO_GET_BUSSTATE = 0x31a + HDIO_GET_DMA = 0x30b + HDIO_GET_IDENTITY = 0x30d + HDIO_GET_KEEPSETTINGS = 0x308 + HDIO_GET_MULTCOUNT = 0x304 + HDIO_GET_NICE = 0x30c + HDIO_GET_NOWERR = 0x30a + HDIO_GET_QDMA = 0x305 + HDIO_GET_UNMASKINTR = 0x302 + HDIO_GET_WCACHE = 0x30e + HDIO_OBSOLETE_IDENTITY = 0x307 + HDIO_SCAN_HWIF = 0x328 + HDIO_SET_32BIT = 0x324 + HDIO_SET_ACOUSTIC = 0x32c + HDIO_SET_ADDRESS = 0x32f + HDIO_SET_BUSSTATE = 0x32d + HDIO_SET_DMA = 0x326 + HDIO_SET_KEEPSETTINGS = 0x323 + HDIO_SET_MULTCOUNT = 0x321 + HDIO_SET_NICE = 0x329 + HDIO_SET_NOWERR = 0x325 + HDIO_SET_PIO_MODE = 0x327 + HDIO_SET_QDMA = 0x32e + HDIO_SET_UNMASKINTR = 0x322 + HDIO_SET_WCACHE = 0x32b + HDIO_SET_XFER = 0x306 + HDIO_TRISTATE_HWIF = 0x31b + HDIO_UNREGISTER_HWIF = 0x32a + HOSTFS_SUPER_MAGIC = 0xc0ffee + HPFS_SUPER_MAGIC = 0xf995e849 + HUGETLBFS_MAGIC = 0x958458f6 + HUPCL = 0x400 + IBSHIFT = 0x10 + ICANON = 0x2 + ICMPV6_FILTER = 0x1 + ICRNL = 0x100 + IEXTEN = 0x8000 + IFA_F_DADFAILED = 0x8 + IFA_F_DEPRECATED = 0x20 + IFA_F_HOMEADDRESS = 0x10 + IFA_F_MANAGETEMPADDR = 0x100 + IFA_F_MCAUTOJOIN = 0x400 + IFA_F_NODAD = 0x2 + IFA_F_NOPREFIXROUTE = 0x200 + IFA_F_OPTIMISTIC = 0x4 + IFA_F_PERMANENT = 0x80 + IFA_F_SECONDARY = 0x1 + IFA_F_STABLE_PRIVACY = 0x800 + IFA_F_TEMPORARY = 0x1 + IFA_F_TENTATIVE = 0x40 + IFA_MAX = 0x9 + IFF_ALLMULTI = 0x200 + IFF_ATTACH_QUEUE = 0x200 + IFF_AUTOMEDIA = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG 
= 0x4 + IFF_DETACH_QUEUE = 0x400 + IFF_DORMANT = 0x20000 + IFF_DYNAMIC = 0x8000 + IFF_ECHO = 0x40000 + IFF_LOOPBACK = 0x8 + IFF_LOWER_UP = 0x10000 + IFF_MASTER = 0x400 + IFF_MULTICAST = 0x1000 + IFF_MULTI_QUEUE = 0x100 + IFF_NAPI = 0x10 + IFF_NAPI_FRAGS = 0x20 + IFF_NOARP = 0x80 + IFF_NOFILTER = 0x1000 + IFF_NOTRAILERS = 0x20 + IFF_NO_PI = 0x1000 + IFF_ONE_QUEUE = 0x2000 + IFF_PERSIST = 0x800 + IFF_POINTOPOINT = 0x10 + IFF_PORTSEL = 0x2000 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SLAVE = 0x800 + IFF_TAP = 0x2 + IFF_TUN = 0x1 + IFF_TUN_EXCL = 0x8000 + IFF_UP = 0x1 + IFF_VNET_HDR = 0x4000 + IFF_VOLATILE = 0x70c5a + IFNAMSIZ = 0x10 + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_ACCESS = 0x1 + IN_ALL_EVENTS = 0xfff + IN_ATTRIB = 0x4 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLOEXEC = 0x80000 + IN_CLOSE = 0x18 + IN_CLOSE_NOWRITE = 0x10 + IN_CLOSE_WRITE = 0x8 + IN_CREATE = 0x100 + IN_DELETE = 0x200 + IN_DELETE_SELF = 0x400 + IN_DONT_FOLLOW = 0x2000000 + IN_EXCL_UNLINK = 0x4000000 + IN_IGNORED = 0x8000 + IN_ISDIR = 0x40000000 + IN_LOOPBACKNET = 0x7f + IN_MASK_ADD = 0x20000000 + IN_MODIFY = 0x2 + IN_MOVE = 0xc0 + IN_MOVED_FROM = 0x40 + IN_MOVED_TO = 0x80 + IN_MOVE_SELF = 0x800 + IN_NONBLOCK = 0x800 + IN_ONESHOT = 0x80000000 + IN_ONLYDIR = 0x1000000 + IN_OPEN = 0x20 + IN_Q_OVERFLOW = 0x4000 + IN_UNMOUNT = 0x2000 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPPROTO_AH = 0x33 + IPPROTO_BEETPH = 0x5e + IPPROTO_COMP = 0x6c + IPPROTO_DCCP = 0x21 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_ESP = 0x32 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPIP = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MH = 0x87 + IPPROTO_MPLS = 0x89 + IPPROTO_MTP = 0x5c + IPPROTO_NONE = 0x3b + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPV6_2292DSTOPTS = 0x4 + IPV6_2292HOPLIMIT = 0x8 + IPV6_2292HOPOPTS = 0x3 + IPV6_2292PKTINFO = 0x2 + IPV6_2292PKTOPTIONS = 0x6 + IPV6_2292RTHDR = 0x5 + IPV6_ADDRFORM = 0x1 + IPV6_ADDR_PREFERENCES = 0x48 + IPV6_ADD_MEMBERSHIP = 0x14 + IPV6_AUTHHDR = 0xa + IPV6_AUTOFLOWLABEL = 0x46 + IPV6_CHECKSUM = 0x7 + IPV6_DONTFRAG = 0x3e + IPV6_DROP_MEMBERSHIP = 0x15 + IPV6_DSTOPTS = 0x3b + IPV6_FREEBIND = 0x4e + IPV6_HDRINCL = 0x24 + IPV6_HOPLIMIT = 0x34 + IPV6_HOPOPTS = 0x36 + IPV6_IPSEC_POLICY = 0x22 + IPV6_JOIN_ANYCAST = 0x1b + IPV6_JOIN_GROUP = 0x14 + IPV6_LEAVE_ANYCAST = 0x1c + IPV6_LEAVE_GROUP = 0x15 + IPV6_MINHOPCOUNT = 0x49 + IPV6_MTU = 0x18 + IPV6_MTU_DISCOVER = 0x17 + IPV6_MULTICAST_HOPS = 0x12 + IPV6_MULTICAST_IF = 0x11 + IPV6_MULTICAST_LOOP = 0x13 + IPV6_NEXTHOP = 0x9 + IPV6_ORIGDSTADDR = 0x4a + IPV6_PATHMTU = 0x3d + IPV6_PKTINFO = 0x32 + IPV6_PMTUDISC_DO = 0x2 + IPV6_PMTUDISC_DONT = 0x0 + IPV6_PMTUDISC_INTERFACE = 0x4 + IPV6_PMTUDISC_OMIT = 0x5 + IPV6_PMTUDISC_PROBE = 0x3 + IPV6_PMTUDISC_WANT = 0x1 + IPV6_RECVDSTOPTS = 0x3a + IPV6_RECVERR = 0x19 + IPV6_RECVFRAGSIZE = 0x4d + IPV6_RECVHOPLIMIT = 0x33 + IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVORIGDSTADDR = 0x4a + 
IPV6_RECVPATHMTU = 0x3c + IPV6_RECVPKTINFO = 0x31 + IPV6_RECVRTHDR = 0x38 + IPV6_RECVTCLASS = 0x42 + IPV6_ROUTER_ALERT = 0x16 + IPV6_RTHDR = 0x39 + IPV6_RTHDRDSTOPTS = 0x37 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_RXDSTOPTS = 0x3b + IPV6_RXHOPOPTS = 0x36 + IPV6_TCLASS = 0x43 + IPV6_TRANSPARENT = 0x4b + IPV6_UNICAST_HOPS = 0x10 + IPV6_UNICAST_IF = 0x4c + IPV6_V6ONLY = 0x1a + IPV6_XFRM_POLICY = 0x23 + IP_ADD_MEMBERSHIP = 0x23 + IP_ADD_SOURCE_MEMBERSHIP = 0x27 + IP_BIND_ADDRESS_NO_PORT = 0x18 + IP_BLOCK_SOURCE = 0x26 + IP_CHECKSUM = 0x17 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0x24 + IP_DROP_SOURCE_MEMBERSHIP = 0x28 + IP_FREEBIND = 0xf + IP_HDRINCL = 0x3 + IP_IPSEC_POLICY = 0x10 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0x14 + IP_MF = 0x2000 + IP_MINTTL = 0x15 + IP_MSFILTER = 0x29 + IP_MSS = 0x240 + IP_MTU = 0xe + IP_MTU_DISCOVER = 0xa + IP_MULTICAST_ALL = 0x31 + IP_MULTICAST_IF = 0x20 + IP_MULTICAST_LOOP = 0x22 + IP_MULTICAST_TTL = 0x21 + IP_NODEFRAG = 0x16 + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x4 + IP_ORIGDSTADDR = 0x14 + IP_PASSSEC = 0x12 + IP_PKTINFO = 0x8 + IP_PKTOPTIONS = 0x9 + IP_PMTUDISC = 0xa + IP_PMTUDISC_DO = 0x2 + IP_PMTUDISC_DONT = 0x0 + IP_PMTUDISC_INTERFACE = 0x4 + IP_PMTUDISC_OMIT = 0x5 + IP_PMTUDISC_PROBE = 0x3 + IP_PMTUDISC_WANT = 0x1 + IP_RECVERR = 0xb + IP_RECVFRAGSIZE = 0x19 + IP_RECVOPTS = 0x6 + IP_RECVORIGDSTADDR = 0x14 + IP_RECVRETOPTS = 0x7 + IP_RECVTOS = 0xd + IP_RECVTTL = 0xc + IP_RETOPTS = 0x7 + IP_RF = 0x8000 + IP_ROUTER_ALERT = 0x5 + IP_TOS = 0x1 + IP_TRANSPARENT = 0x13 + IP_TTL = 0x2 + IP_UNBLOCK_SOURCE = 0x25 + IP_UNICAST_IF = 0x32 + IP_XFRM_POLICY = 0x11 + ISIG = 0x1 + ISOFS_SUPER_MAGIC = 0x9660 + ISTRIP = 0x20 + IUCLC = 0x200 + IUTF8 = 0x4000 + IXANY = 0x800 + IXOFF = 0x1000 + IXON = 0x400 + JFFS2_SUPER_MAGIC = 0x72b6 + KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CHOWN = 0x4 + KEYCTL_CLEAR = 0x7 + KEYCTL_DESCRIBE = 0x6 + KEYCTL_DH_COMPUTE = 0x17 + KEYCTL_GET_KEYRING_ID = 0x0 + KEYCTL_GET_PERSISTENT = 0x16 + KEYCTL_GET_SECURITY = 0x11 + KEYCTL_INSTANTIATE = 0xc + KEYCTL_INSTANTIATE_IOV = 0x14 + KEYCTL_INVALIDATE = 0x15 + KEYCTL_JOIN_SESSION_KEYRING = 0x1 + KEYCTL_LINK = 0x8 + KEYCTL_NEGATE = 0xd + KEYCTL_READ = 0xb + KEYCTL_REJECT = 0x13 + KEYCTL_RESTRICT_KEYRING = 0x1d + KEYCTL_REVOKE = 0x3 + KEYCTL_SEARCH = 0xa + KEYCTL_SESSION_TO_PARENT = 0x12 + KEYCTL_SETPERM = 0x5 + KEYCTL_SET_REQKEY_KEYRING = 0xe + KEYCTL_SET_TIMEOUT = 0xf + KEYCTL_UNLINK = 0x9 + KEYCTL_UPDATE = 0x2 + KEY_REQKEY_DEFL_DEFAULT = 0x0 + KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 + KEY_REQKEY_DEFL_NO_CHANGE = -0x1 + KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 + KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 + KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 + KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 + KEY_REQKEY_DEFL_USER_KEYRING = 0x4 + KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 + KEY_SPEC_GROUP_KEYRING = -0x6 + KEY_SPEC_PROCESS_KEYRING = -0x2 + KEY_SPEC_REQKEY_AUTH_KEY = -0x7 + KEY_SPEC_REQUESTOR_KEYRING = -0x8 + KEY_SPEC_SESSION_KEYRING = -0x3 + KEY_SPEC_THREAD_KEYRING = -0x1 + KEY_SPEC_USER_KEYRING = -0x4 + KEY_SPEC_USER_SESSION_KEYRING = -0x5 + LINUX_REBOOT_CMD_CAD_OFF = 0x0 + LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef + LINUX_REBOOT_CMD_HALT = 0xcdef0123 + LINUX_REBOOT_CMD_KEXEC = 0x45584543 + LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc + LINUX_REBOOT_CMD_RESTART = 0x1234567 + LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 + LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 + LINUX_REBOOT_MAGIC1 = 0xfee1dead + LINUX_REBOOT_MAGIC2 = 
0x28121969 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DODUMP = 0x11 + MADV_DOFORK = 0xb + MADV_DONTDUMP = 0x10 + MADV_DONTFORK = 0xa + MADV_DONTNEED = 0x4 + MADV_FREE = 0x8 + MADV_HUGEPAGE = 0xe + MADV_HWPOISON = 0x64 + MADV_KEEPONFORK = 0x13 + MADV_MERGEABLE = 0xc + MADV_NOHUGEPAGE = 0xf + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_REMOVE = 0x9 + MADV_SEQUENTIAL = 0x2 + MADV_UNMERGEABLE = 0xd + MADV_WILLNEED = 0x3 + MADV_WIPEONFORK = 0x12 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_FIXED_NOREPLACE = 0x100000 + MAP_GROWSDOWN = 0x100 + MAP_HUGETLB = 0x40000 + MAP_HUGE_MASK = 0x3f + MAP_HUGE_SHIFT = 0x1a + MAP_LOCKED = 0x2000 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x4000 + MAP_POPULATE = 0x8000 + MAP_PRIVATE = 0x2 + MAP_SHARED = 0x1 + MAP_SHARED_VALIDATE = 0x3 + MAP_STACK = 0x20000 + MAP_SYNC = 0x80000 + MAP_TYPE = 0xf + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MCL_ONFAULT = 0x4 + MINIX2_SUPER_MAGIC = 0x2468 + MINIX2_SUPER_MAGIC2 = 0x2478 + MINIX3_SUPER_MAGIC = 0x4d5a + MINIX_SUPER_MAGIC = 0x137f + MINIX_SUPER_MAGIC2 = 0x138f + MNT_DETACH = 0x2 + MNT_EXPIRE = 0x4 + MNT_FORCE = 0x1 + MSDOS_SUPER_MAGIC = 0x4d44 + MSG_BATCH = 0x40000 + MSG_CMSG_CLOEXEC = 0x40000000 + MSG_CONFIRM = 0x800 + MSG_CTRUNC = 0x8 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x40 + MSG_EOR = 0x80 + MSG_ERRQUEUE = 0x2000 + MSG_FASTOPEN = 0x20000000 + MSG_FIN = 0x200 + MSG_MORE = 0x8000 + MSG_NOSIGNAL = 0x4000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_PROXY = 0x10 + MSG_RST = 0x1000 + MSG_SYN = 0x400 + MSG_TRUNC = 0x20 + MSG_TRYHARD = 0x4 + MSG_WAITALL = 0x100 + MSG_WAITFORONE = 0x10000 + MSG_ZEROCOPY = 0x4000000 + MS_ACTIVE = 0x40000000 + MS_ASYNC = 0x1 + MS_BIND = 0x1000 + MS_BORN = 0x20000000 + MS_DIRSYNC = 0x80 + MS_INVALIDATE = 0x2 + MS_I_VERSION = 0x800000 + MS_KERNMOUNT = 0x400000 + MS_LAZYTIME = 0x2000000 + MS_MANDLOCK = 0x40 + MS_MGC_MSK = 0xffff0000 + MS_MGC_VAL = 0xc0ed0000 + MS_MOVE = 0x2000 + MS_NOATIME = 0x400 + MS_NODEV = 0x4 + MS_NODIRATIME = 0x800 + MS_NOEXEC = 0x8 + MS_NOREMOTELOCK = 0x8000000 + MS_NOSEC = 0x10000000 + MS_NOSUID = 0x2 + MS_NOUSER = -0x80000000 + MS_POSIXACL = 0x10000 + MS_PRIVATE = 0x40000 + MS_RDONLY = 0x1 + MS_REC = 0x4000 + MS_RELATIME = 0x200000 + MS_REMOUNT = 0x20 + MS_RMT_MASK = 0x2800051 + MS_SHARED = 0x100000 + MS_SILENT = 0x8000 + MS_SLAVE = 0x80000 + MS_STRICTATIME = 0x1000000 + MS_SUBMOUNT = 0x4000000 + MS_SYNC = 0x4 + MS_SYNCHRONOUS = 0x10 + MS_UNBINDABLE = 0x20000 + MS_VERBOSE = 0x8000 + MTD_INODE_FS_MAGIC = 0x11307854 + NAME_MAX = 0xff + NCP_SUPER_MAGIC = 0x564c + NETLINK_ADD_MEMBERSHIP = 0x1 + NETLINK_AUDIT = 0x9 + NETLINK_BROADCAST_ERROR = 0x4 + NETLINK_CAP_ACK = 0xa + NETLINK_CONNECTOR = 0xb + NETLINK_CRYPTO = 0x15 + NETLINK_DNRTMSG = 0xe + NETLINK_DROP_MEMBERSHIP = 0x2 + NETLINK_ECRYPTFS = 0x13 + NETLINK_EXT_ACK = 0xb + NETLINK_FIB_LOOKUP = 0xa + NETLINK_FIREWALL = 0x3 + NETLINK_GENERIC = 0x10 + NETLINK_INET_DIAG = 0x4 + NETLINK_IP6_FW = 0xd + NETLINK_ISCSI = 0x8 + NETLINK_KOBJECT_UEVENT = 0xf + NETLINK_LISTEN_ALL_NSID = 0x8 + NETLINK_LIST_MEMBERSHIPS = 0x9 + NETLINK_NETFILTER = 0xc + NETLINK_NFLOG = 0x5 + NETLINK_NO_ENOBUFS = 0x5 + NETLINK_PKTINFO = 0x3 + NETLINK_RDMA = 0x14 + NETLINK_ROUTE = 0x0 + NETLINK_RX_RING = 0x6 + NETLINK_SCSITRANSPORT = 0x12 + NETLINK_SELINUX = 0x7 + NETLINK_SMC = 0x16 + NETLINK_SOCK_DIAG = 0x4 + NETLINK_TX_RING = 0x7 + NETLINK_UNUSED = 0x1 + NETLINK_USERSOCK = 0x2 + NETLINK_XFRM = 0x6 + NETNSA_MAX = 0x3 + NETNSA_NSID_NOT_ASSIGNED = 
-0x1 + NFNETLINK_V0 = 0x0 + NFNLGRP_ACCT_QUOTA = 0x8 + NFNLGRP_CONNTRACK_DESTROY = 0x3 + NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 + NFNLGRP_CONNTRACK_EXP_NEW = 0x4 + NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 + NFNLGRP_CONNTRACK_NEW = 0x1 + NFNLGRP_CONNTRACK_UPDATE = 0x2 + NFNLGRP_MAX = 0x9 + NFNLGRP_NFTABLES = 0x7 + NFNLGRP_NFTRACE = 0x9 + NFNLGRP_NONE = 0x0 + NFNL_BATCH_MAX = 0x1 + NFNL_MSG_BATCH_BEGIN = 0x10 + NFNL_MSG_BATCH_END = 0x11 + NFNL_NFA_NEST = 0x8000 + NFNL_SUBSYS_ACCT = 0x7 + NFNL_SUBSYS_COUNT = 0xc + NFNL_SUBSYS_CTHELPER = 0x9 + NFNL_SUBSYS_CTNETLINK = 0x1 + NFNL_SUBSYS_CTNETLINK_EXP = 0x2 + NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 + NFNL_SUBSYS_IPSET = 0x6 + NFNL_SUBSYS_NFTABLES = 0xa + NFNL_SUBSYS_NFT_COMPAT = 0xb + NFNL_SUBSYS_NONE = 0x0 + NFNL_SUBSYS_OSF = 0x5 + NFNL_SUBSYS_QUEUE = 0x3 + NFNL_SUBSYS_ULOG = 0x4 + NFS_SUPER_MAGIC = 0x6969 + NILFS_SUPER_MAGIC = 0x3434 + NL0 = 0x0 + NL1 = 0x100 + NLA_ALIGNTO = 0x4 + NLA_F_NESTED = 0x8000 + NLA_F_NET_BYTEORDER = 0x4000 + NLA_HDRLEN = 0x4 + NLDLY = 0x100 + NLMSG_ALIGNTO = 0x4 + NLMSG_DONE = 0x3 + NLMSG_ERROR = 0x2 + NLMSG_HDRLEN = 0x10 + NLMSG_MIN_TYPE = 0x10 + NLMSG_NOOP = 0x1 + NLMSG_OVERRUN = 0x4 + NLM_F_ACK = 0x4 + NLM_F_ACK_TLVS = 0x200 + NLM_F_APPEND = 0x800 + NLM_F_ATOMIC = 0x400 + NLM_F_CAPPED = 0x100 + NLM_F_CREATE = 0x400 + NLM_F_DUMP = 0x300 + NLM_F_DUMP_FILTERED = 0x20 + NLM_F_DUMP_INTR = 0x10 + NLM_F_ECHO = 0x8 + NLM_F_EXCL = 0x200 + NLM_F_MATCH = 0x200 + NLM_F_MULTI = 0x2 + NLM_F_NONREC = 0x100 + NLM_F_REPLACE = 0x100 + NLM_F_REQUEST = 0x1 + NLM_F_ROOT = 0x100 + NOFLSH = 0x80 + NSFS_MAGIC = 0x6e736673 + OCFS2_SUPER_MAGIC = 0x7461636f + OCRNL = 0x8 + OFDEL = 0x80 + OFILL = 0x40 + OLCUC = 0x2 + ONLCR = 0x4 + ONLRET = 0x20 + ONOCR = 0x10 + OPENPROM_SUPER_MAGIC = 0x9fa1 + OPOST = 0x1 + OVERLAYFS_SUPER_MAGIC = 0x794c7630 + O_ACCMODE = 0x3 + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x4000 + O_DIRECTORY = 0x10000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x101000 + O_LARGEFILE = 0x0 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x20000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x101000 + O_SYNC = 0x101000 + O_TMPFILE = 0x410000 + O_TRUNC = 0x200 + O_WRONLY = 0x1 + PACKET_ADD_MEMBERSHIP = 0x1 + PACKET_AUXDATA = 0x8 + PACKET_BROADCAST = 0x1 + PACKET_COPY_THRESH = 0x7 + PACKET_DROP_MEMBERSHIP = 0x2 + PACKET_FANOUT = 0x12 + PACKET_FANOUT_CBPF = 0x6 + PACKET_FANOUT_CPU = 0x2 + PACKET_FANOUT_DATA = 0x16 + PACKET_FANOUT_EBPF = 0x7 + PACKET_FANOUT_FLAG_DEFRAG = 0x8000 + PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 + PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 + PACKET_FANOUT_HASH = 0x0 + PACKET_FANOUT_LB = 0x1 + PACKET_FANOUT_QM = 0x5 + PACKET_FANOUT_RND = 0x4 + PACKET_FANOUT_ROLLOVER = 0x3 + PACKET_FASTROUTE = 0x6 + PACKET_HDRLEN = 0xb + PACKET_HOST = 0x0 + PACKET_KERNEL = 0x7 + PACKET_LOOPBACK = 0x5 + PACKET_LOSS = 0xe + PACKET_MR_ALLMULTI = 0x2 + PACKET_MR_MULTICAST = 0x0 + PACKET_MR_PROMISC = 0x1 + PACKET_MR_UNICAST = 0x3 + PACKET_MULTICAST = 0x2 + PACKET_ORIGDEV = 0x9 + PACKET_OTHERHOST = 0x3 + PACKET_OUTGOING = 0x4 + PACKET_QDISC_BYPASS = 0x14 + PACKET_RECV_OUTPUT = 0x3 + PACKET_RESERVE = 0xc + PACKET_ROLLOVER_STATS = 0x15 + PACKET_RX_RING = 0x5 + PACKET_STATISTICS = 0x6 + PACKET_TIMESTAMP = 0x11 + PACKET_TX_HAS_OFF = 0x13 + PACKET_TX_RING = 0xd + PACKET_TX_TIMESTAMP = 0x10 + PACKET_USER = 0x6 + PACKET_VERSION = 0xa + PACKET_VNET_HDR = 0xf + PARENB = 0x100 + PARITY_CRC16_PR0 = 0x2 + PARITY_CRC16_PR0_CCITT = 0x4 + PARITY_CRC16_PR1 = 
0x3 + PARITY_CRC16_PR1_CCITT = 0x5 + PARITY_CRC32_PR0_CCITT = 0x6 + PARITY_CRC32_PR1_CCITT = 0x7 + PARITY_DEFAULT = 0x0 + PARITY_NONE = 0x1 + PARMRK = 0x8 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x2401 + PERF_EVENT_IOC_ENABLE = 0x2400 + PERF_EVENT_IOC_ID = 0x80082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4008240b + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 + PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc008240a + PERF_EVENT_IOC_REFRESH = 0x2402 + PERF_EVENT_IOC_RESET = 0x2403 + PERF_EVENT_IOC_SET_BPF = 0x40042408 + PERF_EVENT_IOC_SET_FILTER = 0x40082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + PIPEFS_MAGIC = 0x50495045 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROC_SUPER_MAGIC = 0x9fa0 + PROT_EXEC = 0x4 + PROT_GROWSDOWN = 0x1000000 + PROT_GROWSUP = 0x2000000 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PR_CAPBSET_DROP = 0x18 + PR_CAPBSET_READ = 0x17 + PR_CAP_AMBIENT = 0x2f + PR_CAP_AMBIENT_CLEAR_ALL = 0x4 + PR_CAP_AMBIENT_IS_SET = 0x1 + PR_CAP_AMBIENT_LOWER = 0x3 + PR_CAP_AMBIENT_RAISE = 0x2 + PR_ENDIAN_BIG = 0x0 + PR_ENDIAN_LITTLE = 0x1 + PR_ENDIAN_PPC_LITTLE = 0x2 + PR_FPEMU_NOPRINT = 0x1 + PR_FPEMU_SIGFPE = 0x2 + PR_FP_EXC_ASYNC = 0x2 + PR_FP_EXC_DISABLED = 0x0 + PR_FP_EXC_DIV = 0x10000 + PR_FP_EXC_INV = 0x100000 + PR_FP_EXC_NONRECOV = 0x1 + PR_FP_EXC_OVF = 0x20000 + PR_FP_EXC_PRECISE = 0x3 + PR_FP_EXC_RES = 0x80000 + PR_FP_EXC_SW_ENABLE = 0x80 + PR_FP_EXC_UND = 0x40000 + PR_FP_MODE_FR = 0x1 + PR_FP_MODE_FRE = 0x2 + PR_GET_CHILD_SUBREAPER = 0x25 + PR_GET_DUMPABLE = 0x3 + PR_GET_ENDIAN = 0x13 + PR_GET_FPEMU = 0x9 + PR_GET_FPEXC = 0xb + PR_GET_FP_MODE = 0x2e + PR_GET_KEEPCAPS = 0x7 + PR_GET_NAME = 0x10 + PR_GET_NO_NEW_PRIVS = 0x27 + PR_GET_PDEATHSIG = 0x2 + PR_GET_SECCOMP = 0x15 + PR_GET_SECUREBITS = 0x1b + PR_GET_SPECULATION_CTRL = 0x34 + PR_GET_THP_DISABLE = 0x2a + PR_GET_TID_ADDRESS = 0x28 + PR_GET_TIMERSLACK = 0x1e + PR_GET_TIMING = 0xd + PR_GET_TSC = 0x19 + PR_GET_UNALIGN = 0x5 + PR_MCE_KILL = 0x21 + PR_MCE_KILL_CLEAR = 0x0 + PR_MCE_KILL_DEFAULT = 0x2 + PR_MCE_KILL_EARLY = 0x1 + PR_MCE_KILL_GET = 0x22 + PR_MCE_KILL_LATE = 0x0 + PR_MCE_KILL_SET = 0x1 + PR_MPX_DISABLE_MANAGEMENT = 0x2c + PR_MPX_ENABLE_MANAGEMENT = 0x2b + PR_SET_CHILD_SUBREAPER = 0x24 + PR_SET_DUMPABLE = 0x4 + PR_SET_ENDIAN = 0x14 + PR_SET_FPEMU = 0xa + PR_SET_FPEXC = 0xc + PR_SET_FP_MODE = 0x2d + PR_SET_KEEPCAPS = 0x8 + PR_SET_MM = 0x23 + PR_SET_MM_ARG_END = 0x9 + PR_SET_MM_ARG_START = 0x8 + PR_SET_MM_AUXV = 0xc + PR_SET_MM_BRK = 0x7 + PR_SET_MM_END_CODE = 0x2 + PR_SET_MM_END_DATA = 0x4 + PR_SET_MM_ENV_END = 0xb + PR_SET_MM_ENV_START = 0xa + PR_SET_MM_EXE_FILE = 0xd + PR_SET_MM_MAP = 0xe + PR_SET_MM_MAP_SIZE = 0xf + PR_SET_MM_START_BRK = 0x6 + PR_SET_MM_START_CODE = 0x1 + PR_SET_MM_START_DATA = 0x3 + PR_SET_MM_START_STACK = 0x5 + PR_SET_NAME = 0xf + PR_SET_NO_NEW_PRIVS = 0x26 + PR_SET_PDEATHSIG = 0x1 + PR_SET_PTRACER = 0x59616d61 + PR_SET_PTRACER_ANY = 0xffffffffffffffff + PR_SET_SECCOMP = 0x16 + PR_SET_SECUREBITS = 0x1c + PR_SET_SPECULATION_CTRL = 0x35 + PR_SET_THP_DISABLE = 0x29 + PR_SET_TIMERSLACK = 0x1d + PR_SET_TIMING = 0xe + PR_SET_TSC = 0x1a + PR_SET_UNALIGN = 0x6 + PR_SPEC_DISABLE = 0x4 + PR_SPEC_ENABLE = 0x2 + PR_SPEC_FORCE_DISABLE = 0x8 + PR_SPEC_NOT_AFFECTED = 0x0 + PR_SPEC_PRCTL = 0x1 + PR_SPEC_STORE_BYPASS = 0x0 + PR_SVE_GET_VL = 0x33 + PR_SVE_SET_VL = 0x32 + PR_SVE_SET_VL_ONEXEC = 0x40000 + PR_SVE_VL_INHERIT = 0x20000 + PR_SVE_VL_LEN_MASK = 0xffff + PR_TASK_PERF_EVENTS_DISABLE = 0x1f + PR_TASK_PERF_EVENTS_ENABLE = 0x20 + 
PR_TIMING_STATISTICAL = 0x0 + PR_TIMING_TIMESTAMP = 0x1 + PR_TSC_ENABLE = 0x1 + PR_TSC_SIGSEGV = 0x2 + PR_UNALIGN_NOPRINT = 0x1 + PR_UNALIGN_SIGBUS = 0x2 + PSTOREFS_MAGIC = 0x6165676c + PTRACE_ATTACH = 0x10 + PTRACE_CONT = 0x7 + PTRACE_DETACH = 0x11 + PTRACE_EVENT_CLONE = 0x3 + PTRACE_EVENT_EXEC = 0x4 + PTRACE_EVENT_EXIT = 0x6 + PTRACE_EVENT_FORK = 0x1 + PTRACE_EVENT_SECCOMP = 0x7 + PTRACE_EVENT_STOP = 0x80 + PTRACE_EVENT_VFORK = 0x2 + PTRACE_EVENT_VFORK_DONE = 0x5 + PTRACE_GETEVENTMSG = 0x4201 + PTRACE_GETREGS = 0xc + PTRACE_GETREGSET = 0x4204 + PTRACE_GETSIGINFO = 0x4202 + PTRACE_GETSIGMASK = 0x420a + PTRACE_INTERRUPT = 0x4207 + PTRACE_KILL = 0x8 + PTRACE_LISTEN = 0x4208 + PTRACE_O_EXITKILL = 0x100000 + PTRACE_O_MASK = 0x3000ff + PTRACE_O_SUSPEND_SECCOMP = 0x200000 + PTRACE_O_TRACECLONE = 0x8 + PTRACE_O_TRACEEXEC = 0x10 + PTRACE_O_TRACEEXIT = 0x40 + PTRACE_O_TRACEFORK = 0x2 + PTRACE_O_TRACESECCOMP = 0x80 + PTRACE_O_TRACESYSGOOD = 0x1 + PTRACE_O_TRACEVFORK = 0x4 + PTRACE_O_TRACEVFORKDONE = 0x20 + PTRACE_PEEKDATA = 0x2 + PTRACE_PEEKSIGINFO = 0x4209 + PTRACE_PEEKSIGINFO_SHARED = 0x1 + PTRACE_PEEKTEXT = 0x1 + PTRACE_PEEKUSR = 0x3 + PTRACE_POKEDATA = 0x5 + PTRACE_POKETEXT = 0x4 + PTRACE_POKEUSR = 0x6 + PTRACE_SECCOMP_GET_FILTER = 0x420c + PTRACE_SECCOMP_GET_METADATA = 0x420d + PTRACE_SEIZE = 0x4206 + PTRACE_SETOPTIONS = 0x4200 + PTRACE_SETREGS = 0xd + PTRACE_SETREGSET = 0x4205 + PTRACE_SETSIGINFO = 0x4203 + PTRACE_SETSIGMASK = 0x420b + PTRACE_SINGLESTEP = 0x9 + PTRACE_SYSCALL = 0x18 + PTRACE_TRACEME = 0x0 + QNX4_SUPER_MAGIC = 0x2f + QNX6_SUPER_MAGIC = 0x68191122 + RAMFS_MAGIC = 0x858458f6 + RDTGROUP_SUPER_MAGIC = 0x7655821 + REISERFS_SUPER_MAGIC = 0x52654973 + RENAME_EXCHANGE = 0x2 + RENAME_NOREPLACE = 0x1 + RENAME_WHITEOUT = 0x4 + RLIMIT_AS = 0x9 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_LOCKS = 0xa + RLIMIT_MEMLOCK = 0x8 + RLIMIT_MSGQUEUE = 0xc + RLIMIT_NICE = 0xd + RLIMIT_NOFILE = 0x7 + RLIMIT_NPROC = 0x6 + RLIMIT_RSS = 0x5 + RLIMIT_RTPRIO = 0xe + RLIMIT_RTTIME = 0xf + RLIMIT_SIGPENDING = 0xb + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0xffffffffffffffff + RTAX_ADVMSS = 0x8 + RTAX_CC_ALGO = 0x10 + RTAX_CWND = 0x7 + RTAX_FASTOPEN_NO_COOKIE = 0x11 + RTAX_FEATURES = 0xc + RTAX_FEATURE_ALLFRAG = 0x8 + RTAX_FEATURE_ECN = 0x1 + RTAX_FEATURE_MASK = 0xf + RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TIMESTAMP = 0x4 + RTAX_HOPLIMIT = 0xa + RTAX_INITCWND = 0xb + RTAX_INITRWND = 0xe + RTAX_LOCK = 0x1 + RTAX_MAX = 0x11 + RTAX_MTU = 0x2 + RTAX_QUICKACK = 0xf + RTAX_REORDERING = 0x9 + RTAX_RTO_MIN = 0xd + RTAX_RTT = 0x4 + RTAX_RTTVAR = 0x5 + RTAX_SSTHRESH = 0x6 + RTAX_UNSPEC = 0x0 + RTAX_WINDOW = 0x3 + RTA_ALIGNTO = 0x4 + RTA_MAX = 0x1d + RTCF_DIRECTSRC = 0x4000000 + RTCF_DOREDIRECT = 0x1000000 + RTCF_LOG = 0x2000000 + RTCF_MASQ = 0x400000 + RTCF_NAT = 0x800000 + RTCF_VALVE = 0x200000 + RTC_AF = 0x20 + RTC_AIE_OFF = 0x7002 + RTC_AIE_ON = 0x7001 + RTC_ALM_READ = 0x80247008 + RTC_ALM_SET = 0x40247007 + RTC_EPOCH_READ = 0x8008700d + RTC_EPOCH_SET = 0x4008700e + RTC_IRQF = 0x80 + RTC_IRQP_READ = 0x8008700b + RTC_IRQP_SET = 0x4008700c + RTC_MAX_FREQ = 0x2000 + RTC_PF = 0x40 + RTC_PIE_OFF = 0x7006 + RTC_PIE_ON = 0x7005 + RTC_PLL_GET = 0x80207011 + RTC_PLL_SET = 0x40207012 + RTC_RD_TIME = 0x80247009 + RTC_SET_TIME = 0x4024700a + RTC_UF = 0x10 + RTC_UIE_OFF = 0x7004 + RTC_UIE_ON = 0x7003 + RTC_VL_CLR = 0x7014 + RTC_VL_READ = 0x80047013 + RTC_WIE_OFF = 0x7010 + RTC_WIE_ON = 0x700f + RTC_WKALM_RD = 0x80287010 + RTC_WKALM_SET = 0x4028700f + RTF_ADDRCLASSMASK = 0xf8000000 + 
RTF_ADDRCONF = 0x40000 + RTF_ALLONLINK = 0x20000 + RTF_BROADCAST = 0x10000000 + RTF_CACHE = 0x1000000 + RTF_DEFAULT = 0x10000 + RTF_DYNAMIC = 0x10 + RTF_FLOW = 0x2000000 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_INTERFACE = 0x40000000 + RTF_IRTT = 0x100 + RTF_LINKRT = 0x100000 + RTF_LOCAL = 0x80000000 + RTF_MODIFIED = 0x20 + RTF_MSS = 0x40 + RTF_MTU = 0x40 + RTF_MULTICAST = 0x20000000 + RTF_NAT = 0x8000000 + RTF_NOFORWARD = 0x1000 + RTF_NONEXTHOP = 0x200000 + RTF_NOPMTUDISC = 0x4000 + RTF_POLICY = 0x4000000 + RTF_REINSTATE = 0x8 + RTF_REJECT = 0x200 + RTF_STATIC = 0x400 + RTF_THROW = 0x2000 + RTF_UP = 0x1 + RTF_WINDOW = 0x80 + RTF_XRESOLVE = 0x800 + RTM_BASE = 0x10 + RTM_DELACTION = 0x31 + RTM_DELADDR = 0x15 + RTM_DELADDRLABEL = 0x49 + RTM_DELLINK = 0x11 + RTM_DELMDB = 0x55 + RTM_DELNEIGH = 0x1d + RTM_DELNETCONF = 0x51 + RTM_DELNSID = 0x59 + RTM_DELQDISC = 0x25 + RTM_DELROUTE = 0x19 + RTM_DELRULE = 0x21 + RTM_DELTCLASS = 0x29 + RTM_DELTFILTER = 0x2d + RTM_F_CLONED = 0x200 + RTM_F_EQUALIZE = 0x400 + RTM_F_FIB_MATCH = 0x2000 + RTM_F_LOOKUP_TABLE = 0x1000 + RTM_F_NOTIFY = 0x100 + RTM_F_PREFIX = 0x800 + RTM_GETACTION = 0x32 + RTM_GETADDR = 0x16 + RTM_GETADDRLABEL = 0x4a + RTM_GETANYCAST = 0x3e + RTM_GETDCB = 0x4e + RTM_GETLINK = 0x12 + RTM_GETMDB = 0x56 + RTM_GETMULTICAST = 0x3a + RTM_GETNEIGH = 0x1e + RTM_GETNEIGHTBL = 0x42 + RTM_GETNETCONF = 0x52 + RTM_GETNSID = 0x5a + RTM_GETQDISC = 0x26 + RTM_GETROUTE = 0x1a + RTM_GETRULE = 0x22 + RTM_GETSTATS = 0x5e + RTM_GETTCLASS = 0x2a + RTM_GETTFILTER = 0x2e + RTM_MAX = 0x63 + RTM_NEWACTION = 0x30 + RTM_NEWADDR = 0x14 + RTM_NEWADDRLABEL = 0x48 + RTM_NEWCACHEREPORT = 0x60 + RTM_NEWLINK = 0x10 + RTM_NEWMDB = 0x54 + RTM_NEWNDUSEROPT = 0x44 + RTM_NEWNEIGH = 0x1c + RTM_NEWNEIGHTBL = 0x40 + RTM_NEWNETCONF = 0x50 + RTM_NEWNSID = 0x58 + RTM_NEWPREFIX = 0x34 + RTM_NEWQDISC = 0x24 + RTM_NEWROUTE = 0x18 + RTM_NEWRULE = 0x20 + RTM_NEWSTATS = 0x5c + RTM_NEWTCLASS = 0x28 + RTM_NEWTFILTER = 0x2c + RTM_NR_FAMILIES = 0x15 + RTM_NR_MSGTYPES = 0x54 + RTM_SETDCB = 0x4f + RTM_SETLINK = 0x13 + RTM_SETNEIGHTBL = 0x43 + RTNH_ALIGNTO = 0x4 + RTNH_COMPARE_MASK = 0x19 + RTNH_F_DEAD = 0x1 + RTNH_F_LINKDOWN = 0x10 + RTNH_F_OFFLOAD = 0x8 + RTNH_F_ONLINK = 0x4 + RTNH_F_PERVASIVE = 0x2 + RTNH_F_UNRESOLVED = 0x20 + RTN_MAX = 0xb + RTPROT_BABEL = 0x2a + RTPROT_BGP = 0xba + RTPROT_BIRD = 0xc + RTPROT_BOOT = 0x3 + RTPROT_DHCP = 0x10 + RTPROT_DNROUTED = 0xd + RTPROT_EIGRP = 0xc0 + RTPROT_GATED = 0x8 + RTPROT_ISIS = 0xbb + RTPROT_KERNEL = 0x2 + RTPROT_MROUTED = 0x11 + RTPROT_MRT = 0xa + RTPROT_NTK = 0xf + RTPROT_OSPF = 0xbc + RTPROT_RA = 0x9 + RTPROT_REDIRECT = 0x1 + RTPROT_RIP = 0xbd + RTPROT_STATIC = 0x4 + RTPROT_UNSPEC = 0x0 + RTPROT_XORP = 0xe + RTPROT_ZEBRA = 0xb + RT_CLASS_DEFAULT = 0xfd + RT_CLASS_LOCAL = 0xff + RT_CLASS_MAIN = 0xfe + RT_CLASS_MAX = 0xff + RT_CLASS_UNSPEC = 0x0 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_CREDENTIALS = 0x2 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x1d + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_WIFI_STATUS = 0x29 + SECCOMP_MODE_DISABLED = 0x0 + SECCOMP_MODE_FILTER = 0x2 + SECCOMP_MODE_STRICT = 0x1 + SECURITYFS_MAGIC = 0x73636673 + SELINUX_MAGIC = 0xf97cff8c + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDDLCI = 0x8980 + SIOCADDMULTI = 0x8931 + SIOCADDRT = 0x890b + SIOCATMARK = 0x8905 + SIOCBONDCHANGEACTIVE = 0x8995 + SIOCBONDENSLAVE = 0x8990 + SIOCBONDINFOQUERY = 0x8994 + SIOCBONDRELEASE = 0x8991 + SIOCBONDSETHWADDR = 0x8992 + 
SIOCBONDSLAVEINFOQUERY = 0x8993 + SIOCBRADDBR = 0x89a0 + SIOCBRADDIF = 0x89a2 + SIOCBRDELBR = 0x89a1 + SIOCBRDELIF = 0x89a3 + SIOCDARP = 0x8953 + SIOCDELDLCI = 0x8981 + SIOCDELMULTI = 0x8932 + SIOCDELRT = 0x890c + SIOCDEVPRIVATE = 0x89f0 + SIOCDIFADDR = 0x8936 + SIOCDRARP = 0x8960 + SIOCETHTOOL = 0x8946 + SIOCGARP = 0x8954 + SIOCGHWTSTAMP = 0x89b1 + SIOCGIFADDR = 0x8915 + SIOCGIFBR = 0x8940 + SIOCGIFBRDADDR = 0x8919 + SIOCGIFCONF = 0x8912 + SIOCGIFCOUNT = 0x8938 + SIOCGIFDSTADDR = 0x8917 + SIOCGIFENCAP = 0x8925 + SIOCGIFFLAGS = 0x8913 + SIOCGIFHWADDR = 0x8927 + SIOCGIFINDEX = 0x8933 + SIOCGIFMAP = 0x8970 + SIOCGIFMEM = 0x891f + SIOCGIFMETRIC = 0x891d + SIOCGIFMTU = 0x8921 + SIOCGIFNAME = 0x8910 + SIOCGIFNETMASK = 0x891b + SIOCGIFPFLAGS = 0x8935 + SIOCGIFSLAVE = 0x8929 + SIOCGIFTXQLEN = 0x8942 + SIOCGIFVLAN = 0x8982 + SIOCGMIIPHY = 0x8947 + SIOCGMIIREG = 0x8948 + SIOCGPGRP = 0x8904 + SIOCGRARP = 0x8961 + SIOCGSKNS = 0x894c + SIOCGSTAMP = 0x8906 + SIOCGSTAMPNS = 0x8907 + SIOCINQ = 0x541b + SIOCOUTQ = 0x5411 + SIOCOUTQNSD = 0x894b + SIOCPROTOPRIVATE = 0x89e0 + SIOCRTMSG = 0x890d + SIOCSARP = 0x8955 + SIOCSHWTSTAMP = 0x89b0 + SIOCSIFADDR = 0x8916 + SIOCSIFBR = 0x8941 + SIOCSIFBRDADDR = 0x891a + SIOCSIFDSTADDR = 0x8918 + SIOCSIFENCAP = 0x8926 + SIOCSIFFLAGS = 0x8914 + SIOCSIFHWADDR = 0x8924 + SIOCSIFHWBROADCAST = 0x8937 + SIOCSIFLINK = 0x8911 + SIOCSIFMAP = 0x8971 + SIOCSIFMEM = 0x8920 + SIOCSIFMETRIC = 0x891e + SIOCSIFMTU = 0x8922 + SIOCSIFNAME = 0x8923 + SIOCSIFNETMASK = 0x891c + SIOCSIFPFLAGS = 0x8934 + SIOCSIFSLAVE = 0x8930 + SIOCSIFTXQLEN = 0x8943 + SIOCSIFVLAN = 0x8983 + SIOCSMIIREG = 0x8949 + SIOCSPGRP = 0x8902 + SIOCSRARP = 0x8962 + SIOCWANDEV = 0x894a + SMACK_MAGIC = 0x43415d53 + SMART_AUTOSAVE = 0xd2 + SMART_AUTO_OFFLINE = 0xdb + SMART_DISABLE = 0xd9 + SMART_ENABLE = 0xd8 + SMART_HCYL_PASS = 0xc2 + SMART_IMMEDIATE_OFFLINE = 0xd4 + SMART_LCYL_PASS = 0x4f + SMART_READ_LOG_SECTOR = 0xd5 + SMART_READ_THRESHOLDS = 0xd1 + SMART_READ_VALUES = 0xd0 + SMART_SAVE = 0xd3 + SMART_STATUS = 0xda + SMART_WRITE_LOG_SECTOR = 0xd6 + SMART_WRITE_THRESHOLDS = 0xd7 + SMB_SUPER_MAGIC = 0x517b + SOCKFS_MAGIC = 0x534f434b + SOCK_CLOEXEC = 0x80000 + SOCK_DCCP = 0x6 + SOCK_DGRAM = 0x2 + SOCK_IOC_TYPE = 0x89 + SOCK_NONBLOCK = 0x800 + SOCK_PACKET = 0xa + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_AAL = 0x109 + SOL_ALG = 0x117 + SOL_ATM = 0x108 + SOL_CAIF = 0x116 + SOL_CAN_BASE = 0x64 + SOL_DCCP = 0x10d + SOL_DECNET = 0x105 + SOL_ICMPV6 = 0x3a + SOL_IP = 0x0 + SOL_IPV6 = 0x29 + SOL_IRDA = 0x10a + SOL_IUCV = 0x115 + SOL_KCM = 0x119 + SOL_LLC = 0x10c + SOL_NETBEUI = 0x10b + SOL_NETLINK = 0x10e + SOL_NFC = 0x118 + SOL_PACKET = 0x107 + SOL_PNPIPE = 0x113 + SOL_PPPOL2TP = 0x111 + SOL_RAW = 0xff + SOL_RDS = 0x114 + SOL_RXRPC = 0x110 + SOL_SOCKET = 0x1 + SOL_TCP = 0x6 + SOL_TIPC = 0x10f + SOL_TLS = 0x11a + SOL_X25 = 0x106 + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x1e + SO_ATTACH_BPF = 0x32 + SO_ATTACH_FILTER = 0x1a + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x6 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DEBUG = 0x1 + SO_DETACH_BPF = 0x1b + SO_DETACH_FILTER = 0x1b + SO_DOMAIN = 0x27 + SO_DONTROUTE = 0x5 + SO_ERROR = 0x4 + SO_GET_FILTER = 0x1a + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x9 + SO_LINGER = 0xd + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + 
SO_NO_CHECK = 0xb + SO_OOBINLINE = 0xa + SO_PASSCRED = 0x10 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x11 + SO_PEERGROUPS = 0x3b + SO_PEERNAME = 0x1c + SO_PEERSEC = 0x1f + SO_PRIORITY = 0xc + SO_PROTOCOL = 0x26 + SO_RCVBUF = 0x8 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x12 + SO_RCVTIMEO = 0x14 + SO_REUSEADDR = 0x2 + SO_REUSEPORT = 0xf + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x7 + SO_SNDBUFFORCE = 0x20 + SO_SNDLOWAT = 0x13 + SO_SNDTIMEO = 0x15 + SO_TIMESTAMP = 0x1d + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TYPE = 0x3 + SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 + SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 + SO_VM_SOCKETS_BUFFER_SIZE = 0x0 + SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 + SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 + SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 + SO_VM_SOCKETS_TRUSTED = 0x5 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + SPLICE_F_GIFT = 0x8 + SPLICE_F_MORE = 0x4 + SPLICE_F_MOVE = 0x1 + SPLICE_F_NONBLOCK = 0x2 + SQUASHFS_MAGIC = 0x73717368 + STACK_END_MAGIC = 0x57ac6e9d + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 + SYNC_FILE_RANGE_WAIT_AFTER = 0x4 + SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 + SYNC_FILE_RANGE_WRITE = 0x2 + SYSFS_MAGIC = 0x62656572 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TASKSTATS_CMD_ATTR_MAX = 0x4 + TASKSTATS_CMD_MAX = 0x2 + TASKSTATS_GENL_NAME = "TASKSTATS" + TASKSTATS_GENL_VERSION = 0x1 + TASKSTATS_TYPE_MAX = 0x6 + TASKSTATS_VERSION = 0x8 + TCFLSH = 0x540b + TCGETA = 0x5405 + TCGETS = 0x5401 + TCGETS2 = 0x802c542a + TCGETX = 0x5432 + TCIFLUSH = 0x0 + TCIOFF = 0x2 + TCIOFLUSH = 0x2 + TCION = 0x3 + TCOFLUSH = 0x1 + TCOOFF = 0x0 + TCOON = 0x1 + TCP_CC_INFO = 0x1a + TCP_CONGESTION = 0xd + TCP_COOKIE_IN_ALWAYS = 0x1 + TCP_COOKIE_MAX = 0x10 + TCP_COOKIE_MIN = 0x8 + TCP_COOKIE_OUT_NEVER = 0x2 + TCP_COOKIE_PAIR_SIZE = 0x20 + TCP_COOKIE_TRANSACTIONS = 0xf + TCP_CORK = 0x3 + TCP_DEFER_ACCEPT = 0x9 + TCP_FASTOPEN = 0x17 + TCP_FASTOPEN_CONNECT = 0x1e + TCP_FASTOPEN_KEY = 0x21 + TCP_FASTOPEN_NO_COOKIE = 0x22 + TCP_INFO = 0xb + TCP_KEEPCNT = 0x6 + TCP_KEEPIDLE = 0x4 + TCP_KEEPINTVL = 0x5 + TCP_LINGER2 = 0x8 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0xe + TCP_MD5SIG_EXT = 0x20 + TCP_MD5SIG_FLAG_PREFIX = 0x1 + TCP_MD5SIG_MAXKEYLEN = 0x50 + TCP_MSS = 0x200 + TCP_MSS_DEFAULT = 0x218 + TCP_MSS_DESIRED = 0x4c4 + TCP_NODELAY = 0x1 + TCP_NOTSENT_LOWAT = 0x19 + TCP_QUEUE_SEQ = 0x15 + TCP_QUICKACK = 0xc + TCP_REPAIR = 0x13 + TCP_REPAIR_OPTIONS = 0x16 + 
TCP_REPAIR_QUEUE = 0x14 + TCP_REPAIR_WINDOW = 0x1d + TCP_SAVED_SYN = 0x1c + TCP_SAVE_SYN = 0x1b + TCP_SYNCNT = 0x7 + TCP_S_DATA_IN = 0x4 + TCP_S_DATA_OUT = 0x8 + TCP_THIN_DUPACK = 0x11 + TCP_THIN_LINEAR_TIMEOUTS = 0x10 + TCP_TIMESTAMP = 0x18 + TCP_ULP = 0x1f + TCP_USER_TIMEOUT = 0x12 + TCP_WINDOW_CLAMP = 0xa + TCSAFLUSH = 0x2 + TCSBRK = 0x5409 + TCSBRKP = 0x5425 + TCSETA = 0x5406 + TCSETAF = 0x5408 + TCSETAW = 0x5407 + TCSETS = 0x5402 + TCSETS2 = 0x402c542b + TCSETSF = 0x5404 + TCSETSF2 = 0x402c542d + TCSETSW = 0x5403 + TCSETSW2 = 0x402c542c + TCSETX = 0x5433 + TCSETXF = 0x5434 + TCSETXW = 0x5435 + TCXONC = 0x540a + TIOCCBRK = 0x5428 + TIOCCONS = 0x541d + TIOCEXCL = 0x540c + TIOCGDEV = 0x80045432 + TIOCGETD = 0x5424 + TIOCGEXCL = 0x80045440 + TIOCGICOUNT = 0x545d + TIOCGLCKTRMIOS = 0x5456 + TIOCGPGRP = 0x540f + TIOCGPKT = 0x80045438 + TIOCGPTLCK = 0x80045439 + TIOCGPTN = 0x80045430 + TIOCGPTPEER = 0x5441 + TIOCGRS485 = 0x542e + TIOCGSERIAL = 0x541e + TIOCGSID = 0x5429 + TIOCGSOFTCAR = 0x5419 + TIOCGWINSZ = 0x5413 + TIOCINQ = 0x541b + TIOCLINUX = 0x541c + TIOCMBIC = 0x5417 + TIOCMBIS = 0x5416 + TIOCMGET = 0x5415 + TIOCMIWAIT = 0x545c + TIOCMSET = 0x5418 + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x5422 + TIOCNXCL = 0x540d + TIOCOUTQ = 0x5411 + TIOCPKT = 0x5420 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x540e + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x5423 + TIOCSIG = 0x40045436 + TIOCSLCKTRMIOS = 0x5457 + TIOCSPGRP = 0x5410 + TIOCSPTLCK = 0x40045431 + TIOCSRS485 = 0x542f + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x541a + TIOCSTI = 0x5412 + TIOCSWINSZ = 0x5414 + TIOCVHANGUP = 0x5437 + TMPFS_MAGIC = 0x1021994 + TOSTOP = 0x100 + TPACKET_ALIGNMENT = 0x10 + TPACKET_HDRLEN = 0x34 + TP_STATUS_AVAILABLE = 0x0 + TP_STATUS_BLK_TMO = 0x20 + TP_STATUS_COPY = 0x2 + TP_STATUS_CSUMNOTREADY = 0x8 + TP_STATUS_CSUM_VALID = 0x80 + TP_STATUS_KERNEL = 0x0 + TP_STATUS_LOSING = 0x4 + TP_STATUS_SENDING = 0x2 + TP_STATUS_SEND_REQUEST = 0x1 + TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_SOFTWARE = 0x20000000 + TP_STATUS_TS_SYS_HARDWARE = 0x40000000 + TP_STATUS_USER = 0x1 + TP_STATUS_VLAN_TPID_VALID = 0x40 + TP_STATUS_VLAN_VALID = 0x10 + TP_STATUS_WRONG_FORMAT = 0x4 + TRACEFS_MAGIC = 0x74726163 + TS_COMM_LEN = 0x20 + TUNATTACHFILTER = 0x401054d5 + TUNDETACHFILTER = 0x401054d6 + TUNGETFEATURES = 0x800454cf + TUNGETFILTER = 0x801054db + TUNGETIFF = 0x800454d2 + TUNGETSNDBUF = 0x800454d3 + TUNGETVNETBE = 0x800454df + TUNGETVNETHDRSZ = 0x800454d7 + TUNGETVNETLE = 0x800454dd + TUNSETDEBUG = 0x400454c9 + TUNSETFILTEREBPF = 0x800454e1 + TUNSETGROUP = 0x400454ce + TUNSETIFF = 0x400454ca + TUNSETIFINDEX = 0x400454da + TUNSETLINK = 0x400454cd + TUNSETNOCSUM = 0x400454c8 + TUNSETOFFLOAD = 0x400454d0 + TUNSETOWNER = 0x400454cc + TUNSETPERSIST = 0x400454cb + TUNSETQUEUE = 0x400454d9 + TUNSETSNDBUF = 0x400454d4 + TUNSETSTEERINGEBPF = 0x800454e0 + TUNSETTXFILTER = 0x400454d1 + TUNSETVNETBE = 0x400454de + TUNSETVNETHDRSZ = 0x400454d8 + TUNSETVNETLE = 0x400454dc + UBI_IOCATT = 0x40186f40 + UBI_IOCDET = 0x40046f41 
+ UBI_IOCEBCH = 0x40044f02 + UBI_IOCEBER = 0x40044f01 + UBI_IOCEBISMAP = 0x80044f05 + UBI_IOCEBMAP = 0x40084f03 + UBI_IOCEBUNMAP = 0x40044f04 + UBI_IOCMKVOL = 0x40986f00 + UBI_IOCRMVOL = 0x40046f01 + UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRSVOL = 0x400c6f02 + UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCVOLCRBLK = 0x40804f07 + UBI_IOCVOLRMBLK = 0x4f08 + UBI_IOCVOLUP = 0x40084f00 + UDF_SUPER_MAGIC = 0x15013346 + UMOUNT_NOFOLLOW = 0x8 + USBDEVICE_SUPER_MAGIC = 0x9fa2 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe + V9FS_MAGIC = 0x1021997 + VDISCARD = 0xd + VEOF = 0x4 + VEOL = 0xb + VEOL2 = 0x10 + VERASE = 0x2 + VINTR = 0x0 + VKILL = 0x3 + VLNEXT = 0xf + VMADDR_CID_ANY = 0xffffffff + VMADDR_CID_HOST = 0x2 + VMADDR_CID_HYPERVISOR = 0x0 + VMADDR_CID_RESERVED = 0x1 + VMADDR_PORT_ANY = 0xffffffff + VMIN = 0x6 + VM_SOCKETS_INVALID_VERSION = 0xffffffff + VQUIT = 0x1 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VT0 = 0x0 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WALL = 0x40000000 + WCLONE = 0x80000000 + WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x80045702 + WDIOC_GETPRETIMEOUT = 0x80045709 + WDIOC_GETSTATUS = 0x80045701 + WDIOC_GETSUPPORT = 0x80285700 + WDIOC_GETTEMP = 0x80045703 + WDIOC_GETTIMELEFT = 0x8004570a + WDIOC_GETTIMEOUT = 0x80045707 + WDIOC_KEEPALIVE = 0x80045705 + WDIOC_SETOPTIONS = 0x80045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 + WEXITED = 0x4 + WIN_ACKMEDIACHANGE = 0xdb + WIN_CHECKPOWERMODE1 = 0xe5 + WIN_CHECKPOWERMODE2 = 0x98 + WIN_DEVICE_RESET = 0x8 + WIN_DIAGNOSE = 0x90 + WIN_DOORLOCK = 0xde + WIN_DOORUNLOCK = 0xdf + WIN_DOWNLOAD_MICROCODE = 0x92 + WIN_FLUSH_CACHE = 0xe7 + WIN_FLUSH_CACHE_EXT = 0xea + WIN_FORMAT = 0x50 + WIN_GETMEDIASTATUS = 0xda + WIN_IDENTIFY = 0xec + WIN_IDENTIFY_DMA = 0xee + WIN_IDLEIMMEDIATE = 0xe1 + WIN_INIT = 0x60 + WIN_MEDIAEJECT = 0xed + WIN_MULTREAD = 0xc4 + WIN_MULTREAD_EXT = 0x29 + WIN_MULTWRITE = 0xc5 + WIN_MULTWRITE_EXT = 0x39 + WIN_NOP = 0x0 + WIN_PACKETCMD = 0xa0 + WIN_PIDENTIFY = 0xa1 + WIN_POSTBOOT = 0xdc + WIN_PREBOOT = 0xdd + WIN_QUEUED_SERVICE = 0xa2 + WIN_READ = 0x20 + WIN_READDMA = 0xc8 + WIN_READDMA_EXT = 0x25 + WIN_READDMA_ONCE = 0xc9 + WIN_READDMA_QUEUED = 0xc7 + WIN_READDMA_QUEUED_EXT = 0x26 + WIN_READ_BUFFER = 0xe4 + WIN_READ_EXT = 0x24 + WIN_READ_LONG = 0x22 + WIN_READ_LONG_ONCE = 0x23 + WIN_READ_NATIVE_MAX = 0xf8 + WIN_READ_NATIVE_MAX_EXT = 0x27 + WIN_READ_ONCE = 0x21 + WIN_RECAL = 0x10 + WIN_RESTORE = 0x10 + WIN_SECURITY_DISABLE = 0xf6 + WIN_SECURITY_ERASE_PREPARE = 0xf3 + WIN_SECURITY_ERASE_UNIT = 0xf4 + WIN_SECURITY_FREEZE_LOCK = 0xf5 + WIN_SECURITY_SET_PASS = 0xf1 + WIN_SECURITY_UNLOCK = 0xf2 + WIN_SEEK = 0x70 + WIN_SETFEATURES = 0xef + WIN_SETIDLE1 = 0xe3 + WIN_SETIDLE2 = 0x97 + WIN_SETMULT = 0xc6 + WIN_SET_MAX = 0xf9 + WIN_SET_MAX_EXT = 0x37 + WIN_SLEEPNOW1 = 0xe6 + WIN_SLEEPNOW2 = 0x99 + WIN_SMART = 0xb0 + WIN_SPECIFY = 0x91 + WIN_SRST = 0x8 + WIN_STANDBY = 0xe2 + WIN_STANDBY2 = 0x96 + WIN_STANDBYNOW1 = 0xe0 + WIN_STANDBYNOW2 = 0x94 + WIN_VERIFY = 0x40 + WIN_VERIFY_EXT = 0x42 + WIN_VERIFY_ONCE = 0x41 + WIN_WRITE = 0x30 + WIN_WRITEDMA = 0xca + WIN_WRITEDMA_EXT = 0x35 + WIN_WRITEDMA_ONCE = 0xcb + WIN_WRITEDMA_QUEUED = 0xcc + WIN_WRITEDMA_QUEUED_EXT = 0x36 + WIN_WRITE_BUFFER = 0xe8 + WIN_WRITE_EXT = 0x34 + WIN_WRITE_LONG = 0x32 + WIN_WRITE_LONG_ONCE = 0x33 + WIN_WRITE_ONCE = 0x31 + WIN_WRITE_SAME = 0xe9 + WIN_WRITE_VERIFY = 0x3c + WNOHANG = 0x1 + WNOTHREAD = 0x20000000 + WNOWAIT = 0x1000000 + WORDSIZE = 0x40 + WSTOPPED = 0x2 + WUNTRACED = 0x2 + 
XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 + XCASE = 0x4 + XENFS_SUPER_MAGIC = 0xabba1974 + XTABS = 0x1800 + ZSMALLOC_MAGIC = 0x58295829 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x62) + EADDRNOTAVAIL = syscall.Errno(0x63) + EADV = syscall.Errno(0x44) + EAFNOSUPPORT = syscall.Errno(0x61) + EAGAIN = syscall.Errno(0xb) + EALREADY = syscall.Errno(0x72) + EBADE = syscall.Errno(0x34) + EBADF = syscall.Errno(0x9) + EBADFD = syscall.Errno(0x4d) + EBADMSG = syscall.Errno(0x4a) + EBADR = syscall.Errno(0x35) + EBADRQC = syscall.Errno(0x38) + EBADSLT = syscall.Errno(0x39) + EBFONT = syscall.Errno(0x3b) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x7d) + ECHILD = syscall.Errno(0xa) + ECHRNG = syscall.Errno(0x2c) + ECOMM = syscall.Errno(0x46) + ECONNABORTED = syscall.Errno(0x67) + ECONNREFUSED = syscall.Errno(0x6f) + ECONNRESET = syscall.Errno(0x68) + EDEADLK = syscall.Errno(0x23) + EDEADLOCK = syscall.Errno(0x23) + EDESTADDRREQ = syscall.Errno(0x59) + EDOM = syscall.Errno(0x21) + EDOTDOT = syscall.Errno(0x49) + EDQUOT = syscall.Errno(0x7a) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EHOSTDOWN = syscall.Errno(0x70) + EHOSTUNREACH = syscall.Errno(0x71) + EHWPOISON = syscall.Errno(0x85) + EIDRM = syscall.Errno(0x2b) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x73) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x6a) + EISDIR = syscall.Errno(0x15) + EISNAM = syscall.Errno(0x78) + EKEYEXPIRED = syscall.Errno(0x7f) + EKEYREJECTED = syscall.Errno(0x81) + EKEYREVOKED = syscall.Errno(0x80) + EL2HLT = syscall.Errno(0x33) + EL2NSYNC = syscall.Errno(0x2d) + EL3HLT = syscall.Errno(0x2e) + EL3RST = syscall.Errno(0x2f) + ELIBACC = syscall.Errno(0x4f) + ELIBBAD = syscall.Errno(0x50) + ELIBEXEC = syscall.Errno(0x53) + ELIBMAX = syscall.Errno(0x52) + ELIBSCN = syscall.Errno(0x51) + ELNRNG = syscall.Errno(0x30) + ELOOP = syscall.Errno(0x28) + EMEDIUMTYPE = syscall.Errno(0x7c) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x5a) + EMULTIHOP = syscall.Errno(0x48) + ENAMETOOLONG = syscall.Errno(0x24) + ENAVAIL = syscall.Errno(0x77) + ENETDOWN = syscall.Errno(0x64) + ENETRESET = syscall.Errno(0x66) + ENETUNREACH = syscall.Errno(0x65) + ENFILE = syscall.Errno(0x17) + ENOANO = syscall.Errno(0x37) + ENOBUFS = syscall.Errno(0x69) + ENOCSI = syscall.Errno(0x32) + ENODATA = syscall.Errno(0x3d) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOKEY = syscall.Errno(0x7e) + ENOLCK = syscall.Errno(0x25) + ENOLINK = syscall.Errno(0x43) + ENOMEDIUM = syscall.Errno(0x7b) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x2a) + ENONET = syscall.Errno(0x40) + ENOPKG = syscall.Errno(0x41) + ENOPROTOOPT = syscall.Errno(0x5c) + ENOSPC = syscall.Errno(0x1c) + ENOSR = syscall.Errno(0x3f) + ENOSTR = syscall.Errno(0x3c) + ENOSYS = syscall.Errno(0x26) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x6b) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x27) + ENOTNAM = syscall.Errno(0x76) + ENOTRECOVERABLE = syscall.Errno(0x83) + ENOTSOCK = syscall.Errno(0x58) + ENOTSUP = syscall.Errno(0x5f) + ENOTTY = syscall.Errno(0x19) + ENOTUNIQ = syscall.Errno(0x4c) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x5f) + EOVERFLOW = syscall.Errno(0x4b) + EOWNERDEAD = syscall.Errno(0x82) + EPERM = syscall.Errno(0x1) + 
EPFNOSUPPORT = syscall.Errno(0x60) + EPIPE = syscall.Errno(0x20) + EPROTO = syscall.Errno(0x47) + EPROTONOSUPPORT = syscall.Errno(0x5d) + EPROTOTYPE = syscall.Errno(0x5b) + ERANGE = syscall.Errno(0x22) + EREMCHG = syscall.Errno(0x4e) + EREMOTE = syscall.Errno(0x42) + EREMOTEIO = syscall.Errno(0x79) + ERESTART = syscall.Errno(0x55) + ERFKILL = syscall.Errno(0x84) + EROFS = syscall.Errno(0x1e) + ESHUTDOWN = syscall.Errno(0x6c) + ESOCKTNOSUPPORT = syscall.Errno(0x5e) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESRMNT = syscall.Errno(0x45) + ESTALE = syscall.Errno(0x74) + ESTRPIPE = syscall.Errno(0x56) + ETIME = syscall.Errno(0x3e) + ETIMEDOUT = syscall.Errno(0x6e) + ETOOMANYREFS = syscall.Errno(0x6d) + ETXTBSY = syscall.Errno(0x1a) + EUCLEAN = syscall.Errno(0x75) + EUNATCH = syscall.Errno(0x31) + EUSERS = syscall.Errno(0x57) + EWOULDBLOCK = syscall.Errno(0xb) + EXDEV = syscall.Errno(0x12) + EXFULL = syscall.Errno(0x36) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0x7) + SIGCHLD = syscall.Signal(0x11) + SIGCLD = syscall.Signal(0x11) + SIGCONT = syscall.Signal(0x12) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x1d) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPOLL = syscall.Signal(0x1d) + SIGPROF = syscall.Signal(0x1b) + SIGPWR = syscall.Signal(0x1e) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTKFLT = syscall.Signal(0x10) + SIGSTOP = syscall.Signal(0x13) + SIGSYS = syscall.Signal(0x1f) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x14) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x17) + SIGUSR1 = syscall.Signal(0xa) + SIGUSR2 = syscall.Signal(0xc) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical 
result out of range"}, + {35, "EDEADLK", "resource deadlock avoided"}, + {36, "ENAMETOOLONG", "file name too long"}, + {37, "ENOLCK", "no locks available"}, + {38, "ENOSYS", "function not implemented"}, + {39, "ENOTEMPTY", "directory not empty"}, + {40, "ELOOP", "too many levels of symbolic links"}, + {42, "ENOMSG", "no message of desired type"}, + {43, "EIDRM", "identifier removed"}, + {44, "ECHRNG", "channel number out of range"}, + {45, "EL2NSYNC", "level 2 not synchronized"}, + {46, "EL3HLT", "level 3 halted"}, + {47, "EL3RST", "level 3 reset"}, + {48, "ELNRNG", "link number out of range"}, + {49, "EUNATCH", "protocol driver not attached"}, + {50, "ENOCSI", "no CSI structure available"}, + {51, "EL2HLT", "level 2 halted"}, + {52, "EBADE", "invalid exchange"}, + {53, "EBADR", "invalid request descriptor"}, + {54, "EXFULL", "exchange full"}, + {55, "ENOANO", "no anode"}, + {56, "EBADRQC", "invalid request code"}, + {57, "EBADSLT", "invalid slot"}, + {59, "EBFONT", "bad font file format"}, + {60, "ENOSTR", "device not a stream"}, + {61, "ENODATA", "no data available"}, + {62, "ETIME", "timer expired"}, + {63, "ENOSR", "out of streams resources"}, + {64, "ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + {68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", "communication error on send"}, + {71, "EPROTO", "protocol error"}, + {72, "EMULTIHOP", "multihop attempted"}, + {73, "EDOTDOT", "RFS specific error"}, + {74, "EBADMSG", "bad message"}, + {75, "EOVERFLOW", "value too large for defined data type"}, + {76, "ENOTUNIQ", "name not unique on network"}, + {77, "EBADFD", "file descriptor in bad state"}, + {78, "EREMCHG", "remote address changed"}, + {79, "ELIBACC", "can not access a needed shared library"}, + {80, "ELIBBAD", "accessing a corrupted shared library"}, + {81, "ELIBSCN", ".lib section in a.out corrupted"}, + {82, "ELIBMAX", "attempting to link in too many shared libraries"}, + {83, "ELIBEXEC", "cannot exec a shared library directly"}, + {84, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {85, "ERESTART", "interrupted system call should be restarted"}, + {86, "ESTRPIPE", "streams pipe error"}, + {87, "EUSERS", "too many users"}, + {88, "ENOTSOCK", "socket operation on non-socket"}, + {89, "EDESTADDRREQ", "destination address required"}, + {90, "EMSGSIZE", "message too long"}, + {91, "EPROTOTYPE", "protocol wrong type for socket"}, + {92, "ENOPROTOOPT", "protocol not available"}, + {93, "EPROTONOSUPPORT", "protocol not supported"}, + {94, "ESOCKTNOSUPPORT", "socket type not supported"}, + {95, "ENOTSUP", "operation not supported"}, + {96, "EPFNOSUPPORT", "protocol family not supported"}, + {97, "EAFNOSUPPORT", "address family not supported by protocol"}, + {98, "EADDRINUSE", "address already in use"}, + {99, "EADDRNOTAVAIL", "cannot assign requested address"}, + {100, "ENETDOWN", "network is down"}, + {101, "ENETUNREACH", "network is unreachable"}, + {102, "ENETRESET", "network dropped connection on reset"}, + {103, "ECONNABORTED", "software caused connection abort"}, + {104, "ECONNRESET", "connection reset by peer"}, + {105, "ENOBUFS", "no buffer space available"}, + {106, "EISCONN", "transport endpoint is already connected"}, + {107, "ENOTCONN", "transport endpoint is not connected"}, + {108, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {109, "ETOOMANYREFS", "too many references: cannot splice"}, 
+ {110, "ETIMEDOUT", "connection timed out"}, + {111, "ECONNREFUSED", "connection refused"}, + {112, "EHOSTDOWN", "host is down"}, + {113, "EHOSTUNREACH", "no route to host"}, + {114, "EALREADY", "operation already in progress"}, + {115, "EINPROGRESS", "operation now in progress"}, + {116, "ESTALE", "stale file handle"}, + {117, "EUCLEAN", "structure needs cleaning"}, + {118, "ENOTNAM", "not a XENIX named type file"}, + {119, "ENAVAIL", "no XENIX semaphores available"}, + {120, "EISNAM", "is a named type file"}, + {121, "EREMOTEIO", "remote I/O error"}, + {122, "EDQUOT", "disk quota exceeded"}, + {123, "ENOMEDIUM", "no medium found"}, + {124, "EMEDIUMTYPE", "wrong medium type"}, + {125, "ECANCELED", "operation canceled"}, + {126, "ENOKEY", "required key not available"}, + {127, "EKEYEXPIRED", "key has expired"}, + {128, "EKEYREVOKED", "key has been revoked"}, + {129, "EKEYREJECTED", "key was rejected by service"}, + {130, "EOWNERDEAD", "owner died"}, + {131, "ENOTRECOVERABLE", "state not recoverable"}, + {132, "ERFKILL", "operation not possible due to RF-kill"}, + {133, "EHWPOISON", "memory page has hardware error"}, +} + +// Signal table +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, "SIGABRT", "aborted"}, + {7, "SIGBUS", "bus error"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGUSR1", "user defined signal 1"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGUSR2", "user defined signal 2"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGSTKFLT", "stack fault"}, + {17, "SIGCHLD", "child exited"}, + {18, "SIGCONT", "continued"}, + {19, "SIGSTOP", "stopped (signal)"}, + {20, "SIGTSTP", "stopped"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGURG", "urgent I/O condition"}, + {24, "SIGXCPU", "CPU time limit exceeded"}, + {25, "SIGXFSZ", "file size limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window changed"}, + {29, "SIGIO", "I/O possible"}, + {30, "SIGPWR", "power failure"}, + {31, "SIGSYS", "bad system call"}, +} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go new file mode 100644 index 000000000000..b086f7ed728e --- /dev/null +++ b/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -0,0 +1,2114 @@ +// mksyscall.pl -tags linux,riscv64 syscall_linux.go syscall_linux_riscv64.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +// +build linux,riscv64 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fchmodat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func 
utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlJoin(cmd int, arg2 string) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg2) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg3) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(arg4) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { + var _p0 unsafe.Pointer + if len(payload) > 0 { + _p0 = unsafe.Pointer(&payload[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := 
Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(source) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(target) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Acct(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(payload) > 0 { + _p2 = unsafe.Pointer(&payload[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtimex(buf *Timex) (state int, err error) { + r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) + state = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) 
(err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { + r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(oldfd int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup3(oldfd int, newfd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate1(flag int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Eventfd(initval uint, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) 
{ + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fdatasync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flistxattr(fd int, dest []byte) (sz int, err error) { + var _p0 unsafe.Pointer + if len(dest) > 0 { + _p0 = unsafe.Pointer(&dest[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fremovexattr(fd int, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + 
r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrandom(buf []byte, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettid() (tid int) { + r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) + tid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) + watchdesc = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit1(flags int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) + success = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Klogctl(typ int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + 
} + r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Llistxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lremovexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, 
uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func PivotRoot(newroot string, putold string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(newroot) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(putold) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Removexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + 
return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(callback) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setdomainname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sethostname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setns(fd int, nstype int) (err error) { + _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) 
+ if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() { + SyscallNoError(SYS_SYNC, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Syncfs(fd int) (err error) { + _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sysinfo(info *Sysinfo_t) (err error) { + _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) + ticks = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(mask int) (oldmask int) { + r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(target string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(target) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unshare(flags int) (err error) { + _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, 
e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func exitThread(code int) (err error) { + _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, advice int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func faccessat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, 
err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_PWAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, buf *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0) + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) + if e1 != 0 { + 
err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (off int64, err error) { + r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + off = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) + written = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsgid(gid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setfsuid(uid int) (err error) { + _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(resource int, rlim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT + +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, buf *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { + _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + nn = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func 
setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func 
Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go new file mode 100644 index 000000000000..41e4fd1d3ed6 --- /dev/null +++ b/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -0,0 +1,286 @@ +// linux/mksysnum.pl -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build riscv64,linux + +package unix + +const ( + SYS_IO_SETUP = 0 + SYS_IO_DESTROY = 1 + SYS_IO_SUBMIT = 2 + SYS_IO_CANCEL = 3 + SYS_IO_GETEVENTS = 4 + SYS_SETXATTR = 5 + SYS_LSETXATTR = 6 + SYS_FSETXATTR = 7 + SYS_GETXATTR = 8 + SYS_LGETXATTR = 9 + SYS_FGETXATTR = 10 + SYS_LISTXATTR = 11 + SYS_LLISTXATTR = 12 + SYS_FLISTXATTR = 13 + SYS_REMOVEXATTR = 14 + SYS_LREMOVEXATTR = 15 + SYS_FREMOVEXATTR = 16 + SYS_GETCWD = 17 + SYS_LOOKUP_DCOOKIE = 18 + SYS_EVENTFD2 = 19 + SYS_EPOLL_CREATE1 = 20 + SYS_EPOLL_CTL = 21 + SYS_EPOLL_PWAIT = 22 + SYS_DUP = 23 + SYS_DUP3 = 24 + SYS_FCNTL = 25 + SYS_INOTIFY_INIT1 = 26 + SYS_INOTIFY_ADD_WATCH = 27 + SYS_INOTIFY_RM_WATCH = 28 + SYS_IOCTL = 29 + SYS_IOPRIO_SET = 30 + SYS_IOPRIO_GET = 31 + SYS_FLOCK = 32 + SYS_MKNODAT = 33 + SYS_MKDIRAT = 34 + SYS_UNLINKAT = 35 + SYS_SYMLINKAT = 36 + SYS_LINKAT = 37 + SYS_UMOUNT2 = 39 + SYS_MOUNT = 40 + SYS_PIVOT_ROOT = 41 + SYS_NFSSERVCTL = 42 + SYS_STATFS = 43 + SYS_FSTATFS = 44 + SYS_TRUNCATE = 45 + SYS_FTRUNCATE = 46 + SYS_FALLOCATE = 47 + SYS_FACCESSAT = 48 + SYS_CHDIR = 49 + SYS_FCHDIR = 50 + SYS_CHROOT = 51 + SYS_FCHMOD = 52 + SYS_FCHMODAT = 53 + SYS_FCHOWNAT = 54 + SYS_FCHOWN = 55 + SYS_OPENAT = 56 + SYS_CLOSE = 57 + SYS_VHANGUP = 58 + SYS_PIPE2 = 59 + SYS_QUOTACTL = 60 + SYS_GETDENTS64 = 61 + SYS_LSEEK = 62 + SYS_READ = 63 + SYS_WRITE = 64 + SYS_READV = 65 + SYS_WRITEV = 66 + SYS_PREAD64 = 67 + SYS_PWRITE64 = 68 + SYS_PREADV = 69 + SYS_PWRITEV = 70 + SYS_SENDFILE = 71 + SYS_PSELECT6 = 72 + SYS_PPOLL = 73 + SYS_SIGNALFD4 = 74 + SYS_VMSPLICE = 75 + SYS_SPLICE = 76 + SYS_TEE = 77 + SYS_READLINKAT = 78 + SYS_FSTATAT = 79 + SYS_FSTAT = 80 + SYS_SYNC = 81 + SYS_FSYNC = 82 + SYS_FDATASYNC = 83 + SYS_SYNC_FILE_RANGE = 84 + SYS_TIMERFD_CREATE = 85 + SYS_TIMERFD_SETTIME = 86 + SYS_TIMERFD_GETTIME = 87 + SYS_UTIMENSAT = 88 + SYS_ACCT = 89 + SYS_CAPGET = 90 + SYS_CAPSET = 91 + SYS_PERSONALITY = 92 + SYS_EXIT = 93 + SYS_EXIT_GROUP = 94 + SYS_WAITID = 95 + SYS_SET_TID_ADDRESS = 96 + SYS_UNSHARE = 97 + SYS_FUTEX = 98 + SYS_SET_ROBUST_LIST = 99 + SYS_GET_ROBUST_LIST = 100 + SYS_NANOSLEEP = 101 + SYS_GETITIMER = 102 + SYS_SETITIMER = 103 + SYS_KEXEC_LOAD = 104 + SYS_INIT_MODULE = 105 + SYS_DELETE_MODULE = 106 + SYS_TIMER_CREATE = 107 + SYS_TIMER_GETTIME = 108 + SYS_TIMER_GETOVERRUN = 109 + SYS_TIMER_SETTIME = 110 + SYS_TIMER_DELETE = 111 + SYS_CLOCK_SETTIME = 112 + SYS_CLOCK_GETTIME = 113 + SYS_CLOCK_GETRES = 114 + SYS_CLOCK_NANOSLEEP = 115 + SYS_SYSLOG = 116 + SYS_PTRACE = 117 + SYS_SCHED_SETPARAM = 118 + SYS_SCHED_SETSCHEDULER = 119 + SYS_SCHED_GETSCHEDULER = 120 + SYS_SCHED_GETPARAM = 121 + SYS_SCHED_SETAFFINITY = 122 + 
SYS_SCHED_GETAFFINITY = 123 + SYS_SCHED_YIELD = 124 + SYS_SCHED_GET_PRIORITY_MAX = 125 + SYS_SCHED_GET_PRIORITY_MIN = 126 + SYS_SCHED_RR_GET_INTERVAL = 127 + SYS_RESTART_SYSCALL = 128 + SYS_KILL = 129 + SYS_TKILL = 130 + SYS_TGKILL = 131 + SYS_SIGALTSTACK = 132 + SYS_RT_SIGSUSPEND = 133 + SYS_RT_SIGACTION = 134 + SYS_RT_SIGPROCMASK = 135 + SYS_RT_SIGPENDING = 136 + SYS_RT_SIGTIMEDWAIT = 137 + SYS_RT_SIGQUEUEINFO = 138 + SYS_RT_SIGRETURN = 139 + SYS_SETPRIORITY = 140 + SYS_GETPRIORITY = 141 + SYS_REBOOT = 142 + SYS_SETREGID = 143 + SYS_SETGID = 144 + SYS_SETREUID = 145 + SYS_SETUID = 146 + SYS_SETRESUID = 147 + SYS_GETRESUID = 148 + SYS_SETRESGID = 149 + SYS_GETRESGID = 150 + SYS_SETFSUID = 151 + SYS_SETFSGID = 152 + SYS_TIMES = 153 + SYS_SETPGID = 154 + SYS_GETPGID = 155 + SYS_GETSID = 156 + SYS_SETSID = 157 + SYS_GETGROUPS = 158 + SYS_SETGROUPS = 159 + SYS_UNAME = 160 + SYS_SETHOSTNAME = 161 + SYS_SETDOMAINNAME = 162 + SYS_GETRLIMIT = 163 + SYS_SETRLIMIT = 164 + SYS_GETRUSAGE = 165 + SYS_UMASK = 166 + SYS_PRCTL = 167 + SYS_GETCPU = 168 + SYS_GETTIMEOFDAY = 169 + SYS_SETTIMEOFDAY = 170 + SYS_ADJTIMEX = 171 + SYS_GETPID = 172 + SYS_GETPPID = 173 + SYS_GETUID = 174 + SYS_GETEUID = 175 + SYS_GETGID = 176 + SYS_GETEGID = 177 + SYS_GETTID = 178 + SYS_SYSINFO = 179 + SYS_MQ_OPEN = 180 + SYS_MQ_UNLINK = 181 + SYS_MQ_TIMEDSEND = 182 + SYS_MQ_TIMEDRECEIVE = 183 + SYS_MQ_NOTIFY = 184 + SYS_MQ_GETSETATTR = 185 + SYS_MSGGET = 186 + SYS_MSGCTL = 187 + SYS_MSGRCV = 188 + SYS_MSGSND = 189 + SYS_SEMGET = 190 + SYS_SEMCTL = 191 + SYS_SEMTIMEDOP = 192 + SYS_SEMOP = 193 + SYS_SHMGET = 194 + SYS_SHMCTL = 195 + SYS_SHMAT = 196 + SYS_SHMDT = 197 + SYS_SOCKET = 198 + SYS_SOCKETPAIR = 199 + SYS_BIND = 200 + SYS_LISTEN = 201 + SYS_ACCEPT = 202 + SYS_CONNECT = 203 + SYS_GETSOCKNAME = 204 + SYS_GETPEERNAME = 205 + SYS_SENDTO = 206 + SYS_RECVFROM = 207 + SYS_SETSOCKOPT = 208 + SYS_GETSOCKOPT = 209 + SYS_SHUTDOWN = 210 + SYS_SENDMSG = 211 + SYS_RECVMSG = 212 + SYS_READAHEAD = 213 + SYS_BRK = 214 + SYS_MUNMAP = 215 + SYS_MREMAP = 216 + SYS_ADD_KEY = 217 + SYS_REQUEST_KEY = 218 + SYS_KEYCTL = 219 + SYS_CLONE = 220 + SYS_EXECVE = 221 + SYS_MMAP = 222 + SYS_FADVISE64 = 223 + SYS_SWAPON = 224 + SYS_SWAPOFF = 225 + SYS_MPROTECT = 226 + SYS_MSYNC = 227 + SYS_MLOCK = 228 + SYS_MUNLOCK = 229 + SYS_MLOCKALL = 230 + SYS_MUNLOCKALL = 231 + SYS_MINCORE = 232 + SYS_MADVISE = 233 + SYS_REMAP_FILE_PAGES = 234 + SYS_MBIND = 235 + SYS_GET_MEMPOLICY = 236 + SYS_SET_MEMPOLICY = 237 + SYS_MIGRATE_PAGES = 238 + SYS_MOVE_PAGES = 239 + SYS_RT_TGSIGQUEUEINFO = 240 + SYS_PERF_EVENT_OPEN = 241 + SYS_ACCEPT4 = 242 + SYS_RECVMMSG = 243 + SYS_ARCH_SPECIFIC_SYSCALL = 244 + SYS_WAIT4 = 260 + SYS_PRLIMIT64 = 261 + SYS_FANOTIFY_INIT = 262 + SYS_FANOTIFY_MARK = 263 + SYS_NAME_TO_HANDLE_AT = 264 + SYS_OPEN_BY_HANDLE_AT = 265 + SYS_CLOCK_ADJTIME = 266 + SYS_SYNCFS = 267 + SYS_SETNS = 268 + SYS_SENDMMSG = 269 + SYS_PROCESS_VM_READV = 270 + SYS_PROCESS_VM_WRITEV = 271 + SYS_KCMP = 272 + SYS_FINIT_MODULE = 273 + SYS_SCHED_SETATTR = 274 + SYS_SCHED_GETATTR = 275 + SYS_RENAMEAT2 = 276 + SYS_SECCOMP = 277 + SYS_GETRANDOM = 278 + SYS_MEMFD_CREATE = 279 + SYS_BPF = 280 + SYS_EXECVEAT = 281 + SYS_USERFAULTFD = 282 + SYS_MEMBARRIER = 283 + SYS_MLOCK2 = 284 + SYS_COPY_FILE_RANGE = 285 + SYS_PREADV2 = 286 + SYS_PWRITEV2 = 287 + SYS_PKEY_MPROTECT = 288 + SYS_PKEY_ALLOC = 289 + SYS_PKEY_FREE = 290 + SYS_STATX = 291 + SYS_IO_PGETEVENTS = 292 +) diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_386.go 
b/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index cb3729eabb6f..1944dfb1a44c 100644 --- a/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -510,6 +510,13 @@ const ( RTA_FLOW = 0xb RTA_CACHEINFO = 0xc RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 RTN_UNSPEC = 0x0 RTN_UNICAST = 0x1 RTN_LOCAL = 0x2 diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 0f3c0fb5454f..dd09289d14dd 100644 --- a/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -514,6 +514,13 @@ const ( RTA_FLOW = 0xb RTA_CACHEINFO = 0xc RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 RTN_UNSPEC = 0x0 RTN_UNICAST = 0x1 RTN_LOCAL = 0x2 diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index bd534da8775e..d9e844d8c982 100644 --- a/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -513,6 +513,13 @@ const ( RTA_FLOW = 0xb RTA_CACHEINFO = 0xc RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 RTN_UNSPEC = 0x0 RTN_UNICAST = 0x1 RTN_LOCAL = 0x2 diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 3a72fd9f1003..fd2471a3cfbf 100644 --- a/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -515,6 +515,13 @@ const ( RTA_FLOW = 0xb RTA_CACHEINFO = 0xc RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 RTN_UNSPEC = 0x0 RTN_UNICAST = 0x1 RTN_LOCAL = 0x2 diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index cd360b2709df..89218d9d1b42 100644 --- a/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -511,6 +511,13 @@ const ( RTA_FLOW = 0xb RTA_CACHEINFO = 0xc RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 RTN_UNSPEC = 0x0 RTN_UNICAST = 0x1 RTN_LOCAL = 0x2 diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index d35ec6022737..059903972dbe 100644 --- a/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -515,6 +515,13 @@ const ( RTA_FLOW = 0xb RTA_CACHEINFO = 0xc RTA_TABLE = 0xf + RTA_MARK = 0x10 + 
RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 RTN_UNSPEC = 0x0 RTN_UNICAST = 0x1 RTN_LOCAL = 0x2 diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 0f4993024b89..c933cea679ad 100644 --- a/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -515,6 +515,13 @@ const ( RTA_FLOW = 0xb RTA_CACHEINFO = 0xc RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 RTN_UNSPEC = 0x0 RTN_UNICAST = 0x1 RTN_LOCAL = 0x2 diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 55f97c5cc410..92d358ae1951 100644 --- a/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -511,6 +511,13 @@ const ( RTA_FLOW = 0xb RTA_CACHEINFO = 0xc RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 RTN_UNSPEC = 0x0 RTN_UNICAST = 0x1 RTN_LOCAL = 0x2 diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index f1d426445345..2dbda84397e7 100644 --- a/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -516,6 +516,13 @@ const ( RTA_FLOW = 0xb RTA_CACHEINFO = 0xc RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 RTN_UNSPEC = 0x0 RTN_UNICAST = 0x1 RTN_LOCAL = 0x2 diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 90ab1f7afee6..b1d2357034aa 100644 --- a/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -516,6 +516,13 @@ const ( RTA_FLOW = 0xb RTA_CACHEINFO = 0xc RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 RTN_UNSPEC = 0x0 RTN_UNICAST = 0x1 RTN_LOCAL = 0x2 diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go new file mode 100644 index 000000000000..0a34ccdd751a --- /dev/null +++ b/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -0,0 +1,1919 @@ +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +// +build riscv64,linux + +package unix + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + PathMax = 0x1000 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Timex struct { + Modes uint32 + _ [4]byte + Offset int64 + Freq int64 + Maxerror int64 + Esterror int64 + Status int32 + _ [4]byte + Constant int64 + Precision int64 + Tolerance int64 + Time Timeval + Tick int64 + Ppsfreq int64 + Jitter int64 + Shift int32 + _ [4]byte + Stabil int64 + Jitcnt int64 + Calcnt int64 + Errcnt int64 + Stbcnt int64 + Tai int32 + _ [44]byte +} + +type Time_t int64 + +type Tms struct { + Utime int64 + Stime int64 + Cutime int64 + Cstime int64 +} + +type Utimbuf struct { + Actime int64 + Modtime int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Dev uint64 + Ino uint64 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint64 + _ uint64 + Size int64 + Blksize int32 + _ int32 + Blocks int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + _ [2]int32 +} + +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + _ int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]uint8 + _ [5]byte +} + +type Fsid struct { + Val [2]int32 +} + +type Flock_t struct { + Type int16 + Whence int16 + _ [4]byte + Start int64 + Len int64 + Pid int32 + _ [4]byte +} + +type FscryptPolicy struct { + Version uint8 + Contents_encryption_mode uint8 + Filenames_encryption_mode uint8 + Flags uint8 + Master_key_descriptor [8]uint8 +} + +type FscryptKey struct { + Mode uint32 + Raw [64]uint8 + Size uint32 +} + +type KeyctlDHParams struct { + Private int32 + Prime int32 + Base int32 +} + +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrInet4 struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Family uint16 + Path [108]uint8 +} + +type RawSockaddrLinklayer struct { + Family uint16 + Protocol uint16 + Ifindex int32 + Hatype uint16 + Pkttype uint8 + Halen uint8 + Addr [8]uint8 +} + +type RawSockaddrNetlink struct { + Family uint16 + Pad uint16 + Pid uint32 + Groups uint32 +} + +type RawSockaddrHCI struct { + Family uint16 + Dev uint16 + Channel uint16 +} + +type RawSockaddrL2 struct { + Family uint16 + Psm uint16 + Bdaddr [6]uint8 + Cid uint16 + Bdaddr_type uint8 + _ [1]byte +} + +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 
+ _ [1]byte +} + +type RawSockaddrCAN struct { + Family uint16 + _ [2]byte + Ifindex int32 + Addr [8]byte +} + +type RawSockaddrALG struct { + Family uint16 + Type [14]uint8 + Feat uint32 + Mask uint32 + Name [64]uint8 +} + +type RawSockaddrVM struct { + Family uint16 + Reserved1 uint16 + Port uint32 + Cid uint32 + Zero [4]uint8 +} + +type RawSockaddr struct { + Family uint16 + Data [14]uint8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [96]uint8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type PacketMreq struct { + Ifindex int32 + Type uint16 + Alen uint16 + Address [8]uint8 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + _ [4]byte + Iov *Iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + _ [4]byte +} + +type Cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type Inet4Pktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Data [8]uint32 +} + +type Ucred struct { + Pid int32 + Uid uint32 + Gid uint32 +} + +type TCPInfo struct { + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + _ [2]byte + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x70 + SizeofSockaddrUnix = 0x6e + SizeofSockaddrLinklayer = 0x14 + SizeofSockaddrNetlink = 0xc + SizeofSockaddrHCI = 0x6 + SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa + SizeofSockaddrCAN = 0x10 + SizeofSockaddrALG = 0x58 + SizeofSockaddrVM = 0x10 + SizeofLinger = 0x8 + SizeofIovec = 0x10 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofPacketMreq = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofUcred = 0xc + SizeofTCPInfo = 0x68 +) + +const ( + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_INFO_KIND = 0x1 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + 
IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 + IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 + IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_MAX = 0x31 + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 +) + +type NlMsghdr struct { + Len uint32 + Type uint16 + Flags uint16 + Seq uint32 + Pid uint32 +} + +type NlMsgerr struct { + Error int32 + Msg NlMsghdr +} + +type RtGenmsg struct { + Family uint8 +} + +type NlAttr struct { + Len uint16 + Type uint16 +} + +type RtAttr struct { + Len uint16 + Type uint16 +} + +type IfInfomsg struct { + Family uint8 + _ uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 +} + +type IfAddrmsg struct { + Family uint8 + Prefixlen uint8 + Flags uint8 + Scope uint8 + Index uint32 +} + +type RtMsg struct { + Family uint8 + Dst_len uint8 + Src_len uint8 + Tos uint8 + Table uint8 + Protocol uint8 + Scope uint8 + Type uint8 + Flags uint32 +} + +type RtNexthop struct { + Len uint16 + Flags uint8 + Hops uint8 + Ifindex int32 +} + +const ( + SizeofSockFilter = 0x8 + SizeofSockFprog = 0x10 +) + +type SockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type SockFprog struct { + Len uint16 + _ [6]byte + Filter *SockFilter +} + +type InotifyEvent struct { + Wd int32 + Mask uint32 + Cookie uint32 + Len uint32 +} + +const SizeofInotifyEvent = 0x10 + +type PtraceRegs struct { + Pc uint64 + Ra uint64 + Sp uint64 + Gp uint64 + Tp uint64 + T0 uint64 + T1 uint64 + T2 uint64 + S0 uint64 + S1 uint64 + A0 uint64 + A1 uint64 + A2 uint64 + A3 uint64 + A4 uint64 + A5 uint64 + A6 uint64 + A7 uint64 + S2 uint64 + S3 uint64 + S4 uint64 + S5 uint64 + S6 uint64 + S7 uint64 + S8 uint64 + S9 uint64 + 
S10 uint64 + S11 uint64 + T3 uint64 + T4 uint64 + T5 uint64 + T6 uint64 +} + +type FdSet struct { + Bits [16]int64 +} + +type Sysinfo_t struct { + Uptime int64 + Loads [3]uint64 + Totalram uint64 + Freeram uint64 + Sharedram uint64 + Bufferram uint64 + Totalswap uint64 + Freeswap uint64 + Procs uint16 + Pad uint16 + _ [4]byte + Totalhigh uint64 + Freehigh uint64 + Unit uint32 + _ [0]uint8 + _ [4]byte +} + +type Utsname struct { + Sysname [65]byte + Nodename [65]byte + Release [65]byte + Version [65]byte + Machine [65]byte + Domainname [65]byte +} + +type Ustat_t struct { + Tfree int32 + _ [4]byte + Tinode uint64 + Fname [6]uint8 + Fpack [6]uint8 + _ [4]byte +} + +type EpollEvent struct { + Events uint32 + Fd int32 + Pad int32 +} + +const ( + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + + AT_SYMLINK_FOLLOW = 0x400 + AT_SYMLINK_NOFOLLOW = 0x100 + + AT_EACCESS = 0x200 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLIN = 0x1 + POLLPRI = 0x2 + POLLOUT = 0x4 + POLLRDHUP = 0x2000 + POLLERR = 0x8 + POLLHUP = 0x10 + POLLNVAL = 0x20 +) + +type Sigset_t struct { + Val [16]uint64 +} + +const RNDGETENTCNT = 0x80045200 + +const PERF_IOC_FLAG_GROUP = 0x1 + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Line uint8 + Cc [19]uint8 + Ispeed uint32 + Ospeed uint32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +type Taskstats struct { + Version uint16 + _ [2]byte + Ac_exitcode uint32 + Ac_flag uint8 + Ac_nice uint8 + _ [6]byte + Cpu_count uint64 + Cpu_delay_total uint64 + Blkio_count uint64 + Blkio_delay_total uint64 + Swapin_count uint64 + Swapin_delay_total uint64 + Cpu_run_real_total uint64 + Cpu_run_virtual_total uint64 + Ac_comm [32]uint8 + Ac_sched uint8 + Ac_pad [3]uint8 + _ [4]byte + Ac_uid uint32 + Ac_gid uint32 + Ac_pid uint32 + Ac_ppid uint32 + Ac_btime uint32 + _ [4]byte + Ac_etime uint64 + Ac_utime uint64 + Ac_stime uint64 + Ac_minflt uint64 + Ac_majflt uint64 + Coremem uint64 + Virtmem uint64 + Hiwater_rss uint64 + Hiwater_vm uint64 + Read_char uint64 + Write_char uint64 + Read_syscalls uint64 + Write_syscalls uint64 + Read_bytes uint64 + Write_bytes uint64 + Cancelled_write_bytes uint64 + Nvcsw uint64 + Nivcsw uint64 + Ac_utimescaled uint64 + Ac_stimescaled uint64 + Cpu_scaled_run_real_total uint64 + Freepages_count uint64 + Freepages_delay_total uint64 +} + +const ( + TASKSTATS_CMD_UNSPEC = 0x0 + TASKSTATS_CMD_GET = 0x1 + TASKSTATS_CMD_NEW = 0x2 + TASKSTATS_TYPE_UNSPEC = 0x0 + TASKSTATS_TYPE_PID = 0x1 + TASKSTATS_TYPE_TGID = 0x2 + TASKSTATS_TYPE_STATS = 0x3 + TASKSTATS_TYPE_AGGR_PID = 0x4 + TASKSTATS_TYPE_AGGR_TGID = 0x5 + TASKSTATS_TYPE_NULL = 0x6 + TASKSTATS_CMD_ATTR_UNSPEC = 0x0 + TASKSTATS_CMD_ATTR_PID = 0x1 + TASKSTATS_CMD_ATTR_TGID = 0x2 + TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 + TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 +) + +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + +type Genlmsghdr struct { + Cmd uint8 + Version uint8 + Reserved uint16 +} + +const ( + CTRL_CMD_UNSPEC = 0x0 + CTRL_CMD_NEWFAMILY = 0x1 + 
CTRL_CMD_DELFAMILY = 0x2 + CTRL_CMD_GETFAMILY = 0x3 + CTRL_CMD_NEWOPS = 0x4 + CTRL_CMD_DELOPS = 0x5 + CTRL_CMD_GETOPS = 0x6 + CTRL_CMD_NEWMCAST_GRP = 0x7 + CTRL_CMD_DELMCAST_GRP = 0x8 + CTRL_CMD_GETMCAST_GRP = 0x9 + CTRL_ATTR_UNSPEC = 0x0 + CTRL_ATTR_FAMILY_ID = 0x1 + CTRL_ATTR_FAMILY_NAME = 0x2 + CTRL_ATTR_VERSION = 0x3 + CTRL_ATTR_HDRSIZE = 0x4 + CTRL_ATTR_MAXATTR = 0x5 + CTRL_ATTR_OPS = 0x6 + CTRL_ATTR_MCAST_GROUPS = 0x7 + CTRL_ATTR_OP_UNSPEC = 0x0 + CTRL_ATTR_OP_ID = 0x1 + CTRL_ATTR_OP_FLAGS = 0x2 + CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 + CTRL_ATTR_MCAST_GRP_NAME = 0x1 + CTRL_ATTR_MCAST_GRP_ID = 0x2 +) + +type cpuMask uint64 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x40 +) + +const ( + BDADDR_BREDR = 0x0 + BDADDR_LE_PUBLIC = 0x1 + BDADDR_LE_RANDOM = 0x2 +) + +type PerfEventAttr struct { + Type uint32 + Size uint32 + Config uint64 + Sample uint64 + Sample_type uint64 + Read_format uint64 + Bits uint64 + Wakeup uint32 + Bp_type uint32 + Ext1 uint64 + Ext2 uint64 + Branch_sample_type uint64 + Sample_regs_user uint64 + Sample_stack_user uint32 + Clockid int32 + Sample_regs_intr uint64 + Aux_watermark uint32 + _ uint32 +} + +type PerfEventMmapPage struct { + Version uint32 + Compat_version uint32 + Lock uint32 + Index uint32 + Offset int64 + Time_enabled uint64 + Time_running uint64 + Capabilities uint64 + Pmc_width uint16 + Time_shift uint16 + Time_mult uint32 + Time_offset uint64 + Time_zero uint64 + Size uint32 + _ [948]uint8 + Data_head uint64 + Data_tail uint64 + Data_offset uint64 + Data_size uint64 + Aux_head uint64 + Aux_tail uint64 + Aux_offset uint64 + Aux_size uint64 +} + +const ( + PerfBitDisabled uint64 = CBitFieldMaskBit0 + PerfBitInherit = CBitFieldMaskBit1 + PerfBitPinned = CBitFieldMaskBit2 + PerfBitExclusive = CBitFieldMaskBit3 + PerfBitExcludeUser = CBitFieldMaskBit4 + PerfBitExcludeKernel = CBitFieldMaskBit5 + PerfBitExcludeHv = CBitFieldMaskBit6 + PerfBitExcludeIdle = CBitFieldMaskBit7 + PerfBitMmap = CBitFieldMaskBit8 + PerfBitComm = CBitFieldMaskBit9 + PerfBitFreq = CBitFieldMaskBit10 + PerfBitInheritStat = CBitFieldMaskBit11 + PerfBitEnableOnExec = CBitFieldMaskBit12 + PerfBitTask = CBitFieldMaskBit13 + PerfBitWatermark = CBitFieldMaskBit14 + PerfBitPreciseIPBit1 = CBitFieldMaskBit15 + PerfBitPreciseIPBit2 = CBitFieldMaskBit16 + PerfBitMmapData = CBitFieldMaskBit17 + PerfBitSampleIDAll = CBitFieldMaskBit18 + PerfBitExcludeHost = CBitFieldMaskBit19 + PerfBitExcludeGuest = CBitFieldMaskBit20 + PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 + PerfBitExcludeCallchainUser = CBitFieldMaskBit22 + PerfBitMmap2 = CBitFieldMaskBit23 + PerfBitCommExec = CBitFieldMaskBit24 + PerfBitUseClockID = CBitFieldMaskBit25 + PerfBitContextSwitch = CBitFieldMaskBit26 +) + +const ( + PERF_TYPE_HARDWARE = 0x0 + PERF_TYPE_SOFTWARE = 0x1 + PERF_TYPE_TRACEPOINT = 0x2 + PERF_TYPE_HW_CACHE = 0x3 + PERF_TYPE_RAW = 0x4 + PERF_TYPE_BREAKPOINT = 0x5 + + PERF_COUNT_HW_CPU_CYCLES = 0x0 + PERF_COUNT_HW_INSTRUCTIONS = 0x1 + PERF_COUNT_HW_CACHE_REFERENCES = 0x2 + PERF_COUNT_HW_CACHE_MISSES = 0x3 + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 + PERF_COUNT_HW_BRANCH_MISSES = 0x5 + PERF_COUNT_HW_BUS_CYCLES = 0x6 + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 + PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 + + PERF_COUNT_HW_CACHE_L1D = 0x0 + PERF_COUNT_HW_CACHE_L1I = 0x1 + PERF_COUNT_HW_CACHE_LL = 0x2 + PERF_COUNT_HW_CACHE_DTLB = 0x3 + PERF_COUNT_HW_CACHE_ITLB = 0x4 + PERF_COUNT_HW_CACHE_BPU = 0x5 + PERF_COUNT_HW_CACHE_NODE = 0x6 + + PERF_COUNT_HW_CACHE_OP_READ = 0x0 + 
PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 + PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 + + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 + PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 + + PERF_COUNT_SW_CPU_CLOCK = 0x0 + PERF_COUNT_SW_TASK_CLOCK = 0x1 + PERF_COUNT_SW_PAGE_FAULTS = 0x2 + PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 + PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 + PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 + PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 + PERF_COUNT_SW_EMULATION_FAULTS = 0x8 + PERF_COUNT_SW_DUMMY = 0x9 + + PERF_SAMPLE_IP = 0x1 + PERF_SAMPLE_TID = 0x2 + PERF_SAMPLE_TIME = 0x4 + PERF_SAMPLE_ADDR = 0x8 + PERF_SAMPLE_READ = 0x10 + PERF_SAMPLE_CALLCHAIN = 0x20 + PERF_SAMPLE_ID = 0x40 + PERF_SAMPLE_CPU = 0x80 + PERF_SAMPLE_PERIOD = 0x100 + PERF_SAMPLE_STREAM_ID = 0x200 + PERF_SAMPLE_RAW = 0x400 + PERF_SAMPLE_BRANCH_STACK = 0x800 + + PERF_SAMPLE_BRANCH_USER = 0x1 + PERF_SAMPLE_BRANCH_KERNEL = 0x2 + PERF_SAMPLE_BRANCH_HV = 0x4 + PERF_SAMPLE_BRANCH_ANY = 0x8 + PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 + PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 + PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + + PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 + PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 + PERF_FORMAT_ID = 0x4 + PERF_FORMAT_GROUP = 0x8 + + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + + PERF_CONTEXT_HV = -0x20 + PERF_CONTEXT_KERNEL = -0x80 + PERF_CONTEXT_USER = -0x200 + + PERF_CONTEXT_GUEST = -0x800 + PERF_CONTEXT_GUEST_KERNEL = -0x880 + PERF_CONTEXT_GUEST_USER = -0xa00 + + PERF_FLAG_FD_NO_GROUP = 0x1 + PERF_FLAG_FD_OUTPUT = 0x2 + PERF_FLAG_PID_CGROUP = 0x4 +) + +const ( + CBitFieldMaskBit0 = 0x1 + CBitFieldMaskBit1 = 0x2 + CBitFieldMaskBit2 = 0x4 + CBitFieldMaskBit3 = 0x8 + CBitFieldMaskBit4 = 0x10 + CBitFieldMaskBit5 = 0x20 + CBitFieldMaskBit6 = 0x40 + CBitFieldMaskBit7 = 0x80 + CBitFieldMaskBit8 = 0x100 + CBitFieldMaskBit9 = 0x200 + CBitFieldMaskBit10 = 0x400 + CBitFieldMaskBit11 = 0x800 + CBitFieldMaskBit12 = 0x1000 + CBitFieldMaskBit13 = 0x2000 + CBitFieldMaskBit14 = 0x4000 + CBitFieldMaskBit15 = 0x8000 + CBitFieldMaskBit16 = 0x10000 + CBitFieldMaskBit17 = 0x20000 + CBitFieldMaskBit18 = 0x40000 + CBitFieldMaskBit19 = 0x80000 + CBitFieldMaskBit20 = 0x100000 + CBitFieldMaskBit21 = 0x200000 + CBitFieldMaskBit22 = 0x400000 + CBitFieldMaskBit23 = 0x800000 + CBitFieldMaskBit24 = 0x1000000 + CBitFieldMaskBit25 = 0x2000000 + CBitFieldMaskBit26 = 0x4000000 + CBitFieldMaskBit27 = 0x8000000 + CBitFieldMaskBit28 = 0x10000000 + CBitFieldMaskBit29 = 0x20000000 + CBitFieldMaskBit30 = 0x40000000 + CBitFieldMaskBit31 = 0x80000000 + CBitFieldMaskBit32 = 0x100000000 + CBitFieldMaskBit33 = 0x200000000 + CBitFieldMaskBit34 = 0x400000000 + CBitFieldMaskBit35 = 0x800000000 + CBitFieldMaskBit36 = 0x1000000000 + CBitFieldMaskBit37 = 0x2000000000 + CBitFieldMaskBit38 = 0x4000000000 + CBitFieldMaskBit39 = 0x8000000000 + CBitFieldMaskBit40 = 0x10000000000 + CBitFieldMaskBit41 = 0x20000000000 + CBitFieldMaskBit42 = 0x40000000000 + CBitFieldMaskBit43 = 0x80000000000 + CBitFieldMaskBit44 = 0x100000000000 + CBitFieldMaskBit45 = 0x200000000000 + CBitFieldMaskBit46 = 0x400000000000 + CBitFieldMaskBit47 = 0x800000000000 + CBitFieldMaskBit48 = 0x1000000000000 + CBitFieldMaskBit49 = 0x2000000000000 + CBitFieldMaskBit50 = 0x4000000000000 + CBitFieldMaskBit51 = 0x8000000000000 + CBitFieldMaskBit52 = 0x10000000000000 + CBitFieldMaskBit53 = 0x20000000000000 + 
CBitFieldMaskBit54 = 0x40000000000000 + CBitFieldMaskBit55 = 0x80000000000000 + CBitFieldMaskBit56 = 0x100000000000000 + CBitFieldMaskBit57 = 0x200000000000000 + CBitFieldMaskBit58 = 0x400000000000000 + CBitFieldMaskBit59 = 0x800000000000000 + CBitFieldMaskBit60 = 0x1000000000000000 + CBitFieldMaskBit61 = 0x2000000000000000 + CBitFieldMaskBit62 = 0x4000000000000000 + CBitFieldMaskBit63 = 0x8000000000000000 +) + +type SockaddrStorage struct { + Family uint16 + _ [118]uint8 + _ uint64 +} + +type TCPMD5Sig struct { + Addr SockaddrStorage + Flags uint8 + Prefixlen uint8 + Keylen uint16 + _ uint32 + Key [80]uint8 +} + +type HDDriveCmdHdr struct { + Command uint8 + Number uint8 + Feature uint8 + Count uint8 +} + +type HDGeometry struct { + Heads uint8 + Sectors uint8 + Cylinders uint16 + _ [4]byte + Start uint64 +} + +type HDDriveID struct { + Config uint16 + Cyls uint16 + Reserved2 uint16 + Heads uint16 + Track_bytes uint16 + Sector_bytes uint16 + Sectors uint16 + Vendor0 uint16 + Vendor1 uint16 + Vendor2 uint16 + Serial_no [20]uint8 + Buf_type uint16 + Buf_size uint16 + Ecc_bytes uint16 + Fw_rev [8]uint8 + Model [40]uint8 + Max_multsect uint8 + Vendor3 uint8 + Dword_io uint16 + Vendor4 uint8 + Capability uint8 + Reserved50 uint16 + Vendor5 uint8 + TPIO uint8 + Vendor6 uint8 + TDMA uint8 + Field_valid uint16 + Cur_cyls uint16 + Cur_heads uint16 + Cur_sectors uint16 + Cur_capacity0 uint16 + Cur_capacity1 uint16 + Multsect uint8 + Multsect_valid uint8 + Lba_capacity uint32 + Dma_1word uint16 + Dma_mword uint16 + Eide_pio_modes uint16 + Eide_dma_min uint16 + Eide_dma_time uint16 + Eide_pio uint16 + Eide_pio_iordy uint16 + Words69_70 [2]uint16 + Words71_74 [4]uint16 + Queue_depth uint16 + Words76_79 [4]uint16 + Major_rev_num uint16 + Minor_rev_num uint16 + Command_set_1 uint16 + Command_set_2 uint16 + Cfsse uint16 + Cfs_enable_1 uint16 + Cfs_enable_2 uint16 + Csf_default uint16 + Dma_ultra uint16 + Trseuc uint16 + TrsEuc uint16 + CurAPMvalues uint16 + Mprc uint16 + Hw_config uint16 + Acoustic uint16 + Msrqs uint16 + Sxfert uint16 + Sal uint16 + Spg uint32 + Lba_capacity_2 uint64 + Words104_125 [22]uint16 + Last_lun uint16 + Word127 uint16 + Dlf uint16 + Csfo uint16 + Words130_155 [26]uint16 + Word156 uint16 + Words157_159 [3]uint16 + Cfa_power uint16 + Words161_175 [15]uint16 + Words176_205 [30]uint16 + Words206_254 [49]uint16 + Integrity_word uint16 +} + +type Statfs_t struct { + Type int64 + Bsize int64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Namelen int64 + Frsize int64 + Flags int64 + Spare [4]int64 +} + +const ( + ST_MANDLOCK = 0x40 + ST_NOATIME = 0x400 + ST_NODEV = 0x4 + ST_NODIRATIME = 0x800 + ST_NOEXEC = 0x8 + ST_NOSUID = 0x2 + ST_RDONLY = 0x1 + ST_RELATIME = 0x1000 + ST_SYNCHRONOUS = 0x10 +) + +type TpacketHdr struct { + Status uint64 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Usec uint32 + _ [4]byte +} + +type Tpacket2Hdr struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Nsec uint32 + Vlan_tci uint16 + Vlan_tpid uint16 + _ [4]uint8 +} + +type Tpacket3Hdr struct { + Next_offset uint32 + Sec uint32 + Nsec uint32 + Snaplen uint32 + Len uint32 + Status uint32 + Mac uint16 + Net uint16 + Hv1 TpacketHdrVariant1 + _ [8]uint8 +} + +type TpacketHdrVariant1 struct { + Rxhash uint32 + Vlan_tci uint32 + Vlan_tpid uint16 + _ uint16 +} + +type TpacketBlockDesc struct { + Version uint32 + To_priv uint32 + Hdr [40]byte +} + +type TpacketReq struct { + Block_size uint32 + Block_nr 
uint32 + Frame_size uint32 + Frame_nr uint32 +} + +type TpacketReq3 struct { + Block_size uint32 + Block_nr uint32 + Frame_size uint32 + Frame_nr uint32 + Retire_blk_tov uint32 + Sizeof_priv uint32 + Feature_req_word uint32 +} + +type TpacketStats struct { + Packets uint32 + Drops uint32 +} + +type TpacketStatsV3 struct { + Packets uint32 + Drops uint32 + Freeze_q_cnt uint32 +} + +type TpacketAuxdata struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Vlan_tci uint16 + Vlan_tpid uint16 +} + +const ( + TPACKET_V1 = 0x0 + TPACKET_V2 = 0x1 + TPACKET_V3 = 0x2 +) + +const ( + SizeofTpacketHdr = 0x20 + SizeofTpacket2Hdr = 0x20 + SizeofTpacket3Hdr = 0x30 +) + +const ( + NF_INET_PRE_ROUTING = 0x0 + NF_INET_LOCAL_IN = 0x1 + NF_INET_FORWARD = 0x2 + NF_INET_LOCAL_OUT = 0x3 + NF_INET_POST_ROUTING = 0x4 + NF_INET_NUMHOOKS = 0x5 +) + +const ( + NF_NETDEV_INGRESS = 0x0 + NF_NETDEV_NUMHOOKS = 0x1 +) + +const ( + NFPROTO_UNSPEC = 0x0 + NFPROTO_INET = 0x1 + NFPROTO_IPV4 = 0x2 + NFPROTO_ARP = 0x3 + NFPROTO_NETDEV = 0x5 + NFPROTO_BRIDGE = 0x7 + NFPROTO_IPV6 = 0xa + NFPROTO_DECNET = 0xc + NFPROTO_NUMPROTO = 0xd +) + +type Nfgenmsg struct { + Nfgen_family uint8 + Version uint8 + Res_id uint16 +} + +const ( + NFNL_BATCH_UNSPEC = 0x0 + NFNL_BATCH_GENID = 0x1 +) + +const ( + NFT_REG_VERDICT = 0x0 + NFT_REG_1 = 0x1 + NFT_REG_2 = 0x2 + NFT_REG_3 = 0x3 + NFT_REG_4 = 0x4 + NFT_REG32_00 = 0x8 + NFT_REG32_01 = 0x9 + NFT_REG32_02 = 0xa + NFT_REG32_03 = 0xb + NFT_REG32_04 = 0xc + NFT_REG32_05 = 0xd + NFT_REG32_06 = 0xe + NFT_REG32_07 = 0xf + NFT_REG32_08 = 0x10 + NFT_REG32_09 = 0x11 + NFT_REG32_10 = 0x12 + NFT_REG32_11 = 0x13 + NFT_REG32_12 = 0x14 + NFT_REG32_13 = 0x15 + NFT_REG32_14 = 0x16 + NFT_REG32_15 = 0x17 + NFT_CONTINUE = -0x1 + NFT_BREAK = -0x2 + NFT_JUMP = -0x3 + NFT_GOTO = -0x4 + NFT_RETURN = -0x5 + NFT_MSG_NEWTABLE = 0x0 + NFT_MSG_GETTABLE = 0x1 + NFT_MSG_DELTABLE = 0x2 + NFT_MSG_NEWCHAIN = 0x3 + NFT_MSG_GETCHAIN = 0x4 + NFT_MSG_DELCHAIN = 0x5 + NFT_MSG_NEWRULE = 0x6 + NFT_MSG_GETRULE = 0x7 + NFT_MSG_DELRULE = 0x8 + NFT_MSG_NEWSET = 0x9 + NFT_MSG_GETSET = 0xa + NFT_MSG_DELSET = 0xb + NFT_MSG_NEWSETELEM = 0xc + NFT_MSG_GETSETELEM = 0xd + NFT_MSG_DELSETELEM = 0xe + NFT_MSG_NEWGEN = 0xf + NFT_MSG_GETGEN = 0x10 + NFT_MSG_TRACE = 0x11 + NFT_MSG_NEWOBJ = 0x12 + NFT_MSG_GETOBJ = 0x13 + NFT_MSG_DELOBJ = 0x14 + NFT_MSG_GETOBJ_RESET = 0x15 + NFT_MSG_MAX = 0x19 + NFTA_LIST_UNPEC = 0x0 + NFTA_LIST_ELEM = 0x1 + NFTA_HOOK_UNSPEC = 0x0 + NFTA_HOOK_HOOKNUM = 0x1 + NFTA_HOOK_PRIORITY = 0x2 + NFTA_HOOK_DEV = 0x3 + NFT_TABLE_F_DORMANT = 0x1 + NFTA_TABLE_UNSPEC = 0x0 + NFTA_TABLE_NAME = 0x1 + NFTA_TABLE_FLAGS = 0x2 + NFTA_TABLE_USE = 0x3 + NFTA_CHAIN_UNSPEC = 0x0 + NFTA_CHAIN_TABLE = 0x1 + NFTA_CHAIN_HANDLE = 0x2 + NFTA_CHAIN_NAME = 0x3 + NFTA_CHAIN_HOOK = 0x4 + NFTA_CHAIN_POLICY = 0x5 + NFTA_CHAIN_USE = 0x6 + NFTA_CHAIN_TYPE = 0x7 + NFTA_CHAIN_COUNTERS = 0x8 + NFTA_CHAIN_PAD = 0x9 + NFTA_RULE_UNSPEC = 0x0 + NFTA_RULE_TABLE = 0x1 + NFTA_RULE_CHAIN = 0x2 + NFTA_RULE_HANDLE = 0x3 + NFTA_RULE_EXPRESSIONS = 0x4 + NFTA_RULE_COMPAT = 0x5 + NFTA_RULE_POSITION = 0x6 + NFTA_RULE_USERDATA = 0x7 + NFTA_RULE_PAD = 0x8 + NFTA_RULE_ID = 0x9 + NFT_RULE_COMPAT_F_INV = 0x2 + NFT_RULE_COMPAT_F_MASK = 0x2 + NFTA_RULE_COMPAT_UNSPEC = 0x0 + NFTA_RULE_COMPAT_PROTO = 0x1 + NFTA_RULE_COMPAT_FLAGS = 0x2 + NFT_SET_ANONYMOUS = 0x1 + NFT_SET_CONSTANT = 0x2 + NFT_SET_INTERVAL = 0x4 + NFT_SET_MAP = 0x8 + NFT_SET_TIMEOUT = 0x10 + NFT_SET_EVAL = 0x20 + NFT_SET_OBJECT = 0x40 + NFT_SET_POL_PERFORMANCE = 0x0 + NFT_SET_POL_MEMORY = 0x1 
+ NFTA_SET_DESC_UNSPEC = 0x0 + NFTA_SET_DESC_SIZE = 0x1 + NFTA_SET_UNSPEC = 0x0 + NFTA_SET_TABLE = 0x1 + NFTA_SET_NAME = 0x2 + NFTA_SET_FLAGS = 0x3 + NFTA_SET_KEY_TYPE = 0x4 + NFTA_SET_KEY_LEN = 0x5 + NFTA_SET_DATA_TYPE = 0x6 + NFTA_SET_DATA_LEN = 0x7 + NFTA_SET_POLICY = 0x8 + NFTA_SET_DESC = 0x9 + NFTA_SET_ID = 0xa + NFTA_SET_TIMEOUT = 0xb + NFTA_SET_GC_INTERVAL = 0xc + NFTA_SET_USERDATA = 0xd + NFTA_SET_PAD = 0xe + NFTA_SET_OBJ_TYPE = 0xf + NFT_SET_ELEM_INTERVAL_END = 0x1 + NFTA_SET_ELEM_UNSPEC = 0x0 + NFTA_SET_ELEM_KEY = 0x1 + NFTA_SET_ELEM_DATA = 0x2 + NFTA_SET_ELEM_FLAGS = 0x3 + NFTA_SET_ELEM_TIMEOUT = 0x4 + NFTA_SET_ELEM_EXPIRATION = 0x5 + NFTA_SET_ELEM_USERDATA = 0x6 + NFTA_SET_ELEM_EXPR = 0x7 + NFTA_SET_ELEM_PAD = 0x8 + NFTA_SET_ELEM_OBJREF = 0x9 + NFTA_SET_ELEM_LIST_UNSPEC = 0x0 + NFTA_SET_ELEM_LIST_TABLE = 0x1 + NFTA_SET_ELEM_LIST_SET = 0x2 + NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 + NFTA_SET_ELEM_LIST_SET_ID = 0x4 + NFT_DATA_VALUE = 0x0 + NFT_DATA_VERDICT = 0xffffff00 + NFTA_DATA_UNSPEC = 0x0 + NFTA_DATA_VALUE = 0x1 + NFTA_DATA_VERDICT = 0x2 + NFTA_VERDICT_UNSPEC = 0x0 + NFTA_VERDICT_CODE = 0x1 + NFTA_VERDICT_CHAIN = 0x2 + NFTA_EXPR_UNSPEC = 0x0 + NFTA_EXPR_NAME = 0x1 + NFTA_EXPR_DATA = 0x2 + NFTA_IMMEDIATE_UNSPEC = 0x0 + NFTA_IMMEDIATE_DREG = 0x1 + NFTA_IMMEDIATE_DATA = 0x2 + NFTA_BITWISE_UNSPEC = 0x0 + NFTA_BITWISE_SREG = 0x1 + NFTA_BITWISE_DREG = 0x2 + NFTA_BITWISE_LEN = 0x3 + NFTA_BITWISE_MASK = 0x4 + NFTA_BITWISE_XOR = 0x5 + NFT_BYTEORDER_NTOH = 0x0 + NFT_BYTEORDER_HTON = 0x1 + NFTA_BYTEORDER_UNSPEC = 0x0 + NFTA_BYTEORDER_SREG = 0x1 + NFTA_BYTEORDER_DREG = 0x2 + NFTA_BYTEORDER_OP = 0x3 + NFTA_BYTEORDER_LEN = 0x4 + NFTA_BYTEORDER_SIZE = 0x5 + NFT_CMP_EQ = 0x0 + NFT_CMP_NEQ = 0x1 + NFT_CMP_LT = 0x2 + NFT_CMP_LTE = 0x3 + NFT_CMP_GT = 0x4 + NFT_CMP_GTE = 0x5 + NFTA_CMP_UNSPEC = 0x0 + NFTA_CMP_SREG = 0x1 + NFTA_CMP_OP = 0x2 + NFTA_CMP_DATA = 0x3 + NFT_RANGE_EQ = 0x0 + NFT_RANGE_NEQ = 0x1 + NFTA_RANGE_UNSPEC = 0x0 + NFTA_RANGE_SREG = 0x1 + NFTA_RANGE_OP = 0x2 + NFTA_RANGE_FROM_DATA = 0x3 + NFTA_RANGE_TO_DATA = 0x4 + NFT_LOOKUP_F_INV = 0x1 + NFTA_LOOKUP_UNSPEC = 0x0 + NFTA_LOOKUP_SET = 0x1 + NFTA_LOOKUP_SREG = 0x2 + NFTA_LOOKUP_DREG = 0x3 + NFTA_LOOKUP_SET_ID = 0x4 + NFTA_LOOKUP_FLAGS = 0x5 + NFT_DYNSET_OP_ADD = 0x0 + NFT_DYNSET_OP_UPDATE = 0x1 + NFT_DYNSET_F_INV = 0x1 + NFTA_DYNSET_UNSPEC = 0x0 + NFTA_DYNSET_SET_NAME = 0x1 + NFTA_DYNSET_SET_ID = 0x2 + NFTA_DYNSET_OP = 0x3 + NFTA_DYNSET_SREG_KEY = 0x4 + NFTA_DYNSET_SREG_DATA = 0x5 + NFTA_DYNSET_TIMEOUT = 0x6 + NFTA_DYNSET_EXPR = 0x7 + NFTA_DYNSET_PAD = 0x8 + NFTA_DYNSET_FLAGS = 0x9 + NFT_PAYLOAD_LL_HEADER = 0x0 + NFT_PAYLOAD_NETWORK_HEADER = 0x1 + NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 + NFT_PAYLOAD_CSUM_NONE = 0x0 + NFT_PAYLOAD_CSUM_INET = 0x1 + NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 + NFTA_PAYLOAD_UNSPEC = 0x0 + NFTA_PAYLOAD_DREG = 0x1 + NFTA_PAYLOAD_BASE = 0x2 + NFTA_PAYLOAD_OFFSET = 0x3 + NFTA_PAYLOAD_LEN = 0x4 + NFTA_PAYLOAD_SREG = 0x5 + NFTA_PAYLOAD_CSUM_TYPE = 0x6 + NFTA_PAYLOAD_CSUM_OFFSET = 0x7 + NFTA_PAYLOAD_CSUM_FLAGS = 0x8 + NFT_EXTHDR_F_PRESENT = 0x1 + NFT_EXTHDR_OP_IPV6 = 0x0 + NFT_EXTHDR_OP_TCPOPT = 0x1 + NFTA_EXTHDR_UNSPEC = 0x0 + NFTA_EXTHDR_DREG = 0x1 + NFTA_EXTHDR_TYPE = 0x2 + NFTA_EXTHDR_OFFSET = 0x3 + NFTA_EXTHDR_LEN = 0x4 + NFTA_EXTHDR_FLAGS = 0x5 + NFTA_EXTHDR_OP = 0x6 + NFTA_EXTHDR_SREG = 0x7 + NFT_META_LEN = 0x0 + NFT_META_PROTOCOL = 0x1 + NFT_META_PRIORITY = 0x2 + NFT_META_MARK = 0x3 + NFT_META_IIF = 0x4 + NFT_META_OIF = 0x5 + NFT_META_IIFNAME = 0x6 + NFT_META_OIFNAME = 0x7 + NFT_META_IIFTYPE = 0x8 + NFT_META_OIFTYPE = 
0x9 + NFT_META_SKUID = 0xa + NFT_META_SKGID = 0xb + NFT_META_NFTRACE = 0xc + NFT_META_RTCLASSID = 0xd + NFT_META_SECMARK = 0xe + NFT_META_NFPROTO = 0xf + NFT_META_L4PROTO = 0x10 + NFT_META_BRI_IIFNAME = 0x11 + NFT_META_BRI_OIFNAME = 0x12 + NFT_META_PKTTYPE = 0x13 + NFT_META_CPU = 0x14 + NFT_META_IIFGROUP = 0x15 + NFT_META_OIFGROUP = 0x16 + NFT_META_CGROUP = 0x17 + NFT_META_PRANDOM = 0x18 + NFT_RT_CLASSID = 0x0 + NFT_RT_NEXTHOP4 = 0x1 + NFT_RT_NEXTHOP6 = 0x2 + NFT_RT_TCPMSS = 0x3 + NFT_HASH_JENKINS = 0x0 + NFT_HASH_SYM = 0x1 + NFTA_HASH_UNSPEC = 0x0 + NFTA_HASH_SREG = 0x1 + NFTA_HASH_DREG = 0x2 + NFTA_HASH_LEN = 0x3 + NFTA_HASH_MODULUS = 0x4 + NFTA_HASH_SEED = 0x5 + NFTA_HASH_OFFSET = 0x6 + NFTA_HASH_TYPE = 0x7 + NFTA_META_UNSPEC = 0x0 + NFTA_META_DREG = 0x1 + NFTA_META_KEY = 0x2 + NFTA_META_SREG = 0x3 + NFTA_RT_UNSPEC = 0x0 + NFTA_RT_DREG = 0x1 + NFTA_RT_KEY = 0x2 + NFT_CT_STATE = 0x0 + NFT_CT_DIRECTION = 0x1 + NFT_CT_STATUS = 0x2 + NFT_CT_MARK = 0x3 + NFT_CT_SECMARK = 0x4 + NFT_CT_EXPIRATION = 0x5 + NFT_CT_HELPER = 0x6 + NFT_CT_L3PROTOCOL = 0x7 + NFT_CT_SRC = 0x8 + NFT_CT_DST = 0x9 + NFT_CT_PROTOCOL = 0xa + NFT_CT_PROTO_SRC = 0xb + NFT_CT_PROTO_DST = 0xc + NFT_CT_LABELS = 0xd + NFT_CT_PKTS = 0xe + NFT_CT_BYTES = 0xf + NFT_CT_AVGPKT = 0x10 + NFT_CT_ZONE = 0x11 + NFT_CT_EVENTMASK = 0x12 + NFTA_CT_UNSPEC = 0x0 + NFTA_CT_DREG = 0x1 + NFTA_CT_KEY = 0x2 + NFTA_CT_DIRECTION = 0x3 + NFTA_CT_SREG = 0x4 + NFT_LIMIT_PKTS = 0x0 + NFT_LIMIT_PKT_BYTES = 0x1 + NFT_LIMIT_F_INV = 0x1 + NFTA_LIMIT_UNSPEC = 0x0 + NFTA_LIMIT_RATE = 0x1 + NFTA_LIMIT_UNIT = 0x2 + NFTA_LIMIT_BURST = 0x3 + NFTA_LIMIT_TYPE = 0x4 + NFTA_LIMIT_FLAGS = 0x5 + NFTA_LIMIT_PAD = 0x6 + NFTA_COUNTER_UNSPEC = 0x0 + NFTA_COUNTER_BYTES = 0x1 + NFTA_COUNTER_PACKETS = 0x2 + NFTA_COUNTER_PAD = 0x3 + NFTA_LOG_UNSPEC = 0x0 + NFTA_LOG_GROUP = 0x1 + NFTA_LOG_PREFIX = 0x2 + NFTA_LOG_SNAPLEN = 0x3 + NFTA_LOG_QTHRESHOLD = 0x4 + NFTA_LOG_LEVEL = 0x5 + NFTA_LOG_FLAGS = 0x6 + NFTA_QUEUE_UNSPEC = 0x0 + NFTA_QUEUE_NUM = 0x1 + NFTA_QUEUE_TOTAL = 0x2 + NFTA_QUEUE_FLAGS = 0x3 + NFTA_QUEUE_SREG_QNUM = 0x4 + NFT_QUOTA_F_INV = 0x1 + NFT_QUOTA_F_DEPLETED = 0x2 + NFTA_QUOTA_UNSPEC = 0x0 + NFTA_QUOTA_BYTES = 0x1 + NFTA_QUOTA_FLAGS = 0x2 + NFTA_QUOTA_PAD = 0x3 + NFTA_QUOTA_CONSUMED = 0x4 + NFT_REJECT_ICMP_UNREACH = 0x0 + NFT_REJECT_TCP_RST = 0x1 + NFT_REJECT_ICMPX_UNREACH = 0x2 + NFT_REJECT_ICMPX_NO_ROUTE = 0x0 + NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 + NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 + NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 + NFTA_REJECT_UNSPEC = 0x0 + NFTA_REJECT_TYPE = 0x1 + NFTA_REJECT_ICMP_CODE = 0x2 + NFT_NAT_SNAT = 0x0 + NFT_NAT_DNAT = 0x1 + NFTA_NAT_UNSPEC = 0x0 + NFTA_NAT_TYPE = 0x1 + NFTA_NAT_FAMILY = 0x2 + NFTA_NAT_REG_ADDR_MIN = 0x3 + NFTA_NAT_REG_ADDR_MAX = 0x4 + NFTA_NAT_REG_PROTO_MIN = 0x5 + NFTA_NAT_REG_PROTO_MAX = 0x6 + NFTA_NAT_FLAGS = 0x7 + NFTA_MASQ_UNSPEC = 0x0 + NFTA_MASQ_FLAGS = 0x1 + NFTA_MASQ_REG_PROTO_MIN = 0x2 + NFTA_MASQ_REG_PROTO_MAX = 0x3 + NFTA_REDIR_UNSPEC = 0x0 + NFTA_REDIR_REG_PROTO_MIN = 0x1 + NFTA_REDIR_REG_PROTO_MAX = 0x2 + NFTA_REDIR_FLAGS = 0x3 + NFTA_DUP_UNSPEC = 0x0 + NFTA_DUP_SREG_ADDR = 0x1 + NFTA_DUP_SREG_DEV = 0x2 + NFTA_FWD_UNSPEC = 0x0 + NFTA_FWD_SREG_DEV = 0x1 + NFTA_OBJREF_UNSPEC = 0x0 + NFTA_OBJREF_IMM_TYPE = 0x1 + NFTA_OBJREF_IMM_NAME = 0x2 + NFTA_OBJREF_SET_SREG = 0x3 + NFTA_OBJREF_SET_NAME = 0x4 + NFTA_OBJREF_SET_ID = 0x5 + NFTA_GEN_UNSPEC = 0x0 + NFTA_GEN_ID = 0x1 + NFTA_GEN_PROC_PID = 0x2 + NFTA_GEN_PROC_NAME = 0x3 + NFTA_FIB_UNSPEC = 0x0 + NFTA_FIB_DREG = 0x1 + NFTA_FIB_RESULT = 0x2 + NFTA_FIB_FLAGS = 0x3 + 
NFT_FIB_RESULT_UNSPEC = 0x0 + NFT_FIB_RESULT_OIF = 0x1 + NFT_FIB_RESULT_OIFNAME = 0x2 + NFT_FIB_RESULT_ADDRTYPE = 0x3 + NFTA_FIB_F_SADDR = 0x1 + NFTA_FIB_F_DADDR = 0x2 + NFTA_FIB_F_MARK = 0x4 + NFTA_FIB_F_IIF = 0x8 + NFTA_FIB_F_OIF = 0x10 + NFTA_FIB_F_PRESENT = 0x20 + NFTA_CT_HELPER_UNSPEC = 0x0 + NFTA_CT_HELPER_NAME = 0x1 + NFTA_CT_HELPER_L3PROTO = 0x2 + NFTA_CT_HELPER_L4PROTO = 0x3 + NFTA_OBJ_UNSPEC = 0x0 + NFTA_OBJ_TABLE = 0x1 + NFTA_OBJ_NAME = 0x2 + NFTA_OBJ_TYPE = 0x3 + NFTA_OBJ_DATA = 0x4 + NFTA_OBJ_USE = 0x5 + NFTA_TRACE_UNSPEC = 0x0 + NFTA_TRACE_TABLE = 0x1 + NFTA_TRACE_CHAIN = 0x2 + NFTA_TRACE_RULE_HANDLE = 0x3 + NFTA_TRACE_TYPE = 0x4 + NFTA_TRACE_VERDICT = 0x5 + NFTA_TRACE_ID = 0x6 + NFTA_TRACE_LL_HEADER = 0x7 + NFTA_TRACE_NETWORK_HEADER = 0x8 + NFTA_TRACE_TRANSPORT_HEADER = 0x9 + NFTA_TRACE_IIF = 0xa + NFTA_TRACE_IIFTYPE = 0xb + NFTA_TRACE_OIF = 0xc + NFTA_TRACE_OIFTYPE = 0xd + NFTA_TRACE_MARK = 0xe + NFTA_TRACE_NFPROTO = 0xf + NFTA_TRACE_POLICY = 0x10 + NFTA_TRACE_PAD = 0x11 + NFT_TRACETYPE_UNSPEC = 0x0 + NFT_TRACETYPE_POLICY = 0x1 + NFT_TRACETYPE_RETURN = 0x2 + NFT_TRACETYPE_RULE = 0x3 + NFTA_NG_UNSPEC = 0x0 + NFTA_NG_DREG = 0x1 + NFTA_NG_MODULUS = 0x2 + NFTA_NG_TYPE = 0x3 + NFTA_NG_OFFSET = 0x4 + NFT_NG_INCREMENTAL = 0x0 + NFT_NG_RANDOM = 0x1 +) + +type RTCTime struct { + Sec int32 + Min int32 + Hour int32 + Mday int32 + Mon int32 + Year int32 + Wday int32 + Yday int32 + Isdst int32 +} + +type RTCWkAlrm struct { + Enabled uint8 + Pending uint8 + _ [2]byte + Time RTCTime +} + +type RTCPLLInfo struct { + Ctrl int32 + Value int32 + Max int32 + Min int32 + Posmult int32 + Negmult int32 + Clock int64 +} + +type BlkpgIoctlArg struct { + Op int32 + Flags int32 + Datalen int32 + _ [4]byte + Data *byte +} + +type BlkpgPartition struct { + Start int64 + Length int64 + Pno int32 + Devname [64]uint8 + Volname [64]uint8 + _ [4]byte +} + +const ( + BLKPG = 0x1269 + BLKPG_ADD_PARTITION = 0x1 + BLKPG_DEL_PARTITION = 0x2 + BLKPG_RESIZE_PARTITION = 0x3 +) + +const ( + NETNSA_NONE = 0x0 + NETNSA_NSID = 0x1 + NETNSA_PID = 0x2 + NETNSA_FD = 0x3 +) diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 898ac45140e7..0c790546b20e 100644 --- a/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vertical-pod-autoscaler/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -514,6 +514,13 @@ const ( RTA_FLOW = 0xb RTA_CACHEINFO = 0xc RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 RTN_UNSPEC = 0x0 RTN_UNICAST = 0x1 RTN_LOCAL = 0x2 diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/sys/windows/types_windows.go b/vertical-pod-autoscaler/vendor/golang.org/x/sys/windows/types_windows.go index 4d0402a3019b..141ca81bd74d 100644 --- a/vertical-pod-autoscaler/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vertical-pod-autoscaler/vendor/golang.org/x/sys/windows/types_windows.go @@ -1453,7 +1453,7 @@ type SmallRect struct { Bottom int16 } -// Used with GetConsoleScreenBuffer to retreive information about a console +// Used with GetConsoleScreenBuffer to retrieve information about a console // screen buffer. See // https://docs.microsoft.com/en-us/windows/console/console-screen-buffer-info-str // for details. 
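The ztypes hunks above are generated netlink plumbing: fixed-size headers (NlMsghdr, RtMsg, RtAttr) plus the RTA_* attribute-type constants that callers switch on. As a rough illustration of how those pieces fit together — not part of this change — here is a minimal, self-contained Go sketch; the constant values are copied from the hunk above, the sample buffer is hypothetical, and it assumes a little-endian host (rtnetlink attributes are encoded in host byte order):

// rtaparse.go — illustrative sketch only.
package main

import (
	"encoding/binary"
	"fmt"
)

const (
	sizeofRtMsg  = 0xc // SizeofRtMsg in the hunk above
	sizeofRtAttr = 0x4 // SizeofRtAttr in the hunk above
	rtaAlignTo   = 4   // RTA_ALIGNTO from linux/rtnetlink.h

	rtaOif = 0x4 // RTA_OIF: output interface index
)

// rtaAlign rounds a length up to the 4-byte rtattr alignment.
func rtaAlign(n int) int { return (n + rtaAlignTo - 1) &^ (rtaAlignTo - 1) }

// walkRtAttrs iterates the attributes that follow the fixed-size RtMsg
// header in a route message payload, calling fn for each (type, value).
func walkRtAttrs(payload []byte, fn func(typ uint16, value []byte)) {
	b := payload[sizeofRtMsg:]
	for len(b) >= sizeofRtAttr {
		alen := int(binary.LittleEndian.Uint16(b[0:2])) // RtAttr.Len
		atyp := binary.LittleEndian.Uint16(b[2:4])      // RtAttr.Type
		if alen < sizeofRtAttr || alen > len(b) {
			return // malformed attribute; stop walking
		}
		fn(atyp, b[sizeofRtAttr:alen])
		b = b[rtaAlign(alen):] // attributes are 4-byte aligned
	}
}

func main() {
	// Hypothetical payload: a zeroed 12-byte RtMsg followed by one
	// RTA_OIF attribute (len=8, type=0x4, value=uint32 ifindex 2).
	payload := make([]byte, sizeofRtMsg, sizeofRtMsg+8)
	payload = append(payload, 8, 0, rtaOif, 0, 2, 0, 0, 0)

	walkRtAttrs(payload, func(typ uint16, val []byte) {
		if typ == rtaOif {
			fmt.Println("output interface index:", binary.LittleEndian.Uint32(val))
		}
	})
}

In practice this parsing is left to a netlink library; the sketch only shows what the generated sizes and RTA_* constants exist for.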
diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/AUTHORS b/vertical-pod-autoscaler/vendor/golang.org/x/tools/AUTHORS deleted file mode 100644 index 15167cd746c5..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/CONTRIBUTORS b/vertical-pod-autoscaler/vendor/golang.org/x/tools/CONTRIBUTORS deleted file mode 100644 index 1c4577e96806..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/LICENSE b/vertical-pod-autoscaler/vendor/golang.org/x/tools/LICENSE deleted file mode 100644 index 6a66aea5eafe..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/PATENTS b/vertical-pod-autoscaler/vendor/golang.org/x/tools/PATENTS deleted file mode 100644 index 733099041f84..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. 
- -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/cmd/goimports/doc.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/cmd/goimports/doc.go deleted file mode 100644 index f5bc914e9af0..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/cmd/goimports/doc.go +++ /dev/null @@ -1,45 +0,0 @@ -/* - -Command goimports updates your Go import lines, -adding missing ones and removing unreferenced ones. - - $ go get golang.org/x/tools/cmd/goimports - -It's a drop-in replacement for your editor's gofmt-on-save hook. -It has the same command-line interface as gofmt and formats -your code in the same way. - -For emacs, make sure you have the latest go-mode.el: - https://github.com/dominikh/go-mode.el -Then in your .emacs file: - (setq gofmt-command "goimports") - (add-to-list 'load-path "/home/you/somewhere/emacs/") - (require 'go-mode-load) - (add-hook 'before-save-hook 'gofmt-before-save) - -For vim, set "gofmt_command" to "goimports": - https://golang.org/change/39c724dd7f252 - https://golang.org/wiki/IDEsAndTextEditorPlugins - etc - -For GoSublime, follow the steps described here: - http://michaelwhatcott.com/gosublime-goimports/ - -For other editors, you probably know what to do. - -To exclude directories in your $GOPATH from being scanned for Go -files, goimports respects a configuration file at -$GOPATH/src/.goimportsignore which may contain blank lines, comment -lines (beginning with '#'), or lines naming a directory relative to -the configuration file to ignore when scanning. No globbing or regex -patterns are allowed. Use the "-v" verbose flag to verify it's -working and see what goimports is doing. - -File bugs or feature requests at: - - https://golang.org/issues/new?title=x/tools/cmd/goimports:+ - -Happy hacking! - -*/ -package main diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/cmd/goimports/goimports.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/cmd/goimports/goimports.go deleted file mode 100644 index b722c6a234c8..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/cmd/goimports/goimports.go +++ /dev/null @@ -1,362 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package main - -import ( - "bufio" - "bytes" - "errors" - "flag" - "fmt" - "go/scanner" - "io" - "io/ioutil" - "log" - "os" - "os/exec" - "path/filepath" - "runtime" - "runtime/pprof" - "strings" - - "golang.org/x/tools/imports" -) - -var ( - // main operation modes - list = flag.Bool("l", false, "list files whose formatting differs from goimport's") - write = flag.Bool("w", false, "write result to (source) file instead of stdout") - doDiff = flag.Bool("d", false, "display diffs instead of rewriting files") - srcdir = flag.String("srcdir", "", "choose imports as if source code is from `dir`. When operating on a single file, dir may instead be the complete file name.") - verbose bool // verbose logging - - cpuProfile = flag.String("cpuprofile", "", "CPU profile output") - memProfile = flag.String("memprofile", "", "memory profile output") - memProfileRate = flag.Int("memrate", 0, "if > 0, sets runtime.MemProfileRate") - - options = &imports.Options{ - TabWidth: 8, - TabIndent: true, - Comments: true, - Fragment: true, - } - exitCode = 0 -) - -func init() { - flag.BoolVar(&options.AllErrors, "e", false, "report all errors (not just the first 10 on different lines)") - flag.StringVar(&imports.LocalPrefix, "local", "", "put imports beginning with this string after 3rd-party packages") -} - -func report(err error) { - scanner.PrintError(os.Stderr, err) - exitCode = 2 -} - -func usage() { - fmt.Fprintf(os.Stderr, "usage: goimports [flags] [path ...]\n") - flag.PrintDefaults() - os.Exit(2) -} - -func isGoFile(f os.FileInfo) bool { - // ignore non-Go files - name := f.Name() - return !f.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go") -} - -// argumentType is which mode goimports was invoked as. -type argumentType int - -const ( - // fromStdin means the user is piping their source into goimports. - fromStdin argumentType = iota - - // singleArg is the common case from editors, when goimports is run on - // a single file. - singleArg - - // multipleArg is when the user ran "goimports file1.go file2.go" - // or ran goimports on a directory tree. - multipleArg -) - -func processFile(filename string, in io.Reader, out io.Writer, argType argumentType) error { - opt := options - if argType == fromStdin { - nopt := *options - nopt.Fragment = true - opt = &nopt - } - - if in == nil { - f, err := os.Open(filename) - if err != nil { - return err - } - defer f.Close() - in = f - } - - src, err := ioutil.ReadAll(in) - if err != nil { - return err - } - - target := filename - if *srcdir != "" { - // Determine whether the provided -srcdirc is a directory or file - // and then use it to override the target. - // - // See https://github.com/dominikh/go-mode.el/issues/146 - if isFile(*srcdir) { - if argType == multipleArg { - return errors.New("-srcdir value can't be a file when passing multiple arguments or when walking directories") - } - target = *srcdir - } else if argType == singleArg && strings.HasSuffix(*srcdir, ".go") && !isDir(*srcdir) { - // For a file which doesn't exist on disk yet, but might shortly. - // e.g. user in editor opens $DIR/newfile.go and newfile.go doesn't yet exist on disk. - // The goimports on-save hook writes the buffer to a temp file - // first and runs goimports before the actual save to newfile.go. - // The editor's buffer is named "newfile.go" so that is passed to goimports as: - // goimports -srcdir=/gopath/src/pkg/newfile.go /tmp/gofmtXXXXXXXX.go - // and then the editor reloads the result from the tmp file and writes - // it to newfile.go. 
- target = *srcdir - } else { - // Pretend that file is from *srcdir in order to decide - // visible imports correctly. - target = filepath.Join(*srcdir, filepath.Base(filename)) - } - } - - res, err := imports.Process(target, src, opt) - if err != nil { - return err - } - - if !bytes.Equal(src, res) { - // formatting has changed - if *list { - fmt.Fprintln(out, filename) - } - if *write { - err = ioutil.WriteFile(filename, res, 0) - if err != nil { - return err - } - } - if *doDiff { - data, err := diff(src, res, filename) - if err != nil { - return fmt.Errorf("computing diff: %s", err) - } - fmt.Printf("diff -u %s %s\n", filepath.ToSlash(filename+".orig"), filepath.ToSlash(filename)) - out.Write(data) - } - } - - if !*list && !*write && !*doDiff { - _, err = out.Write(res) - } - - return err -} - -func visitFile(path string, f os.FileInfo, err error) error { - if err == nil && isGoFile(f) { - err = processFile(path, nil, os.Stdout, multipleArg) - } - if err != nil { - report(err) - } - return nil -} - -func walkDir(path string) { - filepath.Walk(path, visitFile) -} - -func main() { - runtime.GOMAXPROCS(runtime.NumCPU()) - - // call gofmtMain in a separate function - // so that it can use defer and have them - // run before the exit. - gofmtMain() - os.Exit(exitCode) -} - -// parseFlags parses command line flags and returns the paths to process. -// It's a var so that custom implementations can replace it in other files. -var parseFlags = func() []string { - flag.BoolVar(&verbose, "v", false, "verbose logging") - - flag.Parse() - return flag.Args() -} - -func bufferedFileWriter(dest string) (w io.Writer, close func()) { - f, err := os.Create(dest) - if err != nil { - log.Fatal(err) - } - bw := bufio.NewWriter(f) - return bw, func() { - if err := bw.Flush(); err != nil { - log.Fatalf("error flushing %v: %v", dest, err) - } - if err := f.Close(); err != nil { - log.Fatal(err) - } - } -} - -func gofmtMain() { - flag.Usage = usage - paths := parseFlags() - - if *cpuProfile != "" { - bw, flush := bufferedFileWriter(*cpuProfile) - pprof.StartCPUProfile(bw) - defer flush() - defer pprof.StopCPUProfile() - } - // doTrace is a conditionally compiled wrapper around runtime/trace. It is - // used to allow goimports to compile under gccgo, which does not support - // runtime/trace. See https://golang.org/issue/15544. 
- defer doTrace()() - if *memProfileRate > 0 { - runtime.MemProfileRate = *memProfileRate - bw, flush := bufferedFileWriter(*memProfile) - defer func() { - runtime.GC() // materialize all statistics - if err := pprof.WriteHeapProfile(bw); err != nil { - log.Fatal(err) - } - flush() - }() - } - - if verbose { - log.SetFlags(log.LstdFlags | log.Lmicroseconds) - imports.Debug = true - } - if options.TabWidth < 0 { - fmt.Fprintf(os.Stderr, "negative tabwidth %d\n", options.TabWidth) - exitCode = 2 - return - } - - if len(paths) == 0 { - if err := processFile("", os.Stdin, os.Stdout, fromStdin); err != nil { - report(err) - } - return - } - - argType := singleArg - if len(paths) > 1 { - argType = multipleArg - } - - for _, path := range paths { - switch dir, err := os.Stat(path); { - case err != nil: - report(err) - case dir.IsDir(): - walkDir(path) - default: - if err := processFile(path, nil, os.Stdout, argType); err != nil { - report(err) - } - } - } -} - -func writeTempFile(dir, prefix string, data []byte) (string, error) { - file, err := ioutil.TempFile(dir, prefix) - if err != nil { - return "", err - } - _, err = file.Write(data) - if err1 := file.Close(); err == nil { - err = err1 - } - if err != nil { - os.Remove(file.Name()) - return "", err - } - return file.Name(), nil -} - -func diff(b1, b2 []byte, filename string) (data []byte, err error) { - f1, err := writeTempFile("", "gofmt", b1) - if err != nil { - return - } - defer os.Remove(f1) - - f2, err := writeTempFile("", "gofmt", b2) - if err != nil { - return - } - defer os.Remove(f2) - - cmd := "diff" - if runtime.GOOS == "plan9" { - cmd = "/bin/ape/diff" - } - - data, err = exec.Command(cmd, "-u", f1, f2).CombinedOutput() - if len(data) > 0 { - // diff exits with a non-zero status when the files don't match. - // Ignore that failure as long as we get output. - return replaceTempFilename(data, filename) - } - return -} - -// replaceTempFilename replaces temporary filenames in diff with actual one. -// -// --- /tmp/gofmt316145376 2017-02-03 19:13:00.280468375 -0500 -// +++ /tmp/gofmt617882815 2017-02-03 19:13:00.280468375 -0500 -// ... -// -> -// --- path/to/file.go.orig 2017-02-03 19:13:00.280468375 -0500 -// +++ path/to/file.go 2017-02-03 19:13:00.280468375 -0500 -// ... -func replaceTempFilename(diff []byte, filename string) ([]byte, error) { - bs := bytes.SplitN(diff, []byte{'\n'}, 3) - if len(bs) < 3 { - return nil, fmt.Errorf("got unexpected diff for %s", filename) - } - // Preserve timestamps. - var t0, t1 []byte - if i := bytes.LastIndexByte(bs[0], '\t'); i != -1 { - t0 = bs[0][i:] - } - if i := bytes.LastIndexByte(bs[1], '\t'); i != -1 { - t1 = bs[1][i:] - } - // Always print filepath with slash separator. - f := filepath.ToSlash(filename) - bs[0] = []byte(fmt.Sprintf("--- %s%s", f+".orig", t0)) - bs[1] = []byte(fmt.Sprintf("+++ %s%s", f, t1)) - return bytes.Join(bs, []byte{'\n'}), nil -} - -// isFile reports whether name is a file. -func isFile(name string) bool { - fi, err := os.Stat(name) - return err == nil && fi.Mode().IsRegular() -} - -// isDir reports whether name is a directory. 
-func isDir(name string) bool { - fi, err := os.Stat(name) - return err == nil && fi.IsDir() -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/cmd/goimports/goimports_gc.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/cmd/goimports/goimports_gc.go deleted file mode 100644 index 21d867eaab55..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/cmd/goimports/goimports_gc.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -package main - -import ( - "flag" - "runtime/trace" -) - -var traceProfile = flag.String("trace", "", "trace profile output") - -func doTrace() func() { - if *traceProfile != "" { - bw, flush := bufferedFileWriter(*traceProfile) - trace.Start(bw) - return func() { - flush() - trace.Stop() - } - } - return func() {} -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/cmd/goimports/goimports_not_gc.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/cmd/goimports/goimports_not_gc.go deleted file mode 100644 index f5531ceb3173..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/cmd/goimports/goimports_not_gc.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !gc - -package main - -func doTrace() func() { - return func() {} -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go deleted file mode 100644 index 6b7052b892ca..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go +++ /dev/null @@ -1,627 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package astutil - -// This file defines utilities for working with source positions. - -import ( - "fmt" - "go/ast" - "go/token" - "sort" -) - -// PathEnclosingInterval returns the node that encloses the source -// interval [start, end), and all its ancestors up to the AST root. -// -// The definition of "enclosing" used by this function considers -// additional whitespace abutting a node to be enclosed by it. -// In this example: -// -// z := x + y // add them -// <-A-> -// <----B-----> -// -// the ast.BinaryExpr(+) node is considered to enclose interval B -// even though its [Pos()..End()) is actually only interval A. -// This behaviour makes user interfaces more tolerant of imperfect -// input. -// -// This function treats tokens as nodes, though they are not included -// in the result. e.g. PathEnclosingInterval("+") returns the -// enclosing ast.BinaryExpr("x + y"). -// -// If start==end, the 1-char interval following start is used instead. -// -// The 'exact' result is true if the interval contains only path[0] -// and perhaps some adjacent whitespace. It is false if the interval -// overlaps multiple children of path[0], or if it contains only -// interior whitespace of path[0]. -// In this example: -// -// z := x + y // add them -// <--C--> <---E--> -// ^ -// D -// -// intervals C, D and E are inexact. C is contained by the -// z-assignment statement, because it spans three of its children (:=, -// x, +). 
So too is the 1-char interval D, because it contains only -// interior whitespace of the assignment. E is considered interior -// whitespace of the BlockStmt containing the assignment. -// -// Precondition: [start, end) both lie within the same file as root. -// TODO(adonovan): return (nil, false) in this case and remove precond. -// Requires FileSet; see loader.tokenFileContainsPos. -// -// Postcondition: path is never nil; it always contains at least 'root'. -// -func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) { - // fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging - - // Precondition: node.[Pos..End) and adjoining whitespace contain [start, end). - var visit func(node ast.Node) bool - visit = func(node ast.Node) bool { - path = append(path, node) - - nodePos := node.Pos() - nodeEnd := node.End() - - // fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging - - // Intersect [start, end) with interval of node. - if start < nodePos { - start = nodePos - } - if end > nodeEnd { - end = nodeEnd - } - - // Find sole child that contains [start, end). - children := childrenOf(node) - l := len(children) - for i, child := range children { - // [childPos, childEnd) is unaugmented interval of child. - childPos := child.Pos() - childEnd := child.End() - - // [augPos, augEnd) is whitespace-augmented interval of child. - augPos := childPos - augEnd := childEnd - if i > 0 { - augPos = children[i-1].End() // start of preceding whitespace - } - if i < l-1 { - nextChildPos := children[i+1].Pos() - // Does [start, end) lie between child and next child? - if start >= augEnd && end <= nextChildPos { - return false // inexact match - } - augEnd = nextChildPos // end of following whitespace - } - - // fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n", - // i, augPos, augEnd, start, end) // debugging - - // Does augmented child strictly contain [start, end)? - if augPos <= start && end <= augEnd { - _, isToken := child.(tokenNode) - return isToken || visit(child) - } - - // Does [start, end) overlap multiple children? - // i.e. left-augmented child contains start - // but LR-augmented child does not contain end. - if start < childEnd && end > augEnd { - break - } - } - - // No single child contained [start, end), - // so node is the result. Is it exact? - - // (It's tempting to put this condition before the - // child loop, but it gives the wrong result in the - // case where a node (e.g. ExprStmt) and its sole - // child have equal intervals.) - if start == nodePos && end == nodeEnd { - return true // exact match - } - - return false // inexact: overlaps multiple children - } - - if start > end { - start, end = end, start - } - - if start < root.End() && end > root.Pos() { - if start == end { - end = start + 1 // empty interval => interval of size 1 - } - exact = visit(root) - - // Reverse the path: - for i, l := 0, len(path); i < l/2; i++ { - path[i], path[l-1-i] = path[l-1-i], path[i] - } - } else { - // Selection lies within whitespace preceding the - // first (or following the last) declaration in the file. - // The result nonetheless always includes the ast.File. - path = append(path, root) - } - - return -} - -// tokenNode is a dummy implementation of ast.Node for a single token. -// They are used transiently by PathEnclosingInterval but never escape -// this package. 
-// -type tokenNode struct { - pos token.Pos - end token.Pos -} - -func (n tokenNode) Pos() token.Pos { - return n.pos -} - -func (n tokenNode) End() token.Pos { - return n.end -} - -func tok(pos token.Pos, len int) ast.Node { - return tokenNode{pos, pos + token.Pos(len)} -} - -// childrenOf returns the direct non-nil children of ast.Node n. -// It may include fake ast.Node implementations for bare tokens. -// it is not safe to call (e.g.) ast.Walk on such nodes. -// -func childrenOf(n ast.Node) []ast.Node { - var children []ast.Node - - // First add nodes for all true subtrees. - ast.Inspect(n, func(node ast.Node) bool { - if node == n { // push n - return true // recur - } - if node != nil { // push child - children = append(children, node) - } - return false // no recursion - }) - - // Then add fake Nodes for bare tokens. - switch n := n.(type) { - case *ast.ArrayType: - children = append(children, - tok(n.Lbrack, len("[")), - tok(n.Elt.End(), len("]"))) - - case *ast.AssignStmt: - children = append(children, - tok(n.TokPos, len(n.Tok.String()))) - - case *ast.BasicLit: - children = append(children, - tok(n.ValuePos, len(n.Value))) - - case *ast.BinaryExpr: - children = append(children, tok(n.OpPos, len(n.Op.String()))) - - case *ast.BlockStmt: - children = append(children, - tok(n.Lbrace, len("{")), - tok(n.Rbrace, len("}"))) - - case *ast.BranchStmt: - children = append(children, - tok(n.TokPos, len(n.Tok.String()))) - - case *ast.CallExpr: - children = append(children, - tok(n.Lparen, len("(")), - tok(n.Rparen, len(")"))) - if n.Ellipsis != 0 { - children = append(children, tok(n.Ellipsis, len("..."))) - } - - case *ast.CaseClause: - if n.List == nil { - children = append(children, - tok(n.Case, len("default"))) - } else { - children = append(children, - tok(n.Case, len("case"))) - } - children = append(children, tok(n.Colon, len(":"))) - - case *ast.ChanType: - switch n.Dir { - case ast.RECV: - children = append(children, tok(n.Begin, len("<-chan"))) - case ast.SEND: - children = append(children, tok(n.Begin, len("chan<-"))) - case ast.RECV | ast.SEND: - children = append(children, tok(n.Begin, len("chan"))) - } - - case *ast.CommClause: - if n.Comm == nil { - children = append(children, - tok(n.Case, len("default"))) - } else { - children = append(children, - tok(n.Case, len("case"))) - } - children = append(children, tok(n.Colon, len(":"))) - - case *ast.Comment: - // nop - - case *ast.CommentGroup: - // nop - - case *ast.CompositeLit: - children = append(children, - tok(n.Lbrace, len("{")), - tok(n.Rbrace, len("{"))) - - case *ast.DeclStmt: - // nop - - case *ast.DeferStmt: - children = append(children, - tok(n.Defer, len("defer"))) - - case *ast.Ellipsis: - children = append(children, - tok(n.Ellipsis, len("..."))) - - case *ast.EmptyStmt: - // nop - - case *ast.ExprStmt: - // nop - - case *ast.Field: - // TODO(adonovan): Field.{Doc,Comment,Tag}? - - case *ast.FieldList: - children = append(children, - tok(n.Opening, len("(")), - tok(n.Closing, len(")"))) - - case *ast.File: - // TODO test: Doc - children = append(children, - tok(n.Package, len("package"))) - - case *ast.ForStmt: - children = append(children, - tok(n.For, len("for"))) - - case *ast.FuncDecl: - // TODO(adonovan): FuncDecl.Comment? - - // Uniquely, FuncDecl breaks the invariant that - // preorder traversal yields tokens in lexical order: - // in fact, FuncDecl.Recv precedes FuncDecl.Type.Func. - // - // As a workaround, we inline the case for FuncType - // here and order things correctly. 
- // - children = nil // discard ast.Walk(FuncDecl) info subtrees - children = append(children, tok(n.Type.Func, len("func"))) - if n.Recv != nil { - children = append(children, n.Recv) - } - children = append(children, n.Name) - if n.Type.Params != nil { - children = append(children, n.Type.Params) - } - if n.Type.Results != nil { - children = append(children, n.Type.Results) - } - if n.Body != nil { - children = append(children, n.Body) - } - - case *ast.FuncLit: - // nop - - case *ast.FuncType: - if n.Func != 0 { - children = append(children, - tok(n.Func, len("func"))) - } - - case *ast.GenDecl: - children = append(children, - tok(n.TokPos, len(n.Tok.String()))) - if n.Lparen != 0 { - children = append(children, - tok(n.Lparen, len("(")), - tok(n.Rparen, len(")"))) - } - - case *ast.GoStmt: - children = append(children, - tok(n.Go, len("go"))) - - case *ast.Ident: - children = append(children, - tok(n.NamePos, len(n.Name))) - - case *ast.IfStmt: - children = append(children, - tok(n.If, len("if"))) - - case *ast.ImportSpec: - // TODO(adonovan): ImportSpec.{Doc,EndPos}? - - case *ast.IncDecStmt: - children = append(children, - tok(n.TokPos, len(n.Tok.String()))) - - case *ast.IndexExpr: - children = append(children, - tok(n.Lbrack, len("{")), - tok(n.Rbrack, len("}"))) - - case *ast.InterfaceType: - children = append(children, - tok(n.Interface, len("interface"))) - - case *ast.KeyValueExpr: - children = append(children, - tok(n.Colon, len(":"))) - - case *ast.LabeledStmt: - children = append(children, - tok(n.Colon, len(":"))) - - case *ast.MapType: - children = append(children, - tok(n.Map, len("map"))) - - case *ast.ParenExpr: - children = append(children, - tok(n.Lparen, len("(")), - tok(n.Rparen, len(")"))) - - case *ast.RangeStmt: - children = append(children, - tok(n.For, len("for")), - tok(n.TokPos, len(n.Tok.String()))) - - case *ast.ReturnStmt: - children = append(children, - tok(n.Return, len("return"))) - - case *ast.SelectStmt: - children = append(children, - tok(n.Select, len("select"))) - - case *ast.SelectorExpr: - // nop - - case *ast.SendStmt: - children = append(children, - tok(n.Arrow, len("<-"))) - - case *ast.SliceExpr: - children = append(children, - tok(n.Lbrack, len("[")), - tok(n.Rbrack, len("]"))) - - case *ast.StarExpr: - children = append(children, tok(n.Star, len("*"))) - - case *ast.StructType: - children = append(children, tok(n.Struct, len("struct"))) - - case *ast.SwitchStmt: - children = append(children, tok(n.Switch, len("switch"))) - - case *ast.TypeAssertExpr: - children = append(children, - tok(n.Lparen-1, len(".")), - tok(n.Lparen, len("(")), - tok(n.Rparen, len(")"))) - - case *ast.TypeSpec: - // TODO(adonovan): TypeSpec.{Doc,Comment}? - - case *ast.TypeSwitchStmt: - children = append(children, tok(n.Switch, len("switch"))) - - case *ast.UnaryExpr: - children = append(children, tok(n.OpPos, len(n.Op.String()))) - - case *ast.ValueSpec: - // TODO(adonovan): ValueSpec.{Doc,Comment}? - - case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt: - // nop - } - - // TODO(adonovan): opt: merge the logic of ast.Inspect() into - // the switch above so we can make interleaved callbacks for - // both Nodes and Tokens in the right order and avoid the need - // to sort. 
- sort.Sort(byPos(children)) - - return children -} - -type byPos []ast.Node - -func (sl byPos) Len() int { - return len(sl) -} -func (sl byPos) Less(i, j int) bool { - return sl[i].Pos() < sl[j].Pos() -} -func (sl byPos) Swap(i, j int) { - sl[i], sl[j] = sl[j], sl[i] -} - -// NodeDescription returns a description of the concrete type of n suitable -// for a user interface. -// -// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident, -// StarExpr) we could be much more specific given the path to the AST -// root. Perhaps we should do that. -// -func NodeDescription(n ast.Node) string { - switch n := n.(type) { - case *ast.ArrayType: - return "array type" - case *ast.AssignStmt: - return "assignment" - case *ast.BadDecl: - return "bad declaration" - case *ast.BadExpr: - return "bad expression" - case *ast.BadStmt: - return "bad statement" - case *ast.BasicLit: - return "basic literal" - case *ast.BinaryExpr: - return fmt.Sprintf("binary %s operation", n.Op) - case *ast.BlockStmt: - return "block" - case *ast.BranchStmt: - switch n.Tok { - case token.BREAK: - return "break statement" - case token.CONTINUE: - return "continue statement" - case token.GOTO: - return "goto statement" - case token.FALLTHROUGH: - return "fall-through statement" - } - case *ast.CallExpr: - if len(n.Args) == 1 && !n.Ellipsis.IsValid() { - return "function call (or conversion)" - } - return "function call" - case *ast.CaseClause: - return "case clause" - case *ast.ChanType: - return "channel type" - case *ast.CommClause: - return "communication clause" - case *ast.Comment: - return "comment" - case *ast.CommentGroup: - return "comment group" - case *ast.CompositeLit: - return "composite literal" - case *ast.DeclStmt: - return NodeDescription(n.Decl) + " statement" - case *ast.DeferStmt: - return "defer statement" - case *ast.Ellipsis: - return "ellipsis" - case *ast.EmptyStmt: - return "empty statement" - case *ast.ExprStmt: - return "expression statement" - case *ast.Field: - // Can be any of these: - // struct {x, y int} -- struct field(s) - // struct {T} -- anon struct field - // interface {I} -- interface embedding - // interface {f()} -- interface method - // func (A) func(B) C -- receiver, param(s), result(s) - return "field/method/parameter" - case *ast.FieldList: - return "field/method/parameter list" - case *ast.File: - return "source file" - case *ast.ForStmt: - return "for loop" - case *ast.FuncDecl: - return "function declaration" - case *ast.FuncLit: - return "function literal" - case *ast.FuncType: - return "function type" - case *ast.GenDecl: - switch n.Tok { - case token.IMPORT: - return "import declaration" - case token.CONST: - return "constant declaration" - case token.TYPE: - return "type declaration" - case token.VAR: - return "variable declaration" - } - case *ast.GoStmt: - return "go statement" - case *ast.Ident: - return "identifier" - case *ast.IfStmt: - return "if statement" - case *ast.ImportSpec: - return "import specification" - case *ast.IncDecStmt: - if n.Tok == token.INC { - return "increment statement" - } - return "decrement statement" - case *ast.IndexExpr: - return "index expression" - case *ast.InterfaceType: - return "interface type" - case *ast.KeyValueExpr: - return "key/value association" - case *ast.LabeledStmt: - return "statement label" - case *ast.MapType: - return "map type" - case *ast.Package: - return "package" - case *ast.ParenExpr: - return "parenthesized " + NodeDescription(n.X) - case *ast.RangeStmt: - return "range loop" - case *ast.ReturnStmt: - return 
"return statement" - case *ast.SelectStmt: - return "select statement" - case *ast.SelectorExpr: - return "selector" - case *ast.SendStmt: - return "channel send" - case *ast.SliceExpr: - return "slice expression" - case *ast.StarExpr: - return "*-operation" // load/store expr or pointer type - case *ast.StructType: - return "struct type" - case *ast.SwitchStmt: - return "switch statement" - case *ast.TypeAssertExpr: - return "type assertion" - case *ast.TypeSpec: - return "type specification" - case *ast.TypeSwitchStmt: - return "type switch" - case *ast.UnaryExpr: - return fmt.Sprintf("unary %s operation", n.Op) - case *ast.ValueSpec: - return "value specification" - - } - panic(fmt.Sprintf("unexpected node type: %T", n)) -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/ast/astutil/imports.go deleted file mode 100644 index 0f5db8b57e55..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/ast/astutil/imports.go +++ /dev/null @@ -1,450 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package astutil contains common utilities for working with the Go AST. -package astutil - -import ( - "fmt" - "go/ast" - "go/token" - "strconv" - "strings" -) - -// AddImport adds the import path to the file f, if absent. -func AddImport(fset *token.FileSet, f *ast.File, ipath string) (added bool) { - return AddNamedImport(fset, f, "", ipath) -} - -// AddNamedImport adds the import path to the file f, if absent. -// If name is not empty, it is used to rename the import. -// -// For example, calling -// AddNamedImport(fset, f, "pathpkg", "path") -// adds -// import pathpkg "path" -func AddNamedImport(fset *token.FileSet, f *ast.File, name, ipath string) (added bool) { - if imports(f, ipath) { - return false - } - - newImport := &ast.ImportSpec{ - Path: &ast.BasicLit{ - Kind: token.STRING, - Value: strconv.Quote(ipath), - }, - } - if name != "" { - newImport.Name = &ast.Ident{Name: name} - } - - // Find an import decl to add to. - // The goal is to find an existing import - // whose import path has the longest shared - // prefix with ipath. - var ( - bestMatch = -1 // length of longest shared prefix - lastImport = -1 // index in f.Decls of the file's final import decl - impDecl *ast.GenDecl // import decl containing the best match - impIndex = -1 // spec index in impDecl containing the best match - ) - for i, decl := range f.Decls { - gen, ok := decl.(*ast.GenDecl) - if ok && gen.Tok == token.IMPORT { - lastImport = i - // Do not add to import "C", to avoid disrupting the - // association with its doc comment, breaking cgo. - if declImports(gen, "C") { - continue - } - - // Match an empty import decl if that's all that is available. - if len(gen.Specs) == 0 && bestMatch == -1 { - impDecl = gen - } - - // Compute longest shared prefix with imports in this group. - for j, spec := range gen.Specs { - impspec := spec.(*ast.ImportSpec) - n := matchLen(importPath(impspec), ipath) - if n > bestMatch { - bestMatch = n - impDecl = gen - impIndex = j - } - } - } - } - - // If no import decl found, add one after the last import. - if impDecl == nil { - impDecl = &ast.GenDecl{ - Tok: token.IMPORT, - } - if lastImport >= 0 { - impDecl.TokPos = f.Decls[lastImport].End() - } else { - // There are no existing imports. 
- // Our new import goes after the package declaration and after - // the comment, if any, that starts on the same line as the - // package declaration. - impDecl.TokPos = f.Package - - file := fset.File(f.Package) - pkgLine := file.Line(f.Package) - for _, c := range f.Comments { - if file.Line(c.Pos()) > pkgLine { - break - } - impDecl.TokPos = c.End() - } - } - f.Decls = append(f.Decls, nil) - copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:]) - f.Decls[lastImport+1] = impDecl - } - - // Insert new import at insertAt. - insertAt := 0 - if impIndex >= 0 { - // insert after the found import - insertAt = impIndex + 1 - } - impDecl.Specs = append(impDecl.Specs, nil) - copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:]) - impDecl.Specs[insertAt] = newImport - pos := impDecl.Pos() - if insertAt > 0 { - // If there is a comment after an existing import, preserve the comment - // position by adding the new import after the comment. - if spec, ok := impDecl.Specs[insertAt-1].(*ast.ImportSpec); ok && spec.Comment != nil { - pos = spec.Comment.End() - } else { - // Assign same position as the previous import, - // so that the sorter sees it as being in the same block. - pos = impDecl.Specs[insertAt-1].Pos() - } - } - if newImport.Name != nil { - newImport.Name.NamePos = pos - } - newImport.Path.ValuePos = pos - newImport.EndPos = pos - - // Clean up parens. impDecl contains at least one spec. - if len(impDecl.Specs) == 1 { - // Remove unneeded parens. - impDecl.Lparen = token.NoPos - } else if !impDecl.Lparen.IsValid() { - // impDecl needs parens added. - impDecl.Lparen = impDecl.Specs[0].Pos() - } - - f.Imports = append(f.Imports, newImport) - - if len(f.Decls) <= 1 { - return true - } - - // Merge all the import declarations into the first one. - var first *ast.GenDecl - for i := 0; i < len(f.Decls); i++ { - decl := f.Decls[i] - gen, ok := decl.(*ast.GenDecl) - if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") { - continue - } - if first == nil { - first = gen - continue // Don't touch the first one. - } - // We now know there is more than one package in this import - // declaration. Ensure that it ends up parenthesized. - first.Lparen = first.Pos() - // Move the imports of the other import declaration to the first one. - for _, spec := range gen.Specs { - spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() - first.Specs = append(first.Specs, spec) - } - f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) - i-- - } - - return true -} - -// DeleteImport deletes the import path from the file f, if present. -func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) { - return DeleteNamedImport(fset, f, "", path) -} - -// DeleteNamedImport deletes the import with the given name and path from the file f, if present. -func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) { - var delspecs []*ast.ImportSpec - var delcomments []*ast.CommentGroup - - // Find the import nodes that import path, if any. - for i := 0; i < len(f.Decls); i++ { - decl := f.Decls[i] - gen, ok := decl.(*ast.GenDecl) - if !ok || gen.Tok != token.IMPORT { - continue - } - for j := 0; j < len(gen.Specs); j++ { - spec := gen.Specs[j] - impspec := spec.(*ast.ImportSpec) - if impspec.Name == nil && name != "" { - continue - } - if impspec.Name != nil && impspec.Name.Name != name { - continue - } - if importPath(impspec) != path { - continue - } - - // We found an import spec that imports path. - // Delete it. 
- delspecs = append(delspecs, impspec) - deleted = true - copy(gen.Specs[j:], gen.Specs[j+1:]) - gen.Specs = gen.Specs[:len(gen.Specs)-1] - - // If this was the last import spec in this decl, - // delete the decl, too. - if len(gen.Specs) == 0 { - copy(f.Decls[i:], f.Decls[i+1:]) - f.Decls = f.Decls[:len(f.Decls)-1] - i-- - break - } else if len(gen.Specs) == 1 { - if impspec.Doc != nil { - delcomments = append(delcomments, impspec.Doc) - } - if impspec.Comment != nil { - delcomments = append(delcomments, impspec.Comment) - } - for _, cg := range f.Comments { - // Found comment on the same line as the import spec. - if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line { - delcomments = append(delcomments, cg) - break - } - } - - spec := gen.Specs[0].(*ast.ImportSpec) - - // Move the documentation right after the import decl. - if spec.Doc != nil { - for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Doc.Pos()).Line { - fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) - } - } - for _, cg := range f.Comments { - if cg.End() < spec.Pos() && fset.Position(cg.End()).Line == fset.Position(spec.Pos()).Line { - for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Pos()).Line { - fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) - } - break - } - } - } - if j > 0 { - lastImpspec := gen.Specs[j-1].(*ast.ImportSpec) - lastLine := fset.Position(lastImpspec.Path.ValuePos).Line - line := fset.Position(impspec.Path.ValuePos).Line - - // We deleted an entry but now there may be - // a blank line-sized hole where the import was. - if line-lastLine > 1 { - // There was a blank line immediately preceding the deleted import, - // so there's no need to close the hole. - // Do nothing. - } else { - // There was no blank line. Close the hole. - fset.File(gen.Rparen).MergeLine(line) - } - } - j-- - } - } - - // Delete imports from f.Imports. - for i := 0; i < len(f.Imports); i++ { - imp := f.Imports[i] - for j, del := range delspecs { - if imp == del { - copy(f.Imports[i:], f.Imports[i+1:]) - f.Imports = f.Imports[:len(f.Imports)-1] - copy(delspecs[j:], delspecs[j+1:]) - delspecs = delspecs[:len(delspecs)-1] - i-- - break - } - } - } - - // Delete comments from f.Comments. - for i := 0; i < len(f.Comments); i++ { - cg := f.Comments[i] - for j, del := range delcomments { - if cg == del { - copy(f.Comments[i:], f.Comments[i+1:]) - f.Comments = f.Comments[:len(f.Comments)-1] - copy(delcomments[j:], delcomments[j+1:]) - delcomments = delcomments[:len(delcomments)-1] - i-- - break - } - } - } - - if len(delspecs) > 0 { - panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs)) - } - - return -} - -// RewriteImport rewrites any import of path oldPath to path newPath. -func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (rewrote bool) { - for _, imp := range f.Imports { - if importPath(imp) == oldPath { - rewrote = true - // record old End, because the default is to compute - // it using the length of imp.Path.Value. - imp.EndPos = imp.End() - imp.Path.Value = strconv.Quote(newPath) - } - } - return -} - -// UsesImport reports whether a given import is used. -func UsesImport(f *ast.File, path string) (used bool) { - spec := importSpec(f, path) - if spec == nil { - return - } - - name := spec.Name.String() - switch name { - case "": - // If the package name is not explicitly specified, - // make an educated guess. This is not guaranteed to be correct. 
- lastSlash := strings.LastIndex(path, "/") - if lastSlash == -1 { - name = path - } else { - name = path[lastSlash+1:] - } - case "_", ".": - // Not sure if this import is used - err on the side of caution. - return true - } - - ast.Walk(visitFn(func(n ast.Node) { - sel, ok := n.(*ast.SelectorExpr) - if ok && isTopName(sel.X, name) { - used = true - } - }), f) - - return -} - -type visitFn func(node ast.Node) - -func (fn visitFn) Visit(node ast.Node) ast.Visitor { - fn(node) - return fn -} - -// imports returns true if f imports path. -func imports(f *ast.File, path string) bool { - return importSpec(f, path) != nil -} - -// importSpec returns the import spec if f imports path, -// or nil otherwise. -func importSpec(f *ast.File, path string) *ast.ImportSpec { - for _, s := range f.Imports { - if importPath(s) == path { - return s - } - } - return nil -} - -// importPath returns the unquoted import path of s, -// or "" if the path is not properly quoted. -func importPath(s *ast.ImportSpec) string { - t, err := strconv.Unquote(s.Path.Value) - if err == nil { - return t - } - return "" -} - -// declImports reports whether gen contains an import of path. -func declImports(gen *ast.GenDecl, path string) bool { - if gen.Tok != token.IMPORT { - return false - } - for _, spec := range gen.Specs { - impspec := spec.(*ast.ImportSpec) - if importPath(impspec) == path { - return true - } - } - return false -} - -// matchLen returns the length of the longest path segment prefix shared by x and y. -func matchLen(x, y string) int { - n := 0 - for i := 0; i < len(x) && i < len(y) && x[i] == y[i]; i++ { - if x[i] == '/' { - n++ - } - } - return n -} - -// isTopName returns true if n is a top-level unresolved identifier with the given name. -func isTopName(n ast.Expr, name string) bool { - id, ok := n.(*ast.Ident) - return ok && id.Name == name && id.Obj == nil -} - -// Imports returns the file imports grouped by paragraph. -func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec { - var groups [][]*ast.ImportSpec - - for _, decl := range f.Decls { - genDecl, ok := decl.(*ast.GenDecl) - if !ok || genDecl.Tok != token.IMPORT { - break - } - - group := []*ast.ImportSpec{} - - var lastLine int - for _, spec := range genDecl.Specs { - importSpec := spec.(*ast.ImportSpec) - pos := importSpec.Path.ValuePos - line := fset.Position(pos).Line - if lastLine > 0 && pos > 0 && line-lastLine > 1 { - groups = append(groups, group) - group = []*ast.ImportSpec{} - } - group = append(group, importSpec) - lastLine = line - } - groups = append(groups, group) - } - - return groups -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/ast/astutil/util.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/ast/astutil/util.go deleted file mode 100644 index 7630629824af..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/ast/astutil/util.go +++ /dev/null @@ -1,14 +0,0 @@ -package astutil - -import "go/ast" - -// Unparen returns e with any enclosing parentheses stripped. -func Unparen(e ast.Expr) ast.Expr { - for { - p, ok := e.(*ast.ParenExpr) - if !ok { - return e - } - e = p.X - } -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/buildutil/allpackages.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/buildutil/allpackages.go deleted file mode 100644 index 3d29cf3b8969..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/buildutil/allpackages.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. 
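
> Reviewer note: a minimal usage sketch of the astutil import helpers deleted above (illustrative only; `demo.go` and the source snippet are invented, the signatures are the ones shown in the deleted file):

```go
package main

import (
	"go/parser"
	"go/printer"
	"go/token"
	"os"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	fset := token.NewFileSet()
	src := `package demo

import "fmt"

func main() { fmt.Println("hi") }
`
	f, err := parser.ParseFile(fset, "demo.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}

	// Insert `import pathpkg "path"` next to the closest existing group.
	astutil.AddNamedImport(fset, f, "pathpkg", "path")

	// Nothing references pathpkg, so UsesImport reports false and the
	// import can be deleted again, restoring the original file.
	if !astutil.UsesImport(f, "path") {
		astutil.DeleteNamedImport(fset, f, "pathpkg", "path")
	}

	printer.Fprint(os.Stdout, fset, f)
}
```
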
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package buildutil provides utilities related to the go/build -// package in the standard library. -// -// All I/O is done via the build.Context file system interface, which must -// be concurrency-safe. -package buildutil - -import ( - "go/build" - "os" - "path/filepath" - "sort" - "strings" - "sync" -) - -// AllPackages returns the package path of each Go package in any source -// directory of the specified build context (e.g. $GOROOT or an element -// of $GOPATH). Errors are ignored. The results are sorted. -// All package paths are canonical, and thus may contain "/vendor/". -// -// The result may include import paths for directories that contain no -// *.go files, such as "archive" (in $GOROOT/src). -// -// All I/O is done via the build.Context file system interface, -// which must be concurrency-safe. -// -func AllPackages(ctxt *build.Context) []string { - var list []string - ForEachPackage(ctxt, func(pkg string, _ error) { - list = append(list, pkg) - }) - sort.Strings(list) - return list -} - -// ForEachPackage calls the found function with the package path of -// each Go package it finds in any source directory of the specified -// build context (e.g. $GOROOT or an element of $GOPATH). -// All package paths are canonical, and thus may contain "/vendor/". -// -// If the package directory exists but could not be read, the second -// argument to the found function provides the error. -// -// All I/O is done via the build.Context file system interface, -// which must be concurrency-safe. -// -func ForEachPackage(ctxt *build.Context, found func(importPath string, err error)) { - ch := make(chan item) - - var wg sync.WaitGroup - for _, root := range ctxt.SrcDirs() { - root := root - wg.Add(1) - go func() { - allPackages(ctxt, root, ch) - wg.Done() - }() - } - go func() { - wg.Wait() - close(ch) - }() - - // All calls to found occur in the caller's goroutine. - for i := range ch { - found(i.importPath, i.err) - } -} - -type item struct { - importPath string - err error // (optional) -} - -// We use a process-wide counting semaphore to limit -// the number of parallel calls to ReadDir. -var ioLimit = make(chan bool, 20) - -func allPackages(ctxt *build.Context, root string, ch chan<- item) { - root = filepath.Clean(root) + string(os.PathSeparator) - - var wg sync.WaitGroup - - var walkDir func(dir string) - walkDir = func(dir string) { - // Avoid .foo, _foo, and testdata directory trees. - base := filepath.Base(dir) - if base == "" || base[0] == '.' || base[0] == '_' || base == "testdata" { - return - } - - pkg := filepath.ToSlash(strings.TrimPrefix(dir, root)) - - // Prune search if we encounter any of these import paths. - switch pkg { - case "builtin": - return - } - - ioLimit <- true - files, err := ReadDir(ctxt, dir) - <-ioLimit - if pkg != "" || err != nil { - ch <- item{pkg, err} - } - for _, fi := range files { - fi := fi - if fi.IsDir() { - wg.Add(1) - go func() { - walkDir(filepath.Join(dir, fi.Name())) - wg.Done() - }() - } - } - } - - walkDir(root) - wg.Wait() -} - -// ExpandPatterns returns the set of packages matched by patterns, -// which may have the following forms: -// -// golang.org/x/tools/cmd/guru # a single package -// golang.org/x/tools/... # all packages beneath dir -// ... # the entire workspace. -// -// Order is significant: a pattern preceded by '-' removes matching -// packages from the set. 
For example, these patterns match all encoding -// packages except encoding/xml: -// -// encoding/... -encoding/xml -// -// A trailing slash in a pattern is ignored. (Path components of Go -// package names are separated by slash, not the platform's path separator.) -// -func ExpandPatterns(ctxt *build.Context, patterns []string) map[string]bool { - // TODO(adonovan): support other features of 'go list': - // - "std"/"cmd"/"all" meta-packages - // - "..." not at the end of a pattern - // - relative patterns using "./" or "../" prefix - - pkgs := make(map[string]bool) - doPkg := func(pkg string, neg bool) { - if neg { - delete(pkgs, pkg) - } else { - pkgs[pkg] = true - } - } - - // Scan entire workspace if wildcards are present. - // TODO(adonovan): opt: scan only the necessary subtrees of the workspace. - var all []string - for _, arg := range patterns { - if strings.HasSuffix(arg, "...") { - all = AllPackages(ctxt) - break - } - } - - for _, arg := range patterns { - if arg == "" { - continue - } - - neg := arg[0] == '-' - if neg { - arg = arg[1:] - } - - if arg == "..." { - // ... matches all packages - for _, pkg := range all { - doPkg(pkg, neg) - } - } else if dir := strings.TrimSuffix(arg, "/..."); dir != arg { - // dir/... matches all packages beneath dir - for _, pkg := range all { - if strings.HasPrefix(pkg, dir) && - (len(pkg) == len(dir) || pkg[len(dir)] == '/') { - doPkg(pkg, neg) - } - } - } else { - // single package - doPkg(strings.TrimSuffix(arg, "/"), neg) - } - } - - return pkgs -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/buildutil/fakecontext.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/buildutil/fakecontext.go deleted file mode 100644 index 24cbcbea2ba9..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/buildutil/fakecontext.go +++ /dev/null @@ -1,108 +0,0 @@ -package buildutil - -import ( - "fmt" - "go/build" - "io" - "io/ioutil" - "os" - "path" - "path/filepath" - "sort" - "strings" - "time" -) - -// FakeContext returns a build.Context for the fake file tree specified -// by pkgs, which maps package import paths to a mapping from file base -// names to contents. -// -// The fake Context has a GOROOT of "/go" and no GOPATH, and overrides -// the necessary file access methods to read from memory instead of the -// real file system. -// -// Unlike a real file tree, the fake one has only two levels---packages -// and files---so ReadDir("/go/src/") returns all packages under -// /go/src/ including, for instance, "math" and "math/big". -// ReadDir("/go/src/math/big") would return all the files in the -// "math/big" package. 
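
> Reviewer note: a minimal sketch of `ExpandPatterns` (deleted above), mirroring the encoding example from its doc comment; not part of this diff, and it assumes a populated GOPATH/GOROOT to scan:

```go
package main

import (
	"fmt"
	"go/build"

	"golang.org/x/tools/go/buildutil"
)

func main() {
	// "encoding/..." matches every package under encoding/; the
	// "-encoding/xml" pattern then subtracts encoding/xml. The "..."
	// wildcard triggers a full workspace scan via AllPackages.
	pkgs := buildutil.ExpandPatterns(&build.Default, []string{
		"encoding/...", "-encoding/xml",
	})
	for pkg := range pkgs {
		fmt.Println(pkg)
	}
}
```
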
-// -func FakeContext(pkgs map[string]map[string]string) *build.Context { - clean := func(filename string) string { - f := path.Clean(filepath.ToSlash(filename)) - // Removing "/go/src" while respecting segment - // boundaries has this unfortunate corner case: - if f == "/go/src" { - return "" - } - return strings.TrimPrefix(f, "/go/src/") - } - - ctxt := build.Default // copy - ctxt.GOROOT = "/go" - ctxt.GOPATH = "" - ctxt.IsDir = func(dir string) bool { - dir = clean(dir) - if dir == "" { - return true // needed by (*build.Context).SrcDirs - } - return pkgs[dir] != nil - } - ctxt.ReadDir = func(dir string) ([]os.FileInfo, error) { - dir = clean(dir) - var fis []os.FileInfo - if dir == "" { - // enumerate packages - for importPath := range pkgs { - fis = append(fis, fakeDirInfo(importPath)) - } - } else { - // enumerate files of package - for basename := range pkgs[dir] { - fis = append(fis, fakeFileInfo(basename)) - } - } - sort.Sort(byName(fis)) - return fis, nil - } - ctxt.OpenFile = func(filename string) (io.ReadCloser, error) { - filename = clean(filename) - dir, base := path.Split(filename) - content, ok := pkgs[path.Clean(dir)][base] - if !ok { - return nil, fmt.Errorf("file not found: %s", filename) - } - return ioutil.NopCloser(strings.NewReader(content)), nil - } - ctxt.IsAbsPath = func(path string) bool { - path = filepath.ToSlash(path) - // Don't rely on the default (filepath.Path) since on - // Windows, it reports virtual paths as non-absolute. - return strings.HasPrefix(path, "/") - } - return &ctxt -} - -type byName []os.FileInfo - -func (s byName) Len() int { return len(s) } -func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() } - -type fakeFileInfo string - -func (fi fakeFileInfo) Name() string { return string(fi) } -func (fakeFileInfo) Sys() interface{} { return nil } -func (fakeFileInfo) ModTime() time.Time { return time.Time{} } -func (fakeFileInfo) IsDir() bool { return false } -func (fakeFileInfo) Size() int64 { return 0 } -func (fakeFileInfo) Mode() os.FileMode { return 0644 } - -type fakeDirInfo string - -func (fd fakeDirInfo) Name() string { return string(fd) } -func (fakeDirInfo) Sys() interface{} { return nil } -func (fakeDirInfo) ModTime() time.Time { return time.Time{} } -func (fakeDirInfo) IsDir() bool { return true } -func (fakeDirInfo) Size() int64 { return 0 } -func (fakeDirInfo) Mode() os.FileMode { return 0755 } diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/buildutil/overlay.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/buildutil/overlay.go deleted file mode 100644 index 3f71c4fef75e..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/buildutil/overlay.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package buildutil - -import ( - "bufio" - "bytes" - "fmt" - "go/build" - "io" - "io/ioutil" - "path/filepath" - "strconv" - "strings" -) - -// OverlayContext overlays a build.Context with additional files from -// a map. Files in the map take precedence over other files. -// -// In addition to plain string comparison, two file names are -// considered equal if their base names match and their directory -// components point at the same directory on the file system. That is, -// symbolic links are followed for directories, but not files. 
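
> Reviewer note: a minimal sketch of `FakeContext` (deleted above); the `example/hello` package is invented:

```go
package main

import (
	"fmt"

	"golang.org/x/tools/go/buildutil"
)

func main() {
	// The fake tree maps import path -> file base name -> contents,
	// rooted at the synthetic GOROOT "/go".
	ctxt := buildutil.FakeContext(map[string]map[string]string{
		"example/hello": {"hello.go": "package hello"},
	})
	fmt.Println(buildutil.AllPackages(ctxt)) // [example/hello]
}
```
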
-//
-// A common use case for OverlayContext is to allow editors to pass in
-// a set of unsaved, modified files.
-//
-// Currently, only the Context.OpenFile function will respect the
-// overlay. This may change in the future.
-func OverlayContext(orig *build.Context, overlay map[string][]byte) *build.Context {
-	// TODO(dominikh): Implement IsDir, HasSubdir and ReadDir
-
-	rc := func(data []byte) (io.ReadCloser, error) {
-		return ioutil.NopCloser(bytes.NewBuffer(data)), nil
-	}
-
-	copy := *orig // make a copy
-	ctxt := &copy
-	ctxt.OpenFile = func(path string) (io.ReadCloser, error) {
-		// Fast path: names match exactly.
-		if content, ok := overlay[path]; ok {
-			return rc(content)
-		}
-
-		// Slow path: check for same file under a different
-		// alias, perhaps due to a symbolic link.
-		for filename, content := range overlay {
-			if sameFile(path, filename) {
-				return rc(content)
-			}
-		}
-
-		return OpenFile(orig, path)
-	}
-	return ctxt
-}
-
-// ParseOverlayArchive parses an archive containing Go files and their
-// contents. The result is intended to be used with OverlayContext.
-//
-//
-// Archive format
-//
-// The archive consists of a series of files. Each file consists of a
-// name, a decimal file size and the file contents, separated by
-// newlines. No newline follows after the file contents.
-func ParseOverlayArchive(archive io.Reader) (map[string][]byte, error) {
-	overlay := make(map[string][]byte)
-	r := bufio.NewReader(archive)
-	for {
-		// Read file name.
-		filename, err := r.ReadString('\n')
-		if err != nil {
-			if err == io.EOF {
-				break // OK
-			}
-			return nil, fmt.Errorf("reading archive file name: %v", err)
-		}
-		filename = filepath.Clean(strings.TrimSpace(filename))
-
-		// Read file size.
-		sz, err := r.ReadString('\n')
-		if err != nil {
-			return nil, fmt.Errorf("reading size of archive file %s: %v", filename, err)
-		}
-		sz = strings.TrimSpace(sz)
-		size, err := strconv.ParseUint(sz, 10, 32)
-		if err != nil {
-			return nil, fmt.Errorf("parsing size of archive file %s: %v", filename, err)
-		}
-
-		// Read file content.
-		content := make([]byte, size)
-		if _, err := io.ReadFull(r, content); err != nil {
-			return nil, fmt.Errorf("reading archive file %s: %v", filename, err)
-		}
-		overlay[filename] = content
-	}
-
-	return overlay, nil
-}
diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/buildutil/tags.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/buildutil/tags.go
deleted file mode 100644
index 486606f3768d..000000000000
--- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/buildutil/tags.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package buildutil
-
-// This logic was copied from stringsFlag from $GOROOT/src/cmd/go/build.go.
-
-import "fmt"
-
-const TagsFlagDoc = "a list of `build tags` to consider satisfied during the build. " +
-	"For more information about build tags, see the description of " +
-	"build constraints in the documentation for the go/build package"
-
-// TagsFlag is an implementation of the flag.Value and flag.Getter interfaces that parses
-// a flag value in the same manner as go build's -tags flag and
-// populates a []string slice.
-//
-// See $GOROOT/src/go/build/doc.go for description of build tags.
-// See $GOROOT/src/cmd/go/doc.go for description of 'go build -tags' flag.
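
> Reviewer note: a minimal round trip through `ParseOverlayArchive` and `OverlayContext` (deleted above); not part of this diff. The archive content is invented and chosen so the byte count is exact ("package overlay" is 15 bytes):

```go
package main

import (
	"fmt"
	"go/build"
	"io/ioutil"
	"strings"

	"golang.org/x/tools/go/buildutil"
)

func main() {
	// One archive entry: file name, decimal size, then exactly that
	// many bytes of content, with no newline after the content.
	const archive = "overlay.go\n15\npackage overlay"

	overlay, err := buildutil.ParseOverlayArchive(strings.NewReader(archive))
	if err != nil {
		panic(err)
	}

	// OpenFile serves the overlay entry instead of touching disk.
	ctxt := buildutil.OverlayContext(&build.Default, overlay)
	r, err := ctxt.OpenFile("overlay.go")
	if err != nil {
		panic(err)
	}
	defer r.Close()
	b, _ := ioutil.ReadAll(r)
	fmt.Printf("%s\n", b) // package overlay
}
```
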
-// -// Example: -// flag.Var((*buildutil.TagsFlag)(&build.Default.BuildTags), "tags", buildutil.TagsFlagDoc) -type TagsFlag []string - -func (v *TagsFlag) Set(s string) error { - var err error - *v, err = splitQuotedFields(s) - if *v == nil { - *v = []string{} - } - return err -} - -func (v *TagsFlag) Get() interface{} { return *v } - -func splitQuotedFields(s string) ([]string, error) { - // Split fields allowing '' or "" around elements. - // Quotes further inside the string do not count. - var f []string - for len(s) > 0 { - for len(s) > 0 && isSpaceByte(s[0]) { - s = s[1:] - } - if len(s) == 0 { - break - } - // Accepted quoted string. No unescaping inside. - if s[0] == '"' || s[0] == '\'' { - quote := s[0] - s = s[1:] - i := 0 - for i < len(s) && s[i] != quote { - i++ - } - if i >= len(s) { - return nil, fmt.Errorf("unterminated %c string", quote) - } - f = append(f, s[:i]) - s = s[i+1:] - continue - } - i := 0 - for i < len(s) && !isSpaceByte(s[i]) { - i++ - } - f = append(f, s[:i]) - s = s[i:] - } - return f, nil -} - -func (v *TagsFlag) String() string { - return "" -} - -func isSpaceByte(c byte) bool { - return c == ' ' || c == '\t' || c == '\n' || c == '\r' -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/buildutil/util.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/buildutil/util.go deleted file mode 100644 index fc923d7a7020..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/buildutil/util.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package buildutil - -import ( - "fmt" - "go/ast" - "go/build" - "go/parser" - "go/token" - "io" - "io/ioutil" - "os" - "path" - "path/filepath" - "strings" -) - -// ParseFile behaves like parser.ParseFile, -// but uses the build context's file system interface, if any. -// -// If file is not absolute (as defined by IsAbsPath), the (dir, file) -// components are joined using JoinPath; dir must be absolute. -// -// The displayPath function, if provided, is used to transform the -// filename that will be attached to the ASTs. -// -// TODO(adonovan): call this from go/loader.parseFiles when the tree thaws. -// -func ParseFile(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, file string, mode parser.Mode) (*ast.File, error) { - if !IsAbsPath(ctxt, file) { - file = JoinPath(ctxt, dir, file) - } - rd, err := OpenFile(ctxt, file) - if err != nil { - return nil, err - } - defer rd.Close() // ignore error - if displayPath != nil { - file = displayPath(file) - } - return parser.ParseFile(fset, file, rd, mode) -} - -// ContainingPackage returns the package containing filename. -// -// If filename is not absolute, it is interpreted relative to working directory dir. -// All I/O is via the build context's file system interface, if any. -// -// The '...Files []string' fields of the resulting build.Package are not -// populated (build.FindOnly mode). 
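
> Reviewer note: the `flag.Var` line in the doc comment above is the whole API; a minimal sketch of it wired into a program (not part of this diff):

```go
package main

import (
	"flag"
	"fmt"
	"go/build"

	"golang.org/x/tools/go/buildutil"
)

func main() {
	// Wire a -tags flag straight into build.Default, as the doc
	// comment for TagsFlag shows.
	flag.Var((*buildutil.TagsFlag)(&build.Default.BuildTags), "tags", buildutil.TagsFlagDoc)
	flag.Parse()

	// e.g. `prog -tags "foo 'bar baz'"` yields ["foo", "bar baz"].
	fmt.Println(build.Default.BuildTags)
}
```
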
-// -func ContainingPackage(ctxt *build.Context, dir, filename string) (*build.Package, error) { - if !IsAbsPath(ctxt, filename) { - filename = JoinPath(ctxt, dir, filename) - } - - // We must not assume the file tree uses - // "/" always, - // `\` always, - // or os.PathSeparator (which varies by platform), - // but to make any progress, we are forced to assume that - // paths will not use `\` unless the PathSeparator - // is also `\`, thus we can rely on filepath.ToSlash for some sanity. - - dirSlash := path.Dir(filepath.ToSlash(filename)) + "/" - - // We assume that no source root (GOPATH[i] or GOROOT) contains any other. - for _, srcdir := range ctxt.SrcDirs() { - srcdirSlash := filepath.ToSlash(srcdir) + "/" - if importPath, ok := HasSubdir(ctxt, srcdirSlash, dirSlash); ok { - return ctxt.Import(importPath, dir, build.FindOnly) - } - } - - return nil, fmt.Errorf("can't find package containing %s", filename) -} - -// -- Effective methods of file system interface ------------------------- - -// (go/build.Context defines these as methods, but does not export them.) - -// hasSubdir calls ctxt.HasSubdir (if not nil) or else uses -// the local file system to answer the question. -func HasSubdir(ctxt *build.Context, root, dir string) (rel string, ok bool) { - if f := ctxt.HasSubdir; f != nil { - return f(root, dir) - } - - // Try using paths we received. - if rel, ok = hasSubdir(root, dir); ok { - return - } - - // Try expanding symlinks and comparing - // expanded against unexpanded and - // expanded against expanded. - rootSym, _ := filepath.EvalSymlinks(root) - dirSym, _ := filepath.EvalSymlinks(dir) - - if rel, ok = hasSubdir(rootSym, dir); ok { - return - } - if rel, ok = hasSubdir(root, dirSym); ok { - return - } - return hasSubdir(rootSym, dirSym) -} - -func hasSubdir(root, dir string) (rel string, ok bool) { - const sep = string(filepath.Separator) - root = filepath.Clean(root) - if !strings.HasSuffix(root, sep) { - root += sep - } - - dir = filepath.Clean(dir) - if !strings.HasPrefix(dir, root) { - return "", false - } - - return filepath.ToSlash(dir[len(root):]), true -} - -// FileExists returns true if the specified file exists, -// using the build context's file system interface. -func FileExists(ctxt *build.Context, path string) bool { - if ctxt.OpenFile != nil { - r, err := ctxt.OpenFile(path) - if err != nil { - return false - } - r.Close() // ignore error - return true - } - _, err := os.Stat(path) - return err == nil -} - -// OpenFile behaves like os.Open, -// but uses the build context's file system interface, if any. -func OpenFile(ctxt *build.Context, path string) (io.ReadCloser, error) { - if ctxt.OpenFile != nil { - return ctxt.OpenFile(path) - } - return os.Open(path) -} - -// IsAbsPath behaves like filepath.IsAbs, -// but uses the build context's file system interface, if any. -func IsAbsPath(ctxt *build.Context, path string) bool { - if ctxt.IsAbsPath != nil { - return ctxt.IsAbsPath(path) - } - return filepath.IsAbs(path) -} - -// JoinPath behaves like filepath.Join, -// but uses the build context's file system interface, if any. -func JoinPath(ctxt *build.Context, path ...string) string { - if ctxt.JoinPath != nil { - return ctxt.JoinPath(path...) - } - return filepath.Join(path...) -} - -// IsDir behaves like os.Stat plus IsDir, -// but uses the build context's file system interface, if any. 
-func IsDir(ctxt *build.Context, path string) bool { - if ctxt.IsDir != nil { - return ctxt.IsDir(path) - } - fi, err := os.Stat(path) - return err == nil && fi.IsDir() -} - -// ReadDir behaves like ioutil.ReadDir, -// but uses the build context's file system interface, if any. -func ReadDir(ctxt *build.Context, path string) ([]os.FileInfo, error) { - if ctxt.ReadDir != nil { - return ctxt.ReadDir(path) - } - return ioutil.ReadDir(path) -} - -// SplitPathList behaves like filepath.SplitList, -// but uses the build context's file system interface, if any. -func SplitPathList(ctxt *build.Context, s string) []string { - if ctxt.SplitPathList != nil { - return ctxt.SplitPathList(s) - } - return filepath.SplitList(s) -} - -// sameFile returns true if x and y have the same basename and denote -// the same file. -// -func sameFile(x, y string) bool { - if path.Clean(x) == path.Clean(y) { - return true - } - if filepath.Base(x) == filepath.Base(y) { // (optimisation) - if xi, err := os.Stat(x); err == nil { - if yi, err := os.Stat(y); err == nil { - return os.SameFile(xi, yi) - } - } - } - return false -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go deleted file mode 100644 index 23dc019289e5..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package gcexportdata provides functions for locating, reading, and -// writing export data files containing type information produced by the -// gc compiler. This package supports go1.7 export data format and all -// later versions. -// -// This package replaces the deprecated golang.org/x/tools/go/gcimporter15 -// package, which will be deleted in October 2017. -// -// Although it might seem convenient for this package to live alongside -// go/types in the standard library, this would cause version skew -// problems for developer tools that use it, since they must be able to -// consume the outputs of the gc compiler both before and after a Go -// update such as from Go 1.7 to Go 1.8. Because this package lives in -// golang.org/x/tools, sites can update their version of this repo some -// time before the Go 1.8 release and rebuild and redeploy their -// developer tools, which will then be able to consume both Go 1.7 and -// Go 1.8 export data files, so they will work before and after the -// Go update. (See discussion at https://github.com/golang/go/issues/15651.) -// -package gcexportdata - -import ( - "bufio" - "bytes" - "fmt" - "go/token" - "go/types" - "io" - "io/ioutil" - - gcimporter "golang.org/x/tools/go/gcimporter15" -) - -// Find returns the name of an object (.o) or archive (.a) file -// containing type information for the specified import path, -// using the workspace layout conventions of go/build. -// If no file was found, an empty filename is returned. -// -// A relative srcDir is interpreted relative to the current working directory. -// -// Find also returns the package's resolved (canonical) import path, -// reflecting the effects of srcDir and vendoring on importPath. 
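
> Reviewer note: a minimal sketch of `ContainingPackage` (deleted above); not part of this diff, and it assumes the program is run from a directory inside a GOPATH or GOROOT source tree:

```go
package main

import (
	"fmt"
	"go/build"
	"log"
	"os"

	"golang.org/x/tools/go/buildutil"
)

func main() {
	wd, err := os.Getwd()
	if err != nil {
		log.Fatal(err)
	}

	// Resolve "main.go" relative to the working directory and report
	// the package that contains it. FindOnly mode means the file
	// lists in the result are left unpopulated.
	pkg, err := buildutil.ContainingPackage(&build.Default, wd, "main.go")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.ImportPath)
}
```
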
-func Find(importPath string, srcDir string) (filename, path string) { - return gcimporter.FindPkg(importPath, srcDir) -} - -// NewReader returns a reader for the export data section of an object -// (.o) or archive (.a) file read from r. The new reader may provide -// additional trailing data beyond the end of the export data. -func NewReader(r io.Reader) (io.Reader, error) { - buf := bufio.NewReader(r) - _, err := gcimporter.FindExportData(buf) - // If we ever switch to a zip-like archive format with the ToC - // at the end, we can return the correct portion of export data, - // but for now we must return the entire rest of the file. - return buf, err -} - -// Read reads export data from in, decodes it, and returns type -// information for the package. -// The package name is specified by path. -// File position information is added to fset. -// -// Read may inspect and add to the imports map to ensure that references -// within the export data to other packages are consistent. The caller -// must ensure that imports[path] does not exist, or exists but is -// incomplete (see types.Package.Complete), and Read inserts the -// resulting package into this map entry. -// -// On return, the state of the reader is undefined. -func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) { - data, err := ioutil.ReadAll(in) - if err != nil { - return nil, fmt.Errorf("reading export data for %q: %v", path, err) - } - - if bytes.HasPrefix(data, []byte("!")) { - return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path) - } - - // The App Engine Go runtime v1.6 uses the old export data format. - // TODO(adonovan): delete once v1.7 has been around for a while. - if bytes.HasPrefix(data, []byte("package ")) { - return gcimporter.ImportData(imports, path, path, bytes.NewReader(data)) - } - - _, pkg, err := gcimporter.BImportData(fset, imports, data, path) - return pkg, err -} - -// Write writes encoded type information for the specified package to out. -// The FileSet provides file position information for named objects. -func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error { - _, err := out.Write(gcimporter.BExportData(fset, pkg)) - return err -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/gcexportdata/importer.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/gcexportdata/importer.go deleted file mode 100644 index efe221e7e142..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/gcexportdata/importer.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gcexportdata - -import ( - "fmt" - "go/token" - "go/types" - "os" -) - -// NewImporter returns a new instance of the types.Importer interface -// that reads type information from export data files written by gc. -// The Importer also satisfies types.ImporterFrom. -// -// Export data files are located using "go build" workspace conventions -// and the build.Default context. -// -// Use this importer instead of go/importer.For("gc", ...) to avoid the -// version-skew problems described in the documentation of this package, -// or to control the FileSet or access the imports map populated during -// package loading. 
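
> Reviewer note: the Find/NewReader/Read pipeline deleted above composes as follows; a minimal sketch, not part of this diff, assuming a gc toolchain with compiled export data for fmt on disk:

```go
package main

import (
	"fmt"
	"go/token"
	"go/types"
	"log"
	"os"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	// Locate fmt's compiled export data (.a file), extract the export
	// section, and decode it into a *types.Package.
	filename, path := gcexportdata.Find("fmt", "")
	if filename == "" {
		log.Fatal("no export data found for fmt")
	}

	f, err := os.Open(filename)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	r, err := gcexportdata.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}

	fset := token.NewFileSet()
	imports := make(map[string]*types.Package)
	pkg, err := gcexportdata.Read(r, fset, imports, path)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path(), len(pkg.Scope().Names()), "objects")
}
```
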
-// -func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom { - return importer{fset, imports} -} - -type importer struct { - fset *token.FileSet - imports map[string]*types.Package -} - -func (imp importer) Import(importPath string) (*types.Package, error) { - return imp.ImportFrom(importPath, "", 0) -} - -func (imp importer) ImportFrom(importPath, srcDir string, mode types.ImportMode) (_ *types.Package, err error) { - filename, path := Find(importPath, srcDir) - if filename == "" { - if importPath == "unsafe" { - // Even for unsafe, call Find first in case - // the package was vendored. - return types.Unsafe, nil - } - return nil, fmt.Errorf("can't find import: %s", importPath) - } - - if pkg, ok := imp.imports[path]; ok && pkg.Complete() { - return pkg, nil // cache hit - } - - // open file - f, err := os.Open(filename) - if err != nil { - return nil, err - } - defer func() { - f.Close() - if err != nil { - // add file name to error - err = fmt.Errorf("reading export data: %s: %v", filename, err) - } - }() - - r, err := NewReader(f) - if err != nil { - return nil, err - } - - return Read(r, imp.fset, imp.imports, path) -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/gcimporter15/bexport.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/gcimporter15/bexport.go deleted file mode 100644 index cbf8bc00d616..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/gcimporter15/bexport.go +++ /dev/null @@ -1,828 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Binary package export. -// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go; -// see that file for specification of the format. - -package gcimporter - -import ( - "bytes" - "encoding/binary" - "fmt" - "go/ast" - "go/constant" - "go/token" - "go/types" - "log" - "math" - "math/big" - "sort" - "strings" -) - -// If debugFormat is set, each integer and string value is preceded by a marker -// and position information in the encoding. This mechanism permits an importer -// to recognize immediately when it is out of sync. The importer recognizes this -// mode automatically (i.e., it can import export data produced with debugging -// support even if debugFormat is not set at the time of import). This mode will -// lead to massively larger export data (by a factor of 2 to 3) and should only -// be enabled during development and debugging. -// -// NOTE: This flag is the first flag to enable if importing dies because of -// (suspected) format errors, and whenever a change is made to the format. -const debugFormat = false // default: false - -// If trace is set, debugging output is printed to std out. -const trace = false // default: false - -// Current export format version. Increase with each format change. -// 4: type name objects support type aliases, uses aliasTag -// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used) -// 2: removed unused bool in ODCL export (compiler only) -// 1: header format change (more regular), export package for _ struct fields -// 0: Go1.7 encoding -const exportVersion = 4 - -// trackAllTypes enables cycle tracking for all types, not just named -// types. The existing compiler invariants assume that unnamed types -// that are not completely set up are not used, or else there are spurious -// errors. 
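
> Reviewer note: `NewImporter` (deleted above) is meant to plug into go/types; a minimal sketch, not part of this diff (the one-line package `p` is invented):

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
	"log"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", `package p; import "fmt"; var _ = fmt.Sprintf`, 0)
	if err != nil {
		log.Fatal(err)
	}

	// The importer reads gc export data directly, avoiding the
	// version-skew problems described in the package doc above.
	imports := make(map[string]*types.Package)
	conf := types.Config{Importer: gcexportdata.NewImporter(fset, imports)}
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path(), "type-checked OK")
}
```
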
-// If disabled, only named types are tracked, possibly leading to slightly -// less efficient encoding in rare cases. It also prevents the export of -// some corner-case type declarations (but those are not handled correctly -// with with the textual export format either). -// TODO(gri) enable and remove once issues caused by it are fixed -const trackAllTypes = false - -type exporter struct { - fset *token.FileSet - out bytes.Buffer - - // object -> index maps, indexed in order of serialization - strIndex map[string]int - pkgIndex map[*types.Package]int - typIndex map[types.Type]int - - // position encoding - posInfoFormat bool - prevFile string - prevLine int - - // debugging support - written int // bytes written - indent int // for trace -} - -// BExportData returns binary export data for pkg. -// If no file set is provided, position info will be missing. -func BExportData(fset *token.FileSet, pkg *types.Package) []byte { - p := exporter{ - fset: fset, - strIndex: map[string]int{"": 0}, // empty string is mapped to 0 - pkgIndex: make(map[*types.Package]int), - typIndex: make(map[types.Type]int), - posInfoFormat: true, // TODO(gri) might become a flag, eventually - } - - // write version info - // The version string must start with "version %d" where %d is the version - // number. Additional debugging information may follow after a blank; that - // text is ignored by the importer. - p.rawStringln(fmt.Sprintf("version %d", exportVersion)) - var debug string - if debugFormat { - debug = "debug" - } - p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly - p.bool(trackAllTypes) - p.bool(p.posInfoFormat) - - // --- generic export data --- - - // populate type map with predeclared "known" types - for index, typ := range predeclared { - p.typIndex[typ] = index - } - if len(p.typIndex) != len(predeclared) { - log.Fatalf("gcimporter: duplicate entries in type map?") - } - - // write package data - p.pkg(pkg, true) - if trace { - p.tracef("\n") - } - - // write objects - objcount := 0 - scope := pkg.Scope() - for _, name := range scope.Names() { - if !ast.IsExported(name) { - continue - } - if trace { - p.tracef("\n") - } - p.obj(scope.Lookup(name)) - objcount++ - } - - // indicate end of list - if trace { - p.tracef("\n") - } - p.tag(endTag) - - // for self-verification only (redundant) - p.int(objcount) - - if trace { - p.tracef("\n") - } - - // --- end of export data --- - - return p.out.Bytes() -} - -func (p *exporter) pkg(pkg *types.Package, emptypath bool) { - if pkg == nil { - log.Fatalf("gcimporter: unexpected nil pkg") - } - - // if we saw the package before, write its index (>= 0) - if i, ok := p.pkgIndex[pkg]; ok { - p.index('P', i) - return - } - - // otherwise, remember the package, write the package tag (< 0) and package data - if trace { - p.tracef("P%d = { ", len(p.pkgIndex)) - defer p.tracef("} ") - } - p.pkgIndex[pkg] = len(p.pkgIndex) - - p.tag(packageTag) - p.string(pkg.Name()) - if emptypath { - p.string("") - } else { - p.string(pkg.Path()) - } -} - -func (p *exporter) obj(obj types.Object) { - switch obj := obj.(type) { - case *types.Const: - p.tag(constTag) - p.pos(obj) - p.qualifiedName(obj) - p.typ(obj.Type()) - p.value(obj.Val()) - - case *types.TypeName: - if isAlias(obj) { - p.tag(aliasTag) - p.pos(obj) - p.qualifiedName(obj) - } else { - p.tag(typeTag) - } - p.typ(obj.Type()) - - case *types.Var: - p.tag(varTag) - p.pos(obj) - p.qualifiedName(obj) - p.typ(obj.Type()) - - case *types.Func: - p.tag(funcTag) - 
p.pos(obj) - p.qualifiedName(obj) - sig := obj.Type().(*types.Signature) - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) - - default: - log.Fatalf("gcimporter: unexpected object %v (%T)", obj, obj) - } -} - -func (p *exporter) pos(obj types.Object) { - if !p.posInfoFormat { - return - } - - file, line := p.fileLine(obj) - if file == p.prevFile { - // common case: write line delta - // delta == 0 means different file or no line change - delta := line - p.prevLine - p.int(delta) - if delta == 0 { - p.int(-1) // -1 means no file change - } - } else { - // different file - p.int(0) - // Encode filename as length of common prefix with previous - // filename, followed by (possibly empty) suffix. Filenames - // frequently share path prefixes, so this can save a lot - // of space and make export data size less dependent on file - // path length. The suffix is unlikely to be empty because - // file names tend to end in ".go". - n := commonPrefixLen(p.prevFile, file) - p.int(n) // n >= 0 - p.string(file[n:]) // write suffix only - p.prevFile = file - p.int(line) - } - p.prevLine = line -} - -func (p *exporter) fileLine(obj types.Object) (file string, line int) { - if p.fset != nil { - pos := p.fset.Position(obj.Pos()) - file = pos.Filename - line = pos.Line - } - return -} - -func commonPrefixLen(a, b string) int { - if len(a) > len(b) { - a, b = b, a - } - // len(a) <= len(b) - i := 0 - for i < len(a) && a[i] == b[i] { - i++ - } - return i -} - -func (p *exporter) qualifiedName(obj types.Object) { - p.string(obj.Name()) - p.pkg(obj.Pkg(), false) -} - -func (p *exporter) typ(t types.Type) { - if t == nil { - log.Fatalf("gcimporter: nil type") - } - - // Possible optimization: Anonymous pointer types *T where - // T is a named type are common. We could canonicalize all - // such types *T to a single type PT = *T. This would lead - // to at most one *T entry in typIndex, and all future *T's - // would be encoded as the respective index directly. Would - // save 1 byte (pointerTag) per *T and reduce the typIndex - // size (at the cost of a canonicalization map). We can do - // this later, without encoding format change. 
- - // if we saw the type before, write its index (>= 0) - if i, ok := p.typIndex[t]; ok { - p.index('T', i) - return - } - - // otherwise, remember the type, write the type tag (< 0) and type data - if trackAllTypes { - if trace { - p.tracef("T%d = {>\n", len(p.typIndex)) - defer p.tracef("<\n} ") - } - p.typIndex[t] = len(p.typIndex) - } - - switch t := t.(type) { - case *types.Named: - if !trackAllTypes { - // if we don't track all types, track named types now - p.typIndex[t] = len(p.typIndex) - } - - p.tag(namedTag) - p.pos(t.Obj()) - p.qualifiedName(t.Obj()) - p.typ(t.Underlying()) - if !types.IsInterface(t) { - p.assocMethods(t) - } - - case *types.Array: - p.tag(arrayTag) - p.int64(t.Len()) - p.typ(t.Elem()) - - case *types.Slice: - p.tag(sliceTag) - p.typ(t.Elem()) - - case *dddSlice: - p.tag(dddTag) - p.typ(t.elem) - - case *types.Struct: - p.tag(structTag) - p.fieldList(t) - - case *types.Pointer: - p.tag(pointerTag) - p.typ(t.Elem()) - - case *types.Signature: - p.tag(signatureTag) - p.paramList(t.Params(), t.Variadic()) - p.paramList(t.Results(), false) - - case *types.Interface: - p.tag(interfaceTag) - p.iface(t) - - case *types.Map: - p.tag(mapTag) - p.typ(t.Key()) - p.typ(t.Elem()) - - case *types.Chan: - p.tag(chanTag) - p.int(int(3 - t.Dir())) // hack - p.typ(t.Elem()) - - default: - log.Fatalf("gcimporter: unexpected type %T: %s", t, t) - } -} - -func (p *exporter) assocMethods(named *types.Named) { - // Sort methods (for determinism). - var methods []*types.Func - for i := 0; i < named.NumMethods(); i++ { - methods = append(methods, named.Method(i)) - } - sort.Sort(methodsByName(methods)) - - p.int(len(methods)) - - if trace && methods != nil { - p.tracef("associated methods {>\n") - } - - for i, m := range methods { - if trace && i > 0 { - p.tracef("\n") - } - - p.pos(m) - name := m.Name() - p.string(name) - if !exported(name) { - p.pkg(m.Pkg(), false) - } - - sig := m.Type().(*types.Signature) - p.paramList(types.NewTuple(sig.Recv()), false) - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) - p.int(0) // dummy value for go:nointerface pragma - ignored by importer - } - - if trace && methods != nil { - p.tracef("<\n} ") - } -} - -type methodsByName []*types.Func - -func (x methodsByName) Len() int { return len(x) } -func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } -func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() } - -func (p *exporter) fieldList(t *types.Struct) { - if trace && t.NumFields() > 0 { - p.tracef("fields {>\n") - defer p.tracef("<\n} ") - } - - p.int(t.NumFields()) - for i := 0; i < t.NumFields(); i++ { - if trace && i > 0 { - p.tracef("\n") - } - p.field(t.Field(i)) - p.string(t.Tag(i)) - } -} - -func (p *exporter) field(f *types.Var) { - if !f.IsField() { - log.Fatalf("gcimporter: field expected") - } - - p.pos(f) - p.fieldName(f) - p.typ(f.Type()) -} - -func (p *exporter) iface(t *types.Interface) { - // TODO(gri): enable importer to load embedded interfaces, - // then emit Embeddeds and ExplicitMethods separately here. 
- p.int(0) - - n := t.NumMethods() - if trace && n > 0 { - p.tracef("methods {>\n") - defer p.tracef("<\n} ") - } - p.int(n) - for i := 0; i < n; i++ { - if trace && i > 0 { - p.tracef("\n") - } - p.method(t.Method(i)) - } -} - -func (p *exporter) method(m *types.Func) { - sig := m.Type().(*types.Signature) - if sig.Recv() == nil { - log.Fatalf("gcimporter: method expected") - } - - p.pos(m) - p.string(m.Name()) - if m.Name() != "_" && !ast.IsExported(m.Name()) { - p.pkg(m.Pkg(), false) - } - - // interface method; no need to encode receiver. - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) -} - -func (p *exporter) fieldName(f *types.Var) { - name := f.Name() - - if f.Anonymous() { - // anonymous field - we distinguish between 3 cases: - // 1) field name matches base type name and is exported - // 2) field name matches base type name and is not exported - // 3) field name doesn't match base type name (alias name) - bname := basetypeName(f.Type()) - if name == bname { - if ast.IsExported(name) { - name = "" // 1) we don't need to know the field name or package - } else { - name = "?" // 2) use unexported name "?" to force package export - } - } else { - // 3) indicate alias and export name as is - // (this requires an extra "@" but this is a rare case) - p.string("@") - } - } - - p.string(name) - if name != "" && !ast.IsExported(name) { - p.pkg(f.Pkg(), false) - } -} - -func basetypeName(typ types.Type) string { - switch typ := deref(typ).(type) { - case *types.Basic: - return typ.Name() - case *types.Named: - return typ.Obj().Name() - default: - return "" // unnamed type - } -} - -func (p *exporter) paramList(params *types.Tuple, variadic bool) { - // use negative length to indicate unnamed parameters - // (look at the first parameter only since either all - // names are present or all are absent) - n := params.Len() - if n > 0 && params.At(0).Name() == "" { - n = -n - } - p.int(n) - for i := 0; i < params.Len(); i++ { - q := params.At(i) - t := q.Type() - if variadic && i == params.Len()-1 { - t = &dddSlice{t.(*types.Slice).Elem()} - } - p.typ(t) - if n > 0 { - name := q.Name() - p.string(name) - if name != "_" { - p.pkg(q.Pkg(), false) - } - } - p.string("") // no compiler-specific info - } -} - -func (p *exporter) value(x constant.Value) { - if trace { - p.tracef("= ") - } - - switch x.Kind() { - case constant.Bool: - tag := falseTag - if constant.BoolVal(x) { - tag = trueTag - } - p.tag(tag) - - case constant.Int: - if v, exact := constant.Int64Val(x); exact { - // common case: x fits into an int64 - use compact encoding - p.tag(int64Tag) - p.int64(v) - return - } - // uncommon case: large x - use float encoding - // (powers of 2 will be encoded efficiently with exponent) - p.tag(floatTag) - p.float(constant.ToFloat(x)) - - case constant.Float: - p.tag(floatTag) - p.float(x) - - case constant.Complex: - p.tag(complexTag) - p.float(constant.Real(x)) - p.float(constant.Imag(x)) - - case constant.String: - p.tag(stringTag) - p.string(constant.StringVal(x)) - - case constant.Unknown: - // package contains type errors - p.tag(unknownTag) - - default: - log.Fatalf("gcimporter: unexpected value %v (%T)", x, x) - } -} - -func (p *exporter) float(x constant.Value) { - if x.Kind() != constant.Float { - log.Fatalf("gcimporter: unexpected constant %v, want float", x) - } - // extract sign (there is no -0) - sign := constant.Sign(x) - if sign == 0 { - // x == 0 - p.int(0) - return - } - // x != 0 - - var f big.Float - if v, exact := constant.Float64Val(x); exact { - 
// float64 - f.SetFloat64(v) - } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { - // TODO(gri): add big.Rat accessor to constant.Value. - r := valueToRat(num) - f.SetRat(r.Quo(r, valueToRat(denom))) - } else { - // Value too large to represent as a fraction => inaccessible. - // TODO(gri): add big.Float accessor to constant.Value. - f.SetFloat64(math.MaxFloat64) // FIXME - } - - // extract exponent such that 0.5 <= m < 1.0 - var m big.Float - exp := f.MantExp(&m) - - // extract mantissa as *big.Int - // - set exponent large enough so mant satisfies mant.IsInt() - // - get *big.Int from mant - m.SetMantExp(&m, int(m.MinPrec())) - mant, acc := m.Int(nil) - if acc != big.Exact { - log.Fatalf("gcimporter: internal error") - } - - p.int(sign) - p.int(exp) - p.string(string(mant.Bytes())) -} - -func valueToRat(x constant.Value) *big.Rat { - // Convert little-endian to big-endian. - // I can't believe this is necessary. - bytes := constant.Bytes(x) - for i := 0; i < len(bytes)/2; i++ { - bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] - } - return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) -} - -func (p *exporter) bool(b bool) bool { - if trace { - p.tracef("[") - defer p.tracef("= %v] ", b) - } - - x := 0 - if b { - x = 1 - } - p.int(x) - return b -} - -// ---------------------------------------------------------------------------- -// Low-level encoders - -func (p *exporter) index(marker byte, index int) { - if index < 0 { - log.Fatalf("gcimporter: invalid index < 0") - } - if debugFormat { - p.marker('t') - } - if trace { - p.tracef("%c%d ", marker, index) - } - p.rawInt64(int64(index)) -} - -func (p *exporter) tag(tag int) { - if tag >= 0 { - log.Fatalf("gcimporter: invalid tag >= 0") - } - if debugFormat { - p.marker('t') - } - if trace { - p.tracef("%s ", tagString[-tag]) - } - p.rawInt64(int64(tag)) -} - -func (p *exporter) int(x int) { - p.int64(int64(x)) -} - -func (p *exporter) int64(x int64) { - if debugFormat { - p.marker('i') - } - if trace { - p.tracef("%d ", x) - } - p.rawInt64(x) -} - -func (p *exporter) string(s string) { - if debugFormat { - p.marker('s') - } - if trace { - p.tracef("%q ", s) - } - // if we saw the string before, write its index (>= 0) - // (the empty string is mapped to 0) - if i, ok := p.strIndex[s]; ok { - p.rawInt64(int64(i)) - return - } - // otherwise, remember string and write its negative length and bytes - p.strIndex[s] = len(p.strIndex) - p.rawInt64(-int64(len(s))) - for i := 0; i < len(s); i++ { - p.rawByte(s[i]) - } -} - -// marker emits a marker byte and position information which makes -// it easy for a reader to detect if it is "out of sync". Used for -// debugFormat format only. -func (p *exporter) marker(m byte) { - p.rawByte(m) - // Enable this for help tracking down the location - // of an incorrect marker when running in debugFormat. - if false && trace { - p.tracef("#%d ", p.written) - } - p.rawInt64(int64(p.written)) -} - -// rawInt64 should only be used by low-level encoders. -func (p *exporter) rawInt64(x int64) { - var tmp [binary.MaxVarintLen64]byte - n := binary.PutVarint(tmp[:], x) - for i := 0; i < n; i++ { - p.rawByte(tmp[i]) - } -} - -// rawStringln should only be used to emit the initial version string. -func (p *exporter) rawStringln(s string) { - for i := 0; i < len(s); i++ { - p.rawByte(s[i]) - } - p.rawByte('\n') -} - -// rawByte is the bottleneck interface to write to p.out. 
-// rawByte escapes b as follows (any encoding does that -// hides '$'): -// -// '$' => '|' 'S' -// '|' => '|' '|' -// -// Necessary so other tools can find the end of the -// export data by searching for "$$". -// rawByte should only be used by low-level encoders. -func (p *exporter) rawByte(b byte) { - switch b { - case '$': - // write '$' as '|' 'S' - b = 'S' - fallthrough - case '|': - // write '|' as '|' '|' - p.out.WriteByte('|') - p.written++ - } - p.out.WriteByte(b) - p.written++ -} - -// tracef is like fmt.Printf but it rewrites the format string -// to take care of indentation. -func (p *exporter) tracef(format string, args ...interface{}) { - if strings.ContainsAny(format, "<>\n") { - var buf bytes.Buffer - for i := 0; i < len(format); i++ { - // no need to deal with runes - ch := format[i] - switch ch { - case '>': - p.indent++ - continue - case '<': - p.indent-- - continue - } - buf.WriteByte(ch) - if ch == '\n' { - for j := p.indent; j > 0; j-- { - buf.WriteString(". ") - } - } - } - format = buf.String() - } - fmt.Printf(format, args...) -} - -// Debugging support. -// (tagString is only used when tracing is enabled) -var tagString = [...]string{ - // Packages - -packageTag: "package", - - // Types - -namedTag: "named type", - -arrayTag: "array", - -sliceTag: "slice", - -dddTag: "ddd", - -structTag: "struct", - -pointerTag: "pointer", - -signatureTag: "signature", - -interfaceTag: "interface", - -mapTag: "map", - -chanTag: "chan", - - // Values - -falseTag: "false", - -trueTag: "true", - -int64Tag: "int64", - -floatTag: "float", - -fractionTag: "fraction", - -complexTag: "complex", - -stringTag: "string", - -unknownTag: "unknown", - - // Type aliases - -aliasTag: "alias", -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/gcimporter15/bimport.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/gcimporter15/bimport.go deleted file mode 100644 index 1936a7f6c976..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/gcimporter15/bimport.go +++ /dev/null @@ -1,993 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file is a copy of $GOROOT/src/go/internal/gcimporter/bimport.go. - -package gcimporter - -import ( - "encoding/binary" - "fmt" - "go/constant" - "go/token" - "go/types" - "sort" - "strconv" - "strings" - "sync" - "unicode" - "unicode/utf8" -) - -type importer struct { - imports map[string]*types.Package - data []byte - importpath string - buf []byte // for reading strings - version int // export format version - - // object lists - strList []string // in order of appearance - pathList []string // in order of appearance - pkgList []*types.Package // in order of appearance - typList []types.Type // in order of appearance - interfaceList []*types.Interface // for delayed completion only - trackAllTypes bool - - // position encoding - posInfoFormat bool - prevFile string - prevLine int - fset *token.FileSet - files map[string]*token.File - - // debugging support - debugFormat bool - read int // bytes read -} - -// BImportData imports a package from the serialized package data -// and returns the number of bytes consumed and a reference to the package. -// If the export data version is not recognized or the format is otherwise -// compromised, an error is returned. 
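The `rawByte` escaping just shown guarantees that a literal '$' never reaches the output, so other tools can find the end of export data by scanning for "$$". A runnable round trip of the same two rules (hypothetical `escape`/`unescape` helpers):

```go
package main

import (
	"bytes"
	"fmt"
)

// escape applies the writer's rules: '$' -> "|S" and '|' -> "||".
func escape(data []byte) []byte {
	var out bytes.Buffer
	for _, b := range data {
		switch b {
		case '$':
			out.WriteString("|S")
		case '|':
			out.WriteString("||")
		default:
			out.WriteByte(b)
		}
	}
	return out.Bytes()
}

// unescape mirrors the reader's rawByte: "|S" -> '$' and "||" -> '|'.
func unescape(data []byte) []byte {
	var out bytes.Buffer
	for i := 0; i < len(data); i++ {
		b := data[i]
		if b == '|' {
			i++
			if data[i] == 'S' {
				b = '$'
			}
			// "||" decodes to '|', which b already holds.
		}
		out.WriteByte(b)
	}
	return out.Bytes()
}

func main() {
	in := []byte("price: $5 | tax: $1")
	enc := escape(in)
	fmt.Printf("%s\n%s\n", enc, unescape(enc))
	fmt.Println(bytes.Contains(enc, []byte("$"))) // false: "$$" can mark the end
}
```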
-func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { - // catch panics and return them as errors - defer func() { - if e := recover(); e != nil { - // The package (filename) causing the problem is added to this - // error by a wrapper in the caller (Import in gcimporter.go). - // Return a (possibly nil or incomplete) package unchanged (see #16088). - err = fmt.Errorf("cannot import, possibly version skew (%v) - reinstall package", e) - } - }() - - p := importer{ - imports: imports, - data: data, - importpath: path, - version: -1, // unknown version - strList: []string{""}, // empty string is mapped to 0 - pathList: []string{""}, // empty string is mapped to 0 - fset: fset, - files: make(map[string]*token.File), - } - - // read version info - var versionstr string - if b := p.rawByte(); b == 'c' || b == 'd' { - // Go1.7 encoding; first byte encodes low-level - // encoding format (compact vs debug). - // For backward-compatibility only (avoid problems with - // old installed packages). Newly compiled packages use - // the extensible format string. - // TODO(gri) Remove this support eventually; after Go1.8. - if b == 'd' { - p.debugFormat = true - } - p.trackAllTypes = p.rawByte() == 'a' - p.posInfoFormat = p.int() != 0 - versionstr = p.string() - if versionstr == "v1" { - p.version = 0 - } - } else { - // Go1.8 extensible encoding - // read version string and extract version number (ignore anything after the version number) - versionstr = p.rawStringln(b) - if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" { - if v, err := strconv.Atoi(s[1]); err == nil && v > 0 { - p.version = v - } - } - } - - // read version specific flags - extend as necessary - switch p.version { - // case 6: - // ... - // fallthrough - case 5, 4, 3, 2, 1: - p.debugFormat = p.rawStringln(p.rawByte()) == "debug" - p.trackAllTypes = p.int() != 0 - p.posInfoFormat = p.int() != 0 - case 0: - // Go1.7 encoding format - nothing to do here - default: - errorf("unknown export format version %d (%q)", p.version, versionstr) - } - - // --- generic export data --- - - // populate typList with predeclared "known" types - p.typList = append(p.typList, predeclared...) - - // read package data - pkg = p.pkg() - - // read objects of phase 1 only (see cmd/compiler/internal/gc/bexport.go) - objcount := 0 - for { - tag := p.tagOrIndex() - if tag == endTag { - break - } - p.obj(tag) - objcount++ - } - - // self-verification - if count := p.int(); count != objcount { - errorf("got %d objects; want %d", objcount, count) - } - - // ignore compiler-specific import data - - // complete interfaces - // TODO(gri) re-investigate if we still need to do this in a delayed fashion - for _, typ := range p.interfaceList { - typ.Complete() - } - - // record all referenced packages as imports - list := append(([]*types.Package)(nil), p.pkgList[1:]...) 
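`BImportData` above recognizes the Go 1.8 extensible header by splitting the first line on spaces and reading the second field as a number. The same tolerant parse as a tiny standalone function (the name `parseVersion` is invented):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseVersion extracts N from a header such as "version 5 extra-flags...",
// returning -1 when the line is not recognized, matching the fallback above.
func parseVersion(line string) int {
	if s := strings.SplitN(line, " ", 3); len(s) >= 2 && s[0] == "version" {
		if v, err := strconv.Atoi(s[1]); err == nil && v > 0 {
			return v
		}
	}
	return -1
}

func main() {
	fmt.Println(parseVersion("version 5"))         // 5
	fmt.Println(parseVersion("version 6 newflag")) // 6
	fmt.Println(parseVersion("v1"))                // -1 (pre-1.8 header)
}
```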
- sort.Sort(byPath(list)) - pkg.SetImports(list) - - // package was imported completely and without errors - pkg.MarkComplete() - - return p.read, pkg, nil -} - -func errorf(format string, args ...interface{}) { - panic(fmt.Sprintf(format, args...)) -} - -func (p *importer) pkg() *types.Package { - // if the package was seen before, i is its index (>= 0) - i := p.tagOrIndex() - if i >= 0 { - return p.pkgList[i] - } - - // otherwise, i is the package tag (< 0) - if i != packageTag { - errorf("unexpected package tag %d version %d", i, p.version) - } - - // read package data - name := p.string() - var path string - if p.version >= 5 { - path = p.path() - } else { - path = p.string() - } - - // we should never see an empty package name - if name == "" { - errorf("empty package name in import") - } - - // an empty path denotes the package we are currently importing; - // it must be the first package we see - if (path == "") != (len(p.pkgList) == 0) { - errorf("package path %q for pkg index %d", path, len(p.pkgList)) - } - - // if the package was imported before, use that one; otherwise create a new one - if path == "" { - path = p.importpath - } - pkg := p.imports[path] - if pkg == nil { - pkg = types.NewPackage(path, name) - p.imports[path] = pkg - } else if pkg.Name() != name { - errorf("conflicting names %s and %s for package %q", pkg.Name(), name, path) - } - p.pkgList = append(p.pkgList, pkg) - - return pkg -} - -// objTag returns the tag value for each object kind. -func objTag(obj types.Object) int { - switch obj.(type) { - case *types.Const: - return constTag - case *types.TypeName: - return typeTag - case *types.Var: - return varTag - case *types.Func: - return funcTag - default: - errorf("unexpected object: %v (%T)", obj, obj) // panics - panic("unreachable") - } -} - -func sameObj(a, b types.Object) bool { - // Because unnamed types are not canonicalized, we cannot simply compare types for - // (pointer) identity. - // Ideally we'd check equality of constant values as well, but this is good enough. - return objTag(a) == objTag(b) && types.Identical(a.Type(), b.Type()) -} - -func (p *importer) declare(obj types.Object) { - pkg := obj.Pkg() - if alt := pkg.Scope().Insert(obj); alt != nil { - // This can only trigger if we import a (non-type) object a second time. - // Excluding type aliases, this cannot happen because 1) we only import a package - // once; and b) we ignore compiler-specific export data which may contain - // functions whose inlined function bodies refer to other functions that - // were already imported. - // However, type aliases require reexporting the original type, so we need - // to allow it (see also the comment in cmd/compile/internal/gc/bimport.go, - // method importer.obj, switch case importing functions). - // TODO(gri) review/update this comment once the gc compiler handles type aliases. 
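`declare` above relies on `Scope.Insert` returning the previously inserted object on a name collision. A minimal demonstration of that `go/types` behavior:

```go
package main

import (
	"fmt"
	"go/constant"
	"go/token"
	"go/types"
)

func main() {
	pkg := types.NewPackage("example.com/demo", "demo")
	answer := types.NewConst(token.NoPos, pkg, "Answer",
		types.Typ[types.Int], constant.MakeInt64(42))
	// First insert succeeds: Insert returns nil.
	fmt.Println("conflict on first insert:", pkg.Scope().Insert(answer) != nil)

	// A second object with the same name is rejected; Insert hands back
	// the existing one, which declare compares via sameObj.
	dup := types.NewConst(token.NoPos, pkg, "Answer",
		types.Typ[types.Int], constant.MakeInt64(7))
	if alt := pkg.Scope().Insert(dup); alt != nil {
		fmt.Println("already declared:", alt)
	}
}
```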
- if !sameObj(obj, alt) { - errorf("inconsistent import:\n\t%v\npreviously imported as:\n\t%v\n", obj, alt) - } - } -} - -func (p *importer) obj(tag int) { - switch tag { - case constTag: - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil) - val := p.value() - p.declare(types.NewConst(pos, pkg, name, typ, val)) - - case aliasTag: - // TODO(gri) verify type alias hookup is correct - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil) - p.declare(types.NewTypeName(pos, pkg, name, typ)) - - case typeTag: - p.typ(nil) - - case varTag: - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil) - p.declare(types.NewVar(pos, pkg, name, typ)) - - case funcTag: - pos := p.pos() - pkg, name := p.qualifiedName() - params, isddd := p.paramList() - result, _ := p.paramList() - sig := types.NewSignature(nil, params, result, isddd) - p.declare(types.NewFunc(pos, pkg, name, sig)) - - default: - errorf("unexpected object tag %d", tag) - } -} - -const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go - -func (p *importer) pos() token.Pos { - if !p.posInfoFormat { - return token.NoPos - } - - file := p.prevFile - line := p.prevLine - delta := p.int() - line += delta - if p.version >= 5 { - if delta == deltaNewFile { - if n := p.int(); n >= 0 { - // file changed - file = p.path() - line = n - } - } - } else { - if delta == 0 { - if n := p.int(); n >= 0 { - // file changed - file = p.prevFile[:n] + p.string() - line = p.int() - } - } - } - p.prevFile = file - p.prevLine = line - - // Synthesize a token.Pos - - // Since we don't know the set of needed file positions, we - // reserve maxlines positions per file. - const maxlines = 64 * 1024 - f := p.files[file] - if f == nil { - f = p.fset.AddFile(file, -1, maxlines) - p.files[file] = f - // Allocate the fake linebreak indices on first use. - // TODO(adonovan): opt: save ~512KB using a more complex scheme? - fakeLinesOnce.Do(func() { - fakeLines = make([]int, maxlines) - for i := range fakeLines { - fakeLines[i] = i - } - }) - f.SetLines(fakeLines) - } - - if line > maxlines { - line = 1 - } - - // Treat the file as if it contained only newlines - // and column=1: use the line number as the offset. - return f.Pos(line - 1) -} - -var ( - fakeLines []int - fakeLinesOnce sync.Once -) - -func (p *importer) qualifiedName() (pkg *types.Package, name string) { - name = p.string() - pkg = p.pkg() - return -} - -func (p *importer) record(t types.Type) { - p.typList = append(p.typList, t) -} - -// A dddSlice is a types.Type representing ...T parameters. -// It only appears for parameter types and does not escape -// the importer. -type dddSlice struct { - elem types.Type -} - -func (t *dddSlice) Underlying() types.Type { return t } -func (t *dddSlice) String() string { return "..." + t.elem.String() } - -// parent is the package which declared the type; parent == nil means -// the package currently imported. The parent package is needed for -// exported struct fields and interface methods which don't contain -// explicit package information in the export data. 
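`pos` above decodes positions as line deltas against the previous position, with a sentinel delta announcing a file change. A simplified model of that stream (the real format interleaves the fields differently and varies by export version):

```go
package main

import "fmt"

// deltaNewFile is the sentinel used by the importer above.
const deltaNewFile = -64

type record struct {
	delta int
	file  string // only meaningful when delta == deltaNewFile
	line  int    // only meaningful when delta == deltaNewFile
}

// decode replays a delta stream into absolute file:line positions.
func decode(recs []record) []string {
	prevFile, prevLine := "", 0
	var out []string
	for _, r := range recs {
		if r.delta == deltaNewFile {
			prevFile, prevLine = r.file, r.line
		} else {
			prevLine += r.delta
		}
		out = append(out, fmt.Sprintf("%s:%d", prevFile, prevLine))
	}
	return out
}

func main() {
	recs := []record{
		{delta: deltaNewFile, file: "a.go", line: 10},
		{delta: 2},  // a.go:12
		{delta: -5}, // a.go:7
		{delta: deltaNewFile, file: "b.go", line: 1},
	}
	for _, p := range decode(recs) {
		fmt.Println(p)
	}
}
```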
-func (p *importer) typ(parent *types.Package) types.Type { - // if the type was seen before, i is its index (>= 0) - i := p.tagOrIndex() - if i >= 0 { - return p.typList[i] - } - - // otherwise, i is the type tag (< 0) - switch i { - case namedTag: - // read type object - pos := p.pos() - parent, name := p.qualifiedName() - scope := parent.Scope() - obj := scope.Lookup(name) - - // if the object doesn't exist yet, create and insert it - if obj == nil { - obj = types.NewTypeName(pos, parent, name, nil) - scope.Insert(obj) - } - - if _, ok := obj.(*types.TypeName); !ok { - errorf("pkg = %s, name = %s => %s", parent, name, obj) - } - - // associate new named type with obj if it doesn't exist yet - t0 := types.NewNamed(obj.(*types.TypeName), nil, nil) - - // but record the existing type, if any - t := obj.Type().(*types.Named) - p.record(t) - - // read underlying type - t0.SetUnderlying(p.typ(parent)) - - // interfaces don't have associated methods - if types.IsInterface(t0) { - return t - } - - // read associated methods - for i := p.int(); i > 0; i-- { - // TODO(gri) replace this with something closer to fieldName - pos := p.pos() - name := p.string() - if !exported(name) { - p.pkg() - } - - recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver? - params, isddd := p.paramList() - result, _ := p.paramList() - p.int() // go:nointerface pragma - discarded - - sig := types.NewSignature(recv.At(0), params, result, isddd) - t0.AddMethod(types.NewFunc(pos, parent, name, sig)) - } - - return t - - case arrayTag: - t := new(types.Array) - if p.trackAllTypes { - p.record(t) - } - - n := p.int64() - *t = *types.NewArray(p.typ(parent), n) - return t - - case sliceTag: - t := new(types.Slice) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewSlice(p.typ(parent)) - return t - - case dddTag: - t := new(dddSlice) - if p.trackAllTypes { - p.record(t) - } - - t.elem = p.typ(parent) - return t - - case structTag: - t := new(types.Struct) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewStruct(p.fieldList(parent)) - return t - - case pointerTag: - t := new(types.Pointer) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewPointer(p.typ(parent)) - return t - - case signatureTag: - t := new(types.Signature) - if p.trackAllTypes { - p.record(t) - } - - params, isddd := p.paramList() - result, _ := p.paramList() - *t = *types.NewSignature(nil, params, result, isddd) - return t - - case interfaceTag: - // Create a dummy entry in the type list. This is safe because we - // cannot expect the interface type to appear in a cycle, as any - // such cycle must contain a named type which would have been - // first defined earlier. 
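The `namedTag` case above creates the `*types.Named` before reading its underlying type, which is what lets cyclic types decode. The same two-step construction in isolation:

```go
package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	pkg := types.NewPackage("example.com/demo", "demo")

	// Step 1: create the type name and its Named type with no underlying yet.
	obj := types.NewTypeName(token.NoPos, pkg, "List", nil)
	named := types.NewNamed(obj, nil, nil)

	// Step 2: the underlying struct may now refer back to the named type,
	// exactly as a self-referential type in export data would.
	under := types.NewStruct([]*types.Var{
		types.NewField(token.NoPos, pkg, "next", types.NewPointer(named), false),
	}, nil)
	named.SetUnderlying(under)

	fmt.Println(named, "=", named.Underlying())
}
```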
- n := len(p.typList) - if p.trackAllTypes { - p.record(nil) - } - - var embeddeds []*types.Named - for n := p.int(); n > 0; n-- { - p.pos() - embeddeds = append(embeddeds, p.typ(parent).(*types.Named)) - } - - t := types.NewInterface(p.methodList(parent), embeddeds) - p.interfaceList = append(p.interfaceList, t) - if p.trackAllTypes { - p.typList[n] = t - } - return t - - case mapTag: - t := new(types.Map) - if p.trackAllTypes { - p.record(t) - } - - key := p.typ(parent) - val := p.typ(parent) - *t = *types.NewMap(key, val) - return t - - case chanTag: - t := new(types.Chan) - if p.trackAllTypes { - p.record(t) - } - - var dir types.ChanDir - // tag values must match the constants in cmd/compile/internal/gc/go.go - switch d := p.int(); d { - case 1 /* Crecv */ : - dir = types.RecvOnly - case 2 /* Csend */ : - dir = types.SendOnly - case 3 /* Cboth */ : - dir = types.SendRecv - default: - errorf("unexpected channel dir %d", d) - } - val := p.typ(parent) - *t = *types.NewChan(dir, val) - return t - - default: - errorf("unexpected type tag %d", i) // panics - panic("unreachable") - } -} - -func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) { - if n := p.int(); n > 0 { - fields = make([]*types.Var, n) - tags = make([]string, n) - for i := range fields { - fields[i], tags[i] = p.field(parent) - } - } - return -} - -func (p *importer) field(parent *types.Package) (*types.Var, string) { - pos := p.pos() - pkg, name, alias := p.fieldName(parent) - typ := p.typ(parent) - tag := p.string() - - anonymous := false - if name == "" { - // anonymous field - typ must be T or *T and T must be a type name - switch typ := deref(typ).(type) { - case *types.Basic: // basic types are named types - pkg = nil // // objects defined in Universe scope have no package - name = typ.Name() - case *types.Named: - name = typ.Obj().Name() - default: - errorf("named base type expected") - } - anonymous = true - } else if alias { - // anonymous field: we have an explicit name because it's an alias - anonymous = true - } - - return types.NewField(pos, pkg, name, typ, anonymous), tag -} - -func (p *importer) methodList(parent *types.Package) (methods []*types.Func) { - if n := p.int(); n > 0 { - methods = make([]*types.Func, n) - for i := range methods { - methods[i] = p.method(parent) - } - } - return -} - -func (p *importer) method(parent *types.Package) *types.Func { - pos := p.pos() - pkg, name, _ := p.fieldName(parent) - params, isddd := p.paramList() - result, _ := p.paramList() - sig := types.NewSignature(nil, params, result, isddd) - return types.NewFunc(pos, pkg, name, sig) -} - -func (p *importer) fieldName(parent *types.Package) (pkg *types.Package, name string, alias bool) { - name = p.string() - pkg = parent - if pkg == nil { - // use the imported package instead - pkg = p.pkgList[0] - } - if p.version == 0 && name == "_" { - // version 0 didn't export a package for _ fields - return - } - switch name { - case "": - // 1) field name matches base type name and is exported: nothing to do - case "?": - // 2) field name matches base type name and is not exported: need package - name = "" - pkg = p.pkg() - case "@": - // 3) field name doesn't match type name (alias) - name = p.string() - alias = true - fallthrough - default: - if !exported(name) { - pkg = p.pkg() - } - } - return -} - -func (p *importer) paramList() (*types.Tuple, bool) { - n := p.int() - if n == 0 { - return nil, false - } - // negative length indicates unnamed parameters - named := true - if n < 0 { - n = -n - 
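`fieldName` above decodes the exporter's three-case convention for embedded fields. A sketch of the encoding side only (package qualification omitted; `encodeFieldName` is an invented helper):

```go
package main

import (
	"fmt"
	"go/ast"
)

// encodeFieldName applies the convention for embedded fields:
//   ""  name equals the base type name and is exported
//   "?" name equals the base type name but is unexported (package follows)
//   "@" an explicit alias name follows
func encodeFieldName(name, baseType string, embedded bool) []string {
	if embedded {
		if name == baseType {
			if ast.IsExported(name) {
				return []string{""}
			}
			return []string{"?"}
		}
		return []string{"@", name}
	}
	return []string{name}
}

func main() {
	fmt.Printf("%q\n", encodeFieldName("Reader", "Reader", true)) // [""]
	fmt.Printf("%q\n", encodeFieldName("mutex", "mutex", true))   // ["?"]
	fmt.Printf("%q\n", encodeFieldName("R", "Reader", true))      // ["@" "R"]
	fmt.Printf("%q\n", encodeFieldName("Count", "", false))       // ["Count"]
}
```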
named = false - } - // n > 0 - params := make([]*types.Var, n) - isddd := false - for i := range params { - params[i], isddd = p.param(named) - } - return types.NewTuple(params...), isddd -} - -func (p *importer) param(named bool) (*types.Var, bool) { - t := p.typ(nil) - td, isddd := t.(*dddSlice) - if isddd { - t = types.NewSlice(td.elem) - } - - var pkg *types.Package - var name string - if named { - name = p.string() - if name == "" { - errorf("expected named parameter") - } - if name != "_" { - pkg = p.pkg() - } - if i := strings.Index(name, "·"); i > 0 { - name = name[:i] // cut off gc-specific parameter numbering - } - } - - // read and discard compiler-specific info - p.string() - - return types.NewVar(token.NoPos, pkg, name, t), isddd -} - -func exported(name string) bool { - ch, _ := utf8.DecodeRuneInString(name) - return unicode.IsUpper(ch) -} - -func (p *importer) value() constant.Value { - switch tag := p.tagOrIndex(); tag { - case falseTag: - return constant.MakeBool(false) - case trueTag: - return constant.MakeBool(true) - case int64Tag: - return constant.MakeInt64(p.int64()) - case floatTag: - return p.float() - case complexTag: - re := p.float() - im := p.float() - return constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) - case stringTag: - return constant.MakeString(p.string()) - case unknownTag: - return constant.MakeUnknown() - default: - errorf("unexpected value tag %d", tag) // panics - panic("unreachable") - } -} - -func (p *importer) float() constant.Value { - sign := p.int() - if sign == 0 { - return constant.MakeInt64(0) - } - - exp := p.int() - mant := []byte(p.string()) // big endian - - // remove leading 0's if any - for len(mant) > 0 && mant[0] == 0 { - mant = mant[1:] - } - - // convert to little endian - // TODO(gri) go/constant should have a more direct conversion function - // (e.g., once it supports a big.Float based implementation) - for i, j := 0, len(mant)-1; i < j; i, j = i+1, j-1 { - mant[i], mant[j] = mant[j], mant[i] - } - - // adjust exponent (constant.MakeFromBytes creates an integer value, - // but mant represents the mantissa bits such that 0.5 <= mant < 1.0) - exp -= len(mant) << 3 - if len(mant) > 0 { - for msd := mant[len(mant)-1]; msd&0x80 == 0; msd <<= 1 { - exp++ - } - } - - x := constant.MakeFromBytes(mant) - switch { - case exp < 0: - d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) - x = constant.BinaryOp(x, token.QUO, d) - case exp > 0: - x = constant.Shift(x, token.SHL, uint(exp)) - } - - if sign < 0 { - x = constant.UnaryOp(token.SUB, x, 0) - } - return x -} - -// ---------------------------------------------------------------------------- -// Low-level decoders - -func (p *importer) tagOrIndex() int { - if p.debugFormat { - p.marker('t') - } - - return int(p.rawInt64()) -} - -func (p *importer) int() int { - x := p.int64() - if int64(int(x)) != x { - errorf("exported integer too large") - } - return int(x) -} - -func (p *importer) int64() int64 { - if p.debugFormat { - p.marker('i') - } - - return p.rawInt64() -} - -func (p *importer) path() string { - if p.debugFormat { - p.marker('p') - } - // if the path was seen before, i is its index (>= 0) - // (the empty string is at index 0) - i := p.rawInt64() - if i >= 0 { - return p.pathList[i] - } - // otherwise, i is the negative path length (< 0) - a := make([]string, -i) - for n := range a { - a[n] = p.string() - } - s := strings.Join(a, "/") - p.pathList = append(p.pathList, s) - return s -} - -func (p *importer) string() string { - if p.debugFormat { - 
p.marker('s') - } - // if the string was seen before, i is its index (>= 0) - // (the empty string is at index 0) - i := p.rawInt64() - if i >= 0 { - return p.strList[i] - } - // otherwise, i is the negative string length (< 0) - if n := int(-i); n <= cap(p.buf) { - p.buf = p.buf[:n] - } else { - p.buf = make([]byte, n) - } - for i := range p.buf { - p.buf[i] = p.rawByte() - } - s := string(p.buf) - p.strList = append(p.strList, s) - return s -} - -func (p *importer) marker(want byte) { - if got := p.rawByte(); got != want { - errorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read) - } - - pos := p.read - if n := int(p.rawInt64()); n != pos { - errorf("incorrect position: got %d; want %d", n, pos) - } -} - -// rawInt64 should only be used by low-level decoders. -func (p *importer) rawInt64() int64 { - i, err := binary.ReadVarint(p) - if err != nil { - errorf("read error: %v", err) - } - return i -} - -// rawStringln should only be used to read the initial version string. -func (p *importer) rawStringln(b byte) string { - p.buf = p.buf[:0] - for b != '\n' { - p.buf = append(p.buf, b) - b = p.rawByte() - } - return string(p.buf) -} - -// needed for binary.ReadVarint in rawInt64 -func (p *importer) ReadByte() (byte, error) { - return p.rawByte(), nil -} - -// byte is the bottleneck interface for reading p.data. -// It unescapes '|' 'S' to '$' and '|' '|' to '|'. -// rawByte should only be used by low-level decoders. -func (p *importer) rawByte() byte { - b := p.data[0] - r := 1 - if b == '|' { - b = p.data[1] - r = 2 - switch b { - case 'S': - b = '$' - case '|': - // nothing to do - default: - errorf("unexpected escape sequence in export data") - } - } - p.data = p.data[r:] - p.read += r - return b - -} - -// ---------------------------------------------------------------------------- -// Export format - -// Tags. Must be < 0. 
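`rawInt64` on both sides is a plain signed varint; the importer's `ReadByte` method exists only to satisfy the `io.ByteReader` that `binary.ReadVarint` requires. A standalone round trip:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	// Write a few signed values the way the exporter's rawInt64 does.
	var buf bytes.Buffer
	var tmp [binary.MaxVarintLen64]byte
	for _, x := range []int64{0, -3, 300, -70000} {
		n := binary.PutVarint(tmp[:], x)
		buf.Write(tmp[:n])
	}

	// Read them back; bytes.Reader supplies ReadByte for us.
	r := bytes.NewReader(buf.Bytes())
	for {
		x, err := binary.ReadVarint(r)
		if err != nil {
			break // io.EOF once the stream is exhausted
		}
		fmt.Println(x)
	}
}
```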
-const ( - // Objects - packageTag = -(iota + 1) - constTag - typeTag - varTag - funcTag - endTag - - // Types - namedTag - arrayTag - sliceTag - dddTag - structTag - pointerTag - signatureTag - interfaceTag - mapTag - chanTag - - // Values - falseTag - trueTag - int64Tag - floatTag - fractionTag // not used by gc - complexTag - stringTag - nilTag // only used by gc (appears in exported inlined function bodies) - unknownTag // not used by gc (only appears in packages with errors) - - // Type aliases - aliasTag -) - -var predeclared = []types.Type{ - // basic types - types.Typ[types.Bool], - types.Typ[types.Int], - types.Typ[types.Int8], - types.Typ[types.Int16], - types.Typ[types.Int32], - types.Typ[types.Int64], - types.Typ[types.Uint], - types.Typ[types.Uint8], - types.Typ[types.Uint16], - types.Typ[types.Uint32], - types.Typ[types.Uint64], - types.Typ[types.Uintptr], - types.Typ[types.Float32], - types.Typ[types.Float64], - types.Typ[types.Complex64], - types.Typ[types.Complex128], - types.Typ[types.String], - - // basic type aliases - types.Universe.Lookup("byte").Type(), - types.Universe.Lookup("rune").Type(), - - // error - types.Universe.Lookup("error").Type(), - - // untyped types - types.Typ[types.UntypedBool], - types.Typ[types.UntypedInt], - types.Typ[types.UntypedRune], - types.Typ[types.UntypedFloat], - types.Typ[types.UntypedComplex], - types.Typ[types.UntypedString], - types.Typ[types.UntypedNil], - - // package unsafe - types.Typ[types.UnsafePointer], - - // invalid type - types.Typ[types.Invalid], // only appears in packages with errors - - // used internally by gc; never used by this package or in .a files - anyType{}, -} - -type anyType struct{} - -func (t anyType) Underlying() types.Type { return t } -func (t anyType) String() string { return "any" } diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/gcimporter15/exportdata.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/gcimporter15/exportdata.go deleted file mode 100644 index f33dc5613e71..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/gcimporter15/exportdata.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go. - -// This file implements FindExportData. - -package gcimporter - -import ( - "bufio" - "fmt" - "io" - "strconv" - "strings" -) - -func readGopackHeader(r *bufio.Reader) (name string, size int, err error) { - // See $GOROOT/include/ar.h. - hdr := make([]byte, 16+12+6+6+8+10+2) - _, err = io.ReadFull(r, hdr) - if err != nil { - return - } - // leave for debugging - if false { - fmt.Printf("header: %s", hdr) - } - s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10])) - size, err = strconv.Atoi(s) - if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' { - err = fmt.Errorf("invalid archive header") - return - } - name = strings.TrimSpace(string(hdr[:16])) - return -} - -// FindExportData positions the reader r at the beginning of the -// export data section of an underlying GC-created object/archive -// file by reading from it. The reader must be positioned at the -// start of the file before calling this function. The hdr result -// is the string before the export data, either "$$" or "$$B". -// -func FindExportData(r *bufio.Reader) (hdr string, err error) { - // Read first line to make sure this is an object file. 
- line, err := r.ReadSlice('\n') - if err != nil { - err = fmt.Errorf("can't find export data (%v)", err) - return - } - - if string(line) == "!\n" { - // Archive file. Scan to __.PKGDEF. - var name string - if name, _, err = readGopackHeader(r); err != nil { - return - } - - // First entry should be __.PKGDEF. - if name != "__.PKGDEF" { - err = fmt.Errorf("go archive is missing __.PKGDEF") - return - } - - // Read first line of __.PKGDEF data, so that line - // is once again the first line of the input. - if line, err = r.ReadSlice('\n'); err != nil { - err = fmt.Errorf("can't find export data (%v)", err) - return - } - } - - // Now at __.PKGDEF in archive or still at beginning of file. - // Either way, line should begin with "go object ". - if !strings.HasPrefix(string(line), "go object ") { - err = fmt.Errorf("not a Go object file") - return - } - - // Skip over object header to export data. - // Begins after first line starting with $$. - for line[0] != '$' { - if line, err = r.ReadSlice('\n'); err != nil { - err = fmt.Errorf("can't find export data (%v)", err) - return - } - } - hdr = string(line) - - return -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/gcimporter15/gcimporter.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/gcimporter15/gcimporter.go deleted file mode 100644 index 370203b9d928..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/gcimporter15/gcimporter.go +++ /dev/null @@ -1,1041 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file is a copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go, -// but it also contains the original source-based importer code for Go1.6. -// Once we stop supporting 1.6, we can remove that code. - -// Package gcimporter15 provides various functions for reading -// gc-generated object files that can be used to implement the -// Importer interface defined by the Go 1.5 standard library package. -// -// Deprecated: this package will be deleted in October 2017. -// New code should use golang.org/x/tools/go/gcexportdata. -// -package gcimporter - -import ( - "bufio" - "errors" - "fmt" - "go/build" - exact "go/constant" - "go/token" - "go/types" - "io" - "io/ioutil" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - "text/scanner" -) - -// debugging/development support -const debug = false - -var pkgExts = [...]string{".a", ".o"} - -// FindPkg returns the filename and unique package id for an import -// path based on package information provided by build.Import (using -// the build.Default build.Context). A relative srcDir is interpreted -// relative to the current working directory. -// If no file was found, an empty filename is returned. -// -func FindPkg(path, srcDir string) (filename, id string) { - if path == "" { - return - } - - var noext string - switch { - default: - // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" - // Don't require the source files to be present. 
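`FindExportData` above skips the object-file header line by line until the `$$` marker. A cut-down, runnable version over an in-memory "object file":

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

// findExportData advances r past the header and returns the marker line,
// mirroring the structure of FindExportData (archive handling omitted).
func findExportData(r *bufio.Reader) (string, error) {
	line, err := r.ReadSlice('\n')
	if err != nil {
		return "", err
	}
	if !strings.HasPrefix(string(line), "go object ") {
		return "", fmt.Errorf("not a Go object file")
	}
	for line[0] != '$' {
		if line, err = r.ReadSlice('\n'); err != nil {
			return "", err
		}
	}
	return string(line), nil
}

func main() {
	obj := "go object linux amd64 go1.11.2\nbuild id \"xyz\"\n$$B\n<binary export data>"
	hdr, err := findExportData(bufio.NewReader(strings.NewReader(obj)))
	fmt.Printf("%q %v\n", hdr, err) // "$$B\n" <nil>
}
```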
- if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 - srcDir = abs - } - bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary) - if bp.PkgObj == "" { - return - } - noext = strings.TrimSuffix(bp.PkgObj, ".a") - id = bp.ImportPath - - case build.IsLocalImport(path): - // "./x" -> "/this/directory/x.ext", "/this/directory/x" - noext = filepath.Join(srcDir, path) - id = noext - - case filepath.IsAbs(path): - // for completeness only - go/build.Import - // does not support absolute imports - // "/x" -> "/x.ext", "/x" - noext = path - id = path - } - - if false { // for debugging - if path != id { - fmt.Printf("%s -> %s\n", path, id) - } - } - - // try extensions - for _, ext := range pkgExts { - filename = noext + ext - if f, err := os.Stat(filename); err == nil && !f.IsDir() { - return - } - } - - filename = "" // not found - return -} - -// ImportData imports a package by reading the gc-generated export data, -// adds the corresponding package object to the packages map indexed by id, -// and returns the object. -// -// The packages map must contains all packages already imported. The data -// reader position must be the beginning of the export data section. The -// filename is only used in error messages. -// -// If packages[id] contains the completely imported package, that package -// can be used directly, and there is no need to call this function (but -// there is also no harm but for extra time used). -// -func ImportData(packages map[string]*types.Package, filename, id string, data io.Reader) (pkg *types.Package, err error) { - // support for parser error handling - defer func() { - switch r := recover().(type) { - case nil: - // nothing to do - case importError: - err = r - default: - panic(r) // internal error - } - }() - - var p parser - p.init(filename, id, data, packages) - pkg = p.parseExport() - - return -} - -// Import imports a gc-generated package given its import path and srcDir, adds -// the corresponding package object to the packages map, and returns the object. -// The packages map must contain all packages already imported. -// -func Import(packages map[string]*types.Package, path, srcDir string) (pkg *types.Package, err error) { - filename, id := FindPkg(path, srcDir) - if filename == "" { - if path == "unsafe" { - return types.Unsafe, nil - } - err = fmt.Errorf("can't find import: %s", id) - return - } - - // no need to re-import if the package was imported completely before - if pkg = packages[id]; pkg != nil && pkg.Complete() { - return - } - - // open file - f, err := os.Open(filename) - if err != nil { - return - } - defer func() { - f.Close() - if err != nil { - // add file name to error - err = fmt.Errorf("reading export data: %s: %v", filename, err) - } - }() - - var hdr string - buf := bufio.NewReader(f) - if hdr, err = FindExportData(buf); err != nil { - return - } - - switch hdr { - case "$$\n": - return ImportData(packages, filename, id, buf) - case "$$B\n": - var data []byte - data, err = ioutil.ReadAll(buf) - if err == nil { - fset := token.NewFileSet() - _, pkg, err = BImportData(fset, packages, data, id) - return - } - default: - err = fmt.Errorf("unknown export data header: %q", hdr) - } - - return -} - -// ---------------------------------------------------------------------------- -// Parser - -// TODO(gri) Imported objects don't have position information. -// Ideally use the debug table line info; alternatively -// create some fake position (or the position of the -// import). 
That way error messages referring to imported -// objects can print meaningful information. - -// parser parses the exports inside a gc compiler-produced -// object/archive file and populates its scope with the results. -type parser struct { - scanner scanner.Scanner - tok rune // current token - lit string // literal string; only valid for Ident, Int, String tokens - id string // package id of imported package - sharedPkgs map[string]*types.Package // package id -> package object (across importer) - localPkgs map[string]*types.Package // package id -> package object (just this package) -} - -func (p *parser) init(filename, id string, src io.Reader, packages map[string]*types.Package) { - p.scanner.Init(src) - p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) } - p.scanner.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanChars | scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments - p.scanner.Whitespace = 1<<'\t' | 1<<' ' - p.scanner.Filename = filename // for good error messages - p.next() - p.id = id - p.sharedPkgs = packages - if debug { - // check consistency of packages map - for _, pkg := range packages { - if pkg.Name() == "" { - fmt.Printf("no package name for %s\n", pkg.Path()) - } - } - } -} - -func (p *parser) next() { - p.tok = p.scanner.Scan() - switch p.tok { - case scanner.Ident, scanner.Int, scanner.Char, scanner.String, '·': - p.lit = p.scanner.TokenText() - default: - p.lit = "" - } - if debug { - fmt.Printf("%s: %q -> %q\n", scanner.TokenString(p.tok), p.scanner.TokenText(), p.lit) - } -} - -func declTypeName(pkg *types.Package, name string) *types.TypeName { - scope := pkg.Scope() - if obj := scope.Lookup(name); obj != nil { - return obj.(*types.TypeName) - } - obj := types.NewTypeName(token.NoPos, pkg, name, nil) - // a named type may be referred to before the underlying type - // is known - set it up - types.NewNamed(obj, nil, nil) - scope.Insert(obj) - return obj -} - -// ---------------------------------------------------------------------------- -// Error handling - -// Internal errors are boxed as importErrors. -type importError struct { - pos scanner.Position - err error -} - -func (e importError) Error() string { - return fmt.Sprintf("import error %s (byte offset = %d): %s", e.pos, e.pos.Offset, e.err) -} - -func (p *parser) error(err interface{}) { - if s, ok := err.(string); ok { - err = errors.New(s) - } - // panic with a runtime.Error if err is not an error - panic(importError{p.scanner.Pos(), err.(error)}) -} - -func (p *parser) errorf(format string, args ...interface{}) { - p.error(fmt.Sprintf(format, args...)) -} - -func (p *parser) expect(tok rune) string { - lit := p.lit - if p.tok != tok { - p.errorf("expected %s, got %s (%s)", scanner.TokenString(tok), scanner.TokenString(p.tok), lit) - } - p.next() - return lit -} - -func (p *parser) expectSpecial(tok string) { - sep := 'x' // not white space - i := 0 - for i < len(tok) && p.tok == rune(tok[i]) && sep > ' ' { - sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token - p.next() - i++ - } - if i < len(tok) { - p.errorf("expected %q, got %q", tok, tok[0:i]) - } -} - -func (p *parser) expectKeyword(keyword string) { - lit := p.expect(scanner.Ident) - if lit != keyword { - p.errorf("expected keyword %s, got %q", keyword, lit) - } -} - -// ---------------------------------------------------------------------------- -// Qualified and unqualified names - -// PackageId = string_lit . 
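The parser above keeps one token of lookahead (`p.tok`, `p.lit`) and funnels every mismatch through `expect`. The same skeleton on a toy input, using `text/scanner` just as the deleted code does:

```go
package main

import (
	"fmt"
	"strings"
	"text/scanner"
)

type miniParser struct {
	s   scanner.Scanner
	tok rune   // current token
	lit string // its literal text
}

func (p *miniParser) next() {
	p.tok = p.s.Scan()
	p.lit = p.s.TokenText()
}

// expect consumes the current token if it matches, else panics with a
// descriptive message, like the importer's error handling.
func (p *miniParser) expect(tok rune) string {
	lit := p.lit
	if p.tok != tok {
		panic(fmt.Sprintf("expected %s, got %s (%q)",
			scanner.TokenString(tok), scanner.TokenString(p.tok), lit))
	}
	p.next()
	return lit
}

func main() {
	var p miniParser
	p.s.Init(strings.NewReader(`x = "hello"`))
	p.next()
	name := p.expect(scanner.Ident)
	p.expect('=')
	val := p.expect(scanner.String)
	fmt.Println(name, val) // x "hello"
}
```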
-// -func (p *parser) parsePackageId() string { - id, err := strconv.Unquote(p.expect(scanner.String)) - if err != nil { - p.error(err) - } - // id == "" stands for the imported package id - // (only known at time of package installation) - if id == "" { - id = p.id - } - return id -} - -// PackageName = ident . -// -func (p *parser) parsePackageName() string { - return p.expect(scanner.Ident) -} - -// dotIdentifier = ( ident | '·' ) { ident | int | '·' } . -func (p *parser) parseDotIdent() string { - ident := "" - if p.tok != scanner.Int { - sep := 'x' // not white space - for (p.tok == scanner.Ident || p.tok == scanner.Int || p.tok == '·') && sep > ' ' { - ident += p.lit - sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token - p.next() - } - } - if ident == "" { - p.expect(scanner.Ident) // use expect() for error handling - } - return ident -} - -// QualifiedName = "@" PackageId "." ( "?" | dotIdentifier ) . -// -func (p *parser) parseQualifiedName() (id, name string) { - p.expect('@') - id = p.parsePackageId() - p.expect('.') - // Per rev f280b8a485fd (10/2/2013), qualified names may be used for anonymous fields. - if p.tok == '?' { - p.next() - } else { - name = p.parseDotIdent() - } - return -} - -// getPkg returns the package for a given id. If the package is -// not found, create the package and add it to the p.localPkgs -// and p.sharedPkgs maps. name is the (expected) name of the -// package. If name == "", the package name is expected to be -// set later via an import clause in the export data. -// -// id identifies a package, usually by a canonical package path like -// "encoding/json" but possibly by a non-canonical import path like -// "./json". -// -func (p *parser) getPkg(id, name string) *types.Package { - // package unsafe is not in the packages maps - handle explicitly - if id == "unsafe" { - return types.Unsafe - } - - pkg := p.localPkgs[id] - if pkg == nil { - // first import of id from this package - pkg = p.sharedPkgs[id] - if pkg == nil { - // first import of id by this importer; - // add (possibly unnamed) pkg to shared packages - pkg = types.NewPackage(id, name) - p.sharedPkgs[id] = pkg - } - // add (possibly unnamed) pkg to local packages - if p.localPkgs == nil { - p.localPkgs = make(map[string]*types.Package) - } - p.localPkgs[id] = pkg - } else if name != "" { - // package exists already and we have an expected package name; - // make sure names match or set package name if necessary - if pname := pkg.Name(); pname == "" { - pkg.SetName(name) - } else if pname != name { - p.errorf("%s package name mismatch: %s (given) vs %s (expected)", id, pname, name) - } - } - return pkg -} - -// parseExportedName is like parseQualifiedName, but -// the package id is resolved to an imported *types.Package. -// -func (p *parser) parseExportedName() (pkg *types.Package, name string) { - id, name := p.parseQualifiedName() - pkg = p.getPkg(id, "") - return -} - -// ---------------------------------------------------------------------------- -// Types - -// BasicType = identifier . -// -func (p *parser) parseBasicType() types.Type { - id := p.expect(scanner.Ident) - obj := types.Universe.Lookup(id) - if obj, ok := obj.(*types.TypeName); ok { - return obj.Type() - } - p.errorf("not a basic type: %s", id) - return nil -} - -// ArrayType = "[" int_lit "]" Type . 
-// -func (p *parser) parseArrayType(parent *types.Package) types.Type { - // "[" already consumed and lookahead known not to be "]" - lit := p.expect(scanner.Int) - p.expect(']') - elem := p.parseType(parent) - n, err := strconv.ParseInt(lit, 10, 64) - if err != nil { - p.error(err) - } - return types.NewArray(elem, n) -} - -// MapType = "map" "[" Type "]" Type . -// -func (p *parser) parseMapType(parent *types.Package) types.Type { - p.expectKeyword("map") - p.expect('[') - key := p.parseType(parent) - p.expect(']') - elem := p.parseType(parent) - return types.NewMap(key, elem) -} - -// Name = identifier | "?" | QualifiedName . -// -// For unqualified and anonymous names, the returned package is the parent -// package unless parent == nil, in which case the returned package is the -// package being imported. (The parent package is not nil if the the name -// is an unqualified struct field or interface method name belonging to a -// type declared in another package.) -// -// For qualified names, the returned package is nil (and not created if -// it doesn't exist yet) unless materializePkg is set (which creates an -// unnamed package with valid package path). In the latter case, a -// subsequent import clause is expected to provide a name for the package. -// -func (p *parser) parseName(parent *types.Package, materializePkg bool) (pkg *types.Package, name string) { - pkg = parent - if pkg == nil { - pkg = p.sharedPkgs[p.id] - } - switch p.tok { - case scanner.Ident: - name = p.lit - p.next() - case '?': - // anonymous - p.next() - case '@': - // exported name prefixed with package path - pkg = nil - var id string - id, name = p.parseQualifiedName() - if materializePkg { - pkg = p.getPkg(id, "") - } - default: - p.error("name expected") - } - return -} - -func deref(typ types.Type) types.Type { - if p, _ := typ.(*types.Pointer); p != nil { - return p.Elem() - } - return typ -} - -// Field = Name Type [ string_lit ] . -// -func (p *parser) parseField(parent *types.Package) (*types.Var, string) { - pkg, name := p.parseName(parent, true) - - if name == "_" { - // Blank fields should be package-qualified because they - // are unexported identifiers, but gc does not qualify them. - // Assuming that the ident belongs to the current package - // causes types to change during re-exporting, leading - // to spurious "can't assign A to B" errors from go/types. - // As a workaround, pretend all blank fields belong - // to the same unique dummy package. - const blankpkg = "<_>" - pkg = p.getPkg(blankpkg, blankpkg) - } - - typ := p.parseType(parent) - anonymous := false - if name == "" { - // anonymous field - typ must be T or *T and T must be a type name - switch typ := deref(typ).(type) { - case *types.Basic: // basic types are named types - pkg = nil // objects defined in Universe scope have no package - name = typ.Name() - case *types.Named: - name = typ.Obj().Name() - default: - p.errorf("anonymous field expected") - } - anonymous = true - } - tag := "" - if p.tok == scanner.String { - s := p.expect(scanner.String) - var err error - tag, err = strconv.Unquote(s) - if err != nil { - p.errorf("invalid struct tag %s: %s", s, err) - } - } - return types.NewField(token.NoPos, pkg, name, typ, anonymous), tag -} - -// StructType = "struct" "{" [ FieldList ] "}" . -// FieldList = Field { ";" Field } . 
-// -func (p *parser) parseStructType(parent *types.Package) types.Type { - var fields []*types.Var - var tags []string - - p.expectKeyword("struct") - p.expect('{') - for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ { - if i > 0 { - p.expect(';') - } - fld, tag := p.parseField(parent) - if tag != "" && tags == nil { - tags = make([]string, i) - } - if tags != nil { - tags = append(tags, tag) - } - fields = append(fields, fld) - } - p.expect('}') - - return types.NewStruct(fields, tags) -} - -// Parameter = ( identifier | "?" ) [ "..." ] Type [ string_lit ] . -// -func (p *parser) parseParameter() (par *types.Var, isVariadic bool) { - _, name := p.parseName(nil, false) - // remove gc-specific parameter numbering - if i := strings.Index(name, "·"); i >= 0 { - name = name[:i] - } - if p.tok == '.' { - p.expectSpecial("...") - isVariadic = true - } - typ := p.parseType(nil) - if isVariadic { - typ = types.NewSlice(typ) - } - // ignore argument tag (e.g. "noescape") - if p.tok == scanner.String { - p.next() - } - // TODO(gri) should we provide a package? - par = types.NewVar(token.NoPos, nil, name, typ) - return -} - -// Parameters = "(" [ ParameterList ] ")" . -// ParameterList = { Parameter "," } Parameter . -// -func (p *parser) parseParameters() (list []*types.Var, isVariadic bool) { - p.expect('(') - for p.tok != ')' && p.tok != scanner.EOF { - if len(list) > 0 { - p.expect(',') - } - par, variadic := p.parseParameter() - list = append(list, par) - if variadic { - if isVariadic { - p.error("... not on final argument") - } - isVariadic = true - } - } - p.expect(')') - - return -} - -// Signature = Parameters [ Result ] . -// Result = Type | Parameters . -// -func (p *parser) parseSignature(recv *types.Var) *types.Signature { - params, isVariadic := p.parseParameters() - - // optional result type - var results []*types.Var - if p.tok == '(' { - var variadic bool - results, variadic = p.parseParameters() - if variadic { - p.error("... not permitted on result type") - } - } - - return types.NewSignature(recv, types.NewTuple(params...), types.NewTuple(results...), isVariadic) -} - -// InterfaceType = "interface" "{" [ MethodList ] "}" . -// MethodList = Method { ";" Method } . -// Method = Name Signature . -// -// The methods of embedded interfaces are always "inlined" -// by the compiler and thus embedded interfaces are never -// visible in the export data. -// -func (p *parser) parseInterfaceType(parent *types.Package) types.Type { - var methods []*types.Func - - p.expectKeyword("interface") - p.expect('{') - for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ { - if i > 0 { - p.expect(';') - } - pkg, name := p.parseName(parent, true) - sig := p.parseSignature(nil) - methods = append(methods, types.NewFunc(token.NoPos, pkg, name, sig)) - } - p.expect('}') - - // Complete requires the type's embedded interfaces to be fully defined, - // but we do not define any - return types.NewInterface(methods, nil).Complete() -} - -// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type . 
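`parseParameter` above rewrites a `...T` parameter to `[]T` and records the variadic flag, which is exactly the shape `types.NewSignature` expects. Building such a signature directly:

```go
package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	format := types.NewVar(token.NoPos, nil, "format", types.Typ[types.String])
	// The final parameter must already be a slice when variadic is true,
	// matching the ...T -> []T rewrite performed by the parser.
	args := types.NewVar(token.NoPos, nil, "args",
		types.NewSlice(types.Typ[types.String]))
	sig := types.NewSignature(nil, types.NewTuple(format, args), nil, true)
	fmt.Println(sig) // func(format string, args ...string)
}
```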
-// -func (p *parser) parseChanType(parent *types.Package) types.Type { - dir := types.SendRecv - if p.tok == scanner.Ident { - p.expectKeyword("chan") - if p.tok == '<' { - p.expectSpecial("<-") - dir = types.SendOnly - } - } else { - p.expectSpecial("<-") - p.expectKeyword("chan") - dir = types.RecvOnly - } - elem := p.parseType(parent) - return types.NewChan(dir, elem) -} - -// Type = -// BasicType | TypeName | ArrayType | SliceType | StructType | -// PointerType | FuncType | InterfaceType | MapType | ChanType | -// "(" Type ")" . -// -// BasicType = ident . -// TypeName = ExportedName . -// SliceType = "[" "]" Type . -// PointerType = "*" Type . -// FuncType = "func" Signature . -// -func (p *parser) parseType(parent *types.Package) types.Type { - switch p.tok { - case scanner.Ident: - switch p.lit { - default: - return p.parseBasicType() - case "struct": - return p.parseStructType(parent) - case "func": - // FuncType - p.next() - return p.parseSignature(nil) - case "interface": - return p.parseInterfaceType(parent) - case "map": - return p.parseMapType(parent) - case "chan": - return p.parseChanType(parent) - } - case '@': - // TypeName - pkg, name := p.parseExportedName() - return declTypeName(pkg, name).Type() - case '[': - p.next() // look ahead - if p.tok == ']' { - // SliceType - p.next() - return types.NewSlice(p.parseType(parent)) - } - return p.parseArrayType(parent) - case '*': - // PointerType - p.next() - return types.NewPointer(p.parseType(parent)) - case '<': - return p.parseChanType(parent) - case '(': - // "(" Type ")" - p.next() - typ := p.parseType(parent) - p.expect(')') - return typ - } - p.errorf("expected type, got %s (%q)", scanner.TokenString(p.tok), p.lit) - return nil -} - -// ---------------------------------------------------------------------------- -// Declarations - -// ImportDecl = "import" PackageName PackageId . -// -func (p *parser) parseImportDecl() { - p.expectKeyword("import") - name := p.parsePackageName() - p.getPkg(p.parsePackageId(), name) -} - -// int_lit = [ "+" | "-" ] { "0" ... "9" } . -// -func (p *parser) parseInt() string { - s := "" - switch p.tok { - case '-': - s = "-" - p.next() - case '+': - p.next() - } - return s + p.expect(scanner.Int) -} - -// number = int_lit [ "p" int_lit ] . -// -func (p *parser) parseNumber() (typ *types.Basic, val exact.Value) { - // mantissa - mant := exact.MakeFromLiteral(p.parseInt(), token.INT, 0) - if mant == nil { - panic("invalid mantissa") - } - - if p.lit == "p" { - // exponent (base 2) - p.next() - exp, err := strconv.ParseInt(p.parseInt(), 10, 0) - if err != nil { - p.error(err) - } - if exp < 0 { - denom := exact.MakeInt64(1) - denom = exact.Shift(denom, token.SHL, uint(-exp)) - typ = types.Typ[types.UntypedFloat] - val = exact.BinaryOp(mant, token.QUO, denom) - return - } - if exp > 0 { - mant = exact.Shift(mant, token.SHL, uint(exp)) - } - typ = types.Typ[types.UntypedFloat] - val = mant - return - } - - typ = types.Typ[types.UntypedInt] - val = mant - return -} - -// ConstDecl = "const" ExportedName [ Type ] "=" Literal . -// Literal = bool_lit | int_lit | float_lit | complex_lit | rune_lit | string_lit . -// bool_lit = "true" | "false" . -// complex_lit = "(" float_lit "+" float_lit "i" ")" . -// rune_lit = "(" int_lit "+" int_lit ")" . -// string_lit = `"` { unicode_char } `"` . 
-// -func (p *parser) parseConstDecl() { - p.expectKeyword("const") - pkg, name := p.parseExportedName() - - var typ0 types.Type - if p.tok != '=' { - // constant types are never structured - no need for parent type - typ0 = p.parseType(nil) - } - - p.expect('=') - var typ types.Type - var val exact.Value - switch p.tok { - case scanner.Ident: - // bool_lit - if p.lit != "true" && p.lit != "false" { - p.error("expected true or false") - } - typ = types.Typ[types.UntypedBool] - val = exact.MakeBool(p.lit == "true") - p.next() - - case '-', scanner.Int: - // int_lit - typ, val = p.parseNumber() - - case '(': - // complex_lit or rune_lit - p.next() - if p.tok == scanner.Char { - p.next() - p.expect('+') - typ = types.Typ[types.UntypedRune] - _, val = p.parseNumber() - p.expect(')') - break - } - _, re := p.parseNumber() - p.expect('+') - _, im := p.parseNumber() - p.expectKeyword("i") - p.expect(')') - typ = types.Typ[types.UntypedComplex] - val = exact.BinaryOp(re, token.ADD, exact.MakeImag(im)) - - case scanner.Char: - // rune_lit - typ = types.Typ[types.UntypedRune] - val = exact.MakeFromLiteral(p.lit, token.CHAR, 0) - p.next() - - case scanner.String: - // string_lit - typ = types.Typ[types.UntypedString] - val = exact.MakeFromLiteral(p.lit, token.STRING, 0) - p.next() - - default: - p.errorf("expected literal got %s", scanner.TokenString(p.tok)) - } - - if typ0 == nil { - typ0 = typ - } - - pkg.Scope().Insert(types.NewConst(token.NoPos, pkg, name, typ0, val)) -} - -// TypeDecl = "type" ExportedName Type . -// -func (p *parser) parseTypeDecl() { - p.expectKeyword("type") - pkg, name := p.parseExportedName() - obj := declTypeName(pkg, name) - - // The type object may have been imported before and thus already - // have a type associated with it. We still need to parse the type - // structure, but throw it away if the object already has a type. - // This ensures that all imports refer to the same type object for - // a given type declaration. - typ := p.parseType(pkg) - - if name := obj.Type().(*types.Named); name.Underlying() == nil { - name.SetUnderlying(typ) - } -} - -// VarDecl = "var" ExportedName Type . -// -func (p *parser) parseVarDecl() { - p.expectKeyword("var") - pkg, name := p.parseExportedName() - typ := p.parseType(pkg) - pkg.Scope().Insert(types.NewVar(token.NoPos, pkg, name, typ)) -} - -// Func = Signature [ Body ] . -// Body = "{" ... "}" . -// -func (p *parser) parseFunc(recv *types.Var) *types.Signature { - sig := p.parseSignature(recv) - if p.tok == '{' { - p.next() - for i := 1; i > 0; p.next() { - switch p.tok { - case '{': - i++ - case '}': - i-- - } - } - } - return sig -} - -// MethodDecl = "func" Receiver Name Func . -// Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" . -// -func (p *parser) parseMethodDecl() { - // "func" already consumed - p.expect('(') - recv, _ := p.parseParameter() // receiver - p.expect(')') - - // determine receiver base type object - base := deref(recv.Type()).(*types.Named) - - // parse method name, signature, and possibly inlined body - _, name := p.parseName(nil, false) - sig := p.parseFunc(recv) - - // methods always belong to the same package as the base type object - pkg := base.Obj().Pkg() - - // add method to type unless type was imported before - // and method exists already - // TODO(gri) This leads to a quadratic algorithm - ok for now because method counts are small. - base.AddMethod(types.NewFunc(token.NoPos, pkg, name, sig)) -} - -// FuncDecl = "func" ExportedName Func . 
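`parseNumber` above evaluates the textual format's `mant p exp` notation, where the exponent is base 2. A standalone evaluator for well-formed inputs (`parsePNumber` is an invented name; sign and error handling elided):

```go
package main

import (
	"fmt"
	"go/constant"
	"go/token"
	"strconv"
	"strings"
)

// parsePNumber evaluates "mant p exp", e.g. "25p-2" == 25 * 2^-2 == 25/4.
func parsePNumber(s string) constant.Value {
	parts := strings.SplitN(s, "p", 2)
	mant := constant.MakeFromLiteral(parts[0], token.INT, 0)
	if len(parts) == 1 {
		return mant
	}
	exp, err := strconv.ParseInt(parts[1], 10, 0)
	if err != nil {
		panic(err)
	}
	if exp < 0 {
		// Negative exponent: divide by 2^-exp, yielding an exact rational.
		denom := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp))
		return constant.BinaryOp(mant, token.QUO, denom)
	}
	return constant.Shift(mant, token.SHL, uint(exp))
}

func main() {
	fmt.Println(parsePNumber("25p-2").ExactString()) // 25/4
	fmt.Println(parsePNumber("3p4").ExactString())   // 48
}
```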
-// -func (p *parser) parseFuncDecl() { - // "func" already consumed - pkg, name := p.parseExportedName() - typ := p.parseFunc(nil) - pkg.Scope().Insert(types.NewFunc(token.NoPos, pkg, name, typ)) -} - -// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" . -// -func (p *parser) parseDecl() { - if p.tok == scanner.Ident { - switch p.lit { - case "import": - p.parseImportDecl() - case "const": - p.parseConstDecl() - case "type": - p.parseTypeDecl() - case "var": - p.parseVarDecl() - case "func": - p.next() // look ahead - if p.tok == '(' { - p.parseMethodDecl() - } else { - p.parseFuncDecl() - } - } - } - p.expect('\n') -} - -// ---------------------------------------------------------------------------- -// Export - -// Export = "PackageClause { Decl } "$$" . -// PackageClause = "package" PackageName [ "safe" ] "\n" . -// -func (p *parser) parseExport() *types.Package { - p.expectKeyword("package") - name := p.parsePackageName() - if p.tok == scanner.Ident && p.lit == "safe" { - // package was compiled with -u option - ignore - p.next() - } - p.expect('\n') - - pkg := p.getPkg(p.id, name) - - for p.tok != '$' && p.tok != scanner.EOF { - p.parseDecl() - } - - if ch := p.scanner.Peek(); p.tok != '$' || ch != '$' { - // don't call next()/expect() since reading past the - // export data may cause scanner errors (e.g. NUL chars) - p.errorf("expected '$$', got %s %c", scanner.TokenString(p.tok), ch) - } - - if n := p.scanner.ErrorCount; n != 0 { - p.errorf("expected no scanner errors, got %d", n) - } - - // Record all locally referenced packages as imports. - var imports []*types.Package - for id, pkg2 := range p.localPkgs { - if pkg2.Name() == "" { - p.errorf("%s package has no name", id) - } - if id == p.id { - continue // avoid self-edge - } - imports = append(imports, pkg2) - } - sort.Sort(byPath(imports)) - pkg.SetImports(imports) - - // package was imported completely and without errors - pkg.MarkComplete() - - return pkg -} - -type byPath []*types.Package - -func (a byPath) Len() int { return len(a) } -func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/gcimporter15/isAlias18.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/gcimporter15/isAlias18.go deleted file mode 100644 index 225ffeedfa4a..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/gcimporter15/isAlias18.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.9 - -package gcimporter - -import "go/types" - -func isAlias(obj *types.TypeName) bool { - return false // there are no type aliases before Go 1.9 -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/gcimporter15/isAlias19.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/gcimporter15/isAlias19.go deleted file mode 100644 index c2025d84a952..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/gcimporter15/isAlias19.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
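The `isAlias18.go`/`isAlias19.go` pair above selects an implementation by build tag because `TypeName.IsAlias` only exists from Go 1.9 onward. On Go 1.9+, the distinction it reports can be seen directly:

```go
package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	pkg := types.NewPackage("example.com/demo", "demo")

	// An alias: the TypeName is created with a ready-made, non-Named type.
	alias := types.NewTypeName(token.NoPos, pkg, "MyInt", types.Typ[types.Int])

	// A defined type: NewNamed binds the TypeName to a fresh *types.Named.
	defObj := types.NewTypeName(token.NoPos, pkg, "Celsius", nil)
	types.NewNamed(defObj, types.Typ[types.Float64], nil)

	fmt.Println(alias.IsAlias(), defObj.IsAlias()) // true false
}
```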
- -// +build go1.9 - -package gcimporter - -import "go/types" - -func isAlias(obj *types.TypeName) bool { - return obj.IsAlias() -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/loader/cgo.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/loader/cgo.go deleted file mode 100644 index 72c6f502d193..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/loader/cgo.go +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package loader - -// This file handles cgo preprocessing of files containing `import "C"`. -// -// DESIGN -// -// The approach taken is to run the cgo processor on the package's -// CgoFiles and parse the output, faking the filenames of the -// resulting ASTs so that the synthetic file containing the C types is -// called "C" (e.g. "~/go/src/net/C") and the preprocessed files -// have their original names (e.g. "~/go/src/net/cgo_unix.go"), -// not the names of the actual temporary files. -// -// The advantage of this approach is its fidelity to 'go build'. The -// downside is that the token.Position.Offset for each AST node is -// incorrect, being an offset within the temporary file. Line numbers -// should still be correct because of the //line comments. -// -// The logic of this file is mostly plundered from the 'go build' -// tool, which also invokes the cgo preprocessor. -// -// -// REJECTED ALTERNATIVE -// -// An alternative approach that we explored is to extend go/types' -// Importer mechanism to provide the identity of the importing package -// so that each time `import "C"` appears it resolves to a different -// synthetic package containing just the objects needed in that case. -// The loader would invoke cgo but parse only the cgo_types.go file -// defining the package-level objects, discarding the other files -// resulting from preprocessing. -// -// The benefit of this approach would have been that source-level -// syntax information would correspond exactly to the original cgo -// file, with no preprocessing involved, making source tools like -// godoc, guru, and eg happy. However, the approach was rejected -// due to the additional complexity it would impose on go/types. (It -// made for a beautiful demo, though.) -// -// cgo files, despite their *.go extension, are not legal Go source -// files per the specification since they may refer to unexported -// members of package "C" such as C.int. Also, a function such as -// C.getpwent has in effect two types, one matching its C type and one -// which additionally returns (errno C.int). The cgo preprocessor -// uses name mangling to distinguish these two functions in the -// processed code, but go/types would need to duplicate this logic in -// its handling of function calls, analogous to the treatment of map -// lookups in which y=m[k] and y,ok=m[k] are both legal. - -import ( - "fmt" - "go/ast" - "go/build" - "go/parser" - "go/token" - "io/ioutil" - "log" - "os" - "os/exec" - "path/filepath" - "regexp" - "strings" -) - -// processCgoFiles invokes the cgo preprocessor on bp.CgoFiles, parses -// the output and returns the resulting ASTs. 
-// -func processCgoFiles(bp *build.Package, fset *token.FileSet, DisplayPath func(path string) string, mode parser.Mode) ([]*ast.File, error) { - tmpdir, err := ioutil.TempDir("", strings.Replace(bp.ImportPath, "/", "_", -1)+"_C") - if err != nil { - return nil, err - } - defer os.RemoveAll(tmpdir) - - pkgdir := bp.Dir - if DisplayPath != nil { - pkgdir = DisplayPath(pkgdir) - } - - cgoFiles, cgoDisplayFiles, err := runCgo(bp, pkgdir, tmpdir) - if err != nil { - return nil, err - } - var files []*ast.File - for i := range cgoFiles { - rd, err := os.Open(cgoFiles[i]) - if err != nil { - return nil, err - } - display := filepath.Join(bp.Dir, cgoDisplayFiles[i]) - f, err := parser.ParseFile(fset, display, rd, mode) - rd.Close() - if err != nil { - return nil, err - } - files = append(files, f) - } - return files, nil -} - -var cgoRe = regexp.MustCompile(`[/\\:]`) - -// runCgo invokes the cgo preprocessor on bp.CgoFiles and returns two -// lists of files: the resulting processed files (in temporary -// directory tmpdir) and the corresponding names of the unprocessed files. -// -// runCgo is adapted from (*builder).cgo in -// $GOROOT/src/cmd/go/build.go, but these features are unsupported: -// Objective C, CGOPKGPATH, CGO_FLAGS. -// -func runCgo(bp *build.Package, pkgdir, tmpdir string) (files, displayFiles []string, err error) { - cgoCPPFLAGS, _, _, _ := cflags(bp, true) - _, cgoexeCFLAGS, _, _ := cflags(bp, false) - - if len(bp.CgoPkgConfig) > 0 { - pcCFLAGS, err := pkgConfigFlags(bp) - if err != nil { - return nil, nil, err - } - cgoCPPFLAGS = append(cgoCPPFLAGS, pcCFLAGS...) - } - - // Allows including _cgo_export.h from .[ch] files in the package. - cgoCPPFLAGS = append(cgoCPPFLAGS, "-I", tmpdir) - - // _cgo_gotypes.go (displayed "C") contains the type definitions. - files = append(files, filepath.Join(tmpdir, "_cgo_gotypes.go")) - displayFiles = append(displayFiles, "C") - for _, fn := range bp.CgoFiles { - // "foo.cgo1.go" (displayed "foo.go") is the processed Go source. - f := cgoRe.ReplaceAllString(fn[:len(fn)-len("go")], "_") - files = append(files, filepath.Join(tmpdir, f+"cgo1.go")) - displayFiles = append(displayFiles, fn) - } - - var cgoflags []string - if bp.Goroot && bp.ImportPath == "runtime/cgo" { - cgoflags = append(cgoflags, "-import_runtime_cgo=false") - } - if bp.Goroot && bp.ImportPath == "runtime/race" || bp.ImportPath == "runtime/cgo" { - cgoflags = append(cgoflags, "-import_syscall=false") - } - - args := stringList( - "go", "tool", "cgo", "-objdir", tmpdir, cgoflags, "--", - cgoCPPFLAGS, cgoexeCFLAGS, bp.CgoFiles, - ) - if false { - log.Printf("Running cgo for package %q: %s (dir=%s)", bp.ImportPath, args, pkgdir) - } - cmd := exec.Command(args[0], args[1:]...) - cmd.Dir = pkgdir - cmd.Stdout = os.Stderr - cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - return nil, nil, fmt.Errorf("cgo failed: %s: %s", args, err) - } - - return files, displayFiles, nil -} - -// -- unmodified from 'go build' --------------------------------------- - -// Return the flags to use when invoking the C or C++ compilers, or cgo. 
-func cflags(p *build.Package, def bool) (cppflags, cflags, cxxflags, ldflags []string) { - var defaults string - if def { - defaults = "-g -O2" - } - - cppflags = stringList(envList("CGO_CPPFLAGS", ""), p.CgoCPPFLAGS) - cflags = stringList(envList("CGO_CFLAGS", defaults), p.CgoCFLAGS) - cxxflags = stringList(envList("CGO_CXXFLAGS", defaults), p.CgoCXXFLAGS) - ldflags = stringList(envList("CGO_LDFLAGS", defaults), p.CgoLDFLAGS) - return -} - -// envList returns the value of the given environment variable broken -// into fields, using the default value when the variable is empty. -func envList(key, def string) []string { - v := os.Getenv(key) - if v == "" { - v = def - } - return strings.Fields(v) -} - -// stringList's arguments should be a sequence of string or []string values. -// stringList flattens them into a single []string. -func stringList(args ...interface{}) []string { - var x []string - for _, arg := range args { - switch arg := arg.(type) { - case []string: - x = append(x, arg...) - case string: - x = append(x, arg) - default: - panic("stringList: invalid argument") - } - } - return x -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/loader/cgo_pkgconfig.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/loader/cgo_pkgconfig.go deleted file mode 100644 index de57422df228..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/loader/cgo_pkgconfig.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package loader - -import ( - "errors" - "fmt" - "go/build" - "os/exec" - "strings" -) - -// pkgConfig runs pkg-config with the specified arguments and returns the flags it prints. -func pkgConfig(mode string, pkgs []string) (flags []string, err error) { - cmd := exec.Command("pkg-config", append([]string{mode}, pkgs...)...) - out, err := cmd.CombinedOutput() - if err != nil { - s := fmt.Sprintf("%s failed: %v", strings.Join(cmd.Args, " "), err) - if len(out) > 0 { - s = fmt.Sprintf("%s: %s", s, out) - } - return nil, errors.New(s) - } - if len(out) > 0 { - flags = strings.Fields(string(out)) - } - return -} - -// pkgConfigFlags calls pkg-config if needed and returns the cflags -// needed to build the package. -func pkgConfigFlags(p *build.Package) (cflags []string, err error) { - if len(p.CgoPkgConfig) == 0 { - return nil, nil - } - return pkgConfig("--cflags", p.CgoPkgConfig) -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/loader/doc.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/loader/doc.go deleted file mode 100644 index 9b51c9ecde05..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/loader/doc.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package loader loads a complete Go program from source code, parsing -// and type-checking the initial packages plus their transitive closure -// of dependencies. The ASTs and the derived facts are retained for -// later use. -// -// THIS INTERFACE IS EXPERIMENTAL AND IS LIKELY TO CHANGE. -// -// The package defines two primary types: Config, which specifies a -// set of initial packages to load and various other options; and -// Program, which is the result of successfully loading the packages -// specified by a configuration. 
-// -// The configuration can be set directly, but *Config provides various -// convenience methods to simplify the common cases, each of which can -// be called any number of times. Finally, these are followed by a -// call to Load() to actually load and type-check the program. -// -// var conf loader.Config -// -// // Use the command-line arguments to specify -// // a set of initial packages to load from source. -// // See FromArgsUsage for help. -// rest, err := conf.FromArgs(os.Args[1:], wantTests) -// -// // Parse the specified files and create an ad hoc package with path "foo". -// // All files must have the same 'package' declaration. -// conf.CreateFromFilenames("foo", "foo.go", "bar.go") -// -// // Create an ad hoc package with path "foo" from -// // the specified already-parsed files. -// // All ASTs must have the same 'package' declaration. -// conf.CreateFromFiles("foo", parsedFiles) -// -// // Add "runtime" to the set of packages to be loaded. -// conf.Import("runtime") -// -// // Adds "fmt" and "fmt_test" to the set of packages -// // to be loaded. "fmt" will include *_test.go files. -// conf.ImportWithTests("fmt") -// -// // Finally, load all the packages specified by the configuration. -// prog, err := conf.Load() -// -// See examples_test.go for examples of API usage. -// -// -// CONCEPTS AND TERMINOLOGY -// -// The WORKSPACE is the set of packages accessible to the loader. The -// workspace is defined by Config.Build, a *build.Context. The -// default context treats subdirectories of $GOROOT and $GOPATH as -// packages, but this behavior may be overridden. -// -// An AD HOC package is one specified as a set of source files on the -// command line. In the simplest case, it may consist of a single file -// such as $GOROOT/src/net/http/triv.go. -// -// EXTERNAL TEST packages are those comprised of a set of *_test.go -// files all with the same 'package foo_test' declaration, all in the -// same directory. (go/build.Package calls these files XTestFiles.) -// -// An IMPORTABLE package is one that can be referred to by some import -// spec. Every importable package is uniquely identified by its -// PACKAGE PATH or just PATH, a string such as "fmt", "encoding/json", -// or "cmd/vendor/golang.org/x/arch/x86/x86asm". A package path -// typically denotes a subdirectory of the workspace. -// -// An import declaration uses an IMPORT PATH to refer to a package. -// Most import declarations use the package path as the import path. -// -// Due to VENDORING (https://golang.org/s/go15vendor), the -// interpretation of an import path may depend on the directory in which -// it appears. To resolve an import path to a package path, go/build -// must search the enclosing directories for a subdirectory named -// "vendor". -// -// ad hoc packages and external test packages are NON-IMPORTABLE. The -// path of an ad hoc package is inferred from the package -// declarations of its files and is therefore not a unique package key. -// For example, Config.CreatePkgs may specify two initial ad hoc -// packages, both with path "main". -// -// An AUGMENTED package is an importable package P plus all the -// *_test.go files with same 'package foo' declaration as P. -// (go/build.Package calls these files TestFiles.) -// -// The INITIAL packages are those specified in the configuration. A -// DEPENDENCY is a package loaded to satisfy an import in an initial -// package or another dependency. 
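The package documentation above sketches the Config-then-Load flow. A compact, runnable variant of the simplest case, assuming the golang.org/x/tools/go/loader package being deleted from vendor here is still available on your GOPATH:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/loader"
)

func main() {
	var conf loader.Config
	conf.Import("fmt") // an initial package, loaded from source with its dependencies
	prog, err := conf.Load()
	if err != nil {
		log.Fatal(err)
	}
	info := prog.Package("fmt")
	fmt.Printf("%s: %d files; %d packages loaded in total\n",
		info.Pkg.Path(), len(info.Files), len(prog.AllPackages))
}
```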
-// -package loader - -// IMPLEMENTATION NOTES -// -// 'go test', in-package test files, and import cycles -// --------------------------------------------------- -// -// An external test package may depend upon members of the augmented -// package that are not in the unaugmented package, such as functions -// that expose internals. (See bufio/export_test.go for an example.) -// So, the loader must ensure that for each external test package -// it loads, it also augments the corresponding non-test package. -// -// The import graph over n unaugmented packages must be acyclic; the -// import graph over n-1 unaugmented packages plus one augmented -// package must also be acyclic. ('go test' relies on this.) But the -// import graph over n augmented packages may contain cycles. -// -// First, all the (unaugmented) non-test packages and their -// dependencies are imported in the usual way; the loader reports an -// error if it detects an import cycle. -// -// Then, each package P for which testing is desired is augmented by -// the list P' of its in-package test files, by calling -// (*types.Checker).Files. This arrangement ensures that P' may -// reference definitions within P, but P may not reference definitions -// within P'. Furthermore, P' may import any other package, including -// ones that depend upon P, without an import cycle error. -// -// Consider two packages A and B, both of which have lists of -// in-package test files we'll call A' and B', and which have the -// following import graph edges: -// B imports A -// B' imports A -// A' imports B -// This last edge would be expected to create an error were it not -// for the special type-checking discipline above. -// Cycles of size greater than two are possible. For example: -// compress/bzip2/bzip2_test.go (package bzip2) imports "io/ioutil" -// io/ioutil/tempfile_test.go (package ioutil) imports "regexp" -// regexp/exec_test.go (package regexp) imports "compress/bzip2" -// -// -// Concurrency -// ----------- -// -// Let us define the import dependency graph as follows. Each node is a -// list of files passed to (Checker).Files at once. Many of these lists -// are the production code of an importable Go package, so those nodes -// are labelled by the package's path. The remaining nodes are -// ad hoc packages and lists of in-package *_test.go files that augment -// an importable package; those nodes have no label. -// -// The edges of the graph represent import statements appearing within a -// file. An edge connects a node (a list of files) to the node it -// imports, which is importable and thus always labelled. -// -// Loading is controlled by this dependency graph. -// -// To reduce I/O latency, we start loading a package's dependencies -// asynchronously as soon as we've parsed its files and enumerated its -// imports (scanImports). This performs a preorder traversal of the -// import dependency graph. -// -// To exploit hardware parallelism, we type-check unrelated packages in -// parallel, where "unrelated" means not ordered by the partial order of -// the import dependency graph. -// -// We use a concurrency-safe non-blocking cache (importer.imported) to -// record the results of type-checking, whether success or failure. An -// entry is created in this cache by startLoad the first time the -// package is imported. The first goroutine to request an entry becomes -// responsible for completing the task and broadcasting completion to -// subsequent requestors, which block until then. 
-// -// Type checking occurs in (parallel) postorder: we cannot type-check a -// set of files until we have loaded and type-checked all of their -// immediate dependencies (and thus all of their transitive -// dependencies). If the input were guaranteed free of import cycles, -// this would be trivial: we could simply wait for completion of the -// dependencies and then invoke the typechecker. -// -// But as we saw in the 'go test' section above, some cycles in the -// import graph over packages are actually legal, so long as the -// cycle-forming edge originates in the in-package test files that -// augment the package. This explains why the nodes of the import -// dependency graph are not packages, but lists of files: the unlabelled -// nodes avoid the cycles. Consider packages A and B where B imports A -// and A's in-package tests AT import B. The naively constructed import -// graph over packages would contain a cycle (A+AT) --> B --> (A+AT) but -// the graph over lists of files is AT --> B --> A, where AT is an -// unlabelled node. -// -// Awaiting completion of the dependencies in a cyclic graph would -// deadlock, so we must materialize the import dependency graph (as -// importer.graph) and check whether each import edge forms a cycle. If -// x imports y, and the graph already contains a path from y to x, then -// there is an import cycle, in which case the processing of x must not -// wait for the completion of processing of y. -// -// When the type-checker makes a callback (doImport) to the loader for a -// given import edge, there are two possible cases. In the normal case, -// the dependency has already been completely type-checked; doImport -// does a cache lookup and returns it. In the cyclic case, the entry in -// the cache is still necessarily incomplete, indicating a cycle. We -// perform the cycle check again to obtain the error message, and return -// the error. -// -// The result of using concurrency is about a 2.5x speedup for stdlib_test. - -// TODO(adonovan): overhaul the package documentation. diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/loader/loader.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/loader/loader.go deleted file mode 100644 index f69a6684178d..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/loader/loader.go +++ /dev/null @@ -1,1073 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package loader - -// See doc.go for package documentation and implementation notes. - -import ( - "errors" - "fmt" - "go/ast" - "go/build" - "go/parser" - "go/token" - "go/types" - "os" - "path/filepath" - "sort" - "strings" - "sync" - "time" - - "golang.org/x/tools/go/ast/astutil" -) - -var ignoreVendor build.ImportMode - -const trace = false // show timing info for type-checking - -// Config specifies the configuration for loading a whole program from -// Go source code. -// The zero value for Config is a ready-to-use default configuration. -type Config struct { - // Fset is the file set for the parser to use when loading the - // program. If nil, it may be lazily initialized by any - // method of Config. - Fset *token.FileSet - - // ParserMode specifies the mode to be used by the parser when - // loading source packages. - ParserMode parser.Mode - - // TypeChecker contains options relating to the type checker. 
-	//
-	// The supplied IgnoreFuncBodies is not used; the effective
-	// value comes from the TypeCheckFuncBodies func below.
-	// The supplied Import function is not used either.
-	TypeChecker types.Config
-
-	// TypeCheckFuncBodies is a predicate over package paths.
-	// A package for which the predicate is false will
-	// have its package-level declarations type checked, but not
-	// its function bodies; this can be used to quickly load
-	// dependencies from source. If nil, all func bodies are type
-	// checked.
-	TypeCheckFuncBodies func(path string) bool
-
-	// If Build is non-nil, it is used to locate source packages.
-	// Otherwise &build.Default is used.
-	//
-	// By default, cgo is invoked to preprocess Go files that
-	// import the fake package "C". This behaviour can be
-	// disabled by setting CGO_ENABLED=0 in the environment prior
-	// to startup, or by setting Build.CgoEnabled=false.
-	Build *build.Context
-
-	// The current directory, used for resolving relative package
-	// references such as "./go/loader". If empty, os.Getwd will be
-	// used instead.
-	Cwd string
-
-	// If DisplayPath is non-nil, it is used to transform each
-	// file name obtained from Build.Import(). This can be used
-	// to prevent a virtualized build.Config's file names from
-	// leaking into the user interface.
-	DisplayPath func(path string) string
-
-	// If AllowErrors is true, Load will return a Program even
-	// if some of its packages contained I/O, parser or type
-	// errors; such errors are accessible via PackageInfo.Errors. If
-	// false, Load will fail if any package had an error.
-	AllowErrors bool
-
-	// CreatePkgs specifies a list of non-importable initial
-	// packages to create. The resulting packages will appear in
-	// the corresponding elements of the Program.Created slice.
-	CreatePkgs []PkgSpec
-
-	// ImportPkgs specifies a set of initial packages to load.
-	// The map keys are package paths.
-	//
-	// The map value indicates whether to load tests. If true, Load
-	// will add and type-check two lists of files to the package:
-	// non-test files followed by in-package *_test.go files. In
-	// addition, it will append the external test package (if any)
-	// to Program.Created.
-	ImportPkgs map[string]bool
-
-	// FindPackage is called during Load to create the build.Package
-	// for a given import path from a given directory.
-	// If FindPackage is nil, (*build.Context).Import is used.
-	// A client may use this hook to adapt to a proprietary build
-	// system that does not follow the "go build" layout
-	// conventions, for example.
-	//
-	// It must be safe to call concurrently from multiple goroutines.
-	FindPackage func(ctxt *build.Context, fromDir, importPath string, mode build.ImportMode) (*build.Package, error)
-
-	// AfterTypeCheck is called immediately after a list of files
-	// has been type-checked and appended to info.Files.
-	//
-	// This optional hook function is the earliest opportunity for
-	// the client to observe the output of the type checker,
-	// which may be useful to reduce analysis latency when loading
-	// a large program.
-	//
-	// The function is permitted to modify info.Info, for instance
-	// to clear data structures that are no longer needed, which can
-	// dramatically reduce peak memory consumption.
-	//
-	// The function may be called twice for the same PackageInfo:
-	// once for the files of the package and again for the
-	// in-package test files.
-	//
-	// It must be safe to call concurrently from multiple goroutines.
- AfterTypeCheck func(info *PackageInfo, files []*ast.File) -} - -// A PkgSpec specifies a non-importable package to be created by Load. -// Files are processed first, but typically only one of Files and -// Filenames is provided. The path needn't be globally unique. -// -// For vendoring purposes, the package's directory is the one that -// contains the first file. -type PkgSpec struct { - Path string // package path ("" => use package declaration) - Files []*ast.File // ASTs of already-parsed files - Filenames []string // names of files to be parsed -} - -// A Program is a Go program loaded from source as specified by a Config. -type Program struct { - Fset *token.FileSet // the file set for this program - - // Created[i] contains the initial package whose ASTs or - // filenames were supplied by Config.CreatePkgs[i], followed by - // the external test package, if any, of each package in - // Config.ImportPkgs ordered by ImportPath. - // - // NOTE: these files must not import "C". Cgo preprocessing is - // only performed on imported packages, not ad hoc packages. - // - // TODO(adonovan): we need to copy and adapt the logic of - // goFilesPackage (from $GOROOT/src/cmd/go/build.go) and make - // Config.Import and Config.Create methods return the same kind - // of entity, essentially a build.Package. - // Perhaps we can even reuse that type directly. - Created []*PackageInfo - - // Imported contains the initially imported packages, - // as specified by Config.ImportPkgs. - Imported map[string]*PackageInfo - - // AllPackages contains the PackageInfo of every package - // encountered by Load: all initial packages and all - // dependencies, including incomplete ones. - AllPackages map[*types.Package]*PackageInfo - - // importMap is the canonical mapping of package paths to - // packages. It contains all Imported initial packages, but not - // Created ones, and all imported dependencies. - importMap map[string]*types.Package -} - -// PackageInfo holds the ASTs and facts derived by the type-checker -// for a single package. -// -// Not mutated once exposed via the API. -// -type PackageInfo struct { - Pkg *types.Package - Importable bool // true if 'import "Pkg.Path()"' would resolve to this - TransitivelyErrorFree bool // true if Pkg and all its dependencies are free of errors - Files []*ast.File // syntax trees for the package's files - Errors []error // non-nil if the package had errors - types.Info // type-checker deductions. - dir string // package directory - - checker *types.Checker // transient type-checker state - errorFunc func(error) -} - -func (info *PackageInfo) String() string { return info.Pkg.Path() } - -func (info *PackageInfo) appendError(err error) { - if info.errorFunc != nil { - info.errorFunc(err) - } else { - fmt.Fprintln(os.Stderr, err) - } - info.Errors = append(info.Errors, err) -} - -func (conf *Config) fset() *token.FileSet { - if conf.Fset == nil { - conf.Fset = token.NewFileSet() - } - return conf.Fset -} - -// ParseFile is a convenience function (intended for testing) that invokes -// the parser using the Config's FileSet, which is initialized if nil. -// -// src specifies the parser input as a string, []byte, or io.Reader, and -// filename is its apparent name. If src is nil, the contents of -// filename are read from the file system. -// -func (conf *Config) ParseFile(filename string, src interface{}) (*ast.File, error) { - // TODO(adonovan): use conf.build() etc like parseFiles does. 
-	return parser.ParseFile(conf.fset(), filename, src, conf.ParserMode)
-}
-
-// FromArgsUsage is a partial usage message that applications calling
-// FromArgs may wish to include in their -help output.
-const FromArgsUsage = `
-<args> is a list of arguments denoting a set of initial packages.
-It may take one of two forms:
-
-1. A list of *.go source files.
-
-   All of the specified files are loaded, parsed and type-checked
-   as a single package. All the files must belong to the same directory.
-
-2. A list of import paths, each denoting a package.
-
-   The package's directory is found relative to the $GOROOT and
-   $GOPATH using similar logic to 'go build', and the *.go files in
-   that directory are loaded, parsed and type-checked as a single
-   package.
-
-   In addition, all *_test.go files in the directory are then loaded
-   and parsed. Those files whose package declaration equals that of
-   the non-*_test.go files are included in the primary package. Test
-   files whose package declaration ends with "_test" are type-checked
-   as another package, the 'external' test package, so that a single
-   import path may denote two packages. (Whether this behaviour is
-   enabled is tool-specific, and may depend on additional flags.)
-
-A '--' argument terminates the list of packages.
-`
-
-// FromArgs interprets args as a set of initial packages to load from
-// source and updates the configuration. It returns the list of
-// unconsumed arguments.
-//
-// It is intended for use in command-line interfaces that require a
-// set of initial packages to be specified; see FromArgsUsage message
-// for details.
-//
-// Only superficial errors are reported at this stage; errors dependent
-// on I/O are detected during Load.
-//
-func (conf *Config) FromArgs(args []string, xtest bool) ([]string, error) {
-	var rest []string
-	for i, arg := range args {
-		if arg == "--" {
-			rest = args[i+1:]
-			args = args[:i]
-			break // consume "--" and return the remaining args
-		}
-	}
-
-	if len(args) > 0 && strings.HasSuffix(args[0], ".go") {
-		// Assume args is a list of *.go files
-		// denoting a single ad hoc package.
-		for _, arg := range args {
-			if !strings.HasSuffix(arg, ".go") {
-				return nil, fmt.Errorf("named files must be .go files: %s", arg)
-			}
-		}
-		conf.CreateFromFilenames("", args...)
-	} else {
-		// Assume args are directories each denoting a
-		// package and (perhaps) an external test, iff xtest.
-		for _, arg := range args {
-			if xtest {
-				conf.ImportWithTests(arg)
-			} else {
-				conf.Import(arg)
-			}
-		}
-	}
-
-	return rest, nil
-}
-
-// CreateFromFilenames is a convenience function that adds
-// a conf.CreatePkgs entry to create a package of the specified *.go
-// files.
-//
-func (conf *Config) CreateFromFilenames(path string, filenames ...string) {
-	conf.CreatePkgs = append(conf.CreatePkgs, PkgSpec{Path: path, Filenames: filenames})
-}
-
-// CreateFromFiles is a convenience function that adds a conf.CreatePkgs
-// entry to create a package of the specified path and parsed files.
-//
-func (conf *Config) CreateFromFiles(path string, files ...*ast.File) {
-	conf.CreatePkgs = append(conf.CreatePkgs, PkgSpec{Path: path, Files: files})
-}
-
-// ImportWithTests is a convenience function that adds path to
-// ImportPkgs, the set of initial source packages located relative to
-// $GOPATH. The package will be augmented by any *_test.go files in
-// its directory that contain a "package x" (not "package x_test")
-// declaration.
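FromArgs above accepts either a list of *.go files or a list of import paths, with a "--" argument terminating the package list. A hedged sketch of a command-line driver built on it, again assuming the deleted loader package is still importable; the xtest choice below is illustrative:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/tools/go/loader"
)

func main() {
	var conf loader.Config
	// false: don't add external *_test.go packages for import-path arguments.
	rest, err := conf.FromArgs(os.Args[1:], false)
	if err != nil {
		log.Fatal(err)
	}
	if len(rest) > 0 {
		fmt.Println("unconsumed arguments:", rest)
	}
	prog, err := conf.Load()
	if err != nil {
		log.Fatal(err)
	}
	for _, info := range prog.InitialPackages() {
		fmt.Println(info.Pkg.Path())
	}
}
```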
-// -// In addition, if any *_test.go files contain a "package x_test" -// declaration, an additional package comprising just those files will -// be added to CreatePkgs. -// -func (conf *Config) ImportWithTests(path string) { conf.addImport(path, true) } - -// Import is a convenience function that adds path to ImportPkgs, the -// set of initial packages that will be imported from source. -// -func (conf *Config) Import(path string) { conf.addImport(path, false) } - -func (conf *Config) addImport(path string, tests bool) { - if path == "C" { - return // ignore; not a real package - } - if conf.ImportPkgs == nil { - conf.ImportPkgs = make(map[string]bool) - } - conf.ImportPkgs[path] = conf.ImportPkgs[path] || tests -} - -// PathEnclosingInterval returns the PackageInfo and ast.Node that -// contain source interval [start, end), and all the node's ancestors -// up to the AST root. It searches all ast.Files of all packages in prog. -// exact is defined as for astutil.PathEnclosingInterval. -// -// The zero value is returned if not found. -// -func (prog *Program) PathEnclosingInterval(start, end token.Pos) (pkg *PackageInfo, path []ast.Node, exact bool) { - for _, info := range prog.AllPackages { - for _, f := range info.Files { - if f.Pos() == token.NoPos { - // This can happen if the parser saw - // too many errors and bailed out. - // (Use parser.AllErrors to prevent that.) - continue - } - if !tokenFileContainsPos(prog.Fset.File(f.Pos()), start) { - continue - } - if path, exact := astutil.PathEnclosingInterval(f, start, end); path != nil { - return info, path, exact - } - } - } - return nil, nil, false -} - -// InitialPackages returns a new slice containing the set of initial -// packages (Created + Imported) in unspecified order. -// -func (prog *Program) InitialPackages() []*PackageInfo { - infos := make([]*PackageInfo, 0, len(prog.Created)+len(prog.Imported)) - infos = append(infos, prog.Created...) - for _, info := range prog.Imported { - infos = append(infos, info) - } - return infos -} - -// Package returns the ASTs and results of type checking for the -// specified package. -func (prog *Program) Package(path string) *PackageInfo { - if info, ok := prog.AllPackages[prog.importMap[path]]; ok { - return info - } - for _, info := range prog.Created { - if path == info.Pkg.Path() { - return info - } - } - return nil -} - -// ---------- Implementation ---------- - -// importer holds the working state of the algorithm. -type importer struct { - conf *Config // the client configuration - start time.Time // for logging - - progMu sync.Mutex // guards prog - prog *Program // the resulting program - - // findpkg is a memoization of FindPackage. - findpkgMu sync.Mutex // guards findpkg - findpkg map[findpkgKey]*findpkgValue - - importedMu sync.Mutex // guards imported - imported map[string]*importInfo // all imported packages (incl. failures) by import path - - // import dependency graph: graph[x][y] => x imports y - // - // Since non-importable packages cannot be cyclic, we ignore - // their imports, thus we only need the subgraph over importable - // packages. Nodes are identified by their import paths. - graphMu sync.Mutex - graph map[string]map[string]bool -} - -type findpkgKey struct { - importPath string - fromDir string - mode build.ImportMode -} - -type findpkgValue struct { - ready chan struct{} // closed to broadcast readiness - bp *build.Package - err error -} - -// importInfo tracks the success or failure of a single import. 
-//
-// Upon completion, exactly one of info and err is non-nil:
-// info on successful creation of a package, err otherwise.
-// A successful package may still contain type errors.
-//
-type importInfo struct {
-	path     string        // import path
-	info     *PackageInfo  // results of typechecking (including errors)
-	complete chan struct{} // closed to broadcast that info is set.
-}
-
-// awaitCompletion blocks until ii is complete,
-// i.e. the info field is safe to inspect.
-func (ii *importInfo) awaitCompletion() {
-	<-ii.complete // wait for close
-}
-
-// Complete marks ii as complete.
-// Its info and err fields will not be subsequently updated.
-func (ii *importInfo) Complete(info *PackageInfo) {
-	if info == nil {
-		panic("info == nil")
-	}
-	ii.info = info
-	close(ii.complete)
-}
-
-type importError struct {
-	path string // import path
-	err  error  // reason for failure to create a package
-}
-
-// Load creates the initial packages specified by conf.{Create,Import}Pkgs,
-// loading their dependency packages as needed.
-//
-// On success, Load returns a Program containing a PackageInfo for
-// each package. On failure, it returns an error.
-//
-// If AllowErrors is true, Load will return a Program even if some
-// packages contained I/O, parser or type errors, or if dependencies
-// were missing. (Such errors are accessible via PackageInfo.Errors.) If
-// false, Load will fail if any package had an error.
-//
-// It is an error if no packages were loaded.
-//
-func (conf *Config) Load() (*Program, error) {
-	// Create a simple default error handler for parse/type errors.
-	if conf.TypeChecker.Error == nil {
-		conf.TypeChecker.Error = func(e error) { fmt.Fprintln(os.Stderr, e) }
-	}
-
-	// Set default working directory for relative package references.
-	if conf.Cwd == "" {
-		var err error
-		conf.Cwd, err = os.Getwd()
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	// Install default FindPackage hook using go/build logic.
-	if conf.FindPackage == nil {
-		conf.FindPackage = (*build.Context).Import
-	}
-
-	prog := &Program{
-		Fset:        conf.fset(),
-		Imported:    make(map[string]*PackageInfo),
-		importMap:   make(map[string]*types.Package),
-		AllPackages: make(map[*types.Package]*PackageInfo),
-	}
-
-	imp := importer{
-		conf:     conf,
-		prog:     prog,
-		findpkg:  make(map[findpkgKey]*findpkgValue),
-		imported: make(map[string]*importInfo),
-		start:    time.Now(),
-		graph:    make(map[string]map[string]bool),
-	}
-
-	// -- loading proper (concurrent phase) --------------------------------
-
-	var errpkgs []string // packages that contained errors
-
-	// Load the initially imported packages and their dependencies,
-	// in parallel.
-	// No vendor check on packages imported from the command line.
-	infos, importErrors := imp.importAll("", conf.Cwd, conf.ImportPkgs, ignoreVendor)
-	for _, ie := range importErrors {
-		conf.TypeChecker.Error(ie.err) // failed to create package
-		errpkgs = append(errpkgs, ie.path)
-	}
-	for _, info := range infos {
-		prog.Imported[info.Pkg.Path()] = info
-	}
-
-	// Augment the designated initial packages by their tests.
-	// Dependencies are loaded in parallel.
-	var xtestPkgs []*build.Package
-	for importPath, augment := range conf.ImportPkgs {
-		if !augment {
-			continue
-		}
-
-		// No vendor check on packages imported from command line.
-		bp, err := imp.findPackage(importPath, conf.Cwd, ignoreVendor)
-		if err != nil {
-			// Package not found, or can't even parse package declaration.
-			// Already reported by previous loop; ignore it.
-			continue
-		}
-
-		// Needs external test package?
- if len(bp.XTestGoFiles) > 0 { - xtestPkgs = append(xtestPkgs, bp) - } - - // Consult the cache using the canonical package path. - path := bp.ImportPath - imp.importedMu.Lock() // (unnecessary, we're sequential here) - ii, ok := imp.imported[path] - // Paranoid checks added due to issue #11012. - if !ok { - // Unreachable. - // The previous loop called importAll and thus - // startLoad for each path in ImportPkgs, which - // populates imp.imported[path] with a non-zero value. - panic(fmt.Sprintf("imported[%q] not found", path)) - } - if ii == nil { - // Unreachable. - // The ii values in this loop are the same as in - // the previous loop, which enforced the invariant - // that at least one of ii.err and ii.info is non-nil. - panic(fmt.Sprintf("imported[%q] == nil", path)) - } - if ii.info == nil { - // Unreachable. - // awaitCompletion has the postcondition - // ii.info != nil. - panic(fmt.Sprintf("imported[%q].info = nil", path)) - } - info := ii.info - imp.importedMu.Unlock() - - // Parse the in-package test files. - files, errs := imp.conf.parsePackageFiles(bp, 't') - for _, err := range errs { - info.appendError(err) - } - - // The test files augmenting package P cannot be imported, - // but may import packages that import P, - // so we must disable the cycle check. - imp.addFiles(info, files, false) - } - - createPkg := func(path, dir string, files []*ast.File, errs []error) { - info := imp.newPackageInfo(path, dir) - for _, err := range errs { - info.appendError(err) - } - - // Ad hoc packages are non-importable, - // so no cycle check is needed. - // addFiles loads dependencies in parallel. - imp.addFiles(info, files, false) - prog.Created = append(prog.Created, info) - } - - // Create packages specified by conf.CreatePkgs. - for _, cp := range conf.CreatePkgs { - files, errs := parseFiles(conf.fset(), conf.build(), nil, conf.Cwd, cp.Filenames, conf.ParserMode) - files = append(files, cp.Files...) - - path := cp.Path - if path == "" { - if len(files) > 0 { - path = files[0].Name.Name - } else { - path = "(unnamed)" - } - } - - dir := conf.Cwd - if len(files) > 0 && files[0].Pos().IsValid() { - dir = filepath.Dir(conf.fset().File(files[0].Pos()).Name()) - } - createPkg(path, dir, files, errs) - } - - // Create external test packages. - sort.Sort(byImportPath(xtestPkgs)) - for _, bp := range xtestPkgs { - files, errs := imp.conf.parsePackageFiles(bp, 'x') - createPkg(bp.ImportPath+"_test", bp.Dir, files, errs) - } - - // -- finishing up (sequential) ---------------------------------------- - - if len(prog.Imported)+len(prog.Created) == 0 { - return nil, errors.New("no initial packages were loaded") - } - - // Create infos for indirectly imported packages. - // e.g. incomplete packages without syntax, loaded from export data. - for _, obj := range prog.importMap { - info := prog.AllPackages[obj] - if info == nil { - prog.AllPackages[obj] = &PackageInfo{Pkg: obj, Importable: true} - } else { - // finished - info.checker = nil - info.errorFunc = nil - } - } - - if !conf.AllowErrors { - // Report errors in indirectly imported packages. 
- for _, info := range prog.AllPackages { - if len(info.Errors) > 0 { - errpkgs = append(errpkgs, info.Pkg.Path()) - } - } - if errpkgs != nil { - var more string - if len(errpkgs) > 3 { - more = fmt.Sprintf(" and %d more", len(errpkgs)-3) - errpkgs = errpkgs[:3] - } - return nil, fmt.Errorf("couldn't load packages due to errors: %s%s", - strings.Join(errpkgs, ", "), more) - } - } - - markErrorFreePackages(prog.AllPackages) - - return prog, nil -} - -type byImportPath []*build.Package - -func (b byImportPath) Len() int { return len(b) } -func (b byImportPath) Less(i, j int) bool { return b[i].ImportPath < b[j].ImportPath } -func (b byImportPath) Swap(i, j int) { b[i], b[j] = b[j], b[i] } - -// markErrorFreePackages sets the TransitivelyErrorFree flag on all -// applicable packages. -func markErrorFreePackages(allPackages map[*types.Package]*PackageInfo) { - // Build the transpose of the import graph. - importedBy := make(map[*types.Package]map[*types.Package]bool) - for P := range allPackages { - for _, Q := range P.Imports() { - clients, ok := importedBy[Q] - if !ok { - clients = make(map[*types.Package]bool) - importedBy[Q] = clients - } - clients[P] = true - } - } - - // Find all packages reachable from some error package. - reachable := make(map[*types.Package]bool) - var visit func(*types.Package) - visit = func(p *types.Package) { - if !reachable[p] { - reachable[p] = true - for q := range importedBy[p] { - visit(q) - } - } - } - for _, info := range allPackages { - if len(info.Errors) > 0 { - visit(info.Pkg) - } - } - - // Mark the others as "transitively error-free". - for _, info := range allPackages { - if !reachable[info.Pkg] { - info.TransitivelyErrorFree = true - } - } -} - -// build returns the effective build context. -func (conf *Config) build() *build.Context { - if conf.Build != nil { - return conf.Build - } - return &build.Default -} - -// parsePackageFiles enumerates the files belonging to package path, -// then loads, parses and returns them, plus a list of I/O or parse -// errors that were encountered. -// -// 'which' indicates which files to include: -// 'g': include non-test *.go source files (GoFiles + processed CgoFiles) -// 't': include in-package *_test.go source files (TestGoFiles) -// 'x': include external *_test.go source files. (XTestGoFiles) -// -func (conf *Config) parsePackageFiles(bp *build.Package, which rune) ([]*ast.File, []error) { - if bp.ImportPath == "unsafe" { - return nil, nil - } - var filenames []string - switch which { - case 'g': - filenames = bp.GoFiles - case 't': - filenames = bp.TestGoFiles - case 'x': - filenames = bp.XTestGoFiles - default: - panic(which) - } - - files, errs := parseFiles(conf.fset(), conf.build(), conf.DisplayPath, bp.Dir, filenames, conf.ParserMode) - - // Preprocess CgoFiles and parse the outputs (sequentially). - if which == 'g' && bp.CgoFiles != nil { - cgofiles, err := processCgoFiles(bp, conf.fset(), conf.DisplayPath, conf.ParserMode) - if err != nil { - errs = append(errs, err) - } else { - files = append(files, cgofiles...) - } - } - - return files, errs -} - -// doImport imports the package denoted by path. -// It implements the types.Importer signature. -// -// It returns an error if a package could not be created -// (e.g. go/build or parse error), but type errors are reported via -// the types.Config.Error callback (the first of which is also saved -// in the package's PackageInfo). -// -// Idempotent. 
-// -func (imp *importer) doImport(from *PackageInfo, to string) (*types.Package, error) { - if to == "C" { - // This should be unreachable, but ad hoc packages are - // not currently subject to cgo preprocessing. - // See https://github.com/golang/go/issues/11627. - return nil, fmt.Errorf(`the loader doesn't cgo-process ad hoc packages like %q; see Go issue 11627`, - from.Pkg.Path()) - } - - bp, err := imp.findPackage(to, from.dir, 0) - if err != nil { - return nil, err - } - - // The standard unsafe package is handled specially, - // and has no PackageInfo. - if bp.ImportPath == "unsafe" { - return types.Unsafe, nil - } - - // Look for the package in the cache using its canonical path. - path := bp.ImportPath - imp.importedMu.Lock() - ii := imp.imported[path] - imp.importedMu.Unlock() - if ii == nil { - panic("internal error: unexpected import: " + path) - } - if ii.info != nil { - return ii.info.Pkg, nil - } - - // Import of incomplete package: this indicates a cycle. - fromPath := from.Pkg.Path() - if cycle := imp.findPath(path, fromPath); cycle != nil { - cycle = append([]string{fromPath}, cycle...) - return nil, fmt.Errorf("import cycle: %s", strings.Join(cycle, " -> ")) - } - - panic("internal error: import of incomplete (yet acyclic) package: " + fromPath) -} - -// findPackage locates the package denoted by the importPath in the -// specified directory. -func (imp *importer) findPackage(importPath, fromDir string, mode build.ImportMode) (*build.Package, error) { - // We use a non-blocking duplicate-suppressing cache (gopl.io §9.7) - // to avoid holding the lock around FindPackage. - key := findpkgKey{importPath, fromDir, mode} - imp.findpkgMu.Lock() - v, ok := imp.findpkg[key] - if ok { - // cache hit - imp.findpkgMu.Unlock() - - <-v.ready // wait for entry to become ready - } else { - // Cache miss: this goroutine becomes responsible for - // populating the map entry and broadcasting its readiness. - v = &findpkgValue{ready: make(chan struct{})} - imp.findpkg[key] = v - imp.findpkgMu.Unlock() - - ioLimit <- true - v.bp, v.err = imp.conf.FindPackage(imp.conf.build(), importPath, fromDir, mode) - <-ioLimit - - if _, ok := v.err.(*build.NoGoError); ok { - v.err = nil // empty directory is not an error - } - - close(v.ready) // broadcast ready condition - } - return v.bp, v.err -} - -// importAll loads, parses, and type-checks the specified packages in -// parallel and returns their completed importInfos in unspecified order. -// -// fromPath is the package path of the importing package, if it is -// importable, "" otherwise. It is used for cycle detection. -// -// fromDir is the directory containing the import declaration that -// caused these imports. -// -func (imp *importer) importAll(fromPath, fromDir string, imports map[string]bool, mode build.ImportMode) (infos []*PackageInfo, errors []importError) { - // TODO(adonovan): opt: do the loop in parallel once - // findPackage is non-blocking. - var pending []*importInfo - for importPath := range imports { - bp, err := imp.findPackage(importPath, fromDir, mode) - if err != nil { - errors = append(errors, importError{ - path: importPath, - err: err, - }) - continue - } - pending = append(pending, imp.startLoad(bp)) - } - - if fromPath != "" { - // We're loading a set of imports. - // - // We must record graph edges from the importing package - // to its dependencies, and check for cycles. 
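findPackage above implements the non-blocking duplicate-suppressing cache cited from gopl.io §9.7: the first goroutine to request a key becomes responsible for computing it and closes the ready channel; later requesters block on that channel instead of recomputing. A self-contained sketch of the pattern, with illustrative names (result, cache, get):

```go
package main

import (
	"fmt"
	"sync"
)

// result is a cache entry whose ready channel is closed once value is set.
type result struct {
	ready chan struct{}
	value string
}

type cache struct {
	mu sync.Mutex
	m  map[string]*result
}

// get computes the value for key exactly once, even under concurrent calls.
func (c *cache) get(key string, compute func(string) string) string {
	c.mu.Lock()
	r, ok := c.m[key]
	if ok {
		c.mu.Unlock()
		<-r.ready // another goroutine computed (or is computing) this entry
	} else {
		r = &result{ready: make(chan struct{})}
		c.m[key] = r
		c.mu.Unlock()
		r.value = compute(key) // only this goroutine computes, lock not held
		close(r.ready)         // broadcast readiness to all waiters
	}
	return r.value
}

func main() {
	c := &cache{m: make(map[string]*result)}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(c.get("k", func(k string) string { return k + "-value" }))
		}()
	}
	wg.Wait()
}
```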
- imp.graphMu.Lock() - deps, ok := imp.graph[fromPath] - if !ok { - deps = make(map[string]bool) - imp.graph[fromPath] = deps - } - for _, ii := range pending { - deps[ii.path] = true - } - imp.graphMu.Unlock() - } - - for _, ii := range pending { - if fromPath != "" { - if cycle := imp.findPath(ii.path, fromPath); cycle != nil { - // Cycle-forming import: we must not await its - // completion since it would deadlock. - // - // We don't record the error in ii since - // the error is really associated with the - // cycle-forming edge, not the package itself. - // (Also it would complicate the - // invariants of importPath completion.) - if trace { - fmt.Fprintf(os.Stderr, "import cycle: %q\n", cycle) - } - continue - } - } - ii.awaitCompletion() - infos = append(infos, ii.info) - } - - return infos, errors -} - -// findPath returns an arbitrary path from 'from' to 'to' in the import -// graph, or nil if there was none. -func (imp *importer) findPath(from, to string) []string { - imp.graphMu.Lock() - defer imp.graphMu.Unlock() - - seen := make(map[string]bool) - var search func(stack []string, importPath string) []string - search = func(stack []string, importPath string) []string { - if !seen[importPath] { - seen[importPath] = true - stack = append(stack, importPath) - if importPath == to { - return stack - } - for x := range imp.graph[importPath] { - if p := search(stack, x); p != nil { - return p - } - } - } - return nil - } - return search(make([]string, 0, 20), from) -} - -// startLoad initiates the loading, parsing and type-checking of the -// specified package and its dependencies, if it has not already begun. -// -// It returns an importInfo, not necessarily in a completed state. The -// caller must call awaitCompletion() before accessing its info field. -// -// startLoad is concurrency-safe and idempotent. -// -func (imp *importer) startLoad(bp *build.Package) *importInfo { - path := bp.ImportPath - imp.importedMu.Lock() - ii, ok := imp.imported[path] - if !ok { - ii = &importInfo{path: path, complete: make(chan struct{})} - imp.imported[path] = ii - go func() { - info := imp.load(bp) - ii.Complete(info) - }() - } - imp.importedMu.Unlock() - - return ii -} - -// load implements package loading by parsing Go source files -// located by go/build. -func (imp *importer) load(bp *build.Package) *PackageInfo { - info := imp.newPackageInfo(bp.ImportPath, bp.Dir) - info.Importable = true - files, errs := imp.conf.parsePackageFiles(bp, 'g') - for _, err := range errs { - info.appendError(err) - } - - imp.addFiles(info, files, true) - - imp.progMu.Lock() - imp.prog.importMap[bp.ImportPath] = info.Pkg - imp.progMu.Unlock() - - return info -} - -// addFiles adds and type-checks the specified files to info, loading -// their dependencies if needed. The order of files determines the -// package initialization order. It may be called multiple times on the -// same package. Errors are appended to the info.Errors field. -// -// cycleCheck determines whether the imports within files create -// dependency edges that should be checked for potential cycles. -// -func (imp *importer) addFiles(info *PackageInfo, files []*ast.File, cycleCheck bool) { - // Ensure the dependencies are loaded, in parallel. - var fromPath string - if cycleCheck { - fromPath = info.Pkg.Path() - } - // TODO(adonovan): opt: make the caller do scanImports. - // Callers with a build.Package can skip it. 
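findPath above is a plain depth-first search over the graph[x][y] adjacency map, returning an arbitrary path or nil. The same logic extracted into a standalone program, with illustrative package names as graph nodes:

```go
package main

import "fmt"

// findPath returns an arbitrary path from 'from' to 'to' in a directed
// graph, or nil if none exists; this is the DFS used for cycle detection.
func findPath(graph map[string]map[string]bool, from, to string) []string {
	seen := make(map[string]bool)
	var search func(stack []string, node string) []string
	search = func(stack []string, node string) []string {
		if seen[node] {
			return nil
		}
		seen[node] = true
		stack = append(stack, node)
		if node == to {
			return stack
		}
		for next := range graph[node] {
			if p := search(stack, next); p != nil {
				return p
			}
		}
		return nil
	}
	return search(nil, from)
}

func main() {
	g := map[string]map[string]bool{
		"a": {"b": true},
		"b": {"c": true},
	}
	fmt.Println(findPath(g, "a", "c")) // [a b c]
	fmt.Println(findPath(g, "c", "a")) // []: no path, so an edge c->a is acyclic
}
```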
- imp.importAll(fromPath, info.dir, scanImports(files), 0) - - if trace { - fmt.Fprintf(os.Stderr, "%s: start %q (%d)\n", - time.Since(imp.start), info.Pkg.Path(), len(files)) - } - - if info.Pkg == types.Unsafe && len(files) > 0 { - panic(`addFiles("unsafe") not permitted`) - } - - // Ignore the returned (first) error since we - // already collect them all in the PackageInfo. - info.checker.Files(files) - info.Files = append(info.Files, files...) - - if imp.conf.AfterTypeCheck != nil { - imp.conf.AfterTypeCheck(info, files) - } - - if trace { - fmt.Fprintf(os.Stderr, "%s: stop %q\n", - time.Since(imp.start), info.Pkg.Path()) - } -} - -func (imp *importer) newPackageInfo(path, dir string) *PackageInfo { - var pkg *types.Package - if path == "unsafe" { - pkg = types.Unsafe - } else { - pkg = types.NewPackage(path, "") - } - info := &PackageInfo{ - Pkg: pkg, - Info: types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Implicits: make(map[ast.Node]types.Object), - Scopes: make(map[ast.Node]*types.Scope), - Selections: make(map[*ast.SelectorExpr]*types.Selection), - }, - errorFunc: imp.conf.TypeChecker.Error, - dir: dir, - } - - // Copy the types.Config so we can vary it across PackageInfos. - tc := imp.conf.TypeChecker - tc.IgnoreFuncBodies = false - if f := imp.conf.TypeCheckFuncBodies; f != nil { - tc.IgnoreFuncBodies = !f(path) - } - tc.Importer = closure{imp, info} - tc.Error = info.appendError // appendError wraps the user's Error function - - info.checker = types.NewChecker(&tc, imp.conf.fset(), pkg, &info.Info) - imp.progMu.Lock() - imp.prog.AllPackages[pkg] = info - imp.progMu.Unlock() - return info -} - -type closure struct { - imp *importer - info *PackageInfo -} - -func (c closure) Import(to string) (*types.Package, error) { return c.imp.doImport(c.info, to) } diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/loader/util.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/loader/util.go deleted file mode 100644 index 7f38dd740779..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/loader/util.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package loader - -import ( - "go/ast" - "go/build" - "go/parser" - "go/token" - "io" - "os" - "strconv" - "sync" - - "golang.org/x/tools/go/buildutil" -) - -// We use a counting semaphore to limit -// the number of parallel I/O calls per process. -var ioLimit = make(chan bool, 10) - -// parseFiles parses the Go source files within directory dir and -// returns the ASTs of the ones that could be at least partially parsed, -// along with a list of I/O and parse errors encountered. -// -// I/O is done via ctxt, which may specify a virtual file system. -// displayPath is used to transform the filenames attached to the ASTs. 
-// -func parseFiles(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, files []string, mode parser.Mode) ([]*ast.File, []error) { - if displayPath == nil { - displayPath = func(path string) string { return path } - } - var wg sync.WaitGroup - n := len(files) - parsed := make([]*ast.File, n) - errors := make([]error, n) - for i, file := range files { - if !buildutil.IsAbsPath(ctxt, file) { - file = buildutil.JoinPath(ctxt, dir, file) - } - wg.Add(1) - go func(i int, file string) { - ioLimit <- true // wait - defer func() { - wg.Done() - <-ioLimit // signal - }() - var rd io.ReadCloser - var err error - if ctxt.OpenFile != nil { - rd, err = ctxt.OpenFile(file) - } else { - rd, err = os.Open(file) - } - if err != nil { - errors[i] = err // open failed - return - } - - // ParseFile may return both an AST and an error. - parsed[i], errors[i] = parser.ParseFile(fset, displayPath(file), rd, mode) - rd.Close() - }(i, file) - } - wg.Wait() - - // Eliminate nils, preserving order. - var o int - for _, f := range parsed { - if f != nil { - parsed[o] = f - o++ - } - } - parsed = parsed[:o] - - o = 0 - for _, err := range errors { - if err != nil { - errors[o] = err - o++ - } - } - errors = errors[:o] - - return parsed, errors -} - -// scanImports returns the set of all import paths from all -// import specs in the specified files. -func scanImports(files []*ast.File) map[string]bool { - imports := make(map[string]bool) - for _, f := range files { - for _, decl := range f.Decls { - if decl, ok := decl.(*ast.GenDecl); ok && decl.Tok == token.IMPORT { - for _, spec := range decl.Specs { - spec := spec.(*ast.ImportSpec) - - // NB: do not assume the program is well-formed! - path, err := strconv.Unquote(spec.Path.Value) - if err != nil { - continue // quietly ignore the error - } - if path == "C" { - continue // skip pseudopackage - } - imports[path] = true - } - } - } - } - return imports -} - -// ---------- Internal helpers ---------- - -// TODO(adonovan): make this a method: func (*token.File) Contains(token.Pos) -func tokenFileContainsPos(f *token.File, pos token.Pos) bool { - p := int(pos) - base := f.Base() - return base <= p && p < base+f.Size() -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/types/typeutil/imports.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/types/typeutil/imports.go deleted file mode 100644 index 9c441dba9c06..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/types/typeutil/imports.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package typeutil - -import "go/types" - -// Dependencies returns all dependencies of the specified packages. -// -// Dependent packages appear in topological order: if package P imports -// package Q, Q appears earlier than P in the result. -// The algorithm follows import statements in the order they -// appear in the source code, so the result is a total order. 
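parseFiles above bounds concurrent file I/O with the ioLimit counting semaphore: a send acquires a slot, a receive releases it, and the channel capacity is the concurrency limit. A minimal sketch of the pattern; the capacity of 3 and the loop body are arbitrary stand-ins:

```go
package main

import (
	"fmt"
	"sync"
)

// ioLimit is a counting semaphore: its capacity bounds how many
// goroutines may be "doing I/O" at any moment.
var ioLimit = make(chan bool, 3)

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			ioLimit <- true              // acquire a slot (blocks when 3 are in use)
			defer func() { <-ioLimit }() // release the slot
			fmt.Println("parsing file", i)
		}(i)
	}
	wg.Wait()
}
```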
-// -func Dependencies(pkgs ...*types.Package) []*types.Package { - var result []*types.Package - seen := make(map[*types.Package]bool) - var visit func(pkgs []*types.Package) - visit = func(pkgs []*types.Package) { - for _, p := range pkgs { - if !seen[p] { - seen[p] = true - visit(p.Imports()) - result = append(result, p) - } - } - } - visit(pkgs) - return result -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/types/typeutil/map.go deleted file mode 100644 index eb63ea22c17a..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/types/typeutil/map.go +++ /dev/null @@ -1,313 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package typeutil defines various utilities for types, such as Map, -// a mapping from types.Type to interface{} values. -package typeutil - -import ( - "bytes" - "fmt" - "go/types" - "reflect" -) - -// Map is a hash-table-based mapping from types (types.Type) to -// arbitrary interface{} values. The concrete types that implement -// the Type interface are pointers. Since they are not canonicalized, -// == cannot be used to check for equivalence, and thus we cannot -// simply use a Go map. -// -// Just as with map[K]V, a nil *Map is a valid empty map. -// -// Not thread-safe. -// -type Map struct { - hasher Hasher // shared by many Maps - table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused - length int // number of map entries -} - -// entry is an entry (key/value association) in a hash bucket. -type entry struct { - key types.Type - value interface{} -} - -// SetHasher sets the hasher used by Map. -// -// All Hashers are functionally equivalent but contain internal state -// used to cache the results of hashing previously seen types. -// -// A single Hasher created by MakeHasher() may be shared among many -// Maps. This is recommended if the instances have many keys in -// common, as it will amortize the cost of hash computation. -// -// A Hasher may grow without bound as new types are seen. Even when a -// type is deleted from the map, the Hasher never shrinks, since other -// types in the map may reference the deleted type indirectly. -// -// Hashers are not thread-safe, and read-only operations such as -// Map.Lookup require updates to the hasher, so a full Mutex lock (not a -// read-lock) is require around all Map operations if a shared -// hasher is accessed from multiple threads. -// -// If SetHasher is not called, the Map will create a private hasher at -// the first call to Insert. -// -func (m *Map) SetHasher(hasher Hasher) { - m.hasher = hasher -} - -// Delete removes the entry with the given key, if any. -// It returns true if the entry was found. -// -func (m *Map) Delete(key types.Type) bool { - if m != nil && m.table != nil { - hash := m.hasher.Hash(key) - bucket := m.table[hash] - for i, e := range bucket { - if e.key != nil && types.Identical(key, e.key) { - // We can't compact the bucket as it - // would disturb iterators. - bucket[i] = entry{} - m.length-- - return true - } - } - } - return false -} - -// At returns the map entry for the given key. -// The result is nil if the entry is not present. 
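The reason Map exists at all, per its doc comment above: distinct types.Type values can be structurally identical, so pointer equality (and hence a built-in Go map keyed by types.Type) is the wrong lookup. A quick demonstration:

```go
package main

import (
	"fmt"
	"go/types"
)

func main() {
	a := types.NewSlice(types.Typ[types.Int]) // []int
	b := types.NewSlice(types.Typ[types.Int]) // []int again, separately built
	fmt.Println(a == b)                // false: distinct pointers
	fmt.Println(types.Identical(a, b)) // true: same structure
}
```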
-// -func (m *Map) At(key types.Type) interface{} { - if m != nil && m.table != nil { - for _, e := range m.table[m.hasher.Hash(key)] { - if e.key != nil && types.Identical(key, e.key) { - return e.value - } - } - } - return nil -} - -// Set sets the map entry for key to val, -// and returns the previous entry, if any. -func (m *Map) Set(key types.Type, value interface{}) (prev interface{}) { - if m.table != nil { - hash := m.hasher.Hash(key) - bucket := m.table[hash] - var hole *entry - for i, e := range bucket { - if e.key == nil { - hole = &bucket[i] - } else if types.Identical(key, e.key) { - prev = e.value - bucket[i].value = value - return - } - } - - if hole != nil { - *hole = entry{key, value} // overwrite deleted entry - } else { - m.table[hash] = append(bucket, entry{key, value}) - } - } else { - if m.hasher.memo == nil { - m.hasher = MakeHasher() - } - hash := m.hasher.Hash(key) - m.table = map[uint32][]entry{hash: {entry{key, value}}} - } - - m.length++ - return -} - -// Len returns the number of map entries. -func (m *Map) Len() int { - if m != nil { - return m.length - } - return 0 -} - -// Iterate calls function f on each entry in the map in unspecified order. -// -// If f should mutate the map, Iterate provides the same guarantees as -// Go maps: if f deletes a map entry that Iterate has not yet reached, -// f will not be invoked for it, but if f inserts a map entry that -// Iterate has not yet reached, whether or not f will be invoked for -// it is unspecified. -// -func (m *Map) Iterate(f func(key types.Type, value interface{})) { - if m != nil { - for _, bucket := range m.table { - for _, e := range bucket { - if e.key != nil { - f(e.key, e.value) - } - } - } - } -} - -// Keys returns a new slice containing the set of map keys. -// The order is unspecified. -func (m *Map) Keys() []types.Type { - keys := make([]types.Type, 0, m.Len()) - m.Iterate(func(key types.Type, _ interface{}) { - keys = append(keys, key) - }) - return keys -} - -func (m *Map) toString(values bool) string { - if m == nil { - return "{}" - } - var buf bytes.Buffer - fmt.Fprint(&buf, "{") - sep := "" - m.Iterate(func(key types.Type, value interface{}) { - fmt.Fprint(&buf, sep) - sep = ", " - fmt.Fprint(&buf, key) - if values { - fmt.Fprintf(&buf, ": %q", value) - } - }) - fmt.Fprint(&buf, "}") - return buf.String() -} - -// String returns a string representation of the map's entries. -// Values are printed using fmt.Sprintf("%v", v). -// Order is unspecified. -// -func (m *Map) String() string { - return m.toString(true) -} - -// KeysString returns a string representation of the map's key set. -// Order is unspecified. -// -func (m *Map) KeysString() string { - return m.toString(false) -} - -//////////////////////////////////////////////////////////////////////// -// Hasher - -// A Hasher maps each type to its hash value. -// For efficiency, a hasher uses memoization; thus its memory -// footprint grows monotonically over time. -// Hashers are not thread-safe. -// Hashers have reference semantics. -// Call MakeHasher to create a Hasher. -type Hasher struct { - memo map[types.Type]uint32 -} - -// MakeHasher returns a new Hasher instance. -func MakeHasher() Hasher { - return Hasher{make(map[types.Type]uint32)} -} - -// Hash computes a hash value for the given type t such that -// Identical(t, t') => Hash(t) == Hash(t'). 
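The hashString helper just below is a Fowler–Noll–Vo style hash (XOR in a byte, then multiply by the 32-bit FNV prime); note it starts from a zero seed rather than the standard FNV offset basis, which is fine for Map's internal use. A standalone sketch of that loop, under the hypothetical name fnv32a:

```go
package main

import "fmt"

func fnv32a(s string) uint32 {
	var h uint32 // zero seed, as in typeutil's hashString
	for i := 0; i < len(s); i++ {
		h ^= uint32(s[i])
		h *= 16777619 // 32-bit FNV prime
	}
	return h
}

func main() {
	fmt.Println(fnv32a("net/http") == fnv32a("net/http")) // deterministic: true
	fmt.Println(fnv32a("a") == fnv32a("b"))               // distinct inputs spread: false
}
```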
-func (h Hasher) Hash(t types.Type) uint32 { - hash, ok := h.memo[t] - if !ok { - hash = h.hashFor(t) - h.memo[t] = hash - } - return hash -} - -// hashString computes the Fowler–Noll–Vo hash of s. -func hashString(s string) uint32 { - var h uint32 - for i := 0; i < len(s); i++ { - h ^= uint32(s[i]) - h *= 16777619 - } - return h -} - -// hashFor computes the hash of t. -func (h Hasher) hashFor(t types.Type) uint32 { - // See Identical for rationale. - switch t := t.(type) { - case *types.Basic: - return uint32(t.Kind()) - - case *types.Array: - return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem()) - - case *types.Slice: - return 9049 + 2*h.Hash(t.Elem()) - - case *types.Struct: - var hash uint32 = 9059 - for i, n := 0, t.NumFields(); i < n; i++ { - f := t.Field(i) - if f.Anonymous() { - hash += 8861 - } - hash += hashString(t.Tag(i)) - hash += hashString(f.Name()) // (ignore f.Pkg) - hash += h.Hash(f.Type()) - } - return hash - - case *types.Pointer: - return 9067 + 2*h.Hash(t.Elem()) - - case *types.Signature: - var hash uint32 = 9091 - if t.Variadic() { - hash *= 8863 - } - return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results()) - - case *types.Interface: - var hash uint32 = 9103 - for i, n := 0, t.NumMethods(); i < n; i++ { - // See go/types.identicalMethods for rationale. - // Method order is not significant. - // Ignore m.Pkg(). - m := t.Method(i) - hash += 3*hashString(m.Name()) + 5*h.Hash(m.Type()) - } - return hash - - case *types.Map: - return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem()) - - case *types.Chan: - return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem()) - - case *types.Named: - // Not safe with a copying GC; objects may move. - return uint32(reflect.ValueOf(t.Obj()).Pointer()) - - case *types.Tuple: - return h.hashTuple(t) - } - panic(t) -} - -func (h Hasher) hashTuple(tuple *types.Tuple) uint32 { - // See go/types.identicalTypes for rationale. - n := tuple.Len() - var hash uint32 = 9137 + 2*uint32(n) - for i := 0; i < n; i++ { - hash += 3 * h.Hash(tuple.At(i).Type()) - } - return hash -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go deleted file mode 100644 index 32084610f49a..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file implements a cache of method sets. - -package typeutil - -import ( - "go/types" - "sync" -) - -// A MethodSetCache records the method set of each type T for which -// MethodSet(T) is called so that repeat queries are fast. -// The zero value is a ready-to-use cache instance. -type MethodSetCache struct { - mu sync.Mutex - named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N - others map[types.Type]*types.MethodSet // all other types -} - -// MethodSet returns the method set of type T. It is thread-safe. -// -// If cache is nil, this function is equivalent to types.NewMethodSet(T). -// Utility functions can thus expose an optional *MethodSetCache -// parameter to clients that care about performance. 
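A minimal sketch of the caching pattern MethodSetCache implements: mutex-guarded, lazily allocated, zero value ready to use. This trimmed-down version (msetCache is a hypothetical name) keys every type by pointer, so unlike the real cache it can recompute for distinct-but-identical *types.Pointer values; that is exactly the case the lookupNamed special-casing below exists to avoid:

```go
package main

import (
	"fmt"
	"go/types"
	"sync"
)

type msetCache struct {
	mu sync.Mutex
	m  map[types.Type]*types.MethodSet
}

func (c *msetCache) of(T types.Type) *types.MethodSet {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.m == nil {
		c.m = make(map[types.Type]*types.MethodSet) // lazy init: zero value usable
	}
	ms, ok := c.m[T]
	if !ok {
		ms = types.NewMethodSet(T) // compute once, reuse on repeat queries
		c.m[T] = ms
	}
	return ms
}

func main() {
	var c msetCache
	T := types.NewPointer(types.Typ[types.Int])    // *int has no methods
	fmt.Println(c.of(T).Len(), c.of(T) == c.of(T)) // 0 true (second hit is cached)
}
```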
-// -func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet { - if cache == nil { - return types.NewMethodSet(T) - } - cache.mu.Lock() - defer cache.mu.Unlock() - - switch T := T.(type) { - case *types.Named: - return cache.lookupNamed(T).value - - case *types.Pointer: - if N, ok := T.Elem().(*types.Named); ok { - return cache.lookupNamed(N).pointer - } - } - - // all other types - // (The map uses pointer equivalence, not type identity.) - mset := cache.others[T] - if mset == nil { - mset = types.NewMethodSet(T) - if cache.others == nil { - cache.others = make(map[types.Type]*types.MethodSet) - } - cache.others[T] = mset - } - return mset -} - -func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } { - if cache.named == nil { - cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet }) - } - // Avoid recomputing mset(*T) for each distinct Pointer - // instance whose underlying type is a named type. - msets, ok := cache.named[named] - if !ok { - msets.value = types.NewMethodSet(named) - msets.pointer = types.NewMethodSet(types.NewPointer(named)) - cache.named[named] = msets - } - return msets -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/types/typeutil/ui.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/types/typeutil/ui.go deleted file mode 100644 index 9849c24cef3f..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/go/types/typeutil/ui.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package typeutil - -// This file defines utilities for user interfaces that display types. - -import "go/types" - -// IntuitiveMethodSet returns the intuitive method set of a type T, -// which is the set of methods you can call on an addressable value of -// that type. -// -// The result always contains MethodSet(T), and is exactly MethodSet(T) -// for interface types and for pointer-to-concrete types. -// For all other concrete types T, the result additionally -// contains each method belonging to *T if there is no identically -// named method on T itself. -// -// This corresponds to user intuition about method sets; -// this function is intended only for user interfaces. -// -// The order of the result is as for types.MethodSet(T). -// -func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection { - isPointerToConcrete := func(T types.Type) bool { - ptr, ok := T.(*types.Pointer) - return ok && !types.IsInterface(ptr.Elem()) - } - - var result []*types.Selection - mset := msets.MethodSet(T) - if types.IsInterface(T) || isPointerToConcrete(T) { - for i, n := 0, mset.Len(); i < n; i++ { - result = append(result, mset.At(i)) - } - } else { - // T is some other concrete type. - // Report methods of T and *T, preferring those of T. 
- pmset := msets.MethodSet(types.NewPointer(T)) - for i, n := 0, pmset.Len(); i < n; i++ { - meth := pmset.At(i) - if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil { - meth = m - } - result = append(result, meth) - } - - } - return result -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/fastwalk.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/fastwalk.go deleted file mode 100644 index 31e6e27b0d56..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/fastwalk.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// A faster implementation of filepath.Walk. -// -// filepath.Walk's design necessarily calls os.Lstat on each file, -// even if the caller needs less info. And goimports only need to know -// the type of each file. The kernel interface provides the type in -// the Readdir call but the standard library ignored it. -// fastwalk_unix.go contains a fork of the syscall routines. -// -// See golang.org/issue/16399 - -package imports - -import ( - "errors" - "os" - "path/filepath" - "runtime" - "sync" -) - -// traverseLink is a sentinel error for fastWalk, similar to filepath.SkipDir. -var traverseLink = errors.New("traverse symlink, assuming target is a directory") - -// fastWalk walks the file tree rooted at root, calling walkFn for -// each file or directory in the tree, including root. -// -// If fastWalk returns filepath.SkipDir, the directory is skipped. -// -// Unlike filepath.Walk: -// * file stat calls must be done by the user. -// The only provided metadata is the file type, which does not include -// any permission bits. -// * multiple goroutines stat the filesystem concurrently. The provided -// walkFn must be safe for concurrent use. -// * fastWalk can follow symlinks if walkFn returns the traverseLink -// sentinel error. It is the walkFn's responsibility to prevent -// fastWalk from going into symlink cycles. -func fastWalk(root string, walkFn func(path string, typ os.FileMode) error) error { - // TODO(bradfitz): make numWorkers configurable? We used a - // minimum of 4 to give the kernel more info about multiple - // things we want, in hopes its I/O scheduling can take - // advantage of that. Hopefully most are in cache. Maybe 4 is - // even too low of a minimum. Profile more. - numWorkers := 4 - if n := runtime.NumCPU(); n > numWorkers { - numWorkers = n - } - - // Make sure to wait for all workers to finish, otherwise - // walkFn could still be called after returning. This Wait call - // runs after close(e.donec) below. 
- var wg sync.WaitGroup - defer wg.Wait() - - w := &walker{ - fn: walkFn, - enqueuec: make(chan walkItem, numWorkers), // buffered for performance - workc: make(chan walkItem, numWorkers), // buffered for performance - donec: make(chan struct{}), - - // buffered for correctness & not leaking goroutines: - resc: make(chan error, numWorkers), - } - defer close(w.donec) - - for i := 0; i < numWorkers; i++ { - wg.Add(1) - go w.doWork(&wg) - } - todo := []walkItem{{dir: root}} - out := 0 - for { - workc := w.workc - var workItem walkItem - if len(todo) == 0 { - workc = nil - } else { - workItem = todo[len(todo)-1] - } - select { - case workc <- workItem: - todo = todo[:len(todo)-1] - out++ - case it := <-w.enqueuec: - todo = append(todo, it) - case err := <-w.resc: - out-- - if err != nil { - return err - } - if out == 0 && len(todo) == 0 { - // It's safe to quit here, as long as the buffered - // enqueue channel isn't also readable, which might - // happen if the worker sends both another unit of - // work and its result before the other select was - // scheduled and both w.resc and w.enqueuec were - // readable. - select { - case it := <-w.enqueuec: - todo = append(todo, it) - default: - return nil - } - } - } - } -} - -// doWork reads directories as instructed (via workc) and runs the -// user's callback function. -func (w *walker) doWork(wg *sync.WaitGroup) { - defer wg.Done() - for { - select { - case <-w.donec: - return - case it := <-w.workc: - select { - case <-w.donec: - return - case w.resc <- w.walk(it.dir, !it.callbackDone): - } - } - } -} - -type walker struct { - fn func(path string, typ os.FileMode) error - - donec chan struct{} // closed on fastWalk's return - workc chan walkItem // to workers - enqueuec chan walkItem // from workers - resc chan error // from workers -} - -type walkItem struct { - dir string - callbackDone bool // callback already called; don't do it again -} - -func (w *walker) enqueue(it walkItem) { - select { - case w.enqueuec <- it: - case <-w.donec: - } -} - -func (w *walker) onDirEnt(dirName, baseName string, typ os.FileMode) error { - joined := dirName + string(os.PathSeparator) + baseName - if typ == os.ModeDir { - w.enqueue(walkItem{dir: joined}) - return nil - } - - err := w.fn(joined, typ) - if typ == os.ModeSymlink { - if err == traverseLink { - // Set callbackDone so we don't call it twice for both the - // symlink-as-symlink and the symlink-as-directory later: - w.enqueue(walkItem{dir: joined, callbackDone: true}) - return nil - } - if err == filepath.SkipDir { - // Permit SkipDir on symlinks too. - return nil - } - } - return err -} - -func (w *walker) walk(root string, runUserCallback bool) error { - if runUserCallback { - err := w.fn(root, os.ModeDir) - if err == filepath.SkipDir { - return nil - } - if err != nil { - return err - } - } - - return readDir(root, w.onDirEnt) -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/fastwalk_dirent_fileno.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/fastwalk_dirent_fileno.go deleted file mode 100644 index f1fd64949dbf..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/fastwalk_dirent_fileno.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
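fastWalk's coordinator above has to juggle an unbounded todo stack precisely because its workers also produce new work (subdirectories), so a plain bounded channel could deadlock. The simplified pool below shows only the bounded-worker half, with a job list known up front and no dynamic enqueue, to make the core structure visible:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	jobs := make(chan string)
	var wg sync.WaitGroup
	for w := 0; w < 4; w++ { // fastWalk sizes its pool by NumCPU, minimum 4
		wg.Add(1)
		go func() {
			defer wg.Done()
			for dir := range jobs {
				fmt.Println("reading", dir) // stand-in for readDir
			}
		}()
	}
	for _, d := range []string{"/a", "/b", "/c"} {
		jobs <- d
	}
	close(jobs)
	wg.Wait()
}
```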
- -// +build freebsd openbsd netbsd - -package imports - -import "syscall" - -func direntInode(dirent *syscall.Dirent) uint64 { - return uint64(dirent.Fileno) -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/fastwalk_dirent_ino.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/fastwalk_dirent_ino.go deleted file mode 100644 index ee85bc4dd4d1..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/fastwalk_dirent_ino.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux,!appengine darwin - -package imports - -import "syscall" - -func direntInode(dirent *syscall.Dirent) uint64 { - return uint64(dirent.Ino) -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/fastwalk_portable.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/fastwalk_portable.go deleted file mode 100644 index 6c2658347d14..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/fastwalk_portable.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build appengine !linux,!darwin,!freebsd,!openbsd,!netbsd - -package imports - -import ( - "io/ioutil" - "os" -) - -// readDir calls fn for each directory entry in dirName. -// It does not descend into directories or follow symlinks. -// If fn returns a non-nil error, readDir returns with that error -// immediately. -func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { - fis, err := ioutil.ReadDir(dirName) - if err != nil { - return err - } - for _, fi := range fis { - if err := fn(dirName, fi.Name(), fi.Mode()&os.ModeType); err != nil { - return err - } - } - return nil -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/fastwalk_unix.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/fastwalk_unix.go deleted file mode 100644 index 5854233db92e..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/fastwalk_unix.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux,!appengine darwin freebsd openbsd netbsd - -package imports - -import ( - "bytes" - "fmt" - "os" - "syscall" - "unsafe" -) - -const blockSize = 8 << 10 - -// unknownFileMode is a sentinel (and bogus) os.FileMode -// value used to represent a syscall.DT_UNKNOWN Dirent.Type. -const unknownFileMode os.FileMode = os.ModeNamedPipe | os.ModeSocket | os.ModeDevice - -func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { - fd, err := syscall.Open(dirName, 0, 0) - if err != nil { - return err - } - defer syscall.Close(fd) - - // The buffer must be at least a block long. - buf := make([]byte, blockSize) // stack-allocated; doesn't escape - bufp := 0 // starting read position in buf - nbuf := 0 // end valid data in buf - for { - if bufp >= nbuf { - bufp = 0 - nbuf, err = syscall.ReadDirent(fd, buf) - if err != nil { - return os.NewSyscallError("readdirent", err) - } - if nbuf <= 0 { - return nil - } - } - consumed, name, typ := parseDirEnt(buf[bufp:nbuf]) - bufp += consumed - if name == "" || name == "." 
|| name == ".." { - continue - } - // Fallback for filesystems (like old XFS) that don't - // support Dirent.Type and have DT_UNKNOWN (0) there - // instead. - if typ == unknownFileMode { - fi, err := os.Lstat(dirName + "/" + name) - if err != nil { - // It got deleted in the meantime. - if os.IsNotExist(err) { - continue - } - return err - } - typ = fi.Mode() & os.ModeType - } - if err := fn(dirName, name, typ); err != nil { - return err - } - } -} - -func parseDirEnt(buf []byte) (consumed int, name string, typ os.FileMode) { - // golang.org/issue/15653 - dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0])) - if v := unsafe.Offsetof(dirent.Reclen) + unsafe.Sizeof(dirent.Reclen); uintptr(len(buf)) < v { - panic(fmt.Sprintf("buf size of %d smaller than dirent header size %d", len(buf), v)) - } - if len(buf) < int(dirent.Reclen) { - panic(fmt.Sprintf("buf size %d < record length %d", len(buf), dirent.Reclen)) - } - consumed = int(dirent.Reclen) - if direntInode(dirent) == 0 { // File absent in directory. - return - } - switch dirent.Type { - case syscall.DT_REG: - typ = 0 - case syscall.DT_DIR: - typ = os.ModeDir - case syscall.DT_LNK: - typ = os.ModeSymlink - case syscall.DT_BLK: - typ = os.ModeDevice - case syscall.DT_FIFO: - typ = os.ModeNamedPipe - case syscall.DT_SOCK: - typ = os.ModeSocket - case syscall.DT_UNKNOWN: - typ = unknownFileMode - default: - // Skip weird things. - // It's probably a DT_WHT (http://lwn.net/Articles/325369/) - // or something. Revisit if/when this package is moved outside - // of goimports. goimports only cares about regular files, - // symlinks, and directories. - return - } - - nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0])) - nameLen := bytes.IndexByte(nameBuf[:], 0) - if nameLen < 0 { - panic("failed to find terminating 0 byte in dirent") - } - - // Special cases for common things: - if nameLen == 1 && nameBuf[0] == '.' { - name = "." - } else if nameLen == 2 && nameBuf[0] == '.' && nameBuf[1] == '.' { - name = ".." - } else { - name = string(nameBuf[:nameLen]) - } - return -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/fix.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/fix.go deleted file mode 100644 index 61a5f062a62e..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/fix.go +++ /dev/null @@ -1,974 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package imports - -import ( - "bufio" - "bytes" - "fmt" - "go/ast" - "go/build" - "go/parser" - "go/token" - "io/ioutil" - "log" - "os" - "path" - "path/filepath" - "sort" - "strings" - "sync" - - "golang.org/x/tools/go/ast/astutil" -) - -// Debug controls verbose logging. -var Debug = false - -var ( - inTests = false // set true by fix_test.go; if false, no need to use testMu - testMu sync.RWMutex // guards globals reset by tests; used only if inTests -) - -// If set, LocalPrefix instructs Process to sort import paths with the given -// prefix into another group after 3rd-party packages. -var LocalPrefix string - -// importToGroup is a list of functions which map from an import path to -// a group number. 
-var importToGroup = []func(importPath string) (num int, ok bool){ - func(importPath string) (num int, ok bool) { - if LocalPrefix != "" && strings.HasPrefix(importPath, LocalPrefix) { - return 3, true - } - return - }, - func(importPath string) (num int, ok bool) { - if strings.HasPrefix(importPath, "appengine") { - return 2, true - } - return - }, - func(importPath string) (num int, ok bool) { - if strings.Contains(importPath, ".") { - return 1, true - } - return - }, -} - -func importGroup(importPath string) int { - for _, fn := range importToGroup { - if n, ok := fn(importPath); ok { - return n - } - } - return 0 -} - -// packageInfo is a summary of features found in a package. -type packageInfo struct { - Globals map[string]bool // symbol => true -} - -// dirPackageInfo exposes the dirPackageInfoFile function so that it can be overridden. -var dirPackageInfo = dirPackageInfoFile - -// dirPackageInfoFile gets information from other files in the package. -func dirPackageInfoFile(pkgName, srcDir, filename string) (*packageInfo, error) { - considerTests := strings.HasSuffix(filename, "_test.go") - - // Handle file from stdin - if _, err := os.Stat(filename); err != nil { - if os.IsNotExist(err) { - return &packageInfo{}, nil - } - return nil, err - } - - fileBase := filepath.Base(filename) - packageFileInfos, err := ioutil.ReadDir(srcDir) - if err != nil { - return nil, err - } - - info := &packageInfo{Globals: make(map[string]bool)} - for _, fi := range packageFileInfos { - if fi.Name() == fileBase || !strings.HasSuffix(fi.Name(), ".go") { - continue - } - if !considerTests && strings.HasSuffix(fi.Name(), "_test.go") { - continue - } - - fileSet := token.NewFileSet() - root, err := parser.ParseFile(fileSet, filepath.Join(srcDir, fi.Name()), nil, 0) - if err != nil { - continue - } - - for _, decl := range root.Decls { - genDecl, ok := decl.(*ast.GenDecl) - if !ok { - continue - } - - for _, spec := range genDecl.Specs { - valueSpec, ok := spec.(*ast.ValueSpec) - if !ok { - continue - } - info.Globals[valueSpec.Names[0].Name] = true - } - } - } - return info, nil -} - -func fixImports(fset *token.FileSet, f *ast.File, filename string) (added []string, err error) { - // refs are a set of possible package references currently unsatisfied by imports. - // first key: either base package (e.g. "fmt") or renamed package - // second key: referenced package symbol (e.g. "Println") - refs := make(map[string]map[string]bool) - - // decls are the current package imports. key is base package or renamed package. - decls := make(map[string]*ast.ImportSpec) - - abs, err := filepath.Abs(filename) - if err != nil { - return nil, err - } - srcDir := filepath.Dir(abs) - if Debug { - log.Printf("fixImports(filename=%q), abs=%q, srcDir=%q ...", filename, abs, srcDir) - } - - var packageInfo *packageInfo - var loadedPackageInfo bool - - // collect potential uses of packages. 
- var visitor visitFn - visitor = visitFn(func(node ast.Node) ast.Visitor { - if node == nil { - return visitor - } - switch v := node.(type) { - case *ast.ImportSpec: - if v.Name != nil { - decls[v.Name.Name] = v - break - } - ipath := strings.Trim(v.Path.Value, `"`) - if ipath == "C" { - break - } - local := importPathToName(ipath, srcDir) - decls[local] = v - case *ast.SelectorExpr: - xident, ok := v.X.(*ast.Ident) - if !ok { - break - } - if xident.Obj != nil { - // if the parser can resolve it, it's not a package ref - break - } - pkgName := xident.Name - if refs[pkgName] == nil { - refs[pkgName] = make(map[string]bool) - } - if !loadedPackageInfo { - loadedPackageInfo = true - packageInfo, _ = dirPackageInfo(f.Name.Name, srcDir, filename) - } - if decls[pkgName] == nil && (packageInfo == nil || !packageInfo.Globals[pkgName]) { - refs[pkgName][v.Sel.Name] = true - } - } - return visitor - }) - ast.Walk(visitor, f) - - // Nil out any unused ImportSpecs, to be removed in following passes - unusedImport := map[string]string{} - for pkg, is := range decls { - if refs[pkg] == nil && pkg != "_" && pkg != "." { - name := "" - if is.Name != nil { - name = is.Name.Name - } - unusedImport[strings.Trim(is.Path.Value, `"`)] = name - } - } - for ipath, name := range unusedImport { - if ipath == "C" { - // Don't remove cgo stuff. - continue - } - astutil.DeleteNamedImport(fset, f, name, ipath) - } - - for pkgName, symbols := range refs { - if len(symbols) == 0 { - // skip over packages already imported - delete(refs, pkgName) - } - } - - // Search for imports matching potential package references. - searches := 0 - type result struct { - ipath string // import path (if err == nil) - name string // optional name to rename import as - err error - } - results := make(chan result) - for pkgName, symbols := range refs { - go func(pkgName string, symbols map[string]bool) { - ipath, rename, err := findImport(pkgName, symbols, filename) - r := result{ipath: ipath, err: err} - if rename { - r.name = pkgName - } - results <- r - }(pkgName, symbols) - searches++ - } - for i := 0; i < searches; i++ { - result := <-results - if result.err != nil { - return nil, result.err - } - if result.ipath != "" { - if result.name != "" { - astutil.AddNamedImport(fset, f, result.name, result.ipath) - } else { - astutil.AddImport(fset, f, result.ipath) - } - added = append(added, result.ipath) - } - } - - return added, nil -} - -// importPathToName returns the package name for the given import path. -var importPathToName func(importPath, srcDir string) (packageName string) = importPathToNameGoPath - -// importPathToNameBasic assumes the package name is the base of import path. -func importPathToNameBasic(importPath, srcDir string) (packageName string) { - return path.Base(importPath) -} - -// importPathToNameGoPath finds out the actual package name, as declared in its .go files. -// If there's a problem, it falls back to using importPathToNameBasic. -func importPathToNameGoPath(importPath, srcDir string) (packageName string) { - // Fast path for standard library without going to disk. 
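Why two naming strategies: guessing the package name from the import path's base is fast but wrong for paths like gopkg.in/yaml.v2 (whose package is yaml), which is why the GOPATH variant parses a real source file and keeps the guess only as a fallback. The failure mode, concretely:

```go
package main

import (
	"fmt"
	"path"
)

func main() {
	fmt.Println(path.Base("net/http"))         // "http": the guess is right
	fmt.Println(path.Base("gopkg.in/yaml.v2")) // "yaml.v2": actual package is "yaml"
}
```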
- if pkg, ok := stdImportPackage[importPath]; ok { - return pkg - } - - pkgName, err := importPathToNameGoPathParse(importPath, srcDir) - if Debug { - log.Printf("importPathToNameGoPathParse(%q, srcDir=%q) = %q, %v", importPath, srcDir, pkgName, err) - } - if err == nil { - return pkgName - } - return importPathToNameBasic(importPath, srcDir) -} - -// importPathToNameGoPathParse is a faster version of build.Import if -// the only thing desired is the package name. It uses build.FindOnly -// to find the directory and then only parses one file in the package, -// trusting that the files in the directory are consistent. -func importPathToNameGoPathParse(importPath, srcDir string) (packageName string, err error) { - buildPkg, err := build.Import(importPath, srcDir, build.FindOnly) - if err != nil { - return "", err - } - d, err := os.Open(buildPkg.Dir) - if err != nil { - return "", err - } - names, err := d.Readdirnames(-1) - d.Close() - if err != nil { - return "", err - } - sort.Strings(names) // to have predictable behavior - var lastErr error - var nfile int - for _, name := range names { - if !strings.HasSuffix(name, ".go") { - continue - } - if strings.HasSuffix(name, "_test.go") { - continue - } - nfile++ - fullFile := filepath.Join(buildPkg.Dir, name) - - fset := token.NewFileSet() - f, err := parser.ParseFile(fset, fullFile, nil, parser.PackageClauseOnly) - if err != nil { - lastErr = err - continue - } - pkgName := f.Name.Name - if pkgName == "documentation" { - // Special case from go/build.ImportDir, not - // handled by ctx.MatchFile. - continue - } - if pkgName == "main" { - // Also skip package main, assuming it's a +build ignore generator or example. - // Since you can't import a package main anyway, there's no harm here. - continue - } - return pkgName, nil - } - if lastErr != nil { - return "", lastErr - } - return "", fmt.Errorf("no importable package found in %d Go files", nfile) -} - -var stdImportPackage = map[string]string{} // "net/http" => "http" - -func init() { - // Nothing in the standard library has a package name not - // matching its import base name. - for _, pkg := range stdlib { - if _, ok := stdImportPackage[pkg]; !ok { - stdImportPackage[pkg] = path.Base(pkg) - } - } -} - -// Directory-scanning state. -var ( - // scanGoRootOnce guards calling scanGoRoot (for $GOROOT) - scanGoRootOnce sync.Once - // scanGoPathOnce guards calling scanGoPath (for $GOPATH) - scanGoPathOnce sync.Once - - // populateIgnoreOnce guards calling populateIgnore - populateIgnoreOnce sync.Once - ignoredDirs []os.FileInfo - - dirScanMu sync.RWMutex - dirScan map[string]*pkg // abs dir path => *pkg -) - -type pkg struct { - dir string // absolute file path to pkg directory ("/usr/lib/go/src/net/http") - importPath string // full pkg import path ("net/http", "foo/bar/vendor/a/b") - importPathShort string // vendorless import path ("net/http", "a/b") -} - -// byImportPathShortLength sorts by the short import path length, breaking ties on the -// import string itself. -type byImportPathShortLength []*pkg - -func (s byImportPathShortLength) Len() int { return len(s) } -func (s byImportPathShortLength) Less(i, j int) bool { - vi, vj := s[i].importPathShort, s[j].importPathShort - return len(vi) < len(vj) || (len(vi) == len(vj) && vi < vj) - -} -func (s byImportPathShortLength) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// guarded by populateIgnoreOnce; populates ignoredDirs. 
-func populateIgnore() { - for _, srcDir := range build.Default.SrcDirs() { - if srcDir == filepath.Join(build.Default.GOROOT, "src") { - continue - } - populateIgnoredDirs(srcDir) - } -} - -// populateIgnoredDirs reads an optional config file at /.goimportsignore -// of relative directories to ignore when scanning for go files. -// The provided path is one of the $GOPATH entries with "src" appended. -func populateIgnoredDirs(path string) { - file := filepath.Join(path, ".goimportsignore") - slurp, err := ioutil.ReadFile(file) - if Debug { - if err != nil { - log.Print(err) - } else { - log.Printf("Read %s", file) - } - } - if err != nil { - return - } - bs := bufio.NewScanner(bytes.NewReader(slurp)) - for bs.Scan() { - line := strings.TrimSpace(bs.Text()) - if line == "" || strings.HasPrefix(line, "#") { - continue - } - full := filepath.Join(path, line) - if fi, err := os.Stat(full); err == nil { - ignoredDirs = append(ignoredDirs, fi) - if Debug { - log.Printf("Directory added to ignore list: %s", full) - } - } else if Debug { - log.Printf("Error statting entry in .goimportsignore: %v", err) - } - } -} - -func skipDir(fi os.FileInfo) bool { - for _, ignoredDir := range ignoredDirs { - if os.SameFile(fi, ignoredDir) { - return true - } - } - return false -} - -// shouldTraverse reports whether the symlink fi should, found in dir, -// should be followed. It makes sure symlinks were never visited -// before to avoid symlink loops. -func shouldTraverse(dir string, fi os.FileInfo) bool { - path := filepath.Join(dir, fi.Name()) - target, err := filepath.EvalSymlinks(path) - if err != nil { - if !os.IsNotExist(err) { - fmt.Fprintln(os.Stderr, err) - } - return false - } - ts, err := os.Stat(target) - if err != nil { - fmt.Fprintln(os.Stderr, err) - return false - } - if !ts.IsDir() { - return false - } - if skipDir(ts) { - return false - } - // Check for symlink loops by statting each directory component - // and seeing if any are the same file as ts. - for { - parent := filepath.Dir(path) - if parent == path { - // Made it to the root without seeing a cycle. - // Use this symlink. - return true - } - parentInfo, err := os.Stat(parent) - if err != nil { - return false - } - if os.SameFile(ts, parentInfo) { - // Cycle. Don't traverse. - return false - } - path = parent - } - -} - -var testHookScanDir = func(dir string) {} - -var scanGoRootDone = make(chan struct{}) // closed when scanGoRoot is done - -func scanGoRoot() { - go func() { - scanGoDirs(true) - close(scanGoRootDone) - }() -} - -func scanGoPath() { scanGoDirs(false) } - -func scanGoDirs(goRoot bool) { - if Debug { - which := "$GOROOT" - if !goRoot { - which = "$GOPATH" - } - log.Printf("scanning " + which) - defer log.Printf("scanned " + which) - } - dirScanMu.Lock() - if dirScan == nil { - dirScan = make(map[string]*pkg) - } - dirScanMu.Unlock() - - for _, srcDir := range build.Default.SrcDirs() { - isGoroot := srcDir == filepath.Join(build.Default.GOROOT, "src") - if isGoroot != goRoot { - continue - } - testHookScanDir(srcDir) - walkFn := func(path string, typ os.FileMode) error { - dir := filepath.Dir(path) - if typ.IsRegular() { - if dir == srcDir { - // Doesn't make sense to have regular files - // directly in your $GOPATH/src or $GOROOT/src. 
- return nil - } - if !strings.HasSuffix(path, ".go") { - return nil - } - dirScanMu.Lock() - if _, dup := dirScan[dir]; !dup { - importpath := filepath.ToSlash(dir[len(srcDir)+len("/"):]) - dirScan[dir] = &pkg{ - importPath: importpath, - importPathShort: vendorlessImportPath(importpath), - dir: dir, - } - } - dirScanMu.Unlock() - return nil - } - if typ == os.ModeDir { - base := filepath.Base(path) - if base == "" || base[0] == '.' || base[0] == '_' || - base == "testdata" || base == "node_modules" { - return filepath.SkipDir - } - fi, err := os.Lstat(path) - if err == nil && skipDir(fi) { - if Debug { - log.Printf("skipping directory %q under %s", fi.Name(), dir) - } - return filepath.SkipDir - } - return nil - } - if typ == os.ModeSymlink { - base := filepath.Base(path) - if strings.HasPrefix(base, ".#") { - // Emacs noise. - return nil - } - fi, err := os.Lstat(path) - if err != nil { - // Just ignore it. - return nil - } - if shouldTraverse(dir, fi) { - return traverseLink - } - } - return nil - } - if err := fastWalk(srcDir, walkFn); err != nil { - log.Printf("goimports: scanning directory %v: %v", srcDir, err) - } - } -} - -// vendorlessImportPath returns the devendorized version of the provided import path. -// e.g. "foo/bar/vendor/a/b" => "a/b" -func vendorlessImportPath(ipath string) string { - // Devendorize for use in import statement. - if i := strings.LastIndex(ipath, "/vendor/"); i >= 0 { - return ipath[i+len("/vendor/"):] - } - if strings.HasPrefix(ipath, "vendor/") { - return ipath[len("vendor/"):] - } - return ipath -} - -// loadExports returns the set of exported symbols in the package at dir. -// It returns nil on error or if the package name in dir does not match expectPackage. -var loadExports func(expectPackage, dir string) map[string]bool = loadExportsGoPath - -func loadExportsGoPath(expectPackage, dir string) map[string]bool { - if Debug { - log.Printf("loading exports in dir %s (seeking package %s)", dir, expectPackage) - } - exports := make(map[string]bool) - - ctx := build.Default - - // ReadDir is like ioutil.ReadDir, but only returns *.go files - // and filters out _test.go files since they're not relevant - // and only slow things down. - ctx.ReadDir = func(dir string) (notTests []os.FileInfo, err error) { - all, err := ioutil.ReadDir(dir) - if err != nil { - return nil, err - } - notTests = all[:0] - for _, fi := range all { - name := fi.Name() - if strings.HasSuffix(name, ".go") && !strings.HasSuffix(name, "_test.go") { - notTests = append(notTests, fi) - } - } - return notTests, nil - } - - files, err := ctx.ReadDir(dir) - if err != nil { - log.Print(err) - return nil - } - - fset := token.NewFileSet() - - for _, fi := range files { - match, err := ctx.MatchFile(dir, fi.Name()) - if err != nil || !match { - continue - } - fullFile := filepath.Join(dir, fi.Name()) - f, err := parser.ParseFile(fset, fullFile, nil, 0) - if err != nil { - if Debug { - log.Printf("Parsing %s: %v", fullFile, err) - } - return nil - } - pkgName := f.Name.Name - if pkgName == "documentation" { - // Special case from go/build.ImportDir, not - // handled by ctx.MatchFile. 
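The vendorlessImportPath helper above is the entire devendoring story; a standalone copy (renamed vendorless here) with its edge cases exercised, using strings.TrimPrefix in place of the original's explicit prefix check:

```go
package main

import (
	"fmt"
	"strings"
)

// vendorless strips everything up to and including the last "/vendor/",
// or a leading "vendor/"; other paths pass through unchanged.
func vendorless(ipath string) string {
	if i := strings.LastIndex(ipath, "/vendor/"); i >= 0 {
		return ipath[i+len("/vendor/"):]
	}
	return strings.TrimPrefix(ipath, "vendor/")
}

func main() {
	fmt.Println(vendorless("foo/bar/vendor/a/b")) // a/b
	fmt.Println(vendorless("vendor/a/b"))         // a/b
	fmt.Println(vendorless("net/http"))           // net/http
}
```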
- continue - } - if pkgName != expectPackage { - if Debug { - log.Printf("scan of dir %v is not expected package %v (actually %v)", dir, expectPackage, pkgName) - } - return nil - } - for name := range f.Scope.Objects { - if ast.IsExported(name) { - exports[name] = true - } - } - } - - if Debug { - exportList := make([]string, 0, len(exports)) - for k := range exports { - exportList = append(exportList, k) - } - sort.Strings(exportList) - log.Printf("loaded exports in dir %v (package %v): %v", dir, expectPackage, strings.Join(exportList, ", ")) - } - return exports -} - -// findImport searches for a package with the given symbols. -// If no package is found, findImport returns ("", false, nil) -// -// This is declared as a variable rather than a function so goimports -// can be easily extended by adding a file with an init function. -// -// The rename value tells goimports whether to use the package name as -// a local qualifier in an import. For example, if findImports("pkg", -// "X") returns ("foo/bar", rename=true), then goimports adds the -// import line: -// import pkg "foo/bar" -// to satisfy uses of pkg.X in the file. -var findImport func(pkgName string, symbols map[string]bool, filename string) (foundPkg string, rename bool, err error) = findImportGoPath - -// findImportGoPath is the normal implementation of findImport. -// (Some companies have their own internally.) -func findImportGoPath(pkgName string, symbols map[string]bool, filename string) (foundPkg string, rename bool, err error) { - if inTests { - testMu.RLock() - defer testMu.RUnlock() - } - - // Fast path for the standard library. - // In the common case we hopefully never have to scan the GOPATH, which can - // be slow with moving disks. - if pkg, rename, ok := findImportStdlib(pkgName, symbols); ok { - return pkg, rename, nil - } - if pkgName == "rand" && symbols["Read"] { - // Special-case rand.Read. - // - // If findImportStdlib didn't find it above, don't go - // searching for it, lest it find and pick math/rand - // in GOROOT (new as of Go 1.6) - // - // crypto/rand is the safer choice. - return "", false, nil - } - - // TODO(sameer): look at the import lines for other Go files in the - // local directory, since the user is likely to import the same packages - // in the current Go file. Return rename=true when the other Go files - // use a renamed package that's also used in the current file. - - // Read all the $GOPATH/src/.goimportsignore files before scanning directories. - populateIgnoreOnce.Do(populateIgnore) - - // Start scanning the $GOROOT asynchronously, then run the - // GOPATH scan synchronously if needed, and then wait for the - // $GOROOT to finish. - // - // TODO(bradfitz): run each $GOPATH entry async. But nobody - // really has more than one anyway, so low priority. - scanGoRootOnce.Do(scanGoRoot) // async - if !fileInDir(filename, build.Default.GOROOT) { - scanGoPathOnce.Do(scanGoPath) // blocking - } - <-scanGoRootDone - - // Find candidate packages, looking only at their directory names first. - var candidates []*pkg - for _, pkg := range dirScan { - if pkgIsCandidate(filename, pkgName, pkg) { - candidates = append(candidates, pkg) - } - } - - // Sort the candidates by their import package length, - // assuming that shorter package names are better than long - // ones. Note that this sorts by the de-vendored name, so - // there's no "penalty" for vendoring. 
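The same ordering rule as byImportPathShortLength, written with sort.Slice: shorter (de-vendored) paths win, ties break lexically. Since the comparison uses the vendorless path, a vendored copy sorts no worse than the upstream one:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	paths := []string{"github.com/pkg/errors", "errors", "io", "fmt"}
	sort.Slice(paths, func(i, j int) bool {
		vi, vj := paths[i], paths[j]
		return len(vi) < len(vj) || (len(vi) == len(vj) && vi < vj)
	})
	fmt.Println(paths) // [io fmt errors github.com/pkg/errors]
}
```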
- sort.Sort(byImportPathShortLength(candidates)) - if Debug { - for i, pkg := range candidates { - log.Printf("%s candidate %d/%d: %v", pkgName, i+1, len(candidates), pkg.importPathShort) - } - } - - // Collect exports for packages with matching names. - - done := make(chan struct{}) // closed when we find the answer - defer close(done) - - rescv := make([]chan *pkg, len(candidates)) - for i := range candidates { - rescv[i] = make(chan *pkg) - } - const maxConcurrentPackageImport = 4 - loadExportsSem := make(chan struct{}, maxConcurrentPackageImport) - - go func() { - for i, pkg := range candidates { - select { - case loadExportsSem <- struct{}{}: - select { - case <-done: - return - default: - } - case <-done: - return - } - pkg := pkg - resc := rescv[i] - go func() { - if inTests { - testMu.RLock() - defer testMu.RUnlock() - } - defer func() { <-loadExportsSem }() - exports := loadExports(pkgName, pkg.dir) - - // If it doesn't have the right - // symbols, send nil to mean no match. - for symbol := range symbols { - if !exports[symbol] { - pkg = nil - break - } - } - select { - case resc <- pkg: - case <-done: - } - }() - } - }() - for _, resc := range rescv { - pkg := <-resc - if pkg == nil { - continue - } - // If the package name in the source doesn't match the import path's base, - // return true so the rewriter adds a name (import foo "github.com/bar/go-foo") - needsRename := path.Base(pkg.importPath) != pkgName - return pkg.importPathShort, needsRename, nil - } - return "", false, nil -} - -// pkgIsCandidate reports whether pkg is a candidate for satisfying the -// finding which package pkgIdent in the file named by filename is trying -// to refer to. -// -// This check is purely lexical and is meant to be as fast as possible -// because it's run over all $GOPATH directories to filter out poor -// candidates in order to limit the CPU and I/O later parsing the -// exports in candidate packages. -// -// filename is the file being formatted. -// pkgIdent is the package being searched for, like "client" (if -// searching for "client.New") -func pkgIsCandidate(filename, pkgIdent string, pkg *pkg) bool { - // Check "internal" and "vendor" visibility: - if !canUse(filename, pkg.dir) { - return false - } - - // Speed optimization to minimize disk I/O: - // the last two components on disk must contain the - // package name somewhere. - // - // This permits mismatch naming like directory - // "go-foo" being package "foo", or "pkg.v3" being "pkg", - // or directory "google.golang.org/api/cloudbilling/v1" - // being package "cloudbilling", but doesn't - // permit a directory "foo" to be package - // "bar", which is strongly discouraged - // anyway. There's no reason goimports needs - // to be slow just to accomodate that. 
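A sketch of the lexical filter just described, using stdlib strings helpers in place of the ASCII-only fast paths (hasHyphenOrUpperASCII and lowerASCIIAndRemoveHyphen, defined next); lastTwo is a hypothetical stand-in for lastTwoComponents:

```go
package main

import (
	"fmt"
	"strings"
)

// lastTwo keeps at most the final two path components.
func lastTwo(v string) string {
	parts := strings.Split(v, "/")
	if len(parts) > 2 {
		parts = parts[len(parts)-2:]
	}
	return strings.Join(parts, "/")
}

func main() {
	// Directory "go-foo" can still be package "foo": lowercase and
	// drop hyphens before the substring test, as pkgIsCandidate does.
	lt := lastTwo("github.com/bar/go-foo")
	norm := strings.ReplaceAll(strings.ToLower(lt), "-", "")
	fmt.Println(strings.Contains(norm, "foo")) // true: keep as a candidate
}
```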
- lastTwo := lastTwoComponents(pkg.importPathShort) - if strings.Contains(lastTwo, pkgIdent) { - return true - } - if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(pkgIdent) { - lastTwo = lowerASCIIAndRemoveHyphen(lastTwo) - if strings.Contains(lastTwo, pkgIdent) { - return true - } - } - - return false -} - -func hasHyphenOrUpperASCII(s string) bool { - for i := 0; i < len(s); i++ { - b := s[i] - if b == '-' || ('A' <= b && b <= 'Z') { - return true - } - } - return false -} - -func lowerASCIIAndRemoveHyphen(s string) (ret string) { - buf := make([]byte, 0, len(s)) - for i := 0; i < len(s); i++ { - b := s[i] - switch { - case b == '-': - continue - case 'A' <= b && b <= 'Z': - buf = append(buf, b+('a'-'A')) - default: - buf = append(buf, b) - } - } - return string(buf) -} - -// canUse reports whether the package in dir is usable from filename, -// respecting the Go "internal" and "vendor" visibility rules. -func canUse(filename, dir string) bool { - // Fast path check, before any allocations. If it doesn't contain vendor - // or internal, it's not tricky: - // Note that this can false-negative on directories like "notinternal", - // but we check it correctly below. This is just a fast path. - if !strings.Contains(dir, "vendor") && !strings.Contains(dir, "internal") { - return true - } - - dirSlash := filepath.ToSlash(dir) - if !strings.Contains(dirSlash, "/vendor/") && !strings.Contains(dirSlash, "/internal/") && !strings.HasSuffix(dirSlash, "/internal") { - return true - } - // Vendor or internal directory only visible from children of parent. - // That means the path from the current directory to the target directory - // can contain ../vendor or ../internal but not ../foo/vendor or ../foo/internal - // or bar/vendor or bar/internal. - // After stripping all the leading ../, the only okay place to see vendor or internal - // is at the very beginning of the path. - absfile, err := filepath.Abs(filename) - if err != nil { - return false - } - absdir, err := filepath.Abs(dir) - if err != nil { - return false - } - rel, err := filepath.Rel(absfile, absdir) - if err != nil { - return false - } - relSlash := filepath.ToSlash(rel) - if i := strings.LastIndex(relSlash, "../"); i >= 0 { - relSlash = relSlash[i+len("../"):] - } - return !strings.Contains(relSlash, "/vendor/") && !strings.Contains(relSlash, "/internal/") && !strings.HasSuffix(relSlash, "/internal") -} - -// lastTwoComponents returns at most the last two path components -// of v, using either / or \ as the path separator. -func lastTwoComponents(v string) string { - nslash := 0 - for i := len(v) - 1; i >= 0; i-- { - if v[i] == '/' || v[i] == '\\' { - nslash++ - if nslash == 2 { - return v[i:] - } - } - } - return v -} - -type visitFn func(node ast.Node) ast.Visitor - -func (fn visitFn) Visit(node ast.Node) ast.Visitor { - return fn(node) -} - -func findImportStdlib(shortPkg string, symbols map[string]bool) (importPath string, rename, ok bool) { - for symbol := range symbols { - key := shortPkg + "." + symbol - path := stdlib[key] - if path == "" { - if key == "rand.Read" { - continue - } - return "", false, false - } - if importPath != "" && importPath != path { - // Ambiguous. Symbols pointed to different things. - return "", false, false - } - importPath = path - } - if importPath == "" && shortPkg == "rand" && symbols["Read"] { - return "crypto/rand", false, true - } - return importPath, false, importPath != "" -} - -// fileInDir reports whether the provided file path looks like -// it's in dir. 
(without hitting the filesystem) -func fileInDir(file, dir string) bool { - rest := strings.TrimPrefix(file, dir) - if len(rest) == len(file) { - // dir is not a prefix of file. - return false - } - // Check for boundary: either nothing (file == dir), or a slash. - return len(rest) == 0 || rest[0] == '/' || rest[0] == '\\' -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/imports.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/imports.go deleted file mode 100644 index 2c2eb5c62df2..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/imports.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:generate go run mkstdlib.go - -// Package imports implements a Go pretty-printer (like package "go/format") -// that also adds or removes import statements as necessary. -package imports - -import ( - "bufio" - "bytes" - "fmt" - "go/ast" - "go/format" - "go/parser" - "go/printer" - "go/token" - "io" - "regexp" - "strconv" - "strings" - - "golang.org/x/tools/go/ast/astutil" -) - -// Options specifies options for processing files. -type Options struct { - Fragment bool // Accept fragment of a source file (no package statement) - AllErrors bool // Report all errors (not just the first 10 on different lines) - - Comments bool // Print comments (true if nil *Options provided) - TabIndent bool // Use tabs for indent (true if nil *Options provided) - TabWidth int // Tab width (8 if nil *Options provided) - - FormatOnly bool // Disable the insertion and deletion of imports -} - -// Process formats and adjusts imports for the provided file. -// If opt is nil the defaults are used. -// -// Note that filename's directory influences which imports can be chosen, -// so it is important that filename be accurate. -// To process data ``as if'' it were in filename, pass the data as a non-nil src. -func Process(filename string, src []byte, opt *Options) ([]byte, error) { - if opt == nil { - opt = &Options{Comments: true, TabIndent: true, TabWidth: 8} - } - - fileSet := token.NewFileSet() - file, adjust, err := parse(fileSet, filename, src, opt) - if err != nil { - return nil, err - } - - if !opt.FormatOnly { - _, err = fixImports(fileSet, file, filename) - if err != nil { - return nil, err - } - } - - sortImports(fileSet, file) - imps := astutil.Imports(fileSet, file) - - var spacesBefore []string // import paths we need spaces before - for _, impSection := range imps { - // Within each block of contiguous imports, see if any - // import lines are in different group numbers. If so, - // we'll need to put a space between them so it's - // compatible with gofmt. 
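For orientation, Process above is this package's public entry point. A minimal usage sketch (import path as in the vendor tree being deleted; a nil *Options means comments on, tabs, width 8):

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/imports"
)

func main() {
	// The source is missing its "fmt" import: Process adds it and
	// gofmt-formats the result.
	src := []byte(`package main

func main() { fmt.Println("hi") }
`)
	out, err := imports.Process("main.go", src, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", out)
}
```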
- lastGroup := -1 - for _, importSpec := range impSection { - importPath, _ := strconv.Unquote(importSpec.Path.Value) - groupNum := importGroup(importPath) - if groupNum != lastGroup && lastGroup != -1 { - spacesBefore = append(spacesBefore, importPath) - } - lastGroup = groupNum - } - - } - - printerMode := printer.UseSpaces - if opt.TabIndent { - printerMode |= printer.TabIndent - } - printConfig := &printer.Config{Mode: printerMode, Tabwidth: opt.TabWidth} - - var buf bytes.Buffer - err = printConfig.Fprint(&buf, fileSet, file) - if err != nil { - return nil, err - } - out := buf.Bytes() - if adjust != nil { - out = adjust(src, out) - } - if len(spacesBefore) > 0 { - out = addImportSpaces(bytes.NewReader(out), spacesBefore) - } - - out, err = format.Source(out) - if err != nil { - return nil, err - } - return out, nil -} - -// parse parses src, which was read from filename, -// as a Go source file or statement list. -func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) { - parserMode := parser.Mode(0) - if opt.Comments { - parserMode |= parser.ParseComments - } - if opt.AllErrors { - parserMode |= parser.AllErrors - } - - // Try as whole source file. - file, err := parser.ParseFile(fset, filename, src, parserMode) - if err == nil { - return file, nil, nil - } - // If the error is that the source file didn't begin with a - // package line and we accept fragmented input, fall through to - // try as a source fragment. Stop and return on any other error. - if !opt.Fragment || !strings.Contains(err.Error(), "expected 'package'") { - return nil, nil, err - } - - // If this is a declaration list, make it a source file - // by inserting a package clause. - // Insert using a ;, not a newline, so that the line numbers - // in psrc match the ones in src. - psrc := append([]byte("package main;"), src...) - file, err = parser.ParseFile(fset, filename, psrc, parserMode) - if err == nil { - // If a main function exists, we will assume this is a main - // package and leave the file. - if containsMainFunc(file) { - return file, nil, nil - } - - adjust := func(orig, src []byte) []byte { - // Remove the package clause. - // Gofmt has turned the ; into a \n. - src = src[len("package main\n"):] - return matchSpace(orig, src) - } - return file, adjust, nil - } - // If the error is that the source file didn't begin with a - // declaration, fall through to try as a statement list. - // Stop and return on any other error. - if !strings.Contains(err.Error(), "expected declaration") { - return nil, nil, err - } - - // If this is a statement list, make it a source file - // by inserting a package clause and turning the list - // into a function body. This handles expressions too. - // Insert using a ;, not a newline, so that the line numbers - // in fsrc match the ones in src. - fsrc := append(append([]byte("package p; func _() {"), src...), '}') - file, err = parser.ParseFile(fset, filename, fsrc, parserMode) - if err == nil { - adjust := func(orig, src []byte) []byte { - // Remove the wrapping. - // Gofmt has turned the ; into a \n\n. - src = src[len("package p\n\nfunc _() {"):] - src = src[:len(src)-len("}\n")] - // Gofmt has also indented the function body one level. - // Remove that indent. - src = bytes.Replace(src, []byte("\n\t"), []byte("\n"), -1) - return matchSpace(orig, src) - } - return file, adjust, nil - } - - // Failed, and out of options. 
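The semicolon trick parse relies on: prefixing `package main;` (a `;`, not a newline) keeps every byte of the fragment on its original line, so positions reported against the augmented source still line up with the user's input; the adjust closures then strip the synthetic wrapper after gofmt turns the `;` back into a newline. Verifiably:

```go
package main

import (
	"fmt"
	"go/parser"
	"go/token"
)

func main() {
	frag := []byte(`func hello() string { return "hi" }`)
	psrc := append([]byte("package main;"), frag...) // ';' preserves line numbers
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "frag.go", psrc, 0)
	if err != nil {
		panic(err)
	}
	fmt.Println(f.Name.Name)                          // main
	fmt.Println(fset.Position(f.Decls[0].Pos()).Line) // 1: the fragment's own line
}
```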
- return nil, nil, err -} - -// containsMainFunc checks if a file contains a function declaration with the -// function signature 'func main()' -func containsMainFunc(file *ast.File) bool { - for _, decl := range file.Decls { - if f, ok := decl.(*ast.FuncDecl); ok { - if f.Name.Name != "main" { - continue - } - - if len(f.Type.Params.List) != 0 { - continue - } - - if f.Type.Results != nil && len(f.Type.Results.List) != 0 { - continue - } - - return true - } - } - - return false -} - -func cutSpace(b []byte) (before, middle, after []byte) { - i := 0 - for i < len(b) && (b[i] == ' ' || b[i] == '\t' || b[i] == '\n') { - i++ - } - j := len(b) - for j > 0 && (b[j-1] == ' ' || b[j-1] == '\t' || b[j-1] == '\n') { - j-- - } - if i <= j { - return b[:i], b[i:j], b[j:] - } - return nil, nil, b[j:] -} - -// matchSpace reformats src to use the same space context as orig. -// 1) If orig begins with blank lines, matchSpace inserts them at the beginning of src. -// 2) matchSpace copies the indentation of the first non-blank line in orig -// to every non-blank line in src. -// 3) matchSpace copies the trailing space from orig and uses it in place -// of src's trailing space. -func matchSpace(orig []byte, src []byte) []byte { - before, _, after := cutSpace(orig) - i := bytes.LastIndex(before, []byte{'\n'}) - before, indent := before[:i+1], before[i+1:] - - _, src, _ = cutSpace(src) - - var b bytes.Buffer - b.Write(before) - for len(src) > 0 { - line := src - if i := bytes.IndexByte(line, '\n'); i >= 0 { - line, src = line[:i+1], line[i+1:] - } else { - src = nil - } - if len(line) > 0 && line[0] != '\n' { // not blank - b.Write(indent) - } - b.Write(line) - } - b.Write(after) - return b.Bytes() -} - -var impLine = regexp.MustCompile(`^\s+(?:[\w\.]+\s+)?"(.+)"`) - -func addImportSpaces(r io.Reader, breaks []string) []byte { - var out bytes.Buffer - sc := bufio.NewScanner(r) - inImports := false - done := false - for sc.Scan() { - s := sc.Text() - - if !inImports && !done && strings.HasPrefix(s, "import") { - inImports = true - } - if inImports && (strings.HasPrefix(s, "var") || - strings.HasPrefix(s, "func") || - strings.HasPrefix(s, "const") || - strings.HasPrefix(s, "type")) { - done = true - inImports = false - } - if inImports && len(breaks) > 0 { - if m := impLine.FindStringSubmatch(s); m != nil { - if m[1] == breaks[0] { - out.WriteByte('\n') - breaks = breaks[1:] - } - } - } - - fmt.Fprintln(&out, s) - } - return out.Bytes() -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/mkindex.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/mkindex.go deleted file mode 100644 index 755e2394f2d7..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/mkindex.go +++ /dev/null @@ -1,173 +0,0 @@ -// +build ignore - -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Command mkindex creates the file "pkgindex.go" containing an index of the Go -// standard library. The file is intended to be built as part of the imports -// package, so that the package may be used in environments where a GOROOT is -// not available (such as App Engine). -package main - -import ( - "bytes" - "fmt" - "go/ast" - "go/build" - "go/format" - "go/parser" - "go/token" - "io/ioutil" - "log" - "os" - "path" - "path/filepath" - "strings" -) - -var ( - pkgIndex = make(map[string][]pkg) - exports = make(map[string]map[string]bool) -) - -func main() { - // Don't use GOPATH. 
- ctx := build.Default - ctx.GOPATH = "" - - // Populate pkgIndex global from GOROOT. - for _, path := range ctx.SrcDirs() { - f, err := os.Open(path) - if err != nil { - log.Print(err) - continue - } - children, err := f.Readdir(-1) - f.Close() - if err != nil { - log.Print(err) - continue - } - for _, child := range children { - if child.IsDir() { - loadPkg(path, child.Name()) - } - } - } - // Populate exports global. - for _, ps := range pkgIndex { - for _, p := range ps { - e := loadExports(p.dir) - if e != nil { - exports[p.dir] = e - } - } - } - - // Construct source file. - var buf bytes.Buffer - fmt.Fprint(&buf, pkgIndexHead) - fmt.Fprintf(&buf, "var pkgIndexMaster = %#v\n", pkgIndex) - fmt.Fprintf(&buf, "var exportsMaster = %#v\n", exports) - src := buf.Bytes() - - // Replace main.pkg type name with pkg. - src = bytes.Replace(src, []byte("main.pkg"), []byte("pkg"), -1) - // Replace actual GOROOT with "/go". - src = bytes.Replace(src, []byte(ctx.GOROOT), []byte("/go"), -1) - // Add some line wrapping. - src = bytes.Replace(src, []byte("}, "), []byte("},\n"), -1) - src = bytes.Replace(src, []byte("true, "), []byte("true,\n"), -1) - - var err error - src, err = format.Source(src) - if err != nil { - log.Fatal(err) - } - - // Write out source file. - err = ioutil.WriteFile("pkgindex.go", src, 0644) - if err != nil { - log.Fatal(err) - } -} - -const pkgIndexHead = `package imports - -func init() { - pkgIndexOnce.Do(func() { - pkgIndex.m = pkgIndexMaster - }) - loadExports = func(dir string) map[string]bool { - return exportsMaster[dir] - } -} -` - -type pkg struct { - importpath string // full pkg import path, e.g. "net/http" - dir string // absolute file path to pkg directory e.g. "/usr/lib/go/src/fmt" -} - -var fset = token.NewFileSet() - -func loadPkg(root, importpath string) { - shortName := path.Base(importpath) - if shortName == "testdata" { - return - } - - dir := filepath.Join(root, importpath) - pkgIndex[shortName] = append(pkgIndex[shortName], pkg{ - importpath: importpath, - dir: dir, - }) - - pkgDir, err := os.Open(dir) - if err != nil { - return - } - children, err := pkgDir.Readdir(-1) - pkgDir.Close() - if err != nil { - return - } - for _, child := range children { - name := child.Name() - if name == "" { - continue - } - if c := name[0]; c == '.' || ('0' <= c && c <= '9') { - continue - } - if child.IsDir() { - loadPkg(root, filepath.Join(importpath, name)) - } - } -} - -func loadExports(dir string) map[string]bool { - exports := make(map[string]bool) - buildPkg, err := build.ImportDir(dir, 0) - if err != nil { - if strings.Contains(err.Error(), "no buildable Go source files in") { - return nil - } - log.Printf("could not import %q: %v", dir, err) - return nil - } - for _, file := range buildPkg.GoFiles { - f, err := parser.ParseFile(fset, filepath.Join(dir, file), nil, 0) - if err != nil { - log.Printf("could not parse %q: %v", file, err) - continue - } - for name := range f.Scope.Objects { - if ast.IsExported(name) { - exports[name] = true - } - } - } - return exports -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/mkstdlib.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/mkstdlib.go deleted file mode 100644 index 5602244a7e86..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/mkstdlib.go +++ /dev/null @@ -1,104 +0,0 @@ -// +build ignore - -// mkstdlib generates the zstdlib.go file, containing the Go standard -// library API symbols. 
It's baked into the binary to avoid scanning -// GOPATH in the common case. -package main - -import ( - "bufio" - "bytes" - "fmt" - "go/format" - "io" - "io/ioutil" - "log" - "os" - "path" - "path/filepath" - "regexp" - "sort" - "strings" -) - -func mustOpen(name string) io.Reader { - f, err := os.Open(name) - if err != nil { - log.Fatal(err) - } - return f -} - -func api(base string) string { - return filepath.Join(os.Getenv("GOROOT"), "api", base) -} - -var sym = regexp.MustCompile(`^pkg (\S+).*?, (?:var|func|type|const) ([A-Z]\w*)`) - -func main() { - var buf bytes.Buffer - outf := func(format string, args ...interface{}) { - fmt.Fprintf(&buf, format, args...) - } - outf("// Code generated by mkstdlib.go. DO NOT EDIT.\n\n") - outf("package imports\n") - outf("var stdlib = map[string]string{\n") - f := io.MultiReader( - mustOpen(api("go1.txt")), - mustOpen(api("go1.1.txt")), - mustOpen(api("go1.2.txt")), - mustOpen(api("go1.3.txt")), - mustOpen(api("go1.4.txt")), - mustOpen(api("go1.5.txt")), - mustOpen(api("go1.6.txt")), - mustOpen(api("go1.7.txt")), - mustOpen(api("go1.8.txt")), - ) - sc := bufio.NewScanner(f) - fullImport := map[string]string{} // "zip.NewReader" => "archive/zip" - ambiguous := map[string]bool{} - var keys []string - for sc.Scan() { - l := sc.Text() - has := func(v string) bool { return strings.Contains(l, v) } - if has("struct, ") || has("interface, ") || has(", method (") { - continue - } - if m := sym.FindStringSubmatch(l); m != nil { - full := m[1] - key := path.Base(full) + "." + m[2] - if exist, ok := fullImport[key]; ok { - if exist != full { - ambiguous[key] = true - } - } else { - fullImport[key] = full - keys = append(keys, key) - } - } - } - if err := sc.Err(); err != nil { - log.Fatal(err) - } - sort.Strings(keys) - for _, key := range keys { - if ambiguous[key] { - outf("\t// %q is ambiguous\n", key) - } else { - outf("\t%q: %q,\n", key, fullImport[key]) - } - } - outf("\n") - for _, sym := range [...]string{"Alignof", "ArbitraryType", "Offsetof", "Pointer", "Sizeof"} { - outf("\t%q: %q,\n", "unsafe."+sym, "unsafe") - } - outf("}\n") - fmtbuf, err := format.Source(buf.Bytes()) - if err != nil { - log.Fatal(err) - } - err = ioutil.WriteFile("zstdlib.go", fmtbuf, 0666) - if err != nil { - log.Fatal(err) - } -} diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/sortimports.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/sortimports.go deleted file mode 100644 index 653afc51776a..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/sortimports.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Hacked up copy of go/ast/import.go - -package imports - -import ( - "go/ast" - "go/token" - "sort" - "strconv" -) - -// sortImports sorts runs of consecutive import lines in import blocks in f. -// It also removes duplicate imports when it is possible to do so without data loss. -func sortImports(fset *token.FileSet, f *ast.File) { - for i, d := range f.Decls { - d, ok := d.(*ast.GenDecl) - if !ok || d.Tok != token.IMPORT { - // Not an import declaration, so we're done. - // Imports are always first. - break - } - - if len(d.Specs) == 0 { - // Empty import block, remove it. - f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) - } - - if !d.Lparen.IsValid() { - // Not a block: sorted by default. 
- continue - } - - // Identify and sort runs of specs on successive lines. - i := 0 - specs := d.Specs[:0] - for j, s := range d.Specs { - if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line { - // j begins a new run. End this one. - specs = append(specs, sortSpecs(fset, f, d.Specs[i:j])...) - i = j - } - } - specs = append(specs, sortSpecs(fset, f, d.Specs[i:])...) - d.Specs = specs - - // Deduping can leave a blank line before the rparen; clean that up. - if len(d.Specs) > 0 { - lastSpec := d.Specs[len(d.Specs)-1] - lastLine := fset.Position(lastSpec.Pos()).Line - if rParenLine := fset.Position(d.Rparen).Line; rParenLine > lastLine+1 { - fset.File(d.Rparen).MergeLine(rParenLine - 1) - } - } - } -} - -func importPath(s ast.Spec) string { - t, err := strconv.Unquote(s.(*ast.ImportSpec).Path.Value) - if err == nil { - return t - } - return "" -} - -func importName(s ast.Spec) string { - n := s.(*ast.ImportSpec).Name - if n == nil { - return "" - } - return n.Name -} - -func importComment(s ast.Spec) string { - c := s.(*ast.ImportSpec).Comment - if c == nil { - return "" - } - return c.Text() -} - -// collapse indicates whether prev may be removed, leaving only next. -func collapse(prev, next ast.Spec) bool { - if importPath(next) != importPath(prev) || importName(next) != importName(prev) { - return false - } - return prev.(*ast.ImportSpec).Comment == nil -} - -type posSpan struct { - Start token.Pos - End token.Pos -} - -func sortSpecs(fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec { - // Can't short-circuit here even if specs are already sorted, - // since they might yet need deduplication. - // A lone import, however, may be safely ignored. - if len(specs) <= 1 { - return specs - } - - // Record positions for specs. - pos := make([]posSpan, len(specs)) - for i, s := range specs { - pos[i] = posSpan{s.Pos(), s.End()} - } - - // Identify comments in this range. - // Any comment from pos[0].Start to the final line counts. - lastLine := fset.Position(pos[len(pos)-1].End).Line - cstart := len(f.Comments) - cend := len(f.Comments) - for i, g := range f.Comments { - if g.Pos() < pos[0].Start { - continue - } - if i < cstart { - cstart = i - } - if fset.Position(g.End()).Line > lastLine { - cend = i - break - } - } - comments := f.Comments[cstart:cend] - - // Assign each comment to the import spec preceding it. - importComment := map[*ast.ImportSpec][]*ast.CommentGroup{} - specIndex := 0 - for _, g := range comments { - for specIndex+1 < len(specs) && pos[specIndex+1].Start <= g.Pos() { - specIndex++ - } - s := specs[specIndex].(*ast.ImportSpec) - importComment[s] = append(importComment[s], g) - } - - // Sort the import specs by import path. - // Remove duplicates, when possible without data loss. - // Reassign the import paths to have the same position sequence. - // Reassign each comment to abut the end of its spec. - // Sort the comments by new position. - sort.Sort(byImportSpec(specs)) - - // Dedup. Thanks to our sorting, we can just consider - // adjacent pairs of imports. 
- deduped := specs[:0] - for i, s := range specs { - if i == len(specs)-1 || !collapse(s, specs[i+1]) { - deduped = append(deduped, s) - } else { - p := s.Pos() - fset.File(p).MergeLine(fset.Position(p).Line) - } - } - specs = deduped - - // Fix up comment positions - for i, s := range specs { - s := s.(*ast.ImportSpec) - if s.Name != nil { - s.Name.NamePos = pos[i].Start - } - s.Path.ValuePos = pos[i].Start - s.EndPos = pos[i].End - for _, g := range importComment[s] { - for _, c := range g.List { - c.Slash = pos[i].End - } - } - } - - sort.Sort(byCommentPos(comments)) - - return specs -} - -type byImportSpec []ast.Spec // slice of *ast.ImportSpec - -func (x byImportSpec) Len() int { return len(x) } -func (x byImportSpec) Swap(i, j int) { x[i], x[j] = x[j], x[i] } -func (x byImportSpec) Less(i, j int) bool { - ipath := importPath(x[i]) - jpath := importPath(x[j]) - - igroup := importGroup(ipath) - jgroup := importGroup(jpath) - if igroup != jgroup { - return igroup < jgroup - } - - if ipath != jpath { - return ipath < jpath - } - iname := importName(x[i]) - jname := importName(x[j]) - - if iname != jname { - return iname < jname - } - return importComment(x[i]) < importComment(x[j]) -} - -type byCommentPos []*ast.CommentGroup - -func (x byCommentPos) Len() int { return len(x) } -func (x byCommentPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] } -func (x byCommentPos) Less(i, j int) bool { return x[i].Pos() < x[j].Pos() } diff --git a/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/zstdlib.go b/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/zstdlib.go deleted file mode 100644 index 5b66a6cd42af..000000000000 --- a/vertical-pod-autoscaler/vendor/golang.org/x/tools/imports/zstdlib.go +++ /dev/null @@ -1,9376 +0,0 @@ -// Code generated by mkstdlib.go. DO NOT EDIT. 
- -package imports - -var stdlib = map[string]string{ - "adler32.Checksum": "hash/adler32", - "adler32.New": "hash/adler32", - "adler32.Size": "hash/adler32", - "aes.BlockSize": "crypto/aes", - "aes.KeySizeError": "crypto/aes", - "aes.NewCipher": "crypto/aes", - "ascii85.CorruptInputError": "encoding/ascii85", - "ascii85.Decode": "encoding/ascii85", - "ascii85.Encode": "encoding/ascii85", - "ascii85.MaxEncodedLen": "encoding/ascii85", - "ascii85.NewDecoder": "encoding/ascii85", - "ascii85.NewEncoder": "encoding/ascii85", - "asn1.BitString": "encoding/asn1", - "asn1.ClassApplication": "encoding/asn1", - "asn1.ClassContextSpecific": "encoding/asn1", - "asn1.ClassPrivate": "encoding/asn1", - "asn1.ClassUniversal": "encoding/asn1", - "asn1.Enumerated": "encoding/asn1", - "asn1.Flag": "encoding/asn1", - "asn1.Marshal": "encoding/asn1", - "asn1.ObjectIdentifier": "encoding/asn1", - "asn1.RawContent": "encoding/asn1", - "asn1.RawValue": "encoding/asn1", - "asn1.StructuralError": "encoding/asn1", - "asn1.SyntaxError": "encoding/asn1", - "asn1.TagBitString": "encoding/asn1", - "asn1.TagBoolean": "encoding/asn1", - "asn1.TagEnum": "encoding/asn1", - "asn1.TagGeneralString": "encoding/asn1", - "asn1.TagGeneralizedTime": "encoding/asn1", - "asn1.TagIA5String": "encoding/asn1", - "asn1.TagInteger": "encoding/asn1", - "asn1.TagOID": "encoding/asn1", - "asn1.TagOctetString": "encoding/asn1", - "asn1.TagPrintableString": "encoding/asn1", - "asn1.TagSequence": "encoding/asn1", - "asn1.TagSet": "encoding/asn1", - "asn1.TagT61String": "encoding/asn1", - "asn1.TagUTCTime": "encoding/asn1", - "asn1.TagUTF8String": "encoding/asn1", - "asn1.Unmarshal": "encoding/asn1", - "asn1.UnmarshalWithParams": "encoding/asn1", - "ast.ArrayType": "go/ast", - "ast.AssignStmt": "go/ast", - "ast.Bad": "go/ast", - "ast.BadDecl": "go/ast", - "ast.BadExpr": "go/ast", - "ast.BadStmt": "go/ast", - "ast.BasicLit": "go/ast", - "ast.BinaryExpr": "go/ast", - "ast.BlockStmt": "go/ast", - "ast.BranchStmt": "go/ast", - "ast.CallExpr": "go/ast", - "ast.CaseClause": "go/ast", - "ast.ChanDir": "go/ast", - "ast.ChanType": "go/ast", - "ast.CommClause": "go/ast", - "ast.Comment": "go/ast", - "ast.CommentGroup": "go/ast", - "ast.CommentMap": "go/ast", - "ast.CompositeLit": "go/ast", - "ast.Con": "go/ast", - "ast.DeclStmt": "go/ast", - "ast.DeferStmt": "go/ast", - "ast.Ellipsis": "go/ast", - "ast.EmptyStmt": "go/ast", - "ast.ExprStmt": "go/ast", - "ast.Field": "go/ast", - "ast.FieldFilter": "go/ast", - "ast.FieldList": "go/ast", - "ast.File": "go/ast", - "ast.FileExports": "go/ast", - "ast.Filter": "go/ast", - "ast.FilterDecl": "go/ast", - "ast.FilterFile": "go/ast", - "ast.FilterFuncDuplicates": "go/ast", - "ast.FilterImportDuplicates": "go/ast", - "ast.FilterPackage": "go/ast", - "ast.FilterUnassociatedComments": "go/ast", - "ast.ForStmt": "go/ast", - "ast.Fprint": "go/ast", - "ast.Fun": "go/ast", - "ast.FuncDecl": "go/ast", - "ast.FuncLit": "go/ast", - "ast.FuncType": "go/ast", - "ast.GenDecl": "go/ast", - "ast.GoStmt": "go/ast", - "ast.Ident": "go/ast", - "ast.IfStmt": "go/ast", - "ast.ImportSpec": "go/ast", - "ast.Importer": "go/ast", - "ast.IncDecStmt": "go/ast", - "ast.IndexExpr": "go/ast", - "ast.Inspect": "go/ast", - "ast.InterfaceType": "go/ast", - "ast.IsExported": "go/ast", - "ast.KeyValueExpr": "go/ast", - "ast.LabeledStmt": "go/ast", - "ast.Lbl": "go/ast", - "ast.MapType": "go/ast", - "ast.MergeMode": "go/ast", - "ast.MergePackageFiles": "go/ast", - "ast.NewCommentMap": "go/ast", - "ast.NewIdent": "go/ast", - "ast.NewObj": "go/ast", 
- "ast.NewPackage": "go/ast", - "ast.NewScope": "go/ast", - "ast.Node": "go/ast", - "ast.NotNilFilter": "go/ast", - "ast.ObjKind": "go/ast", - "ast.Object": "go/ast", - "ast.Package": "go/ast", - "ast.PackageExports": "go/ast", - "ast.ParenExpr": "go/ast", - "ast.Pkg": "go/ast", - "ast.Print": "go/ast", - "ast.RECV": "go/ast", - "ast.RangeStmt": "go/ast", - "ast.ReturnStmt": "go/ast", - "ast.SEND": "go/ast", - "ast.Scope": "go/ast", - "ast.SelectStmt": "go/ast", - "ast.SelectorExpr": "go/ast", - "ast.SendStmt": "go/ast", - "ast.SliceExpr": "go/ast", - "ast.SortImports": "go/ast", - "ast.StarExpr": "go/ast", - "ast.StructType": "go/ast", - "ast.SwitchStmt": "go/ast", - "ast.Typ": "go/ast", - "ast.TypeAssertExpr": "go/ast", - "ast.TypeSpec": "go/ast", - "ast.TypeSwitchStmt": "go/ast", - "ast.UnaryExpr": "go/ast", - "ast.ValueSpec": "go/ast", - "ast.Var": "go/ast", - "ast.Visitor": "go/ast", - "ast.Walk": "go/ast", - "atomic.AddInt32": "sync/atomic", - "atomic.AddInt64": "sync/atomic", - "atomic.AddUint32": "sync/atomic", - "atomic.AddUint64": "sync/atomic", - "atomic.AddUintptr": "sync/atomic", - "atomic.CompareAndSwapInt32": "sync/atomic", - "atomic.CompareAndSwapInt64": "sync/atomic", - "atomic.CompareAndSwapPointer": "sync/atomic", - "atomic.CompareAndSwapUint32": "sync/atomic", - "atomic.CompareAndSwapUint64": "sync/atomic", - "atomic.CompareAndSwapUintptr": "sync/atomic", - "atomic.LoadInt32": "sync/atomic", - "atomic.LoadInt64": "sync/atomic", - "atomic.LoadPointer": "sync/atomic", - "atomic.LoadUint32": "sync/atomic", - "atomic.LoadUint64": "sync/atomic", - "atomic.LoadUintptr": "sync/atomic", - "atomic.StoreInt32": "sync/atomic", - "atomic.StoreInt64": "sync/atomic", - "atomic.StorePointer": "sync/atomic", - "atomic.StoreUint32": "sync/atomic", - "atomic.StoreUint64": "sync/atomic", - "atomic.StoreUintptr": "sync/atomic", - "atomic.SwapInt32": "sync/atomic", - "atomic.SwapInt64": "sync/atomic", - "atomic.SwapPointer": "sync/atomic", - "atomic.SwapUint32": "sync/atomic", - "atomic.SwapUint64": "sync/atomic", - "atomic.SwapUintptr": "sync/atomic", - "atomic.Value": "sync/atomic", - "base32.CorruptInputError": "encoding/base32", - "base32.Encoding": "encoding/base32", - "base32.HexEncoding": "encoding/base32", - "base32.NewDecoder": "encoding/base32", - "base32.NewEncoder": "encoding/base32", - "base32.NewEncoding": "encoding/base32", - "base32.StdEncoding": "encoding/base32", - "base64.CorruptInputError": "encoding/base64", - "base64.Encoding": "encoding/base64", - "base64.NewDecoder": "encoding/base64", - "base64.NewEncoder": "encoding/base64", - "base64.NewEncoding": "encoding/base64", - "base64.NoPadding": "encoding/base64", - "base64.RawStdEncoding": "encoding/base64", - "base64.RawURLEncoding": "encoding/base64", - "base64.StdEncoding": "encoding/base64", - "base64.StdPadding": "encoding/base64", - "base64.URLEncoding": "encoding/base64", - "big.Above": "math/big", - "big.Accuracy": "math/big", - "big.AwayFromZero": "math/big", - "big.Below": "math/big", - "big.ErrNaN": "math/big", - "big.Exact": "math/big", - "big.Float": "math/big", - "big.Int": "math/big", - "big.Jacobi": "math/big", - "big.MaxBase": "math/big", - "big.MaxExp": "math/big", - "big.MaxPrec": "math/big", - "big.MinExp": "math/big", - "big.NewFloat": "math/big", - "big.NewInt": "math/big", - "big.NewRat": "math/big", - "big.ParseFloat": "math/big", - "big.Rat": "math/big", - "big.RoundingMode": "math/big", - "big.ToNearestAway": "math/big", - "big.ToNearestEven": "math/big", - "big.ToNegativeInf": "math/big", - 
"big.ToPositiveInf": "math/big", - "big.ToZero": "math/big", - "big.Word": "math/big", - "binary.BigEndian": "encoding/binary", - "binary.ByteOrder": "encoding/binary", - "binary.LittleEndian": "encoding/binary", - "binary.MaxVarintLen16": "encoding/binary", - "binary.MaxVarintLen32": "encoding/binary", - "binary.MaxVarintLen64": "encoding/binary", - "binary.PutUvarint": "encoding/binary", - "binary.PutVarint": "encoding/binary", - "binary.Read": "encoding/binary", - "binary.ReadUvarint": "encoding/binary", - "binary.ReadVarint": "encoding/binary", - "binary.Size": "encoding/binary", - "binary.Uvarint": "encoding/binary", - "binary.Varint": "encoding/binary", - "binary.Write": "encoding/binary", - "bufio.ErrAdvanceTooFar": "bufio", - "bufio.ErrBufferFull": "bufio", - "bufio.ErrFinalToken": "bufio", - "bufio.ErrInvalidUnreadByte": "bufio", - "bufio.ErrInvalidUnreadRune": "bufio", - "bufio.ErrNegativeAdvance": "bufio", - "bufio.ErrNegativeCount": "bufio", - "bufio.ErrTooLong": "bufio", - "bufio.MaxScanTokenSize": "bufio", - "bufio.NewReadWriter": "bufio", - "bufio.NewReader": "bufio", - "bufio.NewReaderSize": "bufio", - "bufio.NewScanner": "bufio", - "bufio.NewWriter": "bufio", - "bufio.NewWriterSize": "bufio", - "bufio.ReadWriter": "bufio", - "bufio.Reader": "bufio", - "bufio.ScanBytes": "bufio", - "bufio.ScanLines": "bufio", - "bufio.ScanRunes": "bufio", - "bufio.ScanWords": "bufio", - "bufio.Scanner": "bufio", - "bufio.SplitFunc": "bufio", - "bufio.Writer": "bufio", - "build.AllowBinary": "go/build", - "build.ArchChar": "go/build", - "build.Context": "go/build", - "build.Default": "go/build", - "build.FindOnly": "go/build", - "build.IgnoreVendor": "go/build", - "build.Import": "go/build", - "build.ImportComment": "go/build", - "build.ImportDir": "go/build", - "build.ImportMode": "go/build", - "build.IsLocalImport": "go/build", - "build.MultiplePackageError": "go/build", - "build.NoGoError": "go/build", - "build.Package": "go/build", - "build.ToolDir": "go/build", - "bytes.Buffer": "bytes", - "bytes.Compare": "bytes", - "bytes.Contains": "bytes", - "bytes.ContainsAny": "bytes", - "bytes.ContainsRune": "bytes", - "bytes.Count": "bytes", - "bytes.Equal": "bytes", - "bytes.EqualFold": "bytes", - "bytes.ErrTooLarge": "bytes", - "bytes.Fields": "bytes", - "bytes.FieldsFunc": "bytes", - "bytes.HasPrefix": "bytes", - "bytes.HasSuffix": "bytes", - "bytes.Index": "bytes", - "bytes.IndexAny": "bytes", - "bytes.IndexByte": "bytes", - "bytes.IndexFunc": "bytes", - "bytes.IndexRune": "bytes", - "bytes.Join": "bytes", - "bytes.LastIndex": "bytes", - "bytes.LastIndexAny": "bytes", - "bytes.LastIndexByte": "bytes", - "bytes.LastIndexFunc": "bytes", - "bytes.Map": "bytes", - "bytes.MinRead": "bytes", - "bytes.NewBuffer": "bytes", - "bytes.NewBufferString": "bytes", - "bytes.NewReader": "bytes", - "bytes.Reader": "bytes", - "bytes.Repeat": "bytes", - "bytes.Replace": "bytes", - "bytes.Runes": "bytes", - "bytes.Split": "bytes", - "bytes.SplitAfter": "bytes", - "bytes.SplitAfterN": "bytes", - "bytes.SplitN": "bytes", - "bytes.Title": "bytes", - "bytes.ToLower": "bytes", - "bytes.ToLowerSpecial": "bytes", - "bytes.ToTitle": "bytes", - "bytes.ToTitleSpecial": "bytes", - "bytes.ToUpper": "bytes", - "bytes.ToUpperSpecial": "bytes", - "bytes.Trim": "bytes", - "bytes.TrimFunc": "bytes", - "bytes.TrimLeft": "bytes", - "bytes.TrimLeftFunc": "bytes", - "bytes.TrimPrefix": "bytes", - "bytes.TrimRight": "bytes", - "bytes.TrimRightFunc": "bytes", - "bytes.TrimSpace": "bytes", - "bytes.TrimSuffix": "bytes", - 
"bzip2.NewReader": "compress/bzip2", - "bzip2.StructuralError": "compress/bzip2", - "cgi.Handler": "net/http/cgi", - "cgi.Request": "net/http/cgi", - "cgi.RequestFromMap": "net/http/cgi", - "cgi.Serve": "net/http/cgi", - "cipher.AEAD": "crypto/cipher", - "cipher.Block": "crypto/cipher", - "cipher.BlockMode": "crypto/cipher", - "cipher.NewCBCDecrypter": "crypto/cipher", - "cipher.NewCBCEncrypter": "crypto/cipher", - "cipher.NewCFBDecrypter": "crypto/cipher", - "cipher.NewCFBEncrypter": "crypto/cipher", - "cipher.NewCTR": "crypto/cipher", - "cipher.NewGCM": "crypto/cipher", - "cipher.NewGCMWithNonceSize": "crypto/cipher", - "cipher.NewOFB": "crypto/cipher", - "cipher.Stream": "crypto/cipher", - "cipher.StreamReader": "crypto/cipher", - "cipher.StreamWriter": "crypto/cipher", - "cmplx.Abs": "math/cmplx", - "cmplx.Acos": "math/cmplx", - "cmplx.Acosh": "math/cmplx", - "cmplx.Asin": "math/cmplx", - "cmplx.Asinh": "math/cmplx", - "cmplx.Atan": "math/cmplx", - "cmplx.Atanh": "math/cmplx", - "cmplx.Conj": "math/cmplx", - "cmplx.Cos": "math/cmplx", - "cmplx.Cosh": "math/cmplx", - "cmplx.Cot": "math/cmplx", - "cmplx.Exp": "math/cmplx", - "cmplx.Inf": "math/cmplx", - "cmplx.IsInf": "math/cmplx", - "cmplx.IsNaN": "math/cmplx", - "cmplx.Log": "math/cmplx", - "cmplx.Log10": "math/cmplx", - "cmplx.NaN": "math/cmplx", - "cmplx.Phase": "math/cmplx", - "cmplx.Polar": "math/cmplx", - "cmplx.Pow": "math/cmplx", - "cmplx.Rect": "math/cmplx", - "cmplx.Sin": "math/cmplx", - "cmplx.Sinh": "math/cmplx", - "cmplx.Sqrt": "math/cmplx", - "cmplx.Tan": "math/cmplx", - "cmplx.Tanh": "math/cmplx", - "color.Alpha": "image/color", - "color.Alpha16": "image/color", - "color.Alpha16Model": "image/color", - "color.AlphaModel": "image/color", - "color.Black": "image/color", - "color.CMYK": "image/color", - "color.CMYKModel": "image/color", - "color.CMYKToRGB": "image/color", - "color.Color": "image/color", - "color.Gray": "image/color", - "color.Gray16": "image/color", - "color.Gray16Model": "image/color", - "color.GrayModel": "image/color", - "color.Model": "image/color", - "color.ModelFunc": "image/color", - "color.NRGBA": "image/color", - "color.NRGBA64": "image/color", - "color.NRGBA64Model": "image/color", - "color.NRGBAModel": "image/color", - "color.NYCbCrA": "image/color", - "color.NYCbCrAModel": "image/color", - "color.Opaque": "image/color", - "color.Palette": "image/color", - "color.RGBA": "image/color", - "color.RGBA64": "image/color", - "color.RGBA64Model": "image/color", - "color.RGBAModel": "image/color", - "color.RGBToCMYK": "image/color", - "color.RGBToYCbCr": "image/color", - "color.Transparent": "image/color", - "color.White": "image/color", - "color.YCbCr": "image/color", - "color.YCbCrModel": "image/color", - "color.YCbCrToRGB": "image/color", - "constant.BinaryOp": "go/constant", - "constant.BitLen": "go/constant", - "constant.Bool": "go/constant", - "constant.BoolVal": "go/constant", - "constant.Bytes": "go/constant", - "constant.Compare": "go/constant", - "constant.Complex": "go/constant", - "constant.Denom": "go/constant", - "constant.Float": "go/constant", - "constant.Float32Val": "go/constant", - "constant.Float64Val": "go/constant", - "constant.Imag": "go/constant", - "constant.Int": "go/constant", - "constant.Int64Val": "go/constant", - "constant.Kind": "go/constant", - "constant.MakeBool": "go/constant", - "constant.MakeFloat64": "go/constant", - "constant.MakeFromBytes": "go/constant", - "constant.MakeFromLiteral": "go/constant", - "constant.MakeImag": "go/constant", - "constant.MakeInt64": 
"go/constant", - "constant.MakeString": "go/constant", - "constant.MakeUint64": "go/constant", - "constant.MakeUnknown": "go/constant", - "constant.Num": "go/constant", - "constant.Real": "go/constant", - "constant.Shift": "go/constant", - "constant.Sign": "go/constant", - "constant.String": "go/constant", - "constant.StringVal": "go/constant", - "constant.ToComplex": "go/constant", - "constant.ToFloat": "go/constant", - "constant.ToInt": "go/constant", - "constant.Uint64Val": "go/constant", - "constant.UnaryOp": "go/constant", - "constant.Unknown": "go/constant", - "context.Background": "context", - "context.CancelFunc": "context", - "context.Canceled": "context", - "context.Context": "context", - "context.DeadlineExceeded": "context", - "context.TODO": "context", - "context.WithCancel": "context", - "context.WithDeadline": "context", - "context.WithTimeout": "context", - "context.WithValue": "context", - "cookiejar.Jar": "net/http/cookiejar", - "cookiejar.New": "net/http/cookiejar", - "cookiejar.Options": "net/http/cookiejar", - "cookiejar.PublicSuffixList": "net/http/cookiejar", - "crc32.Castagnoli": "hash/crc32", - "crc32.Checksum": "hash/crc32", - "crc32.ChecksumIEEE": "hash/crc32", - "crc32.IEEE": "hash/crc32", - "crc32.IEEETable": "hash/crc32", - "crc32.Koopman": "hash/crc32", - "crc32.MakeTable": "hash/crc32", - "crc32.New": "hash/crc32", - "crc32.NewIEEE": "hash/crc32", - "crc32.Size": "hash/crc32", - "crc32.Table": "hash/crc32", - "crc32.Update": "hash/crc32", - "crc64.Checksum": "hash/crc64", - "crc64.ECMA": "hash/crc64", - "crc64.ISO": "hash/crc64", - "crc64.MakeTable": "hash/crc64", - "crc64.New": "hash/crc64", - "crc64.Size": "hash/crc64", - "crc64.Table": "hash/crc64", - "crc64.Update": "hash/crc64", - "crypto.Decrypter": "crypto", - "crypto.DecrypterOpts": "crypto", - "crypto.Hash": "crypto", - "crypto.MD4": "crypto", - "crypto.MD5": "crypto", - "crypto.MD5SHA1": "crypto", - "crypto.PrivateKey": "crypto", - "crypto.PublicKey": "crypto", - "crypto.RIPEMD160": "crypto", - "crypto.RegisterHash": "crypto", - "crypto.SHA1": "crypto", - "crypto.SHA224": "crypto", - "crypto.SHA256": "crypto", - "crypto.SHA384": "crypto", - "crypto.SHA3_224": "crypto", - "crypto.SHA3_256": "crypto", - "crypto.SHA3_384": "crypto", - "crypto.SHA3_512": "crypto", - "crypto.SHA512": "crypto", - "crypto.SHA512_224": "crypto", - "crypto.SHA512_256": "crypto", - "crypto.Signer": "crypto", - "crypto.SignerOpts": "crypto", - "csv.ErrBareQuote": "encoding/csv", - "csv.ErrFieldCount": "encoding/csv", - "csv.ErrQuote": "encoding/csv", - "csv.ErrTrailingComma": "encoding/csv", - "csv.NewReader": "encoding/csv", - "csv.NewWriter": "encoding/csv", - "csv.ParseError": "encoding/csv", - "csv.Reader": "encoding/csv", - "csv.Writer": "encoding/csv", - "debug.FreeOSMemory": "runtime/debug", - "debug.GCStats": "runtime/debug", - "debug.PrintStack": "runtime/debug", - "debug.ReadGCStats": "runtime/debug", - "debug.SetGCPercent": "runtime/debug", - "debug.SetMaxStack": "runtime/debug", - "debug.SetMaxThreads": "runtime/debug", - "debug.SetPanicOnFault": "runtime/debug", - "debug.SetTraceback": "runtime/debug", - "debug.Stack": "runtime/debug", - "debug.WriteHeapDump": "runtime/debug", - "des.BlockSize": "crypto/des", - "des.KeySizeError": "crypto/des", - "des.NewCipher": "crypto/des", - "des.NewTripleDESCipher": "crypto/des", - "doc.AllDecls": "go/doc", - "doc.AllMethods": "go/doc", - "doc.Example": "go/doc", - "doc.Examples": "go/doc", - "doc.Filter": "go/doc", - "doc.Func": "go/doc", - "doc.IllegalPrefixes": "go/doc", 
- "doc.IsPredeclared": "go/doc", - "doc.Mode": "go/doc", - "doc.New": "go/doc", - "doc.Note": "go/doc", - "doc.Package": "go/doc", - "doc.Synopsis": "go/doc", - "doc.ToHTML": "go/doc", - "doc.ToText": "go/doc", - "doc.Type": "go/doc", - "doc.Value": "go/doc", - "draw.Draw": "image/draw", - "draw.DrawMask": "image/draw", - "draw.Drawer": "image/draw", - "draw.FloydSteinberg": "image/draw", - "draw.Image": "image/draw", - "draw.Op": "image/draw", - "draw.Over": "image/draw", - "draw.Quantizer": "image/draw", - "draw.Src": "image/draw", - "driver.Bool": "database/sql/driver", - "driver.ColumnConverter": "database/sql/driver", - "driver.Conn": "database/sql/driver", - "driver.ConnBeginTx": "database/sql/driver", - "driver.ConnPrepareContext": "database/sql/driver", - "driver.DefaultParameterConverter": "database/sql/driver", - "driver.Driver": "database/sql/driver", - "driver.ErrBadConn": "database/sql/driver", - "driver.ErrSkip": "database/sql/driver", - "driver.Execer": "database/sql/driver", - "driver.ExecerContext": "database/sql/driver", - "driver.Int32": "database/sql/driver", - "driver.IsScanValue": "database/sql/driver", - "driver.IsValue": "database/sql/driver", - "driver.IsolationLevel": "database/sql/driver", - "driver.NamedValue": "database/sql/driver", - "driver.NotNull": "database/sql/driver", - "driver.Null": "database/sql/driver", - "driver.Pinger": "database/sql/driver", - "driver.Queryer": "database/sql/driver", - "driver.QueryerContext": "database/sql/driver", - "driver.Result": "database/sql/driver", - "driver.ResultNoRows": "database/sql/driver", - "driver.Rows": "database/sql/driver", - "driver.RowsAffected": "database/sql/driver", - "driver.RowsColumnTypeDatabaseTypeName": "database/sql/driver", - "driver.RowsColumnTypeLength": "database/sql/driver", - "driver.RowsColumnTypeNullable": "database/sql/driver", - "driver.RowsColumnTypePrecisionScale": "database/sql/driver", - "driver.RowsColumnTypeScanType": "database/sql/driver", - "driver.RowsNextResultSet": "database/sql/driver", - "driver.Stmt": "database/sql/driver", - "driver.StmtExecContext": "database/sql/driver", - "driver.StmtQueryContext": "database/sql/driver", - "driver.String": "database/sql/driver", - "driver.Tx": "database/sql/driver", - "driver.TxOptions": "database/sql/driver", - "driver.Value": "database/sql/driver", - "driver.ValueConverter": "database/sql/driver", - "driver.Valuer": "database/sql/driver", - "dsa.ErrInvalidPublicKey": "crypto/dsa", - "dsa.GenerateKey": "crypto/dsa", - "dsa.GenerateParameters": "crypto/dsa", - "dsa.L1024N160": "crypto/dsa", - "dsa.L2048N224": "crypto/dsa", - "dsa.L2048N256": "crypto/dsa", - "dsa.L3072N256": "crypto/dsa", - "dsa.ParameterSizes": "crypto/dsa", - "dsa.Parameters": "crypto/dsa", - "dsa.PrivateKey": "crypto/dsa", - "dsa.PublicKey": "crypto/dsa", - "dsa.Sign": "crypto/dsa", - "dsa.Verify": "crypto/dsa", - "dwarf.AddrType": "debug/dwarf", - "dwarf.ArrayType": "debug/dwarf", - "dwarf.Attr": "debug/dwarf", - "dwarf.AttrAbstractOrigin": "debug/dwarf", - "dwarf.AttrAccessibility": "debug/dwarf", - "dwarf.AttrAddrClass": "debug/dwarf", - "dwarf.AttrAllocated": "debug/dwarf", - "dwarf.AttrArtificial": "debug/dwarf", - "dwarf.AttrAssociated": "debug/dwarf", - "dwarf.AttrBaseTypes": "debug/dwarf", - "dwarf.AttrBitOffset": "debug/dwarf", - "dwarf.AttrBitSize": "debug/dwarf", - "dwarf.AttrByteSize": "debug/dwarf", - "dwarf.AttrCallColumn": "debug/dwarf", - "dwarf.AttrCallFile": "debug/dwarf", - "dwarf.AttrCallLine": "debug/dwarf", - "dwarf.AttrCalling": "debug/dwarf", - 
"dwarf.AttrCommonRef": "debug/dwarf", - "dwarf.AttrCompDir": "debug/dwarf", - "dwarf.AttrConstValue": "debug/dwarf", - "dwarf.AttrContainingType": "debug/dwarf", - "dwarf.AttrCount": "debug/dwarf", - "dwarf.AttrDataLocation": "debug/dwarf", - "dwarf.AttrDataMemberLoc": "debug/dwarf", - "dwarf.AttrDeclColumn": "debug/dwarf", - "dwarf.AttrDeclFile": "debug/dwarf", - "dwarf.AttrDeclLine": "debug/dwarf", - "dwarf.AttrDeclaration": "debug/dwarf", - "dwarf.AttrDefaultValue": "debug/dwarf", - "dwarf.AttrDescription": "debug/dwarf", - "dwarf.AttrDiscr": "debug/dwarf", - "dwarf.AttrDiscrList": "debug/dwarf", - "dwarf.AttrDiscrValue": "debug/dwarf", - "dwarf.AttrEncoding": "debug/dwarf", - "dwarf.AttrEntrypc": "debug/dwarf", - "dwarf.AttrExtension": "debug/dwarf", - "dwarf.AttrExternal": "debug/dwarf", - "dwarf.AttrFrameBase": "debug/dwarf", - "dwarf.AttrFriend": "debug/dwarf", - "dwarf.AttrHighpc": "debug/dwarf", - "dwarf.AttrIdentifierCase": "debug/dwarf", - "dwarf.AttrImport": "debug/dwarf", - "dwarf.AttrInline": "debug/dwarf", - "dwarf.AttrIsOptional": "debug/dwarf", - "dwarf.AttrLanguage": "debug/dwarf", - "dwarf.AttrLocation": "debug/dwarf", - "dwarf.AttrLowerBound": "debug/dwarf", - "dwarf.AttrLowpc": "debug/dwarf", - "dwarf.AttrMacroInfo": "debug/dwarf", - "dwarf.AttrName": "debug/dwarf", - "dwarf.AttrNamelistItem": "debug/dwarf", - "dwarf.AttrOrdering": "debug/dwarf", - "dwarf.AttrPriority": "debug/dwarf", - "dwarf.AttrProducer": "debug/dwarf", - "dwarf.AttrPrototyped": "debug/dwarf", - "dwarf.AttrRanges": "debug/dwarf", - "dwarf.AttrReturnAddr": "debug/dwarf", - "dwarf.AttrSegment": "debug/dwarf", - "dwarf.AttrSibling": "debug/dwarf", - "dwarf.AttrSpecification": "debug/dwarf", - "dwarf.AttrStartScope": "debug/dwarf", - "dwarf.AttrStaticLink": "debug/dwarf", - "dwarf.AttrStmtList": "debug/dwarf", - "dwarf.AttrStride": "debug/dwarf", - "dwarf.AttrStrideSize": "debug/dwarf", - "dwarf.AttrStringLength": "debug/dwarf", - "dwarf.AttrTrampoline": "debug/dwarf", - "dwarf.AttrType": "debug/dwarf", - "dwarf.AttrUpperBound": "debug/dwarf", - "dwarf.AttrUseLocation": "debug/dwarf", - "dwarf.AttrUseUTF8": "debug/dwarf", - "dwarf.AttrVarParam": "debug/dwarf", - "dwarf.AttrVirtuality": "debug/dwarf", - "dwarf.AttrVisibility": "debug/dwarf", - "dwarf.AttrVtableElemLoc": "debug/dwarf", - "dwarf.BasicType": "debug/dwarf", - "dwarf.BoolType": "debug/dwarf", - "dwarf.CharType": "debug/dwarf", - "dwarf.Class": "debug/dwarf", - "dwarf.ClassAddress": "debug/dwarf", - "dwarf.ClassBlock": "debug/dwarf", - "dwarf.ClassConstant": "debug/dwarf", - "dwarf.ClassExprLoc": "debug/dwarf", - "dwarf.ClassFlag": "debug/dwarf", - "dwarf.ClassLinePtr": "debug/dwarf", - "dwarf.ClassLocListPtr": "debug/dwarf", - "dwarf.ClassMacPtr": "debug/dwarf", - "dwarf.ClassRangeListPtr": "debug/dwarf", - "dwarf.ClassReference": "debug/dwarf", - "dwarf.ClassReferenceAlt": "debug/dwarf", - "dwarf.ClassReferenceSig": "debug/dwarf", - "dwarf.ClassString": "debug/dwarf", - "dwarf.ClassStringAlt": "debug/dwarf", - "dwarf.ClassUnknown": "debug/dwarf", - "dwarf.CommonType": "debug/dwarf", - "dwarf.ComplexType": "debug/dwarf", - "dwarf.Data": "debug/dwarf", - "dwarf.DecodeError": "debug/dwarf", - "dwarf.DotDotDotType": "debug/dwarf", - "dwarf.Entry": "debug/dwarf", - "dwarf.EnumType": "debug/dwarf", - "dwarf.EnumValue": "debug/dwarf", - "dwarf.ErrUnknownPC": "debug/dwarf", - "dwarf.Field": "debug/dwarf", - "dwarf.FloatType": "debug/dwarf", - "dwarf.FuncType": "debug/dwarf", - "dwarf.IntType": "debug/dwarf", - "dwarf.LineEntry": "debug/dwarf", - 
"dwarf.LineFile": "debug/dwarf", - "dwarf.LineReader": "debug/dwarf", - "dwarf.LineReaderPos": "debug/dwarf", - "dwarf.New": "debug/dwarf", - "dwarf.Offset": "debug/dwarf", - "dwarf.PtrType": "debug/dwarf", - "dwarf.QualType": "debug/dwarf", - "dwarf.Reader": "debug/dwarf", - "dwarf.StructField": "debug/dwarf", - "dwarf.StructType": "debug/dwarf", - "dwarf.Tag": "debug/dwarf", - "dwarf.TagAccessDeclaration": "debug/dwarf", - "dwarf.TagArrayType": "debug/dwarf", - "dwarf.TagBaseType": "debug/dwarf", - "dwarf.TagCatchDwarfBlock": "debug/dwarf", - "dwarf.TagClassType": "debug/dwarf", - "dwarf.TagCommonDwarfBlock": "debug/dwarf", - "dwarf.TagCommonInclusion": "debug/dwarf", - "dwarf.TagCompileUnit": "debug/dwarf", - "dwarf.TagCondition": "debug/dwarf", - "dwarf.TagConstType": "debug/dwarf", - "dwarf.TagConstant": "debug/dwarf", - "dwarf.TagDwarfProcedure": "debug/dwarf", - "dwarf.TagEntryPoint": "debug/dwarf", - "dwarf.TagEnumerationType": "debug/dwarf", - "dwarf.TagEnumerator": "debug/dwarf", - "dwarf.TagFileType": "debug/dwarf", - "dwarf.TagFormalParameter": "debug/dwarf", - "dwarf.TagFriend": "debug/dwarf", - "dwarf.TagImportedDeclaration": "debug/dwarf", - "dwarf.TagImportedModule": "debug/dwarf", - "dwarf.TagImportedUnit": "debug/dwarf", - "dwarf.TagInheritance": "debug/dwarf", - "dwarf.TagInlinedSubroutine": "debug/dwarf", - "dwarf.TagInterfaceType": "debug/dwarf", - "dwarf.TagLabel": "debug/dwarf", - "dwarf.TagLexDwarfBlock": "debug/dwarf", - "dwarf.TagMember": "debug/dwarf", - "dwarf.TagModule": "debug/dwarf", - "dwarf.TagMutableType": "debug/dwarf", - "dwarf.TagNamelist": "debug/dwarf", - "dwarf.TagNamelistItem": "debug/dwarf", - "dwarf.TagNamespace": "debug/dwarf", - "dwarf.TagPackedType": "debug/dwarf", - "dwarf.TagPartialUnit": "debug/dwarf", - "dwarf.TagPointerType": "debug/dwarf", - "dwarf.TagPtrToMemberType": "debug/dwarf", - "dwarf.TagReferenceType": "debug/dwarf", - "dwarf.TagRestrictType": "debug/dwarf", - "dwarf.TagRvalueReferenceType": "debug/dwarf", - "dwarf.TagSetType": "debug/dwarf", - "dwarf.TagSharedType": "debug/dwarf", - "dwarf.TagStringType": "debug/dwarf", - "dwarf.TagStructType": "debug/dwarf", - "dwarf.TagSubprogram": "debug/dwarf", - "dwarf.TagSubrangeType": "debug/dwarf", - "dwarf.TagSubroutineType": "debug/dwarf", - "dwarf.TagTemplateAlias": "debug/dwarf", - "dwarf.TagTemplateTypeParameter": "debug/dwarf", - "dwarf.TagTemplateValueParameter": "debug/dwarf", - "dwarf.TagThrownType": "debug/dwarf", - "dwarf.TagTryDwarfBlock": "debug/dwarf", - "dwarf.TagTypeUnit": "debug/dwarf", - "dwarf.TagTypedef": "debug/dwarf", - "dwarf.TagUnionType": "debug/dwarf", - "dwarf.TagUnspecifiedParameters": "debug/dwarf", - "dwarf.TagUnspecifiedType": "debug/dwarf", - "dwarf.TagVariable": "debug/dwarf", - "dwarf.TagVariant": "debug/dwarf", - "dwarf.TagVariantPart": "debug/dwarf", - "dwarf.TagVolatileType": "debug/dwarf", - "dwarf.TagWithStmt": "debug/dwarf", - "dwarf.Type": "debug/dwarf", - "dwarf.TypedefType": "debug/dwarf", - "dwarf.UcharType": "debug/dwarf", - "dwarf.UintType": "debug/dwarf", - "dwarf.UnspecifiedType": "debug/dwarf", - "dwarf.VoidType": "debug/dwarf", - "ecdsa.GenerateKey": "crypto/ecdsa", - "ecdsa.PrivateKey": "crypto/ecdsa", - "ecdsa.PublicKey": "crypto/ecdsa", - "ecdsa.Sign": "crypto/ecdsa", - "ecdsa.Verify": "crypto/ecdsa", - "elf.ARM_MAGIC_TRAMP_NUMBER": "debug/elf", - "elf.COMPRESS_HIOS": "debug/elf", - "elf.COMPRESS_HIPROC": "debug/elf", - "elf.COMPRESS_LOOS": "debug/elf", - "elf.COMPRESS_LOPROC": "debug/elf", - "elf.COMPRESS_ZLIB": "debug/elf", - 
"elf.Chdr32": "debug/elf", - "elf.Chdr64": "debug/elf", - "elf.Class": "debug/elf", - "elf.CompressionType": "debug/elf", - "elf.DF_BIND_NOW": "debug/elf", - "elf.DF_ORIGIN": "debug/elf", - "elf.DF_STATIC_TLS": "debug/elf", - "elf.DF_SYMBOLIC": "debug/elf", - "elf.DF_TEXTREL": "debug/elf", - "elf.DT_BIND_NOW": "debug/elf", - "elf.DT_DEBUG": "debug/elf", - "elf.DT_ENCODING": "debug/elf", - "elf.DT_FINI": "debug/elf", - "elf.DT_FINI_ARRAY": "debug/elf", - "elf.DT_FINI_ARRAYSZ": "debug/elf", - "elf.DT_FLAGS": "debug/elf", - "elf.DT_HASH": "debug/elf", - "elf.DT_HIOS": "debug/elf", - "elf.DT_HIPROC": "debug/elf", - "elf.DT_INIT": "debug/elf", - "elf.DT_INIT_ARRAY": "debug/elf", - "elf.DT_INIT_ARRAYSZ": "debug/elf", - "elf.DT_JMPREL": "debug/elf", - "elf.DT_LOOS": "debug/elf", - "elf.DT_LOPROC": "debug/elf", - "elf.DT_NEEDED": "debug/elf", - "elf.DT_NULL": "debug/elf", - "elf.DT_PLTGOT": "debug/elf", - "elf.DT_PLTREL": "debug/elf", - "elf.DT_PLTRELSZ": "debug/elf", - "elf.DT_PREINIT_ARRAY": "debug/elf", - "elf.DT_PREINIT_ARRAYSZ": "debug/elf", - "elf.DT_REL": "debug/elf", - "elf.DT_RELA": "debug/elf", - "elf.DT_RELAENT": "debug/elf", - "elf.DT_RELASZ": "debug/elf", - "elf.DT_RELENT": "debug/elf", - "elf.DT_RELSZ": "debug/elf", - "elf.DT_RPATH": "debug/elf", - "elf.DT_RUNPATH": "debug/elf", - "elf.DT_SONAME": "debug/elf", - "elf.DT_STRSZ": "debug/elf", - "elf.DT_STRTAB": "debug/elf", - "elf.DT_SYMBOLIC": "debug/elf", - "elf.DT_SYMENT": "debug/elf", - "elf.DT_SYMTAB": "debug/elf", - "elf.DT_TEXTREL": "debug/elf", - "elf.DT_VERNEED": "debug/elf", - "elf.DT_VERNEEDNUM": "debug/elf", - "elf.DT_VERSYM": "debug/elf", - "elf.Data": "debug/elf", - "elf.Dyn32": "debug/elf", - "elf.Dyn64": "debug/elf", - "elf.DynFlag": "debug/elf", - "elf.DynTag": "debug/elf", - "elf.EI_ABIVERSION": "debug/elf", - "elf.EI_CLASS": "debug/elf", - "elf.EI_DATA": "debug/elf", - "elf.EI_NIDENT": "debug/elf", - "elf.EI_OSABI": "debug/elf", - "elf.EI_PAD": "debug/elf", - "elf.EI_VERSION": "debug/elf", - "elf.ELFCLASS32": "debug/elf", - "elf.ELFCLASS64": "debug/elf", - "elf.ELFCLASSNONE": "debug/elf", - "elf.ELFDATA2LSB": "debug/elf", - "elf.ELFDATA2MSB": "debug/elf", - "elf.ELFDATANONE": "debug/elf", - "elf.ELFMAG": "debug/elf", - "elf.ELFOSABI_86OPEN": "debug/elf", - "elf.ELFOSABI_AIX": "debug/elf", - "elf.ELFOSABI_ARM": "debug/elf", - "elf.ELFOSABI_FREEBSD": "debug/elf", - "elf.ELFOSABI_HPUX": "debug/elf", - "elf.ELFOSABI_HURD": "debug/elf", - "elf.ELFOSABI_IRIX": "debug/elf", - "elf.ELFOSABI_LINUX": "debug/elf", - "elf.ELFOSABI_MODESTO": "debug/elf", - "elf.ELFOSABI_NETBSD": "debug/elf", - "elf.ELFOSABI_NONE": "debug/elf", - "elf.ELFOSABI_NSK": "debug/elf", - "elf.ELFOSABI_OPENBSD": "debug/elf", - "elf.ELFOSABI_OPENVMS": "debug/elf", - "elf.ELFOSABI_SOLARIS": "debug/elf", - "elf.ELFOSABI_STANDALONE": "debug/elf", - "elf.ELFOSABI_TRU64": "debug/elf", - "elf.EM_386": "debug/elf", - "elf.EM_486": "debug/elf", - "elf.EM_68HC12": "debug/elf", - "elf.EM_68K": "debug/elf", - "elf.EM_860": "debug/elf", - "elf.EM_88K": "debug/elf", - "elf.EM_960": "debug/elf", - "elf.EM_AARCH64": "debug/elf", - "elf.EM_ALPHA": "debug/elf", - "elf.EM_ALPHA_STD": "debug/elf", - "elf.EM_ARC": "debug/elf", - "elf.EM_ARM": "debug/elf", - "elf.EM_COLDFIRE": "debug/elf", - "elf.EM_FR20": "debug/elf", - "elf.EM_H8S": "debug/elf", - "elf.EM_H8_300": "debug/elf", - "elf.EM_H8_300H": "debug/elf", - "elf.EM_H8_500": "debug/elf", - "elf.EM_IA_64": "debug/elf", - "elf.EM_M32": "debug/elf", - "elf.EM_ME16": "debug/elf", - "elf.EM_MIPS": "debug/elf", - 
"elf.EM_MIPS_RS3_LE": "debug/elf", - "elf.EM_MIPS_RS4_BE": "debug/elf", - "elf.EM_MIPS_X": "debug/elf", - "elf.EM_MMA": "debug/elf", - "elf.EM_NCPU": "debug/elf", - "elf.EM_NDR1": "debug/elf", - "elf.EM_NONE": "debug/elf", - "elf.EM_PARISC": "debug/elf", - "elf.EM_PCP": "debug/elf", - "elf.EM_PPC": "debug/elf", - "elf.EM_PPC64": "debug/elf", - "elf.EM_RCE": "debug/elf", - "elf.EM_RH32": "debug/elf", - "elf.EM_S370": "debug/elf", - "elf.EM_S390": "debug/elf", - "elf.EM_SH": "debug/elf", - "elf.EM_SPARC": "debug/elf", - "elf.EM_SPARC32PLUS": "debug/elf", - "elf.EM_SPARCV9": "debug/elf", - "elf.EM_ST100": "debug/elf", - "elf.EM_STARCORE": "debug/elf", - "elf.EM_TINYJ": "debug/elf", - "elf.EM_TRICORE": "debug/elf", - "elf.EM_V800": "debug/elf", - "elf.EM_VPP500": "debug/elf", - "elf.EM_X86_64": "debug/elf", - "elf.ET_CORE": "debug/elf", - "elf.ET_DYN": "debug/elf", - "elf.ET_EXEC": "debug/elf", - "elf.ET_HIOS": "debug/elf", - "elf.ET_HIPROC": "debug/elf", - "elf.ET_LOOS": "debug/elf", - "elf.ET_LOPROC": "debug/elf", - "elf.ET_NONE": "debug/elf", - "elf.ET_REL": "debug/elf", - "elf.EV_CURRENT": "debug/elf", - "elf.EV_NONE": "debug/elf", - "elf.ErrNoSymbols": "debug/elf", - "elf.File": "debug/elf", - "elf.FileHeader": "debug/elf", - "elf.FormatError": "debug/elf", - "elf.Header32": "debug/elf", - "elf.Header64": "debug/elf", - "elf.ImportedSymbol": "debug/elf", - "elf.Machine": "debug/elf", - "elf.NT_FPREGSET": "debug/elf", - "elf.NT_PRPSINFO": "debug/elf", - "elf.NT_PRSTATUS": "debug/elf", - "elf.NType": "debug/elf", - "elf.NewFile": "debug/elf", - "elf.OSABI": "debug/elf", - "elf.Open": "debug/elf", - "elf.PF_MASKOS": "debug/elf", - "elf.PF_MASKPROC": "debug/elf", - "elf.PF_R": "debug/elf", - "elf.PF_W": "debug/elf", - "elf.PF_X": "debug/elf", - "elf.PT_DYNAMIC": "debug/elf", - "elf.PT_HIOS": "debug/elf", - "elf.PT_HIPROC": "debug/elf", - "elf.PT_INTERP": "debug/elf", - "elf.PT_LOAD": "debug/elf", - "elf.PT_LOOS": "debug/elf", - "elf.PT_LOPROC": "debug/elf", - "elf.PT_NOTE": "debug/elf", - "elf.PT_NULL": "debug/elf", - "elf.PT_PHDR": "debug/elf", - "elf.PT_SHLIB": "debug/elf", - "elf.PT_TLS": "debug/elf", - "elf.Prog": "debug/elf", - "elf.Prog32": "debug/elf", - "elf.Prog64": "debug/elf", - "elf.ProgFlag": "debug/elf", - "elf.ProgHeader": "debug/elf", - "elf.ProgType": "debug/elf", - "elf.R_386": "debug/elf", - "elf.R_386_32": "debug/elf", - "elf.R_386_COPY": "debug/elf", - "elf.R_386_GLOB_DAT": "debug/elf", - "elf.R_386_GOT32": "debug/elf", - "elf.R_386_GOTOFF": "debug/elf", - "elf.R_386_GOTPC": "debug/elf", - "elf.R_386_JMP_SLOT": "debug/elf", - "elf.R_386_NONE": "debug/elf", - "elf.R_386_PC32": "debug/elf", - "elf.R_386_PLT32": "debug/elf", - "elf.R_386_RELATIVE": "debug/elf", - "elf.R_386_TLS_DTPMOD32": "debug/elf", - "elf.R_386_TLS_DTPOFF32": "debug/elf", - "elf.R_386_TLS_GD": "debug/elf", - "elf.R_386_TLS_GD_32": "debug/elf", - "elf.R_386_TLS_GD_CALL": "debug/elf", - "elf.R_386_TLS_GD_POP": "debug/elf", - "elf.R_386_TLS_GD_PUSH": "debug/elf", - "elf.R_386_TLS_GOTIE": "debug/elf", - "elf.R_386_TLS_IE": "debug/elf", - "elf.R_386_TLS_IE_32": "debug/elf", - "elf.R_386_TLS_LDM": "debug/elf", - "elf.R_386_TLS_LDM_32": "debug/elf", - "elf.R_386_TLS_LDM_CALL": "debug/elf", - "elf.R_386_TLS_LDM_POP": "debug/elf", - "elf.R_386_TLS_LDM_PUSH": "debug/elf", - "elf.R_386_TLS_LDO_32": "debug/elf", - "elf.R_386_TLS_LE": "debug/elf", - "elf.R_386_TLS_LE_32": "debug/elf", - "elf.R_386_TLS_TPOFF": "debug/elf", - "elf.R_386_TLS_TPOFF32": "debug/elf", - "elf.R_390": "debug/elf", - "elf.R_390_12": 
"debug/elf", - "elf.R_390_16": "debug/elf", - "elf.R_390_20": "debug/elf", - "elf.R_390_32": "debug/elf", - "elf.R_390_64": "debug/elf", - "elf.R_390_8": "debug/elf", - "elf.R_390_COPY": "debug/elf", - "elf.R_390_GLOB_DAT": "debug/elf", - "elf.R_390_GOT12": "debug/elf", - "elf.R_390_GOT16": "debug/elf", - "elf.R_390_GOT20": "debug/elf", - "elf.R_390_GOT32": "debug/elf", - "elf.R_390_GOT64": "debug/elf", - "elf.R_390_GOTENT": "debug/elf", - "elf.R_390_GOTOFF": "debug/elf", - "elf.R_390_GOTOFF16": "debug/elf", - "elf.R_390_GOTOFF64": "debug/elf", - "elf.R_390_GOTPC": "debug/elf", - "elf.R_390_GOTPCDBL": "debug/elf", - "elf.R_390_GOTPLT12": "debug/elf", - "elf.R_390_GOTPLT16": "debug/elf", - "elf.R_390_GOTPLT20": "debug/elf", - "elf.R_390_GOTPLT32": "debug/elf", - "elf.R_390_GOTPLT64": "debug/elf", - "elf.R_390_GOTPLTENT": "debug/elf", - "elf.R_390_GOTPLTOFF16": "debug/elf", - "elf.R_390_GOTPLTOFF32": "debug/elf", - "elf.R_390_GOTPLTOFF64": "debug/elf", - "elf.R_390_JMP_SLOT": "debug/elf", - "elf.R_390_NONE": "debug/elf", - "elf.R_390_PC16": "debug/elf", - "elf.R_390_PC16DBL": "debug/elf", - "elf.R_390_PC32": "debug/elf", - "elf.R_390_PC32DBL": "debug/elf", - "elf.R_390_PC64": "debug/elf", - "elf.R_390_PLT16DBL": "debug/elf", - "elf.R_390_PLT32": "debug/elf", - "elf.R_390_PLT32DBL": "debug/elf", - "elf.R_390_PLT64": "debug/elf", - "elf.R_390_RELATIVE": "debug/elf", - "elf.R_390_TLS_DTPMOD": "debug/elf", - "elf.R_390_TLS_DTPOFF": "debug/elf", - "elf.R_390_TLS_GD32": "debug/elf", - "elf.R_390_TLS_GD64": "debug/elf", - "elf.R_390_TLS_GDCALL": "debug/elf", - "elf.R_390_TLS_GOTIE12": "debug/elf", - "elf.R_390_TLS_GOTIE20": "debug/elf", - "elf.R_390_TLS_GOTIE32": "debug/elf", - "elf.R_390_TLS_GOTIE64": "debug/elf", - "elf.R_390_TLS_IE32": "debug/elf", - "elf.R_390_TLS_IE64": "debug/elf", - "elf.R_390_TLS_IEENT": "debug/elf", - "elf.R_390_TLS_LDCALL": "debug/elf", - "elf.R_390_TLS_LDM32": "debug/elf", - "elf.R_390_TLS_LDM64": "debug/elf", - "elf.R_390_TLS_LDO32": "debug/elf", - "elf.R_390_TLS_LDO64": "debug/elf", - "elf.R_390_TLS_LE32": "debug/elf", - "elf.R_390_TLS_LE64": "debug/elf", - "elf.R_390_TLS_LOAD": "debug/elf", - "elf.R_390_TLS_TPOFF": "debug/elf", - "elf.R_AARCH64": "debug/elf", - "elf.R_AARCH64_ABS16": "debug/elf", - "elf.R_AARCH64_ABS32": "debug/elf", - "elf.R_AARCH64_ABS64": "debug/elf", - "elf.R_AARCH64_ADD_ABS_LO12_NC": "debug/elf", - "elf.R_AARCH64_ADR_GOT_PAGE": "debug/elf", - "elf.R_AARCH64_ADR_PREL_LO21": "debug/elf", - "elf.R_AARCH64_ADR_PREL_PG_HI21": "debug/elf", - "elf.R_AARCH64_ADR_PREL_PG_HI21_NC": "debug/elf", - "elf.R_AARCH64_CALL26": "debug/elf", - "elf.R_AARCH64_CONDBR19": "debug/elf", - "elf.R_AARCH64_COPY": "debug/elf", - "elf.R_AARCH64_GLOB_DAT": "debug/elf", - "elf.R_AARCH64_GOT_LD_PREL19": "debug/elf", - "elf.R_AARCH64_IRELATIVE": "debug/elf", - "elf.R_AARCH64_JUMP26": "debug/elf", - "elf.R_AARCH64_JUMP_SLOT": "debug/elf", - "elf.R_AARCH64_LD64_GOT_LO12_NC": "debug/elf", - "elf.R_AARCH64_LDST128_ABS_LO12_NC": "debug/elf", - "elf.R_AARCH64_LDST16_ABS_LO12_NC": "debug/elf", - "elf.R_AARCH64_LDST32_ABS_LO12_NC": "debug/elf", - "elf.R_AARCH64_LDST64_ABS_LO12_NC": "debug/elf", - "elf.R_AARCH64_LDST8_ABS_LO12_NC": "debug/elf", - "elf.R_AARCH64_LD_PREL_LO19": "debug/elf", - "elf.R_AARCH64_MOVW_SABS_G0": "debug/elf", - "elf.R_AARCH64_MOVW_SABS_G1": "debug/elf", - "elf.R_AARCH64_MOVW_SABS_G2": "debug/elf", - "elf.R_AARCH64_MOVW_UABS_G0": "debug/elf", - "elf.R_AARCH64_MOVW_UABS_G0_NC": "debug/elf", - "elf.R_AARCH64_MOVW_UABS_G1": "debug/elf", - 
"elf.R_AARCH64_MOVW_UABS_G1_NC": "debug/elf", - "elf.R_AARCH64_MOVW_UABS_G2": "debug/elf", - "elf.R_AARCH64_MOVW_UABS_G2_NC": "debug/elf", - "elf.R_AARCH64_MOVW_UABS_G3": "debug/elf", - "elf.R_AARCH64_NONE": "debug/elf", - "elf.R_AARCH64_NULL": "debug/elf", - "elf.R_AARCH64_P32_ABS16": "debug/elf", - "elf.R_AARCH64_P32_ABS32": "debug/elf", - "elf.R_AARCH64_P32_ADD_ABS_LO12_NC": "debug/elf", - "elf.R_AARCH64_P32_ADR_GOT_PAGE": "debug/elf", - "elf.R_AARCH64_P32_ADR_PREL_LO21": "debug/elf", - "elf.R_AARCH64_P32_ADR_PREL_PG_HI21": "debug/elf", - "elf.R_AARCH64_P32_CALL26": "debug/elf", - "elf.R_AARCH64_P32_CONDBR19": "debug/elf", - "elf.R_AARCH64_P32_COPY": "debug/elf", - "elf.R_AARCH64_P32_GLOB_DAT": "debug/elf", - "elf.R_AARCH64_P32_GOT_LD_PREL19": "debug/elf", - "elf.R_AARCH64_P32_IRELATIVE": "debug/elf", - "elf.R_AARCH64_P32_JUMP26": "debug/elf", - "elf.R_AARCH64_P32_JUMP_SLOT": "debug/elf", - "elf.R_AARCH64_P32_LD32_GOT_LO12_NC": "debug/elf", - "elf.R_AARCH64_P32_LDST128_ABS_LO12_NC": "debug/elf", - "elf.R_AARCH64_P32_LDST16_ABS_LO12_NC": "debug/elf", - "elf.R_AARCH64_P32_LDST32_ABS_LO12_NC": "debug/elf", - "elf.R_AARCH64_P32_LDST64_ABS_LO12_NC": "debug/elf", - "elf.R_AARCH64_P32_LDST8_ABS_LO12_NC": "debug/elf", - "elf.R_AARCH64_P32_LD_PREL_LO19": "debug/elf", - "elf.R_AARCH64_P32_MOVW_SABS_G0": "debug/elf", - "elf.R_AARCH64_P32_MOVW_UABS_G0": "debug/elf", - "elf.R_AARCH64_P32_MOVW_UABS_G0_NC": "debug/elf", - "elf.R_AARCH64_P32_MOVW_UABS_G1": "debug/elf", - "elf.R_AARCH64_P32_PREL16": "debug/elf", - "elf.R_AARCH64_P32_PREL32": "debug/elf", - "elf.R_AARCH64_P32_RELATIVE": "debug/elf", - "elf.R_AARCH64_P32_TLSDESC": "debug/elf", - "elf.R_AARCH64_P32_TLSDESC_ADD_LO12_NC": "debug/elf", - "elf.R_AARCH64_P32_TLSDESC_ADR_PAGE21": "debug/elf", - "elf.R_AARCH64_P32_TLSDESC_ADR_PREL21": "debug/elf", - "elf.R_AARCH64_P32_TLSDESC_CALL": "debug/elf", - "elf.R_AARCH64_P32_TLSDESC_LD32_LO12_NC": "debug/elf", - "elf.R_AARCH64_P32_TLSDESC_LD_PREL19": "debug/elf", - "elf.R_AARCH64_P32_TLSGD_ADD_LO12_NC": "debug/elf", - "elf.R_AARCH64_P32_TLSGD_ADR_PAGE21": "debug/elf", - "elf.R_AARCH64_P32_TLSIE_ADR_GOTTPREL_PAGE21": "debug/elf", - "elf.R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC": "debug/elf", - "elf.R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19": "debug/elf", - "elf.R_AARCH64_P32_TLSLE_ADD_TPREL_HI12": "debug/elf", - "elf.R_AARCH64_P32_TLSLE_ADD_TPREL_LO12": "debug/elf", - "elf.R_AARCH64_P32_TLSLE_ADD_TPREL_LO12_NC": "debug/elf", - "elf.R_AARCH64_P32_TLSLE_MOVW_TPREL_G0": "debug/elf", - "elf.R_AARCH64_P32_TLSLE_MOVW_TPREL_G0_NC": "debug/elf", - "elf.R_AARCH64_P32_TLSLE_MOVW_TPREL_G1": "debug/elf", - "elf.R_AARCH64_P32_TLS_DTPMOD": "debug/elf", - "elf.R_AARCH64_P32_TLS_DTPREL": "debug/elf", - "elf.R_AARCH64_P32_TLS_TPREL": "debug/elf", - "elf.R_AARCH64_P32_TSTBR14": "debug/elf", - "elf.R_AARCH64_PREL16": "debug/elf", - "elf.R_AARCH64_PREL32": "debug/elf", - "elf.R_AARCH64_PREL64": "debug/elf", - "elf.R_AARCH64_RELATIVE": "debug/elf", - "elf.R_AARCH64_TLSDESC": "debug/elf", - "elf.R_AARCH64_TLSDESC_ADD": "debug/elf", - "elf.R_AARCH64_TLSDESC_ADD_LO12_NC": "debug/elf", - "elf.R_AARCH64_TLSDESC_ADR_PAGE21": "debug/elf", - "elf.R_AARCH64_TLSDESC_ADR_PREL21": "debug/elf", - "elf.R_AARCH64_TLSDESC_CALL": "debug/elf", - "elf.R_AARCH64_TLSDESC_LD64_LO12_NC": "debug/elf", - "elf.R_AARCH64_TLSDESC_LDR": "debug/elf", - "elf.R_AARCH64_TLSDESC_LD_PREL19": "debug/elf", - "elf.R_AARCH64_TLSDESC_OFF_G0_NC": "debug/elf", - "elf.R_AARCH64_TLSDESC_OFF_G1": "debug/elf", - "elf.R_AARCH64_TLSGD_ADD_LO12_NC": "debug/elf", - 
"elf.R_AARCH64_TLSGD_ADR_PAGE21": "debug/elf", - "elf.R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21": "debug/elf", - "elf.R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC": "debug/elf", - "elf.R_AARCH64_TLSIE_LD_GOTTPREL_PREL19": "debug/elf", - "elf.R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC": "debug/elf", - "elf.R_AARCH64_TLSIE_MOVW_GOTTPREL_G1": "debug/elf", - "elf.R_AARCH64_TLSLE_ADD_TPREL_HI12": "debug/elf", - "elf.R_AARCH64_TLSLE_ADD_TPREL_LO12": "debug/elf", - "elf.R_AARCH64_TLSLE_ADD_TPREL_LO12_NC": "debug/elf", - "elf.R_AARCH64_TLSLE_MOVW_TPREL_G0": "debug/elf", - "elf.R_AARCH64_TLSLE_MOVW_TPREL_G0_NC": "debug/elf", - "elf.R_AARCH64_TLSLE_MOVW_TPREL_G1": "debug/elf", - "elf.R_AARCH64_TLSLE_MOVW_TPREL_G1_NC": "debug/elf", - "elf.R_AARCH64_TLSLE_MOVW_TPREL_G2": "debug/elf", - "elf.R_AARCH64_TLS_DTPMOD64": "debug/elf", - "elf.R_AARCH64_TLS_DTPREL64": "debug/elf", - "elf.R_AARCH64_TLS_TPREL64": "debug/elf", - "elf.R_AARCH64_TSTBR14": "debug/elf", - "elf.R_ALPHA": "debug/elf", - "elf.R_ALPHA_BRADDR": "debug/elf", - "elf.R_ALPHA_COPY": "debug/elf", - "elf.R_ALPHA_GLOB_DAT": "debug/elf", - "elf.R_ALPHA_GPDISP": "debug/elf", - "elf.R_ALPHA_GPREL32": "debug/elf", - "elf.R_ALPHA_GPRELHIGH": "debug/elf", - "elf.R_ALPHA_GPRELLOW": "debug/elf", - "elf.R_ALPHA_GPVALUE": "debug/elf", - "elf.R_ALPHA_HINT": "debug/elf", - "elf.R_ALPHA_IMMED_BR_HI32": "debug/elf", - "elf.R_ALPHA_IMMED_GP_16": "debug/elf", - "elf.R_ALPHA_IMMED_GP_HI32": "debug/elf", - "elf.R_ALPHA_IMMED_LO32": "debug/elf", - "elf.R_ALPHA_IMMED_SCN_HI32": "debug/elf", - "elf.R_ALPHA_JMP_SLOT": "debug/elf", - "elf.R_ALPHA_LITERAL": "debug/elf", - "elf.R_ALPHA_LITUSE": "debug/elf", - "elf.R_ALPHA_NONE": "debug/elf", - "elf.R_ALPHA_OP_PRSHIFT": "debug/elf", - "elf.R_ALPHA_OP_PSUB": "debug/elf", - "elf.R_ALPHA_OP_PUSH": "debug/elf", - "elf.R_ALPHA_OP_STORE": "debug/elf", - "elf.R_ALPHA_REFLONG": "debug/elf", - "elf.R_ALPHA_REFQUAD": "debug/elf", - "elf.R_ALPHA_RELATIVE": "debug/elf", - "elf.R_ALPHA_SREL16": "debug/elf", - "elf.R_ALPHA_SREL32": "debug/elf", - "elf.R_ALPHA_SREL64": "debug/elf", - "elf.R_ARM": "debug/elf", - "elf.R_ARM_ABS12": "debug/elf", - "elf.R_ARM_ABS16": "debug/elf", - "elf.R_ARM_ABS32": "debug/elf", - "elf.R_ARM_ABS8": "debug/elf", - "elf.R_ARM_AMP_VCALL9": "debug/elf", - "elf.R_ARM_COPY": "debug/elf", - "elf.R_ARM_GLOB_DAT": "debug/elf", - "elf.R_ARM_GNU_VTENTRY": "debug/elf", - "elf.R_ARM_GNU_VTINHERIT": "debug/elf", - "elf.R_ARM_GOT32": "debug/elf", - "elf.R_ARM_GOTOFF": "debug/elf", - "elf.R_ARM_GOTPC": "debug/elf", - "elf.R_ARM_JUMP_SLOT": "debug/elf", - "elf.R_ARM_NONE": "debug/elf", - "elf.R_ARM_PC13": "debug/elf", - "elf.R_ARM_PC24": "debug/elf", - "elf.R_ARM_PLT32": "debug/elf", - "elf.R_ARM_RABS32": "debug/elf", - "elf.R_ARM_RBASE": "debug/elf", - "elf.R_ARM_REL32": "debug/elf", - "elf.R_ARM_RELATIVE": "debug/elf", - "elf.R_ARM_RPC24": "debug/elf", - "elf.R_ARM_RREL32": "debug/elf", - "elf.R_ARM_RSBREL32": "debug/elf", - "elf.R_ARM_SBREL32": "debug/elf", - "elf.R_ARM_SWI24": "debug/elf", - "elf.R_ARM_THM_ABS5": "debug/elf", - "elf.R_ARM_THM_PC22": "debug/elf", - "elf.R_ARM_THM_PC8": "debug/elf", - "elf.R_ARM_THM_RPC22": "debug/elf", - "elf.R_ARM_THM_SWI8": "debug/elf", - "elf.R_ARM_THM_XPC22": "debug/elf", - "elf.R_ARM_XPC25": "debug/elf", - "elf.R_INFO": "debug/elf", - "elf.R_INFO32": "debug/elf", - "elf.R_MIPS": "debug/elf", - "elf.R_MIPS_16": "debug/elf", - "elf.R_MIPS_26": "debug/elf", - "elf.R_MIPS_32": "debug/elf", - "elf.R_MIPS_64": "debug/elf", - "elf.R_MIPS_ADD_IMMEDIATE": "debug/elf", - "elf.R_MIPS_CALL16": "debug/elf", - 
"elf.R_MIPS_CALL_HI16": "debug/elf", - "elf.R_MIPS_CALL_LO16": "debug/elf", - "elf.R_MIPS_DELETE": "debug/elf", - "elf.R_MIPS_GOT16": "debug/elf", - "elf.R_MIPS_GOT_DISP": "debug/elf", - "elf.R_MIPS_GOT_HI16": "debug/elf", - "elf.R_MIPS_GOT_LO16": "debug/elf", - "elf.R_MIPS_GOT_OFST": "debug/elf", - "elf.R_MIPS_GOT_PAGE": "debug/elf", - "elf.R_MIPS_GPREL16": "debug/elf", - "elf.R_MIPS_GPREL32": "debug/elf", - "elf.R_MIPS_HI16": "debug/elf", - "elf.R_MIPS_HIGHER": "debug/elf", - "elf.R_MIPS_HIGHEST": "debug/elf", - "elf.R_MIPS_INSERT_A": "debug/elf", - "elf.R_MIPS_INSERT_B": "debug/elf", - "elf.R_MIPS_JALR": "debug/elf", - "elf.R_MIPS_LITERAL": "debug/elf", - "elf.R_MIPS_LO16": "debug/elf", - "elf.R_MIPS_NONE": "debug/elf", - "elf.R_MIPS_PC16": "debug/elf", - "elf.R_MIPS_PJUMP": "debug/elf", - "elf.R_MIPS_REL16": "debug/elf", - "elf.R_MIPS_REL32": "debug/elf", - "elf.R_MIPS_RELGOT": "debug/elf", - "elf.R_MIPS_SCN_DISP": "debug/elf", - "elf.R_MIPS_SHIFT5": "debug/elf", - "elf.R_MIPS_SHIFT6": "debug/elf", - "elf.R_MIPS_SUB": "debug/elf", - "elf.R_MIPS_TLS_DTPMOD32": "debug/elf", - "elf.R_MIPS_TLS_DTPMOD64": "debug/elf", - "elf.R_MIPS_TLS_DTPREL32": "debug/elf", - "elf.R_MIPS_TLS_DTPREL64": "debug/elf", - "elf.R_MIPS_TLS_DTPREL_HI16": "debug/elf", - "elf.R_MIPS_TLS_DTPREL_LO16": "debug/elf", - "elf.R_MIPS_TLS_GD": "debug/elf", - "elf.R_MIPS_TLS_GOTTPREL": "debug/elf", - "elf.R_MIPS_TLS_LDM": "debug/elf", - "elf.R_MIPS_TLS_TPREL32": "debug/elf", - "elf.R_MIPS_TLS_TPREL64": "debug/elf", - "elf.R_MIPS_TLS_TPREL_HI16": "debug/elf", - "elf.R_MIPS_TLS_TPREL_LO16": "debug/elf", - "elf.R_PPC": "debug/elf", - "elf.R_PPC64": "debug/elf", - "elf.R_PPC64_ADDR14": "debug/elf", - "elf.R_PPC64_ADDR14_BRNTAKEN": "debug/elf", - "elf.R_PPC64_ADDR14_BRTAKEN": "debug/elf", - "elf.R_PPC64_ADDR16": "debug/elf", - "elf.R_PPC64_ADDR16_DS": "debug/elf", - "elf.R_PPC64_ADDR16_HA": "debug/elf", - "elf.R_PPC64_ADDR16_HI": "debug/elf", - "elf.R_PPC64_ADDR16_HIGHER": "debug/elf", - "elf.R_PPC64_ADDR16_HIGHERA": "debug/elf", - "elf.R_PPC64_ADDR16_HIGHEST": "debug/elf", - "elf.R_PPC64_ADDR16_HIGHESTA": "debug/elf", - "elf.R_PPC64_ADDR16_LO": "debug/elf", - "elf.R_PPC64_ADDR16_LO_DS": "debug/elf", - "elf.R_PPC64_ADDR24": "debug/elf", - "elf.R_PPC64_ADDR32": "debug/elf", - "elf.R_PPC64_ADDR64": "debug/elf", - "elf.R_PPC64_DTPMOD64": "debug/elf", - "elf.R_PPC64_DTPREL16": "debug/elf", - "elf.R_PPC64_DTPREL16_DS": "debug/elf", - "elf.R_PPC64_DTPREL16_HA": "debug/elf", - "elf.R_PPC64_DTPREL16_HI": "debug/elf", - "elf.R_PPC64_DTPREL16_HIGHER": "debug/elf", - "elf.R_PPC64_DTPREL16_HIGHERA": "debug/elf", - "elf.R_PPC64_DTPREL16_HIGHEST": "debug/elf", - "elf.R_PPC64_DTPREL16_HIGHESTA": "debug/elf", - "elf.R_PPC64_DTPREL16_LO": "debug/elf", - "elf.R_PPC64_DTPREL16_LO_DS": "debug/elf", - "elf.R_PPC64_DTPREL64": "debug/elf", - "elf.R_PPC64_GOT16": "debug/elf", - "elf.R_PPC64_GOT16_DS": "debug/elf", - "elf.R_PPC64_GOT16_HA": "debug/elf", - "elf.R_PPC64_GOT16_HI": "debug/elf", - "elf.R_PPC64_GOT16_LO": "debug/elf", - "elf.R_PPC64_GOT16_LO_DS": "debug/elf", - "elf.R_PPC64_GOT_DTPREL16_DS": "debug/elf", - "elf.R_PPC64_GOT_DTPREL16_HA": "debug/elf", - "elf.R_PPC64_GOT_DTPREL16_HI": "debug/elf", - "elf.R_PPC64_GOT_DTPREL16_LO_DS": "debug/elf", - "elf.R_PPC64_GOT_TLSGD16": "debug/elf", - "elf.R_PPC64_GOT_TLSGD16_HA": "debug/elf", - "elf.R_PPC64_GOT_TLSGD16_HI": "debug/elf", - "elf.R_PPC64_GOT_TLSGD16_LO": "debug/elf", - "elf.R_PPC64_GOT_TLSLD16": "debug/elf", - "elf.R_PPC64_GOT_TLSLD16_HA": "debug/elf", - "elf.R_PPC64_GOT_TLSLD16_HI": 
"debug/elf", - "elf.R_PPC64_GOT_TLSLD16_LO": "debug/elf", - "elf.R_PPC64_GOT_TPREL16_DS": "debug/elf", - "elf.R_PPC64_GOT_TPREL16_HA": "debug/elf", - "elf.R_PPC64_GOT_TPREL16_HI": "debug/elf", - "elf.R_PPC64_GOT_TPREL16_LO_DS": "debug/elf", - "elf.R_PPC64_JMP_SLOT": "debug/elf", - "elf.R_PPC64_NONE": "debug/elf", - "elf.R_PPC64_REL14": "debug/elf", - "elf.R_PPC64_REL14_BRNTAKEN": "debug/elf", - "elf.R_PPC64_REL14_BRTAKEN": "debug/elf", - "elf.R_PPC64_REL16": "debug/elf", - "elf.R_PPC64_REL16_HA": "debug/elf", - "elf.R_PPC64_REL16_HI": "debug/elf", - "elf.R_PPC64_REL16_LO": "debug/elf", - "elf.R_PPC64_REL24": "debug/elf", - "elf.R_PPC64_REL32": "debug/elf", - "elf.R_PPC64_REL64": "debug/elf", - "elf.R_PPC64_TLS": "debug/elf", - "elf.R_PPC64_TLSGD": "debug/elf", - "elf.R_PPC64_TLSLD": "debug/elf", - "elf.R_PPC64_TOC": "debug/elf", - "elf.R_PPC64_TOC16": "debug/elf", - "elf.R_PPC64_TOC16_DS": "debug/elf", - "elf.R_PPC64_TOC16_HA": "debug/elf", - "elf.R_PPC64_TOC16_HI": "debug/elf", - "elf.R_PPC64_TOC16_LO": "debug/elf", - "elf.R_PPC64_TOC16_LO_DS": "debug/elf", - "elf.R_PPC64_TPREL16": "debug/elf", - "elf.R_PPC64_TPREL16_DS": "debug/elf", - "elf.R_PPC64_TPREL16_HA": "debug/elf", - "elf.R_PPC64_TPREL16_HI": "debug/elf", - "elf.R_PPC64_TPREL16_HIGHER": "debug/elf", - "elf.R_PPC64_TPREL16_HIGHERA": "debug/elf", - "elf.R_PPC64_TPREL16_HIGHEST": "debug/elf", - "elf.R_PPC64_TPREL16_HIGHESTA": "debug/elf", - "elf.R_PPC64_TPREL16_LO": "debug/elf", - "elf.R_PPC64_TPREL16_LO_DS": "debug/elf", - "elf.R_PPC64_TPREL64": "debug/elf", - "elf.R_PPC_ADDR14": "debug/elf", - "elf.R_PPC_ADDR14_BRNTAKEN": "debug/elf", - "elf.R_PPC_ADDR14_BRTAKEN": "debug/elf", - "elf.R_PPC_ADDR16": "debug/elf", - "elf.R_PPC_ADDR16_HA": "debug/elf", - "elf.R_PPC_ADDR16_HI": "debug/elf", - "elf.R_PPC_ADDR16_LO": "debug/elf", - "elf.R_PPC_ADDR24": "debug/elf", - "elf.R_PPC_ADDR32": "debug/elf", - "elf.R_PPC_COPY": "debug/elf", - "elf.R_PPC_DTPMOD32": "debug/elf", - "elf.R_PPC_DTPREL16": "debug/elf", - "elf.R_PPC_DTPREL16_HA": "debug/elf", - "elf.R_PPC_DTPREL16_HI": "debug/elf", - "elf.R_PPC_DTPREL16_LO": "debug/elf", - "elf.R_PPC_DTPREL32": "debug/elf", - "elf.R_PPC_EMB_BIT_FLD": "debug/elf", - "elf.R_PPC_EMB_MRKREF": "debug/elf", - "elf.R_PPC_EMB_NADDR16": "debug/elf", - "elf.R_PPC_EMB_NADDR16_HA": "debug/elf", - "elf.R_PPC_EMB_NADDR16_HI": "debug/elf", - "elf.R_PPC_EMB_NADDR16_LO": "debug/elf", - "elf.R_PPC_EMB_NADDR32": "debug/elf", - "elf.R_PPC_EMB_RELSDA": "debug/elf", - "elf.R_PPC_EMB_RELSEC16": "debug/elf", - "elf.R_PPC_EMB_RELST_HA": "debug/elf", - "elf.R_PPC_EMB_RELST_HI": "debug/elf", - "elf.R_PPC_EMB_RELST_LO": "debug/elf", - "elf.R_PPC_EMB_SDA21": "debug/elf", - "elf.R_PPC_EMB_SDA2I16": "debug/elf", - "elf.R_PPC_EMB_SDA2REL": "debug/elf", - "elf.R_PPC_EMB_SDAI16": "debug/elf", - "elf.R_PPC_GLOB_DAT": "debug/elf", - "elf.R_PPC_GOT16": "debug/elf", - "elf.R_PPC_GOT16_HA": "debug/elf", - "elf.R_PPC_GOT16_HI": "debug/elf", - "elf.R_PPC_GOT16_LO": "debug/elf", - "elf.R_PPC_GOT_TLSGD16": "debug/elf", - "elf.R_PPC_GOT_TLSGD16_HA": "debug/elf", - "elf.R_PPC_GOT_TLSGD16_HI": "debug/elf", - "elf.R_PPC_GOT_TLSGD16_LO": "debug/elf", - "elf.R_PPC_GOT_TLSLD16": "debug/elf", - "elf.R_PPC_GOT_TLSLD16_HA": "debug/elf", - "elf.R_PPC_GOT_TLSLD16_HI": "debug/elf", - "elf.R_PPC_GOT_TLSLD16_LO": "debug/elf", - "elf.R_PPC_GOT_TPREL16": "debug/elf", - "elf.R_PPC_GOT_TPREL16_HA": "debug/elf", - "elf.R_PPC_GOT_TPREL16_HI": "debug/elf", - "elf.R_PPC_GOT_TPREL16_LO": "debug/elf", - "elf.R_PPC_JMP_SLOT": "debug/elf", - "elf.R_PPC_LOCAL24PC": 
"debug/elf", - "elf.R_PPC_NONE": "debug/elf", - "elf.R_PPC_PLT16_HA": "debug/elf", - "elf.R_PPC_PLT16_HI": "debug/elf", - "elf.R_PPC_PLT16_LO": "debug/elf", - "elf.R_PPC_PLT32": "debug/elf", - "elf.R_PPC_PLTREL24": "debug/elf", - "elf.R_PPC_PLTREL32": "debug/elf", - "elf.R_PPC_REL14": "debug/elf", - "elf.R_PPC_REL14_BRNTAKEN": "debug/elf", - "elf.R_PPC_REL14_BRTAKEN": "debug/elf", - "elf.R_PPC_REL24": "debug/elf", - "elf.R_PPC_REL32": "debug/elf", - "elf.R_PPC_RELATIVE": "debug/elf", - "elf.R_PPC_SDAREL16": "debug/elf", - "elf.R_PPC_SECTOFF": "debug/elf", - "elf.R_PPC_SECTOFF_HA": "debug/elf", - "elf.R_PPC_SECTOFF_HI": "debug/elf", - "elf.R_PPC_SECTOFF_LO": "debug/elf", - "elf.R_PPC_TLS": "debug/elf", - "elf.R_PPC_TPREL16": "debug/elf", - "elf.R_PPC_TPREL16_HA": "debug/elf", - "elf.R_PPC_TPREL16_HI": "debug/elf", - "elf.R_PPC_TPREL16_LO": "debug/elf", - "elf.R_PPC_TPREL32": "debug/elf", - "elf.R_PPC_UADDR16": "debug/elf", - "elf.R_PPC_UADDR32": "debug/elf", - "elf.R_SPARC": "debug/elf", - "elf.R_SPARC_10": "debug/elf", - "elf.R_SPARC_11": "debug/elf", - "elf.R_SPARC_13": "debug/elf", - "elf.R_SPARC_16": "debug/elf", - "elf.R_SPARC_22": "debug/elf", - "elf.R_SPARC_32": "debug/elf", - "elf.R_SPARC_5": "debug/elf", - "elf.R_SPARC_6": "debug/elf", - "elf.R_SPARC_64": "debug/elf", - "elf.R_SPARC_7": "debug/elf", - "elf.R_SPARC_8": "debug/elf", - "elf.R_SPARC_COPY": "debug/elf", - "elf.R_SPARC_DISP16": "debug/elf", - "elf.R_SPARC_DISP32": "debug/elf", - "elf.R_SPARC_DISP64": "debug/elf", - "elf.R_SPARC_DISP8": "debug/elf", - "elf.R_SPARC_GLOB_DAT": "debug/elf", - "elf.R_SPARC_GLOB_JMP": "debug/elf", - "elf.R_SPARC_GOT10": "debug/elf", - "elf.R_SPARC_GOT13": "debug/elf", - "elf.R_SPARC_GOT22": "debug/elf", - "elf.R_SPARC_H44": "debug/elf", - "elf.R_SPARC_HH22": "debug/elf", - "elf.R_SPARC_HI22": "debug/elf", - "elf.R_SPARC_HIPLT22": "debug/elf", - "elf.R_SPARC_HIX22": "debug/elf", - "elf.R_SPARC_HM10": "debug/elf", - "elf.R_SPARC_JMP_SLOT": "debug/elf", - "elf.R_SPARC_L44": "debug/elf", - "elf.R_SPARC_LM22": "debug/elf", - "elf.R_SPARC_LO10": "debug/elf", - "elf.R_SPARC_LOPLT10": "debug/elf", - "elf.R_SPARC_LOX10": "debug/elf", - "elf.R_SPARC_M44": "debug/elf", - "elf.R_SPARC_NONE": "debug/elf", - "elf.R_SPARC_OLO10": "debug/elf", - "elf.R_SPARC_PC10": "debug/elf", - "elf.R_SPARC_PC22": "debug/elf", - "elf.R_SPARC_PCPLT10": "debug/elf", - "elf.R_SPARC_PCPLT22": "debug/elf", - "elf.R_SPARC_PCPLT32": "debug/elf", - "elf.R_SPARC_PC_HH22": "debug/elf", - "elf.R_SPARC_PC_HM10": "debug/elf", - "elf.R_SPARC_PC_LM22": "debug/elf", - "elf.R_SPARC_PLT32": "debug/elf", - "elf.R_SPARC_PLT64": "debug/elf", - "elf.R_SPARC_REGISTER": "debug/elf", - "elf.R_SPARC_RELATIVE": "debug/elf", - "elf.R_SPARC_UA16": "debug/elf", - "elf.R_SPARC_UA32": "debug/elf", - "elf.R_SPARC_UA64": "debug/elf", - "elf.R_SPARC_WDISP16": "debug/elf", - "elf.R_SPARC_WDISP19": "debug/elf", - "elf.R_SPARC_WDISP22": "debug/elf", - "elf.R_SPARC_WDISP30": "debug/elf", - "elf.R_SPARC_WPLT30": "debug/elf", - "elf.R_SYM32": "debug/elf", - "elf.R_SYM64": "debug/elf", - "elf.R_TYPE32": "debug/elf", - "elf.R_TYPE64": "debug/elf", - "elf.R_X86_64": "debug/elf", - "elf.R_X86_64_16": "debug/elf", - "elf.R_X86_64_32": "debug/elf", - "elf.R_X86_64_32S": "debug/elf", - "elf.R_X86_64_64": "debug/elf", - "elf.R_X86_64_8": "debug/elf", - "elf.R_X86_64_COPY": "debug/elf", - "elf.R_X86_64_DTPMOD64": "debug/elf", - "elf.R_X86_64_DTPOFF32": "debug/elf", - "elf.R_X86_64_DTPOFF64": "debug/elf", - "elf.R_X86_64_GLOB_DAT": "debug/elf", - "elf.R_X86_64_GOT32": 
"debug/elf", - "elf.R_X86_64_GOTPCREL": "debug/elf", - "elf.R_X86_64_GOTTPOFF": "debug/elf", - "elf.R_X86_64_JMP_SLOT": "debug/elf", - "elf.R_X86_64_NONE": "debug/elf", - "elf.R_X86_64_PC16": "debug/elf", - "elf.R_X86_64_PC32": "debug/elf", - "elf.R_X86_64_PC8": "debug/elf", - "elf.R_X86_64_PLT32": "debug/elf", - "elf.R_X86_64_RELATIVE": "debug/elf", - "elf.R_X86_64_TLSGD": "debug/elf", - "elf.R_X86_64_TLSLD": "debug/elf", - "elf.R_X86_64_TPOFF32": "debug/elf", - "elf.R_X86_64_TPOFF64": "debug/elf", - "elf.Rel32": "debug/elf", - "elf.Rel64": "debug/elf", - "elf.Rela32": "debug/elf", - "elf.Rela64": "debug/elf", - "elf.SHF_ALLOC": "debug/elf", - "elf.SHF_COMPRESSED": "debug/elf", - "elf.SHF_EXECINSTR": "debug/elf", - "elf.SHF_GROUP": "debug/elf", - "elf.SHF_INFO_LINK": "debug/elf", - "elf.SHF_LINK_ORDER": "debug/elf", - "elf.SHF_MASKOS": "debug/elf", - "elf.SHF_MASKPROC": "debug/elf", - "elf.SHF_MERGE": "debug/elf", - "elf.SHF_OS_NONCONFORMING": "debug/elf", - "elf.SHF_STRINGS": "debug/elf", - "elf.SHF_TLS": "debug/elf", - "elf.SHF_WRITE": "debug/elf", - "elf.SHN_ABS": "debug/elf", - "elf.SHN_COMMON": "debug/elf", - "elf.SHN_HIOS": "debug/elf", - "elf.SHN_HIPROC": "debug/elf", - "elf.SHN_HIRESERVE": "debug/elf", - "elf.SHN_LOOS": "debug/elf", - "elf.SHN_LOPROC": "debug/elf", - "elf.SHN_LORESERVE": "debug/elf", - "elf.SHN_UNDEF": "debug/elf", - "elf.SHN_XINDEX": "debug/elf", - "elf.SHT_DYNAMIC": "debug/elf", - "elf.SHT_DYNSYM": "debug/elf", - "elf.SHT_FINI_ARRAY": "debug/elf", - "elf.SHT_GNU_ATTRIBUTES": "debug/elf", - "elf.SHT_GNU_HASH": "debug/elf", - "elf.SHT_GNU_LIBLIST": "debug/elf", - "elf.SHT_GNU_VERDEF": "debug/elf", - "elf.SHT_GNU_VERNEED": "debug/elf", - "elf.SHT_GNU_VERSYM": "debug/elf", - "elf.SHT_GROUP": "debug/elf", - "elf.SHT_HASH": "debug/elf", - "elf.SHT_HIOS": "debug/elf", - "elf.SHT_HIPROC": "debug/elf", - "elf.SHT_HIUSER": "debug/elf", - "elf.SHT_INIT_ARRAY": "debug/elf", - "elf.SHT_LOOS": "debug/elf", - "elf.SHT_LOPROC": "debug/elf", - "elf.SHT_LOUSER": "debug/elf", - "elf.SHT_NOBITS": "debug/elf", - "elf.SHT_NOTE": "debug/elf", - "elf.SHT_NULL": "debug/elf", - "elf.SHT_PREINIT_ARRAY": "debug/elf", - "elf.SHT_PROGBITS": "debug/elf", - "elf.SHT_REL": "debug/elf", - "elf.SHT_RELA": "debug/elf", - "elf.SHT_SHLIB": "debug/elf", - "elf.SHT_STRTAB": "debug/elf", - "elf.SHT_SYMTAB": "debug/elf", - "elf.SHT_SYMTAB_SHNDX": "debug/elf", - "elf.STB_GLOBAL": "debug/elf", - "elf.STB_HIOS": "debug/elf", - "elf.STB_HIPROC": "debug/elf", - "elf.STB_LOCAL": "debug/elf", - "elf.STB_LOOS": "debug/elf", - "elf.STB_LOPROC": "debug/elf", - "elf.STB_WEAK": "debug/elf", - "elf.STT_COMMON": "debug/elf", - "elf.STT_FILE": "debug/elf", - "elf.STT_FUNC": "debug/elf", - "elf.STT_HIOS": "debug/elf", - "elf.STT_HIPROC": "debug/elf", - "elf.STT_LOOS": "debug/elf", - "elf.STT_LOPROC": "debug/elf", - "elf.STT_NOTYPE": "debug/elf", - "elf.STT_OBJECT": "debug/elf", - "elf.STT_SECTION": "debug/elf", - "elf.STT_TLS": "debug/elf", - "elf.STV_DEFAULT": "debug/elf", - "elf.STV_HIDDEN": "debug/elf", - "elf.STV_INTERNAL": "debug/elf", - "elf.STV_PROTECTED": "debug/elf", - "elf.ST_BIND": "debug/elf", - "elf.ST_INFO": "debug/elf", - "elf.ST_TYPE": "debug/elf", - "elf.ST_VISIBILITY": "debug/elf", - "elf.Section": "debug/elf", - "elf.Section32": "debug/elf", - "elf.Section64": "debug/elf", - "elf.SectionFlag": "debug/elf", - "elf.SectionHeader": "debug/elf", - "elf.SectionIndex": "debug/elf", - "elf.SectionType": "debug/elf", - "elf.Sym32": "debug/elf", - "elf.Sym32Size": "debug/elf", - "elf.Sym64": "debug/elf", - 
"elf.Sym64Size": "debug/elf", - "elf.SymBind": "debug/elf", - "elf.SymType": "debug/elf", - "elf.SymVis": "debug/elf", - "elf.Symbol": "debug/elf", - "elf.Type": "debug/elf", - "elf.Version": "debug/elf", - "elliptic.Curve": "crypto/elliptic", - "elliptic.CurveParams": "crypto/elliptic", - "elliptic.GenerateKey": "crypto/elliptic", - "elliptic.Marshal": "crypto/elliptic", - "elliptic.P224": "crypto/elliptic", - "elliptic.P256": "crypto/elliptic", - "elliptic.P384": "crypto/elliptic", - "elliptic.P521": "crypto/elliptic", - "elliptic.Unmarshal": "crypto/elliptic", - "encoding.BinaryMarshaler": "encoding", - "encoding.BinaryUnmarshaler": "encoding", - "encoding.TextMarshaler": "encoding", - "encoding.TextUnmarshaler": "encoding", - "errors.New": "errors", - "exec.Cmd": "os/exec", - "exec.Command": "os/exec", - "exec.CommandContext": "os/exec", - "exec.ErrNotFound": "os/exec", - "exec.Error": "os/exec", - "exec.ExitError": "os/exec", - "exec.LookPath": "os/exec", - "expvar.Do": "expvar", - "expvar.Float": "expvar", - "expvar.Func": "expvar", - "expvar.Get": "expvar", - "expvar.Handler": "expvar", - "expvar.Int": "expvar", - "expvar.KeyValue": "expvar", - "expvar.Map": "expvar", - "expvar.NewFloat": "expvar", - "expvar.NewInt": "expvar", - "expvar.NewMap": "expvar", - "expvar.NewString": "expvar", - "expvar.Publish": "expvar", - "expvar.String": "expvar", - "expvar.Var": "expvar", - "fcgi.ErrConnClosed": "net/http/fcgi", - "fcgi.ErrRequestAborted": "net/http/fcgi", - "fcgi.Serve": "net/http/fcgi", - "filepath.Abs": "path/filepath", - "filepath.Base": "path/filepath", - "filepath.Clean": "path/filepath", - "filepath.Dir": "path/filepath", - "filepath.ErrBadPattern": "path/filepath", - "filepath.EvalSymlinks": "path/filepath", - "filepath.Ext": "path/filepath", - "filepath.FromSlash": "path/filepath", - "filepath.Glob": "path/filepath", - "filepath.HasPrefix": "path/filepath", - "filepath.IsAbs": "path/filepath", - "filepath.Join": "path/filepath", - "filepath.ListSeparator": "path/filepath", - "filepath.Match": "path/filepath", - "filepath.Rel": "path/filepath", - "filepath.Separator": "path/filepath", - "filepath.SkipDir": "path/filepath", - "filepath.Split": "path/filepath", - "filepath.SplitList": "path/filepath", - "filepath.ToSlash": "path/filepath", - "filepath.VolumeName": "path/filepath", - "filepath.Walk": "path/filepath", - "filepath.WalkFunc": "path/filepath", - "flag.Arg": "flag", - "flag.Args": "flag", - "flag.Bool": "flag", - "flag.BoolVar": "flag", - "flag.CommandLine": "flag", - "flag.ContinueOnError": "flag", - "flag.Duration": "flag", - "flag.DurationVar": "flag", - "flag.ErrHelp": "flag", - "flag.ErrorHandling": "flag", - "flag.ExitOnError": "flag", - "flag.Flag": "flag", - "flag.FlagSet": "flag", - "flag.Float64": "flag", - "flag.Float64Var": "flag", - "flag.Getter": "flag", - "flag.Int": "flag", - "flag.Int64": "flag", - "flag.Int64Var": "flag", - "flag.IntVar": "flag", - "flag.Lookup": "flag", - "flag.NArg": "flag", - "flag.NFlag": "flag", - "flag.NewFlagSet": "flag", - "flag.PanicOnError": "flag", - "flag.Parse": "flag", - "flag.Parsed": "flag", - "flag.PrintDefaults": "flag", - "flag.Set": "flag", - "flag.String": "flag", - "flag.StringVar": "flag", - "flag.Uint": "flag", - "flag.Uint64": "flag", - "flag.Uint64Var": "flag", - "flag.UintVar": "flag", - "flag.UnquoteUsage": "flag", - "flag.Usage": "flag", - "flag.Value": "flag", - "flag.Var": "flag", - "flag.Visit": "flag", - "flag.VisitAll": "flag", - "flate.BestCompression": "compress/flate", - "flate.BestSpeed": 
"compress/flate", - "flate.CorruptInputError": "compress/flate", - "flate.DefaultCompression": "compress/flate", - "flate.HuffmanOnly": "compress/flate", - "flate.InternalError": "compress/flate", - "flate.NewReader": "compress/flate", - "flate.NewReaderDict": "compress/flate", - "flate.NewWriter": "compress/flate", - "flate.NewWriterDict": "compress/flate", - "flate.NoCompression": "compress/flate", - "flate.ReadError": "compress/flate", - "flate.Reader": "compress/flate", - "flate.Resetter": "compress/flate", - "flate.WriteError": "compress/flate", - "flate.Writer": "compress/flate", - "fmt.Errorf": "fmt", - "fmt.Formatter": "fmt", - "fmt.Fprint": "fmt", - "fmt.Fprintf": "fmt", - "fmt.Fprintln": "fmt", - "fmt.Fscan": "fmt", - "fmt.Fscanf": "fmt", - "fmt.Fscanln": "fmt", - "fmt.GoStringer": "fmt", - "fmt.Print": "fmt", - "fmt.Printf": "fmt", - "fmt.Println": "fmt", - "fmt.Scan": "fmt", - "fmt.ScanState": "fmt", - "fmt.Scanf": "fmt", - "fmt.Scanln": "fmt", - "fmt.Scanner": "fmt", - "fmt.Sprint": "fmt", - "fmt.Sprintf": "fmt", - "fmt.Sprintln": "fmt", - "fmt.Sscan": "fmt", - "fmt.Sscanf": "fmt", - "fmt.Sscanln": "fmt", - "fmt.State": "fmt", - "fmt.Stringer": "fmt", - "fnv.New32": "hash/fnv", - "fnv.New32a": "hash/fnv", - "fnv.New64": "hash/fnv", - "fnv.New64a": "hash/fnv", - "format.Node": "go/format", - "format.Source": "go/format", - "gif.Decode": "image/gif", - "gif.DecodeAll": "image/gif", - "gif.DecodeConfig": "image/gif", - "gif.DisposalBackground": "image/gif", - "gif.DisposalNone": "image/gif", - "gif.DisposalPrevious": "image/gif", - "gif.Encode": "image/gif", - "gif.EncodeAll": "image/gif", - "gif.GIF": "image/gif", - "gif.Options": "image/gif", - "gob.CommonType": "encoding/gob", - "gob.Decoder": "encoding/gob", - "gob.Encoder": "encoding/gob", - "gob.GobDecoder": "encoding/gob", - "gob.GobEncoder": "encoding/gob", - "gob.NewDecoder": "encoding/gob", - "gob.NewEncoder": "encoding/gob", - "gob.Register": "encoding/gob", - "gob.RegisterName": "encoding/gob", - "gosym.DecodingError": "debug/gosym", - "gosym.Func": "debug/gosym", - "gosym.LineTable": "debug/gosym", - "gosym.NewLineTable": "debug/gosym", - "gosym.NewTable": "debug/gosym", - "gosym.Obj": "debug/gosym", - "gosym.Sym": "debug/gosym", - "gosym.Table": "debug/gosym", - "gosym.UnknownFileError": "debug/gosym", - "gosym.UnknownLineError": "debug/gosym", - "gzip.BestCompression": "compress/gzip", - "gzip.BestSpeed": "compress/gzip", - "gzip.DefaultCompression": "compress/gzip", - "gzip.ErrChecksum": "compress/gzip", - "gzip.ErrHeader": "compress/gzip", - "gzip.Header": "compress/gzip", - "gzip.HuffmanOnly": "compress/gzip", - "gzip.NewReader": "compress/gzip", - "gzip.NewWriter": "compress/gzip", - "gzip.NewWriterLevel": "compress/gzip", - "gzip.NoCompression": "compress/gzip", - "gzip.Reader": "compress/gzip", - "gzip.Writer": "compress/gzip", - "hash.Hash": "hash", - "hash.Hash32": "hash", - "hash.Hash64": "hash", - "heap.Fix": "container/heap", - "heap.Init": "container/heap", - "heap.Interface": "container/heap", - "heap.Pop": "container/heap", - "heap.Push": "container/heap", - "heap.Remove": "container/heap", - "hex.Decode": "encoding/hex", - "hex.DecodeString": "encoding/hex", - "hex.DecodedLen": "encoding/hex", - "hex.Dump": "encoding/hex", - "hex.Dumper": "encoding/hex", - "hex.Encode": "encoding/hex", - "hex.EncodeToString": "encoding/hex", - "hex.EncodedLen": "encoding/hex", - "hex.ErrLength": "encoding/hex", - "hex.InvalidByteError": "encoding/hex", - "hmac.Equal": "crypto/hmac", - "hmac.New": "crypto/hmac", - 
"html.EscapeString": "html", - "html.UnescapeString": "html", - "http.CanonicalHeaderKey": "net/http", - "http.Client": "net/http", - "http.CloseNotifier": "net/http", - "http.ConnState": "net/http", - "http.Cookie": "net/http", - "http.CookieJar": "net/http", - "http.DefaultClient": "net/http", - "http.DefaultMaxHeaderBytes": "net/http", - "http.DefaultMaxIdleConnsPerHost": "net/http", - "http.DefaultServeMux": "net/http", - "http.DefaultTransport": "net/http", - "http.DetectContentType": "net/http", - "http.Dir": "net/http", - "http.ErrAbortHandler": "net/http", - "http.ErrBodyNotAllowed": "net/http", - "http.ErrBodyReadAfterClose": "net/http", - "http.ErrContentLength": "net/http", - "http.ErrHandlerTimeout": "net/http", - "http.ErrHeaderTooLong": "net/http", - "http.ErrHijacked": "net/http", - "http.ErrLineTooLong": "net/http", - "http.ErrMissingBoundary": "net/http", - "http.ErrMissingContentLength": "net/http", - "http.ErrMissingFile": "net/http", - "http.ErrNoCookie": "net/http", - "http.ErrNoLocation": "net/http", - "http.ErrNotMultipart": "net/http", - "http.ErrNotSupported": "net/http", - "http.ErrServerClosed": "net/http", - "http.ErrShortBody": "net/http", - "http.ErrSkipAltProtocol": "net/http", - "http.ErrUnexpectedTrailer": "net/http", - "http.ErrUseLastResponse": "net/http", - "http.ErrWriteAfterFlush": "net/http", - "http.Error": "net/http", - "http.File": "net/http", - "http.FileServer": "net/http", - "http.FileSystem": "net/http", - "http.Flusher": "net/http", - "http.Get": "net/http", - "http.Handle": "net/http", - "http.HandleFunc": "net/http", - "http.Handler": "net/http", - "http.HandlerFunc": "net/http", - "http.Head": "net/http", - "http.Header": "net/http", - "http.Hijacker": "net/http", - "http.ListenAndServe": "net/http", - "http.ListenAndServeTLS": "net/http", - "http.LocalAddrContextKey": "net/http", - "http.MaxBytesReader": "net/http", - "http.MethodConnect": "net/http", - "http.MethodDelete": "net/http", - "http.MethodGet": "net/http", - "http.MethodHead": "net/http", - "http.MethodOptions": "net/http", - "http.MethodPatch": "net/http", - "http.MethodPost": "net/http", - "http.MethodPut": "net/http", - "http.MethodTrace": "net/http", - "http.NewFileTransport": "net/http", - "http.NewRequest": "net/http", - "http.NewServeMux": "net/http", - "http.NoBody": "net/http", - "http.NotFound": "net/http", - "http.NotFoundHandler": "net/http", - "http.ParseHTTPVersion": "net/http", - "http.ParseTime": "net/http", - "http.Post": "net/http", - "http.PostForm": "net/http", - "http.ProtocolError": "net/http", - "http.ProxyFromEnvironment": "net/http", - "http.ProxyURL": "net/http", - "http.PushOptions": "net/http", - "http.Pusher": "net/http", - "http.ReadRequest": "net/http", - "http.ReadResponse": "net/http", - "http.Redirect": "net/http", - "http.RedirectHandler": "net/http", - "http.Request": "net/http", - "http.Response": "net/http", - "http.ResponseWriter": "net/http", - "http.RoundTripper": "net/http", - "http.Serve": "net/http", - "http.ServeContent": "net/http", - "http.ServeFile": "net/http", - "http.ServeMux": "net/http", - "http.Server": "net/http", - "http.ServerContextKey": "net/http", - "http.SetCookie": "net/http", - "http.StateActive": "net/http", - "http.StateClosed": "net/http", - "http.StateHijacked": "net/http", - "http.StateIdle": "net/http", - "http.StateNew": "net/http", - "http.StatusAccepted": "net/http", - "http.StatusAlreadyReported": "net/http", - "http.StatusBadGateway": "net/http", - "http.StatusBadRequest": "net/http", - 
"http.StatusConflict": "net/http", - "http.StatusContinue": "net/http", - "http.StatusCreated": "net/http", - "http.StatusExpectationFailed": "net/http", - "http.StatusFailedDependency": "net/http", - "http.StatusForbidden": "net/http", - "http.StatusFound": "net/http", - "http.StatusGatewayTimeout": "net/http", - "http.StatusGone": "net/http", - "http.StatusHTTPVersionNotSupported": "net/http", - "http.StatusIMUsed": "net/http", - "http.StatusInsufficientStorage": "net/http", - "http.StatusInternalServerError": "net/http", - "http.StatusLengthRequired": "net/http", - "http.StatusLocked": "net/http", - "http.StatusLoopDetected": "net/http", - "http.StatusMethodNotAllowed": "net/http", - "http.StatusMovedPermanently": "net/http", - "http.StatusMultiStatus": "net/http", - "http.StatusMultipleChoices": "net/http", - "http.StatusNetworkAuthenticationRequired": "net/http", - "http.StatusNoContent": "net/http", - "http.StatusNonAuthoritativeInfo": "net/http", - "http.StatusNotAcceptable": "net/http", - "http.StatusNotExtended": "net/http", - "http.StatusNotFound": "net/http", - "http.StatusNotImplemented": "net/http", - "http.StatusNotModified": "net/http", - "http.StatusOK": "net/http", - "http.StatusPartialContent": "net/http", - "http.StatusPaymentRequired": "net/http", - "http.StatusPermanentRedirect": "net/http", - "http.StatusPreconditionFailed": "net/http", - "http.StatusPreconditionRequired": "net/http", - "http.StatusProcessing": "net/http", - "http.StatusProxyAuthRequired": "net/http", - "http.StatusRequestEntityTooLarge": "net/http", - "http.StatusRequestHeaderFieldsTooLarge": "net/http", - "http.StatusRequestTimeout": "net/http", - "http.StatusRequestURITooLong": "net/http", - "http.StatusRequestedRangeNotSatisfiable": "net/http", - "http.StatusResetContent": "net/http", - "http.StatusSeeOther": "net/http", - "http.StatusServiceUnavailable": "net/http", - "http.StatusSwitchingProtocols": "net/http", - "http.StatusTeapot": "net/http", - "http.StatusTemporaryRedirect": "net/http", - "http.StatusText": "net/http", - "http.StatusTooManyRequests": "net/http", - "http.StatusUnauthorized": "net/http", - "http.StatusUnavailableForLegalReasons": "net/http", - "http.StatusUnprocessableEntity": "net/http", - "http.StatusUnsupportedMediaType": "net/http", - "http.StatusUpgradeRequired": "net/http", - "http.StatusUseProxy": "net/http", - "http.StatusVariantAlsoNegotiates": "net/http", - "http.StripPrefix": "net/http", - "http.TimeFormat": "net/http", - "http.TimeoutHandler": "net/http", - "http.TrailerPrefix": "net/http", - "http.Transport": "net/http", - "httptest.DefaultRemoteAddr": "net/http/httptest", - "httptest.NewRecorder": "net/http/httptest", - "httptest.NewRequest": "net/http/httptest", - "httptest.NewServer": "net/http/httptest", - "httptest.NewTLSServer": "net/http/httptest", - "httptest.NewUnstartedServer": "net/http/httptest", - "httptest.ResponseRecorder": "net/http/httptest", - "httptest.Server": "net/http/httptest", - "httptrace.ClientTrace": "net/http/httptrace", - "httptrace.ContextClientTrace": "net/http/httptrace", - "httptrace.DNSDoneInfo": "net/http/httptrace", - "httptrace.DNSStartInfo": "net/http/httptrace", - "httptrace.GotConnInfo": "net/http/httptrace", - "httptrace.WithClientTrace": "net/http/httptrace", - "httptrace.WroteRequestInfo": "net/http/httptrace", - "httputil.BufferPool": "net/http/httputil", - "httputil.ClientConn": "net/http/httputil", - "httputil.DumpRequest": "net/http/httputil", - "httputil.DumpRequestOut": "net/http/httputil", - 
"httputil.DumpResponse": "net/http/httputil", - "httputil.ErrClosed": "net/http/httputil", - "httputil.ErrLineTooLong": "net/http/httputil", - "httputil.ErrPersistEOF": "net/http/httputil", - "httputil.ErrPipeline": "net/http/httputil", - "httputil.NewChunkedReader": "net/http/httputil", - "httputil.NewChunkedWriter": "net/http/httputil", - "httputil.NewClientConn": "net/http/httputil", - "httputil.NewProxyClientConn": "net/http/httputil", - "httputil.NewServerConn": "net/http/httputil", - "httputil.NewSingleHostReverseProxy": "net/http/httputil", - "httputil.ReverseProxy": "net/http/httputil", - "httputil.ServerConn": "net/http/httputil", - "image.Alpha": "image", - "image.Alpha16": "image", - "image.Black": "image", - "image.CMYK": "image", - "image.Config": "image", - "image.Decode": "image", - "image.DecodeConfig": "image", - "image.ErrFormat": "image", - "image.Gray": "image", - "image.Gray16": "image", - "image.Image": "image", - "image.NRGBA": "image", - "image.NRGBA64": "image", - "image.NYCbCrA": "image", - "image.NewAlpha": "image", - "image.NewAlpha16": "image", - "image.NewCMYK": "image", - "image.NewGray": "image", - "image.NewGray16": "image", - "image.NewNRGBA": "image", - "image.NewNRGBA64": "image", - "image.NewNYCbCrA": "image", - "image.NewPaletted": "image", - "image.NewRGBA": "image", - "image.NewRGBA64": "image", - "image.NewUniform": "image", - "image.NewYCbCr": "image", - "image.Opaque": "image", - "image.Paletted": "image", - "image.PalettedImage": "image", - "image.Point": "image", - "image.Pt": "image", - "image.RGBA": "image", - "image.RGBA64": "image", - "image.Rect": "image", - "image.Rectangle": "image", - "image.RegisterFormat": "image", - "image.Transparent": "image", - "image.Uniform": "image", - "image.White": "image", - "image.YCbCr": "image", - "image.YCbCrSubsampleRatio": "image", - "image.YCbCrSubsampleRatio410": "image", - "image.YCbCrSubsampleRatio411": "image", - "image.YCbCrSubsampleRatio420": "image", - "image.YCbCrSubsampleRatio422": "image", - "image.YCbCrSubsampleRatio440": "image", - "image.YCbCrSubsampleRatio444": "image", - "image.ZP": "image", - "image.ZR": "image", - "importer.Default": "go/importer", - "importer.For": "go/importer", - "importer.Lookup": "go/importer", - "io.ByteReader": "io", - "io.ByteScanner": "io", - "io.ByteWriter": "io", - "io.Closer": "io", - "io.Copy": "io", - "io.CopyBuffer": "io", - "io.CopyN": "io", - "io.EOF": "io", - "io.ErrClosedPipe": "io", - "io.ErrNoProgress": "io", - "io.ErrShortBuffer": "io", - "io.ErrShortWrite": "io", - "io.ErrUnexpectedEOF": "io", - "io.LimitReader": "io", - "io.LimitedReader": "io", - "io.MultiReader": "io", - "io.MultiWriter": "io", - "io.NewSectionReader": "io", - "io.Pipe": "io", - "io.PipeReader": "io", - "io.PipeWriter": "io", - "io.ReadAtLeast": "io", - "io.ReadCloser": "io", - "io.ReadFull": "io", - "io.ReadSeeker": "io", - "io.ReadWriteCloser": "io", - "io.ReadWriteSeeker": "io", - "io.ReadWriter": "io", - "io.Reader": "io", - "io.ReaderAt": "io", - "io.ReaderFrom": "io", - "io.RuneReader": "io", - "io.RuneScanner": "io", - "io.SectionReader": "io", - "io.SeekCurrent": "io", - "io.SeekEnd": "io", - "io.SeekStart": "io", - "io.Seeker": "io", - "io.TeeReader": "io", - "io.WriteCloser": "io", - "io.WriteSeeker": "io", - "io.WriteString": "io", - "io.Writer": "io", - "io.WriterAt": "io", - "io.WriterTo": "io", - "iotest.DataErrReader": "testing/iotest", - "iotest.ErrTimeout": "testing/iotest", - "iotest.HalfReader": "testing/iotest", - "iotest.NewReadLogger": "testing/iotest", - 
"iotest.NewWriteLogger": "testing/iotest", - "iotest.OneByteReader": "testing/iotest", - "iotest.TimeoutReader": "testing/iotest", - "iotest.TruncateWriter": "testing/iotest", - "ioutil.Discard": "io/ioutil", - "ioutil.NopCloser": "io/ioutil", - "ioutil.ReadAll": "io/ioutil", - "ioutil.ReadDir": "io/ioutil", - "ioutil.ReadFile": "io/ioutil", - "ioutil.TempDir": "io/ioutil", - "ioutil.TempFile": "io/ioutil", - "ioutil.WriteFile": "io/ioutil", - "jpeg.Decode": "image/jpeg", - "jpeg.DecodeConfig": "image/jpeg", - "jpeg.DefaultQuality": "image/jpeg", - "jpeg.Encode": "image/jpeg", - "jpeg.FormatError": "image/jpeg", - "jpeg.Options": "image/jpeg", - "jpeg.Reader": "image/jpeg", - "jpeg.UnsupportedError": "image/jpeg", - "json.Compact": "encoding/json", - "json.Decoder": "encoding/json", - "json.Delim": "encoding/json", - "json.Encoder": "encoding/json", - "json.HTMLEscape": "encoding/json", - "json.Indent": "encoding/json", - "json.InvalidUTF8Error": "encoding/json", - "json.InvalidUnmarshalError": "encoding/json", - "json.Marshal": "encoding/json", - "json.MarshalIndent": "encoding/json", - "json.Marshaler": "encoding/json", - "json.MarshalerError": "encoding/json", - "json.NewDecoder": "encoding/json", - "json.NewEncoder": "encoding/json", - "json.Number": "encoding/json", - "json.RawMessage": "encoding/json", - "json.SyntaxError": "encoding/json", - "json.Token": "encoding/json", - "json.Unmarshal": "encoding/json", - "json.UnmarshalFieldError": "encoding/json", - "json.UnmarshalTypeError": "encoding/json", - "json.Unmarshaler": "encoding/json", - "json.UnsupportedTypeError": "encoding/json", - "json.UnsupportedValueError": "encoding/json", - "jsonrpc.Dial": "net/rpc/jsonrpc", - "jsonrpc.NewClient": "net/rpc/jsonrpc", - "jsonrpc.NewClientCodec": "net/rpc/jsonrpc", - "jsonrpc.NewServerCodec": "net/rpc/jsonrpc", - "jsonrpc.ServeConn": "net/rpc/jsonrpc", - "list.Element": "container/list", - "list.List": "container/list", - "list.New": "container/list", - "log.Fatal": "log", - "log.Fatalf": "log", - "log.Fatalln": "log", - "log.Flags": "log", - "log.LUTC": "log", - "log.Ldate": "log", - "log.Llongfile": "log", - "log.Lmicroseconds": "log", - "log.Logger": "log", - "log.Lshortfile": "log", - "log.LstdFlags": "log", - "log.Ltime": "log", - "log.New": "log", - "log.Output": "log", - "log.Panic": "log", - "log.Panicf": "log", - "log.Panicln": "log", - "log.Prefix": "log", - "log.Print": "log", - "log.Printf": "log", - "log.Println": "log", - "log.SetFlags": "log", - "log.SetOutput": "log", - "log.SetPrefix": "log", - "lzw.LSB": "compress/lzw", - "lzw.MSB": "compress/lzw", - "lzw.NewReader": "compress/lzw", - "lzw.NewWriter": "compress/lzw", - "lzw.Order": "compress/lzw", - "macho.Cpu": "debug/macho", - "macho.Cpu386": "debug/macho", - "macho.CpuAmd64": "debug/macho", - "macho.CpuArm": "debug/macho", - "macho.CpuPpc": "debug/macho", - "macho.CpuPpc64": "debug/macho", - "macho.Dylib": "debug/macho", - "macho.DylibCmd": "debug/macho", - "macho.Dysymtab": "debug/macho", - "macho.DysymtabCmd": "debug/macho", - "macho.ErrNotFat": "debug/macho", - "macho.FatArch": "debug/macho", - "macho.FatArchHeader": "debug/macho", - "macho.FatFile": "debug/macho", - "macho.File": "debug/macho", - "macho.FileHeader": "debug/macho", - "macho.FormatError": "debug/macho", - "macho.Load": "debug/macho", - "macho.LoadBytes": "debug/macho", - "macho.LoadCmd": "debug/macho", - "macho.LoadCmdDylib": "debug/macho", - "macho.LoadCmdDylinker": "debug/macho", - "macho.LoadCmdDysymtab": "debug/macho", - "macho.LoadCmdSegment": 
"debug/macho", - "macho.LoadCmdSegment64": "debug/macho", - "macho.LoadCmdSymtab": "debug/macho", - "macho.LoadCmdThread": "debug/macho", - "macho.LoadCmdUnixThread": "debug/macho", - "macho.Magic32": "debug/macho", - "macho.Magic64": "debug/macho", - "macho.MagicFat": "debug/macho", - "macho.NewFatFile": "debug/macho", - "macho.NewFile": "debug/macho", - "macho.Nlist32": "debug/macho", - "macho.Nlist64": "debug/macho", - "macho.Open": "debug/macho", - "macho.OpenFat": "debug/macho", - "macho.Regs386": "debug/macho", - "macho.RegsAMD64": "debug/macho", - "macho.Section": "debug/macho", - "macho.Section32": "debug/macho", - "macho.Section64": "debug/macho", - "macho.SectionHeader": "debug/macho", - "macho.Segment": "debug/macho", - "macho.Segment32": "debug/macho", - "macho.Segment64": "debug/macho", - "macho.SegmentHeader": "debug/macho", - "macho.Symbol": "debug/macho", - "macho.Symtab": "debug/macho", - "macho.SymtabCmd": "debug/macho", - "macho.Thread": "debug/macho", - "macho.Type": "debug/macho", - "macho.TypeBundle": "debug/macho", - "macho.TypeDylib": "debug/macho", - "macho.TypeExec": "debug/macho", - "macho.TypeObj": "debug/macho", - "mail.Address": "net/mail", - "mail.AddressParser": "net/mail", - "mail.ErrHeaderNotPresent": "net/mail", - "mail.Header": "net/mail", - "mail.Message": "net/mail", - "mail.ParseAddress": "net/mail", - "mail.ParseAddressList": "net/mail", - "mail.ParseDate": "net/mail", - "mail.ReadMessage": "net/mail", - "math.Abs": "math", - "math.Acos": "math", - "math.Acosh": "math", - "math.Asin": "math", - "math.Asinh": "math", - "math.Atan": "math", - "math.Atan2": "math", - "math.Atanh": "math", - "math.Cbrt": "math", - "math.Ceil": "math", - "math.Copysign": "math", - "math.Cos": "math", - "math.Cosh": "math", - "math.Dim": "math", - "math.E": "math", - "math.Erf": "math", - "math.Erfc": "math", - "math.Exp": "math", - "math.Exp2": "math", - "math.Expm1": "math", - "math.Float32bits": "math", - "math.Float32frombits": "math", - "math.Float64bits": "math", - "math.Float64frombits": "math", - "math.Floor": "math", - "math.Frexp": "math", - "math.Gamma": "math", - "math.Hypot": "math", - "math.Ilogb": "math", - "math.Inf": "math", - "math.IsInf": "math", - "math.IsNaN": "math", - "math.J0": "math", - "math.J1": "math", - "math.Jn": "math", - "math.Ldexp": "math", - "math.Lgamma": "math", - "math.Ln10": "math", - "math.Ln2": "math", - "math.Log": "math", - "math.Log10": "math", - "math.Log10E": "math", - "math.Log1p": "math", - "math.Log2": "math", - "math.Log2E": "math", - "math.Logb": "math", - "math.Max": "math", - "math.MaxFloat32": "math", - "math.MaxFloat64": "math", - "math.MaxInt16": "math", - "math.MaxInt32": "math", - "math.MaxInt64": "math", - "math.MaxInt8": "math", - "math.MaxUint16": "math", - "math.MaxUint32": "math", - "math.MaxUint64": "math", - "math.MaxUint8": "math", - "math.Min": "math", - "math.MinInt16": "math", - "math.MinInt32": "math", - "math.MinInt64": "math", - "math.MinInt8": "math", - "math.Mod": "math", - "math.Modf": "math", - "math.NaN": "math", - "math.Nextafter": "math", - "math.Nextafter32": "math", - "math.Phi": "math", - "math.Pi": "math", - "math.Pow": "math", - "math.Pow10": "math", - "math.Remainder": "math", - "math.Signbit": "math", - "math.Sin": "math", - "math.Sincos": "math", - "math.Sinh": "math", - "math.SmallestNonzeroFloat32": "math", - "math.SmallestNonzeroFloat64": "math", - "math.Sqrt": "math", - "math.Sqrt2": "math", - "math.SqrtE": "math", - "math.SqrtPhi": "math", - "math.SqrtPi": "math", - "math.Tan": 
"math", - "math.Tanh": "math", - "math.Trunc": "math", - "math.Y0": "math", - "math.Y1": "math", - "math.Yn": "math", - "md5.BlockSize": "crypto/md5", - "md5.New": "crypto/md5", - "md5.Size": "crypto/md5", - "md5.Sum": "crypto/md5", - "mime.AddExtensionType": "mime", - "mime.BEncoding": "mime", - "mime.ExtensionsByType": "mime", - "mime.FormatMediaType": "mime", - "mime.ParseMediaType": "mime", - "mime.QEncoding": "mime", - "mime.TypeByExtension": "mime", - "mime.WordDecoder": "mime", - "mime.WordEncoder": "mime", - "multipart.File": "mime/multipart", - "multipart.FileHeader": "mime/multipart", - "multipart.Form": "mime/multipart", - "multipart.NewReader": "mime/multipart", - "multipart.NewWriter": "mime/multipart", - "multipart.Part": "mime/multipart", - "multipart.Reader": "mime/multipart", - "multipart.Writer": "mime/multipart", - "net.Addr": "net", - "net.AddrError": "net", - "net.Buffers": "net", - "net.CIDRMask": "net", - "net.Conn": "net", - "net.DNSConfigError": "net", - "net.DNSError": "net", - "net.DefaultResolver": "net", - "net.Dial": "net", - "net.DialIP": "net", - "net.DialTCP": "net", - "net.DialTimeout": "net", - "net.DialUDP": "net", - "net.DialUnix": "net", - "net.Dialer": "net", - "net.ErrWriteToConnected": "net", - "net.Error": "net", - "net.FileConn": "net", - "net.FileListener": "net", - "net.FilePacketConn": "net", - "net.FlagBroadcast": "net", - "net.FlagLoopback": "net", - "net.FlagMulticast": "net", - "net.FlagPointToPoint": "net", - "net.FlagUp": "net", - "net.Flags": "net", - "net.HardwareAddr": "net", - "net.IP": "net", - "net.IPAddr": "net", - "net.IPConn": "net", - "net.IPMask": "net", - "net.IPNet": "net", - "net.IPv4": "net", - "net.IPv4Mask": "net", - "net.IPv4allrouter": "net", - "net.IPv4allsys": "net", - "net.IPv4bcast": "net", - "net.IPv4len": "net", - "net.IPv4zero": "net", - "net.IPv6interfacelocalallnodes": "net", - "net.IPv6len": "net", - "net.IPv6linklocalallnodes": "net", - "net.IPv6linklocalallrouters": "net", - "net.IPv6loopback": "net", - "net.IPv6unspecified": "net", - "net.IPv6zero": "net", - "net.Interface": "net", - "net.InterfaceAddrs": "net", - "net.InterfaceByIndex": "net", - "net.InterfaceByName": "net", - "net.Interfaces": "net", - "net.InvalidAddrError": "net", - "net.JoinHostPort": "net", - "net.Listen": "net", - "net.ListenIP": "net", - "net.ListenMulticastUDP": "net", - "net.ListenPacket": "net", - "net.ListenTCP": "net", - "net.ListenUDP": "net", - "net.ListenUnix": "net", - "net.ListenUnixgram": "net", - "net.Listener": "net", - "net.LookupAddr": "net", - "net.LookupCNAME": "net", - "net.LookupHost": "net", - "net.LookupIP": "net", - "net.LookupMX": "net", - "net.LookupNS": "net", - "net.LookupPort": "net", - "net.LookupSRV": "net", - "net.LookupTXT": "net", - "net.MX": "net", - "net.NS": "net", - "net.OpError": "net", - "net.PacketConn": "net", - "net.ParseCIDR": "net", - "net.ParseError": "net", - "net.ParseIP": "net", - "net.ParseMAC": "net", - "net.Pipe": "net", - "net.ResolveIPAddr": "net", - "net.ResolveTCPAddr": "net", - "net.ResolveUDPAddr": "net", - "net.ResolveUnixAddr": "net", - "net.Resolver": "net", - "net.SRV": "net", - "net.SplitHostPort": "net", - "net.TCPAddr": "net", - "net.TCPConn": "net", - "net.TCPListener": "net", - "net.UDPAddr": "net", - "net.UDPConn": "net", - "net.UnixAddr": "net", - "net.UnixConn": "net", - "net.UnixListener": "net", - "net.UnknownNetworkError": "net", - "os.Args": "os", - "os.Chdir": "os", - "os.Chmod": "os", - "os.Chown": "os", - "os.Chtimes": "os", - "os.Clearenv": "os", - 
"os.Create": "os", - "os.DevNull": "os", - "os.Environ": "os", - "os.ErrClosed": "os", - "os.ErrExist": "os", - "os.ErrInvalid": "os", - "os.ErrNotExist": "os", - "os.ErrPermission": "os", - "os.Executable": "os", - "os.Exit": "os", - "os.Expand": "os", - "os.ExpandEnv": "os", - "os.File": "os", - "os.FileInfo": "os", - "os.FileMode": "os", - "os.FindProcess": "os", - "os.Getegid": "os", - "os.Getenv": "os", - "os.Geteuid": "os", - "os.Getgid": "os", - "os.Getgroups": "os", - "os.Getpagesize": "os", - "os.Getpid": "os", - "os.Getppid": "os", - "os.Getuid": "os", - "os.Getwd": "os", - "os.Hostname": "os", - "os.Interrupt": "os", - "os.IsExist": "os", - "os.IsNotExist": "os", - "os.IsPathSeparator": "os", - "os.IsPermission": "os", - "os.Kill": "os", - "os.Lchown": "os", - "os.Link": "os", - "os.LinkError": "os", - "os.LookupEnv": "os", - "os.Lstat": "os", - "os.Mkdir": "os", - "os.MkdirAll": "os", - "os.ModeAppend": "os", - "os.ModeCharDevice": "os", - "os.ModeDevice": "os", - "os.ModeDir": "os", - "os.ModeExclusive": "os", - "os.ModeNamedPipe": "os", - "os.ModePerm": "os", - "os.ModeSetgid": "os", - "os.ModeSetuid": "os", - "os.ModeSocket": "os", - "os.ModeSticky": "os", - "os.ModeSymlink": "os", - "os.ModeTemporary": "os", - "os.ModeType": "os", - "os.NewFile": "os", - "os.NewSyscallError": "os", - "os.O_APPEND": "os", - "os.O_CREATE": "os", - "os.O_EXCL": "os", - "os.O_RDONLY": "os", - "os.O_RDWR": "os", - "os.O_SYNC": "os", - "os.O_TRUNC": "os", - "os.O_WRONLY": "os", - "os.Open": "os", - "os.OpenFile": "os", - "os.PathError": "os", - "os.PathListSeparator": "os", - "os.PathSeparator": "os", - "os.Pipe": "os", - "os.ProcAttr": "os", - "os.Process": "os", - "os.ProcessState": "os", - "os.Readlink": "os", - "os.Remove": "os", - "os.RemoveAll": "os", - "os.Rename": "os", - "os.SEEK_CUR": "os", - "os.SEEK_END": "os", - "os.SEEK_SET": "os", - "os.SameFile": "os", - "os.Setenv": "os", - "os.Signal": "os", - "os.StartProcess": "os", - "os.Stat": "os", - "os.Stderr": "os", - "os.Stdin": "os", - "os.Stdout": "os", - "os.Symlink": "os", - "os.SyscallError": "os", - "os.TempDir": "os", - "os.Truncate": "os", - "os.Unsetenv": "os", - "palette.Plan9": "image/color/palette", - "palette.WebSafe": "image/color/palette", - "parse.ActionNode": "text/template/parse", - "parse.BoolNode": "text/template/parse", - "parse.BranchNode": "text/template/parse", - "parse.ChainNode": "text/template/parse", - "parse.CommandNode": "text/template/parse", - "parse.DotNode": "text/template/parse", - "parse.FieldNode": "text/template/parse", - "parse.IdentifierNode": "text/template/parse", - "parse.IfNode": "text/template/parse", - "parse.IsEmptyTree": "text/template/parse", - "parse.ListNode": "text/template/parse", - "parse.New": "text/template/parse", - "parse.NewIdentifier": "text/template/parse", - "parse.NilNode": "text/template/parse", - "parse.Node": "text/template/parse", - "parse.NodeAction": "text/template/parse", - "parse.NodeBool": "text/template/parse", - "parse.NodeChain": "text/template/parse", - "parse.NodeCommand": "text/template/parse", - "parse.NodeDot": "text/template/parse", - "parse.NodeField": "text/template/parse", - "parse.NodeIdentifier": "text/template/parse", - "parse.NodeIf": "text/template/parse", - "parse.NodeList": "text/template/parse", - "parse.NodeNil": "text/template/parse", - "parse.NodeNumber": "text/template/parse", - "parse.NodePipe": "text/template/parse", - "parse.NodeRange": "text/template/parse", - "parse.NodeString": "text/template/parse", - "parse.NodeTemplate": 
"text/template/parse", - "parse.NodeText": "text/template/parse", - "parse.NodeType": "text/template/parse", - "parse.NodeVariable": "text/template/parse", - "parse.NodeWith": "text/template/parse", - "parse.NumberNode": "text/template/parse", - "parse.Parse": "text/template/parse", - "parse.PipeNode": "text/template/parse", - "parse.Pos": "text/template/parse", - "parse.RangeNode": "text/template/parse", - "parse.StringNode": "text/template/parse", - "parse.TemplateNode": "text/template/parse", - "parse.TextNode": "text/template/parse", - "parse.Tree": "text/template/parse", - "parse.VariableNode": "text/template/parse", - "parse.WithNode": "text/template/parse", - "parser.AllErrors": "go/parser", - "parser.DeclarationErrors": "go/parser", - "parser.ImportsOnly": "go/parser", - "parser.Mode": "go/parser", - "parser.PackageClauseOnly": "go/parser", - "parser.ParseComments": "go/parser", - "parser.ParseDir": "go/parser", - "parser.ParseExpr": "go/parser", - "parser.ParseExprFrom": "go/parser", - "parser.ParseFile": "go/parser", - "parser.SpuriousErrors": "go/parser", - "parser.Trace": "go/parser", - "path.Base": "path", - "path.Clean": "path", - "path.Dir": "path", - "path.ErrBadPattern": "path", - "path.Ext": "path", - "path.IsAbs": "path", - "path.Join": "path", - "path.Match": "path", - "path.Split": "path", - "pe.COFFSymbol": "debug/pe", - "pe.COFFSymbolSize": "debug/pe", - "pe.DataDirectory": "debug/pe", - "pe.File": "debug/pe", - "pe.FileHeader": "debug/pe", - "pe.FormatError": "debug/pe", - "pe.IMAGE_FILE_MACHINE_AM33": "debug/pe", - "pe.IMAGE_FILE_MACHINE_AMD64": "debug/pe", - "pe.IMAGE_FILE_MACHINE_ARM": "debug/pe", - "pe.IMAGE_FILE_MACHINE_EBC": "debug/pe", - "pe.IMAGE_FILE_MACHINE_I386": "debug/pe", - "pe.IMAGE_FILE_MACHINE_IA64": "debug/pe", - "pe.IMAGE_FILE_MACHINE_M32R": "debug/pe", - "pe.IMAGE_FILE_MACHINE_MIPS16": "debug/pe", - "pe.IMAGE_FILE_MACHINE_MIPSFPU": "debug/pe", - "pe.IMAGE_FILE_MACHINE_MIPSFPU16": "debug/pe", - "pe.IMAGE_FILE_MACHINE_POWERPC": "debug/pe", - "pe.IMAGE_FILE_MACHINE_POWERPCFP": "debug/pe", - "pe.IMAGE_FILE_MACHINE_R4000": "debug/pe", - "pe.IMAGE_FILE_MACHINE_SH3": "debug/pe", - "pe.IMAGE_FILE_MACHINE_SH3DSP": "debug/pe", - "pe.IMAGE_FILE_MACHINE_SH4": "debug/pe", - "pe.IMAGE_FILE_MACHINE_SH5": "debug/pe", - "pe.IMAGE_FILE_MACHINE_THUMB": "debug/pe", - "pe.IMAGE_FILE_MACHINE_UNKNOWN": "debug/pe", - "pe.IMAGE_FILE_MACHINE_WCEMIPSV2": "debug/pe", - "pe.ImportDirectory": "debug/pe", - "pe.NewFile": "debug/pe", - "pe.Open": "debug/pe", - "pe.OptionalHeader32": "debug/pe", - "pe.OptionalHeader64": "debug/pe", - "pe.Reloc": "debug/pe", - "pe.Section": "debug/pe", - "pe.SectionHeader": "debug/pe", - "pe.SectionHeader32": "debug/pe", - "pe.StringTable": "debug/pe", - "pe.Symbol": "debug/pe", - "pem.Block": "encoding/pem", - "pem.Decode": "encoding/pem", - "pem.Encode": "encoding/pem", - "pem.EncodeToMemory": "encoding/pem", - "pkix.AlgorithmIdentifier": "crypto/x509/pkix", - "pkix.AttributeTypeAndValue": "crypto/x509/pkix", - "pkix.AttributeTypeAndValueSET": "crypto/x509/pkix", - "pkix.CertificateList": "crypto/x509/pkix", - "pkix.Extension": "crypto/x509/pkix", - "pkix.Name": "crypto/x509/pkix", - "pkix.RDNSequence": "crypto/x509/pkix", - "pkix.RelativeDistinguishedNameSET": "crypto/x509/pkix", - "pkix.RevokedCertificate": "crypto/x509/pkix", - "pkix.TBSCertificateList": "crypto/x509/pkix", - "plan9obj.File": "debug/plan9obj", - "plan9obj.FileHeader": "debug/plan9obj", - "plan9obj.Magic386": "debug/plan9obj", - "plan9obj.Magic64": "debug/plan9obj", - 
"plan9obj.MagicAMD64": "debug/plan9obj", - "plan9obj.MagicARM": "debug/plan9obj", - "plan9obj.NewFile": "debug/plan9obj", - "plan9obj.Open": "debug/plan9obj", - "plan9obj.Section": "debug/plan9obj", - "plan9obj.SectionHeader": "debug/plan9obj", - "plan9obj.Sym": "debug/plan9obj", - "plugin.Open": "plugin", - "plugin.Plugin": "plugin", - "plugin.Symbol": "plugin", - "png.BestCompression": "image/png", - "png.BestSpeed": "image/png", - "png.CompressionLevel": "image/png", - "png.Decode": "image/png", - "png.DecodeConfig": "image/png", - "png.DefaultCompression": "image/png", - "png.Encode": "image/png", - "png.Encoder": "image/png", - "png.FormatError": "image/png", - "png.NoCompression": "image/png", - "png.UnsupportedError": "image/png", - "pprof.Cmdline": "net/http/pprof", - "pprof.Handler": "net/http/pprof", - "pprof.Index": "net/http/pprof", - "pprof.Lookup": "runtime/pprof", - "pprof.NewProfile": "runtime/pprof", - // "pprof.Profile" is ambiguous - "pprof.Profiles": "runtime/pprof", - "pprof.StartCPUProfile": "runtime/pprof", - "pprof.StopCPUProfile": "runtime/pprof", - "pprof.Symbol": "net/http/pprof", - "pprof.Trace": "net/http/pprof", - "pprof.WriteHeapProfile": "runtime/pprof", - "printer.CommentedNode": "go/printer", - "printer.Config": "go/printer", - "printer.Fprint": "go/printer", - "printer.Mode": "go/printer", - "printer.RawFormat": "go/printer", - "printer.SourcePos": "go/printer", - "printer.TabIndent": "go/printer", - "printer.UseSpaces": "go/printer", - "quick.Check": "testing/quick", - "quick.CheckEqual": "testing/quick", - "quick.CheckEqualError": "testing/quick", - "quick.CheckError": "testing/quick", - "quick.Config": "testing/quick", - "quick.Generator": "testing/quick", - "quick.SetupError": "testing/quick", - "quick.Value": "testing/quick", - "quotedprintable.NewReader": "mime/quotedprintable", - "quotedprintable.NewWriter": "mime/quotedprintable", - "quotedprintable.Reader": "mime/quotedprintable", - "quotedprintable.Writer": "mime/quotedprintable", - "rand.ExpFloat64": "math/rand", - "rand.Float32": "math/rand", - "rand.Float64": "math/rand", - // "rand.Int" is ambiguous - "rand.Int31": "math/rand", - "rand.Int31n": "math/rand", - "rand.Int63": "math/rand", - "rand.Int63n": "math/rand", - "rand.Intn": "math/rand", - "rand.New": "math/rand", - "rand.NewSource": "math/rand", - "rand.NewZipf": "math/rand", - "rand.NormFloat64": "math/rand", - "rand.Perm": "math/rand", - "rand.Prime": "crypto/rand", - "rand.Rand": "math/rand", - // "rand.Read" is ambiguous - "rand.Reader": "crypto/rand", - "rand.Seed": "math/rand", - "rand.Source": "math/rand", - "rand.Source64": "math/rand", - "rand.Uint32": "math/rand", - "rand.Uint64": "math/rand", - "rand.Zipf": "math/rand", - "rc4.Cipher": "crypto/rc4", - "rc4.KeySizeError": "crypto/rc4", - "rc4.NewCipher": "crypto/rc4", - "reflect.Append": "reflect", - "reflect.AppendSlice": "reflect", - "reflect.Array": "reflect", - "reflect.ArrayOf": "reflect", - "reflect.Bool": "reflect", - "reflect.BothDir": "reflect", - "reflect.Chan": "reflect", - "reflect.ChanDir": "reflect", - "reflect.ChanOf": "reflect", - "reflect.Complex128": "reflect", - "reflect.Complex64": "reflect", - "reflect.Copy": "reflect", - "reflect.DeepEqual": "reflect", - "reflect.Float32": "reflect", - "reflect.Float64": "reflect", - "reflect.Func": "reflect", - "reflect.FuncOf": "reflect", - "reflect.Indirect": "reflect", - "reflect.Int": "reflect", - "reflect.Int16": "reflect", - "reflect.Int32": "reflect", - "reflect.Int64": "reflect", - "reflect.Int8": "reflect", - 
"reflect.Interface": "reflect", - "reflect.Invalid": "reflect", - "reflect.Kind": "reflect", - "reflect.MakeChan": "reflect", - "reflect.MakeFunc": "reflect", - "reflect.MakeMap": "reflect", - "reflect.MakeSlice": "reflect", - "reflect.Map": "reflect", - "reflect.MapOf": "reflect", - "reflect.Method": "reflect", - "reflect.New": "reflect", - "reflect.NewAt": "reflect", - "reflect.Ptr": "reflect", - "reflect.PtrTo": "reflect", - "reflect.RecvDir": "reflect", - "reflect.Select": "reflect", - "reflect.SelectCase": "reflect", - "reflect.SelectDefault": "reflect", - "reflect.SelectDir": "reflect", - "reflect.SelectRecv": "reflect", - "reflect.SelectSend": "reflect", - "reflect.SendDir": "reflect", - "reflect.Slice": "reflect", - "reflect.SliceHeader": "reflect", - "reflect.SliceOf": "reflect", - "reflect.String": "reflect", - "reflect.StringHeader": "reflect", - "reflect.Struct": "reflect", - "reflect.StructField": "reflect", - "reflect.StructOf": "reflect", - "reflect.StructTag": "reflect", - "reflect.Swapper": "reflect", - "reflect.TypeOf": "reflect", - "reflect.Uint": "reflect", - "reflect.Uint16": "reflect", - "reflect.Uint32": "reflect", - "reflect.Uint64": "reflect", - "reflect.Uint8": "reflect", - "reflect.Uintptr": "reflect", - "reflect.UnsafePointer": "reflect", - "reflect.Value": "reflect", - "reflect.ValueError": "reflect", - "reflect.ValueOf": "reflect", - "reflect.Zero": "reflect", - "regexp.Compile": "regexp", - "regexp.CompilePOSIX": "regexp", - "regexp.Match": "regexp", - "regexp.MatchReader": "regexp", - "regexp.MatchString": "regexp", - "regexp.MustCompile": "regexp", - "regexp.MustCompilePOSIX": "regexp", - "regexp.QuoteMeta": "regexp", - "regexp.Regexp": "regexp", - "ring.New": "container/ring", - "ring.Ring": "container/ring", - "rpc.Accept": "net/rpc", - "rpc.Call": "net/rpc", - "rpc.Client": "net/rpc", - "rpc.ClientCodec": "net/rpc", - "rpc.DefaultDebugPath": "net/rpc", - "rpc.DefaultRPCPath": "net/rpc", - "rpc.DefaultServer": "net/rpc", - "rpc.Dial": "net/rpc", - "rpc.DialHTTP": "net/rpc", - "rpc.DialHTTPPath": "net/rpc", - "rpc.ErrShutdown": "net/rpc", - "rpc.HandleHTTP": "net/rpc", - "rpc.NewClient": "net/rpc", - "rpc.NewClientWithCodec": "net/rpc", - "rpc.NewServer": "net/rpc", - "rpc.Register": "net/rpc", - "rpc.RegisterName": "net/rpc", - "rpc.Request": "net/rpc", - "rpc.Response": "net/rpc", - "rpc.ServeCodec": "net/rpc", - "rpc.ServeConn": "net/rpc", - "rpc.ServeRequest": "net/rpc", - "rpc.Server": "net/rpc", - "rpc.ServerCodec": "net/rpc", - "rpc.ServerError": "net/rpc", - "rsa.CRTValue": "crypto/rsa", - "rsa.DecryptOAEP": "crypto/rsa", - "rsa.DecryptPKCS1v15": "crypto/rsa", - "rsa.DecryptPKCS1v15SessionKey": "crypto/rsa", - "rsa.EncryptOAEP": "crypto/rsa", - "rsa.EncryptPKCS1v15": "crypto/rsa", - "rsa.ErrDecryption": "crypto/rsa", - "rsa.ErrMessageTooLong": "crypto/rsa", - "rsa.ErrVerification": "crypto/rsa", - "rsa.GenerateKey": "crypto/rsa", - "rsa.GenerateMultiPrimeKey": "crypto/rsa", - "rsa.OAEPOptions": "crypto/rsa", - "rsa.PKCS1v15DecryptOptions": "crypto/rsa", - "rsa.PSSOptions": "crypto/rsa", - "rsa.PSSSaltLengthAuto": "crypto/rsa", - "rsa.PSSSaltLengthEqualsHash": "crypto/rsa", - "rsa.PrecomputedValues": "crypto/rsa", - "rsa.PrivateKey": "crypto/rsa", - "rsa.PublicKey": "crypto/rsa", - "rsa.SignPKCS1v15": "crypto/rsa", - "rsa.SignPSS": "crypto/rsa", - "rsa.VerifyPKCS1v15": "crypto/rsa", - "rsa.VerifyPSS": "crypto/rsa", - "runtime.BlockProfile": "runtime", - "runtime.BlockProfileRecord": "runtime", - "runtime.Breakpoint": "runtime", - 
"runtime.CPUProfile": "runtime", - "runtime.Caller": "runtime", - "runtime.Callers": "runtime", - "runtime.CallersFrames": "runtime", - "runtime.Compiler": "runtime", - "runtime.Error": "runtime", - "runtime.Frame": "runtime", - "runtime.Frames": "runtime", - "runtime.Func": "runtime", - "runtime.FuncForPC": "runtime", - "runtime.GC": "runtime", - "runtime.GOARCH": "runtime", - "runtime.GOMAXPROCS": "runtime", - "runtime.GOOS": "runtime", - "runtime.GOROOT": "runtime", - "runtime.Goexit": "runtime", - "runtime.GoroutineProfile": "runtime", - "runtime.Gosched": "runtime", - "runtime.KeepAlive": "runtime", - "runtime.LockOSThread": "runtime", - "runtime.MemProfile": "runtime", - "runtime.MemProfileRate": "runtime", - "runtime.MemProfileRecord": "runtime", - "runtime.MemStats": "runtime", - "runtime.MutexProfile": "runtime", - "runtime.NumCPU": "runtime", - "runtime.NumCgoCall": "runtime", - "runtime.NumGoroutine": "runtime", - "runtime.ReadMemStats": "runtime", - "runtime.ReadTrace": "runtime", - "runtime.SetBlockProfileRate": "runtime", - "runtime.SetCPUProfileRate": "runtime", - "runtime.SetCgoTraceback": "runtime", - "runtime.SetFinalizer": "runtime", - "runtime.SetMutexProfileFraction": "runtime", - "runtime.Stack": "runtime", - "runtime.StackRecord": "runtime", - "runtime.StartTrace": "runtime", - "runtime.StopTrace": "runtime", - "runtime.ThreadCreateProfile": "runtime", - "runtime.TypeAssertionError": "runtime", - "runtime.UnlockOSThread": "runtime", - "runtime.Version": "runtime", - "scanner.Char": "text/scanner", - "scanner.Comment": "text/scanner", - "scanner.EOF": "text/scanner", - "scanner.Error": "go/scanner", - "scanner.ErrorHandler": "go/scanner", - "scanner.ErrorList": "go/scanner", - "scanner.Float": "text/scanner", - "scanner.GoTokens": "text/scanner", - "scanner.GoWhitespace": "text/scanner", - "scanner.Ident": "text/scanner", - "scanner.Int": "text/scanner", - "scanner.Mode": "go/scanner", - "scanner.Position": "text/scanner", - "scanner.PrintError": "go/scanner", - "scanner.RawString": "text/scanner", - "scanner.ScanChars": "text/scanner", - // "scanner.ScanComments" is ambiguous - "scanner.ScanFloats": "text/scanner", - "scanner.ScanIdents": "text/scanner", - "scanner.ScanInts": "text/scanner", - "scanner.ScanRawStrings": "text/scanner", - "scanner.ScanStrings": "text/scanner", - // "scanner.Scanner" is ambiguous - "scanner.SkipComments": "text/scanner", - "scanner.String": "text/scanner", - "scanner.TokenString": "text/scanner", - "sha1.BlockSize": "crypto/sha1", - "sha1.New": "crypto/sha1", - "sha1.Size": "crypto/sha1", - "sha1.Sum": "crypto/sha1", - "sha256.BlockSize": "crypto/sha256", - "sha256.New": "crypto/sha256", - "sha256.New224": "crypto/sha256", - "sha256.Size": "crypto/sha256", - "sha256.Size224": "crypto/sha256", - "sha256.Sum224": "crypto/sha256", - "sha256.Sum256": "crypto/sha256", - "sha512.BlockSize": "crypto/sha512", - "sha512.New": "crypto/sha512", - "sha512.New384": "crypto/sha512", - "sha512.New512_224": "crypto/sha512", - "sha512.New512_256": "crypto/sha512", - "sha512.Size": "crypto/sha512", - "sha512.Size224": "crypto/sha512", - "sha512.Size256": "crypto/sha512", - "sha512.Size384": "crypto/sha512", - "sha512.Sum384": "crypto/sha512", - "sha512.Sum512": "crypto/sha512", - "sha512.Sum512_224": "crypto/sha512", - "sha512.Sum512_256": "crypto/sha512", - "signal.Ignore": "os/signal", - "signal.Notify": "os/signal", - "signal.Reset": "os/signal", - "signal.Stop": "os/signal", - "smtp.Auth": "net/smtp", - "smtp.CRAMMD5Auth": "net/smtp", - 
"smtp.Client": "net/smtp", - "smtp.Dial": "net/smtp", - "smtp.NewClient": "net/smtp", - "smtp.PlainAuth": "net/smtp", - "smtp.SendMail": "net/smtp", - "smtp.ServerInfo": "net/smtp", - "sort.Float64Slice": "sort", - "sort.Float64s": "sort", - "sort.Float64sAreSorted": "sort", - "sort.IntSlice": "sort", - "sort.Interface": "sort", - "sort.Ints": "sort", - "sort.IntsAreSorted": "sort", - "sort.IsSorted": "sort", - "sort.Reverse": "sort", - "sort.Search": "sort", - "sort.SearchFloat64s": "sort", - "sort.SearchInts": "sort", - "sort.SearchStrings": "sort", - "sort.Slice": "sort", - "sort.SliceIsSorted": "sort", - "sort.SliceStable": "sort", - "sort.Sort": "sort", - "sort.Stable": "sort", - "sort.StringSlice": "sort", - "sort.Strings": "sort", - "sort.StringsAreSorted": "sort", - "sql.ColumnType": "database/sql", - "sql.DB": "database/sql", - "sql.DBStats": "database/sql", - "sql.Drivers": "database/sql", - "sql.ErrNoRows": "database/sql", - "sql.ErrTxDone": "database/sql", - "sql.IsolationLevel": "database/sql", - "sql.LevelDefault": "database/sql", - "sql.LevelLinearizable": "database/sql", - "sql.LevelReadCommitted": "database/sql", - "sql.LevelReadUncommitted": "database/sql", - "sql.LevelRepeatableRead": "database/sql", - "sql.LevelSerializable": "database/sql", - "sql.LevelSnapshot": "database/sql", - "sql.LevelWriteCommitted": "database/sql", - "sql.Named": "database/sql", - "sql.NamedArg": "database/sql", - "sql.NullBool": "database/sql", - "sql.NullFloat64": "database/sql", - "sql.NullInt64": "database/sql", - "sql.NullString": "database/sql", - "sql.Open": "database/sql", - "sql.RawBytes": "database/sql", - "sql.Register": "database/sql", - "sql.Result": "database/sql", - "sql.Row": "database/sql", - "sql.Rows": "database/sql", - "sql.Scanner": "database/sql", - "sql.Stmt": "database/sql", - "sql.Tx": "database/sql", - "sql.TxOptions": "database/sql", - "strconv.AppendBool": "strconv", - "strconv.AppendFloat": "strconv", - "strconv.AppendInt": "strconv", - "strconv.AppendQuote": "strconv", - "strconv.AppendQuoteRune": "strconv", - "strconv.AppendQuoteRuneToASCII": "strconv", - "strconv.AppendQuoteRuneToGraphic": "strconv", - "strconv.AppendQuoteToASCII": "strconv", - "strconv.AppendQuoteToGraphic": "strconv", - "strconv.AppendUint": "strconv", - "strconv.Atoi": "strconv", - "strconv.CanBackquote": "strconv", - "strconv.ErrRange": "strconv", - "strconv.ErrSyntax": "strconv", - "strconv.FormatBool": "strconv", - "strconv.FormatFloat": "strconv", - "strconv.FormatInt": "strconv", - "strconv.FormatUint": "strconv", - "strconv.IntSize": "strconv", - "strconv.IsGraphic": "strconv", - "strconv.IsPrint": "strconv", - "strconv.Itoa": "strconv", - "strconv.NumError": "strconv", - "strconv.ParseBool": "strconv", - "strconv.ParseFloat": "strconv", - "strconv.ParseInt": "strconv", - "strconv.ParseUint": "strconv", - "strconv.Quote": "strconv", - "strconv.QuoteRune": "strconv", - "strconv.QuoteRuneToASCII": "strconv", - "strconv.QuoteRuneToGraphic": "strconv", - "strconv.QuoteToASCII": "strconv", - "strconv.QuoteToGraphic": "strconv", - "strconv.Unquote": "strconv", - "strconv.UnquoteChar": "strconv", - "strings.Compare": "strings", - "strings.Contains": "strings", - "strings.ContainsAny": "strings", - "strings.ContainsRune": "strings", - "strings.Count": "strings", - "strings.EqualFold": "strings", - "strings.Fields": "strings", - "strings.FieldsFunc": "strings", - "strings.HasPrefix": "strings", - "strings.HasSuffix": "strings", - "strings.Index": "strings", - "strings.IndexAny": "strings", - 
"strings.IndexByte": "strings", - "strings.IndexFunc": "strings", - "strings.IndexRune": "strings", - "strings.Join": "strings", - "strings.LastIndex": "strings", - "strings.LastIndexAny": "strings", - "strings.LastIndexByte": "strings", - "strings.LastIndexFunc": "strings", - "strings.Map": "strings", - "strings.NewReader": "strings", - "strings.NewReplacer": "strings", - "strings.Reader": "strings", - "strings.Repeat": "strings", - "strings.Replace": "strings", - "strings.Replacer": "strings", - "strings.Split": "strings", - "strings.SplitAfter": "strings", - "strings.SplitAfterN": "strings", - "strings.SplitN": "strings", - "strings.Title": "strings", - "strings.ToLower": "strings", - "strings.ToLowerSpecial": "strings", - "strings.ToTitle": "strings", - "strings.ToTitleSpecial": "strings", - "strings.ToUpper": "strings", - "strings.ToUpperSpecial": "strings", - "strings.Trim": "strings", - "strings.TrimFunc": "strings", - "strings.TrimLeft": "strings", - "strings.TrimLeftFunc": "strings", - "strings.TrimPrefix": "strings", - "strings.TrimRight": "strings", - "strings.TrimRightFunc": "strings", - "strings.TrimSpace": "strings", - "strings.TrimSuffix": "strings", - "subtle.ConstantTimeByteEq": "crypto/subtle", - "subtle.ConstantTimeCompare": "crypto/subtle", - "subtle.ConstantTimeCopy": "crypto/subtle", - "subtle.ConstantTimeEq": "crypto/subtle", - "subtle.ConstantTimeLessOrEq": "crypto/subtle", - "subtle.ConstantTimeSelect": "crypto/subtle", - "suffixarray.Index": "index/suffixarray", - "suffixarray.New": "index/suffixarray", - "sync.Cond": "sync", - "sync.Locker": "sync", - "sync.Mutex": "sync", - "sync.NewCond": "sync", - "sync.Once": "sync", - "sync.Pool": "sync", - "sync.RWMutex": "sync", - "sync.WaitGroup": "sync", - "syntax.ClassNL": "regexp/syntax", - "syntax.Compile": "regexp/syntax", - "syntax.DotNL": "regexp/syntax", - "syntax.EmptyBeginLine": "regexp/syntax", - "syntax.EmptyBeginText": "regexp/syntax", - "syntax.EmptyEndLine": "regexp/syntax", - "syntax.EmptyEndText": "regexp/syntax", - "syntax.EmptyNoWordBoundary": "regexp/syntax", - "syntax.EmptyOp": "regexp/syntax", - "syntax.EmptyOpContext": "regexp/syntax", - "syntax.EmptyWordBoundary": "regexp/syntax", - "syntax.ErrInternalError": "regexp/syntax", - "syntax.ErrInvalidCharClass": "regexp/syntax", - "syntax.ErrInvalidCharRange": "regexp/syntax", - "syntax.ErrInvalidEscape": "regexp/syntax", - "syntax.ErrInvalidNamedCapture": "regexp/syntax", - "syntax.ErrInvalidPerlOp": "regexp/syntax", - "syntax.ErrInvalidRepeatOp": "regexp/syntax", - "syntax.ErrInvalidRepeatSize": "regexp/syntax", - "syntax.ErrInvalidUTF8": "regexp/syntax", - "syntax.ErrMissingBracket": "regexp/syntax", - "syntax.ErrMissingParen": "regexp/syntax", - "syntax.ErrMissingRepeatArgument": "regexp/syntax", - "syntax.ErrTrailingBackslash": "regexp/syntax", - "syntax.ErrUnexpectedParen": "regexp/syntax", - "syntax.Error": "regexp/syntax", - "syntax.ErrorCode": "regexp/syntax", - "syntax.Flags": "regexp/syntax", - "syntax.FoldCase": "regexp/syntax", - "syntax.Inst": "regexp/syntax", - "syntax.InstAlt": "regexp/syntax", - "syntax.InstAltMatch": "regexp/syntax", - "syntax.InstCapture": "regexp/syntax", - "syntax.InstEmptyWidth": "regexp/syntax", - "syntax.InstFail": "regexp/syntax", - "syntax.InstMatch": "regexp/syntax", - "syntax.InstNop": "regexp/syntax", - "syntax.InstOp": "regexp/syntax", - "syntax.InstRune": "regexp/syntax", - "syntax.InstRune1": "regexp/syntax", - "syntax.InstRuneAny": "regexp/syntax", - "syntax.InstRuneAnyNotNL": "regexp/syntax", - 
"syntax.IsWordChar": "regexp/syntax", - "syntax.Literal": "regexp/syntax", - "syntax.MatchNL": "regexp/syntax", - "syntax.NonGreedy": "regexp/syntax", - "syntax.OneLine": "regexp/syntax", - "syntax.Op": "regexp/syntax", - "syntax.OpAlternate": "regexp/syntax", - "syntax.OpAnyChar": "regexp/syntax", - "syntax.OpAnyCharNotNL": "regexp/syntax", - "syntax.OpBeginLine": "regexp/syntax", - "syntax.OpBeginText": "regexp/syntax", - "syntax.OpCapture": "regexp/syntax", - "syntax.OpCharClass": "regexp/syntax", - "syntax.OpConcat": "regexp/syntax", - "syntax.OpEmptyMatch": "regexp/syntax", - "syntax.OpEndLine": "regexp/syntax", - "syntax.OpEndText": "regexp/syntax", - "syntax.OpLiteral": "regexp/syntax", - "syntax.OpNoMatch": "regexp/syntax", - "syntax.OpNoWordBoundary": "regexp/syntax", - "syntax.OpPlus": "regexp/syntax", - "syntax.OpQuest": "regexp/syntax", - "syntax.OpRepeat": "regexp/syntax", - "syntax.OpStar": "regexp/syntax", - "syntax.OpWordBoundary": "regexp/syntax", - "syntax.POSIX": "regexp/syntax", - "syntax.Parse": "regexp/syntax", - "syntax.Perl": "regexp/syntax", - "syntax.PerlX": "regexp/syntax", - "syntax.Prog": "regexp/syntax", - "syntax.Regexp": "regexp/syntax", - "syntax.Simple": "regexp/syntax", - "syntax.UnicodeGroups": "regexp/syntax", - "syntax.WasDollar": "regexp/syntax", - "syscall.AF_ALG": "syscall", - "syscall.AF_APPLETALK": "syscall", - "syscall.AF_ARP": "syscall", - "syscall.AF_ASH": "syscall", - "syscall.AF_ATM": "syscall", - "syscall.AF_ATMPVC": "syscall", - "syscall.AF_ATMSVC": "syscall", - "syscall.AF_AX25": "syscall", - "syscall.AF_BLUETOOTH": "syscall", - "syscall.AF_BRIDGE": "syscall", - "syscall.AF_CAIF": "syscall", - "syscall.AF_CAN": "syscall", - "syscall.AF_CCITT": "syscall", - "syscall.AF_CHAOS": "syscall", - "syscall.AF_CNT": "syscall", - "syscall.AF_COIP": "syscall", - "syscall.AF_DATAKIT": "syscall", - "syscall.AF_DECnet": "syscall", - "syscall.AF_DLI": "syscall", - "syscall.AF_E164": "syscall", - "syscall.AF_ECMA": "syscall", - "syscall.AF_ECONET": "syscall", - "syscall.AF_ENCAP": "syscall", - "syscall.AF_FILE": "syscall", - "syscall.AF_HYLINK": "syscall", - "syscall.AF_IEEE80211": "syscall", - "syscall.AF_IEEE802154": "syscall", - "syscall.AF_IMPLINK": "syscall", - "syscall.AF_INET": "syscall", - "syscall.AF_INET6": "syscall", - "syscall.AF_INET6_SDP": "syscall", - "syscall.AF_INET_SDP": "syscall", - "syscall.AF_IPX": "syscall", - "syscall.AF_IRDA": "syscall", - "syscall.AF_ISDN": "syscall", - "syscall.AF_ISO": "syscall", - "syscall.AF_IUCV": "syscall", - "syscall.AF_KEY": "syscall", - "syscall.AF_LAT": "syscall", - "syscall.AF_LINK": "syscall", - "syscall.AF_LLC": "syscall", - "syscall.AF_LOCAL": "syscall", - "syscall.AF_MAX": "syscall", - "syscall.AF_MPLS": "syscall", - "syscall.AF_NATM": "syscall", - "syscall.AF_NDRV": "syscall", - "syscall.AF_NETBEUI": "syscall", - "syscall.AF_NETBIOS": "syscall", - "syscall.AF_NETGRAPH": "syscall", - "syscall.AF_NETLINK": "syscall", - "syscall.AF_NETROM": "syscall", - "syscall.AF_NS": "syscall", - "syscall.AF_OROUTE": "syscall", - "syscall.AF_OSI": "syscall", - "syscall.AF_PACKET": "syscall", - "syscall.AF_PHONET": "syscall", - "syscall.AF_PPP": "syscall", - "syscall.AF_PPPOX": "syscall", - "syscall.AF_PUP": "syscall", - "syscall.AF_RDS": "syscall", - "syscall.AF_RESERVED_36": "syscall", - "syscall.AF_ROSE": "syscall", - "syscall.AF_ROUTE": "syscall", - "syscall.AF_RXRPC": "syscall", - "syscall.AF_SCLUSTER": "syscall", - "syscall.AF_SECURITY": "syscall", - "syscall.AF_SIP": "syscall", - "syscall.AF_SLOW": "syscall", 
- "syscall.AF_SNA": "syscall", - "syscall.AF_SYSTEM": "syscall", - "syscall.AF_TIPC": "syscall", - "syscall.AF_UNIX": "syscall", - "syscall.AF_UNSPEC": "syscall", - "syscall.AF_VENDOR00": "syscall", - "syscall.AF_VENDOR01": "syscall", - "syscall.AF_VENDOR02": "syscall", - "syscall.AF_VENDOR03": "syscall", - "syscall.AF_VENDOR04": "syscall", - "syscall.AF_VENDOR05": "syscall", - "syscall.AF_VENDOR06": "syscall", - "syscall.AF_VENDOR07": "syscall", - "syscall.AF_VENDOR08": "syscall", - "syscall.AF_VENDOR09": "syscall", - "syscall.AF_VENDOR10": "syscall", - "syscall.AF_VENDOR11": "syscall", - "syscall.AF_VENDOR12": "syscall", - "syscall.AF_VENDOR13": "syscall", - "syscall.AF_VENDOR14": "syscall", - "syscall.AF_VENDOR15": "syscall", - "syscall.AF_VENDOR16": "syscall", - "syscall.AF_VENDOR17": "syscall", - "syscall.AF_VENDOR18": "syscall", - "syscall.AF_VENDOR19": "syscall", - "syscall.AF_VENDOR20": "syscall", - "syscall.AF_VENDOR21": "syscall", - "syscall.AF_VENDOR22": "syscall", - "syscall.AF_VENDOR23": "syscall", - "syscall.AF_VENDOR24": "syscall", - "syscall.AF_VENDOR25": "syscall", - "syscall.AF_VENDOR26": "syscall", - "syscall.AF_VENDOR27": "syscall", - "syscall.AF_VENDOR28": "syscall", - "syscall.AF_VENDOR29": "syscall", - "syscall.AF_VENDOR30": "syscall", - "syscall.AF_VENDOR31": "syscall", - "syscall.AF_VENDOR32": "syscall", - "syscall.AF_VENDOR33": "syscall", - "syscall.AF_VENDOR34": "syscall", - "syscall.AF_VENDOR35": "syscall", - "syscall.AF_VENDOR36": "syscall", - "syscall.AF_VENDOR37": "syscall", - "syscall.AF_VENDOR38": "syscall", - "syscall.AF_VENDOR39": "syscall", - "syscall.AF_VENDOR40": "syscall", - "syscall.AF_VENDOR41": "syscall", - "syscall.AF_VENDOR42": "syscall", - "syscall.AF_VENDOR43": "syscall", - "syscall.AF_VENDOR44": "syscall", - "syscall.AF_VENDOR45": "syscall", - "syscall.AF_VENDOR46": "syscall", - "syscall.AF_VENDOR47": "syscall", - "syscall.AF_WANPIPE": "syscall", - "syscall.AF_X25": "syscall", - "syscall.AI_CANONNAME": "syscall", - "syscall.AI_NUMERICHOST": "syscall", - "syscall.AI_PASSIVE": "syscall", - "syscall.APPLICATION_ERROR": "syscall", - "syscall.ARPHRD_ADAPT": "syscall", - "syscall.ARPHRD_APPLETLK": "syscall", - "syscall.ARPHRD_ARCNET": "syscall", - "syscall.ARPHRD_ASH": "syscall", - "syscall.ARPHRD_ATM": "syscall", - "syscall.ARPHRD_AX25": "syscall", - "syscall.ARPHRD_BIF": "syscall", - "syscall.ARPHRD_CHAOS": "syscall", - "syscall.ARPHRD_CISCO": "syscall", - "syscall.ARPHRD_CSLIP": "syscall", - "syscall.ARPHRD_CSLIP6": "syscall", - "syscall.ARPHRD_DDCMP": "syscall", - "syscall.ARPHRD_DLCI": "syscall", - "syscall.ARPHRD_ECONET": "syscall", - "syscall.ARPHRD_EETHER": "syscall", - "syscall.ARPHRD_ETHER": "syscall", - "syscall.ARPHRD_EUI64": "syscall", - "syscall.ARPHRD_FCAL": "syscall", - "syscall.ARPHRD_FCFABRIC": "syscall", - "syscall.ARPHRD_FCPL": "syscall", - "syscall.ARPHRD_FCPP": "syscall", - "syscall.ARPHRD_FDDI": "syscall", - "syscall.ARPHRD_FRAD": "syscall", - "syscall.ARPHRD_FRELAY": "syscall", - "syscall.ARPHRD_HDLC": "syscall", - "syscall.ARPHRD_HIPPI": "syscall", - "syscall.ARPHRD_HWX25": "syscall", - "syscall.ARPHRD_IEEE1394": "syscall", - "syscall.ARPHRD_IEEE802": "syscall", - "syscall.ARPHRD_IEEE80211": "syscall", - "syscall.ARPHRD_IEEE80211_PRISM": "syscall", - "syscall.ARPHRD_IEEE80211_RADIOTAP": "syscall", - "syscall.ARPHRD_IEEE802154": "syscall", - "syscall.ARPHRD_IEEE802154_PHY": "syscall", - "syscall.ARPHRD_IEEE802_TR": "syscall", - "syscall.ARPHRD_INFINIBAND": "syscall", - "syscall.ARPHRD_IPDDP": "syscall", - 
"syscall.ARPHRD_IPGRE": "syscall", - "syscall.ARPHRD_IRDA": "syscall", - "syscall.ARPHRD_LAPB": "syscall", - "syscall.ARPHRD_LOCALTLK": "syscall", - "syscall.ARPHRD_LOOPBACK": "syscall", - "syscall.ARPHRD_METRICOM": "syscall", - "syscall.ARPHRD_NETROM": "syscall", - "syscall.ARPHRD_NONE": "syscall", - "syscall.ARPHRD_PIMREG": "syscall", - "syscall.ARPHRD_PPP": "syscall", - "syscall.ARPHRD_PRONET": "syscall", - "syscall.ARPHRD_RAWHDLC": "syscall", - "syscall.ARPHRD_ROSE": "syscall", - "syscall.ARPHRD_RSRVD": "syscall", - "syscall.ARPHRD_SIT": "syscall", - "syscall.ARPHRD_SKIP": "syscall", - "syscall.ARPHRD_SLIP": "syscall", - "syscall.ARPHRD_SLIP6": "syscall", - "syscall.ARPHRD_STRIP": "syscall", - "syscall.ARPHRD_TUNNEL": "syscall", - "syscall.ARPHRD_TUNNEL6": "syscall", - "syscall.ARPHRD_VOID": "syscall", - "syscall.ARPHRD_X25": "syscall", - "syscall.AUTHTYPE_CLIENT": "syscall", - "syscall.AUTHTYPE_SERVER": "syscall", - "syscall.Accept": "syscall", - "syscall.Accept4": "syscall", - "syscall.AcceptEx": "syscall", - "syscall.Access": "syscall", - "syscall.Acct": "syscall", - "syscall.AddrinfoW": "syscall", - "syscall.Adjtime": "syscall", - "syscall.Adjtimex": "syscall", - "syscall.AttachLsf": "syscall", - "syscall.B0": "syscall", - "syscall.B1000000": "syscall", - "syscall.B110": "syscall", - "syscall.B115200": "syscall", - "syscall.B1152000": "syscall", - "syscall.B1200": "syscall", - "syscall.B134": "syscall", - "syscall.B14400": "syscall", - "syscall.B150": "syscall", - "syscall.B1500000": "syscall", - "syscall.B1800": "syscall", - "syscall.B19200": "syscall", - "syscall.B200": "syscall", - "syscall.B2000000": "syscall", - "syscall.B230400": "syscall", - "syscall.B2400": "syscall", - "syscall.B2500000": "syscall", - "syscall.B28800": "syscall", - "syscall.B300": "syscall", - "syscall.B3000000": "syscall", - "syscall.B3500000": "syscall", - "syscall.B38400": "syscall", - "syscall.B4000000": "syscall", - "syscall.B460800": "syscall", - "syscall.B4800": "syscall", - "syscall.B50": "syscall", - "syscall.B500000": "syscall", - "syscall.B57600": "syscall", - "syscall.B576000": "syscall", - "syscall.B600": "syscall", - "syscall.B7200": "syscall", - "syscall.B75": "syscall", - "syscall.B76800": "syscall", - "syscall.B921600": "syscall", - "syscall.B9600": "syscall", - "syscall.BASE_PROTOCOL": "syscall", - "syscall.BIOCFEEDBACK": "syscall", - "syscall.BIOCFLUSH": "syscall", - "syscall.BIOCGBLEN": "syscall", - "syscall.BIOCGDIRECTION": "syscall", - "syscall.BIOCGDIRFILT": "syscall", - "syscall.BIOCGDLT": "syscall", - "syscall.BIOCGDLTLIST": "syscall", - "syscall.BIOCGETBUFMODE": "syscall", - "syscall.BIOCGETIF": "syscall", - "syscall.BIOCGETZMAX": "syscall", - "syscall.BIOCGFEEDBACK": "syscall", - "syscall.BIOCGFILDROP": "syscall", - "syscall.BIOCGHDRCMPLT": "syscall", - "syscall.BIOCGRSIG": "syscall", - "syscall.BIOCGRTIMEOUT": "syscall", - "syscall.BIOCGSEESENT": "syscall", - "syscall.BIOCGSTATS": "syscall", - "syscall.BIOCGSTATSOLD": "syscall", - "syscall.BIOCGTSTAMP": "syscall", - "syscall.BIOCIMMEDIATE": "syscall", - "syscall.BIOCLOCK": "syscall", - "syscall.BIOCPROMISC": "syscall", - "syscall.BIOCROTZBUF": "syscall", - "syscall.BIOCSBLEN": "syscall", - "syscall.BIOCSDIRECTION": "syscall", - "syscall.BIOCSDIRFILT": "syscall", - "syscall.BIOCSDLT": "syscall", - "syscall.BIOCSETBUFMODE": "syscall", - "syscall.BIOCSETF": "syscall", - "syscall.BIOCSETFNR": "syscall", - "syscall.BIOCSETIF": "syscall", - "syscall.BIOCSETWF": "syscall", - "syscall.BIOCSETZBUF": "syscall", - "syscall.BIOCSFEEDBACK": 
"syscall", - "syscall.BIOCSFILDROP": "syscall", - "syscall.BIOCSHDRCMPLT": "syscall", - "syscall.BIOCSRSIG": "syscall", - "syscall.BIOCSRTIMEOUT": "syscall", - "syscall.BIOCSSEESENT": "syscall", - "syscall.BIOCSTCPF": "syscall", - "syscall.BIOCSTSTAMP": "syscall", - "syscall.BIOCSUDPF": "syscall", - "syscall.BIOCVERSION": "syscall", - "syscall.BPF_A": "syscall", - "syscall.BPF_ABS": "syscall", - "syscall.BPF_ADD": "syscall", - "syscall.BPF_ALIGNMENT": "syscall", - "syscall.BPF_ALIGNMENT32": "syscall", - "syscall.BPF_ALU": "syscall", - "syscall.BPF_AND": "syscall", - "syscall.BPF_B": "syscall", - "syscall.BPF_BUFMODE_BUFFER": "syscall", - "syscall.BPF_BUFMODE_ZBUF": "syscall", - "syscall.BPF_DFLTBUFSIZE": "syscall", - "syscall.BPF_DIRECTION_IN": "syscall", - "syscall.BPF_DIRECTION_OUT": "syscall", - "syscall.BPF_DIV": "syscall", - "syscall.BPF_H": "syscall", - "syscall.BPF_IMM": "syscall", - "syscall.BPF_IND": "syscall", - "syscall.BPF_JA": "syscall", - "syscall.BPF_JEQ": "syscall", - "syscall.BPF_JGE": "syscall", - "syscall.BPF_JGT": "syscall", - "syscall.BPF_JMP": "syscall", - "syscall.BPF_JSET": "syscall", - "syscall.BPF_K": "syscall", - "syscall.BPF_LD": "syscall", - "syscall.BPF_LDX": "syscall", - "syscall.BPF_LEN": "syscall", - "syscall.BPF_LSH": "syscall", - "syscall.BPF_MAJOR_VERSION": "syscall", - "syscall.BPF_MAXBUFSIZE": "syscall", - "syscall.BPF_MAXINSNS": "syscall", - "syscall.BPF_MEM": "syscall", - "syscall.BPF_MEMWORDS": "syscall", - "syscall.BPF_MINBUFSIZE": "syscall", - "syscall.BPF_MINOR_VERSION": "syscall", - "syscall.BPF_MISC": "syscall", - "syscall.BPF_MSH": "syscall", - "syscall.BPF_MUL": "syscall", - "syscall.BPF_NEG": "syscall", - "syscall.BPF_OR": "syscall", - "syscall.BPF_RELEASE": "syscall", - "syscall.BPF_RET": "syscall", - "syscall.BPF_RSH": "syscall", - "syscall.BPF_ST": "syscall", - "syscall.BPF_STX": "syscall", - "syscall.BPF_SUB": "syscall", - "syscall.BPF_TAX": "syscall", - "syscall.BPF_TXA": "syscall", - "syscall.BPF_T_BINTIME": "syscall", - "syscall.BPF_T_BINTIME_FAST": "syscall", - "syscall.BPF_T_BINTIME_MONOTONIC": "syscall", - "syscall.BPF_T_BINTIME_MONOTONIC_FAST": "syscall", - "syscall.BPF_T_FAST": "syscall", - "syscall.BPF_T_FLAG_MASK": "syscall", - "syscall.BPF_T_FORMAT_MASK": "syscall", - "syscall.BPF_T_MICROTIME": "syscall", - "syscall.BPF_T_MICROTIME_FAST": "syscall", - "syscall.BPF_T_MICROTIME_MONOTONIC": "syscall", - "syscall.BPF_T_MICROTIME_MONOTONIC_FAST": "syscall", - "syscall.BPF_T_MONOTONIC": "syscall", - "syscall.BPF_T_MONOTONIC_FAST": "syscall", - "syscall.BPF_T_NANOTIME": "syscall", - "syscall.BPF_T_NANOTIME_FAST": "syscall", - "syscall.BPF_T_NANOTIME_MONOTONIC": "syscall", - "syscall.BPF_T_NANOTIME_MONOTONIC_FAST": "syscall", - "syscall.BPF_T_NONE": "syscall", - "syscall.BPF_T_NORMAL": "syscall", - "syscall.BPF_W": "syscall", - "syscall.BPF_X": "syscall", - "syscall.BRKINT": "syscall", - "syscall.Bind": "syscall", - "syscall.BindToDevice": "syscall", - "syscall.BpfBuflen": "syscall", - "syscall.BpfDatalink": "syscall", - "syscall.BpfHdr": "syscall", - "syscall.BpfHeadercmpl": "syscall", - "syscall.BpfInsn": "syscall", - "syscall.BpfInterface": "syscall", - "syscall.BpfJump": "syscall", - "syscall.BpfProgram": "syscall", - "syscall.BpfStat": "syscall", - "syscall.BpfStats": "syscall", - "syscall.BpfStmt": "syscall", - "syscall.BpfTimeout": "syscall", - "syscall.BpfTimeval": "syscall", - "syscall.BpfVersion": "syscall", - "syscall.BpfZbuf": "syscall", - "syscall.BpfZbufHeader": "syscall", - "syscall.ByHandleFileInformation": "syscall", 
- "syscall.BytePtrFromString": "syscall", - "syscall.ByteSliceFromString": "syscall", - "syscall.CCR0_FLUSH": "syscall", - "syscall.CERT_CHAIN_POLICY_AUTHENTICODE": "syscall", - "syscall.CERT_CHAIN_POLICY_AUTHENTICODE_TS": "syscall", - "syscall.CERT_CHAIN_POLICY_BASE": "syscall", - "syscall.CERT_CHAIN_POLICY_BASIC_CONSTRAINTS": "syscall", - "syscall.CERT_CHAIN_POLICY_EV": "syscall", - "syscall.CERT_CHAIN_POLICY_MICROSOFT_ROOT": "syscall", - "syscall.CERT_CHAIN_POLICY_NT_AUTH": "syscall", - "syscall.CERT_CHAIN_POLICY_SSL": "syscall", - "syscall.CERT_E_CN_NO_MATCH": "syscall", - "syscall.CERT_E_EXPIRED": "syscall", - "syscall.CERT_E_PURPOSE": "syscall", - "syscall.CERT_E_ROLE": "syscall", - "syscall.CERT_E_UNTRUSTEDROOT": "syscall", - "syscall.CERT_STORE_ADD_ALWAYS": "syscall", - "syscall.CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG": "syscall", - "syscall.CERT_STORE_PROV_MEMORY": "syscall", - "syscall.CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT": "syscall", - "syscall.CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT": "syscall", - "syscall.CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT": "syscall", - "syscall.CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT": "syscall", - "syscall.CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT": "syscall", - "syscall.CERT_TRUST_INVALID_BASIC_CONSTRAINTS": "syscall", - "syscall.CERT_TRUST_INVALID_EXTENSION": "syscall", - "syscall.CERT_TRUST_INVALID_NAME_CONSTRAINTS": "syscall", - "syscall.CERT_TRUST_INVALID_POLICY_CONSTRAINTS": "syscall", - "syscall.CERT_TRUST_IS_CYCLIC": "syscall", - "syscall.CERT_TRUST_IS_EXPLICIT_DISTRUST": "syscall", - "syscall.CERT_TRUST_IS_NOT_SIGNATURE_VALID": "syscall", - "syscall.CERT_TRUST_IS_NOT_TIME_VALID": "syscall", - "syscall.CERT_TRUST_IS_NOT_VALID_FOR_USAGE": "syscall", - "syscall.CERT_TRUST_IS_OFFLINE_REVOCATION": "syscall", - "syscall.CERT_TRUST_IS_REVOKED": "syscall", - "syscall.CERT_TRUST_IS_UNTRUSTED_ROOT": "syscall", - "syscall.CERT_TRUST_NO_ERROR": "syscall", - "syscall.CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY": "syscall", - "syscall.CERT_TRUST_REVOCATION_STATUS_UNKNOWN": "syscall", - "syscall.CFLUSH": "syscall", - "syscall.CLOCAL": "syscall", - "syscall.CLONE_CHILD_CLEARTID": "syscall", - "syscall.CLONE_CHILD_SETTID": "syscall", - "syscall.CLONE_CSIGNAL": "syscall", - "syscall.CLONE_DETACHED": "syscall", - "syscall.CLONE_FILES": "syscall", - "syscall.CLONE_FS": "syscall", - "syscall.CLONE_IO": "syscall", - "syscall.CLONE_NEWIPC": "syscall", - "syscall.CLONE_NEWNET": "syscall", - "syscall.CLONE_NEWNS": "syscall", - "syscall.CLONE_NEWPID": "syscall", - "syscall.CLONE_NEWUSER": "syscall", - "syscall.CLONE_NEWUTS": "syscall", - "syscall.CLONE_PARENT": "syscall", - "syscall.CLONE_PARENT_SETTID": "syscall", - "syscall.CLONE_PID": "syscall", - "syscall.CLONE_PTRACE": "syscall", - "syscall.CLONE_SETTLS": "syscall", - "syscall.CLONE_SIGHAND": "syscall", - "syscall.CLONE_SYSVSEM": "syscall", - "syscall.CLONE_THREAD": "syscall", - "syscall.CLONE_UNTRACED": "syscall", - "syscall.CLONE_VFORK": "syscall", - "syscall.CLONE_VM": "syscall", - "syscall.CPUID_CFLUSH": "syscall", - "syscall.CREAD": "syscall", - "syscall.CREATE_ALWAYS": "syscall", - "syscall.CREATE_NEW": "syscall", - "syscall.CREATE_NEW_PROCESS_GROUP": "syscall", - "syscall.CREATE_UNICODE_ENVIRONMENT": "syscall", - "syscall.CRYPT_DEFAULT_CONTAINER_OPTIONAL": "syscall", - "syscall.CRYPT_DELETEKEYSET": "syscall", - "syscall.CRYPT_MACHINE_KEYSET": "syscall", - "syscall.CRYPT_NEWKEYSET": "syscall", - "syscall.CRYPT_SILENT": "syscall", - "syscall.CRYPT_VERIFYCONTEXT": "syscall", - "syscall.CS5": 
"syscall", - "syscall.CS6": "syscall", - "syscall.CS7": "syscall", - "syscall.CS8": "syscall", - "syscall.CSIZE": "syscall", - "syscall.CSTART": "syscall", - "syscall.CSTATUS": "syscall", - "syscall.CSTOP": "syscall", - "syscall.CSTOPB": "syscall", - "syscall.CSUSP": "syscall", - "syscall.CTL_MAXNAME": "syscall", - "syscall.CTL_NET": "syscall", - "syscall.CTL_QUERY": "syscall", - "syscall.CTRL_BREAK_EVENT": "syscall", - "syscall.CTRL_C_EVENT": "syscall", - "syscall.CancelIo": "syscall", - "syscall.CancelIoEx": "syscall", - "syscall.CertAddCertificateContextToStore": "syscall", - "syscall.CertChainContext": "syscall", - "syscall.CertChainElement": "syscall", - "syscall.CertChainPara": "syscall", - "syscall.CertChainPolicyPara": "syscall", - "syscall.CertChainPolicyStatus": "syscall", - "syscall.CertCloseStore": "syscall", - "syscall.CertContext": "syscall", - "syscall.CertCreateCertificateContext": "syscall", - "syscall.CertEnhKeyUsage": "syscall", - "syscall.CertEnumCertificatesInStore": "syscall", - "syscall.CertFreeCertificateChain": "syscall", - "syscall.CertFreeCertificateContext": "syscall", - "syscall.CertGetCertificateChain": "syscall", - "syscall.CertOpenStore": "syscall", - "syscall.CertOpenSystemStore": "syscall", - "syscall.CertRevocationInfo": "syscall", - "syscall.CertSimpleChain": "syscall", - "syscall.CertTrustStatus": "syscall", - "syscall.CertUsageMatch": "syscall", - "syscall.CertVerifyCertificateChainPolicy": "syscall", - "syscall.Chdir": "syscall", - "syscall.CheckBpfVersion": "syscall", - "syscall.Chflags": "syscall", - "syscall.Chmod": "syscall", - "syscall.Chown": "syscall", - "syscall.Chroot": "syscall", - "syscall.Clearenv": "syscall", - "syscall.Close": "syscall", - "syscall.CloseHandle": "syscall", - "syscall.CloseOnExec": "syscall", - "syscall.Closesocket": "syscall", - "syscall.CmsgLen": "syscall", - "syscall.CmsgSpace": "syscall", - "syscall.Cmsghdr": "syscall", - "syscall.CommandLineToArgv": "syscall", - "syscall.ComputerName": "syscall", - "syscall.Connect": "syscall", - "syscall.ConnectEx": "syscall", - "syscall.ConvertSidToStringSid": "syscall", - "syscall.ConvertStringSidToSid": "syscall", - "syscall.CopySid": "syscall", - "syscall.Creat": "syscall", - "syscall.CreateDirectory": "syscall", - "syscall.CreateFile": "syscall", - "syscall.CreateFileMapping": "syscall", - "syscall.CreateHardLink": "syscall", - "syscall.CreateIoCompletionPort": "syscall", - "syscall.CreatePipe": "syscall", - "syscall.CreateProcess": "syscall", - "syscall.CreateSymbolicLink": "syscall", - "syscall.CreateToolhelp32Snapshot": "syscall", - "syscall.Credential": "syscall", - "syscall.CryptAcquireContext": "syscall", - "syscall.CryptGenRandom": "syscall", - "syscall.CryptReleaseContext": "syscall", - "syscall.DIOCBSFLUSH": "syscall", - "syscall.DIOCOSFPFLUSH": "syscall", - "syscall.DLL": "syscall", - "syscall.DLLError": "syscall", - "syscall.DLT_A429": "syscall", - "syscall.DLT_A653_ICM": "syscall", - "syscall.DLT_AIRONET_HEADER": "syscall", - "syscall.DLT_AOS": "syscall", - "syscall.DLT_APPLE_IP_OVER_IEEE1394": "syscall", - "syscall.DLT_ARCNET": "syscall", - "syscall.DLT_ARCNET_LINUX": "syscall", - "syscall.DLT_ATM_CLIP": "syscall", - "syscall.DLT_ATM_RFC1483": "syscall", - "syscall.DLT_AURORA": "syscall", - "syscall.DLT_AX25": "syscall", - "syscall.DLT_AX25_KISS": "syscall", - "syscall.DLT_BACNET_MS_TP": "syscall", - "syscall.DLT_BLUETOOTH_HCI_H4": "syscall", - "syscall.DLT_BLUETOOTH_HCI_H4_WITH_PHDR": "syscall", - "syscall.DLT_CAN20B": "syscall", - "syscall.DLT_CAN_SOCKETCAN": 
"syscall", - "syscall.DLT_CHAOS": "syscall", - "syscall.DLT_CHDLC": "syscall", - "syscall.DLT_CISCO_IOS": "syscall", - "syscall.DLT_C_HDLC": "syscall", - "syscall.DLT_C_HDLC_WITH_DIR": "syscall", - "syscall.DLT_DBUS": "syscall", - "syscall.DLT_DECT": "syscall", - "syscall.DLT_DOCSIS": "syscall", - "syscall.DLT_DVB_CI": "syscall", - "syscall.DLT_ECONET": "syscall", - "syscall.DLT_EN10MB": "syscall", - "syscall.DLT_EN3MB": "syscall", - "syscall.DLT_ENC": "syscall", - "syscall.DLT_ERF": "syscall", - "syscall.DLT_ERF_ETH": "syscall", - "syscall.DLT_ERF_POS": "syscall", - "syscall.DLT_FC_2": "syscall", - "syscall.DLT_FC_2_WITH_FRAME_DELIMS": "syscall", - "syscall.DLT_FDDI": "syscall", - "syscall.DLT_FLEXRAY": "syscall", - "syscall.DLT_FRELAY": "syscall", - "syscall.DLT_FRELAY_WITH_DIR": "syscall", - "syscall.DLT_GCOM_SERIAL": "syscall", - "syscall.DLT_GCOM_T1E1": "syscall", - "syscall.DLT_GPF_F": "syscall", - "syscall.DLT_GPF_T": "syscall", - "syscall.DLT_GPRS_LLC": "syscall", - "syscall.DLT_GSMTAP_ABIS": "syscall", - "syscall.DLT_GSMTAP_UM": "syscall", - "syscall.DLT_HDLC": "syscall", - "syscall.DLT_HHDLC": "syscall", - "syscall.DLT_HIPPI": "syscall", - "syscall.DLT_IBM_SN": "syscall", - "syscall.DLT_IBM_SP": "syscall", - "syscall.DLT_IEEE802": "syscall", - "syscall.DLT_IEEE802_11": "syscall", - "syscall.DLT_IEEE802_11_RADIO": "syscall", - "syscall.DLT_IEEE802_11_RADIO_AVS": "syscall", - "syscall.DLT_IEEE802_15_4": "syscall", - "syscall.DLT_IEEE802_15_4_LINUX": "syscall", - "syscall.DLT_IEEE802_15_4_NOFCS": "syscall", - "syscall.DLT_IEEE802_15_4_NONASK_PHY": "syscall", - "syscall.DLT_IEEE802_16_MAC_CPS": "syscall", - "syscall.DLT_IEEE802_16_MAC_CPS_RADIO": "syscall", - "syscall.DLT_IPFILTER": "syscall", - "syscall.DLT_IPMB": "syscall", - "syscall.DLT_IPMB_LINUX": "syscall", - "syscall.DLT_IPNET": "syscall", - "syscall.DLT_IPOIB": "syscall", - "syscall.DLT_IPV4": "syscall", - "syscall.DLT_IPV6": "syscall", - "syscall.DLT_IP_OVER_FC": "syscall", - "syscall.DLT_JUNIPER_ATM1": "syscall", - "syscall.DLT_JUNIPER_ATM2": "syscall", - "syscall.DLT_JUNIPER_ATM_CEMIC": "syscall", - "syscall.DLT_JUNIPER_CHDLC": "syscall", - "syscall.DLT_JUNIPER_ES": "syscall", - "syscall.DLT_JUNIPER_ETHER": "syscall", - "syscall.DLT_JUNIPER_FIBRECHANNEL": "syscall", - "syscall.DLT_JUNIPER_FRELAY": "syscall", - "syscall.DLT_JUNIPER_GGSN": "syscall", - "syscall.DLT_JUNIPER_ISM": "syscall", - "syscall.DLT_JUNIPER_MFR": "syscall", - "syscall.DLT_JUNIPER_MLFR": "syscall", - "syscall.DLT_JUNIPER_MLPPP": "syscall", - "syscall.DLT_JUNIPER_MONITOR": "syscall", - "syscall.DLT_JUNIPER_PIC_PEER": "syscall", - "syscall.DLT_JUNIPER_PPP": "syscall", - "syscall.DLT_JUNIPER_PPPOE": "syscall", - "syscall.DLT_JUNIPER_PPPOE_ATM": "syscall", - "syscall.DLT_JUNIPER_SERVICES": "syscall", - "syscall.DLT_JUNIPER_SRX_E2E": "syscall", - "syscall.DLT_JUNIPER_ST": "syscall", - "syscall.DLT_JUNIPER_VP": "syscall", - "syscall.DLT_JUNIPER_VS": "syscall", - "syscall.DLT_LAPB_WITH_DIR": "syscall", - "syscall.DLT_LAPD": "syscall", - "syscall.DLT_LIN": "syscall", - "syscall.DLT_LINUX_EVDEV": "syscall", - "syscall.DLT_LINUX_IRDA": "syscall", - "syscall.DLT_LINUX_LAPD": "syscall", - "syscall.DLT_LINUX_PPP_WITHDIRECTION": "syscall", - "syscall.DLT_LINUX_SLL": "syscall", - "syscall.DLT_LOOP": "syscall", - "syscall.DLT_LTALK": "syscall", - "syscall.DLT_MATCHING_MAX": "syscall", - "syscall.DLT_MATCHING_MIN": "syscall", - "syscall.DLT_MFR": "syscall", - "syscall.DLT_MOST": "syscall", - "syscall.DLT_MPEG_2_TS": "syscall", - "syscall.DLT_MPLS": "syscall", - 
"syscall.DLT_MTP2": "syscall", - "syscall.DLT_MTP2_WITH_PHDR": "syscall", - "syscall.DLT_MTP3": "syscall", - "syscall.DLT_MUX27010": "syscall", - "syscall.DLT_NETANALYZER": "syscall", - "syscall.DLT_NETANALYZER_TRANSPARENT": "syscall", - "syscall.DLT_NFC_LLCP": "syscall", - "syscall.DLT_NFLOG": "syscall", - "syscall.DLT_NG40": "syscall", - "syscall.DLT_NULL": "syscall", - "syscall.DLT_PCI_EXP": "syscall", - "syscall.DLT_PFLOG": "syscall", - "syscall.DLT_PFSYNC": "syscall", - "syscall.DLT_PPI": "syscall", - "syscall.DLT_PPP": "syscall", - "syscall.DLT_PPP_BSDOS": "syscall", - "syscall.DLT_PPP_ETHER": "syscall", - "syscall.DLT_PPP_PPPD": "syscall", - "syscall.DLT_PPP_SERIAL": "syscall", - "syscall.DLT_PPP_WITH_DIR": "syscall", - "syscall.DLT_PPP_WITH_DIRECTION": "syscall", - "syscall.DLT_PRISM_HEADER": "syscall", - "syscall.DLT_PRONET": "syscall", - "syscall.DLT_RAIF1": "syscall", - "syscall.DLT_RAW": "syscall", - "syscall.DLT_RAWAF_MASK": "syscall", - "syscall.DLT_RIO": "syscall", - "syscall.DLT_SCCP": "syscall", - "syscall.DLT_SITA": "syscall", - "syscall.DLT_SLIP": "syscall", - "syscall.DLT_SLIP_BSDOS": "syscall", - "syscall.DLT_STANAG_5066_D_PDU": "syscall", - "syscall.DLT_SUNATM": "syscall", - "syscall.DLT_SYMANTEC_FIREWALL": "syscall", - "syscall.DLT_TZSP": "syscall", - "syscall.DLT_USB": "syscall", - "syscall.DLT_USB_LINUX": "syscall", - "syscall.DLT_USB_LINUX_MMAPPED": "syscall", - "syscall.DLT_USER0": "syscall", - "syscall.DLT_USER1": "syscall", - "syscall.DLT_USER10": "syscall", - "syscall.DLT_USER11": "syscall", - "syscall.DLT_USER12": "syscall", - "syscall.DLT_USER13": "syscall", - "syscall.DLT_USER14": "syscall", - "syscall.DLT_USER15": "syscall", - "syscall.DLT_USER2": "syscall", - "syscall.DLT_USER3": "syscall", - "syscall.DLT_USER4": "syscall", - "syscall.DLT_USER5": "syscall", - "syscall.DLT_USER6": "syscall", - "syscall.DLT_USER7": "syscall", - "syscall.DLT_USER8": "syscall", - "syscall.DLT_USER9": "syscall", - "syscall.DLT_WIHART": "syscall", - "syscall.DLT_X2E_SERIAL": "syscall", - "syscall.DLT_X2E_XORAYA": "syscall", - "syscall.DNSMXData": "syscall", - "syscall.DNSPTRData": "syscall", - "syscall.DNSRecord": "syscall", - "syscall.DNSSRVData": "syscall", - "syscall.DNSTXTData": "syscall", - "syscall.DNS_INFO_NO_RECORDS": "syscall", - "syscall.DNS_TYPE_A": "syscall", - "syscall.DNS_TYPE_A6": "syscall", - "syscall.DNS_TYPE_AAAA": "syscall", - "syscall.DNS_TYPE_ADDRS": "syscall", - "syscall.DNS_TYPE_AFSDB": "syscall", - "syscall.DNS_TYPE_ALL": "syscall", - "syscall.DNS_TYPE_ANY": "syscall", - "syscall.DNS_TYPE_ATMA": "syscall", - "syscall.DNS_TYPE_AXFR": "syscall", - "syscall.DNS_TYPE_CERT": "syscall", - "syscall.DNS_TYPE_CNAME": "syscall", - "syscall.DNS_TYPE_DHCID": "syscall", - "syscall.DNS_TYPE_DNAME": "syscall", - "syscall.DNS_TYPE_DNSKEY": "syscall", - "syscall.DNS_TYPE_DS": "syscall", - "syscall.DNS_TYPE_EID": "syscall", - "syscall.DNS_TYPE_GID": "syscall", - "syscall.DNS_TYPE_GPOS": "syscall", - "syscall.DNS_TYPE_HINFO": "syscall", - "syscall.DNS_TYPE_ISDN": "syscall", - "syscall.DNS_TYPE_IXFR": "syscall", - "syscall.DNS_TYPE_KEY": "syscall", - "syscall.DNS_TYPE_KX": "syscall", - "syscall.DNS_TYPE_LOC": "syscall", - "syscall.DNS_TYPE_MAILA": "syscall", - "syscall.DNS_TYPE_MAILB": "syscall", - "syscall.DNS_TYPE_MB": "syscall", - "syscall.DNS_TYPE_MD": "syscall", - "syscall.DNS_TYPE_MF": "syscall", - "syscall.DNS_TYPE_MG": "syscall", - "syscall.DNS_TYPE_MINFO": "syscall", - "syscall.DNS_TYPE_MR": "syscall", - "syscall.DNS_TYPE_MX": "syscall", - "syscall.DNS_TYPE_NAPTR": 
"syscall", - "syscall.DNS_TYPE_NBSTAT": "syscall", - "syscall.DNS_TYPE_NIMLOC": "syscall", - "syscall.DNS_TYPE_NS": "syscall", - "syscall.DNS_TYPE_NSAP": "syscall", - "syscall.DNS_TYPE_NSAPPTR": "syscall", - "syscall.DNS_TYPE_NSEC": "syscall", - "syscall.DNS_TYPE_NULL": "syscall", - "syscall.DNS_TYPE_NXT": "syscall", - "syscall.DNS_TYPE_OPT": "syscall", - "syscall.DNS_TYPE_PTR": "syscall", - "syscall.DNS_TYPE_PX": "syscall", - "syscall.DNS_TYPE_RP": "syscall", - "syscall.DNS_TYPE_RRSIG": "syscall", - "syscall.DNS_TYPE_RT": "syscall", - "syscall.DNS_TYPE_SIG": "syscall", - "syscall.DNS_TYPE_SINK": "syscall", - "syscall.DNS_TYPE_SOA": "syscall", - "syscall.DNS_TYPE_SRV": "syscall", - "syscall.DNS_TYPE_TEXT": "syscall", - "syscall.DNS_TYPE_TKEY": "syscall", - "syscall.DNS_TYPE_TSIG": "syscall", - "syscall.DNS_TYPE_UID": "syscall", - "syscall.DNS_TYPE_UINFO": "syscall", - "syscall.DNS_TYPE_UNSPEC": "syscall", - "syscall.DNS_TYPE_WINS": "syscall", - "syscall.DNS_TYPE_WINSR": "syscall", - "syscall.DNS_TYPE_WKS": "syscall", - "syscall.DNS_TYPE_X25": "syscall", - "syscall.DT_BLK": "syscall", - "syscall.DT_CHR": "syscall", - "syscall.DT_DIR": "syscall", - "syscall.DT_FIFO": "syscall", - "syscall.DT_LNK": "syscall", - "syscall.DT_REG": "syscall", - "syscall.DT_SOCK": "syscall", - "syscall.DT_UNKNOWN": "syscall", - "syscall.DT_WHT": "syscall", - "syscall.DUPLICATE_CLOSE_SOURCE": "syscall", - "syscall.DUPLICATE_SAME_ACCESS": "syscall", - "syscall.DeleteFile": "syscall", - "syscall.DetachLsf": "syscall", - "syscall.DeviceIoControl": "syscall", - "syscall.Dirent": "syscall", - "syscall.DnsNameCompare": "syscall", - "syscall.DnsQuery": "syscall", - "syscall.DnsRecordListFree": "syscall", - "syscall.DnsSectionAdditional": "syscall", - "syscall.DnsSectionAnswer": "syscall", - "syscall.DnsSectionAuthority": "syscall", - "syscall.DnsSectionQuestion": "syscall", - "syscall.Dup": "syscall", - "syscall.Dup2": "syscall", - "syscall.Dup3": "syscall", - "syscall.DuplicateHandle": "syscall", - "syscall.E2BIG": "syscall", - "syscall.EACCES": "syscall", - "syscall.EADDRINUSE": "syscall", - "syscall.EADDRNOTAVAIL": "syscall", - "syscall.EADV": "syscall", - "syscall.EAFNOSUPPORT": "syscall", - "syscall.EAGAIN": "syscall", - "syscall.EALREADY": "syscall", - "syscall.EAUTH": "syscall", - "syscall.EBADARCH": "syscall", - "syscall.EBADE": "syscall", - "syscall.EBADEXEC": "syscall", - "syscall.EBADF": "syscall", - "syscall.EBADFD": "syscall", - "syscall.EBADMACHO": "syscall", - "syscall.EBADMSG": "syscall", - "syscall.EBADR": "syscall", - "syscall.EBADRPC": "syscall", - "syscall.EBADRQC": "syscall", - "syscall.EBADSLT": "syscall", - "syscall.EBFONT": "syscall", - "syscall.EBUSY": "syscall", - "syscall.ECANCELED": "syscall", - "syscall.ECAPMODE": "syscall", - "syscall.ECHILD": "syscall", - "syscall.ECHO": "syscall", - "syscall.ECHOCTL": "syscall", - "syscall.ECHOE": "syscall", - "syscall.ECHOK": "syscall", - "syscall.ECHOKE": "syscall", - "syscall.ECHONL": "syscall", - "syscall.ECHOPRT": "syscall", - "syscall.ECHRNG": "syscall", - "syscall.ECOMM": "syscall", - "syscall.ECONNABORTED": "syscall", - "syscall.ECONNREFUSED": "syscall", - "syscall.ECONNRESET": "syscall", - "syscall.EDEADLK": "syscall", - "syscall.EDEADLOCK": "syscall", - "syscall.EDESTADDRREQ": "syscall", - "syscall.EDEVERR": "syscall", - "syscall.EDOM": "syscall", - "syscall.EDOOFUS": "syscall", - "syscall.EDOTDOT": "syscall", - "syscall.EDQUOT": "syscall", - "syscall.EEXIST": "syscall", - "syscall.EFAULT": "syscall", - "syscall.EFBIG": "syscall", - 
"syscall.EFER_LMA": "syscall", - "syscall.EFER_LME": "syscall", - "syscall.EFER_NXE": "syscall", - "syscall.EFER_SCE": "syscall", - "syscall.EFTYPE": "syscall", - "syscall.EHOSTDOWN": "syscall", - "syscall.EHOSTUNREACH": "syscall", - "syscall.EHWPOISON": "syscall", - "syscall.EIDRM": "syscall", - "syscall.EILSEQ": "syscall", - "syscall.EINPROGRESS": "syscall", - "syscall.EINTR": "syscall", - "syscall.EINVAL": "syscall", - "syscall.EIO": "syscall", - "syscall.EIPSEC": "syscall", - "syscall.EISCONN": "syscall", - "syscall.EISDIR": "syscall", - "syscall.EISNAM": "syscall", - "syscall.EKEYEXPIRED": "syscall", - "syscall.EKEYREJECTED": "syscall", - "syscall.EKEYREVOKED": "syscall", - "syscall.EL2HLT": "syscall", - "syscall.EL2NSYNC": "syscall", - "syscall.EL3HLT": "syscall", - "syscall.EL3RST": "syscall", - "syscall.ELAST": "syscall", - "syscall.ELF_NGREG": "syscall", - "syscall.ELF_PRARGSZ": "syscall", - "syscall.ELIBACC": "syscall", - "syscall.ELIBBAD": "syscall", - "syscall.ELIBEXEC": "syscall", - "syscall.ELIBMAX": "syscall", - "syscall.ELIBSCN": "syscall", - "syscall.ELNRNG": "syscall", - "syscall.ELOOP": "syscall", - "syscall.EMEDIUMTYPE": "syscall", - "syscall.EMFILE": "syscall", - "syscall.EMLINK": "syscall", - "syscall.EMSGSIZE": "syscall", - "syscall.EMT_TAGOVF": "syscall", - "syscall.EMULTIHOP": "syscall", - "syscall.EMUL_ENABLED": "syscall", - "syscall.EMUL_LINUX": "syscall", - "syscall.EMUL_LINUX32": "syscall", - "syscall.EMUL_MAXID": "syscall", - "syscall.EMUL_NATIVE": "syscall", - "syscall.ENAMETOOLONG": "syscall", - "syscall.ENAVAIL": "syscall", - "syscall.ENDRUNDISC": "syscall", - "syscall.ENEEDAUTH": "syscall", - "syscall.ENETDOWN": "syscall", - "syscall.ENETRESET": "syscall", - "syscall.ENETUNREACH": "syscall", - "syscall.ENFILE": "syscall", - "syscall.ENOANO": "syscall", - "syscall.ENOATTR": "syscall", - "syscall.ENOBUFS": "syscall", - "syscall.ENOCSI": "syscall", - "syscall.ENODATA": "syscall", - "syscall.ENODEV": "syscall", - "syscall.ENOENT": "syscall", - "syscall.ENOEXEC": "syscall", - "syscall.ENOKEY": "syscall", - "syscall.ENOLCK": "syscall", - "syscall.ENOLINK": "syscall", - "syscall.ENOMEDIUM": "syscall", - "syscall.ENOMEM": "syscall", - "syscall.ENOMSG": "syscall", - "syscall.ENONET": "syscall", - "syscall.ENOPKG": "syscall", - "syscall.ENOPOLICY": "syscall", - "syscall.ENOPROTOOPT": "syscall", - "syscall.ENOSPC": "syscall", - "syscall.ENOSR": "syscall", - "syscall.ENOSTR": "syscall", - "syscall.ENOSYS": "syscall", - "syscall.ENOTBLK": "syscall", - "syscall.ENOTCAPABLE": "syscall", - "syscall.ENOTCONN": "syscall", - "syscall.ENOTDIR": "syscall", - "syscall.ENOTEMPTY": "syscall", - "syscall.ENOTNAM": "syscall", - "syscall.ENOTRECOVERABLE": "syscall", - "syscall.ENOTSOCK": "syscall", - "syscall.ENOTSUP": "syscall", - "syscall.ENOTTY": "syscall", - "syscall.ENOTUNIQ": "syscall", - "syscall.ENXIO": "syscall", - "syscall.EN_SW_CTL_INF": "syscall", - "syscall.EN_SW_CTL_PREC": "syscall", - "syscall.EN_SW_CTL_ROUND": "syscall", - "syscall.EN_SW_DATACHAIN": "syscall", - "syscall.EN_SW_DENORM": "syscall", - "syscall.EN_SW_INVOP": "syscall", - "syscall.EN_SW_OVERFLOW": "syscall", - "syscall.EN_SW_PRECLOSS": "syscall", - "syscall.EN_SW_UNDERFLOW": "syscall", - "syscall.EN_SW_ZERODIV": "syscall", - "syscall.EOPNOTSUPP": "syscall", - "syscall.EOVERFLOW": "syscall", - "syscall.EOWNERDEAD": "syscall", - "syscall.EPERM": "syscall", - "syscall.EPFNOSUPPORT": "syscall", - "syscall.EPIPE": "syscall", - "syscall.EPOLLERR": "syscall", - "syscall.EPOLLET": "syscall", - "syscall.EPOLLHUP": 
"syscall", - "syscall.EPOLLIN": "syscall", - "syscall.EPOLLMSG": "syscall", - "syscall.EPOLLONESHOT": "syscall", - "syscall.EPOLLOUT": "syscall", - "syscall.EPOLLPRI": "syscall", - "syscall.EPOLLRDBAND": "syscall", - "syscall.EPOLLRDHUP": "syscall", - "syscall.EPOLLRDNORM": "syscall", - "syscall.EPOLLWRBAND": "syscall", - "syscall.EPOLLWRNORM": "syscall", - "syscall.EPOLL_CLOEXEC": "syscall", - "syscall.EPOLL_CTL_ADD": "syscall", - "syscall.EPOLL_CTL_DEL": "syscall", - "syscall.EPOLL_CTL_MOD": "syscall", - "syscall.EPOLL_NONBLOCK": "syscall", - "syscall.EPROCLIM": "syscall", - "syscall.EPROCUNAVAIL": "syscall", - "syscall.EPROGMISMATCH": "syscall", - "syscall.EPROGUNAVAIL": "syscall", - "syscall.EPROTO": "syscall", - "syscall.EPROTONOSUPPORT": "syscall", - "syscall.EPROTOTYPE": "syscall", - "syscall.EPWROFF": "syscall", - "syscall.ERANGE": "syscall", - "syscall.EREMCHG": "syscall", - "syscall.EREMOTE": "syscall", - "syscall.EREMOTEIO": "syscall", - "syscall.ERESTART": "syscall", - "syscall.ERFKILL": "syscall", - "syscall.EROFS": "syscall", - "syscall.ERPCMISMATCH": "syscall", - "syscall.ERROR_ACCESS_DENIED": "syscall", - "syscall.ERROR_ALREADY_EXISTS": "syscall", - "syscall.ERROR_BROKEN_PIPE": "syscall", - "syscall.ERROR_BUFFER_OVERFLOW": "syscall", - "syscall.ERROR_DIR_NOT_EMPTY": "syscall", - "syscall.ERROR_ENVVAR_NOT_FOUND": "syscall", - "syscall.ERROR_FILE_EXISTS": "syscall", - "syscall.ERROR_FILE_NOT_FOUND": "syscall", - "syscall.ERROR_HANDLE_EOF": "syscall", - "syscall.ERROR_INSUFFICIENT_BUFFER": "syscall", - "syscall.ERROR_IO_PENDING": "syscall", - "syscall.ERROR_MOD_NOT_FOUND": "syscall", - "syscall.ERROR_MORE_DATA": "syscall", - "syscall.ERROR_NETNAME_DELETED": "syscall", - "syscall.ERROR_NOT_FOUND": "syscall", - "syscall.ERROR_NO_MORE_FILES": "syscall", - "syscall.ERROR_OPERATION_ABORTED": "syscall", - "syscall.ERROR_PATH_NOT_FOUND": "syscall", - "syscall.ERROR_PRIVILEGE_NOT_HELD": "syscall", - "syscall.ERROR_PROC_NOT_FOUND": "syscall", - "syscall.ESHLIBVERS": "syscall", - "syscall.ESHUTDOWN": "syscall", - "syscall.ESOCKTNOSUPPORT": "syscall", - "syscall.ESPIPE": "syscall", - "syscall.ESRCH": "syscall", - "syscall.ESRMNT": "syscall", - "syscall.ESTALE": "syscall", - "syscall.ESTRPIPE": "syscall", - "syscall.ETHERCAP_JUMBO_MTU": "syscall", - "syscall.ETHERCAP_VLAN_HWTAGGING": "syscall", - "syscall.ETHERCAP_VLAN_MTU": "syscall", - "syscall.ETHERMIN": "syscall", - "syscall.ETHERMTU": "syscall", - "syscall.ETHERMTU_JUMBO": "syscall", - "syscall.ETHERTYPE_8023": "syscall", - "syscall.ETHERTYPE_AARP": "syscall", - "syscall.ETHERTYPE_ACCTON": "syscall", - "syscall.ETHERTYPE_AEONIC": "syscall", - "syscall.ETHERTYPE_ALPHA": "syscall", - "syscall.ETHERTYPE_AMBER": "syscall", - "syscall.ETHERTYPE_AMOEBA": "syscall", - "syscall.ETHERTYPE_AOE": "syscall", - "syscall.ETHERTYPE_APOLLO": "syscall", - "syscall.ETHERTYPE_APOLLODOMAIN": "syscall", - "syscall.ETHERTYPE_APPLETALK": "syscall", - "syscall.ETHERTYPE_APPLITEK": "syscall", - "syscall.ETHERTYPE_ARGONAUT": "syscall", - "syscall.ETHERTYPE_ARP": "syscall", - "syscall.ETHERTYPE_AT": "syscall", - "syscall.ETHERTYPE_ATALK": "syscall", - "syscall.ETHERTYPE_ATOMIC": "syscall", - "syscall.ETHERTYPE_ATT": "syscall", - "syscall.ETHERTYPE_ATTSTANFORD": "syscall", - "syscall.ETHERTYPE_AUTOPHON": "syscall", - "syscall.ETHERTYPE_AXIS": "syscall", - "syscall.ETHERTYPE_BCLOOP": "syscall", - "syscall.ETHERTYPE_BOFL": "syscall", - "syscall.ETHERTYPE_CABLETRON": "syscall", - "syscall.ETHERTYPE_CHAOS": "syscall", - "syscall.ETHERTYPE_COMDESIGN": "syscall", - 
"syscall.ETHERTYPE_COMPUGRAPHIC": "syscall", - "syscall.ETHERTYPE_COUNTERPOINT": "syscall", - "syscall.ETHERTYPE_CRONUS": "syscall", - "syscall.ETHERTYPE_CRONUSVLN": "syscall", - "syscall.ETHERTYPE_DCA": "syscall", - "syscall.ETHERTYPE_DDE": "syscall", - "syscall.ETHERTYPE_DEBNI": "syscall", - "syscall.ETHERTYPE_DECAM": "syscall", - "syscall.ETHERTYPE_DECCUST": "syscall", - "syscall.ETHERTYPE_DECDIAG": "syscall", - "syscall.ETHERTYPE_DECDNS": "syscall", - "syscall.ETHERTYPE_DECDTS": "syscall", - "syscall.ETHERTYPE_DECEXPER": "syscall", - "syscall.ETHERTYPE_DECLAST": "syscall", - "syscall.ETHERTYPE_DECLTM": "syscall", - "syscall.ETHERTYPE_DECMUMPS": "syscall", - "syscall.ETHERTYPE_DECNETBIOS": "syscall", - "syscall.ETHERTYPE_DELTACON": "syscall", - "syscall.ETHERTYPE_DIDDLE": "syscall", - "syscall.ETHERTYPE_DLOG1": "syscall", - "syscall.ETHERTYPE_DLOG2": "syscall", - "syscall.ETHERTYPE_DN": "syscall", - "syscall.ETHERTYPE_DOGFIGHT": "syscall", - "syscall.ETHERTYPE_DSMD": "syscall", - "syscall.ETHERTYPE_ECMA": "syscall", - "syscall.ETHERTYPE_ENCRYPT": "syscall", - "syscall.ETHERTYPE_ES": "syscall", - "syscall.ETHERTYPE_EXCELAN": "syscall", - "syscall.ETHERTYPE_EXPERDATA": "syscall", - "syscall.ETHERTYPE_FLIP": "syscall", - "syscall.ETHERTYPE_FLOWCONTROL": "syscall", - "syscall.ETHERTYPE_FRARP": "syscall", - "syscall.ETHERTYPE_GENDYN": "syscall", - "syscall.ETHERTYPE_HAYES": "syscall", - "syscall.ETHERTYPE_HIPPI_FP": "syscall", - "syscall.ETHERTYPE_HITACHI": "syscall", - "syscall.ETHERTYPE_HP": "syscall", - "syscall.ETHERTYPE_IEEEPUP": "syscall", - "syscall.ETHERTYPE_IEEEPUPAT": "syscall", - "syscall.ETHERTYPE_IMLBL": "syscall", - "syscall.ETHERTYPE_IMLBLDIAG": "syscall", - "syscall.ETHERTYPE_IP": "syscall", - "syscall.ETHERTYPE_IPAS": "syscall", - "syscall.ETHERTYPE_IPV6": "syscall", - "syscall.ETHERTYPE_IPX": "syscall", - "syscall.ETHERTYPE_IPXNEW": "syscall", - "syscall.ETHERTYPE_KALPANA": "syscall", - "syscall.ETHERTYPE_LANBRIDGE": "syscall", - "syscall.ETHERTYPE_LANPROBE": "syscall", - "syscall.ETHERTYPE_LAT": "syscall", - "syscall.ETHERTYPE_LBACK": "syscall", - "syscall.ETHERTYPE_LITTLE": "syscall", - "syscall.ETHERTYPE_LLDP": "syscall", - "syscall.ETHERTYPE_LOGICRAFT": "syscall", - "syscall.ETHERTYPE_LOOPBACK": "syscall", - "syscall.ETHERTYPE_MATRA": "syscall", - "syscall.ETHERTYPE_MAX": "syscall", - "syscall.ETHERTYPE_MERIT": "syscall", - "syscall.ETHERTYPE_MICP": "syscall", - "syscall.ETHERTYPE_MOPDL": "syscall", - "syscall.ETHERTYPE_MOPRC": "syscall", - "syscall.ETHERTYPE_MOTOROLA": "syscall", - "syscall.ETHERTYPE_MPLS": "syscall", - "syscall.ETHERTYPE_MPLS_MCAST": "syscall", - "syscall.ETHERTYPE_MUMPS": "syscall", - "syscall.ETHERTYPE_NBPCC": "syscall", - "syscall.ETHERTYPE_NBPCLAIM": "syscall", - "syscall.ETHERTYPE_NBPCLREQ": "syscall", - "syscall.ETHERTYPE_NBPCLRSP": "syscall", - "syscall.ETHERTYPE_NBPCREQ": "syscall", - "syscall.ETHERTYPE_NBPCRSP": "syscall", - "syscall.ETHERTYPE_NBPDG": "syscall", - "syscall.ETHERTYPE_NBPDGB": "syscall", - "syscall.ETHERTYPE_NBPDLTE": "syscall", - "syscall.ETHERTYPE_NBPRAR": "syscall", - "syscall.ETHERTYPE_NBPRAS": "syscall", - "syscall.ETHERTYPE_NBPRST": "syscall", - "syscall.ETHERTYPE_NBPSCD": "syscall", - "syscall.ETHERTYPE_NBPVCD": "syscall", - "syscall.ETHERTYPE_NBS": "syscall", - "syscall.ETHERTYPE_NCD": "syscall", - "syscall.ETHERTYPE_NESTAR": "syscall", - "syscall.ETHERTYPE_NETBEUI": "syscall", - "syscall.ETHERTYPE_NOVELL": "syscall", - "syscall.ETHERTYPE_NS": "syscall", - "syscall.ETHERTYPE_NSAT": "syscall", - 
"syscall.ETHERTYPE_NSCOMPAT": "syscall", - "syscall.ETHERTYPE_NTRAILER": "syscall", - "syscall.ETHERTYPE_OS9": "syscall", - "syscall.ETHERTYPE_OS9NET": "syscall", - "syscall.ETHERTYPE_PACER": "syscall", - "syscall.ETHERTYPE_PAE": "syscall", - "syscall.ETHERTYPE_PCS": "syscall", - "syscall.ETHERTYPE_PLANNING": "syscall", - "syscall.ETHERTYPE_PPP": "syscall", - "syscall.ETHERTYPE_PPPOE": "syscall", - "syscall.ETHERTYPE_PPPOEDISC": "syscall", - "syscall.ETHERTYPE_PRIMENTS": "syscall", - "syscall.ETHERTYPE_PUP": "syscall", - "syscall.ETHERTYPE_PUPAT": "syscall", - "syscall.ETHERTYPE_QINQ": "syscall", - "syscall.ETHERTYPE_RACAL": "syscall", - "syscall.ETHERTYPE_RATIONAL": "syscall", - "syscall.ETHERTYPE_RAWFR": "syscall", - "syscall.ETHERTYPE_RCL": "syscall", - "syscall.ETHERTYPE_RDP": "syscall", - "syscall.ETHERTYPE_RETIX": "syscall", - "syscall.ETHERTYPE_REVARP": "syscall", - "syscall.ETHERTYPE_SCA": "syscall", - "syscall.ETHERTYPE_SECTRA": "syscall", - "syscall.ETHERTYPE_SECUREDATA": "syscall", - "syscall.ETHERTYPE_SGITW": "syscall", - "syscall.ETHERTYPE_SG_BOUNCE": "syscall", - "syscall.ETHERTYPE_SG_DIAG": "syscall", - "syscall.ETHERTYPE_SG_NETGAMES": "syscall", - "syscall.ETHERTYPE_SG_RESV": "syscall", - "syscall.ETHERTYPE_SIMNET": "syscall", - "syscall.ETHERTYPE_SLOW": "syscall", - "syscall.ETHERTYPE_SLOWPROTOCOLS": "syscall", - "syscall.ETHERTYPE_SNA": "syscall", - "syscall.ETHERTYPE_SNMP": "syscall", - "syscall.ETHERTYPE_SONIX": "syscall", - "syscall.ETHERTYPE_SPIDER": "syscall", - "syscall.ETHERTYPE_SPRITE": "syscall", - "syscall.ETHERTYPE_STP": "syscall", - "syscall.ETHERTYPE_TALARIS": "syscall", - "syscall.ETHERTYPE_TALARISMC": "syscall", - "syscall.ETHERTYPE_TCPCOMP": "syscall", - "syscall.ETHERTYPE_TCPSM": "syscall", - "syscall.ETHERTYPE_TEC": "syscall", - "syscall.ETHERTYPE_TIGAN": "syscall", - "syscall.ETHERTYPE_TRAIL": "syscall", - "syscall.ETHERTYPE_TRANSETHER": "syscall", - "syscall.ETHERTYPE_TYMSHARE": "syscall", - "syscall.ETHERTYPE_UBBST": "syscall", - "syscall.ETHERTYPE_UBDEBUG": "syscall", - "syscall.ETHERTYPE_UBDIAGLOOP": "syscall", - "syscall.ETHERTYPE_UBDL": "syscall", - "syscall.ETHERTYPE_UBNIU": "syscall", - "syscall.ETHERTYPE_UBNMC": "syscall", - "syscall.ETHERTYPE_VALID": "syscall", - "syscall.ETHERTYPE_VARIAN": "syscall", - "syscall.ETHERTYPE_VAXELN": "syscall", - "syscall.ETHERTYPE_VEECO": "syscall", - "syscall.ETHERTYPE_VEXP": "syscall", - "syscall.ETHERTYPE_VGLAB": "syscall", - "syscall.ETHERTYPE_VINES": "syscall", - "syscall.ETHERTYPE_VINESECHO": "syscall", - "syscall.ETHERTYPE_VINESLOOP": "syscall", - "syscall.ETHERTYPE_VITAL": "syscall", - "syscall.ETHERTYPE_VLAN": "syscall", - "syscall.ETHERTYPE_VLTLMAN": "syscall", - "syscall.ETHERTYPE_VPROD": "syscall", - "syscall.ETHERTYPE_VURESERVED": "syscall", - "syscall.ETHERTYPE_WATERLOO": "syscall", - "syscall.ETHERTYPE_WELLFLEET": "syscall", - "syscall.ETHERTYPE_X25": "syscall", - "syscall.ETHERTYPE_X75": "syscall", - "syscall.ETHERTYPE_XNSSM": "syscall", - "syscall.ETHERTYPE_XTP": "syscall", - "syscall.ETHER_ADDR_LEN": "syscall", - "syscall.ETHER_ALIGN": "syscall", - "syscall.ETHER_CRC_LEN": "syscall", - "syscall.ETHER_CRC_POLY_BE": "syscall", - "syscall.ETHER_CRC_POLY_LE": "syscall", - "syscall.ETHER_HDR_LEN": "syscall", - "syscall.ETHER_MAX_DIX_LEN": "syscall", - "syscall.ETHER_MAX_LEN": "syscall", - "syscall.ETHER_MAX_LEN_JUMBO": "syscall", - "syscall.ETHER_MIN_LEN": "syscall", - "syscall.ETHER_PPPOE_ENCAP_LEN": "syscall", - "syscall.ETHER_TYPE_LEN": "syscall", - "syscall.ETHER_VLAN_ENCAP_LEN": "syscall", - 
"syscall.ETH_P_1588": "syscall", - "syscall.ETH_P_8021Q": "syscall", - "syscall.ETH_P_802_2": "syscall", - "syscall.ETH_P_802_3": "syscall", - "syscall.ETH_P_AARP": "syscall", - "syscall.ETH_P_ALL": "syscall", - "syscall.ETH_P_AOE": "syscall", - "syscall.ETH_P_ARCNET": "syscall", - "syscall.ETH_P_ARP": "syscall", - "syscall.ETH_P_ATALK": "syscall", - "syscall.ETH_P_ATMFATE": "syscall", - "syscall.ETH_P_ATMMPOA": "syscall", - "syscall.ETH_P_AX25": "syscall", - "syscall.ETH_P_BPQ": "syscall", - "syscall.ETH_P_CAIF": "syscall", - "syscall.ETH_P_CAN": "syscall", - "syscall.ETH_P_CONTROL": "syscall", - "syscall.ETH_P_CUST": "syscall", - "syscall.ETH_P_DDCMP": "syscall", - "syscall.ETH_P_DEC": "syscall", - "syscall.ETH_P_DIAG": "syscall", - "syscall.ETH_P_DNA_DL": "syscall", - "syscall.ETH_P_DNA_RC": "syscall", - "syscall.ETH_P_DNA_RT": "syscall", - "syscall.ETH_P_DSA": "syscall", - "syscall.ETH_P_ECONET": "syscall", - "syscall.ETH_P_EDSA": "syscall", - "syscall.ETH_P_FCOE": "syscall", - "syscall.ETH_P_FIP": "syscall", - "syscall.ETH_P_HDLC": "syscall", - "syscall.ETH_P_IEEE802154": "syscall", - "syscall.ETH_P_IEEEPUP": "syscall", - "syscall.ETH_P_IEEEPUPAT": "syscall", - "syscall.ETH_P_IP": "syscall", - "syscall.ETH_P_IPV6": "syscall", - "syscall.ETH_P_IPX": "syscall", - "syscall.ETH_P_IRDA": "syscall", - "syscall.ETH_P_LAT": "syscall", - "syscall.ETH_P_LINK_CTL": "syscall", - "syscall.ETH_P_LOCALTALK": "syscall", - "syscall.ETH_P_LOOP": "syscall", - "syscall.ETH_P_MOBITEX": "syscall", - "syscall.ETH_P_MPLS_MC": "syscall", - "syscall.ETH_P_MPLS_UC": "syscall", - "syscall.ETH_P_PAE": "syscall", - "syscall.ETH_P_PAUSE": "syscall", - "syscall.ETH_P_PHONET": "syscall", - "syscall.ETH_P_PPPTALK": "syscall", - "syscall.ETH_P_PPP_DISC": "syscall", - "syscall.ETH_P_PPP_MP": "syscall", - "syscall.ETH_P_PPP_SES": "syscall", - "syscall.ETH_P_PUP": "syscall", - "syscall.ETH_P_PUPAT": "syscall", - "syscall.ETH_P_RARP": "syscall", - "syscall.ETH_P_SCA": "syscall", - "syscall.ETH_P_SLOW": "syscall", - "syscall.ETH_P_SNAP": "syscall", - "syscall.ETH_P_TEB": "syscall", - "syscall.ETH_P_TIPC": "syscall", - "syscall.ETH_P_TRAILER": "syscall", - "syscall.ETH_P_TR_802_2": "syscall", - "syscall.ETH_P_WAN_PPP": "syscall", - "syscall.ETH_P_WCCP": "syscall", - "syscall.ETH_P_X25": "syscall", - "syscall.ETIME": "syscall", - "syscall.ETIMEDOUT": "syscall", - "syscall.ETOOMANYREFS": "syscall", - "syscall.ETXTBSY": "syscall", - "syscall.EUCLEAN": "syscall", - "syscall.EUNATCH": "syscall", - "syscall.EUSERS": "syscall", - "syscall.EVFILT_AIO": "syscall", - "syscall.EVFILT_FS": "syscall", - "syscall.EVFILT_LIO": "syscall", - "syscall.EVFILT_MACHPORT": "syscall", - "syscall.EVFILT_PROC": "syscall", - "syscall.EVFILT_READ": "syscall", - "syscall.EVFILT_SIGNAL": "syscall", - "syscall.EVFILT_SYSCOUNT": "syscall", - "syscall.EVFILT_THREADMARKER": "syscall", - "syscall.EVFILT_TIMER": "syscall", - "syscall.EVFILT_USER": "syscall", - "syscall.EVFILT_VM": "syscall", - "syscall.EVFILT_VNODE": "syscall", - "syscall.EVFILT_WRITE": "syscall", - "syscall.EV_ADD": "syscall", - "syscall.EV_CLEAR": "syscall", - "syscall.EV_DELETE": "syscall", - "syscall.EV_DISABLE": "syscall", - "syscall.EV_DISPATCH": "syscall", - "syscall.EV_DROP": "syscall", - "syscall.EV_ENABLE": "syscall", - "syscall.EV_EOF": "syscall", - "syscall.EV_ERROR": "syscall", - "syscall.EV_FLAG0": "syscall", - "syscall.EV_FLAG1": "syscall", - "syscall.EV_ONESHOT": "syscall", - "syscall.EV_OOBAND": "syscall", - "syscall.EV_POLL": "syscall", - "syscall.EV_RECEIPT": "syscall", - 
"syscall.EV_SYSFLAGS": "syscall", - "syscall.EWINDOWS": "syscall", - "syscall.EWOULDBLOCK": "syscall", - "syscall.EXDEV": "syscall", - "syscall.EXFULL": "syscall", - "syscall.EXTA": "syscall", - "syscall.EXTB": "syscall", - "syscall.EXTPROC": "syscall", - "syscall.Environ": "syscall", - "syscall.EpollCreate": "syscall", - "syscall.EpollCreate1": "syscall", - "syscall.EpollCtl": "syscall", - "syscall.EpollEvent": "syscall", - "syscall.EpollWait": "syscall", - "syscall.Errno": "syscall", - "syscall.EscapeArg": "syscall", - "syscall.Exchangedata": "syscall", - "syscall.Exec": "syscall", - "syscall.Exit": "syscall", - "syscall.ExitProcess": "syscall", - "syscall.FD_CLOEXEC": "syscall", - "syscall.FD_SETSIZE": "syscall", - "syscall.FILE_ACTION_ADDED": "syscall", - "syscall.FILE_ACTION_MODIFIED": "syscall", - "syscall.FILE_ACTION_REMOVED": "syscall", - "syscall.FILE_ACTION_RENAMED_NEW_NAME": "syscall", - "syscall.FILE_ACTION_RENAMED_OLD_NAME": "syscall", - "syscall.FILE_APPEND_DATA": "syscall", - "syscall.FILE_ATTRIBUTE_ARCHIVE": "syscall", - "syscall.FILE_ATTRIBUTE_DIRECTORY": "syscall", - "syscall.FILE_ATTRIBUTE_HIDDEN": "syscall", - "syscall.FILE_ATTRIBUTE_NORMAL": "syscall", - "syscall.FILE_ATTRIBUTE_READONLY": "syscall", - "syscall.FILE_ATTRIBUTE_REPARSE_POINT": "syscall", - "syscall.FILE_ATTRIBUTE_SYSTEM": "syscall", - "syscall.FILE_BEGIN": "syscall", - "syscall.FILE_CURRENT": "syscall", - "syscall.FILE_END": "syscall", - "syscall.FILE_FLAG_BACKUP_SEMANTICS": "syscall", - "syscall.FILE_FLAG_OPEN_REPARSE_POINT": "syscall", - "syscall.FILE_FLAG_OVERLAPPED": "syscall", - "syscall.FILE_LIST_DIRECTORY": "syscall", - "syscall.FILE_MAP_COPY": "syscall", - "syscall.FILE_MAP_EXECUTE": "syscall", - "syscall.FILE_MAP_READ": "syscall", - "syscall.FILE_MAP_WRITE": "syscall", - "syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES": "syscall", - "syscall.FILE_NOTIFY_CHANGE_CREATION": "syscall", - "syscall.FILE_NOTIFY_CHANGE_DIR_NAME": "syscall", - "syscall.FILE_NOTIFY_CHANGE_FILE_NAME": "syscall", - "syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS": "syscall", - "syscall.FILE_NOTIFY_CHANGE_LAST_WRITE": "syscall", - "syscall.FILE_NOTIFY_CHANGE_SIZE": "syscall", - "syscall.FILE_SHARE_DELETE": "syscall", - "syscall.FILE_SHARE_READ": "syscall", - "syscall.FILE_SHARE_WRITE": "syscall", - "syscall.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS": "syscall", - "syscall.FILE_SKIP_SET_EVENT_ON_HANDLE": "syscall", - "syscall.FILE_TYPE_CHAR": "syscall", - "syscall.FILE_TYPE_DISK": "syscall", - "syscall.FILE_TYPE_PIPE": "syscall", - "syscall.FILE_TYPE_REMOTE": "syscall", - "syscall.FILE_TYPE_UNKNOWN": "syscall", - "syscall.FILE_WRITE_ATTRIBUTES": "syscall", - "syscall.FLUSHO": "syscall", - "syscall.FORMAT_MESSAGE_ALLOCATE_BUFFER": "syscall", - "syscall.FORMAT_MESSAGE_ARGUMENT_ARRAY": "syscall", - "syscall.FORMAT_MESSAGE_FROM_HMODULE": "syscall", - "syscall.FORMAT_MESSAGE_FROM_STRING": "syscall", - "syscall.FORMAT_MESSAGE_FROM_SYSTEM": "syscall", - "syscall.FORMAT_MESSAGE_IGNORE_INSERTS": "syscall", - "syscall.FORMAT_MESSAGE_MAX_WIDTH_MASK": "syscall", - "syscall.FSCTL_GET_REPARSE_POINT": "syscall", - "syscall.F_ADDFILESIGS": "syscall", - "syscall.F_ADDSIGS": "syscall", - "syscall.F_ALLOCATEALL": "syscall", - "syscall.F_ALLOCATECONTIG": "syscall", - "syscall.F_CANCEL": "syscall", - "syscall.F_CHKCLEAN": "syscall", - "syscall.F_CLOSEM": "syscall", - "syscall.F_DUP2FD": "syscall", - "syscall.F_DUP2FD_CLOEXEC": "syscall", - "syscall.F_DUPFD": "syscall", - "syscall.F_DUPFD_CLOEXEC": "syscall", - "syscall.F_EXLCK": "syscall", - "syscall.F_FLUSH_DATA": 
"syscall", - "syscall.F_FREEZE_FS": "syscall", - "syscall.F_FSCTL": "syscall", - "syscall.F_FSDIRMASK": "syscall", - "syscall.F_FSIN": "syscall", - "syscall.F_FSINOUT": "syscall", - "syscall.F_FSOUT": "syscall", - "syscall.F_FSPRIV": "syscall", - "syscall.F_FSVOID": "syscall", - "syscall.F_FULLFSYNC": "syscall", - "syscall.F_GETFD": "syscall", - "syscall.F_GETFL": "syscall", - "syscall.F_GETLEASE": "syscall", - "syscall.F_GETLK": "syscall", - "syscall.F_GETLK64": "syscall", - "syscall.F_GETLKPID": "syscall", - "syscall.F_GETNOSIGPIPE": "syscall", - "syscall.F_GETOWN": "syscall", - "syscall.F_GETOWN_EX": "syscall", - "syscall.F_GETPATH": "syscall", - "syscall.F_GETPATH_MTMINFO": "syscall", - "syscall.F_GETPIPE_SZ": "syscall", - "syscall.F_GETPROTECTIONCLASS": "syscall", - "syscall.F_GETSIG": "syscall", - "syscall.F_GLOBAL_NOCACHE": "syscall", - "syscall.F_LOCK": "syscall", - "syscall.F_LOG2PHYS": "syscall", - "syscall.F_LOG2PHYS_EXT": "syscall", - "syscall.F_MARKDEPENDENCY": "syscall", - "syscall.F_MAXFD": "syscall", - "syscall.F_NOCACHE": "syscall", - "syscall.F_NODIRECT": "syscall", - "syscall.F_NOTIFY": "syscall", - "syscall.F_OGETLK": "syscall", - "syscall.F_OK": "syscall", - "syscall.F_OSETLK": "syscall", - "syscall.F_OSETLKW": "syscall", - "syscall.F_PARAM_MASK": "syscall", - "syscall.F_PARAM_MAX": "syscall", - "syscall.F_PATHPKG_CHECK": "syscall", - "syscall.F_PEOFPOSMODE": "syscall", - "syscall.F_PREALLOCATE": "syscall", - "syscall.F_RDADVISE": "syscall", - "syscall.F_RDAHEAD": "syscall", - "syscall.F_RDLCK": "syscall", - "syscall.F_READAHEAD": "syscall", - "syscall.F_READBOOTSTRAP": "syscall", - "syscall.F_SETBACKINGSTORE": "syscall", - "syscall.F_SETFD": "syscall", - "syscall.F_SETFL": "syscall", - "syscall.F_SETLEASE": "syscall", - "syscall.F_SETLK": "syscall", - "syscall.F_SETLK64": "syscall", - "syscall.F_SETLKW": "syscall", - "syscall.F_SETLKW64": "syscall", - "syscall.F_SETLK_REMOTE": "syscall", - "syscall.F_SETNOSIGPIPE": "syscall", - "syscall.F_SETOWN": "syscall", - "syscall.F_SETOWN_EX": "syscall", - "syscall.F_SETPIPE_SZ": "syscall", - "syscall.F_SETPROTECTIONCLASS": "syscall", - "syscall.F_SETSIG": "syscall", - "syscall.F_SETSIZE": "syscall", - "syscall.F_SHLCK": "syscall", - "syscall.F_TEST": "syscall", - "syscall.F_THAW_FS": "syscall", - "syscall.F_TLOCK": "syscall", - "syscall.F_ULOCK": "syscall", - "syscall.F_UNLCK": "syscall", - "syscall.F_UNLCKSYS": "syscall", - "syscall.F_VOLPOSMODE": "syscall", - "syscall.F_WRITEBOOTSTRAP": "syscall", - "syscall.F_WRLCK": "syscall", - "syscall.Faccessat": "syscall", - "syscall.Fallocate": "syscall", - "syscall.Fbootstraptransfer_t": "syscall", - "syscall.Fchdir": "syscall", - "syscall.Fchflags": "syscall", - "syscall.Fchmod": "syscall", - "syscall.Fchmodat": "syscall", - "syscall.Fchown": "syscall", - "syscall.Fchownat": "syscall", - "syscall.FcntlFlock": "syscall", - "syscall.FdSet": "syscall", - "syscall.Fdatasync": "syscall", - "syscall.FileNotifyInformation": "syscall", - "syscall.Filetime": "syscall", - "syscall.FindClose": "syscall", - "syscall.FindFirstFile": "syscall", - "syscall.FindNextFile": "syscall", - "syscall.Flock": "syscall", - "syscall.Flock_t": "syscall", - "syscall.FlushBpf": "syscall", - "syscall.FlushFileBuffers": "syscall", - "syscall.FlushViewOfFile": "syscall", - "syscall.ForkExec": "syscall", - "syscall.ForkLock": "syscall", - "syscall.FormatMessage": "syscall", - "syscall.Fpathconf": "syscall", - "syscall.FreeAddrInfoW": "syscall", - "syscall.FreeEnvironmentStrings": "syscall", - "syscall.FreeLibrary": 
"syscall", - "syscall.Fsid": "syscall", - "syscall.Fstat": "syscall", - "syscall.Fstatfs": "syscall", - "syscall.Fstore_t": "syscall", - "syscall.Fsync": "syscall", - "syscall.Ftruncate": "syscall", - "syscall.FullPath": "syscall", - "syscall.Futimes": "syscall", - "syscall.Futimesat": "syscall", - "syscall.GENERIC_ALL": "syscall", - "syscall.GENERIC_EXECUTE": "syscall", - "syscall.GENERIC_READ": "syscall", - "syscall.GENERIC_WRITE": "syscall", - "syscall.GUID": "syscall", - "syscall.GetAcceptExSockaddrs": "syscall", - "syscall.GetAdaptersInfo": "syscall", - "syscall.GetAddrInfoW": "syscall", - "syscall.GetCommandLine": "syscall", - "syscall.GetComputerName": "syscall", - "syscall.GetConsoleMode": "syscall", - "syscall.GetCurrentDirectory": "syscall", - "syscall.GetCurrentProcess": "syscall", - "syscall.GetEnvironmentStrings": "syscall", - "syscall.GetEnvironmentVariable": "syscall", - "syscall.GetExitCodeProcess": "syscall", - "syscall.GetFileAttributes": "syscall", - "syscall.GetFileAttributesEx": "syscall", - "syscall.GetFileExInfoStandard": "syscall", - "syscall.GetFileExMaxInfoLevel": "syscall", - "syscall.GetFileInformationByHandle": "syscall", - "syscall.GetFileType": "syscall", - "syscall.GetFullPathName": "syscall", - "syscall.GetHostByName": "syscall", - "syscall.GetIfEntry": "syscall", - "syscall.GetLastError": "syscall", - "syscall.GetLengthSid": "syscall", - "syscall.GetLongPathName": "syscall", - "syscall.GetProcAddress": "syscall", - "syscall.GetProcessTimes": "syscall", - "syscall.GetProtoByName": "syscall", - "syscall.GetQueuedCompletionStatus": "syscall", - "syscall.GetServByName": "syscall", - "syscall.GetShortPathName": "syscall", - "syscall.GetStartupInfo": "syscall", - "syscall.GetStdHandle": "syscall", - "syscall.GetSystemTimeAsFileTime": "syscall", - "syscall.GetTempPath": "syscall", - "syscall.GetTimeZoneInformation": "syscall", - "syscall.GetTokenInformation": "syscall", - "syscall.GetUserNameEx": "syscall", - "syscall.GetUserProfileDirectory": "syscall", - "syscall.GetVersion": "syscall", - "syscall.Getcwd": "syscall", - "syscall.Getdents": "syscall", - "syscall.Getdirentries": "syscall", - "syscall.Getdtablesize": "syscall", - "syscall.Getegid": "syscall", - "syscall.Getenv": "syscall", - "syscall.Geteuid": "syscall", - "syscall.Getfsstat": "syscall", - "syscall.Getgid": "syscall", - "syscall.Getgroups": "syscall", - "syscall.Getpagesize": "syscall", - "syscall.Getpeername": "syscall", - "syscall.Getpgid": "syscall", - "syscall.Getpgrp": "syscall", - "syscall.Getpid": "syscall", - "syscall.Getppid": "syscall", - "syscall.Getpriority": "syscall", - "syscall.Getrlimit": "syscall", - "syscall.Getrusage": "syscall", - "syscall.Getsid": "syscall", - "syscall.Getsockname": "syscall", - "syscall.Getsockopt": "syscall", - "syscall.GetsockoptByte": "syscall", - "syscall.GetsockoptICMPv6Filter": "syscall", - "syscall.GetsockoptIPMreq": "syscall", - "syscall.GetsockoptIPMreqn": "syscall", - "syscall.GetsockoptIPv6MTUInfo": "syscall", - "syscall.GetsockoptIPv6Mreq": "syscall", - "syscall.GetsockoptInet4Addr": "syscall", - "syscall.GetsockoptInt": "syscall", - "syscall.GetsockoptUcred": "syscall", - "syscall.Gettid": "syscall", - "syscall.Gettimeofday": "syscall", - "syscall.Getuid": "syscall", - "syscall.Getwd": "syscall", - "syscall.Getxattr": "syscall", - "syscall.HANDLE_FLAG_INHERIT": "syscall", - "syscall.HKEY_CLASSES_ROOT": "syscall", - "syscall.HKEY_CURRENT_CONFIG": "syscall", - "syscall.HKEY_CURRENT_USER": "syscall", - "syscall.HKEY_DYN_DATA": "syscall", - 
"syscall.HKEY_LOCAL_MACHINE": "syscall", - "syscall.HKEY_PERFORMANCE_DATA": "syscall", - "syscall.HKEY_USERS": "syscall", - "syscall.HUPCL": "syscall", - "syscall.Handle": "syscall", - "syscall.Hostent": "syscall", - "syscall.ICANON": "syscall", - "syscall.ICMP6_FILTER": "syscall", - "syscall.ICMPV6_FILTER": "syscall", - "syscall.ICMPv6Filter": "syscall", - "syscall.ICRNL": "syscall", - "syscall.IEXTEN": "syscall", - "syscall.IFAN_ARRIVAL": "syscall", - "syscall.IFAN_DEPARTURE": "syscall", - "syscall.IFA_ADDRESS": "syscall", - "syscall.IFA_ANYCAST": "syscall", - "syscall.IFA_BROADCAST": "syscall", - "syscall.IFA_CACHEINFO": "syscall", - "syscall.IFA_F_DADFAILED": "syscall", - "syscall.IFA_F_DEPRECATED": "syscall", - "syscall.IFA_F_HOMEADDRESS": "syscall", - "syscall.IFA_F_NODAD": "syscall", - "syscall.IFA_F_OPTIMISTIC": "syscall", - "syscall.IFA_F_PERMANENT": "syscall", - "syscall.IFA_F_SECONDARY": "syscall", - "syscall.IFA_F_TEMPORARY": "syscall", - "syscall.IFA_F_TENTATIVE": "syscall", - "syscall.IFA_LABEL": "syscall", - "syscall.IFA_LOCAL": "syscall", - "syscall.IFA_MAX": "syscall", - "syscall.IFA_MULTICAST": "syscall", - "syscall.IFA_ROUTE": "syscall", - "syscall.IFA_UNSPEC": "syscall", - "syscall.IFF_ALLMULTI": "syscall", - "syscall.IFF_ALTPHYS": "syscall", - "syscall.IFF_AUTOMEDIA": "syscall", - "syscall.IFF_BROADCAST": "syscall", - "syscall.IFF_CANTCHANGE": "syscall", - "syscall.IFF_CANTCONFIG": "syscall", - "syscall.IFF_DEBUG": "syscall", - "syscall.IFF_DRV_OACTIVE": "syscall", - "syscall.IFF_DRV_RUNNING": "syscall", - "syscall.IFF_DYING": "syscall", - "syscall.IFF_DYNAMIC": "syscall", - "syscall.IFF_LINK0": "syscall", - "syscall.IFF_LINK1": "syscall", - "syscall.IFF_LINK2": "syscall", - "syscall.IFF_LOOPBACK": "syscall", - "syscall.IFF_MASTER": "syscall", - "syscall.IFF_MONITOR": "syscall", - "syscall.IFF_MULTICAST": "syscall", - "syscall.IFF_NOARP": "syscall", - "syscall.IFF_NOTRAILERS": "syscall", - "syscall.IFF_NO_PI": "syscall", - "syscall.IFF_OACTIVE": "syscall", - "syscall.IFF_ONE_QUEUE": "syscall", - "syscall.IFF_POINTOPOINT": "syscall", - "syscall.IFF_POINTTOPOINT": "syscall", - "syscall.IFF_PORTSEL": "syscall", - "syscall.IFF_PPROMISC": "syscall", - "syscall.IFF_PROMISC": "syscall", - "syscall.IFF_RENAMING": "syscall", - "syscall.IFF_RUNNING": "syscall", - "syscall.IFF_SIMPLEX": "syscall", - "syscall.IFF_SLAVE": "syscall", - "syscall.IFF_SMART": "syscall", - "syscall.IFF_STATICARP": "syscall", - "syscall.IFF_TAP": "syscall", - "syscall.IFF_TUN": "syscall", - "syscall.IFF_TUN_EXCL": "syscall", - "syscall.IFF_UP": "syscall", - "syscall.IFF_VNET_HDR": "syscall", - "syscall.IFLA_ADDRESS": "syscall", - "syscall.IFLA_BROADCAST": "syscall", - "syscall.IFLA_COST": "syscall", - "syscall.IFLA_IFALIAS": "syscall", - "syscall.IFLA_IFNAME": "syscall", - "syscall.IFLA_LINK": "syscall", - "syscall.IFLA_LINKINFO": "syscall", - "syscall.IFLA_LINKMODE": "syscall", - "syscall.IFLA_MAP": "syscall", - "syscall.IFLA_MASTER": "syscall", - "syscall.IFLA_MAX": "syscall", - "syscall.IFLA_MTU": "syscall", - "syscall.IFLA_NET_NS_PID": "syscall", - "syscall.IFLA_OPERSTATE": "syscall", - "syscall.IFLA_PRIORITY": "syscall", - "syscall.IFLA_PROTINFO": "syscall", - "syscall.IFLA_QDISC": "syscall", - "syscall.IFLA_STATS": "syscall", - "syscall.IFLA_TXQLEN": "syscall", - "syscall.IFLA_UNSPEC": "syscall", - "syscall.IFLA_WEIGHT": "syscall", - "syscall.IFLA_WIRELESS": "syscall", - "syscall.IFNAMSIZ": "syscall", - "syscall.IFT_1822": "syscall", - "syscall.IFT_A12MPPSWITCH": "syscall", - "syscall.IFT_AAL2": 
"syscall", - "syscall.IFT_AAL5": "syscall", - "syscall.IFT_ADSL": "syscall", - "syscall.IFT_AFLANE8023": "syscall", - "syscall.IFT_AFLANE8025": "syscall", - "syscall.IFT_ARAP": "syscall", - "syscall.IFT_ARCNET": "syscall", - "syscall.IFT_ARCNETPLUS": "syscall", - "syscall.IFT_ASYNC": "syscall", - "syscall.IFT_ATM": "syscall", - "syscall.IFT_ATMDXI": "syscall", - "syscall.IFT_ATMFUNI": "syscall", - "syscall.IFT_ATMIMA": "syscall", - "syscall.IFT_ATMLOGICAL": "syscall", - "syscall.IFT_ATMRADIO": "syscall", - "syscall.IFT_ATMSUBINTERFACE": "syscall", - "syscall.IFT_ATMVCIENDPT": "syscall", - "syscall.IFT_ATMVIRTUAL": "syscall", - "syscall.IFT_BGPPOLICYACCOUNTING": "syscall", - "syscall.IFT_BLUETOOTH": "syscall", - "syscall.IFT_BRIDGE": "syscall", - "syscall.IFT_BSC": "syscall", - "syscall.IFT_CARP": "syscall", - "syscall.IFT_CCTEMUL": "syscall", - "syscall.IFT_CELLULAR": "syscall", - "syscall.IFT_CEPT": "syscall", - "syscall.IFT_CES": "syscall", - "syscall.IFT_CHANNEL": "syscall", - "syscall.IFT_CNR": "syscall", - "syscall.IFT_COFFEE": "syscall", - "syscall.IFT_COMPOSITELINK": "syscall", - "syscall.IFT_DCN": "syscall", - "syscall.IFT_DIGITALPOWERLINE": "syscall", - "syscall.IFT_DIGITALWRAPPEROVERHEADCHANNEL": "syscall", - "syscall.IFT_DLSW": "syscall", - "syscall.IFT_DOCSCABLEDOWNSTREAM": "syscall", - "syscall.IFT_DOCSCABLEMACLAYER": "syscall", - "syscall.IFT_DOCSCABLEUPSTREAM": "syscall", - "syscall.IFT_DOCSCABLEUPSTREAMCHANNEL": "syscall", - "syscall.IFT_DS0": "syscall", - "syscall.IFT_DS0BUNDLE": "syscall", - "syscall.IFT_DS1FDL": "syscall", - "syscall.IFT_DS3": "syscall", - "syscall.IFT_DTM": "syscall", - "syscall.IFT_DUMMY": "syscall", - "syscall.IFT_DVBASILN": "syscall", - "syscall.IFT_DVBASIOUT": "syscall", - "syscall.IFT_DVBRCCDOWNSTREAM": "syscall", - "syscall.IFT_DVBRCCMACLAYER": "syscall", - "syscall.IFT_DVBRCCUPSTREAM": "syscall", - "syscall.IFT_ECONET": "syscall", - "syscall.IFT_ENC": "syscall", - "syscall.IFT_EON": "syscall", - "syscall.IFT_EPLRS": "syscall", - "syscall.IFT_ESCON": "syscall", - "syscall.IFT_ETHER": "syscall", - "syscall.IFT_FAITH": "syscall", - "syscall.IFT_FAST": "syscall", - "syscall.IFT_FASTETHER": "syscall", - "syscall.IFT_FASTETHERFX": "syscall", - "syscall.IFT_FDDI": "syscall", - "syscall.IFT_FIBRECHANNEL": "syscall", - "syscall.IFT_FRAMERELAYINTERCONNECT": "syscall", - "syscall.IFT_FRAMERELAYMPI": "syscall", - "syscall.IFT_FRDLCIENDPT": "syscall", - "syscall.IFT_FRELAY": "syscall", - "syscall.IFT_FRELAYDCE": "syscall", - "syscall.IFT_FRF16MFRBUNDLE": "syscall", - "syscall.IFT_FRFORWARD": "syscall", - "syscall.IFT_G703AT2MB": "syscall", - "syscall.IFT_G703AT64K": "syscall", - "syscall.IFT_GIF": "syscall", - "syscall.IFT_GIGABITETHERNET": "syscall", - "syscall.IFT_GR303IDT": "syscall", - "syscall.IFT_GR303RDT": "syscall", - "syscall.IFT_H323GATEKEEPER": "syscall", - "syscall.IFT_H323PROXY": "syscall", - "syscall.IFT_HDH1822": "syscall", - "syscall.IFT_HDLC": "syscall", - "syscall.IFT_HDSL2": "syscall", - "syscall.IFT_HIPERLAN2": "syscall", - "syscall.IFT_HIPPI": "syscall", - "syscall.IFT_HIPPIINTERFACE": "syscall", - "syscall.IFT_HOSTPAD": "syscall", - "syscall.IFT_HSSI": "syscall", - "syscall.IFT_HY": "syscall", - "syscall.IFT_IBM370PARCHAN": "syscall", - "syscall.IFT_IDSL": "syscall", - "syscall.IFT_IEEE1394": "syscall", - "syscall.IFT_IEEE80211": "syscall", - "syscall.IFT_IEEE80212": "syscall", - "syscall.IFT_IEEE8023ADLAG": "syscall", - "syscall.IFT_IFGSN": "syscall", - "syscall.IFT_IMT": "syscall", - "syscall.IFT_INFINIBAND": "syscall", - 
"syscall.IFT_INTERLEAVE": "syscall", - "syscall.IFT_IP": "syscall", - "syscall.IFT_IPFORWARD": "syscall", - "syscall.IFT_IPOVERATM": "syscall", - "syscall.IFT_IPOVERCDLC": "syscall", - "syscall.IFT_IPOVERCLAW": "syscall", - "syscall.IFT_IPSWITCH": "syscall", - "syscall.IFT_IPXIP": "syscall", - "syscall.IFT_ISDN": "syscall", - "syscall.IFT_ISDNBASIC": "syscall", - "syscall.IFT_ISDNPRIMARY": "syscall", - "syscall.IFT_ISDNS": "syscall", - "syscall.IFT_ISDNU": "syscall", - "syscall.IFT_ISO88022LLC": "syscall", - "syscall.IFT_ISO88023": "syscall", - "syscall.IFT_ISO88024": "syscall", - "syscall.IFT_ISO88025": "syscall", - "syscall.IFT_ISO88025CRFPINT": "syscall", - "syscall.IFT_ISO88025DTR": "syscall", - "syscall.IFT_ISO88025FIBER": "syscall", - "syscall.IFT_ISO88026": "syscall", - "syscall.IFT_ISUP": "syscall", - "syscall.IFT_L2VLAN": "syscall", - "syscall.IFT_L3IPVLAN": "syscall", - "syscall.IFT_L3IPXVLAN": "syscall", - "syscall.IFT_LAPB": "syscall", - "syscall.IFT_LAPD": "syscall", - "syscall.IFT_LAPF": "syscall", - "syscall.IFT_LINEGROUP": "syscall", - "syscall.IFT_LOCALTALK": "syscall", - "syscall.IFT_LOOP": "syscall", - "syscall.IFT_MEDIAMAILOVERIP": "syscall", - "syscall.IFT_MFSIGLINK": "syscall", - "syscall.IFT_MIOX25": "syscall", - "syscall.IFT_MODEM": "syscall", - "syscall.IFT_MPC": "syscall", - "syscall.IFT_MPLS": "syscall", - "syscall.IFT_MPLSTUNNEL": "syscall", - "syscall.IFT_MSDSL": "syscall", - "syscall.IFT_MVL": "syscall", - "syscall.IFT_MYRINET": "syscall", - "syscall.IFT_NFAS": "syscall", - "syscall.IFT_NSIP": "syscall", - "syscall.IFT_OPTICALCHANNEL": "syscall", - "syscall.IFT_OPTICALTRANSPORT": "syscall", - "syscall.IFT_OTHER": "syscall", - "syscall.IFT_P10": "syscall", - "syscall.IFT_P80": "syscall", - "syscall.IFT_PARA": "syscall", - "syscall.IFT_PDP": "syscall", - "syscall.IFT_PFLOG": "syscall", - "syscall.IFT_PFLOW": "syscall", - "syscall.IFT_PFSYNC": "syscall", - "syscall.IFT_PLC": "syscall", - "syscall.IFT_PON155": "syscall", - "syscall.IFT_PON622": "syscall", - "syscall.IFT_POS": "syscall", - "syscall.IFT_PPP": "syscall", - "syscall.IFT_PPPMULTILINKBUNDLE": "syscall", - "syscall.IFT_PROPATM": "syscall", - "syscall.IFT_PROPBWAP2MP": "syscall", - "syscall.IFT_PROPCNLS": "syscall", - "syscall.IFT_PROPDOCSWIRELESSDOWNSTREAM": "syscall", - "syscall.IFT_PROPDOCSWIRELESSMACLAYER": "syscall", - "syscall.IFT_PROPDOCSWIRELESSUPSTREAM": "syscall", - "syscall.IFT_PROPMUX": "syscall", - "syscall.IFT_PROPVIRTUAL": "syscall", - "syscall.IFT_PROPWIRELESSP2P": "syscall", - "syscall.IFT_PTPSERIAL": "syscall", - "syscall.IFT_PVC": "syscall", - "syscall.IFT_Q2931": "syscall", - "syscall.IFT_QLLC": "syscall", - "syscall.IFT_RADIOMAC": "syscall", - "syscall.IFT_RADSL": "syscall", - "syscall.IFT_REACHDSL": "syscall", - "syscall.IFT_RFC1483": "syscall", - "syscall.IFT_RS232": "syscall", - "syscall.IFT_RSRB": "syscall", - "syscall.IFT_SDLC": "syscall", - "syscall.IFT_SDSL": "syscall", - "syscall.IFT_SHDSL": "syscall", - "syscall.IFT_SIP": "syscall", - "syscall.IFT_SIPSIG": "syscall", - "syscall.IFT_SIPTG": "syscall", - "syscall.IFT_SLIP": "syscall", - "syscall.IFT_SMDSDXI": "syscall", - "syscall.IFT_SMDSICIP": "syscall", - "syscall.IFT_SONET": "syscall", - "syscall.IFT_SONETOVERHEADCHANNEL": "syscall", - "syscall.IFT_SONETPATH": "syscall", - "syscall.IFT_SONETVT": "syscall", - "syscall.IFT_SRP": "syscall", - "syscall.IFT_SS7SIGLINK": "syscall", - "syscall.IFT_STACKTOSTACK": "syscall", - "syscall.IFT_STARLAN": "syscall", - "syscall.IFT_STF": "syscall", - "syscall.IFT_T1": "syscall", - 
"syscall.IFT_TDLC": "syscall", - "syscall.IFT_TELINK": "syscall", - "syscall.IFT_TERMPAD": "syscall", - "syscall.IFT_TR008": "syscall", - "syscall.IFT_TRANSPHDLC": "syscall", - "syscall.IFT_TUNNEL": "syscall", - "syscall.IFT_ULTRA": "syscall", - "syscall.IFT_USB": "syscall", - "syscall.IFT_V11": "syscall", - "syscall.IFT_V35": "syscall", - "syscall.IFT_V36": "syscall", - "syscall.IFT_V37": "syscall", - "syscall.IFT_VDSL": "syscall", - "syscall.IFT_VIRTUALIPADDRESS": "syscall", - "syscall.IFT_VIRTUALTG": "syscall", - "syscall.IFT_VOICEDID": "syscall", - "syscall.IFT_VOICEEM": "syscall", - "syscall.IFT_VOICEEMFGD": "syscall", - "syscall.IFT_VOICEENCAP": "syscall", - "syscall.IFT_VOICEFGDEANA": "syscall", - "syscall.IFT_VOICEFXO": "syscall", - "syscall.IFT_VOICEFXS": "syscall", - "syscall.IFT_VOICEOVERATM": "syscall", - "syscall.IFT_VOICEOVERCABLE": "syscall", - "syscall.IFT_VOICEOVERFRAMERELAY": "syscall", - "syscall.IFT_VOICEOVERIP": "syscall", - "syscall.IFT_X213": "syscall", - "syscall.IFT_X25": "syscall", - "syscall.IFT_X25DDN": "syscall", - "syscall.IFT_X25HUNTGROUP": "syscall", - "syscall.IFT_X25MLP": "syscall", - "syscall.IFT_X25PLE": "syscall", - "syscall.IFT_XETHER": "syscall", - "syscall.IGNBRK": "syscall", - "syscall.IGNCR": "syscall", - "syscall.IGNORE": "syscall", - "syscall.IGNPAR": "syscall", - "syscall.IMAXBEL": "syscall", - "syscall.INFINITE": "syscall", - "syscall.INLCR": "syscall", - "syscall.INPCK": "syscall", - "syscall.INVALID_FILE_ATTRIBUTES": "syscall", - "syscall.IN_ACCESS": "syscall", - "syscall.IN_ALL_EVENTS": "syscall", - "syscall.IN_ATTRIB": "syscall", - "syscall.IN_CLASSA_HOST": "syscall", - "syscall.IN_CLASSA_MAX": "syscall", - "syscall.IN_CLASSA_NET": "syscall", - "syscall.IN_CLASSA_NSHIFT": "syscall", - "syscall.IN_CLASSB_HOST": "syscall", - "syscall.IN_CLASSB_MAX": "syscall", - "syscall.IN_CLASSB_NET": "syscall", - "syscall.IN_CLASSB_NSHIFT": "syscall", - "syscall.IN_CLASSC_HOST": "syscall", - "syscall.IN_CLASSC_NET": "syscall", - "syscall.IN_CLASSC_NSHIFT": "syscall", - "syscall.IN_CLASSD_HOST": "syscall", - "syscall.IN_CLASSD_NET": "syscall", - "syscall.IN_CLASSD_NSHIFT": "syscall", - "syscall.IN_CLOEXEC": "syscall", - "syscall.IN_CLOSE": "syscall", - "syscall.IN_CLOSE_NOWRITE": "syscall", - "syscall.IN_CLOSE_WRITE": "syscall", - "syscall.IN_CREATE": "syscall", - "syscall.IN_DELETE": "syscall", - "syscall.IN_DELETE_SELF": "syscall", - "syscall.IN_DONT_FOLLOW": "syscall", - "syscall.IN_EXCL_UNLINK": "syscall", - "syscall.IN_IGNORED": "syscall", - "syscall.IN_ISDIR": "syscall", - "syscall.IN_LINKLOCALNETNUM": "syscall", - "syscall.IN_LOOPBACKNET": "syscall", - "syscall.IN_MASK_ADD": "syscall", - "syscall.IN_MODIFY": "syscall", - "syscall.IN_MOVE": "syscall", - "syscall.IN_MOVED_FROM": "syscall", - "syscall.IN_MOVED_TO": "syscall", - "syscall.IN_MOVE_SELF": "syscall", - "syscall.IN_NONBLOCK": "syscall", - "syscall.IN_ONESHOT": "syscall", - "syscall.IN_ONLYDIR": "syscall", - "syscall.IN_OPEN": "syscall", - "syscall.IN_Q_OVERFLOW": "syscall", - "syscall.IN_RFC3021_HOST": "syscall", - "syscall.IN_RFC3021_MASK": "syscall", - "syscall.IN_RFC3021_NET": "syscall", - "syscall.IN_RFC3021_NSHIFT": "syscall", - "syscall.IN_UNMOUNT": "syscall", - "syscall.IOC_IN": "syscall", - "syscall.IOC_INOUT": "syscall", - "syscall.IOC_OUT": "syscall", - "syscall.IOC_VENDOR": "syscall", - "syscall.IOC_WS2": "syscall", - "syscall.IO_REPARSE_TAG_SYMLINK": "syscall", - "syscall.IPMreq": "syscall", - "syscall.IPMreqn": "syscall", - "syscall.IPPROTO_3PC": "syscall", - 
"syscall.IPPROTO_ADFS": "syscall", - "syscall.IPPROTO_AH": "syscall", - "syscall.IPPROTO_AHIP": "syscall", - "syscall.IPPROTO_APES": "syscall", - "syscall.IPPROTO_ARGUS": "syscall", - "syscall.IPPROTO_AX25": "syscall", - "syscall.IPPROTO_BHA": "syscall", - "syscall.IPPROTO_BLT": "syscall", - "syscall.IPPROTO_BRSATMON": "syscall", - "syscall.IPPROTO_CARP": "syscall", - "syscall.IPPROTO_CFTP": "syscall", - "syscall.IPPROTO_CHAOS": "syscall", - "syscall.IPPROTO_CMTP": "syscall", - "syscall.IPPROTO_COMP": "syscall", - "syscall.IPPROTO_CPHB": "syscall", - "syscall.IPPROTO_CPNX": "syscall", - "syscall.IPPROTO_DCCP": "syscall", - "syscall.IPPROTO_DDP": "syscall", - "syscall.IPPROTO_DGP": "syscall", - "syscall.IPPROTO_DIVERT": "syscall", - "syscall.IPPROTO_DIVERT_INIT": "syscall", - "syscall.IPPROTO_DIVERT_RESP": "syscall", - "syscall.IPPROTO_DONE": "syscall", - "syscall.IPPROTO_DSTOPTS": "syscall", - "syscall.IPPROTO_EGP": "syscall", - "syscall.IPPROTO_EMCON": "syscall", - "syscall.IPPROTO_ENCAP": "syscall", - "syscall.IPPROTO_EON": "syscall", - "syscall.IPPROTO_ESP": "syscall", - "syscall.IPPROTO_ETHERIP": "syscall", - "syscall.IPPROTO_FRAGMENT": "syscall", - "syscall.IPPROTO_GGP": "syscall", - "syscall.IPPROTO_GMTP": "syscall", - "syscall.IPPROTO_GRE": "syscall", - "syscall.IPPROTO_HELLO": "syscall", - "syscall.IPPROTO_HMP": "syscall", - "syscall.IPPROTO_HOPOPTS": "syscall", - "syscall.IPPROTO_ICMP": "syscall", - "syscall.IPPROTO_ICMPV6": "syscall", - "syscall.IPPROTO_IDP": "syscall", - "syscall.IPPROTO_IDPR": "syscall", - "syscall.IPPROTO_IDRP": "syscall", - "syscall.IPPROTO_IGMP": "syscall", - "syscall.IPPROTO_IGP": "syscall", - "syscall.IPPROTO_IGRP": "syscall", - "syscall.IPPROTO_IL": "syscall", - "syscall.IPPROTO_INLSP": "syscall", - "syscall.IPPROTO_INP": "syscall", - "syscall.IPPROTO_IP": "syscall", - "syscall.IPPROTO_IPCOMP": "syscall", - "syscall.IPPROTO_IPCV": "syscall", - "syscall.IPPROTO_IPEIP": "syscall", - "syscall.IPPROTO_IPIP": "syscall", - "syscall.IPPROTO_IPPC": "syscall", - "syscall.IPPROTO_IPV4": "syscall", - "syscall.IPPROTO_IPV6": "syscall", - "syscall.IPPROTO_IPV6_ICMP": "syscall", - "syscall.IPPROTO_IRTP": "syscall", - "syscall.IPPROTO_KRYPTOLAN": "syscall", - "syscall.IPPROTO_LARP": "syscall", - "syscall.IPPROTO_LEAF1": "syscall", - "syscall.IPPROTO_LEAF2": "syscall", - "syscall.IPPROTO_MAX": "syscall", - "syscall.IPPROTO_MAXID": "syscall", - "syscall.IPPROTO_MEAS": "syscall", - "syscall.IPPROTO_MH": "syscall", - "syscall.IPPROTO_MHRP": "syscall", - "syscall.IPPROTO_MICP": "syscall", - "syscall.IPPROTO_MOBILE": "syscall", - "syscall.IPPROTO_MPLS": "syscall", - "syscall.IPPROTO_MTP": "syscall", - "syscall.IPPROTO_MUX": "syscall", - "syscall.IPPROTO_ND": "syscall", - "syscall.IPPROTO_NHRP": "syscall", - "syscall.IPPROTO_NONE": "syscall", - "syscall.IPPROTO_NSP": "syscall", - "syscall.IPPROTO_NVPII": "syscall", - "syscall.IPPROTO_OLD_DIVERT": "syscall", - "syscall.IPPROTO_OSPFIGP": "syscall", - "syscall.IPPROTO_PFSYNC": "syscall", - "syscall.IPPROTO_PGM": "syscall", - "syscall.IPPROTO_PIGP": "syscall", - "syscall.IPPROTO_PIM": "syscall", - "syscall.IPPROTO_PRM": "syscall", - "syscall.IPPROTO_PUP": "syscall", - "syscall.IPPROTO_PVP": "syscall", - "syscall.IPPROTO_RAW": "syscall", - "syscall.IPPROTO_RCCMON": "syscall", - "syscall.IPPROTO_RDP": "syscall", - "syscall.IPPROTO_ROUTING": "syscall", - "syscall.IPPROTO_RSVP": "syscall", - "syscall.IPPROTO_RVD": "syscall", - "syscall.IPPROTO_SATEXPAK": "syscall", - "syscall.IPPROTO_SATMON": "syscall", - "syscall.IPPROTO_SCCSP": 
"syscall", - "syscall.IPPROTO_SCTP": "syscall", - "syscall.IPPROTO_SDRP": "syscall", - "syscall.IPPROTO_SEND": "syscall", - "syscall.IPPROTO_SEP": "syscall", - "syscall.IPPROTO_SKIP": "syscall", - "syscall.IPPROTO_SPACER": "syscall", - "syscall.IPPROTO_SRPC": "syscall", - "syscall.IPPROTO_ST": "syscall", - "syscall.IPPROTO_SVMTP": "syscall", - "syscall.IPPROTO_SWIPE": "syscall", - "syscall.IPPROTO_TCF": "syscall", - "syscall.IPPROTO_TCP": "syscall", - "syscall.IPPROTO_TLSP": "syscall", - "syscall.IPPROTO_TP": "syscall", - "syscall.IPPROTO_TPXX": "syscall", - "syscall.IPPROTO_TRUNK1": "syscall", - "syscall.IPPROTO_TRUNK2": "syscall", - "syscall.IPPROTO_TTP": "syscall", - "syscall.IPPROTO_UDP": "syscall", - "syscall.IPPROTO_UDPLITE": "syscall", - "syscall.IPPROTO_VINES": "syscall", - "syscall.IPPROTO_VISA": "syscall", - "syscall.IPPROTO_VMTP": "syscall", - "syscall.IPPROTO_VRRP": "syscall", - "syscall.IPPROTO_WBEXPAK": "syscall", - "syscall.IPPROTO_WBMON": "syscall", - "syscall.IPPROTO_WSN": "syscall", - "syscall.IPPROTO_XNET": "syscall", - "syscall.IPPROTO_XTP": "syscall", - "syscall.IPV6_2292DSTOPTS": "syscall", - "syscall.IPV6_2292HOPLIMIT": "syscall", - "syscall.IPV6_2292HOPOPTS": "syscall", - "syscall.IPV6_2292NEXTHOP": "syscall", - "syscall.IPV6_2292PKTINFO": "syscall", - "syscall.IPV6_2292PKTOPTIONS": "syscall", - "syscall.IPV6_2292RTHDR": "syscall", - "syscall.IPV6_ADDRFORM": "syscall", - "syscall.IPV6_ADD_MEMBERSHIP": "syscall", - "syscall.IPV6_AUTHHDR": "syscall", - "syscall.IPV6_AUTH_LEVEL": "syscall", - "syscall.IPV6_AUTOFLOWLABEL": "syscall", - "syscall.IPV6_BINDANY": "syscall", - "syscall.IPV6_BINDV6ONLY": "syscall", - "syscall.IPV6_BOUND_IF": "syscall", - "syscall.IPV6_CHECKSUM": "syscall", - "syscall.IPV6_DEFAULT_MULTICAST_HOPS": "syscall", - "syscall.IPV6_DEFAULT_MULTICAST_LOOP": "syscall", - "syscall.IPV6_DEFHLIM": "syscall", - "syscall.IPV6_DONTFRAG": "syscall", - "syscall.IPV6_DROP_MEMBERSHIP": "syscall", - "syscall.IPV6_DSTOPTS": "syscall", - "syscall.IPV6_ESP_NETWORK_LEVEL": "syscall", - "syscall.IPV6_ESP_TRANS_LEVEL": "syscall", - "syscall.IPV6_FAITH": "syscall", - "syscall.IPV6_FLOWINFO_MASK": "syscall", - "syscall.IPV6_FLOWLABEL_MASK": "syscall", - "syscall.IPV6_FRAGTTL": "syscall", - "syscall.IPV6_FW_ADD": "syscall", - "syscall.IPV6_FW_DEL": "syscall", - "syscall.IPV6_FW_FLUSH": "syscall", - "syscall.IPV6_FW_GET": "syscall", - "syscall.IPV6_FW_ZERO": "syscall", - "syscall.IPV6_HLIMDEC": "syscall", - "syscall.IPV6_HOPLIMIT": "syscall", - "syscall.IPV6_HOPOPTS": "syscall", - "syscall.IPV6_IPCOMP_LEVEL": "syscall", - "syscall.IPV6_IPSEC_POLICY": "syscall", - "syscall.IPV6_JOIN_ANYCAST": "syscall", - "syscall.IPV6_JOIN_GROUP": "syscall", - "syscall.IPV6_LEAVE_ANYCAST": "syscall", - "syscall.IPV6_LEAVE_GROUP": "syscall", - "syscall.IPV6_MAXHLIM": "syscall", - "syscall.IPV6_MAXOPTHDR": "syscall", - "syscall.IPV6_MAXPACKET": "syscall", - "syscall.IPV6_MAX_GROUP_SRC_FILTER": "syscall", - "syscall.IPV6_MAX_MEMBERSHIPS": "syscall", - "syscall.IPV6_MAX_SOCK_SRC_FILTER": "syscall", - "syscall.IPV6_MIN_MEMBERSHIPS": "syscall", - "syscall.IPV6_MMTU": "syscall", - "syscall.IPV6_MSFILTER": "syscall", - "syscall.IPV6_MTU": "syscall", - "syscall.IPV6_MTU_DISCOVER": "syscall", - "syscall.IPV6_MULTICAST_HOPS": "syscall", - "syscall.IPV6_MULTICAST_IF": "syscall", - "syscall.IPV6_MULTICAST_LOOP": "syscall", - "syscall.IPV6_NEXTHOP": "syscall", - "syscall.IPV6_OPTIONS": "syscall", - "syscall.IPV6_PATHMTU": "syscall", - "syscall.IPV6_PIPEX": "syscall", - "syscall.IPV6_PKTINFO": "syscall", 
- "syscall.IPV6_PMTUDISC_DO": "syscall", - "syscall.IPV6_PMTUDISC_DONT": "syscall", - "syscall.IPV6_PMTUDISC_PROBE": "syscall", - "syscall.IPV6_PMTUDISC_WANT": "syscall", - "syscall.IPV6_PORTRANGE": "syscall", - "syscall.IPV6_PORTRANGE_DEFAULT": "syscall", - "syscall.IPV6_PORTRANGE_HIGH": "syscall", - "syscall.IPV6_PORTRANGE_LOW": "syscall", - "syscall.IPV6_PREFER_TEMPADDR": "syscall", - "syscall.IPV6_RECVDSTOPTS": "syscall", - "syscall.IPV6_RECVDSTPORT": "syscall", - "syscall.IPV6_RECVERR": "syscall", - "syscall.IPV6_RECVHOPLIMIT": "syscall", - "syscall.IPV6_RECVHOPOPTS": "syscall", - "syscall.IPV6_RECVPATHMTU": "syscall", - "syscall.IPV6_RECVPKTINFO": "syscall", - "syscall.IPV6_RECVRTHDR": "syscall", - "syscall.IPV6_RECVTCLASS": "syscall", - "syscall.IPV6_ROUTER_ALERT": "syscall", - "syscall.IPV6_RTABLE": "syscall", - "syscall.IPV6_RTHDR": "syscall", - "syscall.IPV6_RTHDRDSTOPTS": "syscall", - "syscall.IPV6_RTHDR_LOOSE": "syscall", - "syscall.IPV6_RTHDR_STRICT": "syscall", - "syscall.IPV6_RTHDR_TYPE_0": "syscall", - "syscall.IPV6_RXDSTOPTS": "syscall", - "syscall.IPV6_RXHOPOPTS": "syscall", - "syscall.IPV6_SOCKOPT_RESERVED1": "syscall", - "syscall.IPV6_TCLASS": "syscall", - "syscall.IPV6_UNICAST_HOPS": "syscall", - "syscall.IPV6_USE_MIN_MTU": "syscall", - "syscall.IPV6_V6ONLY": "syscall", - "syscall.IPV6_VERSION": "syscall", - "syscall.IPV6_VERSION_MASK": "syscall", - "syscall.IPV6_XFRM_POLICY": "syscall", - "syscall.IP_ADD_MEMBERSHIP": "syscall", - "syscall.IP_ADD_SOURCE_MEMBERSHIP": "syscall", - "syscall.IP_AUTH_LEVEL": "syscall", - "syscall.IP_BINDANY": "syscall", - "syscall.IP_BLOCK_SOURCE": "syscall", - "syscall.IP_BOUND_IF": "syscall", - "syscall.IP_DEFAULT_MULTICAST_LOOP": "syscall", - "syscall.IP_DEFAULT_MULTICAST_TTL": "syscall", - "syscall.IP_DF": "syscall", - "syscall.IP_DIVERTFL": "syscall", - "syscall.IP_DONTFRAG": "syscall", - "syscall.IP_DROP_MEMBERSHIP": "syscall", - "syscall.IP_DROP_SOURCE_MEMBERSHIP": "syscall", - "syscall.IP_DUMMYNET3": "syscall", - "syscall.IP_DUMMYNET_CONFIGURE": "syscall", - "syscall.IP_DUMMYNET_DEL": "syscall", - "syscall.IP_DUMMYNET_FLUSH": "syscall", - "syscall.IP_DUMMYNET_GET": "syscall", - "syscall.IP_EF": "syscall", - "syscall.IP_ERRORMTU": "syscall", - "syscall.IP_ESP_NETWORK_LEVEL": "syscall", - "syscall.IP_ESP_TRANS_LEVEL": "syscall", - "syscall.IP_FAITH": "syscall", - "syscall.IP_FREEBIND": "syscall", - "syscall.IP_FW3": "syscall", - "syscall.IP_FW_ADD": "syscall", - "syscall.IP_FW_DEL": "syscall", - "syscall.IP_FW_FLUSH": "syscall", - "syscall.IP_FW_GET": "syscall", - "syscall.IP_FW_NAT_CFG": "syscall", - "syscall.IP_FW_NAT_DEL": "syscall", - "syscall.IP_FW_NAT_GET_CONFIG": "syscall", - "syscall.IP_FW_NAT_GET_LOG": "syscall", - "syscall.IP_FW_RESETLOG": "syscall", - "syscall.IP_FW_TABLE_ADD": "syscall", - "syscall.IP_FW_TABLE_DEL": "syscall", - "syscall.IP_FW_TABLE_FLUSH": "syscall", - "syscall.IP_FW_TABLE_GETSIZE": "syscall", - "syscall.IP_FW_TABLE_LIST": "syscall", - "syscall.IP_FW_ZERO": "syscall", - "syscall.IP_HDRINCL": "syscall", - "syscall.IP_IPCOMP_LEVEL": "syscall", - "syscall.IP_IPSECFLOWINFO": "syscall", - "syscall.IP_IPSEC_LOCAL_AUTH": "syscall", - "syscall.IP_IPSEC_LOCAL_CRED": "syscall", - "syscall.IP_IPSEC_LOCAL_ID": "syscall", - "syscall.IP_IPSEC_POLICY": "syscall", - "syscall.IP_IPSEC_REMOTE_AUTH": "syscall", - "syscall.IP_IPSEC_REMOTE_CRED": "syscall", - "syscall.IP_IPSEC_REMOTE_ID": "syscall", - "syscall.IP_MAXPACKET": "syscall", - "syscall.IP_MAX_GROUP_SRC_FILTER": "syscall", - "syscall.IP_MAX_MEMBERSHIPS": "syscall", 
- "syscall.IP_MAX_SOCK_MUTE_FILTER": "syscall", - "syscall.IP_MAX_SOCK_SRC_FILTER": "syscall", - "syscall.IP_MAX_SOURCE_FILTER": "syscall", - "syscall.IP_MF": "syscall", - "syscall.IP_MINFRAGSIZE": "syscall", - "syscall.IP_MINTTL": "syscall", - "syscall.IP_MIN_MEMBERSHIPS": "syscall", - "syscall.IP_MSFILTER": "syscall", - "syscall.IP_MSS": "syscall", - "syscall.IP_MTU": "syscall", - "syscall.IP_MTU_DISCOVER": "syscall", - "syscall.IP_MULTICAST_IF": "syscall", - "syscall.IP_MULTICAST_IFINDEX": "syscall", - "syscall.IP_MULTICAST_LOOP": "syscall", - "syscall.IP_MULTICAST_TTL": "syscall", - "syscall.IP_MULTICAST_VIF": "syscall", - "syscall.IP_NAT__XXX": "syscall", - "syscall.IP_OFFMASK": "syscall", - "syscall.IP_OLD_FW_ADD": "syscall", - "syscall.IP_OLD_FW_DEL": "syscall", - "syscall.IP_OLD_FW_FLUSH": "syscall", - "syscall.IP_OLD_FW_GET": "syscall", - "syscall.IP_OLD_FW_RESETLOG": "syscall", - "syscall.IP_OLD_FW_ZERO": "syscall", - "syscall.IP_ONESBCAST": "syscall", - "syscall.IP_OPTIONS": "syscall", - "syscall.IP_ORIGDSTADDR": "syscall", - "syscall.IP_PASSSEC": "syscall", - "syscall.IP_PIPEX": "syscall", - "syscall.IP_PKTINFO": "syscall", - "syscall.IP_PKTOPTIONS": "syscall", - "syscall.IP_PMTUDISC": "syscall", - "syscall.IP_PMTUDISC_DO": "syscall", - "syscall.IP_PMTUDISC_DONT": "syscall", - "syscall.IP_PMTUDISC_PROBE": "syscall", - "syscall.IP_PMTUDISC_WANT": "syscall", - "syscall.IP_PORTRANGE": "syscall", - "syscall.IP_PORTRANGE_DEFAULT": "syscall", - "syscall.IP_PORTRANGE_HIGH": "syscall", - "syscall.IP_PORTRANGE_LOW": "syscall", - "syscall.IP_RECVDSTADDR": "syscall", - "syscall.IP_RECVDSTPORT": "syscall", - "syscall.IP_RECVERR": "syscall", - "syscall.IP_RECVIF": "syscall", - "syscall.IP_RECVOPTS": "syscall", - "syscall.IP_RECVORIGDSTADDR": "syscall", - "syscall.IP_RECVPKTINFO": "syscall", - "syscall.IP_RECVRETOPTS": "syscall", - "syscall.IP_RECVRTABLE": "syscall", - "syscall.IP_RECVTOS": "syscall", - "syscall.IP_RECVTTL": "syscall", - "syscall.IP_RETOPTS": "syscall", - "syscall.IP_RF": "syscall", - "syscall.IP_ROUTER_ALERT": "syscall", - "syscall.IP_RSVP_OFF": "syscall", - "syscall.IP_RSVP_ON": "syscall", - "syscall.IP_RSVP_VIF_OFF": "syscall", - "syscall.IP_RSVP_VIF_ON": "syscall", - "syscall.IP_RTABLE": "syscall", - "syscall.IP_SENDSRCADDR": "syscall", - "syscall.IP_STRIPHDR": "syscall", - "syscall.IP_TOS": "syscall", - "syscall.IP_TRAFFIC_MGT_BACKGROUND": "syscall", - "syscall.IP_TRANSPARENT": "syscall", - "syscall.IP_TTL": "syscall", - "syscall.IP_UNBLOCK_SOURCE": "syscall", - "syscall.IP_XFRM_POLICY": "syscall", - "syscall.IPv6MTUInfo": "syscall", - "syscall.IPv6Mreq": "syscall", - "syscall.ISIG": "syscall", - "syscall.ISTRIP": "syscall", - "syscall.IUCLC": "syscall", - "syscall.IUTF8": "syscall", - "syscall.IXANY": "syscall", - "syscall.IXOFF": "syscall", - "syscall.IXON": "syscall", - "syscall.IfAddrmsg": "syscall", - "syscall.IfAnnounceMsghdr": "syscall", - "syscall.IfData": "syscall", - "syscall.IfInfomsg": "syscall", - "syscall.IfMsghdr": "syscall", - "syscall.IfaMsghdr": "syscall", - "syscall.IfmaMsghdr": "syscall", - "syscall.IfmaMsghdr2": "syscall", - "syscall.ImplementsGetwd": "syscall", - "syscall.Inet4Pktinfo": "syscall", - "syscall.Inet6Pktinfo": "syscall", - "syscall.InotifyAddWatch": "syscall", - "syscall.InotifyEvent": "syscall", - "syscall.InotifyInit": "syscall", - "syscall.InotifyInit1": "syscall", - "syscall.InotifyRmWatch": "syscall", - "syscall.InterfaceAddrMessage": "syscall", - "syscall.InterfaceAnnounceMessage": "syscall", - "syscall.InterfaceInfo": "syscall", 
- "syscall.InterfaceMessage": "syscall", - "syscall.InterfaceMulticastAddrMessage": "syscall", - "syscall.InvalidHandle": "syscall", - "syscall.Ioperm": "syscall", - "syscall.Iopl": "syscall", - "syscall.Iovec": "syscall", - "syscall.IpAdapterInfo": "syscall", - "syscall.IpAddrString": "syscall", - "syscall.IpAddressString": "syscall", - "syscall.IpMaskString": "syscall", - "syscall.Issetugid": "syscall", - "syscall.KEY_ALL_ACCESS": "syscall", - "syscall.KEY_CREATE_LINK": "syscall", - "syscall.KEY_CREATE_SUB_KEY": "syscall", - "syscall.KEY_ENUMERATE_SUB_KEYS": "syscall", - "syscall.KEY_EXECUTE": "syscall", - "syscall.KEY_NOTIFY": "syscall", - "syscall.KEY_QUERY_VALUE": "syscall", - "syscall.KEY_READ": "syscall", - "syscall.KEY_SET_VALUE": "syscall", - "syscall.KEY_WOW64_32KEY": "syscall", - "syscall.KEY_WOW64_64KEY": "syscall", - "syscall.KEY_WRITE": "syscall", - "syscall.Kevent": "syscall", - "syscall.Kevent_t": "syscall", - "syscall.Kill": "syscall", - "syscall.Klogctl": "syscall", - "syscall.Kqueue": "syscall", - "syscall.LANG_ENGLISH": "syscall", - "syscall.LAYERED_PROTOCOL": "syscall", - "syscall.LCNT_OVERLOAD_FLUSH": "syscall", - "syscall.LINUX_REBOOT_CMD_CAD_OFF": "syscall", - "syscall.LINUX_REBOOT_CMD_CAD_ON": "syscall", - "syscall.LINUX_REBOOT_CMD_HALT": "syscall", - "syscall.LINUX_REBOOT_CMD_KEXEC": "syscall", - "syscall.LINUX_REBOOT_CMD_POWER_OFF": "syscall", - "syscall.LINUX_REBOOT_CMD_RESTART": "syscall", - "syscall.LINUX_REBOOT_CMD_RESTART2": "syscall", - "syscall.LINUX_REBOOT_CMD_SW_SUSPEND": "syscall", - "syscall.LINUX_REBOOT_MAGIC1": "syscall", - "syscall.LINUX_REBOOT_MAGIC2": "syscall", - "syscall.LOCK_EX": "syscall", - "syscall.LOCK_NB": "syscall", - "syscall.LOCK_SH": "syscall", - "syscall.LOCK_UN": "syscall", - "syscall.LazyDLL": "syscall", - "syscall.LazyProc": "syscall", - "syscall.Lchown": "syscall", - "syscall.Linger": "syscall", - "syscall.Link": "syscall", - "syscall.Listen": "syscall", - "syscall.Listxattr": "syscall", - "syscall.LoadCancelIoEx": "syscall", - "syscall.LoadConnectEx": "syscall", - "syscall.LoadCreateSymbolicLink": "syscall", - "syscall.LoadDLL": "syscall", - "syscall.LoadGetAddrInfo": "syscall", - "syscall.LoadLibrary": "syscall", - "syscall.LoadSetFileCompletionNotificationModes": "syscall", - "syscall.LocalFree": "syscall", - "syscall.Log2phys_t": "syscall", - "syscall.LookupAccountName": "syscall", - "syscall.LookupAccountSid": "syscall", - "syscall.LookupSID": "syscall", - "syscall.LsfJump": "syscall", - "syscall.LsfSocket": "syscall", - "syscall.LsfStmt": "syscall", - "syscall.Lstat": "syscall", - "syscall.MADV_AUTOSYNC": "syscall", - "syscall.MADV_CAN_REUSE": "syscall", - "syscall.MADV_CORE": "syscall", - "syscall.MADV_DOFORK": "syscall", - "syscall.MADV_DONTFORK": "syscall", - "syscall.MADV_DONTNEED": "syscall", - "syscall.MADV_FREE": "syscall", - "syscall.MADV_FREE_REUSABLE": "syscall", - "syscall.MADV_FREE_REUSE": "syscall", - "syscall.MADV_HUGEPAGE": "syscall", - "syscall.MADV_HWPOISON": "syscall", - "syscall.MADV_MERGEABLE": "syscall", - "syscall.MADV_NOCORE": "syscall", - "syscall.MADV_NOHUGEPAGE": "syscall", - "syscall.MADV_NORMAL": "syscall", - "syscall.MADV_NOSYNC": "syscall", - "syscall.MADV_PROTECT": "syscall", - "syscall.MADV_RANDOM": "syscall", - "syscall.MADV_REMOVE": "syscall", - "syscall.MADV_SEQUENTIAL": "syscall", - "syscall.MADV_SPACEAVAIL": "syscall", - "syscall.MADV_UNMERGEABLE": "syscall", - "syscall.MADV_WILLNEED": "syscall", - "syscall.MADV_ZERO_WIRED_PAGES": "syscall", - "syscall.MAP_32BIT": "syscall", - 
"syscall.MAP_ALIGNED_SUPER": "syscall", - "syscall.MAP_ALIGNMENT_16MB": "syscall", - "syscall.MAP_ALIGNMENT_1TB": "syscall", - "syscall.MAP_ALIGNMENT_256TB": "syscall", - "syscall.MAP_ALIGNMENT_4GB": "syscall", - "syscall.MAP_ALIGNMENT_64KB": "syscall", - "syscall.MAP_ALIGNMENT_64PB": "syscall", - "syscall.MAP_ALIGNMENT_MASK": "syscall", - "syscall.MAP_ALIGNMENT_SHIFT": "syscall", - "syscall.MAP_ANON": "syscall", - "syscall.MAP_ANONYMOUS": "syscall", - "syscall.MAP_COPY": "syscall", - "syscall.MAP_DENYWRITE": "syscall", - "syscall.MAP_EXECUTABLE": "syscall", - "syscall.MAP_FILE": "syscall", - "syscall.MAP_FIXED": "syscall", - "syscall.MAP_FLAGMASK": "syscall", - "syscall.MAP_GROWSDOWN": "syscall", - "syscall.MAP_HASSEMAPHORE": "syscall", - "syscall.MAP_HUGETLB": "syscall", - "syscall.MAP_INHERIT": "syscall", - "syscall.MAP_INHERIT_COPY": "syscall", - "syscall.MAP_INHERIT_DEFAULT": "syscall", - "syscall.MAP_INHERIT_DONATE_COPY": "syscall", - "syscall.MAP_INHERIT_NONE": "syscall", - "syscall.MAP_INHERIT_SHARE": "syscall", - "syscall.MAP_JIT": "syscall", - "syscall.MAP_LOCKED": "syscall", - "syscall.MAP_NOCACHE": "syscall", - "syscall.MAP_NOCORE": "syscall", - "syscall.MAP_NOEXTEND": "syscall", - "syscall.MAP_NONBLOCK": "syscall", - "syscall.MAP_NORESERVE": "syscall", - "syscall.MAP_NOSYNC": "syscall", - "syscall.MAP_POPULATE": "syscall", - "syscall.MAP_PREFAULT_READ": "syscall", - "syscall.MAP_PRIVATE": "syscall", - "syscall.MAP_RENAME": "syscall", - "syscall.MAP_RESERVED0080": "syscall", - "syscall.MAP_RESERVED0100": "syscall", - "syscall.MAP_SHARED": "syscall", - "syscall.MAP_STACK": "syscall", - "syscall.MAP_TRYFIXED": "syscall", - "syscall.MAP_TYPE": "syscall", - "syscall.MAP_WIRED": "syscall", - "syscall.MAXIMUM_REPARSE_DATA_BUFFER_SIZE": "syscall", - "syscall.MAXLEN_IFDESCR": "syscall", - "syscall.MAXLEN_PHYSADDR": "syscall", - "syscall.MAX_ADAPTER_ADDRESS_LENGTH": "syscall", - "syscall.MAX_ADAPTER_DESCRIPTION_LENGTH": "syscall", - "syscall.MAX_ADAPTER_NAME_LENGTH": "syscall", - "syscall.MAX_COMPUTERNAME_LENGTH": "syscall", - "syscall.MAX_INTERFACE_NAME_LEN": "syscall", - "syscall.MAX_LONG_PATH": "syscall", - "syscall.MAX_PATH": "syscall", - "syscall.MAX_PROTOCOL_CHAIN": "syscall", - "syscall.MCL_CURRENT": "syscall", - "syscall.MCL_FUTURE": "syscall", - "syscall.MNT_DETACH": "syscall", - "syscall.MNT_EXPIRE": "syscall", - "syscall.MNT_FORCE": "syscall", - "syscall.MSG_BCAST": "syscall", - "syscall.MSG_CMSG_CLOEXEC": "syscall", - "syscall.MSG_COMPAT": "syscall", - "syscall.MSG_CONFIRM": "syscall", - "syscall.MSG_CONTROLMBUF": "syscall", - "syscall.MSG_CTRUNC": "syscall", - "syscall.MSG_DONTROUTE": "syscall", - "syscall.MSG_DONTWAIT": "syscall", - "syscall.MSG_EOF": "syscall", - "syscall.MSG_EOR": "syscall", - "syscall.MSG_ERRQUEUE": "syscall", - "syscall.MSG_FASTOPEN": "syscall", - "syscall.MSG_FIN": "syscall", - "syscall.MSG_FLUSH": "syscall", - "syscall.MSG_HAVEMORE": "syscall", - "syscall.MSG_HOLD": "syscall", - "syscall.MSG_IOVUSRSPACE": "syscall", - "syscall.MSG_LENUSRSPACE": "syscall", - "syscall.MSG_MCAST": "syscall", - "syscall.MSG_MORE": "syscall", - "syscall.MSG_NAMEMBUF": "syscall", - "syscall.MSG_NBIO": "syscall", - "syscall.MSG_NEEDSA": "syscall", - "syscall.MSG_NOSIGNAL": "syscall", - "syscall.MSG_NOTIFICATION": "syscall", - "syscall.MSG_OOB": "syscall", - "syscall.MSG_PEEK": "syscall", - "syscall.MSG_PROXY": "syscall", - "syscall.MSG_RCVMORE": "syscall", - "syscall.MSG_RST": "syscall", - "syscall.MSG_SEND": "syscall", - "syscall.MSG_SYN": "syscall", - "syscall.MSG_TRUNC": 
"syscall", - "syscall.MSG_TRYHARD": "syscall", - "syscall.MSG_USERFLAGS": "syscall", - "syscall.MSG_WAITALL": "syscall", - "syscall.MSG_WAITFORONE": "syscall", - "syscall.MSG_WAITSTREAM": "syscall", - "syscall.MS_ACTIVE": "syscall", - "syscall.MS_ASYNC": "syscall", - "syscall.MS_BIND": "syscall", - "syscall.MS_DEACTIVATE": "syscall", - "syscall.MS_DIRSYNC": "syscall", - "syscall.MS_INVALIDATE": "syscall", - "syscall.MS_I_VERSION": "syscall", - "syscall.MS_KERNMOUNT": "syscall", - "syscall.MS_KILLPAGES": "syscall", - "syscall.MS_MANDLOCK": "syscall", - "syscall.MS_MGC_MSK": "syscall", - "syscall.MS_MGC_VAL": "syscall", - "syscall.MS_MOVE": "syscall", - "syscall.MS_NOATIME": "syscall", - "syscall.MS_NODEV": "syscall", - "syscall.MS_NODIRATIME": "syscall", - "syscall.MS_NOEXEC": "syscall", - "syscall.MS_NOSUID": "syscall", - "syscall.MS_NOUSER": "syscall", - "syscall.MS_POSIXACL": "syscall", - "syscall.MS_PRIVATE": "syscall", - "syscall.MS_RDONLY": "syscall", - "syscall.MS_REC": "syscall", - "syscall.MS_RELATIME": "syscall", - "syscall.MS_REMOUNT": "syscall", - "syscall.MS_RMT_MASK": "syscall", - "syscall.MS_SHARED": "syscall", - "syscall.MS_SILENT": "syscall", - "syscall.MS_SLAVE": "syscall", - "syscall.MS_STRICTATIME": "syscall", - "syscall.MS_SYNC": "syscall", - "syscall.MS_SYNCHRONOUS": "syscall", - "syscall.MS_UNBINDABLE": "syscall", - "syscall.Madvise": "syscall", - "syscall.MapViewOfFile": "syscall", - "syscall.MaxTokenInfoClass": "syscall", - "syscall.Mclpool": "syscall", - "syscall.MibIfRow": "syscall", - "syscall.Mkdir": "syscall", - "syscall.Mkdirat": "syscall", - "syscall.Mkfifo": "syscall", - "syscall.Mknod": "syscall", - "syscall.Mknodat": "syscall", - "syscall.Mlock": "syscall", - "syscall.Mlockall": "syscall", - "syscall.Mmap": "syscall", - "syscall.Mount": "syscall", - "syscall.MoveFile": "syscall", - "syscall.Mprotect": "syscall", - "syscall.Msghdr": "syscall", - "syscall.Munlock": "syscall", - "syscall.Munlockall": "syscall", - "syscall.Munmap": "syscall", - "syscall.MustLoadDLL": "syscall", - "syscall.NAME_MAX": "syscall", - "syscall.NETLINK_ADD_MEMBERSHIP": "syscall", - "syscall.NETLINK_AUDIT": "syscall", - "syscall.NETLINK_BROADCAST_ERROR": "syscall", - "syscall.NETLINK_CONNECTOR": "syscall", - "syscall.NETLINK_DNRTMSG": "syscall", - "syscall.NETLINK_DROP_MEMBERSHIP": "syscall", - "syscall.NETLINK_ECRYPTFS": "syscall", - "syscall.NETLINK_FIB_LOOKUP": "syscall", - "syscall.NETLINK_FIREWALL": "syscall", - "syscall.NETLINK_GENERIC": "syscall", - "syscall.NETLINK_INET_DIAG": "syscall", - "syscall.NETLINK_IP6_FW": "syscall", - "syscall.NETLINK_ISCSI": "syscall", - "syscall.NETLINK_KOBJECT_UEVENT": "syscall", - "syscall.NETLINK_NETFILTER": "syscall", - "syscall.NETLINK_NFLOG": "syscall", - "syscall.NETLINK_NO_ENOBUFS": "syscall", - "syscall.NETLINK_PKTINFO": "syscall", - "syscall.NETLINK_RDMA": "syscall", - "syscall.NETLINK_ROUTE": "syscall", - "syscall.NETLINK_SCSITRANSPORT": "syscall", - "syscall.NETLINK_SELINUX": "syscall", - "syscall.NETLINK_UNUSED": "syscall", - "syscall.NETLINK_USERSOCK": "syscall", - "syscall.NETLINK_XFRM": "syscall", - "syscall.NET_RT_DUMP": "syscall", - "syscall.NET_RT_DUMP2": "syscall", - "syscall.NET_RT_FLAGS": "syscall", - "syscall.NET_RT_IFLIST": "syscall", - "syscall.NET_RT_IFLIST2": "syscall", - "syscall.NET_RT_IFLISTL": "syscall", - "syscall.NET_RT_IFMALIST": "syscall", - "syscall.NET_RT_MAXID": "syscall", - "syscall.NET_RT_OIFLIST": "syscall", - "syscall.NET_RT_OOIFLIST": "syscall", - "syscall.NET_RT_STAT": "syscall", - "syscall.NET_RT_STATS": 
"syscall", - "syscall.NET_RT_TABLE": "syscall", - "syscall.NET_RT_TRASH": "syscall", - "syscall.NLA_ALIGNTO": "syscall", - "syscall.NLA_F_NESTED": "syscall", - "syscall.NLA_F_NET_BYTEORDER": "syscall", - "syscall.NLA_HDRLEN": "syscall", - "syscall.NLMSG_ALIGNTO": "syscall", - "syscall.NLMSG_DONE": "syscall", - "syscall.NLMSG_ERROR": "syscall", - "syscall.NLMSG_HDRLEN": "syscall", - "syscall.NLMSG_MIN_TYPE": "syscall", - "syscall.NLMSG_NOOP": "syscall", - "syscall.NLMSG_OVERRUN": "syscall", - "syscall.NLM_F_ACK": "syscall", - "syscall.NLM_F_APPEND": "syscall", - "syscall.NLM_F_ATOMIC": "syscall", - "syscall.NLM_F_CREATE": "syscall", - "syscall.NLM_F_DUMP": "syscall", - "syscall.NLM_F_ECHO": "syscall", - "syscall.NLM_F_EXCL": "syscall", - "syscall.NLM_F_MATCH": "syscall", - "syscall.NLM_F_MULTI": "syscall", - "syscall.NLM_F_REPLACE": "syscall", - "syscall.NLM_F_REQUEST": "syscall", - "syscall.NLM_F_ROOT": "syscall", - "syscall.NOFLSH": "syscall", - "syscall.NOTE_ABSOLUTE": "syscall", - "syscall.NOTE_ATTRIB": "syscall", - "syscall.NOTE_CHILD": "syscall", - "syscall.NOTE_DELETE": "syscall", - "syscall.NOTE_EOF": "syscall", - "syscall.NOTE_EXEC": "syscall", - "syscall.NOTE_EXIT": "syscall", - "syscall.NOTE_EXITSTATUS": "syscall", - "syscall.NOTE_EXTEND": "syscall", - "syscall.NOTE_FFAND": "syscall", - "syscall.NOTE_FFCOPY": "syscall", - "syscall.NOTE_FFCTRLMASK": "syscall", - "syscall.NOTE_FFLAGSMASK": "syscall", - "syscall.NOTE_FFNOP": "syscall", - "syscall.NOTE_FFOR": "syscall", - "syscall.NOTE_FORK": "syscall", - "syscall.NOTE_LINK": "syscall", - "syscall.NOTE_LOWAT": "syscall", - "syscall.NOTE_NONE": "syscall", - "syscall.NOTE_NSECONDS": "syscall", - "syscall.NOTE_PCTRLMASK": "syscall", - "syscall.NOTE_PDATAMASK": "syscall", - "syscall.NOTE_REAP": "syscall", - "syscall.NOTE_RENAME": "syscall", - "syscall.NOTE_RESOURCEEND": "syscall", - "syscall.NOTE_REVOKE": "syscall", - "syscall.NOTE_SECONDS": "syscall", - "syscall.NOTE_SIGNAL": "syscall", - "syscall.NOTE_TRACK": "syscall", - "syscall.NOTE_TRACKERR": "syscall", - "syscall.NOTE_TRIGGER": "syscall", - "syscall.NOTE_TRUNCATE": "syscall", - "syscall.NOTE_USECONDS": "syscall", - "syscall.NOTE_VM_ERROR": "syscall", - "syscall.NOTE_VM_PRESSURE": "syscall", - "syscall.NOTE_VM_PRESSURE_SUDDEN_TERMINATE": "syscall", - "syscall.NOTE_VM_PRESSURE_TERMINATE": "syscall", - "syscall.NOTE_WRITE": "syscall", - "syscall.NameCanonical": "syscall", - "syscall.NameCanonicalEx": "syscall", - "syscall.NameDisplay": "syscall", - "syscall.NameDnsDomain": "syscall", - "syscall.NameFullyQualifiedDN": "syscall", - "syscall.NameSamCompatible": "syscall", - "syscall.NameServicePrincipal": "syscall", - "syscall.NameUniqueId": "syscall", - "syscall.NameUnknown": "syscall", - "syscall.NameUserPrincipal": "syscall", - "syscall.Nanosleep": "syscall", - "syscall.NetApiBufferFree": "syscall", - "syscall.NetGetJoinInformation": "syscall", - "syscall.NetSetupDomainName": "syscall", - "syscall.NetSetupUnjoined": "syscall", - "syscall.NetSetupUnknownStatus": "syscall", - "syscall.NetSetupWorkgroupName": "syscall", - "syscall.NetUserGetInfo": "syscall", - "syscall.NetlinkMessage": "syscall", - "syscall.NetlinkRIB": "syscall", - "syscall.NetlinkRouteAttr": "syscall", - "syscall.NetlinkRouteRequest": "syscall", - "syscall.NewCallback": "syscall", - "syscall.NewCallbackCDecl": "syscall", - "syscall.NewLazyDLL": "syscall", - "syscall.NlAttr": "syscall", - "syscall.NlMsgerr": "syscall", - "syscall.NlMsghdr": "syscall", - "syscall.NsecToFiletime": "syscall", - "syscall.NsecToTimespec": 
"syscall", - "syscall.NsecToTimeval": "syscall", - "syscall.Ntohs": "syscall", - "syscall.OCRNL": "syscall", - "syscall.OFDEL": "syscall", - "syscall.OFILL": "syscall", - "syscall.OFIOGETBMAP": "syscall", - "syscall.OID_PKIX_KP_SERVER_AUTH": "syscall", - "syscall.OID_SERVER_GATED_CRYPTO": "syscall", - "syscall.OID_SGC_NETSCAPE": "syscall", - "syscall.OLCUC": "syscall", - "syscall.ONLCR": "syscall", - "syscall.ONLRET": "syscall", - "syscall.ONOCR": "syscall", - "syscall.ONOEOT": "syscall", - "syscall.OPEN_ALWAYS": "syscall", - "syscall.OPEN_EXISTING": "syscall", - "syscall.OPOST": "syscall", - "syscall.O_ACCMODE": "syscall", - "syscall.O_ALERT": "syscall", - "syscall.O_ALT_IO": "syscall", - "syscall.O_APPEND": "syscall", - "syscall.O_ASYNC": "syscall", - "syscall.O_CLOEXEC": "syscall", - "syscall.O_CREAT": "syscall", - "syscall.O_DIRECT": "syscall", - "syscall.O_DIRECTORY": "syscall", - "syscall.O_DSYNC": "syscall", - "syscall.O_EVTONLY": "syscall", - "syscall.O_EXCL": "syscall", - "syscall.O_EXEC": "syscall", - "syscall.O_EXLOCK": "syscall", - "syscall.O_FSYNC": "syscall", - "syscall.O_LARGEFILE": "syscall", - "syscall.O_NDELAY": "syscall", - "syscall.O_NOATIME": "syscall", - "syscall.O_NOCTTY": "syscall", - "syscall.O_NOFOLLOW": "syscall", - "syscall.O_NONBLOCK": "syscall", - "syscall.O_NOSIGPIPE": "syscall", - "syscall.O_POPUP": "syscall", - "syscall.O_RDONLY": "syscall", - "syscall.O_RDWR": "syscall", - "syscall.O_RSYNC": "syscall", - "syscall.O_SHLOCK": "syscall", - "syscall.O_SYMLINK": "syscall", - "syscall.O_SYNC": "syscall", - "syscall.O_TRUNC": "syscall", - "syscall.O_TTY_INIT": "syscall", - "syscall.O_WRONLY": "syscall", - "syscall.Open": "syscall", - "syscall.OpenCurrentProcessToken": "syscall", - "syscall.OpenProcess": "syscall", - "syscall.OpenProcessToken": "syscall", - "syscall.Openat": "syscall", - "syscall.Overlapped": "syscall", - "syscall.PACKET_ADD_MEMBERSHIP": "syscall", - "syscall.PACKET_BROADCAST": "syscall", - "syscall.PACKET_DROP_MEMBERSHIP": "syscall", - "syscall.PACKET_FASTROUTE": "syscall", - "syscall.PACKET_HOST": "syscall", - "syscall.PACKET_LOOPBACK": "syscall", - "syscall.PACKET_MR_ALLMULTI": "syscall", - "syscall.PACKET_MR_MULTICAST": "syscall", - "syscall.PACKET_MR_PROMISC": "syscall", - "syscall.PACKET_MULTICAST": "syscall", - "syscall.PACKET_OTHERHOST": "syscall", - "syscall.PACKET_OUTGOING": "syscall", - "syscall.PACKET_RECV_OUTPUT": "syscall", - "syscall.PACKET_RX_RING": "syscall", - "syscall.PACKET_STATISTICS": "syscall", - "syscall.PAGE_EXECUTE_READ": "syscall", - "syscall.PAGE_EXECUTE_READWRITE": "syscall", - "syscall.PAGE_EXECUTE_WRITECOPY": "syscall", - "syscall.PAGE_READONLY": "syscall", - "syscall.PAGE_READWRITE": "syscall", - "syscall.PAGE_WRITECOPY": "syscall", - "syscall.PARENB": "syscall", - "syscall.PARMRK": "syscall", - "syscall.PARODD": "syscall", - "syscall.PENDIN": "syscall", - "syscall.PFL_HIDDEN": "syscall", - "syscall.PFL_MATCHES_PROTOCOL_ZERO": "syscall", - "syscall.PFL_MULTIPLE_PROTO_ENTRIES": "syscall", - "syscall.PFL_NETWORKDIRECT_PROVIDER": "syscall", - "syscall.PFL_RECOMMENDED_PROTO_ENTRY": "syscall", - "syscall.PF_FLUSH": "syscall", - "syscall.PKCS_7_ASN_ENCODING": "syscall", - "syscall.PMC5_PIPELINE_FLUSH": "syscall", - "syscall.PRIO_PGRP": "syscall", - "syscall.PRIO_PROCESS": "syscall", - "syscall.PRIO_USER": "syscall", - "syscall.PRI_IOFLUSH": "syscall", - "syscall.PROCESS_QUERY_INFORMATION": "syscall", - "syscall.PROCESS_TERMINATE": "syscall", - "syscall.PROT_EXEC": "syscall", - "syscall.PROT_GROWSDOWN": "syscall", - 
"syscall.PROT_GROWSUP": "syscall", - "syscall.PROT_NONE": "syscall", - "syscall.PROT_READ": "syscall", - "syscall.PROT_WRITE": "syscall", - "syscall.PROV_DH_SCHANNEL": "syscall", - "syscall.PROV_DSS": "syscall", - "syscall.PROV_DSS_DH": "syscall", - "syscall.PROV_EC_ECDSA_FULL": "syscall", - "syscall.PROV_EC_ECDSA_SIG": "syscall", - "syscall.PROV_EC_ECNRA_FULL": "syscall", - "syscall.PROV_EC_ECNRA_SIG": "syscall", - "syscall.PROV_FORTEZZA": "syscall", - "syscall.PROV_INTEL_SEC": "syscall", - "syscall.PROV_MS_EXCHANGE": "syscall", - "syscall.PROV_REPLACE_OWF": "syscall", - "syscall.PROV_RNG": "syscall", - "syscall.PROV_RSA_AES": "syscall", - "syscall.PROV_RSA_FULL": "syscall", - "syscall.PROV_RSA_SCHANNEL": "syscall", - "syscall.PROV_RSA_SIG": "syscall", - "syscall.PROV_SPYRUS_LYNKS": "syscall", - "syscall.PROV_SSL": "syscall", - "syscall.PR_CAPBSET_DROP": "syscall", - "syscall.PR_CAPBSET_READ": "syscall", - "syscall.PR_CLEAR_SECCOMP_FILTER": "syscall", - "syscall.PR_ENDIAN_BIG": "syscall", - "syscall.PR_ENDIAN_LITTLE": "syscall", - "syscall.PR_ENDIAN_PPC_LITTLE": "syscall", - "syscall.PR_FPEMU_NOPRINT": "syscall", - "syscall.PR_FPEMU_SIGFPE": "syscall", - "syscall.PR_FP_EXC_ASYNC": "syscall", - "syscall.PR_FP_EXC_DISABLED": "syscall", - "syscall.PR_FP_EXC_DIV": "syscall", - "syscall.PR_FP_EXC_INV": "syscall", - "syscall.PR_FP_EXC_NONRECOV": "syscall", - "syscall.PR_FP_EXC_OVF": "syscall", - "syscall.PR_FP_EXC_PRECISE": "syscall", - "syscall.PR_FP_EXC_RES": "syscall", - "syscall.PR_FP_EXC_SW_ENABLE": "syscall", - "syscall.PR_FP_EXC_UND": "syscall", - "syscall.PR_GET_DUMPABLE": "syscall", - "syscall.PR_GET_ENDIAN": "syscall", - "syscall.PR_GET_FPEMU": "syscall", - "syscall.PR_GET_FPEXC": "syscall", - "syscall.PR_GET_KEEPCAPS": "syscall", - "syscall.PR_GET_NAME": "syscall", - "syscall.PR_GET_PDEATHSIG": "syscall", - "syscall.PR_GET_SECCOMP": "syscall", - "syscall.PR_GET_SECCOMP_FILTER": "syscall", - "syscall.PR_GET_SECUREBITS": "syscall", - "syscall.PR_GET_TIMERSLACK": "syscall", - "syscall.PR_GET_TIMING": "syscall", - "syscall.PR_GET_TSC": "syscall", - "syscall.PR_GET_UNALIGN": "syscall", - "syscall.PR_MCE_KILL": "syscall", - "syscall.PR_MCE_KILL_CLEAR": "syscall", - "syscall.PR_MCE_KILL_DEFAULT": "syscall", - "syscall.PR_MCE_KILL_EARLY": "syscall", - "syscall.PR_MCE_KILL_GET": "syscall", - "syscall.PR_MCE_KILL_LATE": "syscall", - "syscall.PR_MCE_KILL_SET": "syscall", - "syscall.PR_SECCOMP_FILTER_EVENT": "syscall", - "syscall.PR_SECCOMP_FILTER_SYSCALL": "syscall", - "syscall.PR_SET_DUMPABLE": "syscall", - "syscall.PR_SET_ENDIAN": "syscall", - "syscall.PR_SET_FPEMU": "syscall", - "syscall.PR_SET_FPEXC": "syscall", - "syscall.PR_SET_KEEPCAPS": "syscall", - "syscall.PR_SET_NAME": "syscall", - "syscall.PR_SET_PDEATHSIG": "syscall", - "syscall.PR_SET_PTRACER": "syscall", - "syscall.PR_SET_SECCOMP": "syscall", - "syscall.PR_SET_SECCOMP_FILTER": "syscall", - "syscall.PR_SET_SECUREBITS": "syscall", - "syscall.PR_SET_TIMERSLACK": "syscall", - "syscall.PR_SET_TIMING": "syscall", - "syscall.PR_SET_TSC": "syscall", - "syscall.PR_SET_UNALIGN": "syscall", - "syscall.PR_TASK_PERF_EVENTS_DISABLE": "syscall", - "syscall.PR_TASK_PERF_EVENTS_ENABLE": "syscall", - "syscall.PR_TIMING_STATISTICAL": "syscall", - "syscall.PR_TIMING_TIMESTAMP": "syscall", - "syscall.PR_TSC_ENABLE": "syscall", - "syscall.PR_TSC_SIGSEGV": "syscall", - "syscall.PR_UNALIGN_NOPRINT": "syscall", - "syscall.PR_UNALIGN_SIGBUS": "syscall", - "syscall.PTRACE_ARCH_PRCTL": "syscall", - "syscall.PTRACE_ATTACH": "syscall", - 
"syscall.PTRACE_CONT": "syscall", - "syscall.PTRACE_DETACH": "syscall", - "syscall.PTRACE_EVENT_CLONE": "syscall", - "syscall.PTRACE_EVENT_EXEC": "syscall", - "syscall.PTRACE_EVENT_EXIT": "syscall", - "syscall.PTRACE_EVENT_FORK": "syscall", - "syscall.PTRACE_EVENT_VFORK": "syscall", - "syscall.PTRACE_EVENT_VFORK_DONE": "syscall", - "syscall.PTRACE_GETCRUNCHREGS": "syscall", - "syscall.PTRACE_GETEVENTMSG": "syscall", - "syscall.PTRACE_GETFPREGS": "syscall", - "syscall.PTRACE_GETFPXREGS": "syscall", - "syscall.PTRACE_GETHBPREGS": "syscall", - "syscall.PTRACE_GETREGS": "syscall", - "syscall.PTRACE_GETREGSET": "syscall", - "syscall.PTRACE_GETSIGINFO": "syscall", - "syscall.PTRACE_GETVFPREGS": "syscall", - "syscall.PTRACE_GETWMMXREGS": "syscall", - "syscall.PTRACE_GET_THREAD_AREA": "syscall", - "syscall.PTRACE_KILL": "syscall", - "syscall.PTRACE_OLDSETOPTIONS": "syscall", - "syscall.PTRACE_O_MASK": "syscall", - "syscall.PTRACE_O_TRACECLONE": "syscall", - "syscall.PTRACE_O_TRACEEXEC": "syscall", - "syscall.PTRACE_O_TRACEEXIT": "syscall", - "syscall.PTRACE_O_TRACEFORK": "syscall", - "syscall.PTRACE_O_TRACESYSGOOD": "syscall", - "syscall.PTRACE_O_TRACEVFORK": "syscall", - "syscall.PTRACE_O_TRACEVFORKDONE": "syscall", - "syscall.PTRACE_PEEKDATA": "syscall", - "syscall.PTRACE_PEEKTEXT": "syscall", - "syscall.PTRACE_PEEKUSR": "syscall", - "syscall.PTRACE_POKEDATA": "syscall", - "syscall.PTRACE_POKETEXT": "syscall", - "syscall.PTRACE_POKEUSR": "syscall", - "syscall.PTRACE_SETCRUNCHREGS": "syscall", - "syscall.PTRACE_SETFPREGS": "syscall", - "syscall.PTRACE_SETFPXREGS": "syscall", - "syscall.PTRACE_SETHBPREGS": "syscall", - "syscall.PTRACE_SETOPTIONS": "syscall", - "syscall.PTRACE_SETREGS": "syscall", - "syscall.PTRACE_SETREGSET": "syscall", - "syscall.PTRACE_SETSIGINFO": "syscall", - "syscall.PTRACE_SETVFPREGS": "syscall", - "syscall.PTRACE_SETWMMXREGS": "syscall", - "syscall.PTRACE_SET_SYSCALL": "syscall", - "syscall.PTRACE_SET_THREAD_AREA": "syscall", - "syscall.PTRACE_SINGLEBLOCK": "syscall", - "syscall.PTRACE_SINGLESTEP": "syscall", - "syscall.PTRACE_SYSCALL": "syscall", - "syscall.PTRACE_SYSEMU": "syscall", - "syscall.PTRACE_SYSEMU_SINGLESTEP": "syscall", - "syscall.PTRACE_TRACEME": "syscall", - "syscall.PT_ATTACH": "syscall", - "syscall.PT_ATTACHEXC": "syscall", - "syscall.PT_CONTINUE": "syscall", - "syscall.PT_DATA_ADDR": "syscall", - "syscall.PT_DENY_ATTACH": "syscall", - "syscall.PT_DETACH": "syscall", - "syscall.PT_FIRSTMACH": "syscall", - "syscall.PT_FORCEQUOTA": "syscall", - "syscall.PT_KILL": "syscall", - "syscall.PT_MASK": "syscall", - "syscall.PT_READ_D": "syscall", - "syscall.PT_READ_I": "syscall", - "syscall.PT_READ_U": "syscall", - "syscall.PT_SIGEXC": "syscall", - "syscall.PT_STEP": "syscall", - "syscall.PT_TEXT_ADDR": "syscall", - "syscall.PT_TEXT_END_ADDR": "syscall", - "syscall.PT_THUPDATE": "syscall", - "syscall.PT_TRACE_ME": "syscall", - "syscall.PT_WRITE_D": "syscall", - "syscall.PT_WRITE_I": "syscall", - "syscall.PT_WRITE_U": "syscall", - "syscall.ParseDirent": "syscall", - "syscall.ParseNetlinkMessage": "syscall", - "syscall.ParseNetlinkRouteAttr": "syscall", - "syscall.ParseRoutingMessage": "syscall", - "syscall.ParseRoutingSockaddr": "syscall", - "syscall.ParseSocketControlMessage": "syscall", - "syscall.ParseUnixCredentials": "syscall", - "syscall.ParseUnixRights": "syscall", - "syscall.PathMax": "syscall", - "syscall.Pathconf": "syscall", - "syscall.Pause": "syscall", - "syscall.Pipe": "syscall", - "syscall.Pipe2": "syscall", - "syscall.PivotRoot": "syscall", - 
"syscall.PostQueuedCompletionStatus": "syscall", - "syscall.Pread": "syscall", - "syscall.Proc": "syscall", - "syscall.ProcAttr": "syscall", - "syscall.Process32First": "syscall", - "syscall.Process32Next": "syscall", - "syscall.ProcessEntry32": "syscall", - "syscall.ProcessInformation": "syscall", - "syscall.Protoent": "syscall", - "syscall.PtraceAttach": "syscall", - "syscall.PtraceCont": "syscall", - "syscall.PtraceDetach": "syscall", - "syscall.PtraceGetEventMsg": "syscall", - "syscall.PtraceGetRegs": "syscall", - "syscall.PtracePeekData": "syscall", - "syscall.PtracePeekText": "syscall", - "syscall.PtracePokeData": "syscall", - "syscall.PtracePokeText": "syscall", - "syscall.PtraceRegs": "syscall", - "syscall.PtraceSetOptions": "syscall", - "syscall.PtraceSetRegs": "syscall", - "syscall.PtraceSingleStep": "syscall", - "syscall.PtraceSyscall": "syscall", - "syscall.Pwrite": "syscall", - "syscall.REG_BINARY": "syscall", - "syscall.REG_DWORD": "syscall", - "syscall.REG_DWORD_BIG_ENDIAN": "syscall", - "syscall.REG_DWORD_LITTLE_ENDIAN": "syscall", - "syscall.REG_EXPAND_SZ": "syscall", - "syscall.REG_FULL_RESOURCE_DESCRIPTOR": "syscall", - "syscall.REG_LINK": "syscall", - "syscall.REG_MULTI_SZ": "syscall", - "syscall.REG_NONE": "syscall", - "syscall.REG_QWORD": "syscall", - "syscall.REG_QWORD_LITTLE_ENDIAN": "syscall", - "syscall.REG_RESOURCE_LIST": "syscall", - "syscall.REG_RESOURCE_REQUIREMENTS_LIST": "syscall", - "syscall.REG_SZ": "syscall", - "syscall.RLIMIT_AS": "syscall", - "syscall.RLIMIT_CORE": "syscall", - "syscall.RLIMIT_CPU": "syscall", - "syscall.RLIMIT_DATA": "syscall", - "syscall.RLIMIT_FSIZE": "syscall", - "syscall.RLIMIT_NOFILE": "syscall", - "syscall.RLIMIT_STACK": "syscall", - "syscall.RLIM_INFINITY": "syscall", - "syscall.RTAX_ADVMSS": "syscall", - "syscall.RTAX_AUTHOR": "syscall", - "syscall.RTAX_BRD": "syscall", - "syscall.RTAX_CWND": "syscall", - "syscall.RTAX_DST": "syscall", - "syscall.RTAX_FEATURES": "syscall", - "syscall.RTAX_FEATURE_ALLFRAG": "syscall", - "syscall.RTAX_FEATURE_ECN": "syscall", - "syscall.RTAX_FEATURE_SACK": "syscall", - "syscall.RTAX_FEATURE_TIMESTAMP": "syscall", - "syscall.RTAX_GATEWAY": "syscall", - "syscall.RTAX_GENMASK": "syscall", - "syscall.RTAX_HOPLIMIT": "syscall", - "syscall.RTAX_IFA": "syscall", - "syscall.RTAX_IFP": "syscall", - "syscall.RTAX_INITCWND": "syscall", - "syscall.RTAX_INITRWND": "syscall", - "syscall.RTAX_LABEL": "syscall", - "syscall.RTAX_LOCK": "syscall", - "syscall.RTAX_MAX": "syscall", - "syscall.RTAX_MTU": "syscall", - "syscall.RTAX_NETMASK": "syscall", - "syscall.RTAX_REORDERING": "syscall", - "syscall.RTAX_RTO_MIN": "syscall", - "syscall.RTAX_RTT": "syscall", - "syscall.RTAX_RTTVAR": "syscall", - "syscall.RTAX_SRC": "syscall", - "syscall.RTAX_SRCMASK": "syscall", - "syscall.RTAX_SSTHRESH": "syscall", - "syscall.RTAX_TAG": "syscall", - "syscall.RTAX_UNSPEC": "syscall", - "syscall.RTAX_WINDOW": "syscall", - "syscall.RTA_ALIGNTO": "syscall", - "syscall.RTA_AUTHOR": "syscall", - "syscall.RTA_BRD": "syscall", - "syscall.RTA_CACHEINFO": "syscall", - "syscall.RTA_DST": "syscall", - "syscall.RTA_FLOW": "syscall", - "syscall.RTA_GATEWAY": "syscall", - "syscall.RTA_GENMASK": "syscall", - "syscall.RTA_IFA": "syscall", - "syscall.RTA_IFP": "syscall", - "syscall.RTA_IIF": "syscall", - "syscall.RTA_LABEL": "syscall", - "syscall.RTA_MAX": "syscall", - "syscall.RTA_METRICS": "syscall", - "syscall.RTA_MULTIPATH": "syscall", - "syscall.RTA_NETMASK": "syscall", - "syscall.RTA_OIF": "syscall", - "syscall.RTA_PREFSRC": "syscall", - 
"syscall.RTA_PRIORITY": "syscall", - "syscall.RTA_SRC": "syscall", - "syscall.RTA_SRCMASK": "syscall", - "syscall.RTA_TABLE": "syscall", - "syscall.RTA_TAG": "syscall", - "syscall.RTA_UNSPEC": "syscall", - "syscall.RTCF_DIRECTSRC": "syscall", - "syscall.RTCF_DOREDIRECT": "syscall", - "syscall.RTCF_LOG": "syscall", - "syscall.RTCF_MASQ": "syscall", - "syscall.RTCF_NAT": "syscall", - "syscall.RTCF_VALVE": "syscall", - "syscall.RTF_ADDRCLASSMASK": "syscall", - "syscall.RTF_ADDRCONF": "syscall", - "syscall.RTF_ALLONLINK": "syscall", - "syscall.RTF_ANNOUNCE": "syscall", - "syscall.RTF_BLACKHOLE": "syscall", - "syscall.RTF_BROADCAST": "syscall", - "syscall.RTF_CACHE": "syscall", - "syscall.RTF_CLONED": "syscall", - "syscall.RTF_CLONING": "syscall", - "syscall.RTF_CONDEMNED": "syscall", - "syscall.RTF_DEFAULT": "syscall", - "syscall.RTF_DELCLONE": "syscall", - "syscall.RTF_DONE": "syscall", - "syscall.RTF_DYNAMIC": "syscall", - "syscall.RTF_FLOW": "syscall", - "syscall.RTF_FMASK": "syscall", - "syscall.RTF_GATEWAY": "syscall", - "syscall.RTF_GWFLAG_COMPAT": "syscall", - "syscall.RTF_HOST": "syscall", - "syscall.RTF_IFREF": "syscall", - "syscall.RTF_IFSCOPE": "syscall", - "syscall.RTF_INTERFACE": "syscall", - "syscall.RTF_IRTT": "syscall", - "syscall.RTF_LINKRT": "syscall", - "syscall.RTF_LLDATA": "syscall", - "syscall.RTF_LLINFO": "syscall", - "syscall.RTF_LOCAL": "syscall", - "syscall.RTF_MASK": "syscall", - "syscall.RTF_MODIFIED": "syscall", - "syscall.RTF_MPATH": "syscall", - "syscall.RTF_MPLS": "syscall", - "syscall.RTF_MSS": "syscall", - "syscall.RTF_MTU": "syscall", - "syscall.RTF_MULTICAST": "syscall", - "syscall.RTF_NAT": "syscall", - "syscall.RTF_NOFORWARD": "syscall", - "syscall.RTF_NONEXTHOP": "syscall", - "syscall.RTF_NOPMTUDISC": "syscall", - "syscall.RTF_PERMANENT_ARP": "syscall", - "syscall.RTF_PINNED": "syscall", - "syscall.RTF_POLICY": "syscall", - "syscall.RTF_PRCLONING": "syscall", - "syscall.RTF_PROTO1": "syscall", - "syscall.RTF_PROTO2": "syscall", - "syscall.RTF_PROTO3": "syscall", - "syscall.RTF_REINSTATE": "syscall", - "syscall.RTF_REJECT": "syscall", - "syscall.RTF_RNH_LOCKED": "syscall", - "syscall.RTF_SOURCE": "syscall", - "syscall.RTF_SRC": "syscall", - "syscall.RTF_STATIC": "syscall", - "syscall.RTF_STICKY": "syscall", - "syscall.RTF_THROW": "syscall", - "syscall.RTF_TUNNEL": "syscall", - "syscall.RTF_UP": "syscall", - "syscall.RTF_USETRAILERS": "syscall", - "syscall.RTF_WASCLONED": "syscall", - "syscall.RTF_WINDOW": "syscall", - "syscall.RTF_XRESOLVE": "syscall", - "syscall.RTM_ADD": "syscall", - "syscall.RTM_BASE": "syscall", - "syscall.RTM_CHANGE": "syscall", - "syscall.RTM_CHGADDR": "syscall", - "syscall.RTM_DELACTION": "syscall", - "syscall.RTM_DELADDR": "syscall", - "syscall.RTM_DELADDRLABEL": "syscall", - "syscall.RTM_DELETE": "syscall", - "syscall.RTM_DELLINK": "syscall", - "syscall.RTM_DELMADDR": "syscall", - "syscall.RTM_DELNEIGH": "syscall", - "syscall.RTM_DELQDISC": "syscall", - "syscall.RTM_DELROUTE": "syscall", - "syscall.RTM_DELRULE": "syscall", - "syscall.RTM_DELTCLASS": "syscall", - "syscall.RTM_DELTFILTER": "syscall", - "syscall.RTM_DESYNC": "syscall", - "syscall.RTM_F_CLONED": "syscall", - "syscall.RTM_F_EQUALIZE": "syscall", - "syscall.RTM_F_NOTIFY": "syscall", - "syscall.RTM_F_PREFIX": "syscall", - "syscall.RTM_GET": "syscall", - "syscall.RTM_GET2": "syscall", - "syscall.RTM_GETACTION": "syscall", - "syscall.RTM_GETADDR": "syscall", - "syscall.RTM_GETADDRLABEL": "syscall", - "syscall.RTM_GETANYCAST": "syscall", - "syscall.RTM_GETDCB": "syscall", - 
"syscall.RTM_GETLINK": "syscall", - "syscall.RTM_GETMULTICAST": "syscall", - "syscall.RTM_GETNEIGH": "syscall", - "syscall.RTM_GETNEIGHTBL": "syscall", - "syscall.RTM_GETQDISC": "syscall", - "syscall.RTM_GETROUTE": "syscall", - "syscall.RTM_GETRULE": "syscall", - "syscall.RTM_GETTCLASS": "syscall", - "syscall.RTM_GETTFILTER": "syscall", - "syscall.RTM_IEEE80211": "syscall", - "syscall.RTM_IFANNOUNCE": "syscall", - "syscall.RTM_IFINFO": "syscall", - "syscall.RTM_IFINFO2": "syscall", - "syscall.RTM_LLINFO_UPD": "syscall", - "syscall.RTM_LOCK": "syscall", - "syscall.RTM_LOSING": "syscall", - "syscall.RTM_MAX": "syscall", - "syscall.RTM_MAXSIZE": "syscall", - "syscall.RTM_MISS": "syscall", - "syscall.RTM_NEWACTION": "syscall", - "syscall.RTM_NEWADDR": "syscall", - "syscall.RTM_NEWADDRLABEL": "syscall", - "syscall.RTM_NEWLINK": "syscall", - "syscall.RTM_NEWMADDR": "syscall", - "syscall.RTM_NEWMADDR2": "syscall", - "syscall.RTM_NEWNDUSEROPT": "syscall", - "syscall.RTM_NEWNEIGH": "syscall", - "syscall.RTM_NEWNEIGHTBL": "syscall", - "syscall.RTM_NEWPREFIX": "syscall", - "syscall.RTM_NEWQDISC": "syscall", - "syscall.RTM_NEWROUTE": "syscall", - "syscall.RTM_NEWRULE": "syscall", - "syscall.RTM_NEWTCLASS": "syscall", - "syscall.RTM_NEWTFILTER": "syscall", - "syscall.RTM_NR_FAMILIES": "syscall", - "syscall.RTM_NR_MSGTYPES": "syscall", - "syscall.RTM_OIFINFO": "syscall", - "syscall.RTM_OLDADD": "syscall", - "syscall.RTM_OLDDEL": "syscall", - "syscall.RTM_OOIFINFO": "syscall", - "syscall.RTM_REDIRECT": "syscall", - "syscall.RTM_RESOLVE": "syscall", - "syscall.RTM_RTTUNIT": "syscall", - "syscall.RTM_SETDCB": "syscall", - "syscall.RTM_SETGATE": "syscall", - "syscall.RTM_SETLINK": "syscall", - "syscall.RTM_SETNEIGHTBL": "syscall", - "syscall.RTM_VERSION": "syscall", - "syscall.RTNH_ALIGNTO": "syscall", - "syscall.RTNH_F_DEAD": "syscall", - "syscall.RTNH_F_ONLINK": "syscall", - "syscall.RTNH_F_PERVASIVE": "syscall", - "syscall.RTNLGRP_IPV4_IFADDR": "syscall", - "syscall.RTNLGRP_IPV4_MROUTE": "syscall", - "syscall.RTNLGRP_IPV4_ROUTE": "syscall", - "syscall.RTNLGRP_IPV4_RULE": "syscall", - "syscall.RTNLGRP_IPV6_IFADDR": "syscall", - "syscall.RTNLGRP_IPV6_IFINFO": "syscall", - "syscall.RTNLGRP_IPV6_MROUTE": "syscall", - "syscall.RTNLGRP_IPV6_PREFIX": "syscall", - "syscall.RTNLGRP_IPV6_ROUTE": "syscall", - "syscall.RTNLGRP_IPV6_RULE": "syscall", - "syscall.RTNLGRP_LINK": "syscall", - "syscall.RTNLGRP_ND_USEROPT": "syscall", - "syscall.RTNLGRP_NEIGH": "syscall", - "syscall.RTNLGRP_NONE": "syscall", - "syscall.RTNLGRP_NOTIFY": "syscall", - "syscall.RTNLGRP_TC": "syscall", - "syscall.RTN_ANYCAST": "syscall", - "syscall.RTN_BLACKHOLE": "syscall", - "syscall.RTN_BROADCAST": "syscall", - "syscall.RTN_LOCAL": "syscall", - "syscall.RTN_MAX": "syscall", - "syscall.RTN_MULTICAST": "syscall", - "syscall.RTN_NAT": "syscall", - "syscall.RTN_PROHIBIT": "syscall", - "syscall.RTN_THROW": "syscall", - "syscall.RTN_UNICAST": "syscall", - "syscall.RTN_UNREACHABLE": "syscall", - "syscall.RTN_UNSPEC": "syscall", - "syscall.RTN_XRESOLVE": "syscall", - "syscall.RTPROT_BIRD": "syscall", - "syscall.RTPROT_BOOT": "syscall", - "syscall.RTPROT_DHCP": "syscall", - "syscall.RTPROT_DNROUTED": "syscall", - "syscall.RTPROT_GATED": "syscall", - "syscall.RTPROT_KERNEL": "syscall", - "syscall.RTPROT_MRT": "syscall", - "syscall.RTPROT_NTK": "syscall", - "syscall.RTPROT_RA": "syscall", - "syscall.RTPROT_REDIRECT": "syscall", - "syscall.RTPROT_STATIC": "syscall", - "syscall.RTPROT_UNSPEC": "syscall", - "syscall.RTPROT_XORP": "syscall", - 
"syscall.RTPROT_ZEBRA": "syscall", - "syscall.RTV_EXPIRE": "syscall", - "syscall.RTV_HOPCOUNT": "syscall", - "syscall.RTV_MTU": "syscall", - "syscall.RTV_RPIPE": "syscall", - "syscall.RTV_RTT": "syscall", - "syscall.RTV_RTTVAR": "syscall", - "syscall.RTV_SPIPE": "syscall", - "syscall.RTV_SSTHRESH": "syscall", - "syscall.RTV_WEIGHT": "syscall", - "syscall.RT_CACHING_CONTEXT": "syscall", - "syscall.RT_CLASS_DEFAULT": "syscall", - "syscall.RT_CLASS_LOCAL": "syscall", - "syscall.RT_CLASS_MAIN": "syscall", - "syscall.RT_CLASS_MAX": "syscall", - "syscall.RT_CLASS_UNSPEC": "syscall", - "syscall.RT_DEFAULT_FIB": "syscall", - "syscall.RT_NORTREF": "syscall", - "syscall.RT_SCOPE_HOST": "syscall", - "syscall.RT_SCOPE_LINK": "syscall", - "syscall.RT_SCOPE_NOWHERE": "syscall", - "syscall.RT_SCOPE_SITE": "syscall", - "syscall.RT_SCOPE_UNIVERSE": "syscall", - "syscall.RT_TABLEID_MAX": "syscall", - "syscall.RT_TABLE_COMPAT": "syscall", - "syscall.RT_TABLE_DEFAULT": "syscall", - "syscall.RT_TABLE_LOCAL": "syscall", - "syscall.RT_TABLE_MAIN": "syscall", - "syscall.RT_TABLE_MAX": "syscall", - "syscall.RT_TABLE_UNSPEC": "syscall", - "syscall.RUSAGE_CHILDREN": "syscall", - "syscall.RUSAGE_SELF": "syscall", - "syscall.RUSAGE_THREAD": "syscall", - "syscall.Radvisory_t": "syscall", - "syscall.RawSockaddr": "syscall", - "syscall.RawSockaddrAny": "syscall", - "syscall.RawSockaddrDatalink": "syscall", - "syscall.RawSockaddrInet4": "syscall", - "syscall.RawSockaddrInet6": "syscall", - "syscall.RawSockaddrLinklayer": "syscall", - "syscall.RawSockaddrNetlink": "syscall", - "syscall.RawSockaddrUnix": "syscall", - "syscall.RawSyscall": "syscall", - "syscall.RawSyscall6": "syscall", - "syscall.Read": "syscall", - "syscall.ReadConsole": "syscall", - "syscall.ReadDirectoryChanges": "syscall", - "syscall.ReadDirent": "syscall", - "syscall.ReadFile": "syscall", - "syscall.Readlink": "syscall", - "syscall.Reboot": "syscall", - "syscall.Recvfrom": "syscall", - "syscall.Recvmsg": "syscall", - "syscall.RegCloseKey": "syscall", - "syscall.RegEnumKeyEx": "syscall", - "syscall.RegOpenKeyEx": "syscall", - "syscall.RegQueryInfoKey": "syscall", - "syscall.RegQueryValueEx": "syscall", - "syscall.RemoveDirectory": "syscall", - "syscall.Removexattr": "syscall", - "syscall.Rename": "syscall", - "syscall.Renameat": "syscall", - "syscall.Revoke": "syscall", - "syscall.Rlimit": "syscall", - "syscall.Rmdir": "syscall", - "syscall.RouteMessage": "syscall", - "syscall.RouteRIB": "syscall", - "syscall.RtAttr": "syscall", - "syscall.RtGenmsg": "syscall", - "syscall.RtMetrics": "syscall", - "syscall.RtMsg": "syscall", - "syscall.RtMsghdr": "syscall", - "syscall.RtNexthop": "syscall", - "syscall.Rusage": "syscall", - "syscall.SCM_BINTIME": "syscall", - "syscall.SCM_CREDENTIALS": "syscall", - "syscall.SCM_CREDS": "syscall", - "syscall.SCM_RIGHTS": "syscall", - "syscall.SCM_TIMESTAMP": "syscall", - "syscall.SCM_TIMESTAMPING": "syscall", - "syscall.SCM_TIMESTAMPNS": "syscall", - "syscall.SCM_TIMESTAMP_MONOTONIC": "syscall", - "syscall.SHUT_RD": "syscall", - "syscall.SHUT_RDWR": "syscall", - "syscall.SHUT_WR": "syscall", - "syscall.SID": "syscall", - "syscall.SIDAndAttributes": "syscall", - "syscall.SIGABRT": "syscall", - "syscall.SIGALRM": "syscall", - "syscall.SIGBUS": "syscall", - "syscall.SIGCHLD": "syscall", - "syscall.SIGCLD": "syscall", - "syscall.SIGCONT": "syscall", - "syscall.SIGEMT": "syscall", - "syscall.SIGFPE": "syscall", - "syscall.SIGHUP": "syscall", - "syscall.SIGILL": "syscall", - "syscall.SIGINFO": "syscall", - "syscall.SIGINT": 
"syscall", - "syscall.SIGIO": "syscall", - "syscall.SIGIOT": "syscall", - "syscall.SIGKILL": "syscall", - "syscall.SIGLIBRT": "syscall", - "syscall.SIGLWP": "syscall", - "syscall.SIGPIPE": "syscall", - "syscall.SIGPOLL": "syscall", - "syscall.SIGPROF": "syscall", - "syscall.SIGPWR": "syscall", - "syscall.SIGQUIT": "syscall", - "syscall.SIGSEGV": "syscall", - "syscall.SIGSTKFLT": "syscall", - "syscall.SIGSTOP": "syscall", - "syscall.SIGSYS": "syscall", - "syscall.SIGTERM": "syscall", - "syscall.SIGTHR": "syscall", - "syscall.SIGTRAP": "syscall", - "syscall.SIGTSTP": "syscall", - "syscall.SIGTTIN": "syscall", - "syscall.SIGTTOU": "syscall", - "syscall.SIGUNUSED": "syscall", - "syscall.SIGURG": "syscall", - "syscall.SIGUSR1": "syscall", - "syscall.SIGUSR2": "syscall", - "syscall.SIGVTALRM": "syscall", - "syscall.SIGWINCH": "syscall", - "syscall.SIGXCPU": "syscall", - "syscall.SIGXFSZ": "syscall", - "syscall.SIOCADDDLCI": "syscall", - "syscall.SIOCADDMULTI": "syscall", - "syscall.SIOCADDRT": "syscall", - "syscall.SIOCAIFADDR": "syscall", - "syscall.SIOCAIFGROUP": "syscall", - "syscall.SIOCALIFADDR": "syscall", - "syscall.SIOCARPIPLL": "syscall", - "syscall.SIOCATMARK": "syscall", - "syscall.SIOCAUTOADDR": "syscall", - "syscall.SIOCAUTONETMASK": "syscall", - "syscall.SIOCBRDGADD": "syscall", - "syscall.SIOCBRDGADDS": "syscall", - "syscall.SIOCBRDGARL": "syscall", - "syscall.SIOCBRDGDADDR": "syscall", - "syscall.SIOCBRDGDEL": "syscall", - "syscall.SIOCBRDGDELS": "syscall", - "syscall.SIOCBRDGFLUSH": "syscall", - "syscall.SIOCBRDGFRL": "syscall", - "syscall.SIOCBRDGGCACHE": "syscall", - "syscall.SIOCBRDGGFD": "syscall", - "syscall.SIOCBRDGGHT": "syscall", - "syscall.SIOCBRDGGIFFLGS": "syscall", - "syscall.SIOCBRDGGMA": "syscall", - "syscall.SIOCBRDGGPARAM": "syscall", - "syscall.SIOCBRDGGPRI": "syscall", - "syscall.SIOCBRDGGRL": "syscall", - "syscall.SIOCBRDGGSIFS": "syscall", - "syscall.SIOCBRDGGTO": "syscall", - "syscall.SIOCBRDGIFS": "syscall", - "syscall.SIOCBRDGRTS": "syscall", - "syscall.SIOCBRDGSADDR": "syscall", - "syscall.SIOCBRDGSCACHE": "syscall", - "syscall.SIOCBRDGSFD": "syscall", - "syscall.SIOCBRDGSHT": "syscall", - "syscall.SIOCBRDGSIFCOST": "syscall", - "syscall.SIOCBRDGSIFFLGS": "syscall", - "syscall.SIOCBRDGSIFPRIO": "syscall", - "syscall.SIOCBRDGSMA": "syscall", - "syscall.SIOCBRDGSPRI": "syscall", - "syscall.SIOCBRDGSPROTO": "syscall", - "syscall.SIOCBRDGSTO": "syscall", - "syscall.SIOCBRDGSTXHC": "syscall", - "syscall.SIOCDARP": "syscall", - "syscall.SIOCDELDLCI": "syscall", - "syscall.SIOCDELMULTI": "syscall", - "syscall.SIOCDELRT": "syscall", - "syscall.SIOCDEVPRIVATE": "syscall", - "syscall.SIOCDIFADDR": "syscall", - "syscall.SIOCDIFGROUP": "syscall", - "syscall.SIOCDIFPHYADDR": "syscall", - "syscall.SIOCDLIFADDR": "syscall", - "syscall.SIOCDRARP": "syscall", - "syscall.SIOCGARP": "syscall", - "syscall.SIOCGDRVSPEC": "syscall", - "syscall.SIOCGETKALIVE": "syscall", - "syscall.SIOCGETLABEL": "syscall", - "syscall.SIOCGETPFLOW": "syscall", - "syscall.SIOCGETPFSYNC": "syscall", - "syscall.SIOCGETSGCNT": "syscall", - "syscall.SIOCGETVIFCNT": "syscall", - "syscall.SIOCGETVLAN": "syscall", - "syscall.SIOCGHIWAT": "syscall", - "syscall.SIOCGIFADDR": "syscall", - "syscall.SIOCGIFADDRPREF": "syscall", - "syscall.SIOCGIFALIAS": "syscall", - "syscall.SIOCGIFALTMTU": "syscall", - "syscall.SIOCGIFASYNCMAP": "syscall", - "syscall.SIOCGIFBOND": "syscall", - "syscall.SIOCGIFBR": "syscall", - "syscall.SIOCGIFBRDADDR": "syscall", - "syscall.SIOCGIFCAP": "syscall", - "syscall.SIOCGIFCONF": 
"syscall", - "syscall.SIOCGIFCOUNT": "syscall", - "syscall.SIOCGIFDATA": "syscall", - "syscall.SIOCGIFDESCR": "syscall", - "syscall.SIOCGIFDEVMTU": "syscall", - "syscall.SIOCGIFDLT": "syscall", - "syscall.SIOCGIFDSTADDR": "syscall", - "syscall.SIOCGIFENCAP": "syscall", - "syscall.SIOCGIFFIB": "syscall", - "syscall.SIOCGIFFLAGS": "syscall", - "syscall.SIOCGIFGATTR": "syscall", - "syscall.SIOCGIFGENERIC": "syscall", - "syscall.SIOCGIFGMEMB": "syscall", - "syscall.SIOCGIFGROUP": "syscall", - "syscall.SIOCGIFHARDMTU": "syscall", - "syscall.SIOCGIFHWADDR": "syscall", - "syscall.SIOCGIFINDEX": "syscall", - "syscall.SIOCGIFKPI": "syscall", - "syscall.SIOCGIFMAC": "syscall", - "syscall.SIOCGIFMAP": "syscall", - "syscall.SIOCGIFMEDIA": "syscall", - "syscall.SIOCGIFMEM": "syscall", - "syscall.SIOCGIFMETRIC": "syscall", - "syscall.SIOCGIFMTU": "syscall", - "syscall.SIOCGIFNAME": "syscall", - "syscall.SIOCGIFNETMASK": "syscall", - "syscall.SIOCGIFPDSTADDR": "syscall", - "syscall.SIOCGIFPFLAGS": "syscall", - "syscall.SIOCGIFPHYS": "syscall", - "syscall.SIOCGIFPRIORITY": "syscall", - "syscall.SIOCGIFPSRCADDR": "syscall", - "syscall.SIOCGIFRDOMAIN": "syscall", - "syscall.SIOCGIFRTLABEL": "syscall", - "syscall.SIOCGIFSLAVE": "syscall", - "syscall.SIOCGIFSTATUS": "syscall", - "syscall.SIOCGIFTIMESLOT": "syscall", - "syscall.SIOCGIFTXQLEN": "syscall", - "syscall.SIOCGIFVLAN": "syscall", - "syscall.SIOCGIFWAKEFLAGS": "syscall", - "syscall.SIOCGIFXFLAGS": "syscall", - "syscall.SIOCGLIFADDR": "syscall", - "syscall.SIOCGLIFPHYADDR": "syscall", - "syscall.SIOCGLIFPHYRTABLE": "syscall", - "syscall.SIOCGLIFPHYTTL": "syscall", - "syscall.SIOCGLINKSTR": "syscall", - "syscall.SIOCGLOWAT": "syscall", - "syscall.SIOCGPGRP": "syscall", - "syscall.SIOCGPRIVATE_0": "syscall", - "syscall.SIOCGPRIVATE_1": "syscall", - "syscall.SIOCGRARP": "syscall", - "syscall.SIOCGSPPPPARAMS": "syscall", - "syscall.SIOCGSTAMP": "syscall", - "syscall.SIOCGSTAMPNS": "syscall", - "syscall.SIOCGVH": "syscall", - "syscall.SIOCGVNETID": "syscall", - "syscall.SIOCIFCREATE": "syscall", - "syscall.SIOCIFCREATE2": "syscall", - "syscall.SIOCIFDESTROY": "syscall", - "syscall.SIOCIFGCLONERS": "syscall", - "syscall.SIOCINITIFADDR": "syscall", - "syscall.SIOCPROTOPRIVATE": "syscall", - "syscall.SIOCRSLVMULTI": "syscall", - "syscall.SIOCRTMSG": "syscall", - "syscall.SIOCSARP": "syscall", - "syscall.SIOCSDRVSPEC": "syscall", - "syscall.SIOCSETKALIVE": "syscall", - "syscall.SIOCSETLABEL": "syscall", - "syscall.SIOCSETPFLOW": "syscall", - "syscall.SIOCSETPFSYNC": "syscall", - "syscall.SIOCSETVLAN": "syscall", - "syscall.SIOCSHIWAT": "syscall", - "syscall.SIOCSIFADDR": "syscall", - "syscall.SIOCSIFADDRPREF": "syscall", - "syscall.SIOCSIFALTMTU": "syscall", - "syscall.SIOCSIFASYNCMAP": "syscall", - "syscall.SIOCSIFBOND": "syscall", - "syscall.SIOCSIFBR": "syscall", - "syscall.SIOCSIFBRDADDR": "syscall", - "syscall.SIOCSIFCAP": "syscall", - "syscall.SIOCSIFDESCR": "syscall", - "syscall.SIOCSIFDSTADDR": "syscall", - "syscall.SIOCSIFENCAP": "syscall", - "syscall.SIOCSIFFIB": "syscall", - "syscall.SIOCSIFFLAGS": "syscall", - "syscall.SIOCSIFGATTR": "syscall", - "syscall.SIOCSIFGENERIC": "syscall", - "syscall.SIOCSIFHWADDR": "syscall", - "syscall.SIOCSIFHWBROADCAST": "syscall", - "syscall.SIOCSIFKPI": "syscall", - "syscall.SIOCSIFLINK": "syscall", - "syscall.SIOCSIFLLADDR": "syscall", - "syscall.SIOCSIFMAC": "syscall", - "syscall.SIOCSIFMAP": "syscall", - "syscall.SIOCSIFMEDIA": "syscall", - "syscall.SIOCSIFMEM": "syscall", - "syscall.SIOCSIFMETRIC": "syscall", - 
"syscall.SIOCSIFMTU": "syscall", - "syscall.SIOCSIFNAME": "syscall", - "syscall.SIOCSIFNETMASK": "syscall", - "syscall.SIOCSIFPFLAGS": "syscall", - "syscall.SIOCSIFPHYADDR": "syscall", - "syscall.SIOCSIFPHYS": "syscall", - "syscall.SIOCSIFPRIORITY": "syscall", - "syscall.SIOCSIFRDOMAIN": "syscall", - "syscall.SIOCSIFRTLABEL": "syscall", - "syscall.SIOCSIFRVNET": "syscall", - "syscall.SIOCSIFSLAVE": "syscall", - "syscall.SIOCSIFTIMESLOT": "syscall", - "syscall.SIOCSIFTXQLEN": "syscall", - "syscall.SIOCSIFVLAN": "syscall", - "syscall.SIOCSIFVNET": "syscall", - "syscall.SIOCSIFXFLAGS": "syscall", - "syscall.SIOCSLIFPHYADDR": "syscall", - "syscall.SIOCSLIFPHYRTABLE": "syscall", - "syscall.SIOCSLIFPHYTTL": "syscall", - "syscall.SIOCSLINKSTR": "syscall", - "syscall.SIOCSLOWAT": "syscall", - "syscall.SIOCSPGRP": "syscall", - "syscall.SIOCSRARP": "syscall", - "syscall.SIOCSSPPPPARAMS": "syscall", - "syscall.SIOCSVH": "syscall", - "syscall.SIOCSVNETID": "syscall", - "syscall.SIOCZIFDATA": "syscall", - "syscall.SIO_GET_EXTENSION_FUNCTION_POINTER": "syscall", - "syscall.SIO_GET_INTERFACE_LIST": "syscall", - "syscall.SIO_KEEPALIVE_VALS": "syscall", - "syscall.SIO_UDP_CONNRESET": "syscall", - "syscall.SOCK_CLOEXEC": "syscall", - "syscall.SOCK_DCCP": "syscall", - "syscall.SOCK_DGRAM": "syscall", - "syscall.SOCK_FLAGS_MASK": "syscall", - "syscall.SOCK_MAXADDRLEN": "syscall", - "syscall.SOCK_NONBLOCK": "syscall", - "syscall.SOCK_NOSIGPIPE": "syscall", - "syscall.SOCK_PACKET": "syscall", - "syscall.SOCK_RAW": "syscall", - "syscall.SOCK_RDM": "syscall", - "syscall.SOCK_SEQPACKET": "syscall", - "syscall.SOCK_STREAM": "syscall", - "syscall.SOL_AAL": "syscall", - "syscall.SOL_ATM": "syscall", - "syscall.SOL_DECNET": "syscall", - "syscall.SOL_ICMPV6": "syscall", - "syscall.SOL_IP": "syscall", - "syscall.SOL_IPV6": "syscall", - "syscall.SOL_IRDA": "syscall", - "syscall.SOL_PACKET": "syscall", - "syscall.SOL_RAW": "syscall", - "syscall.SOL_SOCKET": "syscall", - "syscall.SOL_TCP": "syscall", - "syscall.SOL_X25": "syscall", - "syscall.SOMAXCONN": "syscall", - "syscall.SO_ACCEPTCONN": "syscall", - "syscall.SO_ACCEPTFILTER": "syscall", - "syscall.SO_ATTACH_FILTER": "syscall", - "syscall.SO_BINDANY": "syscall", - "syscall.SO_BINDTODEVICE": "syscall", - "syscall.SO_BINTIME": "syscall", - "syscall.SO_BROADCAST": "syscall", - "syscall.SO_BSDCOMPAT": "syscall", - "syscall.SO_DEBUG": "syscall", - "syscall.SO_DETACH_FILTER": "syscall", - "syscall.SO_DOMAIN": "syscall", - "syscall.SO_DONTROUTE": "syscall", - "syscall.SO_DONTTRUNC": "syscall", - "syscall.SO_ERROR": "syscall", - "syscall.SO_KEEPALIVE": "syscall", - "syscall.SO_LABEL": "syscall", - "syscall.SO_LINGER": "syscall", - "syscall.SO_LINGER_SEC": "syscall", - "syscall.SO_LISTENINCQLEN": "syscall", - "syscall.SO_LISTENQLEN": "syscall", - "syscall.SO_LISTENQLIMIT": "syscall", - "syscall.SO_MARK": "syscall", - "syscall.SO_NETPROC": "syscall", - "syscall.SO_NKE": "syscall", - "syscall.SO_NOADDRERR": "syscall", - "syscall.SO_NOHEADER": "syscall", - "syscall.SO_NOSIGPIPE": "syscall", - "syscall.SO_NOTIFYCONFLICT": "syscall", - "syscall.SO_NO_CHECK": "syscall", - "syscall.SO_NO_DDP": "syscall", - "syscall.SO_NO_OFFLOAD": "syscall", - "syscall.SO_NP_EXTENSIONS": "syscall", - "syscall.SO_NREAD": "syscall", - "syscall.SO_NWRITE": "syscall", - "syscall.SO_OOBINLINE": "syscall", - "syscall.SO_OVERFLOWED": "syscall", - "syscall.SO_PASSCRED": "syscall", - "syscall.SO_PASSSEC": "syscall", - "syscall.SO_PEERCRED": "syscall", - "syscall.SO_PEERLABEL": "syscall", - 
"syscall.SO_PEERNAME": "syscall", - "syscall.SO_PEERSEC": "syscall", - "syscall.SO_PRIORITY": "syscall", - "syscall.SO_PROTOCOL": "syscall", - "syscall.SO_PROTOTYPE": "syscall", - "syscall.SO_RANDOMPORT": "syscall", - "syscall.SO_RCVBUF": "syscall", - "syscall.SO_RCVBUFFORCE": "syscall", - "syscall.SO_RCVLOWAT": "syscall", - "syscall.SO_RCVTIMEO": "syscall", - "syscall.SO_RESTRICTIONS": "syscall", - "syscall.SO_RESTRICT_DENYIN": "syscall", - "syscall.SO_RESTRICT_DENYOUT": "syscall", - "syscall.SO_RESTRICT_DENYSET": "syscall", - "syscall.SO_REUSEADDR": "syscall", - "syscall.SO_REUSEPORT": "syscall", - "syscall.SO_REUSESHAREUID": "syscall", - "syscall.SO_RTABLE": "syscall", - "syscall.SO_RXQ_OVFL": "syscall", - "syscall.SO_SECURITY_AUTHENTICATION": "syscall", - "syscall.SO_SECURITY_ENCRYPTION_NETWORK": "syscall", - "syscall.SO_SECURITY_ENCRYPTION_TRANSPORT": "syscall", - "syscall.SO_SETFIB": "syscall", - "syscall.SO_SNDBUF": "syscall", - "syscall.SO_SNDBUFFORCE": "syscall", - "syscall.SO_SNDLOWAT": "syscall", - "syscall.SO_SNDTIMEO": "syscall", - "syscall.SO_SPLICE": "syscall", - "syscall.SO_TIMESTAMP": "syscall", - "syscall.SO_TIMESTAMPING": "syscall", - "syscall.SO_TIMESTAMPNS": "syscall", - "syscall.SO_TIMESTAMP_MONOTONIC": "syscall", - "syscall.SO_TYPE": "syscall", - "syscall.SO_UPCALLCLOSEWAIT": "syscall", - "syscall.SO_UPDATE_ACCEPT_CONTEXT": "syscall", - "syscall.SO_UPDATE_CONNECT_CONTEXT": "syscall", - "syscall.SO_USELOOPBACK": "syscall", - "syscall.SO_USER_COOKIE": "syscall", - "syscall.SO_VENDOR": "syscall", - "syscall.SO_WANTMORE": "syscall", - "syscall.SO_WANTOOBFLAG": "syscall", - "syscall.SSLExtraCertChainPolicyPara": "syscall", - "syscall.STANDARD_RIGHTS_ALL": "syscall", - "syscall.STANDARD_RIGHTS_EXECUTE": "syscall", - "syscall.STANDARD_RIGHTS_READ": "syscall", - "syscall.STANDARD_RIGHTS_REQUIRED": "syscall", - "syscall.STANDARD_RIGHTS_WRITE": "syscall", - "syscall.STARTF_USESHOWWINDOW": "syscall", - "syscall.STARTF_USESTDHANDLES": "syscall", - "syscall.STD_ERROR_HANDLE": "syscall", - "syscall.STD_INPUT_HANDLE": "syscall", - "syscall.STD_OUTPUT_HANDLE": "syscall", - "syscall.SUBLANG_ENGLISH_US": "syscall", - "syscall.SW_FORCEMINIMIZE": "syscall", - "syscall.SW_HIDE": "syscall", - "syscall.SW_MAXIMIZE": "syscall", - "syscall.SW_MINIMIZE": "syscall", - "syscall.SW_NORMAL": "syscall", - "syscall.SW_RESTORE": "syscall", - "syscall.SW_SHOW": "syscall", - "syscall.SW_SHOWDEFAULT": "syscall", - "syscall.SW_SHOWMAXIMIZED": "syscall", - "syscall.SW_SHOWMINIMIZED": "syscall", - "syscall.SW_SHOWMINNOACTIVE": "syscall", - "syscall.SW_SHOWNA": "syscall", - "syscall.SW_SHOWNOACTIVATE": "syscall", - "syscall.SW_SHOWNORMAL": "syscall", - "syscall.SYMBOLIC_LINK_FLAG_DIRECTORY": "syscall", - "syscall.SYNCHRONIZE": "syscall", - "syscall.SYSCTL_VERSION": "syscall", - "syscall.SYSCTL_VERS_0": "syscall", - "syscall.SYSCTL_VERS_1": "syscall", - "syscall.SYSCTL_VERS_MASK": "syscall", - "syscall.SYS_ABORT2": "syscall", - "syscall.SYS_ACCEPT": "syscall", - "syscall.SYS_ACCEPT4": "syscall", - "syscall.SYS_ACCEPT_NOCANCEL": "syscall", - "syscall.SYS_ACCESS": "syscall", - "syscall.SYS_ACCESS_EXTENDED": "syscall", - "syscall.SYS_ACCT": "syscall", - "syscall.SYS_ADD_KEY": "syscall", - "syscall.SYS_ADD_PROFIL": "syscall", - "syscall.SYS_ADJFREQ": "syscall", - "syscall.SYS_ADJTIME": "syscall", - "syscall.SYS_ADJTIMEX": "syscall", - "syscall.SYS_AFS_SYSCALL": "syscall", - "syscall.SYS_AIO_CANCEL": "syscall", - "syscall.SYS_AIO_ERROR": "syscall", - "syscall.SYS_AIO_FSYNC": "syscall", - "syscall.SYS_AIO_READ": 
"syscall", - "syscall.SYS_AIO_RETURN": "syscall", - "syscall.SYS_AIO_SUSPEND": "syscall", - "syscall.SYS_AIO_SUSPEND_NOCANCEL": "syscall", - "syscall.SYS_AIO_WRITE": "syscall", - "syscall.SYS_ALARM": "syscall", - "syscall.SYS_ARCH_PRCTL": "syscall", - "syscall.SYS_ARM_FADVISE64_64": "syscall", - "syscall.SYS_ARM_SYNC_FILE_RANGE": "syscall", - "syscall.SYS_ATGETMSG": "syscall", - "syscall.SYS_ATPGETREQ": "syscall", - "syscall.SYS_ATPGETRSP": "syscall", - "syscall.SYS_ATPSNDREQ": "syscall", - "syscall.SYS_ATPSNDRSP": "syscall", - "syscall.SYS_ATPUTMSG": "syscall", - "syscall.SYS_ATSOCKET": "syscall", - "syscall.SYS_AUDIT": "syscall", - "syscall.SYS_AUDITCTL": "syscall", - "syscall.SYS_AUDITON": "syscall", - "syscall.SYS_AUDIT_SESSION_JOIN": "syscall", - "syscall.SYS_AUDIT_SESSION_PORT": "syscall", - "syscall.SYS_AUDIT_SESSION_SELF": "syscall", - "syscall.SYS_BDFLUSH": "syscall", - "syscall.SYS_BIND": "syscall", - "syscall.SYS_BINDAT": "syscall", - "syscall.SYS_BREAK": "syscall", - "syscall.SYS_BRK": "syscall", - "syscall.SYS_BSDTHREAD_CREATE": "syscall", - "syscall.SYS_BSDTHREAD_REGISTER": "syscall", - "syscall.SYS_BSDTHREAD_TERMINATE": "syscall", - "syscall.SYS_CAPGET": "syscall", - "syscall.SYS_CAPSET": "syscall", - "syscall.SYS_CAP_ENTER": "syscall", - "syscall.SYS_CAP_FCNTLS_GET": "syscall", - "syscall.SYS_CAP_FCNTLS_LIMIT": "syscall", - "syscall.SYS_CAP_GETMODE": "syscall", - "syscall.SYS_CAP_GETRIGHTS": "syscall", - "syscall.SYS_CAP_IOCTLS_GET": "syscall", - "syscall.SYS_CAP_IOCTLS_LIMIT": "syscall", - "syscall.SYS_CAP_NEW": "syscall", - "syscall.SYS_CAP_RIGHTS_GET": "syscall", - "syscall.SYS_CAP_RIGHTS_LIMIT": "syscall", - "syscall.SYS_CHDIR": "syscall", - "syscall.SYS_CHFLAGS": "syscall", - "syscall.SYS_CHFLAGSAT": "syscall", - "syscall.SYS_CHMOD": "syscall", - "syscall.SYS_CHMOD_EXTENDED": "syscall", - "syscall.SYS_CHOWN": "syscall", - "syscall.SYS_CHOWN32": "syscall", - "syscall.SYS_CHROOT": "syscall", - "syscall.SYS_CHUD": "syscall", - "syscall.SYS_CLOCK_ADJTIME": "syscall", - "syscall.SYS_CLOCK_GETCPUCLOCKID2": "syscall", - "syscall.SYS_CLOCK_GETRES": "syscall", - "syscall.SYS_CLOCK_GETTIME": "syscall", - "syscall.SYS_CLOCK_NANOSLEEP": "syscall", - "syscall.SYS_CLOCK_SETTIME": "syscall", - "syscall.SYS_CLONE": "syscall", - "syscall.SYS_CLOSE": "syscall", - "syscall.SYS_CLOSEFROM": "syscall", - "syscall.SYS_CLOSE_NOCANCEL": "syscall", - "syscall.SYS_CONNECT": "syscall", - "syscall.SYS_CONNECTAT": "syscall", - "syscall.SYS_CONNECT_NOCANCEL": "syscall", - "syscall.SYS_COPYFILE": "syscall", - "syscall.SYS_CPUSET": "syscall", - "syscall.SYS_CPUSET_GETAFFINITY": "syscall", - "syscall.SYS_CPUSET_GETID": "syscall", - "syscall.SYS_CPUSET_SETAFFINITY": "syscall", - "syscall.SYS_CPUSET_SETID": "syscall", - "syscall.SYS_CREAT": "syscall", - "syscall.SYS_CREATE_MODULE": "syscall", - "syscall.SYS_CSOPS": "syscall", - "syscall.SYS_DELETE": "syscall", - "syscall.SYS_DELETE_MODULE": "syscall", - "syscall.SYS_DUP": "syscall", - "syscall.SYS_DUP2": "syscall", - "syscall.SYS_DUP3": "syscall", - "syscall.SYS_EACCESS": "syscall", - "syscall.SYS_EPOLL_CREATE": "syscall", - "syscall.SYS_EPOLL_CREATE1": "syscall", - "syscall.SYS_EPOLL_CTL": "syscall", - "syscall.SYS_EPOLL_CTL_OLD": "syscall", - "syscall.SYS_EPOLL_PWAIT": "syscall", - "syscall.SYS_EPOLL_WAIT": "syscall", - "syscall.SYS_EPOLL_WAIT_OLD": "syscall", - "syscall.SYS_EVENTFD": "syscall", - "syscall.SYS_EVENTFD2": "syscall", - "syscall.SYS_EXCHANGEDATA": "syscall", - "syscall.SYS_EXECVE": "syscall", - "syscall.SYS_EXIT": "syscall", - 
"syscall.SYS_EXIT_GROUP": "syscall", - "syscall.SYS_EXTATTRCTL": "syscall", - "syscall.SYS_EXTATTR_DELETE_FD": "syscall", - "syscall.SYS_EXTATTR_DELETE_FILE": "syscall", - "syscall.SYS_EXTATTR_DELETE_LINK": "syscall", - "syscall.SYS_EXTATTR_GET_FD": "syscall", - "syscall.SYS_EXTATTR_GET_FILE": "syscall", - "syscall.SYS_EXTATTR_GET_LINK": "syscall", - "syscall.SYS_EXTATTR_LIST_FD": "syscall", - "syscall.SYS_EXTATTR_LIST_FILE": "syscall", - "syscall.SYS_EXTATTR_LIST_LINK": "syscall", - "syscall.SYS_EXTATTR_SET_FD": "syscall", - "syscall.SYS_EXTATTR_SET_FILE": "syscall", - "syscall.SYS_EXTATTR_SET_LINK": "syscall", - "syscall.SYS_FACCESSAT": "syscall", - "syscall.SYS_FADVISE64": "syscall", - "syscall.SYS_FADVISE64_64": "syscall", - "syscall.SYS_FALLOCATE": "syscall", - "syscall.SYS_FANOTIFY_INIT": "syscall", - "syscall.SYS_FANOTIFY_MARK": "syscall", - "syscall.SYS_FCHDIR": "syscall", - "syscall.SYS_FCHFLAGS": "syscall", - "syscall.SYS_FCHMOD": "syscall", - "syscall.SYS_FCHMODAT": "syscall", - "syscall.SYS_FCHMOD_EXTENDED": "syscall", - "syscall.SYS_FCHOWN": "syscall", - "syscall.SYS_FCHOWN32": "syscall", - "syscall.SYS_FCHOWNAT": "syscall", - "syscall.SYS_FCHROOT": "syscall", - "syscall.SYS_FCNTL": "syscall", - "syscall.SYS_FCNTL64": "syscall", - "syscall.SYS_FCNTL_NOCANCEL": "syscall", - "syscall.SYS_FDATASYNC": "syscall", - "syscall.SYS_FEXECVE": "syscall", - "syscall.SYS_FFCLOCK_GETCOUNTER": "syscall", - "syscall.SYS_FFCLOCK_GETESTIMATE": "syscall", - "syscall.SYS_FFCLOCK_SETESTIMATE": "syscall", - "syscall.SYS_FFSCTL": "syscall", - "syscall.SYS_FGETATTRLIST": "syscall", - "syscall.SYS_FGETXATTR": "syscall", - "syscall.SYS_FHOPEN": "syscall", - "syscall.SYS_FHSTAT": "syscall", - "syscall.SYS_FHSTATFS": "syscall", - "syscall.SYS_FILEPORT_MAKEFD": "syscall", - "syscall.SYS_FILEPORT_MAKEPORT": "syscall", - "syscall.SYS_FKTRACE": "syscall", - "syscall.SYS_FLISTXATTR": "syscall", - "syscall.SYS_FLOCK": "syscall", - "syscall.SYS_FORK": "syscall", - "syscall.SYS_FPATHCONF": "syscall", - "syscall.SYS_FREEBSD6_FTRUNCATE": "syscall", - "syscall.SYS_FREEBSD6_LSEEK": "syscall", - "syscall.SYS_FREEBSD6_MMAP": "syscall", - "syscall.SYS_FREEBSD6_PREAD": "syscall", - "syscall.SYS_FREEBSD6_PWRITE": "syscall", - "syscall.SYS_FREEBSD6_TRUNCATE": "syscall", - "syscall.SYS_FREMOVEXATTR": "syscall", - "syscall.SYS_FSCTL": "syscall", - "syscall.SYS_FSETATTRLIST": "syscall", - "syscall.SYS_FSETXATTR": "syscall", - "syscall.SYS_FSGETPATH": "syscall", - "syscall.SYS_FSTAT": "syscall", - "syscall.SYS_FSTAT64": "syscall", - "syscall.SYS_FSTAT64_EXTENDED": "syscall", - "syscall.SYS_FSTATAT": "syscall", - "syscall.SYS_FSTATAT64": "syscall", - "syscall.SYS_FSTATFS": "syscall", - "syscall.SYS_FSTATFS64": "syscall", - "syscall.SYS_FSTATV": "syscall", - "syscall.SYS_FSTATVFS1": "syscall", - "syscall.SYS_FSTAT_EXTENDED": "syscall", - "syscall.SYS_FSYNC": "syscall", - "syscall.SYS_FSYNC_NOCANCEL": "syscall", - "syscall.SYS_FSYNC_RANGE": "syscall", - "syscall.SYS_FTIME": "syscall", - "syscall.SYS_FTRUNCATE": "syscall", - "syscall.SYS_FTRUNCATE64": "syscall", - "syscall.SYS_FUTEX": "syscall", - "syscall.SYS_FUTIMENS": "syscall", - "syscall.SYS_FUTIMES": "syscall", - "syscall.SYS_FUTIMESAT": "syscall", - "syscall.SYS_GETATTRLIST": "syscall", - "syscall.SYS_GETAUDIT": "syscall", - "syscall.SYS_GETAUDIT_ADDR": "syscall", - "syscall.SYS_GETAUID": "syscall", - "syscall.SYS_GETCONTEXT": "syscall", - "syscall.SYS_GETCPU": "syscall", - "syscall.SYS_GETCWD": "syscall", - "syscall.SYS_GETDENTS": "syscall", - "syscall.SYS_GETDENTS64": 
"syscall", - "syscall.SYS_GETDIRENTRIES": "syscall", - "syscall.SYS_GETDIRENTRIES64": "syscall", - "syscall.SYS_GETDIRENTRIESATTR": "syscall", - "syscall.SYS_GETDTABLECOUNT": "syscall", - "syscall.SYS_GETDTABLESIZE": "syscall", - "syscall.SYS_GETEGID": "syscall", - "syscall.SYS_GETEGID32": "syscall", - "syscall.SYS_GETEUID": "syscall", - "syscall.SYS_GETEUID32": "syscall", - "syscall.SYS_GETFH": "syscall", - "syscall.SYS_GETFSSTAT": "syscall", - "syscall.SYS_GETFSSTAT64": "syscall", - "syscall.SYS_GETGID": "syscall", - "syscall.SYS_GETGID32": "syscall", - "syscall.SYS_GETGROUPS": "syscall", - "syscall.SYS_GETGROUPS32": "syscall", - "syscall.SYS_GETHOSTUUID": "syscall", - "syscall.SYS_GETITIMER": "syscall", - "syscall.SYS_GETLCID": "syscall", - "syscall.SYS_GETLOGIN": "syscall", - "syscall.SYS_GETLOGINCLASS": "syscall", - "syscall.SYS_GETPEERNAME": "syscall", - "syscall.SYS_GETPGID": "syscall", - "syscall.SYS_GETPGRP": "syscall", - "syscall.SYS_GETPID": "syscall", - "syscall.SYS_GETPMSG": "syscall", - "syscall.SYS_GETPPID": "syscall", - "syscall.SYS_GETPRIORITY": "syscall", - "syscall.SYS_GETRESGID": "syscall", - "syscall.SYS_GETRESGID32": "syscall", - "syscall.SYS_GETRESUID": "syscall", - "syscall.SYS_GETRESUID32": "syscall", - "syscall.SYS_GETRLIMIT": "syscall", - "syscall.SYS_GETRTABLE": "syscall", - "syscall.SYS_GETRUSAGE": "syscall", - "syscall.SYS_GETSGROUPS": "syscall", - "syscall.SYS_GETSID": "syscall", - "syscall.SYS_GETSOCKNAME": "syscall", - "syscall.SYS_GETSOCKOPT": "syscall", - "syscall.SYS_GETTHRID": "syscall", - "syscall.SYS_GETTID": "syscall", - "syscall.SYS_GETTIMEOFDAY": "syscall", - "syscall.SYS_GETUID": "syscall", - "syscall.SYS_GETUID32": "syscall", - "syscall.SYS_GETVFSSTAT": "syscall", - "syscall.SYS_GETWGROUPS": "syscall", - "syscall.SYS_GETXATTR": "syscall", - "syscall.SYS_GET_KERNEL_SYMS": "syscall", - "syscall.SYS_GET_MEMPOLICY": "syscall", - "syscall.SYS_GET_ROBUST_LIST": "syscall", - "syscall.SYS_GET_THREAD_AREA": "syscall", - "syscall.SYS_GTTY": "syscall", - "syscall.SYS_IDENTITYSVC": "syscall", - "syscall.SYS_IDLE": "syscall", - "syscall.SYS_INITGROUPS": "syscall", - "syscall.SYS_INIT_MODULE": "syscall", - "syscall.SYS_INOTIFY_ADD_WATCH": "syscall", - "syscall.SYS_INOTIFY_INIT": "syscall", - "syscall.SYS_INOTIFY_INIT1": "syscall", - "syscall.SYS_INOTIFY_RM_WATCH": "syscall", - "syscall.SYS_IOCTL": "syscall", - "syscall.SYS_IOPERM": "syscall", - "syscall.SYS_IOPL": "syscall", - "syscall.SYS_IOPOLICYSYS": "syscall", - "syscall.SYS_IOPRIO_GET": "syscall", - "syscall.SYS_IOPRIO_SET": "syscall", - "syscall.SYS_IO_CANCEL": "syscall", - "syscall.SYS_IO_DESTROY": "syscall", - "syscall.SYS_IO_GETEVENTS": "syscall", - "syscall.SYS_IO_SETUP": "syscall", - "syscall.SYS_IO_SUBMIT": "syscall", - "syscall.SYS_IPC": "syscall", - "syscall.SYS_ISSETUGID": "syscall", - "syscall.SYS_JAIL": "syscall", - "syscall.SYS_JAIL_ATTACH": "syscall", - "syscall.SYS_JAIL_GET": "syscall", - "syscall.SYS_JAIL_REMOVE": "syscall", - "syscall.SYS_JAIL_SET": "syscall", - "syscall.SYS_KDEBUG_TRACE": "syscall", - "syscall.SYS_KENV": "syscall", - "syscall.SYS_KEVENT": "syscall", - "syscall.SYS_KEVENT64": "syscall", - "syscall.SYS_KEXEC_LOAD": "syscall", - "syscall.SYS_KEYCTL": "syscall", - "syscall.SYS_KILL": "syscall", - "syscall.SYS_KLDFIND": "syscall", - "syscall.SYS_KLDFIRSTMOD": "syscall", - "syscall.SYS_KLDLOAD": "syscall", - "syscall.SYS_KLDNEXT": "syscall", - "syscall.SYS_KLDSTAT": "syscall", - "syscall.SYS_KLDSYM": "syscall", - "syscall.SYS_KLDUNLOAD": "syscall", - "syscall.SYS_KLDUNLOADF": 
"syscall", - "syscall.SYS_KQUEUE": "syscall", - "syscall.SYS_KQUEUE1": "syscall", - "syscall.SYS_KTIMER_CREATE": "syscall", - "syscall.SYS_KTIMER_DELETE": "syscall", - "syscall.SYS_KTIMER_GETOVERRUN": "syscall", - "syscall.SYS_KTIMER_GETTIME": "syscall", - "syscall.SYS_KTIMER_SETTIME": "syscall", - "syscall.SYS_KTRACE": "syscall", - "syscall.SYS_LCHFLAGS": "syscall", - "syscall.SYS_LCHMOD": "syscall", - "syscall.SYS_LCHOWN": "syscall", - "syscall.SYS_LCHOWN32": "syscall", - "syscall.SYS_LGETFH": "syscall", - "syscall.SYS_LGETXATTR": "syscall", - "syscall.SYS_LINK": "syscall", - "syscall.SYS_LINKAT": "syscall", - "syscall.SYS_LIO_LISTIO": "syscall", - "syscall.SYS_LISTEN": "syscall", - "syscall.SYS_LISTXATTR": "syscall", - "syscall.SYS_LLISTXATTR": "syscall", - "syscall.SYS_LOCK": "syscall", - "syscall.SYS_LOOKUP_DCOOKIE": "syscall", - "syscall.SYS_LPATHCONF": "syscall", - "syscall.SYS_LREMOVEXATTR": "syscall", - "syscall.SYS_LSEEK": "syscall", - "syscall.SYS_LSETXATTR": "syscall", - "syscall.SYS_LSTAT": "syscall", - "syscall.SYS_LSTAT64": "syscall", - "syscall.SYS_LSTAT64_EXTENDED": "syscall", - "syscall.SYS_LSTATV": "syscall", - "syscall.SYS_LSTAT_EXTENDED": "syscall", - "syscall.SYS_LUTIMES": "syscall", - "syscall.SYS_MAC_SYSCALL": "syscall", - "syscall.SYS_MADVISE": "syscall", - "syscall.SYS_MADVISE1": "syscall", - "syscall.SYS_MAXSYSCALL": "syscall", - "syscall.SYS_MBIND": "syscall", - "syscall.SYS_MIGRATE_PAGES": "syscall", - "syscall.SYS_MINCORE": "syscall", - "syscall.SYS_MINHERIT": "syscall", - "syscall.SYS_MKCOMPLEX": "syscall", - "syscall.SYS_MKDIR": "syscall", - "syscall.SYS_MKDIRAT": "syscall", - "syscall.SYS_MKDIR_EXTENDED": "syscall", - "syscall.SYS_MKFIFO": "syscall", - "syscall.SYS_MKFIFOAT": "syscall", - "syscall.SYS_MKFIFO_EXTENDED": "syscall", - "syscall.SYS_MKNOD": "syscall", - "syscall.SYS_MKNODAT": "syscall", - "syscall.SYS_MLOCK": "syscall", - "syscall.SYS_MLOCKALL": "syscall", - "syscall.SYS_MMAP": "syscall", - "syscall.SYS_MMAP2": "syscall", - "syscall.SYS_MODCTL": "syscall", - "syscall.SYS_MODFIND": "syscall", - "syscall.SYS_MODFNEXT": "syscall", - "syscall.SYS_MODIFY_LDT": "syscall", - "syscall.SYS_MODNEXT": "syscall", - "syscall.SYS_MODSTAT": "syscall", - "syscall.SYS_MODWATCH": "syscall", - "syscall.SYS_MOUNT": "syscall", - "syscall.SYS_MOVE_PAGES": "syscall", - "syscall.SYS_MPROTECT": "syscall", - "syscall.SYS_MPX": "syscall", - "syscall.SYS_MQUERY": "syscall", - "syscall.SYS_MQ_GETSETATTR": "syscall", - "syscall.SYS_MQ_NOTIFY": "syscall", - "syscall.SYS_MQ_OPEN": "syscall", - "syscall.SYS_MQ_TIMEDRECEIVE": "syscall", - "syscall.SYS_MQ_TIMEDSEND": "syscall", - "syscall.SYS_MQ_UNLINK": "syscall", - "syscall.SYS_MREMAP": "syscall", - "syscall.SYS_MSGCTL": "syscall", - "syscall.SYS_MSGGET": "syscall", - "syscall.SYS_MSGRCV": "syscall", - "syscall.SYS_MSGRCV_NOCANCEL": "syscall", - "syscall.SYS_MSGSND": "syscall", - "syscall.SYS_MSGSND_NOCANCEL": "syscall", - "syscall.SYS_MSGSYS": "syscall", - "syscall.SYS_MSYNC": "syscall", - "syscall.SYS_MSYNC_NOCANCEL": "syscall", - "syscall.SYS_MUNLOCK": "syscall", - "syscall.SYS_MUNLOCKALL": "syscall", - "syscall.SYS_MUNMAP": "syscall", - "syscall.SYS_NAME_TO_HANDLE_AT": "syscall", - "syscall.SYS_NANOSLEEP": "syscall", - "syscall.SYS_NEWFSTATAT": "syscall", - "syscall.SYS_NFSCLNT": "syscall", - "syscall.SYS_NFSSERVCTL": "syscall", - "syscall.SYS_NFSSVC": "syscall", - "syscall.SYS_NFSTAT": "syscall", - "syscall.SYS_NICE": "syscall", - "syscall.SYS_NLSTAT": "syscall", - "syscall.SYS_NMOUNT": "syscall", - "syscall.SYS_NSTAT": 
"syscall", - "syscall.SYS_NTP_ADJTIME": "syscall", - "syscall.SYS_NTP_GETTIME": "syscall", - "syscall.SYS_OABI_SYSCALL_BASE": "syscall", - "syscall.SYS_OBREAK": "syscall", - "syscall.SYS_OLDFSTAT": "syscall", - "syscall.SYS_OLDLSTAT": "syscall", - "syscall.SYS_OLDOLDUNAME": "syscall", - "syscall.SYS_OLDSTAT": "syscall", - "syscall.SYS_OLDUNAME": "syscall", - "syscall.SYS_OPEN": "syscall", - "syscall.SYS_OPENAT": "syscall", - "syscall.SYS_OPENBSD_POLL": "syscall", - "syscall.SYS_OPEN_BY_HANDLE_AT": "syscall", - "syscall.SYS_OPEN_EXTENDED": "syscall", - "syscall.SYS_OPEN_NOCANCEL": "syscall", - "syscall.SYS_OVADVISE": "syscall", - "syscall.SYS_PACCEPT": "syscall", - "syscall.SYS_PATHCONF": "syscall", - "syscall.SYS_PAUSE": "syscall", - "syscall.SYS_PCICONFIG_IOBASE": "syscall", - "syscall.SYS_PCICONFIG_READ": "syscall", - "syscall.SYS_PCICONFIG_WRITE": "syscall", - "syscall.SYS_PDFORK": "syscall", - "syscall.SYS_PDGETPID": "syscall", - "syscall.SYS_PDKILL": "syscall", - "syscall.SYS_PERF_EVENT_OPEN": "syscall", - "syscall.SYS_PERSONALITY": "syscall", - "syscall.SYS_PID_HIBERNATE": "syscall", - "syscall.SYS_PID_RESUME": "syscall", - "syscall.SYS_PID_SHUTDOWN_SOCKETS": "syscall", - "syscall.SYS_PID_SUSPEND": "syscall", - "syscall.SYS_PIPE": "syscall", - "syscall.SYS_PIPE2": "syscall", - "syscall.SYS_PIVOT_ROOT": "syscall", - "syscall.SYS_PMC_CONTROL": "syscall", - "syscall.SYS_PMC_GET_INFO": "syscall", - "syscall.SYS_POLL": "syscall", - "syscall.SYS_POLLTS": "syscall", - "syscall.SYS_POLL_NOCANCEL": "syscall", - "syscall.SYS_POSIX_FADVISE": "syscall", - "syscall.SYS_POSIX_FALLOCATE": "syscall", - "syscall.SYS_POSIX_OPENPT": "syscall", - "syscall.SYS_POSIX_SPAWN": "syscall", - "syscall.SYS_PPOLL": "syscall", - "syscall.SYS_PRCTL": "syscall", - "syscall.SYS_PREAD": "syscall", - "syscall.SYS_PREAD64": "syscall", - "syscall.SYS_PREADV": "syscall", - "syscall.SYS_PREAD_NOCANCEL": "syscall", - "syscall.SYS_PRLIMIT64": "syscall", - "syscall.SYS_PROCCTL": "syscall", - "syscall.SYS_PROCESS_POLICY": "syscall", - "syscall.SYS_PROCESS_VM_READV": "syscall", - "syscall.SYS_PROCESS_VM_WRITEV": "syscall", - "syscall.SYS_PROC_INFO": "syscall", - "syscall.SYS_PROF": "syscall", - "syscall.SYS_PROFIL": "syscall", - "syscall.SYS_PSELECT": "syscall", - "syscall.SYS_PSELECT6": "syscall", - "syscall.SYS_PSET_ASSIGN": "syscall", - "syscall.SYS_PSET_CREATE": "syscall", - "syscall.SYS_PSET_DESTROY": "syscall", - "syscall.SYS_PSYNCH_CVBROAD": "syscall", - "syscall.SYS_PSYNCH_CVCLRPREPOST": "syscall", - "syscall.SYS_PSYNCH_CVSIGNAL": "syscall", - "syscall.SYS_PSYNCH_CVWAIT": "syscall", - "syscall.SYS_PSYNCH_MUTEXDROP": "syscall", - "syscall.SYS_PSYNCH_MUTEXWAIT": "syscall", - "syscall.SYS_PSYNCH_RW_DOWNGRADE": "syscall", - "syscall.SYS_PSYNCH_RW_LONGRDLOCK": "syscall", - "syscall.SYS_PSYNCH_RW_RDLOCK": "syscall", - "syscall.SYS_PSYNCH_RW_UNLOCK": "syscall", - "syscall.SYS_PSYNCH_RW_UNLOCK2": "syscall", - "syscall.SYS_PSYNCH_RW_UPGRADE": "syscall", - "syscall.SYS_PSYNCH_RW_WRLOCK": "syscall", - "syscall.SYS_PSYNCH_RW_YIELDWRLOCK": "syscall", - "syscall.SYS_PTRACE": "syscall", - "syscall.SYS_PUTPMSG": "syscall", - "syscall.SYS_PWRITE": "syscall", - "syscall.SYS_PWRITE64": "syscall", - "syscall.SYS_PWRITEV": "syscall", - "syscall.SYS_PWRITE_NOCANCEL": "syscall", - "syscall.SYS_QUERY_MODULE": "syscall", - "syscall.SYS_QUOTACTL": "syscall", - "syscall.SYS_RASCTL": "syscall", - "syscall.SYS_RCTL_ADD_RULE": "syscall", - "syscall.SYS_RCTL_GET_LIMITS": "syscall", - "syscall.SYS_RCTL_GET_RACCT": "syscall", - 
"syscall.SYS_RCTL_GET_RULES": "syscall", - "syscall.SYS_RCTL_REMOVE_RULE": "syscall", - "syscall.SYS_READ": "syscall", - "syscall.SYS_READAHEAD": "syscall", - "syscall.SYS_READDIR": "syscall", - "syscall.SYS_READLINK": "syscall", - "syscall.SYS_READLINKAT": "syscall", - "syscall.SYS_READV": "syscall", - "syscall.SYS_READV_NOCANCEL": "syscall", - "syscall.SYS_READ_NOCANCEL": "syscall", - "syscall.SYS_REBOOT": "syscall", - "syscall.SYS_RECV": "syscall", - "syscall.SYS_RECVFROM": "syscall", - "syscall.SYS_RECVFROM_NOCANCEL": "syscall", - "syscall.SYS_RECVMMSG": "syscall", - "syscall.SYS_RECVMSG": "syscall", - "syscall.SYS_RECVMSG_NOCANCEL": "syscall", - "syscall.SYS_REMAP_FILE_PAGES": "syscall", - "syscall.SYS_REMOVEXATTR": "syscall", - "syscall.SYS_RENAME": "syscall", - "syscall.SYS_RENAMEAT": "syscall", - "syscall.SYS_REQUEST_KEY": "syscall", - "syscall.SYS_RESTART_SYSCALL": "syscall", - "syscall.SYS_REVOKE": "syscall", - "syscall.SYS_RFORK": "syscall", - "syscall.SYS_RMDIR": "syscall", - "syscall.SYS_RTPRIO": "syscall", - "syscall.SYS_RTPRIO_THREAD": "syscall", - "syscall.SYS_RT_SIGACTION": "syscall", - "syscall.SYS_RT_SIGPENDING": "syscall", - "syscall.SYS_RT_SIGPROCMASK": "syscall", - "syscall.SYS_RT_SIGQUEUEINFO": "syscall", - "syscall.SYS_RT_SIGRETURN": "syscall", - "syscall.SYS_RT_SIGSUSPEND": "syscall", - "syscall.SYS_RT_SIGTIMEDWAIT": "syscall", - "syscall.SYS_RT_TGSIGQUEUEINFO": "syscall", - "syscall.SYS_SBRK": "syscall", - "syscall.SYS_SCHED_GETAFFINITY": "syscall", - "syscall.SYS_SCHED_GETPARAM": "syscall", - "syscall.SYS_SCHED_GETSCHEDULER": "syscall", - "syscall.SYS_SCHED_GET_PRIORITY_MAX": "syscall", - "syscall.SYS_SCHED_GET_PRIORITY_MIN": "syscall", - "syscall.SYS_SCHED_RR_GET_INTERVAL": "syscall", - "syscall.SYS_SCHED_SETAFFINITY": "syscall", - "syscall.SYS_SCHED_SETPARAM": "syscall", - "syscall.SYS_SCHED_SETSCHEDULER": "syscall", - "syscall.SYS_SCHED_YIELD": "syscall", - "syscall.SYS_SCTP_GENERIC_RECVMSG": "syscall", - "syscall.SYS_SCTP_GENERIC_SENDMSG": "syscall", - "syscall.SYS_SCTP_GENERIC_SENDMSG_IOV": "syscall", - "syscall.SYS_SCTP_PEELOFF": "syscall", - "syscall.SYS_SEARCHFS": "syscall", - "syscall.SYS_SECURITY": "syscall", - "syscall.SYS_SELECT": "syscall", - "syscall.SYS_SELECT_NOCANCEL": "syscall", - "syscall.SYS_SEMCONFIG": "syscall", - "syscall.SYS_SEMCTL": "syscall", - "syscall.SYS_SEMGET": "syscall", - "syscall.SYS_SEMOP": "syscall", - "syscall.SYS_SEMSYS": "syscall", - "syscall.SYS_SEMTIMEDOP": "syscall", - "syscall.SYS_SEM_CLOSE": "syscall", - "syscall.SYS_SEM_DESTROY": "syscall", - "syscall.SYS_SEM_GETVALUE": "syscall", - "syscall.SYS_SEM_INIT": "syscall", - "syscall.SYS_SEM_OPEN": "syscall", - "syscall.SYS_SEM_POST": "syscall", - "syscall.SYS_SEM_TRYWAIT": "syscall", - "syscall.SYS_SEM_UNLINK": "syscall", - "syscall.SYS_SEM_WAIT": "syscall", - "syscall.SYS_SEM_WAIT_NOCANCEL": "syscall", - "syscall.SYS_SEND": "syscall", - "syscall.SYS_SENDFILE": "syscall", - "syscall.SYS_SENDFILE64": "syscall", - "syscall.SYS_SENDMMSG": "syscall", - "syscall.SYS_SENDMSG": "syscall", - "syscall.SYS_SENDMSG_NOCANCEL": "syscall", - "syscall.SYS_SENDTO": "syscall", - "syscall.SYS_SENDTO_NOCANCEL": "syscall", - "syscall.SYS_SETATTRLIST": "syscall", - "syscall.SYS_SETAUDIT": "syscall", - "syscall.SYS_SETAUDIT_ADDR": "syscall", - "syscall.SYS_SETAUID": "syscall", - "syscall.SYS_SETCONTEXT": "syscall", - "syscall.SYS_SETDOMAINNAME": "syscall", - "syscall.SYS_SETEGID": "syscall", - "syscall.SYS_SETEUID": "syscall", - "syscall.SYS_SETFIB": "syscall", - "syscall.SYS_SETFSGID": 
"syscall", - "syscall.SYS_SETFSGID32": "syscall", - "syscall.SYS_SETFSUID": "syscall", - "syscall.SYS_SETFSUID32": "syscall", - "syscall.SYS_SETGID": "syscall", - "syscall.SYS_SETGID32": "syscall", - "syscall.SYS_SETGROUPS": "syscall", - "syscall.SYS_SETGROUPS32": "syscall", - "syscall.SYS_SETHOSTNAME": "syscall", - "syscall.SYS_SETITIMER": "syscall", - "syscall.SYS_SETLCID": "syscall", - "syscall.SYS_SETLOGIN": "syscall", - "syscall.SYS_SETLOGINCLASS": "syscall", - "syscall.SYS_SETNS": "syscall", - "syscall.SYS_SETPGID": "syscall", - "syscall.SYS_SETPRIORITY": "syscall", - "syscall.SYS_SETPRIVEXEC": "syscall", - "syscall.SYS_SETREGID": "syscall", - "syscall.SYS_SETREGID32": "syscall", - "syscall.SYS_SETRESGID": "syscall", - "syscall.SYS_SETRESGID32": "syscall", - "syscall.SYS_SETRESUID": "syscall", - "syscall.SYS_SETRESUID32": "syscall", - "syscall.SYS_SETREUID": "syscall", - "syscall.SYS_SETREUID32": "syscall", - "syscall.SYS_SETRLIMIT": "syscall", - "syscall.SYS_SETRTABLE": "syscall", - "syscall.SYS_SETSGROUPS": "syscall", - "syscall.SYS_SETSID": "syscall", - "syscall.SYS_SETSOCKOPT": "syscall", - "syscall.SYS_SETTID": "syscall", - "syscall.SYS_SETTID_WITH_PID": "syscall", - "syscall.SYS_SETTIMEOFDAY": "syscall", - "syscall.SYS_SETUID": "syscall", - "syscall.SYS_SETUID32": "syscall", - "syscall.SYS_SETWGROUPS": "syscall", - "syscall.SYS_SETXATTR": "syscall", - "syscall.SYS_SET_MEMPOLICY": "syscall", - "syscall.SYS_SET_ROBUST_LIST": "syscall", - "syscall.SYS_SET_THREAD_AREA": "syscall", - "syscall.SYS_SET_TID_ADDRESS": "syscall", - "syscall.SYS_SGETMASK": "syscall", - "syscall.SYS_SHARED_REGION_CHECK_NP": "syscall", - "syscall.SYS_SHARED_REGION_MAP_AND_SLIDE_NP": "syscall", - "syscall.SYS_SHMAT": "syscall", - "syscall.SYS_SHMCTL": "syscall", - "syscall.SYS_SHMDT": "syscall", - "syscall.SYS_SHMGET": "syscall", - "syscall.SYS_SHMSYS": "syscall", - "syscall.SYS_SHM_OPEN": "syscall", - "syscall.SYS_SHM_UNLINK": "syscall", - "syscall.SYS_SHUTDOWN": "syscall", - "syscall.SYS_SIGACTION": "syscall", - "syscall.SYS_SIGALTSTACK": "syscall", - "syscall.SYS_SIGNAL": "syscall", - "syscall.SYS_SIGNALFD": "syscall", - "syscall.SYS_SIGNALFD4": "syscall", - "syscall.SYS_SIGPENDING": "syscall", - "syscall.SYS_SIGPROCMASK": "syscall", - "syscall.SYS_SIGQUEUE": "syscall", - "syscall.SYS_SIGQUEUEINFO": "syscall", - "syscall.SYS_SIGRETURN": "syscall", - "syscall.SYS_SIGSUSPEND": "syscall", - "syscall.SYS_SIGSUSPEND_NOCANCEL": "syscall", - "syscall.SYS_SIGTIMEDWAIT": "syscall", - "syscall.SYS_SIGWAIT": "syscall", - "syscall.SYS_SIGWAITINFO": "syscall", - "syscall.SYS_SOCKET": "syscall", - "syscall.SYS_SOCKETCALL": "syscall", - "syscall.SYS_SOCKETPAIR": "syscall", - "syscall.SYS_SPLICE": "syscall", - "syscall.SYS_SSETMASK": "syscall", - "syscall.SYS_SSTK": "syscall", - "syscall.SYS_STACK_SNAPSHOT": "syscall", - "syscall.SYS_STAT": "syscall", - "syscall.SYS_STAT64": "syscall", - "syscall.SYS_STAT64_EXTENDED": "syscall", - "syscall.SYS_STATFS": "syscall", - "syscall.SYS_STATFS64": "syscall", - "syscall.SYS_STATV": "syscall", - "syscall.SYS_STATVFS1": "syscall", - "syscall.SYS_STAT_EXTENDED": "syscall", - "syscall.SYS_STIME": "syscall", - "syscall.SYS_STTY": "syscall", - "syscall.SYS_SWAPCONTEXT": "syscall", - "syscall.SYS_SWAPCTL": "syscall", - "syscall.SYS_SWAPOFF": "syscall", - "syscall.SYS_SWAPON": "syscall", - "syscall.SYS_SYMLINK": "syscall", - "syscall.SYS_SYMLINKAT": "syscall", - "syscall.SYS_SYNC": "syscall", - "syscall.SYS_SYNCFS": "syscall", - "syscall.SYS_SYNC_FILE_RANGE": "syscall", - 
"syscall.SYS_SYSARCH": "syscall", - "syscall.SYS_SYSCALL": "syscall", - "syscall.SYS_SYSCALL_BASE": "syscall", - "syscall.SYS_SYSFS": "syscall", - "syscall.SYS_SYSINFO": "syscall", - "syscall.SYS_SYSLOG": "syscall", - "syscall.SYS_TEE": "syscall", - "syscall.SYS_TGKILL": "syscall", - "syscall.SYS_THREAD_SELFID": "syscall", - "syscall.SYS_THR_CREATE": "syscall", - "syscall.SYS_THR_EXIT": "syscall", - "syscall.SYS_THR_KILL": "syscall", - "syscall.SYS_THR_KILL2": "syscall", - "syscall.SYS_THR_NEW": "syscall", - "syscall.SYS_THR_SELF": "syscall", - "syscall.SYS_THR_SET_NAME": "syscall", - "syscall.SYS_THR_SUSPEND": "syscall", - "syscall.SYS_THR_WAKE": "syscall", - "syscall.SYS_TIME": "syscall", - "syscall.SYS_TIMERFD_CREATE": "syscall", - "syscall.SYS_TIMERFD_GETTIME": "syscall", - "syscall.SYS_TIMERFD_SETTIME": "syscall", - "syscall.SYS_TIMER_CREATE": "syscall", - "syscall.SYS_TIMER_DELETE": "syscall", - "syscall.SYS_TIMER_GETOVERRUN": "syscall", - "syscall.SYS_TIMER_GETTIME": "syscall", - "syscall.SYS_TIMER_SETTIME": "syscall", - "syscall.SYS_TIMES": "syscall", - "syscall.SYS_TKILL": "syscall", - "syscall.SYS_TRUNCATE": "syscall", - "syscall.SYS_TRUNCATE64": "syscall", - "syscall.SYS_TUXCALL": "syscall", - "syscall.SYS_UGETRLIMIT": "syscall", - "syscall.SYS_ULIMIT": "syscall", - "syscall.SYS_UMASK": "syscall", - "syscall.SYS_UMASK_EXTENDED": "syscall", - "syscall.SYS_UMOUNT": "syscall", - "syscall.SYS_UMOUNT2": "syscall", - "syscall.SYS_UNAME": "syscall", - "syscall.SYS_UNDELETE": "syscall", - "syscall.SYS_UNLINK": "syscall", - "syscall.SYS_UNLINKAT": "syscall", - "syscall.SYS_UNMOUNT": "syscall", - "syscall.SYS_UNSHARE": "syscall", - "syscall.SYS_USELIB": "syscall", - "syscall.SYS_USTAT": "syscall", - "syscall.SYS_UTIME": "syscall", - "syscall.SYS_UTIMENSAT": "syscall", - "syscall.SYS_UTIMES": "syscall", - "syscall.SYS_UTRACE": "syscall", - "syscall.SYS_UUIDGEN": "syscall", - "syscall.SYS_VADVISE": "syscall", - "syscall.SYS_VFORK": "syscall", - "syscall.SYS_VHANGUP": "syscall", - "syscall.SYS_VM86": "syscall", - "syscall.SYS_VM86OLD": "syscall", - "syscall.SYS_VMSPLICE": "syscall", - "syscall.SYS_VM_PRESSURE_MONITOR": "syscall", - "syscall.SYS_VSERVER": "syscall", - "syscall.SYS_WAIT4": "syscall", - "syscall.SYS_WAIT4_NOCANCEL": "syscall", - "syscall.SYS_WAIT6": "syscall", - "syscall.SYS_WAITEVENT": "syscall", - "syscall.SYS_WAITID": "syscall", - "syscall.SYS_WAITID_NOCANCEL": "syscall", - "syscall.SYS_WAITPID": "syscall", - "syscall.SYS_WATCHEVENT": "syscall", - "syscall.SYS_WORKQ_KERNRETURN": "syscall", - "syscall.SYS_WORKQ_OPEN": "syscall", - "syscall.SYS_WRITE": "syscall", - "syscall.SYS_WRITEV": "syscall", - "syscall.SYS_WRITEV_NOCANCEL": "syscall", - "syscall.SYS_WRITE_NOCANCEL": "syscall", - "syscall.SYS_YIELD": "syscall", - "syscall.SYS__LLSEEK": "syscall", - "syscall.SYS__LWP_CONTINUE": "syscall", - "syscall.SYS__LWP_CREATE": "syscall", - "syscall.SYS__LWP_CTL": "syscall", - "syscall.SYS__LWP_DETACH": "syscall", - "syscall.SYS__LWP_EXIT": "syscall", - "syscall.SYS__LWP_GETNAME": "syscall", - "syscall.SYS__LWP_GETPRIVATE": "syscall", - "syscall.SYS__LWP_KILL": "syscall", - "syscall.SYS__LWP_PARK": "syscall", - "syscall.SYS__LWP_SELF": "syscall", - "syscall.SYS__LWP_SETNAME": "syscall", - "syscall.SYS__LWP_SETPRIVATE": "syscall", - "syscall.SYS__LWP_SUSPEND": "syscall", - "syscall.SYS__LWP_UNPARK": "syscall", - "syscall.SYS__LWP_UNPARK_ALL": "syscall", - "syscall.SYS__LWP_WAIT": "syscall", - "syscall.SYS__LWP_WAKEUP": "syscall", - "syscall.SYS__NEWSELECT": "syscall", - 
"syscall.SYS__PSET_BIND": "syscall", - "syscall.SYS__SCHED_GETAFFINITY": "syscall", - "syscall.SYS__SCHED_GETPARAM": "syscall", - "syscall.SYS__SCHED_SETAFFINITY": "syscall", - "syscall.SYS__SCHED_SETPARAM": "syscall", - "syscall.SYS__SYSCTL": "syscall", - "syscall.SYS__UMTX_LOCK": "syscall", - "syscall.SYS__UMTX_OP": "syscall", - "syscall.SYS__UMTX_UNLOCK": "syscall", - "syscall.SYS___ACL_ACLCHECK_FD": "syscall", - "syscall.SYS___ACL_ACLCHECK_FILE": "syscall", - "syscall.SYS___ACL_ACLCHECK_LINK": "syscall", - "syscall.SYS___ACL_DELETE_FD": "syscall", - "syscall.SYS___ACL_DELETE_FILE": "syscall", - "syscall.SYS___ACL_DELETE_LINK": "syscall", - "syscall.SYS___ACL_GET_FD": "syscall", - "syscall.SYS___ACL_GET_FILE": "syscall", - "syscall.SYS___ACL_GET_LINK": "syscall", - "syscall.SYS___ACL_SET_FD": "syscall", - "syscall.SYS___ACL_SET_FILE": "syscall", - "syscall.SYS___ACL_SET_LINK": "syscall", - "syscall.SYS___CLONE": "syscall", - "syscall.SYS___DISABLE_THREADSIGNAL": "syscall", - "syscall.SYS___GETCWD": "syscall", - "syscall.SYS___GETLOGIN": "syscall", - "syscall.SYS___GET_TCB": "syscall", - "syscall.SYS___MAC_EXECVE": "syscall", - "syscall.SYS___MAC_GETFSSTAT": "syscall", - "syscall.SYS___MAC_GET_FD": "syscall", - "syscall.SYS___MAC_GET_FILE": "syscall", - "syscall.SYS___MAC_GET_LCID": "syscall", - "syscall.SYS___MAC_GET_LCTX": "syscall", - "syscall.SYS___MAC_GET_LINK": "syscall", - "syscall.SYS___MAC_GET_MOUNT": "syscall", - "syscall.SYS___MAC_GET_PID": "syscall", - "syscall.SYS___MAC_GET_PROC": "syscall", - "syscall.SYS___MAC_MOUNT": "syscall", - "syscall.SYS___MAC_SET_FD": "syscall", - "syscall.SYS___MAC_SET_FILE": "syscall", - "syscall.SYS___MAC_SET_LCTX": "syscall", - "syscall.SYS___MAC_SET_LINK": "syscall", - "syscall.SYS___MAC_SET_PROC": "syscall", - "syscall.SYS___MAC_SYSCALL": "syscall", - "syscall.SYS___OLD_SEMWAIT_SIGNAL": "syscall", - "syscall.SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL": "syscall", - "syscall.SYS___POSIX_CHOWN": "syscall", - "syscall.SYS___POSIX_FCHOWN": "syscall", - "syscall.SYS___POSIX_LCHOWN": "syscall", - "syscall.SYS___POSIX_RENAME": "syscall", - "syscall.SYS___PTHREAD_CANCELED": "syscall", - "syscall.SYS___PTHREAD_CHDIR": "syscall", - "syscall.SYS___PTHREAD_FCHDIR": "syscall", - "syscall.SYS___PTHREAD_KILL": "syscall", - "syscall.SYS___PTHREAD_MARKCANCEL": "syscall", - "syscall.SYS___PTHREAD_SIGMASK": "syscall", - "syscall.SYS___QUOTACTL": "syscall", - "syscall.SYS___SEMCTL": "syscall", - "syscall.SYS___SEMWAIT_SIGNAL": "syscall", - "syscall.SYS___SEMWAIT_SIGNAL_NOCANCEL": "syscall", - "syscall.SYS___SETLOGIN": "syscall", - "syscall.SYS___SETUGID": "syscall", - "syscall.SYS___SET_TCB": "syscall", - "syscall.SYS___SIGACTION_SIGTRAMP": "syscall", - "syscall.SYS___SIGTIMEDWAIT": "syscall", - "syscall.SYS___SIGWAIT": "syscall", - "syscall.SYS___SIGWAIT_NOCANCEL": "syscall", - "syscall.SYS___SYSCTL": "syscall", - "syscall.SYS___TFORK": "syscall", - "syscall.SYS___THREXIT": "syscall", - "syscall.SYS___THRSIGDIVERT": "syscall", - "syscall.SYS___THRSLEEP": "syscall", - "syscall.SYS___THRWAKEUP": "syscall", - "syscall.S_ARCH1": "syscall", - "syscall.S_ARCH2": "syscall", - "syscall.S_BLKSIZE": "syscall", - "syscall.S_IEXEC": "syscall", - "syscall.S_IFBLK": "syscall", - "syscall.S_IFCHR": "syscall", - "syscall.S_IFDIR": "syscall", - "syscall.S_IFIFO": "syscall", - "syscall.S_IFLNK": "syscall", - "syscall.S_IFMT": "syscall", - "syscall.S_IFREG": "syscall", - "syscall.S_IFSOCK": "syscall", - "syscall.S_IFWHT": "syscall", - "syscall.S_IREAD": "syscall", - "syscall.S_IRGRP": 
"syscall", - "syscall.S_IROTH": "syscall", - "syscall.S_IRUSR": "syscall", - "syscall.S_IRWXG": "syscall", - "syscall.S_IRWXO": "syscall", - "syscall.S_IRWXU": "syscall", - "syscall.S_ISGID": "syscall", - "syscall.S_ISTXT": "syscall", - "syscall.S_ISUID": "syscall", - "syscall.S_ISVTX": "syscall", - "syscall.S_IWGRP": "syscall", - "syscall.S_IWOTH": "syscall", - "syscall.S_IWRITE": "syscall", - "syscall.S_IWUSR": "syscall", - "syscall.S_IXGRP": "syscall", - "syscall.S_IXOTH": "syscall", - "syscall.S_IXUSR": "syscall", - "syscall.S_LOGIN_SET": "syscall", - "syscall.SecurityAttributes": "syscall", - "syscall.Seek": "syscall", - "syscall.Select": "syscall", - "syscall.Sendfile": "syscall", - "syscall.Sendmsg": "syscall", - "syscall.SendmsgN": "syscall", - "syscall.Sendto": "syscall", - "syscall.Servent": "syscall", - "syscall.SetBpf": "syscall", - "syscall.SetBpfBuflen": "syscall", - "syscall.SetBpfDatalink": "syscall", - "syscall.SetBpfHeadercmpl": "syscall", - "syscall.SetBpfImmediate": "syscall", - "syscall.SetBpfInterface": "syscall", - "syscall.SetBpfPromisc": "syscall", - "syscall.SetBpfTimeout": "syscall", - "syscall.SetCurrentDirectory": "syscall", - "syscall.SetEndOfFile": "syscall", - "syscall.SetEnvironmentVariable": "syscall", - "syscall.SetFileAttributes": "syscall", - "syscall.SetFileCompletionNotificationModes": "syscall", - "syscall.SetFilePointer": "syscall", - "syscall.SetFileTime": "syscall", - "syscall.SetHandleInformation": "syscall", - "syscall.SetKevent": "syscall", - "syscall.SetLsfPromisc": "syscall", - "syscall.SetNonblock": "syscall", - "syscall.Setdomainname": "syscall", - "syscall.Setegid": "syscall", - "syscall.Setenv": "syscall", - "syscall.Seteuid": "syscall", - "syscall.Setfsgid": "syscall", - "syscall.Setfsuid": "syscall", - "syscall.Setgid": "syscall", - "syscall.Setgroups": "syscall", - "syscall.Sethostname": "syscall", - "syscall.Setlogin": "syscall", - "syscall.Setpgid": "syscall", - "syscall.Setpriority": "syscall", - "syscall.Setprivexec": "syscall", - "syscall.Setregid": "syscall", - "syscall.Setresgid": "syscall", - "syscall.Setresuid": "syscall", - "syscall.Setreuid": "syscall", - "syscall.Setrlimit": "syscall", - "syscall.Setsid": "syscall", - "syscall.Setsockopt": "syscall", - "syscall.SetsockoptByte": "syscall", - "syscall.SetsockoptICMPv6Filter": "syscall", - "syscall.SetsockoptIPMreq": "syscall", - "syscall.SetsockoptIPMreqn": "syscall", - "syscall.SetsockoptIPv6Mreq": "syscall", - "syscall.SetsockoptInet4Addr": "syscall", - "syscall.SetsockoptInt": "syscall", - "syscall.SetsockoptLinger": "syscall", - "syscall.SetsockoptString": "syscall", - "syscall.SetsockoptTimeval": "syscall", - "syscall.Settimeofday": "syscall", - "syscall.Setuid": "syscall", - "syscall.Setxattr": "syscall", - "syscall.Shutdown": "syscall", - "syscall.SidTypeAlias": "syscall", - "syscall.SidTypeComputer": "syscall", - "syscall.SidTypeDeletedAccount": "syscall", - "syscall.SidTypeDomain": "syscall", - "syscall.SidTypeGroup": "syscall", - "syscall.SidTypeInvalid": "syscall", - "syscall.SidTypeLabel": "syscall", - "syscall.SidTypeUnknown": "syscall", - "syscall.SidTypeUser": "syscall", - "syscall.SidTypeWellKnownGroup": "syscall", - "syscall.Signal": "syscall", - "syscall.SizeofBpfHdr": "syscall", - "syscall.SizeofBpfInsn": "syscall", - "syscall.SizeofBpfProgram": "syscall", - "syscall.SizeofBpfStat": "syscall", - "syscall.SizeofBpfVersion": "syscall", - "syscall.SizeofBpfZbuf": "syscall", - "syscall.SizeofBpfZbufHeader": "syscall", - "syscall.SizeofCmsghdr": "syscall", - 
"syscall.SizeofICMPv6Filter": "syscall", - "syscall.SizeofIPMreq": "syscall", - "syscall.SizeofIPMreqn": "syscall", - "syscall.SizeofIPv6MTUInfo": "syscall", - "syscall.SizeofIPv6Mreq": "syscall", - "syscall.SizeofIfAddrmsg": "syscall", - "syscall.SizeofIfAnnounceMsghdr": "syscall", - "syscall.SizeofIfData": "syscall", - "syscall.SizeofIfInfomsg": "syscall", - "syscall.SizeofIfMsghdr": "syscall", - "syscall.SizeofIfaMsghdr": "syscall", - "syscall.SizeofIfmaMsghdr": "syscall", - "syscall.SizeofIfmaMsghdr2": "syscall", - "syscall.SizeofInet4Pktinfo": "syscall", - "syscall.SizeofInet6Pktinfo": "syscall", - "syscall.SizeofInotifyEvent": "syscall", - "syscall.SizeofLinger": "syscall", - "syscall.SizeofMsghdr": "syscall", - "syscall.SizeofNlAttr": "syscall", - "syscall.SizeofNlMsgerr": "syscall", - "syscall.SizeofNlMsghdr": "syscall", - "syscall.SizeofRtAttr": "syscall", - "syscall.SizeofRtGenmsg": "syscall", - "syscall.SizeofRtMetrics": "syscall", - "syscall.SizeofRtMsg": "syscall", - "syscall.SizeofRtMsghdr": "syscall", - "syscall.SizeofRtNexthop": "syscall", - "syscall.SizeofSockFilter": "syscall", - "syscall.SizeofSockFprog": "syscall", - "syscall.SizeofSockaddrAny": "syscall", - "syscall.SizeofSockaddrDatalink": "syscall", - "syscall.SizeofSockaddrInet4": "syscall", - "syscall.SizeofSockaddrInet6": "syscall", - "syscall.SizeofSockaddrLinklayer": "syscall", - "syscall.SizeofSockaddrNetlink": "syscall", - "syscall.SizeofSockaddrUnix": "syscall", - "syscall.SizeofTCPInfo": "syscall", - "syscall.SizeofUcred": "syscall", - "syscall.SlicePtrFromStrings": "syscall", - "syscall.SockFilter": "syscall", - "syscall.SockFprog": "syscall", - "syscall.SockaddrDatalink": "syscall", - "syscall.SockaddrGen": "syscall", - "syscall.SockaddrInet4": "syscall", - "syscall.SockaddrInet6": "syscall", - "syscall.SockaddrLinklayer": "syscall", - "syscall.SockaddrNetlink": "syscall", - "syscall.SockaddrUnix": "syscall", - "syscall.Socket": "syscall", - "syscall.SocketControlMessage": "syscall", - "syscall.SocketDisableIPv6": "syscall", - "syscall.Socketpair": "syscall", - "syscall.Splice": "syscall", - "syscall.StartProcess": "syscall", - "syscall.StartupInfo": "syscall", - "syscall.Stat": "syscall", - "syscall.Stat_t": "syscall", - "syscall.Statfs": "syscall", - "syscall.Statfs_t": "syscall", - "syscall.Stderr": "syscall", - "syscall.Stdin": "syscall", - "syscall.Stdout": "syscall", - "syscall.StringBytePtr": "syscall", - "syscall.StringByteSlice": "syscall", - "syscall.StringSlicePtr": "syscall", - "syscall.StringToSid": "syscall", - "syscall.StringToUTF16": "syscall", - "syscall.StringToUTF16Ptr": "syscall", - "syscall.Symlink": "syscall", - "syscall.Sync": "syscall", - "syscall.SyncFileRange": "syscall", - "syscall.SysProcAttr": "syscall", - "syscall.SysProcIDMap": "syscall", - "syscall.Syscall": "syscall", - "syscall.Syscall12": "syscall", - "syscall.Syscall15": "syscall", - "syscall.Syscall6": "syscall", - "syscall.Syscall9": "syscall", - "syscall.Sysctl": "syscall", - "syscall.SysctlUint32": "syscall", - "syscall.Sysctlnode": "syscall", - "syscall.Sysinfo": "syscall", - "syscall.Sysinfo_t": "syscall", - "syscall.Systemtime": "syscall", - "syscall.TCGETS": "syscall", - "syscall.TCIFLUSH": "syscall", - "syscall.TCIOFLUSH": "syscall", - "syscall.TCOFLUSH": "syscall", - "syscall.TCPInfo": "syscall", - "syscall.TCPKeepalive": "syscall", - "syscall.TCP_CA_NAME_MAX": "syscall", - "syscall.TCP_CONGCTL": "syscall", - "syscall.TCP_CONGESTION": "syscall", - "syscall.TCP_CONNECTIONTIMEOUT": "syscall", - 
"syscall.TCP_CORK": "syscall", - "syscall.TCP_DEFER_ACCEPT": "syscall", - "syscall.TCP_INFO": "syscall", - "syscall.TCP_KEEPALIVE": "syscall", - "syscall.TCP_KEEPCNT": "syscall", - "syscall.TCP_KEEPIDLE": "syscall", - "syscall.TCP_KEEPINIT": "syscall", - "syscall.TCP_KEEPINTVL": "syscall", - "syscall.TCP_LINGER2": "syscall", - "syscall.TCP_MAXBURST": "syscall", - "syscall.TCP_MAXHLEN": "syscall", - "syscall.TCP_MAXOLEN": "syscall", - "syscall.TCP_MAXSEG": "syscall", - "syscall.TCP_MAXWIN": "syscall", - "syscall.TCP_MAX_SACK": "syscall", - "syscall.TCP_MAX_WINSHIFT": "syscall", - "syscall.TCP_MD5SIG": "syscall", - "syscall.TCP_MD5SIG_MAXKEYLEN": "syscall", - "syscall.TCP_MINMSS": "syscall", - "syscall.TCP_MINMSSOVERLOAD": "syscall", - "syscall.TCP_MSS": "syscall", - "syscall.TCP_NODELAY": "syscall", - "syscall.TCP_NOOPT": "syscall", - "syscall.TCP_NOPUSH": "syscall", - "syscall.TCP_NSTATES": "syscall", - "syscall.TCP_QUICKACK": "syscall", - "syscall.TCP_RXT_CONNDROPTIME": "syscall", - "syscall.TCP_RXT_FINDROP": "syscall", - "syscall.TCP_SACK_ENABLE": "syscall", - "syscall.TCP_SYNCNT": "syscall", - "syscall.TCP_VENDOR": "syscall", - "syscall.TCP_WINDOW_CLAMP": "syscall", - "syscall.TCSAFLUSH": "syscall", - "syscall.TCSETS": "syscall", - "syscall.TF_DISCONNECT": "syscall", - "syscall.TF_REUSE_SOCKET": "syscall", - "syscall.TF_USE_DEFAULT_WORKER": "syscall", - "syscall.TF_USE_KERNEL_APC": "syscall", - "syscall.TF_USE_SYSTEM_THREAD": "syscall", - "syscall.TF_WRITE_BEHIND": "syscall", - "syscall.TH32CS_INHERIT": "syscall", - "syscall.TH32CS_SNAPALL": "syscall", - "syscall.TH32CS_SNAPHEAPLIST": "syscall", - "syscall.TH32CS_SNAPMODULE": "syscall", - "syscall.TH32CS_SNAPMODULE32": "syscall", - "syscall.TH32CS_SNAPPROCESS": "syscall", - "syscall.TH32CS_SNAPTHREAD": "syscall", - "syscall.TIME_ZONE_ID_DAYLIGHT": "syscall", - "syscall.TIME_ZONE_ID_STANDARD": "syscall", - "syscall.TIME_ZONE_ID_UNKNOWN": "syscall", - "syscall.TIOCCBRK": "syscall", - "syscall.TIOCCDTR": "syscall", - "syscall.TIOCCONS": "syscall", - "syscall.TIOCDCDTIMESTAMP": "syscall", - "syscall.TIOCDRAIN": "syscall", - "syscall.TIOCDSIMICROCODE": "syscall", - "syscall.TIOCEXCL": "syscall", - "syscall.TIOCEXT": "syscall", - "syscall.TIOCFLAG_CDTRCTS": "syscall", - "syscall.TIOCFLAG_CLOCAL": "syscall", - "syscall.TIOCFLAG_CRTSCTS": "syscall", - "syscall.TIOCFLAG_MDMBUF": "syscall", - "syscall.TIOCFLAG_PPS": "syscall", - "syscall.TIOCFLAG_SOFTCAR": "syscall", - "syscall.TIOCFLUSH": "syscall", - "syscall.TIOCGDEV": "syscall", - "syscall.TIOCGDRAINWAIT": "syscall", - "syscall.TIOCGETA": "syscall", - "syscall.TIOCGETD": "syscall", - "syscall.TIOCGFLAGS": "syscall", - "syscall.TIOCGICOUNT": "syscall", - "syscall.TIOCGLCKTRMIOS": "syscall", - "syscall.TIOCGLINED": "syscall", - "syscall.TIOCGPGRP": "syscall", - "syscall.TIOCGPTN": "syscall", - "syscall.TIOCGQSIZE": "syscall", - "syscall.TIOCGRANTPT": "syscall", - "syscall.TIOCGRS485": "syscall", - "syscall.TIOCGSERIAL": "syscall", - "syscall.TIOCGSID": "syscall", - "syscall.TIOCGSIZE": "syscall", - "syscall.TIOCGSOFTCAR": "syscall", - "syscall.TIOCGTSTAMP": "syscall", - "syscall.TIOCGWINSZ": "syscall", - "syscall.TIOCINQ": "syscall", - "syscall.TIOCIXOFF": "syscall", - "syscall.TIOCIXON": "syscall", - "syscall.TIOCLINUX": "syscall", - "syscall.TIOCMBIC": "syscall", - "syscall.TIOCMBIS": "syscall", - "syscall.TIOCMGDTRWAIT": "syscall", - "syscall.TIOCMGET": "syscall", - "syscall.TIOCMIWAIT": "syscall", - "syscall.TIOCMODG": "syscall", - "syscall.TIOCMODS": "syscall", - "syscall.TIOCMSDTRWAIT": 
"syscall", - "syscall.TIOCMSET": "syscall", - "syscall.TIOCM_CAR": "syscall", - "syscall.TIOCM_CD": "syscall", - "syscall.TIOCM_CTS": "syscall", - "syscall.TIOCM_DCD": "syscall", - "syscall.TIOCM_DSR": "syscall", - "syscall.TIOCM_DTR": "syscall", - "syscall.TIOCM_LE": "syscall", - "syscall.TIOCM_RI": "syscall", - "syscall.TIOCM_RNG": "syscall", - "syscall.TIOCM_RTS": "syscall", - "syscall.TIOCM_SR": "syscall", - "syscall.TIOCM_ST": "syscall", - "syscall.TIOCNOTTY": "syscall", - "syscall.TIOCNXCL": "syscall", - "syscall.TIOCOUTQ": "syscall", - "syscall.TIOCPKT": "syscall", - "syscall.TIOCPKT_DATA": "syscall", - "syscall.TIOCPKT_DOSTOP": "syscall", - "syscall.TIOCPKT_FLUSHREAD": "syscall", - "syscall.TIOCPKT_FLUSHWRITE": "syscall", - "syscall.TIOCPKT_IOCTL": "syscall", - "syscall.TIOCPKT_NOSTOP": "syscall", - "syscall.TIOCPKT_START": "syscall", - "syscall.TIOCPKT_STOP": "syscall", - "syscall.TIOCPTMASTER": "syscall", - "syscall.TIOCPTMGET": "syscall", - "syscall.TIOCPTSNAME": "syscall", - "syscall.TIOCPTYGNAME": "syscall", - "syscall.TIOCPTYGRANT": "syscall", - "syscall.TIOCPTYUNLK": "syscall", - "syscall.TIOCRCVFRAME": "syscall", - "syscall.TIOCREMOTE": "syscall", - "syscall.TIOCSBRK": "syscall", - "syscall.TIOCSCONS": "syscall", - "syscall.TIOCSCTTY": "syscall", - "syscall.TIOCSDRAINWAIT": "syscall", - "syscall.TIOCSDTR": "syscall", - "syscall.TIOCSERCONFIG": "syscall", - "syscall.TIOCSERGETLSR": "syscall", - "syscall.TIOCSERGETMULTI": "syscall", - "syscall.TIOCSERGSTRUCT": "syscall", - "syscall.TIOCSERGWILD": "syscall", - "syscall.TIOCSERSETMULTI": "syscall", - "syscall.TIOCSERSWILD": "syscall", - "syscall.TIOCSER_TEMT": "syscall", - "syscall.TIOCSETA": "syscall", - "syscall.TIOCSETAF": "syscall", - "syscall.TIOCSETAW": "syscall", - "syscall.TIOCSETD": "syscall", - "syscall.TIOCSFLAGS": "syscall", - "syscall.TIOCSIG": "syscall", - "syscall.TIOCSLCKTRMIOS": "syscall", - "syscall.TIOCSLINED": "syscall", - "syscall.TIOCSPGRP": "syscall", - "syscall.TIOCSPTLCK": "syscall", - "syscall.TIOCSQSIZE": "syscall", - "syscall.TIOCSRS485": "syscall", - "syscall.TIOCSSERIAL": "syscall", - "syscall.TIOCSSIZE": "syscall", - "syscall.TIOCSSOFTCAR": "syscall", - "syscall.TIOCSTART": "syscall", - "syscall.TIOCSTAT": "syscall", - "syscall.TIOCSTI": "syscall", - "syscall.TIOCSTOP": "syscall", - "syscall.TIOCSTSTAMP": "syscall", - "syscall.TIOCSWINSZ": "syscall", - "syscall.TIOCTIMESTAMP": "syscall", - "syscall.TIOCUCNTL": "syscall", - "syscall.TIOCVHANGUP": "syscall", - "syscall.TIOCXMTFRAME": "syscall", - "syscall.TOKEN_ADJUST_DEFAULT": "syscall", - "syscall.TOKEN_ADJUST_GROUPS": "syscall", - "syscall.TOKEN_ADJUST_PRIVILEGES": "syscall", - "syscall.TOKEN_ALL_ACCESS": "syscall", - "syscall.TOKEN_ASSIGN_PRIMARY": "syscall", - "syscall.TOKEN_DUPLICATE": "syscall", - "syscall.TOKEN_EXECUTE": "syscall", - "syscall.TOKEN_IMPERSONATE": "syscall", - "syscall.TOKEN_QUERY": "syscall", - "syscall.TOKEN_QUERY_SOURCE": "syscall", - "syscall.TOKEN_READ": "syscall", - "syscall.TOKEN_WRITE": "syscall", - "syscall.TOSTOP": "syscall", - "syscall.TRUNCATE_EXISTING": "syscall", - "syscall.TUNATTACHFILTER": "syscall", - "syscall.TUNDETACHFILTER": "syscall", - "syscall.TUNGETFEATURES": "syscall", - "syscall.TUNGETIFF": "syscall", - "syscall.TUNGETSNDBUF": "syscall", - "syscall.TUNGETVNETHDRSZ": "syscall", - "syscall.TUNSETDEBUG": "syscall", - "syscall.TUNSETGROUP": "syscall", - "syscall.TUNSETIFF": "syscall", - "syscall.TUNSETLINK": "syscall", - "syscall.TUNSETNOCSUM": "syscall", - "syscall.TUNSETOFFLOAD": "syscall", - 
"syscall.TUNSETOWNER": "syscall", - "syscall.TUNSETPERSIST": "syscall", - "syscall.TUNSETSNDBUF": "syscall", - "syscall.TUNSETTXFILTER": "syscall", - "syscall.TUNSETVNETHDRSZ": "syscall", - "syscall.Tee": "syscall", - "syscall.TerminateProcess": "syscall", - "syscall.Termios": "syscall", - "syscall.Tgkill": "syscall", - "syscall.Time": "syscall", - "syscall.Time_t": "syscall", - "syscall.Times": "syscall", - "syscall.Timespec": "syscall", - "syscall.TimespecToNsec": "syscall", - "syscall.Timeval": "syscall", - "syscall.Timeval32": "syscall", - "syscall.TimevalToNsec": "syscall", - "syscall.Timex": "syscall", - "syscall.Timezoneinformation": "syscall", - "syscall.Tms": "syscall", - "syscall.Token": "syscall", - "syscall.TokenAccessInformation": "syscall", - "syscall.TokenAuditPolicy": "syscall", - "syscall.TokenDefaultDacl": "syscall", - "syscall.TokenElevation": "syscall", - "syscall.TokenElevationType": "syscall", - "syscall.TokenGroups": "syscall", - "syscall.TokenGroupsAndPrivileges": "syscall", - "syscall.TokenHasRestrictions": "syscall", - "syscall.TokenImpersonationLevel": "syscall", - "syscall.TokenIntegrityLevel": "syscall", - "syscall.TokenLinkedToken": "syscall", - "syscall.TokenLogonSid": "syscall", - "syscall.TokenMandatoryPolicy": "syscall", - "syscall.TokenOrigin": "syscall", - "syscall.TokenOwner": "syscall", - "syscall.TokenPrimaryGroup": "syscall", - "syscall.TokenPrivileges": "syscall", - "syscall.TokenRestrictedSids": "syscall", - "syscall.TokenSandBoxInert": "syscall", - "syscall.TokenSessionId": "syscall", - "syscall.TokenSessionReference": "syscall", - "syscall.TokenSource": "syscall", - "syscall.TokenStatistics": "syscall", - "syscall.TokenType": "syscall", - "syscall.TokenUIAccess": "syscall", - "syscall.TokenUser": "syscall", - "syscall.TokenVirtualizationAllowed": "syscall", - "syscall.TokenVirtualizationEnabled": "syscall", - "syscall.Tokenprimarygroup": "syscall", - "syscall.Tokenuser": "syscall", - "syscall.TranslateAccountName": "syscall", - "syscall.TranslateName": "syscall", - "syscall.TransmitFile": "syscall", - "syscall.TransmitFileBuffers": "syscall", - "syscall.Truncate": "syscall", - "syscall.USAGE_MATCH_TYPE_AND": "syscall", - "syscall.USAGE_MATCH_TYPE_OR": "syscall", - "syscall.UTF16FromString": "syscall", - "syscall.UTF16PtrFromString": "syscall", - "syscall.UTF16ToString": "syscall", - "syscall.Ucred": "syscall", - "syscall.Umask": "syscall", - "syscall.Uname": "syscall", - "syscall.Undelete": "syscall", - "syscall.UnixCredentials": "syscall", - "syscall.UnixRights": "syscall", - "syscall.Unlink": "syscall", - "syscall.Unlinkat": "syscall", - "syscall.UnmapViewOfFile": "syscall", - "syscall.Unmount": "syscall", - "syscall.Unsetenv": "syscall", - "syscall.Unshare": "syscall", - "syscall.UserInfo10": "syscall", - "syscall.Ustat": "syscall", - "syscall.Ustat_t": "syscall", - "syscall.Utimbuf": "syscall", - "syscall.Utime": "syscall", - "syscall.Utimes": "syscall", - "syscall.UtimesNano": "syscall", - "syscall.Utsname": "syscall", - "syscall.VDISCARD": "syscall", - "syscall.VDSUSP": "syscall", - "syscall.VEOF": "syscall", - "syscall.VEOL": "syscall", - "syscall.VEOL2": "syscall", - "syscall.VERASE": "syscall", - "syscall.VERASE2": "syscall", - "syscall.VINTR": "syscall", - "syscall.VKILL": "syscall", - "syscall.VLNEXT": "syscall", - "syscall.VMIN": "syscall", - "syscall.VQUIT": "syscall", - "syscall.VREPRINT": "syscall", - "syscall.VSTART": "syscall", - "syscall.VSTATUS": "syscall", - "syscall.VSTOP": "syscall", - "syscall.VSUSP": "syscall", - 
"syscall.VSWTC": "syscall", - "syscall.VT0": "syscall", - "syscall.VT1": "syscall", - "syscall.VTDLY": "syscall", - "syscall.VTIME": "syscall", - "syscall.VWERASE": "syscall", - "syscall.VirtualLock": "syscall", - "syscall.VirtualUnlock": "syscall", - "syscall.WAIT_ABANDONED": "syscall", - "syscall.WAIT_FAILED": "syscall", - "syscall.WAIT_OBJECT_0": "syscall", - "syscall.WAIT_TIMEOUT": "syscall", - "syscall.WALL": "syscall", - "syscall.WALLSIG": "syscall", - "syscall.WALTSIG": "syscall", - "syscall.WCLONE": "syscall", - "syscall.WCONTINUED": "syscall", - "syscall.WCOREFLAG": "syscall", - "syscall.WEXITED": "syscall", - "syscall.WLINUXCLONE": "syscall", - "syscall.WNOHANG": "syscall", - "syscall.WNOTHREAD": "syscall", - "syscall.WNOWAIT": "syscall", - "syscall.WNOZOMBIE": "syscall", - "syscall.WOPTSCHECKED": "syscall", - "syscall.WORDSIZE": "syscall", - "syscall.WSABuf": "syscall", - "syscall.WSACleanup": "syscall", - "syscall.WSADESCRIPTION_LEN": "syscall", - "syscall.WSAData": "syscall", - "syscall.WSAEACCES": "syscall", - "syscall.WSAECONNRESET": "syscall", - "syscall.WSAEnumProtocols": "syscall", - "syscall.WSAID_CONNECTEX": "syscall", - "syscall.WSAIoctl": "syscall", - "syscall.WSAPROTOCOL_LEN": "syscall", - "syscall.WSAProtocolChain": "syscall", - "syscall.WSAProtocolInfo": "syscall", - "syscall.WSARecv": "syscall", - "syscall.WSARecvFrom": "syscall", - "syscall.WSASYS_STATUS_LEN": "syscall", - "syscall.WSASend": "syscall", - "syscall.WSASendTo": "syscall", - "syscall.WSASendto": "syscall", - "syscall.WSAStartup": "syscall", - "syscall.WSTOPPED": "syscall", - "syscall.WTRAPPED": "syscall", - "syscall.WUNTRACED": "syscall", - "syscall.Wait4": "syscall", - "syscall.WaitForSingleObject": "syscall", - "syscall.WaitStatus": "syscall", - "syscall.Win32FileAttributeData": "syscall", - "syscall.Win32finddata": "syscall", - "syscall.Write": "syscall", - "syscall.WriteConsole": "syscall", - "syscall.WriteFile": "syscall", - "syscall.X509_ASN_ENCODING": "syscall", - "syscall.XCASE": "syscall", - "syscall.XP1_CONNECTIONLESS": "syscall", - "syscall.XP1_CONNECT_DATA": "syscall", - "syscall.XP1_DISCONNECT_DATA": "syscall", - "syscall.XP1_EXPEDITED_DATA": "syscall", - "syscall.XP1_GRACEFUL_CLOSE": "syscall", - "syscall.XP1_GUARANTEED_DELIVERY": "syscall", - "syscall.XP1_GUARANTEED_ORDER": "syscall", - "syscall.XP1_IFS_HANDLES": "syscall", - "syscall.XP1_MESSAGE_ORIENTED": "syscall", - "syscall.XP1_MULTIPOINT_CONTROL_PLANE": "syscall", - "syscall.XP1_MULTIPOINT_DATA_PLANE": "syscall", - "syscall.XP1_PARTIAL_MESSAGE": "syscall", - "syscall.XP1_PSEUDO_STREAM": "syscall", - "syscall.XP1_QOS_SUPPORTED": "syscall", - "syscall.XP1_SAN_SUPPORT_SDP": "syscall", - "syscall.XP1_SUPPORT_BROADCAST": "syscall", - "syscall.XP1_SUPPORT_MULTIPOINT": "syscall", - "syscall.XP1_UNI_RECV": "syscall", - "syscall.XP1_UNI_SEND": "syscall", - "syslog.Dial": "log/syslog", - "syslog.LOG_ALERT": "log/syslog", - "syslog.LOG_AUTH": "log/syslog", - "syslog.LOG_AUTHPRIV": "log/syslog", - "syslog.LOG_CRIT": "log/syslog", - "syslog.LOG_CRON": "log/syslog", - "syslog.LOG_DAEMON": "log/syslog", - "syslog.LOG_DEBUG": "log/syslog", - "syslog.LOG_EMERG": "log/syslog", - "syslog.LOG_ERR": "log/syslog", - "syslog.LOG_FTP": "log/syslog", - "syslog.LOG_INFO": "log/syslog", - "syslog.LOG_KERN": "log/syslog", - "syslog.LOG_LOCAL0": "log/syslog", - "syslog.LOG_LOCAL1": "log/syslog", - "syslog.LOG_LOCAL2": "log/syslog", - "syslog.LOG_LOCAL3": "log/syslog", - "syslog.LOG_LOCAL4": "log/syslog", - "syslog.LOG_LOCAL5": "log/syslog", - 
"syslog.LOG_LOCAL6": "log/syslog", - "syslog.LOG_LOCAL7": "log/syslog", - "syslog.LOG_LPR": "log/syslog", - "syslog.LOG_MAIL": "log/syslog", - "syslog.LOG_NEWS": "log/syslog", - "syslog.LOG_NOTICE": "log/syslog", - "syslog.LOG_SYSLOG": "log/syslog", - "syslog.LOG_USER": "log/syslog", - "syslog.LOG_UUCP": "log/syslog", - "syslog.LOG_WARNING": "log/syslog", - "syslog.New": "log/syslog", - "syslog.NewLogger": "log/syslog", - "syslog.Priority": "log/syslog", - "syslog.Writer": "log/syslog", - "tabwriter.AlignRight": "text/tabwriter", - "tabwriter.Debug": "text/tabwriter", - "tabwriter.DiscardEmptyColumns": "text/tabwriter", - "tabwriter.Escape": "text/tabwriter", - "tabwriter.FilterHTML": "text/tabwriter", - "tabwriter.NewWriter": "text/tabwriter", - "tabwriter.StripEscape": "text/tabwriter", - "tabwriter.TabIndent": "text/tabwriter", - "tabwriter.Writer": "text/tabwriter", - "tar.ErrFieldTooLong": "archive/tar", - "tar.ErrHeader": "archive/tar", - "tar.ErrWriteAfterClose": "archive/tar", - "tar.ErrWriteTooLong": "archive/tar", - "tar.FileInfoHeader": "archive/tar", - "tar.Header": "archive/tar", - "tar.NewReader": "archive/tar", - "tar.NewWriter": "archive/tar", - "tar.Reader": "archive/tar", - "tar.TypeBlock": "archive/tar", - "tar.TypeChar": "archive/tar", - "tar.TypeCont": "archive/tar", - "tar.TypeDir": "archive/tar", - "tar.TypeFifo": "archive/tar", - "tar.TypeGNULongLink": "archive/tar", - "tar.TypeGNULongName": "archive/tar", - "tar.TypeGNUSparse": "archive/tar", - "tar.TypeLink": "archive/tar", - "tar.TypeReg": "archive/tar", - "tar.TypeRegA": "archive/tar", - "tar.TypeSymlink": "archive/tar", - "tar.TypeXGlobalHeader": "archive/tar", - "tar.TypeXHeader": "archive/tar", - "tar.Writer": "archive/tar", - "template.CSS": "html/template", - "template.ErrAmbigContext": "html/template", - "template.ErrBadHTML": "html/template", - "template.ErrBranchEnd": "html/template", - "template.ErrEndContext": "html/template", - "template.ErrNoSuchTemplate": "html/template", - "template.ErrOutputContext": "html/template", - "template.ErrPartialCharset": "html/template", - "template.ErrPartialEscape": "html/template", - "template.ErrRangeLoopReentry": "html/template", - "template.ErrSlashAmbig": "html/template", - "template.Error": "html/template", - "template.ErrorCode": "html/template", - "template.ExecError": "text/template", - // "template.FuncMap" is ambiguous - "template.HTML": "html/template", - "template.HTMLAttr": "html/template", - // "template.HTMLEscape" is ambiguous - // "template.HTMLEscapeString" is ambiguous - // "template.HTMLEscaper" is ambiguous - // "template.IsTrue" is ambiguous - "template.JS": "html/template", - // "template.JSEscape" is ambiguous - // "template.JSEscapeString" is ambiguous - // "template.JSEscaper" is ambiguous - "template.JSStr": "html/template", - // "template.Must" is ambiguous - // "template.New" is ambiguous - "template.OK": "html/template", - // "template.ParseFiles" is ambiguous - // "template.ParseGlob" is ambiguous - // "template.Template" is ambiguous - "template.URL": "html/template", - // "template.URLQueryEscaper" is ambiguous - "testing.AllocsPerRun": "testing", - "testing.B": "testing", - "testing.Benchmark": "testing", - "testing.BenchmarkResult": "testing", - "testing.Cover": "testing", - "testing.CoverBlock": "testing", - "testing.CoverMode": "testing", - "testing.Coverage": "testing", - "testing.InternalBenchmark": "testing", - "testing.InternalExample": "testing", - "testing.InternalTest": "testing", - "testing.M": "testing", - "testing.Main": 
"testing", - "testing.MainStart": "testing", - "testing.PB": "testing", - "testing.RegisterCover": "testing", - "testing.RunBenchmarks": "testing", - "testing.RunExamples": "testing", - "testing.RunTests": "testing", - "testing.Short": "testing", - "testing.T": "testing", - "testing.Verbose": "testing", - "textproto.CanonicalMIMEHeaderKey": "net/textproto", - "textproto.Conn": "net/textproto", - "textproto.Dial": "net/textproto", - "textproto.Error": "net/textproto", - "textproto.MIMEHeader": "net/textproto", - "textproto.NewConn": "net/textproto", - "textproto.NewReader": "net/textproto", - "textproto.NewWriter": "net/textproto", - "textproto.Pipeline": "net/textproto", - "textproto.ProtocolError": "net/textproto", - "textproto.Reader": "net/textproto", - "textproto.TrimBytes": "net/textproto", - "textproto.TrimString": "net/textproto", - "textproto.Writer": "net/textproto", - "time.ANSIC": "time", - "time.After": "time", - "time.AfterFunc": "time", - "time.April": "time", - "time.August": "time", - "time.Date": "time", - "time.December": "time", - "time.Duration": "time", - "time.February": "time", - "time.FixedZone": "time", - "time.Friday": "time", - "time.Hour": "time", - "time.January": "time", - "time.July": "time", - "time.June": "time", - "time.Kitchen": "time", - "time.LoadLocation": "time", - "time.Local": "time", - "time.Location": "time", - "time.March": "time", - "time.May": "time", - "time.Microsecond": "time", - "time.Millisecond": "time", - "time.Minute": "time", - "time.Monday": "time", - "time.Month": "time", - "time.Nanosecond": "time", - "time.NewTicker": "time", - "time.NewTimer": "time", - "time.November": "time", - "time.Now": "time", - "time.October": "time", - "time.Parse": "time", - "time.ParseDuration": "time", - "time.ParseError": "time", - "time.ParseInLocation": "time", - "time.RFC1123": "time", - "time.RFC1123Z": "time", - "time.RFC3339": "time", - "time.RFC3339Nano": "time", - "time.RFC822": "time", - "time.RFC822Z": "time", - "time.RFC850": "time", - "time.RubyDate": "time", - "time.Saturday": "time", - "time.Second": "time", - "time.September": "time", - "time.Since": "time", - "time.Sleep": "time", - "time.Stamp": "time", - "time.StampMicro": "time", - "time.StampMilli": "time", - "time.StampNano": "time", - "time.Sunday": "time", - "time.Thursday": "time", - "time.Tick": "time", - "time.Ticker": "time", - "time.Time": "time", - "time.Timer": "time", - "time.Tuesday": "time", - "time.UTC": "time", - "time.Unix": "time", - "time.UnixDate": "time", - "time.Until": "time", - "time.Wednesday": "time", - "time.Weekday": "time", - "tls.Certificate": "crypto/tls", - "tls.CertificateRequestInfo": "crypto/tls", - "tls.Client": "crypto/tls", - "tls.ClientAuthType": "crypto/tls", - "tls.ClientHelloInfo": "crypto/tls", - "tls.ClientSessionCache": "crypto/tls", - "tls.ClientSessionState": "crypto/tls", - "tls.Config": "crypto/tls", - "tls.Conn": "crypto/tls", - "tls.ConnectionState": "crypto/tls", - "tls.CurveID": "crypto/tls", - "tls.CurveP256": "crypto/tls", - "tls.CurveP384": "crypto/tls", - "tls.CurveP521": "crypto/tls", - "tls.Dial": "crypto/tls", - "tls.DialWithDialer": "crypto/tls", - "tls.ECDSAWithP256AndSHA256": "crypto/tls", - "tls.ECDSAWithP384AndSHA384": "crypto/tls", - "tls.ECDSAWithP521AndSHA512": "crypto/tls", - "tls.Listen": "crypto/tls", - "tls.LoadX509KeyPair": "crypto/tls", - "tls.NewLRUClientSessionCache": "crypto/tls", - "tls.NewListener": "crypto/tls", - "tls.NoClientCert": "crypto/tls", - "tls.PKCS1WithSHA1": "crypto/tls", - 
"tls.PKCS1WithSHA256": "crypto/tls", - "tls.PKCS1WithSHA384": "crypto/tls", - "tls.PKCS1WithSHA512": "crypto/tls", - "tls.PSSWithSHA256": "crypto/tls", - "tls.PSSWithSHA384": "crypto/tls", - "tls.PSSWithSHA512": "crypto/tls", - "tls.RecordHeaderError": "crypto/tls", - "tls.RenegotiateFreelyAsClient": "crypto/tls", - "tls.RenegotiateNever": "crypto/tls", - "tls.RenegotiateOnceAsClient": "crypto/tls", - "tls.RenegotiationSupport": "crypto/tls", - "tls.RequestClientCert": "crypto/tls", - "tls.RequireAndVerifyClientCert": "crypto/tls", - "tls.RequireAnyClientCert": "crypto/tls", - "tls.Server": "crypto/tls", - "tls.SignatureScheme": "crypto/tls", - "tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": "crypto/tls", - "tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": "crypto/tls", - "tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": "crypto/tls", - "tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": "crypto/tls", - "tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": "crypto/tls", - "tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": "crypto/tls", - "tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": "crypto/tls", - "tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": "crypto/tls", - "tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": "crypto/tls", - "tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": "crypto/tls", - "tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": "crypto/tls", - "tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": "crypto/tls", - "tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": "crypto/tls", - "tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": "crypto/tls", - "tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA": "crypto/tls", - "tls.TLS_FALLBACK_SCSV": "crypto/tls", - "tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA": "crypto/tls", - "tls.TLS_RSA_WITH_AES_128_CBC_SHA": "crypto/tls", - "tls.TLS_RSA_WITH_AES_128_CBC_SHA256": "crypto/tls", - "tls.TLS_RSA_WITH_AES_128_GCM_SHA256": "crypto/tls", - "tls.TLS_RSA_WITH_AES_256_CBC_SHA": "crypto/tls", - "tls.TLS_RSA_WITH_AES_256_GCM_SHA384": "crypto/tls", - "tls.TLS_RSA_WITH_RC4_128_SHA": "crypto/tls", - "tls.VerifyClientCertIfGiven": "crypto/tls", - "tls.VersionSSL30": "crypto/tls", - "tls.VersionTLS10": "crypto/tls", - "tls.VersionTLS11": "crypto/tls", - "tls.VersionTLS12": "crypto/tls", - "tls.X25519": "crypto/tls", - "tls.X509KeyPair": "crypto/tls", - "token.ADD": "go/token", - "token.ADD_ASSIGN": "go/token", - "token.AND": "go/token", - "token.AND_ASSIGN": "go/token", - "token.AND_NOT": "go/token", - "token.AND_NOT_ASSIGN": "go/token", - "token.ARROW": "go/token", - "token.ASSIGN": "go/token", - "token.BREAK": "go/token", - "token.CASE": "go/token", - "token.CHAN": "go/token", - "token.CHAR": "go/token", - "token.COLON": "go/token", - "token.COMMA": "go/token", - "token.COMMENT": "go/token", - "token.CONST": "go/token", - "token.CONTINUE": "go/token", - "token.DEC": "go/token", - "token.DEFAULT": "go/token", - "token.DEFER": "go/token", - "token.DEFINE": "go/token", - "token.ELLIPSIS": "go/token", - "token.ELSE": "go/token", - "token.EOF": "go/token", - "token.EQL": "go/token", - "token.FALLTHROUGH": "go/token", - "token.FLOAT": "go/token", - "token.FOR": "go/token", - "token.FUNC": "go/token", - "token.File": "go/token", - "token.FileSet": "go/token", - "token.GEQ": "go/token", - "token.GO": "go/token", - "token.GOTO": "go/token", - "token.GTR": "go/token", - "token.HighestPrec": "go/token", - "token.IDENT": "go/token", - "token.IF": "go/token", - "token.ILLEGAL": "go/token", - "token.IMAG": "go/token", - "token.IMPORT": "go/token", - "token.INC": "go/token", - "token.INT": "go/token", - "token.INTERFACE": "go/token", - "token.LAND": "go/token", - 
"token.LBRACE": "go/token", - "token.LBRACK": "go/token", - "token.LEQ": "go/token", - "token.LOR": "go/token", - "token.LPAREN": "go/token", - "token.LSS": "go/token", - "token.Lookup": "go/token", - "token.LowestPrec": "go/token", - "token.MAP": "go/token", - "token.MUL": "go/token", - "token.MUL_ASSIGN": "go/token", - "token.NEQ": "go/token", - "token.NOT": "go/token", - "token.NewFileSet": "go/token", - "token.NoPos": "go/token", - "token.OR": "go/token", - "token.OR_ASSIGN": "go/token", - "token.PACKAGE": "go/token", - "token.PERIOD": "go/token", - "token.Pos": "go/token", - "token.Position": "go/token", - "token.QUO": "go/token", - "token.QUO_ASSIGN": "go/token", - "token.RANGE": "go/token", - "token.RBRACE": "go/token", - "token.RBRACK": "go/token", - "token.REM": "go/token", - "token.REM_ASSIGN": "go/token", - "token.RETURN": "go/token", - "token.RPAREN": "go/token", - "token.SELECT": "go/token", - "token.SEMICOLON": "go/token", - "token.SHL": "go/token", - "token.SHL_ASSIGN": "go/token", - "token.SHR": "go/token", - "token.SHR_ASSIGN": "go/token", - "token.STRING": "go/token", - "token.STRUCT": "go/token", - "token.SUB": "go/token", - "token.SUB_ASSIGN": "go/token", - "token.SWITCH": "go/token", - "token.TYPE": "go/token", - "token.Token": "go/token", - "token.UnaryPrec": "go/token", - "token.VAR": "go/token", - "token.XOR": "go/token", - "token.XOR_ASSIGN": "go/token", - "trace.Start": "runtime/trace", - "trace.Stop": "runtime/trace", - "types.Array": "go/types", - "types.AssertableTo": "go/types", - "types.AssignableTo": "go/types", - "types.Basic": "go/types", - "types.BasicInfo": "go/types", - "types.BasicKind": "go/types", - "types.Bool": "go/types", - "types.Builtin": "go/types", - "types.Byte": "go/types", - "types.Chan": "go/types", - "types.ChanDir": "go/types", - "types.Checker": "go/types", - "types.Comparable": "go/types", - "types.Complex128": "go/types", - "types.Complex64": "go/types", - "types.Config": "go/types", - "types.Const": "go/types", - "types.ConvertibleTo": "go/types", - "types.DefPredeclaredTestFuncs": "go/types", - "types.Default": "go/types", - "types.Error": "go/types", - "types.Eval": "go/types", - "types.ExprString": "go/types", - "types.FieldVal": "go/types", - "types.Float32": "go/types", - "types.Float64": "go/types", - "types.Func": "go/types", - "types.Id": "go/types", - "types.Identical": "go/types", - "types.IdenticalIgnoreTags": "go/types", - "types.Implements": "go/types", - "types.ImportMode": "go/types", - "types.Importer": "go/types", - "types.ImporterFrom": "go/types", - "types.Info": "go/types", - "types.Initializer": "go/types", - "types.Int": "go/types", - "types.Int16": "go/types", - "types.Int32": "go/types", - "types.Int64": "go/types", - "types.Int8": "go/types", - "types.Interface": "go/types", - "types.Invalid": "go/types", - "types.IsBoolean": "go/types", - "types.IsComplex": "go/types", - "types.IsConstType": "go/types", - "types.IsFloat": "go/types", - "types.IsInteger": "go/types", - "types.IsInterface": "go/types", - "types.IsNumeric": "go/types", - "types.IsOrdered": "go/types", - "types.IsString": "go/types", - "types.IsUnsigned": "go/types", - "types.IsUntyped": "go/types", - "types.Label": "go/types", - "types.LookupFieldOrMethod": "go/types", - "types.Map": "go/types", - "types.MethodExpr": "go/types", - "types.MethodSet": "go/types", - "types.MethodVal": "go/types", - "types.MissingMethod": "go/types", - "types.Named": "go/types", - "types.NewArray": "go/types", - "types.NewChan": "go/types", - "types.NewChecker": 
"go/types", - "types.NewConst": "go/types", - "types.NewField": "go/types", - "types.NewFunc": "go/types", - "types.NewInterface": "go/types", - "types.NewLabel": "go/types", - "types.NewMap": "go/types", - "types.NewMethodSet": "go/types", - "types.NewNamed": "go/types", - "types.NewPackage": "go/types", - "types.NewParam": "go/types", - "types.NewPkgName": "go/types", - "types.NewPointer": "go/types", - "types.NewScope": "go/types", - "types.NewSignature": "go/types", - "types.NewSlice": "go/types", - "types.NewStruct": "go/types", - "types.NewTuple": "go/types", - "types.NewTypeName": "go/types", - "types.NewVar": "go/types", - "types.Nil": "go/types", - "types.ObjectString": "go/types", - "types.Package": "go/types", - "types.PkgName": "go/types", - "types.Pointer": "go/types", - "types.Qualifier": "go/types", - "types.RecvOnly": "go/types", - "types.RelativeTo": "go/types", - "types.Rune": "go/types", - "types.Scope": "go/types", - "types.Selection": "go/types", - "types.SelectionKind": "go/types", - "types.SelectionString": "go/types", - "types.SendOnly": "go/types", - "types.SendRecv": "go/types", - "types.Signature": "go/types", - "types.Sizes": "go/types", - "types.Slice": "go/types", - "types.StdSizes": "go/types", - "types.String": "go/types", - "types.Struct": "go/types", - "types.Tuple": "go/types", - "types.Typ": "go/types", - "types.Type": "go/types", - "types.TypeAndValue": "go/types", - "types.TypeName": "go/types", - "types.TypeString": "go/types", - "types.Uint": "go/types", - "types.Uint16": "go/types", - "types.Uint32": "go/types", - "types.Uint64": "go/types", - "types.Uint8": "go/types", - "types.Uintptr": "go/types", - "types.Universe": "go/types", - "types.Unsafe": "go/types", - "types.UnsafePointer": "go/types", - "types.UntypedBool": "go/types", - "types.UntypedComplex": "go/types", - "types.UntypedFloat": "go/types", - "types.UntypedInt": "go/types", - "types.UntypedNil": "go/types", - "types.UntypedRune": "go/types", - "types.UntypedString": "go/types", - "types.Var": "go/types", - "types.WriteExpr": "go/types", - "types.WriteSignature": "go/types", - "types.WriteType": "go/types", - "unicode.ASCII_Hex_Digit": "unicode", - "unicode.Adlam": "unicode", - "unicode.Ahom": "unicode", - "unicode.Anatolian_Hieroglyphs": "unicode", - "unicode.Arabic": "unicode", - "unicode.Armenian": "unicode", - "unicode.Avestan": "unicode", - "unicode.AzeriCase": "unicode", - "unicode.Balinese": "unicode", - "unicode.Bamum": "unicode", - "unicode.Bassa_Vah": "unicode", - "unicode.Batak": "unicode", - "unicode.Bengali": "unicode", - "unicode.Bhaiksuki": "unicode", - "unicode.Bidi_Control": "unicode", - "unicode.Bopomofo": "unicode", - "unicode.Brahmi": "unicode", - "unicode.Braille": "unicode", - "unicode.Buginese": "unicode", - "unicode.Buhid": "unicode", - "unicode.C": "unicode", - "unicode.Canadian_Aboriginal": "unicode", - "unicode.Carian": "unicode", - "unicode.CaseRange": "unicode", - "unicode.CaseRanges": "unicode", - "unicode.Categories": "unicode", - "unicode.Caucasian_Albanian": "unicode", - "unicode.Cc": "unicode", - "unicode.Cf": "unicode", - "unicode.Chakma": "unicode", - "unicode.Cham": "unicode", - "unicode.Cherokee": "unicode", - "unicode.Co": "unicode", - "unicode.Common": "unicode", - "unicode.Coptic": "unicode", - "unicode.Cs": "unicode", - "unicode.Cuneiform": "unicode", - "unicode.Cypriot": "unicode", - "unicode.Cyrillic": "unicode", - "unicode.Dash": "unicode", - "unicode.Deprecated": "unicode", - "unicode.Deseret": "unicode", - "unicode.Devanagari": "unicode", - 
"unicode.Diacritic": "unicode", - "unicode.Digit": "unicode", - "unicode.Duployan": "unicode", - "unicode.Egyptian_Hieroglyphs": "unicode", - "unicode.Elbasan": "unicode", - "unicode.Ethiopic": "unicode", - "unicode.Extender": "unicode", - "unicode.FoldCategory": "unicode", - "unicode.FoldScript": "unicode", - "unicode.Georgian": "unicode", - "unicode.Glagolitic": "unicode", - "unicode.Gothic": "unicode", - "unicode.Grantha": "unicode", - "unicode.GraphicRanges": "unicode", - "unicode.Greek": "unicode", - "unicode.Gujarati": "unicode", - "unicode.Gurmukhi": "unicode", - "unicode.Han": "unicode", - "unicode.Hangul": "unicode", - "unicode.Hanunoo": "unicode", - "unicode.Hatran": "unicode", - "unicode.Hebrew": "unicode", - "unicode.Hex_Digit": "unicode", - "unicode.Hiragana": "unicode", - "unicode.Hyphen": "unicode", - "unicode.IDS_Binary_Operator": "unicode", - "unicode.IDS_Trinary_Operator": "unicode", - "unicode.Ideographic": "unicode", - "unicode.Imperial_Aramaic": "unicode", - "unicode.In": "unicode", - "unicode.Inherited": "unicode", - "unicode.Inscriptional_Pahlavi": "unicode", - "unicode.Inscriptional_Parthian": "unicode", - "unicode.Is": "unicode", - "unicode.IsControl": "unicode", - "unicode.IsDigit": "unicode", - "unicode.IsGraphic": "unicode", - "unicode.IsLetter": "unicode", - "unicode.IsLower": "unicode", - "unicode.IsMark": "unicode", - "unicode.IsNumber": "unicode", - "unicode.IsOneOf": "unicode", - "unicode.IsPrint": "unicode", - "unicode.IsPunct": "unicode", - "unicode.IsSpace": "unicode", - "unicode.IsSymbol": "unicode", - "unicode.IsTitle": "unicode", - "unicode.IsUpper": "unicode", - "unicode.Javanese": "unicode", - "unicode.Join_Control": "unicode", - "unicode.Kaithi": "unicode", - "unicode.Kannada": "unicode", - "unicode.Katakana": "unicode", - "unicode.Kayah_Li": "unicode", - "unicode.Kharoshthi": "unicode", - "unicode.Khmer": "unicode", - "unicode.Khojki": "unicode", - "unicode.Khudawadi": "unicode", - "unicode.L": "unicode", - "unicode.Lao": "unicode", - "unicode.Latin": "unicode", - "unicode.Lepcha": "unicode", - "unicode.Letter": "unicode", - "unicode.Limbu": "unicode", - "unicode.Linear_A": "unicode", - "unicode.Linear_B": "unicode", - "unicode.Lisu": "unicode", - "unicode.Ll": "unicode", - "unicode.Lm": "unicode", - "unicode.Lo": "unicode", - "unicode.Logical_Order_Exception": "unicode", - "unicode.Lower": "unicode", - "unicode.LowerCase": "unicode", - "unicode.Lt": "unicode", - "unicode.Lu": "unicode", - "unicode.Lycian": "unicode", - "unicode.Lydian": "unicode", - "unicode.M": "unicode", - "unicode.Mahajani": "unicode", - "unicode.Malayalam": "unicode", - "unicode.Mandaic": "unicode", - "unicode.Manichaean": "unicode", - "unicode.Marchen": "unicode", - "unicode.Mark": "unicode", - "unicode.MaxASCII": "unicode", - "unicode.MaxCase": "unicode", - "unicode.MaxLatin1": "unicode", - "unicode.MaxRune": "unicode", - "unicode.Mc": "unicode", - "unicode.Me": "unicode", - "unicode.Meetei_Mayek": "unicode", - "unicode.Mende_Kikakui": "unicode", - "unicode.Meroitic_Cursive": "unicode", - "unicode.Meroitic_Hieroglyphs": "unicode", - "unicode.Miao": "unicode", - "unicode.Mn": "unicode", - "unicode.Modi": "unicode", - "unicode.Mongolian": "unicode", - "unicode.Mro": "unicode", - "unicode.Multani": "unicode", - "unicode.Myanmar": "unicode", - "unicode.N": "unicode", - "unicode.Nabataean": "unicode", - "unicode.Nd": "unicode", - "unicode.New_Tai_Lue": "unicode", - "unicode.Newa": "unicode", - "unicode.Nko": "unicode", - "unicode.Nl": "unicode", - "unicode.No": "unicode", - 
"unicode.Noncharacter_Code_Point": "unicode", - "unicode.Number": "unicode", - "unicode.Ogham": "unicode", - "unicode.Ol_Chiki": "unicode", - "unicode.Old_Hungarian": "unicode", - "unicode.Old_Italic": "unicode", - "unicode.Old_North_Arabian": "unicode", - "unicode.Old_Permic": "unicode", - "unicode.Old_Persian": "unicode", - "unicode.Old_South_Arabian": "unicode", - "unicode.Old_Turkic": "unicode", - "unicode.Oriya": "unicode", - "unicode.Osage": "unicode", - "unicode.Osmanya": "unicode", - "unicode.Other": "unicode", - "unicode.Other_Alphabetic": "unicode", - "unicode.Other_Default_Ignorable_Code_Point": "unicode", - "unicode.Other_Grapheme_Extend": "unicode", - "unicode.Other_ID_Continue": "unicode", - "unicode.Other_ID_Start": "unicode", - "unicode.Other_Lowercase": "unicode", - "unicode.Other_Math": "unicode", - "unicode.Other_Uppercase": "unicode", - "unicode.P": "unicode", - "unicode.Pahawh_Hmong": "unicode", - "unicode.Palmyrene": "unicode", - "unicode.Pattern_Syntax": "unicode", - "unicode.Pattern_White_Space": "unicode", - "unicode.Pau_Cin_Hau": "unicode", - "unicode.Pc": "unicode", - "unicode.Pd": "unicode", - "unicode.Pe": "unicode", - "unicode.Pf": "unicode", - "unicode.Phags_Pa": "unicode", - "unicode.Phoenician": "unicode", - "unicode.Pi": "unicode", - "unicode.Po": "unicode", - "unicode.Prepended_Concatenation_Mark": "unicode", - "unicode.PrintRanges": "unicode", - "unicode.Properties": "unicode", - "unicode.Ps": "unicode", - "unicode.Psalter_Pahlavi": "unicode", - "unicode.Punct": "unicode", - "unicode.Quotation_Mark": "unicode", - "unicode.Radical": "unicode", - "unicode.Range16": "unicode", - "unicode.Range32": "unicode", - "unicode.RangeTable": "unicode", - "unicode.Rejang": "unicode", - "unicode.ReplacementChar": "unicode", - "unicode.Runic": "unicode", - "unicode.S": "unicode", - "unicode.STerm": "unicode", - "unicode.Samaritan": "unicode", - "unicode.Saurashtra": "unicode", - "unicode.Sc": "unicode", - "unicode.Scripts": "unicode", - "unicode.Sentence_Terminal": "unicode", - "unicode.Sharada": "unicode", - "unicode.Shavian": "unicode", - "unicode.Siddham": "unicode", - "unicode.SignWriting": "unicode", - "unicode.SimpleFold": "unicode", - "unicode.Sinhala": "unicode", - "unicode.Sk": "unicode", - "unicode.Sm": "unicode", - "unicode.So": "unicode", - "unicode.Soft_Dotted": "unicode", - "unicode.Sora_Sompeng": "unicode", - "unicode.Space": "unicode", - "unicode.SpecialCase": "unicode", - "unicode.Sundanese": "unicode", - "unicode.Syloti_Nagri": "unicode", - "unicode.Symbol": "unicode", - "unicode.Syriac": "unicode", - "unicode.Tagalog": "unicode", - "unicode.Tagbanwa": "unicode", - "unicode.Tai_Le": "unicode", - "unicode.Tai_Tham": "unicode", - "unicode.Tai_Viet": "unicode", - "unicode.Takri": "unicode", - "unicode.Tamil": "unicode", - "unicode.Tangut": "unicode", - "unicode.Telugu": "unicode", - "unicode.Terminal_Punctuation": "unicode", - "unicode.Thaana": "unicode", - "unicode.Thai": "unicode", - "unicode.Tibetan": "unicode", - "unicode.Tifinagh": "unicode", - "unicode.Tirhuta": "unicode", - "unicode.Title": "unicode", - "unicode.TitleCase": "unicode", - "unicode.To": "unicode", - "unicode.ToLower": "unicode", - "unicode.ToTitle": "unicode", - "unicode.ToUpper": "unicode", - "unicode.TurkishCase": "unicode", - "unicode.Ugaritic": "unicode", - "unicode.Unified_Ideograph": "unicode", - "unicode.Upper": "unicode", - "unicode.UpperCase": "unicode", - "unicode.UpperLower": "unicode", - "unicode.Vai": "unicode", - "unicode.Variation_Selector": "unicode", - 
"unicode.Version": "unicode", - "unicode.Warang_Citi": "unicode", - "unicode.White_Space": "unicode", - "unicode.Yi": "unicode", - "unicode.Z": "unicode", - "unicode.Zl": "unicode", - "unicode.Zp": "unicode", - "unicode.Zs": "unicode", - "url.Error": "net/url", - "url.EscapeError": "net/url", - "url.InvalidHostError": "net/url", - "url.Parse": "net/url", - "url.ParseQuery": "net/url", - "url.ParseRequestURI": "net/url", - "url.PathEscape": "net/url", - "url.PathUnescape": "net/url", - "url.QueryEscape": "net/url", - "url.QueryUnescape": "net/url", - "url.URL": "net/url", - "url.User": "net/url", - "url.UserPassword": "net/url", - "url.Userinfo": "net/url", - "url.Values": "net/url", - "user.Current": "os/user", - "user.Group": "os/user", - "user.Lookup": "os/user", - "user.LookupGroup": "os/user", - "user.LookupGroupId": "os/user", - "user.LookupId": "os/user", - "user.UnknownGroupError": "os/user", - "user.UnknownGroupIdError": "os/user", - "user.UnknownUserError": "os/user", - "user.UnknownUserIdError": "os/user", - "user.User": "os/user", - "utf16.Decode": "unicode/utf16", - "utf16.DecodeRune": "unicode/utf16", - "utf16.Encode": "unicode/utf16", - "utf16.EncodeRune": "unicode/utf16", - "utf16.IsSurrogate": "unicode/utf16", - "utf8.DecodeLastRune": "unicode/utf8", - "utf8.DecodeLastRuneInString": "unicode/utf8", - "utf8.DecodeRune": "unicode/utf8", - "utf8.DecodeRuneInString": "unicode/utf8", - "utf8.EncodeRune": "unicode/utf8", - "utf8.FullRune": "unicode/utf8", - "utf8.FullRuneInString": "unicode/utf8", - "utf8.MaxRune": "unicode/utf8", - "utf8.RuneCount": "unicode/utf8", - "utf8.RuneCountInString": "unicode/utf8", - "utf8.RuneError": "unicode/utf8", - "utf8.RuneLen": "unicode/utf8", - "utf8.RuneSelf": "unicode/utf8", - "utf8.RuneStart": "unicode/utf8", - "utf8.UTFMax": "unicode/utf8", - "utf8.Valid": "unicode/utf8", - "utf8.ValidRune": "unicode/utf8", - "utf8.ValidString": "unicode/utf8", - "x509.CANotAuthorizedForThisName": "crypto/x509", - "x509.CertPool": "crypto/x509", - "x509.Certificate": "crypto/x509", - "x509.CertificateInvalidError": "crypto/x509", - "x509.CertificateRequest": "crypto/x509", - "x509.ConstraintViolationError": "crypto/x509", - "x509.CreateCertificate": "crypto/x509", - "x509.CreateCertificateRequest": "crypto/x509", - "x509.DSA": "crypto/x509", - "x509.DSAWithSHA1": "crypto/x509", - "x509.DSAWithSHA256": "crypto/x509", - "x509.DecryptPEMBlock": "crypto/x509", - "x509.ECDSA": "crypto/x509", - "x509.ECDSAWithSHA1": "crypto/x509", - "x509.ECDSAWithSHA256": "crypto/x509", - "x509.ECDSAWithSHA384": "crypto/x509", - "x509.ECDSAWithSHA512": "crypto/x509", - "x509.EncryptPEMBlock": "crypto/x509", - "x509.ErrUnsupportedAlgorithm": "crypto/x509", - "x509.Expired": "crypto/x509", - "x509.ExtKeyUsage": "crypto/x509", - "x509.ExtKeyUsageAny": "crypto/x509", - "x509.ExtKeyUsageClientAuth": "crypto/x509", - "x509.ExtKeyUsageCodeSigning": "crypto/x509", - "x509.ExtKeyUsageEmailProtection": "crypto/x509", - "x509.ExtKeyUsageIPSECEndSystem": "crypto/x509", - "x509.ExtKeyUsageIPSECTunnel": "crypto/x509", - "x509.ExtKeyUsageIPSECUser": "crypto/x509", - "x509.ExtKeyUsageMicrosoftServerGatedCrypto": "crypto/x509", - "x509.ExtKeyUsageNetscapeServerGatedCrypto": "crypto/x509", - "x509.ExtKeyUsageOCSPSigning": "crypto/x509", - "x509.ExtKeyUsageServerAuth": "crypto/x509", - "x509.ExtKeyUsageTimeStamping": "crypto/x509", - "x509.HostnameError": "crypto/x509", - "x509.IncompatibleUsage": "crypto/x509", - "x509.IncorrectPasswordError": "crypto/x509", - "x509.InsecureAlgorithmError": 
"crypto/x509", - "x509.InvalidReason": "crypto/x509", - "x509.IsEncryptedPEMBlock": "crypto/x509", - "x509.KeyUsage": "crypto/x509", - "x509.KeyUsageCRLSign": "crypto/x509", - "x509.KeyUsageCertSign": "crypto/x509", - "x509.KeyUsageContentCommitment": "crypto/x509", - "x509.KeyUsageDataEncipherment": "crypto/x509", - "x509.KeyUsageDecipherOnly": "crypto/x509", - "x509.KeyUsageDigitalSignature": "crypto/x509", - "x509.KeyUsageEncipherOnly": "crypto/x509", - "x509.KeyUsageKeyAgreement": "crypto/x509", - "x509.KeyUsageKeyEncipherment": "crypto/x509", - "x509.MD2WithRSA": "crypto/x509", - "x509.MD5WithRSA": "crypto/x509", - "x509.MarshalECPrivateKey": "crypto/x509", - "x509.MarshalPKCS1PrivateKey": "crypto/x509", - "x509.MarshalPKIXPublicKey": "crypto/x509", - "x509.NameMismatch": "crypto/x509", - "x509.NewCertPool": "crypto/x509", - "x509.NotAuthorizedToSign": "crypto/x509", - "x509.PEMCipher": "crypto/x509", - "x509.PEMCipher3DES": "crypto/x509", - "x509.PEMCipherAES128": "crypto/x509", - "x509.PEMCipherAES192": "crypto/x509", - "x509.PEMCipherAES256": "crypto/x509", - "x509.PEMCipherDES": "crypto/x509", - "x509.ParseCRL": "crypto/x509", - "x509.ParseCertificate": "crypto/x509", - "x509.ParseCertificateRequest": "crypto/x509", - "x509.ParseCertificates": "crypto/x509", - "x509.ParseDERCRL": "crypto/x509", - "x509.ParseECPrivateKey": "crypto/x509", - "x509.ParsePKCS1PrivateKey": "crypto/x509", - "x509.ParsePKCS8PrivateKey": "crypto/x509", - "x509.ParsePKIXPublicKey": "crypto/x509", - "x509.PublicKeyAlgorithm": "crypto/x509", - "x509.RSA": "crypto/x509", - "x509.SHA1WithRSA": "crypto/x509", - "x509.SHA256WithRSA": "crypto/x509", - "x509.SHA256WithRSAPSS": "crypto/x509", - "x509.SHA384WithRSA": "crypto/x509", - "x509.SHA384WithRSAPSS": "crypto/x509", - "x509.SHA512WithRSA": "crypto/x509", - "x509.SHA512WithRSAPSS": "crypto/x509", - "x509.SignatureAlgorithm": "crypto/x509", - "x509.SystemCertPool": "crypto/x509", - "x509.SystemRootsError": "crypto/x509", - "x509.TooManyIntermediates": "crypto/x509", - "x509.UnhandledCriticalExtension": "crypto/x509", - "x509.UnknownAuthorityError": "crypto/x509", - "x509.UnknownPublicKeyAlgorithm": "crypto/x509", - "x509.UnknownSignatureAlgorithm": "crypto/x509", - "x509.VerifyOptions": "crypto/x509", - "xml.Attr": "encoding/xml", - "xml.CharData": "encoding/xml", - "xml.Comment": "encoding/xml", - "xml.CopyToken": "encoding/xml", - "xml.Decoder": "encoding/xml", - "xml.Directive": "encoding/xml", - "xml.Encoder": "encoding/xml", - "xml.EndElement": "encoding/xml", - "xml.Escape": "encoding/xml", - "xml.EscapeText": "encoding/xml", - "xml.HTMLAutoClose": "encoding/xml", - "xml.HTMLEntity": "encoding/xml", - "xml.Header": "encoding/xml", - "xml.Marshal": "encoding/xml", - "xml.MarshalIndent": "encoding/xml", - "xml.Marshaler": "encoding/xml", - "xml.MarshalerAttr": "encoding/xml", - "xml.Name": "encoding/xml", - "xml.NewDecoder": "encoding/xml", - "xml.NewEncoder": "encoding/xml", - "xml.ProcInst": "encoding/xml", - "xml.StartElement": "encoding/xml", - "xml.SyntaxError": "encoding/xml", - "xml.TagPathError": "encoding/xml", - "xml.Token": "encoding/xml", - "xml.Unmarshal": "encoding/xml", - "xml.UnmarshalError": "encoding/xml", - "xml.Unmarshaler": "encoding/xml", - "xml.UnmarshalerAttr": "encoding/xml", - "xml.UnsupportedTypeError": "encoding/xml", - "zip.Compressor": "archive/zip", - "zip.Decompressor": "archive/zip", - "zip.Deflate": "archive/zip", - "zip.ErrAlgorithm": "archive/zip", - "zip.ErrChecksum": "archive/zip", - "zip.ErrFormat": "archive/zip", - 
"zip.File": "archive/zip", - "zip.FileHeader": "archive/zip", - "zip.FileInfoHeader": "archive/zip", - "zip.NewReader": "archive/zip", - "zip.NewWriter": "archive/zip", - "zip.OpenReader": "archive/zip", - "zip.ReadCloser": "archive/zip", - "zip.Reader": "archive/zip", - "zip.RegisterCompressor": "archive/zip", - "zip.RegisterDecompressor": "archive/zip", - "zip.Store": "archive/zip", - "zip.Writer": "archive/zip", - "zlib.BestCompression": "compress/zlib", - "zlib.BestSpeed": "compress/zlib", - "zlib.DefaultCompression": "compress/zlib", - "zlib.ErrChecksum": "compress/zlib", - "zlib.ErrDictionary": "compress/zlib", - "zlib.ErrHeader": "compress/zlib", - "zlib.HuffmanOnly": "compress/zlib", - "zlib.NewReader": "compress/zlib", - "zlib.NewReaderDict": "compress/zlib", - "zlib.NewWriter": "compress/zlib", - "zlib.NewWriterLevel": "compress/zlib", - "zlib.NewWriterLevelDict": "compress/zlib", - "zlib.NoCompression": "compress/zlib", - "zlib.Resetter": "compress/zlib", - "zlib.Writer": "compress/zlib", - - "unsafe.Alignof": "unsafe", - "unsafe.ArbitraryType": "unsafe", - "unsafe.Offsetof": "unsafe", - "unsafe.Pointer": "unsafe", - "unsafe.Sizeof": "unsafe", -} diff --git a/vertical-pod-autoscaler/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go b/vertical-pod-autoscaler/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go index 4b232211be7f..f93a65edf7f8 100644 --- a/vertical-pod-autoscaler/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go +++ b/vertical-pod-autoscaler/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go @@ -38273,6 +38273,7 @@ func (c *AcceleratorTypesAggregatedListCall) doRequest(alt string) (*http.Respon } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/acceleratorTypes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -38461,6 +38462,7 @@ func (c *AcceleratorTypesGetCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/acceleratorTypes/{acceleratorType}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -38685,6 +38687,7 @@ func (c *AcceleratorTypesListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/acceleratorTypes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -38942,6 +38945,7 @@ func (c *AddressesAggregatedListCall) doRequest(alt string) (*http.Response, err } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/addresses") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -39136,6 +39140,7 @@ func (c *AddressesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -39303,6 +39308,7 @@ func (c *AddressesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -39477,6 +39483,7 @@ func (c *AddressesInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -39700,6 +39707,7 @@ func (c *AddressesListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -39910,6 +39918,7 @@ func (c *AddressesSetLabelsCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -40073,6 +40082,7 @@ func (c *AddressesTestIamPermissionsCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -40297,6 +40307,7 @@ func (c *AllocationsAggregatedListCall) doRequest(alt string) (*http.Response, e } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/allocations") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -40485,6 +40496,7 @@ func (c *AllocationsGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/allocations/{allocation}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -40648,6 +40660,7 @@ func (c *AllocationsGetIamPolicyCall) doRequest(alt string) (*http.Response, err } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/allocations/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -40820,6 +40833,7 @@ func (c *AllocationsInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/allocations") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -41042,6 +41056,7 @@ func (c *AllocationsListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/allocations") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -41233,6 +41248,7 @@ func (c *AllocationsSetIamPolicyCall) doRequest(alt string) (*http.Response, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/allocations/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -41391,6 +41407,7 @@ func (c *AllocationsTestIamPermissionsCall) doRequest(alt string) (*http.Respons } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/allocations/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -41568,6 +41585,7 @@ func (c *AllocationsUpdateResourceShapeCall) doRequest(alt string) (*http.Respon } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/allocations/{allocation}/updateResourceShape") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -41796,6 +41814,7 @@ func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, e } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/autoscalers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -41989,6 +42008,7 @@ func (c *AutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -42156,6 +42176,7 @@ func (c *AutoscalersGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -42329,6 +42350,7 @@ func (c *AutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -42551,6 +42573,7 @@ func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -42767,6 +42790,7 @@ func (c *AutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) @@ -42927,6 +42951,7 @@ func (c *AutoscalersTestIamPermissionsCall) doRequest(alt string) (*http.Respons } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -43110,6 +43135,7 @@ func (c *AutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) @@ -43287,6 +43313,7 @@ func (c *BackendBucketsAddSignedUrlKeyCall) doRequest(alt string) (*http.Respons } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}/addSignedUrlKey") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -43449,6 +43476,7 @@ func (c *BackendBucketsDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -43611,6 +43639,7 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Resp reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}/deleteSignedUrlKey") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -43773,6 +43802,7 @@ func (c *BackendBucketsGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -43925,6 +43955,7 @@ func (c *BackendBucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -44087,6 +44118,7 @@ func (c *BackendBucketsInsertCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -44298,6 +44330,7 @@ func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -44498,6 +44531,7 @@ func (c *BackendBucketsPatchCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) @@ -44650,6 +44684,7 @@ func (c *BackendBucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -44797,6 +44832,7 @@ func (c *BackendBucketsTestIamPermissionsCall) doRequest(alt string) (*http.Resp } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -44964,6 +45000,7 @@ func (c *BackendBucketsUpdateCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) @@ -45135,6 +45172,7 @@ func (c *BackendServicesAddSignedUrlKeyCall) doRequest(alt string) (*http.Respon } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/addSignedUrlKey") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -45354,6 +45392,7 @@ func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Respons } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/backendServices") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -45546,6 +45585,7 @@ func (c *BackendServicesDeleteCall) doRequest(alt string) (*http.Response, error reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -45708,6 +45748,7 @@ func (c *BackendServicesDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Res reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/deleteSignedUrlKey") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -45871,6 +45912,7 @@ func (c *BackendServicesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -46017,6 +46059,7 @@ func (c *BackendServicesGetHealthCall) doRequest(alt string) (*http.Response, er } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/getHealth") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -46184,6 +46227,7 @@ func (c *BackendServicesInsertCall) doRequest(alt string) (*http.Response, error } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -46396,6 +46440,7 @@ func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -46600,6 +46645,7 @@ func (c *BackendServicesPatchCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) @@ -46771,6 +46817,7 @@ func (c *BackendServicesSetSecurityPolicyCall) doRequest(alt string) (*http.Resp } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/setSecurityPolicy") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -46922,6 +46969,7 @@ func (c *BackendServicesTestIamPermissionsCall) doRequest(alt string) (*http.Res } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -47092,6 +47140,7 @@ func (c *BackendServicesUpdateCall) doRequest(alt string) (*http.Response, error } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) @@ -47312,6 +47361,7 @@ func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, err } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/diskTypes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -47502,6 +47552,7 @@ func (c *DiskTypesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes/{diskType}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -47727,6 +47778,7 @@ func (c *DiskTypesListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -47938,6 +47990,7 @@ func (c *DisksAddResourcePoliciesCall) doRequest(alt string) (*http.Response, er } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/addResourcePolicies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -48167,6 +48220,7 @@ func (c *DisksAggregatedListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/disks") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -48374,6 +48428,7 @@ func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/createSnapshot") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -48556,6 +48611,7 @@ func (c *DisksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -48723,6 +48779,7 @@ func (c *DisksGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -48886,6 +48943,7 @@ func (c *DisksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -49070,6 +49128,7 @@ func (c *DisksInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -49298,6 +49357,7 @@ func (c *DisksListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -49507,6 +49567,7 @@ func (c *DisksRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/removeResourcePolicies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -49689,6 +49750,7 @@ func (c *DisksResizeCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/resize") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -49852,6 +49914,7 @@ func (c *DisksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/setIamPolicy") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -50029,6 +50092,7 @@ func (c *DisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -50192,6 +50256,7 @@ func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -50361,6 +50426,7 @@ func (c *FirewallsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -50517,6 +50583,7 @@ func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -50680,6 +50747,7 @@ func (c *FirewallsInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -50892,6 +50960,7 @@ func (c *FirewallsListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -51093,6 +51162,7 @@ func (c *FirewallsPatchCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) @@ -51245,6 +51315,7 @@ func (c *FirewallsTestIamPermissionsCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{resource}/testIamPermissions") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -51415,6 +51486,7 @@ func (c *FirewallsUpdateCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) @@ -51635,6 +51707,7 @@ func (c *ForwardingRulesAggregatedListCall) doRequest(alt string) (*http.Respons } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/forwardingRules") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -51829,6 +51902,7 @@ func (c *ForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -51996,6 +52070,7 @@ func (c *ForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -52170,6 +52245,7 @@ func (c *ForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -52393,6 +52469,7 @@ func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -52605,6 +52682,7 @@ func (c *ForwardingRulesPatchCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) @@ -52787,6 +52865,7 @@ func (c *ForwardingRulesSetLabelsCall) doRequest(alt string) (*http.Response, er } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{resource}/setLabels") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -52970,6 +53049,7 @@ func (c *ForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, er } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -53133,6 +53213,7 @@ func (c *ForwardingRulesTestIamPermissionsCall) doRequest(alt string) (*http.Res } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -53302,6 +53383,7 @@ func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -53459,6 +53541,7 @@ func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -53622,6 +53705,7 @@ func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -53833,6 +53917,7 @@ func (c *GlobalAddressesListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -54013,6 +54098,7 @@ func (c *GlobalAddressesSetLabelsCall) doRequest(alt string) (*http.Response, er } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -54160,6 +54246,7 @@ func (c *GlobalAddressesTestIamPermissionsCall) doRequest(alt string) (*http.Res } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{resource}/testIamPermissions") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -54320,6 +54407,7 @@ func (c *GlobalForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -54477,6 +54565,7 @@ func (c *GlobalForwardingRulesGetCall) doRequest(alt string) (*http.Response, er } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -54640,6 +54729,7 @@ func (c *GlobalForwardingRulesInsertCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -54852,6 +54942,7 @@ func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, e } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -55053,6 +55144,7 @@ func (c *GlobalForwardingRulesPatchCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) @@ -55205,6 +55297,7 @@ func (c *GlobalForwardingRulesSetLabelsCall) doRequest(alt string) (*http.Respon } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -55372,6 +55465,7 @@ func (c *GlobalForwardingRulesSetTargetCall) doRequest(alt string) (*http.Respon } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}/setTarget") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -55524,6 +55618,7 @@ func (c *GlobalForwardingRulesTestIamPermissionsCall) doRequest(alt string) (*ht } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{resource}/testIamPermissions") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -55740,6 +55835,7 @@ func (c *GlobalOperationsAggregatedListCall) doRequest(alt string) (*http.Respon } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/operations") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -55913,6 +56009,7 @@ func (c *GlobalOperationsDeleteCall) doRequest(alt string) (*http.Response, erro reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -56037,6 +56134,7 @@ func (c *GlobalOperationsGetCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -56251,6 +56349,7 @@ func (c *GlobalOperationsListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -56432,6 +56531,7 @@ func (c *GlobalOperationsWaitCall) doRequest(alt string) (*http.Response, error) reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}/wait") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -56645,6 +56745,7 @@ func (c *HealthChecksAggregatedListCall) doRequest(alt string) (*http.Response, } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/healthChecks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -56836,6 +56937,7 @@ func (c *HealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -56992,6 +57094,7 @@ func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -57154,6 +57257,7 @@ func (c *HealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -57365,6 +57469,7 @@ func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -57565,6 +57670,7 @@ func (c *HealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) @@ -57717,6 +57823,7 @@ func (c *HealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Respon } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -57884,6 +57991,7 @@ func (c *HealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) @@ -58048,6 +58156,7 @@ func (c *HttpHealthChecksDeleteCall) doRequest(alt string) (*http.Response, erro reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -58205,6 +58314,7 @@ func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -58368,6 +58478,7 @@ func (c *HttpHealthChecksInsertCall) doRequest(alt string) (*http.Response, erro } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -58580,6 +58691,7 @@ func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -58781,6 +58893,7 @@ func (c *HttpHealthChecksPatchCall) doRequest(alt string) (*http.Response, error } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) @@ -58933,6 +59046,7 @@ func (c *HttpHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Re } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -59101,6 +59215,7 @@ func (c *HttpHealthChecksUpdateCall) doRequest(alt string) (*http.Response, erro } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) @@ -59264,6 +59379,7 @@ func (c *HttpsHealthChecksDeleteCall) doRequest(alt string) (*http.Response, err reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -59420,6 +59536,7 @@ func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -59582,6 +59699,7 @@ func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -59793,6 +59911,7 @@ func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -59993,6 +60112,7 @@ func (c *HttpsHealthChecksPatchCall) doRequest(alt string) (*http.Response, erro } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) @@ -60145,6 +60265,7 @@ func (c *HttpsHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.R } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -60312,6 +60433,7 @@ func (c *HttpsHealthChecksUpdateCall) doRequest(alt string) (*http.Response, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) @@ -60476,6 +60598,7 @@ func (c *ImagesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -60647,6 +60770,7 @@ func (c *ImagesDeprecateCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}/deprecate") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -60807,6 +60931,7 @@ func (c *ImagesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -60959,6 +61084,7 @@ func (c *ImagesGetFromFamilyCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/family/{family}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -61111,6 +61237,7 @@ func (c *ImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -61281,6 +61408,7 @@ func (c *ImagesInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -61506,6 +61634,7 @@ func (c *ImagesListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -61686,6 +61815,7 @@ func (c *ImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -61833,6 +61963,7 @@ func (c *ImagesSetLabelsCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -61980,6 +62111,7 @@ func (c *ImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, er } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -62164,6 +62296,7 @@ func (c *InstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -62391,6 +62524,7 @@ func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.R } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -62575,6 +62709,7 @@ func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) doRequest(alt string) } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -62745,6 +62880,7 @@ func (c *InstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -62936,6 +63072,7 @@ func (c *InstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http. } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") urls += "?" 
+ c.urlParams_.Encode()
	req, _ := http.NewRequest("POST", urls, body)
@@ -63097,6 +63234,7 @@ func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) doRequest(alt string
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -63261,6 +63399,7 @@ func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, er
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -63440,6 +63579,7 @@ func (c *InstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -63661,6 +63801,7 @@ func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, e
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -63911,6 +64052,7 @@ func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -64167,6 +64309,7 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt string)
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -64391,6 +64534,7 @@ func (c *InstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -64584,6 +64728,7 @@ func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*htt
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -64767,6 +64912,7 @@ func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response,
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -64965,6 +65111,7 @@ func (c *InstanceGroupManagersResizeAdvancedCall) doRequest(alt string) (*http.R
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -65144,6 +65291,7 @@ func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) doRequest(alt string)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -65325,6 +65473,7 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*h
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -65510,6 +65659,7 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.R
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -65671,6 +65821,7 @@ func (c *InstanceGroupManagersTestIamPermissionsCall) doRequest(alt string) (*ht
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -65852,6 +66003,7 @@ func (c *InstanceGroupManagersUpdateCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PUT", urls, body)
@@ -66034,6 +66186,7 @@ func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt string
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -66215,6 +66368,7 @@ func (c *InstanceGroupsAddInstancesCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -66442,6 +66596,7 @@ func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroups")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -66638,6 +66793,7 @@ func (c *InstanceGroupsDeleteCall) doRequest(alt string) (*http.Response, error)
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -66803,6 +66959,7 @@ func (c *InstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -66974,6 +67131,7 @@ func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -67195,6 +67353,7 @@ func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -67447,6 +67606,7 @@ func (c *InstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -67671,6 +67831,7 @@ func (c *InstanceGroupsRemoveInstancesCall) doRequest(alt string) (*http.Respons
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -67850,6 +68011,7 @@ func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -68011,6 +68173,7 @@ func (c *InstanceGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Resp
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -68182,6 +68345,7 @@ func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, err
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -68339,6 +68503,7 @@ func (c *InstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -68491,6 +68656,7 @@ func (c *InstanceTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Respons
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/getIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -68657,6 +68823,7 @@ func (c *InstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, err
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -68869,6 +69036,7 @@ func (c *InstanceTemplatesListCall) doRequest(alt string) (*http.Response, error
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -69049,6 +69217,7 @@ func (c *InstanceTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Respons
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/setIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -69196,6 +69365,7 @@ func (c *InstanceTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.R
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -69367,6 +69537,7 @@ func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, er
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/addAccessConfig")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -69557,6 +69728,7 @@ func (c *InstancesAddResourcePoliciesCall) doRequest(alt string) (*http.Response
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/addResourcePolicies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -69787,6 +69959,7 @@ func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, err
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -69999,6 +70172,7 @@ func (c *InstancesAttachDiskCall) doRequest(alt string) (*http.Response, error)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/attachDisk")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -70180,6 +70354,7 @@ func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -70355,6 +70530,7 @@ func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response,
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -70542,6 +70718,7 @@ func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error)
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/detachDisk")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -70717,6 +70894,7 @@ func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -70886,6 +71064,7 @@ func (c *InstancesGetGuestAttributesCall) doRequest(alt string) (*http.Response,
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/getGuestAttributes")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -71054,6 +71233,7 @@ func (c *InstancesGetIamPolicyCall) doRequest(alt string) (*http.Response, error
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/getIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -71236,6 +71416,7 @@ func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/serialPort")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -71455,6 +71636,7 @@ func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -71688,6 +71870,7 @@ func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -71949,6 +72132,7 @@ func (c *InstancesListReferrersCall) doRequest(alt string) (*http.Response, erro
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/referrers")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -72167,6 +72351,7 @@ func (c *InstancesRemoveResourcePoliciesCall) doRequest(alt string) (*http.Respo
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/removeResourcePolicies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -72343,6 +72528,7 @@ func (c *InstancesResetCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/reset")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -72522,6 +72708,7 @@ func (c *InstancesResumeCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/resume")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -72703,6 +72890,7 @@ func (c *InstancesSetDeletionProtectionCall) doRequest(alt string) (*http.Respon
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/setDeletionProtection")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -72884,6 +73072,7 @@ func (c *InstancesSetDiskAutoDeleteCall) doRequest(alt string) (*http.Response,
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -73059,6 +73248,7 @@ func (c *InstancesSetIamPolicyCall) doRequest(alt string) (*http.Response, error
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/setIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -73236,6 +73426,7 @@ func (c *InstancesSetLabelsCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setLabels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -73418,6 +73609,7 @@ func (c *InstancesSetMachineResourcesCall) doRequest(alt string) (*http.Response
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineResources")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -73600,6 +73792,7 @@ func (c *InstancesSetMachineTypeCall) doRequest(alt string) (*http.Response, err
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineType")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -73783,6 +73976,7 @@ func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMetadata")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -73967,6 +74161,7 @@ func (c *InstancesSetMinCpuPlatformCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -74149,6 +74344,7 @@ func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, erro
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setScheduling")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -74332,6 +74528,7 @@ func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setServiceAccount")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -74516,6 +74713,7 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) doRequest(alt string) (*http
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setShieldedVmIntegrityPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -74699,6 +74897,7 @@ func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setTags")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -74855,6 +75054,7 @@ func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Res
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -75023,6 +75223,7 @@ func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/start")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -75203,6 +75404,7 @@ func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Respo
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -75391,6 +75593,7 @@ func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/stop")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -75580,6 +75783,7 @@ func (c *InstancesSuspendCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/suspend")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -75745,6 +75949,7 @@ func (c *InstancesTestIamPermissionsCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -75926,6 +76131,7 @@ func (c *InstancesUpdateAccessConfigCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateAccessConfig")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -76116,6 +76322,7 @@ func (c *InstancesUpdateNetworkInterfaceCall) doRequest(alt string) (*http.Respo
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateNetworkInterface")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -76307,6 +76514,7 @@ func (c *InstancesUpdateShieldedVmConfigCall) doRequest(alt string) (*http.Respo
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateShieldedVmConfig")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -76536,6 +76744,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/interconnectAttachments")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -76730,6 +76939,7 @@ func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Respons
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -76896,6 +77106,7 @@ func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response,
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -77059,6 +77270,7 @@ func (c *InterconnectAttachmentsGetIamPolicyCall) doRequest(alt string) (*http.R
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{resource}/getIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -77232,6 +77444,7 @@ func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Respons
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -77454,6 +77667,7 @@ func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response,
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -77665,6 +77879,7 @@ func (c *InterconnectAttachmentsPatchCall) doRequest(alt string) (*http.Response
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -77828,6 +78043,7 @@ func (c *InterconnectAttachmentsSetIamPolicyCall) doRequest(alt string) (*http.R
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{resource}/setIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -78005,6 +78221,7 @@ func (c *InterconnectAttachmentsSetLabelsCall) doRequest(alt string) (*http.Resp
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{resource}/setLabels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -78168,6 +78385,7 @@ func (c *InterconnectAttachmentsTestIamPermissionsCall) doRequest(alt string) (*
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -78333,6 +78551,7 @@ func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, er
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations/{interconnectLocation}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -78546,6 +78765,7 @@ func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, e
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -78726,6 +78946,7 @@ func (c *InterconnectLocationsTestIamPermissionsCall) doRequest(alt string) (*ht
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -78885,6 +79106,7 @@ func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error)
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -79041,6 +79263,7 @@ func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -79193,6 +79416,7 @@ func (c *InterconnectsGetDiagnosticsCall) doRequest(alt string) (*http.Response,
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}/getDiagnostics")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -79346,6 +79570,7 @@ func (c *InterconnectsGetIamPolicyCall) doRequest(alt string) (*http.Response, e
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{resource}/getIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -79508,6 +79733,7 @@ func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -79719,6 +79945,7 @@ func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -79919,6 +80146,7 @@ func (c *InterconnectsPatchCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -80071,6 +80299,7 @@ func (c *InterconnectsSetIamPolicyCall) doRequest(alt string) (*http.Response, e
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{resource}/setIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -80218,6 +80447,7 @@ func (c *InterconnectsSetLabelsCall) doRequest(alt string) (*http.Response, erro
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{resource}/setLabels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -80365,6 +80595,7 @@ func (c *InterconnectsTestIamPermissionsCall) doRequest(alt string) (*http.Respo
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -80520,6 +80751,7 @@ func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenseCodes/{licenseCode}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -80672,6 +80904,7 @@ func (c *LicenseCodesGetIamPolicyCall) doRequest(alt string) (*http.Response, er
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenseCodes/{resource}/getIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -80817,6 +81050,7 @@ func (c *LicenseCodesSetIamPolicyCall) doRequest(alt string) (*http.Response, er
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenseCodes/{resource}/setIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -80964,6 +81198,7 @@ func (c *LicenseCodesTestIamPermissionsCall) doRequest(alt string) (*http.Respon
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenseCodes/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -81123,6 +81358,7 @@ func (c *LicensesDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -81279,6 +81515,7 @@ func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -81431,6 +81668,7 @@ func (c *LicensesGetIamPolicyCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{resource}/getIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -81592,6 +81830,7 @@ func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -81810,6 +82049,7 @@ func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -81990,6 +82230,7 @@ func (c *LicensesSetIamPolicyCall) doRequest(alt string) (*http.Response, error)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{resource}/setIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -82137,6 +82378,7 @@ func (c *LicensesTestIamPermissionsCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -82300,6 +82542,7 @@ func (c *MachineImagesDeleteCall) doRequest(alt string) (*http.Response, error)
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/machineImages/{machineImage}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -82456,6 +82699,7 @@ func (c *MachineImagesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/machineImages/{machineImage}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -82608,6 +82852,7 @@ func (c *MachineImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, e
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/machineImages/{resource}/getIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -82780,6 +83025,7 @@ func (c *MachineImagesInsertCall) doRequest(alt string) (*http.Response, error)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/machineImages")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -82996,6 +83242,7 @@ func (c *MachineImagesListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/machineImages")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -83176,6 +83423,7 @@ func (c *MachineImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, e
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/machineImages/{resource}/setIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -83323,6 +83571,7 @@ func (c *MachineImagesTestIamPermissionsCall) doRequest(alt string) (*http.Respo
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/machineImages/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -83539,6 +83788,7 @@ func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response,
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/machineTypes")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -83729,6 +83979,7 @@ func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes/{machineType}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -83954,6 +84205,7 @@ func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -84211,6 +84463,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) doRequest(alt string) (*http.R
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/networkEndpointGroups")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -84413,6 +84666,7 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -84588,6 +84842,7 @@ func (c *NetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response,
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -84765,6 +85020,7 @@ func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) doRequest(alt string)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -84933,6 +85189,7 @@ func (c *NetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, er
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -85104,6 +85361,7 @@ func (c *NetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -85325,6 +85583,7 @@ func (c *NetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, e
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -85578,6 +85837,7 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt string) (*
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -85781,6 +86041,7 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) doRequest(alt string) (*ht
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -85956,6 +86217,7 @@ func (c *NetworksAddPeeringCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/addPeering")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -86120,6 +86382,7 @@ func (c *NetworksDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -86277,6 +86540,7 @@ func (c *NetworksGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -86440,6 +86704,7 @@ func (c *NetworksInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -86652,6 +86917,7 @@ func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -86910,6 +87176,7 @@ func (c *NetworksListIpAddressesCall) doRequest(alt string) (*http.Response, err
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/listIpAddresses")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -87208,6 +87475,7 @@ func (c *NetworksListIpOwnersCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/listIpOwners")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -87444,6 +87712,7 @@ func (c *NetworksPatchCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -87614,6 +87883,7 @@ func (c *NetworksRemovePeeringCall) doRequest(alt string) (*http.Response, error
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/removePeering")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -87778,6 +88048,7 @@ func (c *NetworksSwitchToCustomModeCall) doRequest(alt string) (*http.Response,
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/switchToCustomMode")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -87927,6 +88198,7 @@ func (c *NetworksTestIamPermissionsCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -88096,6 +88368,7 @@ func (c *NetworksUpdatePeeringCall) doRequest(alt string) (*http.Response, error
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/updatePeering")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -88268,6 +88541,7 @@ func (c *NodeGroupsAddNodesCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/addNodes")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -88497,6 +88771,7 @@ func (c *NodeGroupsAggregatedListCall) doRequest(alt string) (*http.Response, er
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/nodeGroups")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -88690,6 +88965,7 @@ func (c *NodeGroupsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -88868,6 +89144,7 @@ func (c *NodeGroupsDeleteNodesCall) doRequest(alt string) (*http.Response, error
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/deleteNodes")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -89039,6 +89316,7 @@ func (c *NodeGroupsGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -89202,6 +89480,7 @@ func (c *NodeGroupsGetIamPolicyCall) doRequest(alt string) (*http.Response, erro
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{resource}/getIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -89376,6 +89655,7 @@ func (c *NodeGroupsInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -89607,6 +89887,7 @@ func (c *NodeGroupsListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -89853,6 +90134,7 @@ func (c *NodeGroupsListNodesCall) doRequest(alt string) (*http.Response, error)
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/listNodes")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -90053,6 +90335,7 @@ func (c *NodeGroupsSetIamPolicyCall) doRequest(alt string) (*http.Response, erro
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{resource}/setIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -90229,6 +90512,7 @@ func (c *NodeGroupsSetNodeTemplateCall) doRequest(alt string) (*http.Response, e
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/setNodeTemplate")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -90392,6 +90676,7 @@ func (c *NodeGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -90616,6 +90901,7 @@ func (c *NodeTemplatesAggregatedListCall) doRequest(alt string) (*http.Response,
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/nodeTemplates")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -90809,6 +91095,7 @@ func (c *NodeTemplatesDeleteCall) doRequest(alt string) (*http.Response, error)
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{nodeTemplate}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -90976,6 +91263,7 @@ func (c *NodeTemplatesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{nodeTemplate}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -91139,6 +91427,7 @@ func (c *NodeTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, e
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{resource}/getIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -91312,6 +91601,7 @@ func (c *NodeTemplatesInsertCall) doRequest(alt string) (*http.Response, error)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -91534,6 +91824,7 @@ func (c *NodeTemplatesListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -91725,6 +92016,7 @@ func (c *NodeTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, e
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{resource}/setIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -91883,6 +92175,7 @@ func (c *NodeTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Respo
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -92107,6 +92400,7 @@ func (c *NodeTypesAggregatedListCall) doRequest(alt string) (*http.Response, err
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/nodeTypes")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -92296,6 +92590,7 @@ func (c *NodeTypesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeTypes/{nodeType}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -92520,6 +92815,7 @@ func (c *NodeTypesListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeTypes")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -92718,6 +93014,7 @@ func (c *ProjectsDisableXpnHostCall) doRequest(alt string) (*http.Response, erro
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/disableXpnHost")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -92875,6 +93172,7 @@ func (c *ProjectsDisableXpnResourceCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/disableXpnResource")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -93027,6 +93325,7 @@ func (c *ProjectsEnableXpnHostCall) doRequest(alt string) (*http.Response, error
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/enableXpnHost")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -93185,6 +93484,7 @@ func (c *ProjectsEnableXpnResourceCall) doRequest(alt string) (*http.Response, e
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/enableXpnResource")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -93333,6 +93633,7 @@ func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -93474,6 +93775,7 @@ func (c *ProjectsGetXpnHostCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/getXpnHost")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -93677,6 +93979,7 @@ func (c *ProjectsGetXpnResourcesCall) doRequest(alt string) (*http.Response, err
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/getXpnResources")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -93917,6 +94220,7 @@ func (c *ProjectsListXpnHostsCall) doRequest(alt string) (*http.Response, error)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/listXpnHosts")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -94115,6 +94419,7 @@ func (c *ProjectsMoveDiskCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveDisk")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -94275,6 +94580,7 @@ func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveInstance")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -94436,6 +94742,7 @@ func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Res
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setCommonInstanceMetadata")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -94598,6 +94905,7 @@ func (c *ProjectsSetDefaultNetworkTierCall) doRequest(alt string) (*http.Respons
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setDefaultNetworkTier")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -94759,6 +95067,7 @@ func (c *ProjectsSetDefaultServiceAccountCall) doRequest(alt string) (*http.Resp
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setDefaultServiceAccount")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -94922,6 +95231,7 @@ func (c *ProjectsSetUsageExportBucketCall) doRequest(alt string) (*http.Response
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setUsageExportBucket")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -95081,6 +95391,7 @@ func (c *RegionAutoscalersDeleteCall) doRequest(alt string) (*http.Response, err
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{autoscaler}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -95247,6 +95558,7 @@ func (c *RegionAutoscalersGetCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{autoscaler}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -95420,6 +95732,7 @@ func (c *RegionAutoscalersInsertCall) doRequest(alt string) (*http.Response, err
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -95642,6 +95955,7 @@ func (c *RegionAutoscalersListCall) doRequest(alt string) (*http.Response, error
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -95858,6 +96172,7 @@ func (c *RegionAutoscalersPatchCall) doRequest(alt string) (*http.Response, erro
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers")
 	urls += "?"
+ c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) @@ -96018,6 +96333,7 @@ func (c *RegionAutoscalersTestIamPermissionsCall) doRequest(alt string) (*http.R } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -96201,6 +96517,7 @@ func (c *RegionAutoscalersUpdateCall) doRequest(alt string) (*http.Response, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) @@ -96372,6 +96689,7 @@ func (c *RegionBackendServicesDeleteCall) doRequest(alt string) (*http.Response, reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -96538,6 +96856,7 @@ func (c *RegionBackendServicesGetCall) doRequest(alt string) (*http.Response, er } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -96694,6 +97013,7 @@ func (c *RegionBackendServicesGetHealthCall) doRequest(alt string) (*http.Respon } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}/getHealth") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -96872,6 +97192,7 @@ func (c *RegionBackendServicesInsertCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -97094,6 +97415,7 @@ func (c *RegionBackendServicesListCall) doRequest(alt string) (*http.Response, e } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -97308,6 +97630,7 @@ func (c *RegionBackendServicesPatchCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) @@ -97471,6 +97794,7 @@ func (c *RegionBackendServicesTestIamPermissionsCall) doRequest(alt string) (*ht } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -97651,6 +97975,7 @@ func (c *RegionBackendServicesUpdateCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) @@ -97879,6 +98204,7 @@ func (c *RegionCommitmentsAggregatedListCall) doRequest(alt string) (*http.Respo } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/commitments") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -98068,6 +98394,7 @@ func (c *RegionCommitmentsGetCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments/{commitment}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -98241,6 +98568,7 @@ func (c *RegionCommitmentsInsertCall) doRequest(alt string) (*http.Response, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -98463,6 +98791,7 @@ func (c *RegionCommitmentsListCall) doRequest(alt string) (*http.Response, error } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -98654,6 +98983,7 @@ func (c *RegionCommitmentsTestIamPermissionsCall) doRequest(alt string) (*http.R } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -98820,6 +99150,7 @@ func (c *RegionDiskTypesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/diskTypes/{diskType}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -99044,6 +99375,7 @@ func (c *RegionDiskTypesListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/diskTypes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -99255,6 +99587,7 @@ func (c *RegionDisksAddResourcePoliciesCall) doRequest(alt string) (*http.Respon } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}/addResourcePolicies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -99442,6 +99775,7 @@ func (c *RegionDisksCreateSnapshotCall) doRequest(alt string) (*http.Response, e } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}/createSnapshot") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -99623,6 +99957,7 @@ func (c *RegionDisksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -99788,6 +100123,7 @@ func (c *RegionDisksGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -99968,6 +100304,7 @@ func (c *RegionDisksInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -100195,6 +100532,7 @@ func (c *RegionDisksListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -100405,6 +100743,7 @@ func (c *RegionDisksRemoveResourcePoliciesCall) doRequest(alt string) (*http.Res } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}/removeResourcePolicies") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -100586,6 +100925,7 @@ func (c *RegionDisksResizeCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}/resize") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -100767,6 +101107,7 @@ func (c *RegionDisksSetLabelsCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -100930,6 +101271,7 @@ func (c *RegionDisksTestIamPermissionsCall) doRequest(alt string) (*http.Respons } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -101100,6 +101442,7 @@ func (c *RegionHealthChecksDeleteCall) doRequest(alt string) (*http.Response, er reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -101267,6 +101610,7 @@ func (c *RegionHealthChecksGetCall) doRequest(alt string) (*http.Response, error } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -101440,6 +101784,7 @@ func (c *RegionHealthChecksInsertCall) doRequest(alt string) (*http.Response, er } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -101662,6 +102007,7 @@ func (c *RegionHealthChecksListCall) doRequest(alt string) (*http.Response, erro } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -101873,6 +102219,7 @@ func (c *RegionHealthChecksPatchCall) doRequest(alt string) (*http.Response, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) @@ -102036,6 +102383,7 @@ func (c *RegionHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http. 
} reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -102214,6 +102562,7 @@ func (c *RegionHealthChecksUpdateCall) doRequest(alt string) (*http.Response, er } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) @@ -102411,6 +102760,7 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) doRequest(alt string) } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -102572,6 +102922,7 @@ func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) doRequest(alt s } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -102740,6 +103091,7 @@ func (c *RegionInstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Res reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -102931,6 +103283,7 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) doRequest(alt string) ( } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -103092,6 +103445,7 @@ func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) doRequest(alt } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -103255,6 +103609,7 @@ func (c *RegionInstanceGroupManagersGetCall) doRequest(alt string) (*http.Respon } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -103433,6 +103788,7 @@ func (c *RegionInstanceGroupManagersInsertCall) doRequest(alt string) (*http.Res } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -103654,6 +104010,7 @@ func (c *RegionInstanceGroupManagersListCall) doRequest(alt string) (*http.Respo } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -103902,6 +104259,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) doRequest(alt stri reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -104158,6 +104516,7 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt st reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -104382,6 +104741,7 @@ func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Resp } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) @@ -104575,6 +104935,7 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -104759,6 +105120,7 @@ func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Res reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -104945,6 +105307,7 @@ func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) doRequest(alt st } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -105126,6 +105489,7 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) doRequest(alt strin } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -105307,6 +105671,7 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (* } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -105468,6 +105833,7 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) doRequest(alt string } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -105649,6 +106015,7 @@ func (c *RegionInstanceGroupManagersUpdateCall) doRequest(alt string) (*http.Res } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) @@ -105831,6 +106198,7 @@ func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -105998,6 +106366,7 @@ func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, err } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -106220,6 +106589,7 @@ func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, er } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -106475,6 +106845,7 @@ func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Res } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -106696,6 +107067,7 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Res } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -106857,6 +107229,7 @@ func (c *RegionInstanceGroupsTestIamPermissionsCall) doRequest(alt string) (*htt } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -107009,6 +107382,7 @@ func (c *RegionOperationsDeleteCall) doRequest(alt string) (*http.Response, erro reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -107143,6 +107517,7 @@ func (c *RegionOperationsGetCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -107368,6 +107743,7 @@ func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -107560,6 +107936,7 @@ func (c *RegionOperationsWaitCall) doRequest(alt string) (*http.Response, error) reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}/wait") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -107727,6 +108104,7 @@ func (c *RegionSslCertificatesDeleteCall) doRequest(alt string) (*http.Response, reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/sslCertificates/{sslCertificate}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -107895,6 +108273,7 @@ func (c *RegionSslCertificatesGetCall) doRequest(alt string) (*http.Response, er } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/sslCertificates/{sslCertificate}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -108068,6 +108447,7 @@ func (c *RegionSslCertificatesInsertCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/sslCertificates") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -108290,6 +108670,7 @@ func (c *RegionSslCertificatesListCall) doRequest(alt string) (*http.Response, e } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/sslCertificates") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -108481,6 +108862,7 @@ func (c *RegionSslCertificatesTestIamPermissionsCall) doRequest(alt string) (*ht } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/sslCertificates/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -108651,6 +109033,7 @@ func (c *RegionTargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Respons reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -108819,6 +109202,7 @@ func (c *RegionTargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -108992,6 +109376,7 @@ func (c *RegionTargetHttpProxiesInsertCall) doRequest(alt string) (*http.Respons } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpProxies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -109214,6 +109599,7 @@ func (c *RegionTargetHttpProxiesListCall) doRequest(alt string) (*http.Response, } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpProxies") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -109423,6 +109809,7 @@ func (c *RegionTargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Resp } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}/setUrlMap") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -109586,6 +109973,7 @@ func (c *RegionTargetHttpProxiesTestIamPermissionsCall) doRequest(alt string) (* } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpProxies/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -109756,6 +110144,7 @@ func (c *RegionTargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Respon reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -109924,6 +110313,7 @@ func (c *RegionTargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -110097,6 +110487,7 @@ func (c *RegionTargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Respon } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -110319,6 +110710,7 @@ func (c *RegionTargetHttpsProxiesListCall) doRequest(alt string) (*http.Response } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -110528,6 +110920,7 @@ func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) doRequest(alt string) ( } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -110709,6 +111102,7 @@ func (c *RegionTargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Res } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -110872,6 +111266,7 @@ func (c *RegionTargetHttpsProxiesTestIamPermissionsCall) doRequest(alt string) ( } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -111030,6 +111425,7 @@ func (c *RegionUrlMapsDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps/{urlMap}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -111197,6 +111593,7 @@ func (c *RegionUrlMapsGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps/{urlMap}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -111358,6 +111755,7 @@ func (c *RegionUrlMapsInsertCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -111519,6 +111917,7 @@ func (c *RegionUrlMapsInvalidateCacheCall) doRequest(alt string) (*http.Response } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps/{urlMap}/invalidateCache") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -111750,6 +112149,7 @@ func (c *RegionUrlMapsListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -111949,6 +112349,7 @@ func (c *RegionUrlMapsPatchCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps/{urlMap}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) @@ -112112,6 +112513,7 @@ func (c *RegionUrlMapsTestIamPermissionsCall) doRequest(alt string) (*http.Respo } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps/{resource}/testIamPermissions") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -112278,6 +112680,7 @@ func (c *RegionUrlMapsUpdateCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps/{urlMap}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) @@ -112442,6 +112845,7 @@ func (c *RegionUrlMapsValidateCall) doRequest(alt string) (*http.Response, error } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps/{urlMap}/validate") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -112606,6 +113010,7 @@ func (c *RegionsGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -112820,6 +113225,7 @@ func (c *RegionsListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -113067,6 +113473,7 @@ func (c *ResourcePoliciesAggregatedListCall) doRequest(alt string) (*http.Respon } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/resourcePolicies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -113260,6 +113667,7 @@ func (c *ResourcePoliciesDeleteCall) doRequest(alt string) (*http.Response, erro reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/resourcePolicies/{resourcePolicy}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -113426,6 +113834,7 @@ func (c *ResourcePoliciesGetCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/resourcePolicies/{resourcePolicy}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -113589,6 +113998,7 @@ func (c *ResourcePoliciesGetIamPolicyCall) doRequest(alt string) (*http.Response } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/resourcePolicies/{resource}/getIamPolicy") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -113761,6 +114171,7 @@ func (c *ResourcePoliciesInsertCall) doRequest(alt string) (*http.Response, erro } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/resourcePolicies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -113983,6 +114394,7 @@ func (c *ResourcePoliciesListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/resourcePolicies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -114174,6 +114586,7 @@ func (c *ResourcePoliciesSetIamPolicyCall) doRequest(alt string) (*http.Response } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/resourcePolicies/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -114332,6 +114745,7 @@ func (c *ResourcePoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Re } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/resourcePolicies/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -114556,6 +114970,7 @@ func (c *RoutersAggregatedListCall) doRequest(alt string) (*http.Response, error } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/routers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -114749,6 +115164,7 @@ func (c *RoutersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -114916,6 +115332,7 @@ func (c *RoutersGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -115142,6 +115559,7 @@ func (c *RoutersGetNatMappingInfoCall) doRequest(alt string) (*http.Response, er } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}/getNatMappingInfo") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -115349,6 +115767,7 @@ func (c *RoutersGetRouterStatusCall) doRequest(alt string) (*http.Response, erro } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}/getRouterStatus") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -115522,6 +115941,7 @@ func (c *RoutersInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -115744,6 +116164,7 @@ func (c *RoutersListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -115955,6 +116376,7 @@ func (c *RoutersPatchCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) @@ -116119,6 +116541,7 @@ func (c *RoutersPreviewCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}/preview") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -116278,6 +116701,7 @@ func (c *RoutersTestIamPermissionsCall) doRequest(alt string) (*http.Response, e } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -116456,6 +116880,7 @@ func (c *RoutersUpdateCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) @@ -116629,6 +117054,7 @@ func (c *RoutesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -116786,6 +117212,7 @@ func (c *RoutesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -116949,6 +117376,7 @@ func (c *RoutesInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -117161,6 +117589,7 @@ func (c *RoutesListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -117341,6 +117770,7 @@ func (c *RoutesTestIamPermissionsCall) doRequest(alt string) (*http.Response, er } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -117495,6 +117925,7 @@ func (c *SecurityPoliciesAddRuleCall) doRequest(alt string) (*http.Response, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}/addRule") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -117658,6 +118089,7 @@ func (c *SecurityPoliciesDeleteCall) doRequest(alt string) (*http.Response, erro reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -117814,6 +118246,7 @@ func (c *SecurityPoliciesGetCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -117972,6 +118405,7 @@ func (c *SecurityPoliciesGetRuleCall) doRequest(alt string) (*http.Response, err } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}/getRule") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -118147,6 +118581,7 @@ func (c *SecurityPoliciesInsertCall) doRequest(alt string) (*http.Response, erro } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -118363,6 +118798,7 @@ func (c *SecurityPoliciesListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -118611,6 +119047,7 @@ func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) doRequest(alt stri } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/listPreconfiguredExpressionSets") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -118791,6 +119228,7 @@ func (c *SecurityPoliciesPatchCall) doRequest(alt string) (*http.Response, error } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) @@ -118956,6 +119394,7 @@ func (c *SecurityPoliciesPatchRuleCall) doRequest(alt string) (*http.Response, e } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}/patchRule") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -119113,6 +119552,7 @@ func (c *SecurityPoliciesRemoveRuleCall) doRequest(alt string) (*http.Response, reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}/removeRule") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -119263,6 +119703,7 @@ func (c *SecurityPoliciesSetLabelsCall) doRequest(alt string) (*http.Response, e } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -119410,6 +119851,7 @@ func (c *SecurityPoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Re } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{resource}/testIamPermissions") urls += "?" 
 	req, _ := http.NewRequest("POST", urls, body)
@@ -119576,6 +120018,7 @@ func (c *SnapshotsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{snapshot}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -119733,6 +120176,7 @@ func (c *SnapshotsGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{snapshot}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -119885,6 +120329,7 @@ func (c *SnapshotsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{resource}/getIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -120099,6 +120544,7 @@ func (c *SnapshotsListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -120279,6 +120725,7 @@ func (c *SnapshotsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{resource}/setIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -120426,6 +120873,7 @@ func (c *SnapshotsSetLabelsCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{resource}/setLabels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -120573,6 +121021,7 @@ func (c *SnapshotsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -120789,6 +121238,7 @@ func (c *SslCertificatesAggregatedListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/sslCertificates")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -120980,6 +121430,7 @@ func (c *SslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{sslCertificate}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -121136,6 +121587,7 @@ func (c *SslCertificatesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{sslCertificate}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -121298,6 +121750,7 @@ func (c *SslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -121509,6 +121962,7 @@ func (c *SslCertificatesListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -121689,6 +122143,7 @@ func (c *SslCertificatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -121850,6 +122305,7 @@ func (c *SslPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/{sslPolicy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -122005,6 +122461,7 @@ func (c *SslPoliciesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/{sslPolicy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -122166,6 +122623,7 @@ func (c *SslPoliciesInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -122377,6 +122835,7 @@ func (c *SslPoliciesListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -122625,6 +123084,7 @@ func (c *SslPoliciesListAvailableFeaturesCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/listAvailableFeatures")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -122805,6 +123265,7 @@ func (c *SslPoliciesPatchCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/{sslPolicy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -122956,6 +123417,7 @@ func (c *SslPoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -123171,6 +123633,7 @@ func (c *SubnetworksAggregatedListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/subnetworks")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -123364,6 +123827,7 @@ func (c *SubnetworksDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -123543,6 +124007,7 @@ func (c *SubnetworksExpandIpCidrRangeCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}/expandIpCidrRange")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -123713,6 +124178,7 @@ func (c *SubnetworksGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -123876,6 +124342,7 @@ func (c *SubnetworksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{resource}/getIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -124049,6 +124516,7 @@ func (c *SubnetworksInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -124271,6 +124739,7 @@ func (c *SubnetworksListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -124527,6 +124996,7 @@ func (c *SubnetworksListUsableCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/subnetworks/listUsable")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -124747,6 +125217,7 @@ func (c *SubnetworksPatchCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -124916,6 +125387,7 @@ func (c *SubnetworksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{resource}/setIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -125094,6 +125566,7 @@ func (c *SubnetworksSetPrivateIpGoogleAccessCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}/setPrivateIpGoogleAccess")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -125257,6 +125730,7 @@ func (c *SubnetworksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -125482,6 +125956,7 @@ func (c *TargetHttpProxiesAggregatedListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetHttpProxies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -125674,6 +126149,7 @@ func (c *TargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{targetHttpProxy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -125831,6 +126307,7 @@ func (c *TargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{targetHttpProxy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -125994,6 +126471,7 @@ func (c *TargetHttpProxiesInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -126206,6 +126684,7 @@ func (c *TargetHttpProxiesListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -126405,6 +126884,7 @@ func (c *TargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -126557,6 +127037,7 @@ func (c *TargetHttpProxiesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -126773,6 +127254,7 @@ func (c *TargetHttpsProxiesAggregatedListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetHttpsProxies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -126964,6 +127446,7 @@ func (c *TargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -127120,6 +127603,7 @@ func (c *TargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -127282,6 +127766,7 @@ func (c *TargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -127493,6 +127978,7 @@ func (c *TargetHttpsProxiesListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -127691,6 +128177,7 @@ func (c *TargetHttpsProxiesSetQuicOverrideCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}/setQuicOverride")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -127860,6 +128347,7 @@ func (c *TargetHttpsProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -128034,6 +128522,7 @@ func (c *TargetHttpsProxiesSetSslPolicyCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}/setSslPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -128203,6 +128692,7 @@ func (c *TargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -128355,6 +128845,7 @@ func (c *TargetHttpsProxiesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
} reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -128571,6 +129062,7 @@ func (c *TargetInstancesAggregatedListCall) doRequest(alt string) (*http.Respons } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -128765,6 +129257,7 @@ func (c *TargetInstancesDeleteCall) doRequest(alt string) (*http.Response, error reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances/{targetInstance}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -128933,6 +129426,7 @@ func (c *TargetInstancesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances/{targetInstance}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -129107,6 +129601,7 @@ func (c *TargetInstancesInsertCall) doRequest(alt string) (*http.Response, error } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -129330,6 +129825,7 @@ func (c *TargetInstancesListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -129521,6 +130017,7 @@ func (c *TargetInstancesTestIamPermissionsCall) doRequest(alt string) (*http.Res } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -129699,6 +130196,7 @@ func (c *TargetPoolsAddHealthCheckCall) doRequest(alt string) (*http.Response, e } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -129881,6 +130379,7 @@ func (c *TargetPoolsAddInstanceCall) doRequest(alt string) (*http.Response, erro } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/addInstance") urls += "?" 
 	req, _ := http.NewRequest("POST", urls, body)
@@ -130110,6 +130609,7 @@ func (c *TargetPoolsAggregatedListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetPools")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -130304,6 +130804,7 @@ func (c *TargetPoolsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -130472,6 +130973,7 @@ func (c *TargetPoolsGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -130629,6 +131131,7 @@ func (c *TargetPoolsGetHealthCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/getHealth")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -130806,6 +131309,7 @@ func (c *TargetPoolsInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -131029,6 +131533,7 @@ func (c *TargetPoolsListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -131239,6 +131744,7 @@ func (c *TargetPoolsRemoveHealthCheckCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -131421,6 +131927,7 @@ func (c *TargetPoolsRemoveInstanceCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/removeInstance")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -131610,6 +132117,7 @@ func (c *TargetPoolsSetBackupCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/setBackup")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -131779,6 +132287,7 @@ func (c *TargetPoolsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -131947,6 +132456,7 @@ func (c *TargetSslProxiesDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -132103,6 +132613,7 @@ func (c *TargetSslProxiesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -132265,6 +132776,7 @@ func (c *TargetSslProxiesInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -132476,6 +132988,7 @@ func (c *TargetSslProxiesListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -132674,6 +133187,7 @@ func (c *TargetSslProxiesSetBackendServiceCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}/setBackendService")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -132844,6 +133358,7 @@ func (c *TargetSslProxiesSetProxyHeaderCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}/setProxyHeader")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -133014,6 +133529,7 @@ func (c *TargetSslProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}/setSslCertificates")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -133187,6 +133703,7 @@ func (c *TargetSslProxiesSetSslPolicyCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -133338,6 +133855,7 @@ func (c *TargetSslProxiesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -133497,6 +134015,7 @@ func (c *TargetTcpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies/{targetTcpProxy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -133653,6 +134172,7 @@ func (c *TargetTcpProxiesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies/{targetTcpProxy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -133815,6 +134335,7 @@ func (c *TargetTcpProxiesInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -134026,6 +134547,7 @@ func (c *TargetTcpProxiesListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -134224,6 +134746,7 @@ func (c *TargetTcpProxiesSetBackendServiceCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies/{targetTcpProxy}/setBackendService")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -134394,6 +134917,7 @@ func (c *TargetTcpProxiesSetProxyHeaderCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies/{targetTcpProxy}/setProxyHeader")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -134546,6 +135070,7 @@ func (c *TargetTcpProxiesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -134761,6 +135286,7 @@ func (c *TargetVpnGatewaysAggregatedListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetVpnGateways")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -134954,6 +135480,7 @@ func (c *TargetVpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -135121,6 +135648,7 @@ func (c *TargetVpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -135294,6 +135822,7 @@ func (c *TargetVpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -135516,6 +136045,7 @@ func (c *TargetVpnGatewaysListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -135726,6 +136256,7 @@ func (c *TargetVpnGatewaysSetLabelsCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways/{resource}/setLabels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -135889,6 +136420,7 @@ func (c *TargetVpnGatewaysTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -136114,6 +136646,7 @@ func (c *UrlMapsAggregatedListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/urlMaps")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -136306,6 +136839,7 @@ func (c *UrlMapsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -136463,6 +136997,7 @@ func (c *UrlMapsGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -136626,6 +137161,7 @@ func (c *UrlMapsInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -136788,6 +137324,7 @@ func (c *UrlMapsInvalidateCacheCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}/invalidateCache")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -137009,6 +137546,7 @@ func (c *UrlMapsListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -137210,6 +137748,7 @@ func (c *UrlMapsPatchCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -137362,6 +137901,7 @@ func (c *UrlMapsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -137530,6 +138070,7 @@ func (c *UrlMapsUpdateCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PUT", urls, body)
@@ -137684,6 +138225,7 @@ func (c *UrlMapsValidateCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}/validate")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -137898,6 +138440,7 @@ func (c *VpnGatewaysAggregatedListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/vpnGateways")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -138091,6 +138634,7 @@ func (c *VpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnGateways/{vpnGateway}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -138258,6 +138802,7 @@ func (c *VpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnGateways/{vpnGateway}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -138431,6 +138976,7 @@ func (c *VpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnGateways")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -138653,6 +139199,7 @@ func (c *VpnGatewaysListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnGateways")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -138863,6 +139410,7 @@ func (c *VpnGatewaysSetLabelsCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnGateways/{resource}/setLabels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -139026,6 +139574,7 @@ func (c *VpnGatewaysTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnGateways/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -139250,6 +139799,7 @@ func (c *VpnTunnelsAggregatedListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/vpnTunnels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -139443,6 +139993,7 @@ func (c *VpnTunnelsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels/{vpnTunnel}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -139610,6 +140161,7 @@ func (c *VpnTunnelsGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels/{vpnTunnel}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -139783,6 +140335,7 @@ func (c *VpnTunnelsInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -140005,6 +140558,7 @@ func (c *VpnTunnelsListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -140215,6 +140769,7 @@ func (c *VpnTunnelsSetLabelsCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels/{resource}/setLabels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -140378,6 +140933,7 @@ func (c *VpnTunnelsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -140530,6 +141086,7 @@ func (c *ZoneOperationsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/operations/{operation}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -140664,6 +141221,7 @@ func (c *ZoneOperationsGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/operations/{operation}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -140889,6 +141447,7 @@ func (c *ZoneOperationsListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/operations")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -141081,6 +141640,7 @@ func (c *ZoneOperationsWaitCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/operations/{operation}/wait")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -141243,6 +141803,7 @@ func (c *ZonesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -141457,6 +142018,7 @@ func (c *ZonesListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
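Every hunk in this vendored client update makes the same one-line change: the generated `doRequest` helpers now set the `prettyPrint=false` query parameter alongside `alt`, asking the API to return compact rather than indented JSON and so shrinking every response on the wire. A minimal sketch of the URL construction these helpers perform, using only the Go standard library (the endpoint string below is illustrative, not taken from the generated client, which derives it from `c.s.BasePath` and a path template):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Mirror what the generated doRequest methods do: collect query
	// parameters, force compact JSON output, then encode them.
	params := url.Values{}
	params.Set("alt", "json")          // response format, as in the vendored client
	params.Set("prettyPrint", "false") // the parameter added throughout this diff

	// Hypothetical resolved endpoint for illustration only.
	urls := "https://www.googleapis.com/compute/v1/projects/demo/zones"
	urls += "?" + params.Encode()

	fmt.Println(urls)
	// Prints: https://www.googleapis.com/compute/v1/projects/demo/zones?alt=json&prettyPrint=false
}
```

The same change is then repeated in the v0.beta surface of the vendored client: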
diff --git a/vertical-pod-autoscaler/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go b/vertical-pod-autoscaler/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go
index 13a2e4e636f2..08d483e6d062 100644
--- a/vertical-pod-autoscaler/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go
+++ b/vertical-pod-autoscaler/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go
@@ -32089,6 +32089,7 @@ func (c *AcceleratorTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/acceleratorTypes")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -32277,6 +32278,7 @@ func (c *AcceleratorTypesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/acceleratorTypes/{acceleratorType}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -32501,6 +32503,7 @@ func (c *AcceleratorTypesListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/acceleratorTypes")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -32758,6 +32761,7 @@ func (c *AddressesAggregatedListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/addresses")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -32952,6 +32956,7 @@ func (c *AddressesDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -33119,6 +33124,7 @@ func (c *AddressesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -33293,6 +33299,7 @@ func (c *AddressesInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -33516,6 +33523,7 @@ func (c *AddressesListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -33726,6 +33734,7 @@ func (c *AddressesSetLabelsCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{resource}/setLabels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -33889,6 +33898,7 @@ func (c *AddressesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -34113,6 +34123,7 @@ func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/autoscalers")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -34306,6 +34317,7 @@ func (c *AutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -34473,6 +34485,7 @@ func (c *AutoscalersGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -34646,6 +34659,7 @@ func (c *AutoscalersInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -34868,6 +34882,7 @@ func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -35084,6 +35099,7 @@ func (c *AutoscalersPatchCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -35244,6 +35260,7 @@ func (c *AutoscalersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -35427,6 +35444,7 @@ func (c *AutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PUT", urls, body)
@@ -35604,6 +35622,7 @@ func (c *BackendBucketsAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}/addSignedUrlKey")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -35766,6 +35785,7 @@ func (c *BackendBucketsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -35928,6 +35948,7 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}/deleteSignedUrlKey")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -36090,6 +36111,7 @@ func (c *BackendBucketsGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -36252,6 +36274,7 @@ func (c *BackendBucketsInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -36463,6 +36486,7 @@ func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -36663,6 +36687,7 @@ func (c *BackendBucketsPatchCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -36834,6 +36859,7 @@ func (c *BackendBucketsUpdateCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PUT", urls, body)
@@ -37005,6 +37031,7 @@ func (c *BackendServicesAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/addSignedUrlKey")
 	urls += "?" + c.urlParams_.Encode()
@@ -37224,6 +37251,7 @@ func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Respons
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/backendServices")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -37416,6 +37444,7 @@ func (c *BackendServicesDeleteCall) doRequest(alt string) (*http.Response, error
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -37578,6 +37607,7 @@ func (c *BackendServicesDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Res
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/deleteSignedUrlKey")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -37741,6 +37771,7 @@ func (c *BackendServicesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -37887,6 +37918,7 @@ func (c *BackendServicesGetHealthCall) doRequest(alt string) (*http.Response, er
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/getHealth")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -38054,6 +38086,7 @@ func (c *BackendServicesInsertCall) doRequest(alt string) (*http.Response, error
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -38266,6 +38299,7 @@ func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -38470,6 +38504,7 @@ func (c *BackendServicesPatchCall) doRequest(alt string) (*http.Response, error)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -38641,6 +38676,7 @@ func (c *BackendServicesSetSecurityPolicyCall) doRequest(alt string) (*http.Resp
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/setSecurityPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -38792,6 +38828,7 @@ func (c *BackendServicesTestIamPermissionsCall) doRequest(alt string) (*http.Res
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -38962,6 +38999,7 @@ func (c *BackendServicesUpdateCall) doRequest(alt string) (*http.Response, error
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PUT", urls, body)
@@ -39182,6 +39220,7 @@ func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, err
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/diskTypes")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -39372,6 +39411,7 @@ func (c *DiskTypesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes/{diskType}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -39597,6 +39637,7 @@ func (c *DiskTypesListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -39808,6 +39849,7 @@ func (c *DisksAddResourcePoliciesCall) doRequest(alt string) (*http.Response, er
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/addResourcePolicies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -40037,6 +40079,7 @@ func (c *DisksAggregatedListCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/disks")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -40244,6 +40287,7 @@ func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/createSnapshot")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -40426,6 +40470,7 @@ func (c *DisksDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -40593,6 +40638,7 @@ func (c *DisksGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -40756,6 +40802,7 @@ func (c *DisksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/getIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -40940,6 +40987,7 @@ func (c *DisksInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -41168,6 +41216,7 @@ func (c *DisksListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -41377,6 +41426,7 @@ func (c *DisksRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/removeResourcePolicies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -41559,6 +41609,7 @@ func (c *DisksResizeCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/resize")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -41722,6 +41773,7 @@ func (c *DisksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/setIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -41899,6 +41951,7 @@ func (c *DisksSetLabelsCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/setLabels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -42062,6 +42115,7 @@ func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, err
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -42231,6 +42285,7 @@ func (c *FirewallsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -42387,6 +42442,7 @@ func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -42550,6 +42606,7 @@ func (c *FirewallsInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -42762,6 +42819,7 @@ func (c *FirewallsListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -42963,6 +43021,7 @@ func (c *FirewallsPatchCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -43115,6 +43174,7 @@ func (c *FirewallsTestIamPermissionsCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -43285,6 +43345,7 @@ func (c *FirewallsUpdateCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PUT", urls, body)
@@ -43505,6 +43566,7 @@ func (c *ForwardingRulesAggregatedListCall) doRequest(alt string) (*http.Respons
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/forwardingRules")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -43699,6 +43761,7 @@ func (c *ForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -43866,6 +43929,7 @@ func (c *ForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -44040,6 +44104,7 @@ func (c *ForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -44263,6 +44328,7 @@ func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -44473,6 +44539,7 @@ func (c *ForwardingRulesSetLabelsCall) doRequest(alt string) (*http.Response, er
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{resource}/setLabels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -44656,6 +44723,7 @@ func (c *ForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, er
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -44819,6 +44887,7 @@ func (c *ForwardingRulesTestIamPermissionsCall) doRequest(alt string) (*http.Res
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -44988,6 +45057,7 @@ func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -45145,6 +45215,7 @@ func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -45308,6 +45379,7 @@ func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -45519,6 +45591,7 @@ func (c *GlobalAddressesListCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -45699,6 +45772,7 @@ func (c *GlobalAddressesSetLabelsCall) doRequest(alt string) (*http.Response, er
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{resource}/setLabels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -45846,6 +45920,7 @@ func (c *GlobalAddressesTestIamPermissionsCall) doRequest(alt string) (*http.Res
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -46006,6 +46081,7 @@ func (c *GlobalForwardingRulesDeleteCall) doRequest(alt string) (*http.Response,
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -46163,6 +46239,7 @@ func (c *GlobalForwardingRulesGetCall) doRequest(alt string) (*http.Response, er
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -46326,6 +46403,7 @@ func (c *GlobalForwardingRulesInsertCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -46538,6 +46616,7 @@ func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, e
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -46718,6 +46797,7 @@ func (c *GlobalForwardingRulesSetLabelsCall) doRequest(alt string) (*http.Respon
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{resource}/setLabels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -46885,6 +46965,7 @@ func (c *GlobalForwardingRulesSetTargetCall) doRequest(alt string) (*http.Respon
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}/setTarget")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -47037,6 +47118,7 @@ func (c *GlobalForwardingRulesTestIamPermissionsCall) doRequest(alt string) (*ht
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -47253,6 +47335,7 @@ func (c *GlobalOperationsAggregatedListCall) doRequest(alt string) (*http.Respon
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/operations")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -47426,6 +47509,7 @@ func (c *GlobalOperationsDeleteCall) doRequest(alt string) (*http.Response, erro
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -47550,6 +47634,7 @@ func (c *GlobalOperationsGetCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -47764,6 +47849,7 @@ func (c *GlobalOperationsListCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -47955,6 +48041,7 @@ func (c *HealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -48111,6 +48198,7 @@ func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -48273,6 +48361,7 @@ func (c *HealthChecksInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -48484,6 +48573,7 @@ func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -48684,6 +48774,7 @@ func (c *HealthChecksPatchCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -48836,6 +48927,7 @@ func (c *HealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Respon
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -49003,6 +49095,7 @@ func (c *HealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PUT", urls, body)
@@ -49167,6 +49260,7 @@ func (c *HttpHealthChecksDeleteCall) doRequest(alt string) (*http.Response, erro
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -49324,6 +49418,7 @@ func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -49487,6 +49582,7 @@ func (c *HttpHealthChecksInsertCall) doRequest(alt string) (*http.Response, erro
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -49699,6 +49795,7 @@ func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -49900,6 +49997,7 @@ func (c *HttpHealthChecksPatchCall) doRequest(alt string) (*http.Response, error
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -50052,6 +50150,7 @@ func (c *HttpHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Re
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -50220,6 +50319,7 @@ func (c *HttpHealthChecksUpdateCall) doRequest(alt string) (*http.Response, erro
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PUT", urls, body)
@@ -50383,6 +50483,7 @@ func (c *HttpsHealthChecksDeleteCall) doRequest(alt string) (*http.Response, err
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -50539,6 +50640,7 @@ func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -50701,6 +50803,7 @@ func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, err
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -50912,6 +51015,7 @@ func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -51112,6 +51216,7 @@ func (c *HttpsHealthChecksPatchCall) doRequest(alt string) (*http.Response, erro
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -51264,6 +51369,7 @@ func (c *HttpsHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.R
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -51431,6 +51537,7 @@ func (c *HttpsHealthChecksUpdateCall) doRequest(alt string) (*http.Response, err
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PUT", urls, body)
@@ -51595,6 +51702,7 @@ func (c *ImagesDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -51766,6 +51874,7 @@ func (c *ImagesDeprecateCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}/deprecate")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -51926,6 +52035,7 @@ func (c *ImagesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -52078,6 +52188,7 @@ func (c *ImagesGetFromFamilyCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/family/{family}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -52230,6 +52341,7 @@ func (c *ImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/getIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -52400,6 +52512,7 @@ func (c *ImagesInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -52625,6 +52738,7 @@ func (c *ImagesListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -52805,6 +52919,7 @@ func (c *ImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/setIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -52952,6 +53067,7 @@ func (c *ImagesSetLabelsCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/setLabels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -53099,6 +53215,7 @@ func (c *ImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, er
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -53283,6 +53400,7 @@ func (c *InstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -53510,6 +53628,7 @@ func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.R
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroupManagers")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -53707,6 +53826,7 @@ func (c *InstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response,
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -53898,6 +54018,7 @@ func (c *InstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -54067,6 +54188,7 @@ func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, er
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -54246,6 +54368,7 @@ func (c *InstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -54467,6 +54590,7 @@ func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, e
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -54717,6 +54841,7 @@ func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -54941,6 +55066,7 @@ func (c *InstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -55134,6 +55260,7 @@ func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*htt
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -55317,6 +55444,7 @@ func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response,
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -55515,6 +55643,7 @@ func (c *InstanceGroupManagersResizeAdvancedCall) doRequest(alt string) (*http.R
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -55694,6 +55823,7 @@ func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) doRequest(alt string)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -55875,6 +56005,7 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*h
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -56060,6 +56191,7 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.R
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -56221,6 +56353,7 @@ func (c *InstanceGroupManagersTestIamPermissionsCall) doRequest(alt string) (*ht
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -56402,6 +56535,7 @@ func (c *InstanceGroupManagersUpdateCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PUT", urls, body)
@@ -56583,6 +56717,7 @@ func (c *InstanceGroupsAddInstancesCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -56810,6 +56945,7 @@ func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroups")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -57006,6 +57142,7 @@ func (c *InstanceGroupsDeleteCall) doRequest(alt string) (*http.Response, error)
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -57171,6 +57308,7 @@ func (c *InstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -57342,6 +57480,7 @@ func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -57563,6 +57702,7 @@ func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -57815,6 +57955,7 @@ func (c *InstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -58039,6 +58180,7 @@ func (c *InstanceGroupsRemoveInstancesCall) doRequest(alt string) (*http.Respons
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -58218,6 +58360,7 @@ func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -58379,6 +58522,7 @@ func (c *InstanceGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Resp
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -58550,6 +58694,7 @@ func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, err
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -58707,6 +58852,7 @@ func (c *InstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -58859,6 +59005,7 @@ func (c *InstanceTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Respons
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/getIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -59025,6 +59172,7 @@ func (c *InstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, err
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -59237,6 +59385,7 @@ func (c *InstanceTemplatesListCall) doRequest(alt string) (*http.Response, error
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -59417,6 +59566,7 @@ func (c *InstanceTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Respons
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/setIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -59564,6 +59714,7 @@ func (c *InstanceTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.R
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -59735,6 +59886,7 @@ func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, er
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/addAccessConfig")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -59972,6 +60124,7 @@ func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, err
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -60184,6 +60337,7 @@ func (c *InstancesAttachDiskCall) doRequest(alt string) (*http.Response, error)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/attachDisk")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -60365,6 +60519,7 @@ func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -60540,6 +60695,7 @@ func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response,
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -60727,6 +60883,7 @@ func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error)
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/detachDisk")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -60902,6 +61059,7 @@ func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -61065,6 +61223,7 @@ func (c *InstancesGetIamPolicyCall) doRequest(alt string) (*http.Response, error
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/getIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -61247,6 +61406,7 @@ func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/serialPort")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -61452,6 +61612,7 @@ func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -61680,6 +61841,7 @@ func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -61941,6 +62103,7 @@ func (c *InstancesListReferrersCall) doRequest(alt string) (*http.Response, erro
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/referrers")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -62154,6 +62317,7 @@ func (c *InstancesResetCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/reset")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -62332,6 +62496,7 @@ func (c *InstancesSetDeletionProtectionCall) doRequest(alt string) (*http.Respon
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/setDeletionProtection")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -62513,6 +62678,7 @@ func (c *InstancesSetDiskAutoDeleteCall) doRequest(alt string) (*http.Response,
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -62688,6 +62854,7 @@ func (c *InstancesSetIamPolicyCall) doRequest(alt string) (*http.Response, error
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/setIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -62865,6 +63032,7 @@ func (c *InstancesSetLabelsCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setLabels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -63047,6 +63215,7 @@ func (c *InstancesSetMachineResourcesCall) doRequest(alt string) (*http.Response
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineResources")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -63229,6 +63398,7 @@ func (c *InstancesSetMachineTypeCall) doRequest(alt string) (*http.Response, err
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineType")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -63412,6 +63582,7 @@ func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMetadata")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -63596,6 +63767,7 @@ func (c *InstancesSetMinCpuPlatformCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -63778,6 +63950,7 @@ func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, erro
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setScheduling")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -63961,6 +64134,7 @@ func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setServiceAccount") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -64145,6 +64319,7 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) doRequest(alt string) (*http } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setShieldedVmIntegrityPolicy") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) @@ -64328,6 +64503,7 @@ func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setTags") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -64484,6 +64660,7 @@ func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Res reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -64652,6 +64829,7 @@ func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/start") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -64832,6 +65010,7 @@ func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Respo } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -65012,6 +65191,7 @@ func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/stop") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -65172,6 +65352,7 @@ func (c *InstancesTestIamPermissionsCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/testIamPermissions") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -65353,6 +65534,7 @@ func (c *InstancesUpdateAccessConfigCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateAccessConfig") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -65543,6 +65725,7 @@ func (c *InstancesUpdateNetworkInterfaceCall) doRequest(alt string) (*http.Respo } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateNetworkInterface") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) @@ -65734,6 +65917,7 @@ func (c *InstancesUpdateShieldedVmConfigCall) doRequest(alt string) (*http.Respo } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateShieldedVmConfig") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) @@ -65963,6 +66147,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/interconnectAttachments") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -66157,6 +66342,7 @@ func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Respons reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -66323,6 +66509,7 @@ func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -66496,6 +66683,7 @@ func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Respons } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -66718,6 +66906,7 @@ func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -66929,6 +67118,7 @@ func (c *InterconnectAttachmentsPatchCall) doRequest(alt string) (*http.Response } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) @@ -67111,6 +67301,7 @@ func (c *InterconnectAttachmentsSetLabelsCall) doRequest(alt string) (*http.Resp } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -67274,6 +67465,7 @@ func (c *InterconnectAttachmentsTestIamPermissionsCall) doRequest(alt string) (* } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -67439,6 +67631,7 @@ func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, er } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations/{interconnectLocation}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -67652,6 +67845,7 @@ func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, e } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -67843,6 +68037,7 @@ func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -67999,6 +68194,7 @@ func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -68161,6 +68357,7 @@ func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -68372,6 +68569,7 @@ func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -68572,6 +68770,7 @@ func (c *InterconnectsPatchCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) @@ -68724,6 +68923,7 @@ func (c *InterconnectsSetLabelsCall) doRequest(alt string) (*http.Response, erro } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -68871,6 +69071,7 @@ func (c *InterconnectsTestIamPermissionsCall) doRequest(alt string) (*http.Respo } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -69026,6 +69227,7 @@ func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenseCodes/{licenseCode}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -69182,6 +69384,7 @@ func (c *LicensesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -69338,6 +69541,7 @@ func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -69499,6 +69703,7 @@ func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -69717,6 +69922,7 @@ func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -69965,6 +70171,7 @@ func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/machineTypes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -70155,6 +70362,7 @@ func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes/{machineType}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -70380,6 +70588,7 @@ func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -70637,6 +70846,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) doRequest(alt string) (*http.R } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/networkEndpointGroups") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -70839,6 +71049,7 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -71014,6 +71225,7 @@ func (c *NetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -71191,6 +71403,7 @@ func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) doRequest(alt string) } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -71359,6 +71572,7 @@ func (c *NetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, er } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -71530,6 +71744,7 @@ func (c *NetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -71751,6 +71966,7 @@ func (c *NetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, e } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -72004,6 +72220,7 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt string) (* } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -72207,6 +72424,7 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) doRequest(alt string) (*ht } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -72382,6 +72600,7 @@ func (c *NetworksAddPeeringCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/addPeering") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -72546,6 +72765,7 @@ func (c *NetworksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -72703,6 +72923,7 @@ func (c *NetworksGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -72866,6 +73087,7 @@ func (c *NetworksInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -73078,6 +73300,7 @@ func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -73278,6 +73501,7 @@ func (c *NetworksPatchCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) @@ -73448,6 +73672,7 @@ func (c *NetworksRemovePeeringCall) doRequest(alt string) (*http.Response, error } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/removePeering") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -73612,6 +73837,7 @@ func (c *NetworksSwitchToCustomModeCall) doRequest(alt string) (*http.Response, reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/switchToCustomMode") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -73761,6 +73987,7 @@ func (c *NetworksTestIamPermissionsCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -73929,6 +74156,7 @@ func (c *NodeGroupsAddNodesCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/addNodes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -74158,6 +74386,7 @@ func (c *NodeGroupsAggregatedListCall) doRequest(alt string) (*http.Response, er } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/nodeGroups") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -74351,6 +74580,7 @@ func (c *NodeGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -74529,6 +74759,7 @@ func (c *NodeGroupsDeleteNodesCall) doRequest(alt string) (*http.Response, error } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/deleteNodes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -74700,6 +74931,7 @@ func (c *NodeGroupsGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -74863,6 +75095,7 @@ func (c *NodeGroupsGetIamPolicyCall) doRequest(alt string) (*http.Response, erro } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -75037,6 +75270,7 @@ func (c *NodeGroupsInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -75268,6 +75502,7 @@ func (c *NodeGroupsListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -75514,6 +75749,7 @@ func (c *NodeGroupsListNodesCall) doRequest(alt string) (*http.Response, error) reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/listNodes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -75714,6 +75950,7 @@ func (c *NodeGroupsSetIamPolicyCall) doRequest(alt string) (*http.Response, erro } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -75890,6 +76127,7 @@ func (c *NodeGroupsSetNodeTemplateCall) doRequest(alt string) (*http.Response, e } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/setNodeTemplate") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -76053,6 +76291,7 @@ func (c *NodeGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -76277,6 +76516,7 @@ func (c *NodeTemplatesAggregatedListCall) doRequest(alt string) (*http.Response, } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/nodeTemplates") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -76470,6 +76710,7 @@ func (c *NodeTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{nodeTemplate}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -76637,6 +76878,7 @@ func (c *NodeTemplatesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{nodeTemplate}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -76800,6 +77042,7 @@ func (c *NodeTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, e } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -76973,6 +77216,7 @@ func (c *NodeTemplatesInsertCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -77195,6 +77439,7 @@ func (c *NodeTemplatesListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -77386,6 +77631,7 @@ func (c *NodeTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, e } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{resource}/setIamPolicy") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -77544,6 +77790,7 @@ func (c *NodeTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Respo } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -77768,6 +78015,7 @@ func (c *NodeTypesAggregatedListCall) doRequest(alt string) (*http.Response, err } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/nodeTypes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -77957,6 +78205,7 @@ func (c *NodeTypesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeTypes/{nodeType}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -78181,6 +78430,7 @@ func (c *NodeTypesListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeTypes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -78379,6 +78629,7 @@ func (c *ProjectsDisableXpnHostCall) doRequest(alt string) (*http.Response, erro reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/disableXpnHost") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -78536,6 +78787,7 @@ func (c *ProjectsDisableXpnResourceCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/disableXpnResource") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -78688,6 +78940,7 @@ func (c *ProjectsEnableXpnHostCall) doRequest(alt string) (*http.Response, error reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/enableXpnHost") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -78846,6 +79099,7 @@ func (c *ProjectsEnableXpnResourceCall) doRequest(alt string) (*http.Response, e } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/enableXpnResource") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -78994,6 +79248,7 @@ func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -79135,6 +79390,7 @@ func (c *ProjectsGetXpnHostCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/getXpnHost") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -79338,6 +79594,7 @@ func (c *ProjectsGetXpnResourcesCall) doRequest(alt string) (*http.Response, err } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/getXpnResources") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -79578,6 +79835,7 @@ func (c *ProjectsListXpnHostsCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/listXpnHosts") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -79776,6 +80034,7 @@ func (c *ProjectsMoveDiskCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveDisk") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -79936,6 +80195,7 @@ func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveInstance") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -80097,6 +80357,7 @@ func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Res } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setCommonInstanceMetadata") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -80259,6 +80520,7 @@ func (c *ProjectsSetDefaultNetworkTierCall) doRequest(alt string) (*http.Respons } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setDefaultNetworkTier") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -80422,6 +80684,7 @@ func (c *ProjectsSetUsageExportBucketCall) doRequest(alt string) (*http.Response } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setUsageExportBucket") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -80581,6 +80844,7 @@ func (c *RegionAutoscalersDeleteCall) doRequest(alt string) (*http.Response, err reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{autoscaler}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -80747,6 +81011,7 @@ func (c *RegionAutoscalersGetCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{autoscaler}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -80920,6 +81185,7 @@ func (c *RegionAutoscalersInsertCall) doRequest(alt string) (*http.Response, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -81142,6 +81408,7 @@ func (c *RegionAutoscalersListCall) doRequest(alt string) (*http.Response, error } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -81358,6 +81625,7 @@ func (c *RegionAutoscalersPatchCall) doRequest(alt string) (*http.Response, erro } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) @@ -81518,6 +81786,7 @@ func (c *RegionAutoscalersTestIamPermissionsCall) doRequest(alt string) (*http.R } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -81701,6 +81970,7 @@ func (c *RegionAutoscalersUpdateCall) doRequest(alt string) (*http.Response, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) @@ -81872,6 +82142,7 @@ func (c *RegionBackendServicesDeleteCall) doRequest(alt string) (*http.Response, reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -82038,6 +82309,7 @@ func (c *RegionBackendServicesGetCall) doRequest(alt string) (*http.Response, er } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -82194,6 +82466,7 @@ func (c *RegionBackendServicesGetHealthCall) doRequest(alt string) (*http.Respon } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}/getHealth") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -82372,6 +82645,7 @@ func (c *RegionBackendServicesInsertCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -82594,6 +82868,7 @@ func (c *RegionBackendServicesListCall) doRequest(alt string) (*http.Response, e } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -82808,6 +83083,7 @@ func (c *RegionBackendServicesPatchCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) @@ -82971,6 +83247,7 @@ func (c *RegionBackendServicesTestIamPermissionsCall) doRequest(alt string) (*ht } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -83151,6 +83428,7 @@ func (c *RegionBackendServicesUpdateCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) @@ -83379,6 +83657,7 @@ func (c *RegionCommitmentsAggregatedListCall) doRequest(alt string) (*http.Respo } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/commitments") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -83568,6 +83847,7 @@ func (c *RegionCommitmentsGetCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments/{commitment}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -83741,6 +84021,7 @@ func (c *RegionCommitmentsInsertCall) doRequest(alt string) (*http.Response, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -83963,6 +84244,7 @@ func (c *RegionCommitmentsListCall) doRequest(alt string) (*http.Response, error } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -84161,6 +84443,7 @@ func (c *RegionDiskTypesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/diskTypes/{diskType}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -84385,6 +84668,7 @@ func (c *RegionDiskTypesListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/diskTypes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -84596,6 +84880,7 @@ func (c *RegionDisksAddResourcePoliciesCall) doRequest(alt string) (*http.Respon } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}/addResourcePolicies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -84777,6 +85062,7 @@ func (c *RegionDisksCreateSnapshotCall) doRequest(alt string) (*http.Response, e } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}/createSnapshot") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -84954,6 +85240,7 @@ func (c *RegionDisksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -85119,6 +85406,7 @@ func (c *RegionDisksGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -85299,6 +85587,7 @@ func (c *RegionDisksInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -85526,6 +85815,7 @@ func (c *RegionDisksListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -85736,6 +86026,7 @@ func (c *RegionDisksRemoveResourcePoliciesCall) doRequest(alt string) (*http.Res } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}/removeResourcePolicies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -85917,6 +86208,7 @@ func (c *RegionDisksResizeCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}/resize") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -86098,6 +86390,7 @@ func (c *RegionDisksSetLabelsCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -86261,6 +86554,7 @@ func (c *RegionDisksTestIamPermissionsCall) doRequest(alt string) (*http.Respons } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -86454,6 +86748,7 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) doRequest(alt string) } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -86627,6 +86922,7 @@ func (c *RegionInstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Res reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -86818,6 +87114,7 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) doRequest(alt string) ( } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -86986,6 +87283,7 @@ func (c *RegionInstanceGroupManagersGetCall) doRequest(alt string) (*http.Respon } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -87164,6 +87462,7 @@ func (c *RegionInstanceGroupManagersInsertCall) doRequest(alt string) (*http.Res } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -87385,6 +87684,7 @@ func (c *RegionInstanceGroupManagersListCall) doRequest(alt string) (*http.Respo } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -87633,6 +87933,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) doRequest(alt stri reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -87857,6 +88158,7 @@ func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Resp } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) @@ -88050,6 +88352,7 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -88234,6 +88537,7 @@ func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Res reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -88420,6 +88724,7 @@ func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) doRequest(alt st } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -88601,6 +88906,7 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) doRequest(alt strin } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -88782,6 +89088,7 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (* } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -88943,6 +89250,7 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) doRequest(alt string } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -89124,6 +89432,7 @@ func (c *RegionInstanceGroupManagersUpdateCall) doRequest(alt string) (*http.Res } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) @@ -89291,6 +89600,7 @@ func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, err } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -89513,6 +89823,7 @@ func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, er } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -89768,6 +90079,7 @@ func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Res } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -89989,6 +90301,7 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Res } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -90150,6 +90463,7 @@ func (c *RegionInstanceGroupsTestIamPermissionsCall) doRequest(alt string) (*htt } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -90302,6 +90616,7 @@ func (c *RegionOperationsDeleteCall) doRequest(alt string) (*http.Response, erro reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -90436,6 +90751,7 @@ func (c *RegionOperationsGetCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -90661,6 +90977,7 @@ func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -90858,6 +91175,7 @@ func (c *RegionsGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -91072,6 +91390,7 @@ func (c *RegionsListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -91319,6 +91638,7 @@ func (c *ResourcePoliciesAggregatedListCall) doRequest(alt string) (*http.Respon } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/resourcePolicies") urls += "?" 
@@ -91512,6 +91832,7 @@ func (c *ResourcePoliciesDeleteCall) doRequest(alt string) (*http.Response, erro
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/resourcePolicies/{resourcePolicy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -91678,6 +91999,7 @@ func (c *ResourcePoliciesGetCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/resourcePolicies/{resourcePolicy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -91850,6 +92172,7 @@ func (c *ResourcePoliciesInsertCall) doRequest(alt string) (*http.Response, erro
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/resourcePolicies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -92072,6 +92395,7 @@ func (c *ResourcePoliciesListCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/resourcePolicies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -92263,6 +92587,7 @@ func (c *ResourcePoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Re
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/resourcePolicies/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -92487,6 +92812,7 @@ func (c *RoutersAggregatedListCall) doRequest(alt string) (*http.Response, error
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/routers")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -92680,6 +93006,7 @@ func (c *RoutersDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -92847,6 +93174,7 @@ func (c *RoutersGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -93010,6 +93338,7 @@ func (c *RoutersGetRouterStatusCall) doRequest(alt string) (*http.Response, erro
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}/getRouterStatus")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -93183,6 +93512,7 @@ func (c *RoutersInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -93405,6 +93735,7 @@ func (c *RoutersListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -93616,6 +93947,7 @@ func (c *RoutersPatchCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -93780,6 +94112,7 @@ func (c *RoutersPreviewCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}/preview")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -93939,6 +94272,7 @@ func (c *RoutersTestIamPermissionsCall) doRequest(alt string) (*http.Response, e
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -94117,6 +94451,7 @@ func (c *RoutersUpdateCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PUT", urls, body)
@@ -94290,6 +94625,7 @@ func (c *RoutesDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -94447,6 +94783,7 @@ func (c *RoutesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -94610,6 +94947,7 @@ func (c *RoutesInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -94822,6 +95160,7 @@ func (c *RoutesListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -95002,6 +95341,7 @@ func (c *RoutesTestIamPermissionsCall) doRequest(alt string) (*http.Response, er
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -95149,6 +95489,7 @@ func (c *SecurityPoliciesAddRuleCall) doRequest(alt string) (*http.Response, err
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}/addRule")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -95307,6 +95648,7 @@ func (c *SecurityPoliciesDeleteCall) doRequest(alt string) (*http.Response, erro
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -95463,6 +95805,7 @@ func (c *SecurityPoliciesGetCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -95621,6 +95964,7 @@ func (c *SecurityPoliciesGetRuleCall) doRequest(alt string) (*http.Response, err
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}/getRule")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -95789,6 +96133,7 @@ func (c *SecurityPoliciesInsertCall) doRequest(alt string) (*http.Response, erro
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -96000,6 +96345,7 @@ func (c *SecurityPoliciesListCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -96248,6 +96594,7 @@ func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) doRequest(alt stri
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/listPreconfiguredExpressionSets")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -96428,6 +96775,7 @@ func (c *SecurityPoliciesPatchCall) doRequest(alt string) (*http.Response, error
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -96586,6 +96934,7 @@ func (c *SecurityPoliciesPatchRuleCall) doRequest(alt string) (*http.Response, e
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}/patchRule")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -96738,6 +97087,7 @@ func (c *SecurityPoliciesRemoveRuleCall) doRequest(alt string) (*http.Response,
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}/removeRule")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -96888,6 +97238,7 @@ func (c *SecurityPoliciesSetLabelsCall) doRequest(alt string) (*http.Response, e
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{resource}/setLabels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -97035,6 +97386,7 @@ func (c *SecurityPoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Re
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -97201,6 +97553,7 @@ func (c *SnapshotsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{snapshot}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -97358,6 +97711,7 @@ func (c *SnapshotsGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{snapshot}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -97510,6 +97864,7 @@ func (c *SnapshotsGetIamPolicyCall) doRequest(alt string) (*http.Response, error
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{resource}/getIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -97724,6 +98079,7 @@ func (c *SnapshotsListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -97904,6 +98260,7 @@ func (c *SnapshotsSetIamPolicyCall) doRequest(alt string) (*http.Response, error
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{resource}/setIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -98051,6 +98408,7 @@ func (c *SnapshotsSetLabelsCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{resource}/setLabels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -98198,6 +98556,7 @@ func (c *SnapshotsTestIamPermissionsCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -98357,6 +98716,7 @@ func (c *SslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{sslCertificate}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -98513,6 +98873,7 @@ func (c *SslCertificatesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{sslCertificate}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -98675,6 +99036,7 @@ func (c *SslCertificatesInsertCall) doRequest(alt string) (*http.Response, error
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -98886,6 +99248,7 @@ func (c *SslCertificatesListCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -99066,6 +99429,7 @@ func (c *SslCertificatesTestIamPermissionsCall) doRequest(alt string) (*http.Res
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -99227,6 +99591,7 @@ func (c *SslPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/{sslPolicy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -99382,6 +99747,7 @@ func (c *SslPoliciesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/{sslPolicy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -99543,6 +99909,7 @@ func (c *SslPoliciesInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -99754,6 +100121,7 @@ func (c *SslPoliciesListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -100002,6 +100370,7 @@ func (c *SslPoliciesListAvailableFeaturesCall) doRequest(alt string) (*http.Resp
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/listAvailableFeatures")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -100182,6 +100551,7 @@ func (c *SslPoliciesPatchCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/{sslPolicy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -100333,6 +100703,7 @@ func (c *SslPoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Respons
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -100548,6 +100919,7 @@ func (c *SubnetworksAggregatedListCall) doRequest(alt string) (*http.Response, e
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/subnetworks")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -100741,6 +101113,7 @@ func (c *SubnetworksDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -100920,6 +101293,7 @@ func (c *SubnetworksExpandIpCidrRangeCall) doRequest(alt string) (*http.Response
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}/expandIpCidrRange")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -101090,6 +101464,7 @@ func (c *SubnetworksGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -101253,6 +101628,7 @@ func (c *SubnetworksGetIamPolicyCall) doRequest(alt string) (*http.Response, err
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{resource}/getIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -101426,6 +101802,7 @@ func (c *SubnetworksInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -101648,6 +102025,7 @@ func (c *SubnetworksListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -101904,6 +102282,7 @@ func (c *SubnetworksListUsableCall) doRequest(alt string) (*http.Response, error
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/subnetworks/listUsable")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -102109,6 +102488,7 @@ func (c *SubnetworksPatchCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -102272,6 +102652,7 @@ func (c *SubnetworksSetIamPolicyCall) doRequest(alt string) (*http.Response, err
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{resource}/setIamPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -102450,6 +102831,7 @@ func (c *SubnetworksSetPrivateIpGoogleAccessCall) doRequest(alt string) (*http.R
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}/setPrivateIpGoogleAccess")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -102613,6 +102995,7 @@ func (c *SubnetworksTestIamPermissionsCall) doRequest(alt string) (*http.Respons
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -102782,6 +103165,7 @@ func (c *TargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Response, err
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{targetHttpProxy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -102939,6 +103323,7 @@ func (c *TargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{targetHttpProxy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -103102,6 +103487,7 @@ func (c *TargetHttpProxiesInsertCall) doRequest(alt string) (*http.Response, err
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -103314,6 +103700,7 @@ func (c *TargetHttpProxiesListCall) doRequest(alt string) (*http.Response, error
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -103513,6 +103900,7 @@ func (c *TargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -103665,6 +104053,7 @@ func (c *TargetHttpProxiesTestIamPermissionsCall) doRequest(alt string) (*http.R
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -103824,6 +104213,7 @@ func (c *TargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Response, er
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -103980,6 +104370,7 @@ func (c *TargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, error
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -104142,6 +104533,7 @@ func (c *TargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Response, er
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -104353,6 +104745,7 @@ func (c *TargetHttpsProxiesListCall) doRequest(alt string) (*http.Response, erro
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -104551,6 +104944,7 @@ func (c *TargetHttpsProxiesSetQuicOverrideCall) doRequest(alt string) (*http.Res
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}/setQuicOverride")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -104720,6 +105114,7 @@ func (c *TargetHttpsProxiesSetSslCertificatesCall) doRequest(alt string) (*http.
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -104894,6 +105289,7 @@ func (c *TargetHttpsProxiesSetSslPolicyCall) doRequest(alt string) (*http.Respon
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}/setSslPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -105063,6 +105459,7 @@ func (c *TargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -105215,6 +105612,7 @@ func (c *TargetHttpsProxiesTestIamPermissionsCall) doRequest(alt string) (*http.
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -105431,6 +105829,7 @@ func (c *TargetInstancesAggregatedListCall) doRequest(alt string) (*http.Respons
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetInstances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -105625,6 +106024,7 @@ func (c *TargetInstancesDeleteCall) doRequest(alt string) (*http.Response, error
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances/{targetInstance}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -105793,6 +106193,7 @@ func (c *TargetInstancesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances/{targetInstance}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -105967,6 +106368,7 @@ func (c *TargetInstancesInsertCall) doRequest(alt string) (*http.Response, error
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -106190,6 +106592,7 @@ func (c *TargetInstancesListCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -106381,6 +106784,7 @@ func (c *TargetInstancesTestIamPermissionsCall) doRequest(alt string) (*http.Res
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -106559,6 +106963,7 @@ func (c *TargetPoolsAddHealthCheckCall) doRequest(alt string) (*http.Response, e
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -106741,6 +107146,7 @@ func (c *TargetPoolsAddInstanceCall) doRequest(alt string) (*http.Response, erro
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/addInstance")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -106970,6 +107376,7 @@ func (c *TargetPoolsAggregatedListCall) doRequest(alt string) (*http.Response, e
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetPools")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -107164,6 +107571,7 @@ func (c *TargetPoolsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -107332,6 +107740,7 @@ func (c *TargetPoolsGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -107489,6 +107898,7 @@ func (c *TargetPoolsGetHealthCall) doRequest(alt string) (*http.Response, error)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/getHealth")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -107666,6 +108076,7 @@ func (c *TargetPoolsInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -107889,6 +108300,7 @@ func (c *TargetPoolsListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -108099,6 +108511,7 @@ func (c *TargetPoolsRemoveHealthCheckCall) doRequest(alt string) (*http.Response
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -108281,6 +108694,7 @@ func (c *TargetPoolsRemoveInstanceCall) doRequest(alt string) (*http.Response, e
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/removeInstance")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -108470,6 +108884,7 @@ func (c *TargetPoolsSetBackupCall) doRequest(alt string) (*http.Response, error)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/setBackup")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -108639,6 +109054,7 @@ func (c *TargetPoolsTestIamPermissionsCall) doRequest(alt string) (*http.Respons
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -108807,6 +109223,7 @@ func (c *TargetSslProxiesDeleteCall) doRequest(alt string) (*http.Response, erro
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -108963,6 +109380,7 @@ func (c *TargetSslProxiesGetCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -109125,6 +109543,7 @@ func (c *TargetSslProxiesInsertCall) doRequest(alt string) (*http.Response, erro
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -109336,6 +109755,7 @@ func (c *TargetSslProxiesListCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -109534,6 +109954,7 @@ func (c *TargetSslProxiesSetBackendServiceCall) doRequest(alt string) (*http.Res
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}/setBackendService")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -109704,6 +110125,7 @@ func (c *TargetSslProxiesSetProxyHeaderCall) doRequest(alt string) (*http.Respon
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}/setProxyHeader")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -109874,6 +110296,7 @@ func (c *TargetSslProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Re
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}/setSslCertificates")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -110047,6 +110470,7 @@ func (c *TargetSslProxiesSetSslPolicyCall) doRequest(alt string) (*http.Response
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -110198,6 +110622,7 @@ func (c *TargetSslProxiesTestIamPermissionsCall) doRequest(alt string) (*http.Re
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -110357,6 +110782,7 @@ func (c *TargetTcpProxiesDeleteCall) doRequest(alt string) (*http.Response, erro
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies/{targetTcpProxy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -110513,6 +110939,7 @@ func (c *TargetTcpProxiesGetCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies/{targetTcpProxy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -110675,6 +111102,7 @@ func (c *TargetTcpProxiesInsertCall) doRequest(alt string) (*http.Response, erro
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -110886,6 +111314,7 @@ func (c *TargetTcpProxiesListCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -111084,6 +111513,7 @@ func (c *TargetTcpProxiesSetBackendServiceCall) doRequest(alt string) (*http.Res
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies/{targetTcpProxy}/setBackendService")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -111254,6 +111684,7 @@ func (c *TargetTcpProxiesSetProxyHeaderCall) doRequest(alt string) (*http.Respon
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies/{targetTcpProxy}/setProxyHeader")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -111473,6 +111904,7 @@ func (c *TargetVpnGatewaysAggregatedListCall) doRequest(alt string) (*http.Respo
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetVpnGateways")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -111666,6 +112098,7 @@ func (c *TargetVpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, err
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -111833,6 +112266,7 @@ func (c *TargetVpnGatewaysGetCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -112006,6 +112440,7 @@ func (c *TargetVpnGatewaysInsertCall) doRequest(alt string) (*http.Response, err
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -112228,6 +112663,7 @@ func (c *TargetVpnGatewaysListCall) doRequest(alt string) (*http.Response, error
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -112438,6 +112874,7 @@ func (c *TargetVpnGatewaysSetLabelsCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways/{resource}/setLabels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -112601,6 +113038,7 @@ func (c *TargetVpnGatewaysTestIamPermissionsCall) doRequest(alt string) (*http.R
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -112770,6 +113208,7 @@ func (c *UrlMapsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -112927,6 +113366,7 @@ func (c *UrlMapsGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -113090,6 +113530,7 @@ func (c *UrlMapsInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -113252,6 +113693,7 @@ func (c *UrlMapsInvalidateCacheCall) doRequest(alt string) (*http.Response, erro
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}/invalidateCache")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -113473,6 +113915,7 @@ func (c *UrlMapsListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -113674,6 +114117,7 @@ func (c *UrlMapsPatchCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -113826,6 +114270,7 @@ func (c *UrlMapsTestIamPermissionsCall) doRequest(alt string) (*http.Response, e
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -113994,6 +114439,7 @@ func (c *UrlMapsUpdateCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PUT", urls, body)
@@ -114148,6 +114594,7 @@ func (c *UrlMapsValidateCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}/validate")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -114362,6 +114809,7 @@ func (c *VpnTunnelsAggregatedListCall) doRequest(alt string) (*http.Response, er
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/vpnTunnels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -114555,6 +115003,7 @@ func (c *VpnTunnelsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels/{vpnTunnel}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -114722,6 +115171,7 @@ func (c *VpnTunnelsGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels/{vpnTunnel}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -114895,6 +115345,7 @@ func (c *VpnTunnelsInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -115117,6 +115568,7 @@ func (c *VpnTunnelsListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -115327,6 +115779,7 @@ func (c *VpnTunnelsSetLabelsCall) doRequest(alt string) (*http.Response, error)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels/{resource}/setLabels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -115490,6 +115943,7 @@ func (c *VpnTunnelsTestIamPermissionsCall) doRequest(alt string) (*http.Response
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels/{resource}/testIamPermissions")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -115642,6 +116096,7 @@ func (c *ZoneOperationsDeleteCall) doRequest(alt string) (*http.Response, error)
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/operations/{operation}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -115776,6 +116231,7 @@ func (c *ZoneOperationsGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/operations/{operation}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -116001,6 +116457,7 @@ func (c *ZoneOperationsListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/operations")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -116198,6 +116655,7 @@ func (c *ZonesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -116412,6 +116870,7 @@ func (c *ZonesListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
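Every hunk in these vendored `compute-gen.go` files is the same mechanical change from regenerating the Google API Go client: each `doRequest` method now sets `prettyPrint=false` next to `alt=json` in the request's query parameters, so the Compute API returns compact rather than pretty-printed JSON and large list responses shrink accordingly. A minimal stdlib-only sketch of the URL-building pattern (the base path, project, and zone below are illustrative placeholders; the real client resolves paths with `googleapi.ResolveRelative`):

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// Mirror the query parameters the generated doRequest methods set.
	params := url.Values{}
	params.Set("alt", "json")
	// The regenerated client adds this so the API omits pretty-printing
	// whitespace, which noticeably shrinks large list responses.
	params.Set("prettyPrint", "false")

	// Illustrative base path and resource path (placeholders).
	basePath := "https://www.googleapis.com/compute/v1/projects/"
	urls := basePath + "example-project/zones/us-central1-a/acceleratorTypes"
	urls += "?" + params.Encode()

	req, err := http.NewRequest("GET", urls, nil)
	if err != nil {
		panic(err)
	}
	// Prints ...?alt=json&prettyPrint=false (url.Values.Encode sorts keys).
	fmt.Println(req.URL.String())
}
```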
diff --git a/vertical-pod-autoscaler/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vertical-pod-autoscaler/vendor/google.golang.org/api/compute/v1/compute-gen.go
index 677b30aae388..315b765569ca 100644
--- a/vertical-pod-autoscaler/vendor/google.golang.org/api/compute/v1/compute-gen.go
+++ b/vertical-pod-autoscaler/vendor/google.golang.org/api/compute/v1/compute-gen.go
@@ -28115,6 +28115,7 @@ func (c *AcceleratorTypesAggregatedListCall) doRequest(alt string) (*http.Respon
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/acceleratorTypes")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -28303,6 +28304,7 @@ func (c *AcceleratorTypesGetCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/acceleratorTypes/{acceleratorType}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -28527,6 +28529,7 @@ func (c *AcceleratorTypesListCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/acceleratorTypes")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -28784,6 +28787,7 @@ func (c *AddressesAggregatedListCall) doRequest(alt string) (*http.Response, err
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/addresses")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -28978,6 +28982,7 @@ func (c *AddressesDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -29145,6 +29150,7 @@ func (c *AddressesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -29319,6 +29325,7 @@ func (c *AddressesInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -29542,6 +29549,7 @@ func (c *AddressesListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -29798,6 +29806,7 @@ func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, e
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/autoscalers")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -29991,6 +30000,7 @@ func (c *AutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -30158,6 +30168,7 @@ func (c *AutoscalersGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -30331,6 +30342,7 @@ func (c *AutoscalersInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -30553,6 +30565,7 @@ func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -30769,6 +30782,7 @@ func (c *AutoscalersPatchCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -30953,6 +30967,7 @@ func (c *AutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PUT", urls, body)
@@ -31130,6 +31145,7 @@ func (c *BackendBucketsAddSignedUrlKeyCall) doRequest(alt string) (*http.Respons
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}/addSignedUrlKey")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -31292,6 +31308,7 @@ func (c *BackendBucketsDeleteCall) doRequest(alt string) (*http.Response, error)
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -31454,6 +31471,7 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Resp
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}/deleteSignedUrlKey")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -31616,6 +31634,7 @@ func (c *BackendBucketsGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -31778,6 +31797,7 @@ func (c *BackendBucketsInsertCall) doRequest(alt string) (*http.Response, error)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -31989,6 +32009,7 @@ func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -32189,6 +32210,7 @@ func (c *BackendBucketsPatchCall) doRequest(alt string) (*http.Response, error)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -32360,6 +32382,7 @@ func (c *BackendBucketsUpdateCall) doRequest(alt string) (*http.Response, error)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PUT", urls, body)
@@ -32531,6 +32554,7 @@ func (c *BackendServicesAddSignedUrlKeyCall) doRequest(alt string) (*http.Respon
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/addSignedUrlKey")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -32750,6 +32774,7 @@ func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Respons
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/backendServices")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -32942,6 +32967,7 @@ func (c *BackendServicesDeleteCall) doRequest(alt string) (*http.Response, error
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -33104,6 +33130,7 @@ func (c *BackendServicesDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Res
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/deleteSignedUrlKey")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -33267,6 +33294,7 @@ func (c *BackendServicesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -33413,6 +33441,7 @@ func (c *BackendServicesGetHealthCall) doRequest(alt string) (*http.Response, er
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/getHealth")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -33580,6 +33609,7 @@ func (c *BackendServicesInsertCall) doRequest(alt string) (*http.Response, error
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -33792,6 +33822,7 @@ func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -33996,6 +34027,7 @@ func (c *BackendServicesPatchCall) doRequest(alt string) (*http.Response, error)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -34167,6 +34199,7 @@ func (c *BackendServicesSetSecurityPolicyCall) doRequest(alt string) (*http.Resp
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/setSecurityPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -34340,6 +34373,7 @@ func (c *BackendServicesUpdateCall) doRequest(alt string) (*http.Response, error
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PUT", urls, body)
@@ -34560,6 +34594,7 @@ func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, err
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/diskTypes")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -34750,6 +34785,7 @@ func (c *DiskTypesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes/{diskType}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -34975,6 +35011,7 @@ func (c *DiskTypesListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -35232,6 +35269,7 @@ func (c *DisksAggregatedListCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/disks")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -35439,6 +35477,7 @@ func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/createSnapshot")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -35621,6 +35660,7 @@ func (c *DisksDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -35788,6 +35828,7 @@ func (c *DisksGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -35972,6 +36013,7 @@ func (c *DisksInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -36200,6 +36242,7 @@ func (c *DisksListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -36410,6 +36453,7 @@ func (c *DisksResizeCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/resize")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -36592,6 +36636,7 @@ func (c *DisksSetLabelsCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/setLabels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -36765,6 +36810,7 @@ func (c *FirewallsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -36921,6 +36967,7 @@ func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -37084,6 +37131,7 @@ func (c *FirewallsInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -37296,6 +37344,7 @@ func (c *FirewallsListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -37497,6 +37546,7 @@ func (c *FirewallsPatchCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -37671,6 +37721,7 @@ func (c *FirewallsUpdateCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PUT", urls, body)
@@ -37891,6 +37942,7 @@ func (c *ForwardingRulesAggregatedListCall) doRequest(alt string) (*http.Respons
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/forwardingRules")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -38085,6 +38137,7 @@ func (c *ForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -38252,6 +38305,7 @@ func (c *ForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -38426,6 +38480,7 @@ func (c *ForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -38649,6 +38704,7 @@ func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -38860,6 +38916,7 @@ func (c *ForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, er
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -39033,6 +39090,7 @@ func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -39190,6 +39248,7 @@ func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -39353,6 +39412,7 @@ func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -39564,6 +39624,7 @@ func (c *GlobalAddressesListCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -39756,6 +39817,7 @@ func (c *GlobalForwardingRulesDeleteCall) doRequest(alt string) (*http.Response,
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -39913,6 +39975,7 @@ func (c *GlobalForwardingRulesGetCall) doRequest(alt string) (*http.Response, er
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -40076,6 +40139,7 @@ func (c *GlobalForwardingRulesInsertCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -40288,6 +40352,7 @@ func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, e
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -40488,6 +40553,7 @@ func (c *GlobalForwardingRulesSetTargetCall) doRequest(alt string) (*http.Respon
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}/setTarget")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -40708,6 +40774,7 @@ func (c *GlobalOperationsAggregatedListCall) doRequest(alt string) (*http.Respon
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/operations")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -40881,6 +40948,7 @@ func (c *GlobalOperationsDeleteCall) doRequest(alt string) (*http.Response, erro
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -41005,6 +41073,7 @@ func (c *GlobalOperationsGetCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -41219,6 +41288,7 @@ func (c *GlobalOperationsListCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -41410,6 +41480,7 @@ func (c *HealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -41566,6 +41637,7 @@ func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -41728,6 +41800,7 @@ func (c *HealthChecksInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -41939,6 +42012,7 @@ func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -42139,6 +42213,7 @@ func (c *HealthChecksPatchCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -42310,6 +42385,7 @@ func (c *HealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PUT", urls, body)
@@ -42474,6 +42550,7 @@ func (c *HttpHealthChecksDeleteCall) doRequest(alt string) (*http.Response, erro
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -42631,6 +42708,7 @@ func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -42794,6 +42872,7 @@ func (c *HttpHealthChecksInsertCall) doRequest(alt string) (*http.Response, erro
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -43006,6 +43085,7 @@ func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -43207,6 +43287,7 @@ func (c *HttpHealthChecksPatchCall) doRequest(alt string) (*http.Response, error
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -43379,6 +43460,7 @@ func (c *HttpHealthChecksUpdateCall) doRequest(alt string) (*http.Response, erro
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PUT", urls, body)
@@ -43542,6 +43624,7 @@ func (c *HttpsHealthChecksDeleteCall) doRequest(alt string) (*http.Response, err
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -43698,6 +43781,7 @@ func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -43860,6 +43944,7 @@ func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, err
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -44071,6 +44156,7 @@ func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -44271,6 +44357,7 @@ func (c *HttpsHealthChecksPatchCall) doRequest(alt string) (*http.Response, erro
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -44442,6 +44529,7 @@ func (c *HttpsHealthChecksUpdateCall) doRequest(alt string) (*http.Response, err
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PUT", urls, body)
@@ -44606,6 +44694,7 @@ func (c *ImagesDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -44777,6 +44866,7 @@ func (c *ImagesDeprecateCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}/deprecate")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -44937,6 +45027,7 @@ func (c *ImagesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -45089,6 +45180,7 @@ func (c *ImagesGetFromFamilyCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/family/{family}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -45259,6 +45351,7 @@ func (c *ImagesInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -45484,6 +45577,7 @@ func (c *ImagesListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -45664,6 +45758,7 @@ func (c *ImagesSetLabelsCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/setLabels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -45847,6 +45942,7 @@ func (c *InstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -46074,6 +46170,7 @@ func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.R
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroupManagers")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -46271,6 +46368,7 @@ func (c *InstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response,
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -46462,6 +46560,7 @@ func (c *InstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -46631,6 +46730,7 @@ func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, er
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -46810,6 +46910,7 @@ func (c *InstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -47031,6 +47132,7 @@ func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, e
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -47281,6 +47383,7 @@ func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -47492,6 +47595,7 @@ func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*htt
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -47675,6 +47779,7 @@ func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response,
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -47861,6 +47966,7 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*h
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -48046,6 +48152,7 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.R
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -48227,6 +48334,7 @@ func (c *InstanceGroupsAddInstancesCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -48454,6 +48562,7 @@ func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroups")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -48650,6 +48759,7 @@ func (c *InstanceGroupsDeleteCall) doRequest(alt string) (*http.Response, error)
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -48815,6 +48925,7 @@ func (c *InstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -48986,6 +49097,7 @@ func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -49207,6 +49319,7 @@ func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -49459,6 +49572,7 @@ func (c *InstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -49683,6 +49797,7 @@ func (c *InstanceGroupsRemoveInstancesCall) doRequest(alt string) (*http.Respons
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -49862,6 +49977,7 @@ func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -50035,6 +50151,7 @@ func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, err
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -50192,6 +50309,7 @@ func (c *InstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -50358,6 +50476,7 @@ func (c *InstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, err
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -50570,6 +50689,7 @@ func (c *InstanceTemplatesListCall) doRequest(alt string) (*http.Response, error
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -50773,6 +50893,7 @@ func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, er
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/addAccessConfig")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -51010,6 +51131,7 @@ func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, err
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -51222,6 +51344,7 @@ func (c *InstancesAttachDiskCall) doRequest(alt string) (*http.Response, error)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/attachDisk")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -51403,6 +51526,7 @@ func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -51578,6 +51702,7 @@ func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response,
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -51765,6 +51890,7 @@ func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error)
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/detachDisk")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -51940,6 +52066,7 @@ func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -52122,6 +52249,7 @@ func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/serialPort")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -52327,6 +52455,7 @@ func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -52555,6 +52684,7 @@ func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -52816,6 +52946,7 @@ func (c *InstancesListReferrersCall) doRequest(alt string) (*http.Response, erro
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/referrers")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -53029,6 +53160,7 @@ func (c *InstancesResetCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/reset")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -53207,6 +53339,7 @@ func (c *InstancesSetDeletionProtectionCall) doRequest(alt string) (*http.Respon
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/setDeletionProtection")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -53388,6 +53521,7 @@ func (c *InstancesSetDiskAutoDeleteCall) doRequest(alt string) (*http.Response,
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -53582,6 +53716,7 @@ func (c *InstancesSetLabelsCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setLabels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -53764,6 +53899,7 @@ func (c *InstancesSetMachineResourcesCall) doRequest(alt string) (*http.Response
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineResources")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -53946,6 +54082,7 @@ func (c *InstancesSetMachineTypeCall) doRequest(alt string) (*http.Response, err
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineType")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -54129,6 +54266,7 @@ func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMetadata")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -54313,6 +54451,7 @@ func (c *InstancesSetMinCpuPlatformCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -54495,6 +54634,7 @@ func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, erro
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setScheduling")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -54678,6 +54818,7 @@ func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setServiceAccount")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -54861,6 +55002,7 @@ func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setTags")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -55017,6 +55159,7 @@ func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Res
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -55185,6 +55328,7 @@ func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/start")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -55365,6 +55509,7 @@ func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Respo
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -55545,6 +55690,7 @@ func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/stop")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -55727,6 +55873,7 @@ func (c *InstancesUpdateAccessConfigCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateAccessConfig")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -55917,6 +56064,7 @@ func (c *InstancesUpdateNetworkInterfaceCall) doRequest(alt string) (*http.Respo
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateNetworkInterface")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -56153,6 +56301,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/interconnectAttachments")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -56347,6 +56496,7 @@ func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Respons reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -56513,6 +56663,7 @@ func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -56686,6 +56837,7 @@ func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Respons } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -56908,6 +57060,7 @@ func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -57119,6 +57272,7 @@ func (c *InterconnectAttachmentsPatchCall) doRequest(alt string) (*http.Response } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) @@ -57288,6 +57442,7 @@ func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, er } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations/{interconnectLocation}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -57501,6 +57656,7 @@ func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, e } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -57692,6 +57848,7 @@ func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -57848,6 +58005,7 @@ func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -58010,6 +58168,7 @@ func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -58221,6 +58380,7 @@ func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -58421,6 +58581,7 @@ func (c *InterconnectsPatchCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) @@ -58580,6 +58741,7 @@ func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenseCodes/{licenseCode}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -58725,6 +58887,7 @@ func (c *LicenseCodesTestIamPermissionsCall) doRequest(alt string) (*http.Respon } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenseCodes/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -58884,6 +59047,7 @@ func (c *LicensesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -59040,6 +59204,7 @@ func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -59201,6 +59366,7 @@ func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -59419,6 +59585,7 @@ func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -59599,6 +59766,7 @@ func (c *LicensesTestIamPermissionsCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -59815,6 +59983,7 @@ func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/machineTypes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -60005,6 +60174,7 @@ func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes/{machineType}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -60230,6 +60400,7 @@ func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -60437,6 +60608,7 @@ func (c *NetworksAddPeeringCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/addPeering") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -60601,6 +60773,7 @@ func (c *NetworksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -60758,6 +60931,7 @@ func (c *NetworksGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -60921,6 +61095,7 @@ func (c *NetworksInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -61133,6 +61308,7 @@ func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -61333,6 +61509,7 @@ func (c *NetworksPatchCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) @@ -61503,6 +61680,7 @@ func (c *NetworksRemovePeeringCall) doRequest(alt string) (*http.Response, error } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/removePeering") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -61667,6 +61845,7 @@ func (c *NetworksSwitchToCustomModeCall) doRequest(alt string) (*http.Response, reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/switchToCustomMode") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -61836,6 +62015,7 @@ func (c *NodeGroupsAddNodesCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/addNodes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -62065,6 +62245,7 @@ func (c *NodeGroupsAggregatedListCall) doRequest(alt string) (*http.Response, er } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/nodeGroups") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -62258,6 +62439,7 @@ func (c *NodeGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -62436,6 +62618,7 @@ func (c *NodeGroupsDeleteNodesCall) doRequest(alt string) (*http.Response, error } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/deleteNodes") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -62607,6 +62790,7 @@ func (c *NodeGroupsGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -62781,6 +62965,7 @@ func (c *NodeGroupsInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -63012,6 +63197,7 @@ func (c *NodeGroupsListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -63258,6 +63444,7 @@ func (c *NodeGroupsListNodesCall) doRequest(alt string) (*http.Response, error) reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/listNodes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -63476,6 +63663,7 @@ func (c *NodeGroupsSetNodeTemplateCall) doRequest(alt string) (*http.Response, e } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/setNodeTemplate") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -63704,6 +63892,7 @@ func (c *NodeTemplatesAggregatedListCall) doRequest(alt string) (*http.Response, } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/nodeTemplates") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -63897,6 +64086,7 @@ func (c *NodeTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{nodeTemplate}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -64064,6 +64254,7 @@ func (c *NodeTemplatesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{nodeTemplate}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -64237,6 +64428,7 @@ func (c *NodeTemplatesInsertCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -64459,6 +64651,7 @@ func (c *NodeTemplatesListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -64715,6 +64908,7 @@ func (c *NodeTypesAggregatedListCall) doRequest(alt string) (*http.Response, err } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/nodeTypes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -64904,6 +65098,7 @@ func (c *NodeTypesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeTypes/{nodeType}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -65128,6 +65323,7 @@ func (c *NodeTypesListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeTypes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -65326,6 +65522,7 @@ func (c *ProjectsDisableXpnHostCall) doRequest(alt string) (*http.Response, erro reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/disableXpnHost") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -65483,6 +65680,7 @@ func (c *ProjectsDisableXpnResourceCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/disableXpnResource") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -65635,6 +65833,7 @@ func (c *ProjectsEnableXpnHostCall) doRequest(alt string) (*http.Response, error reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/enableXpnHost") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -65793,6 +65992,7 @@ func (c *ProjectsEnableXpnResourceCall) doRequest(alt string) (*http.Response, e } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/enableXpnResource") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -65941,6 +66141,7 @@ func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -66082,6 +66283,7 @@ func (c *ProjectsGetXpnHostCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/getXpnHost") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -66285,6 +66487,7 @@ func (c *ProjectsGetXpnResourcesCall) doRequest(alt string) (*http.Response, err } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/getXpnResources") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -66525,6 +66728,7 @@ func (c *ProjectsListXpnHostsCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/listXpnHosts") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -66723,6 +66927,7 @@ func (c *ProjectsMoveDiskCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveDisk") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -66883,6 +67088,7 @@ func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveInstance") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -67044,6 +67250,7 @@ func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Res } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setCommonInstanceMetadata") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -67206,6 +67413,7 @@ func (c *ProjectsSetDefaultNetworkTierCall) doRequest(alt string) (*http.Respons } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setDefaultNetworkTier") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -67369,6 +67577,7 @@ func (c *ProjectsSetUsageExportBucketCall) doRequest(alt string) (*http.Response } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setUsageExportBucket") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -67528,6 +67737,7 @@ func (c *RegionAutoscalersDeleteCall) doRequest(alt string) (*http.Response, err reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{autoscaler}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -67694,6 +67904,7 @@ func (c *RegionAutoscalersGetCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{autoscaler}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -67867,6 +68078,7 @@ func (c *RegionAutoscalersInsertCall) doRequest(alt string) (*http.Response, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -68089,6 +68301,7 @@ func (c *RegionAutoscalersListCall) doRequest(alt string) (*http.Response, error } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -68305,6 +68518,7 @@ func (c *RegionAutoscalersPatchCall) doRequest(alt string) (*http.Response, erro } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) @@ -68489,6 +68703,7 @@ func (c *RegionAutoscalersUpdateCall) doRequest(alt string) (*http.Response, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) @@ -68660,6 +68875,7 @@ func (c *RegionBackendServicesDeleteCall) doRequest(alt string) (*http.Response, reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -68826,6 +69042,7 @@ func (c *RegionBackendServicesGetCall) doRequest(alt string) (*http.Response, er } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -68982,6 +69199,7 @@ func (c *RegionBackendServicesGetHealthCall) doRequest(alt string) (*http.Respon } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}/getHealth") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -69160,6 +69378,7 @@ func (c *RegionBackendServicesInsertCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -69382,6 +69601,7 @@ func (c *RegionBackendServicesListCall) doRequest(alt string) (*http.Response, e } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -69596,6 +69816,7 @@ func (c *RegionBackendServicesPatchCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) @@ -69780,6 +70001,7 @@ func (c *RegionBackendServicesUpdateCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) @@ -70008,6 +70230,7 @@ func (c *RegionCommitmentsAggregatedListCall) doRequest(alt string) (*http.Respo } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/commitments") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -70197,6 +70420,7 @@ func (c *RegionCommitmentsGetCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments/{commitment}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -70370,6 +70594,7 @@ func (c *RegionCommitmentsInsertCall) doRequest(alt string) (*http.Response, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -70592,6 +70817,7 @@ func (c *RegionCommitmentsListCall) doRequest(alt string) (*http.Response, error } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -70790,6 +71016,7 @@ func (c *RegionDiskTypesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/diskTypes/{diskType}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -71014,6 +71241,7 @@ func (c *RegionDiskTypesListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/diskTypes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -71223,6 +71451,7 @@ func (c *RegionDisksCreateSnapshotCall) doRequest(alt string) (*http.Response, e } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}/createSnapshot") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -71400,6 +71629,7 @@ func (c *RegionDisksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -71565,6 +71795,7 @@ func (c *RegionDisksGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -71745,6 +71976,7 @@ func (c *RegionDisksInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -71972,6 +72204,7 @@ func (c *RegionDisksListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -72181,6 +72414,7 @@ func (c *RegionDisksResizeCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}/resize") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -72362,6 +72596,7 @@ func (c *RegionDisksSetLabelsCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -72525,6 +72760,7 @@ func (c *RegionDisksTestIamPermissionsCall) doRequest(alt string) (*http.Respons } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -72718,6 +72954,7 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) doRequest(alt string) } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -72891,6 +73128,7 @@ func (c *RegionInstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Res reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -73082,6 +73320,7 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) doRequest(alt string) ( } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -73250,6 +73489,7 @@ func (c *RegionInstanceGroupManagersGetCall) doRequest(alt string) (*http.Respon } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -73428,6 +73668,7 @@ func (c *RegionInstanceGroupManagersInsertCall) doRequest(alt string) (*http.Res } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -73649,6 +73890,7 @@ func (c *RegionInstanceGroupManagersListCall) doRequest(alt string) (*http.Respo } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -73897,6 +74139,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) doRequest(alt stri reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -74108,6 +74351,7 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -74292,6 +74536,7 @@ func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Res reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -74479,6 +74724,7 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) doRequest(alt strin } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -74660,6 +74906,7 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (* } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -74827,6 +75074,7 @@ func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, err } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -75049,6 +75297,7 @@ func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, er } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -75304,6 +75553,7 @@ func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Res } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -75525,6 +75775,7 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Res } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -75679,6 +75930,7 @@ func (c *RegionOperationsDeleteCall) doRequest(alt string) (*http.Response, erro reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -75813,6 +76065,7 @@ func (c *RegionOperationsGetCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -76038,6 +76291,7 @@ func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -76235,6 +76489,7 @@ func (c *RegionsGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -76449,6 +76704,7 @@ func (c *RegionsListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -76696,6 +76952,7 @@ func (c *RoutersAggregatedListCall) doRequest(alt string) (*http.Response, error } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/routers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -76889,6 +77146,7 @@ func (c *RoutersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -77056,6 +77314,7 @@ func (c *RoutersGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -77219,6 +77478,7 @@ func (c *RoutersGetRouterStatusCall) doRequest(alt string) (*http.Response, erro } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}/getRouterStatus") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -77392,6 +77652,7 @@ func (c *RoutersInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -77614,6 +77875,7 @@ func (c *RoutersListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -77825,6 +78087,7 @@ func (c *RoutersPatchCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) @@ -77989,6 +78252,7 @@ func (c *RoutersPreviewCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}/preview") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -78167,6 +78431,7 @@ func (c *RoutersUpdateCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) @@ -78340,6 +78605,7 @@ func (c *RoutesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -78497,6 +78763,7 @@ func (c *RoutesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -78660,6 +78927,7 @@ func (c *RoutesInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -78872,6 +79140,7 @@ func (c *RoutesListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -79051,6 +79320,7 @@ func (c *SecurityPoliciesAddRuleCall) doRequest(alt string) (*http.Response, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}/addRule") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -79209,6 +79479,7 @@ func (c *SecurityPoliciesDeleteCall) doRequest(alt string) (*http.Response, erro reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -79365,6 +79636,7 @@ func (c *SecurityPoliciesGetCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -79523,6 +79795,7 @@ func (c *SecurityPoliciesGetRuleCall) doRequest(alt string) (*http.Response, err } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}/getRule") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -79691,6 +79964,7 @@ func (c *SecurityPoliciesInsertCall) doRequest(alt string) (*http.Response, erro } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -79902,6 +80176,7 @@ func (c *SecurityPoliciesListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -80101,6 +80376,7 @@ func (c *SecurityPoliciesPatchCall) doRequest(alt string) (*http.Response, error } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}") urls += "?" 
+ c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -80259,6 +80535,7 @@ func (c *SecurityPoliciesPatchRuleCall) doRequest(alt string) (*http.Response, e
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}/patchRule")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -80411,6 +80688,7 @@ func (c *SecurityPoliciesRemoveRuleCall) doRequest(alt string) (*http.Response,
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}/removeRule")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -80579,6 +80857,7 @@ func (c *SnapshotsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{snapshot}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -80736,6 +81015,7 @@ func (c *SnapshotsGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{snapshot}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -80950,6 +81230,7 @@ func (c *SnapshotsListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -81130,6 +81411,7 @@ func (c *SnapshotsSetLabelsCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{resource}/setLabels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -81288,6 +81570,7 @@ func (c *SslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{sslCertificate}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -81444,6 +81727,7 @@ func (c *SslCertificatesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{sslCertificate}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -81606,6 +81890,7 @@ func (c *SslCertificatesInsertCall) doRequest(alt string) (*http.Response, error
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -81817,6 +82102,7 @@ func (c *SslCertificatesListCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -82010,6 +82296,7 @@ func (c *SslPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/{sslPolicy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -82165,6 +82452,7 @@ func (c *SslPoliciesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/{sslPolicy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -82326,6 +82614,7 @@ func (c *SslPoliciesInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -82537,6 +82826,7 @@ func (c *SslPoliciesListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -82785,6 +83075,7 @@ func (c *SslPoliciesListAvailableFeaturesCall) doRequest(alt string) (*http.Resp
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/listAvailableFeatures")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -82965,6 +83256,7 @@ func (c *SslPoliciesPatchCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/{sslPolicy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -83183,6 +83475,7 @@ func (c *SubnetworksAggregatedListCall) doRequest(alt string) (*http.Response, e
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/subnetworks")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -83376,6 +83669,7 @@ func (c *SubnetworksDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -83555,6 +83849,7 @@ func (c *SubnetworksExpandIpCidrRangeCall) doRequest(alt string) (*http.Response
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}/expandIpCidrRange")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -83725,6 +84020,7 @@ func (c *SubnetworksGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -83898,6 +84194,7 @@ func (c *SubnetworksInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -84120,6 +84417,7 @@ func (c *SubnetworksListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -84376,6 +84674,7 @@ func (c *SubnetworksListUsableCall) doRequest(alt string) (*http.Response, error
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/subnetworks/listUsable")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -84581,6 +84880,7 @@ func (c *SubnetworksPatchCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -84764,6 +85064,7 @@ func (c *SubnetworksSetPrivateIpGoogleAccessCall) doRequest(alt string) (*http.R
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}/setPrivateIpGoogleAccess")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -84937,6 +85238,7 @@ func (c *TargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Response, err
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{targetHttpProxy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -85094,6 +85396,7 @@ func (c *TargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{targetHttpProxy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -85257,6 +85560,7 @@ func (c *TargetHttpProxiesInsertCall) doRequest(alt string) (*http.Response, err
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -85469,6 +85773,7 @@ func (c *TargetHttpProxiesListCall) doRequest(alt string) (*http.Response, error
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -85668,6 +85973,7 @@ func (c *TargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -85831,6 +86137,7 @@ func (c *TargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Response, er
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -85987,6 +86294,7 @@ func (c *TargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, error
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -86149,6 +86457,7 @@ func (c *TargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Response, er
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -86360,6 +86669,7 @@ func (c *TargetHttpsProxiesListCall) doRequest(alt string) (*http.Response, erro
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -86558,6 +86868,7 @@ func (c *TargetHttpsProxiesSetQuicOverrideCall) doRequest(alt string) (*http.Res
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}/setQuicOverride")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -86727,6 +87038,7 @@ func (c *TargetHttpsProxiesSetSslCertificatesCall) doRequest(alt string) (*http.
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -86901,6 +87213,7 @@ func (c *TargetHttpsProxiesSetSslPolicyCall) doRequest(alt string) (*http.Respon
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}/setSslPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -87070,6 +87383,7 @@ func (c *TargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -87290,6 +87604,7 @@ func (c *TargetInstancesAggregatedListCall) doRequest(alt string) (*http.Respons
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetInstances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -87484,6 +87799,7 @@ func (c *TargetInstancesDeleteCall) doRequest(alt string) (*http.Response, error
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances/{targetInstance}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -87652,6 +87968,7 @@ func (c *TargetInstancesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances/{targetInstance}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -87826,6 +88143,7 @@ func (c *TargetInstancesInsertCall) doRequest(alt string) (*http.Response, error
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -88049,6 +88367,7 @@ func (c *TargetInstancesListCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -88259,6 +88578,7 @@ func (c *TargetPoolsAddHealthCheckCall) doRequest(alt string) (*http.Response, e
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -88441,6 +88761,7 @@ func (c *TargetPoolsAddInstanceCall) doRequest(alt string) (*http.Response, erro
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/addInstance")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -88670,6 +88991,7 @@ func (c *TargetPoolsAggregatedListCall) doRequest(alt string) (*http.Response, e
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetPools")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -88864,6 +89186,7 @@ func (c *TargetPoolsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -89032,6 +89355,7 @@ func (c *TargetPoolsGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -89189,6 +89513,7 @@ func (c *TargetPoolsGetHealthCall) doRequest(alt string) (*http.Response, error)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/getHealth")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -89366,6 +89691,7 @@ func (c *TargetPoolsInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -89589,6 +89915,7 @@ func (c *TargetPoolsListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -89799,6 +90126,7 @@ func (c *TargetPoolsRemoveHealthCheckCall) doRequest(alt string) (*http.Response
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -89981,6 +90309,7 @@ func (c *TargetPoolsRemoveInstanceCall) doRequest(alt string) (*http.Response, e
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/removeInstance")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -90170,6 +90499,7 @@ func (c *TargetPoolsSetBackupCall) doRequest(alt string) (*http.Response, error)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/setBackup")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -90348,6 +90678,7 @@ func (c *TargetSslProxiesDeleteCall) doRequest(alt string) (*http.Response, erro
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -90504,6 +90835,7 @@ func (c *TargetSslProxiesGetCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -90666,6 +90998,7 @@ func (c *TargetSslProxiesInsertCall) doRequest(alt string) (*http.Response, erro
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -90877,6 +91210,7 @@ func (c *TargetSslProxiesListCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -91075,6 +91409,7 @@ func (c *TargetSslProxiesSetBackendServiceCall) doRequest(alt string) (*http.Res
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}/setBackendService")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -91245,6 +91580,7 @@ func (c *TargetSslProxiesSetProxyHeaderCall) doRequest(alt string) (*http.Respon
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}/setProxyHeader")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -91415,6 +91751,7 @@ func (c *TargetSslProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Re
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}/setSslCertificates")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -91588,6 +91925,7 @@ func (c *TargetSslProxiesSetSslPolicyCall) doRequest(alt string) (*http.Response
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -91750,6 +92088,7 @@ func (c *TargetTcpProxiesDeleteCall) doRequest(alt string) (*http.Response, erro
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies/{targetTcpProxy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -91906,6 +92245,7 @@ func (c *TargetTcpProxiesGetCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies/{targetTcpProxy}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -92068,6 +92408,7 @@ func (c *TargetTcpProxiesInsertCall) doRequest(alt string) (*http.Response, erro
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -92279,6 +92620,7 @@ func (c *TargetTcpProxiesListCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -92477,6 +92819,7 @@ func (c *TargetTcpProxiesSetBackendServiceCall) doRequest(alt string) (*http.Res
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies/{targetTcpProxy}/setBackendService")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -92647,6 +92990,7 @@ func (c *TargetTcpProxiesSetProxyHeaderCall) doRequest(alt string) (*http.Respon
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies/{targetTcpProxy}/setProxyHeader")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -92866,6 +93210,7 @@ func (c *TargetVpnGatewaysAggregatedListCall) doRequest(alt string) (*http.Respo
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetVpnGateways")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -93059,6 +93404,7 @@ func (c *TargetVpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, err
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -93226,6 +93572,7 @@ func (c *TargetVpnGatewaysGetCall) doRequest(alt string) (*http.Response, error)
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -93399,6 +93746,7 @@ func (c *TargetVpnGatewaysInsertCall) doRequest(alt string) (*http.Response, err
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -93621,6 +93969,7 @@ func (c *TargetVpnGatewaysListCall) doRequest(alt string) (*http.Response, error
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -93822,6 +94171,7 @@ func (c *UrlMapsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -93979,6 +94329,7 @@ func (c *UrlMapsGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -94142,6 +94493,7 @@ func (c *UrlMapsInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -94304,6 +94656,7 @@ func (c *UrlMapsInvalidateCacheCall) doRequest(alt string) (*http.Response, erro
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}/invalidateCache")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -94525,6 +94878,7 @@ func (c *UrlMapsListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -94726,6 +95080,7 @@ func (c *UrlMapsPatchCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PATCH", urls, body)
@@ -94898,6 +95253,7 @@ func (c *UrlMapsUpdateCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PUT", urls, body)
@@ -95052,6 +95408,7 @@ func (c *UrlMapsValidateCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}/validate")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -95266,6 +95623,7 @@ func (c *VpnTunnelsAggregatedListCall) doRequest(alt string) (*http.Response, er
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/vpnTunnels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -95459,6 +95817,7 @@ func (c *VpnTunnelsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels/{vpnTunnel}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -95626,6 +95985,7 @@ func (c *VpnTunnelsGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels/{vpnTunnel}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -95799,6 +96159,7 @@ func (c *VpnTunnelsInsertCall) doRequest(alt string) (*http.Response, error) {
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -96021,6 +96382,7 @@ func (c *VpnTunnelsListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -96205,6 +96567,7 @@ func (c *ZoneOperationsDeleteCall) doRequest(alt string) (*http.Response, error)
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/operations/{operation}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -96339,6 +96702,7 @@ func (c *ZoneOperationsGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/operations/{operation}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -96564,6 +96928,7 @@ func (c *ZoneOperationsListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/operations")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -96761,6 +97126,7 @@ func (c *ZonesGetCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -96975,6 +97341,7 @@ func (c *ZonesListCall) doRequest(alt string) (*http.Response, error) {
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
diff --git a/vertical-pod-autoscaler/vendor/google.golang.org/api/container/v1/container-gen.go b/vertical-pod-autoscaler/vendor/google.golang.org/api/container/v1/container-gen.go
index 38fe8e190cf6..d8ca50f9fbe7 100644
--- a/vertical-pod-autoscaler/vendor/google.golang.org/api/container/v1/container-gen.go
+++ b/vertical-pod-autoscaler/vendor/google.golang.org/api/container/v1/container-gen.go
@@ -3349,6 +3349,7 @@ func (c *ProjectsLocationsGetServerConfigCall) doRequest(alt string) (*http.Resp
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}/serverConfig")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -3491,6 +3492,7 @@ func (c *ProjectsLocationsClustersCompleteIpRotationCall) doRequest(alt string)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:completeIpRotation")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -3642,6 +3644,7 @@ func (c *ProjectsLocationsClustersCreateCall) doRequest(alt string) (*http.Respo
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/clusters")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -3811,6 +3814,7 @@ func (c *ProjectsLocationsClustersDeleteCall) doRequest(alt string) (*http.Respo
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -3995,6 +3999,7 @@ func (c *ProjectsLocationsClustersGetCall) doRequest(alt string) (*http.Response
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -4172,6 +4177,7 @@ func (c *ProjectsLocationsClustersListCall) doRequest(alt string) (*http.Respons
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/clusters")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -4314,6 +4320,7 @@ func (c *ProjectsLocationsClustersSetAddonsCall) doRequest(alt string) (*http.Re
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:setAddons")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -4450,6 +4457,7 @@ func (c *ProjectsLocationsClustersSetLegacyAbacCall) doRequest(alt string) (*htt
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:setLegacyAbac")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -4585,6 +4593,7 @@ func (c *ProjectsLocationsClustersSetLocationsCall) doRequest(alt string) (*http
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:setLocations")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -4720,6 +4729,7 @@ func (c *ProjectsLocationsClustersSetLoggingCall) doRequest(alt string) (*http.R
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:setLogging")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -4855,6 +4865,7 @@ func (c *ProjectsLocationsClustersSetMaintenancePolicyCall) doRequest(alt string
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:setMaintenancePolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -4994,6 +5005,7 @@ func (c *ProjectsLocationsClustersSetMasterAuthCall) doRequest(alt string) (*htt
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:setMasterAuth")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -5129,6 +5141,7 @@ func (c *ProjectsLocationsClustersSetMonitoringCall) doRequest(alt string) (*htt
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:setMonitoring")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -5264,6 +5277,7 @@ func (c *ProjectsLocationsClustersSetNetworkPolicyCall) doRequest(alt string) (*
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:setNetworkPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -5399,6 +5413,7 @@ func (c *ProjectsLocationsClustersSetResourceLabelsCall) doRequest(alt string) (
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:setResourceLabels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -5534,6 +5549,7 @@ func (c *ProjectsLocationsClustersStartIpRotationCall) doRequest(alt string) (*h
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:startIpRotation")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -5669,6 +5685,7 @@ func (c *ProjectsLocationsClustersUpdateCall) doRequest(alt string) (*http.Respo
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PUT", urls, body)
@@ -5804,6 +5821,7 @@ func (c *ProjectsLocationsClustersUpdateMasterCall) doRequest(alt string) (*http
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:updateMaster")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -5939,6 +5957,7 @@ func (c *ProjectsLocationsClustersNodePoolsCreateCall) doRequest(alt string) (*h
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/nodePools")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -6105,6 +6124,7 @@ func (c *ProjectsLocationsClustersNodePoolsDeleteCall) doRequest(alt string) (*h
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -6302,6 +6322,7 @@ func (c *ProjectsLocationsClustersNodePoolsGetCall) doRequest(alt string) (*http
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -6491,6 +6512,7 @@ func (c *ProjectsLocationsClustersNodePoolsListCall) doRequest(alt string) (*htt
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/nodePools")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -6640,6 +6662,7 @@ func (c *ProjectsLocationsClustersNodePoolsRollbackCall) doRequest(alt string) (
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:rollback")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -6776,6 +6799,7 @@ func (c *ProjectsLocationsClustersNodePoolsSetAutoscalingCall) doRequest(alt str
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:setAutoscaling")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -6911,6 +6935,7 @@ func (c *ProjectsLocationsClustersNodePoolsSetManagementCall) doRequest(alt stri
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:setManagement")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -7046,6 +7071,7 @@ func (c *ProjectsLocationsClustersNodePoolsSetSizeCall) doRequest(alt string) (*
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:setSize")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -7182,6 +7208,7 @@ func (c *ProjectsLocationsClustersNodePoolsUpdateCall) doRequest(alt string) (*h
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PUT", urls, body)
@@ -7317,6 +7344,7 @@ func (c *ProjectsLocationsOperationsCancelCall) doRequest(alt string) (*http.Res
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:cancel")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -7489,6 +7517,7 @@ func (c *ProjectsLocationsOperationsGetCall) doRequest(alt string) (*http.Respon
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -7666,6 +7695,7 @@ func (c *ProjectsLocationsOperationsListCall) doRequest(alt string) (*http.Respo
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/operations")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -7826,6 +7856,7 @@ func (c *ProjectsZonesGetServerconfigCall) doRequest(alt string) (*http.Response
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/serverconfig")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -7974,6 +8005,7 @@ func (c *ProjectsZonesClustersAddonsCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/addons")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -8128,6 +8160,7 @@ func (c *ProjectsZonesClustersCompleteIpRotationCall) doRequest(alt string) (*ht
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:completeIpRotation")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -8296,6 +8329,7 @@ func (c *ProjectsZonesClustersCreateCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -8454,6 +8488,7 @@ func (c *ProjectsZonesClustersDeleteCall) doRequest(alt string) (*http.Response,
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -8625,6 +8660,7 @@ func (c *ProjectsZonesClustersGetCall) doRequest(alt string) (*http.Response, er
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -8782,6 +8818,7 @@ func (c *ProjectsZonesClustersLegacyAbacCall) doRequest(alt string) (*http.Respo
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/legacyAbac")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -8952,6 +8989,7 @@ func (c *ProjectsZonesClustersListCall) doRequest(alt string) (*http.Response, e
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -9100,6 +9138,7 @@ func (c *ProjectsZonesClustersLocationsCall) doRequest(alt string) (*http.Respon
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/locations")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -9254,6 +9293,7 @@ func (c *ProjectsZonesClustersLoggingCall) doRequest(alt string) (*http.Response
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/logging")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -9408,6 +9448,7 @@ func (c *ProjectsZonesClustersMasterCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/master")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -9562,6 +9603,7 @@ func (c *ProjectsZonesClustersMonitoringCall) doRequest(alt string) (*http.Respo
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/monitoring")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -9716,6 +9758,7 @@ func (c *ProjectsZonesClustersResourceLabelsCall) doRequest(alt string) (*http.R
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/resourceLabels")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -9870,6 +9913,7 @@ func (c *ProjectsZonesClustersSetMaintenancePolicyCall) doRequest(alt string) (*
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setMaintenancePolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -10028,6 +10072,7 @@ func (c *ProjectsZonesClustersSetMasterAuthCall) doRequest(alt string) (*http.Re
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setMasterAuth")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -10182,6 +10227,7 @@ func (c *ProjectsZonesClustersSetNetworkPolicyCall) doRequest(alt string) (*http
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setNetworkPolicy")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -10336,6 +10382,7 @@ func (c *ProjectsZonesClustersStartIpRotationCall) doRequest(alt string) (*http.
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:startIpRotation")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -10490,6 +10537,7 @@ func (c *ProjectsZonesClustersUpdateCall) doRequest(alt string) (*http.Response,
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("PUT", urls, body)
@@ -10646,6 +10694,7 @@ func (c *ProjectsZonesClustersNodePoolsAutoscalingCall) doRequest(alt string) (*
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/autoscaling")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -10808,6 +10857,7 @@ func (c *ProjectsZonesClustersNodePoolsCreateCall) doRequest(alt string) (*http.
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -10967,6 +11017,7 @@ func (c *ProjectsZonesClustersNodePoolsDeleteCall) doRequest(alt string) (*http.
 	reqHeaders.Set("User-Agent", c.s.userAgent())
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("DELETE", urls, body)
@@ -11150,6 +11201,7 @@ func (c *ProjectsZonesClustersNodePoolsGetCall) doRequest(alt string) (*http.Res
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -11329,6 +11381,7 @@ func (c *ProjectsZonesClustersNodePoolsListCall) doRequest(alt string) (*http.Re
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("GET", urls, body)
@@ -11489,6 +11542,7 @@ func (c *ProjectsZonesClustersNodePoolsRollbackCall) doRequest(alt string) (*htt
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}:rollback")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -11653,6 +11707,7 @@ func (c *ProjectsZonesClustersNodePoolsSetManagementCall) doRequest(alt string)
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/setManagement")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -11817,6 +11872,7 @@ func (c *ProjectsZonesClustersNodePoolsSetSizeCall) doRequest(alt string) (*http
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/setSize")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -11982,6 +12038,7 @@ func (c *ProjectsZonesClustersNodePoolsUpdateCall) doRequest(alt string) (*http.
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/update")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -12144,6 +12201,7 @@ func (c *ProjectsZonesOperationsCancelCall) doRequest(alt string) (*http.Respons
 	}
 	reqHeaders.Set("Content-Type", "application/json")
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/operations/{operationId}:cancel")
 	urls += "?" + c.urlParams_.Encode()
 	req, _ := http.NewRequest("POST", urls, body)
@@ -12313,6 +12371,7 @@ func (c *ProjectsZonesOperationsGetCall) doRequest(alt string) (*http.Response,
 	}
 	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/operations/{operationId}")
 	urls += "?" + c.urlParams_.Encode()
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -12484,6 +12543,7 @@ func (c *ProjectsZonesOperationsListCall) doRequest(alt string) (*http.Response, } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/operations") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) diff --git a/vertical-pod-autoscaler/vendor/google.golang.org/api/tpu/v1/tpu-api.json b/vertical-pod-autoscaler/vendor/google.golang.org/api/tpu/v1/tpu-api.json index 565144ce4384..1f96a4ad2af8 100644 --- a/vertical-pod-autoscaler/vendor/google.golang.org/api/tpu/v1/tpu-api.json +++ b/vertical-pod-autoscaler/vendor/google.golang.org/api/tpu/v1/tpu-api.json @@ -658,7 +658,7 @@ } } }, - "revision": "20180807", + "revision": "20180902", "rootUrl": "https://tpu.googleapis.com/", "schemas": { "AcceleratorType": { @@ -913,7 +913,8 @@ "STOPPED", "STOPPING", "STARTING", - "PREEMPTED" + "PREEMPTED", + "TERMINATED" ], "enumDescriptions": [ "TPU node state is not known/set.", @@ -926,7 +927,8 @@ "7 - Reserved. Was SUSPENDED.\nTPU node is stopped.", "TPU node is currently stopping.", "TPU node is currently starting.", - "TPU node has been preempted. Only applies to Preemptible TPU Nodes." + "TPU node has been preempted. Only applies to Preemptible TPU Nodes.", + "TPU node has been terminated due to maintenance or has reached the end of\nits life cycle (for preemptible nodes)." ], "type": "string" }, diff --git a/vertical-pod-autoscaler/vendor/google.golang.org/api/tpu/v1/tpu-gen.go b/vertical-pod-autoscaler/vendor/google.golang.org/api/tpu/v1/tpu-gen.go index 53c034d5a55d..964942b33b9b 100644 --- a/vertical-pod-autoscaler/vendor/google.golang.org/api/tpu/v1/tpu-gen.go +++ b/vertical-pod-autoscaler/vendor/google.golang.org/api/tpu/v1/tpu-gen.go @@ -577,6 +577,9 @@ type Node struct { // "STARTING" - TPU node is currently starting. // "PREEMPTED" - TPU node has been preempted. Only applies to // Preemptible TPU Nodes. + // "TERMINATED" - TPU node has been terminated due to maintenance or + // has reached the end of + // its life cycle (for preemptible nodes). State string `json:"state,omitempty"` // TensorflowVersion: The version of Tensorflow running in the @@ -1024,6 +1027,7 @@ func (c *ProjectsLocationsGetCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -1185,6 +1189,7 @@ func (c *ProjectsLocationsListCall) doRequest(alt string) (*http.Response, error } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}/locations") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -1361,6 +1366,7 @@ func (c *ProjectsLocationsAcceleratorTypesGetCall) doRequest(alt string) (*http. } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -1526,6 +1532,7 @@ func (c *ProjectsLocationsAcceleratorTypesListCall) doRequest(alt string) (*http } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/acceleratorTypes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -1707,6 +1714,7 @@ func (c *ProjectsLocationsNodesCreateCall) doRequest(alt string) (*http.Response } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/nodes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -1840,6 +1848,7 @@ func (c *ProjectsLocationsNodesDeleteCall) doRequest(alt string) (*http.Response reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -1979,6 +1988,7 @@ func (c *ProjectsLocationsNodesGetCall) doRequest(alt string) (*http.Response, e } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -2132,6 +2142,7 @@ func (c *ProjectsLocationsNodesListCall) doRequest(alt string) (*http.Response, } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/nodes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -2296,6 +2307,7 @@ func (c *ProjectsLocationsNodesReimageCall) doRequest(alt string) (*http.Respons } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:reimage") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -2431,6 +2443,7 @@ func (c *ProjectsLocationsNodesStartCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:start") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -2566,6 +2579,7 @@ func (c *ProjectsLocationsNodesStopCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:stop") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -2711,6 +2725,7 @@ func (c *ProjectsLocationsOperationsCancelCall) doRequest(alt string) (*http.Res reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:cancel") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) @@ -2842,6 +2857,7 @@ func (c *ProjectsLocationsOperationsDeleteCall) doRequest(alt string) (*http.Res reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) @@ -2985,6 +3001,7 @@ func (c *ProjectsLocationsOperationsGetCall) doRequest(alt string) (*http.Respon } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -3161,6 +3178,7 @@ func (c *ProjectsLocationsOperationsListCall) doRequest(alt string) (*http.Respo } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}/operations") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -3337,6 +3355,7 @@ func (c *ProjectsLocationsTensorflowVersionsGetCall) doRequest(alt string) (*htt } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) @@ -3502,6 +3521,7 @@ func (c *ProjectsLocationsTensorflowVersionsListCall) doRequest(alt string) (*ht } var body io.Reader = nil c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/tensorflowVersions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) diff --git a/vertical-pod-autoscaler/vendor/google.golang.org/grpc/balancer.go b/vertical-pod-autoscaler/vendor/google.golang.org/grpc/balancer.go index e1730166cde6..5aeb646d1740 100644 --- a/vertical-pod-autoscaler/vendor/google.golang.org/grpc/balancer.go +++ b/vertical-pod-autoscaler/vendor/google.golang.org/grpc/balancer.go @@ -19,7 +19,6 @@ package grpc import ( - "fmt" "net" "sync" @@ -118,26 +117,6 @@ type Balancer interface { Close() error } -// downErr implements net.Error. It is constructed by gRPC internals and passed to the down -// call of Balancer. -type downErr struct { - timeout bool - temporary bool - desc string -} - -func (e downErr) Error() string { return e.desc } -func (e downErr) Timeout() bool { return e.timeout } -func (e downErr) Temporary() bool { return e.temporary } - -func downErrorf(timeout, temporary bool, format string, a ...interface{}) downErr { - return downErr{ - timeout: timeout, - temporary: temporary, - desc: fmt.Sprintf(format, a...), - } -} - // RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch // the name resolution updates and updates the addresses available correspondingly. 
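
Every generated `doRequest` method in the vendored container and TPU clients above gains the same one-line change: `c.urlParams_.Set("prettyPrint", "false")`, which asks the API server to skip indentation in its JSON responses and so shrinks every payload. A minimal, self-contained sketch of how that query string is assembled; `buildCallURL` is a hypothetical stand-in for the generated code, not part of it:

```go
package main

import (
	"fmt"
	"net/url"
)

// buildCallURL mimics what the generated doRequest methods do: collect
// query parameters in url.Values and append the encoded form to the
// resolved path.
func buildCallURL(base string, params url.Values) string {
	return base + "?" + params.Encode()
}

func main() {
	params := url.Values{}
	params.Set("alt", "json")
	// The new generated code always adds this, so responses arrive
	// without pretty-printing whitespace.
	params.Set("prettyPrint", "false")
	fmt.Println(buildCallURL("https://container.googleapis.com/v1/projects/p/zones/z/clusters", params))
	// Output: https://container.googleapis.com/v1/projects/p/zones/z/clusters?alt=json&prettyPrint=false
}
```
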
// @@ -410,7 +389,3 @@ func (rr *roundRobin) Close() error { type pickFirst struct { *roundRobin } - -func pickFirstBalancerV1(r naming.Resolver) Balancer { - return &pickFirst{&roundRobin{r: r}} -} diff --git a/vertical-pod-autoscaler/vendor/google.golang.org/grpc/clientconn.go b/vertical-pod-autoscaler/vendor/google.golang.org/grpc/clientconn.go index bb6aeb8c6cca..318ac4073152 100644 --- a/vertical-pod-autoscaler/vendor/google.golang.org/grpc/clientconn.go +++ b/vertical-pod-autoscaler/vendor/google.golang.org/grpc/clientconn.go @@ -65,8 +65,6 @@ var ( errConnDrain = errors.New("grpc: the connection is drained") // errConnClosing indicates that the connection is closing. errConnClosing = errors.New("grpc: the connection is closing") - // errConnUnavailable indicates that the connection is unavailable. - errConnUnavailable = errors.New("grpc: the connection is unavailable") // errBalancerClosed indicates that the balancer is closed. errBalancerClosed = errors.New("grpc: balancer is closed") // We use an accessor so that minConnectTimeout can be @@ -89,8 +87,6 @@ var ( // errCredentialsConflict indicates that grpc.WithTransportCredentials() // and grpc.WithInsecure() are both called for a connection. errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)") - // errNetworkIO indicates that the connection is down due to some network I/O error. - errNetworkIO = errors.New("grpc: failed with network I/O error") ) const ( @@ -856,14 +852,6 @@ func (ac *addrConn) printf(format string, a ...interface{}) { } } -// errorf records an error in ac's event log, unless ac has been closed. -// REQUIRES ac.mu is held. -func (ac *addrConn) errorf(format string, a ...interface{}) { - if ac.events != nil { - ac.events.Errorf(format, a...) - } -} - // resetTransport recreates a transport to the address for ac. The old // transport will close itself on error or when the clientconn is closed. // The created transport must receive initial settings frame from the server. diff --git a/vertical-pod-autoscaler/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vertical-pod-autoscaler/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index ce135c4d162c..204ba1588bbf 100644 --- a/vertical-pod-autoscaler/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vertical-pod-autoscaler/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -104,7 +104,6 @@ type headerFrame struct { type cleanupStream struct { streamID uint32 - idPtr *uint32 rst bool rstCode http2.ErrCode onWrite func() @@ -138,9 +137,6 @@ type outgoingSettings struct { ss []http2.Setting } -type settingsAck struct { -} - type incomingGoAway struct { } diff --git a/vertical-pod-autoscaler/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vertical-pod-autoscaler/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 9cb84811eb8f..efb7f53ffc32 100644 --- a/vertical-pod-autoscaler/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vertical-pod-autoscaler/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -284,7 +284,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err } // operateHeader takes action on the decoded headers. 
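
In the http2_server.go hunk that follows, the named result of `operateHeaders` is renamed from `close` to `fatal` and every bare `return` becomes an explicit `return false`. With a named bool result, a bare `return` silently yields the zero value, which made it easy to misread which exit paths actually tear down the transport. A toy illustration of the style change, with hypothetical conditions (the real hunk only shows `return false` paths):

```go
package main

import "fmt"

// Before: a named result `close` and bare returns, so each early exit
// implicitly returned false. After: the result is named `fatal` and
// every exit spells the value out.
func handleStream(refused, malformed bool) (fatal bool) {
	if refused {
		return false // stream-level refusal; the transport survives
	}
	if malformed {
		return true // connection-level error; caller closes the transport
	}
	return false
}

func main() {
	fmt.Println(handleStream(true, false))  // false
	fmt.Println(handleStream(false, true))  // true
	fmt.Println(handleStream(false, false)) // false
}
```
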
-func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (close bool) { +func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { streamID := frame.Header().StreamID state := decodeState{serverSide: true} if err := state.decodeHeader(frame); err != nil { @@ -296,7 +296,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( onWrite: func() {}, }) } - return + return false } buf := newRecvBuffer() @@ -350,13 +350,13 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rstCode: http2.ErrCodeRefusedStream, onWrite: func() {}, }) - return + return false } } t.mu.Lock() if t.state != reachable { t.mu.Unlock() - return + return false } if uint32(len(t.activeStreams)) >= t.maxStreams { t.mu.Unlock() @@ -366,7 +366,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rstCode: http2.ErrCodeRefusedStream, onWrite: func() {}, }) - return + return false } if streamID%2 != 1 || streamID <= t.maxStreamID { t.mu.Unlock() @@ -417,7 +417,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( wq: s.wq, }) handle(s) - return + return false } // HandleStreams receives incoming streams using the given handler. This is diff --git a/vertical-pod-autoscaler/vendor/google.golang.org/grpc/internal/transport/log.go b/vertical-pod-autoscaler/vendor/google.golang.org/grpc/internal/transport/log.go index ac8e358c5c8c..879df80c4de7 100644 --- a/vertical-pod-autoscaler/vendor/google.golang.org/grpc/internal/transport/log.go +++ b/vertical-pod-autoscaler/vendor/google.golang.org/grpc/internal/transport/log.go @@ -42,9 +42,3 @@ func errorf(format string, args ...interface{}) { grpclog.Errorf(format, args...) } } - -func fatalf(format string, args ...interface{}) { - if grpclog.V(logLevel) { - grpclog.Fatalf(format, args...) - } -} diff --git a/vertical-pod-autoscaler/vendor/google.golang.org/grpc/internal/transport/transport.go b/vertical-pod-autoscaler/vendor/google.golang.org/grpc/internal/transport/transport.go index 644cf11cf53c..fdf8ad684c52 100644 --- a/vertical-pod-autoscaler/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vertical-pod-autoscaler/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -176,7 +176,6 @@ type Stream struct { buf *recvBuffer trReader io.Reader fc *inFlow - recvQuota uint32 wq *writeQuota // Callback to state application's intentions to read data. 
This diff --git a/vertical-pod-autoscaler/vendor/google.golang.org/grpc/rpc_util.go b/vertical-pod-autoscaler/vendor/google.golang.org/grpc/rpc_util.go index 207f0ee643fe..fa0568302d6c 100644 --- a/vertical-pod-autoscaler/vendor/google.golang.org/grpc/rpc_util.go +++ b/vertical-pod-autoscaler/vendor/google.golang.org/grpc/rpc_util.go @@ -156,13 +156,11 @@ type callInfo struct { compressorType string failFast bool stream *clientStream - traceInfo traceInfo // in trace.go maxReceiveMessageSize *int maxSendMessageSize *int creds credentials.PerRPCCredentials contentSubtype string codec baseCodec - disableRetry bool maxRetryRPCBufferSize int } diff --git a/vertical-pod-autoscaler/vendor/google.golang.org/grpc/tools.go b/vertical-pod-autoscaler/vendor/google.golang.org/grpc/tools.go deleted file mode 100644 index d7ee73bfd6c4..000000000000 --- a/vertical-pod-autoscaler/vendor/google.golang.org/grpc/tools.go +++ /dev/null @@ -1,34 +0,0 @@ -// +build tools - -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// This file exists to cause `go mod` and `go get` to believe these packages -// are dependencies, even though they are not runtime dependencies. This means -// they will appear in our `go.mod` file, but will not be a part of the build -// unless the "tools" build tag is specified. - -package grpc - -import ( - _ "github.com/client9/misspell/cmd/misspell" - _ "github.com/golang/lint/golint" - _ "github.com/golang/protobuf/protoc-gen-go" - _ "golang.org/x/tools/cmd/goimports" - _ "honnef.co/go/tools/cmd/staticcheck" -) diff --git a/vertical-pod-autoscaler/vendor/google.golang.org/grpc/version.go b/vertical-pod-autoscaler/vendor/google.golang.org/grpc/version.go index f384fc0ccd76..a13beb044607 100644 --- a/vertical-pod-autoscaler/vendor/google.golang.org/grpc/version.go +++ b/vertical-pod-autoscaler/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.15.0-dev" +const Version = "1.16.0-dev" diff --git a/vertical-pod-autoscaler/vendor/google.golang.org/grpc/vet.sh b/vertical-pod-autoscaler/vendor/google.golang.org/grpc/vet.sh index a7d1d6adbd6e..f42c034f7abd 100755 --- a/vertical-pod-autoscaler/vendor/google.golang.org/grpc/vet.sh +++ b/vertical-pod-autoscaler/vendor/google.golang.org/grpc/vet.sh @@ -70,8 +70,8 @@ elif [[ "$#" -ne 0 ]]; then fi git ls-files "*.go" | xargs grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" 2>&1 | tee /dev/stderr | (! read) -git ls-files "*.go" | xargs grep -l '"unsafe"' 2>&1 | (! grep -v '_test.go') | tee /dev/stderr | (! read) git ls-files "*.go" | xargs grep -l '"math/rand"' 2>&1 | (! grep -v '^examples\|^stress\|grpcrand') | tee /dev/stderr | (! read) +git ls-files | xargs dirname | sort | uniq | xargs go run go_vet/vet.go | tee /dev/stderr | (! read) gofmt -s -d -l . 2>&1 | tee /dev/stderr | (! read) goimports -l . 2>&1 | tee /dev/stderr | (! read) golint ./... 
2>&1 | (grep -vE "(_mock|\.pb)\.go:" || true) | tee /dev/stderr | (! read) diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/LICENSE b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/LICENSE deleted file mode 100644 index dfd031454603..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2016 Dominik Honnef - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/callgraph/callgraph.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/callgraph/callgraph.go deleted file mode 100644 index d126d8282224..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/callgraph/callgraph.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* - -Package callgraph defines the call graph and various algorithms -and utilities to operate on it. - -A call graph is a labelled directed graph whose nodes represent -functions and whose edge labels represent syntactic function call -sites. The presence of a labelled edge (caller, site, callee) -indicates that caller may call callee at the specified call site. - -A call graph is a multigraph: it may contain multiple edges (caller, -*, callee) connecting the same pair of nodes, so long as the edges -differ by label; this occurs when one function calls another function -from multiple call sites. Also, it may contain multiple edges -(caller, site, *) that differ only by callee; this indicates a -polymorphic call. - -A SOUND call graph is one that overapproximates the dynamic calling -behaviors of the program in all possible executions. One call graph -is more PRECISE than another if it is a smaller overapproximation of -the dynamic behavior. - -All call graphs have a synthetic root node which is responsible for -calling main() and init(). - -Calls to built-in functions (e.g. panic, println) are not represented -in the call graph; they are treated like built-in operators of the -language. - -*/ -package callgraph - -// TODO(adonovan): add a function to eliminate wrappers from the -// callgraph, preserving topology. -// More generally, we could eliminate "uninteresting" nodes such as -// nodes from packages we don't care about. - -import ( - "fmt" - "go/token" - - "honnef.co/go/tools/ssa" -) - -// A Graph represents a call graph. 
-// -// A graph may contain nodes that are not reachable from the root. -// If the call graph is sound, such nodes indicate unreachable -// functions. -// -type Graph struct { - Root *Node // the distinguished root node - Nodes map[*ssa.Function]*Node // all nodes by function -} - -// New returns a new Graph with the specified root node. -func New(root *ssa.Function) *Graph { - g := &Graph{Nodes: make(map[*ssa.Function]*Node)} - g.Root = g.CreateNode(root) - return g -} - -// CreateNode returns the Node for fn, creating it if not present. -func (g *Graph) CreateNode(fn *ssa.Function) *Node { - n, ok := g.Nodes[fn] - if !ok { - n = &Node{Func: fn, ID: len(g.Nodes)} - g.Nodes[fn] = n - } - return n -} - -// A Node represents a node in a call graph. -type Node struct { - Func *ssa.Function // the function this node represents - ID int // 0-based sequence number - In []*Edge // unordered set of incoming call edges (n.In[*].Callee == n) - Out []*Edge // unordered set of outgoing call edges (n.Out[*].Caller == n) -} - -func (n *Node) String() string { - return fmt.Sprintf("n%d:%s", n.ID, n.Func) -} - -// A Edge represents an edge in the call graph. -// -// Site is nil for edges originating in synthetic or intrinsic -// functions, e.g. reflect.Call or the root of the call graph. -type Edge struct { - Caller *Node - Site ssa.CallInstruction - Callee *Node -} - -func (e Edge) String() string { - return fmt.Sprintf("%s --> %s", e.Caller, e.Callee) -} - -func (e Edge) Description() string { - var prefix string - switch e.Site.(type) { - case nil: - return "synthetic call" - case *ssa.Go: - prefix = "concurrent " - case *ssa.Defer: - prefix = "deferred " - } - return prefix + e.Site.Common().Description() -} - -func (e Edge) Pos() token.Pos { - if e.Site == nil { - return token.NoPos - } - return e.Site.Pos() -} - -// AddEdge adds the edge (caller, site, callee) to the call graph. -// Elimination of duplicate edges is the caller's responsibility. -func AddEdge(caller *Node, site ssa.CallInstruction, callee *Node) { - e := &Edge{caller, site, callee} - callee.In = append(callee.In, e) - caller.Out = append(caller.Out, e) -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/callgraph/static/static.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/callgraph/static/static.go deleted file mode 100644 index 3f723a96ded9..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/callgraph/static/static.go +++ /dev/null @@ -1,35 +0,0 @@ -// Package static computes the call graph of a Go program containing -// only static call edges. -package static - -import ( - "honnef.co/go/tools/callgraph" - "honnef.co/go/tools/ssa" - "honnef.co/go/tools/ssa/ssautil" -) - -// CallGraph computes the call graph of the specified program -// considering only static calls. -// -func CallGraph(prog *ssa.Program) *callgraph.Graph { - cg := callgraph.New(nil) // TODO(adonovan) eliminate concept of rooted callgraph - - // TODO(adonovan): opt: use only a single pass over the ssa.Program. - // TODO(adonovan): opt: this is slower than RTA (perhaps because - // the lower precision means so many edges are allocated)! 
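
The Graph/Node/Edge API deleted above is a multigraph: the same caller/callee pair may be linked by several edges as long as the call-site labels differ. A stripped-down, self-contained analogue of how `CreateNode` and `AddEdge` cooperate, using plain strings in place of the `honnef.co/go/tools/ssa` types:

```go
package main

import "fmt"

// Simplified analogue of the deleted callgraph package: nodes keyed by
// function name, with parallel edges allowed when call sites differ.
type Node struct {
	Name string
	Out  []*Edge
	In   []*Edge
}

type Edge struct {
	Caller *Node
	Site   string // call-site label; the real package uses ssa.CallInstruction
	Callee *Node
}

type Graph struct{ Nodes map[string]*Node }

// CreateNode returns the node for name, creating it if not present,
// mirroring Graph.CreateNode above.
func (g *Graph) CreateNode(name string) *Node {
	if n, ok := g.Nodes[name]; ok {
		return n
	}
	n := &Node{Name: name}
	g.Nodes[name] = n
	return n
}

// AddEdge wires the edge into both endpoints, as AddEdge does above;
// deduplication is the caller's responsibility.
func (g *Graph) AddEdge(caller *Node, site string, callee *Node) {
	e := &Edge{caller, site, callee}
	caller.Out = append(caller.Out, e)
	callee.In = append(callee.In, e)
}

func main() {
	g := &Graph{Nodes: map[string]*Node{}}
	mainFn, helper := g.CreateNode("main"), g.CreateNode("helper")
	// Two distinct call sites between the same pair of nodes: a multigraph.
	g.AddEdge(mainFn, "main.go:3", helper)
	g.AddEdge(mainFn, "main.go:7", helper)
	fmt.Println(len(mainFn.Out), len(helper.In)) // 2 2
}
```
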
- for f := range ssautil.AllFunctions(prog) { - fnode := cg.CreateNode(f) - for _, b := range f.Blocks { - for _, instr := range b.Instrs { - if site, ok := instr.(ssa.CallInstruction); ok { - if g := site.Common().StaticCallee(); g != nil { - gnode := cg.CreateNode(g) - callgraph.AddEdge(fnode, site, gnode) - } - } - } - } - } - - return cg -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/callgraph/util.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/callgraph/util.go deleted file mode 100644 index 7aeda964159d..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/callgraph/util.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package callgraph - -import "honnef.co/go/tools/ssa" - -// This file provides various utilities over call graphs, such as -// visitation and path search. - -// CalleesOf returns a new set containing all direct callees of the -// caller node. -// -func CalleesOf(caller *Node) map[*Node]bool { - callees := make(map[*Node]bool) - for _, e := range caller.Out { - callees[e.Callee] = true - } - return callees -} - -// GraphVisitEdges visits all the edges in graph g in depth-first order. -// The edge function is called for each edge in postorder. If it -// returns non-nil, visitation stops and GraphVisitEdges returns that -// value. -// -func GraphVisitEdges(g *Graph, edge func(*Edge) error) error { - seen := make(map[*Node]bool) - var visit func(n *Node) error - visit = func(n *Node) error { - if !seen[n] { - seen[n] = true - for _, e := range n.Out { - if err := visit(e.Callee); err != nil { - return err - } - if err := edge(e); err != nil { - return err - } - } - } - return nil - } - for _, n := range g.Nodes { - if err := visit(n); err != nil { - return err - } - } - return nil -} - -// PathSearch finds an arbitrary path starting at node start and -// ending at some node for which isEnd() returns true. On success, -// PathSearch returns the path as an ordered list of edges; on -// failure, it returns nil. -// -func PathSearch(start *Node, isEnd func(*Node) bool) []*Edge { - stack := make([]*Edge, 0, 32) - seen := make(map[*Node]bool) - var search func(n *Node) []*Edge - search = func(n *Node) []*Edge { - if !seen[n] { - seen[n] = true - if isEnd(n) { - return stack - } - for _, e := range n.Out { - stack = append(stack, e) // push - if found := search(e.Callee); found != nil { - return found - } - stack = stack[:len(stack)-1] // pop - } - } - return nil - } - return search(start) -} - -// DeleteSyntheticNodes removes from call graph g all nodes for -// synthetic functions (except g.Root and package initializers), -// preserving the topology. In effect, calls to synthetic wrappers -// are "inlined". -// -func (g *Graph) DeleteSyntheticNodes() { - // Measurements on the standard library and go.tools show that - // resulting graph has ~15% fewer nodes and 4-8% fewer edges - // than the input. - // - // Inlining a wrapper of in-degree m, out-degree n adds m*n - // and removes m+n edges. Since most wrappers are monomorphic - // (n=1) this results in a slight reduction. Polymorphic - // wrappers (n>1), e.g. from embedding an interface value - // inside a struct to satisfy some interface, cause an - // increase in the graph, but they seem to be uncommon. - - // Hash all existing edges to avoid creating duplicates. 
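
The DeleteSyntheticNodes comment above quantifies the inlining trade-off: replacing a wrapper of in-degree m and out-degree n removes the m+n edges through it and adds m*n direct caller-to-callee edges. A two-line check of that arithmetic:

```go
package main

import "fmt"

// edgeDelta is the net edge count change from inlining a wrapper with
// in-degree m and out-degree n: m*n edges added, m+n removed.
func edgeDelta(m, n int) int { return m*n - (m + n) }

func main() {
	fmt.Println(edgeDelta(3, 1)) // -1: monomorphic wrappers shrink the graph
	fmt.Println(edgeDelta(3, 2)) // +1: polymorphic wrappers can grow it
}
```
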
- edges := make(map[Edge]bool) - for _, cgn := range g.Nodes { - for _, e := range cgn.Out { - edges[*e] = true - } - } - for fn, cgn := range g.Nodes { - if cgn == g.Root || fn.Synthetic == "" || isInit(cgn.Func) { - continue // keep - } - for _, eIn := range cgn.In { - for _, eOut := range cgn.Out { - newEdge := Edge{eIn.Caller, eIn.Site, eOut.Callee} - if edges[newEdge] { - continue // don't add duplicate - } - AddEdge(eIn.Caller, eIn.Site, eOut.Callee) - edges[newEdge] = true - } - } - g.DeleteNode(cgn) - } -} - -func isInit(fn *ssa.Function) bool { - return fn.Pkg != nil && fn.Pkg.Func("init") == fn -} - -// DeleteNode removes node n and its edges from the graph g. -// (NB: not efficient for batch deletion.) -func (g *Graph) DeleteNode(n *Node) { - n.deleteIns() - n.deleteOuts() - delete(g.Nodes, n.Func) -} - -// deleteIns deletes all incoming edges to n. -func (n *Node) deleteIns() { - for _, e := range n.In { - removeOutEdge(e) - } - n.In = nil -} - -// deleteOuts deletes all outgoing edges from n. -func (n *Node) deleteOuts() { - for _, e := range n.Out { - removeInEdge(e) - } - n.Out = nil -} - -// removeOutEdge removes edge.Caller's outgoing edge 'edge'. -func removeOutEdge(edge *Edge) { - caller := edge.Caller - n := len(caller.Out) - for i, e := range caller.Out { - if e == edge { - // Replace it with the final element and shrink the slice. - caller.Out[i] = caller.Out[n-1] - caller.Out[n-1] = nil // aid GC - caller.Out = caller.Out[:n-1] - return - } - } - panic("edge not found: " + edge.String()) -} - -// removeInEdge removes edge.Callee's incoming edge 'edge'. -func removeInEdge(edge *Edge) { - caller := edge.Callee - n := len(caller.In) - for i, e := range caller.In { - if e == edge { - // Replace it with the final element and shrink the slice. - caller.In[i] = caller.In[n-1] - caller.In[n-1] = nil // aid GC - caller.In = caller.In[:n-1] - return - } - } - panic("edge not found: " + edge.String()) -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/cmd/staticcheck/README.md b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/cmd/staticcheck/README.md deleted file mode 100644 index 7fb4dabf64ec..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/cmd/staticcheck/README.md +++ /dev/null @@ -1,16 +0,0 @@ -# staticcheck - -_staticcheck_ is `go vet` on steroids, applying a ton of static analysis -checks you might be used to from tools like ReSharper for C#. - -## Installation - -Staticcheck requires Go 1.6 or later. - - go get honnef.co/go/tools/cmd/staticcheck - -## Documentation - -Detailed documentation can be found on -[staticcheck.io](https://staticcheck.io/docs/staticcheck). - diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/cmd/staticcheck/staticcheck.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/cmd/staticcheck/staticcheck.go deleted file mode 100644 index 70c5c4cb89fb..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/cmd/staticcheck/staticcheck.go +++ /dev/null @@ -1,23 +0,0 @@ -// staticcheck detects a myriad of bugs and inefficiencies in your -// code. 
-package main - -import ( - "os" - - "honnef.co/go/tools/lint/lintutil" - "honnef.co/go/tools/staticcheck" -) - -func main() { - fs := lintutil.FlagSet("staticcheck") - gen := fs.Bool("generated", false, "Check generated code") - fs.Parse(os.Args[1:]) - c := staticcheck.NewChecker() - c.CheckGenerated = *gen - cfg := lintutil.CheckerConfig{ - Checker: c, - ExitNonZero: true, - } - lintutil.ProcessFlagSet([]lintutil.CheckerConfig{cfg}, fs) -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/deprecated/stdlib.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/deprecated/stdlib.go deleted file mode 100644 index b6b217c3e37d..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/deprecated/stdlib.go +++ /dev/null @@ -1,54 +0,0 @@ -package deprecated - -type Deprecation struct { - DeprecatedSince int - AlternativeAvailableSince int -} - -var Stdlib = map[string]Deprecation{ - "image/jpeg.Reader": {4, 0}, - // FIXME(dh): AllowBinary isn't being detected as deprecated - // because the comment has a newline right after "Deprecated:" - "go/build.AllowBinary": {7, 7}, - "(archive/zip.FileHeader).CompressedSize": {1, 1}, - "(archive/zip.FileHeader).UncompressedSize": {1, 1}, - "(go/doc.Package).Bugs": {1, 1}, - "os.SEEK_SET": {7, 7}, - "os.SEEK_CUR": {7, 7}, - "os.SEEK_END": {7, 7}, - "(net.Dialer).Cancel": {7, 7}, - "runtime.CPUProfile": {9, 0}, - "compress/flate.ReadError": {6, 6}, - "compress/flate.WriteError": {6, 6}, - "path/filepath.HasPrefix": {0, 0}, - "(net/http.Transport).Dial": {7, 7}, - "(*net/http.Transport).CancelRequest": {6, 5}, - "net/http.ErrWriteAfterFlush": {7, 0}, - "net/http.ErrHeaderTooLong": {8, 0}, - "net/http.ErrShortBody": {8, 0}, - "net/http.ErrMissingContentLength": {8, 0}, - "net/http/httputil.ErrPersistEOF": {0, 0}, - "net/http/httputil.ErrClosed": {0, 0}, - "net/http/httputil.ErrPipeline": {0, 0}, - "net/http/httputil.ServerConn": {0, 0}, - "net/http/httputil.NewServerConn": {0, 0}, - "net/http/httputil.ClientConn": {0, 0}, - "net/http/httputil.NewClientConn": {0, 0}, - "net/http/httputil.NewProxyClientConn": {0, 0}, - "(net/http.Request).Cancel": {7, 7}, - "(text/template/parse.PipeNode).Line": {1, 1}, - "(text/template/parse.ActionNode).Line": {1, 1}, - "(text/template/parse.BranchNode).Line": {1, 1}, - "(text/template/parse.TemplateNode).Line": {1, 1}, - "database/sql/driver.ColumnConverter": {9, 9}, - "database/sql/driver.Execer": {8, 8}, - "database/sql/driver.Queryer": {8, 8}, - "(database/sql/driver.Conn).Begin": {8, 8}, - "(database/sql/driver.Stmt).Exec": {8, 8}, - "(database/sql/driver.Stmt).Query": {8, 8}, - "syscall.StringByteSlice": {1, 1}, - "syscall.StringBytePtr": {1, 1}, - "syscall.StringSlicePtr": {1, 1}, - "syscall.StringToUTF16": {1, 1}, - "syscall.StringToUTF16Ptr": {1, 1}, -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/functions/concrete.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/functions/concrete.go deleted file mode 100644 index 932acd03edae..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/functions/concrete.go +++ /dev/null @@ -1,56 +0,0 @@ -package functions - -import ( - "go/token" - "go/types" - - "honnef.co/go/tools/ssa" -) - -func concreteReturnTypes(fn *ssa.Function) []*types.Tuple { - res := fn.Signature.Results() - if res == nil { - return nil - } - ifaces := make([]bool, res.Len()) - any := false - for i := 0; i < res.Len(); i++ { - _, ifaces[i] = res.At(i).Type().Underlying().(*types.Interface) - any = any || ifaces[i] - } - if !any { - return 
[]*types.Tuple{res} - } - var out []*types.Tuple - for _, block := range fn.Blocks { - if len(block.Instrs) == 0 { - continue - } - ret, ok := block.Instrs[len(block.Instrs)-1].(*ssa.Return) - if !ok { - continue - } - vars := make([]*types.Var, res.Len()) - for i, v := range ret.Results { - var typ types.Type - if !ifaces[i] { - typ = res.At(i).Type() - } else if mi, ok := v.(*ssa.MakeInterface); ok { - // TODO(dh): if mi.X is a function call that returns - // an interface, call concreteReturnTypes on that - // function (or, really, go through Descriptions, - // avoid infinite recursion etc, just like nil error - // detection) - - // TODO(dh): support Phi nodes - typ = mi.X.Type() - } else { - typ = res.At(i).Type() - } - vars[i] = types.NewParam(token.NoPos, nil, "", typ) - } - out = append(out, types.NewTuple(vars...)) - } - // TODO(dh): deduplicate out - return out -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/functions/functions.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/functions/functions.go deleted file mode 100644 index 83940412975b..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/functions/functions.go +++ /dev/null @@ -1,150 +0,0 @@ -package functions - -import ( - "go/types" - "sync" - - "honnef.co/go/tools/callgraph" - "honnef.co/go/tools/callgraph/static" - "honnef.co/go/tools/ssa" - "honnef.co/go/tools/staticcheck/vrp" -) - -var stdlibDescs = map[string]Description{ - "errors.New": {Pure: true}, - - "fmt.Errorf": {Pure: true}, - "fmt.Sprintf": {Pure: true}, - "fmt.Sprint": {Pure: true}, - - "sort.Reverse": {Pure: true}, - - "strings.Map": {Pure: true}, - "strings.Repeat": {Pure: true}, - "strings.Replace": {Pure: true}, - "strings.Title": {Pure: true}, - "strings.ToLower": {Pure: true}, - "strings.ToLowerSpecial": {Pure: true}, - "strings.ToTitle": {Pure: true}, - "strings.ToTitleSpecial": {Pure: true}, - "strings.ToUpper": {Pure: true}, - "strings.ToUpperSpecial": {Pure: true}, - "strings.Trim": {Pure: true}, - "strings.TrimFunc": {Pure: true}, - "strings.TrimLeft": {Pure: true}, - "strings.TrimLeftFunc": {Pure: true}, - "strings.TrimPrefix": {Pure: true}, - "strings.TrimRight": {Pure: true}, - "strings.TrimRightFunc": {Pure: true}, - "strings.TrimSpace": {Pure: true}, - "strings.TrimSuffix": {Pure: true}, - - "(*net/http.Request).WithContext": {Pure: true}, - - "math/rand.Read": {NilError: true}, - "(*math/rand.Rand).Read": {NilError: true}, -} - -type Description struct { - // The function is known to be pure - Pure bool - // The function is known to be a stub - Stub bool - // The function is known to never return (panics notwithstanding) - Infinite bool - // Variable ranges - Ranges vrp.Ranges - Loops []Loop - // Function returns an error as its last argument, but it is - // always nil - NilError bool - ConcreteReturnTypes []*types.Tuple -} - -type descriptionEntry struct { - ready chan struct{} - result Description -} - -type Descriptions struct { - CallGraph *callgraph.Graph - mu sync.Mutex - cache map[*ssa.Function]*descriptionEntry -} - -func NewDescriptions(prog *ssa.Program) *Descriptions { - return &Descriptions{ - CallGraph: static.CallGraph(prog), - cache: map[*ssa.Function]*descriptionEntry{}, - } -} - -func (d *Descriptions) Get(fn *ssa.Function) Description { - d.mu.Lock() - fd := d.cache[fn] - if fd == nil { - fd = &descriptionEntry{ - ready: make(chan struct{}), - } - d.cache[fn] = fd - d.mu.Unlock() - - { - fd.result = stdlibDescs[fn.RelString(nil)] - fd.result.Pure = fd.result.Pure || d.IsPure(fn) - 
fd.result.Stub = fd.result.Stub || d.IsStub(fn) - fd.result.Infinite = fd.result.Infinite || !terminates(fn) - fd.result.Ranges = vrp.BuildGraph(fn).Solve() - fd.result.Loops = findLoops(fn) - fd.result.NilError = fd.result.NilError || IsNilError(fn) - fd.result.ConcreteReturnTypes = concreteReturnTypes(fn) - } - - close(fd.ready) - } else { - d.mu.Unlock() - <-fd.ready - } - return fd.result -} - -func IsNilError(fn *ssa.Function) bool { - // TODO(dh): This is very simplistic, as we only look for constant - // nil returns. A more advanced approach would work transitively. - // An even more advanced approach would be context-aware and - // determine nil errors based on inputs (e.g. io.WriteString to a - // bytes.Buffer will always return nil, but an io.WriteString to - // an os.File might not). Similarly, an os.File opened for reading - // won't error on Close, but other files will. - res := fn.Signature.Results() - if res.Len() == 0 { - return false - } - last := res.At(res.Len() - 1) - if types.TypeString(last.Type(), nil) != "error" { - return false - } - - if fn.Blocks == nil { - return false - } - for _, block := range fn.Blocks { - if len(block.Instrs) == 0 { - continue - } - ins := block.Instrs[len(block.Instrs)-1] - ret, ok := ins.(*ssa.Return) - if !ok { - continue - } - v := ret.Results[len(ret.Results)-1] - c, ok := v.(*ssa.Const) - if !ok { - return false - } - if !c.IsNil() { - return false - } - } - return true -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/functions/loops.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/functions/loops.go deleted file mode 100644 index 63011cf3ef64..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/functions/loops.go +++ /dev/null @@ -1,50 +0,0 @@ -package functions - -import "honnef.co/go/tools/ssa" - -type Loop map[*ssa.BasicBlock]bool - -func findLoops(fn *ssa.Function) []Loop { - if fn.Blocks == nil { - return nil - } - tree := fn.DomPreorder() - var sets []Loop - for _, h := range tree { - for _, n := range h.Preds { - if !h.Dominates(n) { - continue - } - // n is a back-edge to h - // h is the loop header - if n == h { - sets = append(sets, Loop{n: true}) - continue - } - set := Loop{h: true, n: true} - for _, b := range allPredsBut(n, h, nil) { - set[b] = true - } - sets = append(sets, set) - } - } - return sets -} - -func allPredsBut(b, but *ssa.BasicBlock, list []*ssa.BasicBlock) []*ssa.BasicBlock { -outer: - for _, pred := range b.Preds { - if pred == but { - continue - } - for _, p := range list { - // TODO improve big-o complexity of this function - if pred == p { - continue outer - } - } - list = append(list, pred) - list = allPredsBut(pred, but, list) - } - return list -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/functions/pure.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/functions/pure.go deleted file mode 100644 index 7028eb8c6494..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/functions/pure.go +++ /dev/null @@ -1,123 +0,0 @@ -package functions - -import ( - "go/token" - "go/types" - - "honnef.co/go/tools/callgraph" - "honnef.co/go/tools/lint/lintdsl" - "honnef.co/go/tools/ssa" -) - -// IsStub reports whether a function is a stub. A function is -// considered a stub if it has no instructions or exactly one -// instruction, which must be either returning only constant values or -// a panic. 
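
Per the IsStub doc comment just above, a stub is a function with no instructions, or exactly one instruction that either returns only constants or panics. Some hedged illustrations of what that definition admits; whether a given body compiles to one SSA instruction is an assumption here, not something the snippet verifies:

```go
package stubs

// Stub: the body is a single return of constant values.
func version() (string, int) { return "dev", 0 }

// Stub: the body is a single panic.
func notImplemented() { panic("not implemented") }

// Not a stub: len(s) is evaluated before the return, so the body is
// more than one instruction.
func count(s []int) int { return len(s) }
```
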
-func (d *Descriptions) IsStub(fn *ssa.Function) bool { - if len(fn.Blocks) == 0 { - return true - } - if len(fn.Blocks) > 1 { - return false - } - instrs := lintdsl.FilterDebug(fn.Blocks[0].Instrs) - if len(instrs) != 1 { - return false - } - - switch instrs[0].(type) { - case *ssa.Return: - // Since this is the only instruction, the return value must - // be a constant. We consider all constants as stubs, not just - // the zero value. This does not, unfortunately, cover zero - // initialised structs, as these cause additional - // instructions. - return true - case *ssa.Panic: - return true - default: - return false - } -} - -func (d *Descriptions) IsPure(fn *ssa.Function) bool { - if fn.Signature.Results().Len() == 0 { - // A function with no return values is empty or is doing some - // work we cannot see (for example because of build tags); - // don't consider it pure. - return false - } - - for _, param := range fn.Params { - if _, ok := param.Type().Underlying().(*types.Basic); !ok { - return false - } - } - - if fn.Blocks == nil { - return false - } - checkCall := func(common *ssa.CallCommon) bool { - if common.IsInvoke() { - return false - } - builtin, ok := common.Value.(*ssa.Builtin) - if !ok { - if common.StaticCallee() != fn { - if common.StaticCallee() == nil { - return false - } - // TODO(dh): ideally, IsPure wouldn't be responsible - // for avoiding infinite recursion, but - // FunctionDescriptions would be. - node := d.CallGraph.CreateNode(common.StaticCallee()) - if callgraph.PathSearch(node, func(other *callgraph.Node) bool { - return other.Func == fn - }) != nil { - return false - } - if !d.Get(common.StaticCallee()).Pure { - return false - } - } - } else { - switch builtin.Name() { - case "len", "cap", "make", "new": - default: - return false - } - } - return true - } - for _, b := range fn.Blocks { - for _, ins := range b.Instrs { - switch ins := ins.(type) { - case *ssa.Call: - if !checkCall(ins.Common()) { - return false - } - case *ssa.Defer: - if !checkCall(&ins.Call) { - return false - } - case *ssa.Select: - return false - case *ssa.Send: - return false - case *ssa.Go: - return false - case *ssa.Panic: - return false - case *ssa.Store: - return false - case *ssa.FieldAddr: - return false - case *ssa.UnOp: - if ins.Op == token.MUL || ins.Op == token.AND { - return false - } - } - } - } - return true -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/functions/terminates.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/functions/terminates.go deleted file mode 100644 index 65f9e16dc998..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/functions/terminates.go +++ /dev/null @@ -1,24 +0,0 @@ -package functions - -import "honnef.co/go/tools/ssa" - -// terminates reports whether fn is supposed to return, that is if it -// has at least one theoretic path that returns from the function. -// Explicit panics do not count as terminating. 
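
`terminates`, whose doc comment closes the line above, only asks whether at least one basic block ends in a return; explicit panics and infinite loops do not count. A sketch of the three cases with hypothetical functions:

```go
package term

// Terminates: the empty body still ends in an implicit return.
func noop() {}

// Terminates: one path returns, even though the other loops forever.
func maybe(b bool) int {
	if b {
		return 1
	}
	for {
	}
}

// Does not terminate: every path ends in an infinite loop, never a return.
func spin() {
	for {
	}
}
```
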
-func terminates(fn *ssa.Function) bool { - if fn.Blocks == nil { - // assuming that a function terminates is the conservative - // choice - return true - } - - for _, block := range fn.Blocks { - if len(block.Instrs) == 0 { - continue - } - if _, ok := block.Instrs[len(block.Instrs)-1].(*ssa.Return); ok { - return true - } - } - return false -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/internal/sharedcheck/lint.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/internal/sharedcheck/lint.go deleted file mode 100644 index cbbafbcdf4bc..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/internal/sharedcheck/lint.go +++ /dev/null @@ -1,68 +0,0 @@ -package sharedcheck - -import ( - "go/ast" - "go/types" - - "honnef.co/go/tools/lint" - . "honnef.co/go/tools/lint/lintdsl" - "honnef.co/go/tools/ssa" -) - -func CheckRangeStringRunes(j *lint.Job) { - for _, ssafn := range j.Program.InitialFunctions { - fn := func(node ast.Node) bool { - rng, ok := node.(*ast.RangeStmt) - if !ok || !IsBlank(rng.Key) { - return true - } - - v, _ := ssafn.ValueForExpr(rng.X) - - // Check that we're converting from string to []rune - val, _ := v.(*ssa.Convert) - if val == nil { - return true - } - Tsrc, ok := val.X.Type().(*types.Basic) - if !ok || Tsrc.Kind() != types.String { - return true - } - Tdst, ok := val.Type().(*types.Slice) - if !ok { - return true - } - TdstElem, ok := Tdst.Elem().(*types.Basic) - if !ok || TdstElem.Kind() != types.Int32 { - return true - } - - // Check that the result of the conversion is only used to - // range over - refs := val.Referrers() - if refs == nil { - return true - } - - // Expect two refs: one for obtaining the length of the slice, - // one for accessing the elements - if len(FilterDebug(*refs)) != 2 { - // TODO(dh): right now, we check that only one place - // refers to our slice. This will miss cases such as - // ranging over the slice twice. Ideally, we'd ensure that - // the slice is only used for ranging over (without - // accessing the key), but that is harder to do because in - // SSA form, ranging over a slice looks like an ordinary - // loop with index increments and slice accesses. We'd - // have to look at the associated AST node to check that - // it's a range statement. - return true - } - - j.Errorf(rng, "should range over string, not []rune(string)") - - return true - } - Inspect(ssafn.Syntax(), fn) - } -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/lint/LICENSE b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/lint/LICENSE deleted file mode 100644 index 796130a123a1..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/lint/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2013 The Go Authors. All rights reserved. -Copyright (c) 2016 Dominik Honnef. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/lint/lint.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/lint/lint.go deleted file mode 100644 index a3a22cd0305b..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/lint/lint.go +++ /dev/null @@ -1,543 +0,0 @@ -// Copyright (c) 2013 The Go Authors. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file or at -// https://developers.google.com/open-source/licenses/bsd. - -// Package lint provides the foundation for tools like gosimple. -package lint - -import ( - "fmt" - "go/ast" - "go/build" - "go/token" - "go/types" - "path/filepath" - "sort" - "strings" - "sync" - "unicode" - - "golang.org/x/tools/go/loader" - "honnef.co/go/tools/ssa" - "honnef.co/go/tools/ssa/ssautil" -) - -type Job struct { - Program *Program - - checker string - check string - problems []Problem -} - -type Ignore interface { - Match(p Problem) bool -} - -type LineIgnore struct { - File string - Line int - Checks []string - matched bool - pos token.Pos -} - -func (li *LineIgnore) Match(p Problem) bool { - if p.Position.Filename != li.File || p.Position.Line != li.Line { - return false - } - for _, c := range li.Checks { - if m, _ := filepath.Match(c, p.Check); m { - li.matched = true - return true - } - } - return false -} - -func (li *LineIgnore) String() string { - matched := "not matched" - if li.matched { - matched = "matched" - } - return fmt.Sprintf("%s:%d %s (%s)", li.File, li.Line, strings.Join(li.Checks, ", "), matched) -} - -type FileIgnore struct { - File string - Checks []string -} - -func (fi *FileIgnore) Match(p Problem) bool { - if p.Position.Filename != fi.File { - return false - } - for _, c := range fi.Checks { - if m, _ := filepath.Match(c, p.Check); m { - return true - } - } - return false -} - -type GlobIgnore struct { - Pattern string - Checks []string -} - -func (gi *GlobIgnore) Match(p Problem) bool { - if gi.Pattern != "*" { - pkgpath := p.Package.Path() - if strings.HasSuffix(pkgpath, "_test") { - pkgpath = pkgpath[:len(pkgpath)-len("_test")] - } - name := filepath.Join(pkgpath, filepath.Base(p.Position.Filename)) - if m, _ := filepath.Match(gi.Pattern, name); !m { - return false - } - } - for _, c := range gi.Checks { - if m, _ := filepath.Match(c, p.Check); m { - return true - } - } - return false -} - -type Program struct { - SSA *ssa.Program - Prog *loader.Program - // TODO(dh): Rename to InitialPackages? 
- Packages []*Pkg - InitialFunctions []*ssa.Function - AllFunctions []*ssa.Function - Files []*ast.File - Info *types.Info - GoVersion int - - tokenFileMap map[*token.File]*ast.File - astFileMap map[*ast.File]*Pkg -} - -type Func func(*Job) - -// Problem represents a problem in some source code. -type Problem struct { - pos token.Pos - Position token.Position // position in source file - Text string // the prose that describes the problem - Check string - Checker string - Package *types.Package - Ignored bool -} - -func (p *Problem) String() string { - if p.Check == "" { - return p.Text - } - return fmt.Sprintf("%s (%s)", p.Text, p.Check) -} - -type Checker interface { - Name() string - Prefix() string - Init(*Program) - Funcs() map[string]Func -} - -// A Linter lints Go source code. -type Linter struct { - Checker Checker - Ignores []Ignore - GoVersion int - ReturnIgnored bool - - automaticIgnores []Ignore -} - -func (l *Linter) ignore(p Problem) bool { - ignored := false - for _, ig := range l.automaticIgnores { - // We cannot short-circuit these, as we want to record, for - // each ignore, whether it matched or not. - if ig.Match(p) { - ignored = true - } - } - if ignored { - // no need to execute other ignores if we've already had a - // match. - return true - } - for _, ig := range l.Ignores { - // We can short-circuit here, as we aren't tracking any - // information. - if ig.Match(p) { - return true - } - } - - return false -} - -func (prog *Program) File(node Positioner) *ast.File { - return prog.tokenFileMap[prog.SSA.Fset.File(node.Pos())] -} - -func (j *Job) File(node Positioner) *ast.File { - return j.Program.File(node) -} - -// TODO(dh): switch to sort.Slice when Go 1.9 lands. -type byPosition struct { - fset *token.FileSet - ps []Problem -} - -func (ps byPosition) Len() int { - return len(ps.ps) -} - -func (ps byPosition) Less(i int, j int) bool { - pi, pj := ps.ps[i].Position, ps.ps[j].Position - - if pi.Filename != pj.Filename { - return pi.Filename < pj.Filename - } - if pi.Line != pj.Line { - return pi.Line < pj.Line - } - if pi.Column != pj.Column { - return pi.Column < pj.Column - } - - return ps.ps[i].Text < ps.ps[j].Text -} - -func (ps byPosition) Swap(i int, j int) { - ps.ps[i], ps.ps[j] = ps.ps[j], ps.ps[i] -} - -func parseDirective(s string) (cmd string, args []string) { - if !strings.HasPrefix(s, "//lint:") { - return "", nil - } - s = strings.TrimPrefix(s, "//lint:") - fields := strings.Split(s, " ") - return fields[0], fields[1:] -} - -func (l *Linter) Lint(lprog *loader.Program, conf *loader.Config) []Problem { - ssaprog := ssautil.CreateProgram(lprog, ssa.GlobalDebug) - ssaprog.Build() - pkgMap := map[*ssa.Package]*Pkg{} - var pkgs []*Pkg - for _, pkginfo := range lprog.InitialPackages() { - ssapkg := ssaprog.Package(pkginfo.Pkg) - var bp *build.Package - if len(pkginfo.Files) != 0 { - path := lprog.Fset.Position(pkginfo.Files[0].Pos()).Filename - dir := filepath.Dir(path) - var err error - ctx := conf.Build - if ctx == nil { - ctx = &build.Default - } - bp, err = ctx.ImportDir(dir, 0) - if err != nil { - // shouldn't happen - } - } - pkg := &Pkg{ - Package: ssapkg, - Info: pkginfo, - BuildPkg: bp, - } - pkgMap[ssapkg] = pkg - pkgs = append(pkgs, pkg) - } - prog := &Program{ - SSA: ssaprog, - Prog: lprog, - Packages: pkgs, - Info: &types.Info{}, - GoVersion: l.GoVersion, - tokenFileMap: map[*token.File]*ast.File{}, - astFileMap: map[*ast.File]*Pkg{}, - } - - initial := map[*types.Package]struct{}{} - for _, pkg := range pkgs { - initial[pkg.Info.Pkg] = 
struct{}{} - } - for fn := range ssautil.AllFunctions(ssaprog) { - if fn.Pkg == nil { - continue - } - prog.AllFunctions = append(prog.AllFunctions, fn) - if _, ok := initial[fn.Pkg.Pkg]; ok { - prog.InitialFunctions = append(prog.InitialFunctions, fn) - } - } - for _, pkg := range pkgs { - prog.Files = append(prog.Files, pkg.Info.Files...) - - ssapkg := ssaprog.Package(pkg.Info.Pkg) - for _, f := range pkg.Info.Files { - prog.astFileMap[f] = pkgMap[ssapkg] - } - } - - for _, pkginfo := range lprog.AllPackages { - for _, f := range pkginfo.Files { - tf := lprog.Fset.File(f.Pos()) - prog.tokenFileMap[tf] = f - } - } - - var out []Problem - l.automaticIgnores = nil - for _, pkginfo := range lprog.InitialPackages() { - for _, f := range pkginfo.Files { - cm := ast.NewCommentMap(lprog.Fset, f, f.Comments) - for node, cgs := range cm { - for _, cg := range cgs { - for _, c := range cg.List { - if !strings.HasPrefix(c.Text, "//lint:") { - continue - } - cmd, args := parseDirective(c.Text) - switch cmd { - case "ignore", "file-ignore": - if len(args) < 2 { - // FIXME(dh): this causes duplicated warnings when using megacheck - p := Problem{ - pos: c.Pos(), - Position: prog.DisplayPosition(c.Pos()), - Text: "malformed linter directive; missing the required reason field?", - Check: "", - Checker: l.Checker.Name(), - Package: nil, - } - out = append(out, p) - continue - } - default: - // unknown directive, ignore - continue - } - checks := strings.Split(args[0], ",") - pos := prog.DisplayPosition(node.Pos()) - var ig Ignore - switch cmd { - case "ignore": - ig = &LineIgnore{ - File: pos.Filename, - Line: pos.Line, - Checks: checks, - pos: c.Pos(), - } - case "file-ignore": - ig = &FileIgnore{ - File: pos.Filename, - Checks: checks, - } - } - l.automaticIgnores = append(l.automaticIgnores, ig) - } - } - } - } - } - - sizes := struct { - types int - defs int - uses int - implicits int - selections int - scopes int - }{} - for _, pkg := range pkgs { - sizes.types += len(pkg.Info.Info.Types) - sizes.defs += len(pkg.Info.Info.Defs) - sizes.uses += len(pkg.Info.Info.Uses) - sizes.implicits += len(pkg.Info.Info.Implicits) - sizes.selections += len(pkg.Info.Info.Selections) - sizes.scopes += len(pkg.Info.Info.Scopes) - } - prog.Info.Types = make(map[ast.Expr]types.TypeAndValue, sizes.types) - prog.Info.Defs = make(map[*ast.Ident]types.Object, sizes.defs) - prog.Info.Uses = make(map[*ast.Ident]types.Object, sizes.uses) - prog.Info.Implicits = make(map[ast.Node]types.Object, sizes.implicits) - prog.Info.Selections = make(map[*ast.SelectorExpr]*types.Selection, sizes.selections) - prog.Info.Scopes = make(map[ast.Node]*types.Scope, sizes.scopes) - for _, pkg := range pkgs { - for k, v := range pkg.Info.Info.Types { - prog.Info.Types[k] = v - } - for k, v := range pkg.Info.Info.Defs { - prog.Info.Defs[k] = v - } - for k, v := range pkg.Info.Info.Uses { - prog.Info.Uses[k] = v - } - for k, v := range pkg.Info.Info.Implicits { - prog.Info.Implicits[k] = v - } - for k, v := range pkg.Info.Info.Selections { - prog.Info.Selections[k] = v - } - for k, v := range pkg.Info.Info.Scopes { - prog.Info.Scopes[k] = v - } - } - l.Checker.Init(prog) - - funcs := l.Checker.Funcs() - var keys []string - for k := range funcs { - keys = append(keys, k) - } - sort.Strings(keys) - - var jobs []*Job - for _, k := range keys { - j := &Job{ - Program: prog, - checker: l.Checker.Name(), - check: k, - } - jobs = append(jobs, j) - } - wg := &sync.WaitGroup{} - for _, j := range jobs { - wg.Add(1) - go func(j *Job) { - defer wg.Done() - fn 
:= funcs[j.check] - if fn == nil { - return - } - fn(j) - }(j) - } - wg.Wait() - - for _, j := range jobs { - for _, p := range j.problems { - p.Ignored = l.ignore(p) - if l.ReturnIgnored || !p.Ignored { - out = append(out, p) - } - } - } - - for _, ig := range l.automaticIgnores { - ig, ok := ig.(*LineIgnore) - if !ok { - continue - } - if ig.matched { - continue - } - for _, c := range ig.Checks { - idx := strings.IndexFunc(c, func(r rune) bool { - return unicode.IsNumber(r) - }) - if idx == -1 { - // malformed check name, backing out - continue - } - if c[:idx] != l.Checker.Prefix() { - // not for this checker - continue - } - p := Problem{ - pos: ig.pos, - Position: prog.DisplayPosition(ig.pos), - Text: "this linter directive didn't match anything; should it be removed?", - Check: "", - Checker: l.Checker.Name(), - Package: nil, - } - out = append(out, p) - } - } - - sort.Sort(byPosition{lprog.Fset, out}) - return out -} - -// Pkg represents a package being linted. -type Pkg struct { - *ssa.Package - Info *loader.PackageInfo - BuildPkg *build.Package -} - -type Positioner interface { - Pos() token.Pos -} - -func (prog *Program) DisplayPosition(p token.Pos) token.Position { - // The //line compiler directive can be used to change the file - // name and line numbers associated with code. This can, for - // example, be used by code generation tools. The most prominent - // example is 'go tool cgo', which uses //line directives to refer - // back to the original source code. - // - // In the context of our linters, we need to treat these - // directives differently depending on context. For cgo files, we - // want to honour the directives, so that line numbers are - // adjusted correctly. For all other files, we want to ignore the - // directives, so that problems are reported at their actual - // position and not, for example, a yacc grammar file. This also - // affects the ignore mechanism, since it operates on the position - // information stored within problems. With this implementation, a - // user will ignore foo.go, not foo.y - - pkg := prog.astFileMap[prog.tokenFileMap[prog.Prog.Fset.File(p)]] - bp := pkg.BuildPkg - adjPos := prog.Prog.Fset.Position(p) - if bp == nil { - // couldn't find the package for some reason (deleted? faulty - // file system?) 
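
DisplayPosition's comment above spells out the policy: honour `//line` directives only for cgo files, and report raw positions everywhere else so an ignore applies to foo.go rather than, say, a yacc grammar file. The two behaviours map directly onto go/token's `Position` versus `PositionFor(p, false)`, as this runnable sketch shows (hypothetical source text):

```go
package main

import (
	"fmt"
	"go/parser"
	"go/token"
)

const src = `package p
//line foo.y:100
func f() {}
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pos := f.Decls[0].Pos()
	fmt.Println(fset.Position(pos))           // honours //line: reports foo.y:100
	fmt.Println(fset.PositionFor(pos, false)) // raw position: reports p.go:3
}
```
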
- return adjPos - } - base := filepath.Base(adjPos.Filename) - for _, f := range bp.CgoFiles { - if f == base { - // this is a cgo file, use the adjusted position - return adjPos - } - } - // not a cgo file, ignore //line directives - return prog.Prog.Fset.PositionFor(p, false) -} - -func (j *Job) Errorf(n Positioner, format string, args ...interface{}) *Problem { - tf := j.Program.SSA.Fset.File(n.Pos()) - f := j.Program.tokenFileMap[tf] - pkg := j.Program.astFileMap[f].Pkg - - pos := j.Program.DisplayPosition(n.Pos()) - problem := Problem{ - pos: n.Pos(), - Position: pos, - Text: fmt.Sprintf(format, args...), - Check: j.check, - Checker: j.checker, - Package: pkg, - } - j.problems = append(j.problems, problem) - return &j.problems[len(j.problems)-1] -} - -func (j *Job) NodePackage(node Positioner) *Pkg { - f := j.File(node) - return j.Program.astFileMap[f] -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go deleted file mode 100644 index 9720f366ff73..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go +++ /dev/null @@ -1,282 +0,0 @@ -// Package lintdsl provides helpers for implementing static analysis -// checks. Dot-importing this package is encouraged. -package lintdsl - -import ( - "bytes" - "fmt" - "go/ast" - "go/constant" - "go/printer" - "go/token" - "go/types" - "strings" - - "honnef.co/go/tools/lint" - "honnef.co/go/tools/ssa" -) - -type packager interface { - Package() *ssa.Package -} - -func CallName(call *ssa.CallCommon) string { - if call.IsInvoke() { - return "" - } - switch v := call.Value.(type) { - case *ssa.Function: - fn, ok := v.Object().(*types.Func) - if !ok { - return "" - } - return fn.FullName() - case *ssa.Builtin: - return v.Name() - } - return "" -} - -func IsCallTo(call *ssa.CallCommon, name string) bool { return CallName(call) == name } -func IsType(T types.Type, name string) bool { return types.TypeString(T, nil) == name } - -func FilterDebug(instr []ssa.Instruction) []ssa.Instruction { - var out []ssa.Instruction - for _, ins := range instr { - if _, ok := ins.(*ssa.DebugRef); !ok { - out = append(out, ins) - } - } - return out -} - -func IsExample(fn *ssa.Function) bool { - if !strings.HasPrefix(fn.Name(), "Example") { - return false - } - f := fn.Prog.Fset.File(fn.Pos()) - if f == nil { - return false - } - return strings.HasSuffix(f.Name(), "_test.go") -} - -func IsPointerLike(T types.Type) bool { - switch T := T.Underlying().(type) { - case *types.Interface, *types.Chan, *types.Map, *types.Pointer: - return true - case *types.Basic: - return T.Kind() == types.UnsafePointer - } - return false -} - -func IsGenerated(f *ast.File) bool { - comments := f.Comments - if len(comments) > 0 { - comment := comments[0].Text() - return strings.Contains(comment, "Code generated by") || - strings.Contains(comment, "DO NOT EDIT") - } - return false -} - -func IsIdent(expr ast.Expr, ident string) bool { - id, ok := expr.(*ast.Ident) - return ok && id.Name == ident -} - -// isBlank returns whether id is the blank identifier "_". -// If id == nil, the answer is false. 
-func IsBlank(id ast.Expr) bool { - ident, _ := id.(*ast.Ident) - return ident != nil && ident.Name == "_" -} - -func IsIntLiteral(expr ast.Expr, literal string) bool { - lit, ok := expr.(*ast.BasicLit) - return ok && lit.Kind == token.INT && lit.Value == literal -} - -// Deprecated: use IsIntLiteral instead -func IsZero(expr ast.Expr) bool { - return IsIntLiteral(expr, "0") -} - -func TypeOf(j *lint.Job, expr ast.Expr) types.Type { return j.Program.Info.TypeOf(expr) } -func IsOfType(j *lint.Job, expr ast.Expr, name string) bool { return IsType(TypeOf(j, expr), name) } - -func ObjectOf(j *lint.Job, ident *ast.Ident) types.Object { return j.Program.Info.ObjectOf(ident) } - -func IsInTest(j *lint.Job, node lint.Positioner) bool { - // FIXME(dh): this doesn't work for global variables with - // initializers - f := j.Program.SSA.Fset.File(node.Pos()) - return f != nil && strings.HasSuffix(f.Name(), "_test.go") -} - -func IsInMain(j *lint.Job, node lint.Positioner) bool { - if node, ok := node.(packager); ok { - return node.Package().Pkg.Name() == "main" - } - pkg := j.NodePackage(node) - if pkg == nil { - return false - } - return pkg.Pkg.Name() == "main" -} - -func SelectorName(j *lint.Job, expr *ast.SelectorExpr) string { - sel := j.Program.Info.Selections[expr] - if sel == nil { - if x, ok := expr.X.(*ast.Ident); ok { - pkg, ok := j.Program.Info.ObjectOf(x).(*types.PkgName) - if !ok { - // This shouldn't happen - return fmt.Sprintf("%s.%s", x.Name, expr.Sel.Name) - } - return fmt.Sprintf("%s.%s", pkg.Imported().Path(), expr.Sel.Name) - } - panic(fmt.Sprintf("unsupported selector: %v", expr)) - } - return fmt.Sprintf("(%s).%s", sel.Recv(), sel.Obj().Name()) -} - -func IsNil(j *lint.Job, expr ast.Expr) bool { - return j.Program.Info.Types[expr].IsNil() -} - -func BoolConst(j *lint.Job, expr ast.Expr) bool { - val := j.Program.Info.ObjectOf(expr.(*ast.Ident)).(*types.Const).Val() - return constant.BoolVal(val) -} - -func IsBoolConst(j *lint.Job, expr ast.Expr) bool { - // We explicitly don't support typed bools because more often than - // not, custom bool types are used as binary enums and the - // explicit comparison is desired. - - ident, ok := expr.(*ast.Ident) - if !ok { - return false - } - obj := j.Program.Info.ObjectOf(ident) - c, ok := obj.(*types.Const) - if !ok { - return false - } - basic, ok := c.Type().(*types.Basic) - if !ok { - return false - } - if basic.Kind() != types.UntypedBool && basic.Kind() != types.Bool { - return false - } - return true -} - -func ExprToInt(j *lint.Job, expr ast.Expr) (int64, bool) { - tv := j.Program.Info.Types[expr] - if tv.Value == nil { - return 0, false - } - if tv.Value.Kind() != constant.Int { - return 0, false - } - return constant.Int64Val(tv.Value) -} - -func ExprToString(j *lint.Job, expr ast.Expr) (string, bool) { - val := j.Program.Info.Types[expr].Value - if val == nil { - return "", false - } - if val.Kind() != constant.String { - return "", false - } - return constant.StringVal(val), true -} - -// Dereference returns a pointer's element type; otherwise it returns -// T. -func Dereference(T types.Type) types.Type { - if p, ok := T.Underlying().(*types.Pointer); ok { - return p.Elem() - } - return T -} - -// DereferenceR returns a pointer's element type; otherwise it returns -// T. If the element type is itself a pointer, DereferenceR will be -// applied recursively. 
-func DereferenceR(T types.Type) types.Type { - if p, ok := T.Underlying().(*types.Pointer); ok { - return DereferenceR(p.Elem()) - } - return T -} - -func IsGoVersion(j *lint.Job, minor int) bool { - return j.Program.GoVersion >= minor -} - -func IsCallToAST(j *lint.Job, node ast.Node, name string) bool { - call, ok := node.(*ast.CallExpr) - if !ok { - return false - } - sel, ok := call.Fun.(*ast.SelectorExpr) - if !ok { - return false - } - fn, ok := j.Program.Info.ObjectOf(sel.Sel).(*types.Func) - return ok && fn.FullName() == name -} - -func IsCallToAnyAST(j *lint.Job, node ast.Node, names ...string) bool { - for _, name := range names { - if IsCallToAST(j, node, name) { - return true - } - } - return false -} - -func Render(j *lint.Job, x interface{}) string { - fset := j.Program.SSA.Fset - var buf bytes.Buffer - if err := printer.Fprint(&buf, fset, x); err != nil { - panic(err) - } - return buf.String() -} - -func RenderArgs(j *lint.Job, args []ast.Expr) string { - var ss []string - for _, arg := range args { - ss = append(ss, Render(j, arg)) - } - return strings.Join(ss, ", ") -} - -func Preamble(f *ast.File) string { - cutoff := f.Package - if f.Doc != nil { - cutoff = f.Doc.Pos() - } - var out []string - for _, cmt := range f.Comments { - if cmt.Pos() >= cutoff { - break - } - out = append(out, cmt.Text()) - } - return strings.Join(out, "\n") -} - -func Inspect(node ast.Node, fn func(node ast.Node) bool) { - if node == nil { - return - } - ast.Inspect(node, fn) -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/lint/lintutil/util.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/lint/lintutil/util.go deleted file mode 100644 index 03128d4a0ddc..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/lint/lintutil/util.go +++ /dev/null @@ -1,350 +0,0 @@ -// Copyright (c) 2013 The Go Authors. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file or at -// https://developers.google.com/open-source/licenses/bsd. - -// Package lintutil provides helpers for writing linter command lines. 
-package lintutil - -import ( - "encoding/json" - "errors" - "flag" - "fmt" - "go/build" - "go/parser" - "go/token" - "go/types" - "io" - "os" - "path/filepath" - "strconv" - "strings" - - "honnef.co/go/tools/lint" - "honnef.co/go/tools/version" - - "github.com/kisielk/gotool" - "golang.org/x/tools/go/loader" -) - -type OutputFormatter interface { - Format(p lint.Problem) -} - -type TextOutput struct { - w io.Writer -} - -func (o TextOutput) Format(p lint.Problem) { - fmt.Fprintf(o.w, "%v: %s\n", relativePositionString(p.Position), p.String()) -} - -type JSONOutput struct { - w io.Writer -} - -func (o JSONOutput) Format(p lint.Problem) { - type location struct { - File string `json:"file"` - Line int `json:"line"` - Column int `json:"column"` - } - jp := struct { - Checker string `json:"checker"` - Code string `json:"code"` - Severity string `json:"severity,omitempty"` - Location location `json:"location"` - Message string `json:"message"` - Ignored bool `json:"ignored"` - }{ - p.Checker, - p.Check, - "", // TODO(dh): support severity - location{ - p.Position.Filename, - p.Position.Line, - p.Position.Column, - }, - p.Text, - p.Ignored, - } - _ = json.NewEncoder(o.w).Encode(jp) -} -func usage(name string, flags *flag.FlagSet) func() { - return func() { - fmt.Fprintf(os.Stderr, "Usage of %s:\n", name) - fmt.Fprintf(os.Stderr, "\t%s [flags] # runs on package in current directory\n", name) - fmt.Fprintf(os.Stderr, "\t%s [flags] packages\n", name) - fmt.Fprintf(os.Stderr, "\t%s [flags] directory\n", name) - fmt.Fprintf(os.Stderr, "\t%s [flags] files... # must be a single package\n", name) - fmt.Fprintf(os.Stderr, "Flags:\n") - flags.PrintDefaults() - } -} - -type runner struct { - checker lint.Checker - tags []string - ignores []lint.Ignore - version int - returnIgnored bool -} - -func resolveRelative(importPaths []string, tags []string) (goFiles bool, err error) { - if len(importPaths) == 0 { - return false, nil - } - if strings.HasSuffix(importPaths[0], ".go") { - // User is specifying a package in terms of .go files, don't resolve - return true, nil - } - wd, err := os.Getwd() - if err != nil { - return false, err - } - ctx := build.Default - ctx.BuildTags = tags - for i, path := range importPaths { - bpkg, err := ctx.Import(path, wd, build.FindOnly) - if err != nil { - return false, fmt.Errorf("can't load package %q: %v", path, err) - } - importPaths[i] = bpkg.ImportPath - } - return false, nil -} - -func parseIgnore(s string) ([]lint.Ignore, error) { - var out []lint.Ignore - if len(s) == 0 { - return nil, nil - } - for _, part := range strings.Fields(s) { - p := strings.Split(part, ":") - if len(p) != 2 { - return nil, errors.New("malformed ignore string") - } - path := p[0] - checks := strings.Split(p[1], ",") - out = append(out, &lint.GlobIgnore{Pattern: path, Checks: checks}) - } - return out, nil -} - -type versionFlag int - -func (v *versionFlag) String() string { - return fmt.Sprintf("1.%d", *v) -} - -func (v *versionFlag) Set(s string) error { - if len(s) < 3 { - return errors.New("invalid Go version") - } - if s[0] != '1' { - return errors.New("invalid Go version") - } - if s[1] != '.' 
{ - return errors.New("invalid Go version") - } - i, err := strconv.Atoi(s[2:]) - *v = versionFlag(i) - return err -} - -func (v *versionFlag) Get() interface{} { - return int(*v) -} - -func FlagSet(name string) *flag.FlagSet { - flags := flag.NewFlagSet("", flag.ExitOnError) - flags.Usage = usage(name, flags) - flags.Float64("min_confidence", 0, "Deprecated; use -ignore instead") - flags.String("tags", "", "List of `build tags`") - flags.String("ignore", "", "Space separated list of checks to ignore, in the following format: 'import/path/file.go:Check1,Check2,...' Both the import path and file name sections support globbing, e.g. 'os/exec/*_test.go'") - flags.Bool("tests", true, "Include tests") - flags.Bool("version", false, "Print version and exit") - flags.Bool("show-ignored", false, "Don't filter ignored problems") - flags.String("f", "text", "Output `format` (valid choices are 'text' and 'json')") - - tags := build.Default.ReleaseTags - v := tags[len(tags)-1][2:] - version := new(versionFlag) - if err := version.Set(v); err != nil { - panic(fmt.Sprintf("internal error: %s", err)) - } - - flags.Var(version, "go", "Target Go `version` in the format '1.x'") - return flags -} - -type CheckerConfig struct { - Checker lint.Checker - ExitNonZero bool -} - -func ProcessFlagSet(confs []CheckerConfig, fs *flag.FlagSet) { - tags := fs.Lookup("tags").Value.(flag.Getter).Get().(string) - ignore := fs.Lookup("ignore").Value.(flag.Getter).Get().(string) - tests := fs.Lookup("tests").Value.(flag.Getter).Get().(bool) - goVersion := fs.Lookup("go").Value.(flag.Getter).Get().(int) - format := fs.Lookup("f").Value.(flag.Getter).Get().(string) - printVersion := fs.Lookup("version").Value.(flag.Getter).Get().(bool) - showIgnored := fs.Lookup("show-ignored").Value.(flag.Getter).Get().(bool) - - if printVersion { - version.Print() - os.Exit(0) - } - - var cs []lint.Checker - for _, conf := range confs { - cs = append(cs, conf.Checker) - } - pss, err := Lint(cs, fs.Args(), &Options{ - Tags: strings.Fields(tags), - LintTests: tests, - Ignores: ignore, - GoVersion: goVersion, - ReturnIgnored: showIgnored, - }) - if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - - var ps []lint.Problem - for _, p := range pss { - ps = append(ps, p...) 
- } - - var f OutputFormatter - switch format { - case "text": - f = TextOutput{os.Stdout} - case "json": - f = JSONOutput{os.Stdout} - default: - fmt.Fprintf(os.Stderr, "unsupported output format %q\n", format) - os.Exit(2) - } - - for _, p := range ps { - f.Format(p) - } - for i, p := range pss { - if len(p) != 0 && confs[i].ExitNonZero { - os.Exit(1) - } - } -} - -type Options struct { - Tags []string - LintTests bool - Ignores string - GoVersion int - ReturnIgnored bool -} - -func Lint(cs []lint.Checker, pkgs []string, opt *Options) ([][]lint.Problem, error) { - if opt == nil { - opt = &Options{} - } - ignores, err := parseIgnore(opt.Ignores) - if err != nil { - return nil, err - } - paths := gotool.ImportPaths(pkgs) - goFiles, err := resolveRelative(paths, opt.Tags) - if err != nil { - return nil, err - } - ctx := build.Default - ctx.BuildTags = opt.Tags - hadError := false - conf := &loader.Config{ - Build: &ctx, - ParserMode: parser.ParseComments, - ImportPkgs: map[string]bool{}, - TypeChecker: types.Config{ - Sizes: types.SizesFor(ctx.Compiler, ctx.GOARCH), - Error: func(err error) { - // Only print the first error found - if hadError { - return - } - hadError = true - fmt.Fprintln(os.Stderr, err) - }, - }, - } - if goFiles { - conf.CreateFromFilenames("adhoc", paths...) - } else { - for _, path := range paths { - conf.ImportPkgs[path] = opt.LintTests - } - } - lprog, err := conf.Load() - if err != nil { - return nil, err - } - - var problems [][]lint.Problem - for _, c := range cs { - runner := &runner{ - checker: c, - tags: opt.Tags, - ignores: ignores, - version: opt.GoVersion, - returnIgnored: opt.ReturnIgnored, - } - problems = append(problems, runner.lint(lprog, conf)) - } - return problems, nil -} - -func shortPath(path string) string { - cwd, err := os.Getwd() - if err != nil { - return path - } - if rel, err := filepath.Rel(cwd, path); err == nil && len(rel) < len(path) { - return rel - } - return path -} - -func relativePositionString(pos token.Position) string { - s := shortPath(pos.Filename) - if pos.IsValid() { - if s != "" { - s += ":" - } - s += fmt.Sprintf("%d:%d", pos.Line, pos.Column) - } - if s == "" { - s = "-" - } - return s -} - -func ProcessArgs(name string, cs []CheckerConfig, args []string) { - flags := FlagSet(name) - flags.Parse(args) - - ProcessFlagSet(cs, flags) -} - -func (runner *runner) lint(lprog *loader.Program, conf *loader.Config) []lint.Problem { - l := &lint.Linter{ - Checker: runner.checker, - Ignores: runner.ignores, - GoVersion: runner.version, - ReturnIgnored: runner.returnIgnored, - } - return l.Lint(lprog, conf) -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/LICENSE b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/LICENSE deleted file mode 100644 index aee48041e113..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. -Copyright (c) 2016 Dominik Honnef. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. 
nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/blockopt.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/blockopt.go deleted file mode 100644 index 22c9a4c0d420..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/blockopt.go +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssa - -// Simple block optimizations to simplify the control flow graph. - -// TODO(adonovan): opt: instead of creating several "unreachable" blocks -// per function in the Builder, reuse a single one (e.g. at Blocks[1]) -// to reduce garbage. - -import ( - "fmt" - "os" -) - -// If true, perform sanity checking and show progress at each -// successive iteration of optimizeBlocks. Very verbose. -const debugBlockOpt = false - -// markReachable sets Index=-1 for all blocks reachable from b. -func markReachable(b *BasicBlock) { - b.Index = -1 - for _, succ := range b.Succs { - if succ.Index == 0 { - markReachable(succ) - } - } -} - -func DeleteUnreachableBlocks(f *Function) { - deleteUnreachableBlocks(f) -} - -// deleteUnreachableBlocks marks all reachable blocks of f and -// eliminates (nils) all others, including possibly cyclic subgraphs. -// -func deleteUnreachableBlocks(f *Function) { - const white, black = 0, -1 - // We borrow b.Index temporarily as the mark bit. - for _, b := range f.Blocks { - b.Index = white - } - markReachable(f.Blocks[0]) - if f.Recover != nil { - markReachable(f.Recover) - } - for i, b := range f.Blocks { - if b.Index == white { - for _, c := range b.Succs { - if c.Index == black { - c.removePred(b) // delete white->black edge - } - } - if debugBlockOpt { - fmt.Fprintln(os.Stderr, "unreachable", b) - } - f.Blocks[i] = nil // delete b - } - } - f.removeNilBlocks() -} - -// jumpThreading attempts to apply simple jump-threading to block b, -// in which a->b->c become a->c if b is just a Jump. -// The result is true if the optimization was applied. -// -func jumpThreading(f *Function, b *BasicBlock) bool { - if b.Index == 0 { - return false // don't apply to entry block - } - if b.Instrs == nil { - return false - } - if _, ok := b.Instrs[0].(*Jump); !ok { - return false // not just a jump - } - c := b.Succs[0] - if c == b { - return false // don't apply to degenerate jump-to-self. - } - if c.hasPhi() { - return false // not sound without more effort - } - for j, a := range b.Preds { - a.replaceSucc(b, c) - - // If a now has two edges to c, replace its degenerate If by Jump. 
- if len(a.Succs) == 2 && a.Succs[0] == c && a.Succs[1] == c { - jump := new(Jump) - jump.setBlock(a) - a.Instrs[len(a.Instrs)-1] = jump - a.Succs = a.Succs[:1] - c.removePred(b) - } else { - if j == 0 { - c.replacePred(b, a) - } else { - c.Preds = append(c.Preds, a) - } - } - - if debugBlockOpt { - fmt.Fprintln(os.Stderr, "jumpThreading", a, b, c) - } - } - f.Blocks[b.Index] = nil // delete b - return true -} - -// fuseBlocks attempts to apply the block fusion optimization to block -// a, in which a->b becomes ab if len(a.Succs)==len(b.Preds)==1. -// The result is true if the optimization was applied. -// -func fuseBlocks(f *Function, a *BasicBlock) bool { - if len(a.Succs) != 1 { - return false - } - b := a.Succs[0] - if len(b.Preds) != 1 { - return false - } - - // Degenerate &&/|| ops may result in a straight-line CFG - // containing φ-nodes. (Ideally we'd replace such them with - // their sole operand but that requires Referrers, built later.) - if b.hasPhi() { - return false // not sound without further effort - } - - // Eliminate jump at end of A, then copy all of B across. - a.Instrs = append(a.Instrs[:len(a.Instrs)-1], b.Instrs...) - for _, instr := range b.Instrs { - instr.setBlock(a) - } - - // A inherits B's successors - a.Succs = append(a.succs2[:0], b.Succs...) - - // Fix up Preds links of all successors of B. - for _, c := range b.Succs { - c.replacePred(b, a) - } - - if debugBlockOpt { - fmt.Fprintln(os.Stderr, "fuseBlocks", a, b) - } - - f.Blocks[b.Index] = nil // delete b - return true -} - -func OptimizeBlocks(f *Function) { - optimizeBlocks(f) -} - -// optimizeBlocks() performs some simple block optimizations on a -// completed function: dead block elimination, block fusion, jump -// threading. -// -func optimizeBlocks(f *Function) { - deleteUnreachableBlocks(f) - - // Loop until no further progress. - changed := true - for changed { - changed = false - - if debugBlockOpt { - f.WriteTo(os.Stderr) - mustSanityCheck(f, nil) - } - - for _, b := range f.Blocks { - // f.Blocks will temporarily contain nils to indicate - // deleted blocks; we remove them at the end. - if b == nil { - continue - } - - // Fuse blocks. b->c becomes bc. - if fuseBlocks(f, b) { - changed = true - } - - // a->b->c becomes a->c if b contains only a Jump. - if jumpThreading(f, b) { - changed = true - continue // (b was disconnected) - } - } - } - f.removeNilBlocks() -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/builder.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/builder.go deleted file mode 100644 index 602b27188e21..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/builder.go +++ /dev/null @@ -1,2379 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssa - -// This file implements the BUILD phase of SSA construction. -// -// SSA construction has two phases, CREATE and BUILD. In the CREATE phase -// (create.go), all packages are constructed and type-checked and -// definitions of all package members are created, method-sets are -// computed, and wrapper methods are synthesized. -// ssa.Packages are created in arbitrary order. -// -// In the BUILD phase (builder.go), the builder traverses the AST of -// each Go source function and generates SSA instructions for the -// function body. 
Initializer expressions for package-level variables -// are emitted to the package's init() function in the order specified -// by go/types.Info.InitOrder, then code for each function in the -// package is generated in lexical order. -// The BUILD phases for distinct packages are independent and are -// executed in parallel. -// -// TODO(adonovan): indeed, building functions is now embarrassingly parallel. -// Audit for concurrency then benchmark using more goroutines. -// -// The builder's and Program's indices (maps) are populated and -// mutated during the CREATE phase, but during the BUILD phase they -// remain constant. The sole exception is Prog.methodSets and its -// related maps, which are protected by a dedicated mutex. - -import ( - "fmt" - "go/ast" - exact "go/constant" - "go/token" - "go/types" - "os" - "sync" -) - -type opaqueType struct { - types.Type - name string -} - -func (t *opaqueType) String() string { return t.name } - -var ( - varOk = newVar("ok", tBool) - varIndex = newVar("index", tInt) - - // Type constants. - tBool = types.Typ[types.Bool] - tByte = types.Typ[types.Byte] - tInt = types.Typ[types.Int] - tInvalid = types.Typ[types.Invalid] - tString = types.Typ[types.String] - tUntypedNil = types.Typ[types.UntypedNil] - tRangeIter = &opaqueType{nil, "iter"} // the type of all "range" iterators - tEface = types.NewInterface(nil, nil).Complete() - - // SSA Value constants. - vZero = intConst(0) - vOne = intConst(1) - vTrue = NewConst(exact.MakeBool(true), tBool) -) - -// builder holds state associated with the package currently being built. -// Its methods contain all the logic for AST-to-SSA conversion. -type builder struct{} - -// cond emits to fn code to evaluate boolean condition e and jump -// to t or f depending on its value, performing various simplifications. -// -// Postcondition: fn.currentBlock is nil. -// -func (b *builder) cond(fn *Function, e ast.Expr, t, f *BasicBlock) { - switch e := e.(type) { - case *ast.ParenExpr: - b.cond(fn, e.X, t, f) - return - - case *ast.BinaryExpr: - switch e.Op { - case token.LAND: - ltrue := fn.newBasicBlock("cond.true") - b.cond(fn, e.X, ltrue, f) - fn.currentBlock = ltrue - b.cond(fn, e.Y, t, f) - return - - case token.LOR: - lfalse := fn.newBasicBlock("cond.false") - b.cond(fn, e.X, t, lfalse) - fn.currentBlock = lfalse - b.cond(fn, e.Y, t, f) - return - } - - case *ast.UnaryExpr: - if e.Op == token.NOT { - b.cond(fn, e.X, f, t) - return - } - } - - // A traditional compiler would simplify "if false" (etc) here - // but we do not, for better fidelity to the source code. - // - // The value of a constant condition may be platform-specific, - // and may cause blocks that are reachable in some configuration - // to be hidden from subsequent analyses such as bug-finding tools. - emitIf(fn, b.expr(fn, e), t, f) -} - -// logicalBinop emits code to fn to evaluate e, a &&- or -// ||-expression whose reified boolean value is wanted. -// The value is returned. -// -func (b *builder) logicalBinop(fn *Function, e *ast.BinaryExpr) Value { - rhs := fn.newBasicBlock("binop.rhs") - done := fn.newBasicBlock("binop.done") - - // T(e) = T(e.X) = T(e.Y) after untyped constants have been - // eliminated. - // TODO(adonovan): not true; MyBool==MyBool yields UntypedBool. 
- t := fn.Pkg.typeOf(e) - - var short Value // value of the short-circuit path - switch e.Op { - case token.LAND: - b.cond(fn, e.X, rhs, done) - short = NewConst(exact.MakeBool(false), t) - - case token.LOR: - b.cond(fn, e.X, done, rhs) - short = NewConst(exact.MakeBool(true), t) - } - - // Is rhs unreachable? - if rhs.Preds == nil { - // Simplify false&&y to false, true||y to true. - fn.currentBlock = done - return short - } - - // Is done unreachable? - if done.Preds == nil { - // Simplify true&&y (or false||y) to y. - fn.currentBlock = rhs - return b.expr(fn, e.Y) - } - - // All edges from e.X to done carry the short-circuit value. - var edges []Value - for range done.Preds { - edges = append(edges, short) - } - - // The edge from e.Y to done carries the value of e.Y. - fn.currentBlock = rhs - edges = append(edges, b.expr(fn, e.Y)) - emitJump(fn, done) - fn.currentBlock = done - - phi := &Phi{Edges: edges, Comment: e.Op.String()} - phi.pos = e.OpPos - phi.typ = t - return done.emit(phi) -} - -// exprN lowers a multi-result expression e to SSA form, emitting code -// to fn and returning a single Value whose type is a *types.Tuple. -// The caller must access the components via Extract. -// -// Multi-result expressions include CallExprs in a multi-value -// assignment or return statement, and "value,ok" uses of -// TypeAssertExpr, IndexExpr (when X is a map), and UnaryExpr (when Op -// is token.ARROW). -// -func (b *builder) exprN(fn *Function, e ast.Expr) Value { - typ := fn.Pkg.typeOf(e).(*types.Tuple) - switch e := e.(type) { - case *ast.ParenExpr: - return b.exprN(fn, e.X) - - case *ast.CallExpr: - // Currently, no built-in function nor type conversion - // has multiple results, so we can avoid some of the - // cases for single-valued CallExpr. - var c Call - b.setCall(fn, e, &c.Call) - c.typ = typ - return fn.emit(&c) - - case *ast.IndexExpr: - mapt := fn.Pkg.typeOf(e.X).Underlying().(*types.Map) - lookup := &Lookup{ - X: b.expr(fn, e.X), - Index: emitConv(fn, b.expr(fn, e.Index), mapt.Key()), - CommaOk: true, - } - lookup.setType(typ) - lookup.setPos(e.Lbrack) - return fn.emit(lookup) - - case *ast.TypeAssertExpr: - return emitTypeTest(fn, b.expr(fn, e.X), typ.At(0).Type(), e.Lparen) - - case *ast.UnaryExpr: // must be receive <- - unop := &UnOp{ - Op: token.ARROW, - X: b.expr(fn, e.X), - CommaOk: true, - } - unop.setType(typ) - unop.setPos(e.OpPos) - return fn.emit(unop) - } - panic(fmt.Sprintf("exprN(%T) in %s", e, fn)) -} - -// builtin emits to fn SSA instructions to implement a call to the -// built-in function obj with the specified arguments -// and return type. It returns the value defined by the result. -// -// The result is nil if no special handling was required; in this case -// the caller should treat this like an ordinary library function -// call. 
-// -func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ types.Type, pos token.Pos) Value { - switch obj.Name() { - case "make": - switch typ.Underlying().(type) { - case *types.Slice: - n := b.expr(fn, args[1]) - m := n - if len(args) == 3 { - m = b.expr(fn, args[2]) - } - if m, ok := m.(*Const); ok { - // treat make([]T, n, m) as new([m]T)[:n] - cap := m.Int64() - at := types.NewArray(typ.Underlying().(*types.Slice).Elem(), cap) - alloc := emitNew(fn, at, pos) - alloc.Comment = "makeslice" - v := &Slice{ - X: alloc, - High: n, - } - v.setPos(pos) - v.setType(typ) - return fn.emit(v) - } - v := &MakeSlice{ - Len: n, - Cap: m, - } - v.setPos(pos) - v.setType(typ) - return fn.emit(v) - - case *types.Map: - var res Value - if len(args) == 2 { - res = b.expr(fn, args[1]) - } - v := &MakeMap{Reserve: res} - v.setPos(pos) - v.setType(typ) - return fn.emit(v) - - case *types.Chan: - var sz Value = vZero - if len(args) == 2 { - sz = b.expr(fn, args[1]) - } - v := &MakeChan{Size: sz} - v.setPos(pos) - v.setType(typ) - return fn.emit(v) - } - - case "new": - alloc := emitNew(fn, deref(typ), pos) - alloc.Comment = "new" - return alloc - - case "len", "cap": - // Special case: len or cap of an array or *array is - // based on the type, not the value which may be nil. - // We must still evaluate the value, though. (If it - // was side-effect free, the whole call would have - // been constant-folded.) - t := deref(fn.Pkg.typeOf(args[0])).Underlying() - if at, ok := t.(*types.Array); ok { - b.expr(fn, args[0]) // for effects only - return intConst(at.Len()) - } - // Otherwise treat as normal. - - case "panic": - fn.emit(&Panic{ - X: emitConv(fn, b.expr(fn, args[0]), tEface), - pos: pos, - }) - fn.currentBlock = fn.newBasicBlock("unreachable") - return vTrue // any non-nil Value will do - } - return nil // treat all others as a regular function call -} - -// addr lowers a single-result addressable expression e to SSA form, -// emitting code to fn and returning the location (an lvalue) defined -// by the expression. -// -// If escaping is true, addr marks the base variable of the -// addressable expression e as being a potentially escaping pointer -// value. For example, in this code: -// -// a := A{ -// b: [1]B{B{c: 1}} -// } -// return &a.b[0].c -// -// the application of & causes a.b[0].c to have its address taken, -// which means that ultimately the local variable a must be -// heap-allocated. This is a simple but very conservative escape -// analysis. -// -// Operations forming potentially escaping pointers include: -// - &x, including when implicit in method call or composite literals. -// - a[:] iff a is an array (not *array) -// - references to variables in lexically enclosing functions. 
-// -func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) lvalue { - switch e := e.(type) { - case *ast.Ident: - if isBlankIdent(e) { - return blank{} - } - obj := fn.Pkg.objectOf(e) - v := fn.Prog.packageLevelValue(obj) // var (address) - if v == nil { - v = fn.lookup(obj, escaping) - } - return &address{addr: v, pos: e.Pos(), expr: e} - - case *ast.CompositeLit: - t := deref(fn.Pkg.typeOf(e)) - var v *Alloc - if escaping { - v = emitNew(fn, t, e.Lbrace) - } else { - v = fn.addLocal(t, e.Lbrace) - } - v.Comment = "complit" - var sb storebuf - b.compLit(fn, v, e, true, &sb) - sb.emit(fn) - return &address{addr: v, pos: e.Lbrace, expr: e} - - case *ast.ParenExpr: - return b.addr(fn, e.X, escaping) - - case *ast.SelectorExpr: - sel, ok := fn.Pkg.info.Selections[e] - if !ok { - // qualified identifier - return b.addr(fn, e.Sel, escaping) - } - if sel.Kind() != types.FieldVal { - panic(sel) - } - wantAddr := true - v := b.receiver(fn, e.X, wantAddr, escaping, sel) - last := len(sel.Index()) - 1 - return &address{ - addr: emitFieldSelection(fn, v, sel.Index()[last], true, e.Sel), - pos: e.Sel.Pos(), - expr: e.Sel, - } - - case *ast.IndexExpr: - var x Value - var et types.Type - switch t := fn.Pkg.typeOf(e.X).Underlying().(type) { - case *types.Array: - x = b.addr(fn, e.X, escaping).address(fn) - et = types.NewPointer(t.Elem()) - case *types.Pointer: // *array - x = b.expr(fn, e.X) - et = types.NewPointer(t.Elem().Underlying().(*types.Array).Elem()) - case *types.Slice: - x = b.expr(fn, e.X) - et = types.NewPointer(t.Elem()) - case *types.Map: - return &element{ - m: b.expr(fn, e.X), - k: emitConv(fn, b.expr(fn, e.Index), t.Key()), - t: t.Elem(), - pos: e.Lbrack, - } - default: - panic("unexpected container type in IndexExpr: " + t.String()) - } - v := &IndexAddr{ - X: x, - Index: emitConv(fn, b.expr(fn, e.Index), tInt), - } - v.setPos(e.Lbrack) - v.setType(et) - return &address{addr: fn.emit(v), pos: e.Lbrack, expr: e} - - case *ast.StarExpr: - return &address{addr: b.expr(fn, e.X), pos: e.Star, expr: e} - } - - panic(fmt.Sprintf("unexpected address expression: %T", e)) -} - -type store struct { - lhs lvalue - rhs Value -} - -type storebuf struct{ stores []store } - -func (sb *storebuf) store(lhs lvalue, rhs Value) { - sb.stores = append(sb.stores, store{lhs, rhs}) -} - -func (sb *storebuf) emit(fn *Function) { - for _, s := range sb.stores { - s.lhs.store(fn, s.rhs) - } -} - -// assign emits to fn code to initialize the lvalue loc with the value -// of expression e. If isZero is true, assign assumes that loc holds -// the zero value for its type. -// -// This is equivalent to loc.store(fn, b.expr(fn, e)), but may generate -// better code in some cases, e.g., for composite literals in an -// addressable location. -// -// If sb is not nil, assign generates code to evaluate expression e, but -// not to update loc. Instead, the necessary stores are appended to the -// storebuf sb so that they can be executed later. This allows correct -// in-place update of existing variables when the RHS is a composite -// literal that may reference parts of the LHS. -// -func (b *builder) assign(fn *Function, loc lvalue, e ast.Expr, isZero bool, sb *storebuf) { - // Can we initialize it in place? - if e, ok := unparen(e).(*ast.CompositeLit); ok { - // A CompositeLit never evaluates to a pointer, - // so if the type of the location is a pointer, - // an &-operation is implied. 
- if _, ok := loc.(blank); !ok { // avoid calling blank.typ() - if isPointer(loc.typ()) { - ptr := b.addr(fn, e, true).address(fn) - // copy address - if sb != nil { - sb.store(loc, ptr) - } else { - loc.store(fn, ptr) - } - return - } - } - - if _, ok := loc.(*address); ok { - if isInterface(loc.typ()) { - // e.g. var x interface{} = T{...} - // Can't in-place initialize an interface value. - // Fall back to copying. - } else { - // x = T{...} or x := T{...} - addr := loc.address(fn) - if sb != nil { - b.compLit(fn, addr, e, isZero, sb) - } else { - var sb storebuf - b.compLit(fn, addr, e, isZero, &sb) - sb.emit(fn) - } - - // Subtle: emit debug ref for aggregate types only; - // slice and map are handled by store ops in compLit. - switch loc.typ().Underlying().(type) { - case *types.Struct, *types.Array: - emitDebugRef(fn, e, addr, true) - } - - return - } - } - } - - // simple case: just copy - rhs := b.expr(fn, e) - if sb != nil { - sb.store(loc, rhs) - } else { - loc.store(fn, rhs) - } -} - -// expr lowers a single-result expression e to SSA form, emitting code -// to fn and returning the Value defined by the expression. -// -func (b *builder) expr(fn *Function, e ast.Expr) Value { - e = unparen(e) - - tv := fn.Pkg.info.Types[e] - - // Is expression a constant? - if tv.Value != nil { - return NewConst(tv.Value, tv.Type) - } - - var v Value - if tv.Addressable() { - // Prefer pointer arithmetic ({Index,Field}Addr) followed - // by Load over subelement extraction (e.g. Index, Field), - // to avoid large copies. - v = b.addr(fn, e, false).load(fn) - } else { - v = b.expr0(fn, e, tv) - } - if fn.debugInfo() { - emitDebugRef(fn, e, v, false) - } - return v -} - -func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { - switch e := e.(type) { - case *ast.BasicLit: - panic("non-constant BasicLit") // unreachable - - case *ast.FuncLit: - fn2 := &Function{ - name: fmt.Sprintf("%s$%d", fn.Name(), 1+len(fn.AnonFuncs)), - Signature: fn.Pkg.typeOf(e.Type).Underlying().(*types.Signature), - pos: e.Type.Func, - parent: fn, - Pkg: fn.Pkg, - Prog: fn.Prog, - syntax: e, - } - fn.AnonFuncs = append(fn.AnonFuncs, fn2) - b.buildFunction(fn2) - if fn2.FreeVars == nil { - return fn2 - } - v := &MakeClosure{Fn: fn2} - v.setType(tv.Type) - for _, fv := range fn2.FreeVars { - v.Bindings = append(v.Bindings, fv.outer) - fv.outer = nil - } - return fn.emit(v) - - case *ast.TypeAssertExpr: // single-result form only - return emitTypeAssert(fn, b.expr(fn, e.X), tv.Type, e.Lparen) - - case *ast.CallExpr: - if fn.Pkg.info.Types[e.Fun].IsType() { - // Explicit type conversion, e.g. string(x) or big.Int(x) - x := b.expr(fn, e.Args[0]) - y := emitConv(fn, x, tv.Type) - if y != x { - switch y := y.(type) { - case *Convert: - y.pos = e.Lparen - case *ChangeType: - y.pos = e.Lparen - case *MakeInterface: - y.pos = e.Lparen - } - } - return y - } - // Call to "intrinsic" built-ins, e.g. new, make, panic. - if id, ok := unparen(e.Fun).(*ast.Ident); ok { - if obj, ok := fn.Pkg.info.Uses[id].(*types.Builtin); ok { - if v := b.builtin(fn, obj, e.Args, tv.Type, e.Lparen); v != nil { - return v - } - } - } - // Regular function call. - var v Call - b.setCall(fn, e, &v.Call) - v.setType(tv.Type) - return fn.emit(&v) - - case *ast.UnaryExpr: - switch e.Op { - case token.AND: // &X --- potentially escaping. - addr := b.addr(fn, e.X, true) - if _, ok := unparen(e.X).(*ast.StarExpr); ok { - // &*p must panic if p is nil (http://golang.org/s/go12nil). 
- // For simplicity, we'll just (suboptimally) rely - // on the side effects of a load. - // TODO(adonovan): emit dedicated nilcheck. - addr.load(fn) - } - return addr.address(fn) - case token.ADD: - return b.expr(fn, e.X) - case token.NOT, token.ARROW, token.SUB, token.XOR: // ! <- - ^ - v := &UnOp{ - Op: e.Op, - X: b.expr(fn, e.X), - } - v.setPos(e.OpPos) - v.setType(tv.Type) - return fn.emit(v) - default: - panic(e.Op) - } - - case *ast.BinaryExpr: - switch e.Op { - case token.LAND, token.LOR: - return b.logicalBinop(fn, e) - case token.SHL, token.SHR: - fallthrough - case token.ADD, token.SUB, token.MUL, token.QUO, token.REM, token.AND, token.OR, token.XOR, token.AND_NOT: - return emitArith(fn, e.Op, b.expr(fn, e.X), b.expr(fn, e.Y), tv.Type, e.OpPos) - - case token.EQL, token.NEQ, token.GTR, token.LSS, token.LEQ, token.GEQ: - cmp := emitCompare(fn, e.Op, b.expr(fn, e.X), b.expr(fn, e.Y), e.OpPos) - // The type of x==y may be UntypedBool. - return emitConv(fn, cmp, DefaultType(tv.Type)) - default: - panic("illegal op in BinaryExpr: " + e.Op.String()) - } - - case *ast.SliceExpr: - var low, high, max Value - var x Value - switch fn.Pkg.typeOf(e.X).Underlying().(type) { - case *types.Array: - // Potentially escaping. - x = b.addr(fn, e.X, true).address(fn) - case *types.Basic, *types.Slice, *types.Pointer: // *array - x = b.expr(fn, e.X) - default: - panic("unreachable") - } - if e.High != nil { - high = b.expr(fn, e.High) - } - if e.Low != nil { - low = b.expr(fn, e.Low) - } - if e.Slice3 { - max = b.expr(fn, e.Max) - } - v := &Slice{ - X: x, - Low: low, - High: high, - Max: max, - } - v.setPos(e.Lbrack) - v.setType(tv.Type) - return fn.emit(v) - - case *ast.Ident: - obj := fn.Pkg.info.Uses[e] - // Universal built-in or nil? - switch obj := obj.(type) { - case *types.Builtin: - return &Builtin{name: obj.Name(), sig: tv.Type.(*types.Signature)} - case *types.Nil: - return nilConst(tv.Type) - } - // Package-level func or var? - if v := fn.Prog.packageLevelValue(obj); v != nil { - if _, ok := obj.(*types.Var); ok { - return emitLoad(fn, v) // var (address) - } - return v // (func) - } - // Local var. - return emitLoad(fn, fn.lookup(obj, false)) // var (address) - - case *ast.SelectorExpr: - sel, ok := fn.Pkg.info.Selections[e] - if !ok { - // qualified identifier - return b.expr(fn, e.Sel) - } - switch sel.Kind() { - case types.MethodExpr: - // (*T).f or T.f, the method f from the method-set of type T. - // The result is a "thunk". - return emitConv(fn, makeThunk(fn.Prog, sel), tv.Type) - - case types.MethodVal: - // e.f where e is an expression and f is a method. - // The result is a "bound". - obj := sel.Obj().(*types.Func) - rt := recvType(obj) - wantAddr := isPointer(rt) - escaping := true - v := b.receiver(fn, e.X, wantAddr, escaping, sel) - if isInterface(rt) { - // If v has interface type I, - // we must emit a check that v is non-nil. - // We use: typeassert v.(I). - emitTypeAssert(fn, v, rt, token.NoPos) - } - c := &MakeClosure{ - Fn: makeBound(fn.Prog, obj), - Bindings: []Value{v}, - } - c.setPos(e.Sel.Pos()) - c.setType(tv.Type) - return fn.emit(c) - - case types.FieldVal: - indices := sel.Index() - last := len(indices) - 1 - v := b.expr(fn, e.X) - v = emitImplicitSelections(fn, v, indices[:last]) - v = emitFieldSelection(fn, v, indices[last], false, e.Sel) - return v - } - - panic("unexpected expression-relative selector") - - case *ast.IndexExpr: - switch t := fn.Pkg.typeOf(e.X).Underlying().(type) { - case *types.Array: - // Non-addressable array (in a register). 
- v := &Index{ - X: b.expr(fn, e.X), - Index: emitConv(fn, b.expr(fn, e.Index), tInt), - } - v.setPos(e.Lbrack) - v.setType(t.Elem()) - return fn.emit(v) - - case *types.Map: - // Maps are not addressable. - mapt := fn.Pkg.typeOf(e.X).Underlying().(*types.Map) - v := &Lookup{ - X: b.expr(fn, e.X), - Index: emitConv(fn, b.expr(fn, e.Index), mapt.Key()), - } - v.setPos(e.Lbrack) - v.setType(mapt.Elem()) - return fn.emit(v) - - case *types.Basic: // => string - // Strings are not addressable. - v := &Lookup{ - X: b.expr(fn, e.X), - Index: b.expr(fn, e.Index), - } - v.setPos(e.Lbrack) - v.setType(tByte) - return fn.emit(v) - - case *types.Slice, *types.Pointer: // *array - // Addressable slice/array; use IndexAddr and Load. - return b.addr(fn, e, false).load(fn) - - default: - panic("unexpected container type in IndexExpr: " + t.String()) - } - - case *ast.CompositeLit, *ast.StarExpr: - // Addressable types (lvalues) - return b.addr(fn, e, false).load(fn) - } - - panic(fmt.Sprintf("unexpected expr: %T", e)) -} - -// stmtList emits to fn code for all statements in list. -func (b *builder) stmtList(fn *Function, list []ast.Stmt) { - for _, s := range list { - b.stmt(fn, s) - } -} - -// receiver emits to fn code for expression e in the "receiver" -// position of selection e.f (where f may be a field or a method) and -// returns the effective receiver after applying the implicit field -// selections of sel. -// -// wantAddr requests that the result is an an address. If -// !sel.Indirect(), this may require that e be built in addr() mode; it -// must thus be addressable. -// -// escaping is defined as per builder.addr(). -// -func (b *builder) receiver(fn *Function, e ast.Expr, wantAddr, escaping bool, sel *types.Selection) Value { - var v Value - if wantAddr && !sel.Indirect() && !isPointer(fn.Pkg.typeOf(e)) { - v = b.addr(fn, e, escaping).address(fn) - } else { - v = b.expr(fn, e) - } - - last := len(sel.Index()) - 1 - v = emitImplicitSelections(fn, v, sel.Index()[:last]) - if !wantAddr && isPointer(v.Type()) { - v = emitLoad(fn, v) - } - return v -} - -// setCallFunc populates the function parts of a CallCommon structure -// (Func, Method, Recv, Args[0]) based on the kind of invocation -// occurring in e. -// -func (b *builder) setCallFunc(fn *Function, e *ast.CallExpr, c *CallCommon) { - c.pos = e.Lparen - - // Is this a method call? - if selector, ok := unparen(e.Fun).(*ast.SelectorExpr); ok { - sel, ok := fn.Pkg.info.Selections[selector] - if ok && sel.Kind() == types.MethodVal { - obj := sel.Obj().(*types.Func) - recv := recvType(obj) - wantAddr := isPointer(recv) - escaping := true - v := b.receiver(fn, selector.X, wantAddr, escaping, sel) - if isInterface(recv) { - // Invoke-mode call. - c.Value = v - c.Method = obj - } else { - // "Call"-mode call. - c.Value = fn.Prog.declaredFunc(obj) - c.Args = append(c.Args, v) - } - return - } - - // sel.Kind()==MethodExpr indicates T.f() or (*T).f(): - // a statically dispatched call to the method f in the - // method-set of T or *T. T may be an interface. - // - // e.Fun would evaluate to a concrete method, interface - // wrapper function, or promotion wrapper. - // - // For now, we evaluate it in the usual way. - // - // TODO(adonovan): opt: inline expr() here, to make the - // call static and to avoid generation of wrappers. - // It's somewhat tricky as it may consume the first - // actual parameter if the call is "invoke" mode. 
- // - // Examples: - // type T struct{}; func (T) f() {} // "call" mode - // type T interface { f() } // "invoke" mode - // - // type S struct{ T } - // - // var s S - // S.f(s) - // (*S).f(&s) - // - // Suggested approach: - // - consume the first actual parameter expression - // and build it with b.expr(). - // - apply implicit field selections. - // - use MethodVal logic to populate fields of c. - } - - // Evaluate the function operand in the usual way. - c.Value = b.expr(fn, e.Fun) -} - -// emitCallArgs emits to f code for the actual parameters of call e to -// a (possibly built-in) function of effective type sig. -// The argument values are appended to args, which is then returned. -// -func (b *builder) emitCallArgs(fn *Function, sig *types.Signature, e *ast.CallExpr, args []Value) []Value { - // f(x, y, z...): pass slice z straight through. - if e.Ellipsis != 0 { - for i, arg := range e.Args { - v := emitConv(fn, b.expr(fn, arg), sig.Params().At(i).Type()) - args = append(args, v) - } - return args - } - - offset := len(args) // 1 if call has receiver, 0 otherwise - - // Evaluate actual parameter expressions. - // - // If this is a chained call of the form f(g()) where g has - // multiple return values (MRV), they are flattened out into - // args; a suffix of them may end up in a varargs slice. - for _, arg := range e.Args { - v := b.expr(fn, arg) - if ttuple, ok := v.Type().(*types.Tuple); ok { // MRV chain - for i, n := 0, ttuple.Len(); i < n; i++ { - args = append(args, emitExtract(fn, v, i)) - } - } else { - args = append(args, v) - } - } - - // Actual->formal assignability conversions for normal parameters. - np := sig.Params().Len() // number of normal parameters - if sig.Variadic() { - np-- - } - for i := 0; i < np; i++ { - args[offset+i] = emitConv(fn, args[offset+i], sig.Params().At(i).Type()) - } - - // Actual->formal assignability conversions for variadic parameter, - // and construction of slice. - if sig.Variadic() { - varargs := args[offset+np:] - st := sig.Params().At(np).Type().(*types.Slice) - vt := st.Elem() - if len(varargs) == 0 { - args = append(args, nilConst(st)) - } else { - // Replace a suffix of args with a slice containing it. - at := types.NewArray(vt, int64(len(varargs))) - a := emitNew(fn, at, token.NoPos) - a.setPos(e.Rparen) - a.Comment = "varargs" - for i, arg := range varargs { - iaddr := &IndexAddr{ - X: a, - Index: intConst(int64(i)), - } - iaddr.setType(types.NewPointer(vt)) - fn.emit(iaddr) - emitStore(fn, iaddr, arg, arg.Pos()) - } - s := &Slice{X: a} - s.setType(st) - args[offset+np] = fn.emit(s) - args = args[:offset+np+1] - } - } - return args -} - -// setCall emits to fn code to evaluate all the parameters of a function -// call e, and populates *c with those values. -// -func (b *builder) setCall(fn *Function, e *ast.CallExpr, c *CallCommon) { - // First deal with the f(...) part and optional receiver. - b.setCallFunc(fn, e, c) - - // Then append the other actual parameters. - sig, _ := fn.Pkg.typeOf(e.Fun).Underlying().(*types.Signature) - if sig == nil { - panic(fmt.Sprintf("no signature for call of %s", e.Fun)) - } - c.Args = b.emitCallArgs(fn, sig, e, c.Args) -} - -// assignOp emits to fn code to perform loc += incr or loc -= incr. 
-func (b *builder) assignOp(fn *Function, loc lvalue, incr Value, op token.Token, pos token.Pos) { - oldv := loc.load(fn) - loc.store(fn, emitArith(fn, op, oldv, emitConv(fn, incr, oldv.Type()), loc.typ(), pos)) -} - -// localValueSpec emits to fn code to define all of the vars in the -// function-local ValueSpec, spec. -// -func (b *builder) localValueSpec(fn *Function, spec *ast.ValueSpec) { - switch { - case len(spec.Values) == len(spec.Names): - // e.g. var x, y = 0, 1 - // 1:1 assignment - for i, id := range spec.Names { - if !isBlankIdent(id) { - fn.addLocalForIdent(id) - } - lval := b.addr(fn, id, false) // non-escaping - b.assign(fn, lval, spec.Values[i], true, nil) - } - - case len(spec.Values) == 0: - // e.g. var x, y int - // Locals are implicitly zero-initialized. - for _, id := range spec.Names { - if !isBlankIdent(id) { - lhs := fn.addLocalForIdent(id) - if fn.debugInfo() { - emitDebugRef(fn, id, lhs, true) - } - } - } - - default: - // e.g. var x, y = pos() - tuple := b.exprN(fn, spec.Values[0]) - for i, id := range spec.Names { - if !isBlankIdent(id) { - fn.addLocalForIdent(id) - lhs := b.addr(fn, id, false) // non-escaping - lhs.store(fn, emitExtract(fn, tuple, i)) - } - } - } -} - -// assignStmt emits code to fn for a parallel assignment of rhss to lhss. -// isDef is true if this is a short variable declaration (:=). -// -// Note the similarity with localValueSpec. -// -func (b *builder) assignStmt(fn *Function, lhss, rhss []ast.Expr, isDef bool) { - // Side effects of all LHSs and RHSs must occur in left-to-right order. - lvals := make([]lvalue, len(lhss)) - isZero := make([]bool, len(lhss)) - for i, lhs := range lhss { - var lval lvalue = blank{} - if !isBlankIdent(lhs) { - if isDef { - if obj := fn.Pkg.info.Defs[lhs.(*ast.Ident)]; obj != nil { - fn.addNamedLocal(obj) - isZero[i] = true - } - } - lval = b.addr(fn, lhs, false) // non-escaping - } - lvals[i] = lval - } - if len(lhss) == len(rhss) { - // Simple assignment: x = f() (!isDef) - // Parallel assignment: x, y = f(), g() (!isDef) - // or short var decl: x, y := f(), g() (isDef) - // - // In all cases, the RHSs may refer to the LHSs, - // so we need a storebuf. - var sb storebuf - for i := range rhss { - b.assign(fn, lvals[i], rhss[i], isZero[i], &sb) - } - sb.emit(fn) - } else { - // e.g. x, y = pos() - tuple := b.exprN(fn, rhss[0]) - emitDebugRef(fn, rhss[0], tuple, false) - for i, lval := range lvals { - lval.store(fn, emitExtract(fn, tuple, i)) - } - } -} - -// arrayLen returns the length of the array whose composite literal elements are elts. -func (b *builder) arrayLen(fn *Function, elts []ast.Expr) int64 { - var max int64 = -1 - var i int64 = -1 - for _, e := range elts { - if kv, ok := e.(*ast.KeyValueExpr); ok { - i = b.expr(fn, kv.Key).(*Const).Int64() - } else { - i++ - } - if i > max { - max = i - } - } - return max + 1 -} - -// compLit emits to fn code to initialize a composite literal e at -// address addr with type typ. -// -// Nested composite literals are recursively initialized in place -// where possible. If isZero is true, compLit assumes that addr -// holds the zero value for typ. -// -// Because the elements of a composite literal may refer to the -// variables being updated, as in the second line below, -// x := T{a: 1} -// x = T{a: x.a} -// all the reads must occur before all the writes. Thus all stores to -// loc are emitted to the storebuf sb for later execution. -// -// A CompositeLit may have pointer type only in the recursive (nested) -// case when the type name is implicit. e.g. 
in []*T{{}}, the inner -// literal has type *T behaves like &T{}. -// In that case, addr must hold a T, not a *T. -// -func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero bool, sb *storebuf) { - typ := deref(fn.Pkg.typeOf(e)) - switch t := typ.Underlying().(type) { - case *types.Struct: - if !isZero && len(e.Elts) != t.NumFields() { - // memclear - sb.store(&address{addr, e.Lbrace, nil}, - zeroValue(fn, deref(addr.Type()))) - isZero = true - } - for i, e := range e.Elts { - fieldIndex := i - pos := e.Pos() - if kv, ok := e.(*ast.KeyValueExpr); ok { - fname := kv.Key.(*ast.Ident).Name - for i, n := 0, t.NumFields(); i < n; i++ { - sf := t.Field(i) - if sf.Name() == fname { - fieldIndex = i - pos = kv.Colon - e = kv.Value - break - } - } - } - sf := t.Field(fieldIndex) - faddr := &FieldAddr{ - X: addr, - Field: fieldIndex, - } - faddr.setType(types.NewPointer(sf.Type())) - fn.emit(faddr) - b.assign(fn, &address{addr: faddr, pos: pos, expr: e}, e, isZero, sb) - } - - case *types.Array, *types.Slice: - var at *types.Array - var array Value - switch t := t.(type) { - case *types.Slice: - at = types.NewArray(t.Elem(), b.arrayLen(fn, e.Elts)) - alloc := emitNew(fn, at, e.Lbrace) - alloc.Comment = "slicelit" - array = alloc - case *types.Array: - at = t - array = addr - - if !isZero && int64(len(e.Elts)) != at.Len() { - // memclear - sb.store(&address{array, e.Lbrace, nil}, - zeroValue(fn, deref(array.Type()))) - } - } - - var idx *Const - for _, e := range e.Elts { - pos := e.Pos() - if kv, ok := e.(*ast.KeyValueExpr); ok { - idx = b.expr(fn, kv.Key).(*Const) - pos = kv.Colon - e = kv.Value - } else { - var idxval int64 - if idx != nil { - idxval = idx.Int64() + 1 - } - idx = intConst(idxval) - } - iaddr := &IndexAddr{ - X: array, - Index: idx, - } - iaddr.setType(types.NewPointer(at.Elem())) - fn.emit(iaddr) - if t != at { // slice - // backing array is unaliased => storebuf not needed. - b.assign(fn, &address{addr: iaddr, pos: pos, expr: e}, e, true, nil) - } else { - b.assign(fn, &address{addr: iaddr, pos: pos, expr: e}, e, true, sb) - } - } - - if t != at { // slice - s := &Slice{X: array} - s.setPos(e.Lbrace) - s.setType(typ) - sb.store(&address{addr: addr, pos: e.Lbrace, expr: e}, fn.emit(s)) - } - - case *types.Map: - m := &MakeMap{Reserve: intConst(int64(len(e.Elts)))} - m.setPos(e.Lbrace) - m.setType(typ) - fn.emit(m) - for _, e := range e.Elts { - e := e.(*ast.KeyValueExpr) - - // If a key expression in a map literal is itself a - // composite literal, the type may be omitted. - // For example: - // map[*struct{}]bool{{}: true} - // An &-operation may be implied: - // map[*struct{}]bool{&struct{}{}: true} - var key Value - if _, ok := unparen(e.Key).(*ast.CompositeLit); ok && isPointer(t.Key()) { - // A CompositeLit never evaluates to a pointer, - // so if the type of the location is a pointer, - // an &-operation is implied. - key = b.addr(fn, e.Key, true).address(fn) - } else { - key = b.expr(fn, e.Key) - } - - loc := element{ - m: m, - k: emitConv(fn, key, t.Key()), - t: t.Elem(), - pos: e.Colon, - } - - // We call assign() only because it takes care - // of any &-operation required in the recursive - // case, e.g., - // map[int]*struct{}{0: {}} implies &struct{}{}. - // In-place update is of course impossible, - // and no storebuf is needed. 
- b.assign(fn, &loc, e.Value, true, nil) - } - sb.store(&address{addr: addr, pos: e.Lbrace, expr: e}, m) - - default: - panic("unexpected CompositeLit type: " + t.String()) - } -} - -// switchStmt emits to fn code for the switch statement s, optionally -// labelled by label. -// -func (b *builder) switchStmt(fn *Function, s *ast.SwitchStmt, label *lblock) { - // We treat SwitchStmt like a sequential if-else chain. - // Multiway dispatch can be recovered later by ssautil.Switches() - // to those cases that are free of side effects. - if s.Init != nil { - b.stmt(fn, s.Init) - } - var tag Value = vTrue - if s.Tag != nil { - tag = b.expr(fn, s.Tag) - } - done := fn.newBasicBlock("switch.done") - if label != nil { - label._break = done - } - // We pull the default case (if present) down to the end. - // But each fallthrough label must point to the next - // body block in source order, so we preallocate a - // body block (fallthru) for the next case. - // Unfortunately this makes for a confusing block order. - var dfltBody *[]ast.Stmt - var dfltFallthrough *BasicBlock - var fallthru, dfltBlock *BasicBlock - ncases := len(s.Body.List) - for i, clause := range s.Body.List { - body := fallthru - if body == nil { - body = fn.newBasicBlock("switch.body") // first case only - } - - // Preallocate body block for the next case. - fallthru = done - if i+1 < ncases { - fallthru = fn.newBasicBlock("switch.body") - } - - cc := clause.(*ast.CaseClause) - if cc.List == nil { - // Default case. - dfltBody = &cc.Body - dfltFallthrough = fallthru - dfltBlock = body - continue - } - - var nextCond *BasicBlock - for _, cond := range cc.List { - nextCond = fn.newBasicBlock("switch.next") - // TODO(adonovan): opt: when tag==vTrue, we'd - // get better code if we use b.cond(cond) - // instead of BinOp(EQL, tag, b.expr(cond)) - // followed by If. Don't forget conversions - // though. - cond := emitCompare(fn, token.EQL, tag, b.expr(fn, cond), token.NoPos) - emitIf(fn, cond, body, nextCond) - fn.currentBlock = nextCond - } - fn.currentBlock = body - fn.targets = &targets{ - tail: fn.targets, - _break: done, - _fallthrough: fallthru, - } - b.stmtList(fn, cc.Body) - fn.targets = fn.targets.tail - emitJump(fn, done) - fn.currentBlock = nextCond - } - if dfltBlock != nil { - emitJump(fn, dfltBlock) - fn.currentBlock = dfltBlock - fn.targets = &targets{ - tail: fn.targets, - _break: done, - _fallthrough: dfltFallthrough, - } - b.stmtList(fn, *dfltBody) - fn.targets = fn.targets.tail - } - emitJump(fn, done) - fn.currentBlock = done -} - -// typeSwitchStmt emits to fn code for the type switch statement s, optionally -// labelled by label. -// -func (b *builder) typeSwitchStmt(fn *Function, s *ast.TypeSwitchStmt, label *lblock) { - // We treat TypeSwitchStmt like a sequential if-else chain. - // Multiway dispatch can be recovered later by ssautil.Switches(). - - // Typeswitch lowering: - // - // var x X - // switch y := x.(type) { - // case T1, T2: S1 // >1 (y := x) - // case nil: SN // nil (y := x) - // default: SD // 0 types (y := x) - // case T3: S3 // 1 type (y := x.(T3)) - // } - // - // ...s.Init... - // x := eval x - // .caseT1: - // t1, ok1 := typeswitch,ok x - // if ok1 then goto S1 else goto .caseT2 - // .caseT2: - // t2, ok2 := typeswitch,ok x - // if ok2 then goto S1 else goto .caseNil - // .S1: - // y := x - // ...S1... - // goto done - // .caseNil: - // if t2, ok2 := typeswitch,ok x - // if x == nil then goto SN else goto .caseT3 - // .SN: - // y := x - // ...SN... 
- // goto done - // .caseT3: - // t3, ok3 := typeswitch,ok x - // if ok3 then goto S3 else goto default - // .S3: - // y := t3 - // ...S3... - // goto done - // .default: - // y := x - // ...SD... - // goto done - // .done: - - if s.Init != nil { - b.stmt(fn, s.Init) - } - - var x Value - switch ass := s.Assign.(type) { - case *ast.ExprStmt: // x.(type) - x = b.expr(fn, unparen(ass.X).(*ast.TypeAssertExpr).X) - case *ast.AssignStmt: // y := x.(type) - x = b.expr(fn, unparen(ass.Rhs[0]).(*ast.TypeAssertExpr).X) - } - - done := fn.newBasicBlock("typeswitch.done") - if label != nil { - label._break = done - } - var default_ *ast.CaseClause - for _, clause := range s.Body.List { - cc := clause.(*ast.CaseClause) - if cc.List == nil { - default_ = cc - continue - } - body := fn.newBasicBlock("typeswitch.body") - var next *BasicBlock - var casetype types.Type - var ti Value // ti, ok := typeassert,ok x - for _, cond := range cc.List { - next = fn.newBasicBlock("typeswitch.next") - casetype = fn.Pkg.typeOf(cond) - var condv Value - if casetype == tUntypedNil { - condv = emitCompare(fn, token.EQL, x, nilConst(x.Type()), token.NoPos) - ti = x - } else { - yok := emitTypeTest(fn, x, casetype, cc.Case) - ti = emitExtract(fn, yok, 0) - condv = emitExtract(fn, yok, 1) - } - emitIf(fn, condv, body, next) - fn.currentBlock = next - } - if len(cc.List) != 1 { - ti = x - } - fn.currentBlock = body - b.typeCaseBody(fn, cc, ti, done) - fn.currentBlock = next - } - if default_ != nil { - b.typeCaseBody(fn, default_, x, done) - } else { - emitJump(fn, done) - } - fn.currentBlock = done -} - -func (b *builder) typeCaseBody(fn *Function, cc *ast.CaseClause, x Value, done *BasicBlock) { - if obj := fn.Pkg.info.Implicits[cc]; obj != nil { - // In a switch y := x.(type), each case clause - // implicitly declares a distinct object y. - // In a single-type case, y has that type. - // In multi-type cases, 'case nil' and default, - // y has the same type as the interface operand. - emitStore(fn, fn.addNamedLocal(obj), x, obj.Pos()) - } - fn.targets = &targets{ - tail: fn.targets, - _break: done, - } - b.stmtList(fn, cc.Body) - fn.targets = fn.targets.tail - emitJump(fn, done) -} - -// selectStmt emits to fn code for the select statement s, optionally -// labelled by label. -// -func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) { - // A blocking select of a single case degenerates to a - // simple send or receive. - // TODO(adonovan): opt: is this optimization worth its weight? - if len(s.Body.List) == 1 { - clause := s.Body.List[0].(*ast.CommClause) - if clause.Comm != nil { - b.stmt(fn, clause.Comm) - done := fn.newBasicBlock("select.done") - if label != nil { - label._break = done - } - fn.targets = &targets{ - tail: fn.targets, - _break: done, - } - b.stmtList(fn, clause.Body) - fn.targets = fn.targets.tail - emitJump(fn, done) - fn.currentBlock = done - return - } - } - - // First evaluate all channels in all cases, and find - // the directions of each state. 
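The classification below distinguishes three comm-clause shapes plus a default. A minimal Go sketch of the corresponding source constructs (illustrative only; with an empty buffered channel, the send is the only ready case, so it is the one chosen):

```go
package main

import "fmt"

func main() {
	ch := make(chan int, 1) // empty, so a send is ready and receives are not

	select {
	case ch <- 1: // *ast.SendStmt: Dir == types.SendOnly
		fmt.Println("sent")
	case v, ok := <-ch: // *ast.AssignStmt: Dir == types.RecvOnly, comma-ok form
		fmt.Println(v, ok)
	case <-ch: // *ast.ExprStmt: received value discarded
		fmt.Println("received")
	default: // nil Comm: makes the select non-blocking (Blocking = false)
		fmt.Println("no channel ready")
	}
}
```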
- var states []*SelectState - blocking := true - debugInfo := fn.debugInfo() - for _, clause := range s.Body.List { - var st *SelectState - switch comm := clause.(*ast.CommClause).Comm.(type) { - case nil: // default case - blocking = false - continue - - case *ast.SendStmt: // ch<- i - ch := b.expr(fn, comm.Chan) - st = &SelectState{ - Dir: types.SendOnly, - Chan: ch, - Send: emitConv(fn, b.expr(fn, comm.Value), - ch.Type().Underlying().(*types.Chan).Elem()), - Pos: comm.Arrow, - } - if debugInfo { - st.DebugNode = comm - } - - case *ast.AssignStmt: // x := <-ch - recv := unparen(comm.Rhs[0]).(*ast.UnaryExpr) - st = &SelectState{ - Dir: types.RecvOnly, - Chan: b.expr(fn, recv.X), - Pos: recv.OpPos, - } - if debugInfo { - st.DebugNode = recv - } - - case *ast.ExprStmt: // <-ch - recv := unparen(comm.X).(*ast.UnaryExpr) - st = &SelectState{ - Dir: types.RecvOnly, - Chan: b.expr(fn, recv.X), - Pos: recv.OpPos, - } - if debugInfo { - st.DebugNode = recv - } - } - states = append(states, st) - } - - // We dispatch on the (fair) result of Select using a - // sequential if-else chain, in effect: - // - // idx, recvOk, r0...r_n-1 := select(...) - // if idx == 0 { // receive on channel 0 (first receive => r0) - // x, ok := r0, recvOk - // ...state0... - // } else if v == 1 { // send on channel 1 - // ...state1... - // } else { - // ...default... - // } - sel := &Select{ - States: states, - Blocking: blocking, - } - sel.setPos(s.Select) - var vars []*types.Var - vars = append(vars, varIndex, varOk) - for _, st := range states { - if st.Dir == types.RecvOnly { - tElem := st.Chan.Type().Underlying().(*types.Chan).Elem() - vars = append(vars, anonVar(tElem)) - } - } - sel.setType(types.NewTuple(vars...)) - - fn.emit(sel) - idx := emitExtract(fn, sel, 0) - - done := fn.newBasicBlock("select.done") - if label != nil { - label._break = done - } - - var defaultBody *[]ast.Stmt - state := 0 - r := 2 // index in 'sel' tuple of value; increments if st.Dir==RECV - for _, cc := range s.Body.List { - clause := cc.(*ast.CommClause) - if clause.Comm == nil { - defaultBody = &clause.Body - continue - } - body := fn.newBasicBlock("select.body") - next := fn.newBasicBlock("select.next") - emitIf(fn, emitCompare(fn, token.EQL, idx, intConst(int64(state)), token.NoPos), body, next) - fn.currentBlock = body - fn.targets = &targets{ - tail: fn.targets, - _break: done, - } - switch comm := clause.Comm.(type) { - case *ast.ExprStmt: // <-ch - if debugInfo { - v := emitExtract(fn, sel, r) - emitDebugRef(fn, states[state].DebugNode.(ast.Expr), v, false) - } - r++ - - case *ast.AssignStmt: // x := <-states[state].Chan - if comm.Tok == token.DEFINE { - fn.addLocalForIdent(comm.Lhs[0].(*ast.Ident)) - } - x := b.addr(fn, comm.Lhs[0], false) // non-escaping - v := emitExtract(fn, sel, r) - if debugInfo { - emitDebugRef(fn, states[state].DebugNode.(ast.Expr), v, false) - } - x.store(fn, v) - - if len(comm.Lhs) == 2 { // x, ok := ... - if comm.Tok == token.DEFINE { - fn.addLocalForIdent(comm.Lhs[1].(*ast.Ident)) - } - ok := b.addr(fn, comm.Lhs[1], false) // non-escaping - ok.store(fn, emitExtract(fn, sel, 1)) - } - r++ - } - b.stmtList(fn, clause.Body) - fn.targets = fn.targets.tail - emitJump(fn, done) - fn.currentBlock = next - state++ - } - if defaultBody != nil { - fn.targets = &targets{ - tail: fn.targets, - _break: done, - } - b.stmtList(fn, *defaultBody) - fn.targets = fn.targets.tail - } else { - // A blocking select must match some case. - // (This should really be a runtime.errorString, not a string.) 
- fn.emit(&Panic{ - X: emitConv(fn, stringConst("blocking select matched no case"), tEface), - }) - fn.currentBlock = fn.newBasicBlock("unreachable") - } - emitJump(fn, done) - fn.currentBlock = done -} - -// forStmt emits to fn code for the for statement s, optionally -// labelled by label. -// -func (b *builder) forStmt(fn *Function, s *ast.ForStmt, label *lblock) { - // ...init... - // jump loop - // loop: - // if cond goto body else done - // body: - // ...body... - // jump post - // post: (target of continue) - // ...post... - // jump loop - // done: (target of break) - if s.Init != nil { - b.stmt(fn, s.Init) - } - body := fn.newBasicBlock("for.body") - done := fn.newBasicBlock("for.done") // target of 'break' - loop := body // target of back-edge - if s.Cond != nil { - loop = fn.newBasicBlock("for.loop") - } - cont := loop // target of 'continue' - if s.Post != nil { - cont = fn.newBasicBlock("for.post") - } - if label != nil { - label._break = done - label._continue = cont - } - emitJump(fn, loop) - fn.currentBlock = loop - if loop != body { - b.cond(fn, s.Cond, body, done) - fn.currentBlock = body - } - fn.targets = &targets{ - tail: fn.targets, - _break: done, - _continue: cont, - } - b.stmt(fn, s.Body) - fn.targets = fn.targets.tail - emitJump(fn, cont) - - if s.Post != nil { - fn.currentBlock = cont - b.stmt(fn, s.Post) - emitJump(fn, loop) // back-edge - } - fn.currentBlock = done -} - -// rangeIndexed emits to fn the header for an integer-indexed loop -// over array, *array or slice value x. -// The v result is defined only if tv is non-nil. -// forPos is the position of the "for" token. -// -func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, pos token.Pos) (k, v Value, loop, done *BasicBlock) { - // - // length = len(x) - // index = -1 - // loop: (target of continue) - // index++ - // if index < length goto body else done - // body: - // k = index - // v = x[index] - // ...body... - // jump loop - // done: (target of break) - - // Determine number of iterations. - var length Value - if arr, ok := deref(x.Type()).Underlying().(*types.Array); ok { - // For array or *array, the number of iterations is - // known statically thanks to the type. We avoid a - // data dependence upon x, permitting later dead-code - // elimination if x is pure, static unrolling, etc. - // Ranging over a nil *array may have >0 iterations. - // We still generate code for x, in case it has effects. - length = intConst(arr.Len()) - } else { - // length = len(x). 
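A small Go sketch of the property noted above — for arrays and *arrays the iteration count comes from the type, so even a nil *array ranges over len(type) elements, while a slice's count is computed at run time:

```go
package main

import "fmt"

func main() {
	var p *[3]int // nil pointer to array

	// The iteration count comes from the type, not the value, so a
	// nil *array still yields three iterations; only indexing p[i]
	// would panic.
	for i := range p {
		fmt.Println(i) // 0 1 2
	}

	s := []int{10, 20}
	for i, v := range s { // length comes from len(s) at run time
		fmt.Println(i, v)
	}
}
```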
- var c Call - c.Call.Value = makeLen(x.Type()) - c.Call.Args = []Value{x} - c.setType(tInt) - length = fn.emit(&c) - } - - index := fn.addLocal(tInt, token.NoPos) - emitStore(fn, index, intConst(-1), pos) - - loop = fn.newBasicBlock("rangeindex.loop") - emitJump(fn, loop) - fn.currentBlock = loop - - incr := &BinOp{ - Op: token.ADD, - X: emitLoad(fn, index), - Y: vOne, - } - incr.setType(tInt) - emitStore(fn, index, fn.emit(incr), pos) - - body := fn.newBasicBlock("rangeindex.body") - done = fn.newBasicBlock("rangeindex.done") - emitIf(fn, emitCompare(fn, token.LSS, incr, length, token.NoPos), body, done) - fn.currentBlock = body - - k = emitLoad(fn, index) - if tv != nil { - switch t := x.Type().Underlying().(type) { - case *types.Array: - instr := &Index{ - X: x, - Index: k, - } - instr.setType(t.Elem()) - v = fn.emit(instr) - - case *types.Pointer: // *array - instr := &IndexAddr{ - X: x, - Index: k, - } - instr.setType(types.NewPointer(t.Elem().Underlying().(*types.Array).Elem())) - v = emitLoad(fn, fn.emit(instr)) - - case *types.Slice: - instr := &IndexAddr{ - X: x, - Index: k, - } - instr.setType(types.NewPointer(t.Elem())) - v = emitLoad(fn, fn.emit(instr)) - - default: - panic("rangeIndexed x:" + t.String()) - } - } - return -} - -// rangeIter emits to fn the header for a loop using -// Range/Next/Extract to iterate over map or string value x. -// tk and tv are the types of the key/value results k and v, or nil -// if the respective component is not wanted. -// -func (b *builder) rangeIter(fn *Function, x Value, tk, tv types.Type, pos token.Pos) (k, v Value, loop, done *BasicBlock) { - // - // it = range x - // loop: (target of continue) - // okv = next it (ok, key, value) - // ok = extract okv #0 - // if ok goto body else done - // body: - // k = extract okv #1 - // v = extract okv #2 - // ...body... - // jump loop - // done: (target of break) - // - - if tk == nil { - tk = tInvalid - } - if tv == nil { - tv = tInvalid - } - - rng := &Range{X: x} - rng.setPos(pos) - rng.setType(tRangeIter) - it := fn.emit(rng) - - loop = fn.newBasicBlock("rangeiter.loop") - emitJump(fn, loop) - fn.currentBlock = loop - - _, isString := x.Type().Underlying().(*types.Basic) - - okv := &Next{ - Iter: it, - IsString: isString, - } - okv.setType(types.NewTuple( - varOk, - newVar("k", tk), - newVar("v", tv), - )) - fn.emit(okv) - - body := fn.newBasicBlock("rangeiter.body") - done = fn.newBasicBlock("rangeiter.done") - emitIf(fn, emitExtract(fn, okv, 0), body, done) - fn.currentBlock = body - - if tk != tInvalid { - k = emitExtract(fn, okv, 1) - } - if tv != tInvalid { - v = emitExtract(fn, okv, 2) - } - return -} - -// rangeChan emits to fn the header for a loop that receives from -// channel x until it fails. -// tk is the channel's element type, or nil if the k result is -// not wanted -// pos is the position of the '=' or ':=' token. -// -func (b *builder) rangeChan(fn *Function, x Value, tk types.Type, pos token.Pos) (k Value, loop, done *BasicBlock) { - // - // loop: (target of continue) - // ko = <-x (key, ok) - // ok = extract ko #1 - // if ok goto body else done - // body: - // k = extract ko #0 - // ... 
- // goto loop - // done: (target of break) - - loop = fn.newBasicBlock("rangechan.loop") - emitJump(fn, loop) - fn.currentBlock = loop - recv := &UnOp{ - Op: token.ARROW, - X: x, - CommaOk: true, - } - recv.setPos(pos) - recv.setType(types.NewTuple( - newVar("k", x.Type().Underlying().(*types.Chan).Elem()), - varOk, - )) - ko := fn.emit(recv) - body := fn.newBasicBlock("rangechan.body") - done = fn.newBasicBlock("rangechan.done") - emitIf(fn, emitExtract(fn, ko, 1), body, done) - fn.currentBlock = body - if tk != nil { - k = emitExtract(fn, ko, 0) - } - return -} - -// rangeStmt emits to fn code for the range statement s, optionally -// labelled by label. -// -func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock) { - var tk, tv types.Type - if s.Key != nil && !isBlankIdent(s.Key) { - tk = fn.Pkg.typeOf(s.Key) - } - if s.Value != nil && !isBlankIdent(s.Value) { - tv = fn.Pkg.typeOf(s.Value) - } - - // If iteration variables are defined (:=), this - // occurs once outside the loop. - // - // Unlike a short variable declaration, a RangeStmt - // using := never redeclares an existing variable; it - // always creates a new one. - if s.Tok == token.DEFINE { - if tk != nil { - fn.addLocalForIdent(s.Key.(*ast.Ident)) - } - if tv != nil { - fn.addLocalForIdent(s.Value.(*ast.Ident)) - } - } - - x := b.expr(fn, s.X) - - var k, v Value - var loop, done *BasicBlock - switch rt := x.Type().Underlying().(type) { - case *types.Slice, *types.Array, *types.Pointer: // *array - k, v, loop, done = b.rangeIndexed(fn, x, tv, s.For) - - case *types.Chan: - k, loop, done = b.rangeChan(fn, x, tk, s.For) - - case *types.Map, *types.Basic: // string - k, v, loop, done = b.rangeIter(fn, x, tk, tv, s.For) - - default: - panic("Cannot range over: " + rt.String()) - } - - // Evaluate both LHS expressions before we update either. - var kl, vl lvalue - if tk != nil { - kl = b.addr(fn, s.Key, false) // non-escaping - } - if tv != nil { - vl = b.addr(fn, s.Value, false) // non-escaping - } - if tk != nil { - kl.store(fn, k) - } - if tv != nil { - vl.store(fn, v) - } - - if label != nil { - label._break = done - label._continue = loop - } - - fn.targets = &targets{ - tail: fn.targets, - _break: done, - _continue: loop, - } - b.stmt(fn, s.Body) - fn.targets = fn.targets.tail - emitJump(fn, loop) // back-edge - fn.currentBlock = done -} - -// stmt lowers statement s to SSA form, emitting code to fn. -func (b *builder) stmt(fn *Function, _s ast.Stmt) { - // The label of the current statement. If non-nil, its _goto - // target is always set; its _break and _continue are set only - // within the body of switch/typeswitch/select/for/range. - // It is effectively an additional default-nil parameter of stmt(). - var label *lblock -start: - switch s := _s.(type) { - case *ast.EmptyStmt: - // ignore. (Usually removed by gofmt.) 
- - case *ast.DeclStmt: // Con, Var or Typ - d := s.Decl.(*ast.GenDecl) - if d.Tok == token.VAR { - for _, spec := range d.Specs { - if vs, ok := spec.(*ast.ValueSpec); ok { - b.localValueSpec(fn, vs) - } - } - } - - case *ast.LabeledStmt: - label = fn.labelledBlock(s.Label) - emitJump(fn, label._goto) - fn.currentBlock = label._goto - _s = s.Stmt - goto start // effectively: tailcall stmt(fn, s.Stmt, label) - - case *ast.ExprStmt: - b.expr(fn, s.X) - - case *ast.SendStmt: - fn.emit(&Send{ - Chan: b.expr(fn, s.Chan), - X: emitConv(fn, b.expr(fn, s.Value), - fn.Pkg.typeOf(s.Chan).Underlying().(*types.Chan).Elem()), - pos: s.Arrow, - }) - - case *ast.IncDecStmt: - op := token.ADD - if s.Tok == token.DEC { - op = token.SUB - } - loc := b.addr(fn, s.X, false) - b.assignOp(fn, loc, NewConst(exact.MakeInt64(1), loc.typ()), op, s.Pos()) - - case *ast.AssignStmt: - switch s.Tok { - case token.ASSIGN, token.DEFINE: - b.assignStmt(fn, s.Lhs, s.Rhs, s.Tok == token.DEFINE) - - default: // +=, etc. - op := s.Tok + token.ADD - token.ADD_ASSIGN - b.assignOp(fn, b.addr(fn, s.Lhs[0], false), b.expr(fn, s.Rhs[0]), op, s.Pos()) - } - - case *ast.GoStmt: - // The "intrinsics" new/make/len/cap are forbidden here. - // panic is treated like an ordinary function call. - v := Go{pos: s.Go} - b.setCall(fn, s.Call, &v.Call) - fn.emit(&v) - - case *ast.DeferStmt: - // The "intrinsics" new/make/len/cap are forbidden here. - // panic is treated like an ordinary function call. - v := Defer{pos: s.Defer} - b.setCall(fn, s.Call, &v.Call) - fn.emit(&v) - - // A deferred call can cause recovery from panic, - // and control resumes at the Recover block. - createRecoverBlock(fn) - - case *ast.ReturnStmt: - var results []Value - if len(s.Results) == 1 && fn.Signature.Results().Len() > 1 { - // Return of one expression in a multi-valued function. - tuple := b.exprN(fn, s.Results[0]) - ttuple := tuple.Type().(*types.Tuple) - for i, n := 0, ttuple.Len(); i < n; i++ { - results = append(results, - emitConv(fn, emitExtract(fn, tuple, i), - fn.Signature.Results().At(i).Type())) - } - } else { - // 1:1 return, or no-arg return in non-void function. - for i, r := range s.Results { - v := emitConv(fn, b.expr(fn, r), fn.Signature.Results().At(i).Type()) - results = append(results, v) - } - } - if fn.namedResults != nil { - // Function has named result parameters (NRPs). - // Perform parallel assignment of return operands to NRPs. - for i, r := range results { - emitStore(fn, fn.namedResults[i], r, s.Return) - } - } - // Run function calls deferred in this - // function when explicitly returning from it. - fn.emit(new(RunDefers)) - if fn.namedResults != nil { - // Reload NRPs to form the result tuple. 
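A short Go example of why named result parameters must be reloaded after RunDefers: a deferred call may mutate them between the return statement and the actual return:

```go
package main

import "fmt"

func f() (n int) {
	defer func() { n++ }() // deferred code may mutate the named result
	return 41              // stores 41 into n, runs defers, then reloads n
}

func main() {
	fmt.Println(f()) // 42
}
```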
- results = results[:0] - for _, r := range fn.namedResults { - results = append(results, emitLoad(fn, r)) - } - } - fn.emit(&Return{Results: results, pos: s.Return}) - fn.currentBlock = fn.newBasicBlock("unreachable") - - case *ast.BranchStmt: - var block *BasicBlock - switch s.Tok { - case token.BREAK: - if s.Label != nil { - block = fn.labelledBlock(s.Label)._break - } else { - for t := fn.targets; t != nil && block == nil; t = t.tail { - block = t._break - } - } - - case token.CONTINUE: - if s.Label != nil { - block = fn.labelledBlock(s.Label)._continue - } else { - for t := fn.targets; t != nil && block == nil; t = t.tail { - block = t._continue - } - } - - case token.FALLTHROUGH: - for t := fn.targets; t != nil && block == nil; t = t.tail { - block = t._fallthrough - } - - case token.GOTO: - block = fn.labelledBlock(s.Label)._goto - } - emitJump(fn, block) - fn.currentBlock = fn.newBasicBlock("unreachable") - - case *ast.BlockStmt: - b.stmtList(fn, s.List) - - case *ast.IfStmt: - if s.Init != nil { - b.stmt(fn, s.Init) - } - then := fn.newBasicBlock("if.then") - done := fn.newBasicBlock("if.done") - els := done - if s.Else != nil { - els = fn.newBasicBlock("if.else") - } - b.cond(fn, s.Cond, then, els) - fn.currentBlock = then - b.stmt(fn, s.Body) - emitJump(fn, done) - - if s.Else != nil { - fn.currentBlock = els - b.stmt(fn, s.Else) - emitJump(fn, done) - } - - fn.currentBlock = done - - case *ast.SwitchStmt: - b.switchStmt(fn, s, label) - - case *ast.TypeSwitchStmt: - b.typeSwitchStmt(fn, s, label) - - case *ast.SelectStmt: - b.selectStmt(fn, s, label) - - case *ast.ForStmt: - b.forStmt(fn, s, label) - - case *ast.RangeStmt: - b.rangeStmt(fn, s, label) - - default: - panic(fmt.Sprintf("unexpected statement kind: %T", s)) - } -} - -// buildFunction builds SSA code for the body of function fn. Idempotent. -func (b *builder) buildFunction(fn *Function) { - if fn.Blocks != nil { - return // building already started - } - - var recvField *ast.FieldList - var body *ast.BlockStmt - var functype *ast.FuncType - switch n := fn.syntax.(type) { - case nil: - return // not a Go source function. (Synthetic, or from object file.) - case *ast.FuncDecl: - functype = n.Type - recvField = n.Recv - body = n.Body - case *ast.FuncLit: - functype = n.Type - body = n.Body - default: - panic(n) - } - - if body == nil { - // External function. - if fn.Params == nil { - // This condition ensures we add a non-empty - // params list once only, but we may attempt - // the degenerate empty case repeatedly. - // TODO(adonovan): opt: don't do that. - - // We set Function.Params even though there is no body - // code to reference them. This simplifies clients. - if recv := fn.Signature.Recv(); recv != nil { - fn.addParamObj(recv) - } - params := fn.Signature.Params() - for i, n := 0, params.Len(); i < n; i++ { - fn.addParamObj(params.At(i)) - } - } - return - } - if fn.Prog.mode&LogSource != 0 { - defer logStack("build function %s @ %s", fn, fn.Prog.Fset.Position(fn.pos))() - } - fn.startBody() - fn.createSyntacticParams(recvField, functype) - b.stmt(fn, body) - if cb := fn.currentBlock; cb != nil && (cb == fn.Blocks[0] || cb == fn.Recover || cb.Preds != nil) { - // Control fell off the end of the function's body block. - // - // Block optimizations eliminate the current block, if - // unreachable. It is a builder invariant that - // if this no-arg return is ill-typed for - // fn.Signature.Results, this block must be - // unreachable. The sanity checker checks this. 
- fn.emit(new(RunDefers)) - fn.emit(new(Return)) - } - fn.finishBody() -} - -// buildFuncDecl builds SSA code for the function or method declared -// by decl in package pkg. -// -func (b *builder) buildFuncDecl(pkg *Package, decl *ast.FuncDecl) { - id := decl.Name - if isBlankIdent(id) { - return // discard - } - fn := pkg.values[pkg.info.Defs[id]].(*Function) - if decl.Recv == nil && id.Name == "init" { - var v Call - v.Call.Value = fn - v.setType(types.NewTuple()) - pkg.init.emit(&v) - } - b.buildFunction(fn) -} - -// Build calls Package.Build for each package in prog. -// Building occurs in parallel unless the BuildSerially mode flag was set. -// -// Build is intended for whole-program analysis; a typical compiler -// need only build a single package. -// -// Build is idempotent and thread-safe. -// -func (prog *Program) Build() { - var wg sync.WaitGroup - for _, p := range prog.packages { - if prog.mode&BuildSerially != 0 { - p.Build() - } else { - wg.Add(1) - go func(p *Package) { - p.Build() - wg.Done() - }(p) - } - } - wg.Wait() -} - -// Build builds SSA code for all functions and vars in package p. -// -// Precondition: CreatePackage must have been called for all of p's -// direct imports (and hence its direct imports must have been -// error-free). -// -// Build is idempotent and thread-safe. -// -func (p *Package) Build() { p.buildOnce.Do(p.build) } - -func (p *Package) build() { - if p.info == nil { - return // synthetic package, e.g. "testmain" - } - - // Ensure we have runtime type info for all exported members. - // TODO(adonovan): ideally belongs in memberFromObject, but - // that would require package creation in topological order. - for name, mem := range p.Members { - if ast.IsExported(name) { - p.Prog.needMethodsOf(mem.Type()) - } - } - if p.Prog.mode&LogSource != 0 { - defer logStack("build %s", p)() - } - init := p.init - init.startBody() - - var done *BasicBlock - - if p.Prog.mode&BareInits == 0 { - // Make init() skip if package is already initialized. - initguard := p.Var("init$guard") - doinit := init.newBasicBlock("init.start") - done = init.newBasicBlock("init.done") - emitIf(init, emitLoad(init, initguard), done, doinit) - init.currentBlock = doinit - emitStore(init, initguard, vTrue, token.NoPos) - - // Call the init() function of each package we import. - for _, pkg := range p.Pkg.Imports() { - prereq := p.Prog.packages[pkg] - if prereq == nil { - panic(fmt.Sprintf("Package(%q).Build(): unsatisfied import: Program.CreatePackage(%q) was not called", p.Pkg.Path(), pkg.Path())) - } - var v Call - v.Call.Value = prereq.init - v.Call.pos = init.pos - v.setType(types.NewTuple()) - init.emit(&v) - } - } - - var b builder - - // Initialize package-level vars in correct order. 
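InitOrder follows dependency order, not source order. A minimal Go illustration of the semantics being compiled here:

```go
package main

import "fmt"

// b is initialized before a despite appearing second in the source,
// because a's initializer depends on b.
var a = b + 1
var b = 2

func main() {
	fmt.Println(a, b) // 3 2
}
```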
- for _, varinit := range p.info.InitOrder { - if init.Prog.mode&LogSource != 0 { - fmt.Fprintf(os.Stderr, "build global initializer %v @ %s\n", - varinit.Lhs, p.Prog.Fset.Position(varinit.Rhs.Pos())) - } - if len(varinit.Lhs) == 1 { - // 1:1 initialization: var x, y = a(), b() - var lval lvalue - if v := varinit.Lhs[0]; v.Name() != "_" { - lval = &address{addr: p.values[v].(*Global), pos: v.Pos()} - } else { - lval = blank{} - } - b.assign(init, lval, varinit.Rhs, true, nil) - } else { - // n:1 initialization: var x, y := f() - tuple := b.exprN(init, varinit.Rhs) - for i, v := range varinit.Lhs { - if v.Name() == "_" { - continue - } - emitStore(init, p.values[v].(*Global), emitExtract(init, tuple, i), v.Pos()) - } - } - } - - // Build all package-level functions, init functions - // and methods, including unreachable/blank ones. - // We build them in source order, but it's not significant. - for _, file := range p.files { - for _, decl := range file.Decls { - if decl, ok := decl.(*ast.FuncDecl); ok { - b.buildFuncDecl(p, decl) - } - } - } - - // Finish up init(). - if p.Prog.mode&BareInits == 0 { - emitJump(init, done) - init.currentBlock = done - } - init.emit(new(Return)) - init.finishBody() - - p.info = nil // We no longer need ASTs or go/types deductions. - - if p.Prog.mode&SanityCheckFunctions != 0 { - sanityCheckPackage(p) - } -} - -// Like ObjectOf, but panics instead of returning nil. -// Only valid during p's create and build phases. -func (p *Package) objectOf(id *ast.Ident) types.Object { - if o := p.info.ObjectOf(id); o != nil { - return o - } - panic(fmt.Sprintf("no types.Object for ast.Ident %s @ %s", - id.Name, p.Prog.Fset.Position(id.Pos()))) -} - -// Like TypeOf, but panics instead of returning nil. -// Only valid during p's create and build phases. -func (p *Package) typeOf(e ast.Expr) types.Type { - if T := p.info.TypeOf(e); T != nil { - return T - } - panic(fmt.Sprintf("no type for %T @ %s", - e, p.Prog.Fset.Position(e.Pos()))) -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/const.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/const.go deleted file mode 100644 index ca99adc3f1d8..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/const.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssa - -// This file defines the Const SSA value type. - -import ( - "fmt" - exact "go/constant" - "go/token" - "go/types" - "strconv" -) - -// NewConst returns a new constant of the specified value and type. -// val must be valid according to the specification of Const.Value. -// -func NewConst(val exact.Value, typ types.Type) *Const { - return &Const{typ, val} -} - -// intConst returns an 'int' constant that evaluates to i. -// (i is an int64 in case the host is narrower than the target.) -func intConst(i int64) *Const { - return NewConst(exact.MakeInt64(i), tInt) -} - -// nilConst returns a nil constant of the specified type, which may -// be any reference type, including interfaces. -// -func nilConst(typ types.Type) *Const { - return NewConst(nil, typ) -} - -// stringConst returns a 'string' constant that evaluates to s. 
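These Const helpers wrap values from the standard go/constant package (imported in this file under the alias exact). A minimal sketch of that underlying API:

```go
package main

import (
	"fmt"
	exact "go/constant"
)

func main() {
	i := exact.MakeInt64(42)
	s := exact.MakeString("hi")

	v, isExact := exact.Int64Val(i) // second result reports an exact fit
	fmt.Println(v, isExact)         // 42 true
	fmt.Println(exact.StringVal(s)) // hi
}
```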
-func stringConst(s string) *Const { - return NewConst(exact.MakeString(s), tString) -} - -// zeroConst returns a new "zero" constant of the specified type, -// which must not be an array or struct type: the zero values of -// aggregates are well-defined but cannot be represented by Const. -// -func zeroConst(t types.Type) *Const { - switch t := t.(type) { - case *types.Basic: - switch { - case t.Info()&types.IsBoolean != 0: - return NewConst(exact.MakeBool(false), t) - case t.Info()&types.IsNumeric != 0: - return NewConst(exact.MakeInt64(0), t) - case t.Info()&types.IsString != 0: - return NewConst(exact.MakeString(""), t) - case t.Kind() == types.UnsafePointer: - fallthrough - case t.Kind() == types.UntypedNil: - return nilConst(t) - default: - panic(fmt.Sprint("zeroConst for unexpected type:", t)) - } - case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature: - return nilConst(t) - case *types.Named: - return NewConst(zeroConst(t.Underlying()).Value, t) - case *types.Array, *types.Struct, *types.Tuple: - panic(fmt.Sprint("zeroConst applied to aggregate:", t)) - } - panic(fmt.Sprint("zeroConst: unexpected ", t)) -} - -func (c *Const) RelString(from *types.Package) string { - var s string - if c.Value == nil { - s = "nil" - } else if c.Value.Kind() == exact.String { - s = exact.StringVal(c.Value) - const max = 20 - // TODO(adonovan): don't cut a rune in half. - if len(s) > max { - s = s[:max-3] + "..." // abbreviate - } - s = strconv.Quote(s) - } else { - s = c.Value.String() - } - return s + ":" + relType(c.Type(), from) -} - -func (c *Const) Name() string { - return c.RelString(nil) -} - -func (c *Const) String() string { - return c.Name() -} - -func (c *Const) Type() types.Type { - return c.typ -} - -func (c *Const) Referrers() *[]Instruction { - return nil -} - -func (c *Const) Parent() *Function { return nil } - -func (c *Const) Pos() token.Pos { - return token.NoPos -} - -// IsNil returns true if this constant represents a typed or untyped nil value. -func (c *Const) IsNil() bool { - return c.Value == nil -} - -// TODO(adonovan): move everything below into honnef.co/go/tools/ssa/interp. - -// Int64 returns the numeric value of this constant truncated to fit -// a signed 64-bit integer. -// -func (c *Const) Int64() int64 { - switch x := exact.ToInt(c.Value); x.Kind() { - case exact.Int: - if i, ok := exact.Int64Val(x); ok { - return i - } - return 0 - case exact.Float: - f, _ := exact.Float64Val(x) - return int64(f) - } - panic(fmt.Sprintf("unexpected constant value: %T", c.Value)) -} - -// Uint64 returns the numeric value of this constant truncated to fit -// an unsigned 64-bit integer. -// -func (c *Const) Uint64() uint64 { - switch x := exact.ToInt(c.Value); x.Kind() { - case exact.Int: - if u, ok := exact.Uint64Val(x); ok { - return u - } - return 0 - case exact.Float: - f, _ := exact.Float64Val(x) - return uint64(f) - } - panic(fmt.Sprintf("unexpected constant value: %T", c.Value)) -} - -// Float64 returns the numeric value of this constant truncated to fit -// a float64. -// -func (c *Const) Float64() float64 { - f, _ := exact.Float64Val(c.Value) - return f -} - -// Complex128 returns the complex value of this constant truncated to -// fit a complex128. 
-// -func (c *Const) Complex128() complex128 { - re, _ := exact.Float64Val(exact.Real(c.Value)) - im, _ := exact.Float64Val(exact.Imag(c.Value)) - return complex(re, im) -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/create.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/create.go deleted file mode 100644 index 69ac12b1bf1c..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/create.go +++ /dev/null @@ -1,263 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssa - -// This file implements the CREATE phase of SSA construction. -// See builder.go for explanation. - -import ( - "fmt" - "go/ast" - "go/token" - "go/types" - "os" - "sync" - - "golang.org/x/tools/go/types/typeutil" -) - -// NewProgram returns a new SSA Program. -// -// mode controls diagnostics and checking during SSA construction. -// -func NewProgram(fset *token.FileSet, mode BuilderMode) *Program { - prog := &Program{ - Fset: fset, - imported: make(map[string]*Package), - packages: make(map[*types.Package]*Package), - thunks: make(map[selectionKey]*Function), - bounds: make(map[*types.Func]*Function), - mode: mode, - } - - h := typeutil.MakeHasher() // protected by methodsMu, in effect - prog.methodSets.SetHasher(h) - prog.canon.SetHasher(h) - - return prog -} - -// memberFromObject populates package pkg with a member for the -// typechecker object obj. -// -// For objects from Go source code, syntax is the associated syntax -// tree (for funcs and vars only); it will be used during the build -// phase. -// -func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) { - name := obj.Name() - switch obj := obj.(type) { - case *types.Builtin: - if pkg.Pkg != types.Unsafe { - panic("unexpected builtin object: " + obj.String()) - } - - case *types.TypeName: - pkg.Members[name] = &Type{ - object: obj, - pkg: pkg, - } - - case *types.Const: - c := &NamedConst{ - object: obj, - Value: NewConst(obj.Val(), obj.Type()), - pkg: pkg, - } - pkg.values[obj] = c.Value - pkg.Members[name] = c - - case *types.Var: - g := &Global{ - Pkg: pkg, - name: name, - object: obj, - typ: types.NewPointer(obj.Type()), // address - pos: obj.Pos(), - } - pkg.values[obj] = g - pkg.Members[name] = g - - case *types.Func: - sig := obj.Type().(*types.Signature) - if sig.Recv() == nil && name == "init" { - pkg.ninit++ - name = fmt.Sprintf("init#%d", pkg.ninit) - } - fn := &Function{ - name: name, - object: obj, - Signature: sig, - syntax: syntax, - pos: obj.Pos(), - Pkg: pkg, - Prog: pkg.Prog, - } - if syntax == nil { - fn.Synthetic = "loaded from gc object file" - } - - pkg.values[obj] = fn - if sig.Recv() == nil { - pkg.Members[name] = fn // package-level function - } - - default: // (incl. *types.Package) - panic("unexpected Object type: " + obj.String()) - } -} - -// membersFromDecl populates package pkg with members for each -// typechecker object (var, func, const or type) associated with the -// specified decl. 
-// -func membersFromDecl(pkg *Package, decl ast.Decl) { - switch decl := decl.(type) { - case *ast.GenDecl: // import, const, type or var - switch decl.Tok { - case token.CONST: - for _, spec := range decl.Specs { - for _, id := range spec.(*ast.ValueSpec).Names { - if !isBlankIdent(id) { - memberFromObject(pkg, pkg.info.Defs[id], nil) - } - } - } - - case token.VAR: - for _, spec := range decl.Specs { - for _, id := range spec.(*ast.ValueSpec).Names { - if !isBlankIdent(id) { - memberFromObject(pkg, pkg.info.Defs[id], spec) - } - } - } - - case token.TYPE: - for _, spec := range decl.Specs { - id := spec.(*ast.TypeSpec).Name - if !isBlankIdent(id) { - memberFromObject(pkg, pkg.info.Defs[id], nil) - } - } - } - - case *ast.FuncDecl: - id := decl.Name - if !isBlankIdent(id) { - memberFromObject(pkg, pkg.info.Defs[id], decl) - } - } -} - -// CreatePackage constructs and returns an SSA Package from the -// specified type-checked, error-free file ASTs, and populates its -// Members mapping. -// -// importable determines whether this package should be returned by a -// subsequent call to ImportedPackage(pkg.Path()). -// -// The real work of building SSA form for each function is not done -// until a subsequent call to Package.Build(). -// -func (prog *Program) CreatePackage(pkg *types.Package, files []*ast.File, info *types.Info, importable bool) *Package { - p := &Package{ - Prog: prog, - Members: make(map[string]Member), - values: make(map[types.Object]Value), - Pkg: pkg, - info: info, // transient (CREATE and BUILD phases) - files: files, // transient (CREATE and BUILD phases) - } - - // Add init() function. - p.init = &Function{ - name: "init", - Signature: new(types.Signature), - Synthetic: "package initializer", - Pkg: p, - Prog: prog, - } - p.Members[p.init.name] = p.init - - // CREATE phase. - // Allocate all package members: vars, funcs, consts and types. - if len(files) > 0 { - // Go source package. - for _, file := range files { - for _, decl := range file.Decls { - membersFromDecl(p, decl) - } - } - } else { - // GC-compiled binary package (or "unsafe") - // No code. - // No position information. - scope := p.Pkg.Scope() - for _, name := range scope.Names() { - obj := scope.Lookup(name) - memberFromObject(p, obj, nil) - if obj, ok := obj.(*types.TypeName); ok { - if named, ok := obj.Type().(*types.Named); ok { - for i, n := 0, named.NumMethods(); i < n; i++ { - memberFromObject(p, named.Method(i), nil) - } - } - } - } - } - - if prog.mode&BareInits == 0 { - // Add initializer guard variable. - initguard := &Global{ - Pkg: p, - name: "init$guard", - typ: types.NewPointer(tBool), - } - p.Members[initguard.Name()] = initguard - } - - if prog.mode&GlobalDebug != 0 { - p.SetDebugMode(true) - } - - if prog.mode&PrintPackages != 0 { - printMu.Lock() - p.WriteTo(os.Stdout) - printMu.Unlock() - } - - if importable { - prog.imported[p.Pkg.Path()] = p - } - prog.packages[p.Pkg] = p - - return p -} - -// printMu serializes printing of Packages/Functions to stdout. -var printMu sync.Mutex - -// AllPackages returns a new slice containing all packages in the -// program prog in unspecified order. -// -func (prog *Program) AllPackages() []*Package { - pkgs := make([]*Package, 0, len(prog.packages)) - for _, pkg := range prog.packages { - pkgs = append(pkgs, pkg) - } - return pkgs -} - -// ImportedPackage returns the importable SSA Package whose import -// path is path, or nil if no such SSA package has been created. -// -// Not all packages are importable. 
For example, no import -// declaration can resolve to the x_test package created by 'go test' -// or the ad-hoc main package created 'go build foo.go'. -// -func (prog *Program) ImportedPackage(path string) *Package { - return prog.imported[path] -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/doc.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/doc.go deleted file mode 100644 index 8ed859267012..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/doc.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ssa defines a representation of the elements of Go programs -// (packages, types, functions, variables and constants) using a -// static single-assignment (SSA) form intermediate representation -// (IR) for the bodies of functions. -// -// THIS INTERFACE IS EXPERIMENTAL AND IS LIKELY TO CHANGE. -// -// For an introduction to SSA form, see -// http://en.wikipedia.org/wiki/Static_single_assignment_form. -// This page provides a broader reading list: -// http://www.dcs.gla.ac.uk/~jsinger/ssa.html. -// -// The level of abstraction of the SSA form is intentionally close to -// the source language to facilitate construction of source analysis -// tools. It is not intended for machine code generation. -// -// All looping, branching and switching constructs are replaced with -// unstructured control flow. Higher-level control flow constructs -// such as multi-way branch can be reconstructed as needed; see -// ssautil.Switches() for an example. -// -// To construct an SSA-form program, call ssautil.CreateProgram on a -// loader.Program, a set of type-checked packages created from -// parsed Go source files. The resulting ssa.Program contains all the -// packages and their members, but SSA code is not created for -// function bodies until a subsequent call to (*Package).Build. -// -// The builder initially builds a naive SSA form in which all local -// variables are addresses of stack locations with explicit loads and -// stores. Registerisation of eligible locals and φ-node insertion -// using dominance and dataflow are then performed as a second pass -// called "lifting" to improve the accuracy and performance of -// subsequent analyses; this pass can be skipped by setting the -// NaiveForm builder flag. -// -// The primary interfaces of this package are: -// -// - Member: a named member of a Go package. -// - Value: an expression that yields a value. -// - Instruction: a statement that consumes values and performs computation. -// - Node: a Value or Instruction (emphasizing its membership in the SSA value graph) -// -// A computation that yields a result implements both the Value and -// Instruction interfaces. The following table shows for each -// concrete type which of these interfaces it implements. -// -// Value? Instruction? Member? 
-// *Alloc ✔ ✔ -// *BinOp ✔ ✔ -// *Builtin ✔ -// *Call ✔ ✔ -// *ChangeInterface ✔ ✔ -// *ChangeType ✔ ✔ -// *Const ✔ -// *Convert ✔ ✔ -// *DebugRef ✔ -// *Defer ✔ -// *Extract ✔ ✔ -// *Field ✔ ✔ -// *FieldAddr ✔ ✔ -// *FreeVar ✔ -// *Function ✔ ✔ (func) -// *Global ✔ ✔ (var) -// *Go ✔ -// *If ✔ -// *Index ✔ ✔ -// *IndexAddr ✔ ✔ -// *Jump ✔ -// *Lookup ✔ ✔ -// *MakeChan ✔ ✔ -// *MakeClosure ✔ ✔ -// *MakeInterface ✔ ✔ -// *MakeMap ✔ ✔ -// *MakeSlice ✔ ✔ -// *MapUpdate ✔ -// *NamedConst ✔ (const) -// *Next ✔ ✔ -// *Panic ✔ -// *Parameter ✔ -// *Phi ✔ ✔ -// *Range ✔ ✔ -// *Return ✔ -// *RunDefers ✔ -// *Select ✔ ✔ -// *Send ✔ -// *Slice ✔ ✔ -// *Store ✔ -// *Type ✔ (type) -// *TypeAssert ✔ ✔ -// *UnOp ✔ ✔ -// -// Other key types in this package include: Program, Package, Function -// and BasicBlock. -// -// The program representation constructed by this package is fully -// resolved internally, i.e. it does not rely on the names of Values, -// Packages, Functions, Types or BasicBlocks for the correct -// interpretation of the program. Only the identities of objects and -// the topology of the SSA and type graphs are semantically -// significant. (There is one exception: Ids, used to identify field -// and method names, contain strings.) Avoidance of name-based -// operations simplifies the implementation of subsequent passes and -// can make them very efficient. Many objects are nonetheless named -// to aid in debugging, but it is not essential that the names be -// either accurate or unambiguous. The public API exposes a number of -// name-based maps for client convenience. -// -// The ssa/ssautil package provides various utilities that depend only -// on the public API of this package. -// -// TODO(adonovan): Consider the exceptional control-flow implications -// of defer and recover(). -// -// TODO(adonovan): write a how-to document for all the various cases -// of trying to determine corresponding elements across the four -// domains of source locations, ast.Nodes, types.Objects, -// ssa.Values/Instructions. -// -package ssa diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/dom.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/dom.go deleted file mode 100644 index 12ef4308f3c0..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/dom.go +++ /dev/null @@ -1,341 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssa - -// This file defines algorithms related to dominance. - -// Dominator tree construction ---------------------------------------- -// -// We use the algorithm described in Lengauer & Tarjan. 1979. A fast -// algorithm for finding dominators in a flowgraph. -// http://doi.acm.org/10.1145/357062.357071 -// -// We also apply the optimizations to SLT described in Georgiadis et -// al, Finding Dominators in Practice, JGAA 2006, -// http://jgaa.info/accepted/2006/GeorgiadisTarjanWerneck2006.10.1.pdf -// to avoid the need for buckets of size > 1. - -import ( - "bytes" - "fmt" - "math/big" - "os" - "sort" -) - -// Idom returns the block that immediately dominates b: -// its parent in the dominator tree, if any. -// Neither the entry node (b.Index==0) nor recover node -// (b==b.Parent().Recover()) have a parent. -// -func (b *BasicBlock) Idom() *BasicBlock { return b.dom.idom } - -// Dominees returns the list of blocks that b immediately dominates: -// its children in the dominator tree. 
-// -func (b *BasicBlock) Dominees() []*BasicBlock { return b.dom.children } - -// Dominates reports whether b dominates c. -func (b *BasicBlock) Dominates(c *BasicBlock) bool { - return b.dom.pre <= c.dom.pre && c.dom.post <= b.dom.post -} - -type byDomPreorder []*BasicBlock - -func (a byDomPreorder) Len() int { return len(a) } -func (a byDomPreorder) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byDomPreorder) Less(i, j int) bool { return a[i].dom.pre < a[j].dom.pre } - -// DomPreorder returns a new slice containing the blocks of f in -// dominator tree preorder. -// -func (f *Function) DomPreorder() []*BasicBlock { - n := len(f.Blocks) - order := make(byDomPreorder, n, n) - copy(order, f.Blocks) - sort.Sort(order) - return order -} - -// domInfo contains a BasicBlock's dominance information. -type domInfo struct { - idom *BasicBlock // immediate dominator (parent in domtree) - children []*BasicBlock // nodes immediately dominated by this one - pre, post int32 // pre- and post-order numbering within domtree -} - -// ltState holds the working state for Lengauer-Tarjan algorithm -// (during which domInfo.pre is repurposed for CFG DFS preorder number). -type ltState struct { - // Each slice is indexed by b.Index. - sdom []*BasicBlock // b's semidominator - parent []*BasicBlock // b's parent in DFS traversal of CFG - ancestor []*BasicBlock // b's ancestor with least sdom -} - -// dfs implements the depth-first search part of the LT algorithm. -func (lt *ltState) dfs(v *BasicBlock, i int32, preorder []*BasicBlock) int32 { - preorder[i] = v - v.dom.pre = i // For now: DFS preorder of spanning tree of CFG - i++ - lt.sdom[v.Index] = v - lt.link(nil, v) - for _, w := range v.Succs { - if lt.sdom[w.Index] == nil { - lt.parent[w.Index] = v - i = lt.dfs(w, i, preorder) - } - } - return i -} - -// eval implements the EVAL part of the LT algorithm. -func (lt *ltState) eval(v *BasicBlock) *BasicBlock { - // TODO(adonovan): opt: do path compression per simple LT. - u := v - for ; lt.ancestor[v.Index] != nil; v = lt.ancestor[v.Index] { - if lt.sdom[v.Index].dom.pre < lt.sdom[u.Index].dom.pre { - u = v - } - } - return u -} - -// link implements the LINK part of the LT algorithm. -func (lt *ltState) link(v, w *BasicBlock) { - lt.ancestor[w.Index] = v -} - -// buildDomTree computes the dominator tree of f using the LT algorithm. -// Precondition: all blocks are reachable (e.g. optimizeBlocks has been run). -// -func buildDomTree(f *Function) { - // The step numbers refer to the original LT paper; the - // reordering is due to Georgiadis. - - // Clear any previous domInfo. - for _, b := range f.Blocks { - b.dom = domInfo{} - } - - n := len(f.Blocks) - // Allocate space for 5 contiguous [n]*BasicBlock arrays: - // sdom, parent, ancestor, preorder, buckets. - space := make([]*BasicBlock, 5*n, 5*n) - lt := ltState{ - sdom: space[0:n], - parent: space[n : 2*n], - ancestor: space[2*n : 3*n], - } - - // Step 1. Number vertices by depth-first preorder. - preorder := space[3*n : 4*n] - root := f.Blocks[0] - prenum := lt.dfs(root, 0, preorder) - recover := f.Recover - if recover != nil { - lt.dfs(recover, prenum, preorder) - } - - buckets := space[4*n : 5*n] - copy(buckets, preorder) - - // In reverse preorder... - for i := int32(n) - 1; i > 0; i-- { - w := preorder[i] - - // Step 3. Implicitly define the immediate dominator of each node. 
- for v := buckets[i]; v != w; v = buckets[v.dom.pre] { - u := lt.eval(v) - if lt.sdom[u.Index].dom.pre < i { - v.dom.idom = u - } else { - v.dom.idom = w - } - } - - // Step 2. Compute the semidominators of all nodes. - lt.sdom[w.Index] = lt.parent[w.Index] - for _, v := range w.Preds { - u := lt.eval(v) - if lt.sdom[u.Index].dom.pre < lt.sdom[w.Index].dom.pre { - lt.sdom[w.Index] = lt.sdom[u.Index] - } - } - - lt.link(lt.parent[w.Index], w) - - if lt.parent[w.Index] == lt.sdom[w.Index] { - w.dom.idom = lt.parent[w.Index] - } else { - buckets[i] = buckets[lt.sdom[w.Index].dom.pre] - buckets[lt.sdom[w.Index].dom.pre] = w - } - } - - // The final 'Step 3' is now outside the loop. - for v := buckets[0]; v != root; v = buckets[v.dom.pre] { - v.dom.idom = root - } - - // Step 4. Explicitly define the immediate dominator of each - // node, in preorder. - for _, w := range preorder[1:] { - if w == root || w == recover { - w.dom.idom = nil - } else { - if w.dom.idom != lt.sdom[w.Index] { - w.dom.idom = w.dom.idom.dom.idom - } - // Calculate Children relation as inverse of Idom. - w.dom.idom.dom.children = append(w.dom.idom.dom.children, w) - } - } - - pre, post := numberDomTree(root, 0, 0) - if recover != nil { - numberDomTree(recover, pre, post) - } - - // printDomTreeDot(os.Stderr, f) // debugging - // printDomTreeText(os.Stderr, root, 0) // debugging - - if f.Prog.mode&SanityCheckFunctions != 0 { - sanityCheckDomTree(f) - } -} - -// numberDomTree sets the pre- and post-order numbers of a depth-first -// traversal of the dominator tree rooted at v. These are used to -// answer dominance queries in constant time. -// -func numberDomTree(v *BasicBlock, pre, post int32) (int32, int32) { - v.dom.pre = pre - pre++ - for _, child := range v.dom.children { - pre, post = numberDomTree(child, pre, post) - } - v.dom.post = post - post++ - return pre, post -} - -// Testing utilities ---------------------------------------- - -// sanityCheckDomTree checks the correctness of the dominator tree -// computed by the LT algorithm by comparing against the dominance -// relation computed by a naive Kildall-style forward dataflow -// analysis (Algorithm 10.16 from the "Dragon" book). -// -func sanityCheckDomTree(f *Function) { - n := len(f.Blocks) - - // D[i] is the set of blocks that dominate f.Blocks[i], - // represented as a bit-set of block indices. - D := make([]big.Int, n) - - one := big.NewInt(1) - - // all is the set of all blocks; constant. - var all big.Int - all.Set(one).Lsh(&all, uint(n)).Sub(&all, one) - - // Initialization. - for i, b := range f.Blocks { - if i == 0 || b == f.Recover { - // A root is dominated only by itself. - D[i].SetBit(&D[0], 0, 1) - } else { - // All other blocks are (initially) dominated - // by every block. - D[i].Set(&all) - } - } - - // Iteration until fixed point. - for changed := true; changed; { - changed = false - for i, b := range f.Blocks { - if i == 0 || b == f.Recover { - continue - } - // Compute intersection across predecessors. - var x big.Int - x.Set(&all) - for _, pred := range b.Preds { - x.And(&x, &D[pred.Index]) - } - x.SetBit(&x, i, 1) // a block always dominates itself. - if D[i].Cmp(&x) != 0 { - D[i].Set(&x) - changed = true - } - } - } - - // Check the entire relation. O(n^2). - // The Recover block (if any) must be treated specially so we skip it. 
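The pre/post numbering assigned by numberDomTree is what makes the Dominates test above constant-time. A tiny self-contained sketch with a hypothetical two-block tree (the struct and the concrete numbers are illustrative, following the numbering scheme described above):

```go
package main

import "fmt"

// dom mirrors the fields numberDomTree fills in: b dominates c iff
// b.pre <= c.pre && c.post <= b.post.
type dom struct{ pre, post int32 }

func dominates(b, c dom) bool { return b.pre <= c.pre && c.post <= b.post }

func main() {
	// A two-block tree, numbered as numberDomTree would number it:
	// the root is entered first (pre 0) and exited last (post 1).
	root := dom{pre: 0, post: 1}
	child := dom{pre: 1, post: 0}

	fmt.Println(dominates(root, child)) // true
	fmt.Println(dominates(child, root)) // false
	fmt.Println(dominates(root, root))  // true: dominance is reflexive
}
```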
- ok := true - for i := 0; i < n; i++ { - for j := 0; j < n; j++ { - b, c := f.Blocks[i], f.Blocks[j] - if c == f.Recover { - continue - } - actual := b.Dominates(c) - expected := D[j].Bit(i) == 1 - if actual != expected { - fmt.Fprintf(os.Stderr, "dominates(%s, %s)==%t, want %t\n", b, c, actual, expected) - ok = false - } - } - } - - preorder := f.DomPreorder() - for _, b := range f.Blocks { - if got := preorder[b.dom.pre]; got != b { - fmt.Fprintf(os.Stderr, "preorder[%d]==%s, want %s\n", b.dom.pre, got, b) - ok = false - } - } - - if !ok { - panic("sanityCheckDomTree failed for " + f.String()) - } - -} - -// Printing functions ---------------------------------------- - -// printDomTree prints the dominator tree as text, using indentation. -func printDomTreeText(buf *bytes.Buffer, v *BasicBlock, indent int) { - fmt.Fprintf(buf, "%*s%s\n", 4*indent, "", v) - for _, child := range v.dom.children { - printDomTreeText(buf, child, indent+1) - } -} - -// printDomTreeDot prints the dominator tree of f in AT&T GraphViz -// (.dot) format. -func printDomTreeDot(buf *bytes.Buffer, f *Function) { - fmt.Fprintln(buf, "//", f) - fmt.Fprintln(buf, "digraph domtree {") - for i, b := range f.Blocks { - v := b.dom - fmt.Fprintf(buf, "\tn%d [label=\"%s (%d, %d)\",shape=\"rectangle\"];\n", v.pre, b, v.pre, v.post) - // TODO(adonovan): improve appearance of edges - // belonging to both dominator tree and CFG. - - // Dominator tree edge. - if i != 0 { - fmt.Fprintf(buf, "\tn%d -> n%d [style=\"solid\",weight=100];\n", v.idom.dom.pre, v.pre) - } - // CFG edges. - for _, pred := range b.Preds { - fmt.Fprintf(buf, "\tn%d -> n%d [style=\"dotted\",weight=0];\n", pred.dom.pre, v.pre) - } - } - fmt.Fprintln(buf, "}") -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/emit.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/emit.go deleted file mode 100644 index 1036988adcb8..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/emit.go +++ /dev/null @@ -1,468 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssa - -// Helpers for emitting SSA instructions. - -import ( - "fmt" - "go/ast" - "go/token" - "go/types" -) - -// emitNew emits to f a new (heap Alloc) instruction allocating an -// object of type typ. pos is the optional source location. -// -func emitNew(f *Function, typ types.Type, pos token.Pos) *Alloc { - v := &Alloc{Heap: true} - v.setType(types.NewPointer(typ)) - v.setPos(pos) - f.emit(v) - return v -} - -// emitLoad emits to f an instruction to load the address addr into a -// new temporary, and returns the value so defined. -// -func emitLoad(f *Function, addr Value) *UnOp { - v := &UnOp{Op: token.MUL, X: addr} - v.setType(deref(addr.Type())) - f.emit(v) - return v -} - -// emitDebugRef emits to f a DebugRef pseudo-instruction associating -// expression e with value v. 
-// -func emitDebugRef(f *Function, e ast.Expr, v Value, isAddr bool) { - if !f.debugInfo() { - return // debugging not enabled - } - if v == nil || e == nil { - panic("nil") - } - var obj types.Object - e = unparen(e) - if id, ok := e.(*ast.Ident); ok { - if isBlankIdent(id) { - return - } - obj = f.Pkg.objectOf(id) - switch obj.(type) { - case *types.Nil, *types.Const, *types.Builtin: - return - } - } - f.emit(&DebugRef{ - X: v, - Expr: e, - IsAddr: isAddr, - object: obj, - }) -} - -// emitArith emits to f code to compute the binary operation op(x, y) -// where op is an eager shift, logical or arithmetic operation. -// (Use emitCompare() for comparisons and Builder.logicalBinop() for -// non-eager operations.) -// -func emitArith(f *Function, op token.Token, x, y Value, t types.Type, pos token.Pos) Value { - switch op { - case token.SHL, token.SHR: - x = emitConv(f, x, t) - // y may be signed or an 'untyped' constant. - // TODO(adonovan): whence signed values? - if b, ok := y.Type().Underlying().(*types.Basic); ok && b.Info()&types.IsUnsigned == 0 { - y = emitConv(f, y, types.Typ[types.Uint64]) - } - - case token.ADD, token.SUB, token.MUL, token.QUO, token.REM, token.AND, token.OR, token.XOR, token.AND_NOT: - x = emitConv(f, x, t) - y = emitConv(f, y, t) - - default: - panic("illegal op in emitArith: " + op.String()) - - } - v := &BinOp{ - Op: op, - X: x, - Y: y, - } - v.setPos(pos) - v.setType(t) - return f.emit(v) -} - -// emitCompare emits to f code compute the boolean result of -// comparison comparison 'x op y'. -// -func emitCompare(f *Function, op token.Token, x, y Value, pos token.Pos) Value { - xt := x.Type().Underlying() - yt := y.Type().Underlying() - - // Special case to optimise a tagless SwitchStmt so that - // these are equivalent - // switch { case e: ...} - // switch true { case e: ... } - // if e==true { ... } - // even in the case when e's type is an interface. - // TODO(adonovan): opt: generalise to x==true, false!=y, etc. - if x == vTrue && op == token.EQL { - if yt, ok := yt.(*types.Basic); ok && yt.Info()&types.IsBoolean != 0 { - return y - } - } - - if types.Identical(xt, yt) { - // no conversion necessary - } else if _, ok := xt.(*types.Interface); ok { - y = emitConv(f, y, x.Type()) - } else if _, ok := yt.(*types.Interface); ok { - x = emitConv(f, x, y.Type()) - } else if _, ok := x.(*Const); ok { - x = emitConv(f, x, y.Type()) - } else if _, ok := y.(*Const); ok { - y = emitConv(f, y, x.Type()) - } else { - // other cases, e.g. channels. No-op. - } - - v := &BinOp{ - Op: op, - X: x, - Y: y, - } - v.setPos(pos) - v.setType(tBool) - return f.emit(v) -} - -// isValuePreserving returns true if a conversion from ut_src to -// ut_dst is value-preserving, i.e. just a change of type. -// Precondition: neither argument is a named type. -// -func isValuePreserving(ut_src, ut_dst types.Type) bool { - // Identical underlying types? - if structTypesIdentical(ut_dst, ut_src) { - return true - } - - switch ut_dst.(type) { - case *types.Chan: - // Conversion between channel types? - _, ok := ut_src.(*types.Chan) - return ok - - case *types.Pointer: - // Conversion between pointers with identical base types? - _, ok := ut_src.(*types.Pointer) - return ok - } - return false -} - -// emitConv emits to f code to convert Value val to exactly type typ, -// and returns the converted value. Implicit conversions are required -// by language assignability rules in assignments, parameter passing, -// etc. Conversions cannot fail dynamically. 
-// -func emitConv(f *Function, val Value, typ types.Type) Value { - t_src := val.Type() - - // Identical types? Conversion is a no-op. - if types.Identical(t_src, typ) { - return val - } - - ut_dst := typ.Underlying() - ut_src := t_src.Underlying() - - // Just a change of type, but not value or representation? - if isValuePreserving(ut_src, ut_dst) { - c := &ChangeType{X: val} - c.setType(typ) - return f.emit(c) - } - - // Conversion to, or construction of a value of, an interface type? - if _, ok := ut_dst.(*types.Interface); ok { - // Assignment from one interface type to another? - if _, ok := ut_src.(*types.Interface); ok { - c := &ChangeInterface{X: val} - c.setType(typ) - return f.emit(c) - } - - // Untyped nil constant? Return interface-typed nil constant. - if ut_src == tUntypedNil { - return nilConst(typ) - } - - // Convert (non-nil) "untyped" literals to their default type. - if t, ok := ut_src.(*types.Basic); ok && t.Info()&types.IsUntyped != 0 { - val = emitConv(f, val, DefaultType(ut_src)) - } - - f.Pkg.Prog.needMethodsOf(val.Type()) - mi := &MakeInterface{X: val} - mi.setType(typ) - return f.emit(mi) - } - - // Conversion of a compile-time constant value? - if c, ok := val.(*Const); ok { - if _, ok := ut_dst.(*types.Basic); ok || c.IsNil() { - // Conversion of a compile-time constant to - // another constant type results in a new - // constant of the destination type and - // (initially) the same abstract value. - // We don't truncate the value yet. - return NewConst(c.Value, typ) - } - - // We're converting from constant to non-constant type, - // e.g. string -> []byte/[]rune. - } - - // A representation-changing conversion? - // At least one of {ut_src,ut_dst} must be *Basic. - // (The other may be []byte or []rune.) - _, ok1 := ut_src.(*types.Basic) - _, ok2 := ut_dst.(*types.Basic) - if ok1 || ok2 { - c := &Convert{X: val} - c.setType(typ) - return f.emit(c) - } - - panic(fmt.Sprintf("in %s: cannot convert %s (%s) to %s", f, val, val.Type(), typ)) -} - -// emitStore emits to f an instruction to store value val at location -// addr, applying implicit conversions as required by assignability rules. -// -func emitStore(f *Function, addr, val Value, pos token.Pos) *Store { - s := &Store{ - Addr: addr, - Val: emitConv(f, val, deref(addr.Type())), - pos: pos, - } - f.emit(s) - return s -} - -// emitJump emits to f a jump to target, and updates the control-flow graph. -// Postcondition: f.currentBlock is nil. -// -func emitJump(f *Function, target *BasicBlock) { - b := f.currentBlock - b.emit(new(Jump)) - addEdge(b, target) - f.currentBlock = nil -} - -// emitIf emits to f a conditional jump to tblock or fblock based on -// cond, and updates the control-flow graph. -// Postcondition: f.currentBlock is nil. -// -func emitIf(f *Function, cond Value, tblock, fblock *BasicBlock) { - b := f.currentBlock - b.emit(&If{Cond: cond}) - addEdge(b, tblock) - addEdge(b, fblock) - f.currentBlock = nil -} - -// emitExtract emits to f an instruction to extract the index'th -// component of tuple. It returns the extracted value. -// -func emitExtract(f *Function, tuple Value, index int) Value { - e := &Extract{Tuple: tuple, Index: index} - e.setType(tuple.Type().(*types.Tuple).At(index).Type()) - return f.emit(e) -} - -// emitTypeAssert emits to f a type assertion value := x.(t) and -// returns the value. x.Type() must be an interface. 
-// -func emitTypeAssert(f *Function, x Value, t types.Type, pos token.Pos) Value { - a := &TypeAssert{X: x, AssertedType: t} - a.setPos(pos) - a.setType(t) - return f.emit(a) -} - -// emitTypeTest emits to f a type test value,ok := x.(t) and returns -// a (value, ok) tuple. x.Type() must be an interface. -// -func emitTypeTest(f *Function, x Value, t types.Type, pos token.Pos) Value { - a := &TypeAssert{ - X: x, - AssertedType: t, - CommaOk: true, - } - a.setPos(pos) - a.setType(types.NewTuple( - newVar("value", t), - varOk, - )) - return f.emit(a) -} - -// emitTailCall emits to f a function call in tail position. The -// caller is responsible for all fields of 'call' except its type. -// Intended for wrapper methods. -// Precondition: f does/will not use deferred procedure calls. -// Postcondition: f.currentBlock is nil. -// -func emitTailCall(f *Function, call *Call) { - tresults := f.Signature.Results() - nr := tresults.Len() - if nr == 1 { - call.typ = tresults.At(0).Type() - } else { - call.typ = tresults - } - tuple := f.emit(call) - var ret Return - switch nr { - case 0: - // no-op - case 1: - ret.Results = []Value{tuple} - default: - for i := 0; i < nr; i++ { - v := emitExtract(f, tuple, i) - // TODO(adonovan): in principle, this is required: - // v = emitConv(f, o.Type, f.Signature.Results[i].Type) - // but in practice emitTailCall is only used when - // the types exactly match. - ret.Results = append(ret.Results, v) - } - } - f.emit(&ret) - f.currentBlock = nil -} - -// emitImplicitSelections emits to f code to apply the sequence of -// implicit field selections specified by indices to base value v, and -// returns the selected value. -// -// If v is the address of a struct, the result will be the address of -// a field; if it is the value of a struct, the result will be the -// value of a field. -// -func emitImplicitSelections(f *Function, v Value, indices []int) Value { - for _, index := range indices { - fld := deref(v.Type()).Underlying().(*types.Struct).Field(index) - - if isPointer(v.Type()) { - instr := &FieldAddr{ - X: v, - Field: index, - } - instr.setType(types.NewPointer(fld.Type())) - v = f.emit(instr) - // Load the field's value iff indirectly embedded. - if isPointer(fld.Type()) { - v = emitLoad(f, v) - } - } else { - instr := &Field{ - X: v, - Field: index, - } - instr.setType(fld.Type()) - v = f.emit(instr) - } - } - return v -} - -// emitFieldSelection emits to f code to select the index'th field of v. -// -// If wantAddr, the input must be a pointer-to-struct and the result -// will be the field's address; otherwise the result will be the -// field's value. -// Ident id is used for position and debug info. -// -func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast.Ident) Value { - fld := deref(v.Type()).Underlying().(*types.Struct).Field(index) - if isPointer(v.Type()) { - instr := &FieldAddr{ - X: v, - Field: index, - } - instr.setPos(id.Pos()) - instr.setType(types.NewPointer(fld.Type())) - v = f.emit(instr) - // Load the field's value iff we don't want its address. - if !wantAddr { - v = emitLoad(f, v) - } - } else { - instr := &Field{ - X: v, - Field: index, - } - instr.setPos(id.Pos()) - instr.setType(fld.Type()) - v = f.emit(instr) - } - emitDebugRef(f, id, v, wantAddr) - return v -} - -// zeroValue emits to f code to produce a zero value of type t, -// and returns it. 
-// -func zeroValue(f *Function, t types.Type) Value { - switch t.Underlying().(type) { - case *types.Struct, *types.Array: - return emitLoad(f, f.addLocal(t, token.NoPos)) - default: - return zeroConst(t) - } -} - -// createRecoverBlock emits to f a block of code to return after a -// recovered panic, and sets f.Recover to it. -// -// If f's result parameters are named, the code loads and returns -// their current values, otherwise it returns the zero values of their -// type. -// -// Idempotent. -// -func createRecoverBlock(f *Function) { - if f.Recover != nil { - return // already created - } - saved := f.currentBlock - - f.Recover = f.newBasicBlock("recover") - f.currentBlock = f.Recover - - var results []Value - if f.namedResults != nil { - // Reload NRPs to form value tuple. - for _, r := range f.namedResults { - results = append(results, emitLoad(f, r)) - } - } else { - R := f.Signature.Results() - for i, n := 0, R.Len(); i < n; i++ { - T := R.At(i).Type() - - // Return zero value of each result type. - results = append(results, zeroValue(f, T)) - } - } - f.emit(&Return{Results: results}) - - f.currentBlock = saved -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/func.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/func.go deleted file mode 100644 index 53635ba0114e..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/func.go +++ /dev/null @@ -1,701 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssa - -// This file implements the Function and BasicBlock types. - -import ( - "bytes" - "fmt" - "go/ast" - "go/token" - "go/types" - "io" - "os" - "strings" -) - -// addEdge adds a control-flow graph edge from from to to. -func addEdge(from, to *BasicBlock) { - from.Succs = append(from.Succs, to) - to.Preds = append(to.Preds, from) -} - -// Parent returns the function that contains block b. -func (b *BasicBlock) Parent() *Function { return b.parent } - -// String returns a human-readable label of this block. -// It is not guaranteed unique within the function. -// -func (b *BasicBlock) String() string { - return fmt.Sprintf("%d", b.Index) -} - -// emit appends an instruction to the current basic block. -// If the instruction defines a Value, it is returned. -// -func (b *BasicBlock) emit(i Instruction) Value { - i.setBlock(b) - b.Instrs = append(b.Instrs, i) - v, _ := i.(Value) - return v -} - -// predIndex returns the i such that b.Preds[i] == c or panics if -// there is none. -func (b *BasicBlock) predIndex(c *BasicBlock) int { - for i, pred := range b.Preds { - if pred == c { - return i - } - } - panic(fmt.Sprintf("no edge %s -> %s", c, b)) -} - -// hasPhi returns true if b.Instrs contains φ-nodes. -func (b *BasicBlock) hasPhi() bool { - _, ok := b.Instrs[0].(*Phi) - return ok -} - -func (b *BasicBlock) Phis() []Instruction { - return b.phis() -} - -// phis returns the prefix of b.Instrs containing all the block's φ-nodes. -func (b *BasicBlock) phis() []Instruction { - for i, instr := range b.Instrs { - if _, ok := instr.(*Phi); !ok { - return b.Instrs[:i] - } - } - return nil // unreachable in well-formed blocks -} - -// replacePred replaces all occurrences of p in b's predecessor list with q. -// Ordinarily there should be at most one. 
-// -func (b *BasicBlock) replacePred(p, q *BasicBlock) { - for i, pred := range b.Preds { - if pred == p { - b.Preds[i] = q - } - } -} - -// replaceSucc replaces all occurrences of p in b's successor list with q. -// Ordinarily there should be at most one. -// -func (b *BasicBlock) replaceSucc(p, q *BasicBlock) { - for i, succ := range b.Succs { - if succ == p { - b.Succs[i] = q - } - } -} - -func (b *BasicBlock) RemovePred(p *BasicBlock) { - b.removePred(p) -} - -// removePred removes all occurrences of p in b's -// predecessor list and φ-nodes. -// Ordinarily there should be at most one. -// -func (b *BasicBlock) removePred(p *BasicBlock) { - phis := b.phis() - - // We must preserve edge order for φ-nodes. - j := 0 - for i, pred := range b.Preds { - if pred != p { - b.Preds[j] = b.Preds[i] - // Strike out φ-edge too. - for _, instr := range phis { - phi := instr.(*Phi) - phi.Edges[j] = phi.Edges[i] - } - j++ - } - } - // Nil out b.Preds[j:] and φ-edges[j:] to aid GC. - for i := j; i < len(b.Preds); i++ { - b.Preds[i] = nil - for _, instr := range phis { - instr.(*Phi).Edges[i] = nil - } - } - b.Preds = b.Preds[:j] - for _, instr := range phis { - phi := instr.(*Phi) - phi.Edges = phi.Edges[:j] - } -} - -// Destinations associated with unlabelled for/switch/select stmts. -// We push/pop one of these as we enter/leave each construct and for -// each BranchStmt we scan for the innermost target of the right type. -// -type targets struct { - tail *targets // rest of stack - _break *BasicBlock - _continue *BasicBlock - _fallthrough *BasicBlock -} - -// Destinations associated with a labelled block. -// We populate these as labels are encountered in forward gotos or -// labelled statements. -// -type lblock struct { - _goto *BasicBlock - _break *BasicBlock - _continue *BasicBlock -} - -// labelledBlock returns the branch target associated with the -// specified label, creating it if needed. -// -func (f *Function) labelledBlock(label *ast.Ident) *lblock { - lb := f.lblocks[label.Obj] - if lb == nil { - lb = &lblock{_goto: f.newBasicBlock(label.Name)} - if f.lblocks == nil { - f.lblocks = make(map[*ast.Object]*lblock) - } - f.lblocks[label.Obj] = lb - } - return lb -} - -// addParam adds a (non-escaping) parameter to f.Params of the -// specified name, type and source position. -// -func (f *Function) addParam(name string, typ types.Type, pos token.Pos) *Parameter { - v := &Parameter{ - name: name, - typ: typ, - pos: pos, - parent: f, - } - f.Params = append(f.Params, v) - return v -} - -func (f *Function) addParamObj(obj types.Object) *Parameter { - name := obj.Name() - if name == "" { - name = fmt.Sprintf("arg%d", len(f.Params)) - } - param := f.addParam(name, obj.Type(), obj.Pos()) - param.object = obj - return param -} - -// addSpilledParam declares a parameter that is pre-spilled to the -// stack; the function body will load/store the spilled location. -// Subsequent lifting will eliminate spills where possible. -// -func (f *Function) addSpilledParam(obj types.Object) { - param := f.addParamObj(obj) - spill := &Alloc{Comment: obj.Name()} - spill.setType(types.NewPointer(obj.Type())) - spill.setPos(obj.Pos()) - f.objects[obj] = spill - f.Locals = append(f.Locals, spill) - f.emit(spill) - f.emit(&Store{Addr: spill, Val: param}) -} - -// startBody initializes the function prior to generating SSA code for its body. -// Precondition: f.Type() already set. 
-// -func (f *Function) startBody() { - f.currentBlock = f.newBasicBlock("entry") - f.objects = make(map[types.Object]Value) // needed for some synthetics, e.g. init -} - -// createSyntacticParams populates f.Params and generates code (spills -// and named result locals) for all the parameters declared in the -// syntax. In addition it populates the f.objects mapping. -// -// Preconditions: -// f.startBody() was called. -// Postcondition: -// len(f.Params) == len(f.Signature.Params) + (f.Signature.Recv() ? 1 : 0) -// -func (f *Function) createSyntacticParams(recv *ast.FieldList, functype *ast.FuncType) { - // Receiver (at most one inner iteration). - if recv != nil { - for _, field := range recv.List { - for _, n := range field.Names { - f.addSpilledParam(f.Pkg.info.Defs[n]) - } - // Anonymous receiver? No need to spill. - if field.Names == nil { - f.addParamObj(f.Signature.Recv()) - } - } - } - - // Parameters. - if functype.Params != nil { - n := len(f.Params) // 1 if has recv, 0 otherwise - for _, field := range functype.Params.List { - for _, n := range field.Names { - f.addSpilledParam(f.Pkg.info.Defs[n]) - } - // Anonymous parameter? No need to spill. - if field.Names == nil { - f.addParamObj(f.Signature.Params().At(len(f.Params) - n)) - } - } - } - - // Named results. - if functype.Results != nil { - for _, field := range functype.Results.List { - // Implicit "var" decl of locals for named results. - for _, n := range field.Names { - f.namedResults = append(f.namedResults, f.addLocalForIdent(n)) - } - } - } -} - -// numberRegisters assigns numbers to all SSA registers -// (value-defining Instructions) in f, to aid debugging. -// (Non-Instruction Values are named at construction.) -// -func numberRegisters(f *Function) { - v := 0 - for _, b := range f.Blocks { - for _, instr := range b.Instrs { - switch instr.(type) { - case Value: - instr.(interface { - setNum(int) - }).setNum(v) - v++ - } - } - } -} - -// buildReferrers populates the def/use information in all non-nil -// Value.Referrers slice. -// Precondition: all such slices are initially empty. -func buildReferrers(f *Function) { - var rands []*Value - for _, b := range f.Blocks { - for _, instr := range b.Instrs { - rands = instr.Operands(rands[:0]) // recycle storage - for _, rand := range rands { - if r := *rand; r != nil { - if ref := r.Referrers(); ref != nil { - *ref = append(*ref, instr) - } - } - } - } - } -} - -// finishBody() finalizes the function after SSA code generation of its body. -func (f *Function) finishBody() { - f.objects = nil - f.currentBlock = nil - f.lblocks = nil - - // Don't pin the AST in memory (except in debug mode). - if n := f.syntax; n != nil && !f.debugInfo() { - f.syntax = extentNode{n.Pos(), n.End()} - } - - // Remove from f.Locals any Allocs that escape to the heap. - j := 0 - for _, l := range f.Locals { - if !l.Heap { - f.Locals[j] = l - j++ - } - } - // Nil out f.Locals[j:] to aid GC. 
- for i := j; i < len(f.Locals); i++ { - f.Locals[i] = nil - } - f.Locals = f.Locals[:j] - - optimizeBlocks(f) - - buildReferrers(f) - - buildDomTree(f) - - if f.Prog.mode&NaiveForm == 0 { - // For debugging pre-state of lifting pass: - // numberRegisters(f) - // f.WriteTo(os.Stderr) - lift(f) - } - - f.namedResults = nil // (used by lifting) - - numberRegisters(f) - - if f.Prog.mode&PrintFunctions != 0 { - printMu.Lock() - f.WriteTo(os.Stdout) - printMu.Unlock() - } - - if f.Prog.mode&SanityCheckFunctions != 0 { - mustSanityCheck(f, nil) - } -} - -func (f *Function) RemoveNilBlocks() { - f.removeNilBlocks() -} - -// removeNilBlocks eliminates nils from f.Blocks and updates each -// BasicBlock.Index. Use this after any pass that may delete blocks. -// -func (f *Function) removeNilBlocks() { - j := 0 - for _, b := range f.Blocks { - if b != nil { - b.Index = j - f.Blocks[j] = b - j++ - } - } - // Nil out f.Blocks[j:] to aid GC. - for i := j; i < len(f.Blocks); i++ { - f.Blocks[i] = nil - } - f.Blocks = f.Blocks[:j] -} - -// SetDebugMode sets the debug mode for package pkg. If true, all its -// functions will include full debug info. This greatly increases the -// size of the instruction stream, and causes Functions to depend upon -// the ASTs, potentially keeping them live in memory for longer. -// -func (pkg *Package) SetDebugMode(debug bool) { - // TODO(adonovan): do we want ast.File granularity? - pkg.debug = debug -} - -// debugInfo reports whether debug info is wanted for this function. -func (f *Function) debugInfo() bool { - return f.Pkg != nil && f.Pkg.debug -} - -// addNamedLocal creates a local variable, adds it to function f and -// returns it. Its name and type are taken from obj. Subsequent -// calls to f.lookup(obj) will return the same local. -// -func (f *Function) addNamedLocal(obj types.Object) *Alloc { - l := f.addLocal(obj.Type(), obj.Pos()) - l.Comment = obj.Name() - f.objects[obj] = l - return l -} - -func (f *Function) addLocalForIdent(id *ast.Ident) *Alloc { - return f.addNamedLocal(f.Pkg.info.Defs[id]) -} - -// addLocal creates an anonymous local variable of type typ, adds it -// to function f and returns it. pos is the optional source location. -// -func (f *Function) addLocal(typ types.Type, pos token.Pos) *Alloc { - v := &Alloc{} - v.setType(types.NewPointer(typ)) - v.setPos(pos) - f.Locals = append(f.Locals, v) - f.emit(v) - return v -} - -// lookup returns the address of the named variable identified by obj -// that is local to function f or one of its enclosing functions. -// If escaping, the reference comes from a potentially escaping pointer -// expression and the referent must be heap-allocated. -// -func (f *Function) lookup(obj types.Object, escaping bool) Value { - if v, ok := f.objects[obj]; ok { - if alloc, ok := v.(*Alloc); ok && escaping { - alloc.Heap = true - } - return v // function-local var (address) - } - - // Definition must be in an enclosing function; - // plumb it through intervening closures. - if f.parent == nil { - panic("no ssa.Value for " + obj.String()) - } - outer := f.parent.lookup(obj, true) // escaping - v := &FreeVar{ - name: obj.Name(), - typ: outer.Type(), - pos: outer.Pos(), - outer: outer, - parent: f, - } - f.objects[obj] = v - f.FreeVars = append(f.FreeVars, v) - return v -} - -// emit emits the specified instruction to function f. 
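`lookup` above plumbs variables of enclosing functions through intervening closures as `FreeVar`s, and forces the referent's `Alloc` onto the heap since it outlives its frame. The classic source pattern:

```go
package main

import "fmt"

// n is defined in counter but referenced by the returned closure:
// the closure's lookup(n, escaping=true) marks n's Alloc as Heap
// and records a FreeVar in the anonymous function.
func counter() func() int {
	n := 0
	return func() int {
		n++
		return n
	}
}

func main() {
	next := counter()
	fmt.Println(next(), next(), next()) // 1 2 3
}
```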
-func (f *Function) emit(instr Instruction) Value { - return f.currentBlock.emit(instr) -} - -// RelString returns the full name of this function, qualified by -// package name, receiver type, etc. -// -// The specific formatting rules are not guaranteed and may change. -// -// Examples: -// "math.IsNaN" // a package-level function -// "(*bytes.Buffer).Bytes" // a declared method or a wrapper -// "(*bytes.Buffer).Bytes$thunk" // thunk (func wrapping method; receiver is param 0) -// "(*bytes.Buffer).Bytes$bound" // bound (func wrapping method; receiver supplied by closure) -// "main.main$1" // an anonymous function in main -// "main.init#1" // a declared init function -// "main.init" // the synthesized package initializer -// -// When these functions are referred to from within the same package -// (i.e. from == f.Pkg.Object), they are rendered without the package path. -// For example: "IsNaN", "(*Buffer).Bytes", etc. -// -// All non-synthetic functions have distinct package-qualified names. -// (But two methods may have the same name "(T).f" if one is a synthetic -// wrapper promoting a non-exported method "f" from another package; in -// that case, the strings are equal but the identifiers "f" are distinct.) -// -func (f *Function) RelString(from *types.Package) string { - // Anonymous? - if f.parent != nil { - // An anonymous function's Name() looks like "parentName$1", - // but its String() should include the type/package/etc. - parent := f.parent.RelString(from) - for i, anon := range f.parent.AnonFuncs { - if anon == f { - return fmt.Sprintf("%s$%d", parent, 1+i) - } - } - - return f.name // should never happen - } - - // Method (declared or wrapper)? - if recv := f.Signature.Recv(); recv != nil { - return f.relMethod(from, recv.Type()) - } - - // Thunk? - if f.method != nil { - return f.relMethod(from, f.method.Recv()) - } - - // Bound? - if len(f.FreeVars) == 1 && strings.HasSuffix(f.name, "$bound") { - return f.relMethod(from, f.FreeVars[0].Type()) - } - - // Package-level function? - // Prefix with package name for cross-package references only. - if p := f.pkg(); p != nil && p != from { - return fmt.Sprintf("%s.%s", p.Path(), f.name) - } - - // Unknown. - return f.name -} - -func (f *Function) relMethod(from *types.Package, recv types.Type) string { - return fmt.Sprintf("(%s).%s", relType(recv, from), f.name) -} - -// writeSignature writes to buf the signature sig in declaration syntax. -func writeSignature(buf *bytes.Buffer, from *types.Package, name string, sig *types.Signature, params []*Parameter) { - buf.WriteString("func ") - if recv := sig.Recv(); recv != nil { - buf.WriteString("(") - if n := params[0].Name(); n != "" { - buf.WriteString(n) - buf.WriteString(" ") - } - types.WriteType(buf, params[0].Type(), types.RelativeTo(from)) - buf.WriteString(") ") - } - buf.WriteString(name) - types.WriteSignature(buf, sig, types.RelativeTo(from)) -} - -func (f *Function) pkg() *types.Package { - if f.Pkg != nil { - return f.Pkg.Pkg - } - return nil -} - -var _ io.WriterTo = (*Function)(nil) // *Function implements io.Writer - -func (f *Function) WriteTo(w io.Writer) (int64, error) { - var buf bytes.Buffer - WriteFunction(&buf, f) - n, err := w.Write(buf.Bytes()) - return int64(n), err -} - -// WriteFunction writes to buf a human-readable "disassembly" of f. 
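The naming scheme documented on `RelString` above corresponds to these source constructs (a sketch; the `$bound` and `$thunk` wrappers are synthesized on demand):

```go
package main

import "bytes"

func main() {
	var b bytes.Buffer

	_ = b.Len               // method value: "(*bytes.Buffer).Len$bound"
	_ = (*bytes.Buffer).Len // method expression: "(*bytes.Buffer).Len$thunk"

	f := func() {} // anonymous function: "main.main$1"
	f()
}
```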
-func WriteFunction(buf *bytes.Buffer, f *Function) { - fmt.Fprintf(buf, "# Name: %s\n", f.String()) - if f.Pkg != nil { - fmt.Fprintf(buf, "# Package: %s\n", f.Pkg.Pkg.Path()) - } - if syn := f.Synthetic; syn != "" { - fmt.Fprintln(buf, "# Synthetic:", syn) - } - if pos := f.Pos(); pos.IsValid() { - fmt.Fprintf(buf, "# Location: %s\n", f.Prog.Fset.Position(pos)) - } - - if f.parent != nil { - fmt.Fprintf(buf, "# Parent: %s\n", f.parent.Name()) - } - - if f.Recover != nil { - fmt.Fprintf(buf, "# Recover: %s\n", f.Recover) - } - - from := f.pkg() - - if f.FreeVars != nil { - buf.WriteString("# Free variables:\n") - for i, fv := range f.FreeVars { - fmt.Fprintf(buf, "# % 3d:\t%s %s\n", i, fv.Name(), relType(fv.Type(), from)) - } - } - - if len(f.Locals) > 0 { - buf.WriteString("# Locals:\n") - for i, l := range f.Locals { - fmt.Fprintf(buf, "# % 3d:\t%s %s\n", i, l.Name(), relType(deref(l.Type()), from)) - } - } - writeSignature(buf, from, f.Name(), f.Signature, f.Params) - buf.WriteString(":\n") - - if f.Blocks == nil { - buf.WriteString("\t(external)\n") - } - - // NB. column calculations are confused by non-ASCII - // characters and assume 8-space tabs. - const punchcard = 80 // for old time's sake. - const tabwidth = 8 - for _, b := range f.Blocks { - if b == nil { - // Corrupt CFG. - fmt.Fprintf(buf, ".nil:\n") - continue - } - n, _ := fmt.Fprintf(buf, "%d:", b.Index) - bmsg := fmt.Sprintf("%s P:%d S:%d", b.Comment, len(b.Preds), len(b.Succs)) - fmt.Fprintf(buf, "%*s%s\n", punchcard-1-n-len(bmsg), "", bmsg) - - if false { // CFG debugging - fmt.Fprintf(buf, "\t# CFG: %s --> %s --> %s\n", b.Preds, b, b.Succs) - } - for _, instr := range b.Instrs { - buf.WriteString("\t") - switch v := instr.(type) { - case Value: - l := punchcard - tabwidth - // Left-align the instruction. - if name := v.Name(); name != "" { - n, _ := fmt.Fprintf(buf, "%s = ", name) - l -= n - } - n, _ := buf.WriteString(instr.String()) - l -= n - // Right-align the type if there's space. - if t := v.Type(); t != nil { - buf.WriteByte(' ') - ts := relType(t, from) - l -= len(ts) + len(" ") // (spaces before and after type) - if l > 0 { - fmt.Fprintf(buf, "%*s", l, "") - } - buf.WriteString(ts) - } - case nil: - // Be robust against bad transforms. - buf.WriteString("") - default: - buf.WriteString(instr.String()) - } - buf.WriteString("\n") - } - } - fmt.Fprintf(buf, "\n") -} - -// newBasicBlock adds to f a new basic block and returns it. It does -// not automatically become the current block for subsequent calls to emit. -// comment is an optional string for more readable debugging output. -// -func (f *Function) newBasicBlock(comment string) *BasicBlock { - b := &BasicBlock{ - Index: len(f.Blocks), - Comment: comment, - parent: f, - } - b.Succs = b.succs2[:0] - f.Blocks = append(f.Blocks, b) - return b -} - -// NewFunction returns a new synthetic Function instance belonging to -// prog, with its name and signature fields set as specified. -// -// The caller is responsible for initializing the remaining fields of -// the function object, e.g. Pkg, Params, Blocks. -// -// It is practically impossible for clients to construct well-formed -// SSA functions/packages/programs directly, so we assume this is the -// job of the Builder alone. NewFunction exists to provide clients a -// little flexibility. For example, analysis tools may wish to -// construct fake Functions for the root of the callgraph, a fake -// "reflect" package, etc. -// -// TODO(adonovan): think harder about the API here. 
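For orientation, `WriteFunction` above renders output along these lines for a trivial function (spacing, location, and register numbers are illustrative, not byte-exact):

```
# Name: main.add
# Package: main
# Location: add.go:3:6
func add(x int, y int) int:
0:                                                       entry P:0 S:0
	t0 = x + y                                                   int
	return t0
```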
-// -func (prog *Program) NewFunction(name string, sig *types.Signature, provenance string) *Function { - return &Function{Prog: prog, name: name, Signature: sig, Synthetic: provenance} -} - -type extentNode [2]token.Pos - -func (n extentNode) Pos() token.Pos { return n[0] } -func (n extentNode) End() token.Pos { return n[1] } - -// Syntax returns an ast.Node whose Pos/End methods provide the -// lexical extent of the function if it was defined by Go source code -// (f.Synthetic==""), or nil otherwise. -// -// If f was built with debug information (see Package.SetDebugRef), -// the result is the *ast.FuncDecl or *ast.FuncLit that declared the -// function. Otherwise, it is an opaque Node providing only position -// information; this avoids pinning the AST in memory. -// -func (f *Function) Syntax() ast.Node { return f.syntax } diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/identical.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/identical.go deleted file mode 100644 index 53cbee107b6b..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/identical.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build go1.8 - -package ssa - -import "go/types" - -var structTypesIdentical = types.IdenticalIgnoreTags diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/identical_17.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/identical_17.go deleted file mode 100644 index da89d3339a5d..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/identical_17.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !go1.8 - -package ssa - -import "go/types" - -var structTypesIdentical = types.Identical diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/lift.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/lift.go deleted file mode 100644 index 048e9b03260b..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/lift.go +++ /dev/null @@ -1,653 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssa - -// This file defines the lifting pass which tries to "lift" Alloc -// cells (new/local variables) into SSA registers, replacing loads -// with the dominating stored value, eliminating loads and stores, and -// inserting φ-nodes as needed. - -// Cited papers and resources: -// -// Ron Cytron et al. 1991. Efficiently computing SSA form... -// http://doi.acm.org/10.1145/115372.115320 -// -// Cooper, Harvey, Kennedy. 2001. A Simple, Fast Dominance Algorithm. -// Software Practice and Experience 2001, 4:1-10. -// http://www.hipersoft.rice.edu/grads/publications/dom14.pdf -// -// Daniel Berlin, llvmdev mailing list, 2012. -// http://lists.cs.uiuc.edu/pipermail/llvmdev/2012-January/046638.html -// (Be sure to expand the whole thread.) - -// TODO(adonovan): opt: there are many optimizations worth evaluating, and -// the conventional wisdom for SSA construction is that a simple -// algorithm well engineered often beats those of better asymptotic -// complexity on all but the most egregious inputs. -// -// Danny Berlin suggests that the Cooper et al. algorithm for -// computing the dominance frontier is superior to Cytron et al. -// Furthermore he recommends that rather than computing the DF for the -// whole function then renaming all alloc cells, it may be cheaper to -// compute the DF for each alloc cell separately and throw it away. 
-// -// Consider exploiting liveness information to avoid creating dead -// φ-nodes which we then immediately remove. -// -// Also see many other "TODO: opt" suggestions in the code. - -import ( - "fmt" - "go/token" - "go/types" - "math/big" - "os" -) - -// If true, show diagnostic information at each step of lifting. -// Very verbose. -const debugLifting = false - -// domFrontier maps each block to the set of blocks in its dominance -// frontier. The outer slice is conceptually a map keyed by -// Block.Index. The inner slice is conceptually a set, possibly -// containing duplicates. -// -// TODO(adonovan): opt: measure impact of dups; consider a packed bit -// representation, e.g. big.Int, and bitwise parallel operations for -// the union step in the Children loop. -// -// domFrontier's methods mutate the slice's elements but not its -// length, so their receivers needn't be pointers. -// -type domFrontier [][]*BasicBlock - -func (df domFrontier) add(u, v *BasicBlock) { - p := &df[u.Index] - *p = append(*p, v) -} - -// build builds the dominance frontier df for the dominator (sub)tree -// rooted at u, using the Cytron et al. algorithm. -// -// TODO(adonovan): opt: consider Berlin approach, computing pruned SSA -// by pruning the entire IDF computation, rather than merely pruning -// the DF -> IDF step. -func (df domFrontier) build(u *BasicBlock) { - // Encounter each node u in postorder of dom tree. - for _, child := range u.dom.children { - df.build(child) - } - for _, vb := range u.Succs { - if v := vb.dom; v.idom != u { - df.add(u, vb) - } - } - for _, w := range u.dom.children { - for _, vb := range df[w.Index] { - // TODO(adonovan): opt: use word-parallel bitwise union. - if v := vb.dom; v.idom != u { - df.add(u, vb) - } - } - } -} - -func buildDomFrontier(fn *Function) domFrontier { - df := make(domFrontier, len(fn.Blocks)) - df.build(fn.Blocks[0]) - if fn.Recover != nil { - df.build(fn.Recover) - } - return df -} - -func removeInstr(refs []Instruction, instr Instruction) []Instruction { - i := 0 - for _, ref := range refs { - if ref == instr { - continue - } - refs[i] = ref - i++ - } - for j := i; j != len(refs); j++ { - refs[j] = nil // aid GC - } - return refs[:i] -} - -// lift replaces local and new Allocs accessed only with -// load/store by SSA registers, inserting φ-nodes where necessary. -// The result is a program in classical pruned SSA form. -// -// Preconditions: -// - fn has no dead blocks (blockopt has run). -// - Def/use info (Operands and Referrers) is up-to-date. -// - The dominator tree is up-to-date. -// -func lift(fn *Function) { - // TODO(adonovan): opt: lots of little optimizations may be - // worthwhile here, especially if they cause us to avoid - // buildDomFrontier. For example: - // - // - Alloc never loaded? Eliminate. - // - Alloc never stored? Replace all loads with a zero constant. - // - Alloc stored once? Replace loads with dominating store; - // don't forget that an Alloc is itself an effective store - // of zero. - // - Alloc used only within a single block? - // Use degenerate algorithm avoiding φ-nodes. - // - Consider synergy with scalar replacement of aggregates (SRA). - // e.g. *(&x.f) where x is an Alloc. - // Perhaps we'd get better results if we generated this as x.f - // i.e. Field(x, .f) instead of Load(FieldIndex(x, .f)). - // Unclear. - // - // But we will start with the simplest correct code. 
- df := buildDomFrontier(fn) - - if debugLifting { - title := false - for i, blocks := range df { - if blocks != nil { - if !title { - fmt.Fprintf(os.Stderr, "Dominance frontier of %s:\n", fn) - title = true - } - fmt.Fprintf(os.Stderr, "\t%s: %s\n", fn.Blocks[i], blocks) - } - } - } - - newPhis := make(newPhiMap) - - // During this pass we will replace some BasicBlock.Instrs - // (allocs, loads and stores) with nil, keeping a count in - // BasicBlock.gaps. At the end we will reset Instrs to the - // concatenation of all non-dead newPhis and non-nil Instrs - // for the block, reusing the original array if space permits. - - // While we're here, we also eliminate 'rundefers' - // instructions in functions that contain no 'defer' - // instructions. - usesDefer := false - - // A counter used to generate ~unique ids for Phi nodes, as an - // aid to debugging. We use large numbers to make them highly - // visible. All nodes are renumbered later. - fresh := 1000 - - // Determine which allocs we can lift and number them densely. - // The renaming phase uses this numbering for compact maps. - numAllocs := 0 - for _, b := range fn.Blocks { - b.gaps = 0 - b.rundefers = 0 - for _, instr := range b.Instrs { - switch instr := instr.(type) { - case *Alloc: - index := -1 - if liftAlloc(df, instr, newPhis, &fresh) { - index = numAllocs - numAllocs++ - } - instr.index = index - case *Defer: - usesDefer = true - case *RunDefers: - b.rundefers++ - } - } - } - - // renaming maps an alloc (keyed by index) to its replacement - // value. Initially the renaming contains nil, signifying the - // zero constant of the appropriate type; we construct the - // Const lazily at most once on each path through the domtree. - // TODO(adonovan): opt: cache per-function not per subtree. - renaming := make([]Value, numAllocs) - - // Renaming. - rename(fn.Blocks[0], renaming, newPhis) - - // Eliminate dead φ-nodes. - removeDeadPhis(fn.Blocks, newPhis) - - // Prepend remaining live φ-nodes to each block. - for _, b := range fn.Blocks { - nps := newPhis[b] - j := len(nps) - - rundefersToKill := b.rundefers - if usesDefer { - rundefersToKill = 0 - } - - if j+b.gaps+rundefersToKill == 0 { - continue // fast path: no new phis or gaps - } - - // Compact nps + non-nil Instrs into a new slice. - // TODO(adonovan): opt: compact in situ (rightwards) - // if Instrs has sufficient space or slack. - dst := make([]Instruction, len(b.Instrs)+j-b.gaps-rundefersToKill) - for i, np := range nps { - dst[i] = np.phi - } - for _, instr := range b.Instrs { - if instr == nil { - continue - } - if !usesDefer { - if _, ok := instr.(*RunDefers); ok { - continue - } - } - dst[j] = instr - j++ - } - b.Instrs = dst - } - - // Remove any fn.Locals that were lifted. - j := 0 - for _, l := range fn.Locals { - if l.index < 0 { - fn.Locals[j] = l - j++ - } - } - // Nil out fn.Locals[j:] to aid GC. - for i := j; i < len(fn.Locals); i++ { - fn.Locals[i] = nil - } - fn.Locals = fn.Locals[:j] -} - -// removeDeadPhis removes φ-nodes not transitively needed by a -// non-Phi, non-DebugRef instruction. -func removeDeadPhis(blocks []*BasicBlock, newPhis newPhiMap) { - // First pass: find the set of "live" φ-nodes: those reachable - // from some non-Phi instruction. - // - // We compute reachability in reverse, starting from each φ, - // rather than forwards, starting from each live non-Phi - // instruction, because this way visits much less of the - // Value graph. 
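Concretely, this is the transformation `lift` performs on a function that stores to a cell on each arm of a branch: the loads of the cell become a φ-node at the join (a sketch of the effect, not exact output):

```go
package main

import "fmt"

func abs(x int) int {
	v := x // naive form: Alloc v; Store *v = x
	if x < 0 {
		v = -x // Store *v = -x
	}
	return v // Load of v; after lifting: return φ(x, -x)
}

func main() {
	fmt.Println(abs(-3), abs(3)) // 3 3
}
```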
- livePhis := make(map[*Phi]bool) - for _, npList := range newPhis { - for _, np := range npList { - phi := np.phi - if !livePhis[phi] && phiHasDirectReferrer(phi) { - markLivePhi(livePhis, phi) - } - } - } - - // Existing φ-nodes due to && and || operators - // are all considered live (see Go issue 19622). - for _, b := range blocks { - for _, phi := range b.phis() { - markLivePhi(livePhis, phi.(*Phi)) - } - } - - // Second pass: eliminate unused phis from newPhis. - for block, npList := range newPhis { - j := 0 - for _, np := range npList { - if livePhis[np.phi] { - npList[j] = np - j++ - } else { - // discard it, first removing it from referrers - for _, val := range np.phi.Edges { - if refs := val.Referrers(); refs != nil { - *refs = removeInstr(*refs, np.phi) - } - } - np.phi.block = nil - } - } - newPhis[block] = npList[:j] - } -} - -// markLivePhi marks phi, and all φ-nodes transitively reachable via -// its Operands, live. -func markLivePhi(livePhis map[*Phi]bool, phi *Phi) { - livePhis[phi] = true - for _, rand := range phi.Operands(nil) { - if q, ok := (*rand).(*Phi); ok { - if !livePhis[q] { - markLivePhi(livePhis, q) - } - } - } -} - -// phiHasDirectReferrer reports whether phi is directly referred to by -// a non-Phi instruction. Such instructions are the -// roots of the liveness traversal. -func phiHasDirectReferrer(phi *Phi) bool { - for _, instr := range *phi.Referrers() { - if _, ok := instr.(*Phi); !ok { - return true - } - } - return false -} - -type blockSet struct{ big.Int } // (inherit methods from Int) - -// add adds b to the set and returns true if the set changed. -func (s *blockSet) add(b *BasicBlock) bool { - i := b.Index - if s.Bit(i) != 0 { - return false - } - s.SetBit(&s.Int, i, 1) - return true -} - -// take removes an arbitrary element from a set s and -// returns its index, or returns -1 if empty. -func (s *blockSet) take() int { - l := s.BitLen() - for i := 0; i < l; i++ { - if s.Bit(i) == 1 { - s.SetBit(&s.Int, i, 0) - return i - } - } - return -1 -} - -// newPhi is a pair of a newly introduced φ-node and the lifted Alloc -// it replaces. -type newPhi struct { - phi *Phi - alloc *Alloc -} - -// newPhiMap records for each basic block, the set of newPhis that -// must be prepended to the block. -type newPhiMap map[*BasicBlock][]newPhi - -// liftAlloc determines whether alloc can be lifted into registers, -// and if so, it populates newPhis with all the φ-nodes it may require -// and returns true. -// -// fresh is a source of fresh ids for phi nodes. -// -func liftAlloc(df domFrontier, alloc *Alloc, newPhis newPhiMap, fresh *int) bool { - // Don't lift aggregates into registers, because we don't have - // a way to express their zero-constants. - switch deref(alloc.Type()).Underlying().(type) { - case *types.Array, *types.Struct: - return false - } - - // Don't lift named return values in functions that defer - // calls that may recover from panic. - if fn := alloc.Parent(); fn.Recover != nil { - for _, nr := range fn.namedResults { - if nr == alloc { - return false - } - } - } - - // Compute defblocks, the set of blocks containing a - // definition of the alloc cell. - var defblocks blockSet - for _, instr := range *alloc.Referrers() { - // Bail out if we discover the alloc is not liftable; - // the only operations permitted to use the alloc are - // loads/stores into the cell, and DebugRef. 
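The `blockSet`/`take` pair above implements the usual worklist loop over block indices. A self-contained analogue using `math/big` directly (names hypothetical):

```go
package main

import (
	"fmt"
	"math/big"
)

// take removes and returns the lowest set bit of s, or -1 if empty.
func take(s *big.Int) int {
	for i := 0; i < s.BitLen(); i++ {
		if s.Bit(i) == 1 {
			s.SetBit(s, i, 0)
			return i
		}
	}
	return -1
}

func main() {
	var work big.Int
	work.SetBit(&work, 3, 1)
	work.SetBit(&work, 7, 1)
	for i := take(&work); i != -1; i = take(&work) {
		fmt.Println("visit block", i) // 3, then 7
	}
}
```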
- switch instr := instr.(type) { - case *Store: - if instr.Val == alloc { - return false // address used as value - } - if instr.Addr != alloc { - panic("Alloc.Referrers is inconsistent") - } - defblocks.add(instr.Block()) - case *UnOp: - if instr.Op != token.MUL { - return false // not a load - } - if instr.X != alloc { - panic("Alloc.Referrers is inconsistent") - } - case *DebugRef: - // ok - default: - return false // some other instruction - } - } - // The Alloc itself counts as a (zero) definition of the cell. - defblocks.add(alloc.Block()) - - if debugLifting { - fmt.Fprintln(os.Stderr, "\tlifting ", alloc, alloc.Name()) - } - - fn := alloc.Parent() - - // Φ-insertion. - // - // What follows is the body of the main loop of the insert-φ - // function described by Cytron et al, but instead of using - // counter tricks, we just reset the 'hasAlready' and 'work' - // sets each iteration. These are bitmaps so it's pretty cheap. - // - // TODO(adonovan): opt: recycle slice storage for W, - // hasAlready, defBlocks across liftAlloc calls. - var hasAlready blockSet - - // Initialize W and work to defblocks. - var work blockSet = defblocks // blocks seen - var W blockSet // blocks to do - W.Set(&defblocks.Int) - - // Traverse iterated dominance frontier, inserting φ-nodes. - for i := W.take(); i != -1; i = W.take() { - u := fn.Blocks[i] - for _, v := range df[u.Index] { - if hasAlready.add(v) { - // Create φ-node. - // It will be prepended to v.Instrs later, if needed. - phi := &Phi{ - Edges: make([]Value, len(v.Preds)), - Comment: alloc.Comment, - } - // This is merely a debugging aid: - phi.setNum(*fresh) - *fresh++ - - phi.pos = alloc.Pos() - phi.setType(deref(alloc.Type())) - phi.block = v - if debugLifting { - fmt.Fprintf(os.Stderr, "\tplace %s = %s at block %s\n", phi.Name(), phi, v) - } - newPhis[v] = append(newPhis[v], newPhi{phi, alloc}) - - if work.add(v) { - W.add(v) - } - } - } - } - - return true -} - -// replaceAll replaces all intraprocedural uses of x with y, -// updating x.Referrers and y.Referrers. -// Precondition: x.Referrers() != nil, i.e. x must be local to some function. -// -func replaceAll(x, y Value) { - var rands []*Value - pxrefs := x.Referrers() - pyrefs := y.Referrers() - for _, instr := range *pxrefs { - rands = instr.Operands(rands[:0]) // recycle storage - for _, rand := range rands { - if *rand != nil { - if *rand == x { - *rand = y - } - } - } - if pyrefs != nil { - *pyrefs = append(*pyrefs, instr) // dups ok - } - } - *pxrefs = nil // x is now unreferenced -} - -// renamed returns the value to which alloc is being renamed, -// constructing it lazily if it's the implicit zero initialization. -// -func renamed(renaming []Value, alloc *Alloc) Value { - v := renaming[alloc.index] - if v == nil { - v = zeroConst(deref(alloc.Type())) - renaming[alloc.index] = v - } - return v -} - -// rename implements the (Cytron et al) SSA renaming algorithm, a -// preorder traversal of the dominator tree replacing all loads of -// Alloc cells with the value stored to that cell by the dominating -// store instruction. For lifting, we need only consider loads, -// stores and φ-nodes. -// -// renaming is a map from *Alloc (keyed by index number) to its -// dominating stored value; newPhis[x] is the set of new φ-nodes to be -// prepended to block x. -// -func rename(u *BasicBlock, renaming []Value, newPhis newPhiMap) { - // Each φ-node becomes the new name for its associated Alloc. 
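The referrer switch at the top of this hunk is what makes lifting safe: any use of the cell other than a direct load, store, or `DebugRef` leaves the variable in memory. The simplest disqualifying case:

```go
package main

import "fmt"

// x cannot be lifted: its address is returned, so the Alloc appears
// as an operand of an instruction that is neither a load nor a store
// of the cell, and liftAlloc's default case reports it unliftable.
func leak() *int {
	x := 1
	return &x
}

func main() {
	fmt.Println(*leak()) // 1
}
```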
- for _, np := range newPhis[u] { - phi := np.phi - alloc := np.alloc - renaming[alloc.index] = phi - } - - // Rename loads and stores of allocs. - for i, instr := range u.Instrs { - switch instr := instr.(type) { - case *Alloc: - if instr.index >= 0 { // store of zero to Alloc cell - // Replace dominated loads by the zero value. - renaming[instr.index] = nil - if debugLifting { - fmt.Fprintf(os.Stderr, "\tkill alloc %s\n", instr) - } - // Delete the Alloc. - u.Instrs[i] = nil - u.gaps++ - } - - case *Store: - if alloc, ok := instr.Addr.(*Alloc); ok && alloc.index >= 0 { // store to Alloc cell - // Replace dominated loads by the stored value. - renaming[alloc.index] = instr.Val - if debugLifting { - fmt.Fprintf(os.Stderr, "\tkill store %s; new value: %s\n", - instr, instr.Val.Name()) - } - // Remove the store from the referrer list of the stored value. - if refs := instr.Val.Referrers(); refs != nil { - *refs = removeInstr(*refs, instr) - } - // Delete the Store. - u.Instrs[i] = nil - u.gaps++ - } - - case *UnOp: - if instr.Op == token.MUL { - if alloc, ok := instr.X.(*Alloc); ok && alloc.index >= 0 { // load of Alloc cell - newval := renamed(renaming, alloc) - if debugLifting { - fmt.Fprintf(os.Stderr, "\tupdate load %s = %s with %s\n", - instr.Name(), instr, newval.Name()) - } - // Replace all references to - // the loaded value by the - // dominating stored value. - replaceAll(instr, newval) - // Delete the Load. - u.Instrs[i] = nil - u.gaps++ - } - } - - case *DebugRef: - if alloc, ok := instr.X.(*Alloc); ok && alloc.index >= 0 { // ref of Alloc cell - if instr.IsAddr { - instr.X = renamed(renaming, alloc) - instr.IsAddr = false - - // Add DebugRef to instr.X's referrers. - if refs := instr.X.Referrers(); refs != nil { - *refs = append(*refs, instr) - } - } else { - // A source expression denotes the address - // of an Alloc that was optimized away. - instr.X = nil - - // Delete the DebugRef. - u.Instrs[i] = nil - u.gaps++ - } - } - } - } - - // For each φ-node in a CFG successor, rename the edge. - for _, v := range u.Succs { - phis := newPhis[v] - if len(phis) == 0 { - continue - } - i := v.predIndex(u) - for _, np := range phis { - phi := np.phi - alloc := np.alloc - newval := renamed(renaming, alloc) - if debugLifting { - fmt.Fprintf(os.Stderr, "\tsetphi %s edge %s -> %s (#%d) (alloc=%s) := %s\n", - phi.Name(), u, v, i, alloc.Name(), newval.Name()) - } - phi.Edges[i] = newval - if prefs := newval.Referrers(); prefs != nil { - *prefs = append(*prefs, phi) - } - } - } - - // Continue depth-first recursion over domtree, pushing a - // fresh copy of the renaming map for each subtree. - for i, v := range u.dom.children { - r := renaming - if i < len(u.dom.children)-1 { - // On all but the final iteration, we must make - // a copy to avoid destructive update. - r = make([]Value, len(renaming)) - copy(r, renaming) - } - rename(v, r, newPhis) - } - -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/lvalue.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/lvalue.go deleted file mode 100644 index eb5d71e188fb..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/lvalue.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssa - -// lvalues are the union of addressable expressions and map-index -// expressions. 
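One subtlety in `rename` above: the renaming map is copied for every dominator-tree child except the last, so sibling subtrees never observe each other's stores; the final child may reuse the parent's map because nothing reads it afterwards. The pattern in miniature:

```go
package main

import "fmt"

func main() {
	parent := []string{"x = t0"}

	// First sibling gets a defensive copy; its update must not
	// leak into the second sibling's view of the parent state.
	child1 := append([]string(nil), parent...)
	child1[0] = "x = t1"

	// Last sibling reuses the parent's slice, as rename does.
	child2 := parent
	child2[0] = "x = t2"

	fmt.Println(child1[0], child2[0]) // x = t1 x = t2
}
```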
- -import ( - "go/ast" - "go/token" - "go/types" -) - -// An lvalue represents an assignable location that may appear on the -// left-hand side of an assignment. This is a generalization of a -// pointer to permit updates to elements of maps. -// -type lvalue interface { - store(fn *Function, v Value) // stores v into the location - load(fn *Function) Value // loads the contents of the location - address(fn *Function) Value // address of the location - typ() types.Type // returns the type of the location -} - -// An address is an lvalue represented by a true pointer. -type address struct { - addr Value - pos token.Pos // source position - expr ast.Expr // source syntax of the value (not address) [debug mode] -} - -func (a *address) load(fn *Function) Value { - load := emitLoad(fn, a.addr) - load.pos = a.pos - return load -} - -func (a *address) store(fn *Function, v Value) { - store := emitStore(fn, a.addr, v, a.pos) - if a.expr != nil { - // store.Val is v, converted for assignability. - emitDebugRef(fn, a.expr, store.Val, false) - } -} - -func (a *address) address(fn *Function) Value { - if a.expr != nil { - emitDebugRef(fn, a.expr, a.addr, true) - } - return a.addr -} - -func (a *address) typ() types.Type { - return deref(a.addr.Type()) -} - -// An element is an lvalue represented by m[k], the location of an -// element of a map or string. These locations are not addressable -// since pointers cannot be formed from them, but they do support -// load(), and in the case of maps, store(). -// -type element struct { - m, k Value // map or string - t types.Type // map element type or string byte type - pos token.Pos // source position of colon ({k:v}) or lbrack (m[k]=v) -} - -func (e *element) load(fn *Function) Value { - l := &Lookup{ - X: e.m, - Index: e.k, - } - l.setPos(e.pos) - l.setType(e.t) - return fn.emit(l) -} - -func (e *element) store(fn *Function, v Value) { - up := &MapUpdate{ - Map: e.m, - Key: e.k, - Value: emitConv(fn, v, e.t), - } - up.pos = e.pos - fn.emit(up) -} - -func (e *element) address(fn *Function) Value { - panic("map/string elements are not addressable") -} - -func (e *element) typ() types.Type { - return e.t -} - -// A blank is a dummy variable whose name is "_". -// It is not reified: loads are illegal and stores are ignored. -// -type blank struct{} - -func (bl blank) load(fn *Function) Value { - panic("blank.load is illegal") -} - -func (bl blank) store(fn *Function, v Value) { - s := &BlankStore{ - Val: v, - } - fn.emit(s) -} - -func (bl blank) address(fn *Function) Value { - panic("blank var is not addressable") -} - -func (bl blank) typ() types.Type { - // This should be the type of the blank Ident; the typechecker - // doesn't provide this yet, but fortunately, we don't need it - // yet either. - panic("blank.typ is unimplemented") -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/methods.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/methods.go deleted file mode 100644 index 080dca968ef8..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/methods.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssa - -// This file defines utilities for population of method sets. - -import ( - "fmt" - "go/types" -) - -// MethodValue returns the Function implementing method sel, building -// wrapper methods on demand. 
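The three `lvalue` implementations above map onto these source forms; map elements support load and store but not address-of, which is why `element.address` panics:

```go
package main

import "fmt"

func main() {
	var x int
	p := &x // x is an 'address' lvalue: taking &x is legal
	*p = 1

	m := map[string]int{}
	m["k"] = 2          // 'element' lvalue: the store becomes a MapUpdate
	fmt.Println(m["k"]) // the load becomes a Lookup
	// _ = &m["k"]      // would not compile: map elements aren't addressable

	_ = x // an assignment to _ goes through the 'blank' lvalue (BlankStore)
}
```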
It returns nil if sel denotes an -// abstract (interface) method. -// -// Precondition: sel.Kind() == MethodVal. -// -// Thread-safe. -// -// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu) -// -func (prog *Program) MethodValue(sel *types.Selection) *Function { - if sel.Kind() != types.MethodVal { - panic(fmt.Sprintf("Method(%s) kind != MethodVal", sel)) - } - T := sel.Recv() - if isInterface(T) { - return nil // abstract method - } - if prog.mode&LogSource != 0 { - defer logStack("Method %s %v", T, sel)() - } - - prog.methodsMu.Lock() - defer prog.methodsMu.Unlock() - - return prog.addMethod(prog.createMethodSet(T), sel) -} - -// LookupMethod returns the implementation of the method of type T -// identified by (pkg, name). It returns nil if the method exists but -// is abstract, and panics if T has no such method. -// -func (prog *Program) LookupMethod(T types.Type, pkg *types.Package, name string) *Function { - sel := prog.MethodSets.MethodSet(T).Lookup(pkg, name) - if sel == nil { - panic(fmt.Sprintf("%s has no method %s", T, types.Id(pkg, name))) - } - return prog.MethodValue(sel) -} - -// methodSet contains the (concrete) methods of a non-interface type. -type methodSet struct { - mapping map[string]*Function // populated lazily - complete bool // mapping contains all methods -} - -// Precondition: !isInterface(T). -// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) -func (prog *Program) createMethodSet(T types.Type) *methodSet { - mset, ok := prog.methodSets.At(T).(*methodSet) - if !ok { - mset = &methodSet{mapping: make(map[string]*Function)} - prog.methodSets.Set(T, mset) - } - return mset -} - -// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) -func (prog *Program) addMethod(mset *methodSet, sel *types.Selection) *Function { - if sel.Kind() == types.MethodExpr { - panic(sel) - } - id := sel.Obj().Id() - fn := mset.mapping[id] - if fn == nil { - obj := sel.Obj().(*types.Func) - - needsPromotion := len(sel.Index()) > 1 - needsIndirection := !isPointer(recvType(obj)) && isPointer(sel.Recv()) - if needsPromotion || needsIndirection { - fn = makeWrapper(prog, sel) - } else { - fn = prog.declaredFunc(obj) - } - if fn.Signature.Recv() == nil { - panic(fn) // missing receiver - } - mset.mapping[id] = fn - } - return fn -} - -// RuntimeTypes returns a new unordered slice containing all -// concrete types in the program for which a complete (non-empty) -// method set is required at run-time. -// -// Thread-safe. -// -// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu) -// -func (prog *Program) RuntimeTypes() []types.Type { - prog.methodsMu.Lock() - defer prog.methodsMu.Unlock() - - var res []types.Type - prog.methodSets.Iterate(func(T types.Type, v interface{}) { - if v.(*methodSet).complete { - res = append(res, T) - } - }) - return res -} - -// declaredFunc returns the concrete function/method denoted by obj. -// Panic ensues if there is none. -// -func (prog *Program) declaredFunc(obj *types.Func) *Function { - if v := prog.packageLevelValue(obj); v != nil { - return v.(*Function) - } - panic("no concrete method: " + obj.String()) -} - -// needMethodsOf ensures that runtime type information (including the -// complete method set) is available for the specified type T and all -// its subcomponents. -// -// needMethodsOf must be called for at least every type that is an -// operand of some MakeInterface instruction, and for the type of -// every exported package member. -// -// Precondition: T is not a method signature (*Signature with Recv()!=nil). -// -// Thread-safe. 
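`addMethod` above chooses between the declared function and a synthesized wrapper. An example triggering both wrapper conditions at once (illustrative types):

```go
package main

import "fmt"

type T struct{}

func (T) Hello() { fmt.Println("hello") }

type U struct{ T }

func main() {
	u := &U{}
	// Selecting Hello on *U needs makeWrapper on both counts:
	// promotion (the selection path is U, then T, then Hello,
	// so len(sel.Index()) > 1) and indirection (the declared
	// receiver T is not a pointer, but the actual receiver *U is).
	u.Hello()
}
```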
(Called via emitConv from multiple builder goroutines.) -// -// TODO(adonovan): make this faster. It accounts for 20% of SSA build time. -// -// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu) -// -func (prog *Program) needMethodsOf(T types.Type) { - prog.methodsMu.Lock() - prog.needMethods(T, false) - prog.methodsMu.Unlock() -} - -// Precondition: T is not a method signature (*Signature with Recv()!=nil). -// Recursive case: skip => don't create methods for T. -// -// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) -// -func (prog *Program) needMethods(T types.Type, skip bool) { - // Each package maintains its own set of types it has visited. - if prevSkip, ok := prog.runtimeTypes.At(T).(bool); ok { - // needMethods(T) was previously called - if !prevSkip || skip { - return // already seen, with same or false 'skip' value - } - } - prog.runtimeTypes.Set(T, skip) - - tmset := prog.MethodSets.MethodSet(T) - - if !skip && !isInterface(T) && tmset.Len() > 0 { - // Create methods of T. - mset := prog.createMethodSet(T) - if !mset.complete { - mset.complete = true - n := tmset.Len() - for i := 0; i < n; i++ { - prog.addMethod(mset, tmset.At(i)) - } - } - } - - // Recursion over signatures of each method. - for i := 0; i < tmset.Len(); i++ { - sig := tmset.At(i).Type().(*types.Signature) - prog.needMethods(sig.Params(), false) - prog.needMethods(sig.Results(), false) - } - - switch t := T.(type) { - case *types.Basic: - // nop - - case *types.Interface: - // nop---handled by recursion over method set. - - case *types.Pointer: - prog.needMethods(t.Elem(), false) - - case *types.Slice: - prog.needMethods(t.Elem(), false) - - case *types.Chan: - prog.needMethods(t.Elem(), false) - - case *types.Map: - prog.needMethods(t.Key(), false) - prog.needMethods(t.Elem(), false) - - case *types.Signature: - if t.Recv() != nil { - panic(fmt.Sprintf("Signature %s has Recv %s", t, t.Recv())) - } - prog.needMethods(t.Params(), false) - prog.needMethods(t.Results(), false) - - case *types.Named: - // A pointer-to-named type can be derived from a named - // type via reflection. It may have methods too. - prog.needMethods(types.NewPointer(T), false) - - // Consider 'type T struct{S}' where S has methods. - // Reflection provides no way to get from T to struct{S}, - // only to S, so the method set of struct{S} is unwanted, - // so set 'skip' flag during recursion. - prog.needMethods(t.Underlying(), true) - - case *types.Array: - prog.needMethods(t.Elem(), false) - - case *types.Struct: - for i, n := 0, t.NumFields(); i < n; i++ { - prog.needMethods(t.Field(i).Type(), false) - } - - case *types.Tuple: - for i, n := 0, t.Len(); i < n; i++ { - prog.needMethods(t.At(i).Type(), false) - } - - default: - panic(T) - } -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/mode.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/mode.go deleted file mode 100644 index d2a269893a71..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/mode.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssa - -// This file defines the BuilderMode type and its command-line flag. - -import ( - "bytes" - "fmt" -) - -// BuilderMode is a bitmask of options for diagnostics and checking. -// -// *BuilderMode satisfies the flag.Value interface. 
Example: -// -// var mode = ssa.BuilderMode(0) -// func init() { flag.Var(&mode, "build", ssa.BuilderModeDoc) } -// -type BuilderMode uint - -const ( - PrintPackages BuilderMode = 1 << iota // Print package inventory to stdout - PrintFunctions // Print function SSA code to stdout - LogSource // Log source locations as SSA builder progresses - SanityCheckFunctions // Perform sanity checking of function bodies - NaiveForm // Build naïve SSA form: don't replace local loads/stores with registers - BuildSerially // Build packages serially, not in parallel. - GlobalDebug // Enable debug info for all packages - BareInits // Build init functions without guards or calls to dependent inits -) - -const BuilderModeDoc = `Options controlling the SSA builder. -The value is a sequence of zero or more of these letters: -C perform sanity [C]hecking of the SSA form. -D include [D]ebug info for every function. -P print [P]ackage inventory. -F print [F]unction SSA code. -S log [S]ource locations as SSA builder progresses. -L build distinct packages seria[L]ly instead of in parallel. -N build [N]aive SSA form: don't replace local loads/stores with registers. -I build bare [I]nit functions: no init guards or calls to dependent inits. -` - -func (m BuilderMode) String() string { - var buf bytes.Buffer - if m&GlobalDebug != 0 { - buf.WriteByte('D') - } - if m&PrintPackages != 0 { - buf.WriteByte('P') - } - if m&PrintFunctions != 0 { - buf.WriteByte('F') - } - if m&LogSource != 0 { - buf.WriteByte('S') - } - if m&SanityCheckFunctions != 0 { - buf.WriteByte('C') - } - if m&NaiveForm != 0 { - buf.WriteByte('N') - } - if m&BuildSerially != 0 { - buf.WriteByte('L') - } - return buf.String() -} - -// Set parses the flag characters in s and updates *m. -func (m *BuilderMode) Set(s string) error { - var mode BuilderMode - for _, c := range s { - switch c { - case 'D': - mode |= GlobalDebug - case 'P': - mode |= PrintPackages - case 'F': - mode |= PrintFunctions - case 'S': - mode |= LogSource | BuildSerially - case 'C': - mode |= SanityCheckFunctions - case 'N': - mode |= NaiveForm - case 'L': - mode |= BuildSerially - default: - return fmt.Errorf("unknown BuilderMode option: %q", c) - } - } - *m = mode - return nil -} - -// Get returns m. -func (m BuilderMode) Get() interface{} { return m } diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/print.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/print.go deleted file mode 100644 index 6fd277277c05..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/print.go +++ /dev/null @@ -1,435 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssa - -// This file implements the String() methods for all Value and -// Instruction types. - -import ( - "bytes" - "fmt" - "go/types" - "io" - "reflect" - "sort" - - "golang.org/x/tools/go/types/typeutil" -) - -// relName returns the name of v relative to i. -// In most cases, this is identical to v.Name(), but references to -// Functions (including methods) and Globals use RelString and -// all types are displayed with relType, so that only cross-package -// references are package-qualified. 
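As the BuilderMode doc comment above suggests, *BuilderMode satisfies flag.Value, so a tool can expose the letter-coded options as a command-line flag. A minimal sketch, assuming the vendored import path:

```go
package main

import (
	"flag"
	"fmt"

	"honnef.co/go/tools/ssa"
)

var mode = ssa.BuilderMode(0)

func init() { flag.Var(&mode, "build", ssa.BuilderModeDoc) }

func main() {
	flag.Parse() // e.g. -build=NCL

	// Set and String round-trip the letter codes; unknown letters
	// are rejected with an error.
	if err := mode.Set("NL"); err != nil {
		panic(err)
	}
	fmt.Println(mode)                    // NaiveForm|BuildSerially prints as "NL"
	fmt.Println(mode&ssa.NaiveForm != 0) // true
}
```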
-// -func relName(v Value, i Instruction) string { - var from *types.Package - if i != nil { - from = i.Parent().pkg() - } - switch v := v.(type) { - case Member: // *Function or *Global - return v.RelString(from) - case *Const: - return v.RelString(from) - } - return v.Name() -} - -func relType(t types.Type, from *types.Package) string { - return types.TypeString(t, types.RelativeTo(from)) -} - -func relString(m Member, from *types.Package) string { - // NB: not all globals have an Object (e.g. init$guard), - // so use Package().Object not Object.Package(). - if pkg := m.Package().Pkg; pkg != nil && pkg != from { - return fmt.Sprintf("%s.%s", pkg.Path(), m.Name()) - } - return m.Name() -} - -// Value.String() -// -// This method is provided only for debugging. -// It never appears in disassembly, which uses Value.Name(). - -func (v *Parameter) String() string { - from := v.Parent().pkg() - return fmt.Sprintf("parameter %s : %s", v.Name(), relType(v.Type(), from)) -} - -func (v *FreeVar) String() string { - from := v.Parent().pkg() - return fmt.Sprintf("freevar %s : %s", v.Name(), relType(v.Type(), from)) -} - -func (v *Builtin) String() string { - return fmt.Sprintf("builtin %s", v.Name()) -} - -// Instruction.String() - -func (v *Alloc) String() string { - op := "local" - if v.Heap { - op = "new" - } - from := v.Parent().pkg() - return fmt.Sprintf("%s %s (%s)", op, relType(deref(v.Type()), from), v.Comment) -} - -func (v *Phi) String() string { - var b bytes.Buffer - b.WriteString("phi [") - for i, edge := range v.Edges { - if i > 0 { - b.WriteString(", ") - } - // Be robust against malformed CFG. - if v.block == nil { - b.WriteString("??") - continue - } - block := -1 - if i < len(v.block.Preds) { - block = v.block.Preds[i].Index - } - fmt.Fprintf(&b, "%d: ", block) - edgeVal := "" // be robust - if edge != nil { - edgeVal = relName(edge, v) - } - b.WriteString(edgeVal) - } - b.WriteString("]") - if v.Comment != "" { - b.WriteString(" #") - b.WriteString(v.Comment) - } - return b.String() -} - -func printCall(v *CallCommon, prefix string, instr Instruction) string { - var b bytes.Buffer - b.WriteString(prefix) - if !v.IsInvoke() { - b.WriteString(relName(v.Value, instr)) - } else { - fmt.Fprintf(&b, "invoke %s.%s", relName(v.Value, instr), v.Method.Name()) - } - b.WriteString("(") - for i, arg := range v.Args { - if i > 0 { - b.WriteString(", ") - } - b.WriteString(relName(arg, instr)) - } - if v.Signature().Variadic() { - b.WriteString("...") - } - b.WriteString(")") - return b.String() -} - -func (c *CallCommon) String() string { - return printCall(c, "", nil) -} - -func (v *Call) String() string { - return printCall(&v.Call, "", v) -} - -func (v *BinOp) String() string { - return fmt.Sprintf("%s %s %s", relName(v.X, v), v.Op.String(), relName(v.Y, v)) -} - -func (v *UnOp) String() string { - return fmt.Sprintf("%s%s%s", v.Op, relName(v.X, v), commaOk(v.CommaOk)) -} - -func printConv(prefix string, v, x Value) string { - from := v.Parent().pkg() - return fmt.Sprintf("%s %s <- %s (%s)", - prefix, - relType(v.Type(), from), - relType(x.Type(), from), - relName(x, v.(Instruction))) -} - -func (v *ChangeType) String() string { return printConv("changetype", v, v.X) } -func (v *Convert) String() string { return printConv("convert", v, v.X) } -func (v *ChangeInterface) String() string { return printConv("change interface", v, v.X) } -func (v *MakeInterface) String() string { return printConv("make", v, v.X) } - -func (v *MakeClosure) String() string { - var b bytes.Buffer - fmt.Fprintf(&b, 
"make closure %s", relName(v.Fn, v)) - if v.Bindings != nil { - b.WriteString(" [") - for i, c := range v.Bindings { - if i > 0 { - b.WriteString(", ") - } - b.WriteString(relName(c, v)) - } - b.WriteString("]") - } - return b.String() -} - -func (v *MakeSlice) String() string { - from := v.Parent().pkg() - return fmt.Sprintf("make %s %s %s", - relType(v.Type(), from), - relName(v.Len, v), - relName(v.Cap, v)) -} - -func (v *Slice) String() string { - var b bytes.Buffer - b.WriteString("slice ") - b.WriteString(relName(v.X, v)) - b.WriteString("[") - if v.Low != nil { - b.WriteString(relName(v.Low, v)) - } - b.WriteString(":") - if v.High != nil { - b.WriteString(relName(v.High, v)) - } - if v.Max != nil { - b.WriteString(":") - b.WriteString(relName(v.Max, v)) - } - b.WriteString("]") - return b.String() -} - -func (v *MakeMap) String() string { - res := "" - if v.Reserve != nil { - res = relName(v.Reserve, v) - } - from := v.Parent().pkg() - return fmt.Sprintf("make %s %s", relType(v.Type(), from), res) -} - -func (v *MakeChan) String() string { - from := v.Parent().pkg() - return fmt.Sprintf("make %s %s", relType(v.Type(), from), relName(v.Size, v)) -} - -func (v *FieldAddr) String() string { - st := deref(v.X.Type()).Underlying().(*types.Struct) - // Be robust against a bad index. - name := "?" - if 0 <= v.Field && v.Field < st.NumFields() { - name = st.Field(v.Field).Name() - } - return fmt.Sprintf("&%s.%s [#%d]", relName(v.X, v), name, v.Field) -} - -func (v *Field) String() string { - st := v.X.Type().Underlying().(*types.Struct) - // Be robust against a bad index. - name := "?" - if 0 <= v.Field && v.Field < st.NumFields() { - name = st.Field(v.Field).Name() - } - return fmt.Sprintf("%s.%s [#%d]", relName(v.X, v), name, v.Field) -} - -func (v *IndexAddr) String() string { - return fmt.Sprintf("&%s[%s]", relName(v.X, v), relName(v.Index, v)) -} - -func (v *Index) String() string { - return fmt.Sprintf("%s[%s]", relName(v.X, v), relName(v.Index, v)) -} - -func (v *Lookup) String() string { - return fmt.Sprintf("%s[%s]%s", relName(v.X, v), relName(v.Index, v), commaOk(v.CommaOk)) -} - -func (v *Range) String() string { - return "range " + relName(v.X, v) -} - -func (v *Next) String() string { - return "next " + relName(v.Iter, v) -} - -func (v *TypeAssert) String() string { - from := v.Parent().pkg() - return fmt.Sprintf("typeassert%s %s.(%s)", commaOk(v.CommaOk), relName(v.X, v), relType(v.AssertedType, from)) -} - -func (v *Extract) String() string { - return fmt.Sprintf("extract %s #%d", relName(v.Tuple, v), v.Index) -} - -func (s *Jump) String() string { - // Be robust against malformed CFG. - block := -1 - if s.block != nil && len(s.block.Succs) == 1 { - block = s.block.Succs[0].Index - } - return fmt.Sprintf("jump %d", block) -} - -func (s *If) String() string { - // Be robust against malformed CFG. 
- tblock, fblock := -1, -1 - if s.block != nil && len(s.block.Succs) == 2 { - tblock = s.block.Succs[0].Index - fblock = s.block.Succs[1].Index - } - return fmt.Sprintf("if %s goto %d else %d", relName(s.Cond, s), tblock, fblock) -} - -func (s *Go) String() string { - return printCall(&s.Call, "go ", s) -} - -func (s *Panic) String() string { - return "panic " + relName(s.X, s) -} - -func (s *Return) String() string { - var b bytes.Buffer - b.WriteString("return") - for i, r := range s.Results { - if i == 0 { - b.WriteString(" ") - } else { - b.WriteString(", ") - } - b.WriteString(relName(r, s)) - } - return b.String() -} - -func (*RunDefers) String() string { - return "rundefers" -} - -func (s *Send) String() string { - return fmt.Sprintf("send %s <- %s", relName(s.Chan, s), relName(s.X, s)) -} - -func (s *Defer) String() string { - return printCall(&s.Call, "defer ", s) -} - -func (s *Select) String() string { - var b bytes.Buffer - for i, st := range s.States { - if i > 0 { - b.WriteString(", ") - } - if st.Dir == types.RecvOnly { - b.WriteString("<-") - b.WriteString(relName(st.Chan, s)) - } else { - b.WriteString(relName(st.Chan, s)) - b.WriteString("<-") - b.WriteString(relName(st.Send, s)) - } - } - non := "" - if !s.Blocking { - non = "non" - } - return fmt.Sprintf("select %sblocking [%s]", non, b.String()) -} - -func (s *Store) String() string { - return fmt.Sprintf("*%s = %s", relName(s.Addr, s), relName(s.Val, s)) -} - -func (s *BlankStore) String() string { - return fmt.Sprintf("_ = %s", relName(s.Val, s)) -} - -func (s *MapUpdate) String() string { - return fmt.Sprintf("%s[%s] = %s", relName(s.Map, s), relName(s.Key, s), relName(s.Value, s)) -} - -func (s *DebugRef) String() string { - p := s.Parent().Prog.Fset.Position(s.Pos()) - var descr interface{} - if s.object != nil { - descr = s.object // e.g. "var x int" - } else { - descr = reflect.TypeOf(s.Expr) // e.g. "*ast.CallExpr" - } - var addr string - if s.IsAddr { - addr = "address of " - } - return fmt.Sprintf("; %s%s @ %d:%d is %s", addr, descr, p.Line, p.Column, s.X.Name()) -} - -func (p *Package) String() string { - return "package " + p.Pkg.Path() -} - -var _ io.WriterTo = (*Package)(nil) // *Package implements io.Writer - -func (p *Package) WriteTo(w io.Writer) (int64, error) { - var buf bytes.Buffer - WritePackage(&buf, p) - n, err := w.Write(buf.Bytes()) - return int64(n), err -} - -// WritePackage writes to buf a human-readable summary of p. 
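WritePackage and the io.WriterTo implementation above give two equivalent ways to dump a package summary. A sketch, assuming pkg is an already-built *ssa.Package; dumpPackage is an illustrative helper, not part of the package:

```go
import (
	"bytes"
	"fmt"
	"os"

	"honnef.co/go/tools/ssa"
)

// dumpPackage prints the member summary twice, once via WritePackage
// and once via the io.WriterTo implementation; the output is the same.
func dumpPackage(pkg *ssa.Package) error {
	var buf bytes.Buffer
	ssa.WritePackage(&buf, pkg) // members are sorted by name
	fmt.Print(buf.String())

	_, err := pkg.WriteTo(os.Stdout)
	return err
}
```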
-func WritePackage(buf *bytes.Buffer, p *Package) { - fmt.Fprintf(buf, "%s:\n", p) - - var names []string - maxname := 0 - for name := range p.Members { - if l := len(name); l > maxname { - maxname = l - } - names = append(names, name) - } - - from := p.Pkg - sort.Strings(names) - for _, name := range names { - switch mem := p.Members[name].(type) { - case *NamedConst: - fmt.Fprintf(buf, " const %-*s %s = %s\n", - maxname, name, mem.Name(), mem.Value.RelString(from)) - - case *Function: - fmt.Fprintf(buf, " func %-*s %s\n", - maxname, name, relType(mem.Type(), from)) - - case *Type: - fmt.Fprintf(buf, " type %-*s %s\n", - maxname, name, relType(mem.Type().Underlying(), from)) - for _, meth := range typeutil.IntuitiveMethodSet(mem.Type(), &p.Prog.MethodSets) { - fmt.Fprintf(buf, " %s\n", types.SelectionString(meth, types.RelativeTo(from))) - } - - case *Global: - fmt.Fprintf(buf, " var %-*s %s\n", - maxname, name, relType(mem.Type().(*types.Pointer).Elem(), from)) - } - } - - fmt.Fprintf(buf, "\n") -} - -func commaOk(x bool) string { - if x { - return ",ok" - } - return "" -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/sanity.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/sanity.go deleted file mode 100644 index c56b2682c083..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/sanity.go +++ /dev/null @@ -1,523 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssa - -// An optional pass for sanity-checking invariants of the SSA representation. -// Currently it checks CFG invariants but little at the instruction level. - -import ( - "fmt" - "go/types" - "io" - "os" - "strings" -) - -type sanity struct { - reporter io.Writer - fn *Function - block *BasicBlock - instrs map[Instruction]struct{} - insane bool -} - -// sanityCheck performs integrity checking of the SSA representation -// of the function fn and returns true if it was valid. Diagnostics -// are written to reporter if non-nil, os.Stderr otherwise. Some -// diagnostics are only warnings and do not imply a negative result. -// -// Sanity-checking is intended to facilitate the debugging of code -// transformation passes. -// -func sanityCheck(fn *Function, reporter io.Writer) bool { - if reporter == nil { - reporter = os.Stderr - } - return (&sanity{reporter: reporter}).checkFunction(fn) -} - -// mustSanityCheck is like sanityCheck but panics instead of returning -// a negative result. -// -func mustSanityCheck(fn *Function, reporter io.Writer) { - if !sanityCheck(fn, reporter) { - fn.WriteTo(os.Stderr) - panic("SanityCheck failed") - } -} - -func (s *sanity) diagnostic(prefix, format string, args ...interface{}) { - fmt.Fprintf(s.reporter, "%s: function %s", prefix, s.fn) - if s.block != nil { - fmt.Fprintf(s.reporter, ", block %s", s.block) - } - io.WriteString(s.reporter, ": ") - fmt.Fprintf(s.reporter, format, args...) - io.WriteString(s.reporter, "\n") -} - -func (s *sanity) errorf(format string, args ...interface{}) { - s.insane = true - s.diagnostic("Error", format, args...) -} - -func (s *sanity) warnf(format string, args ...interface{}) { - s.diagnostic("Warning", format, args...) -} - -// findDuplicate returns an arbitrary basic block that appeared more -// than once in blocks, or nil if all were unique. 
-func findDuplicate(blocks []*BasicBlock) *BasicBlock { - if len(blocks) < 2 { - return nil - } - if blocks[0] == blocks[1] { - return blocks[0] - } - // Slow path: - m := make(map[*BasicBlock]bool) - for _, b := range blocks { - if m[b] { - return b - } - m[b] = true - } - return nil -} - -func (s *sanity) checkInstr(idx int, instr Instruction) { - switch instr := instr.(type) { - case *If, *Jump, *Return, *Panic: - s.errorf("control flow instruction not at end of block") - case *Phi: - if idx == 0 { - // It suffices to apply this check to just the first phi node. - if dup := findDuplicate(s.block.Preds); dup != nil { - s.errorf("phi node in block with duplicate predecessor %s", dup) - } - } else { - prev := s.block.Instrs[idx-1] - if _, ok := prev.(*Phi); !ok { - s.errorf("Phi instruction follows a non-Phi: %T", prev) - } - } - if ne, np := len(instr.Edges), len(s.block.Preds); ne != np { - s.errorf("phi node has %d edges but %d predecessors", ne, np) - - } else { - for i, e := range instr.Edges { - if e == nil { - s.errorf("phi node '%s' has no value for edge #%d from %s", instr.Comment, i, s.block.Preds[i]) - } - } - } - - case *Alloc: - if !instr.Heap { - found := false - for _, l := range s.fn.Locals { - if l == instr { - found = true - break - } - } - if !found { - s.errorf("local alloc %s = %s does not appear in Function.Locals", instr.Name(), instr) - } - } - - case *BinOp: - case *Call: - case *ChangeInterface: - case *ChangeType: - case *Convert: - if _, ok := instr.X.Type().Underlying().(*types.Basic); !ok { - if _, ok := instr.Type().Underlying().(*types.Basic); !ok { - s.errorf("convert %s -> %s: at least one type must be basic", instr.X.Type(), instr.Type()) - } - } - - case *Defer: - case *Extract: - case *Field: - case *FieldAddr: - case *Go: - case *Index: - case *IndexAddr: - case *Lookup: - case *MakeChan: - case *MakeClosure: - numFree := len(instr.Fn.(*Function).FreeVars) - numBind := len(instr.Bindings) - if numFree != numBind { - s.errorf("MakeClosure has %d Bindings for function %s with %d free vars", - numBind, instr.Fn, numFree) - - } - if recv := instr.Type().(*types.Signature).Recv(); recv != nil { - s.errorf("MakeClosure's type includes receiver %s", recv.Type()) - } - - case *MakeInterface: - case *MakeMap: - case *MakeSlice: - case *MapUpdate: - case *Next: - case *Range: - case *RunDefers: - case *Select: - case *Send: - case *Slice: - case *Store: - case *TypeAssert: - case *UnOp: - case *DebugRef: - case *BlankStore: - case *Sigma: - // TODO(adonovan): implement checks. - default: - panic(fmt.Sprintf("Unknown instruction type: %T", instr)) - } - - if call, ok := instr.(CallInstruction); ok { - if call.Common().Signature() == nil { - s.errorf("nil signature: %s", call) - } - } - - // Check that value-defining instructions have valid types - // and a valid referrer list. - if v, ok := instr.(Value); ok { - t := v.Type() - if t == nil { - s.errorf("no type: %s = %s", v.Name(), v) - } else if t == tRangeIter { - // not a proper type; ignore. - } else if b, ok := t.Underlying().(*types.Basic); ok && b.Info()&types.IsUntyped != 0 { - s.errorf("instruction has 'untyped' result: %s = %s : %s", v.Name(), v, t) - } - s.checkReferrerList(v) - } - - // Untyped constants are legal as instruction Operands(), - // for example: - // _ = "foo"[0] - // or: - // if wordsize==64 {...} - - // All other non-Instruction Values can be found via their - // enclosing Function or Package. 
-} - -func (s *sanity) checkFinalInstr(instr Instruction) { - switch instr := instr.(type) { - case *If: - if nsuccs := len(s.block.Succs); nsuccs != 2 { - s.errorf("If-terminated block has %d successors; expected 2", nsuccs) - return - } - if s.block.Succs[0] == s.block.Succs[1] { - s.errorf("If-instruction has same True, False target blocks: %s", s.block.Succs[0]) - return - } - - case *Jump: - if nsuccs := len(s.block.Succs); nsuccs != 1 { - s.errorf("Jump-terminated block has %d successors; expected 1", nsuccs) - return - } - - case *Return: - if nsuccs := len(s.block.Succs); nsuccs != 0 { - s.errorf("Return-terminated block has %d successors; expected none", nsuccs) - return - } - if na, nf := len(instr.Results), s.fn.Signature.Results().Len(); nf != na { - s.errorf("%d-ary return in %d-ary function", na, nf) - } - - case *Panic: - if nsuccs := len(s.block.Succs); nsuccs != 0 { - s.errorf("Panic-terminated block has %d successors; expected none", nsuccs) - return - } - - default: - s.errorf("non-control flow instruction at end of block") - } -} - -func (s *sanity) checkBlock(b *BasicBlock, index int) { - s.block = b - - if b.Index != index { - s.errorf("block has incorrect Index %d", b.Index) - } - if b.parent != s.fn { - s.errorf("block has incorrect parent %s", b.parent) - } - - // Check all blocks are reachable. - // (The entry block is always implicitly reachable, - // as is the Recover block, if any.) - if (index > 0 && b != b.parent.Recover) && len(b.Preds) == 0 { - s.warnf("unreachable block") - if b.Instrs == nil { - // Since this block is about to be pruned, - // tolerating transient problems in it - // simplifies other optimizations. - return - } - } - - // Check predecessor and successor relations are dual, - // and that all blocks in CFG belong to same function. - for _, a := range b.Preds { - found := false - for _, bb := range a.Succs { - if bb == b { - found = true - break - } - } - if !found { - s.errorf("expected successor edge in predecessor %s; found only: %s", a, a.Succs) - } - if a.parent != s.fn { - s.errorf("predecessor %s belongs to different function %s", a, a.parent) - } - } - for _, c := range b.Succs { - found := false - for _, bb := range c.Preds { - if bb == b { - found = true - break - } - } - if !found { - s.errorf("expected predecessor edge in successor %s; found only: %s", c, c.Preds) - } - if c.parent != s.fn { - s.errorf("successor %s belongs to different function %s", c, c.parent) - } - } - - // Check each instruction is sane. - n := len(b.Instrs) - if n == 0 { - s.errorf("basic block contains no instructions") - } - var rands [10]*Value // reuse storage - for j, instr := range b.Instrs { - if instr == nil { - s.errorf("nil instruction at index %d", j) - continue - } - if b2 := instr.Block(); b2 == nil { - s.errorf("nil Block() for instruction at index %d", j) - continue - } else if b2 != b { - s.errorf("wrong Block() (%s) for instruction at index %d ", b2, j) - continue - } - if j < n-1 { - s.checkInstr(j, instr) - } else { - s.checkFinalInstr(instr) - } - - // Check Instruction.Operands. - operands: - for i, op := range instr.Operands(rands[:0]) { - if op == nil { - s.errorf("nil operand pointer %d of %s", i, instr) - continue - } - val := *op - if val == nil { - continue // a nil operand is ok - } - - // Check that "untyped" types only appear on constant operands. 
- if _, ok := (*op).(*Const); !ok { - if basic, ok := (*op).Type().(*types.Basic); ok { - if basic.Info()&types.IsUntyped != 0 { - s.errorf("operand #%d of %s is untyped: %s", i, instr, basic) - } - } - } - - // Check that Operands that are also Instructions belong to same function. - // TODO(adonovan): also check their block dominates block b. - if val, ok := val.(Instruction); ok { - if val.Block() == nil { - s.errorf("operand %d of %s is an instruction (%s) that belongs to no block", i, instr, val) - } else if val.Parent() != s.fn { - s.errorf("operand %d of %s is an instruction (%s) from function %s", i, instr, val, val.Parent()) - } - } - - // Check that each function-local operand of - // instr refers back to instr. (NB: quadratic) - switch val := val.(type) { - case *Const, *Global, *Builtin: - continue // not local - case *Function: - if val.parent == nil { - continue // only anon functions are local - } - } - - // TODO(adonovan): check val.Parent() != nil <=> val.Referrers() is defined. - - if refs := val.Referrers(); refs != nil { - for _, ref := range *refs { - if ref == instr { - continue operands - } - } - s.errorf("operand %d of %s (%s) does not refer to us", i, instr, val) - } else { - s.errorf("operand %d of %s (%s) has no referrers", i, instr, val) - } - } - } -} - -func (s *sanity) checkReferrerList(v Value) { - refs := v.Referrers() - if refs == nil { - s.errorf("%s has missing referrer list", v.Name()) - return - } - for i, ref := range *refs { - if _, ok := s.instrs[ref]; !ok { - s.errorf("%s.Referrers()[%d] = %s is not an instruction belonging to this function", v.Name(), i, ref) - } - } -} - -func (s *sanity) checkFunction(fn *Function) bool { - // TODO(adonovan): check Function invariants: - // - check params match signature - // - check transient fields are nil - // - warn if any fn.Locals do not appear among block instructions. - s.fn = fn - if fn.Prog == nil { - s.errorf("nil Prog") - } - - fn.String() // must not crash - fn.RelString(fn.pkg()) // must not crash - - // All functions have a package, except delegates (which are - // shared across packages, or duplicated as weak symbols in a - // separate-compilation model), and error.Error. - if fn.Pkg == nil { - if strings.HasPrefix(fn.Synthetic, "wrapper ") || - strings.HasPrefix(fn.Synthetic, "bound ") || - strings.HasPrefix(fn.Synthetic, "thunk ") || - strings.HasSuffix(fn.name, "Error") { - // ok - } else { - s.errorf("nil Pkg") - } - } - if src, syn := fn.Synthetic == "", fn.Syntax() != nil; src != syn { - s.errorf("got fromSource=%t, hasSyntax=%t; want same values", src, syn) - } - for i, l := range fn.Locals { - if l.Parent() != fn { - s.errorf("Local %s at index %d has wrong parent", l.Name(), i) - } - if l.Heap { - s.errorf("Local %s at index %d has Heap flag set", l.Name(), i) - } - } - // Build the set of valid referrers. - s.instrs = make(map[Instruction]struct{}) - for _, b := range fn.Blocks { - for _, instr := range b.Instrs { - s.instrs[instr] = struct{}{} - } - } - for i, p := range fn.Params { - if p.Parent() != fn { - s.errorf("Param %s at index %d has wrong parent", p.Name(), i) - } - s.checkReferrerList(p) - } - for i, fv := range fn.FreeVars { - if fv.Parent() != fn { - s.errorf("FreeVar %s at index %d has wrong parent", fv.Name(), i) - } - s.checkReferrerList(fv) - } - - if fn.Blocks != nil && len(fn.Blocks) == 0 { - // Function _had_ blocks (so it's not external) but - // they were "optimized" away, even the entry block. 
- s.errorf("Blocks slice is non-nil but empty") - } - for i, b := range fn.Blocks { - if b == nil { - s.warnf("nil *BasicBlock at f.Blocks[%d]", i) - continue - } - s.checkBlock(b, i) - } - if fn.Recover != nil && fn.Blocks[fn.Recover.Index] != fn.Recover { - s.errorf("Recover block is not in Blocks slice") - } - - s.block = nil - for i, anon := range fn.AnonFuncs { - if anon.Parent() != fn { - s.errorf("AnonFuncs[%d]=%s but %s.Parent()=%s", i, anon, anon, anon.Parent()) - } - } - s.fn = nil - return !s.insane -} - -// sanityCheckPackage checks invariants of packages upon creation. -// It does not require that the package is built. -// Unlike sanityCheck (for functions), it just panics at the first error. -func sanityCheckPackage(pkg *Package) { - if pkg.Pkg == nil { - panic(fmt.Sprintf("Package %s has no Object", pkg)) - } - pkg.String() // must not crash - - for name, mem := range pkg.Members { - if name != mem.Name() { - panic(fmt.Sprintf("%s: %T.Name() = %s, want %s", - pkg.Pkg.Path(), mem, mem.Name(), name)) - } - obj := mem.Object() - if obj == nil { - // This check is sound because fields - // {Global,Function}.object have type - // types.Object. (If they were declared as - // *types.{Var,Func}, we'd have a non-empty - // interface containing a nil pointer.) - - continue // not all members have typechecker objects - } - if obj.Name() != name { - if obj.Name() == "init" && strings.HasPrefix(mem.Name(), "init#") { - // Ok. The name of a declared init function varies between - // its types.Func ("init") and its ssa.Function ("init#%d"). - } else { - panic(fmt.Sprintf("%s: %T.Object().Name() = %s, want %s", - pkg.Pkg.Path(), mem, obj.Name(), name)) - } - } - if obj.Pos() != mem.Pos() { - panic(fmt.Sprintf("%s Pos=%d obj.Pos=%d", mem, mem.Pos(), obj.Pos())) - } - } -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/source.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/source.go deleted file mode 100644 index 6d2223edaa36..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/source.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssa - -// This file defines utilities for working with source positions -// or source-level named entities ("objects"). - -// TODO(adonovan): test that {Value,Instruction}.Pos() positions match -// the originating syntax, as specified. - -import ( - "go/ast" - "go/token" - "go/types" -) - -// EnclosingFunction returns the function that contains the syntax -// node denoted by path. -// -// Syntax associated with package-level variable specifications is -// enclosed by the package's init() function. -// -// Returns nil if not found; reasons might include: -// - the node is not enclosed by any function. -// - the node is within an anonymous function (FuncLit) and -// its SSA function has not been created yet -// (pkg.Build() has not yet been called). -// -func EnclosingFunction(pkg *Package, path []ast.Node) *Function { - // Start with package-level function... - fn := findEnclosingPackageLevelFunction(pkg, path) - if fn == nil { - return nil // not in any function - } - - // ...then walk down the nested anonymous functions. 
- n := len(path) -outer: - for i := range path { - if lit, ok := path[n-1-i].(*ast.FuncLit); ok { - for _, anon := range fn.AnonFuncs { - if anon.Pos() == lit.Type.Func { - fn = anon - continue outer - } - } - // SSA function not found: - // - package not yet built, or maybe - // - builder skipped FuncLit in dead block - // (in principle; but currently the Builder - // generates even dead FuncLits). - return nil - } - } - return fn -} - -// HasEnclosingFunction returns true if the AST node denoted by path -// is contained within the declaration of some function or -// package-level variable. -// -// Unlike EnclosingFunction, the behaviour of this function does not -// depend on whether SSA code for pkg has been built, so it can be -// used to quickly reject check inputs that will cause -// EnclosingFunction to fail, prior to SSA building. -// -func HasEnclosingFunction(pkg *Package, path []ast.Node) bool { - return findEnclosingPackageLevelFunction(pkg, path) != nil -} - -// findEnclosingPackageLevelFunction returns the Function -// corresponding to the package-level function enclosing path. -// -func findEnclosingPackageLevelFunction(pkg *Package, path []ast.Node) *Function { - if n := len(path); n >= 2 { // [... {Gen,Func}Decl File] - switch decl := path[n-2].(type) { - case *ast.GenDecl: - if decl.Tok == token.VAR && n >= 3 { - // Package-level 'var' initializer. - return pkg.init - } - - case *ast.FuncDecl: - if decl.Recv == nil && decl.Name.Name == "init" { - // Explicit init() function. - for _, b := range pkg.init.Blocks { - for _, instr := range b.Instrs { - if instr, ok := instr.(*Call); ok { - if callee, ok := instr.Call.Value.(*Function); ok && callee.Pkg == pkg && callee.Pos() == decl.Name.NamePos { - return callee - } - } - } - } - // Hack: return non-nil when SSA is not yet - // built so that HasEnclosingFunction works. - return pkg.init - } - // Declared function/method. - return findNamedFunc(pkg, decl.Name.NamePos) - } - } - return nil // not in any function -} - -// findNamedFunc returns the named function whose FuncDecl.Ident is at -// position pos. -// -func findNamedFunc(pkg *Package, pos token.Pos) *Function { - // Look at all package members and method sets of named types. - // Not very efficient. - for _, mem := range pkg.Members { - switch mem := mem.(type) { - case *Function: - if mem.Pos() == pos { - return mem - } - case *Type: - mset := pkg.Prog.MethodSets.MethodSet(types.NewPointer(mem.Type())) - for i, n := 0, mset.Len(); i < n; i++ { - // Don't call Program.Method: avoid creating wrappers. - obj := mset.At(i).Obj().(*types.Func) - if obj.Pos() == pos { - return pkg.values[obj].(*Function) - } - } - } - } - return nil -} - -// ValueForExpr returns the SSA Value that corresponds to non-constant -// expression e. -// -// It returns nil if no value was found, e.g. -// - the expression is not lexically contained within f; -// - f was not built with debug information; or -// - e is a constant expression. (For efficiency, no debug -// information is stored for constants. Use -// go/types.Info.Types[e].Value instead.) -// - e is a reference to nil or a built-in function. -// - the value was optimised away. -// -// If e is an addressable expression used in an lvalue context, -// value is the address denoted by e, and isAddr is true. -// -// The types of e (or &e, if isAddr) and the result are equal -// (modulo "untyped" bools resulting from comparisons). 
-// -// (Tip: to find the ssa.Value given a source position, use -// importer.PathEnclosingInterval to locate the ast.Node, then -// EnclosingFunction to locate the Function, then ValueForExpr to find -// the ssa.Value.) -// -func (f *Function) ValueForExpr(e ast.Expr) (value Value, isAddr bool) { - if f.debugInfo() { // (opt) - e = unparen(e) - for _, b := range f.Blocks { - for _, instr := range b.Instrs { - if ref, ok := instr.(*DebugRef); ok { - if ref.Expr == e { - return ref.X, ref.IsAddr - } - } - } - } - } - return -} - -// --- Lookup functions for source-level named entities (types.Objects) --- - -// Package returns the SSA Package corresponding to the specified -// type-checker package object. -// It returns nil if no such SSA package has been created. -// -func (prog *Program) Package(obj *types.Package) *Package { - return prog.packages[obj] -} - -// packageLevelValue returns the package-level value corresponding to -// the specified named object, which may be a package-level const -// (*Const), var (*Global) or func (*Function) of some package in -// prog. It returns nil if the object is not found. -// -func (prog *Program) packageLevelValue(obj types.Object) Value { - if pkg, ok := prog.packages[obj.Pkg()]; ok { - return pkg.values[obj] - } - return nil -} - -// FuncValue returns the concrete Function denoted by the source-level -// named function obj, or nil if obj denotes an interface method. -// -// TODO(adonovan): check the invariant that obj.Type() matches the -// result's Signature, both in the params/results and in the receiver. -// -func (prog *Program) FuncValue(obj *types.Func) *Function { - fn, _ := prog.packageLevelValue(obj).(*Function) - return fn -} - -// ConstValue returns the SSA Value denoted by the source-level named -// constant obj. -// -func (prog *Program) ConstValue(obj *types.Const) *Const { - // TODO(adonovan): opt: share (don't reallocate) - // Consts for const objects and constant ast.Exprs. - - // Universal constant? {true,false,nil} - if obj.Parent() == types.Universe { - return NewConst(obj.Val(), obj.Type()) - } - // Package-level named constant? - if v := prog.packageLevelValue(obj); v != nil { - return v.(*Const) - } - return NewConst(obj.Val(), obj.Type()) -} - -// VarValue returns the SSA Value that corresponds to a specific -// identifier denoting the source-level named variable obj. -// -// VarValue returns nil if a local variable was not found, perhaps -// because its package was not built, the debug information was not -// requested during SSA construction, or the value was optimized away. -// -// ref is the path to an ast.Ident (e.g. from PathEnclosingInterval), -// and that ident must resolve to obj. -// -// pkg is the package enclosing the reference. (A reference to a var -// always occurs within a function, so we need to know where to find it.) -// -// If the identifier is a field selector and its base expression is -// non-addressable, then VarValue returns the value of that field. -// For example: -// func f() struct {x int} -// f().x // VarValue(x) returns a *Field instruction of type int -// -// All other identifiers denote addressable locations (variables). -// For them, VarValue may return either the variable's address or its -// value, even when the expression is evaluated only for its value; the -// situation is reported by isAddr, the second component of the result. -// -// If !isAddr, the returned value is the one associated with the -// specific identifier. 
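The tip above maps a source position to an ssa.Value in three steps. A sketch under the stated preconditions: pkg was built with debug information (for example via the GlobalDebug builder mode) and file is the *ast.File containing pos; valueAtPos is an illustrative helper, not part of the package:

```go
import (
	"go/ast"
	"go/token"

	"golang.org/x/tools/go/ast/astutil"
	"honnef.co/go/tools/ssa"
)

// valueAtPos locates the ast.Node path, finds the enclosing Function,
// then maps the innermost expression to an ssa.Value.
func valueAtPos(pkg *ssa.Package, file *ast.File, pos token.Pos) (ssa.Value, bool) {
	path, exact := astutil.PathEnclosingInterval(file, pos, pos)
	if !exact {
		return nil, false
	}
	fn := ssa.EnclosingFunction(pkg, path)
	if fn == nil {
		return nil, false // not in a function, or SSA not built yet
	}
	e, ok := path[0].(ast.Expr)
	if !ok {
		return nil, false
	}
	v, isAddr := fn.ValueForExpr(e) // nil for constants, or without debug info
	return v, isAddr
}
```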
For example, -// var x int // VarValue(x) returns Const 0 here -// x = 1 // VarValue(x) returns Const 1 here -// -// It is not specified whether the value or the address is returned in -// any particular case, as it may depend upon optimizations performed -// during SSA code generation, such as registerization, constant -// folding, avoidance of materialization of subexpressions, etc. -// -func (prog *Program) VarValue(obj *types.Var, pkg *Package, ref []ast.Node) (value Value, isAddr bool) { - // All references to a var are local to some function, possibly init. - fn := EnclosingFunction(pkg, ref) - if fn == nil { - return // e.g. def of struct field; SSA not built? - } - - id := ref[0].(*ast.Ident) - - // Defining ident of a parameter? - if id.Pos() == obj.Pos() { - for _, param := range fn.Params { - if param.Object() == obj { - return param, false - } - } - } - - // Other ident? - for _, b := range fn.Blocks { - for _, instr := range b.Instrs { - if dr, ok := instr.(*DebugRef); ok { - if dr.Pos() == id.Pos() { - return dr.X, dr.IsAddr - } - } - } - } - - // Defining ident of package-level var? - if v := prog.packageLevelValue(obj); v != nil { - return v.(*Global), true - } - - return // e.g. debug info not requested, or var optimized away -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/ssa.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/ssa.go deleted file mode 100644 index 8825e7b5990d..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/ssa.go +++ /dev/null @@ -1,1745 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssa - -// This package defines a high-level intermediate representation for -// Go programs using static single-assignment (SSA) form. - -import ( - "fmt" - "go/ast" - exact "go/constant" - "go/token" - "go/types" - "sync" - - "golang.org/x/tools/go/types/typeutil" -) - -// A Program is a partial or complete Go program converted to SSA form. -type Program struct { - Fset *token.FileSet // position information for the files of this Program - imported map[string]*Package // all importable Packages, keyed by import path - packages map[*types.Package]*Package // all loaded Packages, keyed by object - mode BuilderMode // set of mode bits for SSA construction - MethodSets typeutil.MethodSetCache // cache of type-checker's method-sets - - methodsMu sync.Mutex // guards the following maps: - methodSets typeutil.Map // maps type to its concrete methodSet - runtimeTypes typeutil.Map // types for which rtypes are needed - canon typeutil.Map // type canonicalization map - bounds map[*types.Func]*Function // bounds for curried x.Method closures - thunks map[selectionKey]*Function // thunks for T.Method expressions -} - -// A Package is a single analyzed Go package containing Members for -// all package-level functions, variables, constants and types it -// declares. These may be accessed directly via Members, or via the -// type-specific accessor methods Func, Type, Var and Const. -// -// Members also contains entries for "init" (the synthetic package -// initializer) and "init#%d", the nth declared init function, -// and unspecified other things too. -// -type Package struct { - Prog *Program // the owning program - Pkg *types.Package // the corresponding go/types.Package - Members map[string]Member // all package members keyed by name (incl. 
init and init#%d) - values map[types.Object]Value // package members (incl. types and methods), keyed by object - init *Function // Func("init"); the package's init function - debug bool // include full debug info in this package - - // The following fields are set transiently, then cleared - // after building. - buildOnce sync.Once // ensures package building occurs once - ninit int32 // number of init functions - info *types.Info // package type information - files []*ast.File // package ASTs -} - -// A Member is a member of a Go package, implemented by *NamedConst, -// *Global, *Function, or *Type; they are created by package-level -// const, var, func and type declarations respectively. -// -type Member interface { - Name() string // declared name of the package member - String() string // package-qualified name of the package member - RelString(*types.Package) string // like String, but relative refs are unqualified - Object() types.Object // typechecker's object for this member, if any - Pos() token.Pos // position of member's declaration, if known - Type() types.Type // type of the package member - Token() token.Token // token.{VAR,FUNC,CONST,TYPE} - Package() *Package // the containing package -} - -// A Type is a Member of a Package representing a package-level named type. -type Type struct { - object *types.TypeName - pkg *Package -} - -// A NamedConst is a Member of a Package representing a package-level -// named constant. -// -// Pos() returns the position of the declaring ast.ValueSpec.Names[*] -// identifier. -// -// NB: a NamedConst is not a Value; it contains a constant Value, which -// it augments with the name and position of its 'const' declaration. -// -type NamedConst struct { - object *types.Const - Value *Const - pkg *Package -} - -// A Value is an SSA value that can be referenced by an instruction. -type Value interface { - // Name returns the name of this value, and determines how - // this Value appears when used as an operand of an - // Instruction. - // - // This is the same as the source name for Parameters, - // Builtins, Functions, FreeVars, Globals. - // For constants, it is a representation of the constant's value - // and type. For all other Values this is the name of the - // virtual register defined by the instruction. - // - // The name of an SSA Value is not semantically significant, - // and may not even be unique within a function. - Name() string - - // If this value is an Instruction, String returns its - // disassembled form; otherwise it returns unspecified - // human-readable information about the Value, such as its - // kind, name and type. - String() string - - // Type returns the type of this value. Many instructions - // (e.g. IndexAddr) change their behaviour depending on the - // types of their operands. - Type() types.Type - - // Parent returns the function to which this Value belongs. - // It returns nil for named Functions, Builtin, Const and Global. - Parent() *Function - - // Referrers returns the list of instructions that have this - // value as one of their operands; it may contain duplicates - // if an instruction has a repeated operand. - // - // Referrers actually returns a pointer through which the - // caller may perform mutations to the object's state. - // - // Referrers is currently only defined if Parent()!=nil, - // i.e. for the function-local values FreeVar, Parameter, - // Functions (iff anonymous) and all value-defining instructions. - // It returns nil for named Functions, Builtin, Const and Global. 
- // - // Instruction.Operands contains the inverse of this relation. - Referrers() *[]Instruction - - // Pos returns the location of the AST token most closely - // associated with the operation that gave rise to this value, - // or token.NoPos if it was not explicit in the source. - // - // For each ast.Node type, a particular token is designated as - // the closest location for the expression, e.g. the Lparen - // for an *ast.CallExpr. This permits a compact but - // approximate mapping from Values to source positions for use - // in diagnostic messages, for example. - // - // (Do not use this position to determine which Value - // corresponds to an ast.Expr; use Function.ValueForExpr - // instead. NB: it requires that the function was built with - // debug information.) - Pos() token.Pos -} - -// An Instruction is an SSA instruction that computes a new Value or -// has some effect. -// -// An Instruction that defines a value (e.g. BinOp) also implements -// the Value interface; an Instruction that only has an effect (e.g. Store) -// does not. -// -type Instruction interface { - // String returns the disassembled form of this value. - // - // Examples of Instructions that are Values: - // "x + y" (BinOp) - // "len([])" (Call) - // Note that the name of the Value is not printed. - // - // Examples of Instructions that are not Values: - // "return x" (Return) - // "*y = x" (Store) - // - // (The separation Value.Name() from Value.String() is useful - // for some analyses which distinguish the operation from the - // value it defines, e.g., 'y = local int' is both an allocation - // of memory 'local int' and a definition of a pointer y.) - String() string - - // Parent returns the function to which this instruction - // belongs. - Parent() *Function - - // Block returns the basic block to which this instruction - // belongs. - Block() *BasicBlock - - // setBlock sets the basic block to which this instruction belongs. - setBlock(*BasicBlock) - - // Operands returns the operands of this instruction: the - // set of Values it references. - // - // Specifically, it appends their addresses to rands, a - // user-provided slice, and returns the resulting slice, - // permitting avoidance of memory allocation. - // - // The operands are appended in undefined order, but the order - // is consistent for a given Instruction; the addresses are - // always non-nil but may point to a nil Value. Clients may - // store through the pointers, e.g. to effect a value - // renaming. - // - // Value.Referrers is a subset of the inverse of this - // relation. (Referrers are not tracked for all types of - // Values.) - Operands(rands []*Value) []*Value - - // Pos returns the location of the AST token most closely - // associated with the operation that gave rise to this - // instruction, or token.NoPos if it was not explicit in the - // source. - // - // For each ast.Node type, a particular token is designated as - // the closest location for the expression, e.g. the Go token - // for an *ast.GoStmt. This permits a compact but approximate - // mapping from Instructions to source positions for use in - // diagnostic messages, for example. - // - // (Do not use this position to determine which Instruction - // corresponds to an ast.Expr; see the notes for Value.Pos. - // This position may be used to determine which non-Value - // Instruction corresponds to some ast.Stmts, but not all: If - // and Jump instructions have no Pos(), for example.) - Pos() token.Pos -} - -// A Node is a node in the SSA value graph. 
Every concrete type that -// implements Node is also either a Value, an Instruction, or both. -// -// Node contains the methods common to Value and Instruction, plus the -// Operands and Referrers methods generalized to return nil for -// non-Instructions and non-Values, respectively. -// -// Node is provided to simplify SSA graph algorithms. Clients should -// use the more specific and informative Value or Instruction -// interfaces where appropriate. -// -type Node interface { - // Common methods: - String() string - Pos() token.Pos - Parent() *Function - - // Partial methods: - Operands(rands []*Value) []*Value // nil for non-Instructions - Referrers() *[]Instruction // nil for non-Values -} - -// Function represents the parameters, results, and code of a function -// or method. -// -// If Blocks is nil, this indicates an external function for which no -// Go source code is available. In this case, FreeVars and Locals -// are nil too. Clients performing whole-program analysis must -// handle external functions specially. -// -// Blocks contains the function's control-flow graph (CFG). -// Blocks[0] is the function entry point; block order is not otherwise -// semantically significant, though it may affect the readability of -// the disassembly. -// To iterate over the blocks in dominance order, use DomPreorder(). -// -// Recover is an optional second entry point to which control resumes -// after a recovered panic. The Recover block may contain only a return -// statement, preceded by a load of the function's named return -// parameters, if any. -// -// A nested function (Parent()!=nil) that refers to one or more -// lexically enclosing local variables ("free variables") has FreeVars. -// Such functions cannot be called directly but require a -// value created by MakeClosure which, via its Bindings, supplies -// values for these parameters. -// -// If the function is a method (Signature.Recv() != nil) then the first -// element of Params is the receiver parameter. -// -// A Go package may declare many functions called "init". -// For each one, Object().Name() returns "init" but Name() returns -// "init#1", etc, in declaration order. -// -// Pos() returns the declaring ast.FuncLit.Type.Func or the position -// of the ast.FuncDecl.Name, if the function was explicit in the -// source. Synthetic wrappers, for which Synthetic != "", may share -// the same position as the function they wrap. -// Syntax.Pos() always returns the position of the declaring "func" token. -// -// Type() returns the function's Signature. 
-// -type Function struct { - name string - object types.Object // a declared *types.Func or one of its wrappers - method *types.Selection // info about provenance of synthetic methods - Signature *types.Signature - pos token.Pos - - Synthetic string // provenance of synthetic function; "" for true source functions - syntax ast.Node // *ast.Func{Decl,Lit}; replaced with simple ast.Node after build, unless debug mode - parent *Function // enclosing function if anon; nil if global - Pkg *Package // enclosing package; nil for shared funcs (wrappers and error.Error) - Prog *Program // enclosing program - Params []*Parameter // function parameters; for methods, includes receiver - FreeVars []*FreeVar // free variables whose values must be supplied by closure - Locals []*Alloc // local variables of this function - Blocks []*BasicBlock // basic blocks of the function; nil => external - Recover *BasicBlock // optional; control transfers here after recovered panic - AnonFuncs []*Function // anonymous functions directly beneath this one - referrers []Instruction // referring instructions (iff Parent() != nil) - - // The following fields are set transiently during building, - // then cleared. - currentBlock *BasicBlock // where to emit code - objects map[types.Object]Value // addresses of local variables - namedResults []*Alloc // tuple of named results - targets *targets // linked stack of branch targets - lblocks map[*ast.Object]*lblock // labelled blocks -} - -// BasicBlock represents an SSA basic block. -// -// The final element of Instrs is always an explicit transfer of -// control (If, Jump, Return, or Panic). -// -// A block may contain no Instructions only if it is unreachable, -// i.e., Preds is nil. Empty blocks are typically pruned. -// -// BasicBlocks and their Preds/Succs relation form a (possibly cyclic) -// graph independent of the SSA Value graph: the control-flow graph or -// CFG. It is illegal for multiple edges to exist between the same -// pair of blocks. -// -// Each BasicBlock is also a node in the dominator tree of the CFG. -// The tree may be navigated using Idom()/Dominees() and queried using -// Dominates(). -// -// The order of Preds and Succs is significant (to Phi and If -// instructions, respectively). -// -type BasicBlock struct { - Index int // index of this block within Parent().Blocks - Comment string // optional label; no semantic significance - parent *Function // parent function - Instrs []Instruction // instructions in order - Preds, Succs []*BasicBlock // predecessors and successors - succs2 [2]*BasicBlock // initial space for Succs - dom domInfo // dominator tree info - gaps int // number of nil Instrs (transient) - rundefers int // number of rundefers (transient) -} - -// Pure values ---------------------------------------- - -// A FreeVar represents a free variable of the function to which it -// belongs. -// -// FreeVars are used to implement anonymous functions, whose free -// variables are lexically captured in a closure formed by -// MakeClosure. The value of such a free var is an Alloc or another -// FreeVar and is considered a potentially escaping heap address, with -// pointer type. -// -// FreeVars are also used to implement bound method closures. Such a -// free var represents the receiver value and may be of any type that -// has concrete methods. -// -// Pos() returns the position of the value that was captured, which -// belongs to an enclosing function. 
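Function and BasicBlock above form the spine of the representation. A sketch of the canonical traversal, which visits every instruction exactly once and reuses an operand buffer the way the package's own passes do; walkFunction is illustrative:

```go
import (
	"fmt"

	"honnef.co/go/tools/ssa"
)

// walkFunction prints each block and instruction of a built Function.
func walkFunction(fn *ssa.Function) {
	if fn.Blocks == nil {
		return // external function: no SSA code available
	}
	var rands [8]*ssa.Value // scratch space for Operands
	for _, b := range fn.Blocks {
		fmt.Printf(".%d (%s), preds=%d succs=%d\n",
			b.Index, b.Comment, len(b.Preds), len(b.Succs))
		for _, instr := range b.Instrs {
			fmt.Printf("\t%s\n", instr)
			for _, op := range instr.Operands(rands[:0]) {
				if *op == nil {
					continue // operands may legally point to a nil Value
				}
				// *op is a Value; its Referrers (if tracked) include instr.
			}
		}
	}
}
```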
-// -type FreeVar struct { - name string - typ types.Type - pos token.Pos - parent *Function - referrers []Instruction - - // Transiently needed during building. - outer Value // the Value captured from the enclosing context. -} - -// A Parameter represents an input parameter of a function. -// -type Parameter struct { - name string - object types.Object // a *types.Var; nil for non-source locals - typ types.Type - pos token.Pos - parent *Function - referrers []Instruction -} - -// A Const represents the value of a constant expression. -// -// The underlying type of a constant may be any boolean, numeric, or -// string type. In addition, a Const may represent the nil value of -// any reference type---interface, map, channel, pointer, slice, or -// function---but not "untyped nil". -// -// All source-level constant expressions are represented by a Const -// of the same type and value. -// -// Value holds the exact value of the constant, independent of its -// Type(), using the same representation as package go/exact uses for -// constants, or nil for a typed nil value. -// -// Pos() returns token.NoPos. -// -// Example printed form: -// 42:int -// "hello":untyped string -// 3+4i:MyComplex -// -type Const struct { - typ types.Type - Value exact.Value -} - -// A Global is a named Value holding the address of a package-level -// variable. -// -// Pos() returns the position of the ast.ValueSpec.Names[*] -// identifier. -// -type Global struct { - name string - object types.Object // a *types.Var; may be nil for synthetics e.g. init$guard - typ types.Type - pos token.Pos - - Pkg *Package -} - -// A Builtin represents a specific use of a built-in function, e.g. len. -// -// Builtins are immutable values. Builtins do not have addresses. -// Builtins can only appear in CallCommon.Func. -// -// Name() indicates the function: one of the built-in functions from the -// Go spec (excluding "make" and "new") or one of these ssa-defined -// intrinsics: -// -// // wrapnilchk returns ptr if non-nil, panics otherwise. -// // (For use in indirection wrappers.) -// func ssa:wrapnilchk(ptr *T, recvType, methodName string) *T -// -// Object() returns a *types.Builtin for built-ins defined by the spec, -// nil for others. -// -// Type() returns a *types.Signature representing the effective -// signature of the built-in for this call. -// -type Builtin struct { - name string - sig *types.Signature -} - -// Value-defining instructions ---------------------------------------- - -// The Alloc instruction reserves space for a variable of the given type, -// zero-initializes it, and yields its address. -// -// Alloc values are always addresses, and have pointer types, so the -// type of the allocated variable is actually -// Type().Underlying().(*types.Pointer).Elem(). -// -// If Heap is false, Alloc allocates space in the function's -// activation record (frame); we refer to an Alloc(Heap=false) as a -// "local" alloc. Each local Alloc returns the same address each time -// it is executed within the same activation; the space is -// re-initialized to zero. -// -// If Heap is true, Alloc allocates space in the heap; we -// refer to an Alloc(Heap=true) as a "new" alloc. Each new Alloc -// returns a different address each time it is executed. -// -// When Alloc is applied to a channel, map or slice type, it returns -// the address of an uninitialized (nil) reference of that kind; store -// the result of MakeSlice, MakeMap or MakeChan in that location to -// instantiate these types. 
-//
-// Pos() returns the ast.CompositeLit.Lbrace for a composite literal,
-// or the ast.CallExpr.Rparen for a call to new() or for a call that
-// allocates a varargs slice.
-//
-// Example printed form:
-//	t0 = local int
-//	t1 = new int
-//
-type Alloc struct {
-	register
-	Comment string
-	Heap    bool
-	index   int // dense numbering; for lifting
-}
-
-var _ Instruction = (*Sigma)(nil)
-var _ Value = (*Sigma)(nil)
-
-type Sigma struct {
-	register
-	X      Value
-	Branch bool
-}
-
-// Value returns the original value refined by this σ-node, following
-// any chain of nested Sigmas back to the first non-Sigma value.
-func (p *Sigma) Value() Value {
-	v := p.X
-	for {
-		sigma, ok := v.(*Sigma)
-		if !ok {
-			break
-		}
-		v = sigma.X
-	}
-	return v
-}
-
-func (p *Sigma) String() string {
-	return fmt.Sprintf("σ [%s.%t]", relName(p.X, p), p.Branch)
-}
-
-// The Phi instruction represents an SSA φ-node, which combines values
-// that differ across incoming control-flow edges and yields a new
-// value. Within a block, all φ-nodes must appear before all non-φ
-// nodes.
-//
-// Pos() returns the position of the && or || for short-circuit
-// control-flow joins, or that of the *Alloc for φ-nodes inserted
-// during SSA renaming.
-//
-// Example printed form:
-//	t2 = phi [0: t0, 1: t1]
-//
-type Phi struct {
-	register
-	Comment string  // a hint as to its purpose
-	Edges   []Value // Edges[i] is value for Block().Preds[i]
-}
-
-// The Call instruction represents a function or method call.
-//
-// The Call instruction yields the function result if there is exactly
-// one. Otherwise it returns a tuple, the components of which are
-// accessed via Extract.
-//
-// See CallCommon for generic function call documentation.
-//
-// Pos() returns the ast.CallExpr.Lparen, if explicit in the source.
-//
-// Example printed form:
-//	t2 = println(t0, t1)
-//	t4 = t3()
-//	t7 = invoke t5.Println(...t6)
-//
-type Call struct {
-	register
-	Call CallCommon
-}
-
-// The BinOp instruction yields the result of binary operation X Op Y.
-//
-// Pos() returns the ast.BinaryExpr.OpPos, if explicit in the source.
-//
-// Example printed form:
-//	t1 = t0 + 1:int
-//
-type BinOp struct {
-	register
-	// One of:
-	// ADD SUB MUL QUO REM          + - * / %
-	// AND OR XOR SHL SHR AND_NOT   & | ^ << >> &^
-	// EQL NEQ LSS LEQ GTR GEQ      == != < <= > >=
-	Op   token.Token
-	X, Y Value
-}
-
-// The UnOp instruction yields the result of Op X.
-// ARROW is channel receive.
-// MUL is pointer indirection (load).
-// XOR is bitwise complement.
-// SUB is negation.
-// NOT is logical negation.
-//
-// If CommaOk and Op=ARROW, the result is a 2-tuple of the value above
-// and a boolean indicating the success of the receive. The
-// components of the tuple are accessed using Extract.
-//
-// Pos() returns the ast.UnaryExpr.OpPos, if explicit in the source.
-// For receive operations (ARROW) implicit in ranging over a channel,
-// Pos() returns the ast.RangeStmt.For.
-// For implicit memory loads (MUL), Pos() returns the position of the
-// most closely associated source-level construct; the details are not
-// specified.
-//
-// Example printed form:
-//	t0 = *x
-//	t2 = <-t1,ok
-//
-type UnOp struct {
-	register
-	Op      token.Token // One of: NOT SUB ARROW MUL XOR ! - <- * ^
-	X       Value
-	CommaOk bool
-}
-
-// The ChangeType instruction applies to X a value-preserving type
-// change to Type().
-//
-// Type changes are permitted:
-//    - between a named type and its underlying type.
-//    - between two named types of the same underlying type.
-//    - between (possibly named) pointers to identical base types.
-// - from a bidirectional channel to a read- or write-channel, -// optionally adding/removing a name. -// -// This operation cannot fail dynamically. -// -// Pos() returns the ast.CallExpr.Lparen, if the instruction arose -// from an explicit conversion in the source. -// -// Example printed form: -// t1 = changetype *int <- IntPtr (t0) -// -type ChangeType struct { - register - X Value -} - -// The Convert instruction yields the conversion of value X to type -// Type(). One or both of those types is basic (but possibly named). -// -// A conversion may change the value and representation of its operand. -// Conversions are permitted: -// - between real numeric types. -// - between complex numeric types. -// - between string and []byte or []rune. -// - between pointers and unsafe.Pointer. -// - between unsafe.Pointer and uintptr. -// - from (Unicode) integer to (UTF-8) string. -// A conversion may imply a type name change also. -// -// This operation cannot fail dynamically. -// -// Conversions of untyped string/number/bool constants to a specific -// representation are eliminated during SSA construction. -// -// Pos() returns the ast.CallExpr.Lparen, if the instruction arose -// from an explicit conversion in the source. -// -// Example printed form: -// t1 = convert []byte <- string (t0) -// -type Convert struct { - register - X Value -} - -// ChangeInterface constructs a value of one interface type from a -// value of another interface type known to be assignable to it. -// This operation cannot fail. -// -// Pos() returns the ast.CallExpr.Lparen if the instruction arose from -// an explicit T(e) conversion; the ast.TypeAssertExpr.Lparen if the -// instruction arose from an explicit e.(T) operation; or token.NoPos -// otherwise. -// -// Example printed form: -// t1 = change interface interface{} <- I (t0) -// -type ChangeInterface struct { - register - X Value -} - -// MakeInterface constructs an instance of an interface type from a -// value of a concrete type. -// -// Use Program.MethodSets.MethodSet(X.Type()) to find the method-set -// of X, and Program.Method(m) to find the implementation of a method. -// -// To construct the zero value of an interface type T, use: -// NewConst(exact.MakeNil(), T, pos) -// -// Pos() returns the ast.CallExpr.Lparen, if the instruction arose -// from an explicit conversion in the source. -// -// Example printed form: -// t1 = make interface{} <- int (42:int) -// t2 = make Stringer <- t0 -// -type MakeInterface struct { - register - X Value -} - -// The MakeClosure instruction yields a closure value whose code is -// Fn and whose free variables' values are supplied by Bindings. -// -// Type() returns a (possibly named) *types.Signature. -// -// Pos() returns the ast.FuncLit.Type.Func for a function literal -// closure or the ast.SelectorExpr.Sel for a bound method closure. -// -// Example printed form: -// t0 = make closure anon@1.2 [x y z] -// t1 = make closure bound$(main.I).add [i] -// -type MakeClosure struct { - register - Fn Value // always a *Function - Bindings []Value // values for each free variable in Fn.FreeVars -} - -// The MakeMap instruction creates a new hash-table-based map object -// and yields a value of kind map. -// -// Type() returns a (possibly named) *types.Map. -// -// Pos() returns the ast.CallExpr.Lparen, if created by make(map), or -// the ast.CompositeLit.Lbrack if created by a literal. 
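
The four conversion-like instructions above are easy to confuse, so a short classifier helps: `ChangeType` and `ChangeInterface` preserve representation, `Convert` may transform it, and `MakeInterface` boxes a concrete value. A minimal sketch assuming the vendored import path; `reportConversions` is an illustrative name.

```go
package demo

import (
	"fmt"

	"honnef.co/go/tools/ssa"
)

// reportConversions prints each conversion-like instruction in fn
// together with its source and destination types.
func reportConversions(fn *ssa.Function) {
	for _, b := range fn.Blocks {
		for _, instr := range b.Instrs {
			switch v := instr.(type) {
			case *ssa.ChangeType:
				fmt.Printf("changetype  %s <- %s\n", v.Type(), v.X.Type())
			case *ssa.Convert:
				fmt.Printf("convert     %s <- %s\n", v.Type(), v.X.Type())
			case *ssa.ChangeInterface:
				fmt.Printf("changeiface %s <- %s\n", v.Type(), v.X.Type())
			case *ssa.MakeInterface:
				fmt.Printf("box         %s <- %s\n", v.Type(), v.X.Type())
			}
		}
	}
}
```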
-// -// Example printed form: -// t1 = make map[string]int t0 -// t1 = make StringIntMap t0 -// -type MakeMap struct { - register - Reserve Value // initial space reservation; nil => default -} - -// The MakeChan instruction creates a new channel object and yields a -// value of kind chan. -// -// Type() returns a (possibly named) *types.Chan. -// -// Pos() returns the ast.CallExpr.Lparen for the make(chan) that -// created it. -// -// Example printed form: -// t0 = make chan int 0 -// t0 = make IntChan 0 -// -type MakeChan struct { - register - Size Value // int; size of buffer; zero => synchronous. -} - -// The MakeSlice instruction yields a slice of length Len backed by a -// newly allocated array of length Cap. -// -// Both Len and Cap must be non-nil Values of integer type. -// -// (Alloc(types.Array) followed by Slice will not suffice because -// Alloc can only create arrays of constant length.) -// -// Type() returns a (possibly named) *types.Slice. -// -// Pos() returns the ast.CallExpr.Lparen for the make([]T) that -// created it. -// -// Example printed form: -// t1 = make []string 1:int t0 -// t1 = make StringSlice 1:int t0 -// -type MakeSlice struct { - register - Len Value - Cap Value -} - -// The Slice instruction yields a slice of an existing string, slice -// or *array X between optional integer bounds Low and High. -// -// Dynamically, this instruction panics if X evaluates to a nil *array -// pointer. -// -// Type() returns string if the type of X was string, otherwise a -// *types.Slice with the same element type as X. -// -// Pos() returns the ast.SliceExpr.Lbrack if created by a x[:] slice -// operation, the ast.CompositeLit.Lbrace if created by a literal, or -// NoPos if not explicit in the source (e.g. a variadic argument slice). -// -// Example printed form: -// t1 = slice t0[1:] -// -type Slice struct { - register - X Value // slice, string, or *array - Low, High, Max Value // each may be nil -} - -// The FieldAddr instruction yields the address of Field of *struct X. -// -// The field is identified by its index within the field list of the -// struct type of X. -// -// Dynamically, this instruction panics if X evaluates to a nil -// pointer. -// -// Type() returns a (possibly named) *types.Pointer. -// -// Pos() returns the position of the ast.SelectorExpr.Sel for the -// field, if explicit in the source. -// -// Example printed form: -// t1 = &t0.name [#1] -// -type FieldAddr struct { - register - X Value // *struct - Field int // index into X.Type().Deref().(*types.Struct).Fields -} - -// The Field instruction yields the Field of struct X. -// -// The field is identified by its index within the field list of the -// struct type of X; by using numeric indices we avoid ambiguity of -// package-local identifiers and permit compact representations. -// -// Pos() returns the position of the ast.SelectorExpr.Sel for the -// field, if explicit in the source. -// -// Example printed form: -// t1 = t0.name [#1] -// -type Field struct { - register - X Value // struct - Field int // index into X.Type().(*types.Struct).Fields -} - -// The IndexAddr instruction yields the address of the element at -// index Index of collection X. Index is an integer expression. -// -// The elements of maps and strings are not addressable; use Lookup or -// MapUpdate instead. -// -// Dynamically, this instruction panics if X evaluates to a nil *array -// pointer. -// -// Type() returns a (possibly named) *types.Pointer. 
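
Because `FieldAddr` identifies a field only by its numeric index, mapping it back to a source-level name takes two `Underlying()` steps, exactly as the type documentation above prescribes: X is a pointer to a struct. A minimal sketch; `fieldName` is a hypothetical helper.

```go
package demo

import (
	"go/types"

	"honnef.co/go/tools/ssa"
)

// fieldName resolves the numeric Field index of a FieldAddr to the
// field's name. X has (possibly named) pointer-to-struct type, so
// Underlying() first reaches the *types.Pointer, then the
// *types.Struct that owns the field list.
func fieldName(fa *ssa.FieldAddr) string {
	ptr := fa.X.Type().Underlying().(*types.Pointer)
	st := ptr.Elem().Underlying().(*types.Struct)
	return st.Field(fa.Field).Name()
}
```

For an instruction printed as `t1 = &t0.name [#1]`, this returns `"name"`.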
-//
-// Pos() returns the ast.IndexExpr.Lbrack for the index operation, if
-// explicit in the source.
-//
-// Example printed form:
-//	t2 = &t0[t1]
-//
-type IndexAddr struct {
-	register
-	X     Value // slice or *array,
-	Index Value // numeric index
-}
-
-// The Index instruction yields element Index of array X.
-//
-// Pos() returns the ast.IndexExpr.Lbrack for the index operation, if
-// explicit in the source.
-//
-// Example printed form:
-//	t2 = t0[t1]
-//
-type Index struct {
-	register
-	X     Value // array
-	Index Value // integer index
-}
-
-// The Lookup instruction yields element Index of collection X, a map
-// or string. Index is an integer expression if X is a string or the
-// appropriate key type if X is a map.
-//
-// If CommaOk, the result is a 2-tuple of the value above and a
-// boolean indicating the result of a map membership test for the key.
-// The components of the tuple are accessed using Extract.
-//
-// Pos() returns the ast.IndexExpr.Lbrack, if explicit in the source.
-//
-// Example printed form:
-//	t2 = t0[t1]
-//	t5 = t3[t4],ok
-//
-type Lookup struct {
-	register
-	X       Value // string or map
-	Index   Value // numeric or key-typed index
-	CommaOk bool  // return a value,ok pair
-}
-
-// SelectState is a helper for Select.
-// It represents one goal state and its corresponding communication.
-//
-type SelectState struct {
-	Dir       types.ChanDir // direction of case (SendOnly or RecvOnly)
-	Chan      Value         // channel to use (for send or receive)
-	Send      Value         // value to send (for send)
-	Pos       token.Pos     // position of token.ARROW
-	DebugNode ast.Node      // ast.SendStmt or ast.UnaryExpr(<-) [debug mode]
-}
-
-// The Select instruction tests whether (or blocks until) one
-// of the specified sent or received states is entered.
-//
-// Let n be the number of States for which Dir==RECV and T_i (0<=i<n)
-// be the element type of each such state's Chan.
-// Select returns an n+2-tuple
-//	(index int, recvOk bool, r_0 T_0, ... r_n-1 T_n-1)
-// The tuple's components, described below, must be accessed via the
-// Extract instruction.
-//
-// If Blocking, select waits until exactly one state holds, i.e. a
-// channel becomes ready for the designated operation of sending or
-// receiving; select chooses one of them: the index of the chosen
-// channel/send/receive pair is returned in index. For recv
-// operations, the received value, if any, is stored in the tuple,
-// and recvOk indicates whether a value was received.
-//
-// If !Blocking, select doesn't block if no states hold; instead it
-// returns immediately with index equal to -1.
-//
-// If the chosen channel was used for a receive, the r_i component is
-// set to the received value, where i is the index of that state among
-// all n receive states; otherwise r_i has the zero value of type T_i.
-// Note that the receive index i is not the same as the state index.
-//
-// The second component of the tuple, recvOk, is a boolean whose value
-// is true iff the selected operation was a receive and the receive
-// successfully yielded a value.
-//
-// Pos() returns the ast.SelectStmt.Select.
-//
-// Example printed form:
-//	t3 = select nonblocking [<-t0, t1<-t2]
-//	t4 = select blocking []
-//
-type Select struct {
-	register
-	States   []*SelectState
-	Blocking bool
-}
-
-// The Range instruction yields an iterator over the domain and range
-// of X, which must be a string or map.
-//
-// Elements are accessed via Next.
-//
-// Type() returns an opaque and degenerate "rangeIter" type.
-//
-// Pos() returns the ast.RangeStmt.For.
-//
-// Example printed form:
-//	t0 = range "hello":string
-//
-type Range struct {
-	register
-	X Value // string or map
-}
-
-// The Next instruction reads and advances the (map or string)
-// iterator Iter and returns a 3-tuple value (ok, k, v). If the
-// iterator is not exhausted, ok is true and k and v are the next
-// elements of the domain and range, respectively. Otherwise ok is
-// false and k and v are undefined.
-//
-// Components of the tuple are accessed using Extract.
-//
-// The IsString field distinguishes iterators over strings from those
-// over maps, as the Type() alone is insufficient: consider
-// map[int]rune.
-//
-// Type() returns a *types.Tuple for the triple (ok, k, v).
-// The types of k and/or v may be types.Invalid.
-//
-// Example printed form:
-//	t1 = next t0
-//
-type Next struct {
-	register
-	Iter     Value
-	IsString bool // true => string iterator; false => map iterator.
-}
-
-// The TypeAssert instruction tests whether interface value X has type
-// AssertedType.
-//
-// If !CommaOk, on success it returns v, the result of the conversion
-// (defined below); on failure it panics.
-//
-// If CommaOk: on success it returns a pair (v, true) where v is the
-// result of the conversion; on failure it returns (z, false) where z
-// is AssertedType's zero value. The components of the pair must be
-// accessed using the Extract instruction.
-//
-// If AssertedType is a concrete type, TypeAssert checks whether the
-// dynamic type in interface X is equal to it, and if so, the result
-// of the conversion is a copy of the value in the interface.
-//
-// If AssertedType is an interface, TypeAssert checks whether the
-// dynamic type of the interface is assignable to it, and if so, the
-// result of the conversion is a copy of the interface value X.
-// If AssertedType is a superinterface of X.Type(), the operation will
-// fail iff the operand is nil. (Contrast with ChangeInterface, which
-// performs no nil-check.)
-//
-// Type() reflects the actual type of the result, possibly a
-// 2-types.Tuple; AssertedType is the asserted type.
-//
-// Pos() returns the ast.CallExpr.Lparen if the instruction arose from
-// an explicit T(e) conversion; the ast.TypeAssertExpr.Lparen if the
-// instruction arose from an explicit e.(T) operation; or the
-// ast.CaseClause.Case if the instruction arose from a case of a
-// type-switch statement.
-// -// Example printed form: -// t1 = typeassert t0.(int) -// t3 = typeassert,ok t2.(T) -// -type TypeAssert struct { - register - X Value - AssertedType types.Type - CommaOk bool -} - -// The Extract instruction yields component Index of Tuple. -// -// This is used to access the results of instructions with multiple -// return values, such as Call, TypeAssert, Next, UnOp(ARROW) and -// IndexExpr(Map). -// -// Example printed form: -// t1 = extract t0 #1 -// -type Extract struct { - register - Tuple Value - Index int -} - -// Instructions executed for effect. They do not yield a value. -------------------- - -// The Jump instruction transfers control to the sole successor of its -// owning block. -// -// A Jump must be the last instruction of its containing BasicBlock. -// -// Pos() returns NoPos. -// -// Example printed form: -// jump done -// -type Jump struct { - anInstruction -} - -// The If instruction transfers control to one of the two successors -// of its owning block, depending on the boolean Cond: the first if -// true, the second if false. -// -// An If instruction must be the last instruction of its containing -// BasicBlock. -// -// Pos() returns NoPos. -// -// Example printed form: -// if t0 goto done else body -// -type If struct { - anInstruction - Cond Value -} - -// The Return instruction returns values and control back to the calling -// function. -// -// len(Results) is always equal to the number of results in the -// function's signature. -// -// If len(Results) > 1, Return returns a tuple value with the specified -// components which the caller must access using Extract instructions. -// -// There is no instruction to return a ready-made tuple like those -// returned by a "value,ok"-mode TypeAssert, Lookup or UnOp(ARROW) or -// a tail-call to a function with multiple result parameters. -// -// Return must be the last instruction of its containing BasicBlock. -// Such a block has no successors. -// -// Pos() returns the ast.ReturnStmt.Return, if explicit in the source. -// -// Example printed form: -// return -// return nil:I, 2:int -// -type Return struct { - anInstruction - Results []Value - pos token.Pos -} - -// The RunDefers instruction pops and invokes the entire stack of -// procedure calls pushed by Defer instructions in this function. -// -// It is legal to encounter multiple 'rundefers' instructions in a -// single control-flow path through a function; this is useful in -// the combined init() function, for example. -// -// Pos() returns NoPos. -// -// Example printed form: -// rundefers -// -type RunDefers struct { - anInstruction -} - -// The Panic instruction initiates a panic with value X. -// -// A Panic instruction must be the last instruction of its containing -// BasicBlock, which must have no successors. -// -// NB: 'go panic(x)' and 'defer panic(x)' do not use this instruction; -// they are treated as calls to a built-in function. -// -// Pos() returns the ast.CallExpr.Lparen if this panic was explicit -// in the source. -// -// Example printed form: -// panic t0 -// -type Panic struct { - anInstruction - X Value // an interface{} - pos token.Pos -} - -// The Go instruction creates a new goroutine and calls the specified -// function within it. -// -// See CallCommon for generic function call documentation. -// -// Pos() returns the ast.GoStmt.Go. 
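
All three comma-ok producers documented in this section (map `Lookup`, `TypeAssert`, and `UnOp` channel receive) follow the same pattern: they yield a 2-tuple that only `Extract` can take apart. A minimal sketch of finding those tuples and their consumers, assuming the vendored import path; `commaOkUsers` is an illustrative name.

```go
package demo

import (
	"fmt"

	"honnef.co/go/tools/ssa"
)

// commaOkUsers locates every comma-ok tuple produced in fn and
// reports which Extract instructions consume each component
// (index 0 is the value, index 1 the ok flag).
func commaOkUsers(fn *ssa.Function) {
	for _, b := range fn.Blocks {
		for _, instr := range b.Instrs {
			var tuple ssa.Value
			switch v := instr.(type) {
			case *ssa.Lookup:
				if v.CommaOk {
					tuple = v
				}
			case *ssa.TypeAssert:
				if v.CommaOk {
					tuple = v
				}
			case *ssa.UnOp: // CommaOk is only set for ARROW (receive)
				if v.CommaOk {
					tuple = v
				}
			}
			if tuple == nil {
				continue
			}
			for _, ref := range *tuple.Referrers() {
				if ext, ok := ref.(*ssa.Extract); ok {
					fmt.Printf("%s: component #%d of %s used as %s\n",
						fn, ext.Index, tuple.Name(), ext.Name())
				}
			}
		}
	}
}
```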
-// -// Example printed form: -// go println(t0, t1) -// go t3() -// go invoke t5.Println(...t6) -// -type Go struct { - anInstruction - Call CallCommon - pos token.Pos -} - -// The Defer instruction pushes the specified call onto a stack of -// functions to be called by a RunDefers instruction or by a panic. -// -// See CallCommon for generic function call documentation. -// -// Pos() returns the ast.DeferStmt.Defer. -// -// Example printed form: -// defer println(t0, t1) -// defer t3() -// defer invoke t5.Println(...t6) -// -type Defer struct { - anInstruction - Call CallCommon - pos token.Pos -} - -// The Send instruction sends X on channel Chan. -// -// Pos() returns the ast.SendStmt.Arrow, if explicit in the source. -// -// Example printed form: -// send t0 <- t1 -// -type Send struct { - anInstruction - Chan, X Value - pos token.Pos -} - -// The Store instruction stores Val at address Addr. -// Stores can be of arbitrary types. -// -// Pos() returns the position of the source-level construct most closely -// associated with the memory store operation. -// Since implicit memory stores are numerous and varied and depend upon -// implementation choices, the details are not specified. -// -// Example printed form: -// *x = y -// -type Store struct { - anInstruction - Addr Value - Val Value - pos token.Pos -} - -// The BlankStore instruction is emitted for assignments to the blank -// identifier. -// -// BlankStore is a pseudo-instruction: it has no dynamic effect. -// -// Pos() returns NoPos. -// -// Example printed form: -// _ = t0 -// -type BlankStore struct { - anInstruction - Val Value -} - -// The MapUpdate instruction updates the association of Map[Key] to -// Value. -// -// Pos() returns the ast.KeyValueExpr.Colon or ast.IndexExpr.Lbrack, -// if explicit in the source. -// -// Example printed form: -// t0[t1] = t2 -// -type MapUpdate struct { - anInstruction - Map Value - Key Value - Value Value - pos token.Pos -} - -// A DebugRef instruction maps a source-level expression Expr to the -// SSA value X that represents the value (!IsAddr) or address (IsAddr) -// of that expression. -// -// DebugRef is a pseudo-instruction: it has no dynamic effect. -// -// Pos() returns Expr.Pos(), the start position of the source-level -// expression. This is not the same as the "designated" token as -// documented at Value.Pos(). e.g. CallExpr.Pos() does not return the -// position of the ("designated") Lparen token. -// -// If Expr is an *ast.Ident denoting a var or func, Object() returns -// the object; though this information can be obtained from the type -// checker, including it here greatly facilitates debugging. -// For non-Ident expressions, Object() returns nil. -// -// DebugRefs are generated only for functions built with debugging -// enabled; see Package.SetDebugMode() and the GlobalDebug builder -// mode flag. -// -// DebugRefs are not emitted for ast.Idents referring to constants or -// predeclared identifiers, since they are trivial and numerous. -// Nor are they emitted for ast.ParenExprs. -// -// (By representing these as instructions, rather than out-of-band, -// consistency is maintained during transformation passes by the -// ordinary SSA renaming machinery.) 
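
`DebugRef` is the hook that makes source-to-SSA mapping possible, and its typical use is a reverse lookup: given an expression, find the value or address it denotes. A minimal sketch under the assumption that the package was built with `SetDebugMode(true)` as the documentation above requires; `valueForExpr` is a hypothetical helper name.

```go
package demo

import (
	"go/ast"

	"honnef.co/go/tools/ssa"
)

// valueForExpr scans a debug-built function for the DebugRef mapping
// expression e to an SSA value. isAddr reports whether v is the
// address of e rather than its value. Returns (nil, false) if no
// DebugRef matches, e.g. when debug mode was off during Build.
func valueForExpr(fn *ssa.Function, e ast.Expr) (v ssa.Value, isAddr bool) {
	for _, b := range fn.Blocks {
		for _, instr := range b.Instrs {
			if ref, ok := instr.(*ssa.DebugRef); ok && ref.Expr == e {
				return ref.X, ref.IsAddr
			}
		}
	}
	return nil, false
}
```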
-// -// Example printed form: -// ; *ast.CallExpr @ 102:9 is t5 -// ; var x float64 @ 109:72 is x -// ; address of *ast.CompositeLit @ 216:10 is t0 -// -type DebugRef struct { - anInstruction - Expr ast.Expr // the referring expression (never *ast.ParenExpr) - object types.Object // the identity of the source var/func - IsAddr bool // Expr is addressable and X is the address it denotes - X Value // the value or address of Expr -} - -// Embeddable mix-ins and helpers for common parts of other structs. ----------- - -// register is a mix-in embedded by all SSA values that are also -// instructions, i.e. virtual registers, and provides a uniform -// implementation of most of the Value interface: Value.Name() is a -// numbered register (e.g. "t0"); the other methods are field accessors. -// -// Temporary names are automatically assigned to each register on -// completion of building a function in SSA form. -// -// Clients must not assume that the 'id' value (and the Name() derived -// from it) is unique within a function. As always in this API, -// semantics are determined only by identity; names exist only to -// facilitate debugging. -// -type register struct { - anInstruction - num int // "name" of virtual register, e.g. "t0". Not guaranteed unique. - typ types.Type // type of virtual register - pos token.Pos // position of source expression, or NoPos - referrers []Instruction -} - -// anInstruction is a mix-in embedded by all Instructions. -// It provides the implementations of the Block and setBlock methods. -type anInstruction struct { - block *BasicBlock // the basic block of this instruction -} - -// CallCommon is contained by Go, Defer and Call to hold the -// common parts of a function or method call. -// -// Each CallCommon exists in one of two modes, function call and -// interface method invocation, or "call" and "invoke" for short. -// -// 1. "call" mode: when Method is nil (!IsInvoke), a CallCommon -// represents an ordinary function call of the value in Value, -// which may be a *Builtin, a *Function or any other value of kind -// 'func'. -// -// Value may be one of: -// (a) a *Function, indicating a statically dispatched call -// to a package-level function, an anonymous function, or -// a method of a named type. -// (b) a *MakeClosure, indicating an immediately applied -// function literal with free variables. -// (c) a *Builtin, indicating a statically dispatched call -// to a built-in function. -// (d) any other value, indicating a dynamically dispatched -// function call. -// StaticCallee returns the identity of the callee in cases -// (a) and (b), nil otherwise. -// -// Args contains the arguments to the call. If Value is a method, -// Args[0] contains the receiver parameter. -// -// Example printed form: -// t2 = println(t0, t1) -// go t3() -// defer t5(...t6) -// -// 2. "invoke" mode: when Method is non-nil (IsInvoke), a CallCommon -// represents a dynamically dispatched call to an interface method. -// In this mode, Value is the interface value and Method is the -// interface's abstract method. Note: an abstract method may be -// shared by multiple interfaces due to embedding; Value.Type() -// provides the specific interface used for this call. -// -// Value is implicitly supplied to the concrete method implementation -// as the receiver parameter; in other words, Args[0] holds not the -// receiver but the first true argument. 
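
The call/invoke asymmetry described above trips up most first-time users of this API: in call mode a method receiver rides in `Args[0]`, while in invoke mode the receiver is `Value` and `Args` holds only the true arguments. A minimal sketch that normalizes the two; `actualArgs` is an illustrative name.

```go
package demo

import (
	"honnef.co/go/tools/ssa"
)

// actualArgs returns the full argument list of a call site with the
// receiver (if any) first, regardless of mode.
func actualArgs(common *ssa.CallCommon) []ssa.Value {
	if common.IsInvoke() {
		// Invoke mode: the interface value is the receiver.
		args := make([]ssa.Value, 0, len(common.Args)+1)
		args = append(args, common.Value)
		return append(args, common.Args...)
	}
	// Call mode: Args already includes the receiver for methods.
	return common.Args
}
```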
-// -// Example printed form: -// t1 = invoke t0.String() -// go invoke t3.Run(t2) -// defer invoke t4.Handle(...t5) -// -// For all calls to variadic functions (Signature().Variadic()), -// the last element of Args is a slice. -// -type CallCommon struct { - Value Value // receiver (invoke mode) or func value (call mode) - Method *types.Func // abstract method (invoke mode) - Args []Value // actual parameters (in static method call, includes receiver) - pos token.Pos // position of CallExpr.Lparen, iff explicit in source -} - -// IsInvoke returns true if this call has "invoke" (not "call") mode. -func (c *CallCommon) IsInvoke() bool { - return c.Method != nil -} - -func (c *CallCommon) Pos() token.Pos { return c.pos } - -// Signature returns the signature of the called function. -// -// For an "invoke"-mode call, the signature of the interface method is -// returned. -// -// In either "call" or "invoke" mode, if the callee is a method, its -// receiver is represented by sig.Recv, not sig.Params().At(0). -// -func (c *CallCommon) Signature() *types.Signature { - if c.Method != nil { - return c.Method.Type().(*types.Signature) - } - return c.Value.Type().Underlying().(*types.Signature) -} - -// StaticCallee returns the callee if this is a trivially static -// "call"-mode call to a function. -func (c *CallCommon) StaticCallee() *Function { - switch fn := c.Value.(type) { - case *Function: - return fn - case *MakeClosure: - return fn.Fn.(*Function) - } - return nil -} - -// Description returns a description of the mode of this call suitable -// for a user interface, e.g., "static method call". -func (c *CallCommon) Description() string { - switch fn := c.Value.(type) { - case *Builtin: - return "built-in function call" - case *MakeClosure: - return "static function closure call" - case *Function: - if fn.Signature.Recv() != nil { - return "static method call" - } - return "static function call" - } - if c.IsInvoke() { - return "dynamic method call" // ("invoke" mode) - } - return "dynamic function call" -} - -// The CallInstruction interface, implemented by *Go, *Defer and *Call, -// exposes the common parts of function-calling instructions, -// yet provides a way back to the Value defined by *Call alone. 
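
A common analysis pattern ties the pieces of this section together: walk every `Call`, `Go`, and `Defer` through the `CallInstruction` interface declared just below, then use `Description` and `StaticCallee` to classify each site. A minimal sketch assuming the vendored import path; `classifyCalls` is a hypothetical name.

```go
package demo

import (
	"fmt"

	"honnef.co/go/tools/ssa"
)

// classifyCalls prints the dispatch mode of every call site in fn.
// StaticCallee is non-nil only for statically dispatched calls
// (cases (a) and (b) of call mode above).
func classifyCalls(fn *ssa.Function) {
	for _, b := range fn.Blocks {
		for _, instr := range b.Instrs {
			call, ok := instr.(ssa.CallInstruction)
			if !ok {
				continue
			}
			common := call.Common()
			if callee := common.StaticCallee(); callee != nil {
				fmt.Printf("%s: %s to %s\n", fn, common.Description(), callee)
			} else {
				fmt.Printf("%s: %s via %s\n", fn, common.Description(), common.Value.Name())
			}
		}
	}
}
```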
-// -type CallInstruction interface { - Instruction - Common() *CallCommon // returns the common parts of the call - Value() *Call // returns the result value of the call (*Call) or nil (*Go, *Defer) -} - -func (s *Call) Common() *CallCommon { return &s.Call } -func (s *Defer) Common() *CallCommon { return &s.Call } -func (s *Go) Common() *CallCommon { return &s.Call } - -func (s *Call) Value() *Call { return s } -func (s *Defer) Value() *Call { return nil } -func (s *Go) Value() *Call { return nil } - -func (v *Builtin) Type() types.Type { return v.sig } -func (v *Builtin) Name() string { return v.name } -func (*Builtin) Referrers() *[]Instruction { return nil } -func (v *Builtin) Pos() token.Pos { return token.NoPos } -func (v *Builtin) Object() types.Object { return types.Universe.Lookup(v.name) } -func (v *Builtin) Parent() *Function { return nil } - -func (v *FreeVar) Type() types.Type { return v.typ } -func (v *FreeVar) Name() string { return v.name } -func (v *FreeVar) Referrers() *[]Instruction { return &v.referrers } -func (v *FreeVar) Pos() token.Pos { return v.pos } -func (v *FreeVar) Parent() *Function { return v.parent } - -func (v *Global) Type() types.Type { return v.typ } -func (v *Global) Name() string { return v.name } -func (v *Global) Parent() *Function { return nil } -func (v *Global) Pos() token.Pos { return v.pos } -func (v *Global) Referrers() *[]Instruction { return nil } -func (v *Global) Token() token.Token { return token.VAR } -func (v *Global) Object() types.Object { return v.object } -func (v *Global) String() string { return v.RelString(nil) } -func (v *Global) Package() *Package { return v.Pkg } -func (v *Global) RelString(from *types.Package) string { return relString(v, from) } - -func (v *Function) Name() string { return v.name } -func (v *Function) Type() types.Type { return v.Signature } -func (v *Function) Pos() token.Pos { return v.pos } -func (v *Function) Token() token.Token { return token.FUNC } -func (v *Function) Object() types.Object { return v.object } -func (v *Function) String() string { return v.RelString(nil) } -func (v *Function) Package() *Package { return v.Pkg } -func (v *Function) Parent() *Function { return v.parent } -func (v *Function) Referrers() *[]Instruction { - if v.parent != nil { - return &v.referrers - } - return nil -} - -func (v *Parameter) Type() types.Type { return v.typ } -func (v *Parameter) Name() string { return v.name } -func (v *Parameter) Object() types.Object { return v.object } -func (v *Parameter) Referrers() *[]Instruction { return &v.referrers } -func (v *Parameter) Pos() token.Pos { return v.pos } -func (v *Parameter) Parent() *Function { return v.parent } - -func (v *Alloc) Type() types.Type { return v.typ } -func (v *Alloc) Referrers() *[]Instruction { return &v.referrers } -func (v *Alloc) Pos() token.Pos { return v.pos } - -func (v *register) Type() types.Type { return v.typ } -func (v *register) setType(typ types.Type) { v.typ = typ } -func (v *register) Name() string { return fmt.Sprintf("t%d", v.num) } -func (v *register) setNum(num int) { v.num = num } -func (v *register) Referrers() *[]Instruction { return &v.referrers } -func (v *register) Pos() token.Pos { return v.pos } -func (v *register) setPos(pos token.Pos) { v.pos = pos } - -func (v *anInstruction) Parent() *Function { return v.block.parent } -func (v *anInstruction) Block() *BasicBlock { return v.block } -func (v *anInstruction) setBlock(block *BasicBlock) { v.block = block } -func (v *anInstruction) Referrers() *[]Instruction { return nil 
} - -func (t *Type) Name() string { return t.object.Name() } -func (t *Type) Pos() token.Pos { return t.object.Pos() } -func (t *Type) Type() types.Type { return t.object.Type() } -func (t *Type) Token() token.Token { return token.TYPE } -func (t *Type) Object() types.Object { return t.object } -func (t *Type) String() string { return t.RelString(nil) } -func (t *Type) Package() *Package { return t.pkg } -func (t *Type) RelString(from *types.Package) string { return relString(t, from) } - -func (c *NamedConst) Name() string { return c.object.Name() } -func (c *NamedConst) Pos() token.Pos { return c.object.Pos() } -func (c *NamedConst) String() string { return c.RelString(nil) } -func (c *NamedConst) Type() types.Type { return c.object.Type() } -func (c *NamedConst) Token() token.Token { return token.CONST } -func (c *NamedConst) Object() types.Object { return c.object } -func (c *NamedConst) Package() *Package { return c.pkg } -func (c *NamedConst) RelString(from *types.Package) string { return relString(c, from) } - -// Func returns the package-level function of the specified name, -// or nil if not found. -// -func (p *Package) Func(name string) (f *Function) { - f, _ = p.Members[name].(*Function) - return -} - -// Var returns the package-level variable of the specified name, -// or nil if not found. -// -func (p *Package) Var(name string) (g *Global) { - g, _ = p.Members[name].(*Global) - return -} - -// Const returns the package-level constant of the specified name, -// or nil if not found. -// -func (p *Package) Const(name string) (c *NamedConst) { - c, _ = p.Members[name].(*NamedConst) - return -} - -// Type returns the package-level type of the specified name, -// or nil if not found. -// -func (p *Package) Type(name string) (t *Type) { - t, _ = p.Members[name].(*Type) - return -} - -func (v *Call) Pos() token.Pos { return v.Call.pos } -func (s *Defer) Pos() token.Pos { return s.pos } -func (s *Go) Pos() token.Pos { return s.pos } -func (s *MapUpdate) Pos() token.Pos { return s.pos } -func (s *Panic) Pos() token.Pos { return s.pos } -func (s *Return) Pos() token.Pos { return s.pos } -func (s *Send) Pos() token.Pos { return s.pos } -func (s *Store) Pos() token.Pos { return s.pos } -func (s *BlankStore) Pos() token.Pos { return token.NoPos } -func (s *If) Pos() token.Pos { return token.NoPos } -func (s *Jump) Pos() token.Pos { return token.NoPos } -func (s *RunDefers) Pos() token.Pos { return token.NoPos } -func (s *DebugRef) Pos() token.Pos { return s.Expr.Pos() } - -// Operands. 
- -func (v *Alloc) Operands(rands []*Value) []*Value { - return rands -} - -func (v *BinOp) Operands(rands []*Value) []*Value { - return append(rands, &v.X, &v.Y) -} - -func (c *CallCommon) Operands(rands []*Value) []*Value { - rands = append(rands, &c.Value) - for i := range c.Args { - rands = append(rands, &c.Args[i]) - } - return rands -} - -func (s *Go) Operands(rands []*Value) []*Value { - return s.Call.Operands(rands) -} - -func (s *Call) Operands(rands []*Value) []*Value { - return s.Call.Operands(rands) -} - -func (s *Defer) Operands(rands []*Value) []*Value { - return s.Call.Operands(rands) -} - -func (v *ChangeInterface) Operands(rands []*Value) []*Value { - return append(rands, &v.X) -} - -func (v *ChangeType) Operands(rands []*Value) []*Value { - return append(rands, &v.X) -} - -func (v *Convert) Operands(rands []*Value) []*Value { - return append(rands, &v.X) -} - -func (s *DebugRef) Operands(rands []*Value) []*Value { - return append(rands, &s.X) -} - -func (v *Extract) Operands(rands []*Value) []*Value { - return append(rands, &v.Tuple) -} - -func (v *Field) Operands(rands []*Value) []*Value { - return append(rands, &v.X) -} - -func (v *FieldAddr) Operands(rands []*Value) []*Value { - return append(rands, &v.X) -} - -func (s *If) Operands(rands []*Value) []*Value { - return append(rands, &s.Cond) -} - -func (v *Index) Operands(rands []*Value) []*Value { - return append(rands, &v.X, &v.Index) -} - -func (v *IndexAddr) Operands(rands []*Value) []*Value { - return append(rands, &v.X, &v.Index) -} - -func (*Jump) Operands(rands []*Value) []*Value { - return rands -} - -func (v *Lookup) Operands(rands []*Value) []*Value { - return append(rands, &v.X, &v.Index) -} - -func (v *MakeChan) Operands(rands []*Value) []*Value { - return append(rands, &v.Size) -} - -func (v *MakeClosure) Operands(rands []*Value) []*Value { - rands = append(rands, &v.Fn) - for i := range v.Bindings { - rands = append(rands, &v.Bindings[i]) - } - return rands -} - -func (v *MakeInterface) Operands(rands []*Value) []*Value { - return append(rands, &v.X) -} - -func (v *MakeMap) Operands(rands []*Value) []*Value { - return append(rands, &v.Reserve) -} - -func (v *MakeSlice) Operands(rands []*Value) []*Value { - return append(rands, &v.Len, &v.Cap) -} - -func (v *MapUpdate) Operands(rands []*Value) []*Value { - return append(rands, &v.Map, &v.Key, &v.Value) -} - -func (v *Next) Operands(rands []*Value) []*Value { - return append(rands, &v.Iter) -} - -func (s *Panic) Operands(rands []*Value) []*Value { - return append(rands, &s.X) -} - -func (v *Sigma) Operands(rands []*Value) []*Value { - return append(rands, &v.X) -} - -func (v *Phi) Operands(rands []*Value) []*Value { - for i := range v.Edges { - rands = append(rands, &v.Edges[i]) - } - return rands -} - -func (v *Range) Operands(rands []*Value) []*Value { - return append(rands, &v.X) -} - -func (s *Return) Operands(rands []*Value) []*Value { - for i := range s.Results { - rands = append(rands, &s.Results[i]) - } - return rands -} - -func (*RunDefers) Operands(rands []*Value) []*Value { - return rands -} - -func (v *Select) Operands(rands []*Value) []*Value { - for i := range v.States { - rands = append(rands, &v.States[i].Chan, &v.States[i].Send) - } - return rands -} - -func (s *Send) Operands(rands []*Value) []*Value { - return append(rands, &s.Chan, &s.X) -} - -func (v *Slice) Operands(rands []*Value) []*Value { - return append(rands, &v.X, &v.Low, &v.High, &v.Max) -} - -func (s *Store) Operands(rands []*Value) []*Value { - return append(rands, &s.Addr, 
&s.Val) -} - -func (s *BlankStore) Operands(rands []*Value) []*Value { - return append(rands, &s.Val) -} - -func (v *TypeAssert) Operands(rands []*Value) []*Value { - return append(rands, &v.X) -} - -func (v *UnOp) Operands(rands []*Value) []*Value { - return append(rands, &v.X) -} - -// Non-Instruction Values: -func (v *Builtin) Operands(rands []*Value) []*Value { return rands } -func (v *FreeVar) Operands(rands []*Value) []*Value { return rands } -func (v *Const) Operands(rands []*Value) []*Value { return rands } -func (v *Function) Operands(rands []*Value) []*Value { return rands } -func (v *Global) Operands(rands []*Value) []*Value { return rands } -func (v *Parameter) Operands(rands []*Value) []*Value { return rands } diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/ssautil/load.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/ssautil/load.go deleted file mode 100644 index 592e5da12d76..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/ssautil/load.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssautil - -// This file defines utility functions for constructing programs in SSA form. - -import ( - "go/ast" - "go/token" - "go/types" - - "golang.org/x/tools/go/loader" - "honnef.co/go/tools/ssa" -) - -// CreateProgram returns a new program in SSA form, given a program -// loaded from source. An SSA package is created for each transitively -// error-free package of lprog. -// -// Code for bodies of functions is not built until Build is called -// on the result. -// -// mode controls diagnostics and checking during SSA construction. -// -func CreateProgram(lprog *loader.Program, mode ssa.BuilderMode) *ssa.Program { - prog := ssa.NewProgram(lprog.Fset, mode) - - for _, info := range lprog.AllPackages { - if info.TransitivelyErrorFree { - prog.CreatePackage(info.Pkg, info.Files, &info.Info, info.Importable) - } - } - - return prog -} - -// BuildPackage builds an SSA program with IR for a single package. -// -// It populates pkg by type-checking the specified file ASTs. All -// dependencies are loaded using the importer specified by tc, which -// typically loads compiler export data; SSA code cannot be built for -// those packages. BuildPackage then constructs an ssa.Program with all -// dependency packages created, and builds and returns the SSA package -// corresponding to pkg. -// -// The caller must have set pkg.Path() to the import path. -// -// The operation fails if there were any type-checking or import errors. -// -// See ../ssa/example_test.go for an example. -// -func BuildPackage(tc *types.Config, fset *token.FileSet, pkg *types.Package, files []*ast.File, mode ssa.BuilderMode) (*ssa.Package, *types.Info, error) { - if fset == nil { - panic("no token.FileSet") - } - if pkg.Path() == "" { - panic("package has no import path") - } - - info := &types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Implicits: make(map[ast.Node]types.Object), - Scopes: make(map[ast.Node]*types.Scope), - Selections: make(map[*ast.SelectorExpr]*types.Selection), - } - if err := types.NewChecker(tc, fset, pkg, info).Files(files); err != nil { - return nil, nil, err - } - - prog := ssa.NewProgram(fset, mode) - - // Create SSA packages for all imports. - // Order is not significant. 
- created := make(map[*types.Package]bool) - var createAll func(pkgs []*types.Package) - createAll = func(pkgs []*types.Package) { - for _, p := range pkgs { - if !created[p] { - created[p] = true - prog.CreatePackage(p, nil, nil, true) - createAll(p.Imports()) - } - } - } - createAll(pkg.Imports()) - - // Create and build the primary package. - ssapkg := prog.CreatePackage(pkg, files, info, false) - ssapkg.Build() - return ssapkg, info, nil -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/ssautil/switch.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/ssautil/switch.go deleted file mode 100644 index 9c2f5d06eff7..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/ssautil/switch.go +++ /dev/null @@ -1,234 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssautil - -// This file implements discovery of switch and type-switch constructs -// from low-level control flow. -// -// Many techniques exist for compiling a high-level switch with -// constant cases to efficient machine code. The optimal choice will -// depend on the data type, the specific case values, the code in the -// body of each case, and the hardware. -// Some examples: -// - a lookup table (for a switch that maps constants to constants) -// - a computed goto -// - a binary tree -// - a perfect hash -// - a two-level switch (to partition constant strings by their first byte). - -import ( - "bytes" - "fmt" - "go/token" - "go/types" - - "honnef.co/go/tools/ssa" -) - -// A ConstCase represents a single constant comparison. -// It is part of a Switch. -type ConstCase struct { - Block *ssa.BasicBlock // block performing the comparison - Body *ssa.BasicBlock // body of the case - Value *ssa.Const // case comparand -} - -// A TypeCase represents a single type assertion. -// It is part of a Switch. -type TypeCase struct { - Block *ssa.BasicBlock // block performing the type assert - Body *ssa.BasicBlock // body of the case - Type types.Type // case type - Binding ssa.Value // value bound by this case -} - -// A Switch is a logical high-level control flow operation -// (a multiway branch) discovered by analysis of a CFG containing -// only if/else chains. It is not part of the ssa.Instruction set. -// -// One of ConstCases and TypeCases has length >= 2; -// the other is nil. -// -// In a value switch, the list of cases may contain duplicate constants. -// A type switch may contain duplicate types, or types assignable -// to an interface type also in the list. -// TODO(adonovan): eliminate such duplicates. -// -type Switch struct { - Start *ssa.BasicBlock // block containing start of if/else chain - X ssa.Value // the switch operand - ConstCases []ConstCase // ordered list of constant comparisons - TypeCases []TypeCase // ordered list of type assertions - Default *ssa.BasicBlock // successor if all comparisons fail -} - -func (sw *Switch) String() string { - // We represent each block by the String() of its - // first Instruction, e.g. "print(42:int)". 
- var buf bytes.Buffer - if sw.ConstCases != nil { - fmt.Fprintf(&buf, "switch %s {\n", sw.X.Name()) - for _, c := range sw.ConstCases { - fmt.Fprintf(&buf, "case %s: %s\n", c.Value, c.Body.Instrs[0]) - } - } else { - fmt.Fprintf(&buf, "switch %s.(type) {\n", sw.X.Name()) - for _, c := range sw.TypeCases { - fmt.Fprintf(&buf, "case %s %s: %s\n", - c.Binding.Name(), c.Type, c.Body.Instrs[0]) - } - } - if sw.Default != nil { - fmt.Fprintf(&buf, "default: %s\n", sw.Default.Instrs[0]) - } - fmt.Fprintf(&buf, "}") - return buf.String() -} - -// Switches examines the control-flow graph of fn and returns the -// set of inferred value and type switches. A value switch tests an -// ssa.Value for equality against two or more compile-time constant -// values. Switches involving link-time constants (addresses) are -// ignored. A type switch type-asserts an ssa.Value against two or -// more types. -// -// The switches are returned in dominance order. -// -// The resulting switches do not necessarily correspond to uses of the -// 'switch' keyword in the source: for example, a single source-level -// switch statement with non-constant cases may result in zero, one or -// many Switches, one per plural sequence of constant cases. -// Switches may even be inferred from if/else- or goto-based control flow. -// (In general, the control flow constructs of the source program -// cannot be faithfully reproduced from the SSA representation.) -// -func Switches(fn *ssa.Function) []Switch { - // Traverse the CFG in dominance order, so we don't - // enter an if/else-chain in the middle. - var switches []Switch - seen := make(map[*ssa.BasicBlock]bool) // TODO(adonovan): opt: use ssa.blockSet - for _, b := range fn.DomPreorder() { - if x, k := isComparisonBlock(b); x != nil { - // Block b starts a switch. - sw := Switch{Start: b, X: x} - valueSwitch(&sw, k, seen) - if len(sw.ConstCases) > 1 { - switches = append(switches, sw) - } - } - - if y, x, T := isTypeAssertBlock(b); y != nil { - // Block b starts a type switch. - sw := Switch{Start: b, X: x} - typeSwitch(&sw, y, T, seen) - if len(sw.TypeCases) > 1 { - switches = append(switches, sw) - } - } - } - return switches -} - -func valueSwitch(sw *Switch, k *ssa.Const, seen map[*ssa.BasicBlock]bool) { - b := sw.Start - x := sw.X - for x == sw.X { - if seen[b] { - break - } - seen[b] = true - - sw.ConstCases = append(sw.ConstCases, ConstCase{ - Block: b, - Body: b.Succs[0], - Value: k, - }) - b = b.Succs[1] - if len(b.Instrs) > 2 { - // Block b contains not just 'if x == k', - // so it may have side effects that - // make it unsafe to elide. - break - } - if len(b.Preds) != 1 { - // Block b has multiple predecessors, - // so it cannot be treated as a case. - break - } - x, k = isComparisonBlock(b) - } - sw.Default = b -} - -func typeSwitch(sw *Switch, y ssa.Value, T types.Type, seen map[*ssa.BasicBlock]bool) { - b := sw.Start - x := sw.X - for x == sw.X { - if seen[b] { - break - } - seen[b] = true - - sw.TypeCases = append(sw.TypeCases, TypeCase{ - Block: b, - Body: b.Succs[0], - Type: T, - Binding: y, - }) - b = b.Succs[1] - if len(b.Instrs) > 4 { - // Block b contains not just - // {TypeAssert; Extract #0; Extract #1; If} - // so it may have side effects that - // make it unsafe to elide. - break - } - if len(b.Preds) != 1 { - // Block b has multiple predecessors, - // so it cannot be treated as a case. 
- break - } - y, x, T = isTypeAssertBlock(b) - } - sw.Default = b -} - -// isComparisonBlock returns the operands (v, k) if a block ends with -// a comparison v==k, where k is a compile-time constant. -// -func isComparisonBlock(b *ssa.BasicBlock) (v ssa.Value, k *ssa.Const) { - if n := len(b.Instrs); n >= 2 { - if i, ok := b.Instrs[n-1].(*ssa.If); ok { - if binop, ok := i.Cond.(*ssa.BinOp); ok && binop.Block() == b && binop.Op == token.EQL { - if k, ok := binop.Y.(*ssa.Const); ok { - return binop.X, k - } - if k, ok := binop.X.(*ssa.Const); ok { - return binop.Y, k - } - } - } - } - return -} - -// isTypeAssertBlock returns the operands (y, x, T) if a block ends with -// a type assertion "if y, ok := x.(T); ok {". -// -func isTypeAssertBlock(b *ssa.BasicBlock) (y, x ssa.Value, T types.Type) { - if n := len(b.Instrs); n >= 4 { - if i, ok := b.Instrs[n-1].(*ssa.If); ok { - if ext1, ok := i.Cond.(*ssa.Extract); ok && ext1.Block() == b && ext1.Index == 1 { - if ta, ok := ext1.Tuple.(*ssa.TypeAssert); ok && ta.Block() == b { - // hack: relies upon instruction ordering. - if ext0, ok := b.Instrs[n-3].(*ssa.Extract); ok { - return ext0, ta.X, ta.AssertedType - } - } - } - } - } - return -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/ssautil/visit.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/ssautil/visit.go deleted file mode 100644 index 4d5bb1850c47..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/ssautil/visit.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssautil - -import "honnef.co/go/tools/ssa" - -// This file defines utilities for visiting the SSA representation of -// a Program. -// -// TODO(adonovan): test coverage. - -// AllFunctions finds and returns the set of functions potentially -// needed by program prog, as determined by a simple linker-style -// reachability algorithm starting from the members and method-sets of -// each package. The result may include anonymous functions and -// synthetic wrappers. -// -// Precondition: all packages are built. -// -func AllFunctions(prog *ssa.Program) map[*ssa.Function]bool { - visit := visitor{ - prog: prog, - seen: make(map[*ssa.Function]bool), - } - visit.program() - return visit.seen -} - -type visitor struct { - prog *ssa.Program - seen map[*ssa.Function]bool -} - -func (visit *visitor) program() { - for _, pkg := range visit.prog.AllPackages() { - for _, mem := range pkg.Members { - if fn, ok := mem.(*ssa.Function); ok { - visit.function(fn) - } - } - } - for _, T := range visit.prog.RuntimeTypes() { - mset := visit.prog.MethodSets.MethodSet(T) - for i, n := 0, mset.Len(); i < n; i++ { - visit.function(visit.prog.MethodValue(mset.At(i))) - } - } -} - -func (visit *visitor) function(fn *ssa.Function) { - if !visit.seen[fn] { - visit.seen[fn] = true - var buf [10]*ssa.Value // avoid alloc in common case - for _, b := range fn.Blocks { - for _, instr := range b.Instrs { - for _, op := range instr.Operands(buf[:0]) { - if fn, ok := (*op).(*ssa.Function); ok { - visit.function(fn) - } - } - } - } - } -} - -// MainPackages returns the subset of the specified packages -// named "main" that define a main function. -// The result may include synthetic "testmain" packages. 
-func MainPackages(pkgs []*ssa.Package) []*ssa.Package { - var mains []*ssa.Package - for _, pkg := range pkgs { - if pkg.Pkg.Name() == "main" && pkg.Func("main") != nil { - mains = append(mains, pkg) - } - } - return mains -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/testmain.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/testmain.go deleted file mode 100644 index ea232ada951f..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/testmain.go +++ /dev/null @@ -1,267 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssa - -// CreateTestMainPackage synthesizes a main package that runs all the -// tests of the supplied packages. -// It is closely coupled to $GOROOT/src/cmd/go/test.go and $GOROOT/src/testing. -// -// TODO(adonovan): this file no longer needs to live in the ssa package. -// Move it to ssautil. - -import ( - "bytes" - "fmt" - "go/ast" - "go/parser" - "go/types" - "log" - "os" - "strings" - "text/template" -) - -// FindTests returns the Test, Benchmark, and Example functions -// (as defined by "go test") defined in the specified package, -// and its TestMain function, if any. -func FindTests(pkg *Package) (tests, benchmarks, examples []*Function, main *Function) { - prog := pkg.Prog - - // The first two of these may be nil: if the program doesn't import "testing", - // it can't contain any tests, but it may yet contain Examples. - var testSig *types.Signature // func(*testing.T) - var benchmarkSig *types.Signature // func(*testing.B) - var exampleSig = types.NewSignature(nil, nil, nil, false) // func() - - // Obtain the types from the parameters of testing.MainStart. - if testingPkg := prog.ImportedPackage("testing"); testingPkg != nil { - mainStart := testingPkg.Func("MainStart") - params := mainStart.Signature.Params() - testSig = funcField(params.At(1).Type()) - benchmarkSig = funcField(params.At(2).Type()) - - // Does the package define this function? - // func TestMain(*testing.M) - if f := pkg.Func("TestMain"); f != nil { - sig := f.Type().(*types.Signature) - starM := mainStart.Signature.Results().At(0).Type() // *testing.M - if sig.Results().Len() == 0 && - sig.Params().Len() == 1 && - types.Identical(sig.Params().At(0).Type(), starM) { - main = f - } - } - } - - // TODO(adonovan): use a stable order, e.g. lexical. - for _, mem := range pkg.Members { - if f, ok := mem.(*Function); ok && - ast.IsExported(f.Name()) && - strings.HasSuffix(prog.Fset.Position(f.Pos()).Filename, "_test.go") { - - switch { - case testSig != nil && isTestSig(f, "Test", testSig): - tests = append(tests, f) - case benchmarkSig != nil && isTestSig(f, "Benchmark", benchmarkSig): - benchmarks = append(benchmarks, f) - case isTestSig(f, "Example", exampleSig): - examples = append(examples, f) - default: - continue - } - } - } - return -} - -// Like isTest, but checks the signature too. -func isTestSig(f *Function, prefix string, sig *types.Signature) bool { - return isTest(f.Name(), prefix) && types.Identical(f.Signature, sig) -} - -// Given the type of one of the three slice parameters of testing.Main, -// returns the function type. -func funcField(slice types.Type) *types.Signature { - return slice.(*types.Slice).Elem().Underlying().(*types.Struct).Field(1).Type().(*types.Signature) -} - -// isTest tells whether name looks like a test (or benchmark, according to prefix). 
-// It is a Test (say) if there is a character after Test that is not a lower-case letter. -// We don't want TesticularCancer. -// Plundered from $GOROOT/src/cmd/go/test.go -func isTest(name, prefix string) bool { - if !strings.HasPrefix(name, prefix) { - return false - } - if len(name) == len(prefix) { // "Test" is ok - return true - } - return ast.IsExported(name[len(prefix):]) -} - -// CreateTestMainPackage creates and returns a synthetic "testmain" -// package for the specified package if it defines tests, benchmarks or -// executable examples, or nil otherwise. The new package is named -// "main" and provides a function named "main" that runs the tests, -// similar to the one that would be created by the 'go test' tool. -// -// Subsequent calls to prog.AllPackages include the new package. -// The package pkg must belong to the program prog. -func (prog *Program) CreateTestMainPackage(pkg *Package) *Package { - if pkg.Prog != prog { - log.Fatal("Package does not belong to Program") - } - - // Template data - var data struct { - Pkg *Package - Tests, Benchmarks, Examples []*Function - Main *Function - Go18 bool - } - data.Pkg = pkg - - // Enumerate tests. - data.Tests, data.Benchmarks, data.Examples, data.Main = FindTests(pkg) - if data.Main == nil && - data.Tests == nil && data.Benchmarks == nil && data.Examples == nil { - return nil - } - - // Synthesize source for testmain package. - path := pkg.Pkg.Path() + "$testmain" - tmpl := testmainTmpl - if testingPkg := prog.ImportedPackage("testing"); testingPkg != nil { - // In Go 1.8, testing.MainStart's first argument is an interface, not a func. - data.Go18 = types.IsInterface(testingPkg.Func("MainStart").Signature.Params().At(0).Type()) - } else { - // The program does not import "testing", but FindTests - // returned non-nil, which must mean there were Examples - // but no Test, Benchmark, or TestMain functions. - - // We'll simply call them from testmain.main; this will - // ensure they don't panic, but will not check any - // "Output:" comments. - // (We should not execute an Example that has no - // "Output:" comment, but it's impossible to tell here.) - tmpl = examplesOnlyTmpl - } - var buf bytes.Buffer - if err := tmpl.Execute(&buf, data); err != nil { - log.Fatalf("internal error expanding template for %s: %v", path, err) - } - if false { // debugging - fmt.Fprintln(os.Stderr, buf.String()) - } - - // Parse and type-check the testmain package. - f, err := parser.ParseFile(prog.Fset, path+".go", &buf, parser.Mode(0)) - if err != nil { - log.Fatalf("internal error parsing %s: %v", path, err) - } - conf := types.Config{ - DisableUnusedImportCheck: true, - Importer: importer{pkg}, - } - files := []*ast.File{f} - info := &types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Implicits: make(map[ast.Node]types.Object), - Scopes: make(map[ast.Node]*types.Scope), - Selections: make(map[*ast.SelectorExpr]*types.Selection), - } - testmainPkg, err := conf.Check(path, prog.Fset, files, info) - if err != nil { - log.Fatalf("internal error type-checking %s: %v", path, err) - } - - // Create and build SSA code. - testmain := prog.CreatePackage(testmainPkg, files, info, false) - testmain.SetDebugMode(false) - testmain.Build() - testmain.Func("main").Synthetic = "test main function" - testmain.Func("init").Synthetic = "package initializer" - return testmain -} - -// An implementation of types.Importer for an already loaded SSA program. 
-type importer struct { - pkg *Package // package under test; may be non-importable -} - -func (imp importer) Import(path string) (*types.Package, error) { - if p := imp.pkg.Prog.ImportedPackage(path); p != nil { - return p.Pkg, nil - } - if path == imp.pkg.Pkg.Path() { - return imp.pkg.Pkg, nil - } - return nil, fmt.Errorf("not found") // can't happen -} - -var testmainTmpl = template.Must(template.New("testmain").Parse(` -package main - -import "io" -import "os" -import "testing" -import p {{printf "%q" .Pkg.Pkg.Path}} - -{{if .Go18}} -type deps struct{} - -func (deps) ImportPath() string { return "" } -func (deps) MatchString(pat, str string) (bool, error) { return true, nil } -func (deps) StartCPUProfile(io.Writer) error { return nil } -func (deps) StartTestLog(io.Writer) {} -func (deps) StopCPUProfile() {} -func (deps) StopTestLog() error { return nil } -func (deps) WriteHeapProfile(io.Writer) error { return nil } -func (deps) WriteProfileTo(string, io.Writer, int) error { return nil } - -var match deps -{{else}} -func match(_, _ string) (bool, error) { return true, nil } -{{end}} - -func main() { - tests := []testing.InternalTest{ -{{range .Tests}} - { {{printf "%q" .Name}}, p.{{.Name}} }, -{{end}} - } - benchmarks := []testing.InternalBenchmark{ -{{range .Benchmarks}} - { {{printf "%q" .Name}}, p.{{.Name}} }, -{{end}} - } - examples := []testing.InternalExample{ -{{range .Examples}} - {Name: {{printf "%q" .Name}}, F: p.{{.Name}}}, -{{end}} - } - m := testing.MainStart(match, tests, benchmarks, examples) -{{with .Main}} - p.{{.Name}}(m) -{{else}} - os.Exit(m.Run()) -{{end}} -} - -`)) - -var examplesOnlyTmpl = template.Must(template.New("examples").Parse(` -package main - -import p {{printf "%q" .Pkg.Pkg.Path}} - -func main() { -{{range .Examples}} - p.{{.Name}}() -{{end}} -} -`)) diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/util.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/util.go deleted file mode 100644 index ddb118460969..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/util.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssa - -// This file defines a number of miscellaneous utility functions. - -import ( - "fmt" - "go/ast" - "go/token" - "go/types" - "io" - "os" - - "golang.org/x/tools/go/ast/astutil" -) - -//// AST utilities - -func unparen(e ast.Expr) ast.Expr { return astutil.Unparen(e) } - -// isBlankIdent returns true iff e is an Ident with name "_". -// They have no associated types.Object, and thus no type. -// -func isBlankIdent(e ast.Expr) bool { - id, ok := e.(*ast.Ident) - return ok && id.Name == "_" -} - -//// Type utilities. Some of these belong in go/types. - -// isPointer returns true for types whose underlying type is a pointer. -func isPointer(typ types.Type) bool { - _, ok := typ.Underlying().(*types.Pointer) - return ok -} - -func isInterface(T types.Type) bool { return types.IsInterface(T) } - -// deref returns a pointer's element type; otherwise it returns typ. -func deref(typ types.Type) types.Type { - if p, ok := typ.Underlying().(*types.Pointer); ok { - return p.Elem() - } - return typ -} - -// recvType returns the receiver type of method obj. 
-func recvType(obj *types.Func) types.Type { - return obj.Type().(*types.Signature).Recv().Type() -} - -// DefaultType returns the default "typed" type for an "untyped" type; -// it returns the incoming type for all other types. The default type -// for untyped nil is untyped nil. -// -// Exported to ssa/interp. -// -// TODO(adonovan): use go/types.DefaultType after 1.8. -// -func DefaultType(typ types.Type) types.Type { - if t, ok := typ.(*types.Basic); ok { - k := t.Kind() - switch k { - case types.UntypedBool: - k = types.Bool - case types.UntypedInt: - k = types.Int - case types.UntypedRune: - k = types.Rune - case types.UntypedFloat: - k = types.Float64 - case types.UntypedComplex: - k = types.Complex128 - case types.UntypedString: - k = types.String - } - typ = types.Typ[k] - } - return typ -} - -// logStack prints the formatted "start" message to stderr and -// returns a closure that prints the corresponding "end" message. -// Call using 'defer logStack(...)()' to show builder stack on panic. -// Don't forget trailing parens! -// -func logStack(format string, args ...interface{}) func() { - msg := fmt.Sprintf(format, args...) - io.WriteString(os.Stderr, msg) - io.WriteString(os.Stderr, "\n") - return func() { - io.WriteString(os.Stderr, msg) - io.WriteString(os.Stderr, " end\n") - } -} - -// newVar creates a 'var' for use in a types.Tuple. -func newVar(name string, typ types.Type) *types.Var { - return types.NewParam(token.NoPos, nil, name, typ) -} - -// anonVar creates an anonymous 'var' for use in a types.Tuple. -func anonVar(typ types.Type) *types.Var { - return newVar("", typ) -} - -var lenResults = types.NewTuple(anonVar(tInt)) - -// makeLen returns the len builtin specialized to type func(T)int. -func makeLen(T types.Type) *Builtin { - lenParams := types.NewTuple(anonVar(T)) - return &Builtin{ - name: "len", - sig: types.NewSignature(nil, lenParams, lenResults, false), - } -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/wrappers.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/wrappers.go deleted file mode 100644 index 701dd90d7d9d..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/wrappers.go +++ /dev/null @@ -1,294 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssa - -// This file defines synthesis of Functions that delegate to declared -// methods; they come in three kinds: -// -// (1) wrappers: methods that wrap declared methods, performing -// implicit pointer indirections and embedded field selections. -// -// (2) thunks: funcs that wrap declared methods. Like wrappers, -// thunks perform indirections and field selections. The thunk's -// first parameter is used as the receiver for the method call. -// -// (3) bounds: funcs that wrap declared methods. The bound's sole -// free variable, supplied by a closure, is used as the receiver -// for the method call. No indirections or field selections are -// performed since they can be done before the call. - -import ( - "fmt" - - "go/types" -) - -// -- wrappers ----------------------------------------------------------- - -// makeWrapper returns a synthetic method that delegates to the -// declared method denoted by meth.Obj(), first performing any -// necessary pointer indirections or field selections implied by meth. -// -// The resulting method's receiver type is meth.Recv(). -// -// This function is versatile but quite subtle! 
Consider the -// following axes of variation when making changes: -// - optional receiver indirection -// - optional implicit field selections -// - meth.Obj() may denote a concrete or an interface method -// - the result may be a thunk or a wrapper. -// -// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) -// -func makeWrapper(prog *Program, sel *types.Selection) *Function { - obj := sel.Obj().(*types.Func) // the declared function - sig := sel.Type().(*types.Signature) // type of this wrapper - - var recv *types.Var // wrapper's receiver or thunk's params[0] - name := obj.Name() - var description string - var start int // first regular param - if sel.Kind() == types.MethodExpr { - name += "$thunk" - description = "thunk" - recv = sig.Params().At(0) - start = 1 - } else { - description = "wrapper" - recv = sig.Recv() - } - - description = fmt.Sprintf("%s for %s", description, sel.Obj()) - if prog.mode&LogSource != 0 { - defer logStack("make %s to (%s)", description, recv.Type())() - } - fn := &Function{ - name: name, - method: sel, - object: obj, - Signature: sig, - Synthetic: description, - Prog: prog, - pos: obj.Pos(), - } - fn.startBody() - fn.addSpilledParam(recv) - createParams(fn, start) - - indices := sel.Index() - - var v Value = fn.Locals[0] // spilled receiver - if isPointer(sel.Recv()) { - v = emitLoad(fn, v) - - // For simple indirection wrappers, perform an informative nil-check: - // "value method (T).f called using nil *T pointer" - if len(indices) == 1 && !isPointer(recvType(obj)) { - var c Call - c.Call.Value = &Builtin{ - name: "ssa:wrapnilchk", - sig: types.NewSignature(nil, - types.NewTuple(anonVar(sel.Recv()), anonVar(tString), anonVar(tString)), - types.NewTuple(anonVar(sel.Recv())), false), - } - c.Call.Args = []Value{ - v, - stringConst(deref(sel.Recv()).String()), - stringConst(sel.Obj().Name()), - } - c.setType(v.Type()) - v = fn.emit(&c) - } - } - - // Invariant: v is a pointer, either - // value of *A receiver param, or - // address of A spilled receiver. - - // We use pointer arithmetic (FieldAddr possibly followed by - // Load) in preference to value extraction (Field possibly - // preceded by Load). - - v = emitImplicitSelections(fn, v, indices[:len(indices)-1]) - - // Invariant: v is a pointer, either - // value of implicit *C field, or - // address of implicit C field. - - var c Call - if r := recvType(obj); !isInterface(r) { // concrete method - if !isPointer(r) { - v = emitLoad(fn, v) - } - c.Call.Value = prog.declaredFunc(obj) - c.Call.Args = append(c.Call.Args, v) - } else { - c.Call.Method = obj - c.Call.Value = emitLoad(fn, v) - } - for _, arg := range fn.Params[1:] { - c.Call.Args = append(c.Call.Args, arg) - } - emitTailCall(fn, &c) - fn.finishBody() - return fn -} - -// createParams creates parameters for wrapper method fn based on its -// Signature.Params, which do not include the receiver. -// start is the index of the first regular parameter to use. -// -func createParams(fn *Function, start int) { - var last *Parameter - tparams := fn.Signature.Params() - for i, n := start, tparams.Len(); i < n; i++ { - last = fn.addParamObj(tparams.At(i)) - } - if fn.Signature.Variadic() { - last.typ = types.NewSlice(last.typ) - } -} - -// -- bounds ----------------------------------------------------------- - -// makeBound returns a bound method wrapper (or "bound"), a synthetic -// function that delegates to a concrete or interface method denoted -// by obj. 
The resulting function has no receiver, but has one free -// variable which will be used as the method's receiver in the -// tail-call. -// -// Use MakeClosure with such a wrapper to construct a bound method -// closure. e.g.: -// -// type T int or: type T interface { meth() } -// func (t T) meth() -// var t T -// f := t.meth -// f() // calls t.meth() -// -// f is a closure of a synthetic wrapper defined as if by: -// -// f := func() { return t.meth() } -// -// Unlike makeWrapper, makeBound need perform no indirection or field -// selections because that can be done before the closure is -// constructed. -// -// EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu) -// -func makeBound(prog *Program, obj *types.Func) *Function { - prog.methodsMu.Lock() - defer prog.methodsMu.Unlock() - fn, ok := prog.bounds[obj] - if !ok { - description := fmt.Sprintf("bound method wrapper for %s", obj) - if prog.mode&LogSource != 0 { - defer logStack("%s", description)() - } - fn = &Function{ - name: obj.Name() + "$bound", - object: obj, - Signature: changeRecv(obj.Type().(*types.Signature), nil), // drop receiver - Synthetic: description, - Prog: prog, - pos: obj.Pos(), - } - - fv := &FreeVar{name: "recv", typ: recvType(obj), parent: fn} - fn.FreeVars = []*FreeVar{fv} - fn.startBody() - createParams(fn, 0) - var c Call - - if !isInterface(recvType(obj)) { // concrete - c.Call.Value = prog.declaredFunc(obj) - c.Call.Args = []Value{fv} - } else { - c.Call.Value = fv - c.Call.Method = obj - } - for _, arg := range fn.Params { - c.Call.Args = append(c.Call.Args, arg) - } - emitTailCall(fn, &c) - fn.finishBody() - - prog.bounds[obj] = fn - } - return fn -} - -// -- thunks ----------------------------------------------------------- - -// makeThunk returns a thunk, a synthetic function that delegates to a -// concrete or interface method denoted by sel.Obj(). The resulting -// function has no receiver, but has an additional (first) regular -// parameter. -// -// Precondition: sel.Kind() == types.MethodExpr. -// -// type T int or: type T interface { meth() } -// func (t T) meth() -// f := T.meth -// var t T -// f(t) // calls t.meth() -// -// f is a synthetic wrapper defined as if by: -// -// f := func(t T) { return t.meth() } -// -// TODO(adonovan): opt: currently the stub is created even when used -// directly in a function call: C.f(i, 0). This is less efficient -// than inlining the stub. -// -// EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu) -// -func makeThunk(prog *Program, sel *types.Selection) *Function { - if sel.Kind() != types.MethodExpr { - panic(sel) - } - - key := selectionKey{ - kind: sel.Kind(), - recv: sel.Recv(), - obj: sel.Obj(), - index: fmt.Sprint(sel.Index()), - indirect: sel.Indirect(), - } - - prog.methodsMu.Lock() - defer prog.methodsMu.Unlock() - - // Canonicalize key.recv to avoid constructing duplicate thunks. - canonRecv, ok := prog.canon.At(key.recv).(types.Type) - if !ok { - canonRecv = key.recv - prog.canon.Set(key.recv, canonRecv) - } - key.recv = canonRecv - - fn, ok := prog.thunks[key] - if !ok { - fn = makeWrapper(prog, sel) - if fn.Signature.Recv() != nil { - panic(fn) // unexpected receiver - } - prog.thunks[key] = fn - } - return fn -} - -func changeRecv(s *types.Signature, recv *types.Var) *types.Signature { - return types.NewSignature(recv, s.Params(), s.Results(), s.Variadic()) -} - -// selectionKey is like types.Selection but a usable map key. 
-type selectionKey struct { - kind types.SelectionKind - recv types.Type // canonicalized via Program.canon - obj types.Object - index string - indirect bool -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/write.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/write.go deleted file mode 100644 index 89761a18a55e..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/ssa/write.go +++ /dev/null @@ -1,5 +0,0 @@ -package ssa - -func NewJump(parent *BasicBlock) *Jump { - return &Jump{anInstruction{parent}} -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/staticcheck/CONTRIBUTING.md b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/staticcheck/CONTRIBUTING.md deleted file mode 100644 index b12c7afc748e..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/staticcheck/CONTRIBUTING.md +++ /dev/null @@ -1,15 +0,0 @@ -# Contributing to staticcheck - -## Before filing an issue: - -### Are you having trouble building staticcheck? - -Check you have the latest version of its dependencies. Run -``` -go get -u honnef.co/go/tools/staticcheck -``` -If you still have problems, consider searching for existing issues before filing a new issue. - -## Before sending a pull request: - -Have you understood the purpose of staticcheck? Make sure to carefully read `README`. diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/staticcheck/buildtag.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/staticcheck/buildtag.go deleted file mode 100644 index 888d3e9dc056..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/staticcheck/buildtag.go +++ /dev/null @@ -1,21 +0,0 @@ -package staticcheck - -import ( - "go/ast" - "strings" - - . "honnef.co/go/tools/lint/lintdsl" -) - -func buildTags(f *ast.File) [][]string { - var out [][]string - for _, line := range strings.Split(Preamble(f), "\n") { - if !strings.HasPrefix(line, "+build ") { - continue - } - line = strings.TrimSpace(strings.TrimPrefix(line, "+build ")) - fields := strings.Fields(line) - out = append(out, fields) - } - return out -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/staticcheck/lint.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/staticcheck/lint.go deleted file mode 100644 index a574bb0de06e..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/staticcheck/lint.go +++ /dev/null @@ -1,2790 +0,0 @@ -// Package staticcheck contains a linter for Go source code. -package staticcheck - -import ( - "fmt" - "go/ast" - "go/constant" - "go/token" - "go/types" - htmltemplate "html/template" - "net/http" - "regexp" - "regexp/syntax" - "sort" - "strconv" - "strings" - "sync" - texttemplate "text/template" - - "honnef.co/go/tools/deprecated" - "honnef.co/go/tools/functions" - "honnef.co/go/tools/internal/sharedcheck" - "honnef.co/go/tools/lint" - . 
"honnef.co/go/tools/lint/lintdsl" - "honnef.co/go/tools/ssa" - "honnef.co/go/tools/staticcheck/vrp" - - "golang.org/x/tools/go/ast/astutil" - "golang.org/x/tools/go/loader" -) - -func validRegexp(call *Call) { - arg := call.Args[0] - err := ValidateRegexp(arg.Value) - if err != nil { - arg.Invalid(err.Error()) - } -} - -type runeSlice []rune - -func (rs runeSlice) Len() int { return len(rs) } -func (rs runeSlice) Less(i int, j int) bool { return rs[i] < rs[j] } -func (rs runeSlice) Swap(i int, j int) { rs[i], rs[j] = rs[j], rs[i] } - -func utf8Cutset(call *Call) { - arg := call.Args[1] - if InvalidUTF8(arg.Value) { - arg.Invalid(MsgInvalidUTF8) - } -} - -func uniqueCutset(call *Call) { - arg := call.Args[1] - if !UniqueStringCutset(arg.Value) { - arg.Invalid(MsgNonUniqueCutset) - } -} - -func unmarshalPointer(name string, arg int) CallCheck { - return func(call *Call) { - if !Pointer(call.Args[arg].Value) { - call.Args[arg].Invalid(fmt.Sprintf("%s expects to unmarshal into a pointer, but the provided value is not a pointer", name)) - } - } -} - -func pointlessIntMath(call *Call) { - if ConvertedFromInt(call.Args[0].Value) { - call.Invalid(fmt.Sprintf("calling %s on a converted integer is pointless", CallName(call.Instr.Common()))) - } -} - -func checkValidHostPort(arg int) CallCheck { - return func(call *Call) { - if !ValidHostPort(call.Args[arg].Value) { - call.Args[arg].Invalid(MsgInvalidHostPort) - } - } -} - -var ( - checkRegexpRules = map[string]CallCheck{ - "regexp.MustCompile": validRegexp, - "regexp.Compile": validRegexp, - "regexp.Match": validRegexp, - "regexp.MatchReader": validRegexp, - "regexp.MatchString": validRegexp, - } - - checkTimeParseRules = map[string]CallCheck{ - "time.Parse": func(call *Call) { - arg := call.Args[0] - err := ValidateTimeLayout(arg.Value) - if err != nil { - arg.Invalid(err.Error()) - } - }, - } - - checkEncodingBinaryRules = map[string]CallCheck{ - "encoding/binary.Write": func(call *Call) { - arg := call.Args[2] - if !CanBinaryMarshal(call.Job, arg.Value) { - arg.Invalid(fmt.Sprintf("value of type %s cannot be used with binary.Write", arg.Value.Value.Type())) - } - }, - } - - checkURLsRules = map[string]CallCheck{ - "net/url.Parse": func(call *Call) { - arg := call.Args[0] - err := ValidateURL(arg.Value) - if err != nil { - arg.Invalid(err.Error()) - } - }, - } - - checkSyncPoolValueRules = map[string]CallCheck{ - "(*sync.Pool).Put": func(call *Call) { - arg := call.Args[0] - typ := arg.Value.Value.Type() - if !IsPointerLike(typ) { - arg.Invalid("argument should be pointer-like to avoid allocations") - } - }, - } - - checkRegexpFindAllRules = map[string]CallCheck{ - "(*regexp.Regexp).FindAll": RepeatZeroTimes("a FindAll method", 1), - "(*regexp.Regexp).FindAllIndex": RepeatZeroTimes("a FindAll method", 1), - "(*regexp.Regexp).FindAllString": RepeatZeroTimes("a FindAll method", 1), - "(*regexp.Regexp).FindAllStringIndex": RepeatZeroTimes("a FindAll method", 1), - "(*regexp.Regexp).FindAllStringSubmatch": RepeatZeroTimes("a FindAll method", 1), - "(*regexp.Regexp).FindAllStringSubmatchIndex": RepeatZeroTimes("a FindAll method", 1), - "(*regexp.Regexp).FindAllSubmatch": RepeatZeroTimes("a FindAll method", 1), - "(*regexp.Regexp).FindAllSubmatchIndex": RepeatZeroTimes("a FindAll method", 1), - } - - checkUTF8CutsetRules = map[string]CallCheck{ - "strings.IndexAny": utf8Cutset, - "strings.LastIndexAny": utf8Cutset, - "strings.ContainsAny": utf8Cutset, - "strings.Trim": utf8Cutset, - "strings.TrimLeft": utf8Cutset, - "strings.TrimRight": utf8Cutset, - } 
- - checkUniqueCutsetRules = map[string]CallCheck{ - "strings.Trim": uniqueCutset, - "strings.TrimLeft": uniqueCutset, - "strings.TrimRight": uniqueCutset, - } - - checkUnmarshalPointerRules = map[string]CallCheck{ - "encoding/xml.Unmarshal": unmarshalPointer("xml.Unmarshal", 1), - "(*encoding/xml.Decoder).Decode": unmarshalPointer("Decode", 0), - "(*encoding/xml.Decoder).DecodeElement": unmarshalPointer("DecodeElement", 0), - "encoding/json.Unmarshal": unmarshalPointer("json.Unmarshal", 1), - "(*encoding/json.Decoder).Decode": unmarshalPointer("Decode", 0), - } - - checkUnbufferedSignalChanRules = map[string]CallCheck{ - "os/signal.Notify": func(call *Call) { - arg := call.Args[0] - if UnbufferedChannel(arg.Value) { - arg.Invalid("the channel used with signal.Notify should be buffered") - } - }, - } - - checkMathIntRules = map[string]CallCheck{ - "math.Ceil": pointlessIntMath, - "math.Floor": pointlessIntMath, - "math.IsNaN": pointlessIntMath, - "math.Trunc": pointlessIntMath, - "math.IsInf": pointlessIntMath, - } - - checkStringsReplaceZeroRules = map[string]CallCheck{ - "strings.Replace": RepeatZeroTimes("strings.Replace", 3), - "bytes.Replace": RepeatZeroTimes("bytes.Replace", 3), - } - - checkListenAddressRules = map[string]CallCheck{ - "net/http.ListenAndServe": checkValidHostPort(0), - "net/http.ListenAndServeTLS": checkValidHostPort(0), - } - - checkBytesEqualIPRules = map[string]CallCheck{ - "bytes.Equal": func(call *Call) { - if ConvertedFrom(call.Args[0].Value, "net.IP") && ConvertedFrom(call.Args[1].Value, "net.IP") { - call.Invalid("use net.IP.Equal to compare net.IPs, not bytes.Equal") - } - }, - } - - checkRegexpMatchLoopRules = map[string]CallCheck{ - "regexp.Match": loopedRegexp("regexp.Match"), - "regexp.MatchReader": loopedRegexp("regexp.MatchReader"), - "regexp.MatchString": loopedRegexp("regexp.MatchString"), - } -) - -type Checker struct { - CheckGenerated bool - funcDescs *functions.Descriptions - deprecatedObjs map[types.Object]string -} - -func NewChecker() *Checker { - return &Checker{} -} - -func (*Checker) Name() string { return "staticcheck" } -func (*Checker) Prefix() string { return "SA" } - -func (c *Checker) Funcs() map[string]lint.Func { - return map[string]lint.Func{ - "SA1000": c.callChecker(checkRegexpRules), - "SA1001": c.CheckTemplate, - "SA1002": c.callChecker(checkTimeParseRules), - "SA1003": c.callChecker(checkEncodingBinaryRules), - "SA1004": c.CheckTimeSleepConstant, - "SA1005": c.CheckExec, - "SA1006": c.CheckUnsafePrintf, - "SA1007": c.callChecker(checkURLsRules), - "SA1008": c.CheckCanonicalHeaderKey, - "SA1009": nil, - "SA1010": c.callChecker(checkRegexpFindAllRules), - "SA1011": c.callChecker(checkUTF8CutsetRules), - "SA1012": c.CheckNilContext, - "SA1013": c.CheckSeeker, - "SA1014": c.callChecker(checkUnmarshalPointerRules), - "SA1015": c.CheckLeakyTimeTick, - "SA1016": c.CheckUntrappableSignal, - "SA1017": c.callChecker(checkUnbufferedSignalChanRules), - "SA1018": c.callChecker(checkStringsReplaceZeroRules), - "SA1019": c.CheckDeprecated, - "SA1020": c.callChecker(checkListenAddressRules), - "SA1021": c.callChecker(checkBytesEqualIPRules), - "SA1022": nil, - "SA1023": c.CheckWriterBufferModified, - "SA1024": c.callChecker(checkUniqueCutsetRules), - - "SA2000": c.CheckWaitgroupAdd, - "SA2001": c.CheckEmptyCriticalSection, - "SA2002": c.CheckConcurrentTesting, - "SA2003": c.CheckDeferLock, - - "SA3000": c.CheckTestMainExit, - "SA3001": c.CheckBenchmarkN, - - "SA4000": c.CheckLhsRhsIdentical, - "SA4001": c.CheckIneffectiveCopy, - "SA4002": 
c.CheckDiffSizeComparison, - "SA4003": c.CheckUnsignedComparison, - "SA4004": c.CheckIneffectiveLoop, - "SA4005": nil, - "SA4006": c.CheckUnreadVariableValues, - // "SA4007": c.CheckPredeterminedBooleanExprs, - "SA4007": nil, - "SA4008": c.CheckLoopCondition, - "SA4009": c.CheckArgOverwritten, - "SA4010": c.CheckIneffectiveAppend, - "SA4011": c.CheckScopedBreak, - "SA4012": c.CheckNaNComparison, - "SA4013": c.CheckDoubleNegation, - "SA4014": c.CheckRepeatedIfElse, - "SA4015": c.callChecker(checkMathIntRules), - "SA4016": c.CheckSillyBitwiseOps, - "SA4017": c.CheckPureFunctions, - "SA4018": c.CheckSelfAssignment, - "SA4019": c.CheckDuplicateBuildConstraints, - - "SA5000": c.CheckNilMaps, - "SA5001": c.CheckEarlyDefer, - "SA5002": c.CheckInfiniteEmptyLoop, - "SA5003": c.CheckDeferInInfiniteLoop, - "SA5004": c.CheckLoopEmptyDefault, - "SA5005": c.CheckCyclicFinalizer, - // "SA5006": c.CheckSliceOutOfBounds, - "SA5007": c.CheckInfiniteRecursion, - - "SA6000": c.callChecker(checkRegexpMatchLoopRules), - "SA6001": c.CheckMapBytesKey, - "SA6002": c.callChecker(checkSyncPoolValueRules), - "SA6003": c.CheckRangeStringRunes, - "SA6004": c.CheckSillyRegexp, - - "SA9000": nil, - "SA9001": c.CheckDubiousDeferInChannelRangeLoop, - "SA9002": c.CheckNonOctalFileMode, - "SA9003": c.CheckEmptyBranch, - "SA9004": c.CheckMissingEnumTypesInDeclaration, - } -} - -func (c *Checker) filterGenerated(files []*ast.File) []*ast.File { - if c.CheckGenerated { - return files - } - var out []*ast.File - for _, f := range files { - if !IsGenerated(f) { - out = append(out, f) - } - } - return out -} - -func (c *Checker) findDeprecated(prog *lint.Program) { - var docs []*ast.CommentGroup - var names []*ast.Ident - - doDocs := func(pkginfo *loader.PackageInfo, names []*ast.Ident, docs []*ast.CommentGroup) { - var alt string - for _, doc := range docs { - if doc == nil { - continue - } - parts := strings.Split(doc.Text(), "\n\n") - last := parts[len(parts)-1] - if !strings.HasPrefix(last, "Deprecated: ") { - continue - } - alt = last[len("Deprecated: "):] - alt = strings.Replace(alt, "\n", " ", -1) - break - } - if alt == "" { - return - } - - for _, name := range names { - obj := pkginfo.ObjectOf(name) - c.deprecatedObjs[obj] = alt - } - } - - for _, pkginfo := range prog.Prog.AllPackages { - for _, f := range pkginfo.Files { - fn := func(node ast.Node) bool { - if node == nil { - return true - } - var ret bool - switch node := node.(type) { - case *ast.GenDecl: - switch node.Tok { - case token.TYPE, token.CONST, token.VAR: - docs = append(docs, node.Doc) - return true - default: - return false - } - case *ast.FuncDecl: - docs = append(docs, node.Doc) - names = []*ast.Ident{node.Name} - ret = false - case *ast.TypeSpec: - docs = append(docs, node.Doc) - names = []*ast.Ident{node.Name} - ret = true - case *ast.ValueSpec: - docs = append(docs, node.Doc) - names = node.Names - ret = false - case *ast.File: - return true - case *ast.StructType: - for _, field := range node.Fields.List { - doDocs(pkginfo, field.Names, []*ast.CommentGroup{field.Doc}) - } - return false - case *ast.InterfaceType: - for _, field := range node.Methods.List { - doDocs(pkginfo, field.Names, []*ast.CommentGroup{field.Doc}) - } - return false - default: - return false - } - if len(names) == 0 || len(docs) == 0 { - return ret - } - doDocs(pkginfo, names, docs) - - docs = docs[:0] - names = nil - return ret - } - ast.Inspect(f, fn) - } - } -} - -func (c *Checker) Init(prog *lint.Program) { - wg := &sync.WaitGroup{} - wg.Add(2) - go func() { - c.funcDescs 
= functions.NewDescriptions(prog.SSA) - for _, fn := range prog.AllFunctions { - if fn.Blocks != nil { - applyStdlibKnowledge(fn) - ssa.OptimizeBlocks(fn) - } - } - wg.Done() - }() - - go func() { - c.deprecatedObjs = map[types.Object]string{} - c.findDeprecated(prog) - wg.Done() - }() - - wg.Wait() -} - -func (c *Checker) isInLoop(b *ssa.BasicBlock) bool { - sets := c.funcDescs.Get(b.Parent()).Loops - for _, set := range sets { - if set[b] { - return true - } - } - return false -} - -func applyStdlibKnowledge(fn *ssa.Function) { - if len(fn.Blocks) == 0 { - return - } - - // comma-ok receiving from a time.Tick channel will never return - // ok == false, so any branching on the value of ok can be - // replaced with an unconditional jump. This will primarily match - // `for range time.Tick(x)` loops, but it can also match - // user-written code. - for _, block := range fn.Blocks { - if len(block.Instrs) < 3 { - continue - } - if len(block.Succs) != 2 { - continue - } - var instrs []*ssa.Instruction - for i, ins := range block.Instrs { - if _, ok := ins.(*ssa.DebugRef); ok { - continue - } - instrs = append(instrs, &block.Instrs[i]) - } - - for i, ins := range instrs { - unop, ok := (*ins).(*ssa.UnOp) - if !ok || unop.Op != token.ARROW { - continue - } - call, ok := unop.X.(*ssa.Call) - if !ok { - continue - } - if !IsCallTo(call.Common(), "time.Tick") { - continue - } - ex, ok := (*instrs[i+1]).(*ssa.Extract) - if !ok || ex.Tuple != unop || ex.Index != 1 { - continue - } - - ifstmt, ok := (*instrs[i+2]).(*ssa.If) - if !ok || ifstmt.Cond != ex { - continue - } - - *instrs[i+2] = ssa.NewJump(block) - succ := block.Succs[1] - block.Succs = block.Succs[0:1] - succ.RemovePred(block) - } - } -} - -func hasType(j *lint.Job, expr ast.Expr, name string) bool { - T := TypeOf(j, expr) - return IsType(T, name) -} - -func (c *Checker) CheckUntrappableSignal(j *lint.Job) { - fn := func(node ast.Node) bool { - call, ok := node.(*ast.CallExpr) - if !ok { - return true - } - if !IsCallToAnyAST(j, call, - "os/signal.Ignore", "os/signal.Notify", "os/signal.Reset") { - return true - } - for _, arg := range call.Args { - if conv, ok := arg.(*ast.CallExpr); ok && isName(j, conv.Fun, "os.Signal") { - arg = conv.Args[0] - } - - if isName(j, arg, "os.Kill") || isName(j, arg, "syscall.SIGKILL") { - j.Errorf(arg, "%s cannot be trapped (did you mean syscall.SIGTERM?)", Render(j, arg)) - } - if isName(j, arg, "syscall.SIGSTOP") { - j.Errorf(arg, "%s signal cannot be trapped", Render(j, arg)) - } - } - return true - } - for _, f := range j.Program.Files { - ast.Inspect(f, fn) - } -} - -func (c *Checker) CheckTemplate(j *lint.Job) { - fn := func(node ast.Node) bool { - call, ok := node.(*ast.CallExpr) - if !ok { - return true - } - var kind string - if IsCallToAST(j, call, "(*text/template.Template).Parse") { - kind = "text" - } else if IsCallToAST(j, call, "(*html/template.Template).Parse") { - kind = "html" - } else { - return true - } - sel := call.Fun.(*ast.SelectorExpr) - if !IsCallToAST(j, sel.X, "text/template.New") && - !IsCallToAST(j, sel.X, "html/template.New") { - // TODO(dh): this is a cheap workaround for templates with - // different delims. 
A better solution with less false - // negatives would use data flow analysis to see where the - // template comes from and where it has been - return true - } - s, ok := ExprToString(j, call.Args[0]) - if !ok { - return true - } - var err error - switch kind { - case "text": - _, err = texttemplate.New("").Parse(s) - case "html": - _, err = htmltemplate.New("").Parse(s) - } - if err != nil { - // TODO(dominikh): whitelist other parse errors, if any - if strings.Contains(err.Error(), "unexpected") { - j.Errorf(call.Args[0], "%s", err) - } - } - return true - } - for _, f := range j.Program.Files { - ast.Inspect(f, fn) - } -} - -func (c *Checker) CheckTimeSleepConstant(j *lint.Job) { - fn := func(node ast.Node) bool { - call, ok := node.(*ast.CallExpr) - if !ok { - return true - } - if !IsCallToAST(j, call, "time.Sleep") { - return true - } - lit, ok := call.Args[0].(*ast.BasicLit) - if !ok { - return true - } - n, err := strconv.Atoi(lit.Value) - if err != nil { - return true - } - if n == 0 || n > 120 { - // time.Sleep(0) is a seldom used pattern in concurrency - // tests. >120 might be intentional. 120 was chosen - // because the user could've meant 2 minutes. - return true - } - recommendation := "time.Sleep(time.Nanosecond)" - if n != 1 { - recommendation = fmt.Sprintf("time.Sleep(%d * time.Nanosecond)", n) - } - j.Errorf(call.Args[0], "sleeping for %d nanoseconds is probably a bug. Be explicit if it isn't: %s", n, recommendation) - return true - } - for _, f := range j.Program.Files { - ast.Inspect(f, fn) - } -} - -func (c *Checker) CheckWaitgroupAdd(j *lint.Job) { - fn := func(node ast.Node) bool { - g, ok := node.(*ast.GoStmt) - if !ok { - return true - } - fun, ok := g.Call.Fun.(*ast.FuncLit) - if !ok { - return true - } - if len(fun.Body.List) == 0 { - return true - } - stmt, ok := fun.Body.List[0].(*ast.ExprStmt) - if !ok { - return true - } - call, ok := stmt.X.(*ast.CallExpr) - if !ok { - return true - } - sel, ok := call.Fun.(*ast.SelectorExpr) - if !ok { - return true - } - fn, ok := ObjectOf(j, sel.Sel).(*types.Func) - if !ok { - return true - } - if fn.FullName() == "(*sync.WaitGroup).Add" { - j.Errorf(sel, "should call %s before starting the goroutine to avoid a race", - Render(j, stmt)) - } - return true - } - for _, f := range j.Program.Files { - ast.Inspect(f, fn) - } -} - -func (c *Checker) CheckInfiniteEmptyLoop(j *lint.Job) { - fn := func(node ast.Node) bool { - loop, ok := node.(*ast.ForStmt) - if !ok || len(loop.Body.List) != 0 || loop.Post != nil { - return true - } - - if loop.Init != nil { - // TODO(dh): this isn't strictly necessary, it just makes - // the check easier. - return true - } - // An empty loop is bad news in two cases: 1) The loop has no - // condition. In that case, it's just a loop that spins - // forever and as fast as it can, keeping a core busy. 2) The - // loop condition only consists of variable or field reads and - // operators on those. The only way those could change their - // value is with unsynchronised access, which constitutes a - // data race. - // - // If the condition contains any function calls, its behaviour - // is dynamic and the loop might terminate. Similarly for - // channel receives. 
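A minimal Go sketch (an editorial illustration, not part of the deleted vendor file) of the two loop shapes SA5002 (CheckInfiniteEmptyLoop) describes in the comment above; `stop` is a hypothetical flag, and reading it without synchronisation is exactly the data race the comment mentions:

    package sketch

    var stop bool // hypothetical flag; nothing in this sketch ever writes it

    func spinForever() {
    	for { // no condition: spins forever, keeping a core at 100% CPU
    	}
    }

    func spinUntilRace() {
    	for !stop { // only variable reads: the value cannot change without a data race
    	}
    }
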
- - if loop.Cond != nil && hasSideEffects(loop.Cond) { - return true - } - - j.Errorf(loop, "this loop will spin, using 100%% CPU") - if loop.Cond != nil { - j.Errorf(loop, "loop condition never changes or has a race condition") - } - - return true - } - for _, f := range j.Program.Files { - ast.Inspect(f, fn) - } -} - -func (c *Checker) CheckDeferInInfiniteLoop(j *lint.Job) { - fn := func(node ast.Node) bool { - mightExit := false - var defers []ast.Stmt - loop, ok := node.(*ast.ForStmt) - if !ok || loop.Cond != nil { - return true - } - fn2 := func(node ast.Node) bool { - switch stmt := node.(type) { - case *ast.ReturnStmt: - mightExit = true - case *ast.BranchStmt: - // TODO(dominikh): if this sees a break in a switch or - // select, it doesn't check if it breaks the loop or - // just the select/switch. This causes some false - // negatives. - if stmt.Tok == token.BREAK { - mightExit = true - } - case *ast.DeferStmt: - defers = append(defers, stmt) - case *ast.FuncLit: - // Don't look into function bodies - return false - } - return true - } - ast.Inspect(loop.Body, fn2) - if mightExit { - return true - } - for _, stmt := range defers { - j.Errorf(stmt, "defers in this infinite loop will never run") - } - return true - } - for _, f := range j.Program.Files { - ast.Inspect(f, fn) - } -} - -func (c *Checker) CheckDubiousDeferInChannelRangeLoop(j *lint.Job) { - fn := func(node ast.Node) bool { - loop, ok := node.(*ast.RangeStmt) - if !ok { - return true - } - typ := TypeOf(j, loop.X) - _, ok = typ.Underlying().(*types.Chan) - if !ok { - return true - } - fn2 := func(node ast.Node) bool { - switch stmt := node.(type) { - case *ast.DeferStmt: - j.Errorf(stmt, "defers in this range loop won't run unless the channel gets closed") - case *ast.FuncLit: - // Don't look into function bodies - return false - } - return true - } - ast.Inspect(loop.Body, fn2) - return true - } - for _, f := range j.Program.Files { - ast.Inspect(f, fn) - } -} - -func (c *Checker) CheckTestMainExit(j *lint.Job) { - fn := func(node ast.Node) bool { - if !isTestMain(j, node) { - return true - } - - arg := ObjectOf(j, node.(*ast.FuncDecl).Type.Params.List[0].Names[0]) - callsRun := false - fn2 := func(node ast.Node) bool { - call, ok := node.(*ast.CallExpr) - if !ok { - return true - } - sel, ok := call.Fun.(*ast.SelectorExpr) - if !ok { - return true - } - ident, ok := sel.X.(*ast.Ident) - if !ok { - return true - } - if arg != ObjectOf(j, ident) { - return true - } - if sel.Sel.Name == "Run" { - callsRun = true - return false - } - return true - } - ast.Inspect(node.(*ast.FuncDecl).Body, fn2) - - callsExit := false - fn3 := func(node ast.Node) bool { - if IsCallToAST(j, node, "os.Exit") { - callsExit = true - return false - } - return true - } - ast.Inspect(node.(*ast.FuncDecl).Body, fn3) - if !callsExit && callsRun { - j.Errorf(node, "TestMain should call os.Exit to set exit code") - } - return true - } - for _, f := range j.Program.Files { - ast.Inspect(f, fn) - } -} - -func isTestMain(j *lint.Job, node ast.Node) bool { - decl, ok := node.(*ast.FuncDecl) - if !ok { - return false - } - if decl.Name.Name != "TestMain" { - return false - } - if len(decl.Type.Params.List) != 1 { - return false - } - arg := decl.Type.Params.List[0] - if len(arg.Names) != 1 { - return false - } - return IsOfType(j, arg.Type, "*testing.M") -} - -func (c *Checker) CheckExec(j *lint.Job) { - fn := func(node ast.Node) bool { - call, ok := node.(*ast.CallExpr) - if !ok { - return true - } - if !IsCallToAST(j, call, "os/exec.Command") { - return 
true - } - val, ok := ExprToString(j, call.Args[0]) - if !ok { - return true - } - if !strings.Contains(val, " ") || strings.Contains(val, `\`) || strings.Contains(val, "/") { - return true - } - j.Errorf(call.Args[0], "first argument to exec.Command looks like a shell command, but a program name or path are expected") - return true - } - for _, f := range j.Program.Files { - ast.Inspect(f, fn) - } -} - -func (c *Checker) CheckLoopEmptyDefault(j *lint.Job) { - fn := func(node ast.Node) bool { - loop, ok := node.(*ast.ForStmt) - if !ok || len(loop.Body.List) != 1 || loop.Cond != nil || loop.Init != nil { - return true - } - sel, ok := loop.Body.List[0].(*ast.SelectStmt) - if !ok { - return true - } - for _, c := range sel.Body.List { - if comm, ok := c.(*ast.CommClause); ok && comm.Comm == nil && len(comm.Body) == 0 { - j.Errorf(comm, "should not have an empty default case in a for+select loop. The loop will spin.") - } - } - return true - } - for _, f := range j.Program.Files { - ast.Inspect(f, fn) - } -} - -func (c *Checker) CheckLhsRhsIdentical(j *lint.Job) { - fn := func(node ast.Node) bool { - op, ok := node.(*ast.BinaryExpr) - if !ok { - return true - } - switch op.Op { - case token.EQL, token.NEQ: - if basic, ok := TypeOf(j, op.X).(*types.Basic); ok { - if kind := basic.Kind(); kind == types.Float32 || kind == types.Float64 { - // f == f and f != f might be used to check for NaN - return true - } - } - case token.SUB, token.QUO, token.AND, token.REM, token.OR, token.XOR, token.AND_NOT, - token.LAND, token.LOR, token.LSS, token.GTR, token.LEQ, token.GEQ: - default: - // For some ops, such as + and *, it can make sense to - // have identical operands - return true - } - - if Render(j, op.X) != Render(j, op.Y) { - return true - } - j.Errorf(op, "identical expressions on the left and right side of the '%s' operator", op.Op) - return true - } - for _, f := range j.Program.Files { - ast.Inspect(f, fn) - } -} - -func (c *Checker) CheckScopedBreak(j *lint.Job) { - fn := func(node ast.Node) bool { - var body *ast.BlockStmt - switch node := node.(type) { - case *ast.ForStmt: - body = node.Body - case *ast.RangeStmt: - body = node.Body - default: - return true - } - for _, stmt := range body.List { - var blocks [][]ast.Stmt - switch stmt := stmt.(type) { - case *ast.SwitchStmt: - for _, c := range stmt.Body.List { - blocks = append(blocks, c.(*ast.CaseClause).Body) - } - case *ast.SelectStmt: - for _, c := range stmt.Body.List { - blocks = append(blocks, c.(*ast.CommClause).Body) - } - default: - continue - } - - for _, body := range blocks { - if len(body) == 0 { - continue - } - lasts := []ast.Stmt{body[len(body)-1]} - // TODO(dh): unfold all levels of nested block - // statements, not just a single level if statement - if ifs, ok := lasts[0].(*ast.IfStmt); ok { - if len(ifs.Body.List) == 0 { - continue - } - lasts[0] = ifs.Body.List[len(ifs.Body.List)-1] - - if block, ok := ifs.Else.(*ast.BlockStmt); ok { - if len(block.List) != 0 { - lasts = append(lasts, block.List[len(block.List)-1]) - } - } - } - for _, last := range lasts { - branch, ok := last.(*ast.BranchStmt) - if !ok || branch.Tok != token.BREAK || branch.Label != nil { - continue - } - j.Errorf(branch, "ineffective break statement. 
Did you mean to break out of the outer loop?") - } - } - } - return true - } - for _, f := range j.Program.Files { - ast.Inspect(f, fn) - } -} - -func (c *Checker) CheckUnsafePrintf(j *lint.Job) { - fn := func(node ast.Node) bool { - call, ok := node.(*ast.CallExpr) - if !ok { - return true - } - if !IsCallToAnyAST(j, call, "fmt.Printf", "fmt.Sprintf", "log.Printf") { - return true - } - if len(call.Args) != 1 { - return true - } - switch call.Args[0].(type) { - case *ast.CallExpr, *ast.Ident: - default: - return true - } - j.Errorf(call.Args[0], "printf-style function with dynamic first argument and no further arguments should use print-style function instead") - return true - } - for _, f := range j.Program.Files { - ast.Inspect(f, fn) - } -} - -func (c *Checker) CheckEarlyDefer(j *lint.Job) { - fn := func(node ast.Node) bool { - block, ok := node.(*ast.BlockStmt) - if !ok { - return true - } - if len(block.List) < 2 { - return true - } - for i, stmt := range block.List { - if i == len(block.List)-1 { - break - } - assign, ok := stmt.(*ast.AssignStmt) - if !ok { - continue - } - if len(assign.Rhs) != 1 { - continue - } - if len(assign.Lhs) < 2 { - continue - } - if lhs, ok := assign.Lhs[len(assign.Lhs)-1].(*ast.Ident); ok && lhs.Name == "_" { - continue - } - call, ok := assign.Rhs[0].(*ast.CallExpr) - if !ok { - continue - } - sig, ok := TypeOf(j, call.Fun).(*types.Signature) - if !ok { - continue - } - if sig.Results().Len() < 2 { - continue - } - last := sig.Results().At(sig.Results().Len() - 1) - // FIXME(dh): check that it's error from universe, not - // another type of the same name - if last.Type().String() != "error" { - continue - } - lhs, ok := assign.Lhs[0].(*ast.Ident) - if !ok { - continue - } - def, ok := block.List[i+1].(*ast.DeferStmt) - if !ok { - continue - } - sel, ok := def.Call.Fun.(*ast.SelectorExpr) - if !ok { - continue - } - ident, ok := selectorX(sel).(*ast.Ident) - if !ok { - continue - } - if ident.Obj != lhs.Obj { - continue - } - if sel.Sel.Name != "Close" { - continue - } - j.Errorf(def, "should check returned error before deferring %s", Render(j, def.Call)) - } - return true - } - for _, f := range j.Program.Files { - ast.Inspect(f, fn) - } -} - -func selectorX(sel *ast.SelectorExpr) ast.Node { - switch x := sel.X.(type) { - case *ast.SelectorExpr: - return selectorX(x) - default: - return x - } -} - -func (c *Checker) CheckEmptyCriticalSection(j *lint.Job) { - // Initially it might seem like this check would be easier to - // implement in SSA. After all, we're only checking for two - // consecutive method calls. In reality, however, there may be any - // number of other instructions between the lock and unlock, while - // still constituting an empty critical section. For example, - // given `m.x().Lock(); m.x().Unlock()`, there will be a call to - // x(). In the AST-based approach, this has a tiny potential for a - // false positive (the second call to x might be doing work that - // is protected by the mutex). In an SSA-based approach, however, - // it would miss a lot of real bugs. 
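A minimal sketch (editorial, not from the vendored source) of the pattern SA2001 (CheckEmptyCriticalSection) flags, including the `m.x()` shape discussed in the comment above:

    package sketch

    import "sync"

    type M struct{ mu sync.Mutex }

    func (m *M) x() *sync.Mutex { return &m.mu }

    func demo(m *M) {
    	m.mu.Lock()
    	m.mu.Unlock() // flagged: empty critical section

    	m.x().Lock()
    	m.x().Unlock() // matches textually; a second x() that does real work is
    	// the rare false positive the comment above acknowledges
    }
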
- - mutexParams := func(s ast.Stmt) (x ast.Expr, funcName string, ok bool) { - expr, ok := s.(*ast.ExprStmt) - if !ok { - return nil, "", false - } - call, ok := expr.X.(*ast.CallExpr) - if !ok { - return nil, "", false - } - sel, ok := call.Fun.(*ast.SelectorExpr) - if !ok { - return nil, "", false - } - - fn, ok := ObjectOf(j, sel.Sel).(*types.Func) - if !ok { - return nil, "", false - } - sig := fn.Type().(*types.Signature) - if sig.Params().Len() != 0 || sig.Results().Len() != 0 { - return nil, "", false - } - - return sel.X, fn.Name(), true - } - - fn := func(node ast.Node) bool { - block, ok := node.(*ast.BlockStmt) - if !ok { - return true - } - if len(block.List) < 2 { - return true - } - for i := range block.List[:len(block.List)-1] { - sel1, method1, ok1 := mutexParams(block.List[i]) - sel2, method2, ok2 := mutexParams(block.List[i+1]) - - if !ok1 || !ok2 || Render(j, sel1) != Render(j, sel2) { - continue - } - if (method1 == "Lock" && method2 == "Unlock") || - (method1 == "RLock" && method2 == "RUnlock") { - j.Errorf(block.List[i+1], "empty critical section") - } - } - return true - } - for _, f := range j.Program.Files { - ast.Inspect(f, fn) - } -} - -// cgo produces code like fn(&*_Cvar_kSomeCallbacks) which we don't -// want to flag. -var cgoIdent = regexp.MustCompile(`^_C(func|var)_.+$`) - -func (c *Checker) CheckIneffectiveCopy(j *lint.Job) { - fn := func(node ast.Node) bool { - if unary, ok := node.(*ast.UnaryExpr); ok { - if star, ok := unary.X.(*ast.StarExpr); ok && unary.Op == token.AND { - ident, ok := star.X.(*ast.Ident) - if !ok || !cgoIdent.MatchString(ident.Name) { - j.Errorf(unary, "&*x will be simplified to x. It will not copy x.") - } - } - } - - if star, ok := node.(*ast.StarExpr); ok { - if unary, ok := star.X.(*ast.UnaryExpr); ok && unary.Op == token.AND { - j.Errorf(star, "*&x will be simplified to x. It will not copy x.") - } - } - return true - } - for _, f := range j.Program.Files { - ast.Inspect(f, fn) - } -} - -func (c *Checker) CheckDiffSizeComparison(j *lint.Job) { - for _, ssafn := range j.Program.InitialFunctions { - for _, b := range ssafn.Blocks { - for _, ins := range b.Instrs { - binop, ok := ins.(*ssa.BinOp) - if !ok { - continue - } - if binop.Op != token.EQL && binop.Op != token.NEQ { - continue - } - _, ok1 := binop.X.(*ssa.Slice) - _, ok2 := binop.Y.(*ssa.Slice) - if !ok1 && !ok2 { - continue - } - r := c.funcDescs.Get(ssafn).Ranges - r1, ok1 := r.Get(binop.X).(vrp.StringInterval) - r2, ok2 := r.Get(binop.Y).(vrp.StringInterval) - if !ok1 || !ok2 { - continue - } - if r1.Length.Intersection(r2.Length).Empty() { - j.Errorf(binop, "comparing strings of different sizes for equality will always return false") - } - } - } - } -} - -func (c *Checker) CheckCanonicalHeaderKey(j *lint.Job) { - fn := func(node ast.Node) bool { - assign, ok := node.(*ast.AssignStmt) - if ok { - // TODO(dh): This risks missing some Header reads, for - // example in `h1["foo"] = h2["foo"]` – these edge - // cases are probably rare enough to ignore for now. 
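A runnable sketch (editorial) of what SA1008 (CheckCanonicalHeaderKey) guards against: net/http canonicalizes keys on Set and Get, so indexing the header map with a non-canonical literal key silently misses the entry.

    package main

    import (
    	"fmt"
    	"net/http"
    )

    func main() {
    	h := http.Header{}
    	h.Set("content-type", "text/plain") // stored under the canonical key "Content-Type"

    	fmt.Println(h["content-type"])                          // [] (empty): the read this check flags
    	fmt.Println(h[http.CanonicalHeaderKey("content-type")]) // [text/plain]
    }
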
- for _, expr := range assign.Lhs { - op, ok := expr.(*ast.IndexExpr) - if !ok { - continue - } - if hasType(j, op.X, "net/http.Header") { - return false - } - } - return true - } - op, ok := node.(*ast.IndexExpr) - if !ok { - return true - } - if !hasType(j, op.X, "net/http.Header") { - return true - } - s, ok := ExprToString(j, op.Index) - if !ok { - return true - } - if s == http.CanonicalHeaderKey(s) { - return true - } - j.Errorf(op, "keys in http.Header are canonicalized, %q is not canonical; fix the constant or use http.CanonicalHeaderKey", s) - return true - } - for _, f := range j.Program.Files { - ast.Inspect(f, fn) - } -} - -func (c *Checker) CheckBenchmarkN(j *lint.Job) { - fn := func(node ast.Node) bool { - assign, ok := node.(*ast.AssignStmt) - if !ok { - return true - } - if len(assign.Lhs) != 1 || len(assign.Rhs) != 1 { - return true - } - sel, ok := assign.Lhs[0].(*ast.SelectorExpr) - if !ok { - return true - } - if sel.Sel.Name != "N" { - return true - } - if !hasType(j, sel.X, "*testing.B") { - return true - } - j.Errorf(assign, "should not assign to %s", Render(j, sel)) - return true - } - for _, f := range j.Program.Files { - ast.Inspect(f, fn) - } -} - -func (c *Checker) CheckUnreadVariableValues(j *lint.Job) { - for _, ssafn := range j.Program.InitialFunctions { - if IsExample(ssafn) { - continue - } - node := ssafn.Syntax() - if node == nil { - continue - } - - ast.Inspect(node, func(node ast.Node) bool { - assign, ok := node.(*ast.AssignStmt) - if !ok { - return true - } - if len(assign.Lhs) > 1 && len(assign.Rhs) == 1 { - // Either a function call with multiple return values, - // or a comma-ok assignment - - val, _ := ssafn.ValueForExpr(assign.Rhs[0]) - if val == nil { - return true - } - refs := val.Referrers() - if refs == nil { - return true - } - for _, ref := range *refs { - ex, ok := ref.(*ssa.Extract) - if !ok { - continue - } - exrefs := ex.Referrers() - if exrefs == nil { - continue - } - if len(FilterDebug(*exrefs)) == 0 { - lhs := assign.Lhs[ex.Index] - if ident, ok := lhs.(*ast.Ident); !ok || ok && ident.Name == "_" { - continue - } - j.Errorf(lhs, "this value of %s is never used", lhs) - } - } - return true - } - for i, lhs := range assign.Lhs { - rhs := assign.Rhs[i] - if ident, ok := lhs.(*ast.Ident); !ok || ok && ident.Name == "_" { - continue - } - val, _ := ssafn.ValueForExpr(rhs) - if val == nil { - continue - } - - refs := val.Referrers() - if refs == nil { - // TODO investigate why refs can be nil - return true - } - if len(FilterDebug(*refs)) == 0 { - j.Errorf(lhs, "this value of %s is never used", lhs) - } - } - return true - }) - } -} - -func (c *Checker) CheckPredeterminedBooleanExprs(j *lint.Job) { - for _, ssafn := range j.Program.InitialFunctions { - for _, block := range ssafn.Blocks { - for _, ins := range block.Instrs { - ssabinop, ok := ins.(*ssa.BinOp) - if !ok { - continue - } - switch ssabinop.Op { - case token.GTR, token.LSS, token.EQL, token.NEQ, token.LEQ, token.GEQ: - default: - continue - } - - xs, ok1 := consts(ssabinop.X, nil, nil) - ys, ok2 := consts(ssabinop.Y, nil, nil) - if !ok1 || !ok2 || len(xs) == 0 || len(ys) == 0 { - continue - } - - trues := 0 - for _, x := range xs { - for _, y := range ys { - if x.Value == nil { - if y.Value == nil { - trues++ - } - continue - } - if constant.Compare(x.Value, ssabinop.Op, y.Value) { - trues++ - } - } - } - b := trues != 0 - if trues == 0 || trues == len(xs)*len(ys) { - j.Errorf(ssabinop, "binary expression is always %t for all possible values (%s %s %s)", - b, xs, ssabinop.Op, 
ys) - } - } - } - } -} - -func (c *Checker) CheckNilMaps(j *lint.Job) { - for _, ssafn := range j.Program.InitialFunctions { - for _, block := range ssafn.Blocks { - for _, ins := range block.Instrs { - mu, ok := ins.(*ssa.MapUpdate) - if !ok { - continue - } - c, ok := mu.Map.(*ssa.Const) - if !ok { - continue - } - if c.Value != nil { - continue - } - j.Errorf(mu, "assignment to nil map") - } - } - } -} - -func (c *Checker) CheckUnsignedComparison(j *lint.Job) { - fn := func(node ast.Node) bool { - expr, ok := node.(*ast.BinaryExpr) - if !ok { - return true - } - tx := TypeOf(j, expr.X) - basic, ok := tx.Underlying().(*types.Basic) - if !ok { - return true - } - if (basic.Info() & types.IsUnsigned) == 0 { - return true - } - lit, ok := expr.Y.(*ast.BasicLit) - if !ok || lit.Value != "0" { - return true - } - switch expr.Op { - case token.GEQ: - j.Errorf(expr, "unsigned values are always >= 0") - case token.LSS: - j.Errorf(expr, "unsigned values are never < 0") - } - return true - } - for _, f := range j.Program.Files { - ast.Inspect(f, fn) - } -} - -func consts(val ssa.Value, out []*ssa.Const, visitedPhis map[string]bool) ([]*ssa.Const, bool) { - if visitedPhis == nil { - visitedPhis = map[string]bool{} - } - var ok bool - switch val := val.(type) { - case *ssa.Phi: - if visitedPhis[val.Name()] { - break - } - visitedPhis[val.Name()] = true - vals := val.Operands(nil) - for _, phival := range vals { - out, ok = consts(*phival, out, visitedPhis) - if !ok { - return nil, false - } - } - case *ssa.Const: - out = append(out, val) - case *ssa.Convert: - out, ok = consts(val.X, out, visitedPhis) - if !ok { - return nil, false - } - default: - return nil, false - } - if len(out) < 2 { - return out, true - } - uniq := []*ssa.Const{out[0]} - for _, val := range out[1:] { - if val.Value == uniq[len(uniq)-1].Value { - continue - } - uniq = append(uniq, val) - } - return uniq, true -} - -func (c *Checker) CheckLoopCondition(j *lint.Job) { - for _, ssafn := range j.Program.InitialFunctions { - fn := func(node ast.Node) bool { - loop, ok := node.(*ast.ForStmt) - if !ok { - return true - } - if loop.Init == nil || loop.Cond == nil || loop.Post == nil { - return true - } - init, ok := loop.Init.(*ast.AssignStmt) - if !ok || len(init.Lhs) != 1 || len(init.Rhs) != 1 { - return true - } - cond, ok := loop.Cond.(*ast.BinaryExpr) - if !ok { - return true - } - x, ok := cond.X.(*ast.Ident) - if !ok { - return true - } - lhs, ok := init.Lhs[0].(*ast.Ident) - if !ok { - return true - } - if x.Obj != lhs.Obj { - return true - } - if _, ok := loop.Post.(*ast.IncDecStmt); !ok { - return true - } - - v, isAddr := ssafn.ValueForExpr(cond.X) - if v == nil || isAddr { - return true - } - switch v := v.(type) { - case *ssa.Phi: - ops := v.Operands(nil) - if len(ops) != 2 { - return true - } - _, ok := (*ops[0]).(*ssa.Const) - if !ok { - return true - } - sigma, ok := (*ops[1]).(*ssa.Sigma) - if !ok { - return true - } - if sigma.X != v { - return true - } - case *ssa.UnOp: - return true - } - j.Errorf(cond, "variable in loop condition never changes") - - return true - } - Inspect(ssafn.Syntax(), fn) - } -} - -func (c *Checker) CheckArgOverwritten(j *lint.Job) { - for _, ssafn := range j.Program.InitialFunctions { - fn := func(node ast.Node) bool { - var typ *ast.FuncType - var body *ast.BlockStmt - switch fn := node.(type) { - case *ast.FuncDecl: - typ = fn.Type - body = fn.Body - case *ast.FuncLit: - typ = fn.Type - body = fn.Body - } - if body == nil { - return true - } - if len(typ.Params.List) == 0 { - return true 
- } - for _, field := range typ.Params.List { - for _, arg := range field.Names { - obj := ObjectOf(j, arg) - var ssaobj *ssa.Parameter - for _, param := range ssafn.Params { - if param.Object() == obj { - ssaobj = param - break - } - } - if ssaobj == nil { - continue - } - refs := ssaobj.Referrers() - if refs == nil { - continue - } - if len(FilterDebug(*refs)) != 0 { - continue - } - - assigned := false - ast.Inspect(body, func(node ast.Node) bool { - assign, ok := node.(*ast.AssignStmt) - if !ok { - return true - } - for _, lhs := range assign.Lhs { - ident, ok := lhs.(*ast.Ident) - if !ok { - continue - } - if ObjectOf(j, ident) == obj { - assigned = true - return false - } - } - return true - }) - if assigned { - j.Errorf(arg, "argument %s is overwritten before first use", arg) - } - } - } - return true - } - Inspect(ssafn.Syntax(), fn) - } -} - -func (c *Checker) CheckIneffectiveLoop(j *lint.Job) { - // This check detects some, but not all unconditional loop exits. - // We give up in the following cases: - // - // - a goto anywhere in the loop. The goto might skip over our - // return, and we don't check that it doesn't. - // - // - any nested, unlabelled continue, even if it is in another - // loop or closure. - fn := func(node ast.Node) bool { - var body *ast.BlockStmt - switch fn := node.(type) { - case *ast.FuncDecl: - body = fn.Body - case *ast.FuncLit: - body = fn.Body - default: - return true - } - if body == nil { - return true - } - labels := map[*ast.Object]ast.Stmt{} - ast.Inspect(body, func(node ast.Node) bool { - label, ok := node.(*ast.LabeledStmt) - if !ok { - return true - } - labels[label.Label.Obj] = label.Stmt - return true - }) - - ast.Inspect(body, func(node ast.Node) bool { - var loop ast.Node - var body *ast.BlockStmt - switch node := node.(type) { - case *ast.ForStmt: - body = node.Body - loop = node - case *ast.RangeStmt: - typ := TypeOf(j, node.X) - if _, ok := typ.Underlying().(*types.Map); ok { - // looping once over a map is a valid pattern for - // getting an arbitrary element. - return true - } - body = node.Body - loop = node - default: - return true - } - if len(body.List) < 2 { - // avoid flagging the somewhat common pattern of using - // a range loop to get the first element in a slice, - // or the first rune in a string. 
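For contrast, an editorial sketch of the single-statement idiom that the `len(body.List) < 2` guard above deliberately leaves alone:

    package sketch

    func firstRune(s string) rune {
    	for _, r := range s { // body is one statement: grab the first rune and leave
    		return r
    	}
    	return 0 // s was empty
    }
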
- return true - } - var unconditionalExit ast.Node - hasBranching := false - for _, stmt := range body.List { - switch stmt := stmt.(type) { - case *ast.BranchStmt: - switch stmt.Tok { - case token.BREAK: - if stmt.Label == nil || labels[stmt.Label.Obj] == loop { - unconditionalExit = stmt - } - case token.CONTINUE: - if stmt.Label == nil || labels[stmt.Label.Obj] == loop { - unconditionalExit = nil - return false - } - } - case *ast.ReturnStmt: - unconditionalExit = stmt - case *ast.IfStmt, *ast.ForStmt, *ast.RangeStmt, *ast.SwitchStmt, *ast.SelectStmt: - hasBranching = true - } - } - if unconditionalExit == nil || !hasBranching { - return false - } - ast.Inspect(body, func(node ast.Node) bool { - if branch, ok := node.(*ast.BranchStmt); ok { - - switch branch.Tok { - case token.GOTO: - unconditionalExit = nil - return false - case token.CONTINUE: - if branch.Label != nil && labels[branch.Label.Obj] != loop { - return true - } - unconditionalExit = nil - return false - } - } - return true - }) - if unconditionalExit != nil { - j.Errorf(unconditionalExit, "the surrounding loop is unconditionally terminated") - } - return true - }) - return true - } - for _, f := range j.Program.Files { - ast.Inspect(f, fn) - } -} - -func (c *Checker) CheckNilContext(j *lint.Job) { - fn := func(node ast.Node) bool { - call, ok := node.(*ast.CallExpr) - if !ok { - return true - } - if len(call.Args) == 0 { - return true - } - if typ, ok := TypeOf(j, call.Args[0]).(*types.Basic); !ok || typ.Kind() != types.UntypedNil { - return true - } - sig, ok := TypeOf(j, call.Fun).(*types.Signature) - if !ok { - return true - } - if sig.Params().Len() == 0 { - return true - } - if !IsType(sig.Params().At(0).Type(), "context.Context") { - return true - } - j.Errorf(call.Args[0], - "do not pass a nil Context, even if a function permits it; pass context.TODO if you are unsure about which Context to use") - return true - } - for _, f := range j.Program.Files { - ast.Inspect(f, fn) - } -} - -func (c *Checker) CheckSeeker(j *lint.Job) { - fn := func(node ast.Node) bool { - call, ok := node.(*ast.CallExpr) - if !ok { - return true - } - sel, ok := call.Fun.(*ast.SelectorExpr) - if !ok { - return true - } - if sel.Sel.Name != "Seek" { - return true - } - if len(call.Args) != 2 { - return true - } - arg0, ok := call.Args[0].(*ast.SelectorExpr) - if !ok { - return true - } - switch arg0.Sel.Name { - case "SeekStart", "SeekCurrent", "SeekEnd": - default: - return true - } - pkg, ok := arg0.X.(*ast.Ident) - if !ok { - return true - } - if pkg.Name != "io" { - return true - } - j.Errorf(call, "the first argument of io.Seeker is the offset, but an io.Seek* constant is being used instead") - return true - } - for _, f := range j.Program.Files { - ast.Inspect(f, fn) - } -} - -func (c *Checker) CheckIneffectiveAppend(j *lint.Job) { - isAppend := func(ins ssa.Value) bool { - call, ok := ins.(*ssa.Call) - if !ok { - return false - } - if call.Call.IsInvoke() { - return false - } - if builtin, ok := call.Call.Value.(*ssa.Builtin); !ok || builtin.Name() != "append" { - return false - } - return true - } - - for _, ssafn := range j.Program.InitialFunctions { - for _, block := range ssafn.Blocks { - for _, ins := range block.Instrs { - val, ok := ins.(ssa.Value) - if !ok || !isAppend(val) { - continue - } - - isUsed := false - visited := map[ssa.Instruction]bool{} - var walkRefs func(refs []ssa.Instruction) - walkRefs = func(refs []ssa.Instruction) { - loop: - for _, ref := range refs { - if visited[ref] { - continue - } - visited[ref] = true 
- if _, ok := ref.(*ssa.DebugRef); ok { - continue - } - switch ref := ref.(type) { - case *ssa.Phi: - walkRefs(*ref.Referrers()) - case *ssa.Sigma: - walkRefs(*ref.Referrers()) - case ssa.Value: - if !isAppend(ref) { - isUsed = true - } else { - walkRefs(*ref.Referrers()) - } - case ssa.Instruction: - isUsed = true - break loop - } - } - } - refs := val.Referrers() - if refs == nil { - continue - } - walkRefs(*refs) - if !isUsed { - j.Errorf(ins, "this result of append is never used, except maybe in other appends") - } - } - } - } -} - -func (c *Checker) CheckConcurrentTesting(j *lint.Job) { - for _, ssafn := range j.Program.InitialFunctions { - for _, block := range ssafn.Blocks { - for _, ins := range block.Instrs { - gostmt, ok := ins.(*ssa.Go) - if !ok { - continue - } - var fn *ssa.Function - switch val := gostmt.Call.Value.(type) { - case *ssa.Function: - fn = val - case *ssa.MakeClosure: - fn = val.Fn.(*ssa.Function) - default: - continue - } - if fn.Blocks == nil { - continue - } - for _, block := range fn.Blocks { - for _, ins := range block.Instrs { - call, ok := ins.(*ssa.Call) - if !ok { - continue - } - if call.Call.IsInvoke() { - continue - } - callee := call.Call.StaticCallee() - if callee == nil { - continue - } - recv := callee.Signature.Recv() - if recv == nil { - continue - } - if !IsType(recv.Type(), "*testing.common") { - continue - } - fn, ok := call.Call.StaticCallee().Object().(*types.Func) - if !ok { - continue - } - name := fn.Name() - switch name { - case "FailNow", "Fatal", "Fatalf", "SkipNow", "Skip", "Skipf": - default: - continue - } - j.Errorf(gostmt, "the goroutine calls T.%s, which must be called in the same goroutine as the test", name) - } - } - } - } - } -} - -func (c *Checker) CheckCyclicFinalizer(j *lint.Job) { - for _, ssafn := range j.Program.InitialFunctions { - node := c.funcDescs.CallGraph.CreateNode(ssafn) - for _, edge := range node.Out { - if edge.Callee.Func.RelString(nil) != "runtime.SetFinalizer" { - continue - } - arg0 := edge.Site.Common().Args[0] - if iface, ok := arg0.(*ssa.MakeInterface); ok { - arg0 = iface.X - } - unop, ok := arg0.(*ssa.UnOp) - if !ok { - continue - } - v, ok := unop.X.(*ssa.Alloc) - if !ok { - continue - } - arg1 := edge.Site.Common().Args[1] - if iface, ok := arg1.(*ssa.MakeInterface); ok { - arg1 = iface.X - } - mc, ok := arg1.(*ssa.MakeClosure) - if !ok { - continue - } - for _, b := range mc.Bindings { - if b == v { - pos := j.Program.DisplayPosition(mc.Fn.Pos()) - j.Errorf(edge.Site, "the finalizer closes over the object, preventing the finalizer from ever running (at %s)", pos) - } - } - } - } -} - -func (c *Checker) CheckSliceOutOfBounds(j *lint.Job) { - for _, ssafn := range j.Program.InitialFunctions { - for _, block := range ssafn.Blocks { - for _, ins := range block.Instrs { - ia, ok := ins.(*ssa.IndexAddr) - if !ok { - continue - } - if _, ok := ia.X.Type().Underlying().(*types.Slice); !ok { - continue - } - sr, ok1 := c.funcDescs.Get(ssafn).Ranges[ia.X].(vrp.SliceInterval) - idxr, ok2 := c.funcDescs.Get(ssafn).Ranges[ia.Index].(vrp.IntInterval) - if !ok1 || !ok2 || !sr.IsKnown() || !idxr.IsKnown() || sr.Length.Empty() || idxr.Empty() { - continue - } - if idxr.Lower.Cmp(sr.Length.Upper) >= 0 { - j.Errorf(ia, "index out of bounds") - } - } - } - } -} - -func (c *Checker) CheckDeferLock(j *lint.Job) { - for _, ssafn := range j.Program.InitialFunctions { - for _, block := range ssafn.Blocks { - instrs := FilterDebug(block.Instrs) - if len(instrs) < 2 { - continue - } - for i, ins := range 
instrs[:len(instrs)-1] { - call, ok := ins.(*ssa.Call) - if !ok { - continue - } - if !IsCallTo(call.Common(), "(*sync.Mutex).Lock") && !IsCallTo(call.Common(), "(*sync.RWMutex).RLock") { - continue - } - nins, ok := instrs[i+1].(*ssa.Defer) - if !ok { - continue - } - if !IsCallTo(&nins.Call, "(*sync.Mutex).Lock") && !IsCallTo(&nins.Call, "(*sync.RWMutex).RLock") { - continue - } - if call.Common().Args[0] != nins.Call.Args[0] { - continue - } - name := shortCallName(call.Common()) - alt := "" - switch name { - case "Lock": - alt = "Unlock" - case "RLock": - alt = "RUnlock" - } - j.Errorf(nins, "deferring %s right after having locked already; did you mean to defer %s?", name, alt) - } - } - } -} - -func (c *Checker) CheckNaNComparison(j *lint.Job) { - isNaN := func(v ssa.Value) bool { - call, ok := v.(*ssa.Call) - if !ok { - return false - } - return IsCallTo(call.Common(), "math.NaN") - } - for _, ssafn := range j.Program.InitialFunctions { - for _, block := range ssafn.Blocks { - for _, ins := range block.Instrs { - ins, ok := ins.(*ssa.BinOp) - if !ok { - continue - } - if isNaN(ins.X) || isNaN(ins.Y) { - j.Errorf(ins, "no value is equal to NaN, not even NaN itself") - } - } - } - } -} - -func (c *Checker) CheckInfiniteRecursion(j *lint.Job) { - for _, ssafn := range j.Program.InitialFunctions { - node := c.funcDescs.CallGraph.CreateNode(ssafn) - for _, edge := range node.Out { - if edge.Callee != node { - continue - } - if _, ok := edge.Site.(*ssa.Go); ok { - // Recursively spawning goroutines doesn't consume - // stack space infinitely, so don't flag it. - continue - } - - block := edge.Site.Block() - canReturn := false - for _, b := range ssafn.Blocks { - if block.Dominates(b) { - continue - } - if len(b.Instrs) == 0 { - continue - } - if _, ok := b.Instrs[len(b.Instrs)-1].(*ssa.Return); ok { - canReturn = true - break - } - } - if canReturn { - continue - } - j.Errorf(edge.Site, "infinite recursive call") - } - } -} - -func objectName(obj types.Object) string { - if obj == nil { - return "" - } - var name string - if obj.Pkg() != nil && obj.Pkg().Scope().Lookup(obj.Name()) == obj { - var s string - s = obj.Pkg().Path() - if s != "" { - name += s + "." 
- } - } - name += obj.Name() - return name -} - -func isName(j *lint.Job, expr ast.Expr, name string) bool { - var obj types.Object - switch expr := expr.(type) { - case *ast.Ident: - obj = ObjectOf(j, expr) - case *ast.SelectorExpr: - obj = ObjectOf(j, expr.Sel) - } - return objectName(obj) == name -} - -func (c *Checker) CheckLeakyTimeTick(j *lint.Job) { - for _, ssafn := range j.Program.InitialFunctions { - if IsInMain(j, ssafn) || IsInTest(j, ssafn) { - continue - } - for _, block := range ssafn.Blocks { - for _, ins := range block.Instrs { - call, ok := ins.(*ssa.Call) - if !ok || !IsCallTo(call.Common(), "time.Tick") { - continue - } - if c.funcDescs.Get(call.Parent()).Infinite { - continue - } - j.Errorf(call, "using time.Tick leaks the underlying ticker, consider using it only in endless functions, tests and the main package, and use time.NewTicker here") - } - } - } -} - -func (c *Checker) CheckDoubleNegation(j *lint.Job) { - fn := func(node ast.Node) bool { - unary1, ok := node.(*ast.UnaryExpr) - if !ok { - return true - } - unary2, ok := unary1.X.(*ast.UnaryExpr) - if !ok { - return true - } - if unary1.Op != token.NOT || unary2.Op != token.NOT { - return true - } - j.Errorf(unary1, "negating a boolean twice has no effect; is this a typo?") - return true - } - for _, f := range j.Program.Files { - ast.Inspect(f, fn) - } -} - -func hasSideEffects(node ast.Node) bool { - dynamic := false - ast.Inspect(node, func(node ast.Node) bool { - switch node := node.(type) { - case *ast.CallExpr: - dynamic = true - return false - case *ast.UnaryExpr: - if node.Op == token.ARROW { - dynamic = true - return false - } - } - return true - }) - return dynamic -} - -func (c *Checker) CheckRepeatedIfElse(j *lint.Job) { - seen := map[ast.Node]bool{} - - var collectConds func(ifstmt *ast.IfStmt, inits []ast.Stmt, conds []ast.Expr) ([]ast.Stmt, []ast.Expr) - collectConds = func(ifstmt *ast.IfStmt, inits []ast.Stmt, conds []ast.Expr) ([]ast.Stmt, []ast.Expr) { - seen[ifstmt] = true - if ifstmt.Init != nil { - inits = append(inits, ifstmt.Init) - } - conds = append(conds, ifstmt.Cond) - if elsestmt, ok := ifstmt.Else.(*ast.IfStmt); ok { - return collectConds(elsestmt, inits, conds) - } - return inits, conds - } - fn := func(node ast.Node) bool { - ifstmt, ok := node.(*ast.IfStmt) - if !ok { - return true - } - if seen[ifstmt] { - return true - } - inits, conds := collectConds(ifstmt, nil, nil) - if len(inits) > 0 { - return true - } - for _, cond := range conds { - if hasSideEffects(cond) { - return true - } - } - counts := map[string]int{} - for _, cond := range conds { - s := Render(j, cond) - counts[s]++ - if counts[s] == 2 { - j.Errorf(cond, "this condition occurs multiple times in this if/else if chain") - } - } - return true - } - for _, f := range j.Program.Files { - ast.Inspect(f, fn) - } -} - -func (c *Checker) CheckSillyBitwiseOps(j *lint.Job) { - for _, ssafn := range j.Program.InitialFunctions { - for _, block := range ssafn.Blocks { - for _, ins := range block.Instrs { - ins, ok := ins.(*ssa.BinOp) - if !ok { - continue - } - - if c, ok := ins.Y.(*ssa.Const); !ok || c.Value == nil || c.Value.Kind() != constant.Int || c.Uint64() != 0 { - continue - } - switch ins.Op { - case token.AND, token.OR, token.XOR: - default: - // we do not flag shifts because too often, x<<0 is part - // of a pattern, x<<0, x<<8, x<<16, ... 
- continue - } - path, _ := astutil.PathEnclosingInterval(j.File(ins), ins.Pos(), ins.Pos()) - if len(path) == 0 { - continue - } - if node, ok := path[0].(*ast.BinaryExpr); !ok || !IsZero(node.Y) { - continue - } - - switch ins.Op { - case token.AND: - j.Errorf(ins, "x & 0 always equals 0") - case token.OR, token.XOR: - j.Errorf(ins, "x %s 0 always equals x", ins.Op) - } - } - } - } -} - -func (c *Checker) CheckNonOctalFileMode(j *lint.Job) { - fn := func(node ast.Node) bool { - call, ok := node.(*ast.CallExpr) - if !ok { - return true - } - sig, ok := TypeOf(j, call.Fun).(*types.Signature) - if !ok { - return true - } - n := sig.Params().Len() - var args []int - for i := 0; i < n; i++ { - typ := sig.Params().At(i).Type() - if IsType(typ, "os.FileMode") { - args = append(args, i) - } - } - for _, i := range args { - lit, ok := call.Args[i].(*ast.BasicLit) - if !ok { - continue - } - if len(lit.Value) == 3 && - lit.Value[0] != '0' && - lit.Value[0] >= '0' && lit.Value[0] <= '7' && - lit.Value[1] >= '0' && lit.Value[1] <= '7' && - lit.Value[2] >= '0' && lit.Value[2] <= '7' { - - v, err := strconv.ParseInt(lit.Value, 10, 64) - if err != nil { - continue - } - j.Errorf(call.Args[i], "file mode '%s' evaluates to %#o; did you mean '0%s'?", lit.Value, v, lit.Value) - } - } - return true - } - for _, f := range j.Program.Files { - ast.Inspect(f, fn) - } -} - -func (c *Checker) CheckPureFunctions(j *lint.Job) { -fnLoop: - for _, ssafn := range j.Program.InitialFunctions { - if IsInTest(j, ssafn) { - params := ssafn.Signature.Params() - for i := 0; i < params.Len(); i++ { - param := params.At(i) - if IsType(param.Type(), "*testing.B") { - // Ignore discarded pure functions in code related - // to benchmarks. Instead of matching BenchmarkFoo - // functions, we match any function accepting a - // *testing.B. Benchmarks sometimes call generic - // functions for doing the actual work, and - // checking for the parameter is a lot easier and - // faster than analyzing call trees. - continue fnLoop - } - } - } - - for _, b := range ssafn.Blocks { - for _, ins := range b.Instrs { - ins, ok := ins.(*ssa.Call) - if !ok { - continue - } - refs := ins.Referrers() - if refs == nil || len(FilterDebug(*refs)) > 0 { - continue - } - callee := ins.Common().StaticCallee() - if callee == nil { - continue - } - if c.funcDescs.Get(callee).Pure && !c.funcDescs.Get(callee).Stub { - j.Errorf(ins, "%s is a pure function but its return value is ignored", callee.Name()) - continue - } - } - } - } -} - -func (c *Checker) isDeprecated(j *lint.Job, ident *ast.Ident) (bool, string) { - obj := ObjectOf(j, ident) - if obj.Pkg() == nil { - return false, "" - } - alt := c.deprecatedObjs[obj] - return alt != "", alt -} - -func (c *Checker) CheckDeprecated(j *lint.Job) { - // Selectors can appear outside of function literals, e.g. when - // declaring package level variables. 
- - var ssafn *ssa.Function - stack := 0 - fn := func(node ast.Node) bool { - if node == nil { - stack-- - } else { - stack++ - } - if stack == 1 { - ssafn = nil - } - if fn, ok := node.(*ast.FuncDecl); ok { - ssafn = j.Program.SSA.FuncValue(ObjectOf(j, fn.Name).(*types.Func)) - } - sel, ok := node.(*ast.SelectorExpr) - if !ok { - return true - } - - obj := ObjectOf(j, sel.Sel) - if obj.Pkg() == nil { - return true - } - nodePkg := j.NodePackage(node).Pkg - if nodePkg == obj.Pkg() || obj.Pkg().Path()+"_test" == nodePkg.Path() { - // Don't flag stuff in our own package - return true - } - if ok, alt := c.isDeprecated(j, sel.Sel); ok { - // Look for the first available alternative, not the first - // version something was deprecated in. If a function was - // deprecated in Go 1.6, an alternative has been available - // already in 1.0, and we're targeting 1.2, it still - // makes sense to use the alternative from 1.0, to be - // future-proof. - minVersion := deprecated.Stdlib[SelectorName(j, sel)].AlternativeAvailableSince - if !IsGoVersion(j, minVersion) { - return true - } - - if ssafn != nil { - if _, ok := c.deprecatedObjs[ssafn.Object()]; ok { - // functions that are deprecated may use deprecated - // symbols - return true - } - } - j.Errorf(sel, "%s is deprecated: %s", Render(j, sel), alt) - return true - } - return true - } - for _, f := range j.Program.Files { - ast.Inspect(f, fn) - } -} - -func (c *Checker) callChecker(rules map[string]CallCheck) func(j *lint.Job) { - return func(j *lint.Job) { - c.checkCalls(j, rules) - } -} - -func (c *Checker) checkCalls(j *lint.Job, rules map[string]CallCheck) { - for _, ssafn := range j.Program.InitialFunctions { - node := c.funcDescs.CallGraph.CreateNode(ssafn) - for _, edge := range node.Out { - callee := edge.Callee.Func - obj, ok := callee.Object().(*types.Func) - if !ok { - continue - } - - r, ok := rules[obj.FullName()] - if !ok { - continue - } - var args []*Argument - ssaargs := edge.Site.Common().Args - if callee.Signature.Recv() != nil { - ssaargs = ssaargs[1:] - } - for _, arg := range ssaargs { - if iarg, ok := arg.(*ssa.MakeInterface); ok { - arg = iarg.X - } - vr := c.funcDescs.Get(edge.Site.Parent()).Ranges[arg] - args = append(args, &Argument{Value: Value{arg, vr}}) - } - call := &Call{ - Job: j, - Instr: edge.Site, - Args: args, - Checker: c, - Parent: edge.Site.Parent(), - } - r(call) - for idx, arg := range call.Args { - _ = idx - for _, e := range arg.invalids { - // path, _ := astutil.PathEnclosingInterval(f.File, edge.Site.Pos(), edge.Site.Pos()) - // if len(path) < 2 { - // continue - // } - // astcall, ok := path[0].(*ast.CallExpr) - // if !ok { - // continue - // } - // j.Errorf(astcall.Args[idx], "%s", e) - - j.Errorf(edge.Site, "%s", e) - } - } - for _, e := range call.invalids { - j.Errorf(call.Instr.Common(), "%s", e) - } - } - } -} - -func unwrapFunction(val ssa.Value) *ssa.Function { - switch val := val.(type) { - case *ssa.Function: - return val - case *ssa.MakeClosure: - return val.Fn.(*ssa.Function) - default: - return nil - } -} - -func shortCallName(call *ssa.CallCommon) string { - if call.IsInvoke() { - return "" - } - switch v := call.Value.(type) { - case *ssa.Function: - fn, ok := v.Object().(*types.Func) - if !ok { - return "" - } - return fn.Name() - case *ssa.Builtin: - return v.Name() - } - return "" -} - -func hasCallTo(block *ssa.BasicBlock, name string) bool { - for _, ins := range block.Instrs { - call, ok := ins.(*ssa.Call) - if !ok { - continue - } - if IsCallTo(call.Common(), name) { - return 
true - } - } - return false -} - -func (c *Checker) CheckWriterBufferModified(j *lint.Job) { - // TODO(dh): this might be a good candidate for taint analysis. - // Taint the argument as MUST_NOT_MODIFY, then propagate that - // through functions like bytes.Split - - for _, ssafn := range j.Program.InitialFunctions { - sig := ssafn.Signature - if ssafn.Name() != "Write" || sig.Recv() == nil || sig.Params().Len() != 1 || sig.Results().Len() != 2 { - continue - } - tArg, ok := sig.Params().At(0).Type().(*types.Slice) - if !ok { - continue - } - if basic, ok := tArg.Elem().(*types.Basic); !ok || basic.Kind() != types.Byte { - continue - } - if basic, ok := sig.Results().At(0).Type().(*types.Basic); !ok || basic.Kind() != types.Int { - continue - } - if named, ok := sig.Results().At(1).Type().(*types.Named); !ok || !IsType(named, "error") { - continue - } - - for _, block := range ssafn.Blocks { - for _, ins := range block.Instrs { - switch ins := ins.(type) { - case *ssa.Store: - addr, ok := ins.Addr.(*ssa.IndexAddr) - if !ok { - continue - } - if addr.X != ssafn.Params[1] { - continue - } - j.Errorf(ins, "io.Writer.Write must not modify the provided buffer, not even temporarily") - case *ssa.Call: - if !IsCallTo(ins.Common(), "append") { - continue - } - if ins.Common().Args[0] != ssafn.Params[1] { - continue - } - j.Errorf(ins, "io.Writer.Write must not modify the provided buffer, not even temporarily") - } - } - } - } -} - -func loopedRegexp(name string) CallCheck { - return func(call *Call) { - if len(extractConsts(call.Args[0].Value.Value)) == 0 { - return - } - if !call.Checker.isInLoop(call.Instr.Block()) { - return - } - call.Invalid(fmt.Sprintf("calling %s in a loop has poor performance, consider using regexp.Compile", name)) - } -} - -func (c *Checker) CheckEmptyBranch(j *lint.Job) { - for _, ssafn := range j.Program.InitialFunctions { - if ssafn.Syntax() == nil { - continue - } - if IsGenerated(j.File(ssafn.Syntax())) { - continue - } - if IsExample(ssafn) { - continue - } - fn := func(node ast.Node) bool { - ifstmt, ok := node.(*ast.IfStmt) - if !ok { - return true - } - if ifstmt.Else != nil { - b, ok := ifstmt.Else.(*ast.BlockStmt) - if !ok || len(b.List) != 0 { - return true - } - j.Errorf(ifstmt.Else, "empty branch") - } - if len(ifstmt.Body.List) != 0 { - return true - } - j.Errorf(ifstmt, "empty branch") - return true - } - Inspect(ssafn.Syntax(), fn) - } -} - -func (c *Checker) CheckMapBytesKey(j *lint.Job) { - for _, fn := range j.Program.InitialFunctions { - for _, b := range fn.Blocks { - insLoop: - for _, ins := range b.Instrs { - // find []byte -> string conversions - conv, ok := ins.(*ssa.Convert) - if !ok || conv.Type() != types.Universe.Lookup("string").Type() { - continue - } - if s, ok := conv.X.Type().(*types.Slice); !ok || s.Elem() != types.Universe.Lookup("byte").Type() { - continue - } - refs := conv.Referrers() - // need at least two (DebugRef) references: the - // conversion and the *ast.Ident - if refs == nil || len(*refs) < 2 { - continue - } - ident := false - // skip first reference, that's the conversion itself - for _, ref := range (*refs)[1:] { - switch ref := ref.(type) { - case *ssa.DebugRef: - if _, ok := ref.Expr.(*ast.Ident); !ok { - // the string seems to be used somewhere - // unexpected; the default branch should - // catch this already, but be safe - continue insLoop - } else { - ident = true - } - case *ssa.Lookup: - default: - // the string is used somewhere else than a - // map lookup - continue insLoop - } - } - - // the result of the 
conversion wasn't assigned to an - // identifier - if !ident { - continue - } - j.Errorf(conv, "m[string(key)] would be more efficient than k := string(key); m[k]") - } - } - } -} - -func (c *Checker) CheckRangeStringRunes(j *lint.Job) { - sharedcheck.CheckRangeStringRunes(j) -} - -func (c *Checker) CheckSelfAssignment(j *lint.Job) { - fn := func(node ast.Node) bool { - assign, ok := node.(*ast.AssignStmt) - if !ok { - return true - } - if assign.Tok != token.ASSIGN || len(assign.Lhs) != len(assign.Rhs) { - return true - } - for i, stmt := range assign.Lhs { - rlh := Render(j, stmt) - rrh := Render(j, assign.Rhs[i]) - if rlh == rrh { - j.Errorf(assign, "self-assignment of %s to %s", rrh, rlh) - } - } - return true - } - for _, f := range c.filterGenerated(j.Program.Files) { - ast.Inspect(f, fn) - } -} - -func buildTagsIdentical(s1, s2 []string) bool { - if len(s1) != len(s2) { - return false - } - s1s := make([]string, len(s1)) - copy(s1s, s1) - sort.Strings(s1s) - s2s := make([]string, len(s2)) - copy(s2s, s2) - sort.Strings(s2s) - for i, s := range s1s { - if s != s2s[i] { - return false - } - } - return true -} - -func (c *Checker) CheckDuplicateBuildConstraints(job *lint.Job) { - for _, f := range c.filterGenerated(job.Program.Files) { - constraints := buildTags(f) - for i, constraint1 := range constraints { - for j, constraint2 := range constraints { - if i >= j { - continue - } - if buildTagsIdentical(constraint1, constraint2) { - job.Errorf(f, "identical build constraints %q and %q", - strings.Join(constraint1, " "), - strings.Join(constraint2, " ")) - } - } - } - } -} - -func (c *Checker) CheckSillyRegexp(j *lint.Job) { - // We could use the rule checking engine for this, but the - // arguments aren't really invalid. - for _, fn := range j.Program.InitialFunctions { - for _, b := range fn.Blocks { - for _, ins := range b.Instrs { - call, ok := ins.(*ssa.Call) - if !ok { - continue - } - switch CallName(call.Common()) { - case "regexp.MustCompile", "regexp.Compile", "regexp.Match", "regexp.MatchReader", "regexp.MatchString": - default: - continue - } - c, ok := call.Common().Args[0].(*ssa.Const) - if !ok { - continue - } - s := constant.StringVal(c.Value) - re, err := syntax.Parse(s, 0) - if err != nil { - continue - } - if re.Op != syntax.OpLiteral && re.Op != syntax.OpEmptyMatch { - continue - } - j.Errorf(call, "regular expression does not contain any meta characters") - } - } - } -} - -func (c *Checker) CheckMissingEnumTypesInDeclaration(j *lint.Job) { - fn := func(node ast.Node) bool { - decl, ok := node.(*ast.GenDecl) - if !ok { - return true - } - if !decl.Lparen.IsValid() { - // not a parenthesised gendecl - // - // TODO(dh): do we need this check, considering we require - // decl.Specs to contain 2+ elements? 
- return true - } - if decl.Tok != token.CONST { - return true - } - if len(decl.Specs) < 2 { - return true - } - if decl.Specs[0].(*ast.ValueSpec).Type == nil { - // first constant doesn't have a type - return true - } - for i, spec := range decl.Specs { - spec := spec.(*ast.ValueSpec) - if len(spec.Names) != 1 || len(spec.Values) != 1 { - return true - } - switch v := spec.Values[0].(type) { - case *ast.BasicLit: - case *ast.UnaryExpr: - if _, ok := v.X.(*ast.BasicLit); !ok { - return true - } - default: - // if it's not a literal it might be typed, such as - // time.Microsecond = 1000 * Nanosecond - return true - } - if i == 0 { - continue - } - if spec.Type != nil { - return true - } - } - j.Errorf(decl, "only the first constant has an explicit type") - return true - } - for _, f := range j.Program.Files { - ast.Inspect(f, fn) - } -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/staticcheck/rules.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/staticcheck/rules.go deleted file mode 100644 index d6af573c218e..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/staticcheck/rules.go +++ /dev/null @@ -1,322 +0,0 @@ -package staticcheck - -import ( - "fmt" - "go/constant" - "go/types" - "net" - "net/url" - "regexp" - "sort" - "strconv" - "strings" - "time" - "unicode/utf8" - - "honnef.co/go/tools/lint" - . "honnef.co/go/tools/lint/lintdsl" - "honnef.co/go/tools/ssa" - "honnef.co/go/tools/staticcheck/vrp" -) - -const ( - MsgInvalidHostPort = "invalid port or service name in host:port pair" - MsgInvalidUTF8 = "argument is not a valid UTF-8 encoded string" - MsgNonUniqueCutset = "cutset contains duplicate characters" -) - -type Call struct { - Job *lint.Job - Instr ssa.CallInstruction - Args []*Argument - - Checker *Checker - Parent *ssa.Function - - invalids []string -} - -func (c *Call) Invalid(msg string) { - c.invalids = append(c.invalids, msg) -} - -type Argument struct { - Value Value - invalids []string -} - -func (arg *Argument) Invalid(msg string) { - arg.invalids = append(arg.invalids, msg) -} - -type Value struct { - Value ssa.Value - Range vrp.Range -} - -type CallCheck func(call *Call) - -func extractConsts(v ssa.Value) []*ssa.Const { - switch v := v.(type) { - case *ssa.Const: - return []*ssa.Const{v} - case *ssa.MakeInterface: - return extractConsts(v.X) - default: - return nil - } -} - -func ValidateRegexp(v Value) error { - for _, c := range extractConsts(v.Value) { - if c.Value == nil { - continue - } - if c.Value.Kind() != constant.String { - continue - } - s := constant.StringVal(c.Value) - if _, err := regexp.Compile(s); err != nil { - return err - } - } - return nil -} - -func ValidateTimeLayout(v Value) error { - for _, c := range extractConsts(v.Value) { - if c.Value == nil { - continue - } - if c.Value.Kind() != constant.String { - continue - } - s := constant.StringVal(c.Value) - s = strings.Replace(s, "_", " ", -1) - s = strings.Replace(s, "Z", "-", -1) - _, err := time.Parse(s, s) - if err != nil { - return err - } - } - return nil -} - -func ValidateURL(v Value) error { - for _, c := range extractConsts(v.Value) { - if c.Value == nil { - continue - } - if c.Value.Kind() != constant.String { - continue - } - s := constant.StringVal(c.Value) - _, err := url.Parse(s) - if err != nil { - return fmt.Errorf("%q is not a valid URL: %s", s, err) - } - } - return nil -} - -func IntValue(v Value, z vrp.Z) bool { - r, ok := v.Range.(vrp.IntInterval) - if !ok || !r.IsKnown() { - return false - } - if r.Lower != r.Upper { - return false 
- } - if r.Lower.Cmp(z) == 0 { - return true - } - return false -} - -func InvalidUTF8(v Value) bool { - for _, c := range extractConsts(v.Value) { - if c.Value == nil { - continue - } - if c.Value.Kind() != constant.String { - continue - } - s := constant.StringVal(c.Value) - if !utf8.ValidString(s) { - return true - } - } - return false -} - -func UnbufferedChannel(v Value) bool { - r, ok := v.Range.(vrp.ChannelInterval) - if !ok || !r.IsKnown() { - return false - } - if r.Size.Lower.Cmp(vrp.NewZ(0)) == 0 && - r.Size.Upper.Cmp(vrp.NewZ(0)) == 0 { - return true - } - return false -} - -func Pointer(v Value) bool { - switch v.Value.Type().Underlying().(type) { - case *types.Pointer, *types.Interface: - return true - } - return false -} - -func ConvertedFromInt(v Value) bool { - conv, ok := v.Value.(*ssa.Convert) - if !ok { - return false - } - b, ok := conv.X.Type().Underlying().(*types.Basic) - if !ok { - return false - } - if (b.Info() & types.IsInteger) == 0 { - return false - } - return true -} - -func validEncodingBinaryType(j *lint.Job, typ types.Type) bool { - typ = typ.Underlying() - switch typ := typ.(type) { - case *types.Basic: - switch typ.Kind() { - case types.Uint8, types.Uint16, types.Uint32, types.Uint64, - types.Int8, types.Int16, types.Int32, types.Int64, - types.Float32, types.Float64, types.Complex64, types.Complex128, types.Invalid: - return true - case types.Bool: - return IsGoVersion(j, 8) - } - return false - case *types.Struct: - n := typ.NumFields() - for i := 0; i < n; i++ { - if !validEncodingBinaryType(j, typ.Field(i).Type()) { - return false - } - } - return true - case *types.Array: - return validEncodingBinaryType(j, typ.Elem()) - case *types.Interface: - // we can't determine if it's a valid type or not - return true - } - return false -} - -func CanBinaryMarshal(j *lint.Job, v Value) bool { - typ := v.Value.Type().Underlying() - if ttyp, ok := typ.(*types.Pointer); ok { - typ = ttyp.Elem().Underlying() - } - if ttyp, ok := typ.(interface { - Elem() types.Type - }); ok { - if _, ok := ttyp.(*types.Pointer); !ok { - typ = ttyp.Elem() - } - } - - return validEncodingBinaryType(j, typ) -} - -func RepeatZeroTimes(name string, arg int) CallCheck { - return func(call *Call) { - arg := call.Args[arg] - if IntValue(arg.Value, vrp.NewZ(0)) { - arg.Invalid(fmt.Sprintf("calling %s with n == 0 will return no results, did you mean -1?", name)) - } - } -} - -func validateServiceName(s string) bool { - if len(s) < 1 || len(s) > 15 { - return false - } - if s[0] == '-' || s[len(s)-1] == '-' { - return false - } - if strings.Contains(s, "--") { - return false - } - hasLetter := false - for _, r := range s { - if (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') { - hasLetter = true - continue - } - if r >= '0' && r <= '9' { - continue - } - return false - } - return hasLetter -} - -func validatePort(s string) bool { - n, err := strconv.ParseInt(s, 10, 64) - if err != nil { - return validateServiceName(s) - } - return n >= 0 && n <= 65535 -} - -func ValidHostPort(v Value) bool { - for _, k := range extractConsts(v.Value) { - if k.Value == nil { - continue - } - if k.Value.Kind() != constant.String { - continue - } - s := constant.StringVal(k.Value) - _, port, err := net.SplitHostPort(s) - if err != nil { - return false - } - // TODO(dh): check hostname - if !validatePort(port) { - return false - } - } - return true -} - -// ConvertedFrom reports whether value v was converted from type typ. 
-func ConvertedFrom(v Value, typ string) bool { - change, ok := v.Value.(*ssa.ChangeType) - return ok && IsType(change.X.Type(), typ) -} - -func UniqueStringCutset(v Value) bool { - for _, c := range extractConsts(v.Value) { - if c.Value == nil { - continue - } - if c.Value.Kind() != constant.String { - continue - } - s := constant.StringVal(c.Value) - rs := runeSlice(s) - if len(rs) < 2 { - continue - } - sort.Sort(rs) - for i, r := range rs[1:] { - if rs[i] == r { - return false - } - } - } - return true -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/staticcheck/vrp/channel.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/staticcheck/vrp/channel.go deleted file mode 100644 index 0ef73787ba39..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/staticcheck/vrp/channel.go +++ /dev/null @@ -1,73 +0,0 @@ -package vrp - -import ( - "fmt" - - "honnef.co/go/tools/ssa" -) - -type ChannelInterval struct { - Size IntInterval -} - -func (c ChannelInterval) Union(other Range) Range { - i, ok := other.(ChannelInterval) - if !ok { - i = ChannelInterval{EmptyIntInterval} - } - if c.Size.Empty() || !c.Size.IsKnown() { - return i - } - if i.Size.Empty() || !i.Size.IsKnown() { - return c - } - return ChannelInterval{ - Size: c.Size.Union(i.Size).(IntInterval), - } -} - -func (c ChannelInterval) String() string { - return c.Size.String() -} - -func (c ChannelInterval) IsKnown() bool { - return c.Size.IsKnown() -} - -type MakeChannelConstraint struct { - aConstraint - Buffer ssa.Value -} -type ChannelChangeTypeConstraint struct { - aConstraint - X ssa.Value -} - -func NewMakeChannelConstraint(buffer, y ssa.Value) Constraint { - return &MakeChannelConstraint{NewConstraint(y), buffer} -} -func NewChannelChangeTypeConstraint(x, y ssa.Value) Constraint { - return &ChannelChangeTypeConstraint{NewConstraint(y), x} -} - -func (c *MakeChannelConstraint) Operands() []ssa.Value { return []ssa.Value{c.Buffer} } -func (c *ChannelChangeTypeConstraint) Operands() []ssa.Value { return []ssa.Value{c.X} } - -func (c *MakeChannelConstraint) String() string { - return fmt.Sprintf("%s = make(chan, %s)", c.Y().Name(), c.Buffer.Name()) -} -func (c *ChannelChangeTypeConstraint) String() string { - return fmt.Sprintf("%s = changetype(%s)", c.Y().Name(), c.X.Name()) -} - -func (c *MakeChannelConstraint) Eval(g *Graph) Range { - i, ok := g.Range(c.Buffer).(IntInterval) - if !ok { - return ChannelInterval{NewIntInterval(NewZ(0), PInfinity)} - } - if i.Lower.Sign() == -1 { - i.Lower = NewZ(0) - } - return ChannelInterval{i} -} -func (c *ChannelChangeTypeConstraint) Eval(g *Graph) Range { return g.Range(c.X) } diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/staticcheck/vrp/int.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/staticcheck/vrp/int.go deleted file mode 100644 index 926bb7af3d62..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/staticcheck/vrp/int.go +++ /dev/null @@ -1,476 +0,0 @@ -package vrp - -import ( - "fmt" - "go/token" - "go/types" - "math/big" - - "honnef.co/go/tools/ssa" -) - -type Zs []Z - -func (zs Zs) Len() int { - return len(zs) -} - -func (zs Zs) Less(i int, j int) bool { - return zs[i].Cmp(zs[j]) == -1 -} - -func (zs Zs) Swap(i int, j int) { - zs[i], zs[j] = zs[j], zs[i] -} - -type Z struct { - infinity int8 - integer *big.Int -} - -func NewZ(n int64) Z { - return NewBigZ(big.NewInt(n)) -} - -func NewBigZ(n *big.Int) Z { - return Z{integer: n} -} - -func (z1 Z) Infinite() bool { - return z1.infinity != 0 -} - -func (z1 Z) 
Add(z2 Z) Z { - if z2.Sign() == -1 { - return z1.Sub(z2.Negate()) - } - if z1 == NInfinity { - return NInfinity - } - if z1 == PInfinity { - return PInfinity - } - if z2 == PInfinity { - return PInfinity - } - - if !z1.Infinite() && !z2.Infinite() { - n := &big.Int{} - n.Add(z1.integer, z2.integer) - return NewBigZ(n) - } - - panic(fmt.Sprintf("%s + %s is not defined", z1, z2)) -} - -func (z1 Z) Sub(z2 Z) Z { - if z2.Sign() == -1 { - return z1.Add(z2.Negate()) - } - if !z1.Infinite() && !z2.Infinite() { - n := &big.Int{} - n.Sub(z1.integer, z2.integer) - return NewBigZ(n) - } - - if z1 != PInfinity && z2 == PInfinity { - return NInfinity - } - if z1.Infinite() && !z2.Infinite() { - return Z{infinity: z1.infinity} - } - if z1 == PInfinity && z2 == PInfinity { - return PInfinity - } - panic(fmt.Sprintf("%s - %s is not defined", z1, z2)) -} - -func (z1 Z) Mul(z2 Z) Z { - if (z1.integer != nil && z1.integer.Sign() == 0) || - (z2.integer != nil && z2.integer.Sign() == 0) { - return NewBigZ(&big.Int{}) - } - - if z1.infinity != 0 || z2.infinity != 0 { - return Z{infinity: int8(z1.Sign() * z2.Sign())} - } - - n := &big.Int{} - n.Mul(z1.integer, z2.integer) - return NewBigZ(n) -} - -func (z1 Z) Negate() Z { - if z1.infinity == 1 { - return NInfinity - } - if z1.infinity == -1 { - return PInfinity - } - n := &big.Int{} - n.Neg(z1.integer) - return NewBigZ(n) -} - -func (z1 Z) Sign() int { - if z1.infinity != 0 { - return int(z1.infinity) - } - return z1.integer.Sign() -} - -func (z1 Z) String() string { - if z1 == NInfinity { - return "-∞" - } - if z1 == PInfinity { - return "∞" - } - return fmt.Sprintf("%d", z1.integer) -} - -func (z1 Z) Cmp(z2 Z) int { - if z1.infinity == z2.infinity && z1.infinity != 0 { - return 0 - } - if z1 == PInfinity { - return 1 - } - if z1 == NInfinity { - return -1 - } - if z2 == NInfinity { - return 1 - } - if z2 == PInfinity { - return -1 - } - return z1.integer.Cmp(z2.integer) -} - -func MaxZ(zs ...Z) Z { - if len(zs) == 0 { - panic("Max called with no arguments") - } - if len(zs) == 1 { - return zs[0] - } - ret := zs[0] - for _, z := range zs[1:] { - if z.Cmp(ret) == 1 { - ret = z - } - } - return ret -} - -func MinZ(zs ...Z) Z { - if len(zs) == 0 { - panic("Min called with no arguments") - } - if len(zs) == 1 { - return zs[0] - } - ret := zs[0] - for _, z := range zs[1:] { - if z.Cmp(ret) == -1 { - ret = z - } - } - return ret -} - -var NInfinity = Z{infinity: -1} -var PInfinity = Z{infinity: 1} -var EmptyIntInterval = IntInterval{true, PInfinity, NInfinity} - -func InfinityFor(v ssa.Value) IntInterval { - if b, ok := v.Type().Underlying().(*types.Basic); ok { - if (b.Info() & types.IsUnsigned) != 0 { - return NewIntInterval(NewZ(0), PInfinity) - } - } - return NewIntInterval(NInfinity, PInfinity) -} - -type IntInterval struct { - known bool - Lower Z - Upper Z -} - -func NewIntInterval(l, u Z) IntInterval { - if u.Cmp(l) == -1 { - return EmptyIntInterval - } - return IntInterval{known: true, Lower: l, Upper: u} -} - -func (i IntInterval) IsKnown() bool { - return i.known -} - -func (i IntInterval) Empty() bool { - return i.Lower == PInfinity && i.Upper == NInfinity -} - -func (i IntInterval) IsMaxRange() bool { - return i.Lower == NInfinity && i.Upper == PInfinity -} - -func (i1 IntInterval) Intersection(i2 IntInterval) IntInterval { - if !i1.IsKnown() { - return i2 - } - if !i2.IsKnown() { - return i1 - } - if i1.Empty() || i2.Empty() { - return EmptyIntInterval - } - i3 := NewIntInterval(MaxZ(i1.Lower, i2.Lower), MinZ(i1.Upper, i2.Upper)) - if 
i3.Lower.Cmp(i3.Upper) == 1 { - return EmptyIntInterval - } - return i3 -} - -func (i1 IntInterval) Union(other Range) Range { - i2, ok := other.(IntInterval) - if !ok { - i2 = EmptyIntInterval - } - if i1.Empty() || !i1.IsKnown() { - return i2 - } - if i2.Empty() || !i2.IsKnown() { - return i1 - } - return NewIntInterval(MinZ(i1.Lower, i2.Lower), MaxZ(i1.Upper, i2.Upper)) -} - -func (i1 IntInterval) Add(i2 IntInterval) IntInterval { - if i1.Empty() || i2.Empty() { - return EmptyIntInterval - } - l1, u1, l2, u2 := i1.Lower, i1.Upper, i2.Lower, i2.Upper - return NewIntInterval(l1.Add(l2), u1.Add(u2)) -} - -func (i1 IntInterval) Sub(i2 IntInterval) IntInterval { - if i1.Empty() || i2.Empty() { - return EmptyIntInterval - } - l1, u1, l2, u2 := i1.Lower, i1.Upper, i2.Lower, i2.Upper - return NewIntInterval(l1.Sub(u2), u1.Sub(l2)) -} - -func (i1 IntInterval) Mul(i2 IntInterval) IntInterval { - if i1.Empty() || i2.Empty() { - return EmptyIntInterval - } - x1, x2 := i1.Lower, i1.Upper - y1, y2 := i2.Lower, i2.Upper - return NewIntInterval( - MinZ(x1.Mul(y1), x1.Mul(y2), x2.Mul(y1), x2.Mul(y2)), - MaxZ(x1.Mul(y1), x1.Mul(y2), x2.Mul(y1), x2.Mul(y2)), - ) -} - -func (i1 IntInterval) String() string { - if !i1.IsKnown() { - return "[⊥, ⊥]" - } - if i1.Empty() { - return "{}" - } - return fmt.Sprintf("[%s, %s]", i1.Lower, i1.Upper) -} - -type IntArithmeticConstraint struct { - aConstraint - A ssa.Value - B ssa.Value - Op token.Token - Fn func(IntInterval, IntInterval) IntInterval -} - -type IntAddConstraint struct{ *IntArithmeticConstraint } -type IntSubConstraint struct{ *IntArithmeticConstraint } -type IntMulConstraint struct{ *IntArithmeticConstraint } - -type IntConversionConstraint struct { - aConstraint - X ssa.Value -} - -type IntIntersectionConstraint struct { - aConstraint - ranges Ranges - A ssa.Value - B ssa.Value - Op token.Token - I IntInterval - resolved bool -} - -type IntIntervalConstraint struct { - aConstraint - I IntInterval -} - -func NewIntArithmeticConstraint(a, b, y ssa.Value, op token.Token, fn func(IntInterval, IntInterval) IntInterval) *IntArithmeticConstraint { - return &IntArithmeticConstraint{NewConstraint(y), a, b, op, fn} -} -func NewIntAddConstraint(a, b, y ssa.Value) Constraint { - return &IntAddConstraint{NewIntArithmeticConstraint(a, b, y, token.ADD, IntInterval.Add)} -} -func NewIntSubConstraint(a, b, y ssa.Value) Constraint { - return &IntSubConstraint{NewIntArithmeticConstraint(a, b, y, token.SUB, IntInterval.Sub)} -} -func NewIntMulConstraint(a, b, y ssa.Value) Constraint { - return &IntMulConstraint{NewIntArithmeticConstraint(a, b, y, token.MUL, IntInterval.Mul)} -} -func NewIntConversionConstraint(x, y ssa.Value) Constraint { - return &IntConversionConstraint{NewConstraint(y), x} -} -func NewIntIntersectionConstraint(a, b ssa.Value, op token.Token, ranges Ranges, y ssa.Value) Constraint { - return &IntIntersectionConstraint{ - aConstraint: NewConstraint(y), - ranges: ranges, - A: a, - B: b, - Op: op, - } -} -func NewIntIntervalConstraint(i IntInterval, y ssa.Value) Constraint { - return &IntIntervalConstraint{NewConstraint(y), i} -} - -func (c *IntArithmeticConstraint) Operands() []ssa.Value { return []ssa.Value{c.A, c.B} } -func (c *IntConversionConstraint) Operands() []ssa.Value { return []ssa.Value{c.X} } -func (c *IntIntersectionConstraint) Operands() []ssa.Value { return []ssa.Value{c.A} } -func (s *IntIntervalConstraint) Operands() []ssa.Value { return nil } - -func (c *IntArithmeticConstraint) String() string { - return fmt.Sprintf("%s = %s %s %s", 
c.Y().Name(), c.A.Name(), c.Op, c.B.Name()) -} -func (c *IntConversionConstraint) String() string { - return fmt.Sprintf("%s = %s(%s)", c.Y().Name(), c.Y().Type(), c.X.Name()) -} -func (c *IntIntersectionConstraint) String() string { - return fmt.Sprintf("%s = %s %s %s (%t branch)", c.Y().Name(), c.A.Name(), c.Op, c.B.Name(), c.Y().(*ssa.Sigma).Branch) -} -func (c *IntIntervalConstraint) String() string { return fmt.Sprintf("%s = %s", c.Y().Name(), c.I) } - -func (c *IntArithmeticConstraint) Eval(g *Graph) Range { - i1, i2 := g.Range(c.A).(IntInterval), g.Range(c.B).(IntInterval) - if !i1.IsKnown() || !i2.IsKnown() { - return IntInterval{} - } - return c.Fn(i1, i2) -} -func (c *IntConversionConstraint) Eval(g *Graph) Range { - s := &types.StdSizes{ - // XXX is it okay to assume the largest word size, or do we - // need to be platform specific? - WordSize: 8, - MaxAlign: 1, - } - fromI := g.Range(c.X).(IntInterval) - toI := g.Range(c.Y()).(IntInterval) - fromT := c.X.Type().Underlying().(*types.Basic) - toT := c.Y().Type().Underlying().(*types.Basic) - fromB := s.Sizeof(c.X.Type()) - toB := s.Sizeof(c.Y().Type()) - - if !fromI.IsKnown() { - return toI - } - if !toI.IsKnown() { - return fromI - } - - // uint -> sint/uint, M > N: [max(0, l1), min(2**N-1, u2)] - if (fromT.Info()&types.IsUnsigned != 0) && - toB > fromB { - - n := big.NewInt(1) - n.Lsh(n, uint(fromB*8)) - n.Sub(n, big.NewInt(1)) - return NewIntInterval( - MaxZ(NewZ(0), fromI.Lower), - MinZ(NewBigZ(n), toI.Upper), - ) - } - - // sint -> sint, M > N; [max(-∞, l1), min(2**N-1, u2)] - if (fromT.Info()&types.IsUnsigned == 0) && - (toT.Info()&types.IsUnsigned == 0) && - toB > fromB { - - n := big.NewInt(1) - n.Lsh(n, uint(fromB*8)) - n.Sub(n, big.NewInt(1)) - return NewIntInterval( - MaxZ(NInfinity, fromI.Lower), - MinZ(NewBigZ(n), toI.Upper), - ) - } - - return fromI -} -func (c *IntIntersectionConstraint) Eval(g *Graph) Range { - xi := g.Range(c.A).(IntInterval) - if !xi.IsKnown() { - return c.I - } - return xi.Intersection(c.I) -} -func (c *IntIntervalConstraint) Eval(*Graph) Range { return c.I } - -func (c *IntIntersectionConstraint) Futures() []ssa.Value { - return []ssa.Value{c.B} -} - -func (c *IntIntersectionConstraint) Resolve() { - r, ok := c.ranges[c.B].(IntInterval) - if !ok { - c.I = InfinityFor(c.Y()) - return - } - - switch c.Op { - case token.EQL: - c.I = r - case token.GTR: - c.I = NewIntInterval(r.Lower.Add(NewZ(1)), PInfinity) - case token.GEQ: - c.I = NewIntInterval(r.Lower, PInfinity) - case token.LSS: - // TODO(dh): do we need 0 instead of NInfinity for uints? 
- c.I = NewIntInterval(NInfinity, r.Upper.Sub(NewZ(1))) - case token.LEQ: - c.I = NewIntInterval(NInfinity, r.Upper) - case token.NEQ: - c.I = InfinityFor(c.Y()) - default: - panic("unsupported op " + c.Op.String()) - } -} - -func (c *IntIntersectionConstraint) IsKnown() bool { - return c.I.IsKnown() -} - -func (c *IntIntersectionConstraint) MarkUnresolved() { - c.resolved = false -} - -func (c *IntIntersectionConstraint) MarkResolved() { - c.resolved = true -} - -func (c *IntIntersectionConstraint) IsResolved() bool { - return c.resolved -} diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/staticcheck/vrp/slice.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/staticcheck/vrp/slice.go deleted file mode 100644 index 40658dd8d860..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/staticcheck/vrp/slice.go +++ /dev/null @@ -1,273 +0,0 @@ -package vrp - -// TODO(dh): most of the constraints have implementations identical to -// that of strings. Consider reusing them. - -import ( - "fmt" - "go/types" - - "honnef.co/go/tools/ssa" -) - -type SliceInterval struct { - Length IntInterval -} - -func (s SliceInterval) Union(other Range) Range { - i, ok := other.(SliceInterval) - if !ok { - i = SliceInterval{EmptyIntInterval} - } - if s.Length.Empty() || !s.Length.IsKnown() { - return i - } - if i.Length.Empty() || !i.Length.IsKnown() { - return s - } - return SliceInterval{ - Length: s.Length.Union(i.Length).(IntInterval), - } -} -func (s SliceInterval) String() string { return s.Length.String() } -func (s SliceInterval) IsKnown() bool { return s.Length.IsKnown() } - -type SliceAppendConstraint struct { - aConstraint - A ssa.Value - B ssa.Value -} - -type SliceSliceConstraint struct { - aConstraint - X ssa.Value - Lower ssa.Value - Upper ssa.Value -} - -type ArraySliceConstraint struct { - aConstraint - X ssa.Value - Lower ssa.Value - Upper ssa.Value -} - -type SliceIntersectionConstraint struct { - aConstraint - X ssa.Value - I IntInterval -} - -type SliceLengthConstraint struct { - aConstraint - X ssa.Value -} - -type MakeSliceConstraint struct { - aConstraint - Size ssa.Value -} - -type SliceIntervalConstraint struct { - aConstraint - I IntInterval -} - -func NewSliceAppendConstraint(a, b, y ssa.Value) Constraint { - return &SliceAppendConstraint{NewConstraint(y), a, b} -} -func NewSliceSliceConstraint(x, lower, upper, y ssa.Value) Constraint { - return &SliceSliceConstraint{NewConstraint(y), x, lower, upper} -} -func NewArraySliceConstraint(x, lower, upper, y ssa.Value) Constraint { - return &ArraySliceConstraint{NewConstraint(y), x, lower, upper} -} -func NewSliceIntersectionConstraint(x ssa.Value, i IntInterval, y ssa.Value) Constraint { - return &SliceIntersectionConstraint{NewConstraint(y), x, i} -} -func NewSliceLengthConstraint(x, y ssa.Value) Constraint { - return &SliceLengthConstraint{NewConstraint(y), x} -} -func NewMakeSliceConstraint(size, y ssa.Value) Constraint { - return &MakeSliceConstraint{NewConstraint(y), size} -} -func NewSliceIntervalConstraint(i IntInterval, y ssa.Value) Constraint { - return &SliceIntervalConstraint{NewConstraint(y), i} -} - -func (c *SliceAppendConstraint) Operands() []ssa.Value { return []ssa.Value{c.A, c.B} } -func (c *SliceSliceConstraint) Operands() []ssa.Value { - ops := []ssa.Value{c.X} - if c.Lower != nil { - ops = append(ops, c.Lower) - } - if c.Upper != nil { - ops = append(ops, c.Upper) - } - return ops -} -func (c *ArraySliceConstraint) Operands() []ssa.Value { - ops := []ssa.Value{c.X} - if c.Lower != nil { - 
ops = append(ops, c.Lower) - } - if c.Upper != nil { - ops = append(ops, c.Upper) - } - return ops -} -func (c *SliceIntersectionConstraint) Operands() []ssa.Value { return []ssa.Value{c.X} } -func (c *SliceLengthConstraint) Operands() []ssa.Value { return []ssa.Value{c.X} } -func (c *MakeSliceConstraint) Operands() []ssa.Value { return []ssa.Value{c.Size} } -func (s *SliceIntervalConstraint) Operands() []ssa.Value { return nil } - -func (c *SliceAppendConstraint) String() string { - return fmt.Sprintf("%s = append(%s, %s)", c.Y().Name(), c.A.Name(), c.B.Name()) -} -func (c *SliceSliceConstraint) String() string { - var lname, uname string - if c.Lower != nil { - lname = c.Lower.Name() - } - if c.Upper != nil { - uname = c.Upper.Name() - } - return fmt.Sprintf("%s[%s:%s]", c.X.Name(), lname, uname) -} -func (c *ArraySliceConstraint) String() string { - var lname, uname string - if c.Lower != nil { - lname = c.Lower.Name() - } - if c.Upper != nil { - uname = c.Upper.Name() - } - return fmt.Sprintf("%s[%s:%s]", c.X.Name(), lname, uname) -} -func (c *SliceIntersectionConstraint) String() string { - return fmt.Sprintf("%s = %s.%t ⊓ %s", c.Y().Name(), c.X.Name(), c.Y().(*ssa.Sigma).Branch, c.I) -} -func (c *SliceLengthConstraint) String() string { - return fmt.Sprintf("%s = len(%s)", c.Y().Name(), c.X.Name()) -} -func (c *MakeSliceConstraint) String() string { - return fmt.Sprintf("%s = make(slice, %s)", c.Y().Name(), c.Size.Name()) -} -func (c *SliceIntervalConstraint) String() string { return fmt.Sprintf("%s = %s", c.Y().Name(), c.I) } - -func (c *SliceAppendConstraint) Eval(g *Graph) Range { - l1 := g.Range(c.A).(SliceInterval).Length - var l2 IntInterval - switch r := g.Range(c.B).(type) { - case SliceInterval: - l2 = r.Length - case StringInterval: - l2 = r.Length - default: - return SliceInterval{} - } - if !l1.IsKnown() || !l2.IsKnown() { - return SliceInterval{} - } - return SliceInterval{ - Length: l1.Add(l2), - } -} -func (c *SliceSliceConstraint) Eval(g *Graph) Range { - lr := NewIntInterval(NewZ(0), NewZ(0)) - if c.Lower != nil { - lr = g.Range(c.Lower).(IntInterval) - } - ur := g.Range(c.X).(SliceInterval).Length - if c.Upper != nil { - ur = g.Range(c.Upper).(IntInterval) - } - if !lr.IsKnown() || !ur.IsKnown() { - return SliceInterval{} - } - - ls := []Z{ - ur.Lower.Sub(lr.Lower), - ur.Upper.Sub(lr.Lower), - ur.Lower.Sub(lr.Upper), - ur.Upper.Sub(lr.Upper), - } - // TODO(dh): if we don't truncate lengths to 0 we might be able to - // easily detect slices with high < low. we'd need to treat -∞ - // specially, though. - for i, l := range ls { - if l.Sign() == -1 { - ls[i] = NewZ(0) - } - } - - return SliceInterval{ - Length: NewIntInterval(MinZ(ls...), MaxZ(ls...)), - } -} -func (c *ArraySliceConstraint) Eval(g *Graph) Range { - lr := NewIntInterval(NewZ(0), NewZ(0)) - if c.Lower != nil { - lr = g.Range(c.Lower).(IntInterval) - } - var l int64 - switch typ := c.X.Type().(type) { - case *types.Array: - l = typ.Len() - case *types.Pointer: - l = typ.Elem().(*types.Array).Len() - } - ur := NewIntInterval(NewZ(l), NewZ(l)) - if c.Upper != nil { - ur = g.Range(c.Upper).(IntInterval) - } - if !lr.IsKnown() || !ur.IsKnown() { - return SliceInterval{} - } - - ls := []Z{ - ur.Lower.Sub(lr.Lower), - ur.Upper.Sub(lr.Lower), - ur.Lower.Sub(lr.Upper), - ur.Upper.Sub(lr.Upper), - } - // TODO(dh): if we don't truncate lengths to 0 we might be able to - // easily detect slices with high < low. we'd need to treat -∞ - // specially, though. 
- for i, l := range ls { - if l.Sign() == -1 { - ls[i] = NewZ(0) - } - } - - return SliceInterval{ - Length: NewIntInterval(MinZ(ls...), MaxZ(ls...)), - } -} -func (c *SliceIntersectionConstraint) Eval(g *Graph) Range { - xi := g.Range(c.X).(SliceInterval) - if !xi.IsKnown() { - return c.I - } - return SliceInterval{ - Length: xi.Length.Intersection(c.I), - } -} -func (c *SliceLengthConstraint) Eval(g *Graph) Range { - i := g.Range(c.X).(SliceInterval).Length - if !i.IsKnown() { - return NewIntInterval(NewZ(0), PInfinity) - } - return i -} -func (c *MakeSliceConstraint) Eval(g *Graph) Range { - i, ok := g.Range(c.Size).(IntInterval) - if !ok { - return SliceInterval{NewIntInterval(NewZ(0), PInfinity)} - } - if i.Lower.Sign() == -1 { - i.Lower = NewZ(0) - } - return SliceInterval{i} -} -func (c *SliceIntervalConstraint) Eval(*Graph) Range { return SliceInterval{c.I} } diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/staticcheck/vrp/string.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/staticcheck/vrp/string.go deleted file mode 100644 index e05877f9f782..000000000000 --- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/staticcheck/vrp/string.go +++ /dev/null @@ -1,258 +0,0 @@ -package vrp - -import ( - "fmt" - "go/token" - "go/types" - - "honnef.co/go/tools/ssa" -) - -type StringInterval struct { - Length IntInterval -} - -func (s StringInterval) Union(other Range) Range { - i, ok := other.(StringInterval) - if !ok { - i = StringInterval{EmptyIntInterval} - } - if s.Length.Empty() || !s.Length.IsKnown() { - return i - } - if i.Length.Empty() || !i.Length.IsKnown() { - return s - } - return StringInterval{ - Length: s.Length.Union(i.Length).(IntInterval), - } -} - -func (s StringInterval) String() string { - return s.Length.String() -} - -func (s StringInterval) IsKnown() bool { - return s.Length.IsKnown() -} - -type StringSliceConstraint struct { - aConstraint - X ssa.Value - Lower ssa.Value - Upper ssa.Value -} - -type StringIntersectionConstraint struct { - aConstraint - ranges Ranges - A ssa.Value - B ssa.Value - Op token.Token - I IntInterval - resolved bool -} - -type StringConcatConstraint struct { - aConstraint - A ssa.Value - B ssa.Value -} - -type StringLengthConstraint struct { - aConstraint - X ssa.Value -} - -type StringIntervalConstraint struct { - aConstraint - I IntInterval -} - -func NewStringSliceConstraint(x, lower, upper, y ssa.Value) Constraint { - return &StringSliceConstraint{NewConstraint(y), x, lower, upper} -} -func NewStringIntersectionConstraint(a, b ssa.Value, op token.Token, ranges Ranges, y ssa.Value) Constraint { - return &StringIntersectionConstraint{ - aConstraint: NewConstraint(y), - ranges: ranges, - A: a, - B: b, - Op: op, - } -} -func NewStringConcatConstraint(a, b, y ssa.Value) Constraint { - return &StringConcatConstraint{NewConstraint(y), a, b} -} -func NewStringLengthConstraint(x ssa.Value, y ssa.Value) Constraint { - return &StringLengthConstraint{NewConstraint(y), x} -} -func NewStringIntervalConstraint(i IntInterval, y ssa.Value) Constraint { - return &StringIntervalConstraint{NewConstraint(y), i} -} - -func (c *StringSliceConstraint) Operands() []ssa.Value { - vs := []ssa.Value{c.X} - if c.Lower != nil { - vs = append(vs, c.Lower) - } - if c.Upper != nil { - vs = append(vs, c.Upper) - } - return vs -} -func (c *StringIntersectionConstraint) Operands() []ssa.Value { return []ssa.Value{c.A} } -func (c StringConcatConstraint) Operands() []ssa.Value { return []ssa.Value{c.A, c.B} } -func (c *StringLengthConstraint) Operands() 
[]ssa.Value { return []ssa.Value{c.X} } -func (s *StringIntervalConstraint) Operands() []ssa.Value { return nil } - -func (c *StringSliceConstraint) String() string { - var lname, uname string - if c.Lower != nil { - lname = c.Lower.Name() - } - if c.Upper != nil { - uname = c.Upper.Name() - } - return fmt.Sprintf("%s[%s:%s]", c.X.Name(), lname, uname) -} -func (c *StringIntersectionConstraint) String() string { - return fmt.Sprintf("%s = %s %s %s (%t branch)", c.Y().Name(), c.A.Name(), c.Op, c.B.Name(), c.Y().(*ssa.Sigma).Branch) -} -func (c StringConcatConstraint) String() string { - return fmt.Sprintf("%s = %s + %s", c.Y().Name(), c.A.Name(), c.B.Name()) -} -func (c *StringLengthConstraint) String() string { - return fmt.Sprintf("%s = len(%s)", c.Y().Name(), c.X.Name()) -} -func (c *StringIntervalConstraint) String() string { return fmt.Sprintf("%s = %s", c.Y().Name(), c.I) } - -func (c *StringSliceConstraint) Eval(g *Graph) Range { - lr := NewIntInterval(NewZ(0), NewZ(0)) - if c.Lower != nil { - lr = g.Range(c.Lower).(IntInterval) - } - ur := g.Range(c.X).(StringInterval).Length - if c.Upper != nil { - ur = g.Range(c.Upper).(IntInterval) - } - if !lr.IsKnown() || !ur.IsKnown() { - return StringInterval{} - } - - ls := []Z{ - ur.Lower.Sub(lr.Lower), - ur.Upper.Sub(lr.Lower), - ur.Lower.Sub(lr.Upper), - ur.Upper.Sub(lr.Upper), - } - // TODO(dh): if we don't truncate lengths to 0 we might be able to - // easily detect slices with high < low. we'd need to treat -∞ - // specially, though. - for i, l := range ls { - if l.Sign() == -1 { - ls[i] = NewZ(0) - } - } - - return StringInterval{ - Length: NewIntInterval(MinZ(ls...), MaxZ(ls...)), - } -} -func (c *StringIntersectionConstraint) Eval(g *Graph) Range { - var l IntInterval - switch r := g.Range(c.A).(type) { - case StringInterval: - l = r.Length - case IntInterval: - l = r - } - - if !l.IsKnown() { - return StringInterval{c.I} - } - return StringInterval{ - Length: l.Intersection(c.I), - } -} -func (c StringConcatConstraint) Eval(g *Graph) Range { - i1, i2 := g.Range(c.A).(StringInterval), g.Range(c.B).(StringInterval) - if !i1.Length.IsKnown() || !i2.Length.IsKnown() { - return StringInterval{} - } - return StringInterval{ - Length: i1.Length.Add(i2.Length), - } -} -func (c *StringLengthConstraint) Eval(g *Graph) Range { - i := g.Range(c.X).(StringInterval).Length - if !i.IsKnown() { - return NewIntInterval(NewZ(0), PInfinity) - } - return i -} -func (c *StringIntervalConstraint) Eval(*Graph) Range { return StringInterval{c.I} } - -func (c *StringIntersectionConstraint) Futures() []ssa.Value { - return []ssa.Value{c.B} -} - -func (c *StringIntersectionConstraint) Resolve() { - if (c.A.Type().Underlying().(*types.Basic).Info() & types.IsString) != 0 { - // comparing two strings - r, ok := c.ranges[c.B].(StringInterval) - if !ok { - c.I = NewIntInterval(NewZ(0), PInfinity) - return - } - switch c.Op { - case token.EQL: - c.I = r.Length - case token.GTR, token.GEQ: - c.I = NewIntInterval(r.Length.Lower, PInfinity) - case token.LSS, token.LEQ: - c.I = NewIntInterval(NewZ(0), r.Length.Upper) - case token.NEQ: - default: - panic("unsupported op " + c.Op.String()) - } - } else { - r, ok := c.ranges[c.B].(IntInterval) - if !ok { - c.I = NewIntInterval(NewZ(0), PInfinity) - return - } - // comparing two lengths - switch c.Op { - case token.EQL: - c.I = r - case token.GTR: - c.I = NewIntInterval(r.Lower.Add(NewZ(1)), PInfinity) - case token.GEQ: - c.I = NewIntInterval(r.Lower, PInfinity) - case token.LSS: - c.I = NewIntInterval(NInfinity, 
r.Upper.Sub(NewZ(1)))
-		case token.LEQ:
-			c.I = NewIntInterval(NInfinity, r.Upper)
-		case token.NEQ:
-		default:
-			panic("unsupported op " + c.Op.String())
-		}
-	}
-}
-
-func (c *StringIntersectionConstraint) IsKnown() bool {
-	return c.I.IsKnown()
-}
-
-func (c *StringIntersectionConstraint) MarkUnresolved() {
-	c.resolved = false
-}
-
-func (c *StringIntersectionConstraint) MarkResolved() {
-	c.resolved = true
-}
-
-func (c *StringIntersectionConstraint) IsResolved() bool {
-	return c.resolved
-}
diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/staticcheck/vrp/vrp.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/staticcheck/vrp/vrp.go
deleted file mode 100644
index cb17f042ac3c..000000000000
--- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/staticcheck/vrp/vrp.go
+++ /dev/null
@@ -1,1049 +0,0 @@
-package vrp
-
-// TODO(dh) widening and narrowing have a lot of code in common. Make
-// it reusable.
-
-import (
-	"fmt"
-	"go/constant"
-	"go/token"
-	"go/types"
-	"math/big"
-	"sort"
-	"strings"
-
-	"honnef.co/go/tools/ssa"
-)
-
-type Future interface {
-	Constraint
-	Futures() []ssa.Value
-	Resolve()
-	IsKnown() bool
-	MarkUnresolved()
-	MarkResolved()
-	IsResolved() bool
-}
-
-type Range interface {
-	Union(other Range) Range
-	IsKnown() bool
-}
-
-type Constraint interface {
-	Y() ssa.Value
-	isConstraint()
-	String() string
-	Eval(*Graph) Range
-	Operands() []ssa.Value
-}
-
-type aConstraint struct {
-	y ssa.Value
-}
-
-func NewConstraint(y ssa.Value) aConstraint {
-	return aConstraint{y}
-}
-
-func (aConstraint) isConstraint() {}
-func (c aConstraint) Y() ssa.Value { return c.y }
-
-type PhiConstraint struct {
-	aConstraint
-	Vars []ssa.Value
-}
-
-func NewPhiConstraint(vars []ssa.Value, y ssa.Value) Constraint {
-	uniqm := map[ssa.Value]struct{}{}
-	for _, v := range vars {
-		uniqm[v] = struct{}{}
-	}
-	var uniq []ssa.Value
-	for v := range uniqm {
-		uniq = append(uniq, v)
-	}
-	return &PhiConstraint{
-		aConstraint: NewConstraint(y),
-		Vars:        uniq,
-	}
-}
-
-func (c *PhiConstraint) Operands() []ssa.Value {
-	return c.Vars
-}
-
-func (c *PhiConstraint) Eval(g *Graph) Range {
-	i := Range(nil)
-	for _, v := range c.Vars {
-		i = g.Range(v).Union(i)
-	}
-	return i
-}
-
-func (c *PhiConstraint) String() string {
-	names := make([]string, len(c.Vars))
-	for i, v := range c.Vars {
-		names[i] = v.Name()
-	}
-	return fmt.Sprintf("%s = φ(%s)", c.Y().Name(), strings.Join(names, ", "))
-}
-
-func isSupportedType(typ types.Type) bool {
-	switch typ := typ.Underlying().(type) {
-	case *types.Basic:
-		switch typ.Kind() {
-		case types.String, types.UntypedString:
-			return true
-		default:
-			if (typ.Info() & types.IsInteger) == 0 {
-				return false
-			}
-		}
-	case *types.Chan:
-		return true
-	case *types.Slice:
-		return true
-	default:
-		return false
-	}
-	return true
-}
-
-func ConstantToZ(c constant.Value) Z {
-	s := constant.ToInt(c).ExactString()
-	n := &big.Int{}
-	n.SetString(s, 10)
-	return NewBigZ(n)
-}
-
-func sigmaInteger(g *Graph, ins *ssa.Sigma, cond *ssa.BinOp, ops []*ssa.Value) Constraint {
-	op := cond.Op
-	if !ins.Branch {
-		op = (invertToken(op))
-	}
-
-	switch op {
-	case token.EQL, token.GTR, token.GEQ, token.LSS, token.LEQ:
-	default:
-		return nil
-	}
-	var a, b ssa.Value
-	if (*ops[0]) == ins.X {
-		a = *ops[0]
-		b = *ops[1]
-	} else {
-		a = *ops[1]
-		b = *ops[0]
-		op = flipToken(op)
-	}
-	return NewIntIntersectionConstraint(a, b, op, g.ranges, ins)
-}
-
-func sigmaString(g *Graph, ins *ssa.Sigma, cond *ssa.BinOp, ops []*ssa.Value) Constraint {
-	op := cond.Op
-	if !ins.Branch {
-		op = (invertToken(op))
-	}
-
-	switch op {
-	case token.EQL, token.GTR, token.GEQ, token.LSS, token.LEQ:
-	default:
-		return nil
-	}
-
-	if ((*ops[0]).Type().Underlying().(*types.Basic).Info() & types.IsString) == 0 {
-		var a, b ssa.Value
-		call, ok := (*ops[0]).(*ssa.Call)
-		if ok && call.Common().Args[0] == ins.X {
-			a = *ops[0]
-			b = *ops[1]
-		} else {
-			a = *ops[1]
-			b = *ops[0]
-			op = flipToken(op)
-		}
-		return NewStringIntersectionConstraint(a, b, op, g.ranges, ins)
-	}
-	var a, b ssa.Value
-	if (*ops[0]) == ins.X {
-		a = *ops[0]
-		b = *ops[1]
-	} else {
-		a = *ops[1]
-		b = *ops[0]
-		op = flipToken(op)
-	}
-	return NewStringIntersectionConstraint(a, b, op, g.ranges, ins)
-}
-
-func sigmaSlice(g *Graph, ins *ssa.Sigma, cond *ssa.BinOp, ops []*ssa.Value) Constraint {
-	// TODO(dh) sigmaSlice and sigmaString are a lot alike. Can they
-	// be merged?
-	//
-	// XXX support futures
-
-	op := cond.Op
-	if !ins.Branch {
-		op = (invertToken(op))
-	}
-
-	k, ok := (*ops[1]).(*ssa.Const)
-	// XXX investigate in what cases this wouldn't be a Const
-	//
-	// XXX what if left and right are swapped?
-	if !ok {
-		return nil
-	}
-
-	call, ok := (*ops[0]).(*ssa.Call)
-	if !ok {
-		return nil
-	}
-	builtin, ok := call.Common().Value.(*ssa.Builtin)
-	if !ok {
-		return nil
-	}
-	if builtin.Name() != "len" {
-		return nil
-	}
-	callops := call.Operands(nil)
-
-	v := ConstantToZ(k.Value)
-	c := NewSliceIntersectionConstraint(*callops[1], IntInterval{}, ins).(*SliceIntersectionConstraint)
-	switch op {
-	case token.EQL:
-		c.I = NewIntInterval(v, v)
-	case token.GTR, token.GEQ:
-		off := int64(0)
-		if cond.Op == token.GTR {
-			off = 1
-		}
-		c.I = NewIntInterval(
-			v.Add(NewZ(off)),
-			PInfinity,
-		)
-	case token.LSS, token.LEQ:
-		off := int64(0)
-		if cond.Op == token.LSS {
-			off = -1
-		}
-		c.I = NewIntInterval(
-			NInfinity,
-			v.Add(NewZ(off)),
-		)
-	default:
-		return nil
-	}
-	return c
-}
-
-func BuildGraph(f *ssa.Function) *Graph {
-	g := &Graph{
-		Vertices: map[interface{}]*Vertex{},
-		ranges:   Ranges{},
-	}
-
-	var cs []Constraint
-
-	ops := make([]*ssa.Value, 16)
-	seen := map[ssa.Value]bool{}
-	for _, block := range f.Blocks {
-		for _, ins := range block.Instrs {
-			ops = ins.Operands(ops[:0])
-			for _, op := range ops {
-				if c, ok := (*op).(*ssa.Const); ok {
-					if seen[c] {
-						continue
-					}
-					seen[c] = true
-					if c.Value == nil {
-						switch c.Type().Underlying().(type) {
-						case *types.Slice:
-							cs = append(cs, NewSliceIntervalConstraint(NewIntInterval(NewZ(0), NewZ(0)), c))
-						}
-						continue
-					}
-					switch c.Value.Kind() {
-					case constant.Int:
-						v := ConstantToZ(c.Value)
-						cs = append(cs, NewIntIntervalConstraint(NewIntInterval(v, v), c))
-					case constant.String:
-						s := constant.StringVal(c.Value)
-						n := NewZ(int64(len(s)))
-						cs = append(cs, NewStringIntervalConstraint(NewIntInterval(n, n), c))
-					}
-				}
-			}
-		}
-	}
-	for _, block := range f.Blocks {
-		for _, ins := range block.Instrs {
-			switch ins := ins.(type) {
-			case *ssa.Convert:
-				switch v := ins.Type().Underlying().(type) {
-				case *types.Basic:
-					if (v.Info() & types.IsInteger) == 0 {
-						continue
-					}
-					cs = append(cs, NewIntConversionConstraint(ins.X, ins))
-				}
-			case *ssa.Call:
-				if static := ins.Common().StaticCallee(); static != nil {
-					if fn, ok := static.Object().(*types.Func); ok {
-						switch fn.FullName() {
-						case "bytes.Index", "bytes.IndexAny", "bytes.IndexByte",
-							"bytes.IndexFunc", "bytes.IndexRune", "bytes.LastIndex",
-							"bytes.LastIndexAny", "bytes.LastIndexByte", "bytes.LastIndexFunc",
-							"strings.Index", "strings.IndexAny", "strings.IndexByte",
-							"strings.IndexFunc", "strings.IndexRune", "strings.LastIndex",
-							"strings.LastIndexAny", "strings.LastIndexByte", "strings.LastIndexFunc":
-							// TODO(dh): instead of limiting by +∞,
-							// limit by the upper bound of the passed
-							// string
-							cs = append(cs, NewIntIntervalConstraint(NewIntInterval(NewZ(-1), PInfinity), ins))
-						case "bytes.Title", "bytes.ToLower", "bytes.ToTitle", "bytes.ToUpper",
-							"strings.Title", "strings.ToLower", "strings.ToTitle", "strings.ToUpper":
-							cs = append(cs, NewCopyConstraint(ins.Common().Args[0], ins))
-						case "bytes.ToLowerSpecial", "bytes.ToTitleSpecial", "bytes.ToUpperSpecial",
-							"strings.ToLowerSpecial", "strings.ToTitleSpecial", "strings.ToUpperSpecial":
-							cs = append(cs, NewCopyConstraint(ins.Common().Args[1], ins))
-						case "bytes.Compare", "strings.Compare":
-							cs = append(cs, NewIntIntervalConstraint(NewIntInterval(NewZ(-1), NewZ(1)), ins))
-						case "bytes.Count", "strings.Count":
-							// TODO(dh): instead of limiting by +∞,
-							// limit by the upper bound of the passed
-							// string.
-							cs = append(cs, NewIntIntervalConstraint(NewIntInterval(NewZ(0), PInfinity), ins))
-						case "bytes.Map", "bytes.TrimFunc", "bytes.TrimLeft", "bytes.TrimLeftFunc",
-							"bytes.TrimRight", "bytes.TrimRightFunc", "bytes.TrimSpace",
-							"strings.Map", "strings.TrimFunc", "strings.TrimLeft", "strings.TrimLeftFunc",
-							"strings.TrimRight", "strings.TrimRightFunc", "strings.TrimSpace":
-							// TODO(dh): lower = 0, upper = upper of passed string
-						case "bytes.TrimPrefix", "bytes.TrimSuffix",
-							"strings.TrimPrefix", "strings.TrimSuffix":
-							// TODO(dh) range between "unmodified" and len(cutset) removed
-						case "(*bytes.Buffer).Cap", "(*bytes.Buffer).Len", "(*bytes.Reader).Len", "(*bytes.Reader).Size":
-							cs = append(cs, NewIntIntervalConstraint(NewIntInterval(NewZ(0), PInfinity), ins))
-						}
-					}
-				}
-				builtin, ok := ins.Common().Value.(*ssa.Builtin)
-				ops := ins.Operands(nil)
-				if !ok {
-					continue
-				}
-				switch builtin.Name() {
-				case "len":
-					switch op1 := (*ops[1]).Type().Underlying().(type) {
-					case *types.Basic:
-						if op1.Kind() == types.String || op1.Kind() == types.UntypedString {
-							cs = append(cs, NewStringLengthConstraint(*ops[1], ins))
-						}
-					case *types.Slice:
-						cs = append(cs, NewSliceLengthConstraint(*ops[1], ins))
-					}
-
-				case "append":
-					cs = append(cs, NewSliceAppendConstraint(ins.Common().Args[0], ins.Common().Args[1], ins))
-				}
-			case *ssa.BinOp:
-				ops := ins.Operands(nil)
-				basic, ok := (*ops[0]).Type().Underlying().(*types.Basic)
-				if !ok {
-					continue
-				}
-				switch basic.Kind() {
-				case types.Int, types.Int8, types.Int16, types.Int32, types.Int64,
-					types.Uint, types.Uint8, types.Uint16, types.Uint32, types.Uint64, types.UntypedInt:
-					fns := map[token.Token]func(ssa.Value, ssa.Value, ssa.Value) Constraint{
-						token.ADD: NewIntAddConstraint,
-						token.SUB: NewIntSubConstraint,
-						token.MUL: NewIntMulConstraint,
-						// XXX support QUO, REM, SHL, SHR
-					}
-					fn, ok := fns[ins.Op]
-					if ok {
-						cs = append(cs, fn(*ops[0], *ops[1], ins))
-					}
-				case types.String, types.UntypedString:
-					if ins.Op == token.ADD {
-						cs = append(cs, NewStringConcatConstraint(*ops[0], *ops[1], ins))
-					}
-				}
-			case *ssa.Slice:
-				typ := ins.X.Type().Underlying()
-				switch typ := typ.(type) {
-				case *types.Basic:
-					cs = append(cs, NewStringSliceConstraint(ins.X, ins.Low, ins.High, ins))
-				case *types.Slice:
-					cs = append(cs, NewSliceSliceConstraint(ins.X, ins.Low, ins.High, ins))
-				case *types.Array:
-					cs = append(cs, NewArraySliceConstraint(ins.X, ins.Low, ins.High, ins))
-				case *types.Pointer:
-					if _, ok := typ.Elem().(*types.Array); !ok {
-						continue
-					}
-					cs = append(cs, NewArraySliceConstraint(ins.X, ins.Low, ins.High, ins))
-				}
-			case *ssa.Phi:
-				if !isSupportedType(ins.Type()) {
-					continue
-				}
-				ops := ins.Operands(nil)
-				dops := make([]ssa.Value, len(ops))
-				for i, op := range ops {
-					dops[i] = *op
-				}
-				cs = append(cs, NewPhiConstraint(dops, ins))
-			case *ssa.Sigma:
-				pred := ins.Block().Preds[0]
-				instrs := pred.Instrs
-				cond, ok := instrs[len(instrs)-1].(*ssa.If).Cond.(*ssa.BinOp)
-				ops := cond.Operands(nil)
-				if !ok {
-					continue
-				}
-				switch typ := ins.Type().Underlying().(type) {
-				case *types.Basic:
-					var c Constraint
-					switch typ.Kind() {
-					case types.Int, types.Int8, types.Int16, types.Int32, types.Int64,
-						types.Uint, types.Uint8, types.Uint16, types.Uint32, types.Uint64, types.UntypedInt:
-						c = sigmaInteger(g, ins, cond, ops)
-					case types.String, types.UntypedString:
-						c = sigmaString(g, ins, cond, ops)
-					}
-					if c != nil {
-						cs = append(cs, c)
-					}
-				case *types.Slice:
-					c := sigmaSlice(g, ins, cond, ops)
-					if c != nil {
-						cs = append(cs, c)
-					}
-				default:
-					//log.Printf("unsupported sigma type %T", typ) // XXX
-				}
-			case *ssa.MakeChan:
-				cs = append(cs, NewMakeChannelConstraint(ins.Size, ins))
-			case *ssa.MakeSlice:
-				cs = append(cs, NewMakeSliceConstraint(ins.Len, ins))
-			case *ssa.ChangeType:
-				switch ins.X.Type().Underlying().(type) {
-				case *types.Chan:
-					cs = append(cs, NewChannelChangeTypeConstraint(ins.X, ins))
-				}
-			}
-		}
-	}
-
-	for _, c := range cs {
-		if c == nil {
-			panic("nil constraint")
-		}
-		// If V is used in constraint C, then we create an edge V->C
-		for _, op := range c.Operands() {
-			g.AddEdge(op, c, false)
-		}
-		if c, ok := c.(Future); ok {
-			for _, op := range c.Futures() {
-				g.AddEdge(op, c, true)
-			}
-		}
-		// If constraint C defines variable V, then we create an edge
-		// C->V
-		g.AddEdge(c, c.Y(), false)
-	}
-
-	g.FindSCCs()
-	g.sccEdges = make([][]Edge, len(g.SCCs))
-	g.futures = make([][]Future, len(g.SCCs))
-	for _, e := range g.Edges {
-		g.sccEdges[e.From.SCC] = append(g.sccEdges[e.From.SCC], e)
-		if !e.control {
-			continue
-		}
-		if c, ok := e.To.Value.(Future); ok {
-			g.futures[e.From.SCC] = append(g.futures[e.From.SCC], c)
-		}
-	}
-	return g
-}
-
-func (g *Graph) Solve() Ranges {
-	var consts []Z
-	off := NewZ(1)
-	for _, n := range g.Vertices {
-		if c, ok := n.Value.(*ssa.Const); ok {
-			basic, ok := c.Type().Underlying().(*types.Basic)
-			if !ok {
-				continue
-			}
-			if (basic.Info() & types.IsInteger) != 0 {
-				z := ConstantToZ(c.Value)
-				consts = append(consts, z)
-				consts = append(consts, z.Add(off))
-				consts = append(consts, z.Sub(off))
-			}
-		}
-
-	}
-	sort.Sort(Zs(consts))
-
-	for scc, vertices := range g.SCCs {
-		n := 0
-		n = len(vertices)
-		if n == 1 {
-			g.resolveFutures(scc)
-			v := vertices[0]
-			if v, ok := v.Value.(ssa.Value); ok {
-				switch typ := v.Type().Underlying().(type) {
-				case *types.Basic:
-					switch typ.Kind() {
-					case types.String, types.UntypedString:
-						if !g.Range(v).(StringInterval).IsKnown() {
-							g.SetRange(v, StringInterval{NewIntInterval(NewZ(0), PInfinity)})
-						}
-					default:
-						if !g.Range(v).(IntInterval).IsKnown() {
-							g.SetRange(v, InfinityFor(v))
-						}
-					}
-				case *types.Chan:
-					if !g.Range(v).(ChannelInterval).IsKnown() {
-						g.SetRange(v, ChannelInterval{NewIntInterval(NewZ(0), PInfinity)})
-					}
-				case *types.Slice:
-					if !g.Range(v).(SliceInterval).IsKnown() {
-						g.SetRange(v, SliceInterval{NewIntInterval(NewZ(0), PInfinity)})
-					}
-				}
-			}
-			if c, ok := v.Value.(Constraint); ok {
-				g.SetRange(c.Y(), c.Eval(g))
-			}
-		} else {
-			uses := g.uses(scc)
-			entries := g.entries(scc)
-			for len(entries) > 0 {
-				v := entries[len(entries)-1]
-				entries = entries[:len(entries)-1]
-				for _, use := range uses[v] {
-					if g.widen(use, consts) {
-						entries = append(entries, use.Y())
-					}
-				}
-			}
-
-			g.resolveFutures(scc)
-
-			// XXX this seems to be necessary, but shouldn't be.
-			// removing it leads to nil pointer derefs; investigate
-			// where we're not setting values correctly.
-			for _, n := range vertices {
-				if v, ok := n.Value.(ssa.Value); ok {
-					i, ok := g.Range(v).(IntInterval)
-					if !ok {
-						continue
-					}
-					if !i.IsKnown() {
-						g.SetRange(v, InfinityFor(v))
-					}
-				}
-			}
-
-			actives := g.actives(scc)
-			for len(actives) > 0 {
-				v := actives[len(actives)-1]
-				actives = actives[:len(actives)-1]
-				for _, use := range uses[v] {
-					if g.narrow(use) {
-						actives = append(actives, use.Y())
-					}
-				}
-			}
-		}
-		// propagate scc
-		for _, edge := range g.sccEdges[scc] {
-			if edge.control {
-				continue
-			}
-			if edge.From.SCC == edge.To.SCC {
-				continue
-			}
-			if c, ok := edge.To.Value.(Constraint); ok {
-				g.SetRange(c.Y(), c.Eval(g))
-			}
-			if c, ok := edge.To.Value.(Future); ok {
-				if !c.IsKnown() {
-					c.MarkUnresolved()
-				}
-			}
-		}
-	}
-
-	for v, r := range g.ranges {
-		i, ok := r.(IntInterval)
-		if !ok {
-			continue
-		}
-		if (v.Type().Underlying().(*types.Basic).Info() & types.IsUnsigned) == 0 {
-			if i.Upper != PInfinity {
-				s := &types.StdSizes{
-					// XXX is it okay to assume the largest word size, or do we
-					// need to be platform specific?
-					WordSize: 8,
-					MaxAlign: 1,
-				}
-				bits := (s.Sizeof(v.Type()) * 8) - 1
-				n := big.NewInt(1)
-				n = n.Lsh(n, uint(bits))
-				upper, lower := &big.Int{}, &big.Int{}
-				upper.Sub(n, big.NewInt(1))
-				lower.Neg(n)
-
-				if i.Upper.Cmp(NewBigZ(upper)) == 1 {
-					i = NewIntInterval(NInfinity, PInfinity)
-				} else if i.Lower.Cmp(NewBigZ(lower)) == -1 {
-					i = NewIntInterval(NInfinity, PInfinity)
-				}
-			}
-		}
-
-		g.ranges[v] = i
-	}
-
-	return g.ranges
-}
-
-func VertexString(v *Vertex) string {
-	switch v := v.Value.(type) {
-	case Constraint:
-		return v.String()
-	case ssa.Value:
-		return v.Name()
-	case nil:
-		return "BUG: nil vertex value"
-	default:
-		panic(fmt.Sprintf("unexpected type %T", v))
-	}
-}
-
-type Vertex struct {
-	Value   interface{} // one of Constraint or ssa.Value
-	SCC     int
-	index   int
-	lowlink int
-	stack   bool
-
-	Succs []Edge
-}
-
-type Ranges map[ssa.Value]Range
-
-func (r Ranges) Get(x ssa.Value) Range {
-	if x == nil {
-		return nil
-	}
-	i, ok := r[x]
-	if !ok {
-		switch x := x.Type().Underlying().(type) {
-		case *types.Basic:
-			switch x.Kind() {
-			case types.String, types.UntypedString:
-				return StringInterval{}
-			default:
-				return IntInterval{}
-			}
-		case *types.Chan:
-			return ChannelInterval{}
-		case *types.Slice:
-			return SliceInterval{}
-		}
-	}
-	return i
-}
-
-type Graph struct {
-	Vertices map[interface{}]*Vertex
-	Edges    []Edge
-	SCCs     [][]*Vertex
-	ranges   Ranges
-
-	// map SCCs to futures
-	futures [][]Future
-	// map SCCs to edges
-	sccEdges [][]Edge
-}
-
-func (g Graph) Graphviz() string {
-	var lines []string
-	lines = append(lines, "digraph{")
-	ids := map[interface{}]int{}
-	i := 1
-	for _, v := range g.Vertices {
-		ids[v] = i
-		shape := "box"
-		if _, ok := v.Value.(ssa.Value); ok {
-			shape = "oval"
-		}
-		lines = append(lines, fmt.Sprintf(`n%d [shape="%s", label=%q, colorscheme=spectral11, style="filled", fillcolor="%d"]`,
-			i, shape, VertexString(v), (v.SCC%11)+1))
-		i++
-	}
-	for _, e := range g.Edges {
-		style := "solid"
-		if e.control {
-			style = "dashed"
-		}
-		lines = append(lines, fmt.Sprintf(`n%d -> n%d [style="%s"]`, ids[e.From], ids[e.To], style))
-	}
-	lines = append(lines, "}")
-	return strings.Join(lines, "\n")
-}
-
-func (g *Graph) SetRange(x ssa.Value, r Range) {
-	g.ranges[x] = r
-}
-
-func (g *Graph) Range(x ssa.Value) Range {
-	return g.ranges.Get(x)
-}
-
-func (g *Graph) widen(c Constraint, consts []Z) bool {
-	setRange := func(i Range) {
-		g.SetRange(c.Y(), i)
-	}
-	widenIntInterval := func(oi, ni IntInterval) (IntInterval, bool) {
-		if !ni.IsKnown() {
-			return oi, false
-		}
-		nlc := NInfinity
-		nuc := PInfinity
-		for _, co := range consts {
-			if co.Cmp(ni.Lower) <= 0 {
-				nlc = co
-				break
-			}
-		}
-		for _, co := range consts {
-			if co.Cmp(ni.Upper) >= 0 {
-				nuc = co
-				break
-			}
-		}
-
-		if !oi.IsKnown() {
-			return ni, true
-		}
-		if ni.Lower.Cmp(oi.Lower) == -1 && ni.Upper.Cmp(oi.Upper) == 1 {
-			return NewIntInterval(nlc, nuc), true
-		}
-		if ni.Lower.Cmp(oi.Lower) == -1 {
-			return NewIntInterval(nlc, oi.Upper), true
-		}
-		if ni.Upper.Cmp(oi.Upper) == 1 {
-			return NewIntInterval(oi.Lower, nuc), true
-		}
-		return oi, false
-	}
-	switch oi := g.Range(c.Y()).(type) {
-	case IntInterval:
-		ni := c.Eval(g).(IntInterval)
-		si, changed := widenIntInterval(oi, ni)
-		if changed {
-			setRange(si)
-			return true
-		}
-		return false
-	case StringInterval:
-		ni := c.Eval(g).(StringInterval)
-		si, changed := widenIntInterval(oi.Length, ni.Length)
-		if changed {
-			setRange(StringInterval{si})
-			return true
-		}
-		return false
-	case SliceInterval:
-		ni := c.Eval(g).(SliceInterval)
-		si, changed := widenIntInterval(oi.Length, ni.Length)
-		if changed {
-			setRange(SliceInterval{si})
-			return true
-		}
-		return false
-	default:
-		return false
-	}
-}
-
-func (g *Graph) narrow(c Constraint) bool {
-	narrowIntInterval := func(oi, ni IntInterval) (IntInterval, bool) {
-		oLower := oi.Lower
-		oUpper := oi.Upper
-		nLower := ni.Lower
-		nUpper := ni.Upper
-
-		if oLower == NInfinity && nLower != NInfinity {
-			return NewIntInterval(nLower, oUpper), true
-		}
-		if oUpper == PInfinity && nUpper != PInfinity {
-			return NewIntInterval(oLower, nUpper), true
-		}
-		if oLower.Cmp(nLower) == 1 {
-			return NewIntInterval(nLower, oUpper), true
-		}
-		if oUpper.Cmp(nUpper) == -1 {
-			return NewIntInterval(oLower, nUpper), true
-		}
-		return oi, false
-	}
-	switch oi := g.Range(c.Y()).(type) {
-	case IntInterval:
-		ni := c.Eval(g).(IntInterval)
-		si, changed := narrowIntInterval(oi, ni)
-		if changed {
-			g.SetRange(c.Y(), si)
-			return true
-		}
-		return false
-	case StringInterval:
-		ni := c.Eval(g).(StringInterval)
-		si, changed := narrowIntInterval(oi.Length, ni.Length)
-		if changed {
-			g.SetRange(c.Y(), StringInterval{si})
-			return true
-		}
-		return false
-	case SliceInterval:
-		ni := c.Eval(g).(SliceInterval)
-		si, changed := narrowIntInterval(oi.Length, ni.Length)
-		if changed {
-			g.SetRange(c.Y(), SliceInterval{si})
-			return true
-		}
-		return false
-	default:
-		return false
-	}
-}
-
-func (g *Graph) resolveFutures(scc int) {
-	for _, c := range g.futures[scc] {
-		c.Resolve()
-	}
-}
-
-func (g *Graph) entries(scc int) []ssa.Value {
-	var entries []ssa.Value
-	for _, n := range g.Vertices {
-		if n.SCC != scc {
-			continue
-		}
-		if v, ok := n.Value.(ssa.Value); ok {
-			// XXX avoid quadratic runtime
-			//
-			// XXX I cannot think of any code where the future and its
-			// variables aren't in the same SCC, in which case this
-			// code isn't very useful (the variables won't be resolved
-			// yet). Before we have a cross-SCC example, however, we
-			// can't really verify that this code is working
-			// correctly, or indeed doing anything useful.
-			for _, on := range g.Vertices {
-				if c, ok := on.Value.(Future); ok {
-					if c.Y() == v {
-						if !c.IsResolved() {
-							g.SetRange(c.Y(), c.Eval(g))
-							c.MarkResolved()
-						}
-						break
-					}
-				}
-			}
-			if g.Range(v).IsKnown() {
-				entries = append(entries, v)
-			}
-		}
-	}
-	return entries
-}
-
-func (g *Graph) uses(scc int) map[ssa.Value][]Constraint {
-	m := map[ssa.Value][]Constraint{}
-	for _, e := range g.sccEdges[scc] {
-		if e.control {
-			continue
-		}
-		if v, ok := e.From.Value.(ssa.Value); ok {
-			c := e.To.Value.(Constraint)
-			sink := c.Y()
-			if g.Vertices[sink].SCC == scc {
-				m[v] = append(m[v], c)
-			}
-		}
-	}
-	return m
-}
-
-func (g *Graph) actives(scc int) []ssa.Value {
-	var actives []ssa.Value
-	for _, n := range g.Vertices {
-		if n.SCC != scc {
-			continue
-		}
-		if v, ok := n.Value.(ssa.Value); ok {
-			if _, ok := v.(*ssa.Const); !ok {
-				actives = append(actives, v)
-			}
-		}
-	}
-	return actives
-}
-
-func (g *Graph) AddEdge(from, to interface{}, ctrl bool) {
-	vf, ok := g.Vertices[from]
-	if !ok {
-		vf = &Vertex{Value: from}
-		g.Vertices[from] = vf
-	}
-	vt, ok := g.Vertices[to]
-	if !ok {
-		vt = &Vertex{Value: to}
-		g.Vertices[to] = vt
-	}
-	e := Edge{From: vf, To: vt, control: ctrl}
-	g.Edges = append(g.Edges, e)
-	vf.Succs = append(vf.Succs, e)
-}
-
-type Edge struct {
-	From, To *Vertex
-	control  bool
-}
-
-func (e Edge) String() string {
-	return fmt.Sprintf("%s -> %s", VertexString(e.From), VertexString(e.To))
-}
-
-func (g *Graph) FindSCCs() {
-	// use Tarjan to find the SCCs
-
-	index := 1
-	var s []*Vertex
-
-	scc := 0
-	var strongconnect func(v *Vertex)
-	strongconnect = func(v *Vertex) {
-		// set the depth index for v to the smallest unused index
-		v.index = index
-		v.lowlink = index
-		index++
-		s = append(s, v)
-		v.stack = true
-
-		for _, e := range v.Succs {
-			w := e.To
-			if w.index == 0 {
-				// successor w has not yet been visited; recurse on it
-				strongconnect(w)
-				if w.lowlink < v.lowlink {
-					v.lowlink = w.lowlink
-				}
-			} else if w.stack {
-				// successor w is in stack s and hence in the current scc
-				if w.index < v.lowlink {
-					v.lowlink = w.index
-				}
-			}
-		}
-
-		if v.lowlink == v.index {
-			for {
-				w := s[len(s)-1]
-				s = s[:len(s)-1]
-				w.stack = false
-				w.SCC = scc
-				if w == v {
-					break
-				}
-			}
-			scc++
-		}
-	}
-	for _, v := range g.Vertices {
-		if v.index == 0 {
-			strongconnect(v)
-		}
-	}
-
-	g.SCCs = make([][]*Vertex, scc)
-	for _, n := range g.Vertices {
-		n.SCC = scc - n.SCC - 1
-		g.SCCs[n.SCC] = append(g.SCCs[n.SCC], n)
-	}
-}
-
-func invertToken(tok token.Token) token.Token {
-	switch tok {
-	case token.LSS:
-		return token.GEQ
-	case token.GTR:
-		return token.LEQ
-	case token.EQL:
-		return token.NEQ
-	case token.NEQ:
-		return token.EQL
-	case token.GEQ:
-		return token.LSS
-	case token.LEQ:
-		return token.GTR
-	default:
-		panic(fmt.Sprintf("unsupported token %s", tok))
-	}
-}
-
-func flipToken(tok token.Token) token.Token {
-	switch tok {
-	case token.LSS:
-		return token.GTR
-	case token.GTR:
-		return token.LSS
-	case token.EQL:
-		return token.EQL
-	case token.NEQ:
-		return token.NEQ
-	case token.GEQ:
-		return token.LEQ
-	case token.LEQ:
-		return token.GEQ
-	default:
-		panic(fmt.Sprintf("unsupported token %s", tok))
-	}
-}
-
-type CopyConstraint struct {
-	aConstraint
-	X ssa.Value
-}
-
-func (c *CopyConstraint) String() string {
-	return fmt.Sprintf("%s = copy(%s)", c.Y().Name(), c.X.Name())
-}
-
-func (c *CopyConstraint) Eval(g *Graph) Range {
-	return g.Range(c.X)
-}
-
-func (c *CopyConstraint) Operands() []ssa.Value {
-	return []ssa.Value{c.X}
-}
-
-func NewCopyConstraint(x, y ssa.Value) Constraint {
-	return &CopyConstraint{
-		aConstraint: aConstraint{
-			y: y,
-		},
-		X: x,
-	}
-}
diff --git a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/version/version.go b/vertical-pod-autoscaler/vendor/honnef.co/go/tools/version/version.go
deleted file mode 100644
index 232cf7e74f34..000000000000
--- a/vertical-pod-autoscaler/vendor/honnef.co/go/tools/version/version.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package version
-
-import (
-	"fmt"
-	"os"
-	"path/filepath"
-)
-
-const Version = "devel"
-
-func Print() {
-	if Version == "devel" {
-		fmt.Printf("%s (no version)\n", filepath.Base(os.Args[0]))
-	} else {
-		fmt.Printf("%s %s\n", filepath.Base(os.Args[0]), Version)
-	}
-}